From dae95edaedbc7c5841cde6c2958d55426481b311 Mon Sep 17 00:00:00 2001 From: fupeng29 Date: Fri, 10 Nov 2023 14:34:55 +0800 Subject: [PATCH] Back to dev mode after v18.0.0 (#14475)... (#119) * fix conflicts * merge origin v18.0.0 * Back to dev mode after v18.0.0 (#14475) * Release of v18.0.0 (#14406) * [release-18.0] VReplication: Handle multiple streams in UpdateVReplicationWorkflow RPC (#14447) (#14468) * [release-18.0] VDiff: "show all" should only report vdiffs for the specified keyspace and workflow (#14442) (#14466) * [release-18.0] TestStreamMigrateMainflow: fix panic in test (#14425) * [release-18.0] vtgate: Allow more errors for the warning check (#14421) (#14423) * [release-18.0] servenv: Remove double close() logic (#14457) (#14459) * [release-18.0] viper: register dynamic config with both disk and live (#14453) (#14455) * [release-18.0] vtgate/engine: Fix race condition in join logic (#14435) (#14441) * tx_throttler: remove topo watchers metric (#14444) * [release-18.0] VDiff tablet selection: pick non-serving tablets in Reshard workflows (#14413) (#14418) * Code freeze of release-18.0 (#14405) * [release-18.0] Online DDL: lint DDL strategy flags (#14373) (#14399) * [release-18.0] tuple: serialized form (#14392) (#14394) * [release-18.0] schemadiff: fix missing `DROP CONSTRAINT` in duplicate/redundant constraints scenario. 
(#14387) (#14391) * [release-18.0] Performance Fixes for Vitess 18 (#14383) (#14393) * [release-18.0] Vtctld SwitchReads: fix bug where writes were also being switched as part of switching reads when all traffic was switched using SwitchTraffic (#14360) (#14379) * [release-18.0] VDiff: wait for shard streams of one table diff to complete for before starting that of the next table (#14345) (#14382) * [release-18.0] [Docker] Fix VTadmin build (#14363) (#14378) * [release-18.0] Fix backup on s3 like storage (#14311) (#14362) * [release-18.0] use aggregation engine over distinct engine when overlapping order by (#14359) (#14361) * [release-18.0] Automatic approval of `vitess-bot` clean backports (#14352) (#14357) * [release-18.0] evalengine: Misc bugs (#14351) (#14354) * [release-18.0] OnlineDDL: reduce vrepl_stress workload in forks (#14302) (#14349) * [release-18.0] VReplication: Add --all-cells flag to create sub-commands (#14341) (#14343) * [release-18.0] Incremental backup: fix race condition in reading 'mysqlbinlog' output (#14330) (#14335) * [release-18.0] release notes: edit summary for consistency (#14319) (#14320) * [release-18.0] Bump @babel/traverse from 7.21.4 to 7.23.2 in /web/vtadmin (#14304) (#14308) * [release-18.0] Rename Foreign Key enum values in VSchema and drop `FK_` prefix (#14274) (#14299) * [release-18.0] VReplication: error on vtctldclient commands w/o tablet types (#14294) (#14298) * [release-18.0] Make vtctldclient mount command more standard (#14281) (#14283) * [release-18.0] VReplication: Add traffic state to vtctldclient workflow status output (#14280) (#14282) * [release-18.0] fix: analyze statement parsing and planning (#14268) (#14275) * [release-18.0] Bypass cobra completion commands so they still function (#14217) (#14234) * [release-18.0] Bump golang.org/x/net from 0.14.0 to 0.17.0 (#14260) (#14264) * Add vtctldclient info to the 18.0 summary (#14259) * [release-18.0] Bump postcss from 8.4.21 to 8.4.31 in /web/vtadmin (#14173) 
(#14258) * [release-18.0] Bump github.com/cyphar/filepath-securejoin from 0.2.3 to 0.2.4 (#14239) (#14253) * [release-18.0] Tablet throttler: fix race condition by removing goroutine call (#14179) (#14198) * [release-18.0] fix: insert with negative value (#14244) (#14247) * [release-18.0] Fix anonymous paths in cobra code-gen (#14185) (#14238) * [release-18.0] Throttler: set timeouts on gRPC communication and on topo communication (#14165) (#14167) * [release-18.0] Move all examples to vtctldclient (#14226) (#14241) * [release-18.0] VReplication: Add missing info to vtctldclient workflow SHOW output (#14225) (#14240) * [release-18.0] Upgrade the Golang version to `go1.21.3` (#14230) * [release-18.0] Optimize the GetWorkflows RPC (#14212) (#14233) * [Release 18.0]: Online DDL: timeouts for all gRPC calls (#14182) (#14189) * [release-18.0] Migrate Materialize command to vtctldclient (#14184) (#14214) * [Release 18.0] Backport of #17174 (#14210) * [release-18.0] Upgrade the Golang version to `go1.21.2` (#14195) * [release-18.0] Migrate CreateLookupVindex and ExternalizeVindex to vtctldclient (#14086) (#14183) * Back to dev mode after `v18.0.0-rc1` release (#14169) * Release of v18.0.0-rc1 (#14136) * [release-18.0] docker: add dedicated vtorc container (#14126) (#14148) * [release-18.0] gen4: Support explicit column aliases on derived tables (#14129) (#14156) * Code freeze of release-18.0 (#14131) * VTGate FK stress tests suite: improvements (#14098) * DDL execution to commit open transaction (#14110) * Summary changes for foreign keys (#14112) * Move subqueries to use the operator model (#13750) * servenv: Allow for explicit bind address (#13188) * vtorc: add detected_problems counter (#13967) * fix bad copy-paste in zkctld docgen (#14123) * VDiff: Cleanup the controller for a VDiff before deleting it (#14107) * Remove deprecated flags before `v18.0.0` (#14071) * Remove FOSSA Test from CI until we can do it in a secure way (#14119) * go/cmd/vtbackup: wait for plugins 
to finish initializing (#14113) * bugfix: change column name and type to json (#14093) * `vtctld`/`vtorc`: improve reparenting stats (#13723) * E2E Fuzzing testing for foreign keys (#13980) * Backup/restore: provision and restore a tablet with point-in-time recovery flags (#13964) * anonymize homedirs in generated docs (#14101) * VDiff: properly split cell values in record when using TabletPicker (#14099) * miscellaneous cobras (#14069) * Reduce network pressure on multi row insert (#14064) * [cli] cobra zookeeper (#14094) * switch casing in onlineddl subcommand help text (#14091) * Fix Fk verification and update queries to accommodate for bindVariables being NULL (#14061) * actually test vtcombo (#14095) * VDiff: Migrate client command to vtctldclient (#13976) * ci: pool-related test flakyness (#14076) * Improve the rewriter to simplify more queries (#14059) * update docgen to embed commit ID in autogenerated doc frontmatter (#14056) * [CLI] cobra lots of things (#14007) * Add VSchema DDL support for dropping sequence and auto increment (#13882) * json: Fix quoting JSON keys (#14066) * Flakes: Address TestMigrate Failures (#12866) * [cli] migrate mysqlctl and mysqlctld to cobra (#13946) * evalengine: Mark UUID() function as non-constant (#14051) * Fix cascading Delete failure while using Prepared statements (#14048) * remove query_analyzer binary and release (#14055) * go/vt/mysqlctl: instrument s3 upload time (#12500) * Add session flag for stream execute grpc api (#14046) * Endtoend: stress tests for VTGate FOREIGN KEY support (#13799) * metrics: change vtbackup_duration_by_phase to binary-valued vtbackup_phase (#12973) * TableGC: support DROP VIEW (#14020) * Support arbitrary ZooKeeper config lines (#13829) * Bump protobufjs from 7.2.3 to 7.2.5 in /web/vtadmin (#13833) * Bump tough-cookie and @cypress/request in /vitess-mixin/e2e (#13768) * Remove excessive logging in transactions (#14021) * Fix bug in `fileNameFromPosition` test helper (#13778) * MoveTables 
Cancel: drop denied tables on target when dropping source/target tables (#14008) * java: update to latest dependencies for grpc and protobuf (#13996) * OnlineDDL: cleanup cancelled migration artifacts; support `--retain-artifacts=` DDL strategy flag (#14029) * moved timeout test to different package (#14028) * fix: cost to include subshard opcode (#14023) * VReplication VPlayer: set foreign_key_checks on initialization (#14013) * go/cmd/vtbackup: report replication status metrics during catch-up phase (#13995) * Fix the `SELECT` query we run on the child table to verify that update is allowed on a RESTRICT constraint (#13991) * fix data race in join engine primitive olap streaming mode execution (#14012) * test: added test to check binlogs to contain the cascade events (#13970) * Fix `TestLeftJoinUsingUnsharded` and remove instability when running E2E locally (#13973) * Enable failures in `tools/e2e_test_race.sh` and fix races (#13654) * Improve release process documentation (#14000) * Make `Static Code Checks Etc` fail if the `./changelog` folder is out-of-date (#14003) * Fix foreign key plan tests expectation (#13997) * Flakes: Add recently added 'select rows_copied' query to ignore list (#13993) * Foreign key cascade: retain "for update" lock on select query plans (#13985) * Fix `NOT IN` expression used in the SET NULL for a child table on an update (#13988) * consolidate docs (#13959) * VDiff: correct handling of default source and target cells (#13969) * [cobra] vtgate and vttablet (#13943) * handle large number of predicates without timing out (#13979) * Fix missing deprecated flags in `vttablet` and `vtgate` (#13975) * wrangler,workflow/workflow: materialize from intersecting source shards based on primary vindexes (#13782) * Cache v3 (#13939) * Implement Reshard in vtctldclient (#13792) * Disallow Insert with Duplicate key update and Replace Into queries on foreign key column, set locks on fk queries (#13953) * End to end testing suite for foreign keys 
(#13870) * copy over existing vreplication rows copied to local counter if resuming from another tablet (#13949) * vtctldclient OnlineDDL: support `throttle`, `unthrottle` (#13916) * Change internal vindex type recommendation for integrals to xxhash (#13956) * VTOrc converts a tablet to DRAINED type if it detects errant GTIDs on it (#13873) * Fix `ApplySchema --batch-size` with ` --allow-zero-in-date` (#13951) * Tablet throttler: empty list of probes on non-leader (#13926) * VReplication: Handle SQL NULL and JSON 'null' correctly for JSON columns (#13944) * [vtctld] more cobra binaries (#13930) * Merge branch 'planetscale-fk-verify-update-planning' * [main] Upgrade the Golang version to `go1.21.1` (#13933) * update fk error messages * cascade with new value should work * small refactor - removed unused code * return verify error based on type * store the verification type * update plan output test * small refactor * feat: add code to verify valdity of ON UPDATE RESTRICT foreign keys wherever required * Rewrite `USING` to `ON` condition for joins (#13931) * test: fix tests to reflect the recent changes * feat: add code to verify update with cascade run with foreign key checks 0 and validate all the foreign keys on vtgate * add where condition to fk verify query * changed parent verification query and accordingly change the engine primitive for foreign key constraint verification * foreign key verify operator and logical * refactor: add comment explaining map in input * add support for foreign key constraint verify on update * refactor: move DML logic to sql_builder.go (#13920) * OnlineDDL: fix nil 'completed_timestamp' for cancelled migrations (#13928) * Silence 'CheckThrottler' gRPC calls (#13925) * migrate vtorc to use cobra commands (#13917) * MoveTables: allow copying all tables in a single atomic copy phase cycle (#13137) * proto: Faster clone (#13914) * Properly support ignore_nulls in CreateLookupVindex (#13913) * [staticcheck] Last few staticchecks! 
(#13909) * icuregex: Update to ICU 73 (#13912) * gen4: Fast aggregations (#13904) * Use correct syntax in test (#13907) * Misc Local Install improvements. (#13446) * Consolidate helper functions for working with proto3 time messages (#13905) * Add vtsql flags to vtadmin (#13674) * MoveTables: add flag to specify that routing rules should not be created when a movetables workflow is created (#13895) * [staticcheck] miscellaneous tidying (#13892) * [misc] tidy imports (#13885) * Remove duplicate ACL check in tabletserver handleHTTPConsolidations (#13876) * vtexplain: Fix passing through context for cleanup (#13900) * [staticcheck] Cleanup deprecations (#13898) * inputs method to return additional information about the input primitive (#13883) * vttablet: do not notify `vtgate` about internal tables (#13897) * Skip launchable if the Pull Request is marked as a Draft (#13886) * vtctldclient: support OnlineDDL `complete`, `launch` commands (#13896) * collations: implement collation dumping as a docker image (#13879) * sqlparser: Tablespace option is case sensitive (#13884) * Clean up deprecated slice header usage and unused code (#13880) * [misc] Delete more unused functions, tidy up dupe imports (#13878) * collations: Refactor to separate basic collation information from data (#13868) * [wrangler] cleanup unused functions (#13867) * Fix setup order to avoid races (#13871) * Add Foreign key verify constraint engine primitive (#13848) * Foreign key cascade planning for DELETE and UPDATE queries (#13823) * Fix merge conflict with new tests (#13869) * vtctldclient OnlineDDL CANCEL (#13860) * Add leak checking for vtgate tests (#13835) * Fix for "text type with an unknown/unsupported collation cannot be hashed" error (#13852) * Go 1.21 cleanups (#13862) * VTGate Buffering: Use a more accurate heuristic for determining if we're doing a reshard (#13856) * docker/bootstrap: remove --no-cache flag (#13785) * Flakes: Improve reliability of vreplication_copy_parallel test 
(#13857) * [main] Upgrade the Golang version to `go1.21.0` (#13853) * Fix regular expression issue in Golang Upgrade and remove `release-14.0` from target branch (#13846) * Migrates most workflows to 4 and 16 cores Large GitHub-Hosted-Runners (#13845) * [onlineddl] retry and cleanup (#13830) * Bump word-wrap from 1.2.3 to 1.2.4 in /web/vtadmin (#13569) * Bump tough-cookie from 4.1.2 to 4.1.3 in /web/vtadmin (#13767) * Skip VTAdmin build in Docker tests (#13836) * Flakes: Synchronize access to logErrStacks in vterrors (#13827) * Flakes: VReplication unit tests: reduce goroutine leakage (#13824) * Remove explicit usage of etcd v2 (api and storage) (#13791) * Add OnlineDDL show support (#13738) * VReplication: Improve MoveTables Create Error Handling (#13737) * Add 2 new metrics with tablet type labels (#13521) * Add 2 more durability policies that allow RDONLY tablets to send semi-sync ACKs (#13698) * CI: Misc test improvements to limit failures with various runners (#13825) * Flakes: empty vtdataroot before starting a new vreplication e2e test (#13803) * Fix `BackupShard` to get its options from its own flags (#13813) * Flakes: skip flaky check that ETA for a VReplication VDiff2 Progress command is in the future. 
(#13804) * Copy release notes for v17.0.2 and v16.0.4 (#13811) * More union merging (#13743) * Add Foreign key Cascade engine primitive (#13802) * Add support for tuple as value type (#13800) * Foreign Keys: `UPDATE` planning (#13762) * Improving random query generation for endtoend testing (#13460) * Flakes: Delete VTDATAROOT files in reparent test teardown within CI (#13793) * vtgate: fix race condition iterating tables and views from schema tracker (#13673) * Fixing `backup_pitr` flaky tests via wait-for loop on topo reads (#13781) * Do not drain tablet in incremental backup (#13773) * Address vttablet memory usage with backups to Azure Blob Service (#13770) * Run auto golang upgrade only on vitessio/vitess (#13766) * Flakes: remove non-determinism from vtctldclient MoveTables unit test (#13765) * Minor --initialize-target-sequences followups (#13758) * CI: fix onlineddl_scheduler flakiness (#13754) * VReplication: Initialize Sequence Tables Used By Tables Being Moved (#13656) * Bump docker images to `bullseye` (#13664) * Use NodeJS v18 in VTAdmin Dockerfile (#13751) * Refactor Expression and Statement Simplifier (#13636) * Foreign Keys: `DELETE` planning (#13746) * build: Allow passing in custom -ldflags (#13748) * Point in time recovery: fix cross-tablet GTID evaluation (#13555) * Cache info schema table info (#13724) * BackupShard: support incremental backup (#13522) * OnlineDDL: support @@migration_context in vtgate session. 
Use if non-empty (#13675) * schemadiff: add time measure test for massive schema load and diff (#13697) * Vtgate: pass 'SHOW VITESS_MIGRATIONS' to tablet's query executor (#13726) * Move UNION planning to the operators (#13450) * Fix vtcombo DBDDL plugin race condition (#13117) * Foreign Keys: `INSERT` planning (#13676) * go/vt/vitessdriver: implement driver.{Connector,DriverContext} (#13704) * Backup: safe compressor/decompressor closure (#13668) * sqlparser: Track if original default value is a literal (#13730) * Solve RevertMigration.Comment read/write concurrency issue (#13700) * `ApplySchema`: support `--batch-size` flag in 'direct' strategy (#13693) * Fix closed channel `panic` in Online DDL cutover (#13729) * mysql: Refactor dependencies (#13688) * vtgate tablet gateway buffering: don't shutdown if not initialized (#13695) * [OnlineDDL] add label so break works as intended (#13691) * fastparse: Fix bug in overflow detection (#13702) * Refactor code to remove `evalengine` as a dependency of `VTOrc` (#13642) * `txthrottler`: remove `txThrottlerConfig` struct, rely on `tabletenv` (#13624) * [vtctldclient] flags need to be defined to be deprecated (#13681) * Add dry-run/monitoring-only mode for TxThrottler (#13604) * Enhancing VTGate buffering for MoveTables and Shard by Shard Migration (#13507) * Improvements to PRS (#13623) * Errant GTID Metrics Refactor (#13670) * Improve logging and renaming PrimaryTermStartTimestamp in vttablets (#13625) * Fix a couple of logs in VTOrc (#13667) * Throttler: exempt apps via `UpdateThrottlerConfig --throttle-app-exempt` (#13666) * Tablet throttler: inter-checks via gRPC (#13514) * Reroute 'ALTER VITESS_MIGRATION ... THROTTLE ...' 
through topo (#13511) * Build foreign key definition in schema tracker (#13657) * evalengine: Fix JSON weight string computation (#13669) * Throttler: verify deprecated flags are still allowed (#13615) * Backup & Restore: vtctldclient to support PITR flags (#13513) * `UpdateThrottlerConfig --unthrottle-app ...` (#13494) * tx throttler: healthcheck all cells if `--tx-throttler-healthcheck-cells` is undefined (#12477) * evalengine: Improve weight string support (#13658) * mysqlctl: Reduce logging for running commands (#13659) * Add v15.0.4, v16.0.3, and v17.0.1 changelogs (#13661) * [viper WatchConfig] platform-specific write to ensure callback fires exactly once (#13627) * go/mysql: switch to new API for x/exp/slices.SortFunc (#13644) * Per workload TxThrottler metrics (#13526) * VReplication: Make Source Tablet Selection More Robust (#13582) * Update known issues in `v16.x` and `v17.0.0` (#13618) * Augment VTOrc to also store the shard records and use it to better judge Primary recoveries (#13587) * icuregex: Lazy load ICU data into memory (#13640) * Ensure to call `servenv.Init` when needed (#13638) * GetSchema: limit concurrent operations (#13617) * check keyspace snapshot time if none specified for backup restores (#13557) * vtgate buffering logic: remove the deprecated healthcheck based implementation (#13584) * Fix type comparisons for Nullsafe* functions (#13605) * Reintroduce `TestReadOutdatedInstanceKeys` with debugging information (#13562) * mysqlctl: Remove noisy log line (#13599) * Vtctldclient MoveTables (#13015) * txthrottler: add metrics for topoWatcher and healthCheckStreamer (#13153) * Reduce usages of old horizon planning fallback (#13595) * Throttler: reintroduce deprecated flags so that deprecation actually works (#13597) * Incremental backup & recovery: restore-to-timestamp (#13270) * Fix potential panics due to "Fail in goroutine after test completed" (#13596) * ignore all error for views in engine reload (#13590) * vtgate table schema tracking 
to use GetSchema rpc (#13544) * MoveTables sequence e2e tests: change terminology to use basic vs simple everywhere for partial movetables workflows (#13435) * Skip VTAdmin build in more places (#13588) * Fix show character set (#13565) * Remove unused chromedriver (#13573) * fix TestQueryTimeoutWithTables flaky test (#13579) * CI: Fix make build related issues (#13583) * docker/mini: remove refs to orc configs (#13495) * stats: use *time.Ticker instead of time.After() (#13492) * VReplication: Ensure ROW events are sent within a transaction (#13547) * Add a `keyspace` configuration in the `vschema` for foreign key mode (#13553) * vreplication: Move to use collations package (#13566) * Flaky tests: Fix race in memory topo (#13559) * Fix flaky vtgate test TestInconsistentStateDetectedBuffering (#13560) * Flaky tests: Fix wrangler tests (#13568) * Optimize `make build` in `test.go` and in CI (#13567) * Unset the PREFIX environment variable when building VTAdmin (#13554) * vtctldclient: Add missing new backup option (#13543) * Skip flaky test `TestReadOutdatedInstanceKeys` (#13561) * Fix a number of encoding issues when evaluating expressions with the evalengine (#13509) * Deflake `TestPlannedReparentShardPromoteReplicaFail` (#13548) * [vipersync] deflake TestWatchConfig (#13545) * vtgate v3 planner removal (#13458) * Fix flakiness in VTOrc tests (#13489) * Replace deprecated `github.com/golang/mock` with `go.uber.org/mock` (#13512) * Fix dependencies in docker build script (#13520) * fix docgen for subcommands (#13518) * Merge pull request #13515 from planetscale/partial-movetables-traffic-status * Correct unit test * docker/k8s: add bookworm builds (#13436) * ignore ongoing backfill vindex from routing selection (#13505) * Adjust function names for improved clarity * More fixes related to partial traffic handling * Ignore unrelated shards in partial movetables workflow status * Better handling of vreplication setState() failure (#13488) * flags: Remove hardcoded 
runner paths (#13482) * viperutil: Remove potential cross site reflecting issue (#13483) * skip flaky test (#13501) * feat: remove --disable_active_reparents flag in vttablet-up.sh (#13504) * fix: error.as method usage to send pointer to the reference type expected. (#13496) * Fix `TestGatewayBufferingWhileReparenting` flakiness (#13469) * ApplySchema: deprecate '--allow_long_unavailability' flag (#10717) * Incremental backup: accept GTID position without 'MySQL56/' flavor prefix (#13474) * Deprecating and removing tablet throttler CLI flags and tests (#13246) * Online DDL: improved row estimation via ANALYE TABLE with --analyze-table strategy flag (#13352) * Tablet throttler: throttled app configuration via `vtctl UpdateThrottlerConfig` (#13351) * added no-commit-collection option to launchable record build command (#13490) * Fix remote VersionString API (#13484) * Fix logging by omitting the host and port in `SetReadOnly` (#13470) * `vtctl OnlineDDL`: complete command set (#12963) * Random selection of keyspace based on available tablet (#13359) * Improve and Fix Distinct Aggregation planner (#13466) * Fix ubi8.arm64.mysql80 build package mirrorserver error (#13431) * backup: Allow for upgrade safe backups (#13449) * Merge pull request #13468 from timvaillancourt/examples-compose-fix-consul-tag * Update a number of dependencies (#13031) * Enable Tcp keep alive and provide keep alive period setting (#13434) * rm mistaken commit * Update docker-compose.beginners.yml too * compose: fix `consul:latest` error * Add support for kill statement (#13371) * feat: remove excessive logging (#13459) * schema.Reload(): ignore column reading errors for views only, error for tables (#13442) * [CI] deflake viper sync tests (#13185) * mysql: introduce icuregex package (#13391) * Fix `Fakemysqldaemon` to store the host and port after `SetReplicationSource` call (#13439) * Examples: only terminate vtadmin if it was started (#13433) * [main] Upgrade-Downgrade Fix: 
Schema-initialization stuck on semi-sync ACKs while upgrading (#13411) (#13440) * Move more horizon planning to the operators (#13412) * Refactor backup_pitr into two distinct CI tests: builtin vs Xtrabackup (#13395) * Improve VTOrc logging statements, now that we have alias as a field (#13428) * vtorc: Cleanup more unused code (#13354) * VReplication Workflows: make sequence tables follow routing rules (#13238) * Ignore error while reading table data in Schema.Engine reload (#13421) * Improve time taken to run the examples by optimizing `vtadmin` build (#13262) * vtctl,vindexes: logs warnings and export stat for unknown vindex params (#13322) * Add end-of-life documentation + re-organize internal documentation (#13401) * Adding random query generation for endtoend testing of the Gen4 planner (#13260) * Aggregation engine refactor (#13378) * Deflake `TestQueryTimeoutWithDual` test (#13405) * Optimize release notes generation to use GitHub Milestones (#13398) * feat: don't run any reparent commands if the host is empty (#13396) * `vttestserver`: persist vschema changes in `--persistent_mode` (#13065) * Tablet throttler: only start watching SrvKeyspace once it's confirmed to exist (#13384) * Support views in BaseShowTablesWithSizes for MySQL 8.0 (#13394) * Fix incorrect output in release scripts (#13385) * Adds support for ANY_VALUE (#13342) * BaseShowTablesWithSizes: optimize MySQL 8.0 query (#13375) * Forward port of release notes changes from v17.0.0 GA (#13370) * feat: add timestamp to vtorc debug page (#13379) * Prevent resetting replication every time we set replication source (#13377) * Local example 101: idempotent on existing clusters (#13373) * governance: clean up language, link steering doc from first occurrence instead of from a random occurrence (#13337) * Add group_concat aggregation support (#13331) * Improve VTOrc failure detection to be able to better handle dead primary failures (#13190) * Improve lock action string (#13355) * Add RestorePosition 
and RestoredBackupTime as metrics to vttablet (#13339) * update link for reparenting guide (#13350) * Handle inconsistent state error in query buffering (#13333) * mysqlctl: Move more to use built in MySQL client (#13338) * VTOrc: Update the primary key for all the tables from `hostname, port` to `alias` (#13243) * docker/k8s: Cleanup done TODO (#13347) * bug: don't always wrap aggregation in coalesce (#13348) * Vttablet schema tracking: Fix _vt.schema_version corruption (#13045) * bugfixes: collection of fixes to bugs found while fuzzing (#13332) * Remove CI endtoend test for VReplication copy throttling (#13343) * Use sqlparser for all dynamic query building in VDiff2 (#13319) * txthrottler: verify config at vttablet startup, consolidate funcs (#13115) * mysqlctl: Use DBA connection for schema operations (#13178) * Bug fix: SQL queries erroring with message `unknown aggregation random` (#13330) * Support complex aggregation in Gen4's Operators (#13326) * [VTAdmin] Upgrade to use node 18.16.0 (#13288) * Cleanup unused Dockerfile entries (#13327) * Add metric for showing the errant GTIDs in VTOrc (#13281) * vtgate planner: HAVING in the new operator horizon planner (#13289) * vtgr: Remove deprecated vtgr (#13308) * mysqlctl: Correctly encode database and table names (#13312) * sqlparser: Add support for TIMESTAMPADD (#13314) * Refactor and add a comment to schema initialisation code (#13309) * Release notes for 17.0.0-rc2 (#13306) * remove os.Exit (#13310) * k8stopo: Remove the deprecated Kubernetes topo (#13303) * vindexes: return unknown params (#12951) * Operator planner refactor (#13294) * Deprecate VTGR (#13301) * k8stopo: Include deprecation warning (#13299) * [main] Upgrade the Golang version to `go1.20.5` (#13256) * evalengine: implement date/time math (#13274) * increase size of reparent_journal columns (#13287) * VReplication: Fix VDiff2 DeleteByUUID Query (#13255) * Miscellaneous code modifications based on observations made while doing a code 
walkthrough (#12873) * Set the number of threads for release notes generation with a flag (#13273) * Fix and Make aggregation planner handle aggregation functions better (#13228) * fix: ShardedRouting clone to clone slice of reference correctly (#13265) * Remove viper warnings from local examples (#13234) * Add flag to VTOrc to enable/disable its ability to run ERS (#13259) * Remove `out.txt` and add `release-17.0` to go upgrade automation (#13261) * build(deps-dev): bump vite from 4.2.1 to 4.2.3 in /web/vtadmin (#13240) * Copy v17.0.0-rc changelog to main (#13248) * fix: GetField to use existing session for query (#13219) * Fix flakiness in `TestDeadPrimaryRecoversImmediately` (#13232) * Augmenting the `GetSchema` RPC to also work for `Table` and `All` type of input (#13197) * Incremental backup and point in time recovery for XtraBackup (#13156) * VReplication: More intelligently manage vschema table entries on unsharded targets (#13220) * Use $hostname in vtadmin script as other scripts do (#13231) * Tablet throttler: throttler-config-via-topo defaults 'true', deprecation message for old flags (#13130) * [ci] add generator for templated flag testdata (#13150) * schemadiff: validating case-sensitive view names (#13208) * Add security audit report (#13221) * gentler warning message on config-not-found (#13215) * Bump the vitess version on main (#13212) * Handle DISTINCT with the new operators (#13201) * Gen4: move insert planner to gen4 (#12934) --- .github/CODEOWNERS | 3 +- .github/workflows/assign_milestone.yml | 2 +- .github/workflows/auto_approve_pr.yml | 23 + .../check_make_vtadmin_authz_testgen.yml | 2 +- .../check_make_vtadmin_web_proto.yml | 4 +- .github/workflows/cluster_endtoend_12.yml | 25 +- .github/workflows/cluster_endtoend_13.yml | 25 +- .github/workflows/cluster_endtoend_15.yml | 25 +- .github/workflows/cluster_endtoend_18.yml | 25 +- .github/workflows/cluster_endtoend_21.yml | 25 +- .github/workflows/cluster_endtoend_22.yml | 25 +- 
.../cluster_endtoend_backup_pitr.yml | 25 +- .../cluster_endtoend_backup_pitr_mysql57.yml | 25 +- ...luster_endtoend_backup_pitr_xtrabackup.yml | 151 + ...ndtoend_backup_pitr_xtrabackup_mysql57.yml | 175 + ...ter_endtoend_ers_prs_newfeatures_heavy.yml | 25 +- .../workflows/cluster_endtoend_mysql80.yml | 25 +- .../cluster_endtoend_mysql_server_vault.yml | 25 +- .../cluster_endtoend_onlineddl_ghost.yml | 25 +- ...uster_endtoend_onlineddl_ghost_mysql57.yml | 25 +- .../cluster_endtoend_onlineddl_revert.yml | 25 +- ...ster_endtoend_onlineddl_revert_mysql57.yml | 25 +- .../cluster_endtoend_onlineddl_scheduler.yml | 25 +- ...r_endtoend_onlineddl_scheduler_mysql57.yml | 25 +- .../cluster_endtoend_onlineddl_vrepl.yml | 25 +- ...uster_endtoend_onlineddl_vrepl_mysql57.yml | 25 +- ...luster_endtoend_onlineddl_vrepl_stress.yml | 25 +- ...ndtoend_onlineddl_vrepl_stress_mysql57.yml | 25 +- ..._endtoend_onlineddl_vrepl_stress_suite.yml | 25 +- ...d_onlineddl_vrepl_stress_suite_mysql57.yml | 25 +- ...cluster_endtoend_onlineddl_vrepl_suite.yml | 25 +- ...endtoend_onlineddl_vrepl_suite_mysql57.yml | 25 +- .../cluster_endtoend_schemadiff_vrepl.yml | 25 +- ...ster_endtoend_schemadiff_vrepl_mysql57.yml | 25 +- .../cluster_endtoend_tabletmanager_consul.yml | 25 +- ...cluster_endtoend_tabletmanager_tablegc.yml | 25 +- ...endtoend_tabletmanager_tablegc_mysql57.yml | 25 +- ..._endtoend_tabletmanager_throttler_topo.yml | 25 +- ...cluster_endtoend_topo_connection_cache.yml | 25 +- ...dtoend_vreplication_across_db_versions.yml | 25 +- .../cluster_endtoend_vreplication_basic.yml | 25 +- ...luster_endtoend_vreplication_cellalias.yml | 25 +- ...vreplication_migrate_vdiff2_convert_tz.yml | 25 +- ...luster_endtoend_vreplication_multicell.yml | 25 +- ...vreplication_partial_movetables_basic.yml} | 59 +- ...plication_partial_movetables_sequences.yml | 170 + .../cluster_endtoend_vreplication_v2.yml | 25 +- .../cluster_endtoend_vstream_failover.yml | 25 +- ...r_endtoend_vstream_stoponreshard_false.yml 
| 25 +- ...er_endtoend_vstream_stoponreshard_true.yml | 25 +- ...dtoend_vstream_with_keyspaces_to_watch.yml | 25 +- .../workflows/cluster_endtoend_vtbackup.yml | 25 +- ..._vtctlbackup_sharded_clustertest_heavy.yml | 25 +- .../cluster_endtoend_vtgate_concurrentdml.yml | 25 +- ...ter_endtoend_vtgate_foreignkey_stress.yml} | 35 +- .../cluster_endtoend_vtgate_gen4.yml | 25 +- .../cluster_endtoend_vtgate_general_heavy.yml | 25 +- .../cluster_endtoend_vtgate_godriver.yml | 25 +- ...uster_endtoend_vtgate_partial_keyspace.yml | 25 +- .../cluster_endtoend_vtgate_queries.yml | 25 +- ...cluster_endtoend_vtgate_readafterwrite.yml | 25 +- .../cluster_endtoend_vtgate_reservedconn.yml | 25 +- .../cluster_endtoend_vtgate_schema.yml | 25 +- ...cluster_endtoend_vtgate_schema_tracker.yml | 25 +- ...dtoend_vtgate_tablet_healthcheck_cache.yml | 25 +- .../cluster_endtoend_vtgate_topo.yml | 25 +- .../cluster_endtoend_vtgate_topo_consul.yml | 25 +- .../cluster_endtoend_vtgate_topo_etcd.yml | 25 +- .../cluster_endtoend_vtgate_transaction.yml | 25 +- .../cluster_endtoend_vtgate_unsharded.yml | 25 +- .../cluster_endtoend_vtgate_vindex_heavy.yml | 25 +- .../cluster_endtoend_vtgate_vschema.yml | 25 +- .github/workflows/cluster_endtoend_vtorc.yml | 25 +- .../cluster_endtoend_vtorc_mysql57.yml | 25 +- .../cluster_endtoend_vttablet_prscomplex.yml | 25 +- .../workflows/cluster_endtoend_xb_backup.yml | 27 +- .../cluster_endtoend_xb_backup_mysql57.yml | 25 +- .../cluster_endtoend_xb_recovery.yml | 27 +- .../cluster_endtoend_xb_recovery_mysql57.yml | 25 +- .github/workflows/codeql_analysis.yml | 4 +- .github/workflows/create_release.yml | 7 +- .github/workflows/docker_test_cluster_10.yml | 4 +- .github/workflows/docker_test_cluster_25.yml | 4 +- .github/workflows/e2e_race.yml | 6 +- .github/workflows/endtoend.yml | 10 +- .github/workflows/local_example.yml | 13 +- .github/workflows/region_example.yml | 13 +- .github/workflows/static_checks_etc.yml | 10 +- .github/workflows/unit_race.yml | 9 +- 
.github/workflows/unit_test_mysql57.yml | 29 +- .github/workflows/unit_test_mysql80.yml | 29 +- .github/workflows/update_golang_version.yml | 7 +- .../upgrade_downgrade_test_backups_e2e.yml | 12 +- ...owngrade_test_backups_e2e_next_release.yml | 12 +- .../upgrade_downgrade_test_backups_manual.yml | 12 +- ...grade_test_backups_manual_next_release.yml | 12 +- ...e_downgrade_test_query_serving_queries.yml | 12 +- ...est_query_serving_queries_next_release.yml | 12 +- ...de_downgrade_test_query_serving_schema.yml | 12 +- ...test_query_serving_schema_next_release.yml | 12 +- ...rade_downgrade_test_reparent_new_vtctl.yml | 12 +- ...e_downgrade_test_reparent_new_vttablet.yml | 12 +- ...rade_downgrade_test_reparent_old_vtctl.yml | 12 +- ...e_downgrade_test_reparent_old_vttablet.yml | 12 +- .github/workflows/vtadmin_web_build.yml | 4 +- .github/workflows/vtadmin_web_lint.yml | 2 +- .github/workflows/vtadmin_web_unit_tests.yml | 4 +- .golangci.yml | 4 +- GOVERNANCE.md | 8 +- Makefile | 85 +- bootstrap.sh | 69 - build.env | 2 +- changelog/15.0/15.0.2/summary.md | 2 +- changelog/15.0/15.0.4/changelog.md | 61 + changelog/15.0/15.0.4/release_notes.md | 7 + .../15.0/15.0.4/summary.md | 0 changelog/15.0/README.md | 4 + changelog/16.0/16.0.3/changelog.md | 67 + changelog/16.0/16.0.3/release_notes.md | 7 + changelog/16.0/16.0.3/summary.md | 0 changelog/16.0/16.0.4/changelog.md | 24 + changelog/16.0/16.0.4/release_notes.md | 7 + changelog/16.0/16.0.4/summary.md | 0 changelog/16.0/README.md | 8 + changelog/18.0/18.0.0/changelog.md | 529 + changelog/18.0/18.0.0/release_notes.md | 326 + changelog/18.0/18.0.0/summary.md | 318 + changelog/18.0/README.md | 4 + changelog/README.md | 1 + config/user.json | 2 +- dev.env | 3 - doc/VIT-03-report-security-audit.pdf | Bin 0 -> 1157423 bytes doc/design-docs/VTGateBuffering.md | 63 + doc/internal/{Overview.md => README.md} | 4 +- .../{ => release}/.images/post-release-01.png | Bin .../{ => release}/.images/release-01.png | Bin .../{ => 
release}/.images/release-02.png | Bin .../{ => release}/.images/release-03.png | Bin .../{ => release}/.images/release-04.png | Bin doc/internal/release/README.md | 13 + doc/internal/release/docker-images.md | 3 + doc/internal/release/eol-process.md | 12 + .../how-to-release.md} | 258 +- doc/internal/release/java-packages.md | 27 + doc/internal/release/release-branches.md | 17 + doc/internal/release/release-tags.md | 6 + doc/internal/release/versioning.md | 36 + doc/vtadmin/clusters.yaml | 11 +- docker/base/Dockerfile | 2 +- docker/base/Dockerfile.mysql57 | 2 +- docker/base/Dockerfile.percona57 | 2 +- docker/base/Dockerfile.percona80 | 2 +- docker/bootstrap/CHANGELOG.md | 26 +- docker/bootstrap/Dockerfile.common | 4 +- docker/bootstrap/Dockerfile.mysql57-arm64v8 | 65 - docker/bootstrap/Dockerfile.mysql80 | 4 +- docker/bootstrap/Dockerfile.percona57 | 7 +- docker/bootstrap/Dockerfile.percona80 | 4 +- docker/bootstrap/build.sh | 13 +- docker/k8s/Dockerfile | 9 +- docker/k8s/orchestrator/Dockerfile | 38 - docker/k8s/vtadmin/Dockerfile | 2 +- docker/k8s/{pmm-client => vtorc}/Dockerfile | 31 +- docker/k8s/vttablet/Dockerfile | 8 - docker/lite/Dockerfile.mysql57 | 2 +- docker/lite/Dockerfile.mysql80 | 2 +- docker/lite/Dockerfile.percona57 | 4 +- docker/lite/Dockerfile.percona80 | 4 +- docker/lite/Dockerfile.testing | 4 +- docker/lite/Dockerfile.ubi7.mysql57 | 3 +- docker/lite/Dockerfile.ubi7.mysql80 | 3 +- docker/lite/Dockerfile.ubi7.percona57 | 2 +- docker/lite/Dockerfile.ubi7.percona80 | 2 +- docker/lite/Dockerfile.ubi8.arm64.mysql80 | 5 +- docker/lite/Dockerfile.ubi8.mysql80 | 3 +- docker/lite/install_dependencies.sh | 45 +- docker/local/Dockerfile | 3 +- docker/local/run.sh | 2 +- docker/mini/Dockerfile | 4 - docker/mini/orchestrator-up.sh | 19 - .../mini/orchestrator-vitess-mini.conf.json | 65 - docker/mini/vttablet-mini-up.sh | 1 - docker/orchestrator/Dockerfile | 38 - docker/orchestrator/build.sh | 37 - docker/orchestrator/orchestrator.conf.json | 114 - 
docker/release.sh | 11 +- docker/vttestserver/Dockerfile.mysql57 | 4 +- docker/vttestserver/Dockerfile.mysql80 | 4 +- examples/backups/restart_tablets.sh | 22 +- examples/backups/start_cluster.sh | 10 +- examples/backups/stop_tablets.sh | 4 +- examples/backups/take_backups.sh | 2 +- examples/backups/upgrade_cluster.sh | 6 +- examples/common/env.sh | 16 +- examples/common/lib/utils.sh | 33 +- examples/common/scripts/etcd-up.sh | 4 +- examples/common/scripts/k3s-down.sh | 28 - examples/common/scripts/k3s-up.sh | 57 - examples/common/scripts/mysqlctl-up.sh | 2 + examples/common/scripts/vtadmin-up.sh | 15 + examples/common/scripts/vtgate-up.sh | 4 +- examples/common/scripts/vtorc-up.sh | 1 + examples/common/scripts/vttablet-up.sh | 7 +- examples/compose/README.md | 12 +- examples/compose/client.go | 1 - examples/compose/docker-compose.beginners.yml | 20 +- examples/compose/docker-compose.yml | 26 +- examples/compose/externaldb_vreplication.sh | 28 +- examples/compose/lvtctl.sh | 4 +- examples/compose/schemaload.sh | 12 +- .../compose/vtcompose/docker-compose.test.yml | 26 +- examples/compose/vtcompose/vtcompose.go | 20 +- examples/compose/vttablet-up.sh | 3 +- examples/local/101_initial_cluster.sh | 19 +- examples/local/202_move_tables.sh | 5 +- examples/local/203_switch_reads.sh | 2 +- examples/local/204_switch_writes.sh | 2 +- examples/local/205_clean_commerce.sh | 3 +- examples/local/301_customer_sharded.sh | 1 + examples/local/303_reshard.sh | 6 +- examples/local/304_switch_reads.sh | 2 +- examples/local/305_switch_writes.sh | 3 +- examples/local/306_down_shard_0.sh | 2 +- examples/local/401_teardown.sh | 11 +- examples/local/README.md | 23 +- examples/local/vschema_customer_sharded.json | 26 +- examples/operator/101_initial_cluster.yaml | 14 +- examples/operator/201_customer_tablets.yaml | 14 +- examples/operator/302_new_shards.yaml | 14 +- examples/operator/306_down_shard_0.yaml | 14 +- examples/operator/README.md | 36 +- examples/operator/operator.yaml | 2 +- 
examples/operator/pf.sh | 1 - .../region_sharding/101_initial_cluster.sh | 2 - examples/region_sharding/201_main_sharded.sh | 6 +- examples/region_sharding/203_reshard.sh | 2 +- examples/region_sharding/204_switch_reads.sh | 2 +- examples/region_sharding/205_switch_writes.sh | 2 +- examples/region_sharding/301_teardown.sh | 4 +- go.mod | 136 +- go.sum | 563 +- go/cache/cache.go | 88 - go/cache/cache_test.go | 47 - go/cache/lru_cache.go | 6 +- go/cache/null.go | 73 - go/cache/ristretto.go | 28 - go/cache/ristretto/bloom/bbloom.go | 151 - go/cache/ristretto/bloom/bbloom_test.go | 86 - go/cache/ristretto/cache.go | 697 - go/cache/ristretto/cache_test.go | 690 - go/cache/ristretto/policy.go | 423 - go/cache/ristretto/policy_test.go | 276 - go/cache/ristretto/ring.go | 92 - go/cache/ristretto/ring_test.go | 87 - go/cache/ristretto/sketch.go | 156 - go/cache/ristretto/sketch_test.go | 104 - go/cache/ristretto/store.go | 240 - go/cache/ristretto/store_test.go | 224 - go/cache/theine/LICENSE | 21 + go/cache/theine/bf/bf.go | 116 + go/cache/theine/bf/bf_test.go | 24 + go/cache/theine/entry.go | 93 + go/cache/theine/list.go | 205 + go/cache/theine/list_test.go | 91 + go/cache/theine/mpsc.go | 86 + .../{perf_test.go => theine/mpsc_test.go} | 40 +- go/cache/theine/singleflight.go | 196 + go/cache/theine/singleflight_test.go | 211 + go/cache/theine/sketch.go | 137 + go/cache/theine/sketch_test.go | 54 + go/cache/theine/slru.go | 79 + go/cache/theine/store.go | 615 + go/cache/theine/store_test.go | 82 + go/cache/theine/tlfu.go | 197 + go/cache/theine/tlfu_test.go | 156 + go/cmd/internal/docgen/docgen.go | 162 +- go/cmd/mysqlctl/command/init.go | 71 + go/cmd/mysqlctl/command/init_config.go | 57 + .../{ => command}/plugin_prometheusbackend.go | 2 +- go/cmd/mysqlctl/command/position.go | 74 + go/cmd/mysqlctl/command/reinit_config.go | 58 + go/cmd/mysqlctl/command/root.go | 77 + go/cmd/mysqlctl/command/shutdown.go | 66 + go/cmd/mysqlctl/command/start.go | 67 + 
go/cmd/mysqlctl/command/teardown.go | 70 + go/cmd/mysqlctl/docgen/main.go | 37 + go/cmd/mysqlctl/mysqlctl.go | 262 +- go/cmd/mysqlctld/cli/mysqlctld.go | 178 + .../{ => cli}/plugin_grpcmysqlctlserver.go | 2 +- .../{ => cli}/plugin_prometheusbackend.go | 2 +- go/cmd/mysqlctld/docgen/main.go | 37 + go/cmd/mysqlctld/mysqlctld.go | 134 +- go/cmd/query_analyzer/query_analyzer.go | 149 - .../cli}/plugin_consultopo.go | 2 +- .../cli}/plugin_etcd2topo.go | 2 +- go/cmd/topo2topo/{ => cli}/plugin_zk2topo.go | 2 +- go/cmd/topo2topo/cli/topo2topo.go | 158 + go/cmd/topo2topo/docgen/main.go | 37 + go/cmd/topo2topo/plugin_kubernetestopo.go | 23 - go/cmd/topo2topo/topo2topo.go | 123 +- go/cmd/vtaclcheck/cli/vtactlcheck.go | 67 + go/cmd/vtaclcheck/docgen/main.go | 37 + go/cmd/vtaclcheck/vtaclcheck.go | 37 +- .../cli}/plugin_azblobbackupstorage.go | 2 +- .../{ => cli}/plugin_cephbackupstorage.go | 2 +- .../vtbackup/{ => cli}/plugin_consultopo.go | 2 +- go/cmd/vtbackup/{ => cli}/plugin_etcd2topo.go | 2 +- .../{ => cli}/plugin_filebackupstorage.go | 2 +- .../{ => cli}/plugin_gcsbackupstorage.go | 2 +- .../vtbackup/cli/plugin_opentsdb.go} | 13 +- .../{ => cli}/plugin_prometheusbackend.go | 2 +- .../{ => cli}/plugin_s3backupstorage.go | 2 +- go/cmd/vtbackup/{ => cli}/plugin_zk2topo.go | 2 +- go/cmd/vtbackup/cli/vtbackup.go | 875 + go/cmd/vtbackup/docgen/main.go | 37 + go/cmd/vtbackup/vtbackup.go | 750 +- go/cmd/vtbench/cli/vtbench.go | 246 + go/cmd/vtbench/docgen/main.go | 37 + go/cmd/vtbench/vtbench.go | 185 +- .../vtclient/{ => cli}/plugin_opentracing.go | 3 +- go/cmd/vtclient/cli/vtclient.go | 431 + go/cmd/vtclient/{ => cli}/vtclient_test.go | 17 +- go/cmd/vtclient/docgen/main.go | 37 + go/cmd/vtclient/vtclient.go | 432 +- go/cmd/vtcombo/cli/main.go | 358 + go/cmd/vtcombo/{ => cli}/plugin_dbddl.go | 9 +- .../{ => cli}/plugin_grpcvtctldserver.go | 2 +- .../cli}/plugin_grpcvtctlserver.go | 2 +- .../{ => cli}/plugin_grpcvtgateservice.go | 2 +- .../vtcombo/{ => cli}/plugin_opentracing.go 
| 2 +- go/cmd/vtcombo/{ => cli}/status.go | 4 +- go/cmd/vtcombo/cli/vschema_watcher.go | 117 + go/cmd/vtcombo/docgen/main.go | 37 + go/cmd/vtcombo/main.go | 321 +- go/cmd/vtctld/cli/cli.go | 89 + .../cli}/plugin_azblobbackupstorage.go | 2 +- .../{ => cli}/plugin_cephbackupstorage.go | 2 +- go/cmd/vtctld/{ => cli}/plugin_consultopo.go | 2 +- go/cmd/vtctld/{ => cli}/plugin_etcd2topo.go | 2 +- .../cli}/plugin_filebackupstorage.go | 2 +- .../{ => cli}/plugin_gcsbackupstorage.go | 2 +- .../vtctld/{ => cli}/plugin_grpctabletconn.go | 2 +- .../cli}/plugin_grpctmclient.go | 2 +- .../{ => cli}/plugin_grpcvtctldserver.go | 2 +- .../cli}/plugin_grpcvtctlserver.go | 2 +- .../vtctld/{ => cli}/plugin_grpcvtgateconn.go | 2 +- go/cmd/vtctld/{ => cli}/plugin_opentracing.go | 3 +- go/cmd/vtctld/{ => cli}/plugin_opentsdb.go | 2 +- .../{ => cli}/plugin_prometheusbackend.go | 2 +- .../{ => cli}/plugin_s3backupstorage.go | 2 +- go/cmd/vtctld/{ => cli}/plugin_zk2topo.go | 2 +- go/cmd/vtctld/{ => cli}/schema.go | 21 +- go/cmd/vtctld/docgen/main.go | 37 + go/cmd/vtctld/main.go | 48 +- go/cmd/vtctldclient/cli/awk.go | 4 +- go/cmd/vtctldclient/cli/json.go | 40 +- go/cmd/vtctldclient/cli/shards.go | 6 +- go/cmd/vtctldclient/command/backups.go | 66 +- go/cmd/vtctldclient/command/keyspaces.go | 12 +- go/cmd/vtctldclient/command/onlineddl.go | 404 + go/cmd/vtctldclient/command/reparents.go | 3 + go/cmd/vtctldclient/command/root.go | 36 +- go/cmd/vtctldclient/command/schema.go | 29 +- go/cmd/vtctldclient/command/throttler.go | 35 +- .../command/vreplication/common/cancel.go | 83 + .../command/vreplication/common/complete.go | 75 + .../command/vreplication/common/show.go | 68 + .../command/vreplication/common/status.go | 63 + .../vreplication/common/switchtraffic.go | 129 + .../command/vreplication/common/update.go | 170 + .../command/vreplication/common/utils.go | 245 + .../command/vreplication/common/utils_test.go | 153 + .../vreplication/lookupvindex/lookupvindex.go | 321 + 
.../vreplication/materialize/create.go | 189 + .../vreplication/materialize/materialize.go | 64 + .../command/vreplication/migrate/migrate.go | 134 + .../command/vreplication/mount/mount.go | 183 + .../command/vreplication/movetables/create.go | 122 + .../vreplication/movetables/movetables.go | 85 + .../command/vreplication/reshard/create.go | 94 + .../command/vreplication/reshard/reshard.go | 65 + .../command/vreplication/vdiff/vdiff.go | 887 + .../vreplication/vdiff/vdiff_env_test.go | 351 + .../command/vreplication/vdiff/vdiff_test.go | 530 + .../command/vreplication/workflow/delete.go | 76 + .../command/vreplication/workflow/get.go | 67 + .../command/vreplication/workflow/show.go | 85 + .../command/vreplication/workflow/state.go | 106 + .../command/vreplication/workflow/update.go | 135 + .../command/vreplication/workflow/workflow.go | 90 + go/cmd/vtctldclient/command/workflows.go | 193 - go/cmd/vtexplain/cli/vtexplain.go | 196 + go/cmd/vtexplain/docgen/main.go | 37 + go/cmd/vtexplain/vtexplain.go | 139 +- go/cmd/vtgate/cli/cli.go | 192 + .../{ => cli}/plugin_auth_clientcert.go | 2 +- go/cmd/vtgate/{ => cli}/plugin_auth_ldap.go | 2 +- go/cmd/vtgate/{ => cli}/plugin_auth_static.go | 2 +- go/cmd/vtgate/{ => cli}/plugin_auth_vault.go | 2 +- .../cli}/plugin_consultopo.go | 2 +- .../cli}/plugin_etcd2topo.go | 2 +- .../vtgate/{ => cli}/plugin_grpctabletconn.go | 2 +- .../{ => cli}/plugin_grpcvtgateservice.go | 2 +- go/cmd/vtgate/{ => cli}/plugin_opentracing.go | 2 +- go/cmd/vtgate/{ => cli}/plugin_opentsdb.go | 2 +- .../{ => cli}/plugin_prometheusbackend.go | 2 +- go/cmd/vtgate/cli/plugin_statsd.go | 23 + go/cmd/vtgate/{ => cli}/plugin_zk2topo.go | 2 +- go/cmd/vtgate/{ => cli}/status.go | 3 +- go/cmd/vtgate/docgen/main.go | 42 + go/cmd/vtgate/plugin_kubernetestopo.go | 23 - go/cmd/vtgate/plugin_statsd.go | 7 - go/cmd/vtgate/vtgate.go | 147 +- go/cmd/vtgateclienttest/cli/main.go | 64 + .../{ => cli}/plugin_grpcvtgateservice.go | 2 +- 
go/cmd/vtgateclienttest/docgen/main.go | 37 + go/cmd/vtgateclienttest/main.go | 38 +- go/cmd/vtgateclienttest/services/callerid.go | 14 +- go/cmd/vtgateclienttest/services/echo.go | 8 +- go/cmd/vtgateclienttest/services/errors.go | 8 +- go/cmd/vtgateclienttest/services/fallback.go | 10 +- go/cmd/vtgateclienttest/services/terminal.go | 7 +- go/cmd/vtgr/main.go | 51 - go/cmd/vtorc/cli/cli.go | 103 + .../cli}/plugin_consultopo.go | 2 +- .../{vtgate => vtorc/cli}/plugin_etcd2topo.go | 2 +- .../cli}/plugin_grpctmclient.go | 2 +- .../{ => cli}/plugin_prometheusbackend.go | 2 +- go/cmd/vtorc/{ => cli}/plugin_zk2topo.go | 2 +- go/cmd/vtorc/docgen/main.go | 37 + go/cmd/vtorc/main.go | 110 +- go/cmd/vtorc/main_test.go | 48 - go/cmd/vtorc/plugin_kubernetestopo.go | 23 - go/cmd/vttablet/cli/cli.go | 276 + .../{ => cli}/plugin_azblobbackupstorage.go | 2 +- .../{ => cli}/plugin_cephbackupstorage.go | 2 +- .../vttablet/{ => cli}/plugin_consultopo.go | 2 +- .../cli}/plugin_etcd2topo.go | 2 +- .../cli}/plugin_filebackupstorage.go | 2 +- .../{ => cli}/plugin_filecustomrule.go | 2 +- .../vttablet/{ => cli}/plugin_filelogger.go | 2 +- .../{ => cli}/plugin_gcsbackupstorage.go | 2 +- .../{ => cli}/plugin_grpcbinlogplayer.go | 2 +- .../{ => cli}/plugin_grpcbinlogstreamer.go | 2 +- .../{ => cli}/plugin_grpcqueryservice.go | 2 +- .../{ => cli}/plugin_grpctabletconn.go | 2 +- .../{ => cli}/plugin_grpcthrottlerserver.go | 2 +- .../vttablet/{ => cli}/plugin_grpctmclient.go | 2 +- .../vttablet/{ => cli}/plugin_grpctmserver.go | 2 +- .../vttablet/{ => cli}/plugin_opentracing.go | 2 +- go/cmd/vttablet/{ => cli}/plugin_opentsdb.go | 2 +- .../{ => cli}/plugin_prometheusbackend.go | 2 +- .../{ => cli}/plugin_s3backupstorage.go | 2 +- go/cmd/vttablet/cli/plugin_statsd.go | 22 + .../vttablet/{ => cli}/plugin_sysloglogger.go | 2 +- .../{ => cli}/plugin_topocustomrule.go | 2 +- go/cmd/vttablet/{ => cli}/plugin_zk2topo.go | 2 +- go/cmd/vttablet/{ => cli}/status.go | 5 +- go/cmd/vttablet/docgen/main.go | 
37 + go/cmd/vttablet/plugin_kubernetestopo.go | 23 - go/cmd/vttablet/plugin_statsd.go | 7 - go/cmd/vttablet/vttablet.go | 200 +- .../v001__create_customer_table.sql | 0 .../v002__add_customer_vschema.sql | 0 .../data/schema/app_customer/vschema.json | 0 .../test_keyspace/v001__create_test_table.sql | 0 .../v002__create_hash_vindex.sql | 0 .../test_keyspace/v003__add_table_vschema.sql | 0 .../v004__create_test_table1.sql | 0 go/cmd/vttestserver/cli/main.go | 308 + .../main_test.go} | 62 +- go/cmd/vttestserver/docgen/main.go | 37 + go/cmd/vttestserver/main.go | 282 +- go/cmd/vttlstest/cli/vttlstest.go | 135 + go/cmd/vttlstest/docgen/main.go | 37 + go/cmd/vttlstest/vttlstest.go | 116 +- .../status.go => zk/command/add_auth.go} | 26 +- go/cmd/zk/command/cat.go | 103 + go/cmd/zk/command/chmod.go | 91 + go/cmd/zk/command/cp.go | 43 + go/cmd/zk/command/edit.go | 101 + go/cmd/zk/command/ls.go | 153 + go/cmd/zk/command/rm.go | 97 + go/cmd/zk/command/root.go | 66 + go/cmd/zk/command/stat.go | 88 + go/cmd/zk/command/touch.go | 93 + go/cmd/zk/command/unzip.go | 81 + go/cmd/zk/command/wait.go | 78 + go/cmd/zk/command/watch.go | 86 + go/cmd/zk/command/zip.go | 116 + go/cmd/zk/docgen/main.go | 37 + go/cmd/zk/internal/zkfilepath/zkfilepath.go | 75 + go/cmd/zk/internal/zkfs/zkfs.go | 174 + go/cmd/zk/zkcmd.go | 982 +- go/cmd/zkctl/command/init.go | 32 + go/cmd/zkctl/command/root.go | 63 + go/cmd/zkctl/command/shutdown.go | 32 + go/cmd/zkctl/command/start.go | 32 + go/cmd/zkctl/command/teardown.go | 32 + go/cmd/zkctl/docgen/main.go | 37 + go/cmd/zkctl/zkctl.go | 58 +- go/cmd/zkctld/cli/zkctld.go | 100 + go/cmd/zkctld/docgen/main.go | 37 + go/cmd/zkctld/zkctld.go | 67 +- go/constants/sidecar/name.go | 42 + go/constants/sidecar/queries.go | 55 + go/flags/endtoend/flags_test.go | 48 +- go/flags/endtoend/mysqlctl.txt | 38 +- go/flags/endtoend/mysqlctld.txt | 26 +- go/flags/endtoend/topo2topo.txt | 44 + go/flags/endtoend/vtaclcheck.txt | 9 +- go/flags/endtoend/vtbackup.txt | 50 +- 
go/flags/endtoend/vtbench.txt | 97 + go/flags/endtoend/vtclient.txt | 52 + go/flags/endtoend/vtcombo.txt | 437 + go/flags/endtoend/vtctld.txt | 31 +- go/flags/endtoend/vtctldclient.txt | 15 +- go/flags/endtoend/vtexplain.txt | 45 +- go/flags/endtoend/vtgate.txt | 44 +- go/flags/endtoend/vtgateclienttest.txt | 67 + go/flags/endtoend/vtgr.txt | 93 - go/flags/endtoend/vtorc.txt | 27 +- go/flags/endtoend/vttablet.txt | 67 +- go/flags/endtoend/vttestserver.txt | 14 +- go/flags/endtoend/zk.txt | 37 +- go/flags/endtoend/zkctl.txt | 20 +- go/flags/endtoend/zkctld.txt | 32 +- go/flagutil/flagutil.go | 11 + go/hack/hack.go | 8 +- go/hack/runtime.go | 4 +- go/internal/flag/flag.go | 6 +- go/ioutil/timeout_closer.go | 59 + go/ioutil/timeout_closer_test.go | 53 + go/maps2/maps.go | 37 + go/mysql/auth_server.go | 11 +- go/mysql/auth_server_clientcert_test.go | 4 +- go/mysql/auth_server_config.go | 30 +- go/mysql/auth_server_static.go | 16 +- go/mysql/auth_server_static_flaky_test.go | 4 +- go/mysql/binlog/binlog_json_test.go | 8 +- go/mysql/binlog_dump.go | 5 +- go/mysql/binlog_event.go | 5 +- go/mysql/binlog_event_filepos.go | 24 +- go/mysql/binlog_event_make.go | 4 +- go/mysql/binlog_event_make_test.go | 10 +- go/mysql/binlog_event_mariadb.go | 9 +- go/mysql/binlog_event_mariadb_test.go | 6 +- go/mysql/binlog_event_mysql56.go | 15 +- go/mysql/binlog_event_mysql56_test.go | 33 +- go/mysql/client.go | 73 +- go/mysql/client_test.go | 22 +- go/mysql/collations/charset/convert.go | 75 + go/mysql/collations/charset/korean/tables.go | 2 - .../charset/simplifiedchinese/tables.go | 2 - go/mysql/collations/coercion.go | 212 +- go/mysql/collations/collation.go | 157 - go/mysql/collations/{ => colldata}/8bit.go | 27 +- .../collations/{ => colldata}/cached_size.go | 2 +- go/mysql/collations/colldata/collation.go | 374 + go/mysql/collations/{ => colldata}/fuzz.go | 2 +- .../collations/{ => colldata}/fuzz_test.go | 6 +- go/mysql/collations/colldata/golden_test.go | 82 + .../collations/{ => 
colldata}/multibyte.go | 11 +- .../collations/{ => colldata}/mysqldata.go | 18 +- .../{ => colldata}/mysqlucadata.bin | Bin .../collations/{ => colldata}/mysqlucadata.go | 21 +- go/mysql/collations/{ => colldata}/uca.go | 13 +- .../{ => colldata}/uca_contraction_test.go | 2 +- .../{ => colldata}/uca_tables_test.go | 11 +- .../collations/{ => colldata}/uca_test.go | 8 +- go/mysql/collations/{ => colldata}/unicase.go | 2 +- go/mysql/collations/{ => colldata}/unicode.go | 13 +- .../collations/{ => colldata}/wildcard.go | 2 +- .../{ => colldata}/wildcard_test.go | 2 +- go/mysql/collations/env.go | 149 +- go/mysql/collations/golden_test.go | 145 +- .../collations/integration/charset_test.go | 4 +- .../collations/integration/coercion_test.go | 52 +- .../collations/integration/collations_test.go | 6 +- .../collations/integration/helpers_test.go | 10 +- .../integration/weight_string_test.go | 17 +- .../collations/integration/wildcard_test.go | 3 +- .../collations/internal/uca/contractions.go | 14 - .../collations/internal/uca/fasttables.go | 16 + go/mysql/collations/internal/uca/layout.go | 17 +- go/mysql/collations/mysqlversion.go | 807 +- go/mysql/collations/remote/collation.go | 13 +- go/mysql/collations/supported.go | 294 + ...ions_MySQL80.csv => collations_MySQL8.csv} | 0 go/mysql/collations/tools/colldump/Dockerfile | 20 + .../collations/tools/colldump/colldump.cc | 418 + .../collations/tools/colldump/colldump.sh | 8 + .../tools/makecolldata/codegen/codegen.go | 20 + .../tools/makecolldata/codegen/tablegen.go | 15 +- .../collations/tools/makecolldata/main.go | 4 +- .../tools/makecolldata/maketables.go | 6 +- .../tools/makecolldata/mysqldata.go | 22 +- .../tools/makecolldata/mysqlversions.go | 12 +- .../tools/maketestdata/maketestdata.go | 23 +- go/mysql/conn.go | 188 +- go/mysql/conn_fake.go | 83 + go/mysql/conn_flaky_test.go | 77 +- go/mysql/constants.go | 516 - go/mysql/constants_test.go | 26 +- go/mysql/datetime/{types.go => datetime.go} | 188 +- 
go/mysql/datetime/helpers.go | 23 +- go/mysql/datetime/interval.go | 425 + go/mysql/datetime/mydate.go | 83 + go/mysql/datetime/mydate_test.go | 59 + go/mysql/datetime/parse.go | 7 +- go/mysql/datetime/parse_test.go | 52 + go/mysql/datetime/testdata/daynr_to_date.json | 8188 ++ go/mysql/datetime/testdata/year_to_daynr.json | 1 + go/mysql/datetime/time_zone_test.go | 40 + go/mysql/datetime/timeparts.go | 6 +- go/mysql/decimal/decimal.go | 11 +- go/mysql/decimal/weights.go | 56 + go/mysql/endtoend/client_test.go | 10 +- go/mysql/endtoend/main_test.go | 6 +- go/mysql/endtoend/query_test.go | 7 +- go/mysql/endtoend/replication_test.go | 11 +- go/mysql/endtoend/schema_change_test.go | 8 +- go/mysql/fakesqldb/server.go | 9 +- go/mysql/flavor.go | 152 +- go/mysql/flavor_filepos.go | 78 +- go/mysql/flavor_filepos_test.go | 80 - go/mysql/flavor_mariadb.go | 55 +- go/mysql/flavor_mariadb_test.go | 50 - go/mysql/flavor_mysql.go | 90 +- go/mysql/flavor_mysql_test.go | 72 - go/mysql/flavor_mysqlgr.go | 35 +- go/mysql/flavor_mysqlgr_test.go | 10 +- go/mysql/handshake_test.go | 4 +- go/mysql/hex/hex.go | 4 +- go/mysql/icuregex/compiler.go | 3646 + go/mysql/icuregex/compiler_table.go | 357 + go/mysql/icuregex/debug.go | 151 + go/mysql/icuregex/error.go | 152 + .../icuregex/errors}/error.go | 16 +- go/mysql/icuregex/icu_test.go | 415 + .../icuregex/internal/bytestrie/bytes_trie.go | 354 + go/mysql/icuregex/internal/icudata/README.md | 46 + go/mysql/icuregex/internal/icudata/char.brk | Bin 0 -> 13680 bytes go/mysql/icuregex/internal/icudata/embed.go | 101 + go/mysql/icuregex/internal/icudata/nfc.nrm | Bin 0 -> 35392 bytes go/mysql/icuregex/internal/icudata/nfkc.nrm | Bin 0 -> 55112 bytes .../icuregex/internal/icudata/nfkc_cf.nrm | Bin 0 -> 52432 bytes go/mysql/icuregex/internal/icudata/pnames.icu | Bin 0 -> 44272 bytes go/mysql/icuregex/internal/icudata/ubidi.icu | Bin 0 -> 27616 bytes go/mysql/icuregex/internal/icudata/ucase.icu | Bin 0 -> 30482 bytes 
go/mysql/icuregex/internal/icudata/uemoji.icu | Bin 0 -> 13024 bytes .../icuregex/internal/icudata/ulayout.icu | Bin 0 -> 13728 bytes go/mysql/icuregex/internal/icudata/unames.icu | Bin 0 -> 295992 bytes go/mysql/icuregex/internal/icudata/uprops.icu | Bin 0 -> 141028 bytes go/mysql/icuregex/internal/icudata/word.brk | Bin 0 -> 22232 bytes .../icuregex/internal/normalizer/constants.go | 122 + .../internal/normalizer/normalizer.go | 482 + .../icuregex/internal/pattern/unescape.go | 314 + .../internal/pattern/unescape_test.go | 48 + go/mysql/icuregex/internal/pattern/utils.go | 111 + go/mysql/icuregex/internal/ubidi/loader.go | 125 + go/mysql/icuregex/internal/ubidi/ubidi.go | 390 + go/mysql/icuregex/internal/ucase/fold.go | 243 + go/mysql/icuregex/internal/ucase/loader.go | 101 + go/mysql/icuregex/internal/ucase/ucase.go | 359 + go/mysql/icuregex/internal/uchar/constants.go | 238 + go/mysql/icuregex/internal/uchar/loader.go | 139 + go/mysql/icuregex/internal/uchar/uchar.go | 316 + go/mysql/icuregex/internal/udata/udata.go | 155 + go/mysql/icuregex/internal/uemoji/loader.go | 69 + go/mysql/icuregex/internal/uemoji/uemoji.go | 82 + go/mysql/icuregex/internal/ulayout/ulayout.go | 128 + go/mysql/icuregex/internal/unames/loader.go | 90 + go/mysql/icuregex/internal/unames/unames.go | 406 + .../icuregex/internal/unames/unames_test.go | 64 + .../icuregex/internal/uprops/constants.go | 664 + go/mysql/icuregex/internal/uprops/loader.go | 93 + .../icuregex/internal/uprops/properties.go | 475 + go/mysql/icuregex/internal/uprops/uprops.go | 217 + .../icuregex/internal/uprops/uprops_binary.go | 249 + .../icuregex/internal/uprops/uprops_int.go | 265 + go/mysql/icuregex/internal/uprops/uscript.go | 505 + go/mysql/icuregex/internal/uset/close.go | 96 + go/mysql/icuregex/internal/uset/frozen.go | 339 + go/mysql/icuregex/internal/uset/pattern.go | 107 + .../icuregex/internal/uset/unicode_set.go | 686 + .../internal/uset/unicode_set_test.go | 43 + 
go/mysql/icuregex/internal/utf16/helpers.go | 65 + go/mysql/icuregex/internal/utrie/ucptrie.go | 708 + go/mysql/icuregex/internal/utrie/utrie2.go | 433 + go/mysql/icuregex/matcher.go | 1671 + go/mysql/icuregex/ops.go | 414 + go/mysql/icuregex/pattern.go | 136 + go/mysql/icuregex/perl_test.go | 211 + go/mysql/icuregex/sets.go | 104 + go/mysql/icuregex/sets_test.go | 66 + go/mysql/icuregex/testdata/re_tests.txt | 923 + go/mysql/icuregex/testdata/regextst.txt | 2793 + .../icuregex/testdata/regextst_extended.txt | 128 + go/mysql/json/helpers.go | 37 +- go/mysql/json/json_path_test.go | 3 +- go/mysql/json/marshal.go | 14 +- go/mysql/json/marshal_test.go | 57 + go/mysql/json/parser.go | 33 +- go/mysql/json/update.go | 2 +- go/mysql/json/weights.go | 169 + go/mysql/json/weights_test.go | 44 + go/mysql/query.go | 97 +- go/mysql/query_test.go | 4 +- go/mysql/replication.go | 9 +- go/mysql/{ => replication}/filepos_gtid.go | 61 +- .../{ => replication}/filepos_gtid_test.go | 26 +- go/mysql/{ => replication}/gtid.go | 2 +- go/mysql/{ => replication}/gtid_set.go | 2 +- go/mysql/{ => replication}/gtid_test.go | 2 +- go/mysql/{ => replication}/mariadb_gtid.go | 8 +- .../{ => replication}/mariadb_gtid_test.go | 12 +- go/mysql/{ => replication}/mysql56_gtid.go | 2 +- .../{ => replication}/mysql56_gtid_set.go | 23 +- .../mysql56_gtid_set_test.go | 2 +- .../{ => replication}/mysql56_gtid_test.go | 14 +- go/mysql/{ => replication}/primary_status.go | 35 +- .../{ => replication}/replication_position.go | 23 +- .../replication_position_test.go | 20 +- .../{ => replication}/replication_status.go | 125 +- .../replication/replication_status_test.go | 292 + go/mysql/replication/state.go | 49 + go/mysql/replication_status_test.go | 115 - go/mysql/schema.go | 34 - go/mysql/server.go | 82 +- go/mysql/server_flaky_test.go | 118 +- go/mysql/sqlerror/constants.go | 503 + go/mysql/{ => sqlerror}/sql_error.go | 34 +- go/mysql/{ => sqlerror}/sql_error_test.go | 2 +- go/mysql/streaming_query.go | 15 
+- go/mysql/vault/auth_server_vault.go | 8 +- go/netutil/netutil.go | 14 +- go/netutil/netutil_test.go | 10 +- go/{slices2/slices.go => slice/slice.go} | 34 +- go/sqltypes/bind_variables.go | 7 + go/sqltypes/bind_variables_test.go | 60 +- go/sqltypes/cast.go | 54 + go/sqltypes/cast_test.go | 123 + go/sqltypes/marshal.go | 484 + go/sqltypes/marshal_test.go | 115 + go/sqltypes/result.go | 2 +- go/sqltypes/testing.go | 137 + go/sqltypes/value.go | 139 +- go/sqltypes/value_test.go | 10 +- go/stats/counter_test.go | 6 +- go/stats/counters.go | 43 +- go/stats/counters_test.go | 24 +- go/stats/duration_test.go | 8 +- go/stats/export.go | 45 +- go/stats/export_test.go | 14 +- go/stats/histogram_test.go | 6 +- go/stats/multidimensional_test.go | 2 +- go/stats/opentsdb/backend.go | 58 + go/stats/opentsdb/by_metric.go | 54 + .../opentsdb/{opentsdb.go => collector.go} | 212 +- go/stats/opentsdb/datapoint.go | 90 + go/stats/opentsdb/datapoint_reader.go | 53 + go/stats/opentsdb/doc.go | 18 + go/stats/opentsdb/file_writer.go | 52 + .../status.go => stats/opentsdb/flags.go} | 17 +- go/stats/opentsdb/http_writer.go | 51 + go/stats/opentsdb/init.go | 104 + go/stats/opentsdb/opentsdb_test.go | 13 +- .../opentsdb/writer.go} | 7 +- go/stats/rates.go | 20 +- go/stats/rates_test.go | 11 +- go/stats/statsd/statsd.go | 14 +- go/stats/timings.go | 4 +- go/stats/timings_test.go | 10 +- go/streamlog/streamlog.go | 9 +- .../backup/pitr/backup_mysqlctld_pitr_test.go | 243 - .../endtoend/backup/pitr/backup_pitr_test.go | 83 + .../backup_pitr_xtrabackup_test.go | 61 + .../backup/vtbackup/backup_only_test.go | 94 +- .../backup/vtctlbackup/backup_utils.go | 214 +- .../backup/vtctlbackup/pitr_test_framework.go | 694 + go/test/endtoend/cluster/cluster_process.go | 80 +- go/test/endtoend/cluster/cluster_util.go | 53 +- go/test/endtoend/cluster/mysqlctl_process.go | 49 +- go/test/endtoend/cluster/mysqlctld_process.go | 30 +- go/test/endtoend/cluster/topo_process.go | 64 +- 
go/test/endtoend/cluster/vtbackup_process.go | 4 +- .../endtoend/cluster/vtctlclient_process.go | 5 +- go/test/endtoend/cluster/vtctld_process.go | 17 + .../endtoend/cluster/vtctldclient_process.go | 10 + go/test/endtoend/cluster/vtgate_process.go | 16 +- go/test/endtoend/cluster/vtgr_process.go | 106 - go/test/endtoend/cluster/vtorc_process.go | 9 +- go/test/endtoend/cluster/vttablet_process.go | 35 +- go/test/endtoend/clustertest/etcd_test.go | 39 +- .../encrypted_replication_test.go | 4 +- .../encrypted_transport_test.go | 4 +- go/test/endtoend/keyspace/keyspace_test.go | 10 +- go/test/endtoend/messaging/message_test.go | 26 +- go/test/endtoend/messaging/r | 7 - go/test/endtoend/mysqlctl/mysqlctl_test.go | 4 +- go/test/endtoend/mysqlctld/mysqlctld_test.go | 17 +- .../endtoend/mysqlserver/mysql_server_test.go | 10 +- .../onlineddl/revert/onlineddl_revert_test.go | 97 +- .../scheduler/onlineddl_scheduler_test.go | 67 +- .../onlineddl/vrepl/onlineddl_vrepl_test.go | 175 +- .../onlineddl_vrepl_mini_stress_test.go | 79 +- .../onlineddl_vrepl_stress_suite_test.go | 5 +- .../testdata/gbk-charset/create.sql | 26 - .../testdata/rename-retype-json/after_columns | 1 + .../testdata/rename-retype-json/alter | 1 + .../rename-retype-json/before_columns | 1 + .../testdata/rename-retype-json/create.sql | 22 + go/test/endtoend/onlineddl/vtctlutil.go | 3 - go/test/endtoend/onlineddl/vtgate_util.go | 139 +- .../endtoend/preparestmt/stmt_methods_test.go | 21 + .../recovery/pitr/shardedpitr_test.go | 6 +- .../reparent/plannedreparent/reparent_test.go | 53 +- go/test/endtoend/reparent/utils/utils.go | 18 +- .../endtoend/sharded/sharded_keyspace_test.go | 4 +- .../buffer/buffer_test_helpers.go | 1 - .../buffer/reparent/failover_buffer_test.go | 2 +- .../tabletmanager/custom_rule_topo_test.go | 2 +- .../tabletmanager/primary/tablet_test.go | 30 +- .../tabletmanager/tablegc/tablegc_test.go | 17 + .../tabletmanager/tablet_health_test.go | 34 +- .../tablet_security_policy_test.go | 6 +- 
go/test/endtoend/tabletmanager/tablet_test.go | 4 +- .../tabletmanager/throttler/throttler_test.go | 319 - .../throttler_custom_config/throttler_test.go | 264 - .../throttler_topo/throttler_test.go | 44 +- go/test/endtoend/throttler/util.go | 190 +- go/test/endtoend/utils/cmp.go | 41 +- go/test/endtoend/utils/mysql.go | 36 +- .../endtoend/utils/mysqlvsvitess/main_test.go | 2 +- go/test/endtoend/utils/utils.go | 71 +- go/test/endtoend/vreplication/cluster_test.go | 122 +- go/test/endtoend/vreplication/config_test.go | 117 +- .../endtoend/vreplication/fk_config_test.go | 65 + go/test/endtoend/vreplication/fk_test.go | 274 + go/test/endtoend/vreplication/helper_test.go | 302 +- .../vreplication/initial_data_test.go | 35 +- .../endtoend/vreplication/materialize_test.go | 25 +- go/test/endtoend/vreplication/migrate_test.go | 170 +- .../vreplication/movetables_buffering_test.go | 45 + .../partial_movetables_seq_test.go | 583 + .../vreplication/partial_movetables_test.go | 93 +- .../resharding_workflows_v2_test.go | 123 +- .../endtoend/vreplication/time_zone_test.go | 8 +- .../vreplication/unsharded_init_data.sql | 3 +- go/test/endtoend/vreplication/vdiff2_test.go | 109 +- .../vreplication/vdiff_helper_test.go | 181 +- .../vdiff_multiple_movetables_test.go | 135 + .../vreplication/vreplication_test.go | 358 +- .../vreplication/vreplication_test_env.go | 10 +- go/test/endtoend/vreplication/vstream_test.go | 5 +- .../endtoend/vreplication/wrappers_test.go | 206 + .../vtgate/errors_as_warnings/main_test.go | 9 +- .../vtgate/foreignkey/fk_fuzz_test.go | 734 + go/test/endtoend/vtgate/foreignkey/fk_test.go | 776 + .../endtoend/vtgate/foreignkey/main_test.go | 188 + .../vtgate/foreignkey/sharded_schema.sql | 521 + .../vtgate/foreignkey/sharded_vschema.json | 355 + .../foreignkey/stress/fk_stress_test.go | 1165 + .../vtgate/foreignkey/unsharded_schema.sql | 472 + .../vtgate/foreignkey/unsharded_vschema.json | 41 + .../endtoend/vtgate/foreignkey/utils_test.go | 144 + 
go/test/endtoend/vtgate/gen4/gen4_test.go | 69 +- go/test/endtoend/vtgate/gen4/main_test.go | 2 +- .../vtgate/gen4/system_schema_test.go | 3 +- go/test/endtoend/vtgate/grpc_api/main_test.go | 1 + .../keyspace_watches/keyspace_watch_test.go | 3 - go/test/endtoend/vtgate/lookup_test.go | 6 +- go/test/endtoend/vtgate/main_test.go | 2 +- go/test/endtoend/vtgate/misc_test.go | 16 +- go/test/endtoend/vtgate/mysql80/misc_test.go | 4 +- .../queries/aggregation/aggregation_test.go | 304 +- .../vtgate/queries/aggregation/fuzz_test.go | 212 - .../vtgate/queries/aggregation/main_test.go | 2 +- .../queries/benchmark/benchmark_test.go | 80 + .../vtgate/queries/benchmark/main_test.go | 124 + .../queries/benchmark/sharded_schema.sql | 16 + .../benchmark}/vschema.json | 4 +- .../vtgate/queries/derived/derived_test.go | 4 +- .../vtgate/queries/foundrows/main_test.go | 2 +- .../informationschema_test.go | 7 +- .../queries/informationschema/main_test.go | 3 +- .../endtoend/vtgate/queries/kill/kill_test.go | 246 + .../endtoend/vtgate/queries/kill/main_test.go | 148 + .../endtoend/vtgate/queries/kill/schema.sql | 16 + .../endtoend/vtgate/queries/kill/vschema.json | 42 + .../queries/lookup_queries/main_test.go | 1 - .../endtoend/vtgate/queries/misc/main_test.go | 6 +- .../endtoend/vtgate/queries/misc/misc_test.go | 32 +- .../queries/normalize/normalize_test.go | 36 +- .../vtgate/queries/orderby/main_test.go | 2 +- .../vtgate/queries/random/main_test.go | 94 + .../vtgate/queries/random/query_gen.go | 639 + .../vtgate/queries/random/query_gen_test.go | 62 + .../vtgate/queries/random/random_expr_test.go | 59 + .../vtgate/queries/random/random_test.go | 371 + .../endtoend/vtgate/queries/random/schema.sql | 20 + .../vtgate/queries/random/simplifier_test.go | 116 + .../vtgate/queries/random/svschema.json | 6 + .../vtgate/queries/random/vschema.json | 26 + .../vtgate/queries/subquery/main_test.go | 2 +- .../vtgate/queries/timeout/main_test.go | 110 + .../vtgate/queries/timeout/schema.sql | 5 + 
.../vtgate/queries/timeout/timeout_test.go | 100 + .../vtgate/queries/timeout/uschema.sql | 5 + .../vtgate/queries/timeout/vschema.json | 18 + .../vtgate/queries/union/main_test.go | 2 +- .../reservedconn/reconnect1/main_test.go | 8 + .../vtgate/reservedconn/sysvar_test.go | 7 +- go/test/endtoend/vtgate/schema.sql | 3 + go/test/endtoend/vtgate/schema/schema_test.go | 33 +- .../restarttablet/schema_restart_test.go | 9 +- .../schematracker/sharded/st_sharded_test.go | 22 +- .../sharded_prs/st_sharded_test.go | 9 +- .../schematracker/unauthorized/schema.sql | 5 - .../unauthorized/unauthorized_test.go | 136 - .../unsharded/st_unsharded_test.go | 10 +- go/test/endtoend/vtgate/sequence/seq_test.go | 5 +- .../vtgate/transaction/single/schema.sql | 8 +- .../endtoend/vtgate/unsharded/main_test.go | 1 - go/test/endtoend/vtgr/my.cnf | 41 - go/test/endtoend/vtgr/test_config.json | 9 - go/test/endtoend/vtgr/vtgr_test.go | 366 - go/test/endtoend/vtorc/api/api_test.go | 70 +- go/test/endtoend/vtorc/general/vtorc_test.go | 54 +- .../vtorc/primaryfailure/main_test.go | 2 +- .../primaryfailure/primary_failure_test.go | 129 +- .../vtorc/readtopologyinstance/main_test.go | 16 +- go/test/endtoend/vtorc/utils/utils.go | 163 +- go/test/fuzzing/tablet_manager_fuzzer.go | 1 + go/test/fuzzing/vtctl_fuzzer.go | 2 +- go/test/utils/noleak.go | 96 + go/test/vschemawrapper/vschema_wrapper.go | 325 + go/textutil/strings.go | 10 +- go/timer/timer.go | 18 +- go/tools/astfmtgen/main.go | 2 +- go/tools/asthelpergen/asthelpergen.go | 52 +- go/tools/asthelpergen/clone_gen.go | 2 +- go/tools/asthelpergen/copy_on_rewrite_gen.go | 16 - go/tools/asthelpergen/rewrite_gen.go | 29 +- go/tools/go-upgrade/go-upgrade.go | 70 +- go/tools/go-upgrade/go-upgrade_test.go | 105 + go/trace/trace_test.go | 10 - go/viperutil/config.go | 1 + go/viperutil/debug/handler.go | 6 +- go/viperutil/internal/sync/sync.go | 44 +- .../internal/sync/sync_darwin_test.go | 34 + .../internal/sync/sync_internal_test.go | 136 + 
go/viperutil/internal/sync/sync_linux_test.go | 39 + go/viperutil/internal/sync/sync_test.go | 140 +- go/vt/binlog/binlog_connection.go | 13 +- go/vt/binlog/binlog_streamer.go | 16 +- go/vt/binlog/binlog_streamer_rbr_test.go | 21 +- go/vt/binlog/binlog_streamer_test.go | 154 +- go/vt/binlog/binlogplayer/binlog_player.go | 88 +- .../binlog/binlogplayer/binlog_player_test.go | 130 +- go/vt/binlog/binlogplayer/dbclient.go | 9 +- go/vt/binlog/binlogplayer/mock_dbclient.go | 58 +- go/vt/binlog/event_streamer.go | 7 +- go/vt/binlog/eventtoken/compare.go | 6 +- go/vt/binlog/updatestreamctl.go | 9 +- go/vt/dbconfigs/credentials.go | 1 - go/vt/dbconfigs/dbconfigs.go | 2 +- go/vt/dbconnpool/connection.go | 3 +- go/vt/discovery/fake_healthcheck.go | 18 +- go/vt/discovery/healthcheck_test.go | 355 +- go/vt/discovery/keyspace_events.go | 234 +- go/vt/discovery/keyspace_events_test.go | 232 +- go/vt/discovery/replicationlag.go | 7 - go/vt/discovery/replicationlag_test.go | 4 + go/vt/discovery/tablet_health_check.go | 2 +- go/vt/discovery/tablet_picker.go | 181 +- go/vt/discovery/tablet_picker_test.go | 312 +- go/vt/discovery/topology_watcher.go | 3 +- go/vt/discovery/topology_watcher_test.go | 34 +- go/vt/discovery/utils.go | 4 +- go/vt/external/golib/sqlutils/sqlutils.go | 57 +- go/vt/grpcclient/client.go | 1 - go/vt/grpcclient/client_flaky_test.go | 2 +- go/vt/logutil/logger.go | 11 +- go/vt/logutil/logger_test.go | 9 +- go/vt/logutil/proto3.go | 26 - go/vt/logutil/proto3_test.go | 99 - go/vt/logutil/throttled.go | 2 +- go/vt/mysqlctl/backup.go | 46 +- go/vt/mysqlctl/backup_blackbox_test.go | 601 + go/vt/mysqlctl/backup_test.go | 193 +- go/vt/mysqlctl/backupengine.go | 298 +- go/vt/mysqlctl/backupstats/fake_stats.go | 2 +- go/vt/mysqlctl/backupstats/stats.go | 7 +- go/vt/mysqlctl/binlogs_gtid.go | 178 +- go/vt/mysqlctl/binlogs_gtid_test.go | 359 +- go/vt/mysqlctl/builtinbackupengine.go | 314 +- go/vt/mysqlctl/builtinbackupengine2_test.go | 35 - 
go/vt/mysqlctl/builtinbackupengine_test.go | 501 +- go/vt/mysqlctl/compression_benchmark_test.go | 3 + go/vt/mysqlctl/fakemysqldaemon.go | 76 +- go/vt/mysqlctl/grpcmysqlctlclient/client.go | 41 +- go/vt/mysqlctl/grpcmysqlctlserver/server.go | 16 +- go/vt/mysqlctl/mysql_daemon.go | 26 +- go/vt/mysqlctl/mysqlctlclient/interface.go | 9 +- go/vt/mysqlctl/mysqld.go | 275 +- go/vt/mysqlctl/mysqld_test.go | 67 + go/vt/mysqlctl/query.go | 4 +- go/vt/mysqlctl/reparent.go | 27 +- go/vt/mysqlctl/replication.go | 37 +- go/vt/mysqlctl/s3backupstorage/s3.go | 24 +- go/vt/mysqlctl/s3backupstorage/s3_test.go | 124 +- go/vt/mysqlctl/schema.go | 78 +- go/vt/mysqlctl/tmutils/schema.go | 2 +- go/vt/mysqlctl/tmutils/schema_test.go | 2 +- go/vt/mysqlctl/xtrabackupengine.go | 41 +- go/vt/proto/binlogdata/binlogdata.pb.go | 1007 +- .../proto/binlogdata/binlogdata_vtproto.pb.go | 3428 +- go/vt/proto/binlogservice/binlogservice.pb.go | 4 +- .../binlogservice/binlogservice_grpc.pb.go | 2 +- go/vt/proto/logutil/logutil.pb.go | 4 +- go/vt/proto/logutil/logutil_vtproto.pb.go | 25 +- go/vt/proto/mysqlctl/mysqlctl.pb.go | 584 +- go/vt/proto/mysqlctl/mysqlctl_grpc.pb.go | 74 +- go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go | 1189 +- go/vt/proto/query/query.pb.go | 305 +- go/vt/proto/query/query_vtproto.pb.go | 1538 +- go/vt/proto/queryservice/queryservice.pb.go | 182 +- .../queryservice/queryservice_grpc.pb.go | 73 +- .../replicationdata/replicationdata.pb.go | 4 +- .../replicationdata_vtproto.pb.go | 118 +- go/vt/proto/tableacl/tableacl.pb.go | 4 +- go/vt/proto/tableacl/tableacl_vtproto.pb.go | 64 +- .../tabletmanagerdata/tabletmanagerdata.pb.go | 2786 +- .../tabletmanagerdata_vtproto.pb.go | 18811 ++- .../tabletmanagerservice.pb.go | 882 +- .../tabletmanagerservice_grpc.pb.go | 212 +- go/vt/proto/throttlerdata/throttlerdata.pb.go | 4 +- .../throttlerdata/throttlerdata_vtproto.pb.go | 233 +- .../throttlerservice/throttlerservice.pb.go | 4 +- .../throttlerservice_grpc.pb.go | 2 +- 
go/vt/proto/topodata/cached_size.go | 18 + go/vt/proto/topodata/topodata.pb.go | 485 +- go/vt/proto/topodata/topodata_vtproto.pb.go | 970 +- go/vt/proto/vschema/vschema.pb.go | 342 +- go/vt/proto/vschema/vschema_vtproto.pb.go | 306 +- go/vt/proto/vtadmin/vtadmin.pb.go | 4 +- go/vt/proto/vtadmin/vtadmin_grpc.pb.go | 2 +- go/vt/proto/vtadmin/vtadmin_vtproto.pb.go | 2341 +- go/vt/proto/vtctldata/vtctldata.pb.go | 19163 ++- go/vt/proto/vtctldata/vtctldata_vtproto.pb.go | 53015 ++++--- go/vt/proto/vtctlservice/vtctlservice.pb.go | 1778 +- .../vtctlservice/vtctlservice_grpc.pb.go | 1004 +- go/vt/proto/vtgate/vtgate.pb.go | 388 +- go/vt/proto/vtgate/vtgate_vtproto.pb.go | 521 +- go/vt/proto/vtgateservice/vtgateservice.pb.go | 4 +- .../vtgateservice/vtgateservice_grpc.pb.go | 2 +- go/vt/proto/vtrpc/vtrpc.pb.go | 4 +- go/vt/proto/vtrpc/vtrpc_vtproto.pb.go | 47 +- go/vt/proto/vttest/vttest.pb.go | 4 +- go/vt/proto/vttest/vttest_vtproto.pb.go | 80 +- .../vttime/cached_size.go} | 22 +- go/vt/proto/vttime/vttime.pb.go | 4 +- go/vt/proto/vttime/vttime_vtproto.pb.go | 41 +- go/vt/schema/ddl_strategy.go | 53 +- go/vt/schema/ddl_strategy_test.go | 138 +- go/vt/schema/online_ddl.go | 9 + go/vt/schema/online_ddl_test.go | 35 +- go/vt/schemadiff/diff_test.go | 16 + go/vt/schemadiff/schema_diff_test.go | 14 + go/vt/schemadiff/schema_test.go | 114 +- go/vt/schemadiff/table.go | 29 +- go/vt/schemadiff/table_test.go | 39 + go/vt/schemamanager/schemamanager_test.go | 157 +- go/vt/schemamanager/tablet_executor.go | 279 +- go/vt/schemamanager/tablet_executor_test.go | 186 +- go/vt/servenv/grpc_server.go | 8 +- go/vt/servenv/run.go | 11 +- go/vt/servenv/servenv.go | 82 +- go/vt/servenv/version.go | 2 +- go/vt/sidecardb/identifier_cache.go | 3 +- go/vt/sidecardb/schema/vdiff/vdiff.sql | 2 +- .../schema/vreplication/vreplication_log.sql | 3 +- go/vt/sidecardb/sidecardb.go | 93 +- go/vt/sidecardb/sidecardb_test.go | 13 +- go/vt/sqlparser/analyzer.go | 18 +- go/vt/sqlparser/analyzer_test.go | 2 
+- go/vt/sqlparser/ast.go | 209 +- go/vt/sqlparser/ast_clone.go | 142 +- go/vt/sqlparser/ast_copy_on_rewrite.go | 224 +- go/vt/sqlparser/ast_equals.go | 265 +- go/vt/sqlparser/ast_format.go | 140 +- go/vt/sqlparser/ast_format_fast.go | 200 +- go/vt/sqlparser/ast_funcs.go | 521 +- go/vt/sqlparser/ast_funcs_test.go | 38 + go/vt/sqlparser/ast_rewrite.go | 1094 +- go/vt/sqlparser/ast_rewriting.go | 183 +- go/vt/sqlparser/ast_rewriting_test.go | 40 +- go/vt/sqlparser/ast_test.go | 138 +- go/vt/sqlparser/ast_visit.go | 164 +- go/vt/sqlparser/cached_size.go | 128 +- go/vt/sqlparser/comments.go | 7 + go/vt/sqlparser/constants.go | 151 +- go/vt/sqlparser/keywords.go | 12 +- go/vt/sqlparser/keywords_test.go | 12 +- go/vt/sqlparser/literal.go | 123 + go/vt/sqlparser/normalizer.go | 2 +- go/vt/sqlparser/normalizer_test.go | 28 + go/vt/sqlparser/parse_next_test.go | 13 +- go/vt/sqlparser/parse_test.go | 74 +- go/vt/sqlparser/parsed_query.go | 17 +- go/vt/sqlparser/precedence.go | 2 - go/vt/sqlparser/precedence_test.go | 52 +- go/vt/sqlparser/predicate_rewriting.go | 4 - go/vt/sqlparser/random_expr.go | 465 +- go/vt/sqlparser/reserved_vars.go | 180 + go/vt/sqlparser/rewriter_api.go | 2 +- go/vt/sqlparser/rewriter_test.go | 5 +- go/vt/sqlparser/sql.go | 21673 +-- go/vt/sqlparser/sql.y | 189 +- go/vt/sqlparser/testdata/select_cases.txt | 104 +- go/vt/sqlparser/testdata/union_cases.txt | 2 +- go/vt/sqlparser/tracked_buffer_test.go | 4 + go/vt/sqlparser/walker_test.go | 5 +- go/vt/srvtopo/discover_test.go | 7 +- .../srvtopo/keyspace_filtering_server_test.go | 41 +- go/vt/srvtopo/resilient_server.go | 7 +- go/vt/srvtopo/resilient_server_test.go | 67 +- go/vt/srvtopo/resolver.go | 6 +- go/vt/srvtopo/resolver_test.go | 14 +- go/vt/srvtopo/watch.go | 17 +- go/vt/srvtopo/watch_srvkeyspace.go | 14 +- go/vt/srvtopo/watch_srvvschema.go | 15 +- go/vt/sysvars/sysvars.go | 6 +- go/vt/tableacl/tableacl.go | 8 +- go/vt/tableacl/testlib/testlib.go | 5 - go/vt/throttler/demo/throttler_demo.go | 8 
+- go/vt/throttler/manager_test.go | 4 +- go/vt/throttler/max_replication_lag_module.go | 6 +- .../max_replication_lag_module_config.go | 6 +- go/vt/throttler/throttlerlogz.go | 2 +- go/vt/throttler/throttlerz.go | 2 +- go/vt/tlstest/tlstest.go | 8 +- go/vt/tlstest/tlstest_test.go | 5 +- go/vt/topo/cell_info.go | 3 + go/vt/topo/consultopo/server.go | 3 +- go/vt/topo/consultopo/server_flaky_test.go | 12 +- go/vt/topo/etcd2topo/server_test.go | 4 +- go/vt/topo/faketopo/faketopo.go | 4 +- go/vt/topo/helpers/copy.go | 62 +- go/vt/topo/helpers/copy_test.go | 7 +- go/vt/topo/helpers/tee_topo_test.go | 8 +- go/vt/topo/k8stopo/VitessTopoNodes-crd.yaml | 43 - go/vt/topo/k8stopo/apis/topo/v1beta1/doc.go | 5 - .../k8stopo/apis/topo/v1beta1/register.go | 38 - go/vt/topo/k8stopo/apis/topo/v1beta1/types.go | 32 - .../topo/v1beta1/zz_generated.deepcopy.go | 102 - .../client/clientset/versioned/clientset.go | 98 - .../k8stopo/client/clientset/versioned/doc.go | 20 - .../versioned/fake/clientset_generated.go | 83 - .../client/clientset/versioned/fake/doc.go | 20 - .../clientset/versioned/fake/register.go | 57 - .../client/clientset/versioned/scheme/doc.go | 20 - .../clientset/versioned/scheme/register.go | 57 - .../versioned/typed/topo/v1beta1/doc.go | 20 - .../versioned/typed/topo/v1beta1/fake/doc.go | 20 - .../topo/v1beta1/fake/fake_topo_client.go | 41 - .../topo/v1beta1/fake/fake_vitesstoponode.go | 131 - .../typed/topo/v1beta1/generated_expansion.go | 21 - .../typed/topo/v1beta1/topo_client.go | 90 - .../typed/topo/v1beta1/vitesstoponode.go | 179 - .../informers/externalversions/factory.go | 181 - .../informers/externalversions/generic.go | 63 - .../internalinterfaces/factory_interfaces.go | 41 - .../externalversions/topo/interface.go | 46 - .../topo/v1beta1/interface.go | 45 - .../topo/v1beta1/vitesstoponode.go | 91 - .../topo/v1beta1/expansion_generated.go | 27 - .../listers/topo/v1beta1/vitesstoponode.go | 95 - go/vt/topo/k8stopo/config.go | 17 - 
go/vt/topo/k8stopo/directory.go | 98 - go/vt/topo/k8stopo/election.go | 129 - go/vt/topo/k8stopo/error.go | 54 - go/vt/topo/k8stopo/file.go | 301 - go/vt/topo/k8stopo/file_test.go | 131 - go/vt/topo/k8stopo/lock.go | 120 - go/vt/topo/k8stopo/server.go | 241 - go/vt/topo/k8stopo/server_flaky_test.go | 147 - go/vt/topo/k8stopo/version.go | 42 - go/vt/topo/k8stopo/watch.go | 128 - go/vt/topo/keyspace.go | 4 +- go/vt/topo/memorytopo/file.go | 1 - go/vt/topo/memorytopo/memorytopo.go | 18 +- go/vt/topo/memorytopo/server_test.go | 7 +- go/vt/topo/server.go | 7 +- go/vt/topo/shard.go | 18 +- go/vt/topo/shard_test.go | 20 +- go/vt/topo/tablet.go | 15 +- go/vt/topo/test/directory.go | 7 +- go/vt/topo/test/election.go | 12 +- go/vt/topo/test/file.go | 7 +- go/vt/topo/test/keyspace.go | 6 +- go/vt/topo/test/lock.go | 6 +- go/vt/topo/test/replication.go | 6 +- go/vt/topo/test/serving.go | 11 +- go/vt/topo/test/shard.go | 6 +- go/vt/topo/test/tablet.go | 7 +- go/vt/topo/test/testing.go | 45 +- go/vt/topo/test/trylock.go | 3 +- go/vt/topo/test/vschema.go | 10 +- go/vt/topo/test/watch.go | 11 +- go/vt/topo/topoproto/tablet.go | 18 + go/vt/topo/topotests/cell_info_test.go | 33 +- go/vt/topo/topotests/cells_aliases_test.go | 6 +- go/vt/topo/topotests/keyspace_test.go | 12 +- go/vt/topo/topotests/replication_test.go | 6 +- go/vt/topo/topotests/shard_watch_test.go | 18 +- go/vt/topo/topotests/srv_keyspace_test.go | 54 +- go/vt/topo/topotests/srv_vschema_test.go | 6 +- go/vt/topo/topotests/tablet_test.go | 6 +- go/vt/topo/topotests/wildcards_test.go | 18 +- go/vt/topo/vschema.go | 11 +- go/vt/topo/zk2topo/server_test.go | 7 +- go/vt/topotools/keyspace.go | 5 - go/vt/topotools/routing_rules.go | 50 +- go/vt/topotools/routing_rules_test.go | 18 +- go/vt/topotools/shard_test.go | 30 +- go/vt/topotools/tablet.go | 26 + go/vt/topotools/vschema_ddl.go | 29 + go/vt/vitessdriver/convert.go | 46 +- go/vt/vitessdriver/convert_test.go | 184 +- go/vt/vitessdriver/driver.go | 111 +- 
go/vt/vitessdriver/driver_test.go | 122 +- go/vt/vitessdriver/fakeserver_test.go | 21 +- go/vt/vitessdriver/time.go | 8 +- go/vt/vitessdriver/time_test.go | 16 +- go/vt/vtadmin/api.go | 2 +- go/vt/vtadmin/api_test.go | 86 +- go/vt/vtadmin/cluster/cluster.go | 17 +- go/vt/vtadmin/cluster/cluster_test.go | 86 +- .../discovery/discovery_static_file_test.go | 5 +- .../internal/caches/schemacache/cache.go | 5 +- .../vtadmin/cluster/resolver/resolver_test.go | 14 - go/vt/vtadmin/testutil/cluster.go | 4 +- go/vt/vtadmin/vtsql/config.go | 47 +- go/vt/vtadmin/vtsql/config_test.go | 91 + go/vt/vtcombo/tablet_map.go | 41 +- go/vt/vtctl/backup.go | 29 +- go/vt/vtctl/endtoend/get_schema_test.go | 5 +- go/vt/vtctl/endtoend/onlineddl_show_test.go | 6 +- .../fake_loggerevent_streamingclient.go | 3 +- go/vt/vtctl/grpcvtctlclient/client_test.go | 9 +- go/vt/vtctl/grpcvtctldclient/client_gen.go | 225 + go/vt/vtctl/grpcvtctldclient/client_test.go | 18 +- .../endtoend/init_shard_primary_test.go | 11 +- go/vt/vtctl/grpcvtctldserver/query.go | 243 + go/vt/vtctl/grpcvtctldserver/query_test.go | 236 + go/vt/vtctl/grpcvtctldserver/server.go | 724 +- .../grpcvtctldserver/server_slow_test.go | 71 +- go/vt/vtctl/grpcvtctldserver/server_test.go | 2191 +- .../testutil/proto_compare.go | 14 +- .../testutil/test_tmclient.go | 98 +- go/vt/vtctl/grpcvtctldserver/testutil/util.go | 8 +- go/vt/vtctl/localvtctldclient/client_gen.go | 125 + go/vt/vtctl/plugin_kubernetestopo.go | 23 - go/vt/vtctl/reparent.go | 3 +- go/vt/vtctl/reparentutil/durability.go | 30 +- go/vt/vtctl/reparentutil/durability_test.go | 316 +- .../reparentutil/emergency_reparenter.go | 47 +- .../reparentutil/emergency_reparenter_test.go | 328 +- .../vtctl/reparentutil/planned_reparenter.go | 61 +- .../planned_reparenter_flaky_test.go | 562 +- go/vt/vtctl/reparentutil/reparent_sorter.go | 8 +- .../reparentutil/reparent_sorter_test.go | 33 +- go/vt/vtctl/reparentutil/replication.go | 59 +- go/vt/vtctl/reparentutil/replication_test.go | 
256 +- go/vt/vtctl/reparentutil/util.go | 60 +- go/vt/vtctl/reparentutil/util_test.go | 30 +- go/vt/vtctl/schematools/marshal.go | 158 + go/vt/vtctl/schematools/marshal_test.go | 68 + go/vt/vtctl/schematools/reload_test.go | 6 +- go/vt/vtctl/schematools/schematools.go | 65 +- go/vt/vtctl/schematools/schematools_test.go | 69 + ...unknown-params-logged-dry-run-vschema.json | 18 + .../unknown-params-logged-vschema.json | 18 + go/vt/vtctl/vdiff2.go | 3 - go/vt/vtctl/vdiff2_test.go | 10 +- go/vt/vtctl/vdiff_env_test.go | 4 +- go/vt/vtctl/vtctl.go | 295 +- go/vt/vtctl/vtctl_env_test.go | 4 +- go/vt/vtctl/vtctl_test.go | 222 +- go/vt/vtctl/vtctlclienttest/client.go | 10 +- go/vt/vtctl/workflow/log_recorder.go | 58 + .../vtctl/workflow/log_recorder_test.go} | 20 +- go/vt/vtctl/workflow/materializer.go | 739 + go/vt/vtctl/workflow/materializer_env_test.go | 363 + go/vt/vtctl/workflow/materializer_test.go | 3286 + go/vt/vtctl/workflow/mount.go | 88 + go/vt/vtctl/workflow/resharder.go | 348 + go/vt/vtctl/workflow/server.go | 3138 +- go/vt/vtctl/workflow/state.go | 85 +- go/vt/vtctl/workflow/stream_migrator.go | 24 +- go/vt/vtctl/workflow/stream_migrator_test.go | 9 +- go/vt/vtctl/workflow/switcher.go | 151 + go/vt/vtctl/workflow/switcher_dry_run.go | 388 + go/vt/vtctl/workflow/switcher_interface.go | 57 + go/vt/vtctl/workflow/traffic_switcher.go | 1486 +- go/vt/vtctl/workflow/traffic_switcher_test.go | 2 +- go/vt/vtctl/workflow/utils.go | 768 + go/vt/vtctl/workflow/vexec/vexec.go | 3 +- go/vt/vtctl/workflow/vreplication_stream.go | 8 +- go/vt/vtctld/api.go | 9 +- go/vt/vtctld/api_test.go | 6 +- go/vt/vtctld/explorer_test.go | 34 +- go/vt/vtctld/tablet_data_test.go | 16 +- go/vt/vtctld/vtctld.go | 3 - go/vt/vterrors/code.go | 11 + go/vt/vterrors/errors_test.go | 8 +- go/vt/vterrors/state.go | 28 + go/vt/vterrors/vterrors.go | 22 +- go/vt/vterrors/vterrorsgen/main.go | 6 +- go/vt/vtexplain/vtexplain.go | 10 +- go/vt/vtexplain/vtexplain_test.go | 51 +- 
go/vt/vtexplain/vtexplain_vtgate.go | 43 +- go/vt/vtexplain/vtexplain_vttablet.go | 8 +- go/vt/vtexplain/vtexplain_vttablet_test.go | 13 +- go/vt/vtgate/autocommit_test.go | 118 +- go/vt/vtgate/bench_test.go | 29 +- go/vt/vtgate/buffer/buffer.go | 78 +- go/vt/vtgate/buffer/buffer_helper_test.go | 11 - go/vt/vtgate/buffer/flags.go | 13 +- go/vt/vtgate/buffer/shard_buffer.go | 68 +- go/vt/vtgate/buffer/variables.go | 5 + go/vt/vtgate/endtoend/last_insert_id_test.go | 5 +- go/vt/vtgate/endtoend/lookup_test.go | 6 +- go/vt/vtgate/endtoend/main_test.go | 13 + go/vt/vtgate/endtoend/oltp_test.go | 132 + go/vt/vtgate/endtoend/schema.sql | 86 +- go/vt/vtgate/endtoend/vstream_test.go | 20 +- go/vt/vtgate/engine/aggregations.go | 446 + go/vt/vtgate/engine/aggregations_test.go | 181 + go/vt/vtgate/engine/cached_size.go | 204 +- go/vt/vtgate/engine/compare_utils.go | 73 - go/vt/vtgate/engine/concatenate.go | 4 +- go/vt/vtgate/engine/ddl.go | 5 + go/vt/vtgate/engine/ddl_test.go | 85 + go/vt/vtgate/engine/delete.go | 6 +- go/vt/vtgate/engine/delete_test.go | 52 +- go/vt/vtgate/engine/distinct.go | 6 +- go/vt/vtgate/engine/distinctV3.go | 180 - go/vt/vtgate/engine/dml.go | 34 +- .../vtgate/engine/exec_prepared_statement.go | 8 +- go/vt/vtgate/engine/fake_primitive_test.go | 4 +- go/vt/vtgate/engine/fake_vcursor_test.go | 25 + go/vt/vtgate/engine/filter.go | 19 +- go/vt/vtgate/engine/filter_test.go | 2 +- go/vt/vtgate/engine/fk_cascade.go | 189 + go/vt/vtgate/engine/fk_cascade_test.go | 160 + go/vt/vtgate/engine/fk_verify.go | 129 + go/vt/vtgate/engine/fk_verify_test.go | 125 + go/vt/vtgate/engine/gen4_compare_v3.go | 144 - go/vt/vtgate/engine/hash_join.go | 12 +- go/vt/vtgate/engine/insert.go | 82 +- go/vt/vtgate/engine/insert_test.go | 261 +- go/vt/vtgate/engine/join.go | 48 +- go/vt/vtgate/engine/limit.go | 4 +- go/vt/vtgate/engine/memory_sort.go | 4 +- go/vt/vtgate/engine/memory_sort_test.go | 10 +- go/vt/vtgate/engine/merge_sort.go | 4 +- go/vt/vtgate/engine/merge_sort_test.go 
| 4 +- go/vt/vtgate/engine/online_ddl.go | 7 +- go/vt/vtgate/engine/opcode/constants.go | 87 +- go/vt/vtgate/engine/opcode/constants_test.go | 30 + go/vt/vtgate/engine/ordered_aggregate.go | 455 +- go/vt/vtgate/engine/ordered_aggregate_test.go | 492 +- go/vt/vtgate/engine/plan_description.go | 46 +- go/vt/vtgate/engine/plan_description_test.go | 2 +- go/vt/vtgate/engine/primitive.go | 26 +- go/vt/vtgate/engine/projection.go | 4 +- go/vt/vtgate/engine/rename_fields.go | 4 +- go/vt/vtgate/engine/replace_variables.go | 4 +- go/vt/vtgate/engine/route.go | 37 +- go/vt/vtgate/engine/route_test.go | 48 +- go/vt/vtgate/engine/scalar_aggregation.go | 132 +- .../vtgate/engine/scalar_aggregation_test.go | 267 +- go/vt/vtgate/engine/semi_join.go | 14 +- go/vt/vtgate/engine/set.go | 13 +- go/vt/vtgate/engine/simple_projection.go | 4 +- go/vt/vtgate/engine/sql_calc_found_rows.go | 9 +- go/vt/vtgate/engine/throttle_app.go | 89 + ...t_subquery.go => uncorrelated_subquery.go} | 63 +- ..._test.go => uncorrelated_subquery_test.go} | 90 +- go/vt/vtgate/engine/update.go | 14 +- go/vt/vtgate/engine/update_test.go | 63 +- go/vt/vtgate/engine/vexplain.go | 21 +- go/vt/vtgate/engine/vindex_func.go | 2 +- go/vt/vtgate/engine/vindex_lookup.go | 6 +- go/vt/vtgate/evalengine/api_aggregation.go | 497 + .../vtgate/evalengine/api_aggregation_test.go | 166 + .../vtgate/evalengine/api_arithmetic_test.go | 550 +- go/vt/vtgate/evalengine/api_compare.go | 178 +- go/vt/vtgate/evalengine/api_compare_test.go | 121 +- go/vt/vtgate/evalengine/api_hash.go | 61 +- go/vt/vtgate/evalengine/api_hash_test.go | 187 +- go/vt/vtgate/evalengine/api_literal.go | 46 +- go/vt/vtgate/evalengine/api_types.go | 174 - go/vt/vtgate/evalengine/arithmetic.go | 20 +- go/vt/vtgate/evalengine/cached_size.go | 66 +- go/vt/vtgate/evalengine/collation.go | 27 + go/vt/vtgate/evalengine/compare.go | 7 +- go/vt/vtgate/evalengine/compiler.go | 13 +- go/vt/vtgate/evalengine/compiler_asm.go | 565 +- 
go/vt/vtgate/evalengine/compiler_test.go | 99 +- go/vt/vtgate/evalengine/eval.go | 98 +- go/vt/vtgate/evalengine/eval_bytes.go | 9 +- go/vt/vtgate/evalengine/eval_json.go | 5 +- go/vt/vtgate/evalengine/eval_result.go | 3 +- go/vt/vtgate/evalengine/eval_temporal.go | 95 +- go/vt/vtgate/evalengine/expr_bvar.go | 12 +- go/vt/vtgate/evalengine/expr_collate.go | 25 +- go/vt/vtgate/evalengine/expr_compare.go | 65 +- go/vt/vtgate/evalengine/expr_convert.go | 23 +- go/vt/vtgate/evalengine/expr_env.go | 4 +- go/vt/vtgate/evalengine/fn_compare.go | 5 +- go/vt/vtgate/evalengine/fn_hex.go | 16 +- go/vt/vtgate/evalengine/fn_json.go | 6 +- go/vt/vtgate/evalengine/fn_misc.go | 4 + go/vt/vtgate/evalengine/fn_numeric.go | 2 +- go/vt/vtgate/evalengine/fn_regexp.go | 1064 + go/vt/vtgate/evalengine/fn_string.go | 189 +- go/vt/vtgate/evalengine/fn_time.go | 104 +- go/vt/vtgate/evalengine/format.go | 12 +- .../evalengine/integration/comparison_test.go | 34 +- .../evalengine/integration/fuzz_test.go | 35 +- .../testdata/mysql_golden_1686149004.json | 36046 +++++ go/vt/vtgate/evalengine/mysql_test.go | 26 +- go/vt/vtgate/evalengine/testcases/cases.go | 378 +- go/vt/vtgate/evalengine/testcases/helpers.go | 7 - go/vt/vtgate/evalengine/testcases/inputs.go | 51 + go/vt/vtgate/evalengine/translate.go | 26 +- go/vt/vtgate/evalengine/translate_builtin.go | 184 +- go/vt/vtgate/evalengine/translate_convert.go | 14 +- go/vt/vtgate/evalengine/translate_simplify.go | 8 +- go/vt/vtgate/evalengine/weights.go | 178 + go/vt/vtgate/evalengine/weights_test.go | 103 + go/vt/vtgate/executor.go | 205 +- go/vt/vtgate/executor_ddl_test.go | 4 +- go/vt/vtgate/executor_dml_test.go | 719 +- go/vt/vtgate/executor_framework_test.go | 173 +- go/vt/vtgate/executor_scatter_stats.go | 3 +- go/vt/vtgate/executor_scatter_stats_test.go | 23 +- go/vt/vtgate/executor_select_test.go | 1288 +- go/vt/vtgate/executor_set_test.go | 162 +- go/vt/vtgate/executor_stream_test.go | 34 +- go/vt/vtgate/executor_test.go | 1060 +- 
go/vt/vtgate/executor_vschema_ddl_test.go | 238 +- go/vt/vtgate/executor_vstream_test.go | 28 +- go/vt/vtgate/grpcvtgateconn/suite_test.go | 18 +- go/vt/vtgate/grpcvtgateservice/server.go | 26 +- go/vt/vtgate/legacy_scatter_conn_test.go | 75 +- go/vt/vtgate/load_data.go | 4 +- go/vt/vtgate/mysql_protocol_test.go | 178 - go/vt/vtgate/plan_execute.go | 166 +- .../vtgate/planbuilder/aggregation_pushing.go | 578 - go/vt/vtgate/planbuilder/builder.go | 174 +- go/vt/vtgate/planbuilder/bypass.go | 1 - go/vt/vtgate/planbuilder/collations_test.go | 24 +- go/vt/vtgate/planbuilder/concatenate.go | 82 +- go/vt/vtgate/planbuilder/concatenateGen4.go | 119 - go/vt/vtgate/planbuilder/ddl.go | 35 +- go/vt/vtgate/planbuilder/delete.go | 163 +- go/vt/vtgate/planbuilder/distinct.go | 17 - go/vt/vtgate/planbuilder/dml_planner.go | 395 +- go/vt/vtgate/planbuilder/doc.go | 85 - go/vt/vtgate/planbuilder/expr.go | 337 - go/vt/vtgate/planbuilder/expr_test.go | 104 - .../planbuilder/expression_converter.go | 4 +- go/vt/vtgate/planbuilder/fallback_planner.go | 50 - .../planbuilder/fallback_planner_test.go | 108 - go/vt/vtgate/planbuilder/filter.go | 36 - go/vt/vtgate/planbuilder/filtering.go | 118 - go/vt/vtgate/planbuilder/fk_cascade.go | 85 + go/vt/vtgate/planbuilder/fk_verify.go | 103 + go/vt/vtgate/planbuilder/from.go | 430 - .../planbuilder/gen4_compare_v3_planner.go | 134 - go/vt/vtgate/planbuilder/gen4_planner.go | 708 - go/vt/vtgate/planbuilder/grouping.go | 128 - go/vt/vtgate/planbuilder/hash_join.go | 111 - go/vt/vtgate/planbuilder/horizon_planning.go | 1208 - .../planbuilder/horizon_planning_test.go | 80 - go/vt/vtgate/planbuilder/insert.go | 448 +- go/vt/vtgate/planbuilder/join.go | 242 +- go/vt/vtgate/planbuilder/joinGen4.go | 98 - go/vt/vtgate/planbuilder/jointab.go | 78 - go/vt/vtgate/planbuilder/jointab_test.go | 46 - go/vt/vtgate/planbuilder/logical_plan.go | 194 +- go/vt/vtgate/planbuilder/memory_sort.go | 95 +- go/vt/vtgate/planbuilder/merge_sort.go | 41 +- 
go/vt/vtgate/planbuilder/migration.go | 66 +- .../planbuilder/operator_transformers.go | 882 +- .../planbuilder/operators/SQL_builder.go | 371 +- .../operators/aggregation_pushing.go | 299 +- .../planbuilder/operators/aggregator.go | 377 +- .../planbuilder/operators/apply_join.go | 395 +- .../planbuilder/operators/ast2op_test.go | 205 + .../vtgate/planbuilder/operators/ast_to_op.go | 401 + .../vtgate/planbuilder/operators/comments.go | 85 + .../operators/correlated_subquery.go | 126 - go/vt/vtgate/planbuilder/operators/delete.go | 203 +- go/vt/vtgate/planbuilder/operators/derived.go | 268 - .../vtgate/planbuilder/operators/distinct.go | 89 +- .../planbuilder/operators/dml_planning.go | 78 +- .../planbuilder/operators/expressions.go | 32 +- go/vt/vtgate/planbuilder/operators/filter.go | 78 +- .../planbuilder/operators/fk_cascade.go | 106 + .../vtgate/planbuilder/operators/fk_verify.go | 80 + go/vt/vtgate/planbuilder/operators/helpers.go | 10 +- go/vt/vtgate/planbuilder/operators/horizon.go | 180 +- .../operators/horizon_expanding.go | 303 + .../planbuilder/operators/horizon_planning.go | 900 - .../operators/info_schema_planning.go | 22 +- go/vt/vtgate/planbuilder/operators/insert.go | 342 +- go/vt/vtgate/planbuilder/operators/join.go | 38 +- .../planbuilder/operators/join_merging.go | 216 + go/vt/vtgate/planbuilder/operators/limit.go | 35 +- go/vt/vtgate/planbuilder/operators/logical.go | 678 - go/vt/vtgate/planbuilder/operators/merging.go | 323 - .../planbuilder/operators/offset_planning.go | 155 +- .../vtgate/planbuilder/operators/operator.go | 88 +- go/vt/vtgate/planbuilder/operators/ops/op.go | 39 +- .../planbuilder/operators/ops/to_json.go | 20 +- .../vtgate/planbuilder/operators/ordering.go | 52 +- go/vt/vtgate/planbuilder/operators/phases.go | 209 + .../planbuilder/operators/projection.go | 606 +- .../planbuilder/operators/query_planning.go | 896 + .../planbuilder/operators/querygraph.go | 39 +- .../planbuilder/operators/queryprojection.go | 236 +- 
.../operators/queryprojection_test.go | 4 +- .../operators/rewrite/rewriters.go | 13 +- go/vt/vtgate/planbuilder/operators/route.go | 200 +- .../planbuilder/operators/route_planning.go | 84 +- .../planbuilder/operators/sharded_routing.go | 95 +- .../vtgate/planbuilder/operators/subquery.go | 321 +- .../planbuilder/operators/subquery_builder.go | 424 + .../operators/subquery_container.go | 94 + .../operators/subquery_planning.go | 935 +- go/vt/vtgate/planbuilder/operators/table.go | 57 +- go/vt/vtgate/planbuilder/operators/union.go | 233 +- .../planbuilder/operators/union_merging.go | 259 + go/vt/vtgate/planbuilder/operators/update.go | 663 +- go/vt/vtgate/planbuilder/operators/vindex.go | 47 +- go/vt/vtgate/planbuilder/ordered_aggregate.go | 297 +- go/vt/vtgate/planbuilder/ordering.go | 356 - go/vt/vtgate/planbuilder/other_read.go | 2 +- go/vt/vtgate/planbuilder/plan_test.go | 941 +- .../plancontext/planning_context.go | 73 +- .../vtgate/planbuilder/plancontext/vschema.go | 15 +- go/vt/vtgate/planbuilder/planner.go | 98 + .../{gen4_planner_test.go => planner_test.go} | 0 go/vt/vtgate/planbuilder/postprocess.go | 111 - go/vt/vtgate/planbuilder/primitive_builder.go | 49 - go/vt/vtgate/planbuilder/primitive_wrapper.go | 3 +- go/vt/vtgate/planbuilder/project.go | 173 - go/vt/vtgate/planbuilder/projection.go | 50 +- .../vtgate/planbuilder/projection_pushing.go | 335 - go/vt/vtgate/planbuilder/pullout_subquery.go | 141 - go/vt/vtgate/planbuilder/rewrite.go | 92 +- go/vt/vtgate/planbuilder/rewrite_test.go | 70 +- go/vt/vtgate/planbuilder/route.go | 805 +- go/vt/vtgate/planbuilder/routeGen4.go | 255 - go/vt/vtgate/planbuilder/route_test.go | 168 - go/vt/vtgate/planbuilder/select.go | 655 +- go/vt/vtgate/planbuilder/semi_join.go | 9 +- go/vt/vtgate/planbuilder/set.go | 2 +- go/vt/vtgate/planbuilder/show.go | 170 +- go/vt/vtgate/planbuilder/show_test.go | 49 +- go/vt/vtgate/planbuilder/simple_projection.go | 54 +- go/vt/vtgate/planbuilder/simplifier_test.go | 44 +- 
.../planbuilder/single_sharded_shortcut.go | 13 +- .../vtgate/planbuilder/sql_calc_found_rows.go | 50 +- go/vt/vtgate/planbuilder/subquery_op.go | 118 - go/vt/vtgate/planbuilder/symtab.go | 617 - go/vt/vtgate/planbuilder/symtab_test.go | 227 - go/vt/vtgate/planbuilder/system_tables.go | 136 - .../planbuilder/testdata/aggr_cases.json | 3647 +- .../planbuilder/testdata/ddl_cases.json | 34 +- .../ddl_cases_no_default_keyspace.json | 83 +- .../planbuilder/testdata/dml_cases.json | 1891 +- .../planbuilder/testdata/filter_cases.json | 3328 +- .../testdata/foreignkey_cases.json | 1657 + .../planbuilder/testdata/from_cases.json | 3155 +- .../testdata/info_schema57_cases.json | 719 +- .../testdata/info_schema80_cases.json | 738 +- .../planbuilder/testdata/large_cases.json | 186 +- .../testdata/large_union_cases.json | 1629 +- .../planbuilder/testdata/lock_cases.json | 73 +- .../testdata/memory_sort_cases.json | 611 +- .../planbuilder/testdata/misc_cases.json | 109 +- .../planbuilder/testdata/oltp_cases.json | 182 +- .../testdata/other_read_cases.json | 12 +- .../testdata/postprocess_cases.json | 1838 +- .../planbuilder/testdata/rails_cases.json | 117 +- .../planbuilder/testdata/reference_cases.json | 367 +- .../planbuilder/testdata/select_cases.json | 5614 +- .../testdata/select_cases_with_default.json | 46 +- .../select_cases_with_user_as_default.json | 25 +- .../planbuilder/testdata/show_cases.json | 8 +- .../planbuilder/testdata/symtab_cases.json | 42 +- .../testdata/sysschema_default.json | 75 +- .../planbuilder/testdata/tpcc_cases.json | 843 +- .../planbuilder/testdata/tpch_cases.json | 1430 +- .../planbuilder/testdata/union_cases.json | 2068 +- .../testdata/unsupported_cases.json | 232 +- .../planbuilder/testdata/vexplain_cases.json | 46 +- .../planbuilder/testdata/view_cases.json | 19 +- .../testdata/vindex_func_cases.json | 343 +- .../planbuilder/testdata/vschemas/schema.json | 160 + .../planbuilder/testdata/wireup_cases.json | 920 +- 
.../planbuilder/uncorrelated_subquery.go | 90 + go/vt/vtgate/planbuilder/union.go | 146 - go/vt/vtgate/planbuilder/update.go | 150 + go/vt/vtgate/planbuilder/update_planner.go | 165 - go/vt/vtgate/planbuilder/vindex_func.go | 87 +- go/vt/vtgate/planbuilder/vindex_op.go | 5 +- go/vt/vtgate/plugin_mysql_server.go | 236 +- go/vt/vtgate/plugin_mysql_server_test.go | 60 +- go/vt/vtgate/querylog.go | 30 +- go/vt/vtgate/queryz.go | 3 +- go/vt/vtgate/queryz_test.go | 21 +- go/vt/vtgate/safe_session.go | 16 +- go/vt/vtgate/sandbox_test.go | 42 +- go/vt/vtgate/scatter_conn.go | 8 +- go/vt/vtgate/scatter_conn_test.go | 65 +- go/vt/vtgate/schema/tracker.go | 199 +- go/vt/vtgate/schema/tracker_test.go | 385 +- go/vt/vtgate/schema/update_controller.go | 6 +- go/vt/vtgate/semantics/analyzer.go | 49 +- go/vt/vtgate/semantics/analyzer_test.go | 387 +- go/vt/vtgate/semantics/binder.go | 83 +- go/vt/vtgate/semantics/bitset/bitset.go | 11 +- go/vt/vtgate/semantics/check_invalid.go | 38 + go/vt/vtgate/semantics/derived_table.go | 80 +- go/vt/vtgate/semantics/early_rewriter.go | 192 +- go/vt/vtgate/semantics/early_rewriter_test.go | 121 +- go/vt/vtgate/semantics/errors.go | 116 +- go/vt/vtgate/semantics/info_schema.go | 16 +- .../vtgate/semantics/info_schema_gen_test.go | 63 - go/vt/vtgate/semantics/scoper.go | 11 +- go/vt/vtgate/semantics/semantic_state.go | 177 +- go/vt/vtgate/semantics/table_collector.go | 132 +- go/vt/vtgate/semantics/typer.go | 17 +- .../simplifier/expression_simplifier.go | 147 +- go/vt/vtgate/simplifier/simplifier.go | 699 +- go/vt/vtgate/simplifier/simplifier_test.go | 31 +- go/vt/vtgate/tabletgateway.go | 96 +- go/vt/vtgate/tabletgateway_flaky_test.go | 38 +- go/vt/vtgate/tabletgateway_test.go | 81 +- go/vt/vtgate/tx_conn_test.go | 136 +- go/vt/vtgate/vcursor_impl.go | 139 +- go/vt/vtgate/vcursor_impl_test.go | 17 +- go/vt/vtgate/vindexes/binary.go | 26 +- go/vt/vtgate/vindexes/binary_test.go | 56 +- go/vt/vtgate/vindexes/binaryhash.go | 6 +- 
go/vt/vtgate/vindexes/binarymd5.go | 24 +- go/vt/vtgate/vindexes/binarymd5_test.go | 70 +- go/vt/vtgate/vindexes/cached_size.go | 292 +- go/vt/vtgate/vindexes/cfc.go | 39 +- go/vt/vtgate/vindexes/cfc_test.go | 188 +- go/vt/vtgate/vindexes/consistent_lookup.go | 82 +- .../vtgate/vindexes/consistent_lookup_test.go | 86 +- go/vt/vtgate/vindexes/foreign_keys.go | 252 + go/vt/vtgate/vindexes/foreign_keys_test.go | 314 + go/vt/vtgate/vindexes/hash.go | 32 +- go/vt/vtgate/vindexes/hash_test.go | 62 +- go/vt/vtgate/vindexes/lookup.go | 83 +- go/vt/vtgate/vindexes/lookup_hash.go | 79 +- go/vt/vtgate/vindexes/lookup_hash_test.go | 47 +- .../vindexes/lookup_hash_unique_test.go | 47 +- go/vt/vtgate/vindexes/lookup_internal.go | 50 +- go/vt/vtgate/vindexes/lookup_test.go | 341 +- .../vindexes/lookup_unicodeloosemd5_hash.go | 77 +- .../lookup_unicodeloosemd5_hash_test.go | 99 +- go/vt/vtgate/vindexes/lookup_unique_test.go | 16 +- go/vt/vtgate/vindexes/main_test.go | 94 + go/vt/vtgate/vindexes/multicol.go | 21 +- go/vt/vtgate/vindexes/multicol_test.go | 176 +- go/vt/vtgate/vindexes/null.go | 25 +- go/vt/vtgate/vindexes/null_test.go | 57 +- go/vt/vtgate/vindexes/numeric.go | 30 +- go/vt/vtgate/vindexes/numeric_static_map.go | 60 +- .../vindexes/numeric_static_map_test.go | 186 +- go/vt/vtgate/vindexes/numeric_test.go | 58 +- go/vt/vtgate/vindexes/region_experimental.go | 46 +- .../vindexes/region_experimental_test.go | 94 +- go/vt/vtgate/vindexes/region_json.go | 43 +- go/vt/vtgate/vindexes/reverse_bits.go | 23 +- go/vt/vtgate/vindexes/reverse_bits_test.go | 61 +- go/vt/vtgate/vindexes/unicodeloosemd5.go | 24 +- go/vt/vtgate/vindexes/unicodeloosemd5_test.go | 59 +- go/vt/vtgate/vindexes/unicodeloosexxhash.go | 24 +- .../vindexes/unicodeloosexxhash_test.go | 63 +- go/vt/vtgate/vindexes/vindex.go | 39 +- go/vt/vtgate/vindexes/vindex_test.go | 94 + go/vt/vtgate/vindexes/vschema.go | 93 +- go/vt/vtgate/vindexes/vschema_test.go | 493 +- go/vt/vtgate/vindexes/xxhash.go | 24 +- 
go/vt/vtgate/vindexes/xxhash_test.go | 56 +- go/vt/vtgate/vschema_manager.go | 72 +- go/vt/vtgate/vschema_manager_test.go | 201 +- go/vt/vtgate/vschema_stats.go | 18 +- go/vt/vtgate/vstream_manager.go | 13 +- go/vt/vtgate/vstream_manager_test.go | 112 +- go/vt/vtgate/vtgate.go | 158 +- go/vt/vtgate/vtgate_test.go | 434 +- go/vt/vtgate/vtgateconn/vtgateconn.go | 10 +- go/vt/vtgate/vtgateconn/vtgateconn_test.go | 3 +- go/vt/vtgate/vtgateservice/interface.go | 15 +- go/vt/vtgr/config/vtgr_config.go | 604 - go/vt/vtgr/config/vtgr_config.json | 4 - go/vt/vtgr/config/vtgr_config_test.go | 37 - go/vt/vtgr/controller/diagnose.go | 586 - go/vt/vtgr/controller/diagnose_test.go | 900 - go/vt/vtgr/controller/group.go | 443 - go/vt/vtgr/controller/group_test.go | 454 - go/vt/vtgr/controller/mock_refresh.go | 148 - go/vt/vtgr/controller/refresh.go | 360 - go/vt/vtgr/controller/refresh_test.go | 159 - go/vt/vtgr/controller/repair.go | 767 - go/vt/vtgr/controller/repair_test.go | 1355 - go/vt/vtgr/db/db.go | 381 - go/vt/vtgr/db/generate_base.go | 862 - go/vt/vtgr/db/generate_patches.go | 583 - go/vt/vtgr/db/mock_mysql.go | 191 - go/vt/vtgr/db/mysql.go | 590 - go/vt/vtgr/db/tls.go | 152 - go/vt/vtgr/inst/instance_key.go | 125 - go/vt/vtgr/inst/instance_key_test.go | 67 - go/vt/vtgr/log/log.go | 53 - go/vt/vtgr/log/log_test.go | 16 - go/vt/vtgr/plugin_consultopo.go | 23 - go/vt/vtgr/plugin_grpctmclient.go | 23 - go/vt/vtgr/plugin_zk2topo.go | 23 - go/vt/vtgr/ssl/ssl.go | 62 - go/vt/vtgr/ssl/ssl_test.go | 123 - go/vt/vtgr/vtgr.go | 233 - go/vt/vtgr/vtgr_test.go | 55 - go/vt/vthash/hash.go | 10 + go/vt/vthash/highway/LICENSE | 202 + go/vt/vthash/highway/highwayhash.go | 184 + go/vt/vthash/highway/highwayhashAVX2_amd64.s | 258 + go/vt/vthash/highway/highwayhash_amd64.go | 80 + go/vt/vthash/highway/highwayhash_amd64.s | 304 + go/vt/vthash/highway/highwayhash_arm64.go | 64 + go/vt/vthash/highway/highwayhash_arm64.s | 322 + go/vt/vthash/highway/highwayhash_generic.go | 350 + 
go/vt/vthash/highway/highwayhash_ppc64le.go | 49 + go/vt/vthash/highway/highwayhash_ppc64le.s | 180 + go/vt/vthash/highway/highwayhash_ref.go | 39 + go/vt/vthash/highway/highwayhash_test.go | 228 + go/vt/vthash/metro/metro.go | 5 + go/vt/vtorc/collection/collection.go | 7 - go/vt/vtorc/config/config.go | 32 +- go/vt/vtorc/db/db.go | 21 +- go/vt/vtorc/db/generate_base.go | 475 +- go/vt/vtorc/discovery/aggregated.go | 6 +- go/vt/vtorc/discovery/metric.go | 16 +- go/vt/vtorc/discovery/metric_json.go | 74 - go/vt/vtorc/discovery/queue.go | 31 +- go/vt/vtorc/inst/analysis.go | 86 +- go/vt/vtorc/inst/analysis_dao.go | 265 +- go/vt/vtorc/inst/analysis_dao_test.go | 485 +- go/vt/vtorc/inst/audit.go | 26 - go/vt/vtorc/inst/audit_dao.go | 67 +- go/vt/vtorc/inst/audit_dao_test.go | 81 +- .../vtorc/inst/candidate_database_instance.go | 56 - .../inst/candidate_database_instance_dao.go | 69 - go/vt/vtorc/inst/downtime.go | 52 - go/vt/vtorc/inst/downtime_dao.go | 193 - go/vt/vtorc/inst/durability.go | 83 - go/vt/vtorc/inst/instance.go | 138 +- go/vt/vtorc/inst/instance_binlog.go | 62 - go/vt/vtorc/inst/instance_dao.go | 561 +- go/vt/vtorc/inst/instance_dao_test.go | 675 +- go/vt/vtorc/inst/instance_key.go | 189 - go/vt/vtorc/inst/instance_key_map.go | 141 - go/vt/vtorc/inst/instance_key_map_test.go | 125 - go/vt/vtorc/inst/instance_key_test.go | 209 - go/vt/vtorc/inst/instance_test.go | 42 +- go/vt/vtorc/inst/instance_utils.go | 36 - go/vt/vtorc/inst/keyspace_dao.go | 10 + go/vt/vtorc/inst/keyspace_dao_test.go | 48 +- go/vt/vtorc/inst/maintenance.go | 45 - go/vt/vtorc/inst/maintenance_dao.go | 86 - go/vt/vtorc/inst/oracle_gtid_set.go | 13 +- go/vt/vtorc/inst/postponed_functions.go | 69 - go/vt/vtorc/inst/process.go | 32 - go/vt/vtorc/inst/replication_thread_state.go | 22 +- go/vt/vtorc/inst/resolve.go | 265 - go/vt/vtorc/inst/resolve_dao.go | 219 - go/vt/vtorc/inst/shard_dao.go | 97 + go/vt/vtorc/inst/shard_dao_test.go | 107 + go/vt/vtorc/inst/tablet_dao.go | 105 +- 
go/vt/vtorc/inst/tablet_dao_test.go | 93 + go/vt/vtorc/inst/tag.go | 121 - go/vt/vtorc/inst/tag_dao.go | 206 - go/vt/vtorc/inst/tag_test.go | 141 - ...scovery.go => keyspace_shard_discovery.go} | 69 +- ...st.go => keyspace_shard_discovery_test.go} | 147 +- go/vt/vtorc/logic/tablet_discovery.go | 119 +- go/vt/vtorc/logic/tablet_discovery_test.go | 89 +- go/vt/vtorc/logic/topology_recovery.go | 548 +- go/vt/vtorc/logic/topology_recovery_dao.go | 130 +- .../vtorc/logic/topology_recovery_dao_test.go | 13 +- go/vt/vtorc/logic/topology_recovery_status.go | 6 +- go/vt/vtorc/logic/topology_recovery_test.go | 129 +- .../vtorc/logic/{orchestrator.go => vtorc.go} | 84 +- .../{orchestrator_test.go => vtorc_test.go} | 0 go/vt/vtorc/server/api.go | 24 +- go/vt/vtorc/server/api_test.go | 3 + go/vt/vtorc/server/discovery.go | 10 - go/vt/vtorc/test/recovery_analysis.go | 8 +- go/vt/vtorc/util/math.go | 58 - .../topocustomrule/topocustomrule_test.go | 6 +- go/vt/vttablet/endtoend/config_test.go | 13 - .../endtoend/connkilling/main_test.go | 5 +- go/vt/vttablet/endtoend/framework/client.go | 10 +- go/vt/vttablet/endtoend/framework/server.go | 11 +- go/vt/vttablet/endtoend/framework/testcase.go | 3 +- go/vt/vttablet/endtoend/healthstream_test.go | 20 +- go/vt/vttablet/endtoend/main_test.go | 7 +- go/vt/vttablet/endtoend/misc_test.go | 50 +- .../streamtimeout/healthstream_test.go | 61 +- .../endtoend/streamtimeout/main_test.go | 6 +- go/vt/vttablet/endtoend/transaction_test.go | 12 +- go/vt/vttablet/endtoend/vstreamer_test.go | 6 +- go/vt/vttablet/faketmclient/fake_client.go | 24 +- go/vt/vttablet/grpcqueryservice/server.go | 10 + go/vt/vttablet/grpctabletconn/conn.go | 40 + go/vt/vttablet/grpctmclient/client.go | 74 +- go/vt/vttablet/grpctmserver/server.go | 52 +- go/vt/vttablet/onlineddl/analysis.go | 1 + go/vt/vttablet/onlineddl/executor.go | 162 +- go/vt/vttablet/onlineddl/schema.go | 16 +- go/vt/vttablet/onlineddl/vrepl.go | 56 +- .../fakes/stream_health_query_service.go | 12 +- 
go/vt/vttablet/queryservice/queryservice.go | 4 + go/vt/vttablet/queryservice/wrapped.go | 7 + go/vt/vttablet/sandboxconn/sandboxconn.go | 21 +- .../tabletconntest/fakequeryservice.go | 15 +- .../vttablet/tabletconntest/tabletconntest.go | 2 +- .../vttablet/tabletmanager/framework_test.go | 492 + go/vt/vttablet/tabletmanager/restore.go | 86 +- go/vt/vttablet/tabletmanager/rpc_agent.go | 10 +- go/vt/vttablet/tabletmanager/rpc_backup.go | 1 + go/vt/vttablet/tabletmanager/rpc_query.go | 10 +- .../vttablet/tabletmanager/rpc_replication.go | 124 +- go/vt/vttablet/tabletmanager/rpc_schema.go | 9 +- go/vt/vttablet/tabletmanager/rpc_throttler.go | 53 + .../tabletmanager/rpc_vreplication.go | 323 +- .../tabletmanager/rpc_vreplication_test.go | 968 +- go/vt/vttablet/tabletmanager/shard_sync.go | 6 +- .../vttablet/tabletmanager/shard_sync_test.go | 5 +- go/vt/vttablet/tabletmanager/tm_init.go | 29 +- go/vt/vttablet/tabletmanager/tm_init_test.go | 58 +- go/vt/vttablet/tabletmanager/tm_state.go | 17 +- go/vt/vttablet/tabletmanager/tm_state_test.go | 57 +- go/vt/vttablet/tabletmanager/vdiff/action.go | 193 +- .../tabletmanager/vdiff/action_test.go | 169 +- .../tabletmanager/vdiff/controller.go | 56 +- go/vt/vttablet/tabletmanager/vdiff/engine.go | 19 +- .../tabletmanager/vdiff/engine_test.go | 20 +- .../tabletmanager/vdiff/framework_test.go | 10 +- .../tabletmanager/vdiff/primitive_executor.go | 2 +- go/vt/vttablet/tabletmanager/vdiff/schema.go | 59 +- .../tabletmanager/vdiff/table_differ.go | 233 +- .../tabletmanager/vdiff/table_plan.go | 11 +- go/vt/vttablet/tabletmanager/vdiff/utils.go | 4 +- .../tabletmanager/vdiff/workflow_differ.go | 64 +- .../vdiff/workflow_differ_test.go | 16 +- .../tabletmanager/vreplication/controller.go | 86 +- .../vreplication/controller_plan.go | 18 +- .../vreplication/controller_test.go | 48 +- .../tabletmanager/vreplication/engine.go | 27 +- .../tabletmanager/vreplication/engine_test.go | 10 +- .../vreplication/external_connector.go | 12 + 
.../vreplication/framework_test.go | 56 +- .../tabletmanager/vreplication/fuzz.go | 4 +- .../vreplication/insert_generator.go | 4 +- .../vreplication/insert_generator_test.go | 3 +- .../vreplication/replicator_plan.go | 38 +- .../vreplication/replicator_plan_test.go | 49 + .../tabletmanager/vreplication/stats.go | 11 +- .../tabletmanager/vreplication/stats_test.go | 25 +- .../vreplication/table_plan_builder.go | 25 +- .../tabletmanager/vreplication/utils.go | 149 +- .../tabletmanager/vreplication/vcopier.go | 53 +- .../vreplication/vcopier_atomic.go | 310 + .../vreplication/vcopier_test.go | 63 +- .../tabletmanager/vreplication/vdbclient.go | 7 +- .../tabletmanager/vreplication/vplayer.go | 70 +- .../vreplication/vplayer_flaky_test.go | 82 +- .../tabletmanager/vreplication/vreplicator.go | 115 +- .../vreplication/vreplicator_test.go | 54 + go/vt/vttablet/tabletserver/bench_test.go | 15 +- .../vttablet/tabletserver/connpool/dbconn.go | 10 +- .../tabletserver/connpool/dbconn_test.go | 7 +- go/vt/vttablet/tabletserver/connpool/pool.go | 52 +- go/vt/vttablet/tabletserver/controller.go | 11 +- go/vt/vttablet/tabletserver/debugenv.go | 2 - .../tabletserver/exclude_race_test.go | 62 + go/vt/vttablet/tabletserver/gc/tablegc.go | 98 +- .../vttablet/tabletserver/gc/tablegc_test.go | 87 + .../vttablet/tabletserver/health_streamer.go | 51 +- .../tabletserver/health_streamer_test.go | 21 +- .../tabletserver/messager/message_manager.go | 26 +- .../messager/message_manager_test.go | 3 +- .../tabletserver/planbuilder/permission.go | 5 +- .../vttablet/tabletserver/planbuilder/plan.go | 9 +- .../planbuilder/testdata/exec_cases.txt | 8 +- .../planbuilder/testdata/stream_cases.txt | 14 + go/vt/vttablet/tabletserver/query_engine.go | 284 +- .../tabletserver/query_engine_test.go | 99 +- go/vt/vttablet/tabletserver/query_executor.go | 51 +- .../tabletserver/query_executor_test.go | 13 +- go/vt/vttablet/tabletserver/queryz.go | 3 +- go/vt/vttablet/tabletserver/queryz_test.go | 14 +- 
.../tabletserver/repltracker/reader.go | 11 +- .../tabletserver/repltracker/repltracker.go | 2 +- .../tabletserver/repltracker/writer.go | 12 +- go/vt/vttablet/tabletserver/rules/rules.go | 6 +- go/vt/vttablet/tabletserver/schema/db.go | 8 +- go/vt/vttablet/tabletserver/schema/db_test.go | 15 +- go/vt/vttablet/tabletserver/schema/engine.go | 80 +- .../tabletserver/schema/engine_test.go | 39 +- .../vttablet/tabletserver/schema/historian.go | 23 +- .../tabletserver/schema/load_table_test.go | 14 +- .../vttablet/tabletserver/schema/main_test.go | 2 +- go/vt/vttablet/tabletserver/schema/schema.go | 19 + go/vt/vttablet/tabletserver/schema/tracker.go | 39 +- go/vt/vttablet/tabletserver/state_manager.go | 21 +- .../tabletserver/state_manager_test.go | 9 +- .../tabletserver/stateful_connection.go | 10 +- .../stateful_connection_pool_test.go | 18 +- .../vttablet/tabletserver/tabletenv/config.go | 119 +- .../tabletserver/tabletenv/config_test.go | 156 +- .../vttablet/tabletserver/tabletenv/stats.go | 8 + go/vt/vttablet/tabletserver/tabletserver.go | 147 +- .../tabletserver/tabletserver_test.go | 330 +- .../throttle/base/app_throttle.go | 4 +- .../vttablet/tabletserver/throttle/client.go | 7 +- .../tabletserver/throttle/mysql/probe.go | 3 + .../tabletserver/throttle/throttler.go | 377 +- .../tabletserver/throttle/throttler_test.go | 221 + .../tabletserver/throttle/throttlerapp/app.go | 16 +- .../throttle/throttlerapp/app_test.go | 43 + go/vt/vttablet/tabletserver/twopc.go | 25 +- go/vt/vttablet/tabletserver/twopc_test.go | 13 +- go/vt/vttablet/tabletserver/tx_engine_test.go | 6 + .../vttablet/tabletserver/tx_executor_test.go | 118 +- go/vt/vttablet/tabletserver/tx_pool_test.go | 62 +- .../tabletserver/txthrottler/tx_throttler.go | 345 +- .../txthrottler/tx_throttler_test.go | 138 +- go/vt/vttablet/tabletserver/vstreamer/copy.go | 30 +- .../vttablet/tabletserver/vstreamer/engine.go | 47 +- .../tabletserver/vstreamer/engine_test.go | 2 + 
.../tabletserver/vstreamer/main_flaky_test.go | 15 +- .../vstreamer/packet_size_test.go | 18 +- .../tabletserver/vstreamer/planbuilder.go | 50 +- .../vstreamer/planbuilder_test.go | 25 +- .../tabletserver/vstreamer/rowstreamer.go | 97 +- .../vstreamer/rowstreamer_test.go | 36 +- .../tabletserver/vstreamer/snapshot_conn.go | 53 +- .../tabletserver/vstreamer/tablestreamer.go | 191 + .../vstreamer/tablestreamer_test.go | 78 + .../tabletserver/vstreamer/testenv/testenv.go | 33 +- .../tabletserver/vstreamer/uvstreamer.go | 22 +- .../vstreamer/uvstreamer_flaky_test.go | 3 + .../tabletserver/vstreamer/vstreamer.go | 105 +- .../vstreamer/vstreamer_flaky_test.go | 93 +- go/vt/vttablet/tabletservermock/controller.go | 15 +- go/vt/vttablet/tmclient/rpc_client_api.go | 17 +- go/vt/vttablet/tmrpctest/test_tm_rpc.go | 49 +- go/vt/vttest/environment.go | 6 +- go/vt/vttest/local_cluster.go | 58 +- go/vt/vttest/mysqlctl.go | 2 +- go/vt/vttest/toxiproxyctl.go | 15 +- go/vt/vttest/vtprocess.go | 12 +- go/vt/vttls/crl.go | 2 +- go/vt/wrangler/external_cluster_test.go | 5 +- go/vt/wrangler/fake_dbclient_test.go | 4 - go/vt/wrangler/fake_tablet_test.go | 39 +- go/vt/wrangler/keyspace.go | 142 +- go/vt/wrangler/materializer.go | 442 +- go/vt/wrangler/materializer_env_test.go | 66 +- go/vt/wrangler/materializer_test.go | 1082 +- go/vt/wrangler/reparent.go | 3 +- go/vt/wrangler/resharder.go | 8 +- go/vt/wrangler/resharder_env_test.go | 4 +- go/vt/wrangler/resharder_test.go | 69 +- go/vt/wrangler/schema.go | 5 - go/vt/wrangler/stream_migrater_test.go | 34 +- go/vt/wrangler/switcher.go | 12 + go/vt/wrangler/switcher_dry_run.go | 41 +- go/vt/wrangler/switcher_interface.go | 3 + go/vt/wrangler/tablet.go | 4 +- go/vt/wrangler/tablet_test.go | 25 +- .../testlib/apply_schema_flaky_test.go | 125 - go/vt/wrangler/testlib/backup_test.go | 130 +- .../testlib/copy_schema_shard_test.go | 4 +- .../testlib/emergency_reparent_shard_test.go | 76 +- .../testlib/external_reparent_test.go | 30 +- 
go/vt/wrangler/testlib/fake_tablet.go | 13 +- go/vt/wrangler/testlib/find_tablet_test.go | 9 +- go/vt/wrangler/testlib/permissions_test.go | 5 +- .../testlib/planned_reparent_shard_test.go | 129 +- go/vt/wrangler/testlib/reparent_utils_test.go | 34 +- go/vt/wrangler/testlib/shard_test.go | 8 +- go/vt/wrangler/testlib/version_test.go | 9 +- go/vt/wrangler/testlib/vtctl_topo_test.go | 5 +- go/vt/wrangler/traffic_switcher.go | 642 +- go/vt/wrangler/traffic_switcher_env_test.go | 392 +- go/vt/wrangler/traffic_switcher_test.go | 88 +- go/vt/wrangler/vdiff.go | 65 +- go/vt/wrangler/vdiff_env_test.go | 4 +- go/vt/wrangler/vdiff_test.go | 229 +- go/vt/wrangler/version.go | 39 - go/vt/wrangler/vexec.go | 85 +- go/vt/wrangler/vexec_test.go | 54 +- go/vt/wrangler/workflow.go | 56 +- go/vt/wrangler/workflow_test.go | 92 +- go/vt/wrangler/wrangler_env_test.go | 4 +- go/vt/zkctl/zkconf.go | 6 + go/vt/zkctl/zkctl_test.go | 12 + java/client/pom.xml | 2 +- java/example/pom.xml | 2 +- java/grpc-client/pom.xml | 2 +- java/jdbc/pom.xml | 10 +- java/pom.xml | 21 +- proto/binlogdata.proto | 31 + proto/mysqlctl.proto | 24 + proto/query.proto | 8 +- proto/queryservice.proto | 3 + proto/tabletmanagerdata.proto | 121 +- proto/tabletmanagerservice.proto | 9 +- proto/topodata.proto | 16 + proto/vschema.proto | 9 + proto/vtctldata.proto | 560 +- proto/vtctlservice.proto | 54 + proto/vtgate.proto | 3 + test.go | 19 +- test/ci_workflow_gen.go | 26 +- test/client/client.go | 1 - test/config.json | 169 +- test/local_example.sh | 3 +- test/templates/cluster_endtoend_test.tpl | 27 +- .../cluster_endtoend_test_docker.tpl | 8 +- .../cluster_endtoend_test_mysql57.tpl | 25 +- .../cluster_endtoend_test_self_hosted.tpl | 7 +- test/templates/dockerfile.tpl | 2 +- test/templates/unit_test.tpl | 29 +- test/templates/unit_test_self_hosted.tpl | 8 +- tools/back_to_dev_mode.sh | 2 +- tools/code_freeze.sh | 2 +- tools/create_release.sh | 2 +- tools/dependency_check.sh | 7 +- tools/e2e_test_race.sh | 4 +- 
tools/make-release-packages.sh | 2 +- tools/rowlog/rowlog.go | 4 +- tools/tools.go | 4 - vitess-mixin/e2e/package-lock.json | 143 +- vitess-mixin/e2e/vttablet-up.sh | 1 - web/vtadmin/README.md | 5 +- web/vtadmin/build.sh | 55 + web/vtadmin/package-lock.json | 414 +- web/vtadmin/package.json | 6 +- web/vtadmin/src/proto/vtadmin.d.ts | 37209 +++-- web/vtadmin/src/proto/vtadmin.js | 107257 +++++++++------ 2078 files changed, 360348 insertions(+), 193435 deletions(-) create mode 100644 .github/workflows/auto_approve_pr.yml create mode 100644 .github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml create mode 100644 .github/workflows/cluster_endtoend_backup_pitr_xtrabackup_mysql57.yml rename .github/workflows/{cluster_endtoend_tabletmanager_throttler_custom_config.yml => cluster_endtoend_vreplication_partial_movetables_basic.yml} (70%) create mode 100644 .github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml rename .github/workflows/{cluster_endtoend_tabletmanager_throttler.yml => cluster_endtoend_vtgate_foreignkey_stress.yml} (78%) create mode 100644 changelog/15.0/15.0.4/changelog.md create mode 100644 changelog/15.0/15.0.4/release_notes.md rename go/test/endtoend/onlineddl/vrepl_suite/testdata/gbk-charset/extra_args => changelog/15.0/15.0.4/summary.md (100%) create mode 100644 changelog/16.0/16.0.3/changelog.md create mode 100644 changelog/16.0/16.0.3/release_notes.md create mode 100644 changelog/16.0/16.0.3/summary.md create mode 100644 changelog/16.0/16.0.4/changelog.md create mode 100644 changelog/16.0/16.0.4/release_notes.md create mode 100644 changelog/16.0/16.0.4/summary.md create mode 100644 changelog/18.0/18.0.0/changelog.md create mode 100644 changelog/18.0/18.0.0/release_notes.md create mode 100644 changelog/18.0/18.0.0/summary.md create mode 100644 changelog/18.0/README.md create mode 100644 doc/VIT-03-report-security-audit.pdf create mode 100644 doc/design-docs/VTGateBuffering.md rename doc/internal/{Overview.md => 
README.md} (61%) rename doc/internal/{ => release}/.images/post-release-01.png (100%) rename doc/internal/{ => release}/.images/release-01.png (100%) rename doc/internal/{ => release}/.images/release-02.png (100%) rename doc/internal/{ => release}/.images/release-03.png (100%) rename doc/internal/{ => release}/.images/release-04.png (100%) create mode 100644 doc/internal/release/README.md create mode 100644 doc/internal/release/docker-images.md create mode 100644 doc/internal/release/eol-process.md rename doc/internal/{ReleaseInstructions.md => release/how-to-release.md} (66%) create mode 100644 doc/internal/release/java-packages.md create mode 100644 doc/internal/release/release-branches.md create mode 100644 doc/internal/release/release-tags.md create mode 100644 doc/internal/release/versioning.md delete mode 100644 docker/bootstrap/Dockerfile.mysql57-arm64v8 delete mode 100644 docker/k8s/orchestrator/Dockerfile rename docker/k8s/{pmm-client => vtorc}/Dockerfile (56%) delete mode 100755 docker/mini/orchestrator-up.sh delete mode 100644 docker/mini/orchestrator-vitess-mini.conf.json delete mode 100644 docker/orchestrator/Dockerfile delete mode 100755 docker/orchestrator/build.sh delete mode 100644 docker/orchestrator/orchestrator.conf.json delete mode 100755 examples/common/scripts/k3s-down.sh delete mode 100755 examples/common/scripts/k3s-up.sh delete mode 100644 go/cache/cache.go delete mode 100644 go/cache/cache_test.go delete mode 100644 go/cache/null.go delete mode 100644 go/cache/ristretto.go delete mode 100644 go/cache/ristretto/bloom/bbloom.go delete mode 100644 go/cache/ristretto/bloom/bbloom_test.go delete mode 100644 go/cache/ristretto/cache.go delete mode 100644 go/cache/ristretto/cache_test.go delete mode 100644 go/cache/ristretto/policy.go delete mode 100644 go/cache/ristretto/policy_test.go delete mode 100644 go/cache/ristretto/ring.go delete mode 100644 go/cache/ristretto/ring_test.go delete mode 100644 go/cache/ristretto/sketch.go delete mode 
100644 go/cache/ristretto/sketch_test.go delete mode 100644 go/cache/ristretto/store.go delete mode 100644 go/cache/ristretto/store_test.go create mode 100644 go/cache/theine/LICENSE create mode 100644 go/cache/theine/bf/bf.go create mode 100644 go/cache/theine/bf/bf_test.go create mode 100644 go/cache/theine/entry.go create mode 100644 go/cache/theine/list.go create mode 100644 go/cache/theine/list_test.go create mode 100644 go/cache/theine/mpsc.go rename go/cache/{perf_test.go => theine/mpsc_test.go} (53%) create mode 100644 go/cache/theine/singleflight.go create mode 100644 go/cache/theine/singleflight_test.go create mode 100644 go/cache/theine/sketch.go create mode 100644 go/cache/theine/sketch_test.go create mode 100644 go/cache/theine/slru.go create mode 100644 go/cache/theine/store.go create mode 100644 go/cache/theine/store_test.go create mode 100644 go/cache/theine/tlfu.go create mode 100644 go/cache/theine/tlfu_test.go create mode 100644 go/cmd/mysqlctl/command/init.go create mode 100644 go/cmd/mysqlctl/command/init_config.go rename go/cmd/mysqlctl/{ => command}/plugin_prometheusbackend.go (98%) create mode 100644 go/cmd/mysqlctl/command/position.go create mode 100644 go/cmd/mysqlctl/command/reinit_config.go create mode 100644 go/cmd/mysqlctl/command/root.go create mode 100644 go/cmd/mysqlctl/command/shutdown.go create mode 100644 go/cmd/mysqlctl/command/start.go create mode 100644 go/cmd/mysqlctl/command/teardown.go create mode 100644 go/cmd/mysqlctl/docgen/main.go create mode 100644 go/cmd/mysqlctld/cli/mysqlctld.go rename go/cmd/mysqlctld/{ => cli}/plugin_grpcmysqlctlserver.go (98%) rename go/cmd/mysqlctld/{ => cli}/plugin_prometheusbackend.go (98%) create mode 100644 go/cmd/mysqlctld/docgen/main.go delete mode 100644 go/cmd/query_analyzer/query_analyzer.go rename go/cmd/{vtorc => topo2topo/cli}/plugin_consultopo.go (98%) rename go/cmd/{vtorc => topo2topo/cli}/plugin_etcd2topo.go (98%) rename go/cmd/topo2topo/{ => cli}/plugin_zk2topo.go (98%) create 
mode 100644 go/cmd/topo2topo/cli/topo2topo.go create mode 100644 go/cmd/topo2topo/docgen/main.go delete mode 100644 go/cmd/topo2topo/plugin_kubernetestopo.go create mode 100644 go/cmd/vtaclcheck/cli/vtactlcheck.go create mode 100644 go/cmd/vtaclcheck/docgen/main.go rename go/cmd/{vtctld => vtbackup/cli}/plugin_azblobbackupstorage.go (97%) rename go/cmd/vtbackup/{ => cli}/plugin_cephbackupstorage.go (97%) rename go/cmd/vtbackup/{ => cli}/plugin_consultopo.go (97%) rename go/cmd/vtbackup/{ => cli}/plugin_etcd2topo.go (97%) rename go/cmd/vtbackup/{ => cli}/plugin_filebackupstorage.go (97%) rename go/cmd/vtbackup/{ => cli}/plugin_gcsbackupstorage.go (97%) rename go/{vt/vtgr/controller/controller.go => cmd/vtbackup/cli/plugin_opentsdb.go} (74%) rename go/cmd/vtbackup/{ => cli}/plugin_prometheusbackend.go (98%) rename go/cmd/vtbackup/{ => cli}/plugin_s3backupstorage.go (97%) rename go/cmd/vtbackup/{ => cli}/plugin_zk2topo.go (97%) create mode 100644 go/cmd/vtbackup/cli/vtbackup.go create mode 100644 go/cmd/vtbackup/docgen/main.go create mode 100644 go/cmd/vtbench/cli/vtbench.go create mode 100644 go/cmd/vtbench/docgen/main.go rename go/cmd/vtclient/{ => cli}/plugin_opentracing.go (98%) create mode 100644 go/cmd/vtclient/cli/vtclient.go rename go/cmd/vtclient/{ => cli}/vtclient_test.go (90%) create mode 100644 go/cmd/vtclient/docgen/main.go create mode 100644 go/cmd/vtcombo/cli/main.go rename go/cmd/vtcombo/{ => cli}/plugin_dbddl.go (93%) rename go/cmd/vtcombo/{ => cli}/plugin_grpcvtctldserver.go (98%) rename go/cmd/{vtctld => vtcombo/cli}/plugin_grpcvtctlserver.go (98%) rename go/cmd/vtcombo/{ => cli}/plugin_grpcvtgateservice.go (98%) rename go/cmd/vtcombo/{ => cli}/plugin_opentracing.go (98%) rename go/cmd/vtcombo/{ => cli}/status.go (96%) create mode 100644 go/cmd/vtcombo/cli/vschema_watcher.go create mode 100644 go/cmd/vtcombo/docgen/main.go create mode 100644 go/cmd/vtctld/cli/cli.go rename go/cmd/{vtbackup => vtctld/cli}/plugin_azblobbackupstorage.go (97%) rename 
go/cmd/vtctld/{ => cli}/plugin_cephbackupstorage.go (97%) rename go/cmd/vtctld/{ => cli}/plugin_consultopo.go (98%) rename go/cmd/vtctld/{ => cli}/plugin_etcd2topo.go (98%) rename go/cmd/{vttablet => vtctld/cli}/plugin_filebackupstorage.go (97%) rename go/cmd/vtctld/{ => cli}/plugin_gcsbackupstorage.go (97%) rename go/cmd/vtctld/{ => cli}/plugin_grpctabletconn.go (98%) rename go/cmd/{vtorc => vtctld/cli}/plugin_grpctmclient.go (98%) rename go/cmd/vtctld/{ => cli}/plugin_grpcvtctldserver.go (98%) rename go/cmd/{vtcombo => vtctld/cli}/plugin_grpcvtctlserver.go (98%) rename go/cmd/vtctld/{ => cli}/plugin_grpcvtgateconn.go (98%) rename go/cmd/vtctld/{ => cli}/plugin_opentracing.go (98%) rename go/cmd/vtctld/{ => cli}/plugin_opentsdb.go (98%) rename go/cmd/vtctld/{ => cli}/plugin_prometheusbackend.go (98%) rename go/cmd/vtctld/{ => cli}/plugin_s3backupstorage.go (97%) rename go/cmd/vtctld/{ => cli}/plugin_zk2topo.go (98%) rename go/cmd/vtctld/{ => cli}/schema.go (63%) create mode 100644 go/cmd/vtctld/docgen/main.go create mode 100644 go/cmd/vtctldclient/command/onlineddl.go create mode 100644 go/cmd/vtctldclient/command/vreplication/common/cancel.go create mode 100644 go/cmd/vtctldclient/command/vreplication/common/complete.go create mode 100644 go/cmd/vtctldclient/command/vreplication/common/show.go create mode 100644 go/cmd/vtctldclient/command/vreplication/common/status.go create mode 100644 go/cmd/vtctldclient/command/vreplication/common/switchtraffic.go create mode 100644 go/cmd/vtctldclient/command/vreplication/common/update.go create mode 100644 go/cmd/vtctldclient/command/vreplication/common/utils.go create mode 100644 go/cmd/vtctldclient/command/vreplication/common/utils_test.go create mode 100644 go/cmd/vtctldclient/command/vreplication/lookupvindex/lookupvindex.go create mode 100644 go/cmd/vtctldclient/command/vreplication/materialize/create.go create mode 100644 go/cmd/vtctldclient/command/vreplication/materialize/materialize.go create mode 100644 
go/cmd/vtctldclient/command/vreplication/migrate/migrate.go create mode 100644 go/cmd/vtctldclient/command/vreplication/mount/mount.go create mode 100644 go/cmd/vtctldclient/command/vreplication/movetables/create.go create mode 100644 go/cmd/vtctldclient/command/vreplication/movetables/movetables.go create mode 100644 go/cmd/vtctldclient/command/vreplication/reshard/create.go create mode 100644 go/cmd/vtctldclient/command/vreplication/reshard/reshard.go create mode 100644 go/cmd/vtctldclient/command/vreplication/vdiff/vdiff.go create mode 100644 go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_env_test.go create mode 100644 go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_test.go create mode 100644 go/cmd/vtctldclient/command/vreplication/workflow/delete.go create mode 100644 go/cmd/vtctldclient/command/vreplication/workflow/get.go create mode 100644 go/cmd/vtctldclient/command/vreplication/workflow/show.go create mode 100644 go/cmd/vtctldclient/command/vreplication/workflow/state.go create mode 100644 go/cmd/vtctldclient/command/vreplication/workflow/update.go create mode 100644 go/cmd/vtctldclient/command/vreplication/workflow/workflow.go delete mode 100644 go/cmd/vtctldclient/command/workflows.go create mode 100644 go/cmd/vtexplain/cli/vtexplain.go create mode 100644 go/cmd/vtexplain/docgen/main.go create mode 100644 go/cmd/vtgate/cli/cli.go rename go/cmd/vtgate/{ => cli}/plugin_auth_clientcert.go (98%) rename go/cmd/vtgate/{ => cli}/plugin_auth_ldap.go (98%) rename go/cmd/vtgate/{ => cli}/plugin_auth_static.go (98%) rename go/cmd/vtgate/{ => cli}/plugin_auth_vault.go (98%) rename go/cmd/{topo2topo => vtgate/cli}/plugin_consultopo.go (98%) rename go/cmd/{vttablet => vtgate/cli}/plugin_etcd2topo.go (98%) rename go/cmd/vtgate/{ => cli}/plugin_grpctabletconn.go (98%) rename go/cmd/vtgate/{ => cli}/plugin_grpcvtgateservice.go (98%) rename go/cmd/vtgate/{ => cli}/plugin_opentracing.go (98%) rename go/cmd/vtgate/{ => cli}/plugin_opentsdb.go (98%) rename 
go/cmd/vtgate/{ => cli}/plugin_prometheusbackend.go (98%) create mode 100644 go/cmd/vtgate/cli/plugin_statsd.go rename go/cmd/vtgate/{ => cli}/plugin_zk2topo.go (98%) rename go/cmd/vtgate/{ => cli}/status.go (96%) create mode 100644 go/cmd/vtgate/docgen/main.go delete mode 100644 go/cmd/vtgate/plugin_kubernetestopo.go delete mode 100644 go/cmd/vtgate/plugin_statsd.go create mode 100644 go/cmd/vtgateclienttest/cli/main.go rename go/cmd/vtgateclienttest/{ => cli}/plugin_grpcvtgateservice.go (98%) create mode 100644 go/cmd/vtgateclienttest/docgen/main.go delete mode 100644 go/cmd/vtgr/main.go create mode 100644 go/cmd/vtorc/cli/cli.go rename go/cmd/{vtgate => vtorc/cli}/plugin_consultopo.go (98%) rename go/cmd/{vtgate => vtorc/cli}/plugin_etcd2topo.go (98%) rename go/cmd/{vtctld => vtorc/cli}/plugin_grpctmclient.go (98%) rename go/cmd/vtorc/{ => cli}/plugin_prometheusbackend.go (98%) rename go/cmd/vtorc/{ => cli}/plugin_zk2topo.go (98%) create mode 100644 go/cmd/vtorc/docgen/main.go delete mode 100644 go/cmd/vtorc/main_test.go delete mode 100644 go/cmd/vtorc/plugin_kubernetestopo.go create mode 100644 go/cmd/vttablet/cli/cli.go rename go/cmd/vttablet/{ => cli}/plugin_azblobbackupstorage.go (97%) rename go/cmd/vttablet/{ => cli}/plugin_cephbackupstorage.go (97%) rename go/cmd/vttablet/{ => cli}/plugin_consultopo.go (98%) rename go/cmd/{topo2topo => vttablet/cli}/plugin_etcd2topo.go (98%) rename go/cmd/{vtctld => vttablet/cli}/plugin_filebackupstorage.go (97%) rename go/cmd/vttablet/{ => cli}/plugin_filecustomrule.go (98%) rename go/cmd/vttablet/{ => cli}/plugin_filelogger.go (98%) rename go/cmd/vttablet/{ => cli}/plugin_gcsbackupstorage.go (97%) rename go/cmd/vttablet/{ => cli}/plugin_grpcbinlogplayer.go (98%) rename go/cmd/vttablet/{ => cli}/plugin_grpcbinlogstreamer.go (98%) rename go/cmd/vttablet/{ => cli}/plugin_grpcqueryservice.go (98%) rename go/cmd/vttablet/{ => cli}/plugin_grpctabletconn.go (98%) rename go/cmd/vttablet/{ => cli}/plugin_grpcthrottlerserver.go 
(98%) rename go/cmd/vttablet/{ => cli}/plugin_grpctmclient.go (98%) rename go/cmd/vttablet/{ => cli}/plugin_grpctmserver.go (98%) rename go/cmd/vttablet/{ => cli}/plugin_opentracing.go (98%) rename go/cmd/vttablet/{ => cli}/plugin_opentsdb.go (98%) rename go/cmd/vttablet/{ => cli}/plugin_prometheusbackend.go (98%) rename go/cmd/vttablet/{ => cli}/plugin_s3backupstorage.go (97%) create mode 100644 go/cmd/vttablet/cli/plugin_statsd.go rename go/cmd/vttablet/{ => cli}/plugin_sysloglogger.go (98%) rename go/cmd/vttablet/{ => cli}/plugin_topocustomrule.go (98%) rename go/cmd/vttablet/{ => cli}/plugin_zk2topo.go (98%) rename go/cmd/vttablet/{ => cli}/status.go (97%) create mode 100644 go/cmd/vttablet/docgen/main.go delete mode 100644 go/cmd/vttablet/plugin_kubernetestopo.go delete mode 100644 go/cmd/vttablet/plugin_statsd.go rename go/cmd/vttestserver/{ => cli}/data/schema/app_customer/v001__create_customer_table.sql (100%) rename go/cmd/vttestserver/{ => cli}/data/schema/app_customer/v002__add_customer_vschema.sql (100%) rename go/cmd/vttestserver/{ => cli}/data/schema/app_customer/vschema.json (100%) rename go/cmd/vttestserver/{ => cli}/data/schema/test_keyspace/v001__create_test_table.sql (100%) rename go/cmd/vttestserver/{ => cli}/data/schema/test_keyspace/v002__create_hash_vindex.sql (100%) rename go/cmd/vttestserver/{ => cli}/data/schema/test_keyspace/v003__add_table_vschema.sql (100%) rename go/cmd/vttestserver/{ => cli}/data/schema/test_keyspace/v004__create_test_table1.sql (100%) create mode 100644 go/cmd/vttestserver/cli/main.go rename go/cmd/vttestserver/{vttestserver_test.go => cli/main_test.go} (89%) create mode 100644 go/cmd/vttestserver/docgen/main.go create mode 100644 go/cmd/vttlstest/cli/vttlstest.go create mode 100644 go/cmd/vttlstest/docgen/main.go rename go/cmd/{vtorc/status.go => zk/command/add_auth.go} (56%) create mode 100644 go/cmd/zk/command/cat.go create mode 100644 go/cmd/zk/command/chmod.go create mode 100644 go/cmd/zk/command/cp.go create 
mode 100644 go/cmd/zk/command/edit.go create mode 100644 go/cmd/zk/command/ls.go create mode 100644 go/cmd/zk/command/rm.go create mode 100644 go/cmd/zk/command/root.go create mode 100644 go/cmd/zk/command/stat.go create mode 100644 go/cmd/zk/command/touch.go create mode 100644 go/cmd/zk/command/unzip.go create mode 100644 go/cmd/zk/command/wait.go create mode 100644 go/cmd/zk/command/watch.go create mode 100644 go/cmd/zk/command/zip.go create mode 100644 go/cmd/zk/docgen/main.go create mode 100644 go/cmd/zk/internal/zkfilepath/zkfilepath.go create mode 100644 go/cmd/zk/internal/zkfs/zkfs.go create mode 100644 go/cmd/zkctl/command/init.go create mode 100644 go/cmd/zkctl/command/root.go create mode 100644 go/cmd/zkctl/command/shutdown.go create mode 100644 go/cmd/zkctl/command/start.go create mode 100644 go/cmd/zkctl/command/teardown.go create mode 100644 go/cmd/zkctl/docgen/main.go create mode 100644 go/cmd/zkctld/cli/zkctld.go create mode 100644 go/cmd/zkctld/docgen/main.go create mode 100644 go/constants/sidecar/name.go create mode 100644 go/constants/sidecar/queries.go create mode 100644 go/flags/endtoend/topo2topo.txt create mode 100644 go/flags/endtoend/vtbench.txt create mode 100644 go/flags/endtoend/vtclient.txt create mode 100644 go/flags/endtoend/vtcombo.txt create mode 100644 go/flags/endtoend/vtgateclienttest.txt delete mode 100644 go/flags/endtoend/vtgr.txt create mode 100644 go/ioutil/timeout_closer.go create mode 100644 go/ioutil/timeout_closer_test.go create mode 100644 go/maps2/maps.go rename go/mysql/collations/{ => colldata}/8bit.go (92%) rename go/mysql/collations/{ => colldata}/cached_size.go (98%) create mode 100644 go/mysql/collations/colldata/collation.go rename go/mysql/collations/{ => colldata}/fuzz.go (98%) rename go/mysql/collations/{ => colldata}/fuzz_test.go (96%) create mode 100644 go/mysql/collations/colldata/golden_test.go rename go/mysql/collations/{ => colldata}/multibyte.go (95%) rename go/mysql/collations/{ => 
colldata}/mysqldata.go (99%) rename go/mysql/collations/{ => colldata}/mysqlucadata.bin (100%) rename go/mysql/collations/{ => colldata}/mysqlucadata.go (99%) rename go/mysql/collations/{ => colldata}/uca.go (96%) rename go/mysql/collations/{ => colldata}/uca_contraction_test.go (99%) rename go/mysql/collations/{ => colldata}/uca_tables_test.go (95%) rename go/mysql/collations/{ => colldata}/uca_test.go (99%) rename go/mysql/collations/{ => colldata}/unicase.go (99%) rename go/mysql/collations/{ => colldata}/unicode.go (96%) rename go/mysql/collations/{ => colldata}/wildcard.go (99%) rename go/mysql/collations/{ => colldata}/wildcard_test.go (99%) create mode 100644 go/mysql/collations/supported.go rename go/mysql/collations/testdata/versions/{collations_MySQL80.csv => collations_MySQL8.csv} (100%) create mode 100644 go/mysql/collations/tools/colldump/Dockerfile create mode 100644 go/mysql/collations/tools/colldump/colldump.cc create mode 100755 go/mysql/collations/tools/colldump/colldump.sh create mode 100644 go/mysql/conn_fake.go rename go/mysql/datetime/{types.go => datetime.go} (70%) create mode 100644 go/mysql/datetime/interval.go create mode 100644 go/mysql/datetime/mydate.go create mode 100644 go/mysql/datetime/mydate_test.go create mode 100644 go/mysql/datetime/testdata/daynr_to_date.json create mode 100644 go/mysql/datetime/testdata/year_to_daynr.json create mode 100644 go/mysql/decimal/weights.go delete mode 100644 go/mysql/flavor_filepos_test.go create mode 100644 go/mysql/icuregex/compiler.go create mode 100644 go/mysql/icuregex/compiler_table.go create mode 100644 go/mysql/icuregex/debug.go create mode 100644 go/mysql/icuregex/error.go rename go/{vt/vtgr/controller => mysql/icuregex/errors}/error.go (55%) create mode 100644 go/mysql/icuregex/icu_test.go create mode 100644 go/mysql/icuregex/internal/bytestrie/bytes_trie.go create mode 100644 go/mysql/icuregex/internal/icudata/README.md create mode 100644 go/mysql/icuregex/internal/icudata/char.brk 
create mode 100644 go/mysql/icuregex/internal/icudata/embed.go create mode 100644 go/mysql/icuregex/internal/icudata/nfc.nrm create mode 100644 go/mysql/icuregex/internal/icudata/nfkc.nrm create mode 100644 go/mysql/icuregex/internal/icudata/nfkc_cf.nrm create mode 100644 go/mysql/icuregex/internal/icudata/pnames.icu create mode 100644 go/mysql/icuregex/internal/icudata/ubidi.icu create mode 100644 go/mysql/icuregex/internal/icudata/ucase.icu create mode 100644 go/mysql/icuregex/internal/icudata/uemoji.icu create mode 100644 go/mysql/icuregex/internal/icudata/ulayout.icu create mode 100644 go/mysql/icuregex/internal/icudata/unames.icu create mode 100644 go/mysql/icuregex/internal/icudata/uprops.icu create mode 100644 go/mysql/icuregex/internal/icudata/word.brk create mode 100644 go/mysql/icuregex/internal/normalizer/constants.go create mode 100644 go/mysql/icuregex/internal/normalizer/normalizer.go create mode 100644 go/mysql/icuregex/internal/pattern/unescape.go create mode 100644 go/mysql/icuregex/internal/pattern/unescape_test.go create mode 100644 go/mysql/icuregex/internal/pattern/utils.go create mode 100644 go/mysql/icuregex/internal/ubidi/loader.go create mode 100644 go/mysql/icuregex/internal/ubidi/ubidi.go create mode 100644 go/mysql/icuregex/internal/ucase/fold.go create mode 100644 go/mysql/icuregex/internal/ucase/loader.go create mode 100644 go/mysql/icuregex/internal/ucase/ucase.go create mode 100644 go/mysql/icuregex/internal/uchar/constants.go create mode 100644 go/mysql/icuregex/internal/uchar/loader.go create mode 100644 go/mysql/icuregex/internal/uchar/uchar.go create mode 100644 go/mysql/icuregex/internal/udata/udata.go create mode 100644 go/mysql/icuregex/internal/uemoji/loader.go create mode 100644 go/mysql/icuregex/internal/uemoji/uemoji.go create mode 100644 go/mysql/icuregex/internal/ulayout/ulayout.go create mode 100644 go/mysql/icuregex/internal/unames/loader.go create mode 100644 go/mysql/icuregex/internal/unames/unames.go create mode 
100644 go/mysql/icuregex/internal/unames/unames_test.go create mode 100644 go/mysql/icuregex/internal/uprops/constants.go create mode 100644 go/mysql/icuregex/internal/uprops/loader.go create mode 100644 go/mysql/icuregex/internal/uprops/properties.go create mode 100644 go/mysql/icuregex/internal/uprops/uprops.go create mode 100644 go/mysql/icuregex/internal/uprops/uprops_binary.go create mode 100644 go/mysql/icuregex/internal/uprops/uprops_int.go create mode 100644 go/mysql/icuregex/internal/uprops/uscript.go create mode 100644 go/mysql/icuregex/internal/uset/close.go create mode 100644 go/mysql/icuregex/internal/uset/frozen.go create mode 100644 go/mysql/icuregex/internal/uset/pattern.go create mode 100644 go/mysql/icuregex/internal/uset/unicode_set.go create mode 100644 go/mysql/icuregex/internal/uset/unicode_set_test.go create mode 100644 go/mysql/icuregex/internal/utf16/helpers.go create mode 100644 go/mysql/icuregex/internal/utrie/ucptrie.go create mode 100644 go/mysql/icuregex/internal/utrie/utrie2.go create mode 100644 go/mysql/icuregex/matcher.go create mode 100644 go/mysql/icuregex/ops.go create mode 100644 go/mysql/icuregex/pattern.go create mode 100644 go/mysql/icuregex/perl_test.go create mode 100644 go/mysql/icuregex/sets.go create mode 100644 go/mysql/icuregex/sets_test.go create mode 100644 go/mysql/icuregex/testdata/re_tests.txt create mode 100644 go/mysql/icuregex/testdata/regextst.txt create mode 100644 go/mysql/icuregex/testdata/regextst_extended.txt create mode 100644 go/mysql/json/marshal_test.go create mode 100644 go/mysql/json/weights.go create mode 100644 go/mysql/json/weights_test.go rename go/mysql/{ => replication}/filepos_gtid.go (68%) rename go/mysql/{ => replication}/filepos_gtid_test.go (77%) rename go/mysql/{ => replication}/gtid.go (99%) rename go/mysql/{ => replication}/gtid_set.go (99%) rename go/mysql/{ => replication}/gtid_test.go (99%) rename go/mysql/{ => replication}/mariadb_gtid.go (97%) rename go/mysql/{ => 
replication}/mariadb_gtid_test.go (98%) rename go/mysql/{ => replication}/mysql56_gtid.go (99%) rename go/mysql/{ => replication}/mysql56_gtid_set.go (97%) rename go/mysql/{ => replication}/mysql56_gtid_set_test.go (99%) rename go/mysql/{ => replication}/mysql56_gtid_test.go (90%) rename go/mysql/{ => replication}/primary_status.go (53%) rename go/mysql/{ => replication}/replication_position.go (88%) rename go/mysql/{ => replication}/replication_position_test.go (94%) rename go/mysql/{ => replication}/replication_status.go (63%) create mode 100644 go/mysql/replication/replication_status_test.go create mode 100644 go/mysql/replication/state.go delete mode 100644 go/mysql/replication_status_test.go create mode 100644 go/mysql/sqlerror/constants.go rename go/mysql/{ => sqlerror}/sql_error.go (80%) rename go/mysql/{ => sqlerror}/sql_error_test.go (99%) rename go/{slices2/slices.go => slice/slice.go} (60%) create mode 100644 go/sqltypes/cast.go create mode 100644 go/sqltypes/cast_test.go create mode 100644 go/sqltypes/marshal.go create mode 100644 go/sqltypes/marshal_test.go create mode 100644 go/stats/opentsdb/backend.go create mode 100644 go/stats/opentsdb/by_metric.go rename go/stats/opentsdb/{opentsdb.go => collector.go} (54%) create mode 100644 go/stats/opentsdb/datapoint.go create mode 100644 go/stats/opentsdb/datapoint_reader.go create mode 100644 go/stats/opentsdb/doc.go create mode 100644 go/stats/opentsdb/file_writer.go rename go/{vt/status/status.go => stats/opentsdb/flags.go} (64%) create mode 100644 go/stats/opentsdb/http_writer.go create mode 100644 go/stats/opentsdb/init.go rename go/{vt/topo/k8stopo/boilerplate.go.txt => stats/opentsdb/writer.go} (83%) delete mode 100644 go/test/endtoend/backup/pitr/backup_mysqlctld_pitr_test.go create mode 100644 go/test/endtoend/backup/pitr/backup_pitr_test.go create mode 100644 go/test/endtoend/backup/pitr_xtrabackup/backup_pitr_xtrabackup_test.go create mode 100644 
go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go delete mode 100644 go/test/endtoend/cluster/vtgr_process.go delete mode 100755 go/test/endtoend/messaging/r delete mode 100644 go/test/endtoend/onlineddl/vrepl_suite/testdata/gbk-charset/create.sql create mode 100644 go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/after_columns create mode 100644 go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/alter create mode 100644 go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/before_columns create mode 100644 go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/create.sql delete mode 100644 go/test/endtoend/tabletmanager/throttler/throttler_test.go delete mode 100644 go/test/endtoend/tabletmanager/throttler_custom_config/throttler_test.go create mode 100644 go/test/endtoend/vreplication/fk_config_test.go create mode 100644 go/test/endtoend/vreplication/fk_test.go create mode 100644 go/test/endtoend/vreplication/movetables_buffering_test.go create mode 100644 go/test/endtoend/vreplication/partial_movetables_seq_test.go create mode 100644 go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go create mode 100644 go/test/endtoend/vreplication/wrappers_test.go create mode 100644 go/test/endtoend/vtgate/foreignkey/fk_fuzz_test.go create mode 100644 go/test/endtoend/vtgate/foreignkey/fk_test.go create mode 100644 go/test/endtoend/vtgate/foreignkey/main_test.go create mode 100644 go/test/endtoend/vtgate/foreignkey/sharded_schema.sql create mode 100644 go/test/endtoend/vtgate/foreignkey/sharded_vschema.json create mode 100644 go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go create mode 100644 go/test/endtoend/vtgate/foreignkey/unsharded_schema.sql create mode 100644 go/test/endtoend/vtgate/foreignkey/unsharded_vschema.json create mode 100644 go/test/endtoend/vtgate/foreignkey/utils_test.go delete mode 100644 go/test/endtoend/vtgate/queries/aggregation/fuzz_test.go create mode 100644 
go/test/endtoend/vtgate/queries/benchmark/benchmark_test.go create mode 100644 go/test/endtoend/vtgate/queries/benchmark/main_test.go create mode 100644 go/test/endtoend/vtgate/queries/benchmark/sharded_schema.sql rename go/test/endtoend/vtgate/{schematracker/unauthorized => queries/benchmark}/vschema.json (79%) create mode 100644 go/test/endtoend/vtgate/queries/kill/kill_test.go create mode 100644 go/test/endtoend/vtgate/queries/kill/main_test.go create mode 100644 go/test/endtoend/vtgate/queries/kill/schema.sql create mode 100644 go/test/endtoend/vtgate/queries/kill/vschema.json create mode 100644 go/test/endtoend/vtgate/queries/random/main_test.go create mode 100644 go/test/endtoend/vtgate/queries/random/query_gen.go create mode 100644 go/test/endtoend/vtgate/queries/random/query_gen_test.go create mode 100644 go/test/endtoend/vtgate/queries/random/random_expr_test.go create mode 100644 go/test/endtoend/vtgate/queries/random/random_test.go create mode 100644 go/test/endtoend/vtgate/queries/random/schema.sql create mode 100644 go/test/endtoend/vtgate/queries/random/simplifier_test.go create mode 100644 go/test/endtoend/vtgate/queries/random/svschema.json create mode 100644 go/test/endtoend/vtgate/queries/random/vschema.json create mode 100644 go/test/endtoend/vtgate/queries/timeout/main_test.go create mode 100644 go/test/endtoend/vtgate/queries/timeout/schema.sql create mode 100644 go/test/endtoend/vtgate/queries/timeout/timeout_test.go create mode 100644 go/test/endtoend/vtgate/queries/timeout/uschema.sql create mode 100644 go/test/endtoend/vtgate/queries/timeout/vschema.json delete mode 100644 go/test/endtoend/vtgate/schematracker/unauthorized/schema.sql delete mode 100644 go/test/endtoend/vtgate/schematracker/unauthorized/unauthorized_test.go delete mode 100644 go/test/endtoend/vtgr/my.cnf delete mode 100644 go/test/endtoend/vtgr/test_config.json delete mode 100644 go/test/endtoend/vtgr/vtgr_test.go create mode 100644 go/test/utils/noleak.go create mode 100644 
go/test/vschemawrapper/vschema_wrapper.go create mode 100644 go/tools/go-upgrade/go-upgrade_test.go create mode 100644 go/viperutil/internal/sync/sync_darwin_test.go create mode 100644 go/viperutil/internal/sync/sync_internal_test.go create mode 100644 go/viperutil/internal/sync/sync_linux_test.go delete mode 100644 go/vt/logutil/proto3_test.go create mode 100644 go/vt/mysqlctl/backup_blackbox_test.go delete mode 100644 go/vt/mysqlctl/builtinbackupengine2_test.go rename go/vt/{vtgr/plugin_etcd2topo.go => proto/vttime/cached_size.go} (61%) create mode 100644 go/vt/sqlparser/literal.go create mode 100644 go/vt/sqlparser/reserved_vars.go delete mode 100644 go/vt/topo/k8stopo/VitessTopoNodes-crd.yaml delete mode 100644 go/vt/topo/k8stopo/apis/topo/v1beta1/doc.go delete mode 100644 go/vt/topo/k8stopo/apis/topo/v1beta1/register.go delete mode 100644 go/vt/topo/k8stopo/apis/topo/v1beta1/types.go delete mode 100644 go/vt/topo/k8stopo/apis/topo/v1beta1/zz_generated.deepcopy.go delete mode 100644 go/vt/topo/k8stopo/client/clientset/versioned/clientset.go delete mode 100644 go/vt/topo/k8stopo/client/clientset/versioned/doc.go delete mode 100644 go/vt/topo/k8stopo/client/clientset/versioned/fake/clientset_generated.go delete mode 100644 go/vt/topo/k8stopo/client/clientset/versioned/fake/doc.go delete mode 100644 go/vt/topo/k8stopo/client/clientset/versioned/fake/register.go delete mode 100644 go/vt/topo/k8stopo/client/clientset/versioned/scheme/doc.go delete mode 100644 go/vt/topo/k8stopo/client/clientset/versioned/scheme/register.go delete mode 100644 go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/doc.go delete mode 100644 go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/doc.go delete mode 100644 go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/fake_topo_client.go delete mode 100644 go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/fake_vitesstoponode.go delete mode 100644 
go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/generated_expansion.go delete mode 100644 go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/topo_client.go delete mode 100644 go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/vitesstoponode.go delete mode 100644 go/vt/topo/k8stopo/client/informers/externalversions/factory.go delete mode 100644 go/vt/topo/k8stopo/client/informers/externalversions/generic.go delete mode 100644 go/vt/topo/k8stopo/client/informers/externalversions/internalinterfaces/factory_interfaces.go delete mode 100644 go/vt/topo/k8stopo/client/informers/externalversions/topo/interface.go delete mode 100644 go/vt/topo/k8stopo/client/informers/externalversions/topo/v1beta1/interface.go delete mode 100644 go/vt/topo/k8stopo/client/informers/externalversions/topo/v1beta1/vitesstoponode.go delete mode 100644 go/vt/topo/k8stopo/client/listers/topo/v1beta1/expansion_generated.go delete mode 100644 go/vt/topo/k8stopo/client/listers/topo/v1beta1/vitesstoponode.go delete mode 100644 go/vt/topo/k8stopo/config.go delete mode 100644 go/vt/topo/k8stopo/directory.go delete mode 100644 go/vt/topo/k8stopo/election.go delete mode 100644 go/vt/topo/k8stopo/error.go delete mode 100644 go/vt/topo/k8stopo/file.go delete mode 100644 go/vt/topo/k8stopo/file_test.go delete mode 100644 go/vt/topo/k8stopo/lock.go delete mode 100644 go/vt/topo/k8stopo/server.go delete mode 100644 go/vt/topo/k8stopo/server_flaky_test.go delete mode 100644 go/vt/topo/k8stopo/version.go delete mode 100644 go/vt/topo/k8stopo/watch.go create mode 100644 go/vt/vtctl/grpcvtctldserver/query.go create mode 100644 go/vt/vtctl/grpcvtctldserver/query_test.go delete mode 100644 go/vt/vtctl/plugin_kubernetestopo.go create mode 100644 go/vt/vtctl/schematools/marshal.go create mode 100644 go/vt/vtctl/schematools/marshal_test.go create mode 100644 go/vt/vtctl/schematools/schematools_test.go create mode 100644 
go/vt/vtctl/testdata/unknown-params-logged-dry-run-vschema.json create mode 100644 go/vt/vtctl/testdata/unknown-params-logged-vschema.json create mode 100644 go/vt/vtctl/workflow/log_recorder.go rename go/{cmd/vtctld/plugin_kubernetestopo.go => vt/vtctl/workflow/log_recorder_test.go} (57%) create mode 100644 go/vt/vtctl/workflow/materializer.go create mode 100644 go/vt/vtctl/workflow/materializer_env_test.go create mode 100644 go/vt/vtctl/workflow/materializer_test.go create mode 100644 go/vt/vtctl/workflow/mount.go create mode 100644 go/vt/vtctl/workflow/resharder.go create mode 100644 go/vt/vtctl/workflow/switcher.go create mode 100644 go/vt/vtctl/workflow/switcher_dry_run.go create mode 100644 go/vt/vtctl/workflow/switcher_interface.go create mode 100644 go/vt/vtctl/workflow/utils.go create mode 100644 go/vt/vtgate/endtoend/oltp_test.go create mode 100644 go/vt/vtgate/engine/aggregations.go create mode 100644 go/vt/vtgate/engine/aggregations_test.go delete mode 100644 go/vt/vtgate/engine/compare_utils.go create mode 100644 go/vt/vtgate/engine/ddl_test.go delete mode 100644 go/vt/vtgate/engine/distinctV3.go create mode 100644 go/vt/vtgate/engine/fk_cascade.go create mode 100644 go/vt/vtgate/engine/fk_cascade_test.go create mode 100644 go/vt/vtgate/engine/fk_verify.go create mode 100644 go/vt/vtgate/engine/fk_verify_test.go delete mode 100644 go/vt/vtgate/engine/gen4_compare_v3.go create mode 100644 go/vt/vtgate/engine/opcode/constants_test.go create mode 100644 go/vt/vtgate/engine/throttle_app.go rename go/vt/vtgate/engine/{pullout_subquery.go => uncorrelated_subquery.go} (71%) rename go/vt/vtgate/engine/{pullout_subquery_test.go => uncorrelated_subquery_test.go} (83%) create mode 100644 go/vt/vtgate/evalengine/api_aggregation.go create mode 100644 go/vt/vtgate/evalengine/api_aggregation_test.go delete mode 100644 go/vt/vtgate/evalengine/api_types.go create mode 100644 go/vt/vtgate/evalengine/collation.go create mode 100644 go/vt/vtgate/evalengine/fn_regexp.go 
create mode 100644 go/vt/vtgate/evalengine/integration/testdata/mysql_golden_1686149004.json create mode 100644 go/vt/vtgate/evalengine/weights.go create mode 100644 go/vt/vtgate/evalengine/weights_test.go delete mode 100644 go/vt/vtgate/mysql_protocol_test.go delete mode 100644 go/vt/vtgate/planbuilder/aggregation_pushing.go delete mode 100644 go/vt/vtgate/planbuilder/concatenateGen4.go delete mode 100644 go/vt/vtgate/planbuilder/doc.go delete mode 100644 go/vt/vtgate/planbuilder/expr.go delete mode 100644 go/vt/vtgate/planbuilder/expr_test.go delete mode 100644 go/vt/vtgate/planbuilder/fallback_planner.go delete mode 100644 go/vt/vtgate/planbuilder/fallback_planner_test.go delete mode 100644 go/vt/vtgate/planbuilder/filtering.go create mode 100644 go/vt/vtgate/planbuilder/fk_cascade.go create mode 100644 go/vt/vtgate/planbuilder/fk_verify.go delete mode 100644 go/vt/vtgate/planbuilder/from.go delete mode 100644 go/vt/vtgate/planbuilder/gen4_compare_v3_planner.go delete mode 100644 go/vt/vtgate/planbuilder/gen4_planner.go delete mode 100644 go/vt/vtgate/planbuilder/grouping.go delete mode 100644 go/vt/vtgate/planbuilder/hash_join.go delete mode 100644 go/vt/vtgate/planbuilder/horizon_planning.go delete mode 100644 go/vt/vtgate/planbuilder/horizon_planning_test.go delete mode 100644 go/vt/vtgate/planbuilder/joinGen4.go delete mode 100644 go/vt/vtgate/planbuilder/jointab.go delete mode 100644 go/vt/vtgate/planbuilder/jointab_test.go create mode 100644 go/vt/vtgate/planbuilder/operators/ast2op_test.go create mode 100644 go/vt/vtgate/planbuilder/operators/ast_to_op.go create mode 100644 go/vt/vtgate/planbuilder/operators/comments.go delete mode 100644 go/vt/vtgate/planbuilder/operators/correlated_subquery.go delete mode 100644 go/vt/vtgate/planbuilder/operators/derived.go create mode 100644 go/vt/vtgate/planbuilder/operators/fk_cascade.go create mode 100644 go/vt/vtgate/planbuilder/operators/fk_verify.go create mode 100644 
go/vt/vtgate/planbuilder/operators/horizon_expanding.go delete mode 100644 go/vt/vtgate/planbuilder/operators/horizon_planning.go create mode 100644 go/vt/vtgate/planbuilder/operators/join_merging.go delete mode 100644 go/vt/vtgate/planbuilder/operators/logical.go delete mode 100644 go/vt/vtgate/planbuilder/operators/merging.go create mode 100644 go/vt/vtgate/planbuilder/operators/phases.go create mode 100644 go/vt/vtgate/planbuilder/operators/query_planning.go create mode 100644 go/vt/vtgate/planbuilder/operators/subquery_builder.go create mode 100644 go/vt/vtgate/planbuilder/operators/subquery_container.go create mode 100644 go/vt/vtgate/planbuilder/operators/union_merging.go delete mode 100644 go/vt/vtgate/planbuilder/ordering.go create mode 100644 go/vt/vtgate/planbuilder/planner.go rename go/vt/vtgate/planbuilder/{gen4_planner_test.go => planner_test.go} (100%) delete mode 100644 go/vt/vtgate/planbuilder/primitive_builder.go delete mode 100644 go/vt/vtgate/planbuilder/project.go delete mode 100644 go/vt/vtgate/planbuilder/projection_pushing.go delete mode 100644 go/vt/vtgate/planbuilder/pullout_subquery.go delete mode 100644 go/vt/vtgate/planbuilder/routeGen4.go delete mode 100644 go/vt/vtgate/planbuilder/route_test.go delete mode 100644 go/vt/vtgate/planbuilder/subquery_op.go delete mode 100644 go/vt/vtgate/planbuilder/symtab.go delete mode 100644 go/vt/vtgate/planbuilder/symtab_test.go delete mode 100644 go/vt/vtgate/planbuilder/system_tables.go create mode 100644 go/vt/vtgate/planbuilder/testdata/foreignkey_cases.json create mode 100644 go/vt/vtgate/planbuilder/uncorrelated_subquery.go delete mode 100644 go/vt/vtgate/planbuilder/union.go create mode 100644 go/vt/vtgate/planbuilder/update.go delete mode 100644 go/vt/vtgate/planbuilder/update_planner.go create mode 100644 go/vt/vtgate/vindexes/foreign_keys.go create mode 100644 go/vt/vtgate/vindexes/foreign_keys_test.go create mode 100644 go/vt/vtgate/vindexes/main_test.go delete mode 100644 
go/vt/vtgr/config/vtgr_config.go delete mode 100644 go/vt/vtgr/config/vtgr_config.json delete mode 100644 go/vt/vtgr/config/vtgr_config_test.go delete mode 100644 go/vt/vtgr/controller/diagnose.go delete mode 100644 go/vt/vtgr/controller/diagnose_test.go delete mode 100644 go/vt/vtgr/controller/group.go delete mode 100644 go/vt/vtgr/controller/group_test.go delete mode 100644 go/vt/vtgr/controller/mock_refresh.go delete mode 100644 go/vt/vtgr/controller/refresh.go delete mode 100644 go/vt/vtgr/controller/refresh_test.go delete mode 100644 go/vt/vtgr/controller/repair.go delete mode 100644 go/vt/vtgr/controller/repair_test.go delete mode 100644 go/vt/vtgr/db/db.go delete mode 100644 go/vt/vtgr/db/generate_base.go delete mode 100644 go/vt/vtgr/db/generate_patches.go delete mode 100644 go/vt/vtgr/db/mock_mysql.go delete mode 100644 go/vt/vtgr/db/mysql.go delete mode 100644 go/vt/vtgr/db/tls.go delete mode 100644 go/vt/vtgr/inst/instance_key.go delete mode 100644 go/vt/vtgr/inst/instance_key_test.go delete mode 100644 go/vt/vtgr/log/log.go delete mode 100644 go/vt/vtgr/log/log_test.go delete mode 100644 go/vt/vtgr/plugin_consultopo.go delete mode 100644 go/vt/vtgr/plugin_grpctmclient.go delete mode 100644 go/vt/vtgr/plugin_zk2topo.go delete mode 100644 go/vt/vtgr/ssl/ssl.go delete mode 100644 go/vt/vtgr/ssl/ssl_test.go delete mode 100644 go/vt/vtgr/vtgr.go delete mode 100644 go/vt/vtgr/vtgr_test.go create mode 100644 go/vt/vthash/highway/LICENSE create mode 100644 go/vt/vthash/highway/highwayhash.go create mode 100644 go/vt/vthash/highway/highwayhashAVX2_amd64.s create mode 100644 go/vt/vthash/highway/highwayhash_amd64.go create mode 100644 go/vt/vthash/highway/highwayhash_amd64.s create mode 100644 go/vt/vthash/highway/highwayhash_arm64.go create mode 100644 go/vt/vthash/highway/highwayhash_arm64.s create mode 100644 go/vt/vthash/highway/highwayhash_generic.go create mode 100644 go/vt/vthash/highway/highwayhash_ppc64le.go create mode 100644 
go/vt/vthash/highway/highwayhash_ppc64le.s create mode 100644 go/vt/vthash/highway/highwayhash_ref.go create mode 100644 go/vt/vthash/highway/highwayhash_test.go delete mode 100644 go/vt/vtorc/discovery/metric_json.go delete mode 100644 go/vt/vtorc/inst/audit.go delete mode 100644 go/vt/vtorc/inst/candidate_database_instance.go delete mode 100644 go/vt/vtorc/inst/candidate_database_instance_dao.go delete mode 100644 go/vt/vtorc/inst/downtime.go delete mode 100644 go/vt/vtorc/inst/downtime_dao.go delete mode 100644 go/vt/vtorc/inst/durability.go delete mode 100644 go/vt/vtorc/inst/instance_binlog.go delete mode 100644 go/vt/vtorc/inst/instance_key.go delete mode 100644 go/vt/vtorc/inst/instance_key_map.go delete mode 100644 go/vt/vtorc/inst/instance_key_map_test.go delete mode 100644 go/vt/vtorc/inst/instance_key_test.go delete mode 100644 go/vt/vtorc/inst/maintenance.go delete mode 100644 go/vt/vtorc/inst/maintenance_dao.go delete mode 100644 go/vt/vtorc/inst/postponed_functions.go delete mode 100644 go/vt/vtorc/inst/process.go delete mode 100644 go/vt/vtorc/inst/resolve.go delete mode 100644 go/vt/vtorc/inst/resolve_dao.go create mode 100644 go/vt/vtorc/inst/shard_dao.go create mode 100644 go/vt/vtorc/inst/shard_dao_test.go create mode 100644 go/vt/vtorc/inst/tablet_dao_test.go delete mode 100644 go/vt/vtorc/inst/tag.go delete mode 100644 go/vt/vtorc/inst/tag_dao.go delete mode 100644 go/vt/vtorc/inst/tag_test.go rename go/vt/vtorc/logic/{keyspace_discovery.go => keyspace_shard_discovery.go} (54%) rename go/vt/vtorc/logic/{keyspace_discovery_test.go => keyspace_shard_discovery_test.go} (58%) rename go/vt/vtorc/logic/{orchestrator.go => vtorc.go} (82%) rename go/vt/vtorc/logic/{orchestrator_test.go => vtorc_test.go} (100%) create mode 100644 go/vt/vttablet/tabletmanager/framework_test.go create mode 100644 go/vt/vttablet/tabletmanager/rpc_throttler.go create mode 100644 go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go create mode 100644 
go/vt/vttablet/tabletserver/exclude_race_test.go create mode 100644 go/vt/vttablet/tabletserver/throttle/throttler_test.go create mode 100644 go/vt/vttablet/tabletserver/throttle/throttlerapp/app_test.go create mode 100644 go/vt/vttablet/tabletserver/vstreamer/tablestreamer.go create mode 100644 go/vt/vttablet/tabletserver/vstreamer/tablestreamer_test.go delete mode 100644 go/vt/wrangler/testlib/apply_schema_flaky_test.go create mode 100755 web/vtadmin/build.sh diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index b541376f4de..755ca395b68 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -17,7 +17,8 @@ go.sum @ajm188 @deepthi @harshit-gangal @mattlord @rohit-nayak-ps @systay @froui /go/cache @vmg /go/cmd @ajm188 @deepthi /go/cmd/vtadmin @ajm188 @notfelineit -/go/cmd/vtctldclient @ajm188 @notfelineit +/go/cmd/vtctldclient @ajm188 @mattlord +/go/cmd/vtctldclient/command/vreplication @mattlord @rohit-nayak-ps /go/internal/flag @ajm188 @rohit-nayak-ps /go/mysql @harshit-gangal @systay @mattlord /go/pools @deepthi @harshit-gangal diff --git a/.github/workflows/assign_milestone.yml b/.github/workflows/assign_milestone.yml index 3aae05bacc0..7c56f45728f 100644 --- a/.github/workflows/assign_milestone.yml +++ b/.github/workflows/assign_milestone.yml @@ -20,7 +20,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Checkout code uses: actions/checkout@v3 diff --git a/.github/workflows/auto_approve_pr.yml b/.github/workflows/auto_approve_pr.yml new file mode 100644 index 00000000000..552f1ec2e68 --- /dev/null +++ b/.github/workflows/auto_approve_pr.yml @@ -0,0 +1,23 @@ +name: Auto Approval of Bot Pull Requests +on: + pull_request: + types: [opened, reopened] + +jobs: + auto_approve: + name: Auto Approve Pull Request + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Auto Approve Pull Request + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + # 
here we are checking that the PR has been created by the vitess-bot[bot] account and that it is not a draft + # if there is a merge conflict in the backport, the PR will always be created as a draft, meaning we can rely + # on checking whether or not the PR is a draft + if [[ "${{github.event.pull_request.user.login}}" == "vitess-bot[bot]" ]] && [[ "${{github.event.pull_request.draft}}" == "false" ]]; then + gh pr review ${{ github.event.pull_request.number }} --approve + fi diff --git a/.github/workflows/check_make_vtadmin_authz_testgen.yml b/.github/workflows/check_make_vtadmin_authz_testgen.yml index 98f5f4fd767..8f9199e7658 100644 --- a/.github/workflows/check_make_vtadmin_authz_testgen.yml +++ b/.github/workflows/check_make_vtadmin_authz_testgen.yml @@ -50,7 +50,7 @@ jobs: uses: actions/setup-go@v4 if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.vtadmin_changes == 'true' with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.vtadmin_changes == 'true' diff --git a/.github/workflows/check_make_vtadmin_web_proto.yml b/.github/workflows/check_make_vtadmin_web_proto.yml index 5f3fd821893..5f3302fc97c 100644 --- a/.github/workflows/check_make_vtadmin_web_proto.yml +++ b/.github/workflows/check_make_vtadmin_web_proto.yml @@ -52,14 +52,14 @@ jobs: uses: actions/setup-go@v4 if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true' with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Setup Node if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true' uses: actions/setup-node@v3 with: # node-version should match package.json - node-version: '16.19.0' + node-version: '18.16.0' - name: Install npm dependencies if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true' diff --git 
a/.github/workflows/cluster_endtoend_12.yml b/.github/workflows/cluster_endtoend_12.yml index f27268263d5..5ce650f1ea6 100644 --- a/.github/workflows/cluster_endtoend_12.yml +++ b/.github/workflows/cluster_endtoend_12.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (12) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . 
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard 12 | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_13.yml b/.github/workflows/cluster_endtoend_13.yml index f08387a9921..fa98916736f 100644 --- a/.github/workflows/cluster_endtoend_13.yml +++ b/.github/workflows/cluster_endtoend_13.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (13) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard 13 | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_15.yml b/.github/workflows/cluster_endtoend_15.yml index ad3082a4f69..2501f26ab58 100644 --- a/.github/workflows/cluster_endtoend_15.yml +++ b/.github/workflows/cluster_endtoend_15.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (15) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard 15 | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_18.yml b/.github/workflows/cluster_endtoend_18.yml index d4826b6f2ed..234e672afb0 100644 --- a/.github/workflows/cluster_endtoend_18.yml +++ b/.github/workflows/cluster_endtoend_18.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (18) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -109,7 +116,7 @@ jobs: make tools - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -118,7 +125,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -129,16 +136,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard 18 | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_21.yml b/.github/workflows/cluster_endtoend_21.yml index 13c861a0ee7..feeedcd46b8 100644 --- a/.github/workflows/cluster_endtoend_21.yml +++ b/.github/workflows/cluster_endtoend_21.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (21) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard 21 | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_22.yml b/.github/workflows/cluster_endtoend_22.yml index 59367df8278..f4cee992fb2 100644 --- a/.github/workflows/cluster_endtoend_22.yml +++ b/.github/workflows/cluster_endtoend_22.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (22) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard 22 | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_backup_pitr.yml b/.github/workflows/cluster_endtoend_backup_pitr.yml index 7e70dabe84f..9b97e08f7b2 100644 --- a/.github/workflows/cluster_endtoend_backup_pitr.yml +++ b/.github/workflows/cluster_endtoend_backup_pitr.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (backup_pitr) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -107,7 +114,7 @@ jobs: sudo apt-get install percona-xtrabackup-80 lz4 - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -116,7 +123,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -127,16 +134,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard backup_pitr | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml b/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml index 8d93e57728d..210dfc9ba95 100644 --- a/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml +++ b/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml @@ -20,7 +20,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (backup_pitr) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -40,6 +40,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -68,7 +75,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -131,7 +138,7 @@ jobs: fi - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -140,7 +147,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -151,16 +158,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard backup_pitr | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml new file mode 100644 index 00000000000..6cad7922321 --- /dev/null +++ b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml @@ -0,0 +1,151 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (backup_pitr_xtrabackup) +on: [push, pull_request] +concurrency: + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (backup_pitr_xtrabackup)') + cancel-in-progress: true + +permissions: read-all + +env: + LAUNCHABLE_ORGANIZATION: "vitess" + LAUNCHABLE_WORKSPACE: "vitess-app" + GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" + +jobs: + build: + name: Run endtoend tests on Cluster (backup_pitr_xtrabackup) + runs-on: gh-hosted-runners-4cores-1 + + steps: + - name: Skip CI + run: | + if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then + echo "skipping CI due to the 'Skip CI' label" + exit 1 + fi + + - name: Check if workflow needs to be skipped + id: skip-workflow + run: | + skip='false' + if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + skip='true' + fi + echo Skip ${skip} + echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + + - name: Check out code + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: actions/checkout@v3 + + - name: Check for changes in relevant files + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: frouioui/paths-filter@main + id: changes + with: + token: '' + filters: | + end_to_end: + - 'go/**/*.go' + - 'test.go' + - 'Makefile' + - 'build.env' + - 'go.sum' + - 'go.mod' + - 'proto/*.proto' + - 'tools/**' + - 'config/**' + - 'bootstrap.sh' + - '.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml' + + - name: Set up Go + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@v4 + with: + go-version: 1.21.3 + + - name: Set up python + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-python@v4 + + - name: Tune the OS + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + # Limit local port range to not use ports that overlap with server side + # ports that we listen on. + sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" + # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio + echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf + sudo sysctl -p /etc/sysctl.conf + + - name: Get dependencies + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + + # Setup Percona Server for MySQL 8.0 + sudo apt-get update + sudo apt-get install -y lsb-release gnupg2 curl + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo percona-release setup ps80 + sudo apt-get update + + # Install everything else we need, and configure + sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils libncurses5 + + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + # install JUnit report formatter + go install github.com/vitessio/go-junit-report@HEAD + + sudo apt-get install -y percona-xtrabackup-80 lz4 + + - name: Setup launchable dependencies + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + run: | + # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up + pip3 install --user launchable~=1.0 > /dev/null + + # verify that launchable setup is all correct. + launchable verify || true + + # Tell Launchable about the build you are producing and testing + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . 
+ + - name: Run cluster endtoend test + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + timeout-minutes: 45 + run: | + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which musn't be more than 107 characters long. + export VTDATAROOT="/tmp/" + source build.env + + set -exo pipefail + + # run the tests however you normally do, then produce a JUnit XML file + eatmydata -- go run test.go -docker=false -follow -shard backup_pitr_xtrabackup | tee -a output.txt | go-junit-report -set-exit-code > report.xml + + - name: Print test output and Record test result in launchable if PR is not a draft + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + run: | + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi + + # print test output + cat output.txt diff --git a/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup_mysql57.yml b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup_mysql57.yml new file mode 100644 index 00000000000..b895a19a8d0 --- /dev/null +++ b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup_mysql57.yml @@ -0,0 +1,175 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (backup_pitr_xtrabackup) mysql57 +on: [push, pull_request] +concurrency: + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (backup_pitr_xtrabackup) mysql57') + cancel-in-progress: true + +permissions: read-all + +env: + LAUNCHABLE_ORGANIZATION: "vitess" + LAUNCHABLE_WORKSPACE: "vitess-app" + GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" + + # This is used if we need to pin the xtrabackup version used in tests. + # If this is NOT set then the latest version available will be used. 
+ #XTRABACKUP_VERSION: "2.4.24-1" + +jobs: + build: + name: Run endtoend tests on Cluster (backup_pitr_xtrabackup) mysql57 + runs-on: gh-hosted-runners-4cores-1 + + steps: + - name: Skip CI + run: | + if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then + echo "skipping CI due to the 'Skip CI' label" + exit 1 + fi + + - name: Check if workflow needs to be skipped + id: skip-workflow + run: | + skip='false' + if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + skip='true' + fi + echo Skip ${skip} + echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + + - name: Check out code + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: actions/checkout@v3 + + - name: Check for changes in relevant files + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: frouioui/paths-filter@main + id: changes + with: + token: '' + filters: | + end_to_end: + - 'go/**/*.go' + - 'test.go' + - 'Makefile' + - 'build.env' + - 'go.sum' + - 'go.mod' + - 'proto/*.proto' + - 'tools/**' + - 'config/**' + - 'bootstrap.sh' + - '.github/workflows/cluster_endtoend_backup_pitr_xtrabackup_mysql57.yml' + + - name: Set up Go + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@v4 + with: + go-version: 1.21.3 + + - name: Set up python + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-python@v4 + + - name: Tune the OS + 
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" + # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio + echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf + sudo sysctl -p /etc/sysctl.conf + + - name: Get dependencies + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + sudo apt-get update + + # Uninstall any previously installed MySQL first + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + + sudo systemctl stop apparmor + sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -y autoremove + sudo apt-get -y autoclean + sudo deluser mysql + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + + # Get key to latest MySQL repo + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 + # packages for Jammy. 
+ echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections + echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* + sudo apt-get update + sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 + + sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + + # install JUnit report formatter + go install github.com/vitessio/go-junit-report@HEAD + + wget "https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb" + sudo apt-get install -y gnupg2 + sudo dpkg -i "percona-release_latest.$(lsb_release -sc)_all.deb" + sudo apt-get update + if [[ -n $XTRABACKUP_VERSION ]]; then + debfile="percona-xtrabackup-24_$XTRABACKUP_VERSION.$(lsb_release -sc)_amd64.deb" + wget "https://repo.percona.com/pxb-24/apt/pool/main/p/percona-xtrabackup-24/$debfile" + sudo apt install -y "./$debfile" + else + sudo apt-get install -y percona-xtrabackup-24 + fi + + - name: Setup launchable dependencies + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + run: | + # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up + pip3 install --user launchable~=1.0 > /dev/null + + # verify that launchable setup is all correct. + launchable verify || true + + # Tell Launchable about the build you are producing and testing + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . 
+ + - name: Run cluster endtoend test + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + timeout-minutes: 45 + run: | + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which mustn't be more than 107 characters long. + export VTDATAROOT="/tmp/" + source build.env + + set -exo pipefail + + # run the tests however you normally do, then produce a JUnit XML file + eatmydata -- go run test.go -docker=false -follow -shard backup_pitr_xtrabackup | tee -a output.txt | go-junit-report -set-exit-code > report.xml + + - name: Print test output and Record test result in launchable if PR is not a draft + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + run: | + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi + + # print test output + cat output.txt diff --git a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml index b46206057b6..f65d2625c28 100644 --- a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml +++ b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (ers_prs_newfeatures_heavy) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + + - name: Check out code if: 
steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . 
- name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -147,11 +154,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard ers_prs_newfeatures_heavy | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_mysql80.yml b/.github/workflows/cluster_endtoend_mysql80.yml index a1d1151aa5a..5c3739aafd0 100644 --- a/.github/workflows/cluster_endtoend_mysql80.yml +++ b/.github/workflows/cluster_endtoend_mysql80.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (mysql80) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard mysql80 | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_mysql_server_vault.yml b/.github/workflows/cluster_endtoend_mysql_server_vault.yml index d808fe29dc2..793e7372309 100644 --- a/.github/workflows/cluster_endtoend_mysql_server_vault.yml +++ b/.github/workflows/cluster_endtoend_mysql_server_vault.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (mysql_server_vault) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -109,7 +116,7 @@ jobs: make tools - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -118,7 +125,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -129,16 +136,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard mysql_server_vault | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml index ea414171ac1..af61a6a5059 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_ghost) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -65,7 +72,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -105,7 +112,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -114,7 +121,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -125,16 +132,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_ghost | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml index 4c348478573..43dc184c204 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_ghost) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -65,7 +72,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -116,7 +123,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -125,7 +132,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -136,16 +143,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_ghost | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_revert.yml b/.github/workflows/cluster_endtoend_onlineddl_revert.yml index 7b43c85def4..d2c6e23ee86 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_revert.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_revert.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_revert) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -65,7 +72,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -105,7 +112,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -114,7 +121,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -125,16 +132,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_revert | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml index 3a3ec4c4168..ac93c1ac532 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_revert) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -65,7 +72,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -116,7 +123,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -125,7 +132,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -136,16 +143,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_revert | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml index b698c74544b..38031f4441e 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_scheduler) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -65,7 +72,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -105,7 +112,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -114,7 +121,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -125,16 +132,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_scheduler | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml index d3401f81ce3..0a205266c4f 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_scheduler) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -65,7 +72,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -116,7 +123,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -125,7 +132,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -136,16 +143,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_scheduler | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml index 363ed20249f..d83fb7010b8 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_vrepl) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -65,7 +72,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -105,7 +112,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -114,7 +121,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -125,7 +132,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail cat <<-EOF>>./config/mycnf/mysql80.cnf binlog-transaction-compression=ON @@ -134,11 +141,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml index 75f4493ae28..a941c9faef0 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_vrepl) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -65,7 +72,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -116,7 +123,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -125,7 +132,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -136,16 +143,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml index 9769be1cfd2..a51cb6c33fe 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_vrepl_stress) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -65,7 +72,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -105,7 +112,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -114,7 +121,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -125,7 +132,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail cat <<-EOF>>./config/mycnf/mysql80.cnf binlog-transaction-compression=ON @@ -134,11 +141,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml index 63f5a8e4b07..77626919a89 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_vrepl_stress) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -65,7 +72,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -116,7 +123,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -125,7 +132,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -136,16 +143,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml index d1eab9ca6ed..1230fcd3518 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_vrepl_stress_suite) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -65,7 +72,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -105,7 +112,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -114,7 +121,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -125,7 +132,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail cat <<-EOF>>./config/mycnf/mysql80.cnf binlog-transaction-compression=ON @@ -134,11 +141,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml index 19ba98829a0..86ef8eec019 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_vrepl_stress_suite) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -65,7 +72,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -116,7 +123,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -125,7 +132,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -136,16 +143,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml index dd9d6cf45aa..34e521d648f 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_vrepl_suite) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -65,7 +72,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -105,7 +112,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -114,7 +121,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -125,7 +132,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail cat <<-EOF>>./config/mycnf/mysql80.cnf binlog-transaction-compression=ON @@ -134,11 +141,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml index 45156849e79..a400ea99677 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_vrepl_suite) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -65,7 +72,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -116,7 +123,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -125,7 +132,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -136,16 +143,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml index 7b169b16e01..68a25ee46ec 100644 --- a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml +++ b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (schemadiff_vrepl) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -65,7 +72,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -105,7 +112,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -114,7 +121,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -125,7 +132,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail cat <<-EOF>>./config/mycnf/mysql80.cnf binlog-transaction-compression=ON @@ -134,11 +141,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard schemadiff_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml b/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml index 8c2d2dc9dc1..ba57948d162 100644 --- a/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml +++ b/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (schemadiff_vrepl) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -65,7 +72,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -116,7 +123,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -125,7 +132,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -136,16 +143,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard schemadiff_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml index c21ae5be321..0fe0d4e18da 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (tabletmanager_consul) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -109,7 +116,7 @@ jobs: make tools - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -118,7 +125,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -129,16 +136,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_consul | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml index 0e81f901b79..5af0e2ff852 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (tabletmanager_tablegc) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_tablegc | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml b/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml index b1bba866718..e1ae8eeb69c 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (tabletmanager_tablegc) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -115,7 +122,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -124,7 +131,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -135,16 +142,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_tablegc | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml b/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml index e8928bd2e32..8b6826f257c 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (tabletmanager_throttler_topo) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_throttler_topo | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_topo_connection_cache.yml b/.github/workflows/cluster_endtoend_topo_connection_cache.yml index 68e0aef2216..bb59336df48 100644 --- a/.github/workflows/cluster_endtoend_topo_connection_cache.yml +++ b/.github/workflows/cluster_endtoend_topo_connection_cache.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (topo_connection_cache) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard topo_connection_cache | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml index f424f6757b1..ec3d101629e 100644 --- a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml +++ b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vreplication_across_db_versions) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -151,11 +158,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vreplication_across_db_versions | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vreplication_basic.yml b/.github/workflows/cluster_endtoend_vreplication_basic.yml index 16f03764d0d..ea6219bf869 100644 --- a/.github/workflows/cluster_endtoend_vreplication_basic.yml +++ b/.github/workflows/cluster_endtoend_vreplication_basic.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vreplication_basic) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -151,11 +158,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vreplication_basic | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml index ea842718f62..5ef46750668 100644 --- a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml +++ b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vreplication_cellalias) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -151,11 +158,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vreplication_cellalias | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml b/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml index 92f85f7e7ae..d8961314a46 100644 --- a/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml +++ b/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vreplication_migrate_vdiff2_convert_tz) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -151,11 +158,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vreplication_migrate_vdiff2_convert_tz | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vreplication_multicell.yml b/.github/workflows/cluster_endtoend_vreplication_multicell.yml index d4f68493680..328c062e1d0 100644 --- a/.github/workflows/cluster_endtoend_vreplication_multicell.yml +++ b/.github/workflows/cluster_endtoend_vreplication_multicell.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vreplication_multicell) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -151,11 +158,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vreplication_multicell | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml similarity index 70% rename from .github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml rename to .github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml index 60d117c49d9..28dca240332 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml +++ b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml @@ -1,9 +1,9 @@ # DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" -name: Cluster (tabletmanager_throttler_custom_config) +name: Cluster (vreplication_partial_movetables_basic) on: [push, pull_request] concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (tabletmanager_throttler_custom_config)') + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_partial_movetables_basic)') cancel-in-progress: true permissions: read-all @@ -15,8 +15,8 @@ env: jobs: build: - name: Run endtoend tests on Cluster (tabletmanager_throttler_custom_config) - runs-on: ubuntu-22.04 + name: Run endtoend tests on Cluster (vreplication_partial_movetables_basic) + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -58,13 +65,13 @@ jobs: - 'tools/**' - 'config/**' - 'bootstrap.sh' - - 
'.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml' + - '.github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml' - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . 
- name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,40 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x - + set -exo pipefail + + # Increase our open file descriptor limit as we could hit this + ulimit -n 65536 + cat <<-EOF>>./config/mycnf/mysql80.cnf + innodb_buffer_pool_dump_at_shutdown=OFF + innodb_buffer_pool_in_core_file=OFF + innodb_buffer_pool_load_at_startup=OFF + innodb_buffer_pool_size=64M + innodb_doublewrite=OFF + innodb_flush_log_at_trx_commit=0 + innodb_flush_method=O_DIRECT + innodb_numa_interleave=ON + innodb_adaptive_hash_index=OFF + sync_binlog=0 + sync_relay_log=0 + performance_schema=OFF + slow-query-log=OFF + EOF + + cat <<-EOF>>./config/mycnf/mysql80.cnf + binlog-transaction-compression=ON + EOF + # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_throttler_custom_config | tee -a output.txt | go-junit-report -set-exit-code > report.xml + eatmydata -- go run test.go -docker=false -follow -shard vreplication_partial_movetables_basic | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml new file mode 100644 index 00000000000..c002a72d1e7 --- /dev/null +++ b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml @@ -0,0 +1,170 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vreplication_partial_movetables_sequences) +on: [push, pull_request] +concurrency: + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_partial_movetables_sequences)') + cancel-in-progress: true + +permissions: read-all + +env: + LAUNCHABLE_ORGANIZATION: "vitess" + LAUNCHABLE_WORKSPACE: "vitess-app" + GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" + +jobs: + build: + name: Run endtoend tests on Cluster (vreplication_partial_movetables_sequences) + runs-on: gh-hosted-runners-4cores-1 + + steps: + - name: Skip CI + run: | + if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then + echo "skipping CI due to the 'Skip CI' label" + exit 1 + fi + + - name: Check if workflow needs to be skipped + id: skip-workflow + run: | + skip='false' + if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + skip='true' + fi + echo Skip ${skip} + echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + + - name: Check out code + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: actions/checkout@v3 + + - name: Check for changes in relevant files + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: frouioui/paths-filter@main + id: changes + with: + token: '' + filters: | + end_to_end: + - 'go/**/*.go' + - 'test.go' + - 'Makefile' + - 'build.env' + - 'go.sum' + - 'go.mod' + - 'proto/*.proto' + - 'tools/**' + - 'config/**' + - 'bootstrap.sh' + - '.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml' + + - name: Set up Go + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@v4 + with: + go-version: 1.21.3 + + - name: Set up python + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-python@v4 + + - name: Tune the OS + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + # Limit local port range to not use ports that overlap with server side + # ports that we listen on. + sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" + # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio + echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf + sudo sysctl -p /etc/sysctl.conf + + - name: Get dependencies + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + + # Get key to latest MySQL repo + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + # Setup MySQL 8.0 + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* + sudo apt-get update + # Install everything else we need, and configure + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + # install JUnit report formatter + go install github.com/vitessio/go-junit-report@HEAD + + - name: Setup launchable dependencies + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + run: | + # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up + pip3 install --user launchable~=1.0 > /dev/null + + # verify that launchable setup is all correct. + launchable verify || true + + # Tell Launchable about the build you are producing and testing + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . 
+ + - name: Run cluster endtoend test + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + timeout-minutes: 45 + run: | + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which musn't be more than 107 characters long. + export VTDATAROOT="/tmp/" + source build.env + + set -exo pipefail + + # Increase our open file descriptor limit as we could hit this + ulimit -n 65536 + cat <<-EOF>>./config/mycnf/mysql80.cnf + innodb_buffer_pool_dump_at_shutdown=OFF + innodb_buffer_pool_in_core_file=OFF + innodb_buffer_pool_load_at_startup=OFF + innodb_buffer_pool_size=64M + innodb_doublewrite=OFF + innodb_flush_log_at_trx_commit=0 + innodb_flush_method=O_DIRECT + innodb_numa_interleave=ON + innodb_adaptive_hash_index=OFF + sync_binlog=0 + sync_relay_log=0 + performance_schema=OFF + slow-query-log=OFF + EOF + + cat <<-EOF>>./config/mycnf/mysql80.cnf + binlog-transaction-compression=ON + EOF + + # run the tests however you normally do, then produce a JUnit XML file + eatmydata -- go run test.go -docker=false -follow -shard vreplication_partial_movetables_sequences | tee -a output.txt | go-junit-report -set-exit-code > report.xml + + - name: Print test output and Record test result in launchable if PR is not a draft + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + run: | + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi + + # print test output + cat output.txt diff --git a/.github/workflows/cluster_endtoend_vreplication_v2.yml b/.github/workflows/cluster_endtoend_vreplication_v2.yml index 8faa40f1e7d..9229b34a5bf 100644 --- a/.github/workflows/cluster_endtoend_vreplication_v2.yml +++ b/.github/workflows/cluster_endtoend_vreplication_v2.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vreplication_v2) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -151,11 +158,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vreplication_v2 | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vstream_failover.yml b/.github/workflows/cluster_endtoend_vstream_failover.yml index f8d6871a539..a620b8caad9 100644 --- a/.github/workflows/cluster_endtoend_vstream_failover.yml +++ b/.github/workflows/cluster_endtoend_vstream_failover.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vstream_failover) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vstream_failover | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml b/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml index de34b4824da..5db27dad710 100644 --- a/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml +++ b/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vstream_stoponreshard_false) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vstream_stoponreshard_false | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml b/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml index 1e4fca5fcba..32e7685bf8f 100644 --- a/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml +++ b/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vstream_stoponreshard_true) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vstream_stoponreshard_true | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml b/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml index eebc7fef114..27620919d99 100644 --- a/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml +++ b/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vstream_with_keyspaces_to_watch) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vstream_with_keyspaces_to_watch | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtbackup.yml b/.github/workflows/cluster_endtoend_vtbackup.yml index 2f41e175096..8f2dcd3768b 100644 --- a/.github/workflows/cluster_endtoend_vtbackup.yml +++ b/.github/workflows/cluster_endtoend_vtbackup.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtbackup) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtbackup | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml index 75580e5b858..aad84a910c6 100644 --- a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml +++ b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtctlbackup_sharded_clustertest_heavy) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -147,11 +154,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtctlbackup_sharded_clustertest_heavy | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml index bb673daa144..19bb9efe86c 100644 --- a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml +++ b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_concurrentdml) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_concurrentdml | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml b/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml similarity index 78% rename from .github/workflows/cluster_endtoend_tabletmanager_throttler.yml rename to .github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml index 813d7932f62..e2824c5844d 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml +++ b/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml @@ -1,9 +1,9 @@ # DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" -name: Cluster (tabletmanager_throttler) +name: Cluster (vtgate_foreignkey_stress) on: [push, pull_request] concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (tabletmanager_throttler)') + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_foreignkey_stress)') cancel-in-progress: true permissions: read-all @@ -15,8 +15,8 @@ env: jobs: build: - name: Run endtoend tests on Cluster (tabletmanager_throttler) - runs-on: ubuntu-22.04 + name: Run endtoend tests on Cluster (vtgate_foreignkey_stress) + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -58,13 +65,13 @@ jobs: - 'tools/**' - 'config/**' - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_tabletmanager_throttler.yml' + - '.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml' - name: Set up Go if: 
steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . 
- name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_throttler | tee -a output.txt | go-junit-report -set-exit-code > report.xml + eatmydata -- go run test.go -docker=false -follow -shard vtgate_foreignkey_stress | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_gen4.yml b/.github/workflows/cluster_endtoend_vtgate_gen4.yml index 2d9d59566e0..205de4b5e68 100644 --- a/.github/workflows/cluster_endtoend_vtgate_gen4.yml +++ b/.github/workflows/cluster_endtoend_vtgate_gen4.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_gen4) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_gen4 | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml index 30a1405afa4..98d59d60aee 100644 --- a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml +++ b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_general_heavy) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -147,11 +154,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_general_heavy | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_godriver.yml b/.github/workflows/cluster_endtoend_vtgate_godriver.yml index 1162fc3e282..2f4082d10d4 100644 --- a/.github/workflows/cluster_endtoend_vtgate_godriver.yml +++ b/.github/workflows/cluster_endtoend_vtgate_godriver.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_godriver) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_godriver | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml index bd5048a9aa8..4a9f6e227fb 100644 --- a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml +++ b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_partial_keyspace) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_partial_keyspace -partial-keyspace=true | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_queries.yml b/.github/workflows/cluster_endtoend_vtgate_queries.yml index 41c0871c6b9..6d41d922fc4 100644 --- a/.github/workflows/cluster_endtoend_vtgate_queries.yml +++ b/.github/workflows/cluster_endtoend_vtgate_queries.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_queries) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_queries | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml index 678dc12099b..028e1492029 100644 --- a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml +++ b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_readafterwrite) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_readafterwrite | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml index 7c1577d4dd7..5972472402e 100644 --- a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml +++ b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_reservedconn) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_reservedconn | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_schema.yml b/.github/workflows/cluster_endtoend_vtgate_schema.yml index c85d8386ec5..68a2bd697be 100644 --- a/.github/workflows/cluster_endtoend_vtgate_schema.yml +++ b/.github/workflows/cluster_endtoend_vtgate_schema.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_schema) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_schema | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml index 3e6fe5f6059..1c5d1e675f8 100644 --- a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml +++ b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_schema_tracker) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_schema_tracker | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml index 04ac695c5e4..26adb43fd74 100644 --- a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml +++ b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_tablet_healthcheck_cache) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_tablet_healthcheck_cache | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_topo.yml b/.github/workflows/cluster_endtoend_vtgate_topo.yml index 06a199e2585..49945a607d8 100644 --- a/.github/workflows/cluster_endtoend_vtgate_topo.yml +++ b/.github/workflows/cluster_endtoend_vtgate_topo.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_topo) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_topo | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml index d2064784715..ee72650dcbd 100644 --- a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml +++ b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_topo_consul) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -109,7 +116,7 @@ jobs: make tools - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -118,7 +125,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -129,16 +136,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_topo_consul | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml index 9550cc1e0c3..4051373d9aa 100644 --- a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml +++ b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_topo_etcd) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_topo_etcd | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_transaction.yml b/.github/workflows/cluster_endtoend_vtgate_transaction.yml index 19de405dadc..b7cc848692f 100644 --- a/.github/workflows/cluster_endtoend_vtgate_transaction.yml +++ b/.github/workflows/cluster_endtoend_vtgate_transaction.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_transaction) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_transaction | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml index a50581b44dd..b6359682993 100644 --- a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml +++ b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_unsharded) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_unsharded | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml index f5a8bde45f5..83fb2b2d829 100644 --- a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml +++ b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_vindex_heavy) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -147,11 +154,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_vindex_heavy | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_vschema.yml b/.github/workflows/cluster_endtoend_vtgate_vschema.yml index 85aa4c14eb8..4c2f3b2637d 100644 --- a/.github/workflows/cluster_endtoend_vtgate_vschema.yml +++ b/.github/workflows/cluster_endtoend_vtgate_vschema.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_vschema) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_vschema | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtorc.yml b/.github/workflows/cluster_endtoend_vtorc.yml index 41e33da32aa..872576ab8b5 100644 --- a/.github/workflows/cluster_endtoend_vtorc.yml +++ b/.github/workflows/cluster_endtoend_vtorc.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtorc) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtorc | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtorc_mysql57.yml b/.github/workflows/cluster_endtoend_vtorc_mysql57.yml index 0e46ecd4972..72baf7940b6 100644 --- a/.github/workflows/cluster_endtoend_vtorc_mysql57.yml +++ b/.github/workflows/cluster_endtoend_vtorc_mysql57.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtorc) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -115,7 +122,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -124,7 +131,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -135,16 +142,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtorc | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml index 4ce2061a0df..b56d4dc61a5 100644 --- a/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml +++ b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vttablet_prscomplex) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -113,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -124,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vttablet_prscomplex | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_xb_backup.yml b/.github/workflows/cluster_endtoend_xb_backup.yml index 4130f7b4ee0..f24baaf31af 100644 --- a/.github/workflows/cluster_endtoend_xb_backup.yml +++ b/.github/workflows/cluster_endtoend_xb_backup.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (xb_backup) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,10 +111,10 @@ jobs: # install JUnit report formatter go install github.com/vitessio/go-junit-report@HEAD - sudo apt-get install percona-xtrabackup-80 lz4 + sudo apt-get install -y percona-xtrabackup-80 lz4 - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -116,7 +123,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -127,16 +134,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard xb_backup | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml b/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml index 49707ea16dc..b85628a0dbe 100644 --- a/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml +++ b/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml @@ -20,7 +20,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (xb_backup) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -40,6 +40,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -68,7 +75,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -131,7 +138,7 @@ jobs: fi - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -140,7 +147,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -151,16 +158,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard xb_backup | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_xb_recovery.yml b/.github/workflows/cluster_endtoend_xb_recovery.yml index 70e98a618a4..3fbe34b0569 100644 --- a/.github/workflows/cluster_endtoend_xb_recovery.yml +++ b/.github/workflows/cluster_endtoend_xb_recovery.yml @@ -16,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (xb_recovery) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -104,10 +111,10 @@ jobs: # install JUnit report formatter go install github.com/vitessio/go-junit-report@HEAD - sudo apt-get install percona-xtrabackup-80 lz4 + sudo apt-get install -y percona-xtrabackup-80 lz4 - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -116,7 +123,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -127,16 +134,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard xb_recovery | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml b/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml index 1982c1ad1ea..aaa2b034105 100644 --- a/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml +++ b/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml @@ -20,7 +20,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (xb_recovery) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -40,6 +40,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -68,7 +75,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -131,7 +138,7 @@ jobs: fi - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -140,7 +147,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -151,16 +158,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard xb_recovery | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/codeql_analysis.yml b/.github/workflows/codeql_analysis.yml index 5cef0644623..8bafc62213a 100644 --- a/.github/workflows/codeql_analysis.yml +++ b/.github/workflows/codeql_analysis.yml @@ -44,7 +44,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Get base dependencies run: | @@ -79,7 +79,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 - name: Building binaries timeout-minutes: 30 diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 7c2c75b2afe..52c90038680 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -20,7 +20,12 @@ jobs: - name: Set up Go uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 + + - name: Setup node + uses: actions/setup-node@v3 + with: + node-version: '18.16.0' - name: Tune the OS run: | diff --git a/.github/workflows/docker_test_cluster_10.yml b/.github/workflows/docker_test_cluster_10.yml index 606ae666820..3ff9a2a6e74 100644 --- a/.github/workflows/docker_test_cluster_10.yml +++ b/.github/workflows/docker_test_cluster_10.yml @@ -5,7 +5,7 @@ jobs: build: name: Docker Test Cluster 10 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -54,7 +54,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/docker_test_cluster_25.yml b/.github/workflows/docker_test_cluster_25.yml index 02f984e90b1..e01caf200b1 100644 --- 
a/.github/workflows/docker_test_cluster_25.yml +++ b/.github/workflows/docker_test_cluster_25.yml @@ -5,7 +5,7 @@ jobs: build: name: Docker Test Cluster 25 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -54,7 +54,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/e2e_race.yml b/.github/workflows/e2e_race.yml index 2c75ecf0cdb..0d773d936e4 100644 --- a/.github/workflows/e2e_race.yml +++ b/.github/workflows/e2e_race.yml @@ -5,7 +5,7 @@ jobs: build: name: End-to-End Test (Race) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI run: | @@ -52,7 +52,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -87,4 +87,4 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' timeout-minutes: 30 run: | - make e2e_test_race + NOVTADMINBUILD=1 make e2e_test_race diff --git a/.github/workflows/endtoend.yml b/.github/workflows/endtoend.yml index c7d83929e82..1c0b5f00342 100644 --- a/.github/workflows/endtoend.yml +++ b/.github/workflows/endtoend.yml @@ -5,7 +5,7 @@ jobs: build: name: End-to-End Test - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI run: | @@ -52,7 +52,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Tune the OS if: 
steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -78,10 +78,14 @@ jobs: - name: Build if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' run: | - make build + NOVTADMINBUILD=1 make build - name: endtoend if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' timeout-minutes: 30 run: | + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which mustn't be more than 107 characters long. + export VTDATAROOT="/tmp/" + eatmydata -- tools/e2e_test_runner.sh diff --git a/.github/workflows/local_example.yml b/.github/workflows/local_example.yml index b90e9b463d0..bd6bbb247e3 100644 --- a/.github/workflows/local_example.yml +++ b/.github/workflows/local_example.yml @@ -4,11 +4,10 @@ permissions: read-all jobs: build: - name: Local example using ${{ matrix.topo }} on ${{ matrix.os }} - runs-on: ${{ matrix.os }} + name: Local example using ${{ matrix.topo }} on ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 strategy: matrix: - os: [ubuntu-22.04] topo: [consul,etcd,k8s] steps: @@ -58,7 +57,13 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 + + - uses: actions/setup-node@v3 + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true' + with: + # node-version should match package.json + node-version: '18.16.0' - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true' diff --git a/.github/workflows/region_example.yml b/.github/workflows/region_example.yml index b795eee02a2..5d58a6bb5bf 100644 --- a/.github/workflows/region_example.yml +++ b/.github/workflows/region_example.yml @@ -4,11 +4,10 @@ permissions: read-all jobs: build: - name: Region Sharding 
example using ${{ matrix.topo }} on ${{ matrix.os }} - runs-on: ${{ matrix.os }} + name: Region Sharding example using ${{ matrix.topo }} on ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 strategy: matrix: - os: [ubuntu-22.04] topo: [etcd] steps: @@ -58,7 +57,13 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 + + - uses: actions/setup-node@v3 + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true' + with: + # node-version should match package.json + node-version: '18.16.0' - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true' diff --git a/.github/workflows/static_checks_etc.yml b/.github/workflows/static_checks_etc.yml index 478ce2da297..34714d00256 100644 --- a/.github/workflows/static_checks_etc.yml +++ b/.github/workflows/static_checks_etc.yml @@ -33,12 +33,6 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 - - name: Run FOSSA scan and upload build data - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: fossa-contrib/fossa-action@v2 - with: - fossa-api-key: 76d7483ea206d530d9452e44bffe7ba8 - - name: Check for changes in Go files if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: frouioui/paths-filter@main @@ -102,12 +96,13 @@ jobs: release_notes: - 'changelog/**' - './go/tools/releases/**' + - '.github/workflows/static_checks_etc.yml' - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.go_files == 'true' || steps.changes.outputs.parser_changes == 'true' || steps.changes.outputs.proto_changes == 'true') uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true' 
@@ -218,3 +213,4 @@ jobs: echo 'Running `go run ./go/tools/releases/releases.go` on CI yields the following changes:' echo "$output" echo "" + exit 1 diff --git a/.github/workflows/unit_race.yml b/.github/workflows/unit_race.yml index 3c45330cbe6..80121299139 100644 --- a/.github/workflows/unit_race.yml +++ b/.github/workflows/unit_race.yml @@ -10,7 +10,7 @@ jobs: build: name: Unit Test (Race) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI run: | @@ -57,7 +57,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' @@ -95,4 +95,9 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' timeout-minutes: 45 run: | + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which mustn't be more than 107 characters long. 
+ export VTDATAROOT="/tmp/" + export NOVTADMINBUILD=1 + eatmydata -- make unit_test_race diff --git a/.github/workflows/unit_test_mysql57.yml b/.github/workflows/unit_test_mysql57.yml index a14b469c148..5c5b9c2a206 100644 --- a/.github/workflows/unit_test_mysql57.yml +++ b/.github/workflows/unit_test_mysql57.yml @@ -16,7 +16,7 @@ env: jobs: test: name: Unit Test (mysql57) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' @@ -128,7 +135,7 @@ jobs: make tools - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -137,19 +144,27 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' timeout-minutes: 30 run: | + set -exo pipefail + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which mustn't be more than 107 characters long. + export VTDATAROOT="/tmp/" + + export NOVTADMINBUILD=1 eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/.github/workflows/unit_test_mysql80.yml b/.github/workflows/unit_test_mysql80.yml index b969104eaa6..0427ef18158 100644 --- a/.github/workflows/unit_test_mysql80.yml +++ b/.github/workflows/unit_test_mysql80.yml @@ -16,7 +16,7 @@ env: jobs: test: name: Unit Test (mysql80) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -36,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,7 +71,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' @@ -125,7 +132,7 @@ jobs: make tools - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -134,19 +141,27 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . 
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' timeout-minutes: 30 run: | + set -exo pipefail + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which mustn't be more than 107 characters long. + export VTDATAROOT="/tmp/" + + export NOVTADMINBUILD=1 eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/update_golang_version.yml b/.github/workflows/update_golang_version.yml index edb249ea74e..a01e3a815e0 100644 --- a/.github/workflows/update_golang_version.yml +++ b/.github/workflows/update_golang_version.yml @@ -9,19 +9,20 @@ permissions: read-all jobs: update_golang_version: + if: github.repository == 'vitessio/vitess' permissions: contents: write pull-requests: write strategy: matrix: - branch: [ main, release-16.0, release-15.0, release-14.0 ] + branch: [ main, release-17.0, release-16.0, release-15.0 ] name: Update Golang Version runs-on: ubuntu-latest steps: - name: Set up Go uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Check out code uses: actions/checkout@v3 @@ -55,8 +56,10 @@ jobs: # Check if the PR already exists, if it does then do not create new PR. 
gh pr list -S "is:open [${{ matrix.branch }}] Upgrade the Golang version to go${go_version}" > out.txt 2>&1 | true if [ -s out.txt ]; then + rm -f out.txt exit 0 fi + rm -f out.txt echo "create-pr=true" >> $GITHUB_OUTPUT - name: Create Pull Request diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml index 424a2aa25b3..9532995d49c 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml @@ -13,7 +13,7 @@ jobs: get_previous_release: if: always() name: Get Previous Release - Backups - E2E - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} @@ -34,7 +34,7 @@ jobs: timeout-minutes: 60 if: always() && needs.get_previous_release.result == 'success' name: Run Upgrade Downgrade Test - Backups - E2E - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_previous_release @@ -85,7 +85,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -114,7 +114,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }}) @@ -133,7 +133,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -153,7 +153,7 @@ jobs: timeout-minutes: 10 run: 
| source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml index 3e21997dfd0..cc8e3afb42a 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml @@ -13,7 +13,7 @@ jobs: get_next_release: if: always() name: Get Latest Release - Backups - E2E - Next Release - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} @@ -34,7 +34,7 @@ jobs: timeout-minutes: 60 if: always() && needs.get_next_release.result == 'success' name: Run Upgrade Downgrade Test - Backups - E2E - Next Release - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_next_release @@ -88,7 +88,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -117,7 +117,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) @@ -136,7 +136,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -156,7 +156,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build 
mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual.yml b/.github/workflows/upgrade_downgrade_test_backups_manual.yml index b00a9e38682..6789dda2067 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_manual.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_manual.yml @@ -13,7 +13,7 @@ jobs: get_previous_release: if: always() name: Get Previous Release - Backups - Manual - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} @@ -35,7 +35,7 @@ jobs: timeout-minutes: 40 if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Backups - Manual - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_previous_release @@ -87,7 +87,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -134,7 +134,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - name: Checkout to the other version's code (${{ needs.get_previous_release.outputs.previous_release }}) @@ -153,7 +153,7 @@ jobs: timeout-minutes: 5 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -178,7 +178,7 @@ jobs: timeout-minutes: 5 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ diff --git 
a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml index 3d991a5a4bd..0120571a78e 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml @@ -13,7 +13,7 @@ jobs: get_next_release: if: always() name: Get Previous Release - Backups - Manual - Next Release - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} @@ -35,7 +35,7 @@ jobs: timeout-minutes: 40 if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Backups - Manual - Next Release - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_next_release @@ -90,7 +90,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -137,7 +137,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - name: Checkout to the other version's code (${{ needs.get_next_release.outputs.next_release }}) @@ -156,7 +156,7 @@ jobs: timeout-minutes: 5 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -181,7 +181,7 @@ jobs: timeout-minutes: 5 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ diff --git 
a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml index 9aa7e8edd14..a3dc81f3723 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml @@ -16,7 +16,7 @@ jobs: get_previous_release: if: always() name: Get Previous Release - Query Serving (Queries) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} @@ -36,7 +36,7 @@ jobs: upgrade_downgrade_test: if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Query Serving (Queries) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_previous_release @@ -87,7 +87,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -132,7 +132,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }}) @@ -151,7 +151,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -171,7 +171,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ diff --git 
a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml index f2838971d96..923c766e377 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml @@ -16,7 +16,7 @@ jobs: get_next_release: if: always() name: Get Latest Release - Query Serving (Queries) Next Release - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} @@ -36,7 +36,7 @@ jobs: upgrade_downgrade_test: if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Query Serving (Queries) Next Release - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_next_release @@ -90,7 +90,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -135,7 +135,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) @@ -154,7 +154,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -174,7 +174,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ diff --git 
a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml index 09e9fac200d..14c8afaf87f 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml @@ -16,7 +16,7 @@ jobs: get_previous_release: if: always() name: Get Previous Release - Query Serving (Schema) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} @@ -36,7 +36,7 @@ jobs: upgrade_downgrade_test: if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Query Serving (Schema) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_previous_release @@ -87,7 +87,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -132,7 +132,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }}) @@ -151,7 +151,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -171,7 +171,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ diff --git 
a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml index b3b76683a00..f22ece10010 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml @@ -16,7 +16,7 @@ jobs: get_next_release: if: always() name: Get Latest Release - Query Serving (Schema) Next Release - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} @@ -36,7 +36,7 @@ jobs: upgrade_downgrade_test: if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Query Serving (Schema) Next Release - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_next_release @@ -90,7 +90,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -135,7 +135,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) @@ -154,7 +154,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -174,7 +174,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ diff --git 
a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml index 34d34965dfd..82d6f267856 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml @@ -16,7 +16,7 @@ jobs: get_next_release: if: always() name: Get Latest Release - Reparent New Vtctl - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} @@ -36,7 +36,7 @@ jobs: upgrade_downgrade_test: if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Reparent New Vtctl - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_next_release @@ -90,7 +90,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -135,7 +135,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) @@ -154,7 +154,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -174,7 +174,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml 
b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml index a5f0cf682d3..c5b6c964124 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml @@ -16,7 +16,7 @@ jobs: get_next_release: if: always() name: Get Latest Release - Reparent New VTTablet - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} @@ -36,7 +36,7 @@ jobs: upgrade_downgrade_test: if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Reparent New VTTablet - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_next_release @@ -90,7 +90,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -135,7 +135,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) @@ -154,7 +154,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -174,7 +174,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml index 
86760edab8b..c4391efdef5 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml @@ -16,7 +16,7 @@ jobs: get_previous_release: if: always() name: Get Previous Release - Reparent Old Vtctl - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} @@ -36,7 +36,7 @@ jobs: upgrade_downgrade_test: if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Reparent Old Vtctl - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_previous_release @@ -87,7 +87,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -132,7 +132,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }}) @@ -151,7 +151,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -171,7 +171,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml index 44e2e54909b..f3ffcaa2d17 100644 --- 
a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml @@ -16,7 +16,7 @@ jobs: get_previous_release: if: always() name: Get Previous Release - Reparent Old VTTablet - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} @@ -36,7 +36,7 @@ jobs: upgrade_downgrade_test: if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Reparent Old VTTablet - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_previous_release @@ -87,7 +87,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -132,7 +132,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }}) @@ -151,7 +151,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -171,7 +171,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ diff --git a/.github/workflows/vtadmin_web_build.yml b/.github/workflows/vtadmin_web_build.yml index 441561447ee..24ade4d9227 100644 --- a/.github/workflows/vtadmin_web_build.yml +++ b/.github/workflows/vtadmin_web_build.yml @@ -16,7 +16,7 @@ 
permissions: read-all jobs: build: - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI run: | @@ -42,7 +42,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' with: # node-version should match package.json - node-version: '16.19.0' + node-version: '18.16.0' - name: Install dependencies if: steps.skip-workflow.outputs.skip-workflow == 'false' diff --git a/.github/workflows/vtadmin_web_lint.yml b/.github/workflows/vtadmin_web_lint.yml index fe88053ff5a..055e1934fb0 100644 --- a/.github/workflows/vtadmin_web_lint.yml +++ b/.github/workflows/vtadmin_web_lint.yml @@ -42,7 +42,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' with: # node-version should match package.json - node-version: '16.19.0' + node-version: '18.16.0' - name: Install dependencies if: steps.skip-workflow.outputs.skip-workflow == 'false' diff --git a/.github/workflows/vtadmin_web_unit_tests.yml b/.github/workflows/vtadmin_web_unit_tests.yml index cab00c8dea9..1efa474fde3 100644 --- a/.github/workflows/vtadmin_web_unit_tests.yml +++ b/.github/workflows/vtadmin_web_unit_tests.yml @@ -16,7 +16,7 @@ permissions: read-all jobs: unit-tests: - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI run: | @@ -42,7 +42,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' with: # node-version should match package.json - node-version: '16.19.0' + node-version: '18.16.0' - name: Install dependencies if: steps.skip-workflow.outputs.skip-workflow == 'false' diff --git a/.golangci.yml b/.golangci.yml index 50bf68f4bfb..9c674953a76 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,8 +1,6 @@ run: - go: 1.19 + go: 1.20 timeout: 10m - skip-dirs: - - go/vt/topo/k8stopo/client linters-settings: errcheck: diff --git a/GOVERNANCE.md b/GOVERNANCE.md index ffe72221dce..8b408d8ba55 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -55,13 +55,13 @@ A maintainer is not allowed to merge their change without approval 
from other ma Anyone can become a maintainer; there are no special requirements, other than to have shown a willingness and ability to participate in the project as a team player. Typically, a potential maintainer will need to show that they have an understanding of the project, its objectives and its strategy. They will also have provided valuable contributions to the project over a period of time. -New maintainers can be nominated by any existing maintainer. Once they have been nominated, there will be a vote by the steering committee. Maintainer voting is one of the few activities that takes place on the project’s private management list. This is to allow committee members to freely express their opinions about a nominee without causing embarrassment. Once the vote has been held, the aggregated voting results are published on the public mailing list. The nominee is entitled to request an explanation of any ‘no’ votes against them, regardless of the outcome of the vote. This explanation will be provided by the Steering Committee Chair (see below) and will be anonymous and constructive in nature. +New maintainers can be nominated by any existing maintainer. Once they have been nominated, there will be a vote by the maintainer team to decide whether to accept or reject the nomination. -Nominees may decline their appointment as a maintainer. However, this is unusual, as the project does not expect any specific time or resource commitment from its community members. The intention behind the role of maintainer is to allow people to contribute to the project more easily, not to tie them in to the project in any formal way. +Nominees may decline their appointment as a maintainer. The project does not expect any specific time or resource commitment from its community members, however it is expected that maintainers are evangelists for the project. -It is important to recognise that maintainer-ship is a privilege, not a right. 
That privilege must be earned and once earned it can be removed by the Steering Committee for conduct inconsistent with the [Guiding Principles](https://github.com/vitessio/vitess/blob/main/GUIDING_PRINCIPLES.md) or if they drop below a level of commitment and engagement required to be a maintainer, as determined by the Steering Committee. The Steering Committee also reserves the right to remove a person for any other reason inconsistent with the goals of the project. +It is important to recognise that maintainer-ship is a privilege, not a right. That privilege must be earned and once earned it can be removed by the [Steering Committee](https://github.com/vitessio/vitess/blob/main/STEERING.md) for conduct inconsistent with the [Guiding Principles](https://github.com/vitessio/vitess/blob/main/GUIDING_PRINCIPLES.md) or if they drop below a level of commitment and engagement required to be a maintainer, as determined by the Steering Committee. The Steering Committee also reserves the right to remove a person for any other reason inconsistent with the goals of the project. -A maintainer who shows an above-average level of contribution to the project, particularly with respect to its strategic direction and long-term health, may be nominated to become a member of the Steering Committee. This role is described in the [Steering Committee document](https://github.com/vitessio/vitess/blob/main/STEERING.md). +A maintainer who shows an above-average level of contribution to the project, particularly with respect to its strategic direction and long-term health, may be nominated to become a member of the Steering Committee. 
# Support diff --git a/Makefile b/Makefile index a063d405ba9..249dafa6a47 100644 --- a/Makefile +++ b/Makefile @@ -61,6 +61,10 @@ ifdef VT_EXTRA_BUILD_FLAGS export EXTRA_BUILD_FLAGS := $(VT_EXTRA_BUILD_FLAGS) endif +ifdef VT_EXTRA_BUILD_LDFLAGS +export EXTRA_BUILD_LDFLAGS := $(VT_EXTRA_BUILD_LDFLAGS) +endif + # This should be the root of the vitess Git directory. ifndef VTROOT export VTROOT=${PWD} @@ -76,7 +80,7 @@ ifndef NOBANNER endif bash ./build.env go build -trimpath $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) \ - -ldflags "$(shell tools/build_version_flags.sh)" \ + -ldflags "$(EXTRA_BUILD_LDFLAGS) $(shell tools/build_version_flags.sh)" \ -o ${VTROOTBIN} ./go/... # build the vitess binaries statically @@ -89,8 +93,12 @@ endif # Binaries will be placed in ${VTROOTBIN}. CGO_ENABLED=0 go build \ -trimpath $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) \ - -ldflags "$(shell tools/build_version_flags.sh)" \ + -ldflags "$(EXTRA_BUILD_LDFLAGS) $(shell tools/build_version_flags.sh)" \ -o ${VTROOTBIN} ./go/... +ifndef NOVTADMINBUILD + echo "Building VTAdmin Web, disable VTAdmin build by setting 'NOVTADMINBUILD'" + PREFIX="" ./web/vtadmin/build.sh +endif # cross-build can be used to cross-compile Vitess client binaries # Outside of select client binaries (namely vtctlclient & vtexplain), cross-compiled Vitess Binaries are not recommended for production deployments @@ -107,7 +115,7 @@ endif mkdir -p ${VTROOTBIN}/${GOOS}_${GOARCH} CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go build \ -trimpath $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) \ - -ldflags "$(shell tools/build_version_flags.sh)" \ + -ldflags "$(EXTRA_BUILD_LDFLAGS) $(shell tools/build_version_flags.sh)" \ -o ${VTROOTBIN}/${GOOS}_${GOARCH} ./go/... @if [ ! 
-x "${VTROOTBIN}/${GOOS}_${GOARCH}/vttablet" ]; then \ @@ -121,7 +129,7 @@ endif bash ./build.env go build -trimpath \ $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) \ - -ldflags "$(shell tools/build_version_flags.sh)" \ + -ldflags "$(EXTRA_BUILD_LDFLAGS) $(shell tools/build_version_flags.sh)" \ -gcflags -'N -l' \ -o ${VTROOTBIN} ./go/... @@ -243,7 +251,6 @@ install_protoc-gen-go: PROTO_SRCS = $(wildcard proto/*.proto) PROTO_SRC_NAMES = $(basename $(notdir $(PROTO_SRCS))) -PROTO_GO_OUTS = $(foreach name, $(PROTO_SRC_NAMES), go/vt/proto/$(name)/$(name).pb.go) # This rule rebuilds all the go files from the proto definitions for gRPC. proto: $(PROTO_GO_OUTS) vtadmin_web_proto_types @@ -256,9 +263,10 @@ $(PROTO_GO_OUTS): minimaltools install_protoc-gen-go proto/*.proto --go_out=. --plugin protoc-gen-go="${VTROOTBIN}/protoc-gen-go" \ --go-grpc_out=. --plugin protoc-gen-go-grpc="${VTROOTBIN}/protoc-gen-go-grpc" \ --go-vtproto_out=. --plugin protoc-gen-go-vtproto="${VTROOTBIN}/protoc-gen-go-vtproto" \ - --go-vtproto_opt=features=marshal+unmarshal+size+pool \ + --go-vtproto_opt=features=marshal+unmarshal+size+pool+clone \ --go-vtproto_opt=pool=vitess.io/vitess/go/vt/proto/query.Row \ --go-vtproto_opt=pool=vitess.io/vitess/go/vt/proto/binlogdata.VStreamRowsResponse \ + --go-vtproto_opt=pool=vitess.io/vitess/go/vt/proto/binlogdata.VStreamTablesResponse \ -I${PWD}/dist/vt-protoc-21.3/include:proto $(PROTO_SRCS) cp -Rf vitess.io/vitess/go/vt/proto/* go/vt/proto rm -rf vitess.io/vitess/go/vt/proto/ @@ -269,7 +277,7 @@ $(PROTO_GO_OUTS): minimaltools install_protoc-gen-go proto/*.proto # This rule builds the bootstrap images for all flavors. 
DOCKER_IMAGES_FOR_TEST = mysql57 mysql80 percona57 percona80 DOCKER_IMAGES = common $(DOCKER_IMAGES_FOR_TEST) -BOOTSTRAP_VERSION=18.0 +BOOTSTRAP_VERSION=22.1 ensure_bootstrap_version: find docker/ -type f -exec sed -i "s/^\(ARG bootstrap_version\)=.*/\1=${BOOTSTRAP_VERSION}/" {} \; sed -i 's/\(^.*flag.String(\"bootstrap-version\",\) *\"[^\"]\+\"/\1 \"${BOOTSTRAP_VERSION}\"/' test.go @@ -332,6 +340,9 @@ DOCKER_LITE_TARGETS = $(addprefix docker_lite_,$(DOCKER_LITE_SUFFIX)) $(DOCKER_LITE_TARGETS): docker_lite_%: ${call build_docker_image,docker/lite/Dockerfile.$*,vitess/lite:$*} +docker_lite_push: + for i in $(DOCKER_LITE_SUFFIX); do echo "pushing lite image: $$i"; docker push vitess/lite:$$i || exit 1; done + docker_lite_all: docker_lite $(DOCKER_LITE_TARGETS) docker_local: @@ -384,69 +395,11 @@ tools: minimaltools: echo $$(date): Installing minimal dependencies - BUILD_CHROME=0 BUILD_JAVA=0 BUILD_CONSUL=0 ./bootstrap.sh + BUILD_JAVA=0 BUILD_CONSUL=0 ./bootstrap.sh dependency_check: ./tools/dependency_check.sh -install_k8s-code-generator: tools/tools.go go.mod - go install k8s.io/code-generator/cmd/deepcopy-gen - go install k8s.io/code-generator/cmd/client-gen - go install k8s.io/code-generator/cmd/lister-gen - go install k8s.io/code-generator/cmd/informer-gen - -DEEPCOPY_GEN=$(VTROOTBIN)/deepcopy-gen -CLIENT_GEN=$(VTROOTBIN)/client-gen -LISTER_GEN=$(VTROOTBIN)/lister-gen -INFORMER_GEN=$(VTROOTBIN)/informer-gen - -GEN_BASE_DIR ?= vitess.io/vitess/go/vt/topo/k8stopo - -client_go_gen: install_k8s-code-generator - echo $$(date): Regenerating client-go code - # Delete and re-generate the deepcopy types - find $(VTROOT)/go/vt/topo/k8stopo/apis/topo/v1beta1 -name "zz_generated.deepcopy.go" -delete - - # We output to ./ and then copy over the generated files to the appropriate path - # This is done so we don't have rely on the repository being cloned to `$GOPATH/src/vitess.io/vitess` - - $(DEEPCOPY_GEN) -o ./ \ - --input-dirs $(GEN_BASE_DIR)/apis/topo/v1beta1 \ - -O 
zz_generated.deepcopy \ - --bounding-dirs $(GEN_BASE_DIR)/apis \ - --go-header-file ./go/vt/topo/k8stopo/boilerplate.go.txt - - # Delete existing code - rm -rf go/vt/topo/k8stopo/client - - # Generate clientset - $(CLIENT_GEN) -o ./ \ - --clientset-name versioned \ - --input-base $(GEN_BASE_DIR)/apis \ - --input 'topo/v1beta1' \ - --output-package $(GEN_BASE_DIR)/client/clientset \ - --fake-clientset=true \ - --go-header-file ./go/vt/topo/k8stopo/boilerplate.go.txt - - # Generate listers - $(LISTER_GEN) -o ./ \ - --input-dirs $(GEN_BASE_DIR)/apis/topo/v1beta1 \ - --output-package $(GEN_BASE_DIR)/client/listers \ - --go-header-file ./go/vt/topo/k8stopo/boilerplate.go.txt - - # Generate informers - $(INFORMER_GEN) -o ./ \ - --input-dirs $(GEN_BASE_DIR)/apis/topo/v1beta1 \ - --output-package $(GEN_BASE_DIR)/client/informers \ - --versioned-clientset-package $(GEN_BASE_DIR)/client/clientset/versioned \ - --listers-package $(GEN_BASE_DIR)/client/listers \ - --go-header-file ./go/vt/topo/k8stopo/boilerplate.go.txt - - # Move and cleanup - mv vitess.io/vitess/go/vt/topo/k8stopo/client go/vt/topo/k8stopo/ - mv vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1/zz_generated.deepcopy.go go/vt/topo/k8stopo/apis/topo/v1beta1/zz_generated.deepcopy.go - rm -rf vitess.io/vitess/go/vt/topo/k8stopo/ - vtadmin_web_install: cd web/vtadmin && npm install diff --git a/bootstrap.sh b/bootstrap.sh index de4efcf5363..f95302ea771 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -25,7 +25,6 @@ source ./dev.env BUILD_JAVA=${BUILD_JAVA:-1} BUILD_CONSUL=${BUILD_CONSUL:-1} -BUILD_CHROME=${BUILD_CHROME:-1} VITESS_RESOURCES_DOWNLOAD_BASE_URL="https://github.com/vitessio/vitess-resources/releases/download" VITESS_RESOURCES_RELEASE="v4.0" @@ -171,35 +170,6 @@ install_etcd() { ln -snf "$dist/etcd-${version}-${platform}-${target}/etcdctl" "$VTROOT/bin/etcdctl" } - -# Download and install k3s, link k3s binary into our root -install_k3s() { - local version="$1" - local dist="$2" - case $(uname) in - 
Linux) local platform=linux;; - *) echo "WARNING: unsupported platform. K3s only supports running on Linux, the k8s topology will not be available for local examples."; return;; - esac - - case $(get_arch) in - aarch64) local target="-arm64";; - x86_64) local target="";; - arm64) local target="-arm64";; - *) echo "WARNING: unsupported architecture, the k8s topology will not be available for local examples."; return;; - esac - - file="k3s${target}" - - local dest="$dist/k3s${target}-${version}-${platform}" - # This is how we'd download directly from source: - # download_url=https://github.com/rancher/k3s/releases/download - # wget -O $dest "$download_url/$version/$file" - "${VTROOT}/tools/wget-retry" -O $dest "${VITESS_RESOURCES_DOWNLOAD_URL}/$file-$version" - chmod +x $dest - ln -snf $dest "$VTROOT/bin/k3s" -} - - # Download and install consul, link consul binary into our root. install_consul() { local version="$1" @@ -227,37 +197,6 @@ install_consul() { } -# Download chromedriver -install_chromedriver() { - local version="$1" - local dist="$2" - - case $(uname) in - Linux) local platform=linux;; - *) echo "Platform not supported for vtctl-web tests. 
Skipping chromedriver install."; return;; - esac - - if [ "$(arch)" == "aarch64" ] ; then - os=$(cat /etc/*release | grep "^ID=" | cut -d '=' -f 2) - case $os in - ubuntu|debian) - sudo apt-get update -y && sudo apt install -y --no-install-recommends unzip libglib2.0-0 libnss3 libx11-6 - ;; - centos|fedora) - sudo yum update -y && yum install -y libX11 unzip wget - ;; - esac - echo "For Arm64, using prebuilt binary from electron (https://github.com/electron/electron/) of version 76.0.3809.126" - "${VTROOT}/tools/wget-retry" https://github.com/electron/electron/releases/download/v6.0.3/chromedriver-v6.0.3-linux-arm64.zip - unzip -o -q chromedriver-v6.0.3-linux-arm64.zip -d "$dist" - rm chromedriver-v6.0.3-linux-arm64.zip - else - "${VTROOT}/tools/wget-retry" "https://chromedriver.storage.googleapis.com/$version/chromedriver_linux64.zip" - unzip -o -q chromedriver_linux64.zip -d "$dist" - rm chromedriver_linux64.zip - fi -} - # Download and install toxiproxy, link toxiproxy binary into our root. install_toxiproxy() { local version="$1" @@ -299,19 +238,11 @@ install_all() { # etcd install_dep "etcd" "v3.5.6" "$VTROOT/dist/etcd" install_etcd - # k3s - command -v k3s || install_dep "k3s" "v1.0.0" "$VTROOT/dist/k3s" install_k3s - # consul if [ "$BUILD_CONSUL" == 1 ] ; then install_dep "Consul" "1.11.4" "$VTROOT/dist/consul" install_consul fi - # chromedriver - if [ "$BUILD_CHROME" == 1 ] ; then - install_dep "chromedriver" "90.0.4430.24" "$VTROOT/dist/chromedriver" install_chromedriver - fi - # toxiproxy install_dep "toxiproxy" "v2.5.0" "$VTROOT/dist/toxiproxy" install_toxiproxy diff --git a/build.env b/build.env index 038f791c3c3..b9e44331e65 100755 --- a/build.env +++ b/build.env @@ -17,7 +17,7 @@ source ./tools/shell_functions.inc go version >/dev/null 2>&1 || fail "Go is not installed or is not in \$PATH. See https://vitess.io/contributing/build-from-source for install instructions." -goversion_min 1.20.5 || echo "Go version reported: `go version`. 
Version 1.20.5+ recommended. See https://vitess.io/contributing/build-from-source for install instructions." +goversion_min 1.21.3 || echo "Go version reported: `go version`. Version 1.21.3+ recommended. See https://vitess.io/contributing/build-from-source for install instructions." mkdir -p dist mkdir -p bin diff --git a/changelog/15.0/15.0.2/summary.md b/changelog/15.0/15.0.2/summary.md index 6f3346efa47..b12a97879a5 100644 --- a/changelog/15.0/15.0.2/summary.md +++ b/changelog/15.0/15.0.2/summary.md @@ -3,7 +3,7 @@ ### Upgrade to `go1.18.9` Vitess `v15.0.2` now runs on `go1.18.9`. -The patch release of Go, `go1.18.9`, was one of the main reasons for this release as it includes an important security fixe to `net/http` package, which is use extensively by Vitess. +The patch release of Go, `go1.18.9`, was one of the main reasons for this release as it includes an important security fix to `net/http` package, which is used extensively by Vitess. Below is a summary of this patch release. You can learn more [here](https://groups.google.com/g/golang-announce/c/L_3rmdT0BMU). > go1.18.9 (released 2022-12-06) includes security fixes to the net/http and os packages, as well as bug fixes to cgo, the compiler, the runtime, and the crypto/x509 and os/exec packages. 
diff --git a/changelog/15.0/15.0.4/changelog.md b/changelog/15.0/15.0.4/changelog.md new file mode 100644 index 00000000000..f70fd1090a7 --- /dev/null +++ b/changelog/15.0/15.0.4/changelog.md @@ -0,0 +1,61 @@ +# Changelog of Vitess v15.0.4 + +### Bug fixes +#### Build/CI + * [release-15.0] Small fixes to the auto-upgrade golang tool (#12838) [#12847](https://github.com/vitessio/vitess/pull/12847) + * [release-15.0] Add timeout to golangci-lint and bump its version (#12852) [#12853](https://github.com/vitessio/vitess/pull/12853) + * [release-15.0] Remove recent golangci-lint version bump [#12910](https://github.com/vitessio/vitess/pull/12910) +#### Cluster management + * [release-15.0] Prevent resetting replication every time we set replication source (#13377) [#13393](https://github.com/vitessio/vitess/pull/13393) + * [release-15.0] Don't run any reparent commands if the host is empty (#13396) [#13403](https://github.com/vitessio/vitess/pull/13403) + * [release-15.0] ignore all error for views in engine reload (#13590) [#13592](https://github.com/vitessio/vitess/pull/13592) +#### Examples + * [release-15.0] `examples/compose`: fix `consul:latest` error w/`docker-compose up -d` (#13468) [#13471](https://github.com/vitessio/vitess/pull/13471) +#### Online DDL + * v15 backport: vitess Online DDL atomic cut-over [#13376](https://github.com/vitessio/vitess/pull/13376) +#### Query Serving + * [release-15.0] planbuilder bugfix - do not push aggregations into derived tables [#12824](https://github.com/vitessio/vitess/pull/12824) + * [release-15.0] Fix `vtgate_schema_tracker` flaky tests (#12780) [#12850](https://github.com/vitessio/vitess/pull/12850) + * [release-15.0] fix: union distinct between unsharded route and sharded join (#12968) [#12982](https://github.com/vitessio/vitess/pull/12982) + * gen4 planner: allow last_insert_id with arguments (15.0) [#13035](https://github.com/vitessio/vitess/pull/13035) + * [release-15.0] Fix the resilientQuery to give correct results 
during initialization (#13080) [#13086](https://github.com/vitessio/vitess/pull/13086) + * [release-15.0] Remove indentation limit in the sqlparser (#13158) [#13167](https://github.com/vitessio/vitess/pull/13167) + * [release-15.0] Fix: TabletServer ReserveBeginExecute to return transaction ID on error (#13193) [#13196](https://github.com/vitessio/vitess/pull/13196) + * [15.0] Fix: errant GTID in health streamer (#13184) [#13226](https://github.com/vitessio/vitess/pull/13226) +#### Schema Tracker + * [release-15.0] Ignore error while reading table data in Schema.Engine reload (#13421) [#13425](https://github.com/vitessio/vitess/pull/13425) + * Backport v15: schema.Reload(): ignore column reading errors for views only, error for tables #13442 [#13457](https://github.com/vitessio/vitess/pull/13457) +### Enhancement +#### Build/CI + * Use go1.20.3 in the upgrade downgrade tests [#12839](https://github.com/vitessio/vitess/pull/12839) + * [release-15.0] Set the number of threads for release notes generation with a flag [#13315](https://github.com/vitessio/vitess/pull/13315) +#### General + * Use `go1.20.4` on `release-15.0` upgrade test [#13071](https://github.com/vitessio/vitess/pull/13071) +#### Query Serving + * [release-15.0] planner fix: scoping rules for JOIN ON expression inside a subquery [#12890](https://github.com/vitessio/vitess/pull/12890) +### Internal Cleanup +#### Operator + * Use vitess-operator `v2.8.4` in the examples [#12993](https://github.com/vitessio/vitess/pull/12993) +#### VTorc + * [release-15.0] Remove excessive logging in VTOrc APIs (#13459) [#13463](https://github.com/vitessio/vitess/pull/13463) +### Performance +#### TabletManager + * [release-15.0] BaseShowTablesWithSizes: optimize MySQL 8.0 query (#13375) [#13388](https://github.com/vitessio/vitess/pull/13388) +### Release +#### Build/CI + * [release-15.0] Optimize release notes generation to use GitHub Milestones (#13398) [#13620](https://github.com/vitessio/vitess/pull/13620) +#### 
Documentation + * Prepare release note `v15.0.4` [#13619](https://github.com/vitessio/vitess/pull/13619) +### Testing +#### Build/CI + * [release-15.0] fakedbclient: Add locking to avoid races (#12814) [#12821](https://github.com/vitessio/vitess/pull/12821) +#### Cluster management + * [release-15.0] Flaky tests: Fix wrangler tests (#13568) [#13570](https://github.com/vitessio/vitess/pull/13570) +#### General + * [release-15.0] Update Upgrade/Downgrade tests to use `go1.20.5` [#13271](https://github.com/vitessio/vitess/pull/13271) +#### Query Serving + * [release-15.0] Fix benchmarks in `plan_test.go` (#13096) [#13125](https://github.com/vitessio/vitess/pull/13125) + * [release-15.0] Fix `TestGatewayBufferingWhileReparenting` flakiness (#13469) [#13502](https://github.com/vitessio/vitess/pull/13502) +#### VTorc + * [release-15.0]: Fix flakiness in VTOrc tests (#13489) [#13529](https://github.com/vitessio/vitess/pull/13529) + diff --git a/changelog/15.0/15.0.4/release_notes.md b/changelog/15.0/15.0.4/release_notes.md new file mode 100644 index 00000000000..38fa25f9c78 --- /dev/null +++ b/changelog/15.0/15.0.4/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v15.0.4 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/15.0/15.0.4/changelog.md). + +The release includes 33 merged Pull Requests. 
+ +Thanks to all our contributors: @GuptaManan100, @app/vitess-bot, @frouioui, @harshit-gangal, @shlomi-noach, @systay + diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/gbk-charset/extra_args b/changelog/15.0/15.0.4/summary.md similarity index 100% rename from go/test/endtoend/onlineddl/vrepl_suite/testdata/gbk-charset/extra_args rename to changelog/15.0/15.0.4/summary.md diff --git a/changelog/15.0/README.md b/changelog/15.0/README.md index 332f1a7eb24..17807db2ebc 100644 --- a/changelog/15.0/README.md +++ b/changelog/15.0/README.md @@ -1,5 +1,9 @@ ## v15.0 The dedicated team for this release can be found [here](team.md). +* **[15.0.4](15.0.4)** + * [Changelog](15.0.4/changelog.md) + * [Release Notes](15.0.4/release_notes.md) + * **[15.0.3](15.0.3)** * [Changelog](15.0.3/changelog.md) * [Release Notes](15.0.3/release_notes.md) diff --git a/changelog/16.0/16.0.3/changelog.md b/changelog/16.0/16.0.3/changelog.md new file mode 100644 index 00000000000..3f43d9b6049 --- /dev/null +++ b/changelog/16.0/16.0.3/changelog.md @@ -0,0 +1,67 @@ +# Changelog of Vitess v16.0.3 + +### Bug fixes +#### Cluster management + * [release-16.0] Prevent resetting replication every time we set replication source (#13377) [#13392](https://github.com/vitessio/vitess/pull/13392) + * [release-16.0] Don't run any reparent commands if the host is empty (#13396) [#13402](https://github.com/vitessio/vitess/pull/13402) + * [release-16.0] Upgrade-Downgrade Fix: Schema-initialization stuck on semi-sync ACKs while upgrading (#13411) [#13441](https://github.com/vitessio/vitess/pull/13441) + * [release-16.0] Flaky tests: Fix race in memory topo (#13559) [#13576](https://github.com/vitessio/vitess/pull/13576) + * [release-16.0] ignore all error for views in engine reload (#13590) [#13593](https://github.com/vitessio/vitess/pull/13593) + * [release-16.0] check keyspace snapshot time if none specified for backup restores (#13557) [#13634](https://github.com/vitessio/vitess/pull/13634) +#### 
Examples + * [release-16.0] `examples/compose`: fix `consul:latest` error w/`docker-compose up -d` (#13468) [#13472](https://github.com/vitessio/vitess/pull/13472) +#### Operator + * [release-16.0] Upgrade mysqld memory limits to 1024Mi (#13122) [#13204](https://github.com/vitessio/vitess/pull/13204) +#### Query Serving + * [release-16.0] Fix the resilientQuery to give correct results during initialization (#13080) [#13087](https://github.com/vitessio/vitess/pull/13087) + * [16.0] evalengine: TypeOf for Columns should only use value type when we have a value [#13154](https://github.com/vitessio/vitess/pull/13154) + * [release-16.0] Remove indentation limit in the sqlparser (#13158) [#13166](https://github.com/vitessio/vitess/pull/13166) + * Fix: errant GTID in health streamer [#13184](https://github.com/vitessio/vitess/pull/13184) + * [16.0] Fix: TabletServer ReserveBeginExecute to return transaction ID on error [#13193](https://github.com/vitessio/vitess/pull/13193) + * [release-16.0] Bug fix: SQL queries erroring with message `unknown aggregation random` (#13330) [#13334](https://github.com/vitessio/vitess/pull/13334) + * [release-16.0] ignore ongoing backfill vindex from routing selection (#13523) [#13607](https://github.com/vitessio/vitess/pull/13607) +#### Schema Tracker + * [release-16.0] Ignore error while reading table data in Schema.Engine reload (#13421) [#13424](https://github.com/vitessio/vitess/pull/13424) + * Backport v16: schema.Reload(): ignore column reading errors for views only, error for tables #13442 [#13456](https://github.com/vitessio/vitess/pull/13456) +#### TabletManager + * [release-16.0] mysqlctl: Correctly encode database and table names (#13312) [#13323](https://github.com/vitessio/vitess/pull/13323) +#### VReplication + * [release-16.0] VReplication: Do not delete sharded target vschema table entries on Cancel (#13146) [#13155](https://github.com/vitessio/vitess/pull/13155) + * [release-16.0] VReplication: Pass on --keep_routing_rules 
flag value for Cancel action (#13171) [#13194](https://github.com/vitessio/vitess/pull/13194) + * [release-16.0] VReplication: Fix VDiff2 DeleteByUUID Query (#13255) [#13282](https://github.com/vitessio/vitess/pull/13282) + * [release-16.0] VReplication: Ensure ROW events are sent within a transaction (#13547) [#13580](https://github.com/vitessio/vitess/pull/13580) +### CI/Build +#### General + * [release-16.0] Upgrade the Golang version to `go1.20.4` [#13053](https://github.com/vitessio/vitess/pull/13053) +### Documentation +#### Documentation + * [release-16.0] update link for reparenting guide (#13350) [#13356](https://github.com/vitessio/vitess/pull/13356) +### Enhancement +#### Build/CI + * [release-16.0] Set the number of threads for release notes generation with a flag [#13316](https://github.com/vitessio/vitess/pull/13316) +### Performance +#### TabletManager + * [release-16.0] BaseShowTablesWithSizes: optimize MySQL 8.0 query (#13375) [#13389](https://github.com/vitessio/vitess/pull/13389) +### Release +#### Build/CI + * [release-16.0] Optimize release notes generation to use GitHub Milestones (#13398) [#13621](https://github.com/vitessio/vitess/pull/13621) +#### Documentation + * [release-16.0] Fix format error in the `v16.0.2` release notes (#13057) [#13058](https://github.com/vitessio/vitess/pull/13058) +### Testing +#### Backup and Restore + * [release-16.0]: Fix `upgrade-downgrade` test setup and fix the `init_db.sql` [#13525](https://github.com/vitessio/vitess/pull/13525) +#### Cluster management + * [release-16.0] Deflake `TestPlannedReparentShardPromoteReplicaFail` (#13548) [#13549](https://github.com/vitessio/vitess/pull/13549) + * [release-16.0] Flaky tests: Fix wrangler tests (#13568) [#13571](https://github.com/vitessio/vitess/pull/13571) +#### General + * TestFix: `Upgrade Downgrade Testing - Backups - Manual` [#13408](https://github.com/vitessio/vitess/pull/13408) +#### Query Serving + * [release-16.0] Fix benchmarks in `plan_test.go` 
(#13096) [#13126](https://github.com/vitessio/vitess/pull/13126) + * [release-16.0] Deflake `TestQueryTimeoutWithDual` test (#13405) [#13409](https://github.com/vitessio/vitess/pull/13409) + * [release-16.0] Fix `TestGatewayBufferingWhileReparenting` flakiness (#13469) [#13500](https://github.com/vitessio/vitess/pull/13500) + * [release-16.0] fix TestQueryTimeoutWithTables flaky test (#13579) [#13585](https://github.com/vitessio/vitess/pull/13585) +#### VTorc + * [release-16.0]: Fix flakiness in VTOrc tests (#13489) [#13528](https://github.com/vitessio/vitess/pull/13528) +#### vtctl + * Fix new vtctl upgrade downgrade test on `release-16.0` [#13252](https://github.com/vitessio/vitess/pull/13252) + diff --git a/changelog/16.0/16.0.3/release_notes.md b/changelog/16.0/16.0.3/release_notes.md new file mode 100644 index 00000000000..d377bdc24f9 --- /dev/null +++ b/changelog/16.0/16.0.3/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v16.0.3 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/16.0/16.0.3/changelog.md). + +The release includes 38 merged Pull Requests. 
+ +Thanks to all our contributors: @GuptaManan100, @app/github-actions, @app/vitess-bot, @frouioui, @harshit-gangal, @shlomi-noach, @systay + diff --git a/changelog/16.0/16.0.3/summary.md b/changelog/16.0/16.0.3/summary.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/changelog/16.0/16.0.4/changelog.md b/changelog/16.0/16.0.4/changelog.md new file mode 100644 index 00000000000..45c4944aa25 --- /dev/null +++ b/changelog/16.0/16.0.4/changelog.md @@ -0,0 +1,24 @@ +# Changelog of Vitess v16.0.4 + +### Bug fixes +#### Backup and Restore + * Manual cherry-pick of 13339 [#13733](https://github.com/vitessio/vitess/pull/13733) + * [release-16.0] Address vttablet memory usage with backups to Azure Blob Service (#13770) [#13774](https://github.com/vitessio/vitess/pull/13774) +#### Online DDL + * v16 backport: Fix closed channel panic in Online DDL cutover [#13732](https://github.com/vitessio/vitess/pull/13732) + * v16 backport: Solve RevertMigration.Comment read/write concurrency issue [#13736](https://github.com/vitessio/vitess/pull/13736) +#### Query Serving + * planbuilder: Fix infinite recursion for subqueries [#13783](https://github.com/vitessio/vitess/pull/13783) + * [release-16.0] vtgate: fix race condition iterating tables and views from schema tracker (#13673) [#13795](https://github.com/vitessio/vitess/pull/13795) + * [16.0] bugfixes: collection of fixes to bugs found while fuzzing [#13805](https://github.com/vitessio/vitess/pull/13805) +### CI/Build +#### Online DDL + * [release-16.0] CI: fix onlineddl_scheduler flakiness (#13754) [#13759](https://github.com/vitessio/vitess/pull/13759) +### Release +#### General + * Back to dev mode after v16.0.3 [#13660](https://github.com/vitessio/vitess/pull/13660) + * Release 16.0 code freeze for `v16.0.3` release [#13810](https://github.com/vitessio/vitess/pull/13810) +### Testing +#### Build/CI + * [release-16.0] Flakes: Delete VTDATAROOT files in reparent test teardown within CI (#13793) 
[#13797](https://github.com/vitessio/vitess/pull/13797) + diff --git a/changelog/16.0/16.0.4/release_notes.md b/changelog/16.0/16.0.4/release_notes.md new file mode 100644 index 00000000000..d46559f5fec --- /dev/null +++ b/changelog/16.0/16.0.4/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v16.0.4 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/16.0/16.0.4/changelog.md). + +The release includes 11 merged Pull Requests. + +Thanks to all our contributors: @GuptaManan100, @app/vitess-bot, @dbussink, @rohit-nayak-ps, @shlomi-noach, @systay + diff --git a/changelog/16.0/16.0.4/summary.md b/changelog/16.0/16.0.4/summary.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/changelog/16.0/README.md b/changelog/16.0/README.md index 35e87974acb..75b3f3a0a1f 100644 --- a/changelog/16.0/README.md +++ b/changelog/16.0/README.md @@ -1,5 +1,13 @@ ## v16.0 The dedicated team for this release can be found [here](team.md). 
+* **[16.0.4](16.0.4)** + * [Changelog](16.0.4/changelog.md) + * [Release Notes](16.0.4/release_notes.md) + +* **[16.0.3](16.0.3)** + * [Changelog](16.0.3/changelog.md) + * [Release Notes](16.0.3/release_notes.md) + * **[16.0.2](16.0.2)** * [Changelog](16.0.2/changelog.md) * [Release Notes](16.0.2/release_notes.md) diff --git a/changelog/18.0/18.0.0/changelog.md b/changelog/18.0/18.0.0/changelog.md new file mode 100644 index 00000000000..acf45ad7b37 --- /dev/null +++ b/changelog/18.0/18.0.0/changelog.md @@ -0,0 +1,529 @@ +# Changelog of Vitess v18.0.0 + +### Bug fixes +#### Backup and Restore + * vtctldclient: Add missing new backup option [#13543](https://github.com/vitessio/vitess/pull/13543) + * Backup: safe compressor/decompressor closure [#13668](https://github.com/vitessio/vitess/pull/13668) + * Address vttablet memory usage with backups to Azure Blob Service [#13770](https://github.com/vitessio/vitess/pull/13770) + * Do not drain tablet in incremental backup [#13773](https://github.com/vitessio/vitess/pull/13773) + * go/cmd/vtbackup: wait for plugins to finish initializing [#14113](https://github.com/vitessio/vitess/pull/14113) +#### Build/CI + * Remove `os.Exit` in release-notes generation [#13310](https://github.com/vitessio/vitess/pull/13310) + * CI: Fix make build related issues [#13583](https://github.com/vitessio/vitess/pull/13583) + * Enable failures in `tools/e2e_test_race.sh` and fix races [#13654](https://github.com/vitessio/vitess/pull/13654) + * Fix regular expression issue in Golang Upgrade and remove `release-14.0` from target branch [#13846](https://github.com/vitessio/vitess/pull/13846) + * Make `Static Code Checks Etc` fail if the `./changelog` folder is out-of-date [#14003](https://github.com/vitessio/vitess/pull/14003) +#### CLI + * viperutil: Remove potential cross site reflecting issue [#13483](https://github.com/vitessio/vitess/pull/13483) + * [vtctldclient] flags need to be defined to be deprecated 
[#13681](https://github.com/vitessio/vitess/pull/13681) + * Fix missing deprecated flags in `vttablet` and `vtgate` [#13975](https://github.com/vitessio/vitess/pull/13975) + * [release-18.0] Fix anonymous paths in cobra code-gen (#14185) [#14238](https://github.com/vitessio/vitess/pull/14238) + * servenv: Remove double close() logic [#14457](https://github.com/vitessio/vitess/pull/14457) + * [release-18.0] servenv: Remove double close() logic (#14457) [#14459](https://github.com/vitessio/vitess/pull/14459) +#### Cluster management + * Prevent resetting replication every time we set replication source [#13377](https://github.com/vitessio/vitess/pull/13377) + * Don't run any reparent commands if the host is empty [#13396](https://github.com/vitessio/vitess/pull/13396) + * [main] Upgrade-Downgrade Fix: Schema-initialization stuck on semi-sync ACKs while upgrading (#13411) [#13440](https://github.com/vitessio/vitess/pull/13440) + * fix: error.as method usage to send pointer to the reference type expected [#13496](https://github.com/vitessio/vitess/pull/13496) + * check keyspace snapshot time if none specified for backup restores [#13557](https://github.com/vitessio/vitess/pull/13557) + * Flaky tests: Fix race in memory topo [#13559](https://github.com/vitessio/vitess/pull/13559) + * ignore all error for views in engine reload [#13590](https://github.com/vitessio/vitess/pull/13590) + * Fix `BackupShard` to get its options from its own flags [#13813](https://github.com/vitessio/vitess/pull/13813) +#### Docker + * Fix ubi8.arm64.mysql80 build package mirrorserver error [#13431](https://github.com/vitessio/vitess/pull/13431) + * Fix dependencies in docker build script [#13520](https://github.com/vitessio/vitess/pull/13520) + * Use NodeJS v18 in VTAdmin Dockerfile [#13751](https://github.com/vitessio/vitess/pull/13751) + * [release-18.0] [Docker] Fix VTadmin build (#14363) [#14378](https://github.com/vitessio/vitess/pull/14378) +#### Evalengine + * Fix a number of encoding 
issues when evaluating expressions with the evalengine [#13509](https://github.com/vitessio/vitess/pull/13509) + * Fix type comparisons for Nullsafe* functions [#13605](https://github.com/vitessio/vitess/pull/13605) + * fastparse: Fix bug in overflow detection [#13702](https://github.com/vitessio/vitess/pull/13702) + * evalengine: Mark UUID() function as non-constant [#14051](https://github.com/vitessio/vitess/pull/14051) + * [release-18.0] evalengine: Misc bugs (#14351) [#14354](https://github.com/vitessio/vitess/pull/14354) +#### Examples + * Use $hostname in vtadmin script as all other scripts do [#13231](https://github.com/vitessio/vitess/pull/13231) + * Local example 101: idempotent on existing clusters [#13373](https://github.com/vitessio/vitess/pull/13373) + * Examples: only terminate vtadmin if it was started [#13433](https://github.com/vitessio/vitess/pull/13433) + * `examples/compose`: fix `consul:latest` error w/`docker-compose up -d` [#13468](https://github.com/vitessio/vitess/pull/13468) +#### General + * Flakes: Synchronize access to logErrStacks in vterrors [#13827](https://github.com/vitessio/vitess/pull/13827) + * [release-18.0] viper: register dynamic config with both disk and live (#14453) [#14455](https://github.com/vitessio/vitess/pull/14455) +#### Online DDL + * Solve RevertMigration.Comment read/write concurrency issue [#13700](https://github.com/vitessio/vitess/pull/13700) + * Fix closed channel `panic` in Online DDL cutover [#13729](https://github.com/vitessio/vitess/pull/13729) + * OnlineDDL: fix nil 'completed_timestamp' for cancelled migrations [#13928](https://github.com/vitessio/vitess/pull/13928) + * Fix `ApplySchema --batch-size` with ` --allow-zero-in-date` [#13951](https://github.com/vitessio/vitess/pull/13951) + * TableGC: support DROP VIEW [#14020](https://github.com/vitessio/vitess/pull/14020) + * OnlineDDL: cleanup cancelled migration artifacts; support `--retain-artifacts=` DDL strategy flag 
[#14029](https://github.com/vitessio/vitess/pull/14029) + * bugfix: change column name and type to json [#14093](https://github.com/vitessio/vitess/pull/14093) + * [Release 18.0]: Online DDL: timeouts for all gRPC calls (#14182) [#14189](https://github.com/vitessio/vitess/pull/14189) +#### Query Serving + * fix: GetField to use existing session for query [#13219](https://github.com/vitessio/vitess/pull/13219) + * VReplication Workflows: make sequence tables follow routing rules [#13238](https://github.com/vitessio/vitess/pull/13238) + * Adding random query generation for endtoend testing of the Gen4 planner [#13260](https://github.com/vitessio/vitess/pull/13260) + * Bug fix: SQL queries erroring with message `unknown aggregation random` [#13330](https://github.com/vitessio/vitess/pull/13330) + * bugfixes: collection of fixes to bugs found while fuzzing [#13332](https://github.com/vitessio/vitess/pull/13332) + * bug: don't always wrap aggregation in coalesce [#13348](https://github.com/vitessio/vitess/pull/13348) + * Random selection of keyspace based on available tablet [#13359](https://github.com/vitessio/vitess/pull/13359) + * Enable Tcp keep alive and provide keep alive period setting [#13434](https://github.com/vitessio/vitess/pull/13434) + * Improving random query generation for endtoend testing [#13460](https://github.com/vitessio/vitess/pull/13460) + * ignore ongoing backfill vindex from routing selection [#13505](https://github.com/vitessio/vitess/pull/13505) + * [release-17.0] ignore ongoing backfill vindex from routing selection [#13523](https://github.com/vitessio/vitess/pull/13523) + * Fix flaky vtgate test TestInconsistentStateDetectedBuffering [#13560](https://github.com/vitessio/vitess/pull/13560) + * Fix show character set [#13565](https://github.com/vitessio/vitess/pull/13565) + * vtgate: fix race condition iterating tables and views from schema tracker [#13673](https://github.com/vitessio/vitess/pull/13673) + * sqlparser: Track if original default 
value is a literal [#13730](https://github.com/vitessio/vitess/pull/13730) + * Fix for "text type with an unknown/unsupported collation cannot be hashed" error [#13852](https://github.com/vitessio/vitess/pull/13852) + * VTGate Buffering: Use a more accurate heuristic for determining if we're doing a reshard [#13856](https://github.com/vitessio/vitess/pull/13856) + * sqlparser: Tablespace option is case sensitive [#13884](https://github.com/vitessio/vitess/pull/13884) + * Rewrite `USING` to `ON` condition for joins [#13931](https://github.com/vitessio/vitess/pull/13931) + * handle large number of predicates without timing out [#13979](https://github.com/vitessio/vitess/pull/13979) + * Fix `NOT IN` expression used in the SET NULL for a child table on an update [#13988](https://github.com/vitessio/vitess/pull/13988) + * Fix the `SELECT` query we run on the child table to verify that update is allowed on a RESTRICT constraint [#13991](https://github.com/vitessio/vitess/pull/13991) + * fix data race in join engine primitive olap streaming mode execution [#14012](https://github.com/vitessio/vitess/pull/14012) + * fix: cost to include subshard opcode [#14023](https://github.com/vitessio/vitess/pull/14023) + * Add session flag for stream execute grpc api [#14046](https://github.com/vitessio/vitess/pull/14046) + * Fix cascading Delete failure while using Prepared statements [#14048](https://github.com/vitessio/vitess/pull/14048) + * Fix Fk verification and update queries to accommodate for bindVariables being NULL [#14061](https://github.com/vitessio/vitess/pull/14061) + * DDL execution to commit open transaction [#14110](https://github.com/vitessio/vitess/pull/14110) + * fix: analyze statement parsing and planning [#14268](https://github.com/vitessio/vitess/pull/14268) + * [release-18.0] fix: analyze statement parsing and planning (#14268) [#14275](https://github.com/vitessio/vitess/pull/14275) + * [release-18.0] schemadiff: fix missing `DROP CONSTRAINT` in 
duplicate/redundant constraints scenario. (#14387) [#14391](https://github.com/vitessio/vitess/pull/14391) + * [release-18.0] vtgate/engine: Fix race condition in join logic (#14435) [#14441](https://github.com/vitessio/vitess/pull/14441) +#### Schema Tracker + * Vttablet schema tracking: Fix _vt.schema_version corruption [#13045](https://github.com/vitessio/vitess/pull/13045) + * Ignore error while reading table data in Schema.Engine reload [#13421](https://github.com/vitessio/vitess/pull/13421) + * schema.Reload(): ignore column reading errors for views only, error for tables [#13442](https://github.com/vitessio/vitess/pull/13442) +#### TabletManager + * mysqlctl: Correctly encode database and table names [#13312](https://github.com/vitessio/vitess/pull/13312) + * Fix remote VersionString API [#13484](https://github.com/vitessio/vitess/pull/13484) + * mysqlctl: Remove noisy log line [#13599](https://github.com/vitessio/vitess/pull/13599) + * GetSchema: limit concurrent operations [#13617](https://github.com/vitessio/vitess/pull/13617) + * mysqlctl: Reduce logging for running commands [#13659](https://github.com/vitessio/vitess/pull/13659) +#### Throttler + * Tablet throttler: only start watching SrvKeyspace once it's confirmed to exist [#13384](https://github.com/vitessio/vitess/pull/13384) + * Throttler: reintroduce deprecated flags so that deprecation actually works [#13597](https://github.com/vitessio/vitess/pull/13597) + * Silence 'CheckThrottler' gRPC calls [#13925](https://github.com/vitessio/vitess/pull/13925) + * Tablet throttler: empty list of probes on non-leader [#13926](https://github.com/vitessio/vitess/pull/13926) + * [release-18.0] Throttler: set timeouts on gRPC communication and on topo communication (#14165) [#14167](https://github.com/vitessio/vitess/pull/14167) + * Tablet throttler: fix race condition by removing goroutine call [#14179](https://github.com/vitessio/vitess/pull/14179) + * [release-18.0] Tablet throttler: fix race condition by 
removing goroutine call (#14179) [#14198](https://github.com/vitessio/vitess/pull/14198) +#### VReplication + * VReplication: Fix VDiff2 DeleteByUUID Query [#13255](https://github.com/vitessio/vitess/pull/13255) + * Better handling of vreplication setState() failure [#13488](https://github.com/vitessio/vitess/pull/13488) + * VReplication: Ignore unrelated shards in partial MoveTables traffic state [#13515](https://github.com/vitessio/vitess/pull/13515) + * VReplication: Ensure ROW events are sent within a transaction [#13547](https://github.com/vitessio/vitess/pull/13547) + * VReplication: Make Source Tablet Selection More Robust [#13582](https://github.com/vitessio/vitess/pull/13582) + * vtgate tablet gateway buffering: don't shutdown if not initialized [#13695](https://github.com/vitessio/vitess/pull/13695) + * VReplication: Improve MoveTables Create Error Handling [#13737](https://github.com/vitessio/vitess/pull/13737) + * Minor --initialize-target-sequences followups [#13758](https://github.com/vitessio/vitess/pull/13758) + * Flakes: skip flaky check that ETA for a VReplication VDiff2 Progress command is in the future. 
[#13804](https://github.com/vitessio/vitess/pull/13804) + * Flakes: VReplication unit tests: reduce goroutine leakage [#13824](https://github.com/vitessio/vitess/pull/13824) + * Properly support ignore_nulls in CreateLookupVindex [#13913](https://github.com/vitessio/vitess/pull/13913) + * VReplication: Handle SQL NULL and JSON 'null' correctly for JSON columns [#13944](https://github.com/vitessio/vitess/pull/13944) + * copy over existing vreplication rows copied to local counter if resuming from another tablet [#13949](https://github.com/vitessio/vitess/pull/13949) + * VDiff: correct handling of default source and target cells [#13969](https://github.com/vitessio/vitess/pull/13969) + * MoveTables Cancel: drop denied tables on target when dropping source/target tables [#14008](https://github.com/vitessio/vitess/pull/14008) + * VReplication VPlayer: set foreign_key_checks on initialization [#14013](https://github.com/vitessio/vitess/pull/14013) + * json: Fix quoting JSON keys [#14066](https://github.com/vitessio/vitess/pull/14066) + * VDiff: properly split cell values in record when using TabletPicker [#14099](https://github.com/vitessio/vitess/pull/14099) + * VDiff: Cleanup the controller for a VDiff before deleting it [#14107](https://github.com/vitessio/vitess/pull/14107) + * [release-18.0] VReplication: error on vtctldclient commands w/o tablet types (#14294) [#14298](https://github.com/vitessio/vitess/pull/14298) + * [release-18.0] Vtctld SwitchReads: fix bug where writes were also being switched as part of switching reads when all traffic was switched using SwitchTraffic (#14360) [#14379](https://github.com/vitessio/vitess/pull/14379) + * [release-18.0] VDiff: wait for shard streams of one table diff to complete for before starting that of the next table (#14345) [#14382](https://github.com/vitessio/vitess/pull/14382) + * [release-18.0] VDiff tablet selection: pick non-serving tablets in Reshard workflows (#14413) 
[#14418](https://github.com/vitessio/vitess/pull/14418) + * VReplication: Handle multiple streams in UpdateVReplicationWorkflow RPC [#14447](https://github.com/vitessio/vitess/pull/14447) + * [release-18.0] VDiff: "show all" should only report vdiffs for the specified keyspace and workflow (#14442) [#14466](https://github.com/vitessio/vitess/pull/14466) + * [release-18.0] VReplication: Handle multiple streams in UpdateVReplicationWorkflow RPC (#14447) [#14468](https://github.com/vitessio/vitess/pull/14468) +#### VTAdmin + * Unset the PREFIX environment variable when building VTAdmin [#13554](https://github.com/vitessio/vitess/pull/13554) +#### VTCombo + * Fix vtcombo DBDDL plugin race condition [#13117](https://github.com/vitessio/vitess/pull/13117) +#### VTorc + * Ensure to call `servenv.Init` when needed [#13638](https://github.com/vitessio/vitess/pull/13638) +#### vtctl + * [release-18.0] VReplication: Add missing info to vtctldclient workflow SHOW output (#14225) [#14240](https://github.com/vitessio/vitess/pull/14240) +### CI/Build +#### Backup and Restore + * Refactor `backup_pitr` into two distinct CI tests: builtin vs Xtrabackup [#13395](https://github.com/vitessio/vitess/pull/13395) + * Fixing `backup_pitr` flaky tests via wait-for loop on topo reads [#13781](https://github.com/vitessio/vitess/pull/13781) + * [release-18.0] Incremental backup: fix race condition in reading 'mysqlbinlog' output (#14330) [#14335](https://github.com/vitessio/vitess/pull/14335) +#### Build/CI + * Update a number of dependencies [#13031](https://github.com/vitessio/vitess/pull/13031) + * Cleanup unused Dockerfile entries [#13327](https://github.com/vitessio/vitess/pull/13327) + * flags: Remove hardcoded runner paths [#13482](https://github.com/vitessio/vitess/pull/13482) + * added no-commit-collection option to launchable record build command [#13490](https://github.com/vitessio/vitess/pull/13490) + * Replace deprecated `github.com/golang/mock` with `go.uber.org/mock` 
[#13512](https://github.com/vitessio/vitess/pull/13512) + * [viper WatchConfig] platform-specific write to ensure callback fires exactly once [#13627](https://github.com/vitessio/vitess/pull/13627) + * build: Allow passing in custom -ldflags [#13748](https://github.com/vitessio/vitess/pull/13748) + * Run auto golang upgrade only on vitessio/vitess [#13766](https://github.com/vitessio/vitess/pull/13766) + * collations: implement collation dumping as a docker image [#13879](https://github.com/vitessio/vitess/pull/13879) +#### Docker + * docker/k8s: add bookworm builds [#13436](https://github.com/vitessio/vitess/pull/13436) + * Bump docker images to `bullseye` [#13664](https://github.com/vitessio/vitess/pull/13664) +#### Documentation + * fix docgen for subcommands [#13518](https://github.com/vitessio/vitess/pull/13518) + * update docgen to embed commit ID in autogenerated doc frontmatter [#14056](https://github.com/vitessio/vitess/pull/14056) +#### General + * go/mysql: switch to new API for x/exp/slices.SortFunc [#13644](https://github.com/vitessio/vitess/pull/13644) + * [main] Upgrade the Golang version to `go1.21.1` [#13933](https://github.com/vitessio/vitess/pull/13933) + * [release-18.0] Upgrade the Golang version to `go1.21.2` [#14195](https://github.com/vitessio/vitess/pull/14195) + * [release-18.0] Upgrade the Golang version to `go1.21.3` [#14230](https://github.com/vitessio/vitess/pull/14230) +#### Online DDL + * CI: fix onlineddl_scheduler flakiness [#13754](https://github.com/vitessio/vitess/pull/13754) + * [release-18.0] OnlineDDL: reduce vrepl_stress workload in forks (#14302) [#14349](https://github.com/vitessio/vitess/pull/14349) +#### Query Serving + * Endtoend: stress tests for VTGate FOREIGN KEY support [#13799](https://github.com/vitessio/vitess/pull/13799) + * ci: pool-related test flakyness [#14076](https://github.com/vitessio/vitess/pull/14076) +#### Throttler + * Deprecating and removing tablet throttler CLI flags and tests 
[#13246](https://github.com/vitessio/vitess/pull/13246) + * Throttler: verify deprecated flags are still allowed [#13615](https://github.com/vitessio/vitess/pull/13615) +#### VReplication + * Flakes: Remove CI endtoend test for VReplication Copy Phase Throttling [#13343](https://github.com/vitessio/vitess/pull/13343) + * Flakes: Improve reliability of vreplication_copy_parallel test [#13857](https://github.com/vitessio/vitess/pull/13857) +#### VTAdmin + * Improve time taken to run the examples by optimizing `vtadmin` build [#13262](https://github.com/vitessio/vitess/pull/13262) +#### VTorc + * [release-18.0] docker: add dedicated vtorc container (#14126) [#14148](https://github.com/vitessio/vitess/pull/14148) +### Dependabot +#### General + * Bump word-wrap from 1.2.3 to 1.2.4 in /web/vtadmin [#13569](https://github.com/vitessio/vitess/pull/13569) + * Bump tough-cookie from 4.1.2 to 4.1.3 in /web/vtadmin [#13767](https://github.com/vitessio/vitess/pull/13767) + * [release-18.0] Bump github.com/cyphar/filepath-securejoin from 0.2.3 to 0.2.4 (#14239) [#14253](https://github.com/vitessio/vitess/pull/14253) + * [release-18.0] Bump golang.org/x/net from 0.14.0 to 0.17.0 (#14260) [#14264](https://github.com/vitessio/vitess/pull/14264) +#### Java + * java: update to latest dependencies for grpc and protobuf [#13996](https://github.com/vitessio/vitess/pull/13996) +#### Observability + * Bump tough-cookie and @cypress/request in /vitess-mixin/e2e [#13768](https://github.com/vitessio/vitess/pull/13768) +#### VTAdmin + * build(deps-dev): bump vite from 4.2.1 to 4.2.3 in /web/vtadmin [#13240](https://github.com/vitessio/vitess/pull/13240) + * Bump protobufjs from 7.2.3 to 7.2.5 in /web/vtadmin [#13833](https://github.com/vitessio/vitess/pull/13833) + * [release-18.0] Bump postcss from 8.4.21 to 8.4.31 in /web/vtadmin (#14173) [#14258](https://github.com/vitessio/vitess/pull/14258) + * [release-18.0] Bump @babel/traverse from 7.21.4 to 7.23.2 in /web/vtadmin (#14304) 
[#14308](https://github.com/vitessio/vitess/pull/14308) +### Documentation +#### CLI + * gentler warning message on config-not-found [#13215](https://github.com/vitessio/vitess/pull/13215) + * switch casing in onlineddl subcommand help text [#14091](https://github.com/vitessio/vitess/pull/14091) + * [release-18.0] Bypass cobra completion commands so they still function (#14217) [#14234](https://github.com/vitessio/vitess/pull/14234) +#### Documentation + * Add security audit report [#13221](https://github.com/vitessio/vitess/pull/13221) + * update link for reparenting guide [#13350](https://github.com/vitessio/vitess/pull/13350) + * anonymize homedirs in generated docs [#14101](https://github.com/vitessio/vitess/pull/14101) + * Summary changes for foreign keys [#14112](https://github.com/vitessio/vitess/pull/14112) + * fix bad copy-paste in zkctld docgen [#14123](https://github.com/vitessio/vitess/pull/14123) + * [release-18.0] release notes: edit summary for consistency (#14319) [#14320](https://github.com/vitessio/vitess/pull/14320) +#### General + * Improve release process documentation [#14000](https://github.com/vitessio/vitess/pull/14000) +#### Governance + * governance doc clean up [#13337](https://github.com/vitessio/vitess/pull/13337) +### Enhancement +#### Backup and Restore + * go/vt/mysqlctl: instrument s3 upload time [#12500](https://github.com/vitessio/vitess/pull/12500) + * metrics: change vtbackup_duration_by_phase to binary-valued vtbackup_phase [#12973](https://github.com/vitessio/vitess/pull/12973) + * Incremental backup & recovery: restore-to-timestamp [#13270](https://github.com/vitessio/vitess/pull/13270) + * backup: Allow for upgrade safe backups [#13449](https://github.com/vitessio/vitess/pull/13449) + * Incremental backup: accept GTID position without 'MySQL56/' flavor prefix [#13474](https://github.com/vitessio/vitess/pull/13474) + * Backup & Restore: vtctldclient to support PITR flags 
[#13513](https://github.com/vitessio/vitess/pull/13513) + * BackupShard: support incremental backup [#13522](https://github.com/vitessio/vitess/pull/13522) + * Point in time recovery: fix cross-tablet GTID evaluation [#13555](https://github.com/vitessio/vitess/pull/13555) + * Backup/restore: provision and restore a tablet with point-in-time recovery flags [#13964](https://github.com/vitessio/vitess/pull/13964) + * go/cmd/vtbackup: report replication status metrics during catch-up phase [#13995](https://github.com/vitessio/vitess/pull/13995) +#### Build/CI + * Set the number of threads for release notes generation with a flag [#13273](https://github.com/vitessio/vitess/pull/13273) + * Optimize `make build` in `test.go` and in CI [#13567](https://github.com/vitessio/vitess/pull/13567) + * Skip VTAdmin build in more places [#13588](https://github.com/vitessio/vitess/pull/13588) + * Skip VTAdmin build in Docker tests [#13836](https://github.com/vitessio/vitess/pull/13836) + * Migrates most workflows to 4 and 16 cores Large GitHub-Hosted-Runners [#13845](https://github.com/vitessio/vitess/pull/13845) + * Skip launchable if the Pull Request is marked as a Draft [#13886](https://github.com/vitessio/vitess/pull/13886) + * [release-18.0] Automatic approval of `vitess-bot` clean backports (#14352) [#14357](https://github.com/vitessio/vitess/pull/14357) +#### CLI + * Vtctldclient MoveTables [#13015](https://github.com/vitessio/vitess/pull/13015) + * migrate vtorc to use cobra commands [#13917](https://github.com/vitessio/vitess/pull/13917) +#### Cluster management + * increase length of reparent_journal columns [#13287](https://github.com/vitessio/vitess/pull/13287) + * Improvements to PRS [#13623](https://github.com/vitessio/vitess/pull/13623) + * Add 2 more durability policies that allow RDONLY tablets to send semi-sync ACKs [#13698](https://github.com/vitessio/vitess/pull/13698) + * `vtctld`/`vtorc`: improve reparenting stats 
[#13723](https://github.com/vitessio/vitess/pull/13723) +#### Documentation + * consolidate docs [#13959](https://github.com/vitessio/vitess/pull/13959) +#### Evalengine + * evalengine: implement date/time math [#13274](https://github.com/vitessio/vitess/pull/13274) + * sqlparser: Add support for TIMESTAMPADD [#13314](https://github.com/vitessio/vitess/pull/13314) + * mysql: introduce icuregex package [#13391](https://github.com/vitessio/vitess/pull/13391) + * icuregex: Lazy load ICU data into memory [#13640](https://github.com/vitessio/vitess/pull/13640) + * evalengine: Improve weight string support [#13658](https://github.com/vitessio/vitess/pull/13658) + * evalengine: Fix JSON weight string computation [#13669](https://github.com/vitessio/vitess/pull/13669) +#### Examples + * Misc Local Install improvements. [#13446](https://github.com/vitessio/vitess/pull/13446) +#### General + * Refactor code to remove `evalengine` as a dependency of `VTOrc` [#13642](https://github.com/vitessio/vitess/pull/13642) +#### Observability + * vtorc: add detected_problems counter [#13967](https://github.com/vitessio/vitess/pull/13967) +#### Online DDL + * `vtctl OnlineDDL`: complete command set [#12963](https://github.com/vitessio/vitess/pull/12963) + * Online DDL: improved row estimation via ANALYZE TABLE with --analyze-table strategy flag [#13352](https://github.com/vitessio/vitess/pull/13352) + * OnlineDDL: support @@migration_context in vtgate session. 
Use if non-empty [#13675](https://github.com/vitessio/vitess/pull/13675) + * Vtgate: pass 'SHOW VITESS_MIGRATIONS' to tablet's query executor [#13726](https://github.com/vitessio/vitess/pull/13726) + * vtctldclient OnlineDDL CANCEL [#13860](https://github.com/vitessio/vitess/pull/13860) + * vtctldclient: support OnlineDDL `complete`, `launch` commands [#13896](https://github.com/vitessio/vitess/pull/13896) + * [release-18.0] Online DDL: lint DDL strategy flags (#14373) [#14399](https://github.com/vitessio/vitess/pull/14399) +#### Query Serving + * vindexes: return unknown params [#12951](https://github.com/vitessio/vitess/pull/12951) + * Fix and Make aggregation planner handle aggregation functions better [#13228](https://github.com/vitessio/vitess/pull/13228) + * vtgate planner: HAVING in the new operator horizon planner [#13289](https://github.com/vitessio/vitess/pull/13289) + * Support complex aggregation in Gen4's Operators [#13326](https://github.com/vitessio/vitess/pull/13326) + * Adds support for ANY_VALUE [#13342](https://github.com/vitessio/vitess/pull/13342) + * Aggregation engine refactor [#13378](https://github.com/vitessio/vitess/pull/13378) + * Move more horizon planning to the operators [#13412](https://github.com/vitessio/vitess/pull/13412) + * Move UNION planning to the operators [#13450](https://github.com/vitessio/vitess/pull/13450) + * Improve and Fix Distinct Aggregation planner [#13466](https://github.com/vitessio/vitess/pull/13466) + * Enhancing VTGate buffering for MoveTables and Shard by Shard Migration [#13507](https://github.com/vitessio/vitess/pull/13507) + * Add 2 new metrics with tablet type labels [#13521](https://github.com/vitessio/vitess/pull/13521) + * vtgate table schema tracking to use GetSchema rpc [#13544](https://github.com/vitessio/vitess/pull/13544) + * Add a `keyspace` configuration in the `vschema` for foreign key mode [#13553](https://github.com/vitessio/vitess/pull/13553) + * Reduce usages of old horizon planning 
fallback [#13595](https://github.com/vitessio/vitess/pull/13595) + * Add dry-run/monitoring-only mode for TxThrottler [#13604](https://github.com/vitessio/vitess/pull/13604) + * go/vt/vitessdriver: implement driver.{Connector,DriverContext} [#13704](https://github.com/vitessio/vitess/pull/13704) + * More union merging [#13743](https://github.com/vitessio/vitess/pull/13743) + * Move subqueries to use the operator model [#13750](https://github.com/vitessio/vitess/pull/13750) + * Add support for tuple as value type [#13800](https://github.com/vitessio/vitess/pull/13800) + * icuregex: Update to ICU 73 [#13912](https://github.com/vitessio/vitess/pull/13912) + * Change internal vindex type recommendation for integrals to xxhash [#13956](https://github.com/vitessio/vitess/pull/13956) + * Foreign key cascade: retain "for update" lock on select query plans [#13985](https://github.com/vitessio/vitess/pull/13985) + * Improve the rewriter to simplify more queries [#14059](https://github.com/vitessio/vitess/pull/14059) + * [release-18.0] gen4: Support explicit column aliases on derived tables (#14129) [#14156](https://github.com/vitessio/vitess/pull/14156) +#### Schema Tracker + * vttablet: do not notify `vtgate` about internal tables [#13897](https://github.com/vitessio/vitess/pull/13897) +#### TabletManager + * Tablet throttler: throttled app configuration via `vtctl UpdateThrottlerConfig` [#13351](https://github.com/vitessio/vitess/pull/13351) +#### Throttler + * txthrottler: verify config at vttablet startup, consolidate funcs [#13115](https://github.com/vitessio/vitess/pull/13115) + * txthrottler: add metrics for topoWatcher and healthCheckStreamer [#13153](https://github.com/vitessio/vitess/pull/13153) + * `UpdateThrottlerConfig --unthrottle-app ...` [#13494](https://github.com/vitessio/vitess/pull/13494) + * Reroute 'ALTER VITESS_MIGRATION ... THROTTLE ...' 
through topo [#13511](https://github.com/vitessio/vitess/pull/13511) + * Tablet throttler: inter-checks via gRPC [#13514](https://github.com/vitessio/vitess/pull/13514) + * Per workload TxThrottler metrics [#13526](https://github.com/vitessio/vitess/pull/13526) + * Throttler: exempt apps via `UpdateThrottlerConfig --throttle-app-exempt` [#13666](https://github.com/vitessio/vitess/pull/13666) +#### Topology + * Support arbitrary ZooKeeper config lines [#13829](https://github.com/vitessio/vitess/pull/13829) +#### VReplication + * MoveTables: allow copying all tables in a single atomic copy phase cycle [#13137](https://github.com/vitessio/vitess/pull/13137) + * VReplication: More intelligently manage vschema table entries on unsharded targets [#13220](https://github.com/vitessio/vitess/pull/13220) + * MoveTables sequence e2e tests: change terminology to use basic vs simple everywhere for partial movetables workflows [#13435](https://github.com/vitessio/vitess/pull/13435) + * wrangler,workflow/workflow: materialize from intersecting source shards based on primary vindexes [#13782](https://github.com/vitessio/vitess/pull/13782) + * Implement Reshard in vtctldclient [#13792](https://github.com/vitessio/vitess/pull/13792) + * VDiff: Migrate client command to vtctldclient [#13976](https://github.com/vitessio/vitess/pull/13976) + * Migrate vreplication commands to vtctldclient: Mount and Migrate [#14174](https://github.com/vitessio/vitess/pull/14174) + * [release-18.0] Migrate CreateLookupVindex and ExternalizeVindex to vtctldclient (#14086) [#14183](https://github.com/vitessio/vitess/pull/14183) + * Migrate Materialize command to vtctldclient [#14184](https://github.com/vitessio/vitess/pull/14184) + * [Release 18.0] Backport of #17174 [#14210](https://github.com/vitessio/vitess/pull/14210) + * [release-18.0] Migrate Materialize command to vtctldclient (#14184) [#14214](https://github.com/vitessio/vitess/pull/14214) + * [release-18.0] VReplication: Add traffic state to 
vtctldclient workflow status output (#14280) [#14282](https://github.com/vitessio/vitess/pull/14282) + * [release-18.0] VReplication: Add --all-cells flag to create sub-commands (#14341) [#14343](https://github.com/vitessio/vitess/pull/14343) +#### VTAdmin + * [release-18.0] Optimize the GetWorkflows RPC (#14212) [#14233](https://github.com/vitessio/vitess/pull/14233) +#### VTCombo + * `vttestserver`: persist vschema changes in `--persistent_mode` [#13065](https://github.com/vitessio/vitess/pull/13065) +#### VTorc + * Improve VTOrc failure detection to be able to better handle dead primary failures [#13190](https://github.com/vitessio/vitess/pull/13190) + * Add flag to VTOrc to enable/disable its ability to run ERS [#13259](https://github.com/vitessio/vitess/pull/13259) + * Add metric for showing the errant GTIDs in VTOrc [#13281](https://github.com/vitessio/vitess/pull/13281) + * Add timestamp to vtorc debug page [#13379](https://github.com/vitessio/vitess/pull/13379) + * Augment VTOrc to also store the shard records and use it to better judge Primary recoveries [#13587](https://github.com/vitessio/vitess/pull/13587) + * Fix a couple of logs in VTOrc [#13667](https://github.com/vitessio/vitess/pull/13667) + * Errant GTID Metrics Refactor [#13670](https://github.com/vitessio/vitess/pull/13670) + * VTOrc converts a tablet to DRAINED type if it detects errant GTIDs on it [#13873](https://github.com/vitessio/vitess/pull/13873) +#### vtctl + * vtctl,vindexes: logs warnings and export stat for unknown vindex params [#13322](https://github.com/vitessio/vitess/pull/13322) + * vtctldclient OnlineDDL: support `throttle`, `unthrottle` [#13916](https://github.com/vitessio/vitess/pull/13916) +#### web UI + * Add vtsql flags to vtadmin [#13674](https://github.com/vitessio/vitess/pull/13674) +### Feature Request +#### CLI + * [vtctld] more cobra binaries [#13930](https://github.com/vitessio/vitess/pull/13930) + * [cobra] vtgate and vttablet 
[#13943](https://github.com/vitessio/vitess/pull/13943) + * [cli] migrate mysqlctl and mysqlctld to cobra [#13946](https://github.com/vitessio/vitess/pull/13946) + * [CLI] cobra lots of things [#14007](https://github.com/vitessio/vitess/pull/14007) + * miscellaneous cobras [#14069](https://github.com/vitessio/vitess/pull/14069) + * [cli] cobra zookeeper [#14094](https://github.com/vitessio/vitess/pull/14094) +#### Online DDL + * Add OnlineDDL show support [#13738](https://github.com/vitessio/vitess/pull/13738) + * [onlineddl] retry and cleanup [#13830](https://github.com/vitessio/vitess/pull/13830) +#### Query Serving + * Add group_concat aggregation support [#13331](https://github.com/vitessio/vitess/pull/13331) + * Add support for kill statement [#13371](https://github.com/vitessio/vitess/pull/13371) + * Build foreign key definition in schema tracker [#13657](https://github.com/vitessio/vitess/pull/13657) + * Foreign Keys: `INSERT` planning [#13676](https://github.com/vitessio/vitess/pull/13676) + * Foreign Keys: `DELETE` planning [#13746](https://github.com/vitessio/vitess/pull/13746) + * Foreign Keys: `UPDATE` planning [#13762](https://github.com/vitessio/vitess/pull/13762) + * Add Foreign key Cascade engine primitive [#13802](https://github.com/vitessio/vitess/pull/13802) + * Foreign key cascade planning for DELETE and UPDATE queries [#13823](https://github.com/vitessio/vitess/pull/13823) + * Add Foreign key verify constraint engine primitive [#13848](https://github.com/vitessio/vitess/pull/13848) + * Add VSchema DDL support for dropping sequence and auto increment [#13882](https://github.com/vitessio/vitess/pull/13882) + * Update Cascade Planning leading to Foreign key constraint verification [#13902](https://github.com/vitessio/vitess/pull/13902) + * Disallow Insert with Duplicate key update and Replace Into queries on foreign key column, set locks on fk queries [#13953](https://github.com/vitessio/vitess/pull/13953) +#### VReplication + * VReplication: 
Initialize Sequence Tables Used By Tables Being Moved [#13656](https://github.com/vitessio/vitess/pull/13656) + * MoveTables: add flag to specify that routing rules should not be created when a movetables workflow is created [#13895](https://github.com/vitessio/vitess/pull/13895) +### Internal Cleanup +#### Build/CI + * docker/k8s: Cleanup done TODO [#13347](https://github.com/vitessio/vitess/pull/13347) + * Remove unused chromedriver [#13573](https://github.com/vitessio/vitess/pull/13573) + * docker/bootstrap: remove --no-cache flag [#13785](https://github.com/vitessio/vitess/pull/13785) +#### CLI + * remove query_analyzer binary and release [#14055](https://github.com/vitessio/vitess/pull/14055) + * [release-18.0] Make vtctldclient mount command more standard (#14281) [#14283](https://github.com/vitessio/vitess/pull/14283) +#### Cluster management + * Fix logging by omitting the host and port in `SetReadOnly` [#13470](https://github.com/vitessio/vitess/pull/13470) + * Improve logging and renaming PrimaryTermStartTimestamp in vttablets [#13625](https://github.com/vitessio/vitess/pull/13625) +#### Evalengine + * collations: Refactor to separate basic collation information from data [#13868](https://github.com/vitessio/vitess/pull/13868) +#### Examples + * docker/mini: remove refs to orc configs [#13495](https://github.com/vitessio/vitess/pull/13495) +#### General + * servenv: Allow for explicit bind address [#13188](https://github.com/vitessio/vitess/pull/13188) + * Remove `out.txt` and add `release-17.0` to go upgrade automation [#13261](https://github.com/vitessio/vitess/pull/13261) + * Deprecate VTGR [#13301](https://github.com/vitessio/vitess/pull/13301) + * mysql: Refactor dependencies [#13688](https://github.com/vitessio/vitess/pull/13688) + * Remove explicit usage of etcd v2 (api and storage) [#13791](https://github.com/vitessio/vitess/pull/13791) + * Go 1.21 cleanups [#13862](https://github.com/vitessio/vitess/pull/13862) + * [wrangler] cleanup unused 
functions [#13867](https://github.com/vitessio/vitess/pull/13867) + * [misc] Delete more unused functions, tidy up dupe imports [#13878](https://github.com/vitessio/vitess/pull/13878) + * Clean up deprecated slice header usage and unused code [#13880](https://github.com/vitessio/vitess/pull/13880) + * [misc] tidy imports [#13885](https://github.com/vitessio/vitess/pull/13885) + * [staticcheck] miscellaneous tidying [#13892](https://github.com/vitessio/vitess/pull/13892) + * [staticcheck] Cleanup deprecations [#13898](https://github.com/vitessio/vitess/pull/13898) + * Consolidate helper functions for working with proto3 time messages [#13905](https://github.com/vitessio/vitess/pull/13905) + * [staticcheck] Last few staticchecks! [#13909](https://github.com/vitessio/vitess/pull/13909) + * Remove deprecated flags before `v18.0.0` [#14071](https://github.com/vitessio/vitess/pull/14071) +#### Observability + * stats: use *time.Ticker instead of time.After() [#13492](https://github.com/vitessio/vitess/pull/13492) +#### Query Serving + * Operator planner refactor [#13294](https://github.com/vitessio/vitess/pull/13294) + * Refactor and add a comment to schema initialisation code [#13309](https://github.com/vitessio/vitess/pull/13309) + * vtgate v3 planner removal [#13458](https://github.com/vitessio/vitess/pull/13458) + * vtgate buffering logic: remove the deprecated healthcheck based implementation [#13584](https://github.com/vitessio/vitess/pull/13584) + * Refactor Expression and Statement Simplifier [#13636](https://github.com/vitessio/vitess/pull/13636) + * Remove duplicate ACL check in tabletserver handleHTTPConsolidations [#13876](https://github.com/vitessio/vitess/pull/13876) + * inputs method to return additional information about the input primitive [#13883](https://github.com/vitessio/vitess/pull/13883) + * refactor: move DML logic to sql_builder.go [#13920](https://github.com/vitessio/vitess/pull/13920) + * Fix `TestLeftJoinUsingUnsharded` and remove instability 
when running E2E locally [#13973](https://github.com/vitessio/vitess/pull/13973) + * Remove excessive logging in transactions [#14021](https://github.com/vitessio/vitess/pull/14021) + * moved timeout test to different package [#14028](https://github.com/vitessio/vitess/pull/14028) + * [release-18.0] Rename Foreign Key enum values in VSchema and drop `FK_` prefix (#14274) [#14299](https://github.com/vitessio/vitess/pull/14299) + * tx_throttler: remove topo watchers metric [#14444](https://github.com/vitessio/vitess/pull/14444) +#### TabletManager + * mysqlctl: Use DBA connection for schema operations [#13178](https://github.com/vitessio/vitess/pull/13178) + * k8stopo: Include deprecation warning [#13299](https://github.com/vitessio/vitess/pull/13299) + * k8stopo: Remove the deprecated Kubernetes topo [#13303](https://github.com/vitessio/vitess/pull/13303) + * vtgr: Remove deprecated vtgr [#13308](https://github.com/vitessio/vitess/pull/13308) + * mysqlctl: Move more to use built in MySQL client [#13338](https://github.com/vitessio/vitess/pull/13338) +#### Throttler + * `txthrottler`: remove `txThrottlerConfig` struct, rely on `tabletenv` [#13624](https://github.com/vitessio/vitess/pull/13624) +#### VReplication + * Use sqlparser for all dynamic query building in VDiff2 [#13319](https://github.com/vitessio/vitess/pull/13319) + * vreplication: Move to use collations package [#13566](https://github.com/vitessio/vitess/pull/13566) +#### VTAdmin + * [VTAdmin] Upgrade to use node 18.16.0 [#13288](https://github.com/vitessio/vitess/pull/13288) +#### VTorc + * VTOrc: Update the primary key for all the tables from `hostname, port` to `alias` [#13243](https://github.com/vitessio/vitess/pull/13243) + * vtorc: Cleanup more unused code [#13354](https://github.com/vitessio/vitess/pull/13354) + * Improve lock action string [#13355](https://github.com/vitessio/vitess/pull/13355) + * Improve VTOrc logging statements, now that we have alias as a field 
[#13428](https://github.com/vitessio/vitess/pull/13428) + * Remove excessive logging in VTOrc APIs [#13459](https://github.com/vitessio/vitess/pull/13459) + * [release-16.0] Remove excessive logging in VTOrc APIs (#13459) [#13462](https://github.com/vitessio/vitess/pull/13462) +#### vtctl + * [release-18.0] Move all examples to vtctldclient (#14226) [#14241](https://github.com/vitessio/vitess/pull/14241) +#### vtexplain + * vtexplain: Fix passing through context for cleanup [#13900](https://github.com/vitessio/vitess/pull/13900) +### Performance +#### General + * proto: Faster clone [#13914](https://github.com/vitessio/vitess/pull/13914) +#### Query Serving + * Cache info schema table info [#13724](https://github.com/vitessio/vitess/pull/13724) + * gen4: Fast aggregations [#13904](https://github.com/vitessio/vitess/pull/13904) + * Cache v3 [#13939](https://github.com/vitessio/vitess/pull/13939) + * Reduce network pressure on multi row insert [#14064](https://github.com/vitessio/vitess/pull/14064) + * VTGate FK stress tests suite: improvements [#14098](https://github.com/vitessio/vitess/pull/14098) +#### TabletManager + * BaseShowTablesWithSizes: optimize MySQL 8.0 query [#13375](https://github.com/vitessio/vitess/pull/13375) + * Support views in BaseShowTablesWithSizes for MySQL 8.0 [#13394](https://github.com/vitessio/vitess/pull/13394) +#### vtctl + * `ApplySchema`: support `--batch-size` flag in 'direct' strategy [#13693](https://github.com/vitessio/vitess/pull/13693) +### Regression +#### Backup and Restore + * Fix backup on s3 like storage [#14311](https://github.com/vitessio/vitess/pull/14311) + * [release-18.0] Fix backup on s3 like storage (#14311) [#14362](https://github.com/vitessio/vitess/pull/14362) +#### Query Serving + * fix: ShardedRouting clone to clone slice of reference correctly [#13265](https://github.com/vitessio/vitess/pull/13265) + * Handle inconsistent state error in query buffering [#13333](https://github.com/vitessio/vitess/pull/13333) + * 
fix: insert with negative value [#14244](https://github.com/vitessio/vitess/pull/14244) + * [release-18.0] fix: insert with negative value (#14244) [#14247](https://github.com/vitessio/vitess/pull/14247) + * [release-18.0] use aggregation engine over distinct engine when overlapping order by (#14359) [#14361](https://github.com/vitessio/vitess/pull/14361) + * [release-18.0] Performance Fixes for Vitess 18 (#14383) [#14393](https://github.com/vitessio/vitess/pull/14393) + * [release-18.0] tuple: serialized form (#14392) [#14394](https://github.com/vitessio/vitess/pull/14394) +### Release +#### Build/CI + * Fix incorrect output in release scripts [#13385](https://github.com/vitessio/vitess/pull/13385) + * Optimize release notes generation to use GitHub Milestones [#13398](https://github.com/vitessio/vitess/pull/13398) +#### CLI + * Add vtctldclient info to the 18.0 summary [#14259](https://github.com/vitessio/vitess/pull/14259) +#### Documentation + * Add end-of-life documentation + re-organize internal documentation [#13401](https://github.com/vitessio/vitess/pull/13401) + * Update known issues in `v16.x` and `v17.0.0` [#13618](https://github.com/vitessio/vitess/pull/13618) +#### General + * Copy v17.0.0-rc changelog to main [#13248](https://github.com/vitessio/vitess/pull/13248) + * Update release notes for 17.0.0-rc2 [#13306](https://github.com/vitessio/vitess/pull/13306) + * Forward port of release notes changes from v17.0.0 GA [#13370](https://github.com/vitessio/vitess/pull/13370) + * Add v15.0.4, v16.0.3, and v17.0.1 changelogs [#13661](https://github.com/vitessio/vitess/pull/13661) + * Copy release notes for v17.0.2 and v16.0.4 [#13811](https://github.com/vitessio/vitess/pull/13811) + * Code freeze of release-18.0 [#14131](https://github.com/vitessio/vitess/pull/14131) + * Release of v18.0.0-rc1 [#14136](https://github.com/vitessio/vitess/pull/14136) + * Back to dev mode after `v18.0.0-rc1` release [#14169](https://github.com/vitessio/vitess/pull/14169) + * 
Code freeze of release-18.0 [#14405](https://github.com/vitessio/vitess/pull/14405) +### Testing +#### Build/CI + * Flakes: Address TestMigrate Failures [#12866](https://github.com/vitessio/vitess/pull/12866) + * [vipersync] skip flaky test [#13501](https://github.com/vitessio/vitess/pull/13501) + * [vipersync] deflake TestWatchConfig [#13545](https://github.com/vitessio/vitess/pull/13545) + * Fix bug in `fileNameFromPosition` test helper [#13778](https://github.com/vitessio/vitess/pull/13778) + * Flakes: Delete VTDATAROOT files in reparent test teardown within CI [#13793](https://github.com/vitessio/vitess/pull/13793) + * CI: Misc test improvements to limit failures with various runners [#13825](https://github.com/vitessio/vitess/pull/13825) + * actually test vtcombo [#14095](https://github.com/vitessio/vitess/pull/14095) + * Remove FOSSA Test from CI until we can do it in a secure way [#14119](https://github.com/vitessio/vitess/pull/14119) +#### Cluster management + * Fix `Fakemysqldaemon` to store the host and port after `SetReplicationSource` call [#13439](https://github.com/vitessio/vitess/pull/13439) + * Deflake `TestPlannedReparentShardPromoteReplicaFail` [#13548](https://github.com/vitessio/vitess/pull/13548) + * Flaky tests: Fix wrangler tests [#13568](https://github.com/vitessio/vitess/pull/13568) +#### General + * [CI] deflake viper sync tests [#13185](https://github.com/vitessio/vitess/pull/13185) + * Remove `--disable_active_reparents` flag in vttablet-up.sh [#13504](https://github.com/vitessio/vitess/pull/13504) + * Add leak checking for vtgate tests [#13835](https://github.com/vitessio/vitess/pull/13835) +#### Online DDL + * Fix potential panics due to "Fail in goroutine after test completed" [#13596](https://github.com/vitessio/vitess/pull/13596) + * [OnlineDDL] add label so break works as intended [#13691](https://github.com/vitessio/vitess/pull/13691) +#### Query Serving + * Deflake `TestQueryTimeoutWithDual` test 
[#13405](https://github.com/vitessio/vitess/pull/13405) + * Fix `TestGatewayBufferingWhileReparenting` flakiness [#13469](https://github.com/vitessio/vitess/pull/13469) + * fix TestQueryTimeoutWithTables flaky test [#13579](https://github.com/vitessio/vitess/pull/13579) + * schemadiff: add time measure test for massive schema load and diff [#13697](https://github.com/vitessio/vitess/pull/13697) + * End to end testing suite for foreign keys [#13870](https://github.com/vitessio/vitess/pull/13870) + * Fix setup order to avoid races [#13871](https://github.com/vitessio/vitess/pull/13871) + * Use correct syntax in test [#13907](https://github.com/vitessio/vitess/pull/13907) + * test: added test to check binlogs to contain the cascade events [#13970](https://github.com/vitessio/vitess/pull/13970) + * E2E Fuzzing testing for foreign keys [#13980](https://github.com/vitessio/vitess/pull/13980) + * Fix foreign key plan tests expectation [#13997](https://github.com/vitessio/vitess/pull/13997) + * [release-18.0] vtgate: Allow more errors for the warning check (#14421) [#14423](https://github.com/vitessio/vitess/pull/14423) +#### VReplication + * Flakes: remove non-determinism from vtctldclient MoveTables unit test [#13765](https://github.com/vitessio/vitess/pull/13765) + * Flakes: empty vtdataroot before starting a new vreplication e2e test [#13803](https://github.com/vitessio/vitess/pull/13803) + * Flakes: Add recently added 'select rows_copied' query to ignore list [#13993](https://github.com/vitessio/vitess/pull/13993) + * [release-18.0] TestStreamMigrateMainflow: fix panic in test [#14425](https://github.com/vitessio/vitess/pull/14425) +#### VTorc + * Fix flakiness in `TestDeadPrimaryRecoversImmediately` [#13232](https://github.com/vitessio/vitess/pull/13232) + * Fix flakiness in VTOrc tests [#13489](https://github.com/vitessio/vitess/pull/13489) + * Skip flaky test `TestReadOutdatedInstanceKeys` [#13561](https://github.com/vitessio/vitess/pull/13561) + * Reintroduce 
`TestReadOutdatedInstanceKeys` with debugging information [#13562](https://github.com/vitessio/vitess/pull/13562) +#### vtctl + * Fix merge conflict with new tests [#13869](https://github.com/vitessio/vitess/pull/13869) + diff --git a/changelog/18.0/18.0.0/release_notes.md b/changelog/18.0/18.0.0/release_notes.md new file mode 100644 index 00000000000..9851245a648 --- /dev/null +++ b/changelog/18.0/18.0.0/release_notes.md @@ -0,0 +1,326 @@ +# Release of Vitess v18.0.0 +## Summary + +### Table of Contents + +- **[Major Changes](#major-changes)** + - **[Breaking Changes](#breaking-changes)** + - [Local examples now use etcd v3 storage and API](#local-examples-etcd-v3) + - **[New command line flags and behavior](#new-flag)** + - [VTOrc flag `--allow-emergency-reparent`](#new-flag-toggle-ers) + - [VTOrc flag `--change-tablets-with-errant-gtid-to-drained`](#new-flag-errant-gtid-convert) + - [ERS sub flag `--wait-for-all-tablets`](#new-ers-subflag) + - [VTGate flag `--grpc-send-session-in-streaming`](#new-vtgate-streaming-sesion) + - **[Experimental Foreign Key Support](#foreign-keys)** + - **[VTAdmin](#vtadmin)** + - [Updated to node v18.16.0](#update-node) + - **[Deprecations and Deletions](#deprecations-and-deletions)** + - [Legacy Client Binaries](#legacy-client-binaries) + - [Deprecated Flags](#deprecated-flags) + - [Deprecated Stats](#deprecated-stats) + - [Deleted Flags](#deleted-flags) + - [Deleted `V3` planner](#deleted-v3) + - [Deleted `k8stopo`](#deleted-k8stopo) + - [Deleted `vtgr`](#deleted-vtgr) + - [Deleted `query_analyzer`](#deleted-query_analyzer) + - **[New Stats](#new-stats)** + - [VTGate Vindex unknown parameters](#vtgate-vindex-unknown-parameters) + - [VTBackup stat `Phase`](#vtbackup-stat-phase) + - [VTBackup stat `PhaseStatus`](#vtbackup-stat-phase-status) + - [Backup and restore metrics for AWS S3](#backup-restore-metrics-aws-s3) + - [VTCtld and VTOrc reparenting stats](#vtctld-and-vtorc-reparenting-stats) + - **[VTTablet](#vttablet)** + - 
[VTTablet: New ResetSequences RPC](#vttablet-new-rpc-reset-sequences) + - **[Docker](#docker)** + - [Debian: Bookworm added and made default](#debian-bookworm) + - [Debian: Buster removed](#debian-buster) + - **[Durability Policies](#durability-policies)** + - [New Durability Policies](#new-durability-policies) + +## Major Changes + +### Breaking Changes + +#### Local examples now use etcd v3 storage and API +In previous releases the [local examples](https://github.com/vitessio/vitess/tree/main/examples/local) were +explicitly using etcd v2 storage (`etcd --enable-v2=true`) and API (`ETCDCTL_API=2`) mode. We have now +removed this legacy etcd usage and instead use the new (default) etcd v3 storage and API. Please see +[PR #13791](https://github.com/vitessio/vitess/pull/13791) for details. If you are using the local +examples in any sort of long-term non-testing capacity, then you will need to explicitly use the v2 storage +and API mode or [migrate your existing data from v2 to v3](https://etcd.io/docs/v3.5/tutorials/how-to-migrate/). + +### New command line flags and behavior + +#### VTOrc flag `--allow-emergency-reparent` + +VTOrc has a new flag `--allow-emergency-reparent` that specifies whether VTOrc is allowed to run emergency +failover operations. Users that want VTOrc to fix replication issues, but don't want it to run any failovers +should use this flag. This flag defaults to `true` which corresponds to the default behavior from prior releases. + +#### VTOrc flag `--change-tablets-with-errant-gtid-to-drained` + +VTOrc has a new flag `--change-tablets-with-errant-gtid-to-drained` that allows users to choose whether VTOrc should change the +tablet type of tablets with errant GTIDs to `DRAINED`. By default, this flag is disabled. + +This feature allows users to configure VTOrc such that any tablet that encounters errant GTIDs is automatically taken out of the +serving graph. 
These tablets can then be inspected for what the errant GTIDs are, and once fixed, they can rejoin the cluster. + +#### ERS sub flag `--wait-for-all-tablets` + +vtctldclient command `EmergencyReparentShard` has a new sub-flag `--wait-for-all-tablets` that makes `EmergencyReparentShard` wait +for a response from all the tablets. Originally `EmergencyReparentShard` was meant only to be run when a primary tablet is unreachable. +We have realized now that there are cases when replication is broken but all tablets are reachable. In these cases, it is advisable to +call `EmergencyReparentShard` with `--wait-for-all-tablets` so that it does not ignore any of the tablets. + +#### VTGate GRPC stream execute session flag `--grpc-send-session-in-streaming` + +This flag enables transaction support on VTGate's `StreamExecute` gRPC API. +When this is enabled, `StreamExecute` will return the session in the last packet of the response. +Users should enable this flag only after client code has been changed to expect such a packet. + +It is disabled by default. + +### Experimental Foreign Key Support + +A new optional field `foreignKeyMode` has been added to the VSchema. This field can be provided for each keyspace. The VTGate flag `--foreign_key_mode` has been deprecated in favor of this field. + +There are 3 foreign key modes now supported in Vitess - +1. `unmanaged` - + This mode represents the default behavior in Vitess, where it does not manage foreign key column references. Users are responsible for configuring foreign keys in MySQL in such a way that related rows, as determined by foreign keys, reside within the same shard. +2. `managed` [EXPERIMENTAL] - + In this experimental mode, Vitess is fully aware of foreign key relationships and actively tracks foreign key constraints using the schema tracker. VTGate will handle DML operations with foreign keys and correctly cascade updates and deletes. 
+ It will also verify `restrict` constraints and validate the existence of parent rows before inserting child rows. + This ensures that all child operations are logged in binary logs, unlike the InnoDB implementation of foreign keys. + This allows the usage of VReplication workflows with foreign keys. + Implementation details are documented in the [RFC for foreign keys](https://github.com/vitessio/vitess/issues/12967). +3. `disallow` - + In this mode Vitess explicitly disallows any DDL statements that try to create a foreign key constraint. This mode is equivalent to running VTGate with the flag `--foreign_key_mode=disallow`. + +#### Upgrade process + +After upgrading from v17 to v18, users should specify the correct foreign key mode for all their keyspaces in the VSchema using the new property. +Once this change has taken effect, the deprecated flag `--foreign_key_mode` can be dropped from all VTGates. Note that this is only required if running in `disallow` mode. +No action is needed to use `unmanaged` mode. + +### VTAdmin + +#### vtadmin-web updated to node v18.16.0 (LTS) + +Building `vtadmin-web` now requires node >= v18.16.0 (LTS). Breaking changes from v16 to v18 are listed +in https://nodejs.org/en/blog/release/v18, but none apply to VTAdmin. Full details on node v18.16.0 are listed +on https://nodejs.org/en/blog/release/v18.16.0. + +### Deprecations and Deletions + +#### Legacy Client Binaries + +`vtctldclient` is our new modern *Vitess controller daemon* (`vtctld`) *client* – which you will use to perform commands +and take actions in your Vitess clusters. It is [replacing the legacy `vtctl`/`vtctlclient` binaries](https://vitess.io/docs/18.0/reference/vtctldclient-transition/overview/). 
+Some of the benefits are: + +- [Dedicated RPCs for each command](https://github.com/vitessio/vitess/blob/release-18.0/proto/vtctlservice.proto#L32-L353) +that are used between `vtctldclient` and `vtctld` – this offers clean separation of commands and makes it easier to +develop new features without impacting other commands. This also presents an [API that other clients (both Vitess and +3rd party) can use to interface with Vitess](https://vitess.io/blog/2023-04-17-vtctldserver-api/). +- Use of modern frameworks: [`pFlag`](https://github.com/spf13/pflag#readme), [`Cobra`](https://cobra.dev), and [`Viper`](https://github.com/spf13/viper#readme). +This makes development easier while also offering a better UX. For example, this offers a way to use +[configuration files](https://vitess.io/docs/18.0/reference/viper/config-files/) with support for +[dynamic configuration](https://vitess.io/docs/18.0/reference/viper/dynamic-values/) ([see also](https://github.com/vitessio/vitess/blob/release-18.0/doc/viper/viper.md)). +- The [reference documentation](https://vitess.io/docs/18.0/reference/programs/vtctldclient/) is now built through code. This +removes a burden from developers while helping users by ensuring the docs are always correct and up-to-date. + +In Vitess 18 we have completed migrating all client commands to `vtctldclient` – the last ones being the [OnlineDDL](https://github.com/vitessio/vitess/issues/13712) +and [VReplication](https://github.com/vitessio/vitess/issues/12152) commands. With this work now completed, the +legacy `vtctl`/`vtctlclient` binaries are now fully deprecated and we plan to remove them in Vitess 19. You should +[begin your transition](https://vitess.io/docs/18.0/reference/vtctldclient-transition/) before upgrading to Vitess 18. 
+ +#### Deprecated Command Line Flags + +Throttler related `vttablet` flags: + +- `--throttle_threshold` is deprecated and will be removed in `v19` +- `--throttle_metrics_query` is deprecated and will be removed in `v19` +- `--throttle_metrics_threshold` is deprecated and will be removed in `v19` +- `--throttle_check_as_check_self` is deprecated and will be removed in `v19` +- `--throttler-config-via-topo` is deprecated after assumed `true` in `v17`. It will be removed in a future version. + +Cache related `vttablet` flags: + +- `--queryserver-config-query-cache-lfu` is deprecated and will be removed in `v19`. The query cache always uses a LFU implementation now. +- `--queryserver-config-query-cache-size` is deprecated and will be removed in `v19`. This option only applied to LRU caches, which are now unsupported. + +Buffering related `vtgate` flags: + +- `--buffer_implementation` is deprecated and will be removed in `v19` + +Cache related `vtgate` flags: + +- `--gate_query_cache_lfu` is deprecated and will be removed in `v19`. The query cache always uses a LFU implementation now. +- `--gate_query_cache_size` is deprecated and will be removed in `v19`. This option only applied to LRU caches, which are now unsupported. + +VTGate flags: + +- `--schema_change_signal_user` is deprecated and will be removed in `v19` +- `--foreign_key_mode` is deprecated and will be removed in `v19`. For more detail read the [foreign keys](#foreign-keys) section. + +VDiff v1: + +[VDiff v2 was added in Vitess 15](https://vitess.io/blog/2022-11-22-vdiff-v2/) and marked as GA in 16. +The [legacy v1 client command](https://vitess.io/docs/18.0/reference/vreplication/vdiffv1/) is now deprecated in Vitess 18 and will be **removed** in Vitess 19. +Please switch all of your usage to the [new VDiff client](https://vitess.io/docs/18.0/reference/vreplication/vdiff/) command ASAP. 
+ + +#### Deprecated Stats + +The following `EmergencyReparentShard` stats are deprecated in `v18` and will be removed in `v19`: +- `ers_counter` +- `ers_success_counter` +- `ers_failure_counter` + +These metrics are replaced by [new reparenting stats introduced in `v18`](#vtctld-and-vtorc-reparenting-stats). + +VTBackup stat `DurationByPhase` is deprecated. Use the binary-valued `Phase` stat instead. + +#### Deleted Command Line Flags + +Flags in `vtcombo`: +- `--vtctld_addr` + +Flags in `vtctldclient ApplySchema`: +- `--skip-preflight` + +Flags in `vtctl ApplySchema`: +- `--skip_preflight` + +Flags in `vtgate`: +- `--vtctld_addr` + +Flags in `vttablet`: +- `--vtctld_addr` +- `--use_super_read_only` +- `--disable-replication-manager` +- `--init_populate_metadata` +- `--queryserver-config-pool-prefill-parallelism` +- `--queryserver-config-stream-pool-prefill-parallelism` +- `--queryserver-config-transaction-pool-prefill-parallelism` +- `--queryserver-config-schema-change-signal-interval` +- `--enable-lag-throttler` + +Flags in `vtctld`: +- `--vtctld_show_topology_crud` +- `--durability_policy` + +Flags in `vtorc`: +- `--lock-shard-timeout` +- `--orc_web_dir` + +#### Deleted `v3` planner + +The `Gen4` planner has been the default planner since Vitess 14. The `v3` planner was deprecated in Vitess 15 and has been removed in Vitess 18. + +#### Deleted `k8stopo` + +`k8stopo` was deprecated in Vitess 17, see https://github.com/vitessio/vitess/issues/13298. It has now been removed. + +#### Deleted `vtgr` + +`vtgr` was deprecated in Vitess 17, see https://github.com/vitessio/vitess/issues/13300. It has now been removed. + +#### Deleted `query_analyzer` + +The undocumented `query_analyzer` binary has been removed in Vitess 18, see https://github.com/vitessio/vitess/issues/14054. + +### New stats + +#### VTGate Vindex unknown parameters + +The VTGate stat `VindexUnknownParameters` gauges unknown Vindex parameters found in the latest VSchema pulled from the topology. 
+ +#### VTBackup `Phase` stat + +In v17, the `vtbackup` stat `DurationByPhase` was added to measure the time spent by `vtbackup` in each phase. This stat turned out to be awkward to use in production, and has been replaced in v18 by a binary-valued `Phase` stat. + +`Phase` reports a 1 (active) or a 0 (inactive) for each of the following phases: + + * `CatchupReplication` + * `InitialBackup` + * `RestoreLastBackup` + * `TakeNewBackup` + +To calculate how long `vtbackup` has spent in a given phase, sum the 1-valued data points over time and multiply by the data collection or reporting interval. For example, in Prometheus: + +``` +sum_over_time(vtbackup_phase{phase="TakeNewBackup"}) * <data collection or reporting interval> +``` + +#### VTBackup `PhaseStatus` stat + +`PhaseStatus` reports a 1 (active) or a 0 (inactive) for each of the following phases and statuses: + + * `CatchupReplication` phase has statuses `Stalled` and `Stopped`. + * `Stalled` is set to `1` when replication stops advancing. + * `Stopped` is set to `1` when replication stops before `vtbackup` catches up with the primary. + +#### Backup and restore metrics for AWS S3 + +Requests to AWS S3 are instrumented in backup and restore metrics. For example: + +``` +vtbackup_backup_count{component="BackupStorage",implementation="S3",operation="AWS:Request:Send"} 823 +vtbackup_backup_duration_nanoseconds{component="BackupStorage",implementation="S3",operation="AWS:Request:Send"} 1.33632421437e+11 +vtbackup_restore_count{component="BackupStorage",implementation="S3",operation="AWS:Request:Send"} 165 +vtbackup_restore_duration_nanoseconds{component="BackupStorage",implementation="S3",operation="AWS:Request:Send"} 1.0518302755e+10 +``` + +#### VTCtld and VTOrc reparenting stats + +New VTCtld and VTOrc stats were added to measure frequency of reparents by keyspace/shard: +- `emergency_reparent_counts` - Number of times `EmergencyReparentShard` has been run. It is further subdivided by the keyspace, shard and the result of the operation. 
+- `planned_reparent_counts` - Number of times `PlannedReparentShard` has been run. It is further subdivided by the keyspace, shard and the result of the operation. + +Also, the `reparent_shard_operation_timings` stat was added to provide per-operation timings of reparent operations. + +### VTTablet + +#### New ResetSequences RPC + +A new VTTablet RPC `ResetSequences` has been added, which is being used by `MoveTables` and `Migrate` for workflows +where a `sequence` table is being moved (https://github.com/vitessio/vitess/pull/13238). This has an impact on the +Vitess upgrade process from an earlier version if you need to use such a workflow before the entire cluster is upgraded. + +Any MoveTables or Migrate workflow that moves a sequence table should only be run after all Vitess components have been +upgraded, and no upgrade should be done while such a workflow is in progress. + +#### New Dry-run/monitoring-only mode for the transaction throttler + +A new CLI flag `--tx-throttler-dry-run` to set the Transaction Throttler to monitoring-only/dry-run mode has been added. +If the transaction throttler is enabled with `--enable-tx-throttler` and the new dry-run flag is also specified, the +tablet will not actually throttle any transactions; however, it will increase the counters for transactions throttled +(`vttablet_transaction_throttler_throttled`). This allows users to deploy the transaction throttler in production and +gain observability on how much throttling would take place, without actually throttling any requests. + +### Docker Builds + +#### Bookworm added and made default + +Bookworm was released on 2023-06-10, and will be the new default base container for Docker builds. +Bullseye images will still be built and available as long as the OS build is current, tagged with the `-bullseye` suffix. + +#### Buster removed + +Buster LTS support will stop in June 2024, and Vitess 18 will be supported through October 2024. 
+To prevent supporting a deprecated buster build for several months after June 2024, we are preemptively +removing Vitess support for Buster. + +### Durability Policies + +#### New Durability Policies + +Two new built-in durability policies have been added in Vitess 18: `semi_sync_with_rdonly_ack` and `cross_cell_with_rdonly_ack`. +These policies are similar to `semi_sync` and `cross_cell` respectively, the only difference is that `rdonly` tablets can also send semi-sync ACKs. +------------ +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/18.0/18.0.0/changelog.md). + +The release includes 420 merged Pull Requests. + +Thanks to all our contributors: @GuptaManan100, @Juneezee, @L3o-pold, @adsr, @ajm188, @app/dependabot, @app/github-actions, @app/vitess-bot, @arvind-murty, @austenLacy, @brendar, @davidpiegza, @dbussink, @deepthi, @derekperkins, @ejortegau, @frouioui, @harshit-gangal, @hkdsun, @jfg956, @jspawar, @mattlord, @maxenglander, @mdlayher, @notfelineit, @olyazavr, @pbibra, @peterlyoo, @rafer, @rohit-nayak-ps, @shlomi-noach, @systay, @timvaillancourt, @vmg, @yields + diff --git a/changelog/18.0/18.0.0/summary.md b/changelog/18.0/18.0.0/summary.md new file mode 100644 index 00000000000..eb2b6692201 --- /dev/null +++ b/changelog/18.0/18.0.0/summary.md @@ -0,0 +1,318 @@ +## Summary + +### Table of Contents + +- **[Major Changes](#major-changes)** + - **[Breaking Changes](#breaking-changes)** + - [Local examples now use etcd v3 storage and API](#local-examples-etcd-v3) + - **[New command line flags and behavior](#new-flag)** + - [VTOrc flag `--allow-emergency-reparent`](#new-flag-toggle-ers) + - [VTOrc flag `--change-tablets-with-errant-gtid-to-drained`](#new-flag-errant-gtid-convert) + - [ERS sub flag `--wait-for-all-tablets`](#new-ers-subflag) + - [VTGate flag `--grpc-send-session-in-streaming`](#new-vtgate-streaming-sesion) + - **[Experimental Foreign Key Support](#foreign-keys)** + - 
**[VTAdmin](#vtadmin)** + - [Updated to node v18.16.0](#update-node) + - **[Deprecations and Deletions](#deprecations-and-deletions)** + - [Legacy Client Binaries](#legacy-client-binaries) + - [Deprecated Flags](#deprecated-flags) + - [Deprecated Stats](#deprecated-stats) + - [Deleted Flags](#deleted-flags) + - [Deleted `V3` planner](#deleted-v3) + - [Deleted `k8stopo`](#deleted-k8stopo) + - [Deleted `vtgr`](#deleted-vtgr) + - [Deleted `query_analyzer`](#deleted-query_analyzer) + - **[New Stats](#new-stats)** + - [VTGate Vindex unknown parameters](#vtgate-vindex-unknown-parameters) + - [VTBackup stat `Phase`](#vtbackup-stat-phase) + - [VTBackup stat `PhaseStatus`](#vtbackup-stat-phase-status) + - [Backup and restore metrics for AWS S3](#backup-restore-metrics-aws-s3) + - [VTCtld and VTOrc reparenting stats](#vtctld-and-vtorc-reparenting-stats) + - **[VTTablet](#vttablet)** + - [VTTablet: New ResetSequences RPC](#vttablet-new-rpc-reset-sequences) + - **[Docker](#docker)** + - [Debian: Bookworm added and made default](#debian-bookworm) + - [Debian: Buster removed](#debian-buster) + - **[Durability Policies](#durability-policies)** + - [New Durability Policies](#new-durability-policies) + +## Major Changes + +### Breaking Changes + +#### Local examples now use etcd v3 storage and API +In previous releases the [local examples](https://github.com/vitessio/vitess/tree/main/examples/local) were +explicitly using etcd v2 storage (`etcd --enable-v2=true`) and API (`ETCDCTL_API=2`) mode. We have now +removed this legacy etcd usage and instead use the new (default) etcd v3 storage and API. Please see +[PR #13791](https://github.com/vitessio/vitess/pull/13791) for details. If you are using the local +examples in any sort of long-term non-testing capacity, then you will need to explicitly use the v2 storage +and API mode or [migrate your existing data from v2 to v3](https://etcd.io/docs/v3.5/tutorials/how-to-migrate/). 
+ +### New command line flags and behavior + +#### VTOrc flag `--allow-emergency-reparent` + +VTOrc has a new flag `--allow-emergency-reparent` that specifies whether VTOrc is allowed to run emergency +failover operations. Users that want VTOrc to fix replication issues, but don't want it to run any failovers +should use this flag. This flag defaults to `true` which corresponds to the default behavior from prior releases. + +#### VTOrc flag `--change-tablets-with-errant-gtid-to-drained` + +VTOrc has a new flag `--change-tablets-with-errant-gtid-to-drained` that allows users to choose whether VTOrc should change the +tablet type of tablets with errant GTIDs to `DRAINED`. By default, this flag is disabled. + +This feature allows users to configure VTOrc such that any tablet that encounters errant GTIDs is automatically taken out of the +serving graph. These tablets can then be inspected for what the errant GTIDs are, and once fixed, they can rejoin the cluster. + +#### ERS sub flag `--wait-for-all-tablets` + +vtctldclient command `EmergencyReparentShard` has a new sub-flag `--wait-for-all-tablets` that makes `EmergencyReparentShard` wait +for a response from all the tablets. Originally `EmergencyReparentShard` was meant only to be run when a primary tablet is unreachable. +We have realized now that there are cases when replication is broken but all tablets are reachable. In these cases, it is advisable to +call `EmergencyReparentShard` with `--wait-for-all-tablets` so that it does not ignore any of the tablets. + +#### VTGate GRPC stream execute session flag `--grpc-send-session-in-streaming` + +This flag enables transaction support on VTGate's `StreamExecute` gRPC API. +When this is enabled, `StreamExecute` will return the session in the last packet of the response. +Users should enable this flag only after client code has been changed to expect such a packet. + +It is disabled by default. 
+ +### Experimental Foreign Key Support + +A new optional field `foreignKeyMode` has been added to the VSchema. This field can be provided for each keyspace. The VTGate flag `--foreign_key_mode` has been deprecated in favor of this field. + +There are 3 foreign key modes now supported in Vitess - +1. `unmanaged` - + This mode represents the default behavior in Vitess, where it does not manage foreign key column references. Users are responsible for configuring foreign keys in MySQL in such a way that related rows, as determined by foreign keys, reside within the same shard. +2. `managed` [EXPERIMENTAL] - + In this experimental mode, Vitess is fully aware of foreign key relationships and actively tracks foreign key constraints using the schema tracker. VTGate will handle DML operations with foreign keys and correctly cascade updates and deletes. + It will also verify `restrict` constraints and validate the existence of parent rows before inserting child rows. + This ensures that all child operations are logged in binary logs, unlike the InnoDB implementation of foreign keys. + This allows the usage of VReplication workflows with foreign keys. + Implementation details are documented in the [RFC for foreign keys](https://github.com/vitessio/vitess/issues/12967). +3. `disallow` - + In this mode Vitess explicitly disallows any DDL statements that try to create a foreign key constraint. This mode is equivalent to running VTGate with the flag `--foreign_key_mode=disallow`. + +#### Upgrade process + +After upgrading from v17 to v18, users should specify the correct foreign key mode for all their keyspaces in the VSchema using the new property. +Once this change has taken effect, the deprecated flag `--foreign_key_mode` can be dropped from all VTGates. Note that this is only required if running in `disallow` mode. +No action is needed to use `unmanaged` mode. 
+ +### VTAdmin + +#### vtadmin-web updated to node v18.16.0 (LTS) + +Building `vtadmin-web` now requires node >= v18.16.0 (LTS). Breaking changes from v16 to v18 are listed +in https://nodejs.org/en/blog/release/v18, but none apply to VTAdmin. Full details on node v18.16.0 are listed +on https://nodejs.org/en/blog/release/v18.16.0. + +### Deprecations and Deletions + +#### Legacy Client Binaries + +`vtctldclient` is our new modern *Vitess controller daemon* (`vtctld`) *client* – which you will use to perform commands +and take actions in your Vitess clusters. It is [replacing the legacy `vtctl`/`vtctlclient` binaries](https://vitess.io/docs/18.0/reference/vtctldclient-transition/overview/). +Some of the benefits are: + +- [Dedicated RPCs for each command](https://github.com/vitessio/vitess/blob/release-18.0/proto/vtctlservice.proto#L32-L353) +that are used between `vtctldclient` and `vtctld` – this offers clean separation of commands and makes it easier to +develop new features without impacting other commands. This also presents an [API that other clients (both Vitess and +3rd party) can use to interface with Vitess](https://vitess.io/blog/2023-04-17-vtctldserver-api/). +- Use of modern frameworks: [`pFlag`](https://github.com/spf13/pflag#readme), [`Cobra`](https://cobra.dev), and [`Viper`](https://github.com/spf13/viper#readme). +This makes development easier while also offering a better UX. For example, this offers a way to use +[configuration files](https://vitess.io/docs/18.0/reference/viper/config-files/) with support for +[dynamic configuration](https://vitess.io/docs/18.0/reference/viper/dynamic-values/) ([see also](https://github.com/vitessio/vitess/blob/release-18.0/doc/viper/viper.md)). +- The [reference documentation](https://vitess.io/docs/18.0/reference/programs/vtctldclient/) is now built through code. This +removes a burden from developers while helping users by ensuring the docs are always correct and up-to-date. 
+ +In Vitess 18 we have completed migrating all client commands to `vtctldclient` – the last ones being the [OnlineDDL](https://github.com/vitessio/vitess/issues/13712) +and [VReplication](https://github.com/vitessio/vitess/issues/12152) commands. With this work now completed, the +legacy `vtctl`/`vtctlclient` binaries are now fully deprecated and we plan to remove them in Vitess 19. You should +[begin your transition](https://vitess.io/docs/18.0/reference/vtctldclient-transition/) before upgrading to Vitess 18. + +#### Deprecated Command Line Flags + +Throttler related `vttablet` flags: + +- `--throttle_threshold` is deprecated and will be removed in `v19` +- `--throttle_metrics_query` is deprecated and will be removed in `v19` +- `--throttle_metrics_threshold` is deprecated and will be removed in `v19` +- `--throttle_check_as_check_self` is deprecated and will be removed in `v19` +- `--throttler-config-via-topo` is deprecated after assumed `true` in `v17`. It will be removed in a future version. + +Cache related `vttablet` flags: + +- `--queryserver-config-query-cache-lfu` is deprecated and will be removed in `v19`. The query cache always uses a LFU implementation now. +- `--queryserver-config-query-cache-size` is deprecated and will be removed in `v19`. This option only applied to LRU caches, which are now unsupported. + +Buffering related `vtgate` flags: + +- `--buffer_implementation` is deprecated and will be removed in `v19` + +Cache related `vtgate` flags: + +- `--gate_query_cache_lfu` is deprecated and will be removed in `v19`. The query cache always uses a LFU implementation now. +- `--gate_query_cache_size` is deprecated and will be removed in `v19`. This option only applied to LRU caches, which are now unsupported. + +VTGate flags: + +- `--schema_change_signal_user` is deprecated and will be removed in `v19` +- `--foreign_key_mode` is deprecated and will be removed in `v19`. For more detail read the [foreign keys](#foreign-keys) section. 
+ +VDiff v1: + +[VDiff v2 was added in Vitess 15](https://vitess.io/blog/2022-11-22-vdiff-v2/) and marked as GA in 16. +The [legacy v1 client command](https://vitess.io/docs/18.0/reference/vreplication/vdiffv1/) is now deprecated in Vitess 18 and will be **removed** in Vitess 19. +Please switch all of your usage to the [new VDiff client](https://vitess.io/docs/18.0/reference/vreplication/vdiff/) command ASAP. + + +#### Deprecated Stats + +The following `EmergencyReparentShard` stats are deprecated in `v18` and will be removed in `v19`: +- `ers_counter` +- `ers_success_counter` +- `ers_failure_counter` + +These metrics are replaced by [new reparenting stats introduced in `v18`](#vtctld-and-vtorc-reparenting-stats). + +VTBackup stat `DurationByPhase` is deprecated. Use the binary-valued `Phase` stat instead. + +#### Deleted Command Line Flags + +Flags in `vtcombo`: +- `--vtctld_addr` + +Flags in `vtctldclient ApplySchema`: +- `--skip-preflight` + +Flags in `vtctl ApplySchema`: +- `--skip_preflight` + +Flags in `vtgate`: +- `--vtctld_addr` + +Flags in `vttablet`: +- `--vtctld_addr` +- `--use_super_read_only` +- `--disable-replication-manager` +- `--init_populate_metadata` +- `--queryserver-config-pool-prefill-parallelism` +- `--queryserver-config-stream-pool-prefill-parallelism` +- `--queryserver-config-transaction-pool-prefill-parallelism` +- `--queryserver-config-schema-change-signal-interval` +- `--enable-lag-throttler` + +Flags in `vtctld`: +- `--vtctld_show_topology_crud` +- `--durability_policy` + +Flags in `vtorc`: +- `--lock-shard-timeout` +- `--orc_web_dir` + +#### Deleted `v3` planner + +The `Gen4` planner has been the default planner since Vitess 14. The `v3` planner was deprecated in Vitess 15 and has been removed in Vitess 18. + +#### Deleted `k8stopo` + +`k8stopo` was deprecated in Vitess 17, see https://github.com/vitessio/vitess/issues/13298. It has now been removed. 
+ +#### Deleted `vtgr` + +`vtgr` was deprecated in Vitess 17, see https://github.com/vitessio/vitess/issues/13300. It has now been removed. + +#### Deleted `query_analyzer` + +The undocumented `query_analyzer` binary has been removed in Vitess 18, see https://github.com/vitessio/vitess/issues/14054. + +### New stats + +#### VTGate Vindex unknown parameters + +The VTGate stat `VindexUnknownParameters` gauges unknown Vindex parameters found in the latest VSchema pulled from the topology. + +#### VTBackup `Phase` stat + +In v17, the `vtbackup` stat `DurationByPhase` stat was added to measure the time spent by `vtbackup` in each phase. This stat turned out to be awkward to use in production, and has been replaced in v18 by a binary-valued `Phase` stat. + +`Phase` reports a 1 (active) or a 0 (inactive) for each of the following phases: + + * `CatchupReplication` + * `InitialBackup` + * `RestoreLastBackup` + * `TakeNewBackup` + +To calculate how long `vtbackup` has spent in a given phase, sum the 1-valued data points over time and multiply by the data collection or reporting interval. For example, in Prometheus: + +``` +sum_over_time(vtbackup_phase{phase="TakeNewBackup"}) * +``` +#### VTBackup `PhaseStatus` stat + +`PhaseStatus` reports a 1 (active) or a 0 (inactive) for each of the following phases and statuses: + + * `CatchupReplication` phase has statuses `Stalled` and `Stopped`. + * `Stalled` is set to `1` when replication stops advancing. + * `Stopped` is set to `1` when replication stops before `vtbackup` catches up with the primary. + +#### Backup and restore metrics for AWS S3 + +Requests to AWS S3 are instrumented in backup and restore metrics. 
For example: + +``` +vtbackup_backup_count{component="BackupStorage",implementation="S3",operation="AWS:Request:Send"} 823 +vtbackup_backup_duration_nanoseconds{component="BackupStorage",implementation="S3",operation="AWS:Request:Send"} 1.33632421437e+11 +vtbackup_restore_count{component="BackupStorage",implementation="S3",operation="AWS:Request:Send"} 165 +vtbackup_restore_count{component="BackupStorage",implementation="S3",operation="AWS:Request:Send"} 165 +``` + +#### VTCtld and VTOrc reparenting stats + +New VTCtld and VTOrc stats were added to measure frequency of reparents by keyspace/shard: +- `emergency_reparent_counts` - Number of times `EmergencyReparentShard` has been run. It is further subdivided by the keyspace, shard and the result of the operation. +- `planned_reparent_counts` - Number of times `PlannedReparentShard` has been run. It is further subdivided by the keyspace, shard and the result of the operation. + +Also, the `reparent_shard_operation_timings` stat was added to provide per-operation timings of reparent operations. + +### VTTablet + +#### New ResetSequences rpc + +A new VTTablet RPC `ResetSequences` has been added, which is being used by `MoveTables` and `Migrate` for workflows +where a `sequence` table is being moved (https://github.com/vitessio/vitess/pull/13238). This has an impact on the +Vitess upgrade process from an earlier version if you need to use such a workflow before the entire cluster is upgraded. + +Any MoveTables or Migrate workflow that moves a sequence table should only be run after all vitess components have been +upgraded, and no upgrade should be done while such a workflow is in progress. + +#### New Dry-run/monitoring-only mode for the transaction throttler + +A new CLI flag `--tx-throttler-dry-run` to set the Transaction Throttler to monitoring-only/dry-run mode has been added. 
+If the transaction throttler is enabled with `--enable-tx-throttler` and the new dry-run flag is also specified, the +tablet will not actually throttle any transactions; however, it will increase the counters for transactions throttled +(`vttablet_transaction_throttler_throttled`). This allows users to deploy the transaction throttler in production and +gain observability on how much throttling would take place, without actually throttling any requests. + +### Docker Builds + +#### Bookworm added and made default + +Bookworm was released on 2023-06-10, and will be the new default base container for Docker builds. +Bullseye images will still be built and available as long as the OS build is current, tagged with the `-bullseye` suffix. + +#### Buster removed + +Buster LTS supports will stop in June 2024, and Vitess 18 will be supported through October 2024. +To prevent supporting a deprecated buster build for several months after June 2024, we are preemptively +removing Vitess support for Buster. + +### Durability Policies + +#### New Durability Policies + +Two new built-in durability policies have been added in Vitess 18: `semi_sync_with_rdonly_ack` and `cross_cell_with_rdonly_ack`. +These policies are similar to `semi_sync` and `cross_cell` respectively, the only difference is that `rdonly` tablets can also send semi-sync ACKs. 
\ No newline at end of file diff --git a/changelog/18.0/README.md b/changelog/18.0/README.md new file mode 100644 index 00000000000..97676dc7e39 --- /dev/null +++ b/changelog/18.0/README.md @@ -0,0 +1,4 @@ +## v18.0 +* **[18.0.0](18.0.0)** + * [Changelog](18.0.0/changelog.md) + * [Release Notes](18.0.0/release_notes.md) diff --git a/changelog/README.md b/changelog/README.md index 97c338f9942..ffb8d698b28 100644 --- a/changelog/README.md +++ b/changelog/README.md @@ -1,4 +1,5 @@ ## Releases +* [18.0](18.0) * [17.0](17.0) * [16.0](16.0) * [15.0](15.0) diff --git a/config/user.json b/config/user.json index 28fba88b3c9..773f1b3156e 100644 --- a/config/user.json +++ b/config/user.json @@ -40,7 +40,7 @@ "KeySpaces": [ {"Name":"*", "WhiteIPs": []} ], - "Privilege": 1, + "Privilege": 255, "ReadRole": 1 }, "mysql_rs": { diff --git a/dev.env b/dev.env index 7426dde45f2..b90ef7eed40 100644 --- a/dev.env +++ b/dev.env @@ -25,9 +25,6 @@ source ./build.env export VTPORTSTART=6700 -# Add chromedriver to path for Selenium tests. -PATH=$(prepend_path "$PATH" "$VTROOT/dist/chromedriver") - # Node path. 
PATH=$(prepend_path "$PATH" "$VTROOT/dist/node/bin") export PATH diff --git a/doc/VIT-03-report-security-audit.pdf b/doc/VIT-03-report-security-audit.pdf new file mode 100644 index 0000000000000000000000000000000000000000..500ee693377b223a0f4d92786d88dde7fff2620f GIT binary patch literal 1157423 zcmc$G^Z9pP3&1EB+h_e;r03c_a39?^gL&{?<>)BE8E&z zKY#JRKjQ!2AE~>&G@<8ys$ytqV&r&+Tg}PPasR)_SwA;3Im4}GVeDv5&wGde?ip@b z6ALqQM|$DA;P(f%FKq4AUOqPh4?i?zB4RCN_@r;JHT@FC4*{xF5ZE?r8GR#K_hdyhPE&#tgiL=gwU|J|PJSdIv{)ljqiF zT;rC;{5{lXh!BLQs6m$utLU?Ze~MOQK7ErzZjkxdQ8`oJ)F7!V1yamkl1hsEG)HK2 zd`_KuD9g)t`qJq8y5cfT9j(TrPl{Jka}=yK=tttB8LQiE?{jFp{bXV~UMNO4usBIa zk9Fv9i+5PjP5%$^|MBNxK#=tbWOYVEc&0!a1$lz&BC{*CsbkrkIc?>}F$ChG=!Gy9 zcPG1ruk4@6MmFBQWwCbnJkrXVbM0C;iUe=GJ*iSK!zlJ%)kWsD2o*u4Zn3T8#q4eh z1VZXV88oY{k{4(H#q+c4O~px0@D!(T6Z~%o#2=1|#m}C~$sH!Q7~_P?EeKv3ZrdEO z9O6e1h+03Y0aFYLm#x+EL^tQe{a;tTaD@E#bJtT@;75OG$Gu$CFe|{sFF9NyuW=Ph zr_~h@U!{7Q;9b6P5o~(XMn_|3!&)u4Ptg^dnWCAM@acPMTPCWp#8Rr~S%PPrzDVbt1RB9Dbjay1QKT5?-$Vq%Lo-8aoG3 z^0aOY=dNJZt9TUrc;lSY^=dBVBg_*m^CZbnRBZN%IgMVK_9YuPfQ7v-j7!Y_=DgOh z_~fMKQ}F(TBJJ}z4xu{h=MUWbOtxxenoDR8`m|cx>!eUWPl8;Xw$#E46LokR=D*9s zM(LVv8Zjl9^wbIoDvMM%c0-}7)8C~b_g6JeHlJZfA`n+Ulhz@Bo*MTx)2P}9it*?e z*9wSinw#BJgrsJ-cRvH6<_1idA}LTQU40PkwuJV2!l&gEOw zrXr@(37wKU2%B?n(&pV0uTN2Nzn5!Gmnff#C8Fq21~%Ts_WfR|_{zSLx?u6h2% zj3~5an*y;sZ|th-WjKCg?<_dc!^9f>M{P#xb$idrM>6=F^h9Sq0Q>IA;}W@Jt4pI~ zz~98lXs@KcLO}+66Cb1_NQrO71if^hcXw5(!<$4P?!1U;7~JbM$sD=k58DVSz*fDq;A1GUT4|w#j_FZNTwmVb2$%IMlo@iIH`p2i zf8ukyA9dyjiO1XQOfW*$uNwoQvIumI`I+-0%d_Hj!u!^EYpI>+CdETH@;NtQIsH0_ zGku+G-`QWa{X>xoWMCD9kLvtLbhg_E393p&$fsJ}Q1@J5kw|t^@#+_vQ@nxu5$7K> zU{>ScvP)HLVWtnC$gkV94_)%(Wuxh*1lTBu^FN7=v(Mc>)NN}`;w6PitFwRi5B0PYNg80vP?k7C z|5IgqckRsv#FAAwuFcb9REpM9r{Av)pV|gu6EzFy_SzJ&>UDgsZqLbi=s$d<(L%mGPOZg0KIq)Oqn<47ZT5n-BuAXL;!!8= zLn`+}OtU6lKA+sf#}VLwslAZH{B^)$RrsU#d_`34(+cl+9#@F25jRs9AN1j5gi2@v zfRI~tXVEQUo^xSayJ7dPz>&KFGV&yj;=*NSc@@|^KJ<&$cPy1|98-K9*Pf^(Vhf>u zg%FMLH&lkQsfCY&zlhJBPm4>YW~LbtdH(6LrV}$-2~v3-BbY_{&Xk{eXN{g4g29SM 
z#)@~VEY#~wjp?JtmwaKsSHW6}m0dRI^s>k97eIB`@-y_F*jh@9wLgJ;URlC~)&S>v z9WAJ9n8msF{axg~nFpt{R8-@eG98T+4Ot(s0^^K^czrYMpK6(vhb@>yIpbn=MJy=_ zJ(?i0b4CHIknrH_c~0DBHy?Z+`nSWSOdjz|M<2e9xVZ(rs;V}eT@0U#rrIliiy^;GZYB&ux}%3zT?ozb#sex zx)uijL@cnI8=qF1{#0;LByOy%&Fx3sOPtO`*|q|T9>ysI;{Iac`3#y7C5de}17QZ^ z+p1PdzZW1$C&iJ%7X}W}L2&5O|8SdHdgEeDt1W4A+t#oX3jI?5g|#9$vxY$&KrC3K zeNHfAQ_g#SA*xoYLMY{Y79^=1zp35V`{1&0aN*gD2>SNBwsFKpTeD*%n#o(RDWcFE zHA@r?+x*h*=ioKozAM$lXor3g5kBtofo=T!)cZbgn$k>*;K*5VH9;9T?N4Q;+r}+Q4Dy3d1pca`0b?ysSO!Xp!>T?)=#yrxlM8+&IIxK>i5)va921!+;U zpN0}BozTtpv=1Q6ZgLfXLVGv=FWD^14Yn^3uoq%d0V^K4c{Z~MV<>V1Q4JZUz~A2rTz@i!309eJHIoA1xo)3mC5qTS!f^F9~M@faDQu&KFI z!deyH0(Qqhm8DZ}d#a&6Sj#1a@VVQ$8d}ahD!93(o9;q3*Q2nD;@vHJx`CZzsruOh zft^SZy2L8R^{IBKxwp#)kNbVN6ECEET^oXIWwwx-t)4| z8HDjphQbPAsBsUHl5kK|ix?bb52Tb1p#_Qjy2DGY{ijnX*;usjYbOx-AK0#V1q$Tn zd?LZC__=r$yXP>!dgIgdy^u2%SHYS*52@TOfb=XhDNXh^*;D4=YSH6ZAr#46VcRTh zAXF6(odhZTC=?`Mn_%?;qJB#6W89E%&&YF*^GsAsVp|wvr5X6RK!l{=TB#O-g8CFv z_m|pp-I9(`-VUy0`S2n>6=GpfZXwia#!htJe{-;u->K?0Oc#HAM;n)v@l{o!bZ5PI z;bszK?6k#xxiEn%(V^k<0k+=pdK}m3*^oj}%;1K{$bdcF2qbm9V_oFqia3CQko!H3 z;vLA$iYLJ9=fv;9W%pNz7g)d}VkoA$B~|7K!spN6-eTNu7iF7Dhi1P4P%e23ik|Kv z_mBcB_YbSXJ$0>sfoMfY#g`WAThtW^mU&sXk+#0h(n7UQU+#ThGn-`z%kV(OQ|#CY zOgfcJaZPpA0iQ+X&tv0x(%9BK4wxPNq-%V=lqXlHYrbOAt$Rz5q=TY$Zq%lp^jG-& z7mo!KoEPd7qaoE$i5RD?>9px&=Xu`fhq}Wr>NTlHNO0A?8>mJ^*_XCVJG8j8Fs}Dh z3yMDlN%1~nj$domwRv`bOUd@exE(EQ)JA=Mlfm4#@r*AJ%WtJXX^j2CC7(l6Kd#A% zjN3it1Kf}Asb-EMLR3L7RE9eo zM3%w=wt|qFUKnzHmG;*&c`z%YRs|r^rCLxbR5H_Y@`|dX_zXg6L1B9|n=eofP__Un z$w>Cxca|@;g5e)C0!tqz1v!Px`2ol_cUySD&|gbP(d&iM%3maD8>H6rxg;`$J$HX*e<=Q5|lhd$OF4FKz4^(cx z_bv^2gJ`6mFEf?Dy$hj4(yTcP#I!*PhS%^NrnX1w#p1_j{eSAfmc7_xs3c~t;Mrmm zc3=|Ksd10qZDw+q|f?bU@A2r}v%nU{XDM&!#P-`sTK=zWn!oIC#T~ zv^V!~6r`u1l<#*p4=Dg?NN@jA0g6@}L;J-oC{bO>EA19`rdbc>9nWO9^7uqATYYyX z(VZen3ft;r@4ks>L9!_hCMMD!z*5o&k?#f4?(Vc1_*nvW#%yM**0H9|8Y67%;!w5po$fImkYQ%AetZR zaMTgAd8I@9mFDLX6pxo+g7S2G-c>&fp~c(AW5t&t<24+-E5@(q*n&0RdfEt 
z^?@@G)<^pQXdm+Rc>eTG{rJ$M!Y1bG0*E%U%rq=_`*tTFNVh&O_VY@sUcd-5 zQ7Ui(G+}JJLi~z9o#X6G0u>%{N2mIHXfbnPM;!xV9sM>Pk~D}PQ;&mQG>O3!75qt@ z1!nlT3dyE^Zba=tK5P+()V3s)$;9Bmi>bp5 zapyg+vvaJbh46V+CC{UI(Vh)qE`nxn&G%0uAK2Gv$WvADGW)O`T*_oGZ?RriaA{|> zCocg2 zSZ<79Xk&mh>f4|7ega7baTqvh8<9}^G)XqH+Gsj z08RzVz0B``Yu?CcRi#Xw-<5}LR4b7FdzX;Ihph0jH1m>|)zaNX*+0@zjwg(0M%_Nh zCBsn7&`wYrK*u&NNipOql=2P@K?NdsR4vuTlr!8sH-io)C3ITZ*>6X&Dupz)FyccHr~fLYc>3|8EhJAj^A?lz+(FSeIz zakCE5(;gP&5#Nf_wltzA0iY&|rb!fYxFFYFr1!DG0YBKQVJo5?d#m72H`L&At_{~9 zI6{6{;;I&{i8X2SwF(yGB^%pNcN;W+BQt8INN1Zf0q)Mg!}_-Cl}9f%)s&peVX}7 zFKwj7pG!Wnq|~c*y5{600K#p@<#LYuzsC&`;8HJ@@;LsHEyVgG?&!t6HJhv4<9L)f zQlvnKy-g*e9i~81OPyjBH6C1jqZzLgg@6q#IEWTpe+H58DK6D6^1CmFYNUILD&3D? z8R@PQh4|J<0rLqO?@R`?7C;4qPw~ZUU|S$q*@@cA9(flmVRoT|ZopFLpzBA(HDO#c zTq>J79!cdVL3$GlQGCwp6i?5 zTK1)CJ2NOcBI-tcnr;3yojK}*nuTarb}-#o*$Ekzg6LW-km0PDtwc`Xl#)DtTa?^B zAxRoUUH@G4sJg?190|t+XB$q5E=+%4QwG2rHSWRvjOxo^kZXsetWHFSd%0o%Zxb9$ zLN12$2Uq7=JjolGPKQ_~l#|l_O61l&K8WaNcVc7)#&(iPB&7R*9{K0qm|6wWPu$C) zs)m;J5{_cI;G+oo-HqkeF0n0HSD5J1A>}-oY?nOhpmf#)wE{8c@GE+kH62lF-n+~? zc^>+Mx9Z#ARIpYFKPXjU^Mj_V0*dRRYPDJ;uR5Uy6+p=z9G^01qGmt9keA+xB77oo z%!w^O7<>i;w{z9kd{L{O6O9s@0@Ys~bGvLJVKer2QpVfj>S#vJd6yC_ML~*|$p>n1 zAFOdloj8LIU6(x3Q+-pZYC_U>T%YH+c_|WU91hpZ-p&%wJm`{7%il-?OO^K|NiM^# zQtk6Hv(yJUiSSKNXJxmq`NHy3eqfoRc9DpiHFoM(GjTiLGCPLUz7414-_=BGDl5_R zA!?+(zt9webxEzza{T}m^8th*&UI+9{9U^&^&(Z*|1}^S9a#m~JPyy7cDaUv3a}}= z@zkH)uYV@2H12lYhv>*G@|kWJ{7-^?<_dgeCKJ(N@g#oTrIYdLda?{!|I=yPmDE<372-EC>7m5NCm?{&U=TRW%N`Ig;v%wTmA+{ zC2us|?s1!9T(d#>;dT2q=Cr2WuXKEd?ecKXvyw!NdFwa44Sma;35Y?8~ z9)0_u*xa#?G2F6_?VRGZyC}txGT!U<+jvbA^;dYJvBE3gNo77xKh>=}8355kgUFk! 
z6m@hvU&-AtY#`a)$&oE8qCf6Q!v!NLuJmTHgN*yJam~=@>0Hgb&=yJG#}qJwJukOh zSiiZY{AXLPbfg#P*`DvjbtP(8OiOpu$2j zUcJizP}2Rhc7HdcZw!H+b4j;cR+VyI71mSp3{+n>=Y2P8leGB)1-BJF-Fzj9W+S>B z3~Latf6wt|E+W}8J#?HX>>EXp3i>kbvsWR@?G>jV%PxO_n znl_PK2^{$Wwe)!=GNHWDPu(l)RTp%QX(6IF$cndak1zouZ)?23jVw>EsNDGAbkepD z0{ZzSjs_FOX?CH(57MYRl3&a3+&h3-`Y6(}^4)&^M^4eT*t90UWEGLhX~prB;M zRFoU9SZQ%~4IV#*_|lVcUe4Ra{w+EJ6q`^BRz*uLS+OXU{ecgC+6}Ag4x>5SA|EeR zwr~mCR#Ep1Yc)sW85-9XmL7x)$0QMOyY{ecPW6awC3x`m`7*A(b6N#C{fuMxiUP!d zH~?**KRA((4OSlvE*$k629(fW2+z0gpDMo?@y^+l;o$8HguO6Hy;h znJ&hX-r%VYGwE$}WH53)jeNdzWLj_DK**q~o=$1B?l^bY+&I!;U#OO=-ZtLkfArJ$ zEii4wl~{wqMG5U4{X^3}#t*#OABxEQR1(5FR}7>Dy6hx3Q3WmSwVV~ZTo`u18jLsi zuM$^&BwaZ!Dg75R;*hy=RYK1-#56KtUj#iB4Tdy$g<o?ZoQ5Tl;jh4Czxb%QgQH3k8I6|LYlP$N}&*ZTmn3Q0ZAh~{-m4%me+f<=pVdyW z$Q@_KOl7>N6>qkq9vzVgWsGVUbgBssX!B~p$C&8Rw%RFR{y7(cRC#|D)RU%I=c$l) z?uX`FRm~DUPQTlo?;halL>K1yRA5Y-*F`cNFoDs|l54#>ZzQh=?T6-5KWWShfhsKm zm?J(qjE2>oPi0X^EKc6!Q?r|iK5z3B~%!EsUq1xVynj1jG z!iNGfvU$jeo>P$MQ78G2Ro_?`a?fAr&+6VD0BUw=Y^}l`{p?$Y5j7F)PruXXxqOL!!Bdq)#vHwJptT5`KD`G5e6Z(#mgq=~A%36`|k;-u(76jG$ zl)j`vRX&+}kml@d=e_^x-oeH2)aAbTZs-*KFx;9B7H>dZHv((gOz)b@eLm>iKJ^x^ z{UWI#Mpa9gY;UA_SfAU1pzN4o9iGLIC(1wt9GrHE2CCe`hZ>;nxtW#UtEpvXP=r8} z!2N7ra7C=;q6rvx)%Sd=1g_X~3Asom%Dlw~Ayn)RNUp>gj!6nolLhu+IU}9+$io?{ z6Ijh|NXA5m;}v5~jnDol74Zf6o-cpIL_PVX(Ri8sfKvVt1hs@LbFg_bwMCTfWCoPJ|b;`fm{wNH3=ofK(kzH*#v-f`BuS*^TE3 zG{fDS`=$&sFw4Bs#ti|$PUf@juk9xhQ3q`_qUNiM?rY}(rpfK$=_R{*dk6*E){irM z*V(NldD8c}@Tp#3Ms!Qcqq!0&9q16@`HaQGp@-%FRuFi$dd1Q9P1oFqmCDUQMSb}O zf#^wTuiJD-j3qTB=DJKAum!qsY~Il^C{?wV4d^yP!Q^8XkPi`FoyfF9D*G@g!Nbgw zwi3gMYEWwTt7z->cH-Cu-MB6fk*jj1bAw;W9muK4c9tV_p#wCVv1foQbdN$v{~Twd zEQSXMm`_h`LF-{gJl01uX1iEtT&4R&G&f)kpb`Tp-OAhJJ9IFgd-j&UZnV@SX}0xo zYGivC*iA(@FA}49x1=xTu&2LRE`i}+iQ;`k2#Srngw?lLw(EpY3sEKLTifRgss5XGBOSA4@tGY|ar_~kpOO5?p)+bu$f|kbWaI|_Nd%MTa z!eRE(JF{hQSur3r67m9eycGBx<^7B9VET)AHs~H_5ySU@uo4Rzc`!4g#7_T?E7fYP z^8R^ryW114e{nK@23}(<7VKg@xyHeKUrG1tMEL7kBL4KDAG%mxieeV!w?sYy4leGh 
z*I==*gJ)d3FF|m##%GT)xot{cfeAufXxjX4LM6v)>^&!W?#Q1Ph^Ul?G?dGHs`bAq zrH^^vl%V-X_rxAXZ!+3}3?V0iae*r}9f^=*spAzg<8R^ymw>zJHEIo{T|=umkPpQ@ zkjzUC|Cpq`6mYo&)F|+ys%X`_ZBwb})B^>kwfEXBQ?3b05QGily^PDt^Lq`8ko(Rl z>=di7N#|a)xU*|yHJA9R%KM#Tol<*9$q)1u;)ryldBDd0yiqD_am?}gpYZ?{#IfZ5 z7?Ip|O4}*vb-6zEWMZc7l_APPZ5S}k7WOCmJot4WAxVBV>OmL0T)IcyK!WFdKg=xx za;^4K#m~R0^uUX*?R?O&BF9b~@6n4+j%pjBnwAHN_(!o?sma?cfUWwm@9hmJBsv1@ z)1Ii%^CuG7+qk`0XcNKwDIjvS0RIy2SZ9N&Fn$H&RZX{c z=(xVwf-{SM-si7pi73YUG)ZGQPZyh*`g$MO!WYFPv~+^`GziLK65v7+^Z1XJ*`7?e zQI^nq0+TVHJXb3<5vy(^Ctz3*`OiRym+Y2djem5=AD;Him4P0)b}Qc7IBl`SaqWbG z7&bd<&Au-Cq{1Am{!6(iUh6Rkopxct$K|`NW2OE=40qn2*~r1m$>5%(zK8dLF#KqW zf0fpQk6YiKT8rQU&5?KTTd}7i*yw=KlkDqo3QUes#n#iP8A8wKcOhAYSPhI`lo3JQhsfb>pdNoe~!Mh?(Zl7(r`{uHniq^hoY6XyQ@{M9|(gqy^^2s-# zet_!+G2s5gzEX*(iPJ^5KU%xY=cjIeIzI&(oAcZJUZR`c4B&s=0o#us}qA41gDw2SmK4Ts$= z-;PwFAwoj-qaoFK#w$H_c-l&R5bN|c8{o+Z*ke}B09q0c8R3M3s?#9V;FWevi;3i+|F3T z>&pp(KBU$(>ua6Jp_VC-p&X@myxlT_70c+Pl8;JVJ}jN7k6syNEf@UxTgbQ`TEL z|5_~(QLBIRV!I=df2%BlTRm4Az4LFmAd9tCTVbT*QGlnx`u3I$=(4>@l^S9Y?kV|y zs8<)JAeIA30Fg`u`A`1``A)Vsuf*zRgUFc+x%4vgoHzztSRU4Ia>?uj)@&Tb#Eb5)HaG^JYU8h z4EJ-m^TiCF{K+pAy@NOwjj?_b|0t19Ins6Qf>;91+xu7SyThpv;>+T&%~B@cw{D^N z{AdvhBJ>AH2;NozK*va)s&E~XQp8uOV&(;Rr?7*Vo=eytQ-YkhhGOmd>i?Vj`FPV5 z?{Zs_Prfw!^U2IZer|IEnrmtZ$^d=o`&q@Vl#@pghH{lHt8U2#AE)Z{t4;*>JUz!f zvXM7Le#jtJ!@={tIT;d>Ke@fKXpQz;|JYjw-(|eF!rH*ig3@j-Sgi7+iRXHwlStXY zUg=2z)YRxi$G;8rk=BO|6@s9GP4ka24C=rgc|e8j4-4u8Tj}s$$R0CsxX{{C^|eSH zj&+}b1Yh4uD;XmEGPW*QKft7$bJp#s4n>yYDg;M!gPx=u@UmC2EV9!biv z(>ZmkLXD8ixF))7t}>@YQMb5Z-pV)Q))CoZ!!ZGf$W=C_o=tJ_wDc};fr1-#i5zbz zCedx$Q`!!+acPacMXVM7lYVn22`$KyOVhuvI?mcI7H>o=D`=H!H@4fAXHZXHE-@8&-a(}%x-dlQ|;f4~GT{uCzWncSwW8g#JVQX;bh1YpGon!+739M1`q((Nl zo6y3u`S%QN#l!)=RCBOb-oZArdkiCq+~8NWULvEcy_MiTuop$9h!etft1=#FM@ z(q%!NrOUBn4lMnv8n4Cfpu^VY(d-H~`(}-n5R^9G>)s66PVdlcKwp)h63Md-UcB3t zF&n4$PxB}ZO>ntRRAze@@^CETmcdsUC(?4`xSHTLsaZHYToBM$pT<-PhqXw7)0rEY zR2E+$GAEL`TtDSlIy;)-fKd2^ELzKM2r=fT{y23XlHB!F7bq_1-f~pFj=gg9hjxaO 
z?@9$DnqAtnFR-W+de6_T{gE@Uf!0Cq%Rl-2>bsiyV>uQdI+P&EC6Idl+jCtBQ$g8p z!cCRZJy`T%edMBl?=ApM`kHC!XMdpwk)~PCOP!L^UnXrN!kPl&Sy53MkNEj1b)l0- z$VA7i8UnZHcs>b5ba-ZW_nXr0FJV`)5L&${Cs8slstd0n`Bcqvy-oB?;bCRFcnq`{ zPS=Sxcn|zv`E6Skzeun~Xx~sr*o=gZ;I^3piUZz?c2)ay?ym9HUr{S~8S#=2G$pMu zo`r`o^7^-y(|i+{UbZlLi_%HYS6C#uy3taD>GVQJ1j+S5ew|Gztc5>3gY^1549%kb zLs1>qccF_WXs0Sep#iSeLybeK^V>TrOA$awzIWqsa+7Utt?&qT$$2kcdOy-G-5UuX z?)kKor9zHex+MsF+;}s~TYrLo?r5Q*(7^><1id+bd^@%b%$s)O)?GHbJ_q%w)mq`A znpiZ{$iiZ&3uOv1*=+!x10GuN*%i z2D`|xD`Pz|^SOD3!h9LCT9sgMDgbon%N(%S+r-K)4DBZ;UNjXcz@XIbaioLhRrU7dpH(1Y!~kNoYn*X ztnLHLjsBBtXW+Tnv2WB|y_E5Z8m_0nW(-Nj0upyopZSck{~agSMGb}jQgFQU%UZ6A z6|!y*`q^<>rlc_CnXMZs=Cd?hOD1MV?Du0SlmrprWmXE6)Ez4-H~-1R^wYaSbb~uy zA45-`RNAgsTV|gbS&wR!(dg+9<8c4RfrQ~A+w)_FL1Ytl7Qgoi%qX&Ne2)nu5I$0T zraXk4x9>~VPR1nRgfr;dqgODgf!otOas4yGDjL&n&HH`zFUIdnTfKSJ^rb|^8&XJyJps&yffp8i!HlaBffU8!ll6)xnK=mZrzUoZB-`T zzJ@}FY(VN!9-o{8a{y*aMlUWcC{cwxufa$HjP4~h1kGHUmJUYuQnKbE`j}=qb0wqc z$P)9jM993`un$`pgzgYg&5u5eI{HTr%K(aviJbNM(OKjgzn~}r}od;Ecp&i#BcHS*R zbiL;g3NK!A-&X3n5QRVX$FWx@)Rmo<1z*61OGdW2TP}jDR-=n~p>X`9tvH!F6-KY4 zjK4y3+81=ry9}GId{-X(6v=i7Lw;Xq?(EUYYK=XjJ>m(3ukX`u8E_98!rI?ofpD)T zi2^2MwmPhE{pP3HHs~rBEX9?YB57iw4`4FI&{9~U9z3?9^YrlYx6i5jcxdxmtXf^= zT2zOeO#fcpH56K))B~wHFrK7k83=vPsVz@?2;2M=fACnP(es9Cts-Rj7%}$rP(AYF$ZVN*ayTP}s&Yy7ADGIIrwD4@jz~irgt@PevsY|i zKE7VM7Z7hHzcnTekj)PS@I3U$f*KMOofOf>cLOP zL+3ucktf=RmEWoyN~14_H3na?65R8qx0+d`WGA=(W*{kpOm^YNy6b}xyfC(_b{Q0J zOE{;ZI=X|39Zqt&!-^ocwJOyvG%>4T)%_^T&0%7r50C%1`G?SuyZ`KT)UW9kT^2u~ z>HHWVW}x=ee}`JTT9S|BOq6;|&T3PWBldgbl{b;rSHB)BE=WsS;#!j!2wK6J&d zes~ir;`ezN)L!&S8b|N=m`py((mt#Lm*hvl?bD6K}QFr{J1X21^}SxqdDr-5j>)0nplHyCh|UT zs@q!^$?k;6*il6>&XmekrGPUWkhxotX@7SYyi?o?JZxq@sI~nY@d*ZEGSUtM&^PwoR`?=$198WzA`Om6?Bx5JCDUpI5g+kbpe?E zI_FcDG962OCjHKR+^S!j3mR zm}Maz9y$1ErD2&B0ouoV?Q<nsxtW4}9b zk$uEq#1_}`cwy00#Au-b8EAr+9O^cyoF8=Tv7evpbDvHJqDpg2T2$L2&h+1bEH?zM zf?FRu)o5AxZpAw{j+w#$IT@X%u{VDqILaFDJR9pY=aOZPW zu`bst5vqR&t(nL3n9GonD_%SX?#Q*L?3AuLwE{mz%yBb 
zigxjTvw2%QS;gn=pDz*Sjf;a+5bbopOViSU7Kuz=S`&vCDiIH(ZOk$kJ~X^e@^}#A zY7Mxdr}QkfR*GkJnkzKd^Zp4Kdabp!h`DgtROP@W<`2PBg=@(@@?C`#1j#5pv=#;Sw*C9_AkBX{P*Ri&$r$}fg$XN%Fr{_i}TQ%Pu!Zu-o!RYN+(x# zWw_e-FA<8Y45YSY#}hdf#GQeNaj~nnL0JGwZ-#PDQkCY3bzCl(+HIN))SrG&f~wqp z2+rSBsJ~)Wa+o_wPcZeyN<5kEJ0hpbbff7F5@bA-?-q>ff8P2S^UHL{*lWBKR0EOi zE7d*fllde1T9;v(8K<^A{`m(}7If*yk76rg)mJ^1N?R2wB=(lr2J~i5T49|S!P@So zeW;uGX~r0C*eYp`tvQ{M8nB&*WWqsTP@1PGo}A`d_=3>nk#|)Sz8p)-R-?w6hqic^ zbi2C;NYevAzWZ}Y3MHm6oR=t(k~Jfs<~cHD7h3)GJqfic_WW92RGWaC+CSo7r|y!8 ziJuNme}fRQjbj9jkS&nCsw<5NrB(sxoM^sL5Yl^s;cNj<&|5li;uP-KqwMAw>c(YA zBakQ+DE#8JXHBj``#mnPY@0bFL(HNxXmIwq|!4F ze~-(GIq_DkeY%~RIgo0;D+&~EIxZ!vJx!%9!yeRx!QBFIOsEXK<4Y|cUm6M5AkPQe zMlB+AoSNI5G-Xen+#-sg7xmwBkh_f*VYCB-lKVa&7TuyMXD`Q1Ajt_)i4E9^aS7qARATEBZUcgV?*W8*L^4;hVQVZC71BS% z@q9U}>phhW4{}u2glDW?U99uavfN|ag>KFi&Zrq6&Be5o>|E`dBY%3}QS{gqj$D1M zjaBAd6ykFQt^P1{>KS%`>#=Cw-!kW6^W1O1?3#;4gtL_sz<3|TER-CKjYkFF7@_v_ z>T7o;pA2r$XomuUt><4e5NBJ@yBuvxxa;;QW|A0d>b$IdeUyZDuU9uIPT$!8Ox-rgU;Cbo8H!$uM&(jXIyb_IVj$LXz#PPTC zC_Kt#-HiH)mkr*Oo^owGn#~|b#+y`q3%BX8jFkWad^Y`-1UZ74(Pn=fjYE1b2k*b0 zwcaTgUz7Hi&^|~w_Qk`x!nKliLf9aDGm>5Ljlg9=BBsCfv^02|kaHM6>rAlqZFtIH zmyYW)#FDz(Woq2?E3nO53rqBR&$LW4T+;mW-h_k-1+=@>C|1n~D`fI97-b3T{_~oO znxnWb*P5_WrX%0Q0LMH)wmDcb3&*cJjwDKv^n}0m6y9|tLXvcB?inS2=Q&|?r>_BE zyAT=(s)WQ;Qg1#pw|V4HR{GAu-Q$0Y^(Lz|5ovxq`)uZZ~dYmOgJ z{n3)c`)H?|gRg4?Z9X^_zQn6j2+G+l!TSdH&%VnA*Wd4HHPMwuW>=@ep9|H7BoH;1 zeacaDiFx1-{^siBR#!a1<55NU=}Cp>Z>Jk?;lV6x{iJSzVWMv7vzSA)nv}&VC(rT# z0fZ>^vd0bga4kmM5I6pL%^I-N`j;tpbEdw2S|UK3aijWGTM_GCPSUi&@ZhKRxRR{wns4VvAuR$67iiFQ`pDN8_l zEy0d+xU$G$dGrg?zS4Gz!iUreK1%$~X%uYY`B6aO&{uZp9oD_qKV-6}IUh-aBz5tL zo6l8uA6o{L=0%eytb_Oym*&jv{%Ve82;4{Hm0f8L=iBxy~qY=xYUP5x0Ht-DOHQQT3>VPrEgZOI*;?KdfuZXBSJX1 z#i}v87xd37>ex3YtWozltdB5)Nt3#=NI_7z@wxmo*X@|r_V-lp3nA=q=|)By)85K$ z-Rc+_(3>`v=9TS*q+(VeQQP+OT{ERqnTk$onL9DJWA9=iWC3GNertt`cThvSoX~!p zT^lC79p7W#jc4_qkD!M?;++ilcsN66LP6F+_bV1kUw`NE{4Q_>f%s~&FN?i|XcN9q 
zBIr+8#N^I#GjF~tH*-7Y28G`w8fPy?_~&o9dL8q}kh{a6|j0M|SrO7?(BJ&%vDyYJNqsR<|zRNKy% za!bThuRrjhF^Ep>Pw~t9@Y;Wl1ZgV(4Ngzo?ZZENjX@pbUM@mTb9*D(3}sMRM+~mA z!j=DYEsFycXPgvdwVe;FeTVnKX22(?dVQrvAuy*NYG`?~?6L8L2nANx%V<##Ypz{+hPXGot=o>yx%-0IWiFM7 zF=2NY6~5K0Wi8fZv4j0eHYQMpq$MKf7*EMseE)-I!u>m-y}iK}eFcJDMbX-O5&^fIKO+ z!7KigpFLt-eoVTRugKs^%upkABL2Xim5@cR| zZx=+fd(eNE=Psz1=e@+Cf7^RSad)jSU7Mr}nLwV~g3^mvahrH*g@V)rDHajAt~*Ie zpMiE0uQD2(+pUUPhUQuqTv*aK1=Xtb<1H{bRNRncUe@oYW*JLQk#>8_Rp8P$22ZBx zMk`QG$Lr4JQEU;r6h$2*1R01m#ED_jDA!b)?r`_-LW}YQ>xxV43hT3rxYPscjfk8; z8FsoC7|q#5!8g@^*d)$0?|I*rX{8`f?6ym;>2%&FVB{*!Fz<1b@i`e)PIrMl`%*n+M`J z#?QM~*BkFGMJ>YT+TNvvRMbr>hPmk)jJF3qrnL*Gm2gb3BM@E_ZYjU^%*#DcLo?jG z@h2KS^W2KGY8b8hF&9QKGr3BoMufDPOz?lZsBN#ZdF_8fk0`s2R@x+%Oyg7sA@M~u z8lHZrvK3DkW=7k!DWZ0RHgT}nD9LpJy(ED#8Z9ukoLhj?MQUShWTS?7qJ!bGmis!| z3K{J+{x~DBWG{gO8Axa_ah=UnqhSy4Sf3c89)3&Z)TG-m!$~Kuv%CL)YeBp`FZn`l z4J-SxA`#&_kUD5q_ZY(R&Ls+(-dOe`kS!7M$J_m?@RD~t-mjG_T|NmkjKu#CT@4=3 z3NUHtCj{&Y)Ry`=a^)I+yyB_bSsx>>bDI!Vkz8OAL+Nd|ZkBxu`dHzxvcz10RY~mW z$;~fXxNO_+1|pf?SkW z+&NxeE1%qs4HD%qh_soDACkx-u$W%G0zRJ8`H$dxH^b7+Lr$|GM=fmeg|d(3m;QR|4^>!x$BMNANvkt) z?+DHLwRy749NZXr^NqnAGH2~E-Ap9r2TuCu24Uh7z*y&w4hPHiGrbr2huKxF@Cp^}t zd(R5)Q;Uk>AZ-m@q_#3w%HJ>)$VHqb+;r5Bbynwv&m1|8& zS17G(FI6U|))U5I%9@oHe;~~HJ+peR?sxO_@^I^e4D!;RQ_id!+l9HAiM# z`z?R7jAr!XZirFlm-#_I5r?}Qy!AX0?5SpG4DO$7e)05j22{1s5^Her)R1}4(#@OK z7Ywp4T$CLcP_k5;rg^!GiwnY?T}u|ZIi9pq1?Vx3Eq#e;jbez%-NT)z?~1U;vsHBR~h^0p>&MWX;QK4NodFI z+cby}{bGCN4YbWLX*Ubs9rU03;HZo-_7nzBLxj^TCmibf`lH9KWMmjnnc_^DXx4JE zNs`>ug&|B=lkkS=WtYa3Nfh##n24kJIIbp9$r-yevegKckrx&OMFM5 zfiw|UZlXLhFz16`-jvjHd`M*`j^TeUHBlv-aXj>t2Mvm6RVOE#o05JtK}_%E99G-W zrbf}gFYCJ~k)$b>gERd@+F)*?YevLBPlCt%QJ;|LhCT+~t@Ez-{6Wer-Ej&BGiGm( zW|LY^m>pJfLl!jh$Ie{aGKy!v#>3HN1EbZF&7oe$_3x@B0&&UKj zGc`J~q;g*u$C^x6nr`|sz`U1-PdFCd98*NFz1_Q2oOpua-O{Q})oSF%qx8;O>uAL4 zTcwdZU-A{>^!~oFXWyJc8B?(^G%4+(^vIDy1%zgiPvf3POyxX-xD{Flv=C-+q31)e zGBa@DzCV2-rcvF^qsT&{JA2!us~4ybuqb@<$io6r$op85UL!aUBS+i5ohr()9SaE= 
z9rlhLFFDq<=)0NAk6o`!Fvfp&@;$vG$+0~jN|m32P>)&+Z4c@;?B;wGOkl7(2VB%} znZtL!^h&3QZg|uVhNf|cn$7$|bz|Q*$Bhj5kqfGj0ig(U{u$a%8-i9+dd^zMBQyt@NvuA-`P^*<#pecSbsmwVP{iYl8F9Oh~VX$k23o&C1Mi1- zw~XvAM2$Qc&N_t8h5O6*5wSU=sTs?Vn+Hb8dA2+WoUxS~-mclV3NiP;Tg$pa+kSj$ zo9Hoyw(sP-+iI&*b(LN>k4CIsPD=0j>4&bEuU#_b6a$9FAuDAaQ`OVU;{J-2;wyAN z(tw#!PnunjfM;mfN3hZMo#HyJ+Rg=fQ1Rc$aMFZ1#2!I}Qmvv+OB$CibRNYhvurki zuZh#{*$#je%H7I+zHqG*NyHP7x8!e8AZJ^$mLRsv+mwnK7~m9a49^6*lc$|>YxspJ zvnv{%xQo+&eCYRt@@qco`&`v2t8b8`Q_c18Sy8 z8u5|H{4=r^nWI;=DjmpYC^kaNC^vbHvgV(@WnEbWz0_SC{{-}_1co+XNj~#zlG$Xr z!;3X+%@k7V`yohX|0vEf(kK5aZfLh#P|2RKu|E9)bm?`W|8UBcmZA3#aI&Ym7a(t9 z!s!%`_3FaJB^ah-|DCK0xuMV++i>9~-A3qjP4DfZ=@qUKqqD&D{(azH(@gnPizPj1 zcYEp98n!y{a@2gyuAOnaa&5}x$2w%x*R`z|GIt4DxK+g8l(&WTPSH@PYP`o=^GZpZ zO*~mPO~t)W@J}=0uD|hc`0XIp?UFhj8XZu5V^ApXNpwq6uQCD&_ustO%&eZOH>ANr z);RPvDX%F6PV`rudS(OHELzhRV?mMXh5E1$?G~9QwxH3!yU`;b$o!^L*213*gS8Vi z$nc(}h~4BbFy7VaCE3v!jSPk>oIW=YV=(lJ&gu@@cBxz-KXv7brfI8DR9+B?yW@m# z;7js~(e?GDv~+Ol37Teoy#X@3s?woou)3X`UMV;zIhpSvNn9qOTd zUL-#Mar9R0m0vlAH|5?_cHU1}By=i)xR7MYa{5dLfX)_Kn(kiYlf>2OrEWo~w;JoN z^5Nu=S#*cHdg{d8%i(S6muR7 z#SQt5(4FWQj^VuSail(Cb$+>;b~f`>nn^%~j(Ez%w^9r(M!F_%=_B^+G?L?FiIP>W zkR6}{DtP8ECVor#7EYqOKOB8` zYGxf$U4L5-DZ4!yNnWzlNMhX9rc)DdMC6Z&bKL$4EIMM?7T0Ao(q~6Z7uO7`eF&|L z66k>)vdV;^_Y!OhUn}TN9)_NkGPvcH>mj?pfbrxvL@2|a496AczF^=*jH~;WA8dg8 zmhLNtV&VEX76ug!)xHP$LW9=q?-zTDJ(tn9zxqut$R*mL{i>P=zLAxOS}N1lf5RYE zLXG9RtLnhCYdt|76 zkZC(Ox4&WMU#P&L!+xG1G$_wfios~jnR9duA;&fKxke!u2eV+ghKHzbWy#D@MZ3El zH#JAxquj9$vn`<+2HT&UbhET``(j~tPiAE~;fKp(iDOcMwOsxGqiPBI!<@brCLV@=-&wwUSgm@EES5E(=->mDJ>vI{Qc5tv4k7_K>Mbxuc7wD ztqmaeULrQPRIw-31~E7DXIilqJAaMB87QNy+F2}RT0P!m1;H}Tl%B6pZ925EZjOUv zErgIT>Y}rvs28yFw*Dij?=ct!^AVVMarn&PD<)zG3~oFsjnnb=iC6LFrdt`|;^P-x zJtv>eeDuUJcW@lL;8j>yVv&m*910apREZrq2_9&{CbXzQFED=v!cQMI4?FH4$#?dz zsRbVeg+s#Jp=n58r|{*WVW^#+db3$2m(w!mhe$waz#E?+F34uykKon*Qu@iX_6MMupt>ed54=u&9Zg*B`h@t;EmFFd~`l70CzQU^doeHj4EohA?FhR z3`8<|gf7*zcnC7}^{<4wtP!VonjvF1Fwu@u)2t%}6WQL&lWw(=$`_q@gW>e&8codV 
zMRaN7eF$M(BMbD2m3;V|+0=kpD|$P}N{iaA_v$9@-ZbgCYP z7eo1-T#GB~B}6V!g`W63dPATjbUAN^?03fw{#T>@;T^K-sa=0Glq-l|r8TLThabVu z1>KeJO_`V;RfqAcL#?{qr%-`<4J$k~x__PS;6ofKX1pw?x;^&+&n|@RYX^@rtB`c| zWYV$JW8Fp%hu=?@H2jEyY#I9tZ3+I?fh*_s(O7$U*U`5TCeHPi5)H__4a6DfTaYhtAU9# zjP~ZKf)M+8Xx8XNao^~}$;jmRSpOmuZtyQy@Ivl-IX*x#kTk!mvowH=Ajm+PvF_`YG?9Oci1@^b>+%q|FR8@G&eP35*^L5b5dQAe)uL)-nG!w35W z#g(C|`u_RWd)920R1r{fD+e!Fm6HOm=FOACijB?h2gZ z2%wT1kZooRK55oJ3XwE=a#t_J zPncAVrSR_&G>^oz7w8L_0#XytbYv1XMp;_`q9 zvtph;sYaCuPe|ps3XZ?5V+UY>Wpmu$RB$y&=r0v_I9!{fB2Mcn1as>^E2~l9A<&dM zALWA3t2OdK60#kQ>g0<-O=g0pR?h3mpOkRpmfanERvIA&s~LGvS`)`YlkPUnU7;8} zm2r<9bdEtVK)rz>?04^*kQP57vK8451nLcY^JA#@bu@(5dAC(9G#}=Yn`)zz<1&Lp zjRjJZmgXm)22jy7dGzW%RDp{+oj{W?V7h}K(3(&`x%B&lF><4FNEyTyE!8`Xh4d5Q z3QE)Uu$^@(gyaE3uo|Ld8Pbk^qa@;75IOR?|Nvy{|*7j)kAiVshRpik z*lTq?%^vUj2{`1eQ?B}~pWnL-2#%6ImmBsv>&v6+twYx@q|(QD`z7GP=zo5diJIuzq16f=ty%*PVLT*cHkxW?1ctc9+f27A^gm-4mV^S)No6V} z(*Jn1Ma@AV8AePWt~)CJm?9+2f7)(ZQ$gE0$63z<0j(r+V)`Liq0WrLe$=DAh;1Cg zgW*`JBu{IOWTPFRZb16w7q=VRsiZeQ^j6qzExl%=$ebzB6g+NxaE%(ZWyC^w*g}wV zA(fC9(0(f5&}USvRqoe-J1u*MlA))``5lkv5+wv@G-U}X!f-vfRqWX0THZ2*oLuA0 zLkbocVe-1KgNnnv7Z=!1QZh$ZR>fgh^SH-1N~L?h6MRBcc%1-U22h4-i3+>CoUjYZ z-nwNg`m8BQF(;sZ{D*qZrQDhTt9Z}+GdA9Aaf-IyVGUca8lO*G>q#ee(K-xKp%9Ec z3zucXeH6HHwOa{=N!_|YoP1UGC#4?sD>aTkZxYBh>4sWs@e7Yo~}Eop;Yw{|+X zn;24b)J&Yle>K%rEGLD;pT;IdOBmT|q$c=RpOH<<9DO0%-lg=zgY8{v@EBo8qf>4X z{rdXG(6?{P)u9@MZy zaL^$0nEIfScs)2GaLS1iru<0R*ys- zoSIRn91``hl4npcWJ~XPw$RV`gnGrOCNDnip+9PuUif};$?bDp_!fn(B)F7xN*#?@ zj7-egXb6y*T|c4Jp0gN%>;ap$A^XkT@b5DrC8zSw^A!o7ZEYUat4Ccc3R0G#-(J zu}^-YnMb}yQOP1yiO?Tq99Qo{Xiy{HQgiy#rq|$)?Cn5p|J@9Yl(B}+mX{sfF$*Ud4)w4R5Ji&Xp&#m@<(G}@*X}A?UEaJ_ z*Z9a;J5}m@)$*#!7uI5Gs}9iLZhgd%lWt9;ts0iKu3L zQaUjidu77#sCisTCIg4< zycvcO5h-_UE>`LaG$yY@=8AjJ1uZHT@5E6@pIFc)d+VOBtQ&YGuKU=#B-VdJOT{yA zHz>c*%^E2}tAq&~w<>Ifb?77Iqf=Xz!mhLEW@suRl+y{q?DvJxcnSSn6XuwdKFKch zxC-*;S>I=CZKsuS5I5R?I`D~s^L}F8eCc}oUuE~3mxGOcqq_=x5E13= z-)Vi?{U8_@K===502-*2t=`-q$noSBrKtghVJ?|u_FvIql7 
z|8wug9v2Aa-gY7Dh<^Y@n-+Eyv4)C(l6TPxXKN&LK8outR2^9dnIOKr$KkUVN%kGU|41Kdy<1!ins0C)DorJ>yVyqcmKnLf`)2nV*xnWFv1`v zOqT8X9WO)Ne}2{S)HGx}$wZsn<&1spFp)F5A1_2f7aG?yd1ly16&88(ZNK%(*=HAi& zknb<~`1R{lnG*_6>15#{Qn5y#tm+yIugXr8i)b04qwfaP@#clr%Ll?+(7*7dMub@A zEoTvgL*!Wh8g2!UZAq~K*Hh8vzi$H!S0CG-8H2IXN4U?=x}moZ$q#(FbUJ0=w72|H z&Fq!6Z+YkDAqMt1kBD1xeD-`OYuEQ$<0#m4VVQJ$Fp^aJ!{PMY1f1k0#bVhZ%clUY6m6>iiW zO>h38w+Yq)3TMUg^r+U?L){SE_;6Wxt0f8*tJS;(9_#nwMHU#18G%ZBJC~M!twK^r zcD8Ym2$=~zOw}4|hmxa`t2`F5FQ&VCi27np`M4(ND=t@O*zn_kx#FOasR)Yi$@|6O z#umxetUz3LH7t&AD)P(dQ}u3T&G|qXTQ#=!O$dEMN7>m?T}Hxs3$I(_NQI!;$MW6# z)?eop>Y7Y(^OUJdlimyehLGGFfJDGe(VPlzK6keMSf&y{PG0WMSw8|v&yjnKB}`4* zW30e!?+YlMSbv__0vonl`16O*m*wX5G2nK@S_RMKTK+P$P?LK`Nu17q%{j5Sd~-t$ z$CJ-XL(4D)o&Z8b>94?(@)p<(faBzQ`a~P`nqGI;BZm-2$@H282hvqiY@3X;NM~+6 zn+Bn8>iJb)8PRZz261{9u89yk5IajEb>R~sWwB8gb*o9sWU%}4K1RtuNGbdO(Q$p6 zQ2+RvWP(Q7&Nz{KET}c7LR4=;uYp13Oq%a{OvSq)zGN#7Q(!W*QLrKMYq*)Snz{(t z?D1&cl5TTqBHC+{MJQcUjaKay-&K%Y{Pmn8RwHIGtYpFz*E z)tr^mKA_QDgDS^pon`Nye9tcw4I(8hOBB-=;jVn(oyOj;UmxF&x1SGS`48P-)y(Ce zKd`_F-^CPAp7NcoqglmXpM|8)H77Q5w>2{nTOEXaszRm@UymO{_%^~gl3JP+IQb5d zHBsZ~5a~lF6T@_Dt5!zs@uEm<80~&D_kG=`1BkshK~=zyWV)FiH;A4ZSXMj9_b*P= zjNhQdEYM+=5Cx5#*flkhULWe$+g@z3B+605LI*|iuTlAFaj^g0dUCexd^MIvJEtU~ z!n2$QrREvpRnME^m{7Sv+?&H(4E6W56_|R07}H7zfin6$3cajRmh8*wB)R=M-gW8i z2@B)Ej0z?qj8#J?jVKMs0_>QrJt9!tN8I&A$aT*u&=i+&YaY?v4S2r)a{dz!l64QL zc&Rn6oMFx3iPUu)EnERORz+Wp0sq;&)LU$gi1uI*-sh#aVUdsF#NLB*zWqBZfJ(KA zoO|ri2VsQe#JLS05=cbk1v=?BJXM@<9%7(amf75hX~T=46w+`b2igG{2V|;+wNuXj z7@YceGyfhuzfo%|i~9TZo!iJsjGr65eR7PJF1XEa{@RtQ6P%D@Y39kpqv+d(j?I-0U7Xqm6eW~*cq$qTt4M74}c9SRQ8``?wSb>^iO1{Pf z(0~5^OYm?oN2}(=@cI+(H=OldiBN?kw-;14Ac2C4>doB$?b>TBn$?j1TRCbI< z)x5%mp+^6*yCKdHd>tC81>v$rPO&GgYcAmyX!<^R`Zd3OFO%k(*WA@b&bx)o@zIKy zso-&>=iB4WtfMq4Ek8i`tTS_lkIZ?w##(JQv!U>7Qr3E4-V8&q?BxQR#Mt+!BcEM$ z%C?#g$=NdTN=HMr*?DahRX=+Yq=-!E6r_CsSin7{m`iYbG$cyp0aG|U`X?UNyRz@b zHTQMq-OBFTV0HJXS1KFNkMuI)=9Uq}tPC!vvo9cHtZ`xvVU~Qp=6`Qok97=+@?U#< zymLKTt4sL{BG}bXZ>jEVqBIE~~EBCeI}U 
zgsrs1zB_Afwi79)F_+5^&c}~i$k=Fn)TSqKBI}q(I1moq)5m`=K+@-%y{E5@Sj*{R z2zr0}KM#)MVvmw0O%Miyb(OuuCaBFpaU_+KZArMEMHA z3r;mZM1ER_cojp;dcKQhf6DCKc5o!c22okvf&cCmC(Am=x*PcUIfVl3iR{JyETvsWSf24)0TMk5kp6aI~ z2CO)u>yXQX`SVMqPQjo=Y%jKQOg)~vCiIh_xF9gSpA%*o$iL+Nk~>ve6XgAnuUW#4A6c?W(eh-&{Ke6(!9<;^=ZF zY@fg=9g8SCW}AGu{G5vNxut;sj&@m?H{h+ZW1{H}O@;i*CR?$@?ZUmTfJ&)vU(WxY zfp5Ejm^a+m4Lx=B#Rw2GP(pM_N)fzVYP@yidK^a0|dzgT23T zp7Z^F)`hIPm;c67!ykY=eNl{4RcxxMdk@DRfErwE{_VuV=)uk zA1*W;w_n?H>E}9>r+cTS@W(}sku-Cizb^H|nB5JHMTDx7l17VjrFwYs34b;yLsG(f zVIp0+DuQP}FM}5E=$`Wf zz!`WS69~QbI(uoGN>jTp?v;38nV=^N(h+elQ_8bz^2Tn4v8zWw(FOG~YvGuS=n8EP zU;aqUVP#xSGfYudrSa2Ws_^7d5==_ zTQK6%ksA9iWfV&)<4v4rEKH~OHM*gz%Df{{PEUQrggzmy<#u0#(1toza)q~%vIgu; zr#`v9wn4Uhs@@-00gT`{bk(VZrK&eJMei6Yb=a}pZS_6)dN*|i1`=@eFTQ}bE>szZxeLjxijO( zC;!RLPNcNnJ|qO?o~<=AT+47N*T(=l{?ZFS7gk%7vKa~|3aptc-?!RFP-rbNdCF3# zP8lEc5hC|bu8oa`A7Iz~?@ZbVUks_6CH@5N`D?eFbQm{M|nc(RGDlHs(^N{I_i)1i+Ng4+{#nY1Nyjk4R>+|D~j$+By*EW zcatZfktl((eMb;uvR{tVKQi`Qy3PQ)$vQt;k&VAlY{Ab>*yVmWNCwhti6MWEe zwXoo`MXm<%MD?0b`CY}hLFem1l=txtc7>SpU_a}au9dA}*Q?QYTVP~I6W1hcy}%L& zq4vI|hR>x^8=7JUnskMM_J)&DY4@&zj@Kj)%Gy*9?RE|mW9*CHEK;wSF5tvg3U+A$ z6Mk1$Y6jf{BYb&X#6xXt1Rhc1!yWMv2*P56#yuB5&TgpAR6Q%IJQFI@gr_zCyXt4( zFf1X5M}q3n5lLRXoZwtz0ce-y zl5y+(x^6G8Z5%xM&eO3JyIxkhOP*ad)H)~6IiBYu(e7!zV31mHIl0Pi#Y^kDMsjMm z7e30-V0z_$4;==RTBxR9+*lBw7@5rZsM4badaQf%42%lZ-TV^;Wr_PG%X*I96XIa;7`6gd)g@)#tYUn~a=oUcgKKMjbrlP>fEN52~OIFIpL zX?UXOPiN|)P<+KgXE2iuJyUYYi$v5smA2oEjK@A}RGprnsv47$1a^9}cdteay?Q0j zR;B4bKp2Jwt-o!{1sk$R5SVHjywCUW&e}=*@@0-4SfVpPb@VvKvvnXq;l96efwE7q z7&)9YHS+%OZx-G-(Xs1Zf7o&1zih*$Pf01senrA7!EzSW^Khi-+1z3KfTf5$ebvPj zI<=KADv1;5uLcro$v5->|5TH}goCi~w(Cp(qHa1Rrtu_+DO7HCapBmhlv)Shh0N)9 z!w}ri1Q3Hr%JtA|Ta_R{5cD^f0v>lK%_KlXyTqWEl++z?QaHL z72N#rg84NpkDJfD^yWL-PV70tZqJHPO}n~Gg-8jJ9wZf60ScZ~w;g)U<1UQcATI>y zYeE+0?Z2F}C)HK7YSOQq2F1Vw>&KnF626dpD}!`V~>C74oSk*rD}fD(Ftf2&@w;w z&2U#M(t|Juu15Ft(Eqv}0v$2i+^{JatpHa)tdUVHWmd zLf|~#nUr;DdE2_Og3OONI_V3t#QOt`I%R&|1?+{P;(g$RGdOw0m7FCB;6N`zJaeYM 
zte7&)Oe-%wM_ry(xUg_sgjh3R0I;p%)mxjqz-OOcKM_bN5V6VYU6N2SQuyOTfrtOu z)w&yklWSCb3s(;knPk`0KLbivH}xV_N9q$H5&!gy)Stb}UPpBH%KoP8KRavGLn;gU z1(JaugK57slE@;1f+e7z9f%hIq=iKYDZO>zQ9)52k5$aS` z(xBw6G@PoQN5gZxkvG&9p-2@(2mrKkbNMQ`5tFs+j_1X@0ITBAyCuVGbuxIr_RutyrL_--z;J? zUIWF5qDV+KyLBN+h28Gr@)ZAVY4|em*nhcaZL_9kZv-XWs6{V9f$4efMGj^keHNVJ zJhBFhi{RuqGJ2yiReCg{$dz-ei+zyl=Ti4OaxGot5QXPS1i9;lFuUsF*^08sR&AdO zU*&7}KteI`96W;63ca==TR3>wjWFYlk+UGnd17}%qy4Oj$=rua1&OvE9rTJK7XI?_ zK`X0!p#8CVZS|!KYZykWz1EuelOs{a9UfBqy#(aTY13+n$a>D|IW_Ua;5!pB{f^=2 zQZe$o98I?sk&UOpl~xvE-BvKlLF3J^1+V8U9)Q0ps1of5r#D*@(Y*zx35iQb{K zSHqcil;?LD5NzSt!&ONt zC_~i6)bFG>0klyK&_Kb;PPglznAcf(k@?-Tm`?qzj@C(Gf!=$>W)|7O>Z3Nt^eD_) z>Fj%2{oq-@JB+6gK0&eBSFhUVH-23swGeFW?NO370H#*EyqGmJDPmlC5s(44^Ny=S z3QHl0ww@gozPLslbH`W9xW@rk;2K6;=R*=pcG6<^8UKH}zxrBw7nBH-x(THAw#mQm zCko`B?Z+Y_F(Uf7O6?9l(+>JOBFL>Cx*ef0mhIQVK>T_3HH!eN{JPk}Hx}ow`DhAF z^D;ObF_bTgB&8njVB>9@-HeNrTsSlSPP_!41fAHP3MlY8dc@w{Vhrkve9;+eCzQn3 z@A6;)S4)T#P8~k&$`2TC!ygbnN9UfB>j9Jf#R=RXtS%GS)?rFDY)qoq6>f;ee*#FE z3MD?AY5MZ>z{goXCpAA%t|BZ5Sy!-P)iV`iQ5}*vtj^c-J3A|WCY>1vj?FLUH zQ%nIWq=T`Cd6&-?YQWK$eFnN`>pKi|4q7{bMoVEO0)9t>QH(wOmFZ z#JNTQGC>Zj8>u-KCmO@Deh$~ z>Uia9IIVP5;iHVUd$ms|d7f<08W9>Xmc+n;yu(bV7Ojx0TM%jEL(M);@XVlp*sCdMSqLybmDuVA+wM+I; zbe>OX34L;CC%e+VWh^h7l_wc(z#c$6-YDj{zw_Sj$5#k`-)9D)jdVC|L<@EmMr~%J z4+P>lcB`A#^Jk2s9}FbY{gSI!j{*i0*X{{V^JyegL8r?nLm2vU{-qM2V$X4y7c(x8 zaVCnf`|mw^{9;yAvrD4EF!qRRbE!-4FFG<1p|SoA_%;+WZkO*gfWb zgL_~yq%3ZL7nH&TiE~Kl_CL-W7^_Tdr{v%;I4HAo^AL_;GwtZ{)4Kik?gw%IT(F8< z!}KXHt9hvmiB^{Fr(zF63=M_^{S_${WgE44X`tL|>>f}u+jP|@L|zD+9FKkdqjAGE z=jF%0DRQ=e?gMT;4^?Sf48ig;tYRec>Y~t_2VEkaFf0lOPZPc4x7JsLetQYIpSIiq zt@76-7S}U;A7f56%NpZ;(~xAdY@Ce9)U+-iukgD**T6+~GniDuV_&XK0*OJBdU}1H zBIl@$x9zWQh?EnY;6<<=G2`O9cX$;7f?5-~H<|u7D6~ufSWYx_w@}{-xfwD&UU)#0OD?V!>yUw&;FG>~ zSA7P`GhIy_pG85m2$yF6*1f5i?;DLOjD^Ic?QHI#S#+v9*F#WyJ}xA@rn&MgZ#EO+ z@K03x;S^_Ypq-cyIjt0XRO;M`tpe%mr3WUwP<|vcEwZ<44V|w*87|#J@US@%HEhjU zUrc`z@2@lZ^$#@VI!cO0j*wKTc%JX1lmQxx<3 
zh!EV&L^Tv?EykAkTU>s9VwBxlc0S%+*GtHQ4eR+(3V)e&XFX}N7Z3*f&bc-0$G{~?OeK&8N1=m zJB{s|wto9-j!+_^>z<&ok^e&~v?IoWmLaYssm(ak8EYd%f$g9ws{} z<05~%r(ZelcHL8YbNtS6)m84yY#_LOO!*{!Rr0wTJSSUGuR4mW*H<}506CtHdO)L; zWCUeTCG*iqAZ5t9)2}?Y!=OKE2pF)*foq*b{NvjdIle#myet=ylxb$x||= zjQ{7x2gPE)IF!9<9V%=`*~UF+-x!gc2PPO-k&Ny6MU-zGN;oChyn7K;w5!LL8Z`c> z+9cBhJXbIF+?!5+a?J6I1s^$e^**YhV)X$?&KrG8^n0IVC;Ru#%@~a%($f$|e9ydY zR>NU)*sS-Kq<`Pt{)ZKwTD}HiE(D+}B&m+dQP6ncX*)WZdwju_gp{q{%5PQkNp@Lw z$O=fb0~Pwg(0(-;!ksSyZi*FkNGe!C>cGPVg@mYVkOBArIlFFv2mF zGuIQ^S)XQ`=cFJli*n%9W_o+`ooMx4GibYi4LAF=trg7Dm+Wbc{@p(+(E2lAt823h zSD@Rm*Y@{i5&CNuoEMtdH7_FB`lYPCDt%9itVX_M1#OMFSwUT`L=|H(^cxm5xaoq#VGE{rCOaq9x zgw>Eyt%R4p3%li{!0S1etBrLTir$*R08{g`eAGFRN>nRYK;SHo7{Q#0-;mvObolKY1xsk}zg5Mk- zjD~3Rg6sO+p|HC0dr#o>^CuQE=)eih*%o-dvWn_APbzxoE}f_S%w)Cvkh3Xk7>eU@ z<13+yKYqDoW%aRrN8}{@`s8(6Fdl57)J1*cW&@ThX4GZkDfAEZKHnM(y06dd%83UB z2*v~t29A%dCGeWJrNZf59ZR1_m}DDs3Bk1Z?*hnP;z|yX>K8}iZ4+xsEVfiTm)gGk zG3EC>9{W)9!1)ju4dUWGFf9coU+e66M+EfeVZlUdzF`QBE;E@Yt^8sZi)U1kn6u^n(LTXWMo7sPuY7ZssZObwp;9nL3S&B_3lmf0 z=Wy@i7^%>UZ71P`&Hx*tYcD{qHT4c{M0iaikwg>O2gJO<08u! 
z_s^w&d;55>!`YS!aA-wJ1e8o<y{`Ucr~DX9r7WjXQPQyklWl)n5n;~IKE(x8l-co4_N8%u+K zvsXZ#npBou;|V@+RW`oxUj52n>WHI$B)?}qKHc{|MW)R3BC2@u3_0se)9|qw!1xw+ z_;|WDF6a{fd2-t|ITiYmHoTqYJ(PcLhnZs|L>wAq2$)(=2~xd-YD1FX($&nh24Son*y_Cf`JFoq+dY)Cr6qh6r5xaA%i98iwX9mU;>a_DgeGmuwhLmP_P(O^r@CZ}@apQuZ`zg0m zPYGpO`b>Um_Z&%PBAI2@CgkCF$JeN{7AB|u=57ut@~;!8Np^|QN|wSV1y2-A2jLh{ zOz!LXrF$y=i~ClJmSPe-tqiv#!(m_@gum4hU_0|=ZuRL3oJ~%j=9QgUHf-s+S$|{K z>^F#VN26bzTa7Lb8)7Zp!QGFQqc~vaR^+oMqFu!!oR_@#OjSwoTUuAipS_?yP3yZE zx?r#O+nIkCYZ-hB)7O3m@;^J^eDr&^3(rjZcW;Rs-Xn~>HFC)rbw&K`se%#swyntg zM0{hEmUJpmnU2$)YHxqjWq}X_O8y3V z(#d(RCwde(DV0vS(m+z(sp{(=-f)=U*Cg~m%TZJye)L_&or-|T)DN|pbJ)X;^J$P2 z_1BS}?gM5{1@VBtJ$JIj!Zmc2lt}O}RON^MuyP=J{pA-CK`8~^PIC8pDdAbbfE+Tj{ zE!$hZA+`T^M}j$dl76e~I)}Wz4zSSWjG!@6^5)S_^1IK*J>B4h!Ice~Agy_fHX&e@ z;OEqX1{rEZ@_91DV_(xZ^vu!eOYK)mkJDGJZo&}+CkaE`AONbbH$qCuwsk2*6UugKZpl$#tXMg0ne zf^oH8CR5aTt8P1p@&dvEcP-+On=j|3awq*qZP8rhLq!PJ!-8@XFlWH|l$UBnK_w_d zcAO`A6(G2>sh4Au(C)%G={||_#E)LXNn_QbbTJC)A-vn#`!48D2W3+fMgDuyx)G#g z+9xe!mWC-Vtf_7$YHk?{T?LgHS&V`kX|LYqh2-%n!`c_N1uiv(DyFB>6m&7 zK62XC;D#Y%1L3bd2heIfAuodD+7(b_|3d`W3DW(@l@Ta;zeu>{&e58Lo$m5|9YfuP zcI@p)E0Y3wZ8d*AHqm!WaSSHiz|>(&OZ|49uyk2VwFs^?-&oN>M5MvOAuLE?f`w9Q zDx=Z>*dNMu`3+WTC12AvodT;CdKlv={syTU>gS&SW2$3%P0sA>-dhK`1qMHrxa*~x zh&{b@Rxa|c=&3!_gI6N|*>QR2zROyNxDQzg3|)&TEgkgAD=5{>JCjr6_H_ZWS}}MM z`ub8{YSrZ7hVQ7 zJzdq^Kk=YWsiUm$dr8^F3ZzRZ^$LaLiHPpxf(DcccHe))PRU_UHWoMYTHQ2LDO%yzOaP=;nhHk1+4mEe1jB#m*!hURk*evWtoCvvcyH-Fl_$)2>d-1Fl z`N&J6l|8$@BPh)HNq|zKVO|;p_d$%DfaVa!mE^W)9QQ$s1AR^Svdid4)HTjkh`8Hn ztr8lFyLpI zxc}IgYIK5I^Wq{joFb`IV-d-Ga5;AQ#D6FJQdG^$R_}j2@+Ed3{ia|VvW&k3EWzi* zbf)=FEnLtFxnQ=$Ic>ddoBe41GyR51?O2gjoAHVNW9qsCq5l8BPfL9oSIMYwElEO> zJx&85Bq6&9$+%=Y=PH%#vbW00CM)Bdva++cvoG^F^7M$-_(MsT0%Y&KGsLbb&;D0DAGAzscD*{9 zaiIwXT0QD`+-e)K$`=V2S^TFWkMpMIB|j|=E~EWcs`!83m3GUZz%(L;d6oUmP_DhF zHCaeu0irqRXQlgb+DVXvRC}NJI?iZ8SL>F&d#3g(l_f76`rkCilm_tbT^4G}S}K9= zLNIKZ(ZPuz`D*eY@q8&qi)Zx}C92-@{2RO3N;%_99sIP18cebw+aWe?8j}1F!Vx)W 
zpAJyowklt?jWv`T(3T7&VXVx$YZvZ6$`!zgz6|^QXjSoo=l!xY^+rmzxM^ji@2IgY zs6(6$){B%I-ms)v;pY8jEr&&wwK*Bae%lLak}`h=oT%W0j5bD<9NoHRox~Ft^q}{u ziqi1YW++BpFMP#Lbud`T>hFi}mxsq|^ynjo!auqy2e7Ulj&t$|cYgq*qf1WBjZ%(> zFoiY1mqcHm7Z1^^LzpHlEO4U=HW5#m?Ti=&FA4_ATjU!c3B(*>N@iLPqdo zO%vDdA3=Ei)&hTADBaz6b84!x?7Wg(v~Pb=)2)?q-c8z*Yg4PMp&@i(RjR`C=#id` z*S)q?!_FOH5bFenaK9lj3%yB#HsYprBu>s0@u>#CuZQxUSOiC<@q+#yRrKGN&g&9? zy3OpGg+C>3xrkS)z&RTsjev(S!d?mw+=n3%?v zT=lM<#$jARsfte-g6cEOc@z+hGB8{;uWQ{U_-Na- z+u>EPF@5#@R`NYkl-D!&38?__N^UQYk0auc`GnMFYkP_C{{S!9V-<%HiAq(z(4+y{ zwr`J)n009Jw(_9D_0s&JR)0okMS$PGZn%cQ%BDdwMdz5;!3HNS+-U(VZ7&H$_w*?^ zyxYV}*L&a}nXN+%r{ek;l0n=-iDJy|#xSX*p7OPu4tvn`_Iv>6~xP`+ra8wocnktEJ!C<;!(LBi6Pfd`0RH~ zWbRt2^-O9N#;u6q;JxcGHCSNgY_m>crq=1d)aceu$=}E5Ij*NgTmkD@-*A#YfV}!3spOtWVwaBtnt>UW9}lQ- zMiRJEkh?c~+ejJUM6jN8zTpQs3`ya6m`IVBGx*X0zLLvIf+4GF^uA*?Y_E@3thNS~ z>HjqiJG`CNtF!vr#Z#qalIz$!59f7#-~fFnOH{$Y3QUb~aFOA2cUxHJI!{L3?i*}4 zlnPap%z3SoAarSePA59f#B1);u^i7LYi9hAS>}1p=_$KdOwjf@J!Lgo)a)h{%yiay z>7@i4Ek4po_Y`E3dzGP&a?{nJ3!mb-x1mC@b3Co3mlT6eC?K7dmm-Wly0ag2S2Rvi z>Z?%1M>T}7T<;3@FG_A^)6DkgHK)UWL9$|K5((31|77~=%FkABxt`dxi1Y5EOfs=% zZqtW7Mx~juAoAbw+3S`wEoSyul;LRU?|uu`S=M`E!2?Wlzf`d=igO@NHlyKLDp(;R z>bh#b_WO<5H~Vf7p|%lyN(w~WjEGG-Jz72xlCJAcsD_BdDkYsHgC{cg-wU8wf1l1D zC}$69fW|EU5QY8uk#$IS4=UGL2}WAKpSw_ad+TUgmKX8tBqwrmYSqwg}aqWtk8LnOMXkcm4Ur9 zEwnTR1=4YYN5$4Q5kH|)Wt)*w>cldfGTd4vnIqLP4{#^e96ZZB&Cj3ykXgKK@)cPO z^slZy)|px12))wMF^|nY(1uCsTf=|2(6lpE?9jzVY+M5@3zT^i zm)U4v_k{Fq(*ih=uC97vdc4HXt22Gx?Thn+(LSlOvK+Ba&z_|qe?KICUj3|yj~2tO zkNzrpt@1N#UpLHXVgYBzFmyn=B9Aqn%XJRv5f&bAJW%0m4U3F={ZpKMF+J z!il5V7>^7?-9V8hC}zN7i@0e;di|Q^d_>J66iiEZhnAB{q+M;xy{J|@S(&uUASDsAtn4w7 zs5HG4^3-#)mzMZ z=X_5>w7%7`gQmgIh*R;Ktm!Vd==NBNr(>8Ltv0%JVdCJgKEY64k+yf+R%?g8?YHcm z;sa#8+b+%|L6#e_6+NdCTW{E)_0Y)N+y?V^RqU6qn?x9>%(1~gUkN(JBx@~Jjew#$ zABUvqDss#$zLj7Zvj|9xhH|;?XSsn^Z~42giWQ~ib8!7Mx~0h{cSD%JwEj;BTge;n zvG1q=8vfu_;@!}Si-iivgv&2CiGwm7^S;nydnX_&lM+GbX1!gtp%9ZYxq`!^6Wjd+ z3R)toe&-rkc8pB6&P=x2n3dM~_pMhaC7-zq?D^m`7wmp#EP2aNUAB{GUZ@ez5Gk#F 
zyf~@+6XF?Ss~m+E)xS)gbnka~7+Z2_un)K>4eC1bM~=Xwg`>8|ep%gCf80NGb>&SX zu-?n*?6cE89zSx(iGFP_lTcfpXU&@8%Dp(t4V9DUgBF&PsuFatUYqUc(jSt``6Hp{ zRDxhII-*qMi8+it1$RaE+gUW@_wU}dTdC0_#b&U^zpPYVSuUS-6JSfRTzAO_OqiTn zZANI)E&RV+ZkXeR<^cFE}|`1s*4ikVDzI(+PsF|CTXGKPTmZgt{~bn9&PT3L zZk`g^f-61$)_UW4f^G6QhX}3dk1|>|3mH3nz;Qh7@rV)R|EaQUaBP8VycufCkpVuf zsXU#azd?5fbc3c2XWHY40`CBbfhxIRZ{1reppXNnWuG?X`$xUiTujnYs}~fUNN&TH?IY!hFmXIj$EA?N7&}F6UWDk z>l%X%p$HIn8HpNbpDEE_9x(gw1D?NkFX?z>B(u1xv9c9Z5R7>((;?ckSeNT&OXp^3 zEI#fj60<=r?;GMKl(``qq5u7qmwRp>=!jc}4iK9TbB}I`P2=FB0(^Ix&j@1HbcQ!H zon=>|?)kkydo@c^yAmC45p!FQiY&g6RxwF&KMh-4pPEDEglTesv&sHcJ2h3Td_Igf zL*qnUBM?T6BrUbpU`j^Mnbp-i0_$%OpWK#*1lB1@T+~D7ybgHFSMouW`OpjOHJL{Yf#4V>I!CT5?7J#L7axnvUIzq zYTTtRA$qiDaa%_yldHraokv)6{^FIt4$im5U;Zte`YCHJwLkdD&kfVHjWR(9c^V<^ zb{Dynl89RCB>yA4F#aY5SSbFX`jAX$s6G>Ajmm)#PqVPkYcO64E{;3tUYh2}76m-lQ~GjmTt?xOdL9o&b^ z4W>>u{p!Hd5%u;bt?_PIeTSd{sUe4m_Q{X(E~;O^3b8UV#3|6V_ky36aB$Q%pk05V z5rh89htgWSmkth4IcwXn{T`Q(Uc4~hb%XOBXU+_-Kq>%?VV6pb0orte$=ceq6X=+7 z$ZWiyn7WCV*VoPEXrDYSCoZBPsBu^TvesmJWXWkaiaHY-)V4jr)8<->-F^D0a=0@Z z|IHG2V8fUq!Fzr_L6RAeq0cQAmx|o&l|FWU#JqXk%%R-8Xa(mKXp1V^`VzMawDueK zbFcve4MWT9d*Gg9+cuZYR~2kSLoMEIW6|}7_k$_8Y*o6aQ?|Q1>|L1zS8bL%m;H;S z=mGj@t18%vwkAoa2x=$y&GPU}D&rv(F=+`~gUO8wnK~^4e1($RGc8GCUB_z9zBW)% z6XP|t`-`I2o4O+WB-AHR^K2#(lnvg!z*TP4U&$YQ`9q7KoK$J?X)WjZKS9!yqk0FD z?Bb=UTSlGSvZ4nO1zV01r%p>3U+&Om6XJqsWN*vwxt+&R0qJUH%-#gY(rh*`=9glPWN88Q@(eTtZi6A$ug6jB~Dx6jqh~# zZZ=?kmR>;s7QXkrnC<{J1yelxFLE8KqTL>qp{um2b0TLx7AiHyuDyT=8=fAdEK32_ zfWj1!-^uCchutbgSI3)X%>d>B-D#Q8EMz_(hiH^u)X|9d6s=m2Va6{)#i+ zHsYF`kMAlKU`&Cr09x=>LPCT9Oh_G!zm!8lTpn`oOSG-*gcd2sCkEuMrU`2tMluO$ z0&mEI6YPZ^&5JtQap3d(R5^LrG4N?2^%u-29D&5)79(e|0smt7uaCSTF3q+l@kZe>Klxk6E*Ox^!Z_4m3i zae;=|PTU}OS~`mjyXVRt-ZmX4;2NA?A5M;{f_?w7&uBt0%VN|hxlg9vvjCDURnDY2 zY|X&lRXgeN6ZtUH!r%k^?oT;0Nk1MoP7?n$3!m2X$z(T z=7_+Hom}!u^q5RVzJ9v0FqgtsQj5#&LIKfgn`oQ;9u@L%M-?3>rXutsoJhDP3jz?1 z^Zw~fIMF9_6RoR5&}E#zuKjm~E}Ivg1J=R&iAQAbaUu8=qw-L)-c5+pG)ajNBgq^> 
zN$vgtAH0|jH;LJNYX2p9Bo5#_J&9*uGKOL{iVDxgT2D-uTZgWp4^xlTRc4KL>q#@_O9U%{ zSDC)p#J$)Iyvm-zS@06~R{Bh;bf!hE4z!!Bq!DPIuu>grw;iFKy6w`VVVn1L2_835 z7guRUgFJo8?^Jn<(OxnU>l?;ZJI0KLPU^7N(bIMJ5}0)Dt3Od5f$BdXN8gDWji&aoyEtSWku}j647P&bAo%ADZ}umS^keNL6r# zzz*EmcX99#ok=ZMa%^>Lh+QG}j z%a?{kLB0qO-)LJO3!!rkNfvkbs@;VbME`5m8oCYT-v|GxqX+ZNQD#87HDalBR2Q_< z620HqHOo}+>u~Zd{Jzt3{iMysh#36l*c8N+E^P=Zfsj~(%2W`jRNhnZs7%&WAeq_4 znr|9ee&44);S>;>`UeD9ZhFZvo@qfzCqaYuO3cjXz+=SG1ymO*{H+cj1YGS`A2?&p zHM6fW(KY9iD(QIXG^7gk0u5L0^~PNrmS(V%3}U`sos%Q6lBPB~wJKA3TZM}hnr{*+lhpC7rze=5Yps+SEs5J}z!e_)lrT~nTnV|g z>vCHD=v!wo^Ul;eE(pf|+8$ZQeLsAq5Yz!v@_t!HpH+PKW|`L`F7(yGEdcY;QGd~Q@+&Zf#LkK%;T_IMlz5%n-EmMhDa4xYa=zSIJWq-+;t`6!~>l6I4vh~z2P>75CWy>iM zh*(}{T0M^76oATX<~n!f=qb#?(D?3pIont0cx19uC=I$ry=H--C{zot!eguepoLBEZd}YX0DUg?m zvX3vpq`;vOH#C?g=uXC7Qz*Uy(H2R?O!S++rvG!qdbZY5>cG0`y^06fPI$j4&K#EQ+%gT$%P z>BmJJErVqX<*ZpRVrxvih)DrVo&KPl(Dwl1GkHpGiAk0H;@?4&wc4? zF=1u;Pn`DPSzos(Dw0cKDEt)fB325-QCRFP?)IwG~)5 zD>0d@S?F8g?l8}#-W5Q9Xuo$GVLs^+6f{rpmZA{Wik60y*QW7iwn1W3S*QoMP2#hK z0yvPH$pb|7yGfVSkOe?^_lMkj(9f$4+EPS3Gq?2jDktBC-0pQkJ~n3UeYRvSxyYMf zRemmJ|M>i)VyQ5Qjm`#*80B(<=ROrkUG>7c8kA!81?|ewkY{;OAZm)F(?ak4UGcNs zx?x`Cx`s$pwigz@KFqVH_`b+!(LnwTxK&?n0;qYh^e%L>fnB*69gm^noQ&4?(xBeB z1&>+MqKRwoRePJ4?pXMARi>Y}U1NB9wrXde!tdj*+@|;5uHal%>1oxl6;pA@=&AH5NVZ|kPBJJP%39ZNCb$hur^{rT`>fgG z@Ob{_iSlO$VD0I@pIfl(5)I&UG}1Lh)vYDAUV{W#mLz=SwW@3i$8-Cx%%0p2-%*u6 z5|I`aXAWi{ly2e?i_DC(v%t-ueWy)hicbw3nPO!o8EBcTjlNtcCO>kXUM`gW?{_~R z>opZ+l+6@H=|vA4eX2Xj1Z6aoecYR@&h|@kn7|m&nG;BOMMbCc7yC+xinEWmZErcU zhRJa$aeonI6l)m%>+`#EMyNo!mif8@D+Hj7Yfni!q?uvq&0qNc*@kU?J)N)d%K<5$DOHA9;YnU;p4E^Pj z1IBSset&N8kH+`gzQEXST!Oa&l!VMvcO7`Ssn5c_{(eFrF335Ix|H}+7Fh|9{rqE6+DF{a3u7)H0R^!D7?^&b0Nr_k_S?;1|t4HgQ)*r$HW6M@`< z9Ua6UkCYy)fbwvf3UxG0z}=!bu9e5cI(@KlN_kd^e$m+Sg#;5 zA=_qSi=#<)FCZ@kM?@!HQXMF~aAVsmcphb+6Hleig}e2E`U>>-Emp@u`t8DLk4HB} zh%@>ud$s z*MHe;n(I6s(rdYxb46BmI*ge=WB2wb$Zi)k^W4vK>1=!nDE*hw3|)hUD1JG^xP|by z&`tuh00%S}kK|ix4@4dG@|cQ75tyJ3G#d|Lw?de_tFlNm_`A$7Ff<&giGHXQYa}xZ 
zvkjiRMe%A?#wr9bAAPGGr7ttspT8N(?Kg{9*By#Dvw|-kky;ReB#ebjXVAE-#Q?85 zX%2besl38htLgIag!Q@9?0~{iz>%Iy(_e4f>W85OM=9?2f3-4`!{bQ61 zDM3lZydY=iK=xT^F1Y)lH$_rKs4X&hRUxq_C8ADt?)rb3h?Or=mdqa5r8CSw3;tSO zt$x2yMpAKog&pZ3=GOexCrWG3&zSND*W0}qQEv6Xd13at_p$Ta`N6ZJq$xNI7S?vc z6v6=&qC>gYC-MmCE|2bMcW^;%{KsLI0$Lntf~$FvRdf~ev7h-&-};xA4cqCTk>1vO zYZ}h}cXpOP5n^u-38p34g@AgyHw-GYO>RT51VF@t3B6YsQ@_-#zC*#XS7J|VWz8$CV5JUlb_;0|X8bi*#JyCR5J9r%7@{2&aS>{*e`6!Vk} zVVj4Ha-EkdJh(t{U}Kc&k1Bc|$9bFL)1YQX8IOP}5l5d%3k<8;seFqn%9mfYyPUT< z0zb@?jrj80F8*VV45Db+hC)@2U))FGl~A5||LRoXfz=7>C7BSumF}01!53;B(yt%9 zV|9WTTbRRDwO%TFkP|m&S3RJPVFS4?-SyE$>IeV6N_I1 za;Bu^q{V%~j8Yf_*aT;PuPpKg2#IpeXjZn+9`f5JCkd-@-N7w`R?d_8?RJp<;#G2Di*Wn@`Z*`gJHNh(nh}eFj9&pro#YU-4Sp9S%)y^l^ zp=wBMK1YmKNMi9)^Q8;Xd}!7{|4lKqv5Z^L@3SzvYCBze#Gf>md2Hne#?Qvmi!OOp za+&@4+r4fR7&SbrLQ#5sQMq$HVkq}mFqB|XJR;^hxTQn7nC{PsLe@i_yHvQmK+J0@ zpblcS;HEJJI{!=NAIe?L|I4+qnVN$cc)MVBBAqj?JemprGGBQ6nK7o!wf)?z>^FWn z6KVcltID58yZ9L-+>k`B8&Ts1nt~zcfb5lf5xEVl7%5*0K%_#R-(r4SzjkcS1)%PD zP*e-Qvm@jt74iEPWm;#3*+%iQU>(0kkBZ3e5S&NeEZqR`hKn8wS=*qNQe0G z*(QqfcSvr|yV zZzu1$+rTBikq8@GH7hBZogk@|puBRb#;aT*U^I-)ajtDE&0@Nt7(=O@3#18lPHpKJ zW_>+3+pEgZ9v_!UxN<4!?5%W}R@S5hpcvUK}Xfma-VK8H{6u?O6>*R*QHj@J&LxY#e!rYUfg4Vt!oi5r z%FNj*^7c}RWD6%a76~vI>#A)mvb%2yPEO)ZE4X~r6y$jeWtuPOP5>60=w1NYq1JDT zNbar$a)v^;=>xOV5UW%71b8Ngm;eso#qV38NUx5qT)xy~!S_qk!N8ls^}R(T&=K2v z|6bp)6DBZ&DXH@VvURO3{Op*XE4q#?6m7zO8*>nQ?Eg{UPd|bS0L}g-p$cm1%qAlH zt)r?nR9F}UpBqP%#4Yqbkf&pSLIyY|heQfJYZ{s-fZ8Ix>BiI)BsK-t@rE;ghjE)f zPS?@R3qh#HH%iC+gjT0~4y;m(_}=FpeB%T)<%;iQ;0JIXlc4(M_X`8z10fd%$dM+C zzqkd>m|E@PQz1}hPgk87^GBMH=Am{FL}!;uc)BseC#Po{juo!)jA=}0U4%mZTp};R z^U(E~xo|S@SHRD;6&sv|P8S*{l;6zLRVnX1sX0(Z&a)GfiEDqR$8KrbJt>v!Q!z-7enlhuH zU}_;>R|Z$r90YfX+RC0jyg8c9vkB3|di^F^A=JtFLG=3Ae=*@XqbdT-8c0jFJQnY$ zgiNkPK@C~s@DAs8?%8HMZ7S)(|28*#E6F2Gi*JCL1;@!8TPV;p z;`x!2*-jH^FNbpvEoFO<<-ZI=L^1v(;wY8#{he{&vTSvWtj?f z$^!L_F0-8(o<6RkTd1IxBl$R}qm%sYGWVUMc9D+A?8|Riq1Bn;6R9qEnOhJZp`R0d z5%^;Fn5&&dUv{Y^bj+WY=OUM8zKU2CbDgrhWyY*J!+f*~{eu33>T1}aU@j_GdU9mF 
zQS+P5bD6*=8K&S{uSI6{sjV#qpoiS@%VdR*J=1Eykelv@2x(LGxH1W`=7%oqODZ)L z;H9_YU6WVG%Y3+*tBS!R7x5dcAyB|w?jF&J9(hHpJh7^Kk364g&oohh^LBju);8=H z+G1cO#+6ZL-RR{@`Qg^VUx;-etYz#;vi zt|va~l^f9G+ynt1kTAb`qjC{eQuJaI7q|(HjAp614_~4E_&y6@3D$`CMOWB{rI4Ne zl%5Ei_PoQif1g*hewbqn53{GR>jnV@^%{0WlQi++(NL|?y4sU$#?4INk0JDM5L;4|B{)f8hoUvg^cdjfX zh%$*#u8a1Bz?*d~9;tCeXU5G$k~syjdN#3FUAe7m1$vaQbk?$R4gFNx|LDs7)qaA} z`K-msEsj6k#f-EJEtNeP(FFz4J{d(%d=6v}S$t7~xfHONA4?WFG++U>AHh)n=N@UX zMszns)V53R^V(hAp$J7K>E*iR1jzI(O_MMiDsow@v+->*H7ZT+sJ?c2-dWl4Gj=Yx zJK=8;d4B&^hk?}lU>Y0*)U3}IfM`P&dOeBBOqMZ}D`6c3!4jdVmY}91n{5Scw=J<{|B*JOFg#Q!|Bhl4F9kkHj{)XQ1CUN&GoetzBTF>H0N&%JQ|oO%`E(rZrxkSjprjEWy1Pz zVUmzWMAZgEg}HC`uYdBEO|UCi(sI41fjT?Ad~gkuq?P6GoFn#CmL3aQi$JY4en~ES zRInWb4}jOz=4mAAg1K2O`?q6X7@ z9D%rk@%lKpGSFMZImWi5a@es_U>+veH3~}grAt+Io*I34{aL1Nx^Rb0@%!{w;C>Fg zL6=t)ejED?3ui(DbNi5jdrCDxa>hJ((J^>wNEkeFGeUk5IS?)aJ*o3rbbGhnb37z6 z4WakGNB2=^6`i3%e=+;lTMe(#m>tSYM|W@#d=(Zx2OT3`{wS%UlwzA&t&Kn=X1Saz z(APQOs>D=b$?a|Sln8Wk+qK+?upWhQB6S7OCm?^>5~MSCHmmnEP44l=-H-v&qS4%2 zcR?u*T`@o>;ErOfsM~?dDNCdEB0{WAl3mMFS{2MT)pO+#n@n?$bfi)uX{hw%n@&jo zPz6Q?;JSW7TXG<0<$k#%N784Y^sMzOE0u^}#uc3+o|{@^W0W6#9FZyBs@?hm@*&0z z{_-na`fT8i{WP`Ll+NUbme**1jEUkM#xM};pYtvg=q*L;$Uk0ePK7>4Wpza=m!H@P zAZ3+_PS7~(0Se**TfKt6$hI5#BBW&}V#m{xkbP5OkQOrAf~?CaPq)?&me*ffhTF!L zOBrO1S3}>dfhEyPED}Tj9lh+XJg*}pE$zRTWS2oM74QrqdAFPfKcPW`y+Few49u~@ zIi6zeCB2%AU>0z2dmWxi1n0kgqL!lLx8?9UybxKUn>B>LWkFCd4@wHK6$In|*v|bp zirLix?V1SQjzIS-6{!l;d#`WO|t!Q-6Jj+_k{3I4B?^f_9zCfm+oslHkDLBvd8RoT6~M` z2LxE`?DZ9(M{15i)I_Ioxz1?e^)D+Gv}~;Q@Sd82=l^y1cD<}o>y69mqRNBDWhlUg&9Ww8neaKRQxyyzgOor9mm;znYt3wNH z%+(^PLP*eg$oWZPoA$}s(ML~Ndt@O(F9en8&;1gp8zWgjZbQOOE~ZN*yK5Q|>3tQ1 zwEbjgLz)>3F^poXU3%HdlMM!@cKH7QEbEmbJf=@|}Lr0x**6yC-+2nh9LAQf4{)D{11uaI3It*EY zR$D(#q6%uD9{&vQAPA_)q*<9Ek3e3<4$@ddYFN;pzGl6&!kYIN(Es^_HK(o$rA6t* z4jWa{Pd_NWCBpk~hxL=HHf<;BP}Mb4W_!ziUnqWb7WU8lRVAXYI>XGU!P7O+&F!r| z<1}(UWj~G%jpevz!6z}-fGSyeE5Qyj3^qoWSN-Z@yuViNjM<#*`}>X61-Mxt+cbb9 z1If=Ivs(~C9+b>K_6Wbh^C%9B+I#yRWPGqno@XTaCLPs0*AG-3Kb?yCqbkb 
z??9+Ao+9=IkakZUsU2r@hF-=fU`uu;(Qou``&G}Y3kp|Cf&8#gxY2%fG>@e}aALQz z>ys)?!4T(?n`N>oPS{d{A+hJKA@Zp>mzIDiFJFfnCk59>_f_$e;!9Zb6{qE9@89z0 zN;xfQXPHJ_MV|nxH397en6wv)WNSttYEX(OLsjMpzifqT%%P-An=H5KwT2$^|zW*Btt)lLIX-aSJAbSqVP+ zCS%L+p&+1=M1VYnoMu*7-mh;Joa#E|&z>C`5vx(rcRBnXbDp*x{~f+l-jpXZ80`Ul zvC$Mk@2)O8qgGkH>G7lr#0M4E7d|J!^SXZ7Z7^lWbKm^XQ7aMwt+hl~OJT~=_LIZc zTsA|7Bu0zfdMOO>dZt*n(2E!1OnVZ+y@|*~YqJd{Mh)1<^hShD24u;6eT#_yYasVt zC5yyu%*0NCXxB6KsBGrhy8VGr$t~i&GbXmz*Iz^M+?rpHmzA!FAM)D!VRUSLKMg-p zkZ3V8hN57acQxFQjK@P2giJmKkP&bUO^yW<4$KGo28i*BfE?g!S`-Gvxi|m!ozD~P zuP|&(?rN#D{1PlMJjx4)TKiyLLz3KIASezM*8bfF`%T;Ssh*MtIg{Dv^FH0qVwi`d zig9OPY*Wfb2=p3^>&QgcW}dL>u@J)Pe1!ga`!-3ULHzWHptO4<;xl7A9N4P}>$A=g zPFOK8qJz0WvZ{L1VeU+V-jP{D`@Jj4-ojV=p5W1kdw*s7KfAO#lgIMd-ok|E8z&(5 zAqk&ed2%H)uUCVtOKBIgQ3rAikG;2b4*-T-!O(6r9=S0Q!QEY()c+T~cYOdPAxIm# z-n;DC(P4+;w2 z9ZAlp%~7tG>|qlx*voav(EwLzAhfIa=+t>_I9N+TG7IIt8m$toB#ZxE?m$_?o!(F_x zU(^;p2OMh`Ih$C1pZFQ9s)}F3hT>3Zf->P5lyYyc8M;OlARrH`%5C~nt5U1`&$<;f9L+pMk0Xh|&FiJb1J~;4 zcLQcS$`{_hGLO5Kb*PPL_Q6fUcfAxXB2PZ=ll)CfK-VOVXmVvBOf2+9H#^@%YcE7v z;36))Yuh}V;Dn-kFz}{U8-p>3{W?5ei+w}1hA;gal`Z`L>R_2y5B)=Gcm3b04kx<| z5OdV&UOM#r8;O8j6z#6+i84*=gV&u_WS%CHUq6J%7p&ZD zLQBpqr6`d#zS>QwHrM7;_5V{?zlsLpWQK>ym)_%Im8G>q2Mx8~twD7=@f$A4vDeVY zb*M)fw_Dd54JB!Bq=q5x*2BoK&$=hJM%&LAs$++AY}LTeIhheF;o`-k$U)|8XOSo%2NtbDmKp z7aeQnp}2!{XDIY%8PjtMyJg6RK0r~GD;-Bq&;^rYQV)paJmx_-4GPOzhJhc@FGZ+| zYpcUF+$bFi_<}tQHBZ*cdfdv*>vab@j@eH|78EPIoS#o3T?|i@?SjMuOsT|CjHXbq zo8hO0{pbClX_^xjIL04-Cpli~8cnbHMp?`0CM}gH!9f3n;|qEe=pt^cdj~f8e5Vox z&a5gv&a=)G%5vGGkM34glFSb19avG%nY8Zw2XoKKzDK$Ovu*+rE59X1iI(q#bc&kp zuW0gSqDs#^1o>0_hvgxOHK(g%B)>Uykf-OqTW=UAG=y$0>PD$UBeamNqwI+c9HtXP z{%>sphb8#bk8g539sGHtz}Dv)9nmOqhsimgAb8Mg$gS21dF*y}$Bjy36P_qpSz$In zQ^!w^a;cE*!2`BJQfyU6El67Cyb;5A6(gdJ zreh|EeDkLJQOs$E8xdH`|E7NV{!?_4-4A-v+1yf#&u0HT%{>s!Ov=*XoaM2sTqeGj zj!Qi{KM>M$$TP=N?g*Zj>6bP6OyAe51`ggxzMtwxknJF1qOAz`>^K6juNOMB1>uc1 zT%IJ1N2E5YsNZF8K7f!5^668gizH47_=UHVmysROUfPT?cOHbqb1t5z`biF;Q(5!5o*7;XZ{ 
zr7;C7Tpq39Q-n00cCXCfVC43}=*iR!Af4MBmp-sd>JM*qZSKyuZ0jd!!$#60(nY`w zDS0+RyzM<1buwfb(@pZsaL)JRWsr=q6_coLoSMip39Z+*7n`uv&NDts_EJ7jDrzv1 zu`17Iv`kNcw?_~8o5GSrG^9;q=kk0-4}bj=x$VkB38o_qkTL~NZkhc99x+qepSm;x zr$zv(inh&pWZ849Mu2aIsm}bj8dnmy3b5r6DGam6>f}%~Ce0>Q%`Kqm|U zi%SgS+Iwevrr1!V!ti5HCyg(Kt9R2`$|NZnP!_`|#p#}Mxsk^>zSIe{*V7=?_2hCH z`-9*YTB)1;Rk_d-!ZJ0F<)lLF*5C!cTZN&#{e4zu7I-KsL&0RcO9GE^KKoeji69SQrllS<|3gZWqUycQCpZi0^Bmpzn`d>oz+(Um%&I zF)RE1_(c!D-QKN=hA1smRb5fu`62X*-ol+vUPesyvm{@l227&=3Y2WM^c?(LGqPO8swLd)#tya$=6hT4G7!EjRkimLxOwK&AaCgY@C=Z z+fN!2vcTChi0}NsWCQZF+fZqg=JC!rNIjHW`~?Yi><}$e!V1MKvi8?_+5Yep%bx_* z-h(1nQCVUQdMxLYXZyymhGj5NwPGt^Fzh^{!O1Jo4 znqZM|dX0obHNO%Q*^)%TKolEm$RWZ+9&w;$o}ZacwL$H-zJ+-6;`xm~r{e0uMUZqm zJ$cI_ElZ3_*BiFFw_h1NQ+UD2T+e0KPM2s*N4n0fu_n|d&%^W(%cmcx+(=pN*J9$* zF>|xgr{IfN$;HurpeS*}r*T-BTx5#e?zm_%4GDcmBJZ6uxD>hZ@?;9UMzQ+iZ!2;5 zO7K;ys{AwSt;LOmVsr8yAN2SwNS4~7jj*p=hTkeYo}fYMSZ|;icF4Lyos)h~-He5| zWOp?5@ttPO#jo0}>bPGqh}}T4(?&nikFd2c-2X*}v%Tp4rOx&Tr^bH%&b;pB3z104 z%e#!hN^j80`ituylpd0#%@>E>vPOqzMUdc6L9vDS=IM%g52uUD#R^SjmhlyW&q}f<2MBh><(1~(( z>AlDCm(;yH3uf>9!+?Li{0G5L&)nCc8rg=LnI(7%bJ{iw_dvSyAm@-9C{r9mf^$3N zglpw-#uXY6m$(GCHlu+9I~b8HEDrsuhK{MY1a6wSOA%`_-9fSyl{3%@GdcBi`_{CzJmWc)K`^Hy@k z#cNUjnQPr0cY=%2C7lRaD^-N!i|TDOJIDCQM~QiJz*mUc&5>-lT*E{H+HlUKIaz9s zpNp_`U~R#Rs?Rc2fi0aGJVLmS8X+H|Yjvsd2S9wj6hPbSSx$^k6n&S?3-j*X@lVj7 zjlAWVIqG~%m0_nt&k#~ZEe$|t^m1fWTvAZUQf{ctbx^zY^;4w>1OH=hLBLoV#L4vW zZkr~pyDWCh$#6-$3BZW?n)r~B6KU@j*)Ipri?NL<2pLHe!SZ?_<=d2QJ^Irg5L^%XqKy}eE)QTi$JBkw%E$SYTx z6n3;(IhXv$IQNM|bfGn&uBYo^c2Zrkn*YU@Li$z9XWGL=_x$n;iPnY$gn0$3>b!qOPva_K)5L&Xi^Gvx!{XkTUgKlYlNNm z?o(U@Lq7bnO7VGY1v=Zk6J9S%pOi} zHWlIQZx^}M|B$qom?|3z3|6K*M-29;?Ct>G>B)w~=(A?H8m~N4_<%hBN9dc2#gt`d zyXL72+m}nQJ@@kBofhbE5GQib?w6DORhhX|z@>TbelUG*B_C0XP7UPD!!Il^d~KDU zc`Xo@0*_bB|D(e^tmk@RhW~_ENrYCH4CP*gMcS8|d%PvX*Icty+ z$RjQ%M)Tm1uI7{R8J!`wPrcp#f27S`4qp$Q5K zxR{7ZsS+>ML67z&!n&6nT<-eE(@f*lfj+MphTl!zGUl_nx}(pV_E3K7xInm7eHwN5 
zy*$i>8AUd+MAiwhcuSI{T#U{3thQf$=S#PK3dyfe{Ug=REXq;u(RmRwl&e8=~Fe1FgPKb*Z|X6~75u9>;A zqzGf}zifv; zYZ-35`jval$5vjuObE$LfhLF+a3m50qma1b8#Blj^iL_ufRk;jk6VZ1VJ+X;kvoW} z_hpd~Rji(WY74+`WFU`e!wfFx<8k=do)N&712-}YVEs_V7Z*9=8RwZr$TgI~!t2by zlWods%l*(@-rD6^14tgu|F?{zdKT#A(WqU1DGm6ETeemMVltVdi7&y5Y1L`&OyMK3 z-WbWvxb7wIs}yFt&-&GGo4mnq6(qic6y5sQmo~kHTqZa&8&CNZIlf0VEaR|QTmIg5>4vX-b$(l?gPMLS!KJ?qc_`f@bG++0OLbY2=+@0_|R`3;n z9tr!18n{y3Bdr~v9Y!gh*6J~$6_)^&_{8i)Us_MxGsipPYX1LsD-aG*_qWW%0*$!H zmX-k#t-($pD%I-^`@P@Y=iPO#ZjapXZ>8Y{-KBd^<1u{Xaji%50rW08kK!C@E3~J8iMMY$s;Rijp^Fcv9c`HY@($NA{0E zKtkXZ(~GBiKYMrf* zjZyGA`XBx&_=P3@GLZ`RMW@7#+bKO`c1d1e1>-KJLnPLgTUPB3TGv}Lu8N*qy_@^4 z%6!)7OMZ5RBToG3U+3423<=>_Jl>4S8mzmMG;0%4A5w_rF+3=yqezaPBj32WQhDS- zHxw_X(s7~hG#FRA7%P)oFeL^ldhoBKl}H`A>ViEyt$p7Ag$$OJCpzu3y3<4EhV%h0T3ASTPc-zvZd zJe5oYe3yt@$M6G@K$hwaIm?!cgD*9^;0ABXS* z@y5ychc_~EqE;-u8Fi<5qnH8cZq?aZ48Is?Fuc8x&kbyKQDY}O9lT*Nb?4`a#-5fc4n9=@H#g_?n$ZCKw@Uj3g) z_^B}6|o1f~Temm-3Gn#ggky$-Y03a6mT4Vna%JLLz&jy!bHyFq0|2!mogql)QG+h%{y7_4ts|Nh;v z_m8ca@74U4ToKlC&k*&x_=4QrX&L|k<9Mlzh?6Bn)%E19QArJ5im=WC4Sq-w)xQso zBB(8);uZFDzea9=+vn(jbgCa&aw6OhKs~q+K;?A`bD7=s<$af9u94jFesZw3@Lv*a zzO?n>^ZwUf6$kyNN{&$~`3LA@jOTz{0N-*;6GkV{baXwEU{{1URhxwngHZlaV7m`5 zbL*6QU6}J8@}5N(N*^?`Y@@0Rh5E==10tiQFVI%Wfzc!LK$vAu9(x{7%+>HM$jgvN zXSAN+pa4#3CgZxQj!`-tx9WK&`dkVZ#H-po=@@tefiNU542l9%9)Qx;^3aP1$5vVuCWi8IGU zdi%AcfkobMmq2;Bd~Zp<0{q6rwrNiu4c)>Z0yr8#Wn5GuwQY95JuA8mkp%DXL+IX@ zTfSZ=KlsE%=EmnW7Y0uVKW*n-t++FD+SaI;y?h?$EN?}<8;BC)GQ5PMh+B_=6#7}* ztW&q>Q3lkFBFJI{66hln&||uB;fN98L=7bJF} zzN~r+JB|lReDUOqbH+FQdC~DshhrU}x(9%PL5p!z%l#L>j$5QfsWo$ftXNxJ+jMD z$NthL2@Chrxcj>n)ib%L297mca*q~;9oVh9^OLR#q9@a#w?{bNYyzf?Oq5c6*H=gC zV5chYSFcjJD&IdF?y9#B^V&)mcDaZbP!z?uT!r06xHs;U1ddviHPTYN!1v~a4KeCWH zqNm1E_iX~;AslBSt5T* zv=cn{y`CUUOmuuuWx+e%SUVog@TULf$C74-<#SVWN6`C_GXI?{!%EWQIuQvZE;ibn zLs8rJ%$XpqwJ%6|eC*6@zjd+tu&xR}Ny7;|Ig;t_IQS`3N62RUIk%qPTsYZcHLB;> z%{zBL`NS?=GU-D?MmaL$+euVync`#4uEqF24VpG%f7qR0birFYc6Yh{$o;c=ztY@Jch(o~GA{cB@)Xn`Kdx142d!Oe 
zSD(QV+n0-anInP6Ds8q}0k#*7$Co3L{rm|D(FBKt!uoHd6s+0{D8Xi{136wbTvKdj zEBN*L&&S5vl^^c#-hR5QP#{IoP*FNPmllDy9k^!ZnHz9L8*D^It&+G1^-*^e4n2+E zs6eihymmlOhzP;i4!+!4fMri2tHO6G(;qC3v}b1~ozzr$D%A=s5t87Q50M3_Ia%vb z%JXx{{>=Pg%K9ShYo%uocccwUPQJ$b;z-c2Gy)o>#QfaE()LmeZDfl|t@C#4h_OvB zDKXqNTb1R5K#m~h8cuYAR(mJ-##pDg9xkcSa6#*f+B$n?BLq2@5wx_VNc^-O)zAXR=mKS~WRpQD( zvs_HQUpZ!PhyLQVe@@x~zDliNfp%JqdZW3aue?)@&3%>|oyqrnpzbR^sbY1K>L*m?Y?{VL2}cqy6e;A9xLtm4W#I4!riC!o3ueZq_e@%`zfoMsgJ7VK`>l^`Zh@b7nwwXO zDpsR{5zSRW5QIqW1E(f$BqM<Mle3>C%Lozw*J`TcQD5~7LqDkM7+ys zzv{pTlw;%+e>=8_KODU13(%-fx%O`@pyVW^&Z{*$JF=HNQu4Y%YqGfv`+cM(BuK%f zo)Zem#RNsVDts5MOMQ-rZq)XXj|bR29hl>1Q&2=_oVP_GH>C?Y@GGno zrJ~r8B`w;ytBOY}s@kvtbM)aH|5Q~4PNb7~8QJ8HprH;a2ka0mTO z)RJO*B5sXJNrnB_#?`dxZKH{pj-|A!b+iLU{H6DX$R`cFewLN~9{WI^TU?PIp)1U< zF9qC?dwKj+WtVe0eAx4}ph`{c#z3nF_ z{a*nB5miY_@OH&)R7B@oJE~ahq;r+WtsY-O#>w3=Wlg=vg@*teK#eyz(3AJvPa=;Y zN&0T~yAF(|uF==F6o?&BW)B6*Vp|ik*Ubz)Mi^e}X(MVYJ^TDpxI>@F_ZlYb0-Yt( zOK{D$2P^FiY9479xzWLDal7G1Vsq>I6NWCdX}8QnurWJjMPskmf@VyxcFWjY8>Y@+ zo%RKi)ne5$q<783gR|HL(xVqfzb-PQ=u19lhd`1zjTH_0lw)WE+_9Bz<2-SS`fa}F z%X?9vJFT9;EDluBOa0lF&_u4#cy6XmgTE&R7K22y<%Rwl5Uo4hMOj;}>UV?8e5!Rl z;_ViY>T+?5tE(a#O+vqlOBrT^KO!eZTZ?C=EBN*6yRvkVO846bT-eVQzzX?5$=gGdzAPAC*e?6)h#Ybjx$FJ#ZvZ)&~u`*1>p6e&grN@6#)gdV~| zx6*SHRtCAUVq*`BmecN!9t>8Fw2(8n6AeA z%lXk;e|HmZvLV;;a?NLB*PX#I&*$1-A^q@PyFBW+}aN1Axu{c^ReIqkhaf;>jNqz7( zJ}{;@z%blfJOK@5ZlH-N(+}wYM1#D|51M)a>iUcb5y}rfud^fv=-=wZX(pyQ*qy!Q zL0FbMQ>rO2M8S!yx4);~Q@-G3)iv7w*)B$TAQl#u{O&A0GbH40X99hvI%Qmn7m5z zBpgM^y(VOskQ$-d*(an4!E3+p*e>wHdIWbu_3$Iz@sxP#_)LGK z`mB|D%$Q8;vgN}My3%OIxalrv=_Ll-WQQ~KjHM3^nprO9utgUWOxZ9LC!gi(+$VtRp#%I-qD9GD> zA92G{aa0lwrgTCcEgQ8rt*3cD@ON6VkK#ezU_6MHBR%S?{S~u&Q65mqd8c#DqVriB`T+9D6Y4;Gc_`JW;i{{pR>qi;6CH95&|6 z8=tNR4jV0>e$Gp*((J}oIe5@L%%hZCk}KEz_FJ@LC&jByvLgHey>?I`?)7S@WlyJV z%-M1lNIE*~aGjullRcT6&~0UasJv(aqgICXfaUqFLSM|I?gsK6tP|#w>z&RLSSi#D z`|Rr0(ES_Uxxacuj30*GrLM1}tJPsto$J~`hbXg@_sv%+S07|rIDVMF>bD3!2Zkfz z^VL=RIvb`+n=MAw+zxaPd3gk&gL_8B*UUJVp~temRsCXntO1tt;s}GRl;E@xlSziX 
z!ym|XbJ0d}0v-s&oq!KTWzBk@n-~YIN4m=caJxk7SEQ(1-+I>KasYM1>BTzvh}itN z6=K5{wfMwx!eyruH;n=z!rN9#`Hy&Jm8*WRhKCzD-VVdwGr5BeWqvafij&UX?=r;O z@OJ0bt%Yud+b2`Hzc4ZK4GM5@(@0Fs)tm*np@Y_V1VoGFe0lc_c+63;2DMQ~ZQ4P* zDE%JS@qN$-ol}cD=#&JVcl&T!@{f2z?Q2Y)xq`owyofddhFvT}FXJ0k1OUCh2Tq1k zfD+lr6vmK_KOgUZ#Lv*x_oiDm^IXO8qby8NQ61$C0r%UBr5T9 zo;SBT+SL;6nfxAoD_2nDfcj9cb`|!X!p(k+Yn0HtL1lJVUy-W)O)>krv)%MTijH;W zm$ErftCzR9Nb5qX?a@$2Tblt9x%BTEmFFE<(ooFkInE1GmMyN6kB&DH8g^RE+_~=x z5}(^EDA^U^zWGN0Ny4$an)ztO?BCb8QL@vO=(54_0eH*go2Lr&Ij76vsmPWK(oyLl_sO*4f%mj1^;<&=BI9g3X zPSub@2N2=pC}euY+Vlt>-T|nbCkOX7VM>XKW(tGq(UPl=h~N5t#-}geMDOnuWizCxugg|D>+Qk~&05OE$3e z-SXQ(!cL;i z)*qgk_6ynDSNm1CVHxV`Vq*xu_cZDBUP36*RQ0USAZ_r9@ZkfrMw2b^GkIS8m&GBW zo`hpMh276c`K`2l;gWmJ0^25cKr)WVAS>~!XT4yLO>#Lqw$cm(-7r4e_2=;S ziF!U+V?9G=k2*5X~DUfi~^OiWE~8UO|abe9OLS!@rgHM8eACg z<^X(*u|a{Sgp%Of==)Bz;>)VU+&L4xHH3xG<8EuF_^(!OI%zT=sZWKRznK>>vMf+m zu7+7k+x3jTZBY3}={zAP$fPMvy-3JfEx;*F>9Y8p{+78VTWR_$5z1nc$?P@mm+<%I zACpYec7(@AhnyQs(H5M}b&^?>v7i&v(ZIoE&4&O^%F98smOnTjx8ku5BX znh?2LD}()I-}M7qk*}JA=%;_u0wLp|1A`jKdeh{S@8vr>9)r?oSzU(KxG7LF#PvgV z3GYg`@!q;1hPW6=Xv3LLabL~}aeyVS)45ej-%Xx4OqRC8l7pJ-e@IdvT1b`V_J??V zeczGn>|TuzS$bHbBY+^qbDT>Fn>!|1N*fS)MU|=%MG#auoY~C)!D-P{q)gQa5mQZV z$z}JJdApw37vO!e_tz&}G}<05pnobet|Pb;=d|{DBSTK~$;|`KUdzDk63N)RyMzOQ zpUI_`U-G=1%jPF6aqq<*r~T>4c-~OPA&(^_c$tv`;_6LKh@1bcCWdxVbKv|#WZj!b_)~Y8EG2+Vj3OLLNG{0nvPo$B;7j|G9UiuZ9+b{SZQp@ zNcMu&IGaEBF%XX$+`)A6Wql(SSYV?4!KXdOBx$>ZOxScu`eSTv5CZCI=({f`SIg8+ z6VqN&f^`2KikEyG#rn*9c%<(7<8@CHJ}Yq+Wf7QGV6s+R+v3x{E7WehwhcQ36FUPU zZzz&9k_iHw)^y=ucT03sEcGnQto>`xuK)oF`;>>ZH|=8rDbesE9xI3U+iNpZ4B-n412hkR zp^v!x$mER*rF_piCuQcH2_Q>h*#?Qzi+4NsFK;bbBZ1qxXrY_gs9?n05XcQ&cyY+L zPvhaPzujCEe0!qbK$W&|w=7@Zz18H~!VALBD5}`qnduYqmZoYKMY>q&rJ*;aE@7f% zl{%HpIyscF!~;G1uVE_Sy7-XL5gd2gh8flmFx<7=#5YFHX83aFJst!?*^X9!d@aK-^jr67D;va|^Mcz)TR(my!;1fUy=8a+P`if&RDJ4g zj=@w>Zt6p{2J0LxlDE-y>Mekk@Z!22f)yX~GkydV?)fqUqDx31`RMk;3*_FcY#9s^FSsd+H* znMH)=i^JIyvc0axi53|fd)#F?2bS_5+W!<{%e`hIY 
zEinA=pm15$t=#U0coFHQ-ccOaiqP`vk2x3MXYsX;KHHwf0 zq>+e+ul_b#+(x?dYdRmF3J328#P7qnzX9$h&O2MuN;mFu&63=H`W=SWIASeU`%^Be zb{0P;f6)BNnymqQ+3JsmiQE@bQj+Aybv%D7+%D{{Tx9ABNFZ@O4<$=Og>05_nIWu1 zwJ#6u1rLK3+`IL*c9g9h)BV6+V^^s5v>ZjOMs@vJ7S|>Kz{C2X`D?=yziC-So44~O zRpqkuty|Zq7pecgF;P$5N?oueZAn=pnrE`a26rBXVF!z;RFTeosd{gb-izt~lRhO$ z$zC}HX2K9qLrvsx-L+kj<|0iPcKmEXtE1O)`+Ff?IQHE|(0-=n3hDV287Uv{6MXR@sc3F!0W7r zeOhrMa>xBA2BglfT5nBVj68>6&WAA+_zDk^sFy!cOaxjM-ce;~lrCy;D7>X1FKzhb z8G)e^_kHk{`=}v5+WNnLmN+;*7xd)t2D0S?M1pChlC@Y0FNe+^}JaL2g6@fUBOJwaO;EH0n@9Km0#{+~! zOYL+jRhj}lEiuL*XQfHv|A0=oWxFLIFw;qcuzVb~48K7=rR%4hf z?|?!xqHpHSjfL=~LcX}jz&2UE-iy`lcBpXUk~7PO;O3GZIm8cVH1*SUW+BDg!kY}( zb@x2s=?p)IDEqo%F90R)UT=vznoL@h!wn`CmYyaWiT^tLbGY+jT(QBLx}R#CL)LGjBx3tO`Q7L@Xs9 zZ@=Psr1_3b&&=m_Me45RDBI{GQU^7B)?(M!17#@@VW8rw4Ug=zS$`wWgoNu%k0yD2 z7A4-4yne~la(07S^!8jRL#xsdjIxYm7DRBAX;w0Y9<_KrTfFyIbBAU1r*sy_Qx7vyV`l4Z3zNZaS)r4#6PW6HL3I9W$-2Q5FGmR-TXEIah zhkDZraaT3`1Nd!-DxdYYGiWVOWl7u1ojo`K3GayHiZ}8D|26756LL?48zAKyGvAKi zx|j&NPGIw5eUhdBzV+v;X2QP$nvsaoxAN4ptA6x&$NtGr{FnJST(|S7ab>Y1f7E|I z!@Wb7g_97R4C3P}v+6q)hde>mTxr{8A1l9f0Ocrcm@sG)2>4a!G5@T^>ZnKD!di*R zcC+b$(t-4jaGF@j8R8fPv~KP$T4WE8vJ7PwXR-B*oud#@XKXs}n$r9%1H2jjr^LfO z+b0AcIqx%fY>!9FOK{e+%ElhHeETY`?1r)S?t)nSbziBi0)xbO$D$DD@vQaX-p}AQ zDoG5$=p{zjtcsxAPJoc@6Z`u-nKqAy0ie9bUs$&6je!#BVVy&7!a7y9 zzErF8>t>w)odaJI?8A0jnrLRDp&^guj_oCokkwdv;zBQwYSDK-L>>{s{`49`q(bfnYE5eQ@+aS*0a1+Tz#K`HF1U(uzhfLUa= z6NJttI*~rwe4BVvtYGjDhk=k!SGp(^{(s8(2? 
z38}k~*^(dcm={V~(EgoL`Gx-KJOpJK-wtq7A)(al73?$Z8obf%&bmExCgz3t8DJ|d zCHCn9P1h-6oZH&l67H`vvm@I)&S$Prll8z8!rs->J_Iq(>BYzXNOF#bixjH@jfXD= zqwV~f{sD9~Y|p#CX4PFk_v-p)r>5znUAl5K=mNfSFLp)_@Ufecw@chpZ+{_Tljr8S zn6%s}RMJnA|Cat>Y>0|vj!P!|We9*SYp9$|q6IqLQIpB(tmA43r_uYW5InkI;~ zIdhh^cXFa{hZn=J6ThxF-W8IaIZfOY?s)?lBS79x`p_SNqEZIkBo@BwISDXF@#Tmr zVeBV;x%iUQF826p)84etYhE&5e6nP^9Mw1w#oSTzdg#YBGLW;hhqayh1o4AjFsi1# zJ$alE9EsB>;zOjk#EabtSc+T#M z|8>wFMq^Sk3PeeO~9mcto-^YO3gm`e}uWGx4RQ6$CL z&m><>=IFzgJB0YLy9*A4iH;v_Y^&u&K)2(YZrw6a`a7nL{)&6S2`}~Qe84Hl3PG0= zh5*CEhXr_OnI;pjQ^%cntgqMzI7gc=`9COy=>4tHavcGThj&lk$ea0#`!3(x@i$=C zWVO#Rrep@hfy+OUwZoySOh!zL7rm@NgF#GV5Vi9_NrGe?KF;Zron)}<-TDeV@m5Vk zLPwY%-Y`!r&QjWSvCcN(*-RQ@PD3mup32;6nrHQpI4GUThI1mITZHlw zg5Rd@S)CM^gRs(9yqboZ%THh7lbZ9T(dG#K6t71}vE0dQU9Y-*pA%$iqfw*K-r*%H%|)XUN?g4`HQgu^TRat*(~S`-*%$N_;QCQ$xN zbIO}4Ui6G10gON($5(o$Eb9{RxO8+rHefe6{pCmm6q@z8H7@)kda16Urf;R%xzdXH z-_m{_R=g>+8tus}N50MD;%cOj0<)n`Q&J(lE24%N8HBnF)_5HqeQH*X8w)-;p?f1D z`$youwnI(Bg%d9N+Hdbb(u|!9RDb9r-7>!uOt5r{jO4XM=%XB+U!DnK^x@q^PW?sj z#_+4YeM>;M0o#;PS|-e%|A4I8Ps_8%%o5}3+NapH1d>k!!u(- zQQN8AVHlrjUAx{`_ev?)?EBiYMYB-(3Mi^KsC{XkUA+ZlXR!Ra>d-j}$=mTO_wN@$ z@EK=t3gYbV-*`{&h@O9-h>&e*P8QWJy91rf6JBub^n(R@20=_ie7kq+|C?`VvJ9-Q zB~lL>eDfJx#HKdvYvh#qVdG~{1vxA;<=2-4H+IeWh2>@n>%Pu$lQLBU;>T25=B8C% z`mfS;k8u!xs>o1K-Rj3&5v)y6NqT$2R@Ya(KmxpBE{_N5+5`Zy01*p)8=hCTAHaDUoreriFKzrPqsxg`{@6 zHP$DHJ7=?yMnCT|+46>e9yfW?A;?gt#H9d%k4aSQVG$U)*1y!!e5TZBtB}Pfw;0>3 zK4h`_-h@eG@BO>0Tlw3SBidD8_oGrL6~8=p8F5Ba@oXBvLz=H+g6PdxLUZePlX928 zcrQ>Go&_VUJ{D{an($8xL7`B#3mqv5sicJ;7qD8mrP&paCenE+drV zUi~_pDC1&1+N(s*9`WQ;5TEU4F*WHay*H?@1?tlW(+vjAY5Uqr6SWY=`tEHN2lhsT zio>)^kE@Wm=LicpfDU`IkfUEg3v*tr1OXAOBh0Ia(#?OeUU@-$LOa2PpY*PfiVMyU zBCW{4=+WWH8=#Q?qUg(N zb#k_NPO<=yywmbkd_o{v`AR#snE~>vNp}nz#Naz*$PIr3m!AzgoS*UXkrCE&Z9ds z)2Sr)JINRJ3ObM?TP*Ojz6S)pFhfPU*CKD}CD3uCW`t*JgIhw?$}#8eFH{7Z4pDQJ z=b!Whi}%8Fmvc-kKjmhLqAaWLAwPKa+iL_7!z+J&+gAVddN|_;=&CXu?h@YLSy9sU z&R1bnA+5u<3EyZe)r4)ade(U1VZm1AB6qTWENJF|>IM-zIvA^(hkaUm<>SF>Z3*W4 
z*3a+^ixovHW`RQV4*iuMe6zWzkNmqv^W!FG@lK79M)BtUzb1ZR{AEL@p?tZ64b5q0 z0{g+2H|UcD*#h%kj}9Y$-R`G^$!lLeIw}xQ9lRg)EOdqnZ)v&RV}N_Us-ZOUw8~?! zph**s$Q3*?R2R6gEmx(3npfpwmvEt9ugT;y%^M@(gRBpvY9}bS9$uWqvmnKA`A}1z zB+^$Fl2%aFUm}hzyYYrPm z(8*hYxh~h1JoYQcxmf%Wmcl8lC5mw!UCgFofNc#p3IGAiydJdXzP)+C4BLvE;Awsh zJ!-k8k3~w^BD2lN|AJ-C{)dixH=`P!TNxRCZrlM2Bv!#ek%@ua=^ek#=YvfMOcyr% zwfPGw+e^SQN?9&gO3>nw$rPJOVNY^Yh=b8Ko@gzC*vVJuBFJ%SfQj{Li*Cn4KE3JX zdWq1lfMPl>>iaHPrAxmePjRmhHZaH13bNAaqgVgcY_x?}a5q-Hr`YcQghK1tP0<Cl`H4>Zn#0Y{uobbf!<*3R17UXg z>l+(>01D>uBo;Mv^tCJai5oIJpF=WUH@BSWAD%$@P<-@5H;p5saM<0(i57WqmKO~H zioqLu3DhQ<4vJUvik@hgWtP|UD4B2YS7{j#W25gJyN^5Qf+F_wGB*X2ljFI%wVHbmXX=R&^4%SI zB%C~a$YIxsO3e>B&dArZ$&JB*EJ+NL(V*Q6cMa|rq0I~1ODBzZ+(?C1B#(vbSUtfv zyAa;FFTmzS7C)3mq44*SeyleA(TY=sFd!2 zH;TrqEu1Mq-K;{q+2>v9Dvj6KACla``*$v#U<^mLc5D%*oMt#tEO7l8c_>3GDzXy(*pZR3O{aLW;oYdUpIKfZHdewb`pQb-{mEDs+9N&*8%#i@P|(sg=Q zVs2cwo#Txi-}dE_mN(ibC28Pzq9auG$+6`4q{dQ}_rPub^RF?y0pMnRK8> zbK6X~?9ecbcEmFwROzCtennXPqPBl%*tK~vy4h}9Wc%KZE$EwYf@FOUua4)&ScBMz zEnn^V*(9$dL4h*weGcaHlwQ(yk#c3v>}c_nf?jilUD=*1z_5a>GNV zkc-Lh=cdiWWF?{Fn4$^==NT?3Chq^3{- z`=qgaq+b%)Ngp{@6lYW$NWlr>vZmR}MKOAFjHxNv7q)j(r6pls1NPb0@ZWyVmDLjY zsv}xkZl4Dv{!-};H~5^r%u{Q!^ZH;YtUw4H;oa9iEq*fnp{}l}o?j|`L2;)21gv)3 zJ~`<-Unun5M~BQgAP+5X{6VZUOvII|^k$t1G04{D`UELPMFV7Vm!0b+9Tcpde?}y8 z&o8c$>2b$>pP`LR(@_H~L*8N<*A5LV@_M0ud%f$PfY@sO!t`&aX`zaj>)y4wKTOw< zT<^}$%TiU;9DU?Mf#7Pr3N)&{boHj(92-Qg&INxpfbY!(tb_R!h7B$0L=_(Xd@h3q}Z^LDRg`ZoY<2kXKN6y(VYkMbsZH(D-f=NgL&STT*>U>}l#- zIIhY%={}cRH|9JkCdiZq6Zg?DKKkIBMFK987f7#{ALENU)-!%oGeb|f^ligV$kQma zpf(lx889%H|E;lH&qdNLu!|W$K@VPD5jDN8KEO4P9Xs9{&sm%gML~^Piu6-L-|*_Y zVV{NLm_#31|3On2z}vV5;;(kw5oEM-3qH-G}GwGxBkJ0e!gh?Lw~)Po^pG_T&d<7Y$?=iUPG$ zwyeKb7V8vv85>1frw$74$PJwfKYzQarG!#2o&VvZp2n88FAOF0Ho$qB=m>v$^8o{5 z-`^laTq8?HZcQ_Mly7uSef6vf(a2$4lF_`G7u*kJIM4sMx|yJd{B@?0L-TNF`+m>2 z#TieKE~oGYEr#ox&(l;|nDz-jDHyO<^6WTV>-iP0`#om6gg#__eQ~$!yl?C4U!QxS*t7pMu*Ketn8-wL`tI5GdH+oGS~= zy!3K&nQ$@{k!gDXg4n)`)nu_%xE@>!QgC80P<54AZc&aE9motsniu@R<(lpD%&#(2 
zujk&Q%rQYsKi3wu_5m#UV#WFO`_TGNLJAZqF@s>(+DM>@qpKOhSv&aGRba=vt(NL8 z83Ay#=;Wt`vRDlzt=^krJI@Y z$u-gzpJHY~^P2*K!Ti7wWQQrEJ{esos#F6is3@^qFnhB#HSu;Xn@SdbDZRsV|1&ix zMj$%^6eDPJIQ?1{dvfix$?J4A8e{IY2JFerF-%hPR^;aJM?)3Tdf^vLW?n=MI(Y|y zxQtIKy(F=G2tD!(`_(raCBD$_)Nqg{yT5?Hms}X)PPgLU&54e@J}?`_I&c%G(IDk+ z$P~2%-P%8YX7`-@{0!IIch)STWqup6-M|YjUVi~jQx(y!`P>&1tZESzPcPQ^biUc4 z?nXwn)nb7c69V@B^4jTSQiJEbE7;`w7UFIG*BE&$4}AV9^x7Te;}e@l&)GDQ&iO94 z^bvMbFOt?i-j8BRCbXW?|7=Rxt-W_T#e~5FrZx9Ki&e>@EVfvRfevI(>=#u0ynQv9 z_kyB~k7u!onqo2+>p+Z>qd0Tj`3>h+-XdO<+w>H}J(n=#+GT_=Y8z!O{ece;vV>Xl z-rcSIY)gE79SDkGL_=%V06fFmS*So}($H$Gu=PbM@IBb|U81#?ubPZn;h%;3>iiY< zdXVq8$H3uCkDV`8s`yh?gx;<$=(MoG6u}Q1)r`2;NiLUv_{0_b$K;udx-g`LpAeWG z|Gbk5`%sqBQB?c{Sx7V|wt^yqi*Z7UYgr{+J(FQL3TAH->zMmYS}yMT!It}FMVZ#& zKCC_gUUsn4FE}pX*abgH)Zu^7@Rf6VgcQ&?AoiB3{95b!AbLXOe1Ar$7|f{|AXAtD z#@W*~mnj=^_6TQN=qEw$UmQ*(;xvd9y6WF z8C;Fnw;L+yY$YE*J?re*Z?_0JY+tqv>BK}v&K#twpcks^gLLp0MNGJIWyQD1y9wp> z`WOO#7;qRR>4&CV>!o%svOxO5A_BZIPPah4g`2Dc;5?)Vj^*^3qrjVql1NCk}zkOVCn(F}FZskad zoUjy}dMsdqybomRvL1#ch;oD6^mc<@t}vz<_6!;$2|2efw6)j8qzo+igc<=j zF(lgCO;%mvGANFWum7a36#rJf)iK`mGClnHt*-T(ob2?o5aX(XRKTZewu3eAi11;m zp3y-Z>HAuk+u9YoOg9oXKQj@1f2)Mn6VK(OW%y|43Z#~%>H64ikJ`T;I9K&p&m`=% zDU&|AcsJ)^U#B5_jnMZ3EjZ!9GJw&2xBkutE|<{*i9?Ma6B*5widLgg6qC=|0TuK+ zuLwzl+0jOSp*h#l!_zr!kgYKMr3Y-lJ;&Ioj!G}Q_6z={9P_d|RYP0QR<0S_aLFhjL(>|I=B{8k!t@g?L>YpPA z!QRR%n>1eLHXQWq*|si+jNXTsU6}NIH(v82wlk*&B_NE?NY&||SK+^bg1D4JZ8tKr z?$7zG-^iTXelifDb=cP68_7QqJyugg=gnSC)O1F5mS$xPwn|1HTK?k-h)Ht7-1mqf znF$J$re5(YF!ld9wWq7WK-2QWp&>bKTn}e83YWQ-s9afIaHtVaZ{$#6rG}+>a4u$j zv$jec5@y>GZ7pB!LPa54liscMDUn^u>QPvy=E~}yt%2Emf}tL|y15F?6pJ0*c?5`! 
zbPI6Wza+PCX~Fy!c`=*;h`pw$zwRu**)+VE7VB)Q0^!IA-m4`z1KNqZkp;F^3|1&_ zzC0pb^U?-$0slGepqRi9A2Asw9w}s7SiR0GI#hhRbH8B2$=qwF{NSb(3lU-*dP{p|Rq1$Itnc7E8On{O zx5I9^&;3kFu~0u_$A&-@g7AMfH%JJ;L{#Sw4w=QV)(RWMK;&&wIfGAVnmXoBQRQ2g zZ#W&vy2Xh#&|jlouS|&M?Y72<3FFlO)@D z$eJM|T;H2RQt2Hhmok(cl6ua^6I0Bv$4`5F)OJo1S?bQ7tPseV5;X)g96H76&YQ0K z)|m0*QMe_)2)pIzHtwil)gLWyZGizn9k(>lomVP7%~7}<_P&JDx*RDSBobXOluyW_ zDv*bE5@_fER4G+CrpIYY@0Cb^{~@@}M}OA&ADyG#_f;LzFij$?zGrDChm>eAQ23G< z)sbuUQUC^>y!jz9a5h;<o5F({iOxchc$uLPQF9Z*#2KFrx@9=Cu?s~m08T`b5@ zHD0%YM$FY^8|2|BgFL)@x0?gJGyb={D#yKB6k?`ex6l(S!7`Q>z`1`RDe9n%mGBSY z-t$v-?+cGAuc?-lU&X2ODygR|fi-GLz8+7vslgxPD!Su)C0@G&h5fCJphJEr|S8%rhs_%ZCV$oqvJ@t=_Mkw|o_3Gk#F-gRtGMx_)UV znLOyI1*YZvexAJ0H#w=q=ly|u!MVICC{7U~7wN<-OS)$g-H6vXVbC2rc-$c7{+_oX zwJiDwRTr*!rHrp7Tx(2+d%Ia&VL4ke@U+V)M6Dyu60E5FOT^H!aJ>qAT3LUmrf` zt#T8)oEpLAC`>$1CRG{MUQ+4FF9Nv`n{Uy6WcxBf<_!`6$^I~ODum&PDm@+yM9#JlAU6^n}3N|e* zoUzWp)6V%Jkf$?hLcG&&z$OqMm&w3{=hzx>vOJLP+bC$inE&TOfy|=QpL)QLP%CJg zC3R-e-=`CU;or;?e5IC>tHG|CQb2YICX>a#izs#_5K-3U$3W-R)NFj)Auz(N>9Im?c_uO;IR+= zVT^(Dk7`loOQ+bD=Aer!F|Y!@c05REqEp0XN@ZAM$>3jS^Q>v9gZ04+!bc$nzs={3 zmyL!VXi^!302G?6O1f&sodTYRt9d539dN(Ny_U10VKD8Lt)K(j){;#+ge%b^ZY=)R(M!%!~hC+q07|VIb~Kdq~T?uh6#g12OCnnvNfy zUrN=2NU9lRlOOmX2B?12-2}2zK+=X2LfJ+4Pwo33 z)^(e`zu=MBNc+Y+gh}(>LfP~Ub-(!pS`qOf;Hy0m5CiDo$ zHxDqHBv*N83yd->lb7y6hZBCq43V1_0;!{Y;G(92u_-SH!v^d0Q7qY@VNl-wVE^`+ z`Eow|SZ?ugK1CgdY0YiM*qsnC4=UqLI3cA{9NFP&HvM0(DOey$Bl|wib#NrhgZu&?a#BBNxJV zz!CD8&){*t${R@xBlr{0?iO+FNxF;0=!Kod8H7EcSfirqpJ*NWy6c`9cK5(FEJWBJ zGb9(zI?SHS%9aEBKc&fT9$^;M!_(-2Kk1JGBRUCAnd-cPE(JvhsW2cL@2~GEg|4$eqEQOD4iVMP_xTC^;XIR9UM|wlh5NTDd6ksIGp!IX z7^DA&md!gUW&JQ57CBdSp95!&WU?sA{ce)aayrqy$Mht+C^xN$(|RgPH@y1ZQxRba z0&y|@^K>>P?6l-he<5SaK5}wi%)Ln?;*fh~W<7^io+$BG|0XllWv_#;vL@B>mrZ_e zmtIJyZrosnt@GUux|itoWWW3d})U=D(?MJI5sB6i64NBava#30DeLv`W zcX+xrMR1U}K`z%YkT0EC(F8pIQPmSLHZ?}}%D2$L$adCwUAp*LX2DnHyq0v9%x4|U zwe+>hk)@Zz5!**;BtiH54ipO||IRt?e`g6Xo42_DES3L{h6cxXHk63`!N%91?fZ4K 
zR{`Me(l!4<{e=v{coA_!9Qn>Kk7MjtgMuvNv|LZK4we$aRMFIP9|vTohezcpDM%|) zhbd^BM`x_>a?*be5$sGe^~+!4)a{H&eD;SZ)3td~WIlMokC#Y$Lb}6k399~8y;yQF z6eE_};=a)&$FUBmel~rx??Gw_S`*{6qni_6$-pc@TgWr|KMIit)qzg{zz@ys@63#< zV1!ZLv0sBWgFL)Hyzmb6a%(#GabiqaELj*q28@K9huG#8PCt^Lbok8 z03esdN=io96q@_0f=PS34;Z2*V?sKEkXY>e@9!)m%rvB1?&%^xglU0V&2|e_-iam^ z3{qcN^#+6i1^djIF4=x$jh|>B>vqZ?8vf_puJ@`xeGu}N_Wmy!sO3r@9#3yir&+lS zH99+|D80fv_SlxqPxyAYTL9Ou6|p2)1x3d9Z_>={8(C{P&9k`RedgcHjkiBsegM{% z2PP2D$U=(Z6)puCC{&&D70@LRbF!**ms|m84AvI5jIGV#n!W)jN?iZFHuAojb(>3{ zz1{X&fL7+u2SdT$chQD4R;v0th1>68QKIIVW@eN`@6yXz6wp(N)Ug{eg8>gz6Yxbr z_eL#;Ir^Rudsn{bmfH z`X`TFJ8KQrt|rP}N}k)acg};vtkU@3)KW5gg0;5@T+JwD2(I|(He9$hp;gj?+*JSe zNcAQ`?QoAF4*GH*ZC11HSJ9GNVV-Nkl=Gl%`P;cW%AWUFY8_-+T=?@!&~SDOBAjK# za^FkHP>02KL~NB`h;>k4p`!BeEndP?|DiX2_eag_v|Y08=Gyb{YRuW6oM^!DsY&=- zO2vI30C7Lj1`T|tWmykBoVZEB?ny+9m!>j*3vrW&^ zH{ykWf;w)hGRn+IWkPi8JIh_rmy(w|*t65QlwT?PBo119uF_5F-0adAjAAM@vos3P zvE>q?O`f~UU=KtP@UC;-9t+7zY;8d@q?7IT_Hz63xesH4&|-#@OHff>8kqYeT4JzT zm_Vu6E2il3UsB8$h*2DJE^NNd$Y&`LO&jz1X~UbPO&w6}R6C$WT`JW6yKzpHq&d+j zqV>yESn1`oTh?#s5h}rCM?W9>x+#RE7;Ri!n#+iKL=zYCRQybs+m&@ZXd_&MTyDlZ zbog<~44PW3i7Jg7$v`$l*I*&^5G6AupViYP_yg*;?1J-qV6|N#wSCBB??R;cJ@WP% zMUHEuQ`8QGdO~6sVdehWH*WQ|_O)r#q8WI8`BA>Cu9;gnVu*gVCM+}O3w-OT$BAFh zX3xB#=80!djO6qzXpVC=VRxNuGLzRf$#M;odKZ0s1chi?gtwdj#T(tG1oLw=y(^?;fI zYb24X(CfOSMT`$TK89o{7bi%6qQ35$hf1EgEHcSl*Z(4=-F20J__>}smE{{-=KVzc z=qs4-{{#rn;-fU>USXPd}70rF)Hio zkPj7JSNz@n{FAHPR(m6wI%=bVof&wpOqH83ll^0^fsz+<>t>G`DAd+n(J`mx<=4}e z{kAIqqFw$h2ONf%hWj*|0R>b@63mxu~3I2?|T{v)ZfE< zrjj6V>s`4m^BgxO2N)L>hVm?h8hab!MXS?99Ao^w-BvYhP!jc3y$hbEEy48&(iA5^ zLTUv)H^ND2V$)pc;5mRWfp!DG4Yl9(-xeL&TDgKM-rid`0W^;~u zXL7p}taNM3EXnup;@>30bp1!OrL#i>16&kT9E?A3d3{)*Fr>jwLTep+YBkXHyb+Iu z5X~CH9@#~6dQqRAuMZ=x-6FoxAUsHJjo0YF^DV$MqzEQp^tIhM_6L7fZCM9~xI?%P z9Fu2h#6;TXy425>*7s#_=*oF9JtA!5;h;$CmLZ-6E9DR#O>S z=2^Go4a%uX`=mK6q^A3Xlh(3rtrZOqYZhgnelLGZiT-t_SohHNLmGwm#S`43Anik& 
z59`p6hTwkI-RUFA@+#}`(rl~RzzGxp>K&!h{Xe^&DUk}b)y-6@sQH7elGPq)w-?O&D?E%2IaNlpy+?SA*xF&x4LNhY zFN)j^FskmIF|T_$6-mMrbR(8PI7kL+IU%$-^TbUGz;uhcDMG24fzt z*X@<{1J`~t=Xx7!(YZ@$y=TPew(pS@#aw-poaD3DV3Rh^x_VhBfZOMB&5wkD<=pjY z(r>TMj5z(z{%3Xj0mR=%AfHO-*-8CePj{1oT>ImZ6FP)70q>&h8(XKA%l)W?q_vdQ zhi868v47FA15nX?20!4C+J>9t^M+8oZitnoa2cU`dxM4qyeD=dC9fm3dXc?E4Rwzo zk8r2`AHA|o63+6cYDKq^=j&xj9bOrNQl}oC8iMTA9N}v)7!G;R)6!Xx zUw?Rv|3}+^_2|*#g?^V;3<`AYJwbO@ST3P7Xjt(iUh-DF5F&eZaJPj1O66n)s_uK{ z6Yh)G6E9wv8?NTe+*8u}z_gK>8Ae!S&c@7a`oQ)#-<_oN^wH0QF-QPCvxxQk}vsQ^{po3eMKQTamLGb;Cz@3SsU!>cm7#I+x4TRu z?xs=n%645jibdT(nVNb!Q>NP(;HL0Thb+)x!x4o3wRWP|ZZqc;onmzS1VZv=ihQX* zCSJ`K3xA~)dbdwKEL6{1_|guN=NWD$^K}>t@#~$xTY9Z3v=sO`#%`3posf%_EKz{y^B-7q>uVx6ny_=u3<*n8oO6PD_VXk zgwnrwOme)vR?`L%kYkWk6N-PT@4uyf*E-*cpTmtzJ-bOElTIbQ{kFNJjwyZl?`H~Al|$J|W*!aPcRTNL*p&&}-zb)b2URM)U7U&pf2 zu9BhK`%Q}2k_I=DW(;W3gn3J`Vncg%_9PlN2v4PP$cD0Yh=JooETJOp37<5)pF2q* zvs~&--_iB_0HOr}Q ztTXPu3;#_x^0-XPoZ~m|Eps5_g!Oz)DMkdKTa!y#foT;i&2Ufb)IFqF8{JBDAV!EPuVEuJHF_hW3)sF5rl zM-piYxuW@WL6P!Bf2itK(FQ{Wdg9sCQKQ(Olh~5dPu$h@?J10IdY*^61tqez;mRhT zSE5RAHXfeU+h>pB<-bl&IxH0vW$nDUDTVEp8w~`wc8MvXs$Fkk^*2)Eq{F2+e64(dtt2D#@ z3bAG=I}sM&Ew;;WnK5&9`dkk`C}&k5*`)iC%Fsim@;S$_16w{AwiYw+crliQZkPlJ z=ot&IG2Xmg0s9XB%s*4}s}nbqJNM&j%Skhi69=hiovsaKiOK0;2qz+k7B20v{@2MR z(FlyaB$~6$m0Gm@qCG0m68z}19DHLj5}R$FyYHw*CDj($O(at}6E6}i=E9HXXpBl{TDTL0>bO#MVl}-fDDnQp8;kaFU>$ik_8y^>_ z;vlhWdQHtpr10NOHF6xc?!?M+3j`&Gh$Pn^yhTZ)B=Ctu zLwKvH9!85Yz@eFE$NY8%)-7zapt<<1t?OBTxIoTho-r^g#=4>lohYOmr! 
z2EF0?Z?AaTeG{Bo5Ilm{{>*e>oqD5;ApT<@;&n!YcE)?VQ+Nc3Kc59#5FZao!Bz6`^gWEy)X^7=t{(Lyo*U~~;-IkQEYbDy zE;7&Cf7&%wAQuwsOfu9(vNc#uJl!|(a>@w2hrOFmut*2VlR08Rg4BK7^fI*X)MDoP zaEH+L)+s2&CxEG^ADZEtKQPMaqp!)R`g+62^r7cC?oVw{%~lyEXW~MW=gJg@-B=MN zU%WosE1hrdKcpEXkF=eAf(bN|`ODlb7r!**pY~8L*HT(G~OOSWo_*GLK1RYudJA~eJqAftp=nlN!IXd*Iy{KZ|zn`FYlhjsnXH2 z56!XGHJjl&l}cnc`j*2&4I)l}e;|K`L%*gD6{^h>nZYX3eg7e-|D}ieU6!JFyi%_(o@ou3G<8^`#bSY*}+KySPnm!Zs)v z9UU1g(%ikCH(s(Na5ihT5wnvLYWXfSv@1ygH-l0ILk$@0(a+1Fpz^$Y2<>?Mt$tZF zUX3j4`jQTm8qE(vinQxG>%laJKoy{+6dXCi)+=JEC%M(UM6@8E(oi4EgW(ktJdOMShU2`7r*)zhh(Ejp{6x3 zWPihPAC${7aF_k&5qm0Vr_^ea9JNHTkbs6yYmzW>aB_^FqcfH2x#s zGpfG+V!5C)0W6z1Y;F#^6U2qHe=E6r$i103GZQk5Zef{8c5!&)A~a>a40-SuxV!D` zr_?U}ak89}77-PPBt-+ygQY{8tHV57BY1PR_;HYHammew$?}2U5{+QMNdfsgHhGDH zFSckg22N=y>47pL)?EFicdiB*nKFw>7j9mx5#C;xbn_GOLO^+Mc)slwVnwchKZ-cBg&h9ucG_*eX!#fxg<9`lF${AP0 z_}X_X<7XEnlFXp(_`t`<^W?Qm;Z5fwv4zv0QQ7+|GMJU!ae%V2vT^E|ZBEZF`YYPz z_d3XCTIH74*=FYm#H_EpR-nZ{lVg?cB#3i8U~v+7z!5_3hgis($LYc{_Fy{2MJlTM*pIe4IN!S^lt>d#pNr2#8&paY@5E*4T(b)Sxnfnz{F&5e# zyW>9txrXJ2UAdGGg~?6Pb>N*%*(a{L60GN9Kf`1kYhyUs93A`m(;q3@$y(F_J+h%Y z0Ap#WD9Xt6L|qSjJjJTg_Um``AgIRHspzkWPaFJ``)a6Froy`G+Drx)clIDz=8X9N zNH&F?0keqyC79F^CL_?YjZJ<4oyQfLWQ0zeY`4x@07bh%5h)yUp{)^;FpQAdi0?6R|XPs z#x}yzbBjry3_{As@(Jgz5q^T^Yu z>aqT!D-%hkaV{T0OT~>=r48>FQzYPi`Wln#qe|K|fv!kV>R^|rc&G@FG1GdJY2hCa zMD}P`!#^sGidLN7N2S4jdAl*504|@zGaNj?A(Y}~{uI-bpr5^vCJQdV@(u18@2bIK zV1|8d)slR_U9MRNU)B@NPfCZa9$9tv)Sb`F;x1fM7om+DU z7(%!{2k+sb`4>0h#wHAMH?Pt6gxRaGl;M$f1Q&wT7K(&zOCpB3#;(5Yvo}V)KozIM& zxXf3l#U+^-$q{myR&CQ7hD$H)4U9NLR{=}#+NZx>8Ykjz2a&{n%uB166*5 za{kTL6eI8VYaOB1>AgT8v5b$MtBcLK`MHF6*GP9t&O;Qj&Sx|W_=_ZX`Ziae{3EX|DrEd)SIoP|OiU1!c2!L1$kkk<-@4O6 zjo0&UAu552%;G0)iQ!Q9#Wx6hbT`B{H<^%j0ZR_@n7M+zDt84p-hw)(y}yKmqtSz} zP~1i1nzN!b9EOpAQAWHfd35CHDH(r6u9N(S&%qfD zL(HtRkNG-$<0F`*VT)z0-hUbufBA3SCw1EHa{zgNG zt;Opz$k26_6za^-P1pHRDQ+loFB(l>7hjFWCuf3po?lMy(rATr^S{sIHORNh5vo&R zZAG*h!yC2LKVlr6(TxuGw0>4)ie&N;Fd`iB`YIn~L|a#t4rz&sTHfQP%rs%Wi`oRv 
z{Ox5I%v|TV*Rzv7PFT0to0YAZ!(WqAd%gE~+~>HKs8N&ktRBMj(B45l!>ODK4)0-D zsL{D`)6N7R16)fsTiJoCx;r)Q&WjW)rd<5s-uNh5T4I4bGi}6yL?+6WAmze?hmRl6 z&-amx_46#5ux8|XD{->%5v1v{;K5Qu^$D-r6F#mWO^krVZdqe{2P4mt5dg~&9GgTI zZd)W?qIyMYf05ZtoA#!-=k7G|v4sn2_kZdsPS5TkDnhBcsG!&izf3ZR1u( z;UOQ~dcEKH(EQtHO;3i#-!BqvmoV$8<{bh~_>L;(1PFfxyf(+zg>SjfqQ5%@_}d2{ zA!?t8;6ePj)_Vj<9Bs_0vN@ZWJG;9BavLXe|6qHf5P1}1-qC&+QAFtdR4mc`-#7sn zn>P1E5`3$c8!8jRH7D@ad0;{0r;>)CRqW!Os(*PI51J3P=FabyXXm;+2gl7MraK-G z2_>yoboi>w6fjIYZYW1#HhTs=!zaEVWmu*tt(=~P22fMBl zo}LE3wSWNb(Ij2ZRXTcYJ^EOhEdSNV&Usc6`iG&#FW+(Pn}3s?QU>;GFC-Q|)kb5=-v7Y=@>ha zmDai0@?xTWf0ocHPsDU}v>P5;8ZbxUS8LE@jbLa0d`Z(UBXyHNH3Dt1sk>P~p*Z?)G>j*Jf%o5%m1 zk|Kc;_VBG^#3qDo*1L<0!arH2J`;S>EgyY7I(nWY2>HF*Kn%x~JYJ;Rp^*`<%rkXK zFLUxid3D-%R<*iXYrz9h3K54lHbaN`!D63BrJXkkJCU8ffacOV_EKq+_-y;KI*?LHtK zlCARBggFPg9S;K}qN5|+Fy-`-{~d7NjM%O^R+Pw|&XaBYzlTz*_}CyVtXQ7NkJD-? zrBUp`O81aE-6v9v&426sx(~J?C_$NCrcm}};k}Vydaf6^RkM4lGRY+w?H8^D>k7EF zcK=$dO;N31xa0+UA}2a!-0}Ij@l2(gTM&iZFA zrPmN7rd)6%5KxiQs*O#1jcoq(=kM$K5k$|wHB1wzt)j_dXEACPqns-Yf^<*f*5Py# zclYk=?2j*(MvmK@!CkGmhviE_+Wg8+mN0?EJAeM`ddpG(@=)+~@xx)YTUSSn-D@i8zX6G81K>|pk+;RivRvwaDn)qj6xY`T<2Of;^+t$|1Cn4_^7KsBt z;Z;6t(00P=9DcwrlNlRk#ONhYNzt$85$wO7@d^0Jxp=qaH&?LHe27y@c74+WPob{RqJ%kLY6{a8ivT1h3^&)h zk`P9x+iSMT6(jS_^pB3*I1NBf!8v2^6lMw~;m5AhQeA3) z@!F_;v4pJ9p zGPD0SPW8PYRo~UM@AqeV6lm7n2sW)($;i)g$@q}l#0b?xW8AUSHnX9J8+5&-MG^77 zNMmb~M!GwKI%tT&d1I~%2C#P!eKT}T_s)ddTzgOusR=yfU8o6bb^idT9f5rCL762o zZLmm8DP zF#+El+Iim5HR37!5pi0uF@F0&eW+g*RS3-E+Y)E=%gs$0)>gGJG3jTZWOUVNJB4-d z58)s`h$c~oGZ7h^ZLq{k|Fiae=ujXk0BW5=%?X1K1YfaTyturE2ym;->y)xpTy6Qu z8*b>tolq9DUG5z-Kd%UER7iGD(6jNFKJH8qym7I#s;SsZK1#eGzT8kBE!x!pgM{i< z!AXn8y;wqC6KEqWKM$b6%!kD6_ZVa>SVn4^t9M+BX6&Ej?yq<5Ejxj=t95^ZLP( z26}po1&RjML)>`b|GifQo?VD5SZR{eOxZ=3yp7X=fiRB9v-a6#GRWNcn^o#HMY7p! 
z>&1^>su%0LRAhr|-Ss`Y*h5T6{XBu~r5n>MC~>{RviIuSVlx%a)#Fax_S8frk{2O@ zSSEOj*Vg*~Y_1J#4upAgNvQsB7AA!B6y+UrwrejY%O5&bzL=6c`52fxUcE^2Q1<)J z!rE(!@`#-=pfj+vv^ue$9j`tfU%s8%KtpVVgIWQ5@ud#5xiOLJ$^|197cKnnw-OFH zm6Z7eC#V6(oOi>?{<<+ZRS>U+`?J)XfSQ$5qUa|JF~?NOYYt z;9utv7_!*{7#JJ!#OZ z7%Vi&}vP1WJJFT<$Mcq18Lo+KBoit!QME|=k zWOlUL=)@+49${&HvF^PX$9o_Uv@R=Qx^lgfRLQB!!{_5f@blv20i8~mG<;09M@qv!WUXxGLr%%-22BJI^Xr@LBMh-`x2?OwF=QA~ag@Hr;F zZtu^-_)sjo$puskDIyK;3|46{7xgUqlbT!-Qid6b@Xp$j_!$w>3YyYnM{$u@y)Mu+ zyHgo)bAyFqYWb+k|G?=?PkWv5r*R+rye!Nmv~TkcG7hla4v%#ddtLo@QN(EEoN zK@J?@PyhellB^&3@TFQugarel=j9J`_51nh_LN6evh2;=4Q{e4JGJ8cjCjk0tu2g; z=7Spd7h>rNeq0>bvMS)^Q}6xaGn51d!!`}=2Y+UT3#)Ow>{0-nS3dlOYaNar z5pm1&ga1xyWuuT8Rk39Mka1T`*sBa|ma%V3D870`q%jG*-G7K{)zJrMT)7&~on3UK zDD-OT6`la$qU6MRouHar5=6zh;FP%m zmmshOwY#Oqb>%Pldb*DPXXR4Cs*Vprddxfs0$q*69lS zRI3dchR83|*!aEjp(#+Elob7b{(6d*zIW(ccaapoMHdd~J$NK(I$az1{PwTEqFvVL@Rg zF#nHKyUOuk;*GhhTAXlJq|IJsNSYf`*ds0T_=H}NP&<0mSw73p zdZHRAOfX!c(z;H$;O(BlY*+}8E^h@+xYJIl9BydE++-*19uC6nQ5LCNL=xr*v%rG9 z-5mY_E6D9UC7mh5p+h=h;g!X&!2>jEArJWa@3ST0UnmBR7m58wtr2SiDN#|mO0`Nu zll=&e>-rQ*?r-cc;dHze(|f`4%RK@2#esvdq2Vu>mC?&dp>bd5MrWPvvvQEO{)==i zE7OPhi%&0NR?dx;*ucH<;2We(k+&he+s;P~$YJDkO@-y}I@b`(9OWX55X!awLFA4P zj3vX^in&wqkZ%s4nyQ_;)77O$(pI{KtJ0kCA}STCNGo@JnbIq&}-Rh%n zyU}OcR!7*7Wh>rcLvRXYmT<-6Dxlo$|zL zZD7;|Xs3P3_01L)f^lWn{pTTttYL-hqGKP+(M@`HK7_(WFs>Uu`wwm;gaNgHj*F7E zve8d0{k`m1Smicogc5&VmV4*Wge;F&)@0dqe>m*2Z+GJXS+H(?cd+SD5>X<)T1eMM zT?V4JW4U#duIm}FvPBoiT<{$pkY97I(>AaV{o^rchVnSWZ!8jtppvM}R)%qEZ!BcK z+8NotSZDn~1i^oC7W;pqxH^PHQshsCR@N8(@n0z*6MwM!tF6RQ4MCEr-)`}AC%Mg0 z^e?5*!syoquSS9bS~EUY;TFV@2oXo1Pkb9$E0@dTn$l>kW}yWup=^e2U7vzYW4dRQ z62nSs1jDY@*_#r;D)zh>{2YWytIH5Yn)K~L=qy7$hPrt$)QUG=R_Raz)~w|QP@ z^IeAWI2ZZrrv$yCj(6~3J$MOWS}pIJBV1PPBrR{GTcE|8!=}8~r|RisY`64p-qI<0 z9=Mk1EncK>ibn=5#CA^?{~phjr&|6BH2`H(iDVj_wPzR zn7+>jecXh|7#B%Zmc^Rg5?6b_DgZX+r2|mRLp#**zkivjWo@#}ew~|ZU=s9y^T<%D z%Qq=hkOc}Q<$lj-nA(q;g01-v^lj6COz5Xy{N>Tla+F_z?$q~>M5oj<_hFz&(+!c; z0a!kN?bLC`aT9@-%kT 
z+Jq^;S#})hHn{yZ*IiUtAU+g~_najJo}TKvv4rKiW+&kn{>?6?nqARVErUAMBbWYHD zThxiry$8wC5UU1%sdrMcQIs-P5nSDhXX6zXYI~m5)og9)wGmvN=GJ{q2oryxA;?<^ zMU05BXZ>CRcDR>yx7C_xkw3G(pGOt_2ZyqXuP(=9o}MkOvXhHlTZl-CHS_nqKvY-G z%cL4~>;2i&i4z=p5KXQ8OY-o;TH3^_J&q4l^;cH*Qxx6_B*R`G(wr8)+U2Dt*Z*|@0@M6I!cyj}Op2BDJYeFC00}8PMHL1s+>NvfPYK+-BFOVc*l1K- z^LjBXNe32T-cjH^Pm_0Nk@jWL-lhsDnX4QS?RA5cnkh3^rXttCubn{n5kucRG$Ug^ zb8Bjkb(M2S=c*@gesSlXf2QSR&(ZgDRbtO8*`ZiC>>6Y8OBUq8$S-0i-RbWJU9!&B z7eaD}eQPBTo>p1uUx9JR=BGZC&2)Wo=pc{aQ0>YjqOz_@Rh7Bm4#GU;el|^aS11bwg158C-v$I6ybC0J z#=C~H(Rqc*93&U1OsD3}Yjd@)Uj*^U!$Ynjg>Qv4*oPB6pJ~nRB&Lwg$a+<3f>Nvy zhpcP(@Yl9(YFsi^8&u#@W*HMS4wt~Gf@yEp(ifdb#8^w zyoN7>l@eru(qEGc?E)QwSn;YNdRvM=Q?Yr`5{ z6su%PTaARd;WMOfw?95|Q6Mb^pxFAXg{btD1JJP^xsynCZJ{h!&qVyc5~e|%%Nfc) zzw-BErH;ceiwy!KA*^e5?Y zBQhD8GOZ!c0(a)4gnUH3+%o>rFaXrge&!nf_3gbr8;OiDn*ts{d^A{iYs@hgwl0S& z12o2XJ*nNHngFunN;H8{#-tK>=&ey*( z>xLbUdzyUEyPGn&9g6;eo3k8^ihPB#A`rJ`gs+~5FDbva>*+X;DYx}TRmOaWrooE? zLD}5Gul~$@3!OsXahcFxm;1tif0~_tJ8h1-*}2AFkVpI4xX|&UtENsyq~CjjR!e-C z_^r6dh84NP`R!**##Ovd0ld3+q@2s}Y%ya>@&tEzOuF+`l z62g8ENqFui6zT@e_x}>-PSK@s^Q?nXz`Z<ebQQ2er?&&#|NX$Ks;K z32j+07!|?HE0u^Ow>=xEcGi4>C)+o4B(1VW zcu|*tttft})skbLBS7T*n&zhv_R!UIBOjE%CqfL75)Be!W9LbcSU}tyQoa80r(!N( z(DspSZ8bPkvrEw>;r>$ID$nwh;#^_=mT0jd^YFmJfh4%vmGJqfO1zViM`*Pr&{B?& z@u{`(Y4F*4P(M$U$%|D(C=zHX@BK|Y^kt0kF6Y+N|KG#v?{aTCioYyxy zTTM!UhVeeVp4&fC=Cp)`3_QsCz z@!nG*L;~2O1_!7sJ4ehmOw1pSfY&h z>t=f8;<$);9hnK(;?3?v6bhSA$Te2BKiqR8>VxA=gpViA*N8CRcyIjjoZO5Q#@DmLBIC!sZx~rUQsy@DY0akoBEY#`Tl`HsdXm_*R z-Fd-OBD|PzSeRjT5hmVnR(NirPoOz2{;^y`?G_sClg+7N@Mh38O3FOz{^kpqPeZR_ zB``0ZyPoj@6;)nRJd0%EsX{m7K$?>IeP-Na_oL&RGJbnbL@VCgSB!AF)>l4C^DY$B&N3c_9bOH5XN>5ra z84uEJ*}RIIr{8ODSmsxv?W|-G69xUaCU|czGyP{rz3ImHn(7f-dXr%~_-_jHrS&uE*i`gl7h3dJ`c>Ss9;wvlOHt0F9B~K>3QJW)e6MoX8VaBDB z0M;7XfE`n#+vnpVvM1sZC^2n+-h6hBY$d(bMu0GnCw0!{j5E49%xIW>wfZdV`-ppWqUqFsMp~W>`COlrJ^kqdOr!rrTjc|2xxz@@*Z4 z$*t?&sJBMxSQC9_?~(;tx}xpOUv}(Wf^K@dybYOivF zSVX;7wGXU#F^tUR{5k0br<<3%O=eorJdKZ!d-O4P0&kDAyr)U$V|5C5E<(mE?h`YW 
zfApQ`raz*+nR=OqdDxd~a3{YX$H)Hl$yh2qt)LPuJodWYw(TvwVrCL6R^hMp2S6ja z!m_BHj@>i+TwI(#3qX*g5Du`I8UB>`Rhz?R0RT9C9yO>dceV<{I}P@IBX`Qjvj%v< z&;=NvoyylQ(nt?Rp#-E)y0CV#B2fW@l~JKW)9$LVs2HR^LI;jI!8YhBnv3cVJec2E zeOaP(qg92zcKiZN{Jm)cZG8L+I{NAIQkivn7X(0>8Cf!P_nVV??V_wr8rTsYChYv} zAtkE){MxlYc2pk@(h_0V5W?W`)qdnI$8LtJn+c6TDdCdw&P!IqHSJ;iF)4qNTE^;i z-i^*iDlRtcU(Y{wdDZ+f$!w?{;$a8CgT7yUzzCr(f28`wZ#Sz_#j2z2w$HLz z3gN=jj(ybh)%y@WAffG?+ZgVb1*s#%Uy8F~HoN^aYTQ1VUiGfrB_X*gB;yh|q?$2W zE$*#0Y~B0s>cmY)j$y7nXuC*b)!p&$&g&TZ8)i2%gEX1Ir3-d9X!U43Tikcfme$5D z+vnzM+}+6#5}&#U+q(RJI_F&*6*OLB_|%=dcQm4|r0>CLP@bnipOZEGVQnkh4pjD& zcl-Abjm~PY;E~3+={YFWJsA>C`huv}8FO-QF7x3M;w8k&;xCxllA2zpwhTydh zb`}I#FqT(YA3ezkz0_>ZsksQNmk>q$>)CykMtRqU1=Dx@^6P=*GWE@lyf4Y% zxy&3qA9wu>o`32>T&diQ5>Au5z1$o0rGItTgBpz6LvA7h*W7CI^nZ=C2wk{^5d*{n zyhua#yyI&aKEKK3O-5pGptDZtQDOs#1@tF!B8VV%eKPdLjsgucxyxqGH4hiV4(b0Cj&y|;lMR_ux!Q` z^(=ePSaD|l9PU$_6~4_L>)mCwX4ZzF&{dM9{>A21^Bmy|u==Ox8x_h)a=m0BnYVI* zK$-Y}1mK=iR}$x6phjTs1Yo<8x)5RIh<)SCLkHE=q^V1zm7VNQK}*JFUa^hV={R7G zJ=TM`J5D&yoX85}F1Y33>#uE!y)H`#0z= zOSy00ORL7x|2%Haul_i%m1k(`h)0O$lRW2KNsqS!^{nS1Ki2t&zmPBwau}#iinV~7 z`2E`_AlH~jl>lFhV_^b8!dy;)b_FayklY_%ifR8*HA%6g&rO(9B2_;tLJ(T@J&|PuO&^!5OkTmtFSgFm$ zMnO|_NI*2?dZ+}4|2VMfzGrn~Zl_fC7ySuhhRfm>ll2|3CG9{PGy%-hBh9QOOWM}f zxI_W)#Mpa3AJgN+qSYV;vm=W3MMKcU2^tU>J>6zciaasM&e`D}b8it%xA%TUk!<@@YiC?(UPPp4eX4Oz?K?t5tF9eD6o1va67BDka^3z5iEd_o;aP~j zoD1dGYj=>_m)GnnoylW+?A|bJdhFTA`&}r)zY$le+H*jUlw6 zH5*?*_C6F$2E$v#pFt9ILCdm!(eq!IpyY~N`2-$RC;VAl{CgTLIZwZi-Ym972YnC7h3YJTZy&xMcdiUD54gWRQ1pkek6C2 zbkv79mtx0UK>+Li!c`A2Pag&0$QRHHvY1uv%YhJfbtPKwrOqx8D<^>-M$^_R`g7_< zEjA^3-rz4ZXx#N89L1+Nx)j58+2P4jd^u=^F<=G(s7GoYLbdK1GkQQ&gIk)DxGdi! 
zL-l$-4r%lRwH$tJL&yA<9y9}4KPpTca(kj7`1unBZnU@zunLaB+%<*?;Q^QYG-Suo zp#b1L9eW(1#j^axDl8|mD`s9!{e^`|h#tJ!eB87IxfWgHgRJ99Q*H({KbMI`HKkmb z9OhZ7=IZ`?1}aQ(%nGRTZQgpaVk0+R{*(gQt>bc;V(I67;h4Kak$bf{ROK&9>~B4E zm%*g|VQb0vUz}JnTIkeZ8G!bYM~`2O4=;^44^^7ea%l*TkMYS)%hm_(Lm=AME54j- zwfz-2&(T#z(hvka5zKQJEXM@pnu}Ig(jt8D#V=vT4)w@)At$T3iH>Jlw-L1k82ReH zyB~22C~3*I4?e_a33-VKyUjJ2puDYN!8NCE8pEG!00SB$r%l%Sf&tWHmGGZ_JFa+# z=0R!Tp4ZPcoqVzn{30;8DYd^}v7~p3j0t%7oA4q=)+$(P&84=Tqig*%sr2%Y|ycC48(BhF|GO^g5i5s4#aupE<0JdD6@s+ zVg2|ox$&j!zvJwJ@+guEs70HXJ+HnBGj&35G|`sbM4C*{CsF-of~MTVV%^$12diwl zypCnpraS9s$z5nMSp_nx^lRY1q12hU$KL}p19w$%yOkKgn^n%uhmU)71A)`6%wTLx z@DtqsN84KlL=|;y!-I$@h=70y2#83Rf^5G!y_94j+`E#?~6(oHS z0U^e{QSY1%yyJ#m@gWbZHgpY&w*nh}M-|F%PsmFD6SKrw7MB zSRD&VSUw{UlpJ?Z?v0?H0_{n2&OrLoS(`1GxJo?Od+(+^8Ar8s$ExHrQsqXU}AyKljp70ThV4B4`xC93EB} zpByc-QUQh#U)sW#;Xq^3wYTe$v~O#cR)fcQS`e$`s+*Vt^Q7eDh1PpsF4aNn2RI+p zKJ(V5_Q-?LmuFOJGa&NWzNy0;_XOA*YP{Nch3WXZK34e{T^K!9sP!w-)N+oA3 z#CT1KmAj)%1F7%Qn8k~|Mb3eF{$!=u-g5_ew2ViFsNrcqtv<@ole+##|L~2M@Abmg zbd|Jt%%C+Z+%1ub2jkM2k$2E5D#WpRf9BmKA>=#|OvF|3wq~i}H$&OF+-LzMw;a-B zs{P)an=H>~V}s5AD+KdPakD|rVsPOA5f?!0ax1*dW7vZaG54#KzKKqHQtN#H@^(O5 zo(1tf){6BBGylwx#9{{6^fmpB15QKv@vpfb)D*I7m#uvCigsTz1B{r^}l*x<{cnAVZdXRl4ctE-ByBie$T3|B@ ziUK3Qyx#gOQqLP2E`cz$2O5~@^zwcRv|A&}r2FxeRO(o+3TTAG-%Of+a<$>@cP`Qx z;nw`L(ep;yyG8pw^4|{SrOEiT;W%m{EQko?(e?1OOQbLL+wU&nYbvS&6x(@bA&w6# zxFt!G1dP`n-C!xkX4S2SajY(-?!QS*U#C+hG(OY%#b%3MJqGqCH;py6J{)!upRE8g z5g>kTfgY4wp__yA)Bi61b;IG!@Ld3~`EcBLmLxUC^N$$v?CR*k@Z%|5&5{sf?v0Ob zcsjD0U@TmMIZgK^$Vxh~W=s;H91Wig6Xb6Dw^O-^$uSX0=(~_H=WRh`jDOSivEH3( zaJBVGwWH&|JG_j|QG{<`D4d^rhFZdK*cXR+DH!Us%a~7}m7AiGg}@fk@&4Tp6z>xn zdnakeAgsfOWw}qiPbL64a*`LVBM zB&l($gcO2NyE?N7gq>n7eyO3XcHHlj!Kj7O_Inn2E)&dL&e)}AASun4?wt8=HAyQUKR^jl~3*IZP(-wc*N z^x-qJ0pq6i}EcQtL_6PSGIfFd^V^RzjxwG;uj#cNfxC+6IJY}{2(8FF^>#Enwa!Yr(ea=;m-|i`^VA> zYfA_F(G7*Z`q^E=-`DC~8_dVFyUaI{sPT%{jB)mY!Gt~ltmXi!!UWzo?2-q$TmzQ4 zzkVMu$qDB8SpH?@#fGO$g8Yfe_y=c~saIxsAxyTQp3r1&EM~|y$80s3Q0#mK&!Mls 
zu9rOU=cC01=9@nxg{tYh>TdB7Vc*=?{K5FJ(T(}Y-|!Ahsr9R?lO5k}ZwygMbf%PL zaPxc77gP!(CwjHi(%WL@L>J>A53x`9{eZA^;MCCY%(=d(c&SH?AOGx{>Mc)Fn9Psr zUDw}0q2Afwq4SRD?AtFO(zGb}!RydUGYUlP&qZxyi~zN?U(F|K#=C{^L#JPzaDyhb z1}G~Bfvqw6`6gXd zJ(Ud3Iuu2bfpnzu;Yi z081A`$iGNb(bQoAUU~kbR|Mv0%zo1{m#LmU7kQ&%`mk3znUDGSd2HLFn!4zZm*kk% zclAzPY!3abEjau0uT{zl)w{*)aS!$II_e-tYj0H-Lo^gB8A}zm)!i;CO2ZRnPPPsf za4p$N0ytB%=I5Kft0C4y%MLra{KzJyt2usRiPmI1-a`q_Z{>t zX1of3CJO*S@R>btJj$^C`w1@Ol&T&5vb^C6{?Q@qx6gZ)1x$I+e|a+T#*YVMan0p5_(t5wtp4B zv#hr1fet|XISi1e?{gY=x~Aw+cnS#1!MF%QfvZRAcbFjk687MZ$>xr7=M|bC3LwI9 z$y;%GROoOTrnweJ!>7IExCyy zf?@eZW6Rm;AZ=o^CS(=~LcC*h_9r%MEuvD4VU)F5i>Rg&&-6M`W5|+^td~U+!ck2!PxD zxws$zNQf%VY5YBvKJXRKD+Z0n@IM`Qyn|5#a2NQMLML9IiPb%3!U5edfV1A)9+SDe(>w*L^gvpm_ z5VMnEMOHKoODC|_;a$Ut3}ekKuzFsgk!&Sqcu4w%6>VctBMqt*P9pbB*^EH7_;vGb zir!>TuBL$*uL|+obW~JNA7a)=OO;#VPn+c?7(#iCEjo<{nj}j&+l^%K+fU2nm`8hD zJLddR!&RXz>>Vt4Fz_;Bh=^$Ylo32(`xXIBs<<1D2V>82lG}xSa`@76>^?vqU|lKv zf=dhmS3C8OU6ktMZBNe3s_3;?W?6eD*DxeC#(BZ`-MTWa128QgK0l=sPVE#gMP;>k z2Ib83{3HE0-?+I!_n1{s!S|`IoyE_3a3r>ZRG!DMqs$u0nGYG`{e6gym$@ z*t(0&D%y@Q0SbpODECGk#b4>K!GI|1-C^HhS!VOHy5jAo;=c0bMOVfjGFnB0vS@6# zl|##Y>)uE~GHqU&MN)h(0>kT}oAiRtL9c@eoc&L&CN$Kxx<0-FMuYsXsgjSULU$cBpHU53VUNs?Iay6GQvO=$p zMvpmpRNsO!Q}Eq5b}uU5||roU>-nQb9GsdQP619S`0H-CIe z?Ohn!9NZ&?Ze)@#>P+}wVBsH3frzU8`V^WI`{WRXQ);qc{YDc_*7G|j{#7{L5a-5_FV?L7LKzn5 zo@WIK&KTi8G$Up)x;pG)!>9I3;I3e`!4YJVI-592_y-}Lk$qzg{j!a#fzht|v^x<>C_q?1ra>M4Bt-#a`+|4=i;&cCQldK!->k<;PZP*gK(ZEyZFH&vddboEc zVYo$QWne6oe59OOZiU@Lo}zNMLmfmP{(PJSTi$f4*wdlngl4GIWfIM3mX3Go$B(7* z(&t`}K&oxvnYt$z6?uQTNvd>!ut=Aknj9Ej0sp4|agp|ZqJZ??({K1a38J?r9wzxG zx1+hB&Kpa?Rd08@zU(dN0!72K=siTgV{*JR94vGZ!|Lo?x|?9_+BdTBFSm+id;PZE z;&z?EnoCb^EBpt4vz(3Cf%>BG@v<)6-Kx{vZLm`DBZg^gl|w^qrfxo(e7^iM-Aq{_ zNpstFlDQf<6?xkX=V%|Mk7B81x5KpHjyi(@=VV^x_tNNx-UnN6nV(z2JQYs!h%^K} z)eWOVM@cDtP}z9MBwn2Pgv+FdNG5`ZSImBHUd&QfUYbf|yU}0s-{AE>8`}C&ZI+-Q zT^2Y`djYPOfM!FnMRu^m^bYcfn?FvSEf_0-%}GUI5Dvsp>cyjv@tdO{ozK=Y;Vxaq3cSvk*Mo@?`B 
zR&TxIAb69d=aH7lM$A9VZFiI!`%B9{?G;o7C{M0ZWN;iDTBr?ba!~B_DzC;t zNXB-?>xSi6g9%;|{r3{MW-e@h*!`$pUBsK)vY%H@|5?MuX6#KZLN7efS`P*s+{Asox3Z)2$^jc>5oW!i(xN!z zA^8autz%GX1-0C{y6t78L6nH{P*}tm3IMq+H2Lymy)2*YT_P5j}5)ACN~Nt`fUozpwo7WK?MWWEL> zKu_Q5kj!ZFtp*2pz&w7olvCa20@6Kl-~yTLOY`f`i2z{tEU!lmDwHO9iTkUOs+}li z!TakFfE`winbj?>D{DM#-ypP^hf^w4=1Pn+W+?Lb-WlforIq5Z8pQn)=qa|6P#@Qw z8QPa{X}O8$=ucFS{44KZQB-qxw1l=iSD?mca6P$b4R(()x1iHlHRDU0va!xmVr<$M z=7kL(QtqXOJXKQKc3Ct{kbPYJ8dOxJl8c+v5i)F%a(A^V3`=!2uAVHruc8e$J1N6e zUnC_`SKlOEO??1Y#)Bb{yF&V1Z$JmZCUcwxSL|AL>Q}(E1sNWukjI!C^$!3dL14L) zmkax2~O{7t&i-@{O)uy^lQC*i;J1= zsb7~KQ!R55#)W0y>zw*3xl2sa4LVtWP-)YyMU^_bwc5?iD8Ek#W0=pp9No#F*gYYq zsq$JYN83i2Ta|oIBU%W9CXU(X;uZf`m@=x_Xd3DSim3Q|a6`0qgUrat4Bm0wrl-1J zzxk)(5imhk?5 z4U4*d;?hPz0gXceO5>s65H5i|pBQ`~!t=VF=(2p}3jTBc@8!yy9j`o9wuqVooHG*d z@*vZ-!IHjHZY1{ORewGKHIPFCm(ynR>rDNUsxMaiE^B_X+HTuJzk0&Tz$MHvhtSQ< zikZK6+CBf#Ws4sbc#SrgVrDx5&I%^)(57|#BR_F+oa)bR`?N7hv-8bH*N(nePmGmj z_Hy_{<9B< zwmoD&y|G^NTP$<`#W8?Ap2o_QA$c{@^0IX|W8E~Z;6Bj7@7L%=?#Lt|s3Lh%-ofd} zZ}$F9E-OBCQ^A6K3_xkFfbQkiVJvTWtc$Rkqe5g8b_TbA;bQfR+;^YexyT#j`_#Ss zoZj?!F>XipP7d8muxOTFkK!I-zoN4yJBxmtB?(b=a0dh z%1!oeLzj`ZZCkgf66jSQ8SSF5J9?>^g*V>LoKCk^tFw{SFXW4%HL=N?9oyqbEW!w)qe5beXrYa9X%2ib{+}xHis;Mm;X`?BG{Se%r9Vh6!(!i{*U{g zah=l}YP^i&l;Dt4Mgi%Ct0$$1a{WRBc@<@dcmT@oUYc1M+N-zMV5{=#Kyxs$30vQ&QizqMurid3{AyM!={<)M!g4x3QBb!NE9(;$Ul%7pB?Q*<=8n#nFQi%5MYCO*Z5 zAgcIa-XP=wh1urY^LOCpil=;O$M1_}&ts)UF+01C0uVL-h;y{S@EnD!skbg_5Iox6 z!$+W_7}&jGV@Rg{oXU2_2VY{=8(rT#0q^)Y#hM9{+s$%ssLEsQD)S`*y9 z;`hn`0DaGO%yER&__PZ50|?dsNT7eSHyW#0<;fRGF&xX=ENOG>Ukh!f`)eZFFUUl` zpB?Oj*coMnzO10~P{+oM^yCPVfamecAcQ9sU%e9vHVQRYIMd7xo{?|bo$*)=d?gVF ztI6GB75(6`2%&0kf$}GW@_)W$g#X(YWW75L9Az<_ap=SZQ!7YC6DP%n}@ zK*#E|Lw-xHzBLx)_JQnk?uX#)JapjUc$XRZ<~L1+6CZ`kjUge38jt0;V7nxK#;a8@ zt-{NtpxNH?u)eh9Sj|p*@$?H4REsDL3Cg+v{h~b0`VGcMq)%JQzQ0=HU)h`Y>jT|H zkQZWNly(cXQ_ud%4apl^jg|^x0{|~M)3W1fP7XKaz}KTvLWor_>@heI+WX}Vd@c1( z+IA68ar#U}+@E%+@LYns{yL(+$vE`cigc)~N>OFrRZ9+sKQklj>nXv3X!W1n-Mm2P 
zK+u9UMpMOzKo!TFqT|!9JKILmeGZEA5b#*UBb}>fGe(G*d#drCr!IkfW=X5{( z`UQEPRkX)6{}sUUX0AhF5q0}{>rE;ug#y`qh*n3VrM+w%lIMjdgW2>GNs>Cqz{VVBsT?~d^PiY`m!6ooozQW|E!v!*pVnOL7 z+4X$yXc?BjmL;oS3+us|&?5J%5cL&10^Rv^g(O%-N_c0#+IqpM-@OikG+%sX{fv+& zodmUML{WrY?`iU1H^2DzxkJS2*cfj8c}CgEl@UBbK^i_)Fe*;8AttJf%jI4UnxNFb zKJ9ts7`FT1M&@pTToAHGY45{!ZI-k)8RfJy;*aK|n@tKtZ}zdf^w(m(M16=+tJv=j0I{kv1xe zTxBHWayid-tlclT?P!pSudPB?5aHe{J(_?pdg^`>xhJyOt`eBe#T zYn{rs^G~Hf&?N&y;I?D>SXX3X8s~SJftHCE=9cd%yQCnWXD{d-81>(1y^A$8c-(w~ zZjGg@maO9%ZNxNxG~;k?kXGuuskkrR9!kH7%dE8eRksYprfDb1W>bx zW-%tgZ$|qL-l{s^e8a|DcJgce7PDejV!fVZIAqkh^{hscIwuL`|0{p`{iDt^=?8Px zCee%I`fQ6iBnabB`xCY6hmKscG@qwacphfGtYXHCRp9w*il8F1{VnSL{*$)3hl2?7 zZ_zP}yy9ZRJR08yD9Fy&sWTb0Ufwx4+!p9D_l^=A*LNvKPB|U=lra~TTo07zcjXBF zczLY;3K9sEJs0(xV=ImBqYn68Y1d*8KsgH#!4|qYj&eyNU|$W3+!7=&UHJI ziF)+jE@OKlIAzH{&^?4?|H1GQ@16XUzN@iDj9V&p2S6E4os2{2E?PGc=dY65j&&WxWlPPXfhQ;rf8}ztW-naxOy;_x{2-VN_2v$W{`&y z@@f3J;t#u?0E(eJ&&11P^xbz?KzqJx-uTYiotFlsarQn#J6(-XFVHS}+qTpfp_%u_ zu3tCX*fTQ`fbnIL^QDr!%@MT;I_iw93k(Ta=Pqhv*L;0BQ)>h7;uN&&{Tdc?7*VEj zHS#NC5kU}Fyvo?3Dx|q&GQJUV5E^Ejk+v`FW}$pO2~uD1h@i!@V(Caz6!1#h`UKc5 zyu|E2xLav6NwbB2;Z*Zgvhaxk;DOlnFnoNZV$BDT4!u38!IMhDEa+G*EyXoig8sv+ zlT)0}_n6wJs5~OE@km<99zW3rY=-}le=)6I*pF+z8T(xC5u-Ut=LffKY)HU{!Ih)y|ukrN!d`z;B&p(qHND9wt-Y`BQ9$gbx zqT&C=Lps`b)M>7Ei;f-+A0KDcsGce~u6SiV|Msv~{Nn3&iesTxp4Pi~d`Lyv*KsYE zU@nC9WopFnqK|bS0JpWd+^K99;;)0AJ7DmAJ(>6SkK(=pv{IddGZpqVG}C;FzVW_GMhCup!qJOX!dB>WWGG2aoeIp%u0=Ag@#YSM$F_9@%)jI4kzy? 
zpCcE?LDFJKh-Tn+%4A`~VILJ_5)4bqM!&+)pK@J#2t8@N47tiY=G+9@d0gkS+MR~S zjok``3PghM$g@tn_#{n%E^>3L=ZCv)EU`{$t9az{Y91lh4ar=*|R7N%BAP+hS zkR>$S6GyfIfe>+Q(E$tM%zw?QQ_|1S1dhwUhFsQ_p$PHFDOPRd4_Brf70VU7H0pgI zBIZQ{7PZv3ceE)p9w*tANgDtl8`*Mr=k?7S69NCUNZ-zKKT!?k7UOQZIM!=0s0ch= zd$xe|q&C`*1vpwmR%NF1he4DFdE`WnPOh19p)ztcjq-tGQAex=)!G1 z5NBW0NxOFX={SUN&C+XulM;S1({)TNN{0pv$T$W>$m+A zv9(>F>3W@#A#bBT(_QEcgyY(Ht=`$m-*IPxX0gZ3uK4AQ5!S=Xk5tfPnqY`}y&FU% zn4qh7Ew(MNr7!XFV=xjEQ!@(?jwQW1MTcDn3))^i!nD!tyXnL_kk|mqKbkz+b**8S z_+c5)nRLp+F9c9F8u5Rc?8*4MhNpI%W?j@qswqPYfH^`E&xaNE05=-iG> zDoY6wXI&ycz^LG@8=&T6v@vG2)?AuZ8iYectS9p8Luc>8iucYtoHPwl+w%;k?NR$r zuFZzK9;s7GXXg|g64l=sh~EKoCMUv_3uO%4m6iCfmN3jOYvPrCesgEoKPPtL|MSOU z_KlZJ4W9rsSz{2}@v+ExZiVL?4);o($00bJA?M*?wqW*vtlIN4Go#S>yB+OXYg|4N!Itd0}j=wfm4<(+z6rX*Y6&XOZxOQ zY=Zk~ZQ^MVoW5J~7a3WZ!<hTrjh$tBG~&mUiTc&(;flN6_nqzpKY*(lh?q#c$PW)Y`)fi zUnM=|n7gu*ZDFQJ6pUHXPeJzmkqjnwc6K_hGSW8UW9n zJ0~vu12zSEevHl+gKVqx2dAt)n9A)%ftGI-AGb5_P~71U*@}vUrl;2|q_guCuihebc zns5;VrQ86L#fHZ1?9}@6;>ieVOLpe>o9K3m5F72e7+P6UUl@$gjqa#QRsX^Se8DAY zw@XHSX>i`-Wre%2r?V=`GKraCk#l65W3p@Lvy#`nm!#M#G8MaRaT?`HOKJMpPg|w; zRk*dl?%N@gIxMq0bV$O9?RdR0i!b3&Y277Y-c8WujH13l0O;W1E!+s6#PC$vy2h11 zh;ch<>ZiZiZ}o@=0eEQ3^~oG6VvDmUMLi4q2l|_aY{oKQ#zo|$S>Drj{N~BfE^SJzaB54 zmTw;)J7bbT9bXa8)yd$n1vWp|;~YS=vk_ z{#JGQ^co3;9oy;@N2);9_&)EpM3bc z&)vWr^5#c4kP%nXaR&#RnnVq9-|Ho!VF1u%{I_!~m?gc>kQY~k`*Nf*!7cGW16uA# zZy<;B?$p*;bV^gzCrE>_+06c-fK*PzlghB8PKH73DCTPWzNlf-=CU-+>{#;WsUM4K z@XPT#1T_aNB1N~$!)JL0H`))o`$|IImQgevl5Xd%ZD+1lPlX^KVMBhIuFd>iQ!m*N zhbE0(9iuaSLjj2sQTg2m*VPl`MbR%t5bACzL=Ue3BFjA_en&sd;5Kx(CMLM1_!Nk( z_~kgr`A2=hiSyk2t3}okxqe-S(aFNuP!=DxKGY1{NXZ&OY^xL+k%M23HC?iiGucw7 z9U}%Qf?9i>E@}%B5bW?^qlIWm2{!x@|9O({vzmEJe=v#Al4n#*w&fS9d$z zXmVWva)p^)w+P`SsT0Z>Z6yCN3+@R}Jo5O|X2Q~NdPML9NY<0Kd2;PmmDw8U z=mN`}+f$W@3EYcE!UcA9>C(K!<$^PGaj{hjp>cM&9>ldc!=auBFw@1?EHSmeH2$ix8c^LrR>r30xuJJ!4v1TxptlIuZc*m*?9 zG>f{hF1+MecN6dtp1*rgn%%~>2p&G|R>S%0PFF%I5$-k*D=1WICB410&vh1^8C_d^ 
zd2GxAUp^Tqdp6Ik)Fr`J_36Bw$FxNx&(y;o{d_IqpV3Eb-%nkd$zQighxtMq5gpx~ zf*mvgQ%8&1`@M=f*{bf|!${!{E<1Ak4 zVZJAm`(?WileO26#a%rAIL+W^=X7$ST6C%+J^Hifgdy1q`Y8=m?v-;Rh>U)vJ2+?U zZ6T;@|NF-uj`)d0M|Z|J7epq3>$Da-9^m7IO}|WhrI|>FiJI&?zHd0uL4q z?)tJVUfsbkEZ$G#&XFnPqYkm*VI-vnBew#npu$BKANYpOjt88q?eCQas^j~?i= zD831ta)L^ci@DbYkVR3S{3cM#f$Jv(bfzqJOU7GUhq23LU(4HB(S;!>1>0`6gWZAx z6yCn$=MW(I+&z#~SlvVp|z00K>?|7v6Dh>F7nck47XCEnlb&Ii3bd|e!OgSKQo**zecL5 zY=Xl4&ABo4MXJ!L>cJ}h{3K6{{(wbdZXm>jeV)mA`2FU{_hNwAt^YJ08dcp2!+wHa z=+l;eq7Fl_Ys-$mSVa^mT%#gf`6!L!mtCpi0k^yEuKGpwZ)3h5^xYH-w)kzvo`K}2 z7>i!lc_+zm>IUco$VluE#hk0`kgE1@&)=((^ElgIe|NHZD$L{*;q-g`uF8uiLUCBd~Y9 zpuN^<4Ab;^>Q|Q2GF_UT1+IVN5NGe>)zvO3oLG{dTl-bNx%wIlV6kI5H02eOPQe0n zj7$tVjvN^0+X?O(mt_WSYi;BiuUnJCS+|1xp>ZT4Ce?@W6xm0l8g=;gE7%R(b{F!!4BmK7Z>{f$W`kg=f5@2Yw-&F=)m(AM zI#4IE5OM-oYt#;0XPxy~wW@iV8gp^H(R|*NAx}}W-=aB$7=TZ{-+i`u%gvdJa@+JL zHMILv;)KueWSLX0?10F5_*ofwX~y@QG0VO>hqialR&~p6oi5x_L(jP|h;;5`KciX{1PU(0a*Ij;!pEFc1DDh653)w|b@UFmw1k4OBxf^ymO!QXLgg zxFJQLt8z|^*%W_W<_>DvaO+--!)RC`MO&`PPHu+5=h_>MPENse#=pA=nKPl|boBf^ z_t~l4A?4~l0n33%oO#dy;WB+pr{ZG7X@2>;_4dtZ|5E7wUw6I})J9U4nU{5$w_cKM z>A=f_ZgFiFJvN;!#K6kcim!{wcHqVSlcnG4+-NnTl`nbUyR16}wsrsUla`Xz$@$sz z(bci{`q8!C6f>{qAbH#66xugsF!#jb(A+>l^5nEZ4b|T%U%)dRpitVsGeJh-f~t5s z_ek!<1p-R30YFO<#;pUX-p?EAdX~hi-9=Yrs;ffuRgJ{ zsVsV_6WKFQ&&tLUhP*Y-X+7t4;#2B@+`YGsN&}tsAx0Y|wYDAXD3s^aK7f zG|ug@JJr+K)b^0G@ac?i=9`o38fCh&&_16&D}6bs!lgd)k5wLL3fnPyWo%=l@hoz6 zDQ(ZG>Esb}j?T<-!N!lz9$8;7=1Y2?0Po*Ra2&}0cv&cP2ScT%Z`+o#&$5lhIu6m# zCI^kt>$k-*Nk6-ptF%{%GjOOeeARMAw=?6gtsi% z425dwmBvLE36be<>s@zWg?v>n3>jVx&hgsaul zU?>W@KGzBNII4{U0DdCjZ;Ha19u>XSB+BN;7SZ&QO-$lHQL*WYxi=b=HwQHFfZg6z zYN_KXKi#UEiAK^9&EhcGHhX|$exuP(zD2|8b)4V_49F{2`Ke+`GQ7{TV()XVu2QbK zs#8VF;vFL$%M9z5oPUDQF7FoNV(DM!#hy^Ce38%>WgA~fCIw3a16(`nH!YD_$+=%% z$~F_0W!T^?XnzAQ+%CuDhNq_sYfAoHKMyvpjnKJOhL5-<4q8e=W5O{t>W>tjuU7RH zah`bdv9i^O-I_vSL$%62Nbs6*y3Gpu4qb}A;r(Va@Ng@fAu#re24Nm|zS8UP2l@&= zTy0$r0&f(1sUY-p-0l2xHw&v@nh45u(t4jJIpx82S4kte`l&tGZa%hoHJQ-}>$)1z 
z$Q5f>cAu55S-?h66@|SUa7X36c~=M58iU75S*64CY0Qe7$UzOVzI09Ow1Y7d+-dqo z$2j=nSX%OD1Vz!@R+rSxAMx_+>Y(CSrOBMuNNRKX_G6O#yvvH+!{NMk$&|6QlXgEyq%ym zsW0rczXo!8N$>QmE;tzC0Eiz`h;dG&Sfy@9LLt%oMX@BC2qQPTwI?cA4#BEiz zmJf(oEy4M#6N%4kw@wU!anrXe+bxX~Mz%SGFJvCim1ZRe%34WXTU86nfBVCS#qANA zoT=Tg7_`*lq}g*V@wO_4dy|n28YRilT4~6?U|)5do3+{A7uqAaNOD3h`0Aq<0{~;* zstMOah^-7DTj}K~vs1?yaR$|7@GFSlkUIYYG|6dI6D0z4iA`sqfZT~8BgXSl zb=C7^bcl^>d+Tnu(vp{TwRkA=u#qJZG~Owt^I~?Tb`(3F!$A8HSA>f;8(!O&Afit; zg5Fd(cTFWvooFfbE!qm2y4Q?zh?nZ+2;+;Q@pQ#a9cy|!S=m&kdH}AZHl9|wg)RNp-#wH9fPJ$Lq+v6r=})W&# zb;v*XB;NBV;82q3imJRq1B7-A=f2rhaoisM5$UZ>Vp#-+6<@9Uj`B zv0BCU1Ea@%2k_a^q&y%=^WL#z6aV?|`}5A`5Vnhy7DzyO4yvp=D32sMGI#romGBHvgb~tGTFD%L$yS0h0rtTo`p-a8zcf zu$m=0@T$4P3Lg`xRJ>t>#jo9ZnL_y*#PqC^&iAImI2%@;CYDdLvu(4mRL4z{k?3eK zUgDfwJZ#_pG$R|ZOo>Wdf#p)aA^EK3)cKI=O zj0})GU$x+rUoq^p!){jXPght?GgvC%9W^xXv?Nkf!9Oc>JPr)te}UBuPgwMFWn5aw zP5uUkYp1I* zQOf_R=cn+IFurz{JTZ0i;K5ONuwg0+cWq|IM=9#?Ho%QW15Wn8FRb{o z0tkCfZneYH=Z5eH5FjT^UyLu1Pslrrm!KAqV zEx(vTL6d`^yrJnLjSS8GA5%Dx@sf-oPgN=Jt`rIT-)fMu9~sMAM0MBytTzEAVNZ#> zdu4$#l1@0uQ|^7D>h1kKhyN+RG~5GAX{7iM{}@sOz&NHc8;GhSX7saU_V03zejy<5Zwu_+)(g4-nMJ~!|68jes)@W@H_xpWyd=~Vweo8d z1#GL;6U8I)mlW^*PjrI^{;gs>p;caq8jx+;$isaoctfR+_hrD%!OieRdt-`x(OvKV zzH)`{Yka}X{B~8|VH-OD6iW>pM)iZMLaman=b{ZzIL19_RyyAX^9TI5^s^^(G64?R zsG>-S?{UQ_xU*}mhpq^F1@mTK-;<92Q~Zc5yuV9@guuZx=!O&tR{-p1Q2Ph2>v=EZ zB4O(PsRYp`p?Xs*ymztB1a|vE(D4Mk7ABH#91(rkE_QAX1`qtVFl`Z^^+o(UK>=R>DuKN!6M=Rd-eZT`2YQ9<{p4P#vyu+Wol>QnHc?AwCuLJdF_^7A z%+A!#`88Nq($dD+)bTZ&q|Ik%Q*l#cmGjK)W}zrJrPqsMx?jd(!k+_W4IVWf5ikk9hcRAHQ+O5FaCwX!-G`<?A_4{dA+z&;Yi-$77VSLrd`Tks7udXIxR`65%U??SpRf3m? 
z+K#e{VVQGow8tc?b;1y#pRYMlZV%}smboY`k;&L+* zLA`+CFeT~Wuxp2`v-jl-$6%-4Z9sNbC3B6Shkyg@6&*PvvTK^1`^%Og%UJ4zcRyEI zchIE1-|Ygb^Z&-a|4@|o62FeoDs>^=kw<%ZdgZ!9B}Hz&(k#G`$$D)AZ7`O3{0W|Jd4CW_EcCbB(_ z!Z~4=R~PKq7h8_{e}{{R`xaPjdGCCdYIL*w{Ae)^z9CJy9Y!<+j*Fj84xCl+prFcT z6MmiQT+rK6%Ta&0OvZ69rkO1r%g=mXn{caT89sFr{eQZ^JJ^c?;cZ!5fv~6C9L~NS zFSbUk%3D@q%euyE?$D4*{ikDM(WITzKrW57Y;c%-`iWBGx?c3qBKW@7D-Cu#9a5pD zrodH)%Gmce{Z$j(*1t;Q@1K1dB>ss>xfvD=ev0{~{0yxutvcWNCq&#^%=1j;(07Vm zES=ydE^PB49_(if=FectO+MWCA;MHnW7i|J6xLHM@DINjr5YJrk-pZ~nG97$$0a5E zH&6jJP++Rv%dZ6=(9{>KwP&%AGqPLVUPwH%k6fA&1s@vyi=+4w2h}@XHiuD@Ggj@4 zmAKroWG>Nz(~Ean8uw$Uh5^98F#h!!H2l>P-(W1*f}fMy(zv2BEQKh{Q?7((L?j=s zTtjo9`BB5t;Dnu+wQMic;iT(t)#Ot%8KEeBE>*ASHg=6!0E4#C2|i_*7L!w>cS}C| zT&lZT4bGpK$uplF2kF-Ka>h)?s;$%1`O9ruE>NNdNOOq#&Vn5Hnt=(oTwJS3zP$Ty zSooefCZ_T{(7#8x#SnZbw{B?=tdmB>c0Z^sg@sGwHT}DLx%O4ym?lgbvip9r{zp*? zhm=(Hsf-z`JYUo|0M0UN!HJdRJr6cx5d&;R#m9c?u>h3^-@x%(yZCGK$kerWLFoUn zblrhez5oA85ke&tp_1&CnJpQSz2~hYd&ITJ)j$yvva)bB zKHuNp&pGFL-s|5$6yamyvq(_lTjc?(kHll?+=yXZFMOvqcQ>{~{pAWP5j3Dh82vk`np-kk zKX={%O+3M+(RwpgcpOMoQ6_JBw28VW&C&i(R;18c7Tk|Wm$2-O$o@qbnA8BJENc70 zK71dB!RBcy7e9T9i9EiFynp|8_Lw}aduW4d^CKwSr@!qa$u`!EXBGs9nV#kIYa6Pf0hGt>EJl z%J&ledxi;=PH6an#rZgUHV&06`5M+}#w9JGDAsU<@+gVzzr}gPhV5fJ&#EH8`ItCi zl6^n9rKJ=_Wf$g;9Y8JDu}9+xqy`e3vY_C-?3sezFYCS$nF1)+pO{JLWsRPTL=@_c0gA$ZU<+$W0X@m+&GhG>*kc)xCYxod zmiPfCUXVAw;d?Wi!lOPBAo=-K?VHt9g~DTiat;XFEw4LsDk)redlErFXGqpP$OG?^ z)sWDEiJC4OW8_tvQUx$%QI6GVtMakG&wwom@{Am1G1^?;Y3GtEBMzzTVK*kttEmeP zM}eKbEY>S8>jK{m@HxKPK)Jm~oz$|F3$FeM$p^B>ctPfM ze?^TJ`@hI8A@EN?%&oDnllRFAy*Gjbaa}7DQJ@m1dD)DRTS@>dz+)Xvvni^A zKT61g7_MVAqxL>TuX_Xdjf3zI)zOpP$+a>gnQ->?YBPJs6`Ucnq1ELU#8;;qC4gGk7@F0}gTNg`SgC|$n#D`J zo%v}=po)A`g2yp8TvBWz@Hep~b2^M6oU}V%aoKd`f+GB;v?oVQVKHWanrsc3&Bp3q}H>Q>X+;)(C&;oM3RAo2?KOWDw>lY`oCwWi z@fs9Fl4DLgulDP?Bo{vLh-1dvR@I$3jpyQhSH7tXYi4x{;#Qp=CUZF)NMk#NmhmaTgJvh_Q`5_r;Y6kl*nW5$*a4<#-xRnbttdtB)NtYbig2wvy z!)R(0b6=x7o7Y?4X)lc4tH|V0rWg+nKXuR&C(B9?T~>=FTm8SRoP&?FpYZv>IFF|U 
zk?QYflxn1P%J&ffz3dSFLB8{CF-}tw1UQb0VL!|b^}64J==9$=bsdry7^Y{T*y0IG{NnS_bD*)>P_Q0 zDk0<)!d5$j0lsb^p>Mj@kF%7k?Rst!{Q08Gy17^q3je*V`m31;?ZQTBJt3wnVLMRM z@0hI{)ClK#LdRQS6AT(ig;n%%c4swLSafp>&Jxt2s_lCHyP=||8PMYDWz4%`)uohe zHIFzd+TR`0c?)t?-ECkzl{PT}s75-(-h|iH)vAV&zY4K(eAN@$2C2EU@0%SB+x)6X zMBa4~OaO*et2%@oj12Di=FG85*W@2RRu9~mq9;cyr@cO%h3=52JbzB@w6U^&$!x&j zT<#2}x6PGkLNvITiq4$As!I1zMj`j`Z1hBdHsqW7D(Kb&=@|LGG0%7R>Fzyf2Saai zLDS~qrRx=4K=_aTTXLcz78|P6E>^1?-myE?AjvUqv3KR^x}3R~I0K8$KC6qtZs8o8 zeyZh2RC|9nFW$BfncVaOb@q)xrn+x-ULAqIZ`MLW|L-sUu2+@M`i62xkWXT-r1c52 zQO|C>hJ%E?(=z;?`8KMcO*46i6|#G)&Nq&&i2RYd5O7ssXZ4wKcn}U4%7+xA!aemn z^U8?tPrT0NU`ZDh!}$hsPYBYrn&s?}C%Agl?;o7**82zlEy)g)!`4L%hN{m0_}sPP z$DKR_m18P_Eb`9g2Jm3D{L-LH0_z_;zC*KiRGeW`ctQS1 z2&__JT1()$^=m*jdfHSqm;7TQ@a^PtIp{b9EkI|5d-+iVGgK0ZF24FR6%_tQ4|oc= zTu4|$-3K3V<`Sho4a+ZgPyj*ZMUQ(I_3efYAbJ=)nih+wuf9*CF4ieWb3EMKB$|ca zV-y$wH9pP4j7NGde;lU>-lVcjLmok2xB)nV=gnhe?Ek^Mr`k_mUAjiFDnb+pEZfj_ zhNceEkkI#5ps+QvwvHnm!$w?@*2p`TkQ zc=uwGDuc4I9?;+kp}q8}YshX|?gbcpoUwp=xBL=|xywU03Q&Mj)iQk4%ptX2$2AR5 zdb#Uj^wQ1SXzi$eWZPLNy*Fe49ikT8Lvu=QRwif|L_d#dP9LW?eK_O*CvZaHpVKt*f10Z9L>xFp~tgcI6*a3a@JXdz@Gp}&53$4WVr4TsupgLYKT zFb~8b?m5gWO6b{CogrhGIDd=)vSnC)7*RBw{_cse`@H~FaP6?2&x#bh4`79EQ=^=x z^ShL0$84DG{5+T3u|q0zN4z|f0GNP~<8C)Q{!dAUP2EgdsyPKb?aDcMBn|flz!(kM zu2yM;epUR`gHwJGk<>-CCo2bn&)_FHAbe~X^kkwH(tMmtA0=~D*$~pZeU(H(`oXN1 zJS|{5-Re#=$FUeX{b|@}tY}e1IssWN4{4YED2j~IzX#WJsPjY5T6NE{iF#lp~vAFnBk8M`8y6k4qI!Df&a#xi+cCwUv5>10!@Tj8a zAnmvv1s{*48}9SMpd7{w^;jM-6~F~Ds2{0;JMNIp!UhWtu7gV>Up=#^IorwSEw@=< z?`Y=Yn6HicCYX;_W-VSb-_FQxEC+)^7>LWt`R2y7!I{2NKnfyH%cfXNn1!z+`5_o0 zADRsN(Jwf;GuHVeDbu7~JGC7<%J*ti1W&Qk1+zl|a4vi}+1Q=(!ft=0P<@~P*EToRaznG4E&r(qu77zB3cBTL zIDO*X%Y9qTCgS1j#hkTugrJPKE7YLHg1J|!waYDGH$2?Oz8;K}-yAyk-sM=mNcKk( z8Li#>n7KeM+our8WjK4GI)NtaryyVwMDb8Qqc(UIY9zxbQBry z6&LU9oj&wUXd2VqaQUeGda6QB`P8%qu2-(5(QpqXl~f zA=dP+Gq_I_BP{=rBAWCI*+3CDs52)GErNnK8O&{rPJV!UEw*9frBq(Ll;i^Q7!fzM z64mUn1a$s`dbO`kDz+^LRT;3CmBG5#O8SIW#MIH{4+_*Jvm+VW>qWnVY_&51tT69& 
zn(C%ib3;FO%yat>f=rEy)`IlDV*$DWfGAdBR_kyb?pIp(Aa zBK$!|^z88tRh#l>k8xFy-|vqchNbEF_%{MVQ~|4ToZgeX>BActlNmTjmE@v#Mx<{u zpa}f9yKKo^4{QWSLeRI@8lZ<>ZP;X{G`tP2Neh6{`KK|jl)Ytz%G`0VjskmXbJ4Uq zr1jFub8i!*tt#9?7#X;ZGY3l&-^hn6JkG1fXR5Pw6^*Q)v+xu&J%P;lYW5zxQavom zJql59)%G9=-5L5?S`t!<`;IcEK7aMFO~qj1%@K|iaS`Sb+GSRlF@=I<5x$Wp%n}cZ z&BJ8DAT@of`ghCF?)&%0x6x;|AW+|{5|n6w(VL1i|3V#wgAcfBNqDpLQotB+W`Hu# zPoB>-=DpT!oCW@ETa3YST9}nYLqv^$IL8N|@SX{5%bt-uO9#{b&;gL;a+WD#t~~LD zK>IMnAe9KT*n@hVK65NHYckg*;h#3ES^|c^{EQxmk)PjOuy!b^SV~ZT)1W09^PcKW ze_296LJ{I;n*(M`g)Bh|FtrWTy;dD8Y5>gN;fFRH@k^FVvS^z5?S1&Fdg`kuB@=nh z269IWBBjFo#|8t9NR!hpV*`H~$)KHBydCAM0>b=@O%@m@A|yv}=-<@lV<*FMJt0eyd< zxbSw{BVE9kBt`N9tlFJS0ZsmX0y8ZfW_wll~0B*f3x7NWxUNEemli*)lXMK-z)xP=Ov14{Ld{pVw|jJ z8fc{7TJCQ!R>a%wReT@zJ%w{LS6Qm;1-h8I2}ltpLQl6V~%Y#TcNbykB~c7>_1#&fZ498in?a!F1nvvg2e z&rWX(jx(&zf3bI|#;US=v5CHSMOMr7xzH>QC{~Bm^pB{p%qw`p@0SCp0@@X~u^0_n zt1+KndJKXCe@5|__wImyBSSZx3~w@*LE_=YUlK}xROG7qgTpc4!VugVM$4d~ zQ%6!Zy=-SjVZ9q#vF?4oG`5E>fBpBgN!DPcNUN^_wq6WGqF*z}iN5gFSe%%!vXf{X zypny~r8X@Cl0E357BXfVGwhp~S55G|3+W8UK3mYo%=|N1gu)~^fPJos(Gq+)k{w~F z`?+z9uf=2&$czTVKA8TFzL>a2Qa-?47=nGVPXk&T3il&U)xzy*RX@#;F+gDo#t5+P zt+@gR1mfD(j*bUz9eL+;g9M$QF`kEvY4m++Ykn!OoFIvUbbsWITW0f~PROG4KYZx} ziY;P?kHqIW);=0TdY=8p>X!J_o)y_Zo=N>vQ8@`(gR$rhZ>zRBGkppDqQ~2*Y$!_P z#apMmv~mLQ6=Zis7KY28ILEbav8T(V=WnR&cMaM?zkKHu)Ur1oYbUifVhBu+$AbID zFv!A(PV$h>wH0&ks@J}`55#Cx?}YngV?%arkLNQ>_p)lvumZ)mJiHkYTcmxJpfdXxg=$!IORyVI0sGlH)Ttc--90*hS(V z1+V&uC~$Y`AymrGzuvO5zm&Dv|8zVG)XG5DwfsBxMc_c3tSVKCoJTSu4_AE3?vFAE zkFNSz-uK|vDK#Si`AnGm-jKeVZ=(Ph&9{48eGtA4oToVE9lmJfG^#_x^~3n~uetlLmZ9 z)U-Y1B^p^+QwNPYkd7CKVSZ`vs_YR(alHP^1q>S8H(OTlD7kI2`w@hI8{Jio5a8H6 zc2isubp7(--I#Ja0`*O|bG4K&*{;dQXMyyp*Nfrg6iFDn8ap|xUKlU6=Tr2Ai^PQA zsXZuT{D*4_7)L}@^NIYFzK&cVORr&u6-f7(fmMlWx1~|v(E6+Tga8+7CGw7x{OS-D`x0fr9_-&jU+}Ts)iP*r0xggx==jH!uiQV+}@Bh zpz3g1nI))6Q8O72Tb{D!1E5j$VR7znuVZD-%#+*xzdGePZ7z9@8iC22 zTqT{O(8)6S@EWS7#2A!o!oW4&4GPZ;g9gPi%6d_-ItQzgyw|lnA&MI&{kdU{m)3?a 
z$992N#tKqXtuoitX%6lC2PTI>2!dAv*jnZ4%uZiB<(_?g(6POwo26UK%}&+MTvIe{ z0l@!V+>nzN&toOXhh!*Yl3X3Abe;YNggbeH#G9U^bJS^UBJuVCz2nP zpUG~AmI7hS1BB(Dum5SpYE%aZ`yW=?TYQ(1TsSSRvh0;H!QkP-1dC&%PSF6zf3zMrn9OAxgPUd-lV{Al=7UX*V=4ZVu=IW8=y!hM;Oi}y?PWj zv6?G-9M^{~9}6n4Eq)<2)t1&0{vq3J`~j#Qw=3mM8`E`cxjhGzfA0;D(K+8wwlkmb zT}h9QtSL|JRHf!dY;%B0d>dibSdO;U7u#8t;`YDLc(=ofP9F#%D(DPOszN&`{~OR#a*ssJEoHjml^GWD{Z0_!khX#1mZky%Fd4Z=+d1t zS*$TZI*~|ecL9Mh5_0Y^wURX$nOC0e%9IGgih_wFOiA5Oq~8Yf(1uCebpp4;s;7d8 z{AKTq(W@+3JY1Z2ln;mhR{rPDbm;3(2`WrW*+;fqOK&*dw}v=$>B-K@h6p}9)>u+d zW7MZ8>5`VX4yeHb#h3xt%o6yvN_~Axlbpop*K^b&O9^X7oryetCyGp3X`BOwvyQXU z`B*Z~fO@^Oze~tL=f9m&?nV}gqckr@nS6MrjiDzw&rm-7SmMlm^3Ruowtv+}BlSz| zHFl;_nol&A_SVTxx1qMp)d@}?wIgc1YxM!ng-biTq=jm^iLV-Xn5ZOnOk@h*!fts$ ziu~+?XFu?Xa%-CDAvck#2l(DUBJYktvooSMMA<`xsPQSt;&0^}zS^X{lUoZlZd4Zk%-L^TAy0=Xht` zcl3+fcOck{*QM0#+;@h>Re=|VBxdRMW!duo03ygFMDl(ORKAeMd-g+F+rKKQD8IYm zEyt_KJ|wO7jsQka2(!leSY!7*&uBG0gHUPyFtFaE?ECZM`|jg;d*o|Ds9%YQf;&~? zp9`9#y0DoGxdvLmw716Pw31>U%@{$UNdj;6&TgR@-4DqG-dVk*-P-+6(dMn(zlhS( z2=iUW4+76TUd9NzX@4iG9lMOmsK^EUQy96E?N+x?8BzN1NZOv zx`%stn+kive}u9X`VxmU3ofd^dlG4w0kcUEUf8mP4tDXj;*i^?_)zQnRZgtQb{&4c ze|XP#vPQ+?4zE3Uhfi&Q>Z&q-qBqI+sp0ckPdN6sr_!9@CA>1i$* z>GDgb?elY0b^zyA600Gi_NYnnff=p6h{Sh1E(@1R_5j4%rx~{R`$KvAYo+u zAq?V{qd*#I)5a#UAG4_!BWVH};mP=4#6hTu2z|5^k%}JR%mS7tVMfck=_I(4+c;U5 zn8@Q7t@AB!+JdstGw(n;e?O-^El4c7gMjDdJj5aI&!!W-%pMX8zVn1)h=1}@r|oDo zKpsS>-Qwn36ISg^BuZ*DxKUPNq5eD(R?}Vzw6nMw!CK0QX`~}?v?pc1^1Qa;%|&^Q z<#nJQy6CJNFuF(DKM+nS8DRj*5;Vcgh(l<@xs}x#5!9kT7itPnHl+_ASM-C9DQ7+r ztN-w4nvLNXS>k&HJaWwsbaE|xrZz^w^o>Kl+ve{xTn`kiz?Z?we=oUb>#8^PaFaV~ zubd`6!z14}Uxx{xPg_}i|HzM%@c?%0Ky3A$yLmviX(D)O9aBMoK)sh6|C9i5aI^rnRXO)P ztyQR7M*daFEgGjwLQC69bE?c-)Y~keGLG&Pe>K*|rY??$8Fwy^0lrXOgOQ0&*hlFX zfqIn9U~viv^|s|MHBJ3NCDPYy;I78DSkt7xB%}G+7@+Ti!e9sCokg#Y-^tefdj!Pr?3q zXQDxPt|30O+XtWw^HjcXflobn1$hDu8jDbo+lCaQqsqpcsfqUJgAnk^1~l_|y=d*J z;#a#OmiS^XF#my?If2)cR77}WsM!3o-41hBg26qBYOI5c%>jEss(2z*kc)Y#c zwKO*c%F6q32-0H`s#fg#!w>s_&V_L_qKLD~*S%7iHx6i83lpRY633;E$-=bSJVZza 
zwZ?(_p6=9h3>+g#rxyoNsjr5>mX_{1_>r_W8Q=?7hhxxf9Ps?;11!)aU`h4^$r0(6 z{?#V}h4B~kXLpzAoM6wYopH-|+D%2M-7+<~zghrsU;_rM7UBE;etefuwgOkgAE(Mq z=Zd4@xCKf2la><-kM+4K){QnZuo3?j=;`z1oi9^_uRD z-C8nqd5&5eCo}2$ntc{a#=kpx*4HRJ!5$9GL2P+8@8e!sS*Gj0Y1gR<|B%5c_6HV3 z-1-+@%@QLMzPO;lI~U3~>tBF=x6GoAtekYROzrG@Eg`?DqwEYW10rs{*~X31sbnPg zy0NjiL;r0kQ@)Y{)^`jq>uvAqOXpd^uZx{^w8G=4MAU1Je55L+CY=pT^R^!>Z=OO8 z?QbOKzWj%F6!EvOmtZwhKRLb$BdWZ{6giIcr@`nATON)wr`t7MjGF^288~X#CxKI^ z0r{$?RmEDyX1RwvZ0~#8$(iE{PMyn-)r=&03;H!%zxE|gx%2XEErY#f9hgeUfKNn5 zbt%vW$Fnz%QCQ&MyFQ5*BuS`G?jM!?hEjh$yXDNX;F7`w8F`k%ahi*OOKZ~1+aXzh1q zxUT#p_(O@Y{+O`}V@ha?bb^+mc*C}(mB4N_9earHpEh9Sv-raojy zKK8-hcSHv4o5Zpd`v=3%UWPJnH)Yw;4I@VNTH;`3qhT9k9;n_+z>a60JyBRIfhsR& zTeoKp*fzo!UYRXEp0X+Z z;dRjP8_RS3cl)i&a7y@b^MLa@2AMi>pwP&Bngs?0-;`3RCibQ6dXBt!=Ne*ud=e9l z))=)|W=El|-O$`&wHr|62-!;hh@BKx6@;>F1QAwa`=+xl-$ZJfrafl!<`J6+>q zg(|rF^l*%YFCHKTq%VB%3dNWKmCWX8%1Qkl2w zbH)<-jE>vK=kX;#wt)zRj18U`_WbUu_Y*ox6N^90kDa#qUeb{gz8L1H zZ?$GQI}b(|=Qm(G=b-=r&TMJ_b8Y_y|He}7Pl^(N@8EZ#Upuzu3wlRwk;A(u;v-ZF z)3X=%E4vvt8y;DHub0w>P4Bc5$M?k7)e&RlD0FlK1^@l>AoafLqktK(4Gqhh`D9Jf zF}&EbA#Ml^oz-*gTr$me0R!HeCtcW-hE*X!XL5tPJd<*e!Z(4A-pU z`o2zZ&ht!^d=Bc`3n!df_wEu({);J~PR1Dbh^;LQ$Mo_sq z=iwE(CJ9Sx8EeaY_mlTR^>kN6+6SH(uN`PjP+gDH+7n+kY*y08E9znqnZ&=~hL2rdJ;4 zo4PFgT=1>TUL#*{yhCrr@iYP{Q%>hwpu5~#Zh%MS8AGi2zIWWg_-9&SX4HaYDHHjH9njEZNIG6R4~V{)92$%iP6jU z&+7<6d}s5rJ0ZjqaHzHaT%=_p;kNZtcZ&g_Xmg$hb~xenYGhH;OCP=; z46ycvovg(<@+TK$X^x)xzhMWc_n*jIY#;}eAZ(>ezT(74vZaX|VHmroia+o%{7MUX)Pw0l{8UD*L|(*cqs|_O~h+{)w>)KYnzMLl~xU z&t>6!W=L%0lsUecztKwU z(SMLCp=iEI0AIAex|8f)efecrpbXg$bAq8Wv5_$l#tykZ_q4t8v_*+E?^CQDKli&S z;F^mE==ZXaBUG#`=TfRkq5ABxIN+2Fl9u#@t#OOZwz2G8ygu8%sb~DyQnuuRC+9vz zDg4%5W5^Y*)o6neAO))E1=b5}bO#PiZHl4>GRMfL5Ct7y3HB{_U)cKq)Nabm z(zH0BoD&LN{Nwcxo=%%@>Rw02$VI|;eDcp=VBf)m@f38Pj2%%n_ zE@9~JoevBlQ=d8OT(E$=P1rY)yD6{f`R1!pv_J&`&I)p2Bz!3Bg48b31TIjSaIy6a z={pI|7xjsAb6hR-0g8o}O8$I4L!JEBF2akC0wxV-@~AJ$fdBVV03Dv0r7QC4BmJe< z>ov|taFpMqch~1LAgTUj7S=+wDYGo8VJMB^Pvqht8lL*dGY3XEH2q`}cZR3Z1>e7( 
zB;_v!wv(Vf5AL62KpVnp)2kEx1Ib+C@LfY6rT@|A;e@1ar2~B+IE!>q3I>~6h8@zI z(kk@i)j&hvlt+w)e#5FPNzg)rTXf?hXP^}hz(`Ej6Tz~!Z`*04+V8${wO=~+0pFe2 zKBD$hx`_(O@Mxoc6ugNfK+|1m{Z%syr$4>^Jm&3OuYO~??-pLFnp-fLXHU$sV>*70 zoNyuSTIz1ws>49sZ<@@VwfxLn&Ntm#0K`gs*EI7LU!MN!U&PJvz98J)5na7c&wNH* zrwIWpZteXBk%Vl-%l#4|T)WBdO*yy$eh{VzeS z^RvA&;a{%J!~Q0BgP?c)F=nqKs?L1@sC%mWSAUd`J;@%HR7y$M*shre=g%|`{JWCW?f7roS%9mQK zkdYHjNGq_lsS9#)*Ke2J(;*c)#$9|q zW0SwzGOC?B!+4UwDO?!%6Il|H6~dTo`z-sUsb5!eEV@azd<8qny* z#&iU6VfVQc6QlEKZcmau$m^-7SN*-{-9wZn(k&lq6$MI&oozJVf09a%IW>IrXQO7& zb-v$-Ce3}0dapS4P*XI0Lf19m5oCl-eP?|5FgE3r{de!X>3#jd$s;>+rd74vTw7gF zOfocw3gdHCt^3J}Uxz!6zh_YmDHw5jm9n+AJ$L5(X(iWs3;gt7)=-wUn)%WSyu5}f zOIP1!1luN+@NRhj1T}7&1nG&9Rje-gokBuW04Gzy)6Gwb@pVR?S>3miogtK4uU7gz z_qg7jyVf=FcX3g8=P)X`KF_n;lUcq*OHKqHa zq%{iVgY|Ksg$5D<*O4AS4e?hq*tyLEbLVJQH$#EI`%k2^(c#Nr+aGpqqENk_FYkB| zEHI9q*ySm+nqz(e@bwLRo+M;Bw?|HtF~8F+j^na*^_P>;H!&Qem`!a&ESO>i#(Km# zG3PFl{831lAX&fAvGv~dbk9bDFAST_Cx5NAy`YBC)tUVgwfWW2$HK}~aEXO*ZzfK? zZ4TLUF7sw@O9?Fez}j+xi-hXeC+eHsD-Z==L{hMyxV8^O{^U-zaCLZ!LFtiJ4yJJn z2jeo2KVyg44{*jBd^*WF0=$CW6FvoHf>9jotAN4@Ngx&03GCJB031Py46lG|ttG7u zw}F)%S(wqPa2M^HViSwe(`G~-**0^DU^KoMp!f*S+8&0a$yL>O&xqyiruXDgUzV== z8}#;KbI!IY@T7n6Zg3Kml#ERpx=<#RoZ8pcfcf#C6n}2OwKX}4H}wK;R)>^#at0v` zrk^v%ZT#@iM$NtT)N|gD<1&0EB;h_su0W&z;wY&0FsZv7Mh_8n`OhUrPc+I2?g%yo_hNxi!j0AexAt0GvAwXF=ntjwIn4h2)h@kM)@Jnh z^0^2`YuWB(wdLpOLcfZrGk#dxUjNUT%b%~!GWmn|prfe^nvubQIM{9s*%3Wkg zS54PettFZg-r~KX+&1>bt}0So(6}5bH^AVQ^WQjbm5fPFuw=a(JDn&bmGytAO9H;? 
zb}KpRm~F>M9}GkTQLGQUpUYz}9dTzVguhBkk5K9P&^)VCJlQ^G{)8LM2w`QD5|Lyh zp}$>=`B5&QvS*r3o{Ht2r)D<+`?RUk_)JLM5+mq2=$w8zIxTS#c-)ivx0&kc-IRA( z1PAhYfb6H6p3s0gc_x8HgueR1*(Ty1nR%*|s%qZO7m=g0GF@vEH= zKj1r?T;UiPVKOOF%AHmh8Ey*D7Gj?_O6o$!6*zMls z>o;GGB?NB1Q{9(_B>w(rRFZL^O==tKat?axOmV3T${XOX9|IO&Q}u1ETXWKv%Y+80(kh$lo{tr^Sfw9jNVvqvpeWM%M7P15FrW8kej|{1ApEDV3=~1%i&2LQnTUKf z`_o+h1yS(ITt{XFTO6^Kx;TbCT3+d4KXc5pK3% z6{*qhJGC^^1=+*X#IuMsm69(BI1+4DCfQ)TWREOjdk2uOB>u|vuSwG&RMrWy%Odzo`s0KbU&v*udlM!D#f^5yyfBnC!s(t+AL$s9o{BfGPjK<8uU~6v_QW}!Ohi) zHF&o70wR1-#4r8mk6UcLdUaXT>ncQ1>(Q~9+#0X*z%tN?HSRgEgsl779*bplCil}W zwSV~HECov8#ikpnzm8S|iY3}YfA6KX4(&k`U$7{@2_ttw4?O$~90Gr@ap&~Ceg9X4 zW8H~4QE= za?9^pIasAUku5v=ap<8h_S9~cfW3tRu$TMf{4~IzUQQ}HMN~`aTIp+FC-84%cqu)c zvAy)28}n(c7^)Lu^VB%d!mkgnQQlgkK18)&wN?w+ShgQ!sa863YH0rSl#BqQegVEl z0=qt5eMZscfOi)B4bBHD;Hu6(f*;s!Rg6yAe?LpQPYM~{=j~;+YQvk$jFfYrH_O%3 zhi{G7x1*lHWnO&}=h<-JS~H<#(KU}hKcuG9cI$e?I&gh5T;@k7_xSVBaR zIaswlcB`J*WCT0q9_gJr0K-rifvYZMAvxqHi*ZB$CD`>2O%d$lak~KehLi+!y1^tu zBg=V#0coR?bA6RQNmdOK!Gpr)lG??$U=%GWa zteg5AC-kV9bu;ra*#!y4CL_-gVV);@I#+SNO|KSN>0ojX8By5{S@Fkz5g0w?`qoW_ zqmyxDc{?D_gd}yp`zVO1j99qvy(n7EjsE~ngk{M6xS%x_o!Qu8o%)8XyK zg!h}K4vIN14g=D5%7o;1et;57`~3^K&dv3{;jhhOYetM-i1fxO8J=+~#3-OtVcp+%W@Z`&#t-f)#ONTN6ENt_!TzJUAYtBJay+tQ^N2oREkOqUb{F%mKxak&Xd-NI_NB-!tzasJtc7+5NWaOVu_gi_(OsbC%olXZ-ZGUphYiSM}7g z*`lcL4*O}HrC@FqJA^tocL`9ewK>~E@=Y;LjT+y8eH)>#<{62L@0QuJU$TdJ#$%5s zFAM5+ ztuYRdn258bYHab7_UKPSvur->@JdraajSoeFcYB|3xfsijA_B2nM_jpx>wj_6iTn2 zcil}r{h`DU9B8MC@XTlnYr+DlOl@aKRJs*2zA!Jz;_qX1*VQ%RZ6f_VmnxKoq=;Y= zU+Z6-HoHaiYKU4rtR-StY1-42O|>N~HtarHRhmyYrEC=-f=%`6%oXM8+8m=^p%_`? 
z)`o^s1OA043#`ok&1I18{A)!98(e1?Ra9ywP&gS>niMxt`)N>qZ!VAf`@S9yXU@xDx`KqdgW0{X1Npn@$;3E=K zyL=l~*17jzyDaw!I!r4*P=tX)?i|JPR*h|ctRq~Dm5JdR_6$ql}@qsypenC_MNq{*P>Y=~VASdmKk z(;#6DbebQDE%;at5y76R>s68r=74p%&72w{yGE%o+sKYNRQsD!d}~4QcVgI8d+|q; zSC;Q3btCf;73sU8S1sPIjlO<9KZX)yBv5y;*m8)&0vnZSN14NQZ#GBmOW7 z)T5Y-p{o={O|mjBO|Do^>7HxHM=rh#lT*V2X8WR4#lH5D*I+N>Xre8orbO_in$s`( z68iik=SYX&onn7|DJ7_77bXg`lfcUQ>`K{+n8vmUl3Q>eR(o;DRaR=sN+9--=4Y-L zKP)haK|{ss+$;UVrL3vAYNeVE(pNGHI0GqfoDkbxTFqOox%S}{u$S3gWX`bl1awMo zvM%D$(VAA}cH#IL+eG5HvPBoSj8%@+O;`&bY2hTv-$$n(g;%R}d#SViHf5F7*RFKF z=zItZbR?u?`Am47f`ep|N~R{!z)f)cO0!4TFJ;9ezbml_26aG(rK!NoE*ZIS`jUP8>(%2rsW)fcA;`_pD?ZY8bVgiB&jc2zdH?sYDSy5(wvC(Z%P1Xb zJL_8HV?w4Un<-GAY$tZzVOGfd(FhXkIdx1geH{-iRiX8Lj5(}vUCw+Fb$gOdyDv*x zQS636eccmp4(PfjkyUQy9$uH4-+ok)0wDv%@#^U>kC5{2ksYVQw4a`Wo#+up-0+q= zkwtk>g;yfs&Z7Tp6t(VmhcfXzU7Qld>9AfFbk*T>dWE&4K)df$!SUuJeazjI1>J`z zJY4I19W`F4dW6pg`9SiM#rs!EvDb;=&TK+xg|Vl6Of6I03;w-+ls9DqPfGejx(!*_ zo0gODOsa7TT56!X&XB8nbw!_Ut&B1C18&iFnf-h@oczWVsFP-> zF>(QHJ0pRI(Z{~vl?NvAucD+;hzVn^2cPPAL{SX8Qg zdm@>FZ6jaT+H-;PQni6o{XHq#|G)GPyG}E;OL`9{oM(Q%^0=tuO~O|6zej$a7n4YD z^)*9&$E;N&Hd!g z5&CPv%taJKj3k?Se~(WNyUtUeefddu`Z1QsuS+fXG>050O^{vG&>%=|gkt zLhx1oFS__DzcSfZrr#X{#{#4_l;8ENY5np0lsY`P?be?C-NUhdj(RogPs=K6geJ(#L-6~h{ zJw!2`I|sQ%{w9Wak+2{1jd6@mN9v^W8~=z2T+HvzY`J|ek4HSqvH~U0DINzz)F=Z@ zKl-)2v$O!BdukoC3{GWKC>^;t(a0yrxw)^ki|WhzUl?}VjmdtQt>uvSW9+E^J8M^K zayrSTm)o=+XNcn;Yu#3C}QP;J8ev5i@ zgSOuH67W@5<_jST^X~hJ=GRCAkM8TI*EmbP)!Gkk);ukG1}U=f_z_==THDMqi+8Y# z*Z&dq)p1dM&)-l&0%$)bcnVCE1+;hX8+>bn+d0@~Ewc5QBSfZW4y;E`3`U>ee-%6fq-x?t! 
z=U$IL#$h?#TYp&r)|;Fte)YYH+=fg^WP4F0`Q7~e{D;_)6NHqJD8`z%CiYR<1`5Da zM*^X4D+is)P=?uw=B_-kA9jz6uixCMxXa*yuvT{>b%MrX@R4zrBjxJ+{!&pecTU24>_02zqTSrT z+H*3(gSv5x8*j?@1JGhY$gc~Tox#iUjlaBN0qt&yyJfI9Q+a{iYO|D7qp{NqeFr}~ z+&n*PIsKgpM}3pkXYfxetH?5b+6&Fwe)Ik=C`iS{CZq26?`N))-f0_WU*{CR_9A(*!8Cew>(qG#Ch3jbx9s*rWNHTbJ4DJ5yj@U) zT{i%U8UII6b6Ys6Qrdu0))pN-x#DnY299@={caB6kvFYUAIUmI=z5uNqBC7+AK2V< zh~FtaE8Kd)+emE{!3-K`(WhtgWbQ5RxB%kW_FUjD%06Rz*vEs9mho)qg~PM&Gt>gA7>f7t1yO!S`sTk@jDS^i{W{$o+ONVF-BtOWOBoY zD<1SuJQkP##rnV4k5SYF)V57O;^N&oN#QXOqS#ZUv$$JvejVxqgcBlH+l_7zl?doj zgFm_Fl@U=xXyrZoFNM31JGJmDdQaKsT>LUh_|Q~rEoe~Lwc|(Ah3&QbEZcTo->yPe zLgr|U?8kl#Y(8iAG6SS|PrI-EA?}bwr-$7DKfV0x*A~}&pTlOYmEJ(#L0~?mAsOrr z7F!Dm-yzpTUWScZTb4D^+9|Ee)h87n(v_gXlZPXL)=uAIcY)9MrDSe}h@#g22#AD* zfzATniM#f&qg{|fVo2+`siHnf-e=w|X_L$0X#<<-86b5qhU2l(CCDc%v>sUD-eGJn>w0SblcBNC^@T zzLi30*wl96S<{6?{rr9&$Ol);@7+OoT#0F|woBakVPjvhVNFp1U{@(K41P*!(T}J4 z(t9pGIi~rztwC+@=CRuka=@d;TK{G$5M(}66E&svhQrPTTHh#Vva}`*aJP9(wL+3fVRZtv$L+>NJ0}O zBFRbvaqdj|rI>^w@`PDHd%OYN!h6~JCs$M%B3ifrj?(osh+cCv@}*h_m>_f_9ASnF?+#zE2R!*k!(NwNQ>RI6dFh%x_Z z!i$gj@;7;PCUliRS23&Is#Wizt5qo!sv+u0?NK!cg{UFV(q*R3PIq6LcE9&2`j!?(}P7 zW<~JL;Oj;1d{miCP19VmBed`qGXd@ON3m-6(sZIO zCCwqr3~2sgH+%z#xLXRldPsveN_oTU6-(){4hA1lf@+HX?CrY7@TkStOTT^k53*qy zJ-wibI7#xepEtS|IXtP8j|J|--$m?%(Uwmp;!LFf1Z@^v!t1E!QM~|5l?=?32R^i_ z9n)GeFO1cfqnqe#iBqz!z~ps!%^CHq^A)&iKjf@yKPxl-LA@98Pivr%_PY&XOPBXz z(z9064nW>LYSC!D*hsqnK5qLW;{BFR+B~eXYI^LM#;v5ATXxlIhv=HCd4;=AF6 zmG7*;n zoCIAS3*BGm`dGwq*}Q(;U@--y`^C(jt%Gk~6mN0p2yQkz^WZ`Bu3b7}3$l+SL(z6Xb<$~dUWbw?IzFW|} z;c&O5R`2{_Lw;;lcO&BoDw*5Ua^<}ni_L<MR%&_7*L3G;iTBY)CEm2&Kw=umv zyf%7W+pf%|norxC&zl{Rk>yj!AWxRfi`JAm5+_|Szo_R8f zC+V`IGXFPF%WHS7Kd&Kj?AKj?%OeGf^vdwnwl{ z{}%325JH(Q{?wtnmAOYb-^vVG*?=kh-yn^vmAx#R?Qmc^pNW8b?%_9i{#&DM$L)@e zyM?4;{$~}N1}p{s{z6UhBs4oh;~SrUhSfU+oGqI?27Q3-ciE+somge^!Fzie9dkGB zm4pvNB8t~b6~rKd@vCrbWJ62+al+!eIlgJBuewcve|cy|vek1UiRVCf6HlexWT#W7 z>}6%EtE8tT(n~>_UIvRF&=8My2iV@p`VUYS&f|y%?^#=8Y0d|4wJ_mIOK0%F*`)>fNvXcYC~M@l^&^8ZF>-~kg`Ct&rD1hLC6KK 
z&%btN7d_}rt7X-j_($il?JIaH%HgG>69ls4;7eg?=?@R|i`gd^!Ekt9&v?=_G8*Xb ze+Op#uQmlqF#(gU+_aelv2uoOz?p=}vpb&Wx`as!njhTdvmODDTW#DkVKplPxg_|U zs-TdS9u$$q!OVzG9yBpl9oM!8>&1!nsEl9S9nWdH(h%Iq9LL^#roZX;*hFcc0*ZQo z>z%Kdn%QXxSLiYD76wc{}6@BKpUMzJM z?sHnjPN5VpO7#5w-OpMumE$eygls#Z?`b{i5XaQMMd1kT$(aB9pDRK`xUow!$a~@- z|4{?J4`<_7;{)DNT;IVvg>)NHkd#vec?uXgQ{{LxoiQ5o-9e}fr?r73aC&m(i>w$0 z!R<5d$zQ~ZVwK!YD3ZiO*>WW!y-#o(WMuVgR3LIoc~r({;f`G|YnCBbzIcchU-e+L zC}i($ykZhMeFcO6ekbu|{5Wxuz>Rd$NS*j`vdrun>1Bj|7mCsk76!Lc$#9O2^TXm(WpT;!c8 zY~eb`3^Y+ygFs)<&<=^^4ELy5^IhSPF_KvPDH&2YcWbFb;0~D=mADS9yUQKUvr^8u z!Xox`Rpn8|^+<8oPOco-1qE21_-JW!(3w9Q`3-@S+R+kdDs178 zwYgU!ro7`hV-?b^OXz2PZ@dU)=IRJ%LH)?b1R=gozKmY+ zJmUr29??Nb{_Qp1JNzTO1?f>n(Cw%Gr+*UOrI zyKe-TwAFb4{k?$$Nj0TstiRi+e_ME6`^C7UzPw0j1w2QCn}(8V%FbkdcUJ5EwxAtL z=+m7#V|89=Kt0|Uv6~Dx~8;#_<=Ec47>@>E&I}Y;G&YP0cJVcj_UH2Q{ zhNtlSgPUVp0V8`g5yfuXl*aY}+HYce<3>g|^G+DQb2i10mo;#~^M4DXubf&6xkl4z zu3vj9S#UH@qJ@0bA?EQSYhv6en|$nZcwhB`(&ctIiKq~(+-iuwy_gp=9YEUXD@UV` zl5E}D9&68g1O2C2sgua(p1Z$1?3%ayl@?;t8zJXvvB`i|pdfVD(h*=D>EwGteY9kNCcYW0t!M_0|oGAy2(n!UqlG zY(CTRex0Jf<^9T2&J2j2#*lY~S^q?f3cjuhL#$j1JrFw@5Q81Jj&(&8B{a#pd!b?o%l;Q7Fd!)xSYdW3QHS%QMa-g^&& z+oV=Mf?@QhLq%3Om~6qwEuoi82m&ML2E(gt0~04|4(R>>41sw>inJ6+PPt7GI3HBq zcfYvi$@?a^Bm;9m;5OzTUNj#i*y}mf(&=_sKMA z$*i=uK9xa+qF>KP3Wb7H)^$WYj<1}m?cv%^VE6d|T3)*%zD@52zS@YDc`bI#o6u}w zaV~eT7^60^m1`AaKW|)X-~DG7-spc;7hy5W2=`dnn$4etr=rP*AftA(*({O2qN8Yz z_OB67L*S87=%~OuQ40ZbII60Bl^?zozt7Za{#^f~m^M;m-+JNHZcg<&G_dYM8NmxP zUwwW(m7pf7f3N$oT@d+vCeK@-oax2eivGMII4Ix16cA4W8@^STpA${~u2~qqdvHT? 
z+6nsygEzWSr(r70tky{;C&kIxd&71IN|FlX)io3RAS?%}lwRpC2Y8PTC`+tSQYeyk zXB2%6KyQsaoA}h(fk-vIEPh|J?4}&72^eg16!@Z`r4RS7c+Ga(xtXk3eqoZS;`NQ)zcn~ro8%7w2EQO%YX&r9j+dPX*x z?2UxxVsgZ#iM6d{<6+pnaw|xzM;(e`|GN$Lb&Ar2f~sLmECI5SO@_yZz_5jnTaOYhl0(ag+yqX&un|NGAv$rs+FBkb#>kO8ZYK$0}hShc(R1n^evJ zyQyEyw$9%MS}2C2&@M7rpgcur0ekl-Y-F9s$CgxcR3Xjen?0^=mG^9-71_3mhb&Ky z5}i4owS2qO*j6CzRxlT`!g|H`t2-Y}gY+|>A_lEQGyzm@wlJa;aU19j>KlI@J{esm zk(E))MQudN=Bt%i)xkduU7yB!934?@8<2J=hil54*{>~Fw4LcnOo@()*<$a&@ZRsh z#tsB&W@Iilx=UvsWte0)<%}I&gj&^}HCa8nfj3HqFqtDwFK$7 z8wI8;d;uN6^;7OTJdGimY~?C!B7wP*g0m6z%Qo>i<&0;?|C;`L>ovS9QtgG0*fFDT z@mzdMiFF`p&gLJ!aKw_`ZO`vfBO!v#i*Q9&zssa^d(3gO#v8lNo5vr&E`0U-KMu9m zVyE<@MqIIV7yO2EU8!cr(fm9^Ey=%zwjfPao-fW&q*T^$z6p4f~%3gHfLI&t)jC?uU;}rv!g)P6KdSde;D?XHK_u*r?gZx@M z?r;wM-84FT>r?ovb!K??+Sv+hZU!q96dt@Nc;Yqj5-pI;jA%=!GDtV#z*iA?&=X)Q5zk<8suX^SuyVg_WW#oaxMbPCx7XW6k=;)e# zPM37Et-k%$LN4&F%Hai@#Guc+O=`_=c7uPhJpFzTG44)3^aa_%*ugr4LofCmp?aNv zCIQOsg-ZXF)f8Zj;QxB>UxjTS3tIcK&UYWe@1}aK&r)UVaRABbnANL`Gsryc^dd%_ z2~48^+);QAJ-GV&c7Zjqyp!vFX-t923r}T`Vs$>xYi{iqi7c$w&*HFD=qcNXL)KYl z)}jMBgHue{;*)16`Rhzj>PB>A3s2lSeB3=~g`von(Mer}5xHM__P*LYBt4QS$+DxY z7jOBc7p_F|KZ^LwYx}ZWC`|X-^6VXm`YXx^WwFgep^Z#)PpB6-h|l9A^FEjooPu6| z97QPmMkrSiF;IRRsl!p(q3(I?dC%DH)Hg6pfgt40S!b`sF7lkY7j{n32Xfg3uTM3% zc?!NeMRox#%F3tXaFfGnK}LdSVJ(JJWX9DuLwG;VVXcKor-&#tB??TGDtds-M^2PW zv@6T}Avyb{#_q{-(tTq_4ddKFaPC^i{0r1R8T;rD9>8`ky3JT7DiSy;nCes>F zp&c2Y%7Mq2(w)wa6(0dNK2 zEvNUxvUX-K^H}}3TcB7u?1~caR7jh2+P?dp6+Y|sMQy1*npYmqN(IW(|$g+g8jq;94)Gz|~5Y^JI6uK*lvm$1Fn`PH_s zjP*bINpaniqZddXXVogrG2kz`Y3X6;ruWG&35dqrFoeo>R|I@TG?4}Y#gJ8?qds5w z=Xwnz<6Bu5+`LCuGbGQQj27Hzm@w4bS0$NLUSgrEQu{5JoMB$?7retA*`rjqxFZx_(a4hw^|p!H&dP;jzk zV8VNi4x0iQ=aWc5dd_fa!SL7$rTWs^a_EKtlec$A7XQU;Hp#%(H1u#s*f}m@j^(w2 zK)@xO0D`7RW_AeM@!H{LWUBX}6PR)_*xoCBWK@kEKNV+ufcFanePwEcR`+2qwqoCO zLe%~QrE9APWbVH#U-5hEn)a^dUPMx-2$iIT)Hg`h`>&Bdej+%YN;3U)6ulra7CalP zUVWKM+W-8ZiHiCdM)?Q{unc*0DhiG{Xq+?z+8p@AX5D5W|Efm zo6Ph)N~0vxF1PH3d87h1XonYjz{{iC$AlFG>evM*I0wEzPee?AeR_gq`yfFVLmKq? 
zD}`IX?ildON#U*MPmz$Dv_kP)BkcDmvB_U>1c}5qNPwWNWzHjkM+?f8UIguX(oKgZvw^KEe`LkWXFiW&h=P#gX{0|^MUjJRvZq~ID-TlHbE)7#( z3;n`tFT*!=ZgUL!FC$^&w!(c*Rb7kGiJq@f!I29UIWb}jLtk#3KEpX@$5SnrfQsJC z{y_kO#!A1c>5qt+v0kV~E;(*og#e#6NdCyD1Mo?zL9WjK&Bpq5H~(qFugbPdE7veB z)9V#{ZK$1AWi0ka5tq9GpItCMx^{!ej8QI8#>4rwqsjM&R( znn^ji)sWm*&oLRMa#os{d)_u2i^CoXl@cRSYW|?3gk1se21?8DbX7vEK0h_3SQdCG z;IiHJlK_WXSfYi&Ylms=HJ zQ~SQP{W_^?ifB|PrFkf))LB=(I|nc0T-Ly^VTNc9uzHureRb90rP**8Czp5&G`baC zQ7>?q)?C{>JPJWyvLx!xUu+?42q?<0=W;^6+MES;4g0^&@;=PBqoQId+r|CFGb`i1 zMWT8-$P^t-SCIQrKD5_aEQMs7B+8g-fx(FE6e;CY6@s>(MTx#UCR%P)w>CL}5s#zE zpC5m#{!{hlp&macUc=*FntYNiwJ)4?CdjYpbc%y4qBc&IzDv^HikWOTUS8e3*@yDE z*XHg9urM4I;kY$))^C2G#NT4gWYm72*AO_08Am$Ioz^V`l-;zHy-ermBt<_}hY zYENt`qv*XFd$E7N-G7G97x1IfKZ-XlTon4Q)lN3yVcAZk+aV70vzUEKugzYkiz{8I z$DQ|C-BOKsPgaJWb$@sfx)H=gs_uT6+9l=Xn#+`JtNu^zHa)g|+zGXH<}}3SK`&6{ z`9o-O1pke9EqaTH>5W@meyXpd(1gVjHuljrf88vWOzwRs@6VdYLMa;T;`39!Bf)VwjoI&mtxE=pP|c=|9$v`nxn%GAN)yJlk78r?f$g0!ovD-B;_|6L%SqF{JuS?u?>)Pa4M zn7-z)?jd(yWK*H)$M5*UU-GS4MC0E}(K9={16RROX zij4YPiZKrR#-(@FEz|95CA@LQ^pE#abPlk5tJfRZrN7@`pt_15!FppjR^9^Rtx2OHyGFFgB7>Hq>e9a^CDL zGkz0oe9oftN@?>Dqs_eb)s?4t&0TXjJGeGyhxfc+a3q^zE37g$$8TL zHF)j4d#uJxfadaJvuYD{jCDi@LI0{~7{71)ZrFnP$>@#Sr>nk8mrMm<*vsz*{aNxh zJxoiZ7trS!M{sdjV4-iIS-y14QRSwZu4yB*u`4M}G@M**=k9*aIDi_;fS58XiKepe zUNfsA#I{{c3OW<^ZFI4u>SFMNGGJ@h?&a~C5v|-sZoGhf>HSiR14ZYF9+Y=?j>S!Q zKdUmPq7x$iFKxv|M6k<@@6&F{wKg?)JI%b-wDtOH&+UN`xC~)h6*T`s)zE_oxiAuN zDMm>dMBV2(JD*Xpe1w(qiMD3#<9VC^zWs|w@qaYXl*NO?h9ZI$>SCL)Hqy=(JXrYb z)Pn+LhG%vWzR8~7(YMBqLGn4|h918M`*cD9b&2s6hxlcDDqcFn*rWTjuZI2*7AQtKJ zKmIjS!T5Dv!5DId%`rUeq2qgxk`lSw()l(v^e}`+rZdXs4<_D>fFt5@Dd=>h@JA(r zksrff?6_xf8XM$({IM)(>VMom9Kj{sNBF|1DL_{R!vWet`H}srh+d|+-@%^^!@kj* zwC8+Pcu3sVwN{=aG9W*b{M~kM7&@ah`ndc@oL7dfPVpy}fSBW7z$&AFF<%(7`1vn9(UhO1 z4b)%tx6U87)7Z(Nu_z1JcCBsVRG`Es*YJ)ymS>C7JB0u4bEz_{sHQ>_56<8+>)HOv zCG6s5#6mhjK+HetlD9s}-2{#**A^z7#3LLTVrM5md@|XW-aEL@!ELKK+Ngde5r{BE z$q3}MLbThM30`ZA>H@*mS7KA87ctwnu;P)J86uF?(E}YPAKN729HKth56U`)VxN(o 
z;wF5mMZJ9q4vpB3O>516lvh=;yhQX_pPk9t$Xvtk5kK5<7si3Rr2LUd(kf8vkUsFp z!aRl%>_}+aogRx2#kul*W3_`%Rwev91K^pNy{u%Oa<3ZlwXj=$+Hif zl76Q6P;gn3Mxzkpaj1zpDJRVXY%yQc3($hhu1SR>xP$!V6xz}j`|6ce&og&P(#56J zA@!GKNb1si+%kru_+w8h*=*Y^3p;`RjtTep6Ir>#d`lbI+w4B^p2nXy5nB?s>E9cS zd%Z8Vt*g{=EG5>j(n80-xqW^(lF(k+yGoEFDW;eHv%-Grrij$9U=k-s`kk&~hH3_* zRyUy^jo~XfRbgte*rKb$x+Map4FSBFV}1XerhA$k!3liKV#R@j6)ioK_}JR?YDt^Y&t3UFb{fpqhDcWiYq z=4-zfMbe85r8|ZRucPmAYEc}Dxc*YCMCa}A#naxxIMlN40|2R;OEd-k)Q7$A2l3WS z9M}(Ltwb4Pa8kKKvWgasRD&9;==>=x1n5B~4_6WZSV%{ISQ2quK`EB;s%?+W*riyl zRMayRptiTOdm(toAZu~d9af1x>>aIyNc{9&>Mp+OwQ1nuN ztVU>0$}qVLXnMvhI0opOk~oI~b0LC()eDAO9j0~>X(;(DuF%wPr3UP15>~W+h4H(q z8o}UyEh~o_H?c22Ui{o+MXxCs;LFnN4+z(bXQSS-F`Kz z%8ks(VC)G9V7x2+1*3Hs`l&`v9+jsK01IFeUay#tb48^y0f_ZKRE?xS-9ya;o5q&< z&Zp@Yzj3c}qMkf_)8_Te`5BsIld;0^-P^1NmlVNS34x9c>+SX*=fGp8<>0_cgyjYB zgP2`%ZS+RSKoGRU(-k@{IK!J=<~K5*{RVvJ1tbMl0EgP|=NtsJCx=!=_L4=nS46}K zUDtqG^wFC>Ao5!d9$9TzOjRjM@CVV4MSmh8SehKbX*LY7TdGU5ew(Zl7q!bhzg7W| z;S*IN*-4gBjMo?L1i|;1cUTJ_R((#l*;`M~!dNEA+SlbJ1#n}IV8#t{R0{_WYVnth ziM6{X0oA-1{~)pgfHSm)@-qb%pFxhGEI+QpHDa@?mh0V5!-N~UN8-2iMD;~!=~8?> zfBF6BG!sjg`;39%QgO@K`bR>C`1bW_Qg&p_CkTOw5@wuG z8jjH-?`IC=WAnNed95J8>@8+m`YT^GJNbffM(Q$Qa(DB0SBcNxe=#69(5#Bj!}>|O zD6OR7QijlZsi?3>?|mayCkDL?XGBMuGw75=FNUg>5n8|0(}RfmHaqzA(;~mk?c=?X zmNw7vKgugB;un9fb?CH^m;h@Acq;9|t>3>urSb5WtC^DAqKI!9-A6S{kU#XX{BzZq zP>0zAoWLy|?E^-9r8T=FVJ@XwT#dJ+i>P7VFrf<^G9d6lelA@eM=L8QI;Pt*XOfnh z0|I?-_{IlG+sJmTLhteZ&oI9by(ZQ3{^87g!@PqXv4#qAO~a#rq^y##_4_Rne}T+u z`Fo+7UF1dQr`&40-o(rY5018lJ65+>{z~NLZsA=m#_&S!r6s?TRBAD3*@kVu8T%F( zUl_hNIUgke*4MGh^1 zyPjY9PTL~5x|z>$4Ny}^D(Dh1xLnPNbc$CPy-jF*DUvgD$+~VpdZ&`Q-;R*<_P`Hr z@>&qAHwpeJ&|Y^Io3g;AHqi*LO^5D<7c0OA)1_?~VkCHQ6(?tUK8m_5>@X&WO$Jw} zs0XEq#n_khloN6%Umz-0pLnt;ERQ@{#%L>` z^9}JKBK7A(QQ0nFOVV7_1q>3pI^iA$-&a4AKRuT1KK3f7L7k8&4PBR0LC3?vHNHl4 zMxIwEk~%HyE=gkw&t3MdAK_k$Q$DtMhP#iEK?2!WzG(qSR$y;mhI(cF3#y#6fGH{G$DbibBf z2wF(y7d%WUdI-4{u2k!=vh05fB4Dr3FlQD{>D)S_S33xKbgmuYPVTn;$J1b#T^<6S 
zv-2g;nf-f!n}8j9Y^r;zSe)F1zlSi!URiO)agurBdnp#pY+TsKIxaJ-4}``xNy*#I zeC)dM#Wguqw-j8(&zO}cBF$}H4EAvNo*l$hd^Uq`OH#GXG~6y&r;N#*D!)kHHzZif|UQ9r)Ge}}7 z+M#>-xJ8a6styDsID9&hJ8R-bR5L}!)Q&C}#sUYQ&A`Zb@<^x9F^llmBe3h^CEN zH6x<66CpS7+HZ+Q66GH`dI{-75JSZfX;uhfOwH}DP)^54bqB~poVj|jEJuz!6+0-S z?AN}oUzcJLZ(M69B~Ao*D*)wx&d2T2LSFBBM2?YT;lWul%Dm)Hb6m$RniqUMa=!1& z-ZJ%IsZagwmbyGS#MQ}Qu%v(%s0IP61;?B^g;fD^JM3*}AS5p|{hb=3j6#?*y`{cE zqzQ8@6~fk4Yn4!&SH{oyCRr5c+nKOk;w%YK-5-gSNCpVwO7eX%j=B+}|0Re3-4xd;1j%$^Y zD5gK_>?pyu;-EVTu;(;I4*xMZU5>11x-tAPus@xpLybb5^PcTke?aZ;%kbE|cLZdE zQY_qqswM|%JZ$yZNuPh-3|8Avx9iEj)u8uUL3mHIHVm&syt;o3d!qf_pCg1vBref; zkEUMVtjFCc_WC4`e&Iwfy^qNWqAV4F$G$kQio3DMQGG&&_NkBPn0c7d2s~D# zpK?C9djAzE$SAvAazCi+r{-0JqSa7*UErCucE`15Gi#;)hnXu_cG=eazW`nGd*rHS z@UM?4iYz#9!fCvx^vC=zW}( zB1Hppb^7G7E5?E!&v5Jz%&wt@Q#_oYBmfOHz`Kw=bO?MD3~D#B4MZ`S*m=0FwYT}N zxxJ>CN#~rM!MX~&C@aU4_qv4lL3X_vqfVLx^PVX4W%Y?gavTy@2nB8CgZf2*D0E2f zP$1B)kLROLk5H)2HT@>7j6|w{ZHFKs!K*#p*XCQ;L$Y2}9|{_)FPrhvmpenR_D85{ z2VSHIwW<6Cz8b4<@Av-F`$2WZShp5)S*3pAE!_5<+2N`DjJo-sRQJmpnItD%>55lB zLiVe1y-;;inecm)lS>gi6rdW7Q)_eLHVRU0qUz@ERcR|V7X9dafapiH)5bk@g$^{f zIHi6*D8QR!s+=bIR(a~o6E^7*jxDd<@1^Suu+BAc#@?-F zo<$+z>izYd3rp%-w6KdG8JykBkFdqT>1@?I4JRnDdD%vB`C7(teZIsH!dzc=1Fv|N zBe)(N!yZlfy39VHMISc)(-VT^_ZVFCg%dB0Nk2TVb;8YdekIV%S0A22OcC-AWt_>b zHX906s>P0|-da{a_?HEy?yq7x`CKEbfa)sOjm=LN=LxR(iaMo@8jR7Po6Bo5s{RC0 z*WffSclS#J=iALw-PPc~zF>=5fBbsFOWrdP{RyXi*BHpQ=JJjqDX+LaukH`IK ziT7g+T`|N&C--?Qw!2=+!R0FZ*CTG$?zJ)_j0j25b+c}lZVDjqnEP(}b)d{0aM!x0D$O1CF~CgbDHr)ho|3wI@W=B{e9MSu5Iz=sMvvTUWYV;X{aqRkG#P|)>EmR)DI9(s z;jj3F_^+2m3@${BK3$Xh`?L4o`RZSlTb%b^CG9VJUMXW%Q(uZ5_(U_HBeT#jkip*| zpzbPMzs3DN+{ygjmz@GuyCm~nNF0MLx>W5Un@;gVP|EMKz z#eB$s$bvK4I#1$H&sQ(seejI*n5XVhqUPSj=!mD&4T=7RT-ld3OAGSJkK~z__lW{0 z{f`XVP{gw|Ssu(hl?Z&N5!=N=R@5AAa@`kx&0Jt=(H;uLLFUB`Uuv_oU_)P9kpeP| zX!^E9P){wLA`r!#()z(OVIM!4{omM2td{%FQr?kciBj)tt5Nv#9+pE^TLN0f8h*%v z`Xx99AUG(*x;rE9*}IazC*SgTh!(&6eo)j@rWmWx_JV1{+(_%7mk6Ro#3TM?gBwuv z|BomDC1%2urx|FKGuXgD{Sx;2AJpu#Zv4uegc=D0k=4%7Q03sSw5p>Bmz`V6|L|1+ 
zO-}`h z0B~#f+kdIW&Df(cakQ=QW9Mhbb`4ppR3m?}_d%yuWdGzv5<0=@EtH}6KShAMb!8#h zwsETmtRCCuLB7ST%OMN%W|Mu_%hA}Rc0?;yNL-7@&_~<JjWAE$i4vKj9wq=NwnhtuIR^pRqyP!z1g70#r8_S*(zmlaD1xuD z-tILoHQ9GCLa9I;)i&rdudeiR@%C`{(E-~i_>clx719TMthAhaPOPF@Ws!+f9uahZ z%bw?L8e%q)=9%wfLXERmm*{jiIw%o7+5hwf*#L2M)WtN&WXBDn6udPr)0f9Be!qP| ziY8>#Np$-1Ev1fW*eUQQxP=1tbTNFunEPo5euCNF>k)Vaw=vg$kVb)zWtV}GGo{`=Na9Q~JlVAgkJ7I_dLlw|zdpW@UCJgz zp1s}x7Aqr$bpJCVWETY0^q8OXTk-8z>^E4M1<0HQevIgYm>3@uskKiB+ZJ1tD>_^U zkZ7<@z7u;E6uUIuDN^>C6*9?Si=}9h2u;VC)RPut3|d|E0rXgjUuu^mZFzn<``scxe$U;Lxj94v<8({lPnE$V>JoLYS%lXAPeoP*;6Wu2s5Zsy93H z8g!SHsn;xD_C9$BVcpM~da1-Kb=468d;g!Bdl3`^+8(m>pR{@{OfEP5s0@e^NUw0@Us`sbCwrifv11WG6PJ04Fh5_v zpU55rbdUnt14vjxu_GK`J}mY!uM<_jjlnK^Wf(aqzY-zsKy@ni4FAlaLuB~%v6X9e z0TmDk9yxHhg!fbGmK~tt6>$ePf|NhhIA=DL%yr4PA#BSRQg_!dZ zG5|ZIBxa&um0;r3WP$Xq+6bbA-s9KWB#`}DwD`tm-5YG;>_^l3ykIaT?cg34^#TTv zazxPj0SQnn88Bh_fhW6b?GhNB8X_L!vLd(HS`#UrWw{6LTJ9}Je312y;)N_jt`iKf z1w)N<{3QO@3o&UXnJi?JX`G3`%33PgnAfhQpJ>{W>3}KLo2kY2{7%zxV;j`lh5YC+ ztK%E6vbn-+!d?^oOUv;atO;LIm#C|DJz#g?ue2@qFindi!18|3PaC}=&9^0BJ)}pt zj(MvaD8%#~U*V;P;dTi{t7wvtn#El?Jd#ANjW8Fu=HQ8 z0)FEN&06_p5{GW77rtdUNyj^qi1v&#-+A9x#9&SF(AR~d@0qk*ho9el=)m>YF~&>V zkpZ<`ItQC?V{4teBwO6kIIFI<2Osdx{x;j4RFBs6*AUs6#)CoTy*4=SW2v0iVFlCm z>cGQ__oZ}WZCo!p%~}Q9Xr8Wf9lXF}QR$eUOcniz`(1eIYhW+fI5LUF z1;4kV3r-y|9id$M=I6lF4^L(zdWMPDbQkNbX1~6$vLWkM@B0|v>rfaJa@p;}LZ=9Q zU;zUnrr~Fi6qcuNYZ^&l%R;}A^n^VPejaBMsdEs(z441f=@aQ;UREP44;mD0uKS6hp`Mj1=uHMN&M*zvFAXI{j1z zwra>N9Rqt5z^9BQLWn9mv6JbpOM_)_;ja&^0$kdd3|JFFizmzYyiz$&%6ddB+OZmw z5(AucZp4mThG-eUIN5#tLZ=|>EF+quzUaPwJl60uUSE9D{OW`h0vmTskub`%^A-NR zl!lE#Hd@NEKVOD5Z|S=gjWhk!u*F8W0$YERdk=qi;6;8uS49@}+6)%mKf>@h{@jkm zZ7LeB&+<#aA+Xy3gm;ERs2K|_$RW7;tFiO@iT9M8IW1D<{DE%fn1R!UTx?l5e-zAdN-bP}c#_>@qU0g$;TL4MGHB-21 zUO=js4D2o4C%^J=LAt^$-wR0N}3wA$^>=zcfI1Z0nPzh=DFyA@N|Qu<|69GKER&$tXW z(t8(-3%!d*3n{;=Y&zX~o&BDWpgmY+je3nc|FBo&MLW}CR-%~y$n@=@P?O7+AILw4 zOs6P4d25`Yz2Jfa{hmWEB*r?lTSP*Yr&m7^Y){I*&a!LC-49!?%$vtB0fVpXm56a^ 
z$Saw}phrPz5=(ZEH=Rfb`=moC=lknrm=mQ3IRC!$d7X+zw$P>xvV#4)zg~osMQk+G z|9UCEt2r^=Dsk8amaYrO`9_Xt97y0>sLqwNj#~G8o&4Qk4eCd_6#@?5ZrNHRXqTPF zg2%P8HsJc_K9P8^G(7*~fr1%@RX4GsrAEv0^*p#GWd5f0b>FF7sbCMZI4_j}(FcsqM_l4L<_(vsg|n)*aEyQfC9c&4|mi zwr_{gYz3SArIgnNJ&%WI3An-DbxI}8^GRi$H2Io)@*_~k;IRQNM`7W>U4?^}_<5?t z|Bt=DjEeGm1!X+cmzx1LFn1nHJ;C8Rrt29fTLp&MjqhMakB zfB*C1oONEEb)Hwx`m$IH?!B-4>}&VF@7>|&5n;r|Q4M^7obMg+21>sjEAq0oS0k-B ziB%Q-)CJR`m}`8STCe1Ahln4CG*|J&`6I90Z$j5gW?fGGFy!8XJ0-X|M5;YjjK~4l z9H77=J7n9$d*5a4F37oWX8xZX=8Fnr`=tSG87yv0Iz0|#nU z>S}S`lweeCMkA`>`F*nc^p(Wj^M6|~UCFcAcKHz_R+OBas4SIoK`&fo2ioFGZDX=M z)J5pKcL9X1(X4u$`k~}CFFV-}P#!UPLi+VU_qOA2R*#m=s^h@@8YLy&-gMMV!YY&_ zUsAD%1@6MO-P|mcXxb{c_Sc^X{t=>0`%W8Wb^8VO#tfLeDywmQVSB0QDF1Rn+Xb2P zKIi$<$0peh`3EI)8{7)0E%FZhLZN`f790*nR*S+h_Gvohn+_Ma2vG39*0HK!Abtgw z47^d4M9|)hA4n_X9h1&;%v)4n-8$|GiK=5j<38zQGDcT1FC_5J-NmXC9H;782vX`< zc@<9ed8gZ>zn_#!7L;9(l`yr+E3eE&P`S1-NfW9g`L(3q?fNtpFbCp?ccmy_=83w` z(X<0+8NU5QsR5rq8+qP1#$63oCEN2%z)lza}!+@$=Uaz9T}>?n0K!0v1R)Cru` zPrub_7Nzd{-atx4ab!_B-*oj8y-6I)`KTr$gklR_NjJ$ROC7+fxJfdzS_O0p(uTZDE8zn-L1ZskW;H-FBpyIR1`)RgWfG z(!56zYX1BJrb10G<=p*0m&Hb>rc2aFZC@tfsgCC`GzgjRp;mKA$@0+O5YnTDoR};Y zSY7iL+1yEC(iLQXQPJAQb%We6Qq!OQ9bae)b)TXLWd}F4e!1%)2YrmPAt%H7t0)eC z?Bu4W@ETem5Yrm zhYyA%JeWOgaIHX#^T_NH->9qiQraV^hgvN`p^?LokD`R9I%TW zX))?~%-i2fpkmJMD@jEE0;!(BmR+Sf#r8qv9c`wrDX!Bzdbc0A(zgsWiI5$mawnL@7? 
zP-sL&x{j!hBKA&uKNV_*I@GFCg-hgE|vt(~($6!5Wi$i)#mtUXi}9c1bjXGsc%s&0_L%okreBO-PsfRWK%V=*N?x zENtRmBAo^$9Q*(v*3RYpo^-L0*|2|blUzXP(_{V^%*zlrIJ!XH?S4g5vDV6@)BqAXK?!$0OUe{-iT=~{ z^%L5ff}Y(rLs-#gzZJJ*j0Zx-6oyADO> zfu*>YHbzVZ2^syLm+m_7)oF2z)=!#ji$rbs(+8o~wsKW3o->>WPM0lOd9KmckXC&~ zNuehfy{Ii3irD!k;LgXIp;$q8?8dt8p)>c*9w8leR(-aJDgM$85JczV$@bUdLX&`MOY~{(9rB zwq!>S!}0cnx73l_r1;!`!CTni*t(ze2zXvR>s+YdVPbR`+gspOJAz1*_M?`5s&k$Ygvi4uYXJ#9p3+wjKKoq zih&!NexT$Z)LL=u-rW2&x|-sdh^nnS*QZ_5X=XfMpINa8NEKNZ7LrbJ4RvVOX+^dv zl+FtJk`GfYZzm!(718}5$hFG6;aqLQSUul}zbTY^o~F@xdt&_a zb`QGCZ1uh7LDkyEI>tvAAr;*mgNFw^%Mujc+r#c*GSir?BZ6a@wp{x0MYgGmTaS$; z6IxxGjH`lB9|_HO-TxZ3j^-Mv{kSE8cBHv_*Y|!b4dglYKAyd+F0ZQYPm7>Fx3vp2 zd8g_My|te+4Q;#7Oef}X-mKcSog>EmZF+pm5hbFI1qWOhFfO}!^lLDAksCK z%syMF56MMbKHb>c>!0yG&RvvA{zZ2pc(Xg)fkwyAER|vhv;b3$_g1^ny$I;XKZG#| z)I5$CITF8^1pj#g_Q~a23BB}UD#1}L(@VT4?Q$)`l=%9bUUo?&cz1km?8~#|<1O~j z(UBBp!GR+$I%cn#@zNepOrAGjD)YyvRUEP}-#_vzlu9!r74q){+JnyT=wp>VA*YQJ z2XtM9hpq2>+D`iJ^TobuxIlAFL%}YvlJZ((CP6u6!Syd~d;;=>n{-2-bBRRM=xfr7 zMLP!X^8ZZRI;^8i1LU%;`&g1>^3jq@b<`T+vp)%Q(e=fOp3p-%YBbwuD@zYDeAA=S zMR{h!e!T7`F_VhL!(cCbTu-VTg(Bx!A4+e^oZEw(NK_=49bmMaWv?5MGy)lep-hTvg*{)So-;ElErfoU#2>8xxf(+8)@D~^tQ zL}2ZyM!x~*FxtrXiAlCws1U(Z*{&IEbcd!VNf<<6*<{r5E303(ueT#?aF;A4u!|+dE z!#VZF9xBtkOOZAwZ_8Umh$<8sdO(2xhpJ)Y=D=t39V64-#}*{Xt9P|=xFmduD#Dbj zY~gk&(m%wLFWZ(B1A{)Dw(6Y$^gEHY)zWvNcsTBwc zDc8O4)RZbvwZqSij#q@yds&Ob(L19;n3e|z0ajN~jnQNVe^Vo~?gR(nTD6;2PI zFu;dgqj0L;J1X-swqcffxV7%?M=4>nVvV!=EMz#%#N(0+f0jZj7^3ZGU4Gm9@=_$? 
zd+jlU!~@f)X_kstenG|)iNHt2<(tftwu3(6#)A?*4mVXV6Gs(u<{)JDoDq5AV1WxcQiI)=o2YSc+X4CVWuZ?%tkDb=&MUU0;D zmSKpwSI2+Ro;}~^{n(dJ6DD!LT2IQUMa+TErQG|B#I2ghFdZ-!ec)-pl%=yFpX#=2 z09K=a1AB0dwydtZFK=_2dO{AYtz)FAz4;}}c4byIYq)0Qu8A+hGwOcEhx4K`0`W?c z)E6--z+p!HU#zopukzc7f0fqjP~-}8FXKdj$k+};*;<5)J>`A&LmBf1b;_4E2Iqt| zSgEm3fQ#MUZl@4&oNpQk-W-s5mL_xf(y%eMyOp#0_GuK{XIVo|_>5$lT+@*CT{TaR z;kJDeuZGNtQ5}gj=;|*TC>=;%f7cL`$k@2(#a!9=sWXV{$m| zByq)n`E~3yY4fgz0n40ecT$HJaPKq9Ix(a6=e%Q8i$i`M7~~^-7_BsoMXa;q4TG0O zC0Ih2MWDY(Y@Sr9BbYR&eQE`gaWsAvGl((|pt@~Ge|?58haUNF<2Y=flGe(51zpMt z-&~GNXf%*?q|8yJ*oRY!8Ac69UZ`bxXA-vW#D9rI65&3vj2I2qmcQz_dkKGIK$LOq zpRZ{Dr{KG5u*T&aZwVXQwq~2F;{7+n7WP)V{EB7uO#%(aPCieAFqx)teVfxEzGv9U z2O=j29)Q{kTC=S5S4cO$uYWC}n5h*@t0f7Ei;d(N1O>6WJ4QErkBXFK-Y8l`u74TVH@Isc@da`9+SJ_f zkQA`qs*gHP8t4Aws|Yvy`Qh_I)BS9IXki*fp%^DGe^Y%z?K`mdTQohF*nmt_6xfj@ zv)9)<#%CKt1tm2R2$4?i5ki`_l?BoC_@Iri<}`=$GNs+>HBr456`UN?c(lxP9-cA! z<$4CAMxK%&eoZ|0G863rqm+HaHya{83otw9vnFaF-|ltveRC5w68|a1sY2);$;N`v z*yJyyHK3!nKgkMt*&{RWLUo3H-U)^Mu4cVDMLqR#ezWxSme{6tpUq~U;&zj-r)x|u zq6Vga)bEonCaT0(z)Sx&aI@&F^a@Ji5&ya5FES;<_r8P1w4w={FW^?Hnjj#3+T`}P z<|&uc<+pr~P~|K!>eerb-W<#TU(?$=OT1)`7B%M-zQT`mC^iRjs_(aPPqmH5gwZY9 zg@}?KXA|;8S-uO zVI0OvkH02}P^|7}6Kzqz&7Rg0*2^!XRYJMH|4nG{mw9EeGW|LuaBE=e$u@1cRdqaY z6ricgLg)OY9geWb^nRjjNDS0?vmc|;x1Xu+hz9DFSkm$(kqE19Vjk;-LOkgM&3o$D ztox2z9<%BagKv~F_v94}L<#4qKeOOZt?SkW(}GfdTC31XG`bQ3yV3<`vFccqrR_;3 zmV$XAh*xEiS@%*K8ddr;`JKH)mpF6eVFsl3FF42C4*SkuH!WSyj2eo0o2XjhT4|_i zMY@<4BaeL(AS7hHs>tJ#5giUz8btat{%ptRLCJo-8+;SyzVI5_zTuRqw*>Bsz!!HX z<5gZ{v;+xXuq#>@Q*2;2yAXDF&jUNBzFl@R-*GuLG|joE=)-RM=0z&rJG&EtBET<^ z`i9dpeNCa5+bthXfNGg{`+msLfvD;HCA`kwdMuB zPkDl;G}`By3f24;CKj0K+`!cCse=R5uqd&p(b@+UXsDT$!wyP0d_!c89$VvxH2IDC zd3=}fN)tuQ0>yw|Qa94LVvk|e08M`>8;GIWuzTHy#Fs~y-TbK#jO1QxB;8moC@%6y zeP@`tbGaNpqyT0Ela)S|)cFAE5iEv%d{ixag~Sx}&@;We{mhPXWXVMfb+1lCwS1_~ z2i<1(#4uWC{;pZRXO89f85BYiyvuL0X_M2NoMl%G7&81Gtl&a|dvIRxUJ-S^Oeh(= z`ReZus-i{TE8G-4mhqIePzU`UVbS>=3`;1;S*s^qDNfv$&#iFlChOerKC9&{%1eni 
zz2|}*aK#+mqwtey0@Q|n_VKRa^t~=}K{Cmy_bqk_VIt7*&q*-p-b1@X#HE*B|a z6zl?RB;{89!4~hcOxe0Y(#i)G)KnpCu{`GCepQf&(|~{jmr_9otSq)hyerq8`)px! z)&@3N(G$%0LPgk(a0*8xX#2=?y!(@<7>Fg7kVZ7cuz9BXC#uCdb@j2QJx!piz;$M2 z9&cRYTy-SVF6S<7ypvBULH!#|m1v&78Im=}4dhxD%&$Yp#5_mq7eXSwhd%PUKw)Kc z=E}NL>w``NNa?`ROBvD60|;B>rgQovSlfmxoi#rg{;~P{fx}$KkeEyX!{#&~Jx#}L z^Q0-0mmrwtVRfeW9wwshZRqMG@#*{al3xPz)-5+av#s-f1fLrIMM8nhebaemR6^j$q3!GI z(~cLr6+;M!)o7%E`PYa<%H;-{#I%x^%X3ni1-( zK+;Lf!tk0+rwCYbCzPpJ-3wyH5`^M8nvaFxyd}FSqxsc1GM0xtN41c;qsg~1d{f1!32z8WejQ>nBAN>uppIL%HXI{eEHiOPH zDb@j6+W-ppf}L2i0B_2aN51Y*(k1kz+YXK^SN`W=Q=hP>cC5ra+eP^D3fYUDTpy0q z_j(^Z!M%Y(yM9hVETC0qVNK7sB;DWG8Fuum8Nc@NtdluB{(Fe3=wCNGvXb#}Vn55& zEYbUr$di$_19m0m>=1Ob1UXA&>@6v)22XS;)tvK?=O4A#?CMuHc0oNPUT1xa20%PO z@#yj&%R}$3dpMZfb}Za1{u8 zz{U;>_fL-_l-Ap7zqv4150g&jng+mlVRnvzZNOo=>mY+ZkbEj{X2nqXeN9HgJ}rNs z`gfZv!^sssRBI|5ElW66&4NOT2Oq43HonXs?RQ*_-(jRK%2;ip@=y3Y3}sA;w9#Zs z)Kj5Q-MBRU;Q9NmBWK7&sp`D^#fc7d@euc-b?HeRHB8dAISpr9poBCO{1R&pd3&!tpQTGkA@@ zR_F8t?ioMl8hMG=j-y};S)pTf8*qx_vXJI+`OvE&m8HFCs-}Jmie*E|)~`}uTh8ms zlNT@*kbX_1!u4aHCq>6%MEu3={N&ztO)z8RtU!Z@%szX&anut$Iq}zX7+dYkdl4+i zdiA|*@=FYKhG58Yz4KS{YbD~Dji>gO=++~xapuXT3^jhJRW1#}{&THW(aBvs(3A-O z{5K_@MC8U@gkhntOCVs=X~GlD4k{-{>w~aG--{f!x)*k>?1rr8InEqCK~$P)4_uJr zzdJCE7YTl)O=^$43CDng)|g%Ktuzj!(8oGmqO*~PzkFRnPg(nKtx*U{pr2PpYLB7mOnl?*h+Pi6T_YE{OH0(YO1yM&eymd~*ve z6EE&%2P#e(g7otXIYps|Bv!^>Gvd9D0l&ut!>TQAb+utc6%An91bc(^CVvISkCg_4 zB)J!{>H2Q7I5G6M5A^31u!cIv--Mq---9YF!j-28YH~iwRWmCS?ESuf#T~Wgu znu=rlj`U?72FNn3lq(+VXH3!6W^!s9;o8FezOftp!;k!diOF4)pCl)2O@fQKY&RVo zKOd$-J2%>aDh| z6r<&oHvN7rkE2B%Hgi|DIQ7ek!<<{#v!ic$z=4?UK4F%i(8P9BU(4sQgk-vA57TB` z!)7QuBso@MhAR#4fuSzm)W2Qr|$>UK5Ap4{wau0_Qa)ybbp|Z+oX{px=Jk%fR2w^%(42 zQPDH9!p{&|iZY4yE}us9vA-!)5$Gt{CwRY{VL2po@(m;n>+m=eUc&#IL1!%He>Ou_ z#<*bJU;22K<~OO>e8>J0BKxmlEJPoR-5=R@e1DB_)m9gW_@g=o8iBWsNfY-+oQ%{59mFq#n!*gT%t)VQcv1rp4es0vQ=Yk-j5oF5 zj)v)8%|D8Dh8!zJ3LlGq7B~LQ7b+$pVmo)$(M~H>)*>NaoE8R`{l$Z{pZm#VOW6=0 zzWXT@kp+>+piByq_atbiO^!mspzfm$Wvca=MZE?EdBb>A;tR 
zi`BV?(V3^8isp7E(GcQN?p?8lUWt8o{%W3P;YS4O7fYD%N<%N$a8RoMCpl)`gscQAeT?&^5C zUW0s?y6fz%pP^53_Wk-$&7J%&X?aUa4-W?iBOjkT$>2}qbaZr7RKzq^hdL*n^A=}V zdKLY9_u40fuee`zf3jz`NmqPpsofKN#DC?clmn$6Do_av5JJ15oQ0B3a1Y$ zC+ndReNVKlLgSPb=n@h0Cc=MPbz7GeCy*Woonnl{u%F{aLi@;K78K(R5yqQWG6}gh zpyiuGR68%M`F}}2n7nAt{LHVq@ z+duoq^EG*oH27Ys@};+(zmp0;5=c)p)$y4ng|55IFZAEzHwGO)$?f|u^`(4bt{ne| zDYK#w;OQH}car!YOKN9k0+MbI`QXgtB=Ub!q+8kwg;Bo5G>fCkfFVTR7<-0a%p(ym z-oN*C68t|jGDE}n+y0Fk&@98q|JlcS`gdR(<+%>)*80cO9T|2B7@H(ee9!UE{Po{e z(wnQqgBxcsng2QbD*E8?9{36?ysupL9|Hz41^GNEb+laOe^313`b96WKe&|s@2N|> znQr;6H4*vrpE)OteomT?oCyApCBxeZO{D)+{NMVkAA`a^G@T&DvvRU@_i(c^cl`IC zvxOa=5dACqe}Bcrd9=T{Skd$7t6A7tee}TN(ekwL_+N^mqq&V09*>@_rN<|FzE=XT z@pu%hY;8Vy(7zTE!sC&3c5rsnaxwo1Fu%3(vi)eK`A!C4S21_DrxymqYvS?9*m}6D zTe-N?dyNe{fEGR2>kzz01{oG1BCYfz4-ZWb@Ko2#m{$c z=3oAQU+@&*=M@t8|5)){_WVKmX9@dcsjHwxi-hDUT9?cl4WgDRqDL*&V(Ogg8gF{I zrA!l`V)Ue>#5Hi*pQ-OANF@3GNy@!H|cT3+Jk*#GZH zvTA=?ZC{O+4fPZAJP5>mtXpBy=6ycud!@f{huR$-NBMj1b&REodhU&j-DS0S%4Mk zz@IA*9XFdtk~B3t9gt_>H$a+CMK^(#5Q*E^LbDb3mr5qEIgof?a(8U_xM|n!C1g$_ zq0875P~K52ARv|liTRb>QuG|0_K%8D@-j*erd7*(Q#sS~{woMXbgoG$2@Qg%3OQOM z>^^e=Y!-SmS2^xuf0P?wGo0})w?Gp9LCymef^+!rMs;3_93WAOw6}Is>yV6lSh3xQ zNdThF6sB5s+{aqm<1bGQ*kKPql5_>~k$cmwQO*MYnUVMt901a?LEkB;jiKqH#FaR_ z0-*U3Kk4i?&2yUPG&P^sBnHq(w>TwuxvEwBHQq*h!`d(a8ja=HMmM#JuW=m*a13}m zfcE%i`;kUt0bN#Rfg|+pF$mOIE30d9mZE*l04@Z?ekL;A$Am)#el%QmF9rZ^D(|x) zha{r07T88kUjQCqs7~!d*~U8!!RA6T4zN`(@q?-E%iE zH{i!=u`eCw*I2VBVV{G5KAw%G0M*DWkz{On;?_`h8;?$ z?K$kh=LhbHe<7>Ttpanmh%#7tMhI94s*MtBD*R1ex8461?uQKu(ARaOM`)AhU1bJ3 z1KFMVvmm(btB@mje9tlp@Hx^>Dq&0lySG7QcasVrAlE%ypoZTN>Y7IUqc>Bpf1`ge z_B`1*eD@!FT-|ei=&r=PGg1Ur_z0*N-@bh{gzk#JuwAK!6$0h(wgY+Cv@7<)UY7=5 z3mBBU#8L{~6%E^}#E0_(47qwsGxOVhg=dnxk|F?J?{(557gqbua&{%({Hr$w%4^wc z()oJm2m^q7g5^>Y4ka*}_r_?z0;KzT!?&{+I$zg|2PI0sLdJAAB!OtA zr<}eakA1*=k0gZ^;sXU}GU=NCMuF)Y$O16*HbE}4d?h_ zLIi9Vc)m97rGnLxerhZfYrq72!#*u^+Q=>q=q^f!6_Nq@dVdhmU3U8KzPQ{#97$VB z0HLNSZ6kh#i3JM4nUr@OQ#_fKkppM#`dG}$OwM^ 
zFE{_e4xWY^)lWC(TY?4(@NZ4+li>Nvp0Vhj;|P!w3Z8CCC6l!F_;v%iggvGM5)5C> zGfG`^Jb*GU@3w?5;0(8bB7yy{o(5eQ|I?~e{jYb!KygYcj=7Aw-GwOTIsax@EexT{l7PrhQrX)lk|9pcuZMzCifM?yu^Nm444m{n2P~imNyf zx^ihn26odv6PyFuk$X1VX9x$`AFsg2qw6-QFHEPz{9s8*V_B76g&9ROfak@~Xh_L~ z!|{%stao~Qfbm_4^Q=Q`i8GorDe-JRB@)dRA3T17-)xNWNeUx=T)jm!i@%W&@ck zxbpVZJUK4iGVV8i2Q-G`pBWJNo68sGyVsYZmm1(`(;;KpSd%VUpbk$Ix{vGKXYroY z0eu@D^(10>24;1((efTf3b;SwG_;*<)T)2|w(*CuiVF7~bJN{R9=jf6NuWV}+4&ea zDq9Y5gr6ze`Ht|wbhPSP9)XX5=*$~{okx`HrwByua%X>k|E`Uw{~h}Iotqxem(}6! zm7FJ=BA>F%LYc#;xZ3)Rh5t3taS|}Ch(MTF>D%880=6UV+ZFkB3otn#v}ja! zv(>ha_WFzkviKyqx44TkJz=Ck#@NQ4`xxtYH@$c3G2@%y4HEyY7MKKm4Qo(~ z0uUA3R;Uhp$!db^SCwx2+;AtC2L4ZmK*!Y6pyw;~AVobT^+5Pu^qvP^;gYx<)gl8_ z?ZsWm9Aslo6x9%R0l_+)kWFN{=@38)-g5_;=;-#hIQ{Bxld|oGh48$Pm3&n@8kY<;vFY4l8U!T&2d~rCunz%lL4-ND-Hp&^w z_87w9+)Ip7wTl_qfiqG)G${dUMqiHZX1{dcYRYVRjnq3GQHDE3{M9gOL<61F;yrF+ z!xNht-K|i0rR{Ak4AzsoAo&N+wceQ&Z0cL|0GoRX>uXKAkqvl-LUz}4AZztD7VA*wWv$qwk+1`;wcD5!1Pc4ai@4hH^!i zQ3T6Em4L3u>iT2_Rj+^4SIXScHh%gNEC_qm&Edj6VFbScK>v}L z(^MGDhqvRB!t8wk8(z*RfOQ%J9n3t>B?PYcGYa(q$SD!8j|4ud+8omYbVcf%12%@p zGH6^2;{>a3eog~ck0ela+T&gaLS$Z8A!>_Zq~S7-tnLVVT#2Sqz++ce++#O2>;fN+ zoECbGgIpDuF+^P$ZUKZPYzmg}MiFJ6L&wKHM&<%O_5*Iwd;SPSzjI@oHs5Oa9Xj-M z=EL=lU@YtaRu6#WD(pUn!4d^rd1<%I4;A~8lSKh50Mc!j@t|yhkFGqnxaB#EPSC+p z$F&J9z`{p$nXM#3z@<}`9rh>e>4=^S*t2>D77VZ$tN7L`-XD0w>cePoADSR7^D=NC z5PM3ZvS3=BlEK1mDU1#IW1H|(Mk)6>G!VT|0rze;=qfFt*Ek||N-VNv>UT)r|8lSj zRMu&ckw@*}EzfxO{b3KZvpFq5m8@p_!i-om%|B`MfWPl4dhF$b@a_Kt(*d*#;*Pl! 
zKFGOMKaVo>G|cfp1|uYq*G$h%K5MpMDXtKvUI>DZLp`ptf!<1?DG9R1bH?v?-!f5# zh%2^op6B^;n0{pv3*kjQ`T=<|p+OG_TKzLOD0$v00>(5bX8d~!&^kdbp5=J3PB^*?HQN7qamE}+W7Wl*(owjs^BMZ zku0IcRV`KJ9tSW2e4RQVv1T}%8Ud3#f%fbgjUnay&eg|-+*J0#?d{D#Y?q@x(jLin z=&5a2d|l0A_YGx5=v>ZD`sMCf;g(KXN~57>+li$+4VB}V3LZq6Mq(%qkjaK@(@67TW4`dbktCHRs+Uz0!<7* z_+xCW1YP)cdV8zirQ8a1Bhu`S42LhA4U{-sQaX3-%Fnb{D-O6!IrZ%VW{DhujwZnE zS^Ea{hHS{DXk~2!L|pgU`jT#PB>8a9LT4Tq7)(7mFI{OF-QW&b^&C4l+Uhzmt^O`G zH9k;MQ^g12I4FYspppkT#?ywcS301@-V`L}zk2vY(wQ>ZNU~Sb~29 zaIO37^RRpvS#t?66}Y4B6)Fl@GjTD zcSj{bk`sn!a@m2!4Mxra@P}|u5E`g%u~+a%Xx+N1B#82m`&KNwI~#&-d19vt$5biC{n8yj(#FWw9TfuQH4WJd@9~4LqE$cKyaVw9TUMDH8$TWx8 zv;QEGV8H|hG}##_%R;RlQirpAi0WQ2zj(-_4LYbwpnvh4%#NKMsH-dwL;P`ux?T1# zDE6a)L1hsF33(j|rkuXTqxxxJ=(@5&(JRZZ?hl_1`YEh(BXvcoci@rUiE*YjuAvPI zNCh*@B^W_?c2p(kb%{MY&9j*{7l>T!NeSdIL1CWJP%n=KPfqXwnk$VCVbmqx()k^Wh_cN)r=VvdjdwR_rGrqD`!tsMX~9vS;9(HxpSE z#MyI1D+V6x3q$*53~s<=XEten2PGcsoYNP_v**pA*XMTIH}S{D*%!E$1Pz$9ZZA^@ zHCm}CuZ@bodbv$}6_jk85=kxX>xT1Mv? zo;(h46A!f+4p9zgf#Z}}jv_p`cz;!}5%ASCe~`fjg~<_+%R3IeYz-AVrY}}mqDarq zQ!jhzO(LN{~f>Z7X&Y&)dn}oUdY`NyXJ3Ou^vRyg6O?m zTI-WMMZmLYQ>o4Gnhzr{sSfmqC@Oe918DM2UVm~l0J{9zC+SYp#(eKTRuLBkaqW^v zPXK~L5-7-!9IxZW4m+XIty&)P_C8{r5~6v4JGf#hgfnD*#nWqB?J*2jhZ#x#_pS<`AY3a3xC1?(phjc zP9+7BPR#QwB^by1L zs3&gHfr=bv(uN;9o~-kk7ChQDo!D;gG)a3wefvXcF-9i<1cE8@`dxN_GU`isU!hhj zr4AM@G*T|UB7td7?xcF{Z}+`9lq`%LTj$j-s;OiDH+M1H8=w!FC@fv*5)pWi>dtaA z!}LI^8NQ(d<5ErHx&a!6_O1JN;+_IrdeQ(_o#VyVloLKVpxxBiam{0L?{9%oV5fe) zL1};QJcR#@ZT)t|tO(3~6L#eLw&RYQ_@C62Ts-96J#_f?3v+XB=a8gx0_Mn1GUjd8 zSUrqnx&A1a0u$-%Iqu2wtUoCc05W?#<=#elU5`Nhngb z4;-q{#aW|+h%AHfV|p3pTz9E3`-MRjGL(J??+>n(6qr;nLG(>8cPOZ%s$XQ#o`*xi zU*CUN3g6^0r*Y*5Hbmqkk`k|&z|9luwu>f%zhXL6gdEXX%u^IW0Scq0dqE zdooKZW8c|as-dN-EiOQx@}o7szPEQhj!Ol49{tP~qIg$K>5HY73OTs&y>$SA(03_5 z=D8?bxx)74qhl4s277eUtMe%m6PG5XMM0o0O^&Jg7Y+uPdhUf(5~KNlix}iJ^%c@& z*!?MisVThzD>+q=tjYGMPz19R^WUNnc})dwdgY&WEAr3KK{zqoJ0WI6>4RD2egs+z z>3(SydC8O?_$wKV@vO>(5{N+3W~n|1XJ-MYUcc#&MqG2bkN~8ImPn>dvW$UR+Iw6O 
zJ<%-%mA~vucN?YLtM?UL)*S^0Mau5a1*04>6SY?LRY4$RO7+t836&c&xZ&+27=P2Y ziB?B#*^WE(nq6Bl@iH5l4=mgr^J)b-_cOr;zAL6yZMr0f_~-2xOOWF4#8pSKhM$R& z595J4S1D5|y=8y(0m}B$x)CDTkIItNCUqX7SY1<#JwbW5_!ywLr&7J+{ZutvL1KoQ zdRx|D^HdYb!o8BDkc6ghjNuAV5;HiUHTQC5g4X0De;ja4kLYogWRYdNLEx@1o_DJ1 zhRTED+d6K_RVg%3T*~XI@g^!a3UJNVhq9WQQ6WFvHSq`IOaEW)VM^HrQjuX*g!CY3 zeQeo0r~W<|lv3R>#LCzF{#mETvT=Pu#6HQ!2IcX1mYn4RE1d%d=%n;pmNnLL+)Xtv z+|SMc5{>E^o?Oz|sYHfm*?uA&#D4w#5$GiCd;}lL&_cL9Iulp;xV*a(ztYVkai!g2 z@uzv%3a3NCc|JC1jSU(SVl>p8DW>z08_%j`03xO4}HB{UAgb|ptMDZ#`Q_W=CAr%O0y&r-b)OZKzX2Mt>? z9Qd{gjg6D^hAd$PSs#ix9>3FQ!ejvjB!Z)&8W?Uop<05ai)ZSS!JFUlh0>}&XqgHM zl6#0S+fyX7u*~xL)oyjpp=pvUj&^?1m|HWgBe)3 zLyX`zBnyU$X<;!%^3kP#s`xXlvjTb90fl~Mde7&E{iBNAJskwfoK%sx&-s(Z9?2GA zVzX*{GWxD$m*wJWTKIW60Y4QQ=e8;_C?NhPq&(SE0Q^hb>BX&|&gaN3B@l7zgZGO5 zlu?n_Mxm|9?pxp*MqE(mFYlQ6ud_Ast~P_)!y8sNJ3@SOTm0E8d*rs<>JXjiR^WmS zF<@RK#H4~>%aDN&yda;(1YEE~Ut0-fRhvI3sGJJW+hYO&AQm6SZ&!rZH zA3QZ7zODrAW!7Z_bv)u1I`2N3i{9uDePMeQEv5tlu^wlTTguX|t0w9kv^Sp3fd%Jw zZVn)#1boj6g5DnrWI2DTR@pxtg5jKKAaZ=+SM4Fnh`vip6WqDKJbm~H zBf1^jTJ68dGNQ)cyvwrrnZ*qQ)+x#7nmT)~?F?Y01Ghb2g%`c*1r{7EE@Sf{oWCst zo+vx02!@LR9mFOU1-Ws#zW-Inx6LPsTbRGO*d%KVONKsB#f;6$FSC8baD*mcfL@h_ ze6618=C)_4lc<{6@FyK^T(Q6YegwJN+1ZK6xh1!eZ0;AOn8(@UfHR|k@FAIgp9lt= z|6~=$PK#A>o5I6=Gz5qdAJ9!b3Vc<*IKhMXNr2_}kgxHAmgL~<_U{cF(&i?r+jxxT zu~l=EPz#QC)TK6buDduOv+0qGNA~@cZ~3cbr5&^amAJDm+v7VVEE9C4k`Ox6H>Qi9J8*_`vt{(@^>HGKondc&;>~trG$ZKW0 z_hXG-+Fue0kUa=ZI_g!;IpGqz@EdhE=axV=l}5kTs&gbC&G&@JZ|mOmkz#Tn5R-gD zY$1a&wDzJghNUA&h2NJ@x{2e&>o=N+q5S7;GWN1TU~?(}?bNNMtJq*8LvYKtV)|{ofW=02)|}@Rzx~FqAMqq`;*6wbY8`(%j>_WAlV{9N+g| z*}$i+hyQJLwarWi+4hIR6tSdlZ@-gLS)V-)xW=?PR@B!sOf9#A~pli{8>|C~9`_1=tr#@%J1*@0= zB5f>-0hlsBoql8s%-a>Qg+UKjZ||vVYU}mi*u(MBKnev8Y_#uW zf3dmQP*AfIzfXc6i-s&_u^hbhF9#7lpJUDW>%(;=^3T3}Ck9%%-=gkp6wJj19UzB8 zi(d=t-6rDQ%JdZ~`J%>%fwG=|nhU=4dZ2C(KPX$?<^Crotd5CRN;Z3al()#_g;qa1MGpZt znddg;28aYlkL%2ufUE)(@Zys8=!D|?NS1{Q0jJhrLC>*9*zqsnLIUYGJn6Mppdc|| zCyLy1ZNH-wu(^tDaN2F@G=D_!nb(e0{!yvIqKexr?Fq#{lbA>IX~cgwY2nv}4w3Nh 
zewf_(Ryjq$=4hg*mV53cl>mA52$cGar*-p?LI`=vh&-sdq<*^qv(|9Fbx~@~PZD5* z`{}v-1M&y=(KrR!Sk}i*V~vI%rca%4UMtUM&FoR?%|vgtN|=2_|EKR(LQ%{kBy%jw zVlC+oy{ZY%z>pyLVIjKA(MnNCaTN9_{hzQO#?LO1?8(4z`s2wZyWtd#hg5g)!xtWd zsRLCe5qCoJ!7MsZfLF9F_Gp|tR4WQ=JNeV_vGk0#dA=&=K6Xj6gu?uL_cOSbnZ+?C z2$aW~5R+m44N>y3UvT@tn?*xu^P9x`=_)FVI3JJHZn|VD zUvcO+q10|kDV$EoVLs7z>X+l0?ov2`-Asx8o4W#A^OiP`#v<99$IKtr34oajpP21C zm7(TPF#lB0{Dka{2g3|La4$+tS zhJ$32&+g8|2tffgmg~PwXt$?#R3|dudm7R~4logtLOZ*dJvyZNQvSn`SmQ5FDNKg5 z+EBSyN*?!Ci%NyRgYpHJfB*AA7ZD$m#=zrZqre3<37z6@F+WL>dU$92UJ!!3Ik@b* z%kp$MFR9QvU=uImIo`J%HyP^wSOy}}X^4N4mYnpw{Bhy5=>4=LVaNHo(PH7~#fPhN zF&-G?w3^^8REL2`jW6KV2NwSZV-)s3k=6-v-iDuygFt?`giq?(eOD+)sJ3Mt1#WjhbC?w&NeKLQJeM13ooIxHaKJlf>Q_^tE3gR*ViJ87J1L z)vWybA5u!o-zn0hU3lU(o z&0g0L*e5gmnGLv6*{TB2wvJ@|cpR>Ch@g0PyQ1%*$^AL2qhQI~ctZp=btf!Lcz1f{a6Fq9*f6Q2fI;}lg4o?=4;j#sRa%|s=XouW) zW&LQYOE$(HqgRz`hMKB0<%=eeW{o814H^_6ifXS zvm1})!fa5}$*)fGREIk(F|WFW7OK=jA4EjZeIJtlwu!Wes}G7QpKDL}fnG=AeBc>M z8Vv+_JWLi(Z>PAvVlFy{ERg_Mfu5`wclpdUMeX+0OUKIeS#xFIc3pl4JN?`q^|fDH z9%%28jZ(i}``y}nlNJvGg&*PCK9}73HmJ!$;KtO}F$zj+p%fKzRfl2o&ZWgk#}>95 zRm57=oSNH*Kuo`l)}_c1&fjVg#kM4uMk!HkzJ`I_w2dGf#`vS&Zt^C`+?=uc>8MFw z1q7OIqavjhXEpf7vfat;Y4#WCeSjbwqTM7lHtk$xZk*Bp#ibW)e>2K}4l?-m@`9DO zeBN^MbSl28#YbEZKqBn9-CNvir)}7oeqZu~D%4jJxVFf;?X4 z>vc2h%9`?>LJnZXmRJhft`66O$(z|h#`6w+qpXVG2~jrT^1xeu;u3>=$FlvYO$NyJ zIKCWS@4Q?x>Cg{E8xN^TcPg_D<+W2n0{w2xu}-ei59Dn?AP(!pz<%~E$tj#BpKxbye0?uSROQJY!kmOEpS*s! 
z1bJsg3hKzjISl+fJld-HMU|W`B2mpyZY*PUxz1*R-Dai3^Gq0a|M++`b$`Eo)_Rw3 z-)^%|X}z2w%3i@3SR~1Tp!vs|JGWZQv7aX`f9~U%>Gj1kn97h7+~%z~CdItRO^iN`ZT+%40EY zU4+c(2B=%)S8UA^Hh%ccQrc4ij1pLtU(u&8S0(=as2+C_BwQ87p+HtWHwHC5>9F)9 z&1Nz``@HPmwA0K0w&lvN5QsStXTiwZb<~%Gjw$zt|0i z0yo;|%9ndQo8exYXT$Nq2j=s_&iY@=rE zybp}pH&7|0@}nsYsZghrTbCofE#O9;^r^E(`9kFTUmZq3p6ZOKdZCg?8BXuxVMn zv-Aush(rXjGx%AlydL~^j-|Qv4!jZtgUn&d7MqpuqBK9JqXSp-htkaPpX}_zKWN$G zJ^P2Oe3^6kdB+O;2>Dp8IC**9T{NWw!#gPuu`?+{yLhx1Eoheg^1Sb59@s&Ne!N_{W4AzR(o z?|DE30+A2Ky;&$z3@KW2^vC8OT&v#sS&%Y(OhK`j?_GQ>e!{g#QjH(7UGZ(M;r>({ z0LD?S5R6vqB}Nq<@(uZ(?38Ch527E#j+tUlW6oG%R#mEW%@PS=51BUk!=!JX5kVc@ zHtrf^Xe5K5rxWZ_T?A9VzdlX=rl^(qMb}FGC$28JVcVch^zL_9p|*6fUt$IVXhW>= zVO(!eL+&w_Jt-JJytNJCi{^7t!767wzf_|jXxCM8d}RdwKDri`QV7r+{FddR(VElw zBi)p$yXShAM=&N$%UCQ=mSCzJM(TPel7TaOok##)FKJBGapZ&-#?-

VY5GAup=2 zCu5eOg?ETU(nAY3vhc`GouTnm$O8}ZApgo}e*hcEhv+fa*aq%UvIlYK9etvZzkRXo zVoQbuGFV|!K{85$vX5y=6G<^>X6kys&=vNE2s9V()Hq$3Z^}i3Podpv-s_xgPTTsC z*=JXTs>cEGvw}dipj)LFVKPPouk4t)i~j2nk*lU98xsyn<|**N01>MQOOs{|WFxbW zSMbg|SCH5HknZ`r$*6wW9DpCv;fcWrPPoZv#)^cdvIpY$G;jUZa2wG#$kBCoX}m|Q z)K%G@0%uG-tz7io+7choc{b68w<#S0nSY+yfdmQQrhXIeRxm0xpTQ{0=yZ`K(Q8zh z6N{ZKvOKeEQ|knU3_`yS7$Z>xE+F&#+|f$2t3)YtVJ-z6fW zCcLOOKz^IZs@|f!o@G1i;+6sz0-2tunkCbMHs?Wge#qT|I%>;-IciY<(}MZ+ADqTP zi(K;zMExf-lE1a+4Y_v<2~(StKASN9bW##;4a>yD9g&OFqECdIBEYb|a~B16{lde0oSIBAcu0N1^-b=Q5pu>F78_-YwM7e?kmDGo4VZ*vKsIOn%iC^v-yItdJSe{0f?~gtdt6MHs0aK zO0CCCp8I|O(ATD6_^7wb4|t?F57dV_3`UK?`Esr$|EZ$Jq*t>|UfZ?<8s zUG`aeh9r&je3MDqZkTR(Ydp2jrx`Yx7c6v2EzcK8DpMG!RP>k8DnP!>tH`D=@P=tt zsmoaFjb3Hld*#8L(ZgFwJ$=e46n}4Oiwl~F{Z5g_==S0Tt#VO+D$boQKuL}wlYE48 zRVL!j2?s*gb9aqs_yLfrimUc$n~GPSg^^jE{&>Xl0YBD{MsT;8rp{H}bb zDzXjds?(nmyvzo6S!SQP&;rTyS0sA%oypEa=vsP1zw`3VJP&bzmp{%ZBI$4y*{==? zGd4pa=jVMG+zwR3% zsiMmykEt?r_g1znQCXP*03~(nb98aHLfTtXa%!3j=Ut&0w)GW3kRevojr`GAe zpisw9TPxb#AuLQNKc@;TzO}dj6u=#C@wTQ^YKzPI=8?rmk!W&aGrC2X7OubtCup#& zoZ`Xw(GOQ4SkcVSzK|YM3`YTV(62%-;_TFPrmZl79uT#mDGnjxyjDyXt`}W$j*Se; zQ#Rv;{T#bDw{-3{g4HOhNI;-b#5hSvICo%l);~bfshgd|F_+DRAD3hn@T}Ht9hOWk z|9gElHh<|}>%s>BoGAKNz)gj>AFz^b2g{Gp*7#N{9-cNbM{#d-WT13Ib{cL20rNsT zOY_RZxA-2xz&0xGR>C+7vw6KM9z2bc#yTMCy)o{Cf1@iE#Pc=m?%dxDHmm1AyZnGN zUW_OPN|WV&>5;(w*+E==B0F0-lT-smy1@f}fg>wY+Jtk~)ZMzS(7Te|s(i=RkJBuoRd!x^mOs8GaN_*)o8TE%%-9qhK1C$c*j<&&pVzy}Q z>Y5XHHEzS%#xAGFiR3xtamz2>dlx;?`kp4bk2w`Mpx7 zw~6BOr6s!8yf>1%_emzXr?mT9Pp%~%zivFC`nTXGJ`pl#fLT^<)W+R@~wjqY`O&CD@_77XtZ3~H`d?@JnvxVwE?jU(CrKxG zk>+X@*e6skosnr{ILin+Z@mhBDMs5UTxAz3Pg7xcP}rr$dON0UBal7R>jFiL5^nj` zN4kCB0=j%Oq5^>$r2ulgOa}ra%;McjJOeieg4^%3@}vE(9?edmc99t|_e=^;^w#vM zx}(E4cmPx&;v+${A#pNT0FAb4k3Nyqm0r zNw7A(Kc9JP3?1lBL%3k$pll6|!(&It@i3|TzHLQjSU;N3m!yKVs-?EMXou#lN@hq4 zfFVf1#sJ&ktyCCR!Bgbrs#+tU^D+@FyjabQ#rhQOA%ZUIKFVX`L~U+}zT>)LTu+TP zSJLA|H0vVj=db6t5X^}g@p1(i?oN+avYRi{9DK%F%77Uu2GPvf98W)+Ztc^5$R%T4 
z8cbPGkg778Dl1Nh;=AsY>lCGAh^az)fYr+T3&7Q(ZvH^|eZy*dnE?V1zEbSDpV67z zX5>ozUg%;7kiUEl>R5aWu#8C2^P`}?x^(H}N{9CK{&wn>0s${rLL#*sK1DQMA%g5a zb3HazP8{R^`5{myPchm3f@Gk_cWf$v^^(GM1wy||xA)u$K=8j>4a4z8PCs0Jb>*kC-Z0HeMy9eD-0*tV_;H#aws!l-ppD=kFnxtp;F{p~ zo@QRU?{4$2lFJzrwSRqp+uqr+@+S?Y6M%ekB~m)a7w;I>e%VxxC|_weAN4YF(TzQffrp7Vj9z18Og%tN+2}iwg zxcMcd;k4&X7JQjholkE-Z7z(H8$U`C`+aeF%utQh9a(n67`jcubfN@})Te6F6n4!Q z<^d4Yn7D*rW_pGml3qT;Z?|FsXCce}Ax=fa); zVX7L-Y_AOQU7d9K8ozfz3t)h?+OaTvqSFt#b<*pZAajcC3sbt~T?mWzirf6EN{s)e zgPadF@e>t;(7I*Kmt=wMKLxqIRi-T}KaD+MZ~Bg;Kv;iaGynm8OQ&NEqc)6hLZO{d z*)Z{&x3+!IapYR;kuTc6NykS?45f0PajcxThA=g>zO0$+?4-7iz z&Z%Pibtl}PQuW1AI-mjRo$Pm}jfD(2iNimI`kS3Foe`W>wUrAOfm=}UEMp3q*lexF z;llqJ#V4-Y_`fsIWd-NzS}E0qFXE`i->C1RK-c?`vt*}*svkz+pbbk2V|~r$W4AZX zFFxvowFKcgvO4RoQm$k-ZJTnMqwL%N*U2E-5Mb~QfAY0=zPaX08)8MfW2YAc zmRs}9#itZ2_D$kkaFm_pH=rKIl@u;_DxiJ=Q7&f3f#y*GLdLHU%?<0n9q; z%>vu}ATzjor~Z_Ylek7B4-y)S%n+>Ae}-HY{QW@Cpf({0zO_Y%(|4blfn_Y$_tMb3 zQQQgTN)*&Xm6CntD{~`}EKO{JC;YK5ZLDj^LFzzGK4lsW_jiDR%uSyzk)vgA_IJyFg%cM%beh z>deaX3C6Pd%h=&az7>GNdPS!3cP--#wbsqTIO`gc;dd5L zOlg!eID!~+4$))i+aqla;6IASWO!M7L+;z||CJ0=^=JLKB>3~d?33+kH(3D|Fav@m z0qsT4-$;f$tVgYk!ueSmw%`7rDAAcp3)zMU4!UaC;Ux~daBX;=9SWoG|8|t1PS!ZC zW*iRA`Y@%jwEzEoylw8I_nZhI;*c=TtLiy@vUT2%oLS7}D#ZJ5-lb%-o#;d; zl7AxjK2SzX9YI^Z55G}u6nDO+8a3w*{y)3YsP3aTpNF?g|6_9VWE-)MIg35{B>MaR z2$II=&F|pHrJOT_TRyP6J9TDY(qV-GN@7n}qyyUDIxCrm(SCsc;NHNT>oocQ^h}_p z_#;XAV5>&$@HJP0^ay;;3SvNpH40x)BmU23(WgCBpLv@2%GjJ<63dwEfsctHW~bPr z|2O+NuLI1};Zb>E(@o+(va0f$RFW<2f-J)hTDt!E)X!YE0XjLHBgIbkTGszKq$JnJ_R4pYvY1Sjy(FDE)sO*eCKy;{93^`^ZV+ zAI#Bd@Uq7hgynbtV-4dhfS+WxjGN;#-6y^qoB2m^8q4xO1k6kk|4FGG8^b4o`=f1^ zV@TIZG4F^5)pNQPy=zajVwxJ|pZ_{SB+qrPIj7Y6^OhFvO1Raq59jbf^o$vTD z?4Lw89Fc-wiVkaK^G*ykav9c5lYD26Zh!wbO>igp)mSlmI*9=YWG2N6ub3u1w2PtV z*3S=SINcpS9To&Vju%q_zNTYdj`m+?|L#eX#7HbkImT5<7{bPnd&r#J@oq9zC2<;n zFv{)|FWh#TjIlQt&Fx_wDd1sUs|z5|t<6;J{m=esae>jq{}G%4?g+h|BQs%E1g!ky zielnbFeKK3;$OH!O8alb@wvit@M-c~{dMU#FY_PPEr!WlH03Qs{@iZ=TlE!z;u%4X 
zckA8tw^d5=0>Pf79G5dQMJm@7Q!JLBAL9RsPRjqP9ri2_nIz55ir=_z7yH} zx^;gB!DGH1$U?LHG`mJW{3E*bpoKItO=0j&(c7hl-*qG3T(}U^Bj7(K7ZSPp1E-I< zWr_Km9kDkkgvzW6rO?;lR-%flI7Wo11VrC<>@j#ZYFF!5W3L9WBk*bb zK}AmXzhDi6?jI{^*^Zl<7xiVtvQAY!-A`w^>TKon4kGdD0fHYupdSRd7+?U@%PiXy zWD#N$x}K1YO8(}m{f5)^9Oa-XTSbl7)d)C0l!Ao;rd-DkDBVZzFvvMD>L+E}6^#>o zd`7EDlihtd#0E0QuPgvwl>B(XMmEK}M84z_lfAFyui%)FgFAQm{y%R!ZT@xZMKNv! z*pN*bS=T&1^4u`Hn4iYzOAJrXxU=g?m-8l~Qp|1P^5F5;NB#7dV z%AZ9hptQc)^xI($8Gt)e84qeoXj-}=GTDCxSpl#gZU4wz)bd}qKDt$0>}Gr=(swF3 zd^mAS6$~g8Dl{lza0RMSR7eMX9lr_8{cP`Y`|n-2@-f&$XD_@QcItOWv*v8Z85X)O z+d&(iKPDo68U|qnt^35)-zPw~K3u=Epl3XdA)VA8*%{q1wk?dzby}MNHiuf+n7=5& zF%^Hc&*mcZ5TDWo+RSEkR2D1r@bEKl7#?CTThn;Qs`>uSzkJlycN#lhk8g1>z1{wL z__`@;j~eE5wZ+r4fGUIRNec4tj`qVRGQEKKs$u8(1M3g|yeJU40(1b|^DT8tx3sTf zVOdc*0A`~reQx%;5G{3UYPkHM4@7mW`O27nhI?wCWXqKH<4{oGT&6>DgB;CGyW6_f zc!gm{9?CjtcFwC^b2^*#vsJ(H$~+sVfZ#joobtfv2PX~fjf01waNsMXGc83;TZgO2 z4x@qtW!J$p$|<=YUx6)A7hD>pt5PMfRBPa=Wm!3zvMb9|M`hOd6ZpEM^0x3o0-oJ- zn4N_ruLjioGP21~#nEsHr<(NWy7Eb}U#{3GDhdX|ib5N1nQe#CnH^o|&3dBWf_&4^ zgM+$$SMXgDv*2zPxkX|jo$@NmY6rlN%nc9qje=VfMjEq!d7B#pkSNR=)?G}1NX#o zP0gdj&&|5C+ga_kirhQxJ_FmH0e#SGZ9^ENtY(BDz|ON0O_!WJ3G@1+=g{l}XQche&B?k$o`OV=l*3ktds9N4Zf9nx7clcQ46 zp%Dj=$WZk=?C)leOav+AWgu|XD;d5mqG8^LwF`Z@PfUKfZ4*oD_?6zmh8-X^^?v<2 z*f8_Mj+%G_B8#j4M^$l^5o8qDH30=4>=m_ZGhewgfDSvhFp_w^?Z#Lv`E#B6bx~7} z7WoQdMQtU-y&$8ts8)8*NuhQ|fd@FW0$guFgYTX|*<6xG)e=eBWl+qSrEiW9T3Qg(9}%h zwdatR$DqJOL4;;}IaDKbjVC;zAqV5y8xD|E&3KsLaddBOr%goZh zf@dY70!OwdEFwEQKUfuB+)Pdvd_m^xaq{{ZOJ8c3+)tlD;PbA6L9@m|AKr2#o&dP) z?{xYg`WWws!?z_et;ibEUYr8=G5*}lP}Ga>BNRBjT3_x5L^n>mW`gy){vt>Wp@R=Wd_cq?!ovU;@;Ed$6wd~ML)PN; zA_ofls*xVbMn7of1E`uVg@U%5jy6fwF;K;cgmdIG0eyQ&@qGQ$4+#EzOYIsx$rtAa zy4D_hpz0mq=8R9gSUbAMa7VKcs`7UdIpjQaGI>{2S@8+2gEjpF-3smOYRX$BHj~$& ztjS(dBvXw}?(b{}1J($f=;DoJhNo047Y4V8VzPmjaLVIrQvjV*E;1~wx#OCf2s#>I zalYr*+pVT#UfskNQIYXNFo>#qCC}MXyz8SOObkLaMH<&2s@rCc4auM+SRw3|r+*Px z=}=4MeZ+6y>vH0Hhznts7ozgHsy6ZKDkK-sUw<{E|?q!;2QZO=#F4aDT(PE!!wE 
z_ow5!3Gk>HmBOHgacRe>rT0+@=%@C^(a6&86=&%rVLG_>t-S7-5UxM1IX`1`i#08tT*Gs^Z2|1B!&S!0FNF^NW**j5=<+>5Mfi0v zJt(Qa8qt8^8fo80H9-bB!Pe{VCpZZ;MtDY8TVBx*Oc?+0`p2oh#Wwei>}a&<{DdMpj}{ z$AdgIG048Np%rdc&0%w>7y4sIv=N%=of}9$x5q<~mZh+)MaKzMq!fFVmTQ6{KwNMu zC-{Z6v`a2<3k_{Eqv>`%>3hhqcbS%gwd~n7NhfjdF9Cj!+9C2lWKlKh;NJ9-w{vI@+WORsFUldzwQF>HU?g^Uf8Z{i zJmdT2C1Op7AHhI0@#wm{bFK~u6KdcCH~=$TE5s0**fnG-}c z)S&Sj9GkUg90C@0IEn9rD36!zWu601Rbz;M|1fg=>WG8_!laH0OQH7tU2vGN)aB30 zowY@W)-U-A`)?q5)6UGp!e!}=wxA1Z-IY~rx02}Px&P&)V|1B^L&WHn@!3yw$7spS zt-?qYSFansdN|FxHX*1@mvYZKywemD*>)PXwdHZN6leT6Nu292W%etTs0$(5CM9dDV?UM4k(OGTwbK21dUlg_a@PN&m-rjDr6)ZP91g@7wt z#>6LWom(98Is7xr@;10Jj2CnT4qy0wS&FmlMg6eW91@e-W3}a(we$mq6K|0IJ0DV$aiOv-`(GhwX{;&=jfqyV+8~i&IK}$~X z3$KLLecp|8j)9?0OXW{0sVFS0VJ4Cn3$qxC4<&);FSOsj+l72PjV=?Fk~edCS=^F) zV6aQnMfc%9QGZ@Bx-510L3yKB%tG4^Vy26HPk)l=w7T!qA2sj#LA}Nv`*Yv2i2I4H zXyH8hily8oo$=<6_!D6bTRBrBkW)G zwDX}%!k-Bx9u?G&H&gOVD_i_57$PG~;7O6IU$M`CBdu9UaEy*83MPNcrpo*An{)%( zed$0yHjrT#2dYh+lar!(r`7~Rw-+x|dv1A6CKa-^7hL?ykgEbwPtu8~;GC(y5dVMF zt!BmLuJuJ@I-cWLBxVxmf{u=$V)R^CyF_y(AC?UisH2o)e zo~N@}$#ekC|L7UdHCd=E@5HVx{bUp=rW0VX;+#YLOe@eex+Op$uu`6pRoT{pn!)5L zaI+o0xYnK%cSCC0f~>yG`@dr7-Z7_TxKnD9I$MtqH?<#WM?SAAlIXHd+tQtRpiCX7 z8~uI4ZY(4Uy9@vMuGtNvI=nDsJKfB49Pz3hAqj3AI5_=vt;{4f1-nJa2le4brXHbUmqJ*AU9p&DvpYKkX?~@hf9aSa*m;!p&RV{B(0y@))Ddu8J6$X ztWv>u|Dz91uUcd=iFNXiv5jGOlV6kdqv;}ju~;|2Ip`eXfo?gA$Jc1#F;0*7?^A zI#>G*A9*t-v)xa%YE#JVjn0~bz`Qba8z<$_TK!M9pgnj}w83SIQr+E0*|5?Cb`62h zhnKhSFH6R9woaVve>)Kj%Q6ey?-#=8S0(#|kFpDhx_&U;E?;!V7D_ZtC{)~49+vC( zkmT>+p%zyaskuV5<@ScVTcTw^+;4JLiB+nxL-XM3DqBkE)iOPzySXK=J62>{ z-0$)G-Z|i@0?H@C6uaPDS>lc9NP-j{jhtWApLl7_WHvW4|YA%<9;KHI4EJoY;V+8O_Z{HC{EptE?0Zwnn^o`Va2z8N?Yx3ENb88coDp>Ap>M5 z74&|=tfgd*bnQ#$wpl#CY40_cjc>^%Sc|HuBfb?W@`)dxlS=_XrFB1uNU{=)6;GfuhzR9>w9#n59=T3W`ivy8wO3$XFZzkr6#3r z&11FT?}hgCY=_Nh_+8d1xJ{xG*ZRwC=iYmu)d&H(H0&Dt$M>hYxfAV|At_Of+ucLN zIQrVxQTs-Vea4q)Vn~LXI&C=*c}hVgL!#6otTv$4i@V}BWp!#9rvrON+f*KGpH?jj z^PiIQYjy%PQ?(A-6w)4xBaI)-`Pxx3OuCSX=!fWO7E>5 
zj-RT`yxT=*Eo4kkzQ$Rlxk0W-30ZSFDE2yNpuCkDEi3a`Xkr>7m%a0mwEx%b<`w&f z^HNiLrc-(zeu~+NZgtUtq~+V}5<^R5<{o4JD=|*!A45A7L(m2Rg3} z+S3xX8ATaX3|QD;6t)9C2nz?B^K$P{>({-PZ?bDdR&6b1q28mWSUen>Wl14pQmd<~ zr zN4PArhQj9A57|ILNAzBH3uPHDV*2wz4qk!ZVn+zx=!l7CRwirDgz|S@WRb+JGw^NT z@7pf9GLYZOFH^4_@$WPsptRL6*DXWmasRa;yw9iQrnXhz$s%Z;vT%J6jn+=&P}cGc zcWki=TV!!o5uxuH4{pk=$s^E><8ADz|C{W8DxsC7R{D24aTL!cVH~${!QRxe*YoC< z>@GI&_crFbf>)(AhBIjy>AwcZj=414>Wd_l|70rER@;baFSfv0FF=V;1fVm~>0jFt zirfygn0p@cB$C?Y^phIOU4d#$vm9#|F9(JgzoM`lf9q!g{f&=wZ(rTyXkC3;XL~3- z?{sixz0gG51N#OmnI(dlpF1W)%5Px`)mzAoRmPR382sm8FV@vG2u6t4$#+E-le*4U zJgjJ*zb{a@+4*G;}2 zD@Eih`P2$a=Eq^Av5xK^cvkID6%Oz+weT1>znGD`5&k{YRC>;B-Oq^hkAK{_bU?g$ z`4CfFgf(-9S^e+`)DD$Y38l~_r{B&PjBCEIW`Uko2pbv;#;plI zXHHWJpU~;ZfeQlz2Pw|JH5<3)emD?!W`la35AaMH!-6!_7tM<%Hj1h-c+x*D>D{hC zb)V@_WjARvxaF$AZHu2_+>ehP^ZvHMEBkx&Wm3$O&3F=;^>LoH>3b2eM({$VL1o}& zM$6V4GWZ{HnZ=vDsJoQWY&4R?@RZj;*@a|X@HHN>75%)yQ>4L+$z!2*5 zY5*-v9>9>wTb{gt7gI9#RCO8a@ck(HD*E+4Ppk2Bi9(&Lq(5`O9zW^1UOdv0jNbOd zoW^1I;wk8%Vy)lbJ#5-nY6s=YibIBbC)s`G|~R?csBCzifj0lT)B zl~Hr2i!JWZ#D9hD+ldL=!s4A$#4Yu|k`KF}S70u6)BSv)=om8zivHxM%QNb8*rN1@ zuJO4Ni^-co3@JI0JWCaXTgieYk`v!Y<>te14*l9HgKe# zv?8_ZLLzO=JX(jf+Xgi_>zf$h%ua+L>Pw`b-FXqZ0bjQ8NA^kFE5v$u6vpm=@y_Z}H7K6EM!D>jOrm_O2!uHXdH9)Hk1u*% z^YP5GhN4n0-%rKLd&y<@$JD&mcZRpsLC>2ms_J|hNz+zT$iupBh_-;QMJ>FDu8lwYC1EKn>Es()XD!yLVnLqCVB^ z9AG}`*5H}G6{!B%%iBEX8hd`e@Bp{n&Lg~Zp-t3k5sI~p%8|aV1w(MA?G>_8BB{?`{vlV1pT|J)v_p-13woYBCeQ17Kh`0>Ma%NMQLH9O zzgSexB-0>XNSF-73EDu$NS1BWPd#?0n~`VbmHmlqy;@jnGsHr8 z`&akWsFKX-@@Z~S4H^y-;23Z5m!fdBBz%z1voJ4X%heZSyVa$}L$SiUjE|+1okB&! 
zvFSvu1a@stgX>n+KVseT4DC_Gu^BrVPwGkHmXJ3nQvC+nqy~_$z`z@5 z+mY)1Fd@(0rcuFqYpv(;JXBjzhcDXk`8?9S^^K65iXUYQ)sFiGv%u4ZV76QPhW67V zH`#FygN6ISD|PFoHpi=`d`|Gw&cksO9)_Kk;yP%7IXo+WmF2V6^kcj}Kb=q9wc(|w z@^vU17Tau*k-Yxq%5Kd}E!kwY&*C)3TtPQ_|;HLK1|x0rE>rHMj&usY-; z^zx`(n$$0Xg>$VX%ZVl=Q<{PXj*t>*s2-oSwN@&yD!JftJbM%())tOxX&traaz{gJ zep_$a7Q+|5+SLn=MJ^}U1MABOS*Dq#SCYk^U35Fnseh~2Zg7EWJS4hBlGHBnpPt6r zx_AwINyQtRy|ao=eOJ7iCQceyCX4<#&H4PMg9b2DWD95~R8Q)ck>`ij*306#M<)`?#JWYcFo@*PgJ_;zg31~J9l&JHB)x<}8yu1+bBZ+NWWZ@g>Pb*^}MI1UcL7FvWIiXM4-i7wRX_!!G+ z+b)rb!|RY~*x^v+!iXuj(%;OP`#P(metl#wy15~9$a!pv6YfY;ZpclBDL2cP5cs6 z!o$y{aD_AUdY0ckxqWHn_g~fM+tl2Zy9FwuIuyE^nfZr$e3Tk{wH6C?k^7h{040Zht8SD<$Knv1S?8~uV z#=6w>#6!5?V8uuIr3NqRXI6VlVPT<*cjDGC3OVw`B}6&vK=yLfETc4~l$YEGGuumr zqA+iM*KGfCwLys5Qj!XfXN_=DGB>qVs;eki$jUPu184YE6`&M~Al7=}L=(J~gA3gb z=_~Ld?`+iKAqV~ie4Zxim?*ztYb(an`y+X(95`9?ucA`-+>h0ZDd_q) z?d_m?rd7p8LA^f{rdP%SD@60a123LYZr-~7 zXf08ecLeV9d(V25La?!)QR=NdO!Qo?yr(zx)BZA+jT#N~IoUDFLZSVl$~(wI_ot*Y zOtjCqnU(&tnv&AE*xK+*JZ@SbzzE(}bl+WYxmXv95KQK)1|v;8Ti_X|55?q~3#%~# zmh{fKcA8=5tV^=Yb3kIIO`;luw}!$~pA+|9l{;AECu~by`IO;ZD5uZ=X=5uA zfcFa*EM$2$F6l)y+d6M#+8|{QI=t_#?b)PXwOBE9t)W(pEaM1SG?i9~Y zmO+WRhZFBuQ@A>Lxzo1nHaWyY8}h|7Bul_^{Nj`r*zK=8qRWvpoq^vk`7{g(cHNy5 z&

>v4??qp7aG4xohkeCi5||!85VS_59X0;AHQK%itq@K! zNI|U#j^6wq7J@$bDh5kzuP$1>P<^{XWWQf$F2!Mv01jOS9YRziU9wwhB7W zNyo)H%Z+2}S*rYVkVRp{Z8YWTFbAgL(|S5ClUJNoo2qf_rtHK98)x2&8vMF<tL@1#Y|uTBX6aT?nAOX|zcMG6cN*X)f* zT)}_~YdO;i1>*->1;4#DnedN!1R=Z4kSKJ~y%^uDzt@o5Fh^70lLVYJM(^iB>{M9p znl-8%mU;1e`LHWWYWxyu_fOxG>x9f^V$xqk!sUq0+`GN#1POQ!zm7|EL>$$ccHOsA zy-i8IhqIoZu*r4s{;rMDhd2Lu?*x@#@qDVBD0E{D+4L;(xTda|UME7j6eV0K?b*FN zVNecsiuTezg5bfYnrNB)R9kquO?U@_M;Vp69Qv&^aevjJ0D`6McY~Q0pDeqcw;-&m z)_o&{|NSJ61OHAj3$;AOe7<2*G*4#uwmwSz;KSEx`7@d?w(H!ROVqmwnSxMK4Z}9` zyw5YI=L|o%yy{h_pde$i$Kj*Y$4h+kkm(mI^I?p;Tyu#!B~KMv3hp_`{oo*)=WXW_ z(oME^kpk!~CeA9{vV?GBB2=>GoM%;9f=dPia|ps$qrhO3(!zV+R~6S<2BdRX3%b7; z%WT#q&R$ZFqM!PEdJS@o?fygzUmj*+BQW{oVNbgwuVoX*8LNYFY{sZ6Tkiq7YP z#(LO2xeiS0-Gx|eXS}Z9`P5ik5V8$6>SkSSEJG20lP@!~L*q@4!|`RBQQqdJNlTjE z#d(%eCx6uhJ%hc?OE8Ogfo$6~Wc@C-d1E+ub)X{Km0AP|m9+f5e5~qkB7&kpcR8P4 zm+;%8P$RrZIE1gKBt(}JzMe?6c$|C@lf!QR#3n1@*@DM)_anukFkGvP05$pXqp1~X z)-tiz9Jmho2jOo~@39fnA=%F|x`)R_{|`x59T#`c!*Lky?(XjHZo`MWyA0=GI1G2$ za2w8$4R@De!)3U;bKJeh^Zt4Lq)EO>nx?<@T9ce2f0PjP52S@mnivhRSA={*|00>= z9};RMyeR}h9RsIWHH)oFRzM?J5AiG=;LH-yOG~t}5+F;+GbE8$*jT!GdE{GKtcky0Ixc4+wmrIHbOWStVF*2%^ z+m~dcMK7OvA`M(`5%rgxcFB5|J4B22Xo5Az7M^X85g$fyMzTRbBbRlMnuT3R)kaF*$=qfGt zno3(zXPqpl?e<@9;Z-?pi|itsj&yQn1F?~z>L3N5+{JkpTA@bDGO5kS7Cpu~ zlRKg>-wTu>Q7}YzbkUGco@_^4BI`I> z>XK<~>-No!oL3MeQp_y~6mC_1H4wuMK- zw+RKNT&RtsHfF+gRr6n>(HI5I%TZZ)VF2;pwe=6St+Jcn-6DQSi@z}yZyGiy>m`ro zv}51f!>xQQq+$D1gz8QqLgD)tH)gkognOHxHTRqs`C9Pmw3c$11H>_(%WSMjDELBB z#iX(Pclrn(UfOaxT4*{jLhr)=S%t;f&oE@=={f;62*LM-Qd?bbO93HxBJQ&J?G!y$ z$4$>s7VnQh*USw|i@Zxv0Be&`YWnTk&g&OHCY{ zFx#BfEo>UuIbH#r&YbNoR>8^g2IHI`CeuM%QEmH4`%8~_7z27c${*CkKZGWXnI!Q2 zdAP(?+)G=fzQ;_&+Db5Fm<#saC~rwcssB8STbVx$;Eh@3rDa3p8&3d+S3xH(#*xx; z3^9NttgS5>8vl@*__N8)72^gvy0>YnqEj^(N1y0*viSl@Y_9umuLV1e79x8XeEGDu zdQd2H9*-s#U?u|>^PE~wXAV%hhSu-$L!?^RNx@xIap0@G-_KVlR~Rb$Q>!ANdd2bv zO`Dz1N)cu0p69c;&nsRg$$vty`fc43PnLxd(SlGzENqm+7J{^j*CHPEo@ejsTIYQ+ zoWT;(X65+i6kootQftf^Pp@B|w}K&a7UD->0n%}NF5tE7OSt3bQ3^J{VX!?S56TvP 
z{36E+PPaS2&9;;>aPxZ}g{P1KCce?;IO zZeC3L^s1gJ%Cmn;JgOSCV-@r-UU=>+c`DcmBsb|=Rnzd7o+*U{%ntmfqRd4Tr{ezK z*R{*O?#zYyD}zkX+@JBet&_w4T+Oa(6Zob&8^gE*AEW3&z6keBq_g1>eU1GEDic?6 z;NP|#;36H#x^BoG-i@jUg9t{eJwM2Sm}2gN}cE#N||YXY!dSNEq0*H`444XK~SI!4!DmjAiuRmAnf9d zfu)}Xlt%~dbFg{A9`bEGz=U@zT$Qu-7Lla+DIh@1u4wN&ga~$S<|5$2l>O{%eEvF) zQybs%gpKcCTKS1lm&|r&BQ04cZu*;Ps4hDJ5pvygIEbVd^_ZcB2oe-UvEV}CG`0%M zx!%-FFB>_(LCEkXyk5YY;LBdgq7Gs$;4~a}+qF#mV_6X2kg9Q}V0u_;vw7YLnn07fypJt=@0P@mLgn zOA*oWJ4FIDUKBIZo)c;yGcKRYx$mUHOASkL81Hcw1Gb{H@pCaVl=Np#?WQA_O-gH>zm6M310_$Ao~nMC+EF#w}^6?CcX{i>#h^k~RT%tAK*lYA36q zww1hK8yfX`l{9y`PW|h!XX58#v8Ogn$lct{(Cy3=GEv4cm9)FYIsWR@#YvR8DNrK- zH^v>XqvA@KpKx#a-&C2g-X{)09NnS6pdN=j>g3aZukdMn(}>XcBZUw0TEZ(#xe*3; zuL^Ezox2ZI6_3Iv+j{q^tI|()UTD0AmDmw<#v%%LG9DtR)~fUKkzyU6tvWzpuTjjO zFW|X|BseqPAcHyl#boUF)Ikjjzn_aia}tmAR>U8Yxdx-atvwLLXV$4Er4;TMEPP03RGMuMh9Zp-`$%Uu~efXeb` z^xfZpL$u}0vv9}=fV-v$iUGz{?#U{~?x9jLf_&u~YKshb1Kze2kXqxew-APb|@ifD!~In7ikYrBl)zKbHT!C3D?__r=9$uz2Zz~Dne z{!$Qbb#|`#a2R3=KW*#(VTHFmSITUa%NXtvt7s2{IsXvIo9(Pf`?paqBWsT7C3OrO z1trr|9a-i~1~B1f)OH{rV(Ck%cvkk5RlYbXv8o-X{=1rKkFV)}J(woL7z>#;ZpAhi zz~7}oPriaBp20{xNBR#FpfYmQQ-_T}ihS(bw)QaKXhXK=HOx*lP5sbo?RZc#-@fK@ptv?1K9eeT=C0l22wgR_=8V$jjgA0+r8I^vBxb^& zpwkPwn%SO)h3b=>_ap}U78@nVOB|hikE=#({~eP2!sh@)JfN8RgU?ulI-zh{&L~|j zB!WCgLlRs0(Hd!-;VE{tD{Z*k@<>3+@+a1cd~DoxDq5E|XwJkG?}oF9bdKt{8Y_YY zDzao**mIGOp83IHupV*e$t};QC_X-D0R<0Awp6@SaTlQ7cm}Dbc0`Xe2+RAI?Hk+k zUK5>)vO1prjU$9y_ZQzt&qyXWlDo$VN z)A)q_%Obcq`Ayfp53Y;lC+xC=pKT13`c;D2f}lT@3D&p3X$*m12Yj;hVvX~$6wKqb zBKBb55oBx`(YqFCgP4~AioN`2`H+QrD>Q>w`7>A#-d(Fohg!e^h|xufU$fbnq-^UgWYD!A2!-Ws8D{;BLT|HLRxFjubrHB zZ}nmk67*+RqwO+Wyqdy$?W1}Vq->iGA!Ab5w<#It?XKwXH{D^0O1o3`YHWnyc4ep# zBp(?q$`LRn59`A>M{2Ng5JsIX1Wi~uI4HITv8Fynd9x@1ENS62Xm!($SQ63Sn#%Ci zN#kD6#>&T0UzEtq8Zme90Bm1BwB$2eqq(PVwieQ-x1wq(6$idYI+xSkF{M2e55vum zD3wg)pyat|aFEN0rxk(O+p+C)2ezR#y8uy}zT+ODr#eDOl7M9{W6PWt@yhCO#&hGX zsu0Br@#LcgGy0-S;wJ@>)vSfxVHF#Qf?4B9v4@@*EKlzW1aJw!7)`u3L=!>{(Faoo 
z#HE-LVwUv#qlTpgFnm!=V-%H!LZCH-7e~^_TBPoP>DoRnb?kTh}qR22jnAvg;}Zr0*!(emkj7X-$_4NSrt0b1f-O6caN|3wMJv$@e^Vy zGTl%{7m`H(>paL@pHKWX`W`P|_4kuK*=Ycay!WlKe{x zEN8(`7Y0PiF}%sb#d0pC+!n<1qM$Zt*Bj@StTiiWkur3lNrzJ5{sE{j#}rNLTGp@+ z3LnX0p%5Eu37WXRKbhGNLf)*T-36Rvy-ZXN+GS5qg*F2TwBs?%U%&?O^6ABOGrxD# zJN3=p!F0qFCizC;zR}-W-8^ADNP(1W{shsCD)I6J6QU7GLhZB`P`kvdjw%!kh)7ZI ze%N8Vcg8XaP1L=~0xKNX7h%IAS_&g3{^@s(+=2cRODpeAUM-2dOk%-d&qoN! zXo2+p+L z#lbQvbNs~hG_{mTCTMc4a?=VXjwm%%clX0x{QW&Sx3Y_*i_9JtwC4+-N`BLc-c#}o z-V@k1sq}|WjMNCH)V^^t>3A1lPinm_(#j#R4R0kE0 zDxuUXACLwst zOVICn*cE3yN?!Z4YodLJ|x zqrhclVD!YJW#bWt>5vI*?|PVlbM3jCwfh%A_$Lhup$D}yw!&jsQt@Fh2?_lTRqRK( z$w$6W`*qpS9RLXQbP>&a81~6twgT3 zZm&AGPYrwP(T(A#enb8_!m$X0x;l8pm7Ho3Lym|3#S; zvOLjID2(h98=BYHr~;8PFm`n>nVT|ry~VFtaiKIC(7WFuI>fxD?WeooOI(78yDojv4|n$`ls_m2T17{XGrxVSmNT zu?M7mW=x#yLRvINI*3rZwx9`0>&5aI!c@z97)@$XDP>Ox|6@`44HoL@UipVY9K6;I zB}()TBSif?w<4A}pk@L3amp%f3jtV$i9#lm`IflbCzX)5acSF&APuVZOq+vGN-N7T zO2}==qFxPwv@~w{YfV!O>|`bk#@WCa_4cI^Ny#@;=#~-;tJ!^uQRz|`C`hs5Pu^b4 z=5ZgoucOs<%{C0URM$tAt!+V5*>ff(uqHa(4&ruA3U3~yUI~1NrD5DGB95cMz>p~j2pcHT-kD6)GhkqlOUdgI3$FCB<;l;9`}+W3M6GAE^0g?iT}QcF0DK^m|jCeU5l&C^i1I&rr~G*oxIamy#Eck$hZI_I0lFk z+2@@^%A7Goiu@9d zVz1mTlyHDNId#3zm78WhVlzq-pCz$5ZnhD?Yg6`X-kGQ&TVqMn3#RhUn){nvuM8RK z0;Y^)(zPh*HDsblgA2~yJ*bl+(DMw!nsIq<;PUigTG^CiYw6h=U=^GMz6PoHQRoj zSGuiiSA-kqhD|*I694@SA(*w8M)^E1+U}#B?AC=uNuw_E`v&1B+LGPBKE}_by;kLM zF{wiew-TtRj5E1fQnm1K8KiWfYoF`s|JJ}{8?a^!@1u+WPFcUyF*=U3JSmwdoE+ls zg1hADwgR92D9HsewG(y}$+!Ao?51x?*RdWe3yP`4jq-jaVjp>7DIN!5=<*vu(f&;f zK?~=BvZDC-V`GS3^Y+7Ft}av$vmhvX+!hG<&|XZ&)e>63=DGjz?+)NeZB)}i&!<)7 z3WE~(y^+Dux#8TxxWI}8ZO+k|$GC7kdNSc@!3-_n@PhiwN61@YC`&L4c`_njE~S^P zmK0*IYeK&cej-Z*{}n=SFV3!YVv(X)j_8b=}h?EL4?s#ii$x%~~({wtNu`O>(5<27$6GTZrj_%LxcM_*-fTdNv{-!^F{hZXyd(DJ?bo(QcR?IdV9y5N* zvAmvKU4!L7N7PLPI&q{0>7(7Qw{RRwf;9F5`iu7 zsCm^d#dOwHpE7Me_SpFYH_#`ICug!FxD$QfoFvWagcf(}d^Z#d+$ z4QwwNNAoP5nItiL7OpuBsKP}R(@e9;MdjCoi}qn{Y#vfp*Gn4*xkw9fG1jlrFUP{x zqK@-Du)DZv8C`VlcLfI%-Zt$~JGt0j(9{Wn{07&ZwApFB>asb14UxsSQqB2`oHcb2 
zSxOgju`B*mhioT7ogIfR+KJ$){>X5Nv!ns_-wLG8}I0g7*&7!Hy@FcP)vLkN;d+gO9VaAs$R zovN z!Cl_`Oz=R|v8;Y=$(rh|`u*pYn z3@hr|(G$Q79513AH@a&=@b3)EV2y{X-xOiuu&!ZevL}Ay*IV%BFuk<6*NEXaN$Ge} z6krYwTo8AzdvtRv00y&T@q3{5?t-gSfJsKln{Y%%Zs6B1^0CYdIo{_CsvQ)>x3D=* zWusTWnzXML7q1DAuS$mR&twcDQCmF}h~38^>>p+H`6%RoPRUQI$DST9PX*cYx}gHi zi*(yVbk^y7mjNTyWL9eDu5(dVtc(3L*7sq7^&WS$x2F&WumxN1VF|nPvfYdN&;`ujhB=0S|$Y?jGdeq z1VS@(yC!+ulyMN`(~3aRkh@SN#055E6#8I;hR$|aA_cU>Q`S&%2KzouZ_pz z!&fZew~q(i^*E$FG4@FFM3l zC3r2>xxR}WWov=^x4Ce$l6VL81BPYVN$ii@GZCzhock)Oa!jV`fHGEg|FKOoSE@1C zDwOVObC*sC2pD7NhJh#f0KD|zx=zfKCqRZIf9es`m4M#|q&`96)S|)b& zIzN;|6Mj%PK~U~ikV5qUYG9QzV(YvJ4pjGNCn}sz!WfX$-x?9zX07!J{Khuo_~Ny$ z{|pWy*|?MY#ZZUGZ^SWFbq<*Y3q#mB4&XT|1emtQvj)MU0XR1WW`6kA4!ABaA4UK& zT0fR*t8PbN{vHkC6RSao@;J0s>p0v2KbirJxm@@)PxBwkA$EeMJ~z*(K9V<2{%;Zd8H@3!7UC7hE!Q z;jl))DsaVH{a9p}q-5LOPBg?>>+6yt@jBL=q64}KM%6ZEZqu{J(N<>X#TwBY2-f(E%siPLZH)BQBf+i=PG4p7{flI zdXl^&!+UZ@Z4do0LC_X6=9@s`Q6Ut%f3Z#GoLZ&Am~S50!^>bsDfebgnb2WY>Rb6> z5^uoJ*zCH?Ie>ga(Mn_3&k*nTrfL8lXHE#mk6?tb@}cUK7d$->G(VolHo*u$>_;57 z5?CuPk^Fw>mKSj&AV|RZqmz~zDL8T;L$EkpdN}CGQj>d0`s8mMHt}&{-Z80oSyFKU@jQVsxxHDI(`~8~a=^sCk+hyj&H9<}wxR+QGCZ6@W z7meJP(dLPgS7hcFwuTxOAnIO9<){7pb(QQd5v$*JuJg)DMv9vJ)v9cO$v8a7c|ur0 z!*$%cC$F@&SoZQDInQn3=g*i`QyN)*7CO)8-fJ^NzY=Dne*3y}lNz`6}RZLUdH~5iB(v{c94OlVb)l6Ds^7Wti`dJ767_t=i zvGp^hEaT>%5vPE^G4ddi2wW}QWKR_YE#aw{Me8pUqtbTc#P~_Q<&^hZ(6#Jad{;V2 zzK$JiHkB=oWrNlHwPwagM`GWpRT!>%GBnmif!t!fb7#A_>C)kEaIz?VNX@sRTYnRn z609(wham`JNxSfh{$4{_{oS{2X5RDBe{OculK0`R@(>KgAx=U}HHLw_6}ZI`5u`+t zs^Zg=t@1-PDpT*2ZD~%BswTcsp;Rcrh&iA6XGWT@5e``6bOBx4CV#Uo^{2_COln{t z3!*^gI=rIeKMBu(gv?q_QgFugsqjr#Ttj^4W$~1P(wPt6U8UU06lp>}(a95-l~N)g zGHuh!+xg+Q(UC!^85>>Sb6N9!i4Oe;X8bXJR*K)Y(Dz}Om;{@<->Q(uynsI+A5&93 zN69?PidMw<&LQs?{b~bU&;$j-(6)3dTRQqlsi;{|L6Gid6{eNxbX;c9fUFn_+D6(# zjG{3LUK*nrdk4zZMwa`EvgE+16zi?E(yXu{;P@8+224-UwGGszL zjGbDC^eflXESKjD>l~`CUN5EJQn^2$JkCid*tiggdpn98Y0!3^ZxaA-$v=D5wl+eM zf_0UquR9cjGxnbE%HoD>A07qq+)jn&90h)V^X@E71ZRlmQtX2+371C3VvJ9G!;rWx zaxVJL%e)g97URJ{>cz(u*I^ 
z==NM!jYL5HT|uFB~Z&{Nv2`ipy@}Y0mg~A!Cejf&^m(pgUM~ zdc9r`Ik5wX_8Sn3Mh3KR4Q*D%1pbMKUbvRHD{folxsW)Gk^elrqD?ce{5?)YGd@x| z`J=M#_rHPk!c*c6jdgI*Is@zcZ(DLGK^45eKibW!GC=dr)EEJ70leN9^nk-m))lfi z%Y$t&pBGSnpLuJ7S5D2GCZ}!GrykfL6YIWw)f{~=4$QFsTnrh)h$@5yJ+^&!GvYyD zHI7Zc_tLyfKTI zg0R#$#&!WnE+MbtH~QeKhv14pTX>-&s`d(GRt6E}Dt*JnbDdmVCGkjMEKD((eIVK( z{_8F8hF%JSdk7}|dBVAB$5vcX$Jk&tiwNUsQ)ZJF{l-THO<)~kXlFaAvWSL`= z%|tCqoecg zs&T*4dRFT?s7YWm%@wZtM36z}73UhdB|3MDPzlMNf4z6;T-BX_S}$>g5ug*-w3Uj)%U!93$! z@py?hOS{VflYuS2=}w&_FZvHPYFKV;R}P)wLK^~xEoH|e@(_CF`u(T+I1RvoNl+10 zi%n*|fsJaEreO@Yh<<_1=+;(1igziZS#7W`;Ai5&@hp?nM;>gk+z5#hf2V zPphI2yU1SpNQ6>H9u%rFP9HCIJ7XPOL?J2*4Ln*p;34Xp(s)r+7OpKhV({+(j{Kan za6ai0%$o(yPvk)_>Ul4sKCJ)1B_9;?j0WTN#6Lochv zYt){10YN8f43G934L|-QDkq5Cb)3l6^my=kDvihz;(QP7fN!vZ%P3P?!Zu#(3H+GT zftUy$e`-19S~338Uq?JKbpC}4Rf^-Etq3pnxxlB3KgL6 z)U_GeWlV;RclbB+O;-NPj}h$xPS+CYQY(msF+RLTA&v_zeBaRbSbmmbLXXffa{-=d z3pY9*^gEodB2HCk`aEuv^WUB%Emf-5<31hV^83GGLnhq-e^)3`7kbfMi-DJm1HM7e?oa~&Q5ZcGIY@d^{2{PxE0Mvfe@uR%xWzLM<9)yg4 z6Zn08LuWS${R!AJiMI(`;>eBo;8$D6mmlK{%bBNFg>-ge*-7gqhtY6ovuo~r+p~Vn z;qVx|ye2jdf_|MK&`Kg!f=lk|k%aBxFA=xKX0x$tvuOV9$PQA4J}Qtl0`~K3sSfEm zRp2!sKdK;?(u9~t(P(b0aaW>}yhUXp*=~PcE`D{u1vfvP2;DPSY@yPBhnr16;?(j@ zmAPt>Y?Y_7`|QVtjxywAw;?I?5ud|vqtszv<_CE24(FuKN0DoZPQ@t&n$HMDMDF8S z@#He9GQpiloNcsM_awX`H z!zBUDc=Cr*+01hoDH7MKeq1;5-!q=fFn${jN4IghES3Gx&4T_+9(DAQP0D5CFy>}f z$rUHL?zQfN?GnI@yFG~M)S{0kVFMj{!oDiiJM}f&$7=w}Mz#$0_EGD(Bci&EC^*XY z#e&;Jy3P0_(S{kxX~3^at~HWl-2hPqPE-ehbI#hOG7rjwF10>o+z+wJP?n8r*Mr)c zPX3g2UyO!-cO%o^1=-mRo)XKWw2H2-x1>bsbUB)KYrw`S_N|J*a{q`(DOt?Lel*)u z;&o)k0Czxj&eA+{q&L!ZTB_r`Eq*t-GfQhv3i7ov4Uv+P8jPGTD!-q@UPO(V{I#A(Z< z#X?2Tb=^#bmc#C~e3D`W70GstbysbFg!|{?dVq*PSC2$hd28F$`?S5^39u0lmg+ad0*-4Zpkh6=#V`4+bkOCQ?a zrR3Sf!n+C#dsMAA_~fy>p#Mn9f>Kkuw{=9K9prJ;xlZeCq2w97xXi{kzhBJuf>Ez ze1H=#z59#XHsEU;&x`tF-Nkt63`$DB{Dc+JcWK8sUTIq3aVTpm!xX(%LN*4!5fZUK zH|l`mAHC$TF6dp}l7B|u&Ag>ngg!`;go@ROlMj7s;4BiyL?ZzHUVbP>3_U9`sTY11 zvRVzFL|fXlG|)AChV`{~ks8mbFqW&n*qZDSfBYhfDmVXoroJfD9)?HP5$b@BW~y94 
z52BUQ%oAM5m0GWsr83&#Kxq83H`W&srT!d--}jiZY1-GsF!J@@T?dd3I_w0ZY3#i? z6j38?n5`{Zr6B(H!wzZ*%5=FH5zBfGSSBd5Pp0RP>sn9w>{<$sIZ)_0qzxDi)vp{Twa|ZW-RJV4YJdEIBJWSIprHE^v8F*o zVbMxfd)%&*y1FATK--M<$?0nbb*YwntA+D*<9`zRJ-=?}+__nk;S@%jB^$aT(H{!Y z2+u6!f#cv2>35}7EeS4frgymA=?6Gfh2%6@TG2b)^ttcuym*mSRg+DjEK_D}ry~U7 z_IDBnX}9S+oJgVYt;n0XPzNeG4X!lXF-=f-X`6A{>2cIi0qN;u&Nt^PA=xzS)z2z_u)|jRbUS_$CpM_H?JfrTR`Rizr);f4` z({AUZyW}*@sXDZe*{2iQ?L>YsY!WM^n%C8|b4do#4T zRbgTeVN{hlE3Rl}z;^W*)c4T0Yuy|VhW%U9=%4pqT(0o4uCB4a%>5(-W%o0Eri?j+ zAi1*VEVf%Y~$j! zqykY(?m(x{%YiwJyb_8sG|ZlQpT{g-sC@&feIFJ(3$Y%@-RGB*&Ijbx%a;+vg#579 z_BsPV=qVk<+0wVQ+kJOXVc%)DsR&D7z|>zJVK8&%)MqmEDPvhqw(D}Zn3`w^NAMwA zQB@i>y`M}3oG*90yC9~!iaEKNrv3vP3`VR3n{lNv7Q6=Rb*&meZc3HJ2{kyIaa^vp z9TrPPY2*eaI`H2Ou_>ZB=bcSYXXaWE+7ejIHt@x9H9GWKaSFbHccCTn$eMOPeY3t6 zFYH_l3%D1S`%Ltb4zh{gEqFn+39C(w<~L2VAe4_!EVk2TfsxZ4OzLxn(IxCue?D8X z88Jv`D?2Xyr3$o}FBeX))XZ&*kZ^NfzL(gkX=Foh?(9V!;4CAg58(f}x7R`-fxv)O zDgBRaJi3Q}ja=(9Em>ksUif?vy@Dq+zfCB$7D;oDkN7nA{QT(7Dr8j0_$l7G7e9dz z#VauwG$RMr(vtwep6Ss;522AZ@cZ!QT{W*=L2zX{Re_$AkFLu7{|YP!4^=V6z6D@2 z!VsS>(Q8HyTK6CJVEHl%b7SD^Vt#cwGP5xOf7CJ;47!Kldkl0meWJgr!r>pz);kspmF*kG{n;s&V<~40TwA01`T+ z7GDFA%`pBnxWIMO%fc8S7!3Y;K@PdQOO^e2RuvUAvd@pKrH%16;g_Mb+@A+V9e@iv zl+|EdG12pPgjxf?mN-f8qmK)kkIF_;iGNoh5@48RD?}YAGr>Oup^Om{F68#2sU2sg zLYzQuv6P{x=FC@2CDUQ>wH7h2gOMhTX}*kPR2RAf>imGNA^~bI8dQ@;9-*_J+WQ%P z+*$osCd36d`5ZxtCSf)EvfCDZ*iGa?u5rn7FmU+!^hDSlesRyk=EDt4BV&PU##+`T zA8o6Z$1LykouVuV#e_c;Xx)0WML0p5S&H?138mn^eQHKYadOORtsQR4!S( z7?fsNmzlQAH@Xi{5d`v};^3wZZdj=IE?|8}6Gh_~Sdbpl> z`7Dfl%(nKxf&Z%ISiS4)CP8*vqP2lAz=_I45`poahK)pHAmoN?9Bb>_ zzsn(zIKvLc9EUZv!WhSSD*|Ze#}>sxX;0T=hPt^qVN}tfz;(%qo+z3*`i8%su%ZJ; zkHUGJ?qk`rKi8nbpel*Vr&Jo;&6<}Ne6| z@l(pfT2g8}KVm`^FJ{PsJN-5l`U5gf0#Jqh@93DP>?uAeE!A*-5+hl+viV39Wnn!A zb+h(a@vLq9iWV1CVq*{G=+jgfI@X>i`c3irw8@Bbe9rTttXA4~e`)hF4z0q5s5T$% zqP4jO;+-aEnD6>)KPZtjUde;VH{&TciHX+izrb+A->wA+e)Njox&$;DBy%AJaN#yo zF3(1C26Ox{KRCJwON&pm{?Km=sI5${cdV`;%s*my{Ozp#&F0bn!VwwA(2 
z=3ltyaB)f}4T^zftk~yP|8?C9>lqNv{(znoK@qb;*ny(wpGC>~r@;#I?l&+7oWw9t zj}b##jeD$6u~rmjFDG?CdP6y5mQ)G#$e<#yDSr#4*+)oRk%@jcUJE$+4C4$;h+aNY z)IntR@aYwS&Y--p6_}k^(~B+Wmfk3>yJp7I$c&NYCDo#TczbLp<-ZPq2kURX)HxOA+*->I`hpZ)hbfon|$F;gpqf*(e>Zh1Zoi#+0?Y%)90W3Qa zZox)8oTvkW)QcT_>-oEQ8qjlzZ_}@_PY4MVcXyb+DyZyh1t&&7JnPyPE^SH5L;RAx zyK+&=CS*xWg|)4y*11xg=^#e`Dztux)%pxlpUZ>#CvzFjj6BhY(^xe(?L)czJUm9X zxmB0`qPxdc3^&Cb=bv7YVGJgzaAFOUDQ=r(rGbPvI>w9mh@P0ZWHG;}OD_)KIWC~n zSr?Q+yJ+V`le`D;l)J|Aep32P;qp7o@=X|AYa0-xQY=Hk!XuY0Cp%MWAPcb~@7zCr zu_0XG-B|;_`u`Y`f_o`8eNje8x@}S)FB90dl7XhgYn9nPzquPW*nr3|{)KHeE(wOnOmNd@QPz z0WZXaEVMI4!IbRkKIc@_GxK%2f1aA^W~+@>+3Ls|)V?oK%12pb7hXra2;}k#?K{J) z?rSC7jbj*T)E9N==uUgPboyl4bKpa?#SwmLxtOJUBwDH~njh+LvJ|oo@V(Zgayvoy zb!qb{rYPBRfsFM9)gnQ!snUacf8e@3%pqUA{h;R z&JkE$!+*}TzjC9sJI<*JUv^3xarf|Tx1-}BJfGPtw_cmb_O?dfjbforu%pCb#v)1m zS+}S|(|=bK9WtGz`#65HYYonOyt!to3vQ4acTr3!QT=K0=E`g=C+onC;SnqX8MEep z_DDA@POC4^>GNpx9*&LZ?dCb3I%jm>S&%q>CaPV<_q+XNn!x;*-ah!)KS0(0Ai(u$ zhEw%7wQ<+8|K)@Y?4E|uX=pAU%fltrGuF;%KU_f#f1R5Q1RzX5(IUyW+da`rA|eCT})tSCDt1UoJFpUHai``B$An4GNZ;H=mLu zTM$Nh3_iuJpv0^Uy=2y1ibL2A6Ol0oJsZjGlPtB7Lt=V2^@?{XEXG4}dZ^A#^qqk1 zpNbG#LqAtV?CDzKI}7z&|GEJzt1LuqlPxcZ!&RXadq#XDEm@n|qfbfRNj@NLaGM~~ z4dd5)446T9l*c%7bfBwz#cM+R)xAgv2{_4Fo(Ug&%cJMIy3^OM?_CIWdwj|A2_0X4 zcaObOK+R40_*p7GSMN-=*+Lq06mxflWx1?z^Z&g(SV9_LCZMKa7%L|04_-F^`>mRh zROmmZN@Tl6SLenG5lg>~YJ4bcz_lFoY5v}12$B`yzAt3mn2fL>q@jhIY-znK4>{bR zwL}#!MZ?WBb^kU9Axg))V0I^KB_!1&JAW~KXFZOIIPFgMixAb-JD3I%mz*|VrB>>! 
z2&~52Ab{fE)#X;WXE2H87>>zLOEcb*dFM<-8}tWxiR$a@;Wcxe8|AwlzNU%}`e0h| zep;ElpMqF>Al)K$)01qJ5bZ_-wWHhXol}!qSC26wk9^ALZJitOdoNP=7yKpYgpU@V zbeDV~oKdyK{g{`0RIRL^Pmz{F z*=;uUu>R*@5lA0Syy(w)#v@|30tL6z5)fUiGU}>J>cY+BDz>YY>n>s5QSQuo<(xtp z6pevXeL;{=CS+VXK*92uh#=%@|6c+(uWL*g;prdsiXVmUA>|5wYIu&i%6@( zC*r#IMK9`rcm9qrr5V>nw)nQl(#NJwPZhN# zf-Fdjh4~!#P*^KdrWHf=bMSkuLO2{i>EMqy5+RO&9}wE*zEliCO|} zS5M*{@~5(`=yd|kaG#`B_uzd9cf#R_Rq)%C4=u?w`Sq*%64f$Uo-KxFF!X;Y(3E6) zmbZm5JIwbdx=c94-gOZyd!4^EiMFcVn<4cnd|%^3kU;%8ic4Z1%`QZ%N2}WjXnD63 zz2IHDTjJ@q>XiNx=~+O=e{Jpy692{$>a2M2;xELbB!K^!RCNxTL2dFgS#{&5SIzo2 zV-D7LL2&td<-w*tWKFZnAjPbgI)eWkH;y#sBO+#Y@CdE9wbY(@8fpD5pIctgst4=T zWW?PhIk|x(Eve$gy1g0-Q9x*4-UcI-R+VD_W%Jd z5ZvM7?r?E;cfG*H-C6SOzTNj~_tn-`y?@@+)YMe}x=)`ur_VV(J>AoPij}(Jo*>ID z2ApqPh0vqtq2Jp`cx=cZNL9}(^V9cUfB)w{-aP*HkybMNwGcV0>$%qnWc(imb+;zR zC9*WV&_}PiXOZJ1mU6A&f6`}o$Bnr1@X}gWPWif&xlZ8CRLH^0xtG<*tYJ zvcV|yxeP_bZ>!nWr=j>@>0hJ?F&0yWlDU|5)HpUeUT9sqx|ssTo+@b9MYoOsGNh`a zvvvPkB5F6CYN_UMQhE#VV#jOnykWp&o@|0armm#fa{KFjU-_%k@1xB_25NjRa78nf5X(ikh{6KnDc|D?#;8(Y2=*RnQqvm)o@{bueW;b`yV=wR;PM$U)K zDs64&X6{1HDs5-%W-e)N>S*>>iGsO<$-k+8!OGr|SoU#ShDeIiAtlY&ysZW)rp63wxbTRRn%*Y+ zo`2D~c)cvHD<~8P*B0iPfRP#7b0A5Jl!BGT)N#6QcIaDC&a|scfaM|^dwACQ%y*u+f+|pHD zYli;380yX1;;eqxRR-y25^J_g9g)(3DCdigD@0Q5zV>2EEl>Dq-`kZd>9rzLy+Mrw zV2y4rjKB{9(`L_$6LA&`E#CKsqT95vHH^pRK4j?e>IUj&4!a&6fk!5llQVj^E}|_{ zm)@+W8fFMY0Ur5n3a3lX@i%Xw+}9(`&a$#s-2gBUyO@{}`w?;&@<2|eiRO}VT;Y}E zVjp!rqqTmzP3>b@(x5_pz(jfKW?iKTHut9B-uvia9PO`VHYSQTWFb}WGp4xd%ai+%tyD((!)pzFQtCn&A;cg+;kpZ zG+*gOh@(gGtU+lfv^>@np62&ER?Va;Ds7G99X-ws>>gs|n3Oy?LsrwyXNzSPY?*;- z6O$=42+!$RTInSSF`;KtGA7Z#Hg67!?)0~7u7cQmbjL#8M~S0yvn_A|C%!(J@eCy8+CUc za+zMr%XUbpMrys^tk}_bzAsIhv2YfLI0FPmxV1P$KObcITy_hG*k~nk0TcS`GJj{B zJ3eC^u(Oa1e!!7lIo)NNy)D|!xLq|yoyD!Q(Kow^{rQ^IQIqyun^Dtv`(>1+IGUdI z(bgGNf_ul{#BsOYBwD^{h0}qFILntb3pvgC#CGYFR{y-#2rIa_;*xkl84!72CS#vN zQNx+c9=)7+w7VWSG^}<1JwULu^}XRK$#&57XeU8O`?1*q0LS&JsLwH8g?0?Yw&jJ@ 
z+aT7qV_udeE1h1SWNx-Ikt5C2|rrj5?`trToJG9wDc@aM`7o_#d~q|~O6~Ynl@e&~&|bANqEh%bG*&jB=K4LR9gq?^Aw5r;ZFuF$ z>2Jz5fr)CO&SfUkgtf5ESyAz4SwSt`ctT^js!@GVe~j#Il{b2gAe=kTxTokpC@E+d zrAE*HiTE>i&BzHV&zjKsa~2!Nxcv=V>B#mxzl2JK`~2s>5M6^p(CzPX%j-_lAggTd zWbf^ApE<-ed{b%ulV;TXZlG?HV#5JOFh(c_V6ejR!rp`ZHT1xu4W}T(?RTz3x>A#} zBvb=0r95*=-GC#8R0aQ>UZF^P-@L+0J6OeA76V|~$h^*!F=xxAe3piud}v!7Yn~bWI_n z6S@n%!gJ*@P;+?yP*4S?c*bpEGs~^v@83~nfILg^hpofm6bJr$`2Z$S0#0@XJjeIJ zaq7t1;U9(jW`&75!P0Ho!@+u29|xg5QELQc<2h{ca>gS>EFpEtG6peKYqkb{{Cw>) z`X@Pxf4<=l=CbWE>jy5Ew%;{D;tRg{#j!3u1Q7Bjb&&CYXH=_a(mnl8rpdjH3^XG| z{64*5cYirPkr?wFU8VmZFddY@i+F$h?E~`3k#^L3w-c(Y>%IhWvXz1u*~FQn zses4KgdcN6$`rxjJdZq)e}ri?7CxE)hN1pJU9#G6F;tOY$utU_5nZK=zPyX8m~e{< z$JW;P(gP(%x)Hc@9Gb>bEeC!bQis!Lmi59v_Ox-M4r?wsq>obV3iQirU_vJpup_C!~o-^jBK z@=c&W-xFew@_QLtCF+JW) zn#?eZdg2FyzuE<#>qU`fM=KB^cpnI|m*yU0ZZftG$@}ByUT}Ew69&gEAY*-A`-4~$ z#K0Q|r5Bgs%Z2)n1vSs>19|UQra8n4rb6;jtn@GSsn~C#+bi$OV#Cxn z9{Cl#WMh)#@qmCyYw>lBeAygjkC~Q%73<## zT@uh6`<8 z`*BVubirYZpD-5c`vY7{VVaV;bx?xm!rlVx$RUM25V@;^DY`GefWqCbe&k0!rzVPVTatx-Aq3Qb zxqX!2xZlXG%=6b2!S3$UK)CYRwUELc<2n^T-qolEzfOv1+Eg%5koZ?aWPdSi(z@E- zKpMg@kSBGK(?{Gjhd!!Fp3|#kM+10As{oyGB$hU|z)&ScwOk zTJ5+5DoLv0UkY6V3wHtl;D*gd@FYjAEIW-<$M z`lFN+%p_q<6%Pisam1CT*%0)%D0mO%90q?g)>6R_&tuygp%}(4{?@TeHE?TKb6tp- zqZdga0NC+G4vu3(Rs$ERN4y0lsCnvE5>|~@HtYuZQ(ndWD!9 z|D_vaD56`h)n&0ghv~5@f69pBVFj*V^bwDvFF#PGl(Shl#Zq^Rtcg}hqBX1jIwPKa z9lfmKprXM+ri8&RAk6~lIcL~L-_nyJreP`$qk1{Y8OVJ(MsKIZhZ!vj(xx!(_ zTOE3Uyp55f)D+ugy4iJG%QAQP?I}`AL7}D;-X}Eji!oIQn79Q;s9fZt-22khp=rrl z+xx+I&1FiEW_Htmxmz;$d_i=Hq+nSaq^l%d&h7sjeJ`ZelrGA9SoRdCP8Zdq354O? 
zWe2KZCMFyA2OP?9#O~Z@M1Rs({*uFnscSqVoU>}&j8;?7{qf1i@8zq5>e$Z+?@G@Ih8FLz%t z8$_W7b&YcU8xVIto6VxN*tKA7bG|^!$)x1y@CdJ{U#*h#J_KAVMjO1ihuW{C??`ho zf~}M`#8}ZZDy+jh&P7-NpUQ|@ZlIhV!alYe=31cS8SYc;N6eYHQ>>|HR)#yp{CEHqg-{@2G(GR>fv$+~rMc@O{7G^{;N!F$}aldo|JZVO0U zZK^s`UNsG)?BoUTOAI5ydTdf0YRjV|>$@$G2whcKG-gr079bGUs)%iDe>$(e{Z>|_ zctD{-DQRZtXlUce#N>NcHu%r}bm23;@bIk4HNAxv!rKnH)39hbeFXlgGZVKXdeUp# zLL`?-SkLezr*%UP+Ke)NwPK9H1dC-`rc2oxcXd82ffCaS{jIx_yHd1?Wz}yx6b7dR zS%+$F&W}W?8tzu&zP0mub}lEcFUB;+OJDT;Uzt@Fo~SA9f1R8E0Y@FYZ6TKgU7KW5 z0Ve-ph;3Sf-Im7>J|IcpCpX-QgbXibFnt3i123k|in#ld*^s9n4Z?o_TLL|roBP|w zEiQjqCuvgJU`vP4Mcs#28vcmcVA9O$5YCJrO%fi__-M<{oO>~~Q^bu-DS=(!hNbX~ zIZfkhqMCBSy|J|i^b!ChRp zIRzE|Ac7!>mli(=>^4AS8%P&*7ggE+(bNF9*t#6ybt=k>f8tGyr*E~#G?|2pil$e`yvQy-iz@e6!qajvgad%P*S6Y3F>pt}$rqk0J4d|7qQ z*j9pL8Bl^jo=#Kke{lCoYfxSv)IUw|5AAwdDM-3HI^!)Xvmbr8N58jUbroZeE@M1$ z=qU2rN~8(;9714S%{uvXy8FKWH}YA&qEtYo2}BLC&q$h$~+9xD0A@n2o0|g;8c74^91eaBhjf#>!4BmzKXVf?T8B zzs03Ld|iK=-aqbqoeX5o$rZ8xBl_$)eN+~MN`_h|Ykt*endA07_*{ME3L-qd?rL_| zS){1`p2xQRevZ$W{0sVXPGlBaVy)$T(7lr;93avoV0!kItL|#yspzGZ3?kUjZ@R2y z!7~;po`z>Ez&$6cOfB&D)DuBOAlq2CYSAYx|HJ!W>n_u11<@J9 z#>8deE+XgaQ9IXbj{M#|U$$qD?wJ=MXn@c3$c|@&bL0p}!GGS0 zO{zN-H9L1xe+#!=bzfpUa=x=$JOGl5eYI7Fo7I12KOWqoDk3jpQvc{C)ZE-IBHI89}Nse-m4zC!aCQHWZ=049(G?S@-GN>9t|fF`gWg&SL;04q?=FS z$3zbvMRwm)y|L~s-AM{0WnBgN@(3$|Hno1)$295DG3?^c1=Nw1e4zq0wh0Ka>=iph z-ZGn8zKX;g!Wx0k$6Mhd(WMn&(~;^t}@JsE1u1@Mv5dEX70pR>-hWKKFBJtuVd=B6;q;!p`(nLWyAn&RWr*mCf-TzYR zm(GImxSNl^lj}FLI8o`Zd3~{Q=u1aCY|#F~#`Nz0_ebZ~7dXflqo4na5wCw^NB?ca z>;J2vDH#`I@Bj1BC>|bm?*GSVl)iVClKD%(Y9wQ|T9PE4nT@!dqBs#};uDi%A7OPL z368b8)%|mxexy})v1BBDd@43Y0ar-&Z}EDjrkm$AU%+h!#INJjcg#EE5(H`QAGv>W zU%wPuKfUG8ylu1eZr7#w9PZNHAz;P+o4Ir(Fups9%=iGeCr%#<5ngS`*b}#+OKSsq zZ`}qNvEnp5VLA=HPE6}tr_k)m+GThZr(8;{G*Wi1h@uf)k_(FN>s>-^=`$^(uEaqn zIx7>hcqUE;SydqjiuX=xV zo6ST_1SLP*`4LV8(1gl86(K)xyH5pjH;3weO%KW(Q0e_ZMs_)5mFX3{U3C0uyC-09 zrXJiW#wJ`^?^y(gTBo8F3xc4}-T5ck&3JDi;2q14tItbM;Hi&_j0;{N;x~VpM?#4t 
zhD(!s2&ul|H&vUNrGy$YTAaHsLu>pH-}ci8YNp=poYdhwb5N>g_mtmv;M_1Z8z3UL6Wyqnt51( z?Z-HsAzNW*_bP9Mtr=WqzoVs6(|Ssrq1LD*9mw(p>3gPz+63VaB3t1`WK_2QE>(d* z1YAb!;m_f1RvQ{|W8w36R?j+$fCx%JHHWJlGv7J;8=>qOfXq1}Ou~k}NpxzolXk~D zR(3g-qj+DEgC>}SBtF+U#`3serOGQ&1O+fJWSwCVDfGH$JgYu4!p-;h2`QU3k^;vO zMZmaKPnT=5V`4%BzZ}ORgIB*anr_st zvAyk;$vt)MDMN30ORVpgpdJ;}NmsqdxZ3;3mC~(c zr10xyvEvWkiwtL+seLI(Cw2+#(Z)HiVyi}5ipZK^i`Dq`cSP@Vj`^@!I(uGKbg9r@ zozz`OHqAc@a;~dZVhTVnPNsWfb{y95G!K7pUXs*Fss3P1f69tY85^NQ2Hfvg0a*Ut zp{x`;@^Qu{YJI1Izf)cPC5S`G^+M%H;{%F?9ZXs&#?PMAHIL@%wAiu$u#vTk1^frA zaCzTwwk{fR>atgTYqmS_Uh}yFHo?$!kKvAoqF=z_Z)6Wf`_9N{SwZzzQJNp;k?(1? zW6sE}1>;PgRX$-araHnWgcdVRg_?ZYAjhVOUW;YTN0W+nzj}|-Hj{H}nleP;-^qFo z$UgX#9cL&r_iaYIzbMpMeEPF^w}7E>oJcEG;W080Pyxy=s(K+F(5K^SBTxtsGmbO( zb2qC(vyapBI;>HhMA0W8T~mgQ{jR@J6C4qkW<2Wo0a+{vft!I13B$Cl>`0VZ(n?!# z2k)TQO-5sl{}(KN_Ls;XXyQHEnR{P9tY;DaO8+ordzHf_*~fy78|PK*^7%U`w@_PX z;yne|l|NCyFKTpb+@DBjP&t0JNfQ!Jj8NoAqabP9MQ$IM4@Q`$sf{`}t$&NHsy+(t zjr?X6WfM28zM@zydcUZ_!JhjmyGr*&oREHQ_^0PS=ZDW>69a?s`gugacXJZrNR(Uv zR&z#GkSQ!s!H-MYsy6uz`u&ck&R&m%7!Z(HjYB{88Rkfm`a^cfr|j&EN>wCf3^z2a+ND3;VIY|dKl{9`mJX)*F;o3h2ohRpRpnQ+ ze?%eJBiY@TCh(h*$j+lULFbHE6)(61bF_g!b~NjvHOWCqm8!_d$8@uf5*4|PZtB`H z%ICO=SpCBZVx9Q#z;`#55pGl6EPBt(3~yRkFqON>c<&5wqJn?Zg0s02ymSH*t+7*t`uVi=RGpQ%$ak&8=E8^dFDo~Ut&kYa~v z21Fu38`H8=>7PH*efz5Y^F5n<-PXC|m!DSG4PliZ1wAz>r2-o*=`t&Sg_j<7#lI<0 z%qfkJ@L%g|Yrd6}3G@TY@i1w$#S0gBxnZP{q0O!pI;|SXW=$#U3QIlpq|xG%eXU zB^YTdCcG39__=KLB8z*i{Nq2R#_61Oc7?4F?oU8sm^s}}^{!BTno8;*NigyZc7uQw zVZU&66>I`!=R*zKwWeY&e)i{#%3s%)b*1SWFRKVJh+iIb`ptBj-YNd8SIBsmMtjdT zwLSrZkiwwzss;cz;-|LEAZZom3$qUU;rS!U8-Z`n%+Da@;1mX_;FV4!w8UDxG(W7l5MhjhLae~ZxY$V}j5)AyXm_*gZ;;2kX{(^Xw_;uRlZ$Mx z3~+ty;(=tXNgi|Sq2crRY|F@h!l^+UrP1H5ej?&$cir0OrCqy7Kl&L5g)Cf7ywlDD zL^yqB6Q^waXio7Ay8Y>G;j{FHeFG1P1Iwm6*gw2kYu%It}Ku*_q!k7~vPRE_W(GbD zmX_&(pdA72y;gucBNVF$g(r9;C^72T4~-D1T_r!^+9a*`zotRO>qfm-6=|Vm;v(mr z^Q&6-7vGVXIR7fm8RoFe6aT{0teo~Sh|%d}>8{VRAK|Kw`Cw$6emC|GQKjE$Oi-tW zx$;4mQ0L8p?Q#p$D7aw%W-my~vu)|&<7Z>+5cst)(_gMk}1 
zU&U2bOan>aY7DWFv$ZBF8O;xZf2}2u6t<)zKl;rAuts$g-ieS@z3~hO41cR93+yU( z-B4Qn$gSYSOjk$?GJ*xh{bLX} zLjRdr|DX3Eeb(j`b+V1jsN^8TEnFD+3l}xkp1@7{1{QxybV2bG^TqQ4582?iQDASE zi3!&{=r(JIs%G1?IW=`oV1EZA^H{vCW!U)A$0HK}(C>??)EUopgf)iM-k^ySh))~% z6NFA0w-5qP(%sg zr#g@9n(1}F5ycpVbu+ zb(m)sQwEN0Q&GDK17XH?^6ck$!Iey{lQ!#zw2>=2j1il=2H9LvkP#D%TAy@?74dra z{%lgWF$u|8f3dFaKD~voPRhIX&?YQRr%V0Ze>Na@$LOxcNj&GgY%gMuEg!r$&uKjq zxa73;sQ01uPNCF*NALZ&g8t?d9ztZO#yPCS>SecoKvzss#2xZTBc`TEQ0|B#CS@mw z@U(Sqc4bTjolLPT%04}*!$bj@=dY9*1EI#U3XRB+*wo4vPvBlAYT)(5#8zYmZ45~# z*HBvchTQ<1?ArsuCDBX+6ySlEGtnIQuuy``z%e}=Ge(~XJVH)c#66+a451sTjk`)7S?2ttVDYed^YtimF$IY;V z0a{U?pY(D~lHDN7sGo&q!{5a`?&?)Eem_0+8zw)w?Bnm|_}nRLWwf&F z)RNQ1%_X#?4?E>)Gr_uQghIWK-Qf;-n+#rg>HJ3qL~YL*fB9~s5d*iCxwSf!ep;U}!|5+Xp14>7(b97G9yv4>1+ zdKFvO5~Th#V!8}Z%FNuz%2BTA<3(N$`WXATxwkpRZOk@#W|njIuCWuDOe+f)ps%+R zH6g6yyHN4F_3~qYitb_}72}nU|LBK|?}(t#^vF{mNB5XJj&n_cg%_(I*IN<|&UOVG z)5CmAB}zg=^YO?CP~71ODM!+6D+Tz(Yt9hRo_zBUP=bNx1Bh3j+FbNBxRZGiESWZyLq>{E3jc;`Y2w0Mr$CJqz>lR zq%gKg4}%-9IYFjUJuBvpLKtThuCF~hqFpoTAiuv;3$q@Vw%Z7|cwg290Ni$ShTz}X2wp7nCowO`i%vrW6%TtJcCn zJOKqZ0sE3U54fSOvl*-i+7;sE@q+ujEDX|doQ3=!9m#K-&}${6@ImvGL{6bx3<+ie z#t~S5Zf(#imHtvb5)KXgURu6?u?$o-J9Tbv_l)ck_#m)adYLKtEBErT*mlCQE&I7; z)YK>BfZT9ab=~>mrE;=Ugf9JBX$PTbd#p%7?_~>Z{%OK8{fu^wg*yDACv$qqmCrQS zzpi%y|Eugs)bI_O<#V&IRYFXc;a8%4;o{JIzP<#XSA4I9On{TI?yn2Q8pZhgOJw zhxj)!!nkf`TTNU*%!5mOR<+XF~wpj%kr&j42F_Kv&d!T*8MjG zJOa1MQVw5mWy>kD_@jc;samBPGs|t|pf5c8w7i~Hh_^C!oV#H^pnayMl z4!j|45PO~S2VK>6Q?FTV# z38A0&L$1sqkKWFyRR-MT^fQ-|7#vV|D>k3}v^#WPB+P)+Dv@53ZY)kh5hy(0D=VX( zE!|yjB-QE@V%5n(pDGmyFYCqs)vau+N2ZP8!t5nM+Y?b27Y7Pol-|~p*pb>oCD^!k zVcqv2A7|F~4ic?*@_l&F+aLBqQHgU-*D>3|Wv?Lp8g_=@ybg*NaFNgsD#0%#fTDt8 zVBcu$cFd^fjz;S6qJG{oog3Z$dqnMB+yh}AyqTel)j@}cm<1#eiTREAXczGx$utLKrPfC)p4rVluajR~id+zwbSyw9p4sJ^b&J z(ea?qucg}KO1x0SLBuIBzliGU5kKL%b!QS=d$KgjvdEzGd1sQ`ssZz5dk2eq4Dd#j*41mvpYV<>mf@VOCADYrE-J3bK+# zGA`BNjv5>~?8Me(T;gK^%|(%$1W<{>vPAv5|L;B@iN^(sH@3HJmakAD3Ht8^VhQnR 
zf;{c)=Lb>RO;CIy#!s1qJjCm_gz4K4=oytvg{N7z{CA=xCj;|NR*(IB7GGuzb%+jB z&zS;p5D7XK3>ykhw_57!mExPFmsGb`5sTe@L~GIOlMTv5z7DWnK`5luwHR-;gx8bS zDg2FTdHdvTP3h{abLawogBB=s zM|3Y+T30cdZYk(Bc1Z7Q3Q>gjo3}u>)ZSar)O+^BF#!d`eUdkXBiGOgO(2sHaKA;r}o}WFO+Cs zWi}uE;Ob<3o1HP6ze?<+uj#U-UsvyZzTD0^)K;8`ujN;;-&tNMe{d`-DJz;fXcbZ1 znleasi@5(eg_Ph{Hb6#KQnKJ>(jMyhV^SjIDM5z}Z?~c?a23A$Vd+{UVfBNprB-jw z);QL^R2RWF`o^T0biM;e*5t2d{_?eoO!RprM@$^gj_q|d&FXP9=yOXvIi<5w_x-YA z)-ezYC6Yd5r)nrvV0DaVW#|em+8E=q)WN8X?k|h!$6k)gDqj4BX9o z8NHuu?vYH>lTSQp?7Z70Y7Q0jxpkkMOI*e0=HBAmh=`Z@4-e=Wm^TxfZ~0tbHLoQM z=^f3e0b>2fy6XzF9(sA?%hj#Kq@i#;a>w_9SCU(pi(}dAnevn1lY8wgsTdJ5!dyGy zroN)EXhmzwCGoe-9+6|c=3L`T7fEL1K@WJU&1(fB0C@MrW4cMt#!rJ|&dEa`1O@^U zoex;n0&;^0S)uT!sT{{au7)2^lrttBc@4t@qZ=mN-&bYOJ$6PJ`H$(9zr!_%!8(hfDkpSnfvV|z5DD>BO9#ejD$V!C01(6 z-1T|0_nJtGr|KR%&W|%X4TcaK{9k6Gf5m=OlpcE!$%1(pY-6-UMdQns^GC)byy8kz zIlUDQ9;*-F2q-TBzRPlTb*#&HMJ#ixNBCcSfa3l?`~bzy$^JGW`oB$%=Hlf1f1Mot ze>xqSotKM)o&W#-3F@fL+l_eMAEMiqRTV$8MMbN`gt1?q5A!X8Hay+jVD0I0OwgP5 zoT9(pEDhW9H+ek*p&-&p-Vxv6qpi!HeM(O?wS2YRVzs%{oW)|otrCap-Uwep6>T`> za89GqX<5n1%}JZatUlTgOfipa!kts!_GTNdpC={0j#{rG?x)YIVIsINfn6KRW5^Bc zEW(77GPw^{dfVf%GFSAyK+PywYT@Gj?(_04uR|l(hmB-IUAjtprlGJwg@i?1(*^%A z3#NYUcI^Qb@S2}shg~(lWcjBH+|Q?MJpBt>d%~>#iVr2@W1Oy2mlwRryUVPyf&sfv z(317P9mU-n1{)QD2i#E*q?VY)juGZdTs;PsRLu->@b4n*G+bN-+u&;Wp|{ro$UDH$ zf~cG!Mn^Axie=Hjr%jE}p4db=5Y~A&{W)9)Wi$w)Eq+rx8*Rc3mpFjB*G1 z4v5X96VcTa+DKZwXBo9z1#k=IR+nGhiSXll*q>rq0&4r8+3`^k5H|%sH86KA@wnWn zgo%!DS}Y+wKb80&f~T=84e|MJr#kbKeOUe5UQx!c_5D1~z%_wHOspbl{NvB4tf8%z z2qnT^=L9F&DBug;%t1XboBgeX6*#c+x#v;KT{IS6Cgh~$%x}FHvP&ky?R3Bxh}S{! 
z;>X!P9q>YLKq`8@GjR{8#%t&H+hgTlg@7=3!Rqhu&2d@IGGA{x_GHEZ)BIlnPl1pQ z@2e^ZdcaGI3`_fvp`sC?zS)2o@T~u@Zm~qcH5I@}^kETvm)}A73XW`pwEG`b(LTQH zK;aCZ);jaGLdRN#JRXY4p0AE63@VSMVx~vTW^%T$=eBq}vRZ-$bHXEBrEX|n`x(0} z4WI-8+^!FaBPktYf1k%DnjoQL0SUg}?z}TQ1{wc0>}}-t^R623suJYveLhl zr<>6KUA6q)^rYTQd9iFTV{fKPx~@<~c&dm5<20Wv94hSG##?aRU)$9vfIt6mAA2z< zfd&>P63qw#P{sM|tQiU}0H3Z_(;op6Y5irCQ|^Ie#0i!%gY!xR5JLqU-0D+Srtwmy4(cVkJfXu%i}p71k}w^ScJX6{CKp z&2uNEeX&GYA7%6dCG*lxlBLNO!I=FHz|{fXt6D~UynUpE3~|F^%O(#CMzx9BW3%k* z87FN(KJ@IdcDrRM{pJ|w7|KK8IJY(z(>@miI1qWs^%8NX=gIBBC`R3B7mhyN}!N2Rx&X+d;=!|H;`wE0`2Bu*=BlQ~R{jays3JJXjB<7V`-R zX}!M~2Yh*SvweIiFmt33CYHTrNcW#~w2d8;HACjHSklD|opU%V_Z4|9}Q?0pk#fh{J zdr`5L3?-Q{XbZ9~;?5H|UwZg$IwqgU2``?F&)}H!a z^uCV8`VoAV>@6mDz22|?%Xzv~kIeIZP{lW;>h*xKyNvA4Y}h~a`>a9@&lKpKV>-_= zwf1f8z+!5GVzIUHi`Wz0ccF5}f;OJhMiGMfLUWz^{ZO=nLHyJ2eO~G$F+^B$E{)l) z7oH-|j#~eVW*oY#UY*G4cMxTy`g8p$ublJd)X=Z6>DhJEM*09>qgKcHWOX`108`e2 z$BLReCr>(Y21qxHaS)Y*rZEzL?eSu*{`B32X^I_J_QI1OA`c-b0q$b9 z>`#(s#;N1`@8Ws{L$(HCq=>#J~-slHW!&%6$1m=^ngH-Iww3BG>&a zv(?g1I2}trh#@sBt*RCmvzQcf?mJ^T?)S6r0$6_ESNrNGK zYu69(v+DI8E1{(pPB7|cn1{mTut>QTZJxQl#&?gN<99itb~&$hYm~e$dPFKqp%f&6 za=rn2M4BF%VR<2d=iGoK^pvptgbg3{BiNI|0So+B-Eg4SUOr~lpzE%f z7k5cD)YGNMAya*3TJGV`l<+VktwogGgo>%cjJA(l(zYC=Xo^3_?IVh(x!GU(LnrLJKWpS3zY1};N{HTZq8PFomzwD^M&S@teI4Xb>}!8nZ8$yf;>wPetXo8I zrzuyAtl0=n&9LM7$m&(oFT5g6BhfhKl;%0s;kWCPyEH%L&C}=X>nW{X4QT%StgYr! 
zz`S^wI1lJk-YID_<5C;nO*5ceHd2sqR>c^|F_Eo(L1^E?niY0_Je13`=y>qN$AvW3^@i`gJ&s!8tRyn@6Sk$hh{S*5GPZO zy%@OIjt8tbwv~2=s(YbkC9AGM@%Aw@7Y3VwKm2X1oHx1wZYAvAT&Y80-;Z66xI(8X zD0BR$4Hye+s*h<_9V(k>mh+^NiAouI6IQybuWKSiiriu&;Gl#D5BqpY-TzadN)KCp zBd@OG6$KnUca5or0KDq%wM1Q#C-IhrCt4xuIlg)a`x;Y$ay~}gq+7ECj65Zl+deeq z=snl%K>!Mt6})*wN+0Q&rlK{SFN?qG^5OlJk%n+tT`0izS5i@5x(;C1hsNw~y#DNn zUHv==W1sp%Y9~*!RQG6#;_}GK9=(Fy-GDtQ#IA+!`y}Z2EPh)RmZ4EQJvn2?<*v%U z!`3FU2qMujaq!?a@Kq|Q6{aZ*B<6P0Ykeav+)`z4(1sY+rcqq0Y9=PiUAGj>I-X({x(s}MtQjvJ@w1vKoG z_qsw^x|Zz!#s1lBarE#mBQ`#yb>~KA$)UHkY1HF7AfV+@DeKcJqJZdm`%br@;0QI)5XZ=q-?8$+49QO(zQKu8Loq0|3-18XX3dc-T@%#(d9 zQ%9fgPcwBN9Ci+z0+e*|6}AR6st`G(U=KFZ_{$_4Gf2py) ze5vev8l<%M_1Qk`(moV(g-l968!fDTwbkosW;zhhkht~iVxu>iNAbta`hrq{o*v-75031{YabpZi}F8@#)25|-z=;jzqF|-rgu9F6HU@?>swMR9fS$2 zu5eEmp+C(PzN}7UKG}~SoGLsm5?weN5^u5kZgRia_~WgAYQOU7Sb+EK?sq-NUvnKN z6TTij!fT)BeYCro;Y^@?HKEp`vWi>c53!F~<}u+n(KkIRCSz%^@y(SMJ;iAy3e(xO zX4g;gjMBXRwpPq6AA=y67z%aZti&`m1Z<}#jmek{=CW7)XjWz_o}%{H@~&30hAbf-bj!= zmS)rh%zK`Pw$>xEwz0XLFH7JV;y0g7P2A6ijCF{38Dky0V^N=?&jJ?DH}bW7#{F-5 zg7FYVUtQLN@nX}xMIH+)UKb6Y%LqJ(-c<7msa7*Pw8w}IpE&4LY^A_t+I~xkD5-ID z;jdeUD7$E^3p}G%Z8CGM_}7dn*w0d}Di9ga2^0rjU>^DJpQHfyQl>?jyS7zRn7=qXe;SXuNJ&bqL7 zSuZ^UXjwlRWrtnTa4;YJ<&Z*&0%EIS)g&T21hCj#Nk?rq_8qfG#~gOsAHMwJOtP01 zY>n0vFC%ka0bE=|?fcrj9>Wm=Z#TchH7cY<(~fhWWVC6K_#NhZdZk5}vVOkRTiI@~ zCTOU#6}KMiwzqT>EQs!(zDUT4p;4tQ`yQZsVKrW?1*kSLNIY)ew{<%it=B?JuQ`GR zQ;VnfuSki59-0MV-(KFxfB%n81C_&aeB*GVz01JfUCOXw{Rl<<%8U+D%`SzE+ApNF zMkaCoq`h6x9z{0|qYlVuQZ;T7KE*9k?YQG*#>?~&oiJJkb5-WgPM36B1+{x){`01O zaje-`fVmXuZ+w&9dW9EztR6$)#WwqgZNwn9)gGA7RU6IUFlqJZIH(YBw361Ca9-lJ z_@${GC}WoQQ#n&sdXG^p?ZcNP{kH%1?#G5?5`F7~=6mzA`v*@_45!!)n(#+1`yp=C_ekxiCboRQrY(_T zCk*G0`(zgJcjn3PVviez)rp}7f2lG&IS@@YX?rIOVWgE>AFuO~7-nhYILWSQaY zvYI(`7^yF)nwq^?F-cN@O$vBryA+|Fzaf4-UaZPlK@>=e-=)Ev|b96EBs)y6E zzV^l@&pFK?rBbO!9QIv!$LK;RslY6qv#wb=bfjlG_k(*D^I}bn>3U}hq~ebxbU^)> zUv`aOMUA8E?_Z3{Hz%tTxjJsXCve`$*lV9kQRl5=(psBd{k06U?vOb}=-;cC7^)Ky 
zD#Cg}JEiloTQb?UOMPw5j>$9+Mqy>Xw?eOn7O`&m0vJARk%)^{Maj{`nXL|=nXQ>( z#D#b_qF&fBeDWCG|Hw=i($(lpL2>gtRr88KiYV3{?c}MhJCp@*OvVmC{2V?TAE*Np z4p;e}g`G)egx7~{CFs@og&|j}^fxUqmFK(m7oOqI^*?$@wWCB2Hr_(Zcw4)lR>bxV z`6;?e^zw4o&;SqgaTP7kEdO(OlG4jbbN6vFo=3h9pTqFv@s}#RdJ%$2B_68&lR4k^ zi=nh(^%dhUtz~}0b-NF+J8PlN&S{=-rIIeKmaW|jp=7D{GchPDO0f+3i(zrmc?h~Q z?Y_~uZ@Twv61&8#+%1cOmv(tV`|E!4w9C6@kjBgMVLJY?v#NU@vpFTjvKN*yCBBil zDW2j;^Pg>hva0*8DDgo=ao@+A5K0)qasmSG}5+yZ99^L^5A37@d*9Ic}J1f}|To5*fAV zvLC--aAS9R&Fd`ml~l~&N1adzb(%A-Zniv4U6t8T{D6WIpJKq{>~2fKV~zl1`XG#l z`9TcQxX{8cJXb`f=M*bP9LyOMk)D0x1LkEgG%V_&P9vAMxA!?In^DNB0(Hl58$>1c3l!BLcJ`DnufQq&61jyoxxcAc$n zGk(@PkGsjy@&lTaKP&3vO?_w#t&+xz_k z@4IdMz;AON$GO(N_I+RLJPz;wNnhDTNbUqXl6258@dX!NZQU)d>EnbTdn`BVA*{WN zF3P>PgK_ciJH7+08I2!)No4hPrGW?THJ=AobERLYSHh2KzM&4+md@)i@74QcAtQP1 z*|9-ky!(!xU?${dO@e^oU@gyxZo)O_Ejt88F{g z(G4hBboHb#)wv1RF%CWK3i34m>;*h zdwh8>%XMyk(q6+Ld22_-?L=tKXJV-D$Y^%4CzT!(xy_F%R|wtxwc2_JmP==5lc&kZ zEAH9Xi6MWxCGDierZGI3uipH9lT8DHgU2jUJ`ahlZrypo2k5#Y#w6cDzhoOkv zOOv`L7r{)GyFxF!Ty~9NZIKpc{L8C@-~(#bs!-$kwztOf$3-$IDWhJQvtVf@HrfS2 zBH@k{jZVI`rgTp>QtzKpNT!{2<`mA?fxnin74TPhc*GRE9woM>|cm$7<}f=J0*|JJfEK8 z!kJ{iY^?u;l%=O2;43oU1xcgfrV_tY6>Py^cJ3zhW}8Rd6H-$>JY@+~*d&I6JMVxF z0(T#44!08cWoKP2&lsR}O|H7Dz5(kz5W+0qzXOwvR5G#x5kky51a&Y!nZ;w9w26q0 zEk*_pMpadTd>{U`W^K1OiRj2wO5CJ_+cy?I{94?h$$u925N2$C6BNw;?YG!?56Srt ze%uC4=)FT^-FKq=ZTX`9hy1=n2$Ifw6y8GK{S7#P|5%ex=RG2^VARDopf7)=*L2kX zLqCC_f_fHS5Ke!o2(}<$D9M$jJPO3x9c@ft&h+>4sMYpyWw z!LqwB1V+82++ivtmYv(V*rBIc%;I`o*N8ZmYtpLASV3VM6y;1ONp{&T z9_{$XxCj@M|9&Co|MB48d_z69PhC157@f7M6)wj;82CbNPF3SgG1g%)7N|(5)XO5* zPG0^rE{(%f)d$E4oTPFf~2a#d9v6$>$tQtuQiwAD=BjOtA!WCJNCMgV8M*Je)A|q#BC{Lu0+1wOCez z5Bqu_coyIZqQr-Cm0jr*1=AvJz<3~u>y$tQwzEJp4sx|DG2>C@V|Vs^6(Rdu(0e@3 zq03r$d$@w(;;(x_YJw0Wd!kT;^wg^^9~kg9=vOWmUX%QTV&qN@9n-5YE?MZsqQQ78 zN`01hadTb8y}1oYPmr*b`bB|}0xYVeOA9G>@>SrhQmJSM&OUN+GkmeW>I5i`=GdT+ z&Qu>AA#jJtQ`)OgZ#4XDX|xf*3GkDqOMj{V*5{T1|AW^Z=_y;)j_ z6D;ppti98%hG;%nq*LnD>T{5NT7`>whA;Kn|8#DCN6p2OM~*1*n3R=^vqL&Pzqcgy 
zY|#!F){p*S@tMdXBQ&Q+KIPDgZQ^Bj?7nR%B6J3h^U8`@^?=4CA-VAbVnB4OKUX1q zpKO~(UG$!1aZQ%j#8Af}i*NJKJo^ZewmH&t2iw_w58nRS^e*Zcxci|vl6f&Pq~VKU z4FSp3R~z0Di+rZw7Wf_LL!K+yn2 zDx^uGwPq=&Uiod;MZe92AnNHXDPIzhY;EXA9)52usBm&}<74Q(CF5X9fW+)YXTTHZ zhLH68&LM_1@ul2Kx6;ISmAF~gLdlrTiX{?-WeNc`k;^p)aSkFzTip1+nADFlj(=J% zzi_Sg${A3O%RyfNeGU=mL@DKv&-lBUaKB2-DU@?DQX%wgdRut(I=ssdTKg&?@|zKo zMzKb9;uaL%y__A!OqkAsQA$1%XiICiRMAr7gm@+9yZApABMZ?Pt?cAU*1<*9VMcwN z_kKgF8uj0ZTHEj3pu7T0C*kbYAB*|2xp?fa63+nw3CIpaSZX$jyMhyfja{;rjccSO z2o*CzbdP5Sp27U0$4}ZgcA5|nKYkeX2s8b8X?8|j(Xa}0gc7)Ex6NEt5owhTx-!42 zF+#sj?&)4y30;|#Md~owQlhEr!jN-se_{BovDm-M0fH9V{?^t{MgY!B%KK@F3aU9J zbo2ue(ck+7MVLSoZ(N)s#G-38RjP%*!e_2}-S0y0Uf4|@A__}?>~P*+tstB4?1}3H zC|72s9>JL99f#&z#$*W>=N|6V0FQ;_Uh=he+J*Z+UApr0^2H0J$`|pvRJI6XAC}SW zhH&!Mw>Z62da@R5{)UqRA$e&{sZKfmC%^Ij&hL6?CdRk|qO{RG+un4c!N zduiIsa6)9I31{W@Na(WCa+A5I=gFbTOu# zx@^^(s7*i0Tw{RFncH)@gxedH|i!y4N{Om z#$C&mT>r#F@i&YH?`lYRLyw(zF#mcMWfl?bs+~a4snwNQsv{W`d9RM&0QKafJC26( zXEaajSTHv>(iS_V#&3D&z-CftfO5EvN+;c0lL3SoUb#s1Z^SkkR4satFAu2JFPwOxeUArOnG9FN4-k{OBt3r*) zqn`af*X32yKRIXi({p<&LG-j~Im>i&vSL@{kjF7WEp(WWs0i^y_wg^8ys7#hqw6Nr zz7%m2%D-RO_5;#}8uHp#(n{AJ7E@Sz)7~XjQ&}kbSXcJ(==ZyVM4FvBn{G2+&&!Ww zMcnKgPckCKy-d_)6QaJa?$|~lb~UMy5H6WJpd^Gn5iC7&i^17kQa(dx2auD?&0H@r zH!4V^d+~T|c2&#K-sP)^NME|zy`_%5!Xe6`Dgxkhy3Vogj;M{*sBS42&T^|9gHOVy zQfv4fX4IPf-Jz=|v@>VD_!W~8ncPA+5Z-yA-|JBOdmX@N#KkkSBC2koy^&0~1+Lp&53bDqVqSq^^Qzi|Q^7Ata#3^$ddxoj7UZnWHQ z3gNA5nMAli-F|`j`%T4v?RgXeV-n%Od0Mh+UFBe@NlRuAbph1Mw5sH?kH;1|mbK^w zsdWqJ(`t{Lr-C-y3B(W%525ZVQeaG{&w^2m;q%FLMu!*+C4yyV%s0aeCEII>^kDbq zPwu|?M*`fJ{O{k>^rakYD&)qg3}5oVXqCDUp^-1tAk66t?o^HlvB_cH$~kTwxNxN2t!_q^wBvqu!c~JJIK&1fx zyqB@`e=!JZDEM9qI45-3K`T9K9bBbrL8)mD(3Cyp`I+BIs9yB!rIiB1?wzfyW!ckO zv?K4VKi&9H=6VW2LtyO*M>>NG>UdOaI+VraYL9EtL$Y7$|4{Ru3JsDwHpG>JdhE3e~>CgfMx%o${QddKLap<P+oRAxwFOd$Lj;5-|asVV?PmkLi(hO5%x|l%J6xl;$^1~>luOg^}gg`)1bh{ zZC-Q9jXrkyC`7BX`CCB{LO2lN3~VghN^_a}xc`1b%7xiwuCk%Q*N4ad>i&~OOExC0 z|34mqou1ussB(_XOj=F-%q!(*5YvLWZV^t%D7Q$NdfE7>m!Swh 
z)y93WVJiwJ&s=nv7xCG@G;b^0#dNN`+`NKQf_vu(X@PwhK#Za;zsoel~ux^b)gF%Ql){@PG`jOsAz4RHp}?xce;4iImO znyhE>1rz8%V*ZgezlKE(tEUrpPj|OR`e)jnBvVrxUc9zAu{{Ba(+JGG0(qdiU#auS zVcC+0QfkP0b(oKWx~l9b7>8jbcNefXIWn=l?W_(}@dMAxeslnGv(YeYy@aFt}c@>tElJdK@jW@R*vZH-<5us zD1ef6LZj|2XHe(gs!4ZWfsGsg6J@%8Z%V@w1R`vA7|Jx5PUB3u)Tmffe2+=J$F^50 zMYMp;WTu$%VA}T*a~3CnXbkiYk&49TEVfSAA;@!8laFTI`0h2a&@m8wa=mBT)x%~s z)EEZ~9nq*N=EZS(PTL*G0&Uto<`y@$VvvQG049 z%!f+~q72Td&})o=*gjSybQF`Ov~yX8QZmnPV(ZiKiyzBD5QXuUx<%Wi6nB-$-5M=N2wqB_d5ucKo)1o4QLL-u*BXYK-|Vtmq{5 zkGlDv*7`5A>z9Xzga9w0yC}Wr6`yv>M!qZ&Q34LGL*H?JdWAy5 zu?dW8YocT=a_KlCd$3E(wpw{w&vjD{)16#p8VSQ(#e~!I)Ky1`sE)vcPH5##Qqtgs zQv>pFgp8rgUVh5~NF)z6wkD1GJvcFz zhaZ^$E5oEq2&nFXwlZ2gcEk5OITOU7Z_7zN12=F615EC=IzmV3$vok&BRwNEiP# zo|0&pno29d&=>aGS+ESFsFBI`U>a0FxAY+-xYW$&Pw%x5uHhmaKg50UU2|_gCR{g?x z6aL2);D;57az5F-W2qwOvg`-dmyT&nh)Z{ef{yo2zl{zb)xY2U4d^S9Kb!CZq5jt{ zaDGFnVZnacHgrEc{w_%moB&!ke=A??8tT7U!7U`7_Mh3MTR_?W`+vf4XNVQQuC_Zw zs67W{yO(Rt@^~$_FE^s6BkcvX0;^-7#^if7Xl4@1Um$JH%|F6!3Hc_#z_I%u2mAhG zsJV|&$^d)EmvBV7IZm1N&lXpaGrsJN&j-bKddbRU|F;n^t8m~2$d2mJ#5lFdHv2Wh z&Z61r>Z1;QU2T}wJ}=<}v-z}=RHk->J^{B!Bu9FVf)6p~34-HNJJ%X@5ASuQaJX;ZV2;ccSJ=-m4k_jR#p++Y$2e799Q{kEL^q))A*t2JK_SPT zxW<{$?kb(b47_lj^1VV|&cqZQjp{HVh8$;_a%*X=+jr;guBMA{^@qMnjVJ`i67zJf zG~;okw<=7iUB2HVw?2qcde!L#7Q%($7GwW;Hi9UG@7;tQ|Fd>s0#$rryI>z$-<`+i zSbGZI9bpml>hO4dm7pN;G?cLSpvm$*;GMSLo@1H%`jfqJAAz{d=%T3*QOY46CywdT zY^(UFrO@*C9fxXQ@u9;0j!6F4LyA0KosEo8w*i^q0tQ>wEabH%%kJ=q!h~C{D|6!N zm650z^;VkNVMH8_)*+FC58ZHrwuXM57-HCuZySG`ivr{C{z`)P(V(ryG0Z7Al48{I z>9)i9shV&+h~}hH>Uoi$PDF**oOpm`y|J&U%j23IB9uevg1b$EQv+Voz_UeX0I6&FL*8AIU8K zZ20iCqwC26WPkxE6OhV_bchhl?_4x~jQFKo)j5& zh^WEF7cK~P^7w|H+Z*Tga$Q7KS3sZ0iGD z6qzBI#k1zbdce;yS@fVIblCvZJ(FEba9p-j9!&VBQ{;}JRf}A>&5;ft!R;BPE_3gO z{Y3ZYDe`i7;>1gl=sl~Yk+O3Jz^JSzBmRL?rM_6oQ#_`K+c;MkFZ<5JCb6^_&^R8n zFxW6J^^E^qT^dlj?O(I^il`?rN1<4?bV&t|eFKMSdvKe}Zx*wu9KSa8Qs(s%ub*gK zN|bvVX#LqpN(yqwnbUQroe2thW~qo=Wm-8(^K?l7DAsM|{)d138t-(pJq1Rvch7d+ 
zUNXb;K9P?t)ECRY{(d$$by7fko>MgEZ?ows;jw8Nu#DV&d${Nru)To{C8dk~N&OD~h}U|%c_;~md1=>L*2jGz*51Ud zDyNqoE_h3(KESO0XOC2ALOr8o?tllRYB0$DKk(D!VMwd8O`%qkqp;?*6bM!*E?9Y= zaXTpkO<}IPJX6QyU-xA(>aelVrjK{XOWJ@(2&Z?2)Bi(|WRVu()C~3;2=Jo#(VrN7 zf)VjvCH>~xqk70^)D!uO-S4T--*=j3Hgngt&Pb0U7C+u&VXo$i^-g&I9U7cI<5DRr zq}TCNkuPE{52ffv1rq2mE7~g*qEr`g&Wuo~9J!Q9H=p@|RX(->h)A6mriI`v2`D#Lfb zQ^I(j#Rl;?HIt)*_|HRU{epRWF`BW0gi(xRF!Du(Dej7j7h}Bi5V{J9@={b%c#wYB zd*&1ymL+ikFMf1qnwy@l^DCQdUZQ1a4Aq7IYC=#$?VwOCEoi)Rv1sqJ5T!DPMil>Q z+G3)x)XXB6r9J9(HdTZtwWIe>uWCrMpu3Z7XC*c2s*L)ozi3tlr6wuy`G?PAO*l?1 zl%~hHNY>1|LIsUkg}ATAL;}%a*mE zLPT-F-7mHGMG5CUSbuFWc^a%bOf;{sAmMOI?j$1nh=X==F88E1q|X-2)eXj=s*Jm6 z#(>mFvYZcRTt*=glsq`|Y$u>v$=b+JfNIzN59;4H{*_A9?+;j|siX6(F?Ro}D$k^A z3R;lj4^qnXN3ly$CpL_N#9QQ%sEo*&0Pd=bv|-+opUmU@ zvo>DF&+<{e$G+&-hS^WJdh;Bj5ai|+_pg#9|3E+RR|5trda0tyY9KT$LP?V2)#*u| zdDe5n;|uNv-kk2bpdqCPJ;J^_rz5kk6F1p;ueS|rz+zvjbKxUqD5fgxyMAaj`*s8F ze&{Vwa1qxeMmOSe6cI29Qg1vjAg@+nFIj&TNt+fz!9b6Y-!$Sp(vxb)UAImsm(TN+ z+E}D116T3-&7bX}i4v_hI)T<^b^l8C;5lRZTUzzj@rO2AfN`t0t9!!Sz87IWrY#Uw zUM}nS`(oco$c=74V9bhpf@~T9)OX$Kda}+I%MPpnCmVg-OJAaT6+giH&kovg2;I`W zXTJHUBCYU=sVTKu#L`eX5rH{fu`9U*h58`tv@MfZ**1mN@gdtrM4>!`A--j`|2)9) zlD|qC?A&GLf`T~Or2Zktti-J z?G7M&_Oqv_LSMJX67*MYLAmZhO&4HriHzruRii`kF^9Nf+)R5Wo)E3=Bt18u_*3=g zgfO_$RP)*7Vfk_nmz`~kXvkO^5>%EwOUYLhOs-0zga^U%ZWlux~4Q-U5 zul7iFk*JvSw#ioZt&s;Ls`;iIHr@53i`&$#xh6;3xSe!Fv^wXol%F`$V1&{ljkJ&4 z#*TV@8yE-pRi#mL<^nb0zqrikDO01!wAGQ^{l4~>XsMC@mC<$ww`w3^X}Kt_1}!~u=OWPfZyk*C#l0ZI~UBvy(>Q;He;nOD(~{Xf1cwO)ziHU zzF#_Xw~X}709}Pwok-9=$}n z{?^Olc&~)xBYJv~Z=bsiGJ=z*ymmdv9G%C$_6pIgiSSK2?wXm#P)Uhv@v9TvWmAKM^!?{9bI83r9R3cb=i?Fq^WR@_ zHJ=apMg{`AuK+c-l^bZApj$tqW->1m?j2v5nqEXtD7yxARoZK~?L0pPnLR>paF!C^ z$L=@WcBwpV_7_6bU!2TN9LWODXGyQHJ*#I&I2CqYAqT)*(-<8Lli#%!h!UV!Br~YV z5Z#-ZE-<-H4KUKvb1Lbrdgi97;DjOg9owIOY96NX?#4*6^(K;l?*RVUX;Q$%(I9B3dxw)eIy-1S{k~qr!ouSZ@I3~+DrgpbyMq8n!=HNXhwizmieIz@ zn6l$o8L@S}aVA43(>l)yfDq#2q%a^NpL``Zf=;6=l;cd|%4=`5u9->%jr#te0?*jX 
z_O!)QmfPR;uw|ly+uwbEF=?MK@cl+QIq6CMwofb2|26t7vwwl#bT`w_=WsaOub^4X zZJ|A9WDqa?LS+@npF`Gso_Q_8<3cx01+5GvV@;Ux76dVfO!m7ZCUknmewCKIzAW0s zO}{uuMqf>R?`#_L+{kO{G(kqKbmdiCK?71aNW($y-P(x1!QK8F@CP;5H{dsvVXWiV zco>W;v-0Uc>Mh+h#nXD*L59om`*l%!gW<>Y<`Y045l1ctMz7D+B9bO55qxY27xq)- z*Rizd-ii*92O=;oofL#pBd=AsNX?5bZOL*XK3VL5K6zJ6d3};ET#hQ`tVnw;h9MpZ zU4{Lx6b%kT_SnXdp<4mNRo9(V&Z{eAYN3HbkTZSm>SD*Q_d#TwBgd=giV%rT$MQ+t zM=tCZsVPu%le9#m()ruKKP4atGq1f^CO9v&9z{AYG=E$cQXc5IBO0_7&LK^YT}}rW zzePX=){j4VxESxYHf}?&g??h1>Oer10(q|=i%#2X zZo^y$eenTvoo}TfmiJ!I!q}31zSB=ARAuyzaqN;4tK$$&-8!bq z6M@ixT*;LEXZKHV>I`zdrHJJUSqTSn9i^(3mUv-7kUvcDH`b4K~(dIPT04 zW>5^-L^8ye$XCEw@mVN*T;cn`UbuC}q{xSg<@+KE70FhynzY{!+SBBf`W#eb+NeJX z_S$Us$u-y7ql%TVB=h1hSA%3(Ev-nWb^&R)<{ksJnmR5BC@+jmCjve?51dVs_IZaB z6I6%y39tDPRxfqB%!|Nh?;n)YzQ%t zUxJQf{cndw?b&a_f*BavvAGGl4VnvEFIInj6ZBzh+26!lNe8?CjSewK9n(Vts=Llv z7Cbzp<@*8I*}Wp)^f0jQ%XX6BsLjoffPB{KQx6X$W8&k5dVP?n!QqfM)S4umoP#Y|waQ9DbWf?K%^o4e5g&HPHeR=`8cR?9vW5l}E{`R(E%%Po z;^4jkn}OcWLn~*#vXuGenF#Wi+JJzBp+CO*BaHd;uU*z&fW(9&&MrwhbQBWxc#K$$ zSCk@)K37COGaLC3N%kPm{(@>H@(%|1W)?n zog4?vir(?3-+oJer~iI1_0?1*?qwBbD9Vy|*lyrWqYfqd1Oq1zQwL3fe{}Ym;EPefG@WqK8_eWvY$WzktSBtGK~yHHXW*WD ztCDLl|Mg%hB9-&0`mP?lC%%A=G+z+_#eQ1|A5f0aWyR0#eYnczkj}H-rugHB%k-96Rb`G0@Q@~t?OPPQrivDqlfNj z4?m|i-PZ>BAM zQ+2#ge$wO9uKh3jdMxu7LB?pWF93sg7~qHI@1Jjtfh#L%>*bF==Li=@mIR;^of{&C zclV+8%wXTXy4klb5}p&(aFK(KSZhuee_w8ewaSkmPMB|OqI0TfK$D5DHxKHn8hrUS zwSEUY^D9u+qc8^S-;uJaE$G=;73VcZ2&`WfcKq3^!p%pEHIYcvo0T@z(SKDltLp(wxRjXpkE;UI+7k(Ei)tFoRhOmm5+ zx{ilO_=xvek%w4!CQ1L5-9Afxt9|0r1{P_Z1rK*$?VXs?-X%oWPxs%+dj9v)E+8E! 
zZSR7Ts$^{`$L}j_i{c+zNoEP2%W;6A$`T;OMUQ*deoNKb7!%z zQpdR=9curt39r`AO>$C8`}T3l;pt0$QIM(!K-X?C>?OMB^21Nj2Zp!y>v45^5B!9E zZH3c#6f`!pj#eYnt$q|&oHs6fha2Lb{8q;px1c^pI(e7!p3{X#E0A$dNHs`gIb|hQ zZQIeuh(r>!L%AmQO4-`kv;A6eJyLSYZ~+mCMnyIe`I-JGtuKA$-N}YFO%{tXM!+`3 zKUH*t@|5k@1Y5V-Qa9gSHhfMPnMyD(j*OpF9y1Z_Z*qxRp~SuZc;=2hbHucZu_>po zeyE!Yy?bZh5&EO&?Ea;nKb&;#aOIaN+J< zPUGF+#!%Ke3n)3sD~;wA*3_VK7@S3N?6$!9E`n#*v% z%{Jj{dOhvf$@DbRFPmS*Ip8g2+>2I#CmC0|U4mp^{ugO1o5I=Wk5Qix3Eg9(;gY(l zwRz%;xAlJ!AZ?`x)U8DT_wxf*|lW)nxC9j>vdH;FaNo&sA_D>X>#- z<15Kka4GcK0vB|_O_fSiwoUrd=_sKCa}6odW|%Y7k22itXl7N-I10C?cA*4pH*RIa z;nc9RS|y=*f<5aw zqrq$n5^vkWvBMp*M}_X2&X=pBi)fT59yqP&Y#d-dDx^~+_9D>^x+?MU#XXPSJ>Mv5 z6v%I^cw;H9^d;%XKFyu+Prdb<5>+&CUm~sPDKi3q2R4>BlKS+7fRwEJxuuBXagxNw zy*=Tr{$pB=Zl*<~DqZgV37fO)3`7oOe|kS7-*T~BbUN%324|X*+A0xGw@)|kMNoY_ z*C`YslUdF$A~=ET`q8}l3mTy!pbH`I?54Vg*I^qh?ec1d8jZ-=dpZJ0OLwOQ?GF6tzsp-m*J!hR+S>*{S1(h_KZ*w>xG2g z8UIk#uxK1Bv*L4>tVnuqfjogk7Nh8>JJvVBr6h&&x?3M>knff z#elQ5#dE8)yR$1@k-iNR=F8}rP}zD)>{Bi$CY~5#9(bXzsOZV zF?@X1+%L)T`mF5nfv{I zurvkMq=>WphuZyIILDZ~?lwnK1-x4AsrB7+eBG};{%U0WQ-d8{k>z;ey{&><%t11J z;ZL2P=ArFv0-NzC5S~PP`_DvR#?+nVVJSVTcPE-sx!MIgm32%h6b;PhT9x-(?U3b< zg!dDpwemVMVUW(_2XFU+^bZ;jqKZ-%2NJPn9Vfg8k z$Q_n)o)8|DDteGXTv>xZD7Um4kuVCid{I~MgYxYx?*Y36>^%2U?$o;3=6-4un+YVT zyG-=#Qu*g{akKGQPE0hBGLN~Tn34D8MCO<`ME$HX0B@MoxMZ_fd;)%5T7UOE#25JQ zm0BZ7BkO%CI2^Tk_JSRoeq_M9#XhidG`M-+PN@;i&AKkk=MJ{sRFty%=IVqHu0eHD zpK|2=O=AW=|=^RXX75cp!5|v6f%^ym%YIBzxmhaExDZ-E`16q3{=E* zOljZmSDhCdxC>LiFd5*ekZQADmx-^~fMoh{i$d)EixcK^N@HK){$m;~ZMpeZ2zR~A1mE_i zNGaX9C{|T*Sy-b)NWuS>01W&`A+8LdD;)Ps!PtlG?UgiR6%#)vKz! 
zr8F4Rh3Pr6Tidw@L-YmheHotdsnx*0(S1pTcVEwMfgul+OJrDzMwnufN*YP8zDwNu zCPDWhJ)k@|g%?hM7TrWwHBSv-(-m7pdx|#~ie9_13X(ZpIY7{*D%yu8i`F%LlZ~8W zi`*w^o3S8AqNjka13@^)+TGssO$j|Z2)w1QG0oTXk%z`6f#XZ?7Y7j}C#jE~i-~8< zkvuzX0K&7sv|f6H3ZRcUg@V9xg_2Kd6<h43?Bi&5tWb2AJyTRtOd~o>>mtDHhRJj;-Tnz7LdEj($+yUIi+amV6 z=m=YnAHJ2tY+zzRP7pp%&&<@~)%HqVee!-jreX5x)cF}{>nrj#2Y_th0akK>PR3`a zIzO`RhO4HJa!si$uy2`Gc}PMAX@tr0yg6dxks<4lSq!S7u1boF25*Yh`ru3I6MobA zPNlG157=@l!A6B(%Y}G8)24}B9w1Zrkj0w7eEA?`B1>7hGmma7GGH>jfnhY) z#-xw4lgS8Xa%lQi@5o}Ke~iVFDfPAY66_-{`rUGJdw0o;2hu5P+(V5_=TCR#NwY{3 zGt+%we7>1HH^o0b`lwdKGnI4@q*m zt`|Y$p|f9~r72Z#RJ9Cm0$V;jHKt^$(4HvlCVTk#r+p*uz^5?1 zWt-QA&$$-~dfX++5)T|#`tA1VZfeb`mGO1+A<# zTV2yhdu(#l_Nz2Sd}PmVZ7xSJ(q~xFibl@PLFCCIqa5>E;Yx*PQ)S-g&YE zy0hDY^&~;DxT^rjuNcuE){wL>#Ef?uUq)#pf%%@=`OMSnXY?kok-HA%CmcY3*nXaNDatgZS|3?{NIL8Mv=V0_txMwN z%Gr~#hM1fegD1>B2D4HJ4>*Ke%In)?q?P(MZ-RD}pm(4}#>wx$okgHCO&Pt@9+~(h z3Y)P81Y`4OV|w~z^pljmg1UPs>-SvFCS zkMBe)&Toc_OSenk_N{oie7y=zDb;!%)CiBz=QT~?NI3{PaOc280Mko>PvtstLk-O97RuO3gF zm?WR|PJyWg)^fk9Tw$vmP%xW@q_dAZv8ZZWe=e=dwZ)#JnCoWD2wyS{*)Iw4p-}0O zt*qN*EUhuBO5{u_FatKb=I=}gaB{b2CmCF(M!m5Pe$=b1MsS`mbwTHPcTsGv>hvTy znJ3!{S^J1;&fjUE=0mn``vA z>io#`-LXlZ`l$_m1{oO zbSKtz-3DE5SraMt=N3z$eif(P=rI@I+;mt;^{D3j_b3j&67w~~7M3{>ww ziE!is$S*%?38Lq@+=8hy_1iLidZ=-!0ZQqU1o;_Ea_eRD!M(8;M%R3(6MmL2zh%_D z?_e3UQkNtJB{Q}AxA-wfTH)EhHZ{>+Ts1Y;<0b*7-Ya2>7Mrb{3VT?nJ6BLv3BUMO zvM96MZb1A->zmgUWQ}w!fEj(Wrm5ID)ysYJ-6xFiuiT{(%1Ja-rC5~}=s2hRnYNFQ zJAkpM+q7Z2X(z55-F20FQlc&S0^ zBm;U7Ku3RMFEfPrAOtWv1k^G(tutxPg`QqRoIIajMlz0p~2(k7BgHa3|& z9J(&i%6MmQD9zVs0O6N*OPe&TDE>9f7q%?t}bpRCSqfKP$lkt z^44B{`Q<5k$`}6>62&nWaKq%oo$%$vXHi_Scbs_G*}4VcrPWNprDr=jNv!InH$o(1 zh?(>unvJ#>LcnTgq11pLW;C%i*rB*LztKq6 z#<|f04v0mb)3aDAD}=SU92X?}G`^+_%Is}4d(Joflaaw<`7(lDX;nq+m1paq-PEha zJRrtK)8_hyN{B%CbU)XNlhw_wCXC{yp`B1=-E^&btEHYXUb960R)9;tJF!EVOTZGs zWa(!xs+xa<^HR0MPC?yjed-No)(9RgfIaBaAA@E1EXQE=xCKtGuBs?)p<{NbJB zj#AL>ZsX%?z5=N|k(YuYI03s6rJ+?Xa(2ybSdM$BlRmw&WBMY69?ae|Clk)vuqIxq 
zDHdngiat2!e|B8nc$KvANIO-hi-TDP`Lt8Mn4;zJ*0f%+!QK~$Qj*;anD+w*Wt|6eY`vdf^6eSSqM#^mwmHIP<7j%YCYm z%@h}Bh-)O&JT2Kgw?%&C_)$Z2IA(LovNRgTK9PKCMGekfTZaRRi)&HLKJWwKhqH_c5@dV6b$+C$n$7V@WM1TB_x`Y_F zB4H>kqr7~6M#GhI%|sf)<`$`E?qb?Fdt5IKU=uLRD!) zUtPueV1sc=m=(vGaN(Os-b$IAxE-owxM-rN(F(};9D>QN9^q60zIi4uHraOjA6E( znit~+J2J~&ov9~pw}myk*>}-B#dqkJTWD;FcrEk?6bY|~;=hOi5{{a!I`fc`xLb3G z^;wO&3&7m?dR@+x^A!oTBf4)QU~kz1@8DK{0t-nwlj{JiY?wr^z9eR65&Qr@g$4kA zdos{^#N}x31otcpSd*r#=b0j}sZ1xSMJL&bU)BMD8Pv5qs8bU35Y2ict5a|Fl8Cu2 zWp7uamD9;0RSkt zEyO3obVpw-R44q3Z*E7?z_G#WYOb0ZN<%%kW+Ba3*L}d$`tr-gcd}0iX>@Xy;up4a z88ys#rF`x9b0@WlB_MkjnV8dA$;yJUJns~dScCz zidS5+WcV;MZtStmdiM`Jm`Zinax64H0RbB7hTs$@hHz`obWbSnjV(9=^XF7jcoj3a zzg_tZBJAU7Xb3XiLcFcq_1mhbKWarES2LzFNsqk#80X`pQCE^i0s-gJQ6&>LpMKjF zzS1s04GSCh%_5g!OOk^FG9IQ3h(JRk#7N(cVe5{5tA~&)V9(FUZt!3c7OHCu5>>Ds|5f&-2prxT;cV1)!l06;S5}NgYzh&jy3ncS8=2Q(h zhalb}Y+4<^jv#Cgb|C7MCJnZxuCG0Bdv>00)o+f&UAcUh;F%ZwOG}!INDeb(mi8!2 zjas-1=9By)dTgQ{cAhc=XgdBGYqU`Gy3!{_!$*t#k+A9fZmrh` zlE&tPx3yjksB%i4{n?-Wnbh*?U!NSOtVy%!){EL2t$lEk&tto}0kPH=ye4396EE9v zgkPB$_C_W-JpQ`0TU|b}88tS)5Z&)mcYX)|z9;qq?>Y`R^NtzrbvX}ut&%0$LTZ26 zl&q~Zds#c-1ysjtGCSFqa+C>M#c3jP>zpjW`Ejj7(GluZIXv+NvG?8X?fLL0rc`Iy z)%~<=56{A;q5-4xo3=*KtAw2yGANvOI*lwZ&xnEzAPJC%k!_Mv)RicE)gdG>K7)33dMqyLXqOdHNo9VDN@{BTD-Uir?|V5pn*bhce&v` z=X|&4uJ5k=o0a_XWS%`Ud-lvgN#~($SA+e3o`ENS$ulKOnzz{Y2Y~sLI<$b^DQ6BDv*sKZ(yzo2f$2X?L zVQn-MPD<>jL08ULR4L;W)JW~$b($~rP?v!=9SCOCK)xP`Y+`5ta>F!!&rtv1N znbvZVdau7y$`2c=hXx}C7-;)Nrb&d{(ws25W3HLIx7F=0M4(O8m5a`^IHs>-kteL)m0UP;}eq1Z9_>~Vjw0u4J6em;qV@uLDDde9rz{e$H7&g;V($AmW z$<@qD4WaY1kYDNDl>Ok(Tm%66D!98C*eI>j2NxOh$^qaNJaBJKwuFyx7Nb{Rb&;cN ztU2E_n>}r^NA(|mQW7tg=Ekq8Cp$RrX1s(ev!w4U=wCt_rD|a>Df{{|z8hI2drl!c zR2w)oYYq6qVo1Tidd_B>P|`AVxpdHZJh)Wno+zo<7YQmb)^-f1fDKqKRj?j9k)c1Y zewxer+&Ump-9|=0Z9C&=(+X*@+12KcG!`)Bl*wEcl53AXhf)JfyBGH<3y*@z8;^1} z3BY8FLhLrmnTv$v>X#jT6jcrIa8}v;744gE+6|Yx^jfa*^lXAJ4vm+nDm-Yzom6-s zC#*QiDj$%ehpS++&U_Ci{il9P7X2Q34#389{kw=VD2d$Ez*3-SPDrN!>qmTIW7u23 
z?5AoRxNG2^#S)y=*6CtK#O~&5e0P#JTb{)~hFW!`-&qV#?v-fiFSGqd!!0;lmHt=t z3W3R+{+gjXC4dJ^9>SDQuT~x5}AI$^w`X9~dz`RS9Ilt7G`{jqg+6C~IhhLkpaJ zA}$Df#6Hoq|GMJ$#TA&_TE5xH5>J}#n_|JBgU1=aYpn?xEWsLdM$eqREoO0gz%^m} z1n*rvwOLB|2>(;$(rtbCaz%FQSo*7H(8d8_&gypuEV2cGYp(mc(cu-F(a<$uf_hS7 zXz*B|8m!FOnvGOV;&27G$pYsnuQj<_qD!#z%#_9|`L%1Zs1yy{^XnMC6~W$X@{1t0 z%y!bzki}Y7ol#YNKEaD5SsaB6pDhVuFxmX8 z_K4N_t_~T?UxxihJi)baXJG&ee}}jKTOZeop0Kw5CvYr%7kKa}D-3X}p*ca?(u8|o zWNIUg40kPcLWSdXRCB$sw+orW@$keK$QstHNTI>Pva^rE;*QK@0INKOR9<}F)UKJ= z3p;5wP|d+s_T-4W8u6oQt2fc5fZCi;bY(7RDUgMTqM~D2h$*uech8J>oJKQ29zSg;dya*ezBq) zolg}FH2Mb`(=|12`CCs<&@K$SLF~}&jDW;5v@jFQN|bL$FYmE*ntcclK6giy_I2$k zVE3SY)AhcDzrL&{PUKsQKOAl0Uf*q5*+y>Mo$(Ss^Q+1(Invh16tZ!XR@2Z@No!Z0 z%qMVBh-IX=QhTM6yN)*o@CI@3kfYX2AlD<9gnIqbXE2VOqTW4Ez^TG=8ErFwV@rse zbCCB6W~Dr66If%%H>RQ7sIHDsa3M5YNsBB|(4ry6xNJHzbr*%%;DcbrW-01>E0Z})hkeCZ~aG0sb^w<8po zFJb4%!&_hWGm1?gh(82XYD9I#y*i9A!F^ksNm+AW2Laz1GP)a3&lbfFj{m--Rvqov z0Vg!0^9c;9Yi?@bS30F;5(2Ngq#9>CFk4<(UA+08_bBZGthm;q+c$+5kwLh{rmK*Y z%Ss`^Z6PLSITk$YdOX^TBc$Bh=_k9S&xbU)*65g;Q5gO)#;Y_U#34qK%;xq|0_@0H%!D$PQ+2)sRWh&`{U-VXtnOsoVq^V-%zw^I-kgzimyh;!2_+ zfOz(@5h`o+Q-HAD)e)7v*dNh`0`DIojVaP)_O@z(s79@KkJ^vTY>i-O6$7JL3XywN zolr_rf%BaJB)!Z2yOcPS>t0>i6=yTmi)SFH z_{m=#tdJNv0*E=!{_Abz|N%y9&+ZMN2`-lXoNkRCy`R*4Q<}5q%q1a}Vf6Jk zT`wX3P=)GX9%QlV0K)>Arr=LD%kPBrY_e|dk^RrplZBS;sO$w+B>oaPMOMOLdBb>g z-P53}k_=tyF@=}^eR2BhQKh4Es+XW%4&QgemeYjAWjX-LIU1j9mHy<53aBseL7ZJzUCN z6190dZ=W5I6kD}SZlm$E7AAE7MF|FQi#2&kS!JJ}xr5zocG^ZI#+yN(K`Z}(g=!bBcKvcJIwo{in1OYv-K5 z0Y?`hm3nw-?4|m|@cwGuPyRu=#6Q52LVE>l?_%h?5xJ~VgkO}(`b52dpEPHvMO4Cz z>l`!AKlO*po`=)eDis*(HsQLVULCT*qV&hqJd=5PWMxlL*Uo3J52QdpJlzgEer4`JmYft+ zdo<{22VaX>rDYDpO`JNa#WIYtIBP6X8el?B)-6omHk45*|5P~{Fr#qeYjES}m8fd?x)Gcg1rH`^4%@8eeg=3wV0GokgX&&~A~^(TCO_iCQ%EtQV;HgXv|jm%>C31v z9lMSZSHqJs9MLoddIuN-JS?`4^!>XTOOKT1*8`&%n}pi`M%obQ?+4nkl~94cu=GDO zXAYSM^A9cN7R#i|Pir`&ELO#JG^~LcLy9D6B1UHjh*2%}!xZYo6tFaDBDW*C-Mz?4 z&JO^`yHv4J_`z(u~!T<{@Zr_ajW@BER1 z$!`ZKI_vZ_1Fhq;_Og=1bT22&85jc!=gi@+TKGg#m6c6P&b=tT 
z{ChQpn@6{?lYKuxz_<;%;blP;7=Tu#;ZRzN>qFU3PmY}5rY^G64_ujEGQE4s6nA}< zQGz5FOf5EOrkE%?SE(-r*t^ZJ-*6bM1_6tAy@MsnMmx@U1wC!C1TLj)ZZjS(hb5FJ zGtSp_P{C;pE3c}u(q~5&Y&pyGWeZl+y2QikF<_;n%1>c0{$KQfiiI>ge1VqIULhi9dm z7kA4_23vcrFGqdYp*yz$@hgc_{mrI|bKWRp*OH?iZOVQ^Ix-2u`%^Iv8_2r>3qJ zw_66)IPJT>@U?lb3_V#U&li3K=u$}U2y>pgEzjL?GP?T)xnt5khuDsYbh#rh{65F| zeKb9(UhCdj`^?kwT9N;yB|X~TKoJ4W_0yAFF9mD>bynh`@yFRn*qsl5sm}~L zbkzGM_$byzx5}tVEaP&-ddOB3OGf1x_asb3u#&zG~Tz*ukDjY3P}JZDU61L)bw(0yiy z{qV@Vnd0`~uoEO?Uz=a?LWO6YF(xvVKF zngZBoWqq&Qg~MpoXv&!`)y$T~ zi+lLy!}SP`KpAv;iZK6azsHo)13ZtQ-LjOmhp61;c^U=u;J#qlm^@R7r7C-65Z4T? zEyV(7wVh==7QMPDEP_cr+7XzkVsi}zUw>b0sK)j(KWR7VsBpd3du{6yj8Q^SK-OnP z>qjF@)XB<649|7)<__A?LlB`4ko)k0()8~ZlRYeLwtA_W#_PR#L; zCca9Q?sb?jZR|cg4f%gAJR%DbUPiUvlgpG;yg?O`Y-sh{%Mak!vRx#&y5_ZTq+?N4 z*}2YT8C_BQz)F$z?*NxP<9y4E=;1s+3GiLU|%9 zna0=51J8xh@V<`vocVFME_(C_(M<8Y>EKm6X9d&8=zjfCqHIX3X?sN{uwaj%F_^># zD9YHt_)%`Z@P0`S)Pw%112psh_6KM6pziUU97rot6zMbVy2!gimx_KOgPo=O&gFLnK)F0VP;6Hrx(kex zS6q7zEcUP10kPd6aHa0r8%8MoEy*c6Gc|@GJRW9vc>z~W+N*1ln&}u|YnkoQ2WZ)r ze&u*eSd~l0zip%;dIAu5Kg+Fz#F$!#>m~1&I}=wSBBi_o!C5*3-ENGo=QB+99`PUF z!tVI#{oMVk2{j`tm$f4aWyi{Sj5RIY5g8uH2#|avExxq%R7P)p<#XaVxLotJIilD#ujbSawJ&P#OH`Mg|p~6{T zyN8|{6hdS+_k(^_xZI6s*)5#3WyLT^J8EicPKx@;GJ`9bWfKBpRUCgb#;M(w=8wud zMp90X#8|I;x|)k-W%U))?JY32oqxaoi9I<9x>7wz;hJ1_ND*D}Yb8hsPug_ALP5Kjs)inUNGW1%q zskX^a4{1?rQV|18{wK>%_yRQrvPv&U3kzP>_RK<^&oWiSFE4>=K5{Xp2FeQS9q~lH zd(~tsuG%M0VFv05hw961G@3rRQ9(drC%&Q}z!yZ&i3=+`!(0%hSttK=>(G<+hV4ErCmhn$e&%)*<$W|? 
zA`Kw6$}RHD%*`qwG+9SsT{*{KN6c>du0i(~@RmM-MXG+mAQO>~c#_;)b;Gt_TE6Uh z_%%P{hZw-&^cBf+_x`wo!Dv;=^Me_rf1-`2V4_^6P-@hk8*g!y+2C)6*DxeT!v<#e z@qF>qZT5)S#Q>cbC1gD>ZR`!ZdV=gtLRdpcsP$IehqkB!wvNWI*US?k=Uvt#Eso1==rh{B!Ny<2fg!Z}u6g;i#@pSz)q76l;v3+4IXm6Jv z<{zZW-IEr@KAl9yb}a9&k{ZsfVQAL8C*^NI22Gud{(dHm_Uh=nO*_72?h7cpz(;r| zrCICtDnF+jQrKjx#!ZeZ9U_Pi+>l{Q>zI?q5lZW%r7!gGsJKG;feYQ>#zb zqSp!IJsgdEcU4{>ZDc$9{k%K&qr25ZuGf!Nuz9J72Y4@KkzDVQNyK*-p6$GN7La-d zF?2RVu05l@O#tl4*@ny{Fs!0?GL-N=0C>WGtljY{Dd>}_+@gZx?4-n#FlI0D0A zXNfxp%_wfO)diEUc<8&`x3z}6HGY8_e<`PagHDmbFXDctbAJl-7S8B58hyCgbS$0S z*V{QTofF3(gFMInG4nDYgBNPJdL&bp#7Fvif|Who67YqbJI#(*=$gIklMi)BdJ{)$ ze1V4M<>1_iWT6Z3(r6P`8&CmZ$vEE}e^%*+N@@tPr=V`qd#sY&z z%mpsqbz|dp1I7JL4x_0MFz)Cug}2e6vd4wc(BeyU4SsWW>3m%);QPJ5rFU(s$|d}J9c5K9`32^E$8KSQ*r)agGKTe?(p_u2)m4M$ ziuM;#wOqBo1Rb$n12hjPDsCFd$PXGGnp83gzfBVMzsUg>9Br}AT%<0l7<7S7=rB@# zxZhsT-!6woXRb}hm!aVJVd!59u-gfLUT8ehm~{=OVIXm<=^TGQk>}twwShkuuq*c0ZOgnPkX2wz3qa*Yfq!n`S*Mdc2FGrXrYqS27eT z8CFo+Oo9Vcdug`?~lqw zm$sIcU`r-vYHQ~coeMA;1Up+SVyTY~i_{+KFwGs|r*PTH``q0|CDb$n-nE!% zxyT%q@Qf#NGPOT6*D#ZrAgI06vnzwf{6Vk(ek>$v^V2$2Yl2x{f<6=y*c!so2HH!Y zwbbWy#Cg3;?RW-#*4dZx31&jriK00~Njx-FhRZ4^^(rnL7QEoS~ zl$&^u7)V<#xY#?(4_z;)uJ^xY)43P`Qgqmj_K4m;AQ`#!dQK@UDVkYZTbfHnjL_GirZXC^n6rR*5_ zK_Eku(|4;4^a|Z%dI~X|@O}azdgK?rO31$FH)2U*46e86s+)D;cZ3qFAPb>@$PQ#8 z1&<>>B6SwlmXC;-UT8W0Op4^>)lc?^sOV~Vz1}5$LR2yMsDACqJd)c#QyGdB4AidTOO%)vw0s>)%FgOu zxRql;lbu31s(Z#7#jw)OXRj`DSicaQZFWsmD4qsk&E$KG19`Y7tTOLrB0KxSmNB{S ziFMjgYF!(aM32K5E#8?C?1JAR_)NTW(bs|CIN^lNa&U1~GW}U~zVqEZmBi#q%gL7r z)%B3^qBWCKj88D^@mm4}BMP&gr-51b-JrIdOCE1Wa`_{-{=&Fh+}-J1zMm%%Et#l_ zIZ@^SN`Y;uIVq{92vHD}W` zFo*G=Jqd4TN7>0kirx&Ngu%g72Klvx~ajBS!JJg8=UaqGMvj!xs=^i&0|ikSkU5 zeR2uCMiPBoH%CgxImNkS-o;85Y(7_wISEb9AF;p&@5sPrrc8OFf+y82-UQm>_!zL?@VWz3L z5z1+1^11QQ+Mem`21|@GQ|gQLplep0FqCRQr;#5)UYGLDs;B*}(7+{* z&fF)b??f%viMn451=)cGj=88MZy9hGVJBfY@fE9AF1VXw#GpwrWv{AzwnM5Pq z19O9JPLW3W?9<46TKc&AE}3lb+lZKf!QyIHmy=AKVAHRP#QgO~qeoNy6(X}Q_T4u# 
zqcYW}`R=waL+OOvG#$gM-!xgC{WFr`FV8d`33WiyIr7>rd5sJaYRsDa7Cgjs+=EM0 zQ-8DPZy90pskHpW28Svuc7qoW&F#Y%1{)pmEUg5hxnbS2VkJwPfEPEL~vG*eX=P*F!2(ghq^x$NxXmc06ihmT#LShb;~ zUnW!y0e#adV0u@Dk>m(0e!)7!yGXCdqua^2uHZ>a+JZmYSUAH%^`&I-!WqMB4ySXb zBPWUNFWj`)v2iLacrkosO8F$xQm`q4Epn-~WH!rDkFldq7AR?PZl7X^guay~o7Ep( znmF&#_e+1L94qn~BeVm9M0zO_8ojsJSj1(dEZN9ju#8sfGVFDeBriDY( zy7X%E9-^zo-m4u^Rh<=^gS}H$#{Cj^ECrxPH`aN*nJOoK_2D!WA^_TboB4E|yvF@P zw_?^&b0%xF8x&WI<_s_F7m;@J#L>Au4X?(4xKj|>n0xJ(ib&iIn~WZzQ&IzkvJx2h z#RBAhNEH;|!5Bh3gEr$DDeuN`*voy(Gd>>NlP)v8R@MIMF1mfn*7HHmF1=;|yNt`V zpl?=juq;An?ycte_lSu3#hX-72<)aiE9-~Gc)^vkg8YDjL88p?ki<_y6$@6UBTwvq z(j>P3PLn8q)JFu2zC;s814n(FQw?(?uI+s<$7#K6u}A!d{mymSC&G63^{Cgw6Mp4s?*Q~; zN%Ogzxv_?2oKc{K9Z;`fe^2CslWF6@0HL%mVRa37fT|t}89ibL8bz=8KOs8JMlct^ zL*ZgZA&BDR&i+}O**Xqm0e4EidWx30yNdz?Kl%p3ig2a7&#(M4#o*pPD!j91R$(*O z`}71YNI8TmFI7GQ0AD4-cJg@HOn&uB%H;HDFWGt~$E+@H$ZRdXB6E ze4N1X0#>Gpz>jhCUwf--B(jKUJs%fcgV_DKj$LRj3@|90-H~gW1goNG!>>Y{1|z`je%zV2xy~-&|ss{A~uWA>dWyirrjGLAZ_T*yx-uAgICXAv+ZX9a)VNQ?!ro5hI;a>a?F}SV|p_AQ{#*0R&KjY%`6* zY+eZkq1RKG3a%afUOB(jm;TY6Im?*W7BRtr9+w9)l(>2Yj(4Cabc^I`c)?)`xxJP? 
zuPI~X-{bSsoB9x%siC2oxDnfbzUvC$ZKZz0vdv;mC%VcnMZ%|;X;7H-BO^n*j4nV7 zwZ`%9lE8KkXuE32BzzQu6zdp$l8{ECAh2Q9pdrfv9Ok!H@Zpq4CCgX#gEVgNJ|C_} z#RNm|?HWI5#Px-rY{?S8(WU3a6)*Kc#+`heNk7ZPR=ps!SNeH=V9XskRKKjOpA zlGgrUMi51$flp^98#@_WV)4NkkkLJR70_$J;nPXu+>IS1SQ(@9@N^wjr{%3|-ho<_ zz(rcK?Zc-TTwYN+kN3b^I>j)Si6i5@C~hm-tdKU*4qQ$-^o&`DN?AFJ$wp}FogqH+ zVxPpfHZR%n52t*Ml@YhyvyO(5==rrQ2K*_h0{e{(^HE&g+l@NUfoB5~)Up1`Ga_wC73LgPm|0MIWRp7F_pVg|`Pa6v+ z7m!DnmoCQn_jO5|Z{){1B1zXj>NRt;@8PC7O_{H`rmwk1GdvAbpAh5*R^kmK}PZ;D0O;r2En= z2~%(&YhqZiLJNu{GRFqP6^R)fzB`TWNANr8*A0R(DQ=fyfTHZUkZ(TU@+~kwr9YUS zJ{uTMd6aehZl)zl)v@Hf{Jf@jLx?QA{I|tS2SZ1Q9rta{LHi9igWA>@jzOH12_Kw4 zE@meE$A=ZZk68E!V5iv^<=mvV;whI%8$l_tHu=Pc6+ccL9B0QTm}<8-r_NGc53A=u zF!aYk7<45Dm<#Qsoadit8XH>#@BOEC&tGXl@Z;+vj5i#LQ=-|`s@|wDKHL00EHNbL zHDj+kH;W~jL=Z7xSYw-QtM{DQNS1;2iu@-RBsKM@FZ=Yc?&EP>Y^`lV{;7JH3m-zJ zK=U_PbHI9Ni(Na3JN&c#Hm{2zcjrG}^{2BANTmwMUzOwpVpIw|(#Fp*5Rw=4>B5K~ znwUde>E8jwGHIB|m(l zoqbttR8nVB5wtkIvfvGGyg7S7UR48p zO_trCkZ~DzhHX%>QRYOtb~&CO?R6M(E!oQgYmKHz?Y0CFj0NZJ7RYh&`s#p6Or}^g zq#A7+tAIMe_NzEbOPBd+!W^H^c}%(GoaYZ!Ih&!eDHr%h*~iiu)88D{^eW)Ex8%Nl zxE4XN?Ihmc09$l5R0^v31o<>g(Zd!NZe#wb89kC%vPDlF5a?)GMRz<;&I!bC7XPtR z86{@iu=b*<0NT^T1fE=?P56ny%hqpe4rmM~qwOwjPS_tsc8rMb8=$JW^xR(bn!P^v z-j~P%s?{mvF=FAQCurQpao%nAoA;I~?*-JIt76=bfRMuN3c;Pk2wn$#WDgZZG7v>G z7z&l`VWtr8QXa>qTeNhFnsPkytgDV`$=f+Z?29-g8!m4d0;*8keyCcTPKt~5Y%w*;QETnLXB9MUztY8T8-~R zgr2p~NtnfdoQjz#G0gT^*)wo)XnOCtD7R2XOs)y@9p}%}on^Nt`uDBEfUf#JBU5}x zC9|hVjZPx&QZ#Dt8h~AY@8_v|IP^An?AkX>LcN+MC|%5De?-9c?w3>SSCla74UH7R zi`?w2#9RT`tx=N`%G(~hcs2_sqaGqTy;nm0X)?~Z)nm#naWT48Tp@Thr$}}Fs-j;j zaRIB?&E9srkl`CGBo(v{L1v1`UUB^ej&~*bP(4goW;MjK8zS_ahfgO(jm;|1`8C~} zKs~GP35+DedU@7C3cvnO}s-a)y|p&+aRgW27BznXK$f>S1AT1i$wislElV zQjvpm`jQ%y2(da|X50jHs&RgeL+mi9F=Ay#u_!7U} zV!w(eR{>)r`#>%mc+X`}xTqDXpT)9q#$qJFh$US0oBoFo^LL%)M#7P6blj=!3;WzK zdp@;g!p2{sx4J0H~4IDUsFDMF&j3dToqA$il&uTpFwYK2Y=)eUr` z01rS0>I%S;{%_R=mXP{L@^*G}m|jYj>H63mWMp%QB}^l0_;JwIFY|mH-IFY9UwE4F91!#xEIHo!T>*x+@zOsy(0>jf|`#^p{5* 
zM-brv5T1rnE$$Aj?tftIt-Bj(DQMo8@0>G12Obn_1F81HX;VY!C z{skH@jy_+(592EMBp`nuK8%C&`(po2O@DBIp-vxb?}+q)%kh*}j!E6W~kkNRh9vXj7DZ)?r>IyarW?f*h=3z$(l2dQ2SOJ;JzMD)IfjZuLp*2r;+`DK#@r)F=64ZYOmcAr+CLgVy zLcXE!a^nQlv5>F^1VMV+foih{59i-%VZXJ&+p^`&rAGtYYVu*bj8V}}^qJqgLFyR> z=rD$Y?nTL<2puKZyD>6Sb688_;IfGQGHtI*3qrF|YzNic*^(Rio_F_VLkl8@2)Y@H zp{D+6`*e0BADtHE#u_Ep!>8QBzbC#+a~pl$I*eqpEFL#9vVK-lw7FqxAXNG%?(rzR zzRx8qoXY&#n`{(swKh=My2jP_3$5GSQej|nL=6BjVbbRx`uqbC3sIel%?=MUez zLk$*h31BrGZ|kdZi4I{%Ox0uE9Zk-RE?_hzd#9% zr^b!HrD%KD`Wrvr%^{aZVRY2BbLy=_btdbc*Pcz)92E5T=QwjcrV_eehSwRuaMfRw z(l8^>=yY#BIJUrzR~pjI(!-fal3qi>_@w^kqRK?hbCyizme0`2j`=-mmt%+ZaDIa` zTxYAcMFG7{4Bp*$#qdOk?fowj)%1~VV5T_y{UKZY#tEE{E75#Xv8Y?;)25EwO?K@Dn zM;HfxqGRn;O6aaymh|BoV~`$(URG@3%1b< z0Jt7J--B}a>2D&nDXvQT>`ui$_59;J@v%cw!_hR8To~{hr}We}J#GmRsFV4MwkjAb z-^KCTJ@j`i->sY~ONdsU$n`e1-AUx&+^&-OrHgXl9x}Cq;Qn|S^)&jV|6PzT#)824 zccZ(-tU**#;^gjbv?>yCsuCxZw1ql^hCp6+#$A#CH8%D+&o{Q^C*ZcP|B1ZMRg5&E zH0cwq#MU}^3>~Zz*6OlP(DxOE*e5zGaBh#Cr>627fm>lcwL{yfS!ly$RBD9CCJv1r#Fe+ zP6rH}NoZ*H@1#-AlwGCx()*r7UoHioxcMHaZ{5hX-sk#;f@g@rxYUNQ>QTQfOMX9* z|GR8xRzGaCOJO8xl6&p?9i{ey#!X;N%4PqnkAA@e|Bc_-&CinWVWIif*|`{13?thc1W|_1;L#@AI&Q@o<*UaPKM{p@9!$u&ITFaIY0_3$P zGqlalzR&8>TA!=$=KW<)ljf8=;_yrM8`SAn2DNGVWAe3A`xi-aSjSj!Ox-E#JN~FU^M>u%jL$G+8S#`6! 
zIoE<8Z=(5=A0g4ix{w#c&wr6e?>|F1bNtY=@<`MAyVYPE;WFEp+^4JM0x1d-NqBj= z`4Sl;tw;HO&D1Ze1UpYB!r3-xVtI#4TJ^Oz4MWPaks*tcX-;#$iA^k-Ie4n=E~%}* zWhc5%x&+7MOfLCzi+=obC@q4$E0jStnSnr9ero-w*TF^*b6mjW5e0{93{j z=#cgvb)Ch+leZ; zKVw!iQ>M$1taIwC`Se2h{ik@efN9X!0Ajho|1I{AfSd3M!Qm?a2WKOlV+MJMo9s^W z<5*jc^S!HS+#~6r-c>A4rNZO&T`TF*OT&y8ae}#Q+Qwt%xa5!Yqd$VP7Shih_9z~{ z(pHtd=L2e1kQ|4mW(?$3jNabyYR}qqV3k5awo}`ScI{%Y1+%8#)Q*TgM7tKd`U#G{ z3MGXLT(~&lE-!f3oH62ZPJY8J_5;<{fuz6hacMd&e`KZ3X7Y5(o5%`&u24NOEdP6q z1+S%thUL5n+rtSy7q^LYlyG{V`)4ZDqsoJkzZ9W00=H7Wdz}+xloQL>A@`|S@&e+5 zP7jj{UmMEULmKG_f0NSYJ|=K<{|&bQ$EXQM2{gQciPBtW46`YON*SbY1ATk2c+t(Q zPsMnYXHN)DdI|A|iXzVM+!)>7D<-cfHGcT9A6}z+?%2X{B3S%vFl&jzQ&=6{D#8p0uRp+6sh8Wr5gx`nG}EXEt(C84C$Alk@^xaeBXLK zljeIGd`m(;Z<(4BCYx3rHoYa#*hM>3cJ_~mtqD=4(=N(B7BZ5ZthiBsB}2r2H+MSZ zB_g5Qbf4H0$)&Sw)3nXyh`C~8+hezIoiszi?nFyV&lMsnX=@wqRue?=wlt@YTWtp# zHMd`z=E>1ochX+Kz@Wi1aP`wl3bqh?yy$qsi72GiIR707vvh$(zYk5lsjgSP7BW6! z*xhfLIfL8|&)ghtPzlBHf0!K7U*WLT=YRhp^F;UG-t0UZ-3tKh&Pg8KbfU{hDk|Xd z)X|{w<=bfxhrwD=8a!iot(WalYrh_v=?vq;it}Q)@rWhGZ5375Ltk593I)Fu(c6if z_G%<;(UmxzUe3_e5RK+-E(sZ!(DF0@-#*xlTei+Nwy$Y1-*CdF!N)S05)1gb3iV@&@7y*!F_9aLf!704 zo{^SRQOqrwD0qO64!1&QYD{2})UzK!H)5Aq`0{L}B0A@Z)ZfO$Oup6&;kEPlBLpKg zGt-6#bJqe;30J9?=JJ^w{0YF=pOV_!Ud^yu48*`t_Kd$n=>>b}uQ(Z$gc zw<&v#GXgjNX1`o}cCn{kD%@*mTTF?v-h*{9Yf~E@q&gU#AZCh%$RudiR(;$pzp}G8 ziQ17|&zo4}17F($HPuL6afS3_5ULr3B%x61@4p<8`lx)x!f-``TbV?}4T(dhzB3PrcZVw?q|*O$ha_PuvQ(1Ya2h%C9|t=d@bAMXVw z{GNj2)KIJs>HTJEjrU3{wMTm>WQ$_MWal}6#Ipl>+ z`Rx<0) zr(=f4%ZkQPX}}?5@vt5lG$7N@jLTTkDUzj>=J*rc*^IR~itp4)c6g z>0l%Bd!N1Bb8#srRTeHxj%wyYpJS6d^WzO8#s6jw9Uz{_W*Q_2w+*je*P$J3d!;TeULV)NYpxQBTkIQc)D6=uZ%K0HYl5X9;0JNT)ST@0tw!~KNnmK4i!mHqLaZV z5K?i>XMTi;F!o=2-3hXCML~P2YNzqC_98L`?j3RSeEmbwQ)eg-7S?wz88+Vfh~iM` z)W^ZPT6lx_mvr~bF3{dh(vTcGC`_kP%k5N0Pt}XsV6xHUq)))%eN1~!l4yM2cGaKP z&c_zOvF9>LNyjh#S94K-^$g?Fj6_86vb|XS`n)rY*A|Tq6)qSs^QL2|vTcTY#0keNIus~$`O+?2m=U8yMnB*7N-8>H?8Bn7zsn*cHtpBme># zCZEyok9wbsFPt6d?m&Zo;#aCp9BNeKhA|FYR-S2API~nRl{;7b8TdLq9Z)vsxY4wK 
zJsp!mqCdL&GZv<~B(H6xg#Nm{rR2qI4Xeb*jj z0`&N?d?yj`m-N4=IME{qOzy2HZSUN#pe&yD&9Zir+ep)z8 zt4ElgimM#kJyV047G^`xu3qkZB|H?dv+ZF&22F$B24*Z=`p(Mqb@XFAPK0Q=31gdP zS}hps?DJTV#&4&m5y~4db`%a!elXSXJ3T$kcf-tfNsD3qI4f?8@e#+iq> zn6f_V$PJCwdJyO3JD1(%<0^4pZXmV|kto?>s2!jGE>mzd!V;<9oBdt1&GB1FgV&vG}q>EQ=;d+Z-+75j7|>6(z7=^Aflw~6^Pe*r7^Ts#g<3c$=k!g;JI14Q|^$=_-t#}7* zdYATfUb^sEWZr~mB`*BxVx@E4XOGTF{U388&vI&RElUY1CPjR(9awOhH`H-&8E3Y3 z$<}Jo3}aW04UqP-6@$<%WD3g^KiuXk_LAUnpge6`(H47h!4X*lv#4rp)EDJBy`JhW zV&FX?v6Mft^$sGo-3zDcVN zN-q9?46RMQ8ZYV9vk`b0LUF;ccz2urpjG<5bIuoJOx?P-xWS}J(G9oy(`D)YPFNKq zS2KH9GE6r?FwpwF8jL>)jUdRFM`hv{q*urwu3(mCfD+`_lMcXoSf3;B4OR9rB_L9t zA-XH9cnXnpKMDSKpNkkj@;2bMy~@#?=l=Mo>##SbXrExErbBK;{f}>4LfD%Uq^5+P zIf+*6^54(ElQN&7qa0RkS%GdU(rLqOTNgoZ(pTDSb&wl{5A2FW*nrt+g*&-PQoI!B zVpq1A$CL~A8l2(D;=8o;ebimrhHEi& zO?YA>+)5Z-SNVVSSi2H^0Czfx+C=!)u1|I0WwgTdJou7<)DX+zlV?q5!;U6vgJI)0 zJk8VO(XcnVLTXR6@23va%*?)}tJ0fEt@0-HQDE)9)pFWbF~Bc_BFVLlSpm0>%$BP5 z$73e=m@pYJ&84_zd`M!7%4dABaX|*0?IN>6qX<}Ps`k2rUj#`dj+UNQFgRR)%i6Kg zqu(&sv(Y1bioG)V%p^~r{6CxdsNkJ<7cK?N9DB~gkbSbZH5!{yg3e@ewacxuO^g@a zXr+2WcC*84?7&r=Z|bf83WaZ;19@wYm?Jig;?&uavo17O9#q@y1RNG7uK4tEr$~yi zC>fj~mY9)f6P4FJ5OYQa^x!rbCvGP9ERnc*Z}nUHzJWc+@(pM_9kJqZ|0s}K@)9aU zaf63el4uznRFYJ{MH~VBOaAY~Q~19Vk54po+a>?`_g^Y+pmN#SFO-U1TP4-p7+mrF zR_R(8ybJ6;`^lYA#>bTIawm9f;E6CF$d*$*St<&mkM)tSHW*G+v)m{QOxba!y}{`9 zSZu?`R5Sn|U!CK;myl`jj0t`2IrjcU%Gs!=C-$9AWFl332OR*vp%)gZZqv7iB~s^S z-d@gm@^GlB@>@5~&14NU{K^xjutYiQ--(^O>3=max{?q$JM+PpmrJU3EpXVNNx(BP z)CBAK1M*6DEF+%wCig32QDl|mxFjM*I|@(gsODS5)ye-D!y4u)vmxH8EK7CS6-AJb zzwL(KL$K>7@~`K(f^miVBEg*O_zP{paA-|gR;K7_V?(;imFZQ*)-yg&(MGA1|u6W{L_TtyO{Xo z?OV@oMN~zS&eI!`@^rzsQp2^MreCS7Lfni$skt;u z-PH9nw6dzIM{IN`Fq#j-TAWXLT#Up&5{Nz5~FOSy5RIU!ITK81{37Ze z5<-@w`WTzh;E`Z0n^m9g06#o-t{zD}w>s}8zI4$3P44Ct1&9l)c8cIB(f1${eOnJC z7Tb`N={ci@!W6R^Gtyo8cmI28TEyahV*z3luANrwwYG7P+#p(l`=&%M{C$Cj;Wyod zi(ju`^A-{TO>xwm#?RPNxm?oIp!$;o@U5T!4FU0qf}%w5 zy%!H*kL>=nL7X#ANqB*cc9hNXng;M8L$?h_^gIf`F*nXHgTQNgd;WTsn>OfmUS3Z? 
z>S$!4Q+UnOdp?ak+*DIg)Cr00vrzr~9awSobR}OPud$7nwk&R>tr{?~QC>2R6DCa$C z88iAXpvY@nSLjfzt;-i}1aX(yn!eBF{(QOH3rpfRUWEBinhMvyj%S1fwY%QmD-Dii zvsU;F1>E9|AZxD?_2azn)oCXKD}-1c&xW~Fwn7jwr65?nXi#A!Wsa_38@(#5lRtHP zJ#2T{R=B#~e*3V(R}Fquqmdm{=DHDxNhw=XJVtrlFwi#(6%1kKgix+7?zo0nAU^x0~i1^{dDQ$y<*S#X|(LtsCQm-x%SS)6^2rJsP75Dn0 zpiAl%YCY>G+PW58L9{~8x$0ip$H1r*+~56e8G~zFPhRl^2~_ofzWq*#S;K7fk<6Yg zj;Yc3V}!~mQj%Msd8b+JXpSweH1b~j20$&hLk;xv<<5P+_a2At0*L8XH$iN=v3Pz& zYv)GMT%+szid9Xyg)ZU!#bdn}%ja0rneGlv-=wy&BxO3}R{|jB_R^V)524||f4?X$ zCX&4XT+Fm`8%8tpT2FpJG4dS29mh=}8F}aV;!8tHj% za2MWyi^}-wQ@FfDN~`=@%DDJnx~Vh?+}t_-JEY^baPE5k1;;WXFgsA1v>oC?`B7K9 z;R6|G2!rjbtS@LoSPwDoQ4gcnJzECY&25sxbzlFK05Tx0nTuw&hBaBs0MUhB;i%TZ z?Ho6^V-MH^ZVK*fWmL=CEnbsM0M`~1GC$*M#+DDGfN6(k_Zf?luH3gxNJ_tEAK$xiYUX0| zjv7&+FXh}5V>b>ieyepxppU&gwBzYDXQrPO#17Occqd%|QAhl&3nStvsOk^`x1RcI z@zb{5Iz#O_`2L=YMdy-l|6o7xNU?L~OsC=|lK1|T>S~-V&jcz>9~MgD@*2|aqD%(+ znjrgP*dt&JwM`7UdEy0=-l3Z-)NWPSLY)3m)IW?CX(tiwU5c{P&B z;({1oV+(pe>Z`(pU+0@pTAzm|IZvLEBb}judGf3`xisj$<$!|-8%~hYfJ||0c30K- z5g}gT#=i3ryhH3{G6=8VTE0CdUGhCAvT5SP0Kl5@?fcd@Ki)nlJ=qK~M-$>lJQ-}Q zUIC>2OO*l)b<}!Q*lzWLF4R>V&kI!{BdsPE?Q#FIB~I~S*y#ckaELfPDj3dxZ8dVB z*v&I7hBZn4#qn3dZp;TGKaFL;QBrAzi?}WS5L0h4b?~RqT5$dOfC!a8C$K)*raIp~ z?{Oef{lJ*A9j(6q%`!d_nGC#~1Ex{Gtc?lYAOiCq`4>O}=+6-rvhgb7d72e|iE24l zZ_GnCERB@b!pN&3m<_fFj27$N7!vP0>aF~H;2A=tn*=~wt~|2FTBu(wj@`Nd9Id#X z<#l`&=ZUUYAAr7>NsMj*4Rxz8J~Ku)Y;1$}jdN3CvOL8P-Lh#7HlPl|EZgP!l3s1it<=?B%kA*LylvGJv0iqZxQ?w+gkte%RmvoceBSd z_ot6yt;6nvH*fL?b)Y2PFVLp68~zWlI91+uaM&9AOyU2kRgWxpFMp4lQjcISmI6hn z9B5qR`cdL!V-VAOY=<8-+`ty1{7hUc7|1~4s@hk}om`#R!GLp+x^4cizoGe$zky)& z)JYuWYVuR*LWEtROH;61hm-RI=@VL3U7rz$xaFg)CQ+ z`FU;W!1;j85_E77v=Jm#z$PY-&t&@ZjFx87*~btU>Pfmm{&5en^bqOpqn%TS4v z&_C4%irI@eYXF*1W6DFckR>O(FE0n`%PreCDzU)hCWN+`;j4J;v(K8R0J$HNgQjO1 zf{OeGvsWIcVruMkGwS;0)0J2;q9gq1LH?Fax}Q7tIXK5ViNNJr<#}3996eXRYK(q3 z^9Kae>6L3a2Kc$^bMOLP3X-+)C6>fhtbGKNfuQ{8R+FKrX6H^~Y=I<>S8b?tlo2c7 zL3vX>0b?#f2{u5eD5krl@?NMXxA{ezflIcK3sln~)+c-cps0np>>Xd->hQv}-qBjr 
z?VkJa=9f-?eBl0>4ZIz@!*Vkce?M3nXZcK)uEmvp3DxXY`&BtR(0rHv?UUd1heHW; zmuFwiGfj}3E0zk>uAG`(jNDVoxg4NflQjd0XzId_WhFkG#wUfAO82eveySd zO8?DNT@wkNa3N}Ne2zP^Bs9Hitn|k6bH_g$TjvtsN2uhV~D| z5cM#X`!9&v1mU@kdTc;m`br8ge-S+ z{BXH7SF>^1Z(;R2cS2Rl@Idme*t#(Nfjk5o2lz3TNtdKI{1=*6y&=!mdia%rMmt;} zUgQRKHY{mKd=E>L&iM-$-e;%_Hr3u54GV-CiPRr(ocvTE?Gy(6fN+nx31l0A^ESl}t4oGXqb>4@Rp02_@qH z2qk+xfC0Pjm*oS1JHM~!KwNy7)Mr<5$5l`n-cQaP4$RI;BJ+S9KMEtiZ_CK*M;BxV zD#<@=QGul~R%WC9fYI;qS&;F;_fjo*nmEl9F-FTppEtEYT+2g;kIAjelRrGg^D*Yx zrgu=BMVsG7X2;Q1t_uGYkzneFW4<;=q!y+7BE(|xH8BbuYO-xTdTRzElyVo8ZP)pT zrH!|r9n#DX;TCZE=5+J3(M8Bma(?Sf96nH`JgqDVy*4~Ipa9`{er-O1*hM58wJwT=uKDDt6(CiFmoTq<6ko)bWy zd(!`vaI%R&!uePN4Z+YposH3sm*Q^ zJZ2_JAr)`v7_jWal zOL@FKMLN0iLB$rB!T92$$hL$BKtk9y%lj4W&DN8&UjZIj z^9I(i2VxJF%pF*C`Lip)d^92YOG{bzK%FQnIx~!-v86-c`emtItH4h?TpWRIjrlJ4 z7Xbbc?+Q&8BAJO8fhO2tDx4iS|a6m(C!c1Kem%%yQ#Mnw*t9?Itm(s}Dds z0B5HI)-l<>0%!dvt;jrnbfshj;lJUDDm+#kTauR_1Z@?qNCYTbTo%LcWBefHr)CC& z$R{|I8?&fBIM2ZahI@&AYlbmFuQ*=lWe(r>^|;uJ3ro?U_t;|<|23g?J6yt(;=4&Z zR7?M2_?#r=8`Cfa2-jPgXtJ&QsVYa5qe3MA_zkaN;SPrHuSH?e$h^PFJ}h8kS>9L# z^u{h91p9U7@`Grv*pIi7obQNtXLSm1%vl?GeeH2a;cq$~Fb0xdm}RUKO%i~gmpxxm zgI&CEwT7X(gr0S35Ao1M5H@O-aG;(06x@NuEq4?bxEPkQN#dEhdgrlqMrA5e20h z!SGfzy!MEyM_L;dmgtnIq6 zrCy!BEi3o;+!J*cU5YY0+udT~WRMalw`sL~ECbI&L z9*J(I$lzoR=if}g)l2l?-H5GC1~sp(VC*^d{8_RMb+LPE?6qDzWg3L(lzb0^>JWh) zh5p~Lfwh==`2s(OY^M9R6LCVVjt7Q2gM1gtBxr*SMj9>7F{@&)g4`FVhrGQP0z7WT z2dP$vjYaH5Hgso<(*{cr!1zuEBE14bwvs8AZp|*hmr}W-O8kwn9L;@%P%KBhB9m?+V%O zy?6#!T>8il!t!h<^kblMZ_}|sAC~b+J?Ru&D21Usnn|J%dOM1@&7nAn#^1V07AU4Bnyz@e=OZy~u!AN6^~6xLm{ahy**|Z;K!L%v}bFu&y_2>B`FtfAmfJROw3s z{GUw1_|Htz3m0gEnlyP)Zg}Djz{TjCPJ(1%nXDQV#)+)*$L#8UM>_GsS6&23vBncQ z5#v}|=C&J%Itv!Wv~wocpovr5{=66of1n322vXiWPz+{_pj6CdQf?IP1F?1? 
z(a?E;1Cocl$c%OGl+q^K!{LUwF#Yr%_^Q%9f+VNE;YoFjP!+`YqYHM0(nOfKPZ?u` z%lsVk*V5FCaj+rOlB17LOl~@V2=LC4r-xAgJ+hvq434x|_2k2#MRS18^uCj+w^!0h z_zb0b5FCJ?-*v?MTl2`tstSt2IWn;uRa~K5ABwpT?6Bqv+0sUu8?@L)Gpx=Yuebhk zI!m-oEX}}<>LEY$ zzO(hzTB~Z~07XR<1VTg#JocAP`;Jf=|NtCeit-#dz7M zs)2x*7jD&|Rc(#3;C4QvH4??gJsex@1>B_y`*!gVF`n*n^II5*MFn$dqF2G=9d}c% z5(0uno00K@VYenDYk3w+l9U1BQWvp0VjDxFY9CCM&Ds~=3LVl_YC%<)Cb=jBDrN0d zf14O)jqcyDD=+e2)rWJNl9h4FBXysw8hrdp#Ez2jBZ(&|^8ie;uU9eSXxRU*pG&}o zj6tICT@Apx%1(2aI5N5Tw~0)8*BtL=zrFHkx=?-9NMT_Fvpla6q8j@@YDjD_p~4d2 zB=9-7I}$5%m>)>H*ZJt^zS-D2*zecHF!oHIhqEDHNo$0)l8x5?$}9cHN_`+yW>YKz zhEy~0@`uO#ma4c!`OIvm&}_>Z=!x0Zb;)OWm^PV4H%aV({FXzJW)t<%_lN4vQiILYK z6s-DBh5668kTnQ@ryZi9pRZqK!eirZ1AFBibmH}Oxml_W$p?q7V+CJ>-g&Wq&h>Mf01tdHE46TXNHl_n&GS>w58mk>%TzlYuozr>-Mi9X4DVt5JL_%nZ zZK+5lS%Gpa&*RoFd~7UtP&cNJfW{u*XG}a%jXS?pJg#qIT{cLS-qP(OW(a;YJj}!` z2yvfUMl$=egN13&snSm0wyHk@K8C-ElsQLy>3Ju~F~6|bTj~ERykVqL`~%&cl>kzT z2WWpD5o*w#m>?agcwM?@eiw8)+3MRl-dcHGf$Tu}=?iWVC}Lc?dGQ83pyDcSsqlBh zwSUk>$0D40l2HC(Z9?R(x3CpD`PPx0Vpv+Z);=8-H-A>j1@M({h*As_oQ%kdPXOu& z{FxQyR>jAU%3LX<*Ug1d(nGXoMVTz{Gm#x=kh=y97Y;HpITGGLQZ0 z>axJa%h>=&9SCmIMN3an8TT+Ui@x#W?0a?~>sae474R!ErApTl;OLX0Q}>mSNM}?2 zzTH4#EftXS!(NW=kDahqt^H>h1DSD5>*E$~Z6q0ywtrph7#WRR z{WQ!4tMaNp&W~M_d9=G7OnhG!TjpJ=bx3P?!tTxJ4%!?Dj18teU37L5D!1Y~$|pa3 zlC0geF=S7*C9*ftYm7=tW-3^&`&ZeP8m;^f&}iGWm1*w<`&JQ#yeT1{tDM}&o3@Ha zE6`830Ffy-(*46;LJX8ry!7`|7e3dBm#Y5^4gSs;^G7Hu@9Xg()f(~u;2pXX@tUW$ zUXZUzhv57;gfRhE;@cOU7gED+MTqU5esl!M&ehNU7uKw>i+C;fbbU`eM(=rP=eWyU zA1^M;&utkHwQ(-62(??j$aQ*gYww#yFZ(7<9ohy6w3O~eb2X=Ed$bt_Jt(eX`KCYP zBcI7TVpx1&+V)?E3kdT==-XrlaPP~~RIUUN~JqXXY9uHy7+me zo;$~;u6tV1fAy!c$7NBf_@OOKW_O2wpr<3Y?@l(gW?#_CP%3i)|ZQ;Lfc%*3rVMB*9=Y_0gddC*^UHwds)ml#mSnC?QWF z+#D((CK)JOA=lQYrY{TH{krU&tc^E1NuK8`afNBwmN#Eqtq1g7*K&>65TPL$>h$r; z%a~`FeYLS7HFMAX_U8^#o&O6BpRwBX~Rxtiq<8C-T zks29f+^va5+L02>nU*V&ukzlw3l^K#mT!M}vy<2Mb8ZUuDgqQE(BYTAXz|Q>lZOZ9 ziAumF{ki6^^G$Nl8yNcXwvefiCDebk{qXu1Z(#b==baDD5{%03%*>dHZ~qhgDF0Cu zWg!%hIBY8{)tS3DnF|}Evpze*JEIU7~ePzlI8+go2oe35R 
zTK)qK<`eT(Bnz}@ht7EXj=U*~?J!M1Q+d}Y-~C2GI}P0;!j1s!pq^k9rRGWYc$N8R zO1moulvq;GW}85ridYqnF12)t_(<&vo%@>ioTL9#(P{Y?c6Wy3{=W2iPNq-@bbdA3 zryyZ+bYAOYmOh(Zloe0q?&JCX^{VPuQ(lR>xNNKD+rI^V33kuI9DpnOsq5Ie$~7pVbru~s+Mth2_?7gmA&ocUDzkL-T1>vV zeV>+68b958Q=cZITHM%b99BJjq57en)jm7_hAEpV(ILg$N^@0($`CW2Y%+L!*4U* z@P;0{7vKJfYiWQ~Gq0QrwO#^Ic@rP_Tczu7+MunEr*=LvzeXCklC?HM=z6ZwN4~IB zU+2#bx5iSsC`{eZTzx{4?z+M$`f2$kI6YcCKsf}e+<-An49;{xMkyR3w7yav$Z=R_ zOust3k+4KL!5&Z0>i!r-$$Y-b%sBCiYrW)zcT2iEXf4AW}T)$0ozl_tz%g=0v_UcIU zTL(4PF%WvMOmd=C%>i4!{n_~lsoL|=7IL9<7lAYrzhnt3ckn=Ms_6DyLlIg1Q3#uU z)R z7$x-hv@{-ft2R09=_r>{XMd~(3g7FVsTFWVY-Q1kai{%PIO$|C80t`Lpq?NYL3!$F zC@NMx=6K`m-pq1vzAxw1*zrJoIQ4ojY7JD;!*SiA7$=16**mgJ-aWa2K43rzjDNf@ zJ^na};j8=$A)jg*1D){#J0h+kG5yhWrj)CejL4HMaP6W6&UdrsEE9!068$y*0`;$^ zB-{`-`-GavdVr1)jAqpET48=@oy7sl`TnqKP8e5-szB_4NE@;(yX{I&RD;El@Hc5g z=^QTcFjy&G4<_1WoSVHR1&0JnvN<^ZWTzlu$)fmcPVC@k-BH+K0A%gl}X24DHP;bg=nOY7K>cpIvx&1b3Fxh6N(VOxR(aQaMT>p^;Po({Q<727n zB7`bh(An)2cCh3sVn<|raG}fL6nF^zb*V2ib(&HCho9sk3o?yknaNC&M08bYH}DVK0*N9(`-7r zB5)Gg;qH?^IOBa&Bs!j!5xVp9cjKYZM zbW+|Jf6aQm5jWd<$s(O-AgK2)HA&ZQ-R!@%WI@OfZn@f|FZK-DZh4fC0Bt>ttdLu( z-juU!otwkc>z^x3hUSw|m-f^i{VtT&jwIPXX(t+Bo-Ci!hxk!>nPICAz3}u0b#raK zXbdcZJB|tm!?X_>iaR}k>TSq<5$j2M)5?cS9J&Lxa1%msb6?b&XFlzPuzwWxGoXlD zXnGi^mLA4K{Fbp2iKU z?d}p$V8T}It7BpzU`bCPk#oE7)lE$Yy-jzhB2JXa$fiWuBXvIY@Bs&itY zcyE1guB@+WnPfdIZb5)nhyxtbPFccFC3A-=7Ordh`M$ znm@P;X#6eZ8f`3mh&-HFFmiTBwTk;G8p@q*S9d{seFterZ^`LEG6pS4aaZ`LPexlA z_Dt3!a#(zZV#M%C$Y4b=$q;3={S`L)(5k!i%FAv+KuP@b&eF~!>xN5bcch)nD%%N` zoP^jhP;P-&>UmQE0{kqIcZbaW;J18*irW3_4`tJJ+$%t&M@Kj3YtL^i;`E7QuL!{g zwsYpq+Rjazg(xp#0?KuHqI_k3*{KxIEeB4P%;u;Ye}(53C4`78#qqxWdb}Nbj3apc zmHI%bI{R#lL`rr47JBRn#PULD5y{N1Sf5@d@RKO6y(FTLr-4z(VFSQTk3K8B4_Zk= zoStP3y6=x^^r9W~%q8rE&HuLz!f)1vgAwO;!k`LO1{l|0$?rT4+4iX&UE+b8x3zwz z4p|oIwz5-;e%0?hEMxU68A5HQI)C7?wN(*+dzv~AxEhyhjV&m?iUzHt#@Kn>>xn;f zFZ;5N(#kH=lvu~8ftCB>R|DCZd^!<6Sgr=y%opX6zDR~&tX(P1|2f5Q 
zS)?gnPRnFl02Zb#j&Ab$@!VTtr^GV0{=%~E6W~xmrLJn&XT=NyBrhJ`g`>`i7lK+kB4SHJV$M`s8Ok~`jk{C?d5!> za?1OJweGP#RZ?@*;38DuMj9Gydw`4v95jY%=%7fQ8#w^_rEc;V{m3JvItyvyB>AKo zO%jLUY9J@_1XOD3p#ROpd@wn@-*m3=cb#lmrP&l)ZL@uGJU?5UdBoIP$_0z#S_MhB zm%x3-U5#tQxt=K$zUT&2f8U)7@h#sev3>YFDy_8Wj+N+x}fVPVTT#UPtQ(!?qM>Ft_d6`+?W>uuP3wXclyh`)x zQ79~)BiCSb#_}op422y<{lMi?pnlo61|P@I_8R*WS?x~if6bj-Duh_unoJ{cX}u`l ze52dGmrzacC4a**b`Ai`C{ALD3cqdoGI+_ZCtXmHcmFFgEgj2@gv#k+;8!#Qb^*Iv z2sLZLi_q1{FsO0lBIP5~zG|pgzvSqkQ-LTQw@H&yz7__Gjv8BkxZ=D;Qa8&`vIWGq zMzLv5l!x z-gIc^{C8W;s^)Y4t$vh{pB__Qoocs@Y<(m#O=}7LAUmFPp&Qv0BEuv*+wBs0`+zdc zZ@DH!R)_6)$8qI$%&y{sSP*!&MSBtU!MyKfFy%!%w!mMpf4$2Xi|G|{>Z-p4?C4zt zz7`4c@SV6G7w3qw-s8;OT_s5Rqu;Ob@nxD))`=g*T@|ZK6in+i18}C})7eiQtT_eB zn=}Y|m$*q+{OS@B8M!mXG1CQ-(=R?kGFv3&_h*tG@8e0nEpBdY7`fsd$3**TJ&imnr6{{fz zSh$mYFX#=CbPSTSp9CX9a&)epN31fLL5WjieA0-xh)}okZ`=p?{G@&XL1{}HVo)3) z)eLblW|loRa)=+`x$o7o;abg>XQQ%zdh8%7MONsaN04of1y;G)~(Nv_Rc_Gl=ZIy zE@CsC`1S|0;DsXuPj6K--8mG=FLx_I=@F*ojwS z;|e;_i&IU}t7C=3BW8bt%f7)K_4v*(sE0Km!M(4lH?x|B6G3>jwcNF;H{ptqmlvzv zPsY9P0$n58yLO(*XZy-k`w-G5S*dps482v@5Na7M{N>N%SsFffoH!fu->{3F)qP$F)+M#~c=<;p)zyVp>I{()?q4*(j5>s`dh zkL<5F?}(LBg?rrC(?#mL=K_}YHyQ_|v`j}^c6+MT){%~RYP5=(!`@m@M30`#BF15= zL;Yb;jPkl~zs}?~@)lP+k(#~g*T`A{1U^b7R(V~?xm)cgdCU2Hhedjk) zsChFcFBIyn8!fdOizUBy-c~q4C~awv(@PKJQGcw2#zC%l#;oe_j$V3)8bSs7!W~TU zH()AlP;d0Lm!f-Tu_C_VJX<)h(_H6C?p0y2s4~!I6LFJUuDe15s`7-Rk#@>XjC<1c zbpwN(7&z#9FFE@)t_i{7;YabGRBEj+2#1objyT^YDIyqc8nWe%anLdV5%is~M9-J- zL}@EbX6m(;C+)-W>Gz@u$C!fKOQ%tfhI-yd0{P#ogb^23BdrtTi>-KXq zC4xzaUkCs*v0ru5&xdA0mOPfQ>!-Gt;KY~jq>X2)u>bV5OK1K~R|z-DujZKb;&whq z(AmD2m_TB)`3w@GDjuQ<*V7y?MJ2?uUZZ~g7{bmuz3_>3L9KJr=<9*+ItZw4yFjLp z!6cB>!TV5f4)YucKo7BPp62L^Xs-poX~5&C8c&_*=~_x~N~3SIFS$F3)2%8Y;Z=hNKx9tJDXgzpaDYP>tDk=&G+ zzjJP$FQB-;g9TC`A|Pd<1~cMRlr4kj<4*2M^W4@-bep7$%_}~PUlS~@tdZx@i?b50 zbGYE@z`DC;?_#3Owvy77yMFZtxaXuyo1fzHOc=E8!>vx-QA9NK@*qfBpN>RS1HZkGJk;O~;l43UE>-649Y-D@T!X(Vv6ep+4D zy095c@ox?ShG!PrNiN$$Xz(}$%ovN9NRD{l{+gA;qP6U`Pm<s^^^%0is3+IZ#K=%#A&bi 
zHyFJzWbfT1zVik;p-SWO{oKl(f<#SRP7hsGbTeY;P`@4fi~(MSdp)*@8vm=!%G_EG|I-v8L(%q$; z<+7Vo)-UMlJp!6$_$He|!jQ7(>S)Xgkwh%lp2*I65oAgm&uVV>o9KrR-SPUv%Q>&8 z(>>m5L-C^Vvo#mwz9t5Z`J8J{&&G!!$nbKr{qUAm;c1dTU}sgz-2Lqf?H@Ler;kiR zM{(vIn{i(4t2Zg*Q-de`c2|7$vb}7&j(&cqKwdl%N-FtWHnV_?XH1@QFg!}Ny6zuK z=?-VOLN2Y#51A|K$u_c|E6z_{_=-pL>bavGU8Ifw=}0rd2=rohqh*h;w-kP`<`I05 z4u?GQ%MY*ROlIln(Ldtrs;3Ak&wkL>p)*KCRd(*Ps%ML=%Ps4RW|*jq%I9+Sc_K5L z_(QlOUTwdT&n7)WmcDPh$k70zZP311BT8A{npzEJHuN#=7IoN1#L7*)-<(V>F2;nK zT+M7{rI=xd4;4R_-ILpGb_N^|<++$Aa&!TB9)6TY{u7a_y){*<>{V%D;(mpAXzG4;cZ&JV5s;YbkvD zUL%8j7lUR;bL({OQSav6B@coQv4rU~Hl&;-jSZ!A8o`30&c+dhCNp(!Hnq2^J2+g{ z&1uj%696J15d8F=pLu|w+gjcGM`D}OowW0ZW!tvAvpDh9IuvO$X)%m4Q_?(qH8bk5 zt`X2i!r6_q|g-ZJ)M}b?1)!6X|&Uwe?l%6jU3`LS+6!V4Bo@ zngN+ThlPjnV*E*b4z|p+t&L;sAbnjX)a^^Se<#V?zTd&B#4;OZdSIlh7lHw%(B`}s zRi1a`X1j&gn zyk^nn!)Etsj_B2FhpINQ9^2Y0Ks@??DW5oVt^&CpZS~vQtXroVM~P_^t9ucTx0V`b zPb&w+G6|rB;(mE>*oAfuTRe=jFCPCXt#a%kiytj^(@1!0lL-&COZ&bva^tT&#Muxs zp8g)Mm;jaVW8MmtpOe`f;%;20zas^p*~8SOYaqVLJDbaawrWifFHd@DHQMROqw{Wn z(H+&2+)`Pusybufd&zB31PR@>z)V={QD06)nT;~HO_N(3(d|CXJwghiCY;;vec7&v zE&D>Ru8a7aX=j_4y{g>iP+1ELSWoEC)E2sC;dn3ayzsBh<=cbFhc#Ed`%pHVw&S&u z+19M)jNWXkUPbME+$P1n5a`Up?}-8U9;$NV+HFB<+tPT!(?36=MQF&Pw6Uh4$b7~; zRVaL`WM2{4lAHRUp}`4O*b_A}^%Q6fiRDMDZR-d`4h0mlOKhD{EnbB8#&5*tS5<^S z^z}H(RTQ*$Y=jug#bm})XJoaxiDyp!{IypmC06H3Ye-UhF+=q7li@xlf)%`3h`2wC z|DoO_gO`J1GyI{eG^qW&B@}xZjx~q9@VPz`GhSegA7b){{NrCTMD_o0Ve$ov$=;}| zPJE;_#yJvG0qXRH1VaHE8}n&371t=L!nC1N#O9q#t)qVsnp_WF(%Kw0$5(@Q`SvLM z=0puq`bnEfg;wrNyp|!<@?gKPYfYTj=(m1aSR$ZAhtef0= zmBOw=a>7jowz9(`K-fvvpl zQliU$rM!(-6mxY`bQ=7Pv;&igdxH`!YGE@&tW*W;eO?7Fa98Lj2ZQ&wSnla2RrY=A zd?r04WVQjfrg!RJX>Ts&Z(LcgPXW6ud@NLBog(`h|9U5x-xV5bjYP8*2D zB4~-bv_jN@3y}IWUX4p5!b2i8_|Am0?0Yi`WDq;m27~F;IIb;|^?6QV%}!VdB5{?p z@k~$_Pp19)7e+`*4IGsviD3%hvhjq|>5FfNQWh?lFkp6uQ}3RXCn zMQD44mVTlpa2{SXG31IWj6jP)ozNK{d2u-gEoB>1RgsxuDryo$k$5aD$s3XxwV3w3 zz7R1D1myM3jiL@{8zX??ytEZ5rqbv8n!-1X6XW3a%j9^w?$%iG%vF%h@XISH%-cpM 
zZH^{7p=ii3_L^0BO7fYQ`eXl@|50kaYA@Q)XiuSy$iIx~Z{JkDr%d_l8KKXjTFol{ z9^~@EwH-%l@$bH$4CGcZD5GE1D7?z$H0xp(_x_Av(v-)!c9d5B?pcp#?Z5>nsv++s zurxv3{<4%T2u4n0w9@>T1ZWMC$jRxi1TnEkVGKG&W#r|tlbn9as2?`Io0O?h%<87 zz^YU&ZqX%PKl~J2YHN01=K>vy`#v1?jWe~;n=VKHZ1^%T5768@EN}RDRgrC;@RoBM ztJJ9**F^N#Ex@q(9l;2DbX~iY^2w+;1UHB`#Y|ONGJY_Cw?*xWR$@>DO41gu=WJ zTmt~|yNMac<*?merjQW-ov^BeJ(HTn%y4Lw%pr3j={))FOilOom zG;k_uj~mP2^@IJV-HN~J9=G{qLL=rz*6Am0DdyqYmD|g>G^pXFl)){NvQ}rywyO1x z=*fVE9a#*cbNW>0VXEIFK5zb1LoWLUzFFG$stOXKCa#YvGD-hpFvxC^{|Cqbyb2^) z6aEu7L6-6cph|aXS^8)O;%?cRw7QKC^gZgHl&85HTb$`elNLW(CB31&vw94TKa;vj zT}JC%d|Wj^rM-GLIX$SjeD??3_BdL4Y({)iXTdd*cqc#IilF0*d4oN)XXFdMCBIc> zWe+I9ES?WAcW`P)YC3q^@jN28zYiV_l^dooSSt^meGW#Eg&~7UF$9JyvIzUQXnqW$ z-0MhZI3789ooH`rRDrH%@Mz?W(H4G+ZbZ{-b~2L5@V4xqRQf$USjXRA4HJ~#K2r_U z#(P`-_MI$uZLBVeZAuAy5(5G!Ur5SgtU>G|X)lZ1413&ZfB??cw-MCu*5m8f$`QMS zxopwRwM{`eok9CT_nqTPjY(>1M=A(y;3!%B_mpmvFBO`1LeMR>X|G^9apNMUvkTEi zEvC@81mkN|MMdTF;Jp`9>~(xcS+d~^FW+qcpx6+e7ZjJabX*FAuN23>l+~DWXT}eq z$$wjrrIRx0e^@>WbymziG+Xh(b)aO%HJmaNe!(%(Q4*jLEA*uhOQdhL`7e0`%Al}Q zbP=bs-&E&)VA>c_n_7C<)Q4i8R3df*62$fnVlVPPZ&~>qth@wgf_+<$e|>)mg|71i z?u5rla>bvoX3ZdJ0P{v$C;uR}9zKalF7ar36_r*FH`dl~O^k+mb*^tmS~p8vWe~~h zBg>m^d#3*zJrqt^nq|`FlzT8S?4dQ}`eS2w`^yWPsmB^!0Bw z3yk~ICG5-BRNU;wHg?eMaX*3Gj0s4t-sfbf{&+dKO{-OM2 zC%}5Q>)=YU^Kx{dsxYD@ms5qg0P@$uEhhNXhq24-7u}AS8P5*U)}W@Xv*-~h9Auo0 z*x=j_#Y+G5;`hKBzdj}KRN2p)%7;;7vb%HPzJoAIiWeaysLF>~O=GaSj%ypdxB*f* zIr@L-`pU4VyLE3xQc{pE>5vwN?(Xge0f`y9OS(ZCX^=)5W`=H1xmKN!H=Q;bj z-gEZ3&i=;dS%22L?_YjBxKaA3jM~p>yb%1f6|k1~__=zXLzUBHV7o zcT5k0+x;zhZR>BD2JBG<2%-4-+d9gyBY~;mWSQSe_+Z=LsFwQS z_vGH0N-w4c;5w_y>V_k2yRl-b{a;{j*+0*n7bQZ|l~9n?a$-CU>IzDs_d+7zcOY%? 
zojDXnrRaK$7nf?RN&G&jKa>P%?^mNr)d}545At-wASSlIx?{uWWHq?Wv#O@ zzvxz^VnCa6qTR|XQwF1`b{LW?PfGGPJr{Bfh51-P-LaHInvia#I$T`t{2!!AcAX)< zc3awjAj?VT7T=m2=AF$I zu}x9%*^*K&Hf+hiDP@zf8r{FcnG?J3H^vu%-pw5wDK2&W!u4J5eUX`q83F{k&CZG_$W-k5Y4FItd+LFv!~}Qy^W08!dTmGeP2X6+i06>XQy7VWFQa| zVtHhn;KP*scOrj3YM}+>d`8i?5>v9*Wq9Tzwb>B8u|RFE{M!qZp?bEax(URxfdRa-2In_XD3F zWr*RN*{Nl?kNi7JLjWP>^wKqb1yNEgGAL+JR9FspljHp3d!%B< z1m8_|eX^yT7f$8tx%D~=ROj8cy6*EcmTx4uLsnxLBRpYIEC@FeV%mA{@+uQAq)nqA zJ$ThnH+T?u-3mTxYj(#69-vuM#TT97b&!#9)p`w0jO-W#0s6)dzMZbd)g=--O2t}{ zk-BM2^z2s)&U`*69=CS}9oGC*@iMn$yKy^O-@Oqc&rYScA4+|O_kqi;IV~a16`$8d=IC%hh|O;S#h<4B{Jt@XO!jU znNMpxR~P#2{G`=GZqxeWto_{XX~XTkMQ)E>u1r_I54^D}DC&A*0}^ylqI9B^b{5O7 zaC@NT`V@cCv`QvOJ=RvROCq!=VeoIfZFOpyT6 zl)b@R{Eu}`5Wb@Kea;LsM*`~pTCEXR{Nct1-j&exGKbvRMElBdhM7dN$bir_HLhU$ zuDpf}@`$jM_U{-rJ@MEAZ7j9f<6<@D=HiPj8gFjAZ2>nYpSG_X0>TYH!(W5S^YS$_ z{rh6(!(3UgRbK#um+9Z973<1MdwzXQ+6P-j&Pne=LP*eHj8L{cAgQ?ZAeeYG&U z5_(j$yBGcO;Ww75Kz*m%^`g+_*9aJNC6Y}Z@HE0qFaV3Rd@B+zh4%F-sJUg5%J1`K zB5yNX)ML#qKSkGI?yZ|c2vc#Pz0H~H7GEuMuY})rl>1`0SEK#mgy$26uz1J~O<8Bs zuZ6e=(;t)#D2Y69%>`ja7hyTI6}p8=P1R!zaL{8RD(D`EU@)inle_;lX5FdpO?kPR zLp^{n&e!z)flsbMdG=6FlS_LuLZa{wd!@tS@Glvm7kfHlxk12r5ktVQPFGc~uOZqoI5C>%*Hd^SnVT^L`ev|KL;j`)z9 z9k?2C*(p@gArzm>i_+Y|y?Z`TP=9`adYZpT6|m?k$^0ekZIOUCDkxPj!nM6V03PHw zU1e4SkyYeWj7(2eE`@uPh09+i+*5tzU+ZZ;F%7;wE=dfdRn z&A?Jd30Y3#6S5c16X@-cG!UV|_n%6?7k=`4$o$ZXC7AGaB5x=wfvHeSM@Nz`)-U6u zjhxmwf5)-3_{#Qb7tI$7!3SenC;9LL5~%>{KSNnPZ@~!&@4_=-FHqs7U3MWEi~v7@ znkI5kgfrCX)l)@(@C#t)4xuoW&vy-Jd^Osw~_!d zqEtp_JnxYM#V!7uB>JpeJaZsgP6UjrUeT5d!M~qEa0DKH4&eo5(G0;BNyr>tWX`V5ll*pYIBjrk=XMo%g+}=lruIxz-kr09QMoM8NF=q}#^NiRUXZ-Obe##`j9=s8b& zUgYTBrh(V_drHgTvAZJJ0PiD7b#X}y0svc=F1Ru5&*<)=_vj%cX+}AxbtAbQPG*W+ zz9a8HK#Y;;qi>^y9dPJHz^x=DS(u6MPG2C>CywIjYS(te&#sc5{PZ-{VwXNwqd>QE zXG{W&u8d>l8eR+mbb{v6O8&v_@moZ+lPcT zcd)_6f8#q57Pd-+1gabnFu@BV|5X18?Njp}hNO5cvwxXbpC4#wVBls}#RBPeR`(o| zzL9M5FhzWA^ zF={yO-uSg@_xOY)bvqVx(n2mc@FDg{kuQb$m)O>uawni|!2wj-3cg4;Gr55_VkObI4o?U8tP!>Ho7$EQ^uf@ 
zGK1zMiF6(=`%lM#f`Dt{c_};J*(Nb|8qKu+c9e`MT7~eI(JF?%vu(hR`sgw6_*gXg z@N|3ZZ!k%faoCdj9`=JjQHtOuih#~O6zr*>6}6vD{}b?qN4(Kj2SpwmECDAZaoY6i zKC60MyS;?7C{D5}$*~!7IQ8iY zv7$Bm){uiZ!q+L5UU!0!L4*bYdPUYEil2jCn6@^th=9VV`4XYL1Zt^Emy$!&G>a7p zXP3)c04f?diDE`R@77+Jv|K2PKbOrO{@89O!dLUKW9l5#p(7+|MfoGY%HsP5LOWl@ zDd&9abn?Hon=LxAus!WqSPP;SWVf*6Gps5fQ5oj&5;t;NJjgOH*gwoa`ce%a@ZAb1 zW@;*g_Eq+dZEXP&?=Y1{QmtNZZfj5K@hP*wfY?<;=?J$bjhU(%Ef}`Vl zxw*XCmVEv052ef#2jfy4(xVD4nBeDmF9qRvqsmRIkWrh z!bz3mo|)4|d_rIQ)Y~E-6eWpn_-(A@Ao=7ok_7kp&eJ1`L}`C~h=xm#Z=AQSgQsoe zprHs`87<@cai1Z|e1G!RZ~yS;MJrKM4-kT?SUbs99gVc&@3smzfr>As%Q9^+IORi; zA<%OsLS#uI8_ikqi_WdZ72XJdv-95sfmhBlHL20$vK#aRn#lg|#L2Ntd5YrutFwa) zoF1Wy6pP z!Y2Qqs;15l^c+j%p34Kf(3R5!eBT$X@Z}}BHl3tL6zGAQdC7BMjGn4ChHwJ}MqFGY z;~Jjm595R%G1@t8_vn7WCS6_up7C0ZgnRBbGB^+loS@GEv69;{79y56bkPMb%$e*M zqA%VQyknuXp>KiprCX<`}CIX-<8ILF8GIn<4V<+{g~k6NgCHKx5V@%hJNJTChc@H*tH*;_guH z{R@Jj0nuowK<2pwq%ggMhH1Zbz|uC0#sjKLdlGY{>O!_j_pxgHJr^S=(|_-xcJ;Yb z{=@J)g5261(jVAxA{DqEksbx@(i8f!;V|F-iiP?A2c7|~(aU9nuEhH7dYu0xxI5xD z4c^}vHsOQ^Rf-S#7614EUlL1%->CMzp^#_q$EMV_eOc%LzcT+0t>IV)%fgppl}I3- zDAi5@uTHAJ*IopRZT`Xi@I{<8w(r{yN_SKLx4sxYNX0k}T{MTU-K*4~$L8S$6=dRp zac8qbJy&Wf;yYuYtrjErdVJrX>w z7sJFYfL*(FubIAMngLnz$Eci+>o$Nq0DK%cSx(hI^;hTi?K`G?V%C^Sz+t+y=ZEAD zO%F$@UK6Ay5hlf9&%E}avlI#VLQ3<6d~Tlh4`mnFUy%NW*Z4Qddeum>k9$49HTo0u z@>Ax~1U@vhye`T-h{~f`hWmhP9ceN~De1%cg?qVWT&d0*eo~RWj`;fjFe*8cs(>~C z4|pA^KX*$B3>o5rpPoh1gIBJu0Ue(vg`T@T=uK%@N7J2q2n6qjoS_}}*`b{iURX+jPJ74>35}FnB?rHgV0NhbIh1V@@N{M5 zGbI;cY`#$m3=en3;uu!d?BA+y;I;JAu}0pe33zbHKBY)Ua?s#1w0ZEdg#wom#ECqa z1I2b1*+EZ4L|?s8_;oAbKnX1gJe%nOQv2OZ`>Kvs8W|4FRaaJpME>5SV)`cUua~@S zw+WW%SSr?3RvWT~-Kjma^Ms1MSByau_um0akFSEj>_5-slcU~#MukUW4sOf2rvsev3$M&^{EQOUP+A>%M<}`lI72H;XKamil6%knJ1LCYftlXZHIA z>bgr-k(^qL_Y>^fIIx}bP#_*GqmU@}{tJ2S2YMCb?K)hL@hZ@K%r6pM zmO@Q0bnLg48%;F+I%x{0XS(}py*>J1E*==+oKy`Ab{7Z%dkF`)>I+1Oys^8toFi%i zL(5t^DqM{PUqN&~L}b&x;$dcGzD@U;dOB(j*l95(m%iuHgB95kR4`0d;xqaYknx{am2MFK8XEtZ<97SAJ6789IyCWA^cWx!d}3}jTMUpgqsIFZoz9&tJq6nEDsgGy 
ztzW$69ASJx$H|@#Gz%ZlYY~~r=%nK3BX~7w+I2g7s&8@vvQ$*ybJAEcP2@ksLqrI$ zH|`abdYH-gWox{}p4$Qo?ygi2E!RVXL9%!~E2?f~n*XF@MT}VeJDA{A^EyQLgfi?@ zSFfalO`3yQQQH=UCCPgz8;n>K$I>DEXlUe*(rna{z(*gZUrYAuUJjO-j#}h{z%a78 zSj84KKCEsg=xa;~~zWN@0Z-0WYz?b4X*04IX(r3o~6Sm!W zC)4%tJMYvDyb9)+i|C!D%9aUH+!l|I!n1E95v&dYGiW#NemzF4QYrUe%lU60?V$~f z!JPNS#@@KWBHs=THHqI{koE!}TDK#>vsKoAstE=qds;t>dok9n6P~R-yMuqZ!-jxw z7i0osOJ%Ge9j>19{vz zM%2FzP&_StNXbuT&}83Ho~3UJBn0JP%Ku#z~H2#}`$t?+9KO2t*8VK++z_`n3fm3B7RB4?~5mHjj5gCmwHuluU3Rc4z zE#qE`&aKbxUH+$Il!`PzBviT2ClBYT_I3<-R&@(gZN*uF7aYHzqIh+^Ee69Er1x|%mDs+^48W}-x zY##$`|HI%P_j}sZ& z$LxzZJtgmo9T{coNUt5dpbuf{meqsBkwRojiLA$Ty@}E1&M zR3cZU_EKuUCqzKmB9pB~@ocP{S3H$hRpZ?VZ-qi-t^zy*%I?~p%j)XSkPdzPz6rkQ z7?V|AY{CAXz%+-q5-wG!K%~P&4%a9Gacz@R_6qtpL11z7oZVCpNI{-;l*lzi6?*ad7%eJ!Tf-8-Qn0Cb zuDPL=6xY8%KnKc|E2(Dj8!qa;W2~RawdwsrJ4D1J?``h@3IOcovYAJ>oLz0^Ae(Lw zS#uGIUdcSo)Hplt@8fJ1uchI)GOb@`GdK#^9CDsa#7%z=c9wN|>&%i{bxMK>{Ms}B zLM2FmObLq**kQdDhZC^b@WCA{+J8(j(ML8Y`tl1oNBFq`N3CLm}sqUVaSsXU>Z99_E0*nFr*nPu=N`C!ds#99M)$qA%Yk z>%0@>a3wEAhH%R*99T7ZaAUi_@-dT1Kx9Pb@LDM?>@#UpbMFFx@Zx8-d4*L9u;KhkFkOT#Y-RL2c(JAj|kEWo!mF73R| z%dFW`ZqS4-V1tG<*3e+{!-{jkQrC6hW#*di!KJ*b>$_~NT5?Sxzr)(7;a)8NB893q zuH-A@93XF<7Z7=5&~+R(l60{`2;zR)L^Jcfz7Hy^Qs3GhRH56;w~wTmCE5#uM{amY zR_KwW$yC0`+H+d55b`Q1=Of&$6iy6)`W}Y(RSf(PRoc!HumwrY+1p4t!r4F2+FD2< zXN0LqT+~Xta}!2i0z7c$dsP}*&FzP6yTzu712$BHU0`9)u6`T(z5`CLT#di8pUFbU zGi8m%KN()NLT=%l#o#UQ==y8%FtwZ* z7BYGYrT4mCZ}0Moo%sNt;GS*12q5L`9stI`v^X4+5!a*frk9{D!i&Y~djD&sy_1Ax z<(=H7nBYY_>w)cC<9K~&h zfb9dqKb4(hvzJWC?OH0YPAI&@x3k_?O!p~lGUjsHCFAf~ zzgk_G608l6O8e2vx42U0^xKpe)*ATNT3jr--^FDMi^e^7uerm>XLV-NY87nLxE+|2 zokoBI9rrQXF(io-yK2!MCwInfxduClmNh3aledx86=(sDus4`_&5|F{BZBsvxBI(J z43Dxd8_qQXc$UD=zIiJJH;aN<<9jT8M;&tIMJg4t2_2H2@y!t4^w$mJ^kcP9-z%L8##KZ$F%JT{%SEBgrZ7S|xg&;-cd~a7on;PvH;G(+7R%{8DZ5WgG-C)EQk;*Q zE`Q}2lfQ5n^*eRn-5Ktt(pm-KTW-HCt#p2P2ed$cvGFmZ0VQkTkI_VlN6RcR5%x}5 z`c44V+mT)I;B?=Mt80|HE=;yb2`e_}W#P;MR@qx2S;=DoDICGYI(FDuR?^lkg 
zVRe4@2+#{~9}e0RwAexlv7$+HM3{8#a&zz@3?Sv!PqK(t2=UAA=)}O@R}LIkZ34E& zrm#K$VRdU)FL$brl|C9AkZ1};i6W+*%9x_o#JPF1tx7@oZu};duPqrH&?u?;$kYG1 ziopvq)1@DC)qZ{}ftSZchV_2qNh4|&e<<^!kiGZVZnizp(!nywJ9c`!oRXm2IVwte zkqW!{)OpF{P&D|~89KaBY;$4)Mpxozt|CdoB*)V*1knI_;u}`oCx?-aowbYaUl|XL z1TV~2c^Jf${kHDx-Tq|iresfUl&a7YevP&AobY|A#R`#<@FMlR-aeHi+rHN8jz<0-atQ^IQo+Gn1I8>w0=ECp|sA%b4!CKZ_*eZ!DxL{dM62 zkfP(Fz)qeXh1V`;4S|VEjMkA~-#g~uNoU9*gEF1AcJ@zm^C#^Dp5flR9LF8)EM4C$ zM+==@DhRQ?RG9UTYfee}iH}Vk3H;4nx7vdOSj!PHNEdk~z=lVu2f&^4R$)Ev)aCq=G8B11Y!XE-#E6vh z5(247T6roxo!IDi$&b?fOok4do13xczCV-Q@@=`*-vqy9N&l~rhyj$T%>b$^!G~1G z!DN^+EGJ2bmPDM7pC#O#?)!8G2~e%lK9q4Q3Ng_)7T^`1Y?F58He%BT6u)78A{Q_`NPx4d$IzqVTiPWG>|FeM+Y7}c11yS*=KF@xvZajyO)T6=opi}Nu@j*841{z%E+E^~&$^iTJF#?Qo`yZ^G4`oi7;B0XK~q+NeyhuQC`sw^CXei4b1 zX%p{8=Yx-f#aqrAA#my?s3^%HFmM{$Sh>&v+4B5J7qTt-B0c0sNQ?2+qO%`No=Oly z;Oip#eEnILjg}~G7&^{!H7+B65gXemuVn{>gl|s8=z~)_3{jG|8|!#&@;cpTCj{}3 zWs{%i*B&{v1yz-GJiC0rUR3K%XzBV?)#9$udUtZ!0OSmQOn&wm`P{%fx#VOUd65g; zYla^+{>N3o)D%AC)En*?ui}1Re*0jP2bqIh^2&6+O5?sITxDuQCwQgQXig8R3Tk5U zs5R;b-pJ9}^$;5lm`ybcg2K3e1s++~Y~=g~z!4UnPtocuz*MFCk{MUzdF(glX}V6D zBDeL|LGzd$L!QuyUSLnN+SJQZYm@b>{k zJb#M_A~EFN(2cExgLq){X;Q=1xAXoG&w4iM@G7q@TV!D!2CH9j)ARr8J*VV*%34K$E57QCHMW>^8J02pRLRSS4?qDiC{#_^^Ykv+MW$`K0*e=+zf8W7k=r;sYz_V zdQI0)maQ6==PcfqUA`O>nfluI7CG;mk`>Zw$7WlyXn(=i#e{e{41IR7au{j4-Do^4 zIyG|kh`EVAKUO8Wd&VtSp9Jt615@JkFNS z=5-6VhzX)wC%fsg-W0PiU~Q>i#trI=(J6hbzm23DDGOA(54l`aXW@kV8hWh%`FLs` za1>)GrE~Pu6rSJv2p?ORiJL{y$q7chJjq&u|2~Pek3fOHsE3Z zwzFx@QB7R&EZ3I~Y6-a*RgM^Zb0SD)WTvvIT-Y0x!zd;hCi%39H>K7JidTF?UXK&T ziKQ44MKzlTxtFPWCw@-^M11g#m zARxh7r~SIhbJC;Ep_)^zS_&ioV~zU9vZpAq^O8jWYWG`Tk>1gIw)tYA{O!^M-fH9O zf-Z$sqaU*r#IRrUzUlpic6-r$&Rn@fYxl{+Is718@Fl;wzmEN3uTTviJd&7F`qEhb zFtxwvhlI6HW9zZY=zM$aeW(A^<=#O&u>4kPRRB1R^68Tbf~)+$Ui06ZfU>L5>7Fcp z1tI4{i(8S0QYIt7>Ul?EWe=ZrLWA^(?tP9~0P~b!q0RT;@ckMJwIAZNMc+5nYz%BK|;NTWnLXn$j8kqYm8e&?jvYtL1#rE_o|Bai+Xzbl zMj6B87F$j`Yfd*lr1kZWeRXdu%|>M$`SQ;eF(Ui^zkYW(N4XN*OpyVkPYfZIbLO$F 
zv``8=!0cb~4gy@awXec_=2w;RhhL!P)aB_|-KQt!-%VgJxK<1eJtOYf-TcZiLwiPt zT)mAevGXNP&hDq(QFHh_-U;y6ADnmJBPXOK;ZHGpp)8i4Fz667kMwYb^CAK}*!jx_ z?PT;th~L8-U8P$6K90wga|GI7w~>SH6~(&Wu?|&Il)V?Ymbd0~C zK1Lb6>#?Ki-n0qxj4>yepUU^#cb@9bab26HUD4hnbH5c z6x#Mcjt;2xZwqtqNho+pD0fvatj&cljn8X$uZr$|tY;Hb>eRL(vZA4vRHZ9|$yNUv zv2Y=L-$gmdTzfNF)r3PC5eUp=(7k@py%!AD>wk9MzBouMB>Sww27JW-N+>#nyI~*S0 zCw|8s$5rOxDG5hQThibXny1d38ADSSf|hbFtxMyWAMAx@_}_$ia20psrqkaO%6}rnz+R9OgpMe%p(MKeyL|ekupRLVCL?< zUj=zSfvr9^89v&lZ*xgDGty>o;}ZLkU^2bS%z+HnOY=EsQEV5yyJ{D|-P$wl=C8Lm zekGL5ly=2k8mNO|y$!&F%ow(W)rL}TsbLA$d(~Sb8IYTy(cK%t)fgnA5|Fh+7kcdlvK6hQxs*Xx!@0#DMEdhGg_w(>)g+o&pj8GYj4U!_X74`$ZC-?~ zLWsmx?O-q44ageL+xV5DW}rU6S>H zEo*hVA?(>UphOH%VdA-Xx1G;_d>2}qvz=CCcTHe=_SBG z0sp4-Q3Inty19Id&;R+p8C7p;?ECIneg5oMo7jXOGV<(s9uwA2o1m!>0^&`|u`-+8 zU?7Y%Y~SAYc@fL^oSNEssh)|1yuuImo>@*?MkDOaQU1v}_Rp74)XlA;?amQG(aAX6 zf#AJ$+=3C?fugwodrYX>1lJg5$A~b`@K(nkyjU}_#!@Mi&(CrU2AW$Bo2h*3JhxyL zt8B*A>+EIwZPF3}n_$#4`#TlWp+42kKKc$C2CG+BJ)D}GB?9d|Or&CVL!QIBFMqR? z2Ikj0l2u(w1&-PR6XofZg{eu2lA}5bKEY(WN|relI;6-X!w1&Wqy4&8@Yd$U|)ShIBUQnMvJ@Oc~`HUnIhc3j7l7SFSK4Q8U+J;hs+$(K&rFthp}`h)yt>i*S%K*8(oIW=`j^ z*ggvS7)<4BM4*{JFeW@sh%M6GOL8qr=M6Tgm8%FgNAw~gdwqIv2S3j6$JfN20;5ZQ z7aA0AWr3Lm4NmshZQEmoprQFpo zG&GNcW;taMYlL-l-3zfGI&i-x4ii?K!&15!`%E4*1NMLVd#~R?JCE1e-ujK)yIfAI zcK~WoD9o&r>wl8SL}!39uvwLV@F8!VV?I9@fIB1PHKgeX3$Vu25f5JW97Fy)mptN_ZZmBb! 
zl_v||tsVk$ziyCq_I-&xroGu&Pco`uWm|#Jh>A8ab~%n`P%#9&a1L^>>I!6=6C-uX z8TjJXb36emZNhYE|K2%N-7?6IJoyl7VH;@?ATlL}_vy5IyDcncO?H$3v}qPzrIYfy;6R@>DpBw zR;eWENML_{SH>tUx^LrQzHBifVAcykrmH}32DGIfe=SA(_;MYV{bYV|&69OQY%Xf| zJVEzOlL5{XqTwt4UOBq|y>JE6Y=KS37B)+N+Vmyb8N#0S#UCAj4-0MkT@uBdQ-UP} z{&@T;(zOJ7h%b^4r_3Uld4@VZG=j^BtEg88=cO$YsQVy<)b_CF7pfm&GKzld_4wCx z6XmFTL-PB^LB+%i0&piFNH}uirT*qqBN;R4b?j4GMq(hYxF2Jfw|cZ-6jK|IGSry( zR(PpuB^6$V6f9Fo);EuyQCAdeagh1&b18c}(c-%>o_f)2tF^=dv-dqULhFvGdSMV^ z`132#Iu-r&Ncf8RuWIb1b0PQWsDty3mOtT@dTgd(A5Y(K;G-j&^s#W^OSCO*mudl@ z1)0(FT4)d-g;L2vxU=l5ycj2$2iAF5we6yzG$`Nj8>m zhk0S~Q%r0+@-u!&RvIN`C{1+(!RI#l7>_tAKMh4H&CRH^{ZEe>iVQp?(PaxBSA3wZ z8ioKE?Ed)#(GN!IJisg`;G)Oum4yCtvr2HWZ4kY|Dn6mE^q^G)M7rKq%bB%{WKFgO zH4d2IDl~lBtGpfOTp{ncyYkE+gPLQw(Io=TYR?bqv!v^7*kk^>lI-6Jc-VoAe zP{qNx$y|(MUheyUuxh(;4OKb_y$HSKMoUmzAPgkz(Ap|KrL*qohi1GgND{EFe z*L@UiVDG+!smFk#aV&!721Z}<$zMNQK@vU%eJH*9xhJxzD2BeVj?&urPl*dFyD>Yd^+&U zx``K{Zr2XJ)+RC3jE*pvg`{9BXym42^GDW8NuqCkEFU6tA{HAgzIQXYd7nO-{}g}% zD*@wz4a_zKv^-dj2)wbweW{+S!(DGt{BJ~oDOvYpiX&p6p!1}>wj{4b;B@=UILnA! 
z4WHBOAFy)|T5*szOqP8yY*)m|aMV4$PgbDGM%zkUFpoU=EE${gM7tK+QWWT1f3YeN zpoMo>KO-1IcB5k%xpmjQTePsNJ8x%2UIFrG<{xYCZs=Q_kzbL{xa7@r#Z}PZB!OxA6kpKX;Be`{v#d^3!)g zI6udWhiYp*h?N41eGkJ%_&N^>9&5-!**^OX1f)%|oe}P8=o_@=trvc3JMFkZZG3Iz z4V~=_zB;XlA|uCVsn$QAMi;y>;Z%{o03?UCao5p=I(7lfr#>72G;Fr{^$c#2Wn2;w zIB^dpt*MgnDYaLyZAQa~3$9U^AIIp$umAbpd%%gd(zljeU2}R+T<^)%>k`)9V0q6= zW6<}kQ|scR=^w6tX-kol53^7rB!(4*s7CMdYZNT4WkK(4_|=3ENTx`E=j!f7af+U1 zYv05BLEr!b(-*!vcU0RoMCg7DEYIT4?@UF-+t1TD&`x3(|Ch(CT$Q#?8BLWt&_W%i znbluIW523Cthr4dBvEG&K=WLO_Bv*#G+be$*Bw^D=C1>( zU1TSt&qRju7?Z1%DL%!nEIcMoP73#KJDI`HSeN^MM?m*vFo_AT1?yiJt2z`+*t?+B zwLY#CjU$GzjmQBV!%@5W9gLPp6oKWpr{)A*q+?(#f&_402+b zf}V`uVgrzc7V$x3Z1hbA^Mvtwtt;zBQffqInUG6tp#-s&y=}m$Y&+}FHH!eB* z=EW@Pc}eX1_4t&SpXXmjQ&KuFxp|CZpML7PLKktfE`Gn{EE*>L%q^lz%(8bwkcT&F^Q}1J{#A^a=8be8rbwyNi9HO?}tDc1iITKtIcwS{W&elJwhWX59*GMaUlo8lTVma?80f#7Z`60!eNEFLA zGgw$U1=&!Bh9;ly5Jr}%hSBWT=R`_1#AGBY?u_K~#XM5q93}CDv+ZX&PE1Yw50Eub zatjPuN%tFIFK$smgHY#~lCKasomMBQEet2IWVP5kl#_SdZ9}=iFaO|U=wm%^OL=cHXTEbRz)HDm0 zdnjoywgT1ANURF2Yo_}XAv91!MVijahHj_!UQiO93vcj>&+`cM>HeE{;BR4dt?Zf* z1H8zn{h}qSicr_~!y@&_zTlae2KJ@tI;Rud&YP^6U1UwH81VLhTfZY#AcoAS=<9?? 
z7X_1m2r$_aFVt%^S;L2Z3eXI`P>}ZMU})t0Bi!C~xmNCGC7F)b^xs7NF(xGho0sSD z9vu_pe@h_)DpDpDb~No`E3;E#1ESYL4QV#5$1y#KO+g!beNrX6Di_ zv{tX!x*`XWiPHS09pwpEznI?wTQH1TGA#BU?;^j_;R%2SI!ZrCP@Xte$E2LsK3-0* zw)|W{%$KMq1UXEK*7PEtnb95JETb`1t5pZJf8a$gp$wgdlB}7)gMN}H*2Z|F^;=&i zvy?s>yz>*m6a0-#pLXE;#_k1!&}#{rzD3QbDRUMV|L3saU^#fv0pl`jb?+e_Gm8|M zvz(&T=ks)3nY#tfiqKiZF4&l?2+C#BL}ZkRAmP4*l55K6SXZ_nL1yC(Pj*Q+8Qy<* z{pJr}G)!Y2KC$lsP~O!k+ZhBDAl|nE_vNB%@wOe@C?lA&BM8EDvRazR$rPa{M>Rp=jzdU`d3W~Y%Uo6cwnz#Lb%zg8FCC}DxY)mG$Z5xvb zCbn(cwl$g9wz+rgWF~ed*|BZ&=A8SU_k8bvaDVN7y6dT~>Z-L?eL%LA>Xtx+#dkcn z_ZJFsEr)Rc<+|g6HoIoh|F39*qbo)o+UnI>r&}?lQ$Fjbt_r>~v-i$S&+Dm5&u`1= z5)0QvgoItU6O4i;GyPm=m6DXrR)9dsu~!Ux&X*ikoMeledw98kyZD_^M17Yd8Ps{o zjucDg@Sk>F8ue#Cv(&4aWTb-qQk-Qm5}#^(-^_4KJin?010`cvKk{c(=a3^(COkL> z9sKWFeRAuo^A6abMDd!VW6-~!!_{-46@uw;%L&qmOb~Pjsc#XskpQ1VglbVh@jk6Xu8Xo*5f4H zxq$XeEZE*|2Z%}1Z?!$eS zhEPKLzxVy_6M5JV=lL>7*dyBsU}|8-=yCf9G2kI5GeMRg zBYr(KSGwO5ab9oc=`(3Tn$w+TplLDK{accVzeMq1Xx@fr$-9oZBXL|bz}T?vMwY(X zXLsJ%+B96)1_G>^g=RKxXsYLMd*-g>^G@XEa4Rs^KM}Au8EZaX;hsFg_Pj)M6LxTP z)>sX!)ifkK-|16wmAl3pH}zpylS@aWbuwv1LMmW#3Ms}?Bw$}Is(d&=Tn!D;%2X|P zFz!#2I=u%M4XnFq7T0R~4%O_<%(PoddAlSk_N(;vntqG8^sN4UFJh|m{dUqVhhN`) z2l7)j6`fiRJ&nc*ofCjFwuik*hBeo?H*!YlHLxmcfFx1grYlcr$`x_MGr4;`@DNbS z>TTlZoAnh`7K#d0--r@DL2hgZ<{4O2fvw>|wai9tL?`9Nro(Ivy&X&9r=&wy^Af{^;`uQ3Z7hAufe{ zLKz5iAE3OUxHGqchoLmf3N$rbY(Zo80>0Ao;;C$iQ*hcf4-*lfu-#oIPNR3Pkq%_B z0I6+8xvP$ltGDehN!m0S@FRC$qE8L-fIA-o-Vo>Lgv;p+ll&xE9e*5#K1GCw?e@WFQ*AH+yrmi}a%i+;t9L;Ht1M<(bBp=>44R7s zB??-nCJDLzH@U)}ow%9BmRsm!SQYvr`z3?@${j#WwM{3b^O7uGAOW-v&sgiLcyy8ONMIX} zmK~Kv9$V~2*IUq7>$y6Y-AD@}bEqsuu!}oW@7y<;EQSlub6=p3w!smmKb^Y>)Yqgw z#t2Aw4ra)!#P-AW#oc&h=8LCcg|Y+LqqR@?_a52TC=fQEy-(k6cSwfZ7R=+L2{lv9 z`uuEn6X$9d72ovVZUCB|2BeIFWQw;x-T2bKcJ|$~ji64dr#olpCy1%M#?GP19P6iGJfU@Ehtptzq-5@{xlw8qR#))W7QZgf;RsDb^y16) zr7JGplWgJ%22LZ1CI$`vpu;$dxSz&Y-oRUH;&QgxxT*tFJX|d`CxvRyR(74@jf?Q| z-^wnXDAy9v)XjOhs#BwB7z3dl;Ji#6c6xfmZau*dOTU@GNSB>GxE( z2K~Lvpwi!#Dz?gP=7qIqB<2&*u?tZo76MdXbFs}sFMqgz#LD$dRTyp_(Z5L6L@^Uu 
z+e_Nn5o>O+61DHgx2B=`eo*k1RI(tH-L%7P)M~Tvjz@-m2|PD8Y-2IOGQaT}d*4r( zvjJ_iGc{D*t+1AyiR8Q}Znrt=iia%)ImaJ}N~SIScJ%vN3kf7i%~tInkaqp7_v5Xw zO;7v3v_IDT3sK!vs%H1UhmZGR@t{GuBJErIoo|MQ0N-7D;m*#RniF#F8sv?|&Zkyv zeL#)TTvp~bKi-Z{i4gq?0jLY6p0Cr#&h!CqzAOLo`Pc8(gOLk;mpgoYpPPm`Eyf%G za){}{-NM0wS(xjM|EfFA99d~o?e)#_@_|REzu)$RTw<0@rN53b|B|i$3Fp%KbC2*_ z9ea2pkXN4nK7!SLgGSj(vOe4$9{0*jEoO z`*y<0>evJV&=SUpb(DKe)M(=ud-}y~TzX;SX25SlO&{ zt{JH?Cml^5W2@j&+zI8+!>f=7&SC%O{yorNs9Z&~?`WFVbF(2`yE9&{og zTa^=2p=XW}gn|3wGAN|-0+h>0+pYH`;279_^~@$F;y3%NzTV2F&(xw4>U+dL#~&g5 z)&J>Trk}emM&x8otH;TQ#y6jrbtPFqm6*Rcv@H3a76@Y;0Xg$sdY8mo6*QcD=}97l zI~dYTbw$v?BJ1^8)bwGgfqGSYH6$UM42<95{32Db?LPmF@-yl>s!Ko)-3~#H`UYGa z863W3G?6a*i@as}77eZv(+MSC?>)XO!dmPa+4YHdG(13at?$W~>ebmu5LunyI^26Na`i|K$V6J2<$EIJfW=m)qYx;+R43q>I&a*pU*u6T+P6vMt z{8Om?a$RPnV==rV)8u7z+031r%AIS(y>)o79(}#5oA9o&b!|eU`wjX^@8a!uU8$SN zw=kt&{IK7bR5{>k8;T2DOx}b!?MmbQw|-qR-f8rl{n`bmnN@dC)Wco|C#O4HIc&X- zKd72l^ffTq{uh$o=H*Po!}j9_)wk@QVJHqD41Pk_rYRC+C~R{x{(R0nbt-(TvG(9! zc+frNvG%gu=izUD(M5djzZtWJ>MMTSRN^<%vl{R^<~Oo6q~Q}UJnM0q_$i2pMDQ|N z;34>n@AazZxo1NKelD=-Q!IJ^IHV+O?`X@tAUp@odbmP<=9S+BXW-(|@|Z9W*LO;z zgZR;TVXQb`@F9TL?czWp-tRf0XLM)Zx>74UczKleVN$wP#_>hq!5;e>uPtbgKBD~>pDr3f!C)u75(Dd>M@Q(K@`ZNzlU48id>F^#l z490g=TlcVj6ZOZ#|D1lqf5q_>IRyTz&e~0FyX$J=1~`_$#_GUXYzSz)m-Nf1kd2mb zmeLFHqwGRfoI=lv+lMep_lrSzf-u|DW|}jZh$y>UgS{dAYA)q{aO7cy+G=Z8(N+a^ zxiNy9`V8>_YqY++pp6dOkO--)r43qM7W3) zBaXERh;m3UU$S9`c>9ao0>>|lEPK8lsgS*uD}f!8Qj*F4I_4?2x_VxOLwy2jG^x(8X_iiar7KJg36Ozb!nx z-=$?Fg|ou0N>7uqig|fvNh4+1aE&1@WJ7aU9M0K|Ea~0T>2Rg5)~kl3-()6QxMxoH zyu6~SzzXyDUX&GNc@V!~(YgChkF;L|h;gh19C~fW_@VlCU%WHl6Msbb<3OFi(8(eT zKBTqu__m!8evowI^*CL2*P#&knLe)9p>80q9n4fjbl)dY-_QY!gtkMQvPT)KTHBEZ zN_*WV)j}H_J}mYx^ZYqKhbXr503W9Ququr zPYIXI1_Bc;FF&r@e8kuZA3l0l{Z6l85^C&f=#=d?w$g!#Z0q6U%TZD|BQ(Z-yO>S1 zA8nm5(=c|A71?(;v0l-$Vb*DX3a>NNCJb`Sw;k>H$5dlHc90i%jOM` zVCYY(ipMx*eJNI@Chie9q&Y_1^HcyQ~MU)(eMd0?T-Ua)*;D zNaQlSLh^z~%kRF`th_UwclBQ}9|?$hvEc&`o#r3my5SKVjNMO^M2>n}RNR&bhjH2Y 
z!rR~m&s9tOM3!Sm*ui6$#9>3Oww?zS^^iQ$V9|@>a=yDIg1$$AM5A;dw(vt{@aues zYBx6Cd%KH>-*y2C0UxIRsW;I0OOnpn2Q)cjCIdGOjNcN3(SZ{=oo`+Z-m&WK*6dw$ zz_Su*$W<;Hd+?x17wdK3*JdGR^M#BiOC!GY2dXgFjF*^zS1rf&G6u;@K8H+&an>~} z52}OE##0FPmX%|+*yU8{CW;BeXjGa5+?bsCYU=AD2!`15Fpp8bfEwwoRR3>h=-axn z(5tKz5C6vGphXRyArb+i&b$=L(TQMGS-IsYRr4ZTYFk&PaMUvQO{>sE-ye|v;pIotVU{1u81W3I`jIxwAah+^0?k>MVWp#rz4>Bz8> zgoizD$?t9+1Y|@7480++jKt?3$g}!||7^B0XGA2zyLtMamGOxS{&6g&_aXHuF1x|+ z_d2k#9&p;E^I=GF9bGV12kH^(w$Za4Y5!ZhcsRN5?%4$>@zXE?O_sI_%5A-gb_8#o zPp4k>roxw)AaT-8MvHa3+gsJeCG?yJGq4)juWD%N~2j^SIA-0c2ZJ~(3 zClj1#=w386kg4X!SjSxHEUNHZ%6weYc)vf#?yxG+u3 z>n@?j@sV4;y>JVT`kwtsy5wO6|RT_7S}l0bGc=#0WraUAjak~7e<=;H{0>8vx2SC1S2SS zD^=eGD2J3z$tZYYy2aATiXNeI@C`>OGkPz!MBCmcyjG%MA(zTD^~TF~Fjd;3>c4E; z?s_NCr$w3#K(jwQuf(>`G<;Yy*Ae!u(K}qZ#TBt&-lFtxSuaAM5cC(TFQ&L;qV8we zC>J6v^-p2!p_cYcbqhqVyDoK)PWbQi$`V=xvAw$Sti=hjW)>~8=zncQCIs+Yu}l!3 zanUEtO}tj!R|BeU?Xj^kqJd{a2(!gpJ#=gqA>K6n`AdDz7A$E9Y zJZjvhdsN_t)+8Y?3vf0UD_qM)vW$-RG0EJbAi(!F9r0e{bG+lQepRc{^E5qta?xJ6 z(>hkjU6-)>`Z{>VdfeS}^mTjd(tF7m!9u3GB{>aVbAopA+9H*2I`Z!5hG5 z=R6k$43FhXf&PSvN;~S%kh9Dm$8O1|XWN&4PMJ2Sw! 
z1E2dTkpoqE-T~}PCAXSebWNSmX17mNZK3!Pb-sr4DNulst6os!Ubt=2|zqHdA(GfdytnEJ;pGV2T zL0wITKdbE%+n|s0k(-=>r^w{lM>qAqPuSZE^-7&x&G&(qMhFJA&SDkH$7yJ4AWo5IWj?jAwu>Qs^`~;fHdtU|YcfZZuCghEdv!#{Jt>!kJvbPH| zZzElguf^0TxBdi}V_)MgurSi`h3yTeYu>bbXa4kZ>U3`)$*!G>5g+N~Ytd!A`Q{)W z3+}d~IDV?cC41k2&;$sY&J2wr}r8fAZT*40GBr(8|@iCh|uD%$@xjxQfqXBkg@8V_?V z*b2sJtj?D&ODz6tdi^P^8p`p7XhjAJmM!Ck4ETH|>4u?gQYGpd0!)FxA_B%|^3bJ4 z(YcX^S-EC+B`w@Mz%*1S@th;O5Gk>?VK(|IbDU&gA7Xtkathyqn#2|*5P2~^C>p#W z)a9WJD>E|O>{yjOT_4@9)Nv@Kik;w;lSTlO!L9*V+#gU80mAt_AkTJEH@VkgjUI4s zb&g@d(XcW)3hC&P5Ch(I)=uoYc#VB(Pwl7AQbtbk2~vb zku?t>531pgfi2?=$Ck<+%;PS|>ug^-m`@$~N|?Fu1sOPEEDxJbYH7qXf#GKbj&OYU zob112VZxqme5T2z#8Cb$?D4qwk#bP^RD9# z*4dVL_3T_dSSojdP*nlIO7;Ih#c1XCK7os3t5p805-r(^g7v?$|B4I3ng^!{=8%T( zd(NdIdnS1k9RrfEE3B3)!^@(bIRfpHlogc+8(eBOA?$s22Z1Jj+Udv9=*YOCm8Gez z)!79WUc7l8EeN8Fqb&kMi*PVL+9tU5*Ko`{jf7&2z$FHa7!YMeIYKRs(W9aO5WAWy z>8QO#D}7inL72uTseZ`CNx8Y#uJmY-)xQaEs=U-nJ1mp22m_?>>83FNy*lj66=5Sx zSu^HTpx(3mh%nbrzE+*II=-BYMm`eSWxmIV#%<%Xdi?5q<^@2s`_%!{83D>Hf;fj` zeq{q#EOas%F5mXfk;$qs0vaQ+#f(@A(JU9Zr1&MRoO1UBMhMQSN-J$rBL;9B2N3fl z0^M;<&PRzodfIU``-d`qC!0*tX~a{-9hW|Bx;MVG9?pT@e+*1gK|AU27WkA{e<>= zWKLWMnc~g_F6NI~9T)&VerST#ea$tqQ{&ZusW44T-DL`@1vi{X-dednxN4hNbc?OJ z*u6IGK9v5m?9nSRM^UjN>h@I1)OfjRy6x68oq1XVFTN8{>B0PCoW^;qwC2L#Cq&iq zpNJ|Jdr-e9&GWpjKPhEyAQ@zPoFP1(w4g#u%uu&8A>&ihf~w=zRA-uy$s}lU->2z$ zNcpHa2V%)?*B`78^|s8xLThbrkAr2r$+AF%k_MVz zUVoUGP@HSqpF@ZTb;c(MXp((eE_||A*w>}URyvHfX?08;or(DvRpj1NPuu0*Y7LxQ z@7-Iag-1ZbJ~vd9ee11zbtIq88J&>&wy844r*9scWlPSZvv^l)KZUzq*?GC4@7O=)kC|4EUE#-LR${*g#l zQd<1qdd|P(1O9dK-*@S#{;^tqQUU(&)z90?tQG&=vOkp~{^w2N(aE2JNdF|fmi*^c z#j3LZuX@XWn>SYeZ`Jf)wB!G6A2aD6c%y%@>HgDxMfyMT?N5r*e|jP=J@o%iSMJ4Q zK)z8}0gB{HDHkpc*-O920|q?$0;kr_`(At8Yd>{Gsj&ifvh&WCf4MwWBCJnVnn^AE z-AhlL(k~nLs1dIads8p$VeeU?KXTEVe6}nJGi0Vk=(ugaKB$whHrfqHJ&3_XXd50L zXqvzts$=#GO$i9dPzdZH6HPl2a~9%b<^`QPrebL)|L}`u{$sy=_laB=>bsqI>WHveuzOir94pkaW7N7P-KKLBisubgIA8xQqq_JTR1U@!^m+@SfR|J9i(n%Hbvx5;i|Km37r zFvD7=yNe9wYK-^e&bDSbOfkbK|g3H?~%g2kZ)$!O}@N 
z(8n6vYCTd>hN8c?Nz<;hV25~&#=_9KMl|E10@f8rHw7FuJt79pr+W>=w5X8S1Kg$e z_bkFe77c1?nO7}pJnm-40L%Ndt>Y9%r#k=bZlEJ*YNe21^^VeEErDUi0v<4?-LsIv z96i|ese5cAplj5<*W=|_HQ12+)Hn{+ogOQhvm7s8;*6b zOPBoTABF!UK^nvY&on-cZRF%EEZPQAie#rw5Tz~L8|RT|UNc_=tIG{+o^N~FF5?J^=iXOJ~lsDIP|kb!IfyE zZ*58-%|=cN`WR5XGq9X~7`)S;Vj6a(1*aoupLQR*2c8f1vOs*FXZ1qs*Za#t@484E zAJvbac5<6^Z-}1pz1xjR<2)4HGhx3>kNBo>mP`G9ZzGfLLv4&bi}@c*)o1GKL&=UA zP@k@L^wp#`nhwUOCuHANV&B1gHl_IS$zLZHOC&25Ym{uMb)%zee#8zvT}UG<;F6n)4LB=FJy5>soR_HK+!6|LX?Pn?^(nu33f_D4TedmFRMa~0sPKC~eRwDd z>p9Kpt?y*=##uhp2Ibzqc6tIJ+1Rr_~;v}*g9=lp^E;q#N8zlUHD9K5i& z(IW3who&MmnAz5swM@1R-uCGmVyZ?aX^vmMeC*RkOfa6ydz$&};?aHFBx0IJn1S>w z{y9#p+pQb6J3>Eh#~GWrYK^~8ty8s#yN`Sp^`WQatqQe;?fJiLRO#xM8FQDO+mD&9 z$D0I#Uo%Ur#Z|h&Y?gH7^b}b*EBqUvJ$vYKQEZq&4`rO$jk;1RT(>pONX( z$y}+a0?#Oa2xKvB?B;^or2R;2dGpDr61o<)IkT|Rp&R#kWsq@Sz(<9)C%3^Ta^f%6 z^&A$T?!rjbScI_WpXy$XVAD>$539<`kHxm1x-NJyX1~Ur8kUh&=>gvq;U4HqYTP!SL=ZC=6IX40-s{#(xW0XG=`DeK>MzVARc>CGe*BOHq3X=0zMhrU>#DHtkUm&0x!CE5=D2!MHthNS z33ZOwrhz>_nJm1Gmg!s9LQM(I$)x=p7MDzN1VTF}um6Up83mt7N;Xh0)ZW;~qI5nO z%VD;QJE7%?ZFu)d;rEd6&5O71#w4QC&X4J5-YP5R24c&j6YBrC^!Nm9xlqnU}>nrJB~O@y5bagkARDXrjRRMTj& zsH4EjaX*`k%|5^#bNmcKVXfz~C-QZ0u;dZKw`jOsOYz59()Vl|n|Qc!*>Tk1XK?#a zsmO@i{@CVVlR>l%RSYBMIv6i!j&6lSijT>DkySf@`-2%d+lW-|H$umPgp3D?!CAs* zl6g@0S;LRdkeIZ>Nv&|Eah7KsFr9XW56S{W?&ET0+E3z{OC&$i2^q|p=VZH;W-QEe zjHDyL7<`kJX~sk_7)^&47h}M*?pPchVcE=edV#PAuQZcY@vy}{JW}j&bkhcwPJEZq zEb!+&h-Fo(oPH6QTYtH^rn|*Gl$a0Piev{&v8cmY8odWq1)J$$1xW`s%;l%Q;*^caCgSJnLosOd6DTkbN45RB5TzHmgLbvZ8Y?Ep7x2?1WGY1b`ne5~?Hqv2dAcd-^@?Qd+P za~CFlqx6qp5S!c{@*|_R1fjLrtk2gd1Px-C_Z6{pCsRpgX(719p%<#!%KzmfBc>^4 z0XKZ>$fxiZVn<7wJr+EW*`x$aCZxqY-B18d<&P1#G*3OS7F~~9 zZ9#Pl8XPa8Cm)NF>JF}u(mzT=HuW1 zJ}eU7?%JbC0Av;zm~*V|G|g0odh6mS?6^kRcJ>hAFm1U}Mqv*UB3Rodxx$I`#A&&0<-PJj57mn4iB` zr#twNBE^PjAd`sdFeDrhj$e9R`t5Kupl$i)qsZxWja+LQu0DKAk#;|o7xv7*O@&D^ zVW5mtu&BpSg%0rCVjp5Cz9SW2jF^}8u#8KYN7YiOUw%m&U0ye23#MkdOdYc03$#tn zH3}-z#-XtJg42sE=1g_%pR8ZAnr&B3d4ZmC%wld`$(8pf7u+9!c*K`;H2%an8r>%S 
z1G=yVsi8Hauf$D;l0|F_q2*0CBZ1-94gr1G&rUO|Nw$1&M)4)Yn56KGnLFN5_I}&{ zQ4*m0Ot)QE^@R@cY8+Xbal%W$U4tsAd&bQ~ zrKafHo1p+#Q+hX4Wx0zJT1nukOo!^j#?#D(e2T0AJX68S^ma!s67~`@d5c)3eV?vc zlSnCOb4s^tipEEDO&nR4t%*%{op#p!@1*e-yjfGlH@t*IW1t{S`&F#kYz9pesJ3t( zf6jVIm-fU3*JGRvq@My7IEdki6|6Vma50pq!Co+NgHRace}Bm#zHwhF@_J&h zJ1DDJw5d%l1>R`#{1$>rkKaN_u(!@MNk9AG3uCVc!&!yyih-LP_zm$6u?NOarW&8YKq%FW(a7!!+U)$=(4>eS<$}O+gF+E9 zwGx-tGw!1}5Ttk=h67VIymDYIZmYj}sT(Pe!ot>TXU{uDpR76Ed?h^?8CPpJw`sjT z%Q=c`a2-T1wvJ`gS#JriVZ3a;&f)rqGLu*-e45wOf2NZgy-gM`YEWN(#IUo;R@9W= zXGkTbqb}iDD@W}Bf1BKCB+Ab&E{ygzj3Mv1S)se^r1O2XHRdJ=-wxUXj(mOmRcdXN zHw#E8>8~HA#3u(Q5PS?q+u6#1GsqTygQt%_EO5zygjkwUMus4hZOQy&`Un0o!z-IF zYpgpLCh*L+LkoU&U)KX+vERP7-xGRIA(#Iqu}S}1pKYmElBIEb;ArDbB}!Ac`60v~ zw{$x>dg|3xw)9VFs^l2e&wY^qs7KI(kZ24ei$FE4VYRQ^0j#Sz*6^V1?GUW@i3HkB z>^4ukA}ZV=If0lqxiY!ib0s9R>?*{jzTTn-LMJV91o9gn&NuUz#q?ulOh%1X45v?) z4`(>`p`qo#s_W>hxjc2+lv=i?uN5d^bCEq`i6V-^VURW{(IeGPF@P_c}oY zQs=Z&<-(hD^L+s~v6;vM<4GO<>u(92{+rM!1m1&W)tyQqTT%o+QoH>(bMp!oxGl0d zuN#D&9d`h;$+>i7R9~kpm7tuR9Hk4Do?X3n8+0#{CU_cR_OlwJJi^)tpm<=~AbaVy zDYNb!?5hYnS*Vu}-PqEGUh9=IF~)fYA1v&nKu_#(s+P@}OAA>ih%6LzH{Ne4B4?~} zY-0ZLC|l7nl;YzC{sZ3R*9GJhG@vV>C@mY4%?KEQE&vdqYs@ac{u1*=QW-J>N5(0BEMX>#s{$wBBXL;3Hwi zLd*z9d|Y_DGwq~mZXsL9<};H3wl{~BGxsM}VqXoBMA6k*bfWWwvaC1~l%n}D^aQtb z9|_~!Z0t4`ptl_=bTTb z<>H9H^bn?%W#KCim^_qN5w$QPG8amk|BdMiPB^~bX|xL`mDPG|7s%9i_VwZRxcD|9 z`Z9Y-HODcfQ6LOHWshfS#a$a}(-@D#-a?MQ=96)LU{tu*I%%7dzt6~DvyAxkk=Jz* zm{8Qdn0KZvRT~-_XeXs(ZXRQrWxf+k3r<7`8Z=y8@X@<7ZWDnEO-_5(CxItT1`7g% zFKC;U^0NxXZjY&M?UK#=;Pr6Vpr=`4eYwlooxez??E%Sw~f#&eRfF?mk z>Zy*Gsx(k$CD!{QK_0l|DZdLXn-fQ0{L)IJ;MJ@uRmfPqu|q+N;z14fEgscLH!`9L zN9xB`Itl$Kyx?So(5|CxY;s@BiryfPm1!(Ks*~Zm7&?ydreUe4n?N*YkfVpzn&=8T zbgBvmiUn)xrZEAr%x@=I`(*O$U#tAe?E%jtV?;kLAb&N3RVl$E@(dZ}qEdv4=t3dI zKl~8MqIrASmStSlr1uB|43>HJM2OLfSaTI3*wPc=+=jt26-CfwY}A@rLGYvqt9Qqv z^NN8t88>5CBJDU4FmE=+Cvk+q;TnDMGwq#v|M@dA(XvKhCN5ouAWb&cT$um-N!AiP 
zh{Oguq&q#9{7PAXEH3^B3mP%(pMd@o>bL6tiZa*)G$M1B{y*CJ7?h0jNd4EGnAPWo&;^8!#(p-3{^!ZF{FqR{Mc^MN`?FG`d5^OLTSW!G$WL0sE`tz z6M+N@mxub;n^>y)&j;%<33wm!17}Aj&ySb!R=_u_aF8~TtH@4WJCB$M8QRO#KPpj+ z7;RojDXw&LcVridoTkcO{0beCJoVU!aC=uO^5xU(vyp0#J@2b;TAqpoFD;yVd?As1 zc@O_rzvrnwVnHX@OMd2Lw+$Dyqot`1_seYh4vm^+r}Y)R1A~s2&KB<4PVbS?DtCR~ zHx2c)0Kk<@!p7~+hVW}vZVV2&XkmGsw`r%T)4MDJED&DNi#DOh8h0?GSNs!S>7W;LFvtdO{oup7) z{`|EJOo|vVRyP%9x!B@iTfN$4-yu`p8G?qM+=B8)lZb@EM7mDQ|IqZL2x3i3NE7@L z<=lXSQ|_Fcgx**^;UGrwDjkiN<}}OtJXHlIGCchK;?1hKnPRV0c+oyu(yPwul^o)9h7$l z+yQJnJ%JKF`aYJf4u4Vv;o(tsZ$wfgh?_##CeLq6v0;@mM9>O+B_NKICW-gdO43Nr zc$1ouJQ}w1-1}AN6~wuQhCUBf*Nf{4TSwt0N#W$%x*!=BP_}h{%j)OrIl{5}{()|9 zb4xz}@1%2Xy6&~*3 zPg|NcQ%NC8LMS-_>aeY<`p0#v=fU?2Rlo+|^;oY-dekMRiXj&PUB}#1I!-ntqlTqP z&D)}+Ps_~IT23~nPpPPBN%Liui(|J=vZPcfs0e@BoI#MZQ8B2W$PqTUEk?rW>a^iu= z;8X>IgTx6f$dw&ZD~?`q9?I;^h~cNip9K414W7On@qbx#fXI2c@iGHJP=EizOCg7X zTaIOyLz!5*NkSP}>h3B_WvS?|q@R5&l_gswq^=wQ7Yej$R7NVr%-Bi(LX8sVLZE)Q zoi}8B;RYF5C+<|(uYobZO3N|yvR2ot1yjI524@od1%^KruBw3qTLw;>j`KHrI%L?0 z^D64avi#7uO|El}bQJw6zQk>UfL%=vI=bJpaHwL#BM>G?VAulXUx;rJA<9}hl+m4{ zWPae|NkViIO;~87Ybf93&$W#M(k}z6xRe`hZ$WD31NZ(@YJMfRd{z zHV9R`^e)8kgRS?hmxns$@%FKt#xc0n04x}meA^RB3*WE@ipMKkg|$Koj3Ue#hKS`_ z(;x2o=b(3k<98|Jv&iaL3Jk>nNgk_>JV?O%CBUYt`$B)_Qwi!sb|f_kn{(=#OFJi} zw7+IJcmfNwD9~=4+A*x#tcH&TY9;W&ef`Fyk~F10Y0Q;wPj;J_>iK1tH=TS!VIKQA~dEzk6*TCA2Wm z?z%m0zghCg2Gp7yrA*K-y4GmUH95Nx$Eq#e+CYA^m0Cc@Guc%S&NQ*zrO_|S{=w~b zdTRN6a<0=_??4>iJ5^u5>$LH@uWsFsA-MCHR)v2do4YL>I+&a~Gf7_E@mdUnA9l+mMszUGs^=7OMN3%sqk_Cn{P8jif>d#$O~J&p6@?!5|)cf2U0UbLd-3dD)f zk0UAB4=!@cl&&-IfXSe53Jlo*6!D&?x3aorpx>Vow*v!^OW#V6nubT7k2ok=2A}E0 zcxh6{|5-RdkZO#W0Qwcybr4O@GN)$#isfTY(!Z(Z+=ians(UlKR%R2c_Vl^G1;nhy z4Rka+dLG69@TW+|BnlBGg1_$QzV3~du}*uFoGUqjW!41&uCFJVb>{+(R?e{X_xL|X zOIvhaKDaREMdOT=WT>XVIFpbn19WYPHfUT@Fr1ZnAD z3Zg+MO489IJWv#J@>BC(lwFA%mU_ZVRuu7GZ0P{8{*SOZmbzF9R7XUmZ=NvXE@wcVV~hL&z?+e@jdp0pcE^RhYPrc-9&-j0^2Z=t;eE3~Lk 
z?cS=>1>{9~Os#3o(A8^>_#E6o*y`N6Jv9$408y5{D_}G$$eG(!%vv7Lz7o+Pu%!9Y~|*NZ{dmKlS|M9dz<75Y$;7ZZwhemqVdb?j5^PPyYU1 z8CpqAgqGsR6&@)aoJo&P0$fV>)SLr>x1o&nAt7acHND~ zWHAA;#uFx8>MXUKtk%p<_y8NvoAhXq&GWTaWiT z7u}C5lPPHcJx{l(wlp_?Be@N;r87-;sj<-y%T*^3WD)5*kKe7*&IYPa_Dgl6Ymejp zP5-a7wkA}9j^|DO4ZpO2G#VA-v{ACJ_mtSeuPjpOatR8{A~}SVv^h@WS~8T_Rx=#p z6TQEwTN9}XTp_d+i@}*15GW|+;LsIHQ;ZE@Ska>thJIVt(tHIn%J$lMSIy{Kxw?^* zUN)euUC1p2;ev#Zq1HGS3z-8|g!~9K3XK=QUUkxJrWy676hD zpr0pJ9GGf&h?UiorJupTaW(5e-Cu;Fs?|d}_NG1a(p6~<%_BN?J)6>#3E8*g>ZVfWYn%@-OeY`sjja8(-|XP?N>9 z$itDXUU>h#fXH^L<`{YV1{)DkMj>(Gj(`a3M&hQ;mjh8233vy`TzO0CK0skHet-TIwbMq$g2 zr#;v_GTyrhTA{(8Le+hD)dCP z;iL`53@3WARlI46xtVpfXyDSoZBXA~oeiJVLfCV%t}Va(Hp618mi5YQqC?g2v=dVC z9yW_JF(li>fDdKYkq6j~YI>so6jNd+4=F!1qfGw-ky-n(98|=9rlEPpt(if`u9CFi zi<<#Mt`lPu$k@VGQjTG0$Aj;z`KT5b&otLmV(wuTUo|Vbq;+~{ZV{JKJ7IEc!E*OR zVn@K~b%{`PlxAI2g#e3~RuFf5dW-0bf_PkkB#cm31FzpaYsP+b*L0RfKyF6$+l__{ zABT7^5iJl5)SBg=V$ow1UNsK^3a#awar5>FOI#C3T5jFG2JijBBKh0B)|?Twg*uO7 zS60xMgPD@_tE}aIF&ttl z{PM~@J9sMXKXxtDrzA=7cxa+cqMx<)SA1Vu1@O51bKq<14R>GFhnTXuG%iOt* zz{(v^5B~5aD;KQNBb=>^tbL-I&^NA`tK>3gwrQ3Wo%CKic=v+w0zMQlU;Vk%Dck1Y z`7m*m?J=#4f!@)&oS{_4j(+SnSijrE&sEba+e1XJ-GFZg`u)^kWKziU@YurOW3pfn zG`LF29;S!pr6ylo&PL7f_vnh2fGwzdM{Nz!2BWi5|3qc_n3{-=Ay zdGS3ZwD`TfyRmqkfOlm(xbsZ3k8yh6&9ea_BZ0;9}I@#JWv;Yqr@BNZ24ZG z_yNc@X0psxQ#LOibC%W~rzA`+%4p zHy=fL#KTf6>sv6ny~SB$X-7V1dc_14xZEk;%Uk>NtNpDpan}+sXasufdA-!wl5<{o zAJ5YxPSx1Fuc%CQ<3>_t3Ca}w`1y!F-vTL>?|J?mKJM^Kl_G z<5S?=gTb%Xxj0>z5j0H6AdnlYdwT}nUWI}lJ;h9e#PQns9|$LIR09RAoFB(0QKIep z<{{eWegGzmm*Q>cUi^s37Lh8)m`)~K)KqNQ_pI_)C-_?D4o_Qnypf^psC?eC#ToOo zR^3O?B_++gkl@Rjt7X3R=Bf>5ky2JzQh%DqLe9<$DOTGqZn}?b7V%Ef{q(GNqOm|6 z+AIy%vvpt8bG#D~ot0L>yvp0Pr;HHg$-pdzOXa+~obCpI6XOzgdAH*aRg6UlX=>1; zmDLOSxCn-rFM;iy7DwYbE%h!sn$Z#)erMIw1P;36|F!=qYOcYJ#S!!+-JpoMx`F5a z0O>#$ziQ3)O2G9dTlrjX&EiY?yKZ@E-ye^?72pCS#sn7}^2cDyNO37V!ehH9Y-=5f z2qQ>Hm=cNVWVlU;@8VLjS36Po9}3*&i6D!{LkR_G*58e&zxu8JGu}F@vjNtyj)CEW zfvU>cVRV8#>^8dFKC^h+lYb0_ko${QIT6bqQgB;JF1$%WfeE-xY+6zp8x+J~f|we2 
z?j4@t-emQ*n2HCe?Bsbb1=H>BMUCKYHn7cr@(eUAkL^@p4@DEr;;E+?&@v4=XP4Qv zUC;Kyv>NFyqqEy+>x5yjcj%l%FT+Chbl`v=T%cKSWO+TEWI?u3AaB|2lxru&4j9;W zD^vNo;tj*H9EK5-`6zBwlp*lY6bT-C!_5bb&MLFJ3APEYK)5e2sJVJ8(|FEhU*^aX z;bl=!g=~vSC$RPe+(v&+Bou|+6%Gi;juP(>@^6p(<3nJ2fnYiC>S*X{}3 z+hPPw?dmYPx{R(~y&Lp??Zzv5&<^j(R+P33qCJ9!ZZ{gx)a3>GP-WENVe#MN4E5Gn;XcU7KQVlrwq& zW$Vt9({NkE^+~v`$eKka69;>UM7PBc!^5D-47A~s@LwXS9S`pH&!IP&Z53t!3p7Dn zm`vU=olO2oI--ITq9Dr&>wfaTP!^Y$7``q4+>Mn?oeA1#y<;6@!)P-pfIn%&z0#!|L+9o-F| zqxbYvwl-=(72J-=-D36Bn;d{0HqLNYFId{t`K8AWjYYI#y0|0_jgL#DuSoJPGR02z zhQu`7e(yshWrNEu>&t`L*Wk9N2|mzB_vV5fI1fzU)*QB$`uv7-)_mo~H~g>&6S6Fb zaZ*IC#NvQ_q%?`HlAf(}7E!kPBI{$y;kMOT1f!j1j}h$QMURXw#?sZaW)xgAHmw~L_>Az=}tYsZAZ6`ZZ+A~Rd2tG^z)Np z=@H)cs&1`K&9$wSO9}&?@b`I^(C-E8YpKSWe1TsU~=E62W*ZG{C0rY1}B1U;<_P9*_9r! zP`f}WgOE0og?aNsusNBE}NrfqWQnn;aP_ z2Qo_G(c!2j75%wuFL# z(R8~qty#HrLdiSuUS1V~qByK*aJ$LG^l5P0RMJh^YK?B6&e>vSTg=W{BVBd=mj67K zP_eZ-Ngv{n`UDyO5>fGZ&8VoE2JX}y({TI!k8}5_=kHgS?NQHqPFePnuyXIwb^G4B z<4s{mV}JyuB}G*^gvlLpRFydFSgs1O>G3HYyhanDXsnErBiNd)n{1IFryXnmXSmI! 
z6=N!^P3sE89dBJv#W|^bHOw+j1_sU&iu%wV{~YJ0oT7>xrVBKq4&x3TLRm;}gbeQMENhGCJUc{9Q=a{6XI-5-LiD6t=l*3?T#&|v|N>ReZKBg%5 z9C<+8)b-t7f|^W*4^@9F1mA7ufa`+p2T>Q18N&fN&si>btCS=^bsxR;vWq_cHd*dB|k8{Jk8FqZ1UYO*fs zG6BRXfhAy7RFkLDD-cDoF_$?kqwO+}d_)JlRV z3lmyH$ykI_9x$C+xc#&JZ(5o139RiQ!wrC_!{i3@lAtF?H@c)isvw5)5tf7Zz28|l z*HcU##WYZC6Xj~gu&KwV_qHPdhrLr&PdOTpx|34N&&xT&C2qwCpWE>;p+b#LZ1O5GE3O#SBc8g4@Zj z6iA5xmgEFv`GJGuv)NJ$-K2B0>F8|ci)lgG$iOUv0I7pva5kb+aCGZHXamG{R8tIy zmnOZZ%fJlkm;r>+ZXbC|hrtCBwb$tG(X-t|vk*26_<;sxgenHl*<6o~ZqzxOQB^r% z&G#bebauie|;5 zrfk>anYaJl!vVn}gb3UQjGtp~GShuJgxg&p$<4(b=HhPZv>LsmdCnOiUaBmP^@HDf zawHn{$4IwvnRF+R1X&VO_IajdD}M+Zc^YnK1-CEUr>=NLU3ySm`GUOanUVF+9Qx54 z*s%rSHm<3-Duobn!&!*pKNT)T+&4WweM6Wqij>L5QnC;<18$!{wBmTUO=`DOQi8@7K1i&|!MY}MPh z7@S*7%(fiQHmkSH#B4OPo6N4wIM_`_W|P6W!C>F`nS~8G_A=dqO=d@>6##+@Z6TAV zgL058K3BW3xZB|E$K^+N>A?z}yY93zTJmgLudBQFP&|&HTZzUo4>o}JCz0Du^2BM9 zA&ctdX}JB~2fz}9+m~+7XY12&8zXlBBpuyWS1(mquXnZOz*1!D=DIiC@$4A}-$VfImjO<07?7Y&)jN!pFjy?$CR;Q|tDWtLFN!+> z8R@+~qq{}#tkv7LS)E%>@@`mN{k7*_8wUIK?O{R0_*Wt@6enqFavFqAXi1fzkN>KEnvo(|>}Zpd{s%q{_Wo}Gx+voOW& z26vOu-C%MxnAm0u-DRPB^bSBAt?;_3q{&>|qIb1obhV(HOl}Lwi|EvWq;-?31RB&z zyWR~d28)hOtk1%9lF#YR>UxZ9yM=ADFg_zzBJ}Cme$XHmwkek_{ruwVhGf#=0+Sd3 zo(W+-EMY`#s^PXqH$f^Cw0uR)BTVuUBBJ71R(Gt-DIiu5F-zOrU`1 zjV5-B#a)`?DYv-FjPy30Z6h^*6SbhyY;VnB+w^oj1@eKdHZ5$*JA-(}kZ+{j$oA-+ zokZu><~l3qEo$h$?WH#Y5@@23B#lR-5iTTQacxG1XEHxTqdv$=A7K#(JcLn2{S3X) zYRiDz&L$&lZN_Kz^qSnTa%xSk#{A+cW@Y_@dkzJYO>{&KyIcFQjixN1q8BsGa&a1N zf2@WEw^u$*;P&(Cs;B+ypE-KR8|qOaq{ys^NX3JZg*b9YilhZRuCeL!r}3<0WW{7g z$E3+4{d%_1iV3(aq~NwN@%c(Js|`&itriA_mq>V@rf5W!r0trD14P`Fk{x!G1~Mzs#3(&(r(+G|Yq zI!jSK43n+iXrH=Km&deNo%Q*y=9#Wq(}L@B3$I;vb>BGAwkf!c1j5cf9lMJv=%r5U z&=qy%&>a>>quEwrb!^EgTyv7^di%24<=5_B*YacE*AIOCXRrTg&yk-T9RJC|v7bCU zdgs28@BHH3FW>Xmp6|Wb_O$~`*X*<}Z0hHL8yJ*npY?|e1H5ZhgdBu(Q?Rocbf=?i$LdrEKrn$+rZ95IOKQOp0CE@m9 zK1NsD5NpG56E-}Upn?W-39+CYow)!8Pg{4+<=w9z6L~q^*f%O*XY~-8e@cepK#NaK zxi_;SZ_DtzV2vXH2jjfm=k zOcf7BqsyZCReWOB*e!aIY!@X+3o*+th{3N~4a%1l|#m(gBrbd(#NTTIT4Cg*0eeVf@{ 
zZN@aDhJ06DuBX(@Zq_+B>zy@bcb|3f&4hNfg{+)TRL3-_U2-<*ozWpzC!drgk59ERCelTu98X2-T%#}`efp8Hlz2$H^XR94nar9w)ZYtblwr#btRTfu0N%gNs$;SAw%8BhY zmF%)CywTtq)H^z@_6Eb}H(p-){R6L#j>&RT?aKcUO-`DP*}8u*x&)|sOjb{JZp^hc zS<-OZ-C^)_PzCil#rSS(|3>7a>}64KM>Pu*FOlQqm5O~gb>sut0f zC#PX2^Hs7r(%Nt$W<8N#L|zw3CkR|`>9t2n%cp2e1O&Ca!TypmdVufS*beZ6oLiP`icY)M*^BU@E6*z=!7)Y?@1#p6soj%WTRkxaRZT>o2^v^R4l4QXmt^ zN$R!5WOQ4}aEm3eb`*xp2LU}`t8qa+3Kq`K|L(mA5Yrli7wnlXs-(wsMjz&~(v3QA zFIe?D=b*{4W2R^4XG*r`+H1iUb)4I}rutj=J@L-7zYV-Q$^)j-Ht-my9u2F0K@G{t zEghC+zaS1p__zJsu%r%&;%^W6zkctZ*VNwbx}fW%;;J0`7JX5Zs#7Y#?&>(41%c+j)SuKZYxcx7WOt`HB<1+(p zn*giMtIa!W2UXaNWuZl#xlDa-!MeJx2VQ$8n$T2+90s_psNsIW3M zIZU0wRzoeQ%5h_#BLko_yARz{fJWIq%Do+_y0B0egjg3MIP9YVwtBzb(Vow?7(Rc4 zdqvj+PY;7-j`?JX81ZIvq)a=V&z8|fP@X7%CR~eRw$aS&4)Tq%#K)Dfu&O?I@bDbE z{ABNqx_JXeXCHyvJ|YkX2;3%jN8mQKp!FnA^U2J{U%eCz2m+5XFhCG@#sntecG9Jw zOiZ-qvlA6b2Dv#F79QU7PQjwZ;jE}NEOs#y11u}^1>W-VGXjg&FF5+FK*3cD`&9VX0aPV z(ad2s%%wNYp|@tMUuMze@WzwKd*L!>GNm)wt?+>-v1{0+H;jOYCfzB5@n}p;MEDrM z=P8EULXw9^l_y74RDYZs6A)$Wy8VUE6qiw_U1xFD=Pn#HmJA>($bx1H%dvN%CSv+$ zEZS*d+w_H{#s%w6@@!gov2T;_&hJ0=?w^h&0t)8SXd{HZEmUklF3KY$j-Q#u7?7mH zabfSP!#nOc@P*B{+LpJ@@zk1a)kb@h1u01z^`2hy;@uQdi?m{KcTqRx95gc9XSv!f zg_}GVcKrC6cl<=qh2p}baxMH%V@}J?{j0enVxs7m)XyT^ZcM{%orjd|+uJB)((4Aq zol^o@vSx;>-c-1$@4K)3#h(b``sOh2OCCYSRM7Pqg&-fs{+))~CjhreGWqfs;D^YX zJ)!@6Tm7@DzO7=6`;e->OU7YU9Z~UlhEi*LdVKl@%WCW-3tgYu5sn>=PlMywHDC%AN{P}bx`mSAFIvQ)Y zwc4?1xcx~wvWMHqdQ@0-%8J4A-7o)m@5p`o19v?!^0NbD_w60Kci-4g_9w?Z`^N6w zi{l$&iC#adtmSA{m&e|=c(}@K6d26{r)Fj@!$9OA7c}0ON&)uM-VN^dLpzf zC*#@(w=qLW9u06&;zu3){%<~0vXwgJs{Dm*7*_TSBH<5O={;ClNc)V=HmkEe&(UmN zP<|3ybLp1v{qXTaZ^cl*2V+RJqqXnh;wndog%#`tlFUuP{<&gOI!M}{BHMX{$FDvj z2zNa*ysqV*Q_iZkvfHSFO;k~(-sLj@#>chSj#eoA;2Jc!2Xmb5x>Gh6p4sTT@#i6u zFe)ZxPHJQchNt27dmkFyUXF0P34|WjOS+pixUHkxEhYVwqge;`uw#dgz0u_Kfvcm%t|*Zd;gA47noo>&CD$X%lH=8s;o) z-PV8b;gsyJopMOsB5tx^2%e$BoMo9{U5P;Pym9g^+BdGErcw zYJ~ZM5SC?LB8eGcMZNWor+@as$lV8qvz5}I0#G&VAH4_Odw}SLJ!AJhGxm!Y!}mTl 
z`r{|xdhiwh1HTP!|MA;rU3aI^*+kJzq!OVA*N?M9@9Lbnxcjtoy6)IFb{EkpKixZa z{~jDah0ocOoqmD0KlluepFT73UU>2Ty+i+X&$BS~?;HtfT?2Soit)TK<;mm-w~w0_ zg$O11(mxyuZEpSHNuDxuQDv^X%Lo9|-bmRS^b31Tiv~$~QIFZ(mqT~x=U3(wZgZ~e zDeZpb;phGDB;;{oxkY4IfNhFHyQ*~Z6JS7IQQ()nD8dl6-)k;EKe2D$jB($(|LCgg zzBkv?li0D;?iMU6Qa(9wDmv{8i(xu@?k1N<-aAw6SJqtWFvv%O|}RX)4r(LapA zU5tpaC_hH3AR|bwC1%2Hc^YnitOmgC^Y*CcA5fP)ub#1=JNxO#n&+j#zpM8I)hFWW zQ*rfiUVThdACc5w2

ub#D~U)8kV-4hZVLkos}}y(`hAZG^R-+8$1qPNuR4vrINq z(y9>QHmP_qq38Wm3TM*l611D9r|y4d?A~X_e!O?AV)x6R zIlGT4sxlULQ??cgtl33iXSRLhka-$zXK`c+w@siUXrGyB$Ym<-+!q*D2||!$%^4M% z>;ejA%RO^~Rub`8-~J_fN=k>Hi|rcaiEVB_7GsF?y>A|l4dRRKOJ&hxQ^ zAjD*KOqO?k_odmct#goOfQOU~baW>MxV->m`YEPgXYa5SHqNv)eAZcg<%X{x35UlxH6$c>DdxvgbO9O5 zvtACA%qmFw-Z*ej`1SD(M())(Ii`iWyC3YRB}0&E3>N`!bUvr;TIb;yf5)o?o+ zh``q!OGFPR5(}1==}z04>;Agl_9dOY-|PWET4OC~)dL8kTXPooAXyyKV{{Er^ILLV z16EtvX{VPS5rkn`OsLT~f-oYGL)Cp0(y{uuhCjGex%}q;huaUHzbRa+vxvR)76)D#npa z>Qj<=hFPtjP%3H=ObcKYaV-J{ljl>c=TCDBFTW|M2!-S_@S{Ogd&QPbS$dg^~*mL zoUvi9d(%v&bV`$TG24pSG>6`#Wns-F8C>~H<;hF>sDg5esk5BXhebVXy8)AVTBaL; zeUgqW;kJdqZHu#h2EFy}eSvW-k5Xfztoe+2vgR|_Kl!ITb;S;RibX+#s}m$)YO2IT zBqM{5R1&mA6hTaY;U7l)9S747^GL=;amTk`oOgQt3|C#Ay8{#UoLyAOZj5N#+pHxw z>TG?w1&yCu=$q--()HE-PyTvrOb|lEydgZOav~uozgI~EZDP18(NqJ$=mg?cgrupH z^tYp-%VRtoO{UjEF#z2!9f?)$`uV)2^;X*!tE1jXlWuE{7K&}rdtr(CjP{OPd;iR$ zj`<}WS8e{@k!X}S97I42zhuCUBRmpL!!ynEl_ zVL|W{13e~5M@NU?dt#wrW?ik4^s7%j+=ed_rAPv?;r8!6I?KC-DyXIkck1jnTiiEN z3u-K`Dg(W3{owNkrp~}N89g1O>ZIGi^c$VMIriGQo-N;c==Y<@0x5Ay48Wtn$vkO8 zEQ*hbCC_>=z#w0#C32rC-2^#25)jvw= zq`yJQFY%HT6XYoIV~+^JpN@nNyc$|vea~!nRUT7mK~A=6imjsDbvbA5K$>?)r^VfC z1TcQ`I%aum|7}kWaZ*HPZs*$SEi~aNW!ka5IMZgt zmZSX!x|1rx97{dhZE)|PY(AZ%D$l!hOaG4!#p9$KqsZ}5RfrREEu12SXs$TM;ivYP zhTB=e?eq3>EBA9to)gY~QC;?ey6S-Xg~wwTKQw;b)A6-?!AG$raVS!3ejxiF#_}E&4yg zZKBW#CmR?UF|eJ{h6gbU*#53xy>^oOi`1!WO!mqtrJ^$0E8#1UnZ#CUEy7{Ja-JG$ zei=4=EN(&iSqIYd8cKS!QnHU6HBZCs_j_ayx6QQA>a3qhZ@zC|0Nd0l#B59P3u%pV z2zfqsr&6Yn#1BU@nyn<6gtclc60#wFd&~qp-hxt*c?e)cOyI^at}Voe0%4fMQBmIc zotJD&nsle$V0JW^m|i5JarRO6PCeab0<}1=W{#(IE>rC}x9bEu^eUtvwZo}sqEuXJ!yJ_E{IOen`;1g5;H<2uvV5i~sdmSQy zAImQ5OTld)u0dFrUk28Zutd3hNGN1 z9*ZTXWJ21j1a3zNn^0D@joBU_2HZ9}J4s)v0T_B`55Vm_XZalG+DD!p4I?U%kqVsb zSxJcyCd2o?-^oyj=Sp*1k?B^*)WifOm>|T6ih|*3SsoLmM-K+pRNQeAb3?xU`ne11 zay*R`U2A}|tHrRW7h`0OW)SHZAa*vI=}P;`#-G1*B3ISsP3XGk*S1tkM+tN28toEndj z!DlC*<3tq8ns83KL$!i*9&-X^0f1BjCTmz#m#x{A=iFv>G#c$4MrRUk8y&5AO!Ya} 
z+~gG0zaOmzK9cjArjW>q-L1#eQB2{Of34Th1;k-r7=I6c?m^`s`4>e zeLE7_(EOv>jxCmgiW#0xGmACs=0)2nZ!g98K6~~c_4)Ny+x2rj8?LIj?U_H0jEQ18 zrwb<$+pKen%oC?BlH-w%Hvg!VCZCn$bASyEQzJ{rl?U^b5Ctu>J%qB0`|5qa_gvJL z$J~%hZ#Fy2$OCDjY)wYb0Oja1IlIh7jexlAOKNZV;fqHJ)E$XN1o%9F+r$H%Reh7j z44k4$)8KX&3UJ1UeomHb+oJaTMZ3`>v7iA7=RLcqdG)!AdUUqUv)${y_sC!T*h5X? zq?B7hV>JdtA>Zf6CzFW4`v+#f&lK;q2jm1Qrf7rvxk`K)@?KK0@! z5*IujUAjMf`m^EF55~{h&!79Wvg#>i>0S|jK5Kts`uyoTFW(!zdT(Uwp5Yt+5PX%? zaYs>n2nq^3RQlZH(010fA*qZm6vNoMi1=g4iy$vRRidm&h+t0Ne_-g8Gpq8wjV88j zDjO|zViym#-Nd$A=*}D`4hutmMYYas&h(e%_TLIv&`sj$@AVw&D(A+7@Xnko8_$Cefw|TiX#38 z+c*IiHVwBwc(~o0&(ksjQvh^(ZlY}4VdEIFk$Rie+hMkEI_0d2 zr~m4QZybuHqHJjbE>*)L8bjBV!ndi0+i&F2r6@BA+@|R5@Iwl2uX*IzV`12lXnV(r z&lrr}VAk>h8HTl?p$Xz`Q^Z6h3?{uOh=5->PVoojaFk@m5~;NBe?sf)ZvSj?S)OBS zF3g>ykt(dF=w{4M@U)`G7%RKsQM6E}t|?qv_w$#Jz0HdOB2z>~68KoMJIN%t%}q7j zCbk78-aqzAz-qtN>a8~~>_cuS_C~6xdv3AMTyXXJuDjllq+NHvYMj3zk8Q&Yb9<|) zQwZXnTZeKNXTbLrOmXsfOE$xSHVQ5XS{YtT7%|l&T#+GcUM)Cflkj zXw^HqFk{f!VkqvRh)<6?b@OQ@b@LWi^nK&$A&w)p6-qD>z^(^EP~yXi#K{t`VBahF zK@k7B>XG>rnaOeb2;p{uS2zLpHb8eFmf(V%5{`)BkSYOSeD00tRn=epoOknVy39~m zMLBA9-Y%WHmzv*f^$wnN#?86~b>_k?3zk$o^5W1T4tYVtJReVB3O66+vxVER8dHtH zZm`KApV~w%e3m8fVMDm6^6ml0Y77h4ln7F>1s1B>ehR^kP?>u=ppko+|J@Sk=pGJ zGgCi<*>K-}e-ds>>7MKnV*QHhMD-s>hy*i`#Rq=Y=+0767!vH@l3fI(WWXjPhA7Lh ztKh$AKoEJYR(eDV@J(YCMN z^)e{#p{NQ#IO#0csFP$~OpM9mF|7E^Xe0D*2aysXQmd_T+oT>BG3*m@*&h_aNYI>k ze|mf9k>}nhI;Y<1*kX2U$Xi&Yb5&FJYUC$mZ#KJcp-%J7_WEYKOMmdh8{=A8s}PZ< zlgU3&xD5+t=@ngh8r&u{*Lt=GY$v@Pkd6;q-@2M8*LKRa%joVgvhA21;qEfA4MxZ2 zbJySUS|~KkW0o>8=jGI95!F=Y!xTTyDTmuuW(#t*qa}%S**dxp9yKPD(`z4oZX|?> z#Ih)9a677CL$oaALmC!kZf%fPF%BjjfSr(xUJ^@J5Mz8SqHr3s=mXCTU3~qwXVcX= zMO9X|)d085w&}1Upcx0#L={z=owcTdO$(Ma{&jr(uz+I4p9td?R8j%%6x=4IUQ-UY z0qTHUxVHKSInD}$s}YTJP(yS#RnU4;aoa5V>PLPTJfbMS9gCm3u+D0$w*oGsn=!=# zRI{_i;%&`iw{?H>CBN2qTj3=sh($CK0v`c@q6W7~?hUs1MnsHDF(q9|X$+C)Qqq~3 z&3s`w4+pUNpTKP{rivk%JEFnuQH0yO^PJ@tXT8DMhW;ziJFWo=77tssbTxt7DhX{X znsYKV;hTE@*Q02)>+w{cyoR{N18-JzD_WC0XJWC8(%Ff;!|^kHKmj 
z;`zaEJ-=XKljXEcc?;X}&K#hM5cZng{Zx@p=kQtCrrDl~8T1W*8yh>uV?`4hurXfb z#q8m>7!t){MLmhRAqU}hCxP27AZGQRRvk8B@1zQSIc!%B>(je1<=(V#CuM8V+ne%T z&4$x%s2cdi2rMv39TVi398U-#8E*_(%Efqg*K-2LG~CV#Zm-%eoOe)K_JVr$bLyG< z)U%&b&wE0>=t=d$XYjXYydaglBrSSTIqO+<)n4_2XOLZO#XfcU{^|3l@C=i?>PhaZ zCj-5|kN$;N$!M*C3Xn)szmy_QrYB|Jh-Wg~7857~lWIbK2XkAmmu61 zB_mUx$8Na$;CM)DlOqxP2dqM5`IaXP8zp7uOzFVu7fBQdtlv0~XJN9rsBJ-6(h}Wc z5fv?nL_!dvBwKsmYvYTr=rr265)6(q@-zS-g}UeM$Kp^iH>naV{JX|{b23!N-Qq$GRhZN zY!?#y%55x5P4qFc`ZqU^Px)mN?aHc(6*WotK<>@vGMC}*Q?(JpF1qu$$X^)^~< z*SFsCq@NTXka-c(gj8gGs-JYY{j&pqx3ZfMx?+>-K5U4_^ywL&g{hiJU;DG?M*~P~ zC<_v=pbgGpF=tjHLOOnTOd0~{OIhSZNk9~th^V8-0-WTJa3e#3u`z;MVa9Ly!T-|B zd-Cj+W?P++Zlavcl&zj}x0=rA)_Hw1PT!@QUpa#EHEol)G44gq@uwXhc^%k1=-7nCo%y6c#tz&IqUS8w~l3|pg-@nNxv ztok75+s>?$(mcNbfuZD)6?4e>7qamGp%NK_1SB7 z`;%~+M7MD!qZ0YF6HzfKhp^_Goq*dDt}m556b$rFb>D4C<`XiO#={N%^x)9v7FQT; zY=^0&%ettODylXVZoTljZ@m1sQMmu%C|tUd5F&tuMQ%tC{E`^K zpthpI%nG6`D6*(XQnEq3mUu5nsZuZP-pN0;UnTp<$G8v{dML!IjcQLrBP>74OK(OJ zhY~8MvCQ56;30N-4|VDdMi=fNleY&Ar1{O3;_YBKQ46Z)xT_amGH^@~Vu~7xiHSJz z#%J?<6q0TcByf8!dxOPMX9DT%@*z_kQ)BeD7~O3KXS3efXDDhi*y@o$!`Ya7I(B0; z+S@J8-q~#HT&Db+_rIB>YFFc75x{p0=>iifAI>h?egeld+|CMaFMpa_xldm9oVxS{ zb@_AZ%6;m^Ppg+crC$6D)^VTpg1Go)@${GEbDmSbuvfk08TA4{Wc$^X2d2-T!t;5s zDxTmjeLT?lo7i7S?KX$4GGb~ph_X4u*`9O@OUW)9@AsGtw_`{%m4w@1NGJfeIW7^G zVOQSq^tj$$N;yHFg2~r2RRiovPdoO(1k(`>%U%N@Zl+Ji_8PpTlc}2|_Smt4#%%A) zcMTGroR1zjPs8o^bz}**wMJk%w!!M!aM!crevG<;=}xksmG~r%(~LUDDID^|^^3%N zPZC08KJD_FFpst7vvCC(5%5I-WMECa^k#Uy?+2#BwI`k4P8HNs_RgF|H)5Y4TLZ;5 znTs0@r>*sz>wEawQP9_~533D`nYHfi&WvwYM^SI%;8{=yBnycG^-mr6bjxBuOt*_}(TXwP%iB;hvGW1t6&j@}&F zM-|oDR)2fW1vgVRpP_g_=d1(Uk6N^ovUO74ejU@C$5t*lv+AB-y_srM$E66UkU~hS zp!|fx?bpoi^_07TIB9z^%FhgdEHyLL`Sf-7zkuY5fFBc*MgXef!p?HbE9o)unvAtq z7~6&m;Utk41VMpI62*8to=7CLRh>}f;Y8v{f`2D2{xzs9zGUZYSH0O*pXcs1x_Z#P z20MuM8;e~FWu7h&7<&sRSw6-Nal(7;lu5yqHO=h<9S78If(K)F@YA4 zm%9bln|c10+kX7(aZ<-JB&&BlI`%m*<84jIBIT$7iH%+Am?ne0G0(R7u1AN%JR!Ic zj!4q5Du+nh1kF3D(Qa_08a)NKr8FT&1_(U?`HXyWC;$}_JJ86}$b5iEj?3JLs$lK* 
z5Md@a)3s)%R!27?qyk;+?Z+;1?wT{!+%&3f63m6r>XlNTre=Rh$r(f_r$zG>b&@tA zzXd%K2bCeBc{g=%n-isBQH{VY{qb<1_<{j*VcE=u-PF8hbTv4;D7uGoz^(*zR&Q}P zExPI}ua2v4`?14UL=fUwS1rgwBoPULeTah;3J9Xc1qa)JPpkL|#gx=all4(*#1njS zzo%SSDlD5sG!Z;T((i?_STshwj(P=cF^A7tVT4T7FQvaF24)GwS(! z@mgBLr(W@Nr2E&AzY!N7W(7)GK8`#|tVt=voz-?eULD9=;8jY!h#0@Ofuif{@JZ7WGzS%%Gq1T8Vz+vm4+#SY6&6NF%&J~qEeB^II zl6n{wB`pXP3Xbv#FagG8d0dhrLX6k)S0&9mCTR^;2sMl(Bq?jOpS9N|IqjMgC&O(b zEo7}PYO+D9ED5|2i^N49!R?_@-2QuhAN8KMQ+Haq305Y+ZMKc_G~uw7pL|X?b=tZ) z?y|10zdTM@Zph4L%=n}y_>aqB8gBo~BNg3l&vn&fLN5pky2rp^=UG@kISbo2-15r# zO^@hZ?N-2>g_UT67PeZ>`36-8YpFTExHH$b;f#xYqq0JH_T*GcqEwEq{Lkm*_~T#C z;J*f;(cDn>@&Yg6_n3l z)}MOD4fpN)`xpiSrDSzXDtkvt!tHpL4lQ{iMyzB`U^5B=C zpv{CVPW~17txSmBur^=L5^g7FE;)m;l1Y~P4^IbHte}npE*^@Jn5CxA1EYIjP&dxn_E+_f+(TS1eU)X=lO^rYWrwZmVfljv3V=DQwug( z+zmRWg>tsSW$QfKEem&3^BZ&QHK%whzkS#5kp5i=bKEhlrs1E9vCMUong|VUliKYK z7JI!BEmF)RH+Wkyv6E>tI9tE{)XT0zCJxZmE&BTV&%%G!u>CgzfJ^>A`>T}l(uKn&O=!}kx+%Krc;|_ zZHvpA!&&={h`x5rMZ9OvP@bm>b}PY&ol`Uj{A?#!yW=0eD2Y!~ShW*?_9q%^Ps8o^ z3%A*(Tz1pF2LjeLp4vrk74>gX1Od-I$#*Z*lq83`(J?^-=GE<{Ftt!Xeg%ZQrMHO!c3sBE==?W{YjxE)@9ve~B5lps_ z0I~^E2vkO7EGl!8n${f$ZIVZNRcqaRvYUV@hTEj?c2Z?vVLM36?*GT$dB90lRr&wE zs_F`zCJ-ddbf}!VdjcYV-DP!Mvn&z>hKW7XIftsw-IGBDF^h_8SU`-332_w(D#kS; z0tx~XI(NnQ>gD%8=f0}yo&g4`*TG@t-Osg|8M~|ByW!mPz4zSTF`>9(XSrv~-47>n z!Q>g0p1Ubx!NccxWT8vsiv$hp56ow8E$2-Hs-J0b4li^!Mn7Y{@GV7E*V{u82Ao1Dcy!aXx}j++$P+tT^fD8TuZKIk5g&7ifl?#eXZdXs(!)qDBh;v-QbI^x&>Ai26)yR#neuo z2!_x3d(6S!jaPm!zIc$W$V(Y{x#b0B)-nID-WvGiFCVuhJQ2xG7&RM?o;^KhQ=IO? 
z$-FG`BBqu@Wqyg3@owN3zWZ#^+V?vAokcZcm`@O1JTy)!x{5qwD?__Y=U!L)AH#o5 zvxm5>(h=JkmFc!GwP7>rYtMTzOWY#q0#m z1%w)zUbgk`6a37V-7@6ISRU`tSxv?Qna+>IxFv}Dt0>Xr>7 z5E7D<614MOTWqc#vwxTg^inzW9xMD7@KGxpzWVd{ub)kMUca-%d6_jBA#0(JddxMw zD5U0%76&J+p5da(?H~HuUuf#~l%&n!G)!rdE)|$>w`1(Bq%OV2uPk*e3Md9{8`Y@G zt9ZcupnBO4qPVT-r&N+Yy>c>0axOJPAti@Aa{dmHB;@i7<8J}n#_LAv zUH2a@4K@*P1gP+zPN}C&HE{c7;&zE|+m|1VrS+ic9By3wg2u47xxl1*H0Av^c z)vlwU1m1S?0)+Z1iBWMLl|0U-o{Qd&z@>(`zsY%fnzbL(OjVzYRM-4v)D2MRdBY-B z!@Hs8(rqT$4QVs$I-Z*vJ}-*4A27%MozKwuz&I-&MKY#^S<$F77@TGsURCMK^ET7J480o9sw<6ElS9k#C42&J}9MFfT!lOgKe&AraVdue?|{ zD}-Ur9$u+;t65&pgt19fZ@3-#6}I?_KVdFCG!B!;L*HTwve^d&qdKU= zY$lsyy7l9umqb4jvA{iUXv5Ug6GgP#{=`!+EO5Y9R_`~IlU_-qsP{{_tKKr9n3kMT zR_IrM)egW5#bf#AIkO-tc)BbGh=`F2WucIgsTy7@vG}r`!l1G_WGy2{u{as{q|Ac> zZ_Wnrj$>8Ni7+=TZ?c3zL?H~ttg7n2Zgdd3THC!3B%gD=@z4@er zCDwkNy-L@C)kU3FB#!4i0u0)sYAs;Fpo%osam~Vngefr+xY9&T&#)_ph=cOovL+TB z&l5CZh#p+?nm5Fh?C0RMTiMUi`x`+}uPL1Nq2K@^2S4try$Pn}vG0nJUZUmHMnnin zXR9&B2pfVZW%_|9Ll`&m<_+^n%^oD+vQ8 z*#;edy=%AlXrq&R2KKX~y4zMt4Z>}P7Y23nv9H`!@1oikj@HkAwgoliVLc7h4{@>y zB-^%Hy)`0p2*QH}&Y(JBBKPmLh0{H)(*n2uifkt=i{kwlJ(Q4k->_83n6L}=W8kWW znZOV8R7?WYj?>FA!w}p(Keb%m^TPhEa$ZTF!ye{nOLDytr%9BgIIo5(VwRrW3a@Vj zA;oDa$?_EGg0H|H{1U#vw(k&eJvCbaZFV)ax-Ggs&OY^`iauKEuxQsW2$7*XqQMnS z9mD6$)kUl)A;LnsxxjMNDeN-EFgPIS#OIJk z_Fb&@(MhkYvnVUOYk->iGhe;$5A^Wy9(sej0Ovoxo(|-p+m=PRNui%Lb$M~HS1fo%XM1qhIymSQgnqA!yViT)XLa* z$lx(bDxQ`|!!naq+hU#K%-Uix6diM&G|Ho9ynVo1L1zhzOO3wP`?`F0dO|oL;C-*9 zYeB#DK{K4^Jt}6|e#mJGY^FIej$oG^*wF47?3m?&4a^E*q&#`i%nW-h#@gGi@MZf6 zDcF6t(_5)$BBin@VsID^T0-rVfiQ5@Ei5^dcb>;(DaO0N@GA1lqU;gg3aGncl38vC z7o|`Ps4*Hi&SBnK4-Kqw;;?yT9|pa3#rw^MYv1nl#IO{!Y|=8Y17z`FWb1$0^sc1+!apl|M z!>hFL!qf`KnwY?=NVZ9&)s&gwR)oQ$uK!v)fu_r6%0*nW(-x`QhmJJhdw*>!o|SqC z(wMN9@pCxuRc-zHBtr7KBCQ5>orh)K_S`)+qIJZvW@Y~N){tmMWPsH$b>WZ4PI5Pn zO1_poj;jd*mH$WGVasgMmEfj zYB#P&%(=gy2S+0YO7<_6j?*jFkm*L7eGx@e z&K3o8c=%$~j_K!pa0!gx1t?6?aRQbe_ph=v;pU0AYB)(RI8>hYDB``j?$j#>?)B`9 zg_tSSI;)2<7X9!}fcJTfm|s2oly$+3%f3^LQTFuJk7z+3t9{;83V+gAT!i)t1;;xk 
z-*{$e%2&6vH?MEqO+A7EcweQ(u|gTb$lL@jr(#CTjZ!fONe%E|A{Clx|GgngiGZ9_ z&o~+b(X?XX=50ZXot|!n-Y?zeF(s5CG`rVp0RzDRYeY|dML6^XAP@s~)T~o0P%xXf zoqn=UE_XOGQ6nzW5lBj%$1u#9ZGX#YZ}7mq80{4R zcMMuY8l)2O(ity)AKeVrZjV z=zbqY*694q1?m|zW!`EoaA5#>2?PgXlAT0E`h}dmPp>%MH=93gVR*35Z1xkNN+XuP zvMH(Bt>tl8-oJ}m&EA^@K};>~vwa}kS~Fp^Kwdp&qL_<%+BCkt;R!E*9FO-@1wIjs z7+Wpx?l-va=!CnL&J_Zk#Qyp`bFx1(&hBbtVYwWWnELa*9M37nzl%1LKS#4;c?H9x z@+OIJoRZIjuRYSuYlD_7qDu3bV*jp1l_p>l3g*jcbvz!e4wfeM%4gF5yr@rCI>i2D zBX+x_xEw9|Z?zlP)|w&veg5N*1NuQxhjN6Q&Z6^i3wOCa?n8S)N&TG>Ca|lYhBO}P zN)INZ1OLv~)l7Q-)$MN6B07m}C9moD3Di4 zdIK_;e1)fkgQBtv;ob{$KP^G{%YRj; zqN(~$et*2&P=(k=>t-;>)PlbYgPZAD+w*dBIQcje3p{iDvIf>-4r0DR1OG%uI<3)b zEojm@Jgp|MZYN5NZ3)88<2K0eYbCtiF^JdB(YOADcqUb_X_N3TToLc5>NcrYqDh`S z-l`e(m~BDa^-wjJHm2-(=-hS!Okz!f2fCdKsXcrax{v4Ds38GHn^&{Ck~CSBsvF(r zHnX%`4$mjQL=8BNs@7kS-d~FTY?UqnzI@PX^XvXy^0#$SazFUXoKc7kk;KznkMb+r zVVm;=F84O`9X?oc(yn1D;H^D!aJ=h0$y_vZ@(`8m-7cb*h)lHLQ;|~VM3byAhX#rz zy~yRp{`Q)C8d`|G=8#W%41Xc2h&J(`JEH0f=)OLZaaq~l5EiZ+jC~FR5CjEGVRgkV z{hp&rRoHMoP4#m^I97H3vN7lMN1LdDBtWiSExp=4^hEbU-&lQ$u_gcPiwcliG+_5* zG#){lG2pn;5MU2!bt*>)*Ekty&D;L1xSWso(M8eVOPZrGKtf4%Cp zI_YWIK6(Fl`=CbG!)j4@$@%#Hq-#iEnI-WxN8%H_{nP9FoonSXytH9EMjS)ybj^=j=UCp}d-;ylLKZCOs{i^a@Xb_xKk&H-C zv_NTvzE|KR!oi{fsnNVoXd}#JKB5di)ZUc z_YHUb=P{FYG`rQH(TRHVMO@E_PkX(WGXfiY+Xu}{@){mV(Fl0l32Vxlo)@^|e3i!e z-iKDOYpi{o!$?arhw#fYtYEm}2b{Q_Lz+ZYCQaq>aA8|-U#~3z^VypveN&7CrRGSm z+R0$+xV8Psja}TYOkK`6Bqe!Y92)?85a@_(CLTD(P>zmSb%a63ElNTpfL>c-pvz!@ zx#rT{tur7s{ABAd2B-2W|DB=nz~1L;l=Ae~lh`FTA>M6DP7G&FnUzMPCe^im_gJhe zY9Bp=OFx+Edk+&`6Ng1SZ&M1oK+8Uunjwd+e>KRndEj#Ib7nr8SUN3;t~}q6Vh5y9 z+xfS^z~EZ7*8EoNWAlN2%lVlBepNMAYVLfR z?@U2~VmI&0`ClZt#@+;G`#&lGsc`@2^{J}0cGtD<7{S0u2qTl!7woHFx)jW|UHgUR zfy9e2@yzWIsSOHF zI7A3$ESUrJ;BAu#M}hmd{o-is9_6+HnM=d@5FMjq_I(m}&^l%Tk zPccnTpdfVJB|-`byzawjRs(OK80kRTJ<+{SCUi*-WOgueka2IL^~?N~S6+6c9C$yK zvAG^nl{H&Bw;PyixLNeE;tk4jcvQtc<2R*Ycm=|9`pPbtEL{X-a(iJRiQT2z#Q29D z+d%}mG{E0&MtuB9wF&50N*4X3QWl^q36m^>z`dvMcrvfgm65Y&+8HJW+!OItXJXWg 
z^IwR-4{Myx^XU%Cp>H+Yq+?fIC&01Q4>5kE?8I zSyK1r#b~PgU6J-V#aU@raHvg64>%J4n*qoGT!E(Aze__wgd|IpBJt0^W0{!2iwm&_)=xCtbJ&j!@I0}wwmh^ z&}c)WXtPv%)ea)Wx%I_TRyb5Kx7zPtK-8RsQrTfTKBII;k8q{OrT|Kd)NsCHo`^s@mjK!+rk0j@^=U-t4|L{3@{rDJ2 zJxlrb)a)<+(baP;*hfl8KnU`4B&QF<_Y`3C?;=_%Dy8NZ1wekZ)C5gsT2qtb^2X0o zx%yx@h4wEIw#^z6=B<$uXx3La37hHJ|CaivPtmIPwF zmT_0NSk@6#Y5&zfr?s>8d(Y!oFq6C#^vW>UH=eAek?YLfOg^3jedGDiWXrMM`vqZ} z(i&SwT;~*=p|Q2u+}+Tf+T>-OHR`by@~Fb`pEMxq5wv&TN0>^oKQ^4XvlNcSJo-rN z-GfD9DxsS9(poD7?a*txCo9Y zE}W-1s(huP)>p*QH>ww;;Hk?`Fam2(*3G^8X3?!*6{1?k`wIs-+L8egpFIkqEa>)r zinMl3XYYi!8X(K_Rd6A0M!j?`_5?G+tJnXj#_b*VwUn#-$C+c9TwM13(@b3;qXZ{g zW)n5eTC0p^0fK_-e@QQ7)3=T+^!kj*(DpZIK`hN)L2T;dqMpS@&{7 z*d+7wLhRge8rRUaC!c)r?5TU(u;iapA@}2FoEty3QqH#$#X4jrRgd7cuFC`Tv>YLR>2US);DxbELf_nOS0e@H zI1?*~jW2GC_do^(T-^RG>t&x_5{qDlp?XZWuR7W{kC`kjMtIOeftRj>cUmdkW>xsR_y?ts%9R?6~wrYaV0F1Tk90XU!W zZoJO_zI6;$veV;P+b+swgL1t1M`~kHMN=%y%@dMRwPIgM*Tb}zAJ7lZg0N6dc}I11 z*&Sa#yI#hX>5NpqotB>3GxZWr8kF)!!mV9JJDqG^{{I6-3M6T|^p2|)O+^EZOahB6 zILRmY<1j|%9^`)TUR8>l#a8gFPi0AUF@ad0bn$b8bFb0Dv;c^L3sI5!YQ>gcvckoI zj%9}zeKtJq#g|GYn0;!_swlyRsY%ns*6|x%K)+~Y7Ivp)%rlDijP`*eOvHT%mCTpy zi*JBr0yHL3J=+1cCT%&QT&(a4{P>O(TqprF_Ll$#;TInffqfKIT{$F6*{7J3oHrhR zyS;l9*+&7z2?%dTy5_{gmyX{|mKC>Nw`Z=4ngpI^=&)dX6nCJkyubx*0=?2dpVGgf*RQ51 zDY0T2Y$p^<38iy#el`h}puGE}Thxo3@Q)JY)gg&<1Oq9i^NC!uA*P5(5dZtAYo`fXjma)6 zcpsR#OEO&=kvM8CDZT9?k{Z^d8!2E}#{pnF+o$I9lNfE+_utFu8doKj{r#O$mKZ*l znF&~1)OUEJ_)7q@D2nYh+uxp@={>&KEl1G>RMQYB1fZOhH7ES{H2Z44xA$%{2C?qBrGrN0Jv6o6LM1mHvr+|#n>;s1hh z4inrHqo*U6xGOev7MZas`1@Znn=dcjZ!t+qXV!4TJIP^*(_fyUil#>l0M_}ZZN(Qq zbqsSz=QM1IsWR)XcL0G~%uD9;Q5a%bGeW0pvpS;&kB<$#w_D)?L89djZ`P$R!U09C z6l<{ucE7tWA1;j(l(ghi#W7B~{jfgiroo$)PLilccX*A8+68>b!DlG*I}AOCP(#=g zCd)6RM&ST5i@TuynD89Hq1R&R;_+IZLxQ^rjcGcVzOZdb(=W8ew`_i`FWYN(Sn(um zU%6l-e(mSZkFr_XtK9YUx?s_QS@%=YN%~wSsAmcHwY8GuXM!N9ENf^ouZ7gX%a}Q#bspQTp!f zGvqG({9vN^$!w4!pBJ@6(xIGAq{Zu(ew&rvG%;6hSDJ7AXhtM7M!71r-z2I&TT4e; zK^sg*o*eNsn4=3GAYU%uZ$y3F1dJuNZ5V-mg0CRj+&5pvh_8{@E0el_)kjZt!k(B| 
zft-zOFGuB#KKXcq%t(`n<90i@@3otomTK_41Q9%qXh3RC_%Sce<*L+4z(s0kL$p9r z9;19>ijG>J0TIk=bX%mg?WRO_=W_^05<@67n7;q`wU`yaI@tHood%S}V`CDs#IT)X zu9mCYHgL2`&`AqQ_^cAy4di=L$S(s-D|0lb-Gc)V#z6@q=O!OymQ%G&@M@P!N&W|X z&#?c0;|TZs*rI0mPe%9D<-MV%!9l;gDBafl$@57_wL`?3%rLs$Q|%`TY(oAXv?7N( z*v9iBH$(1F`Izb`k$TQg17&57QX~(UM73oF`u9!pgLAgMvYC3>YD$~5u1UuP!ch>5 zv7}XVKI9^nKRD_JRC}DBh zy`T31SUV!~vNLH#)b3iy?rd4r|CeO?CWy0GUujwsoW`U_06vSG`7!>_L~I&(IW z<@eW;O^b8ppV+s`rJ^j9oOXRHc;QrCZD)PtOL~6lq=u4wgWrd!%)x6*1_$jAN#=oE z<%FvgpPg=G<+iL)bF~&U)^FS3xa)47Vxy2e+X!X+dTi|XV@vpkOD6YGlDYA=JJuLV zq<77>q(ECjn}!BAP@+ox_-F;+)cJJ4T&Bpwh`B*!t?RsJ0e%nOU$k`C=)%Fvrc|Gr z82?R0?1T4%BCs(;;6}1y3o_$SPR!e>dTbM|7{rOu)xAw`}0T!}!&T`;dnGe)XCDOBlV&{yJtggLmIFeMphzyS^XZ&PzjdliYg;z(M4G zNh|aTg??JfW{Ipt;Mb?i4b2#;jWpxQ0bA3++jKWUYJ08`3&%zb<@?=@l?#!h`g`LU zLM%U;?APhb9RUhuEv|jdtR)5nf)t0I>_LSx_x8jbj=wkG_LCh)mB;$Mb2Rj*EN@Ja=^Vx({rjhdBpj4n=kYsl9}038lc$F zY#-DA3mNP3&6{L~e+Qw_LbhCtECIjwO9&+{8kC$mejFyWl0`4gcM;!IhTGS@2E#$n+W;ah4 zC@@-;tR)+^P|B%>YDZ`PI7>G2b_?z1O;a#H2Uh!D_#P1 zjTGF*Z>odxb>K%ll@%J9UZ4KSM6T^j4PV4Au*%V=2cWoYh;s7q;f$~Y69Ve zlNB4^vX7>!TP7d3WA5YR$ru0{!y!4*iS648keAm~wVOXca9w|GInD}FJ(zn>%%)2I zPb^w`n~B>mDpVRP;@;0ezFY2XJnZ~kgrnfLLytXfD$Y$o{SeUs&&9&h^#(l}8it#8 z(VW??2W-+@mEsjxu$^VV8YHJl&FE!3xuSPOFYkln+_0M{2S>0V{)}#}H<-kA1xBm? zI*JXqWdo|N6dpe?4UBMF7-3h?JV%JzE0lFI!4xVaqH5E;-|kJDX;HD`YEO_NO~^7i0}+;kaE9rL}D&lM3! 
zDY}`GuO1wQzFNIL;k@B<82dWU7iF0sSpTN6gtdMt^}A!C<3uHO#Ol$1bVcKzj_g1} z0Xc&qM&_qMKSU4#HxXbBvoD64DYJjHibwgnPe!BD=H0vB;2a&1)n2q@$bF=!3XG_M z*J#f{OlJOWOEM?DSb2h<9UB@gP_OtB3{PP{ay=!!9qmNtK zya%^FidtS6q(UTi9qE;FyaW^364r~@&>genkY|CPVBMEq8$qPrKXxz0w4@~S$l_8| z7#EOoyIfBniJ@PghcWi>k!YvKFg)Sa+%$ zu)Q4*E?o#R?yk(k$=zreHI(ru8^6UOVf&?%EJM3n$%|;v_prqA>puQOO1Sr+Sy@{O zad3m)tnr;|hoDFmafC5r7BoaB@wou9h`u#*eGV?*=?(=5XTfe3l`^`*beaT(Nxc7H z`)?sn6W#t|^TQu6_VjjCn@nvsM>nBv-9%U4$x{vcJPLI+WfL{MuIr?*6tW40A>|CF*yTcH@=S3MGVH8E*>Qd?Z^C*UAhLFWz!9lwi<6Tqy-(SdHkWB>mHs)!I$R z=`+ZT*bhui+VEbRD8Z$1LI7g#QQBWSB!#yrmkm+dnYd|QBneVcVv&OXwwr`NTUwMc z$b|?xHHPgdB-nxX>_F}J#Sl7fY@s!`XG?S^Kss${a6vSc*=TXWc31r#obrV{^^?i%?3ULDt#7?_bpK26r07EyRflRj@+SJpaGl zqd3Ir+=JpCynZ~TA3e;A2FdU$V{2xaTJSgP%owDm;7WB! ztXN|McKnxUfo`}`Qb%qVM{6<}?4G8&w03gpRvI$pbj+Q4L*dx zs#)U_?H8@|)6mzcA&;M#y;=dyJML{`I|+H0vM?Y}^7L2H2VID_#4C1SU#?(pNFMWS zrVw+qF&2Wyvw5ZCUXACJGC~P|IolKdmt~<%y(ZsvX@R1v@bE8suB3yUs1EssBKNV5jFBOS3GD?bk6v6DO`N#*409bTqHS3{z6v)fPJ+Li?Cg^wTizDbrho zx7`8M?CW(yN3gB7Mq7Hn_uD>lME}rx{G}76G+@8+Mh#iR!ip+Bf-Fqk@Ymli zt4K*Iv5lf%k?O{!v|yRkLgk7%&bO5e>rE?Y9jz}5NJpt{Ujwz9eE8cGnjx&1Zt-L~ zXG&7|PJuob`(tQM$W^4h7vJda{&pH zZ?ay1$sXN}OZLpW-wdhRYYueYL)!(|#|}4$06|u53u|hN%^?)FD=sLPglJae8jbS{ zJ7utch;ZYCX&7^ZR!EvQ?|10`dWZD>*E_@;1&uU~gl3)idy9MohcH{GVmcM4-35AM zP+o{_zwq|#k)Sf?e@y~tK9b2Y3;A; znEC#^R8WD%RkkmUf2!5jN&S$ia;xlJk^JCeBBIz_uxDiQf!8?Jjh1VL?C^lDh8!2~ z@1__a{g4&G<2PXt1wOviZ+8wyY-x`drK*r!rY{e~3%cAZsOV)pmukfYq+|q5bx;i)nC#+-S9CFQ)R1~v+1)ivs$T14NHXL@uA)A%9B?meVz1qIG zI*7NY;vg#-)ivAZNu9vGEIYA;v3G??&3;Kcr+y`%M~&Ob4StF4svO_V)GHxV+wn)@5T6z#+? 
z$gI%qqekPe4P^to^Wd{o6*=Bg@?~+#Xwjgj>nmJQ<7_}P^`BjW_hd`o0DP7;cIKvY z7e8yI(0B#1lw1m!RI*=lu{lU(NJW={cjA4Tt5b=p-NUy;YJ=BiF51{*@|7S=Q6W+( zv&7gHNzHo&Pv^4z=$QQr?_^4Iy;+VB`S6N~w01yZk?5_P$*(*|&fI_{?eSHIm_A0Y z@+iFwo{}#AXihIvV>p7t6<_eFgGue0H2~>~+!8pc+53IJ_dfG)>`zU3?z<-!9m(G6cJH487xb8$!e>+q@Zp+IWf@l zB9t4TsR5B4f&70v zg3mA>RM=p)B7nKGS^{YM3cTNNxBL%l;cK*Uzf*?5&4|=mD%(M)(@~eS5WEgC|8^H{=pl?KVI>XhTChQ-Zz)kq@^4mChGQ;9ooqo{ol&A?(m zY!Jf?h?G*%6$&KD(%StfK~Ty58^0~I7=N@>E}&>5>=eJ>Y3D^cb|h3e-YiakyIzXD zQnAOI^~>#*U+EN%hgkQL(Ok=UVpbHZ3P;=MlKKx^C|6C{z=CHF^RL{SN%Qz&p%9%S z8okdohy0(l`k`pI-DC2r|GAVyqN7*Jd=xs)8oGYBHLvo`O|yZ({7=#`&LLuolqW9A z>SJgThUs$N!A3k>{7E^_xCSblUxcgkj!9?V%5XYRzWLW(=;ml9*;sFruJbQs!8*_U z;jw=~PY?Q(8ABc9A%?x3QQXqh6EL+7m%-GU=3#4pyRB*Yc4X?CT-%B0?+XW`Xuulz z=6kOMsvkpZ&iL_$08NWiucLO7SFd|6P}MPN@+>2*Z`fn4KREs%Tb+zjQ1XpG4>7xm z?RWaPpx^O8o-r7Mv+0RvbT66&7Z>grSeNT}a%NCj4rA;WH#wj)_#1d~HA!fr!`-Lq z;K|5b;^1ySnwygAD)@ny^oK6sCsq@7=G8w`^A6BGj(J;4>jo`MvD+xz4wZN6$J)xj zNjh_}dmu@lIgVmla3?JXcnFy&6s*+p*cWOa%ZQm#SzhcG}FZC&+q`664hUd@ya94SZ(Aje}xcJ@hzhTX#CKxP-6q z8A+&ZirFmDfcEjF`0Y`s zb2S}H4U$DL(2MxHuZLwsA1FkiN*VDBRVL!Jx(v5~R?tQA8goo?%`61E%zb+2R+XB) z(8&AbQZo*<@la4(mwV5BGoJZIOBe+b(0sD0v2i&@(d&d*h=jTo+>~4+Ot&0gz?#vo zi8Uy&FZuH=;ql9BPQuRXkM6`E<7utvM3XyDao7h6-xf$UmtoHt@h7cUrRc{M&CeVa zkkP%9*E!Pe7ri86gCRtM2G064Zd)Z_r^i1@7iVG)?!#>AiW^FM@1xRAqYbx?o&S&W zEzJ*vN?DO#mrc;IBiqM=1WgY7ch(rBVWZ}6&D>|$K-yGhw0*4GoTn@ah`6%th?`+LN^Th zCks1-kC)wFYoYV;A`D*PByx(^f$|B^jKAG4sssxi2!1N4;5Kg$a**5jR~oX1G>trH zi2-pNm6>^x`n!<3MN)@9HT!Zfer<=#BKMoTrKGyw6uzjChC0(4QX!Zo2^8r7*Y5b4kD>9h zeo^+RuU}W8bR{{C5&>khzWvt;A8m()XkXf-c6-_{gVj#=QcGz{SN7z*KpL zy)I4rZqH|1x1S^~G_uY=&?T_lRdgHY>00t^4xMvVtmdPC3?08G2RFeQB?nXxX5l&X zj6qUyTNL~Xe>=9?1}1-b5*nmy;&I?!2?!VVubCem`>R9#5=QXLNtt0Kk=*7kAjNfV z-X5m_Ae+1W9JxWVdfW5N_xstJ&(9xm5v_`+ED-0&?-A7=j76;jTReXY0zi^K{=p#4daALxuQ+kZ-gOq#3EYv2sO1 zrKIL#OLQM`-4)YDDEPw)Echv>_y&{OBC4Mj4;7_5@^%p-6p>KxI0}^bb>g%z@64=; zX*rLffH~&L!4zRY|B8+IQUqALV>{M%dTX@>;S_o*)le`xa(AqQM8d++?k|P@PcE^+ 
z{NJc(DL%QJ@Z$azrf3rP{}qc=V5k(Y*H?hp_{*qP1YW8)8>_a~%qetMv+@sY3t<)u z+aA1eZP2j+r?1j>!3HW@FV<`99(8_KVtgg?reLB}aINYo!;WZx-rp@6Dyu59N$2)C z*`?p=(*gT}40ONov|T7BEk;-G=JRWb%aT3aD_Ok4`!xYYO0|D!2-R7q-N^g5L6Vhs z@*rc*S72t{gxi^1E>_^HQa|PCWN_epl-uGl+nMl>(T}PwwA#>!EjJ0;!-!}T)A)+! zgC#`g_BvykPPvO4Oq(*Mhyy3)8lwCEBM!+b{+BpR>vlS+Tc81Xuj+Y=2F2w7V+>bz zOi=%{2bJu=1ceVta$;*;5rdvQ^W?cq2~dHm){3{M-b9YJJGhGnzMLuapHYdrom4Hl zM%JuAh$=|Y;TaRdl7jC-yMCy+LYeGQ`3{^5E)p>k=|1+O&&lvJnSv7gYI1>Ym-J_l zC;U=YgMuZ)^+S|xUAxWl0nAf<%GSmmTCA+9ElvY@Othwg{8}Uz6UT`BwH8&NXu26X zF)Z*;en|aVyd6rn^+IdGoRso)4jUp)2fQMGpI-5Y!+hvg_&6wIZ2F&&mL9g{2=h?Y zd!IMQR-0GYxb2DEQ5^0Q3i=FnH*GH1FYyb2?S6T~GCkgkPEYYc&{nL_k%3*fNUq)) zby@#eLwodR+>44k1f5s%z3)h-X}pK5C3inmPsi;Ch`?acq~X4ha9nodT~ZWZjkoTI zUB*p;J)k7?Pi!ia8WTZ(FRo(ZY-54RqPOF3a2V*)D5Q^4196DnY0=(Ckd86Q*>$(w z*NP|YJ|X`(97oN1J{zs!J%>8d&bYhPeB%$>Yx342FV%J0mJC-lsTMhTxqbX@w^f!G z8_z(hF#aH3)T|?Sklc=#a!~;mZ*BGSqcY4aSw-8eBntV9Tj|`-;Ti9(ly8fmq46#! z@T9hF?EU5T0Cpl5)on==ioJ5+G0J)RN&s%^1sTuN$izTL;kLb1>061@?R=fJhaJ^+ z6S~DYJPnl_T5dm(PtXO^yRr2`?qk+wW-B_EIxhCQia&h}UfBBrL;NTbipw7x{vQFa zR_(vmL{cE3QGlt)MvcI;-${X%eY&E`MrSM*ImAy3Ku~fR?2!%#eZOUf;KVK`%0*tc0QT0e5bEk5rw}b5R+ttnI^3Bk2Ci!7&ATjW5 zC%KzT!icaANb`^(XH1R7n%-mXh4yxRy4LK)Ed=Os_Fc1Y3?NX-?!7>|?XP2PmF}I*6Q^ySsEEH+C+e zAPX-Ndf@6JnHH%&f^SY;$>La^F7|7dZ(*rVgX^n5bh)J`z99Oe>K_!tIcGCb6%)GD zO+G_t8wX^lxg>u-{65EWg{fk;s3)pBn1kwn$Y$67_h|X?%*y~xQWf>2uoLMW3Z z=4Dkl0X-^{l&v_829T^;U1|K_a8Vf=m7;5ou9Obic$3M6m7`ankCLq5)ZH6iimZY> zH)`!n>MFC7XkHXc?j*Ln+cD{AWhYEG=AXJKzbu8=P>7WI%o8SD z3K^a^kiocmn=uJqsAJGI}Ul}J-;*807i3MwZ zD!q+mPoWSjh25J)OIm9IC%~yZhVM-s9;1!MzpB;6#Di-oqM_5A z@*-IPxm>E@F$SpZla+Tkyty$FYUoy7r{XGy+#|AzGu705;2Gmgi`?uFjKES-h+%Zk z8((X^CLtX7B3{Tg`uPW0=`4Cm%b?WX6XQ=K3VJ)hCN6mM@JN&xK&8g#f=_$KhVbNF z=&ML#4MGXc6sJnG3Fj6}jSJhvzhB*~vC@>x#j6#8$b3R}X2i7uY)>3}H4w<+CdM;% zYLR=tw=o3*8g3z2$TSbbvtIZe(|m|(OVdG?4=d*NC*3Dg^XCzwbV=Nbew->c(8{CM z|0JdKi;%{DUwz~)K@2`K=YbxlPOhXAE9$u#)T$4KGURI>!#x;{vj#6q zrqQItfY*xSy3nZ&$Zi=ET~|jQ&#fBrLyJhGq4SU0_4Ia&XgYOsI(%EoW|kB}_Ih*D 
zch#&%IZgyYxg(N2=-2!`kNuKzQiXaObqj0Qrl*;ibTU2y;>CZmn51}%{)wrk@P1-P zN_uH*r)ur5qo&S!`zeAfJ&(KnO141osC0iKc*ufh`|y$}eLI0SZPK?hjO}JN7lEm0 z`EZKw$z0!lX3q*ub$zHp#&UD8;W5PKSlHb~j&Ez$3!+P~7$#`rqo~%{dh$?*^}W}o z|3W&Lma~B3Z%cR1-%n?QrV}>zHDGu5pZ7g+-5VP^j^H4;0~*XmDwNZqO^r++eDs7F z+h=GtwZnt$eRAak`2`T*D$0(6RM%1uZ8eiq5L-xeo}KQ9=OZsO(VVN^NSrGoXMN3M zbi+AQgv0IZv)Uwp5d2rymwhRn^)UdKLmVtO8lBDNlSZLeIRAwOP;Ia8`2cPyz;DoM z%^!GMvdXVDqe)9|xaHiobNmy7DuQ;X;TBxTkTQ!gs?%fv)5}eO{`1lC(3_1vJr}Bk zuVq6D)^C2Ai%+(#6HziRY_7&7w|f$nfBjBm_abTc$V&_=+IfN21rX49ccUXW>09-fmE=_q{)uA+G$qE7 zw7|a5dOIO8kryY@oMQ}-3k^NncOY+MCjIle6Ea?rTZe_owQy;>WO6-Z_~g@bL4CQ7 zq_H*GX~Da&t-?fYl379snVN2CkxyKfX37Swx)a?khqzx-pUnGg#(v3oDwvd}szu%K zlI^8?^~=WgvpYgSikkEg`?vD!A2uW)sRJ{F` z4oX@CFP**T3SQF-GWXfMD=c8+49f-fH41BRiWvI?Z1SXDQ#{*gx-VVpfSTdNe!7SYRZLmS6Tc5z&5`wZ zDxUR~;txd{nWW+zk=HPX=Z}?@X=kxNWo3FOL+O!1wa!Y{DAaSqISCtsE8^{#>*VBx z1dw;-Oe`##{#DAr@xzroIoRsrv#;JejuaY(%J>uLghPKW=P?eL0mjAh6+P`0+1 zeB7e4iRJUv`>L8AEid!290sWrxXlb*qLxK9?ls2$g(Yto)T&>&|2r~VORfm%5?j_g zfcoe;Y5(@B>}#7qtXW=IdU$)0`ZcF`H>X0pmH}}hUReuso&ue{`}$Q~SGC&2T=vnu z(efs6?fMdDmocg+cykrt>Z@;^vb7cYY_wRfdGsr+ z>Aodgma<_Of_ku6HXJ7h!6)MVy zgF#uzMP5e_$=1iIuH!!Q1eG+{u||1AroXI(J4BW)bDy6q>#(c1V%X-HcRZw3UKzzlua#Bbw)e7j-re9hhc2uP-3)@5ab}Lq{~xa2`Yo!kYa14kE(z(7?k;H& zL6DN}7Lb^shwg3x>Fy4RnV}n`L7Ev_Kw=2#{^q{l=XsCg{r-UYVefs-TGu+)8S4cT z!p@+A?U{WQY&r5*xF(ZG(xsl@zIHaN&j@l1>DcflUvtCJd>P>h>pEGHMQC2mfxoUWHf>gz>JecSPLYSvbNd891Ecu&k z#4P!*+UU2wlw+-2c|hz;a@xpu^l@Qg$woOezTo}w`cGWe$gdoR>W+e^BS`etsA}4K zDy)ky{Q9SVBVIsaCFPj&A|z;~8^3p^mTq*lWlgq>2>heWKPvibxaHH0WdI*~()5xa z2FNB^#r&Hv<0fhMG=#4X%=U({0o#WG!Lrrmr2}q*xdhVbs(xiV@Z_3nju9mG=EY6| zNlI!23gR;5t#OXGg$P9Aw=xZy|1SLoJ^9rAFt98P5OJzsmN6!wZFuEda1vF%PEG#P zhjH*}VbOT*u~TMZs*)q(NAz81|uUL=}Vz$GqI<2j@ix?Dd4Z^_56{*37(|DCKJi8Z`#WgA7;1#BQVR#7Y&r-|D z-Ra$RI{-?ZBe^#Bnh5OOnPM;&;mn9#!Q$ zR3Nv7Fuxmp_34bE)^nQnz?MVQZ0&*lCeK?fVw2vI#hJhU*nHz^Y#1S*`Q17U= z+&OIrB5(}e&NctYvJNCs!nvjoZezrs>N$9S&(yii$dSW@Bo|3%FCWZtvi2b2Kuj#W 
zLBTY)OX}3eV1~b{o^%s}(miu=0>_F!t3BlBb($E^?W4pJN;z!zxqxG6+ajwe$<3v+ z78*`3NPjigl*mUbpNG&|{P;c>Y((224v~*et8|3YtS5F}S53~T)xHf_Vr5aLNAG7#Ef7aw)qOK{GcB@)zingu&{G-xe{0UVjqsO@E3kG8XhK}PK$YVd*P z)oXNpi5HKtvAOeaT;3gaKOHVB=}eRw7K%>VW&bK?1I9+d?2$U<>sKz_rgKa@%oUc# zI)PWXMhC0bE+8VOax6*l6E>HGH@A_FU0|Yho#)4GTcq?V+}UVyO|$3K`J{d2*e}JsE+_rbCl%aWu zK_?t2L{KGDiO!%+*BhZ%qcjQcNqGIWA+W_0mib5B#h{kPCvMofOf4H06Axi9XL@=p z3*qc@1gh%jxyOS24ss{SXWFmbYu7+zzDOI3Mds+}=HsYu zO03bdWPdry&O7)*+H23=PU{U2(0hl(T6P{?=H`87^^7JS5cUcS6?kps*HXQ{8B^yj zI$`PA{}MgEcNemsFo@8y=l|{E8%@;ygUl&x#3f9JsA1IaW-PJx)N@4=mGo4*-^*&% zShUxx$T}~fF{awqD~`n=1SUDJ6uS?N?*bx; zfsAAIS~J5^FBEBspb_HHN3^26a85jiRue$|NR(9NgQEoZiv^f|RYJ-345@lLzX zuML={ai@lvl4ixUr@aV;!-ZtEk8ksDlMiYa+IWZ#7DSgeHMpNFvL!6**UtKfQ$GE@ zeV6N`z0JJ`%|mkOX(M4IJ-ux2-w1|`q7pr&rt5`g)f@wd*5je^og(&lxG+CN=~MNI z*Uc}#r?$If#urE?sxkI3fRI3=#PL)nU+ZZ2jF~;to77YBH_<;&q>#PueA*a>X4?J5m4Zg5i9V2mqr*Eb@d*(Vl?>6;yTW+i&iO6}EgM($}x63__ zj4!rx_E{X?F_wdQAJLJmpOlL8+!2ENaz1c-^PMtQ{D{)X{i;y zZTlb<^Ra@m34zOXW@oW=m&^T1gwe^Z6?HM-wOW%d=KOEAp-Ka5ALO*8uy1+kdbQqk zHjc!PZ^>})YnW844+2H}dv#YQYvQwvi5Id^;;v*T>gat79CVyv1H;G49G@rS37aZA z@!&kKA$El6EJK?6+Arb9(x;)WPSvgAF?YCLZFqD7zq~LIlUCzu1@APi?H3vOo!8UYf$j6*P}LB|AehQ3d53Fey=}PLELYv?^Lw~%A}WH* zq5AKfd6kNkx)hP<*x7jvLl4RQ+DX}>DO~@k3gUei?G}ec!*oozb1lnP?fsF_+YJMXgt19#SL1r|Z~ZNM$3Pw=5Q zCPo%#S4mW4zg(X<|H%bXGSIoyqJU_O$8UjsaFGnl(Yl8C{z>h6nKcH|IC2q*pW$#9bAb*Ax}c#hS^dFhwMPpXxl(R)3Db0hOt zuW#3V)S*n>+0k-oO^;eKJmy=B~iU1{*g@oVL324p#Jxunx zkB$myCl7;7K`>GoD7>8zf|9^jz7{pOs7DLGETx-hDAMLTuJqcMQ?#<*D5>#3itUyf zhJCF*`so$)F)P!V`0N7qfDyy_$$tH!kacSxO@6#!fr59>*{F1L+kp5JFVv>AA$>!n zbDDR5^imwy{|?nLa^`99es8jtih&HDwDsEH!338abi#(E!grjYQ%FpQG%Nc4r! 
zaGc|QX5C@q3JH+a)25|7yfk&lzFDh!U*78Tx`rR?k}%FZdgWy?zU>M2UV?$Vg)lR> zYIl`$c6nu8Vn2eUTynzxydi5X;Mu(uGQSm<)tQkpW z>{2|-Y=G9>CR?%+U~Ikx=GSk1s;jt~LL|AX&e+8~v`HB09*)tdd=bYMnl1TWK!#(i zErjJl?~=pMdr%(nicZVVGxbm91gwd}o9X{Q{bc$8U{0m(xN*K99$>3hEfv~T^crfR5ZsT2tg`EhK6OWH*~=#oTNRh}ET zxSEa!HMxGA{oE;`C|^yHL;ROy>h<>OAtr)fLc+1`dhN1l8M#L}%rtkbI@N>%&c3=B z`RHtdlE_@L*Wh$Q_onP{U3Mb&zb9w0w}0O#z5Yox+kCB=A!4FQ%&+r8&v)#Xp6)9u z#2PTxDeMd{vp903(uXes!Tmf;V9Jshf3nR$%yZ6H>;}!CBY!#bm2gOIQ+l zrfa6#=TFUN8~qk}^PBD|V;_i1eBehkpM;&%C8{`mee}X8rJo-e*){yu4)Po_$q<;T zuY7b{!rh6`e;d{NKYI&H{pnEi{RZ*a#x!VZ{rX57>D1>3bkn*BAXj8pdM%{!MKEN5 zGa32i3%-UjmpHnMJ(iC0Lv!!zE2>Ln(QIULG|IJwA9kh6S~{6~{Rfs~>ZBV)Iv6)h z8*ZuBcVzs33j{??IU9Fcqw4-kljRRJSPtynkMwG(;)fJz~Q&uIi$hPb2)?SYg= zG=Ces(PO9i$?1g||9))6II632dnu7%^R)-TtZ8>U3jp4h!(0P4c2tUD0=3wIW6OAi zroD(!^6C3cow&5c?yajVh+7x?hbbXC;gLcj^0J* z%2*90tkE`GSu6OQq`6ITo()Eb zE}Mwhw`OwpH{yscOqPd>mfxZhq>7*YjM5kU{+b)UvSMMkm;*CaH-1R8%3mEom=tAkU{Ddk-I`96&O_ku`$vo~!B)3}3_TOMEEJHGq^p{9uaL+GC zyCVqHF%N6};J0Z%+nE#Cr@bZ^6oVxv0*vhMhG{PA_SHQvST(hd|1n&s2zUcT>j7aOy;k(BY5y0Jy;v}!f-r- z##1#p2N~9?ATTR|(pEaNWC0hSKWmK3WI~Ker;XKPvN&fv!g?NvUi}raR_8ijYN`}; zI7H;>#yW|9(>&H#^ViAlAV38>V$Y2h4lmEk9puNru+)UMRzqV{&DSl z?kA)hDh9nDEwZviV5UMO9Ouak+;z&usHP>4%AdTB&+xW`xTb7`wlSV_D^BOgeo8Un zVi(4S-H&tReD&Uz=sUFi$#9z!QJ8Oo6fpWd-)NQ6OuS*dRs#wS{HiS}5F88@6m)J^ zSI4of{l<3Ll3c|mo1Z2!Ru!OJ5$a_~)3s*lB&g-R{j2+tXkkLH%!bLZ5@2&u!lInT z#~c&s4xbs#d|GzGh{xPEWZMAAV*g_)34)b_k>@+!*_WKEmxkFf<@(>0F)~5@*>#p? 
z&!qLds2bRHT4lrw{YSiKcAI%#-?9C`DT61|FY$KI%>U3^Z$02x7{WVnD6f}j(rqmy zx9V#93WZQ^R$jL7b#$ae5&a~`XnNq&mR%v9yfEPwEi?S}7C7*QGt7{JeLL&;IS|A4 zT)rZ4008*8B~t%q{^g3u3*%|*tbOAyGi51ub~PYlzX$9CBF%h&h`gm?8~?eM2W)F+ ze(2wwQ_^aNcRu@fLFS1uJ8-|n8J0c>$aXs00^hAbC!EFLyRRW*q_hw_i>z&OnT*q^ zjA-`X5L%yG$3Ta(f1q;PUwxOwRDxwRf8RQi1trhI4qCq+f$N_>8$nM81-oI0L0h;0LA4V)}8+f4)(d@TGN1kvxH&#m=Po&4EGtM>8(A{m4%!m>`e76zJd`CYL$VU+lL=Y|8%D|v}d03WS4u}nUCGqlV_@) zWiW6vP9cV5bRy7K}}>oYOn?5Zirjc3)}<6WH~ExQ6#0{u=2?k@faDln2+4R`)& zYcw&4m}2@tVKVjiZ%|<{Q1y_K_E*3D=UAV{AXt4OZ1>M}Qj?7~qpUiT6CsE#R5>$u zeJO~F7bo!nX^cU~2UmV+BI4;@>FcIBAIycvT^U_zd=s69Qp}5#jN5bfDGRzn<7p+9 zFBOZGf72VNoi@6x6O@hGgIAV-npA_RkIpiK2zn!(?ftGpi;~i2MrVn~G8ZF>V-*`i z7MfvIzW$8lopxHU;7Vg7?EI5p)8ocD3ro?hfretDeV+G8e%& zsT~|tTe7GS-PaH_LEVZV7#jPpXD9ZJt4eO<>-UHA{JSJWu2t&1M`8f^&ogs^Serpr zw69C$^h0p;QYbi@gw(iYtwpY%K6La!9(Jeddul^;$oIU%w?1+N>u75erX2Rcqbm>m zpF5wMPZexvfkqas*90ln+&fQcP7glo7swb=ffQa-xQN?4EgFO$WF(ujTwEIX@IWsC z36`~;v4IywM%Gd`db57^o0$#G?tYjai{`Q;b-sUB7d{LKUnHi;N)#73Bi<8pi0wpN zO|$JjU`(HT5P|!5Ph4ENzbkren7=b-U$D)jp zU%x_;4?BVLmjY8y#75Tc0Yt~)at(F3Y0l(hC(k1p6D|*TeyISx81rG z<6S;Ae{Vbt^Q<(k$C!dEPVN zFcB+DiHaAEhNYn2eEL^JK}PCPLb=p>6*PBp6YRJp!nw`eX(rGoO6!p&x&vD4Ae%J8MsE? 
zt#F7hU%w1LLFDylj}4vaGi=IWT)hn9->G zE}k;}oc|I?H`_7|#UtZYE?CowIJ^G1ySjMXs8`QhP1SsZ&K*n>M2}mhdf0%G&81UK zbF?dLay|GaobucpVHaaRq%lhLbRf?PiMQzGplw2y z3&7-?M?P1suBcFxdzh5nla7@P!HNP;o7?d`U+;2MG&=c}+0#sHf^~W-dKQJggI{PE zP59+tU(37D>?!GM7VRLM+boxyW=azdR-q%xU(u3~fU5rFs zB+4i&C4v1~eFe{Oy(hm2r_g@lUc<~n`Z@XPh)qG0;6g!Y*`rdD^y~U+&B;t-Vp51; zYZ6|;yO$7I3KfSeO#81lU^5-BY<`NjjUpH9`d|3U?>;Y;VPEUmtO7K2Tm|I|N7#s< zMf8wPwJDRajY+5jSHZ6dz%^sWLv~^3_vp{w0vCOY^cAzGV;ySL2?~snmP*m zn&xl8i4UjU<-f6MyA?GfhDy(DNO?zr*b4+uOOL{dQ90s(i3fOwsJ3z{E+dCo$hq+G<}$2If`ycxok;=b?~nfl{(8d{_B^dCqusZKSd7 z7Iqb9v)2ziwH9}Fe1BhMPDSa^ z2TBwj$coZQdCZ9f9qj9tgxo!;+rDTq+YL=`-JN|Ks7b2d>CqKT1j~w*;PIme zRV!lGQ~GKuOtc~`5y}`msfdpve4WEQ--SvjS}#&~m?%Fw1l;Yti*qwgwGJO1h|7wJ zz9@tw>QQuEI>T^HCpSTc6V}t36{63mi7L6rCfk-UZd1cIIPMw%nKO{MpTBYkUbnw^ zbS@^KZfm5HBBC2=1)1PxNrCQ>6B8%x&&Jf*xECY$^`%cyH153S0z#A<;O?n|L-upw zTocdP5+*G7ME*z^t0@o6!E1TFFi`N#)E6O(Un840(2kkZ%EMKJc_ajM*nmE z);HXcyBu($E~q!=8nkUR9wVsH8)rRHG(YN>%^M)!>w+UG(MBhb1=OBOhkQp8f(L-z!)sl z@P?fn?i2mi+t#q}dHsl1Z98NOd^~)$0nX@?!1%q)J6u3_FY{brz3ic8*1Zae<)X9v zAg99Z{w-7(KKaUrD>Yj#-CgGDk7F|aMjQhcwDZU7TRWdJu|3tg?L-0Yd2VEf7UuP$ zmPS?g{gUSChd5&ZRinLMlKK4N(XSa!PYd$Y4HNBRP-3W1`Zj9p(@cFp%d?zmZHPvW z5Gbzderj+2(SCN(2Ffna%$eQN|4F4sT{l4pY`Cv`n$gno^@7U~hAEBGM2yf3-(=|! z?xIt66}dGQmb$wbYW*JZ!Uj?~2UuNCeq`Wz%iaVzzQhKXfet#%36IQvfK{&xmB|K^Y#EH9N%Gm!_yt;9 z*L_{`X#C^>KAET*JX8h1yF2mXe2R*@lBzMc>8_~y6Dz+}9M8;N>MVnys#nwL;;c9k z#B%b($Dse#z+qlAxO5A$?K*|Q`WLdK%OUl1O^;&)s>eH}xHhb_+IU(&>>6&q3BA`S zjT9pa7xQ@$pSIH1QS_YYs3uKun!V?esq6K|*=YXb$akxVpL#mg>#JK^47idwu#uW2 zS-4bO;1VIZtIvVc-!@+kvp1aAH^|zDXNZZr8ezv!b967x?T;qI^p%3v7O~L@3MA%x zc|`+LTat|}U!S}ZcQ{x*S&^Up^UbJQXK6{Ti`iQ)&E~B|wAMENn`C9iy zGqUvEkx3?mVZ`#DYhKhv0@fzvKu-VazkZHEhJwbsYzB9Sj6 zIhNgM_ol}jmD7LhG!(qEn2u&tn}=B$Z5X;udgJU=ECsy`IrQvi8k-vA_e`mnjzQf; zHwTw2Qgv}Ch3)Pt}8#zZBUGO%GX8x8VTl+!2J0%Z8L)d z2WvkKA;AwXj7`$o_}h66dqv<+;lqMnbJ@8++kl3tOUCwVO4dmxx9?N?U5qp?ekck? 
znOw}6l&%5MtAm@9#EBujH8E*-2fQ#Z7`>aD?1at6l)v`D$4#(oKSchV-SVO48{3PS zz~yYe_0)W}T1P`lN{b~6?ZgZ%|FBngv3iI`ZGfj2Fec!Db!x&!U{{0;)TH9%oAnu+ zSop8ORM-F8c-PrCDL?VQzozVN#*=^K>y9R?LEMn z0uEVAn6F|3250u%MUaN4VTJWzq`2!O(j{-c-`R*`EylR z-0s}=7(pgUv^E<{QTdpVzuaN8X+Yg#KJU3ACs)#zf)%&&n5%OFlrCW70HD%j3rVFH zs7M(LQ5vMcZeEn$C0FJ9TV#)Q=eme4p`s?7;(sY`xZvwm8$wDq>ef$$2yEP|NpmFm+yPf}@$t*S{M99VhxLleQC&Lfrj*jW!eDV16 zMwa{_gxdFP0|k119|+n2bRh=k<3UcmL%{wo#$vL3GbkCarK~s8EKlNPd|kzX^RDkl!I*a1&}pdYHLlor|xsYmgz4;74< z_E-{gCX=vCCI#}554FDbLWjh4%FdJ-SuGV#DSOsA&pzbza_(=3~sSnaM=k!e&VNPX(?3Q%{>IZ+YGR zC{9)(ZBRCX&f5_%Fx1kj8;AOu|35JxamKjkY||bym)j}k>kjC2>a-C^PjwZti-HQK zGE4o$FN(1*>xj<0h@Ozfn~6Lo^SxdNCYx-PM{kc!amd$#38`Q{E8)d5zU)^-@3EIo zW0JLnW&6MTnabz}-$!LX)oA0-=ozlO9_Id4Q%0k$a}#4LTzh>iW3V)zwgDmvTBBov zzq9Z(TEdDzu%7|Er!G+iFTVJ3v|=bQZh!^4Eob>nouuHCG`6vGDaIo_;d3R~DrFn& zx+W`z2dldSS%jgAMumiZ@`$H3&AJ^cqvNP-32rYTanBXyv6z{8yCfQCx(Jx~AfjeI z#@}7>wdtRD!H`QHr}B9Wj%uuHEC&oFk0!y_+?T+(wSN-^HLmriTSpx!E}=Hc67voH-Y|Hp? zuoDO0%(e-r66_4R3k=$xetZ!r@*9K#!O{G_jYPcjS2TMNqCEh7!hXu7W8Au~>%|l= zY^%Q%cnw3%M??{=SoOBt>NI3p4X{)+BKE_p+KJSSUz^}Ej%E=Ix>>%Q7MCrF(zRMO zvvRZ`94>LB(1DIMkG9PwUszC`f9#Xl%&m3bxDMmidQ^G7J z$W1ZDoZ2)AVAA0xZFt=y{fc;ym>W9*`hZXlF#_WT@GDGBejm^VNCP?R<6)$HI4F1z zp~zjJ(|vp0<9N{^;M*&c865+u5^^(NT$k*#PwJ%b2-wgM?dc{%V-W!*o!sP-a(t{s zA%7$~W%kF8y^@AS$Gkf6dz;HxjEhmhx~9p=1rYjm0#SeZ@FPxrpgdgi+S;?^TY_=v z_sj=89=Jn_5WE4~uVSnZyj2J;Pr<_1hX!zBXce?K*IlC;0kg|KYYv7guDz;0f{&MS z_lL!6qn%}jVW-<*CgXk_N{JEciNKU(+dm1`DsohVk1J`eB zcW0d35djU4yOCK={w0V>8NYY`R2O3ulnbyT`Q0f*6`;AVOhz%NyULn$YwjmvE2Suc zhyC8Lr5Xto|2Xq# zUn)zbqa5fY%sLimkJB7>Jou423f{5I;*Z=0OHSDj4tjMnJsybtgzHVsxi5uEM71Y{ zweuOTt{z-?ra_(VF7)WiDJ0~T@+Rs#nrlCPhOb2T0#=Iny6`orBE)|>C>RUnNs$Mh zv_R5a@Id;)9a3L+_=3-{R(`{r&hYf@SJdib;jeE;rSr-)BpZtcG@G=_KtGtN7n}A= zG}&J1eh=wj|6lAdbO~Ta*Dp^EUl4fqxR&yHYxgo+JVi==Ml0hxoVx86s`04!JQ`T= z=i#u~d)UjHa=JoYPrR7jr5}1-?rV}N`s0p~v@X|KV~eKcZb(s;b-T`tNI&p-yJ^-d zr)0D-uOEQ>nC7y}EdbYTvJXaL*>5kW2Ti~CHtPvX1IP&;)W`RiKl;&-co(I$A%QjU 
zO%Zc1rk3{(Yxfh3B1XdiuG^Tebg$}7W9zy_WB#F7-*|i$)SH?gwYe4p&Zv<=RnjBl zTgF!?bO?f7Ke_U5!`y$rOR?IL)zD%#(bAD8vQ`Pi`$oBVx!y%W(9vDp*B$QyF$%ix zKut^CE=b_&mDOzRWSZ93_LsZZmI=>OwL|9}i%j~NC(LCBMn&Gt>)q_K7`}OR`7L-G zML{O84?h$Tq?CC3(Db@!5_22PGzHu!^P80KkO{@Wwy2eNIas3hNo-Q41v)Y|neAA@ zfy;>x?4}y+;d71_FA*Z-kw-yNLN@1#FMy#u47N_3c*w}3>|EjO(M}@{dyzD6Fcc~E zTCfxA2;N0v_l0@NLrdhSfz2lK_v4l=XF93wb^RVrCWpO8XBekVG2DU`KlTzG>cX zfdVP;72ISKcC^)2!?)Ve$t0vAslf@`v7}Nb00bDk#vjc%b~ObGh1;yh?B7`q0IYfT zqH|2H?T812{55BR&v#8c>YXiZV-%E;%VAiQrJyO`X)klO-za8TVGCiddqHeg4M4kO|A#98|MIgtw6g6i zFVge{s+E&uU4_7dnU5i#3_Wuhv+BT#_x~B+&Hi_Me+@x&MUClohqXV!x^qY>_`_{! z?fbrHk!Q2*C6_3*LNzWi8U0cIv^Z0#4wBSk3uc{-$C(;>d$2DK+`wM*AXV<`&_URa zIV9cf5aZ+l%{2zdG~33I(W0dj1M5RR^c))bba}zG^dKO73(SG(ii?|(`+QbT1fHQi z7#2D*hIkC(Q@WF~&I_;)cw-be1;O5@chUC%I5FTW-cN5JR`VX# zKhKg`Dk!u94`T3lC!`2YT*AS}wrU`A|Bpw&#a6L1m~1 zGhc-N%$A!cvC7Aw;IM=?i}vo?fo_1E^B8I(&5L%)+T97TF?y{x1r|2+Q5*?MP|SOY z6sI~@W5H0a__QRxp2;`RelZW3bC*D^ZSTG#7ix?^RQ>*E`j490S`HkqLWJvOs+~N2 zVP}x1R&BTX?e9IuwgJ+Mim#XF@4Jo9*?~)ac>KNmEJA;(Xx`4_sRdTA!i;JEa@oLQ zTGh})J?|1V9G^@vnnhrT8*K<<&!leRf#VZ**IaAk^s!Zg;}hQNYXc4=CDjr(AxAU* zm$S=#@-w@U-h@HIz#qC=}sH zZ-ec_XQQL8kr9uL8F1jM&L6TV4Y`9)LzdSoCr?vzB-1T58CSFb=Rrv?9lLj@MOl3H z;V|(C?t)?1Jm6YZYgS3P_j@Bg}dP|DdJT|Pq@A_lX=!6RD7+|D;&!dJdrSsC*Qy!$9n zXoXR{`D5L-*Vxn&QD{~AV8z+_~>ATXRDk|j=eZ^mt zv`uFCNzX_N6*uzULKvUv5z={kzSjgjrN3n}4Qzs-lv zHVwUWMEKkR)WtjJC+p9H}ha(&s0P*^gw%OenbpDX~_5)sbNL#)C7aO z70G7AJPYD-UZO$ugiIAnr8S?ueJ`_!>Si{Pywh4;8S*f>uztRj~5wtqh}BjOis^>$$^7$(pNkroelvKw*d%e49y9Q1|~;;c;LA2JSv} z|Ck4`Iu0v^z}_X>J@BnjOtZyAL?ysyS%Yb@(0 zC3ju0yu*Bt&h={7&aLhRHqhz-U8!J7S3(6Ui{sAu;93zG)|R+0ykvVuVBlXoAQOi} z+90TC{;LnKKO3~}m1H}&OKS<`|AfF-MzAr+Nc1#x{%Y+>F4)^Ve0e(GA6MVr*M0dY zs-_ZS`C*XFT~=H8@?Gkt5hX;J7&0ND8jC@~c!PtLChObLFJ{*3QTg8=7NW1)zC#tqY z%4dL-qnFv7rzK(7kvc`|5fmhhVp!(9hnI<A##2VDDji*f*JN345(q{gCRD1*O7A#KA)hu`Ht6-xX}C1I zAK}c=O~ROCZnP6_rYb48iz-;YZi% zK|q6EmCa4*05fCGq%%}PuuQMlE+HV`NbL=7FgoBP)aOdwO^mYCL^`+3) 
z>c|ZP4EGH@fo{Yl?~$2}GFU1`GA7v%GgblevfuC&*dsi*37$-|1A}@W)T9RRhzk>G z;SeITf#Y!6lO|Wfpie}=ek4(rw#BQsJlsg_+U&fYtbAJP!(yeH)bY~NOJFu^vdH!I z+~p4@Rf?fcV7S&en9xZy0RK}Y3KhF0m5H&{d^lcU*7(3oFLOEKY#8Zi>qoQ+PEZ~n zem>MwJGrcTUp_DKp@TTCdyELlQqglk6wEg5tVM1|nhy#v$%+MuAMn8=4A`;)7;)lY zr*8x~SVHqXHG@>+3a^`7B}=U=v&fh)C!HQ@;j4{d2=?=9!EYX(8{<`np%mIKPnZT^ z&}}Oq&?G1zlWaN_4j_^`M$&e8ALBPP++Ji69V;6O|#X(BY7*st;Dw-(9Wu3 z*x3eOw?6~(2mM}?&JHAzYFq7Ou8u(7?o45_9k84hLiCJD7%&)U9ULV=QP+?E#m}+N znN7EDmFOGu+g{Qi@2dyt*#CJ7IF`@j}iZ^5bNq@bkHg{_sO zq8%#fj;nQiYd-UQG8SR(G&NQox0Buh0NVLj_hFVSF@&4i#;^d^ZNdUHyO`Ph`d`c} z=YN@^gg^ofcmm12=))o-f75Y#%+eA0Qg>%1&AzOv|B{ZEv%QZBM^|^-&EEC%sGoA zoGQok#7P6;xf@7QP1bZC-!kPbmow3)e^efmNodt7#PrI+W~66Pnt=s$40Nw<(J*m& zd?vUyIEaXDpWRQ;hAtixf2AL`yzB5SMpY`L^|28)=dY%`++K)qDvRd31}Ckg2Wvph zw83i+G>hJcN)>B{vqzSZM{E3j2}xO?#i|6?V=KW}0?0h06~lO7RP=bjpkg<5zGMj% zhW6!FH4F6#6gg42Lo=qjcI5ZQR(zkj6{J1ZvBOPIDr1I5dPu8}>oyp)R5Q0XR8_HoPUGWS zl>stJo<09E2wV}R`{cMz<^#?1xb_A}6* z9a0##s(TFJItQouWXHAw2I0DXmjD@%i_2SBP9%=M+gkmmg!V)BXsBq~jcAJXCG(R7 z21b)_kItfc$EpZ$j)L6Ndj(lJyrZhJy(sUz)$-}+KRrp1p(=BJ?9g$hV#O-}{oP0m7rtEC%P8TZsAGyJ)=`+x4dj1 zi6Vk;U6THlIrmoy*&AR#Gv)`Dl-_M$QxYu~olRyrX{Hzq-0kL6(RWLC3_y&^WrIKK z)|(MEPiibnc6(s&byzEbF7N8E9){X4>eNz3pVb%7W253I-s4%*TS0_Zg{TsY7cAzg z#97uzv`&LArk@DZRnU1&om|Y+KRn78YGgyQ=iFf-j0-FOVVTLC*!)Xq>Cb<_I!+<4 z6nc)aWRnEi&lVNgJw!p#p;BM3MGG0R%ihOX{vgHW2<#qP_dfeHByeH0XZGy=hh0(j z$>4wHPk&q}-VZ|dO%eDtCQI+o`rt*;z9f+I)*=z7qOsuSmSgVCIscy**vU-UoL=J5 zP3VsIoVtnPkF`wG>PxyrN8o|Xgbo8rsHGIjoo{37jU+H=36K$E;kBGPAm5mCnfxlH$PwMyki~&MBP55Pu(84ryzQGhlOwS z5@Fqvz2>j#uAoD_#4+lJn`vwPwW%&zfu)X3G;niq=O~S#2Xum;L)jw5B#8+(*+Lj@ zt(^|*3Gl1xC45C3y)WW-F{duGBgu0l|J1w`BAk=}k_!BealRW=69Bz!0>5!k^b*_# zx_Cz-@@RBX@=iSR zebbFB9?-0fp)t(l{AM;2_b$?MgilbSwe|1e?ZY1uh)%#!&2V^)EkyAFe zkR#r8!67u?C(kR=r10CuKcj5vv+4z-K=Rg|VG{|u{XDM@hb>4_ZX#s$U(iKeZP*_Suhk#8kww~sF0oyyZDhw0jKz7*0#HVd6cb}Pr zX>zAHn8A85gajCyai7nT7H46@H6XF5BO5)qvAAWoee2bBCv~b}g!Nqyz*%W$86qkO zAEtOFH|QF1AiwFG7gx 
zJ5NU7xgQ9POrX*3YvA#*8LUdQRTnR*3VrK@%b+y{ia`zG)6`iXN+9@0FA}VcC7Z;o1V#i zNF+=dI*;o4^jgsN%pS?{yq)^-h&5YkXD6IA*UT$(kz9Si((d{F24u;{KIBm(1Inz2 z^mX1+^;~7ujkiN`G9vxmkYEr-AP$h$$~M@WapJT2n##WT^(GIIvD}ACklDZ{g*n_R z&WX&TIKf%#G*orAt?S0wNSX*@$JJ0Zq}3-o@){+LRo+8EhgJGfzty*vaXLByv{m$eZSk)A-s+x-pV`)nULl>};+f3dFG zC0w&1XeRXZU6#*(d(7r?t?zJevYGa~=fLcdJGBJ#Hj!P=3>AWM^)-Hxc)GaTOJK92 zBgw_n;AOia0yF;bsObhH*5995*a!r_Y#PYkCSWgdkng-QA5e(%sU`&hhQHDy)O-Z!6bjOeNasW3H%kMYnHs%>n}?plp8P_eY17G_3a|ZN;|!mCQ5F)xHU4k0fsy*^fBxWHZHIVAYF)=4 z;_6#Z;~X|>*0x=jK*YIElw@smHNK1(=xb=gZXP1hLGw{1k*!|F?e^nzqlxNU>5M?< zKSx#encVN1v7=RLN$MPxEPAS6@9v(N&5!Y@xRQz+D`+&hm7R#5cypDcinn)iLm-IC zX3Xh|hu48*?>f9z@2`H8TpF)rzk1UI(@#d&dgP;QlKpw`j_4DXUW2(=M0yS{3ryK# z2@wz5zgD;pW6{;0gtgHGz%q=4Cof50L0RM>AtpLJV;+s}lfW-j{~N=jr3GfDx`R!0 z(n}_E{d@_vyD{v+i5h@LBt7E)aKUU)W3blKCPcSBx#}|3zCL5*4EC#KP+YKk@yIzn zX*Tcv#Q;YyHj2L0r{yJqz-0VCkx9XOXvEd54*W7lo}tdC{DjB`B6s z`*Z4WHLUKH^)=*c$)&&Vj?4FTG2JP#SP@}L&HDx2N(wYAfk!fXq@j^*V7kGg8v9f7L(( zA}xf5d%sN7@9@p{+>42~mOzMgI+~uG_rBqa${VeE*h>_hc|>s7Tb!?tryqNm*JSz%`#XRg z0B_rY^>QxNvjzfv+a=H}=Et1j-@{en)%*LKUWUT%(+;oEYkh4iJ0+uf>+7E~zaO(C zEK`>HH(L9nsu|^-iToIjG-`yWbd@cZcZRV)&b4;DfI{B_P7BKhL4?5ViqZpqNjAHd zo=mLj&&lx{>uh^sI7p&PhW)@_7bcY?6#2Nl0NPh{AG+3}=1M5&B2|T2Y@P~%i84eW z9Z3iF;_|`8`Vyzwh)!=Z7IB_KIpxgd6DZpJ4K&=$VbI|{$XQ7_xd5Q4;}dLw+uCfi z)s`4qPupW&g2!Gg&#t792F_lWEwM#vjG$n0Ts>Lz z{rq$Jkf<&&o8Vvn5XyCHAOO$L_1(3~f0R0=`v0s{RV<-AwfxXf9Q7~z1)-caTrp>_ ze-)ZDxPLqh&a|3FI!vp!MQXg4h)FygPG}ttS&ZpQlh4Qgn)_XvD?cU>GN?K+itT*B z|7&=g$MVa!+V)h=bcF?|9;to(uXMd-8%Y5mWj;-B)0ah=;q=s1r?+*>U!$5j51O_c zQwkMAXXF~!iQFHUNz~_qLO*-@&*`oHYKoBPSgsm)bsC(t^5D(&c-R{*?J#kko53hR~p54}r6U5|LqFnDhd%iyfIP_3;>N162;j9c^vsNisnZBioj{7&1P)P#2^yCT=(>(T5$ET2grsG* zgyn)57@`SPXQ#ilS6N6!jqvr4?Psl@T!JtiI$#ikzZi{V9-MR$6wR+$SG*l@DR(sW zGvYLT+jHuF>^hcpy>D$i^|-%g|Bcd>UY>J1!*WkV{fD!%%=>E-zvrhFo0>*PeXq+4 znen*d`!IT;>O~%#f9luW{4vQ%QF5LWFt!L5K3W30xw#ha>Fe5TO$JfcTD8sxV(+@< z++ZIjxA#9D7N}p9&RU#Z{WDVkVfx`XO_dM%zMmTS+CJ}?iH_((pR~ssA|Wv}>ZC`5 
zn27fT_8J1ER~#{|A*1wGw63DxBYx4HZ*K%n8?|^T?c@aEqgfDVW#wo74h@V~qIGZa z>Qg9T!Y34gb4tDC$2coM3lJ-tjV;2laW)pZoC@GUUTyhK361EsIj*8R~Y^41mVk=+CgCp-rH`LII2@K#O93>?0V@> zINzIbtR3K$v3+FT{Jdq?IN!t(AQR@GaMPy$mV#^fS5tG>;wve8EadRn2WeZacW`Td zuG1C%{5c|YO0Xl^y&Lo$14YUPhxzLQITFO}adaY9BsD)==L*cck3Bw_oj1XcK&-{! zXUZZytM*$}fKb{_@C?B`s!ka9?W<`U40too3v|H{Y<~~4J;Giy43f%cj6xs}aAk}a z=`ZgpxVChZEFEWmU`!dWowv-w1C+b5Jy_KRB{tS1OnT9DeX$iayI!(Nh%!!BQ{@MK z7;?-+#H+ayAS$o-^F}UvM`C0Y$9fjx;wq^3Y&dxfuymLq!HyJFJ)Y{!7mh_8F_;t6 zdyc=!>uDrKMR~no1$sidL@jGqLQT_5TQzY)ao`jmS9#9j#@ZfMn*GDN(gQ_%-7-Yl zllo4ct%n$k3G;}sounVO$I305Zm4qx&E8B7MoEe)!h#3oOrVo)I6jB09k+>2s++S( zoO4O}gKQ7vpG;P>zS}%*ep}>v6JwvEHU4cSapvIR1=>jeX%Y-Owp_!7w3x#VjT0Pq z6yFAGU(Zphl9UuRn?N~%M4B1*xo$S86Z zKxySFsW8udG}KiJnuXY5M49S^zq_`Hbg{s@;A#P%q}HqnSEfJ6qTw`tvP@AZoE~@l z_9@4o0=RFt7Vz>lf})8+V|tRH73~cqRd8y)sxW0oJ|B&%g3*U&`sYFn&)a`KTCnd$ zd1|3X|L47#K?(PfrjnS%@V5Ks4IWtG2Ol1n!1F@h=egc0n4P1vQ&n<_Dykq<7xUhv8MCIovAH}+ zA5{LvsBVo4r)84U_(?Pq1lTi)SnDb<;R2UgzKRH=bj`J1we1EV3NA+Mio<~}%srOC zpTFfz>AAful4j$avnIFnAA6$<&v}8XGs$Y1d@T*76Uz#iNMv{n)t6(fm+_}9f71x*{;uiSa3>oF5Zd)V;JPa_Izvj8e*iBIsNh1_qN1)58e7#?QaN+=NGKcHydB7nc@P{$<6bc13!0KUP`)b zm0eBBy=~U=eZ$e*$p>*;$K1rX9nT+~EICOQ0h8vq5^=T_J*RtQ6e<^a(fs`6XNs#b zBQ6>KX+;^NAX=>LcNN170%DL0cGy<)U}XA-PlQzz@qf(;KIi7VqV2yIoZ zAxBo3@6z#?7S4(4>@7pt9Q|*DHR3<3Xrg7>sT#70SV(`KiOA6UNA*RDa!7CT>B>m>cUP{Fu_FqLW(uri1v1KMuQS=<0)+GsUxG& zI{VtSv*kRWsSi8ky`W{KeHn69#~ZIY zCs$oRlWW3U^qs+M<^bRfQ%^msC4BV^x!LHx-ul10iuV86RpcgtU%a(*(7)L3**Hn_ zml5smLtSZj?W+k{j4qtEY$!KHwocrum!wlb{FOYbw`8%U<@u<%O)b^T=GLWIFH&eO zr9VVpgSUAcvoeNF^3^C)ux_t(a3kIx{b?gA7E^-}hcAIXClf`8GM90F2>9a=MhX4Y z`x^h9gArky4`ozo3$_avPqNSwGz1lf$^^6D>gkH(Y@|Oq`WoGp#$TW8?{i3DZ!HO1iffkZZV= zn{Hkt(-{@?!^Q2=p$v1UcpL3ug-wh7HVg$s)KF2;eql9fNc#b_3sbP21a2;V>?)${ zM?MCMJ-lld$yN-uhlbqm-Pf5FJ&7^sf(&D>WaKn+6G@s_?Tk7LZWgD=WWPhsOoWm* zqE|g$v)Z=_X)u{Z9gxaxo9em;wzb@Q4-O``dHfGIo0XmPwN0vePpr6@Y4R!ie;OUM z?Ftra^0Li$G&2R%fR#&-?Q_IXKj@J z{zKvBQI*NYjppLI5m!R>|LUz*mkeju&S85Mo5#uT!CA!-vl75*OO%_*qZY{0Uu17G 
z?0aTUeRN#CVT~nCJpXpDt#SYB0Y5b(wB&w0)pdw}F{jfx?OsN1m9I=nf@EyWZYVOv z;ij|ls{H+kBz+HY6ahK29R5^v`nzj;if~FtefG6hu&{%89yodk?8++LyQ`_z^wMxy zsDfOa1Hiq@8(1{I0O|I2P`#5Zjy6R~+eO=-YkEEB<!oC* zJAcotJ$Y>b`&}!P3XWFfj26vSthgNUs$e)!oh^0GDaFAOwx@L;z7gjeM)~xgMz;n= zkFnJFQ?Qu6BHSl_IZXF3p7(#OQ2x_Ca7w}eZ#TNAz*~=`2vz=XmTg214x|ErxuUR% zO!rAsnv(NHUV`{#=S+SQNOz|acW7iu?dqLb>>f9F&6yKG{d$^AhX4A4y3|nL$TZR- zHE8*T&+G-fit+y=g_()|cTb zW}Eh+V_yrLnm#k1f$Khxm%hw$?Aa<-PWoO_nnPHF1;U)*f77SFQZb4qXG z6t%j5@xO8hQji7GLDrIL7Iy8n@%5-#kk&2Zs@4G9T7Ce zuX?@a~&nrBb7B50fGc&23-u9m%gvPD%_KZ%Zv*H)>ncZK~!1M{WYC*Rq38SSwDb=k-SeA&k!e$=pYC4pD|bVCHR>VE3ir z5k}%{y_v|5AUElu-M_MDt9M(kI>#N0(b#vmc4e`_p}%Quw-FH>#1|=s$C61 z!whLZf;j2pC3MaFx6b60@(mu;EguL4Yt9N6&e8Miha#M|qB*#QEjn*4nuy3=SM3u9 zHz#Q(f_HowxNitcA=aD4g9nWayg zjJ?ly)1qke>Fe4LB%3b@9kkotLWnnZe_fJXhP_EhXfm;9uGf=J(rTZope=`LcC6upUV!y<2%} z5V2?LCT;5697AN_46n?EO(_ca^LLKJ$pe1P-ZSy*W^E(tFL$q>xSduqs;4iTaF@3? z=){n^ooL-d8n*sH-hbCO%a!_5Z^(#iogCRcBg%Gv7zv*N-Z|ziX5)P)pghhNkuz4b ztDJH=cn93pUaz1h;XIPYq$=p$6@le)XmLJiBq(>T@yq;;oGmEKwHbc-r)LrISn_sBM>oSH{l3TLAa7+O*zx5pAa0aar;>%wgcF$)sSCy z8LCjzaui~79X5<0oe8csk>Oe;=E>k71KkY#hL}VKEvP3xp$Q`x-(Ib&W#m#6!+APV=(9V0UYJE$m9-lb*Azax1R`lW+}$m8ymw#dKN0Cg z`|o$p|G&Hj6$$7hnGqRtulFgQ;=+4QFT_`NM!Ho&f7Lx*ZQKYQUkF=vB<+@7Q{vpG zxQf$Dv7)HeYu${|Ol=JVBMZZL989)ED!IRSYvVrIj5&%$umqf??9&RX5zH1I?aMa7 z!d_2n@x=@ACg5@QUS;F=bKCE~Gp){*$8V=HePR7d!d+8foawG3L)rd^r=g$LT2e00 zYUA#8`?V`&&OnziFn(GFYKT9f*A}E4EGq{-=#52JVjLA;`z?o+lk?s#$cd^r+7~~^ zS;9%M@WAozPJ}~qYuy0rkYb)hD{HQUicg|fC%1L3?IG~)+lO(&YSP+=)^ji*?E!*O zsUhOQl=)=UT*^i#HhNwFtKw1GQWw2;CTN@AJpw%qMs%;Cx?y+oG(_VB zFt7i#_txWm=OgI;oTTC4p3RHgOd*84r?@RsbqpR`^*h{j$8Fi)Y?kox7mb}iY!i8~ zu6MYaSwSwhX>z0tMmT5+L#kdYQG4S_^*I4}ys?N60h!p9$i6rd5h$pkiW*)o2U$MvF!PMH94xZe;X5#;yp zdPVji!mtQS&xUrBjnhlDu5cW*rs;L!@9W~P#)%ql=aos>MUY8$HuKEiw&V!mvd!z# z)R9w<2vk*A&3y=2?Mkn!zTNn)Ydzj9q_z_y&`X(y$P5*SE+UC=T@M>uTDooHtn%GO z7=?A^Tu7fVSs(Oxfk|4<&4vGaVe)^P_|ImuJGl5x01SA|pa>U&LL!mW{g*4-?^D2qOHE@uH~qZ(Qa|H 
z9f>ie&s@I*Ud(9}oCXOuS2U%aM4Swfe|*ok)aM9bsVTSi>Nqp>noi!{d#dTv|xMDTW;zT61 zEebvEF!^r5eP;al;(wftK}7Tux7jG)KBlK4oU^vy%NtJCjJv};)q2Xf97h>Wr_Zbgwk zyLxgw?(lQ{p$VKI%Xt2po@H~!3S^`jx;#SJ-HLuw#(|k;;t?pBQF+7f_%8OCPU={U z1RWt$KX58&uZ9p2pUD=VenFNMCiGh8QM%*}GiR`W1iw%eTNf}4k5GMypJ9ADs}}_A zcvGN%k`EEzK@-q&g&$z57&cxAN=eG?vYdj;5sjNZ@;GfkAdGLD+^PMhNHTzEqXG&) zd0XrUyy$2L;^6zjSy!0m0H+Ke2*9B6z=1m=+fS$ao6swC(R)-c(_{Xo< z2oo4gSwgdvB?PjQWcL`_Tfc(RyDQ}-RLu&8x4lB}x^Ep`qFJD6ZAr&82AM5jq*}bu zS=)=nOanWCx9uKwIuI?xA>AvZ(k7auseG!ZVH{dYGHn&)(w_SnDy0fX=Jj1WW)PwM zk{8kc2jh~h`AU2P>U6?DLs>G*X@LO=kJaj4io>T!%osxL`3tTh2O9K?eq+7Nq`pK; z?-R*L$Y9zJcOb)cYTY+|2YP2$db(3GrXYZ~YM}=?w+_l@=$0W%oc9 zx@2aV%+O|`MOejTy9XULa)$V<8jYevc0Yi6Y-LO1ir4D}2^lovp&T8x-9%Zu6@geW z^%fKD=Y6DI6=JbaVA5GSwDmDFjfe9!$ACz4#^!QvB`xBJ;pe#?*G)-{ar*=A&Pwo| z9TN_-J*{V?g`*(&up)*Znq%hzD45v1?Q+lTJMou*ru<=4763M6JaX_SN~XEe>ypOr zo;@G3-u8|$D`wAQ*P9UQYpCFP%mgU!0GoX>thH}i7NM7WpL_U%wWZn@IK8pTu?lj^ zCQd^{g=oTMx(@=RwgT-uayzQvn4yYKf~JSweHu1+&hxVdN}aWc*-m{{T~}sm15)qO zt28xJ9zGiqB=)=)&bO$t8QSErWFoJTKqS&k&@3F)S?yKZEC&8=1Xygu= z%Vg|Nz19~gA(e+YUJCeFQnD6#($eG=as`SdK($o$xS=R2&ZJMbU*{}$p8cnMW(LdRR%|ZajX@j;bb!T%Zzhf}Zsw zEMq_gynH9(3kEDT(K=?@6JiBl{TfVT9z+wwGZJx<>uIR;no{|@oZI|RF@6uYP`gRQ ze=E?+Y&+ue4+C6G5Uu?5m0AqLv5-mrV)=yi+t6p7NNy3K$G5~Rr)z!mrhv0KJ>@j? 
z1H!$LP?MJX?6Lyy`qR01JBwN%Ro@THn4S}q+2(g08R6ph@3wg8WNe1r*fKPJ4A9yB zvfAtHqUzZH=KS#xFj*IURNS3jfuq%dQRwPpln2|kKc_f!ML799L_;QW0F7PQ8}3v= z5c}E*JUutfePZBLQwz`EJ;x{GHrTHqN)AL#l+G;$YOTyQ!C;Oi?`1yPmLT0$&W46JxQT@IjewbV62k=bxRC%#2cWzXqW;!^jD=sLWtUiapVSN<&k zV^FlUGttCYk0O6MJ&-M(mbb&&i9NWUT* zLgiXAFQM~S*%rnf`}33>o=&vnEn0!8VH^2nhg3jp)L43C4#2Vtx zY5mjF9;f#D#j3FcSA9kC$aNfbZ_E$cG~10ykzW`@z)D?cN-3=D1Xq?pGAUG&`&idT z&Ktn@V#zV$SFy0k3Q7t7E-w7>8b$)s!x~3DP9HiMr?;N;0Cw!wlp04A2kUli6Byi& z30=q^Z=u1o&f*@&$wPqPbir?u!?Ep&G88Xc#n;h?vF7=+RtH5@weL!(3Y^2IWlAu1 zgN^=ITIE?s{g+>xJVosa@f4<>Cr7@w0hbv+k5XUztDw@X!2AnPZ_)`Z!cdEQzpHMC z;exdO3)xxM5l9l6OtO%mS0xSyGzTOCrncOVDk@uJhX9JisX_vaQ#WLTYu4H?0xNfr z9^T)M?_!xK2$G@}1i~SMUaM`rfSVI1{j5O9JU%EMF5Klr*hc5tH1GD89Q-UOJVZwP zCItn0N&zO}Xk#SK86|r~-Q-d27|x=E5cizCv|#%DzC#~(Vy5wk3^^$Qqhp6T`6LEF zOgzUnn)Apqbv69Rsv&0^iXs>CIu&XL@R)NlUVfg6n#PLArQTyM=W8b)^Yd?#A0Ui86tNMSPh&X#IwY6bE@zzk2)i@`i{i*Wpie$EV;9nxZq6#;W) zs%_$g=XZ*cf_Bm5*3Lt_(t6F?KtjP;DCoU+X8};&N)0LVO-feG+5-Y`8ui&@4>CJm` z)+YkvPy5TUcFw?`er$_xBf{ZMeidbnz_&qL~P0 zetrQnV?%MH40cDI1B^`+Nfb1w`%194UiatH3qxy*J`>_rO?UjOF|VI5lK31Se;=xGgt zAnvE6HFcO`rWw#~@XUHf_-uawj>!}DW}!iIHWAA^`XpvcI)EHM^fTwyaM0Sl>Kh*y zI;L_rJ6^B55f%P=qZ(a)-9akrR>IzSBl`-h9GpB>IY1N`& zhaYS-WuC{LsuUadsP!#EQ1&gSi%HXd5gY zt@r3RLujcr>8#><0Gbg*;0Xl!uSw_`(ce#bMv=fuJ}X)VcOgqjCe{{#@|I+v3zAKU z{mK(~AiD=(3lHkR2W7GYBX@48abi90?b(e z-jt;K(Yn91vG_&jqL9+uxYO%^iQC594bBYMYORqy?YkA7DJQUyEM)H6bmPh=NLWy7{Me4z)jkm!*e!iZQ zKCd;xf@@{`ANwoot93?)Cp16MOzrw>&_ce^IuSX9_Z6so`DvB9%rB8E+boVBu84dpsDD*Avy7`@vXdV6fsj z#G4ygX>0I>bISG7Nl|aJ^q=`Mo^>JvrdP8}IX$N9R9Cz}P{*<=_#mnHWf(_BL1Jzc z5??yCbp~p!m{0(M3m!#{6e7RlF)@TkP()Jc#4jd)8QGJ+rlZGh;Y++w%`0JF8le{N zabT6CW>iwx^g=bCGS`{jrf?kH48FduEcZRLMAeRk;s^U+3XQUvWiY*JeY1%iX?e(K z6%4yD$GWj57CKD4klf)>xgFhd59@>c=B9oRse`kcGk}KHfDl>vXK*RCd<+;(OS(t zqaSRVsrv>xzw&+Z$)q!GOSc1Y-r!Yrx+Sq4L^(ZOXxpzTnJn~Fq0uJ#{XSx^Hzm?f z)o;9gEgekw(Hq&GSA@0EmyBsCrAVkqe%_Sg$~W^@wr}iv@+|(ct8~hMA_<(_%nZIY zm9)Y+!$K&vg|9&<6=Lh_c;)uIAnE9peS;G>7evZ*bqSBW8llP(p}N#@v%UAQAtLkC 
z&lY(qA;^ZFT&X;`hH%?NqiL4>kg&acAc>FZ-%LQ!y}Sm!&gY_3F5OkXvaQ zVG^mxBV48<18>#&_>_c%;5Tm0u8nX79R}_*FJcA$N)Q3O^D+zFzdZ-ZwL#+Mhrlru;P%L;U5tSd0G-Qr zVwV!zWzu}GwxqACJtoFeS;74E>*2Ln;k;&;0rM9~Fan<83fhx{YJ)IlvuCD@uzr{G zRIag5T$Cxi=3G9z9Hj#-74}C`4g4RHD&xm^%Z=uLcnH#+qvo<1oGm0fqbqIYH94WB zVv6x`U*^CjQP0!AF_SggV{&}Go-ZTxnRB@J#P~^&DO8Ypv2zZFG6P4*oR|ROk+t;g zd0VG`VdoWOu?%@_ScUQK1ry$n58eImdaYt(~X*;xmd-62rgh-15e*O(FMP?!ey(O**q z^^%*ynBFX3K3{qavY_>I z+ffK^5T0MS;bQT3h&;P1%CqK>-1}6!s502lIA!Rp-dVbQd=gOA@G3GKVziV1s+t;I zn8eWFT74WV{J}#^h&WEWk}G3PaGh7x%)fo1imYUolweWiC9TO{!f3-z5advKz%Gve znX4-L?GZx~wS7?+Oep(#&`m$}l7AT=Z(TzbJIRIBW$4DH+eGW42cx85%zTNcHX9 zn)f~LGM0>XKIzLhQ+i~M5V7MVH{JRLQ8CY1DC(t!d#Z+1eZ0KhOfGE;9N)T5H%JKn zePKvF>@=}5jlpzG)4yQ$pi&Res_C1QsygLCU5vr4bpYRJd+tV+j%+C&*h>0yaP9A`mHsMZfOWe#maf1)4zgYS3IxA}vpiFW%6z{@?&*&Y*k zgP*X{@rf)BTTYJ+?-?b+Z*0J5kKL|No*3q@zofD@uLAY8lQi_E)iPcegSNGS4?F@_ zC9|pDzKgFuyCB6?P8vWh2c|oye%w0nwh<~{Rh@xv0(i{@!l8f7gp4HHsc0T{>KM$H z({^SrbcC{HE3x}TE!5kb zA&r8_;1dS~=pHhV^8<*CAK6SQ-3^8wWA||03uYi<&5L&u%EIk@xx&M}U)+DFJPtr+ zI4%$pR8s5;KM16R_eT~Q_w#&hoc;x&B7UGU2WgWnA~=HdZyi5F?c#}MkQTj z1A2%)@qnudUNycmO8KTv?r`>>MB%wU8N3-?e>Q`i$nzyi9yfm`BqJ9Q{oKc#c-u5x zBOg|JSTppB!aJJv!@$Xj`GbonU@#Y@4&X@@vMt6pb(NZLvZY+vYXq?IaUoM=hZaHHjC zRK&8a1W(rtTKWZEO^T@rWRh!=C7gDT-w!;yg|ka8wrwV1!P`upS$V)Lv4qaJv( zD%mZ5O7v0&s{nP&Sx?I>f}><~JA;Z)cU%TNf|H0WO%_Ed()^$54{;o-fC+sZ za_+_&j0=-kpYyxEedBrIT!@r8)N(~BfTXb&gW0AqZLK{4uCihD)M)W7dL2z1(Ty6m z9DQ}+CzlH+m{3W-SE*@jUEQoi3WsCa6_1SH?3oo|cGnM#7Mr)M_&If+@j+~vju9h& zU`O$t9q`52Z?JV|P^f0%*p|bm_q#h3BJM<TIW`E;Nr zMAA3=-mFEXIaQPSNc8=s^j|3>%mfsX&)JI4)U-p1=BgrusWWnN^74;5YGiU(DkA=# z=KpFMwC47`LAri8znKf=iOP$``{yaN-!J&syfJIlh^}jjuD3L8$I!+w#P!+Y$9Ue$ z(QeAEWB6|OhcX`a{+0@s+xnW@n|IhzHNRKG-J)mzy84AQxR+)WTf8O(YO(72S4ri~ zjf;{P$5S#263E6)>cIdUJHr541#bM=h+A-tnt8ivt2a2a?4QeMK^&bG_i3G|anA@( zTjm)xRChIf88bICfI&kQ(Z*2+d2v3a;$!U!xIis4treFQ1;NCQ_$qqzHGd~VCOdAg z%y+73r&3FX!~JFevhPY@?<5@ z383WQYfj16V#LMIt`us0um`1hN0l~p9aze`W8 
zO4r7cnnX5hwdU1I{{xQpQE5YAjI$r!u_@?g>_qkQ!GxTgb62%iY#R2&zp;?2bAYMFefm8D==QdR{KdZMzSqLuHeB7S}3qX`1b)CD^Ua zrQ;jje{bvw(0#AC2*vv2E!kK}5nK6uQE2|`Lpl7Ws%<&O<|v4bnL2RdL+URa7Ub$n zHytIfN9U0la!|ai%!oZsTR!s#tTayWT~a=~Gk^#f0b{Z*a11>Vruoe#5E99gMcWWA z?EduUtAD;+Q}4jxvATKQGD@s`&uou4F$xJ@@*cXZ$=bpcu*-f5j4?ER+cw6y*zT8Q zk+-ReB^oWFv6YZWi>gvDrQ07OcQ2tA$W0PMQ7w^~Fs+eAk7;Kp-wtFkakG$3ad?|l z4?rbyW*0VH9i0C-X`{$#rgzZ&&g^MC%_}{WGexT*RZHXrkvJ&I^n)4~xv)80(LZ%_ zI2c51Q2N4J?`<>a8xv}+a4|T=Okx-!{owu2!lwt61sG#SpPghz_00sitv|?fA-G5= zgkvVBb$j&3mNW&I6eXoc$A*^Q*A)=|jjr0)*>CgHHg(!n^Z6XM+rh6fhIggqr;rMu4YnvJUs;8R6%4oysWLEUQFppHT$LF{*gHtya%ai%4DM$AeKN?2 zea8jaGGTPit~`6Z(Z3k~+*lgTBF3AIBj9C`MA~j5(xDDK7T1u~3qPSR%?ikqC&UbMqA^1~E=bS*M|o5F zXIEeEOn_ocvO)lzyH(A}O=u+cV75?cq#G3xm{|0yz7f5K^-jcNZW5Xmq-lq@P@Mmh z*BP{tJuuD#d5&h(K9fxlCWQy#c;d;a4KFYCyL|#qojUDuK_|hreQ29UA}ni!3?XFDWk!=ge`8iL*)v_gDyM}yG294ei|zLjPs2< zi|6SfACb^Cc*<|~Fi(VCyh`h~{Y+wf6Z zqnyUVamkhtE{OIM^he1zoTVrj@Ab|Ax8&o(e%$r8mNwpmt3;=iF{HF#_1fI z#8l4>pSrB$;hTkA4^5wEu!haPQ1Sm)Wcwn;S$9R3>EB}e0Q`(YUqxwq04Ii4H2zl5 z9NA>|CD~+43Y5nE6`xrSuVKpmn$^%^3L8NGAQ~8O8C<{hR^5vm7lKV3=R1|?z z+7n0{%V=_Kp66O&f^GLNIUG~?-y#7K(rt4T_6f7*&Tk`^=JkvBvTW3#V|r zfeRgHPdy>PyPDD{sMuiM^mY>m5K? 
z25XMHLY^QmCJ*znb4BgtA2iV0Et=EsxzE^Nbg{g2KimikC*>^P|H5!e!Uqn96SB%i zq{)t%%kfL+nS9sOcX)B@sd)5)^0+Q|GwMe^q%xov_FC?I%GGP-9a;q%s`X@%d2e`7 z?ie(Gx4SHY)}YL z-aHu^sL$;l|ATwBT`@KCIgjnDLCriqjeZy)-wCZ#$6kd0_FIv5!ZkAui0P4ooA9|c z{x;MeAGC^-TDA6B9nH#KH4_E6{tSCBZ@-tF8wtt7lKyQYZO>Xa&o;xhHf3tktb?K?d@xdsb z?aL6M8T&SSKSuLKm`A8nH1Ua1(0rs$Hr+)|SLI`nlQ2)wQEvH)LK|p$4Hv6pEg(%x zU#@nZX!2t2?`gEh^NvH@CuU~Gh(KE8a-HhZ*#m6Ri8}<)t_?W)GyXHuxh!e&&04Lf zmD8H_9dGvt>0*-B?Us#8ChJw%y%Gt zPN!?n8Sg{;oITCORh~~%sl>?@R4Bog!PAq?cVBYWC`J@t@h3EK_N3Afx)r zl4;wq)--vIUcO`kfe&kx{-naAN9^p0z2fqyzCsKo<{3#d4?Fc@FOy(cOOva?{)JJ> zjygRG@$YVNB!wWnu$`x$oM79DU#otF$&f&UdDpK|5r^IV26LeXC(YgNZMCz9RU#dY z?hiP^{-jtk^~a$bj?C2kKgdh$Y(iou{&~^IDT;A$B>D%pEJqCpuEo4Z`<>;k_6qE8 z+VdVe^SS0`vo|;89$S;b9#T)dWYhl{l3G+$5j*@yuKGL~Bw30$y6-jW0=5iew0HC9 zU1myaCg+H^q{vf~P}G{1O%BBw^U7JdsTWdL(WkY*Ibm^RyIB>vYMxGud$Ei51+v9+W!iHRl^>;n{BnWqumT67bWAV~T(0OF zM>k=9;e+BvqllK@Sn;?VybpurB56P8MNq+sm`TIvU_N_kH$kk61vn?-|ZDb^bPkjnKX->E`>{6X86_yV{fPBN8TvHJ9O(v!krS)7H`ij#fHQQ33)24EL6$6^q# z{PvcoQofbBwG_HA;$wJujQu9sbYX{X)$;#Xd&eNjx+q$(%U$X&+qP}nwr$&HS9NvS zwv}ac+3IqZZCjJy#KgRLF%d7`kC#7gocwX)?93bYoOSkIdoBF4fZC3(`FeELt{|a`B0qg(};m5wV3q+fn@X)`(|ncNzuN3I*IJ=7JH*r8FXkII5fVXy5MT>1|TBSKSY}s*$nXJt1d(^k3)S0hm@5na`S7Yoc$lq1* zA=?WcsP!I%L-$7CM!vv5_j60FL1*>0sTnlD5wmW!9PhBVm+S54V28dbt(NO@(ZGP& zH)r0=h^WwGvD(8ww$DSPNjwy|P)+$m>h4O3Pb1{OU)S%lq;pqayEX3dTiXof+*3^! z1iq#Y`5d0w@Fp~gRew83;=t+qzJEq>&ql(Dj-q=!`_2~HA2*+^%6hs5y!@2B0n9f$ zKhtRk?X?k$<5Lo{XGke6lx=g|P#rDal+y@slI5y&Ek%@&FF=PM@zsoO*wJ3JFcEj! 
zo~<6Kec7|+B%+}w5$?*8V(922g;8UtB#c~WcQseT2OPHjjCGzhFp`O{I(6JcnlWfV zL{B9y4IVzrJQF?tX5y)&r&!w?(Bt{hgQ*f)p(X=u3Be+cpm?iA0b1mk9TxOZ!<}(8 zRB6rfhTAS`nh0XoRWJZ0t53P$J8^mbA2pBwe`p8E(%naINI*1WEYajjhMpoh!VcB- zF4JbTiVxh(5mm7;mUA9%D@3HN7Cd63!C?&ur<#!>pg!?C5s3BuF4&Hia1 zkQWPIKDID38hV_-zc}IHC-)~$YL9o)DKZ*37xTaS59QAj&Ee&i%|BP`G1EGsK=hZR z`&se}D`W3X@6MCHANz4aRDS^QIo)G!j}K{%FXNf9>T-Kpr5~ zZPPafEJJWZE$=g5lb>BtBt!GX(`r)7!KTvny&gofXkPMouU)BII~-n2ZTBW#9qJ1RE@CWRE5*AW(IOL zd#NM?0_l&(hZXllPxR~sOMMkbdK*^WPI;h5Rqsy}hd1cNPMCIRCF8us4QtP(sV6&u ztIQ=!16%?%w4%>cs*)WhVN|K8*SKLr4Hj(M#kj|m2HoJpra&vE%1W(-*NBgGPGtQf4BnuZ-AD^j_TT zYy!z_gp!52duc2Rrl_%{vTlBt;kZH$h>?yHe%%QRAj;_}+@&oY;yiMSr_iJYX&nx} z5}7kVG%S+Il4)#*S~ymv*bcY(-<;|zPdQN>S?DSTgWPZk1N&zVM5Wv*ihCON%JuH+My&q*0O(KbNoQ6g_ZlK?a&Gxsj=uBBXF?w~; zpSoMMpQtr61s}8CdN7X*7R(SxZ_8ekx zlT%!PoOYXB`O9i;s-I9R2bV4v`WoZWbZPUKAHd|r^r2NJW@+*2b2Xog{JIcYU6Ndt z$il0@%%TWXEa*h9M1LDYR)WQ#d(cpHI&cSAI_u_|Yy=QG;r8+tQ&e=KpE@icb>fr! z==(Fh4>C%Qq-@L4FyEwN6!;^X8kZT7RQT&|9$7p-x`StYfa~*EGju5Jp3&&^t_?$N zg~joNA<>as-{|#cB3?VxltwFg9(<-iV}X;nneLhY&kqYLxg=Eyrf`bkl6z?fQPkgZ zNn~S5nfnDm2xw4pM8HM=BH`9QzC0gzUfc9a`YWX~`btTEv;E^4UP&(v>MAL>eo@`m z$N9pt)$gT;k~q>vjsvdp?&evUlpe$!GWWiG6(YQf&K4cTVj7Je+k!XDFWFQ>6*mhil8w2Nwc)1;er<}kV4Ix|YJ>S*#x zlo)Bt?yYOY;0*@2<9j$V#O5XdeC-USAHKGK5^8_9%X1kbCGDOe5MXu~C}8Nf7hkfp z;$g1+GkgKK^-FwheTgAR|8N=!- z3knDkZa@&m!8AuHaY35d0a%JeStZZ(-XCTKmwVV&E9grU;C|3MV>(_f&Q76O*8R*e z=vEX$L4(AR=0W0#N@JJrkI%OJ`z(R(JKL1$GNVCzOW{%Hn?ckFLecuzgM^+(oZYO} z&U>C_(g(=Qc~Kn#ltZ4_y~SfNc|uT9(l3EQQS|Oqb{3>LfjB+#agAlBgS-eoW7O^a zXoqNU9#AS5involE0jhheN%sUwyO8+A=+x>SWHb@%z%;nF`?8W8DPs7-+STeNHN{p z=$Y%PDxmQNcWu?omHI$r$!el&SE7X&o6gUBZkLB28sR#zi@si4)${Sz`#FHD5^XK9 z$4WCXylph?a~t@F-_ZX!yNVaD7Wce%io!BJ!@f!M-iOo0o%B7FQY39VFKbe+rrl^j zQl`oy-~mt+gF^no0Ld&~*+U^Y5_xSuq@tsOw2AKUwC-24AH5ATXY&{a>j22&M_2c~ zf&x-}FHOSpYO4O+F^^5l%hqlkh7A`BG917$VymfyS$JJ=jO$=PIAlR@=8YP{ys8~+ z&}RtbDEp+>deeHR@#J?Nfqzu95%8IrgQ*8NBU^v5^=M}-H*D)N>9O?BJv;QX*yD4s 
zt@W*Hspp$PZEO$K@QjU*PR}uJE=J_;tp2=64B$hO@X~;MB}(h5fl0@BY1J5X*&rDSy-&0)C?Rp z$PvtFl-P$iijY5jPK|a(RRlUqzs?N|SfV*j_XmZhwbsEm@FK+){H#|3G&QW-42ODZ%rxFtH}P*j z(tnAvwlp&)S9Bs~jERowUTtSiciw8mt+P}hN+-cho~sCTvm(rcQ#+?CsS7LJ7coBr zB|p&&onIdHJhd5?gD{r0uIrzDjr$w5w(pfc@uN-(vv>FmJr$Jo9d=Sx+vRB)6Wjtn zF%^4JaObw|Y$R?ute=03jhpro+Tye=Ki#D2S^QaSSsZ8Irvs_pNn*AdUzf>3mkSBf z*t9@)F#8;Ntyha5c-JCw!u0m$MdBDcB9{CCr$WQY7NQJSQz6N?8Ba&pQiL-cYq3%6SdWM3k^BL!cHYBM8xFfxrO;Fv@sd)dEB{RjP+NQ# zM_qvIDnW*I>jj80yS+i&t;lm&rZeC$@$&Wyqa}pa{qQdEp|}MU2~i$I?qRa3S{_v1 z`K7>vgWk{DUN?Em{l~nmx54YX(Dwjo{=!g*M`C$g5_FF^g0EKa4`Sn{8R5v#MsY6z_7_8|a=^v#!4AeYAY#`vPz*-c0&e)DaNY z9Gc0&W$yPlF&@jjlOXwL>thJF+c4nt^tP@;03&3e*`d1M;}Z%AMtBZsja&Y*zhnS*L1(4FyXpY3qQ$;FyEl zX0a-|@V}Gh_NLuN*bliQ0tD*?>4jTpz*rRF$ImrNxxpN8mt?e64%(=G?a5n-%_q=| za|Q%l-|y1M?oi;0g*O+76T2F5-kuL0o9Q>8CTkgn(VUd5XxNOky4G!QXU}zezt}tJ zs+x;OR60KkMw12clvF+m-e@ok+jHfSNr8t4`@}8CX4*D1#{Vc?>R01kfr9}|4XdxT z$-u-0)^NXAxNEMi@%qS}~d9M;B7|&eD%6+pov@L%|1wXV6A09_C9MA?zTnL)Nh& zo_;I8lv6Quc1?hGa&h%ibHLw1vwEEq3R zDMmca#Z7UdnsYo5Sh`S~+3alCbCu-uCo9+LB-QmqGYqlxPKB8Fuu5Utzr5^82D_?r zZGFLj+^3Nw*yzc}P9T>ezq@@MEDrW=y-1$zK6nRvNQoG%A?8}2E5xiv>utC>bDpPs z8Z@JQttp^8@E(m{I%`QT)XF}dr15e5M;{*#$op)FMA7`bo$YIa*(AF~vH@<>YbrJI z$}VNRZ|rfrWbHZOCTSRDFx=X}(VTu>FW1f2%I8P=N>?1WKjyX&fT!yxmo13le1f5U z^TYxM#Q0Jfu4v2)x6@43-d+jtz0Sn1OFToMflo}f!BX~x(QQ2(PjIv*k=0rY4b?akq-Nr=sSsxg{WU9n)|QnvS4JzP6|PSTc>+&?JYQ<;tk7k=zIFpZBZ z^8@G8h%qi&(C>M)aDLX0A$y>C|5^7OX;443jVJ0GVzYT8@r!TyuEF?naQ4G-5$-L& zuHp{ILHjayjDr(ejrO*#=5vG1SD8rF(tTcM=~ecbPnrSxjowo8e%I}S?9$Yc1J_2c zdvKYFfGTH#{gkN>-86xkR3i%Vmzf$694yZ3Jws=gVk1JJ;~=?Y%EMys@14z6+tCZL z$qPPm*kn@{Bty&Pb+-nX#D$A^ssnUYc5V3b)LSj&sZvJOcmzy*m87C14DQw!?C&iMM zdW(!YuD%AasXzHC0wK}a3&h>a>!FzGDQYV%u4X_z0Ri%}XyWK8+5iXI?XN&Q2il}b z2xi(2Hi}Bq%GaZQHS?oDZFomE@5*EI4X5TPNd!{j^#s3eq~f5H;brT`W?sGr%uO#| zjUh>L>*R!9dmVozjU3?XyCD-)Ow(lY_zZyMtX89^(tRMWO&aPbmWv-4Fo%T9{!y!{T( zl=yEw%^C38Cj`YaN&l3o55c#Itg*0C#F20>npl}NH*l2%b9??QM`18#aO!V=z$sYF zynO~!)EEq>rm>URZPY39l*U_kq8X7-=!sgn4ZgG)-!Z#PS+ 
zH8X2F0AE~f=5Y2azin+4YA{v$T>FR9%Y`nsIEy|@!iYQTDG$c6w*O{T&Efjz!wWo= zrnaQdg;r61F_squNdPDt^B*{Xr9bTO19E6iio5K&d!`d=UUrYwfT@(4H+DaNY`0P^ zo%_%T&RSUD(DnR?AvTG(W`h#>;dvcGD~KyE)mR({ucDH=Med+z*QZj-6;4L3B6IqE zM%2S!`t)tB3F;`^PF&aJ!@s;9g5H|hbpr#z)-LQ;!bRi0Xo$VMKH`>*S7sfLoH(

-*8?Z41~V1Y&aL>U3EXc%laMTL(Yf zt;$;_H**2y)r9G%*tFj-w&!O!MD75}!Au9Xl5!x1;#&KsUM=U$M)Drr+kPoyz+Lc1 z-EfMtRAp&QVp0&=z@n7g!9R4i0W*o`14-42^2#4&VXPbG99nD0NYCLZ<@Ly0J!1>M z2Q;?c&LE@>q~PF&-=AOrl$wa|dBzLFII3!i!wV)ruk^0sE-xbgo3$C`aNR^(cr->= z&slOnVbehSgad>pvLB`#{Bz$k$(@W2gI=d+_P@|{pPaYifJ@Z>aH8?~OrS{b@y_5W z%+D+D%j;k{;?YUrT9$@CHmfW4?iP(hZ4}PBnGx%-o8>1&R4567-ER%cuKouWj;azz zdA3*bXX7HCFcmN86yYvXHfS7i^*zDnq)V54#N+xYdv$MPs(WAeZ8a6c+8umg7G|5f z?fy@Db*n)1?`ML!IzHA_q_a6^XQI;-xSG6Mou9rx32@LBlMC-oiW9NEEc@-v!2v1O zZSo{oU>pncR%&>%r%7NNGhL}GNbd#)a}URpwuf(_Wwk)_2T^vs^0>XTkT5AN2;QklMr3D#L(5!XUpSy&T!x2KL zs6@AVq=&lXRjv4c?tP)tGQnCaW`(sBQWva^(P(&WRdh6VLl{JY%6@%OH~VfC&cluD zX5rW3(5_optpBdUMi<5dSf2!W4=Jee1hu&JA=R$$T#Vi|AJ$KKXx1!h8@p@p)axAE z_WA#U`HA+~&ateU6*@8!|8>UUD$K2~C5z5cNIvBR*MF}K&RavyJLR>1lWxjCiwVv@ zy9LcXmXmVVwia9aHP86PWMg(9V{0NpFjkC^MEQdxZi^|N!XkMb!AW{arhQWS7d^SN3RfczvQ@R2xOm4g~5orh# zJ7%k!qvt*5hnf?r{=9`DPY@h)OlyX$N4^lJ`da+8AH182T~&OIpyyPLtllhPFe6pl@CIg6di zv#+b3oLkf^_F)@CT6L6C8D3@Qbh9NJl4(l$?{a$Cgv_Cl8(T_qb7mWec0gJx!o z)r~pb#$)rRih9dUS5Ks*Awts)4`wVCqahKF97Bqtp{6lZ=9-}1FC4BhArH5%Z-#ty zdE*YF<~9vp&2$^FdZ6m^ueN}-#q6_6fj=A1x5Nap19Mb$_?&S~{Gtl4<0>c>r&N91 zVYx~zTV0s~{Porb&hgz?cOp@@?xOm>ij)VS<_ia!j`EwEw2{V#5GbX9xN9DJ6|@{A zfVldSn7X292rXY+QH;x62cwpDSC}Z;g7L6!4W}IXo;_fwUwpdrQdYuE^iVFE5ADLg z-(GpHDAR!Xn(X2)N%{EcMg80BR4xIURRM&suJVAmW&p?UrE7*vrTcrf&Lio-h|H-D zB86R!rRU)V7*rLyiUAqC`R1>u$?YC^v{Rk0#R>1=o`w zTs#A~e>%}|VmT``XxF1WvFXI+Wm~oM)n9rdyn~1R0z((|!vnRHP$uu#RyGzrN{0_C zOW|x2o6z}&_3=ylf-1Zy2MwQn%aa27L_K3yamV1pT-$wgg}oJoU##`i?XNz5^6^@D z)r?$+{T5gGV3MZ@^*gu8ERd;A9N03}9yvQq9>P5*pu1kb(TD;*P%zj%{8X~D>?wN; z9*HzlmlYjqSbe0JmnN>SCQDE5LaE3jkS~iQgKDgsFss~K{JcMFNT`O(2_Q&SSWG?W zyvE2=?E+aMmhS$jEJmll0;}R&FnQw03;ft*KF|Vwu6!LIea+|zE(c=0#pbFA+*r9> zghqM@-YuPV5Tv1nEAD=XkEL=>(sf6RO6SIoP%|u;4 zd>2AT(3sPlh+_~lGbn&m6jBYuN>_+~VydPm7j=N<-kZ0MU#G`{p69K>>pB$2t|$QY z6~4=jr|}RW`uG;TQgG)*3(L@fFkg9`~=8f1_Mueo!0HrT{^Zf^ft>%xkii z=G>@fRg!oFdl?sFJbB8TvauBt|Bqa|7?-U-k^(&*KRUrZ7{u1)4LUam 
z4YiXY3_HBLBIgnySC&ugU-s7(5F=WbMMo>udW>(_cmOh*GM=oYDA76EJM4g)dSCYK z!TcJId1DL0lQ8MDw#XFDfukzmXwiBt7M}6(HB;uT?K4+hZ@)uBGaqeaa8$!~?w@y# zy(GBgD%n)+7Fc^q`Ttg%6vU{EOSw=~spy2kF^5!BilG?PSU+9j_PFe&9icmnQ`6*K zIgaqSUN#DRxA))(qMu(Ee?Nv+Qf72{P@Z-e7axwiXsyq&;E0oi=GLVG>X{|y@B4=x z@>tcbA8r`mT{XS}7B<1)SUbxS+D<%(M|H6x_U*#aEtXU9wwAj3mhe18#gAqTaq#d- z3!>6<3U`&o@iD_dJqtBB9>(Q2#n{mqikC^z6=%DP>lb>YE)kPG7%}iFmp++8K1Qeq zYkDnvl%agZ)f%RiEPKt@fg|kBd`a4KEYe+!rg(?u+$Db-MC6n|KLo#=UdL-j+qnV7 zLvV_bVJ?^&5am9))Q-4AY&!brM|G>b#bcOSLw(37{DbX z-lFvP-@QF+vhR3d!>vkkw4g3iJ><1TV-I(u;t(eY4sh#o?Ew*g6#mavIaDrdR>bXd zu-5a|ICtFhg8hx_TW8@5nkoSKGGQskBI9+~^z{;hlOt76amC&e%%=-c4%=<^gpC`h z3?||?PXtGoB85M(ws7lj4lO*v#$^73Le(^;@k4fIpI%#%e$C=8LVlCUZ|1Lst&t?b zvL6k=-hL?U4;g=cD)kFx9-70afoea&uyOt}9gWCMS-Irbt z;-VVBRD;3Cy=sxPQM+{bBiHxA-W2<6m<^a)&~U$@S6Lb_tFpIQ=GrAZu5JDVSZzoi zJ|A1$wVzW~b01R&kS!z@2ak~4=nwu-1obGpv4B_^BjWCUVp!PdL(JL)s{fGXui7pO zTm=@gbiMY#6<4)sL)s1AxlV~U5fLso5{E)gAkT#IBgxJ}`7AVAv!F+kqNdi3!!;vj z|M~!R+kZs%u8(_U0=)!Jn|*w&6??JoT6UPHQDD+Zx)eI81ADuRXC_6mMZDqy=XA*rFV)h zZLA5G#zhh}M>bftbU*I=BfXT9*zH@pqwgn`vp^lt88f8Q=UT^AAW89^=)SUf5gBCm zkMeT%jaynZua@YIAdIy2Sl?U7n@ z)zEj@6h=~Aj>V6Thb}rA^tSp7IqS}(m+aGtDd^Po(W2cOcUe=gP!N2(^U}k?Ti%nV z(og;bavCD&F(o$D-?}-HjVziuvuA3lkRl8~=|=j=-_I+oZArTFBC`eoA)-!7>W6oS zS<7-BAGNypEffJ6;cioC3$ehq2U1NzGaSpc=sK^O_TpbOwc@#%poaDGdT8{2bW{#G zR~4RUcG{mA-M@7S1KpJfP?N);=SpEg8rG^<9%xV@+Kri=1*}z9)f0KoAc~N{?c6E8W0b=T95z2Iv;}G==c6# zq{*{&X=5#q2NKJlF1zbj1@#P(5Bvxlxdg1sP=W)!ea}jIfL;t;cXIZKpT4sIjIb1*w5q`sNe=2Y}Y>C402{tg8S|{GX3L|2{XsGqDrc!7Hsy`&gBND z-Bwn9tU)N$w`K6@KhI@dF9=rj#6Egxzvnd9O~|Pr5OZz^MPIPC`{L&Y`)vsctlKkM zwJ;#?q&816+88Qnjpd@&T6I1Z9L-CTe=yZ02p>2Q=gO5gLc{Kp-xebb_ZP7X=a0y+ z7RgJ`hz}m-lXRh=rUex4AviW|EG8PRg#r(v1{R7~>)Muelp6ZFKQAK<2&GRN(y3xq zQf&^XXZ?Zf7sa+5b<_lO+7mf)VH_IvV%n)t*eInRBm8yQ+By#IX-ACqfLB|2yZ8Y@ zXRm>~*w9q+WkVvjBkO#PWB7UMrUsbO(g~tPYjBjb0bz& z#75+QL->cRh9yz!&{UT9`1toFr3~tcqHEC4zZtN#yOF!MrGN=_m%he#^t#ZNB?uC- z`T6xWL|PaHAP#*%E8PsRcmbeauaL$gNU&s(zz$Wy?Q_$io*~#FDByz&Sp_^ 
zbEybT1R>w^GG8FJPV?BH+U#U&`y)XMTucQEGdr8AxwHC;-nvs$2Mt{X~n>uF|-+OP5ees8H*g2bi*u;`5@g~?#TV`25 zVT`-0{_uU+P$aJ3^tnbF z5KB!pjg%Oc{Ntjs*dTCIa@FH;vk1r=*Enpaamg6xbFA6Wlx%2;vd*ukh-kr#`*R+I zT}2fPR^9N;!sOTknJp+tTk1&M8(MDj!eUEdEup;;;+~`x8GL|0CAn@`2N*?snLMAW z+Qr@)&I!W+xAhn=h%X=g3_prwQ&$FwP0RVm&_J!xlT04r4b2gdfe_DPY^;o^is>z? zWTB@1hfICitOtxzPd%2_hlISWh%Yb9%J5@I^z|2`VAJiwhUGKIQ3vO`p1DTr{N$1_ zk0*m2^=2?oKPq^=%-HW$-pg-=VV{h20IMdexapGBo0$5~Y9~aaWm67PJ|cJBDe|(A zZ{}>$tzNAKLDNN1@c0Abme1}?k!d5IChG!Ms>~iEt!?z&&4<_*a#;>pUkW074lJl3 zk{YWKX|?R5*1W2rXG~k$;KSswTo95)H@P<4y_Ha*v&5+O8}jBWV4?RYlP7eH8UDrz z`ll0ez+araOu2!;BPe69bELYnNVSJObI%p{x(QeNMvPQ$pziHQiq=z6I$T!S(q-)= zHBG14gHSTn{2=;1NYq3*Ud>t>`J78xx$|pI^EQWby}lVzyCY?H+=;b8_hv>cTVplW z#%>xXwkpCyleiVm*!ZlrOh3N4YG?ly8h3gBnvIuFUcMwBy{h8*z1x;!a~=5dPjvNt z?nkvpoCLisD*4F+0(rmO!a--=t7t&~12@#7w_W#`y1m76RvUeDXTOS)y`f6$?iL9% zpS@WUyLQ(28X*F3y7(xBaA=Yt2wNMagly(ct9lU9IZ%Z9#jMkM?%^;b%jI2{UXQ!f zsHv*IpM3?+Q6Q!Bl=A1t2^J8#0Fv2#etoAk-&jnu0gW|#j9I=;uN&r!^@nB2qPBG~ z&xtOb!MmupfUavl`?~4%rnQ%+IJ+J86}-dV5W*Y}J1^nrPHW*1XQn=gfS5C|ZbV%x za_QbKj?35^@Kai{c3cYqmmE#;GxJiI%^A%!`?N0_i8(FdR{}Pp_t|WJb9ISy`%4S2 zeIAlenxG=HfLY@|3nQ+juMZSlQchkxDB;tg7Dq!uqM|QBN2)(9@2o9v3usr@ z>>>1>Zs#167mF-3&u*}s#+#3zcnWyRzVhq6^WVC16}e43*NLwnI1J3dZBF&csX?|; z^UCgon>W$zw`kU})kWl>kg5QCB&GHGP?P!0B+Iv$s@h@PQBZ^7V9Bu;k7?1itX%$V zWI1WwUAJVDAE)hhoZf2feHdbr1~)n(Hh%v_C?qD_r=67rkzt&W?AVKib^Jp3{y=!aPTJPT`|kd1$|=u=aR99&Ugjz%zb^5b@_lA;9p9w zWy=W{m#^1L@3+p0JC$6kdewOLBAldkk-Fp=gGO(TOg4T zzyuaDLSEl7QWbf4cNC#(DRE+|O;v2J-7ymgQT;sp)8senD~%$eY=dvq9x)K2ml_p0 zHp--Ca5&vTpsN<=Vm}5J4;f<}hA;^^gj(Ep3r9&-Ka9NzG4Y+%EXrg6PgIs~T8V1{ zTHHDD_foasYuUUzUtbGL=USapHgwi)ky&up=N~)l{kzo zoF_UIej@I)3KgLE0;y&KmHknT+L7`4dcIAJg7phwgh=H#yXTf+`Fx+Ws3YTVFWB<) z9P(Ni7_NS?_T;l+Vw7Q~muX5a&XA37H~({f@HAuKZ=CM&0xXP*{jDed4`fzeEeLL$ z8Qdhs=VNhg*$zK)pn+3uE|hb6G34{M>-J_6TC-xM558z!i|SU?g4UH~!3fPy>ew&` zx7oeKjHqYx__z7|<)k!RLUwNISBYSrcEHF6dat+F6zL*vp5Kkt(Qo*xJa|3WzCuy9 zxDj*kkRW&$FE&1PRIlfx?>`p8V5%*Uy_=;)LRhuhNx8pCY^O{Y>>jec@@eQN5q$qd 
zy?P<9%FDIVt?uo%@iXvGs^D_itw-qb0Zsm*Y)be%$VBM+F7?7m zaE2JN4!-Ypz>VEK@4^gP2$df!k^{$Ku}JB)H=WPaj^tmaC*OAwB3TzzQfz8voBrX_ zlcUo8#8|8~F8+>TY*L>Y0{3|8ggf^%VS)s1{3R~ZcD8dg%#AamO%tDbi=eAr=^pK@ zW9~z+5;76+(YIGFp`t{*Edsu;?Ej{miZtTXQ-Gv;^DkGoG^K1EqfA8N11_{kMsTWv zUC@MS>T4rU6A@9>4T}`}Cq@q+2}9JVYATfE=<1AAWRWvem`JPJmg3q<{PHeEF2EbVHrogspTFTgm=Ea`_4=_LL1X#$J1imY3Yj z_7vR59@g_iu4^JaV`NJ#I6Y|EMl@O`@JfEdZ7;%!SFK+}#%kPRzg*Imle0JJZUP?}nB__`cTOK25UmWz-K9#pg^V&zdVizU&){~Y(d_u%~GhME~QTm8y|0dr3ZDMJT{SJC8!=vfar85yJNzRPcUgFFfF{m(UeLN}$BoIPh=S zq`CqNz zOwJa}*9sr5ur|`KxPEyc$kmpfY<0r+-&z(}&(fNv)rGuuc;8{gZ2EZ8R7(Bd%$6mA zvls7{x2G;iKa64^Ujpnd({w?UbIehH$H?<_{`aa}DLp9^dN?U$;jd^crc96Dtp&mDDoo_A@@#+?sMDzK?_Pycb+pZc^j`M zgfSdIo|ZK$9hQEBnki=rZQCFxw-%EEw2#=qcF4GOT7E5&1@&LB zuXWMHm*|=5RI;bYQQ5Vf{Z5M^5Ul@#`0@_98B2*Isy#5epwEWDg43z-3n~J;wA9Xo zg%-Wxq#k@iX2VW$!y{N>-ClS3+j}{h65qXPKN8vcrTpDZ@gfVz0BT-DUqZ6j}y)c43Bs(#=mD zysr)PlHK?k@goOYj1qKzYKjxczpPIe+4VvUGk!O9r-=q5gjFwU61P1 zxf$=d;FE}x8U1D9Iy2vo(hDZ0hBmhu@F77E?R(Y>LSA7~^T3OPB(kO!2_2T$Smk>~ zHnv9xVxYIh%ewi_+(1~qUr6fl{(9rUC$Qs1Us>^QzYDWn5b%6k55=Nwmu};*UYl{< zU#x%h#HaUJ{jGd;^WxNNB$B|{ zNSCp-ef?vtb8|{k5BITs^9}A)*Pw!>6i++fG*LNno$LZ^%kva|x1<}BN1?+rw;$m4 z#^K)1iRWs6GUD;{mXLeu54+RiCGQU)Y_<0{EA zxP31+9!YA&vVBm{Ep+P_wey!54`-}SG7t^V+E}K3$?7*8ER&ao?2)8UeAi>{)2ctF zC!&|(iN_TewwG()+uvzLr+P{F?Or9)hunm&q5cc2AUvLuLoej>dA9Lw%Y$(-Cv;@t zboX!?DFT7Bt@<;O5#uLa%8wlX+zWqEqc<(j(S^+`L9gMb;m^Q3k=>XlVt0OyU-5$d zbCDh%g!6H7ZeTP^zHYv19J5y~x|IS9!~D3>TuGk8^(r{%HW>{!FxZPZF;}p?3F+h@ z`R|Q%3`?N6!PvG4{}dJTV`g*?g)?(HeFeD-{@4I#BW&e2Wrdt1YH^4J(jBMYzPp8; z^aR}4W4c>6=rl=X#=NY_pa>b_B95?Ol6CP*Q$SOU((C#$QA($ z{w%KFDPS>9+0Ha#vUd+E4w@Au(C3= zF`t5KY@(F3Id)}Ukcowp@=omktq=;#ec^itwkFUnWz%JUGb2-C|^_@igE>P8P`&XI{#~ErGxyH(CgU( ziDrXR?6IHW(XGhgqlPSt_zPAwqgJXW`gSn1(Oiv^3}KgsOdQ#c&=7iMIpM7`@3Pm? 
zSobrVp2xO`1HiGmgdiGP9iIuLNKc0V6idX{DBJ4RG7FZrEN!^VWQIg8F={y*9jtwB z_y>IZ8Orx6XP`!lNKs0N!oYZFje=UMucH6;{eV;g?<3m_y@pmXA*!*1d4lFfSFN*# z=UI{-`v8I02M#AkVa~UJoE(9u;DfOF&nr5dxr3Rjn~S-z{eL4z6B{^Y4k9L^{|0<~ zjB4Ia=0uFb4i1iPaEz+%CT{=tcR6bZTR27uA~rZi5l0s@a~C3ACeS@h`f!XY=B92$ zy3Fhx4BTuS%uF0aEG*m%oSZD2oJ1__Tnt<+%v@|lENrX{?A+X(+@KqTiT=klrf$}b z4xo967}Zsz;r_c2QP~??+qpUN7@HZ}3xO_nj+WM@t_-G*_S6EP&;J`U=l_wI+1Q!c z{)?2Em4Sm3v3D9o@{fmqecE)bz zV&_DN8;p5v!KtvX-``+7ld%aPYCJ5>T!PEa7 zB0_vPs`4DTAITHcci0@eUNB7Kxe|vVN!^(tWDqpy8T>&Yst{dYfCEE6D1e5DaWjTh z#k)7L<;U?J;kcY39dB&`=-BVm(G-p33V`8}3$)__`c(~NZ4tM{V9@W`;Hvo>$Q1_L zGi|}YdFB*ICXa zk%b~Qo#;<@VdBWZs~b4QJ;yzR*pOw$=(E zb>t6oiJGAa2-?s$V~IefVi~GpWZbxz-wKZ;KP3*^2DMEYVuPK5cB`qd?FZIS4&6v@ zE2*<~m|U!7gB=VvfLNrm!CXScK03upMP?DKJ2x0Y9!XU(RqD(uI8+jiTOS1Pnh8h_ znTBdqYXNRQWz*<$My6DW3mzVLm~#G?6&OdR4f<0Fb!I_6n2iM%_R_#>VB02iamd%Y$#^o;As-hLzvK{%N`&mo?p+H`N9{mqmyIbz;lu#4FqXK0NaOAaDmPF2!68fL^lkbpF0NF$46F6q zU{u9yK}$;e%-YJ&KBSDd94dx?GNX42AXSbGF>TdLE_F&SDKVLTFh#D8X%b|YQFvZ` z9U<}t^k49P7}O1=(GQzh*7>YKP+)l7B$`hRb{MdYCq!H$6b31B*x4!`t&V>c7r19E z0f%t8lqzz*m?#%b*^R;PM3;i#idH6)>v_5a4Y{0_$?CpGg)bO*Zg6DPq-<+8_+qN4 z?LTT_%tCOGr)M1($@2!SEIR$>f0d>t{i`Y_*^Xj+fRRVdGpvqfbwDF~vHLb4Ite^L z!}SVr(c~wQN8b}vY~P}5{@qT5+fB5^z9Gf~n?ZDQpq9da)mX@VMSSmP!F8QyyZ_uA z*LlH|3fZ<>Q6^D!;>qG~bB`hvqfGHjK5J|cb&WZEzh$CHW9^Sc+=UQ{9j(}v6&SBN z+DI3@vQ8?IBE?qfGn$l|(sbzex-^jkht=5dM*9?w4Vg$|nICYt^JURw4qi(Q5cCF5Tq>a}&BBf3G0okx%-xDfR8B*uz1m}}DmEXyF zLu&Rq$b5ug#Wu8xo;)X}iI+Bpho_NlR|b3oA*bDm@COX+A9tId?2bApNVU2p7+}Xm z1vk&Jck^Z#J7pU=6df?M*c`*zP*~H>YF^hxZLbYPr-dIZ(B(EdsIxYE1tf&u;ZPEC zVg1<9n%i^Z$f6fuUR4tuol6!p%V)bb6(lIz@ETd^Q@(bb!zIi6hdQcO6MiI!u}6A< zhzoc=t?T+@&xD*`_P~F5aD4k=O^@3Y=_~_LU+6$P4eahIy$ky|_(!_k6ES=bz>+aE zp0D&-K42^LbUvvK7nqXjwOzfx?h3rTv?)yt#`wSokAFh^ZM*8vKBps$UlW>)6+;ca zquO@Px<)wl3b?ZdUxs-dEAN$DaJXXUb(~Vz0fVVWvW@PzI`TwGQ466wmi!p;sUUZy zpMkA<6X!skHRiFt|2N~tpCwneN_*AJA07U8a?SBQVMUb3N5ZYTNB2OT^3pj#5)6;i zeA$16AVNk?Mhbz_Mb#7Lyu2a+0kIDn?_P|y*g6s-;z4+0Z$uDkc8 
zNUR33t-h^T+}gLoNLqCyF1WfUTJ;q>{q~%=QfMkCQfvdEbnf|Puz>-bLu!zcMC7pQ zn|S>`@1^@X^?VlxRm&Uf(p}xIPk8P{*yBG-&a0q?ZMYVc37ZF4L@+-vHtL1zvyxVt z;hT?Z#4xCL?g(kibgJNf8FBB`~x2^Wn&Cm2yK5^cm<7X%W9mI=TF@1yKAqM z1z<-yI#~Se^$GOt6rk_o!Cz*4)N_G1`|PZUE1>U7)l2b`i>sSQ$I=5{w#M?;*6yID zWkVoDdEZ9#{~xxn{0Bk)C)lE;Xkr6`BmW=ZB5iMM3EEjTt<69iD<>-l9HW%EwWXCC z5i=V*9HXeCouiAYld&lX@`#yxSeu%wNQ!_Uj_#A-{bCzCUbFHGIm94X|=5#bKH8-H`Y_*Ct*WNr&_0XMsvt@q7 zheZ1=4~Co~F06!R&WZ^MDQ>baJ=Wy~=CdpA|2Q%9%ZSfjYD?0NE`0o#I9L!lf` zs&|23VVz(Vql@i%r8mL+*_;i7V}6=}y+gNiE~ovhU7uw0)G~W|wie^tw;+Nf+>A57 zx~Aie!Yf06>t#8v|BJP^jH+wdx<%38Zoyp=+&wF}1qkjC+}+(hKyZRP!QF$q2X}W1 z?sg}8zkN>5yRY5W-u+H~tRG>`s$Qc;^`SKvArhh2*sWYW_5`8E$*P+}*LYYB{K9uC zNv}_>7f@%`jwjSDrn87(U>{dMj>YVVPB{39?jS)U)f`fPTg&U1qKEpf-#1K%1Ic)O zbv-WAxRvXcul`m;Jg&DXm1p4!5)AAY@p<+|E(>)Vu>`h6O)*VCOaS^po$u-TbLPYL zx1kygM$*7O_&iqXJ@H#gUneYZFtAG~k@`-@u;Sd7mFxXQ(^+}gzE*D0Wv0*E5I8V1 zyVH|0!1b%E%SQe+D$|1{Vmp=h@inyYU|^XjnY)G%TH4!zk_%YVNx$la3N@hBgGX#q z!ohJ&b-<$=@top69tS}K+ zB<_h;T=Umlx&GhZ-rfIkQo&ZcWolu0Qd3K8S^licKb2ffFgs8-_<_*<#8bmtk9|%u zXX_XLH+Aq9%hQRq^g5!2L_sK?6rR$cQ^R(r0IvF`i55`8PJ*kXoveJ3vy_ur z&zzJxC(D7;p>`#vt*dYO8&tpupqMDyic^;?OeJ7=CITnMxsmzyCjPacdlqo$_f44B z&hz#Ot}e^E<5yDfQJ}Mz6p*DO*=MJGy2ln9gxJL~V>oTR5__YsB5NBTM~fO|oQ{u= zus)MWhHR>ejk1t~A~CfyRp1r1*Gb3u7U*gI%J`^7EtZMO<9Ye#l5{InhzHq zT;?_UJIIz{O4skSsO`%Ma}%9m0Vv>~jROvYWO!3&s!nt8Bki1Rx-6PB9F21GvhFa1 z&0P=c#Olse`33)-5@f4KEnyTHNokZFvSg>~&RTnB?J^>@W5$PO*TZ*WQ zZuBwbr5hZL(``e8IvD1JfV`v*$&aB*3}xWM7TLvam#q@-Ts~MhS<{SG_LWi$(g~vZ zYf3AVMduwEXu3Ll%_?{8O z`3$D|MXo9qEf@}ut?xP*r+PjizBUT$9y@m-Z=nIqZR=_o6!t3lA-*xCF^!P8-#M{O zc^RU?-u#yR7Uk&OPnhUC7r|43pe>;XC*yrT>VO7$Mn{>O!&(J^?Z>#Yr7#wHfB&_= zw^P(WgiAlIYTQYi8XR#6SS>v>@&)om>5XMx>F;X-ZRaF~pLvCbVxGYvzUqVOHuY{X zt}!9L0Q}4Nl~L_xIj2;l{Z`KFP$J}dhJ;;2*>U|)~Hq%T#0G492jTsX>_8>?Qz_SRiyu*zFi>D{<&!M*+F+=!h- z<`G(POo}(jIp5Z?}L&&9V;Oh#|QkrCC1-vLCD?GNbPy>$POJOz>~OiQv8-VIJ=S1hR*r zHKH|RG?OZAa_hd14vnPxwjCFZKZ*7s4jmc``ji@RjHk;v>BLoJcwT#B!ZT;l2)M{+ 
zvDH8U-();MG#bKyp%n+Ow)#p_4g*W^p@-(@$h#7lY|FfaI5ljIirJajV9!v|2cb_E z>(AeKEge-EjvE7F(+H#K<$et>&$X3r#P*;=13UW*(oY+C!-DTvY>~DJ3my9S$4NVV z0#Wq`(Z9#AeFwWZ&Kv2NTZ%liqH<$rR)=poyEsoP_c{G07T{tD%vY|SZIpBD>I`tF z>0fsYrbLQ3q$B9%65kE4fq8`6IR9|;-cs4W6cB8FOBtbYC`u_eh3VcCZNmw!geMw6 zSTeC2>4Jyb_R2Qc3=Y~gimUOj4|W~O{vsWVfS7d3U5CQy#umrBH``Ov+)MT6jtP1L zK$r7)IT)I^;8{JSyIN}j=sab8$sdb+CF&bn4Tld%*XmH#ZEH`~iN8Kvp>&f=tPn}* z@6g5s+RJ!kBznrA}-TC0-rNSJtKxrj+0K8?x7!z$Sb}zK`88A8F8d z>TiogMYXZH5*XbBb`97&DBv7}?V<7^#Or{Z(|mkVg}AxE)!eILVNtxz`r;5EUf}o@FPmQCFRen=Tb?^(0JTiY`0IGmHC&$M#Kqp$h&D5B`9m-s4fiRcd(N-g-%+pdvrV`0WYzH*289W_?(b zeXDgr?KtnqRcO1~hZc+Wvz-q>58)p_1KWlLg8?z*RO z#~->wS9k)7?wlFz>H3TdN^ z;k%TpX>VPfLIM~Gy|Wc*(t2P2m@K_JvvA$dA;TZgRty0U)3={pt_|tiB|~cOV`QFp zDP-hF4DI|Kr(d^&_c`}eHET(Ggwjf)TcT+2M>ZK8AQP&9Hk{FUKEREt(X`GYE4-Iy zFB^}1VNMUDAOQAZ`HVVvhF}9*!rDYZ3)!pcg32y74LI^!qO$H$$FLfsJGM6n&F|oI zT2)h%Uj@;fae3m#8sJ9yq>8$9X~ipqd&CO{ft0Y9-j=b7Yn%HxfzJIQro z<~pRM+cTJW0L3m}Yn8|D#Re=sW?*ex?V8oqp0}t1CUBfShDQb=7k4P9BI%#5`kNN( z1LSXGW>2t76Uz_&txd1JejzQB>L6y025E-y5_Cl z$4Ln!niT5;hO29UmP`Gk#xn8iLOU`HZVlJAvR;5EB>+~9O~j9v(jWvLGyTgGqR6Hm zDcfOKI7q^e=cM~Y$Av}ocuHdF=nV#JBD6Pp1J^f&l<6u1PNgAqX=!OS#bw1cakZ*%?Qfo8 zR$`?3@G6*m6tV!P_0@YkxCi&{>|e<^eY@;AbTdBf6w5Dl{e>7?oraH2v%yAp?^l-d z62!rN{}3vnfzbrJg)ZAF~Z|3Jc$nkgVlIeMn~3?_KNgFQg#lR$ElcWYlWf+SoXs(Xbv# z<+i`v8+Rx#Y36Mt9CR^4G6*)ti0zPVa^{iQihoiuDLw=!mE7IC^CIiiuHUd}!78Sa z7lS%xuuoZ5R#y2i_wX6&Dv5}QD3(e(d%Wf)ic%^e6~uV%QMs_NfP~Lc@kpUHbwfS& z;o+#1S|**Zv9VDt#lpbQ(DSqd#>mL1=D-W1<2g4cNB`maxIgw+Sw{xRzN}3#!`zDL z;$qhCQcCu;h_nW@pDeED(0@s7?Tz{(V)cIOxU|Zm^u5?Aa_!EPgkc4b8If9Ptjt3! 
z0uIA)GJCja&Zuu?H%kDdQga%kSs?)`nt6#QK8_~x!`|_h6E0)>jOr5=mB7= z6)^MRt=>4i7`&KG#6DdOWZd4~BI2?HNXCvry7A1|X-Yafa~?#R&ieZg#8MZxh2pHqs5V1@yui33jYnCf#Vy~WCIs*S8zeHdKn>T+ zaWMo01ojq&&Qq5;|7u(Z0@|EBkvDs$hn2WiU4?>V4GT- z#A+NF7}#@nv0GeJWKdflE*w`7x7ievbj4gVZHo$+ZUxI1mpiwV!;cB=issBQLk*}AHR6=wcJ2$5~ZEVKM%F4;fIn@?BtDvpj3d}i;Y;u3O z&uO*v>&Fl4wkPG#}m2o3kO{NK%|J_(R+rdWeMmQ?n7+D$)-(Fm<;B)e2>s`v`6Zi0!M-P z`*BBac@>oc9z5O70JzRxj*t+5Z_K>!2dFcC=s+A)D+c?tp7r7OnvR^ftKE@wSBN+SdL(kTYDL5R3;+#r79G{=(wp?JF*$>BIRQ7KDy7a3zgHF9B#PN3$v%bS20ecDiYBd!W#y&+N76g-!TAZL-R@EHA0veFOQUu&P5*25Zg3R z(J7*$5-NCtJ0~r1#@x%OtgNhYd_qxrVDoU#CGH*)t#L` zj(terJ0C;k3q3YMsa2`f)z#5a>Bs=HL0b$z2737k?lps(SzkT-%-}wnKlViG%pHZK z{60=*vvJjZQVIDvAUY2uAVBzg=b37cr$=d~p69t*vF-eW;GpAdMon#P#9QVKe=3C|wztw&y4;BU&|n#wx*ge&85BxUD2Uaw<{Z55h7L1hw)A!~I{M=`w2YgFMGh4k2I87}0 zC=7MCL}-9zya3}O8$fYE(Ww^@eTv`&6ZUhD4+6qrHseQG*ZhQ_E0!1mz=d>$_?%YV zPh7mbDOa0~_Pa5Pv}h2I5*{9WhjH6OiLr9D*q)xAC079>!@~}qG|?PTEd&APXL#@q z`flo1J!YC%WT41gGC06|gS`C~70?L*Pd}gs*54RrEaZk97WS8xcc9WzgQ+kPM@RNzc!dI)Jo+%azoJt* z?iAiOV}pC!1dK7LW%nCs?V3|gMRnO`&tH3Hi2l9;)@5j;0=e1L7D{ze_ zJ##b;_IM6td3br%s>C7*Bs4G+0Cr7S;9PNmyxH9Bi>mgb8?$XoCTj;|Vp?Z9NyTE; zlbxCAqZ9}@X_MF@5t#Lunm4}B;dMMn;&q>Tm9~Hw@7xX0S(XcGAV{&19OaT*K@uRB zSPGS%BZQYKdb?*R}$bosRpC2$!cn9sunz+cA)^P>xrjTY0p^dzA0}_ zMM?{-6ft+DwRFV$r8;L~)xB(TqEngilL%0k;m3ipU1wWbATD9Bq$(7a`a9jwZ%!&N z2IDOV287|a82&fZ5h8OR_-*6>R}^oPbu74=0f=RJ@OWUuU!A{!jLZ*&YF0A1&FPCG zpMq3xL5E{%IDl&kh!u!Z{}=}F-mS@SJQ#i{7clQ?j;$BPt|tnp0#k^}JO}Kih!+{Z zXU5fwZsNyMNvk0={m+LRnDY|y69}(zb+}B%A_noXsg}n2w{&1$xou0xpm?usV~B_ph|O_EbA4KfJqH#W{f-NBQ9S8lK= zdr(zrZ+AmCFC^-}N3y*X&+;0k#V|13Vn6_2XwP_C2jV`1bdqL;s0O1T12POMh1Com z2*Tk&)hmNqeE(K+D8XJl-`%JX zZj6XFw2cZq7961AU{L7t+i(nND)nZ+ zm4Pqr2!|abswJA_3$d($0>5LG(Drr=rdUe)`QIlQ_#}q6hLq6zr!_JS)O>Aa3i0&r_A-$cC94m0?Ihg zubUb;4v3cC6EAoAm^Zs9598N+RMBiaxW9-?fC9!G$QX_V@!z9P{g5jyZK}aZ=g>s% zaKc3g&Iai@#*x4M*?=Ad1hBN4r-5mvqto%OLu4tuWQtcKU!-PJ^YPN=@9kw)^p;mw zcMFEU*d0SbK@q!QvUH5WICq0BV{g7*WPeRh)0DxgjQpsTgMtEd(BuIl&bUM@f_hzU 
z;B&HKSn*B=3Q8Ce@*^BLFk8@pY@Fma5a^Yf$)R=yqv$l+8Jd{QI z1jNT<6@VHCDs;P6TzAtTVUTNT_g7YQ`EpOX8D5C$-$mB(L~{d{R@1PWyecmeu=H66 zvMi6RHn#e0k?{-+SdrfD1S<2i4Y7D>&sNs>}mrl7Gepg6__q7O&q;4z`7#frNDNQS0_YcLem` zI|CfFkd>8{G+yV?BxWJPM8CH@4tpuqt+?pu+0Epp8no*4U+KBwIejNFF2HNW)7wpeuNJSo5jYXBV0g7CMne@kFZ z_W6^oZKbHFVgEN$NYT8;#_x4bwR3=f}OHJIP+HWIk&Q6d?|!?11M(Yh0tL9 zG;&|Jx=LUf*S_`xk@_q>4m3F)P8}gl8*g-nK4JsA2)etwi<+QlG6%Q5l*W8@A#sjen z-%-b;!rPto2Nj)t^jCcY*xz~>@ecKj@ZIZ3A)tYQfps|eXJG|NDJhuVA8&DS3uC=8 zFfcCl9VDfZM#@*V-)&E6h`SOY-65fG69605m7!>;DA!iFDKx00;_Lw; zLGnfiPR6}7l7vHSfDQ+=>8UW6X2kJzUPx&#sKt{Wn2BFR8uiXiy z8BXQKc!Z-L92^Yrye}*&icwS@hdYvSwMmkyvjMCa@bLQlI}~L5XzbB{nI+S2DRQCe z_#!R3aTjPeEfn}DfF%uih}|2@DO{DY`=G}eR&j>}VopX$(1#kO` z8}>dn{hA1#|I*w?8X7>Xql9C8cx zz#%b&Q}B<|Kt^3uR0L!{bUe;Ph;pjDp4=h^m#Pg=$|45CM`M4G*##N1Meen{1#}`4 zw2rG8XaB7nX-efBSzm1PNT*UKCFWn~5)@Tb++AJiH5)!pXM9TGkbbgTFk7I5QM2Rt z{FK4kB=Ve^g>mV0y%15gIUHL1-BCp4FzlW(C>o$58#2cacADP{4Yv0NI;I_cd=~cY?`pg z!Ttvf!64qRQc^(qaZT@E2v>eQ3z%-nKBk{_)ynjo^-K&Ta6?x@H`G8eB4INFsv8ss zns52S$soMWY*{dQz|cb-ZfgpsiAmF-3eYAXP9IP-CqF;G-;;#CTuvjJ^W+xLd>~Ky zY-i9UsAHW0SpL-Sl>`R!%F9&B0MjC(c~1a!WsS&T+mf8?q3C@EK78w}ueAyF@U#H! 
z|E<_t5ggB3aaZp&3rGg>W_hGvJu$mcYhFfW%)dskTUci+drV@~t6|hB;BH!{Vc<5_ z8il6?PVm16FaUZM8l3j;)zGeCCrw?PqAy-j+EnEL=?Xs%j8Z#o3^GbHOt*TZ*qAE3pl$?m+RH6PrB@9iVWav_$un zzGn6M2SYQmN~`eV`+e-aD8i?KO?kx(4aUq*|CW&7)4;$>YIm(TkT3YAUMFMoxVYJX znO60IQW8iu0=JDSTPBqs&M~|s+T)`bDzCQ>!S`f|60^H?FrYF2<%^w*CV>QGKLG*j zx_a(G_%u}9PzK-$YKGN;>a^m300tHJ8JF*bvxaeXQx`?^4hWSO63bzLz}ByoZ{L<4@`DA9)j`qUZssB@wygrc4yQ<^gua`_!b7NR2IE>f&fLZx zTa$IE(?isx^LWA8x$*pQd{Wi7e6Uv)1^@zoy4$pJjFX6yy$V0SzJB9P#tzV}z;pM= z2+|`QuHw@`n)AxQg?oA}m9QqZ(%9+uU4JR{m32N#Ne&zD3GKzv3tO?2CwsJcwE2dZ zacHb!c(*rZP&N`mcZzMPE$%l}3we%*GZNGqbeQmg(b3Vr?_-@;Tw!iDySs&FqI3Y4 zGRAD$0H9L3G)hWJzBZgS*~iC6)dG);5l+1s@LR^BAaB5l;cHv1_aH8NLbKJt<=;%D zb%L+}s?ZB@7HdF0LyhS$>$SqUfJt=kL{nztAkb6fm+p%PeuQ+F@P^YsUOq}Jdjo1!~IrAVMLQ3PuTp=&Z~Ra znpLC^u|a-s%vsGfugF26Q~9T52@IO+3l>AgH<_Yf?9fY*8MH_2WeNlcYvJP)%F{T|K?-llM9DZbE)8_2V?)M~MCaERuM@XYI- zjq;vUs83-~8#Jx<6=Zk~#<-o5thxyR$&$9KK7SaLR@==t%(VyK($YqX#vku4?qt!{ zWWON@*6q$H1R>$ijE|FQ^ZS4*s;cy~2n~vCLC7x5V6*2YccnTf5*LzPc~&RI?$9{q z70(oBv%;D^BR#4KmSZvee?iNGzlS%2uaISCG(W|>m^aR-Os2B7CRA;0@9OfWbl>ca zc+CUO&N`p8+*A>K(b6Iudj}-LIf~8J*VR4Ku?b;dU=lh*XTp@1QZ#^SA|cWm(Va=Tc|FjZHJrXpGE|4>o6!sO$rwNz zueT$mGbR^eM6OUbo7JPnw)}$qholsShcq2h{s>)KJD()TH*jS}rc=82A%nd8cvc)ph7I%vph2*6kd4MW`$*y1`rJ=VSAPvw^=;<9 z{6W#}$W0euZ?b6|$E`p6`$%P;Otu~*R-4ZwYPdcs8#_riq&74 z(>^q|p&=n(-U+u@njaqPSb74*gA$BHIyJH{-e%+bM@Nc;-;cJo92PMG?=S+mo6=r} zfs$hwUFr+~^z7_R?ge{$UbD+6NtyLCB24-_QoqDHRs!z{)v5mKvC;N*wv~O+ygJD1 z&m^hP2N}K-+0&_pkl^gFKh)Z_@`E58-OU08nSmD^D6?KAwmpiZrN@zpU+YKl_r|oi zoNcUVmnySjhc7KHHUDl3ryQG^QBJd$l9B?FqN=-A^Hm~dBlU(?2h(&){TM~89x4?e z0nV%g&7HQFr<3y+sCV8nX*}#Mr(cwTadnd-iHciOZB;Z@L_VvFD-6Y{X>b;BwvzXm=;o4aQfFlDs^+}-tVOg6MC zLggD1G!|{#MBW=L<9P*IPC4gOVuhc{+R&cHOcr^WAKoq5y^^FOvD@frw}?QZY#eG3 zSQ9}5n^i`uR!+8JJ#XXYO~K~3Z>ZcJYVBv+i8@7t+7({}q>*YZ%VpE~TyK`0t0_ny zoUf16t`G!db_xs9=Xyihvo~#yPc4{u7O2MJykj`;`wFP!{amK)O$>!`9 z!vV;LW&m~Ot0EqU++%Ux$~$4;=*_iQE55cGBnmNv+xZqTDsN_IA{0z|MKS$Q`xpVq z@EvIGz>Cl4Inwn3^5t(}U^v;?B@_`X_l}NILEf!JJ90Uq^Yim)+6*}r6*kjFN+t8C 
zkQR$|(vuTmVPU-y_yy`yHKvou1Y93`*{-jzf6rC=*HR3DMSZxZc}cFU&&~tw@9(>A z$E$Ezj~Wgo5PtA@J0l_&?B@rep|M!0-&d4BK0N%@8-1qIo4Bi%RlRJ&)JNC@)mLg~ z;Dxu))b#PW8x0Pb&=r^aW>l)=L^oh#ZKcG+8!Vqb7gQ1PXTCW%Bp{wR<}V zT9vZLo3@vh@0g&MM^-b%J43)062)|?(6I0 z<>uzUS+XxtQnEgulm)>xHvoSxVIQ<&F&vCzYu<- zx0_Mi4=mkV1F?6UOS)NkAYHGFA^&QckoB(Mw$n}oVvo~~`c*HfqaERZprCUT(HALs zMC41cr~AWlz`PEvgrJcF0ZOj|TAtuO%Q*DxC?*l@)Sf(q>$q!!0nn|Qzn>HA`v8;) z(rZ=Bdz#p2?lgd(4+{@uNLBSkgqq@7i`i!x5ScGHQ>e?y*#zozHqyAR*o@j98YBJG zHmxFAk(HIqg9Vug4=~@qe}AphYXQBkbNW$Q&H3{rY{yS<7Y`bkOSB)tG#l*#6)bgi zNq1A&8krprrhtxsQ{&cK1Lm@5TY}xWS@)FrZ7>9&9a;A)BtxEI!4TW~h+Q@rVC%omj*)SKyB zBO_y^aN`nM1A5EWFrHR>4{Wn;$T0JBzB zRyJ86ORZT5lq>HZ9sqw$L(`k(qnKD9V1!mYE~KZ~jU=*#o}uPpAd+UeD)P}Ne>)wu(I0y90;l(H5$A>S{iCQC@vmOFD&?6YGcVn~CEMxj4EiD%Xa=$)ODpq!N zaS7R|RlG@=S;<&};-2X2?9361dh_NDdMh_teEd@NmYKQvKzN5QfxDTRnUqwx&t*{Q zmbL*L|3h3F+6#2^`sWx`JpUM2eO1v8l~S`3|JM#&^ju6Eg!BE60Co3NvtPn}z8=G)2$S+0KZ~*o4X6QvcsjSpT=XR{w+Z z!_LLZz{$?S&hf|ouyX@<#b4q5Ar&)6LskwqX9o^*1M5GaF#i{&!p_0Yz`?=A&h;l0 zE~dZX{Q-rI-O5hSgoD$@S^tmo!^-mCgu=$jz|O_S&iyA84wk>+{QC{rNzd5a ziv5oR#LD{Lgu=?qz|G43AG*TM#`X%2jq?vEEEY~iR-8^&F1q>-e{9enT>F2%Hpj*M z=dHPEwZ+Iq4ph$@mA%G~4Xi(r;4s0jH%9E?A&=cBp=iEgOTQ5BWl>00wdhYY9|kXk z*Jefsu?8Dkp`!$x!1T6l9iKkjEQ5H-b@?>BZDWGZpSCBN2p@0jnjf};tY3bIX;u*@ zmEwCnRz5U6AR-A;_4I$Hk@$oxGl(@h_fgoiVkW@utQ>^oy?!9Uf-5iF5M;BPP0ox9 zI`E-n(}t1C_n(ewUZq2l_11sV6NnBZsEC{!k=K}=f!3r;mL|vt(da~~JfQ?HFINZN ztaP78ub(>hVSTWUwdH7FddtXN*}z8-R!baaPd3`jM$n1~!f1T7<$7!!qGA%k;lum{ z@xlcU|CWiiSp?Mp(?zIL&9B5cSMD1kj8RO@3YwU2dww^0o!IV8a00I$lIyO1Ft4qX z7=iVc?2SvW(Z}jX^+(S_*j30EqIShXCrEXj4lr0#h zPxxrorapdBKu_%pw`^bBhw6!HI0kf+$*P%^F;U9YE#MV{jg_L}pDn*PM)Z4$LgCAX z6gx&3o++lPqgB>|*?=ip+4o@%cTmu^SL{K>7+8$QZ(eiOC!xhQ=9bBv;Ixs- z2?J?!E9|f3hAXiQHW9KlE7VHBz5nB_9sJl$B%C*vX@BZa+W`geEY)taoy4J z#jD2scP1e+eU%x?VX8cf>8V1g@X9Z@3{%i}#v6yR4NmCAQLt?=UYE(OsS@XzA(zs3 zuwn0CJR0dW+h_O?Ly#l;@ubB(TbF!WewkDn)tdMnDlMCn^vQrRk)pz4EW%o7;Kn<` z-6iPuSxPd_HiGwzSyf3Rafg4Zr;OD0F#{b-O)M!#7+E=pLk%0hI~CN_2ibzu)APOA 
z!Q@3}w$dLTKtr0tmdIIS{s~MGxtu=|mG^rUzul=FuGYMt-Gahv$o{l($bA-8b|`R> zqvXEo>QDa$b>LYPX`CfIjm${vig+LLt;1GSGExIn%j#E0_Mo>?A-)A6kkmgJs!VbyG%dzPfH+@#MZjNY+})^b$V)<2C{ znLoQG`cxBw@4|hxc@{PNZU$1w4mMQas`+|wfEe)+O0SXr7qjv%Xt9&a*M;Dr@2A58 z{iR{^=O99wpJ=x?wT#eAah%M#XAf8x(aHSkIW*wC}rHYWgeK>H^b;yxELLo zx&NMCQ6dQDL6RqG40ukoikXKUvA$Zl{B4;AyiJ?q(d=?}*&Ca&x8a> znBf`qbzeU}36U@XpOSxls=U6GMfwv7Gw@#u0wfN0j=+6qL1_|38EZRBU5h_x`41QC z|3u5|tXy1w((D-W z=Q#sT_c@Un(v%i{{Nu8k>Ytd!GHDN`dtc5T@H=0scDr<*&n|UjsRDl=oN_Wu2zb3L zo}ly*d)*yQ*0(taw>{s)F3I)~hs+Ur-JR_h6c95BR#Ekdzh&AskhDRwj%Fn&sl98r z@Z)*>mD`c|rkl_4xRF`AEPVWYbn)_!R$IY(?gQDMVMoo-iT<16frC1I zfsF>)^=PmhAD7*vmIJtOBT3V3qu#0C-|8(_*Z+ppV7Psltk!JvIoYz`Dk65+SKuUS z^VoaNxkc;lObP%A<-NfmJB6tb0S#$1b4QmRaX%Xao!wlX=PXy73GV*hU))XFvr+h0 zw%5+?SXXQtRlTkh8j|UEg$tijGMchTh4f%4lzJM=Jj1$3FDT=@+e?xc)J2Y z$N~F&4_v+mOdK#3eym6CMmz0vemJ&HP0@JF;E&yKC?=^tPq^RGu((TZv$3!h7^^oZgw9j!zv6F-41yY45r}ka_DuN_pGGB!R+KPkz6Khdq0m zDPxwgkkAw7ivb+64tax4lEq9He~dVd?LLYIYv$ zG^saMHUZd~2KGbbI3N2ToHVy}6$TdNL-4`0yMFShUN%4Rh$pSFP}w{OS^N6~Y$^7= z4k6~CswmaaRBfm_m@t8+PAZ*8KdYB9!NTswi1nxZi^LBxrxGhh>-UJCGQxF4YkBOH zt!`(o+^nT51%!OFG7OMOLl>S!>l^*9>ued&mfG*8J|t=!V2EvoycPAY9Z1*xIXvz8 z1;oLs9?JTe!xyq1!?>-K2w^9t?^a9b5lgd3&L(Ir!hU8onn%~8Md@a25 zxhPqUIzKY4{|wP))(leB#>6JHMo_@xuiHP{8ZjGx5(cIjCA#U25 zmeSM^J71;YFxd}3(tm{n9(^=}%UEn(iG}c;kL53yv~C>zVu_WHrD(34)X?tHnRNVU z4GJ8PV~%U-WVNp<*F|gRY03ss>@9_ASS*Obd{rgE3^O;@;e|(p5tR#bshWoG4uA#K zsAJ9u&*$;a-}psYOY%65WbG`OZ1=42?wO3fgUywr@&6`Ww}mO#D5w0^LbCFZ!+ zW^mh`AHIb4v4|7An)v$oWoy%&YPvh-d3VDU=9euH;Mg@4A|EkVLv^j9yhztTI z$(p~8WP+1Uw@L)Hh<*;GN?sO{j_VF&O?{zXvjbaL`g-R5(Dwo~^Q?N_90#{O-h;fK zA9jgfZf3jM9xo@`h=8>F>Djhxm@3~My|+tmW+wJb%{ON=A!NBV#5Qd^0fjTjoneAbVf17sk2V#2(edsR3UyHURUL~5X+U`+;dGS zyi(vj>KdNB#Z4f4K`D!FE5o8wc{e;!n+ej~USR|Nfi$L}){J@8=E2#7+5NW_v5CQG zI#fgz>uLXc5?dZk7>gy8eJ33pDseVbiZM-QCB`Y!NkTch zVHVxnf_HDWNpu9aZ`Kq$^4R5W2x}}*j^XK4bU*O3V3_>+KL7E(m`Hxi!n^VWbm`PE ziR;zXaE0k&N3TuAHrpcSjzVRwJe4FEw+k;&un$I}i(FD|)VHAd&pGiCvL9$Lr&BJD 
zwq263#0-?7+=C$oH6>{Zp{Hx2`#BmRatBD4q|1I0Zo}WC05!((+K=*tN6TNA1HVf9 zUYb7eVsc`@QJ-qdoz`Xtm=wyGQdRKSHZwH9;@fRYyeG$M_Xdk}rTp=*d{{86^6vT8 z3-G1{5gm<@q?XFc9}~FFv~<)d zxnC36m92|GxQO7_5t9al55C{+FQZ2@`Ps$$t9JM%LW-mE5y76}pVZ&YO>?S&3vfBNi*25q~Dq>GqD{hPNBW0UWxDu1uRDWd&95QTG z%qwMQhXIvv=^c5^vH~hjif)ZL2@YDsxKb=;qGW@Cm3Q5vF`aKA+z z5;>D<4EjYy41alDf>C{$;}!`yprsFi`%HvRs}ACK;~)N-rcjZ8bqAN`CbvON}1DKg}#fkhDbP)#x`*rPxBVW9~PtRN54D zp~*1oh)39B=9xjvsm97pQiMUpq7s-0m(}-Y+jImR3BWHp9%!il(uk!*NBvf%r^IU% zuFjoL8q=D1uWzuz0}D@GFz|CMUpejz4~<>Gb(cRx!f%Dg!J=i=rXW|fo(DRoSRuv! z_f@}Jg$rc9p=0hj_zyl(`e?}kzufL#Y>1JqyUYH(GQtnSsN*&8MEN8Qe>sBT%$Z8| z@$-v+wwYd^5Y*3Dr%>V)mZQ$E7hVzAa&r550l&-&>5OBl@8%u`kt&F%z$mZ1=OCDES~ZQ@t4zd%$80 zPuli4h=X!klu0zguO93)=_jp7(b9PF5b0$*qj;mzZT#ljaD`MjP4Yv>X%- zzVUCMCtE~px^N;=3b=rIYYvUg)XXcSJvhxUBiq3g(F z)Xu6YTFl`FUQoxBiPdNM6X|KSj7}|@#GY<>c^aZXx3M0P&^YGT#Bos(%0-zB zNXrq2uw)pyl7P=*7o!}d(N<#SULsN5!{O(+T*(}nzMmNszzCy1NvZHzDIxgcCPM$e zSbN79*#ZDtw{6?DZQHhO+qO>IcAvIw+dggE*6qoAdAXB2nU_p%Qa^UeRllm%UhDfT z^j4zYGr+ct$9qzlgBlC>X8j}u6KPHh_hatF#~`)xOwKv7Ff1~)qWxczIb)qEWM$S^o;1B_+ z*#5Mu%i=cSzw?eOY3iE>%`Ge~lmOKTTJ;?-EF`|{7n$8f}H%o;9TTdjq9!dy5Qe4K$+(AjH* zh)jk|d2!@1;Q*Dpt6g2htm}exbi@u89MNG&5XyHRLdF~N`nV;hhkLV>g20FS#?MzX z3qEm5QOC(8`WjXnmEx4E@YULDpHE%EinY(qap~dwffhT+Yd3^+ySUS=aeZJ;YXED; zsr6oX#}r;e)afG1P6XQY_M%g@|C%Y1rRAlwJP$&5UZ_p{CNm62l>X*k7>}%{qZP3& zS+cLavu*MmEULA?IQ<$0(G!GUTX?1^(5~YCNu@pFJ)rCBJfeWw!^x?zU%dnv(YTCK zL{_|PT2~;>GvX5Oysh)f4vn*|x=%&9c$9`r$A%uF*Z&9C?cT0UG-!hu()Ene%6X+& z;2NF0ZXu7vO77DOqJWQBQkRJ^)mw5QbGD6DFT8TyNv_lEicT`#RMRH5RdUfx^+t5* zWk&1vY1$(S9od=3LVLc?i*aEyJ{zGP&dfjB&e&`B0{NNET`cj~NlTO}%A^TyECjOBQZ1XT$8^#}3exag2H8Gk;b2WHqi+S_Z?FTqp z`4UF5$S0xlHI~VpY7OELz))%h9=*{joB*?J zN&r^j6u_^f;Mxxi(yQYkmrLtE;q{O%26 z`rUYkIeb8`#!KMQ4!SoNrY0N*ZC}3%_8ZIoWcW3I6n-`EW&7)Ou1|`dI6`NLW}gG2 z-ayqPf|HZlCDK%oH17~PDf+{&nYd&OD;30=)RSJRX=m?y>BNQygF44pY+Em}&qX2i zuTx*U3OIhc2GUL?X)XoII)j~r_60je=3y0eL zf_-h1KZ#i8wyFo2H5}T+0{5O~!^?lv1Qk0K^&Me7>`g{o0FfA_lrYlG)C72`F=a{!!6groA4tM=11nsM 
z@#DT>Uz@Ud8Y3`WM-;>{C0X16-KMB|w75q zw*FhUis9=rsB0E$`=Km%6()-C;AiV0+RPo{Z3DvDgW+5p)*c!=nSc9m+i~bJlt7d` zHNY>Dc_l2$qwb$=k1*CnwKP(V$wB*pk8E>sl9-o)|Ly((9RUcOd<27@zb6561KlSl zNumt3(s1o)f;b)h_n=xLQDqc{XKJCD60oe8UY)fb)USgq8)PwHY%b=$5D>9GYZ0Jo z)>^EnpD`QsS4)*CbAw!>5PKVMv-0E{QtuBDNTnK%t|J(mo5L9@=U-PAE?B>Dh-Y>? zWDUiSFuQ<$4#Vgo_L;xJsnZq;O=%D|VF^+aZMR%WrBr@s^Ly1Y73Yz{pDhva!?FYL zFN)G}Q(JgS4e<{-6A*UOtZaVwY-K`Zn&aFc*z9)>pTKf{?{BosWmfPh*9^EAYz#5d zWRO-*n@2FIH(irySmTx(sN!vhov_y+$Ra?C!j!t(R>lBS&VvmdndgI{2 zEr;$*e10A8W+nMu#2|EPUL92O zKevzm6X%X#|3yOkzn!7|?=n?p4yOMjQ^jqC_1n>B{9_9+NkkcmWB_t&np4Kp#2ViN zW{K1!vA`x9`2FdZ>723dtj4{cJ&DKN0_)Aou82a9PbpQ{fA9i5pwD@GyEW_n)8mXa zTG{FEm=N~7_4?5up*L-`o4KcP;-K0GpBHkiyZKOOz~u!KXUV zQ{k(Wis$ip-MpRfrqqYS|8`W@A;}{F^D%HH?!vWSas<>AeP%C=V-I?e5-pOmKfDuj zLy7DE6<>cNAar#hx9u##N&;s`kA$Y(&+Lxe{64uWw|9r^oIz~PW=1NkHJ^7CQ)ItcIob8vfNJP4cscUHM`bKHtIR6DrIK2WCeS4{?j&OZl%mirtTo3MgLP7&{pe;+ z=&V$J;c~Dgx5zUkobD^tnkparAG;a3gV6D?bxCMLU|Jj)PGp?=($A zciYYb>W}uYc++ z8%s7S@POcJrG#{MAG@h4K3-1t1fo;ud{)5MyUm-kVi`l?9B;Xf-6asS-0-~T1dRM^ zMtiU}Iel>-+YtIr><)|~d!2Ht2e9|>WNx%~8%`3RU#Qwo;H|}=)~G% zlOEi1vs&qMYUt839Ce)8nAGsY1CB7c9oh=&D=UY`kBSnH>d<+ znSzuN1Y0K6S4Zu*5CUqGv4f24yv+#Sh3wkAMYBn@x|0OLTnPa3=IjA(3Pxs?pHm)D z(lgcFR2OUgsSw9|M6V9^D~&S|y83PrOu`_!$rLewgye@@$TbC^VnIk|5}&RzOo&43$ICtJ*Il;FTgrW_Hr?gY{ z>l=Fd!a5nMFp$;Gh1W9_2}B^v(3fLp2_BvKC7Kha><9MPjOS4COG5|Kh8B?`_HC1Z zx4;uNecI1j`DpUB5L>C3fd=#yq+C4(5~=`CMre#E)2zk=?8nKIGvs|&l^|Sr+oi<4 zJ`kHJhoi4|Q3QDi!AG35vP9hBjKA(a`C){`t#|}5jB5hX57pDC2XOJc^BK*Mj(E{^ zhWcIiMuEBkPGzpV0FengGFBCBTKOX2u{Q+o7$@)J3QJ}*sG>h3_1<=LR-_7J2nCM< zo&Y>iIJL?H4>0v4=;-H(xu}6#h;_YuezFVlupROrC&YH?yZrybm!Y(JhIk zoMtQH6TMZ8CgL=VD{&>L^+T`$3W{OUCv$oGofSlAdAQO|77tJ8{Gq!SOlrbh9heMP z{glGzM+uxW!f6}N*s_rw8kFV~`#xfm<`5m;cfGZqo=whK0H@ieP4WScO6uXO#alNe z6DJ|N%|XHCsgK*ksd$w!Su7Y)+x$_O7aeKfL`5bPay^tD!iZ+66xg@ZWtl)>FRIYV zdUT&&Bl|gVC9M$Wz6H)SLhC7v$|i*hTw$x%SaMq`1~fa2#h&(%%TkW6jNyPu{+W$K zN=Bj`Hn)V+NL9x|m>)0_?!T>|bnjq|W;rwMtzM8KHJkiBX*JO7 
zY=OV!I;)a3S-?EM~rYvZ2=D{j5dPZWOpqh9QK_c!A|MdLvL27KqVogO`hkJlCYST zwHv0dC#}1SXm>ANg{-GkG3ch^tR4N0^^r}9FlCgp_Iy}iGnj6_gQjtTF0iw%OhqMe zOE;zWup}$XG_=q@SY7>Nh7rVpAGVDU9v$6o*XTL8IOUM=OPYP2=utO9eqf0qLGqdw{oY8~04-}r1{xe=s$clc2`np2vzBcn*#!d3xb!G~wEhqCb zsK27fc&*&D;u8jqLp;UPS4q0)a=hlKpNr~w{a4Yk-?Mn4J+cC`3~#40_}WPeM6dm%EtDq;X}FH}aFOWBks8c;`v9-$G7Am)09(>Mpb1 zfaUGltT?I?7G5s=4%2fS1|Oq%CmgK7;MO>m1?C&rvw3- zI@<*PaYK6GnQgPC{Y>sdUD$`Bn#bpRWz+MNI`dvnkEOw2go`j0DwMhqe<4&C8ry~G zq%RNAiFcguxIsoTSb>(t%uAv&IXTqe^X=qas5rDHyhocg<^z+op@M+x#d?YOOp~;+ zU*MU_Ts6~B#}I-uQ%AgmS!L^{NQ;ed!@dC)7TS8YPTrT>tM4)^ZhyG|Jx)H|2n*~b zA6fp^ZJ>$gW_;F7u?mt{7%ytC_2CQLcq?Bm;RU3D`bdh_J@Y-)+E*6m74PLSoe8qU zd~H$Z*E$JfnrafcB)9FF>9e&K$&q8MEB`=%;+^}i>tA6G#QD|+)(R{mwdUV5`n2wG zfw+FB-IA}h%+TQhU)Ds;BQkXww*X=bx3Tf~%-(+mlu)VhSS<^BT5m?Xhn&&y@~i!!+nUt^XNwc9(r{>tH7&SxI zx6e+DAji#6?$eGf=IksOn~oDJyH1%|!Zp)tZd1DX!Ir~t0rNb5t@s)e_2( zVWuK!b$6_w`U?UmY&ug6L2|dIW4EmjMvgvrKtv6O5P$*15UDK8xwIUOM!7CuW@GZWo^A&DuK#A2AEb(tJgaEB z<*=?B1A7Jt)Bb2)OG;EZ+;!aFtvlt}wZnhDOgYIb)jAz)~GYrW-$P3KKqZ_R@1m{~2W=S%*v-!I80RbH;6%>s2p;Z5nfu;3!u zw5Oa2q<)iD5iT!vfF?@|JFESbEK(c8Bdiud)9Y+e@SZUK5H{n1z!|=J`sJJ(b|HAX z9F}$V{zF8NghL3szl~jg3T7`x0axZ%qng!Ar^DyLZ)G1CJSm$5pY0_OuTx<(nlqSW zF_^l5hWwN8tDUF}pM~CUITalIy!#p+EK}ZS=JxmL@Ot$jb+O^%@i262?@=86{CUn% zZ2Uj}JG1=X?D76qb{Ux&nOQgpn19>X%)do_HUdsoy8mdwGc)}*R{#H`Qx+~R4$fTk z^yZc>7OqDBVVk0Nvve_acDA(te-G$?m070rzj7)&dh_4wTz)TxCbpJ#|H(@AI}Z5& zlobDM9>B@`pQ^7{xa)B^t#`h?M!CQdM)e**f&iB4_s&RwYXh%uxlpn_1Bf_4)c#U;Q2qI)^z?KPkX)aRBd)HpIKmg13%wqWzs(ft@?O; z-yv!G-!FZu!VTyE!D&0+cef9N_-Al}m8ZOOu);7@oR3XV3YcZhap=a$2jl*4TSq%A zoxKad18Ej8u<-UuWeS>o}J{ALn--^-_yi~gr?Fo62b0s=_bNZ2n{-Xj4k+YkA(!H z@9wW|(srz4&*0PSe|#h+0-sN5w(@%o@H?SnV1TIiT>3+=gH*W$py;Nkjob;T900h6 zJVS5jK!PZR!Z(Ls)9jnCK#H0~FW_xP;Wr`sJ!RLbfXqpEhql@be;BRobwA-^+DDBc z9LOkv#)`RsVo(tXu1CSngdY4JSA#zY?OwPB0KY-V1;8nE5!7WaSEP@6UPI+>C{B2c zxxI=nw!`G$*n_D9WzbooWAF99Mi?mUo5{g00^hJA)MLf2n%*9ZVLf2;@>u+spi%6u z(-VPm>VSP;X7c$heVR6Se9ymuf7-nhm)*8|nmhrNK-kU?qc^I7VIM)e>a-oGG^Ak+ 
zS4A8H*)s1EF~>0=gp@QNK)VHoo-KqI$%IMmM3Ixq1|SdiF^Je7E=wP%ScvJ&qV1CQKM{SSMR%F zXS#y`neDe#{Y-l?@HeiWtAUzXKb?j#);fMZP|p3bPAFWFtwco>ifh$z5H_LkD*z>W z2pSA7UgsDI@sD9-ii{Y~G+gA~`j|u%c*qdB=@@?}lhVvzZAB*&TCtFlI!SVhfCrA( z)V}*D%mX#qEv?AN4K(k)TPlPrjG{$Q?y9=a7#ameOA#bN(2P-T3cDRcdgJo>J@R-6 zvQD!sUerX#(PAN!mNAzx*)^ccasi8Up3x4*gn3#{A><<*W7c%mRIFr_4X6uFBXuWh zo5&}TCjzD3b>Mh=(v8!oyOzuKx$07L4P&Y{f-}Y3e*+OOPXpPCad_uaEFPG8Ya{tb z1MMsp4H0H&S1A_P)qqWY9Ros9n)gCo>ye|;bR%EoJxY6S5vB^FKu``vCtZaf2sq#d zvC=IQJV7E-I53b;lw}!>zPal98MXfmf;iV$_?FMY3D#v3u=J@X287IKq3>okpHR$d zETdTMhUo0)l+*8nBE$M|Bbz|a|2zOYxOfRHZFsh&es%mc4;%v1Bi>$bybV0#F?e?M zM9W}k6`!cx0kSFox^~prIare)N}5)oETC1#)R)EcqhZ86*2KMdOlUl>*6K>_Ce2os zfDA2u(L}m2r5vVi@I=i?y~t8m>ZalhCbdyfuH!CF_?7(>In|*&WTrNGINybkIr|ok zhS#+oFZESvL#~Qz2rt+!#ES4&9~Wag(MjUI2d!7zwmLX7*$q)_qZJ?fHM3*NiB$G& zCKu*8W5Pq0(!Cww=4Nb`!B-DP04^fiIB`Y zfc~}ZxCe;d(k&D$s1@vKIx^*NBeLXSQh|mb$mqFNVGwua95%&Mfe)rH=q%%4Lb8H) zy(Nl|?BVkEze7yaS+iL#)1XB?6VK=<58*#T&n8~4Vo8#IAycrbh2YP2pKzm!Hbd_g zJ$3j>(OL&0V&X38Hwt@#wvdwEJ;Q)I*yG%#pz7uplz`NUs%$~1!qO%{@@jr&u2m)R zIi~ywG8OlWHAJC9u@ZAlB&Rn2%!68nky{IoHk;8fNi&5r@HHAZ#_MFXyEjA7&5#i& zAlP-WcOi=catj^wd`-7uiKWurA8=dJq%sFC{(}0PR+sP9D%Rir481f znXXq!H%(H_&9cN6ovGtI2V2OCL_6<1qmjBIZMUlzpc|#=6mGACc+$W`?N1H6J|`+F z>#HUK1chIuq?Ib$KQ)OvY-)KZ<1qSBhT6InMU&AX#f9}Zb5kY{sROW6{&q6gzBcc=x|FwHil0l_-K6Nh) zEdN~+8;{}bYm%;34s)y6P&)W))K#1Vu6tU%z;S7_PDVuCVx+!g*0#m3->sa7S((9% zC0wY@IE)P!$)wd3F7kr5fPDPHr_y zM$R3d_?}Q=C5@|nvy;tgNNC40TVem+JC}V(uWE$TocakvTRImH><|IARQM(?%p#S$ zo2b@Lm3M^0qi6m2j~HS1s&}O?zx1DaSz()$qTo3Tcu*i?5qD-ak8M0-X@28?8gPG7 zeDTB!w)jn(-S?c;t7_Y;P(uBA&y_k&x)#rYCH7XPOD0OWE<9I`(`ga^(u+NtH-e8= zq6&1jK#mZBgmp~#t}C~FR$Ikb2^Xr~L=}o6k7H1ZZqG+t$c_WsgOuZAx6zL!N4ngs zvq&jW$c|e3u6oFfY#Ljk3Oz>5(AC=S%mpr@=on3yO8~wFewv2&WXe4SC$gGsA9p6Nk6IV(|LbIgaqI*vc%U_~%!-6-<{K6mM(6 z6iDHgLJ!e!TSZWhU~rqhibFcZhTDqnQ;fqsVyEyzwSyW5A{u^VZ6u>i!xQsB`v^L1 z9->}5a~|q;4wsxx>{^ac?n(T!<1XtK3gf?>uL|t|=n7x%)3qt8Z6xWe>rMgXM?0%$3lI)ByVp@zN z#~KyboqfkB?enJat57>=vfv6{Dr+_P29Q5r61i+}>wGoa3k}XAwIP+rW$(HlGR+4K 
zMe2&eyt9TK!X8ny_{sz#R?!@8W`1u52ghkLRDbOFaXiJ{V{^U$hY?v5c!$`gAL*MpHuYoc9@?*)YK^OlUU=RYp2Mp&Xe;plOo&C6=$kci`5l6X-44qS-M1rMdgI%Anm8U3kOOU% zmxS=M|MjBD!6hy#IGJ!We zt>H9i`_CO^xKc@y3PD-Q?`4f~Lu{>o-!Vwultt@q%U?KLnD(S?IRmdj_K>{C606A4ipk|2vvxm}i-2Id=?(=U*rSO&=U;ix{ z%3Q-7s;4AplC?;$pKsrg{dng!bLzoqFW$F~Om(7rqVIsF=)LpF4zCAs=dILP&S@PfW(tAeVJRQ06b4iTPj3L88 zHf?2FLUdsR>NXnRtgr zZR(&B6tjpHjfPdCg}}IZIH&|}9F4{mLM(~wI@@)OrK?Cd%)8np;ufSO0@3%@+6`Om z#YMsi>q;#2vB-46>$~b=F2G4IFrQdme3R4=(_{(IJDyX-)Vd{;AD+IfGuh0SK|U`$ zZ87aVV6^%srS96+f~}(ro+E$C{EzWPdoG?l-V#Ci!KG)D`122->HG)Ne-S(UZ|l_m z;l7)R={I6v=J*XTSXmkAm{>SiSqPYanE=1~YxdvGIUC#WtxT-{cVdVC954KD=z#w& zUSMV7;QY@MF3mqU?FoK2`ig(_fJ-s@cR^vm;9bO_;(OB zHN!iof+XG4Tw7QY85UIB7(IvXh9%cLzrV0|e^3Ut^uNDv`QhnC&lU%FJ_#=mU-11p zH-8RJdilCOY}dY?{P7PxYnp)j>OS7W!}0O`W{FL1q@?8-7MD$!@-pzpfAXU%!}uJ<+TW zUrczsm){oT=piJ<+$>vv27~c`e6fB8Pr}{v;#URf#%sGiJ`aF#TK?3v1uzcf6zM=8 zD!AvFw)7?pAK_3WZ=L*X_@4Nd@7(bHya>A@`amUk1bhK|W#f&R-8NZq@I`of3Cx(6 za~?0UPdw&xL;&!mEwV2>5$#{PZpUCHFPmZ7RfJ#B9CLJFg;v}7H*~rB;jaO7g zKD&eho5z0)S|c-PMZbq4!rSw9&aH_1@8vMy#>bR702EKlFI<8kcM;Jw}2`M9nvv)EQy6fPoP^Nzs@;1|{(mt89s{rTxROdks^(QlY5`vw|{Nqlt;XMq9Y z;#--APo$aU5Tl4CB<9!HY_I#R%gV236`}4=@KSAbc)N)2CudEZy)7%a5K!BweA#xi z#zP}6dL9oxV--B%2Zry=(lYKdp5>E2IQS_(%>o`jO&;HjDdH@1{?-R{tD!Ux2Eo@i zRp9VV!)yFJh@jtY%TSJpKr17r@TQ)U=!E=QjJ(mQ$%I?_0f87M@0e2K@Aq~E<&I#0 zPP6hMs=h{VzF&XM8joA(&>Yej>oI>P;jg4mqM(|A`MjI|6*Q=OW{BB3{~ty7k@c#<%%y_C9Q`gM(s zJprmezCE^7C1($x`LxvqR4R%dp?*vDI1-FdfC?Ol;?d0IOfnW^WYXvNT&Y?QQ|I9YTDLG9GeS4 z-&?A4=~_By6A>{_Qj7#!&?8J@c0Ee(&=|BxIiR0KGiD|^a?2pe9q|ovV`Tgxm3?fc z7$Y>Kl4)5>uVdIGZMu_7;Cq*X24M}XVx~(~?qrOJgV{yLoH%^qr);7h6v)SUvbb&9 z8CnDRySr@r-6!dx2#QpNeR9?hW(ma?*%UO!(&eFQ7>F8STAonpQQ2x7x6fe}M3M`b zT6PK&C&qVK@)b$QF!WwF=1$Qy6dI7R*)^G*aZq1(jR?!bTzq`Jzt=}?hv8LlO+|vA zPELV59Hs7tYRY$s_=NqTj6xmKr;Up0OR@sw)pZ=hx*vh?$==VEh#CM&8t$lSS#$@+ zvZMIu@{y(uV3$PnY^=!fX)(QllcE4qUxrPFWW5=H<&XyM_hSY_q1EiQ5~5c7)BWM9 zw0!kc3;x)X*I$~nsm1^ZwmvWT5@c2aXD%kc%F8VWmB&uE7lUsyF_0x`GNhfu;suSa 
zU@SL+z->cKxzJ0DN^|8Z9H}25!16;fRxAMs@7BZmD4ERrgz<*0Z;b?DVkjRftv)Bh zibKtJ4s|eV_ywRX*E=dp4HmRm{zM{5kh*(CH#B`Sq(l*@lvSJUE()%UwdawdVtPA@ z_wpz20wom%=KJ!=h{wI@z`JM%(NbScv{=D!Fi?9C!gR<6IuS& zVl}oT@iJ%_9ngp>a$r~^6p)MrwI<<}J)ckCfB-;3Nt^0dl~5v?9h92W+*am2b;HE`dRI5_FM2EG}}5{V?TJL?H|n$ApXx5;M~_Vs zA5TegdL=8BZc&a)5S|Uboltd^flKS=P9^L)p#aQ8% zG-BM4-Ys|fO5*!TCDgjiYm#!$!QWoxgFje$Q-GwMc&3$M9p5?NS(np`8}m;W0UH^p zUzx}Dm5l*y?jlxN{~-gbrBl_01A-1NnKg;$l>L>)rh_%+Vr^HE(reXgx2z@}`^Dg; zJ)TSDC+JRf8umAmU25X7*o)ZFmG1Nk%4$CkMEWn?fQ^mKmn3Hx_yJ^x%LOwujEBG!A9r(&5WU_cd}-uFQB`jU9!Za$I6;9fuCC zbD4?a7Q>9qZkI7)sR*`qsGD=*%_aynV;OZ!uU2dheo2Ko^28uuoQ`7JbuzmOSoz}oPq)`N+Cilw5Ac2l(-pCYp?H1Bw#1{lt)e*&s!ie@QBi@CNqgkL+$z&8^# zCyItW%eQ?s<}n|qC-4 zS~6@$61(T!X)Ok^mfa@57!$k4NM^Qfj_r#eI-TP4hb8~Z=#fZfI85br_JYk){}hUK zX(qu++LfbLna(7}5#(?l)lrf)h zrxCu8dOg9@xwUXrry4tzF?>nI9FtaPvKN>k@boShns7KAuMl8>@fVU!alTADpNJLY z3)Yj5B__btX(+?f^5DyYLN>K$U1iVKNx*DcUMZ`E2|S<|6nyIHsy`Zbvf-JiYfT?y zMb=6={2j>5?h2XRoBz~!`_jk~$R{fIF^m$hZ+g%-c9FW~$)PMf% zFuTG`Aj*q+!c*DVuhCb5j+uJ3no=GoNq^It?uVUgQpy@hpKz3VQT=&C$9`%@IOoRZ z)sAhfY5lJKLi9v&MZBQUJyyrOC8a0RVAkmWd6Bjjz8|_x%=-Li1?`XTMrH`?|)WZZF4DxBcaZd45U zJD#;-0t#7}PU_b!7qyp50+`e$Q57HrougTRV6{!Tq)?`Y8ufUYTib#i>_la#zvu5- z{*)0uToNp{S@RXBKi>PZ{D=R0lX)g9-S(%S%h*AyJM`j02j{JA$gRWRDC~*_tLaLBR<>1Cu$>q%%2lzC zeL>NQ;z59F^5Xra=4%}KK9N~v>#MAR+1jqJxYQ2QxfG^RXe z=^=n7NNV5Tmn<_}r%6z z7_+1|G8^CFcsR3*=TMMzUpk{vgBA$8eY&xkyUn(SI+u;66Ej1aMl)u{s~AaZ4sD-( zeWILwJU;sV5)Zky)Klz+EC}*x>)a~rKIRY@Qf)^|tzWkxNi%<7g>H*xLJ6k^+-4v& z#PXmjc86c^{!2wiJhMg@qt>Sla5I5AcybeF-ffD7?8kXJZP6uI9rATS zDWhzC#>YlCXPW?s+4YusiVX+rNkqhP3DlQd`Zk1IukPvur^1#Uva=Rnw|0|qD9I5h zb!XI#r?^_x0$*R(p*1q!=5EnrFhiCFTxT4R9D5PnIdjV=MtEl@m{D8$lvZA+4#mP) zLi4@Q+t;=nHpVt?P(}U}73@uSqtUKWp5v$%j1S1L6UO!~l;Few-@H(Gt-V0^1S$n$ zS%^IHADb;=JAtX8XEniIFYx@@=^aKC1Nw2q)GmT?L{MrO^)?W%mQe}1WgX41DQ{if z_|7(pwZ4z{#!FPDZ2_@cVzMsYe22ia)Hd&-x3xnlS7tHBBFS6a&uDcY6A6pU zFVkBRC3zyB48)tGwFBmr-xdq$bYlzq*VY>{EGY;4+v5A87!cqxm|wR={UU8~k4BV@ 
z^+guMq;j-Z=O}1`Z4o?%edlASkb0d-p8aAl>C!GC>6)l6`trqbc`FDaYQ8LkWK&VP zsqs?LJtKK&7Lqy0ct|f}5vDG21I~Cb)K9Zild2f_yNXs2Zx%t!3W|^_vU+0~1husH zqEXRONjZluV$9gNbh_WMckMJZ;vutr?z>#d@peY-YgD~JlKlB;#TtVjrkn62b!%1$ zlGC@`%0`4U;-XQd2oa4$>+)$OP@9Z%+~GxB6$pL5o!_po``MJx8%!V^u0|CuDA9;HWo$?%DyE z6Uo`|O0Ra~l|tR28y-!hxb7_`7$}>d|L=3aNcud9;gTx9B8U5oPTkAHdPJ=zvh!e4m?x_+{;1*h*O7C;vrQ`+vYV zHRO$~OpRTBSy!$`E}jmi1oV=&hUUM>D|Jf~7mHssD-#sGgsG*ug$n^A2OAWk%IS%t$%die0Q2B?9J3Xf)&a48{Y#n-)Ud>8lhLdfVG;>m1}g-^dHV-Mv4)y1n7i zQM<9*A8*djsi>%=BqT8SeA+&-r(eO{?gZ# zr}kC=066oDi^ds9XOF+^b<@$f#LZTJH*6Z31a@cM;c|GKZaNCn>Y zd;X;)KZ;0A426Zk0^BCxfs;a1PbUy$Bs#L$y*-KY@wE5eNNgvd z+U$w0|47NtD4zF}{yV&7Cnj|9e|+Zbs3<7sl`#kwn&=e6%jc`j&d$zQ%3#m^o6~tL zUPyXKI!-%Df6Pd1bKvuHt^0JAcRlUtZT!6Fi%Z=t84*)`e_sTxs0D_j>Sw4YNT_ym4skGN3 zh7lZ@cj;HDd};9fhWjSZ4w3e^g)W$rEA$IxC9>mXp&hBfo(y8umFnOzMT6XW}k9O`0JXcAjc|ow&Oo-{$$SNvj>@Vurbd zj&}VkPKcG1l*i$6srpImr#Jmy)V*g|Q&G1r8bv_`0YRD|z4u+nQs%@0fGE zR=PfeA6twACU)vcJ~eIjIS&a58DJ`ad(Wyd{Pfyk>fC21EGsS+M1elbfk5wy zd5aSBJmjRlLDx5t``K*k_ z0~w;w#evPVGri<)k!?Jnfq7Fd*@QC;6Mn>CN=pZUiGsQHO?JF+Zb<$$KAi|%tqwDdC8E1T6ih>kI_mfuXmoF_1e4mpe$xZ!O4uI zR@@&gejZK8aM+zAuO&b={zcTIb3pt2NplEj$^3(VIK%P40>M;^_bDK;v$i=lHikm^ z6zP$I=DJ3X;?d4u)`vfub%bn_(9qDdwzg`*9)lV}t$tDzrCctvzqt!!E}IAB=~+`P zt*xK5nu3<_N4nCo;TwexozebiB%n#fcD<0GC8v7$&}02}=5r_{SE=#L1G9CO#qjeW zgEkscFvSZIXl@fj?@a9R%%+$UuB;QBN@pz8hw*_gWbWv#`#B7cWBcq9c?|PMo&+p; zm8kr9Vp5LJ&T|FRI;<6!%~2|U2zwtDaRzC+3|yVV;lj|MmFr`% zS$z5Odn(YBBri2+7j~F0A8tdqO(H$A2;89PlIOp8$d)yd?TgJbQh^Rqjxc!S!x3sS z1@acSEy|I_YM^9~w-2j;QVmYYHwB+vyl29gDWjUpI<-+E$QUOkj4W{BR2-I{=Qt4? 
ziTO{rfh$_z03*ZCCmWyDcGMY;%56iQtXF*3E`k|1R^+Wp6I{6HO_BhwJPnP(`MCr> zj(bGj+0GHeE&vncark~s3Z_^c`yL{X^EM8^5`UkSvth)TQ88j53)dcV4aaw}j&luS zF5OFj(Ha}CeX)NOmseO;#x3Sv=vM(~l7)x-(|U(@`kn*xvwnke^u*3iwHdf!cNB~q zJ6_$Kge|(QcFiw|$0=?8wbRN7mk`uUAOX*Mu#2%$waZKd9m}B1>D(*5j`?i{pWm@j zToj4@lx4nh9V|Ps(jqec;lPV1s{i$J&jCewAD-v4U4i|qwmGDY>phKxdv&63FFN8 z)YR0Z+Oz(zrCo>uQBk)F!z3Q{Sh)p2QR%DTvR!%q47rNBk@Mw|Q0sw+kJ|}yALoBC zcm6vfg#&3+ZLO$1`q&`s?Cv>Vcvy~`bLFg%CdwM zWV}zlwfMTaW@GghY*A-6fGH6dbxYMVTF_xXn0h@jedWKHxB6kMSg7g*m1{kaq5bwP z#pQe5vI3};l@;#5t1c&JY2#1&lVLCl!9Ff$M9mZ8c3xT{mo0taZE)Y9=lY7KO2U0} zhtjmxzID%h`lJbSaZrA_S4~(I;f{q~=WT8LfpokP%FmE|El|Dfl25j=P9ZJ{}Uh!h|>;zGIUauv4J_)#V zGtPE#*#%sq9cs}mb}RY0yCPPVQW(7c_l>3k-LRHd_$SYwAND?U72&a(tuslUP>hJi ztm~yqK;VGChgn5C1V!PAaBRAV6JdV9TB?p4H#ZO@Rpwn}`F~V3>80&P9!goetKak9 z$1!BN^OfpB1jP$08!dI$Yfql7jrPLU66j_0w_Vvx60m>=Y-*@E{Wk668;?Ud#%z-} z4Na)X!55|_?M?2G)FQ+{$@j*5fs}T)gw4uk>!I;8j{%lPJ zu%5KUg@&tc9K#wM)|SQrKWUs9!GfD79Oz;wy}cm>TcC%05Id*6?I;>Fa(~p2uTV3y z2@qTXH0JgVXLlfb0<^4=6pFA5nokabML2*SyU^L9Q0z+qFJwJ&Z?XdK$`Z+?u%{mY z9%jD$XG%8|xN(Hff>S*>al!6mp~1fHGQ+J9oCI)1uow71Rh&RX?_sw$ec z6&H{++@JMiJcoxDA`4j{+~9gKtbIH~tm3S``mSwJ;3>;{dBJngWO!FQ&wgMHneg+8 zx`K5FbrhmmJsoC3cZhdG>-V$kx!hAJI>NxMzW&`9*zh3h=HNjARjM96c0M!@K2 zd@5d6*8UFa!)lkGccU9J62p%(X-4Wk{spLet9xktIvfRe*^YRMnetHV^?wyWcZbwg zIYiEI3aI`NI}x$&A9=!}J|vdnlt1X65W|wG_0?>&(D9@i7?i^&U;{-1t?hR`hywbz zZ?h)Glk1ZyP4P?$ZS|fp4{p6fh{vt>*uyUtn7$g+z4=HEzO4?YI|V1~b*sPZ1-tcv zQM*XhJKDcv^?Dtw@_Gvsz`Y0PwFHqWRlaT9jDu0WLSGlS#@1T_N~Ak+P{`!O3vFGoGbeI)O;6sk!awco9jX0wq#)-iCBe=UFUI z5>m~a5v7YdJ#u!T>m6nlgBNWsG6YTN`tk!BON%>cW;8l$y)*DUU9qkfab3y(|S zEm+mGmULb!9<~|sP+d6K8ctgzYlO#1Xl_0V&)XIN5-|(A1_V^n&sPa~9?Wq-is11s zD@v`r#w33C!$yk)y8TgT_yNqO$BMQudF)<}dk2_Bz-Xg-kl)eZF(|!_GZL9<52-a9Ra0On4?BuhkG!Iur#S8P z@Qq{ZA$l^B@`cNMaD@x9R2<`Q zPSbB#F;A`{1;Mf#&8i-vwIo| zuRr-OD7-Vxx!U|GqE0Xs;D&%A5T$?qyf-jNGSwD&vG@tm=sHBOuc4u#wbg`!*lQ5lz#T&Gxk2EYmyAV{6tFz<_Qu`*1LoBT^0%`Y@RPD1 zqaI<%y<@c}Fg{M#ccTm2Wv6;NwJ1Zx;JkMTv8E|^$SZoIZ4+>PY`03(@*$$OVwRC% 
zSpsecRB!Xs_vdM6!1P1Q!!G62Qah+PyBWuWmv9TrL3#7xWIhz&SNi*ZpB(c|R@M5j za>SIEJC1Srj24Q9_(-I*0Qz*s7735)-8MFyCX9CdO)p}sQ4WKGxwxANWsZw^rScp8 z^HWn|-Y)hEICq;>Cz|iCXC#h1=NJ88d99s>C{IsMDoRRVdu192aVmDXgnFc2YYT4e zd9>7xtw1aDVce~Rg{v&V2;wBUpKHCNyc(TpVzj8q(|QEd=lt|)Q=}Ffi@l=|Hi;=9 zQ`0WI5YR{$S1f!;c~B{;F|wQ7R6fWCDB!gm4jUXBRBxCz3>79xv8)yZVI7=>s|R}>GfseRjrqD6oec`|TvwT(K&@+G^E29~yALjv>-_J2nG80?tiL`lB+yifa+ zcUvGT$%)ROEa2XBzO+YT&!8MsSLe*>BSgei1EtO-0IORpVVVU$lFc(>GkUX=K8QwI zts?l3JW-SFXA=16u5zr;)32q)@bQV=n{tq{eYkt);YI7&Z&MUnG<6dL_z~Bc?>$*D zd{p5?jg64BY%alsj>qM7#WR8zK{*6Lb4Y9h$90zkOTdO4oE@Ho2a7 zDC&a&3W1GB&SA~0CU*k|?_xYpJU!)OHRG<|$y~MBXc>4Qg||<(`9^)zix#w!Qd1k8 z&FWr%eXNRW!}{VZb8KWb7AK*EO@8{W6tyyYTnsknJNHp#yLm;!u2J2+n*;sbo3YNa04$Z-_FlEl>*9P4Dx zWl{~5Q62n=n;14xz^%5F8O~18o0>tvyZYn7r<*og<#ouSvDuk}-lJ~GgM5Qg-@PbN zzrD)t^&jJXbniC)8XIqZ5t?#Ds)@}Iq2=8+`csK7L{}tqk|oWNh2uQQd;bJb& zcAfvr9VfSeR?j<>K1|&CBj|HD8ppzMPAQF9pg$G!5qLO~y#o6;bjKWreMmsKt#z#T z?{Mtiza9eU432}JD26Nj2k@0I{~P?j=kPcQTdqOE`I_fLA~qVrlR^Pt#DBxGf8nc+ zj9Q|)aXx(2h|3x=u~+)0PN0>t(?J!gZ;J{oXsO*&FVBeSqu>KT^}*%q)$e3f+a&K) z1~R23%zoX%AgoRlU%!qUb+~n{3}{ON($4FH`X7cxI0SDD?w6J&Z`}gkvN302B1T`2ue;#~g4Z4DjIy@{vFJ7;GUO9Uk$>@7P zD?12?Rts5lC8nfE|K%laBPS#)1a66D!u5V^`533vgPlJ3@Vp2bD+S2O>_(W=e3Sd$ zlu*%I5|B(!fXGMShODYMJfkO%aVv>Ah$sh~2N;zuAkw(OOAcxXA#}oL#8`aMVOkv_ zYa{_x_PdBY^^>JgL3Q6zKiFJb>pZ7m10CH&`~iI!b4$PuRLOUCvlM_x!7Insppiwc z8%`Sppw7p3NEEhL^Ce?VyM5NyUNS{YXOmd*7EfN1X+7iSKbN)SjQiSBf z+__8{rJROgYD<-Ua0_$?mWuIh&xZy%i^zkP%*wE>??&YJdi+5jpO?)OJw=5+kT}Hy z-3-fw-C@Uce4Zl!5yh|be16~A?41HL) z&Lrxby(Au!9Nz#bnBl%G70(lqs-Yw#E+{AuxCC&liC2HQ{%k|+`2}}oZvL7x={C&h z{J5Ke1cEZ6Us2c6+AIX>;7r#@!Ifyt1~hYh4_iGq2Qou%jN(QL(gBc~ zpZ^jd@`t!uTj2*YwcxjQ>wWGKR00x3%MBk^cXtfk>;6eqN785Vj;=2PNDuHF1PrQ1 zHm{Lq5GO3R0PGbwEA5Hv&e(R~OB|??!42gKv?F8Mo&;V7;q=~gwfm>P8nzr-SuFt= z6yD+3-eB{#h`z5+z(`N>W%67KmxhLCp%&Bwh$O^3mfjTkV^vpG#r?JYx^!)=vPesU zNElPFDF`4yUuOap+^>Sk$qEym(@1KeU*Ln%Tqpj&-75LZPvj}HU$uuJeVjakQ?lk5Oo9UotUz=NmXCJm@@D)n9xz$^Pt%u zz1f~|mGZmq*E$^FVWU75evwL@p7LAzw 
zOGn4mB^VlFG{!1IL@WMYP)bhc?{Et91jJRx=rV+@)HF2*sK9{{T@eJV1e6_QOgi;9>ys zWm}0iM&YfP0P78N4K12WJ-zJvv51rEmj+dqRsi$#tOy>78O*qHQQ3xN|K7s8Y`*cc z@jh@D3qV%Wss)lIoXUZXrY|2Bs%4uHOPMV9?U1B>Wi%`4vX&rx3PhEppS11n!e2Xg zN0VWu<=|`>z`hj}@WtY+Z3G-_x9HZ~p$Ky54HRg2ehLVT6}RW(qG&xwN2$~b_1!#_Il!$BPt=8-MCREva+zqiY2>N)M zAPlENt8KHZ(^QMh_$ zQRFcbu;n&G<6D^-aEEdzM>qEKZ9F1{r%!K?)7q*us>Z1^=TLHa-w38A%h$nU?YzTD z-B0*D4s`D>HFHpGY%F0)PWea5S3T$p8LL)=KccsB-yeB}+xXhwD~GNcSa5(XHtoAU zk-p0mM5=J-|Ba7hz9?k@h~dOekuy4y40xs~0TPV6-rkW-xqcArN;@x?2NWf2@&Ov$ zFti<5EbkMHv$L}k6UVA;w63GpRoQ`J$vl@t zf?K?9iyG?Gu=$}aIHo8(sPCg`TLe0>I^@7DE1Us&^Nx7iSs)}HGF}=nkcl3lKpXjF z|1EL+D?s&^h(?%ITDrNUL@ilz3b!>}x3(t7#ii(AZ)mBi+#Mz6Ag++M^ z`U#IfS69qD6OUl0s>&L9Fi%UUt=%ni(v#<9V%$>RirFbMIJL5B@cI?R;juS0IsCz7 zoY~D|{|T#d*>}54Y0u%-i-pi&mAl+z<>lD~7Epr2Q7ljovM>GpFF!EsfCd4bZ}cJj z@Exw&2A(QpLHyRV(S@%ihRxl5Q#tApA=TlW_p1frDIiM}$?vfz#oQTEt`zB1logXH z9SAEc3SuOzSaJvkhISZjZzpJFY&rjcrYscw?WWELu`^3Y4d{dB``E`)lBW!V;xn~{ z2*gD|?Lg0PcXZ}&xWi=fyV9a86ztC*SzMLn@He=a1X!Ut0F9LG?ez~KejIRSo8C6- zy4hx2U-8i4n+X)TCx*zvX&1?5N*>1)tG{1p(Z(nKam%;MUL_ zlUtJH{0LT8_x|aIY@WpC=m3#;Apfc3CwCBV z`^p;&gT1}4i4{(9@2F+4m#b9_HPNB>Y3ponF|xRn)04`$OJOIN3xFryjhxi&|2b#-O*OBrXz z%Y?#g=-3VCyK@5%flj=Rj06+fqVw2EH;9W~es$T*^o*tvDa^^q0d%@W!lZDvziq5F zQ$%x^NWy7RSMF#}w>eI}`P`96YiwVVgOQOu!~q@CPtM87X>WH$f+K(xld2*Vnh%kgT5^ zI$M7jr|CsaM=vqO!1rOC_Lc59KZ?gNxqf$$t;$d=~ z41wa-g2+%mCN~{WAL3#Mk?Z1bOaI6F^qs$2-%u3o;(G|3Tl*%_*`fS99M)n=*UG9~ zj{jpHi2LsDBt(b=GplhWG7HYs%ILsi(6l=srfi{UCjO|V7scAoOMG7Zoooa}`7B23 z>DKBoiAlOIGoPqG+1JhK1;2X5T79{6AnPmFdC#_lwrkGJd5l~$yRGHDL0ehm3xCsv zrlM@qjEp&((~ZneOo6)v1=5xSAn|So8s(?+KptCAU8cn0*R-?=r)jW!eKB5f5&bV2 z$kg&gxsmQ;&=2CLcbM{wS z(%&1BDR*8IGFGi*yuh<6KmHJyqu+)A&x60M{DbRdCda_6@^disSqD40n9F8cJQo?D7T~xeS>9hJRm0DcHBjKP#8<9;P=v) zZwba}cTgN8z_nY+6D^xF@jYM$yH^Wn{AP zunny*&1T*Awr&Gg@ae(omTmJ2GU@PhtyB^Hfd8y;;q{(3!WmQ6#crDB?(%!gf~PQ` z#(pE5RD7|f<@dgWddqJP8`ukO*EYd@eMZ-kOO=~nC(da(a&MZuOB=c0C%bBw7Z9H? 
zvFhZ#w;_re!B%AJA1aSGr`STxpY9#{Y*;FB=ZrN};wmC4pj^rd!QlNvRQP$o5VkXN zYVo|kH%zB90fSRTcZ=HMf&i19nv3zLJhp_^VJv8Cpuh0UaRDg6EHAJ5R_A~ok^?irfnJy0K_{ln@o4s`PvQr`0b0nEjKRr5ToI5?7J^(rwgPS?br`?A~?x@r6VTF=SJ^zFpUmM~c4@q(`y9EbV2j?iO) z7*#w4o==RF_eY)l{1h0!XVBvHT-2p%E|8o9TUk;fWL-~kkTx>%`1^N@qeh>LgZpB8 zQi!+>ha@b(qWzCQQkAa&Bi^bY~|6u-p8i$kBO6 z2}kbkkOl$suTp+7Wjog0IQ0$))-J4$@P?g4ZZHCQmv)1Ly-8TWBMJ zg*QX)+|%0WqisrFuZ0Tc$JRM+kL2a})HsGNkR(N6$#=T3^mgb`tTj3`TjZzr7A>L%oVUm|0|5rbTB< zxpw_v)!aWk{@e7~(+uW({0kE%rF{ynm6DLXC6Nm*fPGB$&{ z3QoVUSZpIHgz=C_c*CCof|EBZwm5Pmf_@a2~gD8dYC0qC*XbPYajz( zrlzOsfpL{BY-k2~ISt~d$JUmX-&>ax`rh^c+tVhV)9q`$46=Y)YFfUw)KT=%eLk9? zHIU$mI|>bbTVc#|JLQ&X9%sVry^9B5BpbAF1DymSC+WJMBg*W`7GD6njdlrkTMb#Av=h zL+0n9-|)>GZp9Sy{>QuaQkNCKp2F{c*m^NxDd91R&9x$;In@58yM;Uyw&+Q^CvU+R%^)vf-pScfn=D0??A!5u zw#A!cUrT}A5)D0jEj{xpgeZLGyW+5@-AFc@h=c?y>~U7*R7h>PS3ht{KByCJ_A9LR@o2B-UzQK^+>`fXq-YmIR(-CXg__^Hof>L z#k?S@T#IlWidR%`DBfp~?-BF{oR9y}r?KhHU3FORUnQ9n>Gy+seOVus+pEARBB-bC z6%lgWFCvC4pR{6-0U2)sG1utlE9cMFZBE=k8VW+uU zy868f)`J7j%)$`sTL~O)1MK&6Xg;L4D$o&vR7}aOvY)H^d;wz z`@U_$E;+G!^NoFy=1|^FBYp3ylbemrAA|`BC6=5|D9tYp?DveF{2p{Jv?{MIV3DiX z5yfcNFcB`ArzasMOfe#-1Ja>InwSXFa0T(F3kk%!{`$%c7ZQUvV$F>y^5ah{3A|;7 zs$wT?AUo?nSal1O`Rgt5{X}&XoXMFk2C^_UmK4WvW4F!%-r-eS|52767CeAGY9tN_ zCqZi+*8OdFK#}8R-;~R_o6K-w-o69XNFjr4#Lta<&1e_K9`B7p6)5 zlidRCj2s@mOmSTSM)gjRHS`>|FceD{_DJ}PSVvT#FR89=@6VDJ|Meu{+VWzW=PSJl z48S~LYEVMwb^r9yUQDwQMYUj9NyS#?{>D%GAD?T^Uf*F68VGjwMeAOT^=WW-8VHJU zZ@Sp(k_b(~8@!C(1{8Wwr)$JId>9Duk!LPmy7id|zm zyR%Zn^fFw*;@k@Z!9O0Ih)^|_T;Tu&V*x?s-bZ?J))aV~hZ9|0jbrcB1dk_;M~JyH zEe;*sHGEmj)PkHL9zziY=US>8`>%bXo?Sf1>s@MF8$UT|wU=b%8tP`dx^=#jpU!gA z<03C0;`^Mbe#62+gK7GCz5DLlj8DUP%2W>@ib_Zze%%?k@*JRcS==T;cJ*U7)LHl* zy{D%hYH{LZ?)ROAv@7ZB^n z{|O?0! 
zmPGJ(kgA`Anl;iFs%S4_zGB#f0E)U{q9LakkBC8TFu}th0@zbuE8%dtB{R z3n3tpDTd&r47$CyaW8G7Pp zQ3%&sG}1r+4S!N7Yd5m_%78Y>a8qGw8oLvB%bT}n0Gq5u->V|A(D%o`*V#7IK5}p` zW;gg`?%M3b_Imqf{?C_KB(k*l;c~FgiT`}l$aig<+&&KNLMyeR{-T}v=8LsnZCz`R z-&m@fuDGEg?U*a@otMwLL&vL&m(8^NGQ3Z-;%rPHa#3w6vYK*$w__3@Q48N^Q0TIC z#L#(e0gLKmGZr|qGdADOGE4&AuKoERJnbUv?OhUj8Kta*c*oZ-dh`B-6lDPwezYue zNMgrt+@K^ooSmLNIduuC6}~$@vCHHWBGE zKfqkb3>E}VJek?6`b-qdW+1x(+rCs&?e(Y(OeFKMe|*mjF>4%?mX`csl|82zhgy#8 zpgAdL;cQqVMd{t!0;_x*s75^^B`m+}!AACk9PS@ZI`dn8z6;=+@Gi)8Fj6f4L3l`r z)=>V416Mq?x8K5fvn2jAw8-9PWXxy|MpZP-6P+zN{3-b|OBU=NpZB%J*Hi4#fbCvh zZ;a=7Vj#b-xj%W0dClk$Lc|u0~#n5gDV>BDmjRIgoxF*Qs8u*%UCzs?!+^5RCIHOYMPy1SH8| z{*H+2YrlGJr5-33Kfk7?rug;T!ePKgb6uTyeZq@7$0PuTD1*T~r78G7Ry`ciE8y7y z6$5GqoC;6$%Jhc;l_nF6fq{V&#}`sE8#-AZQE6#k0DgP$%mtwL>lb`>0oGFv5XjbN z?`PR^2W{VaM;OuYIrmA4ik8;lKXHBR<WxmZ=))BEfU`V?q~zo- z;kbnxw*~t2iEokW33@8>nKZer^~8B)rllF`>aKg-yvtcet?o-2%5DK`jXlTv(G}L2tb!U5%l%-&1o<(bt0Y~ z*v{itsNfC1zhvXCH;HEl(!>xyu=Q={%Ko~rBbu8QgTMd`% zUrRS-R|uo>0(ish}D5^mT+wLbCYha_+LE2vmLI6quyMNh)9?kV%wrJtYh%yB0tF4Xb@ z0$Q!^`!ixK&GzFZK;v*0%`{W9<8@AIf2x5i3wp_OaBx&8Cj!9;#qSKT-`Ih9Y))-0 zMXCQP?c(JH9A{!=gl;|bhZO{aEic4~5^k&%Zxv$Ze_xX_FmSO2Dbbl=G9~s8=k`hd zZuNo$oThuueF(j7e%Jmvl-`H4>28$P$_HW4{vjoC!qr_S*3Z;qi!=Z)K9BuH0=?`A zC+FzfE!Bfzcl6-g{QS{s+^YzZA|7 z0q*G)!{y~_3`@GoBI9pgczIwfTuP@jw$Qh7!yAoE5iO_9b$y zcE`l%lVMc;d2S#!?{!AM)}8Ph9yZ*pm?D24K|$a zOaFd%7(3i`O`2^Z{rL`4Tl+H%SW=GM9U@p8VsYI~B!XujODW)Y*1IDjZ2Gw*k8oJ#G2Xc@GSRG6_IT45nw7 z%ivze_x$XyYiHHoa&pg*ea5VpEdV=C;%ec`Ol@apZkjtRK@GtXI|RUMD{+0zu+3lb z5+9&V226=vFr3qiOPH{!?7uZM?JzkfJUNAymzVPwwi5uOQmIbl3Ro?PI~&zHX9M1o zf{TtC;6rhNmMKW-mOwGMr?*#CRrP~3#fO-7u?Ktln(FGi)76{6as^ZWb6}P9py!hr zTnC932gik~DvhD`D6{&LCr)Js)eltDL~Pp6wyWJ~iRmPZOG;k9EiJK61-NHmU4n%a zw!*lvE_XSBMr1?30#XO{$8Mvtqhd%nAkDF=9iy?G<@e9UWCYVkd@>W0UA*%GIg9v7GI8)0uwf4P; z5}2#UtF+n*3)$GLIK>UxT5=k%$hh%BNhByJog@KP@0|~}(rAwk2e2oPrA8wER3=PU zyY?GxBf&!F`ey@LWUX3vj;ZN&;6Jl*5t2Vhc0hq0T|+#?8ZiO3s1Uf=>@>V!y}GRI 
zt3LTcN(j`&CEIgR)?9)aY}|5Rom~Uvsy+Ifv&y(6yb3dnTm+y){hmPf^YI&d z?4SnE6=3c%Zh-}u0SjKQujFB`4GhAOZEdW;%7{A1B8#Ja;KXkPA*Bq=2M?$sz^=%Z zwOIe@N=_c+zr}tJpvPEYe|Bm27RTI#_#DZwnNQr`YG^pbu3|$tuyi}1O4(_Esux;$ zj>){ff=e<{qq6}*(+6cK+1laVkeoC0jT@QuN!Sc9G|IgQT*#P*j=`OenyQy*c z`7^V(AVc3m^=1bl*q`I)`D1BhZrxC`Cxll^mF7zw)M0niV;8{^03hTBUr*h4r?dbe z2mp^MI^ElQHrJOmq7c_H{@J|LC4ahLwRk`K#~pUS&LLsS1;EikDP&OAE@Lhh!KSA4 znkUTeVsYH6WO9&HWkAUBHpxCl!mFIJqfEZT6`M5Q`+3` zc3ysa4dee(LPp(WoA3G#Fh)!q0*~}W*7CX^*T^%mRy-Jf%IqUwO?JQLwm@le=E6=V zd?jKGbfPe`cgf>KmfoYlM#c7w7udf-ry%~i_vtSk0SVJ$c0rHNGZ7}g@?U(S&@o@j z(dX>`ZGS?!#`8Iv7axsy_MuFZ7R{4{SdWF5sIjZ+;`)LUTDhDV&&`gLMY}p$(!A8q ziNC9*m3&>|tA&<@D{eJYG+Nv?0FU!4G*~8Iajc~9dI4RdV$AZJx6gJh}pX=x@ zQZ$LGF>yq!L=e@q=icA+rj<+yq?EqNH8k1hnrLA1xAl~!84!O~u_XiVQd2m6V`GNq z)vZ&Ya|(Ol*3CE9Kp;ou7c*OV&5MHkq^eep zs*HUY;B-#*VKdXtnFD;9GfrV4hG$MUu6W;!50_fHC}bpY1akQ}v^FCpj2SUX+pzO+ zW@|KUAt|GJan`j_Qx$xktr{WkbOQ}69(i=QymFT{s3EKo?>hOZG!GqkYj&r5rvmd^ z03Tf%ZB<6|au%%ne6AK=Qdyiwy$8{ZiPq^QTE+S}YUJAA*-;9%g97fuG`r5CHC%sT?o`0z zl`JkE^dR_w8o>>@%Uo7x($m?XvZcwPx(}YSzs+r_Cmky{%et z?IQ;FQh&>oF;3LI)K96#n%m(9(hm}D>B}9$4BJq7DJWNH$RKmaK?tG!PZw;ycjNrH zTh_Tv9Xwa!Zs~T1sC_+?@xMK0OP4C@KxuqPyP)1}t>lI=0a{wV(#q5z3E6~^uH7cs zS+3RGS3f!n03m|huR6hz`!M1X)KB+(3IW9VRTL@_glWe9n7dB=77z33(VV!uY5zp# zm?{!;Y&PDa=B@XKU~7Uc@9%1l|DEfJ!UV45XPKCI{?8dh#039cf$6`P-Wd>(0`#MV z04*mWF;PH5N=ialf)QAy#V;-;Bn*5hF2ye*C?X;B-yYdv4k!<1G5h6TZLjD`zphk65-OJ~j=vlSQxGx5zF#H&sZyaIE-GqS`2FXPI-6|hxrPz ziwx^nS#P=WB7A6Fo!hfeU>{{X zlB+2+W=7;Fy?fu*lmsy%+8;hH(5T6AJ$c@FMcIAFV5bN#k26rym(yR7z~Sxm_vsUS z>TJ>-s`rd_1I)yp=BiyD{YoIP?j85`c)RY4o>x6{k=4~=9tGr&QY&J$>n;nKL?Z|A zBwt$IRY-z-o}_%1B%t9!c8{C0ozW)#^#lDENu?q#io7q^9<~kT_IvF#z==#6)|HC6 zkD~p9`vQZ$hFwd_wXdCGJ$i2c;=I-^*RH_~7i^gjPt-YxjK|DLTitbg@wz#!^9AX% zuxbrcz5p78WSjfDUSZFO#h#BhUGP`Mn>*;csHroQL^TOtPCc}GjCrr4YkAgj_jwnC zr&9Nqf+rJdmp=29LfA@1HCja~Ze{n6Qi1EQEUdQOthRU}K1torB~stDg#uS^aC??_*~@@ zj(zlt88g00dknkV#V9;2vh_3N{)x*3%^BThpJ04am(I3YTh&Nk%&X!QBahVwBhrCb 
zvxTVAOd9@B=ih2eR~xL^8#0QsQDl`uDbs8LzJOHNV?ow0wUyHN(>qmBp>PeQXDOf) zLfff$#Yo+#Rj%6`6N2(>JH)L}O1U8t2^PuWxsJ~r^ER>*qKv7@$7Iv!yIRKFmQB&} zPJX0DUzCl%S=YW~x@%(PKQ6eny;htY8*;BmTRVcV%u9c5%Y%77eqp5aM)hgi(xX!y zc$DTOF-LU6$XwcsMI!#qheV|vk9hK$V)!Z)q%!@o_;^(6my4EfH#{q>!{gwAIUn?Y zZSoo9#S=Uwx^m1E!pKF8f3x_Vi07^6sy2}bE|}J;kG0*<8WdsWMoBTs8|(=fKT!HJ zaQj_qrZ3K;Md@|^#7ma;nDPNUDU}76jK>;ldi{k}V>P>a%*B-~_?cWiPUDuw@6w*V z+{~zbp`)wwsPJd0P+D82y3Mj=`<~@7NL;YWuhq~^zb=-6Tb&zK5AVFYz1bp6hgZ%eaz^<5TjfxbhwDRA{4CZLEL+L4y6`R2 z4}P?-*h4&YTS}luzfp6KCh(k$*;O{EdG!mTXNYN^Q|m9otJ zTXX9$$rU<}7en3_@2m5g;JdwC5`J2%O{n*@8-@(uA9`cu>=#+5N31kTbmLdzy1n(| z%G#X?*%8~pQF5DTCD=~kC#uWP>ZSOaTizFWXzS1SAL>=6R%}Rv^d{(;I0y%1Z>Rmd z>Ef@9P@4qZ=lS*Ev&|a|wTY~kW+8ePAE2z)_Z+AC*&0xn#1q;fcYeP2P2rV23Dg>M z?xJ=mJVdRLsPGc87iMoJ-RMbKlH+>&{y5 z{AjwH>N<6*YVZB+Z||zggx28yVGvXB9r=yBEs4&OodOB^C4vSqqDC1CuguSnSnX{< zjx!Bn&VN&$ud=}x8uV&`GU)gRe@>(5dJ-mcdXEwIiePogVuD84%7I+m@;XL7CP7H9 zzH4aO7^p!5`$e`;*s`Yqm4+B^*$&X;qmDE7JP3esP5%OwO$}x z4EW+-9#yE+(-b>5<<-$dM(+*LWz9HuI^(%a>%~dbP-3W&>}CwtmJ-MUlFRs>#4$DyZoH>NJ3A|iA)!cGP!5PtG{1i>DR zu*;x|6W`XTMl2nGcdGFnK+^V9Ow4#?7 zEI8S9BD=o6uY%53JKf41bXLIysIO4jS#Q_tqDjFq`y(lPteWvMhIbo3U!ke&-J4aT zvaT3klNI~VE<9%i{n!k^W5Eznn&Vt%p)Xu4lvH209h?55&e=*?v0A74SuW_cZ*W{~ zzApC|)6oOP-u-ekb6`BP&Yez(uRj+D=2aW3MG_@xkSQ7YAgv)So>jv)TsDq%GwgtT z(27*zH87bQuZVL#?{U%84t{=tV?`(S7e$tbY>Aui^ z+t`{J)cc38>!^2P7^fgrx9V~?3UQL0O1)jLne?3?H@4u3g>IQVEJb^(7Rk-A9FAUJ zAjCb#kt)HEmEfJiK(?BL=EVH22XnBetFoEQm}0DWwi~_Pomr%Q?Qje}wVi5S+pKIx zlen6%HlDU^}oh0pBs!x1oKPF31M_qNheg=3s6KYrb&YTnmm0*(v zfRY;WH*!;h_0rQ{pVo8fbk?rV;+?r^DTfionp*A^2=!=;-GoK0M>>~N0Z%BvJv`-$M!dC|7h z7#iP;PaM1VcZmx*PMl|K@k-sxf^MQhXfJzrhL#*=8rhoAHtu5Z-h0Hpj__HcoJ#rv zzgSS~qrT56`7Y;j{nZKwUpYk@_rrVZc@}ASV~&E@)Z{N}A#(W#u|M>;3A9}k20yRm zSd)m>CjQdVF5=`&dNNk{MakF_mcv#Dj+!9x zd$|v8u!cujnF>b3m6CKi9IroYWLkzbjf_3o#+OK6u<8B1QuO#`JoO*{RuHSnl|63@V5E36J0jGY==yzYBW0-~~;#Vu6Bss>;R zw)2ccDvtzv%y=0nA`@74aS?}?i3(Un&gCce7X(10yXtm1`M!+rdtvJyK1Rj>)Ba1Z zE+O`NPjxR5Uq$WcGRYSmd&>r()7HQ*Q7lckS>2jmYBs% 
zS8;37bZH-}{&&22bmDK^H$q4+)0`A^*x!R!_QoFf9?DBP9+2Q;q?G(*&*p}KdWru+Sy<@W5YcVy)Hy2|@}BDMrkJ;um_^?R&RA&WToZV3}f&PWdl zdu7kBbLG#`Sk5A)w;nbmn$;E`9eD*1U9vQnyE1r=de@KcUMF_^!v94|*fI6x>hYP4 zF=6q4!y0n`Z^qa(rKa(XNy zVi?ZPlTBDEh|rOO&F2P2BMw;fDt@Cmk%tB^!H~vAKrxAdF@Yi-NT1mfQ2g3`eSC;Y zJW*PFhIy48hU$l;lvP`%OYQ^6qz)5Zd6l&%9JDucez+Xl0{dG2U$3m6p0=Jgqxf!| zPes0z)|byN%vmtjwO{AN(J9o#ZS)O{m#o(fn6=(FmW}LOE!vSQM8~x zf16ts5A6=uG+m`fBe3}j?f+OvC2J=8Tb(zE86y9w<+}L9f8O!)2oo;}L7SJ#;p1Hk z_Gt@si`D(D-SP4=uqb;f?eTE=a)Rvf9CZx1QDsUw8L&$wT;v;eg!B4|A0EfHjt*{> z6I!OSx=`{8??k+oJEDLaKJZn})zK!I@Iw~ryT?1vNn~wGVMq(P(&};mmTxFf4lafvNnOhT-j&qH{rSk50jNP@X)(>~`m&|yv0rL#%{T3= z(?^FLVdHP5Y&d14oWZ3&cj8$m+F3PkVq`e*z1Af@>pWMW;8F^s2nR_j(UF*oQBSK>I^oy+g|m@q4T6KiCrSk84^P*T<6K+S zBuNdDLw)s7$npD&}e^fLw3Ix?MGgtcH{2r`1RVxj}T-Sr!j zFU$l9Z^GZ%^L0Wlh$g7%c1k2k#ZMXoi>SF1wod+4q4?)=9g{pw%?&(XLK~kH3WR%V z?vI7lK@G0f^4&+NSejWWJCXN*X8A$lmZC5`v~6Lm`@J~r$4mSb%XyEhlLFgB$IT6M z$GTtT?T={b(B4e;*qMSCg-O>b*}tGH%j-*o4ddH1o?x9Biuug0|WI zm%<0CHB72Cd}hL=lQn&D#GcS}iqj#_#<7wO(P3nuIk|y&*Ua2|#<9$MHAEq{pXSw< z;+M(`cpi7Cj45=$J9s|F4~gnfhD1h3>z$G!&33Cy^jr^(>xZA0s|VGzy%!Fny;F>I z>8nS6`pHwzcXM5h@F+|G0RrQ7kLwqz?6eq}v5l(+xXi=>!Xq;wMq1|7y^?hlM=Pjc zIdw7Kdw)Yf04Si&Xab*`A7Q9T+VczMobgFq52}gpxJ>iw1s+f!@xMBlKN5&s@Qz66 zwOgIbi4do9cEumkPe?=}_KLqTzsUK#t>0hh`l+KCb#E0VyMWL57fm=I0h!OZL*{i7 z9YVUqV$DD8&7-{M{~b99ixE5G~5qNDTK zp^QMpl-1&~SjGKd7!v4v6#t+`ay!jYe~W(Q@a?QlyRL6)7`eTx$#4KkQ39M=dKQGj zr@ylER3XljjGX3mBd;sI?AF;V$eFz!#7_{qK^`R#xm37$ug!)~$uBri5IAc=CNlVQ zI*j3iF?K`>_=@_&o*+yY4$eD|;vcdC&mKY@I)r@U%^nhG0o5}`3H)@u-1B3!vkkrS z@Vx_6FJQgSbPIa!uG?_Z%Oj9X8I?fBx{5AcQP5U0fP8l)5X%#&Aj&0IkOmf`?wfq$ z@nnZny;ml(pd*}mxoCB@5i?k$KF93rA!RQyw27)|$L%y=Mn@(_HVNP>QC-)h7n;#v zP?m`pr>xxFi}s+zE;b0VpZmx-SsHwKTVW}Po4L^(F4k$G^LO!D5SD+?7mR%R3LKYPbX>Hi&$h z=HPUbL^@#OHwb)4nZJ7^ z2*11EZuJ-RAv;e!oyEWcyK;MKPwSaJ(o?B_`5NwKetf7uKOFvC)?Io0W&O4zfH|#O z`>8v{{%dbC(Tos=td^dBmNWu29Esen{_;Ya?|RZbEfF~UfU>x-U1Zi>_qSpe?)1-{ zi|BALNL6F2ahNjnSpXDhN%#CNVG%b*S(&9}ne1$YCm_La0!|?%gDQ3^)QC(FHIo`{ 
zmCbWw)lxQuWD@DJ$NQUK_hrK9W@gU}KrXKs_04gvleg~A%Tb$jJ?eF5cZ%yD`@Sn)qdJj3f!TV|3 zljeh<@?Gl9JnH9onZFrVnYa7OUQYHqE@ztH9N{g)(!j+Q>r#bRx{L)6ZY?JlhUIE7 zu4}zy*zXrTeKrdZRu@IlA;qc`gq%wJkUBt5-xdOo<2+;lO&6+7s$|v~Dz$tv;4;bJ z1O?IQmhhILEb3LSWXLPeM@m>6%7dYE+OP74pIaEevvdc3$LB{Z-M5L0Q|P&PfW(iO zCD5mt;FJ?QRDwjo!9_hyM$g+qXdo;K9(D0z=fORTlnex_VV&hlAE_gsP@Z0PW~iP> z&a=VwM01b=i`m=Qwa2r)EjS(*uNjKN*48nYW(iHa-kc2vLYzE}#<pj%wMBS|>lRCu3uT(XaBdTXfO#q=?(nK?Eoi1xaG_>L}q-6ixQT1v_} z@ge35ZnqMMKwAX>d+j|!t0QRoD(q36+voB^L6Z$<+dKHKs~G+=bA*pQB;?Wni9KZh zjsEq6v}!7IqWD8wLybTbI54q&M8MbDI}&wBk(pVBPzRb$54piJx2#aDq%wFC9p_Q} zGd7w9A(SUQ8XBsncjphBGw`opEbE@P&X2=W{fph@yWPv@*UPsnUYEmHF=KFW(xvqq z-Ii{4wbe@~oADZjV$(WFc2dCnF!l#*@Aa`N5$E2I1;-2V-kaX*uJ@nks@eYf5}qo1 z#rMvC`aN3QV{w{qY+H;`gx#+V@P_&hRTtulJ?9Gk^w$EXzxOJOkDHb^!t&h@D>YaZ zbYP{y3y%+11-7wtrg@7C{{HvIAS)h=$yf&u8Bhz26BdvM<7n)2=mU^Rv6)qfH$a{p92uXcvY zYDqWL($Je2sPXltOFrzLy)smdO>=K)TvL^Ckyn6s7>}s04x@H=R69`g=%OeN?ImNI z5l9{H?c~s*F{rC$s0ntL{-At6WluR^K0j^gPj`3_y}zU0-xBw0|H2D3jo;A$<~U;EwQnTtQHaYfSK8BGi}b(n|H;*F3S01hu`yT&E)ThUj#u-+2C&&W&&ne7ETr)I z6hvL#>j1JHf&)XMu*uJ)Fks#orZ64xE_=2?-hQdtBJ8?)ulNJ}W@-ZB{!p>|jmXC3 zLmXs{s7g77b{B%2baURY-jOJVi+v6TNmvva(~-x$WEODgl9gx10lZv57qo?fHG0?) 
zrquP22*Y4Qiqag}ix)q@Vj)56%5_3V=@?};b%95Qv7MVd4^!9e%9lQrmK5IThl*L> zu!355SNTN0HL=Ws8a`FJ0ND92#HZNqd3TV0yl4cXx`=xgwqpS5^?Bct2z~fz)!vWX zG?V0!lc(_A2o%F<2^bLM<@}7AM7H!i{8=Gi^;$ScV7CN!dvZd3gphADyKj;QXr2v$ z6;@R&k#44=cY|ynixu`rw>PEz3@{%(D8_#Ndo)?5eD5%xea6m|hzx0B=Q%0H>!f&K{!e^j4v?0*+lRP{{Y#w!^lfXW9tlnA{r#1QyQZDU6KxtzK4_#ISee6e9k`>!uIz)*fEyQh{4e7fSD#F2DpV)o4 zIDR;cNTXBTZfILJ0`2fO2&v&w-Wsc7+rNN^^Zq2C(TsBF@NtRF*WbwPvxAP;MgKhA z;QH|tG3URiBvg%J4`8|RW|HUFgL$e6guYt3pyhL(F>bev}{Ubf-f#1`ZS4I)=Cc^9WF z4kHJ9rMS3Vi`*C8lx=@Vm~N{3!afPo2D`v)VOpE-7 zo*S7Q(C6~_O~iZSoEE>S`7rpXyFa+TsE=18z=IOYP2vF)zH#2fYi5v{rlKzcCa5R{ zz*F%Oz2azWfK8h+ao&e|WB3MT>K_Wkg|qwa*=0s7<(e#oX}UVhHS0Dr<4Dc6apA05 zoH0q`e}N_#*m&w(JuJ1W&|>hIT@X5ZfXp--h}(b4M-)4uaD0<*DD4@=hjQQc zhCK7b#SV(NXb^zs*f9-3sB|Pf6px6^kl&}McTCSheWnT*&7hGQ(zC)uL-Lr+p$ru6 z^~}e-;g1f@))Rs=_So>|&S`|4-z7mhWK#reYitN5$n}*!c}LbrjX^7LM~>e&_- zT7JULlkHwq_U+KOFF&!-ik{F6G#57aEtydOIA&@3#S=uA@J>Tmi^1sDnWfDmT9y%* zlpqt~gP1rqgg+~omiWyG!Suj36hzQIMHrJvFZynBr~0YofY2EjNEIF;1_KEK5oU2dJ}OfF++NcIhu|f z!6f496&+7ND_8V*Mdx_McdTCgK z1NvWcGm+OSx>ic^BcB-Rr47%*?o=8q=^&B)z_Hi^Uln8b4Zb?B*tx$lB!E981X|Q; z{nZZl$l&PmiGW}M8Gz*Qy(;nlPp$O-{_LHn|6h6H;P&Z>{o+pgCm48C#rU5<(e#2^ z%2b*~z1G{3Q7uxh!6)AeEd-zzxebn5W-S>u{I8!!F+bhXne-pH|C%KOzzOPa5FBZu zt^ubU4sKZdJD4Hgo}7rVO5Om(N7B=PWN!@b@pQM=N~~}~)8s@1KJae>5uthVau(oQ z@4P3@3k`kr6!{~2h=t=*B}?z=pK!1CGj(o}?OBO!@~1EV=NEqK@Z;st=3`1IjPP@n zLoL9@od5NTkI)7BJLyfeo=2PhllYIx^9P4nf< zQt!J)JG&Z))TG=Hvn=7g)zMhrWk7Ywa*7W?RzBXQy94~w!pOYNycSt;#O;%3iqDU; zS%j?npPf8SCfo$=c_x6D-B`OU@$w>_oTL(hNV=S_H_JkQ^E3$wy9+Nc17x^f#eX7b zrvo-BG!ra+ZEm)IdBu>x7v4pXJ#JUr3~ldE$dn>qi96ptEWP7S4l!iQv$!u)hgt*t zQ=^@$hqZGY^CV}tr^lQe^yucwb6x89Ls?8n@f%`JC1J?cUc~)VI}_fpL0I4)_jUm8 zxqh63z^4}hHwle63Dj~&&*mGm2>3A8kGpd7wLiV$0&Z-eksh+=;Dn#LIcOIh$_qB> zj}y8=D~2Ed!QAy^-3$QsY~2j!u~0RN_N{TrV(44t*86P6GDFp1pfQ zSOk#lJp4@pABY6zC-a*=WP1T~{Rekxse7kNkS~gy zvGwD1q6V^Xc^HFS=1P6NrX-ex=6XU~y>N!p0S@C%_fY(8DgfmNp1e5gzYOWB*QCr6 z%!A`x=woO=A7W>*&tvIB8O`ArBO5LVPa2z6Q?Mc>GIb^Zv6!&{`UwJ&Cu@H|j78lG 
zYTuz9`0Hb}a;)xWJw3nuD)aaM5l^Al1H=mellw~t9WF{vLYM5};p&SCk-O97u&$Ha zsxZ^qUx(?}?EI}?2oUW#p3E54vin>MtDn`t^hcFZ9t9*h0@n<1$N})dp6UmG;9DC{ z<|(kZ9?oC5HW7a-s8~@G%RH%G*?(83>td#gw?5ra(OI0FdvUTXJFp=IM*u{Z$yA-d>uv_74u%#i74^NDE(J5o-%V7SRV6k3A;R zW6mzslf2l&WBrc%hIyuoIY#!K5S0gSdtMHQ(n2pjsQ$PxxkDI5>bCps?sD32%1&3i z=5|*um_j6b!aF}qE+wqK*)~;UwC$ZOrL@r-{<9IPA)&A!2(o|BLxS(b-np%vifSogxO4I6gLu zy0tUnrd<;CCKHG6!;-&;>h%z`H8!>gJBBwq2K^wTglsLcbU4IHWFwp?7=t#d>WH%+ zoQ`>rc%OxJ>=unV3h!b>gTL>oT73HG8Vn0fUor&I*|4a;o%FzDLQ}gjZKTRI_@}JajObBJCijNsVA6hh{==lUz7yULrt=mv-2tRZ z+v!(7%qeQx)EsS@rm$qHKXHpySsL%(uJ@JmC>U?1E7A!o`pGU*5 z@+){gR~r%UwdpUeX}CE%)t@ySWYplUefxIq7QpDoOf&;CNF9_8WAaBP@|10i>8V2h zPEawbZ(h6)s9RzBxm+7)JBav0ZR%zJIpW;*;clrUE`X;zMyRaFYzp2mk;XrtFCj6; zxwYL??iOCVx&XJpJ$PEwYr{0p_heT!56^8BSQXVyo<;vjqI)q(vUf3}TI|JXM9?N; zv!8h8IDfaa=xifPG^qYuaFGx*$USh}7{quoL%d}RRzU6FZiK?fQTiEhNoEP4F zmyv^)-^k0rx@mTo^?iM|L+f3Yd+$yHk!>a{VWDHI?^=Swy*J|?&HZupcN^!(AOevy zcC{1j9?_V^)hJ#!uIPxNyg4JAQY_(~ZT84N8gV5C&(}#*IlQSIMuO#qhus&GLl!kZ zG8_34S6VjO+1Sc|oDj*sk2k6l=VjeY$3+%v{#t}_ph zp3CHdlA=_JVe=!t_VV5jVkZN^)zG(R89l87Mah4BrHnfpxU{70y|O29Ec#byK5i;f zAt*Ff34U&?RiOwZ+O1ldi7VYJUrAk^?vRQwKqmd2X*j?XC+Ky$8SYtXNK{UC*Io6~ z@Vz6=tR}6S?0QIVcTT=?p^9P&+SPsN=mKY#^718k>rl|<7Bppj+`#rxrlNJYg+O{d4)5%DK?>^qpbjqZ1BhwLa@=8Ls$CNEc{zH+%!=7GQA3!Vs^MUgI+}4Kt!m9mNh?i1k8ePQT1qHq`*JzRzN)EG}>`A;Y zx9TA&GreH(JcCmXis0~5w+T`)>tvuXGZht8_LyeF3E}a4LGeYBWIn}(Vm|u^47?o}V+$pz{j`)!{a7sYUys$^UkR74y_^3os zm8}j%liULOo06PCTh(&0fBX*Jb%I4f#nf7olmf%5=d1bas+at?-t1|9dD%A%Ge~}u zTrC#t)tvXdp*t|9XLf!)c@bB{h9Cz^vPayXx@jf=?d*CX7B0^Z$v<_Fa1yINl9Dq7 zE$24X!=1dnn>^Q7?4ndn1ZP{Ou|OinlJ1y7>s;HaQ<~4$E@0AwjfhQjiG{fuj7aq! 
zD>{sa@pGP&%aLCs!u*Op0Deh8D0%bwO1GD=^m1W$wL&?MA=d^;-+eHVEu{_AC*?pbo$B|1#*br(93R`n6 z$$S!asc(_jM5zamgKHYEt~D8ifk$U7W|!F6T1mrdz>5SFPzOEyMaDXv1qS1KCT2JcSMwVQjGu~Z45f@ElvT8#q=o(_XJHm z??rCVt^%4)bsHwC&5kg_oW2QWsl+$D3+xIf#8UT$feUYXnv5m5Oxr05Z6B->z&)kc zqc!W!l3qN1Y@EfexwdhZn9u5jI>iuyUZu|tBHdc($^Uc?c6Qt5QWsSztF2?j_g`mD zykTmPCMW4W`#3eS=MWe|ZS<#g{aixgWm)I5`2z~GB>EHya=?)bH{v&yoKq}VYdE>+ zC2F42=~Trv1lfkF=+~H=er%}|*^4vtoPUTDH_>ptb)M3+H_IN~w6E23{wdmn+yv{a zb!>25sYcSr>&-%jT2o#|-!K(QaNX3(9Z4lCC4&EqfDcyABnT)a(64oC@i>AHc)9n) zpc!rR@pb2&-mpaLN@hSJqn4f@XD@sa$QHhVPdt96>M|q+w9TQPkHGKqcLJZ55V5O%?_@oho3Z`iHDb#PbBO z^o{_+t?Y;v?#|{@Ziy<|`aGz~hpY6~X3$apx7WK|1(!(~eaNe3)l8LXk z3qxFI@No#Pc2MX<4Ey08If%Pckg&u+&eyHGB(_Q^KoMEW4b->sa)-7Sa5Q)JvkYH{ z5p80^KC+q0GYX~!B_><`cHlXpbr6ctq~G2xV1-n3ajzcsBuik*g@ovKQuD88WfUH~ zFu}cbtyjG+&FI|IbJ6{}?Wb0;ckzN9;qe569RcrI-eOuz9cOe*o0-Ra1-8UNe~U+x z568(a2J_6>(N!JW$bQTaGL{t?W248)_G8fHYo{U|{h6|@vKL4~IbLr^HKhhpY3e;P zr6}WukTX$-b;4W;D;nEZ^F*&4o@e!mvsM$J9bq3or}>^@4>QODhO|XNfAq{yob2iO0jYdo`Y4>R zt&_0jPPNbxc_Sj5>eD&dx8U~*;}95;UB~95Ahgx}*gYfMsUhIN6m8K~8bO$FD+g`a zxf@saspFVPHYtUuC-0|>UHks`Cj2{tW48^|bDv@JN-ufc^jjtpFboy$^=<%H$uD88 zMFK9Al%Q?g&Gq%lsv$_*3X{#Z;1VAY?GLlm7ouSWtVy9ul>Z|a-Ott4%NyT0PKd0EUqt$&du)y#6QhTBOKHPbqy;KK}%>Z9Mg z{!Pc+?Th6cPB%MiE6Lf#NyihbXd@BZ^b|b#Fj;fb#b@T2NOnnYwZD@ops5;{;4n6> zBjmol`XaN&3m{DocUPI*wj*YW1)sSAu}t(w8l<(9uLUF57gO`8x4I64zr5O;zrmjb zZcSoNwZLT8&?%d#$}Mf4*GKo66(5$(Hu5N!H0?>r z2R3!VrT2!uxA~VTRQl(D>iUcCDR<`Fpy@l-UT3*``2nJ;du7kFPB?Pq#(OA-jXmgu zTKFt3oAvR1vRCfd4qq~55*q*xYC-MAzlU%Bgz9+~Ojt7PWVafEx)A7S$sw&j&yv9g zFVB{E4^lHUov5;H%)PXo?CQJvFx+j~5!{S`-~4|1&1Moff9q_v zaj4ev(`MFF@Q1}0hZF)~;V)F!%u4|07gY`Cc8Re0o_XFr`lr$8K;@6vd=UuX8^&JT z4we|DQWa*~Q(m(cr?WMQu?Xgv?&B)ZHsqE#y7DVuPhMhE68>W;b_b37Q?RCi!~btMk~8MhVXYS-PcAyY13sUE9OxPL z5Tm}+9YKE$2{1Wl(DIr+uljBPGQ!jON@mH+FU{T1+sT_LKJ{|+E|k1FWQ#`Zd~bTp zrkRKaD@C&$m7q4jqMr>3HCaOz&oRG9~)AM1VB4WI_NYLxx zPe6f5zEVWqT|L93@^O$$dI)}NX{IH5iLA@O*gFPoUJrIDgb5i;Gy2F{^SKHnn7G{WsPzE+ 
zzA-<@dhYZYi3q%0Is~2zqaD3a8gu%IXEWQ~^jmN~cmGh_T;{?1mH!4!ADi#Eg4hIZ zi$cwZgXh158+ai34w;=%g^aTd-CWTYAoq2fxFC9`>5^^-=DYlh&5lYe;LPvrq7g&juM+T9XDE9-`L~8mAy> zP6&0k6t(_gMd(|MawdO53UXfJs;c@bI0N^p5FX|KiUJv zz#;ItWMID(?|nvZ={h4EgRcHX2ep3SXS>RQv%c(4%e&a#(Dd#p3&#;gMS39Q$W&Q7 zQf&+%KvG;`@X1aX07UVC5!vPWNg(1pLeUx#+(M;9yzxQg&OheU z5|Jq1Y7frnG@Tw~ZwHb4R`Zuw`jZ96{8##S_#r?0RW}eF0Elh>Zlu*PO)>ZV2cgyA zjpjAT7kdI9^Ju!=(1U0^=P6PA_rbX5N}yzuFlK<}U){ZE$vugzwuF-B-Sr6hilNqP zh)=AI^_AC-0mMxwjv7BCa7rf3ocIkS-zhq#!2q9oO8z-6^#29PteXowQNN4yQE4x8*lzRSud4q7JrtUoSK zw;#M=|K?&vB4T~8gtDfRn)YG~>3yj4kXYE4b!oF^vM>oDPO8SbP$X(Y1>q6an` z@9u-A?K~@%be$JeamAx4JY)x#C$7K9(^P!sg|8o)+Sj#ONqTW$>4m3|O2h8EMf0J1 zV9gFRdAD2W2wfr(uYE6lg0Dzoy4+B}OA*rK2f9dmi4`zHFma0q#?dWT7a#I?SF z2}rn6xAKL3%M#(dAuV4Oy>r5JSzXp`k_Mn3n|@^*T>&d2dw~t7h}tgnA@DUlyWwic z2MP4;$&=2X9mOXP`dv&wI$*ht)s}*3Ld*dP4-n6L{5Ua$e1{#&H>$)$g%wx$cREsv zbIuX{0Lt3WHz0S0wSHGUjEx67bBmV|w6Xt?kZ*DwH(?r10Fqx_P$FA^51huobnK+* zri`hnUnS@WxIC{vxhr_RQS4%MjH3Sy@8NB{N%n{9w_DP#-eS|zvjH^DaZ-IELJQ5l zt6$s47Q(7DkN z`qlQx^zCGWfV<%6;}GQPSDF}u?(}AAw*Hd*#9Ncf1kaKW6OG?irrY!-zdah1*Ty?WdlUfxX~z381P`8GW84(5tTd@f>w_3}B;9o~t^$QY0D2oX){Q z$)CA82b54^qHxQG(ec`2Ab)TsBYc2;k*TV)0jA z?X~KRZe$KpVKn;tA!pZyE)letA>-#P=?GaY&*gJt&7(UVo=QGc8Y5daS6`_YYdM=lcH$9i;-N$K>%0whx*#9SVmHWz~2&I@q~6JS}380V?*6(s<(OsS&v#zuUY%D^Md+K&<=v(o%p7 zqS}=Ee|t=PL1qZecd-ZYW|jxOh=n2AznzjRS^}h5MMV&rK0SU!rj4`5|4v~o1+wzY zgWEVL5mkZo$ko!PEDsh9m;tD3+^>Afd4wLH5iWbg(=#fjJD=xAp+E1_ z6*M(x9)#J)AUoz!k|ex-3JJmG5?YZVKK;e<%J{!8{5UeW7YA@HZAaY1zOYIWp&+j`dV}y{=~1}zyCwN z!h>h?bo>a^nj%78XoYc&yhl>8+8N@N`NvFMK7--W|T>_Hr~z zO-!LweIwqtrh4^i%{G2co5{)RcA)rTM{}wah-&{|FMuf;T70~|IrNh8;L6?HjX9s9 zwvzJzWQcv$$nN^fZhsy<>Zc`r(U%ITbFgis1)b|r46QFZX*U9L@3?HYe2i!E${tyF zB#JFaF1gksRSr$uEWUC#9S?5b)WXV3d^l{+gxo!9O7H5NJ|C?`%r^ddF42pC0Jo8V zf<^akYkV2>q}$?D4ZRj#G}PK-yZ2y|bZ zL`*dMT(gAMQxVQviY}NqB#g#0;a$K7s=+RnlVUtRr7D)3(5k+XdlPHL5EQJPy#?#* zE49H^EScvaZ)5F?y8&8yO7 zp-U;MLnh~A-&U09bLtZ$ICAD7Vjpt@8v{tcG!d0;zWuTpxI>8-8bTapr^MS$$UFLU 
zr~E)-)IpVkynGYKM28M21`*K!C6SQrAD^4H@=)Sq8<+8(jAThuZP{p9&Wj!qT@1Ll zg>~;HS=U2$rR4S$qqxru8MX~8E2YFyN9-V4FWpiPvI6#H>r6Y)$6=x!W&X$Lb%Qifxx(;#2PSD zvETml>y4Q`#>+WIGgkGL2cG-x<;C(9TW9-|+y0pIa@g|5DW36dQSz8>Y1H+sh766Z z!2!C>Lh`qkPj@nNos8Rd+%~7&-kQLay%2yD@!1++1TKHGF-_)nieH7AR~L}%ZJ+W# z6g(Ura8`&Ju9@^C-}_%yHKwfS{vtyQ=ZM$(0&)K{Payg(bt!l>V7pQ^VsC7r=#;Et zDlKJ#=XdN?H*N3De@_X`HDVC3>Y&NHTSlg~N?cROVEfma8y z7t0{4ugy`A%U2%aQKHnak%LcD+yKleq=}ysmk1BhRoDr}R-EH*bsRn;Yjt}PRsc@7WVmm9B`X98|dg=}^0?HY$nf(t8-#-~oh=pIMtY4@X({pOLd<)P~=wfhi%klBUnnd7VQv0e_?nB@xZTH*yov9W|64kC10b`x5EZ|^;hxZJEkZ);6vYd$CSj9l}kT&z2_+?h(;Ub{`lL!3rg zpBWR}*YkA`Zd4SwZJBm|I4$jcy;BanM~Yw1G4ktS6*O9`gBgW`VPRL!cxhb0t|_yw z;;>SySW)x+($Tt_b+&I0%5h#8o`D#`U7GdRV5@^!a?=`B%Z1xx^ZRNi*TaXU_gXLB z?W=X_j2tu%SoR$ljF;0wFz ziMnO#ERb1@aXQZEdUI1ZccK`3sb|BlaXe(Hx3NoV7=hRf^*|x<=H%P$?N~)LdvI$~Qy}GvGmfCJ!0X7biYQ*NfcoQWcgpT@2(!!qf)sTicNmom~u@uXPC!XhtbjxB2Kwh zU{d_bB&25a(_0C?cPVGy$L3wrsd8qB5WA-c;2AqJ_Pw~w_YL4%H_zv;B6~FyZV02C zcI(%d;&H1sE?B*l>77^I7T|F5wvLFoPn~rOSq+@#@AYf8sVN@H%HsoAJ`2(aItp;kzz%ERjLOPWudvl|teOR81jv zP>mC}hNr5)O4mZF3-`P=g&|e)_t(U%=fyCQSohC!}t0n%3 z_Sz3M;|L^W= z`E!XcQAiz%h+Vm3oI<5??6@MZ0d~NKm^HFJeLnY28IzO`@`f zpvgkA#-oCRVGy^ksg-@KAJ#DMH4I}vf9+9_#XF}6C{?PHXBCTG)r$I@TB!hzKPi@9?Qk}c}eblJ9zTefZAvTfV8U3JU0 zZQHhO+qhF-&%{iB-5nh>(fwocN5;wX=j6Nhj-6}0x%RW-l}`n2Zg87wA8#h)i5-l3 zB1`vjWzmDBLtS{Vc~Bvy0&#a1$Ql!rR5|K(Uo?wIq42d2_Q{&!Oa0oZOzR&hP~(ZW z@*Rfa5LFiau4s6#Q)v}^oP&ij#*xVLY?ASM9`)~*8oHbx^md%*A*#S^F77joJ4M3D z0i_gv8Q2M|Hzey-BBT zm=$#6a5}8IAS59OCJ}S9M-YWZA`b}1aGX!*kE8pydXRRBFUO9F)GH>%;>dHXUy*3{ zY300y=;VsrdC5HE|7!9u@?Z@idwP9Uj#{XycF>C4P}%>kK4>qWL-%)vZ*vyvWj z;-HF7+w$x%e^K@Hu2vfpjcNUB$?85=xZPNF>fjg{XpFwTMSEeL{o12F@Zou+2C2zR=aPdp>s{Sp(41zzvxdWftYbd zhQ+%63M<8-jw5?v-8h%phKEv5EGhCb_qEhTf=yC|cl3>M`^SXmXjNVT%{y1>_|o2neA7(2w;^i4#C!|Eiu@WbqCl%Yhm5}?7D|IXdZ zq@zI9M87Q?tHFHr?*s$RN1o%eFNdExOWmlwwMj*8c=Q$Uw=s?h)?aUwpIBvnu>2p9 zOEx0MPbC=$!vB8%w~@3TTmMrL`Ow#T|Cj#M)n(#FJL-!^qL4{SSBsLDqQg%L6fhLh z>RH)k`X$)V)afME}b~t-{uA}|)xEM*SpU_Q9-(XmC2=>Anj^UWn 
zp|vAU*7GM4Po<&#y=g_8(H?4Jr*6J&J#Bk<+k)~FE*a)?*#-^&NjeS~$1b)U>BkX^ zsUUO4nH-QA0|ZC~Sx{$w#AVqMjx2dEYKz8E`q6+<5L`ToJrqY|a@vhktsBa!8z0#H zqcbY{i~uXFLUljV=nH{kJ&O3}i~B{6h}8eoga+Kz^!{7b|L(J&M>QP}h0QYBHMymY zDgM^7%qD;h@AcVF5;$y8M|h2Ad86JlYehr1HDB{n2!_s9qe?i)XIJx+n`Gwvi}p+X z1rhnRT8_bv?#H0fp9DIjC?UjLv~5j`)fs+kw|>5JL;7bga3CT4QJ#x8r)5Ai@;`M8 z;&3b~&K&{&+~)n!3=2^=B$!inLlGl3=$|3Qkw_H`GV5f2I}Vdw%5< zNKNrH4t?xgN_Ehz+n3_sY5M0IogCBC)zU+Y=s&|HjTHa$H0j<;k(j2cgKg2j_INF> zv96r4CEfQWg-cxPIvL+q0KHKQEn!V;me4PWEG-QwEzZzFCV#)5h(kkVyIb#HL+^xR zrcUU7=j>2q^Q!S|>27h~{NQ{-fASlF0%ft7=&Sp~bDpcw_o>*JB!T$jdBw=K4QuJ2 z+)OW-hIMeu@QUXGJ-zCvNb_CU8+h%pxNdI=lSMK9;3wZ1e0b@OA1Id>aR)9fN+O&%eX(8jiP)A_7KRi1+sD`4BuI6j7u^$nlZ#}H zpKoh@n}d^g^}N|N7TyX=KCfSiG9S2lnsUU&EeywI(&&)cA;^5{qoSnVZ#TQV4DQe@ z-DcOUBq?#kM5RC@qNW-rs!o6;O%|Y6L|$Tmx!_+f{CB^+2?G0%39EYd-V>-KqVCtT z@wJ?st_w?aO1{)Mxfq-`%FuKb64(r(as2p z`g9Hj+nO}-*^Mxr=z(y~$Bq=QBq0>(cNGrdp0NC;{3SMtj@q zlk04)^l4v2pVEt1%z8`aE;SzuX=nia*gx?7RPQM!%x1VyGs_2)%2-hcTD z+nC%d$2^&~r~g#BVG7Y<7;V8wqrB1_T(Ma)O1W=vQKjC}}hKvq9Cc75GR*B=&Ao zAOa@^78_xMrx<}_z61XtfW4(tvb23(?F{!c+HRsiS$Ie+rYvB1*%x@B0lEL1@0Yw&Cku zucMJHtsBD`Pu!T7N)$K+{GATBK)cYQPZ|yA5mZ%;)!OXb+TeLf6#4~`h(k#{;-`~! 
zC-(A>{QuipHpcECW8rC~rgI#WDqMQf@Gn?#1S zzp(f-+J{O7;0KBQMzlt#J-fg7(Szd{`*-4J18v2O#8EXp`fH*^;$i>eLg_sd{o}mq zfATsW{0lfgR%iGhSWz7R)`}9av9Wc6qEmD>aB{aZCZLlxx3Pqx6D44Vq7$@rFfw)^ z(EJC?oBoHhtzc~EM4-z6xi!-uw z&~mUca4`HU+&U9I3oSh}3&($tuK)V15=*nOu>A87{CASX8Gojcm4StkiGY!wftHPx zk%{$(v(EOzp66iv|7KRJFL%+F2f9HKC!|g>@O%Q#*YB-6{}|m&vX-opq+V(mhUdGziOaKUQUCn{`HY94^L;-%+mR)E z-#dTg(+!IJ{Mw35xeLwt`}{Z%dwt*Xms`Sf(g~2?_3e#L*4O);<^`cjf)KzJLy>t6 zc?V=Vw`I8)I{)r|i|_ONUiSNbm2sX><`Z|~XA32%4Jm{!9)Qy|B<3@6#iW26mrJc{-t=y&5tm z2rwf+WLna3peGFd^8i|FCc$!%RETI2xFjY0SZ>80SZ!_S%`pVrP4F{VH1Q zwvR{ctK%2-#7brd6`Cxs$jb1#`#Uyb(#BRh1h@bUnv68!?y;0M4l$6XG!O*hq(st4f$SzG3TswQ8gC{xeN?V~U00!MQLFWjno)1t@*8=rKdL8QM^oIzUqBb_U~pkXo?C%Kko| zOw`a|FYIV_e|=G4!?Uu}S+cRtrsN51GA9ltTUp%No9%{+j>h92c!^=+ra;sfK z2niXbhcvadeLrbha$a6ibGWEpodq#wVR+Y0PLbIcB9=@Kr@WOI0lz(h#k|pZ!{}A^ z8YF?RHDdS`(*ID|MXL60y)LCrywuhMWo8L;c2UoJ&0)`R8(NnD3)k%KG*;DAv@60>$9pS zz*NzjaIr;}!588{=hJ48U1dU0+ zg;MI#>1fL0teWyt>F*|Ty2!w69#mrt6UQLPNuar9cq+J}0U-(uW&nXo9Gt^~nEr*% zR_lGOMz{!aT>7kdR?f%-7QqmrmZ&@RiMG>{0v$O%xjnEiFJL^ln>Oa)MkQHutE|ms z;m!|7#v?y`Gl0cFjxoosX+KJnqkfhtq$Vo+JgK$u$8)0YPm47dCjlj_to`G7`}pxe zcjKoT0uk-QmO*YSN)8QycgyUldP7=TLn}T zu&hp}dmX8v?nI+uel%AbQv)`)W0wwr)XI`uQltCbp*mQXQu&vdPOB|Y`cA7OOe}<4 zCl6WvI{~#}n4z`vq2PrJm%G}Ipul_pAfS9C1r-?Lpwa1Xwx;+oSlLh>drN?a^3#Pu z(+B$B_2Jy6?5I|_vMacCG+%D%kquD9|ppw65Lf{)cpsIj?CaeW@11l665t+azU5XY%qno#v8WTrc0#Bq{ z66DhWp*iEMIJ0W8tJEM?^Bf=~SPGnc@dJ`EjVi(|wupcA)+3UugxRaBR2;)Cre(|O z5q`|!D3o&Cgw%v5gg4a+rjaxDO=)NOjBJpoGQ6X1oCN5HN1C!SuS)$khTVT`(wy~@rHah>U&qKW!&gVhC~nYW&nU(w1$k3dxf)5mxEa(mai9jM z+qAv7h)R$wjBXb)~ff12Qc9GBsr~zNF}U$I;kO740rp&;^d)R zEH=O3*CEEaM|+p%7rSnP^Dk>PU$U@2g;@i@5WKZh+IiGi`{@%0p`NPkvZFK`jz39$ z*J(}GRl=UB4qt0xTYH5~2Dj1^8WGeLU}q4ssO%!mS4Y8D-i7b4Go(IBRlukxHC~NTK^l?hK=Qm6#)HF?wGSGOG`DQFDb|z=!Tu(Mve&-_Bu1%*BF_k$iiP#Xwg!Ag1}#|>GvfMNQ*ohC%+GNfJ%YIKB$^qPC$PuB z31TzED$`K46-Qi`X%5EP_VAIY%rne#aSIqp`_0+yi%>++Fu(PD$kljT4B1V z_?0KzGL%QeJ`eN>s@D%V&X5vt=GBiGR9CMz+e}K84~i(J(h^So%1^!8*FHg~Zwj90 
z>Qs}T+2IJ7EejqksMu(&8{>*b3*>OIuTrb5%mB4!I=Zn`^)H0g*xgO41F$Myqn1x+ zf%327`tCefBvLgLaf^2TCDJ)%E@yEiLVW}ax}O-!F; zZZVxbYLg37p)!~;>OJXW?l^PGSUfq3t8HOnLQa7V1{{mnT`kr|9~>v$ZNgR~6*j9& zUH08(X4NFso4NmV{^N{s&#;n`XZ=$Ow)lGjMT0!lwql74{ukzOU(Sc(Q@tyni^9gQ zLp8qD^xo{%7MsP#Ez&p(9PQGpCu0syRjETIhiy|tXPUa8`=}(P3C2cbscPqld*K9C z?LVsop(8ZGlGa%ZmZz(Ac8@*Ze{OAE?eFKeEn~Sea=<0;dwB&_$^=n8_oH|jZz5%~ z-1icrD+KdvYINyX-CVaci*nG9+7+w$s8(vmoulz;yma#xrosb!#0SL*loi3Ryk+T1 zbOU6YB32v9zBv&Btw2JzJYQ^CbtQUBwR45bRA9iTQM!aY$}6Dad7eJGC$k?X-H*LV zOnDQSya7&#ITR61l9al1&Tqf_i>a`6!*@tG>#B)8bBE51$U%sk7BG^*@%iPjE|oh5 zy0&u_M1EjUxlT<=0<|R$md$KPh{|Ed4$oJ8Kda6X&mEEsd$S1~y}bXqK&k?&X*-D$ zhH1G~V)4xIlv=)3>WHDHW-|{mvluL?u(&xuinG+yE?A2tW8NvsxpMF5Em48))vosL z&M*0IB6#Cv3jJZyY&G20s=Br<)Kc?#J8yA=e3U9y1T)|95t19`Us!#+H&^0^`&FmT zc=t^F0^V^Stb|Hd+5DO3_4mHZdkvcgawNqAndcyM5{9QKn+NEh<80FaQ}$)sRX8{p zJEn8z&+Y`u_oYbH4>yhx6G2jto8z*MCVC?%wfR|At(pb)FD~@Rboq6kzSN!XoI2R+ z)s6vrqZ*yvlH-GfWcDvnsqL@oAy3tPvN+jT3)wrfdk?M83tpYrzQ5Zh)h*#=)b$M$ ze~{UVAT_~Bx*FO@;OBDK*n!HS(x=DElkl{Jix_F!H$oIH_YYYv`@)pq4{L-K^Ei}h=aQ!mm-G+z9@L$2hnO?fu* zI`5Ac9pp9D$aUbJ8C`GXT%;3bs)gXIqpQ_~$31y9w8L+@o2lW!xrW6Oeva=g(;Xzh z&2vb|qLt{tL|^s<)0j4^*iBLNMcD@g{mQAA?1*fq5KUOHznS8-Z=GXuDoKJ>t;>TW%r{9AHOQqc@I7M}`WD;_RHWEwN z)AOk@$02?sVlHE!DNLKKY#ZN&qY$cm6Mr~0F^ESx2E+hnZX*I0FZ9OS(}~KcPU`-i z7d}xw)pOA~X+*%0v%&Z)*DOvbp3M_&dUVY9`?%< zxX`VdwV4&TmF1eYDXU-{ASZ?~iFKxI>m0Vzmp{n0MQasJX0V;5KBsrqr+KLO`lplZ zPD5fDE$rDyLz{>Y5gb8eB==;`fUl44@I0F!xH_V}r``-5x=@?B)*m1|hd#Qyp`K+T zztUmr>bO*y5RD9FM@UTipIdt`PECr%E01hgIs8uUvo0tjpsmN$dsH9f-Q!eGn&1}A z)GGvrC20%Nj81L3q?YTUXcUMIq9;#D+&OOmiyX`D$|N74E-l9InPduyd%%vKW@-9+fCHvl%mA!Xz zhgYy~()t=%xZ#Rj#KqK&Q6RlnAnzAC3JR}BK=>5oZ_^;(b-S7s?`8iOO~y5{4cE4d zyBm6;SNCylV)(xM8pH*hyvX!+kOrW?M+7o+ZW8EyMm^8wI`khutiuNFG6e&(gM zZOE~8*;KD*kPc+(um!^dKf0LU8B6lS0xmZ1{R?CvC>8SW?MYUa|6@=7XFbzc*cdtf zsgO-uDwbr#zI&$Tw1CvF4Kv=>kD#_@ZVX%lT3l6t3>1RMdVz5_=O(PO{5tz=ENuL; z{2r#QW8i2~NtKtFxZ8~^lltom>@?@{?r|jt@B5;!P|mmGGsg##{QJY!mtAJ(^QUi@ 
ztFK*2&({kupX^W$z-*S!+uq(t=uKI$kxUF?Y2TkjioOM@T^KM+YhJ?i-kh(68(F@E z&=T?m_E$vj;rYlDGDfw7Hb|_{D*OU_=^d@%yXzS<*_`h{kH6n*FxA5fC#I75+tq)I zj%|G3oFeAhkqezzgy<2ERoc+;dtVjYNO5?^%MQI*M#zC9z>ZZPZv8S_79$3{fNZU5 zDF&9_dp5QAR2^UrCj1a&#PRA1RBO#&mHDTmxW|!zh z4-_Fd;9jshAxVc2JlkQHNqV*Pypp7f$*2==-))G~FMC6{R5(lz3?tev)T(f4%}hFT99b#`evb@z_&~S7&z9r+kIDJ4jA10C~T?EVL}Q-*25w?BGX0 z>J>m|W2NG~#y$Z)*kP&IpMVoCfiugmZ=%c}MQh<30R#Fi@7{}6!Xrgf2%$pC`uu&1 zWX0VdTIRx6l_sBFkoYkGD$3xdTfkN_Wl~Ra8lrg*xm2jVv|0fCF-Tz{BD(=jv7u)( zV`Zzrw;t+ZsZN9z@vK8oA0hJ?IPI#yr`VK$?>{6aby-707_=D>%hxU+leaXABtnW?ry1D`>Z33UOe~+XfdYqvM6O%Acj|BgmB_2}k_eJYPUo^_%`Z0k zW}2MfrM1oa`lRE|EPChDo_S)wxy_wiF~j(jR0Hw&-q0?#!mYv1VYt=&9-CemRVS{$ zJRiQ0x$j>=%`LWW-}YMyb7(mOnsiDl*;>_KXVb1s`&KmSKo@hDJ#e>Gg*la4yZ~o4t1V z(bhGyaI-?6ZtCVsx4uxEpEC$yBD#J-dNO|SBK)!%yICVPaGy+K{bh>QGZAY|fsrTy zDn-pHkdoFuA92Pf9}rS(+X3gfksS!J_&Z9c5oSpM;4JNKjo=c9G*F^jVMWlJ9atzJ zdwCBOi;Ig-TsXRrH4A??NGaeh8U7qpGT*XG-_$z4)hzGNL|}a<;%q-Et?kR6>FbnC ze7ZwlUCs%w-$$4#z{|lWfhU^4T@3H-SG`JjNaXb!cQUa@2O#wj51p{h%^>%zG|CCh zS=KvcU%@hsfD0!I#sGhdCKA8n9`BS4Xtsu+H~ePdXdH%%*_m8twYTUa8Hs|QShKr2 zv}&oUt9<*MMc2DQ*kRFc{FC1jJSES1PfEJ%>E~t+MkYv33bxnE9rmW$#(}doUa5SL zUG-;SzX~A~HDJKpI8Ij5UuYh<9Jfyf%x#y0Kvr@18v|QI{$OvMnr6hA9`Y}`3@N2Q zzU8q{*HGtxBNae4@FTi4XUiR+-9L2Y?eRB4pf^MWx@)^<*p5DcsxO@O5nBwvGi$Jp zK@ZqwCm(s;l~}jM+X>cSO54?aqM1$03;@w*7c)juxpVC;FzP9w-x}mUt}bWM>W$xp zu!mv?mepVrAvlE(BcY`cm>giRnBJn*LKD)8O2zPn*vbdPG%@Y7dWkwYDwdg67;?5@v;0~rL zYJ=N?DCeRRV2wq*BaAyAgD`&$hE>de*gvK$@ajlp`;a@s1aVT&oHAs^=g+62ER17Bt2nE$8joN}bC& ze5P+G&elt%mn;Y7k^>Z$Ylb6^t0c&T)jQ5q?%<6Ho(M&7_^)E z5oWN2I4xs``HcCTlo1#96svf`VFfK2W+Sz&oke0pH@FoFFH)a%OZ7*z6zE z^OF0Ie_KBTl3I*0Ow^7VKa|Bbo{?gpzg6AGEhs*8-Vw-ro(<$+v6*$w>~SKiJ?qg3 zoCd&cVg-FYYq&*oJeFE=BzEGPjA*f36UNZl9weKQ*9LN}}q)7!BoP3pJ z@EHWHtm0VFy2uyswmSPfXYImkhXrjG>6sLUT!o^ znQ#*@(ndahwcA;3WaJ{b0(Ng~YedFp@stWmk#NBEwo%xxt7Tj;?NO_|UPM_7WEV3X zvN3kcvsyQDi>7fbDfXWE@I_$u4q+`jYC=#_5$z!K%dq?~RIo&7;2~;Q z6L4~RXThKp(iFMGH5SZ)+WLW4KD2i#YM|!d5B)-?wR7hn-W5)kpB7P->q7ygS^eNt 
zwXV@go4-i6mOId5lmK~gQF3q~lo)9<)I15kRFJ$PvsdfoBEbn@@*Tf>!?_2%n>P;4+)1L?2=*DpPkh7f9}qC@#Zlm=6G4%BuJ z&3@14ZOJuY){aCnx^`KbtAX2*Vy_2KQzS?CIdQdq6pcgW_$cQU(hvD#9PANL$|_9w zHwXc)%FL~M*=1w$w5eD#uVJwxm?W9%m<^~KO8r2(YfhZl87YKGD9Og`(LqpqbHxsr z-lvFJQGvU1M>gqmij`H36UJJFNl4C;zr-|}m^cRPH9jQ>qeEYt^sT-m@u9AH{)V>?l3G+WMHcU32~3&k(8QWM~z5Tf}m!9Kb^*v|pc3;Ti|9VckeJ??uq{JquzVio&b{I#@+0%0{M zJSwt=tG_yB+<~Gk2PWUztV$5~m-3ge+EP(0UBr}Ge;%q3uDxRcyL66mY*oWN46 zHApm!XmUfF*;Ha_bu=e_zY&?yJa^JrW32hd7yvLL*`ln%)s)SO8;7F<{OCN1EgrZE znxHMoi1_qdQ_SF9oR6*5W)~WLhvF#F)MMtN#JBd(U4U!hD;$=Q^W7l~M z0u(S%9JXpOfgXluyzO3+B9ToBirj^6U#tsl z1=7OA%~gC;LJ$-BpJEDPFH8`>QwP5SNGiZ11fsYw*c(d_BDEEL^SCa9EM3UNB9etK zYB-5=(Vp}^vg!R(rV75c!^i+mUqYXud6rNG2@+H9_hyTieUX?X-_i5*XLO)y8f<{3 zT`OfMw15;1ptr^xr${fzDvsZsN*1Fny=a#_o+Tpgye)qj1Q#ufnRbe`5Us>Ke~TWb z`%OE7y>}XIzjA+ZhtEx_fN4fPhuC>~_sK-^B3la;gZ)Y7X*t(rRO3l8Nk+3W_Ka1` z_$FT2P+m9g^DmS*lL>WV?T46p(<@GC^bcu}kT9b$;q@M>sB*6=igL6UvQCz5)-Xqn zmqH!NT&dKqn!VJmKiVQO^q8h{scm2K^2{5wt^g%7tn5xkj<4t#(2w=fkLUfZH{O=r zW*28^wV@T`Atkg`4Vj@ik{i5%V$8zV?~uQ*rMOqi$zGN(5m&B*m)IRY-D}GYG>gkZ~mCqt>Gb+cfXMjCwqAaw4tIrVN_JWxLqNb9{ zirm#?@1&^cpZR;DgYWx9*XR3r!}aFx`^?|B3BK;n+n&GNUlXx>-`9^@t|jm}yxZfV zTBk?4e7q>tBi5#Y8o8AOu;My^zO@*K8PuTrSa1W*ybVHvl>uIv z<&D9(4kC@wG*+HlB4_t%u|*JYp-HqiWp}Ma-8pIkFJ1@quEz!2B3+w`jGjgAH7?ep2Qi|;JU~kaZ+Hy%-YmXmUPI>HLg=3hm zH^jDn?odi0W;8{lJL}nMNjqI(1l-7UkX_Vr{S{TfvpHe&+ONUvUZi221`UW97l$)5 zqI@Wh>Md!U;fsvQ0fPFP#y!8th<|VyVQqWUXqWlehc(}H#1C#2zc5s&FvDfG;(x0< zDljh3RofcmN^;I$zw12Gyr~E!(i`9vi%_M(v5MCEjnY(W@5$rgDJN0k(k+X24n4SJ z!rx`IK8V$&oS|%(S5X8n7vYL`OZ-^UP)h38V0@(J;Cd~O*$;*+P)%HX{`;zS>cnHp zB}@%6BT?FJDS$x6t&>vo8B761M*9nuQ+L4He0rtU;O_pq+9iKtTlQl7eZAR>^WXoy^4E|U%A(Dp|-SUL3zuW-2u^L zp)AWN&{O2|X7dMxFJV)(<)G6V zi6J{22~Eoyr_hrY4X5S8^p)~xTlJB8vS+Yd(YgGq*5_qJ*MEB+Wm7HY~q2IB=F*lKL+}fzcl3FANc)fyatlgs9HaCQcy*k zdIjX!jl@-@C`KW{K@7B8OY}^7`Uy)AZ>bc7Z5L8}n=jl{qLweEkmHy;X{c=Aqu?NW z5j(Iro`BZeJATp6NV3=50!pa!o=`EwzcP?b9W;tTDNS2>15u!SvZ$t+MzV8ea@;L5 
z=pZO`wHW?7FpO2I()t>j56*HG21YnPFaQF7^NsDaq~(N*MFm!Ny%oJ3(j!m-cPk*B z$^y^dqTcrMFxoqV3UTxzRX5o7NDeS3ak78dIP0Veu|gR3VRN-C^YKx-2@!N{Zyues72RAlG2 zi_q-g>~|w)6MZ5FEV22t(y4O0CN3_#&lS!u3AVjmOtCxKKQGzM>H1l&p$Uj6={g8Q zazzzh^yQ9CTr}}UWcdhpJ)0D?tk{(=Gl^k)zdz?E$`O?`U0@cv)2B5A>eir!>m{^* zCi~<8O9-!Eo;$kfoWidq=8#YrjuG}zzLkJPT}2??iDJE4cQUTTZm#|NVIsKRQ(VLF zdl8g^%U~_{Zpq_+{vAR8*rbvzC_Y)Z1butmP`JlsdVz$OcsSR}y z^LA9j$McxW5VZ5SlW-usg37d8)@W}_w?Q2DcgkrutKIKp+lf6(fbMPLm#K;lv1*=K z1u5G8;%p>bGUD1IIFZ7IN|ZU=^0JaXTZqK7%631oHoKjlpZ7!JW~~yX_Tpo>4{OQG zJeU`)DgM<=V4H!8)}<^s8Zirr@QZ_rBP|{gg$>(<^{a8Gi)y?HUZq4v`e;~fCZAji zZJ@*^tcE{s&n6EJwH{s0G-8xthwMud)GcpRtxuojqp<)lxd@AeUu*5P^KdFj5)}!X z{NTw_rE3QS4$CW-L&uFlnes5HTYZsNvj_~iYQqOH2tr2CVToRc0}>a4sM$@VkZ{5^ znZ>At5i1NV-cVCXa+H};Vr&iEP(tYb&nJt~J359my55BCE}n_b8O!QRGneHKq(!;i zU?@b|sSD}q)qvtiZk8JY?Tgv07dW&y{l#MN3i@+G61-4Qcb{P>4N5+2{*?x;Tyf3o|WN2P7WSk`+U z$l;O%!nS%V$T@GxMrC*PUHB9RXrS&07e$~Qx74@DQ0FH@TucJ^D{M0R(YEzRpZ0IW zS4R17$M=x|eEfW8GJwSZZ$9m>$4AATxOo(a;1Gxs^Kvgp0AD-_Q7{PA9`A4W=Qrs{ z$FH{u+qZ`uzE6Db{Ev_Sz^MLLtt$UBqxz>%l;NL^YR#I1;dmdu9>LfSz{#`n>TW<_ z;OvPpMZ9`Uf)5L~D*)(`j3-WhpKlfU!b0M*@->=>0w%+a+L1!>=cf0H4b@u5&lk4d zzvDq2KV0C)9$6u%gQd3~FGQF77vE3&mcIivzMdWrVm%)le|g5fl+3{Wd|obMW%+o% z(strD;|xK3^vBJA`M(n00)w4sZ4<%?_-1gO z9n7@ie(>RJ+#U1HUGaXx`+m)s^00S9Fu((Vif<2A_1ANKvHbvf_wS$wr$+G{4__4U zxQ(H>oIXXlU9@G{(yi{~z8t`& zf!L_rg<=y%vzggJNZ;Njh!yIIGw|?I#R&hasi)HTzTo0CU zQ3Rj5F;_$rSJ_+G_Ama;)Gmy(#i+aWSp&FDz-|?&|Fi)1D}?b|$Cq4>KK^b!3I5t! 
z!qW}`y+96_=e%8qeF`6NXc!a_RLsZWMALnQP+z`dPYUz@+(gGx!*cWac=i7EXyDYF z)e|(_e4PU;M6{1sb#UfwrRN z&E{NKdXt1Z1~=JVuBghXPWrJVeW5E48|Wq zgFe;CXpw};%BiB~mm1jQymX{RN%sQ4xY(D9^mKGM5C=DL>ln(BG9`{Ln1_3r&MvJ`_b zuS-wsmSCE^>>iKdqqKH#lRk7za8K+I0^_-XVrZ3j(dor9cWJbbhyZWYpJowo&hLE* zqrw?Oc@h0+fbkW3Btr#c@-0B6TB4}ViKs1znROehAi#EJRrG7X(}xn=!*>`VYka^W z!j33C*anXg^vuWyqX_2IEOqQSH+->eh2j@h}C2d5sbQu?xoN%Ia zWGMB;Juj2=-RZ!#Inv9l8^6Kt%m%Ga1f|;C`6FSDXvtC{xthI3*=u-o(IVZt2Amir~882rM=}{O2U)W z2%(P9#pW4myW-o48jix|xtS8gfI?Nl1-8BkQE!LVvm$utaqFi>k0Ve7ZlJJaI;X}I zvl?zgfaMqlRJ%Kd^sfK>=Q!F~75^LTsaDgyv5yY0sLUe-CO(H@8o@zXo?i$IY%w{y z8oLQJIgbzsBUB6ALQ%EEr69s3q7pj5ie^K7=P7vO4BagHJ@3}XL+}u`7q!)eFx;m1 zu5%i2E_25w67Lq|BbA4`4xM)AhJ^w+4V_om7V=57(t-a|4dbzu6z$g%fCD)5wR(0f zeVsH}IfJ;!!2^POnu|2_x;iejv7=hCBGRitpI#R*OG<<(f@WC0F-&@w#V^BM+D*k2 zDv`j?VgIc?LOFVTeOUMp=5bW1KTjW}UbMh7cFkao>gHwDJ^>_B4A_Fj3){;D4G2`h zG2=Wi6T_;=1Cw#oI}#(R%8;x(zZ`(|-jqOh52iQlI+OPD<$A7!4X(E=qI?h zOSpOSc-7y-s^JfY=x%@$So(B@!~;P|x>92bK;x-Ae)F~%(bewXyQdg5S88twmKlP( zjn8Kq1*vEGLir?|kUnEZ-H&#>Zk>R^bYBjADk+5}KPxS<)QgpR1( zP+*G7o#=Xj*YVW{xv%^*CuWH`O)k6d99AY!0>-;6$)J5VbsVcAe^D+TR^lOCs(qfe zo_QPwj$aISgRjQe@umH{@~nUR(GDpCEh*%P+ zxVkybTz}NN`;Ju?-}x~Sk1V4dr^*eWB9u)y`ho9JrNT^Px)qXi+SO~o(}A0t?5G$u zfu_g<%0%x}9ZXd_qi)WiC0H_Cv{V4@D^C6Id~gQ%^4&xR?R%B_K^l8;QC@s@oxcY2 zYHMU`*QLsEV=k?Q-WnoCECboXUua|J8g}KZp0RjIRLaWA;g{hmm7 zMwH3tXuP{<;SZilBewx?yV1ys`Y*5F>N zCHGOU>6;ayIJE*=cB4WV=Vo>#(HzZp))1l|bi!k_0` z%1BpuYQ@*DK2OStJ8Lf;Bfg9ULvKPRDa5)~1S8(^NS23| z5P8#oEo@;$#}*)N*Rw3|uQb2n-r-!&UAdcE83QMb2_p4_m55t=dcfm8mV zjCU2$;H4=Isu2DXOjfv**uf7)BkCo2V2F9=~+H|PoODv7`}u>`-22GOcY z*sc<(JL*_N#92_nm8)0&$wxV_9l3dgN&Cm7sN3Gmc``YPIywitrE%H5RuklA+=DJg zHMuH0&POG;7@it1o4RQ`u^#Xd;HbiNl1fF_CH#}|<7n>uPVcwAkkYb=X!>Fj;yEzJ z@RX8ELeTItq>XD+W8zC=3AE|}0d90=&5%RGGdPPN8bXK}tSJ|Cu$ z56SOMBujWCWYR-MZ4jVx?;ATjW55sc+I$ND6J=V)*5=Ez;YpPN-VF1IAbTjFbhIMX=^^mi4fjz;to9G&==PdUi|JmQ^7BXvtpB=Cg{_}AWEvH56zPY6CqcDQwkk5O_d@h(vW<2{rPdUDa|~hkl_jM_1~)E_TWiEN$CB 
zrnl7B{4}?vtBXU7{)2Avt-$3|6M|YxsRSU!vv?`bG%ZhwE2ujDX-JL*wk=ji(r~*A z3*GN$)rllT`~9NzzTt3o)4Mq_ANzuwF3(@1_z4&sAKXP3RwTG@8&3m@Cx1div?_++ zm$k&E-bm*cP$R==Fe%i#xm0(Z6kzz|zLaYmu>|hqOjg+{^nW53kys+^c7W;?eXng5od1157Lmk1pL8Xok^P| zsN727t|YTR@vQcMeQv@D8PP->POPMg`8JfNlM7`Yfi!4JvQ(}2+enWg8#CFa($DLl*C3 z*yCsM(*>C1ss`n$^C^8!w+by3M6k5#5B4)fZIPOVVmn zdM`{+l0k @jq!JHWkBhTi6*3i=x^s$E9`%qzpE_x4JCg=+Pi9J9aE@$1Ku`t! zwt|5tBy02^L{*vn(=Z!r8bxjRyaIo|YuHg&=sgdtH^ixw7ENT^+rdgvM&BcB*IyXO z!v-sCBJbg%crr+lT$crr*rI1mPnDlo#S5mQj;hY+v^Rt(lG!u94PXZ@qz({rgZ>g5 z%R21cx+gsN#}a|K19)l=7F1$lo9OC3<^soUrGHkKX5t42Nta%V0Vx%Z7$;$+8`urU zj_1Qm(;lY%Gl1)6?Y;V=aQ(#wb6__ASh*j?Q&>N&PfY)u>4*`BtjBPqOoBVpO;-@^ z3-{B{^B|pT)(dk&(nP9thlx4s8y=J-)RlEB;S4kZ3#i3;Z?w^G+g{Jd&3q@kWgLZ0 z5b!i8Q0l)lk&zDrb7M*a{j|}mwl~1LiYh2qjOvJ288$Mo9ESTJ+`R=*TwAv;N+1vt zf?IHR3+@uUq0!*(1ef5Ngg|g>Tmy7NaCZ+9TpDdWXz)OAz0Jej9M zP8C(qt66J~@y#*D9OIj7>VeIcg zX^AJ8yrUC9hPEP}?PPQviH!1mvI>fDj?ox<%w3()4fsO&23!C0!an9|D6=D_hQ8R6 zdwFl_;`L8-7jMBgoE$g`noJY=V~1KvUNRZEX)JXm8>f6u@|XrC$*ezv)dz4mUr%Fv z*NzFp81yQptD^y`*1s(Mmbgr1ttaJFEHUvNLE1z;Ud*D%4&N`B_Qf1)Fw~r8?(67W zx~(If6$%rD#TBPMI{omj*((fN(xcdI#X~te-3Ix*)beqAo}HhQ_1{9u2nSx58sb1I zBzaCp4xau@jS=Os5t^IAEG^J8&_I{5qwF?wPog%nO3-cXdx@_mJ?z;6#S2Bi$v4Y2bv@`G&3%8Ay(AuWM2hj|XiP_8;EQj{&G{}Nsx@u4dIq_!DE;fB z3-%G%&NMl?&d09_VGSKtE~9vD$bCGKq5Yo7&l%TBM%D>@{OuCoBfshQ&0*p!COOZ8 ztf0)h!HBJkiZv4T5ZMl<&eV)nR-XEyN>I1^8nJ62KkrW$LLg>VXpZIPA(l53gz+Os5JlAb9LB@~Z;&z(`Fu>eTl7|-^N&r%1WhJU-Z=~!Pw13L+GG+E|SH?q&9StbdhkxG9UI%-2tVp)d z)!O6omwj1D^$gx{G|E;FHcME?FD{dd!hCC5mgrdXV{{SwDo?eg+SPNi-t)!&e4L~c z9VW@3_W@s7)^=@fZMo<|Noy5=SI@WP+N^hFXMc~^Ic;6$<~b|Vc@y=?n)|3|h53`Y zL4{IdXT2ZHy6e7Z#52_S`evN^IgF;f;w5<4oYV5lIrgFo{XHqe%wO@v4|ZAfv*nsT z`7pl2Z&P~=kRGguZ{ek(G8-h9BT|i(v6K-o5VL{t=~={7+LX+tC={Q?!c^Eyi+DG| zBBK15{mn4CnbA&><#g{z73D|pPrsj+w&<@sqZJYOHNWmGuRo6q!HL)qU5=NtcBzzf zpR~0fGu#P%#2q#onPPIbB^n*K-ksw%9KlW#-IfCPkM=-tvjNh+&^v_~Z0oDJ6AJVMO>)>k=p0vrO&2 zv<*9doS#&dMCpT}o4z_lTqiB?6#L~Kvidp%OOys7>{8+Vd@7v`lH$lsr*WX))h>lc 
zd&Xe3K*xzd`_otC#u|o423eH@`v53WzFN|=#_Rj+CZ6xKMO?Q+s)V zZ4{pzbXGb|-$#X!c~}RF5wz6O>WcYGKPe}CZXMwmc4~u#=PDdkB zAyRx^xu6xpMx_O4SN<&2!bQctM2I}T7-&>`B5EAnQSrEA@s<0+8q46p?M7iVp~;!R zOaQR|+)j?tVX*S&F{L2;{n{%W7gSnJ<>Dv(tyJAXVHVZ&9zHOOenu2n=4ne@Zn=42mb=|HEI6XSD=<@b6M z`?jFW=a$AG?(LxYRLC~tM~(NA>aGfXRDlISOCQv@K#Xp$b9QR!o63e1D0QR4ouy#U z*69`~*y2Ndy{1`l)UVn^o(QwI+?*5hw)8ygAnCg@^lF=uhTaq0Z zpp-x+kF(hUe0qH|1ZfhbJ)*`MT$pQ-$etp1)mfGEMa{1DKI|@;lRBvR8d(3S#mBcV zKji6Z^|w5Jm4+Fc)OYbR))ibA=@V$zL7&v1yizqdr-u^kUs#Y9*4``VNp_w$Us`lt z8Fu+WNy1)JX@JQ5a4S7OUAl8@3=J$!k@ikqY$tExb9}*0>Qd#>^;lrma#YN!ddZBi>-usU_86V%+gi0JLV08rIn}3chrfoE^yS5`MMA}i3{U+%D8U+BPk!P+EuTfNWb5Ixh3JTFb!LbD9PS~7{^Sbgh4uT!ELRjS(h z#TJzK(jzHMi#b|hoF}`J@Nz>RD_MKGE3pdQmi;d5`AH;W@a(Rm3&ZY4@<;6Jv?qPZ zNyhPV@mqze0PB<^#vS#VU#;X+T_3k(%#_EA%zkb3JS_w^o3AG0D>fU3hpFa$v`=Da zkO~j`xkpbi{H(B4RMRN~`7ASKF{XEiy!yJ;ekx-4z%e;~_Sz%or@Hl6F>D62pCAm= zm3(v@SD<_Gj>7Tu`)D%jjRek*7v@5FL94iN#{HOT%tRLzsn=a*ypZutU-q6&t>(f` zRxI#ot*m0nA(f72-y@H2fq;2qBO>39i7vwn**cl*6OAJ#RP<8;*K%C{nPb%?cyZLD~`TelUc zd*9)K^X<{`FR7y9=3)wP5zmyU=-Q&;kg}&{A#FHVOia47_h@L;R=H81@3W1Oli@%~(;&2x^2idcD z9lIL&z1u|qKL3-F zHarqHE$Gf;N*XxAuHL8eS;tE}2B9}(~*=(xk+6OrrS#1*1lc2pR8m`SL^TbLFWET8FKCcwT@SdNc~47 z%I$nU*{xWl)l1%P=35XOqn5Nojq3~33{(0KL0u)W=TSyJNmMGStT=XDV9$2m>MUo< z#=y+eawJ3-bbM5@}-Ng+*a5uVX`* z>j{JK^~2SjL|oUTu~^v_)XkVMvIxK7p$F(N;^bv;d;S`0P4B9ukc~FaT%)`sii8|7 zs=uzid{3FnEnA|}UzVrSkmqHprnqw1_3;Hdo3}DVZGiO@MM|2$!hB$9A0-yz^y_ps znCV^IIa18f26v>hsjgPKZRrdd3w~G6`_Ac%eqf8p)#G2fJ=|92Z#!&Tq_htpdfH+3 zsu-1&Ays@U^Gcf&n%O=%04)LuEkX9Atc}J!c2p-NRt$e?9!GmA1U!+AiA!Ov$9f~^ z$Raf3^QQQU{Hq;Y(eu4mCdN3bLpZ6{v86e$*xxY=)D_`qgqe!1Bv-+Vh8dmBOf{Vp zSC*bIKH`;=FOf|ZQRzr)5Nj{%Ys`%DlO=FJ**$7#3h0t7op^^ zyEBmImk)(+5rqr1}G5$%XRI}8X zv+kYGodi_+1Z9An&n09%LXG6X%CihBMOm6l!m2tYGOXBaSjKw> zR1NL?>Hc-3_7Iu3xmbYRA!en2#`;a%Y)O2>45!{>?Nj=68nzxlEXsmdAa742-P7}@ zK>|4RH^*mQFL8h9(|(||*pX@rzRd4iW5MiU$!KrOF_}&Crqnu1D)jz|n`OJ#Gg6Sh z8^RFv4W(24xTv!f`xT@h8uc~FvNtxtot5R2k4-~|WN$uru93zzEp!%3h0LZjsKQ=! 
zI8UE*7h)}1qoO1)p=*0_I_l^z4RAuxauni{<`>ptQ_Y8hk1x&jtyKfdH!$Umj|E=h z4LN`V?dZgxH3erDA(PBu&+0d%0s9O3`a0updX!gOENo&O5i{7>LK0O`lAa3xSn%3K z?st;axTLF;=H7J1e1$u;xU`lw{E~LGxb~HeOlWy*MMvE{7HM3qY|D^9COJwY#^doz zH*W*4O40f6VEtu#OGMuR6J!%Nf)LrY;B@9C{qIu+kB50oEstWYbAw9=uW_(?8RWmb z?AM-TkePBHBr_h&gRn8AB}T5&b|}7uN)q^HKjsN}W7QN1<02&J4ly)oWR(yY|KP3i zLcQe5e1j*ADCc|eBj>5q0oSJ>SXC;*)@*H;%R6$R6PvUF) zmYRdUsg{0>zcki5_jaGpirpDgYHP?@lg?OL1=i9YR;k9kx&Ppy&oLw0qo_r?vLbZ& z49TFs7}1HLX|m_?eH_~OBl=zhF)nSDT%`4o7og{L_+%>#Pk=7&IqzKRQtomw2cAdx zyD+E4Nmq(sI<}5&bpA5A(B($Q%PGk+%n?#en$IQ^#@C?s-~dFPcujYP|MV*7ZJX7Z zfJp_97D=5`rabxNsL0L`F;%KldX>~5n#JXjIrM(%&%F)KI7mt?k!${Im9TYS9mv46Gjm7S7 zhUX6ht`FA1WD8_r+ zr}EM$b=5j{8D&vh?}yv7Q`u9(ZUXos_!c0)z@UH40#&tN^>?%6A^b=M!27LFpLM!_ z@aJ@g48Za>p6NNs6*X13fBZ=N)uq4wCj<4?*yTQDsqfF6;rF5z*->kb=hCC(&Txlf5^ ztdt#ygU^F;dr9hlAUX3>5u_P}`J(nY^w^|+FxS;;CUKU>at)EKXZqs*a|ge5`LiKW zF@`~tq^%wC_g)2L9@?Rg0!*f3xj*P4oYcc;^|^> z;mVB%ED?b#xKfdPNVbtfkXMRnmLnAx9raUdCBwttl1JhnkL1l2keJQENd;y0)lSaM z{MKCqL&}I6OSahIs0k5r7>hOJQ%jv-9C8wDQ30`Cl5aoW6^U*4NzN5ww#FHfe*&K8 zTkW5AQz=e&dm3^S+1_`Aih4`478eOTZSx(-$iig0YQt826@v!w|YOGt*d%|JS_j#1X?G;~n4C~jH1VmnduBNjvGC=95tug0h^ zMvRUsK#G_qG0;jzBp>b%R9qXDu#u@6)-G#?I#r(Cq6m5stgrZ^64Ahwysc^)BJ|(m%Sa7}Yl1ZrQo2q@w zWH#re=@fj^Q5CZZ_7da3A5(pUyh$H$o-oXBvOZp-0C_-s5{HL|>XOYpn1b(Gzt;NE z%XSyr6QFF`vo1ZmIoAeS2Pv+6v6L#+eNx_Pt-`g5*)8}Oz_Bszq4|xS3#pR}w<%~_ zoqRquL`67^T3f3MQGjDID&~2!RKCS___Dt!VwNz+V_18uzby*Y&#zm*s_ptu>n&f$ zIl%gbG`5mpPgg~)hE2U=^ewoFlg;IEMk>!%*tt zGDW4^+=6|tSPp6X9XXHY(Aa>GtNK*Vi#YvK2?a4j5T0#q?S^HvB3n3ixw;Z^s)C5p zjF5#)u(kavz0)N`ypwy-ymi>1C8UgjI*x;z#q~0J&5`AkIw@i~jN_PAq|=$5FvZ-M zO#omX^X3Dt7yY-#0j^S_WxF9}DT{)LfYo7v1r#n?`{;6-nU_4lnMr7|+zb#+lu zSxIiywt!=yZbk;y&xN{85!kxQ1wN>h?)H^FIjBtUK9UYIZZPl(tVhUiau8pv*q%y-u3&x~#jPKR~6O8>re0V4y#QCDgWleE-2h>_eGr z|9<|S1qrFzF4iZkSH%2X5-ihq8K2oW7)!@?karG>FYZJSDHVYcDQxu{xXcVQI4v*N zh8%U?Bj`zE1A53HT8x} zxgr3*hUKDaQ1%QvDp5*k`o(j`DE)WGPf^)5=|rT)Spob$Gl)DRXtE=oAL3v)2{Y?u 
zV_w`ZBmq?iTa4c!vVBDIdM>-0PzzgL%sdkE$D>WcguhPSG3;r*tibn%jiEHuF({U% zPK&&Ct!){h^QG}c3nc1Yx+%54o$Z){c1Ql2)?nvW&J4vFJGI#8Sm34g-i#DFwEJ06 zjd9gjPb64Fg$+`$O{ql-{EqS_oh4nF2hF@Jp2`%49m*g`Cqg@TAUsM*<0F9e zCcAeBCxM4E@JF7<_t_gYU(Mpsq!(t*p61)dA`%EG2Z$JlwP-uI%}=e*V(>{>H??6SN~ z@i=_GCOzXtrMAR;I!qS7h((c0$oZ;x?9h<3{F{#^qTzI>>(x_2;Tvy)A-z52gAUr^ zlBay%sdoz*NS!bI-kY!6yf;gmyu}XqXiK&zv=M|H-I->2xY$L+k`kYR4BFI@s2BJZ z6vxYNXH&t{BpRtXO`L*7fMQ<_J@2PXGc$U@epmGbiL4k*Z{MbfQSAyt z5i&CpO`Q^cEQ9Ij;uLD4JDQCTKz^i${UU=Gpox5%`=R6}%jp5!00p&|{FJ!ni+g8x zxhC-vq^%TR5{lJ>sT3$>1+mgXnb>@P23NvrhJ-BI>VRccEepc=+#-SwL3Zyqce&aCqynT>esctAx5Va+P1`h z>E)NLGrMs5{IssCV68b<11H#F{a~vf{eMC+1p5;IjJ0ORgx_-Wp8AaQ`}*8ee6#C&x{QJ zf7&rsN5#w*zQOH3w!#HCm|9yt{#d@2?J(4+CZCM>hUY-?hlkQ(%5RM z*kHxO*7LSW2u-VJQt+ZBtS#Nb6LilX)}6wXN?Xkh`hMyyLLoN&GS$zTE7foLQu)Y> zF+BJGo}Wo(hx^~#p65!@KOD4$kP-}h_VD{+oIG3Ap}vS_i9z`orgUx_#h>-w4(JKYW1J2>6uT>vICw3X`me=XT&J zt|a}zY_3dysn9r%&95^KW5+9B+co3_hxh?{Y@o+ms1dR81`2iySGxFhNpG#8>+aZx z`J%+iCO8m~SU{fDbwDOEVJy%VnTmo3rt|@5Ygxh$2aU-B`UTczY~6`W3(|vc1)*^j z8V4wioPzfUYcP$6Mjq!fKEpB5nkLIoa|jl@BYl-VZ!K*CdHYUVt1pVM8$UqrLj)=ygp zb2X_V=IxZ!gV32sNqX`>-r`PokEas=*`dg^$c{bOVA` zOLu`wabl!J%ZRr3rU z4TRR*BA0?uIJHLlI?-rySr>nCeB};&|7zR{Mi657tnvctPOZlx+!ae0w~*-?G%k1-A0A3y%iyV3P! 
zkT~K8u0v+MaPI^Bs*-)++ys<9L^^eK6AU3MXsi+B0IrPYARvLtj%g>_PX3d@hAwkg zG<=)>cwuUdrlSi**0{YBYNa;@id9`Il}?|U$}dBwRc1MBzi`_C0$;f6nq&Ltmj3bp z!CmSdZyewj6ub{cMj<;H07EZ-qyuDvYK^BDyh>#kU2ZS->ubf;JRU1By|8uibCWht zfWF_8u%6|?HmLe^*V{(6)4WM@_0m?5R%{2T2c?iToYs-fezV)M_~3sgN*3x-_yS#u zIjfmVwNM;T=S_W<-#3oIyn9C~g#=$*LRFjsX#R<{ck(?lAvf$!Lw&YB)F=fa;1MRmSxa|V%QmQ#4)QOjC#A|VG;;`hpG zvj5an;_W_;4tL&1Y=0o-Q~H1uf!}YW9(>&mL(>XhjXtq8?+bgX29rY=WGsL^rx!WF zpAGhiciZAUM$4X1@>zXti~k%`=4a;Xwl6iWn<}cD%zhS4SPD;C4i`r_#!i|~0gGn- z@PK~^zYib=0TO$QPMUh-28I>7tK`eK3M*kygC1W7(`MQ#uq~6|rXLpY;#BX!^w}Kj?fKA*#GXSDX@W$r8W7)|a% zqd!6h^0MQ@$O8{og*9DPK>Da3OUY`i!3ulNm>`ZuxV%c8xo(BBSgHPz(??bP;CFJhFQ{i&OdAV}6{E#_hXtnfr-Cj}tdwwudn*w3&x z;UK7-=KDhx%ukCxk8m9Y3TdfHLh_f=;RYrB5GWkbtD2ivdKg@j%r=0`=_7sMxRJ4T ztFQDEUz}d#aOM|sv%)==uaMM>NJdy`2pm^-SwWJM^xeErU;wOE!ntb!{ju~X7A)54 zFwmXGl?gODskg0opSy5*8=a&ZS5uh_z41kP+^wH^S#z=5%W70rVy8Is!rg-;Rr)A+ zG?t=|^7DDXv%yqX5Hx_nSC}zJPlgD>2-u!QPl{sbwNQt3itWQ*0h!W)Qq)0kI};V< zhL%m+e;V3bZIR5`1Z1A0&jg;ez39yQWQN^6h`gCp z^|ru`9x6dg(#vb54jBe)5Rj`^p|Lm=*i)kg;JU;9Xp%(sp-H=rlLwpTrUUK#Wq9z; zVK$`DtRtZpu5`O81~+$BT{4~liCrg#Dn5l=VNT%RP${2T^jAd9w7r9ZG$Gan&!C;K z6@Gb-AAGctaz3zP1lZ87Q^6HF0P=NH3ATsD)mA?9VD8#;H@W2SL0S>T8X7;ZrPTk2 zmfG-<^xdP941neJ*&UAPR!8}j85aa?MqWc6)Nw&&*#t9uMeZm!EwG zH-llRo8mV)?U_|Btl;|J7$n~|^l-`VaEgG4TT0Us0jECAwdL?U2LVY(1P(l-rs5^H zBq~{RCwVaZ6wBMDP@f95<=~fls+m4LRqx0*d`sSVmz^a~xZ2R`#?>k5brP{bvlSFh zvZUd0aC`9Ip6%}U``Pc}t&V^@YA^Vo>njM#EIT^S*2%;HYP)|{Y=M06lv_2JA?Z19 z3?yQl_B6MP>Z%~O-SaEqSHyC2B>5VhsT!>Av+Vw#EmU-;TpogGUioJQI*IFGCC0&=>hWA!_-ZawAXc-=Sh*Br}9HzSA_W6V@_m6xdcNPA? z1gAnGb?Wgi#`#jqBRO)HH=Qd1hG0>hM6m+#WSf5Q-Q`Eo0S(~irOU~xcaIdTi);;? 
z?@2Ap3&jQW89+W7DlVtV7ut}D@@tP5YiTo{@VG8Z&+Dqs%MaQ)eiiX>kPJdXhF3p0 zAQyhTizhT7;m1w!g6lZK>XZ~T`wiGjbz7^5c#Eh7d}kSHCT3O}$`Ar(2>ex3UKvU- z7RZnmTMux{sjU|Wcbd1~a3%5T=GKj*(kPL_7u= zlRa2mlT+cUr;X@$yB6xCn3nEW^qBeANj0<24B^?aY0oFJCZ~k~ruE9AcXil#^F~-X z{dGUM>$k9f+!j^Qp-9E+<@R?fjNbDw|1VVe4Vf+*C~av}S_G~b98I?WCYtsCO*EI( z|9fcqD*m_7)bn`LvMvp*a&{BN{Bv&X7@Y8B`rxBK*p502A938HP`H)EJ;X8l{*wUuRcttR@F0eO zFdM&p3+$$W+lJgjnpZ*he`mueAz3=aW}EqBRKP>;uUG#<0;R@n=RaKO1CJs9OFM*A zyru^`{R0@d#Yz(X3sWQu^&cPpg$mq@3}gQS6g?J@gZ3{-;Ev<=>Mu0;BPmDw^YoLfa`W1X+HE@PvDez%uX$jW!g#Y^u`D% zv*CJHna}qL+f+&eWpk^4b5+bXI?|BLZ?TL-;0b73js1AJ6rkc&Xv$lkl07+Dk+S2G z*e)FDsJSb5vwjP8Y)%V?vYD2$;iXrKy!7mFGEDbRFnH7IXr;SSBP3JWV$ccHC?R&A zaLj_Ra(JE27Qol2j618~|coB~)gs*cy~%3pAhvAy)lHQp@Cu zTvG-VJt{r3hOffYdQ^cs7!vB<_)Ai`R?8+aG#d(0739(q@~7u1J3#!&@ zUNmR^M?_d&`o|PW*TpcCA>c3Cz;qOzVDckw_fl+M=0FlS-qMPuXx$c&!y?S>o@1DD zl#qpXjZ>lY@^Udz!b9YPMIq-OswMUr{gkdVX1?lw|5SQZ>x4V8*Z)H{H9gwFXv9Ii zm-H<$cn2}l=zf^X&0=FZX+gah>(Hq$)3#?CN5*R0PK@qKay1OPY~4L^JKxB|SG2{t zFTNs=sevl2L*!16?#*^23EhBt$))sPe?s0$rYFljD&Km+U3fy4a9}Md2ENm0*llC$ zX%I|hZ)#0dfZy@0XL$kdIX}ifdX9SDTWiNYls)xFmF-^m7YYwXD+rzqWKk~=3yr84 zA9KLt9VC<8rrvlijQ4ZxPB9DgvM&pn)FTT*`(4H(P1=}HW7|xi%9?D4i-LxvL`(_- z6|kc9y?DRJN9liB!Qd{N36L^UY>7Q#U}E$9s3wG;h^fit1BUIQ;B2Chp97 zSIjfQlEy}6z-ReZo!VF0Q{LQS6dc=TAm8P-kj?}zf=5Hy&ufrN;DPaPc+ZZ^NF!eh zjq!uK6$i=lz9fuDUMa2yT4Kg0oQ;9GpO2J=)N1)dWQ4MDhQVe4&9V_ag))w!LBB}* ziUHe5H+uD|N78%;fM15QYVB}#pwENdNPewcsB`jHwEcXIg?i$1ot-P#mjMwbStrRCkmPOKdL)`!9n(!3H=W5JK0t5A@j zkj=jMu8W+7+>kzz56}|3*O4(`ZOo?}!p4@$vlmvTCssi9$@xW#U@)D3xs+=S6op7c zN#1eF(}`e_w{~^I%Zk$Ww`>14WC$jWkE)s*AGHupRf2&INXZ(yly)Bm{RYWXqmm-; z%$Zu5maSQu+BC+)JO1z6V?3xQK9(UDfsX^B(^CK~&N7r?FPZ*DZ zT11*o%qKXIlmskN3~q^yO9v4^W2$?liA3^=xN!Z?t;Q|YD>bE?nBQGU{t>2rmb?o4>*=@d~x|II6}R~ zf}vsz&Ujy-kKfPW41*T$q{J*`9hLR^$~snB3h^$3ewCjf>`C9ASt+FQI6o_b>UW(+ zn>hUwxIzXRHa0@NN!MUp8wvsYrnvoG6!u574uQ^u%5#{{-4LUk9Hfht{U1703PsGK zU>tfM<0h8RFJM6oiwWuVJsv=bjqPJ{2ucFu%FrhU0O680lUu;$lso!htnKrqaZi$o 
zteM1P_{k_k(-e!LnvVlqLX6=jTD8y)|HAAiqp^X{NViJ|mwv5jX*Kpne5+y zGE)n!DoRr&{#vvMg1^gp1RVhhoQxbzU*|AB_Q3MYzu4C_eMp|J~RuWE&w`P4? zGJ{Ia7H~IdrV-9w0kh8U#ZGWXEoAVhH;$L?6<@IY^pHQ?#7(JY_vL2uetmt~j4=Ou z?A3X{?W^IAZRoEtr+730eVUxfy6zDxT;F!fkQql z>@_@CCsz(6?%4##?TI>T>fuxwP)$6}DxO&aZrp))?AkOm2>lUP{~V5=0hi&+(S*bm zAz(u;HX6bJX#WTEWc6iW;SAe#(;Y^94Q9TWh=qh*qO=eNmrW!(hVVk(?OdaR1w6!= zYHi=}E?sbi!%a}A3u7iJUfKz!$*d%*x0DGFCqK8?mAMPuiqXJdS4!#DXCgbstEmn( zK4<#u55t2CEHxo~y`Grz;CI?l(s3eML=b0G#~Chld;0S-Ij<641toH zP^zZP>g}@_TmI37`C&pJS)qrJ>qhTxQk?(zeIg10TYd1< z@%FTr;#k8VSj}FUxc_eK`!W;qma0whjS42r42e^$GfY<#_%8L|Nknvu1fHN|1$IIpSfV zTi%qwFL}7yQNadZ=u*i#>phP3zY8@^cuv`d& z%g4XNIt^VDK+mzAd;t^*F~;){+nI6KX!7_3^~W7@C@qoSg42SPooeTUZX zO%odyfwM=}t+6qQXO*X+T!kljRK8&vFb@>Wf_Q*tl4vfF`9Xr9JGNIoJ8$~fS-hgQ zYx&{X_djwTURi~6bq;)<6!l!17*k;pXy@nHvhv>D4dMyC!ar3F=*;x44E}hs-$S#y zVc|4QG&s*}#VBf^b$$2(+Y+i%_iM$}hpTX)N-hCl8Ayk^^z){e8f?0{A?e}LDIWh) z`B?(vKlIFm=Cr(z&h0g_(;0_|Y|ForpD`S~bP1{DJG+bpnA-J?8 zF%i5<8?1@xl1%{AlD;q3c;6LS8)(!Ahc}V&9IoB*RUpz+B4vEs$CxE{m%(h|t-7pR zpx)yNbQ70<#8G9pCh2n76CRB#{HoSn^`w5V!Z4bKd_gj>$e&mr?(Ub;E)BJ^P^o1Z zJ}nOe(F{WLy}QK6d6AF-bt?`BV6miX&St$XGJdc3U3ewH+H*70NzY zA_|gqG5sFhnpr>Yb}*qKbUG!l1+4ogd0?Z zDS4^bEqkQ*NG))zCS#Xy{HE4gJ#nG~e##gwRdeqL%!5$t7agO!#W_kOV2!?}~=w*F_lCZsy=a+ODL5>XOUCe%qrjLM;kEt8+h2 znPy%)?(UA_>{+&+dX~bL@mW6eW}TMe8qF}6s-2;)t=$4D?U1UWHy#BE5iM%b_*gGG zl?5;tah@@L-}-9yx~DH7Um*^#(O33GU3~dF8-As#IK8@vz6%+1R0v{$a?(eoYVe^IQ8LCk)9u zzwLx?iV0DnNYzlV`GrQ#sZDH}EV?6=+?opQTzaphv8{urOds4D_q%i?luPAl3VLx% z=i6b5=jnvG{ry|~v4ycAgvHU(&OB9R(vy}JgrY*QI9?jJiG9ldr@m=emes&n58IX< zWs}tkGEIFHv}OgY7{H!%`7%U0bs z>8NMz{RpzuCt35GjAMI(D0G(Umbh^ z?_eiJxxq$A?hF{h&n{HIvo)5ej$y!=-x51B1>NGWtgRf9gWPYcJh4!Rz_V0?qIF)F zqc_-GN>ix+wKeAF%;P%v!Gq4h)A3O)BXJPwf`cV`?IH^_FnI63a znAqup$mEFiXNBBjPJfEqsxXce=Ye|bSru_2NpRN~?3N=F*q=sd6Iniu^05XMsI4@i zZ|Opnc~(JqAD3T2a^oda8HHSqi=EIg5C!?s%#T|rB2X3%lRa`r=;rtxm5)~W;k5FH zSjD4{k>!A33n(K-12RH4)1TkM3++Sz2WGMBUfGGn@_+uWKMNb?=~3Td8GUm@xS5DD z8{3b7z&GtbzlxR3wv6@lB#YspXnJ6QSRxMY~e+suFAbj}8 
zGvO@y|3pISe8sQ*Z>r=OlE7@hGqLjvcpfCa%`g-!*nz%%z<#lp3sCW3{uF*;* z%v1H70}_HGPkGB}{I>D=^gm>A*(&6MWV8E!qrqpMEYbb7M9h1lo~KtUw>#}{u~$7+ zq|sO)8SE+KKG4_atm>kPjEp=3omk!(%UNCYoGCFf{5A}KG9#v##{5aS?=H>1a^=Z} zUmChj^!_!Hm+^yO&qS7b|9QTnZ9ZXhHSA=s89w`2XKPp$oQe$9Yh|F|zTI7d6PXWm z-~;Q$dmy5lfAX3x10a(t=)8iE%0sT$V(05|$gZFN9M1aY%F<84>B_%2kF^n8$e@U3 zlUo>hvwoR9j3VITJ&-ttZmBj?*c1L6?_8kn{*N{A~B-pj_MVY}tGF>_BtY*TB- z_T}dcYi{so2Fj4NUy}s~&rooTwEKu%rehTN)(r{yPi}ay(9;#<%kQ`~8I&>ReLgQ7 z7vm4l7ci7DyT#MvsD~%Jc3R~|jb<_v*PjqfQf#N}Cd+iqzv6mYw`kBe>uyITD;WD0 zMqUUP>uwJvE2#M8N@k`l+4TK8I8W07c2gxs0Ta!}MS+Ln6eX8^H27uYt)pu;&bFg# z9OA~sww68RHnqSVHv+u%=II)i9e=yK8<*(S-I!+4G6&;~H*OQ#<>GkQ>|Pm@6fnd@ ze;(5xp}K&wRq81b&DiAh%uIc(%TQZWm!m=lMXT~f?e#$9g7c|}a%%-^)5?qG+^@N@ z{>l7mLTIgljS-SoTP?0{b}nEH)nrwLuOCmhocwC`I8YpR*+~iN(uykw2R=zY12WTJ zY?;Tn?~UbH)!G_hJ(XA}G^(|o8b}OgPGq(b=K?F7rsa_=WX_kk%|&<`*j&^n5IU;Z z=<#hJA1<%PwOB#E@iyoKrT&BAg4)bIcaxGw!HGrFd#47FmhyWK7n9T+Hj>j)?hT9V zoK%uDZ+7uC_z{0G%YhZ@GM6XVn|@Hq`d>RaAH>eL{FY|p{QJK%<8_v<+s5~ zu0NU_W@>X6f0QR9-%pOVg)P_nllJ%Ri+9qD`3qvDPBT=Bi>uh$E&<17R6()_iXM1` zVa^5_Z(R=ia@Oh&7cUGCjaq%qcgFMN4vj>8PboZkARTKh0*(^P+q)`w%@McN%*Ufb zHeJp8z6VX4CwNbc_!x%>@U1gpC&M!|F$QeU0}KxA^%sj88ocJ~9OR-&kJ_iha2Y)I zW~rhebKHeL`dTMzY*wdBYi+03C5#bVz*eU_mWK^%mm%o$ZFfGqTBS`M+s8w%Rl+#R zD^yAhTCE136w~vw`NDVYgf4keTJOA@)MhsFJ{v8(Zxgn*y)SkW^Q-N5&`wLTIWe5;esw}a(Q+QS|I(qk=S(P^?{6@cFWKAFkK98p*Lm8X z5!~ZVDCa+US860RjSrMbYJHK z4cjhF%8I|NhbW<;p^at=^P6)i}GLoN%;;j7R*tnP{mnT7`oTAZ6NX2xTdZ%vT6Y~T}Q50haJ zBr&mPOX^89SJk%ztumw5ZU1vf>(6%uzQJEpH!xKEEH~3>dQih{*S47R( zH|M|3wTo!EHHgJPP2tDY?D$H>hd-RHJ1hN16vKiI?F3Qk+}5qUKPe%N_wNff`*;oV zi-~SW$vM_@5E#wF7Yb}Sm3xV>v6+TK1OQh8MxO+le_eyb7y z4OMC8n5_%+%O1&_@n&ea>H6w9=YHX_ck&de`Tly!=$k=SCdjAWXKNyL;+y~2)y7$C zTPCl=UbBNS1<1M6B|zqz$6}0?cU|PpT-7f^?)(_v`zXGyBr=zXFGgl`)07?qQM_-`3jjWQqHEYQB&%EOzdXeombiv(|Wv1lLuU3dDM)bhO)p*CZ*4S65 zQgbR!&@Biz?!hoxkk`^F+rF$YGj#N@wMWjJ;z!dSxemVsgw)WI#Z@*$=SQ@bI zI8Ine)m_@!<3Dj)F%jcG;kbHd^X9i~MyNj;OfI%DFA`NXp64qf9CT&zmxK(%hAE|^ 
zsHWLNTll8;q`fb%@6n?1HC!E(IBW%{3>sTGPu63(eEC>j7}TQc{PxVNA2)qFcG`P= zBr+wX-eJD2DTGxd4n%v?99GUrKK@H_wg-*EI8ey-j0PjB?RZ9lIh2SKWjXq^{Xh_T zvQtJkUXg@_;7G)FF~?RU5R1g^8oW`o;;ZZ1uNWB4Wpj!nOjPDP?Eh9KO$<_RoV`-q z1kO?=FY-aEy^`2?sqdn2YZ~SdilEh8zBBO z5FD%b^NGNHiWRv??X^WzRKXStkK5>NUMaMBC12N*-k{F2yGl~QuqBpTK&5Rrjgt?w zuy|);HID>7#x2!fvW){#skMytXc62jv*-uTm-kc~%GkA-+dlCSO#I)o?ZdGNv9GNvcpn_)8(i1$M^cQg>=jcA5^ ztIZ?f*ulTr7rqypXT#x1WiBL?O_7^H_0#g0`4t-+DL89SKjnbYMu}uMRLTgo%w4cI z!R#(?yAHzgKipdHi2~R4w#>>#PObV6R}Zgig4`PCW3*=LvU}nQHD&ny)^}cJ5GU;C zr@NW2Vi|><`j@etV7aWJaR0Ti6roCjnPR>=N|(=OF>`&i`fz{qRYOfJ4(dMB8g|tR z23(42=FO#)SZ42s_9hS~s}{ectf9KiJ$yb?)@~mh(#LKqHFXKuG7@ke`y+eDTTYID&iq9UlTyY!0kII&epHMY=XISjE8kl@SlqueG zyfI>uqw1QFy;qiuA7{I$rP2l^1=q16S2C5iSBHr;=MxbYR&<+bEO#6x8&3cezls55 zAJ0WZa7yC-a2tzIeGjrVGmL|Zwj2WU3bbAic|+&8K0Hvzcu*$$Spzy zs9tyb%~&oFR(TdUU=pJ`eKvV=iUwxZk9LGD?0 z^&fWb_arN08%((^3`0#IvRt7B`-O05t}-Eql%LghnytOQZ#yL`*PzH!e>&0Bk*OE4I z2FIsbW+YWNx6SEYiA>S)6+uzudbh498>oBVo-VtLgh1spT8TiZG`EPLXnmDOYn2?m zTvHP-#N$Hmnb;LfixBla!w2~*wd?EKVeFj=a@&?xx25V5PyNCk^uUzbr9gM(09>F~ zo`B^ph}zSO4`p-J{hrul)_Z8ac?)NXK%xyY7e7TeNhay${-<=xMs-j@?VFK%NVjrphtd&26eV3viN&7O9{sm3;6pM34fMD>R z^F-bFf$>w29?7p~JuFheQ>o-Nmd%RmOE|GE9@n$8O3HE@>S;8_#fa@qY=5)D1H5(x zcRZ8gS<$t4gPj?N;oRMks}aB7IyKdNzX1}`N94)z&vvWMe~K$r_eh9lrFA+PE!;)8 z^C;?7J9rT!pXmJgn4V4?QP^{#^-3oA?V zXtn$F+gESY-tfw3U1ZBpLg1j359*nab4R}1m4bcB*j>*XT1<`31erYQ?Vq<*G?^}i z8|A{Sq&BN)7_*;}|J!`9TR`d0;7R;|-#iDs{fo^QXP&VQ`t{~O3o=3e8W2Oj0036|QCNI^ zM7b=S&#-#k#q*^E-eSH@#HvEezb3hhgS^@yE3_F91abaKCe3hNL;UpXNU$~WHok;c zNyikQ0%ezR@LcLiy8vN`+Vpxz3)ti{Dtr(Lz%uu*A{>CGv_D(XB)x|LWB)SVfL!!0 z!;8l++ps;RsL};{P(J(+Hk0gEZqpvfyk z7831=mHuVw&i012mlmz2Qu!aeTHgOylqhi@wC=NvJ4|t%*eBfy+v&@G)DW8}1uZbl zd)uv)HS2Wbmv;&xrnLWHX+YECA7r_Exg5-X7)_0hZ&V7@|rB?J>@M#jW4U+Hy}q@h;^sTc8LFrvjHuS%Ns2B#uCA3KEznw zZM3ME7~$KS(-?u^^ckd1ieh|B%+vM<8hnETmx0O};~%|(>?n+8Y~E}q65E257V1T5 z>geo6!~g`D)RNIiE(LF4DXGDL;wY`Z&4JI8z7eJSnMuSEEMqHjPoOusy`@FgOK_;} zhT8q8uhFTgaPt1OT zwI4EQZ$ZBy*x1ORpgGm|3!T_a45r>6@9E`4SQX 
zM3KOZs_JS+>T*T08HJ$rBdND-Dah>;$!bm9r>T*Jt+sfSQI4n z_eU?==liN`S+_fYe_PRew)dIU-VCf`QM#$h1v^CyZ?tkt#)Cw^1mER`4WbZ^xjS6dxdWdmq= z{eeTWyw~xw?93M)zc1L)v=u`x+}>n)pXj~Wk>N?;!M7UsZKub*ikxrhY=y8M1L@6m z)cJ{+IS<|&IBPy$>FV0^QKj^0r=FhP6aHr{K%yw_8m~qMFbS;DHO=+~wIX|O{3sL)FUeG0!I_kO z!u!jr(yln+!*Y)MBzUZ{&&J1-(NsBggY^C@HCI#v8CWQ%dXeX@-tph*Ri`__zznZ+ z92VVMXmV|R+V}G1uf+k>?NnO+gW_BH&a_uonjp&q;KmLFzi#eo40LDdhSSJrGTFaZ zgqc=HqaMOZ?N-4jqOEg0e6pIk)Sbd+gf2}d4qGkBX_9Yz8g5U1%Xeo(%NDp-1%#BH zaHlP>I`Se5!e9CuN78OU0(6GTLxqidnAN&!Lqj{>FTS%Z$-8bxJr>K>odgpe1Uk$P zHnnGM3p-<-)t<4;TUC1s#2&F-J}y+;a89Mf3LRkG92%MY<#H?->q2}@&oowMe>H{I zd$++~${#=~(13E$_QqY6A~_ckv9Ym9Ny7C_^4_c0Q%2Bm8W_Q6QSUFOpkQ`rmosu$ zU|O!RdK^UZ4N^aU{yfs^CNNMI_fNZX-w0{_*!H5UWGEu|($%eZOPc8qLh2^iE zQ|BeXnwnh#6%BO*Ld=bp@W-c+jT zX6xx)Tp}O!!svQ0E8P`vc@`UJcf&)d=&f=RbobkV@9bM`^ACIaUp%eG&?7}`*MHc`BKsK)Ekc`kU_KjU8eC#vz>&d@O&oV+85$rixL=g zmTIX~<}fhFVglme+eLDlN<8ih{v2N>yS;(zMd+wWe9*e@W^{$FD-}dT5$d7aoibMb zn`*@o@_svZua2L+ox~~cnGO1SjL#d;^UZyA9@sVS$MNy;>mLzPef|AS5R3w$c6P|W z{;kgtkm!s9HmAT$=A2p+6R-vPt34JOxa2+YeQN#~doufZD;eWLb2*=lskGEspKH9l zY6q0NK-a{-?c`w=bk0CYfcxdU0LmeeN5&;-$C{(}BeLZ~lgd1y_!=MIfpt!Ed=++G zJ5f4FG%W5F&18nLm&b0rqTnQwiYb?AeG15>iB{n>sz*lbtZ~Rbn;I#$1|*{d-Aj^k zG@60~mZ;8gxKLM_goF@qr@Ei{_t#KYCY|@FUcQ7_LZwSz6*v!qo`6)}RGg{x+aG5f zm-el9TVYhs@(YG^agWS2v2jTG+APh$uHXu&`J+rcT5Jey|4NlNxe+wf=;_X}y$Szj z4jiX_U_E}TCT=j#6<#4BFgYtQCI{(p6hn`cjXEMANI%h{BIC`ynzL4Z^I_c#7TRVY z_(2R)d`h~bS&WFOxa-!&M=ihkr3E<;R&6ej{B-u+&uVyMQLA=0X100Zi%NF5y=xH) zwxj#NwFMT}g@s3?Av?&yz{c&aRqSV<>6g8#^^Oy3Ksoq|v6ltrUPq=^C#pRF zD+zd$Z35;_=!MJP=C)l)q^Vbo6Om8!Hl=O=WV#m* z^@U64m!<)=n%kOh?|Z2Evsw{~QM9ap)k4}gl^G~4-WIsa+^=kj{tTD=0DLEEe|Fpz zy_?#SGY>WQ$A+w9zC$v{qW90UC5QQLFW93+lI;q7u*Xib9+g3T{CMdZ*BKFsVn@Kx zCM6{q6__T=y4|W7{_cJN#Y-S zp5p(J`dVpG%nA&tE@52PQuLN1Vuq3?<^5*T4XpUQkle{2TYHTc3T|ItA0^1*BMhV5 z+{Z>Xp=!(R@=7jCL`-R|eio0VveV2~$Kwddlg#VZt@}5E7U3O8HTyum&=kMIFSA-o zzk83ggq`TlM4H#fENUox*q}s3KdufhN|bi_>P?6;W(eh$3Z3Yh4+peum?c#)7f1+1hIa=lRX`~@U 
z3Eh%q1=}?w;Oa_CN*Wu_0jA85_hmtxDiH7j3du_~Uj02JB%~=H2P0YlX^x`WlVu=g z3FJfxai3e88?mVSum6zhd8V90i+yqqlqhL`rKGhjT8K@|vbNE9CPFQQHCYL2WF50Z zs}O#S&-n<%Cle`3&=^q*zv*ws!YD_2$AF+m*sP+1vR(LY=^TiVU%wl! zU_Tm#xY(V_Q_{`?`y6W(Ag82cVN)rb2nIG!dJ1?vVq~q=ob)UjUQj&+U0p7L;|@D! zl2_P=eVx8;uBcD4AjLTM?uQs#!s_Pr=Y@vNV&}k|J>(!!oa#$;v>|#|2Dc4HRPMm5 zRALcXd&*D(WxM+zT&cchcN4lIs+6QiORP+>-*@j;>F;_{rMMWQxcf{MUyMq3*L6Lx z1U6wplZj&{Lu!@hc#gihZ5YXAXpAyikGQ)n#*?Dt_U)}|D{d*?+rQ)U4Soq&{LWJD zquE%S{fI8FOc726`S)O5=LCsIy`nc@eM53rZJp0>o=^7n!CtrGQ;PiEv9Hf!CNe!r zm8n?^wRBy+(iR)l+8>iY?#A1WZ&enz@0io4#@p~vsEJ3cv$|7dAnOA`*0%X_t6j44 zZmVVexcfG`_ZLX>9+_IHC?Db`vd81GfS``xZ=hE^_WhZikV)90H*NJUB=8m=5Ckr7 zOgAXiH@M#kV;=+piI=|;_;0T!9B1TS4Vr`8rJdGJQ5>D*4kX;XRNT!hGLQnM+l~~$ zH~yc1Lib?tGvK$1>?;X^On~WNg$2EjaK3Fkvujl?b8iL@xII7@t=-*A_Jp{CY=eYX zL8rxK1A=ME61F-(AS>r>?1#k`m{kJ7#kFez-UOTl61t0ZZvjA%1i!D*~E~vt(5vcZdW6jL>!El#q{zph-!#`Sgx+q1{L30=lPK! z#5m;Vj1=;v%$8H7XdmFsE`-Z7!$4QFRstWj==YT`;bD1zi?8%+TKYB=UdJ{F6j2yYro3DOaj$bGfln8SaDv?X1qhP%hcRclQB%vi zr5gH-e%<^miMz)l4Q=y@%1Hs9-`;*OlA0{r2(p07g0cB|Z|^n9zw5tsKmxF?BkFhn5}pjH~e zDR*8xUR4wk?0Pnm6$s@c%kJtIZgg)i zb{AQpsen&9*Lkn85bzenNjSXq125GgaGdQX1CKJfnZxxRt;TiAEGl>6^wa%~X&`<* z?`TFA@zVyPdZ+%(Fkr7{++K8Ud4Jj0*LQSSlPr>wmjqVQJd`fjcyP#c@r@aux9^-) z7;bF~=MkZ_^>rz1Mte+ovmub%j~oU;?JP+w1io4sQzUH&xxxo3ZQFAPQy;)bJzO3| zKm|diRQH2yZ$>GKXL8)&s241E$d7U>(sr>df*ZPnht$+g?pt{&oHa|fdS=s+MSzjj zXbfV5t`lAfDB0?6G!JYl-iRka@!@bhk{6*~Apd^BRjaFYSM^S}Hfi*d^=59))gjJv zH$L07uU@7c*Tq%&_9qzTd2?zDZdHkqKl^pWwT7^Db4YkEzt9{;q)B*ny-Q2%^7F9k z8twelEOK`{&=50NUfLXAXJBH0 zoYXKrTx5#)>_?5F_c$+pdRw}KQ^{{DX?~^h+9Go@wtDCA1|kNid%KekB;wUlcIYM< zIp7nqA0))MO;GM?-`+|wVZtZf&0Q|Ur3+g<4iv7J1=?^v1A#t`ya)oqSy=$u{rso_ z$#(tO^@-F04~qQss}qc@X}YibBd@C0lNJ)`>FHBWo&YO&uL21tD{a!n%GWNnu< zjjh`yyrw=29T$slCk9x~oZHOca|Xpr%Ktm8Dd}CWb-)%ze7P9`#A0Q3K;`cKM4ej2 z?e00i(cHf_0VL@_66cNY%K*|QA*1{UfD?On?kXVvA@gW4&FO~k{k@deQtJApi15`h$e02$F*QKm?J&*U zDqRr`?_a$rF}x-}y>^@R>6-QfaqEK}HAipr?Iu;*gLuQ!PdiN#-X}$;x^J3m3`JZ` 
zQuD>LeJGTLd%u3TIXHLP*uxxW?S<&#RQ7yEqf!n7_NN=0BqyI|9Xj;kft|o_b-E_| zUh#I@o6{FbZ)S?HA^aU0vebTZ@Rn$jVhNn~)k9!(#;RX5Sqa{xkhI)V>%Q|ouCsUB zSRA{4i=aqcQ}A33rPcPNlXsW`895cbrN@UmLgz{Y8CF4_tDcz+Yi{%Awa)~)hnbCC z4qC6C*joenP&Mw1D0JM5lG0p7KzEmSznvB*4bSC=QxsqlI2Z-tu;H}XGa*>$$LE!^4NZ3Pr?fgB4BD40p<(;%M} z5~;UV=3$Pl-hC8=u-^MAaEJMR^p=&69Dnu`S0Rt925v)}L09h(bcIfV1k~;i3rR^C z-bQ~3m|Y&s15~u{l_X~yK(oT*q1kaRQpC8MdVpeR+^dsKK<-6bSln%5v&uKC6zwzn zU@!I)iDf(}$IesL)j^^V?J(l0L~zc7wzU?pcAfXDZF3(-p`#fIY z)2QOPJ)rlK+Z;sC7QSl+3#_okN2O=WiWDCL@0l%b`4JKcS@PEt6xHxt78e(P>8h%G zpP7X%IyN?oa%a%Z;HNh#6;4x;28uB)$XHPecK~l|0temnTOBDL7%<3ME2O{qquEXt zLx}4*RBjR1Ma`?8C8b{%)LEWuMcCWh@0~_6h})O_Ae7kp(mmifDvp?bh|27(;PmW$ z0kh+JC*mbd|GRlPQJMhcb)VPtC(?qn4yID6)XhY$Zi}jCJ0mwn1|vUVI+R5t(yuQc zFZh(jm`L^FiTLjq18Uhi0+s4!k%f;(42@FsNtyn@n+@n=`t=_k48s)vGqYWM6@k0- z9qhaRl;tODvb(;KtgFFSt{@JmK$M9Z-{9k=pp45;UB=G+r@!|*$cI`-97G_8JL3|_xW|0-@ zd>Pk+#q<7YX#?iO!z?I$Px?51{pZF10ir|yL%RXc9{AVfba9?>jp8B#ck=Ib{r4Fv zV0B%cTRf*A5)1&b9fJP={y?v?HHL(gIr-0W|LTBL6#u@lrqLjPLh1iWVVy$TB)s3M z_5ZT~B_KPpa`p?m3OS>D=69IDj>`X9CX67P^j>Ahk4Q8-4xhyI+TcFs%EicIQ#xi(ATk-X6XdSg zFSH=>4WgY~h(vd_qlTriqDKGcePNj*^u5m((tiV{DMY%A77;Z{;n+!q)L@9jdbfEwvFEMYi8;N2k&rY;-*S?VaCZSgl^pisV`O4XLB%JGE zgf*mY0_R!1#RZ@Mh~>F}ZO;WqsS=cu*?adU=#wJgK)+>XT0&2|REd*2`T&($RJ)QR zerPt>3D$AXQ4}f!NnM#@Kr zgPEmkFQ1I2;MOlFQ0Q1>U%a1tLvlUcT57yZXklD-*So$PXA7Il#F5&~J=h&%U>ki5_)G8kkp*lRONzUfA|c zmCc+7^4oyzE&_!Y)w`wDSEl4SZ*VKFiw9qBa>p23sD?e!d+jFu_+9LCaAqhyve6n zYola78sYW~)6_SDcM>2{5WnGkZ=y{d>E73tEK&c1Zv9t&av{NMZuXy^Uzf<4bt1l% z3ZNt=x>EZ=%av58&O$nV-DLUCwku0w%08&>9diP+T3Y;oMO0*h|K{Vqj3`gr-iT5G z?}dzO{6Q!5CEQ<`MaISl%)4)8%(dqjOEf~yWmC45g@`CCT1Cez)fgU>r+@(7J>k>! zCEvo@vZ(f0(I8tJR%(9rdTmG9X*SUeTb6xlQz2=@E&G(dV@QNr! 
z9C;9&9{R7b{8Nt$0bgF^re|DSI)X?gDE57_*?MPGSRIzc?cql*CE$^iZG~p@uwrJ4eA*QE!hCP3qL9`MZ8-Al_A3E+&BR21VXsq)K|gnBfdBQ;;iXWq|xE6W?YfY!k7bezx^@6%xjK>J3I(+=QL2`*Ll z<^=AhNK|1;BZlf{36&FM40fY&12lUo8ba=Z8Q=in!MYYAv6@QT8t5lsb3og{%<8Hl|)nx zCU|XS1?;biQ73r>m4-Iv-|hy`CTaS)204iiRH#)A|MWPk3}s6OjUvUIGIRv(f1zV( z$6DqN6tTlw~{g9>M5US5*kEX zfWYB9O$=84Qg-Cp-C=hLsFLA=F}e;f7M+@~t(u9^kMn#o{@P8SnA9FNqY2n-!9E~> z`ZLb?({W-TVV}?pOJnWU4O+{n*7oSx@Xd10t9dr?XI4gsr%Z~lF$$YkUHMqTMOv(S z#1CirOxBj4zy~fBStY-G)$S~Jcln(tVj*>Hk2VTrT)N)Y1gE7;lHf(gzYXLW=Xk3j zO_l~<`msP!FN0rxymCb^Lk6$(eCMd%C3^6sYnQ`fUQ71TUw)5+mcSqf9q^~IH%~f` zVpmuBQWq65r3$0T`7c7Z_v^R2vMAKuFRF}6dkej{cAYJs+(+~^N#K0eQ`cye2bC$Q z)RNWPx4cWELm9|>euv&%-2N_6#<_cA%}$f^-Jr2F7={)aU#y@k3M4v9RaNgMspDu> z6oYQP{0;qJ1AT<@);$acmspHWso671Zh!rPR!c}z=Y}s76qnZOW)4k1*8Al-mkJ`` zY6A1vZAk&oqc)&)`D9o2VOa;~m37O0Ppi1b&c;dDAR@((wv&Dkn=)oogov)b*8U;D z6;BMvr_Q3sGF<^vr@lyTaIWK|fP1@`#kkUbDh8!A zvQCAQXK5EHG%BemoHz-gUG0Xs>jWEHBPv!#sFM_8qp>6|Qc0#neRi#hW50|XlgyDz2d8P@M%+@rw|6zSpVZ7GcE+yU0}{qgIIX&`qu1@D zUFm+Icv1VYG3eE#u(-XD*T<}B0%6AycS9Jib7;&1+k7)5e^Gbahd4oKbZgwc_PuG$ zvl?~7i36oz#GSj^W1DGk2nQ`SHkz?TqwZC=*;%Bd@f5Bx2={b~!RYEM zz4B3)l?B-EGlS~#?^2NsXznHH0M(Okgpd7Frf8?<0W2-?9p9VqkE`kiS^b{yJ|iEo zxXj>!557qwBzF9sV09%wp7g>+4dm_$qENp!X`;JQr4`|AUuvw!k$%n@uaL-$mGy;v zG5B6&IF~#bHLTDtCY@N<-qSo@Ix&G{d5oxVq)j!z>Sc`scFQ*pX-j)w8n_fDdXRi& zgU8gRJD0F)#o=pX#C?Cm0=IlmTG7=ezP0_;>x45iV?tsiQ7E;SVU62o2hNf1vh8~| zOk18jMG7{ne`yq>&TTHY^Tvfyxo5ig2*|4Wfxs^x=l_p#X7V`$BGWGU6?}fKxOlE> zv9+%5tb#h86@AxbZSu3TLEB;xZ{49mGo!2B3VZfY)pv!g1?7kmiM>h$M`DjEBZPZr zv*^(OSKSn4Cij71o57bk@O-rJ{Jgeyn`Azn zZIyAi-)8jvStj5NsTYR|wN=dn$X{dq?RxdO#wrQ>Ew5e>*L&tNVj?xj^@$>rfF^%F^zi}56P9qOn)6Bd$iTuEhKk|Qe`~j2L z4q<{kuqGS4gkM^B{oso#7x#LdvN&;>647!Est$UYZO+qTS5MSV)al0t@&web^w-@1DmI*%zh=pRPtYKK<{4jiqREZ3+X3tf+1(+@)b0IG5nk{#LXQ}~ zm@-1&m7G4^LynMF+_;Bv`80t@kQA^zIjX$4_J}TJd1!5t5j2zojZkq7sDEPoYo33by!;66Jg0ri`APz_{L2!a`^jB0DL&KXMj*&A@w3 zG%Gi!(8#VZOl#|S!OqLEv9W{W8Pi4~XV0KvxC*=Iw(pDb8?^PdQy3~i^k0;_{^n9XJv1plvx&YDDi=l*oyyuzEJJ;(*X-ADsU 
zv?i7?;HT}#S&&?M_a>skp%>uxCANQE(jJ?(L*1tuqEERszodO0+fR$OgZCnb4iA`| z-LcpT{-0S$O$QekJM=Lm_aF-_{}9Qse?>x3Lzor~Ah~*x1y=v_nWvKd33J`C12y1G z#pmY#>in3MWX^8FknA_v|ImZ+rB~@RKgT|(HQKG1HXFkC<}b?8?+y_a$NpYcdlW^H zN&XX{iQfr#OhS!c^njSv{UMywk~ubQ0V(v!_W^JwBp6mizbmMD+Kz zwU$BwZT-#~!gXNn(zpLw`;X7QvkmnT{QskyYqXAk{a8Qi|L4bU>n@949;~PA`iw;W zmRNjAs_iXX=D+lzi4T^cx+vxGx%s}(-xB|60ar8lns)yqIdL0M_v^0fKsfqcI^o}T zksO^)60`PL8z7XEX*TKLdA$RpAM3yA$tTlx14S+;FyA^U!lW%2 z`!lE3zIL64TbLVMHW!#b^Nw9`9zxlBtk$NUF{;_9tiY|%*$tgm^}Bb!FN zpe&wpk`rOC!dQd=}`hoSxH%lI3J(Z3>XeK>Z zP)hBxuc_Xw)pj15D-*rF69#$~oY}6C+ZKIIEwZV}aFG4&V8H|%ZOA3&JvtUXD|Fi9WqL`pq*#hY zV%Ln1{*8;NCC%E8HR5$<)t2%Os}G`@=&R$!74wXfl~`a?cVFu^Hkp1vAC&UuRusCO z?#NyI{KO=LGIudpBPcwmB{xGhW&ZU+@UOJ@QR0+cr$>!tNajK_MxT_T$3br!QZUxR zUNb`4G};|2m<{Pv#K@_-ecOS3=p|l}t0!}7#%AR4#3+X?%@-P8M|+X(KPpi%I2PDX zl_I^!$EN4omXkRuwM>^--N6F&ydeDQRx-ciMU^5w%bF;TSa)8OxtjY+l}|N#_Y95h zN5=WhYh>KXu)7OJC8byuSs%SK5u}ssJMC`n_b4Obk!;e7#H-z(wOmYhLf;(zswXI5 z-E}Q%(Y&LR1YJpczj|EKzMV36EJZWuOcGilH=LE$whg|l&RWwfbhjeCY{%bWkCfjh z|LRMxMntkD4jg?()x7q-{>UZXr_O3>eQ#vEqbGBh%wAs_2tf;Ai2pO?PX>)>H@#>Zp8zH z(c0wV;yOpw4x#7Dt9uFOP~NmrKb?C~bDZ?Sa_pSN4tZAo$I^;gOzOOc`MNjiTyUpl zeAZ<-eZL4jq$a%LFOmASKyex7xfr>0mHIVN@v%mkaKqFY@_I1H-+6bQWrd>WXmZD- z8ou~~1^SZF??jgw{+zeT3qEJLi z$tos(I30>hjEFR$m%>8!tuI}8T5+qf!%|Fg|)!& zq@sHUELBcp8%+JKfHSRj@`D6?@=A9aH z9bt=6KlNmA&k)MMXGH(2+uZZTJp^wqdG46w#6&~?l?GkW2NnF+bTQV>yb{dt{pB{z z_F*n?(Wl`4Pi)R71+N^tV^QNeY1x4v)-gB0FYToqGz_ot)!UENa zxmc#1xMNT(NNwDdN)edhXnmXBndq;dCk5tLHCkl|=@i}oEtP)-X=8d+PGI*M4T9jI z#_6{fTs9-_wrOjjKX_M{nYK^I-m>gP?)?>7|KZ5kG2r56;NFa$^TB4$WiF{A{m9@p z=fnwJwv}TqbQNHa$0owIEoKMXf(sIfYao3Cz4vwNMQAL-QHsUaafmClV8$oi408D<$Ks;DValOT5ze&;oY}W z=^xGY{O{>$8%X`OyKGjL8(%R{gGzWitFD`6%+biEU-R~6>L=)}k}}T3#F&!FmDe=< zz&k{-ZQ=EDFP8d%tP*zF7=xpv6tP2i>o*Q?+3W?}Z&4V$s9aAO4Sv=8SGvwDmq1@^6I6aA+{iEl24g*2aR zMm29#RpOOe5(8SAdBz4Ynb44_82LRiDe>uGf(!PW4E5YQ_?0BQD|HaP(dT+DySTDMmdeFw zgOH+X$H<(W*UdMx&%W-9vzVca)}b1d*}i8ry1N!{?C01d#u9Z-iq*zmQ%d0VZx!A@ 
zZJ;qEcxB+Y@#MRzf%f8bf!Nu84}`=vLVUa1cJ7^SQR7v)sR@!8#N-Zlyu@=%aAzm>_D`A-$@^_dTn@c^0qR_k9sJ`p@T>HL-mHUw+*K zEaV4s#WhN4I+t8yI*OFiz~UOxjy9~^HR?Xu_BKJnmWg~-PrR+{_*@$7V z_?(n`Otm=9oZ>q(QA;v>8 zjC~*2udFGS_Xmbo3)|TD_x5d#LAeG0j$D6S0&b0WRu+FnDA6+&5~ranWp_8aMtS3I zOJtnT#n_M%O+yVXff~ZI5$8mM4lO#jncoyVIV(jzvHNLx_R;B z6^W2qJ%J2IRi*d@?SGvNA%{vz*b?C{zn1ptqzyf z(I|PW&vdZvY;^i}WfT8N!NZqu9#wjr3u*MN##M*BKYb{b% zb6^)cX~^zk*Vj5rVo7V}tGgW$-^iuKC($iVr#uh2b3eoDi7!`L-*2Lt`!wgrrworP zfjP(8>ZAg}MKS|V^iy?bDjMGhYdDaBcCKhG|I0b|&MO!LHI-u9qPv6wyZ1CfgB&n$ zHIc2`*~rw}@Pk8<1j%7bZH>}&MPN_b{6zBDldw-K+PN79xu)obuT*-H32B~Wn1JfS zvLu$(Bpn0@qc6qv^%`&AQv>i}MDsTM4On~Zy@(VZcT_L$8gcC*D5(QZV`K7p!3^4- zY~L?7i*tYV;#zC8M>Qm1{;BKZg#w$iOW(;afNUF#QQFp<~z&o(VsA{RuaaEEUa z>jHI(qD1dSSAx|}w#9^*!%Am}yu5#p^#%_ll6_wU~dJ4}EC@s~$QUeDn>%dGl@ zx7R9c5sp)p>3OZU&L@)NQr{dofOOjlM7wh@y4vaWhiR-mQxfRjmTVCJ?bb+)jfqgV z6Hwgn{mZ-lS=_K^=jR%N=M*Fi#Ko7z%ptApHT`ywbv}bdMr)!VzYVQ?d*ylG*P~NyPTOg-r@)3vzH=9t;&hhprk5ls`oA7s z(n-CWL4LI~@@q-SYfs`M>6#m;Uvo}JzmB=&klS*V+Tq8z;KX46F(^rMfU4Sen&GuR z=~Q~*+ntRkI*XBlbZLF(@!%Z>#;3DK7M?L17DmF~4?MwZt7iDx+CB$QX1oTMS8VbU{ic~dr^=0H zRHOqvopll>PdXro_qtIHv%y66pr%BCwiw^V*)@&9KGsc5Ng(=hU zgKg*J`?YnX`Y2khj#ggJIlh{gA~o2LccQ}j^083#=`(*|GRJ-fBEy%ceeW*v^7fykP=6y)KcUOhSf^)V7wkM(#uiNL{zP@?einEXz_F$Y zi>0Zb0Y)yB(THrF#m_EvZ%yjfMLqrh2F!jedU;e0AhEyej4cB+JT!Y?bV)!i!>$^B z^o!HKSmj@@+E^Dm3$0CzCcbCq@zU)1M}u~X+|kRT+9OMCdFAeKhG#PW<9A|bR|dRV z95n;|ukMhZgW{j_IVS_Ta)}z=R!NbJ@Nf{u9s86c&obqEO}Jv{VFTmP!F0=*1XWr6 zWqKZcJ46qLPs)nb^?dqfP1#sTJ?EU!`W%-nF29@p@)e_X#v|*;f95MEZY?7%0s?bf z`k9mti7}wy3)lpPV}=iTIQHEsCm*C zae0V(0(n-i*y)>?x(COJxbh5)(q7f-mX?rnQfE%lnO!$*iA~^StK<#ypqh!9Vkjope+UOaLu>_j=bH ze6nj;Jd99t$zS1>ZWTV^o}Q58Qle`uZ`Mpkn%>mZzZmnMbrqdr@S%owjln?#-&MH$ z*Z)V^cZW5#HDBLr11wk&P^yZ6^xm5aN)yn~q)V4BHFN|4l`g#(3B5$=5JCh6q(*ut z^cG0yp(NxNEcaf}_j{i2?{jjp_ng^t*34S7H$};VB>t^W@2!-yqn4!Xv1*ksCsGu> zlPV74UA0raJg4J~b5Pr>E{+(;)0_QdF)mE(!r0|53B@|RvuYko==r#LrFhp9O77qf z)#sYO7`q!PmK#L(I4H{q9=9)lih-!h>jVVxGP 
zXfyKedZIsa}QC1?n6EV`MC+6*Hx0ye!nWK;?kA6EMkoV~vl5({`<($kbZnv}a z0LEO-jdMb)u!9yA`$^ORlb3B zGV@io6T7V^46&yURuKLYIRUMn-u+j+n=B9Jov~SgI-WZGd8)sC3|>1UZD^PuL{&KG zl*#vuQL1Q@8M!Y*O|atdbKMMHP0?WQP@6k?`=1AK^UeeMg{|U}2T5rs$-px?baePx zJNay_V6{%{Td6?u@?K_Z=nNP3V4iTI^iD44_NO?hB5p?S)%-jk_pCmUJI-!BAK7zX zWHu}T(K+VE3%&vT%tc^D+2Lg(^Mgz1IGV`GQZeRA8^Arevoo*}_Ad{J4cIR)Y58iL zlef74K?r|fN7ycN5Y{jVn%@c6Aa-kP_(JcB*3Dqo+Hn_61CjIh-M21z@r(^$&s%q& z!&>_jS8(+9;(UXTaGca-W>2Y91A4DuyC5d#v&W;Kxp5TdWde^g9BT;()96)8eJV5? zm9WoNV0j_rv6C6Rik?Eopo-?BN4*dq(CQSY*?c>ksYu*WQb3TkINdj2ejQ5??2))N zKC#`9t(B0JXZ+@FYI-mvPwzA3Qv6=oV*G*A-Ag{U`K{gYX_7leDv!L20L@mMT-deO z9J14V(H;bM{afH>(o-3BzK~c}K5}2F70G(hPMhDcsStsWhrY;C4CHr=ZX*V!Nm*?- zY!Nw9g%s4j$d?N*ax`m?FE{b9tiCs~l_Or+zGGZpEZvOr!nNMXxV5{`Fywd;FS!vu zbk61U?Tp}d5Ur zL;fnGMfHOG9vj>Q%@ey%4{RyiWYqi0Nwyw*T7X0C4L&qKO3>`;n>AK$(VHBqcXBT| z1fS+$zEj};#pxy(@RjYI#@+Yj_q78f)R|Uf)j4j1&dQ7U#a1^0Wuin+AOEWL2m>p{ z2y~G<0iO)|iwWi7L{e_JzW$24`DyH5n)Rp9>NCimkc}8y+|Ua3)mXff6?;|PLOwLL zJ1RtApTN33BePthEs$q03ScmF@FD&kO5|81JH@OWfckG=^k@$L>pE$@RjmKD@H6@M zq(=cUsl1QbO}kPL2XZQiqJr)!uj9-C%|khb!G$IR&WL@-mymyH=jT4^BGnz@(kaig zmz$pw4Y>LMtdgS5c9%q*J(p&`Y$jW!+Iilv1zS8OlyXMAT@%T!>0jE2H9Xzc+LlKE ztyvJ8h(xsoIUYsA|9zE{5BSg$gAutI`s86#OXg0|=l1~}L_1OT4tU|0*Z=)K4jVX< z8hlT^4E3^@2#SEHAf8$B7otooitC;l)~zuc*Q<@9niv&Tj5`fBMcotMqn`o_H<5PV zkd~In0;HOy#og6@FeudULEBh3M}~rmAlu`t)UM!+e5ie9*lm==;cx~orRcv=xN<^; z!@^D0AP^*qM6BOwf#1WCZBu77>}d&!@6xUUR_0@PC|DXjTb%yf?%vnVLv zkc;K^-s)6htqTB{grhgUrW=qhCDEO8_05bC!|yEj`7#jI4WrAuq*7UV)R^hrDmCL| z(_HiL>EVK>^=?H6u=!luP{2>V+Piq*xmRHXeH`OCQz|4I;yImCk9&=satNeIo~u!7 z3?-cETrRrgq$@X*Kt6(lkM)AyW8BpVb)};kjqe2Tnm)JU=_Gy@=GeAAG*3Q@UH#pCX~QOmI0l}>yz7P8zn{Qs6yNRJ6TDs={6-nNPq?AqKL1DfvQ8NZYtMOb8L#mI(YSkig`kVj_ zaG=qf_CY|ln=|WqT^s%3mBdZ`IFb8*(p8YI9Omu()v3{$Zgn+x=H))MNE^0PZ}GRS z0}~)^XA3ioP$MdmTPQ#Ao|&;AN4u@%^?~cod%o9IBsN;7aLk>u?y&vV9^+U!p+KqC zwuu}`tt|e!7YLPCboAVvY`6Dj^7zc(U*TQPx0v202&2+2y4GNb*b2V9h!GR)uxUy{ zoyh!FTJDyoSh@3X+Wk+)v?)Q^_AoJqT1_TCdpg}C2DX>km}=XAqv+SLs$K?oqt>zn 
zGAW-*#d&kVW=p^}c6cQ(j&enOxyIBGZ5s~dag1<@g#dCR|JC zi^f-YZp?)`QWQ&iX*o_V%%vSXU79LcY`YA7?KusYzpbRum7Ho~*9AIF*^ed(;p(?a z;?FBXar!y~32VsW*cu|xW{}n;XCQ`_bT-SPrZQqW0Qy=QO~|Er6dN%brp-25bhEvUDVmGuYbMRtnAOi1Q0z2O1ff|8`_+rs zb@t}@un#Y!>V1yQNqNc(?C(XGu@klx&{7ia8r1{TNLg%Tbe}2DP|kv_(ed~LWXF&< zzONpW{|K=TSI+=HaG=#5kT|Q-Y+ALSLI~A+W@ETp@Fs)jwLS|rri7!)J`_47#hbn1 z%B-SJEfJ~a*Xrxwb1fGNiC}e}f$GbD7ah3~WRBCd{^@4L#G#hi%jT)y z)_%%m^es@ueM+q4jLZup4^_q${Hg_uD95$@=J?NOcRfuXASf^gV~irSE8;(xD&_Ys zDCS=^6;L3)X`K{1a7Uzb7@zr?>6ixPv$ees;JMC)+XVvOSS7N$-!V1jOg*&Pw$O?K z@Mj7{h!TDc8GaT?lGuAxfvNyXC2n1q#I)5_QOk{E$=~)gFnJS=kXP{DA+50_d?t@+Qgu)(}b8 z7@=&F2=bNhGPGm1iHD3l{ImWCeTm;<kv)-$y_h}06o3{u3x%Z$>z9#Dc9L*G0Xnwe50JeA z3MWWN|9Wxh+c9wuFn18NawrfBPK@TNj-Ohhous_teCKw?-4?-_EE?g|V$C=h>b|5(Tm<{v9FU+jqe+)Ps<%ROw~yISYxjcGz7Tp+d1IQI0k-|K z=F1KjB;0xX$HZoydcM1s5M8q*vnn6{QVt&R1rR!Uzr8vtj_S*D$l3EQQYp-J92ie* z-ezQ3)dtT4QGN9}dIwSATb(`g{+crn1lalur%dEP1^7i{W@E2cSJZ;DkADo!N7G?CS8AS7cpnf&2oP-C<}kO;>5X zO!O1;osW+UTqP|H78w1et+*Ji2kDR_NDxZR7Z#~pyuibkYRjjRZf)Ho2;?QW_KSoi&W*+PA9uYn(v zey*usm1y^&SwGt==qms+1guJlmZDMB)|sJ=cfBdWct>=@#ALa;u=eFNPx;lfE8e{9 zvY&;nqBg>N_raun=nn4#73 z!K$4r7pm{9yv!4ux$K$YtEsR2@NlFw$45##T&@Y;Rw+y_SNl*be_eSGH*1&qRFFVb ztIB;#Zo13HFY_ue*UsqGlaFj9LFLwiy^&0_Z_50IF)w>${gffQ5A^aFkQ;!Dr?Qh0 zH{*c|rvuUpIx*Y9;#BsXV?wuz9|!f+mfqU{rWH~^o@kX>YDzR zM%5#s@ibIh@fy(+mX!Reviyf$0)K1WHvkLD;e-9mV-(6$1y^KqvV?uJVyK|@7W(?& zzQsF>>NI(590hpghxhE<@fWyF9=NGngeF4wLLOVTnBmO|cSP)liT@O8 zrE3OFLK*tSR;^u~Ed8H98eXgliQ=)ynIdc<#%x0puCPWL^gdo}rT*?W_sDmC(PYA5 z>iM8&?NtID%qT`8s;$B*l#WcVyCo2@1>D@xDW935VI4a zgy8Yj29z5Zhk}^&n4Wflv?W%SXvcRH+hb#U-}$8%=Cf<)(_)6vv}W8)^BsV~Pf@K! 
zQ4eb+$E3OH)C@|TmUEvVinw~$+1a?0KdpC5HEMQG87o(9bJ~kB+VV?Y%F-I&qzWW| z-UFdxKCc?`X;l-`BfJ&^J9s!9U$~Q)tpfH6&fd2WX;jH#9x}U^|2UtQ7yQNSDuvLs zL^BI$;I}KADbxt9#OTNBX=kN8?yPRUd((SGX(K86#9w!je;s!N8(yI1kDRORNqWoh zUx&fCBd%M;%S*@Rz1oBIQLp@Wm)CiC%$`T3EpXHGJ|V3&Q%qCmyU#;VAM zB-iftvv2WG_xfC@2Po>3*`e2&n}XQK4TyqwZjono>%#$A+o|TOqGp8Ed{>7s)vysq z>BL1c$2x-RyWMXl&{*G-4C-e2xU+(mfIwT~P4Sv~2Hx@z?vZ4=l z23MW^jgS4Ch*cRF2b%Oc|BKZR@8}O4L~~GsK`M|7$qw=yF?}B!>BV`bFjXP`ZqwuR zqEj6=PreQ_Sdu;4(Ak29t_}Iq!*HN^K6ofi<(=|C3Rfnhflu8kRn|Nw)3~jLxIo2iGBNRP+6>Kd^Hj}VgC9v6DfL0qYQ3~% z%B;5ZS@w~VxGGlufhg4s<0XX>4eM3Wgd9~(N^4!TJG)|rz$kZVTsq3;`oC)O4w1Yl zTsxU)-A_%o7)X(Rgs_*r(h?d~*(tzY_xN+d2&i=nv4tKk#jDa1HpUTpDZtrRP(>qI zBU_2Clo7aNS^4PNupM^0@D$#6EDeWLOSx-seE?Ja5h)!N&gu5S3*Av~@!-K}O2+~> z-;u59PhoKU0NdPp%#O5ln)^CZebWC^+^l|U6`)8%f7iwYJSl}zi)h_NOKwcP4xZwg z&CVhm%o9II8wG(04|8SKp#WbOB)PEORq5L^t+I4(-fv}fGs@;2I9f(#m@~;UYb}WPY%_TK8jqo(w`s#jxO37huzD>wa|}C%9P;Rs^*{8P({{Q!K|#aC=LN>CjrF& zYUcRfIWP!<4r_V%bWs0?xbQuqK>t)F&9e=s;ipd;&~JPmgg^AG&D~rI62`_1W=;oUmbd!b z#We02O}>RQ?33aR6`$D5F{?-(;5z-A zGD$z~=X9xT(w43~j({OPX5PDOPpR+-17TNq8!tLHTMT}rP+;DeZq>zP<4$=kAXY2 zY_|rpubniIoE891KEz1t_ej!pTg{YtYQIS0oyy?EF?HxbtcNwnYBsgWb=9uJ%B`B` z4+iJxmn+e@RK0z#@|wVmZ55Bqp3+P+c0E2cd{>{unY;^fW^-A{ur3(#x;fApsWQhS zFn^MQUCZm!#_D?#JC~Jx>7YT7?m0mg#)I=D*>x@4x!NlFZ^E@89mcy9VCZCAepxhN z#AHlQnoSEx`PW~=C;lzfe%IN(f^06D1~ZNM2tVT1Cvq>nwNHvT(C0o6f1(Ww^=Tix zoEH^4Fq=uriJ!|+Pf)0T98+$*n~ZK&N#5ycKlyG5WUnbZ4;w=zs2CSqvmcnfKIwC? 
z*8`kpR!)F!_hz}TK}YK7MJGxT{l&W2j;goFi;cN%zKm3UF1W4Fn5elT%y}n7optj^ zcb?c7MmlQBu>HNHHPtv7vx3GkGuhpal)W~>)0C26JRg&LW>R!Sjaz{TrJcoam|u76 zB2ws`#Xa@CY|KqYb}x;fF*!ksV@}h|0GBt zpMhM-Pc4H1y`=#7-oey7a+R@aqgZqwINqrJL$-WPH3?<8uyfkB(hQzTm!|14*p7` zk2EF`$z%N7T3f3jp{A_}Wu&oE`}lBVvLnfL#tT<{lEPW*ve|{$By#SXd$M~x(=PNi z5jdmGs5Hd*KwTS-Og#9TMtrGn`AU6aow1Wpo2fRg7 z{>S}$o7Namj3e-{x9AV8SG2v2t`dvOStk<9)c<_LXqKHyo;;88VXw-uh9*Qf>1m-2 z>9pyN=))#2eOl(m(XoEc`*%Kgo|*Af)`Cnznu_f5M`SLNoghg&BWnr%Oz9I!&O& z{>PKE1;YN+DfZ*R*2xfv+Mim~{!t}Rjz|R9cWeBQzdp3vgF^e62*CSx^&$lIp9e^| ziwNUq&rcH6b2&s?rC>8UH$F=Aq|)QU@667TMzo(I0m6|Yg4dX{w0x)S3U<_oq~Th< zm&)!d3O;|c#}uSOPIccV{R?jv`|v-_R!18l3I8~-$CPzVKa0`fESp0xtDVMZasRt1 zD-*axvnG4<8DrmZ0FQEvXygYc_;=_0ts!1yXQ>9#FTm2V8}Q}S!!LwgvMyZ^iK3y1 zx#A)yvIE*w8%6m{(Rk$q-*W5RLBe17#VgVH1gqS4>~?7jBu;50!0m4UD!;xfAqujB zW5wN1C`^7Uwf}TJ{$hxgX_VzwSpjgT9AR|7s45~RPVap>Rpl~8FT zQwfREcH_F1;ww2pUH{?)%bE0wy*Daf-uf4olEkrXihq7##9g!daI{#y5!{S=UPju`dTTI;rqz7Yu_dU;M&RRxYAlicXrsI|;LFYEc2oq~h{T`AMn zR@j@)q8wM2lO%hb-x-b@AAFiQqkw4;7n(~5_nS`Wc{@_%NRAyI=&jP1d$Dt;7Qsf3 z-9%RTdhK7pEns@wsCYgsIlTAAFX3)V%Z9m{FQ=~Ru7$>hgQ*1OUc2@9(C%s@4$#Vj zlS_g&%llA+<&AnclB?i!YTtfw0*b%7;#; zwkEExe&D04muDP?ghvrl&hA8MMVMDUvXnt8X^u9EOM~^v)wQOcF+|!&K1mjyDW}As zKH8n>&$42$if(2}k-e`Y67=*ELQf}{H=S>N+LTO%_w3_1L{VQL-06Rs`D+G^xg|l zHzF?m&ahxba4(NT$Amh6N|n{KHa@ag>x*)EQx?vlBeny$c~<^2HH({izXA52YMM_? 
zggpt|rBedm%iE;eO)Q?cKe0<7o};t6zv!SEEYb`lukX6fj|bz5uckabSm4VHGrJs0 zbOwiFL2tQv)vWCRU^@l(UNKiuU!8v_wZFN_l~V*9s>a5_=~ntZfPQuCMp`+_cBja@ zr;sk5VjmEiY*t@n%eyl5QCrAVwKcVLW^#c`SKYS$N>9d*vw^+qI2j#OD|60xWam;}JZ}^gwutXc3z(A5_N=FT z6^L6fe9T;jZz#3%6!JmTzuMQ85XVVxSPs=rG^V7vOR*a;6EDZwzdGXysnGRQw3zuC z=K+5KFMc!Bg$am*;T;RYF`dBu+RaJ&RMF{iwvtYclpSx3`I%}amR+5qJu4#h8YWf8 zawln1)SePubGH@i*%A$~#EYrc3y#1TZCw=t{!;!;-$?Tk$6qW z&=X3ujQ47JT__nAFxd6Z;LorjubCYl*ysH*Y0J}-LHK3T@H)C=27$#r_+IVi?mF)p z2csb_&1bLjaa7_gabdyX(H^Nij%iTVw@lQFwCU@_~J{3QfV4`eR;3cxgSfz zuA%Mr_E}<~y-eZ1uvt2^QCaev^pHnY(LDNUn62QB0 zbrOZPfS^e~WhOdOaU|JQ=hD|F;r_{@Dy5Y^VjiFGy^OIHY!w=7`X+Afc~=n()7EH8 zg|12MHIc_sVLq*eQp?XRo%>1+duPuqByMVQ7SJuM#u{tZvwgKxEi0!nwpQ@IWpclj ze8l%?_we6kvjP@*%{3JMk+gVGpIhTld*6GvM7=zJNp!0tXoMsPI#8W$qq;_vW$7^X zoG(U{MUF!!faE7R^J~+A@7aM^t|B6JCG|szygZAdTOzqFGh@#Mjbt^6mi()b<2++6s-*D++D>o@Y${oaHvvoXJf3G+Z(Cj}f(=U+u zqdh;dx7GQ7Vda=P)Hr@7D1p#`{u{>q&gjDklztllV(R_sApdXXkN&Gin1vwJ`G;5g z1z*3<P+WczR{~uK3c)@q|FXR`}Kic(Ap0s(IK->L;j*k*4BQQq<9ic_`4dqX4qm?!g!=}KKjN{lBTmY5F>{r;u=s% zU19#D8KKy@b;>4+B)3SvJe6W&!zCytzgyKmz~hjocq#rH?Pis-w_SzLBj<}dry;-s zWEbxg5*|j&g!nwSekOxmd*eTSc}-RtzMCSO0@N($SS^H>B>>IBUEXx*8y3~+;@H9WZmU%=S+66 zRc@#RHPkQ;o$>XYTluNBHU2ipcdUCo3@-o1Not|aeH}tuvz=7hx%G1XV+e1anS4-I z==H|4_t&}@Q&MTCQnH041nf8Am2_019GXKr^j?XKp$3^*H@g#e4MI}af$YupAbar` zL*e^-E|s*JF`6H*Bf+Qh!T`OR?jAefvDr!&$h>H{qsZ2m@-BeI8F1KbmP02KCOXbb zvI#Dp(}0gb`#~W4N2)hs{$@4`F88xY16l**RgLF7;7h%jSGj;``y03&f4C$Ku7BYP zI%Mp!i@bGfcZ7h%K54QzVK-SjQfk4uoXouPskSgKx}Mn%t<$2uebFoW*o`Cx1Ql+4 zfI8gtF*{iWt}=%Xy{?;f6CIwE^fZqYQM#N79H+(2iKQuxMXR^1hY96in%+Gw(a&OG zF97#lBJ_C&Yr3Vd^#wg~^SY`oJ&NS5{qrhZu?uzvCJC}Mo-M5D7rH|zXHE)_Co_@H zp1Qd)v8n7eei3&6O`6P;h$-3^>kV1#ad~zJIp}aL&|~tCYm3bgZ&4)r>(>uyrFBAhB1Bdnt%q$?|dE?bJ~5B{a-(k9jQhU?y2RmsAHQ6K?pr8w5VU zFOLzqk~=qGaPwYZjNaJ(_~dP2PP%-LgRT%jb$Uw0X7Ixm0wWYn?z8e(6W7gCK zYGlBQlbI3PdVTow+{+anXzUq_geLK`IrjxZ&AY|N-wQ47@kzG0%#~6m$_TC;Z0)od z)NVamJevYMljtNf)Qp6W(z~WCPA<|1sM*jscG8PIvh9W!Ga4Lh4l&=ShOV!~+atoL 
zgbrTkB`FVr+CD1Xx+R%xzuBMB=;qoM2^=2aR4-~9j#~7FO%_L)naq3i=q2$n3QQX* zXw-c6z)j_WVKh?npDyUs0cVveMQCeNqj{Ph@g|nJ`lp#Ugb+3&3TIO1T6G7kj888B z@fwCvbg6rt2K07ysN~yvV+Xy{;r#e#F83QUF5B_rkj5(~2jmC5^p;e7Y_^wH8-wRz z-_il)f@%8pidK?VYcDb{f`#?0U|vN=u1WNf~D#T(#!h7b8!Hf5voHThOY^~wp`4&t@AVURo^jy+ynALeA) zD5T{pdI<_Rzw)La1MV~MOD-HyK z?np2Mnu~~QxnO9EEFEWrgK0pARwSW0W<{}8E$-RC+4!5ktn?G56@4_tiXhzLA)f7h-Co97?U%jNdst0mTQB2eRd9 zr%z62{`zYIYv{)maPSwB5v0B;P;Mj976HC2MRGz4YT&xHvcef~AyRfhb z^NKpdHNT5i_MA<4P|*|hZlyM1RxZYL?j1hf)^V5yWrXkDvO%$1czoqN*Vd z4x+l_(MeiD-PSNhA1RDrgCl&WWZ70k3jMISH-v_8Xn4ZWSM~4)_Kl@zH5wxJbj%r0 z*-QzwKqSw8+|abu`d~TozE5YvhX<%--9Be9s_)Z<>S1>C0^(xCy<4JyuiajUVSSM6 zr|YIMI1y@zAT8GsIb#4M=_d3igUHB=FT|fqXfn~Qf#AB~vi!N0g7 zhbVDHtM8r*ACR({+`FIHOKZ)jV*6x{mV|JiIB-eSq+Ir(mAcBz-NTnlPu@#Ze4{VF zw4PpU913_2fm4{yi-mw_cB`>?Rg2L&w%u?JZ)4b7RgEw)0X%uPwdQ(Xo8nwMerF^= ztaILEBn2q`N(k~5KJh(l9!7y5OYw)#eJ<(yjMtrPQwdbi=FjV4sOAN?8}Fng?& zTp%j;z&bpk=m&YHA;=_3fDoa;w9bf`=c3#ma*#NwL|%(WJ3gUoI@ z9;@H-ngOO&W7-k0bk+!L*S#S!*5)h}dK6`u1LU(cqTnS3i9jJ21s<(=qWvJiZ zl1|;X^qNSYkXz8tdNh9-X-m&2od(2U9{;QJsZ_xEH(c#O%Hz z+JvDumL{p9w%csjO!Xvqa7p1-{;^gYlf72TE%W4QX)rd15%pCipkply(4yNZsb`XP z7}xWs(DAV4O)H*^7mFCKZGM8He}%raZ{<5)An!$2O`}reAz0EN)u!fYuVy`A6P7%H zA1m7=qm>FC-)@_wW+C(F!^=LMwQsc_7KZR%>3f?Dad7&fa`%-~GUh2FU=aHl2(!j6&$}eHHcXS+ zix6B~JelSt25cMhL)%B2a;-Pl|MQ9dPwZpPH<1Y-k`DCbe+t;p;ub-r&4bxyU<0c2 zlzUp-4$*tO-g^_(>jI*(^li~t2jM+Zc6^=s7?prdZN-wDyK-ySO*@xj^-|lShc>cc z3Fh5W$}rbT7ZbH4&DTsIeX4JDePYJ_ZJ2jeHp+21OMSM()%aDrn#$#p)!tBJq>txz zOSV#C`SYQ*(CZ&_Qr6%4E^OCQzaEK8^_nMhvgz#Y?cGt0i?ki?u>pEMw|J6PrFiH# zpZ@t0{|J~=j103(w1vg;DYoba^$`ub;Yz0MID|IT0{wnr1+ycr6n=H*&h@mJwYdW- zZ@X31XMSi-BxdHouBL5z^^~Mm(_-vQjROQ)4L5Lh!`6#o_Gikrc4zI*z`lCi`ZzM( zQ327bSBc`Wju3U^;)IA>RGiRSDiIOieYKh0@lG#V#r=D1BmkoS#>OzhZq5YAt}X!_ zSXs+49|~vG7qy>TCjc_|SBI~&uHgMO$|@ypL0kZi2W-B9>s=Wc-7`a}jAg#L@ENs9 zQLDX&6k9t}9MO7DyS60zD3bX3#TQ+qmnO=y>b@K6p7p7(1@gi}eQejO@v^H|F*C!6 z^M0a*b}R$Kh(#$RcrO98dlYN^_y98RjE;S)&riCV=vZ|^MXz)Aykxaz2lRj4`Zxw{ 
zf9g0a`!Sl($jjs>(e_`Q0l~*$+t2Ht%i{=C~mrEY~*#i20!`l=6gZ)=;V6R!ds2_+5%*A^GuF)LDI-5B& ztBl`ofcs5WL;C*!^=7>(&JW0QgdYVehnNzwqGA?S6GI%h@XHEAXL$Yh0>1S>k?_X!2FUf1FwZLYo7qm3QN-Px}Xv zSp|;mv40?x^u=P->BdKMuiVJXv-HGIc1QIpn=1|XGqe;1JGCgp>>6X;+JxE++V$$(#)CQ> z;~HBLh$@E8=`F-cv!iMYt!E#Id8b@g#TT|dLra0$J-6d0v@yD?1W#$EfT?&oP&6maL}F9}X`-{oN%1%4inU3@h}G~-cK zZGL}IHtkwD%BUW1AIH!cho`O+Npm4CS_dSHq2cx03cFJYi7`vS>pT|yo9}Q1foeO8 z>6!<{Liq6A+rZUqhr;nXCS)vnG!0X^OWj%2n1rCX#nzmXh!?35>D(C;tGh1du~4;J zD#)B|0_m|c&GRe`S-vIp>O1%%WNN{(J8n$vguRXroqdK$9q-;fqQv@oEa9Zbz_Mb9 zSfcB~3(w7MN78D(b2flo+3s+7xMyM0piN68-x_q=`PMBfex!ibLH!B>rjh4802R7z zr#%I_=aq&Wtlrl#`0_PM2W9!w9O@%wtPa^&ugB7hLAQqv z3cO(}?celM43>pv6Act3Dkf8G?FuCm++S=(3mGgOfH}FmS7$eS7QOL48@yIE9lJEXwDp*s<-0rh zWX&F+_PBiJIiJ$oj@xh5oWb2?`%DW+%_+@VRMxs821%N71m&=%h>d(tS3k&AxYL2bH@ zs1w&+dao}V%A{YL;Ak{oW)@Q*KC~iLP(iN&`iS+7Gt7u$Z&xGF-lxtp*wp|@ikN#Zx3ZBiZTh9>f_g0x^(P0bvZb;6djpo^B`e9N9wValOY zA=+gJTXTkBWpP$L15NZt1snwp3C)dSJ8QL?Zdu3a$gp-&ykY{EM%o2BL2U5rt&qZD z>|##pn*dvGasL{pNR+akzNWgu6#ybl72Os>PvIRg1gHY5auMjWT7x-t+QV@^!X zu?`3O?+6{B^5sM4#DSI1F)BYZ=Obm{;ks1*9a$B#*w3hK)1l>|&QD^khj83M(2TLr z0wrZ1Es>sUU4PQvL-o~QsXWH94wP8-X5hTH~Dv5la!~I|Q&6|oSlUs$_ zS|=sKt{J!U9}A!F6=Y5&sS#uzHvd8NU^b9^isUXr5&<(_;oWja)m(Kove6zLb)TiP z+TXpEB(?gHw}pgye5IF_HQz<;l?%fhx?Hil5h^7_Y$Lk=RC^UA(I#6vlsoi#y{p;V zQV+7EZg9ZV&yj*2TW-;mm1zS4U2y&novAvMBG4@mpWQ(h6~SCOty25_$~gK*v9MW) z%S22QIfJNY>cTs5N*AXfR#i;E<+YDhPgs>3 zTgB+~2ET;2hw|!r8uK?V&hP=VwiXxnl=`$tNH@|xC(GP)5bB`mHsj}5!&|y6QrXSk zjA}0vq4f=CjG@_HrnV3vt@OO;k~Fr`4*l4XI(@#@Mg71&XDl_5)7)uQ{N}3NZple} zna)AwjFg%ANa?b#3f`#&7U|j6yP1?g8jd};bD$%TsCba$n4DTxC|1%tx7iZT!)FlD zT0#p1dmKX85TTb2yK^5kPXC9z z@w-|P2J7#W^-@Be)) z(>hFxV)jS%2&P&3!|;|E8Sc{)XZv-A2+J_nTpQ^zRjw6lVhGZrrx}ncG~!t z7yo1f{*$Aosr>uSeozHJF1|}DYj~ymn~{IiA!q7Roo@W=Fh`!6r+@3#X1xP{-t5tL z@UyV(KCuOIIDE1HES;bH#gD4VCr2PX@pYNssF@!n{QS5##GaBYzh|63h9wgdLhKfS z1Rc+pe>|G-Do>`Vv^oO{j-{hN>HspU)sRM1uI!wKq|yT z>W}OP;U#<{YFnd|jr|oVj!gH>gmbqq!OF)H=%3SxeUnQlPK**{58q?RPg2S7R6}@f 
zwkq^6%_5xI^2fz2R`o@j8D8)(G0?GKe?Cv=A(G~aW zO@^HBJLF=tQ-wX;R&%uo$Jx`Z;|f*hi}$XM7PYO^In90H+P}Cm23-9#85L(MT6?yH zu!@FCOT*~jO2th@z$oav&iv7+gcn^#C?hj}6K_^#_7bs0)n}RqhNn1AwaiP! zTpa9>+cN>0t4(e~jx`%b%Pvu;@!Mg+r432%4g+pM*wIqG8rSvf?;DZwv#W$6g)PW@ z<*nH6)|b<7DT_nnxqT3mJ?p?3G-YH~wbGvYfaN3?LEPOUF9Cz}O#4`_)_X#3n`Y@t zRGH5w3Lq=!B0@_iZ~c8WS6rAkd-vmu6!%Yo#w1@JI^OYcH>|!>v8JT-qnVV z$!`&mtqHf+&1jc7njwfY{O#4sW;Z9iukk!MEet^hbs2eEAmZJ^MYOr)*eqD6M_Fny z@Q!dsYPi87SzyQ8WAztE#EbVY1u6vo6H_b+>VV4sWADAAn(DrGQ5zN%P*D(RB27R* zq<0koLyLg)CemBzgdP=Q9YoLArA6WefzFm#>=wu-| zkKy4G=G}URAD(}oT_uuNjg`k1ZfefoJ=`2kvo9rXh&Iz zjpoT8WHxd?PrSrb zQ;RIrGej)S7q}mGN6jP{geD#I?YWt8=~GL%ti3+BmH#2Fb)0)9Gh^Snp7}=sQ=^xFobDc)Q3R ztcSy82-{Og{%Y^K8&Y*yn7M%hzOxt+mhWkClm{iZ1~#{L9Fn}ds@xmqyry_{cX_Q6 zp0FQmF4ShId+iRO)TsE34xAIvle@&WGDpM76^Z|QvAOB;o9lI$d}svB)Hp1q41aih znzchSD%k`z??E%G&q}-D>DtF-Kr4|HYjPYEIW4XTD?2e#&sqN$0 zLiLl~MfqN1SxO)j(p z=|qwUSnOwTT9&)R9o#3tg(jFcRE2H3J>Nd$qhV7?x?~@SJ{$e)rDC8Drhw^ zm}V%xK_wB|TX)z?aVl%-oqYx(VuXQyAAy#QeYfQ$H8f%VrEZg;i^So>M;Z66bU!27 zlH!FbJd$MDQI~Czq5~}{Y*N4HdVedwTkN{4gkfajS7(Meih3yXlTeWk`}n>Q$qygQ zC^uEwM_6cw3#aJiq@5RfOP!JX3=D4$=V%l}t8-LC97w9Y?>_tbaX!IUQX4o7f-QEJ zrKIGl^DH%+rP%b$9d1w;B##c1^q%yJ){7|$-N~iaUafpLZ?l%vv#-IWw^%7Cv9zTf zLkzfP;xf7-B85Ln9d=)}gnJh@_Bb<2rHbT>tcp?d?hb`^V%N6rH?r_kW3PEJ@6pM9 zY5ifrmVYYya~Hi5NFibb_L8MoPX{MTEL2nZ$hNmJBg7}rizpfiy}=|8JYCFm%udiZ zXe^)U%GW~jjJ%-)bF92XGkk4*w|Tk|sB!J-nzol$hoKC~_|#Osb(Ln(9=3$xvxMh2;m&b?&Ut>32=@d3#HhbfD3e6w&jw8^H&r{qYvykDySmnuY(4ZRlv64QhT?o8uP`e$?%oCpt%?5}d+~XIV&UkP+x^TSF(1X#0cc z^mj&waX{0>Wdl~CqlbI}BTt^~iR=?Hwu8#g*Lk7W=zn8{NJj^s?OG5IeQNx#xs2A$ zM3z4To1gKU^(~0SpAp7SsfKtZ#7%th@cF;x8eo+D$Ebg1@EAa~Vc_`#|DI=Dauo-2 zkU#Q=pL3AUj&Kawzjofb`(k|Wzvnbq5jHULb^F(5;ATr7ke&W}Z1eM=*F0$cR_^#| z)U{w1KkEhmv+D9wignX%eDvVt@6-L$z%6#_s=4AC5gPAt08#-W}LXN$BoZYjsxT;9j8Enp-(7srh{ z=O0O=7>w01kBg1hj?ZIUZ2PLD4oH(kofdk_6cbzuLc~0#Teja}$}IcpY^q7~;w875 z(?Ybog`Z$HmYbQQkrg|u8XihR?rL=hqJ(}%TXm$9Kjkr^gOEW4-*LE!a7-h!_h;Z%Mc2bSn$gK$-+j;M|s^dxf!& 
z^aj-noodxHC8T+~Uz%c>i@jro3LBAE=}0km97fV0j)LjwNw~MpeMLL^$#>jZ_6j9$ zk9yw(OGveuOT_vQcH9qNG?94E@1oYCn9%QtF3n2qa|$63WN`=jh%uWQI@y)r0)Kps z7OY!GLp6QGBOJjy$pASLJ<%_`HtFcsx3K<2>FEm05B#uCS!46^BiXV3v*&s#i)cJKDo!iWDbXq{Nk_RyF)lEyIEAZ1DjfsTVAb z6HB)1=rycvyV2Ex_74j3m@l&#v{YCzfwaU8W~idEtVz{&_?|VrGa=LiIabxDq0R(v z=|ojRC8PY3uLkwUmG_?d7Z2+xJ4-R&-@7fxNL(jRxk;JpHTf~QlT3GRquP{o@jwZM zsUyskvXCUWxM8aIme-!n1a?A>ViE=MpTQz-aGE4i_{t%TP=mtx?*T+PWgR+&IBS*7 zo!+*J>3J7Ap?=#vsLt+JXl&gkcqa6A6K*LUr(8_Tc{*JVtPqK}_IY~`^i4?1qa2s2 zNu-i#lUL5_e{B;UFHp^Mc+*)kfb}UKTavV$*FsItRS3pQjHL~9lAlp0$>K3QP=mKj zhDHIb+gat5xtfQo9yW9Yv!JN?#4#kt49nrOMIwj;M!VP!w!J%Ql_B6Nw(WbVa1;Km z?uvTdvTNAP^?|Ks$wToeN%9!3gbnA1i?Y?;wQ4Y-)*7Qb|F+zv#jewB-mAQ?t;;%E zHQ$srlmmufZc8Ik=VMe)dx{>`Gaz@rCDp{eIXs+UhTC*cR9{Ha7i4+=pgz5^lf*S& zzZL$$1QOABaGDhu9TT-x4j06on*|8R)YJR`hLCh_`As<{iCwZCPxsk z!>!i)dXmrg)p84D?nxr{=j9Egt54FpPgJtfxx1;<;LCT@NJ0pNPPC$kabF+x8B=ks zTA9WHfY*KdJBwP5*ue%KuP9O_yS1O>r$>rMn~mG57B_e}(i|CAp1xmz=P#wzN>!lN zX~_$=YnBk&8mw@Dd{VR_Jy@F(8FjerM&Y&bMUsFY2vi&swnTdBBVb6vw&|)$-Wh{d ztL|d2;a6%FxX+vXJgqVL!^w7iCa8_s^QAduIOEwbnEDp=0}+FHYyVb3FRSKw zXQO?It#!$SGaxG9sTCr3^-P|Ugh={4U{FK|&dVToX}8d=bIOr;v79yy`uU_Af{XCc z>jUP3cfxx@d2RX0JT~*#)7xzZg(fp;zNy~Yc%5+t?TV*;0@Xb_dr1{uFUSlxMsZ~i zHU(=@-Yh`|T&lP*m~D6?#?Ebqdd~;CO;XcdFjLOFig~j725x$0tW0nZcjq&dcIOnL zh1@m5Zo;strqaOGi%^63!a9nxB?1`eQ;6!X87cVu1ibl5Ue{kcUaGjXd1{J_qcJXO zdSAQ)U5+b=VB|ZPs#8w~#wig(xpR}6m;V{|QSO?%#z{ufzMT>eb!cvmgX1=_3m4N~ zjDKD(H`5Q(y2H4uncet+o-_O0FL~+_u!-+lF9NxY%C1|XOj8JQpI@;*Flctmhjl+pXlKas)puUNGphz{t?jqk=deP=bsTTR9TxG z{}NVsnlc)zb;pM(zPJ15f&K=Jk#{UZ{tOm>LMA7RpRR!{@4pApzhRx{Jos`iq!2m& zN{s#u7@QCfrJE4>Gg!>J{CBRJ*biRHul()bxymWkIjbHtm9=gg)4Sc3GWU<%|6hdd zA9x&X`}lOzG_t#RB>~=8Fz#}PUtFg{byD~=>yZLY{Rt-jcE^%KPnDR)l&I-MA;-QZ zdWujmCOBY|hozY`N3qv5{VU*S3DVCk`_m*Kfs*##gR#Pu24vUy9kHLu>pVt_v4V}q zQGq!*mDssUqt9i2$!UK>3_r@&w-DZ~+zW~Co!&91OBOKR&Lu&U*?>-9B9m&KOOJbd zvD&+Q06}O#o;=ZaS;1AyeC%nrW;fuYKgPit)y58jr7x=FH ztZKg@p06z2lX|IY#N0RAOoZhnsR%t?NJ=sN=0&zNu9Hbuw!Bi@OP3z1v12DveY#^eVy+F 
z$_YAHMhS+aqEd0&&(t`j!PH3;zWt-k4wxPJN=#+7W&IeWSY7_}0unXNep}v)G-hCW zs{x8py^D`4ZFKakyC=q+B5}ude>=-jovtpDc8}iaV8^+2pdm>iSdRy>nR`{XV0f0g zImr^`#$(!*NT1O*L=$u6kxFy9!^2L?OX8%1ClM9ibKhT5^<=bZdoMhQ#>R zHSe0Wtf={uLmzUFyKZ(zDV{fLps8Bf#fj@<^_I3kX6Fsok(wolGW zz%>q?RfMOkT?V3NO15HD^Yx)gm9Ep}ND95WWcSI*9vOxR0RM*s}kD zm}-XhXohHs$9{k%$?0~{31nKI!I^lzF;=|;6Q$P7kNcd-R%qQiB!V+o9xE@%TGLbH zWFF#@R1J5nT1$}^uXF+ymAS<>I-`A%UJ;VKb*c;X?Gk#i(=j7IykN*LtH8kknoSVN z!cTjUCa(zMGrClxL*r|Vw|mo#K5!jM>`9|0k=TZ$O-grQvy(oqO9R&JoKX`IDN!R| zYngYz+AN85EHd*63~_I7d84$GsWO*EW;*jIf)gear4r?rC*)@CJ?>9uRk`LlabmHi z4l1sDuZDM(sBVoP`mX{wB-ftW9i z1T4R2y4|Is*dg&QE{G<>tb3)8x{UTVo?r{^B6|qgO!3Om+EK@}+n*vm7sqH|t)99Q z-dd;1t(LvH|CRGa%xlvXcaiM?vWzuNkh?H`-OKJXMTH(d7fTQjX_XnEy=yoa?#yZNnvL{ z-qqS3~?v66#{Y7qRHe z`q?cZMitRIwq|HNacHjIFWENF>kK^&51U(O2lHy6;TlV4X_Adosg`NfT zd|3R&G-!@3LxRTBU}1nvmCXd47rrLx%{#g~Tj2)U?E`!S&Ao$Za{tg|FMeju$=-%e zS*7Yo&mMS8r|+8tKA1y+WVFZKebwUj>h?OS-_XK(^?Ud({15&oY@ZrJhSb6S|*`h)l-@H;?~n}HuCPpx3Y4QbUmf^ zA({J|Ne`tZ`iwx17?X*@;oeZUNlxj~;33ZJx|qtl*(s^4&9E6J8w-QQH~uLLWxNd; zbE(Q9!9T;h1(unaq`-0D< zna8!V9irkL2r7XXE+7!S3>8N(x%VVt~V#(zJ>b@Q651*mV4Y zNtX_k`OKLAz-kl|O4M+CSFt*jMqi}DxWQC@s@n%;A(!F@SmP(fa5CVLC5%TtCl-=-cmYKtDhjMT+1c3|C7^F1XO&Uvp^PAlO%aud27 zKX46IV83&Vp26q9Dp_plD<|bgQ1(-wG+O-ll5z5Z#$BvyKX|Hk;EuGvhY+_hJ89V< z8>~4^VzIk4Az;2>PH2Q~Jn!lD{;CotE@*&Dw~k@_!#b3WbA>R=(k{Gvf4MBBeQbJD zer;+jVBn2_(fV?GLOE;HnNz<=2BhQo zbiTB&^r|diaQ47d&Q`pQx` zN&DKsKSH}7VIvq+>i(lR`$s4mMOuSddm6w}%J*}~al}%F{%h&>2!YKeN{H-yS@7kiRy-o;Y?)Gvcq!li*7hqnZBPnG9~Ff6jdFPnzC; zn*OoJ^q*oP3RZZ{1-=Gq6fA%KK`1|x*YTgWl!6EPeLVKZhEMR|x=-MllfUl^1#fx& z&+VSaj~%=H{Ev;5|5J?$&@W?8RB`EOJzqBhk8M6iEc;`x>rp~+75pjin{{^-CVHhG4d(ZuBN&fI4 zM^>5W&#l~`s44FKu_^t?njiaP8}|?M^FPqYpqz+@$VsoP=oqvA=c|ida-pxP+${Ro z6-hBN9=f)8lzH{u;oU_9AC-kgGqa@GL3{r{@p8K z@?=tE7F!i{!Y$J((tB~N!C@@Em#$zmElXTdA?M9XT#A>bi!7_PcI>rEM|1Zpa))8+ zUb-Kw1al;~j>6&*4r|R`HSKFO>HDi`T>eqhBV7hbm)GuZ zgr>__fvi#;uO6t|{MB^v z=&ADHV)(!4{G2o1?&Uh5*Mymsw&KK9jA>WhM>dvXp4O~4I!KD;lWAD@&e%7si8A2} 
zN65mKxB3$J-E=wNtCR%IHUGw)*#C%h8(+R z%6&__((Z7lmQpKf3#FF+D(xQ=byx3bjx9FPUxv+9Vq^Xr)5(XkF|;{4{0B6oSB?%b z0*|L6K03wh*s+VgM!z|FvYgYJOYrx7dF%4ALe?^(3$q_+S4R1Z*&CFtmEvo#$28DI@5(!s&FS zTupm9QCI!^%EXE7HTY=A#h8gW$LS-jwXzofK1{Q%BPLfBISbj>U`{ zoLgx z_OJKWT4T67X-kc}MH!B)WaW!ECsNu=<9_9im$mLn%vNpRZVULUMTV)+E?smzv{u+JOE2*7>v~n#TlDcg9 zqC`*Qz;AZyQX8*@r+c*q+LoK5Pe`88d-qCgO#ij`GslkY>!m=pwgm-{-rnB3r@EVI zRA5Mm#?{76b*XU`>$n20QY#jlFj?r-TPAu+kF{KLwsTR^a9#j|@K)#_`!siUsd%Y`j6VTyca&LsEHE ziStRWupn)Nid|C4GTPt2pSlEbDT1>k@oMCWL!xhYJWmR;RA^M(Ri%+g-m=XrZGO8d zZDwIp-kMeE=Imc^5A>SQ=@td;w(BQ(EqJt9sF*CIpexr)*I*H5L1-pHRtgP_R#|1N z4KisZ9)f%yh-NzJi+emR-)QfNgR_KO8yuTRq+NzMSj|hDEmrbfi1&om)q5`81(S#N zs5qzH{r&hFt4A>QPh5{CWz4;GrS8q%t6m&y?50 zuBUQ2kJg7(`B6teusZ%3*IskEi_MZGLq&2zPIoNk#tLpV*EjeS1-n+8cBgr0@LCiu zd0u_{?2P_?TGullWLyblQaaLe-5sa+AflqcO@uvcYCY(sRZ3NFI+t^nyxx-g_bP#F z;MAeXniU&xiQI_0%JcYU9nhLPmQ>eiu;}2QaH1`syUXSVJ)xx zMAM#g(LgqZwHu4S!4=(uKkle9;L~6Fc_*%9S2tL>GHYWn@{vqVPWy-uvP|O^`xr-j zCy`#QxZ{9AymfIrBuIMpzK0G=(uYk7+b^}5nSB}MGZ{lpy%D%UxBfXcT5n~$Rn?e3 zx=W@#a2nLS;Z^5Uhn^)V&6|pR@7u+VyWNVD4+O1JN^>{z?^>)fWQeb3(N>sC0pyPE zf!ej__yD@6mO7XHk)p5N)c=dlqPCSQ7o9}-{p7YTc5PicFzJ5QhcIMm4_azV3pvx+ zyM3iJo_VWljO`ms2wQ$?!%hn_WAirb6U+Y=-@w^ygBj-V)gMNOa=ndepip5G!i3wzPRk zi*(1mmFP~^D9Az3Qb1rsvHdW*WH$4C;3oO)EOG8YGv|M{7L#L1`dXllw747VOol~v zoaX_#XYT{8n57Y18Lgn5*b-zptf_iS0iFJ*4!Gtjhy;XRwzXE+)HU_@XiDoJ&GYKN zi=;r=Ji4x``yBp+B~RSdydSs6ko;+ql4wBUhx#0Ke&?;Tjh!{iYQQy;LKB|2{Ls45 zJzSb&bmi<8M7E*arjvCv9I`cDXUy|){+4w|+Wef4zq-cSY#hx$Cs(i|jryjH4$sBW z+ensjsk!Bu8oURWy}ggnsb;gQWGzL@GCQl`3hp9F?t$#4GnFSr;oZE^?uslnVy@=S zQB29r|I)WG*Lxo_~Rxg{txz>-dt~g!f9XaS{Wrnzs6eqP`Wm)XqCfq)qW9;uX7;w6ClEXzsL8|pqRiCew z{=VNQ%VLpv!|%`LYmDoC8(s?*a8V=5G$7Xp%ahKOup%k^Dd#7~T|+w)Ay$TVT-p7# z=s-*Juz9e{MyoLpYKHeV&fh45ceEa&A=@Kc75n$P({HgNo;F%0j)X&|Q*x--Q!K;+C^g7+ zJy=usU7oV&g*317hq~{sZ<={Kj5!j~h#dVrw(iS}WWaNf|LSRP6eTZTG>kzn!@z z9bHr6fqehoLr@IrQp_YO`?hXw8=T$eN|(Hm+~N_9t>9FAy0^;wQDFphQcopA6fCE} z7g8Rpwlwapx9tG0O;q;Xwb;-q2QS`v%!5=Sa}}?$0KbS;VzF40>s}rn)eD9nzzhHN 
zhh`SBXe|Gg*aT+m=;UBxXmj+N?K4Ya@W}Z)H;#T06XTY#uy!(WxWO%BZRlk3(8S2r z*o2r{!31XJWPXE3@Xj3xi5tH+c1>K>mbax5yikWJh;@9?bW=Lu^5ZK_^&OSIFXqN% zpP#Vl+6GQCo0++M18LvI`q*#*(g@$m%i}Y$kUI}?ZYMXP%rxHnhwB3jc610)fE)Gd zKEiuXh(^*A=UH(O>(_~(kL(CJeN3S~#()$>R~JJoB-406(Rzur@3lwe7lo(x_}@(L8NQibBmXv8pYlZy$H7^C+xnVD zVt*j#LB@Mg|GP9m%UnihD*;1C1}D2^`S{kpzkj0v*&XVzkAAO&XBjS*6DJO@XLi>q zkH(UxgL@^!JgJQ=$4SCawAxJ<q{=n?@V5X<1(Yqz^Kiyr$}RM$OfzQ_i99hYjDmT$sVrN9B5Hg1hBypdbPNEefKqq|T& zo}5`rKGeJU{uOr3E%{i5cQ^%^tCF!PTE|br-4FO;IZvKXxOcgg>L9vIex`cVK_@=B zs3#LzVE9tyfqZ%ii9OV~+2*v;l9^m6^Rd_ZhYBxlZ6>EbN=%PhV2XGk<0zeAgk+Rn z%w&D{vHex>C3RqOhK}V+Q>nH2Uh1N>&a2NmXY$eR7whm zU-}hdrmjr`?AbU0pg2pg?ElIzgbEis)^*$Cpo0Di37`D)R!b-NZ;EsB zoY~%LNWRSWF|AbfjYkMwCEIpWGMuvqeG*M_rWKZE&1F)n)p`6qy48erO8hI)Wv4FR z{9QYf=vFPW$ll0vX_x>yQ_pYYqXJF>>^hc@;ce+ECoT$Kw7IT@*1XqmCNPRmWt_25 zV!1{f=k^i3PrV-Zmdx);ZNB@caNx-fu57u^=*p)T1uG{meE0x=!)7Yv{FP@ilnf;6 zzwDS3^mD8xzx{dUOrC)28eN9NS&8sZd1J>v^{JZ5#JO>N;-79tWJs<^WF8BsY9p$( z03nz#pG;UG0g+KoEaP(N|zGuR^jc*N|%g!a%Z|_eMx$WSHPNp>Q zzqmKIbpLaKHB2h*?cM&BuN@p09z$Yhf*Yhz1N2eMPnta2A#wb}v5a9_nZa{tt8~b5 zclMR569Cz6dh(OJXC5D!mdQ`=hClHXJr@}7l9TY6D?WpT96XfWrm$-N!~?;VFyp9IHX34-gLOttd6akmNRF)E78qWx>-B1y8+hUW=5aTxZb8i z)8BMFNy6f~;W(2Umaj9q?taz$uFdWJ1*gK>oY}!-RXd4k_vo_6XpL(-nVU7cl-|pg zYC4G$BtkWEzc{>45%^x-Fj54-R@TVt@$m=r?b!I*fAMvEfBCv6ZgwU&xHX~AEKQ7@ zh`CjrpE>=w0`5g%GTP}LDkOC2yFe( z#KpqML`C)i*j~}l(dvdEKllI@V(tePPL7XF9HeY*>}+8sFsB>fUH-SumFF(6knrE# zD|ke>=9R`1XY3aB?q%P6`SkNk%};(L9@0-%YO#K_)$gC3eDe6osVk9OYRZK#o<9o+ zyc)UvbtXO5Tw%qH(X7M7Y)x#v;_8~@IL2c7BUL||xR;)AFE{nEYT|GCwFWc43q_@7-k*^jc%aDB31Aipa$2+x4yprXbd?mN$2+1Z^p#|7ljXmYFn>~N`DJbN zCFYW+1l{ax2-ThEbb5>1YalNlzifms8+VY!*k#R(o{HPhpV6Gsdp2&b++# zXGBCr^pzBzWP9BD^(i^8aR3sJ50>&TyRGx>>dLgL-CXd*Vb--y%xHCT^ta{XH#tPK z*KMXB5De_a>+FqQW@LQu^UK6KIXkI$#-&=*zW&(|u@?r$0tWJsEL*e21DEBczGjk6O;{b?cJ9s{jr zl%{kWz?+)7NTQyNN8RG>DbePyA`b`T*_r?oY-4-E*0(SX4Gl`?BtO4zy*p7CmfxRFO?(C=hB82LEi%r&0mfUK);Bh9Qcv_(?QDF1Otg)s z7lau%ev2U3&dx9m!#Demb7<&8-#9FGXa;QQB$KbuD2V88uYL{G0#Dx%x)=J$ 
z0z2#Q?Agb#u!^LR^OS+6!OEHhV0(9UM_*rLc)0B|&EhFIB> zCr-dxy-6D~>EU@H36DsZwTf1ktWC(b6HPB}_aflho>Qbwilw?9#=A)4(--k?pcJ63cD2`& zR{p%$bJAn`v5A)`+tSjKp-vgzZR^AB>3RPk%8Za}@6xlhrp{7eh!%l@qM|F|0P=nW zd^^)8iMAN}jpRC$*IY_MV<^coy4+QBqcJK|Dqg_+;^I7O)}`)~Qv2KQ&U}hamp7oe zf_Ip0afL$XUbxtF-Q?!kNL}3D^1wcWuU@vHb1ZRnT6SUNRODjwnj9*gSqMTGz)YhN z_=*}*Rd6Bw6sWKlZgv3l$)W?*s;pIqUeto1VJewz<9k4PMzk87x~UH*26A}>_##zO zbV^D~D#+^3QA{e35bq}R-&D$_>w zyoDc&&%o3)bZHz&MHl3Ma4|n{8B5)RD^t7M?tPVX4c9z=4|s5OecTn!B$mNt~^%i;Rpa;cqIE z$Eut^4QTA&xDo@J14+F)1;Newh9DpC4ROl{T>75;!j;OSRVu?J1!~K#=9y{j9-vpq z+jhco>G280)ucV>2xTpYdC3VzoLmBR8Qb4oa*44i;6_3HlS{waBZW*nVib?lLzf+z zXAc#c$Zz;8fa{MkS-2D; z7w+Fzv6x>;q`JBTN^$Pko(PB4qZEm5o$uYfOyV*?rRNs#NK$gHoO)p~54#x=8Hfxe zK^HanIY%-_c_)bib{jVfXdyV@WdON&m9tU4>?cl}c>aT(VSe|mF)>fq>A}MM{QM$Y zI;!=Q?i}g)&Vka>tK~9&2Hj(|En>J%)ZZ-JgY_$!m&w@yxP#sIp1vsNJ~8Ko@JzPK zd4TO;*VlUZp+Lfc|7;8ZIP5HyOx8F1HCLQZD={D0>+VXOqL5$)5Qh`@(^69Q8>h_m z?QOcIIQm;E3y9A*Pk!&|VpFr4y4i>a2#Y-=E30wi-RU;y11hQriCAh%*2789s?4nH z4m#esun1m8NDDySn`NZyU=CqtJHYlNuv#phVv|;ts`vPIVU2`wsP^_P(8AsQ-a}q4 zFkJ70tMgI0|BHi)eT+k`G}z)ClvN!%0mgD$dO7jW`bsz;!Jo{?WvwJ%KS?9cdv%za zk}LQ;ap3wE^;kiQceBS$V{Y~KxUGRz^m`d_NT{gfvF_#o@10MjZd35B~lezX$(el)=)}4^riW{{qYV=HDVJn6ciRiS?de>sYMOe zwrsV8yPkXRR?*EH+Pt>OUU}Lz#Gw2N($gZ}v0_Y&OW^jG%8iUnu2NphoFM~%pubi% z1DJlW^B8Uc&!afN|GWrJB7eR{n3kDcf&TjsGs%FhZbi8F%i6?EKoT~VC<>#gg<<@N z&L_{z13I2Q6QH50Wl{Lv*FUGe(3hp!h1+^B(~<}PUf4Czqc2*_B=i;-xX!f&y*!~Z zZ(=xEUu>qQQ!>%V!y`ag4dQn7=kFbeA&8JOGNNW%rL8Dx*w}9-jFvl^&G|KR0AXar zcliZApjkac+!h|Q$zL;g9q`UW%0L6IxG@@CE-hU^FLp}=-cji7rBBPxU#d(;htflF z!Q|Wt(9i<&R~i1kZyW=)GBg@PYubL zio4`)?}SU{(ArECRw z16@l^&AkFm<+gXnx|L#I*iD=^iCab0EsB12wyS*?(>+UK#u(I*lChk5g|)Lru{J7sZEE3> z24Ncy6o|t6J5y|%v~@LArHp`YAu1B`q+H_dm)P2=`Q4Qr4xq36d-}GWo#6#KtjW}} zDgr)hm+j0s`KxxQ$ZbRUeyZ0@H2JiCK>*&$fz}>nf@B$=ow2j>j*60oh8d*ew-D4EIeVkk2GNPP^w)3* zZ+CaA{%vj!v-T-PRYwl-!-Zt=7;^G-C-1D^mRY;|NUcY;i==MNqk-}}{Ih+%UHydV zZzT4<#r|YHc@~*Do!9sS2YO@T;@Z@XN$`M1b$xR2^;rnHOeK8rX1od{q9#?ApPz4m 
ztmC?PyvCNhcKo{+$Mt%wrXFr+JZb?F1k68qoOyA<)%^j3@}xRryV4iM za#wXGI#O+0|KSMnWlyvOxfVClr{kbouY|FFzL&o6rLBIt6Twa8Q8NALe08xvikHii z^>%W(k@|~O&?cOG$FtAnigspz+}Rf6#Ej2FIvt0|TYGa+iu(o1=&*uxxFsTa+*gC` zrR^B29G)e3tTg1@So1i2WT~%hz1OoCxqU5Nb~8YI>^%4?+GCza@C-Z8yG;b>uf6L$ z<8)NR@dlvL=vx_rW@aRTdPtcpj;_@ct%^@7Ed@hKFhY}e?gf<&w9}%wwgy$3uFy^c zEytQ_Wel6SmBr}fiu}hgveEd^vfa=q4mZU^yT$HDaCp@w>l^@v4`QHyfkj{Ouv!+& zDpw+KChPep<5MD&a@030z2oABYLppe2Ec%gt~O#5faT|p422V-gaPOfDkcBzhYu9R zR+P2ei|^iTYZaSs&Pb+@9iNmte~t%qU5K(MB)Q|0TdXs{!+WQvJt?EK&@9P_WTrie z7W09jkbY+>jUC9cEODJpNP_B1mdZaWspnT(P=S@*oyYZAb-3Q|_L2+hfld>R z*FZ}cl6m5c`Ol|vX5|>$Fc`+wy`vj_L?zda^*@EVcocy{eiu3L=I{V;U%g)b;RX(+ z>T?=QIh?$}-~gz0FsaF4b)M2{=8#3XrAr-SfE)mVJJf4{VN<|E@_a-C&^N%5)oDICK0Hze?$%u%`=OS6SAlBx%Nygok|t;HTc>CJ(Q zExdt46ok?u44KnG7Xn7}*4W+tfj$%1X|^Rx%V<;6RXyF^{k?F3boi0^XblM1u^RZm zL4Ez&V1KTKnN53YYI;LXR@S>qwMPCGpC@z6&o$*YsE}pG$PxfOC8g9V=QyuHl$FL9 zBUXV|Hbg~DEnPBMhSUAP3kDPH*P&ibO2Q@Tc#@Zu9(N}%&2?#3>Ytq%&WbwgTWBv)f#2fDr^h@tMzqlo-;GmJQil&+sk+E zm$7f4?MyZKT=gX(^vYlGK=1V6{o?&>t91jogR^bi=lITyyR(4tt6t}4_zCK%K7$`1 zMrWEBPEr^mZiY2VPK#g}1!0ZJ-;(F>H$U>;M*R5KK_L#2c#T!Z4xp5ee;yk9DVH0I zPpLrc^Wmhfs@wLMw05UU>Q^!88%Jk6!2!6;TkadyG_RM@H$B6Q!@{rX5Tl%!s<&DLBwkbpK=!%1#= zvZ*Z&z4N(^y-%p}s^*zj3e3(vf_`fHGNWiV0UtpZ2)%UzQk}TSP!>g+ql}IiqniPA zSrK82y!jlptu5gItLIfq3nL}W`AvZMzS|}MXI}bkb^}etlY948qdW`Bx-}2 z`Kq(?4ySzdYwBE!=GUW-Iuk`5USaz-+kx>bSJQo8-6?-8C1o@f&QL>@&V<{>qe4SM zT%(SIi#Vd6Kf2b*&Yn%kQO2D$y!hZFN)k@{>b|%;J~BpQ*hi(30Z6GfD%$=m;2xK8 zgW3;oK+yy&dfg`&Fx&uS3Eb>Wz>Q=As-NT}oat(%q{WD4`Ncs-ZZ_wU^@BLL3o>Srh^=P3G1OWQ+3q6%qe z01Rn+x;$n~M%fLJ2n=|0iOPaovdrmak@6-USSfz>EuXaeOhPSk74##4;{r24(LDn6 zhmeosBsUSt_X$89k?<_gXQh+4rx>f|ab(tv8C;2<1lZ^oy~07ig0RnFTnhKBPmgTy zgjd>^Ss?nuhwVtr&DltR986&xr>0mg&{=b!gN~6b_f}pculJyo;=&<3%;rHnL??#x zAak`f#!&+ToBH(nRI4b!dRSw;^u*ofC40k`P+o!{Cbg9i=;clrFS+ltY+>hK+V$n`R2_-yf>$tLl^1ZzJrid;*NUDTvQYK1XDeT zuD!sgj8V=2S5IsrW`OrE@div)b}0iL7R6;s_)H7xMiIFEdz8|Xxjkry8)N^=-fteXeiN&l-PAM&6TgWWDga) z6&#CX2PDaXE9<}SQOYTtVpnW 
zi1MDAAFK5301y>USp^ByKUNXE#fAWYDg5`V;JcL`ku`aLOoNW?sOl%173fb%hc_2) z>DBDv4iC~rZnoTO-U4PPx1RJ}<1pE3Nqi3ZZBB355D=4Ua)YI`-iynw4>s|gPfE^$ zxQ{QWi$j};qGAunvM$P$CUiA-4aLM&UfljR-oVA)fX@7T5!az}T8X2r^y#pP+M^=Q z4c{Xbr6>-ntuX?vq}zQ#Xoh;8D?N`O&n50acJPi7f@Jgr=lKp0MSi`Cj$^2-aI!&1 zV};#MqsPMUHo8Xcr{6qJDU-UM~qe2JNc^20`yVWXxeDUIzmHni;J6B-I1o~BniV!` zyt@U&5E0FE24s}OYAX*{szpz9n>{v%N8>Kvb6lAg&`T5WT8^q8Cns+9pxEWR)r=vkK0?)4pV}L^4Lz7 z#ofvGhUz`Wp+H(~CAvc_q%(TB3A9BSDPMc4x%aQ&&EhMeJ@F zc6%6UT$uEJBai(M^wMmlb<|}o<>4addi9ir=i?6r8WSaRFy;?d#a6mj8)I?rG)^|L zba^Nm1A-53Zu;ELZC~)Bzf)YEcIk`N{7P}^i+MrLL2*5K*{yo5JDIByUd0i0qK`-g z9Z$w)99!&LU9@7!#n~;_3=DRpRX-}H0cIb84roTc6F^=&cnbLKnbi0^ARdZPyQJUf zEs?krvEutxrb80?jP!K(UiEGY>#EQyujBF_S?Xxjbzy@q`KveDTO8_Mc@-MLJ{fzZ zVvFNNg$A_TnmA?~&vv_l2HLnj^bqrUJixi9Jh-@s+x}!>{quf#Dbq%T+w+x}G)8$s z;)QEf(7;Yr!aB=gCwOA+2Bm(+rcyT@BUJ%a1oD5;_LXr>uW`J342}w7f`EXEf=EeA z?kED%D$*s=AR|Y^jH5J2H&ePpItD5Y1EhNs2BUMp7#sWFhjZ`e-uvplyKjlb^Zz}+ z_{Q^yMIoXI0sIiNiEG!gsAW?_RNc3w(l8AD;(DsNZL~A7cVaBTl zQRb;zh#M2)ZS#;6#KF~(M$356=Jsqq>G%49(2HoTu1d_JN;#u~-(39OjiF$Q!JyPU zpoL&tnF-sGX2K$V)^9m@;1ifhOYcN=J^H8%rMM&8TQEACw=DfLzXp0_$8H*UQIX2v zTJmRgUqOod=hLm@oWGEpD+(3c=9M;s0Z_%Qz)VFo1NO$Oa1>BcUgAAm5Oha8u0(fe zP{dJog<;>GL=>+WWu_=6`%~^`06KJIbc;K)9AH5i(sENZJanOdn>N!H zdlHf;G{S6)HXWm@p0h_;1>Yn8`bz)2yRlM`8CzB7LG#+PS4u*AVSMKY<}JTL44NZx zfsJ>o+;&{l#ntc+UI~i{2k}Y&DE2;7X{5|=xc4%dq()57a23QHXy~uES0(aXt`M^~ zGMjFS1&xT=xxzcl@7*+ zl{aAS^%pnGHs?G*a|n_yecc+)*O+;xK>DS}&)=0R|BAOnPBX;Ep3^e!F(U-WGQl-K zI+Kg#97Y-n5iHt$KG)Bus!}D4yG<(oEq$UauqRgiFaw6oGHH>UFaqGC4O*Q$p;W~| z`Gyhepn#iNbJvWgjU%`JGs)=fyCvp)`;6OpzCRngGvnzq3P&96Yc>b>wWL(svI&q1 zj`J?ML!+WDyFRTg11%~_3yT?6lX4o0fUZeWh3 z8P3xsC5zFzS(Bl;$!#k&V5C4)Q{}~N*wW1BTDXRh|6y?Jb_=?VS5QD&=DL~iUtNsM zgzE~p=aKZet(#dU!ryUO(c2Z4&>E{p7>V|@4byGe9>4v9L<(^#Ki=7jcaL}WQNWS3 zo9n@Ywpn{!_uWD|MlVkC_FUUH|HI#VU>eSx5{jfk*nI6UAaIbLYdF?LUxLgrFlvd`kF zt7!)T0!UFi+t)t|kHM=?_2?(Y(ms_2TmhdS+I4<`RF<%SrO~({n`l%=FZBFAV|Fhj zYIe6yYx!FaZC0FxK_LA4EK#UY)ovmb#h;&-=gu}EZ1l@%8aPZ~@P*%P6ziml3^teY 
z5;Ly*LkV>s7iseKJ71aeZ}sd0P=J3SPyyosMlfN<{r>%wJew@O>UN*)MN(^m&s5U z&O(X*W}Z_KX`p|!NZa3W_zkb5cqjQg5z~HFaILOZ<%N#6<3RQ~(P9BNnwF2(%4gy4 zC~gp*=lUkPWsyl_BvCUWD_~V#AdCNEiNjY>FL?8?@@0&GI*s;xY?eLi?aQ>BD!N{4 zDpv*Oc$6OFFqkfyZ@Bw1Bq^?|Tfubqdj-myhU=rBOr+?4rs5`|whJ@TBl-X@4zId8JBz{M0gX7_C}sC7n0agDl@6Wz zre^bB&T`^afSPX1p=Y4?y2>ZljKmFw30FKGI?E7%_6Q#%Ol4#M#9UfJqOp6V7qhgN z4D=qKD9*KcOSTSBXPx%eT^O#32~QFN0Di3&K4w!6vGtY3A^pw-5duK3Q5n|#R`%yQ z@_oig&_{RM?xjjehB{?7`fPnwo2qvIXqj8vpi&LJ6T)k(9C~=L1YQg-1epe@qyH%2 z5=`>>ixOG(!Xx%W~x>9*H%SkGvC3s>}(L;Bb00i4pE}~c}F+O{syC2`vLP(=f?xU5T zy3fil!Us+7+)~sq(NRlqfz`hSK^**9=u36#r3x`P{u1TgHov4OR62&ix_Ov6q9aec zv@7bsHP3=N$INuPZQwO=z7K4%Mb8&4r4RIs&t($kFO^jW9hw8j5_!4$=B# z`>8#rfc=X7QSQDvHD0cRz3zbC>|k3o!uyn76B+6gWxFfKhAXgfQz4}(auykA)c_!9CG1Ds?=~m zu{~Pb)auwgRqqKMwKHG|;<@REB`;;qMUMS@4nV8K2gsDPnaopomj-~Ny7Gh|Upy~E z@Z#?+Na5SjyyVwVNRlYak)1UYkS3HIlcsL^Y7@G=8Z{7y!auUDV%X*H27tKYP}-Q0^Ab7%~P5QfsM4^mEAb z+tHBN-liYo4&9WUm*03DX`4t_G@|RDA5~^O2ST{EJvyf~%TyU%?f{-qk(5I~$FPLj zF~pP!c{ocxT6COj3Y}t4U5zbZcQh$3?+F}?e52Y8c~``$Jz%QBbhZZ>Y5zk*t}V8J zu>W#o^t5eoRJQ&#r<|rHEZk|V*~-p{>r+_Zj6q7TtGEB@=Rk+m7 z=a1?Mw6dj_I!-o%{HTOK`z;v)kK5AED@9Mqmodl4f&f{bU`rN>$A9bjO+H%Ax0c{3 zwZv|I$&M9(7#pfverCJ#FWL`T3GFUOQ#2+g^Atrm3|wAl>#a&;a6#-h z&ykUT>cTUmpgP5$U|;Z&79A6IR4-P15r+9pye1Hd^YxCFy7ih@nWYGz)^M)Le~+m@ zOR;b9l?mh9pAE>}599rq#eBh`;`yjzWD?X9&~}5c@EB*wGqWMDIJtTq^JeOYVRwTy zNU~c}$H2if&2vF@=y({h@5bG8DE@ILjx3CRumf~Qc4Nn6F& zgxioAK>6IXMn;C!!@?EK9`_I35uBz%jxPqfKGGGk#3v$j6%`c$SF-@NM+oOiKmZgl z`To>;YMPD?dC1~yk)fhcYuPdSM6$QD50Q+#4{&GPoB@vHIBoj|qc!_%;huJG2*)Hu zn`;A-BoWSPD8Bxivy^aDiPB+nT+GmNP*B3BPtB$c)qVtZm6{OCe&`;;iRCj09J^*q>GrDF_e4)EWO+ncB6BP{w8t>P5R%WkO zE)Tuw|0H0^_DslIeKS9^-z+nbKES7Kyb^V%%Ai8$OYhW|GSL47fv%md?@hD6LAE*> z=|0;;fJf}&an_^UmP^s=JT}w2z4!tH5mTPqCjRk0q3=OVnNFXrk<;}XxYTnX+E*eB z={$WoEIyL!#FB8}=rveFXG75#9_5H2S_en{+`v_Y z$Fd36xgn`CfQkBZ%^-x;o?iX+oxA`6qO(haE%`f=GigIccW}*b{$mUC z#jj(#x2+EDL}r==Mw#fB`JN@7k&IkMFP%*~)|)s6t!|q|Y)$TJ7=F{7QS>Kq&+MAc 
zw!v6Gi1?fMJPZ)yXGD>f{g0{45FXIQ*P(eIOg6i4xhc7`gnmq$8N0`mcbN|~?f?qK z4zajB-%|BuPT+(>C40U=9h+Ez9&pAgSyXRrp%|h7!ffrwd-rN+pnb-$rYRxnv6HFs z>#zI*(?^>XK!P_VA=E~x$jB(ksRp`$!zrlA*V7Bv#CcR3_vzCG2iAK(iO9LKW+Y3y ztW!iLvWExaamCLi;#GZa+spy-#9FH z09AZ*_-?o;m%H=6?jgrX-i;XFpv6r2Lk)c}Q(AJ~c08=S2s?t*CBZBb#D7QOyqLXIPi_vu{_lAw*C-d#Ltw`rJw~vJcpo&588+%28eA*Z^uv z3l`8qb`xk8x6g?|)LW)0Ac6-8TW3x^^$*xL0JlN6B$Y~yixnkp>D;gR>DV#$MLySI zABBxEa@^3|&wyEh;(seE*PG{*MeDEka|n~J7l~;9J4BR?+I}xV^VuC7wfQZNUCA&N zWt`Lb@RE+&fpn#YlZ}pjQS_jnBGgf z^ghEX%G@8Sv1V^C6_`;%%lcvCiha_z52@I6jrdj-Q1t zew++iwQN%7Xm}IEw94`LU@xul>^%SkK8z97bZ@(*H@RK-S-A8@b-4YGVAgW8lCX{a ziddF_=ckQbT!w?pjuRz%W8tD2*iU))k9le|uGGv-1W^#1JCS`2NHg%}=1zA{2ZM2- zS>S>6RhBN}6A4B@RRM9!q)7E#Dt$CQG2ljd1q20yu=xhok>bL`@_d83Y#>4gx>VEe z@wG=loZ+L2te;+(mZ?onU7P;_a8T8O=X#r<0Z-*jU(>XI7PPDI&wgcL;cDYHe!2M& zuz@fl@mM@H7FS)sT?*LRBzCr*sb8+$5LEAG(@;~x&-L6mjGn$)(4_mO0K`;!ttgjh zuG8DTw*nBm3jqsc46h9hxz%3sxmfjI*^zaAiCjOgUv2{&?F}y&j7ISqkzC*zo2@O4 znTy+ta<{X{mZGwbqGKV4726%xur^;7Vv3!u9X)a9k1ktPY}97e%0Oz3zU81Noy^5~ zp+|$>&l=`8)v_91?m&N1i#cax5e!xDMJZb{5Ghf+U<`ZgZCCeFlHqz^oCGdcw=wv6qrWw>=Fq^0_%${7X?@@zZo-Ue;F{}4^P|D z$;)Q2xDe>IXLB)9LGn}*@iqU1jogsX26BIpENw+-0P ziBhs&M%R7Be~hPYpKqy+Q}A{KEw2oto!MN-wvH`@7N&zQOkV1cr=OL0Mw@-S|DlA% zbs~~9Vrk}u*k#zOh2Q{VdKUu3pwC$RP${c&7E1X$N+yo$`+|bXA3rWn*tK?ZK|HiZ zZ#Vp}Puxm_x{B0RDr&smoflpEpl0}?WaI^U_19?LA3YEx5MV9uP9(JN-O$n+0Nr2q zdo&vAj%&PURtKm?OPiyp;ENmaex84P3=IM4YasD%?cm+U3^9TuZa3%vbXvH!&}FGl zt=T!*3zw15dJjvpKk_0O0^>KGO}{3K1SYRQ{c4Ovz8R`P`kg+Y5FjR8+*G4wm-41t zuqq(R$1q7m;77pZ98XC5uT0jDuG$(VC$EBY;zO%mo^|Fmr$HM79i5*h5AV(WR4QKO zY$D!%k4tF?H|;647eBT6yKa~NK7fsI0O2bjoFy2Hy8H;ty#qkUs>%geLf*G*VfQ9F zAwhXaYwib&zn6G#u?8t2>?DXzP|ifuN-Ap?e=!jV2sUg~4Hw{(fkMh1ByEhNK4(Mh z)a%j0SOPqZPCFOQ+EUCE#W`+eOVO1}2?aCn*PIenrDs4{<>WbimZ%{0QrfFh{t$8B z$qX?5W?pGeluDZ}LmlYyx>=fRz;E{z7y6?g9$=QArfah}PZIV~&Q=|Y2=~5V+e}8nwXq!W5WR>S4Sq{W%~xv0^=_tWZQ5;@8~t*KNrD{g z9n|&ZjKxwyI)iZ;6fG;EWDFXfb#!eaqvEQGa!r9IcU8P?zf0P3@^L+~Q}lT$Te-pv 
z`+#kX;S#c@N3;I^dL6Wk7L9anoT4-g5t;H?JoCbS{G$r^8R62H0{en#doZkItmfqv zfFY=hy=1~^P=ecQNW98jJmE9uvKc9n46C6}#8Q4(A-5^acfju9tYbDFiG{Hzea`X0 z1<$k!9dmpgjkb*Gt9{M3FfCTGP^4rpX42iMFYq<&Xe>`qf#vF*rXGN6;<@ph8RDzDizY_k?d#sC8_Pud zw%syWlutUsY7e6sPbH@M9C0J`#{M)*zd%<@`L_ktHz(+6m?)%$iK)18f(`V;Mh$Er z73yId^W~`;ts59G+0TLzd#j}>l0q;!6V(IAscr#kjZ6@9G^xf6hHYEt<@o_=YqfJ5 zCMmkJKdSO>MzxQ z;Z5u}zOr>h^r9Yq3vg`D44!-Q#Y5VF^iNkg{ki%T4$t8E&>YbX`Hgaf zd*Mn50b033P0PF4#Xb^?@-~7T+20o-OE1Q>G1P>J(;5BCzC1?tBAz9_koM~G`GSH6 z9HIv?*11%#JmPN;_XQCQgxuK73&{8=lRGmYmiFjQ9uU&6GBW zFG*0!D@oJqb?-cl&-S|jT`X@Bq>RPRl-=X9aGLIWk4@lZQyLHljXT(?$%psB<^ZCF zXDC25w)6;eRg1589@xBFTx(i<)N{v=Kt`@&1-vY{w^JUj`h*!Ac>~FjeGYJ#1@t;& z^)`EFxC6paM(tjVmveUqiAYkHoTi-h+F0&91h6$n9BSb92!GhV3H8f3T{`aR81UE{ zyIEeIA%qFwml5|zJNU1gRpLXQ81B8syw}CGrN3n?t@4&(sdx`mDJYslESLB~RNkXU zW9Ry?(E_ux(DdALv`^T|Q{?Y_g_c)+MGhG;c}Z;ewcmvWJt@o);M)&-N%C;iu-3it z>f7j;ehUY%^?12}g-iX(VI8rW1Hrp!&BqlH5xZCBk5PprfGWJ?ICkv-_F*wIqnh8~ z``bfz9($8N@^HArVz122OU{(}E}m#k4Q9$B7CptIY1!7gbuCqjSTO(eUY51#n>!|= zJo2V5@c@PB4v)l@z5e+HOI3D=*Gyrm_pUf_S=tmb%`$3o-@U)dae5t)l=&2rP_k^D zZaCx5sL)Kjhtp<{ii%~5N}5@TNhVvfH}&8c8Fr+(lXvnz(n$)L%Kn;}NhEI7m4BP< zq#bpkY;HHJdEQxzUEWw5e^hi~($Fw8SN&%T^!@oSDvCPigv)AeePdW}h}UDl%a0?e3HPnx zzg?ai`7rJF!ywLP>l>vx{A37F2<~Ux9JIHk|1!Qn!sMw6#K}m$b{*|u#H+rf!QOtO zD;57qgqA*_`(|#e3hdv+E5YFPyiv#tUI!me5Iy_P0Dtg} zFqd`4`oH4Y(Q`kFM<)-&F@lTGoLfJ#xLM}s7xtY?Y;{;U5K>Nz{1ty2N{9lPg3tCr zDK|ic&27r}#_h2#QTHH5*_wgsllX8&QvS?xxbb%mlqvTFf<%rBI~T=v|T5352~@#k1_qDGq1e^g$H9X=T^G3 z1UEj;@m&-VoV3>EmeHjAb`(6HZV;F3ZC#Cf>hh7UbTJpP7eCQK`WmEn;mp_ZAjevGWX6e;fgEt%k)-0P**ollzPl%?5f{pBq zleq;!sxPDe_`bS*-<;OW<9or$QQN|et2z4WN0fj3Ll*|F8)DUB^SuRWMwfRWAbsAk z?~aCi`>a}-x99hN3OIFG>)D=mdKrb$oY$I%?Fx$WEph$=-8&gEYVsO7+u#SCG4h}|k3{nbldiO^Vq z({-my%sEkOk)kF1k9zz2^&gBTH+yU`0gFwpgEx zOfX;ft|_#{Kq@@H5n@&1V^0O<{1E_U<1Vri>C_8#l6%$#C3O`)OCmzCoEOF4uwPS? 
zal?k5{kSOXF_aO{LSjEDY)yj&j68J7<8Ze3*BE8I)<5A7x^szEp-hK>H#E*vbX!oB z`dQwH5(cmSt)VA^lnHTj{yU>UeB{&hN0{%18olkR{WdoPfT<-eihs=G`^-;RVLUt(K0jtQLQfxA_9ojzVE4U*%gitH>RR$x{#evMDwxV{a_mFgwd+7|X2qsz?qe zzm5TuwDR=pNa3`l^MI3S^nU1i3|=k1Udaqv?-+CE9EnQzQhgaE4teGoR8x^a%KbSf zjC8TRc&_45W{P$a}UV*c7;FgwgzAiR^YA+q|C7d*yWpuLF3#l(;$V?5YO zt2pBEubgb4HqSf!1vbChet(A3&)5R}F}_mC&tUPtQh^Efa(5{{(b^$0$}cLCf}e!wZ2!yC1&`PiDl4dAC^@YGJSbi4%t;iw(jJl=lY_ zPXk3$6SMzXfPhw6MDq21b?|O0^LH6AzHDH6{do{$qQe3j%H6ji9BMJw=~cZ@b`O8^ zxw*4REcx}HS=mA30cnwEf6x6fxF{Glr0v7INktwKT#}{3#w4wsP|r3$0n?&(q=nyA zz1n>@UPsh@6@cCK*OJ>X(A8#!&4*tZOzO%D6^sWc!Mb2sN7gaRdyiXzJbJNF<3bnF z;m^wi+AGS*;-!QzjN=U#&;0Yt=G))fdRi7mol3%iWS?dayq`%z(eZ03it>HaowK25 ze@ou7Wy7frKXd6C>3W)GSuoufWphQy?u8dtrWW?VC>1g0brHUF@szw8SWH4}h?R(u)MF4oppF zFWx0QC9SZo$cT&*n+%(PV|jhXzg;wPAHSrT_557voFU2G;)R?h%U=wVt^~RRx^EB$rmLaL<=z5A@8~%}c+*vN zj43tON_iS?IZIdcfk3TLEvZf88>6Du^niReq9p8-iwZ!~Z+AS4_mG!^^oE|}kAB2C z(O~TA* z_|270Wvx9jxg+v~CY1kboC@g!o@gqfw7g8%LZZ+}C%5Ofhr;3>>3r%&YqIZ6a`Q7F zo0RKtw-$_go_(N%{hH*TLy6#m3Nv#x^zvfK(1eZh;(?Zkh=taH_Q>$-AG@X&je+Wh zk_Ec{N}3T=6s%qTHU!k3`{VtA_t^7+92RJQK8Q$)_HUb;o=P;)lLBgB2cVRU6Dv(v zbCLYd(sm#VXw~C~Eu&XTpgjbCE^}HlKG~+ zlpoVa2~^9=E__SBRvCp~D1yh_V@^W>L^Qr=%dE6ObMu^uXDare2dRf#68QewrmHL} zc7Qivmo=oIjD_$gulFOPDexSZcKcro$qr|EVef~U8PCF1d+#Naq66X{T zOZAC=sVLG2K#n^+7+S~NRLIE4fwYJU-61AGlp+bGrwJopP2DJHDWl~;$n74GZgyN{ zd7E8p3ni?VK6-@mu8#u-Wg@pDkTNNp>!IzV2DJ|jyNq1{ot3<)4?h+waxpVTZ+m?* zor&6yX*JRW@v!B6<@ltVJpBN)kB)X`>&LGoQIXTQf)GW}!~wKzvi`f^^#|GdBNg)! 
zMg~M=VSSiWDcqr{*|L_WLG=q1X(YU>o-_)j)j7(}028JCqWQWel|vw6r&|buIk(}y z>;-v2ymCXxxI}%`0kd?r+~CoNF&c z*SDfklWFl4$W8b?i|v7OzTu*i)|h!IWK`Rq&xMyL=U;`oB_W)LUpQ;kVR3v30W=c# zdIOx+f7JGx+gVg^&Q-r;c(CnlY2TLks=%0t6k#>mf2$;^sM_l@(o(t^i_ZV5Zl-3` zZlVVYFMQ--y|s5KB;zo3n&5Ep0_kDN4X`-R?fz$$z6GfD@mlBGB*`2_?VQH@_b>Ui zJYdTHPd_pG`;`q%)pYU~gN4@7Wj+fU3JBi%Pb=vkc@v-+WzP&gkT9DiJ&k(n!|pmR zQE78s!mSdyjm-wnJGwkTroHeX=Q2=Egz%*XfD=otpVcTS{aq^BrN1Iqa>0Z+L`3dv zpDn&o7!)rBah6-&I$q>SK&CV%gQFfQa9JfG`u|8i>Atb~(mKT4u8S^z+-lU?D z2<^G3^%k>SD2p%={uj$tHxKayM4H2|v-FpP+om0q=INxXY+5H9snG6_7Z9?{0?Cg( z%7zAWe~he;$f+hI=P5RpMhK3NWwBdaEGcP`$&&YhMnDDZdYr%croS*hBV({O{M1p) zlQ3_+5)2vn&*w;lTsvr8q>h%@E`iV2WhXEB`pS&v_Rqn@CcEjn=E!cWVW$=HcxKPX z;v2pQcNNN7lV8{8RF}kSO}~VTgHo_+k&rkd_HFe)!%*Hd-~mpYc<=?+bnr$Wki0UU zObw}t2jEnQErvq>bkwm@ST>YSaZ20DkfK zGn=+(c7irDXTbf88XFN^XS7_}cOVM)BoK=83v1YouDbBuU+7S{GoAY#2x(z^kE%CM zdsH6qlA+lK-$X_=QrdX<#R$S?vg+8|mCnk>*nn1*>!6S#T}B5Os7 zP^(jmfTL9}q_W^~WHb;*(OA)JiY-AM-v4k@u{q|+Tdnn#fUVP*^R zi)VZ5w0N#~`$~$6?WfLY`E4Y49{-y+kjc1fh!ilFVeef|A;^5D#VQI^{JD9Kd7a;m zc^xz%3`S!X@P}mYZU+tEml@wn18PX_iaW{`Xk|WZ|4?KIpn|^3;B`L0xujDM1d{b8 z{Lfnit*|bgUVJ;N8{-sLK_%}` zJ#I};+-CzOJPXdXQa~dRN!uc^gtQcAy!a5S$N~uEP>*G7Y)Kae8bXE_7zAD>G-$2g zw&Pq#ft*w=i{3lf>o|hhYgccxf6BCBpkYG?DDsAL0!WwHgS}3g>SwXH+CpUC0LvMhJNPsIzUYyylPR&u>lyDf?e)^Q z=Xbq3tQib2+h&=Goi&MoSaGh z{?gdo_mH(-+;vyk*LNpbl|ptdEl*qS2AM<)7R)9R8TBJ&_#xK2`swTRBk8KmR=7QD zSnzLPK#J^SeEa8@S67B3jM*oWu#u=f#_vdLeh|H_`7cO*iP26s=NmPQn9)yhLBf9n zu<57YA^JQYwD~AVSW7+)YL-#StAWSz~{Hv+!d@jR+44 zRcrlkZDzLKE#gOF*@9?bR?m98c1Xc#_5c4zri6?fAd9GTljLz>7v3zZbMJi#PD@yo_O8xmyM3&9bsWC4F=J_vB_FaToV@+y zUyFvb`H^>O#k;hK+(noo(h)<1;rEdG1SX+(B5wvLH6=wW`W`Hk z={HM%x*dKpWw@_%e46aLKmTQhet+zg(}hxGBp<70@=S0ExZX$iKqh zYN|_gsYtiSi>jRvnD!WF=;z@Izx2MfGljV)BtfV{u6Pzg4bhhOzu4jN_bhuLo+Py# zo=r#iZH6XvKB>P78XCMjw;@oI)FH3X^{%Onx!W29vc!Us5f#7+v*fV&bxDA|_nr1V zLJf@8bD6+}D`Z3_p0&Zha56c?ULOJAO)O!(bIW|=ktoE&)!LmN1$o{WPsVNopMcvb zZinR^y)8= z?WgKT_`|)lYdI<8hf>KHIR7RUw$Kp-=!bV>okb7OZ^bGC)lwi)oZRiqSWjlcG%i?M 
z@?koC>C0p)>~4RgQTW1dC>db}GmrNT;C7J!TngtF8zI_{f%Q}~N`;PqG z5B_QwvpQ7rY3$)8o15##2)iXzDKKEe6abmmrCG;dm#bR5^BeJ$&c{;|7MzLV%p04g z{f)4=2*Tk%{N(ufzu3D_^VS2aAG`AQD^%Qf9v~_>ih8nR)EFmc!QrLh7!I5;Z6i00 zSAHYh8QlG1^qh@i#NJDZLj3`0_V^!(EME(PAJ0k7Vv0cvt>u%$S!GyWkZd1000#!3 zZ6CWIyI6P9A$LiqH_`k}Jkaf_P`H>xu5f?=K-Y!wmRezs2Z-Vb<7c>s1v?uCaGQ;a z(!xl7%f9s%v|OdU$2kxJ=$|P{7Qf%Nqr)Ct*+GgtNerACpMJP)MEkqT+xIQ5&2i6Y z%jVkH=*VPpb+T`wE6%E57Nh)C%b6e#%tPw^TBg~L&WG|&cdU1@v&dr*HNd_W`s79Z zWs#N94#9;!2#*9Pf^N+#UWf|)T8;f?$}vCpj~+g}Jtop(H+*-=9!RW#f+vgZ1<;-f zcZah%!~}S(k44uVxzlhi&)EpFc6?lowIjekZ6Wb&5!%!oR1AyzfJ@6N^0ssBxeZn; zX4K^pequ*jv#Ks(2*cbpBoUrnC{-`<$jc(3_PEJ2iY&J7TNQWh`g&owpn>Y}E9KQ7 z>&*2TC3nnM!O2rL4+FU2&$jw#oA&>dJk*wqFgI=J99XROSm}R6D0Z9xLsYNRbo+aH z6Y#ZId+n2l9jt{Kc-fRg1?A;kdJ@%K54htF25thr(^z8Jex5q#nCnMGe*XmKwOpYy zz_ZXb>|k))J77L`cH}KKUu7ab>-ZGNlmCql@4ml?KHe{XTT>UipZcP*-$3ZEyBHHqFNGmCgj$ze&*5 zzM0e;MtKE;-}NfM+~6HIgbZGht~`~wy`fQeZVM3anP|EpUWIbacE8_fRl7yVn-%6ad%{`uzF6^C_ zNS`nSYU90fwZwH`m%E05j^cPIh!v`d_B{drCyUTW!mgFPAKcpN3a#zp3wGT6bymZ$ zshc|bWSFo;kJp6O;Iu375j(nFv3MdHv(oO6!4$gsWm~)S6T3-q$9>5S%2U8gfz}NB ze(e#WNWHi)$}z_3*#Wmm&WZ1p;)~qa`3&|ivkGPnomyQ$5>QvX-i7@|O&Y`$y9BHK z#Io2AU(Zwp!m`VuXH}#fd!8Rcl5~b^ACE9rrM_VvSCI%{_K#Adv$H2=B3|CfM^%74 zqG%CZciI`(HB&p%siLY?ldY$dV0_ZD*%(B|4;#RfAD;vo9z74eUz7;3&Rt~Mp~yQa zUEn5s|C|NfK-aZW<;@wm6l2jqKAr1iEgP(w&}p<8V+xElz&1A>y!ZR6;Tz_CT~!#X1lqR9)_H-CEr&s+C@4jPsUzlX%1 zh-F1JWjF0PSGe}T8A^e!1?G-Uw%4_&OeXCzC&dFl1`Wh@o$KuWjtKh=9~Ck*tg)tI z;-%;(^x9$4UX86ivkk6be5&8^PD(jos{5ty??}JB&AMcPUXI%0@bL_*YXh?t&~O^QuoYwS2-Yc1}g5DmRAici}R- zB6+Muv;?y(57-+oKXRKo%_XWp$o^-aiahvhe+K>ix(iK95DzwqLwU*BkJcoqSk7xv zoMH}?rv!D~bhpESDPC|0LE<_rxa63va}R)y5~^dgy{5{=F;x8 z=D=f5gN~p;6lCsbkuj--I55h)VY3{A%-U0j)l@;hxhdMj<|G`|FLhi$>hB>Cc0!Z( ztoH0@D`$%leBUK}%K2*^Cnfv^*rw^Zsn|XfmQ58Ep?Mwzkdp0s#48_9*L{yaSA5i? 
z*I=k-H{+sI0g?R)c}_r{{8#*MLt4Ro#!d9=St$0< z-K5~63@?V!hv=m?NcURGwea%-`3-5H+TTQe@+|;-GG%aTFw*4V2%IW9l_=c(mKw#yTiB8Tij?TNlupa1d}UX*Ii6`O7(6;0oUVto6A zmY}GdkF4uR{p3!AL3iiyQ>JVqUc)PM`P6K4t&K9ZWaSs-%hPOHzAl|el8Tg&@EMIJ2W_gxy|Ih=xbQmEv_!nKRnOrJl-^bmB&wtF<+=X{1zJ8>!!U6 z1zeacB?azK(4PLpqeX2esO|{1QyTA?b#(vnfL#Y}-Pt#WZNraDNYn1qa356cpci!A zA!86@Pl{hH{u#76Ksso_J^mmkv1vZOlQd2_mC~6XJj>jx(3w(eNm&oF(~!v^I*Zq5 zK+s1n^cT;7^%Qi0qr|hx-EFd>J1Z@NdHqzu#+2ocOuN$0SLHZXOri-7ekkUeK(%U> zi2NF-i{SHgcjl4`Qwa~&?`bsSd}{)6O@-six6i)~p>f)YZ?{!OhDYZt!C-K3S;VCH z=@SyjpL~~2sKoqB!1xcv4^%(1^$wGHKuDrr$wInwZ z$IGVE^NABr!JqnKr}hLj+^i`XLqe<2fQc#Vp=BHcpDW$iqb;&02WL8bDwExHN_1I^ zZ2Bk-;eTXmRuin7*=cTaF0I)|8>a z11~ti8d@sH)aupuw;IDbq_L#HQ>wC6J?WH9to!e)q-Gc2m6`xJ6ghg^=Pdh6(cY@+ z3%M}%-40v-&(@?M4q`q|ZZm!;Yp*g7bsOlet-Sb6P9A9jErGQ*L9;+Vl2nCw`QwZJj^;&tNQt2D8`M?2w*G z@Ud%#ag-?!J=S{rN1e7e%5Dd-m@X`-erk@ByIV+UHpE6HkDbp+-YaZQC$DwJp^umH zDNcGXZ!$&`AC`Rzvk*<}FMKM}AvXVWH>`ruWOvqdJ~l!iB=wcaObv_Y+5di$3dY1)Z2+^|_W zbx{)Ljd(1zMzia|Yj-d9`BL|nO&P;A$@-58lg`66GqHLrX7RL3IzM9xhCgr!uY>gl zRanXPl!>QLWea+ff92Jo`1e3-lC8*}fm0XN+^qLtW8<3(vs#LjEU=1%dN*(MHr7o! 
z&HHKc$jlscQEugrwF2s9{1BJh%tEzx;7yOU+iiCixmTb8j?jJYBlkMrNPo}f&G2Rh zQ*3e3A+oXv?q+1_x9BZH6YRZQZ$g^9cX*pAc6LmZEjPV-6kZwDqGlwF2z-kz4v9JIUIEkMS7Y4!MAl|`o&$XN^9CB z(N){xZ2{+?3}h-%zr&XjFeP8juqM*b){Lvqv~8P&=Q{Q`?Cs|I%8ffc_A87UT@v^A z4VXAgAA`&7=Gq8JQL`Q})}RaVyOM+ZM20auQMZ3L;NajwK=eH}>Uxo~3|7WVv4$a4 zh18Ed)a#L<9kd8xpzU{Zo3@8zAp1{uI!1Eb7Jzxpj@pz=m@lGIt(NAH=|M%=w{P|&Og@F?+KNK0LY?i6e`0s<7Q4)mv<8RWY#gNrI2tG;MvVD; zcplXZo?meg{9y1mPtTB*P6Ni9h%2VO*u8l$wO+ekSCd-3uSi*>-Va=~dg`RGi1Rqy zmnoLJa+tDSV|Nf)F{Q3bBnR~x?B)fO;J9qC&r6qDHuEa%<%bC z2uEZ2z;(s3vhMOg*?|Qb)28IPCZ7{O+Ao;CKk&#!b z>~@jh7w>MIJ`RZR|4u5s^zqO2zu#pI{CB6on@f#o$`tR)P zpvV8^n^`#S)6s9iu@8DngMpE$YvzyGVdTzRt=4-IM{3iKjv z&gk|h$B80Vb|PVd4;;E;-s2Q9jyCJr7V=n(X3r*5&{SHKTALT?~j^y^8yy) z+28P=-0omDI=oGHa|@m#@zb-fbSxwIDyoTuEv}g?7>_`c2h~27vQZ8H&qwYqFFQi^ zrb?NZ21p^64GLxr=(iCd|vc=8s~U=B;a2Tba(2}_|JI*A zA@sH0ijKlk_#d=ut_H<#cPUH~dtX0kj8%MP?7LN^TgH^6^F^?Z=~9-RzxUTK)GX~O zg>#cOpW50RJQ$pLSbS2GYM+r3>cs5;(v^i6Tm*|^wn5#`!VWR!mdY;Z3>kf|gbayd#l z#PLO8dw7C+@-=_S$ M?;M(N$69rd;DBtT6m{!^umArb@4dsCTDNs^R9slW28dK; z2?7Et(o0lCR0ISBq((t{lMbQ8j(~uQfOMtz&|83rG^vpqTIdjZC@G}Q-&EJ$d#!!$ zKF_)5ckVssc`pBveRym)=D%R zJDh9gV`;#WZZDT?z}!20Ym_uYwA7>Z&1OAJBS2&{=Qf<_In8C5b9Z`Yv_e{iK`<7C ztC&8uqLqCK6EjGu#iiJ57dd?Y^dLE0JA^;*aWAK9Ai!|I?V;=bLW0J|fqRiIegJSu zrmWE+J2Ntn^pvdutQfp}P-@t-3ftGST&27|JT+jD&CYk+9bsMr?TkHZxW4rKw?PPL*os`cn&jC4!`Ck&5vrTb zfFvHNUqzYwbRfY3v>KJce;hUC4-90a>DnPk4YiTJsbCC^6kE_@bIfOK(`z8 zLe)aOL}c^t@eF2aEn`uM7e2!>Z)dJ`r0P`U8w~=VfB!qmKY2LM(%|Vxe8cQ=e$@zU zMPJGDoB7fPmm^Td&)eA9F5i;W8^AuPZyFf-rGV9V8`_0CY)NR5H5V z!X_Ffn3MCFaZ+ltZ}cNIuN$<4apG&4)N+ZkM|~N}MKxIjRTG7^@+NkMY2|r2Hilrw z+dn~UY=;RawTt2zArR-AF2N`}bXq5)g#m%LzTwhUt9eS6Oj4@fv2t9I$_3zZ9&<0n zc##uKcu>c3Gu8ZLM+zTj$>!(Fc4!^k**=fd_ci z>UTDpG~#ICG@w~6h<(&|(#pDW!ZH6jX$%jVhOQcQF~KnJy04hrgH#C9ef{;(7P-6( z<|LDX^98W@Ff2G*)U*K-mnvN;qCQqJ=7y*bJ3Qu-=)$hJ@#XmPK&agEMn{bAcreXs zPh^h%S!&IRp6fu!rHsEjWbw9+JOBe(_&A1WQ1Y0k;P4f=6o)o*Oz}g|r-6Q>ok;H?Me}yM<&em-Y65k8Ew$~N8y0w)aRYZT7PJo|N|C6y4+@S(B#O0u^rum?_z@2FW@&pYz 
z$&~ZHA8|Y808XyK7!j)rVZhRF$XocBEPqq=on)k9EVzTJzL-Vu>MmmtQ<2t4UtNAw zKfAJ*^QafGz~tq2s=V!;aBbs*FECV=9y#I^r3#aGv-C>0JtPna28ZmP<&}%p2Y#E=NgP^> zF-}t@4Fr)HVpzS8#~m?J{x6Gf|C^sVN{PxB8@>ygmrp z+(QQ%KpJyJ5{=9_dnmnddb(R@Y&y~>c^n0AtiQbkJ`f1A0>FaW+4U)IgX-*JN~?Wk zJ-3x@H$w!u_+w>lI)ud_h$eP5Kfkuf7E&K~eEcB0;_u!m@=%z=MhM?uBO7SYY-?4} zhbN^sMsoCGIF!zK-y9yNz60(~-ypZ?2Cz zjHCwYd(Cy7Rv#Jz(layIbMAk>wk-QhIA}7^6aV2jjgBLYg-aR(AraYsLYQ0wkm|YB zo*E@!;Wd}F0$4zU{oD9`Zt_pon3N4Pg1gjWmqHl~z;i~5WdiaF#DVudrneZtt8gjt zY9qJLhnIlDQ}R0ecT22VCLpSYdQU;ijO~;L*0ZxAeZa%nfATAbbs_ckdcb7XDxf>= zZ#SA0Y|->8LeTHiM-()OSfq?Mcb+rxUg;5#D|q}0be@sY4fp$gOc7Yi%LjwGgp!9k z6~M}mzg7D5HP&pxZz(gE*PEfiy;e6H+436{MD})Tq-3r6KM@p~HxWpbs(F+j#@XNF zzb6}XK$PllXJJ~)0CKgwUvv*!Y;WAoXcCL(dz{U^3+H^6R#R7#98IH- zX=NG6PJR59%glMxv(Ilc*Jm(qHh?Ie^bphwPLV(Y5%;qpV+i4U#7LUG($Y6!J9d84 z|7Am>fA1`d4Xi5&nCZo`k;Gj4%xoTLIjhT1j>1n-eweqYv; zplo%$cYgVwC=MbYNcHc#<$wnIk^b9<$0uTB?1~a(^EVKQ>Hs9|cFe#oGsA+9J{%d7 zIXp5(+#@;v#+<<>fZ0)}DlkY4Z1PS3;Uj!hzz~-lr!-|p1n&JW)=?TlUGy1qO;lnU zV}aBW9P(00H%ztvbr9ZEjJq3IL1A+DsIP| zA&)<9`Z82_u!~o@@^RO~cR=~FOM=oTtCq0d)+{L{su+E(L*k&^a-*4>sStq#C9{8Gw;+KIsdP(Kat0s_u{8t%uJYgu-e*G5n=V711nX$_Y)soFd+9em zk*plH>h$g(`NLX&bSp|&PgeSL?d1W48q;q%O_Yp>2Ns&P$SsF$_ej9*3YQYnqa3r4 z1)#W8g@?-0b?2(c1Ipcd28P~s>>0+5tz;AC8A>ii|v%#}jUFES z;fEW5g~T}R6mze+uqa4T%q6sa7}p2+AlQZ`KVAIK=YS~&eCx?BhlBp*1-BT~!rO|h z-(|7H-N9AcJ@FPuF8)ez7V9X=f>l#6ayC+0g>bjm-xbv8`W9+%`fYsG_=e*!vXqiD zv)WzW0_ITi5V>BeHHJ&oRW;HlFQ!)wTZIpdRJ!y%0)6>!@TtayKd2*kh`JZK7Y}&8 z;?M<;lxvk(63B>YWS1&ck2d~yI^792+VRIC0Mc^~eY7(a^f=qPiZ<8q6MHF)@UAY9 zsRmBglf>m#KMF^Fl=ZUj&2$rCz$b=FLNZrNT&hNXP|zK}FHb-!kap2i^Nl^oTz|0Q zl81Jwfm__^n%PBj5CpQw>g9pl@Ch0Ryn3lm>mbQ7V>tBrlWTw7sUL%og5Z+MpNH#k zC;5y%+nhbP-nUJ}7_d2$6DJ1>^yljgbR7IzdAy_4s8J{&sAQM&T5%J|=S<_ws7VN>R&it0iNulBvP9!w%05lm{>=E|n9t z84MEz?bQ{^-ladg)hPzVZL}OG82Gq8zSbVB@8Q2SSybmy4|20)V-(XD()IbfZ66>! 
zSqCsy%lyb)2mQQj{XnC|$)2GQL83nO+IWqF3+ z?X*8fn-HcT&(P9Z5APEco6IosXPmFQN zo;rCUOeWb%m|N%L##eQY%t&YIMpMf*g70yW@qpO~2RpNlggb(l7ZGvh9$iN@`Kr-s z8hgA!avxb>$$(Z(B*4-V!GL?Y*n35*M@gaO7F6$=r+@2&ixzubwikf4LCBAK?A`w_IU-?ewen zwhbWhpO7?7f_I2%3I_Z2YMpc|Y2oUHMuBg=8HW*T>j?Tx!V>S+c6Xv&p?ZIoU6S9- zbc}=>EphS$R^x|GHwG3eeg@h1aQh#$`HTQX#`casE?Ld3fzoW@%L!X0-T-ypx>E_fId6i?B}SKy6#v1a$>2d}?z-aJu%w@in5<6~fi$ zwQYQ>xnTo!c}fEE=J-ZXi%mBY5BB z-mKYjIet{nq5?`cqe(t!G!UCjG;ofO?k6b5(eisCP`nhcVY+fIyiDx_~3Gc>u6 z(z}m>-+tozDRCy0|6GeFrJVo2WzqlCT>f{p*Z))}3oD9vyDmqpaH&vw+l<;vnACc7 zNroyPqP6gOz+Si~c$9BPI--aL~}*K?w> zn^EeI=70UmkD^C^ZtXeG|MDUJ=RZfHrcUpB za{ZS(o;j+*y6f$^U#6$>EJTRqn8q*BT=QoezZ%@!C-98r=82ysDd`yaf5XPV8FYK9ERvQ({v)7E{_K3qu6tch z|7Rn^bN^>&@E=;H4@5E`_BSm5Sho*E62P7H>vRMXp&z>m>S55N&p)vWGMRs~9sXsu zz9-d;e$8p%=bX;^4i)$`wf$v-|6`kDgietO8Xd06uB#v7=5d1$W?#vdrqxkQQn^xz zOUOFizZjYWSMVRVc8$9R~Mz2K>)3{&$fg3BmjmQmUA@|BRIPs-8yMr{jMb+LtV@pD;F&v8z|IFaL%4 z-2Vxf|Cfj}n@!`K>_oHGN z@%7OvBEEF!)0?qZ1aaHDIy--frLlKXH|-}s6UQEX$N<7N+#*4fW0}A5Umo!OY+g3? 
z3?RASuYG|S;OIFB{DlO-XuwF;ygSHy%Jq-M=g#?1h&JSIgL~E$wC2GlvB025g1B62{Yr)fG@e0@l@> z^!8T%4ZX@&a7;F~8DkpdRCy61@+mMN;A^M4k(aNKklx>e>u?g|?yJ&Z zPl4C>-ioLk&wFdaQ3b?sS!chiYooHdMZWs^Ly{T^Yz73Epz*b)tcH^1}Fc?_r($Dn?B)Owdgz`a^# zYMgw9CbM~{W-^Evq=oJ@yGU!IBttDP*#wr9CZk+C+AT|*DO>L&cg=w>X;S%UXJ=z$ z$IWN|jkfvrdZYOkJn6tf4JkZzT-mN=YkoxWtU&hgb%W{OvRqeR5ddMib#=Zg=X$qm z?=A`F8|Mepj}BU@P~P(3@Kx2*4O+bG$c$;V{vIrl8!gW7d%a>5TudII?=2@5oZiXU zv9hV_SJHzOSdY-VUu&QKTWomJeYckhQIWQGp`%TCfk}%#%6MK#)*4X3fe0SVjd4># zF7KCJQ6Z6Mx44sH6*n^SrHTw!HVC)GwE;9Vp6rgK%P-^JELW1Av-tb_D|W{D)H|&+ zaMCH4PFLMrw5)tIU5CI)n$}u14s;3N>#D2<^Xfi`H~0+C;1{Ite?VOV+Qxz}O^Gs8 zD9o{kRjk4`!| zIs_sbyhvH4lWUAmJ19-qwQC1_H*@~bMNNM1)m}|a=!dNnMJ1%S&O{M!j`+#Pz{@xR zK87OjEGP}aZ4vx!{2&Rw+I+ihAeS;!ZTmddTTFrVLg3ozCzibus3?{X)_*@2IKMhT z1!`r0M~BdBikFdXQ}a%zGu#M$ zQb(^tX9s;Rvf0K!;pv`}di^1l@N)2)Prgy1vitW@0RaX)*m8X(1BEX$VNc`RV3 zl+$FSH=+jIHc2$4fovyjR(rWtk!rQnm)U7t(77<8rAfk%!gtt4DEVb5#$1f8TNhKx zj+j^HAb1FW@xjf7FK-qQ^#hBWqLx8r*pKa=TtOF?+5r_cdbB^-Z&#-oa!c_bPKAtM z(+qC#Gqmg}wdqXLu(gwf);enhGdn1$x@bP-X=LU|uVgs4bi~kE)Oq&Z*Ud^FwZAX1 zZtDSy{ZU~%8`CXC8O0M!D6zPysCFjoMiivsH0s(RXEjGmfqx?f2ZgUyV9dP-IpZw2 zRO7TxnVf~W7g>0d_R^&5=DxHwehZf#GI81QON_Pg8wFwCYuHvmjr!V4aM!sdI$$D5 z>Pphj4o;7i zf?h6e%GZ^57{XzXKBxLb+#q$*AGx%mEtS%saluWHZ#`x;O1=-bHJKy+0|kR42Lq~H zCj2+5@mzt*c8&|tQIHtL`S8rnxHrpK(q@~WKZ&@mWn5&rGx$czB>_dCM}sfF5X1+S zY}5y@oq}w4g-h4>rM0Y})rj|6zv-$as^$V;re#D>fN2?bf(nh0nEYE8G&T`>LvYW> zOE$OBU|1n6Ui90TIt(y?#n>ixRf04d|A{p)nGX}E)_P>#X5aM%ajme410e6=D7T-e z^UgPlQrr4gVGOmn4Is=bS{ZGEwX11CIA}HA7s#nfGl=H2NVFjadN&d^&)XN1yAG0o zv0H>D-rj z8a5gm!`*m`!Js73)BP=POcj??R1@Oe1^I;S2pTO4Tcwcll1&T6js#gewkBZh?_9og z3a)cw$R^0>y65YK%?5f0a00YkxG|<(k2K%-jALbCzm*JHQdGb*GOAbI&QeTsRWvj8n9&I0e}CR z&W0X0xPfI^__ms#D)7{?2JhWvQByjVNXZ`xl($|rh)7AUQLvluh{-oXP^|orZHeB= zB=-d~7bSSfRANYE4Hu&3ILDFg{-ph5g2=3!LG0wrJN@`2EYN>st z8lAVwaP#2)STP5(&QF;OVWiE(t2KFe*lvR_@eWg3m4!Vd5eKG3;QjSIWb*hMjE zY3Wf8Nz?M`*+}Km(o&EeRh(u(YUc?Cx-Qr}!?>Pg%?Lh9`v)=5)a;2vX1x);y1)C4 
zy||uhp4)eoP2m32cXjrWw^|;2Ciz1s0l!ZZ?V_mxJ&MvQ0A1GD!cZST=+#pn4m#4w z&E5=DW5YI@)?X?wU|w&}{L0po>N~W4P-@(5eJ^d&pvtkc3kN=*q43xp*Vu_X+`X)( zQkAX6^1%&^451Ysq;XCeoK(u@65Sy+NRe1JVxT6dHoYPjKgsl6Of&oKyt0IF{taB6 z>ZBzxv5IM2Vsj_WMnkKhS4`RzR?gm=aD7F_`75TkR>SFJpDA0QXs`r671EU|Ux@CIk20Z8?Mj7s-0CUSsPU(~ zWWc9aH+AJUPFGlWFX{ z6J?Cfj(prs+zNCoZsW?kPNiYULk1^qw5@=(2z1E%NE(+eU>Q zv=RWPq19ogvJ%s(>$^BwZxPq99cPQf6`GT}MYU61l)NXxa)kpX$X}WYO{@IrMPruE z7)ZL-nC}hUZ)nECOE^R!kW^>sVks!+KD3eIR>ezbU(PJ!1;|UE(?crp;}T_XGR~B2 zv5f-i`>7PyCN)b+)76NGc+i$!aR{n;Eq-s07DX|@Yg$~A;xPZG@4<&(Fr%_gH)z0k z89I3No#tq{E4KLRbI%^7d_vjlJvLIXRtqpFX{Q9NkF-Xly7kRfnz`nBEPh_8H%?BF z1-8Gd$ARxyJdA+~VmDvRMuw*>oIPA*Uf8>c#^`sUbR>vVdUjnTiq+Gm=2Sy zqrKUS!^^JG_qeK*z_xqQWB{xW6RvdFv~vB#6)M~xJQTe<;Q_0GlLU>G9@hG(ccvSzHCAgPFkc;0nJHl6=~3 zen`z_x;;B0KVal**-ByE@<+Wmj50ZAlo!SX@MYe?>sx#MlbNJ$C`Td$cGI$V(gEFx zHetHp7SQr;aTi_3*^a9vFqeh}I2dGNyMbtBAfv+f)gf4sUiwi36mzSer-|wf-^^Ut zb4f`BTQL9PmO=KNlgz$wX%2t4uLMQtf+Q_%Gl(2TcfQpVcGlBQCn$T?34rCz&loxx zajgM0H4@v~Azpz?JplrIJIwMVkTccAbfPF3OP<|XfmEARcF8d!--TfMj^1;aod0@@ zD;8PqJVs%VU(s^MCaqh0?n1G$@n_h<{P_IXBB5cG5cNy$`CIL&?cW4 zp?w7{e`BDg=sU)cv`>UT^z4(4H0)0WwA z0>wM;_i#_Z*L|z_f$kdkeLsABxPW8z6dgiToc$ydrACj?)22!%qSM7By>YPLsc0&Sa}XM^&C3O|wU) z6|Ttn1ea&_k``!QEFT$XPC(L~LfH9fhl1T2pP{L~=#R`6O!NeAUyVAlGMN@w^I&@CC_~wMNsds@fuc~$ zd;`24#&XdG2%PRInQDVJ9sHYLxHYn;Y>#<=}Qj zwH=CAnC+ZNq-$l$84=Hl5WL|_Xfv1v&rvH|OFqEL%OrabzPnV&jflTGd_8c}pZ`AERm8sYUJ zd(vFqR1ydRBN(z7lTiG!SKu|NNxW^hP|H^bnf0ENKkl$N8-M#xXC?s!3+48QMTd*y>-{3s5wb=UgU2ma(jiN-XG_>T5#Hsk?`gMU`$=+}Co#}Y+ zzy$GpfcDkY{?^0Te66md(-U>Dr`;l;Eh!0)w|H#U2=ypMK~pPyO_Z~y(0H~J69mR^ zg*;+_BZ0>Ysdg5{0PZj;in$s@y(-Ud zpjcZ!#KC}n`5H-uUjyaW4|_n58ZWW@@`v*qm-AcP;b)j-1(yl((7=kB_! 
zr4DLo>w}BsO{^VuV{+?``M01f=CaZz@A$3&WxkV}dLKf0zW}?>+qF&Tas)Jb$PreD ziMXX)J5^QUy`G|BPU5{28Dc{FmY#a`1&k3UR%~e^V*&B;$?qqf(_ZgOWJ#~L)3rT{g2=&F60>J!PbqdFkB@uGdgcs#3 zP)AL}A?i2Ypp;*%qnTQ-3J6TmL@&LAMnmf%1FSdN^f zBW($@Y0S>VJBhRvyf#;rdV2)x3 zq3q0#CM2+5Tw6QB$IqY4&(E{8M4k-5w{TnDtb4%vyyFV&RmG7Jir zLIW{|4ut*t`5Xb0v77E`$0V!9Yy39(kNiFp?T?vh!9abnbBzECOTab;W!X^UbCwF^T&vsn z?EBS-bA5@2qHs0nk%dG-G&=k=G*nY`GoCvYzG=8)m!e$QJH_AFwo54O)6cZVzN-dB z0DOtUZSK{p!437B4mJymU7? z@65@Ow)JVywXgek3xNsAA0=z{U40q_pvdC~FMHGo?c=*-_Ru8u>XF~?%xqI`qIvow zqHKQ80`GY1y#&pw>`jc=0Au;R5jK0iGQWTExfSb7x7?k{x|7p+m#NVni8}|v5a~OP z$0pNQNSuE%RNB{r>x$&vnfE%}`69XkPrheVA8Mvt2+0QD2ttx-( z=f>O409d)x*@Y0WdVpw&=Pl!r;>l)=PT*d_9`|rW)=q+zh0s2*rooR?<+Zz0MEG|A zZF??EUp&SKsd~QlI_uTu&&!*B1CAAgjy5M>5t`!9`~f-JT@uk4?>F_9Gu28Ft@@C6 z*8^o+FocR@nl5c0cf^lR_OWC==Xp2ii*Z@gAs9SzGsLZx-+EW27YU4jov{e9FyM>L zvzy-@mAbQD{VL8CZsY9Sh1-6YaxH5g6*zYtG#;LoH|NC1+5~#MQ)8R?kA-IjpXon+ z@vB4VxO&ZcSmnh{PSJ+JU3c2k9P==4L~FQjIXrnvlP>-9-qVR|fS{5)61xZ-{e9J{ zY~CXfKu z>(n$T1mH|BDllTcbFEG~)`k+g6WGwc?bb6|kG3Zb1L3FwzlZwzeP6yj*}2Ag?lc2O z9zXj`sO!;us8BDb`<2{#;$dmb9bNb;(BdBosAoYJ!ZF|;gocLB40FuTU;V(e+f$+H zz)LNQPs9-#aJyJvsRSVB_l9-fNB*7(#F^Nsrv1-O{|?%wP`=T*oVl>uAKwSMIm9B* z4C=)@=SNlzI1gap>CQp$V%mIBW475NIKn`(=#<4htLpQU9#<!l>=C`Bck5?Ml*;Mynr>ifcVczeSVh>;$vde9WbJAXiP=k}Snt~v# zy>&s2NU!*<5@VZMagTZ%4X;~@X1`A#d<{`#VbPO#48X}X+~S$SyQuJ250iAVyKTo% zY8@-YTI~7~6k4ae*-(o_c6N3;IXP)+YI-Xr?@Tg~$Vbq+?zeFTV7eYKArGPH~quX`IIHulsfHf z{iygPu6oCTu5E+WyGQ&Y9}X|!y^jQ=Laq(d*bhd!n-#D#F>d~;#&#FjhPh{?%}Qu{ z_B$pdCPITL^Dz>}Om}rKx^ZWVg}0n}ce4Zh^SuR!171377ySh`4D*f7o;@21aKd-I ze0IHtfF(tEd_pY|+}STJe0$P>sB6t;Li@OMLWlR@*3=9sHZyKFl{*id*SJ(@_{vYvFu884tvRi=uG%fQ zgZ%xeTF{i+Rn5^$T4OsFQb!e19q#1{$ZVp5-3G*OTo!qXTNS~6+jx5?ck|L}wa{=x zdf)WJh>2C}mX!mR*&4Y6)$#2CCoZeXwsr}dEGz20ik~y zblijXmWs>I&d&by=}MhqXQ_ly!ME;i&X77k|0ibJ>nAz!I!U~Srig;bdT@iV6VXqiLwK8OKNMcY-F9zS*r6e`Yh9Yk-BU(f2FD?{C= zE7i$fSzk}-kzxr;J8}+j|ERa4J zas4ANz?5W0W+tN~^WM1=iE=Ipbv6A<=Aas%9;H@m(;16}?4FV-r0=Zlggv}ErCIAJ z(n z567M4V0!|IeMSM*@!rtZS3 
zy&0yhCp38WZ`KeCqKpocD=II~+u^)AwCgbDCLfF~%nXbyGcS#v%WlZ6^&6|alR3D8 z;$IP;eAky%+An}~&Vk>|lFJR4t->KqdK9uMkPe8Zft>dz zyC~;6J&o#XQ3z;ym4}L(fFbz)gS;DuxuS!M5yp9a+9g_hLa8@1`w%E5j6o${ym%2- z-%wh*HRguG7C%%E46IYYYg^h?F`;#jXr#58ni_B_Guq8HdS~7pVrOq{X*tZn!S->n z+5Zj*Bh!l8b%?Ai5%-NSKwC$L3F_;zr?j7#=+H%@+Ju=KDU+H>;_h_#2jPj`jk`(@ ziOFLUTzfJib+2r51cBCI%>}hT?-7~dr}kNxUi=qum!@qqIPN>2HKftjqAe{ zqI7&uw0@58vEjEawCpC7rck7^D{qi(E$u5GO;#fdg~^>*Trn$j>Dl~%IdETkAB&KE zRxf~`-T~Ux$>2jH#C0T|i<=v7K9yEp?&alm`Ej(a-Urh{BgU!Cm7=1ej*i>U8;?}C z`YP}x%Dct3KRVy3L$W9=EcEvFJ{2MIrSR)H(4#~|L`(*5ce}dGfqc^h3I*C?JxT#V z9v+@l1*;Kfxp0I=U?6?HGlq6&5>D08kLbIoPa2W{EXsU{A%c?8UWYKQW}D-1NTgQ# z>M8yF-;i5~|H0_KW>i!_L2Ybo2$uDJe-wtu2{*l)5%*t-D^F4=k`@84Sv%b%A|Y<1M~49`!zaSxWL zr)Ow7I&k0kuD(t2eQ}xN2c5=Vg}ibU*K*=D&B!ZJJ)ikP2$_LE;A4y}#Uv!kMx1B9 z9kUZ~?>N>rx;5XaEz&>d%^U`@Gcrzaa)uhDDg4&o*EdvPl0N7-J2R7^?=Z!LCUp-X zpwgD8sw!WPwByo8-Oj~z?zs~+gFYds?&!GO8o>*v%FbYBo=*}p_Wa@ANW7|P-yC`Y z$>&N+?A?$%YdH$cSBJ!rCep5o%~h7KJwT%QEUnqN?#39ws_lE)u_m$H2}*m{(fzBZ z-RVh>rRdpScMBfhSMl|FkKb7%wA55;@^YqjD)lAD40E%vJbtpf2=JVqp#Jbt=>at;Z+_^uHMU3 zGD@p#)WQrBJX2N?@6@p4r|F;AmI+^*M#80>%}WR`t57?i%^sfYu?#|YPWG$$Y*`D9 zH&b@=HOVr;V$X!jhOtAkWZ4*3ovyL~o~J~1-=Vl$rhJw+Fc$AD#=k}Ot4bOgx?Fn2 zdB4>pB~jIC>rIqrkauQFq4;Pzdh(rc)!i9g-<97)sc>D3Tb8I%CAshVcd-lKIWo?= zAzW5`6|{*lE%lDkJ1i&GG$1Zv?cLpBT6f}d4fnQ}V3nk2D=P;-ES&hz?o}xi*K)I2 zWBmQuW9=XsYdjxwo;VRmUCKTc@d^hwdFBYZFTNlf8yk=$2t;Wn37`OGfP3YMkTuXw ze3i8~1ZF*IIyyTsg=V5mKKHQWZfDr%zA%7CeC#1>ib zuo@Mbq*v3QTeS-IcykaOA3Ht}+Q)!=3C=lsAg}tXRo0{QhG`0ET5G$2t!;+o3A8KX zXwOYw$o*{U?cNmK7$d6gS@Zj%Z{dS1EMLD}rUI%6K-e;;e+jI1kX~6?nXEdz(`@3D zeZJPW2oPo-iE0BOkfxuTWf0DwZ=+RJRh1D-L|9pAX;@fTR$W{1H;@Z_bVD)ZLG3cOOgIX?zuA(k0xb@@{~mNg6qPMz<W5)Y zC+kPQCHmx~7vweClV2Nq=8pFHo*1jE%vyfGaP@F!RGFIQ$aMhif2gI#{}OMDAHhF9 zUL7Xiw&{w!Vy<7*w&HNE-9b=*cg3wok}Ms2t>%t*rsCN_+4-{?*aO0yn;k`4Z&X5C zmNM;Azl?ewvw{%@s)aWA3i%yJj+MC#yySfHR?Ozqv!9iJ*Y|zjAe)jAd>?I@x zz&yHCc)!GIYa_s&9v1idxnAAFDHEN#1dtosDY{!0=Hz^&S2u!Zr}n|&X3Yv$Y^`8$ 
z_h^Mv%gC`dKvQu>^2qA(sC&I*=|QNj;1iLQkUc`K5PZyNi_5L?wAXHfM_z(w{5#Pv zq|asCHdr;}B@%%(o*lDX=I|q1IdmUyBzqXAR8NKK$a%9ONA2Rt`Gqy!EljwKw#N*uR=_I69Trkxo&e{3XrD?FP#SVR}f@h@0DNfpqG-!-aA(HWB zY)FY{2+h_7w$+M|UkI?5^XxW*I3wJ|v?vdjkobB^huLIvl&RO4T#-3xNfXNam?lsJ zU)pR_qfdtQ)egOCwJq#DJ@MIK=%ZQhH2?>i2DQv~mBCTgCF(v;G=qIVG`8CtRevZ}ME|ovG&_ zmSb*rlzDV^P1GlAoV*$ws-J6KV)att(+`Y#uMC9>yMw7{n*BX*wNq+uGWC4;h$&gqDRU#L2C{2l-OH zWz;N?&bP_6FDo=g>=jH_B43g_duO9nkUgoI)8fO7nVt>L;$ZR4d6q6I0>JWg@W9@s zo{oxiZ8tJm?QXsrWGqshL2d#Snt{t`?RhGA`U*;S)-JwzJE+Aw18UClT%hE};I-xKF#eHg`gR<*?Q(X7-+t#|%TEq#+(-hWxhaR2PH{v1aa z2g4=F@n*x;NA-ubB^{FoUHM#`T%8@)F4Y9S!DQ-*c~eXDo=4#JhU?_YAX!t5Wyi`AaNy$G-9L~hF}G1PaxERe^8UGri%sR`gXORA>1Jh z^~d}?ZIkcj88tnR1Y2{TJ|uz1>|x>k(2rIKs&mHdrU(V(CQC~K1I()z?N)Y5+PiEh zv22cmZX}}1r2*qojb^AT%agmFPZ$tY_l%XxW3+lhOPZGr$id?}&CJY@NMyIIXY&Vu zU`d5vJ<<#<%goe3U=9X|0isRYDyr_V`S#c7t~Vb(mz_-UX@A9bSkCwpl=rHw!VK%V z$g6RkvXr&q;g9`GE)Qd0&T^&QBRx^2nXs z3May6KJu+lK6YA2ypXH0=-J0`w7a}-#IecI)3<-jPdhT3ZtZ}<4vroyU2Aho$K4?8 z54^=}59fhg>h(2xwK8he-)ZsOOdEXH+NpgkQ^sr)U?tDo=30kgXy1<=fF{3@`$)vi;6sk8r^Rx zjNFOkILZMA@QeeMi^_&8E}Ygsq$RBfxX+|iNzcki!BcJg$#cNnFNyU*FYk$MM(kpF zQvG-p5SA5)Vq-v_y|C!AEwe;$Fwkc{|I$x}g<;qAFc)D?S=`-_E)n+tq+)d5+DXo; zD>LaVXHHV#uWdo$Bp#tKWWIernx1jufZ+PUy=*N9;1V){nf4N&Zs*rrO9HjRwEoLI zfP@ABQGR&xMxjwuM1=nl*8S1LYovGYj>dIzQUx_ZE%C<$i+L@j1eXT`O&nBZ-w88) zPNwyXg^qJ`4mDaH)`clt$+!k^=;aulJC~+)?EHK3Z%BmLCH`l}sV`D$E`A+}FR~lU z;PpDJJU)M{;mMBe9+nGqynx6qzjE zDog|uGJ3@A>4Rg^aaGZh^mWph0helAX9gHiplXezZSuXVNCIh}Ho)0xW!!6yuJ8C{8o!$ zB-Te#{9?-cWmN?(Eyjxy|BGKiVD42UfvY;UX4ot*$?_y#W_}ZBVmud+p7-*9D5q#{ zncuVqCWxM~in<~Wf((KeWDwOE9cNo2g=hWvxb}U%HGZjOZ9C>nhXc_ z;H$zjp__NV?oVn?ief_Yz-3J1QBAX7Ic4#&;|N^q;06A;j0@lr&iH5ru3ou6YvVD0 z)4D`_2pw;++C6C~roogJ{?d@gi$**O2hTfzYn(vxoUocj(;KRRimzGAu&sJfF5;x} zZWf{Utcw6XL4hgAfW$LXr^i2lzyi8Eta69ncCya&l$4!>1jG2 zPa7l#@3_jxS@mcpegD^Y4<9Vnl%F90M|OoRzIQV$xwlBl;+kN~Gaum!;_beMpZ#x@Z9Bw%py6UgY2dpijvu}sG<*=~Ob}``g*M7A7`1!9?TUrS0N>D&7`P0cQ zyzZ`N8sjN$-2`#kU~?Pyi=$wx4Iolo#ryeBp}Vhws*L;H^C+QC%E(R4y=K=zS%zg; 
zQ~_}NrzUsFq~+N2(wt5`Z(^R_dTmDJ%)R5E7%9-mw(Ef@XtTq)b75e!Ov~&9?(k8; zEE}D!9nQV6f^Da6$ImGB@%|vV+Pz?bsEgoDBw*jO>sNl|AMZAo9hQI>$s|{GYtjqP zed$v4O>{FTG#MWajZ)YbvuDmdw=5M+_~CH|Bz$5KsC z@8;v{OpTrotkh1J7Z+{t-@~%neHpw0wMYUAHkA7tIJfhIs=-7Xn21vRNf^$Y(>JV$ zvWU|`H~oKPy>(nvU(`N&Ku|&&0qO3NmKqE|KtQ?#6r=@2ItP)I5-C9t>F&;<5s;E@ zq`RA8&OQ3&z3=aLum9p_&)IvgeActpp7dep`4xU2A(CA5mX1mKHp81#FzAIg7b)^h z4x;v@C@g_NI{mT#L+p3^dnG{DaKWTPrB3$fn^fcgZJYw*P`*dagK7~bz5*AEgTpV{ZR?8MXq6h1Si zBHcO-(#J!Bj2mlUY2>kBtJuXn=3gZfgw*+OotyNvq`Iuh$AbX{U0xtP_lWIU7fP?CR;E<--p{LUp2s^ViGw z3SE;&PtHfPric?)eu7Yw84*Up#f@YIr_yQi@Kn;>?;?RqU=xbB6CK#)8j9vHN z-Qz?yJ4dVwhqrL$J_gf2tGRt+vTn;FE^E=p86Kb?xJCh~a@6DlWHrmz){9G|kDo4} zz#*E4?V!NpnDa^9>YDe*HMT<9S9i)FapCK&_7WR0Y+d*Wjf7BNSc0CNTKn>O7nF?& zwbPaDGTK6Ix^$JtKh$ZINf`A!$MSXSe({jRG2EtY)KJxM_T=~^>r!B<>kAy&vIV+# z3jUr>O#V(umr}umcq@~dt~Xb>rGNR$w0iFHm15)DEPYu>l^AYR3H|PgH7aULT8L}~ z*8DvreF+LwUlj+QTL!!!tO4P7uVKF$tef^L=TI8zQb#3Aw2#}8z8;u<`FdqDs<(@? zcnbqa8xtA>Qfy0WT=zfpiM=Q2*@%BRY9{tlP57f5z4!`PsaQQlWkn)0x?BWW{ zE{^s_jBJ$zZMT3|L!f7YsX{)iX(O*!_2I$t#gVGmMFr+Kua8h+S#MEZ?cQ(4Ra&pj zyoTLtTfE7Z)i3rswv>bJr)O;RSdXP|b}`d^IdW#<`EH$F=G7uat!t1)20;osQ2YAT ze!|G(V^Bu1uKe(lxb3A7NccrX`Q+`0C?OpyK$r|^0C`Z$0f_QQNjIpTfFbP9Gr!Sn zfQMuavQ}5nYiB^**=fwfx%%P8ZL;wJ(bGnkl*;t)lhan{kK3eAK<<^Em(|C=Od}t@ zM?vBH=>jh3bDEV;X5`i9{V-f0sB9J0+n?;ZzL9@)Rt}Ok1XkLkmevJ}d7~vx_n;jR zoxB!5)_%TDmlF2EjJ&e5wIHvrn-2%9PlRRwGX= zGe=YZY<;=1*%dJ;O8!*_0d~m@O(>9AWuJ1Ci4OLlZr6cz32kr6MpB|FThb1czs1@E zpL(#WVz8?FzR|DP(ap-Hr;cL5u zXV|)RltqrQE!)VMNi2>bs<*FvJ`R`;kCJBafhLnzbY6!$iOR7&PZpzL8u!@z=smIj zi>YvolV_(o^o$nmiP2g^#jM*)pLikBAFYEQf^Dldv-RI}F$=b5Kl0_EA?13h@kYPU zmeV3MIFbekrX_MfB4jSaG*(!l>SO6-lpM;en!K@Y{k7 z?}W$7hWvMzoux*PZv%x_^P!+?Cx->M*j`#FZ^M2JiK_Q{UpS85{1#ZVk+GyIzU!|s zAbpk=p!4$jPn~3sO<$y;!z{~M%IVrsan0gkIp}p7TP<1E*br@U;ov;kIvnxvtViOz z_hpJj%DZ2GEM{L6n+s_3K3oeE>__%qkV6fB_p_5+E*gt_X}n7El`$Uu4}SwkLzu>N z++whOo)+CdU3Q~F%r7|7gdTD|zsjte-qn_!B$_NqCgt8~?Sk|jDp$uj5iEQ51`{v> z!Nh2nU_EjT#0y#iy-BjqoDY>*!Q?RWH9h~oxNM0~Mx 
zQ+@Ck=Al$@Yugk|IXyu{oDVy=!p=5+%r#EKu8Y9zx83tiK2ykayUPK65P|*HzyVo_ ztD^gExcnn)y}ZG7wP1vlLyJ~Qk#a#^LGW}5Yr049dkMuaW6*F)y{;tpj!oOllM_7k zO0`Kn)I=ZdTFSxrUi8t_R3iM+Wy4QHrE%+`c!YJlChM@yQB~@4LZjc{yxPLp$MfRy zY2zreHQa*M2kA#H{Zd0)E142urIF{~@(08c*t*fsKRJ2c+m}u_`FDWrYS|4ZGYJ>8 zryR#1|2LeR-)j!Pl55Z_$T0io6^OU=} zUS&h{x?6G*W*eT{i3n=mz8`k7d}QyI_av2WT+_(2zxznFc7OKXfpv89K`Htc^1O5l zUt-Ze-`?}Qx3V;uDP1}a*E%kxXA2=OqM>WYo9Ul8jhIjcXaYT9dI@F+)xXmlLGw(iyBT!k3DMLc=Q~Hyme9> zMm9qu$#Sd=aiDFlwC{T!085S#17rs&TYNlb!MV3UH(J`QG3i4@+sOP zf4%|1c-a7NlJP%MEZFl#Zil7h+&-yKvVP}`T8GP}k8a*I4_xVls$5_NuHCHBSbgs; z(gu8up;NrGd8d<7d0K~aJyET2v5#6-j93DFH^CXdz(jrvHXO}w>y$eWq4UJcsqHN! z6@rES9H^&L59zWm1Ae}@lsuCu^r&yl(`Kia?g8i-F}FcJoX8^(CHJGzrOu1vdf5GV zVBD>#Dd`f$xb$gD0_CoChOmDisx{1gATP#rjgTF^96%VlEe~Z&pZ~THE|kIvNUP3o ztjKvTV(z^C8;lf;I2Kj{y;_kl<7?eUO`aWF!_!4D`Q5VG+T7ucZ%e<2Q*z;KqFUqR z+kT^~xmyrqB#_@HX9(1I2f$Kd;OqZIemyCEY3)wlSYSIY&9(3hjV-I{&TsH zM$(eZ!xR)M=ceGm)lB0~p}@)4?zeV*Mu`1_X#7NY_i5nlfJPYo&*yjk3kw56{}}Z( zPSP&gUjvK-QW6bq6PSd`kW2rSOEsWcsv|YT4HJuFldeU>m~EhVtrp@it81Dp<5Pns zvy43m&i^q6VGWmXB6+U7jpgYaJh(I5=fl~zfW$L<%UaKtpNFE7tlk-?AomV$h_S;p z%NmX6cDF@J_5PGiQT;-aBX2z_HBBV*2}A8wTKVcP2?sN~@tOr2K81#z9^44&O@G|t zrj@F1b}!w!GqY2?e(S+KJ(?|DIh1zm-u@{(LoL-pBTX1Ex zkx~~8RMAQvKJM>m`;J2S5T|ihDekB91hei(-CZee2aDu&P9wi!ld$}@AKZj?+;@+y zcc(ub(!I)>sc#b;{AdUap+jEkGiB4OYu&bRvrG3<^aCrrSF7AV-#5V~47*?R&;wXy z&$d9l@29%!n|uva_V3{CwVspCzvJ!JJuW>oymR5>z(J6x;5CAxc=%;(G;+>{PwL%3 zV>MsX)?TFa(aNSKf)4()pI$G4o zZ8?^YRD|4=PVZp%3s`?PH*Ih1HUAOxx+)j$d*bnW5kQ@mg+c#sHKN0H?(7V1r&w&( ziP@VYWOlTP#nrB~oGGGxG+$i55+nxq2X(=I;1cmS_X`5)4yH9t`J70pxjxX%B4@Rsj0Vz+B>e+#^>2gLNX_U6t@j&_u2 zlz2D2H?1c(2^#Pv`T~dTON*hi`fgSr*Z5LIfIF6RG~FdWp1jjNMV=`h@2(;bug9S^ z_s)F{uAodNXac7>#Zr!b7Imhc5usa9M@~#{U{L1!fT>O74x#s)JXM#FI=Dhve^s55 zKAhK%o5^(e;G?Y96dUged;3*VC{Qo98yxBVRSn8{YnZ+MINGX1=|sTlTN*^x&#!Oui48HhVa#CFq55Dr7X2~y9My| z;fE^_Zk`SXZRC+03L4&X)v*Z~Fa$6RlWeAQ@{8}`1QRGgcpTa|h+uws1y6zBqqpA# zM~dg)>To6Sdm;bk%-D7}7?S1U${|Eqgc#)qXHLM79JT8MXE`G|A6no0#87@G4t>;f 
zB|PJjQB6V$>qQx%D67ROJp?!tY%RkKu=YG!V$`A-m@TsHg7(hl$%OS>w zjeAH*=)MDlO^j9G*^*@!6abr zR|MZL-GRgz9L2uKuk3@Ub-&sYL7@I?$}h=jR-O!{5CU}oO9MJSek0I=n5rm}`XBa+ zcatXwUrQx!{oZ((G2epr^da~VZ`jg7k~oEge!cVZ4&7871aF>BpCzs?RsZ1=Zl78Gtd}6f^)8`f_G+5vVvwBO+d{F8&FK>M%%7uNJR$X55l zp>}6qtO5r}eM}um7dKtdrnq=?3xntqJ^{;oeqeCzEzi2JEeU9REq6wuD4io9IKKqb zcOSY>gm+6O&VMj4HuyjK@+X?Z2SO2Jnu*}Ow}E=`2Op+2ZA{+aES6(n;6jTM1UPMc z+5NCoHPp_=0wtfGE4pQciRTNLzWmUVU6-(bsB+l-lfgmqKo5)lITVx@ zafxJ!bA}LD09f7{G$5Sg271Q0fu5r}ev&HnZU?n5CLw^--Om(%X4NHBFbL6(Z+#rd zxF=a6)yv0n^9en%Maukq~yjXmz%Ai7U) zI!Yc%CzwEhc^EVI6#*fwimFL8)m`arbSyZTmPVsbKHay*X=4^_d8pdp?`KlC3~^}M zNcKf3>BWlp9=xgJ3?{Gzu*@J?#N{;%``Zuj|AM;JG8tI!-5tl*PwtayqtLL(7cC8A zR79O44l~#Y#bh8Jx9?@hAqcU_O`{!JjeTF$l|Dzz8p(S3-jex=(x(OzQ?Ox3NY*>VW?NRk|CY_uzuko&782c?R&K$%b_ij#2 z*)`8J^ooHVm`Imd0~;5JssC0S9)14#pRK`UOv{${d@I=l_kG6_G2d^;bC7^>N(D*q zH?71~s9@2~cHbAvy@P=N49_2aJdD!&X!IEv+aNiwv_&fMHS8Y{ z4XFgw+eBs=SDX21T#>qc-UY4s!Z|97)kHz$^; zOfx&RCAeOjF>i||^CH;PT}^y;E`S8hpAIrj`LKEf4}aI2MU?0)wcy`K|{&EuO(`iVcnVxD^%@J}zM1T9`2R#|ZDh{dN{qfD49cg*1>L~j6+SdI1oG3kLb$pK$X*<-xa&K?nv3hG zhStq!j^F_ie&uyAHIPrY$XZJB1B>(rz<2$JE~-|<>TjXwJplMYwN+g(h!orp!&)R{ZNseB-mBjiK(Zur~$zXPEXOVT;P2D zn0E>LAXep)F-W%tvOrNUu#I;K|H=w&_IWu0SFvh#Fd`G(H=&9>xoqia^K0~Lc#Kk+ zB4mcvI!q?vzJu@I3S7sgg`bn60U6IH!Tc2*Bf>%c0jLi1zh5P2>mF-nclY8<1F8tO zN1+gkTn7*DWZk-k3qfUgaOj5^a|D~T70^tV9Q)_fgHhrXcEC3n!W(VjdJ_{lgX@?< z-_j}iamza((F0DQuY67dsfHZhv338m)~QxtXfft#QMrMV*Xjojh#sI%+_c}=LK_1D z3^q_DOt_=-rTzVb4RpZVxETfhrcyC>8*S{lG9ufQxL^OROzicT8x8I4YuJN=X6z+t zGA`pCssn|ka|b%McS+62!{?WLZyriUw?Ims6$0ItV*}Ht_)pY9?XTR?d@UVbgzdj+ z>gDL*lu5xY6@ zCD62NfEFDc{qT20j_}>W}mJ?$5qF~~g z5?lkbf9R!^faaKpc{#Yyj8?}z;uKNfl?W8>+aH~Q@u?ueJZioR`><)3&g95IOJ!=y z)@s)>dQ(csv{XDyJ3jtTqtKF*JC8_qpR%W9%;ct%8h0BGEuWdt^bRe&QmJ7xDO@CW zMcFiQ#|pFbf`%;ctVYU|(%D1Ky>WzsWuW;ug+2d~j^?C@iPM4PICn0Gl}0^#@#JPn zZF^trfa+}2aK>l+-o(pF+CWF~q;zppIS)1E`L97)ecJVtyQy9c-IkU+Q8vF&vs@4l zi`~vmNFG^@%XFhFf=>m1_%IF#6L5jIo+dyU4c)JUpHQ9Ff&mL^P9x>aP_!?kV3L-F 
zYNyH7ZKabhbmToY0pCe~6if&+6SR1kTAk^BQP=LwQK=tVo|h)~yYRufla@2hWPot^ zM~^}|16}PD4fT2{L{VCkAeXWFWNyX8j$VQuZbSa0!8(~!w8MiOtedsZ^FlqGs3@MyEdxcCry z4<}%n+%$%rAPm2*1y;o_Eo@HkEOw>#rFo`OA}XW|1pE#~{>#`cq3$s=8;Om#U+U zqLq!xYJ_IDjG5LGTc`n7MBivQG3jg5!}`o>I6rG;L8ft?`;+%3l@*1JJks?|m4%K^ zyo}5pW~b<)l|1pJrJ+vUlm*k7+DS8(MmcBuqt4}?B2;R7%$H6mDUnCQ=1-MM$Y z*-|x%t#wC~6f#;tY=J%uEyU%eQ{wp* zD?q=JD5M`43HWo`&M!zQ)K+&`>`8?;%-=ntMq|m0g?yVxRm$3NWEU-J1ba;riBxAZVwv8V@{3V{vDkkw6 ztmJdsJ3j>;r5;Yzsqf#kvjy&V{VwZOvX+3)eNp46j!``Z3zZ8m@Mb3y0u+daR}M&5 z<8*}1ELOG1giqIORp-@5B*gf?YX2^B^l(%k9^1CG-;1pxuzp@-sb|lLRg4b#2iis1 z7lYXEFM6zT4_u!EGGoP#By&PWK#Z8d3orCG`x+a1Mx_LRha;O3CLfC zsvx$Zn(|f0W@5nsH1y$%-YzJyMYW{p*}f&D6Vq^{_0r*qYU^nO4fGV0o}NwBsMGOX zy8#=D%IWB;jFnS^&I%@n`(OI!T2e@kjH4{*B@NjYYSdd%f zAipCc?>$>a=eis(S~MG@r#W3}=BZ}oXnz>zAMP$yMzo>oFezJD#;P!?^9?uO;Od)4 zj5c5B2|Nq?pGRV3U+=sy}?U$f7d82&0#D$(|g_=GS3tgt15J^Rhy8H%r`7C&)>+`&DC+5C) z(n5va);AA1C;UQnEyj)Q^>}Bz@QBSmsW$!kpv_$h3IVqr7bST_&*FhqOwZz{tS^C& z8hgp{VhheCD4k93eOYZyM8oYnJv)vcKNE889x*34=pyrL-%(_un+Q0b+huC!PZ=AXHWEFta_MQo3*vBtimYbD^lO|e5D*H9|o`T^;H&F$@VRD zKdfOMOm{1>Ug&;b!8}N52g{N`T)V~TgE+3=!l~Lw_tl z{6i^5{5$v2eN)7!Mx4TkQsQ5UCN_QO!1m;Y2r}|tne&&ExbGa~0s#IX1d(kuUdO&A zI?%%e%r)PQgQa!r56T4*Wtnxgs?1g!@#o;Q&gh7N_V}n^!CY@4`?qLy%XNl(g(7Nl zxGbaUa$ThL0QKC98TnTg7rkI|B^Vs2{@e>^A>MuzHGuitf?TEq>K+f4{O9n4iuo^(&SN`V|r6l#Bn=1 zAZYywLh5_Za>44_`w5*fhEtX<>bFD7?PU^;=U$Fxj!jD%-s%bLXDDy04?3Jzu`nyK z_wH*vz^i6R*|2_XWY~R5$=L-Vg@Y*fdChkFS%z?S_{G(FI|2AV6_RiwO5=~3d%MQ&qfdop8PLRE) z&etg+sQIr3bKB-M4+=$!8#0}r3FegP8akzDP7gkObNK|^J62aVYw|SyGferlAFa;Y ze>eAR_Dp2_f!xXQ14EoClWa=ybvrz>N{thq=PJU(4A}-Y-jR)+;OBwkJgex0JX@2) zt^48OW#T!zkMxlbwQR#$WlmfkOx}C0Odus5-gDiYu#KQW!qQUfJU5CFOSIR%y_P@V zS6IG*l)`G}f5T`51mz1sLmd1%IJBgbwi1*sp<%`Tqz)?Qo)Pj%bio5^Q0FLU`T14u zLxCZAuiJnxeIf~y7voN=JOD6GXl{XgGXtm;^E+2TpA6)F@pi7BrXZ_z@+KX@y(}G{ zegyGS`2&<0m`o}>vRn@u7CFWkUGP+k$CrLx7lr-t$;-^g@rp!rG+s;9XAZ`bcFkRe z?XMCvEo!qq2)P~**j*vG08z^t1{<+lHS$I)#zrk>%a^DUTd@RVV;6rZZ#r}JDx#&LyZ{p4+m;fwc 
z3)a#N<=fwng)k+c(B4hfse8WXuA}KYaa!MJjz|&lSv>`>FK7_fSOOy68jpkXlVH82 zHf!w4_;KEImBvx%vR%bC2IkhJ`(JHWz+-9a<0+AqpmBAef${nt2s@`EH&E4Cz4{Ku zC>5t`ky-!fIQvQE^({d?l7G*qrFg)CQ)$A$c$r1rqtAM`+%2VF)_MGbR=3rj_wdBV za`?Evj;bAH%si~_-S~?fR<>yReF!zb@|xO?5=iqO?t4cKAq-g%F%t);?Z;*<`q~Us z+6)EgM}EzUql(eveO}0VW%(&UKGQ&0=NuB+2MT{9a)Il@UoSSfSfxjS=I2;7s7Mfr zxN=1zq8Fc=+t@ylf8;E`^!ZVPS8*5UnNw&}%Z51W=85Tel#nzclfH)r2UQCV`U-n3 zj?<67rM&;fTrv81W%+9Eg05O1)XHjK>X>t$U#>eSK@_$+uh&+jY64cr+Uoq?@>vjx z=QrmwD;pdoo{!J3tsXBcRGbnC?}Gcf7=ml@C!j>az*w0TVh^m$qCW%4>ZdK86AO5O zU^lq~gb4YP=@8f!15p6)M8x-o>N{Jo{#k$n6Xk+mCj7{xq|pf(A)u;?r?W2fy=}X^ z6!7bF?Smz#%fjRyy>IZ}c`w<9O~uz>@YH9Wp+$4C`k)8XNjQaSrY;haO{l1$zzC^h z(1l=he%JFrYBySWo1nX?;C5Ddzw6nzmunQ2Jm0-dS?Yyjy^pngl<;Op```BM^B8Qv z8W8oN?yFj;qo*kuA1^opuPvqOIby>(Oh|8gj9S~7GMFBy-Sr1hOA+)^r1}(o+1c73Y z(erKtSj*=aP3cgc-Q?$R3`RktC3JJGTK;Ztu0i+D9HsI@2%4Be^Id=lMf1CR;F;|; z*loIyPw82%MCI6%=%U4K6>p>L$@Qnuu1BP0^Kg%da8Icr>%4O3EFL^8w0Upb*1l42 zWV`tOz+h{cyno(K8n+Kr@xWyne$kdUFO9h{PxRwZVlYN%3`4oW*q)m57~#T}pzhR9#2CGgDZn(o|;yt(}jVVDc3 z4l;i1n@6e|1w%L*A>;q-V7*bD8c6^QS5}Ouc#WYSgG3yoYR>97vR+}gnhPL)sb_w1 z@!y&W2*{LRoFNe<(?-6@rzchaXEA2*!;9m5Rh8vn#5Ed(vzwr^?dg!D)I$T6z@Hj@ z$cZLhy#?~C_ zt>R#V^#u7P(w#e}+tYkJ`*&BIJvY`T)dUNu2l6Sa`?T5SfAhSF>;LU7>vbbUs8T#G zx<2QDj-f!HB(mLoiLTbp0iQuSQkKWe9(D-^6WK?-jI(EuE<*GD0rie_{Vo#!Jxl~p z*FdEN*!7_Sord&5(cnzm#B4|U+5P=#D z9Oguf)$$|GCCMrS>n@?~20}1?Y|)1&OJa*3DS@AL%mZ;NMfB1fXw7IiP)h7{x-xmu zvuf93xzccVEA@3;4ab=5g2+7iF4Bj@4d>ji@+67F{V8 z(1=UaJDRUa3$7K+cs%MuM!zK*&0=LUTaLAPmNA~2_v!uO5{Z&~Uq;IdW5cAFuQ3Md zg@a#Wh6p{Ex{O~szFnYZQwh6$&>=N{5&LYzF!M|0y*C!jS?0peMuO8ac$}>iLOAYY zs%Ofv6sfAJD5?zDl5(*>-Pk-Dv!9#((P?;Ds_2sqiiTI*ipBn%D3gaDWBAF#;PG8+ zrxnf~dl?SBm}g@bpGcDtA2{!^;!>$S5fr%F@bGbDQO!rNTDci?B;01S#K^J1xU-lM zDW*2T=K=smTI_Fl=5&R zUW<=iN!)d_Kx@q}mv*IT?GA$&G!7$d^=aeHdk3uA2$aG6g=W zL=mlc_73%RZA|H9KdV&PfN#yLOn;AZ*w_(>El8D|7}&e)7~W;bwPs@c7Fzvwet-rv zei3o_>l#r)oa~V0#=|a=G_@2)X`p6uJ60!jgt)>=@SZ{VqB_STiM*o6AqOKpJDS>5 zdAUDK1as?LG>t=CQZmv)KWm2I;N(`QXNc_OeHu*9r(FPTN%lc%jCw3ew0oH+DVH{- 
z#v1^@?~t%u%)^!2W%SJf`upjlvImR#4T*QCQ+a3sz-gZ5c#j|ZF2-GegeD&ahO2(} zR(`?7Pr#3rsb>{VUA9D3=wIXJ+g%-Q%|i1H+}`w=0_lbIHJ+}+swRj@1sK;Wo4=FhKEi?O?s`+HINVhP$4XS9!6{u8@ z=ccTxkM4)dRa&0aRdydsKR*?79j^W;^USDx>5569pWRvsj0|Jv4ik6(ICUv2D{d=EZgHq8UZ9)Ehf)@4(3U>vshu z3gbh%KBt4fra1z+gL(kM4T;GXgU5Q^~4cyZ&f3qQag}!uD*>=Csd9m}y z+fcmsMvd=UvaYj9j%;iWiB)JHR@jUiRo4(RDP&;GOf@;L{m~H3-+GD`kH0%{b`pm`=cxLOErHSLT$XvNcT)BjC&%oE^ zZ8jtB(1A^ig_!ue@9z&X$?Vh7NpkGrX%R+QKJOlnVp|UoMwe2V%eSjRkGFid$9s4E71eu+OJtV8Ly$Kogql{TxME< z&+ITNcYk%%fu=1J4o(^;mD)@34tsNt^I9quci9)hee42>7xHiL)$?9F<6j|C60Uf_ zH;0|&wzFz;8?>+Y1=r{&WC!2X#!W?BZ{CI)!xj`jQFtbIK57P4orv*>@I&w>B8>r{ zlY#GckM}+7iF^(Kkh?tt!t(IvF(grR=%u!YS4Utvxl~AzFIcU1bI7_cyij+1_z(F2 zpz4peF-<_d{u2EH$R^1+!uk>a^9AJJ1X2#PUc=r)n;m$|83+LzuO6E~ z@7Ssax`VJs>ee*(X;ZvK#fBt=y4i##MI}4fgu1zyIrupZ2L}~DU-RXN63u$_x<6C2 z|4LzdN8gVJWKm4+MpP>9eEyCOfCTqbQW4Qek7ded$xTm^ovCH}#nj0SbN#t9Sl%Hl zJh5-=1C3VrKA+v~Z5<(qvO@xYC;ZzP_w+Jc!xv1cVg5{{RCfOE2nGOS?yXPAQ^Q+~ z%%DShS#S=jQh?xU2@)~qOKFfvg#A$N)6h9itsk59)(fh2;r-3TBPDmr z<#`%MjDBdqQBB!e^@3t$%q_M7F-#ZN&= zVG{%fkfOng{h!^^a+u)a1%SW~_6zt^wG&AnFIrX2d1hjpaNVA3aF|Y> z*sM$Y+GjIvJMFyi#WO}Xm8tUs7@y}M1sj_AO-UaAepHaPyl!>`uqd_Y(u3h{yO+0T5Q{@#-T!p|eX85kWC^>zX|d1W)u6+(r{hL{(EPhNhe zZju}OLP52);^f@ow<7)E0*y_yLBv&6+0fL-qdEuzE+bpT?MU#@ED^kUZDTogrC%n#dx}FUiEkln)(`c8(gy9|7qFr-wxoa ztmojtGs=5ah%7=o)E(eeFl3*+iN~qa{wXfw14|G@5?#QV0%8Od-YZ6^z6yhDC%>i# zi?hSzE(Vm?yh~52{06nN1MJSS=c>?vDqzkl<71Ns5EL68vp>F6bJ9!Gqtl zB&#OHO#nb4u6+$sg@MKQqd-x0424`x%1hPSz>(-qG^qV7=S#^E=(U+i$LEPzYxoUj zbK2MJA0GWZ89pe%F=KQ3%l-QhW{4;PzB+5Bb{}(+^GDmBfuMXG4+$+4}!_KWfo43eQYrM-!8aa!LGEdzL` z`4j-K9XoJK$nX6+x@8zU2R9}N`%^Ra#fNTpi4^d~?pl2QNQfxB@%Am~YIOHNFdB;M z?#_eq%&R%V;$*Q6mp<2!QI%tg+JrCl2H=|i(%4~#=WB7Y_{0v(cKymS2uZ| zkI+q92$`a6&zsX(wtlR1H`#|PV0*b;KG=|=Hy$QsOLJ%6_cZJx z!ogL9oGG_@?tY}HK?z+p06Z7_j{8EaD%+&E9l8?6svR(eo+BVJSefJ5FLuRW>HnBakGBARc+|q{cRRDTY`2%6<#Y=oTy1~5 z_geAhTNJdM->LjQL3a(D`mu!^G%7lHvRBm-8Sc>4dKc}K>936R*u4JojK&EK10cCI zJqONDWBd@+ZgX?3DlXq3q!EdJ5uYbe1iqzrQ5D#e(9uOLPEI}pY% 
z28BaENJ&K-{+c;A$jmjl_kH`xXWa}PhJQp|!Ev0#-8Y0=1u=nF6<8(vI?R$g`K>c_Z_?2 zF7=&`2X)IQ@J=Vyd~7_bGq(NTdJYLmgZ_=2kg&7s9)QtCUZ%fc^VDPK2;iShmj35q zpcRKkaPT`7N{y=kpc(VDOJP))AEQ|pK*7@p^CK_Wt4^flkwI*bA&8fM8OE@wyM2!m z06v;rqd@Uu~|!`x|P^qU#0euwH)5C%O{G1%Q_e*yN_pdQztz zk+@K(LCAccag0&a& z#wF0UXuQhxOUFH50!YxmS^oRJj}z5tIxzr9ED%|=VZY`*wZ;R0t-J3*sJuq-H;|Z+ z{il^=KgH+ex=a6CLqwLN2R&*Pm)byIq+wdkY0eE)|Etg`fJ}tTU55p`jJl_#QfCoUv<=?HveW9Ds7^ zQ9^@sq_V{R`ttZz_doq6@sP5jo@y9r4oDqOkLDVEI*`Yn?+E=M_diE%K+ol7R%L?F zgwhrpH}h6PARQlaZ36+F1wqN-6Om9Dx*d2$le@ujQhLe0eL@8Qw;lw8K?iU)KXBYC zrn1s64d-?A(@|ZGgcmbHF_~c*O|_-9wNHfC<&MR}eRXu$w9okNoz(UGKKxqo+dQFY zYasBF^>wI|B_1vz)|)W&zrjznjP?Wtb~2Q%^N)FvU_SxE#G!EKp%wdE1v0v;7Az>YG}A&VzWyd>AEP9$ z$mJ%h#h9x&JTp@?NX+>9iFZ(-fc3c_YidrdUV){ZUG8cKj*^YPt>P^Op|SCJO>C~b zEqzjqa+jXLF-GW5|UCS=%sIZ?aJ`lMSM~S=`K{m z+u^`~A|H|e4)n)|A8tsU##a-Mpa?Ay5`&G)JDxO_^N_TxF;lgHtLdFS{OG_d3*?wG z{6`j{9NKMPhtPW<47(Fdrbi)~vLx9>bOt$@?#ARuf}bp`?-|F$#2I(mD-Lk~Dkdab zTYhSx{bn^zb5vN?zcXO-%sKh0&-plWiO$`F zGOrpZ55=r5%b#7s)0^@#xEYEQRV?3rFUe&Q;blV91qkuGB1!a}wVa*x8B{ar zSVFNs$jA}@GyIs8Sg&i>T?db#x#p(*_DLHlJ&fDVKfEESOmeJ!cqA>4^WQB&L2CuO z2BGXz)8Z8XSdVY%1R>A-9*eRpTJ03PEv3ds+R6N8ZA|Du5!WmF!sD@l(aWSSTGX*d zylTW5c^xorsq!4|2Zj8+99&|#HSD4isWO`Sy87zVgeftGvqfVhpT3^|3TrqFD-*wG z^;6-wN>S9Qou7{8%oZ4~?{2OCNICzPrBFsY^Qv~`@^TE8KIv?^dS{l$YYzrhY)IYj ztNza|VfENu#FG5H^Y@m5lG*D}Ek<{yME{E2@E58Qx)3P+Vc`pUCY7NuVe=3GE-{fH zdn!X1PNh*6qb20lGa2NE!06i_TP8Rc~QB z-UNwKd!F@jS;u_7ZTs|@S4fEzTm8f@Fx1})${0R7klkELFcbEdpoP^Z&=!*J(B!rq z5Z_#iBuRGUW#Kp4t0+iy3=g@;+a@pHLcj&k$u@=raqYd31ELpgXP|!Tk1=9yA#o`e zHkinuy>af4QcN9wfcDI-Yx~?!a|BOpF)89e`(>?w4)efHGxCv&;JhAF97;Tow7Rl9 z^+9b7qx;E8RFuiuFWF+Ng@ZcY?CUSv-g94T9A444Ro2teGITUIQfPc`L9dmY(BaIp z7#4=CIrb7g`%K!D=1SsxFP0;=PztWL*cmvH(eTwj>|FB0V-7yt7s`1%xSY43DCp5j54>sP|%+wC9* z0C4%G<1QQ$h_&rZ0|0TlTOeW>B-ld1{W65a1rRauS}$S^HIp?oXHVt}S~q9DOp%qo zE$O(wr~GmD&wMB{K0hQv<&P?p+GYmlhdzTYk^YyiQJu(BtECPJ27saD1ho zMLH*Teny3X{9oTjt8>Njm;}rPv!|=c7pImVfp&eC^z}NB`gQ&Xh?C_QzZq?H^>&)z 
zQw4wpR`6g*AwOWk=CQ>8!_{9nH1)Xu11O<@2%>}t2uMpyH$xCmQb0j+bc!h56DjF# z=>`F5i7`a!md>ek*QmjE?m^$5d%wT?2W&fg&Xcd#69kulr?pA*?F#|$MbjVIm$0UT zz{M08Tf#sHDs&4tCK#X1C5i66{W0C)I;oWiY5fOuYmH=E30upTvJ!dIjgw=7G9H0n z>Co_W)-qyk8DADfb2hV_r!0z^%ug+*U}=-M3RB#PZ4TNtE`f_9>K-@chd+FVwAk!A z#XDjUDyr@a=fgd);D#pk4u{)~PetIoc-m9~i3HJR=P){@{1=HSdN!++!5nw%9Y_QD zaRTJnpZ*KDB~aoBI_w^wt1ReM7)(&(Yt$m9RX29-fyH6+LuT~3oWzO6Z7_1Ln%M+q zTnhoS(SwhJ7?@5550?Zr6RV?BKlksowG_)q*Lx3tP}EZ<9*9Ubpham*f8|5lE+*1K znh!CClk{zOM=F^~2x{QQJX-c{lJd+tWo`uWn&1tBFnLwP6U{rSFKOXgrP|!%l4(Xv zI(NJS;~tQxB$cW2MK@KvU7;V)l#SLq_JgRg9x7|t#NSHsMIEed`G0-hSLGsoy!4r-)|!7I{?#bo!~W@7ubpk0+No^>%P5PC zT}ClZon+vPi5902J@`Iy{=?5X*&8doD*BrIa>P;oBT9rue-kf<|LMUHhllj8 z`44|b(4NI|x3X)W(SyqE)7ic>aU{4zcb`W1@d5ggPCV(^aHNs7pV>-2AHjC_1F)ZN zFyoH-_QaKeNB+bBl=AVDN>15tr#{NlxNx_Ph67|0^}GBq@J3n+vW3Aw4M^K^K~H$B zvM1g!(%61=x-d+LFh;DX#g_3pY|b%ex;3?U|GtD)qA8OPmanb&`0jKI=_+$@I6oafl7ctp^eUv6=siyNH`mzx67+TZt0>q!p_xr@6%3w5&9wy!iDezTNJ?3J zqeY~Cb~edRH}xj02?M5X6O@*<95FUc#@=CEA~&3U4qaQP&t!@1eR%~05jFg))??Wy zvk^i{C$TKSnAs}@k)MLh<*hurw*N3LPUkF5bwuud59v35>t&t;%jM#O!PDQeI3T3y z!{9Eew3%0}=Z%B&<;x!c;{UtCrwM@0|4d9aOp42*(3@2<*Z@NF@>|xK7Y4@3S)uLrwUn3zB_#%*k8miet&!eJ{~Yd20KLR``o&Z zdGozy1)0Um#VvjQeNs_XH`_hx<$6mu^_Ni$(7bp7xM-7qVm<_KVL178E>HFT&50xb zVMz~EDMdO8bGf#u*} zmejEyFA;I_DuvqP%n8p5tu93?Ca^F(e}hBxq0Wc0PfiFUf0VB6hwd4V#cc1*qT zx2@%yG8?5|Y@9#aFIyTaB@l@=8^A^q$D-_)!}k&Lce<4-gyycsqa`K&iWeQ=MBoejdp*=R5_ zjbv8x@=3_i8t8EeZf$GRs$?HOyk(-UPO3RQ$&^wzl|9!mZg%uC7BcW|2wz+D*Fj48u;r`eDUq+<=uDYz zD0A@K5sZ@Oqds&fcerleG^v{jpgeu@<@5K}t1%0xI-;j9CF>P$g2f_OaS;4Z6wEFe z*+=6R2rjUS>jxKhg{;+?R*~(HUz`A_J+|RLbQvcVM-N+Ns||4FMXGh;X}>UnPU3A) z_k53`quq45?EBgRtz4{#M#gCsBGcL5JhMvKr;%hCfO!8N{Cxr-C$8m;-zIM|u#Z+N ztanCAjUpi^0+q$*{-$F!WW^O4pUFL_0@p(XmSnp{KhL@qDXPYWv=tH^CB4GV?6J<} zjjy+4;oUwRaa%*IERkJ#yPoN5SNR;@2cCDswmu!5A2DU1uj5mglIPoSoZeDs`O$`f zQAcl3ukoj~?Ci5=_svnPEXuD%C#ERm9Kpbp_oMPB(z(9AFM57B-MN}$M|X$yYyFZ` zPm->w;|O1SMGvHan=+dL%9k` zc6hXZiZ4yT2R)fBDsE*GPVQ42wRCiBEA$ayv<4szp#@J_6c9ScA-Eh-gdcr8`*8lE 
z9ghF-!;isbN?-Wz?m|F;L43>$U892Fx^)ncC4h#5>%hiF5IKCxSw^k)zb!+-3*X3eI=8`!4dD|X8QSDo#%x3_Aha}uIE$p+~uNGZ{NO6 z6>(aP&d+dS{S;-}5&<{$Jm~^EA(u`7BQOgGk<9-+iK0)^UpD!e2$az*k)q*C`V)gu z?5~f2=H7k7edp!g>;0?hS<*A#zM1_FKSyTIdUNdHVU~z}W?ntHGg_;yIQ{QAgxbm+ zcsHiG<#3*k5=z+^o7CF{2jcs6Wl7t7`& ze^M!t7lu3u#(0BbF@5u;tkUSOSmJALivI!Z;$0t4nQIQG%J@0k6scv_n9j6@KY$2KaV=iz`K}Nrpbp$3hC;MDQ~Iyp=?b0pGZLXH2l5^?Jn_coaUz@ zA>Ne6*|l%ok>|7D%+LdNP)GgWpZwBT1b>7BfbIQApkKfJ3P+kF_-9tHDDR53{<*#c zu4;UrodQQ)?E9MKk7L`253HS56)i5Y1jYjvcXb2&7M<%A(1n)v7o<2%qV#PXBV zvbYLNm)qj3ue{l)Xu7%fWoSyLNMq)UM}^_H`fG0Q{S4zC?*iSjSbja+D_HO~-G62i zr!{>>Eq2j#{b20v@@=~=RawlyNr{j*_ZO!ue&$hb{|25ep{(^%(k%m^)ram$=X>NKXw5xQqE&tg8Og$G6j%8^ zVU|{mO{^NtSNy+*HwqTty|@D!Dxnvz+Mn6DNO(8_j?VneN?yZS%NqO(Um2I7$V}x% zaT{p#?tYg^k5&g715VN|spFo(_Q6PN`G@d7xv?&b0exW|{sQLQ1~0esw^d9awK6`` zoy6!>rl@t@G~S46wmY1z)}3F|V;EezuKq#ay>at4j)lIF?1ipM9BSw3?0nTb|2;ot z%2eck&u>RA5D<2yYkGT{wlk!k zk`Pu+=lCdz7}lW>ig^nX6M@p3=F?CeA`o6!in8?kV$oHYgygC9CC+J?@IUypqbzD1 zBNbRU;g9;V3qY>@KWHj*4`qBeBGX#0@xce6@y0Iz*JxhNVe07G^p=uTU_UmS{TZDn z8+yEMV+>7AoWP-(DQflY&};6N(;-h=oJN#loG;alQ;FH(1P_n{d9d3OI7zL`c8~ds z-VjbZvp(Z?qMKK*UIc+8R7&k#YK!eq5#!{720Yffj^ZUA7Uqlhx4;nnN8?!%YjKMw zjR#;DcMxV?e=z0*1>z5$uP&s`LxO5DV<_XjQ18-OrC&!J>x9q!TqaWrg;efIV|3@_a*G=wcgaS4u2pFuiY2cVIJ+5U4NJ2Z_N+@0QD5!qc_!wcT6PU zRA}DIv4k1B$(twCVkmdHQ|()E>uwNKiI6p*Nqqqc@RPE)r}Um^1jy2lm9P};t-*_h zCI^o-QH?$oa^8kJFwpF za%*vV>7}=WFq+1nWxg`26%4B82Z>d)FXJOYso-#bwS?-Q9RCUE!h1Bh?InbOzz0Rv zR3gWzeX5n;Qvty5n@)cFF^%7Vzd_#p_CIeWsHo5K>TcN&e;RXz*DHXHVvdZ2we+7L z>!940qyhG3{~PO3B?Y9JeabMz9DH0M+E)9Kkjrhdjiv4_ zx;IA@jOJV3L9-FUe=W$+v1H??ig->vU^COjUfy+O%}zu4iH==coq7m}JVFb;J3Jsu>#jZqDQsve(>=urDCr(8+6y-R0Z4bYDh=sZ3)zcprL1yFZ zMBUy{9#y9g9ogvBF*5{@=N!6QK5}rX>C6_1D?>0JuE(>I!J( zcMFW`1n)lr{mSU4Jx!;UOlR4vV88#p!yABHdFg%N^uhi!up#6|>13S2mBYRU{hy9| z)xTAWrM=Se>LdVR*k>}#5l~VWH9Gsb1hsB2;GK80c-H-Clz%4h{;@Z{q~Sa~sdiP! 
zM&wbWZ+8EVOOG)Yp-Pm|RaX)^Hlss-bI{tei++JmQQHw%$?a!sR~U8|#g*Rv*rv)* za7jSi^fIFYpyFRP?)P8J6{DwJHj2(VUSHEWYh)x|W#a8MDKL&APM_hw)d^4=@K{ME zc|H8zs`VYwv3(GVXwK8|!uSR^=doz1(R*{p!bg!(`3q?!CbGY@H==CcTx+>u>JYDg z^CspFVk5@A7zOXq2Ou)fd9MRbAX<_UpF=Fp==^%9V@BaMUaFv5U;ISG$)~yB$sQt| zrh$GdM3BC%RBU8akGSc)b`66-BCnOU<{c zQ(zw>l>h*BiII;rvQvI;_pDTfL(5)@l7yVLPy;M-u6nAj;eGo@0}u%+8t`8JfJR_w ze}qJ4fmUxXVEdYE<`{l4em0+EW_Gr%hNzHE?QYv&9?yA)B!<)CnU*QTca&) zEC+zz5y9)b0#Rxa?ggn-7dsDF>3+op_zTTqfu#pUV#Sa1*+wskTw%kK#X)(S(M(B@ z$ELdXRDnIcXkqpkc~E*lw=zd1Z3BR;d9(1Pl9cIohUHc+=|P{_>CL+Q)@@lj(rplk zdn8!IF2Z_`xf?6F-mTj5c541M-|CiQIiBaxw2T}Al*O1{dIf!WdylY*u}jpKV1D~j zs`|sN^z!TKY1fm2SA2QRz9a$!Ec(WY8L^-Azd7mJUs#XW3%8cpn*@5kl3DwuSiy3! zjLcgMtKJRTG*R8hEH5T{f=#z{#W6>W*F;1H&* zP^u{6*g_H1Yv<;yS$NY3+6vP)8I8gzkBZZ=I{6bZWtUUrN=@nOTA1d^ljxuz;gt6U zs6+PZyp|hsdE=#Zfe^g^Kh6KDa`uTM>_2Z3Q?ZrNEEMK9=A)81^C!|?RP0>uD6JU( zRj27W5S^uq&lVtP_&n!!W{<^-qmLM1Y_|u|^*SCBvvH@yVbjUzxi!n!lDeG~F?Fuf zpNq*RL4x|!y=RSrC>t^N^Wo*1)9*9$ZKeI@S_w5y@p65UW%Dg|SwCJsPGT6`qiHzP zCmXjskcch=fWVv029~0n<<~KtmbUMINU~1l5(|V^+N66jY`1!xR^V7aB^n#?In2+( z#oKMMIHAv9(wc3QPX>w+yvBpVC|Ja7Y7Ev~0NuUt4OxRZ_)Ius0CJCl0yQ91@ZICY zD`|mta=o3gtIixU0^)h%z*1opQ(hNVk_dFRs$gjYKfTmwY%Q;LhHQ(z55D3&=$3#D z)w+|Ske-4L!N(4v>Em<{O%T}Zg2*Rhx}m3Qz^|XoagFtE52?aGEPqVsj>h=@QE`6r zt#lz{$_WYET<*=MUhtxq46bAALoO}%3Qf&-WKf}#H&Oj5PV-;gccwSEAODVPQ_NSt*~s}QzxGD_cv>E^QWCtU902s$@ zj8xx8!4~~$qox61&zO?sQAXm=_a+3KYuNkNU z7H_U+{HEehbYx!Mps$f_^7&c5xGi;QII;dNLGa6f1g#YS=y|v{&cD`HB=jCeIL*Z1 zk0uMF>~6aZ!=|`Tk&ln^2k*;blJ+ajvT!$y?Um!tCFd^Kt1n@C>(K3~h|v~oAVNoK z)oC{b-S%TM0>YzYSo)ubUw@0n9B*2o6F^Xt|2 zZ9OUMh4LD;t}Tn$=Z(x9#;y%!`-Quh%joi9(5S-xew#sQS|hfn zF6jM*;ai{Dn$0%0$%?An5nI(>aox8ZI8Yz9400EZj0sxMR(Y&}z>zdw~IfuS^_AnntR5&t-)^AAQGn9mAAR zNT!-K5Sf|2nH9@iTX;}QBh2dEX0!dvA6#8ShKb?E1`_eRMvaqzmNhEqusfh2cB&Kx zC&ZB6&WVI|=>=ot@WIZc*?57IsmEYQ@f5NO|2|}4_(Ac_bfz4Cz8e5hETtsQ`ohE{ zDj|%BD;UASX4441D&Fo1CtEn(X|8I#?~Zu4S$o~EeC{QxvocTp?d!FWz$t}-<`eoO 
zSp%5hJb-SC;TT+mEvK-J$r}5KK92n@>oU~rnl*i~TuDQxu^IJZKTZ8+BlL4+a<6?B486nVotXJfGa3#j(xgQNq-MU5|KDcRBb##oDXufkFZa4<{TzHV7e&!oz_9&#aUEz8 z!qDz~(FnhvRA_plg2>Ir`9GllYWB2l|6s<}bPGOy{{Fq{9UtxRp4TfD+kjn>;)7h* z_V~u~v>{)$sS^${%0arfSbvM0k?0@0oxB*BJ{>wi3d=%tl^an!kfc7qMU(ANE*bWW`Yi(-UMhfgjk3{!r>+D^Yk(mk$*?9F?g>85_N1>9Gq zxczTAlno*eR$7PKQblLXdKX&VP5K-|)3$1^YY#&k2B&?yYzy!-D>C4}zgCTTh4u>R zI!%jW7N(}K%d|9}7H!|_z?H_9y+f+x_&@2Cz-|oM!jNrxK#xbqvS97&HFle+I=6>D zUDuy))Lm=217cd&@Mw1a83Cjp0A_Do0^)}NE)^#6#N-kX+U;pf_p_1W4(44ydhUvN zrYr_({#NK7P)j$8@J+Mg#UM1BX8O7p0bnMH2Je-E0XIyY(13Cb zO{QF>JTge9yMF8#AKlO~KR?iV=+1V;+OggX@Hz48c|h<+a>si`3RE0oPhE*wwKiWU zd_DiyQ|a}$upUAtCigX8yo35pCo5gQc$o1Di+r(g>dcSLUL|FuWHKdKtB;?(g?jfK z$qWiTgCg#UgJWhg4Y-D5m5qN{E6h?CnfNqhRawB14OogM5B~0RF$rFh67jx&3F9E8 zx>978^2W+mLvM#y2wYLhm$I8G=8(RSjxmqX58`#=X7>gnl{CFbd=#9{^QDhwGdLa# zb?nt&B3vFrgEFX`>{-$IULqIGjD3!RbHdPY>xHADoeU4Wne2fQ{2S_1BP|B%{E3mcFL@@G~HA+cz$4w&}&xmXgJ(w)fB1nEa@s+8c5|UB1H_ z&-H8zBQK}O;PkkEITEw~$#Jz$a$AqFq41}Ck|qxTV{6R*xtI?WdyP^!YAT-(J}qSY zBH8z7BLHTJ0ff0n)688TILS@2=0ZH&-wDZLAcGL;5j17u3^+!irZX}yUtvE3#4Nu5 z^JH=n8{_2{#gmLhHh!pnIUSR8&`xv-E6B*`Ug>X_yD#p(d1QP+Xui~h-z|O1IUK&` zF_bRm@@ms`bv_NG`hZS1h{H<|`wcy&Gkta!U@8t1*dF>IZv863Op#UDC6s?yxfeie ze}A!B#Oa{Sn3`d~53saX2Vt6!1Km(A(`(Wj`3#_@r+ng`*2HbU_NTpmkGjf0{PWQ< z2%%2yUTzl8LB8m^32hkFy!mhp2;l+9>q|Qq7AS&6dMDc(`6{Yh<3Vbj5A^P+L@hdR zO@~ZiZw;yL2L<%zqjK;C$A_saTZDr;`r_u7Z6}$$!j^ni(2Md%$e|@rMCP zZ#r^*_%*O*;oe)tGRoxQ;uC{~sq@4~B>@n}+`fxrz*u_uBlcG~b|Z>!nK>$WS|Vve zW-jmPc!_X^|mccj1fwPxe@ z{(Irl{R1Y~K!6%V+iAZ6bF}*a{Kr$j6X>}=`m>#a@D1o2BY&X-qsSkjSTpxM!x#=0 zNR&gDey$Eqy84gM7l;0Xy;(A8=Wq<0^2|>{q|`P*`e4ki}6)= zjY*{LOzesT48W+tZcC1<|Nj4-fhwxgoy2s_R9+%1m@XdbB&_R4sBQq)(xWJIm_~;z zYiV{zfvV`o+Aem!_3g=6K!Sn_wvfl4X#&8Y#ohj25<)XmVHuU~Hh4bYTqG6}?mqdV zto^oeAJA;haSof2erb-<!XONqg+UzvfUP!A|TY(BOHZWnB;pC0#0HbTha1{BZ}q z9Eevou&Q<2R-i8+MkSr>t<`nnm>i&acjBq zjyVjj;GwvS+a|D`nebm>1G~>$zH7mofMG{@;6O|VCA>J%<0^KW0QLASava%|bDL=s 
zGw}qheKyB34VDC`Pq1+m>-AX}1(-HCgzIIAg;{IDF32SFA{3w6Jj$5e0Z)S(@I;m2lJVKHTuMcb`$PX5fVl?p2)~DDjq_aZ=9fTGjF5dt$f-_(gq{h`h zaQ*tY)W)ep5O+gpZQ#tlzpoYx>F&k6HX(p2c3ky{DbL)lMgfxK6Mb3%o^s5aBe1{_ zI1{>HTi>c7>vq6QEQ-bAAr^qa)69_SV)X2C2_N>{#=ie~(lEmx?zs=N-*ax_r0`iG z!QmmV4_FI~xy1z(52iNw?vSGg7#6p~-~*=}rdY<^|7S|#j-`r&)q=$kg~fvorbCZL zf2B8UBoE8)U{Zs1*>m@PJ~yH82{GVA2c8d}pRNNOgn6R%GbG;lhZUB@a!1HVs4We+ z^b-JTnp+=ZCEx(QKlK}r@o0SPt3XuHp+Rh20ugQ{Sy{}1nY`hsVaortq+0{FJiJXY z7OO;Q5YeV$-3Jk=xzG=(uinP|f(Bz(zN>k6Q0+Vu+IsNxS&ox-^Ax~sqx3Fyhmqyz z!T0Qi)ZMVjE&)x)FM^anw$`x8rV_6AK$sod`NP{Q4G$(Yzk*8Pemg!j((1uzo_g52 zQ|9ok%dW7Eo$Mqxfq??**5`HSEw}qdz=N=EXn{=Hv?7&xy%+lc{=vO7Knv#*7Wrds zqyj#<0t`q9P3{@^&91%m?6*z3ruXa+oH_sRHNB+oVR9XBbOg4`yVh!LJ>a)LfG9PzKg-C4}78hiDKE&;jAZxhobSmfl}c|bV~PG+>=xT4WPQ(Npb-E!hPGCD$V zs>Rzo*ic6uxI7(G!z73YXikL-F^`PO7v0*nQnf!#fC|Tm=QhPz>*%pPYQC6zQA_P% z{fP8)kps3uD4ix?o;yvz4<(b-^zEZP+r!Avu-3fRYCevn) zLnif4+8cDsK$|+!gRmk}CIh==Me%%jibegdP*VKcWQX*Ld1 znHx|_7>zF}!oNPAmps&RU6}Q-^s$di>gXC{tvGe-nv*U``gxq64zL>1V3n$lk?jXj zv0#+n!_b?l$wXsGP8v$N(yPl8KFI}|;6U!W8m{;Elm!XqfVbj<83Lqx3Raz zY;4eTKDn`Wx~7i1`KGQ9k*^P?@JA!E*0t~aIdyfO&VBrhxsHbb0M~rFzf3f*%EQe` z5idl75yXX>RQ%W8_=b+26Xq~zjR`Q|m~$Tqjr(>9ng5T?SNM-)OFSuSE?(H_12O+C zfYj`!;nuU;d5Bl@w%|(m-yC=v><#7f~ZbxnQd*< zBA3;;3IbFi;aYjL*RKviXL=#gO1ol7{&PXzn$fP{5-wl6 zJS-^IFBi^C8pWUhD5nxM)wM$*TS~V(m-roIXTjL+dCrcV-w|wE$}@Am$b7YK8_4#L zNl<8>azU^VgXvb8P>zJzz&+e#`S*)@dJ*?_24*=pZcY#0#&lJcRVDq1_wbuO8e6Mc zEX}?q*eR-BOO7d4J_Htn4;JCqlWo%|Jf;dDEsxZf>1j{;TJo6EijIbPCCzJ407~|A zN-eYi#=at)-}@b8{wUweUstPHmoEG4x@euqlrlN=#Q1o8YB2I@$wW!aM$n)Gst z2Quv3ir4T z6bC7xPaOoW6TNl`oc%4q|JUx-LkZ6Od1;bgOoTdmjl?ziRq9Ke;XdVf{2n`sWZAr8 z010c9Gj;~wVt|N6jFG-ap>dRGH_Iehf(e*%IyogSPREo!A~lnZ)M?kXm=>c%Y7hYIEkXLXJFzsW~KEKx5m;p$y;rK{7kr|E6!mWNhWr`gK$9Q^#bu z06lOjp-E}fkA0=j6xCB&*8_6W7S`Q0%CP=!%3+_EBOC?S+=;6QxOD7;2Q3Yo@lP zjlSkudk-_3l;%C#TIG}NT*M*;qE~HFJxr-I@m=V!NPYzu=4NV;ywjx4;@gzFJ{rGH z53Pk)vjiJYkR{eBp;7`gaZxlnme}WJ&m55BR07t4qpnFu30Yc^!0#}5do=q*4_XON 
zGGUnEZYY(vj0m}Pkoj20D>M)=YehmCPT_t^EKZr_rl436C{{XFsA4DQbo}WhF9w1s zXAeOF-A^e_0Q? zN1}c!C1QEHBF*tJS+0AqcWdSa%!AWO<6>@)?`dxi7X8#qPv+LA4WUgMFh(VwPbzpW zU;$5dAMQ0U6Cv{}Z4y6XG6Xf9DwkfN&E1%UHXKlKROE3a$n|r)tE!#Dn>`G5C(N)i5vBP}+wEB+2RXU|< zch?V!f>2&VGhN3_#lUUe+$Gsj5&f{y+-=&Hzbxs>l`^9xoi-yW(>IWKam3J*|LMSZ zyK$aVY*u95l@DF+0HMRQaEGr7K}9DiC|m{B7Z^C6RK1#xQNN*P5oBjJuR7`kHdsRS z`19bzmx!pao}P?V2I&cz*l#Q!J68-YF0128Zk)}WEBDI+#bB4=a(OV=@fCxq`kdd< z3h}Y~!f2&n`2%z1^|#guB)nyCWVS7fUe3K(FC5Yd13MdYScnB+S<#gS86zo} z)W+N+jFJwcAmnRobFJCD_+}2B2aj{%pVg=>e3l(_^ zuKT`_%gO8X{Yt+(+6wW`ZC{e0;fmCR!`?Zh?|QPQ#B3fhV&XeVuDynv_UFavxt%g& z>un8)&=YWDeZ+F$o5Y;N)m=C6-o?zF@2`BViH-{7QhN=HJZ5((Q3s~^N<$<{Hb+|K zI>--t;Un|EU(@DzUGeMW=2dh%G5<}07Z}TkKyKeCd!JuNndozGb3DYBc|??bJjeuO@mIEulRwu?9{71d{V^ z-GB2--6_#ZU?@V<3#pMx)wN4psuh4;@ZtjJAay)-BbpeI+ik9Q4VR|p(nn{6hVi4< zpD>MKhr%6hpzDQL79lr<9O%VIN(ZywDKq&Bm1@8ysc(YHsW~%brM9Kyv_6BiIMm5S zUI6^mxrpXRfy+w7(?ilF=`^dJBYSg8eA{Hr3W06A+Y*~rjFc>vVz@?J@-ZG*%8e;cZ=7mDgAe>ICYmb1qGHw?ou0RIS%b;^KC(pV?J zh(J~h79``R`A8VE8rPk=9#^}e+vJ|U?^0vWURjRJ=jJ%C;8ea72qbQunXmto0N_Ew+ z-2nRME{gaX*CR_*<`*{2=tJeKWl#fdHMd<|UAH z)dYSC(;ihVZUAENPur=8fS;k)P{M}E>e21h5O-Z%rSIT=hILzA>@DlKQ2Nnb zlG+Li0+cGlk@r*sgNstP%fRGa&TVecGoC&=MKhuL&rDL3->X zc4mz7P?>t!cJYWDei$e*@Re#h=6S}xd%-=QPNKKTo=E@=g5?Oj8GI(?Hk$~di1%F} z>pET>6XHrshmnNsK#b=9;c4FlsA)|S=>~CDL%~5H9M@{;7n~sDPW<)<(EXI=9O=gK zJCXBNIl|bB^V0Jv{311K-yzeOsxYQG_NwG^1{-C(_}xADj0LoN%86whe==Ql+SdnL zQ1^^XTB3p!#awxmdcj_E!8|aN0w>adYyt_io((R8>yz?7GxW2p;{fbrS8AY?%MAYv za&x_T(|@;lp}Df9`3;m4PxS8AhX>&f3XdPl6+X-y6Ce9XB!jZ zg|D%n`icp)qtuv$IC}k%Yp44ImhoyupM>Vc#MWY4q?Iq7t%-P~D2_EV2b|BxQl2eL zMD5L8WO^b?*&BW0vwd!z;o2OtEVC}GrzZ$zALZGdh4rvQsO0cQ;0|5 z0yx6B(>He}6?`NFx1Nu4XWUVnj)+Un_5oPM;zPZ&)M#dbEt(pMckK#4$$ZO#`#Z`J zH^7grdpl1}AT@XA+N5Cn=DNF7$$pik;2S$LOie`pAi7K}AjOJ$B2}w+w|D`l;d$R3 z#KL3wMoZ&I$K9fxpN|39r}K5V&?o>U!ym)Hy1xb}@TG07kb(>y9~6S?F;U9CcgtwZ+N%2!xZo!-)=?|lUcj58<{T?K}uuQ?vPL&5L3e9 
zapL1|Z+P;nm`vWG*N+AGBawU&NIA}-^eb0bXS%mXK^5v9aN&dbSW1h#TSQiUazDlCM(m7MM&YjLhN|5iJW07tiv@0LJ}Gi{YyBhjOTIC53P zODnUID4<~tg@MJ3p1q$M>Quc8Z$3z?#|Mqa zpX#xv@6Fu#8nXCE_3yZ>m_^P4gyUqpfL}jv#+{gz#@%i_LH<;uN*Zh@3r46CBzd0# zz{|}fGW8rl)1hYDRn0GG_6Iy={7Q5gpy+1KoW3Ha_z`Wdr_P7-S}-LL)@G#{;M4>K zP`t*j*ayBSO?c?KK%DucG@%s5m3il{rhfQOQ)xC8k?gAfG*vR5XEYzkzBaS~aM*un zYpOe4Pk$?w-JC#auxe9J3D z{pV}C=8xhMf|qaY;d(fLXU1$?4&VRuw?(mQvoUe!DFgo4#n1xLTb_?>4licU3~rpK zt-WV^-!{m^afn_VbYyF6Hr!of#7%DOrV2}LO?mrQLePhe(hXP|3jp9Is}cHs*wh7H zIus26*VMm?=Q~ro9BrIJ9ITW&eq;6+r>Du!1R&MNz+4NJy5qv2`HkYjHR$fbq(+*G z!@X5>Fn&l>PVjpM5lV495V(jbkmG z5SVH-R$tV(_x4JUV!Rvj7gArJZwDHzV1F}go^~!bV%%{X60oqxsfDHNGQ)m#<>++F z=SZ?SCX3LASguymeW>`YMjSV`O9nJa}BErC%q7RkPQIQ2aHUI0WpGPiAo`j+wh z3UtMM4OUegpM-@iGTIC=+5k?buS&IjBxI$^a|Br$r6p=vIcvo^Tcm4qxH!wd*0fpm z91p74DZf!NP%%*Q_Exeta96h|KAEK=QF^Cp5mlmIA$Dw{&$LrnR=K6S8Ok|eH-&|R z##;3A)D27Qm?F=y;Snb9aKB$c5!_$>(eZztvRdBk)tigDUdYvoGAc3Z;N`m7Iu51c zWpJ}D-~><6+1AqZbtidy2e^n-d$`G;MOinr zX&JrJt(wRnFv)hT8d|uRNlvQb^ZBzuJ^6b6clPrF(VBUiyL6uyaV>+&L)AFx;L=mkk65|Jlja0dXSib@9s0#D)hAR^xv57V&VvQA>4RF<;Rbk zvj{+3(kMMl^kWeU>XYp8bl38IyOrwq1@EX_$Y!SPlozE08{QoTm@D@3=VOLl>%Yd@ z_YTBzdkEG6YYdBJLDjM$S{Fb&6UZ+OFj@zOJeKC_%}HNz<~@&EONwbDS5K$Hm-WuX z0Gs7E`9PORR>g68q4sAaLj?g|UYDD*#Q@s}CxBz-;d-KUZQ8^JN~Dl2MZ4U}J}|&+ zD`WO%e{dighHo)hk`$GPUe{rSG`KoChn1AEB6sFbTRiq!?j_|<_V8scV$kWlp||c! zp23|GI$mYnMNO8Gk*m(Ks#C2+(=WnMYPwb#Un}1p8S+CAm+-N+XM>LF)Wex)=wAn4 z#s!b>*}EH_0?AfD)$!%vU`5(_l9mAH8s5(!@+j1%D$a}9Q87HZ|7$9NmQ?kN8+LjA zIC$Vbs$$OCk|gx!^~4ci4&US$$Sx+4y>V&qHE`n|Q`44xr{oD1F{NIa?DsnRX5)*~ zq5#5GJN4y&at>?AFR8N|A31jc1m}h!fwCiO*2;3g7GD%VBlk0d5T?!N9Wp=RvZfhC zRvUCHMPA_iXoBJlShhIFp2j#{fSr9AH$R8hwt9X;FRZ$ewoM%Nw8gYhK};R+1&6Ru znf;{eMp<8*SAUjZfVHN;6d`9`4#dg5mHu~>>i}*^?f-|Y_l~FffB(myl2k|~S)nqs z3lX7GWF;ee&t#K54wYnuWRDZFH`%jfWpj}1onwz<9L{+ZrX{Ca)uk)~44kj;}0?$OfuQuT6A$LSt(&cWr! 
zvI?#)`zg_bo(xgS&Ks@iFqhlH&t0k$(`PSfnFSso^1d>w>}nrMXYp`w*>44q%4b*M=V8+yq7ZvDnT$RVbT~HjRJHGcm)c9&-|))U1D0p~`3I!yHyp)q z@au6xHOs}m^8*bh1{=cEQy;HwzCOZh6(2@MxxE)3+3eO7zfvfWJ|W|_>qBQI5J3#` z`~idqaG=emMvqU+9XA}To5UQbZAvgc;Wg@^UhnbtDS?L8!ElxZK>TB+nkKe*<$ zgKj5&y90aAc?s3@zcwi+-^(0)JntW}>4fX_NBuZ8Dx1tkPNcWbzUxseXt{2i8i}7U zxQzmghXjjYNEtstrmQ`Fh?LVp6N8c>ih6y| zq6Jlul!+Iy2#>;|-04&5{+f2S&*6#Ot4OmCe&I}Uj8fin7Fp5tZEVds8S-=rQM#7( zZ8V-2og7k4pTUeFgYa70)eEJ}=}7`N9@|%^4b<DE3gfg8t1RT<*Zr%$E4 zy_}l%L~GwE&iRDKX)5N(eV3zDKDcU8xd^-Ow(Yi7!(mJSM&Y`}*@;PkF zr&|e&*0#l7Y&Vj|&8V0TSA4}%VTP_Pz>fBuEalbV+GDLzFWaN}*wsMTM4X6{$B%CH z)*oo+-(eR$s1-4I4)YF#yHIfRic}1TSUsjW%nA}|9hFP9j|d8jRGr0dyK~*%u81U^KK89JfAZcBqN*fDqS24-O=3^sh@2#?~=a!wwl3n zem`0q=ee-CfE=^GmM_ftVvJZj!XZd;b+pQ?eLNzgs{-5n#W{e8xV(N3D-u14`keX6 zxTSt%^J`V7hwjMSudG!##Onb5A<&$?=>QipuZ7KKCWsX2j8x<6<@mTh>)Kt;1DB;O28XdTQt2-`lsx=Q&F;dI75 z()UUSn}kdG=(XMC`xa_p60V_rutzl6UfBCs#Wlw9(b9hht2>XsGR#;~n=)$lEL!JsTqf)Gz;p1333NT-K#Y zM%rq8Nf>dckSkq`>sx{)pP$Iywd9@{x!;YlV|3@NS}T)0K=5$Owv#o|a?QEHb`l?5 znhQ5JxbvAM@9v4uiA?K**07Ir`ZnnVe`A=yX7XsgWiHcTP}W+Sq;t-0dyUh3bOd0o ze+VZT_?{XiKS$ijV#4r=&tz|22u4HJ^6dP82$EspF_J`Htox| zhS|1+jn1gcQY>>asJCz9iVJ(%?Jf2#0%RydbjbwM!o!sa@V*4m5n8ivdF)jjw4CM2 zrGBvn)?JyWqW%!`jCFu|fC;rZHr8%}m-f7d(;D`U*gi(RT$cp$( z@-H{zG~hKg`M5~0=#S3fvFao%BcqQ8!o>7;O~I~7Oq$=Yk|r}|#S6~rtr6&d%dvkg z6taSBTZQi+Dj$1tN+H&VQf`H%6{Q~P*jzea5J%v_*^Y^_Mil5yt$hAEGKL-cFmX;( z^S5msb3tNo+RxK@3_Y&9YcSnh6q|hq(b~ZHreK1)Lmcq1@By6UIH?fn#J!*9L}ZG= zMXCuto<(1NaX)h>qe4AgMLzN9DMqYZj1x#Y-+m8nOLqrX07 zv@{kd!b|9()cao06mtg2uel&S_)kqd{Cw5+k=7ENLDJg%FvUFAOK+9pvP+noulnsv zb@RL9%wKH2tGN{)z%4wQdf7b(%BNqq6$c+C$jzE3&H7)TRx-MvdZ(lK_NdEpRBsf7 zy>8qPD#gs?_y8-8eK708?c+46c+Ga*WnNk=9xcxB9K0fMzZK8bKDk|Q2;(O*K6pmm z?eIQXYY`TaHbv-7kn7qI|0QJIL?P%y%>}v99OAo6;r+7s-YryL1s2Y|ycq`jbc^ob zkq2TTVP11im!Hf`oj!vJxZtypy91Z7_P~zR?XM)v`tJqDnz2b>R7IrH2jvP@i9S80 zYc5_UKWZV*3i>&@y>llKKsDvQ*LN50pb3B`KUQeAa+XA3J%8r{sQa4tMWF50EoHE5 
zU)bYR+aE``Pxpq`(BNGoiTUGYt`aZ)2BhI=9M4*>X54Z`|8~yhrms(T5fQ#JiFK~iLPZ^)%W}A5K(#mwZvi%UP9^NG#z(A zQ^ih4c+!E*^R_7~Y--;;L%`x^@tY^s6Rd||UrSPT%R`2lx;sa^VMX_c|DfeyRXl8X zD*w-9M2o3?W6+GxSvtaXz!_nO8RA46i%n>GGA#M!amN#jEUx%!!YE zXrFydaK^G`6L-e?s{TS)<=2^t2rAx)nMdk24D8Iv&*7r}xxxaMC3dfRI6Ss%jI5q! zXnu7q1UK0t@fjYv8P&;(zhL0@I6B_FE=8)CBWhG|wlbKJYzUS{Y2Lmy{!#vLso-v~ z)jTHTCrVKf*ps5LG%p+oXXpkGNzrwh9^awQG})ZbBEvRHKsdk-w?OcTQ>%mJqQO-Ewu8!euh!H!3Waq8aqYY zXx2*I%lo=fl$)iv9l=%A2=a(F1(oW(P>OpnLTvm-X4>%?1J&9t|DCn;@vN^u*FZM6 zA(nqAOOO&k-qZ6)>mmm<|FMT;>-u7jzxi0ktK#dW7@n()XY=X(&d z=ZJAI=BKLK{Ua26XK4xUkc2_w%KPUtz{)bD=$#~6IzX+CjCG>OB|*)civ0x>Ss_ep zk7hDlPLV&Bg*X$b#Aw53I>-{n5u2u`MisLc1dUyT|4wi$BRJPg}xACcSe-%_UkY#G#qPq8d~p1dIg)N zasTrLZ!%{V9=d=@LQt(vRbS>Y$N*K_gYb+;1m}|r#|D0yHma)3P|qjn&k&=Km=K8) z@=PKn79o1U_?`FN{6s9MAM=~?b702NE{;B5g8zO5|KE?0+ElA}A#yv3(C0===UvWk z&rl2Z*xrIQ_#DTSB=aIod>VqhP4;KnS>vxCFQm|~eOLq3>j7WUb^h;Hv`sO$g%WXL zUSEJFgP>untGt&35L`c84&F zD0=eCo`GT}r*TVLzUR)DGZS3Q+KvHR@(jv$y+%U=Po1M)074@(Y?uE#7CEt*Y*R&7THu zf~*FJk3f6*qtmTfB&hZTX8?@VOfEM8ySy-(y9?)Y?{zn$?r?VMPHGe@nmCH0KT zzf&yNTLTM6ozcWhP=io%B{<>|3t)S_bw099%oQYy|AwIVx960R7YQ0(0Lgi{hk0lY zEX0@lAdft#Gf*o<~k6m@&hgb%9anoVQ)nd|2gcrVFLp4R6P=)DHDEj{))|fviQNBa{i4) zu+0y@T$`_x<3P`xgBrA>vU(mF zd-}rd0FobXv~ZK?@rFXysBIoH69Ec}b^+9&z)QLgu3kFL)pH6_zmPH1#*ZxA%A2{Y zdG;M<)apC5C|sWL-^?5#^512^U(-n!vR{ikNMiaVnP!TOhR1F_GWF6{iOz=oe%)Lq zItqh89k0Jrh!5cg^~}z8ZVss+cM}+}i0aJ(0F>;Bp}Q_Y#1zKwlEoQM_(bC2HsXc5 zJO0vdoMx8b5s|qlJW?>|#gejYy!H}@*EA=_OnCyOry*Ve3sHA&=9eYZ^x5QT=BN#+ zsz=Rd)WGPz&^nU*^Ug0yX$_rBwWOZ-Jsu51SbQDL=<+j(qGByTV63CRpH0=%)zyj# zXQ+2<#CC@sxI9jyZ1P)-b(?eImQ4)!p+_k4S|p{~ z-L)H2ev`|G!V?3Wpl^xR;OQd+X+n9%>uRf1T;iIJy!NH|X2yoadYWZ0N4_@he?lj=_vjpE)kwE`; z@x?qMiotp(p!8EhBM)Z-vbsH<9L>zFAd?nhPT-T@GV_Zfh8?|i4U$)eZOsDi7qrQD zzO4K>n0q@`ez_1=pJb@uzL-3~TT>|^Fju@3N2Z@5!|1?-2QngFh={~<0Dc!|mKBv!*m6d1SeMc`67 ztCvVJ40GtAPv`+Ir4KhgU_F5!!on?#3AkCDBkNB%UNf~#M_Y{{K`U=&+^nN<7nWKL z{LD37-GX#Kh_Gf{6`^6iOqr)wwf7+EVaA6weHgnlQ^yfi_~x<0NjT|1_DN7t^)=hq 
zX>j+xL+|o{bR&(^$YMd+s_=!YtC!5#-mV?);RYC2xGj|WMPPmLLQoRNX6M|K*4)*P z!Hy&bN51ao?LXMWB=JkSq0GCtSfw-2`q%063NC`T;l!7~>H|M}3t2g;4GDkm^h$6k6hs)?ax^#Yg9D3C4wQkF}j28zvwCeV8J2}o!v5l`2W@7Nb zYQbeOJd(Sm_QUlP@2^Od!&@OidRmEUY(mPvh%~YUca~r$>pX6Jw2H%vgD%iyX`5E; z^${U*XAFaJSV0#x}F|A}Ouo4bCRD@XR%~qX zmv?z|LWyKu&)P&@>nPg%+78e*sY|-qq<6Oz*G{OErK!i0v?yp4aevjxXoAY){h5>E zO0kbvXp}~TY{xhEhc3hl(lW`J!us!}Y(MOX4(O-!IR6kFJG)XUcPXw{5r2$CI{!Is zMPk~}Au>J|>w5+gE=N|yGHre5T9FAQ2XR~c!HH)mz{jo+^44HptqP#|uR$gnEAYZN zxGwBLD}lEIwDK~sEYUN?Ocx#U&nUJAC~!Z&>=OD?l3%Vvto@|Dtt0aN+_>o(7u(T# zEAra#6|Y8AP&KRLoMaSdyvEyFcOw4M&H8AqmR76;k=;rMLT=xddX)yCi_@>#(2obF@C0i_l8BX_{;_YpoX_}95^uqr=c3m)67xnq=#OtCw+oJ zs>!2e2;LPZGAK+q)lHhhA5%fDlslJkaToJL@Kmg0y&>Zr0lA2mGbvUd9XA>KN(+tt$tKQV5_k@E@Ec=jp3%=}JLU<2Sau)~1V zoWR08>XUSxTD3B%#$4MSJt1K2Kws;~e};@j`?0B1+1fXvze9YQHuk2W88016$S^d2 z7TJ-W2Gw%3$aHrwMa|ET7df~L4>Rq4f-{a4DrLuPoZiWhM-RM5Btca!&>m(o7r$&A z+)kx}Tyi~utFM(Yxo-QiNn8}V_mz~OIxj^aB;=Dg#ff`wF`Y&z^ z8vN1x=o&3rn*C@by4<;W8nh$5;wQLVlKc&OCIH`)$@+RIMyu9TapDvAcyjCM`4OI7yFU5^`XHZGvt`b2B-D`Ihz8<0SJK(t@ z>`^`=L3hb2k3CiVB7u+pe2cS=GRspWA$UXqf)l_#-ai7@j(>4&Ge}q2m(?d>@1KQ) zk^VSR`ez!4=IEoTEr{N-X#lNq_)61BewxyR1f)MkJ6?shY17~qti&<24Ezf%sf!*= zN#ERDJc0G+fF|YBE5ky1&vN1UBkO_`9>KP@JXsf4&plOP422pv%U${CWI3}GeIUCK z3uikj16i1-O6pr#`&=6~O?J8UbG(v;`Nu)@or#0Ut6X zS%9K0J#`#dJ}3GE6Px8UM+#Zg-cwv6m~^*uY@Mg}ODXfkCNxVP4VzV#d%%;GNv;S@ zzQ(XTL$QfL)F!tE0TQH19M7z8?m6H!Hjt{qYw&*ARBC*Nibo`V{oF&%EUSwclTB1j zQ(p`|I-$uxL<1Bqr)hcB*n?ZGVL$%>EZ92X{E;yQ!^#z|l`|oL5gEzjvmQbQr-RzjA6nmp7%6GH_&-!r8v8k##-4SFGK$2BaH!#C zTnGKLVgGZ;+>bGoy4HFNb-SUQPicNmyCNksUYU|w-YNU=fjn z?3KTEeWqt9;2g7zt-Wa$V-0B7Ry7=n6H)rn z$&51R_vYZ{z;w%kQYfZMln%wWbL*)=NsM66vXcDYw|T9K384r}H#0)a;yLJT@d~~k z$V!$zN@vW*Dm4G1ReGcn{eW9RmTUFaMI{vv!^)>Qj8DDj0%Hr3=eXCX~ zFTWsfDU9QxU}UVO+p`dQ+Sk^X&!G{U^{Up-Us$b^1>0|glF8;jk68L#t4NUf(pZz| z^pXGh=l)WV*D@TL@FGKz>p=|{aGgV8$~{tE3^V*P#?DgJK|~Y}aBEk$sXocqG?Et- z68f|`)d*9Yd_O-2U6TWiw#cfD?S!Ea5GNoTF|xqo7MtA_Nni)Q`QyH2s5e!q6d 
z?pp43e%G(d{`EB)%-N6k_ZqFuyOOlZcW~r``QZIts$};q){$-aR%u;19BrvU6l)>z z#*8XDs(O#@V`=uRx<1t>6s@%0ujBQJ_+!#Uan!nrzw_({NC|K=RFEClXid>|owUX_ zSQUVO?L}r!o0n`D`?JuKqcIk=>&GDoN>$AwpDXuVj=WlRMQ#p zczBOf$xk7BwRo4RzGkD{x?60wK5X=|6dQhiW><*5_Os9}{~>Ptwu#-OA5%f@u;$3m zaIdKux1d2!c&vQH`HHZI5ljN>ZqC0|De|4Dt+pR>@hIcg!P>a0;e-!`_Q$@2hKEYXCCDIC1`TUI5Q%&9)!yyQP5W_+PBWuCm@56ue7EP+*zS1k z`2rGu}|XMyvJ2+bx7AIzSwjI3JoS< z);0Ej(Jg^P;MJ$kl`mS2ICv$^s9(~ODrY%-wc*qPyrfVJz*K8#dd;@UeOGV5F9+FL zDS>y0z)&vz71&V}gl5yt69dTm4e~LLrTesCMvmsE?gq!e+&l206#*#}Ve-~zOi3l- zhtZZk$O-4`>^C2;H~&!ZjW+JK(l6SstxcnRgsdxl+2v>{ONVx;EnVj& zmsp@wALhz#Z)=r_)rIqT_v-cfbt+r0VPWM+ zy;SpNJN08tkLhs$aQU};g8WL7)B*b`=t}!hGR~0-fI0I;=0jVMK+(Fd(_rUznUF+qLCgUD=Fl{^pR}#a96j@#nT>zT9`;8pPS;$vPU_ z_eGHl>C0OR#LA}>ci7gA@AtMK8Jx~64)Rw{y=r7T6eWF_@j_%{E~(H#CW8+Fui`d{ z=Aut7bP<`x$y-gfjqya6q$MhNOC%hcjAX=13{$B8{G{T1DnV>>U#Z*1kEdy-0UNp# zS$2cuL*wqs@K)1;KErNc1iIt)9T=I~dsF$@+QobB0S|OlhQc&;^x4$5#cvpBQav{A zq4D>mIF2ORj-i+Kr=?z{F;}FJB<>zqi6${UdFf|Nm`W0G+gB)Xe<8RD(uZG@6M|hK7gpP;sPD?KFP! 
zzgSmQUxa`NRry90i){OkJEu}V%cbO6 z?bd-)v1oL9!;4zy-~3mP_BrsDF9@SaPIKTGVGBhAQJn4MJ4k<#V(pwN@usZL5F)P*4tLU zwq|!MIeNGL+ovV!x7CcCLSL>W_T~=WyjA+Ocrx_CP+mpxa;UN~_52+9@t1t~=S%tq z0=?&aO#=xGUXJQ~?W9JTbAxYP(G#J=Yv_n#ZjwMx=2sygTCqZ_T!DhWmo35f5VW;2 zdB*M`8()rdMDogik1uxRN!XJNFJVH+`F%W#bF*Alw|Wl=t{!)glI|MkgKs5*%r{+i zL@&bxh2{|;!=vLk$8u_>{@TA7Eut##NYhS4186dBFyJ*V?`A=Q*+1(J90Fpzcy?kB zof)bF+0g>Ze4IN|#<*yY+y_@&ca$vB`cHZcqcU*YlYuwhxBn7>!O79XLa`_Lxz?Y= z_3!fnO$VNqPEOwFz%CPsZpEuFIjA7fEJ|!FD_vCdz`$4a98!0zAK+5y|D|`8vqlKrT84;Rw7++H8ob_8DG2cW4!4-loxLonr=ly*h(fXFuDMc_`7Tt365#s?*(Mxul z;3W&Z#@_eyosh#D31@y4#&&43g5k6hb3D_Z&j5K3tG_9dKXEK~nDw)dx8V+YK_W=Q zuH?R(1wY2^%-aV*0sJJ0AK}a515Ju}>L@+&;vn=XYS@Q&j#c!30NeP9;V4b^(Zs8% z-Z6yGj)D2B4tWpdSBA;3%hk5|)?p=Gls`WtoT-2Co5yfLA0jq%!XXm=o!@+otIQYVQ%PrZX_3^il zd);rxWQ?w_t@l4=9D*k8Y_@J)v2A;jAr?T;K&>ful)^fWC;v&J()`M%)j!=UpX$|0 z2z|Lrm1~`El_E~{wb0p*4NO)zzP&vw)Dp52R%{M>4WdM!nZi0e`QV?N-aoVhE1?SS z+1AyHc9I;3V7=E?Oo57DGz6n@*z#nhBHjQ3Jbns^mKgYPK%t5p`e+)Zg^ z!R z)o^wjJ}29gxWfq&<2TPi=cZb0(kUyQZp@zs*DMsASPJMPKFcn#lBJd&c!`uX9tDGGsD<#w(N>@`IFbY3#SufyH~OgpDyhGQB&H@EcK_ z;DTshmZA>C$1~Uk!;Kwxj74Gn{JsD!mT!YL$#eRJ*=g01*?ly}_g>mx%0Y%D_c^w@ zz!!S~+;JiPp25#dJ5lBgg#Zk`mSvkFrrWZqe^;Lk@da9(*omDTRK3gw6ADuUm=$ac z>oYadD)6zM0?qrbp1w8kcgQx@*55VveWF3D4+)#kNX4ZT@r8y=;41PBoDLBef2B_q za6lSPLBw0N!V%byM9_>W?hqdS2}c90{y&aeU>H@J68&HR75W-7DCQ;^u$xK5CJ?3q z1FHab+MO+Dv)e2OIGZ@4Ge)_G*Qqb30x%$@iKp{nOyQXPP8AchXMUn*PDs^N+4D8U<{?}>z zxriV2pq}Sh>0Hik-VcPj{ON{x-)3ub&}DV=^GN{@s2hB3e%F@+ZOjNQ_b84kyaK?h z{}Lo04Bn;eFq(A%pZxEHz5*HGDSlAEU+I8d2-Df)vW~a-2@-@H&eXuR~Vo`V`|;lb#fT4jLl-aLg15U$AoQnr4g#i z>gMBf{tYMIyGwytIDZF218ScBu=R9m(JtxN#5x{**^z|d|CnpKvaVZy$=6>rC$KHj zkR2|aq?P}^mW_@IQrkcWB)b$n-snidYgf++&W}w7W}pZr0~b#?TWG1g;fca7gLdU+ zxI=Jr9Qkbf_kZ~gRU^M|Kksf7-0e|^gkS5s#u#rFZidbj0VUl65EmGJr%7cDTKObS z4q7Q!wCg)o+@Lx-r~=@icE|bsH@_celO1UFjOe;reLQkLE}Qm)G43;9RL;9JE;_2h^5 zbnU1GiWr~nZx_Lt_qWqsPC?CEXgnkL+4Ww?!09E(cqyOoSu*+m&G9^C>0Bqdj)1a+ z+6kY4sb;bh2EaS4=T!eJ%OM=z?uTvx6s%pZrCuyAr9XB 
zJE@wHl5v2K>K^_?B22M2$FA}h33^qzuOAmnqyIW=d$dJq7~^U%*g3=Uxqmg*Rj z{1F*lf5)T7em8-6|3;VA$wH6vsv_cu&+Kp_NO)#!m-4}Qn0SFsdTwcRY~ z*3;LJbjbe1zd+3OR)#O?g5r$1Y2MUC5R)5^Mtg_~+L7qzNi4#Ajzd0^EQAvXcrHU4qvsvUT|UOTid>g*gKfmJvH?C|c%g*q{hqI^kf zqc0oxlMZ(2oymf!+67Mi$`e0=RR1vH%l>swwsmENc21lsWE^$98+EV@s5b3N=NtB;A9n!g#iVM^ zbQc!n)B!BHHohOtHm+U0>es~U4fQ?G(B2MUmINcQ>)H zSrpkmwlmiDN?2>vgOmPId56p$YlkHy$cehO5%LLli>AR;Lp?U6KGs`F(qD%aR8n5i zfzc)1_3O*iLSDDfk;OY+@#-sB@y{x%>+}6vb)*(Nig)W@ZlJ&aDC;Q^rQ*VUQ7Opk zP&jm$K{plH{K$Rb_z9@A{wE226h_a%Wf9BhQ|m+UJdW-YZ$nc3lOM7${L1!m#aVdR z&eg)|d&}IMP-SdpH#1})3Bpy!HtwT4D?p(cUr(D@B=u8ERS&7J6|JwR#NESLLTq-NCZyZwaBS-i4^UL*d6FdA$ zx}~ub-X2!FRLESNa;6O0qZr}vpVOGJoH9u48H@@of%XFEyfh!>p%w^YKv=VXwfTGr zUr`9waXH~~eNf59W>PXZrq@$%*kqVcZ*b;&YyhBD&UWPC4Jq(q%d zwTR)Qqft}+Fg3L}vogbYDg0^wue=W~%+k^){hHFv=FBduwxL7}9~t@}5%(U;JE@o( zK2eL!RN%HIBO~Rp33L83D6Lm_Jtf=+(zC8GT*6+-|wTt^L6nQDMH~&OY#j`FZP3%m=?C_ND8|912P1+FWk8`^UR|t)7 z^W{ELBp5$o&QkgV3tHR9u)Xq<5W7h=!cFrn}WZ&`Kp{KM6<^~bO z8qbe`qcWh5H>R4+U)ztN!_T{aH8=|Z80mj+w`m0b7qNU>Xv^H*!REUXBZhAn#BPD* zT>JVLK3JTnvjiVc67uUNS*+w`T(jszM-T3PFM0M z$e;sKi$kKx-0>h)hty!~WB)8)Jkr*Ptk-s7U~JhiMSK8iaK*96U(S7>AszxXpzi&b zX?YZu8m20g8~9X7;w+(^I46^;#*V)_nA+1ARvTwkDm8)=c3gn3sg_gDQoITPjg&TQ zo__uh0dNJZKX|%uVfNl(>8d) z;a!`SejB@c%U7*ISD~+LM=LV00Ex6zqg{FGq#lZ>p+j zkIQ@MG@>E|%*?-3Yw%q|!RNkx99GeHEg_cl+9dE%8vFf1FKPIET4=JkEr`)+^I&Aa z-YF;eD5B2R>uerdJ=*MKZ=X|>@1owk$SztY zuTO>-@?PH70ldPIOfuj!^!6x}mU(7zBwBk(0I77pfF&sY(lAVa5h~I?$|xA*%MCSr+7j_H1u_xIFGOi8*uiG z7d~9#?>Z!&~M+Fq~U? 
z7E(=NA~bVX%I8{zeG%t<*8FdAa1T&RY;USF<3+D{5D-4y5tUd`FMafLiwd$Dhv}&K zjm`WGjVj5N)&}T_{!K7D&<4i6C0W#!Xvu*f?3C2^$}c=_WP^wP1U+!$5pBar8m@xO zgJLRxTJo%+B7>^=w_TIfBP*l zMKWrd_7R*t@|Ow2dOn7@VOyt#@3+eKXkKq}lGne~sO9&%zH9KF?XsxvZ-0n;dai-N z&iER8M&nc76!F_%$3MaTmVj9X(vW%#xC4qI*Wahqw~Du=KEhCs^aY zxT)^}`~*{3b1ZHQJUlJ+qL{H8_kEulr8O^zdpvU1c_~%-%b;%Ol8^7r0J;}O6n)w( zwEqE8w!R6$n(RQ71vj+_C+-l?Lytz+U`g@j7|){0Bz)#cQ4j!QSjC%=bJN& zO)o+Wfg3cdmi@MHXk6yuEOc{`!c(!X!m6n(R9p+Qi9qbPizaznbP6qade%+sN7r{F zJUXjN*AN%gM_&kllupD~lfES~spbx9?x}Gzd9w%aMiyZ0I!v%?%dIF9?jgQd`DerS z2k$-g$JwXy+xXz#w(wPxzw*FodsV?CpT1KgIjSrWS|y*_W21GgtJ@y@1E14&|EJm4uNMz8I* z{V;h#Ii6MB(oIgPqKc_$KZ=nutFfAyk(A4(3Q9l9Ge)z~oS|!C>$u7-q}O3@zO7D< zHswwYBTyaw@0FE<8W2Vl=&H@!fF|iyUGM|#=vUL1(Z=VOX~5(;WEr>Wxq3;}564c% zyaaP^3?HitWJVK$xd)bl;-#_tY$({{Wb@mWg+2Wz7jC;;iXt&qIqSV1FwG;=#X;W( z$9HnYFO_%=n1wFM4r zN1m|Fvh3w+)UxCVTQ-d%#9_Q)DC3j*oxaVIYj;FMz0dqxT}|X*)ETLOY-#g;OYiF| zWaG$>xXfmm4JdPLLsv5FZ|JvV6rCpYBvmYuK)4p+C)ed<^10{&N?Hv1Nx8HDch@XT zo!N$035uex_lupFLC61c46;$_HnU6QD{zRJ+CW0}_6CtY{hbZG{|J9} zHO;M0aOl?E!`T#<8qdR5-!5v+VRz6S<;Yma-TLUcvZX3*&cWKM?@Y;BYREE^@dx!H z{b__b>R$Vjrf$jY!;T9G;e*W@L|u7{gv4TB^_p$>jZSY5e5%Jc%S$UMWAPKB%i*7H zryM;B;XOvA+E*9SbFC}F#+^c`E(<^lF6C5?;(2mP{r9{^eQ}a7Ia5GS6OXJ#x_B~y za1rq<$`;&*?1RV|)6id~MkJAN10lZ2OO#!d1U*l^}i-WDkP@htnJ+`@z>4zqAs zd^b|sOkH~{o9Z%X6B$SAV1{Rs>#OP{I2hO6AHET7>%y(S^~lxgdyXSq>|T!0Q=ZJm zAm=}t)CsP&F>O3>^M5`hZ5itN-SQ|(`WAl-$GF4#`B6cDpMtj=%id0Q;HNC#{jd~5 z1!RT_+nxRme+>k@xJrOEDnk&;*ammXRQjkYaJzh0EOD?Gd3TKm5N)fWZYS=(&%97N zAnkl{dR78OQc=&MqD_KH2^$VxM3dTFI z3>wwW(Q=;e4mjnaYqzE{ETeeG-M5v!W!I}hL^pppsKE=v#sLo=NjDB5JzbZLKsAYz z_xC7{1$0Ey0eyW@8Lz+mJO?F^?`qfbJ#N9hRbe!pyw?N2cB~=G;e?yw0Asyt8 zRw*w(Wt@Zz%E7x>n86md6(}0Jg}G7QW7|c1rTV;UC-7QNPQTicTYAYfHfZoWiXA1= zAGxEQvG$?$nMlOf=W02E^oe76(_L!zLN1!o50iW2shd$_EEc&pu#+LtqsDU8pJfx< zI&?L&R2^(-sUuYT#sI>{e%FihgqqW@x(Y<>46 zbq4nIK5kMm98IW&7rel@9y$F3U~`fypgIF}q?|fv#=A-bz+;1yx4lw*YFP}n=M>~| z``11^qYe8T8)*1xO>nEoo%(R2ROGeAce-0|ZkC*y|5d$Q>OvM(9uP2Bc#6VO;KJ&~ 
z)w824W89S5<6it%6~_$^^rQ8NaQb^qD13Mz&G6US-RQjci7n7fD>OToF@-v1Rom&MA3kjdx~6z z5*p~efAPL}iYIJf2fRG_#4fxt=H{tm?)4i+ zBXO^*G?(kNbK5o-;QJaf4D*Ut;~ACB*hPisrt9&x!8E(QFvTUYlPu4lBP1K&r!T2H z>!})WUu8u~uv6FN7dyf`tQ*LZtwkcr=u4Nx(#d%^c-G$6o0~j{?XSdQ=2}wgx4$xD zQuxjB>ZTki64AZ^w#yiV-tO!W&{hjI%QyTTKgV{$*KaAbE|ZS}M% zwdJ4j0A6T|TRIOVLbh)T) zEs}QuyycO4#TDiD(kk2LWD$w@ZcX;PMf0{CzjV$~CsizT(H`!MAEZdQ`O26qV_*yd zH$lxWC%`u+wjAx9wG8da}-Dw8EuO2GwJ3-?56n|*c z0dML<_E+m1Kj_`|Duu)|e41g$rXglk2uL0r1mL-3m{sRi1QY(p?D@BSwu8Hk14&Wr zC%qtoc#d81hAGML+R}=wi81~``r{gZ`x_L}BA`lzSbg$%(j}lL+P6A^lBxe)+KD=6 zz?ocAW_OD$C~|d9X^aX`w;lEAP#Pd8r-Q@yxMkMynBG=76L!)vR2|m-3!!mLC$yad81ob~G4|xrIX)>_;27AA^r!vF)Dl zpEg~&tL(>)b;FOVt5>1jT={jT%j-5dy$I#a#9A$yZBl@@a}E=&t21WcGtykk>D_Xc z?nRikwy^M|8??-uEpy5#4g#Oz|7-&M2d8IpWr!664=3>}pxY3(?K6pxx0QnGlINpgDephXJ|KbL&{- zWz6?4J`QWNP~VgV`kmeFPEeBY{7A+0msNCS8Xzf_E6HI1#jMcf8GTPst~cGa(R_h@ zt5(wiF+K43x^$ThDuig|gT8%MnRzl}lF;9%cq3K0x`*tcl{~?*NzD`baOIwA0ptwcz4~dwt-BxBe~* z-7&8_(jZ(fFV}(hok#x(@+-H=i;53?NKxomxUc!k=@+N9IXMo)G8g+p{36K}-Qu5T zfv{?>76*Jeb(tawp0TyGrmx7*bzYGsV-Nz=$#JCt7W9uan2k^(Z8WTb6|};ojalGe z2=6QJS3#SyrAsOm-T@1Vr;HckUEX6tn4XVe&Y=qBG)w1&5agTaqYSu<0j$d1#b7{f zNY>QwM@U`z1Uf{KW+D1iC?y_!@fZXaoJ(PjCh>7%Rk66qGd4W8%87WI9Ayqf+IeN!Ji(U#LD71dl{ zxiMP2(L2^o9oAN`4DrD-VV(ql-KJPmEdp@WpCkKlAm~qsDSLuNFQkLRT=+FiiJLd! 
z8_dLIHKXL3Hrmb3G6js14;Y(5^T1*s!naw>)wW(h-fO-%LzY-eYFhKlnr?!%y>XER ztVGZt!x4>H9RNF1426%BH(hLP=B~u<7V(a{CoBYDk>@} zsAZk9d}+SbezY`#?>5ur;3$BWB$VN9SZ z>kAXOZnL%Ab7Jkj{VHdEg@e9b-+1}A&n*>E-!wLck+eYnH80f0zyi{J-2lm4G6}IS z_c(Hxpd!Bs-`|bxZiTB^7c(O1cZn8$)p@bqpUh1067Rc|w!UEn!+2kKZ?U0UamK(b ztK`ZW62##_rR~fVd@dzb*NF;6(7V-apk{J>9`a?U1eAM6I5JR=@zJ*=CWkQQ0_i5AJotQj4({ak$p0 zPdQH?kL&G$bDdEc2_%nstmiGrmZ!9ny~==9MGNH24Bcu^LqT|To_=8s4XG?h3zGb# z(eVQ;gW!H%MzV&~HcR;b@?FL3aG#+IDJt)Z z7Bc(0$1ZO}XTOT#iAR(m{z}n{RQ*F~WLUirDUUwd?F6gI?l75(UrdROW5juczY!el zw-%-CNxBt@V4@e$sleuu!)4_jGG`reUlDVDkmNEC!popc{w}pqKk9zI$u-H5@xCll zA`ORtEmj3A-J;1BR&B6ZYz>K91dIq4#h2M*W)iFo{8uF{YKc|afj{6yNDt?Zl~^Ay z`f`YHA$2he60p8$Tke_EN@E%zT^z={lNSnU6M6<_0ZRli)T)8{kTeNH#yJ#LEw(#2 zZ;i&p1=5Ax2;Eg14zf;IH}}n4uDBXTG`I2oYqwU|wv*36aEFj>|2M0be;#j4X{L4s z7YRYu{U~CG-dEXnwFZ{cAK_*iDhr;-Z8RSQpP76@WBmaoxxfLaDh)_*n7(RfCY1$L z(`3a-7QZnL?$hugOwfUDU1dp5J*KbcNWMT`pd_N^`AY7qH!&18eJMV9m8u{#l&d{u zOp4YFn!>>^vYf#9QHAzrY}6iM<-~7HZ^Ee60_tqc>DO@v{m?Np9~$?V_gu|Hs<1dJ z^-Bfvv-8-31#M;@_Zc>5Ei(}@GYxD7ZJMprzU~SZW`j~@1q+c2-EjY{=%KM%AHqhe z33KCvc7Aq2HIof4o^J?YRIxMg!bCY~ZyV7+-f4pAGK}<-GEaMr1O=IG@=;rhiEE~e ztM@8f82kdbN~d$&e!9Q6NZkQ|#^Q--DrC}W`p2r*pp+WSC8%VX(3HGyscii#{Py(= z&5J;hV(2=+8mz_V0P#_~(Z;A&Lq4|@O62M;PnI+K@@zUnnU!zt3Jd!Nu&}}F%7 zr~$8V?B!=#96-jAs_X~rx#D0%?oB?q1Sg~QSd{OA6N)-u4X3T<(u}nb_|4cMb9zZ} z!5hG*;Ru4hsx|~HOyfn*cH3+gnvlP$Lspl#p>*;9L;*V(Gh#rF@CvYDAY)%)oU3NI zWotIO0B^Of&yE}J2Rhdgn>1B9XH5lI%>wjsN9oo1uDf9M}V*O zskR3@-Nw0TNiyWnDSa6p=Z?eH)qbq5Fl0b;FBhBs*=H&;%h?E)Ffw2yi(CSqBs*4~ zQjEv0e$$3IsSeC17S)jZWa+*hBa2?OO96;3Qz;s&0kca{1vQi6F=Rm*Au;kTzwzJ$ zaQIr3GtA^0JIc4AVBhpwAT@&BADLRt%)(RYS%t_9$9v3Rm93mt6VvKYT5l7u{Lb%8 zHNrp~Nd|xNoZtn^`7EqRpAxs-&S{ruKj9#kwSSXuxFTaa=QEuj18!_VX@E*c<$J%U z_|h)`wLK!-@c=GJKwV*R{7Sg29PmeDHi_A&oTAAqw9%Y z5b9|4r}4#MAMdOD&%Pg=8|L_%x)u> zM%(WzGxX7aEpNzlrKyNR-+5_i$ymk9cdUNCynAq;7h`J;4%C;)($#Pg4az*CY=Hro z0Sn4tB$85z^1)bd@-EIP)U20~C@mdOBL#OVxzU2IwjC0&x#d6`N zPpGw@K5Y*2Qm1>$)FqE+*v=jS0#S{C(Iagy~W6OhjJ*u7}?WfF?^oziaw4gQc;@p 
zcG|8_1rNN>|5^mlo`O4pYV$T7@h0pKPBR!E?J>-kwMHr{>?>Pf|g z(XrxeQlyP_iZuU%Vcp&+KM~i+R=oIpbsEuot^0Kk;>2lAP`lsvv6$va^g_Ach)iSI z8%x6%fTgH+NBdx9vUCsp0EqrK8n{t9OCm-T;5<3TIXbA7u$9}P0Sm*#nDdcQGWZg*_P$S2Q%vL&1j@6%VurIOh zLHNH2HUw;+^#1oQM)0{y`7Z-lj}o8d;;^o^W*D_t50v-d`#rk|*?Vd#p3gRPRFSfs zI&he9#e}3~bblrw`FOyl!^W%M)CEt3Bn>%SR_cC}HEoSYQkT&tgL+3rB9Y(r&|_N` zfTAI=l(*_UdHQEzcmn@}xkqqAN@nqy@$=1|j&`lu?B_z%BD8J;->IVkRrwA~oSNIF z6Op$YhwkBXIB*@XeU;b!Xo{~Wz{EqHjN7Z*nHQqPte8ugW(dz)L8n+E+y0>flbAb9 zggmIunI7dxhN*Zh*K>`mtrd@PF5jLaW?%zis#}iRQ)Deaom-(i(cp{B0Mt2bDEg9ggI*c6xIt`?wtt9!et=VY`fy)qKwg+qh-MH z>U{#B76IPACT0OL%#FiW%fHe9ho69kCpxMU*XNn#I?t zkDd=%as70fZs&JqP=g}-SO9)}pBEnk zov;Bj3tYviD#PTV%axRd9@q(pmn+1KXzRb(n}S8tCKEQm2GR`iWrJd(>*gJ@+TKfN z81S}dpwi?~8w{YQFvLxUmxNfX{D}E45&8&hDXe54>_yx3${>I;1h%@E9IGkvZl9We>U=j)fj+4FD?P7dA`$1FT!Z z%353@xJ=QmBbY>m4Jmu()p7|cU_IR=>^n)~8uv1iuAs-M4FxWO7ZQ)*4A5@e>uzqo z(G{Zx`(E)i+g8u4T!eaxZ8<%x2LMn2qgfU?58Wio7|+ZkV> zw$~MpvFPM!4S-jYHikT$))UzYNC2xHMKf}R=-gQj7PJau!hGH^(YL`;zVwQ2)R6w> zSXHL2#vG!2C4D>B6vZZQp)-$k&FLmW@7mg3|zdKtNd*DBRhM6=;O+Jzh#dH1?^Ex(05Y0R?8d?*W-bUEKyORRC9UT zWgp34cr=9WM`=JOMAbgOv++r?7#$x~drq82u34n`jATtGcgvGHNw_p#25l=FO9%^G zrPv~6yx7okZVr{0PMj-o!wsfRB(*cbp+^02G>jYm7E!S+0CGezA;=0Ze>Em-0Ek-v zG%mR*=%dPVr|VyRUi(lpb_AI01T0P8L+G4MJP=2$p`>Fn%+i%R4dCPx6H@qyN_S~a z-QyE_CU8&PMX7+5mo*uhk|Bv~v1Oh96~Ly*Adpovd;UPPOa*X#q3|^ZAzd`m|IlCC zVZ6!;Ql!j$J%0jkIO5V3(v6ORQ=i%mv8wxy>GspK)z#3}`-g~zMemj+0C{NW z>A2y_fN+8RGnRW4vlR=#cGXz<5wl~k3|?UiSj4)^9lAQL46;S>8yqR$j=9N5j1>mz zPOe@D&?kouixd#eii;h_Ln0{CPHy*2Y?Z$Q?A?agc)~Fj&`_?cFru};oo*mefboq9 zA%QnJqe^9T^8xM0J0luBL_8iGorhy3{Teq?;fo?Ztwro&d2Nk&$@-BTt}AetnvH}d z48C+?xP(ozM^bu{XeU`*Wa_(mf_nou=pX3@&d#XbRsq2GPo_28+rIJz)eIHEHwBSsEQrYrnCL9Cq&!DSE zKDPI|{V02(1x6v@cBT+D@L02fQ@LYL&O9LJO{yL|>$|PSpLSKm$HoO{+R;3!%Q;dE zb>%B+R5on(KNnM~0t`IG498Rui^Gf7YKPQ~ZB9uOAl}SLv7~reedtGlcHAN9=dU-3tJ#uspsM&Klu z)~+ZL3_L`Qan_vz9k9DdI}EAW`+XiMz+SWO534b7 z*kY>^waDEf4x)Q4UFCI|wgmG|;ZcGZM51VQ>Qz(|%$IYBEN&*bwcG?-2QFXs-M9Ok 
zG!=5$1gp2cRJ&Q5RX&FfQGiSLS-%qe-OeY=qUsNVsIUpD!h=9AuSytSoy8oy?j1P` zu-ESU@$wc+Ofo;)E>VaCVq&ZHB@4@?WGvg(GYX&l8cd2AnM6mCJKKO$0DR(KrdUDk z%ZS<20Sm}y2H{Nb21DeQ(5fs@U42}HFvx2&WB?vhU@=<0hDsO`UozK=E31dRFI7YC z`w|LJ(jDdedY=mO}~Isi!oz!6JJbQLD~HAOfz$M0-|gF z3qPmz;zWfP@rH5FrKo?l#8GcX!Ffog(fmd41_AaX?_kximWjf75XuST*M>Isd8hYm ztzm*$_TP!v9ah6>IdvVpVQ?69CcpUH9@WY?uN4SSkMJ>;oF3q@qx7w5gp89n)U2=L zlqV$X`7&4q*nRDwT=ZaujoWs0DnYdXyclGoEE#jkOV4PW)$%Yq8GLB@fgdl^SlG`V zIQ`X~{2V-g+pNRZkA`Ug0j^>}-l!H`XGy;U1PeZEs#}GZVFQ|Ga4-WOvYx7-g&cwe zjxnnf@G3FZi@Irhqz3G$f@&7_4EHi|18P26dxd)I;8i0f>an`lYP;@tlT#-ogZJ>t zU0|t8U;fUv^}2{4kS0!=fL-|I{f210MA1Z?j+Rb>F7(ti0? z7tD7<{>|%c5+fOu2R;pG0Q!4blVyt~$Ls*K_y~gk^FD#lr?}I`|G5dOrS_Td#_NLLJQOBE;MTpC?^TT{{a7 zU~3AMo9$+Ap|aJ)=F8N!UdSThHf#5AuZ_;1O3D{o$}?4dM0^BeVR@nUi&EGp3z?nc zFTfZA!<>T=<5gZ*O=a?sjNN9zO4auc^P6ltCS}$y&m?&0I#3 zlIT`G1=t;#K@+w~+1KAxiL>|T#frMc+6(|;sAQ^dz2pqV;>a~g6Y-kP# zMGPv>&-B3#zCQLZ2@HJwg5_5T>;d2GJyyGW*ZT;f zvY+^6A*xxSsgdajCk%Z6{OGFmZRib_^jiQc_~yVRU$))nc&HHY_4TVgCdXLjz5~0! zH?Q7}y<)i=%-k78gM4c5bp}-gHBX~OI+Y*ERT0CXL7b1_wy=F|Ib~;Q3fA)vUBmXq6c1pyAf`m z`P=`7iAs0^DB~SdordgO*y(EU`GY1k_UD)>zky>|+Q0oLLm7K6_U;(5e&;i_NrvK2 z>s;M2gNtS2PqW_rcg_0$(uDrea|ZMO=((XG4uiQJ15RGtMu+qg>rjzXvdRVBlL3$& z1nK?a^c@79`xU#OF|M=x;>c|T?f#GB^vqA+z4%3k;K3}rZvRXh|22kC>tiUAjP-!N z`53&Bq*r55DhX?{`o88?$Kc^6yd=0a0gJ|w9e5bRNU`dNLIUHUP@x>&uzwT(|A)LA zzB&KXlRx9!Y71CH9|VCT|G(7dXfKMDP9ouhIJ#6Ncdc z6bk$aq0nE#vSpY7#1;rR{DS!LZMyo)VIPYy**jGB`)@l`_JMlj|1b*;BRptW>4mHz zeO$3~_ghg_grl%eYmHUMeDTU=*mfxB-rBbGUrQtK&z89%|KUXNUp~OXe`4zYA)Wa+ z3|c&VK(3}cqKm7>xBTvaW_Xcv{-08qP=2M!&-BK>^O?^8*jn5E?oR^#GX?$y4gdSn z#(!vt$rTz*=AZBy5oEo-gTXqvogBQ4VXdFRN&lHD4fK9u^xO{PR0LQj|Fqtn|56e8 zXJcA{_Z1jS7E_0-Rkz)6`KMbzHo~7ZJr#i1Nh;PqCzloTm&s+M-ubg9e}48qDO3_R zUM~!lYw<;04ngwm=#t!jl1%@1M9RN8i!^&iCVgOmm~XUROO@xB;RqlhKj=V)A3k(- zH6r6KfZnP5_>&5`)$>5@=&L~z5=n5m`%?nR{`=Yt>?D(tGIsbhENe%If$dbhbe0hC zWe1^Gop%I~#2o;(iA~-87f_pY{3Pk?J6~BDNc{!!pK@?3UZ z6xBCsf6-MAafSVV(u6H2v7EoWj|l}gM+2?@OAi0+HKe3NJGE&|Sj*ws{Iyn)9?@+cWkN`Dgr0?kL^AYZkD<{2%BL 
z{F{m5SJwUiLvi=78A?l5X7}~GOPZVc$_cU;bZa);!TpC=SP1pN9+T3-?}x0NpSrob zT0eH$eg%DEe@IsP+|_g2FO-zTANf4DJ|}Ld{lwnd%KebIuE!Jioxj|3dTeWbNZi2g zsr$2Y*RD#*9TLB5ZD;$;{hWk^tn4B2TTn-+tM2p1R^ZXMtv&6mtkql}`y3L#Y3J_t zz}oc|)ag0Y+1lCt9C+XzJ4bhG*K^``93Q(|-?p}bJ_XOLVeM?YeQHS=2^E!dzb?=_ zY3Z8;Sdbsy{RdH)7$|(;;`U7b{`a>9{4d-baHpEchi&cmCd zrM_i_?OVa#EJGL3lj0c$<&tMN0HXX;_dOvaS_me|^oUiq$_W0u@j9`N& z;yM@aY1OxlJf$h436Zgq;18%2t9?@N#7{ny5nJ(HEPxb*zTCTf(5I5`r^RHf0`~7? zc@A0K?y!RYs!LQ}?`Emsqd<1vzQ7s5Z|q{hM$&(NzmAxX{)2^usJ2s{fNgBALE8r` zESvm0wImA*xCf7<5X%c%)J`qQ!ZMt^&UKQdj?uc)YydX_?WP_EH_8c|0(MT%;%WQ$ z@P8ZsCl>yO-rs@u@6nFqQ&M}3Zt&5VvzUq=9SRhkWHhzpBvSLOg%J2Gdzy8(fzPQ^ zo|au_?C$0#N)k5n4AO8LgHinR&qJbuWt|Pj_N+8|*`nuh*PMpG{BXjg1P7apT?zd& zRecP1e}>(%Xt(u_g_cD)I#^8O0T((09nAg>mGxiSh#F&4nH9ezZE(*ZuBs`pmHCrRBbjhyv+3H><@5qyXmy?;3R00mXn3S_WT zP|IW@?vAv>e82^*;Azbe@jtV4`<+&xK$Qulr)zy$C8)=VKrY8BmOp+yCiqENIg&~- z8cW}L_2;bM_&XhMEYkrL@hQpW8WcSnC^E-M0h|^_fpM^|v7kg05fnxJo ze4*)A@WS@}Wm?-82F^xtGSXpaOw>eQdHztweVn38{i@`X%5W7GU-(7M4^z=)4i>+I zp3D7*;Tsv?mKhiXtlC$4vGL&e*t-*uB*o3yLD9YsVx2&fR#QYHFG?vufvB*CPLzcq z-7hN%-BnchS_Ha7yP-aHcuH>-5cDE3U6vBc#m_5UT?P>ZAFEyPe+^W5ED$>4*1Ld; zKnA7O&m{qXu(6q34Xp8Py%&AaXmN?LI98i|bc4EHqy-<0+{8%Q5`%+OLW}*<&K&n5 zp7zrYd#pD3U|q@;9r{=#h#VbHuHlCyK`cg=pQqbXR#TIU%nJhE|0!vH3~y9r0!yDH zLj|>xWE|slye)r(E-zQNM1;=|s9Rv9TqYW=)j!VSkc>}e)nPY?pVt?fE?Lnwx?<%^ z`pb{MIW*_deMWwDVyt%bQtA%olB#g=_9MMaZ0$N80r4U>zfBcn zfa3fbQ%Q;bgSsx;nYejQN{YA&>nY#uvzi@jn&&zVKif$M=wr2# zie3?B z1H8K)3>)!%dux2*m_vU-=Uvk&fWTn5p_o;V2;a&aig@H6l;3`L@!p4Qzs^MAI8u^4 zwR$076k*Vj4WDZJAo@v82QvlQA3D`(M=uXtpNE*$Y@L%T)$Pj!jWWRe>bNq2CgBp5 zyuTP@sPb-MYA%=0T`#?Vr4Mn7wM@Iw{I+bSdUUf6{~Dp2r_`sUG-mFs+`OReV7|e| zlBDz2LX7b1DtC!X6@$BK;pyG}j2hto!vv^zQQ= zkMdPCDWBHORB~BvXOM;p!tvns(3bea^h{sRKXi>9D!X*ip!n%Yy=io^$zjVfiAZPMnf{DW$?z2r z*f&11Tb!+QnSF2m?libjji@W3;N0VLf0nY`E+$Ms>(jNK{aI@*;eGbc^_Z}wx|d2( zCKID{Ag&|MWT1$oq1!~jxlSVw9qd7bgHa2zDpy00e5K;bTu)V*!f)S)&YvH+qVJA8 zzO)AJhZ^_oTNB%=h^n8(XE_s=4)#CdYr$jvOo~J7XFH 
zX!BgVJ=*w&0)YG>%66(IR;J+V3%O;`rKX1u45NHKrduPGK3vkY$Eu2~0?{Vr>Gxvp zU-_PWC;auo;5%ipWT|_;!%>$!=HK%uhOr8a);O*{?SVcNSkKc_tPP-5;;LIV7i>qv z3_1azD!}i0mX>|l$)&_+M`DY0tr~V$jckJHdA|* zZoC)xOFiP)m`s6v1)-(dxx7Z`N@OU2n~d%jGAqs2f9(Y@DQ<*7ep@Y%4;%eC58o9@ zsj{6Mz6wn#f0qClz-Ug=bzYB|z*cv1;QdTonYB8jiG(jQ7X@Y#o1uw=IcdRF1@i+Z zf-OgN=K3=FBEPzh6pv!JV5Zd@o^!nq+j6Hn930M~!VG&2&b}%=e1p~;&WEeYKp35L zer#BwPkX!ON+W%W`~BS>)?7{BCOYJK-=#jgHANY_)!}bXX@PEp`)`^oml?FP%178F z10K0Jqj_uht5IMnXXOid8@_wBFI=>e@646v507S&uUV5*08&*g z8)ry~jmYsYJ*$-bv zdYOzV4W#f5>Yp*aZNC((E8Kz}Y~EMoAv&-o&HwE8=_Bff`=P+y`G!|Kk_FY1Rz3|A z-CxXb>9@Lvy4|`8Dr03kkND<0o#*jO8Jtr=N?=xlR2^C)#OBvk#-o|wr}rkhu}{o3 za~OI-aR;{AWJjHvoHUg;PE)23nz5Up;=g_qto^L{iJjYuAv+EX!v{=kWybEgzD()C zwaGVZ0--0P+NJVzTS_0>GvV^Lg8DD(+5=`F=QBsp~o!!Y?=6*{Szd_wmx|tea9AuPPllJY;73G?n&fxu#rRbGgTje<@V)Y3W zk(YUVqTU=5P!9}TO*j967t{`Mj=Eyk_s2_#944421In17E}#t+$%bQfMzg%Xy|Pfq zUb8cdS5Su~Jjw~vop?K*J~&dVmj`e>e-Bv5U#nXp6-QKA2lLw!B+`&e+@~{%ALl>$EyQ); z`htECb}1r_q}^?YZeSD^GLugS&X1UveKA`DUZb26eEqY(vfg1OuL3T~#@D`01A=fW z^nk@vC8@{XND6H#mmdv{AA+x=`_djky<~JY6=G8 zQ~XqoK_~AH9x964h>>%d(5dz1%1ZF@p1iZpTGVb8Eu55oRa*jyjrN-5T_lLExa&D_l{TYXKTq9Y$!Xg$J&v6eHK5UZv{n$YYDx8eRo}UbUH!ytB zHnd;Gwz)d%NDJSRW62liAnLoYBi_)bZ6{z$57{u%0Gvzef?was=GX0bm4*-p$^H)q z#Q@0iLbEGlRsD#w3L>bi-~ymMO94KFM!QLA3*gFT;K`Q8@pSx^ovz10uqE;_yh0R! 
z8Wp8{?Tq9Ut1+s0ezI~wopTzHFSZ$&LHAT%1q`y@Tuu_lKeoWL$~w!909rEu3|BRf zp~{wHI9+UwdZ^#cgUwJs*&=P*FEob%1xG=$yksxZ8BZKefA>gakOCy9sSWJW_$Z{4 zmLLtdjlW7Oh;SPI!&&;~TECBIeCEypP{XvKuR#`K7J&#;c@GQUmsyV= z*N&D&BMTlBPubqM@zCnd`n&*#iHdcIq#0T7?p+&gZKvq;d(njnD$1uAbnP2VD5n~) zW1iyI)U>JeIOqZhTjyuZUm;%pa%IJ(n=oT0WN!KT+#in70EsptZlS2X3hNjV0X8Y_ zN!0!ibO`eNcG|r=3$~XW8=WXHbh%TwH8vK|J}dX&%3LlT=qWJ49WHWOfDiR%F7=nr z&Fo`CMq))XlR5!DY(>u>caG#3hI@@sm{|Fo^aMbgOUV!@&s#lqUN7V%7;6%P%Uc^u z#CDe^F#uR^kJ$)6lfxQ+O*N!WGck4e`Bwy@Y9RICiS#E++%g~A)9IV3OfaDY|tty*Zl#R3HP=mQn!~i)R>p{FGAxAy+ zYt9`2luN&SpyCR^xh|4$^*k6f_IeZ1G9L4(l(o<_WPwy&Ya1OH5;h3o6cOT3kGrIq zb3wH|R_O6ZY28aXhJaQw*|(o_MmIylr}65I@9IO-fOvfaLd0~I>nr^w`IExayZyS@ zu=x>Poc%)mn}-r*$)G63t$IoH(eV}6>(m8T0X2kk%%lCea+LxxDF&RH%l3VjM$L~m zXz<he6tZ(nvkre#cyS8+6U+I5E{LC!nhUz0x&Dz$^v;^QaaAn+VO-y9eJ0=MJ4= zc+kp6PYbRAa53}Fc$I)s#cPaGhyJ@of?iW@n?-MZwzQ#mXKY;FDgfeFpP+Ueer39y zD~?-6ulhcRM+2@S_FhxwQLYOy_Yy9)QAbM3pk^NcNR5wVA3l1mk<8!ali};W5WBLw zGwFN-!nV7n9o)uX3h?Z4@+iq(i=KPgfct)U6?h*%1>ExT6djbVZq!-i;(HIYoR|U* zN&tMYFL2*lG{~>3LGp8Yt1#QW5caD9w?7ozTW#VLGzr`SCoN;So$4|vz$%GJ6wu|)ycTwab>c1;I&iCBg8=>;Xz`-{w# z=Nk@>d~J|>Hgwzk?XF9$fYW%tE^KlNGLo42z-`?(TDbDFz!Y#^q7684b`|h`MTeZ9 z0S=m(5&#D%x&wEm;1R5u{RG0|p#|OgD}5Qf6~VD?+k>D;1#^HxLdrCGV+o`!=~EGC z3aWJWMROVWte7ufK(TlwMyx-SQw)G0l38J783$Ybf0m!N) zW;L%bu`^d63ETB~Z0dffxR$5up#wU_OR4OoN8TKSlPzFFf?FJ!k%lm@z!vKxrK4kf zx}${7trtMP*CRQtHCRuya8cK6_1EM;_kx)rbML^3F}@*ri+8i*9$x;p5wkG|ln2WV zUG3W2gofUE$IyY0Pv_$cK87B6+vG!6)*y{_O2+1)HLL&9d959xwP zB3Ht4n}u&CU6OFN@_GQugaJ%Zcy`uE(kAYE@STalE56#0_qqIIN!-^qEE`*0dS}pa z5jeaWrlx)Qc&M0$!pcy$To=#;7j$vZvtV-aei*j-r22OWw`3_;a6aItT&gEbIN`sc|F(-%RU0aqhT8!``wB8 zGqJT`KgN|1VMpOdB8{%uNS$(iRAN)p1<*Vevt!+zm_-y2;yJHbZP6bDK%Tbm0vRDn z*PXBii$uMC!qBb0AH&CARAbpLM|$vCcI^F?ccFBPq>2z|Z3eHwC2h0=E4zYjLof+d z4S-^omzUoo3-rIHN}gDc@7{Wf^_V%#ReGpN3{;Ba zPdAxqQ2?gCt{^$=<-M%wBVI7nCEY#jm+!8~OOCm(X$NA`GsQ@U#axA4RYp8}SRZ~m zj&pOQZXRF4Jg%G@{n{op+*5#)D$_BlFwZ+KorQAcM?JK+z*RX!K$oHwJvHt 
zj3**yDvJ4nyA>$?cz4WK8ZA5VEYm6!bbkLH&O`34as={eD<^KCHr=vCqnmtv`g2pM zW;4%hUf{wrCagzPH{z(+Tueu4x2b3!8Pm_a{_!{XtZa|!Sn&QCDEylaX0>cB$>048 zwcKg)6G`6fnS4UlhZ)J=6qT1df})t_5i)aM$PHa^@o;LJ){jXXc-&i+xN^}UaUjT# z4dmU>T9H~My?(ByT^lP7!a={!9at>RefOy15gMB+hzR_eAK|sH?25!z0I8cjkoQNn zM0DighiZ>LsrU@xS6uX%eB`HSFeZf$3!QE{zq#tP1T+}0zi%4&d`fIpN8hC?ys4_l zPAtYJt;-<~%9Jg{)8HPe)lc1QO!dwh-Yxnr1z2z>I1ps>RN}#35j(9o7l~KOjp#r^ zCyxu3u7%tgNld`YhIn;}73%g~H42lM+xu?+StLqtv4hb0dyP|{10YmoAPiY*yjm7G zNe3`UW22gdg>Lt^Ha(Pg%^PnSA&I%8kSb=P;v&;ovtJu~n?`edM)NKC*jqlqtejkk zarp>@#%e1`UB@b?w>RNWy4ZTUPc#NLS)A|?4iJhsIy*gk`9+0hPU2h0n~77SeckN>vTvsP8XD1nyA-+$qBc z8022m%zg9?Q_j^<^Mnb2{YJ2iw$shhk8~i;_Gp#6nc|W2Gsr0I@9L}z0Fg?Qc$qf{ zh<=nUA{RtcVExS-b7=-!HlJ_K-yFDlg9%?w-f}P)?=~VQop)=vILW@D$an%jQ-Cme zgqEVbGoS#h(PO8UA}{<=C{Z3ZmlqGGBP&6^G2#{s^(E~ocUSU<S&6RhU(9bM%>b!I3Fv~~oVo_?)RwK3G(`lpH!rJt2!~VCHz>R#nR_2N4F-PLhG*!) z+$X1#W4w=sH^+9D?e7EINEw%*8?AHMxi^!qiC16iBEKIPHWj?`NLx*073R{q6xWX? zG@ds&{ouh8+B*h_9PXP;KCjB-B-Y$;yyW22YdGuM6DQYC@M$tGlNPsZVoEF1=oWPF zaatqcwOKKdU^d~vk%V`JpowM7^W(%qd`^o4>FzTm1bhgDqQlz>lDTTY%9s?uH%E%e(CHPj?-;pIMkO23CC(sF(0Z{BJe&(@)X^`V7&qJHjqx1htY z(}7|u;Bte#dXNW6OmBU5umEJOyG>(x55~c&KIT5}+FBp7fIMgtdP(CLeWrT=#Q_NG zeYjwBEuj8vmjwdq<*aDMdjW=`#|H(}&9)B@ zKRvpv8^pTbexKyyK-=a9-ZE-P$^J2DJ1P&;npTceh;VelwgroHJZ@rYtA#$`(+e#d8rPMB%eK z_mG|`nK{s~RRFAf`5Cq8YKK!tWk+i+dFoWSo$6zKs62%D5U6>gDe`3MIqElAk=~+% zy8~A__-`vu-PGZA4FeZGTqS;FEL<49MMP#+uDqWF;S~mEw(WL~<8V(fX21)#9(Zbc zAaFxPs?PAW9TNhI2D?1Dc0hHF%qRWUO$SuNIoBg>avF2`o5S4wI`Jy6tBMLt{gFhe@xdWur>lgl3jOcd-sJnGb=01e!-V1as`!yDmf)VIfHoeraXyzKju6CN<5 zXsb2<@p8tsW;!q&bfhon=+a=W9=OKSUQ4kl3*i{x(bTzgraeg_ZG~_gWs1x%Pro(U z{7@NPUk|qZV=vx2ZuM1`jibtsUh&A4IsEmZQ}q5Ly(s0y#XQXZ;?ybb)`LHC^wF}?&Y{(`Ym zwp$E5BsiEB#*+vx;BmIWEds8WyHDKC`5+NMjpxM;FlgbBsMA9TsD?NOCbo@Muv*}| z-R+TgSp0MC%sfaFF6QUZZ@q#9{d+BvcMI{(07o)wF#(!xpXcAo4nUhAX9$nYVP43S zsHaXz`3=474E~hYZ_`d!qavk}*J)Pw)!D8ov;*p!foTgZnyTu?oq2Xt``JI7lFQCa z2muf>%|`n8Nzoh?YJqcJqNjP!T{EH2^)Fua8pGMTeezEDA}ma`X&H4O>3}SMu?8g*S4Nwt{z8SuqbX20X 
z>F(IZKnPgV%ZwTm^a8b%WX2I2iQECG zDts&HUFr9NDn*wn)#(YTcelov>-YPNh*lsqZF!_AM1`J?9bNlY*HWz+JwJgnhuC<( zQ9(`vsSIO?Y_gddLk8^0_3jqxDAb4^8#(3G3Ig`6V$du>6E3O`um3m~J>+fhXbn&p z4B5t_F=-FqVH@0Q*VKv-ZC8Z8gJ+X@dGZF{2BdHBrd%xxhqT& zu?sFOl7;V%k)K9ZP}A?bBJL|Puj6&K8LIarWHbmEpC(WYa6F(HEzIMN#$q`pO%+@w zzK@FJ3;Sv(Rft-M_(7lE%PB|rPDcgq{viZ#?lB~n&uXu1c9WxmX9cX3CrEFp&&og* zGi{LhPK<+5J~4WDt|x4(+L3yW7yP;2k?n@pR8}Y_{<7|!E=aV1rZL!J0#`g$BI5IB zTGj}RnQlE@#^It`PuGr9Ia(mqgKLJmaZ#Ev*QUYN-26-M>9rQhR(DpPwx!qKs`BV# zqkGUii#(V5XSmjYxrU&7%ASE}UOP4WGZx^AuhlS9L_`Cp4z4Z)J_yu&?o0E`F=#r! zXpY1h&LHv(@<5$AU?akx+m)7-&|GHE@SF}Yzoc~aYRu}7hEH?=f8z!;QIxjb0=1ZDiW|<4j_pdawz113mW$H^_B1u}!dS01zr$GKdI1bgUtE1@nC`&?S*Yfe2>aZCU>mp*8gnl7Nh{_gxV##pq^ z-}y*E(1}Q|cVov`hiz|}1D885MtiYmc`_WlYSp~_+dFz#mZjLIHo8$t2Ay~x{~n!jlKVlr@H_D!12@2 zE=fp=BoraL%odVOD6>dLM)r1ewQRDt>=iPPeN;vW*&$B$%681-yuOd4>&kV#KeykX zzdydW+xNe2w~p6%KKA2&e>`5Z8|H-<^VdFI;HS(S*1jRmXIWV=O64c=ePyttLAf4M z-O~!uM(%tX)gFN<-HNHBm!RQfd2HsaZqB_;R>^hj1iP_N3{-(%MXo8Mm05&7K!s+) zTfope0S%ec`i;McZoX>Q&y<~(Gc=2qJ+cQhaOAGRA^YY8{|JPBQ82UPYz-)krP=G{ zD>6sop?>#$^IqPEDKdAnjNgelCl*RUQ=&`yDS^)+qdvM8p;pzg^hnv`N&;>$3X=Lb zypkoMRo*tD7ow?xh{(6^A68_hGF^*Dy$yOX3G7c|PE+AW)5j2l%@HaoRV zg*s{fwrUTmRq2^0ocqE{Yj0a%1!|=#rstZ&==XQWE&5psFMbh!8Wj&i(h_8h}h5V9J@z^q{ zU^913$qdB`zc0?E=iVo~-*doWfg%5elnmK&YAu8-^CzSn8&Ke>_DWaiI&~iCh)`Jg ziwwMbyNKW_Odo96Om;*HuCEz&0pomWY*|}=Oo)} zS0zra)Az-14)1bF@!vZ0+v0GdIw{>rh%6Q)Yg^Yaj%y zo3z`@BY?fz@E>3Pxqww z7MNuOUz$53oPw!?@N1!-)1?}UN`scZ0u7f5R%&f&tSRd-&6E1@*85d9e(%C4@--&K z6DYc43h4O~phT?VxGH?ZDaUzoG`&|HAVa|jRXMM-4a@b;S`Xw&MbFsxIN^?t)rz;w z3Iqe5{^!O--4FUK`nO%c1@qU1s7BdO_#^0urH=dx69G+YQ`$-HI-hqG%CtF!96n?q zH0W~ibmgX=!8#L0q9a~UaLXO}m@wgu!9loR}SZ~nDhZbM#*ln;m7Ids6+ny|47E-vm zTYHjjQH!$3$#6DT7I^PI%VSP-fe&qTvX3J|323gnC-LYA7dnDnh|3vq!WHoJA??WX2o+lI?# z%$$y{dGFKf-rPl#@d(PEZ<^~ASe!@awcCth%CyZRBG#$a zC#~;WrKP@^UIZMgq1}wK=ET>!a9B2mH%V?wPTu`$Y3wX62j@ea(!j3X&Ix_$>Im}i~>M6P^W&GC-X z@P)4_&*LX_^qDoW_ftYWp=Gf&E@O?k%T9H1V8-^+(b>!+FYI5R3AV^|f|iy}+`L#qe18e}A^#IjNlcTvJH8T@ueN 
zD^&1n@m?Xi_i`GOYC9Y7ffH?dJ35hlR=}#7r6QzDW5?`oB8Yf{<~O zh}vScSLcdU`<{G7By`isggBp`!v_j6RIbs8&e{bdhED@szE%!QogdEqf=JNGqV4#m zI}mY}!%lfpe3~0YF39dSf0dU2JxJI1#0Wrxe^L{l-oTwtC8`JApx&vA{<$JdPr7=S zR~rdz?R0L_Usm_F8c#V=yr?d|Qff@ZnYfwhijjYuZmxhtXyyG|rWgeye9PCjOvxc=eqaJ(4;=F=J(}ByU`4G{fFP#StrdSS zUF+?wezZh064zgb(Q7!#FJr$xodt$`B8Vni; z7eb1XrTt@xi>v;*~U{*PI{YXTnHF|Ri0mLbDX=SzlyL^U}hE;)| z{j0&UvO1%45n~8MRv%j;4j_Vi@1bZGWXp}6*kybW#r<`8*Ogw=!C~kVD>LuG*abE~ zN@@Q(a>je`g4|G6XnmhR#qt-XUINbMOopK~kUW%0X5Fl4?QD6qmLaw}jv%fuZNr|^ z!WZf(fW)O~CH*zwVys&j%)sC&{|&u&5b|Q0cavwisMoQjKHlFPYMnPezZ&+p%g8*1 z6;D}y{)OHBz*N+f{tCVeJ5y8|v7A-B^wqzd=hft=vfO4wF2{Ufzl@I^bP)QX=#MF3 zO%o5GU}`8v6+jjIP)16h8O{WHzDjNEVfU{8*x&Ia(|W^s z$=2bg!Toq8IBu}oHUzs0+qpO_rSzU?^%di7^2 z9l~_L@pgUjT;rg`I%;VVkEE>lILyo}!$Qh?Zo} z>Qxu4S|oojuin6kVZH;53zrd^C`eiaV{BZVe$H#-vIHX0c7L7J3Y&`PyXsqRJe$E` zkZFx3&bb=7pY(SY5`5%vQeL#rdE{eJKQ4!=f=}q3I;OL!<3@sG2mwBia{owqa#-r! zaf7N*zc!`}mi&xKm3!%)$*Da4m@ANKD@WWPM~2#t>vtk`wos|j<>z419>na^9hN0> zHJ9CE0k#~|DCWqgk;E6tcE?m$zWUn|a~Ku@GpJ!C?`f5Y*{?$xw}ieV=PQ7yYck6U z@!aVR6FR6ot7eRU+M`6wQTW8-3H9pi%Ngo!mHi=cgKt=eII{*MGJV-boh{;RveN6? zEL5B;VuczmXbhdaW6#k_Z~N#K5DACKrJ$SJIpB~lC2zKiLTck8Iknv8hhzngblH`Z zOFo$i5c=BoyfXq+P<)E(wh)KKMSQuU6U0>F9r;8Gvx*T4##l{?+p zh?pafS9^u+;3wqxYKqu5;*FcmDrGH7f~f_W zf!*#KkrP8NSZ8m`i^l8qg1@59KQojWw8 zPu7_^f=neGk_$1_c#@XtG7qoDpDaLA8!Z6Gs~SnsaGWV%x^4x`7i)OG-OE;!8XvORe#kS z7~0p|QI!aQP;K8NQCA=>bI;ZE{vjtPQAos86rOiv0wL^0?5}kr1AXHmF0&&!SYh!g zGrATR1<~Viyu--(U5avYn;YN?q*!lc!qx@r=B)Pq8}&<9hv8a&gH$D{F1?_wOm(_coI)5&(_fauw}&N` zZd?2iv8s+ZBvB7YkZr!9^Xi<1%e>MB-Ac$=EOg7i7w^tt0`;*o{rno3fTza?6k>Qr ztDo=Tn(b$&aszzTs9utRL4P>tf^-`oOY8b|I)@H;3_W-;2IBxTiBdGq&S9Jy_xj3# za-d1gNtl`H45LsmOxlCB9r5UDQBuTsk>Q&hTRY~OWBV~q_~4H(e4jF_bLhHtc~IQY zEF!o}1{ymP%N>_CSmW;C(e#dT1Hcf%1n2)UIzz;a&kay<{18DC4{&i+Fndz6nyX>5 zTfXPgtjxL|KRuQ9nMOW5;MZIn`Qzq*L14yfS@jU}LFw)TSxu%@1uQ(dTCDoxnZs@G zn-2;h>bh8)zzNae6A=r%$Xr2ryvDZmqARm7fCbqeWbpQSyyxDnVC! zrneo~M;hJAY(YTO@O-o11Q$7mOdY*JlSmK;!VZ4rVyS%O?M85CCIPcauiG;x@M*j? 
zp5hnXR;f>~j)Ly~I3lON1a0OxH;UuPjEW;) z_ETE?TTroTGSYaQhbN32gfxcoiGkS(RpSKSc>0cPPTZ2 zc;iF3yAT?go58DBp8$I@N6ywg9Pa6WZ)K4Q}gbK(=jl z+eJ*a1ns}C&(v-B4Lu?<3?-~03Zi|PocYtVZy#c`*PgdTEc9V@CKvJO&L@zprr`e) zYK;w3a(13tLNcub9cpioFv}W~sn~o86Yi4|ShFpwb!gh;Y9fNv>qy^=Lz!e+v53XG zUWCw%psOPrDgzWWpF+7p_lx!fedt1LHHmQ*#$M^HL^RnUK$;Rme9zjJEv+Md7HU*; zE2a1HAi-rNR`S`T04q7WrzVT3_i0n{!1Ik_R!&H(vpd?y*H;_Y+dlE7=!|ZWipJ&L zmSKoGy~gFgxcsaiJgbmT+G)2_wnT&R>l}RMVeGz$(I&yG6aRq0Jei=RE&HZ6MdeyV8jHq6l zypGj)@6g?iaV_MLWKAAiW}6x;6G7%6?J95_do2w(z4;jg2x!2sNuhq`2)LY@UttZS zy>{nqWK_cAfw@I00Sm~S$u54xf*)H1v3(snkcjwt2ex~ot8G|2SGUmdW|czaRPRyS za`rUqj}op~>4!UNC5Tu+$tHcw_-jMCx9H+c@;_vkmP8_%(=2s01VqJ;Ts1CprRQ5$ z#GY$2)t`?+A{IF#BC+!0h$OfXnk;2Q#}k^;_^+G7VlxHhgA?vLpVszlEnCp& zh9}J?gO1d?9b2nk?SE90%##y{030Y!0<$bSBpy_&gagm^-Y+n)MDn8avWpNC7ch*B zPssJazsyxix}9Q_6%)E|vI0R+iosmnUJ1aa8QmP8jb|eSgs4_X&yUP)SgNGUx_m*L zH;dE`pTd!Mr}^vqY4^bONjI!9d1KFHj^62e*XD4bSGMS1ay*Q$c4H1u1Ni4p7%# zPa@qX68u+zimRcp{1U>wizWYYW{!vVzOO?3CSO)KA#7(=h+6civ%ds-@f4X>IGCZp zZ8pkj0(zmQeyV)gRj?>%incUA^WNCZjaPV!H*>R`u2JdFAuWwf)g{i&;LhUSE00$x zEg^fb<|pgUQ}H$-omJ)cyMJj`-e;i_Uh7L~b$0e#;+g#N4yY@g&5iX~sj-&|8<|^) zVnpC%p;ad?w_CHxNhn)uJxG^5Gm21#ypj6rRDp87E;E9x)tcJU=vCc$^Iq)Z+eJj> z$?8#lM4icoAE6QCOP2+P+Yp<|VGnkRUUht{Mf%oz@meo$0*+6grXqfzoew0LGtGgE zs40Ou{1($JX6PnmwV7Pmx4m0^It_a$7FeGsjp&{MG6vU%2rz=%#vYtzR()fC^2vUG zwN1qzyOQ+y>q3<_zd0b5(nKty%UdD2pvg~_L*&U_>j26;6~%s@lmZL@#R<4S8A{%P zg1*vxUb2IJ$SN z&K;ioK!{i?<}tJN@CN1|LCsadWe(l-bPi)^@>A)9eu|!UzB-+%Ngt5Nupp?GqqOWH z^?hT7H4P^@8RbWiC~;j9jGlgdok*7C)R9)yH(D%1cfo2ihfDKk&) z6>AIr@VP8xe$Qla;x(ydrNSouEo3v|s52y?LWNPgs@IrJ;13GVoE+WVC zH(!HqXgS(RK=eo1sBd+&il_q;=lYpCjZC8{=d2m4N4)L)oUkDbW*u&J$`oHW#uUdA z6=l!QhE8b-hPiOy$_McdmO%FMMPe*8lM*~Ie3dIEYJ!{EJ;NMBh@ z4y@#s2+Pc#7s)`m;}Qf))a4O1H?7O`C~_jzK2`01BJ{;CgCATuXU6f6Q zR)p_X`Q|3S2{ZnPLK#Hf{e%%sQXzj1+EcZW~?Ih60k!@IBP;+#N!=T5=a1kN#^i%!Yn-{^wC&b0X+ujU#P{bt}4CXq-pcXvIQwYZ7S~`DWl+><0y_5h2a;a;62y1$ z92M-s#muDA{0xWXEphNn|fYDNK0p;fcf*BZIREnSpk;|nZ&k}O9 
zq(IT`!XFIlSwD}{0K7oT4=rSzMtS7TET?(~vhND?IZ7}Mzhs5ZlsYatQL-4wRJe^% ztMtbO>)0#Mvo4ec@8lUvV>mv~ph;rURg^-PJq612)~AX~$}I@-`nw3C|lT-V>t#n7o!dt6wO3H zYxvEaaxoP%|D|=T@say^Qk+{o-v&1|r`c5!oogQux%gt&WAB}_=vyATg5N#Ww-^g_ zVZ(^R`1@g|8-1<7{O8wpe9(l*Trz$J6*UZ`w7m{Zn`g?WI@3Ww*8%8cdoR<^p?z-P zX5ki?@U5?IG)$+9W@iy{I!o*0`xDgkqHhfp0*gw{7fQuL7zxa&xY3B-1w0#m-#{Ox zF!w8Ty4r%Ofk|UQFwNDu9&KYw764sSDnSG(MAj4wx0*>#z|q?}$()X9qg-gJ1qa!2 zvj5limOZr$zf#RBmluPLxCl_{e`3quY&r6|U8gm6-LO`MtO1Dn6Z_AC@GTqu*-mlr z+Z{9)|Ff$mMSMx-^~sk^%2#lg+=;PF8ZiSuc+V@nzw+*#A}HzUNoS!i2rE5(TJwb< zAoD1TT0bo;Tux6e>{4CF0znpRnhx6pHS2=Us+}uF=)zD)ZzWxCMkBLF1zpq(%t1m3 z?I>B62)fNwNPm3yd1Zc3;yqYX#qp-&g;W4ecBquD(oH@6+BDvEbzu!{@j0?K9C&u^ zfnOnhS`~ys4-5w?RG2g|0Q@wQHv)PJ{!pIp!+vL#yjjFj?^{!itU&SZ-Gy;0Nx&0& z+df?GXK#uBvJ{&6))=_m-BDWgz+%?pV$W_;WHM?2ru$UXc<-~B{CKl(L83$R;2P5W z3|or7BAqTeXRM&&FT-k{1qv0!-_}kOW^mmJh8;~c++He_YmZlf3tRI%jr@a1(<0hd z;8cr;r&_}**Ez|*uJWg7r;Z_;ZY%74f^kA$NJR?v@$zBPQO)(QU(-wq-FR^3ZLz+G zn+^bg8=n(M_|QO%Rutm>F-6SyL<|YW9XNPmMu(AeFHFcXU*dF`k!Rm0IdgC{xqDO7 zqwM8i%sMHF35l$kq#Q=RyfC}rZ%@K{gr58Ju45-#y!Gz3AT_R6)f@S0bE<*)oZq{& z6$xpx=i!ko9mgaylAbyYZ4<7`UT!Q2Sz3n!&gf!0SV^c6hDDNy?wq2G61%NUT|h zqISXDFtp8lR3#HQdbQ|!U9A`hi1|>w9LR;03){c3WHW#9wlOnf9j8DIZ!nPkfl3Pj&75tQ&9eWH(Hz zKCS7va> z47L~Urzi}N>Qclg5`jWZe?#p<^XNMIPPGhi>iwk@9{za}<7316AWU39DcxZ_!_3_P zq1V;A(UCvqYZ+;lv0~C`gxCeWO^Gd+q{a*u$Z@4_O#`~=fAOsCP^&!ty|n72il*t7 z)vE~x!^o^HzU$gj*Y7b4wJ%SGq+EaFV=dqKbWJD@d73IVHO@rh9|H4-ujcMR(PZ-Y zWe~gst4~k2u-6--1Py+ioftw>Eca%ZUkHojTsR{aOhHLnA`GVl|_`3bT_7{2U43RnLdct1%pnYJl|M z)KYCv%S|GYw-w+%CDZ#r4YO^#0H@AKA;a4dt2hvmRc)pcO@Y6ba(D?*Y1vawe)QSm zx_7YyO-!6o=?5~{^G!!89!RTy#N#7e9tTA3p<{Knza7)}kyp>jki$#d1^slXg#V00 zHLx;E2M0bqxcg8q5_xlbq*E!W((*0*>#J@SXHA+({yRTly34bh*+Id6;%!nycKADQ zCHN=zjk_{&8m;vG-(s|rJ$Nph*Xlo2c~K#=z^`ZuiZmd-jmO2DBl|GANpnTJ*m0|W zeX8T4kUr@z>68onQQ+bE9W)JF_qE5f`=vo2FokZ+^sRSkT!jzA(9Dh(ovO0FY=;zfK`gvyGmx-d{- z`?K4(t>w6tfC4|OLqSKgznXyE^6*K^H5o322U+i9-;W_qs!B%!i(!r>nK8RsW4KX5 zRE*3LxkoeT3$)jHZ+EKhC*fk_{A@q2H7-+r{_Bja7<%NRfRx$sJ+d}=kt3RrS5jg& 
z%Q|gCs3h+VjXiOq1RG*FvCyIE+MJy64eMTfrXw{PJdXK4o0^cl%s?In)&W<5t z7tX(o0G9((5O<^)iJQRIrur2w-LT}FLeG#fD{&?-_bYTk(hW|_=M1Ga1E2(Get_G3 zMxon$PwcCE_qNL)QDKip+uc0rd%2Sr^Zkc|jm9fT)HR)*2PSD2g)2&9aPKojh73((lyUt$*MQnjs)QZZGny_^w{#R z(@klmbB5RekE^9kpGs@BPeQlIC~$cy?_!sMDrKvBEyKwXXjlPRjLeXAuQ+bn*}k&l z9(T6OXX@}GlcB-qWW9y~DMwC-o|~dkk5hXxzv7>Cs1e`D#o=B_0)OLbUZ)6Ct#(p4 zI~lDW9nL>u=AuUTz^cBcK84PUn?*9RAPm}-!c*3k_R5;c;&Z|)<9=#OPcz99-f4`S_|O#U2*dkU?hZ=q0<3B(f*!3B?~zH+q!CX-F=| zCkolSEm?Xy@-G!G@f~|;Sw}g3lc1k}Z4SLyM)-ajT3=27>=^U^;xfl^st{ucP!u1K zyAqo0&Kz_rvIhX&aIf#Tr<5>w?hNUryeKbvvx8Y)q<7NZVldpb0v|(~b1JwU)4-A) z18qx4$~h`TI=}A$1;nlO3|;@+5XEh^Mx?g93}oFSRL3Z(gqGxuVKQdu<;u#_M6(I& zCxm8?IL7Mc^i*O(QR}V>A1MrRmH_NXIYZ3V<;^ju%ka>u`o0JxK>J0r@4)*dif6-c z)Q6%OAhO`Ee^)JX$u%W>bko=x64yJKT1Pe$0LquZZw!*$?oi%}otH*$@p$4Hf#f>^ zNSxafb)7icGTP>vFFmXIH~`0dUfexXd}@$Y&$<5UB&d>it1y^@D^|syoN9}*d(-w> z>zjlnQsKbh(OnuUX2^2YNi7q`jC6hYwy1@sNAAqLRU}+6zpVG;w1wvoC!={FYq0Lk zlIO+jUYmgnv4Eb|smAB)jLIK|aC`Hga7+4b&;Kiyxiv_xz>MS5j*HSi$C^R~kDwm^ zd$XJY)7jI4#IQx$fy6hXNB5v;|Fh7=BWimU6Y$vs?7GX%wHr4X-42l-d9PWRk(;BO zawpez@Ma_3zWXo!v*oC|X&w1JvwdY98zgVqdjU_iE>3Ly-`z>Wyo=~_C;q)wFOOmo zS@R(l_5%F*Nf;_+T-^;?-ESj2nWxm$8MIolm#eU@% z08TnD-$YTn0ZiMmevWT>T0y*X6<=5Q)ZYTL#r&X-Pz({N7o0ZAz^jjh+L{{P$%$kGhB%8I8wc|D1ev}f^4&cER?t|*Zb zSVyu^RFZGF3Uk2^J2O)4=T5UzjMk{ZE`Z0do2ys5{?jYGiEFK2MV_u$jh@t_I)4Q^ zf%j%F_R+Sdb_<%`%O7)Zj*)1Bl|+*hC>Z1AEEFMBADqdAVL^`RZiOfoIUu!CJ3D)$ z9j(TjNf7zso30$1qiZm~O{J20+Mmy`$W*kqU_m!`NP2`R1h{&HDO%oML31q_g>db& zZwTE=bF{{mSd*sTOY31s!CS66+qvi|D_#}0Mch_(>T?Xjm z5~;kGV!ye9bJ>{OubP=69ix#&nwDU4ULh>i9<2xzo9WA}=}>^@43&#~{9_Zq!0xZn z4-ym^rJ^1;2B%AoHsGSK={va(Z{mX*_P^I)Cc3tGQS}hrPQ;HRtG8~^b!RY#%ng(l zUTt&g%t+=JZj+_uIWpZf6F+#z7rGDkF6oNF98L-c4OQiX;`ye}Z<4dzDy)(f2g;jm z)F<_7E;HP8-~6A=Q9=lk=D1{clunXDE>%9n1lI3})yVx?L<}E2f)y&)@&`9V*tRxD zr12=IPGF}LRdZNNar?zZ1ZJ-7P1`PbX{XA*)!T}`*7jHomdb#^>CXElnBlfgRS4spvMF8P&p@!lXQ)YEUPQ)MXJ_u`RYznMDD-Ejc6z!) 
zN1$bK)LP14pfZTTmKZ0fo_*xC^cz`mUz5&_(M5w{x13*n>hr#W(WZ{DGv@aHkK3UQ z#pXiUSMjNFCLz`55prS61e=8+<>#=;!yFFkYqmC>tIJ(BnS+3!-WR$KO<%e$r&Vp^ zRHYH5*#$`4}v!cQez$P>xK|FhhZ z4hRwAuBA3Wef!q%K1x*B!0V*)^XQ(u$)G;}nReFI>74N|p&Z6-NhAV^vPTXd*?_}m zi`gGx6ugMfbe}rggLC%g!&6ll|1%`gS?+bPMYJ!0zVi3df{ievB?NXHhz?C>sun*Q z5W(NEe%$1QuiE9BN<-M{)V>V+c;ZIJfvYV;!@(A^4m(Qb=1r*By#~GnawpznfvKPf z5mEeX#_E7`C^=)5>;JeAW<*!exkzUW_C;lG;FEX58i?Xz`<&bi1}-fJ6-Q&NV}Gg1 zXSXt^!Y89ZNrow)Q|i52(db> zbDq44APyTg6`#Z{228ut-Z~|b=EuTlznA3SJEy;~j3aKT9B7IiZA|7_s`CpG8iaL6 zQ+`F9T1M+1cD=vxpB+XgrU~MjDYD(i=sOxkDy(4|Occ8osS5{ZPdY4C|$ zQRG@0neC?#bYmSHtnvYrVM_u&a(CE9I=q0yD>O$F4FJN@av%T1S!;8KX|YP=nV zY&OMYx@B_8Lh+K5Ea%+x!J`Tm23-F?cft1dS)lD#-s{xDP&+sb>6*X`(&vj@%WRAj zZ#phHIXIm1R;4*AY%Q5n$A?(pigPW`?uvnXCA&Df8VrO1umA&v7Kg_hSqsZyr^8*! zGs$u>8XM2|tc%0e74hvHV|&4D&mEWvw>E6a?i%}=dNW;FckljbeA+siw9)4XgLy9< z^b9q-GHJ>}Q(+k)^1W2g75SukhE1XN&=gBd5i9Po$Kw3kzeV%8TbB8PVV%7Z^L(7L4`@=3E zu`R3QdZMR`l4ZtNC*8W-@FBx9@G&s?-ybJIp(^DoZ>%5ppF#9xCJ>7%tF1koNN{~n z7#1PE6F(ZZjNT;bmyL>=A&LHHEXcPQp56R=nb^GT;?agTvQsgWKQufpojOUhpErQ0^9zgGF4i{pHCBdZYL09tHRm6zBE-JpaX6 z#8(qT9Qky3b0&X&V|xF}eCUJc_$3iQ=)-vvB zZtfPRq1E;4Bl{UVaAqjl?GNUF#^|!*-nn=T5phNObp|xLKg2)iTGmw2J-MH&x0odD zS0*PqTihAv+GT<9nV*kHjUFoQ`|&g<3s*2XacpW6?b~XE^>RxUgI#r7*_n=gMY`IK z23Wcx=8u!J6-?JkaHDIR>#mhH>1+M_0&e{8%Xj7Fv2&Fq;l%3XB9tS7+UH((TJ+j9 zwV-E<`_Nn6XpYJ`u{x%aNEsPjSoMdE@jpoj6Q29UdFfiI{|3Ekm^rJUtW@>7NR?l2V{YO#>#Yi7V zq@cx5^DrG7rLo1)PRT^=trWSU#l_jcKpFn<^%uHB&h4ZRF z1@X;z%y{u=8!zmzL|lb4W_AXXL+ojJZTu6{99hW8ma$rl+B&d;7S_Ml@z->YxZ6}m zy!mligp`Zqy(B>wbP5dLll&MSRYxcw~T1 zF)WT@bA5b8#W2>Yp*1F_l(2}2b!1__ko0J2EWYxiNn$QeuEF2axVO~yTMUPRL0Mbd z#*A)oaK|ViMs0NL7(qU&$c)b6`AW%XSDTz!(#pD99CDK&u^r= z0Q;`0a_N?R|L9tN>8~Mgu%u$UEvNq@Ym~dwzkLD!?ztB+VK zYuHfdt0$FF$;2No%0cufoH2gd+vc3LHRipdLMfy?Hxz%J#TZn|Y{LFj#`tWdMvv#M|Si?&Dc_G7>whT0;xlt!fx8km6g>7M- zu}0$f4iEXw*p)5x@je@4m(N^*bSF{QMM+;`o7a%k{4^EnvubiE_DAmU3O+x zD0^g{)q59Nh>Qnj}c?H24RBQ)`?2t$DPGuR-vg5}-eNRzg5~?TG 
zxiJ1r8r2@J;nR(KuxZjWRxtZ`0e23BF1hmp#kzj{+$HRz;`^;=PG>%OY40s%JI8%${8+cm2R^UGQ5*Xxf*yHKNew%@Ir%&e~0OY57WioXwS0 z)%KfBE1Df*Lwloo4UWcc@p4K>w4gM;`XGna&tb19C} zlc8%Xd|hO4EPEhr{M(QIJXEiJsnJB>hpM8Rn{2-LG~vKPyRo4ZW}wu(MSRs%t1p(t zEeFIAd#e&4rH({cbnU*-YM;c($wmbA5BgL_uvt8M#;*NyDVW%DRZ8IZE}I2s__FN$ z-ksC)kuKu5l+*0h6`T{}Pf!uD*S`pVF!wdb!%VIv@4})zVEB{~JF1%)Y%ag?OqY1p zF@%>%xO!7SQqL*eu`+%mCgW&&fBn!Z18l&&x${I6yx*4_O%A!eb@>VQ9=fr>>Gr%= zdQdy8R)>2X;i9cVmG0r9;1k zPZIl;y)OW(ko(vYc&z!rH1%BPvp2@^V=`R=N#K>?!U%_Q=7zEq^-B?($ALTa$^AAm;Sjw z?k)U|)q77gu5e~EQ{0eeiGZt%bMMg} z=dIn>nz?F0ZOYhcPIiZy|9k<4;z0FBW2*KB7)ZO6m^M0cHx1!WRSqtH@k{mg4!p*B z1%(>eF{q!@Cxny@4d!AfYE6)5$-}prfT*Y29?IfI4q=Fi@z0~qYl9=9NwEdvr?RM- zSLOXw7N%|dn{ofKBCnOlN0J@x1O zQMv^Qm!_wdw}M_|4OyO|{FroZI{Dzy(p&o$oW)S6Q(pfFm!aP&j^aqynyj@J*8(XK zxWZNLZN<} z;dU;|Rgl85H7>oMb^n8!cFrp&cHJn7s}_83UVj|@3oiKRUl;UJlDXd+Y-_2N=+H_g zIUPJ2bK?ytV8z<-0hIf}JAXXjPgSDzxz)UvPJSzgxoiif#?YN7J;eP`sBrfkr5G3< z2HTM=^kmg~C{lU0XF*Lhel77J_yozF3mpZ2;%~C_MAFWqPV*swh3tWD&iBU;8PB0m zCwHvC;e~ysV03>#|IRbfe>IYED9e~18Py(2r+)SZ6*E(!-FN>0I3lut{pwfD)KNprD&4gL2-$JC z3OPdVK9qaQPU7T{fyp@ZU}Zn$M{xUGp{7*m>RqUxecP9)VA?{rW&}^)9Qu_lt!veR zkKn@P4{~g4G0VBc(0NO<0pqvTXxWoNf0@cO3XbO;KMtOmY1{6O_9HXqEt_xF+9~1X zzNx3!Lh9np`rvvpuX+(yTlFGwVh^fHeMe>@vPQ1W_KBJ``j_7gVAKoH20UKk`*!`~ zh_^LB;Y4S2y)eT4R*ro7ShnD)WCl-Y{m}1dZr%O9(3Q-4L2>exA9w%dR=)D={w=?? 
zVbDm1-;YJ|{qsj6#;P;hSN2bs4&kW+MsxDDBOES%j=n%l+l#6S+lfRX*F%{tW8*c- z%XY^Oql1p6ES&%;Wba%lAk<9G#b&eFIR44P&>@Fo=omAl8xkh!d2383Q7GJwvkX|V zQym0nGE_n}&j)FU0u7C)_)U0}qdOkunj{Bf(!dq|>hts(yIS~|zb}T=8wI(bmRjbjtd7`AZ=01jUR}c7ek75-+{)Lt(8J7LI zi{w9@AVYJxWvgKiItJNa3v>tnE^MEret7+h@^e`Y@1>uae&qbyMs{aEs%q(vYB&p) za}byJqTE;S&fF!YSL%oa?5>;{Dy~gxKDkgQj6%^vyBE?`x*`OpzUH@WyE+YN%s;Rt z)e1tsyTx)0sowoCNnE}~k3y0EzV+EjWN8wDL2<7-%kU(-P<8voD-?StOy}-d{A31| zc>OyL!_TmXwMms~)hoy!t*uNT0oFblevT+O`7N@DZ=nvtCjB2>a~>8a1rPOg$Yu)^ z^BpM2So|)1>@u{Y_7q9cYpC$Tg$QquX{KC!& z$C%0tL2m8VcJO5Q7s#5=3nfW&_q?-*$IQgXe+;v(k)@u2KuF!h{J3+gyXog7Z@ zXsGu{?{e7mbj>iUyZ2D!5r|7r&sHV=DS%qDR%sdQN?F>^7={@WL#v$3%+hJwz zx9NqmHIkDN@P<;q6>;A*9RI_Dq24wwF9__hD5scH1BH2)WL$XQ(XQ=0+VGQt$+(^e zHd-Gf${lx4jHb5FvZY zcxpcckuOCtLvJao6lcF=^4%o|M!LxJKSFtUMo*Ks+25F&D;o8hhoj?!u@#iwp;Lo>hkg*(N*1F7(-WS5{zaZ`RzV} z3jg`98!SHPcUnj~_;6$(i}Pe0jIUS==eeS_lW*cvcW*57m6~W@J1f^_{ey4W} zgx%Z!y3nOu-qE&~`xXKZJ}iJf&*`oJCb3&6)bX8+_*63KO^@Yyg~_54sg6H(wu3jN z-HmcTu>GbjkEaUmXCL;Qww2pBA?0tn8Mf^T=xfrLwVK|dcje$!Tq~5EARGKwHF@PU zJ6*oRlK9UG7P6k3(+_qZKwY8O(JIN|*`uRFyC|z1wBjDHi%FwUiVEA#hBqQosEm|O z`rN+hJhiHZ@#vO84Rc@mQVI^&B^H`9;oAyf$iv?T(fd>cCi4*Vth`CT8qCQTeNg`I zUu_@Lje(jNYlha{&H*0miy{)`n2ADB4w-v@S_wX$Y0P_e z@*%3~yedmPl!^4p(Dbi1_ za+hxoLjb%654U^!;bQA;(z0GaolvDXkTvi#H7F+-_2VO#qNY~Ct$$s~kS9W;j}%~z z7`T}iIwb7Xf zHM_D-v>j>(&tSik10R{O=(G0 z_6D7!A9r`|{j#`d*;>8OgLDajYhOx9u6GPvuiDN7FuJH=hy@<<3p@v_`azQf*#y1* z&t*Wp=xOvQxPnkWnxLt2>ruqoS9qALorif*pF1_jad|1IIS|!RERe_vS*w^M%wsX^ zc&LZUWkEUeIg}RgleLB3&zYJ$)vVxZX8%si`+(p?0{UbX7iNq}Nx7gj%7m1A%s|&L0Wz# zfiq@FGvFs1x^cq&uK(P3#(zCn%i|0)W0Jf6))FTW;?;cz#n#l4MI9#sd4vDo)~za7 z^bPELVmu>w&k#RcgOnF3K%((O>Tf@hLGlt;c})(cO4w{)Bj)6QK`=ce4u4-{+xXp~ zi#~Megu4@H-_Q~~rQlA!c4YqJfXl+MgV!sz1pp!I$L+r_&i7_!BIss{qulFjoPBv} z+xCEuy*$7;z_=^TUKeCRfU}YNeKw4w<~C(d*7T}}{T+(K;> z%Wv&goLig>tt{f+aGPs8w|Q|UlQ{dA)xz$B*BdO_Rt&>~stutozDmYyer*?OI5(?5=Z@Nc zN&6C3mfMr&4oaER>#2`P!^t=a(2(dqeyQq)vSGvMhST<=0K{-vJeou7f1Y-atJDi=-M 
z-1N;*+o3I#dcG0>?!>=CiR3#cEj(e3X`6I)G>_b7H?|s2H*ojYZkJms0#xPfZAIklS5t1*^G z(L?+r9FSkSa(F=U`CoG~R+XE{2nyG0%~MouMthDO2*IlR>{ArN-hrGnzkH#Tu`;E->`)8GK zT0b%~K$O3oC$^CDM*z$zW>bAUrSWLWD^U?|tV#3_V93WfY@)c!vzNo4ZxR!g?JQ|H zWc6QuJl~>xgeXm6vL(DU&-mvkPW_fvul5BS)Rsx`!|bMhxuyd;&&K?cO~+~)ww^=R z&cmybaPdBpxnr8h(1Xu>GWvzTYkVo^<(AV3ZM9VT6Li+e9jhiZ#|c_rgQ|v5n0N0V zhX2J?aT_LMG_AfO%_}Lo%TCmy-?-(1+Oy!jtTVxUIaRQ#gRwuZML7{3Doy0O-nLXGlt2|0P zai4ObWtQPLNWD8@`HeySTm^sO$lxGeov}}0sw&LiJ!nkh)w7E(8s0gu zEv6M@vPYGk_=aNF#rzx78%-L~pNjch8ir!dU{;$lO3{hM>A(ES7AiTan8$2>rnHP- zD@d&>W1r${_PAS0>U9;&{r@F=K+%!dBS{d1SUh|0*iU`70=O&mkMmodtFVzg(_dwa z{asm8Kc`xBhIuIwA3p98E5B81P^OjG6{<8^C=zGP%(kku0*bM+p{g*I{F&4|jARZ? z^{A*Fot0nnZl7J~bYDw#YBl=Df$2^Ny^&Jnu&0Zg%s^gqTD}Hd0@1SVW2UBWei$e| z{>c9oI?S|)u)9x)@Z1g8zxb^uVYdC3kkKu_HXij^xpvPS!JOL+bno2>6JCa;}VI(yl~#(^d2{IQeFVt;Oa=Z7o&rc=mq z>HsQ^Rbp+<%Sd*A{DdgxBwHK(B>4NRNR;rOe?LF`|e_6gFNLJ-|FKw-t1{JcDVnw?x)QIgV;frE!eE zvQ0`@JZ=bVQIO$(Aj6@a8k>F6frC)W(ApkYVf{{i-o(UlgkB}A7G#XmM>)>uIjCfK z9Dn>w1#6sLy+EdA*+s0Fdw04h|6qM7*@)vI0nJ9PCL~{K6nBYBH#>A)_85ct#)s{0 zo)EQ{Lr(hiFd@gKT07@Q@+j~DjDtqW-_^)&GwvYx*xI+@B#@TB>%6%@ z!j?jQsGVu)pT?qFI z+N0qPwGFA(q&Xht=)k3Kl?4QZ8sZlMKW|@k?&32?u%nwk-tP%%!;@tCDQRPV+dF$KCi2*JG+wio`T|DWh3%zp)l zj39bBC9&kJrZIO#3$EjiSiSwtZB_;Z%%v7NzxWY}an~^0d=BFm@X#1Gn)(ri|(&qmE~ONR)KEh5Y)t35&5bo>*l6HnZ`!0)uxxrP|qyeWB6B+H!(iC!ixgqKqeKB)7I>VZ2r{5)Ob0B-;X8w?K)dSRfq^~9lNbf zREO1Yf&>02=d1iw=|Xk=ZCNuUw1l|4>Fz_{*5WXc-`uEj-i>hl?aLAIh~)^OFYWC; zZjB%R*XS%9jHiIM4icgD@wIO@R3}(R%x>gV%Th~eZ#e7b2Jqg>i&bRBno|B6^&YAZ zi8GGxp%p|}%BtIAyISn)U+b|MViDy;IujzdZT@j78HX^C2zX<)|CM2=FgCDv4xmzM zdExGDP9|Yo{mtL#dB-?WwEF5o6L=5I>}$NJWIQ&TY$}8->h_-}*bgVLnxWO44Hit1 zlWG7$i$)Sl?5KRZP8@z+O}!kROY{u~%=|j-QrN#14bVZ{+caQUiA@R1aczx!{=AqR z{r|iu)_sa)d1%&VjPPL$UfUVdh9}xwH_H0DwJx5D9rWP8cd%bi8Xmeqq|2onCt_;X z%u~}m-U+%l$kJKa9i?1_+7Djs84_SWy4Lw~SMb*Pib)4alJlHjAC8otR z)%?Q$dAA2#V>pd@YuwhjO6^8V{~;l0>h**C?-#(x)Y4#b7Kk*ua8U#Yv#;$tJ&RY; ziO-4ffqLSWT+Rt7tn~Q1SL)l8Yn%D~1$htGMg4Wr;I3weF(VxMl8@;H6x%63^mkPV 
z(kY_q`22s8gX+3w3s4#2GJv=`a}>NkPCGhCM11s))VTa zk(?qTX~%1>x=GtC1Yo#%t3e#7cQ#(CM1&Y@{cDsZ<5&n5z?LU?QUa3!PSS z47I)=2dnlGEM1U4ykN`8x=hTzJJV*T0psb2(HoC8j z48L@LpQv-+mx+bN&6g|RIuy7>zHxeK^ZlNJ(U=1)QRo{qX7SXH{P&^rsNsK>@X6|_ z>$Il~%}Tt3MiWQrV=z5Qh&w4#UO|P#gIzvH7}e9&1Do zEi1d4Hf-%wm0<15V%o&j2_hz@2W(6a>rh+YR7%93**>e`dDTU@yz^eqC7n}w?oHC?0($GOOtF2cF?1!OKb`kX191sx7>WoS~UfU`IXTlloWzk#A> zfq3uPEvEm$k{JR57c&(AvI_niSxqwI$NM5SF8$9>K%2p%f8kpCNJG%~^!^k#Q>@q% z@R4oajv4h%IaUxx7T9W{36`oFMRz*$Tdn=@V^Gs3XyoR$#7~1 z{;A35e_;2&`qIdby)RAz}*P;it68r&H3m&LZ3^jQrD3oP}K;=RbdWiWLIej-53=dqslh zrvv_f*pB}Y5^tsY`z=g)|DxR9g#fCNh3a0e-O&2~L+Sk0(K1J9@lE~fUqK}j!v68E z{-Vi~=I*~e-IEVVAAL8sHa2Qe@!J7}h1~xF=fJif)w!AF*``e@q>mN=i zbJ`=-Y603yO4RS(oV`i%#U()9^}+L+e^5q$O>Hn}Q+##S!eai=d#LRP(3QFXAx7R`(A*pJXJ~!F9tT^*6p>n_%3FG#X8 zo}2O`AKcHk`5#sEbT%(ffxfB5se33tNEZTP^{N$lsLcUt8j7@ylLNn=xtuI`sOrPAoP< z;a@)i9Vzgtr82Kv@;O>`q$AB&iFe2=pwr-Q)sULEC&sI%L3Td=VxY|N4cxuQ+>}HL zi2B*TMv>7Kr(J&VKWN@%FCbF$FTTqKxiu@!IdS7E4@q4D2qDO~cmBmD53=9!^Hpp- zC5Vkkq*0WnDJG0@;!&6SVGp`G@`xgm9eXp9QZO>sv2S zk|V!kqzBtR;wWdlUO!en&j6Y$g#R8(L~B@HkqV&U0Zpz8h^{%G?`OFK2bqY+c&%CI z?=-tiz5Yw-9SY)pDT570egY(*$>#6Hq8GBI(qCC`59fsF*9(;hkLN*uk-rld>`Rtd ze(O{dQp zT~5``h*K^P$maj)1p}F!=nOp#3#GZKv5uWn+_U^75$N1)8kgc#4(~ZuhwajJvnH3Z zNRXvx9oqPJp?;e|f_G+nHp3X8Ko_I_4_E>6E&MU-&A(#CvX{p=wu_D8@}E~gIZnqh za`=r{yZ@N=!)tu+fOlZ%=w_<@))R})l0*L7`@eg`O#4)*wYaIU?%HoMDPSf@5}_Q} zkL`NLO!UT}kBUPT_>{Ds(;^Y|UvsnKD zM3`+!!91y+7|CzS{>}t$c)(h8r4{t~NoEl0IAnLj`q+Tzq30WMxIqfBUCDPeA{i&(YSX1Mt+y1OoDeuR15A8hI!lv4S<4h9*IcM|H5 z=apY9a+C?|FWyef$FA$2DZ#IR1G0C0`-=u!&Ix(8O@kfw9r0}Tp_3V>932US;cu;4 zom$D*ZxLJS*x=sQdh1}?S1V#Id){c+TFE2K8VGt~97XuyhJAq~?yr{WeC{*HbSZ^X2wbORe*|+``gSuSWIl5ceY* z_i_sS=UokL@4R^Lyx;3|Z=m$aNbJEGTE$dkI1^w?&*8Oq&KjD}v9y9wq2$k49^ZF#=(&9+E5 zJU8uZAak-Bg(Xb8S^KT0P1M-wdmi6t*xiXYg+*-0!M^!<)ZtK16LjtuafcY>Q%vTq z*vCdo9>H}B%Db%O1=$x)iL<^9yvX;-YIJnM0k^fQS?D-CxFcJ<OQLNMHX11C zK0h*7EFj7CmNdG`akr&l-qzJS_b{6D&!bZsCHhX%xRU+J&dLp##4Pw!qT3d(CtG;0 
z7IZG3wy;Kn0rrEWYf|-nWlcNQN0KLJZn(qD|7HL5B-mCKV0RsyU@~es%r$J@UCc}d z%{YJ+pR2ZKWK7rz+uZ2}YqY~Y%DY2*H`Bw*(F8yF%L<-;oayb+K`|*{Jm~JE&?7Us z50-*ONEYI+CZW!oU~lr zt*<~CT7R5Iqr*2NZ!;7F5)=jfSGMRPJouyPcbA(=e1#iD9G{FIPGFyH!|X^RrcEh+ zZ(e=SOnC*q5l_hsZfOQvXc|9f3jNW11tDSLmrW`sc-rXwv7tapBxm-|Bw>GR^iq>{ z`6_M(b#=C3ee?BB>Z`(1Z!@XZ>oJDKg@incgiYX%b>B0wnyQ6?CG zdiPZ;QOUhO(#}_)+Ql;CYtEOS-7e(dJLjNQnYDiteoS522i(PQZ(YjNPRVx^OLQP& z!)Ui*mLw5BZquhzl^EmTUaNhD-z4CKk<{q^beMG&gAI0i@<7gOoK;?s^Xj`x(6T^5 z6^ttF-6XtEd!BO5X2Ybx{`!+)FeK)7u?&V8X+TH35!%hURy$tYB|o=gp-N!2y;5ft11=)W7MBwSm&Ec%#I3QZPX$?f?K(Wv zuJX=vKmtAML=XxmWnok7hxr|w>0)_|K}@qX7GMUH^~l4RGaCY#g>7XE4=xuSsk=TN z$eIQO>i?h^QTIGC*7Xuv=q?9Y91gdgNm7Ge0QOTvQ77NL4vEZpP~B; zru(Yp*Jbv*Bt(C0fE#x)oRo=FaZ45^6?Alz+Elh>U5 zW||`f44QA3w+Np98-7f zjFr)4B~+E~0a&5qf1p8lE+Sim2T7CIV0qvxM#0NcS%EWwyafnmE}$kXENp)5z;$i; zM@>3(P&PT5r?Fq<`WUaC$84!nR2Lep{d4HvpQ~Itz5VA-XK;E@F@SKK!+Sc9jmv`J zdw33$S3SY&G_kL__KYu(L^A?sd1}Nhhd$G78*Qq6yp<^DZ3XI2WtkSMzKrS>EzhPu zgTv+V*OgL_4@m3Yx8>0}b$Y)*^_%B!!kpYI=}IvRJj366nSY3z#+n_xa2f2*<69y- z1LOy1R%6AG`le^QHO-!!RqB=>p$Dl~Y`wV~2?Lw5dUCW7l2c&fZbCl(DrRIv}@rc$)kZd&EisdT<+dIIncIZ>cwP*JU0h z&_wew(vXb;@<$Y$HX!QiT$B5(^sGG!4m+4-s71T3UpAvK@WN|=dOl*7J<0C3F2t+c z%{6AzV)HyF3bYc*2sat~1s+C#zHO)8DZJ`r!Uvz=eEzp*_Ca(qS#$2Xn!Y{PXE)Vv z?1){a(2*JrF4iyF@R9s_+giuLQ)->GZTGL-(EBoy$g_T8%pB=>1!KI~BvryQ5+d=P z02}dK)IHh`U~P>(mUe)@vJN{;V#(U*uUnZ3Dm_`oh18-&rxGEKY3>VKL}yfG&(-MJ z;)P1cA8F7v0$?I`U33_PQo`NXhK|K+>wxwi&lztvYMBf_!*R;^w=ote&+Zbnyu<>{WXe6olW7LA7A`D`gxO4_>nduq=PHO%x|zjtni zV0#)?TbE+qL_Ha!NI7ld|CwK1b;nKzGyFBuI>hziCFJkY;ehIbh05vsGBx8#I7!G| zl2b=b|5>(y9K?k09he08QtO@ctz+L2tEoPX6LZ-*%YC+0nG03j1p|)v(hp|j-UeqO zL5UCIdQ0{Sx;~+L9(@+XJ27E!ZV*r%KFZRmlx2w~wpB}gy+UWxh`l_huf{-@Y zTM@d(HkNKJHUTCm&u@GILuBL}!iLuP?XZ&e+Zu`}yB}Myq?J=iNKmxK67Ex*>S+97 zl4&8zm=To{aN1f56C>$8yB>Ag+VEuI#fn*F1K&#DKw^i`x?|J!Pt;b+{#OZT;QEC%pzr|VfP?Yz9!*L8=Li)#>B&R0^l#q|jo=!aRZSNMS zKqT&r+rUR^pD^kh=WeMb)vXYdAQ2ul8PS(nH;Rtf3gy1?avpy8HM2$oK0Aipyxa0# 
z`h@JzJU4CLzqsSb*nQO0-PCh*#$A8>c`oGv=h@Edx6a*q_c}W}kh1&hH3=0TwC8Y+ zh7XrR)`6m+5Bk7=DariY#L$pJ43{^TNTzqQc}tGU)J+jIcPz5^F1o4rV0bqnjki4d ztDkVwW@%O*{G5F=x)ASC+hS~T5FR{qzw*2wWa%wvA*SVd!qaueU;7jLJQYM-nNn!0GJL-Z2o0+g^NJLesl9h7BFd+=w?i(m1Og z1-{^LQpFgT@kI2vqjs5TPh}y2(qxoZLFVFcVP~!I9P=X4;^dN1Iox_NC~P;i(shZq z$BeoZE?uX1Fhg{Vp0&}mO<=oKOaId8mEO$4;BJ|Dhq*!ukwLrmR+ig($VO&iKZR$H zOAsV5rNdy-@>pNbUI zP(@mTtQ|^0T!<6PM-`jUsM_G90-|!mYW6f zaXA}vN$>JIwJV!}TMx|pQe=AGMcwnjdaxplpnA($BNqBlb*);4Le}8*p&j8`LCO`L zwXyS)+B{S(sm^D>o!`&bTMTZflo(!~mXiO(2_;yN&@~izD2iKK>lTEsNWkXm%E2cU zuSifb~u=1=N%KQ z^emIuyA=qWs^%m$JvO?1kNgAT!rWWQ$cbF3rQnDrN`L9DLXxBb4g$vb)vKnd>`NMbR?^D2@=_bZjIB~d6K<9DVlQOs7(La#^?AFx7rqQ zqIsEHfJJg8MMhZ+m1&~hJu@Z9p_QIJ1)4Txz5Ds2`W|_38<3Cn`>ariThFn?vcvJ3 zto*l+ZQWEBX>$UT-HYLLcz1ORYPkYc4JdP6`peDuvnCoc;&a6Nd*<5+#J zcTQeE8Hx!{1B(LHcon~g=R@X+qDF5RbrKj^Bzi7gDm`U}n=NQ-IOYWCuX}r|!<82r{(Xu(2 zt#~mreqkr_0`;kHI%Vq=Vn`eEe^tTj!uoPmNW>pbsS3u$I%T)s2GL9j&kJ zXNK`QHlRLJqS>jleH#OoM^8))777->EmTtt+O~vVZIwN$J(=8UUY=TF%~ zlN(%(($ClfhF+qlb=@6tV;+V7uI{j9=#)?tItv!Rz1@K#H}k>Vou_pf%F|CO;%sU5 zYtBsv9Ygsvy;rLYT%tG9M_xC|`gE@ISRc>kyXq`as5p@uk&#$?S!If6W00TdaYXEi zF(2_F@C)u=MOPsrm5jEgDbKsqm{7My+LV9zGV>)nJy5D~$%DEjqCZsGkL4s+|(1Wj>w6 zw%3b1??1_JcZF=gRav>H#1?q$sUQ@QcvF2ptY)=0_2hKFX=2E0v^zM12jo^nR3l0AcS&mkTn;usS+*JfE! 
zU+8fNgq7U0#|>(G;OqX8&d-7_wX&BZ0AqgXg@vs};k``x?Ren*yoIm4Uv^+L56vJ) zLyQwJ@4c;X$F=X*A(y#9zM-!OSk(`?Qrc~I_0ML7Vk~rbxVei6v`~(QwZ@ny_2-mH zTDv1E|1y;b5(uN-vi#1gZ}@LS-a<~n^AMB$C(hwozL&gzJrl_lf1(eGR@^Y&9aIgb zBk4^NwkB+N!VGr{_qfm3J|fe5Kl<(BDI zrJ`f9k#69bFT#=+^f3dw(bHEl@Yt!#qWuFgSZdXS_`1=uFy?&zuh^)5kSbX3xN;oj zHk1+g)fYtl5EK8M7~?c*5f6;=KJ<97$)oBkTj0}B8P=rvx}RU*L}Y;{;Fbo}T1>3d zRmgMgFRv-KK{A$-u|j3a2jdV6d>tH5VUJq`ioG0cgJ|5@A#)k^-4&8)cDC0H8@I9o zx5gYyW$pUaIlvj3URpMHp{B0z%(=2_Xid3^wy@3f;6k@^#Yd{8tZ=w&7)7Ch)j>iD%6Cqr=Mk&nzI5CNRs%h)QqSWOm{ zsYQ35wK-qZtnmDW)}nfmaYsXU$v?iP$(-=(7IK^XgQbdN)AK`Gz{s`f&9UXn#_yNx z`5u7XsH79H8uLMOr|s;tYi;G!feAY;N#Ij3jqh(&XJyS)Ou8`Y3|tyv0gRNOj5*(G z;(qiDIH6HI> z*n<*^jQsR)m5(E%?wK&;Gj25{$tk{x^|mnC+*y%&*?29(9`rv-P zT#usV=L)(=F`>U4C?*+M){5DS&g{T1*S-r_Z429Vz4Tl17y5K8wKvNTU%J0;f{rhf zat_y=ToFw=jn>!Tk=uz45dVZkZ-$0!k_Z%ekcWoq(+KGZKNHvcFY?iYWzcaaUu$fW zp%NQ~L8Ikp=`Z}ZHJc>^%HhX%vP$@ccjGK4$|+-$Hj9itK3N=ns}?=^S}u6LA>s9q z;BS6}t2`COZi;gPE?>*(fhrA7oNkN+2aC8Lr@G^y(jETGy|!`xqdrV%%5<|C%syMe z-B70PdkVxF7{1AKeHb{#?_>C8wv@IhVNr0m;+@=Ae|1LO7s@gZWLWAqpBtUZq8*(^ z{{6QCW)FY+DzT9!NugZ&LY=mDXBh|A?h_PRUkb04rH8CWAJsnp#v*i`m1Oscb5G08 zO_Q^&XZEgK%CEi@CvbM{0To4f^38#aN1}5)kUC)yUM|h_qbAR^$F}h={-#w|tWDf8 zG5D%oFDwTKZ7 z4&_3o%3jgfe?m2zd$e@wec^;Iw5r5DNTErtmn6RjitG6=v*DaEiz(K3U7p=ENGT5V z<}NyUrYVKH1H2adJ&42;D((qv@}(-HIxU`;*=_d{9?=u%5%XWk%@xA2kv- zYw1(_C==6%l(ayZFkZ_qu^zA`utP%YCnW#Lf1=bM1jszHPYRjxE}G`rT`Vp#xH-}! 
zzJUg=5xcMzN{TXV({wc@!n$TqV)G56?Eveb`FR$TPFO}DOY&gWGz0; zTOaq;%Hgz2b07VUqHADSrK*!FmnMIC@+^IzXQnl{7a_f2>CKN5qAU}LKC0$8=B9r% zU$wa~`qP~}rS@fiR_oTnQKVGF`6VuW74*ArMWXjCRGz8CvFxWofXiM_nV%ntC?Mh% z3d}6RQR$*+C9zt@xR_{Ki}VKRr#|3&dUdqXV=L}96txZ5*u*>6Tr%|!lqZ>wrEhde zD9%y4)&g@-%$M~#8e)l*A#Obx-}rbSKE5M}83Y4V>BX}nQX~(2w#xMc)Tt~kmn?7% zxs)de8Xiw=+X4D_{QaW0h}Uh(=m(-5=d#`_VAi!q9r=Y9$%33ftxfwems<2VGmz(; z{TuPky$#0d#=SgSS zh4H#r#DxqU+xv)?BrLooU3}y|>tV0bCx5bn?DE8IWtnT2>10ua zs|NZ~u7;=HM>=PScPX*3PGMHUmNJx zi&Osky;GSt;sOe~7`L~+G{bsEVD*llRbXx*GjXw+_tOuRtePK#WpK9vu#5 zqWG0@s}t#Uql_`~-Y;pcb7IIhqHyz=*LLpma3)9mbJInhuIx9Dcd}?&HHqN`Q7eft z(K?sa)XhKn{_J8$*tg$dn!bBGiA9iBoj+oLB+EU+KU{Q_UH#U zP6jt=Xrq3*k-q|TM&BxuL*{tC&)c?*8y0Z+NgLWpLNn5Z4(Ap_XA4N1J6|~K)Rg25 z>LS6nkBxAXjFn`KLMTCl^sfpStw9ZbirmHEv$p`25-P~T0GK#$?CoT^^=U!&~vr7x>Xw>v9 zD?Po9KZuFn{IGdf6Rq$xt(#9j$oob55!i1wpP6y2mQRIK%KU$;OpH<%{-H!iag4rq zVNs{F5ZJpC=dRlS#jmZA`4`OKYnK9M>Caf69~(G|l1 z9f9~XAPe!S;WkP16Y3yh@W#by?|7H-D++|{z|F#j!$Klmz$X5A7h$2mKtAQ&eJ6VCW$$_F?U(rHe5U?{>}v~p zXhA0pNim@Y&lbee+l;Aq3|U)?==)c0W>PIVZ~=w?u;k&aj9&y zS8oMfUZzx}e%0lB;=RJ4BSs{KD#+A!ZJQ!N+=Ji*-HrS*7=OG#rLI;IBmF}!MxS-A>GyKD^BPyC_lwN2&zR_w zgZ`Izk-SJyyQrS``;1yjyqWMJT23f)qNPeH*}bJjou3iSJ_0m;9B( zF?XId$ivD$q>lTw-(Ha6?dt0RirIouemi3_Y8&huG_s%v=T};3l*4YP9ODdw#WaH? 
zgXaX{G53uQ!zk|0v}+l3INxVR1on@m%DS=9FCh#@7tC3P_&J~^(*Tzm27lVECWcbM zgNC`%ESa+DFO-gqrfW0@9%t@86MdRmDR_5fCgcy4@267h-Hq{WOL?Utt_Z)gftH^z z(KaeR-}ZZOzk>v;+MswKU%4Nu;dq66o+k6v z!`V$4uaK!}MOTwy%?}7xu1f>A;fiL#w69u)V--`R!gkJ9y%nJVKx!W_B=F_eBF=%# zOq8CsUzOPm9-7X)&#_-5PIIj%_Uk143Wt0cz?77#@xE)j4ha|l*qBZLI7Q&33z+OH ztBJ*sj-FL}%yJEyxKCv;c){J+CGQ zp+@pIaM8tm@_Wl)=Mu~dF2=t#&!^i6t${5uO)&`Tzb}LYrkcf!9=R*UMBlLpZoRX7 zBInD5XdXCSKTE=VeX1w?nl$`LrYuda_zN&wbaEa`X~{53;yuDn2gesRtkDax5-df3 zD&J7AenJBgM?47 zO%c`HZ{SuSvAdnpxymRgB`8(tI9W(Ym;qJOp8HfNMtqMOX)1ASaK+9Lzs-YC_(c;< zy5UZ=`a$NNX}MQAbE`pq@q)36Tp9_V)oT4SKaa~GIX=tXncguBnn~iP_p*x#l)5lH z;$Qy8Pv~_$Sh(yt$H}$0Zz>9+;gGRpz9IYWhHI-d+)yryi>HKIa1CsM>1#|As88Cd zc0ZCZPn@mt7}S+FeTgUge7@L)sCU@jeUK>Cds;`d7D7M{3;W|$Rl+wK&dv8p`FtGEK?qaL9o#c7Xdan3ptjrPiYanI_O_?kV-<&F*(hL; z<&h_flH{%0okXdQQjVSdPFvAq%CVHSoH2jx_Ny|KD`>1Wbc&GlE=R&oSM_?o1}W`; z{{`~Tp(4`{opaZJwQeOAn?75)4w<>**|wn}GoJZgrfB4i2|hJ9(rmq^q~(Z!rNyG? z?6qGOtx8wFlUq%F&pCmiygKBcWsv!i`7&mkX+FVH!Y- zd)gM`4Y2kAKkpdgce1O|S9b497R%cXA{$DxxOZNHxbRW*(#H9kLrF+W%B>MU;}``w z>S9_jJ6z4oYAx^WGk6|cr>L8!b1cZ&v$_qWtm%W;1ze9=t;^)h)j{vz9-xWe%M@~)6?XWk!j@@6=Z~}b4D5wAz z>^q`8;MeYYie|T4|6!Kqh608k>Y@c`-TYmInQvL3gBz3_wOhgUegMPZqHB_~@02Z@i$fZS@#(7k#!EpD^44^go3mobVcLJzzXGzzAE(Bx2E`SSUf$Ssnr%lvz zj8bNdJrSe-g|b97e3RCo%D%1IbNFPZ^^Wz9z7PAk;=`WT5v%00a3golTcSNH=X@uX0*SD_|m*0>vE^{v&L(4S5+?=vD!DsiD{|6_6 zh(!X*-^Ll=E@k0BIY*aOG!Q}LYy(9!JL&k0iUPMg;!{32g_uYAggW5G z&E=F@HM!U0JhKkTj+aQb1Q1aVcYXisFd4rGN%##d&>$!hsJ1OQNOQdB3U7^Ltn z!_)@qs-oQ8Vp6cRNZq|Hoen*?g86_mq<0AX+zq zqM){6zD&%Z8PM8@Tb22n}bt zj&hwI=zfg0gLn*KDEPH_!Uo@bq|-fRejo0e@!@Vay!UgZC}Yy{#&TO*2v}%-DU^vi zMxRsMQ`67CG<9Q*#4&j@dnzq9_{5{8egwPxPR;@D@fkzMZisL*H6kfFONJeY-wWB! 
zjhS`c86-KTpZs;;+Bxe9GZnEbX=4vkGTgATKY6O7?gYWDhap6lw~qFjZr>2Y4VI_|wJ7PU+o^v7yz zxI7d#QQqGl5V0iZBS;&C^(B{yQ$LFfKPnLNZ#eHkAaR`2)9)37lL2Ney`ZUrTh_UN zIL+!=k4ks4KzM_|9h)NtRE!T-%muhA3*Zg;$szA09v(R+&ox&W%vGiDXa*n zMD#Yiw*Y&g(&A5Q@L7cvL#22hPG3S=X4Oe74l6Qm;9_)u2jU$)v>lp1L@M2@+H=BJ zQaZp5xN?k_E*nd+C%zAA*KmJ~xKT>A5*Hgy8rYNVOB$uP0VnRdyDlNcrHSS>j_;ka z0@pYXYm{Yqm1=Dv{*z~u^f~MX{2lqgphF1RF-Xf|2J$@=cVPU{XJ2Byn>x8g-v^>` z{DH`h^KZdFKQNB|fIW$!lb$^-`Q-$wPM&hqt5yJROY^{Nr=0p&*f7U1^CS09dqk^T zg*X%_j?o^?TXQd+9)Mf_ooa2CeY{gke`nUmP^Gi$JM#YMfFHgL%+C zD{1Z<%21L`dO0!i#HX|4&Sg3rV*l`_M5q2L(ci{SNzf7g=0_v1cby@YP{VtC-KJ=;80!LUaicAl$3bO!QKi@47M?~d= z`z^`m0v+g!o0$hGI;F1@YtIa>l{t7DGPo?!{Xw!j`lAzbe%Q1xe*XXys6N3_IKU-2 zs+7g`4WsNjTKxbQ0SfKL+U&_YWr>ejFAmOQKKic(Ze_SbOEaEsRJD3|c<`dl32L2m zZHxlvs=evpywzEuyhyN4vC%FNUU$HgCHqj6kX{Y~_?}NgyNkz#Qkn=!`XQP%CYE*8 zmBg5HkvKMoABty;j4?xki|;hnETlG*?jjoPUI zG^YWtCL@sUZGn`%<=mF)rR`rPkB$FsU{edzlgt%gLP!Bqhdf~y-*pRHA% zxjd|m@GCrXIh!WF7Vop1f#ci@)i5vhOk1^>T7)~}jux%U{jHAKMlhI-+nVV&e>{nY za0waaNIi17YJTH1uOMXr({B{#u|Lvh>U~!CR*4bMdD*zp~&KU`LE25^iNrfqQqxB8yQ)aVOw5 z?%Hqa5NU%8@CTVSu*N}bsB2ovGxPXybyo%BCw1kZ-vE|Nqg-&YtJWC8Zde#?cl6#g zA7)P290Genz_8xSt&j<^-ydW7*{JbFyRTER0jwb&%4c5vw7r|A2PkC{huV?=KIyXR z7tPkqr$_6p`E6C`WC@>eLQuPRgm;+(yEHttF@jn*N=@wn_Dwc7mS3H|R@F!@R0j^@a*8;+y@qaGf~pJGczURpPz_ zjds3fR*#q)PRNXbZsT{J+>E(@U=EKx>vAq|Qufzi^!vbKppId2<$O3U>$FHJp*)ae0EN~W%0ub(B3%Im z9eDp(g^4@PqC}-GAu`Zgtm6L=_11Awe$V^x0)hepN;e474FZx&BOxJD(jXv`N+Yre zA_7WEm$XPK-AGAycX#NLOYHqSy#0Kihd+J2Jonji=FD6(bIlCS;U(sP3biC0?Ua?8 zn(A3>=z}&UFSrY*A5@aw#(22PV#vc-uwO+*+#$JkVwj0e~uFu2!QGejQXn)nxD zAQ2*eUxxX$5k01a9U%j<@0IFkNIMiop#gcvJ7bTi3A}I+b4PnIYRIhBT{P6f`}&kx z8DL?r?7~4tCD3;l%VmD#USGy#<2T?G3g(sfs?uyb-u8C}L^m3wz+L{Gdyp-GVbc+C zwvbWy<~-oh_QZsO=*Mc{#5TBc<8(P@Ke!MPi57C?d6W%EQ1_#5mX?Az9CC@MnWR7m zuPa#c95Uv=!80Mm@(C*iny{55>2;W{zgH#DnZ#DYY^X5Dw zPWjber5OgwJ6VJh+B8j!jifP{E;9-l9lW{QE@L(974Lluuoja614v#vb=4hobw%?! 
zk)A3<5+R7eGxr|dh`*Yl7(ru38uHn(m0-C@bwY;CRZ_%5c>Y7^%?0}21CcZzplO6X zmk9bVn?PoaZ5rKt%XCj0iX4w2^57573otU~PZfvVWg)2*DKf{*y}~Xn>|*N6tC>>W z#*+alR>6oXfybMdL^3bFYtbY>du)nluHFgd2OTTb(;I${;DlvO;Z>*+hh>c>+%i|k zBLg4sYPV~@$=TmTZs?jP(Ozzje45}6YliUPb4zd)C<8o+CM^_KRb%Ps}dC4Z{O*|Sb4E?)q`FjJhwudF9osD{4oC z4C0*$GVuo)NzlWw?xab)a=xCIZ)dVPs~BK|uk1s>|B`tm3Y$Bdz=!}x#sP9k_iNUj zY~ORj3i%~U)pVcAnNpdPc0yYq!o+aw%*K!gOZls5C*^F5q>h!N<9;+)us7fYep4%m zZ37;sT7N7s#Uh69u`qUpwZEYPoqszhqVj9c9s+N=2LncD=X{nP@T_YG3pQ}YmZ>q$ z$HI3j4um%opDJ5)g$@#y>)@D&s9+Wi)QBUyVOcV0&HGPS3{wm&16}=K>b9Ye8tw^+ z>IJ&l!=};x*6ih$C6$|e_(EuoqTj*PGntRBH{(nX&c83$Ct%`N`9ZDXBDDlhQbFhwx;4lM}Sc9vsD zX>ef1Jx$EWtwkFoXm>3RPM!JO%MBN#I<3XuQmE}>)pm*A1HFS4Fb>s>vvNN+Dm3%Q zBcX{@OujmCC_QU(WhnG=fg5mEHG|6|BUE7;KC;8j1#i#nw?M>(2>kYZjzq12@l7H@ zWE#GGeQQxfJ>#FgKHe(?f#b87C!3?BVXF-O71DiuEtecHZUC95^wShf$+gXVdM1%n+2C(6n zKr{7O8-O>YzKdt3o0<6IOrzU??^*igiNqkq3!U*GKxOhf7zq)>?63-u0UYa7{OAv! zBX|mrU%h%b!_e&GRjH`ysT~!67e={sw~260-yKP8aSdoV$|>S_3AV~J<{e<>+?81I zu+f+aJjy5fRT8YYsHXP%^e9o4_k&o?ySWeZo47$d07-5Il0nYkL8-ouRT!nLDqwdE z&%Bf_;iYkN6mA318$=Dw$Zb*_wSb-KnqY-d-q&L`je7b%ECXin0z8jnIS~fxO`w@_ zBV%>N0FMnexragFP6RYM(JDkvI?9qBz%>fvw*DR?UJjA~i$)(B9TCQZCpRbMygBfI zLWcwHM1eiryWPCZuK)W4a{-ZVTG;q|3q3Q~&e}Zj;YTwGZxsxGZ7Gsz+K#{j!Xv8D zYP-<_oFNq?!T@>GEaLloLSkK$WEWtRXPR{{ zA+ei9hqn1m9y9X^!(8YPN6ul|!BO$}Wgtt`u~&3w^a*UoB`-!PO7F4h19OmH3%j-X z1*o7z-%X4^gH74TOS4k$dJC2CV(?_fmAl?^mRmyMoqOJid_CJvua;~gz5)|oD_Ft8 z_B%^hT!Tw#k*Hah!FnT-f)-YtQGz$p)e~e0ZgV9jMf>Yl9l`a2lS1X5(PEqj%3 zzl|cpkk}eVF%1v^0;+=+lo zv?!^*(1MRoRXWvz6M;HgxszrrMD_a#Mq!^xf6}(GF(w!V)RcIomE^!vgbRDP3n)KU z6}YR|7MG;k<`w~_0Br&&*zEj8{ad-{Yyr>t(}wF^Nww#vuh}|vcfS?oK`0KlvuvoY zbM(+!ak|?m=r?ed)a-uV{N4jkDPT8BG5E=p|H9-Mx}$J*&)cvrOo!;{{nWy+(-)s{ zcS5vYw6B)jg&FL)tkoEq)QAP*!q~PNA68ejMaB~f0X*LEOZscFFPQEkKZTc&_3HB~ z_WOfKC^@E@Z5e*|A&S9&9&p{#esXtr53B-g2|B_T2$>lwH8S6T;TXm|dt@r!`yQAQ zJsUk1)CUY$MFOI|#zA)!}i=a%O;dLLsovsPm6fhK&b?Z33}9(U$%%Hzjp z{?g|);7~q55p3pV;sWhw6sB%5c z;>q`}9bD)9moGh(QsvRk&+v-80Fb=aeHj2qo|5Kq<`=wgY`@_M8jipUW9rS2AXSD+ 
zBZ5)>R%!+jE?TS92Kkae$v{ClZzoi%Qvxu3+fz5)VkSuD<Eh_!5Xr4tU8eO4!#c68&538Ax?>o_d3^OoVzVY<#S_ zifOV`sya_EL(Pa#aWL45EtffA#{;|k0zCLVNVMkR=d*)lC8Py5Y~pWpp@;?-D<6f` zKk>6a^`&eC65RlGzwG+Ap%1l@x?;FRu^82sep>qWvK-zPOZQeG)>A7(;&l|JIq-J& zfF?n{7U2B19gno4q3g&f9X*UPkAtbg8PJYfgr~=crYRB!+qZTjq5biCU+pJFNNsyxIUW9|x-f_*K~rrAOEYrbtIH z^vOE&zmU7B(SNG|Enpee56sl!|FnSO%1gkA(E@s@jMfuC4RDo4DsNhyJ$INF#xP(p zXF6I1g=VUz2I@iJA|E+CJ3cTTU7Z2rc?N$*5+bKtk{ zRIXjjoE1TEnNN>0f}FQNBp?4jIhouE9$pQ3$b8G6YybIzq5O(0*zc;A!-|5NKF(qs z2v#I9og-UB!K$0;4M-;&?IzpJ@ z%n)(F6eq^RwI(2f(&6PWrh%_V`iVv-%BOzF3L+S*C|7A%eG3>9gU|`l3$|D{gB5_9 zDCJK{Kd}d6E8%L#FD_PqodXBwaBBa!rKtE;A?)EA@2Tl^nh0r0yh`;2y~-iHT0jr$ zwMf!3O4nC1eeSg;j)Gdw4t%4aoxQM@+sL~&U`l5h4}wbF{(GXZc!%C@mW3*C12ZNm zs-~vu0g{2k3pb-hkjI3ub$*qv#}dKhGUV>5mxxy_r)awQRR&Ex+2ZKSbUx}441*jH zsmK~7?wKI?PfQlmHtrLwUWkAk0u(zD8BYfP*C7^@SVx*0A@}I>e@7!o>h#x zf6xb~)z*m24w36sB{GlQKiQNrq;Ou!%ouzR_^2QM`>DpnAiF-M^0?6%v>X@hQ(*xK zWfSpU++M8%+NRG=2KIP1M08%s+9LQBIdtu z)4g7WR+guzuaqGV8cF!DFcmRXSv)S!^(B0-hKJ(UU$ z#Dw8Wk%bR+5?S&J#-H9L=jcM53oE4s>s8e~bc}u^FYN(z4&XrLZGRzWr@#{icnx&Z z){)8crA~riE)%VObQz$0D90wHQ;7X7>2rO_-4E*!jJ8T=Ue46fE4x@DYwSBBYw*ME z(2*NEXf@!w`8g|O2+;YD8Bv6`5O26gjawY zx^-{UcaKiv;csI971QILdnF>0Pp)IqkqAn|)YADs$tEu5)Xm#-+tYk|LRR>USkvf~ zE6jh&IP3|{`8djw2IGpC0G7~oKVy9*h1T8~11vlDF}K>w*)4{rn*%m6$29Cg9v3-5 z+bDyEO9NU|BoP&iKZFlYu=gqZzb7)2{dNfidZP|{Vg81FI&s%t4t~EE02-peZ~CeF z36~th>mE~1QvLcLY+%Yhj)WfkZ7zHOqy6jfB1Vo&M7apK5#a2cTCFGz(G$R71(zWC_ls?MT?~-{X6@ z0(`%@PFsKnD|+Q4-Uv7=3k~I{zd6v7OE4Ko>3weVUT`D`^j8gl3z7;}040Dk z463Y0vcSKcqXfsOd^r<4D*W$Ppi1~G$-|QFo5y_+QmN=QP_v?NKWpFALB8)+JI;W5H+Wy}q!-~EFLjt>+Z2wxW`a>LB!061Ct?z+NFi09tf z^)2wCp)#%1=!aTagXMmIYw*Z)u3vKJw|ceP&JBz* z73p0)o>s~2*K0o}x*$jF-up{;=HM!(bV!8#->(JY99g0N`)ob6q5OZJomb};$mFiJ z{bw-_Zn$@`hCNt)16q0ufa0oq`yq^c(jC|MI+ugfubn6cdiD{-PvmJw3aGoWbg6Be z@q|#{tI;AG?_ANrIh!i_PPH0ns2(n)DFS5C9y%}h)f;j<_ZzCa^dE7Yn&@5b|MRuk1hC)F0GulUU0i(v(GDskr&Vf`j` zJ5A>{_fw%+iznjklR9#i{^VjjBx8!G2HDspLfy3Vj~rVV)oN90h>l1TpZG6UVR1Zz 
zC3~88ys^RKgONT|s!JWaKDd2%nyE{8mG_sr2I3YkasiB--d}DuRu9Uad8}=8tU*LI z0yPA!PJJ)UF?$kP;xf{Y>=XD%f^7&WqadOiXLDFYQM&w(rCR;cp0rBtzPnuD?8Dt< zDuK`b{&@-iHaoR-IaDf6G~S;_0*Gcz%Aklj+{mMmU5|OZNw9OqSJdw~ts(HLVhK|J z2T#7eaG$h~8e6^KftgzX__%x<^|P5C`1zA5%R6~tF5vtm7X^0P{N{-D?V4LZ_duTQ zL_4fs^ja^w>=!o>r!-0UGF9xHi8tqpC7&yx7xHXGIB@*@!d^V@FS#JlMBP4u5cBDV z^nL7HALckY=+83xmmi&rGAMaYjs&{-}d0n14{;$KvQy zwJx>DyFjb_*b8$?V&uQ54eX&mqV=})RmjI666J*k@)K)z_w?3KnA1 zBRZj;XNgT`rZsl5oe82_)iwlm$1Z|bZwRdtSx2KM82Wb#3uo_Xmb&6L?jCAKd)wc=A%dYcsZfV*rB=Peb$lEWu0YYL3UQt);@owoEMnN(A zt$y>vt&$86kG?f|6U}9h^OxP%_mgIVng*LC_ubcWQGnaG3W}k;$(AeoySw*(o?S^TL`j!E>Y3XSq+1Hgn=Ey;uiWDB#?x_cp$} z>L?@>5&L_B!dsB{h>B-qj!{qR0nESc@AQrTyS55%>jCf6=n_42wFi3Pn;;gw zlL8tFdcAr#c!xV9NS{9FGM#NczC23*o|Ba;8Zq_b|Y~D zP1bro-)kIv{aIjF^u!A&z$Xnl&X0VNQsgkkzq%GU-nfF8;2z=t8x?ZZi2|QIOPy(c zP^Zt=<(@UW*Oc+;oBt~=%WxYZnykbs<^-c4EVw8W1QCP6!aW39YE}2+OmPgvCv}R| zb=kz+c7l!Oyz2Tjw6qO4^R^Pjb3eDgLREr29yiW^a3}g|!Tj54i+*&M`GB1?3zB~o zW`~(k&2fJrzurV<3NrcC&m8QzYfVe2)@fgx@FqoR-qA`jYn*WQ`$dFO&lgX@c(yiL z1eE?$Z{69i_xaK$D_*fAKqQ)yySYpyagxw5##w zwU1`r{CHn~=T(C4Dilu_d-)*q?%?82kDl)_Q8xkO)#fcG0DJ=1NH7NQE>yRNK}-u) zc??oAvod^Y2QZRtu0hwmvzE+4^rE zQ1EGKx_+g=z!`zmwhi05InIht)Ln9ZhIYX+`Qppj%}dA&51jJ+|g`_l;CFO9 z7-t1D&v^LRD*(1Y2=h`Ey(FU+@V+NVdM#$@naSWdiE#Nosh=IvB%Wp^)(Cb;AXXnY zddm|%V1g#ZT>K^)q)YkW^*gaJ>U(fL%UYr!Pe_ysfng$NOBS3ofv zEeVgQ-vo~WhM)Q^jBP<5118tf@enEZoc-Fc*thMia(ziRJ+u`+ce;Z<%}ME>2u`~-Q> zaAg7x23+3loZm04 zG#A-V5gd_*cZF7S7#o~w9cm*JxvV95`E~c?5nNU8P{Q_KB;adOyH$N(_(h@d2I$nO z)hUTVz@;A=i)c`6n+y_owWfaPtc-C!s{UH|kizp^lvb&fj6c{i`KW|a(9`qniaB?^ znFsw0tMkYy=1q|H2E?>ZD`z+y>8Y`HI?yGRa|vPu8XN z&ut&J5Oa)7!H=Hqh*Q>7~03~cxxH$NMrF~7=U&;be8=s}UmkoN#(vIqMv(C#- zWo9-C;!oF(2D)$Y;qVKPVfE!Gn}}Z|EM~q_8ebhaeAx*&N#U6(^>?U3B{vv)=rmsb zcO;-}g(k=G`gGkBF`9wB7Bv>@MPHJx&Y+Sas^uKC?)egKVcI%PQ^kL)CPLbjWSw zTIOQ8dLw-c1w8~MGc5l;6a`^5S42Wq0wX_t=bn_L2!Z-mA3xPb&XRi!gfN*FYQ9}; zu()g8KtZw#bOY5izteiACII2V9otYok zw%4_8q#@n~dLHec$VC16e)7=E;DFg-z4aV_3(wMw`3&l?wL|DoSRT=H9|eV!TMRVS 
zrq&Exb<;R;na4d!sGPU(oFoS&_+}!qRb;rqBGFwf@6sdTTwF-?7efDgKIF&>Tv)jt zRWy{RcHazf&aruLs9DA1g&3i+pRWU*H0Nm_Tx3}vAW-|cB@<=i!((B`M3)piMt{&7 zJH>uy8^LhG{MRFgI!Mz2dgKon>^}XUEv5zS>-|^Ov^GTe@>vfq6;C1YuqOmzH4u<1&SseF?~Ey>}t&B4deEy}|_ zB0j2F_bd}@9eh4R5OWJICQK|KI$;FP|+YIL+sqbrg ztS8gU8p;Ds?hA&*embx6d{X6(O~GICp1;cU@QAkzpU&W7O}`1s3ft!Fl0a%uxn>S8W`TNYED6QHK$ zf$TAXt8HyRBU^@*lpaa1#}cVEOX?o2?57OI({B=s@N6>87a3c|!d%w}`MkS>IbPD& z+Ie0kIQ#irm6{QobjVAHotAR5vAh>{lbvXIZN*{tA1ra_8v+~^pKQwA zr$$?M#eb`lh5L$`v3*9rcH`=)iu8Uj+z_F@4Eh=T_cp0U4IGA`oBj*8w;`+#FFAQG zI`Iw>>At@}vzTLCsLHQN#dxK7^!=xefgrc8&YPG3A?<`Pmb?Kc+03b<@DgeAMOL&I zI&iV{#ZS-{Lj7W?wOqUn{R;ln2?@PaCR#d7POyFZf&Mu9RA%^1YEB)1P7*t3?8Oxk z&>;>VPk9jDu~AM#gL}Ao7=AFmsfm$U`YSR+MHxW73L9h{o`v>%fHAl>U}p(dqA%EnK- zr?F*TK}}>s>yoZ5r5Sgqj(a`Qa251%64z|KUtIkaN-F)VL(x(rv(rK=!ZtFuQbD%u zqo|j)W0-~e@Ekf(7;Pb*A+pX&HJqeMeNNZ4G1~Z4Ke85Y<%Z7@RA-{il^$Kfi)C5P zdRrn@Gy04sVKk4#C@eL+!qrTM;;+ne?lxg|M;Wvktkm{XAB0aCCcF)qwnO!Qhj8Y8 zLfywbRacEub*rCLPP{64g1k(C3&dEQs?sKZ9an(C$b;OsAp6W8v=zi0-404B1m%Q< zyjPY-hJq`wfgvcWe#X}ltGij^eOS(BvfJ7-Arq~81wxV)PfbMeiWDg%BHvf2?vXuE z8yoy852Z6HDC3pBbtlMtkMm{hQ_Xq#sTIK;GZqV@9U3Jr&}-mE(B#pwo?8*VPg}e+ zr8^a=&<8Oi!x^!KM|_3Hia$PH$ZM}zN=!!wq%FEyJURTrAf**x^4dsSi{Sf#P3{q4 zhUaqP5dkfj&=nnZ6lDyV1*@Ge33qoysbIR*-^xeyw7vAnB)o!N-os;W5C43g+Pbq@ z0B@VN(#Ee<6~3`3V0PYN6)%a}8vUseTuaMv-@r)(&kEye;k`d-|9*0N6I$=sK0+)A zE+sSU5JU0B&wd_GdJMFTgkUkdT?{=p>+@cA2Y@-?axOtb^86Y)!_2jf*DN2OBkmNh zeTdJRnX{{z&!SAG*Z-XRt=87{2=Rl|Q6>BL-P!eU;+Lg{TgR%uFOH)UXy?X*McN1F zb~kWbW0(4HyBjtKfp{q?R#)_=Pi>0Zx&_my=C|jjcUkq1QdAvvJ$2v88@G%wi|ii6 zN0(1CXn*E`Om7ctG4$jeyE?+rvoLhj-PR{hy{S^`WgzaDAW1Izu3yhYhqni%(vFTO z0w@oCzu)*tj8_UqVpv%7;<$@_`E2th6KA{)*;9T%QnAb}y8GQxGn;LD5HcgO`~YW+ zwHm&EuXcchWz0fi?yI1GW-;M~+qb%9Q`D9u!$;wtH}qdTrZsn;B|2}~s-B+;U;@z& zt52B;z==?pdpd78@V!!y8qs?_cCXdo&*VTo#e;Dou^%nMEO%^(@I^Ka>TUn8K z9q|+JYS5CW)J!%ncR=EjMhAq#E+l@L4JmeUe*UX?#G_b zEz8i|+8TWAj`8b13*T#O2#n+>yONu6-mt%cgpwj>=Gxl$*uhZyuk@xfStWOf6huCe zZ9(%aw)2->%q6=`mUBox#8q$ECWFx4dBg0Je?sTL#lb|6^`Fp{vKKyr5dAkvY$VQy 
zgbN8Liy9`4f5Xf$WReqMM6IpgoiyL^tS?hN2Q`zNQ#>!?>bqou z(U2mONJC@T)b!!6gw(sb=#~7GwTM$lvqVRu>DfoB2`%deV@fl+cZTCPb%#EPl^G`n z71Vh}bGUjn2Wvdx9!WIIu)pI-k`eO#9W3IFrE7kH3yB(;8U3$bVzr?r-96!?bA+8l zEk*oDla&|+f!d(W-J*he%HBs;RL7e>sECrsQ2P4&`ac-VwxJ1oO>|*XNRxKn z8(}r5>7(Zi+GGqPEelfC7d(e6o-&G@h8axPvC<5@fAb*S5MAe1IO4Ttee}in#&`xK zu@ZhI_PKXF4Nr#0tE_!SyW4OkW@{H`dsyN<`G?_=(`>6Aoq`1xif^{nUzNslLuZDh z=r>#0ZB)I4le#zNTW#w7E6<4vWNLe2WyOp9Eo1x6?tDNJ=ElI=p+9%YlXaVLm7zSp zVN}L!U=5w8I5GJM`4v1zKu3dYxP}a^SqD6A3DwqM?Oj#tN<<#pGlqzH^{0aR`_Lhk zpLlHQrj1@fn~?0}Y|-Dj;QYY$$~NE?GG`|SsG)!7=Kz6 z%0Z_kZp#%{Q}sJ5i$0<;byqKtK5|%pYhf^(_ERS6Nd9Wlq_+{{?H_vamZSdnT$jUL z8`{jYM}bX*q$Bc66nS27s&&B^{QKN_wKOJ*AlG^~biW)C$|afDug5YDV!|cKp{X5G zlRa{yJ4aADQIYC*IqYe}d!M9n&gj;jgK?MzcavG-h7YPzFz_`LRS8n&@2XWf@5+|k zu<#Rx=g|0&vECYN-FQ*JK!=5KU+}1UHWFeiUezhM{d2w}ID2w7W~$r7-EAo7s>}`_tS#g3_7jxIw{gDO@p|`d1uZK^Y&4B3Z1*%O#vHOMjt1F zw0{bwNX)t$QA-g%bye}ODgNYhKEj9(F%6phG#V@_p2TS)dlf(BX}a_3btCg6%Xdta zi9&(pmh=0NXT#2ai>0|0LBW05pCt!0U210s`2`Qrg~5w$*%BNsTdBo&;x}sw7`j*Y zH}CB5G+YY$v?X6&eO2L?ZkZ_S+Y@W+G|Z4P>(8K27s=-p}3fCoH?KWJ6dCi z=-X?*osrM38yv9^3l}*8amICe8DryqF|;}~NVx27@nv=;ykT0KC7br~IOT7jG9i&? 
zCj-W{gyByh&7u`v68+#}&vm#4-FZ{37T@7QZl;QFpi4zjaekK$X$6u`QuV5-=4PK; zOC{{QiC{5dQs&a++NlLrXZbHb{<#tij1Pu%jPxGf4aq6DkE|^}$tZnYE*)NZY76;{ zyzkx8g0F00PJdI@#!FK6(?+j_jCTxq$B40Hq-iL*w2_`8o)FZI2iZ=z1|)tO7_I8H z#qH&!>U&YUEuN$W!Jo&ULzme;|91*r5{1cU*w`?CQj!~j5#{9I-#`3#eg*&9?cIgf zak5dyAbtD0`(uHonFv;*8y5ljJf`6~?-O#lm$w5(Hwp z!-o2e*a z8cpm$vpc&_Cc~XP*Ulkhhb)ry??kbI;`aQ?P(+E+1k%0Gr}<{AJAc>@SC$kTlrMEL zbns}%HHj8VANFty&O?v<@X*WEBOy}gkTVV(?x=i$PpzFpvrV5y6Hcx?(i8Ni6Q}X7 z!ppRwpXc(vR1MB9OJ-*|cXlxpuH5jMeqfJo_5V&)B)WL5kC6WO!%2sKB(`5J({z#0 z(Xz>lw;`n%w=&+P=c`^M-5G zY(?Lmcg)tf#+MjYS|6}E)vzfDVgglQ^wz=R{p{rT- zV20$s+bR=;=ck=sY^Ia8piFI8^WPLQlora)EOcs|d} z&}RCQkaH!dggW?+glxV^n@*pDR(<6;rT5^A8zGXpS8rQ4wNp#>M>`IXU){?d;~vG6 zR{S0iKJU^=zn2j^X@fsK$iurC|G^W_(EGLp+Dh2K9tEZNZx&9(bDdGPxWpSl$5{>gLlx*0SPI`$hVnv0CxtI~F@9G=KACo{|X_P#;J<@VRQrV&Jja>*qXz^yNwm zE-mk%zq>vgdu5MJdO4%UUf}Si_Pv(> zTfH7P`e^5w>t$((HQftVwSk&B5;~4-qKh`{BRkC3PfbX%juD~GLZvtqid50;2oLP( zBWO#b^QN=(tzye@A3plyu&t1**7CX`I5J$G4+W%X#M$P!rC|(_%*LKzLM4z!o6lXr)L5XDU<8l~}JLGCC*)%$qFIlo>;Xa8a% z9>{8@j?dwW_eIX##L(^Mh&EpyP!8RZ`T!8Z(27f(1oG)eqfN=R)@p?(EY(SHS(hnm z2j-M;8HcnZS*h;?PmJjA2-&APH%p6;x+wVRk8M8P_C8mKUG)#}j^giyKPXfqdgH$B zo|@`xuTNbz`MCd^NyUA4^QE)S<)IzusaT2{yofX`Q2{a|3AyCU^b3jZF!gzH;x68# zJZj7$lf0t+8`fZ{B5^JlemVETU6OiQMe@%7yjb5ao+1C_34W}Hz((}>$sH?oziM~b zksr}@PM`&c@=iZlgqx_vM%QhakaF}$9(&RvTO`ANtw5dZbzN3nEN7Fj+a|>Kf!*Ck zF+;?+ZhLaROvwqwiF*RmE=({-Axb$6Qv~5~HCaDLe)#m3lZL7F7;@L1z2Wi{ zm#Z~yC>PSXOba&Y{y=|}DQW+Q6hl1RY^_zSd&I6;bAvGM+wpTo5AmCVo%&QUfBM?x z{?8EKf-fjs25ehr!sx8GE_F>_RBlTzkVbTIJl*wyzpE41ZAvoOE6N}{Y8%>wCThO+ zlBZdRul-?Yx_91`sU zsyzK6TJWx)S^m;Zy^}PfaLJMY1Z{fC&3^@)wX$c99ePHX6i@xet-X37IQKTe%Y+{{ zip*=WuzkIeCJcAI$>y(fo68k6qOflLuo&;~?l!do5wdG>`Urt7@j(2CR#f_2c(du; z4%3MUjaJvEhX(W=l1VECTHivB5GN~*g@N=U1qKce&f#RkW_THg;dWdXKdb5AZh5t= z-K>w&xjQZ!#_2>+jrCd#4bQ8n=bz+(aBI~6f1xl-(!3;7{Ooj<9&p&Pqc3+cY`pO) zBjzznM zg6-4j615itx9qqc^o}m{a?`oLMK%?TWfLF4d()=4u|3b_%X8}$`9((1QGQ0l zMc+&Kt7!`+okc9TTrqT|dOTE;0r#@~9Nt_sFqtG-7etyU2jyjcv*L#uBlor;(r^nm 
znug3r*_%3zKODw;#x#EZLUICejQhb4LRvGT8R?(4A_-X~)ILsN$Na(C?|vlZV^f-a z!63*V>lLJAUxEpg=r-ATFLt(H(0tT;jYX_wuQzY61CvLKggtw^$^e*`_&uc1EhRjb zyXhly`~}7O*-m*f(LwOqAlS)d=%Jn+dq`P=$2|WwrCGsiUXW4g=FcCuAJSeyE+YDu zd3eVrC;GgV&vrS`tjJpw9-QJ;MgyJUhQplj?o$Y8q`T zVtSy#V({t+asOcB;K>JKngrgDp%v09nTmcdW0OsWu=Ub4w`0#NZ@%VI_AZ*Fv3pA~ zqN@~)hL7QMUrMm&<}sHwu>wGwI$lfqk>}@cU=T47gN4n| zpB1OFPbjvh^Dn+Ol77s08Qsbj8P1CdOV=>3d0qaIz@?r8xFUk&Na`=-oT2t59?^H3 za%)lQ@AIcl%g#sp+>{jQCX48w_VED#n#5yg07mNFs}%@uXnr`3 z1J_vkz7zV|`%Xi*?PBW?6lOHkG%z%i-C{#;_7)!6g9$=Ocw2i{y-oC9n7cV8-a#_2x0|>{0VB+e17E=oKmY$5+!S9u) zDVz$@+*I|>3Eqbr=3mj84P9nh4P82X!$Na|_Dhq2+zsKjJaZtLabtesBj-TcirJM5 z?`x}^+xA;IG`}Iu{oyUY5V6wa*~ht1dY`ohRToJC2RY3TG@^upbCIU+H%HOiggRDb zDE=kt3`EPSNI|E>o-fcNE8sMy-E%6!KQnrm2lM z)3o@r(}l^-}+>PLa8W3*H2(_!TS_p4A~CW;%3Q zV{A2XHHjk+ZV*(3P0pFlhVI*_)QzNQ%TVc2c%35iZxNI|C)qB0`y-Ci^)m@l|84sz zp@pC2Q`jYYlr32}+H5p*--2!_gSL10 z-3N-d04nIl7Q?(kR93zI7#F~1u&xMbHD#tYK?^E!<@~B*Z#CH075+9cPE&qYAA>2h zcl>#Mq=Km#XZrXmJ@gN>I`Tp7(foH}?RRnS?WK@eh1ynu#N{17X(T=^lQm&niwKBd z!&oJz9zjs>K!%!E%+V>+8aHQm&d*D4ORfs1Hkm6Z_Kr9~ai$iLG^dF4elXP;QE04T z?d0Ydq`4A!>l;#@7}^eY9g?^^xi8Xnb-I5jhyA^rgJOE~i)rpKgix0ae*Hsq{&ZE$ zP^l2&``jk3D(uEp_NShr=PJ{%O*Nx^2+HTVObT&wkyVr1(w^CsyGq!ukbx!$uIF=s zKbMb`pP100nMx&SP1%N^T;XBo_9W{^a-KtoaS9_4ebYHOX^@{K+o>CRO^~x3rtnO7 zIrg2=#o)f1dNBwn>GTq0zY%+x`&03C+1mv{H?{({UT%F-t?#};xzYq4*~6iIj`W*h zHUeW3>dg%#?#gjhKXe}&qT`8Llv z=ly>2huQbqE3b8}wOMe{H)KwrLl2r~ZqqnWUih*F36S@smpQ5uNvWp2lv|y38TEZA z>d%{+rFHDb&^hv|>jsagx$kc8x~pcFqS8}zD`rMp`Cf1~#{&`E&kJYgwWbSmB%3gT z9+t?r8|x&Cokit_6Uj!dF`HAYbvST?LR$vzfN%yB5=brDv2wR_aR_yJz3)H=1OQ`9VJ;~e7s4BMq=jigw z=MyllheT>f6k?q`ckKv*v<7W|dYTG4KeV%@u6c1Rsb8zOmU#Or_>LtS_La4YQ`d}p znQUxLrfze+F4+8Xm=tp39SdIfLBE8DDZXtMh4O76GpJyydA5@%aE z<|Y)YN#r$rZ$C~6UAPbVAGoKkpau_4I(!wPN@QAS#4^B;5HMNTpxB6_VVQVSNp2h+?EmEc|&B?N(bV zv*t5$9%_@SEY;mw{^EYaG^!^62^15hTT{eO?08FbAzRV<&)aW5#iMu{Wwj!{Kfh+( zoe)ew9Xy(tc+46-SyHeaP>IV1B44h$iX5o={!>H2c#F`BsJ?PK!joRD9L{P@!WFW>ic-J{P*UU&+qDKAmS9_GMQ&Z=6 
zwFQ2OJd1Cx9U*=#8YZubw-r05A0uPq7XuI^htw6{-Ji5LFbBu=q|t=QzC{T4u^`x8 zvH$ZmMkXg}uB6EEYOGVKzkNK3N+VeHWAkw=BCh ztMR+ugjlc!nK;mPf;#+>;m$3Eu#t+iCmLHj=)#Y-C=V9ewAeb6llsiDnKh>4r(*2` z#geRyQ-ucOK$AciF$xv58z(_@vH|RmB z-J%x3s2dMUga^|Mn%FjSgbmqg%den4ggc$#Uox_ZI$tjnrL4mDZ#%Usi`+pQ(;4$l z@3A}(C{6#|r}44CiB@iA@Ex53=Cw^0AnSm3hApwC`_9`H2}E@o+W)we`%wKAxOO*0 zv%xk~RpG@O9p5bKjrcjV%)L#pV??##^_;&?_CJjnquR4u38?9U6CJXQFLC@wkMaR( zx;W`z$2^2Z{@=}Kd@LS2z>`1OB6CbA_5F| zJ12y{jH4F=NdD?}H7oWob3cR080uZ(9m#+Kr>XB@Cnb-a z7m|-Ajvz)ppEzWu$@`hT))=F(ce zC{TPMt=DL{Qn|%Lv(-@q8yERSiE0&V5UwKLA(>B&Dg`*>jMT1cXA(>EPd|cORjd53 zAyU!slU#$25kmHh=|z4%p=!?my|nK%0>76^iUP@I=CDqJMr7vq^IDRH2oP2!JWq=*3~t)xgS6zv*5-ekAl|D|5E=kSF*vsFOUr zdmE`^r#-IG&a}S2F`osj`)S<(~skRDB%#SHqQuz12@}k729# zDDBc4&i=cmtn~K;#5dRU29P0K2pb(d_f@jvC*1vgiJjN;S6+V zCN1!}I{Hl!xsvs(xvAdQB(coA&npP+E$Fc`kK}?T35Q;4TG5IN#FkUMHkpz6`Xa!w z2-O!Y(TW~pt2;8Flxks$N&2=D-vX`%U9Kd%h1caMHCx?fFRYI4TW5|y&||Qqx-;+T z+{wh7O;vac%;ENknNgXTZ{jZvkQ7TH?;>OJg^AU2wsctJ-4j^F%j+#0&NcaWnSAVpBc8u0C!9z)TcY$UjI&m6vCp4rfz=2rShR`L(fQOSA=D`Xq=sOB7 zjF=xVHn6LXmU!T<@ltY3uA@?2NV9CmYJVGzcF97)3M4pg*AZA94E@f(zFW*vr086P ztfdBl9QGtG<3chS<`;oNzgBc}-k&Wzp)O_FcYl73p5)t3)~#bO9Tgk|xygtVo=t-2 zPYUOQv(Z!@1KJ3wUm2ZoX6W_Wy=80=w6}*O$w?plcO7O7j4k}YUuKcIe?jx&&%6l9 zt8ITc7wPF+8bOf{l2z!YjH&&R8RGAarja$xK@nxY(PYhw2ks0x>5!Pyu z9g;Ell;xXoqyDHOx`9HB`Mk2>l6BgBH5r%+fr*97Idyr>Q5}m}z`$u(y-au5wIaM- z*EaK;^RY;}WkVI3)!vEjH*KkIliADVdoMmHQ|_U^3~`g2z4co}knf>EpV6IrewH$z zvNN|i7k7?v$6U@R`}WUGwS|_|romn@c!2}=(2J$mY*j0yli}Ddz*zetG++3T_9|D0 zOl68*vx|6SD~jFR$4}*HI|Op%^d>K7A=u}#X4Z*PRH)ujlI&cKZ;E(wpmH?mTUT$2 zMkh+`*+owvPg-r_bAI!9oK$MvO>gV8zE*5yvQXj&IOtJB{S4YA`18T(p4g{bKQ151 z_KlvPFMD=-FS>{MuF-n>vUT??&MVfiD@VK?<+p#o0pU~eh=MWd|3$01;o<)?1~l*D zx*pZ6WJSEZ=B^FwhP~xq^pw?dw?$7ia9iY z`Km%oDQh<-d(g;9PM9rG2|exZycwi>4SOmruF9bj-25av!jO8?dh+c&y2?k#S>jOg ztXPPEa~3eZRB&q78_N?VH_j_G&b@-<#cO&qt=D498A3VwLKrz32R(nQ={L7l(R(g^ zTNg|F3=mzaZ-pGOKR-424d?LQRTX!4^tNS1{&Tpz$!Vum+b9}Hcs5(;1j~kui`cn2z_*x!$R;)$(3KT^Hhz&vj@Ufd_5>26x7woQ<)Ya8kGtcb}i!%e>BQTl76j 
zQ8fxRR>2C(xsm;dub{cgG}(m=Ru9#KE^+Tebb=Wgt#pFG00!`1Pxk*3hV`21L`pMP zulZSVUb?4R_F0T?6%*^W{w~QJKPP)k`zHUx7~OFQ)ER$r{;ZAiFi{) zsX-fsjA&Wk&M2w86?P3X7tP;~P_dEFVOy1OcZ`)V_Y>)EH`A7r#MMdlS+Wz@JZnHu8*iX!wc=TJ3nq!RXxUxJr=*WP}b^Gg&-OASFEv*`c43X}I9 zQklVLYfyetaq@5ytAK{V5a!6u!IV-|$57=rKZB{@zG-MDSLYDitV1W3>$k7bfI4_a z6FRUw45yp4*W_TWunqRwS3hfhUi51o{qxP7498dQ>RYRoZsPO6^GDyVeYy}ZJBpu1 zczk^%xrD(+^h$z9i@<7_=Nhc=C5o4n6FZ^SQvYB0x>hr-t{PkI^t*nwD$Bw1520LO#I6ssNYB&n zLLM0pV>1G;TR(l72!!O2tbpkM0}$t){n6;1tVsM9d+UZ=ffa7WetqZvgsuM2@VsOl zLdK5AZd{<}@EL`(YD}w?)>e-A`Fze?v>*yo3E;quKk`W;=f~P~03vvmk>|bkWBhC4uTfJUh}<%8cbAia z7Y}F_@E%_boQ&Ynw-Vh%6w<+|dlH2Ew7;=C8B%3yze!NC|U6i3>dfuo5 z)h9)qdzr$jzmv;4x_XKGDC(y2x4(6CcaKz8pFQ#6pILCiFJcn^O6@dWdiuL8h^3c5oIvHkDq$l?m6b?_X{2HuGIe9M6nD0id+LQWk#V; zA1zROsm)K=fkW~0ZoVB3Snj{-yY-FmMLSz}6c6FdI4X;*jPJ`Kh|~-IB5X>(M68X5aU@6;WQ1)*-r&FlBzG;<00qIUTDAI4H}B=aZMv_hP*_W=8mgv6W>|C|%3;FeEmCNTl-eh^^0I02&rY==A{M1I5E6u0Y zfG6e-B1SGJG5s>)l?DyMf+z6D;7P&}bD#XNlTM}xGU;ZD1L|K&;vlrhvQ)oP1>}~d zA!VhmS22#8|Km<~H$!?OwyiBE_6m4ab~M3D2l+(}IqZP%Jk0CBkY-Miur zV=Fb%3%~0u$Wxn!Fj*z2#)hX9S@IQ8ZN#xB!0p zEz-FLyM$ivU$Vw*ibjD$!G5$*yW;1lqc$BTtP45eN0k37@MmvzA9Sl_Wpv#$P~)5v zT<6`jF$IltINkzTw%MzJD( z#vIRC0kmAEGW_frr7Y1EpT3W?74aogXQlRm&RWNiap`WYx(sN!c<*Y=(Xe7x=dE8O z>RudePv*-7pgx?hoQ7hQ-Uu1$5BtY*8g8Ke|A#fSwD8ZrUG>kw~5{H(JzHrwP$+N~y9@@sAgEZA^Uxn~*2mTD&k5ZCsl98!I zwtKO=%}uywB3J6{h*dG>;0@1za&IIItDc}Iy-P(8GJ9(9vV^GN0v3V@*j_b5 z)$c+1sx`c-!T>FYd z(s}pNdkAvQHUxck&X8=ySw=FzlJgrZ^TMwW{dfc4NQLdLTt#X#+8ICV$59jTi46VSJHxC-*u&Jq(}{T{LX# zw>q63%-&MIPM)9eg$!B*m@mmCd6W87h2BK#J>uIY41^ zvfbQbBjM})+muiLXoDse^Ebrmfgd*rQ}py;grdhIv3;g&Z=+T?jXHlvA^-Z~gZHkP zp>SJi@W~187vPS}<@fQ1c78lS#Xswm2Al>xV8|lB9{Ainqym;s8(DlB=Dbiu7y3a~ z^k^3y(0OEbOjnLJ>XOb$tXq?bavoC`j-xHge`m5TqE_>vY5w5s!F(;MiQ=}SKdYoM-nu2jY(E3dqgpxrg+&IVs~1(Fy1N} z6;cSzxx-$xe7-8AwZb|n2HUvWhr!AJIs6 zi~k}HS=dtcB@8?NNH}v=)q0eeAjYSBFeIkPV*wET&4RAZ*5eY`ksqpJ$El9OCf1qW z7GXE4Lq6sv{Dp$_f249~@Ye}(f(a2R8=-l_G!~{(?e=r-Z1mLS2ByO%@Ld;2Y_b2V 
z0VJny06iO1kPwJ(b1zVm%5g@%!!6G|L@fWx6Y@5g&$r&WHU8{hB9Rp#dNEOeP`hkr z{l2&QG0Gz886Yx)*E>M<;@npe;i+1 zB?QZi5o%aKUq;T3hwU_2)g$ce0rtT5Amy%LkhEBkqi;#*JCPvLWm2&R}c04>5+>!byeIA{4h8b#a^$xTfSvK zDgF~T+Qv!O`CSoqMwoE*eA{1RmsV?u7@L9IXxCGj>)`DD*flN3h2<<@4pqprpi(J{ zEc0=PR?kg--al(U$Ku_sNk@2^zmg>ukSdV|+Er1=bboxDzl`$D_@v02mfc1cz=)cb z@le<1c%tHP^6j?Xu}m{<;Nx@-yF5sq@skcL-m7u&0sFpRzxB0((CT*EozFORF-|71 z$n+w+493TdBNG>_Hj-=T)JsW`*az0!tyV3amTu}5v9o!$H|-mNj&`lEb5H+L3vA(5 z{>42eB~NahkD9tDdx%=nMZfk{APd~G36t-*I9}b6c-C_{5a=>4#>FkR4{iJ*43+A4FebI)(|rrFB$lUOh#B>y>Z1&h0p zi-HgzyR`_^PX-DF8O2B2uCYYL@qYlt{<91AU9Y1*iIn~L_>hj4fz9vUoFmpv>mcs> zUA^^!Dq;KdED=VLBpG~12Qnt6odnI}KRo#5U)Ff#G~ad+D>8+=c%rqRISi(Uj2t34 zqtOQ7D=qmw{jA8--8C$GRwS0DUZtcbDrbJSrpJG6wLP?-_`TZKL)VVz=VN!NE09lx@5`O=BH9Ty!z+rTctkJChC+hga4d z;ZkI%<43;kU4MpbOaSOQ>!#JRsSnCB;NB)^jjY&O0a%3%3)HHA&6Np|aB&7e_>X+n zRQX3Z;11G#a=gd05@uF4KoT{p9=%CQ(1=&>uZA-@#;7l-dN7irST@_jc`AlkTWCFO?Hqk{wTh0w(h&C(;erMaWui$$xojm(}8iu*=?IDv7d(@O{exGyMvB3kC zu%K}+H@@cqs()&b@t>{U@W^P60A%ytp=aJV3D@S-P2u$d;%w@X+&Kz6pbBQxXIcA}!w(2(z z8a9V-?rH(XaCr9QP-*-%7<_~&o+;%?M}fTc{@*>VcOpfxfjAhDZQKjl5+Qrhu_Qk4 zdgH$z-~95sfF}JczyG`5Xt3`Z0IKckRoEErT5-AwVAh7|+C1h!r*BzVrxt>rS0f7( z-Og(pH#HnOf2O}S^TtNHV?*$bJWuM$<5tTyko~7VtI@ZywGsSZapN}Mh;Y0vzG}+# zDHBKqoUNETNseLtSAww**PiF5%s8Mn6J3tK?F&x{RPwU8{l)|B0xn;+i>kl=2x{=p z?>O5%w92#N&zr;Fc^ayw6uS-cu_Y|S!u_2+1J=S+w=BLjm1myyj}2H4yv(?k$ll}V zUF^FAekAcoq`*Tn0!Yf1`rp}*Jd?+JH{kqvHiomdcZxL-7%q3o7K@0uggTP{ZQ(-w zArmJDUR}m2_r5HVwv|&K;0=QwYbMu|rC&(mAptP1$ZPPX)+f^%;8PL#f!!RfH(4<{$HJ>t^%ATP{|dYsSxlS`V=x}5rs$J zg?S7c{A!p(+Hefnnp*yn^lcRWyky3n5*fHzDAtefIv|DD!E-zaHuK<;7tg@gmJWoe zK{)7MX$USc(CU_5Ap)wxO0a90YpAMP@qoY#3j>bh-ONq>ff-s7l?p!T^5-EZl_-4; z{D!pQ&)g^*UoieL0H~ir#J|xDTjZ{Ru_ZhgPy`hbbZ{5x9%*g)N>Y zksEN(VPLdJTy70<-){T-EplLKJwzIR2D|v!xgi|H_65%eM3AIP$=$2R9qb4sNUg<@ z27k94tL78?IO~<^^{h1a`@RhiHOK*daIscWMB+Pm0YqNf4r?|wzaLHl{f-L0>MD&a zQDeG0Wp%F$i6x4O`TJ#{_v@9dA=dGuf_RVoP3%>hku-XiR{{@fNatN}k3Ys}tP?=C zKLx4h$r1^`qT}d*=VMWL*|Q)-iRJxi-fLs;`JbO0;l-MOePjTt{^sIs?+7c=4%D9z 
zc<1elyY@L642S}l+ddC)B}aYot<-ZESSDJ>&o_EQr435~Xeg_rNufm5)nIJm7lDi?nnj3-#C=kT4c9(DQ zJtHd`)k~@1T-NJ|idtufY~L76Uo+lgUzCBC*jlFXa27Q{fvQU;J|VjWnU-~LGhH8*D;kzNh-o~6M?9#%IEbj zl&qyeJ~@zNjOP8_ zT@yxnGj(va8Lajqlzl+SM5)gLqmFLvMdQ)PVIRXB-Y@pW28IIvLlh|e?(^$n6;JHV zX5aKM&LR=mIT46%gqu1RW1Q^FGx(nIag?9lN^56CyS==$k6zb+Y>%V^xYoBnt9K)? zzy{Snq`ztF)$n%yGYzaKRxQVGK7S6K4xo!G<} z>e`(6c(yn>?}B{{W>G=#H^V1?Wq9e{VjcG+k_0W;Pn0;&F`Y@UlKO+2e8@`(YJ5}@ zSz;aagvWXv#Cd8Gm)Wt_J#0;BePKT%T6Ck%^r zh({En>_BZYW8JX#Zx0>Zff_X*@ExbVjQvqGNdSugxdfWQMv*;)>+Ylamq%zMndjO5>C5jyI2CL*>lu!=oLyhE~$laqq zF}y5hIym0MZrESazN~HV@PzS#Yu2(x{^DiP|4lQ4jvuZO*yx2c!0k&;8IeCter32F z%ca2WBH7(%(m*~lT5e#4OcZ6z!#9QC|%LiY~M=_Q>X~)# z5BJ5v{+Qy8`4ubNR6#(HfE5n*x!8&U8zXO6z0oMp8l)t>%>(?igcx>7d)57&d`!q> zB9vtiAY?-YRo%v?{GmyK)&|B#Z0yzV_DE+-qDZ{yW$ zpRv<4Zs9GZDd}(zyN9cabsQFAAf0+xt5L^eU4;SBaf6dkkn+5ZxcRU$>hyy*0hE1U z=>#2pizWAMFl@ycEsu49#s|F6&SjkDX$U^~9HwLnyzkObD%F!Hi+W(Y5n7yVbU08} z%ICTg8E;)vnL$co+h<9eL5>Kz}P1L9#LR!v@rVq6~;(@;8iP1^Di)yb!0H5@~84*RMGNkbNv?o!o&8xQ=nI z5Y0Z**#}f;9ugef`A!h3V;p%_RAU4^Kzq}X7z+|`DoH&aCmiMhi@$*nbG!T6SwjD{ zlo`}0=`5}7uI(0$M<$1Nj7}yOXOfwuzL7rh^ZZMLJZbyirCKp}ha`Ru>>c>SIno$pm)WB3#GCXUe9lhaF5d-pni5G z;dTrT^o5>}yLg4XLpAiQeGVODtOgX^w2vaGWj~^PUauFgMd>`9Kk&L&P%?u+U} z5Tir4l7C|Y{G61r0Rda+s{zH+#ucl62=h9jni4#$Qs1;efB=mDGk?#;vmR-DlXD4(P~S!)7fDkpGg7(+2K*8>8!bT z5Z*8FLpc;vX8`mC1iW=$wo{Ss1~Wo&9Idn0&pvtCpf0|CBwY|OlP)|P=)C;W@m2zD zm?TIXEQ%iql9_a5pqbpG7ttfaK1w%70cG+0K+n(t&{m=iS(hjG^*E95Fbp(ubMb~! zP-%YKPzwjiN;j`wu0MMHn-!t!#ivEZWTR zfX~;#8HesR7JO24RI{_EZ05OaTM!MsGq&$x?muf7LY>fzYP7i#U|sr=9fK;jL@5D! 
zBGt@)6@(Dvb;g9hEGsVhE^Dg1mwZ49YjYdCwVI5fQGc3rvms@?VbBF?a>wnXeoOA8sXYI248W-jCK42IgaWFydi6O?)-3Cwy~itRH1Sk@ z_ghPq`UE>lKzbofl01kQ11|Bb#k9}p4bBO^m<;H%8qGX<+{6ElcKH!Tzdb##l3rEa zkONLXbb;Ra$9KSibOWtJ&3~O`Nr4(0!!0`iA6wj!z+CnNccvhU*=&|3^@xLBT0KlVAe}ivWZR?n+$)HepPA2blvz z$(Ow?%j8_8P=yeubnmqkl*#GQV z@+fF@PV5^z4Lw(vTl*oGs<9N|vZ=wL zqTfAyeEsf>3>l0IK>$EY(q`~Qb%+87AmM|^i82)iRxp~9_82kn0FT2w$}8Izd8iun zfN12!3i=-heH{FE9O&d!xAK2d$0GabMvI(&^p%7DTnl4~rFB&QV!iOsKik>4hy+44 zx5GhGlUl3o0@!mRJ>}Q;ZQ|Xzto1RVnE^4w_({3tN@3ff(m2;4CE5mn0@O zmE0^G5D}2iWc}ei11qS%8Vn}Mf6ddnm$fteAD&5c2lGO*mlhlZGgIlCoE{u z<2!md%-L^ty)N%1I05_e36JEJF%CpC0#Z8If(sexD+lNW@mQH}=A#C}T|c%^Fm(eK z^(k%RPN6eErSSH9$KG$`Umj9uVHCi@KPI0H4MrITQsq59J9Pu)Z`l+08zyKi4~3q{B<~dUEY5(x8i6wD)SMeNu0OEi#%xrsvi(e z!KMNdk&kzIH9ka-G~EFVPN#>C>FMm|lj2tWN%W8)<_H|M5yOv2#`q54NFy?NrWd*T ztjHJu2H^+tmk3QQM_#JQg(&(2%Uo+eVrdPmg?>;P%TvGC zeCUb^eS8_p(v?{&T8~BpgZhY)PL!%p{v!%C^6|)OPPc=a+};5T2;>Wqsj(0dwgfKp z$tOPbE&_0SvY@|!6cxQ>{qR9|J*c}dPffPTNZB~yl9-)bX05)(o=&eMF=ZieJ1RrA z$LJD^{=ck^9QEGJYmW-lyZ=Cv(_Ws6vQq(fwXOmzztgSCsAa>xMj7n5BjbmD@j#GU zM#Vyu242&hS3nB5&u{HD_(cL2!xlYg-pc>3;|GN_+JYZ+0-~@r|LIy^54i(vR`br) z_bQPfJt>ceOV%yac?p_H!=@G2!^x9~zb{grxmj{Xq2cp)zG^ntmGL=Crv!3idZd60 z2`%V~ZwCCV#NIF|RSKQWtVGGTRzDRUbbEqDE8Z;uefp;NkOq>@z?KbQ}kAU zj`yxk5Jxxj5Der^J6xe41%ECY6ZiuPYP(e#V2a#tQnj;uWv_K>YMnWaoEPsbE zKwyNFcTtaBm8A7GUF z<1&hGEJkAd+mRl{z2|#_b|sYW9zl?u^NX|pa4;ta0-zN4Gz9qd%UgB*Ue=32O7O04 z!~5T_95w)aAzKp3VH54nVSNBf&(|R9?~EJ6c9%rk{jh%KCPatvmG5PH8s4jVvIa58 zmI43>_b(ixOa=OJHR$mF4Z0^DT9P^V znL&{V9_RpP@MyKFF8Cp(boCY!3U-Jw@aF&vaj>Cd=%P>at3|dr%LF~t&+u-|;qL+H z!YB_K1BZ|=mRP@B1aQg4Q7>C6I5pBV^dl%sq84cZdhi2VY-H%7NC6m!+BmCq@bNOL z*0S%)@!P=ln&OkGWj_St!)--3fH?di2EdO1M!)9^w!d1NNvrgqfv=y?46?>DtHA(l zvm6-gXqW*p&pM+!JlstB>PKea>)X$CzSV7yBL;^GbxqV?a)o3_Ibz^K0^h5mPPtZ^ zFQ`WEhX@x)4B_u|so3zu78CT}0y%)cxMK#MM*@%|NJcs4P6!*_P=A;A-zoL&Pgng@ z;dt^1gO%laUFZxO*`iT?NwTF!^^%YbBC`cek=9ZF3ru| z&xWaqBV$N+w&o(V8Lvxh{78)dKL4#JQAmr@y!z@G|GRD?{3;Hz-H4<8L^AI}K}x&b 
zFMm*Gr>3<2iaaU8r&`swgY}pY$0NfO!C*%P?B5kUpo--ITM(AZf2PqC^A^?j{Q8h< zobDE_EZKF4FCp>*A3DW78Bh>uQn;}^GP+<;;cdU&yT9+Srt%+-Lb*nGzyh)dYa~Wb z8kLRZ0V5b(BLGq0nzt-9kQp0N!kboW=g}UFdPC?_bQg2k6%N*7djSt;e_o^Tg<^B$ZET{EtS;4RsG_mfCePvWMW1k#x<6u=422!YM=qiA;uQH(#MV$u z*X!QB4ph4neI5@T?Tl5~tXVPo>&*%cH6O|U&k-u`e1HDrxB z{gSg~1BQ*Y{=cO%JqGlBD*|){*jHHhe`N$e-+%k4lR5>5@F&{$^zwBW$nw}MWmBB- ztk|t}A=?SFl!uIWTmyD#F&s4M@Rz)@fabhY$@7!BK)+d8*Gx7uSqb=Odt9f$22x5o z?@Sy0wCnQgM>nMu<00XmOQ%NEI(SUC1{AEnRDy&vO(+|F!^458lYq}Cq)Ahv0jaMN zz<3XYr`z6ir-G#&b{}USF&@)n2wA-YB~lW}(tW^P4Ppbs@DdpCrq@9>kMm8a33MU2 z-s$C4tM3+tJObUh#cNLr9ABQ_*9&y_izC$EwbJ)Q-u`LZHaHKdp{&9u^@FfwR~k_) z&1G0j+@pCdu;C-jLxzzeg`i=>6#aBP{e1v9XIhUIug?duCVUk=tz&7r||qP>EE zlz;S3C}gNk?1{KAaiJr`GnzV`8oi{~lnsa2|D6N?xiznXM+iQx2STI?SwQLJZi&d5_CCoeHVM!Tq_8@yHY(cX5{~I_p`cVuch# zr(@W~zL&%IM7cK3(^4*^VmADqwHk}4qhDSlpH1V5uo)oc#)k1 z5Io0)(}Hqe-?AhgK8KS0guinfFq%Y?g$=r`4ZL@iQt#OQ)bY;<32hS9bgLks$FL@9 zyN=#yx;6ykD*-I%oPanuH=rzDcQ;~x;|K3UN#i@NXKu$jZvO8%qHCKE8_Gx!XDrYX zMPeXvI@LUY?BGp6cTB?SWEDp>wYq~qki9c({`-q(1#?&$=$HY>Bosy$2mhG!K=a?r zGTEJTQi$q5!q_J^;ByH#iVmz*JYcR!n~JjO1+_Uq&4p{FGZ-4_$|H@*mpy}8$=ujFo{VmP@uFV*A2Pzfy4{*D`In5 z8ng%-mB<$JDBX!fF_#4(&Y~txO$L;v@+g%HT^13f<$1E})020T8aQe);Y)vahwo44 zIPuz(?5yTIM%y%bzL-lY|JkMZIY(@~D=|1Bxr3KHR2%Nkq{{ zfYFcp?`}y=Uw2GkIj$O@A~~zRz7GQ&IT37aJcw=Lj1M2s+{K3Cwo`IK-rUn4tM;3FZ@_?Z6wyEzE069YR{ltC4uXNR60(4gj z6!)NW618XqHTrDxet_FBBnu9KlJdhap>HgILc))zaEO2jkx1eP zlnrGNIm!98$IVf8u5o401&(1O=Ls~q=b0)mxvpjnL~8-sbw{e0UDGY#c7Lm~)kwYrsTi)V+#^kf;D_Q~6JUPbDD&UB@|xe7LyG{d z6;qgcW7iES;a2a3`ZOg4k~Q*eswt_P)Z5fs*qXQc&g#4L;&HP1klmp>x$k=<9ViUJ z{dwdx6-GtEi!J+p9v48tdQRE5*RGW}K&@;$}vXG&T5w4fRyXgBWv&f9LopD0=(LleG zcn4Po@rtKOX#oQ9_xPpiMI3y4`f`Jh4kNpOx=Ml#g$1_;o&7VAuQxRIFFoV?OZ&(zX@eJcq7ZRm<)3O8W#p-ba=aP?z;I0YfPzLJ5LDgATc7uxb)A_{z`U`f|lJDZ% zHobFXwYh!^ppnA-Fo=`QD4C0R-M=_0a2~u(xM7X<{F&g0kf0S608Kr->oCdwQ zVLq7fHqjLyX(3V%Km(aUkHqa1e;Q#+rG?^z8iu7l5~))(plAr6Vy6W;4$H~|-;*hH zu`38<1(Dy9H^`Lv?2z=|u?Cdk1&N-~v??5s#dgyA^o=fw&z-7If+P8}TrT%J4rD}v 
zR;xv5eerF|!T6@N-+aV59=b#tiY9 zS#gx$V-5(sfNaDtL#f@3u#g6@g!)Dm2DI0M?RfDV;Dg$xs3bx*kbhATx5;i%q!kZq@#yvPc`+J`Bz45N=GC z&6pV!oTudBNh`<$9xNCOvv}s_#%2TF zMf40xx-L&CMQE=Xz25YKZNgX?6oRwmU{iZVrsBc3F&RpIYr^#W>;BC__hzZ$e-!eP zGVc3I2dCHezB{wGs=R*TINQES{Nr3zZf)-4=ZccJO6tJh(Ly_z9H4kaEjHoqS3Cx-oQQ?%x3Q#=<4mK`?b_`dxo!CkxK z`C-FGl#7AqR;a^K`oOU`r1Fj4PQG6T)vQcpGTQ;Q3b!$ryN_pw2S>yRX{KQUe;>ubEiZWeTr;|JH`m5X&cp7 z-UXaQwJB<6h^)P3_1aH78*Tq(E$ZiKyzWzK3QU0!eroHr45Q+0=cH(rXQAN)3HfGS%%k@ep zy4jkFTLkEE0^fiT3*Ro~kS7ivlDze~z!40;_?pvNus@DN&)m6ATR>(L31_^p&?MjJ z69B&|^(CnJ5N02IC2=U?pftlBp3u?qSkNi&Xi+X1t*v>mC?aWk-V}|&Gu6s=36!e2 zDl!Xy0I&VM0ruShzrQhpF>qnwGVV)lUkQkEOhn1Nx9`JH!A1d3jfkhix4q=YFmMO$ zJcgIzFW)@pufTa|MjYa-mb5R0!#hm-w&F(Fjb3=4ylRrqBcW4puU308IJ4{*^LO1n z8BMsnYWMFsQ3Vo{XChQ{l3?`-LVHCn%Y%S5*HREOXxOm=e+N^n_5$ba-tFI4q<H z1ML#&c*Bd*YEBM$gl&rBG+x?|_4lWv5M7^HG#XqKIr^Wt6@^B>=dUaDL3L`~d-YsD z;qb6jwGH64B^bS*M3i30?ANBtqkEP`&(Z7J?NMI2-PW$|r^jrkYZyHJakjz6jPYcY zPm1s>;T%OxD+cL`>DS`nqu#2nFdaPcbq9~xPUFiK)sBR^=va!*&z`H3NGJH=Q!Af` zUe*6nj@fQp&bW+mfV#8|Mt{dY_cR-Kxm-QtXb~ly>WxG64~yXr-QMq{ZxWBFAyGZ? 
zS5F)A`KfHCtZ(xLtZ?ebSz@>HII5bksw%y*mldmDndb$@<+VjyG%;SU>GDRu<4hCe zeC93cJJ;q{pGt%PEHJ&$@_TpUop}4J_FMl172^-%U$-DvcM-9(EjNZH+*S04vFl>k zW)1F^!RtStL52$MD=9&|DLvUlzigF%a#kVncY&F%ihZq?V5YtwXc&?yM$j!R8HOIgc+yP!GhT)(Xw5QjqOiQlec%4fPgOfOC2 z5b$DD;K`A#Z?@S>mnui3{8K!M4BKA!So*nlSZG#ihD`> zPPkh5Eg#|n25(dymHYs_9o;9}3zny5cp#_SM99Eh%TZ0P2xsGU=Ld1LZa)*>a{su$ z$5YbAOh?_Z*OqC>ZA&DV@6&Y-J%RI+%g@Ulbkse^rjp^_H`=rmtRN`I9rE`lg|C%9 zIoMqP-sWpskNT1}{3PB{`$(Ye`el)aJ7!ad#23KrBwV?i>uF0?0X!bDL7E4S|gm$BX1u~R*m(gU;0=3kphTR1ss3#~H{m>by-cuokL5PNSwIbvtk80TEz z6KrSL5q-GtLtJmW<{KgXzMw7ni!(|k@uTmcgYajgVn6YDIJNXhbqM~>-OnSGu zsaACm`Zlc1V&R;2CwK^j1(q+NU}rJ)<<&(>Xl;Tm@kYu{Bj*QVMfiGwMv080FYS>U z_K_^}LFpCr+nPg7Huu*H>ly8L>7ERjRzE-Tu@8lx>X9J{Jx1Z5Usi&Ge(a@yS--yo z2Y#mSJxr5d?|C5-remj8`Vb*U9yVS6zFMS(rmjCGjULSYSRUMiUS4EE9#YW1cD}Ff zd~*WLe}(8MBF=1+GdHiTd$r`j_7UL=^-S58`zgnar$PJ=e!;9u<>0o4j+k>4I9VPyu5wpt`vlyXO>$uTjn*F{ zFVOZ{GeDI!mi8=FJp>%Lrkh(@DmuAiBV&tDYJ1DP`pi&iEI6~eZ>n3s?u$FH2D&Mh>_x%(k+X?HH+k ze@TzJ#Sz$*bifIY_>Q^{DcoSICf<^S-$VG`jR`2k$>mb&ntgtA5#3)Rf=XwA%b)HZ zNRM-*!<_rpq}xlh)H$CPe1y-~@asxPW6=4PJJ?h|&ntoJP6NKrqe#a zFL`vtoYg0O?YbdFjZlraW}z@y4iD(u-}HY-%Rj<^7WO$}x$#9*&1sY`h}KQa>d`H4%32PCdE~;#TJ-*7vYfkbGtA8+& zI}1Oah1C}VoTC343`a! 
zG$W0Aj0QvNB01Y{4%2&Wj-e!trmrNbvALSlg)}9(by~ed*h8gP3XuxHEkuu-7N=E9 z_JxHso#@coA zxi`p%UX~DFIuNKvYL&Z}lul7kJ_|MX9sXpSv@@6V?!r{x*E#fi4~}jYSMZG>#&>Df z)thMX9NrWmO$41Y8(70;QG7N+=x}-FUfdf>ixt=vUb(%K#kcmZQIy;ztTnWrS}86+ zw3CM(J2p4~GZ-W`N=BO<7cbDdPX4l7*L=X#aP1EDrP@rmZZ^kDvXQ#Ny9Y;2KK155 ztgyh|Vge?ZarhzS&8^Wa^NG0d^-HRPK?mK;pN39OwlwlI89mWSLT67>sD{BfiT_mW zTi9W`J~3m%$wlrH*2Y@>c9L(;p8s~SoCYO)yYB4IP$k@2JHR}N6gg+foV(Sb%e)QI z0me1)Bc}&rU(MWr!dAVu9JMkI2hWwn46$jnmnef62M;!-hsS#OyN3>?FDWs?#mD5O zyf&`L?QJ`*EY1bgSn+fV1(ms0v;?X7^Petd+CVC=%M2!E)$&g29XV~Sr{5vGA1C>i zqcQbL;x=8b(RO{{9Fu`&gZ-{=sCoY=s_#;@3>4P^pK{8>rAM>0b6Rd`Ocs|vKiJe+ zHrc^D(jU!ocX+;X4e{#au7k#V`!|mbIE)Tr6oJMwaLWr*9bK*NN7T$zrF2ixx9O{DX8@z%;T(&zWWK6&Nc<)9?+_ItTEdLrlB}T2Q z{6^PC;Uf-nVA1BuB}jx;=XA6cv>eNE@7JGe+AT$Z7*rtkigyC7*|nQ0;k@3{bC)`6 z;&aZ4B_zQ(Z!;_x_Zn*f7UT#VQwb^?p*4K1`X7%4=ElZZj`kgY8g}CDX_<=EvQMwE z&gALK;bA_3aZO#xQ7gWlWy4zBJ4$cl$8^26E zY}+8mI?snowL0~u4xxa*D@``5u&}THcK#l4{3W(>UspzlC@NRfJrPG|0$o7IwqT(S z+jqNn0U6AVRZk<9rR^t8$yWXGm}GqMIK#v4bvpI8JLne(MMT#tGjhdPZ65NMV#QYy zwI7P3_BV3_53wr9M2)VTtXA}`Y&aqnDoq4nBVWwH?7o5KWbw5xq|nr%xxBa5F5Eji zMmPNJyypbRXXA^QN1_)7%+A1>rS1bQQK?F01mEAr4AiGlq?`N6INu-Eu=!YU`1-{( zk@Z@A=myU77IKkee1-319s&F4_zWsx3I6adJHvw$3}o+eFV;SVD*PPyG>S!MQ)Cut zTfZ*O>Gz*WfGkY?8uE%a5^(}s9Gdn^@m`L&jB4D(yV%M8(}W7KFlnYv{e5?kkl9m< zN*ar@xC^)3sNzHT0O?eLoJ{DNn(dSeE&%Ek#bB%Bdw;6R&(X>Nn|X7~T#k0;nIG+W zU6sStPPv`oEjtd^Plm)kI)9JZ*7!riah~KuYF+iNcCpRbN=|h$25aHXpQp2M@?-)5 zv7cOUtj?bhQP{D{JzLp*Ta9RJA!d~4nV8|Z1G^|?q5C9D0Z7Q< z3nvd;@l8b#-zCLcXlQ2ax<6m;J-iTY2XG&rt$q&97JJti$i7|BK5KG_{!YJ4J}o3A zZ0<6jcdd5rA;rVH72xxCN_3q$pW@rat4nvpq?NPxksmj=#W=H`Bl4hgV+?_M^%?iE zH|H0mfMpAq_8qH&`Ln*n@fE=m8g#p1@Mj3^2PmA$JKA!g@?o)0fy?X3cYtT3+KN=G zn-#{Z+p$rR>xL?JUst|n&%oF_lnP&2KQ~QGki$f740zi3d5jx?yy2L%!x=Qh7OZbKFN|uhQXjtmh_V;=<|EtZxV^788|wz$ul+E3QausIm$-Nh zt3AncYHY6jwC0ocXsJ6$0dAmEJS`Dh4|Jk&(_|c-whyoM5S&mT&c^ZPFpyO-;xhf= zWQZ7IM~u*t-W10+@so1{Gq#?*h7knyVv*_h!S_0fy4S}lDcSo8l^3K}f{hgBY&{=Fp>bR_*x=qdk^Id4;OSgT5E}n4y 
zL6jU(^?9r#LnS1EQJaWBM_gotKZ%lHGq7}-?*b51_(Amt^YZ|NbmKA3&aZmeo)M() zzs$D`TBL*Dg0!~QtL3p|^S=4K*x^u-))>kAD;?icX~qT*Ia~>@o{E}3Z~Fve7zU>R zY$#rrZ;$Nz(lv?{*ZF8Z_XI8nd{J;-c=#H>MAeNX{{=tmeU&KlFG(}4zT8-KA&E14 zXdi_oW%rzPeP$(wb$zgZg7c&Rin4Mz?crU2HVr|<$HMs}$|c7@ts{1PL=@UsN8a?K z3G!W2(!tb+&;`=&ym2ib*eqq2l{bSRvMc?XkETF5O{eYG=rzpG$9W{|eP0Qx`T6r<_M~ZS= zoBLFpngJWg9LK4XwRfLCBmb=#M$r-Mu;N>-GE*G5DHQ; zJY&$s4Wt;zVez#^o0BelCtufmS92$8Hhd81p z&JusHv~s{tSoo!ck|)$DC*w60=MDfoilpt?db;yDl@F*KOOZ*`mBh_WVj2N&s)ww+ zqdc}}6W`EI^hhha)%s~gjuGX~dU^N^vD&l2U`ZT*?X+0U+5c;oC@*kdxEvOb<~=1VU)P0$=X&=oZiw^Ob!Rq?qq6T)~tMJcZ`whzb$BL1GzC=bA922y6ufL;!)D zNsF>>J7~HPQLb`O;3zbKcD&6ql)lpnpAEP9bTeR(@ztR+Q^Na|W&s6sg0&R(1T~P_ zP-rHG!~Q+LN@;;+ZT5$Q3$Bj{H}82j5#jv9m#fXcr$@02;a0bDSIn7E&uZRRM>{KvKZ3p1Arnj96IPVVK8xbv()a<4}~D939Ht zVwHW0L)`>9HmI}`oGUP7&n7rMlIIgboxFqc{X%C{V2rbqo;=Uc-Zd+Pzxvrx<`Rlu z0I(5dH|uuZ7H>}HF=HdISIQBRKE0zGqL~FR4mS}cf3BYL+LY$I%sA4wb?IO^duUq9 z<{VrE5Hmk@J_GmK#dMwg4vgriqoZ}8WvINIBBfa%T_I6&AF&6Wi@o5my_(`NIY}~7 z)iHao<2OCI!`r+_A1aj}0eCCAvXUZX@ zrj^QYEJwBCC5yyk*H^$7TlhgRVZKhje}81F43FH)W$WsDr@UAYxYCMiPI!!EJfdrc z7iy}s9O>nj- zJ$@cb37u_({K2)0$Hs>(XbS3Cjpw<(SUp06j~4APU5r*BtOe$;$nl3?jF;2n9BE=q zI$Yx3ELgdCET98~@Mz)S(#D0jGTT1yY6tgu(gORkHNXoL%8ROQzj%gcHDQ2fxGc3b z5@zcTKwm+#aq3Q^V>IKJZ<)xdtxmO9%V~Zzr5v9+_M~9l5myN>>(_w*wL+3BC9)DN z+Q(K+y#@ojOK{?tWSK4W$D!xQN_o8fF@71sT0Nj}P1C3?eHYKN8GCfA>xqwj5!3xl z$pHU)+jJ@vg!HrRuCBYfbe(e>$hjG1OTw#-grsi~g+H`-tKUNb-6V77Owepv4aR)f z-^teC>mg|2-!88URfwP13={&)BVbWS>Hid8MOO zf|hZABTXsdGen3Oa#*+eQlvi!Ji;I_b3|~#AI3cN(NY!MB#eX z3lpTkTd?>H;oUj!a*)nXE;0WD0&}}apPA)AWW5D3L0ZD=`#QisqiNisn@elH)27cY zXVadtDlOl!F7(bsrhDNrOvbLNbB6smEF*MP5jgK#Xu&Tq4aDV|_~>O{whJO<&yFxi z0j{VXM6JQNuZE!sV?)h|$1ca5JQwcmq&s-w>TKy_;&?aD25B7>TwXkBg0#8^AiSFz z!M?(!K2rF>2;FdAkp=7>j8#^`>mUE2$ImU=EeOv)&iuR-dNJ2V`n|a|q9h~nF<$5> z3Sct!ILlVTTBq-lGx8MU9_4ir+3m__HiEX($XC5EZKuhW zoADPdmf1fa9YUWSCPYpqALUtFmkf0}`tE54R|et7$W~2)j)TkwdH=fs)Z@iRO}T5L zYFvb$|KwgK?s8o)wI!+!H+xfXM}4x!UoWyYO4h&|(9(i$lA0;oXB?a7HznZeQT!U! 
zqO%_jzoXQiRlIt6X>NkNvQD|1S;dVIA~=^~4=^=t5zNZBDT&6kjo>zMynLfF7Q@M@ zf}Ak6w)63PuH11g;ohsl<(V1nG;TZRlf?6HF1KQz5SVAMbvcvCLJ=xM?w7zJrio(G z7F<-3hUxSu_jMQAwJgQrl=tY_32Vijy5M}_pchB%zFlaI7BK?!@^Pem$3-c=>O@HK zoH+>sDpd_(C*_?KZtJKaR1=3Go!T} z6!h)zDV&YsTxP!XB)^T5ARs~boSdy!Y@;PzIoG50UuS z59N@Kb^I@w_%y!qy}ss3&}+Rie2XbI%ffeYr~3!Lb3mA_;aGLWQrF0aWc>L_eqP06 zU};l5)|o1i>O?BH1Er)m&r!EMs_Pj4RZY~O&Ab(_5$>J%CE~6UNYA{y|+?)T260R;K6MD_;x{^U#-=P&mb)MW*5*j?jwrkumZ9fK2<9l%nymblI z#qcI#dd^Rtz&>%~fTgUA=lec7)qHOl`)SL)3fFQp>;Z6J`o=JgXzeE@4W0Y7qi)?o zac>{0^?MHh7xptMR^HF#;?LY8X{V*TWidRu{TlmJ z8&nZm&gnE62O?q4wV!-so*pUG#o~Jky688C;|s8u*Vp-=%1-5<%yo0ICJrf8o1J>> zJ%k%t<>T?AJp|n|(TwH@$Nfb=FWUms9O{zyv}A~&&9L#Gam^)Jn@;L$+aN+IvT6?2 z43Y)-EV9bY$9yY+!q#GRhq4KhE7an5ng$ZL^jHkfiu(PJ9R_(Y;m* zHYP?fW-JrZCVMNV#%=iQjTqSEfEL@m720Q^Zxl!sAT{@e-d6~k@v}Mp^RBZ$uvOaU zH5^PN-2QPoO35hN+@j2qrJ-kB1^c{PEU@&fy+O*Fk2W7um4(!}uRcuxUr2Hv7u#Wz zxWyTOXG7h*V}}$);QQwJ+Gzq_DRzmj4s>HnLa&4(OZlqU4PBeP=hUS(2P`8y;G#BE z+fICFSFBvJkUPF(@53SEWXs7KeJhI}ea~1K*aE^9!-i11fC`ty*JyopA`Er*4{Wk6 zD&(b9+H9=vZYhb#>?j_e9(GUzsRZWCs2lQSZR`wXbX-+SBS6GYROd3}H#{V>r6OX+>Wa#-67U zvC~N3_N~kZl)0e~zJ-ysQD5PG7WLkxsZsS}!ZtRIm1SkYO}p}jcD0l8n{g*prnIB& zvtXPbg~0hZ6q2olcX##VV9=H$n~T}PdiMs{kJ{Mb8YE)$^_ts2)F!a_wfUGd8>##z z5#COM*T>sPDTmfQ-{5JC2&i5>K~+^&rWJn5N#O?m6IH+ScnX^9D!ny8aCK{}{@!iQ z$-}#auhIKJocbq!S&o!j2^C8t2EM!DD6j9DXnucLkS})U+>~KqgOT0paq+^b`d(z* zZOgZ#ss}eXmVsCxonW0C^1sIIQ=ywQ^@Zf(mj+`TOu5LJdpQ#d2U} zpPNc+UP!XPwk<}L@{GWO3u|&jlaFU z(}*;-0#Q=w2ek=u-23N^CZ5MEgf}j=D>tX60WGNvY)k{T zSbE%5!&%MR-o*W|Z+RBJbO)W6g~62Qp?JJjV58j!{XNAYZbmjV$|y+G)05kaD$}zVzm3K4p@dAedhZ5H zl__d6NzHPGnfJSiWN09^$)Y5>y_T1C<^!Syr! 
zmgGe?GRU}Z?KGx2t{&$dz(xX1k#r2`y<1?|_H)|o{HE=|=&Mq5$4DIW5rGWQxS@iW zRLUN&+yzkJxTe@;PC2S)vD6H*@82@MlJ4Xbhh4Y3F@#ua-(G?NQUl(9;suoJXb?36s$#{xYiTSD`9FtV7jt|ePWh#=9k%5=Cubks z-&}+R?d?`;+Ky)m_~Yck%By30Zb;2=G~ZoOo|OSoe@>MvFr0DdnMVDljWG4$%-nE~ z_-pZ;QzbYj&3e#iGW5PWcXb8##^n$m#Nt0~rCoX#KvOEf=8D9p-;hwrYBR-k_H6AI z7N=shcV#qksCNSBixhjWmw0QeVY&}ea`cLlgv>C}t^bs(Hk!C73^uVz`QX`-!(Yp3 zsOhYOA{!eLz4C?ccrl+A4N3lD-CGURkE+dBQ4yqc^E$1JZ9r>lY1HF z6^-jlfWhBOu77qC(plShlJ#x&+PeWZ<}urh9nQs*cZd^|il;_bPu02>Q4wsG2gvod zN3rJJZVTDpZhL*fC`we+X>F^>bwzX^=vCYr(4)!$^}6vS0RQ{){Stoq?SzH5p%7e& zH$@J`idLBNB;nLW`wFxT%1ZsB1oof#5_wBX(x!M%?~TdDzOeB4H12(Qp5^5^T8s=K z_@)5Q(1|SfmW7du$)P$<2@XHF#|E!O(-WnMo+gPwQ6XN##I4x^_SMOvUu@GT_D(9C@t`h_8{?N6Y~a6&tl%kvnbzt66TuqMC|;UN3%#MK@)But z-*E_7F-<_P$iwaTv=MR}Gg0lbJ8Idv&C1JiwuB~)ovQ^~XqW^jKU3z~?dZ=;c-9_S zgb4^$g*xoWX*Z5kjlzN!s`$&-5+1c^SM0AHD;Z-pth(9bVE*;6S@&S(UXTJgOSfPU zpvf89#uGQ$2HqgYeLz7Y+B1mfpJjEb;ar-$(TgS(lCAr_?+LkUi$sv998?^DZl1ND zxE!PH71I-6k`CI>Q7<+JsdbOhYMt1FHPq_%q8&2V6Y8EbdrAzcy^0|I1aCJfJ)x(j z4ehIT&9WF}gS+i_aYnXjT1}WF5)YOv1Nz=}N!h5t7m+5K^-LTPql9~+A|fWxClBUs z6rlj??l{hoG{j!1?d#Bl?%q5;hx_p(onfprF zLJtIc;0wuHmDplqrEUqCiwL8MzO;%IFl*}8H`Jq!9Cu1?uHN z4C?ENcvcdhU6>Dp2`~fSiJq?hSi*gZaAk8VK)1sQ9%u_nC2-?R_li6Gz4wP{B|-rs zRf0%lo}W=dIxmZ$a*w{uH^V06*K|qECbS1+edi)lA-=QZ#j{pXtbl3_t2-iP2z8A{ z_(9b@94(u^gYmXo2B`7HD$nKI;6>~_Y6^{OW=j=xH@B>?m0nAW-a0--O;o;vUkfs= zfPI;ma0~BX46l6ZU~D={0D+z>f+3joi4|yGV}S0uO%tO|x0ifA8Y`90Yx}(K4Js3} zr4@N(m~}|~eD|v#DJxVumk+{B5`iOxJ5ABfIlPM}#nrb$$!_U^MP=?kc^EwIe5M?v zcllYzIWX{+!3_R&;N1|avp+xMkW5O<^lpSCS8b+p(bc5`6;Qkb=Ydne=FOBb!XCfD zgNWF2|4Y~%D?rnLpbHNKj_76W6jd{vTdaJ+&O*`1d$UOt6}str^4Y%VG%;y>6vQ;h zV;`X0ezj1dUQgUwmX~ILLaQ1)XkqJPt(jYMtL`xH$$+l*H|x;%x#E;nSUjp(xRl_w zO=NZ3_z)>_c&)&3Eq#>$y1XV87{Tg|FRt~no<_Y)HYjXBep~^_05s((?L6&IB$MK) z&BOorGa!+wkOoyPw3mimb z+P)svoL-_or??f(#&1(iUoEI_PnQ>jSU-X_yjA4 zZn3rTqTi}g{3zq7Tm#9PYsIY-!jcDgirU*^cDKFu`p&Fh1|BFut(5NNSG{kcA54*b zZ?hA3sGxPR-|Ev)DNUq5BTEQwig+#Oo{=&y<<`Aj;c_T+3QK#S)3FzfP<_@BtilWz 
zSBvV)WkQlc30De11^JNtN%wA8$5j#>V^crTLpUeWmLLvze*D8tOi$(g6$n{<3k_V2&)lT107H_+(v4YGK&~6@y|Kz23(tWbR1x$ICO^ThG zcOb75sC@6Y9s&gMfvzxe=$NtDv-XPjmI33YKM%q%IqjOpnS|Y{6;N%!rypfL30N5r zcq^ucS;y4NX13fZTn{Lw^c;-W6im**hoBxo$sYuZHZttq?GwpD1ih(QF;JIj)nr^( zHUZzdstx(2GVDa33j$(x)}sqRf1Q9MIDB&wOi z$CE&bfW(VZp1NxQXiG*Eto9seEPefU2t}zdn;ZN>4v1aXY2$y6!HN@zq&BmEH`Oe? z85Qol4~l(G9OKV(j<12;j3Hdmo2VbUz4QkZ^gM+^2eav{*l_U6fDe4%f$~4jgV&u1 zr34IJCGg#l?t}k+R&Vgs=#IA=noD$hG-Wn-d7<*o$mE&=i(q5yaq zf_2!SalbE78DMu1%5x`h>@~ait3^){1Vq=}mEbp_irtyeI1$05~dXrPMCivRrc!^&l5?CCQe?;TAy>e4#Xq;6TbOd~;K^YJv` zPXtW*A)K-8?YOnL)-B@3 zKCukkJ!py3Vl*t1NFn|_y;OE9gp0Y+;PUf1+T2qGBMiNpy;Uq2MYF?%>IJ|Lr;3G> zbZ<_taqI^FK-X9P&+N5(JaOpjy`gayy+%W!ampT&crua5*K&JL<&&L`9lu*gBOC0i zd^znsI~$)(-cBRQKk4@TD>E|aNn!a|k7ZqnuTKc{;6_tMD9b$=(|;|1eBA`z7|m2H_`&Ab!M zn+=y8FOKckHV0l#c6UL`U)UcB_0c*gx^A%TtW2o7GBY;H4x8=25?(@mbgc-RzESmJ zk4See;aBX&>UTtY#PPPc@2wvzg89$po=PH#fUE9*_DKd=?aLryoyZr$t64MR%p{3g z(tgESWjpupxw&G-%}@1k5zi)_Yi8`{|KirHO$S!+>Y+5NzAYidlqjx~v2?MrxG_`W zqu_>~)al%~)pM2VD5X7vOg!<}i4Nst32mp>Dn5$(SXqBI*0PjlP)|YqEw|4d)&uF?zA!E^ov|}}|RdobT;zRIk znOK@9pl2VIg8gKjv|}ViI@l)8Uk3|(CFIc^{_1e@cuI*2xBa7rRjcbQVK8K@hn|~9o9d4n%zU}`oUpb2>7M-CBJzRdP)nwZ=c#p!kzCd=(0khgMH;wVdFLQ&I zpz~AU=0->A7RIqpfBokp3_LuWANMzJ;ZOPBQWq!NBd$!yThxYB#2G);X;F7Ldom6k zg~!k%rMNBwKp@N9I=L*6O)V`1w%_TJSph$h*m1+|A0HCLMb zC~8MX<2Q-(liyr{z^(w>Qr}C}JIB)1NWluoL^@OC*a^hDb|mB>`pYFsZGdgWfh$$? 
zAl8lX%wCQz%aCz?CT`V^v0d1wubrI+%z=bE^9=D+{KeY&M{fe%XJ;@c;Rl;TL1?3$ zRYsHz1pRSp<6A7+1t4f`S6}!tz~c1{=W96Q<>f^j>`5LeGA*s3MG>jk&f9#c?nzA( zoW)~aNKOa2Dxdhhebb8_jEuqNE>}flCATBt9Wjzi{}jQ9Bly?CB+c5r^j8!BDG-PP z{MSucPBaW3eDt#SBEFH5{O;z{LF}JCQ6{`rDH}_djvvf6llp8Xktq@TMIz#sQ*WbB zLnyK1gv#XOQ)S6{F6%nraDMgkB)nBF(3ulfG@{b1bLU%a#xQfm=bI6`vk@+%m4F?!%q-@@O}1qrgkuyn{`F z08>gfW7)62XT3RI#j5y>xdOQ60CYoMexa4b?FSf!y;fMl9rTXT3>E3DNBn=OGX@mT z8jbFQi#`DVOaW|8+VG1A!526rp?>wrYVi4q>e@MCKfvU!J|p9`_h({G8+gm?sb&7O zynuIh=pC;TW<6)aWLspTIzL{-)Iy9;XjQo}Cuvymun`xUyYEwKa=0InJReZDI~b0k2!zWm_~&_1S-(^C2voNeS{@rN~W z;NP6ThN?(SAC)|m*DCwO_4K8SHy$xl4U88hHO-x+EthrCnA)cNHlh?yv{e^dD=m6b z#{FXNRlw^^mW?RT*Z4|4aq4j2Z3=aji}xwNxzog#8ufs;L~!oxzs{Y>T;eVO05w@@ zr~ElXs>d@=U)$ZH+#p_MdfX4@Ut>zdbEqz?qHDZR$P9D!>I>pdxx-hymm83T8Nz7o z46$2KUcPP4mf+1gV}_}rNWhqa5-;87qj&CW7I5lE>2KHP&B34=$-ga4XN8Qk*Vbdt z99Z`dKLOrAHZ$*$#Qs?1?e&|SFyi4TssqWI|fq~$yA6ECW(ln32kl-<;y-|V`s5cV;@K(C1yuY<|8)9@b2N>+B}(&RF} zO57Cy<(TNctN;ILKHr`S)s1yJwm@Tel&j>y^^#>~JJr%#yjIiVIv0eebwu8BsR}Zc z4yN8~{(kk^XD0zq%ax}Nte_>KZ2CUhAk%PsSro$k$B=W zb}8bqZo%fC`ZJGejT}eRlkrx&HRcw_*lhUcP&Wu2K@-|+gFI62z%!?2f1-nEy?q+y z+#owC*TM3d`{NkMA>I7HgrBma1X4~ViMK~Cz4pwLK~RH7ng;)Jxbnp*_)B{Mzw)*M zS^2uO)8H+1B@!z`q6Q!+4Y2o*pMUxF_x(%+mJ0}89~1}x_#vT$8m{4ltrr$$zy6+6 ztoVQbKCTMrRB*9kQtT=eu0)$3d+`pI?*ILXaaC~ZuJY;t2kS%fCxa^$6?Jp80AT$` z^wFA~qQ?l6L5VSdLREjKS(p3>;y$?7d>*?8-m<6uy%(3q{X+J;3CU~zlAOZ{m#V?r ziFbwWwKc?Eas4%3Ge(t53OMN}o+Phuy7PaRZpffZ4~N5cNTJ~R8INXy-EtHTPX^tk z{FBKczE5|8TNQM{SIE-eyi9bHR!jK5L+~v>)7U-)a%JX+yhU- zIi+mD41s^ma8tYxP+6#9h8Q0I^`2lIsenMc}SFl<%MjFFa;OfiAWGl+DLHSHvL zZ0rw=5$$wd z@=aL@)DZ#a>ad<{(6P%V1U4`C%aYcT!A(l4#`w*<`+l}ph8%o<{llxmB~V#>FZ&VV z@Pd2f3#R>C4*Bb!Nap83^HMkatTGpw;!%)s1JZ3$8muDxk9~vBWCJ#yG{BqW8CUJF zmw{0<|1t2=#M0F*aN(fNOCF+M9WFP-9tQ*dg1M!Mg@kF;g~eY3EDX42U=RfV%>j^7 z#Tv?J#ud;ZuPS&7n|-F{NV#C0$=8K?&J(}^{BZ;0uZw$E(88aTC321kc)G`oU6RrS zBDwq@)~Kn!s;NQ(NWna0iX`@6hF-SLhz5eL{8{R`20;7Q(J>rK)>Gp z8^)3Pt|_=;@D!-VH{QWGa%i0yrue3Z#EFAfhaUWnt|!BYYpd_ikle+kmg!En+Y{MN 
z#>aK>c)_4F8s(_eG-~jw@?SHKk>cQ71k_+@lE_|qO%ssa6?ks+Gejs@j>3P=*n=+m zV)-7pig1FyWW{{(pu*>Il1r8wsz-&&3{WDV2{s!`=i>HsAPzQnj|efZMa@ z=xdDk08(V-qGP)Rpy!{Hta%6u>S;&s)YU3N$u=5~h@kHo2Z0TNcMAS>1bjbtKD$d+ zD@n8-5#aW3Q!`vx0B>mhuQc;3Dm(;Ta-c*)U_M0#Ni?+TYMQBg&2A1r^N-17Lq z*(JwqKiL>aOOVtaX8}Ru9KR(3N>;meGN5-Jf~R{^iD)DDEQ$@!Rp<9uf8faPU+XM zlpRi3NV^6=S2Jl(3GS7T5YNGU&;L49-hxls8ZVu|YZwyKN38C_hlee`F4hBb8M64T zX1}cb=A%`GGE$VDKC=E0Tmps#vC*NTs&_o%;SGkjhMlqL7X5>v1wnJ9P= zBF;eh3<`LLQqJFie_VU=h|9EKK5zu!MS^$jbv&qG^XLG*6;L|=M z|5Y6zXey#J-a%cf!s!S#E}QCPTA)Kb4m|Kw6aUMJ|KgPeEI52tQ$s(UzbA}kU|7Eg zN?Bca3aIA&^nVOu)G=SlH<-Sn_f$z9xzVippv5*12Uc8j_}9^nfi$^Hr7x|4Nw%&P$_r)eLOv0-%$ zwLkO9AtorGak{^jB8rMThP%#5e)O4@?K5S+Yv2M47{T?|h$l^MT)PXg^~q0bD!?-* zn%0Y7aV`L+dHwh=^A+d0x zJ7q20E3Pq2#>s=9D){TCj;ruC!IUi5yG855RBBpCec-9MC>(eIvu5hAVPeH1%M~Oa z`hwai8m^xpe8+B*20YXm%>D;`OoQSa@&-2@tpb@PseNPoYzz`S_)7EFiUPr|2L4T*tMd<(8t$~{g+*&(Q%a)2YV$inbfN0345#=r4-mR zNm-Rlc>=U_G4?-Rfk6=c{r^G))O#*_A3hi9XO!uYhEuWIC0$J*J#;*F@6Q;a^V$2v z$GJ?wYGk`2z_c?`e$03J@`H$7>Gj9FxfPb>4h+9*zycA~)TLiCJcF*ya(@TQZ2|V2l8As{y_&%09wiR#|x%_?SW?>(x?XO>D6nLw3xa| z@^(CMLm;K#`GblsT`%LGY0L0S9yS&$!xY;!)GeU#J5P4hL79^RAn$)(G6fV1+$N=tJG8J_iQB{=Ek< zSVNd}jy<~13q9LD^FMU-Z~W@9k0<|VaKLkGa1;11IgVeDXxHQl2{~B2ay_gDJN5O+ z^N0b%fWzh7NdeE3fRjl0B|Hp<{uU{4S&n27HCY;0FaEj@*k@6xNu9%&Qk4SC3nD; zOHN5^973Giq}s2?&|lMOg6XXJASZf2qe;2ZlSrudAaWDXRj$EbDkNw{36bsBdJawp#H&$};P{P={pFZ~%C zPV5TqjQGxFku&#SpCP||^>mo%@#$I7YF6cIFF&4Y?R925*q)o;j3i39>q!3GjoTzZ zkCcg29$m|_!|EvUTpo$x@9#@Xt8M!Tv+H`4(3Gl^7x^X0LpDgJpjc%aJPijESbF4C z_Reu(V>al1t|=0_r1;+uG;yd1&u)7v^5L$glMzQmWOe7yr@h*2?jIpt2^z~MP)Zt+ zq9*D|UdgsH1O1yDs1>xBga{ebn6<>ErG?o`$ncubW*X~x6Yho? 
z`I`3-4>oqW@*Y{p*o7_P>#f)1kWN;oW~oE3?OMF>X&uLjlX2oDOYL7wH5Ga%WRF6k z+`k5wu$f=XA^PxhK;!m8+x?HuSq=ZCuNSm(WqPehzgwFXB z><<>h+gZ_@L~Gx0eakVHs94&h-FC#gNQNaK=+G@l zAu>h6;I=7Jc<|Y*T*k4)-=~q6CHkx=rD7ugbNs{v6UXz9goTW>n0?dQ4oYaf{MYUm zh>$=ZFH3rvds{&m&%t*ZfvN7d8$ZQtD^}NDoOeG9d5E0;_vcRBSjnw2Zz;E(b$FMi z+YozvCKYBjAha^mx2_88D({!nU5nA3)?b&R>PI(&j>8q98cp>ZY8_IYh^H$F`SX`r zaW|7Hw{}fY8B4qj$#ZOwlPYOjP<*^}W7YRaI%wUZXNMuj-RvNv`=GW(z?sL@cz(oL z+tFN;UTPEk8UyBko2x85(Kk|TFxtq(dYvaYw@&}fYv+aFi0ZA*gyLcqdt40#Wcd{F zKdsAGy`k0y7GdpXhyC|Zsr;nXXjl2% zGhNnPKcDh$)Ncs6ZMdUTU}IXaK?~`@|J_PMU`)b3*NU>6+1TZNGVEty9#ZN9ZfpKo zbx7!k^nb&`fSoNLwy77|y`3|VDJWoF-CLdAwy%1(?n?>D$p1Gk)wNiXfPF{Bp;mh8 zJ^WT#{@U)6xY4TVaki(Bu73UhcA~4reM+rOyvJ@TOc`~1&dSNwP%!F=PID_@(0VjzEv!;{G*J*Lsy+ z;1Bqwlh^Bc+0z z>noaTgPqg)>TJ~9tPm-@im-Cb%LA(T2?F0NBB_H#92|!)eAuWp4W49e^MF@SI)hlW^S+ zs9x`D!#1-r@7H}gEV0zCNU3-=sB4ca1h1IN{_hM)GSnB}c4svTJ66>9>1q=-Zy>>1L;oP9 z`SI_wZ{WVDint}IbsZ0ntmBy=4*tOGL6MN$t>DHcoymFLwYb`$(XjV4&fQeI&I6ih zU;dN$z@%e>+MA!al)Z04N3%+oz50KyoxAcnCnlm~FgF|xC*iHH)#@FN{1{m|M*(Z~ z`je+O0?cWY=5H{JPi$VtrayTX9qz_cKjUs#v4aRZal{$a>k{GMPic=9tH0YB{Mn8$ zWlMIpySzUQSf``dve)B)EK`0=LlbRtoHv{w*2Qdc>uvd(!)NAoF#ssDzEJW3(+&9R zBwmAjg99>co5}LFj#k41i2C<_o_yRRfJ$d-dy5ahdr`XxOk=Bm8<3zecZc76BLO31 z{%gmwRf<~cDh>O#+kzfT?e(t*<1@z@ii|kg+wmM8bFnn)&MS|{&B1r9DtO;@I5&a_ zz&3>7{>>kQil|BvS+)cntEiuP0 z200yC-W7(sjj6Yly63{k6CdrB*MroyGoMD21a7X4{IDsJJ-p_8z4>Gnn}@C>io#Q? 
z44>lfhoWABI$B5BXP}AkuD>>_sxq%pys+$kSI?@j1oSrRy8;d7R^6fYt6v1%F4ptG z1P8C3$0Q~^D_89)Z6NO|owD{uGyM)^ZML9+(R(=+pn`{RZ-jz`_cGEOHU&Dj@kL0j zE=!%DPJ*Cv`So+H#^lD+)cH>27w+qRt6)ddd1sNut@h-U-RibRdxP7H7hb6`fvNR} zGB_X5Banm8|IJr}RnKtC)_`1pEaa%dUHz-)yusaxs@wSOfQ`>-=o#PBjtj7&!_43b z2(*0e4~o>$={N{-N_*Q>0`fjw1GQha6K;#F*JfS;<7FKnm;d`bTp*PC+)jnTxM0)h zsIk4tDp<*M-93Q&GE)4Zc5GkX0LgC*zo`oJR085ZP(qr2t5CCZxd$mGP0MiBUCZos)3*;3* z3l8cZ5WTwgZ;i`nIQ}e%Y`}HuY4+&(dqjV0FHlF@{?zTDj^6%Z5Pw5v@h`6i24)EO zeG2|VgPxnWPg&1*|M|RSO?mtL&n@rrAo}nR*xmlvxQplWJm_i=WSAI#4xxXHfz|We zJ=c4|M5`^Z-W4xpLjV837XH6r&;RNJ=n@+=9R5#;=*9j`hqv}OU;6%h2BfJBAHf@0 z8#*}J8|hj7UbE3RhX(=)83=!`aB~9`-E55r0je_k=0*mN@BjrTeaAn(5wp@WF@guE zm>D{n5;8Hcu)_mHjm%6;9SIp3*jeELf;N^m_6oLo2B6kLM$TpiMk4llZt#H5W{wWB zM)ra>R<<_QM%IpmpoU+}EFF#P2?1X$^&E|aj0|iHL0gtIvNrj>X+|IiFE8Q09iVIa zNf$V1(8EGxzqNcc=ED#Lb>v^mzYzEr0{=qbUkLmQfqxpO=# z9N(*W>$?$huITp*;rJ7PLFW!x#~e$eOf1h8K!|?`$0Pfh=u_a%1j%Q~tEftykYR=H zHgg+ppLUB-^r5en0VKU}{_Kp8U?rpE4*HBH;@dC}3VwvVUVi=0x9rfa!_pseSCqj* zbHEhvL+84OE<3|XcI9kPV$NaTgluesE$O~-k-Hq_O~LY2FjiE>-^q!7)o*y#>bAQ% zPimBGTiFa+nLur(g%Hha?J7uD-VE6hS{P8re*BWmB^7OAXr(E<;NtpaDlvHwlRGpAX!A2DfB5zm;d@~9g0;uBHBK`-c}$ColDOkD4b(rs#p0{ zN|`9}voX#P;yK0%#+WUdyF}}PEgzZc4eKJEtjn398=;3% zSju)7q2l~PK9l4N8l-AvxTq>)O@C;)XSGXHxtaU4$W<)!@l(N>_?}{)g!*Fd5tcn$ zuhFUlf7FKjlMi&6(eZpOOY0$i^${gmuwYmNA zXSjGkm6ogw>{`>=_fGD>pRsb!fqH9TW;+4W6A?2Azn+z0h{Ecrh*b1{39uZbIys@t zq76XWOhu0kSa~{=%wg#+-9Ey0xkagV5IkG{nnw9R=3#X$56(A0(Ub2fWtqV_#l};? 
zI#u4lMY(i3lRb-LB{$1|5wMAFJ_fhzmB_naW0e};Ouw^8Vn_J-2$sX+P%u}Ou3!nV zDI+Rk;`?aKq@16u-4yfXL2}T(AClZ9z1d~c`qzwX$^hRb)19x6eUPR(=A-_P5fHQC zLCXhGJV4M>Zv@JQ61V#R9ene|wkUTY@E`O=zqKQ#(Z^||y+?|_y@b2>4wjnP4qDQ@ z;me|0HrT+x-I0Cy)(4HZERJ&`fiySQZvvnAEwD%sKBs$s%RqnhfH@rFEv#-NuwU{( z`yF!G*94vd_^iaVsX`L`ZgsAUPos_wenW~8@n`$qQ@MwE=mh3>dMM3QL}RGtWY=$9;Fwf#HJc5$Xt6>NvRt1ojbD?z4`eDS-5+l%^1kH)M%~ z2QJ!aX}SAtgU^}!@Tx*iMoZXJjTHhc5kha!f;1h6|D)?x)SE+Ymk*-^<5XS{@B4jZ-3D97>+|)&!T5cEVG?d$A539-5*oMnP&2e2Be@M&5`V|jJfEd^m(H4Z zH6DytK!?G3_>+jC1(r;7r`@+l3-+iJ0_1Ji8z4iP%bUMV(cQF34wNU7bQr$3>+;18 z@>$3vZ55P?jvr)DHODKU{row&D7=qeWLEV_1visS6^*S3$f_NVRP zw9$b#cWQ}B^5%^T@MVhlxO`oI;#90p#2LWfKAmxp4XFP4h1cW zYPorO-23%4k1Yc#{9^zF0?|WztVr(LVGSmS)rX7mzSW1ot;;SQNSj7pIEV z0I$o}2VT1?Q7l$9HV&FAxZ^bC?>$zmF;Zb7myOHh!S?8FJc#8#)6IB~IU2SpNhO%@ zb&717SU-J|xm`M*Bc&bkNRM2{gK@3@xwqa2j=ddO+M|yTvD_BMBIKY-c3=K_KdbZl zunnlt>3BRXT7SLS=Hq>RK3iw&c)DIb1KO-vw)@;+sQ}3tZYx8;j)?9z=?*;I^SQWZ zx~%c!K733=X)#(aZwb^eIxl38*w%U>q%GeFUlH)}lOvB7&rn{a@IF1#n2x(Bn!|6q z>3LL+SD+fwxd+>`((i#fe!9VvB}RTUg^*k2Ns#Nsu6I1kWIi(8A8?h+0+_%yrXMhP zSC)=B-FHn=*17dT7X>b_wFB64X9D(H8EA~AQ7@ZJEvfc;P_Tlt6ncQpXXmct!4VG>4~pb z*!1moEAdQYfKuvaX@5q)guOG(likLLI$1sK!v`M13S3Oj3QeT6-tRZu5i@HkTMwU_ zPQr0Q@IUd~vEXNoll_jdPFBEcQk=j4Zd|Ndn}+uvLA4t7s%;q4@m zc5^5%gS}1l33}PiL@R}v&)-kO6tx!tnBi5*4H?Uwp`O<9MKw}Fq#CZk=kmzQrF{CX z;|!KwfHOIQ8Mn{8zAS>Ez)kq01qvkh9$teg>eT<#1H#;Rt| zsJi*%II>U-cH=`25;EO=<6^`XG*)B5I?b!lh_k>8D?*~XV3V$CuUun}>&uI@hpS}1 zM0};%Q>;nz%TH8w`-tQ7JH&8_M?m%i!bye(*dFR9DBz|BkyEEtt&gd_FefI;1|vVi zjx9`Bd-^t;b*1S7wh1YTR)Zb%^A5O-{iPk7xBe2FguRfe7Rx!>?KuP3 z=jnEiDOamFOO2aOXGlSdiPlIrtpZ-rUTVlwTtwG~=A69}TG=RC9`m=aIy_Qn{P3nf zHXxxw%-S^)X(ueEQ+`l$^DIN3)ij+8qkmq+4hl+4&bM-09xpHReoE$vrsfYb?Fhhu zdMQdx@1ADXu3!^SUVxFlD=sd?lcy?&1CNclLC{X^^nG8V%>|_v!{03sZObEJ4UnmK zJ)Mt@J|4g0r3cggEcMJWlki={g1L|OLz*+T&bw;7QMK$_&7Ly58U$lpkHdGTHvLcb zj$dKVDGT~A;y)1wHKwxJ_-_;unuRuYQROI>Nd!-1@6@RBj=>{cZbb+9uzsSYU2BPn zVml_y^Yv&FOc@-@rK&*EkIYuDX~dYSf`w^uu}F}u=8&LjJQuS)rs(87Il&*~Y*;HB 
z2AebGke-5&K0Uv}n%5)p-0(_?^dxx1Jr}{1Vb`anQTvGgV|h(CaZ4aMx1oS|>xesY zjDEv51BN@YL~a2&ZUCDoCm-j-59D zFbw-$F^yR~#T}U~!2!5*Mu~InxEBvjH9UNhaD}8^SvLzG?$Uv}!q4lPS|#$GinC

|!}5rbg^Aq$goMo zvTkGZYw-M)CK_T%ncO?;cjm86b|}?_dqrVOlhDefYVt~tm8%XNSw#3N1&SU3^t7kC zx>?mDkg5ll%>ugXA3amYbxH3gi%E#$!=VeFe8*~Qt&nKuj)9)! z@&01|ob1q#>VYxKWKT(H~J5OQ2?gS?xov3v>%=O80VL*{41v zWCu|DR9F1qLvdWWh17hr>|Kf5L^?05n_{WowLq@A#Ia%{C zg!3r;SB&qSKv81G!z&&dfrX;Q?b8O6eu*XcM8MibTyLaFcavn!7siHyL; zd7DlWiHcSuP#u26+ZtdT`IOVVG-9k^%_|KbI9L$$d_rS?rNY!CVRpfF2Aka4%JqNWLJ6y8CF=Z7MwrG_fPHx)qI%WsMEiOb;7cFF2vxy3GiCdIG5e_TJ%2kYIlt_LJ9oajiiTNMZ>b;t zFd{lsaa>AHTbW9#XIBKF&qEoEp`(NM_}VqcwyJ%iIB)QL$~C!@VRwNob8;U_(2u}w z?7hLSvM~dqd-i-*>UW^-t|y?O$cL?ju95q zVe|E;u@_N|Rv%xg+3c3~P)D-aY|0MFRP4jRHq z!x2q&dvk0 zK&moXNK;g}<&ApMcc-Ri=A;~LyyBf3)XgdY+UN`eo1RuFB*Yr9P3_rYTWnJ)1dv5$)4ySWAlN#$z z_=r!z|>FwZep%$Mk2xir{1)U~*ANQWhGWU>r z4fj>4){%Fv(NJk;;p>N#cTZem6)Hh!gwDfOY@6T)-ZlWUHF<`DM4 z|Dnq_Bx+DESJ81EAwDo%)`Wjl;(}LemU%Nx zz%>)7_*Bmlr<0-Fuud%B*X$)(AfD(*MWdx&DpHOtDfHzYp16OwCcc=UH5prB$q}Vl zfynmPKArQ=u52~!0$HXC%%Nn0r5AE^q22wA0JO7T(uq^Sv~`MT4R_%dGG0W`LPp=B zGsS80`9JC$jUX5Cb&jelA169j$jbY$ynYS_Btj8EnIn@uWZ-KMA>MEED9!>d{k(g0 zR_|%R7_Z5w(w*kHiIR%YlVn%CMj)Z~jSxPjC&)5jyL;6b^Odq{Fpc4f%yv7v2XmuJ zBoO(cw*@h*G=J6t8lD~nOE=ZRmQ!t;lN!2?CrJ1%Rm&abk1E*J6glyw@7X~S&E)zm ziUtg;nbUE^N11S^3TvJ2k|$oNE#YQzr1h4$ycU`Wo2+zTt)l5;kwld2PuQf{V^zg? 
zY6UVq_OWlRKKG-M_BQ3*qs*p%|1PzLKKU&GXv43*X<)UfM4FS80p!qyd8dF?DTXM2 zn{2vzAr1F+5I-!Dw)kn4Do*4xSPp%zw=)Beowql~C(;ri^H$;&SU}h_R2<1(jgz2T zcA4K=A1vf>-4M^E{jd88&GD>x9&xcN*J8bhC&iPVZ@y@}?O_qmfayx&&$A)U5eYzE zZSNG448n^|Ty?z$pU%<}LnbkS8FO}_1KWLOWJTJ*Tuv2RdJJpxjTfZ5F>-`A?5j?31J(yrJ$7nABz zV}OPYN#$hHm;l~`3~^sIBYx8{w%^NFsDLooVeiQ%Q^Q_9wPOo~WaK$N~b`tP;SKo&w26OMivqc$K^4E#VBi&>OXLnRF z!4V3!`Sz^=Lez3zeuMPTA8GPdW)#|yB$@7*g*k&UewZVIdsnh*$)?!4=20WCg2q0C zx*KEripNM~Rh5L+$sx1W8yYa?La~txKTA9At;pjmx0Yk>4V>l)gY)09qdg9xFW3|- z5f9CW%_fJ`YC7GA<{Q6U7N?FTWQbHGBCm%tSwc<-#qBdZxg%GO%4ljMU-8$>RLsieTPGwKMq`*hrSDan(lFv4vE;243+#-7nd3Ln6<;=o?LxYRQ$C z<-TnuP3IT7`yaa4nkQoKx6ug0o6N87iyqUubYe4m-EXMZYgEG+PnoV`eUD>CV!TS* zVNHhJ8{IL3qsVO(POrog=T+&Wp!R90y+S({Eyr=K_j0anxg;9Ab>TYk6VcULynP?L z7kaQG*1a|#;DHIr8xK{oq?fa8OhjUVS2U7A7DV*1A2)s)OCNOX1uvs@u_e%??oex5 zRHV!5*1V67bTkRKI@`<3(@$2-#atfZv4=4MhkOt>I?I1Yy8YQmP)#$;ATq}lmnoIf zsmrKW0J=>pAG#Tr6bJbJJnh?$`EkH^`64FXzz;lM7LMPq(goU%sl_xCtPsCH{>sGN zN9}sMP(0I^6I5S-uQ*@ZzjCWN5xpZ38tiH!MiSy`4vzfl+t2?~7`|q_!^V&$#de(}^r&qjKQ8Iv z5b*YVkvw0a<8?P}^Y(Z_K;ZLwXOp(>^?3a>ng&ceTW{mZB0Hw6^KpGr^d~Z=E3{}e zmS*`ZNYT{DKc?KvkOw>p)DBB+C1=sHWj7Gk-sXXeAOT=D=9Do8F6)I+)DyT~3-;Jq z8!Xj98I&;qO~X<>qq+Zhh-Td;<6Z zDFviySCtaZ1FVPC&$eT_T>ikK6Mc;HJs1_)3&XNj2W~Va~EY_Zm z{foJ0yV>xQz@ZEwDQa|5-iTRyLN;+6?5znQ(u#OqyA>5`K~g@jxV+^$*<40>t8@$YyOBhL1^Gi_jI{k|6f=BDLs4L(udl5qP{7Lbw-sxG@@=gefcH|nVymuTn)Ppj5L#2?%mi-P6O-l`fy>99ClrExA{%En(oIKkZ>C~q`tB0aCaEW$M{|jOQK^e0(-Z5MS#EB5Ts+xcr~=-iP1bP@alymAuq{#i$ij?{kSgZ*5?hX z*g2++cCJj0tF$Q02sm?hc`4s1?6dIh6WLGP*IH$!G76OORhTp@Rx~0G@z|IU5qYLCvVj)ACCywIx2x$)JaLs7TN=& zw*=KCE<99fSK?@!r z4@x#A)BrNjv$L@=vM>=cvNF*#0oj<@fP_q}Ec8sQ46L9p*?{!SYz)k-OrRD5gnuQg z3K%$=*;s>eRS5w~@?!A6-v}v99UW~QH~|0?Ge=V=eR=~MD}aHuf$<-2bjD8Z?q=2| zRJ@?&&m>rO=6^r_|5a88D+dSDe+*4&L>mm)d1vW#4fOg(H@B47_!8>YwOlITqXBd~ zK1LaKR_+SP`941@vJPz?+dh^~j|w82%s0~q^*B7L-^`xSbU0kz*|a?u*)F_1oT0qE zRPHQzyFYh4Pn=z!z1^=`zXj{OUY?JhncgM8xlN2a>}K43b?$h+KcDXD7(uVr5Es|R z|0bbrZuk&aL?*PSxfoa}^s>tLY+aG(Nm`rZ+HA= 
z`*8s1gJ^xXNpR`x)!~h0kubk=z9S_=;q1;!=5-n8{N%H<8STT%^Y$czvi|(u@MA0K z3(Xnt%k@Td_&^RV265kNcN13D_fy^jQ=d|Yk(6)4r_(;C?M_jLE-f-XUd0mE^m2x7 zov)DI>%0?nF?vew&xF5j!2$G7sRq1Wd9^6RTX}c%$-+ocMBQG14rGu@n5vm-EHI zGA{TmDaE&jESz1kDhd0}TYs@L+%}T|CZQLgqVwjq2zDG7QZ-QMY`58-o{|5N4$BwBu@{MNGxiE6|YFr9>(4@;LA7dE&@ z1JFRb9*vIPdS8bA`NS;e%hfXy;!H<_c3O8Ff&%gR5(*6F`*2#^`Dx3RVAAU-OPkH- zfgY~UiPLBA%9U-6;E1U@n{R9;UN^tf6$5>akqu_Lp41z>BO1!B2DOF9weuVc-Di}m zNiPeTYz^Cv%RO>`)ThuBAj5^`Dl{=v)?kZJKYj09AaM{KciY|VA%HCGT#Fo0 zl*ixcnyaxBc~id>>Y!;@gu7Il_vl04{nRq6^wuVFv&)*G_~Irkr{1R&ts99%3KmDw zRFp`fXJL;!bR#pjhA5j(w?!RLt*;dQ#INKnc@emqt9Q;?51( zJS$U%g{~aZAL5q{2S>M^bVH!s+pEW@5aW9)9_50?Walk+t`k$wm8k)aRE-5m1KpYkM}BR7bRt?dT2=|NNE((C!m8Dx3$rcT*ey>zam{~wx57-BD<+&%yOnL_@5lqO1}dV}QT^0PJoCUH zzat>h_CTsm8ilIO;SvVDe&5eLEdN-A(H7x_dQpX4l_AVd?-?tFc$z;O5{2;g{A1f= zA%owEQ$HP{OEG+!abW7>3{DrNfpJzg7c#TYzI`R%5xUOt8&8VZ28= z7=iJ-{*+#Ll1Z8uyJnYQJlGPlELV0Ze)MV_eMVmsnf>Wgw20C-vvKfHdJNhCUv(RV zr`zC%DN2!&pa`(^-7Mn8Q#6Hl1DBE7?4Vm-8^TDRPq>o?^^p%-%3)7GltSXuyO)!` zL)jCY5ZaDo%0UO)5oy?2gy{$R@*M5d8rg%*f!her^DvxV*RJ5i?n*ELpUg7|GTUn! zJzOoDAQ=p(E_twS+8K>oBTp#yNrxQnYdO5c2%=y`@iJZ|ts!ega$$DKM2Dq5P4&@f zXOp7k+^X}=AE8^v4HWNdH)fvk!?5cgEzk2?4}&v?tH?13%X-+q$J5>rcCUw7m0G5h zw1bYI`r;r>PYm2YXHva4>{QcV6tPH7kh%j8<>{kMMOhi*st*kf-`a`z*>y@e8eEj! 
z@UdU5-(^BGEgf?rE5jIUpDSz=`hl;Nkv!XEt1tW{fTceu-0%Qrt>+p(oz$C8BPVao=DTce7vcrrjFjUuv)YHMYP z<;|PxE`XRJX~NSLIDz?a2ClpNxXF|VcoN*9f`n3j1mcCBr)Ai}h=7>S@V4xCe@J(i zL`i^Fg76P2X|mmGa?8B|jILzNrnAQ_v{a^&Zkgs1>bI<214j?61| z{wv&7bjjLXu6k^lYC&pzgyApW?i8UHC*&u3C_H=O-LP??D}v-FB`ae3yOQEpAy5Yu^gO`re()ubJEqrf}9X^lCMFekW^Q#@t*GC-#aCVFe-xzLoi~4!AFv$bQDK~7mpAd_oSCiWJJ&xVb&y%h?V!E3Z^*ZGv z@e!@9bHhXQ_cpb&npWs%*wnu_Z+F{u~LG_3|v;1YfL5cKkd)O$>G zY{hr4Uy#V-mJhuKLq2S{5>^-c#$;vda_N(ZaA1b`G`mhk1yn>)TW6IDlNHmJHryyz zjWlOsnqo_^I96yj^gjl|eKV(c+Ca8M4?>caBuN(LF)`d}=Pd?zF2zQYpV(jZhCY-} zVBHmKpK)}%Lx?L(d;hkt4Y^+$w{VnD|3wFV!>DHX709{upuySN}dXJQfb&f zT%NZ;Frn_scKR7VnEs=y3bdm}Rq=O5#tkrWf-uYEB=<%`0Bkk46XBSkj0F zKo?wLF<~(PvHJ6Jpxj{Wm;4yi4zKgb;zVHK@};+|f%pUv``~#ujaA9qvo>77a7j<5KXANlj$;PX8mlJ3;2Br zoj6<;)U$``Y_z6goC-;@N}fZC$d##&*mLZ@X-SL2*pw*#l9(#8PPi<*%mZ6>IiFnG zaVx1A6@+@B?<}<~URkjRNgA8Jy0f{rn5z(d$6obK&tMZ~@y!=_A3S31Aj&xiX`c8o z)2nB0`>?YFg52f^I9;Nc!ZzYj-c5YjSgFx|AgvQ$HwEz{DOfk0OK+!% z)>uq6{xsWR$q7y8(0i4911d;pi?if;nSoT3$#Ze4oel7oj8WK7dT2L~gwsn{k<8N4 zc(nV7DKm(Yimi?W7yf|}b_COTz2z6(qz~vu22_dPNCr`iu9I=(1g=G@QVVPwxxVn4DjZXI!#*Un0cFp+y(+P}4<%7MQ#x(0qO6Fg0|@PmIncRv#Jj6b5Z(Dv8Ci^BNm83 zb$KcUeA^v+Sa&vAv<0Qlft-pcBv!^fSvLt&cyBANQC~m!IkZ=)jWL8(P@)w}h@;k{ zGzA~w=N(Itrf$`-e*NxjV~9fF-Q@;tDu3*zLS++0&WE1?Hm>%*pJd(mb>or7fSu5n zhKVKD5hE|XA0<0rjZO(_7}xoeHeTB5hS8Vs?`x*oO&;Bf!Qlv|Njn=J#tRpxtZIY< zEiGlm(u93)F_0Ujdy(niM@*bvla3pV$qtq`*))C_Lxj=Q7NXA|689l|jCEygdQNEA zZMQ%aLfPl+O0mKT$)HHo0mOZoy$rivnc32Zk_c~SffN(`tp6jMV?F9k_zJM|GynU3 zx2Mf0aWxJ3J_o zKe<>HcVSa$=Dkt1^mZ0#YHEpTdm^&%l(to>(T}kXJngq`ZN-uUN>N*yM*W^U z{%K$&>OInLNv(00Bay4t^q9NwiK>b>ve^v7Nq(n6o_smg5UCp2gD^$8SeS$BPJXYp zJS0YJS5ECLcH>Khj3&MJ46)}6by&t!jXJK1d}j;DMd!q>VD_*!J{ib#L*4a=yA7th zPG9*UeQBwHiQA*#DF-4)#9>Dq7R|(F!XE7OspHAdGoQgxKD2E6aohA+`Z)=0?9)<} z8U|h3_i{Udcdm4{SlxEx)em91{gUM-k@cd1g$??k(aKgv-O)^bC8qK#4Q?~CIS_Rw zvtkMM$)wBQ!g`C+Y%_>AugOtll;@mo$lKy#`Ut8c@IIv&Lec}@U1&$wM4;kuZ3W17 zET7!w#Ou;Idaq=dfx*nD8|5 zDDoJhpO|+sY1ZsRkfvgYmw4pLY(Umg0ET^8Dl-H=rTo-h2Y-BeIm(WXsQWeFCk^lC 
z?%9&#HUeh`Ivs_y1=754=_ZNP%o1C6ZLo!FNF5Il`rEqg{4H@yzW_YKMZ>tCq4}6@ z!UjW?WzaGN)JWSUL&EguXqiRr{Zz-{%DwfnkKnpa=?e*|`~i#TS7N^=0SV}y_n6OP z>SO4{JC5z0GhpxK=j2~;3ei~fWM;V7ssV5uS zF4nP5ji(EkGl3L)wrgZYG^UHotW~TSJh+f^F5}Lh+lF-RJ`^z>;JR6Fzbgeqk618WirIWhsRU;-e^M zIYzp!$%GlAXrFLWc9yrr0TA+B%FrwYi|(QUST#woP(tnA65ViFqOo9oDa!o~1<;2< zSE63d#S~qvOD}QBg5eq{o_Z-PIWvKf6ak~BOWUHoOJ#_DBM-7<;XG$J&biDG(8Yzu zA?+v;)wQh`Oh%m7bt{hJ%!PAcdAYU|c%4nmFKxv7sk`+I$7qhwRTvqn9E>DjuGrT@ zUb4xW7`W(%zck<8BB-5Zq=+!#@u_Ok(9+Bs+rGFBu{Oe^vZ2SyO_K?{MpjQ4Q~TLZ zu&q*-9#}bt@OD(ua2u}RwsQ+FvB-p`f4achiF=yzgUF8raElYP(vjJRm6Q;{f2v5$ zB9Jyxs)^aUhgY<0KU5biY;LiJ=&CaJfgpW7gXjYrW=BT7y-wjLDWt_GQ)1+HFDIrj zA~1|Dx^yNhm+FA@%`bdbyunx>=3+mSTu@OI99atR$K${% z(83AqfS8i>qY)R7UT!&k-bf{(w^E1l6i86Hl`RbA&%@g_+LfjHkkL+W;5@I7w<>s4 zD<7I#0$(bt-7+SA5MH-O8F#xJ?4OYLWlipkl(vVAZjIziCKuc=*fS<(0~OLuznfD< z)blv?&^n%Wy87#;M4MF0#Ut6^trTj|DPai}2x2Y2ZH!c@*gr}^~hpfdpLOZ8gH z2Qf1kEq`i+&uX;RZ`bZ$Qf!M?Z=^l=hJ>V_mfwaaiSqm2HWL~hx`t+@&jTjsZ1Xll zJ^SdZdg;vGKEs8pQYc7$I!!xD6#c~2^1PG}t%ig)!gw&>J3WhO|GC8S!q3dY)kBCmo^Sth%$KNJH4(JcLe9{4b@D9Z1j->IPx z&tN$S??L-IlwhA^nI5HQTm;mFh8203Vpqy^-!KHemc}w>ni2g1TkSy)B=c%wIbBGj zu$`D8uSzR&jcIX%#d{jmr-wbhrlFMj6)En3;X#-|{L`i5U6#$HwesL5vm6O?I^VR* z{?c+{(V&&%IQOne+(dg*RdXO5Td_8g)YAqa{0j+qvc}$`YWeETN@;yy5t$mL z!kd-xSnhsAJXNxMH@# zGXC4MWHU^giNsXLAfnJ-KJ)^Xob6`4yp+Ux&p^qw84+)=rz8Z?nlqm5pT9r z22@mXP8tg)^c+*hI79RB@jwTxmu=`U2V>L+AD@YDN#3%V&(}669X5GSKFQ4HGXF)q zf%V_#>;E6d8<;rQ{|Gp+60)(gfz}wAnF(1~80pzqfGmV;pdbH;HvEHv4FG*h8+`yf zv%a1oGZW}#$fC#0%E$_21RAg!GqSK4>lrh%>oXYu4Vf4W^Z~Z^Hoq%yH~^f@9E}_t z0KY>PIu=H54!=um=oo;?cl-`|(3{x&#~=pVKN1umD+mFm-23ZO-6FVUb^KS!TVg*^xzXA161bGVPFGwAIQk@_eA?AO8-2N{P*Mk z|IV_k9IPNCVP^yp%D=PhpD_LNK=OAf1^?}F!^+MKvJwtPmOmh2XJ=t2{0pUj9z-M*-TtGZ@VWDani7pP#xL$>Q>@iqNLwIBxf2A|p z0QluSk(23n4XGsYA9L3cKFgH@@)CXeRj&7gxf_NDXF&e9;kAs{+p7_uj&B&vrQ1FS z#Oe2EK2O`<-+osQaC;KretGfHzW1hnh1xj3K8gnLwLR0c5vj!+K)LG;4<&3I5b#{o8c9H~LT*f;T_alW?BLGH6^0c?i! 
z2pKZLZu`fdGwqvA1p)Y12Ui_g?KM~#A!UAhpNO1* ztyOU1@MiABF2L%qV7dyKnIWr@LM%r6svdSP+j~onam;gh(KLnV!3X&mxn$PLp;u^j zTbA7nZ|PRoMC)Bu=iYdbel$U__61{L{|9OB93x5)uZfOr+qP}b8QZpP+qUP7XU^ER zZQC}^_|4gUd3*2fX7g_LpGu{=x{_aas*?Wt!wD!@_`{uK`y3DcyTy^W#CqXWtq>m} z#3JA?_CfOL{pFi+5Hg{bqu#(VJVi-z0v~@cZℜDlFLO8B!yum+TT4Pt*Kf7%j8F zu7XbWpu0>*Gwv>x!tTEcb2y^BLgKvTQvzJPuafbdY0v3b_=3W6nPM$8h=k9WF@-#aOxlh?dQ;vSnRcLiZg3C~a>DPQdBK zT*RbI ztw9U>qB6Y-ysO(#&&@L53C=I5Tje+3py;YQ;;O#ZcbO%p5AWL#$-{@m?;%7JwNX;C zPN^fe)6J1YTeO@PvLC5hQN}Z1b8Je-Ld7C8pt^{cY)(V;u_lGSLsi`^1rdRiGP zv9yv5-isl!cQ#@YHpeoihhwuVUSJSR8}*PIR}^9IgP2qlT>86+2JyQgN8FNz7H zOtpUmLmESCN*D#ykcpVI)-$w#(P$N|5cwV7oZaa)L`%-3VB9f^iK|moxKb~SPxe%F?G8#8Tub*LDDrfANu>5B$1bM1+bi&l}<%Q99QaiaI zZXS7m_eRT%4yvotY7m!vyqQBVfy!cJ%t+#Je4iL9`!pwtV9A>LSV)YH#IH35xe!4T z8vda(Buh5oIyh??AstiR4FM?VCJbY{WB&t+hA3PT&J5W?J`c+YCpHz3ng3ZmpGJG0 z7tjQGYYHrpr*P>7liRDGGrtQf?1Xk zF%k;q%fSd<6iCESHS*S##4KYYzCTFu!;`w9x`Nb2vF9lvi#ic?y@Vve6gvo_jF%Cz z2uoUnTKFj{k!MJFH(5BvLdEPio?h4ADq)fPUBkvr94nh%;q@Rx#%CY;Ed2``;(-&^ui)UqiU+wmHX1pZ8)*GO-88q_2Dq_sk(UDD5<8DiRG4I;hE$V2V4c4 z5k}#imOK1pW5B^l%u8MmK^Lo*n)HfGLlyD`Ze zgv~6~Y1+WJSvn5V8$g(IcdV`($L89z*UIFkC_0t-XVyX z(oj>sE96qY<@@6#Rheow@3SrV_S<<@z2}a)5NO6Cw!I5mtZN(1wi2q49T~EAKZ3b= z*)APbF8V^24nlH?1?+7-6rN-W;a!)wKY65H|CpSHRAaM{6d{Fy_!x(OVQ}*teQdDM zMOt}rS?chptjhek2yNb#N-5@7ky}E>qB_kvqJy%5!g`2}$QW|Xa%H9hRv$bcppvl} zmsc;Yg}Iz}7Cv<~O~524ym4&7N^-AxG*JBbv{kc_OxfwDH$o4^1So!(ibII;NNDkC z)J1x@tXZi`W%(%fxr`=&2dR$DI7&amZK=j*-CICoea?oC19ncAsjj~UePQO|@R;<) zF#0h19MLRJ%QfbWDc_$>@W_$46fvbMy6i!ivEU{zqFc#%2v+DDi#nfBUGNI9FEGwT z8lkECQuDwUgk|-wCEs<^Es{5jJ`T^65HMfGPForH#eRm*u;X7P$UF zDo>R<9uml6lAfQ~9iKnd`ucS;)l@Uvf+ph<%p|E+Zw9BcDH_)n7RyOV-iR}3$^_OTaQDFze4?Ofny@C%ocu{2X2H;NK}YwS^m<71NXDCgkSJ!qq)Q! 
z%hYmDBtC#>Uo3fPxM7DMbyY_vtx?Q$eA_9s0we|qB}#})t~xaZYeoG9gG8#+or+<$ zP7;l#sBk?p3s}=YxXMYCGS?jEMj5vuoR&dxWRN&tHraK74%fgnj`424(58CsO@vsC1cL+2I=8%l@~Y&vhQk1Uu>w(+fS{%yk&S2JKPeYBPOA$Y zTnbndIv+eL!)z{mE=cWzz0@i)VkR6#Rbs4JRkizm)bsm1>bTkNe&4YBeqZtP{<{12 z<^Eid>-l^*d%uz6_jd0g8IP&k?CJEQ9A-p1Nh2Z|M$WL3xTgu`vcfKM6YMUvKaO=q zbyaUoZP~&fUBnq094NCuS3p_cZnLoDnTqDHp)^aZ$zCh zJHbNPS=0b#d(tynZ%CHkEK_j{J`5QIVukH)!ia?y#YNwZWgiB8h|)h+*~BC(N`nR0 zVISvi9aX<<#(7c-qTg&t20~x#O(TDP8#7Mgf!jLPL@(K=wR|<1kjj7SxylGQXb|s*+qG-p;u#Dt<%cruLvPOo1K2?fTnWlPv*Zu4=T-X{&2R^%Jo8 zB)zZ5uKqE*ZpzSSPO-MYuuL?8kAv=tLKC*KOh#PrSZ`xKeoP;HQJAJf)dE*T2A`sy z{B0?5)K#ANN!Qdc(URkw&)Q6GkXKS!x+z z<4LFep*6THv>vqaGFkM+E97W~+?084eR5~2)3k~J5|^|&D#2Ps^B|Tj?e83pUrSOD`Bm^sY`_OwWBN5gSy$T+&%m_3ctkP?D6dacr?AiUyWq3+mv?1f?e$o(@n3~YEK@r;Wy`@GK8U!R{#5+Y*E7dXHfDVy zrd93gw&_Bzf8CY2Jybr-H`T#UI)K}LWO6`=r5O0VZ(G6T1D+iXwT5!@l%!CwUSYGh zIFz`%Voc-8b7)y5iIJniaCNers!4^#sTZZ2u|e~tzr$Ev+a}K}AP4E&x!r#=US(#epvOoxaURE485IKlRfp{@Mc?kU?HnZ~0QzeUj*}GDF8ospYJ`pI$;@Q584kk;h6LpUrnL0)KTwl!XqTf&OVuwvg-EEgB|UCR^g`bK4c^k zf1|nJ+XGX3gj<{>$J6EYCaN#xB}4NmvZOmnU~_h&MsgmNENLa>ZtKfE#NxzBFDcuz zp1~ck5$x1v;7ky&ne_N>WFaP{Ged>)3O35hp#-f`J^Vah#oJ${%EcB<`y~GrS24_8 zeFc^Z7q=91N*qLx) zW7BFTwf5*=}YoIk*UvH(k1j^1xp> z-q{dPSoNy%S*1tp+i)Fm3>(xeXfPAK>Nb3_ttM$)wnEF39Ul#D(QGiA+1qxr8i_RA zQT@U(_4{qO!<+s`(p@5xSx4@;(^4~=l^D=Qo%bP|oq`b)+SX8j9RA{b6(Syaxe2n7 zQXW>bt61Rviw*%*>6k_xw(Jr&^O)_*%C_8k-in$IhoXqTAonH1QgQ&0mNLfehyLrS zb|Y|h*bBtb`*XXRH(d>-DdC)StohH`39z1G&si5n(!RXh%J7z$L}(KlOkaW*wJm-Z zSY2w-6P9E1b&zW4V_OpAb%QE7kAV-`RN?{G1F^)F(|wHv_V;y5Z*VA`Pe!pHYP1rr zIBkKNH`MK#*l+MQ*Bc<4@rlZ*u8-H*7zWx_{-4Ke8Z+@fN_x$n3F0jH*+7i1{NiSs z6ki4QbLXtB6)pj9Kaa0rmU>sRUzPg6+Z>iaF5m0V)e}&I`I4!vde0)pqB2V`V2gu7 zPU-$zK1)^H0$YncBm}8{IN1O-98mG(LB>wxEYhqZ7Eb5_jq@kZ%y3n+sZbhjgyxo0 ztNVH`@{2qINRO`7t8rDbz(;E4YMBhTNMjqQ{js6 z>TLM7S*=vFqAB#JcwSAY7`D<^jghGC(OSg)GhISS1TL8(bkOdnFd=LY+urJi%vN z#X7fSKi1@Ws56xBH@keXSIt&Vh(3t~(qEF;N#&?M)H6Ch5G~nk>>%Bi=8WaUU0qeg z+LW&NaM>>r&Xr!KL{d6JD_mW^DB+CX%B#07w(G3DRt3{&A>{@{vB`7StbOQoSkB}x 
z-Tk6fpP%NiM{X>@J=naBiCHG9up~=?_7}AA>UiDN*$r|dAq``z_8E@qbN*Fik(!8ZX{sx~ zy7A_KjOYF;Dxc4&e{~QU9Y;}SO%7k16=#l{G`ryx(C$g-@$`O$=Sj~o0DEGSttzj` zFGLc~cR+F3RA2pl=d?}c?Sa^gB1`MCipaF$V&C1p8fg-dwV9fszZ^MEj5KUR!fi~V zkE7TNom>OzLjOou;^K24>aVi&>C!WS!~{Z9ewu(4FBq663$;+*xwjGt(>bLYXG!@2 zIjyC(I9kbOGahKS^Q@R110H8Ub)?v&>RaNkY&5AzFb)%G%Z27BNMTAf6q>X&TO5Be zUuV>syRay~tx1usb!jOT5!(t;qBtbX4nOJn`g{Km??R;Yqw2N0tOq5_ee)ZA&inQ@ z`NFj_gYqYG`)al38^km-EVwH-mQWY_O01TK*Lc>DJ`29VI zPu%zl0%+0oE8++xLMmiCAy~upD)jw?7t}^QG*z)tTBK9(?ZViRePe=pE4%Nm2-Y5d zOQdkF(ZaMl-uvqV$hgeuh4pdm$glImV$W#G96~!PPcr!?A5aDiK6daWF!anZBore8 zp7J?%+m+AQ4Ei(Pe3ZnjcZV-ar+PM?$?BQjuiWC0u{-TkS+M|<}gsMk)E6G z5#{geQEsplr)dL87X3vfF?m8VU1HJJ z09U4uWPCOIynCAvP$GSbPAGSqz(dTQVrc2l^4^E_!`^uSfJ0u!7+gsE+0dUySwMq% z0+VbbF+eC9x36~{AbEzMwI;v8!X{9cO?KBOz2nHp5S++WZn*!Un-mf|K(`zN*N6v+2akHzY5f+xk zkcyf}sR7Z48n<3Q5SFw&Kd-l&BqH|YX+I}I?fDx1Z+A?;zn?3m_`Z#4dfzX}xk`i3 z{&i_P-*=A_3vwS|^HlEeA3{(?1;Vxk0b zYmM3{wE%|MR$bksW;wPB4W_V}sKT1lYE5)M37Lu)ek0 zD{Z7WbGN&+UbAt{I7&2;?a_Q^b33)3S2|<|eOdOTnm08p?&ssso;R&8b045l+gzPS zUq&*qr=%F&=%ZL_p5cxx5gGw8m0ZX^>>#B z1Y2pO^7~RG4vU^QpJR_dAa@MUUYVRa-<8M8mWatZg%^A;G8y4K=4)p3*9qANByCYJ z@)ryMB;u8r53==qJQ4F$-{^vy&ClBno(o993#UbM!8SX3Iui%BRI+8D!00?W!LvvY z>*${fj)wG34ld~t>!+FWH^-5_U7vsW`z^i*7V1~C+Tx`Jf*PgHWu zHi{A@59@wSjNN}rMBuHF-!90=4`XC-dxjx`Wa^T570R(7l$EPd8W;m0kDu$MhpDSm zj(m8e2b;7g*{)UCy45@mE8r<$Yo6#I8z|1LqP#!84}r_pDPIp?FF=(K%-SaG{6A;F`s9(RSWeQ~AWR0V)xnPFNXnTe$ zC{+O6lQZcEc*ZmfU!7K(m>ElefySvy5FdWt}ULsFS1r<17X%1^}l_qTC{qQfLeVTw2J3ydrd_yJTwRf|@&Kpb@cx zIkc=LO8l#`YUgaj7s{F``eka8%2AVZz0n|Ht8S&&x->4*`T0scLo_l&|wxUqv%1*|d5D&W=7~&R&KwTaw^)Tzw3JA+#*5TqJ#OU8Q*7 zXz9KLfCFVJJiuWcpy(K?Lx4j0(m^2HdOokt|`qI(zgCO;J2y@SsqipU47|%FJIg8aH;VD%lppdP7PFX1Gc_@mC z`L6P$v5sg#aIx0bkiqZ-^Vh4BLTs9S@ijA{RmhVZG#fFxIL-B#Q1k~kAi&3YJ&+TC z$Zma^7{6%Im-JBDDK?M~u6aZ54>yWbnGq(1_<=xH0{x%`W*x5r9)Tb{n9JP(`n&-8s;E!|7^okxAiDFRKoNLUP({%CNBc)5 zf(E8}+FXnbhY&8B1ubU)@kd+R+CZ5#g$*|Tq$iK`Z=s~VeLPxAG!GCg_|e0ubV!4d zb0Ro}S+9sg)X{FJ!MJgy1( 
zMVn7!mn?eUjc?-NddS$s18UNj83<%Ti-IGhVWxN|py4K{fvrLzuc~_dOK_nq zlOCSmxREi|F_yc|CTG_~WJ>+&Do5|SmgB?~kE`LGKzJpkN3s_OW+xw@SJYV8ICjJG zcL*az2C-JH-3L-lnhMXq6)6)5eNH4v9{rXs$J0&6!vSy`M4o33Lp6IJs2@e7?yvw> z?Z@JCD5wQiX#U|2^VwN}c!@kPdp%dq)BI2u>$WXD+eR9KId*o^cLmXP@#UPwNmkZb z4K1!l6ftVyESBh3+=DvvjXLuT4?oDhnztV?!FD||qwL6Rm#J-&9J|qAl47x`j3C%u zMj)}(ajD`Rz?~c59k$9SLAU&|FM?k+S+DnFpV;H{b&2K3xI1uQdA?T-25{|5H*kAe zr_off8hD0lpcvF%ui*z;`?!7&>k1YstWfL5us{0qu|-_v-c`2u7^W%^R90-ZXC&_E zxSaD)*#K(eR4C=&t>-@K4qTC{ycWU1#yMKH!y7c!1}BzaH4ZAT5hy>ybwJ!QpWMz| zD69c_g!vfinpQ^5@to8S0a2%M;6}3CM%FehOgO6ucaj3%*uH3LDcpuX^-HDQL6i)G zP19}|6~BlM$BD)qpqrCYiwZRMEGZG2Yd>VI&F+u@Jn<#WPAqJ4+d+o*rq(ks;ZQj` z?Y~NnqP1ow{f$__c}vb?H12K6a()}!G?$^$dH?Lt->%262|AKi-8~`l*sNhhLpnfH z7ba*qmOOagaq=70loUc&A%VmfCO^*+qUi?7(oXp|L{kG(jj)Xo;ziwadBrV9>oXMi z%HfAEvnq3RRrWQfoMwRT6JKB50>dHKb3%l?_49CUjFA5pSW&2bmQ8%@<)E$j3An3p z#0ADk96kHNhNt!>G=FuOZy`oBVT$`UVTc?vF@|TYLN`)LCjxp$Sv2dx)N+m8+gk#m zG*eWc7J~zQrt226u$Y4=O-IT$YF&zyNADwE`cF2%Xyj_RE$7e=dJ*Uo=pS0Tq4^^TrS;EH}*YS3;4&y z)R=k;VF``AZm&q-sn7sv5uG4!Ti_L?6MM&CYhf^`=kR((7wyY>J206ANc} z_=2QYzkb;ODIs&J*9N==lpkIDSb9J)pz^cDQ*wYv&lK$gO5XrUvscIiEL2X4`=6Cp z06dtfN;_NILFp-iigsjy5&Q`p*_YlA@FLf>(aV=IMOV};@CrJ_O|&2aLiJdm318~~ zNzJ70l6pW3NLLrB12%uUA-p;f`3{Mw;>f6sWL$SmX~iWI*UuUv+n=g7GQ(btL6UlZ z5s2WWaM*h#Om5xhwm_#JM~afQFdDg8x`yf*-hykP-yfwm{q*|Qx`Cw6r|Td=n?(JT4axvyCj64IV zqP4bvX_{_&uvOD_8M$yVtD5h_(A#auszdgIM=!qs>n!~9$eYHpW4|S87kc@(+(=^Q z$$)-6slWSm9LH`IPky9N$HPTaSvcc8=3UBP2P)&Ss{Pujq$5cYDf9xo;|QzWJdYz) z;`cP?MwGF-s!!=!kl2kqP(Wo7d$s$~!d$YtBfcqNg z!y$M9v~#~*&1)wYkW{;fTMEYJq7r_VH{n7Y#!*gtcQBTQTF{D4`vCicGd6fC^R(o1 zmZGuc*xxoYXC@M`op=HDXHq=Y`OSTxA_*jUNq-)#AP{ZYcR z`a^Mcjsfd}t9}ZF76I#+Ui;{*#=>;IPP9p`#z$|;_juD^E;(I)8JmSZ;k~b3wQ*m& zU)0-Ca==bk1<1{qrh6pm+X7gC;SRy4J0#&(gBv~&xmCUOuJ9W6{>&e`$oU?0v`W#} zaz-z^YWI>ywXNVb{LXlB&g~Z}6&GWIWDX7jcb$`87 z4K@u@Czy#Db!AzpN?g{pl^8k6{G7H^tXAEAEn{{*g!7UI+Rl+d+KfxQ=h#Meh8My( zMbZhMf4!owMs~2NV8huxJX!NyL##Ifth37-Y0%L+U@umwM9E=9qh8@1q+)U*AkC3f 
zSPkbRt7hDMME-rOGS4+p<3Jdr>O<{N!T<_|Q^Jo3lFWpi()FYq!D*N$E8pwM`FR-D z0-_|sYAwwSU7YV~Oz86vDJlQ!SK?6R1GiM?My)JuJ)O&zT_?@0sr|cfLm-;e7vt6> zlzyEi3#*U8n>w_ChPGP8#OCw|tj$@AfAuaxV`-*%;|;)CWVRR=*d(aHeI;;8q#!n7 zze1F5yP072RnFQud~Ioeq3KO`2CV!AIE8>5A^&WuX|hc zXOAa-yrU1wACLfNj{`06JG>v6UJ}(4j6iPbj_1E_HzCRfcC#nQ2kA!L{9d9KUOtW< zk{);t5&cFS2FjygsvqV-X9k}14m`5`GEVNUUruH5zorW({@&9h_YT|^6g_w}9_=49 z5*)^kwk4Y+S06@~(xpv)b5Jquo;P?{!$)**36>y-BtWDsNT}xk4f&qBbA6`}j_jQL z&gJ9nd=HnF*%5t}ES1~g?QIRi^FYOLN6Psj0q$G)A{F?MlE_Bz94(ac=XASv`rFCo zecj8q1JEhnZuY$f`0en=)9zWIX5s}e^55o2$ty}YMQ!qr;(`GD-d!YmhDVyA6z&;7 z&*S&2pd~jr;dyN};B-)z{YA)CV(Q|ko0Z+3$xz8}hVC;;pRexT69WJ!HGJ&$*(8~5Joz&m+0XR_LxhmgDmXaT>7hTM@;eBb=HSohV1P~^93?BGnHTo+C? zUnkP2V}I{?kl)9U=TXnYn>%q5MZAN@b*E16czOH-^Sc5b{@;D5ercm(P&6D2PIA@3 z?r3=ZRegYC^=+|C@G-NUmK(faQR#`U42+JFg@^B zE=|F20=Nh}G#U=V&{aF!T@^ux0Z#V94ymTzP$*5MON}R0}yEQiC3+pS^ zzfJW@3rwel`X>1;HLC26jpF9-W^a!7*-xzSnajMJSVJ8RWqs6zZi~Hk?J!t}kV6ga zg-hpvWCUCy6dY9yASy;yJgBGxeY81o*xls6(U59^?}5L1*QN^%jVp(35jvm=&F_v7 zt%b~${*o3jI}7At>kJVl@3gMZXv4>r?NHNtt8z^%ivtiQ&U|xvj4%pih7N)cZfLT0 z3+e&xJqjnbPgbdPOfDy^*DDPK4O9KXTH&bwwjZ7KDn%S8k)&aQMax&w&xYtrYQ=Sg zUf-N^-ytG{8Wgbs&1jdB(OqHRKXR^Xa7fnBkGG|D4sNjXnc2&ZfstW@8Og%L0A0|? 
znH@lGF%wb_>N_PQpHAYSOV}59OBd`Cg}M+^5*b`asI4^j^3h~(EM|MSMC{gtv0+-Z8}lceFDnV>~IfW!oZN~;jHhs6YZ!}8xzNCk27J`Cq{VnRet zQ4|f;Gt_~O!WeedLOM8`y4j4xqIQ#iOyiV*&aqJ$ z9tDUY-0)G*E^g&65=B53rWCi=xiLss=vxoNbO}G%;ul6Il3b{Z3>y^DE@bzKHI>F# z%U*-AbNl+&GNDxNfu4edxMl0n8~|Y}gu-V3snkB%ga#XGJUy4pT253)vF}naB(6KW zD9*DX8LlTw4d>fV#@dc?4%*?FiBwd1wkWEy#-}ZYN!iMu6Ok0(u8LpGJVt+SnfzlB zJa598Dhc4R6%~x^jWKd7t*LChYsf4XC@=TuEaMbGR1fgRXGs+_k@!0IRPb zw#B7LnrB)IBPZimuP5wRi2#iAg2tRE7z6zbTpE5pFMaXsRa1h?8dV1*$?qX&uCL*) zcR_--Y~hgrrvBo9L)IoAi9Y+-TQ#!2Ow0|)AT5OmoZ=x>bwe>-1)u8od;h^g*}=_J zMv7IoSBOcB#7G_$hhLS8Tt8=cnG`FBnxnmd`T2Il_mWKo2jWuwQ1-YtxLF?!(zIN( z#2`$fY(asS(k7RReew8)Fv^Bv_>r}1xmvdChR~X^Rb)py$tRjIDa{brtoG9<3~%Mu z^Vaf_6?`lRFrc_*#4;6CKpu4_bdw_BCg1%Y`~Xvh;k=yy;w~|eo*lD7bsAb)ds1Lb zJ&nuMCQ9k%L9kvr9IIBLMcriRWZbz|VPwiC|FKvQWLHLiB@hPt7AW$VraPg<-o2qs z44qpHW_uq`MOK;m&sxEjV0-3KV|>a*xM3}e9_&@Y;Alo}sLi6X5VdcQfw4-I9&x~_ zzVzhRduh$sp6{T_T^g-6C{n8=TD-MtX7XF7P;;&S`IHR=nRfYF7or}pMtOsxFzeLE zkB7--2I#!yUM}z4)35v9bTbEBVD%R&D5lzjJVp-Ta5(Xb0FqaY-uuNHtn&5Q_ z)zJFOU^pguH0lnv5q=u<@SX7_ZaS4xfxn77W}&-->BRIZ_0N2RJ9f0n zZJhK!QHiVtlQ)d0R94Z;jaqH`yPZyoXG(=Nl(FOB>8JwCW?QIUyUIw_{4>$5(&y(m zs@AN6R%?PL^`E|RLo1;IEMGB}Q*#k!)tBNI08gnA>@}=gxr0m702E@rVo!!_MxnwR zN80iw8bPNP=L{G(sMxKSL!Fc_gdP|7b3IGgP!$qQS?U-mDPRTz0~|sU)OUVS{+%-4 zDshQn8~atnRcEhS-@qk9y(I2$@twE4g_fotzHxtiF+$!_TP* z>rV+n7L~%W9>W$#)3=sag7-o$X-%}f8d|05E_|QViWnGDS~9Jf~jiB--^+7RnOYq9&*iWj2>(gO3@s~c<9S--T?fZ;aQFp)eP|iGzT4JxY6gXVpuHyRZWJbM4caW{gV z-=bSl9lC4SC^*VPEbh$$2Cb7P`Mkb@gpGhHOfH1Lf7PF zm!MCk9*LbOlVuWsQ}vG5VdcBWXZ&TSDYL?TTH966(R0rQ`5=9m}2N*U7%BV1=*>%+4OT!L^AniuJN<~v# z%W&7Is-qVHY+_rXNEIt5j;aF4V8kW#B|O^+VDPfy z{^{1K2!$C8z^k^}%21jIOwJ%=+AE=+b3|T;bw9t^@HdAPS<2PX+g0XXvmj+va3;ze z>pU=lpm3R^v*Y_Xhu{XF82QeM6;}rKZ|hkZ;=yBFUg_ikQ$}YwWef!+&SW7BzbWTyR{{lOUM5z-_o_Gh!o_fWnK{zgU4>Z)tlLTWOG#6aXu+$9g} zse(kEs~MH`vTVanzG><`8ub0+Xk!^!YB{-U(!B2)J^SZ|G)uK5maSjl*YvXH+gOQ( zGW-0S;F-o7W7(OoTYtal?b%G}BZ>k|J-$!Wg4g23;V#SSFhBc&KH2i26~mF^s`;$= 
zu#MVId8=Tu-c3^=#*2T0r5cU!i9mz~!=j&*EX6!tp-dFlytfF|BSY4s*mkV>VcQ&l=CT73t-nceYar~skD7jIUOxFB~y?j8(y8f~h*nbbwd6zp=h3D1j2yjy%yqIyb z(X6%WIbU#-sQR$1xsEEP3+}D-gsSA_)v!Y-ZH?O9Kb_BHrqKc!5 z>(bMb#!evgm2w!929Ca_LQfe&YA-JlkhzNAI@S@SvUG`b!w3zZPW@hBn5Gzb;v+^q z@|fBurB4q&hx~!kWz0`CN-Z*}**30mfUBSHdV4OWu>Uoy_ryhg>~34w9=3FN{cg{)qaq}tjb3e9wdtN0 ze7B6h-vID!>nzw6bk8VR2p<}?TTIm8tSi&d;?(Kn#`isr-~0P%@#Ev(XC?~AcKq$17kHDWl9=c53I95$#lHDJ}z4Lg6~7<9<39^EJL$FD2LdCz#wS=H(lT~k+;;n* z;=0>y#`1OhkG9jSx&+f<);r?AFpnKMCy?v!g+HIOULM@pFphh5<&3FU;~>Tixoe5! zIDkZu;Y(|~^`LWlv^6DGjf+gcmMR2l-=N>7z(t>qGPKUt4UL5n56mcEiLLS-1+#aGP`hFlysD-O-ypB~6_gVULRfLUr|qwc_?I=^hyB6X(uS3N@GBaW!so;y`!r!}^}+ z_(`p}xyU-MA&27TDQ3Dwdu_J=zh$SBddLFKKzM)s|;aZdoRj{-h>?q1K3*x2;iMGjanXFRm{#P z>Y^Vrah=~t2YUJ0H%xL>c)^L<_n*o9e4fJ{Hm;F&O6dI9k86Ok9iLymv?J@7O2 zO~}!aojQfmxIBGO6GNp5??KY>g;8YK8{1x%QmkB>;;-6jv!wHz`X(nu3>p&cz&oC4 z^(;o+%;4)ieWOOR9IQQm8mF}aQjftYn#vh}J-)pk&hUo(OvuyqUSdlKlfc{=kjQ42%Rn>U_o@ zknbnUf#t^$g@xhY>i=vF7&w{PS^okroY|Jp&>yI5P(Gcs{WH%1M8OOc3nz|epU=99k`_(<^aopfv$pg>*?EdZ|{@k=f@ zWGaekEho_nn zpEGhde)gsO-v<#pBhlml7qWie3kz~`-(t)n3&jZjWYLtaYs9cXD;jpulH^w0Gd-Vi zJ2_uM{Cqu}1AR58?4e+?N+b-p_b4D?XMSS#+284V?%#RK*?gbyRnr#_+v%h z!0@04YDO(t;mxgp=$^e6`(Jvx)*oJA4+kfuFEy#tKpnl`g9|5lx%x!_jpa_u$OyQ@ z*#zz0Z<7>$Bj6~7!fCR3-xrTLjC6i}Zy!5yZ4df^4-&}Pdb>{Ff$+(_K30$|%5W(LTBBy#Hfs}%B_~qSnvo0PLu)la;7E4| zxBeP@Q)c+{ZTT~PcftdT6yO359%>5tivj}f`!f8ZuXr)ib-NBL<4p(QK1J>Xz~wj# z;V>j>VOFaSF=|XyHq0eUC7zRT3~~RPMxNDw`!XZPuRGa;ASxm_D&;DY!imotpFTOgD?LsZL(h<9ZNNuw5Ip6oABiGK zs*k7FD?kGv@+pF3t4oqochPZ*gvG7^-K)_bug-?R7)V`X$QS)57$suU8`1FP&cu7K z5B%nY9bY$uegEy=_|{Mrg$@@9PsGb!e#VUOK09>EtDGWG3-LG#8=X)|O{@_k z^1%F)Fr3l~bM>gIk&u};0IF&MqDz#W;5a(a(-rH|FI|C3PwA2o z5v7v8J9VT<-)H+Cn*I^N3J4+;KNfBb1 zHp_wFn3O|`KC6T|*MF*Y6UpyqK@CLIm6kfe7>%#nC@*2wi?50Lu#!6#LVhK}RBguh zWq6H!E&<*?u!dfT2X05ge+BAutrgQow}7Cmf1MgJpd(-k2mvIXVfCaJz-VqjD#njd z?iY4kW!zUWZoz!Axl|2!=W_MR7%U{NoD6ps+ZE>4xDG}Iw&?KMd4S-QdVt2s24FH< z)mX<{PGLq|VIXnU8niwp{} zXWG__ZOtE%3%Yiae%NgZAAysk_lMsoEZEY@w5(%`1mP)s3G_Glpo4Ia*nyKf@mq_) 
z{O*p3ByHoZrBY!0aT%nxGQilA^iOvdJ+@96_BUB)ltV>VlIb4>Geqr}#hNK}66gie z-t7rh5veW0g^MbkMo_NhI1Fg3zLO`*!j=?6j-f0JDO@fw>I5xIB@k>#?!5adIVXmP` zFq#aQxKJ7;Fh#z9;ZBI|2hPQj%wnk+iPO zLDVEP@k70BgDbdWB+<6In2X{ry`U2%j7bLhew&K0LEQ2s>a8B>W8UleXPn>XfL{0e z^=BCW*Mr~p?ZXNG=MKNz_x*z0x98i}`@)FaOq#VBxWeoGyP2bZgjzgWCL2J9#m%@yoSR<)Y;|W)^p&$o?Gw~F4Mz5wQr!j*I3FP{ zZV7f!X{g3Ovp$C>xGJ)ZURs}tqyhu)NzK$Q5&F``VPn$A z6o$6>*n&un25=KOnV<%~U~r;&i!nFyTi||TiZsPgI-q_vWajqmfkIW`9XF03obK$j z9U*vx#1UeFHew|Z4O0?vVP^BZA;TDr5|?XcX~P)F&~(E8L)}}zHu_*$zLliH%*@PA z9IP-^u)@sD%nTJ~W@ct)=A^<Y0b`Rr~k9sl`QMCWy|*U{Ve%+ z&pl^)%5zc#0THV@^P)J=S~g6X?q-A_BRLM^`3HC9AIC;!F%H(BQlc9Z`(#3q357%P zIod_mT?%Xo$LJD@k{n@?v-+1^ZmKMt8|gk0aVP^}aksVM4~QRbKQtKF|Gxtv+tvEJ^ZF%rPo}WsEC>R ziY?z6kTBMhMM(KV8tQA$QsJj%{9HVzFP3ap_A&! z|0@~i5wbAC+Dsawi6@9o+`EzyMt#W(hO#*%ahmijcoZ>2iN(HKPdMCz5;2P`8fQL{ zGa04TxFtPifpf{b>^nv(wOsujtkC4nYr&o|hZDt5sA1!_KzXW7v&3(RE|Z)%$j+-L zM?Os1C46wKQjr4>k}mm~bkL`l?qV9s7u`hek+8#gDdRe~ z6$iIezmy6BnK(C_BXU;A84f(}=#xR(ua#8`>W!(R>MknhR2MC&jRsqyx=6X=m!zY* zq$;R8TkRQi%F>^6DH8HbO_D>>UbPT{r;I*6a@J^s(IQAAYADPB=OGbP5my-!CXI6w zGbc<#QL#rngOJb+i?J>x(2k^Kp$}|MJ&)i_=eQ{-`z0b1U;C}w31pQc>Ak-FQh-ox zTY$2#ZYPV0xI6U_+){mM5xbMdXL0{LPAH}p!HoIc_! 
zVdyS0Q^!c084T_S0d@Aqpv2gbn6&)&dLPu|>^?RW+WZuOj z(K|BMiO;7I8pismQ;x*u#HVaDmxh-NRDcXEB#1(snc$ODL#-+VbS7{*vh#-;ON?|k zp&BcwlyUw3KDUDJ<(SAkBysdi^rTN=YEF-MhG*5^WNO@XZplVd$LouwowG22SCjf& zKOjkfvzEA$h&SEBv(FPAs$V}J6hBxm(nmmit6goZRUz=^3GahIRwclX7NC#8UGyAw zpf=a4aX&3L+MgY|w=sPo%836gj1nJUbv(u*y-!KFD828M??wW(Yl8%*(OAeKGf=y_ ze7^4DI_c8lSS`U3wS*Rr)B|XzTI6yoTiH(LC#Mz=MGl2$2NcL>3Rb%jUVW}+F~G2t zNwOfdAHnx@U65HzG}pB&$6#u%sZtf@n`+{Vktlt8bIXS>@I@F0IO^`zon}27K}ue zo}ew$zE+1sK(`wJeMX`l*fA@FUWOR*;FFgvrklYdj=`&u?tBU>WK90kUpLDw5X^^~ z%@>&0i#y8*Iyca2CgmjD)45ilIZEhnR52R2T7eXaG9Srdbf7NQF1;F8ttI z#5rPjvAbAbaJU}e6(e!V71u$&4|2mE=pg~0uRF8(psjrgYn{8+g34~}PJt6yoGPj2t<4y3kMpW_54 zAM8j54u^{NJ2h;YO!#1%sa_!@4txXNAW4+b;iP|w*TS3@FbK1TswY~&rjBB72ss3{n6a~M-fp_x8bV}m{<86svl*;~U7Zv> zWre-e1Jztz2UioY42+{{v@BTE>DZN|%b8?R48iZ{Hm79PhC*2yw*wer1|mB_bFaBO z$4hpzdTkG*z1SwXH4jt{&1SvXqP1z!0myHQ8O$dx@@*vT>^gMIcIT$p2R%3Y^SgT^ zT($_zKeMi2`100-FNGaO&UYAcnhtK^uD}Zrt=df`+(}@it9onK3O)FOX=1_=3d*m) zzIO!Mb9%c;Tf19IPe0=YRk=&5h9xg8I=4QNNBQV#&yz{Vp0}BiJqgY`8eYxXf@L4L z@@eMwTBa+n=Ugfv3O99m=15)zqpu`jp@0^S1oN!7u`U;u79%cXi44*#4?89{ri6 zjP!wLBkh{(`q-5#W%5ipvgv(yoZ7aaW{ErUXZXVC7-zU#Bn3=;Ub|hRd!$Y!CYv}D zm5SX{?|o9WgH`(An^~V)z~mU|P@3%3mm_!2d55FwL!|i1%t3pM5_9j6%}YhH5FYSy ziU8`;hq|FT$Et-po1o=}!}t{ybHYR&)uP`U{v!^#$xWkrbWCTAa}pey^EE}hYggT4 z8u@(td_>Yd^aWFT+>e$byFA{e!71qFp4=~uR8rn-_l(4aB@pYGJ-m4*^q+qIBK_$eJC^Ofr)s{M7MKB0 zj?xr4PL5C#@PFjWeR_0tI*>Mp&w9-Ng`?_5pVh-BCqVieo12EGXBn3yyD|;QHjFh* zm;zh!bAZ;TW?8Y$EDT|R5zwriH)Y)f&rM@UL@N4ZfU)B!iW$wUWuTzLjBaEEN567*7NG4*%nM9#69g8+H7c>iXZ-$$~qx)S?)+!0rSvKD1u{SHjY1NJj$7zKyq&JO zxx2dJD!jrr&sYx^4O>hw2C>|jR4scx+I-c0n1#K#p8Y)g7)f*FX;Sj|?kgy_=D1&~ zT6E@S7w&wRuNJ^_xk+HAS(*FhslF)?vI!-R|BEe@N0P#xss*cvb^>xLz!<*FV?0** zd&I*YtUu1Lxn((#pJF=vkBEBR8=J})@7J?xDTJ@p3q0Pv*`Fcm%a#fZ4V6A6iu)N_ zX-i|`6gFbCE-4+XbM;MG49Q-6^X4tmyb%+SVBFy5r%thp&_<|$yzMJemu06m$yeDM z+s5E|)Uh{m@wNHMT2B~IOj$}i5;g7N`{Mz%zHFlyfjvIeo8A2pllc<`7NcJ;|IKE? 
zqe-G+WbJ!-?NaL4(=n$uVXoNBaBm*fY77DT(ab$#a~G=zgRhHHO%7qjcHEnruuB)3 zd;@{&U@*>|*yyG!O5s}HyP6rZ>>W}+J@LMU0kh=XIG(-2F0-a?JpMJoruNBiOAN0V ze#@s1J+(HT_$ZQ}<-b%ojk_`An>r1;tw&VVbbpb8Y$(Jf3OkJ^BQp{UzY!Cwp@US? zib^Spi9%3s-1d5Z+zni?_ac~;m9b~}uZ_Bpgi}d2j?P+tJOp>1?u+gI{57=Gx4r3m zLA#Gs+{dX5>$Z=S475iG%-^@Z>bQv#`!^Fzr6yEzoWGt z8bdjRezDfBlI4I&ET!QJ z!zu@3${2bDLFF!?)$18Be88;#>UoRH={X_q1zNJ(lnYRSl4SfrWzr$cJ? z$Z`|If$S8T}Z_W4i{ZLZ?!-t_PiVo4imc7}mpw;X8j&U6ETh0&iBgc^4J)?#!oOblzys|Fro zf{-xS#jn1O9p!{YDAKo;nRn+ueN{Sorgs+%B<8gBE(Y%;T@$-g*Yr#E9>J1C$I4p5a|92iBLIH2^KlJQntMo_IxuO#^v8h z1mOGiC+B8#y9PTgH>dpk11u^U_~k#Ce6alMrraQ3awZ^v0|*3}`h#TBEL^M{tRz5I z024bm7s%2c1ol}uI5_?qg#Y(E2pa>CM%v7d*~Y`k!J65?#o1iX$;8ps#POf4f>}T| z#{d#LLrb`S%f0@u=WBBS*_b%lI9Neya29SRkZCYGCkY1|7ZW=tkoCXlo&3)eiobt6 zb}kcEkg+*{gWCwmZo~>wn47Q|ngW5KS21PfGy<9$vU9PsnzHwjwq4p3kLkc1V~5Nw>FhW`ueV8jMA0G(te2J9T1rfi%BT-=6Spd=J_ z0LW0Ao12}J4ZsQHH00#|TSK@ygDz$U&Y)!i3tM9ocM~UOa|0)H(3wFl!vg%L!57Ov zj6MJ5wqRx9U;=;^BUt}l0|1bXjh%!Q2x<`4|CK4_e=@%N`^oX&SBrm`+5V?%6zpsq z|2PEvrK1%~*ox@81@gpzS5Kiig~EY)biG{+WrJE&z?f#M6 z{q{Q-x&|4RdBnf_?d5$C8Cg#WUa0c8J0D5}fsWgXfzJSwtJ3-QAh@V`0_4W z%>?hH+uefgY-;S%O^zJ@1^i0R#aNie#aLUpUo+EiJu%QZ-Pg`D13QLK16s-pVUZ~< zFAJGgN01)x$GAD(!8A!PhPp2WL@@2#m=?bBY&;B(I9uV8lGVlfa#j+5f~S1GP;oZW zoVpONQL1_Ju)QIHj1*noiiKS5588X28~55x`Fgj_%ai2=m=PgNB~QwDFI?g;J%!YJ77^x0G03L(6Aorx zo|KHI-}Di2ZAUU+6bM*7IoR&aDK$$vBeyW23=r_(r{$Rds{$CbpZ^R!!5dOVV?*e`N@ zL0h#g>rV0wSTB} z_*`&@YFLE&?FcQ8?N&lop_Zh7q>f8ptLr-TF|zZ@kI2jC<^K8)9AawDN6=&`F_BC! 
zL(pd}tTCQm8U)&et8}JZcg#aVQ1Ugo)!xCv7Bg{E%L(W}fM~1?-;s?_5yA$NpA)ld zOvIRyN4afQ8Ktm@qCfa1dR18-W6Ena(0&1h{?LjF#TksH(@deJ+dsG(Y zhgE`UJXcZ}dbe$B?1Q4LJo~G{=!GI#=eD zk4qiET}GrZcku%|Y^ph+R<-M_aez@3xZ-bmqhPplMPuKLn3G0jHS-E{<{@Ykmm3uJ zeh#j*5Dq^q=VP`jjaBazJ?Qca6;d(a&^zaAv3)ZdpX|<9v7N_}CJoCS``vz)ToOVh zitA;wb&befl9T2S$AZ@@guv_S^8(7!ZVDawmVZ;EBK2v_)_!dUNwCC~Oz+%OoD3Bu zA`0_8EG>*z?XWB45HV3vfy3tJ)$Y&31v4>!m*3lE>xb{_Lh04V)B3^S!-FTKu4Dke zZx?Sj??NXMS^1x9jsfY(x7=J0D0Fcs{muK3Q_^P@|1NLl$!;->oCdblPleVDu32QU zV?$*US&y0`aZ-jE%0Xz^bBVFhZ9)f1wS+;T`Z5|S&<9Wv`yeY%Fi#~^^C6dtInU!w zeH9Y7!ZP^PZc--%iIz{ZHu~TDV+h2D0VgW0VS_wCtgto(9K7W@a2H7>5;V6TuZ&`- zrBdY%m9)!JzE+P~j5`}ty9{Re?I~gPL2h}2Sf#rgehloge;Q!gmmbtvh{@eWhxjFR5OGUzbUf4dviI&gYQ;*std)t<&2;N>w0&aIrEzlZ3$Kl*ZJ zhiAWS%Fr7_YkCf5YdT&n)Ebgq**CG20JteIFHjtYteudPJ1iaQp1VU3>7n0L%)f6(hz-hAbC9O>zSl-{qnbP{L?mi;F zQJk$r9N6t*EH(!2@JB7Yu0O7~zn8X=d~H3*3fo6M>yKYa-|bq#tS!dz%N}*n@SNYM z5m`7tKV+P1(reZAKiNsv4x8?;tJ|^ajz37SxR>=X<+qt#m3tezDI-cnufCu=oSZeO zVW^}CD#p7pAhE$hu*+{%!vI1MB;3|P3pP%KMv_f8y#I0074D!^`1BPuCSd*3A&YrB z^LWqFm&YocPD1XCHW_LenzcwgBc(2;%J-r(POSo6|1AlOrc5pqAc>Czq`z_vNob!vv6WjovTW zr6&b-)PvjxA5L6-ask2Qg~AmZsKe0w0W)*=9lNg%&I+O7W_KOBG0X#ZGXr-D?z-k? 
zs|-1*XAh3#Njw;9@xK#}v5#3yO9^U(+XYL}*RT)t-R8;1g=pMJct6^d)s>1JhH{ZI{iJaIr8Ow|!k{Gh8;kcH zS+EDBi|kE$aPXd7yw*Kw5hnKdg0b@gip0JMTKJN%80V~adXy#{I+WMcQ8fzxN(0Oe zqZj}T8{d2pV@>~lJ`>s`YuSKJtf0XOqKztwm-uxkf`JN(=?9(hxwx7 zIVf${C1JOk{Vm!ldgVojKw-qZoK{HHL+RJNy%5EB=L1fPADkm1ake#-rWF?@nNTPR zPc2_5I_Zt$al(bdD}KRIBPWU0thUn(y#u9uMGWmlDCVQ+v?S0GJ4pNyxCHDhVU61v zBNEFR;5oHKW9rasne>~U5Pn(#e8IbvX zZ*6Aoz8uj}FlMyzKZ-$f@wIcWuAq$9!_tp-1eHkWjT7xTa=PxE9$1DiP%4fSUQ4Di zKse(0V>lyziA2A3sgQYUH$qB%tPHR2tWBS|sRmCw!9$?F(xQxvfNF0CE(i053{b>8 zEuio_NnA1OWG%e2eZGohVnA^fov$!doBlpSFRnqbKiuD4W1m=ouT3;&dR-&Z)+ute=gk8?S zAqK`lr0*UZa%V#~bL}hx!_yo`0DeHY3b&&^7Xqt6juZeh(lEigFvx*Ax9yLkMBv6@ zMVYu|GQtODl*yC7suPNnhBU$D=n9)^o!)3#V<$TXN144L#q{uG_`Ic@K+$-pMLqUM zEuEY7n@&!KqM4i=^a1B)UT=7eDfd^T6LUK06!A)h3@fu6-4ZxwnRy9*nOzt!D8=3ON2^A2GVm}H)%c) z=^8=jhVXsHB{-{3sy$|M2j^5Km-}%iW7o%zu zyLP(c9uhCA`jboPPTBAaT;mR3c)P`J+|HQclA@U)s!u1g#_3u6#&R6e1od>Gos9G%G?naNSJ*pQw^lRpc~>O2U7U|AzqzC|oHw#8eJ@zCPbwAZ z%nWZKyRUY4?r@c?n=f3T8_Ec7Z>^hpkoVqXLmtPnGsw6@aMiQmYaB@ z?AOa#xpMm6d!p%yNv%=QHxq)+^CUyZHQs={2^>lASzjMsbO}SsA2(O2xBS40)9p_4 zz?&Nt)#ensX9(O+4LM%1{Kr&imi@NP-~dai0}F-c?mgQeW?E*D z_3N@I{X*ELjoP>~-K}C@qJKnUt*U!i80FT(qUHYgNxsVXZqH(0b19lHrm|i1FDq8o z)24Dc@Q-orl|RpEr!+PZOeu1&{A;U*+L@jWv3QuiD7fP!2m`&JBuCe3_Ys2r~{%+6!bAMpC@FgtUe_ZrMjsSd&F1IRCwhmTrnuKUL1>B&3giuved26XO zE__Wgr0UhsA&+r@?HBxteGwn@cHfA%qyL^sterCCqB z(L46O?;`7^(0jV!*r~VC4Fy2r-+=L+d{YhAUI>+I;d?~qnYI?*PMQ1xGq7{|IDdO@SUc(6LwQ+v_R`_GKD(c5 z^wh4bulsq_oc!td?QmLrZZBdG`dQo$iFt1|P(r9YCPfY#fWjk{~%;Lr@6_RK* zYjISWLvWnAMx&bu^IiaP?N(x<(8l8U@Ol|*b<`5`($4d^Klw-My?Sm6LOm;{7U$!9 z@Xw~XnW7#z_vZeUvLB4TM2O&kYs^df47hXr$hdjc>cF|vFUA%?u9K@J+TJo_RlIRH z*bhA^oe@)XWs$;b0fAYk$oX8No_3obZ7H9GgThF!&3<|nXF_6#V6+WMC;w=(xZ!|> zMk}w1`=rVl`}Wos+M0ovm14JGug~lv1YwuQ=AJESK=X@A|LNtlz-wM;%My$&oQK@t zQeGVg%sx?@ELbkchR1i*;&_ZA?yyHG zz6?KzXM*R>-4sUK0WYBEQ2j1p_41~%7Q69NVN6c{kC^yB=)wPGEcQ1v$ju4_n}%=2oz5gK*6|FlEe>aMX`oX$#IO-l=I)wn~f&n*7fSc zh8NRV&@mrZkUd;+p}oGHuQP;=Uz(Ww>J0-; 
z?X2`}y?Z~Q$LMoAjldAhewpl272d&&keudrgQiU{iTrr$9PGV5T}7uB)^SH1wW~4W z>caK)4e(O{G89C1(FjkL0$dPG-^eMFJi_=1N4K zB+X&92|lKUk!uaCal@k(870hm&i;@rKHNV53!!UtIGF&)zMMp>$xt&S{P(w+n`~^Z z^Yf;n1{rvpI0Ju&>1y-};V8_C?F7k#qIC!?2i3d{81*h?3&at*Kotq+lO_PUezAdm z|Mh28a__@q>Q5W|L;+~uJ#6jaAo7xo)+!0l8JY||QOXXd;7;D&Eb+3M*#l*a!NRG7 zvX4j$M`)vN44v&G5xSXrrDRA180W#6Q1HUN ze(EWX!cZ_n&J1*(3ScKk2<`mN-6Kvdv6HS!Bk?tlY@D}3kbUM*KaF!+-&l^mN%;53 z5m28`w2oY!uZNu=`A}R6Z97KtOdxH5WbW}md%AG}AsDQhlu`cFhatGRAL`<8Ma6x1 zh$a}(_Et>dUdqxo4bIWxQ_S9!*T%+O1D-#3Y|bo+Rg z*=AsXp0Ij24(CXBu3s(j!d08WR(D!uB)%1nRegon7AY>7GgiCf!@9%>pZf`xgm{OV zOpB(K?S3sHDML1^$$r%_MMD*dlt>PaSYCqbU_+^kXUIfco>4W>YAH8s3a2IrjIqwF zETj64-!?^_tJqdw0Ndhjuy5=XCh*zUqgTksyYqEN1@61qaV}riyEiowt)sMVM1tmy zb-yPy174&Z22FQqTSc@&b!#u7ubk;VF_PU!n+P|P{U1Dg|5Y%#{J`BhE=D%TR3sY_ zXfn&-)-~HL`}{$c{9uxkM&H38B<0~bX&y^I53775$`=ec8R#!m`9{?o$DvKs(9Pp2 z^>eU(NAmgRt$~Iw<*}bMJ&oz9eKJ3x(S0u4g=kr3i9+_f7=mv9Wl#O(x6<*%c~FIm za&LXZdi{xO`Ed*H5b;{$Az}`2K4wnk4YH1(YfDKR)?S^lfmCs73rx7Hrb4PVhy3I)apSGrzId zxte!~5h0bb4XKl0cDOdWzTJmKpwPm??7`0YFr+_XUcmM zdz$96#Ioh1vfj5f)2sr^qBRP(lIz{bLk+QXtQS#B3AxbkHlfibnl|xs;k<11)m+jk;N1R)}oMHl>&K=(R zKr_ECwK7`tD)ojJ8Eg0nRAS<+a3xInX8*O;WBvG*S+fX}Yo}6?nhs)QFLF@@`EXnG zA~sd^L<1W0x@Esnk#XK_=ek#1ht{KK?fyi+t3t;Zik@^3`VL9GwZ6{FIlkx(N+FBQ z+@fwEl5mp78EuLcekk%ROCnkPyN`M~d|9M=p+add>rGw>i4e&t|7mr&Knl@u-t=za>eGaCy(Ik1Ri@&@!+0 zTY{8L`mU^-IyYa5i-M{nd%aAg!fGrUs*x*@WM#(8TYS=b3mj5q(rH|v?IxZEk9H%U z3j4LR8$IaM#fQ-H-K=?LO*KM88C%}FiKh>dr~4oG)K?uS-wjU>Pl}+Y7Gz*SrW=M z-}|`GsF~DPPX%UH{RXa0!F@0u@S04{Nx{b)zR#iE1#e_%zJz>f6kVphXyWg@Pit(sB*1HV(=fVP(O(pW?pxh`7mjg%?B!Kg18(|AE{@8NB2jeK)3}Pk123W{0LjzHpmJKJ53{Kk#qpuM zW@BeIJVgUBUK`$opq;g58gf{EV?t>=A&Ry+UWC`=75riO1(6iHs(F7*$qL%9#-*!g zs~g1*}&Y(>UDPwG-IaK-eMHA1ff0q;aUu&BaK`KHV*%wW5a-s=#b z{dBohD}O1Sz4kow%NZO_j;)DBB?9Nv!X7+Vy_uHfO;mv0W$t*I&fYQp|kr(5l=r$009-A zB^A$D((7ah@#)*>rm8MwRFW}j_DewO5d}!7GO?Cf39{BztN;=ReCeE%{iDxWEoozE zLPF6#ScNMnsjEo3>aJkr(oNNJ)x+dYTt-7izLhpKdqBzf!g?uZtN52ze!gqbg?dEfti8lVco!#!XIdsDt%3hQ53sv*jck 
zT&JAj{qaTNksJ5q>K9-^j^m$oN$!tJ?5&sDR@p8$Pkt*b~( ziwO3qyx2$c?mVto3s&8%=2aE^(NCMwg&wRk**{zHO%>b~D^?43wfggU2c`a6-jWek z>ptjCnXdo$&R1rzN~!X5;~^_T3mhIVu zC#n-jS051%dUSjdCtn`)Mu5ki8om{O;8hyuoWo8i8@dk+Zd%dmn3cd)b6P*dBG_#y z^|6_jX^8#o>6d>OQ0gDtQSdhs6nc*gVWZNw;?+q1(ApA%e{1d9pemX`KgC-)p_co^?c&mAz1z9|k zHA>p^%c5f@>Py42-{$*C19ViDOdl)a$d_pJIP)m3Sz)v4BWi$xkh1YfvV|4R7szojn!AY}WOsT4@%2`UFC2gg4&3_Ae8#0F&H_&>X0K*~tg ze`=V&z^(+q#KrM zI9P#y7sdXqWB!-iYW`zx2+o5V1w?lLLWcR@SrFhqfbRbkU~+N*{~f^8`4zj@g6g-`J1_$&yK|i> zD6j9<{ljSuV(qB~R{ma-?foO*M_d7g6j5T*idU+rTc=fK5*Kk6F4A}|Ef?(D2kdq1 z%I$qCv)qRdURHSVGba3w=*P$XAZ_>a!BOdzuY76l$CKy-(=j^8$64>s&Hchc?CN*) zz*ej}p`%|YQQ*O9WkPtQK?0K8I32&4{kY!oYh~HPQ7Z66 z*qqApeo~{AJ^CPdCst!+XbVR}>L+bK(6hGHQirn${z983)+PI?2{*|>b5mU6l#cS? zoP=?MRsn*vGA@o~b5jhaN<9r40c!+_)NrjYPec$naM_AN;=rCE-k(CK-?eacWCrNh zFp8(=gCWlf9e0&}6eQtLSeGIzl7f(#!^yR{L)uOX$eGg{f#ruugFh( zqeu<7Jujwf#?NXgD5WocG(z)(ADOh69}zn8sqDR9q5?oeDHdC1{#67;@dO!WWujf^Q*^jjLG$hH z4^$QS@bHMU1!TNxNB(oSYrcBCZ^#dO!Oye$CCNBh4N@*pRxF>taSK6HMsMI23b+F1 zzbYxY_JVRXme+!XzQeroRSBq{`KL!0X#XJ(Y(`C6J+>GB(oC(6-`6$b`B2}#Cc&wG z^~h7EoTf!z?TxH}aIGuf^88!h1{2Ck=q^I6E(9R|CiAorwT#90)?*Rg;$L0}%ZFu- zD0cRq4KBMA<-e;4{Y|XvA6Tu_GP$t(_U>*UdvPyrkZ z4M^xrhX)B5)aSJCD9>-cB`>$nC!LW|9wEhkm-o-5)LhZ@#+tiD5_rKIyK!f8$%7|A zMhTyS2l`aQi=*D$9&F)YfgJviJJ>*6n98UTD8*pkXw0U~U~5Qr6&!4O^9Jbt9SYBm z!BIE?e=tG9DYsSiow!o2rmBf8{*|Hr^w?Rcbts2(ni!`MxJA=s?rD}$DWk8?-Po%5 zexV~%R+->YMa7uRB06A-Ksfz_xtPl16mf0510k!{0g*tT?*?rU!zy)dGKVch+gG~l ziKX`KfY91V67~_(9*%lS z)o*YDlJf}Wc&|bIh}wERpa?7dFW4-hjGq+7rye46JfBNjF*V(92ikW{tl@r1NPsyF zYgN_7k0@7(&s)L}ao;M5c-~tFVg-LNSko84IQAzJNEUv;A$U0j|6ofh9-AeBEJdMA zgX4&cx^nFRM9e0?LC_M9ZMtWoI zNb~-~Sw`Xo!yHT12~X0Qw)+)T1n*M3X6TgT*n`azs8I|UDd`mJfig@3CBaIk?Gx45 z9M$%Q#!Kv!DFLfcRI=ZU5}nE_#P$!#h&a=3FzeEo_>l1DSXUa~o%oc401jawGw8|h z03~tC`pPa*w;FbQrqvC4j#rNAfa_^ZVAPrVQhK=tn{@9GcSgsTh3^Buakm5_WgP2D zQIXhtM6y{X$js5q{cf(0TW0zl4I33ACrl z)rq9+u!V0-r@k=Gj7rce8CN> z!@yZ1ai?@-e3iD0iZ= 
zzK370KJP{6Jl3!PUXOTo1ztw=iGXI;_{#tnD+ra`0BfsG=_F;dlL{vap3XS|Rw4!nzYK5dYWXQ{n$4nZ!c8bT= zyMqPatu;gt%k;^6Qff*3Q8Qj`W}qC0DPIY!s3CV~=Ao;Bsz%mA`LmN!DHh2??CIjg zwO00^;8}(UUvu9*A;CrYW9(pc+bgn?@!7;c)M}TW--S=TN$msu=gyQFO=0_-Io**O zKIYL99i^C{nOK}zH-U=z%E$p9uB2c1W=Wh{fJ-3!m^)t$pWeJB!J@Fq zY_(K(Pu{5M6w{a9N*?fFRZDc4jN))LLocSl6^xQ_{xaTt0;iZ?^qA;l?G@u8$GX01 z2bBY{NgTyYVp;eU-ZTO zshB}A1if&p@o=WsR83X!Y#SdVqq0f)~34IV(6+CvQukc*= z7vdi2h~T;D<)<3{U+98>UETm$1?Lj*l6mWM>V z5;Fn{5(|3g-Ir8+c+MO2NHE7HjWMG}6V%4>kn2~M?XPJnU`WUh)~%Ecsilm*DsAyM zO*xlQy&glpU;Lz4j^x5aoJ!l2>PLK5Pon5!pxVHK92w1A%Vp?sEGQX%kf3`kh|*wa z=x}FSJ4tW88pu+2I$Ey>mjhDRqn^j!vlxh0Yaw1;^m}_ec6w0HS4=yz#9->Tv7l0m zNB2HF{RIgee9_Y&gB_&yE$sCRZQE861<^x?%gM754!IriQERq+)ZZ`{h`p}W7;MAE zU>;9TL;M*yBGBt{bv9i!q>XRve+hDLE^*bk-+vb0V*<_HCU983{jmw`b*G4%CqOlY z8T;BJ>D(5_uOTo?V)U*W^$l-;jeQbZrLu;?I_0?=lnC8}T}d#j zKkr|pMA>Fsvy(9rl(l+Dlh~#9@4lefved7)2Z!h+5q(|i*srRnya1anuxTD;`1DD- zYQ>~pC{3;IKv*eNL+j>&@>g!FYpsRsev3gpAa^CL6G@TPG!D+|StpKc((DVbCA*#G zye^N<;|oAD?Pc)!4!drtSz6P%Ndm5k``(waCeU`n;ldkCl^ZO~ zaG@Mg%S^r)gm+NG@rcBxVM`vs%oZY0)(AAe{^FXsw!3&ARpO)I($sns|2|A*3j8eQ zIR`^<`8{v{$eKO+E;aZH0AV>I_j_%#01nK^VX{Tasn*IOq(yYHx60w$FMM~+J>`Ht z9+y8(Fb+J8^}p3ItXyS z#^x6XK6~)o7*QcK`PYS5{8Sah-6;|_C|OxkYC)ZicMzCMo@p!{UV-^hJEtg&gu{-9 zBt7Po#f6s8zGmgDnXC6e!!YOsPd!XtP1K6N*(Z6ERvBt?JDFAT1+&X9x07Gnwa7F2 zm}6q;7#Piy$hrS!odv!@;014er4z45i(?IgxU+pZR=ZmvA)VV~h&5@J=Q9h{%f(F% zHN~2d8+`~)rA?MQ?><95A4R=bKKsNQaB3iDCOlp634*IOyAZmW`wbfHq5xyS~<_08&(Ojz2bkhk&ttvYtqS z8w+k7e=4M?i6CY>tfT0`pT#)wXi~BW_Wk%V&VEx|mvb*CJ~@3`I=_yvPpz`$%VAdV zY%#kT_!Pi%D+Ja(zpJI(yt15RI2i_85$*nSv@xF=~5o{cFBjMd(^B(Frw=&fwMZsU$;^znveJ} zFzs~*zeZBYvyg0_hn!s)f#}ie6muBV?&|1K4i#m&cOR^Z(eCKU$%sfU=EoYAB>jMn zeJ%Kp7UY5dAl~_xaT+HGj)0)V-&V+3S(w;B6pf1nw7v^Mk6f&OAqy}tHe>}M7$Z|o z5Cj7)2^({p7@2Z|Hr5STxd6r-AT=NWXv)cE@Xv6>3210wWaaYzu=fr?nnlaHVAoN!w(H-ybFG#6t;{d& zodC7R49raEotXa#kgziUM}UO+UztwM4gg44*Z~>s1WX)&Y+*pnc~$`DF`%>>+rKn% z`2TU4{5zcfe}$1Sv9huJ2aH5}C2Efi-RJ4YF{ePC$H*8w6iD;v0gszf_dYK+lFXDo 
z(--16A*Du~O&Y2V`LT>=sblQ7E7wagMbTgVTZq5uvlg!RjyC*UdsA7se1-_%Z?4?7 z5MKJOx-R*A%Y*g4+QRSXt_2G2uivKH^jx#61LAwNq}8WA$OC2y90JitB`7~~5*TiF z{k&PRTD&F!FJ|oym09wSe>XVX-$Rr_2=@EsyH7BgsxJU(IIv*m zD1Bo}R1j&u8B-G*28HlM(-p9Lu&s%xcIw}dIv;~7*gM*l*n@IgJy@U$Z%$`A#Jz;w~ z>d(5N3Ls--)pA2#6#`DneG{S#!hZns(hVDgY237S66$-&mQ92iFt-hH7*dr4QN~BE zIgFq=Zk?oCh(M*FimN54sw@EFKK@!~JvENloyXJM0+o_>{z_f(Kv3Qnvbl5Jk*RwPzarZ`(+s@mtZN~)W}Vfu;B zaE*FKp0>sWSvL@An(>{-e2L!99i^U*yuRt7HaI;^>szmq z5r?!o-Q}P}SY!ltIP>?!pN{-6ndO@g!{3{sSLU;FHRYtjXi3Vxjl?gIn>oh;`>!0I za)i-z!1ml6fWc%?O!C|?(SbKeky>O8n^%VpTBL~24!_~yyvzylltGF5t;5zibH<7& zz-E#4^<;fJvCrBL4QfQLf>%mF%`-7Dy!VR68zANyccj1y^_nICdT<^ph|wp$V!nW8 zDJd{sSK^U1?ji0c%u#aDk(4!Z-EUsZ<0&csoyUtlA~@nu>JAyGz-aBzEYVExh(V&A zx`4vP7gXFILT(+-Zk~{=Mcxgrr zjhaO!kw~^b$W7K|58}fOS2v9o*;-qo6_CClr=4feB$og_PwEOQMMJ0Bncu&Fm9<*p z<*ceADH(eEhsA*cS{-N|{vvIHB}|G2^Ath^pUeaKf_J&_&*~piF>=FsWmn{lM|cw` zj~>r;H!nlvS(KwNh=*iDZ56`6 zLfc_CUiv@!lFp>`8sIJoj)S#HRN@(w)#@@xQ_^VCMV_T4DEb?PMRd1rHKtrLZa_r+ zMRePv8X@Q?fmk~Riz(6rb(+r*@`ZULK>-8uxnrCK-#i5@@$-l3@b1b8zruS=U%k;y zcj*ebV3|+rW0U*fJsC#j%?A-8fN~Nze@-_^#d8^3^i#_(nDj9|b|`hW4?N;z z6NiFI%)E0(4$1Bq;YsUeFJo2>5Oi&SG3pJyap)Puf)K(`mzL99%9`%n&=_8zko?9S z>`1;ru~uV9VsrKO)?&pnV>)@Ws4D}DjbrTi#)b!C&SaV_65C*siTF! 
znYP^Y`y7U9g!b?qG`3V2Q=b2$X4snd_@0L2r0@JNcI=1!i{p5ls6~-FRu}z_ikpk1 zd40l4%v@Pc27(wLRDvxJmg<3h7f{akha0X_;{&zi?lZR;v4jeQBA0`#q@1}pFN(+G z$dTfOLnP#lbhcbPuJ^IonYTMc5_36y+HIZj?$RJqKfjrjqoIgtK2D6N(Asou1%aehu)H*wZUN|Y_1<-}~| zRHjvHUVa34!{tNnEMrOAz^8LhYA$c4$#aEZ@S(Ocukc0NSo&x~YnUZCdm(kB3+3X= zDo+PD&sQ_gCOmM#b`54vy_BjYCSubD8>VnJ&91OWHxBOvCSxso;(K_a6gQGnyE`*j+0mO{&UUuNkfAHHmqf zIgDin1?sPSV7eEfY>Br(wiec~{Uo0o4@>C5ItyW%(Gq;XuTMk=ia?q& z5l^2gtTnpy7N(yoXt3E^buBI{pB_e?UvP#0kWx(LstSrsagq0Ms{u+N#bAa<`?`s$ z1!By-i4~EyYw>-%3Xfo|v2?8)5s+AL(Ya=?4y zLuP>m2IHKX9B4Y8Dde_F#OAv~QoZdryw7gC!uL#ghVdaY#k{?fdjbD&aL(iJs#$v} zx3m57^&ek^?Aav7fIu|Lt#ws{l-#yv>`B{ek+eavQX6z@8M#}5zk7c5)D{iuU`z@^ z6!Q9>0m8QnFXax=Hnwxe&0%Y$_!EWRE=9bqmha*dvLSZ9^}h)EVE*sV;RDEs=$Kd- z08tVEVj?;QCPu)#BQq-lfTD+ioe{uW1t@LIz{>J3%A)uml{J|D6TuMkzjDf*_3s-o zurhG`!+8RPtpL0zW)3-}K|-zz=E zDUhIaWDn3Ff4&DAE1(mdTNs9RHkvQLZoO+6bm|mISJf&vMrI!s=8*{Ey~$tBjL%<@ z*TkHi?-$9%UmmAMYTI$^0?5M!TGb!Yb1lBRp{iRb75t> zUdESrgyRh>G{>ApurtQ0V%7>g@PvQQTmfod%jMc@&4}lumn&9>v*{m_c#+mP=Ww(B)li(P zS;9!}C*hXy%hu~^_r*VOpgL)S5W#D$Wth+#gi4*JJ4yf^ua8Lcvv3c~2xDY_hU*3B zE_>hV8u~8#M?iY7!>iI|92&nnSX`i?X&5Ko&;1?b+OhE=bFTadU0e8(AS03QB@FnC zN4n?AX~ugF`rGY)90>6PoWw2(cr%UCC)ZG-PMVFu`ZX9#jxIvQ;gFF;aK6Qbm<5Qy zA8jZHX^9GWfKfT4KKT%>684cpuo^TeU2?a z(VKj{cPny={Vl+2pUR&44kgzvq`MU3p#1D=%W7ZbGNO)-B?d*z(eS?KXS(WsMo@-S zB>su~2-wjaIme;i8s<+K1j9s<@^Z$XnY}=Xco+>qefKHRVx9ySJtUSf^0zRq#tje; zp&ILtv*$B#&zthCV3|QPV&_zz%`NVQF)zJ%{#2hWXdSm!Ir7>9a-rn6;|$vUxMt7i zqVLveSlVo<7CSY!U}kvWoC7JDW6GeBpm2L>--2f71ZKODw?xxpDyE2KSk%Fps)uGo zYh5S~Jp#Td{SJAi8hkwPvAo{@^&~xZ7d)HSyM&+XRtLy*hkLn8^*$blnThIAiBwrjGBlZrIe$}9TL96+lrqU# z1JN^>+7u+4WN%6|HAVA#MPRfkOmWhadQKvrKFe=hMEhrGr8@m0$&sd_+)RKIzSMSmUP}Q_HN8^N`?<|I< z%?_QuhIn^E-ZmM#ugbe;NNV<9&zS>mVU0A|%o<7$m<+`nKIv>p?kW#YXdsMYsuEU?rq3X;$h8CB92Z z*-~pI^lVg5+mLQc)QzMVQ47GDga~ry$1Ls%KawGdg}pmHtt4Y{@2yL-4Hq9I-?> z3{fkB{fR#Vg+?1Gl!l7A!YXtb>9{eQ8{s|6SwhDIGMo*T4yi)nCDC%{=7tbom9!sb zSQ*~bfU?!2TRVV~1~a2N*wHhk$lofgl!K|Hx`t}Evn;2;C^3BTw(MEPP*wHQKTH$s 
zGR~yd0qThWG9c!FX2gyj#m#G)2>60>?`rX^Fx763!bO z(_QG%7N+XcgM5f{c9D#zvNAMJnF28m6VL%Jd$Y|WFyI2(zl374ma9_H?uDGa4fP||vR!_n%aAsX|P(7(saTW?Kk)m%X$m}RP zLccaiDn_ff_RVBvj&^C3Njsploqz%pwGyDD>7Ax$uufklxYm2&0aGu4dgTu{Ah(tZ zwlaDcO}kD*$B40cL_?=7mVo4jm+rb)$=pGBC|sM0n>laJ?*jqKC?-L^Cr%9a^aRcyLr`? z^=_}zxxMKmG@zy0j7AgIP5Rh$3V4=F+}W!PK~@z9VN%y=9nJerkUP`ymtH2yHF^_mI5I^y{u z+%|7=)Y%x*^1XDgA<%LdD?+0FOY=v-Perx-H(dD4WZ?QCwBkKi>am}{V7+9XTT|?+ zuo~-fSC$QfwfhPlJgV-DCSQj#T(mZyjV3+CQ&d$R(lmJ?T6AT#TuqV+6kD&+e>9jc9o{IIgL1@nvII3LK(hFRa8mkg3o!)n@CUz1C6js66 zv@91Zx~oK3SB$Mq#dx&c2!JQ1RnoTZ+#?|5j{)#AjsD&OR#7hj;brXEUcnM_6z4 zSUhYCgSH7BOWQO>{L`2r-oo&Yi4)VIWnOuU-LY#@G22&flg<4cpiU+`o~uIq@eCZa z9-nXQ_Q1{9Tfq{T*Dce_fn(}IIQ+8GF--(CZXc%nWdV6+KRx=`gC>1283U`uf!@OG zTJR25BLal3e+G4l2_tAh(Q*C|B?hfeymDzUTO%C_62Ce?iqXuT($DouzSso)PB}K~ zGFQhIh?TM5YS8BDQq5P{LMp|e9e=#`r<`c5$IxyNwG-yl2dBWITOf81hBPCxpR-g> zGLcfr+Frvlt)cRKIKiK@!PewU3gqr|wsrRmDCqrKBV~?S7NMCigMBH7k*L+l2A0De zLg$)|XFM9!@2rtjq*U+-f}4S|aEyFHJuDNf3BFOE%_wkP*7PWbMk$7(E?DK)i|Ikz zKH2l)7iKEwM=44(PKqk%*{aIFAat3KAaYj{TGBq9%KG3Pw^kqv4_^h|AgqL6dc zTvRNH`j+AOT?u84KYsZyc&%rLKA8paL00`*uJh|)@RoFN=zRH*e3~Lfp-rs{$vWSy zo)nrRf4=dQ;Q)6#lZPYTh>_QVar{AJpWjDS7BY)No_sJUPfR7M`2Ahc`NkM7btd&s zy2`9p!FAWCJ6ZehRyy1U{ljaO@}}h~MKdM8OaR5&x?5(IFdic z$txPo_{oCa@8}?Z^~}5Xa;~q1l_^B0 z&#L?n?BEUlMHcIYr=J!%GctZ~WZDSmXQZw7!T5bXEhZ&D#C6%j_lg@Biiof_&5a}V zHremW_eY#qMWq~#^@T0&9y$iN(&iQYN*u#~;k8;z>|^33r&)JoQx(?4Tv06|T{h}l zL^ed6j7UICV-csKo)UHNX?+;j0;(X`^}D+8y!BOgd}l*kdXum(V~e&iBN_}`ooZwK z?a-IIAZtsTV0&SXnrXA72IOgrJ9A6${v{9U$#VO`nA4=od0lN^Lb#&Y6}QcvV9ndb zI!X`RHchMS}T zqewucuE@yi;&2x~`7A=$z-k)h?w?9q^C+S8xbQ~`XLT>eFnLX*t+!~|wE22%efPdG zk3>9V$35Qs_%o|;Q}guBw=N@aLNh0`RhGwdD6W;r`6RqfZ_l!UvCx?hvFE*quV0m_ zIJqZiZ*-=H50k0NwuzcBx__)#DAG1F8^#-ds4hR6=E1l5e7ExPnan4eNMf3JjpXVw z?J@sI$_y5jXuEUIMzmuiT6^`)R<5Y35~bm+sscjPW$PXNUfvb_3&M{*_@+c%wdolv zNf>hF(x!;7mEES{D>yoXR8$zt)WfX&u=ZxP_Po+cgG-Osf=onX)MjrJ!ph~hJpMJf zpg3j-l3Vbq*F{wk786`x=*Y#z${%a%t&_C}Y@eqnIxgQv@rN8BNe_MC$4O5`J@Ffp 
zHHfF^vRIojFYL_)+&wBLG-YJ=Dwh(Ag&oj6>loU8%1ih$1A~&VriqWBpblj-JOd3J zZ+0$euOoO|e36H3uZQ}umAOQE#h2d=_9MhN_oi1!_%`8Rp-u=&;J8!sGe7X1wsnpx zn(ED%7I3eJJg$EdtG>ysqaE#5$q_LmhYTY+h~1Ur=Vu(73?>iwox;b&JsUG!GSe@& zadEv7R%w~9frpyoph{0fP)J@bBljaI-qXt)!bAJAqyPB2JDF-zd=Bj+fPjOLU^F)F z9fpLm1Vs&~umm!!Z05?bABxOjM;eAhoDdM0g#FIRthXT!M6A;eS}r~Pejv{8P}8cU ze=R$CmiuX9tl#u74)*pU?U9Kicp8x5Nv_V}v6XapTRU{?f4~GM@6k2oVSIG@d2p-x zwY5KPu&j)Kd@I{%f+RBcuxBVXLqVs&G@X+n}@alVAJ!A>5-{R@R?_4cCpKYkf zr=Y#bvm>*Wutb5DOxRCR6^FTnD<*mWO0Kd<*wN6Y*D7UU)3h%V5yw(rYZUo;~BG#r2` zo_G2;Aw*&Zu<#GeKshU1`jF9^0l&|U13l-5X=bEe9p}iR*gYKN`dSZ!JdvZbkhq=T z(73D5fY;kwJI$|PG{iX>{YaP@|2v*UDaKvV0n`)W^!_^A)go{ju(VT<8SRL4_m0ju z=p2R@%-}$f8}_ep>Oa`YN)M~rIM0hh$G?QGO>l!xn7+o(DzJ-PBFrxIuPc22XwS2- zTZ*n`P@*IR_o7nSq*9&ka>y{Iam};gBQ;0hzDbf8WK@m{dp{q0FcKcM-XIN zETklWSKZu~O+p;7`rxTASSn0bzfM+KM)Hl9lZxLWMDlh}f#pYV50^vl)32t_mFGQ< zPP1GNm*Ju5%h+$G)hmmM*RF$>LzGX8T}lzQTKth{c3nW~Wvr2$5S0-k zPoI8XeIBby87+W5{UT7I4YP^MXox|X4|4WyFCQqa#xxP#zXxKBphi|zxN|5lf6^q)n-_`h5+G9<%haz%U_oH! 
zfuJE^1;FdCmLCtb!t9VTAukmy^K>jCh;dKK@_b)D#KjEw0T8jB>CjgM`kPV*#dd;bY!9U&H}cTTFM8PhU*;qv2k8V%gM`z|=<|kBN>J zRgXpEL!n0pGgZkUESa0XQ#k}-`j(Qb?NOMPA$Fxr!l`-2bT$0nd25pCEw&RAyL^g|>LHleZokQBok}9417^3cBDV<3H6f z5)6n${M2!hV6`R+b?-_VMj~o(T0_R}DiN5izUs05O-t5o*0TM+YR-cX@ohhCu`!Q0 za+dc%0``VV??B79mC~vsiefQ_z${4#bczhJLlU{vqT_5EVyZ!fG!BRjWb>8La>N%I zekVYnQ;?OOGTUGV`0c=FVL1OPgV?h7@B$R7)JAJiB{fF;m!#EV(_hV@*6#}>LCOzA za4)TtiRgW{&&~>-vsIp+nL3!)@s$P%q2*CuG{5%r@yZT z<42Im?TlY--npUmEi9NMe;1X9!(xQ5&xDhCF@RCW(LgZoS!c~nDIjSr$d;FNMFTE1sD(_%mBIav?yXJMnV$%>d z1&UNPhIFmEMj4U(Gor(X?Cdqm3HM#0Ney03^k@S?dN*qx5`AwCjP%mjM?I1@h$3kG zcx3l5J0gue5{-kK90x`INl8lyMHIf5OjNe@+*f!)7_IMkC+bWpVL|9^?ZNe;vMJpx z_d_*hOV7^j8?U_38^yfMEIKnziGIY|nEkh`%y~Tbw^^g$o3PC#iW-w9=Y;fj=*@F` zpaBy$g^sl1olF!blX69%C+>aq$~~dwP}%GCv_1`RR2e=j@C%?`^Wb!bD_E!l5*mmE zvDxN;@0vMtmbXA%ZKMoh76n8|4QC8FGpuTDYsF6&TCqKonj(M4hvv129mhW7`upM@)W^2M0 zFG03aaqJvd@&-fHd;QlOX2fnX7Skb}aTs3ZOBQ^moLa zuB|*;V?jUX;A} z+MAsNKEA8J2dv&NkL8(3pNlE+l8J71tLE+dkn40&-ajf*`)i=Ij@htj(h;~f zKUcieshBA)AVVQ*rFAgb+vet^iHkD7&u*NjsCP+tZ|@8=^aIM>3$9IMvVSOt!iDW; z?Fei4N#M?oyqs5b+)3!@YODO|4)(@kVYZBqJ5$%*7dBO>)fvHb<&mnVp2`0mWooz% zi5=Bj_-Bg8{YSOKkbvG^%U=3^e#el?mD{;HM2iZ=sk<`mxIh3{otWd27jjo%Teg?t zjyPVEWsphhQDu={s*04IwvLRwX+RS7dAoA7MCZ{Q4ctU1uJW;E_Lt0j0@L`rT=|n{ z2!?J@*GqrS*6M)*f0o7)Q&RL+^4$0FDeLg* z47QGm=#sz_9JGO#@`pbwc%ecP-W*dR2w$rL{9QN6O)m`DpMHd5`mZkz6(dy}9CX95 z&mrrcx-Ft8vyT58jw^>73wZUo`Hr7%n4WsLybrtyB zDH$)AP=3Zlvi^)Rgi`o z6-}1$I=v(qisqP@T4$W#{65~na>OhKK82`eQ>T|!z!>z4CctaOJdjepQ>)*5{$xUP=>v5d z3_i^QfxvJljk1nzbx>gGklt_3U z?nV4LLG-i);l`_0=N@uVykJ~TNl2x9$K-x&Aq`Abj7#s)Rwy7PeB-vP6TgV6AoTotA-3Rm*w zC$(wG+(Wqaa-`#kZOtobT;a|6jm~UTwD?<}x{m{I!q!HzH%Og^KhBU9t~wDGC^Fw> zecm94pG1y=F`n#c(Pmb{M>iE0$xj{pa*L6sFhx`Ho7A=WM7}!02CO5eW6BVz?mg6- zHeaYы;Pa@=n8N(X28GDHO-^Gt@+c5gEwiYo|kha>F< zf?l4S)5mEaEuURe?}UxG-3ZgCnK{69PS;jFucV1Ksblz85v0)Rqn;t2at5tc#Bx!&u&f7uTr&E@-2ff+s}P_YN#pkp!}-A=!wsbebR{TJ*$-nj&~Evow$o5`%Cc7 zxS`vYKD%!t3A5Gtj68seP2jqiBJZAYdAC?lSH-<-${9=B-FGtpN9p-tBHi9d7Xr<6Dz=(0gC}2Jg2f 
zlGzNO*nu}=P4XXU48AX7Wp2XME{OR=annv;8y8QmG$=4yRQ!7afg_GXTW;f}o!gZo zCg5!+6+2OO7#FR-?30$gz75~9>TgLwSY(Z@Pig9|M-tk4zv);N78lRu%9h{GuXz>3 zA5gE#dHBh5wUj@JW8SZ*9l=Q2+Tq}BVK1A^m-ek9onPm*8$CI1x*s=tA8m*i#P9IB z6Po)ndOVF?3lVLcoXl(=ZG|4?9nFqx-j3E(8z1T5O>I17lFfoUoWGd-=&m|AfcG+- z)b;hz>g3dbuv%DDKO8hItYICd@Pk^}sM80v~}VX-1`nHaZjN#qC;bApF^t zw9kn0Q}PGM;Gz$R+EvXFxuP7tN4e1qQ~owAe*@5Mcu^;iA0pTYBMLI(nZ8UkJ>_hGH5lvP3Q(e~BfF_6s(wM7?}-SeHA{&b*b`A&FrdL=1xc(LHZ4o)%46)rv< z-;=ZS?9jm`st`>~)V28=Dyxp%Fi0QKwc%1&?Zl9r)s z%9uuG|t+0Krd+n$Z~?A|6yV5Y)-()$iWOlFJWR~ zX6{VD$;=KzFKlOR=csHCAWkQs7cp_QFfvgR7lNS|vT$}%FmV(HP*mC3n%FuMaKO<2 zFC*0ejh=y23XpC}_!_ECfhG(omH4@j!$E zLh#T7NKn6b#TYxVj2JrH81!YX29_b#nx8gpMow#a?moncM`~%u0%IYiyOOv1h%HuA zV=FkTCR3U1T&_No+*6Ieek43!Yk--F!{;tTRxT{SVjK3fd zP@90D0to)OJ|eRM{qGlZF)3+jBO{|LkiXY*V5)%Ug~cqpFaiM~LA(m(+cmDQYX=#q z|KmZy{#5*b&m+Af0O{lR4-U>Qr7i1K{^Q)P02*N#LZ}WgC=d{Ev)2>d>iOT}+tmu9 zA!WjbOT$1w0zk)WGUrz+|G1vfe;pX;9CR4>?;Z{g;BPnIPrxA{pzfak;*@#Lx+F;) ztJR|OqA;TM2QKX2+op}9a5x?Bk7t6W5L>J?P%$v3viZB@v-#$V}miHs` zVV$vn@u>RCnSAkkeqQu8jtR(Anot`jTScH~L`_eC*a+UCOJg?WeMM?}tcT9sd5rK|$6? zAYPSVi*2HqHZlQQR>%EeKDFHM=4?IevQ~#bq`&5jdX~Nng=(3dkEee@BC;Kc0Y30l zCif2o1^w{ExipLDV__lg2MI*poZ|c4T;bqjzabAN@cyv%*nn7jTHW^LJbM#U(_7~8 zG}dxOy_R|j8&Z~rE!OobW2P1Uk2(H}<(hm&t*T^Br^Dvv=IiJx?G~Z*kf}>jf{%|oQ92}Vi=ukXQ0eS3N)cs0m1yb+zW{2pC-HQv^ zI_ORdt+9<5Lg5N&YLDP;>;?ooS7)Adk2cE>!N$O?8LkrvP3Hxdv&B?|hH7jVu+unh zCaiFDHPFtPNRg_Yt=7eZC;~I@u%07nN++3|j>znIq^#;p@l{zz3szi&!goK1YT1|91tqGr8y}iAb6=f9_5$FY7 zMXOr5%7mOc*48xp2mnPM3$P@9y4u2LH2hskB#3xS*`aPH3huZBCKF@=4tzjOK+vsS zCE+^ndBErwdx&hTRj+wAJ8IXlf;4mP4))ZxySv+}NoWyQnoI_e0zhRvyWn232e1nH z(GM>7qWRgL+JY*Evi757B0sI!ZC2~uZw$j7#k23wsMTWnQ}-*YtEZQio)-UTNlHq( zU9M}?8jpmckUvY_7mE+NZFd7JwTENS$K?<7_g}}eV8P>h$BC@Qh_xOP6we7> z0u-qpD&W^IZnsP8G9l;F`4N?g9LW25mQd63azezvui_}q8is`tFf%6{j8F=m$gPRE zB5Tvc6pAU^AdP{vaqj;6fqVA4HO=|uDb{Q_hA_Z-GPin`wr+>kE%NnSW9Mp6RdwS! 
zE~jIGbTZx7Yt`3kt}x{5c*+#c_wO$=;`kp2^nNdETc3|FN7-}Kp`oF;+^+9udS9|J;qlll#0q77lz}eSJnJDN!Z9pP87L z7!5^zY<+#`QH_kzci|u-_naPFW=AO8947`N;?EYd6BGZ!OHN9Pv)AeJ8#T2Nb~bPou|}HS1Uirp3PkbG{4gEdMYj~fS~Fr+_2eg zd7KX6%CE#NY6h`93E@UbM?{O_Y? z?s3!!g{3EBon?v3;~xI6JN~w|HobQD*!JyczjtB{Q2x;VNFJ-{9KiwzmPGs`Wun-a zi1kjNxA&L3s;*DZV!2GrAOqSF+)^;g@y!me%*;$l&=<-3Cb6^k`#Jd&VL2utUv@x0 z=sP=Ks%(KFm%-7ahpwrl)O&gmB|@)>=t0`y`1R>Ciyu#WtJURtp=r~??R<=Mf>@4! zKA?R$p38K;*^%z2-B$;AF~PK=8FLveLYh$>IXQ%h5sVQMHJxi_(!{FV!i-mCY+89g!onDKtpB@Ep}w#na+P zU+zzo!y(}T9W$le@OoxOn)4)6+7qp&-$hw%>xdTj* z%FJ*xFJCT`T{b=i1X{ctDqHr%aTy=P8!S9vU|<|8gs_QP@-^4*?8*rphnh6`z8Z1h zsU3&D22DQB>XnP-;BkUSf79+qy@%t)|4J#+?efjk-N8;Aej}65(l)w@h={oAe4Ct@ zxUlnkOD`{{p1{Gt7}88^8%w62Q{H6#{Cw(4uZmZ>MD%;^n=O<+xw%+t`a0|SEVo%} z{M)kmQGA1nMsQaqZV$#}bx>$WaXWq)i{0GZ6#Y21K-rIqus#?~tdD+I-s~!2*tb2j z6yt7izu7GmOFGUqk%X3n8+lOwm_U`$Dj2H&##O%A?x9SVoL;s59>BL+1sK+5KyOVP z_aDxd?l$-=8mFff)V74g=@*yXZpG6KO26MHYuF9Zb#Yu$JPg!%})AXXwL zWLB~oe>~t>PW{(%2r}8!iFx97s#89z%KrAQX5u25q2oHHg8d;E^H_W2Ph}^Rn%Qcw z!2}8dV$LSWcBG&l%N;7xyVcxY`Q7~6#Pl+E^qSCRUS8hu%}!sitn$P`l(Eg}eCgp> zT&TiGo6DIPp5R!eR#PRRqubFWuIDSDk*U!~C>O~v{)xHoA#tnt#A=PiW^c6H`ULdd zTOLa(91ey60rmH5>mJ|t^B=sae*;)1%aDsFa3EK0;q zX7O63O)M=X^ORR}y_2jeEAcZ4a`~r)rzn+@ED$N@+da?Et1QN?&ur0eMc$34GPA4! 
z%tH&REv(_OjT)mNHtS`DJ^L9iLFQ?$3D|rEisw>NQqp(U2xo!gIi;%}NPwN0ms(o9 zQ&InGZSjI%O?fg#oY$g8A!WgusXN zlq!w7BB!HC^eTlS3=5?WjDrO+ikA4Xg91%&3|OY*ulK313BBONsg6?by-Z28VXzV0~i#IN$as6v3a+d znx!R&akg>Xz9I^rAYv%qO)|bgtua!C&IzZuSWp1lNV?c;Dhu5&wg}^_Bb{@<(jvw< z6|vAT>ZO`0K;L&FQ#8MR%<57m-SYW;YkWhFd@^S9%-V zy-(1!K3AaP5gNh0!T-9!FG8kivRQll0%%G)yB(fINK?~O*ZWwPyQk-EU2N3HO|)MD zNPO4yJfJbog#&B}@3TLDO*xS;ky(Q{?r-t@?>mU(av22QYlz%pV`J+>r})2kI5^yw zDs?6&CqLevj?C7Y?6zL+j{16gj}c=YO=b%u?{@+AzlFxPrn9B0x0jci1@SQ!0|x;# zl*0|Ck*nct^JvEjg){mIY6=R1%v}DkgalNotd~DXGV?Lqw+dX^hxiaqUwQ0-cy~j= z1zFnk3=A;{uoJ><7OjiXIXb!xmS_{!mXrB?*!cL_@$tgpZ?CUuhy#*lT2#w*W>eP|lLK<+Xv0M67hwX-MFMsMh(0`dK|nf;xl3t&XT^8PU%ODvwn>$UjqQigcl#DP)m zxS5e}(#f&3p4#`8dBQbx=wG%HkdBMk#r;mpY$@O}kC97m30tV!LLB`Du&qZTWTJ%s z+P%xm2&p^(&ngYzntgEPNb-6;T5p#*`ATcuR4}b}|nlBz*TU&dS z!2Q~&tpYoj=vKgQTv6a|wN&}X2gT{HGE>I_r*1{(i0!R@e!L~q>rFnn7f5ouo?p+` zIqZq~Udt<&n)4q1sF-Ww^-IMorJ$^PF;4}U4ul)|LbIzf8-l7-XcL25q0v!@f41tC znNlN+le(lqH1NKBWAAnT+D_5B1@a(%8>Af)CIlz>7$;CM3h92BfWCMcA21$!fB55b z2jZ{vv5dmV_OiOM1;~9kPvSBe4^QU`zaYfl|Fk@GE<$fy8DO*vQ~)ibV7nuT=j7y6 z^W5YkFR;5G`OfJ`vascDRRjrWDZf`(Q#}Dq=MC3K*!6Nfm1@UZT-I}yrOxPKLf{^E ztqj5Zo98qu|Gvh%JJ}Sp23o0hN0ZMG?kqq@pkOmSOVDS^f6(96C_gKz9gC(Jf2@PG zc*za#J*NvJ-*=;ToSfpa%<9@1+HPI*BSc`cM_pY>0Y1N{5g#IaEH@t4eCECScS+28 z5rBgT=#%>T`m)VPQ&UqlG&J5GFYcb6s+v;a906UafW5>)Ntn``&^OPT&Dah?H+sH% zzD2J#c9tigY5Nw?M;Je5@P<#%jv~TDsgCw`Z*Omak8gOIWeNTlD)@;3^S?t1{v(L+ z%GO751CTk8y?O}|SPLl7GGt9n%^`a(#6J#Fln{w55Y|{30|5~L6&4l(bg!_AwSMa# zI7DvFD59$Wzq|XgwL;ZynfU0qST-LNBWVDW}s4*F0 zKtcg^MBm%qh6O)zzX{yKcu0@7KsOx7ab0ZOHt_Rar5${G$U>ZL|5VhvysgSW_Z!lZ zAaDab=jTPrI=d+E0& zIyZS^jI?>L3E>&ALc4R9o_NTCQN!O*7lwo2TaOKg2JHe%ey(fg%5z=tFNg(ZGQX`3 zJ6x;X*-+hvR_DsUL5y~kGjA*`E#yS4f09BZjOPQe1gXn4f&)LsoF=F9D^$qO@T6QL zc~Z@pAZ5wN7Zz@kB`rK@g74m_s_T!OHOf3OeKS*E+N zWl5>J+sjBst#LD@5jM-1{nXh)+7C`lWSMWjJR{CWeoM6%`+XjTAyR4*c~?*01CW7c z-@{>Mp581y>5^}%8hOee*}uxg#Pc17-%WCU(B21%T_#w0#>_Vt$G_~D|8_SS7mo}b zBW9a~O7{{>DLvc4Ytch;QP;1}*09Q(I;Vk#Bj?B#wcN0W~d9a*QCiK#J#}2w3+Awz0Vgj-Iyb2pN#dsV 
zG{)>o50Mi>qpFau_*tV#bnTiJQUm=%>$mm1@oaRRWOs0&gyVqP3>nyTyW8{{;oeYj z8wAtw!uF|PoVo?$^yZV*+zrHZXc(3t{*wyuRv7R@3|FYN;@Eud8Ghr>WtHgZ;|RXF z70)1`>)NMMoi8Tlhx?+Ds}1a_Gic67JZHa|!jrxqbM!K`C_DQOT?Ch(yY^nX9m4i5 zq8KG5RSPQ)GO^3=p_FQbbSA%VHg%rIv7zhATMh;|vTY8TZuFz?Ek9{J>?XlzSZ+j2>tp19wq?DaG}(a;ja3L&6{~V z=*=A`K7J4U!CZbexK#YUsX=wT!ac*a+Gm-zq2R`WYW^nivtm9Op{aL5mn|A^-8l6g`4FLq&*^hzxp3|sE-Pf;pBLs% zKbn-t*zO~kN-r1#Yx;46dyG)e6HhN11ozObpuJx}R$`50O-p!796V0E%?T%e9s>Sz zorgI?sC*AbA7b~}*4~Lx1(-51%0m=2J7vo3IjvVjkEo$;)!E&NH~OS1RQIIphHWn+ zM%gb%Zs>Rvf(V7nJZ5(JN;13aU-eZNY?zh@F5)ib7VcMbI+2d^Qf(c#yo2-!d7<4{ zqA>%V!?;_|MWR+{LAqsQeQkw}ZyHcdT-)vtQWgu$PYQCPXsuqTHj;<R|E31JkjqRDGJ2b$FI@y0q`_5Y z&Wbb5A26Id_l7A&k2HvEqq^U}m)`q81${!-j_a(ZKQ2E`O@&L}N&ew;Fyw7B`Lt6k zHuZRqWp~%X<_7^a1P@1x*BSU{SjRgIG1*L)qbjbU-YKDKN*%1k3El@6ZF+Tr`0rt7 z0(jDC?uo;k!~mZ6cZUh6Pl4o;yt)JaU@z(t0t7dcnS6d8V1@$MNfwAi!7;YGlL+l( z>3PAv_HYW@!nENl@SBQ&fQhQp+WU<(HYliSCNo?`t4<&m)bxq|SPJl(1uz{jMb}>% zJVUHR4P5msz6r2;05}!Hz05Kv>V+!CNlG;=4*6dIbOs|pZOsL}=EMmBGh%o1VKe`o z*zrl|H3(Sdx4~MC2|i55TYs&^Ffw)Zfe=wxSc-k9Ha5Q{&he-)2%`kv*U#??X#w&| zh$aE8Q9)lAi}-3!UJq{ysh9<*_KYK%LF=)s&9O~lq@cM1Ve^b;wd0B704y~wD=Q2% zN@81Nah%&7Db5tN!v=e#F!mTFamwaBhxeqbtgC~}>PlXgfjH7iq;C~$9H%jrmc-8= z?K5l8CT}6Q>~<@XK+M=q<&;y%(Kbv76{J!n-KIPgZQ3XDn5uPLi z0N)@SgN+U8+dDz#HmDyHq}(gjLhq)|qQ?a{0>?2_tiH+pFnEu)`1>7|0>tW$0B$`K zF3td_Wq|W{3i-Bx55{k?#9;y}X6RgQvNM9!h=RO{+Hw77FN=@gBXZ~l53^NUhfx*% zC8`5_>EAb3f1VsRIgymUfGF&v(xF>iN4ujZtXnz;T?t-X99HzrX zGwI2EOOZ7_J(4{4;+*>sS7xqT8IG%I)eX-+~Ze9)2iIM>b{c zvi|ITOlI=Kbg8eHjou`mT#4ovr@rLL1C^LdCZ@oWQYG%SmN9i#s3yO5YrkkS$B9`~ zA1`;mMoTsLAb3+%Pw2J$<7KNWbfkinIt1Z!meGvkvu`ba#b?kjW#tDsh_X7fiw2nqyxzarMW%g6E;k_Ih@|_4H z-wBV6AhW0ChG%1KFXo)OUqp6RRZ-Mw^_88#bTuKONYO%EOY-5N<(_zA@{n#V$yqJ6n>m((_w@cRFuw8lU7p^9xLn5Pic| zA7tjNyESPWkL36J`Ji-57}v7;m$9azxW~q-HHM{i)`p%Dxv1~d<(pZh$_ZSD4G5xx ztCfR{P3-GVGM&ci_X;OHsvGi|zD$HdJ){VSsk1!gPFx}ci*K|}%vZ`S$gQqBf0Zsv zx~^t-Hfu%$e}^szsio=XaTrd~2!S5Zk+bg@mq)Gms1ui*P0!7N!ih(2{ZEgo>c;{b 
z?2mRk^C0N82!eM_A-MyOm4G!EqsZGBivp=eyt;_)Oxdp~c{#lU-_Bd^R8LMQoC$(^ z1L{`mxjNq-4Cgu!QDbdbq1V1?XEb@&_g8FVv-R22izc#THJ5OSDG5c5nC3Cquy3L3 zL1<008MI{0Y}&qOaZT#^I2OBh-)wtLkxAEd!f)DZaWH9GU<+Ez_k80Nn|N=RvY%Ty z_V#Z0$=GB-d;Eh3)}AhU3i{kRb;9{ynaXXikTc5!GGx0fYnhzj(Kq{-Wwxoc;7Q+P zk#wTZu{yYodoq+o)a4>T%fFe4bR66&puy9Vz9YT6y^_LBUs35@t`ely(1}XvqdG0K z%Sh4Rq{l{5$h^5gcx@E@?N({LHYAf?dAHm=OdIYxFUUNwWHj+cSHE6>xL@rnk$LTH zfxhG-UIY31o-uCvGJbVzH*La@FC%hRJPNkKXu2s7bV-l#uC|S-6DC^ZpEJqf(P&=e zHN{(H5XrHS$Q)=++i}2Bjz-@Q(GJ*blyPD~!=L(Ldy$=qQ zftIdmPW^1>Tp#F&9~%Z#reF|$xFnl1k@80TS>9@!1o|gXXdQS(zze3BEV?O*d-sz| z0@N1tNS6ZR*5k|V{5<<+Bo3E`*VQK>T8JHOzEGh@?&Qq-hFT#>zbZKKUV82XS#@ka z-ecFjTPYKTHEIx@wRV;fT~dF{U1&8}zJNRRVRPdR^7TwQIsn>3#9j-JI^S?(ud!o; z#uYm@v%Ej@q#6Z3s9xY=NI>DuY}D}V=~k31$<&q&#mnkT#=IQu5tM7d^mX`ZYn1)g z3I0=MN3?kyzM84kjX#!m0NIleJWuRdrgG}5?;UtrAi! zO1<*Wt-k-?Y@1vZ*BWW^AH&ER#B48z8P=Wjc`wZl3^6v zO_cF)X!f9!p`_RMXsFrN^uT!k^P^b>W&x_AtVZG4v3tuqJ2kJl*Tz2vWH+?#%}$Kx zH#SX>g0WYau|?~1x5!bn@legN|H!q0H{Bh4SO(MKmy-_kgwH{kN+ZvH_W|D~X8j0b zmKfK5|5UJRd3or9skVYZW>jG;u;rWmw0yg=`>IKtVP=TxtIEp1jP$gcKRiTy^AS#2 z-RgkIJQmvc$u_YR$-o30LxnCM8mz9?L zBznrV-xErhtaG?=$G8uaY8of%n7Xf?VDEmfi7&77htc@)6C<_toBHw$Eg!9u{QPDo zrT0f}mBmE%%!sYAEvE5DOr|>G92?Wf;h)4fQ-765(wAz2Ldv{a^YlBeE&i~Axv%edwa9QT*PWQX5dG}H&jo^%IUpXP3S#fec z{?74@5iye!jBZ#zp@_6rK$$BLQB?QZ4_~;%+4VB^8y$*ZjZISzqG6i&|) z5&#G~8RKpq%GZ*5LT0KqPKM&llmy%dx?5zh;2`eTv;9hEs_UMuEycf>i>Jqv7%Z$E zoplZ0OB_Fhr{i2?3YXDUYNIrB&O0_7^8(LGbwbVFB0s5N%RIK^>s9V0a!Si=mU30D z{qlHiRNE3b-IcMW19~^H9YHv5BDG-m7?FT)4Fx*`AL_$Z5Pl1pMw;$#!fp3rlN8BQ zDvunC2U<3_&U)5sou!6cyVtI2yJaC-YQ4LPan>GmenK8sJn)jKNx-~cdr{>n5wO(> z-=RaVjABv8xkksZt(+o-8ZHF4_%FwCYu7C=c?0?;@(7yXfl%u~b?>On69})M0}(f- zoyhO|7uf(xs+Ez}dndEcqb4(x&nz2Y=T!}QKCW!t)i=h!d~H!KxO>;7{_;vbvjSuu zpl+u_UqZMrKoN|3J%O=^;wz5@DzrJ74Nx)m`RuXoR-2|6h`_J{05A64%CnAZ#c-hf zIOX?>n>PSvq;AKBliCg96%@L5D_(z{9cTCmb&L2eQ7qRZF)llHmH@2(d?M2B7vr;O z(BpH&y8H8<%`Xj9`N@=f#1x+SbrL_$sqk0F7OID)eU@d7hpC+%^V){gOZA{{j}!xM 
zN>H1~go<3+$S6e>aH^8Fl4Z5whQO&kg$$-g(BDW4Ub50tFOJbiVGFmo^IxND#RaK}vaXz}A>EuIVfES*5kSCyDO939If9b2a?9hQqd z`)9cU&x;tD*qsSz-B}@+x8zvV>)2Lj9fnbsGG^XJZ4b~RXn(pfFTyeb%%K;NHMP3j zr#BbC&h+47!pQgHnPvak>{7%$qPh8(^fCCtxo5lYj{W6^a;oFHT#3)+de`BjA7(>zv{XZjv*%*n!o+nUhd#vmSgzZgz#OJ@@w?ky=RAFRYs00v z!senf>-AIZ3R!4T4`|&FM=*SCX!vUnU36LJ2$D2+wf2k39ANh6fX87ZRrRh~N3{r% zOc7Sn{xsFCU$ftj>|*7XVoQDNQuL*_X-#TFSGxyv*;&2Bclj~d6NsMM_g~j}TuJHM za92!1c`>SL|Lou@;$j^cjzoauwZBLo=+?qgwn!5$=CgDBWSotT$m;W!Lt8nUDa_0U zsO;9msMKpYD6LBfaXb3D_`Z<)u{OlQXZ#%pzi$&uP1AvXE*DU;Q32Z4yZ7#A5Vg+q zo;S%ifKMEHvT7l6(IPZ13qUJ^E>M|%I;oR!LncFz%1(=@chzY&|1y?AtSFK|YHGIC zO1j-Vxp&vtMQsnO*L&an*sE3cGT8lWH@W*cpgh{1Q^e9Dm9G+1M#dsl z%KPV;K)}891=z1%;;(LwC}zx4f2$cGg7a|;?zlHR`hM$*px2ca-`q|S>w$8FS)ug9 ze9^J&B>UxzOf|!rpFfUfs9ibfSCwMW7yXaE3fI`I`}JkscgV-7q0*;486-S@?u_@* zsWT0AICRq_o#)Qxq6!pPFJ4{R!5{J_vEJ5NXybFtl*-Ajo+?5uMJCL+6$J zM441s>{D!Y09r{UiZFeS`pV!4!lFIq+fTe3x~1R#qGG=hy~_f55LQl2_*jD?ILUXs ze++{5J1z7!5+ysPtAG6u;Y2XvP=p{GBx^g0zg&Ccu&8JXzdJB8_INwY&dgQ;vFM}$ zb->u$uojWygu?rq!`kJ7&f6!&SusU*%p#U@gpUWt}5D|^O^O)cHpTVq@GU3MyvN!0z^sgQD#qB)6vtK>gqr-T4CO9*56S1`qq6C|!q>dD! 
zUYcJH18b1N)tJhXwVu%Ty6N}qYgssj@OU$6`@JFQ^45`W9q==cmC4P3^Q?J%(>@YB zgA3LZMLk2F#~F_nL1Xm+oo~En{wR$6*|8*R>s?pDm~br3uuM^tX@`E*q2Vj zL6(KyXn&{^UiiAOb$M@NGA}{}StfNkg5-;Czd%V>QaHlroFbTIR)64VTx*AJvETYK z&p*v*l(V>1e^$kYSbV+Iqkr+P#?el@Y5Gj_(#F!-NS!@luASqQ$wfSOqIr(N+l4Dv z2~8+VJ0Vs?&EcmD)$E1L#Oe=by?qYHKOUtHzDTfmOfC<4qy8`acoV<|Ut^rbwlps> zTuFj4;2komUeoYH;aBQEnLUSv4?)nUPs#`UMkmLs)#hn62gS8s)}|&C43B6oMv=|i z+Z+EDCQ@{giaoQ;mGSu5A9)+#aSIMwnk48zFtr$V+f_&xP5SYd>Pq9hzOS{egEIoU zYIyJRw|`i=Uk^T?Id}c&uVr{q9zA2PW6d@~KdWZL0Vj7{ET=2scLJh9OhsX8eTm4> zG=MfN<=Coa?1`W!rEH~ZmONlYMk8-* zsfyooHh1sIJNKK!e8^*b^FH6eg2y@ARY5YPHE`iKorOqO)YfeI$iTZhr6HHoQp=Itsz|4Qhxtgh|J7nT# zQ@Zz)y-DBf@%#(^klFl8y?@He&z<}5h`dBUS8o1`V*!mE5xJGD#cJ1=A|{qE=Z zR4TU&lvp>E;kCggg_^3vC>S-&*LlxXL<7G^vNT|}Z4^l=R?6mRDzaH?dN?FL;NDU# zWE}EhfLlpcNxWU4Ur+KRAd-nu?(B6I@y6B2ziSHGdO@a@HFJR9>hhZTmmfISpM9Po zt|UiX#k%X4pQ57f#)oq-xjhQ;4_9f;v5_+UU;4=q_&e5T%fNLR+Z|rDH`RA2Bw|k= zkuA*Exm(T8BOgAOHz3zzzM^&sBn$l-hO#6ca_QwQ)I8yNn)CH8nF1i!Hn5anesYuR zDBj7L6Tb(KX8rgPeZiP)PX4QJZ-kgYc@^^(q z3e{2>{M(~Tzhlzh%ltzN4_Q5g_EX*9thn?qrGCdwuHdtAdHJ6A?}h%&0}I1(@jypg zKLb^7gTySY0+9)W))F2;aeySdl%zCA>5Ig)XzCc27ywUS0pPZ+A_VfWL`ax=O!(b@ z#K^xuXcvv;^Us1d{oU(==M$yCSqoa3ZtFWAFa-{Qk+6b(wVqG>!&1u>*H^5rRU!~} z=6dA%ldl;S5g0*bC0i{!&le8f20nW3M&^2O1+YEBQfKv=iEZZN<&ONjce{HR7f0G} ztMu`9gL0cxW4XttvuO6oFaOUQS1v`8J5`*%g{FTKd(JEs0(3vV&pEgH19%B3)jM=Eze`~pkK zE~%--=nnPp5nUZ*V^)(ig9@z zlH5UVG-UqDj-Qw$2xiLywqi*8d;r(w@OrkkUe{ihO4XVcJzNFa6 zUJ%Nn6xQ|B`_lcgp8{}Zbw80sQ&Vuo^|?|+B2u}QM?`#dZ_&=6NXHuUCbXqcP*NHw zoo_P0kmbiVe911M<}sfjt7NH3yI!wy=t8Vep@T5k!N%5*mEqhY=zlM2BBpfCQ}|Hf zjht!CRSu|l@BLQ#-kqxU>-mxa7(p&2)wII*{2a!utJH-*)Jkd&VaR@g|+)^8+Xx@Tbd6&{MuBy#)l=b%R zNN-K2lr@bt+qIsWj_^<4F{<=t6e);@5*a{5mz7fA_0|TXX%_NPf1x(P#|Mh13FjuE z&*sY5p%T{jIZWV{Q*3bYqnM?DYaO?VoX_-4X%eDSqjl2~6A1AXuO$Pw!(^0DxYSFF zq1;7!Y8lC;nd0m~Y2?%9mxZNrcz1}cvfbT!vyh`gapC;X39Z5>5qTHBW7(Gz^1v?Z zQQh&j1_2l;^=@3KBHLiR{u5;u%Lef{NNC)sTgKxd&MM8|E9ATM;cts%(KG9CvsF=J z!LF&nngEx}FBh@ezD!2At{K?slom0rrW?PZ28JUWr=C 
z#$}1cTt%Q#a4ayLi<@oNFe6lkQSW6~20P6XHy4*XEjI87L$BOb6t>$I*B2ke#;sB% zl)3c8qo@k(>Lrdzlc?1)h7==NGHTKCH9GC432PNKKD9imz;tC-3`n)*6gp zt|7+b%J5OjbUIT%Rz2JQL5618nBJS3=`0kgOOT#(WQ|fVL7v{^{O$eA9sFlc_55#_ zlM3B+ zmJhw80HI^Dyw#lY3z9#wln++0=@ALi=iV6=vQU$O2^H0R47ZXfw(DD5q*D9nkjug* z&8^n?iau*`XK$vk(#R-hq$Vi*g-XCzfzYLf4IXtm?j0?FT?XD`=W)A^AEWhfrPn2+ ziJiN@lgEhCrLxaVh&Y^$F7m)6^H7TsI{jpvz$FNidJmgzXvh_njm+yyXXilJC^f*x z$!|%kw;w!QC=`1Hc&nhJU>j33tgrSA|6fzgg9r;%5pix}tp>W|j3SDEm3(f+m6?2* z;M&(*J>eC7hUW?c2r+fpy!Z~@xfX@>4$;|x(3$a6K9Nfc#6GsM3Z^$6G9Y@7E{wJ> z2&olk0YDGx*-^NGQq9jZUJ#@Z3NOM$5YCjiPolB}xU{wv4r% z3I)YtY&GkhYL+_y@pYXYp2kyp!F4L!{Mc<*F7AsFkxpJtDex9zc@%dq4_4?+YK(&@MaT zxCJ#0A938ov_RRiQNR57dPViAb|n);zbY<2oB5 zP$l?@XV}-b|$HgNSZX?#6<-5v^BnF#WCpg=2SK)1K=IaXj zO8L#9RTuCcPAj+d=CSQ}A^Bz+d6-FbPSy9J1Rc;gxrOa!jiE7~i@selT$De;nCnXn z-K^~Yt^p=B8N5t;Y5uAd*%cDYbqbHp0~i*d5%I_m7K?>)8JYdpFX-6^?)QM4B2(0z zMj9GQXgK{v7bTPwu!X!lU$h^0En9KN+g>wV-ANhcre!nnau0HXE5#N$!B?-*A2W$e zr&(3s$TYHjzz;o6FPhR+wW^^-q2(5;EdF>3$SM}w6bXrH)LP@Utr*h zEfc-IhYadtgi@P0=t1Ch&+`{A2Ta6wby;^V^;q9eS19I9QdZf!^4S!z7<^wlVM*~Z z&D~u&EfunD!{sCv=@_Lel=5-M^iW;sh_?oJSkb&-zpr1Qc~^N{+0oiMS6wfj^!Uq1 zd;d|ft3e!eble`av_a=}_msOteoFO^M8QCXyuH0Q0NsqmoZN43C#7grgLQr!a$A+ z3_iE6zwh9^K+g8>r*<q4Jv-g1`0uEXBgdWjzfp*1eOLY? 
z`}MjVskvfuiplRfRXFoJ2ZHs){4oJNt3|5Ij=|L+SQ?*e?t7p zMw~{kO*TA+?h(X8eiRiMRe_%W)42M-FuuYA+j_%nG?6CJijLDNB?#pp)tke8R^7_D z=b3+_%uxcmF7j)aS?U7w-NG8$YpF7B9uOGp=SBGBWvi}2=wAumUHa4&0)N8N&pprZ zDfGX$(E)T+zG+{DX(0N&-__eNt-<*JoGW+`Q43#JIcnc$MLtD76-U(iXOrHYH6v^7 z7ne?_bF|r>lglnGteMgE%X6=$j-Si^|& zr1Dk!wKmE#pl^T{<=CD+@EK*&D!=?(XZY`4sW!l%3x zl)n?Ax-t^_mx%Rovvu+F=hoL#$j`}0n;f6&*({wxhw`YF@i(d4kzDmSPzoZ4CiJk!(SCvYvk4ZSQvPRSRc3ox^Ajt)|dWH3J{opOpZ4>?oH^9 zzfktOVD+uwmtDs6{S}NpnyM*8Abj6=@mPi*x;H#v-u-jome1Mf2LymU>hMRua{tQO#z^A~IBuCQS=?*mD;^Y7qVej}ruB#K%9*Is@bDEMYuP1i;C?Me^DmyL zcmDUiI<(gF#mVpW)#-=-NRFEs2=F}@J4|(n4cl+X1q_OO%%~wzWI^}WB*X91j7N5> zv>wR}+**C+L~%@}cUg!{b>S81g@=Yi4Al9TU&p@Pc_*IF+p zWU0=TP}m^HfTqgdMx!tB>fsF*;gM&GC%-WSSm1zb-fnurl=5RpOKwQXUz^aui;Plp z8&BMII{0o>h&g~rxWLIbGk6P&Y6|P`w@`tu*>gFz- zn>oE$^d^*lYjV{?Sg7n7d5qP)R8CzZ_>}h&+Q!qv!)_n{0eP2yx<~{60qjxMh%2k& z!Kb02%q=6K$hv{r8U5BP*4?#kCl<*2$J>%nCQIe~jOoT_XM&gK?BP-*yB7{8ZZl#csO*$e*~FC^cLlA`Wb^TV5K&c4^;URA2+=4}B>2sS}()J<-)V6OvGYaBx21 z6FJTn;{D~OPq*x}TRzoc4c*5qmzhHdZF9?H*Yw}@)WwrCe~FXtH3Sd>AL)o}0$Qy^ ze`9XiFEU>Dn=?;If#h>alDVQPglBWt;I7fZI)S-)VPatX3AAb^4CaZK+Bl(iqvOkv z@_P*iC+Ru&xAua(@*8V^rR%Bll_GB(9|SYEW_UbS0&&9b9@5gMxh)eER8z&lbNq2# zH~f$@H#wF}ym-3PMeX`I80XL2$Hu%`c~2d8qQyXICdgA431s9WYa^2q0;)tG|JOzw zx#X2>rOAI?of8TEGvw9`-M9Ye1;K#1O-x;cTd^l&qwiCnk;dqf&}lJ2ecYTW&~+u1 zak{Z-Y;$&S_M#JG=3K|P3-`yR&+?X+TgNNq?3YCyB@T`cfl=+l*wbTOM7_Qry4-#y z2{YX~yqpA}KmPej*>Zp{^ArmM0UK+*3mx;Jmm@$v!V2>b-^k+Kr_d7-%sYNbp8Vw5 z748zUmDx-vpi0Z#Y|MkUBe3w4W zz9=gzAPhi-11iu{a56^lXPX|-4@aE)Ct3@_4r_aBenm1FQ2ps1`2*7<&;5e}{$3oJ zMq&N*>hGqObLSKl{sj{Lf8k?lJ^YJ##y^9AqHQAI>2f}#$nyVq>o<+@XEu;*2FC@x z+;>ktol!sk-BTv_=f7SVR}B&_WIa4qy!waX_%lan$&2N+F2$!0E6k7nbff((bUEbz zEkge}^#4(UF^y{KCjS7L-aHx6Gq{DAVm!*FgfVx(tIP!SeqG$l&S9^UVl4-)8vlso*QZslMV z*!q!P2OB%%=iPigd8Xd_z+UNmpn(L}l<4G`N(ptf5Bj!m8t4)MVEJIPu(+adjX0;Y zgK6(0S?Z-5wvm4h>h-!t_y1z(W5xTn4)@1xz@rC9I#s;8{9u_E?1)$TRyv4O_&_$g zkp<@RcxaaOD{L@<61Fl>rqI4t6+)t~LG*ys2dPaZ77%Kx0ar+IKjX7ln^*q0^sMA}NP-1(+b 
zs6}stHa3tjkZIh)^}0;sv;L1-hE?T+yd|x7B!^4@n&N2Q(!=ZK!0Fo0R=i$F$-^Vm zw``T9lR;3+OdqMOr(1~4YcDRW)tm8psFFl2jO08wL+9B1R$8``R8!2ODh=HW-wFcl zHrJC{R`VagRPvSRE?!G-C)g8R~id~Xi z;n-U6=U1}BEgNT9qN{ylAB_I`lJ!IlmE9v{tn=)=?DwzsGGGUF8 zd`8}vOj=@#hDfSc-^RE71vz)o1?mg|F0jAvbgT@|K%eCE>erE~@ zZT7j=Mpw%p<)$-qWl~K?-{by*PX=GGt&Cxt-PxI@KYaT0$FyUZM&8R}_VLqDbxu|3 z7E8i|#i%(qLRaF7Og-MTiyyng>});KzMd+!m9orO@n&@OBD6YG1XH9Fs3GQkWrM1} z(f2rhnSzAiCOAvbWp?=2b5m?xN*}HF-7={mM~Pm3frttCoxE z5Ks%4-wIb z*z3e^E_tIV`?l~mlQ-So4o0k`Lgk{Eu1V15YwbdoD>u=!S41-r{`j^IY~LwH-?X8) zyuT=?hvOjl0rFr87ceAce9;ERD4yBjXq33^k`P7y9p6gO@)i@&%Y1a|yM<#e3%7ej z)cn>=`6U@2PqvjY77hP-ZZAlsjLKCD@szBvv$N_XMw#@7#S_tAl8{s6yW~J20Xh0s zNxb5>|LcVK=Jx!hM#&@1uU{Bs>$9Ea8e0a?gTAx<_pI6Yp?K*liH?(JGsl*xH$HY2 zamAKBfU8g*2d7aX7#kc3Cj2s{c$z|Jvn28ER*T!F`BH9lv0qnKl_s-7%00C)xD>wS zQBx6{~~wXdQMUr?bbC7psr_nkp^^o z^h1Gf`Y&qkKNjiRL}v7Ki(c%W%WlfMOT`zKj~+aY$*Gif^VRHP%$O*S2#bpgbDI&H zTNTThhdH+{;HK9i#J^czH!$(GJYaACsXB+rQ>_Dok{n?^*w(BcILu zfx(v%I~I*CPXY$|YAdMoP4*!Ei+5WG4{gE=uD;eETl_BbqJL2f$BuQS>N#hp6rtyg z(RUsToq3qzn<6s?C;Wq-Xx%8;ahul}{|XKl-J#J9*D&cFrC%DbOdW*l$n^zEoGb-7 z;<*Eul^gPlsv6$WDj>p|50J~EgNv?)knN~*BUP*_b4a_H1?@sDUPAW!dUPZsHH1j0 z>nLAQ{+%+L&Y&8tQ+m=RNSFynd`r^v++(FsRQBF^Q#UDP+h`!+dc6fL=CS;=6thNa zAcuC(^*eGm2DUMq{`H?}WL`bI{^^f5tvmZTwkP6s{Bs|6fhl2V$}q(&nLNM5&6hLE zK_mo_4-hcM=4kC0=+>Ue z;McGDkF6U&6s&V64ojZTK$Zo5PfXmsk`@Q75|`6gm2Zn+%a~~ja9bJW>yHF_{c0er zrSJOI4en>zRge-Dg%a{Mdeuo&(%)|O+vnf!8GlAf(Qo2weGr)e@1}g9>t(*O-)FBs zswnpQXf?+tVmk#XB9?U&#Yb`{ZW^fLYs58gdTvTKSXqCSWG0F4PO@fUozuZf7;g6TbrP68U9-Z=h_4;A*Ys7+B+ykG-MZ%Qj)#5&FW{HtX! 
zzv$OBsdRsy5WALsRN9@(Fsw`u(l?dn)F>8~hlCF>yF1j(yuGF4l~jF{=V&9OzI20O3hz0DtT z4=1KLw2fR`!Cx^R%K>GV`HRZ2>R&V9{Gk{x#@6I)-nWnt2Rz}T;3?3i(+q44q4wk;c*g@4+uoVfz4 zOZo?w8ew=~hKZ(IjVb`4Z4+F!bC8NW+SzYt+QYQL6bmgJ_@H_ANnGPtZv;R$L4@tm z`7_=1u_u76_M00RAiCWd-?^@lyTE)t=aPcw*0X2*^P@fkCj28JYWNFwed&ZA?n{>u zCz~%QNH7snd=k(}UEXkyXlyL@N~w-l{6S_EyJQajpAhNgbu(@XDZPv=gOes^ zrBfxL-%vf4S3I|btb9KN5RB?aH~=!NtP*C1x4gW}eR-iY=lfU>GJ~exz*BaD6`@Gj z<>O%GmECNvPm@13#hmr6oHn(KTBOCn>)X20Gm--~@%ouXPd;I)P%d2PzV!LqzVl2H zv-u#PM3*H{DXr?Eg_N%K&CXSS$06*Tlyx+)lzrjF4~IeD;DutU=`sCXm!@bsNtREg zS$c`xs--rxtC%k7*BbE?p_px{IP^T$xRD^4w-Aim4gc%#Q(n%7TWp9{duL)h89 z+FOI+jgww(d6ee_Cr?%)16Sj`)#`}F?CzU8^`GOeS}>h2)_O+cK)hD?)1ECVz;EUb z=(DEu(?A71Ri> zOOs{7U<23XE(R-W;(pCX>_;YP{Svl%-`>lZZme<}TE6W@kn#^+Pvcy?th0UfkhC!y ztWN`;@7$KL^n`<6fBRrTXIT=mIXwBPbMlt`W6e9T@`ndZln17_H)h{_kL0}%3z_VQ zq?w}F$N+oN-HIuyR&Vo>S(eyxO@bwLTygV51KkEy=V)LndkHd@>Lmt#D>_*xy7c^m zTlCaSB2O&Z>rTqdY-ZNmTQ`khKiYPk>MFV& zaDu@;f#$(DSXiH4v3Scyw;aS-WX-3z-n9lb~# z)3I+YD|ZP#Cwa-cFm)*(N7&W}uED`AMuy6_8&xvUj%Z_FGaAg2Zi`Djmb-btaw&!`cKcX=s|UHCuWFEjme%a^8W!&?GeT#-0f-Z4*R^H7 zU<^&=p(84wQpYhVw4{UNp4&UVbzv2`-tQrgNF1SLq(2ex78HJE`F(dgcOp17Jlbt8JU4> zYJ|A>yckQ~N2lixCEp%kloWvBd(H3A-OOz9bK1-GuR0@LOw3u=jM0?4o6(%)C1kLc z8-UF3HNh;+m^7cm zb*3+S=2pqpP`2Ml7=zJy0WyicN1sDlH~rEw2K}=fp_Zg%Y0K4UC2kI<7UD?aI$;8` zb-FDdrWf_eM8a;auC|N0)f>wFQIb3!V^l?vj>ka6!eT7Cr?I@{~1BN+!WqA0O*1udy;- zqwu+24>f1HJnNQZ&oCQ|<#Uz$G>-(mof?LkE}rri?azJjYG%;8m9F7^MVP7Tr}z2% zfFhQ_xhr3uV9TsF5hvYEE$**XKkgoQUJ~OWWB}3D0tqCu%v;Z|K4Rk+iqv}bG2dTK zdc&SGs=Oq`XtVJMcQDV#VN79S^2&i9?ZL-(^*AM4W4WP1X#fJ25N%A<8#)quQ1fOc z#u9ucA&P8n`3wP16D{98JUn&Yn*bpR&3!Gqu3LDJeK@eu#-%#cXw^4`za3*s#OPz6com0<@ z8#gWlFx}WRGi0Th+dq*f#d#1CA7BTLT{Kn9WS^kHnJ2f5r}xEqzeBNnn>%~#S)a>A z7hXVc40$ppqf-x8dV|mW(5HB0PQwjewpP{d7TNHlZ@sV63ksjID9p~_DG-C8;a*Tr z;~>a2&eQfpc{xO^II^>o&J@B4{Yc)a?Kl4@?At1g_{}-S2eaP?Ju-6qYuV>(j_YiR z{L1T91PuL6s)8bF_xGg={h&sl&WcEp3m2(UHXvaC=3?h}W7qd(`x`$7`pB5;f0#`< zMURmZ66Z=W`d@r6N8CGfGe8E(%F0DGtEnI*ioM`#yb?rVpQV);^ZugH4`n8UjN3)6 
z$u8f(Hg}EiR+1c}$C*5T7Wash_0MK)J0RjLR1kHR>aBGVMe*BowR7IB6yxjlo6=Ue zItH87VqAV#KGczBWqlK?U8x@YX%lS`q>lzC0gJKQ_{z=JC+i&ENyQkKwnu0`a9?rX zdKYP^L_t1az!+R$P~8!weIO;8Z~eUswK`YgDIO4|Xa7VOp^5xrKiIU;YwMX^8yx_> zr09(ZXAXiW{yxba&KTGD4=j>IhJ|g2V!Zkn;Fff|)BnNVd&f1kwd?Q4~=SSc-sjrAm$T5<*l2q$P9+q(~<~5+FcGLXtDF zto42S-m}lW`*+U1XW#Srhra-sbBsC0JH~k4=Xu|Ef+1(W_dV-(K^=*+pZZZJ$^|@i z6HA}I;Fsh`>T8^AZ=pikeU-tI({Wp-I@SMe1)b$cUe(o(4%jqUw-2P;`erMaHBNss z;|bK|+|Qq#^<`GkkiKofbnNurAJ-cHeeK=d`18<7vni>qI!7O1 zF%#QWPWVQCqlOiY!;XtzExWZ9)A+_Qns4gymm}Zezz1$FKTJPRe|aku{Xwe4w;P<@ zy8f1Y3y<$s!&?f1YyQS1gQr$+x7heg-G@P6cE6B(wN+E*UCKW;Qap@(A<`1%2aL)0 zzXFNguU`LX^cDEF-H&FBDA%9u{(fWada=`I{u93ZC9U>M=;aD_a0Htq8M>+Gl&g82 zFHbyACV17-`1`250-rR?rJPB;2zlEn)3*&{qV|1+p8M+Dzj4|3M76Y2ZcFCI-oJVq z7GfiQBnMkBhP=8Sul0QdgBx%yHgzgz)rz)kiC}!)$GF5kP*go!=dZutNbJ56(~dgf zr4Pa`;$A?wBHxE%9ov~P&FKHUBZh|Ix9`X)kZ9j2C4Fi*91@1d_PXTYU#)eK)#JP_ zAB)u)YYGtO9i?;MW~#(Z59h-%dG ziDOiX5TjIt9;;gL1>W3f&P?UD`89oZL~TYvzh2PkFNlF!}4Q|xY79$j#ecyGa;|k5H z#k)X#Ez;gw#_15LwnxLh9Qwgqr_&`aD9tan5*K;Vf<3hn_{#qC-Zr{=y3B68GbGE@ zHDqoM-@VKZ()ldMdS`@Q8S&1YY$#^t5g@DNX)9bvoqf+Kowf^YZ?2$!PbRh{*u3-} zBB*{|4zO+8II=E*QVn4*&LJo9O`l+fnN1xCI9}7;YZTi{gRty}n|V z{uEQ~#}2S<+H-V1GIT#Cy?*xd!t=}g0;Ra>V-wDT6Ye&~(oOV1Fnel8t2lfTlK?tDuf;2nlZQIwEF3j%EUa;n&?#DpFpT~8Q;Z`zL7Gy7}o#BlN zp}4?V%%ErRP_JsSdW+R_R1zj>v!)l1n|2<-_QJeon#V%Z`B>yC&2}fGRaVpwn|H=! 
zFj_NX4_Z>2B!YbI`6Po$3Ci5|;Hk3l-&hXz>&g`v(3G0A(n2G9+}MedRSQ|1a6Ui^ za<9}>nR4#K_NsPZGYRQ6wZHi&WKIOxRa(AWOb8Eb8dq)>F^VGd855y0kb2>eDLf0a ziDhs)DMf-_$j{q?jp=QfdI{L8tcYmag`hU+;J>#ujB#hjbt?o%Lo!S~}+NsYUn zR@G_Qwl!9!v`ylsJvT`ZvOCFpb{rL~Y^69GZGSs=a!gdE$xNWF4^bB1I~IJ{tG}PF z8TJrlXtA`BZNK@YYkt(O3QFHTU5txr_rl=WM`vA9kOWOBzt35VTif9;YS+sxPvknN z#o|>w87(?Pn*xCC)Wefff?5a?14i&Dd-9{M6-CH{%6r#1_u^G((mG|Mhpc%%#Yy|x zpk?KT4l)vQ2z+tdpHYV&>#PUx9_!AH9_}n38`J>0{Pk^#TjpzWzm>qOl@HM~eCFst zCj+^QJ99ClC4BjbeaD*;U#D^miMr`Gph<;oeA37S^T99X$NN#fx+xHRFc!~IDVT)K zvLwbvl2}P`4pyN}YIvuKl0hKnnp6{dP;N_{=3_1@%O-o2D-J^`?(4>#8GLC5{#L8GhJDGA!@%BoU{;|IDIUY-?5ZuCn=wBkidpMam}`-KhT8SizfDpON3FNV+0 zG@k05b8e#|77uBIn@EhZBWJ831Pg16V``sd{Zn(=FmZpzMus)QYE+v@)T0d_G(uyc zn-_-*3;aYd+(a;S1uRc%hq7LpqY-Lhc>FYSL_P2xE^K>Cj1)*JnjHR7S zio;_v_!2}%lhF`=W_$02&%w=gAV(}V|=5mc5>l> zP7Wb1Q=S}3bqWVcu{{2wdCc#m2=TG15Z&kb(;7{50|rkPTMyrUY~ST$`Y@lXi{(VM zug-^4%_$}3;1!EC@315DzlqM=4g2V6AUQXQteZNjj_$o}0h(|g!pM3@PWtxEI=g~i zgz*uMNXd!TRQ~8Vm*h)-R$tp`PHD1b6P~?bv_Pk%Hy3AFLEOZtT3T7pm%1^u$oE6E ztAAR0{gZxS#Sz)Q35@BD^Sjrt59b*q;UxM(AoXe9GxE?zt|)- zejDu8L1&&*fxkIP)j*{GK!A7dQl9FF1=K#i@ANE`d9JeO2JBA&(T0=_)nECJbN)Yo zc>e>W`Tq>U{-@yNf47lW?_ZxMNu05~U1U;ZfqelCtFGfv{TTfTwG)O59l5was2f0_7;~w%Bj#dM5FIuH&c{l zFM~UFSk&b6^_U}6iQGDgAz5&|C@SZBJ8J3-y&pJL+l|?#2!>4IVLvf!$Ofhu?vQ)x zj7Ip2m(J+3pC+~?FujRgO;WC=tAmWhA|TjlEK9sh9pu#wLoWAVepS^X26tEDh^b*# zdyo;jZ->oZEI0pf3^GMIeso34m3nfTwtCxa;1* z1!ab9>A7kDh`U7#NkNtznYXxhp@h@K=D^)DqKkyX>fs$P@AVIL6ISTe!$1WjL2CSu zkqHjrU8P-^sUJT*u7dViiz>1~6>6Vy~sx`wTmif0mo{~8Gn0Jt-4)4}${_TgYqi3GqQ zXjKi_?lQY7S7)G(PCsC^_?3rOyoD8$xeC-!6>VqQv z@N~b4=@?!&o*l!xmzEWGgyT<4TpF^>(^PBlzw7MH7D{BV?&9jkHh~18DiW1O`JY8z z_Z6GmdVoSB-2aH>RxfZD-bQ625w%e-i7F|(<7K+yo&)qg(|)#DS!YNZq~Ox_vB1$V zE9J%y-z9{@eFIM^W0yO04a6FSmRZ3vOh+cR**6_F$LJggE2_#WXE{cno=tX#ukZKH zC@0x2q*1{&%yaJsk`Ec=I*QcO_Ut`-=vI!yaG6M&$v{lRSK_((vUbL&RRoHC%Sh5y zS9yb>jE+hMWJ&PvqmUPfk_ulRh2$$8_Vz~VnC#GySs*QvHOp4}gAC#L!D_8^|R z^@>pBKD&mo$~raK+i(j;d~+s)x%cQmL9m0ZRu9iU%@=2(?Y?e-#+q7Zjf*G0j$h&N 
zXVn04-ki{t#R4vK3X(q^gW?_)Y`Q@ykb(!V!d9aczPPh%0}k5?5`$o?@{YRJ)JTWTpE<%FYZ+9 zR>wDxr1uL-cy*)E)6$R#uSf04FVit&o-%ufc(a?>yJij4`|HbX=XQEz9MmyRhgYau z67$mE6plb_ixi7-yKMq=GJ+)4s~SfS@t;K|mf4MX4ohqHla`n^5qI;69}m;p^n&^6 z=PPSYWgWAEG&Jn=iUi`0DK3s8mxH*2DPrO~*X9QJv&MHQL3%;hv>DBc}vB z-z0`2DAaUc%iV6O@!Hp1^VDe{uJ7kQ8^(fv~P@y zOHVq8Qo35?-t-ozC~T1h0OR%n$rM)OZNs?YvLh{yF`nj&`C~Du&$9EavSMpNt@uP= z?m$&i_E&4zyrR8neZQ#)bYLn(a^X>=6m;iXcdAGsrB)X=G^TR^iSU5Y2+I1(Lftof zIv)jO=dU~!(+(D2>(c?hqUMMYfq?)q##A?O)k8lJ_@*p$~1I+z!Ws^M=6HCCM+ zm2Y4V&YXu>*Q#`8kh`&iIjRc3oy+TYKqcnCEXL(wrFNSQ#m*m?lFn_i4NUjRej^#i ziAf-4A_S7t3mnkl5Q~|i6XTs`RyQ3mTj9Z9RWdWV0E*HP5J1~1hD}Vo?N^q@!Dg`q zR#Nh2FZotqM5mSM58$iB-Bj!2a30VUw{9M@&MtGx&0(0w$>>pCgP|+IZHUeodE}ZcV|-Hr3SxSj49`Wxpw0>3leV z>1nxd7;e^i^C{}0jGXIKt8kg*{WGH0wP8JTKRNjkgcZ7Y$7zA7whlPonkU>8K;?0+}jOp?`J)AekOQZUu?0gNnZ7X_f@;TZrq@almvlB_W04t)|1#HsM-psnxqZd+RFT z1_w6hW8?MzY;uLxmFZoZ9X!u=Q>OM*Thl^*4aX~=K5R!W(@E>`T44NQUg4UdES7&w`C+|(EaAfBD3D^d0YW!yU9hr6aH=|}d zG8oG*%%Nj2YxN$pfzaS&_~@I?9tWq)yA}MWh|&A0*g6Xpk{iLH$(`VYCl*E~fhA_@ z9`Q0^!|!iPXv)Z-`b<)*Koyj*mo&W=2}ocuX&k+r;kxr^l}(Nwvx0Y$4Jg)d@ zw67og!~QkN#6h=!Y_CWm;$wYmsOGi$!1ZUc7Bp~>S?+b2QX*o5*c>&Wekz}a@sTS$7AU)+g$obiu4<_j85Km+s>{k ze-;8wOgFpr0+MBo{z*kDZ@c};t|GVKwR^oq=IX7G+pP%EDZwAa(X#ZV|1st}$(r{W->k{KQ+%hoLQMTt=BuWuUN8Nh;Bn{mXU}?5GmHD7M_nFPbzK2H z_(6y?9{ls=i^B<}UoDd5`|J$EfP7c_XpibTV2X3hUcAW3SOF;W|yc1Ig|tY_M5wt$<=E|b}Tms z4QBYF(}}$OT=O1X@wwu7jRcED_)E3hb4Tv%K{iEik^Wm@(2Br~xZ$>)L1Di1&seuM zLun-~Od*bVV{SdJv9=U?$&g4|9WI$M13Ds=ixsWso&sx_?nC6RYp5>lhw zi{HNLVW#;$WL;*7cr4*`R&$`2O*)6x1Lqxj{?tm&e?C>hEl#wau5=_Wk7)z1sWYvZvYn7g?xk!Q%QIDSA9Qk}SeaDT1CQ0*H-X-Ja5SVQ9P0rS!j2Dv7b zJvIN*yttr@O{235vX#9=lx1C)8Yh!qsx{I68ksp_a1vHe&0vfL?E_A%yiKNn$nuANpJeH1!Gk{lbtD@wzq(U*z z>z#ZPpVB%T2G6l6R<}IJvIV84V`=h|I#tEC-L}*POQm;#G(wKDjozIGSg^v*^aRGa z_E&4Oj~^8$?M-c@Zn(w&BPcY>#yO~22+UK32Xc+GetpDLfGS=mb4jbph;+_G;0{Irq7DzutSRU-ZgNi4IEWV|V6Acxz_J za!3oI@e?wcDsuK6eK*FaYG_#b^DG?_%n&e(2X<=eTuDC*$BxQ6r!)yM03IIuW$HnT+L`vgv?`8lYz0kkvHW-(Jf&d)bQ8%+Tp~I2f{I`FSShmOw}`jh(FIBL 
z4(ry+eQybX>>dsvAHK=-`uaL#)|*15sdtsn!){R)+nr7>%w5$wYh5|Hk;gM>Q~ZaF z%|ji^OHXU02y1wd&004|J*H^a`z(f$p7agT|8jqv#ISwn>lEr2xb|-Uz6f2CKXzEC0!MSkg0g~ z8UR>%FkIFv#>%U1bzN{guCxnaLc%?qkjI(T8BHGaCTlF#Z*KF@?4a4oISA$GWL%jb zt|Z&^05VOcDUqr#sC#(@Y2o;CIUw#zka)PI%0;{BdCA~p@IL{TR6w>3lwOgP(E=%~ zr?W<^d>0?5j^d^bEc3XT0_ak|d>&Nfj=FVR2Kx^Tel1wvKU>MU#|DtC&2APtLd5me zm5QQG4-0oSrJE36+MYQzD#vvXf8Q4@?c8!u_H@XYwFVvV|1OrhT^^tef(_llLB7pQ zqPF(ceT=n{$gmo`Pu7-dc8MQ=dYYm2olm)?QK;k5gNWz0b*|$BT2jS4Ox6_OCLCwp z;mok4GlWc+0<5L{NU>@F2|4q1DI`Q|z|2*YNqhQnI}760|Hf`oP#{CVu{;e_>gMz( zq2NyJR~z-qmkUJYZ88QMZ?5rYgbQi-u6}v%XmU-sd%+dX|FqC4|D1mVlpHxZt8>VobWd6Ir!Y>J|-$K=XOn1JW^U6R*C?a*$ zCP)Fr!kK4+Y*ibKn}r;pVU4Q}k^`(J;-;}Vv^&hB|IeAh?W?NXiovliHK%9sDg8)u zvhOZX>Cmkr-<-fJ!h;l5QY3+4U~&8(7_P%!J8~bl+`nNci}kHmBfVYCeGXCoBLDL;>j(b=7XMcL z{f|MUpXl#Kp=qJ3gHFgShJ33w_&MQld0|`AKVu9X~Aw z@MItVS(i=On$icS1PZYA35yT^l3ha<0rT*?6?EsIn3QQ_!JiZPciR@#LN@<8U#DbD zl=!61aGwCy02=;BT^6u8F1yJwl)X7f8z_n5x8Ll~y=oaAlNj?iv-Mo9DfRv7&z6Vx zLId+dzFKmJ|2wFpGCkf#k@m%{a$$UI0TyQ&G*hOCYRNi0N$j5|74fONN`#e@_5JC; zK*t98Dl~UbmpRLt?@@D4-dOp(PSm-2K1Gl=5y0uZ->CWBX?L9=F*MC=L;ZY+uM?BA z!0|(Q_z|RMAEzLHQvi0kWL8{psqb(h5Z2|sV5&O+q^|}lNfNgZ2{`&OY z$MMKlU_l7sCpQTAp=*krX>Kw3RQ1IBwH8_AyLA*g*aeW0&n8*Q3$uPp?a-tec8B!& z`rh;?eZ#d|BzWXl@wA8i8;mAvh4JEKbLn46mwp4GZe z@WWNrZRs`Pw(bQRkz(Zq83^YZTpv$(oj)Tzk>0 zm|;@xe~Z?G_7L91OkC@CN(6V7V)fa%UU&YPb^84k+&W8q3seE`Pjr(3%7GpevIZVQ z;-9%Dt`-1cecjzQ)DN)D2@kQWaC%w1BdcnbruV=qO{w4DufB<|CPnpDY z-(8w*tu9XHdQ=>)se3b$2`ArCUFf<=S4nv?!%I;`3`POKDbRl(@~esFL>NWH1~9OG zl95&)udo3Vyb!tD60va~fDoHZ>4!pEhqqg^@c*+Y^)oo_rhb5D>A&j^KnR_DDb|nhQ;~ zsmKI$P#rATdOZ`Y_bD+TM|FpwKUymE8y$0!V7dw#M`c{5KDzbFw=MIpW0^MZMpCAi z3O6x3XneZ2$^kL*y?9Bwf$_aLg8d;k*K1eWV&n1Dl(rXO`mc5HGaM6~7^GaRSWm`X zZcTTHHvux+_$C_MDllAQuwc}0c38nAT;)%T#;gg=v~6Wc=BvJK-*v;S(Ubwg%Q&S6 zZUgTC%w8@M9xZ)#_SHg-$_bj9;aLB)$G(VLWy9-JU5Pc1i4BQI2xETqGn9K3RT^s} z@6K5mxnE#aoL=xC0>5^JCig{rrI}3a98R>9o2aE1zDCZXlQvhct(!+Cy*@Fh5aG4d zCw>kaX!Zq4i8iIwedC;t^8yZws5E3fH%F}4Wla?spKnDJp-LOOsAx3g1L$fScevaM 
zzP=q6UpY!lk1nA~?TL8%X_yQS*lfC&SP^i>Mj;>D9Y0hpe;t!tbP!jpbA4{WKFP|g zGc4Ho>YvGHojZoy@kqBQ$HY#P-@Y_qtmg^uD~>Ew?`AoC-gvS7vKwn*hjmW7fCKpO z8rAUc@G{+RerZE+$OhfNzETJiTUscv!#a{3S^VV;}ou@z;tEiE#VWoX8HtLt)G#N`|cb6ZCFpKtwc8=6kZ$n<= zeqbVJW(Ld9pP2Z>oDC?Rp95Q#9mr$h9{qgRQ{CwqUqai%iS)S zgj9ekf08Q3jfcM-a>yPq@dob{-YNMnoDuurryNw4B);XdFPHHI|8R4S8+i|G1&aZI z@;?%+^&dN~|6fLg{a0y3_eW%&#NauT(#3xp85Rxs^*etx?+?%6j+{w*XW6=hTD(81hoRhx(Hph-U+FhWq!#pR>ztiKZXEjXS*X zIzuP^FHtUYGp}RjZOat#K#n4NGBpFdLiw9n*+dbC&ttOAh+`{2`N(k2OtlKCWoPT} z%C&{@Z%Sn+>J%4lj4^K5hVITbjM`^dfx6a5A>>{CDDIE1d~OE|ntYqJ-9JLZpWUfu zn|2W9&f(*-va1M|3U`XexV0@O!Xs{-aj(Vp!}7RAheLKIUWUU@PDcGueAU!9QNzO} z@j0_(wH4K3N%P5CX+Qum(I&+3K3f|Yv9S19K*E9ZyC4YE`$+ilr~v$$`%vT64GG|V zF_y}qoy1|A*)i%rAssHV$x%5H4+i6)nC+F^jj?Vc zoyX*WS@U80I1x(Db4D?4`bAU6L+RUKv*&iW#0M^=qf?yCbWX)uO)+uf(5}f|h zJ77F}1U$6KFYtS3|2|@9Olov9Ux6y<3}wOAz}}hNZ>VKK4M~s@&TxB~jY#HKcnDq3 zZ?LgrI$^p%O@-Fz;jfnKF=wpBJ?mcG_d%rFgG(;!%ADzu{q3t3B8eys)WfRy0=C z`Q>!^ZvH%8YxESv!12>0Z>3m{wr3q@gt659qPmkuf7EBG5ZmwM_&tun5%6Y-R5ucY zv*E#M7br)S3L@q{ukkoJmejA{l86(Z?CKSZ%j1tLr39K3T^mZUZyE?06aiJTd0-S=DpJzr+DAl4LY(>)i^1jLr>VC;R?4JQ02L0I5D!=3 z9kFoRlyjQo3r&zI>FLpI>N)4feX-)LO;rw`LDm}KcMInG5DbMyCDkzZ&#iv#RM0oJ z$qHbb>=@d_$aQC940Z=}J_-a)xYu~0x-9vuQYNH3IR70f{-H@#h$iwY9Y62H?-Yp} z8a(d0jfeZfPDQ0AF8PflD**mb>1L~h^&KOr&U`d5wISBfjOQ_yjY%QDW7nB-ux>V`wZ z1+p-}l0dM>ygF3@+&rQX=4IuIZVrxUT3|Zy!Km!WZl0{=5`BNA#PO99yUs0dPtaE2%3dAgMP0aNZa$%HbSFEe@}!5au#~ zy-#o6s0@AbM8^C;+UswM{4Z*)0JbY@a%>E~Xt$h{f+99T{aNth8bj~~ZgY7W&2eWe7bl!^KsRO*o<00nbJz4Q=j$Y)zT9A>OQ7{T1boG&|cT)&oI;m9@p$a_(@ZbEyA*p%!+qFtvRY-|RpJbE3tQwul(U#-Q0ZVjL%cWdVrTV-xNb~K)GuRaYnzwf)GdJF(q^~_Wi4sP{J9xC(Z zO(e{9CD(x$oqKo1*2?*O{BX@+jLn);`Uun5@^MiNQY)a6B?10`May+s&njXx z&O$C1hM>-;@ZY{6c62^geNELZ>W_~)luSSrWd!1x=V*AjOb>63$wL#3H3bYxg{i6$ zot&MgxyyBXI5uIi$>GsBg3;L^n7d1r3_oUepjs#^gK)GrKN=3WMbULjaMkc+;-K82 z2^n+@5D&5ZWMNOrGo4{6kb+Mqp)J_qB=A@zw+!g52p~>kayQ?BzTEYVmdNG}jyQW!w& zp|?2OV5vy)5AH&ji1$uvJCqj-blJyO zH4`;1tvuBDm2f1rn)6A|*5mVy;bo15nAscoC433*6}yfHG>d`{Y3jL2+O424__WD1 
z(|vJw)!iy-roY(($!cZDFwMyCv4EmeBnGh=Sau;}@tiW&!HobSM~>FMTKCQ?04=^Y zsGZrNDl0d+jOGj?uYN>WOvWKETMVxaKaF_XQiXbshVz?Ja+5W&YkDT7?@ABzv%i>Y z?~#U>TCVh#?`y=HbuNxpmM*xaFdOVEC?lTgvVd#R<5t}yl_Po)A~I@IMw*jx_7~ER zY+_j{d%j2B9c;47dy)~tcMR-$pX=qe!F!qX^)$V!Ws|k+zk(G0RIJ0wRyKUyaR19I z#eU@s#Kxydy4L$2L3Kblk-9adi_dh|LY}D8Z#K+Rh>fG%OdDdoQJEE0L_&q%a$$uvQ`su{>q+4UDT(*KC9LK|=gTM~$9#AUI`Z|N!DK)sw*MtrA^%l;fdB24=CXPwT$pb+ zqwbq_{=l$cqnH2xr~cUg_H*$+F>8v|HA(NT6A!gVJL!bH+ghSO5~8q9q9}T6y6|Dq zL=n>434jaunM;)wycw8`XfBY}V@&xvvD|#O%V-_8>Ydk3E+*7<$Mf=2q@h8GO5E&{_<h+<}K({4R@NXHyzvY!C}g z5gnU@U_croy=4WvXe|?!0I@g!w1>Z`d^6q6R<$!jYP>6T$%d%J$hLb>TXyFUGBw=X zms2mrnr;;aJ3g*15uS7EVVM5Lw8db>6Qt@pGS;w^hXHF5kZK#t$*%RBQXt{SqLVT25aB(Uk|tgcpKopqlIxciAxjAubT|0ycTtOfjj8G_u!M;X@m0uX&iU?c0J0k>q-J+H)Hiai^&bI224 z??Ho0ZYXC3sFRUmkebhj5>i>a8y?iea18>1@;I-+TDG$9ty)4n5K&>4@G2S`b=3Q7 z?5c<@nIr=R!=p<-J&6-Kz(1O+Tkq({m6gCh(Y^aK)k`VJiH;DA$uObdo8#RB*8w!) zqhqUne?Dv3oM&aR)hS*yw~m`0=n*Jyb37y@8)mj~U2! zHI`LreiJANJ$7q1@Aq?OUB!vIc)2#5HNJ0bKg^=C5=iSr$)I?5lMnvhx11)^8M>R` zARJF%FR^)xM*0Z(sWzSzve;9CIz+qxtH0%m99Dezh#Qo)pl&^FU@Ya{_f$2+$g&hO z!*apeO5LtMWXWb4&Onr{GuYLu)v=WUU8kC{l|$!j66!z4p4!}ArlA*-qT$-#77;wwkx%$UtSz11S+wMLcU z(1v&%WA1A2*87h1ys#B0=Dm9GAshOmemQViNNR8{X4Bi`{hc{oc7?J@h%*x6Y(sr= z<#RiK1B@DZ52vS=Y0+{ET*1zEsoQ0?Ffh2<9Zbz0g<-4S-Fe}Oa5tI zqZ>anI=>vC02Q{{sWh#6d4eTY;+ELlHs-PGRT@?UeP#zb*08f0my!aZ2E4MxICG+? 
z;4LF@=c08SkmQAaxv@csc}|ypXBTIQt4Y^hVueho(6v?Q%Nia%uX88~DhY;r;co?e zsr58BrP+KpmW``>-GWlsiQZ+U(3Q@|pG$ZHQb8qqhn}TIi`k6?io5+qW3CuppUDIh zUG*(_U;?-z2V^t{5D50_i)ScW^grj>btf^}=8s|{zhvnw*%JHM55GJ*14LVz!#C&E zE{Fq5ZufrjNX@ldDIKU|RZ|U^3<*yt&+Ny#hUWcXc#w;}hEiSXz|7u3lrX=jN*N?U zS0jW71OO}mK`%|F3uPIW!)p*5sY-ND^Npmcs^ZGj={WAK)}@G{If2wE7EVJ%+-WxIx}B9px$Ej)C9^z!wO`d$50&w0Uj2dhnBgo9c4 z^gDwB!q+fTsPj4WWoep2<$DGX#PD4q6oq4~-hB_XC}?;{unGUQ8XQaDPhJemdcN^m z1)nx3?X=YEx-ajXva7N`ImJvcy7ukw{NKa37HK!}mh_B2=PiGI!GG3fpN3+Jz#UTK zj9{sL%Foa|zNypVufTafC~qJ)I63_m<<(D^xpB$H_j0|l>Q71hKRem@U$FAWN}eGF zvm9{=NZHw#t)R@E4>ut{RSN%+aL&14K=m#^iM+ZTcZrkplg?FjCq557xmLY}Az)_} z`s7@EjQ9vCaD8@%WzVjCU{^Gwku-{XlKrl9h4$17x>IZOf=xg{y8IYXRMb?9#!0TS zeXe$o{TQUg&e~b(AJN2|lP`=c4Ox!VI~=rMJA?vZv@IE&I=CG2PUG@@+fE8eor|F``d1KoZh2gL}0o6W6W zxR=(n;B{B7Gb7(0KjSf0>r-G&bTISCT59G;;_~noM;9lp1T9B&Z#VuhUr~`7b#uyM zt6u*?=(Ew(gXQ@p;-z$fs-+_egS$8`9TqcXq0AyNgBkbR6yJg^Uc{sfR5Le?_N*4n z!a-C2Fv|Zm@$26=i@8goXa=XO4q4%RofK_T?qDpJiZqwuU1FXyp^_16+4+4vk0h|2 z(Pos}a4GkD2XFZ$YWyurdsfC>nd^E)K=Qa&Klab~{bSciZnrCi6+K#>xAeA1LtJW5XQ1TMLBm-SgvKP!9}YYq zEi<35)sIQFew@Rduk zN!Ex!cT*!7WGRfsfZWvm=Wgyi29%fc_G7O%Qyj(BCDhLY47ae%&C-R_(feks%126T z1l&}ays78d+zT^zjc?U=25TMB|L}oLdE`)Nmd`Z|$@;LLS9xC{Nh)YiDC}(SL|7t_ zF45qNuhpoz!6$FSQ8{^C(zRoanBb0?tm=B@2#n{nH=FZdg zT2uCBC^xl-f%@C;nXE>bB@~+Vtq}c^Jrz-NZ_()2=z;4L1yNy zW^)QZAs2V#rzoitMZPTa?qy{M^iDnSg|s`JdaI;W?mC6rxhDe6-N^EBnnCZCKmJGD z%ROgyS$OoN+OFt%N}OGH@H3Mt_vp-1K#_0EPli{xSNFa8q*(6d_`p)j)6(YdR+u-IoY@ zHpDSml9UdJFedBGc^MF@{`FcsqT4W>y6K`GTQ&@vLx*ZWapBD}O$d@Eq_Or!C{9s4 zGn@vGavH8t)RS@c_z?GyH;K$nMZ8TkYrMDNv}9dX!HzFOv0gTVV-ZG}pH12h*n~ct z^jt{FvXZNG+hBFfNEyP-cx|wq$!r z8-jL*1pM`_PFNVst*4q(%N}2=J?{qH049-yxCD=Q4n*Ia6P~1nwnrzHD%Mi!^~E&9 zya(r3t>2hweV_7W29%EEs~N%#C$pV97QvZ}vXv21}Z>?N@LuhF*lVLC)9aepO-h{uv_MeO)8un$y;#nh%L+q)t*8I-XGCE3{({ zWWJ=IAV2$9@Q`Y%2rFX)!d1PvSpeo4)!GhMnfqc)P@K&b6Ax4nwuYxcX62iW-R4LR zFvT21ue<2dOl=l}k`RAv&Q+mJB!Fa%aTi{OK;jVl_`=3>Z{TBFKfAP{)oH7fd}KEz z)q*b0O3tHZ+vb@p+W>dl9PCkLvJ^Evv?FalDNCV;Dfgre-$N^IT5_5c>fY@XPF{X1 
zV!QMN1fA;1)E@5AGxfYd)Pc`^x2d#Z)`A{4cq;APW|OEjdn$%I#d?nTm=(SVl{V5Grb4-4q5+csR zn|c<>cQfU$XGsnN&Y29(#H8+v_z##dJp6WJ* zwAl7p8SHzvH;RTEU01*;tnU($ONlLQ1F3O&9*v3G`a62L&1I~07X*f{co=RrJ$i_SOGp|pgwT2IcnP60%z70D>#;i+uvl~kYH|I34p)|q+Fc!cKCgQ1cb zA&9Vlb`^*MtT`_pfz#NPsD_Yc!x_*}|D49SJZdymBhH5O{jl354soln4qVF~ z2f09qGO*e0nRgN4y~Hy~#)mNh@O^n^E$fXb;0h;8Jw~5kmi5 zNb?bT(^40{C92j|Ras%M@qn0{KFIF48&1-jYhQ+OYpAtK zR@01Ua|UhymWkKR*#@F!C`*8KKLlCTgtL;IlCv~>@N;StS31Y2eJXV;UQUX_Vy*b0 zQaa4n)Gb03c6)&?o*vR~pH`9FTYUVRRcNn`cZDL{| z`4wf$N>5IrvK6eeLVyNeWO=5HaTeBT1%Zmf$S6_hXy|OC$IRn6#~g0mPR%i3M}HZ2*=HU(8ztIsD*6hwHl(7A0^ZPyX={wF0WY=My3YUc72Qz$O!zf zgm5rW52Tn^8onMSy8G`<4{0tEq4rm}Op7~KYZ$g|q=+MDagZ$Efe)YCk)h8KYOMcN-JEnQi=>#grGCvt_ar{BW4xN0K~G2%?qBW%g?_V@(zyTzt3T zW`6)Eob`eyo=RE(U7Vi^wyj$zw}w9%n~i$SU4Q6hyG)oY(9i6_vr@0bhg0)xg_mh7 z0Gw)b@N*Kq@D@R>*2yUUx-MgTp{Km9ZcJ`<<5T~cnpXU0df39Vx0)Q38A|!Dr_l7P zWm(EvN_uT1Lg*S7S7e8Y74Dn};Zd1tOFAyUv&=@pccG;gb4fUD2+B&|9qULHtW~dP zDMw{m`M2h3w>^X{lLPD-iH*_Uj@d0ZHWVn#L3b_i!`Ac1bi!k1D5Drk^}6-KuVlJ4 zc7dhzNI1u*)}uRzyVk{P*r#6UPPms5OAwPrc}!KnmBCT*lGqUOrQI)(je7uJs8xm zgfn)EqdEsJYO?hByzmtPb52<>u3&)(YEm(Tr`vp$+nA_(=i4ct2b!|SAB{znqUy*K z^=;^5uX~7Cl`=0b$xCIS!jj{`jvHosFvhRRhp4j7hz71Srcq!W#pH>9!N>pOf%Ctd zhi`7yzwF>&PUM!m{VzNC*BJcE4!%=Q|5^+GS_}UR*TN#|*E&gL(jEuT`+oku4);8^ zK6%->?vYhcK7Qf&)+aSJnLEMW4##D*JUzYq_sAFr*!h3^`KF7f+a4M1;|hCZE_?aj zckn%Kc>%cQg7qGm+Ya{r$1P>$_W+J9E?WZau%hdnYk9Xy@< zosY}QUXa(&IR0Z4g3^Jvd&S}E4nniwJ$}D*me?)t+VQK}dY6Wm-SJ?<3f>*q9oS9H zKaE{)+^KE29$+|jm)fe-y4~o$bS|VjY$R!ZMN_)(4UD^i z_hwGdg0cv(*(eZ~l{;DZ<%37KBo8XZwY|e%7ZyOz4+MOrXQ=DTgtY=OlSl`E`F0d74*j z=4-ip?%?m5`Nz&)&67(?4?gPGzA+wV6oGI{thx2+_I3x&jrZ@yLlgL&1h4EcK9u*t zVT6-@-P)$4>PySPQO9@Ni+K7^b|`?67PVhh9SnDNTu-5#?0$7Tcqp=6XHLxLWzMVr zM%`D(w$*)Grb&YhGcz+YGc?T1%!$L)Ff%hVCk;2$Ff((54mRBI`1`)!@4Yvgr;$c8 z|BPhGmVIU4d(OUVueHwhJ}e}>M1L5^f4xbckB6vog`GG25&0P@$;fj20t1oVI2dHE zEh+o+2bErbTKgCF0B|jQg^!f?8y||7*C*f`-N2$*%rg~2et>7ukVfe32@jY}aY+)* zr-fL8(2sBG)6}UN(Z2hA_lj#)7$(!Tz 
zqL5dp*wFY9bgrRj}wnFjYdC<-Vz#_Lblpu!}i!-fxOH-N4A009!9(>~k7fdF3h(RZK@&VbiX=r`SkoKr97B(clLHK7O#(d~(w*{Og7j)8 z3g4tpT+Z_i(>@_olPo48)elF=5M>6lgc@w((ALTqNt<4yY?xkPI_sIyQ#6&kY5M9& zI7f)0T)XetN)kvmaLUz*O6CP-A=i?JO$>R%lg;s5AP=dX055~09tH=fc=z4hq&nLe!!- z_+v<N@ z`u8HKbmuD599-y9)#ZENaKDcmn5E^4PInwY=sG;9*wSp&a1+XbE7kvOf-Bw0dC6U8 zHA*;g2#={CYH{8z2b8Cj(2Ak_+6{@t)xbl#y=$^g%V5YPW~MkGZ8=+#YyqGXmCDt=$PMYW5bNZBbvT?3WB6!naP}Hw8S~N zo+F1t?5GKANW(;Gsh-#;-;$YnKWOZPfy?#I^B{L@<7}UDP~I$pyNL0hxXIcp)-oKDmR*( zO_cAA&kZ%Sh%ddJ<`oXz3XH92_+fO-HJHt@eF%e$k&2sc z4arJy&ukwfpW&cq^1(OFhMx3iG%eh1XToId8?&SUcFcF84DnqA#b{TLRhad5_zl%; z6)m{yY3u^UXc{r6_0fKIwBA8n2Drc#$Sc2cqcBoc&FEsu^>SPL>A+`NmLEScW?bD& zrzR+k#m*h2xwF+5XutAbd*cSAD~yUW+_|-0Kk@{eaoI)_r;aYR%dooa-PMkh zXgE_^3xd?3SqxFTe@V)PTlM2_O2%ThtC)KVxEt4d)pwNbl%`l~(sffbvU~=Ulp-Eg zrPE-fnn>^1G#OK(1SnNb!=7PqnSXP=#!&9pqqFcaos^!xwrM3zV2qK_kE#vd=w|Xe zSC&{ORM`Yj6B&tZ$sbFb9zBhI=+d)`i2KmFl-9b=={o17%Zd}jjnG-R(~RY;u0G#> zN-jtVJFZ5=<%C${L?}h-UP1%M{c*i=Z}zK9Y;hjUWW*B*%b`_@y)E}>tNSZ*_uU{I zay9ue-uO9ArMO6}Or81(cDtQj1j2jX6I6Icq6*!K?^GY2Z(rC{tY0tz8d)y3Cq-+G zg6GDvJ-5HDr3OZ$l}^V5AR9V;0~(JgW7!YZFaVXgm2`5dvsu5hW6mPdqq_PEf;F8* zvJl@c5vhQ{eaw3GeGzl;vS=%m+sFT&mnilIsF~_|AwM{;{ZRa+ajm1#B=ACx_aan7 zC8MP;2{Y-dDs!NpEq=?XiEFtX{icK6rmC_C&gvKipUhaE4Ofj%R#oxpYKCDVj?D7|gaQ=vPetUX$`r?2Y9_d9(C82?ccp^SG8rqkP3p z!ai3@cGAvxC$gK2?yLhAt|S&=rZqp61EJHiZc&gf7bkTBw^;L!GF;a7lZLwo6XB$W zS~0Ve#9E?H*7r@@v1Sib>P=q!=Lgm`zEkk4GR+Ac^G8ZS148O-!AjTPJG)Z(^(HId zcApbuF`WjwthTgtDLuPr%F zrSaN|iuI%n@!~wSjudCr&E+ctinfPS!Cm1!bidVkWdsb7#BBh#H>;+%adVts#o0M{ z{l4$D4lHopg}YC`JfJZT;AFz-plV`FrqVspzz82qE#z#GgCBk?J1TA7c>*I()CzBEjlJ_##uT{nPtuSx2W z0NQh@4219tca9qA`tzK zlYFboyXf;;w1HJxXD`HEJrZqzIS&PV$IY`}9_lM~JzGD#()aH!DpKy85BG@@WJ3$? 
zpS|bbb`s%mY*wJ#w|{5m>4iR-)WZ;X9Us1VFcV7S-$SsZl-prL9g2tL=lOgbsTrGfv&l7)J%0-u;-c<@B177-lXmAs~PO| z?R<-IfO|lR%#_a6uT;|Md zJZ3!1COn)LCd^!{94yA>+#IZ?OqL*>1pqya&Frlmm`v^5TtPzW-_MMej$~h1SlC(q zXXVAg^8au|K-bO4$-&M>%*x5i$j-{j&Oyw{%)`jV#?8w8|IL7~{^x-F@1!U@D;L-Q zBt`Xg>knEH{Dpc=^1jrNo-!6tGas{hDRc?HY;%5!DwA0FKuSqD-0-wg&dZ#lv~Ego zPGg8dXCk7}4oe>X5c>3DuuJ4At{T{7zmNk^l4FVYcH1F#MxO z$!GHNv(Y)Qytmi)UR15@1)@^^tyY$+PwktA@4@l`Wa`0u`>RH-MLg{&X0TfOm;g(>oaB5KGC zgY0rSg=WF?1|tfx%Qoi9r;Wi$jg%>>pMIx+kEDLITKAt8=S~VH8Jt!Af)h0-o+O+?(+iNX4&bH0dZ$krN!+b z#3GITT3ARxjt$&6S|ecFOOGcHvLPyDqr`t)4o%=hYc@(X-qJpZzQnBIP*zvv^ zn~vN#@0}8YLZMXj$T7yn@UgVytOy51YCmv4Y=7>MYCi^8aLP_?f8k7I3{YCuKK~wE zaXtPyCVm8A1Tr;Pu1+f3Ddl8GwS7R&j4R=yFbc&TCAuepljqKI4$^PaB}s!P)oM$G z!wWuq8$VymlyaB6JnK61z>Ze|Lt?XfkZ=Iqs-0|BTg@cORbc%9g$B1czPe$eV772Q zFJYe{YNRNlw*szsU$bY^(^H46V===AJXgv|st(xR0sgD*GGia@*#YJ68^w@i6W}ulh9kmMvi}g(k;FRZ?)72B@-|v<1>+_>yBtCY3vG!3t^=e0)K5}b zwC;r}Ye;Eg*Vc;wa~(XecYhsnLq)wZg!a?>2DddDmBM^$n-Vo+g>%RF8}}z8Z&B$G z;mTS;#3}|++>;Oy6f_t$=JtUTGF;JqM*=m1!=|S=VtF9uXVy}NCASN*EQ_rHL?|4E z0VYzRcYamBx7VAgA#^bGlU~o~*S9Y@Ho(zT@kw3$+GrkQs7%HDN|oD4IKB))WYVYC zwuLL1+IOet*LbYYK?TA0htD-Df^UI{kxyKiU_Ztx zG{z2E!D385_ucH1uFOuw_rN@nkQbxD+@bNa@iyo|ybP8&__PRD`Lrh#?bO<1zJn2X zYq3ZMs7vxw@*qn5l3dHFo{drP=Ann6E41f>T=e(-yic;z#Zig|b{MljIq7W&DE1Mb z{nRzy_lQWeB>5F_b+72w8I@Vm8o<4x#`4m5g5{b=6?j3*J{*b2XDP9m!k6LcRM80O zReEfnEusYk+$#p<0zAq_SxT@ZG9eOds*S!Tek&zvSDx#`7;E^Q1V@pRtb7xRVehX7 zD8NgYY$A2J`kr&O-+z-^v2{EQPiz*ouSy1V9tp1dWES-jE!_p4K zb#%vSg!+F`p4jwqaEogn)kfPwltZ``Mm|uwYod>TkS4C=A~NZk~@Cx z%Qu|mnHT1&DHB#WwoziEM1<`$98pIlOL1zKqO)8lML-e$!8QK$Vaadeg~Ph$*%(*F z#)a05gb`7>vX33ykb^%B+mv6Nq#XS3qDQl~kK%o?+$t zxldb3xjk2J#vz=iEdbPt_7|Gec;$fJno6ITA`4f{ICAA_s}>Gh@=8}7RTIUD#s&Ll z+Na>P>fE@qIP6SzyQ&`RE{ok$h`8~{=u{mA{=yEpXVI2Yo|(KX2GNH__t zJj2$n>lGM|ny%;6MN>A2y2a+l@5Mv7Cd5wDmy-wA-rwiK9uUs!DttSn71^BG` zU{8}%H(DLI7v_c{3sPV#JaEwJ3uJ&;ZOVC1YxXct&YtEA8&_aHuF=t@a&HYbO9lM= z{loNA&8?%?PdgS&%Hqe#YGAV2_zyHeEuG&~hXIUA}_X*O(OdX5%64FsoFDoqgG{L^uA{*Fuo`)y)#8!@A9$Y9IjcnWv; 
z5;e65!tN;K*!F6ca%e`s+$}g`1PrE^)3$#Z*xb^^eGej_f!h3aFD&n-8miK~@9^h7 z)u)KKzO7ia!1x$HS6Ef6zKaer1N zl`1`6L1!Q+{*;J8LOsSZn;`opW7@*BrUvtcq#mJ&CTU7xVEgxmuB6H8J0!~p(mfa` zjJKNT1ddhNocmaSr9}*l1@6&drRi+`HcsB~gWnSoQ|)cO^|7CLT*CTv1rnq}47fpl z)nuC&hB1NIAI5^8mfq*JV9aaadEvY!L#P{qwl2JvB-Y>JQRXwb%GJ@Re5%tj&k!d6 z^(ltUeqap+mP}vMu&Dj|bGcIv&TsFb>0P}PG&IYwiJ}7wqfKbMWh1S^=gEcsVF-e^ zlNsxQZwi+v`h{{sRG$yWm{d3pb9uAroKZsSXE_!=d9~vwd&nC}9UF2JanU}KWpxEr z^XWeDxFLBN??!K(db`+bM&7OyF+8?4BUEDp_9IVG^IYE(co2F={bGa+9pPZ9n6tNt z_p|eYAqugc<$2hQk&&PTF67QTJHlK>ofp;Nqk4+zzl@Li!kzR)w9P(ihzLJrFu5(%ed?Op@%$8KWuX`0VJR@G|j<%Wxh-#>AJ-I-+ ze$5!v8oDZz4kli+&XtUDVYX~Xr zFE#pU#4JXLA44%>1#n>tUpbem-u&d8d{;y%qApvX*lZh8|}h5$cTaoWW<1j9!%`-!h&z^jf$fP z13wdMrPW=8`3x=^2$BALQFXtDGx0A~q9DmJVLFXRaLBt@stb zAAN5piE)1Lq@z?K=VxR;KH^tvvbVMr26Hl{PL~l<&EXQ^d;{>KpmJ@IFdL5uZYMMq$z| zPv-O)rhd{o^=wKjLC#al+3JMtt{5hI>ddCcnBAL4mLH-rKWBtP`l+l^nk65j4xwgG zAM|APyYI-3Jm0_k&^el%w{4iVdX3Wb;E*giukEItTU_dtI^aEHO612Hpx0<+9JsUPU5dZFbw-CH94{E~!F{q%5;VF&K5l+yoh7ja>(9%)tfj5! 
zxu;{*k7Jm&7FV2eek&~!W7HxIbsm{iBCSwCxeyo=Ir%h{Ds5VWJQJ+}bHs!r^JQ}4 zLYzFpTvn44xQU!nM_gzwzV!uaS^^klUE!+nxROdzc9tXZ>{TUJwQQj6TV@9gy=tO% zs$`e+c{$OY(h6WXv(A%J148q@@AsGbk9x!WG%Q+l1Q3$m-BwQzhp7Ou?DCNZ^Y2$C zz-+E}!Yrwi62DP6&}8(*;JuWAJtwq@XM&#oE;nC>o@ z*LC{kH*QKtI1iGBWSvj+YZZ31XOpz{pT*o1n}@gLcWTw4p7p*>F3ArYC>v`x5h8Rk zIwHX+V|K0(E&AjEbW+DgWURp1P%kx!9(-^g@%g>TSLCq;{jHEM6iH4n1MHbCKn?n8 zAxC@!c~4aPlx;m`xVCOM`1U=}(1P5a&rO1}A*2#_bBJwEwvwVVwoGxuPfdXl*+hoE zWmug8e}KjEXrPeeZw;t??r4ByLiq|9C) z1D;DzDL>(S3?wqtL9ai>B=V#dLdNiO*JfzcUgk5^JAQnwfMoO{0wV&mgn*?=q56vP ze)D|Kwuo1PuZi>(OG^6c2C{*#Fc0_@|&iOVPvzR4DLYN(yA`jV(b-@|xCW zpcQ!*7H(!ZCTVkPODg~|3kT@6n4_Jei>i~cDX1Dj+}z#T)LcbM6jXg+4RBR5cM)^6 zcXD(vcK{G`gUS;AcT4@uob25H)nGaIgVsV{dG^2QDhnprc&I z_K7x$)-P^s!|tLSu83sl2ZxbPBqf#*7oLlNiMWevXt?X_8~@l6FvGXBSivB{zIqhA z8gPBs|Nb=1Gw|p#JxO~TozgNOY}jkB+vcasP(U2DqB0U8PMRo9jv6_*ZZsU!*Zyc{ zy3^pTT!cQp?DY{T{;9hB=ucDKG*jST$NY8ZVj{Bnl9H=$QVrQMAycNVfvdC6oC;GV z@EdQh(?4G&PO2=`Q&M2s<>uy(F}RZ2i!T7xQ&D5ud49gu%)-J#@3%i3Tdi52$LGPw z&7Glrxz%N9Y#hiB7b;p8R$5w$DWCpGyy-fMy`i|l^ZgXl?;M`qrC0biV5#k`M!@P( z`(-zFM`+VE!|%r*Nr8>{jh~(#K;ALJus5fZ?NfP^uE`0mq4%mj>`v2w4*v3WLHgve zi!1GO;5*|1i~&Z9&7YM|jEs+L9VAXNyA+ z#$AZK-_PM@2Eu)PL_8NuA8}o`d%Di~y2`w<$zm-0tv<6!5($~^6h-?rE0E|us$oph z!Rxi7VumkoB{A_ut@8pr#@4aAqkjD#Seue)^@IQU7d57(PP;$-+Y|d!s&k& zH#?bc$$#p_BgUUks9CIk1l==ngTt8sVyP zHCXR??xy}P5b2I9?ZL@;Bt>sn3~>pojk)hrG`BWYLSGlUD{A&jv;ly-O4V}rt8Iql zvE_P0P(=Lv`SZ8)PLCh5hiQ=LnC0h*v^$uo=5K0AIV6su zgS{93XV)k1z_}^|)ge>u7Qa6yyK@_56OfW$ul@+px5rQGKQE`0L7>qyuGEw*3_;+m zyxoZYbpC>|iR~DZd3yUqKD}|>o1I=(Muu{CVR40mmOvdP-{3G?L`+)7PJz&--LXTk zM3J%4eu`8#I-Lw^p}=_?V*{@p;t^Df!|(q`hWyL2DSn#!8@??|>yKz2457`_pL zV<&i|o9Qn$Rm?|2KZf>K+#mSybK_Q%7;*3T;A3tDy%Q$GM@a25U?gA|oPa}r+mR&* zfm_1KVLc;T8hx#NRC7y;v@kJPrB2%F9Q&h3UqZ#%#lzzw3-PPRqr2V4ZAV@TXa>@L zajQ=9MX$Vem_USq0ol2(bS~oEZRWnTjQ90VY zhV-$66{rj&eK936XWL5^>_XB+N&8E`a59qHABAc=UmaqyFZqnieDaMe;}rc!Z+pBf z8yk*iQILUN{YX#4IX*~r-!J>$(T1pJ0a4%K0Z5&MT;Eq!)wvj@^S)_E=kVP-pBf=I 
znR7LJV#m5%Za&&5lF{SWiOR@hb^7I3i3*54AkmFc-iQ(H71cd3i{@Tiq-F8>@j9G5rfmwFqy3Xud6^3ssKr*titYxHf9&}jdV#|)nS1a^ZReTInnZQadt ztn}rZ?VF_8PB+q*&OLz}xy1kwi2ULCHOqoc7XKQyC&_6x{X@BwMa+Y+Ppe`ak$KKJ z73D7o{IlMm!LIL*@@~WDb~0t295bF*4aA19V5-Y2ld}WXOwvUAEzn-tXWvD>QwZWz z*|}b!>&wKx1uQo@t{uFg=5~{vOK=c~qI~vwUpVeMMVdz)4^r;U5j6@fn0TDfjvB$4 z^gPaXgutr#jmryVthgJLD(hC`8Sv&Fq8K5mRa4=ULv!So90{}fc{nHnJH)q!Ei%nr z&;De1ipCWMs{@FaPF}XoZWn3$WDDp#{2WSMT0}oqjjWn%fSk@CBimB7a@Sv`zw#Up z0M4hk6h-CpVHo;Nc)6zu?$C5#isS5v}V7u9-jO$75J$YPWq1-idL;Q5h25 zV-(49;%5c*tuFvNCwSkJosj{su(0$9@|qeNX|-XZg}b($Z!eEjl$3S4Z3-$2#+;=7 znDHoS4Iq?^akiXx?F9v+ag?Ya78SW}_s(a33wQkopc9+1*9~FF2G`QSTi_Z?+0lm$ zF*Tm+{N!Wx8hA12pW!g)PndyaU)EaFQ)#2(+MXIXHj+C99IbbMrhZ**vesan5rNVe z^chT#Og~S>UB+Fu-pu?Wy{X&6aPwRXLxbRDL7L4hM z6-Y#tksnD*pP6a9sLb>pO^+Rw;2;6!8~U99US`1Tbl-KzpVNj4QruwvpD7m&RLq0Y1g7 z>5AU^$BqdgD|^z=*osNN1KVw^c-ywckV}dE8|rrSRH>_zMBgME5Xi*!`p-O=`Osrs z%LUI*Qb_eN0*N)uKB|${I>U#eI!7{1C^vX@{bu3E5#RPiWk27Z2kav|JWTPWmz6ZH z(|=rf$qOQG<>FtM5c@~deUfVESD<@c*bg#h}eHIPsc zv9^l@YFJMc9qXfuhc3EZz-XBiGuhTgvr^E_Vnq!pPQKkLIDQsD9ij63tG`ys4*G(c z6c>ZYE?p_`udI?h#>&dNS|}?&vtVv+zIPmnM#V0mSUwLB8*a#qq$W`7xf3LKtjIOZ z&VzZUzydGUTA~lAs~+3hkdCc?k~@F**l76G z{vi<|P!RfxS9}$cd45E^3_$WAx*=ofD|1s*XBu@9>tZ1r4Kmc4B8Q&E4u@J1k* zU=X&Vs{vdP&aw-mW+|`>n;;#83pPDf+`AqlJraYFyt|(Sy$1=)?mrC0w2SmV z{dK!uvEfT-QM5DcV6$O>bAA}7F#1#e9W-txQe-R~*Xk9aNYE0Sc$@4hqGUvz-4w5wTW|mJ(VvFuC^to{tv+r zYBD0zS`7w9U}G}!3;B>>Vi@&bVA%`*B*|oNfq>$y3&3=QUF0^`Mfj#A$VbsicR8 zj^J{`@Jmbikz19oTk31X$AD-laNCA@bx}!{MjQa~36Gpal#+6cqWJHu5nYCsVs6Rw zgqd2VtL0XrS_;IhHwI$)A%!*P8Lz7=TJcUAa2`6vMzF07=a&&Tq_OLBY$rCEj>Mgx zpEKyT`aW)XWM*b|d0Z{O^XuE$oj=_kb99pp$;>c6x1YMDdm;U_Ra_7;{3FkXVbzr$h%&IprJhQ)-8Vf;(8T$O4}pk1cGsL;&$eXj+U@U=az7kUjJ*}R6A0pKO|i_ zo@{b30`i1LKYHD5n`)E$sJ%=Nt`>&!c;eN^-cbg%?&DH^SIi%bxX1FTfll{VlWl$A zp>{_mmqX=tVv&CX`_MF`$?r_(l$&p5pHOXiufb$I!CdQuML;MadryP-5Wsx>DsoKn zre)v$(l`=G!LDymxbz~wdML#En=N}Rq3sEYEcH|(Rz2T&%Wz`xU+}-bQve(cxWgGL zl$&(4*n0`UG+R(!pa6CZzsphV5F*_wAEh+b6Ki&pndcauXKP4ksytmu%h;Ww8_v{` 
zcJN+F&#|OP(1<(3JR~qGNT%M8sh`bUpdsGk$B%o_l(3K2aC3F;~&G$NauFactAc&c$@37*d?R{X{NAucESG z4GV57wxE911%J8OvA?_9H^02REFvPZ&apL?OqHCJ#O{|aqM#Hp(l^6Gt+kI7z+Hph zot@%?uJ(kQh1k3`)weoj1k=WaAc-#+SFzna^ScU`$CKS6OYZSg+=X{3ce~5l=a~fo z_l~ft&ljS>(6x!4hNP^fxt##ugheGO!~Ri9N5xKKxBujr<#rO4gnzw53YgNO+_1||#(_uX<5wFW^v zX^yM_mhaVl-0c?IN74$HFS1TWW?_3Kz`Pn#+OJsKIs3$aYhcPCMLwQd1#2GkL5f+EabnncfEkE?t;wwUpnHCV*ZTDRn;_iCu#N}U%nQI9|_E%8?!2> z`H)wUc7G>r5qB0kzE;XF-!VtdGfY&`v&wy)^uQs<6q)^NE&xd8DCYf{Rlmzr^U3L; zEfysffP|J&k+aJ8Zm0~M+}|&U{hoqO322JT3#c@89)&|EsVjjhxtoljYTf6-s~ib@ za~XrsjI6sL#w>+OebaaWG5d^*kwToB}sH`RWUkuRr!snM3V^h)e;)su#Y zcHo$q3e#0BI%L}+fW-;z_ba>-A#+Lb+A+Ht^JT0S5dOzzI12%n*W7=_dp}w5h(ohY zTj3gFSB9`R(Y7Ppn4dQ0QMg__aC49 z+ot$?@c-q1fS8w~k+l~4e|hhtu!bBN<}-MpF!BHGlYwzbNy(3wOCSACz{PTHSL1a8 zG!o&WTYOb~U!Sm|qGI6O^tAuOIS+Vo=30kAw>J(Bj)b_lRlO_d=6ZjB-yD&M#`N*_ zUJ|#oC1kUlUR>eIU^R!cbar#&U}3RdJTcu0Uz(h>8;VNJ^5=8d{A0UP2h>ZgQ>cSV zqt~AQZrxm`-(`I2u+iE*d{$2(t)NU`Y-);zfkDBXmXdOJB}5F$q|HCZRa5@H)}OXv z-B?64<0;^#r21u2TFY$lN8$)J+MSd8GE!49(8zKa{9Yb> z{`}r^r3*pF<+S78LB+v=O}t408AIH>yw@BAV)=r8XW|RMlY`SwSq?irZ>cuQ#pUHA zD_ixfDtB?>Y9&RdP)66LIe7`$J6GCa&Dy#=ZG9 zw_O6ec1VaZ#eAjY6gGMS%|nvUN35kWW zsRBN70pf=|@|&BRo+k^LtL?D%w6_;^QA%cJWc5NSNlA!IXl%e704~(zV%-e_3cK*< zR#!FD==?mYsj(gZ;NqfMSDEg2tLxqV;S<<49CTzSThO$(N1>2OHZkxtCL04hJfsw7 zPfBExd5{VQKx`wMYX+U8Ku1eORh0#HPT(u2rKP33z#e<9Ufxot8y+Vq8kPSLxuv%^ zuLbx7O`gT?z8oGyQ|{fqscf!hyIG-5!nejMM?4vBpc0^ zSEGq!d&$vQOxrzvc;?w0wsC7gP34ODN4P(QBSM=WXk`DvhyqAx2C`xMyX&x=&3bsT-9d)-+&JKJIvTL^`SMO6Zc=K79=~<6V%sD2=(SStDRXX{%Y3kn%fdGR zuNIqe*>e^6Ll>OZ*Rg~kAm1VXsd(CT(>u#R?2+44BhjT&U$6xK<%F=3nLlf29#e!O zIKYqg7vm-!iWRSjHewUFc{34fcM*o~er-t8hg|{&ZJyPs>r_x~w>V(EO8uF&Vq5zVTP!sV@Vqx%pHwUoi@>%TW&k>MgeTqVF-;D>L z$m`q(*gW;g<-*d9E!m$XU#uG}ISz3yL!xQj+ zorg%;P9BWHpetb9Qz^-`8lOyI8xh?$S!`;mDl1~lLUh>t;U!n9;~0m>H9aySMVV|| z!i+XPF(DPlK350Cq}PfNx3aRz=Cm)=b8&SA5!U`NVq#*jrnreo57y_kA6~b=e>;`5 zIcyDc1R{z(_iIXHVN~jUIy^i&3KLgQh|4o5e|@#&yyOvHC=_Ma@AVIs&#r>f2eJL~ 
z%=C1uwzqLF1O6!|w~FKXEUM1-b~V{ra2aV%WtuEdn5k8T!_(%)a=XfvE(#LZyq`A@ z)8i4Xms9_feu6NC7w+GNzg$Z=TT8q#;lnL$C=9G@8a+>NFxS3))nsfw5X}8^URr92 z3dkhR-aS8TSQ2DxBP2`At+!%Xqo`MExqy^^3>dH*K)Xp`r1hFE!}>Tkk1YCiQJC%w zD}Cz$vENL$t(EQ1Ru|*C&XTm2@^Zb(I0Glw_Z@l7feo3qo`4G z9|()3k4@kc6eNt4OJ|J5fsE(WJK@rlNAkJGVnS5`eB#fTjC$;-;9uX`@mDPM*$KWwsg+0Kg0c!58(T1< zsQP)ERQ!99{z88~{Z^^m%IK)Hqod=ANHT5A&n4%~Cxsr5t2_`w%6&W?3;fAEV~%o` zAnrL`dihq3z+uJPJ$A7m!i|T6!-7M-3`kmlcO~rA@Al@fKrhD`T7Yl%JD$!ZlZ+A8 zO!yd#G~}oSXCcq7K$ll`Mw^TyxxK%~Sfp77^!N8`nfzML=aYc_hkUSuGTBsPm3*u_ zo1Uqv`UuJnfpvPKYN^sY-%o`+%@!(k)vP+T!q>LOse;25bvt)A|L#sOV@EyXazVEr+en(1s4}=A+8WDtVPg zLUE&=LNO`#&FT3YRME3>tVKXXDc#al1TzOmZOuGDeH=XNay*reUf*6myaG>j5+vkT zJz<1&t2JsfDGyCw9td^+9=vT8-crj0Uf<(b3T*(6psLs9;kttaw!y zp286cEUc~Q6q-OM7R^Brf@ipZK)^dWahB;}B@FHN{)9!3pk1iH(5PP}dp{Tv6(y=} zN2UUwSjJ{#ZN&>pn)D3Fg|h%6UnL?T4!3U*<-l(QeC{B-5;0OHnUYODJtwC|VnMy? z5kX&HAB#Z;{+K!1Gb=MQS1eQu{q!h}kk4Ijfi@weJyLcK9fPa?JSko{ibG#q+zfOL zgYrTi|Kb$HR^aSg&pTDWkgcju(3cR=+4U_P)I%PoVK!=XW&w^k8@uJiNbFnfvHrXd zdIdmQ+PpvE<82>=%TC|LWG${;qs0beU&z8gl%?dM>6>SEoNJYg&V8jX(W(31yg#d|RVxRBv8k*BV>9tRWgg&h zwN1e7gxvf!Uoe;kH>9fqdCpE_naz^v8YPRAkumP9kb70oX?M3qyLonIB%X-NaXX05 z2h_?B(w#K{NuOQ6^f^*!ly|X@D-MS(%^vJJ@ZGg5BCeY3b$b{pn+7L;^+kuWnm-Jf!7N{vy{N~o$eMam*%IqdzEIG)Kr*4_P3 z6ns+i<-hOv7fl8iK_lnb`pv-=w{J4EYltJ~PIlXH-wE6pSK5N=+ z36L`vuG?GJGb^qh7$rxy`hcIJf6Q~~L8vZJNzSCg*qMVh;6usCD3_`*q3O;`-S)+= zvl-+i1~v6p9-bf4g0l0=+fDSNT4IT#9*&QctdYqYAD%hvTEK#w%IYnEfbSFtbUWP% z_}13uQL5wMdp&dlisX93-cRoRd@3N4Inqkl)Z82!8(y)aUes&7K#AEN#X)dBk;Z@r zqKqIW1j?&fG`QHxUXPaoM&Tz;E-oe#sAy<&`QN^Q)rJ2u!UH$n=3mGY@Oe00n%LY7 znxILyEEL*JqL8~z?A8YNvz;pnjU1x}#7ux7Oi2tTzN4*}IY+=J?ZZ>QR7XUZzQIW! ztzZ|I^A%Z8{PQ#-zO1+KyibzRl{Byv>M<^bL@ewl>@3P}60DD6R2VH!i|rp?dd`N1 zhI&k4F|dp<_@0DbM)D*VOio@tm)DK9$?a@8x4Tgabrukd$JKu@xCqi?ILQ_9gaSX! 
ze~xXmIa+2ZxMV#&J>lU#eA@Gsn~qtdeme{_~QX)c>h?mo8wZ&-!M3Lm#bf6Vwt@lS&J%w2K%&R{*3Iskc zzATxJ;&0|S+Dmn+SV$okyeG3wbo@v}Hy4n5pg$nmPI|puHeH=(of^k5XFV07Ok(@J z#`r}uK1}h3+HI+-GuJaa?a6B4>v+iZoKxGJ&2CZH8s5=H?I$yG*%a@CsR!o;OHpm` zMqgmtf}EoI2j(aNKZOObe->(cCl~4JmhZF4NUKK2EQzk$lHT9~BvIzZ_#GRtXMSD8 zC!k?+1(PHN5=A2s=(x2wZm-#nYB8=F{cBA>Vsy6DR_Hc>OPITaLU+kvj*eC( zs{b{)aWcbXR~a)+Gfzp)Di!@Pe?pz-c#9^UE`-I_m73~|UHx*iGJYz&b{1OC?&QHw zr;`L{e3{q{-c&Y8#nG1s?Q)2E6PvFVu{=$S=~duD^34TqkIVLPd9##B2cM+Tl+*O*1> z&mZ`mlmU%5A{ONb`P!!#{r+NX27$#A;LpdmyW^XC@M87uvZ#?Jo0s2n{l#ta>0e?p zwyy)UoQQI}?rLe|p((ZW>a^v{qoG;rUJgS;_AzPOS+=p+mX2>_a@zI-xR~VSrASXU zrnbo_iIvf_jkXFTM8v;Z%i=mTyY%u^$uYM%FA?b_=2G=~z1&FI|A|7iw(1W0jrGoo zxR?g?ksJ9`1;t^_J31o;9WW>Ok1zPngho1TyQ=jr6Mn3Sgo9vhDx^!4vwe2AFVtmo}#rJ+@sz_b|!fivJj|7?&T%{m=Od^}YKiyxra3S>8{% z=PsU=uuSoo-&G~r(}*48actY-&`yy!S(bo?cNeK=(OffXg^shC)Myu}*V+n=_JsTl z_5@r`D*GwMSDgfd7g-*a^xZFrZG1#qPv|1M5Td*HE6HJj@4hH)D8^Z7ji9@&KS`nIy6R4p) z|A6bkW0lrx`9)R-U#c!G4hkx9{I3HltNcc%bxDF3GguJG5BY&lu@E;V1D*o`A%5pi z!0pw^sLzA{iUnkq(Ek;8q1XHHU)%-D^qK^rp!?qD6^yNK9n+E^k}HbvpmH_-CmPp- zSVE7tJbi7B+<5oXKkMS+g}!_1!SHo5z6@)*Nzj5rLRlbSJ+W6aJ%ZhZJ?M>!0MX9^ zM*`%E2eam6An`0x(E;dC>4A9V^)lAkcXg4d0m2i3P1;Ds zuqe4B1Dh(y2UkVDO=6tF{ZrS*SV$ITDv*mnK7&`s+0C54zBu{EK8J?eFAggZN)Igh z&q4O&O-zwE3=V`W-q;Bh8mfV4@5;kd z-}#8G1hH+10#+QbZ;+s!I5|-JtmR(+E_<#|Xa5F86jR&#_x03tckYIKyqN|E)TlaC zLFGFFvjjt>1ur`BcB_bJD!!cvry7y<|1Wrp;K#lE{~2%5p0K^(M)eTDCV01mVX`GR z&g3;JP_3vE`6p4s5>O-m+LwTuQtLKbAJtCXqR=nq5M#k-ber?R^)TP>L`X_Pcr<&Tzm$C@4ujn{rUSt*lj3Z zkZy5?~C-wBHFUdD!$ydb)1jV{0sL`uBkXVe*%qWGiw0aCV(mfk=J>fW7F_ ztAGv`wm6+7lbVzumK{bM$z?stpxf<94$7eki>H4|#%tLs%QJYX={b>+|G+p?Tuj*z z1K|f|JAEiUuWOXwrYjI{;YkQo`)e!pS~e|F*G-KMiNdXg$1t0gRnvEZ)g>A;{kFC( zt-~E=Hte}<-E`;oO&)F)ImMN|@^m52k0G2!i2M_y=+V9xmD&zH3*(LI!o7stwcg)B z+nQ_8nc4Fz=kL1JyuMJ}?cD{XQ_mT>+AoSm^Nk-7=PbQbHL8qE7|M}pJ$uMZd{Wj5 z9=|aR$f+mZJk7hFa>+IaXinjp%LCrbft)8lS>xo=*mlO&oYu$7WXKlCW=Sugl@~bq zS!rsOlkom^TnOFsM?JykD20SRB(QGT 
zGAjn*F-F8LLVBo3#7+l#cjtLe1Z2SU(^-&uNb{tFT1Na~(ZlsdoC`h9CO3GeU0>?fr&sEa#m-joIwGgCUoT8>?4L3XlVe$cz|nP2 zp8q}^{eEms2fo$CxZ#%)&5__J$$_86EM3<%9JCU@Oa8U8aeX1b!D4gGqH-JM0V`<* z7gvF^UZSy~CGKgiVS)0=&dZ%$*le(Ld{P^v)<}FAme-b zzD>owiwswQwokvPD#?AX9<}uVfnQES%0(%$_-RuOqT+ZvIu`&z#q~T-6P>;iAa!i{ zadHO3ZoP8hoPP7~d-!N3ElGnW#sH9ooBA#g8LwK^N^{S;#kP&s0tjyN^Yfr_da>Vn zzzt?f4^}?i__WN;%^huy+xg1IBxRW#yG-wJ98rYLW)+2Y{7koG`4V?7_=GOwD(d^s z^wOaum*w$OsAon-iq8rz87zE1;&Fgw16@%L>LT|#6oB0v);cwz3HX+4r5Lnk4v!m3 zh5T$v_{!I(9Ma;`Eif=g*Dp8s*Sw!hKEb zGHz{x{)a&A&l@p}8u&+e;<2IGU16fr+sF$hx1l+mcV#OH++$BpmCN#2ORl{N==eQi zDy?)93^Tn7wup!F2=bmH`N;uJQc{nz73lGR*iq`W-~B>wTKXSD96C>RQY14edzWXX?;T5bMx5kn$6V@kmKIS5?e723-)3u_J zo7u!iuP=JEobXO9RprT@i1l6i{}$*WJL%plM2Gbk^K!=qu(qdO`xvmkAn8T1SP~2b zV|8e5git5?8f$lzQRLDUX60VxV}A}~^$CZ?mQQ0M^8f0Zs6=s}-ZSF!+>)EXGZuev zc`2TvWiXNJooS5lKWonXkKZ#UO^%Sp|F4T5Pretv#6H8lh?d3pJ}TSTC73J#Y&`&(sd zvz+rgOvqDW=JbT@oib5pI}NfkkycH$&icRA>Rj54Qw(Z3E0a{h^6Ym`*!;wK5t}UZ zLuV>5$Z}KCRb!`Z@)u)O!3GNo#rWj`!~7SGd42Q;#mQbmOirNb7`vS2*M^?Wl=P!bS%V-%BIz2ljT*N*U8aNFzdrExV6HmU$*1pcQ8jE9?L=n&5dr9d;F!895)k^w_KZ$N z8-#t;zh0Nn^HeU)s%m=XVpT`0{qn1GI$Qp8dc{pW*xre@d~L=Wqk!qK$}-1H1>99J z%bZZef!h%qr<)i#hzU8h^6h>8T<`7!hc>Cp+4DxWJe$;>u{YhjzXlrzh6cJhm`Nqd$z);)^b#W72FG}2#AOv zkTKcT%gYBuM9EbIj~=C`rULB0-l+9-Yw#D0Rd;^Ac5`cI=jaU1VcZdyK^b_C*Akfl zj>+xq{f}uk+8im3)?aHrevAq*Z1){?>dYiKMp37K`czlLX>?@lli`IXzLusYufqan zi{vDt>47}WlP3r1PRYmn4pDaVoyPR$2#APkgubTcyzhKoi$78>e!(w@ewBBF87$bd3w%F;j2IgVS1ko8ck@qUDkAzT#Jf} z%^0dO0OEl&l6J%wfrt%20G?rnEKBFBk81hvuvBFDF)a!sw5TGQ&<2?SDM(rabdGp3 z!H1&F-iK>@i;ZdxK;}$kXY#u|wyB`T4AXsTm;HCo8CBnTg4=XSrcx<)!Sg0aj3%swiAlFHQ(31p(7`rkK+z78({~~wDOa^ zr^EGM=UC6DBa*1A3M@WQ-OMA&g1SaQ=5X#!VEYe0_iQh$$L~n?0yJd&{mlB$8o$PT zH+>VCIo3*_(|snhaCinkO%vOGHt?w!&gcG?$C8jaWdSBRRQ+~80lg<*kIey3lly$O zm}bAb{b4X*dSO(oFZi#hp{)8{0xJqkSK|-#a@V4ZL}%IN<`t!iKk3&4T#Ou}lb(;f z+uqL)g)V1GmlvaCGDlnzsZ7qw+`Q_VN!a|$1fy!!=am9^jiR@@?=Y{Ap{k_(_RrV0 zr^`N8)zuMj35IBaxYW~w7=3evW4~IlkoeU38x_J2AnIeIC1+%exGFe?$Yr%l-Cd-6 
z;XXb)DB}+ax`53=_*Pv#t^2Re!mAHUwaOBdHJoT^^Kp-854Y5$JU#j8K2PNK0LTa~ zB3}-K;_ZhV(vaw)5hX!4wheX}cGfammhC+FxBk)eM$OWbA5AXn{{H^v+$1vSJh^Rc z{u^@mdl1L2kZ`%Mh=>LoDA5qTfIy?)8HGJ60s;cnPSc1#V}3S3gK%(gWPNvL91QWd zI14EK!`;;qh6rE0baZr-+t}FHf_n^&2U)IXA@Y*5NI}8-I9FFWd`yQ|VC{|IYAtH2P+#SZPxDSmUf2r@wAfAF&gEsaO+hA|q zb%*QW{6!2=lF=`8t;LrU?sozs{LiRy;CYFg_P&SXFO!*ARw#T)QeN{032=(GuDz*; z3#<>#4HS@Bgnp{&C9?>xac%K&TntDvRH+8%l_pe&6&=CvwC;s6((3kmlz^5s(lbjY@QY?6Y?a=aHA!>PEK^u%G)fiQ2rrzMjwo zLJxT3l$4Y(F@06j`PjErvArAo0kjfFi~qV*3b)AWu)v-NvXeaxyaA zmIcWEREdVY8`c`OZFp_aCEUu!#wMi3?`$6?6-GzpN!1h=_aINqYW%jtVpv-qM^nK^ z^&wD2xu$#WOS}RKE-m%vskg%}0W;E2al7XpkdI#*ox}k1u|=iX7YKs!lLbEV>%mE& z2fm4mJ3c*)px`a(W-=sCNjum8zXiUtQ+u2pv=W-#jGa%cgS1xmvK^;s;YDZ>G<`{w zJghb$0WT>S+$ID^IX4d%LB27Qae=5k8|7T|+Fueh?L~D*zEI6L2X?Etl0a7e3mYKj z2KuIWrm6BHGO{>&2FII!r!42dXUNJHrmO{e`1rLv$PSkiJV^w#&|#glyBj`Qe(HRM za0SAlAksxl{XnBW!dYrk$#u<2jMqh>(gRr-S;e8;nN{6bA#w&MRLvrURS~i(EC8ul9H6W)?BIqN6}7$R;!ukp+S1ksaaE(G z0KiJF#tKCL^&I+UajV#D9DK5QYtca;{jUy8uw;b`%!$MrpGqw?iMgIrdQ7>0=DxZn zpG8ORAB)@&twtZb=ELqN0uY-0WC1*hQI$$Q6+2|&BM|w8{cOy6e-052D6Ew5gf)Ts z?t`tvWNRa}>4^ffve()Eaxfwa*D!b`Q}e`|V$tzNEiFXcP9IX{GOw+zHBrQb?g0>Q z48n(T<9~hba`c0}sWt^jtFIUlFD8DIFxls!<&s$b>#1C3^AB(-x5OG1nWD3UZs$Me z2yi&xUL%9;$vuSfeePd~o4EV5J z8;;wjBp@v#(;bPM-gFv*FddS~@6ZC28o|K@hs6dXQ2T)p3usfPl{USS+w6OHM)Yg7 zDfVLEKR0?^n}UERGW_QyQ(&J&E|Cdtyr~W=}$~^K0ApvbYf7R=v2P!33zXxr_L))pZ;+w2M!=H z_uFfq4&>XrjU?5+0gZnLV~MLnv5ei;Q;m2Zj@^HIVf7?k)ivIaZHjwZ)USER@TsET zEwdS#GQ<&DGEvqTUJO46uTNcw!?^goN7j1arrLCp!Y>+F-mni>o7LJ{jV8R#-=a9w zwRNIc7z0EQCejT3v6mGztS=f?pLBYw63Sopf2j5F$t=;JK6c-Zey3kxsKc}HCu(O&|r(KNVB7uwZf zKVJh%MoLqIv7PCwB|#IB#38YeN(lwqoj!WZ^4)lV3`2mJWO+gYtrLZn^< zj9I?>=oKbCINv3uzU_X_{lf&IoyNa^Y3qC-N5XB+lyQ&zKBeoS7)$W$a4UfK3=p23 zob;WTeo6fn@;P`j0i{1zGLq}U-o%8<|NJKjkEQ>~G#wb6774bOQf+H%6P(TEX+l2^ z^H7}}+~41SSHTp;a1Q1$tXrn0K*lJhF#x>gJgFbuedXCE7kniZC4lv%a7ABkeWf*I z$Y=|=@+|tzXJ%)2=9*CAjy2Y`^|BDC)`CJpy9+Q-KgKd6vpBoxUm{~Zdh)Vd=Kyg( z|6l3*fij9%K0oICV3O+UVrVSvw4B5}?Vqv`0wlt`B#Z|fNBd8QwSs~NmZORlHk*v% 
zR@fSE729tlwM=clcZtmlt78)RV(PnO3Rf^sxbNPu0NB@F_=cs%sI|u5)DvO_AfbY5y$qbGH-LO8=hq{&Y=o)1&3wvkgSIXrcL$dSZ9pl!WV)!KP zURSFRPWf>@50!2Q=cgdyzktDDs3&Rm<&wM0txkZbLNHTPk@XKmM9%e^r23j04C)(AmT=*j!H20+3&P9b+^)W2gu{xj@DcN6pYWU;|x zv2hv!J@4PQ9zF{55Fz!qTajKxOvgSfe9dR7TZNaShwz`YCNe4z&P%gE!I9 zCU8;#M{q4WS!#9z)aZ!o!HNh52F8XO6M-uqxL4I%x3r;r&ywt)3tSk>)oR=_vwD?q zg$%hX>)5l}6K@?~KykXh1%2?{^*({^`M@Qj{#M0V`BTo_NXav8r-zFcFFTk|!PR+! z{Aw5`IDKl8rsOzV@LR`QP5qAlZSs@YLwSCk3M|g*>x1H^jK?J0V1_zF==Ai$lzTPm za+tfO)To$)0(H@2BG*F4n?iJB5u-bwqkT5@=ycg)b)Ij!huDPoNRl>{>++oBipXd; z-2;8TrMUFfOA|2IT?N{!$^P?dQ!}}(iZdNsS_A`(MvhZ5SNFp0=h+3h{z7wRC^RaY z^k<#oqdw9wW(P=d_5_B%L&M7)IeHL#$hLBeDrn-uLrg<*UVT21tEF@GBeOjs4iXWQtL?$sIh-dr;L2 z-~QQJtFmORx_m*@-~H#70~CfpTjX_3Nwg%p$q`_u01ZYXO0m<1!qz??!dIQOXn}LXF{jP zg5oZL-_CeNM_AiLf{@p616BTh7|myrK&QC2Vw+Hd!fuFY=U!_dZ5F+0j+ROYi1D8e zpnLdOkO>wO)*pX(u&6~x$mrz-Mx8#pdpQj>h-B}`558!yWR3^2eXaK>&~IL$zEg2K zs+hjDE+%S!QPx`^p_~5b8rSCZNcz9dcsPl@z&PbjyI#15Uv7tLOXoOhgifnTH~De2 z{lJ+HIJmsuP9J>Z0u5js0VCUN(}yLh<|_gRY(%GILfzm!=z8&E$q&d%IH%c6GQmX} z1r|AeD!vg-xqVrsEiU4}kVf=306YX5>2f%hhn$aLOU(yBYt(&e7!EK(VA!G+?i;a7 zshy|{#>Ts)jrL5!gw{vd_}tx8+)Zpr$1jywaWu_10A6Rd!+%+8-6( z7;z@1sAqL_I9`R^*X_6E!KSTH|LghKvSRo{10hzoA=A~m7{5h^fV`k+)d-fwC1EfD zYj&6Iv1`EZ(s~qT({NF04kOg!4h$(P(2ZhPF2rdFU%<=Prdp zA?>!#K;dr2USc-wDUmS(`~(~X{@`jVng`UBJ4$(lrfXlv{P{yB)+%Rs)UCsw^p1`!Y2!#*hv$Ld2xQW zZ)-JY57E*l3MTtEsf3>%tY)XCHaRW}$ty?Bqu$U{(6X6aS!NQi zk7aBI7RDC!5U>w_VNsE5@a>GIVNaU(m@n=b4TBB(;X1Br;+&ya4y1xK5S(K_BF__c z{GU2(bT6SAHYK7N@Q>o2(jv01iL4FxDr&JvszZMZuKyM0W6%L=0pz`(nNKm24ct0R zRO0V?y6a~@f5{F!dCVVAYUsYJkWR6rpL`6)suQ32F%wf2!KoOl`w8KtSJB%PlTJvB zuwJLo*@Ndaw;_Z?v^%f&828Zg*WUHh^sQ3S4NdaQ)Aa4lu8F;gS%0V%DrzQP;G5q} z^({+GLpWzDDB zX_V2N-3dyL0x8b%TNkYFmaA=UHVQ&zqrdJ%Epy3tqQ|$G(Kb1lrxOzG#)IYFGl7kT2zuiQD3O6Vc^MOl8ZRgMf{LLT3Ul)hBvQuc6{WF>M6>gBap+vUNrZC8gl z-dvt=r?Jt}hLV>~9X~VD)+P$fdg>wp1bG%^0dvgsw5UmOnBoD{zj`X4%q3$1?C*%;fC zq@Mp35W+R`y!`2Y5S}THHT7Npmaqv!t6&u0a?f&Po2=5SyPs+{Ek24|Z>lvV8xQ!+8g-!peGb 
z+|m-Rad@i!DICcHpaO19?e~6cd`4z1oovNk)~kaQ;x&qSHW#%USwD|@Wa!~yNYlo6 z){6MYt>YStv6Z%mt+r~OiL&J{uhcAr}kY*Ie&yC+R16wyhG_9NxP_F|>mW`~xM zNMN!Lg^{VOZgX13qlX+b?!f0mgUqmR4;@1FXU3iH)gK=Z?muQN>$Nn6Pn*;IOfjr1 z9MiT_VWiq{;~S6DUpt>^1~ik|4|Ms)Uv57fvPCz`v3{52gg5ZpqGuuc3(GAI#)l%m zvfNi1lFWs;U$Kzv--V&3rP9etg*C~g0PCm!(8R;WXeHETeK^hVrs?yX#%9ic0Eh5n z=@Uuyie{?S=fg`6shQb$wpb;coEg+HUk9`* zTSxqGU;06?FT0%}B~(0XR#MALvY3&pQ>9psfUTd4D@Ab2H>M}<0#jQ&we(!b(sRiq z*|@)!W)c(0<1#q$b&QBf?4VUz)ANc@3#vuY;pVgF9)zouuLh3(fARB3{D46_h2Q&) zXPpW5J0$#m(ra18Jr6rhP97Fs62%NhrfBlf(GA}m)MWg254s`K_k{8D&gXoTvAuO= z^VcTQP_r0kZ4-5+sY|u(xw&E3{Y6QB-n=~%3}5b|+TWEfyv#wp(lPilBQmkPJCkx` zp&IOfhEUue~4rOqk5^eyi ziMS)+FIKvxz;Z{<9Ic$Wwll9qr?W}UEEG`#=_dZ)wfgvn65(YyoBXga;|{D-*js5F zW%KtVdd{t$WS7D70tNe49^F4MLB)NqVB5l-_Uz82K>gEh#t9}*#g!O15;^eFGouHf zzwP4e{L048ZYL*)Y7er%F_;7zkdTgHDn_PWvu~Tk1m%a3Vr1GeE6PJ6=%xN>pkmp# ze6?%I8Mc`IfEJrl_RHRimG^WQ*|WzyqlH)Z20Yu(g_(gp2Sux)q*jP$Zdd4xT$?st zTD6K%%dy4(iWpHMb^Ihwd#;iZz9zspu4SuRoT&WV;u|ePY1wU#(&{2Gnq%_L!nVOg zdggRw!us@GkcYprm6371)qutb|vN31n0%&@Llgyy~#QU#? 
zI)T4gard|GJ*6CJauEQ26Yn>k%{r_*lK*8Lg7Y0%hcvgkXWG7AIPG-^%cLR{5dQ;= z*?k7j26DTl7{LLc<=VN{Ql9vVFP0Z$jM@hR;;OvTsY>r*6w#$}Pd>@9t1XTDJe%5t zdy#}~>J$(>(q|MqGI|g#jq5&r#eujisW-?e1N2v5AIBgeJ3T93x3xCWLV5*`3^JNyi1pQ#<=>s{F;xVKH zIiHH6vM*zj_~5nISvY=m_^F^s@@eFudV5_}Xs~VQhck&UkZJeiM{wbR@LuPtGfito zN9E|`aeps5bAqCzZ?W4^Lh3&PlG+)Mn#)h`wc}R*d=j+uz>AUHCkIZoy1nG_*ZZ*s z_`k=Siohc!{5W&H7h*AL+p?X&OJ0qAA%OXAJG$XVW^-gXANa~2$fJc5)H=;>#!OFc zSr=~gMs#t{6Cv1qQx{se#&{8XLL1<`IRs6AbGt|kaRdw6Joe@P+&)0{r{*Q`z8Yn# zv|3U&B>o(odtw%+rF=yx}!2f=nNc%6ZiT`Uq5!6af zb#?WXfU|ZEqZZGdndLd}FQF(n+by!wvaep{6c-nNwzjeo^E)LA{yMU22->%Cad9*> zGH`#rXnmJOzT z3d)`20*=@9U>vhTL5EQXQ8Ag#W}!}RwqlvnqtRjU<@(9&!{5#tRn^teF)Yh=9&=Gob`J7Ab#*Q*>Nhn4HgG;0y&BdFDAJK36rn!y^yw?(TYW zPr1=)^;V(&<5wtjeP>^XBvC_C_LVvKbXhnl!~*>RI_W>mD*6vlqw-N;s3WJz@RdJq zl0n|bZw6$JMJh`)aWiX_kA`aO)K9|tV9~<)N->k;EiOO~L44;vejR89lo6jDk-of= zQdnVC)x?y$=)&}@Kvh*$SYcDsTyJlIVoJr{LcOeN@)P5(AuysQYiyj|sp@s{%VOjD zXV2RSk&U>Rm|tf&&qsF~U7OpaW0Qep$nN^~v|ZrUUO|r7I~LH&g!tN7dOE^}iXk8K z^73q;mJ{TO6u5q>4}qw@>R(&nI_ys##Nl8g+*cJstdTniOQI<1Ag=5@yxdn?fAsWBa zH1X)J(Tn_>8ljy*(8=M;%9z)0;;Gm*sd3*?2&p4ULz?#|= zNz3uc{9Ir~q|=$Ss&`#GP619<|5kPkgVsKR?dWkw_f?B;@CZF8Mp|WDHLMc)#&~CT zv=_F=obr;U#<>Eco{Trf|v$P<>$L}l_t_t58tqXfuQB|1M zY3^7SyHyNY|EzcMy!L2m|M6AiNAszXGvV?&_t6l+Fu-tObFz(dEcrb?fL_t(T029b z+4q+Ve-1_4c76AOX(2FsheW4uye+}-=81-$&2)f6fTM0zdrFjwu>00riG~$XAaINd zn?F6pmU~`OZB3+J2oxPu?S!$xK@*tT?40-1&DB}LDzJz7L$84G;xAKE1ktQI6**l) zLkhhcsM;QOLI69W16Wz7L%OZJ>D?_Q9w+N`t}q>#t$fVN+>A=oeD?5W^T~?1by#=_ zQAp6u$ro^#oBXZ&t2j;U+gxER1kjnqxjDb6i-f57_`&hcTc#YKsI>yvHP2K*q`Ptwt1Xeu~zQf6%mgpx;pEPS0qb*&Xq6NW zs=tl~r(fNcNKWF{LxqG`g_jZ~6x<)Z`PW9+6iPgV>XvFOSheGDmO1`0!U1MZp&h@Z z{MBCSEgsWGs-M(AQ>QzWf|`*mJT8{PS=pLZ7+3sk;0fqP3``7jeuBOq-X6VCqWcwF zwh;8gP_tD3G^FV2%zE+T22H^*3q|!-->RRUn%32?kFib21s7$(@)z~vpGHqr%PlE2 zFbGB)zNn)U)T6`5j=EL++?o@Cl=`tu^I%fy!=uh&eh-fC+

vi-Flk*>AWPqx$Fl zY#bedrs!Pwz9{sbI9Egbz2MKl)5fXRYFyN1&D^$BMmM*j1pXjd=1b=L_hG%jdWk_P zl@l080nK#;#I+Li-NxOO_H@E@LAPW_#C0+s@q%Hn1NGe05^>xbKp8F-$LRK z>`Llw37^G4<3PA+1h}LbfHAkwhbToZTO0JKA~xvX(>x+H4uP3{xxT(easaLu-ri4( zQqQ;Fx~u(7&N{pLwa=>LuLBVUzq{j{ z7G0f{lRMl=K6Etz~JYP zUCG8$lMAAW+hW5WH@9YNfvHH;=MKABNBh>yLKt$`%#e_+Q17I|2?-v+k6zgFX3FS2 zhXNcZx^U+c9M?lK>f-!7j?8A{$qeu~DU!dKYjC(4%WSO!=n9`*@?YqRGtibhT|GUM z>t3r5`6fO$3NA9Jdi5Q!7kZBuVv_7tR8smN*xhd`;WBR?@4+u7NFGm!16!(IfUdOg zksB1Mmy)l8@onoEcBvTZN%tGQ$B;4LWuu58#l*Wz>crcp>OlVOvyIRz;4=aey`*V> z+<0mcuvUWR!ryOuNIrU$La&(I{gh31*Af&x(xIsY+r#X_t{Valwa*`}DKbfOuyh~| zr}!hQm%~j>OgX;vS~BO;+N2iiiwXDUa0u$&hw26&)#uQ5 z-pL1I$QC*9B1hDT+D+i(XW)0IU*Nw87anC4EZ9sIs}Yh~ni9h}r#u1ditE+do{l1N znh5rnfQO}Y!vaswA!EzS`5rs`zlCBt6CqTIe304g4`2Y~;V^R5{Nv_A&(9HPNV1qj+q3dK!#ogM6`ha8Rjvr}{cbJKFnz-avJ!&IUi| zpzwLvZ-N!yZe?X^U4w9 z@N`PH_ff15B5;Sz_Z5muMmL`8x}G2k7Cz+4tnW-luYXN#q5g>?a@${vqkY` z<~VxGU`wX2J1Qk5 zMV(zh^wN4VjwJh&u~UqH7YQ>s)@F)0z=DHT_Ho%b3m156X)z4JYjXkM=ezTDo1-5A z)gY%9S-*?!2RZNF1unUYib}bHf&%)b+s&oN-)1F%Ndm4;d$X0<&CSR<>Kx;eL7yu- zUq-y$HAg%C1irOCp!$5E38aZXkr0TI)5L;Ab(Y7+an53?%|b8`P?BXy z%;O*PK?bcbFk*pG$w;Q~8C2D3YsP&G&79VE4G!;1!_Mq7%3&<`OO)|+a|?BsmNpCt zl8Jc^wJO0o=U$c|o2=Ml$5# zevI0Eo}BzhH`G*Mc=UrAQEF?6Q6ZXp=9_;GC zs^rc)7JL*z)mJ|WSC%}LgNw{I%jz19sZuQWV3Ao)7Itgd9hSa-GkQDqS>?A= z3GWj!5r)*)r6nceAIHtjXidkn+Ce=>V4nQp18^#-{OweNuY!66EiL~Tm8?=??O0-oVo*^2wh+3aYyQU$h0p!2 z(r!R+Z1tLKVUC>o|IlERDtJiPa+b`F6D7U|k z`M9#x_-|Gu;~CQ8(UtQI&#bg`@`$$ieShzN+4-~o<*U=b?SDzawK(}}|BI;LOU}l& z?Opv@{kLHMi%h4>zj`RDqD47oWgc$=AN5*&=%f%un9-WyGV%mm!}+;)^6AM*yDMx6 zfMPv8J$fo~a#7*o$U~Z1T7%O78K269$2iLR5UVyge4WpgJZPe2OrXU<Ee_6S>eg38N&6mV$}cf?XJIUE^FYdRk8S?XoB8ACY1W-WjFl@6 z+h)PSt%2^|m5D4QDFSvoj#}@2qR!Zzp3DP5#Ucy=v82WiFNHV69hBzMhp<9=s5+d#IR@*2tWg z=ISD@%{kjqtt2&Q=aS-;ADS?0Sh@;f(!=3PAHzVR0 zBTZQLcxM7^TdQ~*HJV(Q^m067^~H?|q@toGKFQL>#RYhUtpYVbiNT;Vu)UzD)ol}7 z6s-1nwB`ti%}GebM}k`BAI14{Iu@|s)Jn()I^oyVtkLnaUnL#5e5O_;8$0x#e5tdY zEMn2nb|RIz^M2~wB@m+2v@k1Av%^ib}jYyIZHVNCM=2l 
zTr)U0BBY<3@TFJt0PZAp$va+>OUiGj7@`(bU1B2UPS2x-C23#VmOQaFWDIAm@8Nn$HwjF&swtJ2 zk~9|%DB*1^%^%DrcRcG$U^W3u?sh_)i=)HU8mqGU>fG-gbYsBB@5EsgU>D8|w!gFt zr9~8CxBqn4#eE!n-q280P(k>8Q&j>wEkEM|+5grR3i=Ycxw)V)>XIdgV80uNU7a1I zOu6fEP$Xr^tyiPk3kx_|Aa4JLgt5*v2_~Jcc*Q#xl&~ymnHkXj5c{8*0 z;ElM%kMCtK#FYGt#}3m^n-@QSC5gwer#br7*{nI|wuKsb=1Um98er`#>1e!te*a39 z0H8ct7BGOVJdp9up(mSt)a<{LST_FrG@ioH73a?%}a9)DLx89$1HEs5RoozGEd0q11{ zwr9McGg4Z$*j?~<87_jclWFUbFXUHTM5Qf~{X2rge~Gr1=Tf{zE+oG1e;$d zO5ntmcV0F(yhcNQDTK(=XyXR-9rj5#gZ1dyWLnivoRW=(LTfaO1Z?R9O@A1GH?KQuCf%c2s{?h)R&&KoAdh2#26xT464{ zw5;rC`eSM7nq3R1s|pJW%Kt2W=_17wiyc7KnCo!x>hDe3Y%+%D!@uQC3;MtYZL}*J zD~5(FE-uTV-%YxQ2$th(oXlQtA`SyCrE@~*g~06`NvG-^>z6jb5+a>mlLZ&W&b35N zv&1Ua_|(!GB%x=&Ng9N`1;{?#^AsqKhrRI9g0 zFEQ(gZP18}_SC^gy_6q=v$WwaLlP7-J%8>j9?su}Bv;-Xe*Ad}pOJ^Z{|wAdrP}Oo zt4vKux~+4D3$UF|9AxDeyN1Tb8~NTLI&vuLKVQ5jzu!)P`7R7OMuEJQm^&bHxLOR! z^sYo{SZY8aZ=YFH)yV0YGXY>Js$V}1N0pX-7E7=_`f)ey-8%W_)73ok*pyY%x0N+v zLiT@eewqKX`DH2tY<@ZLtjI1Z8)WTWHSSd1bm;>a)Pq@fbRR8cSH18c$NpEU8|IjY zmMArRsrklv^UkGAMR+rJ?wLt3M^ToiH-IUp z(t?Rdvw2H)DZ}qOjkW$vkl|&Nnok;9^QEXr)ihVe0lmqT0741MV77$pS(FJZNX+Mo z8OlY!%DQnP>mKbedOLJ%KLdevuA2OTPzurpt)4JVB_t}Yn#jq?=p}kl7xA}KRbVJv zo%$y=DY3BM>0Nyog0mE-=i9%hQwuRgP)vh2v3R5*^_r_Nm7V_>HMx=-h2z_cf@V0(s ztFiJIs(A%RGwAgZYjv1r4&q!$s6{O9A{1UOJL9K)EUdRe$StDKT7F(!=**rxEGKES4n>q zCiAT&E5FDYA9n$r$EXpx4(nZ55kA(tIfTB1ul!CP{sni=PY^h3?l>f%7R|nEPf!bZ zDU8|hahQnjw!gVJA`axB+abNRSRJgTdKG)H0v#Cplb=Kt?dLp%lrdL3idP5R6c6Za zW8|eRVj^bT2c3lJI$XKF`;_gH%u??^H2&=`lDgKL;xOskRD`o;(;~K}_O$Sg#!3W8 zSD>P)u8V&`Wb$zLa|pgpPeKz$GLAHUhY3n&z&}f0z;XD!^rgshtkK~s2^X=1@zJAn zmxM&K;y|BZ>TbL6zKc2SVe&tEf$c>30)ZM(h*K~R3=wGnm}raIqhnOZQiH}3%s z8cJ^OlH{+4=qEKsYlBCron6Kv#Nn4VLYO2JqB$P5o-?BhZchsH7WX9?oy)clJG1+Z zs@x~mK9tYZ3t}AHI={0p{o(5OB-0Nk+2>-X;dR6B4IzW~AL`~X(I}`x(XJVnyZWZB z-{%nXl4pHRO=L1mfJ7&jEB=3syRn?tAvxg=X~Rh`B+0!os2;}8Zm8bfzP02n%*Q25}z7b{u@~5 zV)Ev9rFu(#&8aarl^is(c5zp?Xi2_LMD$xOu}+*+HSm2}-5j9{n8aXKsn5lGUUw~$ 
zCBf=+V4}2ui3BEm;5hu#vCA`cjY>&AE%N7`=~&RIEy;^e2;2RC!zLgQASWIsD5MqNh|^5A>cdOD~i@R>1nfQN9}7}{?=pqH%Z z?A$?g{6{6fzrX*HF+aB4c7AHMUjLM(Kd;Vb=yb^BwHLcxZFGr5^05sK-h_e%oC#;PUSrviH*q%KvhI zd>=?JcI$x*JuEtxgV93Ll^tpwo8yTHu)IS#E@4tA0VCzKPAAT31sgV{f!4$2_WgsQg~Nq$MY=z;5EK z^W!^hU?!U#Cp&vxQ**P6xp|qbjN6Zp@KdngX2(Px0c(%fnh*@s>Erqt9JQcPqD@Tz zH8%fj_p0Voto@1Yc)6r6EwG26!VQ~U^dOFiv~MBn9me5ulWwaon2OXrmv-TmG6P8) z9gEw!dC()AJj&yI(+S~TF{=A}l5Vcs2z}ti2tGSL04Ht=LK@8(Y<}{)kz~Cu4@ABR_eU>{hL77oB4sNuN$m_3^4}<7#3fDx#t;a7>;H z{)2l_(azM>wak8+b;B%55|~~3!M!~Ei+f4?!M(iqDpP@{l*g7V{Dg?e`Gw~f#Ou#L zlVF+ecc4Fin?%zBvQ$pr-(VUgMI{sVjx1g^5}R`i+A-|o37yak&1axK%-MoIqEH(y zr0hf@crxfeF{hrq_&-0p{(vQ?6w}+Z0|5h$`yzJ(#xEI)6nLM192}eFvO9sIhvfHn zW+%qH36H<<|3de_zR0y}-c&D`fV+y3 z757h>v_|q%1{PdR^M?-nFBx~U6~GpMqnrmdSl)zKiqk)~YwM>e|4ZI@BooH;@~`PEZhP5-)nsQPd1HQOx5jhqW{!O_W6>=#>)Szup;q)aBJjn ze&JdAcVGT@jA54lWDNh01YlNXw*M~x_)15^j<^lh_qq1;1fUQVUWXr{_u%&Oo)nn) zh64?v0kYxsn}4*u{VlJkg4WB7HJxGoS}|Ezfty!R5mHw7%MajnG^&*E`{Ch+EVbX> z<% zxF$_sG>f%)N&^hcSo0S3acA}rAsi%q>L(Gldni8%+9R0wJJ&^GVN&GIVwE~-F1JiJ zETKB>#mwyP>xGI^`Q)Fno?^wUCNx%-pe)u%SRnR0hP#rKrzlKCd;fZFdQ*<}yl5K> z6?de<2ge@JoOVyU7wg2>HLiYcPIv?2l!Z5^!|SCXr;hzPZDYERB?TA4YjWrORNef! 
z1E&cQ&#ZpygO)$iyKzqZ@4b-PA{)8k1Kh_b1d3k)w_`I;z?A%cy0atu5&14r3LO4*1*+@Wl z9c{nQ`SX~lHZ!E*KOM-W*F`7=X4c3fXbz@!{fpmOK8JC5yXx*|M!!dkF!HMI)(im} z`T!dFvZ?~6K5uim-ihXa`Gml|2I6PlQSUzu%>BlaJBX#c_;^lk025Gi?*rB6 z-P>0m{xVYo=!1qOnws%0X+t*d!KBF|NulH%zk6;xU9Yr^eHQ8FdxztsQ}BS#c7V?= zdVoxaS-6oySM(^oPakXuou2`U+tF5bb8XmXRCezmh$Mmz@UEMGQ84#OKYcuCHZw`| z_811sl&_MHIRDm~@-0wRiK-Evj(y4tTtJ_jqZN&`va38}e;Oab(b^~7ZaWmv>+t+u zr%*~oT&t;ZEFglZATajtGVW@Y~J{L-*gJO<40v4BrE|Wh;*@5 zeYRMHT=MV3m*dP&*uPi)vS^YXxLL6ZQBO!WubVBW0bT6da3SxAY{t!CyB7rKc{%e+ z4@XW27`ZhP-^=ciJSZ^`$4CU5^p2f{S zTgsc@?x(YGp}mK(E16L7Z`6ixlTr>H;g#y=eT|v3$N*Spbfenof=ty3*kv0NFoPSG z-h~(bjqa!H$XjEX zE0j%4_fKf?k=Gn5;_G;9N=i)J4&xJgf9?ZQQsqH@bJmSrPa>C21!Ud^a6GO-%kUc!IFe>U<|YD!eVp!= zAaR~}3^7?5f(RsU^0^byYKBa#sfIVaP8LP*-er zmqP^*kyHi1bk&@Ph|-{Wg1gkxmX4}G`s=g{*lD6uO|de5IP)kN8;3Kmr{aRLZ8ciS zavykEu%x#S02jZj7+=tcc&C9enhKo!DYi>IGg#|Lpa2V@DddOdu0}MGfLn$ADfRae z%a%9*-S97)!0ydapI$rx3XpWI*wzik;ILXTO%j);7wzxV}vFM4d9lG#1nA%rO6}*sRYGwQwTWD6Lc>?oF5S9gA z6e%10^kUcH;t_DwLrZbVVRqekXag|DnzT*8e1Q*e)!6tgQijG6kYjc2jTw%*lBl5) z`k(56hKNZA2S{h#FZFb-JHxxTRSOl0b$*wJLNzW`m(jA%lZv` zjp;b%0@^g9ja8Xjffdrq^ZUHe{KuA>og@tqff7d%HX9Nx7SL3KAohJuH0#^bPo}C8 z_uGa$2k{CO*)#N83$A`~Shz$w%PRCLoT5h7F!o$WEB2|_f|af+nIyrEsvq%I3laC{ zYKs(7evMHNe^g?@vR$VQv^^`Wv_#GkypWhejQ=+Ern@YKK>)alKZ!+FrEDD+RTf0n zN{4&H=0D;-TD0B%?uSDtx@U!`2dXnn0K6l>fju{KHH$ZTIFSz-tqZaVwVK@D*&E$1 zPu7I{nGr1x!RT(Yzlj00f~3^9C%Gp!6_c=>P*XdF91voOGj>~^d`kOZzFmHjezsK* zeuyD=^fGK!46V6LJ7YLs&8V+&9y1KMB7*TV_tc#=$ zWy@HUo?9OWDz=4zM)kWjnFX@{Qdx4J3skTtmIumky1JQku*|%My#%X3ZBP0Q@CJ|1t1l6EHsr^(gwdfad0mWH&@vW-g;__VU7Y zVO@HW0fM-u2Eqeh4p;aQL_qoS$u-a*FmfnbxmORZ^7OG72t2UolWE04yh3RzrfaG} zIh@^xSIS{#?Dwvg8fnYgB8twxwtA)+`6=?+`Soz{Xy-*!jEmS-XkrIv6 zBmb3QunEd(JPL+6Eua%DXok5QQ(3j$2bj{jf*YQ= z3id;U@SaJM)moDsW27`B>fDN=h21W>xY}ZJhMQ+k6H~0myyI!r{?B}wLjOE}H>-{T zR^))rQOKkI)gjKQXB`NO(H$BzOu=r`@8-#2ob?1-9@v%1wSr*NuL7PiRga}`bZu1g zO#pA=?vGYCQ2+o_^N==hL1E7ht_=8yMnG0>rSG>Cst#z7?BTX=k?iX9#*5VWl=d{K( 
zqQ@OWnd!ka-5FC%)lx#8HxSdq9~WQTIOsZ0B03x0QHUO?uOIyFcv*S<--QQ&PN`0~ z;Q|i$4UbK)4wP0Ea<{b%9Y2zEZ!LvNTfeplVYawy$m^hFeNFCP z5FSy>)U)Zn9@!TwQ#w*MbvrTdQ*|6WAnFc&0w`?d=ZW>*5_grE4_&N+VJX6 zg<)Uf`>&>3-%dXs>N)pkUd+GL?U_nd<^85qwjCW?2pH=9m{Dz5gxDFT5zB%-X<59L zJCkQ-<b)OL#xV81CJ5Tj~w?olo>Kx); zcZDO_)p{)-&8>}vtPi}`y8(Oh(%{BjvqS57&?D@;iPKlu&Mwd3NonO>W~M7}bA_UA zkEA!BFGH(0*4Kfg2kp&#oY;@p@3n3)skZ161WQlnm%5hSM(4kVF2O?1CmE;Vo;hJx z)dO|!J%GkTTNu}+-9sNU-tTugVWT;e+4^kslG)Hfw)9g|$v91iFm+0lH;BJRe4e<-Fcic zSfzx4<_+st;J^vN?HQ`}s=tp}J$^AKukRj3{_BHImgFGjHMIPf&{ac^Srd)s7?KHO z>o^B95pO;0Jr$;X$*4)YL_c zlQHISfSV!2aCt@W3#@3D#%Y{DlErol!1GDgSZfO=e5gr8a#XHi1IOQP!v-9Lr*Gov z7Eot@06vm5TX_jjUKX^kQLODghA8WT`hEL3rNL<($-a_=g@i5J<$@(eCV%Hn7kQuB zm&LI(9=ygky5-DdTj2R>@M*%yOMK;(>3wYcHH^+()+4!eCH{VSh%M#g7Lh~p>8*o# zFI{Sx=-@rA)aC%6>s=TH+A1%8mcr;4n8DSo(cP)FyVcq$-+9d08^m`e_DM8IEJIO< z?OPBEJoM6p-F3Iv{iDstr?med{i&=ll(0}=@D1uT2leN_%R|}z@A6P)7AB7WUmhBZ zGXm?oqsI^%4%i%Foc-H?U(@0EB6(4%!NaA7ri8_9SI`rCJ!#qi8dFZ1E0OUv{@b~c3>i*WoCdTl?@&!r#%Tc zh`i95#Ejde&G>6VAs$dIcgo01{G$Tdm{Wm0(O(T0dWYQaE%fC3=;RR4es2yr6m{dTkeF9ar)(*yR8N9T9*&sF7u|~qi7-Y=30N#Ew+sahC6(^G=bo@pBExsZ>*+B zIOpzcU`H;OOZN`F6#l!FN(TU)!riy;HK6B)Zj|<}2}%ZC@SN^tnuMI9=rw#(Zxj~< zAocD;_>zt|M=2zS;+N0YmyityB-hE<2<$}ofTJSR90N;b;{E#J;!=>oCr8JrkT2L! 
z|0gRD2sN%8gj6=bDJoFe#jQj~ofprw!GUMmM&U%AKOX^kBk(+JnVj^cYsz@(5ZxnG zBXpPfJ5G45Sgt3BwvRhO+)+?I(d%vOGSzq1$_RG4@KMx+)5#T(s#$PW?po}fdmp+Q zf+-f2oPM8wyp!S4)r31>2rQU>%#lrzN1S(w_$iXfjrqF4??zR9+$;Q75M{ifE=;j- z4u;5}^Qg0{_uGK7Q-otGyI)CE=P$|ycMzR!v8{?5#9_0j5uH5iwr5E=kgDzKf-!NG zk)hxibo3j+j6Kjhuu0Uo4U)Roe_wUtIk--=O9cLY3Xg`>4_;mzda_GF7R5C4@-guM zBIBl`o!)aF+?gWdju9*{6cxnBUz24BQ&zdsp{wHpdMtD3xB@*=G}oz8pg!nhk%j5nj%%BDBhF{4PYLFE&?j=;GA7ob|m*;6%eIpcXCQ33xkBSehm&gyf3ur1?V zxq)gQg#iV|D&Epa7m_mf>^AFI?({)=qUfWf?EmYDjnM>C7@vm#s>`ZR6c}vqnrpZ^ zU^RTndM}qa()8~E1oP!&6br8|+HXAPCX4jk{@jf1!8|ySV|eXk`da7QdcBVjj1N%(z3?~ioy^BaZ_41-lsQy#s3mG1 z_T#}8rwN1uQy#Lw4_Qy%ecfH1k(RByx|CIHm_QhE+~tF92tSIeFq8B~ z*ImbNkXTe;NNvpq$5Kp6YALh+%5PCceT-K5i0Q?>39Qa_5&u@1m(hL;>(s$HJ>cIdFJ^oAa1!vOKIP8rgBlPSX6b07xk{q1Vf5sZK3RiY|9Rw{gG17Y`lQ zfuvr|3d{{9D~)>gEF(fC#kH7)ld z7%X-6Tq2wLy{~(lQH_7`lmaBjxxD|v_q`-EH7Kl1(4P~hZq_~M5uYxQS8d^AuB*UN1CFS8?VYiFR)^t^8gv;nvwNf4O zD>j*rrBD-QGqMDhe|vlBS~mrkC8S}chwGSxVZen%W400yQJ z2)L++C)|i0*IBU^2qX-&)T`I*=)qt=YlwyPYxY7ViHYj+M2%KW*%rCz)Ct-0ldu-t zM{x_nFG+{IAwjTE>Rj_<_yaGv4eFWqT75L0mZlIlDQ24!Rumr8G-ZZ(y=Z)8Bm<~) zqWTDXD(H3=x1+h`md#V9IJeax1d!}NqZM3+*CceaK$OHH3(%ndcH_aK+m%b<0_$#j zOMxnj`WQx9wfnnJAi<4wW`6)5$TH5GIB9)q?-&M?RP3>c;U3U8z0~P60c01QP4!Nr zPGY~1Cy~;_M8hp97nc|kWxJhQ@WBFcl;GsEF>m`8E5{B<%t5_JqhYPt!HAwvk2=rC z&{KY69OX0*bQ34b zaA0by1o}{|+d%}NC22eVAvW)!T93hxC&tnT{M01ZO@8>t%f&n-;5M+JaZIejDTTH) z=&#_V)kn%wHk)mb6Z+6#)=q*j-tvg@AW$Bi#R<5^f6$ zxx!I1yEw_KtrO~QlNb}i8e8cJ1LmQm5CPTNT#NN43Zj31HMx5XwY@>{Clgn5--WzW z`P(T#k|L-^sIgwaV=~^J1bIrcs;e^}0ik3_R;rkwr`HPjw|^F$T%&I;^`gpP4ALLpD8p``W<9k;KdvqG0W z=Zc2`M9fS?m#S2bF1!@k<7i|# z{c;C+YD>xG^()GT!45QNPMPDl3JwUV7p;3*DwcENX`P;lZ^;QTA*K!Ed@=& zx)ou{`^Go01JAoGCY)0gRlN3`sls4n^mN6n()|XioOWEqeJf&+C66pw>4~W60N+%l z*ZY)*lnUQ1xG5LaweXPIb=DhOhU99-btSQfSi&YCMSC{(ra93LguOE%=xJL0erPRsdj;7OhY2|^|T z571s3d8C5ms4meql*SC4jax8XOS)7;F=g4SAR3Qv>?GSf<+Q>cC{}O5E9G*ff<>>P z4buy8#~=z)DcAGq)uY?l3%tK;aZxh(8>RyM&A0+xmtk}eUJ@IQ5WlGP& 
zxGtB@t{%f~Zj`0Eh~(fh*^umZ6eqk;NlkH^HwNOsTXiYM{=yd@=8Yl{60(nEoy#65 zbAXPd7pFq~xyE_H4izY{3Be_qa%jc(`DhC<+CEWw(3jzvR2MYAGv{mZ;D zZx&82ySBCFN-PH9$JBO^PhrW1VcYY}EB5+cx6KGQD)QdbxB!~0wK`^rL>4K=`-=w? z!V=!#xrxcb-_2VNgDF*YjQS8fXV7+!gLkqyGX6*@u-`oixuzK@773Zp#d2L z$}x^H;}s57*^l0zD}Fwo3n#ffzV}mf-=A-x@H<~OsS9}B@7J-VblvP-K5jiJusGBk z?|fT)OR4!0u8UAhz)BTWf)VKLO>svdCzOA31oS{?oeeIjS}5wked_49>)x^e(di+W zlMTEqI&K<;j`If&ydM67K@z2d#x;93!zjAKnQ|N20acY4Rrjy(;s>G8sx|__mP)&2 zZffqdlFi#ypl;?4%ID5%2g91w=u&o5x0vrC{`K8wRBiF%6>ol(M;aM;hUs z9UvSAp^p@pY#jxB?cTCzbhX`0c4b&+@Qcp2lIf``e8KmpqAFLX>qxHCpI&zO!hX_k z>r?gMAXJowuP~iX+P|2RsXYHmd5D{s=3H3(y|vDTg{wAzgrg-yZF*2(=C0al!jU%g zD_7OpK*>G}eY7M@;dtcvMVG&)zITv*hIeIgO^n;Cz!hQE2Obrk)4-ulGvo9sAa>4A zjXe^;x*CJuPziP4uC(JAmUGqs#>ao`p3#@mbMj|{$sTqCfHg(*YLV+pkGev3WVPl`{31t5!CGztGYg*3eKT_{Ew> zr`H)vU)ZV-d3K`u789Q)Pctu${wlqPy)m^@nl2lbiqEdh-nUycPuxT%J&W2Y$)i9igFQF?m>ZHSUCqZlNj})`fJOjqq?@$wYA)JH0GN8nn;JJ7AjqNSi3Lq#JKG&!a;BgOD{4^RKf}->hQ4~t|Z%C)0qgZX^ciFb(@%YFrVyCJa`nkk^M^@kIIqoTS)$~J}5az?Fr zf)ApGu0Q+;nG<^5<{oz0`!lllKB-%b97Gv+WpDufyU8mE9lsRi?d!blY*Fts%H=X{ zXIs1GnX$3hI`gn&{sef>h2Rl|a{*0FB??u|5ELPBW1h&->CLGHllMWZa=nqF@3}gU z#-%kMI{?rtP6P%ivl+WN;A>0fWlN^DAQPax#qRhS%I_hl)!+qDL?~Tv*n7fgFuzu~ z{_Q|JUjo`xZK{81=Bxa~Bip+#2eSGJedUx?AaIxK*7Zw;4DO&V4D#pb|m2o9*frUbD)9mzpA z0!uXpW8tpRyd<94_@gE8x-CjX?|-CHpWl*w&I6d()D8mirv$Fn<{1q|wY56FId}ka zFR+r$=yjmz6itkr@wFH@SZSG98JU^znVC6g=^2<<+3=Y-*l8JA=-F8DnOT`> z={eX~S%2OyfX~fM_y4wHRL<}QY`Ms_xI zW_FGi*474eHZJaT_Ab`ebS!Kv6udu=f3C&O&=QJ)^`Eo(XY1$K|C5`7nURD2zp7YT zv0KCcs952IbUz&cAQJ~d|z*`kJuJJUv9W8 zW>}m7#JA}9eux(DDLZjpR1|`07N@*(b$OE zw2N}MZUN0&U)+!0t`5K0{tompF(mImb8t;^D1>g`dh7mD9X^gDe+)EWee^SdIk<6J zsl~g|?G`~R*-iKu+dAmr!YF3YcL<6%JQ1t`Wn8*eApH&=m7GQ?bB^`9=)TK2U)eg? 
zxv7ZUMIP#x&Jneu)DwE?hSR00RZA z>?yd6<+^aNV_Otj%<~F_eTZDZ$cbMq;A4o?lBlYy+T+~-TF=IwU&q^#C3yX4?{X_m zU==zS{{bm?n(Qb0a4XE%2P$^=VbT6OT7?A9^tO6M_5y6R9YZJP43^)@|e0x?N{ECJX5Jh$eeK{6GCY}x*eC3|;jRE+ zdcMb|OP#bQ5IJ&S$;%B(lhk#>Xvwp!d)ayBm-=94SW(!bdb21T!$bN)!bilvQ2JSQ z&lSkg^~?s%{7D2hr$WGkZ}!+PXM%yt9G#w&;Lwm{F(E+Xhq)5&h63hN?AWK;?<00p zdlH#f?CU5okjVXnw(JandPk6s+IiLD>qO@r{1&r9G*{tem51D@CzKYGLxLpFlS53( zic2X16c`G1CI(IGXbvogm}Id!VP@X@Eg(X9P+2!Une=U(CuX=L@I3oXqxDol12%AMO(9s(_1$dNG;6A5Cp(b8DH z9VJ_VA@G*O%qbMIUbKCUaOC08`dhF90k3l{v8*Vsly53uBvSNO1|ielD-Z)5RlV#B zfQ7q_HTRXr+Tba3sf3xXgKdd@O-eiA@Jt9;#+Ff}%fuwks0abFm>wosLNB>*Aw%R{ zg7m!1!-^UQNZ=3}c$4}l1;iO5lP9T(tm710ecH%e%`a=2b@!3fpr0@7H9gctk=$-^ z%)b1~Zw8$SPa|p2QE#lW4Y4@ZG|HUhD?5}cXLpaMbb5FJ7fX+d$?teczIqfZ6Gs;+ z9zEmWSD)iAD&0puD`&nZFQ}d@U+}vr#kMv^C}(7Jm}BZES$&#srP55$J)y8~@I-i0 zEr{1@c- z6i&6`Cj$(gCYdw*+vtTW@x&D$cOupWPZiigOY|(#Og)k>Z7?GB0+`r1ztY&Q4zG0$ znvPk<5Ll&q4eXqQj6={*6N|CWwJW~NlOCM-D&-spnn}o@!K+HF>U;6anb`dVOW7Od z2RWN)S-zTwu?{xQkxbXEZ>}6w!eSmm3X{jg=)TWq z5&5a4gWil1q3DvN^oM_F1d?yOEyw}MtUu2+C!9uWQNR`39^pSMxe;xmK11w3I!@&%u^MywR3Z4vr5^cFL~n! 
z>3~M>^#Jha=ef9Z)Fx{paeT)?Rx(W}URo1T|Mp0c%+K?6u)D!lp~glla|WIU zGQ&XUb4ZM^M7xd1smj&1*l%w5)v#&A)S`8ZUjrV&R~EQ3isn_ntMO|yA}bMD zPYpoHLY~i;AcXc(+#u5(GoBfm4UvEWUUc8!XgGan*X_!+Pz^dDnGgISIt4l{_XZub z>DoNNYRyeIyNe7TTf^SsL8$~esHrhlfA9YtU1DVFXht0&$rt{ppuv=_j&xN&lrTJX zN5-EiME6Viv&dRStSJ-1n)s+3Oib0aoDi4n*5&>BSF;sWBB4xSxsmXopz0RRH7mVP zK`7!et}}jyqYeubV+oPBnvw1oEnr zBz#8Qe72tGb>5DDQy#vDF613zt_9U&VjGlR`wT}k(nHt!b!DXXeE2%(`|Dx%TP69C zEF)P#uE=7*Q1n)yNYh%=8ag@v$e@YpH+(A7ua6l{_$%#$VMs-pK&uw^Rbv`$RU zUo-6!aHcsrR7h!1?I+a`X10b}{(>EZL`mHM3lQrrWO<9_SG9WB$=BBYJrqy=C*cS1 zmF;h6kEj@{N&!dHE(gunE~g-6W0A0JZmt#|RQ0dl0UJ4i8xN0}+4TXsLq%AUOShM> zTuwEwQPqMBN`i{h0IFHq=pbzhcgfq86SGyha^~Sw z#yjRF%e+UETyGVHBvm8!!OGUeD9#zx!1!brC+9MZ^QR<>4k)gbL4}5A$cLg!|5{D} z&3>V{@%x9O$=W0-s*^|^ra_MslnzABDozak-5CGHMyUs=t#63<1mFunYtL-vzw)^R z`o1z}TgOFLcd7CW~|#Bww% z>~aPqx;Rbt?GD* zlaDp(woZ$-R>NP0JqW}i%65B2d2LxBlu!(x!$iex9boKg(R^HItL`!S+3}6#k&NrZ z>_kXzHi{TRBeK=u!?!S)lAqQQ=k|_FlUx#!5)rA18h5TLKz9?t#%V5GEfnThaaWoK zjLv*Bz<{z6Ad@5jr_9v1eyhV<@ypbK*a9>l%G1XL@`lpj&A}Y)&|ISt#p-(uUZ#dO zmmc@{iZ7$EEoev0&e+PXNOPc2&11;Fk}R`1e-Eq??+E1!rTQSdIhEzpcLZL$c~-Bt)w{W~fdmE9Lg@RUu}iSip=byb*@bq3MvFes zziRN+t3AZq2}&2*;k*&U)}T$HQ{7oq0!gE@I>-M`N*GjP&)UQ!e*=lj~r^3ivz)R|qfwW8X(~E707%;cNA!ER`UAf1U zvdtZ;mlY{R%_$W$;7g^aRr8@wT#Vs*_13FPW;4C4zjTrUQ=<1iQf5Gy$P~fbst=bQNbQ^C!!s{5|)BRc8RM_UOu<`oqsO>6&R(45QdVwbvSDEOxmtXLD zfhR$PNB28oK*KaNR!shaPEB&BY@NdyC$xqMu(Gmhc!js>2ot<7nOB!Sws2LWx8WTm z&sCD!6kQ3>DsUcM*yL!~NiWr){p>_v%*FSWf&+hlPi|fbCcb6UZW5F+Xzp+nk zKOKSdW=NT|hk9JLjcH=jx#>&TzkJ1@M0%pD2}Wx^=T1^fgyPS`3PmmI0+9j_nbN&4 z4!^cgDzj?%U!rK@STSHPqYkAER(Q1~F`>geIAGMFQm{bV6(^9_18o^UcY7Ce3J?W0|i0@k;FsVAx{x$Ryv@eR51z2v#K|NN`IB&ZmiAp>%TLf#vON zJ^@^|%}A{`#2NnJ)LG7-wBqaj0l7eiTnq7a5sck387OO6#Bzf4L3GBb4l( z5HvbO2omWw+Ed^SWe?X#X>?QXvUpkCIlDPC@+^qge){6#D87$-FvS;@Ec>7ieS=dJ++Wu)~H^i}C_ zAeMC$>A3YFB>Ma?Zs<&Bj12X#vy~!;FDAS;{o?DPGj9G?G7B%?9gP1lSPDOxI&&S{ z?5I7Ce}DFQf7JM{2QVW~&OmyqOCMy!^b5nKv&U!+hy72h_ZNsij-8&N=UuP&UC~y&dyBZe9La)`$^@=Gw`t#JT+}D>E}krH=vTIVA6jh>aa5Y-$^1Y^z6+4RT9yN 
zJsgept<{U40plbc)z|)QkXO;ZKBLQ~-{Z9c2IGZ(@%i1GAgNO>J)EyxQfXFFs-ROC zx4-)u9~HN2{Mtj;%ZIdgM)&Rgx`U4VY47ruO4l#(^&Y#EJKghfVMoXJxt;y>_7z4o(3_vL8hGQE~7j+xX7j-jUxchX+7akGo(lwh28VMX!ft{7LH;knPm`cZo`=}+S^)8^XIZ@#tZahW_Y-8a8H@X%H!KU5Y43@&GEMo6spcB$lXnA6Gf>2;OhB_NO z)R~Kib3-`_POR3dyKSt54Q?R__r5kmlRN;yTB_>9xM?$tBZURWk)~{C(-kPPCq0ZG z5ATfU8_%h^-2_yW>G*#8XYYD?`T?9lW9%umIW+ZB0i(8G9r%%SK*GXV+MGV$GYfV! zz`u8XOk1BMjmb@Y9{X_9un)Y<5cYc(CTwgk?96fX@#yCVcfwd$t8I09jSMt$LHPuC zSx7MKr~7Xs&+Crr;M5#YuoI96j)o6?j6a?xA{)nZ%a7KtB!yXl;)7@$5p%Ni*O(*rM?FM6bp_Sa|}A!I3X6m9XW>17e(%sqc4n~?~iHma3?$l-=13< zUBeR4h70LmtJ+Tb^HCumB${6l&5x00%KwJ2S6xj+m%^JZd1xVRg*dXn?-T=He1lFn!MURuJEIqo^+?oUi zzuAIT$77WqoWvq-*fXWM>Q2@@Mw2Gt6(E{6wo6(NEYiBF6(ffq8|8WgPAYgO;ssnI zEVTK1g=lm(@);cK{kfAlrSVN!Oqx_eGKX?f2T{G);1}meFTGVdvbB3Rk{>pw6_gFNlq<=Js&+?;#LXd6^q4Akbcv zVNl&$nZ%?~A`WKmTN#y>;D`6c>cA(R^OA~w>f5M4s}nRRs;DU4Ds92mjtHp&!|bE5 zf%b7NBna%D@#BZ6sQ3<(D=d-NrTMmpXIM#0 zblIsXerre*+@JS1*n?9oYu?o6wjy1QVRI?``m3cFPvA-qv)O_)6Df9W$--KmK|kkO z2@bkn^Pn%HjZAtT)3bHt%6r^9%7Cc`X3~JICM%5UR%t#huIp!#DY|xKA6^P#FQV%! 
zsHw`pt*PS5BzBdczfIP;hE3suW4lkrL_|P6sGUlELYh%G0-D+))svz-TYV#)=p)!d zI$p2%Y!k7Y-HLSa$52foFzJ@;>6`b$dA3YaQ`R8$Q|&$| z=)@!2%-TT_JeRUzfC3uW%KeD3NiK)cYcLR%40mMhjCWLHW~8wn%wffnD zAg+f$S}3BFs3~YBQUZS^^fCq@#Sy8%YT_4_p;xh}TtFL>h>Ucbd^eJL6+-oYcTh?z zmzMpKlvXLD5mkoNWU{0EE0kg=p-TBd*BTS-CxO09ncV~f&W-u|e0k;RiSk|ZUqK%#UK z*}iHUaPDivCcsl@d)z=%)n*$?1ML{&TzPu8FFT|TAB{ez#t!Sa zlZa}!xUv-qP-aI|-i4^(T&A?%p`6fwj2ZL(D%)Wqe)jOsK{==TuqTXeVj$WBmOCJh z;*OmJCxW0zoMc&QG$r^q30*VSrJoMrfVJMosaUP&fcE*H2H1G4JUK_o)X+)*zjz3wp!QEp-L_!B?Ms8pd;mC zRV+HNn;eKQiUk)=>-(Mw@oN1u_^9^lUwPO#P|G9g=RSgIiUkesL}d9jt{sp9Zg8KQ zYq6Wia@2)XX+sWfwkgV(jaJRYrk1TRk|&M>OWi4y^!?i8I3#W239gNB%a zT{C9NmkYX$iRefWx1t02e|US#pt!m=T^N^O0fGeA;Lt!f(6|%a-QC^Y-Q6L$yKAuE z!66Xbg1h@+-kJH%bKaVo`fA?ioT^>D_s?F}x_eizOYU2l>N9+S8Zzf)=|lY*1@TPY zNMgokCMmJo*SfHzLmm&*JRP7+Jry_=w<1kPIV~rx<=l#hHNcuC(P@u?dz2A=UGA+5 zk|4I`%yU8DNCK-WL`n#9&$y#=bCW-d-O?RnsSyc}xMdVB2V!>O#)^13-(I`kC6K~b zMx)I`Y95niXtY%R_Vo(_r!W~8y1r-QpwEdlt~c@W*E|^sS0`i33r;<3z36U=@2fPJ zS!%U63?LXa>iP0CLiJ<@E&oXcMmjX*74N5s+kg=vdWR+=aiLNL!7xz2XCN#ZzwOAF z3`n;;{3g=^RAsN`-*xyvuvsf9!~ThJuKir)7Bz`~m1+q)JsUSmNdoR=Yx{+8nM3@; zAfgrGfyl22$j&A?v<8Zbc(<{r zVCFUk-iR6#&%I7Q(x>7hDB?OOL?H{cd#)hVne*ZV`gN+PO5VsL3 zo($%jCVPhy0a{tk@C`*jPD_vstjW!Ds)yXmVYpIol#oMvnAlU?NKSzY+9IxKKTR7H z^^5!%BDY_uR2jsx5H@BtD$|0Rl)o^e1ndg&4J@I8FG}xnfRJzfFFFWn@gXYrrt?6E zxsq}Ak0bi5wKs)_s5Qd@473UXjxV8#Pu^lpwdSF&Y*9>eTppRMcHbzHV`tF)xcox# z*?x3v$@CDyCaobfymluGhY+L~ZKlTkj7z0|it6SM4b9?!U&7Pj_Vn<=UhX~T&6v96 zj9dfi^cplq%D%x3kK)2=2s62Kt3kZXA$s3Vd;!xZ>)s}1ba-#r^ipHV6E1_PyBv=~ zVf8+#wVweyVuz^8HyOy;az!XkHfxHjl;hR=ROvn3l8c*}T0UtY%M^YxbUx$wU8g9B zQmCx3U;fQw?XXU`v7Wg0zP$&Ypc) zNc8V=c)Id542$&1B^Dd_yuORaUAe6V&(M@l#=wS>6q~uYWC9(G{_Jc|Ye*iWFCivH zjD!}2A0@`KL!o1l+{9)q-%KdqLB4V)bl`*r6JK?yb7#MeW_~YOXFp#E#WXD16ymT6 zbePtY@?RX!{Twfyq8j~ESFYe7+eYS8v#ZSGnPM$EguQB0+48;|q*WevH_DP0Lf}4X zQ*h`omsh!PUJZ1+L2TMA=#;q$*VK=>{Jf!ClqZJg#a=R}espH4%A38?M`IoCEORhZ zmcZT|uj-PXi3)b>c3TZq+YGbs=1~gxN{CX&=J$Q*7xoy3B?fxc#yD~Vp(3s2NdbuN;FNLSdC 
zd?8j_m*16-=54_1Yj|HhF6Z3R^r!$Bt3e6t8AA;<(JJ%=0WmW$D^HTXI=LJ-6AeQUbczGMn^KQ#F}z(^DLN`JHGS}6{au8KW6&adF5#dV-^&+jPm4n zuHY7a+nLLnbpE)mL1EVmlS;VI;E(dU!C1|9w1PQX({ZA4d;BQJwoPF0O98NtQEY0_ zqH(6kpjossm3zhEj3!joCMZ{-CER||6%B=_4&auO@yWC#>mJ9n+(mpQKDaSA zd*d`dXiS|R9B?WU&8`y*{{%S{`Jr!h4K(B=A4P15Z|CzxFiG0L>zXcat=8Jg*;3I} zKF5det-fuYZEr{bbZU3vjqBjN;Ka$sk9_ErppTe8Zgx*FCvu-Y;Gof~w%oCzgsN+! z)@rS;O|AehZFZ9wiPW^aJpzuWl9);w+v#OL$uvfAOyIvYprxl%`v%ukymkGYWBM|t z&X3GpNv*+!2{B@Zhc12B$xszTET_d0P@XO8Fx#KzBV8vL6wy>&BE?@gW|7^?V zBYB7I8;VG^IssXo=cA_^0dG1SY_5T@VyU<{YLu00Z3zU#2>OGZ&FkfacS{G=$j(>v zB_2I0RYJtxvC@8S{|nwLqsLlzhZidUjwk)wNO%!!2LP9ZSaFQdGO`_ghPTsf4UI0J z?V~Mf{|&VI^_Jm-ysI05oxd!#dvQ;%`*fBZxI^gTwj8Epv~Ps4yPXe$(wqHML%vX1c1tlspO1rv|uHnL_zEZaCqo#_X^H{+}g5%<`3jb(b-#iS8mX(sfGx8Og=*Rk#azss%(b_t~^j3uFla{=~MEi*WW-$B% z{FINr#C^UWulwkPxx&|fw}K)X3zL%4&eagZq45JH>!}TyT8w1z(C<{#A5_EdC&AU( znZ8~BX*vAlQE13hB=@TwZCPX|Ra)1(17ISPgb5etEOr!<*u-DCj&SG&x4^!A9420L z!q)U;fwTwjsMwoW8qD<1Y9J$n|vo<2p-6Oq21cu#<544$f zZR?LTADwINmRtigc3CD+zLe{Wb_U*Nr#La{9rVy}$aEA&qpF{LS0*TiEw(~fPzsMB zc&SCsN=&O*{jNK08eC_GUVs8x!kbz-oX3$4vk!f@uvfNA684<<90?$-Y}hu{t6t~C zTAt#e0P(D`ekMpYH3gC6=HEs_s?#(O?F!O1_}A@9Vcj`1m(>K*cw$GBc3-&MUusW& z(5J9%Fz<2;sTHvyqyLY?*+F1{a*MVIzrh2j2vui%s>_r5DP0K3*cR6cz*&gva_%<14%%v@7_=j zFyMda2z4=cGIn${w`KbG2POk6TLUI$R(8Yp1CYK6keQj8MIQuWHDP0BH)Lbe2eFwL zaToyJ8^jo~01V#!q)aYOOpeA5F2*)4|9NLLwI!EgW&I~#>0i>(|2hr;5F-bO{T-2a zG~RJwVf}YRz(8go^Z(2l`~M6Fj=!-USimfdtjqua8wm>s3nL3V3y6b+6#!%egE+t( ze-94-a0xTNn|;~;X+QtsbpG#r!mR9U|3}(=RZB8}7xh8D^Y8^)r{76-qNPdSA|ej%g$VS08GPY3I)P(&sCcMo}tO{0f(t_|Yw^M4P! 
zvrfBK_1oG6B|blm?UlUtPp+?by{-Hs>wY~XMFrthlKZ{u>GTvShg2g`5F!UqY*z2O zasR<|O4D){aoVlzzJf2?XRG2VCkOpt%aT5pVDGCQinAkts<|Tue84A_)%iyEdv=>I z{S^o4b8)(#sl*ao@t|V*&ZZH|fE^3sgJ6#HJR3Sm823D40vC9Fj5bx_51r>cvhO)= z8*XK;cGcCc77MUGncTP3_k(W5-~%4aky;g29>N#?bZ-1}7O*=YsexYg>mbG-HHQAE zCcSS_Upz=H!RZ9l=!%qvUD1eH`ElX2S2zgX?dj!W{Xwk{5Yt9&q(g43@_TAWU~;0o zjOoHOmTM*NeUdq$@>fS0M8OU7_J_MfGS4U4fapOH<{WYu(#s))2b22vs3VkH2<34& z?Y`w+XtU_ppz(XidYD0X*_M8IY65nw+o%vSShwvWl(l>0mZLQ2Kv~`nry#SaF9F8z zx6Sp%OGbEwC1JMgwG??ZrrtmtQ&I)tomLkA%)??;O!Qng*neOs?cB>;6GJ61E308;5 zxrwph3oE=BJJdQXeOly*Lv0RB9S`oh??X+E@8kLA%R{E_>(dR|cOBxqtchDzjrK?`1jZn43$G)8zA=5kd8BOwibD_sr~-WaPo*KfL@r)kP~!)3gtd%z>Dj`9RT1~xlH zX(X+)HfnvfxB1C{{<-R>P09R4+VKa~rqn!}$ZPu)CNzm2NS9OLtsrS*kuCW^m!9GhDjM4f;an#n<>A^`Qzn-hPzlLTipPB_=tb&PXA~ zQu5O}$IVJ=o1Z?E@@3H^NJ+8>Z;(cRD-tegIF7(2s4eq@Urc!*tglV(_HWccC@5 z;aSFT`V_j?UewNhk*kE^lOdm?=ZH=!?t(eIXmFvS`^t;UTNuf*c3#s8h?Lkw(yF_#7DL; zCpwqoq~GJzQ-%A15q=h)+|r(eYHyG-qi|O3XqeK6inpEeVbX}Si7RE$-J2bnkO>oG z%Q6&-flfh$I2jgMLDL%VfcLGewaC3sDWfK7j+jw46^ZG6Jhy@x{Pt3jf38AaeVT%* zW74N&GR6|3h~y^G){h@hB}qD}KSv+w3WX^Z8j#?%Rn41HMX22c)?E7=`X+z3hzlv{ zqjsni()^L9Ka`v(f6H9PgB6p>O99s@!ObR&q~oQf%Rrle@05AW+lD571COi*UHe_5>+d#;qcH%wBgv z5?#TX>WGkG!{F{767Kg@z?Vu?wx2N)2%YuE)*kw4I zjA0HtIfSh&?(gtb(?=E*A)rE4R@I?kT($TaM$NgjS411qIjDIw3`IK*dLqVZZ3>nf zHL;<;Pwm0ULi)5SoN))GV?n<(-+HRVQ2it(YDoR3P^kw|Rjg}nEx)lw7rQd=>R5d)!is%!YS1HTBB2=ZYKnp6P&(ecdcK{DJ^sb*@ zsJ^RT5Knr+ckC7!J65<5=~n!yIb$phmmePIUuz(Ax3Vp*Bd;CxbwbNKEg+oq$IrMN ztY2M}v%g`!J#0iCOHUgkOkS@4xvi$LhSDLh78~zDxl}n=mG#HWN--UrAxE@!x~dZp zJ%zTwfnzpyK=A9OY8gtEt$Y5kz;Az4W>tT;38SrQ98ua~zlH4u?{N9+XZnhi>^ad! 
z|Ih^j5`kb3)2rzgMJ3a+;@AQq9b;nK)(A$iwi9gjtSuCMe&ve8oyN={;9@g^3J=Fi zM$tozb#nBtn92&Gl9tPlHh25;=eHxByCRyT7VYeCzdkv}G(vwpgwnoQQ2NSMM|EqV z_*;$W{#Mk6Wf9^iMh2$R4Uec*9kyp1T|hC+#-Tq&U#~YWp`fx+bG6*NYn4J-RT)Av z;JM1?>Lhj|V(~larjv@86UCNrzK5iK=(2`rpa(4%tn;eg@kf+;)9GRyqCb#?ey~7my{%K?3ZNgb1W3Ool**4`yr7;A^lVc&`CMrsE5_p z)0k0vH*;q^jrZv3ba4h{mzt+6EH99^t8Gi7P5>{LiE|>^Fj4~CTvt(Fu_>`Dky?O- z`&3BO-vnQZ-Ab!yT|$L$sgO8H%)>KH_>!hG;@CH<3E58mI?ua`-T^OV{Tp4%r-{Xm zmK^P7kZxp>tYg`YOjr_Hu=+SJ#Q>N@&5l!oLh@p7k+$mm%u%zoUG}!lgQEV*WYrUj zxzPt_=k?Oq5==pRV&SKw_Aa;co!=!=u)UZXtW0@H21xGxnA=9Wlm5WcfV)m^`c;nC z^r5mHW>?EEwg@UgsY?|$+f(#l=3{Q!&2u?hKamCzMd{3+7ss%qV>70w>v zBQzC%d%8Q-d3F40q`_snYEpOpk)-xZW-%raeM&JBHG!SW84cPb(@_+Ny3}y$fJm6T z$HIs8T9z7W2mj5qMC+J&r75_n$&08cg@6o|25NNj)GpPS984S(DHJS1qJe`&cy*9pqv|h zd0Y9^SHC=VdaRTzJ8-5rc?R*cI+aWK^K!70X+!6#LbZu#cOXRXD#q2Tahv)0Y-bU> z?IljQOxoV5+I8k~r1h(oV3Ya7Wp#V{9$6b0=<#T1E4upD}dgjWJz&O5j_Z=@9{Z+XF^Cc+I9ZRB@fMC3{@uF;?qSS5KU1 zrC_%)pF2qh*m=1d$B{Wyl&SJqPbCxD76(+PDRs@gZJO9}bS%lkJcDo3Q}t9J>bqqv zhcD66%qnMd<+K;-%;D^kP-t-rJFg7lC0FHmArYnhI%@2nf6h+E^i||Sy2<@MOTK0P zuAvHy)}z_u6p7`!bVz%U^EAMWb1ZPp%^X1t3~$aNf63ow^-kcDcR`!f*Sv~0xCo(Y zY)vtKMHv`y_8!ifaiBrUF@CsUR9kjcs^VMmerUhbs{zEJlasOb%tD5_f0fFXL}qW)l!##urFTR{17q$Utg} zb-I8Z3i?Zjb7LMo3mvU9$J2qlha1JKtw-0MuZ25Hj#+D;V^PDne}0n(F^O@kPhy!s z7ccDIRQAVIfc?vy-FY&T#HV;x*`SZ+I`IUWk47C+%KyN1?@`s*=5}{!D5)fm!9cio zmxehg$qeLp=#ih|7`nX7aewc`uCY9IaoP1C$YX9~tP$I3LU6#7@P3*5O}G6{$KqC(De{gM1wsy8m|Mp?$VC!Vd%wT3lb|41; z^j;b4jBM|f0ea^l*%&!kSpckmBNzro>>P#w4nt!j4ih$gR$~x|g&l0f3gFNO>2m-9 zEG9;5>>L0C!~bMq9bEoV9F9)iC=3$A1*X{|y}e!bbmh>?H`m_J7EACp^nGJDd(@uMs||c1~)_olpXP z&C6YrnaFXSR7 z=O{wR+72)SrgEVZxR^{~*@=m7H^hu@KTYq*dB|7>_Her$I>lgSm%Pvh2X?M-6JN)I zMrz(d{y01lhrA78q0DCYDB$En@9nA(`gS-W27Grye-Q3I>aRoe3+`!Y=PyKR3tzDQ zn&LI2mqrhn1m&ClhW!-{K2_@`gwZSyZun_H*MDEg4`SQu_S+ReAwEh@pU{boGX21e z^L*ic37JvOuy1T0n{OD}6qi;oirX>U{);e4Fe#>zDHpQ;v3qG6XDo9sdg+>LlA469 zkZ~3lfGh+4TgYNKFgH|UATb-`y~X;7Q;rn^kJ29IQN**6+ib4n*9Fo^By~@S1lD1A 
zk~YJ*kEo6_7iG+T;zK0qa@X4s6h_$*@mQygUcV=bHe#p6!rc@0xs{AM{k=HIXkx11 zbXCW#Syf4$hh6SJdgpLri~AyDxbH)eyCEppW#6#*^mJ#X^W7FODm?Z)qaR_Ya~eo$ zOK1ZiHai_@3TNQvifS&7XSu`3Grgo82+{EZ%4;sT>4+TzQ-orLB918=H_f1A1B2p1 zHcCHR0E$`7KV7qa$vxW`z##VjFisG*30?ghGzE{!SBdn8Tn4K{#}Adi=~xLmvfo{P z%dg4x%d%kZLvQ_0Ir+q6=AHtdVldH9J}94iUIFiy+sh+D)?}U6;IS6IuAYy}no;Ah zL-LlULJ=RSwY$ZsUm}Iq@>)KuziwZ~4$3{2jJ164q(G&MqB4Ded-3xggT^Rh)d(Mi z#JPmP#3O;lDW)?Z3L}7q0-zPsc@ZH*DFzYTGsS%Ot%&bWw4hgC=Z|693EJq?w^502 zz;mDlRZkHYTrB)Rvm5VfV&;cZ5=BtVhBU*1E4^O)hC)z0w1R(tK5I+%9u?q# z?=tw*(v`$~ZPK&*u}{>+Nc)`4h{ubgRbkjkz+Ufpl5sydHitK7cnUr+*zmL${G*un-l;-l3tcvn%`g&eaK$0F=X&F+j7{FD` z!UfmTk-!txdHBQPo`g<;q`jtz;26RF(U|yWQv;$ooGRaT5(h=|mm$c8zQvW(lg1h^ z2myONR86<4hG|L&j?YaV3ouGqy95CTQVSO5S_F) zp7?@4BX4h_+k?nfVnz!OLf|Sd6hA>O%Qh(4Oe?IB@!~@(7Vig8Dpza-D+PR$Y0q4N z&qOoD)0zM%EP{5k^uTm^T4=>qRf-=1(jsLfD4Pls3%;p?)vKiLdeL%Ev525NwJd+I zqB34`4kT?vi?gmDd=U)#bURBAB*HHAE0mUskX6>89;9a+GJ|vsN}0a>Y{`MBKAYgy zd{2=fsCe>0Gv7Ui6dErLINbBGflNr0^@kfd`NOxRun$6}?hwSJ2>Ud{P@1P_X~2bF zoaL0)$v@CUog*MPKuU0;Q(2UbK(0-rwPd<+YZ_$Mwh0w}?mskj6S}*HRFXGwkstLK zX;HKWEH@24iKkt3!PsZP!gRw>X`5PVoWj!c0FaVyJWd8pMY=U3GB9`#-2)Fw!An?pcvPcnXCf%-Wc^Rk>J7-N1cS(Ys&P%=@8K(Kt0eLC&~(tOe( z3v9*XIJ8gJr*EbrEu1;+_cRN$WMy?n1Im)K4qCw#wS#yk!54q#FbJs*U~NSK8U`fZ z!fB$E`%y^+4)mOrI1I4>ZV@)mqmuJr;20;a942%o(-X(Jp1LyhqYlsuy$TuGL~=ae za4Ts%HNV8LYr~g_2%ZIzqu)Ht7C91fJSxoQM41*CQp$^96NtyU2`GThgV2@aL`K%< z8gI454N~6VfW;--3v<>%Db6GpYx`9xsAGya>@|K^z%dx$1zV_;u69%VL5=i@T{tVI zyc~4M9!54bPZ3s8(o@3oQTPp3HqpWaUm&-93pQRBdt_QB> z#Ba!zVhF z0{FLD&8RX1TYmExR7WSQZ#IT}W3;3#5&MO0z2hIsIWkzeO8cocCt|t5JQQjkMeyVg zqtirE5%umL)8IYkR=L%Cb4p5~nfv;Z!gjLxq@<~@p#C#uOu1RiWC~?MT8u3si6Li7 zWve075p*sD-eZK!&z^MjtO%x6gob+&iCF&X{m~i|AkgMsS_=~5whVuDc~*7z9i9@M zcTCzX@}&0c2W1>tq_*g4@}uz>&AB2Aez6HQ#O3r2{`Q$Aj$?ivT!@~J+WuK*`1Bz* zUKR<*ooUj3T}r2R60A(u6)&+_32-cI++aI=_04o3jG3O!4?8QZywH#RO7PRxZzN)3 zTS<@%V~G)V6cmzMm80zg4OKj$Mlv6*+a3`8r?)U+MqH+wh7V-%J~ZbWS8O5-%;{e? 
zZu|KBd4@&gOp2K!ypC02i81YxYdEGp`YJTGCQzb*WhONb9#qgP0w zz^zQ*s1l1QZ<9QoQd!AclC4)_6F-Mhsjrikepg;ZD=?0p2sS)YI}nh=NA7F9;+wx~9$(Ei2e0J3MGDRy~Z7pDLLm`6)J5djqtj zlA<_%gtyKi_@W|co4hUq1nEI98IrUgtGM}??+aX8HJR*OX^6RhG--j>gU;4)!$|XH zOSycIFA{IMXZ>0WS5_U*w|HLVaDSD|U}BV7N!h@z?on3NcYv~9pkv<|p@;(OzxiZ# z_rbDc8wENYqVH<~2rjE12B63byiOmXK8!i1VT>hBYnz5S3( zCN5QHZdtk8f|D!MP$-FXvsi&4h6i&}Q|q&y-?gf~9zMJ_%wKoVWe-kl=?GPqk@0fo zNhVbvc5(>2N`Ky2bTzd@1$ko}xh7PE430aGySMrZs-9cSkGWWN=uP&-EagEn3*l!U zo)`zursMPPr};Flw*{CO=VDXLCh)Ldl~k4JG1smSAf2n7dA%mk;KfXKHhI2xqvE^_ zM8Y#kEHGdeg%5zEtFw466p_xw$ym{$c)?UfHa2i4#1 z-I*xw+OCSRAyu|dIC)`!zBl~B;*AC|Tmi zs0{D2FV|L~RTss2aS>mv!)@+ysk=KrfpzFb&05+Z9J&d_bxTtn+LgecGQMklr-M7W z=;hlg!b>NFQC2eySJ2q}`OxPe@-iVAHsaJ)^mEO)}V-&SQ#==hqs6Zjs*T z*vj>jO98k{FS0%uZO@gTRGTMzlDD>6Lv?F<s*wt~wJL4UjSyK~Ix0+pbu=#h<^9z}%@vw2h=0 z!*I)`nXn}7zx~1}NfDM4dxh&rKPM(k{ryVj@hFgMfl|Ji(K(<7`PsztXE#{= zfvh=RWL+OFvcnUJXP8n?_sDfGMi7HuwJnb>wi!*xE+8VMFj^pDpo)6*M02VuM_DLz z&K_ozw~00zMHaxsKEYQMfDS|ezoLM0q=7e>lfN7xssRF(__sqpH7B{#n5i{+ zHD6IZ<7M!pEE&kioOY$*WG~zz@7S^wnv{lil7otJ*0Q2BuPSr8kuU7lBHImu z0+)i{p8E=E!xpCWFHq}<@i^(%=x)vP`t?H35{H+R(wA;&_kB(}&3f~pv>Wwid4MvT zTk~eQN-Y_a8xwT=;>{FsEPqrrSN{Eym0g)!!v3`gV)LZQT0<+mIV_E}J_eF2+cbTD zny=j5cQc*#N`=TdUJ41-@LZ5l&6){rnk6l|4Kas)ERHzjHqUnDtch_=EW<3le4n1U z_o0csvfgqhUWKjdv;5xtri(%{Y8RiPvWE05?;O@1;lzTE54*N~)rq=L!+iUC8S0~F z^FL1`%?!etI`}JdaV?OyWj|dnVKSvJ zOAW_%vhvg7M!A0Zel5VOv#C9*bq>k3P)fAs@V3Ey;f=O99$%Di{uG5Lu{(Ab^``Qh zVa-(e7}z9b^is;0sb^;0&^Fx(?diP9A14}eGF@aPQkm%UFX1_r8dCavp zHzA5Vl3Fan6&_oDLiubE@&0@Z-d>mqfsMh(uX0L@ZlK^ec~rn>H6#6V)uns8Xmq&7 z*apgvpCq=w#vktrj%EdPgj4WKec83!iO*83&-T34YL@p+@cC+f^mO4wsam@Adk_9- z_UykNnGugmIg!25maF})TrAc6YIfw5Py$_Uod&N$ZL%7wiEiz!WAP;v)ckYDsws*O zt|tqvkzs0gzF$O48fDfmy6(mYSw4mx(FvB)az`G*Lj5hr-8kOE5itv)o*OLls&+0f z`7#J4@wMmk#d(6T?}6_lN%Py1n{DFm{(Pjo+Ky`cHa6gPW+{$L4BMQ#NbXG!`UkT( z&1WFTt?pIJ#v>(I)}3_gdEVO^i!2AVctPUuj03O3?~JU!`SkJ^&JuM>T?~NhQJ3j$ z=250H5#ej_v>bvY%wPczl^FwH; 
zZeKPs7O1a4X+dy%Y)?371O@fH%sAj)#Dy&YSJ6oObkUVWlT*Net;ccNy*dAdTWva;o@5NEc$*&S z;hC(n-h%7$K-j0oPZ}U=8jMQCtY_CC+?uvy_J6D!`=i#BT$JD1AcMf;4 zjA!L)(3*w z0W3yf6EHgmiy<@6=v{eX1sHM|v9Pl z;@|&PSKg&1Mpgg|JL|if`@LM=Yvf&9UvcBsf%;3MVUV!Xu%*+Oc`mA6Ov$3I( z2@uF)WTX!=OjyAGDJdB0TUnXgnAkGeSvi}U+vr-mJK9_QyV~?m z!UD&?t|9&f4`#r-h4~*p9~M>?Mh*ZQkmOy;V0@Pv0BnD!!t#F)j=w08|DC=71hRtu z8V5QdT#DIgzVPnOCr*SI+x3PB@y|K<%(vS+0yR>x>GmI^z$Rs}(8P~z*dh}=7VsyD zSXT=N46u$UBq{AT&hN7Qh;_#LTljdNkNm-ynq#(RpWvULN@`FK-p$c`oo^C^-EVi$ zw;kWm1iw1pKXin~qUrV{#IV{{`9Ws8(&JKo(#lH%W;y!CC4BC@XY%EEhEXla0iquV zSaPg@qc>qqIb$GMH!nET#2*2ZJ1@GFd|9uM=ihnz--ow`ks_h?F!=V_rAC7iQYr$G zy#_BhVTvfX2QzCC*9oDvRYA=JhJM>%x?ElT z%VjpN^w6T69?{#6@NwNE8r+I>l@Fzqz_@5GCMD<#MBxpouf6>q1E7u2Bur>I*iATIrGUVCy7|U0Lf%~jcQYstSZC_JT53zjpFQPFP<|A#sx{+kH*b30NxOr#~=g$CCd=>0J<1?M`-vUto*dSGt(V+&ycfvRA^GX?4^ZZlo5()avKfn2pxFoVY$ zw4Ak`f@immqa{t+hCx&?&!Ya$CQ1gPldTgU{5Lqj2+_@`rNW|Jzg6L+Md$~oH){6J zoAv!|8X)nXoi0L*6_Af^T|fAK$uJX>mq3={JwtR*4r30+D&iVtd9LKCpoO4SM=OMje;ChdezZ1n+qX?hTZ1^hU{N9olwYAG^{6O#j{-skj zsGb*d!mz(}ccBa23+$iveIk=11p^`ind5@(c>JsvzXhq>i(wNuG4^{b=V#$B2rSUT zU3T{JI`@7q3p`n3>VnI))G?q;}i> z!{v2+E`>|#2X{rt=E9MtNHL@SbQMa(+1gZ=+zjJnB{~E)1j{vWfmbuMF!2*F?s7D( zpW!0Xsqi4DwEYoBrl|bpP@sSp)7A6LQE0xTThJ>uZ8$KmpNg5e@-zs4a(!{>kWx7}YM{}B=wy!I_XPj|#;bCOrQH=_BzRey;_DGGG|SCRj1T_lS#ySR!SE47yDM%*AdL2`-D5 zG$-~R6)%CpR?;G2Ou0U{^P+RYWEbq5dMq{Sh)&{`QQf~~{SKB`-)@J3ZPa<@{bF(N zSDEpPup0UY0UuqfOe^=Tp+#o3_lV&kFeD`l-i+%K)dKhWVqa*ctTA0W>b#XuMjw5u zE}l@Zbr56j3K-$`8?;4pS)c7eMnlRXV~V0(2`OoNhj}p^g4C?2eFJH)`kQMXg%C$< zzF#}e{I_nVwD~zilL+)+9dqdp~!#&*``PD*fLc0U_XW-m6@~ zqiPnJ0Ko<21w;`{^}D9%#2wPlCDP62T|D(4C-_@WU24UMpnaXJ2ZQ-o>xZpR#H77SmN%uo6I19t=dH2|NAm6Vb>@X$$}_U~-!#9G0;xtr-Ln8U}ga z)_5&izFF|5mWbQ`QBMurneAh58zM@IfuHM*)oXVz0&$oe@H?Ea&7Gp>Lk^McN=_f} zOOt9-u6u>)-d|Nor=X&7=($i)mDWASp(VlXnmuYznj#k!bSlf6Zo**0yR}4Aw$%QDqN>}BN{%S zRdeSDrM}{M2|>~?2Gd!Lv=cWr2AzpFWJgYn(k#;yLtc2NL=)biB=KkDG*p~`*;GQj zhMa4!S1RHwxzeAMIF4e!bDGcx=PFsve)ItI5Yss8OF|AD&W9q#V!{Hn#>GvhR-j;P 
z_E5F;sDXJYej<@1@BK;#Xk69!wdl{|$e`~agh~PH@(3986W$@-^yJ`gxmxt1f&{X* zkODMMjR9#ixgC}Smho;=RN*~0Y6M)8IiY=AJvArvRt;^4t^T*BL0X>Wn67S9#t!ii zq$P?zv0@b6lkaxFl@ZAe3_=l5q7XO34is{~Qz$hIvZ%vGSzCWIr&B>-t_C5~FRoTM zb?Ohbc-kA>k_zKd(8d&OQtTy6QBrqc*RLN{M}B$@IU{uDPncW~l! z(LUx+M*nyjq$~WmkH8pxnt(_#!l>N>;e8bSXHB~o^7pRuS_SeXjiFx!{XhV&h|;+L zioNv%dD}dQ3UzqsB;b2$i2ebf3ZM#$wg(Xl8->UrlNwo#sBG*X2R|^=OWQ{sr?7xo zYZh+1ylP!3>ZyW>{ekDh!LYC^vaR2@g0m<#?}-#iZSD!y)b@RUx^JmuG^=xY)@IbR zBT*Y3OnOM}FCqDx4XEvt1xXcsT79pP!}>;2YzM+E5>QKqDhLN&=ajaq*C9z=vx>7hx0t2< zM@1Tn+oPCzoH8z5wVz`V2?5{w%CuT|9!tu|J;3Cyld7^86%%p_w2%6I%%e;X_rx#T z0YZ`Q#;b-9ti`LNuwT$>zuFW{)1{Vy$3dvSyOSxbw7uJKEqqt6pQ0SPir9xX729G86_u=K}b{j3;ADs3$ zk#rH3_9icK>*}rANQSa|T%27*2BecLEOpmNZ0}hf_2D7)ox-Pb>JMT& z8vZ4-z+dfuB2Pv_1_8U=KR)+T_zi=1k=fzbl7#)*`Nvm30gH7@mpYVW-MX*N7EoNU5q(X1jJ}B6J(YxM!PCt5fYeqXys787+ zZWA{RL!YZl{VFjj{sXXa0H)-EKC6+9P*T9yF05v%=vkZ=ykX9kLQ*?aFu0S2wQ?UP zNQoOnZQm);7T@S3aNUrHOn@bHQ`)H0sf#r7JS4(@@i$0FFDq~DN>As?NN;yq-Lp2) zNuJYoO?c#hIrNmjrEc#Js76r1#CToK_p(tb#e!S^bmkM$qR_tEdal&0iXDS`AfAwO z^C!#A#Zv*M#hMW;(|4I>>MkWwcDy#Q9*fuzZ)#$9h7u{GcY*Ml6iey+$mpRFticI9*fe(7bb})qJO44(<}d%)viN-JRwf z_u3b(UO*I0&sVE4NfIJ~F~gTz1J_vQlx*J;1h7pc!~ zS3OE!H1-{NO;&h1Sr~(V9?~wmYOa7r{GmmiJWWVml4^qtC?nBz7V%+k>(v`joYpsZfOy0XlbKwKN|9?hB*i?K9^YzCyvxfY6qnf zT$v`z75ld7XEtsco#-lf`(JvOb6qI4aD7i{%f^-YZsn1Yk*3$Cls5K~S1dS@TgUTd z{eLm{PBFHH3)*hkwr$(?Dz36^+qP}nwry9fvTd8I>aV>|PO^8WPba6lFaEok%!@fQ zGA_pVj^};e!mDTZ(Zgb+Aw^m55~AA!?z5!5e)A+1gqczcDM<{h%C=JU%qVe)HS;31 zjPotKpr1`YrfEVG45GmvP z^J;WgC{43sW~^l{%1J&5g+9Ics_C6ZC0r17+mAK6^qdA(TxeDZY zI{UeStb2;4fHR~C!KO+0CcHyy*;ZAtJt2(^P|S>Az6@4G4cx&aaT_O=$ubs+<^r+d zpmxML`|>-miC7j!{F3YFWb5UAvn9K6x32F8{@%BQ>GSk@nu zaG&9CT^Bh%6FICbA=Hdb%Fd02=AlA&M0Io{`%6;Er+%}|{ah`$^~+8hq*x=km0$tv z7QdUaGTT+gh6~uO9u^4pH7#XmQVT0s>#N)pmNL{9(aKS8_G1i!=p$q<-)g#)>mN#K z+4dfuBd=k&RjhzQvnS$@8>WiKL^hA?DgTm*&2oB1r_yHNbk|aK5(E&Zz*9A3E9L~G^=To4s~ zMTqc!l9npas3i}TjHr!(9KgBcCla$vDw}|GS$(PKKFI=XSioBd;3zbL 
z=DBrGRdTfE=)w~#tM0$RRhlaS7BlQnblhNozsVM;62|Cj#RIsi;)y4SK`UzWx4NWgmmgU!hmPm>1A(I-VzLTc1NRu+24F>ApbwSN)rJAa zRD%whTN+4LqYnW@Q!)zHl8r28gx&FTtieP5CqXaA|0WB}&cH~={15K;Uy?g^Mm9Q@ zpCK)#|9gTvQwKZ4f97^<|1sYE-vol$elUMd4mM_H0wyMwA3*~H3jrJF&xRR01M~k< zh2ft(@c%Hi`~SWc{{jX7kE8CNoS*LJdN48){KzSO0`4CT0s#vr!~b)${P%7QQ(Ie8 zD`R>`eG_9dCu^&JC%ZZR#Vhe&C63H&4F8@uYH!(+w8Hwn=+?(t`!9r}Jb{7$dOdT> zc}o})c*FkD@+11S!2sALq_WeylT?WX4zK|eaQ{cidQp2gD?6DkUyz4JQy!UOFp+{2R&$Dc8j9N|Zk z-2OuSd;NvK`Mn~}Q&on^;l%3bdXekZxeypCO@{B1gpQVb4UpiP=18!BAUg{C%s?^kuG*U=DlIHcrSGxqHtcXU7q+1!><#(*p^Bc2Nl68zwe zzQiqX>kNEAx141FCHLq-0r7Rl1E-)0Zdwx1!EWoL2y$FuEI0hC-Udz<*afbd!Kzu#=yZGbnrg+6Q zmfaQ~p{SQc*Zt*n56pyHVyrNTk)b)s(jVviT7GHPYTVX}E zCHte%pYH?tP9J~Wjv;FAu3_TMADKEw&@B+uyL(J&B&wTO5xz|L7oLlqNYz{h%3+U1 zE}}aRxGq$$!nr-xv2@{M{h1s6Vvx}WAN9JoyS5ccG>_mReWmU z22ekgGYd3EAk!Tm?c0egn6fxy`(R7aZXQ)SG|aPHrC!qGm06UOE7N0JcRn9Fb$ed- zudZHt|DJT_;`zSb*mm-Jz21&)rRrX9Z|!6^geR)Hf1hxR6=0;@4zrb4v5j+}nobIv zkRQV@NUEb-I#SG>smSS6CWHTUzLOGp!|g0hM73a=kE z5Z8OKGq9tWL-VYtREMl{y1pq5$r7zan}N?dKaW_0m26LQ3yS3UA6i;_m67}?mKBaI3?l86BA4?jA46i56tf6~-J|AcdG;ZUsLm464NmThgY8RCL;z<}$$JXG4E z^5hcZ)f`d-=Lp!Xxz9`!Nd$}2Qk6@2O$pTz){u+s;_ zZ|x~xAJ`#mw8rCP%#nFW{c(zU9QZ7nwItwXWo}b)9ENZVA9Uy-P7am!bH~hV4hyV2 zVK}Zv-SHTf{EDc6kytAK$)|j1Ma|qMNo1mqgmC>@!L0K?CNEUwbeS7Eh|*FSSLVC^ z&;s{?%}{ELc;>hEp62k9fFx2f|;j6gIGQOCe4+u5y_{m?B_#EtE#1@ zS4rr+X0A-jKtYG3)&tBIgNGu9x|iP65*DSr7Ts9RH6_`)q5%peiUIv6!}I$9#3e#$ z=EgH&W9StL|p~rpr)(GB6a`5;l`G73Wr-G}fL5k0X*wXfF3W$!|tQ80<^E zm86hds3;{JF`^MDAK}~cP!Th;-McH+93rp3!w|M7q~FSL*j6eP@8(zwHdpa>fjLe~ zo_mdv0erUuj44$1AM>c`X?f!@>5%kViFS;IA&fQbyNyg)i13Ecuyaz{Zy&0)%zX51 za~_|=Qf4uy;4C3Xbr8LEeDd0Nd1GSkpZ+AFcIT|tKMfM(i+i<0jeC%N^z+~v;W zyb*m%IDu+KGNrG^*E76gTdWt+!oGprVGwD*y})L$3qgybS&hogDR((<<~Iu>jwhX> z{BGs8Ct|3mA{(Km%IHU`Vn-@a3zz5GFnA74rujR%KMm=~&xRoUz~v1;=?9jBa=2|e zgv`?0)=;yMHL#OOBcO5)%yUHozrejn77lG^0iVz_cmq!mnT?boHX(pg4nq{6j<|b1 zppWU=izOxd#ZGkb^pV)HFl;SMyR!Sa4BJ(87v2N}0XDw%xCCpNqNx6KSjge(jFqI= 
zxya-3k`h238kG}xmL7!-Ox4w3%p>}m2AzQfYc-y%@wUJGQ9j`|l4%)+f^O=C93!?f z9bjNR%(g<}T5LFdo8M!XenT<|7@Kwq5ZF?*EBW56n^{Y{hJ23kNQFiCA8h@KHHuTH z4p#pOmswIxrQ0a%Q}E`IbGIR?<{sk(g>~|bk854CrQ=TLY26%N6!$vX*;-#vKmj;F`4eDj2U#(lMo*Qi8=Q%RP5C!EEZY%*p_FW@L##|sKY zFB91%vdP2 zJVLvvIfp-KRi}A8Y}ec>B3rp%ISZGCwa=N!6c~XY5^3BUnUNHiZ@UYsXf$15bws&o zJW&q4A#?RJZH%X+%%kQlWmBSvw#^MKwy2@+Aq?vPuD?7Upi|n!F80SOq-b>0Q~d>6 z+DX2VK8OW!fFuD*)E8q@qZBd!+{!{R@Fo{~wio*Lx(woCOTNi(%lKjE(AkNZE4kK> zALVem{i0da+iaRgb7)CA(SkJqcZnke^Th=(zb;&;AfVUrH=Y>2ihXXX>Xx-N_Ar>J zLW)oIZYR!T=u0X4Jrk8g$So3wGdjy&UsA8{{8*Z4OZT~4f(#Y$fXc`W*su~sDzC@+ z>h*0(Vkp;ffJ3vX!LQy;+@!j<$VSGyV#sJ|L z@_e+g_z9+_SY|ggLdO%W>&#(K!d(-Cmn-KUC)^kKkXjh+&*x$9PTp#ZBW`T`v(m7d zA|3)dYjzX%@$>CqXeqvgaMEwdJGkO{eh)7nOx@1`Js0mU2dAWwZ-99%X5#-OGvWN- zq`+9%8R;0=SbnU9tV~RFES#K-tOP6^Y;;Wj=v4prj>i9~z3~Tf`l(6(H0q5QIhc(M zj0_k!7>xA|OgLE$ep=H;Kjf3Ki2(-(>%TVZEjb+jS)w*Ew=(``V8Z?%90Q#Ho8T7< zD>EI(&!7A8H~!;FF#V%J&BFGhe_&u@X8FJVO8g7*^p7+^VUxej(bevYg-8v?ubNTY!F4~oOvoysJgO9`xFQxb9t^dy$w6=NoY zq6^=y0{HX$M<225-yQP(M}>oOToCAA->J29TlBqOP7hzyj&El>>|T5%o`3?5Z%?6_ z__jSIXaOqoo_S!xFjSntAvFw8*~?ry2+0SLy?+;u&^tP~7hr?r=2?S-mgj%QKe7uG@+P9JBaLMXohQ9ySL+*~YMi_z*5(lL7!g+;9h%D+e+hLquFNd>0DO>M&;2#JbEL zci`zDWS5iF!^c4lD_b=3^f+49?Rpy3Zl#LFf@?y^vbk9|gH z{lX;_=o5e}1}?@$xXoCh30WBm2iuNAYE6=<(x3QZ%)N=Dg=y50rx^0R2HO2uSQrbe ziZ1LChmx1F4LzZ6M{-)FMJ4F`a2l>U-unmyMf*lav zqmU-)9w4~h4Q(EY=&gxfDk0h^M(LhQlOl8sT*C{9S@F)2C;!lqpI?~JOH#YD0>U25Wnz6=5xxvsHmdA z_wYEBq+;Z@EC0?M9-h@6Z}B3+*Z1EaZIh(*SX5Cm>T?6(^_BzGD3Ya&XW)Bqf9sS& z(seTPJYGE|YOBu_q8+lY{B+=dhc!sC&Bw>OUA{~gMFU5SU0kfPrKURT+h~qJOXF6Y z$8vZD$& z`xqhoXuus9caFqv;!-(-N6@!kjGQkdO4uBDB3Jc zq$$iaauW8yzz(e5|AQmQPKI3X_?JG1{+j-b1uuA4->w00=bd`GpaZ;vR5HN&pqc+g zOckv_l5<96tt=M=Y8wuwHFccf1 zA9G{K$GR-db#&ihN>etcbUeRdd9()6!%jcPL>{I4se`(|E$UJdw)hn)Sd$vUn|3v9 z9)TqV#+Wt6ic}Ht_9Et5WYJvZG~PCvvD1(6#1>kc`1A9@14|07IB<}CuLR0jHHR`v{hV8kK6yqwjQ zBQX75AGyJ<-ou5YCt5*59lM44G|pfY31#rGEDih{!va z5D>G@V73bCN;2-a)S0Dl^QgG#QwHWQO9EViesVU~x^X%}nuqTvXH5ZGkr_ywW@<>g 
zuo8@cEUHlSmz$=Uaf_6_@5!7fP*kDGW<5zFZ;`Us;ZH0!z61G1X@NMoarvPz8>Ls@ z(-v6b7`1`{I{!lGdKe_x(+SmjNRM9$Mi8PrDmoEYo{QmSMKfQw-U>^wM9f5r-w*fB zy4fUD2ouw+%DpO;W?82a10tEimKv*4G1}T|r%KfRJd9{Yo;mEGo5 zuM?6BRu$R!t4O=(9?itzFa%)s!0D*d;liU?>ek*+PunNQitJWu2GlzXz#caOLxQ}Ge(ip*gK?#j z!$zVT0*kJd0|P$!`xhvp(ZJpss)|5G1Z?LN`k91UrrFsLGZ~T2Gh7Wag=4SKKSB07 zXDR_%D=CeJ>qz5t_lPeDMUIvnuU4B-BLuDm#7d{2BRy3-DX!Ma8aU{HG@nZd7}*8= z(zwm}s{llJjYkz;YGzMH2@1mSzF?0MvVIAZ2$`og51us7smEphh^9^L$8sefIQ03d zNu&0MJpFx@W-#w!x6+$TNGF6u!-)3sR&m8hvbc?4x!+i~(XoATXbNabco!U9`<-7? zhi-iOVI4&~gPXI9gc`#{jMBPndXk6Hw1FFJC0SzE7qXw!xN6vd^jZca`rd$NZn;YY zR4vGuyM$rnH@*ZHhRg2|HqV_u9v#5URG#Qg5q^iLs$m$@mM`}2~_IxqpM@*47$T~jMitIhZ_w%2phQA zH-JPRke}Z;*Y#6VkK#>-*UyV0+diEPoed7X6u4S$GFr9s5oKbPH+N>+^JxAF5LM*TYkqh`9uQe?$_x)iGtTxDVCK3bznjdWbry3JA;i2g?Vz5j-v(Thcc$G&BBi?kT)7x3ivm z!<)Y=6WeZsyjpA6F`0jyUC`S*vQ2fl4JaGTQ@1cMhd8L879W;`(7`&h5%brV=Ayfc zYp&b4RlpKd&c9mndrhk7Bt=|>?Jx3g8(O<`1*}QV;yr-MRm+``^z-f-V5qNj%dajP z%<7DBZldEPN+}4-n@z9Hql#9Z>y5LDoI+YNs^A-!Q7kHPz!XczlVIKu6dG*_3D@eFj+B};a!ov|E$fRW0FkDzTSEm{e)5Y0%FRtY&;~h4 zF<{)l^;uXW?)-~7Qrh)v5|&7smAhe;YLM;rD2vJeg^&l+OH{{WR8D^dRA{FKH1>Dc zNwgyt$K*DV3$gJ{eBd9b+*{V47gk3s{4nY~_ohCC!|!=N&s*c#$Go1Ek9;#o(Va2X zY%Fz)7z;OMy2ZJqPSva@QA`;tq)MRQ43w5|X|zy!3I!sy<2$AOt*RMKV;72=OjX7Z z<*XMJXQoq|R`nNSKO1Gxqn^QBR%7_rcw7XI-aMyJlrq7T979Zk)rG}~QJ<(4V!H-;zHrO2dB)BG2En_NpHZ-lV>hcqD?jsCXqU9*SkNDM_a z*75fst5y_ag4?*a!}5*e&8ujpb3hrXwf%1}8T^6AiGmL;6Kai`kI^-Hc6yhaVBqY^ z)fQVKvy;u(4zuPy`8RaW+HjIvfxTpAaXS!$H%4Yc_`o(Nxw(7ivTYq+E6X>Ks;M_S z?;>#vQj4V-akAVFSk%?Pe;~VqpO@20oi%pBfAayp8T*ym8+&|gQTM^Kl-vKgT)PGC zRs?jfY*gGG-N&`h-5WK1y=ymVwKG`qzW{OZ9t@ZJpnbmLVYWpW2o+g z9I3u}=PAN}I#P0cI@S5c3^-PeX72@NA&C&{@O^r1)B!ywI`Lm;>oT8zu{>nEk&(7^ z2x(0hgStLFw`JM-u7bMDl2Ak_fJI567f>oP`-O#XOHrht1?clrtbaVow4KrJ>%bJq z8@Z=G;M>;Y2S~%c0;{`0y*(Ix1$wLh95sE!;kco?ML><#P}ol{ND+v{$DF8 z<9{HNetwgIz}CP5iid~(zlxWbnK;@1EnfN=7}*>_?76AkWrQTCsCvFMd{wH8M zE5zrc{XP&rKfKZ9a|+e<`8Yei0^j)ic!P}@yXGun)w$!-_2%{fs}fcsM~HwHMftp@ 
z+3*^2wT5k|FuLjacJJzY?las9PYf<(1hK=I##tRd_GqI9oRqfbnRj?QDLuZ?+i-^W z<^MMr2=igh5WkMJa(07uEt8$l^dpVujiB?9ll1yM3E2lD(|B;n^kF-ODCL}DZRVXQ z#E^-I{a0ORwXo|exn|1FN%eJ(?KU4{((XHfuffabT}T8PZs~RdIAV7sr#wjdEKw6X z(E&zvW4Y&-=F5zq|I60Hj;+JZILHMq8V5|ZQhEO0rw zoRL9>X^=iaK#53TL@b}LmrXr#JNryWoqsk%19Ble*bhi@!m_Vq^Eu`#Nyd@OCv5QL zlL^3cf(K%61MQbh1dxo6-t52inN}M8*|#3}tEr!FoO&^S32YHFu^qSTzUyO{@bhUK zJl+Kg?Vx|SA6^wdhr{T4^+n~Yjo9a?7uff=Q4)Z{Dl@n$PBBtG%QHlD< z1k4N%mO})@FkLDj7OFHzWH8oM*Do%Kp_h3w)5{|L0%0YAAyyP^l8x=*%@l2+2nNKR zqSbBQ6hO$f?OEbhxrwk2{w`Og&S;!V1A(nzyy^IMi*;g*P}VhdnaGZyNCHRv204EP zMT$oeV}Rq+hb}b!D5M!)VkLT3Up4B!eZ_KT8At>E z_ zrHlMfVtGo>m0Fvin=0&UJa<$M9sv#DE?+7QBMDEDV3!3kE)le5K&kr`O2;qe<+~@d`fR`n79_A&g-+O$ zj>j-|WRY86!WUe{?MRDA^V-{45bZNAjBM{J8j_usn^0_RKDL$F*Bq&9hrM4z6Jtsm z_mN>y|6VC{NQ7Dh`fc+tEC^NL*D3!TBDE2)gUbvdyL&w)te`bMz0vb6Q?M@x?&5f97eGya+v?0x zBHLgzC^-*jL;Urql66u;L_KpR=sk77Q0TGr9+GD$pw10}=BzV7yS+F3@~(%q!sxj) z7b8J(NihzlvEDk@s9%6($v6)c^D)$IreY_a11i+WDL1P0g!)AT+ft1) zlC8+9M~N>m_03yBa!wCA?rpe(2qjFo{{Z}X~}IWo*6MZ%#> ziDuP@EAT6^sja?a(iVOxcMCO>=fFuY4AOM;hyw#CTZgI>Sl6-Tvf&oGuP(9(2x4*o zU5PWxHr&6#A%h#4k(61I`_;R!buT6-u?;!WMPb;h=Fs}%u@P7r1RN{zqWPx2j zrDmP;ZLS^#knpiO*X{+N!?fpBMWCY@5v!#y$n4#Bg?%klJlJKJqid9%`m&lEP~qZ~ zcLGu*&CVJ}`sD_D=Cs1>PS>hrAR#H+f||r+Jvk<2^;sU~ZCi|0M?HtHx^#IWAQugc z?h(@`(Ow`v?1H7iLo63Vq&05$9_pXAwlaI_zDfz~0qFT;kGzl`X zW>RMp+6fE3O6=b4c~uHp-Ylr%c#lh4%da1Uh{yb&eX62uFU9Ge@FJL(-Dzc_Thi_2 zx+y5$tYN^4J}35u&N`PWLP4a^+vcLzh`q*OQSA1>7bLe)4dSf`DhBVR;6l-1T)!0y z?c1#y}J?aZ z5*6>Bb&XMO4*4k7&-VTo>NGhUn2)s6iL@K#6$wO6miRTW8y&uIB0tVaC2QPlbkmPH zD$clGHwR?>2b2O4tP2a-+KIBYN@@A!3RbzAmC1S7YlGKm zHYkzjWV|Pcs1?g?ETh-mgZp-WYYO+0ES`p6m9x6&o}2BhuM3JKElWNT7`SHT80`f& zwh3UOzYb68z*~G|OC`LogI{se8C->yeeA_8Us*pBbu*`lC_&~M3QYZ>7fMs#`UO`~#tMQHpTfBMOQjmH@gC(kb=aJ|ZN;g}y1Jq@Kf7q~H^@6n>ZvXAV5H zGQ$v+^`tVxF!4*#I+{ej$%FfZRG={46ulxlo+yRn#Li@S+zpkd3VxQ3gI~eAEPn^f z3@1WJ*U;KGqoYk^--@(ic?LsgkWnCkAW)BGdTv(Y`{pHp08+5a+jTFBUivUg$41rk zve!%Ap@3&wA5GOcGnFQL)Pjf4?ES-1z>_#N+OZgL3vSHk)ZDG{XyD}OdeL3ndn~?3 
z`l8Xh3E8+=0v(s4`Qr%{Ap;>f`vE&=b;H$4L8vE_KFTOo7dl|Q0vF*nu3d%Llb@WdCUSpVmdn=Z=@B7P*Em6 z_)%oq{<9O` zzo7sB-QEJ*PjTbN9K=Y#{*Ofp24>cuY0aM#-_QQQPjBI;*6}mwz|8*t2hMP`a@40Y zGX5C_wKF!PcVVDoq<1j3cQ$u0wl=nLa{Tx50_VTDm;S4>-p?Y!zw6kww_-Qj;(c>_ z3-IXyr5hu5`V0U)FV{+W4wnV)u}C@r;W|B2yK(G%eJU2KQ#zKEQP$3LW<0D0@f49n z(Z(xuy`fxEe0qW3sXaIUKAn{M@}#4LEZ^x`1mS*v&EB|t;QRdjSopZvyuSJV7+O)h z;|6f*)b(|HzdMUv6mB3Jg;>;|7*E>mo5!8et6M(CEr9fT$o1*{2)^FdU4svzpXW`< z;dzel$Krn!B(5LyW=Z%9zaX*wjd^vw;JfhQ4&p_dByf{&Zhy0IalDYJgG88-Rvx~s zFFDhe`7Lv!$(Ex47!C#Z!m^tiPUmK2$LJu;!{+93^h?-p%5pab)&>1HLUWV)kpXL5cyZQ15=Nq!hubX{=$F_}ceT`UtIg+o}qqD!z ze~ur>?rWPq?hlZoI#&$Y-~yq!4;07awAQ@$rtf23R3v9QyFTt)x7e3E?ys&VeR)xl z`^3=Q@}JQ^AME^Jh+r$*4ua!Cptn~+NP}5;<3pj>1OVzN<>ZF;gbEmqFvpz)%O8w0HRtHmIq8zK)lM(u*va{``0{2)YYx&*o*XvwzEvLxzK_F+yK? zCY?Zw962VRlRB6W4{#7ahB^4Qn4}zJ#Ut`4NVqXNxEfBb7|T51%@x;=YT**OLPxpY z0R(k`oBNRaew@f{?lc^)=^y3%rv% z^je2U!J6&QW5f5C(7{NE46l%esdtGzTY`b;lFWixUr{T~`+%HPF}u zJxok6T{!XzafhX8Fkhq^+&$0m3y4S>VYSbPnfc){h)){d?`iyz4G(n%Jwsfhi2^^E=zI7K^u@9?X;Rq)opaI~=hC4AEno`K3YDV~&i3ruxdFsdn4rlHSpiUI%S+B%cR8#V zOI0^I{B$f6)c#1KpTtE>9S}_!a5r8D$NCux(j(b^ebY&`jS^DA9VRW32sm>^SY&+q zw!P!Z`~u^KRVqwEdY*weHxY_iJ_OS|4I~_;`V6bS%&$-6l>~h~8{1Go9-luZW%-^7 zCSM#u|O4E%r5H&)Ia!ZbvkVgG5M=j{p`x0=|onF5; zbK`TSx_As4aG{M_eJz!Q6rSvT2?_}6m6K(E@M=)xZx+gB+4qiLDK0i-kn4dOLkmm# z)HtOhaZB%psy$F0>JN2Aw`QKtSedMvd^l8<_j(r55atzXIkK6td=ySz-po@UH{U1p zy=gWvqU2WL9;`8$^GzOK@1YvUXL7emRG$JP< z!qF9FnH)#MC=`mNdukZrFp2!aGWj~Et>HmdFlHV6Q+ik2#L76W=L~!Ap#eaYH&L-s zUnC)WC&WFsv3=Y>qKO^2bU1o!YKV$1L58edhtt`e01a{zmIs=ooGN|)#j!B(7z`>~ zgsT6Q_V74l&jIBpVoGElQ^)9cY1hmLEiDALyRTy9>po4gaeWR;vS&tC7suatC4aiP z#=CUPFmultjN@_><@l*H0O!et{wLAg&q?W73Wk+Tg~t<}al&muRi9LiWeAbM@_K-A zhOAE88V%10SOR{ge)%|HAidOZ2;6PhqJ`456(TDS^n|uB-RDCYxMQm*Ku|HTMHLny zG2fKM`Xr|eoVb1mA$hQFKua(JD7$yf&I7hVN>+7+eHK$=t7Y;F)hjB7`>9>DzPLy7N*H2c9A3kr=7NTvd%@B?6|b$xW=H&w)Ub@&O_tlq#kCFffhj zQ=Q*vDdkuC9blf&g`gr=x6lVDh>D#(+M%k?{fg6&<4e! 
zS$Hgj6U81ZyOPg!!WNGbY;Vq7A3<&>aDDt1yQrg zxHKh*l?zZM9L9lXB_{nDo|A(Vf&rkLnsG-R*Ko8+{owV2W^R$nTLdO-2_^8o10m}u zVUVQbrf%XzmrM93J{SF5s=9(6py~zG_6&3+rFGo6t9-64p&e3NAwlKFZ3bPfB=il^ zN?Y@qQ&;K`e??2lzcM@)97YQkkeF9rBN@#MYBLTH8q0ga5(9A5zrxHK6t}{zdcKu0 z=8=0La8BB3#k@puRHbBY=GDNMS#d#Xs=Wo!5uohMQUX#kPLnM z%^&djzg|1%wHwudaAb&m-F&c!&dpI*+61ppIS3=-95S-0GP$Fe|6*3u63*DhM;wMV z)EM=;C#=KzJ^mIRBb#<6DZj*>FG~=qx+fKziWgf4t*~ICM12l6(=k*}Ml+wYGO1{g z`_rJdAS7dp(1f=Zut66cLupXz^Wj3HrtK8=!36Q-fu0+j@5p*LbaAuz>WFHk%mHw+aB?X>h^D$>0^Bu z)_(TX7*6PJ-mvk7a_@V#<1&eOY{7IkdE=a_f0Os*fYu5ztd zH_QCn_GL^U6dO{hJrxeh%P0p<&qhiJFcywH->E2CCTwB$a70rc1f3M_VutF|7$E3+ zvIx^$aWyo?mR~C&4D;f0Wdr8=aGP0&vSm7w1?aR7$$m{tQRuWm#X84E1On^;d{I=v zh%uhX1L9zLM1FjkM0G3P$%O?&)RjZHw$9ZxQMlG?_9J1n^b+hUkBOe|9TvuOhKURK z)8ea&as?;GQ{5^0A0m(GOXwaYda&aJLzg_x(D!OaI`qVXr6P0Yz;bz+N)Jhz1OW3` z%XcX1d~!ylMvdzSM3mpT?O%!tg?c1(n!wXgyI6l3B3eW(qXfwSwcD#0^_CmSBc|&*6=&=@NCW5vG9qVi-*y zwko3@Q3Fu~-&VT>jen7q)S)dmcc%td0uqb7V2GjdX3~n+l4fb|DU9^nKXvx1dobz7Gq3_McBdPv*?5Qou;ZD5dUVoLfU9$cEE=`j_h9#eQeE)JTkY&eMa2yOo8t;F)wG7Qte`OZ zx~8hYY2~L)M7dTg$XH#qLA;)M@H=#GsF^JOOe911%c~P0{g(sMAkM}y6Urwzfj3ev#yRX8`>*g1y-ppT>trH_} zsuI)T_y!JRLp?LW4oj6-TbV=NA((YlBpbvR;nJwytSVIeUQvPN36T0{EXmAD-nat< z;M%#;R<2t#y6DVB3fnIgsD^>00+Wk2!667=QcPrvX5Z^Kc|!0ZfcZ+YXzB_oVntc! 
zVe45uEr}68i-GD5a%!LY%6*Cl9cI!O3W;vYFpnwjXvrrN-i9e@;Ms*995-n4RAbx5 zFWOVMJ}2_C;uH6cDqq6eh{DRgZj80E+FF!)q+r-2@X1jQ3`fMH1p;X(0)vdN>D59+q@mqaA@U7ffNpFBFJ zj1-}E#lz8|Uw@Xs7MZQG>RFWeL7ThJ(tj$ve2Qg60iBa^y^?j%K?$wDD4L1|r z3aBjLHV&hzEPJ@P+j@s=iL6hx$V#qOj>c;`-g>od(p#thxY8GXl;;yS+C^QL;@^V-_PA!9NGIGD}j zf+EUrjD;qHYv)}tvI(C_P^96Zb~yYAsK|52H#Irrz}Rtge7Pe)O{RhbRe9a*%CPqO#ZMf3UW@r?~6NO zLq}kWolqAXKN309lPXt}=Zf-VCBwACrT_W+tNR>zWZo3%RVIa|{u{Vk8ifZ)={T{0 z@oZ-o?Q&MnWp?7)Dkn)^mEavfNBHZZe}q$h!%F5uYBpI{7TF@~25q*1VZYY~#pO-$p>sEuB!})Ruir|{q00dd;cN7v$l2uq-?iOW9w?{-~oov(9rEj7pmK)RMD^#Z}4c z{jenl-AOUDQ$p8q4{0LAt3a1R1TSYogFLTfX3}V0&Vfeq3+DJ=ccasgc=8VGJpt8- z;Z!M^X4LcBoT1N8Uyx=_>Q6fuKHFQgKF%rA(NwD9$pDedP?^07y}xt2_kqD%Ta8TN zYmbKM3a$8x`UGCwZw^c}?FuN+j;6t}^`3p$yxkFaMuBsi=Cj&P5TE|2e1b=e4nr%S z)P7*CXw0>=7}xVd(<_5qx#!oEwrng`dGpvTynC~?n;4+RGflpX7Cq?bj@;@Mw)d$# z^`oD)Gh7a}BY=Lcx<4-Yal=g@sK$jYzRF*&7>}JRyj)$mnVmQppIDqC7Uey$mYbe& zlahC1XUtteedD#ehiVI~i#U}e!#Jox(2wXoJB^(>_a9N>WjM2SB>JIewx9|y3MbTk zX1hu<;I{iP@9Y3M<*T>q7!-|)^bgK>H0;EP(*}=5zhcX^#5iaD;GRvmxSL%tl@E{h z=9Qi~NWRUN?FUa{JUR^`J`!)wi2M>w*D7fhqo3mR&33Ihfuv+sL-kEv5}Iec zUG*~YWPrmc%SqVZ334UB>q>Snw>j+da1u0D(t71>CQvT8I`BF0%f$MZ?45-#&a4xj zxW1krF{d$R5+^^SkPKT4bv^7#e&1SN^_+mq8@>sYVA-`TxxCq}_~+FG*O!ttx|Y#p z;x8m{u_s{|z7kcNoes)&9j!P-E~_^F@Z-QZgyT0?s6m1AO23&xq^q~R^g@~dwOKW-kLrk{bu(cjmX(ocpCy1bNZETmhDmh$Q|L(ApcJ|r? 
zb*ipnBlh4%JKBRgMqY-jvo!gM0UGw<3{P)$>bi%q3W~K5DK1w$cg1^|Chn`}F0}bD ztOY!2X{weMIG7Opyw8s6AIp7~8hfnG^niy`6?*^_n@E@9>rK-*Vr;T>BkgGzLAhjM zoL@agcygr%od=eamD&@3Azz%3J`U*xni_w5OztxT7HSpTtTI{3;_9jPgfr5_G!_(M z_lF6_oQC*XgWo=8vaZC}oo8y*+{(A2G%%cJx~S15_QDb0Fpq^cM?gaT&74BamK$~O z@;T95uV)7+NwYkmO#^SI%-2?m4z}BG56j+{)A%Tj7enMGs*o>1EzB=p*rShK^0wG3 zskncwFfS=SWOA*LvT@NP54~BMUtalRWJuZt&~S|<%Cn?^f=O3}7{O=w6KnFEfg>M> zmqPOwfK$f#9SohBhU*rM_IkWFt`INI-ylq$O#@d3Pp;W1cqD~vZe09SV>uJ58Sn(X zRX#joJ&>k6P(EDP*DVEO1B26QUbMf9iDefSyz0d!y5f30w92|^J|VO~IGm|4u%=m%oo*~m#(|TOn(V8jyBl2k4qXQIk&?#F-28aPxLtnja>lbm56m+(sW;GS z+IiOTR?sr;rKrdQ3wkDTaJ1>}K-u~~ulNU8oxdXjx@k7(Qs;ABr(T9L`fz3b0tF-K z02Y%O)xF9Z+fFsy%F^7#j2PNC3FKbOrTvY9-TEH_ig7tiA{yJCk0U9bqr5u#=3d#G zumEqX`qOWLU8-?5na`v?lJCPQXwgvQVZCZgXt9UsW=az0B>mZiG$FqqRgGByWrdwx!YS5+aD~ z#I1e(;xm!v9D~XA(q_}x=Qb1{&C&**oT4F;{BU)?w#q5NHv&1HP4A70N9T)Ep3`cq z(Getr&qE|gz1S{r-I1^-T4^RqI&gkmGr6EOfB(5!lI{zd4;DDf;+mdp7Y4!^RV&4gs}>_g{0jwg>|-Y zXSO`}I?r_sEKEHs?Y1iz*^g4J);|<*o-lqsHRjz}CWzf5O5cHlq_Ln6m& znZ4uAN~hWN`sv!}OIxegLM3R9=}Fea5zJcCJ?HNONp;5ii~<}7TByMdAIqAuD7l-m zFT``4=d4vj%{Q_KNx7)p5iK0cwk}XPMVTL zTIjAwlfz5iC4TMnR(&_XWYJ)AJ8|iv2m2UzL3&ZsL?_NTX=oDtQ{angw~%!WUVI`U zLHD($N3n0cHcnL)`+Q~3kkMMBvUW?!clisuX64LS?;}cdJ`O9GGfpql?pAx)Pk3fBrSc^`{})%QEG#bktg+TP`zb}7xl}JQXkX1JRYqZu%T&`7T}C3Jq@L`fA4Iujusd@KKcKzp zsgQ$gRR#O)`h;hv{4v%r@X*Mvro(gk54h z?gX&dT^+LpOteh9DXiL4unWjj7*6fF>)liCB5K=UArk6l{aX|@5z-vl!AkeZNEOvH zx&&k9^qNDc+5J8CW8!!h(P6H2rP=Gcr=}m43(CCP&gXz-EeSj<9x*|?&c;;bRM^TK zw>|Vo$-%eF-aZ&bmoAeRxse(shW3~x)5RErITMh!8A3SsWODdpuJ`yPBJU#ecxc@W zEVT<}$sAlLTvFmj!?pId5lo$qLq+O5ObRH>!>|gH5J4F@8`Vkw>WnyXO(oP<)CSv^ z`p#vP#?+3osJgXQR6PArw{m zb|1%CB(1ua4j(;kqOC+LU#*zg3f*;1*JG3`WT`OwL=k2j~soV>wC&gJ2 z)9VkRWxrR!yLySYz}ut9IxCMQ@U&p1)Z3IQ)yfAjy1IKlW}N|XrJ3j-U0;8wdT3pw zto16Qx!L7KfJEnvFmM1;4aP2>43|#(roHV%gWot3agT#NWl8Bv8uS-EpSxFc-yFP* zn)X=DV=vL$B3Ku{KED{`KHIp2e~Xu4|FyM-hSe=r{~ON+JsXymK~7XKX3={=l354n z=yRiUy>Zpk>y?>_o7?-@nqO4#(XPYz0qif+*pcvu=q%$#c2%!M~xuQt@WHO!uay--OEf=~k 
zD@t+)dy>21$QECmPOm2wX^fnIG97huJ|Dk^=JIjp_Tx&Xem@Tjhx#BKsmb*LUMH%BqPZ*~kT|tm#TW z=+%himvT=U+sFza*-zRGz5npGMXD7Kw#L1iijAC!Xe;YNPlYV){x=`nCJar^ zO^ufH-_ZGV`oXhJG{rho?u61CZK!vNmp7y};sU7SY7ZS$1f<$lRXUsv5O3aJSh?5H z%4n*tBUy4ompvcj@xR z@3W7ogej0F#rw?Xgm#V7=)@ZqHW$?bA3ATI*HJap%7+5CLYo~M;kOWf+kn5V_Y8Gw z3arp!*yFc>1^qq1ZcJNYt5O=Vsz$l{G24{O@%IG!gF*}G#n(i^4~^ZFu(y-v%O_O z@~?;+V7V$dZlJAwxnDu(Lw`SXO#fbMetR>N!G=CXx|cu))DZ^}f~C%l-9UoF-sS<+?o+|veu{bD%o32#-o7L3&ViJ0f~E>4p85n zQ9s|iBb>j7&0LSAW$(EwK629p33B!6;vBi5cWUbcvW2Zpo@DPY5VuFc)C|%MpZR9C z`HDUe81e*!T&rww9A}^!uP#zkw1PUZdKX$=6~`?AO|PKc`ebo+%TsYt+ooF1gR?4J z-aQEC%sOW2OTLsbza^f|X{YzCvqR68X0A7vezYnPwyS(nHd~jASFI;GcJ|8$v5k_t z5Ct6i1{xHnE16x9dU4|d65F4d)Ar@4l%DLuXfoHhcJ_MH+^?bGZ_1{(1e6%`cm?5K zHm0feeRU{7XOJy5K5awC+tfS8i@wXc`j0sLZ7|xf-Vw4LsdZSNf|8L3GWd zY;5mW$Rp1BHY7=IOi3{nDf)>d?TFuv`8=ueE`VzLg8T&n{V3vYI2FurKjegBf@o`qr z%j5NHI4J5tKP&Zu!>OWJn#7ZTMJ{k zx&HA=qvCI)w9l1xG~}0b5x%j@HyaPfDK^o%)~e?L9Uka0tihZe;cr;`WIZ?+BO)F8 z`r+W5!z}@PZPk!hY;qPuJnUg~jB*uM-J&TOEweE`*79EJ9c7Lfq6G2ZOSO=fZ7 zOS}VFmEi?U<;kO1=b+Vfbmgnqqv~NM1lFTE53_V7+E~YaXu$W!Vsh5*uAfWrLL=9y z=tf3e;CuS4W}G}(o%c!HV-ZU)K82eyGA-<@DgE+8YofV$^<`Oa+pk;Qi58atC$kKI zmt`mRm;Cf%Y>t8#xu@*U52yPh;2JTX7?n>_$Z}?G^h_ z?Mef#3Q`faj+!ndmyf+IcWLjDnXN6^3Kywx4A;@V6f>8`*K29f9e&hTm_={3a$mo) z;C)$@P5c0Zyl|Ifiifh_3+gzGC8d_LcPO&%uI812PWzH+-cONU6dFQOP=S3xk=bxt zezyJAgl78bv)C4)M$gm{+8oSHL#}P?0*BU!wneQe#V@rBr_KcS^v7Iv#GmDtAk25j zf^}<$R53Q*5WzWp?TZ^@*-Fh?ond4F@wo+?>DyYVygGwsX#@c@3@Q$EdJK}QV@8`> z7Ab^h#QD*0Da5k$G)uCD;h3Vl7<=vYR3DR_#dJoPwss|5Lpn$nvtsN~XnPQSTWZ56 zP*!QwSxv&RRs-*8ZESH7CY=kaqR#JxUf&7!c-lZ|KFp<3Ek?OR5M9Skxf=b>cwza4 zV8+a$f8^yED?OCr58ZUm5e=l(=kLaa&BVuSc8P)nA6;?y(B3b6am*oo$f&jSBkqfL z^2yI7oeTPHKJ%Q}RxFCM!JhKu-wCo~^?P%HwxY)QT!Fs!j`Jk)St78uP0dhB)+L^& zGxN|3RnI?~4dv{%(sj4ZL?Y$-rw>jnI=p1jw%?=R6|(Sf&3T7CpUsrTe9FMdMs)e} zEb>!9`@@`Y zYXaN6UI#tD0(YdIElCgmwRE{nmB&f-?DP(O>grQCY2M10T}M|PdPLQtGHANkhO)Ju z&ykm4&*$vnJVqgi=6D5(VAq@&$a7r37Pl0l_L3~Sq!$UQ2VUa%_NPL?6`?AIt}{NITW;3N~Unzk7~J5 
z4>Nl{;Z~#=v#}S6boo?%>b`J?RV%5)_p(VIH|+!_njGQq zI8o1~zU48qDlQTcsgD?#vRTxNtC{7i<;lI9!8asYq~gNgD16!}a}9a0EwX@xw>M8U zIwpHFIVo2WLNICQ6V7$YL2JF+Sg%$Yy63mK8YZg=JUsTrvi*fT+N+wxQ)+Zp-iaRyuzM;!z2>n zM=0@nI=_Fw5!N2XReft6J+%NI-Hl?qbaOWKG35Lnt+h$xT%Jp9Sdh;%_Cb3L74qZ< z9%t^IX$~XaB!6I_Wf*xQ<3l!#Pan-B<9&g>r<&Kt*eHJ!v-4813`9sb7hjCLn{y^5 z=Sfz?)x(aebG_f@pSIv<{yN&|(_$Nwo0sLTEJwG>Ji0W~7Y;)?^v;D=3j(u_B%K}7 zORB0>q3$6amxr#3Nou8CZyCp(l#}-oxELO1_NL(0v+kBbWPS5{$b0nW%wax}TGFpa zGnB=LdkvKIQ%b_TR*W?}_2#^CQAfa(c=6KF$>k6WD?+nDeWgt7LErWfD8CV*W*#wv z2v2IU4w-g)cWA5s+M2JY;R4Tj!9;y7Jr# zNkneht`?1|OLb93s_Sjo3ol?E?7M}olPD`!-&6QHC9pLV^|KMPLkcMB=GSSWjVv@b z)=j(o9osbW68L@e^=IS>WEII>A4D~hQNnQ1Pz zIK|*hcTA4z%wE|d8572}J7@{Pcb{|fk5(wycALF@NOH*ztKrQPuK}aBSKQiF7d6!= z9m7X>aFCmOPvvChJ3EEy(2|F}3?L{QHpjSOf_v^EwL*wq8U#kT6O1o-MhHdpMf`RJ zYgsQ7>t<;0$je)dC6a=7V|a@yQpib?CPwFiAa5O~ycp>$3EyaN&()_8ihW^3Pg-ae z0~=6-4DiXeM6id_#Y?p7HFCy5oO$8nY$8`#FLK41;iYep)`wYeTzpR?-bB`xp34_Q z8oYSV@_NNcqMv+u=wsUW;z^BZ66Tjw%{BV-PT{t%6MI)!q!vWkrkmHcW#5)E$h;Nb zcqQ{VonE!ZuAIQHpT{;oZUJ*c|MI1#J{A5{G;Ic=v0Q!iuu8NST%zr+J&D@QwY?g_ ztQp#5?DlQ2K6Cs@`S|uc_0$*@_U^*K>sZS5?iHM3`zwuXva*(}oWKrUIJ|mAK2XRqY~N+IRu`*HhZI8 z9yED7`x3gCt__PFzLX#CnyYK_NM8zwpuaZ)?5wXC>Z2u@oSUtXS59LTUQ$XHB`-a! 
z_x_^btJ4u!N}T#2j$hwB*ajj&g($jj6(&K8j?1s%@FX%-7oNWZAMG@Eye=_A?r z^GZHBtvj7^YCPQ#Vxg;y0s{=KHRcZT?-|$w;=Vi{dMBzLaEmxx3DOA}=Qu1nd}iUB z$yej~si4a)`mp^Y#*?*CF*EDGraIrK)6YCM77ny9$O0)akkUi~@&%?L!0``suce!z%JP2sHkIrDy{QC47UuvM98Ig|!quNV8Ftb4@Z8YR!x$SGZ%>&(B z79rWh!4D)jZd5)K4aQ}Rok+|u$eUR;S;8i<$K*OS5k7rTC&H;`@qt}Wl%(G1W>f@= zdRR>s={v-zw%dd2@h50*jxyA7uo^G6H)uccFfAR^c=`MyJwCk&t}a)h;$>2XYmBm| z@7+uJFxc`DNyvT0Khxo@4(hyIho|T%RJI2(c}aLdD7P1Uv^{;&)L&@at%z!yd?aB0 zvH9!5QGJ`|W@qE+GEMjFn>wiszcuR?#B&iR@pj%N2;>bM4Gy{81PzW-3G zSo(OiH)d;PM9NZ@PA0i6`tgTvJsmjDq1)ImM>lYh%h%1!y z_WcKSn73V2)-PV|x|%iAvBTG%`a-`c3Bu!vE;_M?n_~$qpEWaVqG2Ls=fKaOzOA?D zQX6ak*g>}Qt)E4?PHeU${SKRG6G?z||76w2ns2 z0!-@!m*t=8h;5Lo;8H|q;!QZpdD61_m=aWsI`tYdwb5*tj$;-WAcu`3Sq=@$fpYly z#Dl6(J_xl|P+?S^qKSK_&=Q;jPQ~=vQ6aJ_v%jD;lKfi(ot!?X~}s0*R4 zaD=1kn9;=`Z@${ZPY2}Id4^Ak+Qi>YI^Qy_*jW?w+TK|l;aT?H-$)kRKWLLX+;!aA zTv?l<6~`y@K8ki;lsDYIIAh8ZZdl;GzkR#5;6=tq_6+F)wN9nyo%P$ya|F>9v)u9m zRaf+;+OC>!)w)r1JrRq3-=e!cbvIWw9|>ITD>B%d11*=ih7?N@avD0LX)W{o?p81r z6*ezJyIf$WL+s}Bjug$ud_|CRO4u0vO5;;+GnHC-{AbQk3Ud)fBbE0kXM*KeA?yX| zwbmDvLzqirF1p?#kC$X4vPhDOn0H)KeH5BNi3(-E-JGn4P18)_N86@p>rYllGL1n| zcS}VSn~B~)7b76pL8LW2COkVVs8}R+gy9+lna=H^twK%VJUn6$?e^uTPxEbu^mpv zyQo(RwwvaXNhIH%3Ta;)6KHFpP}?@ZOn5%zLZ~;r*n2vu{_U0uAGWMDS&M?9{XqIl zd)93lk2|;?4~H12&%4^|zMN?)(`|@|!cM%^-B2xAljho90r9jKeRk8?#7Hnn%;S&? 
zTo;c$cRyEB${kf^$~r@=UKGV2$E=(CtSw<*GUla-*eTfb@B@j_Jnr*X6-uQW zDp2Z|_S33a`!cQPBPv-Y54b7i7Vn#p%K4zKj7}M@UE`@Clha3M36AAtfVj`%3WJ=A zIGzraMw>~hT{%s0vq)Twfl`F0#=P`y20Hkjh@Q=Rx?F*93-yd-KRE7bSw zjKy+(NL=d!iS>|jEiUR1ovftPyGx%ds8HWUBV`j&_E-z`l~?V}idVigTVXZB6&t*~ zts6$_>ParZa@*0bxPvMBu<2bj`shKUPpZqQmkgmP;F9Oj+gzbZqKERuPBe^D4RnvN z;tJ4vO%vAs*r?+X;Zu1hDv9?RRkOlv)DKgS$J=-KKnr-^QgcT+ksMnC)w0C>Q4s_ z{EM73K3tU#(Rb@=+l<;iNXciMXb673Cqycuzk0i&?6lCk?Wy2&eXXqnbX?bR(3MVX znRZD!x!gj@y~^sVB_yiqVmQq?6u6zQmb|V=KQY+sliBShU_oh^Wf>E5n^2HRwUnl| zQrj-Lrx9DsW9yP3NBFA1|G*{6tMD-UqQ2IR7@}_4SxgX(_T(jc2DGZG=cvI$$gEtd zWIlnW9~2+xD;*5^W%nyUHKa8#OZ%DMTZBk(fbFs(DD%k4@7?5{d`KpoOe5V=D4QszpRfxRlFvjkhSb~Z6$&HAUkMF#ap~GP0 zmo#5HKFNEQ!YRJf>NqXYZHd^Cm_Z`JrLL29y}2a|mR{lkq9gkxZ@^UcVwI@)dU5IA zniD3IaSKuw*0a5paODR7yY@n==K4d8r4PQ0 zg`HKfoAu~t^WwN`Ve5PHOHt{V8~GyMo*kbjM%5&)QQN=Ov^u|VBWAtN?A}&4pUdqJ zZ4ozT3-OrL1FEvR)v(lUY7|(UMEb{Q7}#{qceUwyQ$414r(26-tnY{bMP$lNuaBk{c+=Sb+~(~Y^ZXsmg8$rpCpiPUK{+WR z|5Sz!c~6*mYglk8g>26=wmzm?1TsNT#*syX2(%O{`^odmeds7*Fycy4y^wlq_>tW1+5{+bZ)($r3 z5aDD~E;t?eadi0RgZ)uI7mq7g!*ti4Tx);5#1m=z?IxjSaEb6%FmJ9qsfD?LolSQSx@y z`T!q-G}+}v#X;=KhAs{ub}37spz!fk}MtB;vk{qpzB}=uPOlrK>-SiSnLvD z2ncuq?*#;~*wy6p%nbD%;6>CpLGWY+P(+Oz1V2g-{tgTU0k7ltTp(a~`{Rn@)>epG z;=CaEl5L=@I1B_|`V4;u<^Tb&^5tKn-`1y5;zPb|d#pM>;#7$F?Mvm7-TfZ{7me}V%6j?qH^0{j;OEKNuB1^hj5)F1+C0pRCf z0|h~J7O=7%P6`A$f(?Rz4Fo}81%e>@0|Z4p4G4-L3W6f&fq;VzzXKS0-22}G4E{C1 zP&n!1UCsU{0S0&$I7s<${1(sfc{aUKiyw+M0m1wwF<{9lX^Lce^2 z5JGo-FH~HAfe>5={8JDD6cHycaM%m*NN`KZ2|p7R{vNKp|4{qC)IVR9Jy4dD2L!wj zHGfC(a~-L3t|N60Y;FS7Jc%mTvAzD5@3?=BDi^$C{z0g6@qmEW5vp9gAmH_PP~`%4 z{5<9_aW3F^o@4XzTL>T7P3|M0xsQP6{sCz2p90PEYoNLRNkISGZ2C6<{lk26|5Va> zPeAq6x*t1l-2Ws=|NBt=rZMIIsdoYU3wZvq-o?LEc)ttDPyGw!AnEZ!aioe8!6U1{ITBz}GgMim>;q$wg z9NXV-tsVHJXa1Gov1;LgbJl;r-2D=h|8aG~i!fZ@`&HnR&iQwk@WPq)KY+jC>vD0yaAQ@049QK3~Vbevd`Lh*hxPL_5I# zj&OVOgR(p6hW~)r{{vBmYd&Cv68?K^{(kC1{5!hu@8EYNmSBz_8 zf5?*l`&Q$pw&J8b0`Gqs&j9y5EwC{58w)T3J83UhS9&l_3LQ*x9%Y1 
zWZ3XMu82YR|6RELEiOJzD1eVM3gF|E0{A$m06tDCfRC~Y-=Kb!Vfdb#AtwWq?@>o& zBmQ@x{+rxC*!6c z@PdoTKLxM{A)fs(fXhI63~-z z#1F0iC;1Y8S10|-{{`;<`=b$v)F}`pA0-umD+01_rSqrWF!W@cfhcjpXZufO*Z%#n zp1 zkn4XR@t?~0uXy;cW&dl228_tc0bF~Oy8^C9vo63trf$GL&)q;zMjYQ`$qQF3|G&YK z_m~flQ#s(HT+TQAc$C-q!OonFG=9Jke&ZjB;eSp894ElQM;Wkh5IxF<{ebAn_~Uy- z5t+09c0_+uzK_y_-y-(Y2n2dE0{H>4{|6Ds|4`=Ugue|2(xmWTMBE4WA^zng`J*iA zw`d};`hovW#w*{U2|*+q|B?Ln%~b85BL&zS2$1@O|Ncwd{_rr5w$c3|>V=++OMXD< zryiy~d})Fsum%DE_?oE!uyWzpDL_;Qju?kO73gRr5`-8^R0JK3o`H@=nm|W^KG26> z7chLyIepFSLO6cP?LSgR%qvs`9XozU{vHV7uqlF$eUxMKdTdn@<`AAn{WWPgH~zpH zKaxfy=K-^Q)C<5B;lTsfBN)I(>o|av&9SR=ya)k&gwJ0-!B5?vAD;kx3@#Wkst7#F z(JWkGAmy0k(acZaGr|uB{NZEI;%NHjUp@l75x)Tf_-J|~;P@RUrg)A?9pMZ(GskEg zPn7^4&yF|}z`yQ?e^hYyFAVKpn}QQU_|q_t^F)yQgD4N=S2ZBiB9@8*kgW6iy+qsySLZJ3;Uxk}pO^xb%WRTv6ogK}|k0jz+iVr9q!vo-~rIU4_>0ym7C zjRy>duX{pNaNw|qxN<^y^c!TOE>vmYy{@7llx z9p^vKzMlBfJIwy5k^!ow$z1iIh#8>K*7MW zuHz)_cXj@RO5ec>Y5|2=I_sL4{;D)56gXoOXgNS0K>@Jm3 z9^wQ5`}5MA956O02Mn%q5fq%QtxfEV&7Jhkbge*|$6D)`6u68Wjf|}AIUH;`fJXeh zG!)o@69(wLuN|Rh>1=1jX>7t_Z>bN|InsZ>q@ZVH05PQIP96W4L zZYVsHjUZzUG2+y-apvW4fphM$sr>%YcwuauJe+?Bh?%1yl-teOf!o}`8l-t_P`|75 zC%2p1+C!WzjJOO8^ngx3wg=yp26KYhI5?m%xW+;}7AKdLot_CdkBzfF!0yK;_Pa`d zl5l`Q91PuD?QNZ107dol(hxvtL3nuJ`DFwJQ*#Tbxg(dEk-5DsAcV)(`O`}xj5Rv zY5Y{&|LhPw3q3$;?Tz5w{>$P}FgF{NhZi2GBRbvQz|K~W!_matkQ+|p*a`e)JGkuJ zz+e+QZfr#NRfJ3F>?l*#l9haT0n=`ku3#Sv@5FUGlzofxw#%b?r>11K0 z&jE<}FNy<}84wU&`07^#4KoWa3j;1k2#*0UyG-+|$N5VdzgHa5A27HXL(t&jaD*8c zLJW;LoSgs~Ki67N9w-|OI7<6#!534Z43{|jDgkukRuNm zi(S;v-U0p*5wx8A1~^9^e}jynm9c{f7+B%W1q?U+)i%R`B?o`A&F~FN86{N`udY(d zoy7b##XtfRW@NG#@+iu;9*45;$QxlVu)I=?{Djq~NMvku#;ZQ!{z4NZdhQOGjp5pP zl9srG*QNArwRijOX9?k5D2nazJ9I#(@SZv@J-G8viRf5`9VC;8&^xXI_@0baZhxKjt<_hFYS*h-c^ ztZ0kwpL-u)ArQVCFw*P#8ufLNkAQc=VGSNqz-{NvUGzE{vV|K9L6xa>?qd}zz0X9A zw)MwR-1p0&7Uu--P&MrpfKa4zcZgrJBhRAkrHROSydDEdBY&_@^|>whnDZb}CTZ}E zz@dCX)f2oLW8quNTU#4X1$()LV?gW7S2#%eC0fF^VkbR$d(Po%=v>B3#7g{pwpZ@n z!_HF#y^b1EY1~x3_A$2^rP8()=~~j0i3Q@-p37%AKqKj|EpM+GuRRhI@O7q^%D@4o 
z7gse%kowd`;%8HP7bYub>qmjtU&%?+?XKq%-6&U4`Eos&#!c*Irpboh;iktnv{K|D z&$z;eRP&k_#0$KJ4ckNnXVY6HT&4{&gz+EW1&PNCg*ILd&A*-SMAS6BbzEyQ!0oPr zZUxULycaiItc9&-Fx8QhW4MGVGRE2-K1rwXWKvA4{UD`|H1SZ0(lV2KwWXJA|GM^o zan@~*Ee{f+($LX-j3fzsY_1Qq`|9tF^o_NrjSRh)B|M5sTmsoZ!CfCCvmfO>KX;j> zm$Pku@dazNDZk~t4GGT_LF~%28VfT?lGPu>d_LbhO?o3Y?tutzc*Aq`iuECq=%?dh z`kpF>I=claJm_IXF2KgYOUw6Z+#8qYbGkXM;Y9bnSFGDU%)tx8IK0$I&(l{jbcRaJb9RLtFt?$UsrqnIUm-bcHX1Pl*&5BS@U_upmJUYCmSu`-z@R&GYXPTpojWxf9Mr`|%?sfB^{ub${V@eB_U4GFT zB>{3~K^9h3aUG^XFEGw^dxt^{7_1&%k}Xn8?6fmC^$IF~>Y#MNh27rqk(C+WJIA~i zwq>Q+*{)@gaw=j@7U9a4y+*Sdxg{2PR$_6rBRA$cr0<6uz*evI46w~b=aITEd*&?& z4eYUy7pluXG+&XN<<-piLcaV#=_7Oh%=AqBh=mQBHF-z+!`4>m(c*RAiYvzV8lp0c z?u#iGH*Ce_R(x(&5+k}#PImb+IW~XOX|AT=cMb2@RAmp$rOBu_|0Yeq~Y_zt09A8i3+mRBNtxHuq^+7w#Q{0)| zfeF+Zbf2fzT|L?^TSO5M}#4O2`bG;+yr_N1! zafB#=U!KuH4X|d%k*C_pPA*m;bPwEAyv>)5?#_-%ZSE&7xGB_30}|X8&hgAWq=gK?{LZ;A7aaP*(&ujvEQN4ur!-Idm2XA>Ao-}6a!T4J`~oR0owUN z+%dd4Mex9ik!=Z8UV}tK<$?F5l9)RZFU9xWu2_57??694)-yT`A=ztQCl(n?bo)V;yI9S^OnilwcUDwhO#4d#RVUY!H*c;l}={j5zwze<;N?7O` z+k=42l#snXuwMzVECv`ZgntG6!zBn8Fl?dZU}&iV&nW@}in=zEhNi|Q4!{@#Fc=~Q zSOimjAuD4GLlE#0PE8oGbIK*S*#~OFfI%%_HZHI#0t}c(`2PXjZwO39IGTWd(ptcM z+#lLo0yd->O4!j9cqRQ41QlhfD zHo#Ui20$}S9b6H&m0azCmP%O}S;IG*Q8Y9*wRf;{1u+Q$o3$7+W3kKG85r7`S{Z|w zzE%TnDmmKNSQuKu(c%D_!>$V7Hb#?+mq!}}gbbS8JYa2LnmsV&!pp%6yaPNjAc}+A zRt{j`lMA>5q+~U@q456!9)>12m{%J>N>ml`ZO02}Ac(p6$Kd>x@dSe5leSYkqRp(1 z17B=wH&{I*1l05*I_&Hd*W1LN@+DNDYx=74*M#GUzwCWFtN8IYO2GZgQ0I@Om4zCv zDa)OKxcq0NuLZnRPrUlkR{6n#bcFQw<@B2}qXcC2@A%vkHTX$-I8Uw98W=CD3?@FA zTM6psVSK~PU;ANx6m{vmxwnn*Vk#)qr=HruZDKQ44ui9$IPZ+>%z$P0{tb`(^Qkfw zZ6g@8w#8a0Q*NR?gw!OQCHSE!afhn`Pd}lrbZ9?iV^1w(%6TBhcLAb(?op1YW8_>y zcDqAcq1<|6dEbo(uFDrPn^^dt1~rtvzxW6;6eJ$g*i@AFLV;+U-EqQ`MmXOjxM~KkzsyL;fgt4uzjks<7 zBilU^=dy|~vlUxW84gH<)ysSK54V==mP(DwXOrB=%`xbGN#56<^E6a6WR5&XOhj z_RMrV=0wun)$9+B=1;|LTDe8w8~Lq97H^8RJuL9M}{SY$k8h_nKL%C3|1xV3pkl*^Xh4 ze}$g=Ue4Ri`x0B%t}aV4Rf{mCR#wv8NpYE!JpCq8oCaT9yyhVDl&z`@Npl#%JtnFh 
z_CWIpymdUCo|m^Hx(D37-d0{**L{k)|M`V94teQeYH4PYI*GzMR}hAx#@K@AhEzpz z_u|OF+RLQ3-2o!lsZ7X38mddwZshV}zFHS}887!=KCSV}yep%QQeN#%nHLUB8d9Dm zu6cGupMD+J;@tH>CZyc(m)N`0SyKBXE2pxDAF=r=OJX;WWL=x*f2Z0=mK9Q?k#!TT zMO0SO$A!RsGnl3?Q>v%Sgn%2-;EXN{Lsh?o=DGj@eojONQDs@hQL=IcB)s zDwH(reLN2GZXptBaww6Yp3yNO2m0_XMxvd1wx~0#rFEI}Iom5jLuq$Q0`&q_U=j(9 z(hXO}S}ZCQVQJH*Q1Bfo-$;B8^llNRKC){`3`@6ti{w%kTKS^p!W?m;_8QSxOJW?K z= zLgooEpUj`$wCS=WWh?t*m?4UPxsKL4>jB5U$;kJ z^J$ZIOAuQj6mK9)IsHiw6MZ9A^wb#MIjm8Mvsn9BDIStoMQO;gLU!>43UhVHyEbGL zs2!`pZ=qLdY=eo1uT|YhiQlZM!K{2ny;Zq9`KnUF)NpwmvnML&4QYNenj_ivjSBph zw@fxy(^8sPU!4aoUD9C?EH`c3fq7~L5?6LDJfXA{)Awfz4=!mLN)kF;Kvtdhy|bh= z5xg(YcenGlIn>gBUa^cY|MJ}RR~HLX4^t0Uq%bT{-Vf1n`*Emr zwK%8c{G8I}0Cf*l3-9@98~%3rM(gK1?{{H)ql~7{^VDrDCm+0bqj#p)+Olzfzf18{ zlW<{C>6KT9XWkr&CEk-7azbon%jcn5xkI-e7~RG6?aX>sAWW{2mZFAy`q5{v`RO9Q zB#cWeDr}5?mbn!jbjCwbVmgy|ip)u+v|l*wT>M;y!Dw<`hLazh&K@WFJTrP)s^|KH zTYZc*xXRb??mT;P72P!5V}|W^=Q+tUL3Fyo?YJzfBQ*>)MFe!MH5(icpN=|%0+`GN z#aZOp%;?uIk$w*AGpVz~urF}q4sf(39bFuKr-kmUtllB4hL)>;;cZL0eTl8JEx{Hi z)w$cM%Dh-e-U`@NQ)gTvHEX>0?_I8y*t(0Q!ZeJ;#rIk6{}A_%(Uo;uzi(2pZQHhO zo0W=Fv2EM7ZQD*&Y}-l2)>-xJ_w2LpdC$4+e!Cx5YineV)n^~Q_cdoS(|`ZLNR03q z#w(mNd+P^MB|phBGbo~+12XN>o^ov9!)oE~uPP`t85u{i^Q9igW=y8bBE@G;7CENf z5al*beVtrw_gyZ9vReI`N6VPC$cq_Y+LEvtctgAR$o@Hskd0@9lhYq(fi%IEJgbn-GI`$h!ea2(u|H_if!V6<5Q+-X^)&`)ZOA5} z??rPh)j4iGz7!QI-?i15Rm9t&i%X>$mKjzP$9a+ z>?&~s>%5k(@pk9Aww!X=frI*+lCb@f3u`NybN#Z#WO!7%tRE@BI0vEp2%vQ(H; z*>~KORn5!6_3K+Siwm2PbGglYvGJ6&w{+>l58>FuWo{Oxt%3A^wSZTu>cFVk_@zKI zYV6!-@~HmIqlBwyN>IZNzAQw%{b?PDR#ZP}nJjF9gPlJTVNI0wYgE^#uy}U=MpIB* zS<}2x^bChoIk=CVQm3K_^f#HRJHn|Hi!d&-Wa8-j-h<@A>CI1Ew<@~6*zTc3Dkypy z#lBKEe~Rzs)`3NhsggpE$TL7r=D(9f#Wo0`EM-D#%7-GN$7;mL2x=2nG`DD>u-Jdn zHrq7X1lDtlLs1dtcZ7^+N49Rw>p!jVs*m$^1Fz9iVnip&N*yNz$Yj46z~o8tK}I|x z^Q1*~57E?@u!pmQ5GBx)s6e}D#jn}vM^ebC4-vNG@IlULmMG%vtr6hLOaGE#w(=Q- za(>__Vnl;+{svXt=TX?~mAM%ytrjFlTZTAp0Zi`%!#qjG#{z%v(c z!6J2P@Q@x{mwDU#q6@?4D72{DjeC%eS6lrvzd%Ih=!dmuhCE&d?}n%*Ik 
z@S9%lk!vyE^~sH*AvnuHUup7NdK*F%4IqfbdB!SLiHF~)iX&Qn)6WTMzC?jj&FLA^ zr4IDJi?8$_NLNM2jt(gr5{fG;D+eGm3gJAsqy=QnP0Gt4htWX0vGTAK|I~mO_Jke7 zxvH;$hrhC~fyci>*9i4{a?=P6dy21t$J;{D#S?NFS-#l%2J3t`yE4o~w4WAla zO`vN3hF$m`omw894jf(nIihA!xF}Wxc^v2UO$9*H%-di0a`T_rF>0XRoJuaO*cJ?fhqN6(qr`Q2Ahaj&= zf0Vt}9qPm#P$skrznyGaOW<6<1L>KPNvMvx6~hUp$mEzM{}>1+W|mJH?N)%Sc@+&! z2c-%%Ck7>^3yWVY2wHp@s%1(u^7n6HF1Ib#$&b?4L~wQ2{>#B=<$lVIP^7w$Jv7o0 z;Eebi<`LhPki?~nW|Ym&TWC8MwVUAVVy+NOCzWrc3c80Ntdp^4OWV$|;jh*EEx7th zq3v^!@0*Brnl7agJjDu0Q*!ve0IG z{mscL6p7ky7@A5=9M~%m)=_bA7X@8L!EGS zT*df+9;ji)j%|W)(j&Ad8GM8`?PDZjx)K>(JQkeR60zn?yhrt1gp+wi+z=|I9DLMw zRWf+f%Z!$bpSP)?ippmy8YJMHW{npr^*HYm=+gDV5%kt&&3zdbbhpONQ3d2_ttfBJ zOF9;B%_*>Ju5I|m^yM>L8-6DA=3S0BGa>BYvT#t)(31(`La`3PCQZ|-YPWijGt#O{ zvx1k5-VORk<7!l+mth z7a6*&Ht)n~u*8g&^Y{fwX}cq|R0O6kNknIrKzM9e#r1KuE>inD>Ve9%5cWDW`nwNO zmK88?+7=`oDSaq0eyY3bpBS1{mp$qm#~hw1vv!4;=+engBryull!;&6u~KTapeN!E%uN*gLm5)+Qa5EGtGj*G%F3CRM* zOa~d5t{&7ci!-2a8ZWGYb%0LeWMC66jB!M-b~3P`98CY4P6hDB0q~|;5%5Mj&OZK! z|E08)gUBN@8xK-LrQ(TnIjD4{LG_~kQkhneR%M?eoGG4D&dz9u9u*ZIdzscnz0xG{ z^P!&vNSp8^aUVHbBv2-B%S!B(ba9d2k783KugDCs>&pC6aJ%RXF~4dHbl@7W6`A+v z8Wm~i=LZY)ep|&xNFS2>1t1&cM#wrjfRV|Xk9=ukn(&kl-u(%Kc~F}fmA#|@Z8G|H zmq6S4sRLtXwrefiCweKuFA4sQ`p>2ZOV9AOX_!$Ak$q0H?TV}ICqv)1S??yht1N;m zfdi2r>bWvsw!Upu1SKH5o&F!~f4+D;K9~eHeBlP(QqOcCHhy7{gVY2Xd)aa`CzbAD zEFFt6|9OjXt>@n$kh}+ba?jN4d2&lkGFuX9{gUxKd-~c9J(hgv1>LJ#$DtztMe`up-@b{x;2=%;L&5Y6II#iU z>+FMXK7Tsjmw;vBMsD~%#Qt-HYTfzb6`}QuRr4z!f$jyf;T!%w&kFd%8{aE>z%>1U zZLh~mXzi%LQD~y%Lc4y@rSSsT?uJCbx9f9;v=T~WfuA25q|w#>&WIYqz)v_?q&ccv zh4ADCqyY!*jswCX0fA1$qm&_`Q6yGKyOf8fC^3aF?qjp&L83vC-|qKe128Xtv#r1` z{M?ViWwYa+Vo$bADCHx)>fGyoT1j%1At+!ON;IqTqH1(Xsqby*b!ma@^l;ESfAM4F zUEj$8!FD1|YTGy^N}^z7>{0pMI4sx~gbgg2Ka3g%2mg&RyI&ts#4x*E90%6?II2C1 zZibtY&*j&L)Zg%MN&f| ze9XxS@i+Ce@vle*Ld5mTmGq<&RHVg6SoiVmT!KWD5I?1nC&5)vk)cfzB^CHSu2T6| z-mN;jDCmrw5?JV1g4w;RzTP>H3xdv$`FtARnGZ{&t8urz&kqWy_D?}y`pPu~_4qTP zLQcka<^Lq&8ptvssHw%j)*<>qH9-0JW68pjC5JAol+HcH;(lyqo|C&ZQg96=)^;y^ z2$ypW6Kw&;tcA1cr+ 
z1hfrH9KM-1zf*S0@={$T5+or^{5phSd*6j7IQ_HW{|r_aXo#-_k*FVN=wJ_=hIEBL z1fxliq%e-)+FN;QQ(MqkT2VB#77`eqbltS}%@uEhPfgj*8YE_<|6Us`QGQ3vK*_h- z!&yjHsZva6*QIP`FU8TnGkZ|*%xRd~EzW`kKb9vWAt?SPrEP^$Whg98KOV4MvG{sD zHmrxDX3=WO^0eLUMPK_S(HzAAPFd5}7^%DM-U(Dp(tAJ>*nNC1`s940+auk_g*ssC zl0VoQDgqsEt;zV)2lU((m9C@Sgan$tr!K?WLmh>vOf0$Gls!Qz*29Wb+^}*oOfw>c z;eXuJZju_rW0=){mr%>gGGac*Vajo+n3Hs?@~&#p5vX{=AP8MCHZ9B$#sosC!%>TW z+$zr51;y7%iWDgfijxrUS>aN0nU3OF6?q+qy|nM|jo0+v6!u4YFQSF~$c^SNSW#?P zM<1%^zvo3|z4$2*4-U+SPAEYQ%+l?IkWF+I9KPTkSIKR3xYlV)vUbt&BY#CU>kq3} zW~RLpRK=88Dz;uDlOd>bjs=R^ANz_-QKYO`;N)WY291_47wMZnm#rBYx>MR|X3JSU zUt7~Pr{xDddi6~vTrp|eI#VBTR!cyY-X%PPd1jz|z1vQ1m~n35hHN8}GFGH+n@Oa= zBdE%vJRRvwFYOZ-OJeg`@9$mwW+=mKW2NGUpAWgVy^m|_a<1d`oTfOwOL;-o@ zBKHjN=#0jG?AZHO+-fToqE0>-KN$m_lnw^=Y*$kF+MPYh-!o;XSALJHYjnHaMzs34 ztn+6YobTnZFDWI?eV_{%JrQ%9ae*8?u*bzG>S~ik`$?tCy7@!DQ_sLt9!L|0-UWfh4 zDb3D8!KxE_ybAc1-YGYT7i*TPy*KP9ANRURZKc~x+Z9FonhWdP?7z;dZnzo>yZd+A zxI2z#BXoUEl{V1kuBK(BWG66J?yL?}`g#t#-b(b9Lm5Gsu(hJvdMqACCMg4Vumkr?kwO+Lqp`6fuip0vTdJ}2m%Mm6}Le3 zl1YY}B-3CbFmZm}8T#d?R)UfCbk27m|yfgGvX5}4N)lsBJr(Ha3* zViJKn)*w7QDoPcTqhe(zwh^}{k^OgdVGfTk!#2cK(!ks01?_2wycGp z+zK}+&2hnFLZrTk{$z69IQZ5&g@GN$KD=_|l9R|C!(!Jlo|Sb6obK?B7xyOFPsioW z+be^dqQE9ssN4k61o_8G8S$SEj3_IT(u4!`&oY=-ebA`zK;ev$efUCJs`aDLke>SD z?(X&5b4F_YgzuQp@Zd?pVTzh9Hv0(Fp0}F&`wo4#!~wiE`zD1NrMmSiZCH zt${jjB3m|PR2dL)Bbf@3MlT%fFcvo4vDs2b=*s7@m~`T(uN>exrCVR#^04bQG&}0b z$_g3iX8E)n>p==1$>sXK@8$$8D$o|sUOre&+$X0lKJco$Vy*l@x_$@M46{xPYcPxp zoBIh#fCL-kx2EbkG_zP)F-r{MtMBKiZk)eayVz6bJ5Qi7?n-*VT4Z8UI#W!OJFBuJ z_4WETB?pJ_{ieUth??W6$ZS#d(CJrpCO3MX{KS~qJy8$J5_dN8xzJcpqB~ODME&Z- z0iynh-MpXhz`0v?%N-(-cHcwNKx{HYbjXpj2RIVy-eDX$_IS=f` z;?8`F_RedQH5uZw|F5kAYDBO=o^NwQU}^6_h;fGX;LD}vb(+WoW(^tDuVL3nL_#N5 zs&Zyb*0WI~Kl{uM_~qu4BN-T&_3TG)o7FpHx8i3mem7Gbp^=|d*7`ajy^Rl!9xIeS zW!L|y(GfH{V=y_tLuW&K+_?9=XDv1a_Eua5os+sm)S1}WS?!CSROf}GO$6nz`4p@a zAvK}wQC7r2n*iRDU0H2#Kkpg4kn58y?KSO63!(jZt!wva@#v+;n1aGo7w)*1BO$D1=Ug-VC6a42kgK!~Xnn`P6(Th;`RLkyI9E^V|Ap_Yqe<(Sgnc6vt 
zZZ-io7nVpHHyTF0G!__lT{ABHY`bY}sLD(?#+GlnB~~h;^Zfl!ny32rRSF6Luiml7 zx}6o*JG_^dn3w3Ou6lEEY=xKRJ7Ty=^x##dBw&0 zwojQ)?-ednS!+Kw3&&1z)Q2-!xsp3JGxDx;EwK($o>$LD7p`HcvS5wJNa6cuf-W;v z92@3azIzTnJa$3dWVRejXjRV|!Q7jN&k`ZLZ}3gxm}Tf#thTuSs#aMm5ZBv6e7b{J zsm-w7A6?Hd(a9_T9=f5}n0kEY=^qbIsHJni=*>GsI7 zjh}mRCW^KC_)r8H3WZO71u;7%la~dVFA7_eH`6U0(Y#HPz-h;~FINol<8%FcRrT;$ zyh`I?(MpW@zSOb@_-6by*Xzvp%fk1M*ENKvW=DdzHxYs2>;N*Kq3&<9>eS`-3gLxW z@P!`m7?X4pdJqN1cvX$uHc*^621G%>wVyE$9ND36>X~Nqhzf+{AsJnMEbdY+utdsi@%$c|HmFQ=5(~yKc8;84f;EE0_5b@H@BAw!Sw3QVJ3EJNK`7Ta#&O!-_8m}+XMH}|3&@X=War&iQFFzee z!()cRbHVfYEJO0I?}&6p6-q6Y^fwDD8}`>f^gsvBf7W9Eg@KnukZShwjhXe3P4jrCDTB zXQSculX?g~oR!9$Is~=CJ_#Qd(BbgeU#f02<;>Ofu%1n`Tw>!=dW)wISS0q?zU~fxGHgYt;)bnKjg^Y&Iz(*psK&8bR~bb zj0hDigKZghrPojbiLABlv?3Mnoc$xfzSzR{S$HDV{$Q?1CVVPq!6=_Ba!+w)8P?F$ z;t2;f9syGAnQ;gX3hk+yeC#>(0DDDY$N5 z;U6@BP6=wHqoXjf5G<=DCh)x~~&rnYLqzSLu6WGryih%}U$v z2f`(e*yI>8l^gG`OKL>k7uN4exI6UrN=+Fkv(mR7QrKDSe65KeAPfxe>jh^j``-Hs zNn3qw2w}7XI})L-?%O2FgueG}Fg@k+KzCc09BzFVNOq;@oWmm;2ro6W=6w}hM)NAH zwAlC^RpXlExH-Q8b;DYpNb4}%4>)&4(lbMmDq8t?#&2Uo?gBlM@l~ z=sCqvFrZUfs)Uq;j*Euu7qTU5P;g&YgNkRJEl%1pMp}5qG&V|k`QXrX5lC)Mx&K2e zO>9l!e4q;Rh}m$9Y{y5cSmS5Y)yxkwa&|t)_4klHOz+7lHHn1paKR`Pini*zYbvpl zs;%CR%LZI8_Ad+Z~$C9cRs(Xwz^HCV;e3s{jJIw zjHd*05f(S!{K46bFvA`sq)k|Og8U6Dk~;RY0w0o{HuoOlZd?yDXmz%DeCa z(U2s1i2c!u>-8s1Q2T>ehguLy7^e0XJAEyY=Qeq`8;@xjP_7IPpSds1`S2_YcIxN@ z($8xXRtgm!87|98KA<0>+DQb7UU0&18y4GEnh}2bhb*sPkKH| z2{|%N2~Ggg@2Aul{_{9!VxA@x4+{-dVAWL-G9M4_AW2T{ryio4yk^#ApU8mx>s<$! 
zH$?DL&bWS6xxe*29x`<$ny2h@R#5yn#9}D9nlf)l$H{D0#a5FU5;|R8KgHV_?L0Q` zAI69J^8S-Z1VuVMM6gF;Ah$hfWH@NnSq4Lg1YakpewOqJofl$0X z3-XwW!6sr%OPfd!EB`4*RQKprMlxMDdI^E?;F@+cOU!ZA6CKp~Ol0;O(L&|;2aVSHXYBe{TP{DdoN)E@u|H1($_aroqCxDlC zp(gtE_(O2)R@7d0)9d5t&-Oe@?0g!D9Gk$8YB9RoKNt3x&0}UQ^UScSgnCN3gT8}D zuSRH%YLyGWwjH)^G~-``9%-Y^YT*QEE+-MXOd%crHhl5Ct*( zQ|6PkOT|`Q6{k~Q&EXcOpoxhbicBWzE1EX~$-$rCDi{2=nxP@|!5%ZF|HvQcuT z@&d6f(ZxDW0T&H*?%HNHa;gzbm*UTe$~K5HfAn9|V_%yecGBOzl5ZmeW40991;Vc4 ztsDmXop^s%KW10-V+OQQ6OJ5GPVfFg<7SvWea-FE-q51v#a?-FQk)tQ1R>cs{9Pi+ z26#NT`kA(Z?#51j2K$^6SOAr|=hlA}@Fb~k%2#;x^@jP~Fjp~)wI9U-@tA#z+HQ=8 z()(%qK0P8>EwNyW+52uYpMtteW~uXN3lhdbX>%ZVl713KX3qDf=Az@Mzv$rcZFziq z{EB++IV5?I_ZML-o-9|rua3{4vwbJHq%Ef1gZ^v^ zrfqomEdj0ROq39U)up@7?CM8=3P#26Om+g6Qq97|(X`upG!Gb&hES+7rC*^ z^;9|q0*)4PcHgzcRj@l2hYPCp+EfL#*0ic~^s9kwUgthpy3q6tA^7`5)mN>A+xc3& zxYsJ7Yba_XN)eo*P=$&>%~ipZ-67VU_Xp4L@NPRBF0_HqFS#2M-EKdR_h#*h-E_>m z*R5|m7?0iZKPetp79>j>S~c{%C`~P#^4GkJjzaWzQJvXE>^D;;ZYSrJp>Ym$C^g>Z z@;Q2|x?QyU6IeYQMwT?-e`n0zv{pv)^}6wO%s2#rZ@*8zJ-xr31{VvICliPfsDg_A z2GLO?rnvJc}@^qqeYB(4kIYi;!Bq#gsOiWb0T;!y1QfByZk&hr9M0= z5E~wzRFX2<(!4pRYdw;6ULzlxX{)#VQ<>wiZ8C0jq<84EvtzwH`7v(tV%{ju7unRH zvC3Q50;*g&hkaa#_SnEX%JD`P(Kx}vkR}=Vnqx*f zoO(oScZ~ILiB!ON@r}u*-!+f04Bg8XacEb@s?Kr_I|;)SGDzIe>uZKfD+8jAO> zyDKO&K=SgtG#CzXll=w+gt^U~6}RWC6Zet*ONa>!&;yPxwGU5X`{^b^WpAWR+gng8 zxI4-(zM)J_Rjfc|OrkJsp3~dP+5(~m*}=oyZEu*=3H(tGQ;$85fYXm#TudPZ9=;@T zP$|b!(7TD6v~q2~dK`CWjS|tF-FR}G7^LjbH{eii#EXfnH{1;hCzoH`XYm3tX{VQ7 z)5!Qlyo+Qv&1NyvShxkX)wY*;YzhG0{I^?&8^-^8uQ#jfYe!gpL zj^Ao9GR^lre!|w@LK;Hx0#jZ3enUor&)^8wA*K}~#uAbU`#b@Vr4(~jycCcdobywy zz3(T93IgwY#vUofWn&4|SMNcQZ!zG%wtbdAb}6!(tfbMGwRwGg?4S6wRKkQuIjXR| zVCMX7D=dVL-IaIO2YFsql-eGaaBO;BLUdY3H1AzHw~!r1*=o|&DCjpcv#Dgzq) zS(v#1NbEmU*1y{PxtQoV01N;JKxgTHHT!e20YLG;Kl;Bn{Qo7rnE^m4*I%1#^z580 zf1CeV0Uu-qu={`2YZ(FR0s#8#e+L2n5kL4(5a7Q_TKsLN{y%^K|1g;U*{=P64#U6O zyZ?W{09d)W{zBvb2oV46_y<7e0Hz-h?eByvfM$PIfK0*P5etA70C1uJhMS%VfYot* 
z|GO8Q4l5T6J%ATr1WX3Vy6LboG10UC>nIa|!`5Np;-u$dX8T9TPaLjTt>z?cOf)DD=>MbFH_`PWkxfM|dYD<=y*CnrGQ0Wb#W1OV0vP*(Vt zeE+(RtePI6 zY4LYDKtKxs{R7PY*Czm@zY_q34ItzH8e|7Ruz)cj$sGTrksSaE>#zV4#|X&!-^PDt z)_>=e9iV}x!_3YMaFz}5r66PnfWQD}80k3xx%-=Q0P+r4%)g!I_{*qsuyF!-UO;pK z#{o*i|16H}uWi5>01W@Yzy<02l@(#;1cdbO1c27R4l7{Q zfC~6W!~zih|CkO?{|0DVu+amUekOpA02=?FjQ`J!{&$k*pN$#*>$3X)hF7pLvHm|u znxxH&LPpf!EAI&1oOaHi$cd_y0Ut;nOaijgync&KB(iE%-6!h`x-?n-NT~cSk9w9@ zb#hOogXSj~@tI|H}m{#(d3T+Y2YvN?Buaxy-(@Ic1k$Mi^IG+NBTJuaSH^pNVwkTag zW+bnMbdhc$xqtosdbC}0i5687Px4~g@U|kN zW>@8M#rh`-s!+rnvGMQkWxiSt$)B28!Xt-|O`erjvZ0u4#Gv4$?7d79B2%V=P-~>v4woufI&9 z)JV6|rs}lqLzl>Oe8F>ZHu#UZXc;~+SJs?=y!HEjs+QPmH!eLfRIwm@|L-^G|I1zK z-*=n;1{3|C?@s@pQKEnJnE>_tA1KkkHW~SkTjxK!jr><}{~IM@VPXG^=9ybLnK%&s z{iVRkMAYOjiv-Q^uQ`BSve;Puv)C`apxkg4o3FbIzab21yd6dZJ7!i2o? zx2MrvpXmwbJAB;DoT^ZMx%7E^b_iBN$jrkO#K$S`FPwGvzfh#SldvJ-Ge z>`S=HIV+EKE=f*l8Xu+Kgd$q@JdmUKX_ju95t-wP`owSkxM2k?f-jcw_4mSr%IkE< z523G6UM>kH9$;u;G8WdItfYbjIs=?l@}jCuBLq_P535Y2rKq_9$J)j z%+*<;`bk2z*Xos&JbI_<6;qZY$yrQA!X}fX6rU!f?C6nM<|f>=!k-}AFc1A7;;5oj zCpu7t=Xt)klYyP~nBU=yaQyx-QiG+7_8V5rQlG5DGLDULF{;atO7CieH&lzC0}p6x zDwtOLox3Z7K{A!ubB^lkd13zsUx>n1Vrc5&jUK4b(_CgYB1H*tM2=7ia%3njiJChH zEkkido3Q{&w-%Kdc0L}gn4Hm5(3l3M3_7=O3eS-^UBQfc79`KixB6BICB&7QqMXJA zT%A;-i(oXY|6Ly7*`0<^q@oXc@*@{J=jK zI?P=?=q(|LLiU>+G3caF*fHs=@X&D~ZB;+bGZVLt!UiRJ{q(bbN*MDvuEJ2wM(p%B zBdfuYhm&RoM?EI~4%qH%lc{s{T3j*D-Uavl2IrIT-*+L6%dv&IUg<2!Z0Kb2Q_5#% zjH)SHERr!;_n>RxEdkN1LgWJ%v}IJEVVsd0XmgdI2Q}haQ4s1OVP9hO*S*LG-`)t* zbn|dXr-lf9VLcG*e(BSj+L~S!Y{eMV8rpudy+waR@5>yZ65&AMw0bip0O1#gYpgY` zHE3=8VN`3<1+$ImDHGmL+jtPR+8fAN{w3(p7n zD}I)R=?M3T{mAtg$pO*$9qljjl|#YN>D8yAyP{($hD1G@Zk^Ynd!uxv zB~l&c1_z4ZB?tP*#Ok(_|}J%U`SZ5UEyqTQnwueXm-+x3K{6w`u?f9|NE9eO@@-sv zzhvO{fqMxM*i9*8EoSkxp6bN25+s3maU0@lzQ9~pE4+}g9=0q6L7?)>QK{=NO{^D?ePe{MGa z=e7W$%SWZu^R@>g+<{!xiab%L@&3ZcKa)7Uv-sr+N@IOD`=f{{K3SjzFN3uq7r&{& zEW$R+;?jhiz0C;fAUD8UE!tO?*2XubP|e+UyWzUbM{=XIs&L80Za(o)}mem*j;M zK>I%W{_EZPoydpai}Q=Vmq6R~rt>BHy{30!8~Brq(|4p7;kZ!Im->-lPv9W-+V=n< 
z+i6iJQ7&1taX6mfMgo$ztatC}TTttB;k$R@`D0c>%+)N5))QxMsKD>uf!;CO-LCzf`p>c6jO7}b@nBR1M1t3~F2Elk-;~K<@x4=0#>Zn5fh$ayRHc+VGU7-YP2c9tC zVELMgRH!H)#8Lqh`D2Qh(&!O_vjvn%xUX#Lz#q_zxjtZN3cY8HAP{7daJr!J(416a z(mUj!%A+&2$QKbl>lEqa*t{l8x2);OoAGM?RtKK^pF6(4I)TyRdFcqQulz6ZfO%dq zoyzuppxr;V;sPDA)S(+Xhvp;FctGsngL(UW^A3HE%}dKS?fUhVn=l-xcJ;eouMcu?<0*92x!-uW@i&Inm}6DLYAiajgr zLn82ZTPSV7+K>{VRIO(WD`JW5J4H^md%Y6^zXfEyIA;l0CeDGbB!J@YyTR|f>CY^` zf*_8sN>~v5odqU)y^#Na#%#S1_b9jkeK5$SvR_C19$r}14j;tdU)qP z1FyZpRq=33Md~v^ydVs_tF)v`{VHW41Jm2LkDL5qDk-yqG?a;-xe|y~hyv*N4su`^ zyAsJ0nmPES(nawU+i@i?S$)dO9MlJqpm>@m01gcD*eg`k2oUoTA4Qn#ukXr4`d77*f3fP0cCyAojD^*qTK9cqTqpDc>q|B4(7 zSN8DBDZghf&uOYL*K@$Gwhg~I=JWVuh`Wi0!U_z5N0RZw8V#bZ;bMh})erYB9k}Jd zvjX4fWot-a2rOQ}p_ZHi!a{hlSpg@*Z~%sHKtMRab7$SRORU{iB3d6~Cq|h3F4>a= zbElP-3MMAwNHLcRbID7oogr~J%S#=3q{N*ef%OyLq{uyqk0jP%hF4nnjuLCKu!=R- z5J^9TX1t_`HPd0%N?N>;3S+Xw_@BXqR3jA^bzp*=BQ@V-`K`HpvWO!!%4B&GYiWZr zd^Kz7K#~I5WECxIe30W2zxR2K#lkD|!VdkFodkj7BNHaX<9Z;R4#f_1 zjEP~WBnJ=*MWL~&gRQ8+z>)(Zq=iI^^NN*cl&CPvP@`0#O(%LI9BhiQ>C+|uAE!R|KwKX5h!`;Pd1am!kc@-6MR?)cKe2eh!%t{+1^B3v zNapXz^A0h`-ApHmbFz$uM}zTrt(`^^VJFGTCDM1%OcWbtjQ;@LG2c$6cOrO^O^TDV zRBX?q4H?Z!lFvC^A51HK?ZmTe7OL7j}D zZ~gu4M+%6?oFHkPh?iY}cN-BpDif~TW;kV0I$AKasTdK>Lkri!`dN9anxbuG(bke^ zceV6=3n_HDIcmc^Ws(ftig;mMpg0l5LzfM9@C7S$d-+$h%GY_xCr9k-w$+)h^THQ4 zb3t7E>kpQPUV7|chjNUmVkJp&@A@Qhq7-o=sAyr>XkldRV8&=+Emr7pb5sXE+S@Ww z#*bnmk;4JyUb0fM%n3>0Wvdz7UP2kvmFX{U!7CVA0zAH4-=W8#kRO6H_6|0`VU|l&LsGgfvJkVA+lO&UtXpgF%ud1G-S2&X` zb_7`Pg<5XS-}j6@{^0opdAr+hQaAFkEp^lGn?wQ;D^^CJfkz+;+@MGrX zSZsg0iV``p@Ar%H$&IsB)^z6h7$hJ_iF~kpL4&)q<#Cy|0h}(H z%sG-|H`qBJUZUK!+~Lnuv!zSPM7Dip=Wdqj*}Hfy4>oKdDjYQv#J9*p|lN1d|JgP|Q3F*y7b%r)5QvcMZ`DhrSH zkDqUOa#=6b$vEerrZ;y5(0w9BY4Gomw2W=0TBC6o+zroOld$Cz7GQx8Sl{a z=y;T8DmC;s4shAe=}zv$lEOndLC(^^dY}>q-15X6v?r z42zYU5bDoB zVMZa%z{anEZn$5y0;g@c6ExM+`m?*3xLKnw^pBTv)bNqlXiDTe`3g1$TD?>1P30G|mCA}?t zM4P7|<}<3x8jYW~q%&bWN0Yl|`#tgAV0-|^%KF>_bfQwd85_dr#YU}$TzA!J7l_DZ 
zXQ1R`P>$=amPcgA$au7Fjv%h#p}{I1g5o+4d^>knrjMI>^;vAb;;iSWQ^H2TRm7Eg zvD<&Of71Goc9mRjAl&KGwOvCGk#to7=*6AL4uxbfpFy6|wWCT`*jvQ)Pu4Z80jw!& zu2F)c6jAIo>%$l%32S?+q7IKJn6=|1@(f*rUxo_S241pQFsf)^mA^LlD?2pR<}}g& zz=6Z;vum*KP_|HdwfBeIp6I82wxI6r_a$dt4-Mt#ld@l1rauSqY4d zubI@;+N-X0ODR;|-rjjmS2XIp67VT0nbX;rOY21Ee@5h+kgkK`3P8Nd9Xw%S;UaSE zWjJXeW1yk3vsabN#E_vgjLN=_&gHAubo3pac*8f2PmJlBT)NqAyKSbZQ9HUk5eKr@ zv>GW$xH5d;AVE^3YXs{?MUnx96Ov6?=P7+ejf|vljU;SzeY3oqNPeE;Hg_FWY~d}( zJ++Z$AjhJ(8VY4}jHOngMSGuEY)1R^I_wy7obRa#HDPV!s|&q0a`nBwB@8sDE_ z>2xNyb<5NJQP$xqYe{t}4s)Ifu5def6))KP6%6V^;g_ zWdGu>y{>&kEptWk8L7}E{0w}DwAV{^6(`-^EX4SUb0v;Ax2YDq2^$}F1+<+xLgqx4 zcs&MR+M0lgx%oU2V2R1%IqwpcBX{NLI$7uh{)y}XulgM<{hK5$&R}QZAhk0GZ7P7_ zPflhw9Sp~H(u_J&SM3>i!@fVrPYV+yUQHj$^}5Xf33PUOrcJ<6RLpZ!p!tGhs{k%? zW9pTc44VuYQGhDvTI3p@3Vi~M;7t)DcLJDfMv-vV@;ZVzNwqx>4Q<}7yi#>VCq<&oMJ5t5# z*XN<;YOL?mU&mJ}*3YN9s;sOrG7ICIAH19^*tHVjJGpBr=X^m}1AUjXX6y8+m$a1^ z`U+z~71(Dy1h>haCHgYs(MO`cJ%_xkEg^DJ$YN|6;oi%!ywG*Ua(->(eXNAHgIn!vD1N#wFF z{I20xxJ`(%*8jQKEAm!+uZMSGqt=dEu7F2K~Q#y#|Yl}|sBor}U97w(J2S0EiTnr)mTF>KFE-!~ibGRNcXj!Q z9?iFm?Mhi`(x25HWdKul{s_@MNho+@MTHK_HXJ>;5qwQIr+Z*qMFqupgAuE`kc-RC zQ=j1&QE|B|i5jiyII2FO39HKwlyhi)XtCYT6@LN+W;RqXJ*D3L5k-Es#8~>>rmEz- zxzc9yS&ZjptI?8obgI>s&ra}KvmOY}S@6a+lOuEVZ!+^JWE4{IqakLz)tDtuLWWh1W&8)5X8*%($5iLzQH*EvIJumURj)9*3Y}X zxPDd@^ORxbpAU}yes)@*;*Bnw1kGT>SQ+b1lPztwjt;0QmalDB(_SVzUG{=Y`*q)w zHB9YEt;d_=W(C?X3wpP~iCGN}mD(;u#ZzN~hW=FQ#Z|ri`}gA!(iE3^W_>xytJuBl zlga4csI6)Kp}D|#M{opGCp>O^$%=(=S1!@5UQJaIr>*$T3?p~&T+iXZ)GcJ;hAmTR z_NrRgm)91Se=KR(|GZ!>TovptG+~o7bjQ|rPqEm5l-k&Z1r;Jqf7fPyKng63aq(2u zvM%Z3(d1NJtYgN$prcw;Sq{V}9@;#*(8ItVxajKHEsU#~kiN3%vKTCEZgRIqHWJ=n zKM?;gk+sAW_a6odm+K)7mvfd2;2SF?zlAj3yfl7+_5!4RC{E}6e(Dspj(;4le!^I> z9u&IL&07Mtq%AcL!pF$$8iW%OOEvDoj}LfhWqO(p{`JMWV{z1$mxwp$sTGXAv(grcr6)ksmrwqY-d0nVdZ~=9o-0DHQq?TkK z+VO;c^mey|a(W#ntPyGS#c%F2E3U%E&=*!$c)`?5^Hhvlu)S?{<#5EJn%+8%@T4i} z6xAhReC^eXI?kMoTXm%lSWuO77*m^X)qD3_HM>QF#I)89^p|)Jy;hWcxr$<7mSe&K z``b$QQk%%99h+g= 
zU36+JdI^!;zQ6OuUHje0H=_wmeB`!VVW+jW2jEGVk5+VUN=EnMNA4MTH}P+mCYn7b zlgXD2ITDBYlfaGzjR*?M{ZrHE5|iB}6ywD#>{g>WTtR%djH>R9B1a7o!7W1k411!M zfK3r$UBdV6eEb|hm~Oaw?rVfs3CZ65%CS2A*_TBO-AOQ*(Fom0)5X}W#V6O(gcE-%{T(-ssPrgMjQwiq=!hm|jP+CoEVg4Ic zmMn=9IKj-+Dm>UsQqlrj5Y8Fr-Q6IxzM+dnd&l|kwLw=ln{j=oww;8!aO;4^4y|oN zyLQ^f8S{99g8|tkwX}Y$^M))Qy#(?%aOl@0*#eT|9;`sQ7z)df2izS%$&YQF3l$a4 z#;V-c6Pl&){=)J&DRb0{`dm5~3KCQ$Bq}d1p5f!A`*xJ#$tlcK^&AhT$ILdZe7z<^t4VM{8K3l zb1tq-OZhG*@6x4$O4&|oSF`D5WdV4YZtf5H@RoN;y?D-^q1A=?B_SS~P_0S0g-BN; zfdLuudPAChU1qf$Xp?4{oE1@&FqRxSi&4j~#jzzrlOI@Cnv+#cc5ou4Yin9*)xXiwBKu^_N*f!9F4Z`|B4`{mtZPY?m5?1$%QURHF)iE_ z&t^5w5fV4vqn0e{PSK_uV4tftY3D=_oNv=oM#8=eEq}HBQOlJ}&*<=!f0@?=6of8S z_aUaz+1^&e-iT(oE=*H@j~dXx6RCxoV-XXk7-obhgywG2H2qd2h;TX<76<;ZDpVsEh~qc(|m!u;6B|;&VX`8kpLLBltU%q~VV^fR(R+{)=FqtUXA96%Hxcc)zr|L&}_UZYLjn0HguQcQ#YEti`aMvlT< z%}K%DGRo$)WztvC%CT}mQ4jx4NcrX7`QLdc z7M6b#M*QJ+|7&1b^dG?TH(O^1Ba?3iHjZB$Z2x<38G!!%jaUY-ng1+F&IGU`{wJl( z@dtGJ?+Il#0N_Z^0x)s_>M{c~aU5L#T9N!Oc0U#^?cw_+d=zsj?Ci=tE0_<#nb%1>U_QMI-mK{KF0tjKia=>~3-T8;H zWdq>M0IwXN4-Nnm%<_-rfX4vf#s9*M*|^yOm@)fbNGdy^A9jGD>Cg6mZmfX6*#O2i zRsj9^=W(+Eq~V-@uv5VQ{%HfyV*r`^2m1tAy#RzQ;8FiYIWzr1aRI36A1IU+ATI?_ zt$_ajG}!;#ac~2A{!^G9P^TVn#|+pHfKdm4wE&)!89>J}GXa#u06g~34w;Bp0e%<2 z?ayMs5CD2NCP0%JAn5}5vH*xQpb6L&3t-fM9{vmF^ba-tf5Dux{ws#3lQ3-)$OJcZ z#U~!K_9GxD$r(k&!aSbqJa*_2h>aP44oWExa?>Y#Qqw#h=>6R5&@ro}BDV3w45PbU ziJhswigUG>O*b3FB}CSigdy#6u>+-Rab}h3ao0566w6{hS}BJW!}sZ&tv#6k{J||` zn99cyN86U+e5c%sk5kRkS09MAo{R>j=*L~MuIB41LXTmRwZ+BVdxurWVb`8xZ4Vd= zC8xy#(~7FUD9023z44&uf64QEQ;#=iE=Vzgm7eB zv*A)m$zq=Ql=((P_IcVe?A}PFBCCKZGm_l-QBreT^=4y*Qj5UUg&FAFvWm<5tMZRa zYwvU;WR#Wr!kO19?SqO#r78_{-0t$Dz&X*MQZSd)Zu$DhqCtkskRMWGNpx4LTE=8i z#M-HAQHzV*g1Q~wxR#h^H+wEtS7sqp+>6wS{VnX@4UYdf;Qx8R{u|TzKOgA- zw`}J>%%lGs+xeH~;P3B7fAb^!FSZjvi~k?cPG-R2`!8sxnwH$W8Jgf$_h%E48+F4$ zzKHBW{zzpo$k!x5xqrZQ^>~xMa~W`o)DR5F@r2gMaAvI{Pb|cY$)I$8`hI>o$CvFu z#LUY_HrB-Sr(N<{8VP-)c%l>u>QNBzC7<2HY_?MdE(#ZyoVnef>T?06&qz(UdO{i0B_Ez1kbet<|e>`ixFQ|yJx;% 
z65e7#!-K;tzGw>?@70xwg_ATIYc~#EA3TVUxBIF1`-S*=bo(Kf>0l=54MN>8LZrMF z^))Gngg-}YiXJFh2zU(o(h=G(_>fgRQoBAWM$gvrit)C$^DYs+0lVaejjj2c$9FGd zzbMe&WhCt21Uf^iI{|9oLAK6@oAkx1Tuu^-j|$$?Lxt)ykNNVWrXJ-GYE&=` zQMD<|#p&rRQfR-Us+7L72rN`XcA_M&5)xqVc78ph*b{)Zz(jzVyt~IZO|IELrvn~) z=eEF^W?cW8et1us;hOBJUQP8oiR&;ei5J#bkOZ>xc;;Apk>Fk)!+W0LIFsz92-SN& zvdcWL5_L5LTqh-{N%i})Hb!k z#w2NYn#z9nDZ0t0g>)6ooZo>O3KFrV=A=~lI%!$LFmp)kU*TSX9=?USe8ci&JfxAJ zpTX)Q7+is3f@*_uF#r^bCLGWRjT1NMccvd{N|OO+FtOG!=C9Tf1-2Oax%l>@1kq7e zV_}a@WN({beJFtOD|Iq3{X$G4(_^8d6)3yh^bQ@F+k~LYO_A`t2zZ|!+($t}u#8`4 zSRcE@iN-n=8I6_*uUN?Yb^jSI(n<;)wj-&BuAl1|#f{HNcs@iq@sdRiM$L(vA3wez zd3NRv+qNyzCrF$Vw1b*0+!m zAXQJ+Cmvpg){VH>XteVQYBIf^N%H%XqvPdNt~~hbwTB;eO;((xzDuQ}@Q;^?#%>O91F|FY83@~;+XZmO;2SVCSntSXmj zPTsF>#z|N*=8c5mf3}=lJSnQ{sK{C{?th8s+kMgg*x(>~OuJ{KBWIEVWt|AujAv4*pf?WS4lT}jFwtW#Ag{IOADRoRf>D!riM>>m+O zr(&mA1)zHi$0%XFbACqoRCX4JCl4|Whr&Q8$V+_)AvVW-{@4#adrBYp;R zwL(?D3$RR*?CV`q(8P6Ry#}|!jn}}9Uw4C?@PDjbbc23H>puAm?up^=nmVWWkbGnH z*`ny*+n^-Sq`+tOyK#+_ur7 z^c{g<^K|7b@KbwpT|ZDD$Jm}#wJym$mqq_ANK(plJ)X5Brbp*D8d^*#d_gQ z-ira@2hr3##~e*ocHD6=cKo771;nXrMaU1O&li=i4}otOIYoC|*(N5NVq@cCv5I1g z0<*!#b|hsm`FjkrF~>8gul1 z8u)hn`w{e)#PB~t^}kQKf6khJ3jzO!srG*z|NWH$`@hA1e`UA+J^uS!!qNYV{{W{w ztp5j{A`2(L-TU{K{k#*}Og$EKbaATF9I?3w5p<9c5vG*77*U;coD@== ziUvp(mAwHvIKIh(JsQ5N36ufsFhC$1VfCTBbhdtYCRTyuHqhk;O1$e`n?aG9LZ}@iNjnD zy{R#O4xM1qg|B0K9pnOqyaMmv95G}$Z{A@pZNO}o4n95C7qK&d5n|PR$k0_!X87K; zhF(tF^pq6?(dqDc4!L8#U-U|LK^`B0gdyhgJFZ6cS5ejqIAD$cPLGC;`MS4)HOwJ$ zd489oZLGa>0j|&6d&%kBdv6X)&^{G$&jR%0)qS2plZvojUu*P4UBKa^;m*_p*P}Ds z{f@cPV<%vUd{ksP?A|M-+wNu}*gaI@0vzfRl<6IN9_)*M0KODUIGYP;D|AB7reC|UK=~ado^Caq6(PJR+mp?>YwCBtLRtSzw+Lj9ZOM!#w^eSW zbf25uXQcE-w?n?Li5&MS!?20mrhTultO``J&&mb+x9rlUG2e0x8pW=&&NmSZ0kt*X zBUi0eQ}etGW{cw%QrJoJi~#a%(?F{MZ7Lx-A=d$drzkLwolqfaAE^5fhXI&3q~lm` zVSOeaC}mN8BnQ&g_$7L%@==I$bd@$0n!&RAuRpFixl(zQ!!wNIStxIHFWvC`l#C=| zf$`gp@RM{c1Ot<3K&2z6Ac(^G@ly#5NWn>L*}GA+OXGXLfZR zgtjhcpvl{bF-py&RWR|&!#-tg%rz{RMu7p-CFh&`1X^puI@EtX?5FymV`bh9qlWdf 
z7qP`F)SW@Sle|d+uK{BS309{G;I|_vn(nJRB9+L4dLLaSc{*oh_NJbjXKJ)OH*STj zF^1mszYzx0V#-{4-)o2OT{irQtfiM z#dcfe%mAcFQA7~D6TW&xK`%a9&A7OFB;?0&Xo2DeZKfVOvZ%{}F&$t%(1E%c_>0Uv zh%KunW4Y)+$4VL}?w#qKC-j@Hi%;ysNP70&+4allad95yb|2c|`seU8_6Rs(3?h0v?IZVYfi;X}+qvVTWYr#>$1qV3(N3hQ|i)qClRK`pi3$Bk`)|s%M+> zwbS&=d9S6{s^41&-L&NbEfw=-Ms`Nug`T2Df@hPtmj*)ib22$Obdf)c*1*BRgMH=s zmfpY2M)H4X^VKcAC44WftteaHXx1An^63_Q;C^_n_I8o=?1>d1+JN+7j?qLR6BPSI zhFv=%6Eu9L`N0U{Y$ZP^X zwp!a#r`#Vs5bM~Abvf|tMTyr_XV!O61Iag#`i7O#53@twVF4~vgO{}0NYz_SUQC&q z-{7g$0YkohLN$w-P_+MZ-+2@_!L^^iM$3(71%g7mv1so|=$7?labM7`Fdc-mm;Rcg zwYLUTt&P>5f1Un~z~sGBq*E%PK4HrMHs6l8NqC@fA817qRsB`&Deh^6#Q^HpZK$!M zgJ23wz6~?3O)&pYPHc&SxupG)ec(&oA#a>u434#JxvU+8blR5IyRHe<|BF{b)>!E5s zLS03vduH5Y#Xm-STW~7;&NOWM^|tX*hz5<}(xqUJK|I=Pu+)+-y@Ip+E!S~cuq}Nf z$wm|`iXejp!w|!_##b&v6IJozx!w?*M~1_|bJ;|2Mw9oE++z`bd^@>1ZtsiU1>KDt zWAi$#w}0ruAPA@q54n{8s*9>1SxVFz0lo!!4nxS_JK=k2Wzte7dy~eC%C$Ev za7%9Z+9vT);{EmY>w6*}-Mrj6 zTkz^QQnH{;k-{&ObUmt-C>+~nq1Q23NQe>A=SL{!1x}$(-S=_#ElzSAv}$E}0=~P5 z*1^`cjiRrokI@(O7i)=E+b}zlIWQtvOY5v(a??V17R6gcwgSCAP?(Ljz@><>r4)v9 z(q#@4#P>xd&E~|NKs)_yibIcG z14z=qGPf~cf(=AEnZBu$XwzAtWdt7ua|XKxR|la!Nk>R9(;bG{GZQ1A7Dvbqt_>m% zA{J85InKS-DG4a*Qa6w3QF$l%%Tzjqe66lfbD?1@habsS>_J#w6kijh(e>J^;yFf|l!7uv_qC=6OI!|kTymPxgneI4?h zOxGc4lX8Is6YvsK$p@6!;l3%Ui!lV^H$W`bUM&JK?K3({3a@GM2JN%>slWXGj$ohOC9zBMnjeTq$Np0C|W8N<}z+P=I39?|L z;A)8;JF3r)FE|$2km95rNJUQ7Ey}Mx$0)z^T15>Pa0S3@H@hj0qYBn%=DxsROoBcZ8?Z3d?&sVK$v^SHT&r^Tor z)s`7ziz}IA>rj$-zU%QV31Bp)dG4g_({Ty9gQET5Q%}|oVc$f&>i1~Lt@z%J_OOOD zmDN0QGWW&f3r+@IzU7If6W+$MvsZ2NT^OQ3?=&GZ3zBte<1Q@|LRrFDg0JM|WeCb6 z1{b74An8+5?hbwk>}y_wH`_)A9u3{VkvHy?)MhFiD(Qs6R4DT`)p~&xlf3jc7H1;n zjXPs-x|5YdI8(hh!Xv1KGnKiTp7shjj_M`eO_4GraJHI6vDe~O z`Icc~7^x~DeoKSr<=|)v#Ea+A^y@MmpwN^h z%|%eG^u3Wi zNIsdS?o#!Awy5d-IS^IhRu^5i7j3HWQ|3}>W&@^jnJlFmX{y7Q28!F><(ZwScsJQm zh6gdkiK@<1lPJG_JspavJ6)`*#2?C=$(?55ny9OVI&U|!*?#X|{o!`zyc|wCEPid{ zB);*}v<}1;+d#1|hvvbH)rg(0(7@*B3KA|}W~Yk{D1yAcHEUES*D4CsaeHDY@fTDL 
zUZ1aYtJ=xv<(aDZ>!$&YMvR+>OQauF+~OO}7LUrqHjS%uK`V2BbALnHQ*Im7 zjW4=^xrtov=c}ma=cpZW9&Po>_7V|P``_R;vZUBBF<3B^LblTkaUpNruJSK28|Kul zeqi(hwIf>8+7>T9&<n4BVfqrxvOZh4L`-gnOJ%JrL7nDl26_@+W`r=H;u5CI4n`~8S9Sns&_Na&dX}`(t=rNXb`s1@r!0wi=(IZ ziIf+=fJn-R6)k=QA(&%nH?w2jQC!3T9rGpEDW{B(AY=H)gAvvRmvwmkvcb;_KZ@Y< zA1-c2**3SJi0-*p&5)UAF-MypP|lc|PcvdM9eI+@ZVgo&Z6gsfGVGYK)>_B(*-`od zgiuf{A}mY7+o3grS*P_9`k+eOG`Yi23r~YW;x=s(g|cRV0dpd zqbS}@@Q#0Mq8n34C)ADiu6sZ17Q^7CWkWUC73`7`1{7X3^mMdp&33;oblF*v3pfdT`Ep8OTU9Sl+6CnRncQvA%jJ}H>|o_qjq z#_Ts?h2rA?BQ2Yb{g}=GD6u>ce{3X$OH`@IL0y&+sln*n6F~4$&?DgL|ek#{rsq*SxLB52Bap+3|ZOpUt6Zt$X^KSc5;FjNAW#j7>+k6;TgObt~Y+^(#hF!L#pN&t&EE!iBc9t zpSyWqW-|^?`F@FH5FQp(p@@R4Zna(cwqCwks0HGm2*Ri@@W-4#Y$al~LrQ+c&X~;##(p5(i&Ym? zi&xKKYF=|x-s{VH*}27qek2Qk>z?0$-kTE$DCX|(INmezc|r6eg-p*;YKa(N8TWbh zShB&f!$mUd<;Pl&I|n5Oo;xu<>R{FW87uQ~alT%xZjqnE=KD_Xzr21W=$a<{gYZD<5ub$gLy=pLuaytmPFERUI^qGM6Gn_j-Nl3w-s9i zKaO|~K;F0<>4xN;FziH!HY$z&7^ZGBIIFoQZG zp{P%;SYyNqm@waFuS7q&U&uVLWsmgxmmC|2F$$_h1kv9e?sE%L-5mua6%B;j!nNG` zBj%$C`VH@)-)iJHsG?6-G{4Ax?+<fc|rsugZ>w3K4o)#PUo z=dUZdB+7v>5aQI*b+C4BN88#;{vqWSm-&VvLgIj5>c=(OU&F|D>t+j8Mn@M&%W=_>7ZX6kypmwm@9LQ6%? 
zoo!QwX&H4#+!gXQgi~-lQr925xhaUztas|#q}O1_T{wrCHQC;UbjV4|&C1Kcjq8^h zJ|ATTr(rg4ze;a?e{$*9&J}jve*RkMa5RmuXnLsI%psAEKInr=wa$C0RwOkfx;S*S-ON?^K}V8zZU(c9fgyt$Czhy znHydx^N6Fv6Q`Dfr))O49M*pxLe`&U$YRA$RN=0?;hJ)tQ|Kiau`jldV$zjlHfUi< zbyZ-G&jtH0Iy;kUjf&|aknF?Kh2q_TGZydmO^gYP!55BgWYDCBE*Z6IW9V}S0^ho| z@&)>*qC(R)H!?RDd05&oFz!VQkuuI7M>{dxSrMd+j_;Z9$zRhk-c%AxCkHko{ikAv2Yu);}=gsQvRwoYImM2aRPijv`etmrNA@PJ6 zza%W*g!yJMq`1l>o~yAjr+1}?nyISOY^`ps;hySQdD5b$aiOTJI!;4t^czR@19jzq z&J(L$jr-5f5|cMpH>Mpyb#{YFaIbU8-klvTJ9fMtZe6}}ApdHgf0p~Jq<5;>g|PL3 z57UoPBZ<3}fE*Kacp996Iy;40I@gKgne^LW`MZpBxeHEM}ytY_vg`>RRpAM-FKgOq&6u1xJmf#bv5AYr4gkW<~PH zo&g^^B_tl0x(1mPQ>LG=A8v1{Mg7E97;LkWP00LWDMCisGk7@MIr&srxf}+Mc?^Ro zp+h`gJrW5-(dj$@<0csqI^^R7cDJ#4hRpB51gEq%6h`w2C+Se5?BwI)T5@$_M-ZqW zZfPVaarSviZ;MsPtrBS=8YM}nyg3HFJGkh?>?!iZf0wFf}LNHHAbgnVvkj}bPIBDo&@LxeaJ4+ke&5xf&-))TgG#+0(h#RYHA7bQm!49x^%PcqDEz_v?s zF=x)`K!YYha#` z9j)u<#y*U0XK<^d4vk4%6%dK6)=wU0xjnbfTZ558tP7fIFvc_Bx~Z0_BxCPi$7QU2xMqE(2x;kh_jGs5kD*!EQjB$e%Fn$cPSJz>h>9 z@C4#s$R9~w(7GaC$hskdq#yW?Bm@E8qusrQVOREiAk(B>h>%Eve%&NKP>f`sFogDRSRWxz&>d-f zK%YUWNP-7$Xu1+_a0ukNq0>8n@&$O^erm|wK}$%V;MJrAM^Cm-lDFJXFq>j;IGZAG z5H}i67@HDr2neJ;h*v~8+shDF~Q=ykLHgn*gL@wQ`?NX2vhW zf(a*t?@@A}*ODynfqF#qu}&lRgF419o`R(FTYD_;orI(JuB~#u&53fpspJ1_9sxFI zN<9f}l@omH{!-Na_!%$f`0{=#8#C3pxG;6NZadogu5L76rTJPNu<-vGL_WoMEhvOqHF&jZMp zWG-+z3Zo@5@>7r7YingKNg`{#Ex+k!O>eJdO zYVDbwTJBrY4qVqkSsu~Wa@9r*4?={KvlQApR3 zW#gsZzrR_ii5^P?kspc6LOPsv0E;YaWE7iq!M684q6O` zo5b7m8#ZxXWBokJfR3U-bE+E+`zF5-$2KZ4`dy#&Ue6~_FPzn1Yrc(Qv;--WS6g$s zQQew@HYNm4l~w)5sJ_m=wrFS}N)vyI3OiJZEg}Qj2qq?B)b#B~o$J^;iy3h4_$<4$ zu-4$*&fHm`eEk|b?y`w?Y!3p?vVF!1fRh9~j3!OAa+QS1y77oaS$100MEt@n-)Dax z@+k-n0<|Tw#k{g6gj8KZJcQJ71IS_+1Y9nAV{HUH+#24w#%-&*6|_wGl*+nL9kZG; zj!0C-}W zQ-2o4p;F&sukpetI8!~qO_XqOmSN0<4HxsL+jX1jr3;X1;B|y1TLj(a#P^5@UL9B& z?)HuEu2d04#@y#F?b57VR=QbY|Ck|^V)bu2i9Qb6d<40O;5pMk1dvZXpaQYDH~TGI z*G5jpLxVd^D=RI_R)Sw>6k7ETwc{`mT;y3Yq9t5IWo_r4-ZeCQ7>%)eu0rUpK_j1m z{kb_?))ksz8Nb0PkWW;x@sDczFlbr}#-pv7 
z3lmzPl%=l38U%W7g|1Eqw}UhdFGREr4_4vtH+#%JwBpQBO}-ty3-q2!c&xS?@MVsE zlYP{pSu+ryp;A%2ptW_DXb^F4;Z z*Qg_tMY!1z>>nKHuXONb(hsZlR^9D$-Q^F)H{=1n>l0mqmr#AU$k=~u-w6IQ{^X?l zlW%jNFD*ZQBE1Mf-&0eIoF~6%)QV8iMH#L1iL~?ER2wcm?_KtFY___)|20=yNU%7q z^q0tNXU;BWcjjJ{-Q7)0+^Lm4esn-8i*PHaq!K?~^gDVA#S#SbdpvpE+i{`B7@W81 z|7eyJ{_K1E2DsKF=s?b7qAb7kQ-}+R>cZ~C*|W#5HxTkZxEC1f3rwH^5FEi5e=a+q zY5L%~IfBAM4U<9!*IG+7T~&-rWGZ?bWo6}m9Oh@RtV6Nq8E8=uf!@5@9vcKK#!uJ8 zp!#k|A@{*MOkg31*kN35c5NwL-m))`lMGj9H%V?odnO073Hn>Q{T_wj8`OYe@G_5; z#`=OKG__Yx)Fpu8KaYH1kF0ByBz;||ZD3oXV(Y_QeQ)}&TKhCyKoVG;{b7TI30l&X z1e7A=g#%JFy;9&&e(P!{oDfBQAnFL2HK+1Lyta?z4SNs6vELmfJ3;T|o)_~~S|uRTT_q!}7GXp|`7piq!CzUqi3*2%uw^Y#_ertID|8-_arW^G%>2^z^b zvcQv8rXzDaWqcY(F5J6I0hOah^lXMNj<-y1GrTiv384?O`5h5e9F1G+1Sywqnwj*t6x>S1lZ39g*T^cJT{Cl3+1PO@gn>FA( z8RMGc9w_0A&Gj~-ySN*JJfmyhX9{ZTav(q;CWW2@Yf(BP11f?HM3<+A2W?ESU6RRH zaeuJ4D#mUXBI|c2_sjZ?$pA3| zBaXMTh!H3xCy4E@RN_uaBy_dNnM)9Gp(hwu+K0Z=U3C&3Pv~{jYr8v$tloO>sWU<3 zvwN@-12Y>u4fS>98MTm!f? zyqjtJbXro@(6h_7Nom||Cg|XkO0*Q==7EO5X}?Pwsf4%O)2&IZ#d8eUUWXsK>eU! 
zCe+E42JL*IGkqHNLQF#gZ*fl{GP6xD9o0Fxvz$am2Itf_=+&K1fvNT$6ah4ipfe{2 z>B{@^#?$1zzxt;A0*EahG?P6EBH}V(I!iip;;Vj7#9*>22aw{ zj$($!;{#vrPjo`+W2&{e$nD{cs*|bm9rCk0?mq0!j(%@BoFq5LZ%o;KJkF{0UwOV& zQU93Cgj7-3a(({zJ)QkKbL|%oV%Ov7p){g^$L~|~&Ski)v^Sx9qAT)|$LxSu*ymqt zkSvB;lsDG5uTC6pT|`J6oG7MIaRF(_YOq=(-|6f^0}M-usL<67G3uukdyr?Bts<%H zbty4oFonr^7#KESVDC!4kXuz&ym(%LDkUfKHwyc`tHlLVmW}8Dd z;eB|tIQ41Ct%?TYu?;0n!|?IzmS?%x>xe{11vtb>6 zC4X0>4*J%YeM@q0tvAMaelU59bEa>hQJXWJ$?I{4F#}3dgy!yEd`eryDQht`?*1~c zMk#33dB(z*LOUWDi_AB>bibLDCelTyuZURRda=X|plY+81}=6o59zlYmP`tZ+wqEq zekD5|RXAG5`rHmj=488GM$vLM_&OcelC_Sxj+Y$*GS^PpvUQzS?rE#LcO`BN&z1xyR*8hJcOK>PAi-bo~P%G2mmeSOfwOV zkD?IneHbuwS4GkZAcDcjGmR{Pik*>M{`^jo=|mbvg5hr#ZAi=Sno5TKaBi2*TyKZ< zO%t3FU}FoeO-?$)5T6cDrk0=G9D~3$N$fSYPX=E69t4x5Gf^eHJEGtMzhPiae)i<% zJN8Sz$p+7CzU}L)(B626OUG@wj3+qmenf-{KMKhhXy#-{|D4@SR3LM#&qf2?2i^9t^ zof_1(+6#-9*Db#1{l~{m<5T{=Db(VPc^}G%y3Ex>X71km{t_p~pb_Yell}ga?eDuH zfwa^V64_`ZZK+BMRrowyFkMk1{gg^&NCjEi8cud>&rfdkB3?D^GCwwiYMSXQJf&wW z;956?A+`GCIF#VP=~Y1@5HUTgv>gIu>6%)Y!uw6p6-t}lsZ7%1sO_%BXNK&Jw|CDQ zS8%g`bLTPO${hZB`XzoAp`eos$k#%!`)aS`MDc;iDx$-7=qL@X>iA#m9F&CU zgzrIeX6}I^L)gxe;Am{yF4QI1LzHi$-s5ky&9=I(hix{9+LC&fYwU-oXTOo1M;nK8 zZAj%;S1lj6oGr)GXb|6bcOGGj8hjs`i>qw8C&=L|48@1#gu25dMDvQ$Vc0 z<9%aqO%st!Z>bp~Kf^|`4mOIW>=TYw&P=>CmmEw}F{Vrs^T{+Fu=+>=+-JC(KZ0sB;}NB1_bz!4>Mt;34&8k6nqdfz40J44FL)E@zC2@(hJ# z%rS%|VoSJ~uC9g{@=`rs>3Tz^;|Eb&a?> zv7zw&HedG<^Ck|Oa1I^x=&bE+7rt=yJMBk)zviROZ?=_>xo-60yYIU6k_W|c*10`L z^&I%wKc+XdfBoXxqnDGBWEpwjvHKrC@n-#lb*;DGyk!ga=rmm0Jkot&frYwtzlDhS zM^({)iw6@lJt+|lW=o?WP}Y%SxXvfgfK6>Q{1e6?GfkyJ6$T5)ax{Sa*7Rf?tsSpl zR5|MS(b3vh%vxY}InxCz>=h}+4<{VkKafF?l~^B#qkNjMV_o~vk$r5tg)9HHM*ME` zx|`Y^?ccY)v6*~Ap1O_gpNzv<p+u%oqC?gs8f|(6 zWsQx1O|wy3E4g0xm-x7V@o@p;;{wLl*enpCmCn-LxjZwMXXf(E+(viC5RbTZH+665 z^;BkU;|m(!&^X?)zIABNj*5ETiALhbI3m`Ig;rtKw?)zlkd8@8%|LV2Z+P}K~oR#xtuUK>LtkwNSY#4XN{hO~`exHz4 zaP!--@p3NBoMN1{bOYO?5nSuI(^mECz7K!jJdbz$_MYy?8mX09m4f+ z({8ZgREe?faQjloS3r)j)J*3c7wwqJ9*yhZES}9MH=mpN9Al|vlB;CVj>+xOGMjzd 
zPiC;2d9?Hb`cbD%hh3&HDkph%R879({OwDvw46AmcVi6hvvUr&^UW2z&)z*@>K3wD z!`3lY!D@9uEg{^Hv}ksD0%7`54zt18;O$S;m+)1hgpVIGKbwsjTWR0?OUFh;r$(IA z`1C@q^4a;M{+YXWz1I(JRowYTdRf1~=^j0Hmgmgaxx#$Ud4XB6O9GdL)&;H!-RgND z@Mz!@&j*pOBF+Jx+dZ2-Lcf9;GR|j?o0#Fy{H0kcF5!NC{Il-{PvD{ zWY_J~z9G?N9^p!zo~}l&-?k2yYMdpz(KJ~p#`#G?Up*p9AS(tc zSBgR=zgfKYefz=`_~8~~bZ z1CZ*_##O*dvzgiPO=d&r#P+nw^G#--Bt?i!@_A3%va*=6Xw!whOU*M?W+<{%h{q|P zhlIrk@fm33v(v*jKzW{tTh&G*uZN?-%IoK}!f9f)Y24!ca(kNihR;UI>aF7o6<>5D zj>TF}ZI(LQ(4egrHfYaCPs;nWW12~o>c~V|tIakwYyUF;%knR)K{SgN(JGjX21yjn z7ON^NiWzBDHY)^xJ+Y1Ns74gC3$v&ourxPI6C$G7g;yCulB9-YL2jiBbpxp8k9A5Y z-9t<$0!_NZ9D$jNFt%1aD84NU8$=?u5~7=G&HI$M&B6vVF|)X4Qw}P0xzeOi zg(wbWe1a@>>F!{Dr=R@ zIY)Leu9C;+WoqkeV|=AMS#5c7bCA_$HnKX*j05vo4dy_1Y)>ci!pKU8B7CbplHRBP ziVJd1r1$8*+s+v*MaZdLdn4beuTS)!;_KLjudi%FtQ^hbqf(n`DR*+?6+McyN)7dH zwxHN6*v49FUybe9m)fJ9HqERwMxrIDJ=b_Fih1Qs#xZ&$jo<%Vjr{#^1EAeY3QuZuiWy?=8)Lv}xAXhF-#0yXV|dBNh~V zzezFooi}vs#e5I;+~kMS1=35%2cPelPUmJ*k{VO~tRLx985MyN%XC->OR}5b>g)}0 ztMrg?k7c*8-SU*>1vrxZuWY;3k!{b;77FEV_QGH!Jj^oDb*6iwf3`F)`%=d>j$4IW zthWa5CwJ5P?SHpA!36=$r3FNGtk}}6g0I9i-6}L25HSnQKp`t68nn3WG>AuV$qa&DYs2j*zbE7TvOzZS>yAj63{g|_ z!LEgDpn5YL-1#i-D?KOuU2Ey4I?+=Y@#Ts9AwO`E8 zUuPKln+!vTeuAOX(fTf2+^gr1i}gN2mZHl1HwQ%7?+f^-Y%=1kGYYcXhYV{L!1+pO^Nyvrj*#{owxl+S?zPRYAJ>uy;Jsj8xYAhOOk8bT&e6~=AFnxjk^FPs)JMl94Au!shlTHLre;~ZxA z4}^ok@H0}v-2bdJPo}$dR7OzLk2-$jN0OmePwW(TH0|#ab5GnZBu@NYxLVq?xxMP) zcFSgL5i0583VcEX+@%lTPqZEmY&&(d#m+qq2s+>J9Ohm967v#sWRy2t4*6NQk$xXJi} zks2jRP3)H~G80D4Oc<3-7z3aF5k-;N7CB!#FrS$YNb%h=nVFe|d<`Ld4XJOk5DPW& zelhWWG4XyeMbf9X`*b5V;TLp}n(m-ZGs%-4^oWH-EVY&f%R-CTzs{GaU(|72+cC51 zQz_U>4Ec)^Xf^WbMQ|~|enTHay!&zb-Q$n9$0t*fhaqF-qv>jKZo8{ zT9yindZxl|1*tHWmkP5(sn8clom&@Lw6ch_LE0h-LIm}+^{@$=K`aIxYT<1-29hI! 
zi5ozWQl}TJTYc&7{xs9wpJlrHD_u(|dAz&t5)ajNl2SvaOloODO|HIf(c;Rs4o!(2 zYjA0wajZZ0KF(Aj>^anaq_K?Ws8bIKIpr0qpH1L9%|cC=shqF&txFm7EOKm{ z+0Hug7+Nv*GFh|Q%T&#(l^Gai)R3przxiHwa$wonliiSmo!Q-AAu^wnRe?e~NwMR(W#y|MEz$B%Ezuv;r&bXrYnimgM!$%h{;$ofeYYX6huK;|9!;OIV<&vHgP)<9 zGQv*vnLT`Sl5N-^rJ^MjQB>qVl`4Xu8bnGBiYf|RT(IZKNMV+OQkW%0WLe70U#UY1 zOAhuh82?(2vk5HKM@%GQsx>v37Mhw&l1c4YoXmW2;u6?`jrwJ)l=x#I+_6e^5)_I0 z1nVu7x{HqgQ0h9^MjhH^VvoeC%l2nDQb-=&g^Z|15P)+XQ(chbwyXNk3ViN;JBL=N zdP#~bsZfv&nL4}Ek7P-TWT~;#yK|;kg<^G~ce42LolYdPQ)D)hZbp8ywSzCy91a=& z#E~UzIUx4io)YMur%tp>d%nLyT>jlqvFZD!l*TEx;W%5s2gmf#O#3_+9jT3Uou!@S z5>4ii&1wa&FQp$iGK*=Kw-0vmW6(HXSMB`#ikna>e|3$WW$8t%i`76RK=3c%vvAh7 z@Zqy?*0%hoy2X$6C4XlQKWSDiO0AZoQhI1cVd85g+D=@N458{yqftB39Xcc}xM9@% z8|pr5f2Mskx%AQ7>rd}>b^97=kJT}A=XsB`x3xVikaf$aT;;Yft^H1%D`+-h59XjW zjXF$L;^-5c96nb)FO230YoN+e;h2j9&!?&-wUL%GH#uR-5ZJNrZgT|6Fg&&`C%??j z;@SCSS~|3)L(G45TXsB^kL9&=$ny0ONaR^h3!WAkXPOc`FSyunk@aHR3gc?qZ!Hhl zT5TU$KeTC9vpHh7x$JhE-DWm8vS>8mG0G11wv^;E7(Cv9KjdXCPFoxMbT=CWuNR^@ zoGpAlo6V|*GQ81w<2|ith2qxRLOEt%Wr^^x3#xlOr3ZjO(3+HU6@ z;`^l>BLD2Xjeh8KJ3$0wYBW>IwHJLJw48%O{45A8jrJ!77}__jmYdAXyP zwl5;Ru3p#Iq_iy+O3N=EHk-e@I3@X^_)oOjd(yw`hWzxdY&^3)%THXr{B(ow#QQl0 zI9f7WHs%ar$`HW}!C$W(aPp-I%8_uKE=(62g(aexm)~2c2o4cOD5q!FgsXFh=8qHV zlquO~c3b1L#u)RkG0x2pc^M*}A@Vasj1PaRaEi#w5b+F=&zNv1qqSc)#rT9ZYy@;sKJP<|fClROfWf2mE( zP>~@N@(6rWAz^n^)T2)a_53}l;-jfgmR^*XYqgl9D4PCRsv?U5l}TQ14yK{imQ@tc z8LO{HS$fn1MSK&4ua+8#kXq6}7LpA_Cat7dw-&KmvRh+|PcvjT?hTnaYUnr|GsK~Q z6tFW1tCh0z&f|KR)macIiKdQe_-u^weR=Ew5_hmgn-z9srlF%_XVo#D%~pS}=_w-? 
z&AS9US5I%QWcFFoJ1iRN-$xi6XT5;)jBR+b4K7?efxWVtainvEPdaCLh?bV5ZU5YS zeh9|jAxrPdxLsb4=;f12j^*UWr|hy!efqKm500yy(!YKF8FOY`{^gB-_;#hV$F})_ z<~u9;lGi3RU9$4~-#^vIk>U9W>(BQEm8i5nF}79G3WU!tk+z3#aUxY zOXqd#zvF_X2OF1s%w|?kTu1lto%gqNzs%*A!cpe-l=5>{mCWraQ=Rzsx9v?`EXriHuZi=Ns+7YE*aL z(pfje(LyEXNc{&I{|-V`+OX_!>=i6#l~ZSXwz#%EOR{X<{N2Cs{svqHhqCwU$Wh%X zb>|Pqr@rV*+ua?O_;;Lu%-iBhC)@nXRJWuQUusP==_lLMFUEY5I^Hw#vsz#~zg%9C zT!xgRF_5nO1D#7QlFN--j_EgO91=lem8mpnB^gwSP^tLM1KOJh?4_mn$W{E^hAcg| zSR#ecO~^AAn|qoY%xly&h7IO@=3{14#9V8pA~mTreY9vGW)rAbWmOg5u*a*7217)Z zT&gOep&6l)i&DvexB56@1l2H8B{QkY4cTrLwJK>+H>emVM7L1ATg6mL*3&yEr7XoB zk!mH{Q))o#c%O7klB8C;dYh>M7c@URP+%{1Gw;(a_K<<>}-YW^O+eQ$r$Pvk_$~VE*2==Ooi6CqmS56?Pp>&4p+yE_39MU zWb1gxEHYD^tIjjcvCecX6)#cQqH>|*Lgz|xt+Lj5lh~^6ay%(MqrM`(s=j7D?)QOQ%-Ar?)DAz1pRaS>ZMh+XOb0dqfUHFO+~$HL+yn9tIf1rHdCsjQo%%| znIm6rEFH@YKcMHQwq}l`lbrr8^@4^drp5J%jJWZ#jt9pMV#BK-G7QQo#poJ*`B#l7ErH!QPrqs3@Eze%Q;XRqTjYLm1iYML8T~hr6oTU>5fz zkAsjr&fjX_iV@)!hkNUxC+UMXZ|4$GAwN6sSzDo#xamXP4eWJ|w2nEy#!r}w% zbFRFS*~6zLKN5rDKg#h4q&D!nu|?g$+H| zT2>a^>iJ#Z0gJm^<`6NTf5(nd_xc~~w$uMexBdQu-ClIR*-fqX5cZZAdpS^^;BcIL zL7+F2*<%YT&{=2hlr%->qRP1wuJtVGm`bIZIX zQI~IO!GZ!>5G=MA?v)S9G%Q!iRCaM)(3Sc!1lNhJd?$cU9@)pY0QgIdGT+~k zt$aIxzkwz9>U;78gnef%Wj7=e8VXGQ<-nsEgPw2HB@*wy&y0#AiK?S*i6g1+Xmwn> zC}nbGzLo;qisvtl!=gN9A#nSloPTfN9&aoz$cmMwUJSxQLgkFz zbGJM?tZ{hnd9TkRr8TRUU!2|SJO70>s~@b@4BnhagWhxYFPKtt-kjNY#j~%PF!Z4n zqpuk4vRVSUdB*dL2GlL`ExKl;KJBy~7ajY4#elx#&2B-h+o63rbkFI5dbQp%$yV<< zJ1|$BZ<%d7-*bLoU-&ii>)zM>?>ayAe(HZO`*1iJ_D7^*Td}LBRAtkp(`~iVxzg*| z{}#W~%$nOO$`rDKxbzs^L95A^*(dO2>`3-jhMvcnJNE?>(M-Cj!PI0DQ>V}-K4VP$ z7{rv?mFC1T&Z4HwmtaloZE-f%EJYv3#@@69g%kgxC}$UYDLDA#5czx%QyaM4fGHI? 
z(+R0BTINx@kD&3pi8PaAL=2NEGKL6*SsrZO5H_c>8HW?jdW0Vk5eH*E!dZ{$TiY3L z^P(QsJmO<*Bm5N-;tvfg@4Sh`*=lj+D7Gui)6mZ4JH(y1m9ZoK@z*v|a(mHYSQL%f zOYLZ?hNxSESWdoxmg>ouGmGxqzIf|7TNdf@@B$ zn?2^0=J%*z&6#RjU|VhzY=p_qcu#)%*MQC10Gr@2978L@W;4ObHU_cj8HThmhUC0r z%eAUpF6U%%+xU1!s(z+l!FegylnCR3PSCuvCQwb$xk{vS&8H7nTy)M6I%kLBoCyM) z2}T6noZZ}<_1s+ExpNOHB&<{^RI#$l6eGJ#;j5F9`c^qFt@O$&+uvbsrx>Ku5%1!W z<63R}^dYNAQntjUX#!j56X)eb=Cjf9<(r`-Ab6P`Nex z^FO}c{`KOI*KGbrc#D7ed6B*PG9wi&d6qIuwMFXf>K#0jjyH~VjSJ4AGo+b@>8^(0zVOS^-<@yz-*>+6 z`rP|Z|NC6Fc*5aCfC;3L0VbW49%!KV@bsg-EhA}-WvFXJ@J!=G%Ph zlQhIVfS6fHxWI$$5K_Yur~MEWH0|W~w5Y*u`{L>rqS&xzqntrd%JQV}vuD40*SojQ z+kf}W9}e5zpZVpj%l_rrmCK*F?`LaIKD7aH4^BRx4V@UvI&Z%I>pchFe3fXV@mM2O zVo4Lh-0?FnR>hx|J)n+1z~LW@WY}M~G44Pt_ssaza(4p=`)`UpV0&Px2$2 z(TNf1^PLIF^V5@^uEgc(Yn_G3Ytzfc<&op;@wmlAk!i$YlcF=Di=tdKZLYI6TC8R9 zskACFyV;+UT@pz4J`a}4!W;V0Fcu53pbBeH+X66aVjlMK{tIGaMo=PY5W|klJh*>K zePdyZfsAAoLEm$A1#;X;%%Cb%6(zc%3$o0csWA2PselixTFI+zECg#!cRiH=nX+|H z!@5&hk%kqA91ybZgEdTd9&n07_%X}_$H7gUP)c{kPtb(t_Ws4~etNK<7YNvNAQmj9 zc_d@IQZo61l5)H~r;>&x?OW?ti_DLg>_Cb=$qyukAm;PS($!`oOB^(A3yd zy{HQ7P#rZCKPrA>Wj61=5j}d}g*R>@Iwy?Tp+|U!iJ>80rK=z_F)T4W;UyL&eysn* zc+QZMMuV{>(U;&8WLmvsRiQ#QINeOED8lC5FwcpMy1_;6Kp5sQ?D2lG9!zFTrzR%P zaO@)pFy8GW&MpAfn@?92)-fpIk=>i{47_`N_pAY2vn<)qOq1`RedXgru8$9W0cxH^ z3Bd>C zMG|Ff_gadRVrtm@5z`S;(7J8IhH&!kWtVlO z&K`OBMQ^>u{b=pt`Gv9Haek_foq65bQ*-dnyP!Ck`vUL0N~RIL=FQYJ!EMrV?q%8- zS5ztz6-`>5+f>u0jdCy5#=29bY1)n23H5K0(9oKu`txeetG}#%UDL)UX;k&7AstO) zwXxM>hFnp7#gMtu_0`u8nc39SbfErl^(BZCPjMi=oeHPlUI?VJzpBfh*v|)`20`>A-o%bbC#MY$7HL3nW})9 zs(_iQYF{zmGCwv4OuowOFgvlOLHovltTthfnn}VdW){k^(&Wmt+SUZwY$ozeORI@^ z&QCnOO&9x+hE*=1K}5s$Jx*#^5BU`ehdSE*_9csBB#{w(iF#UvK%{i6rP&6=qOQDt zXU=AA3W`D5p-@bT!(;cgx{nhQupPiPRoEtf7M(dV` zQ2U4L5n8*x=hVEf-&+0)uJP@@SKj>Xp5KyPyM|#nlIiH8M_*u~cuPfM1!zeCj#rLv z;l^;g4IUnf#1e&=Z0okm35YS%g5+wd9_U;>SZ+v|ZiL`bPVt&Y6$TX4r=Tb(+^7fX zs0}c>WMLKJ!P=0R3f8=mB(GEaP_T$!MHU*+DX1$YsjP5*dFr+Z5uqApl z$}Wm-jBbezMENM|!cx1iq%JJJo1?iQ7XDU8NtdDwLvC!yLmGh}&N<=5Kw9uYf?ipM 
z$O-BPFf51%7Tk`Bk#Upa<%Zkhe2|^DIDc#~0|c4e)CV9@ACMCvF(lORCof15FNC3Ui!gv@(7$wb* z3ib!4bbnAv(jSzP^wYvE792GWMiEAd_aly{f`}s}gn)o6j|L=i6q)``Wu3B7*`o9* zA1g=6edVf@MM{sd!GGwWGN7nc3bx-Rp5+vgB-40j!By`>j3|h_DoQzl;WzLb`7L}O ze~=gZ_@g|_@ELv|KI3`n>apa?{_^v%{Jcu8&x4}i14Y3HkQsbWm9PRoQ9hFimb61- z1iaKV{_dCYbuTFo?hgKeLLR){_xJYp@?XC7*2xH8ck%#Ms{>CKC!^7@nhrY0TSBog zii_zqcP2#RQZVDx4Te#N(V&l0#~4N(MgvA($O-50&BA?x5R(N#;(3-A!VEGrmUDI9 z7BmT_v@V!ZO_b8MxejaUSPV-+Bd4nCG*qQ^Xq_6Tktu8?Q>a_RIefIuf}~S?fHNHqTQxs72%LO!Dd$5T$!-*1&2`&h zAn8=w;^fVL-kPE>N8QtDWb<^w1mmZt0+=LWVS|4Dd2t?k<)h*gcL}>sy@a+Dm-WwL zt8OonPJ(yi4bTc1;uAchT#F3S9DY`KnVuM=8))%}U?51G@P$7!ksmDliS#ESXg`A-&d&THOK^8D zB$HDsOsvDUd=2~GFEC+Wq*!H{m6Wgpq9T~SbU>s6A{h`BL7`_lAW{L538`(SrZ{iTR0W<`HH!hS< z`>jw5NDU^Z>q)Hb^!39zzh-i}wq`*c@ zL98!DgcTU8DV_2T zChA+Hrvl&gnCF6NN?9^QrC@Dt7h1mkOwCH;)ZmJgMX$TdD-OwdKVb2Tm(ZmA4kA4* zQUZrS2UDG^yWu)lw^PV5B7xx=L>&H+nL~OuL1tDAniPOt3$Aauh5N+FP4kvLR(0p= zKl}Oiny&K}{d4cMS(mLI&DT9T@tW(V?cThjzn=Z+f@?-U`gH$e?6&30C;jOA{qOt! z%x8EfMbTC-EO26&eb(A#eZu`Ie3UyL7I~s4vzX;pSZKYqFMcpS5a%FK-xBYO^Kp)CjzocPqQFB@;GQVRyW_pK z?Wh0$U1vr;!B--T(f7U*bl*qcqL1U%tLYC<0; zUMx=)r^`2pb7i?;jdn&yTjFD^@y__@n0S}arCe@xJKfRC;|qm_$}DT4voJax6|GG{gd+v4BbAzZN)T!G zv6KK51Hx~iL|d>(G>i#EpBJjl6^2WQkt``gigil;p+Kp4`OA0RCm=#j*D_;N%ru?z$-{Wh(TfW?eRl0c>3ptYP6 zti}0mY-V*2=8vFfD~U)$-og}NigKNBox&4yg}f>ZZIT$jX;OLMI}KYxumCkF@Zkt+D*2)(q=Y?aPXyrP(1ankGQ0 z7)^&XCX@&mA`vh|f-poCnZ-oyJ{4K2r_NM+R6YZ2k@5U~eV@_15;g9hp9nmKzW{%z z0cffc-fkg_q%K9_X13V}At?Mnp@m|D8{L1BdwTDyZ@l})>wh59;Ns$3?jT-wi%Fy9 z-U5xyvyJg{*zxQNUF?W-B*rJ!Rc@>l3gJSkqw=EgMX4#_DXHtj*QaJy_Ehc{-*r9{ zKi9vATSM5ao{zM#E&3(wSbYXNmwjLVApS}8^TcPVQ>=-2!%e0&DI~gS9xrw*)XW6y zU5+&RVNZsDP$sfW&zx!Yn0zIKJu6|wO$d9MrLd<7VNVmno+cO+5QvPD)i7!I1d+bH z8zkIPzn^k>hvnLD;Z5cEV=c%Vh;K>|-;|=%N2BONMdg{1O+T<%7S#-5n#bC|y|m0? 
zWcxtmC_j)n2yJR=eC*1Xi+{cOojYG!{A7RiFPGo4Y4fsMpDNB}<#Q&Yp-9?TynEBb zCobfEwRi7t|9$^^|4uCBd-3A#!Ru->A9)jog^|Uh8ot0^$WP(t@Jo47v1LV8jIga3 z3@0NEHX@@c4eMkiXER|GX0wCZPt)o8$8o`uhw!y$m#I<_gykUni-rcpGB0$ZGw#*D z7aBZdbst+os+`GuwFPZ68SD3JLU+7MW^V~8Y^S?|!U7W3gZDmp-rSC>u0HRA3(mRP zt>o*TSbXv5XX?jw%v{pHAAjeLfy3NZJTt>NZ23^&gc`947#qs^4pO47tj!)JY6GGs zAhH2b9T23Ep?4R=__A(xv~sC(QSH?1?CdJ#Vdb9MP2pcO{f0A?STY_PKECO_n2=(x zWLaw@QsZ56m(rznX_k%+y7AZD>W3Kpfdb69B}CjznkTn#AVBTM5xjR3&ase4OB`65mU( ziI*`4L@+-B_UO6f5gXlPf(f(o`G9g%pNp#@RL z&w|L}XI_}F5uYBaRV7ifHsOWig^}d#Er4ys=}2~K0y@nnGUT0!40%@qVk`+j56QE5 z?RT?RG19}VLR2UKdZ7vaGUCE%dx^1CuWUIicuP*6cWzDL#2yPt3x#@CR=0Z z@(?wJ8tN5Q%=0Ox%Bmn1ExBz$I~~9UYRs=*y_zYLm?Q?=efohxZIJ8h>W8u|g;A~F zDqX?T6aVCbUNKfQq(7CHrt5FR4y;4F92o*~C>qr!`&zpjYo&;i$Zm5W3qLz|3Rcs4! znrUU3EHVa}Cn_~hbTlBzW0Wy7rA1PYv`*p~Y}9U)wn%-_K1q~_%|pbnL~S0HmWe=K3J2kFCWZToI#vmGG1e z#86)h#U>`n!qyXKGg6e+nAL`K=C98`EEP^3yp zk@{$#3K$rooD|$qaTq zc|FA#vQ5!}#>UmOJiyP2sjmZ2AO>3z00ORK)w@?e)oE$Hntjvc$q%2?`;*>_7k0MX z!amZ!{ktQ^O`h_-``NaW2k?p}366?aRAooKz{oUZC@5NDzFoA$M*ZMtsVVSL54Hkp zmF42a1esA~5s9VJq*|zqAC@n#P=W%boh<@lvUZzFRGMM8DN!d~kjcQ}i*Cn9O+_YrA}>QTGWe~aq3hwm7Ok6Q|6#K z>|A-SvYfdM-Nvqvmn*laYtS0@KJEeOe)&P=r_6fg`|2;5C)JmkozhnI_snbR0p?xx z&&((4N#>Z^g#R2h&P3G)rcP~DI~h+^1kZ^U1Uxx~;HD6yE=we~8{t+HT9-4R8OU6a z$G`(2vjUH@f}m?8$M?fLo*VqTH@`Q}3@a6IwW^XV=M>dd6qVsv7F&ESLimfQ3~7}? 
z>H#EG;TR+g(~+K)JUmG-5m*6Jo|0ibl-0iYPr{Z*l8OHA{_bS_$f0h( z8(#@aumMOoUA02e^!@ze(?8{PRMn_COp@`#%?SN#alyYG%2mble}18O6JOVV&kZ+U zv5dW+V1cxTV<+Y`N4SPmW%-;&{d$1jD|CvWkZ>%-F9iy|LKZDo7I^=H7J68qX+_bm z20anPf&D@cs)(K73AyxyB>Ev_zuto~y9Je5BJaawr%p%D%5Jvf1m%8m=?U7-Qw{oE z)Bb{Bp=bRk4hKM+-?Z1x(5HP3^icpjf3*Gd79yRvkI|T%M04jw-<#eK_jBNB!%>pw zm(l4HeZ3)y1owHv2xWV4uG$=8bZm$t(Znoc=+IhDw-L*$ysi4tW~z&9((HDx_1<1< zKk2^P0cC`cWqw-ar?9+pQ6oP@WiPd_vLCiNJ45^P`<=A;fFM1@J*BF;V5KW4^ylrY zsx9!Mu7t&ul5hl`;YCf+Lb78qVa}D(a!RWR)iOD$QO<`7Op7#HJ|}b$H%|1V3G#UD zLUWvbsdJTixiepyCEwtz5O0^3$}fn!%^l9)#FI*cW;ZYmMt!KktapaFXEUwNZSsBc zdhRj(8T2gsthPyip4lPp4*iaQPkdiF%pW%YhYVsd@Vf{&Ka;USO}X zAGB3lT{dfz zGtCmvOu)7{EPs+%MK=sj9MG}$n&8B|L* z_<*^R)XzyFtDXE3n*5I6QZ;2-uaVI+>`Ma&v9EKG2@UM)Wrmv>ypIo-IulPHzojr` z8qt3Hwo0VsDt@4P%J?nKki;wx9Na2p=%bF`s*&oFSar{Fddp@IE7oHnKj{}Q}4e7XXPVsK>pE$$A2jHRvk zkA8?q28kSw5wTpu)pKZk@x|TGb@0v4y|AI>+#Q>Xy)QmDe!h>0|jXBCkZ4b0dkIDF=? zJn%L&AWZEv9b{$`v^CMBMB5DWhx`xa$3s6d`-DERPkO^tOfTA&O ztjWWi@9@*5>Dsi=V`#m)Ufap;(toGD9(vO{z`d)yZG2#Tsyfc;@>0h(40hg22V~&8E1@W_*R0vbM?qX_Y@!h<5NS^O}XuVD1IM3 z^x=Wg#n0IWRQzV#@C%wx7WMxBK$lK0b`z~%U7XDQ1#?o8osa$LI8AJ$l?UJtz-aP5 z^m@!2MnnDpyuzr{S5Oe3EGvkb?M{gq^>SvcP+gPLxOBpFL`@8Pj+xOsJ>xTFVpu-; zVKTlqnXt$K;#r{PQrpcmDa825TbOQZaHpD^Rn9X!yz-fb;RTBflCC&WBkt5|^?JQA zN*`skgdVrG2B#r>adf&fJv==!*O?oh8(ASPGgjEQySGQ~H6FCrI%~rZxa-wtwU?|H z?cMGd>Yv=d8U5Bb?m)USVBKg~OQ(4ABJ&=TGZUq+PvdQluN$yMXqvi(Wji(p6K*)1 zb5s|fm^v2TIZbsnO%0QhLQNzez@)7-J1qT5noaLwJDxZ3TzKv-_6kqya6E^-#(BkI zon7dH9VW^$V^S)43TDg8=)?6+otvZ&K$Y|b+lP@N6}F={m05)aIi9?J(g7HAD`@~6 zw~if3kh`-TNye=sKrwORhz6`Hm*=Js-u3IB$8QN?=@`e-@kQ*rA7-?H!>C*~x&t5W zXl+xot!*LFyeraX`|W0?6IY6KE%W)b%skD9>uEk}D=88U%u{m5uA1y!?m12E7su>6 zK`So&&4>AHRsNIS;)3&Qhp(DiDBkd#)li$7Z&vUP{g2k5MzF_(k!pjX< zD(=U;974a|flB?vXp)Gti~YUKI%p)V2L2CE!2~+55^|q@<6Vl0A#8&(%xXhz>Lq9_ zJ666#>9o4g73>xA3}uqF0A0^sFV9m}qNVamx3Favd5X=an}3U*ta^ ziR{{)R;0jUZK04G6V*6vh#jq{tgNaz#A0pEBGNCKy;jKMzgWH2VCe1Qz&Oc=RCX6K zd$Dd8#24AC7>1EZ3>c8Rtg$hKn2;Bm8R`ig4GCcW){>_}OBwY}gf=s%lew80U^pfY 
zcw54>mR1w-MS|@#Q_1Ql#GyPSnOprNhShF;+R@(sDb#fM?u8X1cLGA7*g&h{#eRMW zs*_3Vk<`SR%W+$0NGL`T`atE$%U=10IF_TP!e>l7|)uzbN)Vajg+!kx2onfMF ztc!n>(bEEsq!P6tv4(U3LDHz^>PQ3o^exkho!qScU*Ei99{TbTP8J`zt^ewk%8$v) zZ(|?#JJzPn%#1NJGjq%_ zGcz+YGgHjWY{zySGcz+Y#1J#HJty~e_r3jnJ-z1NtaT!(bhJygcd6u+^Q!cq8M~d**>e1E>=iT1Z_4fe;$D3=aWZnWuy?j|{1dt+YhVMQAdCJ9juUdUFtCsD7g_)I}13>TvAe{j#3juImK+l|PTrB^S?ED*@^>?x} z3)6q{_D>)G@|p0@sQepP?tl9JFD}^s2bvtm9~5FXLM>(v4th=&K->V3WEOfxKxmi< z|El{}3BV}^K#u_wWj1ztCN^dOyBR<&{>vD^qGJc(=m2nMc6u%@Wip#^yS zmoX%?h|{EL=?g0q)20H>&!7 zgZr^CvHqR%rjs}ZU`Hc~KD@&)%!z`rlUtAm&&Pi@oC%Y90oF$>FE*qV!Fcttk^7`b z2h7dlWL7gss^U`?lVfqcBev50#mnYvkVQKu^!K0!_F#vK?^(5ITaM;*lI_hJCD@^> zwc?!+T)O55ML<`&!*+->O>-+O^fOQ@)%EAk$)b+uj=5d@Pc zwU$N?@7k+64iv5$)$@3nMK~Ol0qE#Js|jq0glDDM-F-R9?lQk8JQW=VKTXUEmV?SU zWxO#7jUryrRU#W==vZf%e%D^qDVi+Dv!)IgzcemUWS)M+T0;u@_-`HhSnFF-hL_ku z;&6tTb>dncMb=TioDd2*`MlyS-9lWlTi+_xoH0{S>90CHJRBw&_%@x_U6&i!?LV+j z&4Z|U{gmqS{sR>H4=nuuni&Dpn2VwFA5i%JMFjkxbHx8E2JnwrN%7w?fPdWnf1A4g z2BrTe29Sx9ksUDE{OHvzQ zq!oL{>6OJtezn+71UwtcLTe*p?q5Xo6~i1c3tKiH6x6ANY}&$$GKAe^^xOC1_*vNu zMRG6^5gg$HfrpL{lQAjEV`pw>Zr{|053P&)^Nx!SJMhm$!7wv-5`msPnro}+PUG0Y zWgFA03p1JLe*2FiaKbp~!r*N@&#A0-P|M7vUD71VjmShkPnRVwxY7@+dVyCr`2tLE@S`20jq**x8!Wus zEna@j+ibzs9K=O@{LGyOI)`*NQAmd3NQ#>*t+o%r&MeQM(IHI3Tt94`mDKJ`j|u_5 zvC2hXr0weoDjc=>zaS5jp9wV)p|AR@YmZ67NzfukzTOy57DJm#_@;P35Eb7D>X1*%^VvrMzFe67yMYt-?0O}nT z%+UxTT25#hmn?w=Ez>7OOrQFpY~_S=+s`#|YoQxjTs)um!wWh{>6WNK6nY0Kp$Q=e z&4cC0x7~Dl3dp`&1!N&l^Q-WgIngh%s2Rcx!KH$*V~T9c()2^0!V#(VIUw8`R+Yv5#-rKnBzY0)4@!nAd%N|`XZ6}hF&?M=jTpG^6j58JbU$f|& z2&W6XenlQ*O)P9M)BtH0AWp$X6U;>&Q~ZuhiayF5Y|};%Q<&ZT0P#ISf-%%JPq!>C zD0t!$F917ex(R=^Ht-qih$@n}3`M2y*FwI5d1mlR^P%NYC9w~(yIO(Av;YieQ;#>V zB}==T2nJ(65ydukm_vkyH)koALu5N=smDlhws7@yPUvj11W~k{NQX`9S)7tjyuGHR zu>(knbv1+(RT^LHV$@AdfJT!1C$a`i5R9sXGkuQ7me^aH;2fYMD-?YY+ENgG%mAq9 zVZ*5S^ulS(&p#QBFwK2XOt};YZqEs692&!`vjN9aeEtS#H@mDqNB3Dl`K?W)Zs9En z3^muunw3G=P#w8w(~PQ0RO>W%qH9wDvzkgj4XUN5j2h 
zdR6OGN%yQZPFta#z)=HDEe6T$amR{D6^Rvx6Cp!4O-lf8+KpJ`cdJGvaFv2^Dcp`G^K#hfH39SakxeC3{-_d}(w%Yg?t^;PJ$JG`JaWC=Pv(`RE$R;smw@|B ziXkNt10lm7ix9+c?_U$ypWry`^Xg?>C4Up&1m>(8y#F2XKxG>$LK}&QY#2NxpuAba z8A(iW7+!-!F=ZEO9M7N=q;605Ko=|Q0@E7)5 zRULClw){QP@hI>t&{DqFw9yeZ$cOB<+E}|GRHm&~ma(y_@KyP3^QoHfr zB|jzdGI*#sDkd7xe!*lJ^p1EU>ef=M{jS7K{8n|lHrr%us^N}suftc66ThNco;djf#V6=xZBLO>R&Z79Pe_j}$AA8_CY(oWwb=g5%*;gj>$(259b- z*{HK@`Y2Ujfcsj>8RNxlsRUx&V3SkCDXB>y4~l3f+we|H%61D-6DN-BI_Z_&)l{!j zqyX8X+;u6#8&LPhFUMT)N6bgR{i(~Y9lb+_OFIMZFKLoMwy00{ieRkH_7roVjP`OY z-7(!z-M!U)Fm7Bt>zF(2!18bp5FG2{?LcVjGC}rXP{7k=aDt#5n~c09K$ebL$=w@G zFy~+jKwOc)W!vPs@QQm{?J3?PROEV&kpYE_@Asf`k^W_{nMs_IAUcA)31#h#9<%tm z%};X~_wvLt-LLICQ2O%`;RD~DKX&N4_OdJFnQHc$CGS~F0oD#AYbW01d`Ssum z!YYYG!<7x}JvgT-wq&a`{4Hl}mhdE8?daj@Rc=X-!fBKIr{0ZSQX?<;Y%a#zHto@~ zwJMJW95HDQCpR-Ib1{TGZ;+qG(p7sM7Mq(Hx`jzk zdpBm#Gq*6PA=dioAxPQtNI}j>bZ|(0*5M-j`xJH5U`*6-b>)HHNm0VA6=Du}K3K1f z{gQ?TL{BAJVCqua<-kYSMfBn(MK z42u%_B}gI?AW%q(yJg7S(PF<^qEI!GDvnAP%_m5d6DBCi5f|s|`0J5Hq*7m3H~FuO zT9ehSf9d>TK!+v_KSl_iWx0(jO-zEclF58Ttkc-3yN*CG{0yalDb#)( zCH7~q5!sHqC?`EJUMf=TLaQ17_>XN;_g`Sd72;ou~2=D?MIk0j;Xx6)A{}cq{{a5+5l=yp^TBl|{Y3$3L1QU(Fcq7J32< z`jvk6E0K{_Op{i~lU5x0zw91&P4C#>sEx(7w8uv z^rC1g2||5(CoEmj*o_b5bCg23tsvDe(fxoX0(*}#vf;a*WgDyk+<`@MI~19d^-NhC|ukANvzd^)W5J!Ih`!%CyHBc)e;G7 ziuo-*9n9+S?{u6b9qfEq7T96RjWxeQF-yu$%pcBbXGs5}@d!mLSfj`f5q7~vVRm7J zNS}NGa7>k09LFs+#?C#XA?N&hfsnAS1F&c3y$j-8Qp9=NdGHqH@vO)Da36xFj-OpT zcJyyg5q?-ZUFi3L;MJvwM9_+PhSzO+Xi9DKn`v89btCG`BiLk;0>!3Y7BU9`FK


dG;K?@0zArUsV#?Ea>lmBuDrmMR}aFiA)AQm3s4p-X-gzMi<+%gk>|sHi1jQd!S;49MdH`tJ9sW7>{X>bIY9`eFNJ0xgBTPX=7{M zeQ0+R)T(bW<6WYtYf$RKj*pSaL42sDyCNs&=Oe?Ze~2gKH#DT|W1Z);oFFp}7t))Z zt373=4uFKTguh7=>z!myVrIjz(WzV3t!T{w-)C?)K!=tj^ViDcHt_t*zglB}r>U z$S!Moy9$RjAY~CvqQ9Q_8ud)x-;ewlfOVa!wJUyjQ7dj+RBM;LgC7zGG0VR7>DB;_ z#n{xAUFZLx<<|%@r(?F1H;%5iZi&Ht|K5GkAJ2q8?-fOx(VCFmmo~JMoxYdZ3b(aP z!RXhwB)#F9vQk0`R_&CBOg0UV?DLoe#)b6oLUL=(04MOIilJbmhg1V~CCrulA7xgpO#x>UI z6m=EQ+X=#U4AvpT4hM(S93=v;XzmXzoqDsW0V-=u*nmFPXn9nGJ$=r89SI?^yVH&* zc^hVpj}Tntu#3_~V}zQsUz*Y}rk$=I2A_8rZk*u`FhE=-V`U^e-`*G=&~T&DiIo%{ zc1nA!eNncuB&=hVlJ(13VXn-|Qp;ew@x$V0Vis~qrS#uElX{Bq66|T{8CU&N1l0IP! zW=R!4Ehqr~V-HC+z=+G`%hW+S`Jw3@1n9znWgSxmJ|wnQWt*>t=-V{2p5Zs%GT1Wh zNzS)<5giPzz0%ec?YZx59VZ8uajcJ+Z=d21sp5wc3H*XxCRUSLY!Alp>C$uE3ssP(JwvBj%*DL!yJiCW^8uA6uUJ^|m>iptWUL;n8mPAe{;vFBm8 z;JzX6@@@|}zIT>qlNrP_uC}_l(w*j=oDyC4 zZKeKs9{7Of!DsKN16Af$DZTE3V66RqBi8E5{TniTGy_HzBQDW51Ku?iH~H%LdghqX zO3jw=1N!&*vivdyDw^qpW&L99U<3qs_X#t}Vc9OF<-(pkMNKs`l?&qdC9Mbo^!a&j z@n+iXn$bv|bPr{3lRHRO=N4ASJ4o~FjX9X2l8nzJdq#FZT~;*2@cg0t+G8SP9Jy{_H@ybnhI>gV^Wc3eT@%9j@f3`usMq zZ#1DNVJMTHQLOMnrL;w`-bwF}_A65t>h*2q^I4tQ*TzAyggh=lmsY07YL`?(*R%Aw zn-#3+8PZko+?JLW-=(NG#u$6d58%^k!(2Kr4_-!ECr@Sg4eT<+LfyzYF^`NHRBf)< z+A{Iz+vp7ImcVN?neEC8R}^j1Ii?P+p%5y)V^t0m3sr5Vsa0}3dVUCXS)@;HbEHSX zGSGNBq%HcmOmpTGC5ACLw_|_X>FADIjhmhk}*-mX_Q=8>^T%O;n-~6 zgCwerULjX9_;TlejK>zTXF>v_^Q`Nd*afgj?P9M+7t+J-7w2-bh7=(e_NT@V> zKnID3g^b7(}&9{Uriv>WC`s=V)d! 
zOD?KhJ*}7d6n-*uakK#}`bY;`q+yByy7j=JD<7IEY`%7PijopV2dsCe>^Z3i!)4C+ zSKhuxNRYF27D0H1d+a<2FOM%LQ$)QuW@TvjT=sG;vw6n*I4v*3Hf|eA<;)dWA+kHA zM{|HzHThnb#=6!@8Nw3t5l`d>I218~?JUlpDUurIE;{+r}<+$y-sUI&4 z2(sseB%kgHd|LKRvfd9&grG2PjPY@3!3nw#%@VRiueg?^4VizrzvuE&b-B|{?GEH& zxJGCGQL0$Q?W&jlvu|lw8S1UsRTp>60ImfQYgDNiipCWJ&YTyfUuy^Tk!*%1&0-UK zb;MG21^WVS4Z+d-t;$X$br%_ajHnuW|IiRB3RMd}Het#u$|b~&5fSE;Ezn8>6<+i$ zNcEOmoMZKmdqL5d#s(XE-|j=%Ri3e>rJ61X+kX6Vn+kLN#)Z8H^`OulnA|a|rmD;2 z>eE_F4o0$#7?(E_z=CU#CnrLIcRJ+R-DF9_u*E?g2f9Vj(yj|rs(~_E(V6izJN{04 zBC@Srb?YWlDHbDfc7@!Lb#4bRX?-y0O(5P_UTCO8P0q)LIqqkXsqosBMVzvVYN{qr zn+Y7tM>lYKJx3E6%gTz?w$230mYlPM)C_veX4$(27z4?=$Mb51JhW_r!ESA1su z`qVT}x7QX+3&)dUA=M$xlf6D&a*%KNN+Xu{POjl0P?57)Y9{O50!uO?u-5||XT5Db zH@I5zt*PEqWn;ElLCx5Ag1l1Ts4nD}%BGZ?kDy((N^+@DV>bCy~xk9FZwy?P8lbORRR&o zxs7XuTBuajTh^|Q3KmYJZU-rBGz;^G zj~3UuH(M#O*M;xrXhEBBr6A~vr)Nb70#dZ<>5h0C)4rc#YH?z*2$^nVu$07o_ZG6g z5*!0ccM-I{lG$s)Z|b(#H}L%Qt5mQ5Q_bX+y&dUp$d@Tt2W=>`TfOd$KH2}IQ2*8$ z|CbK)ms4b?X5wJ}yEM(t z&iLO%X;y$dNyoy(PRPszC;^0ME`W|BW@qaR5M@l50ZQ_3+2-GL9Jc@D&Yw1aIR!BM z?e@P((Er7`e`!+xAJk}euHQ=t*f@SW%Erk|$j-t>$i@m#o0)%mz(~l#_*--be8%)-h<$OZ@t z8^>>z8xV28N`T)TF>|sLasqz$HM$?I-E}86hNni8@%8Zjorv6241Z}5w<+0c338eIRiE~Y?UDSOvHD2 zFKs<96$N%=_7c^#UF70q8GTRtW?yLC)1Zs{Qy<4p7No6PCG)fK7mH)e-X=Wm%p#%n z$)vD+rnj`OWJbqk!?K$b)-LjU;5mu68&vkI+AF`>Y(bkito#gs_ORtDBB_r0WNWj1 zP9XHDMi*mXJkAbXrg^D5O<@5l@xE9r4EdU*eRhkm;`KLb)ttTx6vkhAzTy?q41U~U z3mRny?MUde0p*i>VHcb-cnR_eq?o6=x=B-Z#8Vi8hwA0`nC!BejA@5V=Z zA_5uE`p;UnB@C}TO9)56M>t3^*&6wG&7@0-Yk5oePMKLH@nPz+q-b?VbY2}Cx*y7i zGo1_w-C^xf93WP^-wKvz#8kZBfjEGQSO3FY@i)!nGs$CLQqB-{V;O#MHrx4$3g z|Eb>o9u)uf)N}kbO8%+dvaqrJo`e3|pkZd@Vq^cie0$pA0r*vNk;Av*eD$TXbZ*1> z9tkBe6bWh{LKut~Nfnh5#nnFGTST*<_KLDSsM{_?EUru-7E)OAWZ7Ogk1YvHK^Y0j zBx7^J`BwQagt4O={)?CPJ%Wo17MF56x+d8TrBc~!ZWkw1e;|KQq2R;@%h09Sup=>` zUjwT*lNAI!JEId19U&ZgK!g)!GHI;JA6h>>$A_+bN0y-nifKY~Z?vrapi?p_mZELb zXdA0QE_tE!+guJr=d^-FW@s+of!qB~#-mb0{cx_kA%PZ=fkwY_(K#_Fb}c7C=Gg(q zZ)WoNNWv})hP|m7dY!Dps*PXqE;~bZukZkeMrUxG6i8#dmOIf1z5|_p?07VkPI0?$ 
zwqY8)1TA-d@K$}Roo_V6_6V->z@+QgZ%eQ7_Fk+W?i0tP&*Jhq&sE*N*{8LhbDX)A z)|_X*G91s1IqQQh(PLd}>$K+bwc|$9N|GeR=Ry54~;80=t+atN9yfK=7T^=3)?NioO<^J3w)eR6v4ze4kPgX zxL2&I>m=qLxpb6|s5^T*FlQ#{eh@i;DeQi}yMig`zIlorMdZD6TMSF+T_G2rmD+K? zH^@InoBW~dLtWeXF*iymtyfL({bYzh&RI)nqW4E@lALilQ+BDo)Y6DMGF~eB46trL zp)OcGAweCICke)$Xdc5?qWd72ejH9*2RK%u^WLaCIw-rj4oY;DRwe4b8qjiOchCd* zHfn_+VnvL8)uRM0>gZ2ArnIs2IQNovWP=IJCQuej%V4Ama!y5j)+O?p*h^GWW}c>5 z>QUK35RMGn%LVemukgW|SP`!aea2~ssl9s!1Sb(&bUkegj35`W0auLp^b#8~zRyuh z<3Ru^yz`xnk}<@Aq#(BM$=-=u&R%S(=mw3gf-M`9)2+Nc900h!L3vcTFK>slu2$_c z-1p_y4%bEu2}zp_oMA*BV#U3n?eT&QIQ}d!pNWFyPba4kELW&sjnS*94ooM3EjFSZ zY;#~Z1|Dv3%J&-emG8LRtE;E2__3Cy5iTNBVeaw4C zt`<~R17n(P>)4gy2DTR9K&*zun>>g)WUMJ!U#Pu0Ug#Tm2?u&Hr&g2rCfWfEwzNe< zynrj`$dNR5{j9(%?PxISOZpvW;;$sCmpsv#@a;J(6*_duz4XGS*G$*%+9S7@@A!G`CycqVjn}GyFWTls%#&8p$olBJ9r(T* z>1_3&GfD|6Q?;0H2k!^fdh}Em*@i9M7>$s6#2tqLUYrBpV7YMoTq>as)EI~UrvK|P zBby=XUj6x9*VvR_?+5MnI_+Wwg(cu!eKiCeb*8*_y{fA3c9IUc1)Cw@PaZ8?D;j{` zb?qrx(vUA-+CMdo9e-P^!e|&VpFU9*P5B&va>U20xrP^e$T~ETbrg;)T7{P#i#{h; zX&xI^S8+eier3;Mhr!=a97=ciDlJX7pvbe5sNR<>uQAWV%Zn4+rHTsgmPoaS!{{d> z^PLG#p!cTG!w22WcWZkiJKw-Rp17y#%N^Q(~YeeLS9S)oq<~Ff19cF%1O`CAn)zm?a$b zX3H>FEo=@`#bNEGk;^SU>V3cFE+B$k`Z{j~2j5{e~SX7N~rs!AWwn*J?0v zQU$8PR~RR~vS{wUApp!q=(&HwVpURVwQqeQ5MYv?lcmhozLgY*i7^NZlNL$YXD^#D zMAW`sTlO%J{oU{T0L~!T0OTFz+HnkMl)`HQROa?wHqLmraJ!4PNCjRfc7W@2BrNXU z_wo?e;$trlVxFFWoc#^RsbMfWJWO!)xj7!@wd#jDk*yTYE^x2nl4TA6`-L3GTG;xT6 zdPS^O5~;=LwfqD&)DqUy!8t?siT+SmsiSWP`7u28m%4rDjQ)dG$0M4mFbyd4$jY~_ z71vq;8#sbAT{PD)x;ZP7{!8V2E?}mvv&z}YO9qr6yly6!&G5|qc)UhJ*Ya{-#^pX4 zKB!A|38!+WMIKKh#PxWQ#s%jGy+VC@%z2d1yUi4RM3~}S%g5z|qR1g+N9K86w7o4A zM?qdg>%H5r*&i5TFks9nI+x=Yt&!pneBkJozT|Vfq)5nvrLuS?KL~v*o@G9 zwY;5$>~Y^6ycukBe?#?X2Dc5_N*qhtx^Y@mS=J-V5vWL-B9{{c)@HE%ig1(@oVjg5 zOkqZTC_?TDdtz>Iy|GUb4;pg`#2dR*y>-v3z+g|+mFu)pn21x*O*r%48Rk{E+nSkw+uC*Hy~Y8_PZ>*PZ*Q?lluVs7v~I55{T0YJCkyOFpoE zrIm@5m!v$vxv#{Yrv`~ZD1NEWM^ded?yzzS3+~)qKq);K;d{}i0i`?GUnj!sk@!hu zQ3wl%;T%z)c-Uq&Ze4ch4N5P6Nc3;zwHLZ4QgzR*h`5(Lfwn``qC&qg5Z*T-9v 
zKg0*I%664(MO3&qJ|->G{Uk*o_mzs3d`F$C@#TfrfvJ@LfhY)>Q}PaVv3>lDz&=ZT z1c*6qU3_<+@G0Y$IRkgfk7=1(qUUBtv1Unk)l&)tbOt0nRDt-Kq(z#!z^?Fh{)q|>0LZx0VUMvt&_1gCyT>B5tJrL;s=!`1r^*Tfz{ z-XVoxzIO#d=J5P3L^{!qt&y9;b_1ndu}i+sz;9{q$?r<>!MehJ1_}1LNSNFqZ@q^F zD&xp6;XVU(yJ$=iEK1bbWHRxeQa&Z;B*l&dTz6-ZVwTQimxCuSvqQs}AhEs6M zyQ$0NCyVsOC?bp9Ge$BZT0?9*=Nx(_vPON&LduyVQ!-Dg&87#<6<>@OD>{@!ArLZ{@n7y`6Lz>T4KrDEd) z_F)udReRRv>Anyy37TB{QlsEwOU8mY0;xqdY`1?jhDlRzlRPpz)S9C(h`-E3XWEMq z1j!BCZHp6=b%~vj=doqPsvdaGKUf85bdYPCRoe4CPX}sKb|LUYz~y}%6X}=Up?n3> zlk$=;3@sqz%SvO|r18z5mBZM9=~IC~+-uE*G~0=Mf@|EzrfryQX`a0^mzHu+S?WVe z16RviXrscG+stm~ZXs?#(iL4LkhHrdvFJmei9k!f^lE4Celg~&>1%j5`{Zkg;oPIv zFI_K5RTD-sJ1{GDjBHVQt98uQ!XWj6(+tb@%uqJriP!VR9kes>B0SP*FK81<#21qA z0^Xj;1}!JRpdXR0+4N>Q7Evb782a83UFyPCdunQXl{3ZV;dT^a6grWl%MAdfV$YhNIyiywL6y;g%&D`7pJq4>ouD7f@GX6WbRN0yh| z>=DL$DQ?3&>p{)*F_rU7s*&IQuP2FBv7|f`AQNh^)yl8~<%xWxC~sw>B@yy&Ld@*r zBn^FV&vk*ZzF-hxV&I8$G!LsErDDbLwu^!>$!_jzuM3$`sKJh*V)j;E;%1@Bw5ylz z1fmNP(n8Pjz1mr=?*ssK`lrx1RrnC)aVH` z-!q5AEdV_Ym$fJL>%|Yr$JGx1WKZJRlk|#gHWYPqhy|o~4Y{uO7A^!uXetb02o)G^ zF#I`M5ULI{*g-so*f)&FjiKKl&OB*3=;3y=9pYGVSV(oY-d2a6Dw4&N04(QrhuV|VE zT)WBk(7P(%K&~ufm%59?mwHV`y1HHL!FMhD1^RH*BbqT(iE4sW`|s?@cJJ`){40^| zf~_vMn}KUY*P)h(Hv6wk)nne*=)OT*sj7#$58lUIqa!z~$2oY{l!;@gw#X5(E1 z#)P{S^1Xqb2V^D@?D$J|Lo^X@LS`aegiS0@f#MMauhkt8HG?|PtVcA%Uy*!Y zcB2sJU+_gdAPDe6CJ3Pmk0bJhiW9-zMYSi`b#(_~LO>)w-(E&Q%=cRc)}hyM*`Sr7b!Qu{hIH44%|ZzLO;E^xHkqjQf^Ro z_!LPeEJHVaKjscqe`+4;l^dQPVjEz)$+#BCD|C09D;0P0Cw5Kc(Apk5GLoFhgUFNabBnwl6XTy zB6S5mB-QR9TmkPcT@vjgKSEvET{7)*TvGHGUPA9Sh5L2uJP_^*Y)_4-?O=IFKFY-o z?I7*CU1IM(TypIq+H*d^yh7;_zJuuz=lItmwe8e~L-(gTls@8I39!GVT0b+{&c!)o z`ktDxzY(mLeCz@83qYQ|cH8BY`A*k#yyf_>>}<;VS;2O$Ylr;}_y3Z`y$+Rp#92R= zek`%SO#x1nzIGe5UP7?X{zlkzoNJ%?KH_Kn{Lx)<@#eQ&(sfUe`Chqty9a1-$o|IJ zbUf4|BQR<6eD$$>`?0lhEB9eE_vZ)r(M`<@K6L)U*>Q@7QK%mTvHXFZw=A+kKZN)j zLpLKL48+~M7mG#$x~TGHCo$GCAGCBm|HeoyGM158Q0!syoQIk_XSN$$Bpmgj&cY6V zXdzdO+vfXWJ53k;1$p87m(2kdcTA;ZZn)t}gs5=Obsv`sS+@!bel+Bd0AidKK{5?x 
z^q>mLOB(ROv;lJozBl9IuP9DZRebHZa1i^;{-U4pYJ_SzvL<N4!kr`=&0 z(;k0mojW|(nrfS{=1y{xe7E!9czbB4;eJ~BiBATHkjXW$aommzIMesejB(S35h(Hq zpEj%dJhhSD@*Fs^cH=??kRane2OJezY}zvOF7gZdu;fE8ehM+CFz`>ZdmDkk6vNSI z;i@e9Bi=X{VyL>2&_kwQ@vPOUX?8TZZEW8=0;qzQlArg_NlC0d(q$vrCfeYG7C_i( ziC3%r$ujSh=*%O(a7sT^o>S%8e(Tx>qq<->q~D!5VbrL(tp*eQU>}-#CW_6fH*}MJgZIfH~+wEC#UaWC-M{u3VkNdT?4-0)5g?Rh}4>`Hw zneqN{5Srp8O-8`WVtt6|zawI6Dn`h%r!4nLK-aWu3+j{64}u_c=}&kA7Jpvl-AL4R zdspdf@YvWCC{6Jl8(PF?VQeJ!`mJs#vk*cQm{5^Y&ug=7laXP@z^$~@fS&UP@(3rb>83|f?PaM%Z% z>G|@*2?k?u`=(hnY9E^bb3X_EDYJWJO?gok56+l})Ae=jeKyQPusT9m$A;b(jZ)9i z=xvj@&$RItRHnJit>4gS$KfD+={D1^@Uc!NZgz5{XfnkgfUDlW zaDt1r_!kGREAfRCx^opM&;(c4Q9>Z52#fBFE9s@aWr8Xl7$g_-=58n*3(loZ{C2#t ztf;7nl0eE{JG_pVl4_KOihDuHm0$P#)f>{7hx&%;vF_^z17K0;JwEpu)=4P*_ETVv z6lR4v@E^VB>3zJvNxumCvEqh0+QXj&5rr4Ojs|9B=|#Dj zc9Tcr^xpV*Qu8&7yEgoC=lcLXDn?lT%DUJ?$XjL8ykp$%-`=}Ch9i@N!iORQF2E=h zI7JMOXJO92$6XWxanc_cJ0^%}->}^8&EL25c%h4fF=Uyku5p!=7h;V`@f&d{(+>kQ zLDWwt@?VNIDN0-^$5LO7tX7#tbwA?_os0FpdsS1O5OFl8Onrt2cLbum|(;2IFf~ zWcP{)#*c+ae{vK@Nl7K6qh zw(Uvlr_?|9ZFdup-|^k)bhR4Rtj0VKQNb#=r%;CqK)Uz!3QHb4h((AhRIdC1(TFQ7d?q8|iYjldyQDjx8uhJ4#E!nv@a1m!*3;OSkG~#1;j%t7p;2#Z$G+jb z*0yPxbHn6}tWLS4?-yOQIF&ZY=QKLnZaPBJs(x0~x%4y$rfj0|UZ1S1asYh?u-8O8 zwY4r;^YpLKV9Or(;h5^WmFX0DK6RqLP78ac?foDH7*i4pkRO2>nig^b8yT*w0`!k?g} zJb*9Eg?L<*ZBRpDv9|}kFrkFLTN34~4(}hc@9>um+~&k)MawS%w%pD7Wx7bk?<*nI z80wrhUaYggZ2+|y1Iw`swmwYISw~_fNTqxTS|KcT8UO0hOXCfIotvEAYHR%RUxmD2d4y^Dfx@P3G-$O52I*9eP*6T0=VrnMb~A3v&J7Iq9?zECK6KgdOv&VJNAL3m zd3N=;`YsS}a6Fk}0-AipO$2yKZ%{uzO`WM*w8b3xCR?FmoD$hy$4;y;_7DM!tN8xA4N|2d8>~VcFxQKb9EGHBdi>A zj=mMmH|{v;I{{p;vl+Kfz_|&vY;Hl@^nR$hcw2fMlAW4*eTRGoKq7;1+)aA_uC6qyS}0+JmxI*FOU-$^f(=Hpzdfic>I#7y+>WSlA^p#b}LlCH$) zoB2DNClTdS{GF7RiAQ=C{Z3>R;?V9S6sD+Hi>Bj+MPOo-%q8-PLPF7SO8;^El~5E~ z@pn>6DHMrP^gEGN2t*^3Q0S%Vl|UAYWK<01622(}8*xaol6u0Q zWk(pA+*}hZ>;a)7&Turygk8eYf8>gI`+poO;uVd#LA_M!eNKFns3(xTclt+8GU6-y z`^KVG(5soA_;U1=>zemS+CC0-q9 
zNOw>0)9vG*-;zsHdsKKk&_+F4@f_VF->#ggf|X}KQ8y{K&45J@aKi5iM{6}n1DixiL3WaFy*Ii?Hw6)8n{1}5Y!Kp!zM{O!J*VG3c zQVHME@0S6qi#)6_hs(tk;aNkh#fq``L9$~9GkgMWtOvZJE69!(Z;8(r6}~}0h+}7v*v#U+HIbycmF9bSe)5pB7OpB6V17#6i-RNx&R`4I3~HLYB6BZuB=U|` z5%j7?m=^2Z2sqI!K4=o@5Cr3+%#9);(hMSJQfKKSE7A`d7>tFzH~Z4?k&~5dzX~&>yC?@EajM38y;wJiOP>?9Vr3@e9kJ( zN&FRD!qxeS)3XI4`DFPD`N#QJ`6bp>c8J-yt7FT-VzYt-8|W272;#ex^=l$#CT@-x zRw6XQnv@*M$eUyPu%L=>DVS#_DE_nab6$nsu=PCYv2xTGr0ms#Tou;JCDbCxLQ=}d z#m3Y^1LdnKm6ob86}l=a8t5t&fxk|_e42+@(Q~mMeVwWtCjLN0_OTo`YbM#!5S>zDMU0_vI>>k@JNSTkyG?}I-nih+weLfnoyyw zn)U?U-6vx0)0##gIJ?*IhC}C8^9BiRCStSjHmPyQ;j7Rs!b7=?p4toLp)%1z+))F4 z@h>PpKlbeolx7&0X5R9w6cPBP2+B4>7q4>Ocmn=?liB$&N#pw-?|iHC-e~rmqX`IdW%}hlFU}Wjq7Rgtc z@yAE1a(4=cKlj+jrw;)=lpTSGBiU6x=#xt`7m@|uuOC0)3}8CErVCveY-~Zm-ymS2U;}=7=^%$PjWgiHc4w}qp>j3 zJg+^oAyeB3oiHRhdq>yzhQ8hv z85@|qjAjO}f1Lg(_~D3c5NW8?rKu1Z&tPB0Z_=|J^gA02XX)|EZ;8$noT%lc3@1cdDdf8xsSX9o6y>v$ zoao=c#@Rt%M(X?d!4>q6WTjB*XC*6HAteljfjWrEp&perq~r}4Mu71nP#N4|tA}XX zXol3|RASF-f~m)~cX;PYqaat3h=xcK7`$_l~Diw_?qRIw*% z)n~}qt{5_U8cT;xGH8gh5o+9VcUddU`&-XEy^(-cupr6H*giItxs*|Wc|=(HkdTphzL6MT%jBo3v!*y7Bok<0 zZ|d*&6h?Pu>{Fh=oAO2#10Q?+U3#4XgaljzITm#GcAEk?D_9wbRt;B0_YLdWJ95F&O$+CedV0X=apO%{)Vv8?{0Uf_h;GV zZ=!oUE3zV==I6R27Xcqkv08WVqwlF&DXocvF^E^biDTfFh*t?(8!IWqE(sxEukZ8a zq|W91OO#neijX6&3E#;M28l#R>vJpMgAo>hbL=k%w*-_@i)TaHWB3-235VOmssOBz zk-a6c;8PTnlQury#|xGbb^GecemvvBNyyE5dy6zn1?zc{j)_41#bv5%g$0CqIQ2rl{%AdN;m(gb z0{Hi?Oi-xq5zT3P_edUwArG%vGo;bDpTbo2WdX$`3a9q#mEl+yz$}*TzOuCHan>p= z9hHr~?!Fn!q#?=Oj-$a*ojte7S4e2{cu#`r)Jm^Xp%7}~QTlm-g;hnd25+S#til}X|0PIt_m z7|7t(3e$+-1P#<8Bl3^uukhE!FbheV9he%xODB|*hSQ^W8U+-I3=)!j-W$fo12YG9 zIk(mL%trC)+wI*L&VHS(WF~(P6*<5YD{nNH^>BT51=WG~J=(n5yjs6f4FL`|hJuuN z*d*RM^cLDk&l~%oI~e`-0l!CgMYm1Ylm2V+=Bq1RquJE8_PaRkT!%LML>X|v%U!@U z0(SMbcLw_`{V@ZsKo;>bP+R7aAr&fS5sbzJ(z?w`=_g+HvG_^5Y61`ng4l0nW9wGL zqO2|>HL>F_{nn+22*y8RfB8AH-y1(j?a=cCBqbG8v4w30MZn~-Q(0*!nK!7yX{zfZ zuA|_ko?Emi)8VX9bIndHLC80$h?PStY6_20uwKTV)P_N)^P~O-HtXeIxw0@6eX1#o zGZ)!x4K~G#) 
z!BFJB?gfiQvcSFy$S(P2*SpH8>C70li^|X&WRv1eWbJT+_&m;qo%grTx#FgbL%=Ht zY*a2MB|=O;Hh5a4h&SozT%@725Ti98qdCE)i~zIbi2Qjj42jP0lAn;=fvkpnjohak zTag;I%_Q?+lr|fvNXvb1`AqOk^320bcPtV|DHd9WaQ$1*C1CcigKtj|GBm}8C%*zQ z%qqxiBxJz=RkWM}>GV|!g~tlR1%$8p69_YEsA;&1HoO%g1k$e-Yj+)T4lOBIg>EJ;; z%wa6oS8=?uY?10Z{K-5`1AgB1usSe1ZO5HwH;9o~fg|-hOhq3mgThH9r*4L=&+W%G zt;lm|*hs$S1j?-xp_HQZx|D314#No;PETaLc-$RG0)R-t=z@6m%$xr)(hQVBff<67 z(n3IfTD#P1-EXE*Ltm|g^Nqpo3nq;3u(zB=&>s1wi25xQnu*@wxlh!}PRKu&q6%UmF$xkrxSH zqvXrZ#3ZiTq1MA==23Rw7N_`jS*q@t8E}qr({S1CbzUJ{%}d=(2YzR^p|H^6HK$wy zu1Ky)?;mn_wR;uY#xC}p%(%c!b#}?<$Eg#(SYDgOf=|FH(ra1eFwZz-$;kYAOljnRlpn<%NT8?saL#H|}oA z$V_5MkYb0;i@$+lOi{6+uFd~lSj|FRUnFEHG8>=pGUj{_B|PbztCjR&T;zul?kT*a zw8yN&)xJB2Jg>@xyCw$f`dRhp&X`TI_XZuNn!u+5R6@N`Ykz+VNpM#(0LzO+8r}VQ z5H;=X*Z#ynbJr8fz5#n)037&TC>b==W7EYc@Bj-TS%QedT2w}LC$Q)Mx3 zVJrM__<>+kL#3QJ5_qY|-Sy0+>@pPo&(9SJN_a8S z$vG!{Z1UNr(^4DQNdS1w4oq_o zuv?e!g8?-bZvYsky~O*D_Uz>sgta)>Yg~!etzp}S+Ref&J0jY(rp#)rIs~CP8^CAy zY$7%&6x(QMT|45?R$0mz2kch0VsO-Ot7N@%4tc1(HD0aG=a)pGAIVPyU)el^@XyKk z=eNX{U6JmyX7;h1_CZz~kKOdg*lrmCBQ~gRsppcctYKAJS5Qt&#m`_s*O4*n;2GIt zHNKIntFQ3ODD!mPL&&N#pO|rkT=`(*c30zooo4oNQ^j=5vZ->c@`T)=`ILEV z{@uKr<4>cRVmiV2i}QAKywJG)j7Zs@3@4_4f3yTxptOs`)c%?-ZoQK}`&3?+V-#s}pBh2A!K8V3!g-#`s4_^6DTOsyM>2b9T&EC~YCDy* zpS8E+CdDWgO4TMqfJ#D)oS!qMVHsYHeYcPq!_Zm)GlrAYCV6-U37f)*e4L3MH=6E9 zAb!vc4O>1m@N4KKvHwBjPGlA)Oqqy@>3VHBEC#`Hq=-mF3U-=8+UwZ&cn?(P-Ftm7 zW~O(cKsO%R2tu9DX5B%j)8(33m)J+j>;9g^1k*+sIx8B(Vldz*MHLln%ByYi1B z(oWg$NITZ{qqZUl+0Wk7UhFxN0Hk{QCa0AKYL&&54sCo14bs(8CQ-}4Ktf`8jJj2M z$SPtL3u6tm4`FJqGJmVY2rxy$n00H@;6&}&LexSYSD~5^pOKU9z zfjg?o6Wa+5&5I6@);iqPjTt8O)K(H{$@W2jFC)@Wo{WBl&#M-RmKDb~`o+(c6Ic{n0b|Gwzb#N#$`fvL&SZs{6}- zquvHUvkKa1l|^;}Tt06a@o)hE542`|3sQqIbp5s0E#5ypVu zlGj3%UWz|xg35_($4|?t=!}AEE75biGsNR;B>VgXkkS~-phxwLD*gy{c+eDfFpCkN z!Zy>Tx;Ebxqw=XHS#z!vWN6@v?n4?a?ib_*!YT1ZxheBt$|%uDnnwf_PdF`7Imsmo zDl{n*=Aa0xLg3s4&sEs<27n6*z;mLP{0TDXqbd~$Y_LszGT6h$omeh$SWK-Exxw&J zP3k_EjcXYf<7u~r8=w6R9K`a&Bj}zwe-J=9@eKl5CP$&|>TsqY1QGwU)vX3ZHk 
z`Z4!zK$ft_@YaQMN;B~U>AApU3B3Lkk*nNHbV}Z3MxRxQ9^FOCI=oJaGL?>=B$1+A zQC^9^ND1DFpooJi6r&lQxsx_|tmZc1%ww_y{SY22lDWuUyi#d_SZX z9MPbCj?qNRcP(-sCE9U|@kDK}-k?JsSWjy+@H4? zg;#-T`b>)vn(boy_>8PI^W8om=J^$(A}OAjza|+n6MrRg^&s(LadSlo&@=F^^F5$4 z804UZ6k1`lvMow;sr646W}0(FXmZfev@^XS2KhrlN**6XG)S%a*#rk6`a`plKCb7OZkQ8-5*EACpTCt zy(ChBMt+GbMn3Iid{2=!AVc9)aRtn(K8-WfjuU(yWKXPfxeM?;o?y2M*D|P!)FSnl z?Ev>J-DMgISnvB<;pCjjz03ZEi9B(*dK1}}))jI4FBjvs3xs9oJq_ooYmVEdho&j! zYUXU_t+O|g!RYiI*VT8Kp5N=CT_aw%3GV0(j`Dony?FlwKCMw+(_y;>*DX;y@sP@d3{dH0`4vXT3Q-WVWg)={` zy7Lmnj~;|w`5HLrz5aY`g-FHvdW&D*xZbu-nMoYiHrIjBv|cZ5!U$A3QLn-0{Ao4q z7~)TSaxKnZ@#_xxYq+G@)nQpOV?~h@R|d`I%>0NP<9;`;W!SYn%1dlwUzLZ!@HX{U z*XQBm0u`3iR;Y)?Jcje;`e8Ba*1G!gao`AH(%VF7d7AO$x^DubfEf@~Xvg!S9AaE- zoPF#$_%`Z%QbW@z<=Ib$wxWKkp#A=RvguGGB45LqrDWCIUgI_6RdcGIim|4)yNFdQ zzhl7F-Y&`}>Z(1r(vj0yB*NX_J?$Cew(1acBA(rffkT?4VDOm1ePb1-B7uo>J#*nC z&@M0x68H#Uabn|Li%wm7;m!rGMLK$1`^HB-QME-#oJ|9vXg2iBsqFrg^EPpzm8S4r z+2%^tT4n}ay=?RVBK<#6=Kn!DN><3myfjt8w_uX7B^hd?=wb)?fab>yf88ixf1PT8zO zw1f}M2lldzZM`mL=lT-*M$)5#Eqs;)Dn}BH?XH!%%a+13!u(w4W7L&MRMd<3p?DWY z!EL1Dxg+82?cswCPA5$(xk01otd5gWumRPg$?IF4eb&SRW)G(6y)R1ZKCVQDt0b<*{t}C{VT*V>xQA(;R?d-6$Qj&N)N`W zTW3TJ7eAKVxEz+>tx@K?28Vkz6kJP6Ze&R`G+aiHN`+FW+dn z_r>OGb(&_YUbgS%ZXb!rN$(C$cv5uV4c=#uL&;M#&6}&bRA5`lSP= zqc1)*Z%>uC--6k~M8&b}wCo3y2qQdU@vfIsv~vQz!)g$&;$s+XG#U)Ftc-fMOlJ;| zWgEG8kFIJ`Q6|rqD+rRcxsLKpmulXQ0R&Qg?nN>hGjdi?EoTk1!_){C&oq8hb;_5F z2<5Nl^R3#_?a!)f4L)cp6rLCZ%TD+5Bp3;2=v9|cHzwH>65N+<#uS+vYxTo~Hz=fe zlmz;b9toOfD2-|ZKjeo#dmJPTm|X`Cpk23J^?ZhT7rH}UqN$H0!7iBtA}P<3t1uC? 
zH!L(bjj^|nE=eyj@?5pG~iA) z0%-s;VUdA10=EMg_~phRJtRG~d-=zF;SYUJUa)tE{5qtfL!nCsCR120-BlQ)9t?(q zd|G0;0u1|qQGOxCYYxT$WjmigBuwF0`=)cy>3-$6Wn{xa$PyuJ`_`7(zi20@;DCO7 zZuv3650fG~f`SPd<8)*_$5pKfefFxQywJF(bklazilfdW?-9?L)Q4^1Z_!xGD}RS_ zsN+bPLd;nFC3O8mb1DSOohXwyC!m2$4ufQ8U^FQx!B#X-hC`5XCVnn0bnIXov6R9OC;2t>L^!VH-Q9tMKIQ9XMAHa~+&9&csvrRArzCWk$m;0H)@}-RRl~dqVp2y>m68rdbD%tF}{9{LtVL4 zq*(?lWq4{m;omQYaWd_9;_~$AcU`MA(T{H7E$-pe9)^(8ju;HDho#T$T#AMxt%G8M zBJoTJ1tu=Tkso#R2$n<0_JoLD>OMy4AzpjJxf`2CZl6H5a3t z<&&5+I?M{yUC7wAbHkf78KI+oZ_xVnKCVcEpr0Kyeqv&4JofrX*pLup+uhyLAOmJZ z+RdPbtUnMS4a>v$T%DoI)#0PRDdeE@SyMlTX)%SPW5PVsbJupeG}I(Z%u&r->jtds~k z1tIrpwPqbr4=$ zQIp2`%E3j3)5UDhb}hJPwii$q6LL}vM;%m(s`x6eeXl=Tjq1}G9<8L$MZKz6>T}*Vd2c6VVzfoYt=oT zn+?;uvpH}?TaSaZjuINGj|)eluXh6YtLv%AX`%HQ*-IHw8gX6nHVdq4ifay>3xK%mv8vf;EnrYw&TN{m8F{O%75i#DHkQrTl*=&qXA>`<4x8{;tr1_{e9C-! z?svQZ4qYW(Nfkp$21^P;?#@j#rw%qv-?vNJ*;|iP?v@i{{Q7^h{$a8d{XBv^a{_TNfUIW!PsS}L9Q|!L_AZU=hOr7`4Y(N~&$Oz;Ee&N> zSI~vHwAt$CzLs>k1K-*=!rfeqU?`{Uh0WOxHnD+ra}_OpfrC;VBi zhclQHTRNIXnI8suAqw9JddCP9y(Vq(F@41GVR;%&)Yq_12 z>-Q(qb?goT|7Oo7+n6a!dO#Kbfz}*s9c56VsxauSbLy~OI)B-mhK*92j(rX(TO!kh zHo_!0#|LdiaK})mzCm7lKj#PJ1O17@Dqu*xtgXqF+cAZZ{B z;V9wH=opvYlp=K+N?ku{Gh)rkdC^zLPgi=-o(&^$LiP^njTjHa?xs`0S4u4Ian^s0 z{c7u25l)aMFzTzg;Q-40mbU$Hy=3wM6edIKc7`np%r^Dmg0AaiU?plL^8P6g&vhbQ ze9ij+TV0wb+@Mh;i=n%nbL?7FadP)^n#OCmzJG~OUSQ%Z13zK^F`a&S(d=p}H@h!` z%p!2|mGw5~d4QDhCBM6I*R|V0^?1dJ_rl7>>~^SUbDtRl5}I_INz1UZ5ko%U)uaLC zj(9gfYkr2cmN*C0kW#wp*PE-^bEP%bWDPJ5RX^Lec_I@#fn=@Z)O%s69gvj0`%&QM zV62`LbnL9@qet6g$(MZa(}g>^WaPS62^ozXL-SWdZi5S+g$8wJT*;owxIQ9 zj!HSGGm+P;3B|EzpPy5BE_KxFqPAFQt*>$J=WII%Ag8ceA~LA0)(;6pwFiFPDis06 zkWSuF+D>50rnJ7B>fFF5mW6bvS{R>l;{C)5lsXkqj=sK-F;TxcPB$&y=4MG{!@^;0 zb>UavqiYAwu|}V*HxYc4ZoFzHf45lDS$=1JrNttD#qAm~eABK67?vD$Q0Gn_X)}g@3o;{ zf9=|{hTNT>N}vWDl39^KXuEth|Ecp>{sGwK<3B?sm$0|EICfNuIzC6UNxBDr9rz@x zj!_UPe>9>Rx*Xve<|^qr=1SpO{t|1J0y!v9#z!Luv0$K>rAB%(OS*XCT&^>B2Bgj; zmOsH0!i1(IOnZC}QkS4p_*`VrL{JE4+#H7CAVwJr$8gT5qMhvWHHF7}X+NT~HDBfn 
z23U@uI%;wMHG*vMtE_+OYTY^R4%Rzk#dB~_x{UR*3^Ce3Q_{upmFwnuZNTsx$;*L-^mh)F*|QZ*kw;jwgngOh8m)K2i+wLe88jskR=gig zTC8BM=~6H0ht<;Dtx|G&!QJ|2(zlxnGi1ln+|AgS1?eZMji5-HRqIG_dgj9@VasDX z=hhYy3n<9JXGEvT=c;GT8{EVea?L2&Ft7fsYt7Ln;-;i|x zmb4*jVOI6qu(J`jVNjj8|K+W*>KH1~^HY3vY;3=Edq3I9OI}g`S6R_GY@3;Y?5p_w zo;&CBF~cmjzGXUpNkr3lBx7Eco4lC6ZChUaqPR@3svtT({XIf==I-PA^{ z@GS*QjA1y3EHbCCe}i$Ayd{;c260*jONIfMSF=Ywx*kdu6zNq#yJ@^1Rut zZ^H6U40Z6>=e42_TMpiiW@+SnkA{b_hyh0~h7hHaBaCE)3vxr6_hyw1zXGJoD0c<^*7=Dx`M**#=O zFCB81myE3}6Xn4jH=fPr%bFL4*n!CsIY{oZ&1O2JygZrN0aDWJqp~L(b~$i8@Y5fhKGz#XKdCZXp>8q?|&!M8=auh*-#EFG*j zv!C3Q>8C!mZG(`L2?KvRA$3BafoFhwcH1D$sWXb&Dw5VN zK+?T?Zo{dB6E%nYk*@K!LCO4i#rgWh>qY)11iR1<2A?^lIbq_PuQxWd73Siz_tn;W zSI6zus;REPe$If)9#F}Ox!u!sl7L2fF?CVVB*}bffuOc>Gg7-rWR-D-87IpZAHFwO zqfpw6E-V&k4ItsGu5mKMSU(o(kP%XxQELi^N`h+~TJ?05Nh6^jfApD}O4xqv{>+i& zQe%gSGV80e56U$yzxI)ZH|^EC)9xM;48~tzwb-1Bka9DbVqS)6UG2f4aieHM3M9-d zPOSYulHN*;SDajxYS~IU;CZSxQO`G!lOBq!%$6vcb8s9p?@`5N>4AUt zIIC`ckGpE(#%5n&iAxv5^xVz84_HyO=C#GNPN_6n5HFULFR3eAKni+HomEtdx737b z-a#RCQ7#)iyyO}-a4@yzbZQpN!(Kxr=0tZu#t%EKA05BULzZQ@C9~(^^@N0lbM0fX1R+yPT^U813WCNyI-dj*vz5`b z4A{g>PFoz<()yv>S}q99b$*S~!X7`|$@42XDE-;Xp|fl?W7yjDlBu@O1u(hLLy>T3 zzbv85RY8HF2s3b{)Mhc4pv88mDjH%zTF*7xg1jv*yCU^YbZ5|G1n)e^91gDE6XzNK zlC@v{LHb(>F8qpAG5DcoD2LUF*{s7#dvZTYQ)cyL{GfM3^8DNwMsxMHg~Mj?%M38 zfrw4j`(DI0&*Mj%t3#8le+H{}6XR^^4_#=h_PgCxcCv$I>k(!Y1bD_3*L`eR@6UTt z938KHY3bR!e6jr$)d%jo0^VqXkJ32U--dgPQWPIk=F)g3opUO_F}6jvy|lNW6X3I8 z^#b~cm2fQhB&7^V_h_Ikxay*n@{(vqn_vFi)HoiL?=(z9@`201n@3Mh($q5m1}--y ztX0WuZK<(!0+< zh+Jq+s}aD!G;J&&!Y$dM-p=7AT_~uh>VD{V#gK}U<)G^RC)Eb3l5pF;0^=+uszc3O zwWlBT#XL`VD@2Uynx9!-hW%~Ld)6&fJU~4;Z}YO2plDt`Bdrq`MEy*KIc!TqJDYu^ zLi|a4bN5d#dbpR6AK2s77pO!UNVB2l3d#@}X<|V3ASV&+9`Q%VPrM%SK+Nv@O=YKF9+ZOUf$eRsm1D(3+QCAK+evC>a|b zo9Wt$cK+zfm%ehM2ar}d7))&N^rom147NmDWS#mXOC9@_u9Iznn~EOkene*B0?tvZ zm25;S<c+5BN>lvl4wXn@voM6!3f$VxjpG zL8<}=n|AI2h;wkn7|P0#{04?i6W+rzmGr7}ktD3kQE17=pk_hU_KIvA*ik$6A{0i@ z@FNwi31y|8*gac8czw4W_|-W^t;k0sNip}jl?>v_Q61=Qmm7;SU2_zAynVzMT+?EY 
z;a6@HRfB-Y`X>I^RaGJ98QjSSkZ&mQ%phHDJzZXsjRk*8(s_+sJBWV@_)ItwF-1AZ$<@=+SsdM7kAuyoAIuaUaCIM zd%vhVC1=7D^A<6Bh`vW6=}Git@W$WGl@nOy zvU?SaM?#K60W0}fY*g-2#W6BH9+Pa%gr>5wZ1d8{C*4OA2(wi_(Tc=w*%bYqh^&Tgk51AAUVLU)jjS z&WUg@aU%|Hm3uw>z`OLN@C*XGtZ}kF&Ewj8^AK>1WLQ5uPwfLNZHQF2jGnrZVl<(4 z%Ni(n!N`%OskKRYbhHjO6i)5ICkLaG!wlLfE7X~d_fmo`w4ITcx^pw#<@*|Szxa*q`G+h#PNWPUD`C1qG2;S#2|BQ8`~6vVOM}FA5@aBRO)H8PKLky z=lms!uK5MoBxG+$H1C=g0)rYey^65-{3))i<=3h~o+Sn`rf0%L;W+cu@Nx?=u3*z({5Fa)_cYm>~Q;GqfTDAEPK&XJL2*9keFLR zNO&XL#+|UnF&vj3DMYBaXAo{y$YF4w>czagtlkWQ)+2~FCFd1IPbik|4@@<6%pZVF zM_b$>^J=A8A&2Q4kD#qhE{TerH7=Oy!!pcJMPYI>fpps*QjaN#rMtAFopEl2BCFgJ zGjnHw08rfzH@=lGO!%8LUdzE2yqYw!2jp~(}pHjbl?uj z8xk%8o318xTuZj}G5N2bnO%CnGGe7ca|xHps4&Tg0N7C>SV#!xweu3G zMSuz1uLK{YAP{9_*?mXL@U(<>6C!#AGC?%nOaTUQyjJzaA! z&4f>c^>sqWt+8ptN9MBK;Ox!IF;Mv$zPJON;f3dr_A?K;o!aB875c% zIey~cjcE0AT@P~$QDKYF_6#>(JZlEPUBw4_I|NBczpEeDkd~j{cM19WgYVmHzyX~i z9b#Gq`XQf>K+&LIPj&9=riM&)-YeioV9)&3)8i9y&!nEO|Ee?_5v6dWs&8V32G@N; z75*7Y4!;l7j^mwI*2#0Tk}J>Ymi?lO+&uuEDj4&X7D{ zUV@39i>C!O@V8Kx`s^_XUP&Can-#pha4NUw?bK20@}wreDVqLYj)0{rz>@96>gN(7 zd^!qJA_~7pl7H?sH39zQZmPlBSOnYm{V}gg3Fh}{U%vEV_>9QJ8{-1*+B-d<3Y5m{ zmdF*L^oX0(!Z5%O_71v3bD;h=G|MkG>A%sUf8s3vgv9<68v9Q-tLR_ctp6b1{vQik zS-&Kqtn_TG%zvfevi^?*tqiO*>dn2{@WS)wfJRa z{esc{Yx_T}tbf`3hn4mJ#=`mq+WlKoE9<{KrGNTX|6(cq0`dOK1N-Ox|8|)ExmEj% z-}{%={pH90Y|)N?^9-DWB+ot($Rl$g8yk`{d3oU z7+L?d`7b;F-4FhG=s$bo&vk$KME3_J_={@%7hUdG2n(s=(ed#7am)S@s|;U$+-_T9L*Kzt&1mf`3=NEY%T(X1HhO)>yOF z=I?MlLoM3hFzTM)*#(m(cX~o^tw8E}DNFZGF1Fzw_sMA0)*oQB@7%4mw={!h=UShyg1;7gY;6Tu(dhSGr&}1q#*C z<=okd!HewU-N-|wmsRy^;vhjvk$^8%U<{dQo0t?mv=~<_32M1ihqmw7XyhhZBa(huJY9W5#zt zP?fKrfpL~{?fx!V{&@-h%$5HQ{rk5G<9`hO{fAllD-x7!m2J!oZHcu=p4Bmw}Gy%Y6Ocf`p!)iTd9uW zbL9;No@l3xKs0At}{ksY+}@C8`TcwCb1kc^aw8sq_O8pwY4g{{Au zczUj5?Y`s@DQwoiI1yuGYJZ&s^CC>kH&Sgpw{T=jQ|Hi@AvaJNUC}i?K_{eK~huQc8+=o^=G+N_&TB>aZA?qL7|J z;zUQ$-XMs~8}yyf{C1gy3c@*CP!o{89F$(Il_Fwgp{c0aR*O-Ya(j;RMAo6R zwP=f_9N0dm=Lqcr3zm3asc`s5vW)4YVbVK^MJu<~hC>LNR~~gvyMy8cr6KfW*;yI# 
z`Sp*;5YtCEEE&Nfw6siRivcSDa4G>rCPLTQwmVIK2Z#Jx+EwAPfC|(n#FHFyJd>L` zd2Id zt+|(@4(1)Ti;g@Pr=gr;hWn<1Nq?9QO>+8)bZ1H*YaPZVD6cie5-PH%C3ja$pUog4 zE5x+#;K>tf)x=A)D>7%2jy|;9Kkzc?@G*aek{k8ZTyz|W4(?F>F%3Sp!C+0?n9Ti_ zN7$Z~)e<>;-DWv*$fRS{s!>}{5>-bfCiAWMa!gw^YVCPqvG=h=cRz(?X!WhEt!-iF zcK3H!Q+O}#=vej0anaX|W_o|1*mxiyviBd&8J zOPYqKhEkKIY)V4_Z54Mub0cnfT@jB_^xW9R4-eB*<&6ODcgk1)cYm>pFpA11X;95J zCyjv%n%zbe9cbgCUoHNE(V`(*efpw$r5}7f>-z|T`bM>er>%^P&F+Xiwny+#K0;@r zI9sL_jj*#6HjeJ@zVBEUV5NMQq<)XX=fVfH1QRS zgY#C*_eqPUgToJpcFI;6OU4b(=H%D7OmpJbd&=@vtf|sR=oiKySw~L+@71>u?&k7G z$_v;2Q%AF8izl7mv1Y%0ARL*+i;zL_2-&CS^Nb+6{n$(#7-JLu!2{!kpoS!db@j&Y zuy?^NB<-a3VIDsNKjMVqXn=wdLJmS}s!!WEL^qwU`I{%R)+HX)5b=&n)1c0%pziou zt$84!!iAVU zK+y6#8XDU;;eCnZzaFT@|6HJE_*2C2WupIU3HrZ@7^vy#|5+Fc2-v#eX;L$O5&o$e zz7*>WbgZm+UuEcj)|&sw#{ZF?|6mx(t%ii#3!@lzRA5H&{TJyg)@c(7a$?~Nr z!29wqe7Oo3weh}AT$7oeS^G;kkEh8>$ND$+0;a#nHU8T}iGh**ug@gy*glJXI;h|) z9|-IYLHqRHc0sVjXaSgup?YpVL&`M$x~M&@n}>v~xjrBkFR{;Q+EK^XH4aSv?u2D* z5wuFizy@70!aFt%QW#>iiv6sc8dBsyBm0&q_>v`PodssUx1CEY^PtVqC!Cx>FTwz$ zO|Xs{$KefX{I{Jz8Ug%t5Ga^)C*|E@5}-8W*Co5{A?NAzp0H_Vyu>tEO^gfZDSmUJ zUm{4(eT#hFyB0tkw>7%fM;eoSV#}j29nZm8b+$ z>9C1{#coTx`@wx7g%Ve&ZWK;`b2l*kh35a)$ocoB|6}EsHbg+*(fE&O!lV7K5%WJS zI=*ZcUxE0)6dnK2g#5J@{fl4Z-$e%#8{60E{I?5C&&2+pf{MvbNG+ws57y45?g?)_ z5D*ZwxF5R&%in_|3h?@|g?~lomg0rFv0mF0n-uF8o1~?cxlc$*gUSIi(+aOmj&)|q z3@1)hDyAp9OBvt3E$K8Yo@d^==Nx@@Ub&kdcJyp_J#1Ol+-tUMYj^_w1jA=-{8m@m zo;!e%<2iKX8u(iStg1RojnP}&X6aY$;wa+4i)6avobjji^-(+3Dh(rh7*bL!3*q}bq0*tIqjHs+TI^}KgpbDI_Q{P z&)0FcEc6_+6)S^-#cDMQ$9wDBgEefQ22TFIHyiyC*%su!l_KOQaOQ)(E)* zdn0>}2&5t=U~UGcR)jIh?>NhKm#hwoB1i@ol&iLYS=3*ruh$k5gWm|1clp@99dMyq ziVAVY<7*GI(FYqh7YaXp{0@3XCD<`rVn(vzO}G}QBbt*}B9G_+(iOQSh;2r$0r0>X zG?n}7n7I+rTEsTLWrpfnf*nIbVVazOId51yT_eW=#NwVP#YCPtrETZA#BY4gAfpjsh944`4l-P2J6 zHSYA_m)^FRY~i1K_(N)mq2aqWhIl#-Mid7!9CMfpIfe!H(!z=Z*3f}7N<+PIPN9YX zcJzT!{Lprl@*9}YQVvXj&Cb;fsb?=feuTfFHvDPnZ6<_F0B_;PYu{J&tvFvPQ>G@5 zY;FeM@IUe=L|OQnQM-FSrn_$_+7x|6XLakO?Umbf-8GX+){0+5s+47wuT-`dk<4}G 
z-?TLgwX3-EE?hDWT*kfqUMoMJRZ+_*-(j~&Hi^5Gd8Iy6eL{RfLO!;=3UFUwuGNm~ z2KQ4-`Wl8DOU_=Cp59CN4Z#?ZJ~FQG`)PGfbS`vUJ3-xBJX~Dqgn-l$s(tqrL5F7(;RWElx zLOd9{k_TSJuQ`ie*~`KO_JG=qLsd3mW(r90>z7^&8#^AbDQCMU^;X|-fRJQdh+*t zZ}Xiuj%(QoMXVH<-lWX#y2vx^_;hN7z7PeS;lG1vEM!@L3AZY$72LU)^=@}ny8GM` z9iFkj1LydT!V7u|ct_v3Nh=id8|wwZJD@s`^BABV;FlfF*a5=}jCYJrVB0V=SX;DB zuBM#GV{YXo|2Xk4BxvGakk)KK65}7fCB&b;H|n=XcqT|6fo+?_BUg_GGasrAWl#s@ zuN({A6nB|&(0v%{l~R-2yS^e8Jn&_1h_~jC)Ls$~)CyxznW~S}zi&nH4@Rmm*$vzl zYx9zjjf;o}g@b?tK%pmaXLxB@NjWJQ2{+@GXqk&_aXqgN3WY4CoRn&2V)Z2D{9^cP z9GA9p_a5a_!+}%(~7OlD~F9Zlp28DBKtbjuW zQckS$K&zXOR7ZW=+QgA>pS1jNy=4N(LDW==k_^2UC)tvK8 z(xJ}BuXC)%o~Lt|>dOneB$LzCDnJRQ8EDC<7c0^X8RJOKMJA_Z6ht#|uNMmzloK*L zC%31ULy&3$qB7U5N=r*-zKEH&5|5+v9BJZ?+?bgr5>u|DoBtTOiMZPS#`z4FHMnty z=|7-Z{c$%Oi=i4yCG?0%5N~wdown0j2ww=T0QL+CMP#nNbjVkrH{6z6>VKIrOQs0v zphifUCB@9{AjGk!ED}u>8@Wlw7VBdrkQ-iy_ z`G?))F^QxKN|UB}(0m;5Px<{ZOBcsV`O52y5ljhU2gy)Q>Y|5wd!HMkE!ztF{s$LBo{Z|((1i?$=3;R#9z=IId zH1%oQTPZBc{-O;B8RJw&Xh@ZXQNf=SGQ+Zs3C2 z{ORkG3LEiyGXXIrMIf+d`0c|w3WXRICXiPG3+H!esu@FH1i5PagN+X31gYAX7^jic z(8YK^@x@70)1Tl<_VQYt;Ouy%?|430%m zn+oq>J@4GEUWc{DMvVl3C&HM-jVUgV!e1X&M4 zrXMOiV6E&+DlYZOHD%XV|K1T&CY zR-GQQAXgPUOpPZcapvZZ6mq9iC&=pSlq*wtHR*|nDdvHer}W=7V0oi*hy&2S49!Tf z-6(+FY0z3)HzeAsz$PDT=nT6z&RMjikqxxbWMosKk>)C&Y<>bcB)@+kJ{DV*B1e?! 
zQEw>5uNcZeS{%|;0D{9a3es)Uq~rwYK+@JW_ZF;Kv1??BH&ZH3TT1<4n@yOVA$BS# zk|>w0363y{MpUv^l{ZEB_EorWk+oRxmh^TUnCanYA9?nrt#zs*3?o#?1h@=kK*+O@ zR}AGRLsD_DP(+bg$o1VO18Nfv(1epjfW+TS`fi7n6fF_rVpN};9Na}ucfguV zLDw|JOg5r8G*5|H*k;jvALOJh0y!)L>;YPvHd+uxJB6b3 zQ3b6BxQ7kcNEmblyfH*tgWBjK^C)hLC_YFdi79TnC~Bnl_yIbpgRP)8ib#8kS=z{G zP#Z~PEv2L0XU&U2bS$T(nggOdTE1L0Grf7YtRZs zWGu)-743}jCW_*-@RBK@DrHaxq(L5uO?eYUp(eBE38(@M0s&PCgU%ogrbsG)0C{8y zNP{f0jIv$>O#xt(NPz`3>ZZs|7!&{qFh%|Zr87oqg3@UtaY5-+k&*z}ltB}KENJi> zKsI5}0}zumr~%-RHj1G*rDWAYa|M;B4AKA=qzkps$SGOH&;}@#C?gF)<;mYdDZ-=+ zb#x+!b^rs6Ab{8 zPW~1|VFQXt`<6{%L}{4vO_m<@WNdQ2JWexa93_a+46AQTrS6H>hP9#Jjm>~NAe22f zr0#5Q32uE847vs0&>wD<4SwhJkAo|z4K^4~ad0&x3sf)Lo!05x#9BJF)?5eVTE@@G zeMVRC%M+!^eHuiT3{vDnw}D590G(H!K><((!s<*1UA@K48qotCxaG(7?t;d^P>}rD1l~)wFrGXxwVAa-wR{a&dC> z8PybA92wcP6rqbJnKqt@UWR_4WR}v@lJZ#^c8lgCswjFKt_Xg-6vMz8d@DV?>|mGN z)5D3#?O-?i_`IR&c(Nhp_?}G)pS?ojBzJyF)?m1G5T^IT!?EY}tIGC|g##bI0Lu^x z{`3*8eUS6cwK1b0g!39sONqH6mgL~bmfR(O$e+*y{fq7?OL~;~q)r%0*puZ|_!jrpv5(<_)U>|xC+xsH zWj@gpbRem{HS78wy?C9 zgmlkU;+y=aye3|eq6}YR29yVO&|VbYW0mA2yD6^8pU45%X}Ux$KZCAmZ42(PfmyU& zl9uQtvB_MDUMX{4z*cS?_92{_3NlX4j6F5P` z*^J{Hm|M`4JckBUP|=b-p#?_M2m&lgOQ4buRlI`c1c31>nKG70C9BD1N|^vl=#rD< zQYEWmj+8m$pNu?hJ-{mYRN>G!- zRDKk4#LOuIQU zyvYr;rb^?*$)dD5pkQUZ62D|++7iW6nF?e|{*o<5y`lgREu|8$I9X(DX;1Ylh5~II z^ssV+a#R7vrcnSBp_EdT%?$#0R6fSmp6JVgUUEN8s4`puhU}OaZG0WXs`Xe?Rm9@#3?RT~pxR*C;M*Xd2_5jS6?z$a?E|s`^uc?;?7(Uu{h)Oq zb)eq9JfmJC_aX;efvbaUgEK*PL+L>HKpub5`Jw~ngUEyY%)dk5yA7rf@d3vJ!voK= zuSVd2=KyyNwL`Jv+-n}708R~-2POpV^W_=k8hnT66T_l9FS%F`Hw18>?%WN)!$PU;H&<$t>;|J$&3I$&e-2P$%*$KV^ z=RsHpb&cI?)@vOg7Z4kO7@!V*23`xcmR=GkX=~8S_v;`a4cr=>8GH#$?@K3`2BHU1 z9b6q+9mI8ZJFNllj%L6Vvciq_&WP-d?#_gU@0<^u&k9)4V!D|LH=a8_Zul%l0{c*1 z@Ac2!!a(hvNbVB{?WaUF-S0L?bpwU)U&k}{;*dM`G41d--8$+!Hjz45!#9PTJ9dWo zHeO5*>aGlK7WqSNfZy>lwm)O`r`+k_!g+6$&81;i`J%%3Ol5M)ZI0`r%wqB?ZH8sd zDpVBIOo!fwCeq0N#`u)^10>)(ER*~tO}m}JXOY6oJ0c|=fTkwLy~ z_m;7-^NI(5{|Z0%0Co(q0kr}ChSY}mOud7(!`&;_TN|MNSwD3dTo|$jyavJ# 
z;_d4*%nnTNQviLy6<8YhHUtxRH@FJ~69f}jH*_}48{swPj(q?#xIa`jWHwYbcs6u4 zgbz3m>@&eN?v6??Y%g^{JUB79&=(&t9wZ)U58@ec59Dj69h#l5J1jdWJA}QWz2Lom zBK-M?*IWj~2FwP?`Is{h7SKH49pLuhZV+x@Es$IPa^CB3ukj4f4aj#?d%b$;16==i zXPUr(Hy`#I!GP8P7L2OTgLQ`%Og8XFtky7z0Zg9xSpPCl9C0^}%IF(g3%hX`Fhb1%{{qBJxGI2t%C7(NLR3QPo)1~_Z^ zWzW3RUwdD=zw5BidA>d5Zue^nZJWBgW_fKR9gTr=(uMk&8vMiAqg+FN>;?{+o|SFF6qo`* zpeThLW}4EK+<-YL2`!YZlR9-rFifC>pxLbXu*duhI)m`i@XEIFv}V{?zw+1Nku#a! z7AOwse6MXq1Rj4>tL|5l>CYeT;jX$>RwInul~Z{mAN`bDE?1m~BI;b8kUo$$cH-mr z<(OCYrN*UfMpw!zv)a`Nf9}k^*+iKCZ{raYZEg9a;Ai zpn`Z~38a;UkAbR;gZ7UB)uZTv2{z=40rj zP_X^oA>v1SZDZQA^aA9At?SwBd=`V<^@n0Xj5pjyT~=<}!w&=(Fz>ONRcXJnONMlYZ)U9If(u`?J6mVzITp|khCOfvNSepaOVr)rgyD{&mOma&4A1VI zmXWHx>Iuyu22XGx`SD_O5i?Sj6?DB zkC%^bhBi`4=|A=aWZ1P8=9f~8@d$RRa1c{xA_G#{oj*R}Ua-*@oLUsWEc@RI1 z(jbu+C#xlN8V7(<4Sg&rU$b`qm7> zg|L@NqE7;7p~G(3^JSP7*-5wp!JDo4k@5@@9rD4hYGDV!+PM5ui@#Lo>9oD_5}PDg zRM_v@Z~rXvT$e~*m@D+8JUi2)WA|faCGQL$i`QVRU|2IecYODN4tJ}my>hCtOjPyd zrTSN`D>pLoSF(xLn$%*+7BeWRrA!Y>s^Mi)y&`#a9YakbEf*@XQUJN7+(5hW68=c} zW*>tA9=?t3`9*3?t(I1L0#<3EHNO2R-$)30yt`BwpvPdVBtnXyb66m zt9rAmzP%FdZ`)1m0zOuPY5L@o1d95<^XqdqT*qeTT@R02D^`tb(u-cgxj4wtEdyMW zxXf_FsaQU{-@9Pw{C?hHsFeI z=V`j_U9$SezmNJ-9254w-Td08Me?tfEKsc65WHuAU%tD%%!SUqOMGQPDjDdieyv5) zcc&yj(g@WdPpS~S*OF@omJH)m0GM=q+hXtch$Ox3Z|hG4>jW|Robuv4_)lhhU6@E8 z52~`8>kJvJZ<;*uk_2sa_LziSPiKGHR%AP7?I+LYZhTN$4?X(ow%+`T4|(*)&KGB~ z*@Q#lE%v4QL(eANfwlT%09C7;ghWJ@sK~x)y5^!b^aJD8MU#pOlU<1Mms#s{qYx6h zhRsEj+wj?i+iY)DZaR+lX@Q^KUp);D%QI5}zge}m+A8Zp7Ko^qT!%2;uS^v8K985y z*_c`l)NtZ`Fo~j_#tL&66<&Wnc5)5>{2-35ZJKspI_bV<^`<%m*r-%sf1T5q|PqKp)&0-tzyOcO=j)e7BLq)$|v^_}f z@0JEiOd!SNxIN?UyeDv34myiVVu*YFe$v{+gyyxZ&lL(9o11Gld4Z@9gL$iHG5S}P3 zuT?{Ijurx%32>{Ms8S1In6-WG>JoT7kh{z$z=O}T@!Fhz)>RN!W5(%t{K2-@Be9@o zsb&#oGDxYLtt&OGD~(M&u+@rxV|iedCCjCpv?s+7slZuP8J|{_&S|WY@><-!vRe;@ zJB>u+)6NCe^HOfedHGgKQnj80SQ9s7P7 zH-cau@H#CHTT(2GoCmHCSDH`=aUJ)e+OUcpCx|V{K93-Y+PNhh)PXkJ*xy z$;K^_pjrzcPaQdmwHL{0U4-4F3^%0+@VeafGzL>F6ognH!!)i0ty=xBYxL02)?K+k 
z7%DI^5`;Bg&OMFiFew6(lHvKe2(FwaiaczOx4k9d_~^qi42a{xER8;oBr@BlKj9D6 zo-PE7d_{NigG2*^mwtD3&S6GKA^N*{#E2ECT%H?pQ6k02OkUxqC1|mkDjp^ky^bx| zMagXqzuXQ6(mKMvk4NNvM=~u$?^670HkeQ^KGvAt!60KkG>)`CT#r987`P?|EhA4o zxlTT5{Nx;cIkk^fP1%z1wdE(IBMzWLa;By$LCt1aXxPQIeT2J7w=8nN;V3wpTI?ip z5m^!r-<#TXIBv|jB|hHzW*Ue8`X?ph^K;0iGdtmYYm6E7_BxNn+K!&Ko{1Fb74^s+ zV?!Zuk6;U7#0ZLQqSrW;6)sM0M3yQ@lsPsAfyiwx`b8^}-6oiLgCH0tMo^HI8#vm| zgz$m#iiu^6{;+jbjq+u(F1}B&aQE}?&RxG zmkNePpJ&sqWH_8-cA03|;p7kTO@E2Z%V_ECcV4IPzzOk-n&onyjI69|9Z3i}dD|8^n z;Dr9{SwkRve@fkWL(so$?{Y?7zkI1wKeisF$=ZxFd7fzXyav~tEJxaDJP`Dv@gto0 z!3js?OfZqemvDJ2d%lpmgujCqd{x>SEiQy>+?P9SGPwwE=p9n$oL*Ve>(u6&g%Uem zuPHW@sQb*<^f9C;6W0L2MovGzz5lyp5d0u8my4we(f`0feb`E<&kQ3ZIOuIsLeCZ_ zOg{L-y{EriTZh&4NSdCkk>GU3H9+%R&Enl~jwaEhk&-GQj9lS-BORD{XBuGwpaZ-a^95TwK)n zO+l(UrzEm#?cRt?2lO8DyAeD28~T9n3${ocm+N+jR}YR?kmB`^MR1sFyYi&3t$Z%f z66s-27f#IIQ@`XWUnhXCtJcG#n@hIC5Q#Rhc%v8ej6=W_`!V)u~we?AUhhUi|y}W!;dp zOU~7Fh!&hJF#mdzYV&cct=-LVHnSbuUO z_m7=Qck9(sKA5)ae1fm{FEch|vSx4_bCE!r0?i^SRZ+trBYW3lr3A5w*Ns`s%INQu z;5|2?Mo*+jV9H}7LtgE35QmLzRw8V2@p|V1)Q1ck{Jd@+_m>Y1AM4f=*ocu(C^;y*GPYs8Y?Fg?CRbO)> z5h9=Tp4xTxYjg~+e`)%-OtrbIC7Vt9Rq{AH8M=JmO_u%k*NkX*RaKE0k)}FBy=fOK zd`zx-X*98Qcp|o^#7&*%o^HA$;%5o7DyArWcyUq%XcL;S`!fCVysU1XbZVn7RZp*m zSIfwtkcnk0f_I+1S$L0&v{ust#(eJ=qQXYkr0#9cZ=J=INuM?=dcEJ|9H5jCR546* zF#bmgNF`1!Ka`)lR4gYX8*gp|%2M@?zJ0UyZWiHRMi61+@!2StvJp9mdh-IzlY)fZ zLZFw91wyksv-jdL`*@YA$Y6i(5Ja`hz)#I723ba~=D?>$@sRr?=WqkdSu?C{D9vpuCeS~2m! zOYpeWKrIvHbN-U82b5H^v>q+kZ7rQh$!T0?yUe8NAu8&)kjXgNZyB4r)Sq1AOd;)DJPsW3br@u1%Kx>HevkZaf6}))0B^49{Ppn*u$(f*ZlNeX%YQ-{=s!7VxyS=& zFwhFV$}B7`Qb}*~T{HFm8FM2CjjBgS1v-BiwA;{nm`b$AVKO?zUy*|wR10F2fX&O0IM`^_FTYgQ%)E*uk)zQ3H zV}|)>gwlfM;2Y$-6vw#r;}a?K$Fn7C!7&OBs~dlemTiox8-3x-otk2Se?(qIZ>==_ zqSi5tMXN|B7QbWVXd7yIkMOt_Q7RXC^{7eZawGb|rDPW=&7{>9jr-HAclv-4BsZhl z^AP1dA`{(1B*5ic=RkH-sm6v6h;F{)kS!e4D7J|JEAHHW*dR>Uf<25oB1dJb!LyA? 
z*33N`iMocrFSnM3S^@B$$MOBF*{O_uaybqUKqNEHS=bs3lm}*f6oj>&V0wOMxKsqa*s=(pk(cca zTg&cl2sss>_;I>K^mJCUm2Uqgb%g3XJ%h?o=D9aABbfRKIt)2GK>`PUep?&bIgb3U zlo(3S!H}X#Q5UR*I%5oEm1-H{xEI0>s5<*AqBS zsByLl+RQI}#zq56Ywi4bqw*1b* z8JZ^+NGX{6s`@s+KKwQo&L`W6DSf6i8z#C_P4j(Fd-k41(SE2<75a6{ky$N;7XDY+ zl{bA8jK`ohAS{HcBOCKHxxEwfb@CnF*0pQ-LQQB{>E<9|6ia_S|BHjVS8PkIUN}E> zXGgnE8FI&bz)|ii%~eGUZ;1kOJT+2u*>s1CYnn>LNwmG00Md?`V{FrEfr=L%PM27q zKs1NDXN6Z{;72@WjK-9<9RT!WIUN7bcq`pf50!HRW$5P}(3}SKtIF&1X?&b8=REgM zfo5wawB@h0^)Nd%k>$mN61z)Xr3NZ>aO3v&m3k>AtH^h;25gLv*T3MAI!fX}S@3;M zo#M?(UE06OH|-`U)$jVw(*?K!wnVk=seGw)WkZw^y;*R)1vN%EA5B&v``)9%;~SQ) zXlhneh3r$!-&PDYV@v7{$+>==b~riQoEG1Yn-DX5`#S#44h&o&ujddfEz1fwOsdsa z%cNyv=r^hR9pmtAA!WU}T^%78I(pG03J&?{rt#=|LUw5&%sc7iM=VXfW*Mbmd@GPi z9cL3$$Z1oUlg<{r-I`;=OjkbtB4lMchYJ3<5Sy}nx*&4T%DJI!@4_iZ`26+T3;$~< z{R(!wDrvJL@O;6~?sARrSInWOO#~K_Ff$AKaL1W6GQ^V5)JI6Ck_&sVG_p~E<)dAA z*7zt%EreX)D`B1aXJs5|>WgazLz6)M@7Z?q688AsK3xp$<#jYKDTp0z87k}ruPFTS zzP6L&HPkgC+lk`|s;daMO%RtAB5vXQ%mbrU35`|CoZ&+@+D}JFl=SKtV8aFpf z99dmY4t)Ei$ocYvNvD+Z>h+ZSE7gMsS>d6$#C5{1oHkqIoetArPhSJQO(Omt8J3X8 z9GS4iN;0ZfELfEU4S03M{l*#I)GfOcvD0GfaYHX#HnH+el&v1)SU3EB#&PL|Nxf|K z8z>Pdt2ekqhRmvJLMY-R@Oa(+<0{xymF)^^IkEUbRq*|z1J`}{u1xI@3=@C7m3M{I z!QGu+tQ#=`WFxqJMIK2^P)tg#E&^r7PY>S4tc7fXQC*$``B-^QZ4BGZZGJB;hM3i$ z#MIv$Kupg$b&|9RIE_^L{`6$xdh+*Ks0-tp>j~zXl>vi{C|7P?jzY!0CFK~`M41gf zm%CINJ~}0PC#-l=U|cpO9Od6*%>x1`(7f|!I9YWG&O0p?CusgPQmn-9o?(`^_qs8W z7~4;xjdbIDFgo-%b{$0HQ3$0+cOA<1Fn1{RbrM7dUQxGLvqq7RVu85z^}VPWGNs2} zx1ZkU%*<1e)TPpwOUyF|b;}M%hH%s0xzNG!m42&p=Z{-8i%nZKPWLBue`ce?>NTcI zpqfo&;V1Q1a-Ub`v#j39e7exzjAfurGD62jzG&wW7i{Eu9Im`!gst}83eKH60lIX0 zB0a(9enJhOp(n^wHt@MHfar0EDi6$fUlqCo0*fyVFTik0dqF#4vKfh0v34W3~XjRl{Y%px>$~L@@ma zRd{F^a4Vp#Gb*XSj>nHO_?Kf)6Q02rg-~(WdScy*(M?_b#!{hrbv4s$_p5{SafuQM z=coZ&%6)HRs;G-Klw_{Sj{}2_Og9xUOj(;8vAEX1|F_^i>q<$C(aPyDO| z0`NU|gAl1G_-0F2XmK0p<3AUBz8Xt!c2U`w%kLqiS3S3qLk^rnA-}TgKXDPPGLWkV zcG@{0)NpytYwiwEHR?2~KE>Z6KQ2S9F{Iw}{9Rb}HqSBWBq!WOg;lyi)nhFX`k)n8 
zk-@nXV=pMk!+do20_Gh~|p4s=9fc<9}17Xcs_i(nupNv$qt+cnq@Qxu7J*Z1f zgnhqn>~zI`rOo0xN`4d)@l#tk>*N)z!VS}K@kHkhZ?uLu%waA$gTAD?TQI28b)$si zY(F$51b_^Dc=202Y&4u})69IFqaZOJsHcZ$K=YT`913K-n~+&(%p)MN68H3Oz$50Da1al6Kb|EGo5G=}0SC z^2CC^VG|5z3$GgISn5tYmTE+&rBN;|kaMj}(DS6~`#d30cf6*nJ7fd+Orqe!G#Ofg zdg_oaPr{89A4SvuT!^rl4EMB3e)9;5f3&O|PU7zSMfd~dkv&%S5LfwpHq5cHj7N!_ za72|5Fmoc$hAMDzihaO>1N^*A<7eN2&&kyyxXNMM9fH%JKjy|-EQa|S z>@}!`04-yKtYTBwhe9yn*Y*eTfl*+anTx;S??PYKQh2F)44(JZW!`O&lCgVRn^qqg zaX6e|s;CXD867I_THj8KM|<5g(G}g+`1v08-);#tiE$b@v)iKQB5u zyom9>B?HUbFc>WTD`_sWuD*fMH>Q<6sum_3Mu9EIZ{_{`IDU_@`hFR_k{UFFKFrBP zWR6ZK=6T`!8;w$o%u?uoQK(NS20#L6t~d&V)P=*%OoZ=SsD?!T$~0#$w*5~%Tb@Rf zN}a#?-`wALA{)^Tv3cLP((|8e5}i@Zlf=jF^y|<~^z+%&i%yECFLi4T55DF;|Cot$ z4)yXJZtTgUtWPJ!s;0*ZOKaV|K%yM+CC%#f3nY#Vo$M%xvLa2j*##T?{v&)$DdSj082Ez3zT8lo_`<1Em`YVU6_nX+CT+u;!FJw-8wY&uj>9uhZTXZ7% z_4+t`LcWH83xL_o9ztp{j3m7yAgmZ&Ihz0u(5%4}4X7jo;3EO>U*1;0Y1QR(erEUE zudloP_8SlR#bN1j{400ZJB=CLJ`iBC9sE)u_0u~+e!3r{U}LuoTt9iB#)}ANMLaO6EBB`P~26;BC_6|&ob;rIfk~* zWQ5uFN7~rjYVJnM@%B^OY)!uRyXkcMzsQ`G_5@FaUW6Zk+L z(%Zw0O%Ut`YIzq%Cs}qD4N?`t7Y4We72Sw3JK}7lJ);yRR#^` z5E36I4RdX9-BWeK@IrBXhH>0HgE{6&5F4A2u|QBJFxRdB0&yKmr2UqmR97$?snk$j z_=>doW9U)rg!yUPQRu~ zuDQ7--NjizjodQs-&!>-u>l8#`d~zGd$^H^_*$OUWM1#1m`z@4a@WW1)sP&4b=N6D zsxAsv>)V}(nu{6Iu#OR)C|i>>&F;gzUz||dA-dLfkUdQ2u;U!?Md);M47dtJx>z&Q z#633ja){#^E*L;#*k?f6ACYU5^J=y#i0#(|aYpKCFjI9;qJiUuB2`u(Y|2P2?((dA zr1W|rW=rG#8|&;o;)1v;W$ma=_Y*4W^Ue6P@j?Uc7$WGe#T;+xZJ;9yhAX0j~^RB4FR6Wzfnvu|$+JhPUuD;&5Fy9p^HDcHSLksEpk&$%fD~Uw%uTOd;Z)<_~#`UQl95bH;deD#qt0izNA8U;9D&fPcn~~r?kWSa;em@*;;@9@=LQ_vDn+9?>5kMEQ{DA zo8r(!m|pk2kK^xT499k{>CaI3u;I%sr!3yNhl>SN#)LDw~|S5$2N1p?EB2AGen&%XbW&wX?-$ z=B44QyDM%sau)|bwfLQlFoxIBbCyoSTBC3TIbk!L@K^Sy!K! 
z3WnI_%a4Y)=2>AXDzBT_t~Ye!=6YW*D9?vkt3L_ET~_l%F3#Ntg5Vu(KDXVuoh>_% z19jGnKxWsVoOatP+X@A845})`|KjT9?~e8vhc94I>iL znC+-+^%R2J9ciy@ay)QuQISxx5F;6C-2+ENii|j&C&uO8 zVPMCD-I8(aC*$l;DppCwP$LHPJDCoJy#6R?;H2`)3UFz5w5pHOnWSbg52oXl8TH=y z<4&pOS??VSE0eNm#;;-q2H3e~_!N&cGt1b>b-p`LwBzvdK43=J^E{=4v)rSQr(=Tn zD=+ydU55aj&tNV;O1US!3B}R->g*ms;lL)CE^N6Y4pXp9CsC0NGeW`%XQ>uU{E#Y6 zzRwe@Myl@0nl7%k9$DjYs*#P7m)k*>58>62Rj>1+< zAF2)Y8NC++OHX2>z@h!b>beP^XR2)4-@#JXymT2 z25)SikJQ43Sb8oT{=AYp#l)uTve#E;u5#3$^+y0?3DCJU9TIY%DRg{u^)}-?d~BKT zh0;HtL`fJznKp?TxE_O@EUh_(l_l}EbXqpo&6Dewn*9rrw-t-`|HsL>J}K+J<^24A z>iR!n_kTtk0sfPdi`&~d+uImAfA0DZLjIXg^k0IUn}dysi~BQP=wBTGZ*-y6nU#^^ug^8D! zi-nitQ!^-r}SF)1uaPTsH?q%osAKd&Ohx^~S`TqtD{~I^|r)&Nj zH~+T|_r4nA-kh^#2LFVOA&6 zW+7qwr|q9D=3(LZ4`Ba4lW_hevg-fDjoEqryC+ZDvat-D@>e2{sMAEh zE)-sP1doZUP*m6a6>7ijI?WXb2bafge#pb$UdRAkWuEj^H&&VLZ`W&yI=m^xGs13v zoV$Tsdi-THCXiK-OQ0TI>8fWzW6I5^Ih_7lP;^(Q_BgnHBQ_oPTB@bo)!9(StYleZ zH-9e8^D*`m9536lh3nmswxp`2%0Mxf7J+ z`7*BQwxoWUVDon<7^gvI*=N~G<|$PEsgvdxHxNx+6oC&XN>Dr;hZS7R%;O6w`$0ZN z1~m!{W2eo=|oHB(g|(zqoj0?1d;7Rsme6!$QnX=z8rCw}VT(O%J*Y z>@fly0xW*C2v^u8RA1k4v-wg~%<79eM>->TR!mFEX=~z4j+FvsBkptI}NlqL(_ZeP4fvH!+;o z`1e7|`7e_E|4zC8+lBgnE!+PY8~;xrYZH`1V|=Q;>Ud&NU~qVMix!&Ih2?F(qMaVO5~W}@a;BbBG^M=Hwa zQcGE+VtEN36QR-X+d5lObzi_DhBKLiMSrR z#&&6`V!zhf5nL^`FTw3i=yA=~^_|Cbz7N$$Pf?CqdCK)=*x$VrE``>+n+Dl}hy$B| z9!!27n$)etP0DbsuzDWf89@FS0t2WeQS&t438?pZSBEkeIe2mg0c@jq3v#T{`(l>Q z^FcYyG2hwMgt2~5vN=(*z@1L7Q3snOIVnq&g80~Sbi@u5LQOa|AsHn}>;z~@Q;PBS z@J10LtjW^=j@qNCy^TlAY@F9r9oy` zucnq7XZ31rZ5T(=w44b}M9Gccqb?}urG*Cl{JpTUU=;w=n&gH(xUdsY%dIklw>2aa z#oqWK`BN1rDiD+m(DK1`e@wtXg;X~ZN#!|e-bm?*j-BGaH-u9W=S%CiN?Q}9?lb_r)`>-m}mKNK!< zn>0JnCF&;byUDxZyP^2lUw8F=WIbfSvgW?eep$#UpP^9fOMpIf1F8w$XpDh|ur%Ii zw$h14=8=f9h#AZPSA0|+yfVHEyW{&#%&Y%BxL|644fqLk2E-ihf<2JMN1J7naS~A( z1)&xWRI(21WX^JVM1qB4%98iHM|b~$$;KRAYAHz_u@F`sefI#H#QyAJX&Np7b^jCP!v;&H?&M{B6eu&5Z& z+CznQafFA6$8z)AUa8ADwY63&N2%uIYJ%JK75wDw0tEliH6Xp6k+|Uhq5K*8mT5!n 
zo&aNz*O|D1#DA%_!)39GKW4kr19|(NDIiM}+VaNuNAM4%XUsX(y9smCf!{5KC*Inf z-dBGF<+r#ENW0X>R^`$CL8+Hfqy}^+0n!p-uyI;Ju0a&)>}Bwluk*#@Q)98&!kR?Evl;o z^aEWdw3ok-bt6J#{?vJ(=`rjBdMDtxBf+gfT#|B>LJ%b(a;mE%;3|<6N1rOeBok_{ zgqLOQ)`AuD1fo68u5@{TY4sNaE{?BzVpjey`>>@h;>)L?S5k{wsr;_GML(E}2a+bO z@^`O3VyhwD8R)9Z*Ze7sS3za(dM=(XjKgbz&0tIZidI8+7dLMaXEc8r7uc{{ke6r| ztd7>tZOeCG4?Ycu_>6KbkXAwZ;)%3ri+V?RyYNbe@sacojuvNfaPvjlO#7Ae&ksD= zq1!n5hm7P)j6Y#-N!j518w#+$%VXfa!+Q3?IO@yPmyPw13~5TU%f7LMYx3yn&VC@0`W?=oK-j%=UB#UOO+Pnbr;0ZEeLZ&*e$s3k16;vN=1o}0{HW= z0D?Ne2@BqwudFJTf;p0D;(5$P^vA%p_pZC!C3_f0=M(si6T==O6U^X0f9d@oxS`ZS zr^guD?Cx6|LPc^Jmrig65wgj?Rb{ik&S|o1XJ+A* zam!ZLR_182mw4S4|A2lWDb@1&K9vesS2aI?hJNn&Uz%d!EFO4C%WwNRgv=|GCp`^;sLF17$;yMt zd(uEF2P{y3vf~t7>%4pM#>IIVKi{>DjB z^%R``?;lWtnAN`(^=c(`51Gys%VNm8tPpuPcWz@{na@g$qstF=hSd(9`XfnV1eZp< z>DJbKT_5Byf4iksYQmA5Dr*XPW+;%R{MH$Dq0-Lo+K2XPdBh*xSTeEg=;*RlONzG- zmKF4oP?$E$V_i)&p!(H~6NAczNE$kjv^EGwA2ejcvKTSWnVq^Ajp71VRIFxAZOo;%2UIYL6+u9uxqF}8$)m$yzf zttWCq^#lY)+I2OUT_=_a-PM?M^$DEfzDdl#x+bxAnTEcg?3no0)fE87(IyY0Y3>r2 zJo|LIo1&ab(;5rQnh7vXOn;tbJ)+p}Y;N7izalXc&}C&12|44s06$x;;$1d9Yh@pP zvwC9F(BWgm&Y|DfQBjof09xOX@xdlK0_D>D4roxKD7Ei4nTsm-f7$QWv%2nA_RhN8J+c~H&w{ifw2ni_XnXe6fE~Ry5e(#SP zfT>Zfi~G|3p$2}S+Am7XCW*~X5tV!j@$i&$BrNG{7T^5g+h>5Swv*hxp`Kd*++mH4 zGe6?gcy7x_Z`e=4uELeZszO&$A^HKqims8(neP^9-?NsQru;CylOw;+pPrP^?i=jV zUbxPmoq0=m`HIWT6G4(rot1GvI409uGumDfLORNMXV=xq6cE6h22Spq5UI^^T+@`? 
zKMInO;umXVV37PddQc$?r4=_!o;*8ZnM8rxCy@;^EW{WVnh{@un(3}vQsY_I+^(|K zo0(smG{a&Q7w#v1P;y7Tal9^jQe|U!3o>Ub&KfPo45EJFnzT0d zN*Ja}Ro~6vsKp(&DHoAu8!HP!``(8}B2khY9xE}*f&2o^r#RjNF3^qmtNYO8ai%o% z*PI5|vvdkSaXaAz#@=f1T&B0|rL3h0^a*fylG5og7_Oe2~=aM$EWO z_piP$*EYclka$D!R^tZH7K9O*gD#&{r}OD#UO4DWyYpn7^J_iyi1>8h#YGjV_%A6oEvij5oL|GAtAFko^%Ly zvNE#SrllE0Hp4OmPhRX@)ga^L0Gb&rXU-zpV%f~2f8Sf zN{(417tINerfn*1e-VuQpxO7S+Ekp7TTw5o&!=nylFvv>zCM|f5G42H76&&4U2n8^ zKZ1t+E;GA90YxUz?wl0*^tAO@Xw{+LtYMLHmxJJ}cSR@Dpr3e22s~jg!N*)hz3dQ^ zoiLL@NS~jR?eY}pgVdj-)z(81nzvK)MU`9niFYoij=vQSsKIzrkJmr9D+JaL@Pi!f z+OTru^oL?}WcWQ#kmgJo7fK6{cU0!WZo1=SUR8GOt!lxqe1@Bl6abc#eDiE@TO;^&s_XbhIL5=C|)`&A4Ap z`}B$1y>k+35{jyCoJa4sZU^c{4~&o)Bq@9? zYy@-rhS48ZN5=4#oeT{%@T*ihiew0Qfz&aI;a5hnU`6upUn#``6Ue`WAoP**e33wb zE#?MpodJJY)c|rq%+t9{|4(b=Sm;NoZ>DISe2T7z`MP35NxTfrh<=$@~P*$7O(LK%ZSQwE=7-IwCM3 zrWKwLocP&Dkd7!FCW*j=5FK{v9e)P^8)hEn@l#tD#LBc0jR94D9{mzf>XeBDxE%4X zQ3(qVllTdmk0ZZi8l;?){5>cJIe7?jQRISB{3t|F2rpsWKWT};;Z7xlfV}6 z3)P5toU4G0UnlS_>xd?SZNNL|0c_l|fDW)b>H%upKEW{mCyWEifw6YgK1m$9K#X4} zz%5zavA`#U1I7Wuh(O#iKbSx^s5{t!Il@gBpmz8^MI7*sCz2802ya{$fhYf~KsLZC zY(2CQ*$7-5Z`@X`IvoDUK0&PiJK(_+K{xOV&4?{O9;iFWfqq;lKOTrXz=2QP8~-I} z8~A}uTnGQ<0#2n_*S;5*bo3c)(R51>OL>7;#uN%THgTvhrD z9FoujLriLhft&6Saa5?LAkxM;ZEG5}o#Gj#rM07#R#wyE7-GdVr{1Av)p%v)^XAg~+?W%z zu`E;zKL_ufUm7uKpm~~kpm=e}HEyw5Z8zbTS^ZCCPS{hh*&gJ6I+)CuK`h3G!WAMR z1BoJ?s~N|pSB}@BS3jNNkXdq-6^oOzH*BYu zv-b4GuBm1G`gCVmTXC6Mn>7$p6^|5WCa)(Cyt2R@kqY)E40YCK zZt|j>ESxY&!e{Jlf-{~>OiZI?4>vBUZB3|$8)jOSXf3@Ehu<03+o*YM-N2R;=*{)R zJ4HPo3g=Syn(I3=sqi#X-PbTO^jKq~S+ER;(6^M z)EpSgwB;&Uuvjc|5h)i_fyV3~3Wf{z`h<|~kB2p_WSHC+yU#~-$(bwaRI`5>zi!m6IxF z<(o$$eiar(0R&HL(#3B2SR~}me8GNCNe-e-p-i|pd;rwdC8Ao)f#ji#I4&wOvaze6 zi`|tbj02y*!i(pHecDeZqc&H;$e2Ek^j;F8TW7+8vf&`9FO+Oto7*;1=QD2|@G;fF zM4P2yrc|u#iiZD5+`X=tYIMVgvrmIN6**ZhVP*ATOPVAF?KS569ZrN-4}1WVocHmI z^uJ`DW%!Yq+WQ-libRj;f)o#H$TM>S3+cr{{AE`)ZEvu@rmqK=bIPLqi*Ewl`Elfs zJw$qS)e1qAXn%jxo|Bf5DA^EodY7;5z`q!KtF_4FHH)mda6u+5eZkH1rf8WpMmp1e 
zlUh~UI(qyp#?tM&;temq@kt?VsE~_=wc2Wu_#zTdoq>>=QDl1P0_Nlp=ERt$E{?JC z%m)Hp3`1(6M!1Rlokv)Fz%$>gP?LO$zoYb31;va;S`6Jum6|MwlR`6D-h29#@7Z-u zx+S}_Y0a0j`tvo$Inwpw_;SnV^@by$>vd|{mCmEJ;(B9fdG&kWMjz)2JWHt%k!5Yr zUV`J-!@TcB@;owQ8V;6RgQ8NBSggr(NF7iFEIbz2-5h_5_I6IhshN{-dz4Mvq~8W?2M^W7LGuX=U~SMPQ1E3ZaIA3zPbL2R$%-7g`RUm@ezygyb#yZ8F0yZ|`s zoy$<1loBiChjA%6fE;5)*}d1x&H*HG?@cAB0hp&ul*82@9h_D6 zg^`p2bP-2ZH4P8)gq@<2H-o`GRq8J(Ek)TxFn`)dEM+`|l(sR-)r;q?&V1t57J$qg zkjrYuLSj*7p<`a4*EIfsh92sId!~1!SIj_12kU~|xzefnHGC+}1z)qX=>;_1(XYLm zjMM5LT}8R&@>Yx!{8B15sm;M*S=trk8B2=um@-QS|0WBg3DWTuQZPOHpfEihf`y$s zq;KR>dyI-R`#5jvQmzLbbhwKtPX?7KA<#G0NCMT?qa=P;?=v3Q6?)-s9%<3{YN`pf-7SCuqhVqYq49bHs|e}wK|9vq>KGgQa%mxeh!h# z9OAKVqciq1ag_rH(b=QurBJInikqTbA_<*VeOO2{@6j_MXi14p(d@olV_;P3a2fZv z%cgm|&qu>1s!w-CWrM^1^kAE@&F7yfJ}t%59Fi4Bw9PsPFT=d~D~FV+0M<q@iB^jbDhgtrEZ9O0iXpa5Jxj z2^+Oxd*3IqeONFl9qh!`*7GRe#NK7e36-W>pMxsZwx}-tl-)D&Cn^X6wu|`349UsAeySocxNK9Zt6`@l7nVv~6!64*Vlpx)^c;anG zLS5c@J}uYPp3cqH16@gS3)y$xYE1tb?&V`)WPIn8*3F}+F@_t*t6f{J;!vrE%%K%U zBvApe`0(W#LQR*&6Br*6cjDEqNvC8Nv`WQ>krI^zh-8iyI(AUBWOw7M_m|PIEV*1c zQcf-a@X~lPV&~2AI~i+nQRUN@;T$%caWCEFN;aV*Zc(?#wfvVu>G>ACPOj+3ZfsYN z?ilF-QYx_Rh?5b_C_y*WR6r?tbVttoIlY3c1D`1(E|w<&<(PpR;6$^V5O>MJ{=VVx zf~L&iQW0F_t$AKFXRE)zIQGVAtSMZ*`}pgR#}brY%GBI_(U@;TtQInvs!<*$hB=H3 z2or`1N2kJNWhQTp-kz6r$^l*y6iMP)ZSx{H`` z$@)SWO?h=^WyPbQpgsGgpVJKtH=|6fQyAG&InAVp>=6>AsD#~ktyi-m3r9?hXEzqv+B-QoKA{eK|^I5VeD&h?=w zdiM9+7RhQmNMzkrywGSK8-azf6#XbObGMh;Z++)5He}yiLT62B%@0QVn(9Y3RdVKQ z(|;|exO~L?bcm+yGUtgpv#H(9Nu~(*90+m6wti!0orF=^sA5w9wvE!W)$9K=!kZ2Z zX?W^ZJ6IA;Eo~OtAR|0}!8gvf7Ghel(sIH2d0M_+QiIuQV=%dNyfm?%sohvr=yh`t zRG23aWq@puFX_u_H>nKXA`{LG!?CAOQdODRn9r+j%p5AfC+%+dRD(y1w6iQX&?(r*HoB9ib@k$ElHcI&3d#1!Z4_^=&g6SM4TbH zAmP!UR&iQ(T9J20oA*|Cmur)y7l@o%Ad?_=1b$qqVo7+;rKFklT!9{u=*z^U zuX47rfck+wHop8wtNwmn|31Hv#zbMhL)|a`Def$SU%jZ|{OBX0uZxLR zHShe;TH|*qBF8h0z2OBG+YOOui(uV|pAx&s$xcoj7@AM5TIyRi!D}OJe|umB-Zk3& zj{SwroY#8x`8c<@)qp?Za@I$a#=-%Qhheja$7A=l$8}VM)%59nzMUj}Go9dWq`~OF 
z5U+CEqIXqf**W=f--P=Pl3|1I9rYv8Hs*n}|1hKsQ-)F;(G+F4ZM_&@C7q}{ zo!hvcH0#)LSZ0X|S3+i*ij0cvxLMk*tkz}euFaWbI1`J8S}0xG#=))Hfk z8AhDg3&J&i;o;CET z9K@sCqJEN|11NZRdq!tbO(@!qB9e+hgGMZz(MP&JY?F@>*x*aLn;qQi%%tmyyDWnP zq8^N-)W3@;iU?zx;W||4s#6s)j3v`{+-Zi8%fvG0Qx#aJqFjH^X)`t7m`YQ`a~c{X z^_nxup&nD%Ql-017Og`5vVOVj%e;}Fl}@ts%SDIV3>ueORHB8A58}5pGbEC0>Dv>eA-8;bCFR4yV@7a76dyCq2 z7Hs+I^^xse5xXkN_!DZ{W3>*(=|jN{)}aBTj_)y_ceKJ6+0>WIH270=XvK;lezsmM zl;tR@OJ`fQVvxRT2Z5c6?q??AWD`5gR{YkSSmG3rAt9{H;~aLpHXrT!2%MXbdmjDS ziexx7y6aqeCO*1qoK+Jk`oQ2JndXP?tE>a-9}ZV|8dRt1ccTm%j_t!SeA9;L@h3~T zOfI(_fCeI4|HVR#SAJZg|Xd*wVq{UD?^OA z!Se|?(ZXE;lgv6rhU|pj2(aRXu@S8Kvp-@b%Tsb9YnZZ;4bQA- z8;X{+YB+TDGrtmKtOiW_1Qa}iUx*oZPhiv8FXd@#f?B(%6dY)NQ2i(4y4a!`3l#(D8Y@4BbZE(S2W!P#9(xw8k%8t?*?aCdF3x zCXJ4SnHUr1&wofb^3bi?<>$#1C@t<8F&%<#3-N=hudluNkgo@_2eyNH;#65l2i%@Z=@? z?m9s?`237mfH^U~atrIJN_VBHsZnQtHAeNil>Dq(gP?hPft@U4HX40oq>9&WZ*f|D z%v#0K+EjeLW?IwnmOUYua%SYD#HFFMlf83d%3`V0{0^z6zP`*s0XlZ_CoShgE*IZuU(`5@@B8;# z0BZ4dX4wNOgy(*N%M+#hF{V1G>4@PFYe_!n+>Y_TB)}q|Jm%iG@QgMdtW+Se% z>1OFjmchYBhEk-~fVRPsWA00~@*yhl2IC-VcR#o~!fOLFn^>pilrwkqrzF#iz&?b1 zYGBy?H%{Atm=0 zT7s0mz2Xp)g=*P+8V#SnPsd%BCbyPnILU}gx_*1S7|CAFg{CxbWx9@9FD^1kt2-96 zYIBT=C(^aG58-0os04TlhLCJs)crJ3o z_g@iga4MNCp37LtHk>VAuoGTYE@tAa`kf47b<}f0ocRQC$yeKr4HgOxJJ-UV{~4MP z%X!5>wU$ckJ&C|wf=|;`UF*G`iSlK-;1*(YQ&X9t?IxVAVJne7%Mc~2fGQDFid6(GloNYliql@dLlOZrg02AzCGxRLSdL;j2TPL5-x zcQdabE54eFGMk`kdulE{jYCmYMtVmUp3cJTJN;_SS&}4UyS19}r)@hR87)rR_c6^f;um_Pb~WhdwC~OA4?~0FEfVuxO!yU3{nM^jbC&SVDUf}VnE?CLHLTW!9TbG0GU}!T zfHJwS+Bmx*qa;D<(4vfU(dyM=>LuNGrzPs+PX5*h=o__b;G)(wP-yS519mb54l(m) z!M@i}X;I`ylpK#8T=9+jE}81o-dVgqF#We$;ITqt#VYz@r}}@7`2P|RyRNl$Y@WyZ z;&Q&vCSJG!({;YeW1GJ830y^oq_==Bv~BGhITU~m&(C=b*Qg>mX2JSJpa`Y^!7RTJ z_LKP7oiayqJuoAhLiV+pFR|D?)-RbJX*TKYi9=*dK`~_(Tf8h%_vQ4uYeDu5?$z=` z^AxK5(N}0u)ct0ociC;)E~gRQyU!P@opYo-u1K$-tAnU2UIq5FBqxHhUb8e!U0wu&jg)@fDVy!CBLA)KYk%Vx#x@BzVbk+JTFg0~d$ympL(I0O-t_2J ziWy3DwkMlXH7sp8a2HBgFcPJ!+pg@t_7lI)6)|h@2$=+K(qeSePe{K}7yc-R;Q3;$ 
zNWVW68&MN`m2VTF9L5|*AMM-{Gl8NAu$4khxB>s%>G+veWzl833$807o$C9R)3R~9 zvZp&Gp=`I`K}OU5R8QT#sF|@(O*;}1HDx&=os*6%d6n&|tBNSKTo;++ebO%3MHijX zY*D{>G@)*H*SC<46FOwHR59{vSfk3>^OwYG(Pn_SwH8O_>65R1NRvWp2jjN1hkPB7{2i_wCkmt!fnW+iKWus7qX! z%&Q~4KG=*c5AN|aYzj5+bo+Mnni{5QIa({V5_whRIZ}u3xnu*uL|=%ipk$KaGjM|P z-Gz4H$zG2nu+{Wpdmh2-I%C^=+Fyc$)%ruLBx?!bXX$W0t-Nfcj*z>P0AriXY-Ca3T4zddbEI z=?Oi|7xD&sefg17_VZ!p5Q1WgXh4&z1KpJao-`*2j2eOhHd52GU7i~$_Z%FcfA0{R zvw%sPjrc%9bj1ERLAVO{>TBs1u5lUYg+r1+Afsjt?8Y2HR%33l<8!J6MV5a(6wv^9 zT(!C7aoc@R8^S&=`s&m|dzkbay3Xe{RF@K78c;;OD-RhVcB0EQ924hV5#+N>>sh(tl% zG$NF@ze@sOE;L_UpD*Jpq)v}>%&%CMyTVA#DXJT!s)w!oG|%nz-*S8xj)h8Mn8Vw3 zo)CPSUy&1aw+IWrK9F9O08T;=Id5bzeg$*~KMAP9jh#c6B1ceQHc5@rXGZMO;;f!) zlprf8%z|;Ux-<3v9IM5@cFD&iO8bE?zUb%C0!-%jPQ43(TRrB{L23R^x6Fj7_827d;uvVr3~aQ zoRcrR)Y&XO@O~E;j!Ar2a!@}`l&=ZG4qX_aX0&zH}sK9yc*-zUsiX75*eUkn%^` z)(K4-3E8TjIk=niWz5b7&+E2LYwo}@3Gr$Xgu?YDJwOq}9{0B-ux2Hsfs@}4eCzi%WskbJBmJ@BTpTATz1)aZ|c`nfFi;tlatP$Zc18Zeif3Y`{&aW$9krTKQNV-z^ zY!<8$Q9FdS*G5QTNxTlVM{{=3F1Z!RT10b()d{v8?t*{00GnV7oI*7KttNR6d?)DI z8E;P;C8!gQT;*e11z zD`aN)qDP&Q5zKedC%ozX45TkEu;cUq_<-SoaDU3p=@aYbaK+#WiYW-4B|edNLOM?w zkQK`#n=h5PDth5v)NfR5a6RpK=ywm@ zm<%xq-FLUJ6Je$ zRosk+jG*KUq`2{M3M8jyp~2LhWQ1J(T|fd64Mj}K0N61zGY#>kIwgBxi6qD|y-m4| z-*pU9-Yh0gd|yKmeG0Yk_i@6u+>i!l5AkrOFwM-O-ufmi2!qKUwl|1I;di3 zk|KdvfN15VC|AG0&UFHIa9&Y*H~oXaJ4KCt3dWXQ$DaW|2kcn{e11gfSmCF0m$)C56KE1yfg-d-b_?fuPa zD#B@|`|8+_f4~)JZw9#BhY|acSpA z$ww)?v|zLmj({bDl!U`6zmTZu%&fr`z}vYK>x#BO9pb_=IWx7zkA@!@0p_*>uC!&M zWoRQV0;gj7L~R>YcC7R+&G8dxKiJ22B+1jkFu_u3DI`o9&C(kH*X0W_?vZE(1dp45 z5tp>JWa`xvnp!t5Eu3bjTVO`<@Q|VL;$VCV3-kB!>Ws3 zswz<_D>ASOSK&0pH{BW1bJ()c;Xu<2=zADrc^FF+yh2d$W0Q5`v=K*L;K+l<)&S?~ z5o{1R^k4i2>JK88EbpWe^|X`yEs@7GKudbnS!X!8mzG)S$s>R_i)bW^<5+_tVgU1+ z_0zSnG^G1U-$dqqK`QT!wdY4U*>MX80&VH=kndeINy84qKyb7yv%oNslSzEru86(VI* z|3(jp#KRi_(zzuJtA|(#tRyyxCM$@18$D=oR`V!XVoYZ=Fe#y?Mrtjb(xr7!553~T6`W$#oOvGS_<7}0g8)-4evr6)TwVx3xxMgDOWLzg**Qz9LJ@K3Wt9YQg19#ODAK=>M_F=Rm$ccqT~ca$ou|l% 
zwF$U@8KjK(z(S(2z-gr*rtXXisc5AZB}RdPp)S}|b6*wAhB1+#OPGL51KYqB``yp5 zg3>Aom*@sx30+VJngu5Sx7A*CAlL!efp=y!Cw|5G0DIvB(4X*{Ko4|`vdx1#vk7ei zcs&7P82Q;E&cN!WNekfyKlD$a3I!X+^idx%lKXI=Ecd$t>XG;bgVBXn)22}B!bRKo zDPZR<2tM-y>&=Kxc$_$t+;=Iwvc}n(&?ISrlfYhI7%hcsrY^S5)Fse3Rq8?EN-`xc z&`q6G>L$&~Z)eY{Y^QH3Tv=0?>7$v@mAW!#nHjWaY*`o_nJg^lR1Skauq`k-V>Ug2 zHa#2B@DhA18YwMf>@gj+QMSd=R-M#)tY{$E+1@6s%;5>K3asDho_cb33S)(_+$5P%A@e>tC2TpYA|(lm zqy&nKk|9^FrnvP|4cl*VdvNnDu*92La0W$ND76s>i98ZH30&GHKdrt)1y<$WW3X@SL42txq$fyKGM1t{%9FS45QNB^3QEp*LVL@S0VP0WzVSZs@VeXHT z9|Z%7d*pROuMz|yNX0<|40-_wdMI{ie3|-|?>Dc3k~hz6&Q)09zpM=sU1IXMWp2I>7H>*OWV` zJ>z~U0CYe;RTcJ~0MDp9?mdov^M1NO*`VCO&!9WzJ&Jy0fU*EGfOz2VIM?57vK!KG zSpYmxZXh=dJDh8T9Sl8;Yl0n9KX8C2;259~fIvTj9o`w|+DDf=dq62sDdU4Gbr zo&X(y9p6nJwo-7MWnh=@UJ!hHOaPtv!Yxx?v$4F9TphF@85_q zErZSi#BEe!H=np%a`Ilr^ms7=t^;f;U~dEj2|XAfa__mQs4N~H9?SQYH*Vj*wI!y~ zwz9Hxx9`V3*I(+>xxetRfRrBsvI)RChvwIzaYl)QJx0KPdx}@4h=}{+$ujSaT-k2u zZ^4Dqa!CAKJxNC%@gvK3E9@x~3s1N~VqjG?pNwiTG11sp*P%6a!yMN~BNn0XD?#4p z4rjx%qWm@4KkCYaVY1o2OuGL>kSrWrO0aH;F}ZbbaxdmD3&mr4FX}1%iP(SQxZ*i> z#r7KY+v=Hd^$$z6ezI8F};*?5*AfP0+7efS? zFow5|uK)DG6Rj5g_QCYs;1u5IkdAKDa*O$Tnc@mMwl`-+5UI&3_*XC~nF&if$kHIx z%5Z(^#~$MGNm>S;aR^@qD#T{czR=b6V*{U2Ol)$3FIOIbP^xuGkpHHNo$KSDCD_GqgrjxmoPgLx?qVkcCqA-bEY_o1X5KmDZEW$wfPi?gO3CYKSmmg<)^bGAkanX&%^?D#U zyk1XB5-C;A-AmsncB-*scaq)G`0F}!I{yx>kY0HAFI)XprZ`sCRo24O0Kkn2mC7?-;J zO67eKR|Q4p&|uIOKodHd_gF-jrGL)72PP0;fO)r{?CpYZQ>Y`TiRa%2=c;$M-Yjk9 z4rW0jDe-i#B%+qzD#s^xT>JBtp2ysIm&pLkYpU^zj-{sjD$9|)z&a)piApd+CqaWRCS4K$_K6( z2dEeqUKX*k0lE}sE$p|%zhB+4zo`zb1tRGKt6Zl;*b==FYsqi=+5LV`!gM z73sPCD5!B0`C)tVpm1!;7o)XZ({9v}CM8k5h~<*PxQ7wm-T1xp)*B&!3CWm`&}lq3 z*X$8$IRt#B(K0PPnm%9GXehH3Wtp@22Wnx$zJ1J=w)-F=eW8+4tY)ly<0D->kAWP1 zm?QD_te!FQu5(q9k}JaG;)F7(%sGOql65($JSu7rM=YnS)aY`n8YsU&wU;#@p_>{? 
zGX(LSY22S~m*J_6oXUup6zcaCb3E=5)=%H-J}rZ;9k_{VE%X+u#?z0tZFp;kT6lX^ z@22n$3;cWGPh)UbjazN1esAMab<_kZc9Mja_S+-W6bB zRujjO06vVakjhk(+DsHy5KAK9D)?CCGcl#!Y|4G+@NjVkyBt)S{aB_r41*1Ob5iCGamxxNl9Qn1MqwJID^3i5Y>lWk2&XrmtzeYi7u?#3y1YttQ zCx1l+8`@B5Wi;m~TBovS4H1YH2)5 z77!ZWyE}f)0e4;1Y$q(by;Nou#8zWitF>}BS?Mpgmrbi%xl8k$sRn|L4fF0JY{%x$ zfHY=W>|ihk%-E}_9NG2t+27!tNkq7dBJWtXtraE`MOuW-kuZe8=+UkYP@Txc!w&Ip zw(V+G$Sr3BX-078MB@;JVS~sYI+#z|J=dZ3H>o)q$>b5&+NJSP(;q?g16A;B!67^& zA*dy0PoM|UzA@J@Y_rtqBa6L_KpkBsODc||QEO&BVZ}GQeo(4`at-kwqMUE{bl2q9 z<3-0VuZG-EGN=^45ziTRoknr`Z7mFQ?dcoF#(FCnS@v0CyWGYH%&H#6iZNY~2o7mE z;UcU^ld`3G0G=(&02ob<8fTojsPKMdB!9beD7TeQM!Kr)W~)+;Y%ZwQlvC00J1tN-f|wYj(NucBf??5U z>mib6NrQL&V@OZve;s6$xVd#imunzCKa#^~Jn%aVRgtc|ev#gVpO4)K8Z*YVH*I;|a%Sr%WUchW@DWy2#_2jV(J{@xzek|dt>{A+!^;xAsrtE(ZiWcqwb)gbZ2 zenuqiiXmct6#AI_ZStI zwedU1?!~xiU5Y+;gGV#&a6j?*3fr#hp5TO6ftlyOB{@^h0eDCWLR#e$emvBO^cZigSZ1C&z63h_WLgNSAm; z*Ce$V9~~u0FNmB#-x4Y%En*rDKD)H(8c&t$(sLh3)Dd+_QiUC?fOoFq$lToNwwTI{ z2`H3fQx9HbDnb0hQV&kgpa^hQ_4EY8E_$L;jn)+(Y@fSSCO$= zUss_oT1&Vv{S=DU&0Xfxes*AAo6<4NYlI)NXdH5Pc%c-Nb7*30w2Hh#=@w`va2XWD z?R(pCIlwk|t`3Q*l@1FR0+}yBhh=s6+UR$`@hc8Jj8v9gHAiSGUYZx9x*B9ILfRl@73+|`bXpQZ4oNCWcB zOUma?0wxV_THDV+8j3m+P01nT1^Lk*Q%o`9qf6w3<9J6kSP493DOR?FfM4fQcLxWW z3G?F2D4C4V`8EcSOuNl^K$2o2PeI>SnY3OAT41mOiZK*phBO+8H0^%3a11)Ob&Sb% z`}_?AQ<>i}`gU3b_?B@o1P9p;*^YBrK#L-e^lwi#^7RCn z(=iz5sZ)?vR9|K`GtHwUKA*ask8HqawV0hMiPG7yvUCje9H=hir=pQ`4&$7v?A$mRzMRn(?QaG~{i95r zKj(VQQ_#l^ZM=T~E9=9^^o+R{pj0ZFrLq|ZQz{`g81S;vvnpY5)TSt!NGG>aTg`)+ z$etr`Gf8YHgE%F!xKc$OB@>*}8rD*1CLyZY36A2XD1EP4eiaudPuwB?euu98GTA>EKP;m#k4L5>vvr#)1?tNnim=i>{=M5qf40HKeXlL{42K~6MC|W7vOIJmAucqpXN=;(L=~DS9>iLEy^*Ght+$I9Wuxz7tRc58zMr+#-wsYxx z5WO3kCv)l~HZ|@lY{Qve7aRL$#_HjEWjJ4^LZ~5AgK%`x!Oja0&r?##eAdc8P>2Oz zI0j_4u2VCvq3q_$Ox*~=tepKf@590O=f=K{MrjAJIV$8FQ4igGVls{A3;#Siv&p|Rmi7)JJs^0txbj_2?= z<>5Gwzn6qpOrp2djW?`LHBcQ$Nkk_eUusvOSggB>_mA1&bq}b|$x?tnzdfrJW z2|O+~X#7BORvH zQg0sZ7Ck`645Vnssk1c#H4r_Aw9W2#i;-BTvmco2AgV`9RHOkYd|!RC2sq!`uL;J$ 
z5P!Dz7&98xOw=YPNyPOH9^mD&HPtciNYxv!&eExCCC@Y3m*vvDtg^ytKX|30QFjiW zUmI6+P{GwfR@KrWtR<<m-GKv9s(ThnTK z`BRSe#W(81PWJH{b0o$k=-4{|uh}eXkZ9SQ<=bac(KNoxP}<0usPLfc09~<+QGPzF zyX~Dref<(fXllxL3g0cq_%hw49Z~}~7nu-gak$|PQw@2fg;ot#8Py3La$zOiYpkBt z@h9JEGVf{M-f7DY(TAHishgiH17XC~75(&nEvi{lb??~KP*UV%;4-eHk%dpj7Qp=2 zUslqP4}fYLA(?d*tAC#zrr3tr;(;?u8`?%C($I*}Q|p(Et})oM9C1ZVDvkT1ZQ=y~ z?ZmaTH*-AAA0mr{Rs3!iGRY2Ro(`dt-4WrJo9wWR>c<7uL`%j@HW^XcAu@b}<`|ti zec?Y*!Bk7qgDLTgns61aOe3g)KFG=A1p73}7)_(4%6i{trE>K*D6kBaJi1hf1sUBO z5+|-_R%+th&s7aFWfsz4(RgZ9!k`NJN|nou8J(L%Dxw99Mqdl@IXVv%fEzNv0}c)k zX}({u$-7#J$3Kcz57e6Nc+Zay&_PeV_quD)PuC{t8QXe_tC1tOg3b(lQjPR9A3v4#tB+u z!Y)%*E{Nf{IA<&16mxf5D2?sea7rbu29RwXU$m1~vw;7uBdz`X`|hU22e182_E|Ph zR);4B*i;Z`3ui5$dmtycx<@qxZzE~x-`f~iW~KZhv>v?@8|N=2;&AaPMCK{>9tDd z?$*6{|DjCs7UG-(TJEW9b4940$Fp!#N@%D2@weO7*qs8#Rhfd2M0u z&8*+jDDj}gT{IiUJT2@x(blPz)cUSBN}sUVKxaOV`J8!A;nJ7k)~B>hcF+#{y@&Dd zrVPgGyKO~Gkk9X&%7vFaljg^(*VM@Nsf&K7ryn5#w@EmOkS-vk!QvsjmpH`Uox-m> zvR~{@n++X8-LPGF9>}jftd$|W*Y_XsTr0Sp&}DLx2UVds6OqQKjBZM)$02o}rTNkK z)%ZyE;5>0g=uz2tPup`ZneQ;KG{NEONKX?bt=^T9n_iFVWN_R_oaepMTc5Np+oNFc zo42p32ixRRg7a<6G5*fU#6u$-D!t|O1x86wBE9t%Iz@5a{RVLE-X(nQeW6S_*Pb8o z+c_|E0pag(fK=OqWrAlGaLYkUBs=~+yWctLZXAt(r;C11q4AxKwIDJ(2CJz1agWT> zUI%JZ_fKh#H&T(UcvYhAr#MzeZR+0D2JrormkU$)p@zwcMNh#Y9*DdzK;>dTpZ`i2 zF|siK&p|%P{{-^=n?3Y@qkOF2KGFXZHz@<-w@{Ov@!Ko`$b!NOARSv=FsNG+puv=exIZ|c#WrQ~I(fi$a4Mzk}o%D;> zRC!y1?;wsfH$iz{99z%4GyFDz<`$tQkH$fjYW}sO;Rv35)ko>(16!e|x6th)QeK_1 zT_xm#w4#sx?G4EzCK4bgapEM5s0pJb$>%v8qk5nXE9ZHjl)Apx$Xd=N5=kd;5j>7a ziojGKR@+Ea7gUEoNW&==BZN2lKF+AS=8+~?ju?$uL|*0c6rYp=b=y?=XeTWhhTa&6`sn4F@wE?|`iI#O z-?2WF$b8ck+I@i6PFDAEnV|HSwX&$->1Ny#cmO?Ar6iM(fISTBGTJtis^e0D0f^?e@cMa$7# zOyT8>(7Jj4;jl`f0t&^JR!+*`y1X#@Q&-!qKZr1es)llT|-{zr*3od+gDe7}mpJqj~8tq)|oJZwz zc5hYJ$d1TuA9^w4mNVE^sZA}K)NUcJk?N>@|44qcSDzzE@J)1# z?J~3Ne8e#AO686=*EHcqsd27~`TEKU%C|;SrN&) z7`%>Tpo1lEsQAzMr0KJf8jbtJVRwJTu{fYsU<~lNOUNklX+HX@P(dEhaUeR z>uh&}QNE7&r8&F(L*8bkYpmyjy&N@dN7Bnn)(bp0zT3}#*Ltgf(b=R(4%OY9mL7i1 
z@qV6s6azl9qeIm#yG1!CgYBvg7Q4QUGZPr9ZcUDTJs&hbma}P6^}Bp=&(lJa^~9(_ z>*_|HDb-zv&n;}6WBTGOFW*r4^iJQsG%uI)9m3lupIz7Ma&o#-d@Rrzi4HwNHcI~- zS$5s};QFfjzy728HS)P++w^Le5{=9&5q51V!xfo^Sra=QHLtYJZ1+x9EiAR<9Bj`| zsTXxh8T>K!>FZqMG4a%-+-~n-8Ig}STx1%n`>dnvhp=;lpQZ;KI#T;fHS^z2G_QA} zYGrjaPKFvvHyh;J%RNXN7Hh3(ZnPar%pCb)z$um>KkZc6|1PrLj(@7(ad6|2`hn_I z^W`~QP3$r$Ma^-qDvqSdO|8;0soswp?!P-OCUMfm#_$E-%|w3Ioq7W@pL8$E9la@P*hSeys)cW?iAiw@GLd0!85O>=Q#X6&VAe}*Imef3k6a5i zlFuvc7E(z_v+}J^c$JjIF~Dr1s8zC7Ov0@EQ=CxG^xa`s8GbrsOe#-i@5y8Dxz676n7t>6y(gXBpvupx z#1EmJx?mA~vHsAA+f9|@>-KqW`fvZIbmx23%A+55toWb#abMV9ovf7BurR^#KSOYZ z`-1Sp3$1P|g0FYBGiDhGVcC`Rf1gwN`-I9;GQITFfb3FQFQ+7^AVsygY^?Xsj2{12 zLQf`8gceWTpgo{>;|pT(7$G78o+DU#XaGkffj0%9b2%jNM!+KMUq3T|MX=!9&0-=C z?lc!7p?z?JhX;NEf6=8sl(U$s(53Oq?nwW8uuDVyd&nT^S-f1GMfkXmwE>#qa|{1w z=>LluJeEWOCor%W3s4^O?avQeuODeD$Qa6fF>*iK{%YFhM?yI{aoD0Rjtq%f*OQM}FK&$A zs><`B)ouRNgvWF}Z{0_0+pdwBSf?XxtLKmNwCe9M^tjv~jrVg4KU60Qo# ziBfeLl^;u4mdh^Jhrgzl;n>V?^x!|8T`wLHqtC7vCu_Ne5|3mrXhc<7{oow1&QqSSHFR!+zI3O7PrERcbL7~5DYfQ@jfnFZAx zHFxmH$bX=8$ls%Jh#ZcbRbp@4{%-m(OE8!2?Yk`=2Zr07GlF)AF^T%mO2?hGjQAed zWQGjanFr4+@IB-$d0^8OSC}b2!FJg@s~4^&Xe6a;NZW>q!57+Q1&>RqjXiv!o)ccI~q~cl*%A8&Autc{h(< zQBV(ZiU^-~?Nd0uDftw?!REL7JuiLHOW&Y>n;3_LPA2aYbro<+JtbLgE}xZkr`1$t zTr88R-e(tkK(xR?{J}uVeW5jdRsn4rpE>K;6$Uq+xz}@jLt*el4YLog&t}nm?tWb2 z$39zp&&tw}xfQ72(i&_h8nF2p|CzUK`t4GxetqRNX5tUN1?(&L=qt zeRYbT)2iui7@OjLA)5~ayEbO@8I{zn+xzP5=`F`B#ZOajypGnQ-PBMFNN}i-+W0!- zREk}CEH|g2XGFKVWR<)KN52a*4X+oymlP2E_(yi*A)DOT2^=4(Dyl8dVx%Z`eR!r& zq=wt{^uVjHqL*(CaHPbHO-{b#=GLp*zJn`7XIts+gZtO_uW9H}pS|JM{iMU~M`zEl zxE8jq#aO$3WN+BItVkx}*}Fw`Y<5ktM*>M=Hp=4nGCt0)4eMwrn7Ot)A#Q?Z)3~L* z)G(*%;QmY9o*No;M4s@MY+0!mqs`gRJk=mvE#Bg1GaEfc(sIk5Jnh2w>fPj@DrK9} zy`M^NUdP8RyxQjJppm@z&o znVDUbH`NBO;vTx#<*K)L25k6p&S#rzd05)jin^3Aq0|RKQ)^YWB6hM|_3oON%~StS z;jf#kRSZbmQn{tB!K?AvDjHvM+=UBS(gz+?m8Eg*o#0Ffv2RIAK9=-gM%uM|`-!GX zl6IS$qFkxw#q%reN;NNZ9q&g3qgCoer&f1N)=xP$-Eq!x9Sc`}8m@9YUc{zCDM;jV zAEGnsnIlBhK~DgDJ}X8_I1)_7o^c`Evj 
z!`EY1X^o-5CdWPEG#;+=QD_ma=oyWZ|6wh0&H3|;z5JL=bv@fRVLSD+InmYmIV`K5 zt_*r{cnG66ehg`qc==?zkw9{BC~n8VS$9iG!%sbXrX0?USCt*A-|+Cpn#}m@agC}{ z3aQS4NBax`a~S#O84rcFkKFwgpo`_&q9hNkSkS?k|X z8XtAU5zDIG+q|-LhRst~(QG49Xs5eY4@E`swzu=$p3)|2P;I9Zb>yl=dBVcd>L%h> z(cTtcufOy*6MM`sbMM{Hm0lNP2mO9d)6A->)HbT>CpgT%FDth6&!%W~PuPoa8@x7c zbh1kM>XpG2WxT0v!SJbr=kB;PJ~;>Pxe?d8>a}^|*Mq#)YQA@565aQF^+HjvChNeg z_Qcc2`-DwH_}RHb?_}?gbGFH|+}?bfv~S(%C%B!7v+)DGN3f^o>`#vcX{fnBn7p}` z6hj**l)e>eS*7zyuswj)M7p}{XRor)rG_J~J$Y?=uiJ99K3reK(HLcMNX&78tG13J z5)!N$H#yXpB_m>W-@l30X zGlP^w+=r=G

yN%x!iGuXa%T5}(k}a7JE-iFm?mo1n;4tF$`5NTf%U2dk8=5x>%P zT#p6s&6l~cMimK#%^|NS)x|6TBX_jV8f0irH}5zS|U~OOl&E?q*w?uH3e&wpYKV zo#*@gwsyCnT>(?KvQAfOmdnX|Yd+xbQoK~CGZv&2!so}IU1yNlt)6R~hCN%)7BtE4 z(Wz7rz~4A18FDyQUzDTDDc|_UA+7b^ScCoLUJ{n4QmK3ypPyXpevUIbC(-lJ zIkml!nW>dF!J2dNM2N`~?b?t_=bbff6g}}}T3K7Fnq~0LG}BZ@re135FyYmF_h_dU zUg=G?wXyx%Q76rwa=Qn6U+a4a!FE!^9uMIVuZYjvlXj;L3j@ox6tRef$eVs3HVJmnCqqBaB zJUSuOj2S+_b9)<~R@>faCt)N`YyIkU+vGcPt;}`XjP9hxMkt2t%gg62(ckQz9NCdN zuKZzO!&4>iyX{KNIZyCf`!Yg(L#$H{v930{ekdYV1vjkQ^0vn2N|N1Cg3!8n`f|THG&7l*9mMS5!Nm5eb0~gzcQ`b&D zGCpuKS^q<(M#{|OTuZ&ff}LT<`=7m^AJH7ApFYRYhTm`cCZ6;UeW{-Vxx7cd8+^9X z=oX2}>y_uVUCGhYTevsywSbdWB~!-tf8<>}@5`|u@os+OoWk9kDy~N~&KwRgX0xz< zcJ8(B$`9THKVE#^{g&1%)S7$SBZ;%~Ba7GuXDdH`SJq2$4IYC(4i+E)aQ>z_s6%o9hGT~1t9^s;aJo@jX9L0DG!n~c{~&G&&owpQ>#;1z|U zYmb^%)<4vK_En>+JEOrj^?HPI-oq!d)Cv3T?d&Bq59fy20UwsMj&J^nqw0cb>P4%n zQ__zjx2N_tldV?MY+Ld*9jMW5Jke?+DIPqQyicfhPW8JZr&?T0X}x5{blhY%xr^(y zL$xn;JL{#q2a+87>|+SsN4F-E|zAsj@PE5$fFe=Tj zyXA|~*2XZ6BP~sEI>IljVnKC85JD(Kw9wMW zuz)m6GaG@UT5-o;YM!D^a@2evre`#CY^L{|d*`BOH!iZ4nb<$C#(fb~M+l z%Z<_tv&r%t_1>!Jc?Q2xjiXkMCUUipSz?E2N6VMomm^!P<*%kOJrbI+Fw@H`pkM_?#TOu)4L1Lm);RAZe?4| zh1_ivUx7E@^t-9}?Tr(EFQP18H^+Pv}_usz0$53@Ap` z{`o&Lg#?y|$mqWA($kj9cB3%M>g9h~!GnEcbQMQV$HvOofm=fetefB$HKm;D3j{!1Evt;uI=HO*m6BY+6149-Rjl=?6%C8@8B7uM-a2s;}1;YX} zMt``?mtYj|g9q_wISl-c#e%p6!_bw!N!1(h^N4KSR6uv`oodI zwOaK2VE_!gOHAJ%j!ectyr4_r2yo1FbipgS;13-Piy`4CSbDlZq*wwZd+_lE9s|!2 z9)p0llN#F#7SLZ7EoIY(Rnt10GOudKlmYPl50NjDUrB z0W`39204S?aIpNqWn~DxtkAY35(15fmfoy zxIM5`lk#WKSZ20YiF4 zB2n;o`f;Fi$uJG@Tslw=I$i)C9ynP#7`Psd1iqz$VPs%%bTG6ngYE)WuxR?YOs_B~ zd$2wO7zy@a07hXLFNJ_W^8xw;br>|i07gb&Sy6~E&!8<42gf%6Bcd11(eq5Cz&c5y zkg(txdI*mMRGI-s!qM}B%8HEEh|6h^fnGs#1n>w9ahi+)?gbhzfKebDMBC!gii3^@ zs1xAvqG|xeWDufoV8xP#7Vt-8lpjI>8`>}p0)y_75dv^_Fdh+uVc?I5KyeGDi--Lh z837+HLUs%!)4|EX5 z1BkF(@c7@v01}x&|3Eeg@dA7<5eMgH00s<>jxLZ38I<32Fwhn_4p^pWl1|?i16Cyv zNTz6i1Pml+3W&f^j0az?fVrf{L)(&JU84Z;LHP#2qktP{>3ODrZ`$Ep8Nh%?rNcwp zf^>=w1|T4a(vJk0tF!TJnfz@gH~57be>xaj4K0jfrVbsWGb 
zu#SUJhCq1^XbbcdvJU_QRS8TNR3(tj0L^9aF961n>!BJB=TczCF?f1g0Am9wH7sWk z0T^_H^qaAPFQ5sG{gD{#0|ZC(^OWVji2^JEf&pGYHAydL;75R5pm>g6^bF}U2Ec$~ z(a}Kr!@)5b3zp^3E2`;up@2k|LH3OGDwtdn6sJ)dL>z+*K&cCj1K^P%9|qcj{FGh~ zP#E?%ybu`jNic1UFd`1J8PFfdujq9Mz(^!0ZlN$>nsjmjFfzhO1Elw`oN+*!Fbxn_ zq1qH(djJIuL<5HaI0OSlB4i)H5I}fizyn1Sy`2L*aBc{m50LFbz8&DZtl{g8i z5m6X$ZS-w{;DIB8VR$%Rg0=)aV}AsYbir*2aE*i3-5?S|cnDBlxGjPOi6R3Y8P-1_ z00!Lv9ucHTP+J@b70_IQdK#{s(Hfh~z#pDrJqOLH5!j9Z430ej56mw#N5F@{c_Ql9 zfJ1t<8 literal 0 HcmV?d00001 diff --git a/doc/design-docs/VTGateBuffering.md b/doc/design-docs/VTGateBuffering.md new file mode 100644 index 00000000000..9155929ea49 --- /dev/null +++ b/doc/design-docs/VTGateBuffering.md @@ -0,0 +1,63 @@ +# Adding buffering to VTGate while switching traffic during a movetables operation + +## Current buffering support in VTGate + +VTGate currently supports buffering of queries during reparenting and resharding operations. This is done by buffering +the failing queries in the tablet gateway layer in vtgate. When a query fails, the reason for the failure is checked, to +see if is due to one of these. + +To assist in diagnosing the root cause a _KeyspaceEventWatcher_ (aka *KEW*) was introduced. This watches the +SrvKeyspace (in a goroutine): if there is a change to the keyspace partitions in the topo it is considered that there is +a resharding operation in progress. The buffering logic subscribes to the keyspace event watcher. + +Otherwise, if there are no tables to serve from, based on the health check results, it is assumed that there is a +cluster event where either the primary is being reparented or if the cluster is being restarted and all tablets are in +the process of starting up. + +If either of these occurs, the _consistent_ flag is set to false for that keyspace. When that happens the keyspace +watcher checks, on every SrvKeyspace update, if the event has got resolved. 
This can happen when tablets are now +available (in case of a cluster event) or if the partition information indicates that resharding is complete. + +When that happens. the keyspace event watcher publishes an event that the keyspace is now consistent. The buffers are +then drained and the queries retried by the tablet gateway. + +## Adding buffering support for MoveTables + +### Background + +MoveTables does not affect the entire keyspace, just the tables being moved. Even if all tables are being moved there is +no change in existing keyspace or shard configurations. So the KEW doesn't detect a cluster event since the tablets are +still available and shard partitions are unchanged. + +MoveTables moves tables from one keyspace to another. There are two flavors of MoveTables: one where the tables are +moved into all shards in the target keyspace. In Shard-By-Shard Migration user can specify a subset of shards to move +the tables into. + +These are the topo attributes that are affected during a MoveTables (regular or shard-by-shard): + +* *DeniedTables* in a shard's TabletControls. These are used to stop writes to the source keyspace for these tables. + While switching writes we first create these entries, wait for the target to catchup to the source (using gtid + positions), and then update the routing rules to point these tables to the target. When a primary sees a DeniedTables + entry during a DML it will error with an "enforce denied tables". +* *RoutingRules* (for regular movetables) and *ShardRoutingRules* (for shard by shard migration). Routing rules are + pointers for each table being moved to a keyspace. When a MoveTables is initiated, that keyspace is the source + keyspace. After traffic is switched the pointer is changed to point to the target keyspace. If routing rules are + specified, VTGate uses them to decide which keyspace to route each table. 
+ +### Changes + +There are two main changes: + +* The keyspace event watcher is enhanced to look at the topo attributes mentioned above. An SrvVSchema watcher looks for + changes in the Routing Rules. DeniedTables are only in the Shard records in the topo. So any changes to the + DeniedTables would not result in a notification. To get around that we change the traffic switcher to also rebuild + SrvVSchema when DeniedTables are modified. +* The logic to start buffering needs to look for the "enforce denied tables" error that is thrown by the vttablets when + it tries to execute a query on a table being switched. +* We cannot use the current query retry logic which is at the tablet gateway level: meaning the keyspace is already + fixed by the planner and cannot be changed in that layer. We need to add a new retry logic at a higher level (the + _newExecute_ method) and always replan before retrying a query. This also means that we need to bypass the plan cache + while retrying. + + + diff --git a/doc/internal/Overview.md b/doc/internal/README.md similarity index 61% rename from doc/internal/Overview.md rename to doc/internal/README.md index e1cb74e4ddd..7ed4950e877 100644 --- a/doc/internal/Overview.md +++ b/doc/internal/README.md @@ -1,5 +1,7 @@ # Internal Documentation -The documents in this category document internal processes which are taken care of by the Vitess Team e.g. re-publishing the website [vitess.io](https://vitess.io) or creating a new release. +The documents in this category document internal processes which are taken care of by the Vitess Team e.g. creating a new release. We have put them here to increase transparency and make it easy for others to follow and improve processes. 
+ +- [**Release**](./release/README.md) \ No newline at end of file diff --git a/doc/internal/.images/post-release-01.png b/doc/internal/release/.images/post-release-01.png similarity index 100% rename from doc/internal/.images/post-release-01.png rename to doc/internal/release/.images/post-release-01.png diff --git a/doc/internal/.images/release-01.png b/doc/internal/release/.images/release-01.png similarity index 100% rename from doc/internal/.images/release-01.png rename to doc/internal/release/.images/release-01.png diff --git a/doc/internal/.images/release-02.png b/doc/internal/release/.images/release-02.png similarity index 100% rename from doc/internal/.images/release-02.png rename to doc/internal/release/.images/release-02.png diff --git a/doc/internal/.images/release-03.png b/doc/internal/release/.images/release-03.png similarity index 100% rename from doc/internal/.images/release-03.png rename to doc/internal/release/.images/release-03.png diff --git a/doc/internal/.images/release-04.png b/doc/internal/release/.images/release-04.png similarity index 100% rename from doc/internal/.images/release-04.png rename to doc/internal/release/.images/release-04.png diff --git a/doc/internal/release/README.md b/doc/internal/release/README.md new file mode 100644 index 00000000000..8f593ee9e66 --- /dev/null +++ b/doc/internal/release/README.md @@ -0,0 +1,13 @@ +# Release Instructions + +This page describes the steps for cutting a new [open source release](https://github.com/vitessio/vitess/releases). 
+ +### Summary + +- [How to Release](./how-to-release.md) +- [Versioning](./versioning.md) +- [Release Branches](./release-branches.md) +- [Release Tags](./release-tags.md) +- [Docker Images](./docker-images.md) +- [Java Packages](./java-packages.md) +- [End Of Life Process](./eol-process.md) diff --git a/doc/internal/release/docker-images.md b/doc/internal/release/docker-images.md new file mode 100644 index 00000000000..75941ca6309 --- /dev/null +++ b/doc/internal/release/docker-images.md @@ -0,0 +1,3 @@ +# Docker Images + +Docker images built automatically on DockerHub and can be found [here](https://hub.docker.com/repository/docker/vitess/lite/). diff --git a/doc/internal/release/eol-process.md b/doc/internal/release/eol-process.md new file mode 100644 index 00000000000..f1d2a343d0f --- /dev/null +++ b/doc/internal/release/eol-process.md @@ -0,0 +1,12 @@ +# End-of-Life Process + +The lifespan of a major version is one year long, after that time, the version has reached its end-of-life. +To properly deprecate a major of Vitess follow the following steps: + +- **Update the website documentation** + > - In the ['Releases' documentation](https://vitess.io/docs/releases/), the EOL version must be moved under the ['Archived Releases' section](https://vitess.io/docs/releases/#archived-releases). + > - The sidebar of the website must be changed. We need to remove the EOL version from it. To do so, we move the version folder onto the `archive` folder. +- **Delete the `Backport To: ...` label** + > - Delete the corresponding label for the EOL version, we do not want to motivate anymore backport to the EOL release branch. +- **Make proper announcement on Slack** + > - Notify the community of this deprecation. 
\ No newline at end of file diff --git a/doc/internal/ReleaseInstructions.md b/doc/internal/release/how-to-release.md similarity index 66% rename from doc/internal/ReleaseInstructions.md rename to doc/internal/release/how-to-release.md index 80ad0d38458..450127bd869 100644 --- a/doc/internal/ReleaseInstructions.md +++ b/doc/internal/release/how-to-release.md @@ -1,155 +1,53 @@ -# Release Instructions +# Release Cutover -This page describes the steps for cutting a new [open source release](https://github.com/vitessio/vitess/releases). +In this section we describe our current release process. Below is a summary of this document. -### Summary +- [**Pre-requisite for the release team**](#pre-requisites) +- [**Overview**](#overview) +- [**Pre-Release**](#pre-release) +- [**Release**](#release) +- [**Post-Release**](#post-release) +- [**How To prepare the release of Vitess**](#how-to-prepare-the-release-of-vitess) +- [**How To Release Vitess**](#how-to-release-vitess) +- [**How To Code Freeze**](#how-to-code-freeze) +- [**How To Merge During Code Freeze**](#how-to-merge-during-code-freeze) +- [**Java Packages: Deploy & Release**](#java-packages-deploy--release) -- [Versioning](#versioning) -- [Release Branches](#release-branches) -- [Release Tags](#release-tags) -- [Docker Images](#docker-images) -- [Java Packages](#java-packages) -- [Release Cutover](#release-cutover) -------- +----- -## Versioning - -Our versioning strategy is based on [VEP5](https://github.com/vitessio/enhancements/blob/main/veps/vep-5.md). - -### Major Release (vX) - -A new major release is needed when the public API changes in a -backward-incompatible way -- for example, when removing deprecated interfaces. - -Our public API includes (but is not limited to): - -* The VTGate [RPC interfaces](https://github.com/vitessio/vitess/tree/main/proto). -* The interfaces exposed by the VTGate client library in each language. 
- -Care must also be taken when changing the format of any data stored by a live -system, such as topology data or Vitess-internal tables (used for sequences, -distributed transactions, etc.). Although this data is considered as internal to -Vitess, if any change breaks the upgrade path for a live system (for example, -requiring that it be shut down and reinitialized from scratch), then it must be -considered as a breaking change. - -### Minor Release (vX.Y) - -A new minor release indicates that functionality has been added or changed in a -backward-compatible way. This should be the majority of normal releases. - -### Patch Release (vX.Y.Z) - -A patch release indicates that only a select set of bugfixes have been -cherry-picked onto the associated minor release. The expectation is that -upgrading by a patch release should be painless (not requiring any config -changes) and safe (isolated from active development on `main`). - -### Pre-Release Labels (vX.Y.Z-labelN) - -Pre-release versions should be labeled with a suffix like `-beta2` or `-rc1`. - -------- - -## Release Branches - -Each major and minor releases (X.Y) should have a [release branch](https://github.com/vitessio/vitess/branches/all?query=release) named -`release-X.Y`. This branch should diverge from `main` when the release -is declared, after which point only bugfix PRs should be cherry-picked onto the branch. -All other activity on `main` will go out with a subsequent major or minor release. - -```shell -git checkout main -git pull --ff-only upstream main - -git checkout -b release-X.Y -git push upstream release-X.Y -``` - -The branches are named `release-X.Y` to distinguish them from point-in-time -tags, which are named `vX.Y.Z`. - -------- - -## Release Tags - -While the release branch is a moving target, release tags mark point-in-time -snapshots of the repository. Essentially, a tag assigns a human-readable name to -a specific Git commit hash. 
Although it's technically possible to reassign a tag -name to a different hash, we must never do this. - -------- - -## Docker Images - -Docker images built automatically on DockerHub and can be found [here](https://hub.docker.com/repository/docker/vitess/lite/). - -------- - -## Java Packages - -We publish binary packages for our [JDBC driver and Java client on Maven Central](https://search.maven.org/#search|ga|1|g:"io.vitess"). - -To do so, we use the http://oss.sonatype.org/ repository. -New packages must be uploaded there ("deployed") and will be automatically published ("released"). -Once they are released there, they will be automatically synchronized with Maven Central. -The synchronization takes only several minutes, but the update on http://search.maven.org may take up to two hours. - -### Access to oss.sonatype.org - -[Sign up here.](https://issues.sonatype.org/secure/Signup!default.jspa) -Then you must be added as member to our `io.vitess` namespace. -Therefore, file a JIRA ticket with Sonatype to get added ([example for a different namespace](https://issues.sonatype.org/browse/OSSRH-30604)). - -### One-time setup - -#### Set up GPG - -Follow [Sonatype's GPG instructions](https://central.sonatype.org/pages/working-with-pgp-signatures.html). - -Install `gpg-agent` (needed below) e.g. on Ubuntu via: `sudo apt-get install gnupg-agent`. -for Mac you need to install 'gnupg' via 'brew install gnupg' - -#### Login configuration - -Create the `settings.xml` in the `$HOME/.m2/` directory as described in their [instructions](https://central.sonatype.org/pages/apache-maven.html). - -------- - -## Release Cutover - -In this section we describe our current release process. We begin with a list of [**pre-requisite for the release team**](#pre-requisites) and with a short [**overview**](#overview). 
-The release process is divided into three parts: [**Pre-Release**](#pre-release), [**Release**](#release), [**Post-Release**](#post-release), which are detailed after the overview. - -### Pre-Requisites +## Pre-Requisites This section highlights the different pre-requisites the release team has to meet before releasing. - The tool `gh` must be installed locally and ready to be used. -- You must have access to the Java release, more information in the [**Java Packages**](#java-packages) section. +- You must have access to the Java release, more information in the [**Java Packages**](./java-packages.md) section. - You must be able to create branches and have admin right on the `vitessio/vitess` and `planetscale/vitess-operator` repositories. -### Overview +----- -#### Schedule +## Overview + +### Schedule A new major version of Vitess is released every four months. For each major version there is at least one release candidate, which we release three weeks before the GA version. We usually create the RC1 during the first week of the month, and the GA version three weeks later. -#### Code Freeze +### Code Freeze Before creating RC1, there is a code freeze. Assuming the release of RC1 happens on a Tuesday, the release branch will be frozen Friday of the previous week. This allows us to test that the release branch can be released and avoid discovering unwanted events during the release day. Once the RC1 is released, there are three more weeks to backport bug fixes into the release branches. However, we also proceed to a code freeze the Friday before the GA release. (Assuming GA is on a Tuesday) Regarding patch releases, no code freeze is planned. -#### Tracking Issue for each Release +### Tracking Issue for each Release For each release, it is recommended to create an issue like [this one](https://github.com/vitessio/vitess/issues/10476) to track the current and past progress of a release. It also allows us to document what happened during a release. 
-### Pre-Release +----- + +## Pre-Release This step happens a few weeks before the actual release (whether it is an RC, GA or a patch release). The main goal of this step is to make sure everything is ready to be released for the release day. @@ -158,8 +56,9 @@ That includes: > - All the Pull Requests that need to be in the release must be reviewed and merged before the code freeze. > - The code freeze usually happens a few days before the release. - **Making sure the people doing the release have access to all the tools and infrastructure needed to do the release.** - > - This includes write access to the Vitess repository and to the Maven repository. + > - This includes write access to the Vitess repository and to the Maven repository. - **Preparing and cleaning the release notes summary.** + > - If the release does not contain significant changes (i.e. a small patch release) then this step can be skipped > - One or more Pull Requests have to be submitted in advance to create and update the release summary. > - The summary files are located in: `./changelog/*.0/*.*.*/summary.md`. > - The summary file for a release candidate is the same as the one for the GA release. @@ -179,21 +78,19 @@ That includes: > - While the Vitess Operator is located in a different repository, we also need to do a release for it. > - The Operator follows the same cycle: RC1 -> GA -> Patches. > - Documentation for the pre-release of the Vitess Operator is available [here](https://github.com/planetscale/vitess-operator/blob/main/docs/release-process.md#prepare-for-release). -- **Update the release notes on `main`.** - > - One Pull Request against `main` must be created, it will contain the new release notes that we are adding in the Release Pull Request. - > - We open this Pull Request now to avoid waiting on the CI during release day. - > - All future changes to the release notes during the code freeze will need to be ported to both PRs: the one on `main` and the Release Pull Request. 
- **Update the website documentation.** > - We want to open a preparatory **draft** Pull Request to update the documentation. > - There are several pages we want to update: - > - [The releases page](https://vitess.io/docs/releases/), we must add the new release to the list with all its information and link. The links can be broken (404 error) while we are preparing for the release, this is fine. - > - [The local install page](https://vitess.io/docs/get-started/local/), we must use the proper version increment for this guide and the proper SHA. The SHA will have to be modified once the Release Pull Request and the release is tagged is merged. + > - [The releases page](https://vitess.io/docs/releases/): we must add the new release to the list with all its information and link. The links can be broken (404 error) while we are preparing for the release, this is fine. + > - [The local install page](https://vitess.io/docs/get-started/local/): we must use the proper version increment for this guide and the proper SHA. The SHA will have to be modified once the Release Pull Request is merged and the release is tagged. > - If we are doing a GA or RC release follow the instructions below: > - There are two scripts in the website repository in `./tools/{ga|rc}_release.sh`, use them to update the website documentation. The scripts automate: - > - For an RC, we need to create a new version in the sidebar and mark the current version as RC. - > - For a GA, we need to mark the version we are releasing as "Stable" and the next one as "Development". + > - For an RC, we need to create a new entry in the sidebar which represents the next version on `main` and mark the version we are releasing as RC. + > - For a GA, we need to mark the version we are releasing as "Stable" and the next one as "Development". 
-### Release +----- + +## Release On the release day, there are several things to do: @@ -202,7 +99,7 @@ On the release day, there are several things to do: - **Tag the Vitess release.** > - A guide on how to tag a version is available in the [How To Release Vitess](#how-to-release-vitess) section. - **Update the release notes on `main`.** - > - During the code freeze, we created a Pull Request against `main` to update the release notes. It must be merged. + > - One Pull Request against `main` must be created, it will contain the new release notes that we are adding in the Release Pull Request. - **Create the corresponding Vitess operator release.** > - Applies only to versions greater or equal to `v14.0.0`. > - If we are doing an RC release, then we will need to create the Vitess Operator RC too. If we are doing a GA release, we're also doing a GA release in the Operator. @@ -224,18 +121,22 @@ On the release day, there are several things to do: > - This step is even more important for GA releases as we often include a link to _arewefastyet_ in the blog post. > - The benchmarks need to complete before announcing the blog posts or before they get cross-posted. - **Go back to dev mode on the release branch.** - > - The version constants across the codebase must be updated to `SNAPSHOT`. -- **Build k8s Docker images and publish them** + > - The version constants across the codebase must be updated to `SNAPSHOT`. +- **Build k8s Docker images and publish them.** > - The docker image for `base`, `lite`, etc are built automatically by DockerHub. The k8s images however are dependent on these images and are required to be built manually. > - These images should be built after the `base` image has been built and available on DockerHub. - > - To build and publish these images, run `./release.sh` from the directory `vitess/docker`. + > - To build and publish these images, checkout the new release tag that was just created and run `./release.sh` from the directory `./docker`. 
-### Post-Release +----- + +## Post-Release Once the release is over, we need to announce it on both Slack and Twitter. We also want to make sure the blog post was cross-posted, if applicable. We need to verify that _arewefastyet_ has finished the benchmark too. -### How to prepare the release of Vitess +----- + +## How to prepare the release of Vitess > In this example our current version is `v14.0.3` and we release the version `v15.0.0`. > Alongside Vitess' release, we also release a new version of the operator. @@ -254,8 +155,8 @@ We need to verify that _arewefastyet_ has finished the benchmark too. ``` 2. Creation of the Release Pull Request. - > This step will create the Release Pull Request that will then be reviewed ahead of the release day. - > The merge commit of that Pull Request will be used during the release day to tag the release. + > This step will create the Release Pull Request that will then be reviewed ahead of the release day. + > The merge commit of that Pull Request will be used during the release day to tag the release. 1. Run the `create_release` script using the Makefile: 1. Release Candidate: ```shell @@ -268,18 +169,15 @@ We need to verify that _arewefastyet_ has finished the benchmark too. The script will prompt you `Pausing so release notes can be added. Press enter to continue`. We are now going to generate the release notes, continue to the next sub-step. - 2. Run the following command to generate the release notes: - 1. Release Candidate: - ```shell - go run ./go/tools/release-notes --from "v14.0.3" --to "HEAD" --version "v15.0.0-rc1" --summary "./changelog/15.0/15.0.0/summary.md" [--threads=[0-9.]] - ``` - 2. General Availability: - ```shell - go run ./go/tools/release-notes --from "v14.0.3" --to "HEAD" --version "v15.0.0" --summary "./changelog/15.0/15.0.0/summary.md" [--threads=[0-9.]] - ``` - - > Important note: The release note generation fetches a lot of data from the GitHub API. You might reach the API request limit. 
- In which case you should use the `--threads=` flag and set an integer value lower than 10 (the default). + 2. Run the following command to generate the release notes. Note that you can omit the `--summary` flag if there is no summary. + ```shell + go run ./go/tools/release-notes --version "v15.0.0" --summary "./changelog/15.0/15.0.0/summary.md" + ``` + + > Make sure to also run `go run ./go/tools/releases/releases.go` to update the `./changelog` directory. + + > Important note: The release note generation fetches a lot of data from the GitHub API. You might reach the API request limit. + In which case you should use the `--threads=` flag and set an integer value lower than 10 (the default). This command will generate the release notes by looking at all the commits between the tag `v14.0.3` and the reference `HEAD`. It will also use the file located in `./changelog/15.0/15.0.0/summary.md` to prefix the release notes with a text that the maintainers wrote before the release. @@ -290,25 +188,27 @@ We need to verify that _arewefastyet_ has finished the benchmark too. 4. If we are doing an RC release it means we created a new branch from `main`. We need to update `main` with the next SNAPSHOT version. If `main` was on `15.0.0-SNAPSHOT`, we need to update it to `16.0.0-SNAPSHOT`. A simple find and replace in the IDE is sufficient, there only a handful of files that must be changed: `version.go` and several java files. -### How To Release Vitess +----- + +## How To Release Vitess This section is divided into two parts: - [Creation of the tags and release notes](#creation-of-the-tags-and-release-notes). - [Creating Release or Release Candidate on the GitHub UI](#creating-release-or-release-candidate-on-the-github-ui) -#### Creation of the tags and release notes +### Creation of the tags and release notes > This step implies that you have created a [Release Pull Request](#how-to-prepare-the-release-of-vitess) beforehand and that it has been reviewed. 
> The merge commit of this Release Pull Request will be used to tag the release. -> +> > In this example our current version is `v14.0.3` and we release the version `v15.0.0`. > Alongside Vitess' release, we also release a new version of the operator. > Since we are releasing a release candidate here, the new version of the operator will also be a release candidate. > In this example, the new operator version is `2.8.0`. -> +> > It is important to note that before the RC, there is a code freeze during which we create the release branch. > > The release branch in this example is `release-15.0`. -> +> > The example also assumes that `origin` is the `vitessio/vitess` remote. 1. Fetch `github.com/vitessio/vitess`'s remote. @@ -330,26 +230,26 @@ This section is divided into two parts: make BASE_BRANCH="release-15.0" BASE_REMOTE="origin" RELEASE_VERSION="15.0.0-rc1" DEV_VERSION="15.0.0-SNAPSHOT" back_to_dev_mode ``` > You will then need to follow the instructions given by the output of the back_to_dev_mode Makefile command. You will need to push the newly created branch and open a Pull Request. - + 6. Release the tag on GitHub UI as explained in the following section. -#### Creating Release or Release Candidate on the GitHub UI +### Creating Release or Release Candidate on the GitHub UI > In the below steps, we use `v8.0.0` and `v9.0.0` as an example. -##### 1. Open the releases page +#### 1. Open the releases page On Vitess' GitHub repository main page, click on Code -> [Releases](https://github.com/vitessio/vitess/releases). ![alt text](.images/release-01.png) -##### 2. Draft a new release +#### 2. Draft a new release On the Releases page, click on `Draft a new release`. ![alt text](.images/release-02.png) -##### 3. Tag a new release +#### 3. Tag a new release When drafting a new release, we are asked to choose the release's tag and branch. We format the tag this way: `v9.0.0`. 
We append `-rcN` to the tag name for release candidates, @@ -357,7 +257,7 @@ with `N` being the increment of the release candidate. ![alt text](.images/release-03.png) -##### 4. Add release notes and release +#### 4. Add release notes and release Copy/paste the previously built Release Notes into the description of the release. @@ -367,7 +267,9 @@ And finally, click on `Publish release`. ![alt text](.images/release-04.png) -### How To Code Freeze +----- + +## How To Code Freeze In this example we are going to do a code freeze on the `release-15.0` branch. If we are doing a release candidate, there won't be a branch yet, hence we need to create it. @@ -397,10 +299,12 @@ The script will prompt the command that will allow you to push the code freeze c Remember, you should also disable the Launchable integration from the newly created release branch. -### How To Merge During Code Freeze +----- + +## How To Merge During Code Freeze > **Warning:** It is not advised to merge a PR during code-freeze. If it is deemed absolutely necessary, then the following steps can be followed. - + The PR that needs to be merged will be failing on the `Code Freeze` CI. To merge this PR, we'll have to mark this CI action as not required. You will need administrator privileges on the vitess repository to be able to make this change. @@ -411,15 +315,17 @@ You will need administrator privileges on the vitess repository to be able to ma 5. Within this list find `Code Freeze` and click on the cross next to it to remove it from this list. 6. Save your changes on the bottom of the page. 7. Refresh the page of the PR, and you should be able to merge it. -8. After merging the PR, you need to do 2 more things - - 1. Add `Code Freeze` back as a required check. - 2. Check if the release PR has any merge conflicts. If it does, fix them and push. +8. After merging the PR, you need to do 2 more things - + 1. Add `Code Freeze` back as a required check. + 2. Check if the release PR has any merge conflicts. 
If it does, fix them and push. -### Java Packages: Deploy & Release +----- + +## Java Packages: Deploy & Release > **Warning:** This section's steps need to be executed only when releasing a new major version of Vitess, > or if the Java packages changed from one minor/patch version to another. -> +> > For this example, we assume we juste released `v12.0.0`. 1. Checkout to the release commit. @@ -443,7 +349,7 @@ You will need administrator privileges on the vitess repository to be able to ma 4. Deploy (upload) the Java code to the oss.sonatype.org repository: - > **Warning:** After the deployment, the Java packages will be automatically released. Once released, you cannot delete them. The only option is to upload a newer version (e.g. increment the patch level).

+ > **Warning:** After the deployment, the Java packages will be automatically released. Once released, you cannot delete them. The only option is to upload a newer version (e.g. increment the patch level).

```bash cd ./java/ diff --git a/doc/internal/release/java-packages.md b/doc/internal/release/java-packages.md new file mode 100644 index 00000000000..3b3d2a38472 --- /dev/null +++ b/doc/internal/release/java-packages.md @@ -0,0 +1,27 @@ +# Java Packages + +We publish binary packages for our [JDBC driver and Java client on Maven Central](https://search.maven.org/#search|ga|1|g:"io.vitess"). + +To do so, we use the http://oss.sonatype.org/ repository. +New packages must be uploaded there ("deployed") and will be automatically published ("released"). +Once they are released there, they will be automatically synchronized with Maven Central. +The synchronization takes only several minutes, but the update on http://search.maven.org may take up to two hours. + +## Access to oss.sonatype.org + +[Sign up here.](https://issues.sonatype.org/secure/Signup!default.jspa) +Then you must be added as a member to our `io.vitess` namespace. +Therefore, file a JIRA ticket with Sonatype to get added ([example for a different namespace](https://issues.sonatype.org/browse/OSSRH-30604)). + +## One-time setup + +### Set up GPG + +Follow [Sonatype's GPG instructions](https://central.sonatype.org/pages/working-with-pgp-signatures.html). + +Install `gpg-agent` (needed below) e.g. on Ubuntu via: `sudo apt-get install gnupg-agent`. +For Mac, you need to install 'gnupg' via 'brew install gnupg'. + +### Login configuration + +Create the `settings.xml` in the `$HOME/.m2/` directory as described in their [instructions](https://central.sonatype.org/pages/apache-maven.html). diff --git a/doc/internal/release/release-branches.md b/doc/internal/release/release-branches.md new file mode 100644 index 00000000000..876ec9070d3 --- /dev/null +++ b/doc/internal/release/release-branches.md @@ -0,0 +1,17 @@ +# Release Branches + +Each major and minor release (X.Y) should have a [release branch](https://github.com/vitessio/vitess/branches/all?query=release) named +`release-X.Y`. 
This branch should diverge from `main` when the release +is declared, after which point only bugfix PRs should be cherry-picked onto the branch. +All other activity on `main` will go out with a subsequent major or minor release. + +```shell +git checkout main +git pull --ff-only upstream main + +git checkout -b release-X.Y +git push upstream release-X.Y +``` + +The branches are named `release-X.Y` to distinguish them from point-in-time +tags, which are named `vX.Y.Z`. \ No newline at end of file diff --git a/doc/internal/release/release-tags.md b/doc/internal/release/release-tags.md new file mode 100644 index 00000000000..4136df1bbb9 --- /dev/null +++ b/doc/internal/release/release-tags.md @@ -0,0 +1,6 @@ +# Release Tags + +While the release branch is a moving target, release tags mark point-in-time +snapshots of the repository. Essentially, a tag assigns a human-readable name to +a specific Git commit hash. Although it's technically possible to reassign a tag +name to a different hash, we must never do this. \ No newline at end of file diff --git a/doc/internal/release/versioning.md b/doc/internal/release/versioning.md new file mode 100644 index 00000000000..b760e32d1b5 --- /dev/null +++ b/doc/internal/release/versioning.md @@ -0,0 +1,36 @@ +# Versioning + +Our versioning strategy is based on [VEP5](https://github.com/vitessio/enhancements/blob/main/veps/vep-5.md). + +## Major Release (vX) + +A new major release is needed when the public API changes in a +backward-incompatible way -- for example, when removing deprecated interfaces. + +Our public API includes (but is not limited to): + +* The VTGate [RPC interfaces](https://github.com/vitessio/vitess/tree/main/proto). +* The interfaces exposed by the VTGate client library in each language. + +Care must also be taken when changing the format of any data stored by a live +system, such as topology data or Vitess-internal tables (used for sequences, +distributed transactions, etc.). 
Although this data is considered as internal to +Vitess, if any change breaks the upgrade path for a live system (for example, +requiring that it be shut down and reinitialized from scratch), then it must be +considered as a breaking change. + +## Minor Release (vX.Y) + +A new minor release indicates that functionality has been added or changed in a +backward-compatible way. This should be the majority of normal releases. + +## Patch Release (vX.Y.Z) + +A patch release indicates that only a select set of bugfixes have been +cherry-picked onto the associated minor release. The expectation is that +upgrading by a patch release should be painless (not requiring any config +changes) and safe (isolated from active development on `main`). + +## Pre-Release Labels (vX.Y.Z-labelN) + +Pre-release versions should be labeled with a suffix like `-beta2` or `-rc1`. \ No newline at end of file diff --git a/doc/vtadmin/clusters.yaml b/doc/vtadmin/clusters.yaml index 55779df60b2..e4ed5335cc6 100644 --- a/doc/vtadmin/clusters.yaml +++ b/doc/vtadmin/clusters.yaml @@ -40,7 +40,16 @@ defaults: vtsql-discovery-tags: "tag1,tag2" # Username to send queries on behalf of. See package callerid. vtsql-effective-user: "my-effective-user" - + # Username used to make requests against vtgates in the cluster. Can be used with + # vtsql-credentials-password in place of vtsql-credentials-path-tmpl. + # If both vtsql-credentials-username and vtsql-credentials-path-tmpl are + # provided, vtsql-credentials-username takes precedent over username from vtsql-credentials-path-tmpl. + vtsql-credentials-username: "my-username" + # Password used to make requests against vtgates in the cluster. Used with + # vtsql-credentials-username in place of vtsql-credentials-path-tmpl. + # If both vtsql-credentials-password and vtsql-credentials-path-tmpl are + # provided, vtsql-credentials-password takes precedent over password from vtsql-credentials-path-tmpl. 
+ vtsql-credentials-password: "my-password" # VTAdmin also provides different RPC pools to gate the number of concurrent # requests it will make against vtctlds/vtgates in a given cluster, to prevent # overwhelming those components. diff --git a/docker/base/Dockerfile b/docker/base/Dockerfile index 50aedd982d5..bacf76209a5 100644 --- a/docker/base/Dockerfile +++ b/docker/base/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -ARG bootstrap_version=18.0 +ARG bootstrap_version=22.1 ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" FROM "${image}" diff --git a/docker/base/Dockerfile.mysql57 b/docker/base/Dockerfile.mysql57 index 32f50c246bb..f36eac096da 100644 --- a/docker/base/Dockerfile.mysql57 +++ b/docker/base/Dockerfile.mysql57 @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -ARG bootstrap_version=18.0 +ARG bootstrap_version=22.1 ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" FROM "${image}" diff --git a/docker/base/Dockerfile.percona57 b/docker/base/Dockerfile.percona57 index 0e1ae2567ab..c4abe42f959 100644 --- a/docker/base/Dockerfile.percona57 +++ b/docker/base/Dockerfile.percona57 @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -ARG bootstrap_version=18.0 +ARG bootstrap_version=22.1 ARG image="vitess/bootstrap:${bootstrap_version}-percona57" FROM "${image}" diff --git a/docker/base/Dockerfile.percona80 b/docker/base/Dockerfile.percona80 index b3e27d379eb..7e24001895a 100644 --- a/docker/base/Dockerfile.percona80 +++ b/docker/base/Dockerfile.percona80 @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-ARG bootstrap_version=18.0 +ARG bootstrap_version=22.1 ARG image="vitess/bootstrap:${bootstrap_version}-percona80" FROM "${image}" diff --git a/docker/bootstrap/CHANGELOG.md b/docker/bootstrap/CHANGELOG.md index 455406f07ae..19b84857a08 100644 --- a/docker/bootstrap/CHANGELOG.md +++ b/docker/bootstrap/CHANGELOG.md @@ -72,4 +72,28 @@ List of changes between bootstrap image versions. ## [18.0] - 2023-06-07 ### Changes -- Update build to golang 1.20.5 \ No newline at end of file +- Update build to golang 1.20.5 +- +## [19] - 2023-06-07 +### Changes +- Update build to golang 1.20.5 + +## [20] - 2023-08-03 +### Changes +- Bump all images to bullseye base image + +## [21] - 2023-08-25 +### Changes +- Update build to golang 1.21.0 + +## [22] - 2023-09-07 +### Changes +- Update build to golang 1.21.1 + +## [22.0] - 2023-10-05 +### Changes +- Update build to golang 1.21.2 + +## [22.1] - 2023-10-10 +### Changes +- Update build to golang 1.21.3 diff --git a/docker/bootstrap/Dockerfile.common b/docker/bootstrap/Dockerfile.common index 5c6e4da5930..39b0c16566a 100644 --- a/docker/bootstrap/Dockerfile.common +++ b/docker/bootstrap/Dockerfile.common @@ -1,4 +1,4 @@ -FROM --platform=linux/amd64 golang:1.20.5-buster +FROM --platform=linux/amd64 golang:1.21.3-bullseye # Install Vitess build dependencies RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ @@ -22,7 +22,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-ins ENV VTROOT /vt/src/vitess.io/vitess ENV VTDATAROOT /vt/vtdataroot ENV VTPORTSTART 15000 -ENV PATH $VTROOT/bin:$VTROOT/dist/maven/bin:$VTROOT/dist/chromedriver:$PATH +ENV PATH $VTROOT/bin:$VTROOT/dist/maven/bin:$PATH ENV USER vitess # Copy files needed for bootstrap diff --git a/docker/bootstrap/Dockerfile.mysql57-arm64v8 b/docker/bootstrap/Dockerfile.mysql57-arm64v8 deleted file mode 100644 index 96b08413aa1..00000000000 --- a/docker/bootstrap/Dockerfile.mysql57-arm64v8 +++ /dev/null @@ 
-1,65 +0,0 @@ -FROM debian:9 AS builder - -WORKDIR /opt -#Build xtrabackup -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - autoconf \ - automake \ - bison \ - build-essential \ - bzr \ - ca-certificates \ - cmake \ - flex \ - libaio-dev \ - libcurl4-gnutls-dev \ - libev-dev \ - libgcrypt11-dev \ - libncurses-dev \ - libtool \ - mysql-client \ - vim-common \ - wget \ - zlib1g-dev && \ - wget https://github.com/percona/percona-xtrabackup/archive/percona-xtrabackup-2.4.13.tar.gz \ - -P /opt && \ - tar zxf /opt/percona-xtrabackup-2.4.13.tar.gz -C /opt && \ - rm /opt/percona-xtrabackup-2.4.13.tar.gz && \ - cd /opt/percona-xtrabackup-percona-xtrabackup-2.4.13 && \ - mkdir bld && cd bld && \ - cmake .. -DBUILD_CONFIG=xtrabackup_release -DWITH_MAN_PAGES=OFF \ - -DDOWNLOAD_BOOST=1 -DWITH_BOOST=/usr/local && \ - make -j4 && \ - make install - -ARG bootstrap_version -ARG image="vitess/bootstrap:${bootstrap_version}-common" - -FROM --platform=linux/arm64/v8 "${image}" - -# Install MySQL 5.7 -RUN add-apt-repository 'deb http://ftp.debian.org/debian sid main' && \ - apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - libmysqlclient-dev \ - mysql-client-5.7 \ - mysql-server-5.7 \ - libdbd-mysql-perl \ - python3-distutils-extra \ - rsync \ - libev4 \ - libcurl4-openssl-dev \ - libaio1 && \ - rm -rf /var/lib/apt/lists/* && \ - mkdir -p /usr/local/xtrabackup/bin && \ - mkdir -p /usr/local/xtrabackup/lib - -# Bootstrap Vitess -WORKDIR /vt/src/vitess.io/vitess -COPY --from=builder /usr/local/xtrabackup/bin /usr/local/xtrabackup/bin -COPY --from=builder /usr/local/xtrabackup/lib /usr/local/xtrabackup/lib -ENV PATH="/usr/local/xtrabackup/bin:${PATH}" -ENV MYSQL_FLAVOR MySQL56 -USER vitess -RUN ./bootstrap.sh diff --git a/docker/bootstrap/Dockerfile.mysql80 b/docker/bootstrap/Dockerfile.mysql80 index e064c638d99..059f01b8101 100644 --- a/docker/bootstrap/Dockerfile.mysql80 +++ 
b/docker/bootstrap/Dockerfile.mysql80 @@ -6,9 +6,9 @@ FROM --platform=linux/amd64 "${image}" # Install MySQL 8.0 RUN for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 8C718D3B5072E1F5 && break; done && \ for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 467B942D3A79BD29 && break; done && \ - add-apt-repository 'deb http://repo.mysql.com/apt/debian/ buster mysql-8.0' && \ + add-apt-repository 'deb http://repo.mysql.com/apt/debian/ bullseye mysql-8.0' && \ for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \ - echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list && \ + echo 'deb http://repo.percona.com/apt bullseye main' > /etc/apt/sources.list.d/percona.list && \ { \ echo debconf debconf/frontend select Noninteractive; \ echo percona-server-server-8.0 percona-server-server/root_password password 'unused'; \ diff --git a/docker/bootstrap/Dockerfile.percona57 b/docker/bootstrap/Dockerfile.percona57 index 2d8beb5e95d..febe09fd8bf 100644 --- a/docker/bootstrap/Dockerfile.percona57 +++ b/docker/bootstrap/Dockerfile.percona57 @@ -5,16 +5,15 @@ FROM --platform=linux/amd64 "${image}" # Install Percona 5.7 RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \ - add-apt-repository 'deb http://repo.percona.com/apt buster main' && \ + add-apt-repository 'deb http://repo.percona.com/apt bullseye main' && \ { \ echo debconf debconf/frontend select Noninteractive; \ echo percona-server-server-5.7 percona-server-server/root_password password 'unused'; \ echo percona-server-server-5.7 percona-server-server/root_password_again password 'unused'; \ } | debconf-set-selections && \ apt-get update && \ - apt-get install -y --no-install-recommends \ - percona-server-server-5.7 \ - libperconaserverclient20-dev 
percona-xtrabackup-24 && \ + apt-get install -y --no-install-recommends percona-server-server-5.7 && \ + apt-get install -y --no-install-recommends libperconaserverclient20-dev percona-xtrabackup-24 && \ rm -rf /var/lib/apt/lists/* # Bootstrap Vitess diff --git a/docker/bootstrap/Dockerfile.percona80 b/docker/bootstrap/Dockerfile.percona80 index 5dadc32cd0a..446ec554612 100644 --- a/docker/bootstrap/Dockerfile.percona80 +++ b/docker/bootstrap/Dockerfile.percona80 @@ -5,7 +5,7 @@ FROM --platform=linux/amd64 "${image}" # Install Percona 8.0 RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done \ - && echo 'deb http://repo.percona.com/ps-80/apt buster main' > /etc/apt/sources.list.d/percona.list && \ + && echo 'deb http://repo.percona.com/ps-80/apt bullseye main' > /etc/apt/sources.list.d/percona.list && \ { \ echo debconf debconf/frontend select Noninteractive; \ echo percona-server-server-8.0 percona-server-server/root_password password 'unused'; \ @@ -21,7 +21,7 @@ RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.c rsync \ libev4 \ # && rm -f /etc/apt/sources.list.d/percona.list \ - && echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list \ + && echo 'deb http://repo.percona.com/apt bullseye main' > /etc/apt/sources.list.d/percona.list \ # { \ # echo debconf debconf/frontend select Noninteractive; \ # echo percona-server-server-8.0 percona-server-server/root_password password 'unused'; \ diff --git a/docker/bootstrap/build.sh b/docker/bootstrap/build.sh index a3ac24d916d..d84e37fced9 100755 --- a/docker/bootstrap/build.sh +++ b/docker/bootstrap/build.sh @@ -47,11 +47,9 @@ fi chmod -R o=rx *; arch=$(uname -m) -[ "$arch" == "aarch64" ] && [ $flavor != "common" ] && arch_ext='-arm64v8' - base_image="${base_image:-vitess/bootstrap:$version-common}" -image="${image:-vitess/bootstrap:$version-$flavor$arch_ext}" 
+image="${image:-vitess/bootstrap:$version-$flavor}" while [ $# -gt 0 ]; do if [[ $1 == *"--"* ]]; then @@ -61,6 +59,11 @@ while [ $# -gt 0 ]; do shift done -if [ -f "docker/bootstrap/Dockerfile.$flavor$arch_ext" ]; then - docker build --no-cache -f docker/bootstrap/Dockerfile.$flavor$arch_ext -t $image --build-arg bootstrap_version=$version --build-arg image=$base_image . +if [ -f "docker/bootstrap/Dockerfile.$flavor" ]; then + docker build \ + -f docker/bootstrap/Dockerfile.$flavor \ + -t $image \ + --build-arg bootstrap_version=$version \ + --build-arg image=$base_image \ + . fi diff --git a/docker/k8s/Dockerfile b/docker/k8s/Dockerfile index 30ff33952bc..3ba46595a83 100644 --- a/docker/k8s/Dockerfile +++ b/docker/k8s/Dockerfile @@ -19,14 +19,6 @@ FROM vitess/base:${VT_BASE_VER} AS base FROM debian:${DEBIAN_VER} -# TODO: remove when https://github.com/vitessio/vitess/issues/3553 is fixed -RUN apt-get update && \ - apt-get upgrade -qq && \ - apt-get install default-mysql-client -qq --no-install-recommends && \ - apt-get autoremove && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* - # Set up Vitess environment (just enough to run pre-built Go binaries) ENV VTROOT /vt/src/vitess.io/vitess ENV VTDATAROOT /vtdataroot @@ -51,6 +43,7 @@ COPY --from=base /vt/bin/vtgate /vt/bin/ COPY --from=base /vt/bin/vttablet /vt/bin/ COPY --from=base /vt/bin/vtbackup /vt/bin/ COPY --from=base /vt/bin/vtadmin /vt/bin/ +COPY --from=base /vt/bin/vtorc /vt/bin/ # copy web admin files COPY --from=base $VTROOT/web /vt/web/ diff --git a/docker/k8s/orchestrator/Dockerfile b/docker/k8s/orchestrator/Dockerfile deleted file mode 100644 index e3e8f3ac346..00000000000 --- a/docker/k8s/orchestrator/Dockerfile +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -ARG VT_BASE_VER=latest -ARG DEBIAN_VER=stable-slim - -FROM vitess/k8s:${VT_BASE_VER} AS k8s - -FROM debian:${DEBIAN_VER} -ARG ORC_VER='3.2.3' - -RUN apt-get update && \ - apt-get upgrade -qq && \ - apt-get install wget ca-certificates jq -qq --no-install-recommends && \ - wget https://github.com/openark/orchestrator/releases/download/v${ORC_VER}/orchestrator_${ORC_VER}_amd64.deb && \ - dpkg -i orchestrator_${ORC_VER}_amd64.deb && \ - rm orchestrator_${ORC_VER}_amd64.deb && \ - apt-get purge wget -qq && \ - apt-get autoremove -qq && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* - -# Copy vtctlclient to be used to notify -COPY --from=k8s /vt/bin/vtctlclient /usr/bin/ - -WORKDIR /usr/local/orchestrator -CMD ["./orchestrator", "--config=/conf/orchestrator.conf.json", "http"] diff --git a/docker/k8s/vtadmin/Dockerfile b/docker/k8s/vtadmin/Dockerfile index 837ac8a525a..f952681d3c9 100644 --- a/docker/k8s/vtadmin/Dockerfile +++ b/docker/k8s/vtadmin/Dockerfile @@ -17,7 +17,7 @@ ARG DEBIAN_VER=bullseye-slim FROM vitess/k8s:${VT_BASE_VER} AS k8s -FROM node:16-${DEBIAN_VER} as node +FROM node:18-${DEBIAN_VER} as node # Prepare directory structure. 
RUN mkdir -p /vt/web diff --git a/docker/k8s/pmm-client/Dockerfile b/docker/k8s/vtorc/Dockerfile similarity index 56% rename from docker/k8s/pmm-client/Dockerfile rename to docker/k8s/vtorc/Dockerfile index 732e2e0a2ee..b62b30ee676 100644 --- a/docker/k8s/pmm-client/Dockerfile +++ b/docker/k8s/vtorc/Dockerfile @@ -18,18 +18,21 @@ ARG DEBIAN_VER=stable-slim FROM vitess/k8s:${VT_BASE_VER} AS k8s FROM debian:${DEBIAN_VER} -ARG PMM_CLIENT_VER='1.17.4' - -RUN apt-get update && \ - apt-get upgrade -qq && \ - apt-get install procps wget ca-certificates -qq --no-install-recommends && \ - wget https://www.percona.com/redir/downloads/pmm-client/${PMM_CLIENT_VER}/binary/debian/buster/x86_64/pmm-client_${PMM_CLIENT_VER}-1.buster_amd64.deb && \ - dpkg -i pmm-client_${PMM_CLIENT_VER}-1.buster_amd64.deb && \ - rm pmm-client_${PMM_CLIENT_VER}-1.buster_amd64.deb && \ - apt-get purge wget ca-certificates -qq && \ - apt-get autoremove -qq && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* - -# Copy CA certs for https calls + +# Set up Vitess environment (just enough to run pre-built Go binaries) +ENV VTROOT /vt + +# Prepare directory structure. 
+RUN mkdir -p /vt/bin && mkdir -p /vtdataroot + +# Copy certs to allow https calls COPY --from=k8s /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt + +# Copy binaries +COPY --from=k8s /vt/bin/vtorc /vt/bin/ + +# add vitess user/group and add permissions +RUN groupadd -r --gid 2000 vitess && \ + useradd -r -g vitess --uid 1000 vitess && \ + chown -R vitess:vitess /vt && \ + chown -R vitess:vitess /vtdataroot diff --git a/docker/k8s/vttablet/Dockerfile b/docker/k8s/vttablet/Dockerfile index 95453a69771..dd504d7860d 100644 --- a/docker/k8s/vttablet/Dockerfile +++ b/docker/k8s/vttablet/Dockerfile @@ -19,14 +19,6 @@ FROM vitess/k8s:${VT_BASE_VER} AS k8s FROM debian:${DEBIAN_VER} -# TODO: remove when https://github.com/vitessio/vitess/issues/3553 is fixed -RUN apt-get update && \ - apt-get upgrade -qq && \ - apt-get install wget default-mysql-client jq curl -qq --no-install-recommends && \ - apt-get autoremove && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* - # Set up Vitess environment (just enough to run pre-built Go binaries) ENV VTROOT /vt ENV VTDATAROOT /vtdataroot diff --git a/docker/lite/Dockerfile.mysql57 b/docker/lite/Dockerfile.mysql57 index 49fcf5ca3f6..c85be4df605 100644 --- a/docker/lite/Dockerfile.mysql57 +++ b/docker/lite/Dockerfile.mysql57 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=18.0 +ARG bootstrap_version=22.1 ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" FROM "${image}" AS builder diff --git a/docker/lite/Dockerfile.mysql80 b/docker/lite/Dockerfile.mysql80 index 8d14065066a..831a19e5809 100644 --- a/docker/lite/Dockerfile.mysql80 +++ b/docker/lite/Dockerfile.mysql80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. 
-ARG bootstrap_version=18.0 +ARG bootstrap_version=22.1 ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" FROM "${image}" AS builder diff --git a/docker/lite/Dockerfile.percona57 b/docker/lite/Dockerfile.percona57 index f06bdc10a8c..324a2760753 100644 --- a/docker/lite/Dockerfile.percona57 +++ b/docker/lite/Dockerfile.percona57 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=18.0 +ARG bootstrap_version=22.1 ARG image="vitess/bootstrap:${bootstrap_version}-percona57" FROM "${image}" AS builder @@ -33,7 +33,7 @@ USER vitess RUN make install PREFIX=/vt/install # Start over and build the final image. -FROM debian:buster-slim +FROM debian:bullseye-slim # Install dependencies COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh diff --git a/docker/lite/Dockerfile.percona80 b/docker/lite/Dockerfile.percona80 index 5490eb6e79a..e09a0c1dd9f 100644 --- a/docker/lite/Dockerfile.percona80 +++ b/docker/lite/Dockerfile.percona80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=18.0 +ARG bootstrap_version=22.1 ARG image="vitess/bootstrap:${bootstrap_version}-percona80" FROM "${image}" AS builder @@ -33,7 +33,7 @@ USER vitess RUN make install PREFIX=/vt/install # Start over and build the final image. -FROM debian:buster-slim +FROM debian:bullseye-slim # Install dependencies COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh diff --git a/docker/lite/Dockerfile.testing b/docker/lite/Dockerfile.testing index 08a43d99a18..aa47c814f39 100644 --- a/docker/lite/Dockerfile.testing +++ b/docker/lite/Dockerfile.testing @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. 
-ARG bootstrap_version=18.0 +ARG bootstrap_version=22.1 ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" FROM "${image}" AS builder @@ -33,7 +33,7 @@ USER vitess RUN make install-testing PREFIX=/vt/install # Start over and build the final image. -FROM debian:buster-slim +FROM debian:bullseye-slim # Install dependencies COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh diff --git a/docker/lite/Dockerfile.ubi7.mysql57 b/docker/lite/Dockerfile.ubi7.mysql57 index 5307ad98ed5..0f02b151217 100644 --- a/docker/lite/Dockerfile.ubi7.mysql57 +++ b/docker/lite/Dockerfile.ubi7.mysql57 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=18.0 +ARG bootstrap_version=22.1 ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" FROM "${image}" AS builder @@ -36,6 +36,7 @@ RUN make install PREFIX=/vt/install FROM registry.access.redhat.com/ubi7/ubi:latest # Install keys and dependencies +RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022 RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ && yum install -y --setopt=alwaysprompt=no gnupg \ && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 ) \ diff --git a/docker/lite/Dockerfile.ubi7.mysql80 b/docker/lite/Dockerfile.ubi7.mysql80 index c8fa02653bf..163be58c32b 100644 --- a/docker/lite/Dockerfile.ubi7.mysql80 +++ b/docker/lite/Dockerfile.ubi7.mysql80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. 
-ARG bootstrap_version=18.0 +ARG bootstrap_version=22.1 ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" FROM "${image}" AS builder @@ -36,6 +36,7 @@ RUN make install PREFIX=/vt/install FROM registry.access.redhat.com/ubi7/ubi:latest # Install keys and dependencies +RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022 RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ && yum install -y --setopt=alwaysprompt=no gnupg \ && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 ) \ diff --git a/docker/lite/Dockerfile.ubi7.percona57 b/docker/lite/Dockerfile.ubi7.percona57 index b5dc4006b9d..5df1129c6f3 100644 --- a/docker/lite/Dockerfile.ubi7.percona57 +++ b/docker/lite/Dockerfile.ubi7.percona57 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=18.0 +ARG bootstrap_version=22.1 ARG image="vitess/bootstrap:${bootstrap_version}-percona57" FROM "${image}" AS builder diff --git a/docker/lite/Dockerfile.ubi7.percona80 b/docker/lite/Dockerfile.ubi7.percona80 index a8a91d117f0..c55a4e2cfdc 100644 --- a/docker/lite/Dockerfile.ubi7.percona80 +++ b/docker/lite/Dockerfile.ubi7.percona80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=18.0 +ARG bootstrap_version=22.1 ARG image="vitess/bootstrap:${bootstrap_version}-percona80" FROM "${image}" AS builder diff --git a/docker/lite/Dockerfile.ubi8.arm64.mysql80 b/docker/lite/Dockerfile.ubi8.arm64.mysql80 index b074e6cc014..fc479c149f1 100644 --- a/docker/lite/Dockerfile.ubi8.arm64.mysql80 +++ b/docker/lite/Dockerfile.ubi8.arm64.mysql80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. 
-ARG bootstrap_version=18.0 +ARG bootstrap_version=22.1 ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" FROM "${image}" AS builder @@ -36,6 +36,7 @@ RUN make cross-install PREFIX=/vt/install GOOS=linux GOARCH=arm64 FROM registry.access.redhat.com/ubi8/ubi:latest # Install keys and dependencies +RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022 RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ && yum install -y --setopt=alwaysprompt=no gnupg \ && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 99DB70FAE1D7CE227FB6488205B555B38483C65D 3A79BD29 A4A9406876FCBD3C456770C88C718D3B5072E1F5 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1 ) \ @@ -54,7 +55,7 @@ RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ && rm -f /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm RUN echo H4sICIDAHmICA2ZvbwDVkDFLxEAQhfv9FVfY7o4RhCBsoXJcIXKHwUIOi7m5MVk2yS6zG0//vYlRULTU4rrHvOHN+2ZL5Q4TP6oeO7bX3Od1pcuFXlyNUzVZg7S2yTmmCwDsgzjuDSUyB5SDI2+QzOChcyJBEnwkPOPQZijNuTkrigKmsHUFJ1MeCjUQEqg61tQweVtM0vOrfXItj1eAM0H0DiR2erTgbnOrV5uVvlk+6M+Kinvctby3p0ptqRziHjOnnxz3s/FnKJcxVlkYu/+k4Zcs+AvM8n3+jWW8MBc2NO6FZILUMEsoYQ76UvWI/vAGB/SOZZsCAAA= | base64 -d | gzip -dc > /etc/yum.repos.d/CentOS-Base.repo \ && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs --enablerepo c8base --enablerepo c8updates --enablerepo c8extras libev numactl-libs sysstat strace \ - && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs https://download-ib01.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/g/gperftools-libs-2.7-9.el8.aarch64.rpm https://download-ib01.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/j/jemalloc-5.2.1-2.el8.aarch64.rpm https://download-ib01.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/l/libunwind-1.3.1-3.el8.aarch64.rpm + && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs 
https://dl.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/g/gperftools-libs-2.7-9.el8.aarch64.rpm https://dl.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/j/jemalloc-5.2.1-2.el8.aarch64.rpm https://dl.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/l/libunwind-1.3.1-3.el8.aarch64.rpm RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \ && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates gnupg libaio libcurl \ procps-ng rsync wget openssl hostname curl tzdata make \ diff --git a/docker/lite/Dockerfile.ubi8.mysql80 b/docker/lite/Dockerfile.ubi8.mysql80 index 74c4a026171..48c5aaaa086 100644 --- a/docker/lite/Dockerfile.ubi8.mysql80 +++ b/docker/lite/Dockerfile.ubi8.mysql80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=18.0 +ARG bootstrap_version=22.1 ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" FROM "${image}" AS builder @@ -36,6 +36,7 @@ RUN make install PREFIX=/vt/install FROM registry.access.redhat.com/ubi8/ubi:latest # Install keys and dependencies +RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022 RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ && yum install -y --setopt=alwaysprompt=no gnupg \ && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 99DB70FAE1D7CE227FB6488205B555B38483C65D 3A79BD29 A4A9406876FCBD3C456770C88C718D3B5072E1F5 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1 ) \ diff --git a/docker/lite/install_dependencies.sh b/docker/lite/install_dependencies.sh index 92f7ab67397..2175df5def3 100755 --- a/docker/lite/install_dependencies.sh +++ b/docker/lite/install_dependencies.sh @@ -84,23 +84,25 @@ mysql57) ;; mysql80) mysql8_version=8.0.30 - do_fetch 
https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/libmysqlclient21_${mysql8_version}-1debian10_amd64.deb /tmp/libmysqlclient21_${mysql8_version}-1debian10_amd64.deb - do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client-core_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-client-core_${mysql8_version}-1debian10_amd64.deb - do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client-plugins_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-client-plugins_${mysql8_version}-1debian10_amd64.deb - do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-client_${mysql8_version}-1debian10_amd64.deb - do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-client_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-client_${mysql8_version}-1debian10_amd64.deb - do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-server-core_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-server-core_${mysql8_version}-1debian10_amd64.deb - do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-server_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-server_${mysql8_version}-1debian10_amd64.deb - do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-server_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-server_${mysql8_version}-1debian10_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-common_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-common_${mysql8_version}-1debian11_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/libmysqlclient21_${mysql8_version}-1debian11_amd64.deb /tmp/libmysqlclient21_${mysql8_version}-1debian11_amd64.deb + 
do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client-core_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-community-client-core_${mysql8_version}-1debian11_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client-plugins_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-community-client-plugins_${mysql8_version}-1debian11_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-community-client_${mysql8_version}-1debian11_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-client_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-client_${mysql8_version}-1debian11_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-server-core_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-community-server-core_${mysql8_version}-1debian11_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-server_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-community-server_${mysql8_version}-1debian11_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-server_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-server_${mysql8_version}-1debian11_amd64.deb PACKAGES=( - /tmp/libmysqlclient21_${mysql8_version}-1debian10_amd64.deb - /tmp/mysql-community-client-core_${mysql8_version}-1debian10_amd64.deb - /tmp/mysql-community-client-plugins_${mysql8_version}-1debian10_amd64.deb - /tmp/mysql-community-client_${mysql8_version}-1debian10_amd64.deb - /tmp/mysql-client_${mysql8_version}-1debian10_amd64.deb - /tmp/mysql-community-server-core_${mysql8_version}-1debian10_amd64.deb - /tmp/mysql-community-server_${mysql8_version}-1debian10_amd64.deb - /tmp/mysql-server_${mysql8_version}-1debian10_amd64.deb + 
/tmp/mysql-common_${mysql8_version}-1debian11_amd64.deb + /tmp/libmysqlclient21_${mysql8_version}-1debian11_amd64.deb + /tmp/mysql-community-client-core_${mysql8_version}-1debian11_amd64.deb + /tmp/mysql-community-client-plugins_${mysql8_version}-1debian11_amd64.deb + /tmp/mysql-community-client_${mysql8_version}-1debian11_amd64.deb + /tmp/mysql-client_${mysql8_version}-1debian11_amd64.deb + /tmp/mysql-community-server-core_${mysql8_version}-1debian11_amd64.deb + /tmp/mysql-community-server_${mysql8_version}-1debian11_amd64.deb + /tmp/mysql-server_${mysql8_version}-1debian11_amd64.deb percona-xtrabackup-80 ) ;; @@ -146,18 +148,21 @@ mysql57) echo 'deb http://repo.mysql.com/apt/debian/ buster mysql-5.7' > /etc/apt/sources.list.d/mysql.list ;; mysql80) - echo 'deb http://repo.mysql.com/apt/debian/ buster mysql-8.0' > /etc/apt/sources.list.d/mysql.list + echo 'deb http://repo.mysql.com/apt/debian/ bullseye mysql-8.0' > /etc/apt/sources.list.d/mysql.list ;; esac # Add extra apt repositories for Percona Server and/or Percona XtraBackup. 
case "${FLAVOR}" in -mysql57|mysql80|percona57) +mysql57) echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list ;; +mysql80|percona57) + echo 'deb http://repo.percona.com/apt bullseye main' > /etc/apt/sources.list.d/percona.list + ;; percona80) - echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list - echo 'deb http://repo.percona.com/ps-80/apt buster main' > /etc/apt/sources.list.d/percona80.list + echo 'deb http://repo.percona.com/apt bullseye main' > /etc/apt/sources.list.d/percona.list + echo 'deb http://repo.percona.com/ps-80/apt bullseye main' > /etc/apt/sources.list.d/percona80.list ;; esac diff --git a/docker/local/Dockerfile b/docker/local/Dockerfile index 1d41173800b..9ef944cfd3b 100644 --- a/docker/local/Dockerfile +++ b/docker/local/Dockerfile @@ -1,4 +1,4 @@ -ARG bootstrap_version=18.0 +ARG bootstrap_version=22.1 ARG image="vitess/bootstrap:${bootstrap_version}-common" FROM "${image}" @@ -37,6 +37,7 @@ COPY examples/local /vt/local # Copy the vtadmin web app to the correct location and npm install COPY --chown=vitess:vitess web /web RUN npm install /web/vtadmin +RUN /web/vtadmin/build.sh RUN mkdir /vt/common COPY examples/common /vt/common diff --git a/docker/local/run.sh b/docker/local/run.sh index 9ba5aa07906..16b07fc426c 100755 --- a/docker/local/run.sh +++ b/docker/local/run.sh @@ -1,3 +1,3 @@ #!/bin/bash -docker run -p 14200:14200 -p 14201:14201 -p 15000:15000 -p 15001:15001 -p 15991:15991 -p 15999:15999 -p 16000:16000 --rm -it vitess/local +docker run -d -p 14200:14200 -p 14201:14201 -p 15000:15000 -p 15001:15001 -p 15991:15991 -p 15999:15999 -p 16000:16000 --rm -it vitess/local diff --git a/docker/mini/Dockerfile b/docker/mini/Dockerfile index f9c14932eb0..469fbef8d9e 100644 --- a/docker/mini/Dockerfile +++ b/docker/mini/Dockerfile @@ -31,16 +31,12 @@ RUN ln -s /usr/bin/python3 /usr/bin/python COPY docker/mini/install_mini_dependencies.sh 
/vt/dist/install_mini_dependencies.sh RUN /vt/dist/install_mini_dependencies.sh -COPY docker/mini/orchestrator-vitess-mini.conf.json /etc/orchestrator.conf.json -RUN chown vitess:vitess /etc/orchestrator.conf.json - COPY docker/mini/docker-entry /vt/dist/docker/mini/docker-entry COPY examples/common/scripts /vt/dist/scripts COPY examples/common/env.sh /vt/dist/scripts/env.sh COPY examples/common/lib/utils.sh /vt/dist/scripts/lib/utils.sh COPY docker/mini/vtctld-mini-up.sh /vt/dist/scripts/vtctld-mini-up.sh COPY docker/mini/vttablet-mini-up.sh /vt/dist/scripts/vttablet-mini-up.sh -COPY docker/mini/orchestrator-up.sh /vt/dist/scripts/orchestrator-up.sh RUN echo "hostname=127.0.0.1" >> /vt/dist/scripts/env.sh RUN cat /vt/dist/scripts/env.sh | egrep "^alias" >> /etc/bash.bashrc diff --git a/docker/mini/orchestrator-up.sh b/docker/mini/orchestrator-up.sh deleted file mode 100755 index 6e4ff486fad..00000000000 --- a/docker/mini/orchestrator-up.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -source ./env.sh - -echo "- Configuring orchestrator with given topology server and credentials..." -cp /etc/orchestrator.conf.json /tmp/ -sed -i /tmp/orchestrator.conf.json -e "s/DISCOVERY_SEED_PLACEHOLDER/$TOPOLOGY_SERVER/g" -sed -i /tmp/orchestrator.conf.json -e "s/MYSQL_TOPOLOGY_USER_PLACEHOLDER/$TOPOLOGY_USER/g" -sed -i /tmp/orchestrator.conf.json -e "s/MYSQL_TOPOLOGY_PASSWORD_PLACEHOLDER/$TOPOLOGY_PASSWORD/g" - -cat /tmp/orchestrator.conf.json > /etc/orchestrator.conf.json -rm /tmp/orchestrator.conf.json - -ORCHESTRATOR_LOG="${VTDATAROOT}/tmp/orchestrator.out" - -echo "- Starting orchestrator... 
Logfile is $ORCHESTRATOR_LOG" - -cd /usr/local/orchestrator -./orchestrator http > $ORCHESTRATOR_LOG 2>&1 & diff --git a/docker/mini/orchestrator-vitess-mini.conf.json b/docker/mini/orchestrator-vitess-mini.conf.json deleted file mode 100644 index 604801603c2..00000000000 --- a/docker/mini/orchestrator-vitess-mini.conf.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "Debug": true, - "EnableSyslog": false, - "ListenAddress": ":3000", - "MySQLTopologyUser": "MYSQL_TOPOLOGY_USER_PLACEHOLDER", - "MySQLTopologyPassword": "MYSQL_TOPOLOGY_PASSWORD_PLACEHOLDER", - "BackendDB": "sqlite", - "SQLite3DataFile": "/tmp/orchestrator.sqlite3", - "MySQLConnectTimeoutSeconds": 1, - "DefaultInstancePort": 3306, - "DiscoverByShowSlaveHosts": true, - "InstancePollSeconds": 1, - "HostnameResolveMethod": "none", - "MySQLHostnameResolveMethod": "@@report_host", - "SkipBinlogServerUnresolveCheck": true, - "ExpiryHostnameResolvesMinutes": 60, - "VerifyReplicationFilters": false, - "ReasonableMaintenanceReplicationLagSeconds": 20, - "CandidateInstanceExpireMinutes": 60, - "ReadOnly": false, - "AuthenticationMethod": "", - "ReplicationLagQuery": "", - "DetectClusterAliasQuery": "", - "DetectClusterDomainQuery": "", - "DetectInstanceAliasQuery": "", - "DetectPromotionRuleQuery": "", - "DetectDataCenterQuery": "", - "DetectRegionQuery": "", - "DetectPhysicalEnvironmentQuery": "", - "DetectSemiSyncEnforcedQuery": "", - "DiscoverySeeds": [ - "DISCOVERY_SEED_PLACEHOLDER" - ], - "ServeAgentsHttp": false, - "UseSSL": false, - "UseMutualTLS": false, - "MySQLTopologyUseMixedTLS": false, - "StatusEndpoint": "/api/status", - "StatusSimpleHealth": true, - "StatusOUVerify": false, - "BinlogEventsChunkSize": 10000, - "SkipBinlogEventsContaining": [], - "ReduceReplicationAnalysisCount": false, - "FailureDetectionPeriodBlockMinutes": 5, - "FailMasterPromotionOnLagMinutes": 0, - "RecoveryPeriodBlockSeconds": 0, - "RecoveryIgnoreHostnameFilters": [], - "RecoverMasterClusterFilters": [], - 
"RecoverIntermediateMasterClusterFilters": [], - "OnFailureDetectionProcesses": [], - "PreFailoverProcesses": [], - "PostFailoverProcesses": [], - "PostUnsuccessfulFailoverProcesses": [], - "PostMasterFailoverProcesses": [], - "PostIntermediateMasterFailoverProcesses": [], - "CoMasterRecoveryMustPromoteOtherCoMaster": true, - "DetachLostReplicasAfterMasterFailover": true, - "ApplyMySQLPromotionAfterMasterFailover": true, - "PreventCrossDataCenterMasterFailover": false, - "PreventCrossRegionMasterFailover": true, - "MasterFailoverDetachReplicaMasterHost": false, - "MasterFailoverLostInstancesDowntimeMinutes": 0, - "PostponeReplicaRecoveryOnLagMinutes": 0, - "RaftEnabled": false -} diff --git a/docker/mini/vttablet-mini-up.sh b/docker/mini/vttablet-mini-up.sh index 4cc86156076..37e74565763 100755 --- a/docker/mini/vttablet-mini-up.sh +++ b/docker/mini/vttablet-mini-up.sh @@ -64,7 +64,6 @@ vttablet \ -mycnf_mysql_port $mysql_port \ -service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream' \ -pid_file $VTDATAROOT/$tablet_dir/vttablet.pid \ - -vtctld_addr http://$hostname:$vtctld_web_port/ \ > $VTDATAROOT/$tablet_dir/vttablet.out 2>&1 & # Block waiting for the tablet to be listening diff --git a/docker/orchestrator/Dockerfile b/docker/orchestrator/Dockerfile deleted file mode 100644 index 13622322443..00000000000 --- a/docker/orchestrator/Dockerfile +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -FROM debian:jessie - -# Install Percona XtraDB Cluster (Galera) -RUN apt-key adv --keyserver keys.gnupg.net --recv-keys 9334A25F8507EFA5 && \ - echo 'deb http://repo.percona.com/apt jessie main' > /etc/apt/sources.list.d/mysql.list && \ - apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - percona-xtradb-cluster-server-5.6 && \ - rm -rf /var/lib/apt/lists/* - -# Set up Orchestrator database -RUN service mysql start && \ - mysql -e "CREATE DATABASE orchestrator; GRANT ALL PRIVILEGES ON orchestrator.* TO 'orc_server_user'@'127.0.0.1' IDENTIFIED BY 'orc_server_user_password'" && \ - service mysql stop - -# Copy Orchestrator files (placed in workdir by build.sh) -COPY vtctlclient /usr/bin/vtctlclient -COPY orchestrator /usr/bin/orchestrator -COPY orchestrator.conf.json /orc/conf/orchestrator.conf.json -COPY resources /orc/resources - -WORKDIR /orc -CMD ["/usr/bin/orchestrator", "http"] - diff --git a/docker/orchestrator/build.sh b/docker/orchestrator/build.sh deleted file mode 100755 index 45236582a12..00000000000 --- a/docker/orchestrator/build.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -e - -tmpdir=`mktemp -d` - -script="go install vitess.io/vitess/go/cmd/vtctlclient@latest && \ - git clone https://github.com/openark/orchestrator.git src/github.com/openark/orchestrator && \ - go install github.com/openark/orchestrator/go/cmd/orchestrator" - -echo "Building orchestrator..." -docker run -ti --name=vt_orc_build golang:1.14.4-buster bash -c "$script" -docker cp vt_orc_build:/go/bin/orchestrator $tmpdir -docker cp vt_orc_build:/go/bin/vtctlclient $tmpdir -docker cp vt_orc_build:/go/src/github.com/openark/orchestrator/resources $tmpdir -docker rm vt_orc_build - -echo "Building Docker image..." -cp Dockerfile orchestrator.conf.json $tmpdir -(cd $tmpdir && docker build -t vitess/orchestrator .) - -# Clean up -rm -r $tmpdir diff --git a/docker/orchestrator/orchestrator.conf.json b/docker/orchestrator/orchestrator.conf.json deleted file mode 100644 index 729594044ed..00000000000 --- a/docker/orchestrator/orchestrator.conf.json +++ /dev/null @@ -1,114 +0,0 @@ -{ - "ActiveNodeExpireSeconds": 5, - "ApplyMySQLPromotionAfterMasterFailover": true, - "AuditLogFile": "/tmp/orchestrator-audit.log", - "AuditToSyslog": false, - "AuthenticationMethod": "", - "AuthUserHeader": "", - "BackendDB": "sqlite", - "BinlogEventsChunkSize": 10000, - "CandidateInstanceExpireMinutes": 60, - "CoMasterRecoveryMustPromoteOtherCoMaster": false, - "DataCenterPattern": "[.]([^.]+)[.][^.]+[.]vitess[.]io", - "Debug": true, - "DefaultInstancePort": 3306, - "DetachLostSlavesAfterMasterFailover": true, - "DetectClusterAliasQuery": "SELECT value FROM _vt.local_metadata WHERE name='ClusterAlias'", - "DetectClusterDomainQuery": "", - "DetectInstanceAliasQuery": "SELECT value FROM _vt.local_metadata WHERE name='Alias'", - "DetectPromotionRuleQuery": "SELECT value FROM _vt.local_metadata WHERE name='PromotionRule'", - "DetectDataCenterQuery": "SELECT value FROM _vt.local_metadata WHERE name='DataCenter'", - "DetectSemiSyncEnforcedQuery": "SELECT @@global.rpl_semi_sync_master_wait_no_slave 
AND @@global.rpl_semi_sync_master_timeout > 1000000", - "DiscoverByShowSlaveHosts": false, - "EnableSyslog": false, - "ExpiryHostnameResolvesMinutes": 60, - "DelayMasterPromotionIfSQLThreadNotUpToDate": true, - "FailureDetectionPeriodBlockMinutes": 10, - "GraphiteAddr": "", - "GraphiteConvertHostnameDotsToUnderscores": true, - "GraphitePath": "", - "HostnameResolveMethod": "none", - "HTTPAuthPassword": "", - "HTTPAuthUser": "", - "InstanceBulkOperationsWaitTimeoutSeconds": 10, - "InstancePollSeconds": 5, - "ListenAddress": ":3000", - "MasterFailoverLostInstancesDowntimeMinutes": 0, - "MySQLConnectTimeoutSeconds": 1, - "MySQLHostnameResolveMethod": "none", - "MySQLTopologyCredentialsConfigFile": "", - "MySQLTopologyMaxPoolConnections": 3, - "MySQLTopologyPassword": "orc_client_user_password", - "MySQLTopologyReadTimeoutSeconds": 3, - "MySQLTopologySSLCAFile": "", - "MySQLTopologySSLCertFile": "", - "MySQLTopologySSLPrivateKeyFile": "", - "MySQLTopologySSLSkipVerify": true, - "MySQLTopologyUseMutualTLS": false, - "MySQLTopologyUser": "orc_client_user", - "OnFailureDetectionProcesses": [ - "echo 'Detected {failureType} on {failureCluster}. Affected replicas: {countSlaves}' >> /tmp/recovery.log" - ], - "OSCIgnoreHostnameFilters": [ - ], - "PhysicalEnvironmentPattern": "[.]([^.]+[.][^.]+)[.]vitess[.]io", - "PostFailoverProcesses": [ - "echo '(for all types) Recovered from {failureType} on {failureCluster}. Failed: {failedHost}:{failedPort}; Successor: {successorHost}:{successorPort}' >> /tmp/recovery.log" - ], - "PostIntermediateMasterFailoverProcesses": [ - "echo 'Recovered from {failureType} on {failureCluster}. Failed: {failedHost}:{failedPort}; Successor: {successorHost}:{successorPort}' >> /tmp/recovery.log" - ], - "PostMasterFailoverProcesses": [ - "echo 'Recovered from {failureType} on {failureCluster}. 
Failed: {failedHost}:{failedPort}; Promoted: {successorHost}:{successorPort}' >> /tmp/recovery.log", - "n=0; until [ $n -ge 10 ]; do vtctlclient -server vtctld:15999 TabletExternallyReparented {successorAlias} && break; n=$[$n+1]; sleep 5; done" - ], - "PostponeSlaveRecoveryOnLagMinutes": 0, - "PostUnsuccessfulFailoverProcesses": [ - ], - "PowerAuthUsers": [ - "*" - ], - "PreFailoverProcesses": [ - "echo 'Will recover from {failureType} on {failureCluster}' >> /tmp/recovery.log" - ], - "ProblemIgnoreHostnameFilters": [ - ], - "PromotionIgnoreHostnameFilters": [ - ], - "ReadLongRunningQueries": false, - "ReadOnly": false, - "ReasonableMaintenanceReplicationLagSeconds": 20, - "ReasonableReplicationLagSeconds": 10, - "RecoverMasterClusterFilters": [ - ".*" - ], - "RecoveryIgnoreHostnameFilters": [ - ], - "RecoveryPeriodBlockSeconds": 60, - "ReduceReplicationAnalysisCount": true, - "RejectHostnameResolvePattern": "", - "RemoveTextFromHostnameDisplay": ".vitess.io:3306", - "ReplicationLagQuery": "", - "ServeAgentsHttp": false, - "SkipBinlogEventsContaining": [ - ], - "SkipBinlogServerUnresolveCheck": true, - "SkipOrchestratorDatabaseUpdate": false, - "SlaveStartPostWaitMilliseconds": 1000, - "SnapshotTopologiesIntervalHours": 0, - "SQLite3DataFile": ":memory:", - "SSLCAFile": "", - "SSLCertFile": "", - "SSLPrivateKeyFile": "", - "SSLSkipVerify": false, - "SSLValidOUs": [ - ], - "StaleSeedFailMinutes": 60, - "StatusEndpoint": "/api/status", - "StatusOUVerify": false, - "UnseenAgentForgetHours": 6, - "UnseenInstanceForgetHours": 240, - "UseMutualTLS": false, - "UseSSL": false, - "VerifyReplicationFilters": false -} diff --git a/docker/release.sh b/docker/release.sh index fe9b5333f76..1805421b5af 100755 --- a/docker/release.sh +++ b/docker/release.sh @@ -1,9 +1,9 @@ #!/bin/bash set -ex -vt_base_version='v17.0.2' -debian_versions='buster bullseye' -default_debian_version='bullseye' +vt_base_version='v18.0.1-SNAPSHOT' +debian_versions='bullseye bookworm' 
+default_debian_version='bookworm' docker pull --platform linux/amd64 vitess/base:$vt_base_version @@ -21,6 +21,11 @@ do docker push vitess/vtadmin:$vt_base_version-$debian_version if [[ $debian_version == $default_debian_version ]]; then docker push vitess/vtadmin:$vt_base_version; fi + docker build --platform linux/amd64 --build-arg VT_BASE_VER=$vt_base_version --build-arg DEBIAN_VER=$debian_version-slim -t vitess/vtorc:$vt_base_version-$debian_version k8s/vtorc + docker tag vitess/vtorc:$vt_base_version-$debian_version vitess/vtorc:$vt_base_version + docker push vitess/vtorc:$vt_base_version-$debian_version + if [[ $debian_version == $default_debian_version ]]; then docker push vitess/vtorc:$vt_base_version; fi + docker build --platform linux/amd64 --build-arg VT_BASE_VER=$vt_base_version --build-arg DEBIAN_VER=$debian_version-slim -t vitess/vtgate:$vt_base_version-$debian_version k8s/vtgate docker tag vitess/vtgate:$vt_base_version-$debian_version vitess/vtgate:$vt_base_version docker push vitess/vtgate:$vt_base_version-$debian_version diff --git a/docker/vttestserver/Dockerfile.mysql57 b/docker/vttestserver/Dockerfile.mysql57 index e2fc04eb11f..8f20d4a7855 100644 --- a/docker/vttestserver/Dockerfile.mysql57 +++ b/docker/vttestserver/Dockerfile.mysql57 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=18.0 +ARG bootstrap_version=22.1 ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" FROM "${image}" AS builder @@ -33,7 +33,7 @@ USER vitess RUN make install-testing PREFIX=/vt/install # Start over and build the final image. 
-FROM debian:buster-slim +FROM debian:bullseye-slim # Install dependencies COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh diff --git a/docker/vttestserver/Dockerfile.mysql80 b/docker/vttestserver/Dockerfile.mysql80 index 8911a8f00ee..c1dd5eef4f2 100644 --- a/docker/vttestserver/Dockerfile.mysql80 +++ b/docker/vttestserver/Dockerfile.mysql80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=18.0 +ARG bootstrap_version=22.1 ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" FROM "${image}" AS builder @@ -33,7 +33,7 @@ USER vitess RUN make install-testing PREFIX=/vt/install # Start over and build the final image. -FROM debian:buster-slim +FROM debian:bullseye-slim # Install dependencies COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh diff --git a/examples/backups/restart_tablets.sh b/examples/backups/restart_tablets.sh index bfafcf26d4f..de812a0ea8e 100755 --- a/examples/backups/restart_tablets.sh +++ b/examples/backups/restart_tablets.sh @@ -35,9 +35,9 @@ for i in 300 301 302; do done sleep 5 -# Wait for all the replica tablets to be in the serving state before initiating -# InitShardPrimary. This is essential, since we want the RESTORE phase to be -# complete before we start InitShardPrimary, otherwise we end up reading the +# Wait for all the tablets to be in the serving state before initiating +# PlannedReparentShard. This is essential, since we want the RESTORE phase to be +# complete before we start PlannedReparentShard, otherwise we end up reading the # tablet type to RESTORE and do not set semi-sync, which leads to the primary # hanging on writes. 
totalTime=600 @@ -50,6 +50,15 @@ for i in 101 201 301; do done done +for i in 102 202 302; do + while [ $totalTime -gt 0 ]; do + status=$(curl "http://$hostname:15$i/debug/status_details") + echo "$status" | grep "RDONLY: Serving" && break + totalTime=$((totalTime-1)) + sleep 0.1 + done +done + # Check that all the replica tablets have reached REPLICA: Serving state for i in 101 201 301; do status=$(curl "http://$hostname:15$i/debug/status_details") @@ -57,6 +66,13 @@ for i in 101 201 301; do echo "tablet-$i did not reach REPLICA: Serving state. Exiting due to failure." exit 1 done +# Check that all the rdonly tablets have reached RDONLY: Serving state +for i in 102 202 302; do + status=$(curl "http://$hostname:15$i/debug/status_details") + echo "$status" | grep "RDONLY: Serving" && continue + echo "tablet-$i did not reach RDONLY: Serving state. Exiting due to failure." + exit 1 +done vtctldclient PlannedReparentShard commerce/0 --new-primary "zone1-100" vtctldclient PlannedReparentShard customer/-80 --new-primary "zone1-200" diff --git a/examples/backups/start_cluster.sh b/examples/backups/start_cluster.sh index 9855171ea4d..33cbb362d88 100755 --- a/examples/backups/start_cluster.sh +++ b/examples/backups/start_cluster.sh @@ -22,8 +22,6 @@ source ../common/env.sh # start topo server if [ "${TOPO}" = "zk2" ]; then CELL=zone1 ../common/scripts/zk-up.sh -elif [ "${TOPO}" = "k8s" ]; then - CELL=zone1 ../common/scripts/k3s-up.sh else CELL=zone1 ../common/scripts/etcd-up.sh fi @@ -44,8 +42,8 @@ done vtctldclient PlannedReparentShard commerce/0 --new-primary "zone1-100" # create the schema for commerce -vtctlclient ApplySchema -- --sql-file ./create_commerce_schema.sql commerce || fail "Could not apply schema for the commerce keyspace" -vtctlclient ApplyVSchema -- --vschema_file ../local/vschema_commerce_seq.json commerce || fail "Could not apply vschema for the commerce keyspace" +vtctldclient ApplySchema --sql-file ./create_commerce_schema.sql commerce || fail "Could 
not apply schema for the commerce keyspace" +vtctldclient ApplyVSchema --vschema-file ../local/vschema_commerce_seq.json commerce || fail "Could not apply vschema for the commerce keyspace" # Create keyspace and set the semi_sync durability policy. vtctldclient CreateKeyspace --durability-policy=semi_sync customer || fail "Failed to create and configure the customer keyspace" @@ -69,8 +67,8 @@ for shard in "-80" "80-"; do done # create the schema for customer -vtctlclient ApplySchema -- --sql-file ./create_customer_schema.sql customer || fail "Could not apply schema for the customer keyspace" -vtctlclient ApplyVSchema -- --vschema_file ../local/vschema_customer_sharded.json customer || fail "Could not apply vschema for the customer keyspace" +vtctldclient ApplySchema --sql-file ./create_customer_schema.sql customer || fail "Could not apply schema for the customer keyspace" +vtctldclient ApplyVSchema --vschema-file ../local/vschema_customer_sharded.json customer || fail "Could not apply vschema for the customer keyspace" # start vtgate diff --git a/examples/backups/stop_tablets.sh b/examples/backups/stop_tablets.sh index 2a45e9e68d2..6a3ced6ab74 100755 --- a/examples/backups/stop_tablets.sh +++ b/examples/backups/stop_tablets.sh @@ -20,7 +20,7 @@ source ../common/env.sh for tablet in 100 200 300; do - if vtctlclient --action_timeout 1s --server localhost:15999 GetTablet zone1-$tablet >/dev/null 2>&1; then + if vtctldclient --action_timeout 1s --server localhost:15999 GetTablet zone1-$tablet >/dev/null 2>&1; then # The zero tablet is up. 
Try to shutdown 0-2 tablet + mysqlctl for i in 0 1 2; do uid=$(($tablet + $i)) @@ -29,7 +29,7 @@ for tablet in 100 200 300; do echo "Shutting down mysql zone1-$uid" CELL=zone1 TABLET_UID=$uid ../common/scripts/mysqlctl-down.sh echo "Removing tablet directory zone1-$uid" - vtctlclient DeleteTablet -- --allow_primary=true zone1-$uid + vtctldclient DeleteTablets --allow-primary zone1-$uid rm -Rf $VTDATAROOT/vt_0000000$uid done fi diff --git a/examples/backups/take_backups.sh b/examples/backups/take_backups.sh index dc1b049c9c3..85935edd2ce 100755 --- a/examples/backups/take_backups.sh +++ b/examples/backups/take_backups.sh @@ -20,5 +20,5 @@ source ../common/env.sh for shard in "customer/-80" "customer/80-" "commerce/0"; do - vtctlclient BackupShard "${shard}" || fail "Failed to backup shard: ${shard}" + vtctldclient BackupShard "${shard}" || fail "Failed to backup shard: ${shard}" done diff --git a/examples/backups/upgrade_cluster.sh b/examples/backups/upgrade_cluster.sh index 0144dc94579..3e831a14360 100755 --- a/examples/backups/upgrade_cluster.sh +++ b/examples/backups/upgrade_cluster.sh @@ -39,7 +39,7 @@ for i in 201 202; do echo "Shutting down mysql zone1-$i" CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-down.sh echo "Removing tablet directory zone1-$i" - vtctlclient DeleteTablet -- --allow_primary=true zone1-$i + vtctldclient DeleteTablets --allow-primary zone1-$i rm -Rf $VTDATAROOT/vt_0000000$i echo "Starting tablet zone1-$i again" CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh @@ -52,7 +52,7 @@ for i in 301 302; do echo "Shutting down mysql zone1-$i" CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-down.sh echo "Removing tablet directory zone1-$i" - vtctlclient DeleteTablet -- --allow_primary=true zone1-$i + vtctldclient DeleteTablets --allow-primary zone1-$i rm -Rf $VTDATAROOT/vt_0000000$i echo "Starting tablet zone1-$i again" CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh @@ -94,4 +94,4 @@ SHARD=-80 CELL=zone1 KEYSPACE=customer 
TABLET_UID=200 ../common/scripts/vttablet echo "Restarting tablet zone1-300" CELL=zone1 TABLET_UID=300 ../common/scripts/vttablet-down.sh -SHARD=80- CELL=zone1 KEYSPACE=customer TABLET_UID=300 ../common/scripts/vttablet-up.sh \ No newline at end of file +SHARD=80- CELL=zone1 KEYSPACE=customer TABLET_UID=300 ../common/scripts/vttablet-up.sh diff --git a/examples/common/env.sh b/examples/common/env.sh index dddb40bd08d..24f200b24ef 100644 --- a/examples/common/env.sh +++ b/examples/common/env.sh @@ -26,16 +26,13 @@ fi # mysqld might be in /usr/sbin which will not be in the default PATH PATH="/usr/sbin:$PATH" -for binary in mysqld etcd etcdctl curl vtctlclient vttablet vtgate vtctld mysqlctl; do +for binary in mysqld etcd etcdctl curl vtctldclient vttablet vtgate vtctld mysqlctl; do command -v "$binary" > /dev/null || fail "${binary} is not installed in PATH. See https://vitess.io/docs/get-started/local/ for install instructions." done; -# vtctlclient has a separate alias setup below +# vtctldclient has a separate alias setup below for binary in vttablet vtgate vtctld mysqlctl vtorc vtctl; do - majorVersion=$("${binary}" --version | sed -rn 's/^Version:[[:space:]]*([[:digit:]]+)\.[[:digit:]]+\.[[:digit:]]+.*/\1/p') - if [[ $majorVersion -gt "16" ]]; then - alias $binary="$binary --config-file-not-found-handling=ignore" - fi + alias $binary="$binary --config-file-not-found-handling=ignore" done; if [ "${TOPO}" = "zk2" ]; then @@ -59,13 +56,6 @@ if [ "${TOPO}" = "zk2" ]; then TOPOLOGY_FLAGS="--topo_implementation zk2 --topo_global_server_address ${ZK_SERVER} --topo_global_root /vitess/global" mkdir -p "${VTDATAROOT}/tmp" -elif [ "${TOPO}" = "k8s" ]; then - # Set topology environment parameters. 
- K8S_ADDR="localhost" - K8S_PORT="8443" - K8S_KUBECONFIG=$VTDATAROOT/tmp/k8s.kubeconfig - # shellcheck disable=SC2034 - TOPOLOGY_FLAGS="--topo_implementation k8s --topo_k8s_kubeconfig ${K8S_KUBECONFIG} --topo_global_server_address ${K8S_ADDR}:${K8S_PORT} --topo_global_root /vitess/global" elif [ "${TOPO}" = "consul" ]; then # Set up topology environment parameters. CONSUL_SERVER=127.0.0.1 diff --git a/examples/common/lib/utils.sh b/examples/common/lib/utils.sh index 66af7d31bd7..140e58147e1 100644 --- a/examples/common/lib/utils.sh +++ b/examples/common/lib/utils.sh @@ -108,13 +108,13 @@ function wait_for_shard_vreplication_engine() { local wait_secs=90 for _ in $(seq 1 ${wait_secs}); do - if vtctlclient --server=localhost:15999 Workflow -- "${keyspace}" listall &>/dev/null; then + if vtctldclient --server=localhost:15999 workflow --keyspace "${keyspace}" list &>/dev/null; then break fi sleep 1 done; - if ! vtctlclient --server=localhost:15999 Workflow -- "${keyspace}" listall &>/dev/null; then + if ! vtctldclient --server=localhost:15999 workflow --keyspace "${keyspace}" list &>/dev/null; then fail "Timed out after ${wait_secs} seconds waiting for the primary tablet's VReplication engine to open in ${keyspace}/${shard}" fi } @@ -139,6 +139,35 @@ function wait_for_healthy_shard() { wait_for_shard_vreplication_engine "${keyspace}" "${shard}" } +# Wait for a workflow to reach the running state. Example: +# wait_for_workflow_running customer customer2customer +function wait_for_workflow_running() { + if [[ -z ${1} || -z ${2} ]]; then + fail "A keyspace and workflow must be specified when waiting for a workflow to reach the running state" + fi + + local keyspace=${1} + local workflow=${2} + local wait_secs=90 + local result="" + + echo "Waiting for the ${workflow} workflow in the ${keyspace} keyspace to finish the copy phase..." 
+ + for _ in $(seq 1 ${wait_secs}); do + result=$(vtctldclient Workflow --keyspace="${keyspace}" show --workflow="${workflow}" 2>/dev/null | grep "Copy phase completed") + if [[ ${result} != "" ]]; then + break + fi + sleep 1 + done; + + if [[ ${result} == "" ]]; then + fail "Timed out after ${wait_secs} seconds waiting for the ${workflow} workflow in the ${keyspace} keyspace to reach the running state" + fi + + echo "The ${workflow} workflow in the ${keyspace} keyspace is now running. $(sed -rn 's/.*"(Copy phase.*)".*/\1/p' <<< "${result}")." +} + # Stop the specified binary name using the provided PID file. # Example: # stop_process "vtadmin-web" "$VTDATAROOT/tmp/vtadmin-web.pid" diff --git a/examples/common/scripts/etcd-up.sh b/examples/common/scripts/etcd-up.sh index f2bba3e1470..937db27ea42 100755 --- a/examples/common/scripts/etcd-up.sh +++ b/examples/common/scripts/etcd-up.sh @@ -24,7 +24,7 @@ export ETCDCTL_API=2 # Check that etcd is not already running curl "http://${ETCD_SERVER}" > /dev/null 2>&1 && fail "etcd is already running. Exiting." -etcd --enable-v2=true --data-dir "${VTDATAROOT}/etcd/" --listen-client-urls "http://${ETCD_SERVER}" --advertise-client-urls "http://${ETCD_SERVER}" > "${VTDATAROOT}"/tmp/etcd.out 2>&1 & +etcd --data-dir "${VTDATAROOT}/etcd/" --listen-client-urls "http://${ETCD_SERVER}" --advertise-client-urls "http://${ETCD_SERVER}" > "${VTDATAROOT}"/tmp/etcd.out 2>&1 & PID=$! echo $PID > "${VTDATAROOT}/tmp/etcd.pid" sleep 5 @@ -56,6 +56,6 @@ vtctl $TOPOLOGY_FLAGS VtctldCommand AddCellInfo \ $cell set -e -echo "etcd start done..." +echo "etcd is running!" diff --git a/examples/common/scripts/k3s-down.sh b/examples/common/scripts/k3s-down.sh deleted file mode 100755 index 195b024bf91..00000000000 --- a/examples/common/scripts/k3s-down.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The Vitess Authors. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This is an example script that stops the k3s server started by k3s-up.sh. - -set -e - -source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh" - -# Stop K3s server. -echo "Stopping k3s server..." - -pid=`cat $VTDATAROOT/tmp/k3s.pid` -echo "Stopping k3s..." -kill -9 $pid diff --git a/examples/common/scripts/k3s-up.sh b/examples/common/scripts/k3s-up.sh deleted file mode 100755 index 7c85cb0ac07..00000000000 --- a/examples/common/scripts/k3s-up.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This is an example script that creates a Kubernetes api for topo use by running k3s - -set -e -cell=${CELL:-'test'} - -script_dir="$(dirname "${BASH_SOURCE[0]:-$0}")" -source "${script_dir}/../env.sh" - -case $(uname) in - Linux) ;; - *) echo "WARNING: unsupported platform. 
K3s only supports running on Linux, the k8s topology is available for local examples."; exit 1;; -esac - -case $(uname -m) in - aarch64) ;; - x86_64) ;; - *) echo "ERROR: unsupported architecture, the k8s topology is not available for local examples."; exit 1;; -esac - -k3s server --disable-agent --data-dir "${VTDATAROOT}/k3s/" --https-listen-port "${K8S_PORT}" --write-kubeconfig "${K8S_KUBECONFIG}" > "${VTDATAROOT}"/tmp/k3s.out 2>&1 & -PID=$! -echo $PID > "${VTDATAROOT}/tmp/k3s.pid" -disown -a -echo "Waiting for k3s server to start" -sleep 15 - -# Use k3s built-in kubectl with custom config -KUBECTL="k3s kubectl --kubeconfig=${K8S_KUBECONFIG}" - -# Create the CRD for vitesstopologynodes -$KUBECTL create -f "${script_dir}/../../../go/vt/topo/k8stopo/VitessTopoNodes-crd.yaml" - -# Add the CellInfo description for the cell -set +e -echo "add $cell CellInfo" -vtctl $TOPOLOGY_FLAGS VtctldCommand AddCellInfo \ - --root /vitess/$cell \ - $cell -set -e - -echo "k3s start done..." diff --git a/examples/common/scripts/mysqlctl-up.sh b/examples/common/scripts/mysqlctl-up.sh index d9df27ccdc0..ff20cae5793 100755 --- a/examples/common/scripts/mysqlctl-up.sh +++ b/examples/common/scripts/mysqlctl-up.sh @@ -40,3 +40,5 @@ mysqlctl \ --tablet_uid $uid \ --mysql_port $mysql_port \ $action + +echo -e "MySQL for tablet $alias is running!" diff --git a/examples/common/scripts/vtadmin-up.sh b/examples/common/scripts/vtadmin-up.sh index 2cf4b578332..7fb5d759254 100755 --- a/examples/common/scripts/vtadmin-up.sh +++ b/examples/common/scripts/vtadmin-up.sh @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + script_dir="$(dirname "${BASH_SOURCE[0]:-$0}")" source "${script_dir}/../env.sh" @@ -78,6 +92,7 @@ VITE_VTADMIN_API_ADDRESS="http://${hostname}:${vtadmin_api_port}" \ VITE_ENABLE_EXPERIMENTAL_TABLET_DEBUG_VARS="true" \ npm run --prefix "$web_dir" build +[[ ! -d "$web_dir/build" ]] && fail "Please make sure the VTAdmin files are built in $web_dir/build, using 'make build'" "${web_dir}/node_modules/.bin/serve" --no-clipboard -l $vtadmin_web_port -s "${web_dir}/build" \ > "${log_dir}/vtadmin-web.out" 2>&1 & diff --git a/examples/common/scripts/vtgate-up.sh b/examples/common/scripts/vtgate-up.sh index 07ac2ceee7b..f94c3c32ca0 100755 --- a/examples/common/scripts/vtgate-up.sh +++ b/examples/common/scripts/vtgate-up.sh @@ -24,7 +24,7 @@ grpc_port=15991 mysql_server_port=15306 mysql_server_socket_path="/tmp/mysql.sock" -# Start vtgate. +echo "Starting vtgate..." # shellcheck disable=SC2086 vtgate \ $TOPOLOGY_FLAGS \ @@ -40,12 +40,12 @@ vtgate \ --service_map 'grpc-vtgateservice' \ --pid_file $VTDATAROOT/tmp/vtgate.pid \ --mysql_auth_server_config_file $VTROOT/config/user.json \ + --enable_buffer \ > $VTDATAROOT/tmp/vtgate.out 2>&1 & # Block waiting for vtgate to be listening # Not the same as healthy -echo "Waiting for vtgate to be up..." 
while true; do curl -I "http://$hostname:$web_port/debug/status" >/dev/null 2>&1 && break sleep 0.1 diff --git a/examples/common/scripts/vtorc-up.sh b/examples/common/scripts/vtorc-up.sh index 66a826da288..23ca4e62b48 100755 --- a/examples/common/scripts/vtorc-up.sh +++ b/examples/common/scripts/vtorc-up.sh @@ -6,6 +6,7 @@ source "${script_dir}/../env.sh" log_dir="${VTDATAROOT}/tmp" port=16000 +echo "Starting vtorc..." vtorc \ $TOPOLOGY_FLAGS \ --logtostderr \ diff --git a/examples/common/scripts/vttablet-up.sh b/examples/common/scripts/vttablet-up.sh index 90a4239560a..812093666f2 100755 --- a/examples/common/scripts/vttablet-up.sh +++ b/examples/common/scripts/vttablet-up.sh @@ -53,9 +53,10 @@ vttablet \ --grpc_port $grpc_port \ --service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream' \ --pid_file $VTDATAROOT/$tablet_dir/vttablet.pid \ - --vtctld_addr http://$hostname:$vtctld_web_port/ \ --queryserver-config-max-result-size 10000000 \ - --heartbeat_enable --heartbeat_interval=250ms --heartbeat_on_demand_duration=5s \ + --heartbeat_enable \ + --heartbeat_interval=250ms \ + --heartbeat_on_demand_duration=5s \ > $VTDATAROOT/$tablet_dir/vttablet.out 2>&1 & # Block waiting for the tablet to be listening @@ -68,3 +69,5 @@ done # check one last time curl -I "http://$hostname:$port/debug/status" || fail "tablet could not be started!" + +echo -e "vttablet for $alias is running!" diff --git a/examples/compose/README.md b/examples/compose/README.md index 0070697ca9d..8d20f360620 100644 --- a/examples/compose/README.md +++ b/examples/compose/README.md @@ -222,17 +222,7 @@ The vreplication container included performs the following actions; 4. Prints out helpful debug information for you. 
``` vitess/examples/compose$ docker-compose logs -f vreplication -vreplication_1 | + /vt/bin/vtctlclient --server vtctld:15999 VReplicationExec local-0000000101 'insert into _vt.vreplication (db_name, source, pos, max_tps, max_replication_lag, tablet_types, time_updated, transaction_timestamp, state) values('\''commerce'\'', '\''keyspace:\"ext_commerce\" shard:\"0\" filter: > on_ddl:EXEC_IGNORE '\'', '\'''\'', 9999, 9999, '\''primary'\'', 0, 0, '\''Running'\'')' -vreplication_1 | + /vt/bin/vtctlclient --server vtctld:15999 VReplicationExec local-0000000101 'select * from _vt.vreplication' -vreplication_1 | +----+----------+--------------------------------+-----+----------+---------+---------------------+------+--------------+--------------+-----------------------+---------+---------+----------+ -vreplication_1 | | id | workflow | source | pos | stop_pos | max_tps | max_replication_lag | cell | tablet_types | time_updated | transaction_timestamp | state | message | db_name | -vreplication_1 | +----+----------+--------------------------------+-----+----------+---------+---------------------+------+--------------+--------------+-----------------------+---------+---------+----------+ -vreplication_1 | | 1 | | keyspace:"ext_commerce" | | | 9999 | 9999 | | primary | 0 | 0 | Running | | commerce | -vreplication_1 | | | | shard:"0" | | | | | | | | | | | | -vreplication_1 | | | | filter: > | | | | | | | | | | | | -vreplication_1 | | | | on_ddl:EXEC_IGNORE | | | | | | | | | | | | -vreplication_1 | +----+----------+--------------------------------+-----+----------+---------+---------------------+------+--------------+--------------+-----------------------+---------+---------+----------+ -compose_vreplication_1 exited with code 0 +... 
``` ### Connect to vgate and run queries diff --git a/examples/compose/client.go b/examples/compose/client.go index a4933f21833..8beaef683cd 100644 --- a/examples/compose/client.go +++ b/examples/compose/client.go @@ -42,7 +42,6 @@ var ( func main() { pflag.Parse() - rand.Seed(time.Now().UnixNano()) // Connect to vtgate. db, err := vitessdriver.Open(*server, "@primary") diff --git a/examples/compose/docker-compose.beginners.yml b/examples/compose/docker-compose.beginners.yml index 1afa23529bd..7f35ddbb034 100644 --- a/examples/compose/docker-compose.beginners.yml +++ b/examples/compose/docker-compose.beginners.yml @@ -58,7 +58,7 @@ services: - "3306" vtctld: - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - "15000:$WEB_PORT" - "$GRPC_PORT" @@ -81,7 +81,7 @@ services: condition: service_healthy vtgate: - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - "15099:$WEB_PORT" - "$GRPC_PORT" @@ -111,7 +111,7 @@ services: condition: service_healthy schemaload: - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 command: - sh - -c @@ -144,12 +144,12 @@ services: environment: - KEYSPACES=$KEYSPACE - GRPC_PORT=15999 - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 volumes: - .:/script vttablet100: - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - "15100:$WEB_PORT" - "$GRPC_PORT" @@ -181,7 +181,7 @@ services: retries: 15 vttablet101: - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - "15101:$WEB_PORT" - "$GRPC_PORT" @@ -213,7 +213,7 @@ services: retries: 15 vttablet102: - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - "15102:$WEB_PORT" - "$GRPC_PORT" @@ -245,7 +245,7 @@ services: retries: 15 vttablet103: - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - "15103:$WEB_PORT" - "$GRPC_PORT" @@ -277,7 +277,7 @@ services: retries: 15 vtorc: - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 command: ["sh", "-c", "/script/vtorc-up.sh"] depends_on: - vtctld @@ 
-307,7 +307,7 @@ services: retries: 15 vreplication: - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 volumes: - ".:/script" environment: diff --git a/examples/compose/docker-compose.yml b/examples/compose/docker-compose.yml index 6692d0a0575..aa71b1ba82c 100644 --- a/examples/compose/docker-compose.yml +++ b/examples/compose/docker-compose.yml @@ -75,7 +75,7 @@ services: - SCHEMA_FILES=lookup_keyspace_schema_file.sql - POST_LOAD_FILE= - EXTERNAL_DB=0 - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 volumes: - .:/script schemaload_test_keyspace: @@ -101,7 +101,7 @@ services: - SCHEMA_FILES=test_keyspace_schema_file.sql - POST_LOAD_FILE= - EXTERNAL_DB=0 - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 volumes: - .:/script set_keyspace_durability_policy: @@ -115,7 +115,7 @@ services: environment: - KEYSPACES=test_keyspace lookup_keyspace - GRPC_PORT=15999 - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 volumes: - .:/script vreplication: @@ -129,7 +129,7 @@ services: - TOPOLOGY_FLAGS=--topo_implementation consul --topo_global_server_address consul1:8500 --topo_global_root vitess/global - EXTERNAL_DB=0 - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 volumes: - .:/script vtctld: @@ -143,7 +143,7 @@ services: depends_on: external_db_host: condition: service_healthy - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - 15000:8080 - "15999" @@ -160,7 +160,7 @@ services: --normalize_queries=true ' depends_on: - vtctld - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - 15099:8080 - "15999" @@ -182,7 +182,7 @@ services: - EXTERNAL_DB=0 - DB_USER= - DB_PASS= - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - 13000:8080 volumes: @@ -217,7 +217,7 @@ services: - CMD-SHELL - curl -s --fail --show-error localhost:8080/debug/health timeout: 10s - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - 15101:8080 - "15999" @@ -254,7 +254,7 @@ services: - CMD-SHELL - curl -s --fail 
--show-error localhost:8080/debug/health timeout: 10s - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - 15102:8080 - "15999" @@ -291,7 +291,7 @@ services: - CMD-SHELL - curl -s --fail --show-error localhost:8080/debug/health timeout: 10s - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - 15201:8080 - "15999" @@ -328,7 +328,7 @@ services: - CMD-SHELL - curl -s --fail --show-error localhost:8080/debug/health timeout: 10s - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - 15202:8080 - "15999" @@ -365,7 +365,7 @@ services: - CMD-SHELL - curl -s --fail --show-error localhost:8080/debug/health timeout: 10s - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - 15301:8080 - "15999" @@ -402,7 +402,7 @@ services: - CMD-SHELL - curl -s --fail --show-error localhost:8080/debug/health timeout: 10s - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - 15302:8080 - "15999" diff --git a/examples/compose/externaldb_vreplication.sh b/examples/compose/externaldb_vreplication.sh index 1138909aa13..3a5291b18b7 100755 --- a/examples/compose/externaldb_vreplication.sh +++ b/examples/compose/externaldb_vreplication.sh @@ -19,19 +19,19 @@ set -ex VTCTLD_SERVER=${VTCTLD_SERVER:-'vtctld:15999'} # Wait until source and destination primaries are available -until (/vt/bin/vtctlclient --server $VTCTLD_SERVER ListAllTablets | grep "ext_" | grep "primary" ); do +until (/vt/bin/vtctldclient --server $VTCTLD_SERVER GetTablets | grep "ext_" | grep "primary" ); do echo 'waiting for external primary..'; sleep 1; done -until (/vt/bin/vtctlclient --server $VTCTLD_SERVER ListAllTablets | grep -v "ext_" | grep "primary" ); do +until (/vt/bin/vtctldclient --server $VTCTLD_SERVER GetTablets | grep -v "ext_" | grep "primary" ); do echo 'waiting for managed primary..'; sleep 1; done # Get source and destination tablet and shard information -TABLET_INFO=$(/vt/bin/vtctlclient --server $VTCTLD_SERVER ListAllTablets) 
+TABLET_INFO=$(/vt/bin/vtctldclient --server $VTCTLD_SERVER GetTablets) source_alias=$(echo "$TABLET_INFO "| grep "ext_" | grep "primary" | awk '{ print $1 }') dest_alias=$(echo "$TABLET_INFO "| grep -v "ext_" | grep "primary" | awk '{ print $1 }') source_keyspace=$(echo "$TABLET_INFO "| grep "ext_" | grep "primary" | awk '{ print $2 }') @@ -43,33 +43,27 @@ dest_tablet=$(echo "$TABLET_INFO "| grep -v "ext_" | grep "primary" | awk '{ pri # Disable foreign_key checks on destination -/vt/bin/vtctlclient --server $VTCTLD_SERVER ExecuteFetchAsDba $dest_alias 'SET GLOBAL FOREIGN_KEY_CHECKS=0;' +/vt/bin/vtctldclient --server $VTCTLD_SERVER ExecuteFetchAsDBA $dest_alias 'SET GLOBAL FOREIGN_KEY_CHECKS=0;' # Get source_sql mode -source_sql_mode=$(/vt/bin/vtctlclient --server $VTCTLD_SERVER ExecuteFetchAsDba $source_alias 'SELECT @@GLOBAL.sql_mode' | awk 'NR==4 {print $2}') +source_sql_mode=$(/vt/bin/vtctldclient --server $VTCTLD_SERVER ExecuteFetchAsDBA $source_alias 'SELECT @@GLOBAL.sql_mode' | awk 'NR==4 {print $2}') # Apply source sql_mode to destination # The intention is to avoid replication errors -/vt/bin/vtctlclient --server $VTCTLD_SERVER ExecuteFetchAsDba $dest_alias "SET GLOBAL sql_mode='$source_sql_mode';" +/vt/bin/vtctldclient --server $VTCTLD_SERVER ExecuteFetchAsDBA $dest_alias "SET GLOBAL sql_mode='$source_sql_mode';" # Verify sql_mode matches -[ $source_sql_mode == $(/vt/bin/vtctlclient --server $VTCTLD_SERVER ExecuteFetchAsDba $dest_alias 'SELECT @@GLOBAL.sql_mode' | awk 'NR==4 {print $2}') ] && \ +[ $source_sql_mode == $(/vt/bin/vtctldclient --server $VTCTLD_SERVER ExecuteFetchAsDBA $dest_alias 'SELECT @@GLOBAL.sql_mode' | awk 'NR==4 {print $2}') ] && \ echo "Source and Destination sql_mode Match." 
|| echo "sql_mode MisMatch" -until /vt/bin/vtctlclient --server $VTCTLD_SERVER GetSchema $dest_alias; do +until /vt/bin/vtctldclient --server $VTCTLD_SERVER GetSchema $dest_alias; do echo "Waiting for destination schema to be ready.."; sleep 3; done -# Copy schema from source to destination shard -/vt/bin/vtctlclient --server $VTCTLD_SERVER CopySchemaShard $source_tablet $dest_tablet || true - -# Verify schema -/vt/bin/vtctlclient --server $VTCTLD_SERVER GetSchema $dest_alias - -# Start vreplication -/vt/bin/vtctlclient --server $VTCTLD_SERVER VReplicationExec $dest_alias 'insert into _vt.vreplication (db_name, source, pos, max_tps, max_replication_lag, tablet_types, time_updated, transaction_timestamp, state) values('"'"''"$dest_keyspace"''"'"', '"'"'keyspace:\"'"$source_keyspace"'\" shard:\"'"$source_shard"'\" filter: > on_ddl:EXEC_IGNORE '"'"', '"'"''"'"', 9999, 9999, '"'"'primary'"'"', 0, 0, '"'"'Running'"'"')' +# Start vreplication workflow +/vt/bin/vtctldclient --server $VTCTLD_SERVER MoveTables --workflow ext_commerce2commerce --target-keyspace $dest_keyspace create --source-keyspace $source_keyspace --all-tables # Check vreplication status -/vt/bin/vtctlclient --server $VTCTLD_SERVER VReplicationExec $dest_alias 'select * from _vt.vreplication' +/vt/bin/vtctldclient --server $VTCTLD_SERVER MoveTables --workflow ext_commerce2commerce --target-keyspace $dest_keyspace show diff --git a/examples/compose/lvtctl.sh b/examples/compose/lvtctl.sh index 94d4e236395..0b4f16b70c9 100755 --- a/examples/compose/lvtctl.sh +++ b/examples/compose/lvtctl.sh @@ -20,5 +20,5 @@ if [[ "$OSTYPE" == "msys" ]]; then tty=winpty fi -# This is a convenience script to run vtctlclient against the local example. -exec $tty docker-compose exec ${CS:-vtctld} /vt/bin/vtctlclient --server vtctld:15999 "$@" +# This is a convenience script to run vtctldclient against the local example. 
+exec $tty docker-compose exec ${CS:-vtctld} /vt/bin/vtctldclient --server vtctld:15999 "$@" diff --git a/examples/compose/schemaload.sh b/examples/compose/schemaload.sh index 0c27dd27026..607c791ce69 100755 --- a/examples/compose/schemaload.sh +++ b/examples/compose/schemaload.sh @@ -26,23 +26,23 @@ sleep $sleeptime if [ ! -f schema_run ]; then while true; do - vtctlclient --server vtctld:$GRPC_PORT GetTablet $targettab && break + vtctldclient --server vtctld:$GRPC_PORT GetTablet $targettab && break sleep 1 done if [ "$external_db" = "0" ]; then for schema_file in $schema_files; do echo "Applying Schema ${schema_file} to ${KEYSPACE}" - vtctlclient --server vtctld:$GRPC_PORT -- ApplySchema --sql-file /script/tables/${schema_file} $KEYSPACE || \ - vtctlclient --server vtctld:$GRPC_PORT -- ApplySchema --sql "$(cat /script/tables/${schema_file})" $KEYSPACE || true + vtctldclient --server vtctld:$GRPC_PORT ApplySchema --sql-file /script/tables/${schema_file} $KEYSPACE || \ + vtctldclient --server vtctld:$GRPC_PORT ApplySchema --sql "$(cat /script/tables/${schema_file})" $KEYSPACE || true done fi echo "Applying VSchema ${vschema_file} to ${KEYSPACE}" - vtctlclient --server vtctld:$GRPC_PORT -- ApplyVSchema --vschema_file /script/${vschema_file} $KEYSPACE || \ - vtctlclient --server vtctld:$GRPC_PORT -- ApplyVSchema --vschema "$(cat /script/${vschema_file})" $KEYSPACE + vtctldclient --server vtctld:$GRPC_PORT ApplyVSchema --vschema-file /script/${vschema_file} $KEYSPACE || \ + vtctldclient --server vtctld:$GRPC_PORT ApplyVSchema --vschema "$(cat /script/${vschema_file})" $KEYSPACE echo "List All Tablets" - vtctlclient --server vtctld:$GRPC_PORT ListAllTablets + vtctldclient --server vtctld:$GRPC_PORT GetTablets if [ -n "$load_file" ]; then # vtgate can take a REALLY long time to come up fully diff --git a/examples/compose/vtcompose/docker-compose.test.yml b/examples/compose/vtcompose/docker-compose.test.yml index 4fdd8363004..ad2995b3a6c 100644 --- 
a/examples/compose/vtcompose/docker-compose.test.yml +++ b/examples/compose/vtcompose/docker-compose.test.yml @@ -79,7 +79,7 @@ services: - SCHEMA_FILES=test_keyspace_schema_file.sql - POST_LOAD_FILE= - EXTERNAL_DB=0 - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 volumes: - .:/script schemaload_unsharded_keyspace: @@ -103,7 +103,7 @@ services: - SCHEMA_FILES=unsharded_keyspace_schema_file.sql - POST_LOAD_FILE= - EXTERNAL_DB=0 - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 volumes: - .:/script set_keyspace_durability_policy_test_keyspace: @@ -117,7 +117,7 @@ services: environment: - GRPC_PORT=15999 - KEYSPACES=test_keyspace - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 volumes: - .:/script set_keyspace_durability_policy_unsharded_keyspace: @@ -130,7 +130,7 @@ services: environment: - GRPC_PORT=15999 - KEYSPACES=unsharded_keyspace - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 volumes: - .:/script vreplication: @@ -144,7 +144,7 @@ services: - TOPOLOGY_FLAGS=--topo_implementation consul --topo_global_server_address consul1:8500 --topo_global_root vitess/global - EXTERNAL_DB=0 - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 volumes: - .:/script vtctld: @@ -159,7 +159,7 @@ services: depends_on: external_db_host: condition: service_healthy - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - 15000:8080 - "15999" @@ -176,7 +176,7 @@ services: ''grpc-vtgateservice'' --normalize_queries=true ' depends_on: - vtctld - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - 15099:8080 - "15999" @@ -199,7 +199,7 @@ services: - EXTERNAL_DB=0 - DB_USER= - DB_PASS= - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - 13000:8080 volumes: @@ -234,7 +234,7 @@ services: - CMD-SHELL - curl -s --fail --show-error localhost:8080/debug/health timeout: 10s - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - 15101:8080 - "15999" @@ -271,7 +271,7 @@ services: - CMD-SHELL - curl -s 
--fail --show-error localhost:8080/debug/health timeout: 10s - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - 15102:8080 - "15999" @@ -308,7 +308,7 @@ services: - CMD-SHELL - curl -s --fail --show-error localhost:8080/debug/health timeout: 10s - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - 15201:8080 - "15999" @@ -345,7 +345,7 @@ services: - CMD-SHELL - curl -s --fail --show-error localhost:8080/debug/health timeout: 10s - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - 15202:8080 - "15999" @@ -382,7 +382,7 @@ services: - CMD-SHELL - curl -s --fail --show-error localhost:8080/debug/health timeout: 10s - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - 15301:8080 - "15999" diff --git a/examples/compose/vtcompose/vtcompose.go b/examples/compose/vtcompose/vtcompose.go index 2b8ad0f5cf5..b054b6a4d2e 100644 --- a/examples/compose/vtcompose/vtcompose.go +++ b/examples/compose/vtcompose/vtcompose.go @@ -533,8 +533,8 @@ func generateDefaultShard(tabAlias int, shard string, keyspaceData keyspaceInfo, - op: add path: /services/init_shard_primary%[2]d value: - image: vitess/lite:v17.0.2 - command: ["sh", "-c", "/vt/bin/vtctlclient %[5]s InitShardPrimary -force %[4]s/%[3]s %[6]s-%[2]d "] + image: vitess/lite:v18.0.0 + command: ["sh", "-c", "/vt/bin/vtctldclient %[5]s InitShardPrimary --force %[4]s/%[3]s %[6]s-%[2]d "] %[1]s `, dependsOn, aliases[0], shard, keyspaceData.keyspace, opts.topologyFlags, opts.cell) } @@ -565,7 +565,7 @@ func generateExternalPrimary( - op: add path: /services/vttablet%[1]d value: - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - "15%[1]d:%[3]d" - "%[4]d" @@ -627,7 +627,7 @@ func generateDefaultTablet(tabAlias int, shard, role, keyspace string, dbInfo ex - op: add path: /services/vttablet%[1]d value: - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - "15%[1]d:%[4]d" - "%[5]d" @@ -665,7 +665,7 @@ func generateVtctld(opts vtOptions) string { - op: add 
path: /services/vtctld value: - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - "15000:%[1]d" - "%[2]d" @@ -696,7 +696,7 @@ func generateVtgate(opts vtOptions) string { - op: add path: /services/vtgate value: - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 ports: - "15099:%[1]d" - "%[2]d" @@ -738,7 +738,7 @@ func generateVTOrc(dbInfo externalDbInfo, keyspaceInfoMap map[string]keyspaceInf - op: add path: /services/vtorc value: - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 volumes: - ".:/script" environment: @@ -763,7 +763,7 @@ func generateVreplication(dbInfo externalDbInfo, opts vtOptions) string { - op: add path: /services/vreplication value: - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 volumes: - ".:/script" environment: @@ -791,7 +791,7 @@ func generateSetKeyspaceDurabilityPolicy( - op: add path: /services/set_keyspace_durability_policy_%[3]s value: - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 volumes: - ".:/script" environment: @@ -828,7 +828,7 @@ func generateSchemaload( - op: add path: /services/schemaload_%[7]s value: - image: vitess/lite:v17.0.2 + image: vitess/lite:v18.0.0 volumes: - ".:/script" environment: diff --git a/examples/compose/vttablet-up.sh b/examples/compose/vttablet-up.sh index 4978840866c..eeae54251f0 100755 --- a/examples/compose/vttablet-up.sh +++ b/examples/compose/vttablet-up.sh @@ -108,7 +108,7 @@ sleep $sleeptime # Create the cell # https://vitess.io/blog/2020-04-27-life-of-a-cluster/ -$VTROOT/bin/vtctlclient --server vtctld:$GRPC_PORT -- AddCellInfo --root vitess/$CELL --server_address consul1:8500 $CELL || true +$VTROOT/bin/vtctldclient --server vtctld:$GRPC_PORT AddCellInfo --root vitess/$CELL --server-address consul1:8500 $CELL || true #Populate external db conditional args if [ $tablet_role = "externalprimary" ]; then @@ -154,7 +154,6 @@ exec $VTROOT/bin/vttablet \ --port $web_port \ --grpc_port $grpc_port \ --service_map 
'grpc-queryservice,grpc-tabletmanager,grpc-updatestream' \ - --vtctld_addr "http://vtctld:$WEB_PORT/" \ --init_keyspace $keyspace \ --init_shard $shard \ --backup_storage_implementation file \ diff --git a/examples/local/101_initial_cluster.sh b/examples/local/101_initial_cluster.sh index db4917bdac0..e93ba8ed170 100755 --- a/examples/local/101_initial_cluster.sh +++ b/examples/local/101_initial_cluster.sh @@ -31,8 +31,6 @@ SIDECAR_DB_NAME=${SIDECAR_DB_NAME:-"_vt"} # start topo server if [ "${TOPO}" = "zk2" ]; then CELL=zone1 ../common/scripts/zk-up.sh -elif [ "${TOPO}" = "k8s" ]; then - CELL=zone1 ../common/scripts/k3s-up.sh elif [ "${TOPO}" = "consul" ]; then CELL=zone1 ../common/scripts/consul-up.sh else @@ -44,18 +42,29 @@ CELL=zone1 ../common/scripts/vtctld-up.sh if vtctldclient GetKeyspace commerce > /dev/null 2>&1 ; then # Keyspace already exists: we could be running this 101 example on an non-empty VTDATAROOT - vtctldclient --server localhost:15999 SetKeyspaceDurabilityPolicy --durability-policy=semi_sync commerce || fail "Failed to set keyspace durability policy on the commerce keyspace" + vtctldclient SetKeyspaceDurabilityPolicy --durability-policy=semi_sync commerce || fail "Failed to set keyspace durability policy on the commerce keyspace" else # Create the keyspace with the sidecar database name and set the # correct durability policy. Please see the comment above for # more context on using a custom sidecar database name in your # Vitess clusters. 
- vtctldclient --server localhost:15999 CreateKeyspace --sidecar-db-name="${SIDECAR_DB_NAME}" --durability-policy=semi_sync commerce || fail "Failed to create and configure the commerce keyspace" + vtctldclient CreateKeyspace --sidecar-db-name="${SIDECAR_DB_NAME}" --durability-policy=semi_sync commerce || fail "Failed to create and configure the commerce keyspace" fi +# start mysqlctls for keyspace commerce +# because MySQL takes time to start, we do this in parallel +for i in 100 101 102; do + CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh & +done + +# without a sleep, we can have below echo happen before the echo of mysqlctl-up.sh +sleep 2 +echo "Waiting for mysqlctls to start..." +wait +echo "mysqlctls are running!" + # start vttablets for keyspace commerce for i in 100 101 102; do - CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh CELL=zone1 KEYSPACE=commerce TABLET_UID=$i ../common/scripts/vttablet-up.sh done diff --git a/examples/local/202_move_tables.sh b/examples/local/202_move_tables.sh index f385acb12a3..a4a24150973 100755 --- a/examples/local/202_move_tables.sh +++ b/examples/local/202_move_tables.sh @@ -19,4 +19,7 @@ source ../common/env.sh -vtctlclient MoveTables -- --source commerce --tables 'customer,corder' Create customer.commerce2customer +vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer create --source-keyspace commerce --tables "customer,corder" + +# Wait for the workflow to reach the running state. 
+wait_for_workflow_running customer commerce2customer diff --git a/examples/local/203_switch_reads.sh b/examples/local/203_switch_reads.sh index 4bca7e4e257..a307c583171 100755 --- a/examples/local/203_switch_reads.sh +++ b/examples/local/203_switch_reads.sh @@ -19,4 +19,4 @@ source ../common/env.sh -vtctlclient MoveTables -- --tablet_types=rdonly,replica SwitchTraffic customer.commerce2customer +vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer switchtraffic --tablet-types "rdonly,replica" diff --git a/examples/local/204_switch_writes.sh b/examples/local/204_switch_writes.sh index 743ca1e2512..8305356a1cf 100755 --- a/examples/local/204_switch_writes.sh +++ b/examples/local/204_switch_writes.sh @@ -19,4 +19,4 @@ source ../common/env.sh -vtctlclient MoveTables -- --tablet_types=primary SwitchTraffic customer.commerce2customer +vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer switchtraffic --tablet-types primary diff --git a/examples/local/205_clean_commerce.sh b/examples/local/205_clean_commerce.sh index 5d307a231d3..127437d1d1b 100755 --- a/examples/local/205_clean_commerce.sh +++ b/examples/local/205_clean_commerce.sh @@ -19,5 +19,4 @@ source ../common/env.sh -vtctlclient MoveTables Complete customer.commerce2customer - +vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer complete diff --git a/examples/local/301_customer_sharded.sh b/examples/local/301_customer_sharded.sh index ad80cdd98dd..7d1fac41dce 100755 --- a/examples/local/301_customer_sharded.sh +++ b/examples/local/301_customer_sharded.sh @@ -26,3 +26,4 @@ vtctldclient ApplySchema --sql-file create_commerce_seq.sql commerce || fail "Fa vtctldclient ApplyVSchema --vschema-file vschema_commerce_seq.json commerce || fail "Failed to create vschema sequences in the commerce keyspace" vtctldclient ApplyVSchema --vschema-file vschema_customer_sharded.json customer || fail "Failed to create vschema in sharded customer 
keyspace" vtctldclient ApplySchema --sql-file create_customer_sharded.sql customer || fail "Failed to create schema in sharded customer keyspace" +vtctldclient ApplySchema --sql-file create_commerce_seq.sql customer || fail "Failed to create schema in sharded customer keyspace" diff --git a/examples/local/303_reshard.sh b/examples/local/303_reshard.sh index ea12987e9ed..5bf36ff7a19 100755 --- a/examples/local/303_reshard.sh +++ b/examples/local/303_reshard.sh @@ -19,4 +19,8 @@ source ../common/env.sh -vtctlclient Reshard -- --source_shards '0' --target_shards '-80,80-' Create customer.cust2cust +vtctldclient Reshard --workflow cust2cust --target-keyspace customer create --source-shards '0' --target-shards '-80,80-' + +# Wait for the workflow to reach the running state. +wait_for_workflow_running customer cust2cust + diff --git a/examples/local/304_switch_reads.sh b/examples/local/304_switch_reads.sh index 52d6093f4ff..5e4edff7f0d 100755 --- a/examples/local/304_switch_reads.sh +++ b/examples/local/304_switch_reads.sh @@ -18,4 +18,4 @@ source ../common/env.sh -vtctlclient Reshard -- --tablet_types=rdonly,replica SwitchTraffic customer.cust2cust +vtctldclient Reshard --workflow cust2cust --target-keyspace customer switchtraffic --tablet-types "rdonly,replica" diff --git a/examples/local/305_switch_writes.sh b/examples/local/305_switch_writes.sh index 9bbc7ed9ea5..c9bd66b92a5 100755 --- a/examples/local/305_switch_writes.sh +++ b/examples/local/305_switch_writes.sh @@ -18,4 +18,5 @@ source ../common/env.sh -vtctlclient Reshard -- --tablet_types=primary SwitchTraffic customer.cust2cust +vtctldclient Reshard --workflow cust2cust --target-keyspace customer switchtraffic --tablet-types "primary" + diff --git a/examples/local/306_down_shard_0.sh b/examples/local/306_down_shard_0.sh index db860b3e23c..5c8332f95bc 100755 --- a/examples/local/306_down_shard_0.sh +++ b/examples/local/306_down_shard_0.sh @@ -17,7 +17,7 @@ source ../common/env.sh -vtctlclient Reshard Complete 
customer.cust2cust +vtctldclient Reshard --workflow cust2cust --target-keyspace customer complete for i in 200 201 202; do CELL=zone1 TABLET_UID=$i ../common/scripts/vttablet-down.sh diff --git a/examples/local/401_teardown.sh b/examples/local/401_teardown.sh index ab93f453668..8f3e7844c5a 100755 --- a/examples/local/401_teardown.sh +++ b/examples/local/401_teardown.sh @@ -33,8 +33,15 @@ for tablet in 100 200 300 400; do printf -v alias '%s-%010d' 'zone1' $uid echo "Shutting down tablet $alias" CELL=zone1 TABLET_UID=$uid ../common/scripts/vttablet-down.sh - CELL=zone1 TABLET_UID=$uid ../common/scripts/mysqlctl-down.sh + # because MySQL takes time to stop, we do this in parallel + CELL=zone1 TABLET_UID=$uid ../common/scripts/mysqlctl-down.sh & done + + # without a sleep below, we can have the echo happen before the echo of mysqlctl-down.sh + sleep 2 + echo "Waiting mysqlctl to stop..." + wait + echo "mysqlctls are stopped!" fi done @@ -42,8 +49,6 @@ done if [ "${TOPO}" = "zk2" ]; then CELL=zone1 ../common/scripts/zk-down.sh -elif [ "${TOPO}" = "k8s" ]; then - CELL=zone1 ../common/scripts/k3s-down.sh elif [ "${TOPO}" = "consul" ]; then CELL=zone1 ../common/scripts/consul-down.sh else diff --git a/examples/local/README.md b/examples/local/README.md index cb846b7c8b1..233cd3cacf5 100644 --- a/examples/local/README.md +++ b/examples/local/README.md @@ -19,35 +19,38 @@ mysql --table < ../common/select_commerce_data.sql ./201_customer_tablets.sh # Initiate move tables -vtctlclient MoveTables -- --source commerce --tables 'customer,corder' Create customer.commerce2customer +vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer create --source-keyspace commerce --tables "customer,corder" # Validate -vtctlclient VDiff customer.commerce2customer +vtctldclient vdiff --workflow commerce2customer --target-keyspace customer create +vtctldclient vdiff --workflow commerce2customer --target-keyspace customer show last # Cut-over -vtctlclient MoveTables -- 
--tablet_types=rdonly,replica SwitchTraffic customer.commerce2customer -vtctlclient MoveTables -- --tablet_types=primary SwitchTraffic customer.commerce2customer +vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer switchtraffic --tablet-types "rdonly,replica" +vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer switchtraffic --tablet-types primary # Clean-up -vtctlclient MoveTables Complete customer.commerce2customer +vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer complete # Prepare for resharding ./301_customer_sharded.sh ./302_new_shards.sh # Reshard -vtctlclient Reshard -- --source_shards '0' --target_shards '-80,80-' Create customer.cust2cust +vtctldclient Reshard --workflow cust2cust --target-keyspace customer create --source-shards '0' --target-shards '-80,80-' # Validate -vtctlclient VDiff customer.cust2cust +vtctldclient vdiff --workflow cust2cust --target-keyspace customer create +vtctldclient vdiff --workflow cust2cust --target-keyspace customer show last # Cut-over -vtctlclient Reshard -- --tablet_types=rdonly,replica SwitchTraffic customer.cust2cust -vtctlclient Reshard -- --tablet_types=primary SwitchTraffic customer.cust2cust +vtctldclient Reshard --workflow cust2cust --target-keyspace customer switchtraffic --tablet-types "rdonly,replica" +vtctldclient Reshard --workflow cust2cust --target-keyspace customer switchtraffic --tablet-types primary # Down shard 0 +vtctldclient Reshard --workflow cust2cust --target-keyspace customer complete ./306_down_shard_0.sh -vtctlclient DeleteShard -- --force --recursive customer/0 +vtctldclient DeleteShards --force --recursive customer/0 # Down cluster ./401_teardown.sh diff --git a/examples/local/vschema_customer_sharded.json b/examples/local/vschema_customer_sharded.json index 4163cb7c2f9..a74112e9fc7 100644 --- a/examples/local/vschema_customer_sharded.json +++ b/examples/local/vschema_customer_sharded.json @@ -15,12 +15,9 
@@ ], "auto_increment": { "column": "customer_id", - "sequence": "customer_seq" + "sequence": "customer.customer_seq" } }, - "messages": { - "pinned": "00" - }, "corder": { "column_vindexes": [ { @@ -30,8 +27,27 @@ ], "auto_increment": { "column": "order_id", - "sequence": "order_seq" + "sequence": "customer.order_seq" } + }, + "messages": { + "pinned": "00" + }, + "test": { + "column_vindexes": [ + { + "column": "id", + "name": "hash" + } + ] + }, + "customer_seq": { + "type": "sequence", + "pinned": "00" + }, + "order_seq": { + "type": "sequence", + "pinned": "00" } } } diff --git a/examples/operator/101_initial_cluster.yaml b/examples/operator/101_initial_cluster.yaml index 24831bc9e88..d122c27ff05 100644 --- a/examples/operator/101_initial_cluster.yaml +++ b/examples/operator/101_initial_cluster.yaml @@ -8,14 +8,14 @@ metadata: name: example spec: images: - vtctld: vitess/lite:v17.0.2 - vtadmin: vitess/vtadmin:v17.0.2 - vtgate: vitess/lite:v17.0.2 - vttablet: vitess/lite:v17.0.2 - vtbackup: vitess/lite:v17.0.2 - vtorc: vitess/lite:v17.0.2 + vtctld: vitess/lite:v18.0.0 + vtadmin: vitess/vtadmin:v18.0.0 + vtgate: vitess/lite:v18.0.0 + vttablet: vitess/lite:v18.0.0 + vtbackup: vitess/lite:v18.0.0 + vtorc: vitess/lite:v18.0.0 mysqld: - mysql80Compatible: vitess/lite:v17.0.2 + mysql80Compatible: vitess/lite:v18.0.0 mysqldExporter: prom/mysqld-exporter:v0.11.0 cells: - name: zone1 diff --git a/examples/operator/201_customer_tablets.yaml b/examples/operator/201_customer_tablets.yaml index 1f93549147a..b53aa44c292 100644 --- a/examples/operator/201_customer_tablets.yaml +++ b/examples/operator/201_customer_tablets.yaml @@ -4,14 +4,14 @@ metadata: name: example spec: images: - vtctld: vitess/lite:v17.0.2 - vtadmin: vitess/vtadmin:v17.0.2 - vtgate: vitess/lite:v17.0.2 - vttablet: vitess/lite:v17.0.2 - vtbackup: vitess/lite:v17.0.2 - vtorc: vitess/lite:v17.0.2 + vtctld: vitess/lite:v18.0.0 + vtadmin: vitess/vtadmin:v18.0.0 + vtgate: vitess/lite:v18.0.0 + vttablet: 
vitess/lite:v18.0.0 + vtbackup: vitess/lite:v18.0.0 + vtorc: vitess/lite:v18.0.0 mysqld: - mysql80Compatible: vitess/lite:v17.0.2 + mysql80Compatible: vitess/lite:v18.0.0 mysqldExporter: prom/mysqld-exporter:v0.11.0 cells: - name: zone1 diff --git a/examples/operator/302_new_shards.yaml b/examples/operator/302_new_shards.yaml index 87ba78de105..eefba85cc43 100644 --- a/examples/operator/302_new_shards.yaml +++ b/examples/operator/302_new_shards.yaml @@ -4,14 +4,14 @@ metadata: name: example spec: images: - vtctld: vitess/lite:v17.0.2 - vtadmin: vitess/vtadmin:v17.0.2 - vtgate: vitess/lite:v17.0.2 - vttablet: vitess/lite:v17.0.2 - vtbackup: vitess/lite:v17.0.2 - vtorc: vitess/lite:v17.0.2 + vtctld: vitess/lite:v18.0.0 + vtadmin: vitess/vtadmin:v18.0.0 + vtgate: vitess/lite:v18.0.0 + vttablet: vitess/lite:v18.0.0 + vtbackup: vitess/lite:v18.0.0 + vtorc: vitess/lite:v18.0.0 mysqld: - mysql80Compatible: vitess/lite:v17.0.2 + mysql80Compatible: vitess/lite:v18.0.0 mysqldExporter: prom/mysqld-exporter:v0.11.0 cells: - name: zone1 diff --git a/examples/operator/306_down_shard_0.yaml b/examples/operator/306_down_shard_0.yaml index c9ef0c856e7..090752fc885 100644 --- a/examples/operator/306_down_shard_0.yaml +++ b/examples/operator/306_down_shard_0.yaml @@ -4,14 +4,14 @@ metadata: name: example spec: images: - vtctld: vitess/lite:v17.0.2 - vtadmin: vitess/vtadmin:v17.0.2 - vtgate: vitess/lite:v17.0.2 - vttablet: vitess/lite:v17.0.2 - vtbackup: vitess/lite:v17.0.2 - vtorc: vitess/lite:v17.0.2 + vtctld: vitess/lite:v18.0.0 + vtadmin: vitess/vtadmin:v18.0.0 + vtgate: vitess/lite:v18.0.0 + vttablet: vitess/lite:v18.0.0 + vtbackup: vitess/lite:v18.0.0 + vtorc: vitess/lite:v18.0.0 mysqld: - mysql80Compatible: vitess/lite:v17.0.2 + mysql80Compatible: vitess/lite:v18.0.0 mysqldExporter: prom/mysqld-exporter:v0.11.0 cells: - name: zone1 diff --git a/examples/operator/README.md b/examples/operator/README.md index de2e598b516..9182b25340c 100644 --- a/examples/operator/README.md +++ 
b/examples/operator/README.md @@ -26,9 +26,9 @@ kubectl apply -f 101_initial_cluster.yaml # VTAdmin's UI will be available at http://localhost:14000/ ./pf.sh & alias mysql="mysql -h 127.0.0.1 -P 15306 -u user" -alias vtctlclient="vtctlclient --server localhost:15999 --alsologtostderr" -vtctlclient ApplySchema -- --sql="$(cat create_commerce_schema.sql)" commerce -vtctlclient ApplyVSchema -- --vschema="$(cat vschema_commerce_initial.json)" commerce +alias vtctldclient="vtctldclient --server localhost:15999 --alsologtostderr" +vtctldclient ApplySchema --sql="$(cat create_commerce_schema.sql)" commerce +vtctldclient ApplyVSchema --vschema="$(cat vschema_commerce_initial.json)" commerce # Insert and verify data mysql < ../common/insert_commerce_data.sql @@ -38,37 +38,39 @@ mysql --table < ../common/select_commerce_data.sql kubectl apply -f 201_customer_tablets.yaml # Initiate move tables -vtctlclient MoveTables -- --source commerce --tables 'customer,corder' Create customer.commerce2customer +vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer create --source-keyspace commerce --tables "customer,corder" # Validate -vtctlclient VDiff customer.commerce2customer +vtctldclient vdiff --workflow commerce2customer --target-keyspace customer create +vtctldclient vdiff --workflow commerce2customer --target-keyspace customer show last # Cut-over -vtctlclient MoveTables -- --tablet_types=rdonly,replica SwitchTraffic customer.commerce2customer -vtctlclient MoveTables -- --tablet_types=primary SwitchTraffic customer.commerce2customer +vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer switchtraffic --tablet-types "rdonly,replica" +vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer switchtraffic --tablet-types primary # Clean-up -vtctlclient MoveTables Complete customer.commerce2customer +vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer complete # Prepare for 
resharding -vtctlclient ApplySchema -- --sql="$(cat create_commerce_seq.sql)" commerce -vtctlclient ApplyVSchema -- --vschema="$(cat vschema_commerce_seq.json)" commerce -vtctlclient ApplySchema -- --sql="$(cat create_customer_sharded.sql)" customer -vtctlclient ApplyVSchema -- --vschema="$(cat vschema_customer_sharded.json)" customer +vtctldclient ApplySchema --sql="$(cat create_commerce_seq.sql)" commerce +vtctldclient ApplyVSchema --vschema="$(cat vschema_commerce_seq.json)" commerce +vtctldclient ApplySchema --sql="$(cat create_customer_sharded.sql)" customer +vtctldclient ApplyVSchema --vschema="$(cat vschema_customer_sharded.json)" customer kubectl apply -f 302_new_shards.yaml # Reshard -vtctlclient Reshard -- --source_shards '-' --target_shards '-80,80-' Create customer.cust2cust +vtctldclient Reshard --workflow cust2cust --target-keyspace customer create --source-shards '-' --target-shards '-80,80-' # Validate -vtctlclient VDiff customer.cust2cust +vtctldclient vdiff --workflow cust2cust --target-keyspace customer create +vtctldclient vdiff --workflow cust2cust --target-keyspace customer show last # Cut-over -vtctlclient Reshard -- --tablet_types=rdonly,replica SwitchTraffic customer.cust2cust -vtctlclient Reshard -- --tablet_types=primary SwitchTraffic customer.cust2cust +vtctldclient Reshard --workflow cust2cust --target-keyspace customer switchtraffic --tablet-types "rdonly,replica" +vtctldclient Reshard --workflow cust2cust --target-keyspace customer switchtraffic --tablet-types primary # Down shard 0 -vtctlclient Reshard Complete customer.cust2cust +vtctldclient Reshard --workflow cust2cust --target-keyspace customer complete kubectl apply -f 306_down_shard_0.yaml # Down cluster diff --git a/examples/operator/operator.yaml b/examples/operator/operator.yaml index 4c627daed22..3a2b0e66121 100644 --- a/examples/operator/operator.yaml +++ b/examples/operator/operator.yaml @@ -6145,7 +6145,7 @@ spec: fieldPath: metadata.name - name: OPERATOR_NAME value: 
vitess-operator - image: planetscale/vitess-operator:v2.10.2 + image: planetscale/vitess-operator:v2.11.0 name: vitess-operator resources: limits: diff --git a/examples/operator/pf.sh b/examples/operator/pf.sh index 7d784ea2a33..5af7a429667 100755 --- a/examples/operator/pf.sh +++ b/examples/operator/pf.sh @@ -8,7 +8,6 @@ kubectl port-forward --address localhost "$(kubectl get service --selector="plan process_id3=$! sleep 2 echo "You may point your browser to http://localhost:15000, use the following aliases as shortcuts:" -echo 'alias vtctlclient="vtctlclient --server=localhost:15999 --logtostderr"' echo 'alias vtctldclient="vtctldclient --server=localhost:15999 --logtostderr"' echo 'alias mysql="mysql -h 127.0.0.1 -P 15306 -u user"' echo "Hit Ctrl-C to stop the port forwards" diff --git a/examples/region_sharding/101_initial_cluster.sh b/examples/region_sharding/101_initial_cluster.sh index c2692440189..6dd8989a32f 100755 --- a/examples/region_sharding/101_initial_cluster.sh +++ b/examples/region_sharding/101_initial_cluster.sh @@ -22,8 +22,6 @@ source ../common/env.sh # start topo server if [ "${TOPO}" = "zk2" ]; then CELL=zone1 ../common/scripts/zk-up.sh -elif [ "${TOPO}" = "k8s" ]; then - CELL=zone1 ../common/scripts/k3s-up.sh else CELL=zone1 ../common/scripts/etcd-up.sh fi diff --git a/examples/region_sharding/201_main_sharded.sh b/examples/region_sharding/201_main_sharded.sh index 387f89506db..cb0bb1ff823 100755 --- a/examples/region_sharding/201_main_sharded.sh +++ b/examples/region_sharding/201_main_sharded.sh @@ -20,14 +20,14 @@ source ../common/env.sh vtctldclient ApplyVSchema --vschema-file main_vschema_sharded.json main || fail "Failed to apply vschema for the sharded main keyspace" # optional: create the schema needed for lookup vindex -#vtctlclient ApplySchema --sql-file create_lookup_schema.sql main +#vtctldclient ApplySchema --sql-file create_lookup_schema.sql main # create the lookup vindex -vtctlclient CreateLookupVindex -- --tablet_types=PRIMARY 
main "$(cat lookup_vindex.json)" || fail "Failed to create lookup vindex in main keyspace" +vtctldclient LookupVindex --name customer_region_lookup --table-keyspace main create --keyspace main --type consistent_lookup_unique --table-owner customer --table-owner-columns=id --tablet-types=PRIMARY || fail "Failed to create lookup vindex in main keyspace" # we have to wait for replication to catch up # Can see on vttablet status page Vreplication that copy is complete sleep 5 # externalize vindex -vtctlclient ExternalizeVindex main.customer_region_lookup || fail "Failed to externalize customer_region_lookup vindex in the main keyspace" +vtctldclient LookupVindex --name customer_region_lookup --table-keyspace main externalize --keyspace main || fail "Failed to externalize customer_region_lookup vindex in the main keyspace" diff --git a/examples/region_sharding/203_reshard.sh b/examples/region_sharding/203_reshard.sh index aaa448a135d..753b5947623 100755 --- a/examples/region_sharding/203_reshard.sh +++ b/examples/region_sharding/203_reshard.sh @@ -16,4 +16,4 @@ source ../common/env.sh -vtctlclient Reshard -- --source_shards '0' --target_shards '-40,40-80,80-c0,c0-' --tablet_types=PRIMARY Create main.main2regions +vtctldclient reshard --workflow main2regions --target-keyspace main create --source-shards '0' --target-shards '-40,40-80,80-c0,c0-' --tablet-types=PRIMARY diff --git a/examples/region_sharding/204_switch_reads.sh b/examples/region_sharding/204_switch_reads.sh index 20703938199..570d5f60f9c 100755 --- a/examples/region_sharding/204_switch_reads.sh +++ b/examples/region_sharding/204_switch_reads.sh @@ -18,4 +18,4 @@ source ../common/env.sh -vtctlclient Reshard -- --tablet_types=rdonly,replica SwitchTraffic main.main2regions +vtctldclient reshard --workflow main2regions --target-keyspace main SwitchTraffic --tablet-types=rdonly,replica diff --git a/examples/region_sharding/205_switch_writes.sh b/examples/region_sharding/205_switch_writes.sh index 
ad0d8ee51d2..981aa016d56 100755 --- a/examples/region_sharding/205_switch_writes.sh +++ b/examples/region_sharding/205_switch_writes.sh @@ -18,7 +18,7 @@ source ../common/env.sh -vtctlclient Reshard -- --tablet_types=primary SwitchTraffic main.main2regions +vtctldclient reshard --workflow main2regions --target-keyspace main SwitchTraffic --tablet-types=primary # to go back to unsharded # call Reshard ReverseTraffic with all tablet types diff --git a/examples/region_sharding/301_teardown.sh b/examples/region_sharding/301_teardown.sh index 25f3bb259f2..ee86772a4f2 100755 --- a/examples/region_sharding/301_teardown.sh +++ b/examples/region_sharding/301_teardown.sh @@ -26,7 +26,7 @@ source ../common/env.sh ../common/scripts/vtgate-down.sh for tablet in 100 200 300 400 500; do - if vtctlclient --server localhost:15999 GetTablet zone1-$tablet >/dev/null 2>&1; then + if vtctldclient --server localhost:15999 GetTablet zone1-$tablet >/dev/null 2>&1; then printf -v alias '%s-%010d' 'zone1' $tablet echo "Shutting down tablet $alias" CELL=zone1 TABLET_UID=$tablet ../common/scripts/vttablet-down.sh @@ -38,8 +38,6 @@ done if [ "${TOPO}" = "zk2" ]; then CELL=zone1 ../common/scripts/zk-down.sh -elif [ "${TOPO}" = "k8s" ]; then - CELL=zone1 ../common/scripts/k3s-down.sh else CELL=zone1 ../common/scripts/etcd-down.sh fi diff --git a/go.mod b/go.mod index e794bb447e8..23505f06ea4 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module vitess.io/vitess -go 1.20 +go 1.21 require ( cloud.google.com/go/storage v1.29.0 @@ -9,10 +9,9 @@ require ( github.com/Azure/azure-storage-blob-go v0.15.0 github.com/DataDog/datadog-go v4.8.3+incompatible github.com/HdrHistogram/hdrhistogram-go v0.9.0 // indirect - github.com/PuerkitoBio/goquery v1.5.1 github.com/aquarapid/vaultlib v0.5.1 github.com/armon/go-metrics v0.4.1 // indirect - github.com/aws/aws-sdk-go v1.44.192 + github.com/aws/aws-sdk-go v1.44.258 github.com/buger/jsonparser v1.1.1 github.com/cespare/xxhash/v2 v2.2.0 github.com/corpix/uarand 
v0.1.1 // indirect @@ -21,7 +20,7 @@ require ( github.com/fsnotify/fsnotify v1.6.0 github.com/go-sql-driver/mysql v1.7.0 github.com/golang/glog v1.0.0 - github.com/golang/protobuf v1.5.2 + github.com/golang/protobuf v1.5.3 github.com/golang/snappy v0.0.4 github.com/google/go-cmp v0.5.9 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 @@ -30,11 +29,10 @@ require ( github.com/gorilla/mux v1.8.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/hashicorp/consul/api v1.18.0 + github.com/hashicorp/consul/api v1.20.0 github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/serf v0.10.1 // indirect github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428 - github.com/imdario/mergo v0.3.13 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/klauspost/compress v1.16.5 github.com/klauspost/pgzip v1.2.5 @@ -52,16 +50,16 @@ require ( github.com/pires/go-proxyproto v0.6.2 github.com/pkg/errors v0.9.1 github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a - github.com/planetscale/vtprotobuf v0.4.0 - github.com/prometheus/client_golang v1.14.0 - github.com/prometheus/common v0.39.0 // indirect + github.com/planetscale/vtprotobuf v0.5.0 + github.com/prometheus/client_golang v1.15.1 + github.com/prometheus/common v0.43.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 github.com/sjmudd/stopwatch v0.1.1 github.com/soheilhy/cmux v0.1.5 github.com/spf13/cobra v1.6.1 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.2 github.com/tchap/go-patricia v2.3.0+incompatible github.com/tidwall/gjson v1.12.1 github.com/tinylib/msgp v1.1.8 // indirect @@ -69,148 +67,126 @@ require ( github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 github.com/z-division/go-zookeeper v1.0.0 - 
go.etcd.io/etcd/api/v3 v3.5.7 - go.etcd.io/etcd/client/pkg/v3 v3.5.7 - go.etcd.io/etcd/client/v3 v3.5.7 + go.etcd.io/etcd/api/v3 v3.5.8 + go.etcd.io/etcd/client/pkg/v3 v3.5.8 + go.etcd.io/etcd/client/v3 v3.5.8 go.uber.org/mock v0.2.0 - golang.org/x/crypto v0.12.0 // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/mod v0.12.0 // indirect - golang.org/x/net v0.14.0 - golang.org/x/oauth2 v0.4.0 - golang.org/x/sys v0.11.0 // indirect - golang.org/x/term v0.11.0 - golang.org/x/text v0.12.0 + golang.org/x/net v0.17.0 + golang.org/x/oauth2 v0.7.0 + golang.org/x/sys v0.13.0 + golang.org/x/term v0.13.0 + golang.org/x/text v0.13.0 golang.org/x/time v0.3.0 - golang.org/x/tools v0.12.0 - google.golang.org/api v0.109.0 - google.golang.org/genproto v0.0.0-20230131230820-1c016267d619 // indirect - google.golang.org/grpc v1.52.3 - google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 + golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 + google.golang.org/api v0.121.0 + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/grpc v1.55.0-dev + google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b - google.golang.org/protobuf v1.28.1 - gopkg.in/DataDog/dd-trace-go.v1 v1.47.0 + google.golang.org/protobuf v1.30.0 + gopkg.in/DataDog/dd-trace-go.v1 v1.50.1 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect - gopkg.in/gcfg.v1 v1.2.3 gopkg.in/ldap.v2 v2.5.1 - gopkg.in/warnings.v0 v0.1.2 // indirect gotest.tools v2.2.0+incompatible - k8s.io/apiextensions-apiserver v0.18.19 - k8s.io/apimachinery v0.26.1 - k8s.io/client-go v0.26.1 - k8s.io/code-generator v0.26.1 sigs.k8s.io/yaml v1.3.0 ) require ( github.com/Shopify/toxiproxy/v2 v2.5.0 github.com/bndr/gotabulate v1.1.2 - github.com/golang/mock v1.6.0 + github.com/gammazero/deque v0.2.1 github.com/google/safehtml v0.1.0 github.com/hashicorp/go-version v1.6.0 github.com/kr/pretty v0.3.1 
github.com/kr/text v0.2.0 github.com/mitchellh/mapstructure v1.5.0 github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249 + github.com/spf13/afero v1.9.3 github.com/spf13/jwalterweatherman v1.1.0 github.com/xlab/treeprint v1.2.0 + go.uber.org/goleak v1.2.1 golang.org/x/exp v0.0.0-20230131160201-f062dba9d201 golang.org/x/sync v0.3.0 - k8s.io/utils v0.0.0-20230115233650-391b47cb4029 modernc.org/sqlite v1.20.3 ) require ( - cloud.google.com/go v0.109.0 // indirect - cloud.google.com/go/compute v1.18.0 // indirect + cloud.google.com/go v0.110.0 // indirect + cloud.google.com/go/compute v1.19.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v0.10.0 // indirect - github.com/DataDog/datadog-agent/pkg/obfuscate v0.42.0 // indirect - github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0 // indirect + cloud.google.com/go/iam v0.13.0 // indirect + github.com/DataDog/appsec-internal-go v1.0.0 // indirect + github.com/DataDog/datadog-agent/pkg/obfuscate v0.43.1 // indirect + github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.45.0-rc.1 // indirect github.com/DataDog/datadog-go/v5 v5.2.0 // indirect + github.com/DataDog/go-libddwaf v1.1.0 // indirect github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork // indirect github.com/DataDog/sketches-go v1.4.1 // indirect github.com/Microsoft/go-winio v0.6.0 // indirect - github.com/andybalholm/cascadia v1.1.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/cyphar/filepath-securejoin v0.2.3 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/emicklei/go-restful/v3 v3.10.1 // indirect - github.com/fatih/color v1.14.1 // 
indirect + github.com/fatih/color v1.15.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/google/gnostic v0.6.9 // indirect - github.com/google/gofuzz v1.2.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect - github.com/googleapis/gax-go/v2 v2.7.0 // indirect + github.com/google/btree v1.0.1 // indirect + github.com/google/s2a-go v0.1.3 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/googleapis/gax-go/v2 v2.8.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v1.4.0 // indirect + github.com/hashicorp/go-hclog v1.5.0 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/josharian/intern v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-ieproxy v0.0.9 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-ieproxy v0.0.10 // indirect + github.com/mattn/go-isatty v0.0.18 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect 
- github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pelletier/go-toml/v2 v2.0.6 // indirect + github.com/onsi/gomega v1.23.0 // indirect + github.com/outcaste-io/ristretto v0.2.1 // indirect + github.com/pelletier/go-toml/v2 v2.0.7 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect - github.com/rivo/uniseg v0.4.3 // indirect - github.com/rogpeppe/go-internal v1.9.0 // indirect + github.com/rivo/uniseg v0.4.4 // indirect + github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect - github.com/spf13/afero v1.9.3 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.5.0 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/subosito/gotenv v1.4.2 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect go.opencensus.io v0.24.0 // indirect - go.uber.org/atomic v1.10.0 // indirect - go.uber.org/multierr v1.9.0 // indirect - go.uber.org/zap v1.23.0 // indirect - go4.org/intern v0.0.0-20220617035311-6925f38cc365 // indirect - go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.24.0 // indirect + go4.org/intern v0.0.0-20230205224052-192e9f60865c // indirect + go4.org/unsafe/assume-no-moving-gc v0.0.0-20230426161633-7e06285ff160 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 
v3.0.1 // indirect inet.af/netaddr v0.0.0-20220811202034-502d2d690317 // indirect - k8s.io/api v0.26.1 // indirect - k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9 // indirect - k8s.io/klog/v2 v2.90.0 // indirect - k8s.io/kube-openapi v0.0.0-20230202010329-39b3636cbaa3 // indirect lukechampine.com/uint128 v1.2.0 // indirect modernc.org/cc/v3 v3.40.0 // indirect modernc.org/ccgo/v3 v3.16.13 // indirect - modernc.org/libc v1.22.2 // indirect + modernc.org/libc v1.22.5 // indirect modernc.org/mathutil v1.5.0 // indirect modernc.org/memory v1.5.0 // indirect modernc.org/opt v0.1.3 // indirect modernc.org/strutil v1.1.3 // indirect modernc.org/token v1.1.0 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) diff --git a/go.sum b/go.sum index 2edff808e6c..baeeda160ff 100644 --- a/go.sum +++ b/go.sum @@ -17,23 +17,24 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.109.0 h1:38CZoKGlCnPZjGdyj0ZfpoGae0/wgNfy5F0byyxg0Gk= -cloud.google.com/go v0.109.0/go.mod h1:2sYycXt75t/CSB5R9M2wPU1tJmire7AQZTPtITcGBVE= +cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod 
h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.10.0 h1:fpP/gByFs6US1ma53v7VxhvbJpO2Aapng6wabJ99MuI= -cloud.google.com/go/iam v0.10.0/go.mod h1:nXAECrMt2qHpF6RZUZseteD6QyanL68reN4OXPw0UWM= -cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= +cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -53,40 +54,37 @@ github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVt github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-storage-blob-go v0.15.0 
h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk= github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= 
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-agent/pkg/obfuscate v0.42.0 h1:p9uCmbyi4gEbJAOLoT/GjIAQMGe3velLmiC3mMgSIy4= -github.com/DataDog/datadog-agent/pkg/obfuscate v0.42.0/go.mod h1:7Bsrm5U8/B+B8dffT3t733tDvdCr7upqIPSVuDqJ0Mw= -github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0 h1:b/RFr5T6HcEOKoXfKFOqZf33hsUbvskY1F5LDld7HCI= -github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0/go.mod h1:VVMDDibJxYEkwcLdZBT2g8EHKpbMT4JdOhRbQ9GdjbM= +github.com/DataDog/appsec-internal-go v1.0.0 h1:2u5IkF4DBj3KVeQn5Vg2vjPUtt513zxEYglcqnd500U= +github.com/DataDog/appsec-internal-go v1.0.0/go.mod h1:+Y+4klVWKPOnZx6XESG7QHydOaUGEXyH2j/vSg9JiNM= +github.com/DataDog/datadog-agent/pkg/obfuscate v0.43.1 h1:HG4dOM6Ou+zZsaKC++4kpM9VGJ/TYo9X61LPz2mmjDE= +github.com/DataDog/datadog-agent/pkg/obfuscate v0.43.1/go.mod h1:o+rJy3B2o+Zb+wCgLSkMlkD7EiUEA5Q63cid53fZkQY= +github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.45.0-rc.1 h1:0OK84DbAucLUwoDYoBFve1cuhDWtoquruVVDjgucYlI= +github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.45.0-rc.1/go.mod h1:VVMDDibJxYEkwcLdZBT2g8EHKpbMT4JdOhRbQ9GdjbM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q= github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go/v5 v5.1.1/go.mod h1:KhiYb2Badlv9/rofz+OznKoEF5XKTonWyhx5K83AP8E= github.com/DataDog/datadog-go/v5 v5.2.0 h1:kSptqUGSNK67DgA+By3rwtFnAh6pTBxJ7Hn8JCLZcKY= 
github.com/DataDog/datadog-go/v5 v5.2.0/go.mod h1:XRDJk1pTc00gm+ZDiBKsjh7oOOtJfYfglVCmFb8C2+Q= +github.com/DataDog/go-libddwaf v1.1.0 h1:PhlI/31yxu88JEgTYqxffhd8oM4KQMfNWUVyICqIDMY= +github.com/DataDog/go-libddwaf v1.1.0/go.mod h1:DI5y8obPajk+Tvy2o+nZc2g/5Ria/Rfq5/624k7pHpE= github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork h1:yBq5PrAtrM4yVeSzQ+bn050+Ysp++RKF1QmtkL4VqvU= github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork/go.mod h1:yA5JwkZsHTLuqq3zaRgUQf35DfDkpOZqgtBqHKpwrBs= +github.com/DataDog/gostackparse v0.5.0 h1:jb72P6GFHPHz2W0onsN51cS3FkaMDcjb0QzgxxA4gDk= +github.com/DataDog/gostackparse v0.5.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM= github.com/DataDog/sketches-go v1.4.1 h1:j5G6as+9FASM2qC36lvpvQAj9qsv/jUs3FtO8CwZNAY= github.com/DataDog/sketches-go v1.4.1/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk= github.com/HdrHistogram/hdrhistogram-go v0.9.0 h1:dpujRju0R4M/QZzcnR1LH1qm+TVG3UzkWdp5tH1WMcg= @@ -98,40 +96,23 @@ github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpz github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/goquery v1.5.1 h1:PSPBGne8NIUWw+/7vFBV+kG2J/5MOjbzc7154OaKCSE= -github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= 
-github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc= github.com/Shopify/toxiproxy/v2 v2.5.0/go.mod h1:yhM2epWtAmel9CB8r2+L+PCmhH6yH2pITaPAo7jxJl0= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/andybalholm/cascadia v1.1.0 h1:BuuO6sSfQNFRu1LppgbD25Hr2vLYW25JvxHs5zzsLTo= -github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aquarapid/vaultlib v0.5.1 h1:vuLWR6bZzLHybjJBSUYPgZlIp6KZ+SXeHLRRYTuk6d4= github.com/aquarapid/vaultlib v0.5.1/go.mod h1:yT7AlEXtuabkxylOc/+Ulyp18tff1+QjgNLTnFWTlOs= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod 
h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.44.192 h1:KL54vCxRd5v5XBGjnF3FelzXXwl+aWHDmDTihFmRNgM= -github.com/aws/aws-sdk-go v1.44.192/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.258 h1:JVk1lgpsTnb1kvUw3eGhPLcTpEBp6HeSf1fxcYDs2Ho= +github.com/aws/aws-sdk-go v1.44.258/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -139,13 +120,11 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bndr/gotabulate v1.1.2 h1:yC9izuZEphojb9r+KYL4W9IJKO/ceIO8HDwxMA24U4c= github.com/bndr/gotabulate v1.1.2/go.mod h1:0+8yUgaPTtLRTjf49E8oju7ojpU11YmXyvq1LbPAb3U= github.com/buger/jsonparser v1.1.1 
h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -159,91 +138,62 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod 
h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/corpix/uarand v0.1.1 h1:RMr1TWc9F4n5jiPDzFHtmaUXLKLNUFK0SgCLo4BhX/U= github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= 
+github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/dave/jennifer v1.6.0 h1:MQ/6emI2xM7wt0tJzJzyUik2Q3Tcn2eE0vtYgh4GPVI= github.com/dave/jennifer v1.6.0/go.mod h1:AxTG893FiZKqxy3FP1kL80VMshSMuz2G+EgvszgGRnk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= -github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= -github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= 
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= -github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= -github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI= github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= 
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gammazero/deque v0.2.1 h1:qSdsbG6pgp6nL7A0+K/B7s12mcCY/5l5SIUpMOl+dC0= +github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -251,74 +201,17 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-openapi/analysis 
v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= 
-github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.17.0/go.mod 
h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -331,9 +224,7 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -349,15 +240,15 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= -github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -369,19 +260,17 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible 
h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -394,56 +283,48 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ= +github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.3 h1:FAgZmpLl/SXurPEZyCMPBIiiYeTbqfjlbdnCNTAkbGE= +github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/safehtml v0.1.0 h1:EwLKo8qawTKfsi0orxcQAZzu07cICaBeFMegAU9eaT8= github.com/google/safehtml v0.1.0/go.mod 
h1:L4KWwDsUJdECRAEpZoBn3O64bQaywRscowZjJAzjHnU= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg= -github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gax-go/v2 v2.8.0 h1:UBtEZqx1bjXtOQ5BVTkuYghXrr3N4V123VKJK67vJZc= +github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= 
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= -github.com/hashicorp/consul/api v1.18.0 
h1:R7PPNzTCeN6VuQNDwwhZWJvzCtGSrNpJqfb22h3yH9g= -github.com/hashicorp/consul/api v1.18.0/go.mod h1:owRRGJ9M5xReDC5nfT8FTJrNAPbT4NM6p/k+d03q2v4= -github.com/hashicorp/consul/sdk v0.13.0 h1:lce3nFlpv8humJL8rNrrGHYSKc3q+Kxfeg3Ii1m6ZWU= -github.com/hashicorp/consul/sdk v0.13.0/go.mod h1:0hs/l5fOVhJy/VdcoaNqUSi2AUs95eF5WKtv+EYIQqE= +github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc= +github.com/hashicorp/consul/api v1.20.0/go.mod h1:nR64eD44KQ59Of/ECwt2vUmIK2DKsDzAwTmwmLl8Wpo= +github.com/hashicorp/consul/sdk v0.13.1 h1:EygWVWWMczTzXGpO93awkHFzfUka6hLYJ0qhETd+6lY= +github.com/hashicorp/consul/sdk v0.13.1/go.mod h1:SW/mM4LbKfqmMvcFu8v+eiQQ7oitXEFeiBe9StxERb0= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= -github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= 
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -452,6 +333,7 @@ github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iP github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= @@ -463,7 +345,6 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -483,10 +364,6 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428 h1:Mo9W14pwbO9VfRe+ygqZ8dFbPpoIK1HFrG/zjTuQ+nc= github.com/icrowley/fake 
v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -494,24 +371,14 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go 
v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= @@ -522,28 +389,17 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod 
h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/krishicks/yaml-patch v0.0.10 h1:H4FcHpnNwVmw8u0MjPRjWyIXtco6zM2F78t+57oNM3E= github.com/krishicks/yaml-patch v0.0.10/go.mod h1:Sm5TchwZS6sm7RJoyg87tzxm2ZcKzdRE4Q7TjNhPrME= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -552,19 +408,16 @@ github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb 
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= -github.com/mattn/go-ieproxy v0.0.9 h1:RvVbLiMv/Hbjf1gRaC2AQyzwbdVhdId7D2vPnXIml4k= -github.com/mattn/go-ieproxy v0.0.9/go.mod h1:eF30/rfdQUO9EnzNIZQr0r9HiLMlZNCpJkHbmMuOAE0= +github.com/mattn/go-ieproxy v0.0.10 h1:P+2QihaKCLgbs/32dhFLbxXlqsy8tIG1LUXHIoPaQPo= +github.com/mattn/go-ieproxy v0.0.10/go.mod h1:/NsJd+kxZBmjMc5hrJCKMbP57B84rvq9BiDRbtO9AS0= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= +github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth 
v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= @@ -578,72 +431,54 @@ github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1 h1:jw16EimP5oAEM/2wt+SiEUov/YDyTCTDuPtIKgQIvk0= github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1/go.mod h1:vuvdOZLJuf5HmJAJrKV64MmozrSsk+or0PB5dzdfspg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 
-github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU= github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249 h1:NHrXEjTNQY7P0Zfx1aMrNhpgxHmow66XQtm0aQLY0AE= github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= 
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= +github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 
h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/outcaste-io/ristretto v0.2.0/go.mod h1:iBZA7RCt6jaOr0z6hiBQ6t662/oZ6Gx/yauuPvIWHAI= +github.com/outcaste-io/ristretto v0.2.1 h1:KCItuNIGJZcursqHr3ghO7fc5ddZLEHspL9UR0cQM64= +github.com/outcaste-io/ristretto v0.2.1/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= -github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us= +github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= @@ -658,28 +493,27 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/sftp v1.13.1/go.mod 
h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a h1:y0OpQ4+5tKxeh9+H+2cVgASl9yMZYV9CILinKOiKafA= github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a/go.mod h1:GJFUzQuXIoB2Kjn1ZfDhJr/42D5nWOqRcIQVgCxTuIE= -github.com/planetscale/vtprotobuf v0.4.0 h1:NEI+g4woRaAZgeZ3sAvbtyvMBRjIv5kE7EWYQ8m4JwY= -github.com/planetscale/vtprotobuf v0.4.0/go.mod h1:wm1N3qk9G/4+VM1WhpkLbvY/d8+0PbwYYpP5P5VhTks= +github.com/planetscale/vtprotobuf v0.5.0 h1:l8PXm6Colok5z6qQLNhAj2Jq5BfoMTIHxLER5a6nDqM= +github.com/planetscale/vtprotobuf v0.5.0/go.mod h1:wm1N3qk9G/4+VM1WhpkLbvY/d8+0PbwYYpP5P5VhTks= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= +github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model 
v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= -github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/common v0.43.0 h1:iq+BVjvYLei5f27wiuNiB1DN6DYQkp1c8Bx0Vykh5us= +github.com/prometheus/common v0.43.0/go.mod h1:NCvr5cQIh3Y/gy73/RdVtC9r8xxrxwJnB+2lB3BxrFc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= @@ -690,25 +524,24 @@ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqn github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= 
+github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 h1:Qp27Idfgi6ACvFQat5+VJvlYToylpM/hcyLBI3WaKPA= +github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052/go.mod h1:uvX/8buq8uVeiZiFht+0lqSLBHF+uGV8BrTv8W/SIwk= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw= -github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed 
v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/secure-systems-lab/go-securesystemslib v0.3.1/go.mod h1:o8hhjkbNl2gOamKUA/eNW3xUrntHT9L4W89W1nfj43U= -github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE= -github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/secure-systems-lab/go-securesystemslib v0.5.0 h1:oTiNu0QnulMQgN/hLK124wJD/r2f9ZhIUuKIeBsCBT8= +github.com/secure-systems-lab/go-securesystemslib v0.5.0/go.mod h1:uoCqUC0Ap7jrBSEanxT+SdACYJTVplRXWLkGMuDjXqk= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= @@ -716,36 +549,24 @@ github.com/sjmudd/stopwatch v0.1.1 h1:x45OvxFB5OtCkjvYtzRF5fWB857Jzjjk84Oyd5C5eb github.com/sjmudd/stopwatch v0.1.1/go.mod h1:BLw0oIQJ1YLXBO/q9ufK/SgnKBVIkC2qrm6uy78Zw6U= github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= 
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= 
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -758,8 +579,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= @@ -769,27 +591,17 @@ github.com/tidwall/gjson v1.12.1 h1:ikuZsLdhr8Ws0IdROXUS1Gi4v9Z4pGqpX/CvJkxvfpo= github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty 
v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0= github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod 
h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M= github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -800,17 +612,12 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/z-division/go-zookeeper v1.0.0 h1:ULsCj0nP6+U1liDFWe+2oEF6o4amixoDcDlwEUghVUY= github.com/z-division/go-zookeeper v1.0.0/go.mod h1:6X4UioQXpvyezJJl4J9NHAJKsoffCwy5wCaaTktXjOA= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= -go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= -go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= -go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= -go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4= -go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.etcd.io/etcd/api/v3 v3.5.8 h1:Zf44zJszoU7zRV0X/nStPenegNXoFDWcB/MwrJbA+L4= +go.etcd.io/etcd/api/v3 v3.5.8/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= +go.etcd.io/etcd/client/pkg/v3 v3.5.8 h1:tPp9YRn/UBFAHdhOQUII9eUs7aOK35eulpMhX4YBd+M= +go.etcd.io/etcd/client/pkg/v3 
v3.5.8/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4= +go.etcd.io/etcd/client/v3 v3.5.8 h1:B6ngTKZSWWowHEoaucOKHQR/AtZKaoHLiUpWxOLG4l4= +go.etcd.io/etcd/client/v3 v3.5.8/go.mod h1:idZYIPVkttBJBiRigkB5EM0MmEyx8jcl18zCV3F5noc= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -820,36 +627,34 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/mock v0.2.0 h1:TaP3xedm7JaAgScZO7tlvlKrqT0p7I6OsdGB5YNSMDU= go.uber.org/mock v0.2.0/go.mod h1:J0y0rp9L3xiff1+ZBfKxlC1fz2+aO16tw0tsDOixfuM= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.9.0 
h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= -go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA= -go4.org/intern v0.0.0-20220617035311-6925f38cc365 h1:t9hFvR102YlOqU0fQn1wgwhNvSbHGBbbJxX9JKfU3l0= -go4.org/intern v0.0.0-20220617035311-6925f38cc365/go.mod h1:WXRv3p7T6gzt0CcJm43AAKdKVZmcQbwwC7EwquU5BZU= +go4.org/intern v0.0.0-20230205224052-192e9f60865c h1:b8WZ7Ja8nKegYxfwDLLwT00ZKv4lXAQrw8LYPK+cHSI= +go4.org/intern v0.0.0-20230205224052-192e9f60865c/go.mod h1:RJ0SVrOMpxLhgb5noIV+09zI1RsRlMsbUcSxpWHqbrE= go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 h1:FyBZqvoA/jbNzuAWLQE2kG820zMAkcilx6BMjGbL/E4= go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20230204201903-c31fa085b70e/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20230426161633-7e06285ff160 h1:LrTREdITdNDW/JRlUuG3fhXvCK3ZcKXTCf1BbxE8sT4= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20230426161633-7e06285ff160/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190128193316-c7b33c32a30b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -859,10 +664,9 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= -golang.org/x/crypto v0.12.0 
h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -888,7 +692,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -901,23 +704,17 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0 
h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ 
-926,8 +723,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -958,18 +753,13 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220630215102-69896b714898/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -979,8 +769,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -993,39 +783,30 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1049,7 +830,6 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1066,56 +846,44 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= 
-golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time 
v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1124,8 +892,6 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= 
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1151,7 +917,6 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1171,10 +936,8 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= -golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4= -golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= -golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= -golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= +golang.org/x/tools 
v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1200,8 +963,8 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.109.0 h1:sW9hgHyX497PP5//NUM7nqfV8D0iDfBApqq7sOh1XR8= -google.golang.org/api v0.109.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.121.0 h1:8Oopoo8Vavxx6gt+sgs8s8/X60WBAtKQq6JqnkF+xow= +google.golang.org/api v0.121.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1249,9 +1012,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto 
v0.0.0-20230131230820-1c016267d619 h1:p0kMzw6AG0JEzd7Z+kXqOiLhC6gjUQTbtS2zR0Q3DbI= -google.golang.org/genproto v0.0.0-20230131230820-1c016267d619/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1271,11 +1033,11 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.52.3 h1:pf7sOysg4LdgBqduXveGKrcEwbStiK2rtfghdzlUYDQ= -google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.55.0-dev h1:b3WG8LoyS+X/C5ZbIWsJGjt8Hhqq0wUVX8+rPF/BHZo= +google.golang.org/grpc v1.55.0-dev/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= 
google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b h1:D/GTYPo6I1oEo08Bfpuj3xl5XE+UGHj7//5fVyKxhsQ= google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b/go.mod h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1290,12 +1052,11 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/DataDog/dd-trace-go.v1 v1.47.0 h1:w3mHEgOR1o52mkyCbkTM+El8DG732+Fnug4FAGhIpsk= -gopkg.in/DataDog/dd-trace-go.v1 v1.47.0/go.mod h1:aHb6c4hPRANXnB64LDAKyfWotKgfRjlHv23MnahM8AI= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/DataDog/dd-trace-go.v1 v1.50.1 h1:DUpHhh+MHtpYnUyGr5rpfvKUXkRg93TSEHii/LZVF6g= +gopkg.in/DataDog/dd-trace-go.v1 v1.50.1/go.mod h1:sw4gV8LIXseC5ISMbDJmm79OJDdl8I2Hhtelb6lpHuQ= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= @@ -1304,26 +1065,15 @@ gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ldap.v2 v2.5.1 h1:wiu0okdNfjlBzg6UWvd1Hn8Y+Ux17/u/4nlk4CQr6tU= gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 
v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1334,8 +1084,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= @@ -1349,38 +1097,6 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= inet.af/netaddr v0.0.0-20220811202034-502d2d690317 h1:U2fwK6P2EqmopP/hFLTOAjWTki0qgd4GMJn5X8wOleU= inet.af/netaddr v0.0.0-20220811202034-502d2d690317/go.mod h1:OIezDfdzOgFhuw4HuWapWq2e9l0H9tK4F1j+ETRtF3k= -k8s.io/api v0.18.19/go.mod h1:lmViaHqL3es8JiaK3pCJMjBKm2CnzIcAXpHKifwbmAg= -k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ= -k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg= -k8s.io/apiextensions-apiserver v0.18.19 h1:z7tzzrsODC0cqvp3Pcy2HHc6wOnaSQQEWn0l/jbrJ6c= -k8s.io/apiextensions-apiserver v0.18.19/go.mod h1:kiomVdryKCrn+R0E+iPx+bZ/00rgj5tPXEBduSEJwgI= -k8s.io/apimachinery v0.18.19/go.mod h1:70HIRzSveORLKbatTlXzI2B2UUhbWzbq8Vqyf+HbdUQ= 
-k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ= -k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= -k8s.io/apiserver v0.18.19/go.mod h1:VY80gRUh89Cmnx2s9S5nZTF8vwzEKweAFy7nTFuFLRU= -k8s.io/client-go v0.18.19/go.mod h1:lB+d4UqdzSjaU41VODLYm/oon3o05LAzsVpm6Me5XkY= -k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU= -k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE= -k8s.io/code-generator v0.18.19/go.mod h1:l5yJd8cLSvkIb0ZJMsQdWuDOx5rWfLNpgmHQyl3LmBE= -k8s.io/code-generator v0.26.1 h1:dusFDsnNSKlMFYhzIM0jAO1OlnTN5WYwQQ+Ai12IIlo= -k8s.io/code-generator v0.26.1/go.mod h1:OMoJ5Dqx1wgaQzKgc+ZWaZPfGjdRq/Y3WubFrZmeI3I= -k8s.io/component-base v0.18.19/go.mod h1:nQMCdH6RaS/GD0J1YZqc5NInfCdknth4BwlAT5Mf7tA= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9 h1:iu3o/SxaHVI7tKPtkGzD3M9IzrE21j+CUKH98NQJ8Ms= -k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.90.0 h1:VkTxIV/FjRXn1fgNNcKGM8cfmL1Z33ZjXRTVxKCoF5M= -k8s.io/klog/v2 v2.90.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kube-openapi v0.0.0-20230202010329-39b3636cbaa3 h1:vV3ZKAUX0nMjTflyfVea98dTfROpIxDaEsQws0FT2Ts= -k8s.io/kube-openapi v0.0.0-20230202010329-39b3636cbaa3/go.mod 
h1:/BYxry62FuDzmI+i9B+X2pqfySRmSOW2ARmj5Zbqhj0= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20230115233650-391b47cb4029 h1:L8zDtT4jrxj+TaQYD0k8KNlr556WaVQylDXswKmX+dE= -k8s.io/utils v0.0.0-20230115233650-391b47cb4029/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI= lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw= @@ -1388,9 +1104,11 @@ modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw= modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= -modernc.org/libc v1.22.2 h1:4U7v51GyhlWqQmwCHj28Rdq2Yzwk55ovjFrdPjs8Hb0= -modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE= +modernc.org/libc v1.22.5/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY= modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds= @@ -1402,20 +1120,13 @@ modernc.org/sqlite v1.20.3/go.mod h1:zKcGyrICaxNTMEHSr1HQ2GUraP0j+845GYw37+EyT6A modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY= modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= modernc.org/tcl 
v1.15.0 h1:oY+JeD11qVVSgVvodMJsu7Edf8tr5E/7tuhF5cNYz34= +modernc.org/tcl v1.15.0/go.mod h1:xRoGotBZ6dU+Zo2tca+2EqVEeMmOUBzHnhIwq4YrVnE= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.7.0 h1:xkDw/KepgEjeizO2sNco+hqYkU12taxQFqPEmgm1GWE= +modernc.org/z v1.7.0/go.mod h1:hVdgNMh8ggTuRG1rGU8x+xGRFfiQUIAw0ZqlPy8+HyQ= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.1/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/go/cache/cache.go b/go/cache/cache.go deleted file mode 100644 index b6466132452..00000000000 --- a/go/cache/cache.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -// Cache is a generic interface type for a data structure that keeps recently used -// objects in memory and evicts them when it becomes full. -type Cache interface { - Get(key string) (any, bool) - Set(key string, val any) bool - ForEach(callback func(any) bool) - - Delete(key string) - Clear() - - // Wait waits for all pending operations on the cache to settle. Since cache writes - // are asynchronous, a write may not be immediately accessible unless the user - // manually calls Wait. - Wait() - - Len() int - Evictions() int64 - Hits() int64 - Misses() int64 - UsedCapacity() int64 - MaxCapacity() int64 - SetCapacity(int64) -} - -type cachedObject interface { - CachedSize(alloc bool) int64 -} - -// NewDefaultCacheImpl returns the default cache implementation for Vitess. The options in the -// Config struct control the memory and entry limits for the cache, and the underlying cache -// implementation. 
-func NewDefaultCacheImpl(cfg *Config) Cache { - switch { - case cfg == nil: - return &nullCache{} - - case cfg.LFU: - if cfg.MaxEntries == 0 || cfg.MaxMemoryUsage == 0 { - return &nullCache{} - } - return NewRistrettoCache(cfg.MaxEntries, cfg.MaxMemoryUsage, func(val any) int64 { - return val.(cachedObject).CachedSize(true) - }) - - default: - if cfg.MaxEntries == 0 { - return &nullCache{} - } - return NewLRUCache(cfg.MaxEntries, func(_ any) int64 { - return 1 - }) - } -} - -// Config is the configuration options for a cache instance -type Config struct { - // MaxEntries is the estimated amount of entries that the cache will hold at capacity - MaxEntries int64 - // MaxMemoryUsage is the maximum amount of memory the cache can handle - MaxMemoryUsage int64 - // LFU toggles whether to use a new cache implementation with a TinyLFU admission policy - LFU bool -} - -// DefaultConfig is the default configuration for a cache instance in Vitess -var DefaultConfig = &Config{ - MaxEntries: 5000, - MaxMemoryUsage: 32 * 1024 * 1024, - LFU: true, -} diff --git a/go/cache/cache_test.go b/go/cache/cache_test.go deleted file mode 100644 index 911a3bb207b..00000000000 --- a/go/cache/cache_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package cache - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/cache/ristretto" -) - -func TestNewDefaultCacheImpl(t *testing.T) { - assertNullCache := func(t *testing.T, cache Cache) { - _, ok := cache.(*nullCache) - require.True(t, ok) - } - - assertLFUCache := func(t *testing.T, cache Cache) { - _, ok := cache.(*ristretto.Cache) - require.True(t, ok) - } - - assertLRUCache := func(t *testing.T, cache Cache) { - _, ok := cache.(*LRUCache) - require.True(t, ok) - } - - tests := []struct { - cfg *Config - verify func(t *testing.T, cache Cache) - }{ - {&Config{MaxEntries: 0, MaxMemoryUsage: 0, LFU: false}, assertNullCache}, - {&Config{MaxEntries: 0, MaxMemoryUsage: 0, LFU: true}, assertNullCache}, - 
{&Config{MaxEntries: 100, MaxMemoryUsage: 0, LFU: false}, assertLRUCache}, - {&Config{MaxEntries: 0, MaxMemoryUsage: 1000, LFU: false}, assertNullCache}, - {&Config{MaxEntries: 100, MaxMemoryUsage: 1000, LFU: false}, assertLRUCache}, - {&Config{MaxEntries: 100, MaxMemoryUsage: 0, LFU: true}, assertNullCache}, - {&Config{MaxEntries: 100, MaxMemoryUsage: 1000, LFU: true}, assertLFUCache}, - {&Config{MaxEntries: 0, MaxMemoryUsage: 1000, LFU: true}, assertNullCache}, - } - for _, tt := range tests { - t.Run(fmt.Sprintf("%d.%d.%v", tt.cfg.MaxEntries, tt.cfg.MaxMemoryUsage, tt.cfg.LFU), func(t *testing.T) { - cache := NewDefaultCacheImpl(tt.cfg) - tt.verify(t, cache) - }) - } -} diff --git a/go/cache/lru_cache.go b/go/cache/lru_cache.go index 31ceadaf201..d845265b77b 100644 --- a/go/cache/lru_cache.go +++ b/go/cache/lru_cache.go @@ -29,8 +29,6 @@ import ( "time" ) -var _ Cache = &LRUCache{} - // LRUCache is a typical LRU cache implementation. If the cache // reaches the capacity, the least recently used item is deleted from // the cache. Note the capacity is not the number of items, but the @@ -250,3 +248,7 @@ func (lru *LRUCache) checkCapacity() { lru.evictions++ } } + +func (lru *LRUCache) Close() { + lru.Clear() +} diff --git a/go/cache/null.go b/go/cache/null.go deleted file mode 100644 index c99d52eb2ec..00000000000 --- a/go/cache/null.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cache - -// nullCache is a no-op cache that does not store items -type nullCache struct{} - -// Get never returns anything on the nullCache -func (n *nullCache) Get(_ string) (any, bool) { - return nil, false -} - -// Set is a no-op in the nullCache -func (n *nullCache) Set(_ string, _ any) bool { - return false -} - -// ForEach iterates the nullCache, which is always empty -func (n *nullCache) ForEach(_ func(any) bool) {} - -// Delete is a no-op in the nullCache -func (n *nullCache) Delete(_ string) {} - -// Clear is a no-op in the nullCache -func (n *nullCache) Clear() {} - -// Wait is a no-op in the nullcache -func (n *nullCache) Wait() {} - -func (n *nullCache) Len() int { - return 0 -} - -// Hits returns number of cache hits since creation -func (n *nullCache) Hits() int64 { - return 0 -} - -// Hits returns number of cache misses since creation -func (n *nullCache) Misses() int64 { - return 0 -} - -// Capacity returns the capacity of the nullCache, which is always 0 -func (n *nullCache) UsedCapacity() int64 { - return 0 -} - -// Capacity returns the capacity of the nullCache, which is always 0 -func (n *nullCache) MaxCapacity() int64 { - return 0 -} - -// SetCapacity sets the capacity of the null cache, which is a no-op -func (n *nullCache) SetCapacity(_ int64) {} - -func (n *nullCache) Evictions() int64 { - return 0 -} diff --git a/go/cache/ristretto.go b/go/cache/ristretto.go deleted file mode 100644 index 6d6f596a5b9..00000000000 --- a/go/cache/ristretto.go +++ /dev/null @@ -1,28 +0,0 @@ -package cache - -import ( - "vitess.io/vitess/go/cache/ristretto" -) - -var _ Cache = &ristretto.Cache{} - -// NewRistrettoCache returns a Cache implementation based on Ristretto -func NewRistrettoCache(maxEntries, maxCost int64, cost func(any) int64) *ristretto.Cache { - // The TinyLFU paper recommends to allocate 10x times the max entries amount as counters - // for the admission policy; since our caches are small and we're very interested on admission - 
// accuracy, we're a bit more greedy than 10x - const CounterRatio = 12 - - config := ristretto.Config{ - NumCounters: maxEntries * CounterRatio, - MaxCost: maxCost, - BufferItems: 64, - Metrics: true, - Cost: cost, - } - cache, err := ristretto.NewCache(&config) - if err != nil { - panic(err) - } - return cache -} diff --git a/go/cache/ristretto/bloom/bbloom.go b/go/cache/ristretto/bloom/bbloom.go deleted file mode 100644 index ce5daa6864d..00000000000 --- a/go/cache/ristretto/bloom/bbloom.go +++ /dev/null @@ -1,151 +0,0 @@ -// The MIT License (MIT) -// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt - -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software is furnished to do so, -// subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -package bloom - -import ( - "math" - "unsafe" -) - -// helper -var mask = []uint8{1, 2, 4, 8, 16, 32, 64, 128} - -func getSize(ui64 uint64) (size uint64, exponent uint64) { - if ui64 < uint64(512) { - ui64 = uint64(512) - } - size = uint64(1) - for size < ui64 { - size <<= 1 - exponent++ - } - return size, exponent -} - -// NewBloomFilterWithErrorRate returns a new bloomfilter with optimal size for the given -// error rate -func NewBloomFilterWithErrorRate(numEntries uint64, wrongs float64) *Bloom { - size := -1 * float64(numEntries) * math.Log(wrongs) / math.Pow(0.69314718056, 2) - locs := math.Ceil(0.69314718056 * size / float64(numEntries)) - return NewBloomFilter(uint64(size), uint64(locs)) -} - -// NewBloomFilter returns a new bloomfilter. -func NewBloomFilter(entries, locs uint64) (bloomfilter *Bloom) { - size, exponent := getSize(entries) - bloomfilter = &Bloom{ - sizeExp: exponent, - size: size - 1, - setLocs: locs, - shift: 64 - exponent, - } - bloomfilter.Size(size) - return bloomfilter -} - -// Bloom filter -type Bloom struct { - bitset []uint64 - ElemNum uint64 - sizeExp uint64 - size uint64 - setLocs uint64 - shift uint64 -} - -// <--- http://www.cse.yorku.ca/~oz/hash.html -// modified Berkeley DB Hash (32bit) -// hash is casted to l, h = 16bit fragments -// func (bl Bloom) absdbm(b *[]byte) (l, h uint64) { -// hash := uint64(len(*b)) -// for _, c := range *b { -// hash = uint64(c) + (hash << 6) + (hash << bl.sizeExp) - hash -// } -// h = hash >> bl.shift -// l = hash << bl.shift >> bl.shift -// return l, h -// } - -// Add adds hash of a key to the bloomfilter. -func (bl *Bloom) Add(hash uint64) { - h := hash >> bl.shift - l := hash << bl.shift >> bl.shift - for i := uint64(0); i < bl.setLocs; i++ { - bl.Set((h + i*l) & bl.size) - bl.ElemNum++ - } -} - -// Has checks if bit(s) for entry hash is/are set, -// returns true if the hash was added to the Bloom Filter. 
-func (bl Bloom) Has(hash uint64) bool { - h := hash >> bl.shift - l := hash << bl.shift >> bl.shift - for i := uint64(0); i < bl.setLocs; i++ { - if !bl.IsSet((h + i*l) & bl.size) { - return false - } - } - return true -} - -// AddIfNotHas only Adds hash, if it's not present in the bloomfilter. -// Returns true if hash was added. -// Returns false if hash was already registered in the bloomfilter. -func (bl *Bloom) AddIfNotHas(hash uint64) bool { - if bl.Has(hash) { - return false - } - bl.Add(hash) - return true -} - -// TotalSize returns the total size of the bloom filter. -func (bl *Bloom) TotalSize() int { - // The bl struct has 5 members and each one is 8 byte. The bitset is a - // uint64 byte slice. - return len(bl.bitset)*8 + 5*8 -} - -// Size makes Bloom filter with as bitset of size sz. -func (bl *Bloom) Size(sz uint64) { - bl.bitset = make([]uint64, sz>>6) -} - -// Clear resets the Bloom filter. -func (bl *Bloom) Clear() { - for i := range bl.bitset { - bl.bitset[i] = 0 - } -} - -// Set sets the bit[idx] of bitset. -func (bl *Bloom) Set(idx uint64) { - ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3)) - *(*uint8)(ptr) |= mask[idx%8] -} - -// IsSet checks if bit[idx] of bitset is set, returns true/false. 
-func (bl *Bloom) IsSet(idx uint64) bool { - ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3)) - r := ((*(*uint8)(ptr)) >> (idx % 8)) & 1 - return r == 1 -} diff --git a/go/cache/ristretto/bloom/bbloom_test.go b/go/cache/ristretto/bloom/bbloom_test.go deleted file mode 100644 index 7d280988bae..00000000000 --- a/go/cache/ristretto/bloom/bbloom_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package bloom - -import ( - "crypto/rand" - "os" - "testing" - - _flag "vitess.io/vitess/go/internal/flag" - "vitess.io/vitess/go/vt/log" - - "vitess.io/vitess/go/hack" -) - -var ( - wordlist1 [][]byte - n = uint64(1 << 16) - bf *Bloom -) - -func TestMain(m *testing.M) { - // hack to get rid of an "ERROR: logging before flag.Parse" - _flag.TrickGlog() - wordlist1 = make([][]byte, n) - for i := range wordlist1 { - b := make([]byte, 32) - _, _ = rand.Read(b) - wordlist1[i] = b - } - log.Info("Benchmarks relate to 2**16 OP. --> output/65536 op/ns") - - os.Exit(m.Run()) -} - -func TestM_NumberOfWrongs(t *testing.T) { - bf = NewBloomFilter(n*10, 7) - - cnt := 0 - for i := range wordlist1 { - hash := hack.RuntimeMemhash(wordlist1[i], 0) - if !bf.AddIfNotHas(hash) { - cnt++ - } - } - log.Infof("Bloomfilter New(7* 2**16, 7) (-> size=%v bit): \n Check for 'false positives': %v wrong positive 'Has' results on 2**16 entries => %v %%", len(bf.bitset)<<6, cnt, float64(cnt)/float64(n)) - -} - -func BenchmarkM_New(b *testing.B) { - for r := 0; r < b.N; r++ { - _ = NewBloomFilter(n*10, 7) - } -} - -func BenchmarkM_Clear(b *testing.B) { - bf = NewBloomFilter(n*10, 7) - for i := range wordlist1 { - hash := hack.RuntimeMemhash(wordlist1[i], 0) - bf.Add(hash) - } - b.ResetTimer() - for r := 0; r < b.N; r++ { - bf.Clear() - } -} - -func BenchmarkM_Add(b *testing.B) { - bf = NewBloomFilter(n*10, 7) - b.ResetTimer() - for r := 0; r < b.N; r++ { - for i := range wordlist1 { - hash := hack.RuntimeMemhash(wordlist1[i], 0) - bf.Add(hash) - } - } - -} - -func 
BenchmarkM_Has(b *testing.B) { - b.ResetTimer() - for r := 0; r < b.N; r++ { - for i := range wordlist1 { - hash := hack.RuntimeMemhash(wordlist1[i], 0) - bf.Has(hash) - } - } -} diff --git a/go/cache/ristretto/cache.go b/go/cache/ristretto/cache.go deleted file mode 100644 index b745d6dc991..00000000000 --- a/go/cache/ristretto/cache.go +++ /dev/null @@ -1,697 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package ristretto is a fast, fixed size, in-memory cache with a dual focus on -// throughput and hit ratio performance. You can easily add Ristretto to an -// existing system and keep the most valuable data where you need it. 
-package ristretto - -import ( - "bytes" - "errors" - "fmt" - "sync" - "sync/atomic" - "time" - "unsafe" - - "vitess.io/vitess/go/hack" -) - -var ( - // TODO: find the optimal value for this or make it configurable - setBufSize = 32 * 1024 -) - -func defaultStringHash(key string) (uint64, uint64) { - const Seed1 = uint64(0x1122334455667788) - const Seed2 = uint64(0x8877665544332211) - return hack.RuntimeStrhash(key, Seed1), hack.RuntimeStrhash(key, Seed2) -} - -type itemCallback func(*Item) - -// CacheItemSize is the overhead in bytes for every stored cache item -var CacheItemSize = hack.RuntimeAllocSize(int64(unsafe.Sizeof(storeItem{}))) - -// Cache is a thread-safe implementation of a hashmap with a TinyLFU admission -// policy and a Sampled LFU eviction policy. You can use the same Cache instance -// from as many goroutines as you want. -type Cache struct { - // store is the central concurrent hashmap where key-value items are stored. - store store - // policy determines what gets let in to the cache and what gets kicked out. - policy policy - // getBuf is a custom ring buffer implementation that gets pushed to when - // keys are read. - getBuf *ringBuffer - // setBuf is a buffer allowing us to batch/drop Sets during times of high - // contention. - setBuf chan *Item - // onEvict is called for item evictions. - onEvict itemCallback - // onReject is called when an item is rejected via admission policy. - onReject itemCallback - // onExit is called whenever a value goes out of scope from the cache. - onExit func(any) - // KeyToHash function is used to customize the key hashing algorithm. - // Each key will be hashed using the provided function. If keyToHash value - // is not set, the default keyToHash function is used. - keyToHash func(string) (uint64, uint64) - // stop is used to stop the processItems goroutine. - stop chan struct{} - // indicates whether cache is closed. - isClosed bool - // cost calculates cost from a value. 
- cost func(value any) int64 - // ignoreInternalCost dictates whether to ignore the cost of internally storing - // the item in the cost calculation. - ignoreInternalCost bool - // Metrics contains a running log of important statistics like hits, misses, - // and dropped items. - Metrics *Metrics -} - -// Config is passed to NewCache for creating new Cache instances. -type Config struct { - // NumCounters determines the number of counters (keys) to keep that hold - // access frequency information. It's generally a good idea to have more - // counters than the max cache capacity, as this will improve eviction - // accuracy and subsequent hit ratios. - // - // For example, if you expect your cache to hold 1,000,000 items when full, - // NumCounters should be 10,000,000 (10x). Each counter takes up 4 bits, so - // keeping 10,000,000 counters would require 5MB of memory. - NumCounters int64 - // MaxCost can be considered as the cache capacity, in whatever units you - // choose to use. - // - // For example, if you want the cache to have a max capacity of 100MB, you - // would set MaxCost to 100,000,000 and pass an item's number of bytes as - // the `cost` parameter for calls to Set. If new items are accepted, the - // eviction process will take care of making room for the new item and not - // overflowing the MaxCost value. - MaxCost int64 - // BufferItems determines the size of Get buffers. - // - // Unless you have a rare use case, using `64` as the BufferItems value - // results in good performance. - BufferItems int64 - // Metrics determines whether cache statistics are kept during the cache's - // lifetime. There *is* some overhead to keeping statistics, so you should - // only set this flag to true when testing or throughput performance isn't a - // major factor. - Metrics bool - // OnEvict is called for every eviction and passes the hashed key, value, - // and cost to the function. 
- OnEvict func(item *Item) - // OnReject is called for every rejection done via the policy. - OnReject func(item *Item) - // OnExit is called whenever a value is removed from cache. This can be - // used to do manual memory deallocation. Would also be called on eviction - // and rejection of the value. - OnExit func(val any) - // KeyToHash function is used to customize the key hashing algorithm. - // Each key will be hashed using the provided function. If keyToHash value - // is not set, the default keyToHash function is used. - KeyToHash func(string) (uint64, uint64) - // Cost evaluates a value and outputs a corresponding cost. This function - // is ran after Set is called for a new item or an item update with a cost - // param of 0. - Cost func(value any) int64 - // IgnoreInternalCost set to true indicates to the cache that the cost of - // internally storing the value should be ignored. This is useful when the - // cost passed to set is not using bytes as units. Keep in mind that setting - // this to true will increase the memory usage. - IgnoreInternalCost bool -} - -type itemFlag byte - -const ( - itemNew itemFlag = iota - itemDelete - itemUpdate -) - -// Item is passed to setBuf so items can eventually be added to the cache. -type Item struct { - flag itemFlag - Key uint64 - Conflict uint64 - Value any - Cost int64 - wg *sync.WaitGroup -} - -// NewCache returns a new Cache instance and any configuration errors, if any. 
-func NewCache(config *Config) (*Cache, error) { - switch { - case config.NumCounters == 0: - return nil, errors.New("NumCounters can't be zero") - case config.MaxCost == 0: - return nil, errors.New("Capacity can't be zero") - case config.BufferItems == 0: - return nil, errors.New("BufferItems can't be zero") - } - policy := newPolicy(config.NumCounters, config.MaxCost) - cache := &Cache{ - store: newStore(), - policy: policy, - getBuf: newRingBuffer(policy, config.BufferItems), - setBuf: make(chan *Item, setBufSize), - keyToHash: config.KeyToHash, - stop: make(chan struct{}), - cost: config.Cost, - ignoreInternalCost: config.IgnoreInternalCost, - } - cache.onExit = func(val any) { - if config.OnExit != nil && val != nil { - config.OnExit(val) - } - } - cache.onEvict = func(item *Item) { - if config.OnEvict != nil { - config.OnEvict(item) - } - cache.onExit(item.Value) - } - cache.onReject = func(item *Item) { - if config.OnReject != nil { - config.OnReject(item) - } - cache.onExit(item.Value) - } - if cache.keyToHash == nil { - cache.keyToHash = defaultStringHash - } - if config.Metrics { - cache.collectMetrics() - } - // NOTE: benchmarks seem to show that performance decreases the more - // goroutines we have running cache.processItems(), so 1 should - // usually be sufficient - go cache.processItems() - return cache, nil -} - -// Wait blocks until all the current cache operations have been processed in the background -func (c *Cache) Wait() { - if c == nil || c.isClosed { - return - } - wg := &sync.WaitGroup{} - wg.Add(1) - c.setBuf <- &Item{wg: wg} - wg.Wait() -} - -// Get returns the value (if any) and a boolean representing whether the -// value was found or not. The value can be nil and the boolean can be true at -// the same time. 
-func (c *Cache) Get(key string) (any, bool) { - if c == nil || c.isClosed { - return nil, false - } - keyHash, conflictHash := c.keyToHash(key) - c.getBuf.Push(keyHash) - value, ok := c.store.Get(keyHash, conflictHash) - if ok { - c.Metrics.add(hit, keyHash, 1) - } else { - c.Metrics.add(miss, keyHash, 1) - } - return value, ok -} - -// Set attempts to add the key-value item to the cache. If it returns false, -// then the Set was dropped and the key-value item isn't added to the cache. If -// it returns true, there's still a chance it could be dropped by the policy if -// its determined that the key-value item isn't worth keeping, but otherwise the -// item will be added and other items will be evicted in order to make room. -// -// The cost of the entry will be evaluated lazily by the cache's Cost function. -func (c *Cache) Set(key string, value any) bool { - return c.SetWithCost(key, value, 0) -} - -// SetWithCost works like Set but adds a key-value pair to the cache with a specific -// cost. The built-in Cost function will not be called to evaluate the object's cost -// and instead the given value will be used. -func (c *Cache) SetWithCost(key string, value any, cost int64) bool { - if c == nil || c.isClosed { - return false - } - - keyHash, conflictHash := c.keyToHash(key) - i := &Item{ - flag: itemNew, - Key: keyHash, - Conflict: conflictHash, - Value: value, - Cost: cost, - } - // cost is eventually updated. The expiration must also be immediately updated - // to prevent items from being prematurely removed from the map. - if prev, ok := c.store.Update(i); ok { - c.onExit(prev) - i.flag = itemUpdate - } - // Attempt to send item to policy. - select { - case c.setBuf <- i: - return true - default: - if i.flag == itemUpdate { - // Return true if this was an update operation since we've already - // updated the store. For all the other operations (set/delete), we - // return false which means the item was not inserted. 
- return true - } - c.Metrics.add(dropSets, keyHash, 1) - return false - } -} - -// Delete deletes the key-value item from the cache if it exists. -func (c *Cache) Delete(key string) { - if c == nil || c.isClosed { - return - } - keyHash, conflictHash := c.keyToHash(key) - // Delete immediately. - _, prev := c.store.Del(keyHash, conflictHash) - c.onExit(prev) - // If we've set an item, it would be applied slightly later. - // So we must push the same item to `setBuf` with the deletion flag. - // This ensures that if a set is followed by a delete, it will be - // applied in the correct order. - c.setBuf <- &Item{ - flag: itemDelete, - Key: keyHash, - Conflict: conflictHash, - } -} - -// Close stops all goroutines and closes all channels. -func (c *Cache) Close() { - if c == nil || c.isClosed { - return - } - c.Clear() - - // Block until processItems goroutine is returned. - c.stop <- struct{}{} - close(c.stop) - close(c.setBuf) - c.policy.Close() - c.isClosed = true -} - -// Clear empties the hashmap and zeroes all policy counters. Note that this is -// not an atomic operation (but that shouldn't be a problem as it's assumed that -// Set/Get calls won't be occurring until after this). -func (c *Cache) Clear() { - if c == nil || c.isClosed { - return - } - // Block until processItems goroutine is returned. - c.stop <- struct{}{} - - // Clear out the setBuf channel. -loop: - for { - select { - case i := <-c.setBuf: - if i.wg != nil { - i.wg.Done() - continue - } - if i.flag != itemUpdate { - // In itemUpdate, the value is already set in the store. So, no need to call - // onEvict here. - c.onEvict(i) - } - default: - break loop - } - } - - // Clear value hashmap and policy data. - c.policy.Clear() - c.store.Clear(c.onEvict) - // Only reset metrics if they're enabled. - if c.Metrics != nil { - c.Metrics.Clear() - } - // Restart processItems goroutine. 
- go c.processItems() -} - -// Len returns the size of the cache (in entries) -func (c *Cache) Len() int { - if c == nil { - return 0 - } - return c.store.Len() -} - -// UsedCapacity returns the size of the cache (in bytes) -func (c *Cache) UsedCapacity() int64 { - if c == nil { - return 0 - } - return c.policy.Used() -} - -// MaxCapacity returns the max cost of the cache (in bytes) -func (c *Cache) MaxCapacity() int64 { - if c == nil { - return 0 - } - return c.policy.MaxCost() -} - -// SetCapacity updates the maxCost of an existing cache. -func (c *Cache) SetCapacity(maxCost int64) { - if c == nil { - return - } - c.policy.UpdateMaxCost(maxCost) -} - -// Evictions returns the number of evictions -func (c *Cache) Evictions() int64 { - // TODO - if c == nil || c.Metrics == nil { - return 0 - } - return int64(c.Metrics.KeysEvicted()) -} - -// Hits returns the number of cache hits -func (c *Cache) Hits() int64 { - if c == nil || c.Metrics == nil { - return 0 - } - return int64(c.Metrics.Hits()) -} - -// Misses returns the number of cache misses -func (c *Cache) Misses() int64 { - if c == nil || c.Metrics == nil { - return 0 - } - return int64(c.Metrics.Misses()) -} - -// ForEach yields all the values currently stored in the cache to the given callback. -// The callback may return `false` to stop the iteration early. -func (c *Cache) ForEach(forEach func(any) bool) { - if c == nil { - return - } - c.store.ForEach(forEach) -} - -// processItems is ran by goroutines processing the Set buffer. -func (c *Cache) processItems() { - startTs := make(map[uint64]time.Time) - numToKeep := 100000 // TODO: Make this configurable via options. 
- - trackAdmission := func(key uint64) { - if c.Metrics == nil { - return - } - startTs[key] = time.Now() - if len(startTs) > numToKeep { - for k := range startTs { - if len(startTs) <= numToKeep { - break - } - delete(startTs, k) - } - } - } - onEvict := func(i *Item) { - delete(startTs, i.Key) - if c.onEvict != nil { - c.onEvict(i) - } - } - - for { - select { - case i := <-c.setBuf: - if i.wg != nil { - i.wg.Done() - continue - } - // Calculate item cost value if new or update. - if i.Cost == 0 && c.cost != nil && i.flag != itemDelete { - i.Cost = c.cost(i.Value) - } - if !c.ignoreInternalCost { - // Add the cost of internally storing the object. - i.Cost += CacheItemSize - } - - switch i.flag { - case itemNew: - victims, added := c.policy.Add(i.Key, i.Cost) - if added { - c.store.Set(i) - c.Metrics.add(keyAdd, i.Key, 1) - trackAdmission(i.Key) - } else { - c.onReject(i) - } - for _, victim := range victims { - victim.Conflict, victim.Value = c.store.Del(victim.Key, 0) - onEvict(victim) - } - - case itemUpdate: - c.policy.Update(i.Key, i.Cost) - - case itemDelete: - c.policy.Del(i.Key) // Deals with metrics updates. - _, val := c.store.Del(i.Key, i.Conflict) - c.onExit(val) - } - case <-c.stop: - return - } - } -} - -// collectMetrics just creates a new *Metrics instance and adds the pointers -// to the cache and policy instances. -func (c *Cache) collectMetrics() { - c.Metrics = newMetrics() - c.policy.CollectMetrics(c.Metrics) -} - -type metricType int - -const ( - // The following 2 keep track of hits and misses. - hit = iota - miss - // The following 3 keep track of number of keys added, updated and evicted. - keyAdd - keyUpdate - keyEvict - // The following 2 keep track of cost of keys added and evicted. - costAdd - costEvict - // The following keep track of how many sets were dropped or rejected later. - dropSets - rejectSets - // The following 2 keep track of how many gets were kept and dropped on the - // floor. 
- dropGets - keepGets - // This should be the final enum. Other enums should be set before this. - doNotUse -) - -func stringFor(t metricType) string { - switch t { - case hit: - return "hit" - case miss: - return "miss" - case keyAdd: - return "keys-added" - case keyUpdate: - return "keys-updated" - case keyEvict: - return "keys-evicted" - case costAdd: - return "cost-added" - case costEvict: - return "cost-evicted" - case dropSets: - return "sets-dropped" - case rejectSets: - return "sets-rejected" // by policy. - case dropGets: - return "gets-dropped" - case keepGets: - return "gets-kept" - default: - return "unidentified" - } -} - -// Metrics is a snapshot of performance statistics for the lifetime of a cache instance. -type Metrics struct { - all [doNotUse][]*uint64 -} - -func newMetrics() *Metrics { - s := &Metrics{} - for i := 0; i < doNotUse; i++ { - s.all[i] = make([]*uint64, 256) - slice := s.all[i] - for j := range slice { - slice[j] = new(uint64) - } - } - return s -} - -func (p *Metrics) add(t metricType, hash, delta uint64) { - if p == nil { - return - } - valp := p.all[t] - // Avoid false sharing by padding at least 64 bytes of space between two - // atomic counters which would be incremented. - idx := (hash % 25) * 10 - atomic.AddUint64(valp[idx], delta) -} - -func (p *Metrics) get(t metricType) uint64 { - if p == nil { - return 0 - } - valp := p.all[t] - var total uint64 - for i := range valp { - total += atomic.LoadUint64(valp[i]) - } - return total -} - -// Hits is the number of Get calls where a value was found for the corresponding key. -func (p *Metrics) Hits() uint64 { - return p.get(hit) -} - -// Misses is the number of Get calls where a value was not found for the corresponding key. -func (p *Metrics) Misses() uint64 { - return p.get(miss) -} - -// KeysAdded is the total number of Set calls where a new key-value item was added. 
-func (p *Metrics) KeysAdded() uint64 { - return p.get(keyAdd) -} - -// KeysUpdated is the total number of Set calls where the value was updated. -func (p *Metrics) KeysUpdated() uint64 { - return p.get(keyUpdate) -} - -// KeysEvicted is the total number of keys evicted. -func (p *Metrics) KeysEvicted() uint64 { - return p.get(keyEvict) -} - -// CostAdded is the sum of costs that have been added (successful Set calls). -func (p *Metrics) CostAdded() uint64 { - return p.get(costAdd) -} - -// CostEvicted is the sum of all costs that have been evicted. -func (p *Metrics) CostEvicted() uint64 { - return p.get(costEvict) -} - -// SetsDropped is the number of Set calls that don't make it into internal -// buffers (due to contention or some other reason). -func (p *Metrics) SetsDropped() uint64 { - return p.get(dropSets) -} - -// SetsRejected is the number of Set calls rejected by the policy (TinyLFU). -func (p *Metrics) SetsRejected() uint64 { - return p.get(rejectSets) -} - -// GetsDropped is the number of Get counter increments that are dropped -// internally. -func (p *Metrics) GetsDropped() uint64 { - return p.get(dropGets) -} - -// GetsKept is the number of Get counter increments that are kept. -func (p *Metrics) GetsKept() uint64 { - return p.get(keepGets) -} - -// Ratio is the number of Hits over all accesses (Hits + Misses). This is the -// percentage of successful Get calls. -func (p *Metrics) Ratio() float64 { - if p == nil { - return 0.0 - } - hits, misses := p.get(hit), p.get(miss) - if hits == 0 && misses == 0 { - return 0.0 - } - return float64(hits) / float64(hits+misses) -} - -// Clear resets all the metrics. -func (p *Metrics) Clear() { - if p == nil { - return - } - for i := 0; i < doNotUse; i++ { - for j := range p.all[i] { - atomic.StoreUint64(p.all[i][j], 0) - } - } -} - -// String returns a string representation of the metrics. 
-func (p *Metrics) String() string { - if p == nil { - return "" - } - var buf bytes.Buffer - for i := 0; i < doNotUse; i++ { - t := metricType(i) - fmt.Fprintf(&buf, "%s: %d ", stringFor(t), p.get(t)) - } - fmt.Fprintf(&buf, "gets-total: %d ", p.get(hit)+p.get(miss)) - fmt.Fprintf(&buf, "hit-ratio: %.2f", p.Ratio()) - return buf.String() -} diff --git a/go/cache/ristretto/cache_test.go b/go/cache/ristretto/cache_test.go deleted file mode 100644 index eda9f9109f3..00000000000 --- a/go/cache/ristretto/cache_test.go +++ /dev/null @@ -1,690 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package ristretto - -import ( - "fmt" - "math/rand" - "strconv" - "strings" - "sync" - "testing" - "time" - - "vitess.io/vitess/go/vt/log" - - "github.com/stretchr/testify/require" -) - -var wait = time.Millisecond * 10 - -func TestCacheKeyToHash(t *testing.T) { - keyToHashCount := 0 - c, err := NewCache(&Config{ - NumCounters: 10, - MaxCost: 1000, - BufferItems: 64, - IgnoreInternalCost: true, - KeyToHash: func(key string) (uint64, uint64) { - keyToHashCount++ - return defaultStringHash(key) - }, - }) - require.NoError(t, err) - if c.SetWithCost("1", 1, 1) { - time.Sleep(wait) - val, ok := c.Get("1") - require.True(t, ok) - require.NotNil(t, val) - c.Delete("1") - } - require.Equal(t, 3, keyToHashCount) -} - -func TestCacheMaxCost(t *testing.T) { - charset := "abcdefghijklmnopqrstuvwxyz0123456789" - key := func() string { - k := make([]byte, 2) - for i := range k { - k[i] = charset[rand.Intn(len(charset))] - } - return string(k) - } - c, err := NewCache(&Config{ - NumCounters: 12960, // 36^2 * 10 - MaxCost: 1e6, // 1mb - BufferItems: 64, - Metrics: true, - }) - require.NoError(t, err) - stop := make(chan struct{}, 8) - for i := 0; i < 8; i++ { - go func() { - for { - select { - case <-stop: - return - default: - time.Sleep(time.Millisecond) - - k := key() - if _, ok := c.Get(k); !ok { - val := "" - if rand.Intn(100) < 10 { - val = "test" - } else { - val = strings.Repeat("a", 1000) - } - c.SetWithCost(key(), val, int64(2+len(val))) - } - } - } - }() - } - for i := 0; i < 20; i++ { - time.Sleep(time.Second) - cacheCost := c.Metrics.CostAdded() - c.Metrics.CostEvicted() - log.Infof("total cache cost: %d", cacheCost) - require.True(t, float64(cacheCost) <= float64(1e6*1.05)) - } - for i := 0; i < 8; i++ { - stop <- struct{}{} - } -} - -func TestUpdateMaxCost(t *testing.T) { - c, err := NewCache(&Config{ - NumCounters: 10, - MaxCost: 10, - BufferItems: 64, - }) - require.NoError(t, err) - require.Equal(t, int64(10), c.MaxCapacity()) - require.True(t, 
c.SetWithCost("1", 1, 1)) - time.Sleep(wait) - _, ok := c.Get("1") - // Set is rejected because the cost of the entry is too high - // when accounting for the internal cost of storing the entry. - require.False(t, ok) - - // Update the max cost of the cache and retry. - c.SetCapacity(1000) - require.Equal(t, int64(1000), c.MaxCapacity()) - require.True(t, c.SetWithCost("1", 1, 1)) - time.Sleep(wait) - val, ok := c.Get("1") - require.True(t, ok) - require.NotNil(t, val) - c.Delete("1") -} - -func TestNewCache(t *testing.T) { - _, err := NewCache(&Config{ - NumCounters: 0, - }) - require.Error(t, err) - - _, err = NewCache(&Config{ - NumCounters: 100, - MaxCost: 0, - }) - require.Error(t, err) - - _, err = NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 0, - }) - require.Error(t, err) - - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 64, - Metrics: true, - }) - require.NoError(t, err) - require.NotNil(t, c) -} - -func TestNilCache(t *testing.T) { - var c *Cache - val, ok := c.Get("1") - require.False(t, ok) - require.Nil(t, val) - - require.False(t, c.SetWithCost("1", 1, 1)) - c.Delete("1") - c.Clear() - c.Close() -} - -func TestMultipleClose(t *testing.T) { - var c *Cache - c.Close() - - var err error - c, err = NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 64, - Metrics: true, - }) - require.NoError(t, err) - c.Close() - c.Close() -} - -func TestSetAfterClose(t *testing.T) { - c, err := newTestCache() - require.NoError(t, err) - require.NotNil(t, c) - - c.Close() - require.False(t, c.SetWithCost("1", 1, 1)) -} - -func TestClearAfterClose(t *testing.T) { - c, err := newTestCache() - require.NoError(t, err) - require.NotNil(t, c) - - c.Close() - c.Clear() -} - -func TestGetAfterClose(t *testing.T) { - c, err := newTestCache() - require.NoError(t, err) - require.NotNil(t, c) - - require.True(t, c.SetWithCost("1", 1, 1)) - c.Close() - - _, ok := c.Get("2") - require.False(t, ok) -} - -func 
TestDelAfterClose(t *testing.T) { - c, err := newTestCache() - require.NoError(t, err) - require.NotNil(t, c) - - require.True(t, c.SetWithCost("1", 1, 1)) - c.Close() - - c.Delete("1") -} - -func TestCacheProcessItems(t *testing.T) { - m := &sync.Mutex{} - evicted := make(map[uint64]struct{}) - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 64, - IgnoreInternalCost: true, - Cost: func(value any) int64 { - return int64(value.(int)) - }, - OnEvict: func(item *Item) { - m.Lock() - defer m.Unlock() - evicted[item.Key] = struct{}{} - }, - }) - require.NoError(t, err) - - var key uint64 - var conflict uint64 - - key, conflict = defaultStringHash("1") - c.setBuf <- &Item{ - flag: itemNew, - Key: key, - Conflict: conflict, - Value: 1, - Cost: 0, - } - time.Sleep(wait) - require.True(t, c.policy.Has(key)) - require.Equal(t, int64(1), c.policy.Cost(key)) - - key, conflict = defaultStringHash("1") - c.setBuf <- &Item{ - flag: itemUpdate, - Key: key, - Conflict: conflict, - Value: 2, - Cost: 0, - } - time.Sleep(wait) - require.Equal(t, int64(2), c.policy.Cost(key)) - - key, conflict = defaultStringHash("1") - c.setBuf <- &Item{ - flag: itemDelete, - Key: key, - Conflict: conflict, - } - time.Sleep(wait) - key, conflict = defaultStringHash("1") - val, ok := c.store.Get(key, conflict) - require.False(t, ok) - require.Nil(t, val) - require.False(t, c.policy.Has(1)) - - key, conflict = defaultStringHash("2") - c.setBuf <- &Item{ - flag: itemNew, - Key: key, - Conflict: conflict, - Value: 2, - Cost: 3, - } - key, conflict = defaultStringHash("3") - c.setBuf <- &Item{ - flag: itemNew, - Key: key, - Conflict: conflict, - Value: 3, - Cost: 3, - } - key, conflict = defaultStringHash("4") - c.setBuf <- &Item{ - flag: itemNew, - Key: key, - Conflict: conflict, - Value: 3, - Cost: 3, - } - key, conflict = defaultStringHash("5") - c.setBuf <- &Item{ - flag: itemNew, - Key: key, - Conflict: conflict, - Value: 3, - Cost: 5, - } - time.Sleep(wait) - m.Lock() - 
require.NotEqual(t, 0, len(evicted)) - m.Unlock() - - defer func() { - require.NotNil(t, recover()) - }() - c.Close() - c.setBuf <- &Item{flag: itemNew} -} - -func TestCacheGet(t *testing.T) { - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 64, - IgnoreInternalCost: true, - Metrics: true, - }) - require.NoError(t, err) - - key, conflict := defaultStringHash("1") - i := Item{ - Key: key, - Conflict: conflict, - Value: 1, - } - c.store.Set(&i) - val, ok := c.Get("1") - require.True(t, ok) - require.NotNil(t, val) - - val, ok = c.Get("2") - require.False(t, ok) - require.Nil(t, val) - - // 0.5 and not 1.0 because we tried Getting each item twice - require.Equal(t, 0.5, c.Metrics.Ratio()) - - c = nil - val, ok = c.Get("0") - require.False(t, ok) - require.Nil(t, val) -} - -// retrySet calls SetWithCost until the item is accepted by the cache. -func retrySet(t *testing.T, c *Cache, key string, value int, cost int64) { - for { - if set := c.SetWithCost(key, value, cost); !set { - time.Sleep(wait) - continue - } - - time.Sleep(wait) - val, ok := c.Get(key) - require.True(t, ok) - require.NotNil(t, val) - require.Equal(t, value, val.(int)) - return - } -} - -func TestCacheSet(t *testing.T) { - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - IgnoreInternalCost: true, - BufferItems: 64, - Metrics: true, - }) - require.NoError(t, err) - - retrySet(t, c, "1", 1, 1) - - c.SetWithCost("1", 2, 2) - val, ok := c.store.Get(defaultStringHash("1")) - require.True(t, ok) - require.Equal(t, 2, val.(int)) - - c.stop <- struct{}{} - for i := 0; i < setBufSize; i++ { - key, conflict := defaultStringHash("1") - c.setBuf <- &Item{ - flag: itemUpdate, - Key: key, - Conflict: conflict, - Value: 1, - Cost: 1, - } - } - require.False(t, c.SetWithCost("2", 2, 1)) - require.Equal(t, uint64(1), c.Metrics.SetsDropped()) - close(c.setBuf) - close(c.stop) - - c = nil - require.False(t, c.SetWithCost("1", 1, 1)) -} - -func TestCacheInternalCost(t 
*testing.T) { - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 64, - Metrics: true, - }) - require.NoError(t, err) - - // Get should return false because the cache's cost is too small to store the item - // when accounting for the internal cost. - c.SetWithCost("1", 1, 1) - time.Sleep(wait) - _, ok := c.Get("1") - require.False(t, ok) -} - -func TestCacheDel(t *testing.T) { - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 64, - }) - require.NoError(t, err) - - c.SetWithCost("1", 1, 1) - c.Delete("1") - // The deletes and sets are pushed through the setbuf. It might be possible - // that the delete is not processed before the following get is called. So - // wait for a millisecond for things to be processed. - time.Sleep(time.Millisecond) - val, ok := c.Get("1") - require.False(t, ok) - require.Nil(t, val) - - c = nil - defer func() { - require.Nil(t, recover()) - }() - c.Delete("1") -} - -func TestCacheClear(t *testing.T) { - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - IgnoreInternalCost: true, - BufferItems: 64, - Metrics: true, - }) - require.NoError(t, err) - - for i := 0; i < 10; i++ { - c.SetWithCost(strconv.Itoa(i), i, 1) - } - time.Sleep(wait) - require.Equal(t, uint64(10), c.Metrics.KeysAdded()) - - c.Clear() - require.Equal(t, uint64(0), c.Metrics.KeysAdded()) - - for i := 0; i < 10; i++ { - val, ok := c.Get(strconv.Itoa(i)) - require.False(t, ok) - require.Nil(t, val) - } -} - -func TestCacheMetrics(t *testing.T) { - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - IgnoreInternalCost: true, - BufferItems: 64, - Metrics: true, - }) - require.NoError(t, err) - - for i := 0; i < 10; i++ { - c.SetWithCost(strconv.Itoa(i), i, 1) - } - time.Sleep(wait) - m := c.Metrics - require.Equal(t, uint64(10), m.KeysAdded()) -} - -func TestMetrics(t *testing.T) { - newMetrics() -} - -func TestNilMetrics(t *testing.T) { - var m *Metrics - for _, f := range []func() uint64{ - 
m.Hits, - m.Misses, - m.KeysAdded, - m.KeysEvicted, - m.CostEvicted, - m.SetsDropped, - m.SetsRejected, - m.GetsDropped, - m.GetsKept, - } { - require.Equal(t, uint64(0), f()) - } -} - -func TestMetricsAddGet(t *testing.T) { - m := newMetrics() - m.add(hit, 1, 1) - m.add(hit, 2, 2) - m.add(hit, 3, 3) - require.Equal(t, uint64(6), m.Hits()) - - m = nil - m.add(hit, 1, 1) - require.Equal(t, uint64(0), m.Hits()) -} - -func TestMetricsRatio(t *testing.T) { - m := newMetrics() - require.Equal(t, float64(0), m.Ratio()) - - m.add(hit, 1, 1) - m.add(hit, 2, 2) - m.add(miss, 1, 1) - m.add(miss, 2, 2) - require.Equal(t, 0.5, m.Ratio()) - - m = nil - require.Equal(t, float64(0), m.Ratio()) -} - -func TestMetricsString(t *testing.T) { - m := newMetrics() - m.add(hit, 1, 1) - m.add(miss, 1, 1) - m.add(keyAdd, 1, 1) - m.add(keyUpdate, 1, 1) - m.add(keyEvict, 1, 1) - m.add(costAdd, 1, 1) - m.add(costEvict, 1, 1) - m.add(dropSets, 1, 1) - m.add(rejectSets, 1, 1) - m.add(dropGets, 1, 1) - m.add(keepGets, 1, 1) - require.Equal(t, uint64(1), m.Hits()) - require.Equal(t, uint64(1), m.Misses()) - require.Equal(t, 0.5, m.Ratio()) - require.Equal(t, uint64(1), m.KeysAdded()) - require.Equal(t, uint64(1), m.KeysUpdated()) - require.Equal(t, uint64(1), m.KeysEvicted()) - require.Equal(t, uint64(1), m.CostAdded()) - require.Equal(t, uint64(1), m.CostEvicted()) - require.Equal(t, uint64(1), m.SetsDropped()) - require.Equal(t, uint64(1), m.SetsRejected()) - require.Equal(t, uint64(1), m.GetsDropped()) - require.Equal(t, uint64(1), m.GetsKept()) - - require.NotEqual(t, 0, len(m.String())) - - m = nil - require.Equal(t, 0, len(m.String())) - - require.Equal(t, "unidentified", stringFor(doNotUse)) -} - -func TestCacheMetricsClear(t *testing.T) { - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 64, - Metrics: true, - }) - require.NoError(t, err) - - c.SetWithCost("1", 1, 1) - stop := make(chan struct{}) - go func() { - for { - select { - case <-stop: - return - 
default: - c.Get("1") - } - } - }() - time.Sleep(wait) - c.Clear() - stop <- struct{}{} - c.Metrics = nil - c.Metrics.Clear() -} - -// Regression test for bug https://github.com/dgraph-io/ristretto/issues/167 -func TestDropUpdates(t *testing.T) { - originalSetBugSize := setBufSize - defer func() { setBufSize = originalSetBugSize }() - - test := func() { - // dropppedMap stores the items dropped from the cache. - droppedMap := make(map[int]struct{}) - lastEvictedSet := int64(-1) - - var err error - handler := func(_ any, value any) { - v := value.(string) - lastEvictedSet, err = strconv.ParseInt(string(v), 10, 32) - require.NoError(t, err) - - _, ok := droppedMap[int(lastEvictedSet)] - if ok { - panic(fmt.Sprintf("val = %+v was dropped but it got evicted. Dropped items: %+v\n", - lastEvictedSet, droppedMap)) - } - } - - // This is important. The race condition shows up only when the setBuf - // is full and that's why we reduce the buf size here. The test will - // try to fill up the setbuf to it's capacity and then perform an - // update on a key. - setBufSize = 10 - - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 64, - Metrics: true, - OnEvict: func(item *Item) { - if item.Value != nil { - handler(nil, item.Value) - } - }, - }) - require.NoError(t, err) - - for i := 0; i < 5*setBufSize; i++ { - v := fmt.Sprintf("%0100d", i) - // We're updating the same key. - if !c.SetWithCost("0", v, 1) { - // The race condition doesn't show up without this sleep. - time.Sleep(time.Microsecond) - droppedMap[i] = struct{}{} - } - } - // Wait for all the items to be processed. - c.Wait() - // This will cause eviction from the cache. - require.True(t, c.SetWithCost("1", nil, 10)) - c.Close() - } - - // Run the test 100 times since it's not reliable. 
- for i := 0; i < 100; i++ { - test() - } -} - -func newTestCache() (*Cache, error) { - return NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 64, - Metrics: true, - }) -} diff --git a/go/cache/ristretto/policy.go b/go/cache/ristretto/policy.go deleted file mode 100644 index 84cc008cb99..00000000000 --- a/go/cache/ristretto/policy.go +++ /dev/null @@ -1,423 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ristretto - -import ( - "math" - "sync" - "sync/atomic" - - "vitess.io/vitess/go/cache/ristretto/bloom" -) - -const ( - // lfuSample is the number of items to sample when looking at eviction - // candidates. 5 seems to be the most optimal number [citation needed]. - lfuSample = 5 -) - -// policy is the interface encapsulating eviction/admission behavior. -// -// TODO: remove this interface and just rename defaultPolicy to policy, as we -// -// are probably only going to use/implement/maintain one policy. -type policy interface { - ringConsumer - // Add attempts to Add the key-cost pair to the Policy. It returns a slice - // of evicted keys and a bool denoting whether or not the key-cost pair - // was added. If it returns true, the key should be stored in cache. - Add(uint64, int64) ([]*Item, bool) - // Has returns true if the key exists in the Policy. - Has(uint64) bool - // Del deletes the key from the Policy. 
- Del(uint64) - // Cap returns the amount of used capacity. - Used() int64 - // Close stops all goroutines and closes all channels. - Close() - // Update updates the cost value for the key. - Update(uint64, int64) - // Cost returns the cost value of a key or -1 if missing. - Cost(uint64) int64 - // Optionally, set stats object to track how policy is performing. - CollectMetrics(*Metrics) - // Clear zeroes out all counters and clears hashmaps. - Clear() - // MaxCost returns the current max cost of the cache policy. - MaxCost() int64 - // UpdateMaxCost updates the max cost of the cache policy. - UpdateMaxCost(int64) -} - -func newPolicy(numCounters, maxCost int64) policy { - return newDefaultPolicy(numCounters, maxCost) -} - -type defaultPolicy struct { - sync.Mutex - admit *tinyLFU - evict *sampledLFU - itemsCh chan []uint64 - stop chan struct{} - isClosed bool - metrics *Metrics - numCounters int64 - maxCost int64 -} - -func newDefaultPolicy(numCounters, maxCost int64) *defaultPolicy { - p := &defaultPolicy{ - admit: newTinyLFU(numCounters), - evict: newSampledLFU(maxCost), - itemsCh: make(chan []uint64, 3), - stop: make(chan struct{}), - numCounters: numCounters, - maxCost: maxCost, - } - go p.processItems() - return p -} - -func (p *defaultPolicy) CollectMetrics(metrics *Metrics) { - p.metrics = metrics - p.evict.metrics = metrics -} - -type policyPair struct { - key uint64 - cost int64 -} - -func (p *defaultPolicy) processItems() { - for { - select { - case items := <-p.itemsCh: - p.Lock() - p.admit.Push(items) - p.Unlock() - case <-p.stop: - return - } - } -} - -func (p *defaultPolicy) Push(keys []uint64) bool { - if p.isClosed { - return false - } - - if len(keys) == 0 { - return true - } - - select { - case p.itemsCh <- keys: - p.metrics.add(keepGets, keys[0], uint64(len(keys))) - return true - default: - p.metrics.add(dropGets, keys[0], uint64(len(keys))) - return false - } -} - -// Add decides whether the item with the given key and cost should be accepted 
by -// the policy. It returns the list of victims that have been evicted and a boolean -// indicating whether the incoming item should be accepted. -func (p *defaultPolicy) Add(key uint64, cost int64) ([]*Item, bool) { - p.Lock() - defer p.Unlock() - - // Cannot add an item bigger than entire cache. - if cost > p.evict.getMaxCost() { - return nil, false - } - - // No need to go any further if the item is already in the cache. - if has := p.evict.updateIfHas(key, cost); has { - // An update does not count as an addition, so return false. - return nil, false - } - - // If the execution reaches this point, the key doesn't exist in the cache. - // Calculate the remaining room in the cache (usually bytes). - room := p.evict.roomLeft(cost) - if room >= 0 { - // There's enough room in the cache to store the new item without - // overflowing. Do that now and stop here. - p.evict.add(key, cost) - p.metrics.add(costAdd, key, uint64(cost)) - return nil, true - } - - // incHits is the hit count for the incoming item. - incHits := p.admit.Estimate(key) - // sample is the eviction candidate pool to be filled via random sampling. - // TODO: perhaps we should use a min heap here. Right now our time - // complexity is N for finding the min. Min heap should bring it down to - // O(lg N). - sample := make([]*policyPair, 0, lfuSample) - // As items are evicted they will be appended to victims. - victims := make([]*Item, 0) - - // Delete victims until there's enough space or a minKey is found that has - // more hits than incoming item. - for ; room < 0; room = p.evict.roomLeft(cost) { - // Fill up empty slots in sample. - sample = p.evict.fillSample(sample) - - // Find minimally used item in sample. - minKey, minHits, minID, minCost := uint64(0), int64(math.MaxInt64), 0, int64(0) - for i, pair := range sample { - // Look up hit count for sample key. 
- if hits := p.admit.Estimate(pair.key); hits < minHits { - minKey, minHits, minID, minCost = pair.key, hits, i, pair.cost - } - } - - // If the incoming item isn't worth keeping in the policy, reject. - if incHits < minHits { - p.metrics.add(rejectSets, key, 1) - return victims, false - } - - // Delete the victim from metadata. - p.evict.del(minKey) - - // Delete the victim from sample. - sample[minID] = sample[len(sample)-1] - sample = sample[:len(sample)-1] - // Store victim in evicted victims slice. - victims = append(victims, &Item{ - Key: minKey, - Conflict: 0, - Cost: minCost, - }) - } - - p.evict.add(key, cost) - p.metrics.add(costAdd, key, uint64(cost)) - return victims, true -} - -func (p *defaultPolicy) Has(key uint64) bool { - p.Lock() - _, exists := p.evict.keyCosts[key] - p.Unlock() - return exists -} - -func (p *defaultPolicy) Del(key uint64) { - p.Lock() - p.evict.del(key) - p.Unlock() -} - -func (p *defaultPolicy) Used() int64 { - p.Lock() - used := p.evict.used - p.Unlock() - return used -} - -func (p *defaultPolicy) Update(key uint64, cost int64) { - p.Lock() - p.evict.updateIfHas(key, cost) - p.Unlock() -} - -func (p *defaultPolicy) Cost(key uint64) int64 { - p.Lock() - if cost, found := p.evict.keyCosts[key]; found { - p.Unlock() - return cost - } - p.Unlock() - return -1 -} - -func (p *defaultPolicy) Clear() { - p.Lock() - p.admit = newTinyLFU(p.numCounters) - p.evict = newSampledLFU(p.maxCost) - p.Unlock() -} - -func (p *defaultPolicy) Close() { - if p.isClosed { - return - } - - // Block until the p.processItems goroutine returns. - p.stop <- struct{}{} - close(p.stop) - close(p.itemsCh) - p.isClosed = true -} - -func (p *defaultPolicy) MaxCost() int64 { - if p == nil || p.evict == nil { - return 0 - } - return p.evict.getMaxCost() -} - -func (p *defaultPolicy) UpdateMaxCost(maxCost int64) { - if p == nil || p.evict == nil { - return - } - p.evict.updateMaxCost(maxCost) -} - -// sampledLFU is an eviction helper storing key-cost pairs. 
-type sampledLFU struct { - keyCosts map[uint64]int64 - maxCost int64 - used int64 - metrics *Metrics -} - -func newSampledLFU(maxCost int64) *sampledLFU { - return &sampledLFU{ - keyCosts: make(map[uint64]int64), - maxCost: maxCost, - } -} - -func (p *sampledLFU) getMaxCost() int64 { - return atomic.LoadInt64(&p.maxCost) -} - -func (p *sampledLFU) updateMaxCost(maxCost int64) { - atomic.StoreInt64(&p.maxCost, maxCost) -} - -func (p *sampledLFU) roomLeft(cost int64) int64 { - return p.getMaxCost() - (p.used + cost) -} - -func (p *sampledLFU) fillSample(in []*policyPair) []*policyPair { - if len(in) >= lfuSample { - return in - } - for key, cost := range p.keyCosts { - in = append(in, &policyPair{key, cost}) - if len(in) >= lfuSample { - return in - } - } - return in -} - -func (p *sampledLFU) del(key uint64) { - cost, ok := p.keyCosts[key] - if !ok { - return - } - p.used -= cost - delete(p.keyCosts, key) - p.metrics.add(costEvict, key, uint64(cost)) - p.metrics.add(keyEvict, key, 1) -} - -func (p *sampledLFU) add(key uint64, cost int64) { - p.keyCosts[key] = cost - p.used += cost -} - -func (p *sampledLFU) updateIfHas(key uint64, cost int64) bool { - if prev, found := p.keyCosts[key]; found { - // Update the cost of an existing key, but don't worry about evicting. - // Evictions will be handled the next time a new item is added. - p.metrics.add(keyUpdate, key, 1) - if prev > cost { - diff := prev - cost - p.metrics.add(costAdd, key, ^uint64(uint64(diff)-1)) - } else if cost > prev { - diff := cost - prev - p.metrics.add(costAdd, key, uint64(diff)) - } - p.used += cost - prev - p.keyCosts[key] = cost - return true - } - return false -} - -func (p *sampledLFU) clear() { - p.used = 0 - p.keyCosts = make(map[uint64]int64) -} - -// tinyLFU is an admission helper that keeps track of access frequency using -// tiny (4-bit) counters in the form of a count-min sketch. -// tinyLFU is NOT thread safe. 
-type tinyLFU struct { - freq *cmSketch - door *bloom.Bloom - incrs int64 - resetAt int64 -} - -func newTinyLFU(numCounters int64) *tinyLFU { - return &tinyLFU{ - freq: newCmSketch(numCounters), - door: bloom.NewBloomFilterWithErrorRate(uint64(numCounters), 0.01), - resetAt: numCounters, - } -} - -func (p *tinyLFU) Push(keys []uint64) { - for _, key := range keys { - p.Increment(key) - } -} - -func (p *tinyLFU) Estimate(key uint64) int64 { - hits := p.freq.Estimate(key) - if p.door.Has(key) { - hits++ - } - return hits -} - -func (p *tinyLFU) Increment(key uint64) { - // Flip doorkeeper bit if not already done. - if added := p.door.AddIfNotHas(key); !added { - // Increment count-min counter if doorkeeper bit is already set. - p.freq.Increment(key) - } - p.incrs++ - if p.incrs >= p.resetAt { - p.reset() - } -} - -func (p *tinyLFU) reset() { - // Zero out incrs. - p.incrs = 0 - // clears doorkeeper bits - p.door.Clear() - // halves count-min counters - p.freq.Reset() -} - -func (p *tinyLFU) clear() { - p.incrs = 0 - p.freq.Clear() - p.door.Clear() -} diff --git a/go/cache/ristretto/policy_test.go b/go/cache/ristretto/policy_test.go deleted file mode 100644 index c864b6c74d0..00000000000 --- a/go/cache/ristretto/policy_test.go +++ /dev/null @@ -1,276 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package ristretto - -import ( - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func TestPolicy(t *testing.T) { - defer func() { - require.Nil(t, recover()) - }() - newPolicy(100, 10) -} - -func TestPolicyMetrics(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.CollectMetrics(newMetrics()) - require.NotNil(t, p.metrics) - require.NotNil(t, p.evict.metrics) -} - -func TestPolicyProcessItems(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.itemsCh <- []uint64{1, 2, 2} - time.Sleep(wait) - p.Lock() - require.Equal(t, int64(2), p.admit.Estimate(2)) - require.Equal(t, int64(1), p.admit.Estimate(1)) - p.Unlock() - - p.stop <- struct{}{} - p.itemsCh <- []uint64{3, 3, 3} - time.Sleep(wait) - p.Lock() - require.Equal(t, int64(0), p.admit.Estimate(3)) - p.Unlock() -} - -func TestPolicyPush(t *testing.T) { - p := newDefaultPolicy(100, 10) - require.True(t, p.Push([]uint64{})) - - keepCount := 0 - for i := 0; i < 10; i++ { - if p.Push([]uint64{1, 2, 3, 4, 5}) { - keepCount++ - } - } - require.NotEqual(t, 0, keepCount) -} - -func TestPolicyAdd(t *testing.T) { - p := newDefaultPolicy(1000, 100) - if victims, added := p.Add(1, 101); victims != nil || added { - t.Fatal("can't add an item bigger than entire cache") - } - p.Lock() - p.evict.add(1, 1) - p.admit.Increment(1) - p.admit.Increment(2) - p.admit.Increment(3) - p.Unlock() - - victims, added := p.Add(1, 1) - require.Nil(t, victims) - require.False(t, added) - - victims, added = p.Add(2, 20) - require.Nil(t, victims) - require.True(t, added) - - victims, added = p.Add(3, 90) - require.NotNil(t, victims) - require.True(t, added) - - victims, added = p.Add(4, 20) - require.NotNil(t, victims) - require.False(t, added) -} - -func TestPolicyHas(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.Add(1, 1) - require.True(t, p.Has(1)) - require.False(t, p.Has(2)) -} - -func TestPolicyDel(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.Add(1, 1) - p.Del(1) - p.Del(2) - require.False(t, 
p.Has(1)) - require.False(t, p.Has(2)) -} - -func TestPolicyCap(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.Add(1, 1) - require.Equal(t, int64(9), p.MaxCost()-p.Used()) -} - -func TestPolicyUpdate(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.Add(1, 1) - p.Update(1, 2) - p.Lock() - require.Equal(t, int64(2), p.evict.keyCosts[1]) - p.Unlock() -} - -func TestPolicyCost(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.Add(1, 2) - require.Equal(t, int64(2), p.Cost(1)) - require.Equal(t, int64(-1), p.Cost(2)) -} - -func TestPolicyClear(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.Add(1, 1) - p.Add(2, 2) - p.Add(3, 3) - p.Clear() - require.Equal(t, int64(10), p.MaxCost()-p.Used()) - require.False(t, p.Has(1)) - require.False(t, p.Has(2)) - require.False(t, p.Has(3)) -} - -func TestPolicyClose(t *testing.T) { - defer func() { - require.NotNil(t, recover()) - }() - - p := newDefaultPolicy(100, 10) - p.Add(1, 1) - p.Close() - p.itemsCh <- []uint64{1} -} - -func TestPushAfterClose(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.Close() - require.False(t, p.Push([]uint64{1, 2})) -} - -func TestAddAfterClose(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.Close() - p.Add(1, 1) -} - -func TestSampledLFUAdd(t *testing.T) { - e := newSampledLFU(4) - e.add(1, 1) - e.add(2, 2) - e.add(3, 1) - require.Equal(t, int64(4), e.used) - require.Equal(t, int64(2), e.keyCosts[2]) -} - -func TestSampledLFUDel(t *testing.T) { - e := newSampledLFU(4) - e.add(1, 1) - e.add(2, 2) - e.del(2) - require.Equal(t, int64(1), e.used) - _, ok := e.keyCosts[2] - require.False(t, ok) - e.del(4) -} - -func TestSampledLFUUpdate(t *testing.T) { - e := newSampledLFU(4) - e.add(1, 1) - require.True(t, e.updateIfHas(1, 2)) - require.Equal(t, int64(2), e.used) - require.False(t, e.updateIfHas(2, 2)) -} - -func TestSampledLFUClear(t *testing.T) { - e := newSampledLFU(4) - e.add(1, 1) - e.add(2, 2) - e.add(3, 1) - e.clear() - require.Equal(t, 0, len(e.keyCosts)) - 
require.Equal(t, int64(0), e.used) -} - -func TestSampledLFURoom(t *testing.T) { - e := newSampledLFU(16) - e.add(1, 1) - e.add(2, 2) - e.add(3, 3) - require.Equal(t, int64(6), e.roomLeft(4)) -} - -func TestSampledLFUSample(t *testing.T) { - e := newSampledLFU(16) - e.add(4, 4) - e.add(5, 5) - sample := e.fillSample([]*policyPair{ - {1, 1}, - {2, 2}, - {3, 3}, - }) - k := sample[len(sample)-1].key - require.Equal(t, 5, len(sample)) - require.NotEqual(t, 1, k) - require.NotEqual(t, 2, k) - require.NotEqual(t, 3, k) - require.Equal(t, len(sample), len(e.fillSample(sample))) - e.del(5) - sample = e.fillSample(sample[:len(sample)-2]) - require.Equal(t, 4, len(sample)) -} - -func TestTinyLFUIncrement(t *testing.T) { - a := newTinyLFU(4) - a.Increment(1) - a.Increment(1) - a.Increment(1) - require.True(t, a.door.Has(1)) - require.Equal(t, int64(2), a.freq.Estimate(1)) - - a.Increment(1) - require.False(t, a.door.Has(1)) - require.Equal(t, int64(1), a.freq.Estimate(1)) -} - -func TestTinyLFUEstimate(t *testing.T) { - a := newTinyLFU(8) - a.Increment(1) - a.Increment(1) - a.Increment(1) - require.Equal(t, int64(3), a.Estimate(1)) - require.Equal(t, int64(0), a.Estimate(2)) -} - -func TestTinyLFUPush(t *testing.T) { - a := newTinyLFU(16) - a.Push([]uint64{1, 2, 2, 3, 3, 3}) - require.Equal(t, int64(1), a.Estimate(1)) - require.Equal(t, int64(2), a.Estimate(2)) - require.Equal(t, int64(3), a.Estimate(3)) - require.Equal(t, int64(6), a.incrs) -} - -func TestTinyLFUClear(t *testing.T) { - a := newTinyLFU(16) - a.Push([]uint64{1, 3, 3, 3}) - a.clear() - require.Equal(t, int64(0), a.incrs) - require.Equal(t, int64(0), a.Estimate(3)) -} diff --git a/go/cache/ristretto/ring.go b/go/cache/ristretto/ring.go deleted file mode 100644 index 84d8689ee37..00000000000 --- a/go/cache/ristretto/ring.go +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ristretto - -import ( - "sync" -) - -// ringConsumer is the user-defined object responsible for receiving and -// processing items in batches when buffers are drained. -type ringConsumer interface { - Push([]uint64) bool -} - -// ringStripe is a singular ring buffer that is not concurrent safe. -type ringStripe struct { - cons ringConsumer - data []uint64 - capa int -} - -func newRingStripe(cons ringConsumer, capa int64) *ringStripe { - return &ringStripe{ - cons: cons, - data: make([]uint64, 0, capa), - capa: int(capa), - } -} - -// Push appends an item in the ring buffer and drains (copies items and -// sends to Consumer) if full. -func (s *ringStripe) Push(item uint64) { - s.data = append(s.data, item) - // Decide if the ring buffer should be drained. - if len(s.data) >= s.capa { - // Send elements to consumer and create a new ring stripe. - if s.cons.Push(s.data) { - s.data = make([]uint64, 0, s.capa) - } else { - s.data = s.data[:0] - } - } -} - -// ringBuffer stores multiple buffers (stripes) and distributes Pushed items -// between them to lower contention. -// -// This implements the "batching" process described in the BP-Wrapper paper -// (section III part A). -type ringBuffer struct { - pool *sync.Pool -} - -// newRingBuffer returns a striped ring buffer. The Consumer in ringConfig will -// be called when individual stripes are full and need to drain their elements. 
-func newRingBuffer(cons ringConsumer, capa int64) *ringBuffer { - // LOSSY buffers use a very simple sync.Pool for concurrently reusing - // stripes. We do lose some stripes due to GC (unheld items in sync.Pool - // are cleared), but the performance gains generally outweigh the small - // percentage of elements lost. The performance primarily comes from - // low-level runtime functions used in the standard library that aren't - // available to us (such as runtime_procPin()). - return &ringBuffer{ - pool: &sync.Pool{ - New: func() any { return newRingStripe(cons, capa) }, - }, - } -} - -// Push adds an element to one of the internal stripes and possibly drains if -// the stripe becomes full. -func (b *ringBuffer) Push(item uint64) { - // Reuse or create a new stripe. - stripe := b.pool.Get().(*ringStripe) - stripe.Push(item) - b.pool.Put(stripe) -} diff --git a/go/cache/ristretto/ring_test.go b/go/cache/ristretto/ring_test.go deleted file mode 100644 index 0dbe962ccc6..00000000000 --- a/go/cache/ristretto/ring_test.go +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package ristretto - -import ( - "sync" - "testing" - - "github.com/stretchr/testify/require" -) - -type testConsumer struct { - push func([]uint64) - save bool -} - -func (c *testConsumer) Push(items []uint64) bool { - if c.save { - c.push(items) - return true - } - return false -} - -func TestRingDrain(t *testing.T) { - drains := 0 - r := newRingBuffer(&testConsumer{ - push: func(items []uint64) { - drains++ - }, - save: true, - }, 1) - for i := 0; i < 100; i++ { - r.Push(uint64(i)) - } - require.Equal(t, 100, drains, "buffers shouldn't be dropped with BufferItems == 1") -} - -func TestRingReset(t *testing.T) { - drains := 0 - r := newRingBuffer(&testConsumer{ - push: func(items []uint64) { - drains++ - }, - save: false, - }, 4) - for i := 0; i < 100; i++ { - r.Push(uint64(i)) - } - require.Equal(t, 0, drains, "testConsumer shouldn't be draining") -} - -func TestRingConsumer(t *testing.T) { - mu := &sync.Mutex{} - drainItems := make(map[uint64]struct{}) - r := newRingBuffer(&testConsumer{ - push: func(items []uint64) { - mu.Lock() - defer mu.Unlock() - for i := range items { - drainItems[items[i]] = struct{}{} - } - }, - save: true, - }, 4) - for i := 0; i < 100; i++ { - r.Push(uint64(i)) - } - l := len(drainItems) - require.NotEqual(t, 0, l) - require.True(t, l <= 100) -} diff --git a/go/cache/ristretto/sketch.go b/go/cache/ristretto/sketch.go deleted file mode 100644 index ce0504a2a83..00000000000 --- a/go/cache/ristretto/sketch.go +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package ristretto includes multiple probabalistic data structures needed for -// admission/eviction metadata. Most are Counting Bloom Filter variations, but -// a caching-specific feature that is also required is a "freshness" mechanism, -// which basically serves as a "lifetime" process. This freshness mechanism -// was described in the original TinyLFU paper [1], but other mechanisms may -// be better suited for certain data distributions. -// -// [1]: https://arxiv.org/abs/1512.00727 -package ristretto - -import ( - "fmt" - "math/rand" - "time" -) - -// cmSketch is a Count-Min sketch implementation with 4-bit counters, heavily -// based on Damian Gryski's CM4 [1]. -// -// [1]: https://github.com/dgryski/go-tinylfu/blob/master/cm4.go -type cmSketch struct { - rows [cmDepth]cmRow - seed [cmDepth]uint64 - mask uint64 -} - -const ( - // cmDepth is the number of counter copies to store (think of it as rows). - cmDepth = 4 -) - -func newCmSketch(numCounters int64) *cmSketch { - if numCounters == 0 { - panic("cmSketch: bad numCounters") - } - // Get the next power of 2 for better cache performance. - numCounters = next2Power(numCounters) - sketch := &cmSketch{mask: uint64(numCounters - 1)} - // Initialize rows of counters and seeds. - source := rand.New(rand.NewSource(time.Now().UnixNano())) - for i := 0; i < cmDepth; i++ { - sketch.seed[i] = source.Uint64() - sketch.rows[i] = newCmRow(numCounters) - } - return sketch -} - -// Increment increments the count(ers) for the specified key. 
-func (s *cmSketch) Increment(hashed uint64) { - for i := range s.rows { - s.rows[i].increment((hashed ^ s.seed[i]) & s.mask) - } -} - -// Estimate returns the value of the specified key. -func (s *cmSketch) Estimate(hashed uint64) int64 { - min := byte(255) - for i := range s.rows { - val := s.rows[i].get((hashed ^ s.seed[i]) & s.mask) - if val < min { - min = val - } - } - return int64(min) -} - -// Reset halves all counter values. -func (s *cmSketch) Reset() { - for _, r := range s.rows { - r.reset() - } -} - -// Clear zeroes all counters. -func (s *cmSketch) Clear() { - for _, r := range s.rows { - r.clear() - } -} - -// cmRow is a row of bytes, with each byte holding two counters. -type cmRow []byte - -func newCmRow(numCounters int64) cmRow { - return make(cmRow, numCounters/2) -} - -func (r cmRow) get(n uint64) byte { - return byte(r[n/2]>>((n&1)*4)) & 0x0f -} - -func (r cmRow) increment(n uint64) { - // Index of the counter. - i := n / 2 - // Shift distance (even 0, odd 4). - s := (n & 1) * 4 - // Counter value. - v := (r[i] >> s) & 0x0f - // Only increment if not max value (overflow wrap is bad for LFU). - if v < 15 { - r[i] += 1 << s - } -} - -func (r cmRow) reset() { - // Halve each counter. - for i := range r { - r[i] = (r[i] >> 1) & 0x77 - } -} - -func (r cmRow) clear() { - // Zero each counter. - for i := range r { - r[i] = 0 - } -} - -func (r cmRow) string() string { - s := "" - for i := uint64(0); i < uint64(len(r)*2); i++ { - s += fmt.Sprintf("%02d ", (r[(i/2)]>>((i&1)*4))&0x0f) - } - s = s[:len(s)-1] - return s -} - -// next2Power rounds x up to the next power of 2, if it's not already one. 
-func next2Power(x int64) int64 { - x-- - x |= x >> 1 - x |= x >> 2 - x |= x >> 4 - x |= x >> 8 - x |= x >> 16 - x |= x >> 32 - x++ - return x -} diff --git a/go/cache/ristretto/sketch_test.go b/go/cache/ristretto/sketch_test.go deleted file mode 100644 index 03804a6d599..00000000000 --- a/go/cache/ristretto/sketch_test.go +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package ristretto - -import ( - "testing" - - "vitess.io/vitess/go/vt/log" - - "github.com/stretchr/testify/require" -) - -func TestSketch(t *testing.T) { - defer func() { - require.NotNil(t, recover()) - }() - - s := newCmSketch(5) - require.Equal(t, uint64(7), s.mask) - newCmSketch(0) -} - -func TestSketchIncrement(t *testing.T) { - s := newCmSketch(16) - s.Increment(1) - s.Increment(5) - s.Increment(9) - for i := 0; i < cmDepth; i++ { - if s.rows[i].string() != s.rows[0].string() { - break - } - require.False(t, i == cmDepth-1, "identical rows, bad seeding") - } -} - -func TestSketchEstimate(t *testing.T) { - s := newCmSketch(16) - s.Increment(1) - s.Increment(1) - require.Equal(t, int64(2), s.Estimate(1)) - require.Equal(t, int64(0), s.Estimate(0)) -} - -func TestSketchReset(t *testing.T) { - s := newCmSketch(16) - s.Increment(1) - s.Increment(1) - s.Increment(1) - s.Increment(1) - s.Reset() - require.Equal(t, int64(2), s.Estimate(1)) -} - -func TestSketchClear(t *testing.T) { - s := newCmSketch(16) - for i := 0; i < 16; i++ { - s.Increment(uint64(i)) - } - s.Clear() - for i := 0; i < 16; i++ { - require.Equal(t, int64(0), s.Estimate(uint64(i))) - } -} - -func TestNext2Power(t *testing.T) { - sz := 12 << 30 - szf := float64(sz) * 0.01 - val := int64(szf) - log.Infof("szf = %.2f val = %d\n", szf, val) - pow := next2Power(val) - log.Infof("pow = %d. mult 4 = %d\n", pow, pow*4) -} - -func BenchmarkSketchIncrement(b *testing.B) { - s := newCmSketch(16) - b.SetBytes(1) - for n := 0; n < b.N; n++ { - s.Increment(1) - } -} - -func BenchmarkSketchEstimate(b *testing.B) { - s := newCmSketch(16) - s.Increment(1) - b.SetBytes(1) - for n := 0; n < b.N; n++ { - s.Estimate(1) - } -} diff --git a/go/cache/ristretto/store.go b/go/cache/ristretto/store.go deleted file mode 100644 index 0e455e7052f..00000000000 --- a/go/cache/ristretto/store.go +++ /dev/null @@ -1,240 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ristretto - -import ( - "sync" -) - -// TODO: Do we need this to be a separate struct from Item? -type storeItem struct { - key uint64 - conflict uint64 - value any -} - -// store is the interface fulfilled by all hash map implementations in this -// file. Some hash map implementations are better suited for certain data -// distributions than others, so this allows us to abstract that out for use -// in Ristretto. -// -// Every store is safe for concurrent usage. -type store interface { - // Get returns the value associated with the key parameter. - Get(uint64, uint64) (any, bool) - // Set adds the key-value pair to the Map or updates the value if it's - // already present. The key-value pair is passed as a pointer to an - // item object. - Set(*Item) - // Del deletes the key-value pair from the Map. - Del(uint64, uint64) (uint64, any) - // Update attempts to update the key with a new value and returns true if - // successful. - Update(*Item) (any, bool) - // Clear clears all contents of the store. - Clear(onEvict itemCallback) - // ForEach yields all the values in the store - ForEach(forEach func(any) bool) - // Len returns the number of entries in the store - Len() int -} - -// newStore returns the default store implementation. 
-func newStore() store { - return newShardedMap() -} - -const numShards uint64 = 256 - -type shardedMap struct { - shards []*lockedMap -} - -func newShardedMap() *shardedMap { - sm := &shardedMap{ - shards: make([]*lockedMap, int(numShards)), - } - for i := range sm.shards { - sm.shards[i] = newLockedMap() - } - return sm -} - -func (sm *shardedMap) Get(key, conflict uint64) (any, bool) { - return sm.shards[key%numShards].get(key, conflict) -} - -func (sm *shardedMap) Set(i *Item) { - if i == nil { - // If item is nil make this Set a no-op. - return - } - - sm.shards[i.Key%numShards].Set(i) -} - -func (sm *shardedMap) Del(key, conflict uint64) (uint64, any) { - return sm.shards[key%numShards].Del(key, conflict) -} - -func (sm *shardedMap) Update(newItem *Item) (any, bool) { - return sm.shards[newItem.Key%numShards].Update(newItem) -} - -func (sm *shardedMap) ForEach(forEach func(any) bool) { - for _, shard := range sm.shards { - if !shard.foreach(forEach) { - break - } - } -} - -func (sm *shardedMap) Len() int { - l := 0 - for _, shard := range sm.shards { - l += shard.Len() - } - return l -} - -func (sm *shardedMap) Clear(onEvict itemCallback) { - for i := uint64(0); i < numShards; i++ { - sm.shards[i].Clear(onEvict) - } -} - -type lockedMap struct { - sync.RWMutex - data map[uint64]storeItem -} - -func newLockedMap() *lockedMap { - return &lockedMap{ - data: make(map[uint64]storeItem), - } -} - -func (m *lockedMap) get(key, conflict uint64) (any, bool) { - m.RLock() - item, ok := m.data[key] - m.RUnlock() - if !ok { - return nil, false - } - if conflict != 0 && (conflict != item.conflict) { - return nil, false - } - return item.value, true -} - -func (m *lockedMap) Set(i *Item) { - if i == nil { - // If the item is nil make this Set a no-op. - return - } - - m.Lock() - defer m.Unlock() - item, ok := m.data[i.Key] - - if ok { - // The item existed already. We need to check the conflict key and reject the - // update if they do not match. 
Only after that the expiration map is updated. - if i.Conflict != 0 && (i.Conflict != item.conflict) { - return - } - } - - m.data[i.Key] = storeItem{ - key: i.Key, - conflict: i.Conflict, - value: i.Value, - } -} - -func (m *lockedMap) Del(key, conflict uint64) (uint64, any) { - m.Lock() - item, ok := m.data[key] - if !ok { - m.Unlock() - return 0, nil - } - if conflict != 0 && (conflict != item.conflict) { - m.Unlock() - return 0, nil - } - - delete(m.data, key) - m.Unlock() - return item.conflict, item.value -} - -func (m *lockedMap) Update(newItem *Item) (any, bool) { - m.Lock() - item, ok := m.data[newItem.Key] - if !ok { - m.Unlock() - return nil, false - } - if newItem.Conflict != 0 && (newItem.Conflict != item.conflict) { - m.Unlock() - return nil, false - } - - m.data[newItem.Key] = storeItem{ - key: newItem.Key, - conflict: newItem.Conflict, - value: newItem.Value, - } - - m.Unlock() - return item.value, true -} - -func (m *lockedMap) Len() int { - m.RLock() - l := len(m.data) - m.RUnlock() - return l -} - -func (m *lockedMap) Clear(onEvict itemCallback) { - m.Lock() - i := &Item{} - if onEvict != nil { - for _, si := range m.data { - i.Key = si.key - i.Conflict = si.conflict - i.Value = si.value - onEvict(i) - } - } - m.data = make(map[uint64]storeItem) - m.Unlock() -} - -func (m *lockedMap) foreach(forEach func(any) bool) bool { - m.RLock() - defer m.RUnlock() - for _, si := range m.data { - if !forEach(si.value) { - return false - } - } - return true -} diff --git a/go/cache/ristretto/store_test.go b/go/cache/ristretto/store_test.go deleted file mode 100644 index 54634736a72..00000000000 --- a/go/cache/ristretto/store_test.go +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ristretto - -import ( - "strconv" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestStoreSetGet(t *testing.T) { - s := newStore() - key, conflict := defaultStringHash("1") - i := Item{ - Key: key, - Conflict: conflict, - Value: 2, - } - s.Set(&i) - val, ok := s.Get(key, conflict) - require.True(t, ok) - require.Equal(t, 2, val.(int)) - - i.Value = 3 - s.Set(&i) - val, ok = s.Get(key, conflict) - require.True(t, ok) - require.Equal(t, 3, val.(int)) - - key, conflict = defaultStringHash("2") - i = Item{ - Key: key, - Conflict: conflict, - Value: 2, - } - s.Set(&i) - val, ok = s.Get(key, conflict) - require.True(t, ok) - require.Equal(t, 2, val.(int)) -} - -func TestStoreDel(t *testing.T) { - s := newStore() - key, conflict := defaultStringHash("1") - i := Item{ - Key: key, - Conflict: conflict, - Value: 1, - } - s.Set(&i) - s.Del(key, conflict) - val, ok := s.Get(key, conflict) - require.False(t, ok) - require.Nil(t, val) - - s.Del(2, 0) -} - -func TestStoreClear(t *testing.T) { - s := newStore() - for i := 0; i < 1000; i++ { - key, conflict := defaultStringHash(strconv.Itoa(i)) - it := Item{ - Key: key, - Conflict: conflict, - Value: i, - } - s.Set(&it) - } - s.Clear(nil) - for i := 0; i < 1000; i++ { - key, conflict := defaultStringHash(strconv.Itoa(i)) - val, ok := s.Get(key, conflict) - require.False(t, ok) - require.Nil(t, val) - } -} - -func TestStoreUpdate(t *testing.T) { - s := newStore() - key, conflict := defaultStringHash("1") - i := Item{ - Key: key, - Conflict: conflict, - Value: 1, - } - s.Set(&i) - 
i.Value = 2 - _, ok := s.Update(&i) - require.True(t, ok) - - val, ok := s.Get(key, conflict) - require.True(t, ok) - require.NotNil(t, val) - - val, ok = s.Get(key, conflict) - require.True(t, ok) - require.Equal(t, 2, val.(int)) - - i.Value = 3 - _, ok = s.Update(&i) - require.True(t, ok) - - val, ok = s.Get(key, conflict) - require.True(t, ok) - require.Equal(t, 3, val.(int)) - - key, conflict = defaultStringHash("2") - i = Item{ - Key: key, - Conflict: conflict, - Value: 2, - } - _, ok = s.Update(&i) - require.False(t, ok) - val, ok = s.Get(key, conflict) - require.False(t, ok) - require.Nil(t, val) -} - -func TestStoreCollision(t *testing.T) { - s := newShardedMap() - s.shards[1].Lock() - s.shards[1].data[1] = storeItem{ - key: 1, - conflict: 0, - value: 1, - } - s.shards[1].Unlock() - val, ok := s.Get(1, 1) - require.False(t, ok) - require.Nil(t, val) - - i := Item{ - Key: 1, - Conflict: 1, - Value: 2, - } - s.Set(&i) - val, ok = s.Get(1, 0) - require.True(t, ok) - require.NotEqual(t, 2, val.(int)) - - _, ok = s.Update(&i) - require.False(t, ok) - val, ok = s.Get(1, 0) - require.True(t, ok) - require.NotEqual(t, 2, val.(int)) - - s.Del(1, 1) - val, ok = s.Get(1, 0) - require.True(t, ok) - require.NotNil(t, val) -} - -func BenchmarkStoreGet(b *testing.B) { - s := newStore() - key, conflict := defaultStringHash("1") - i := Item{ - Key: key, - Conflict: conflict, - Value: 1, - } - s.Set(&i) - b.SetBytes(1) - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - s.Get(key, conflict) - } - }) -} - -func BenchmarkStoreSet(b *testing.B) { - s := newStore() - key, conflict := defaultStringHash("1") - b.SetBytes(1) - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - i := Item{ - Key: key, - Conflict: conflict, - Value: 1, - } - s.Set(&i) - } - }) -} - -func BenchmarkStoreUpdate(b *testing.B) { - s := newStore() - key, conflict := defaultStringHash("1") - i := Item{ - Key: key, - Conflict: conflict, - Value: 1, - } - s.Set(&i) - b.SetBytes(1) - 
b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - s.Update(&Item{ - Key: key, - Conflict: conflict, - Value: 2, - }) - } - }) -} diff --git a/go/cache/theine/LICENSE b/go/cache/theine/LICENSE new file mode 100644 index 00000000000..0161260b7b6 --- /dev/null +++ b/go/cache/theine/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Yiling-J + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/go/cache/theine/bf/bf.go b/go/cache/theine/bf/bf.go new file mode 100644 index 00000000000..f68e34d81e3 --- /dev/null +++ b/go/cache/theine/bf/bf.go @@ -0,0 +1,116 @@ +package bf + +import ( + "math" +) + +// doorkeeper is a small bloom-filter-based cache admission policy +type Bloomfilter struct { + Filter bitvector // our filter bit vector + M uint32 // size of bit vector in bits + K uint32 // distinct hash functions needed + FalsePositiveRate float64 + Capacity int +} + +func New(falsePositiveRate float64) *Bloomfilter { + d := &Bloomfilter{FalsePositiveRate: falsePositiveRate} + d.EnsureCapacity(320) + return d +} + +// create new bloomfilter with given size in bytes +func NewWithSize(size uint32) *Bloomfilter { + d := &Bloomfilter{} + bits := size * 8 + m := nextPowerOfTwo(uint32(bits)) + d.M = m + d.Filter = newbv(m) + return d +} + +func (d *Bloomfilter) EnsureCapacity(capacity int) { + if capacity <= d.Capacity { + return + } + capacity = int(nextPowerOfTwo(uint32(capacity))) + bits := float64(capacity) * -math.Log(d.FalsePositiveRate) / (math.Log(2.0) * math.Log(2.0)) // in bits + m := nextPowerOfTwo(uint32(bits)) + + if m < 1024 { + m = 1024 + } + + k := uint32(0.7 * float64(m) / float64(capacity)) + if k < 2 { + k = 2 + } + d.Capacity = capacity + d.M = m + d.Filter = newbv(m) + d.K = k +} + +func (d *Bloomfilter) Exist(h uint64) bool { + h1, h2 := uint32(h), uint32(h>>32) + var o uint = 1 + for i := uint32(0); i < d.K; i++ { + o &= d.Filter.get((h1 + (i * h2)) & (d.M - 1)) + } + return o == 1 +} + +// insert inserts the byte array b into the bloom filter. Returns true if the value +// was already considered to be in the bloom filter. 
+func (d *Bloomfilter) Insert(h uint64) bool { + h1, h2 := uint32(h), uint32(h>>32) + var o uint = 1 + for i := uint32(0); i < d.K; i++ { + o &= d.Filter.getset((h1 + (i * h2)) & (d.M - 1)) + } + return o == 1 +} + +// Reset clears the bloom filter +func (d *Bloomfilter) Reset() { + for i := range d.Filter { + d.Filter[i] = 0 + } +} + +// Internal routines for the bit vector +type bitvector []uint64 + +func newbv(size uint32) bitvector { + return make([]uint64, uint(size+63)/64) +} + +func (b bitvector) get(bit uint32) uint { + shift := bit % 64 + idx := bit / 64 + bb := b[idx] + m := uint64(1) << shift + return uint((bb & m) >> shift) +} + +// set bit 'bit' in the bitvector d and return previous value +func (b bitvector) getset(bit uint32) uint { + shift := bit % 64 + idx := bit / 64 + bb := b[idx] + m := uint64(1) << shift + b[idx] |= m + return uint((bb & m) >> shift) +} + +// return the integer >= i which is a power of two +func nextPowerOfTwo(i uint32) uint32 { + n := i - 1 + n |= n >> 1 + n |= n >> 2 + n |= n >> 4 + n |= n >> 8 + n |= n >> 16 + n++ + return n +} diff --git a/go/cache/theine/bf/bf_test.go b/go/cache/theine/bf/bf_test.go new file mode 100644 index 00000000000..f0e505766e7 --- /dev/null +++ b/go/cache/theine/bf/bf_test.go @@ -0,0 +1,24 @@ +package bf + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBloom(t *testing.T) { + bf := NewWithSize(5) + bf.FalsePositiveRate = 0.1 + bf.EnsureCapacity(5) + bf.EnsureCapacity(500) + bf.EnsureCapacity(200) + + exist := bf.Insert(123) + require.False(t, exist) + + exist = bf.Exist(123) + require.True(t, exist) + + exist = bf.Exist(456) + require.False(t, exist) +} diff --git a/go/cache/theine/entry.go b/go/cache/theine/entry.go new file mode 100644 index 00000000000..48e3bd5a09a --- /dev/null +++ b/go/cache/theine/entry.go @@ -0,0 +1,93 @@ +/* +Copyright 2023 The Vitess Authors. 
+Copyright 2023 Yiling-J + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package theine + +import "sync/atomic" + +const ( + NEW int8 = iota + REMOVE + UPDATE +) + +type ReadBufItem[K cachekey, V any] struct { + entry *Entry[K, V] + hash uint64 +} +type WriteBufItem[K cachekey, V any] struct { + entry *Entry[K, V] + costChange int64 + code int8 +} + +type MetaData[K cachekey, V any] struct { + prev *Entry[K, V] + next *Entry[K, V] +} + +type Entry[K cachekey, V any] struct { + key K + value V + meta MetaData[K, V] + cost atomic.Int64 + frequency atomic.Int32 + epoch atomic.Uint32 + removed bool + deque bool + root bool + list uint8 // used in slru, probation or protected +} + +func NewEntry[K cachekey, V any](key K, value V, cost int64) *Entry[K, V] { + entry := &Entry[K, V]{ + key: key, + value: value, + } + entry.cost.Store(cost) + return entry +} + +func (e *Entry[K, V]) Next() *Entry[K, V] { + if p := e.meta.next; !p.root { + return e.meta.next + } + return nil +} + +func (e *Entry[K, V]) Prev() *Entry[K, V] { + if p := e.meta.prev; !p.root { + return e.meta.prev + } + return nil +} + +func (e *Entry[K, V]) prev() *Entry[K, V] { + return e.meta.prev +} + +func (e *Entry[K, V]) next() *Entry[K, V] { + return e.meta.next +} + +func (e *Entry[K, V]) setPrev(entry *Entry[K, V]) { + e.meta.prev = entry +} + +func (e *Entry[K, V]) setNext(entry *Entry[K, V]) { + e.meta.next = entry +} diff --git a/go/cache/theine/list.go b/go/cache/theine/list.go new file mode 100644 index 
00000000000..19854190cba --- /dev/null +++ b/go/cache/theine/list.go @@ -0,0 +1,205 @@ +/* +Copyright 2023 The Vitess Authors. +Copyright 2023 Yiling-J + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package theine + +import ( + "fmt" + "strings" +) + +const ( + LIST_PROBATION uint8 = 1 + LIST_PROTECTED uint8 = 2 +) + +// List represents a doubly linked list. +// The zero value for List is an empty list ready to use. +type List[K cachekey, V any] struct { + root Entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used + len int // current list length(sum of costs) excluding (this) sentinel element + count int // count of entries in list + capacity uint + bounded bool + listType uint8 // 1 tinylfu list, 2 timerwheel list +} + +// New returns an initialized list. +func NewList[K cachekey, V any](size uint, listType uint8) *List[K, V] { + l := &List[K, V]{listType: listType, capacity: size, root: Entry[K, V]{}} + l.root.root = true + l.root.setNext(&l.root) + l.root.setPrev(&l.root) + l.len = 0 + l.capacity = size + if size > 0 { + l.bounded = true + } + return l +} + +func (l *List[K, V]) Reset() { + l.root.setNext(&l.root) + l.root.setPrev(&l.root) + l.len = 0 +} + +// Len returns the number of elements of list l. +// The complexity is O(1). 
+func (l *List[K, V]) Len() int { return l.len } + +func (l *List[K, V]) display() string { + var s []string + for e := l.Front(); e != nil; e = e.Next() { + s = append(s, fmt.Sprintf("%v", e.key)) + } + return strings.Join(s, "/") +} + +func (l *List[K, V]) displayReverse() string { + var s []string + for e := l.Back(); e != nil; e = e.Prev() { + s = append(s, fmt.Sprintf("%v", e.key)) + } + return strings.Join(s, "/") +} + +// Front returns the first element of list l or nil if the list is empty. +func (l *List[K, V]) Front() *Entry[K, V] { + e := l.root.next() + if e != &l.root { + return e + } + return nil +} + +// Back returns the last element of list l or nil if the list is empty. +func (l *List[K, V]) Back() *Entry[K, V] { + e := l.root.prev() + if e != &l.root { + return e + } + return nil +} + +// insert inserts e after at, increments l.len, and evicted entry if capacity exceed +func (l *List[K, V]) insert(e, at *Entry[K, V]) *Entry[K, V] { + var evicted *Entry[K, V] + if l.bounded && l.len >= int(l.capacity) { + evicted = l.PopTail() + } + e.list = l.listType + e.setPrev(at) + e.setNext(at.next()) + e.prev().setNext(e) + e.next().setPrev(e) + if l.bounded { + l.len += int(e.cost.Load()) + l.count += 1 + } + return evicted +} + +// PushFront push entry to list head +func (l *List[K, V]) PushFront(e *Entry[K, V]) *Entry[K, V] { + return l.insert(e, &l.root) +} + +// Push push entry to the back of list +func (l *List[K, V]) PushBack(e *Entry[K, V]) *Entry[K, V] { + return l.insert(e, l.root.prev()) +} + +// remove removes e from its list, decrements l.len +func (l *List[K, V]) remove(e *Entry[K, V]) { + e.prev().setNext(e.next()) + e.next().setPrev(e.prev()) + e.setNext(nil) + e.setPrev(nil) + e.list = 0 + if l.bounded { + l.len -= int(e.cost.Load()) + l.count -= 1 + } +} + +// move moves e to next to at. 
+func (l *List[K, V]) move(e, at *Entry[K, V]) { + if e == at { + return + } + e.prev().setNext(e.next()) + e.next().setPrev(e.prev()) + + e.setPrev(at) + e.setNext(at.next()) + e.prev().setNext(e) + e.next().setPrev(e) +} + +// Remove removes e from l if e is an element of list l. +// It returns the element value e.Value. +// The element must not be nil. +func (l *List[K, V]) Remove(e *Entry[K, V]) { + l.remove(e) +} + +// MoveToFront moves element e to the front of list l. +// If e is not an element of l, the list is not modified. +// The element must not be nil. +func (l *List[K, V]) MoveToFront(e *Entry[K, V]) { + l.move(e, &l.root) +} + +// MoveToBack moves element e to the back of list l. +// If e is not an element of l, the list is not modified. +// The element must not be nil. +func (l *List[K, V]) MoveToBack(e *Entry[K, V]) { + l.move(e, l.root.prev()) +} + +// MoveBefore moves element e to its new position before mark. +// If e or mark is not an element of l, or e == mark, the list is not modified. +// The element and mark must not be nil. +func (l *List[K, V]) MoveBefore(e, mark *Entry[K, V]) { + l.move(e, mark.prev()) +} + +// MoveAfter moves element e to its new position after mark. +// If e or mark is not an element of l, or e == mark, the list is not modified. +// The element and mark must not be nil. +func (l *List[K, V]) MoveAfter(e, mark *Entry[K, V]) { + l.move(e, mark) +} + +func (l *List[K, V]) PopTail() *Entry[K, V] { + entry := l.root.prev() + if entry != nil && entry != &l.root { + l.remove(entry) + return entry + } + return nil +} + +func (l *List[K, V]) Contains(entry *Entry[K, V]) bool { + for e := l.Front(); e != nil; e = e.Next() { + if e == entry { + return true + } + } + return false +} diff --git a/go/cache/theine/list_test.go b/go/cache/theine/list_test.go new file mode 100644 index 00000000000..aad68f5c142 --- /dev/null +++ b/go/cache/theine/list_test.go @@ -0,0 +1,91 @@ +/* +Copyright 2023 The Vitess Authors. 
+Copyright 2023 Yiling-J + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package theine + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestList(t *testing.T) { + l := NewList[StringKey, string](5, LIST_PROBATION) + require.Equal(t, uint(5), l.capacity) + require.Equal(t, LIST_PROBATION, l.listType) + for i := 0; i < 5; i++ { + evicted := l.PushFront(NewEntry(StringKey(fmt.Sprintf("%d", i)), "", 1)) + require.Nil(t, evicted) + } + require.Equal(t, 5, l.len) + require.Equal(t, "4/3/2/1/0", l.display()) + require.Equal(t, "0/1/2/3/4", l.displayReverse()) + + evicted := l.PushFront(NewEntry(StringKey("5"), "", 1)) + require.Equal(t, StringKey("0"), evicted.key) + require.Equal(t, 5, l.len) + require.Equal(t, "5/4/3/2/1", l.display()) + require.Equal(t, "1/2/3/4/5", l.displayReverse()) + + for i := 0; i < 5; i++ { + entry := l.PopTail() + require.Equal(t, StringKey(fmt.Sprintf("%d", i+1)), entry.key) + } + entry := l.PopTail() + require.Nil(t, entry) + + var entries []*Entry[StringKey, string] + for i := 0; i < 5; i++ { + new := NewEntry(StringKey(fmt.Sprintf("%d", i)), "", 1) + evicted := l.PushFront(new) + entries = append(entries, new) + require.Nil(t, evicted) + } + require.Equal(t, "4/3/2/1/0", l.display()) + l.MoveToBack(entries[2]) + require.Equal(t, "4/3/1/0/2", l.display()) + require.Equal(t, "2/0/1/3/4", l.displayReverse()) + l.MoveBefore(entries[1], entries[3]) + require.Equal(t, "4/1/3/0/2", l.display()) + require.Equal(t, "2/0/3/1/4", 
l.displayReverse()) + l.MoveAfter(entries[2], entries[4]) + require.Equal(t, "4/2/1/3/0", l.display()) + require.Equal(t, "0/3/1/2/4", l.displayReverse()) + l.Remove(entries[1]) + require.Equal(t, "4/2/3/0", l.display()) + require.Equal(t, "0/3/2/4", l.displayReverse()) + +} + +func TestListCountCost(t *testing.T) { + l := NewList[StringKey, string](100, LIST_PROBATION) + require.Equal(t, uint(100), l.capacity) + require.Equal(t, LIST_PROBATION, l.listType) + for i := 0; i < 5; i++ { + evicted := l.PushFront(NewEntry(StringKey(fmt.Sprintf("%d", i)), "", 20)) + require.Nil(t, evicted) + } + require.Equal(t, 100, l.len) + require.Equal(t, 5, l.count) + for i := 0; i < 3; i++ { + entry := l.PopTail() + require.NotNil(t, entry) + } + require.Equal(t, 40, l.len) + require.Equal(t, 2, l.count) +} diff --git a/go/cache/theine/mpsc.go b/go/cache/theine/mpsc.go new file mode 100644 index 00000000000..c00e2ce5a26 --- /dev/null +++ b/go/cache/theine/mpsc.go @@ -0,0 +1,86 @@ +/* +Copyright 2023 The Vitess Authors. +Copyright 2023 Yiling-J + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package theine + +// This implementation is based on http://www.1024cores.net/home/lock-free-algorithms/queues/non-intrusive-mpsc-node-based-queue + +import ( + "sync" + "sync/atomic" +) + +type node[V any] struct { + next atomic.Pointer[node[V]] + val V +} + +type Queue[V any] struct { + head, tail atomic.Pointer[node[V]] + nodePool sync.Pool +} + +func NewQueue[V any]() *Queue[V] { + q := &Queue[V]{nodePool: sync.Pool{New: func() any { + return new(node[V]) + }}} + stub := &node[V]{} + q.head.Store(stub) + q.tail.Store(stub) + return q +} + +// Push adds x to the back of the queue. +// +// Push can be safely called from multiple goroutines +func (q *Queue[V]) Push(x V) { + n := q.nodePool.Get().(*node[V]) + n.val = x + + // current producer acquires head node + prev := q.head.Swap(n) + + // release node to consumer + prev.next.Store(n) +} + +// Pop removes the item from the front of the queue or nil if the queue is empty +// +// Pop must be called from a single, consumer goroutine +func (q *Queue[V]) Pop() (V, bool) { + tail := q.tail.Load() + next := tail.next.Load() + if next != nil { + var null V + q.tail.Store(next) + v := next.val + next.val = null + tail.next.Store(nil) + q.nodePool.Put(tail) + return v, true + } + var null V + return null, false +} + +// Empty returns true if the queue is empty +// +// Empty must be called from a single, consumer goroutine +func (q *Queue[V]) Empty() bool { + tail := q.tail.Load() + return tail.next.Load() == nil +} diff --git a/go/cache/perf_test.go b/go/cache/theine/mpsc_test.go similarity index 53% rename from go/cache/perf_test.go rename to go/cache/theine/mpsc_test.go index 693e55238a0..eca50efed3e 100644 --- a/go/cache/perf_test.go +++ b/go/cache/theine/mpsc_test.go @@ -1,5 +1,6 @@ /* -Copyright 2019 The Vitess Authors. +Copyright 2023 The Vitess Authors. +Copyright 2023 Yiling-J Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -14,23 +15,32 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cache +package theine import ( "testing" + + "github.com/stretchr/testify/assert" ) -func BenchmarkGet(b *testing.B) { - cache := NewLRUCache(64*1024*1024, func(val any) int64 { - return int64(cap(val.([]byte))) - }) - value := make([]byte, 1000) - cache.Set("stuff", value) - for i := 0; i < b.N; i++ { - val, ok := cache.Get("stuff") - if !ok { - panic("error") - } - _ = val - } +func TestQueue_PushPop(t *testing.T) { + q := NewQueue[int]() + + q.Push(1) + q.Push(2) + v, ok := q.Pop() + assert.True(t, ok) + assert.Equal(t, 1, v) + v, ok = q.Pop() + assert.True(t, ok) + assert.Equal(t, 2, v) + _, ok = q.Pop() + assert.False(t, ok) +} + +func TestQueue_Empty(t *testing.T) { + q := NewQueue[int]() + assert.True(t, q.Empty()) + q.Push(1) + assert.False(t, q.Empty()) } diff --git a/go/cache/theine/singleflight.go b/go/cache/theine/singleflight.go new file mode 100644 index 00000000000..fde56670514 --- /dev/null +++ b/go/cache/theine/singleflight.go @@ -0,0 +1,196 @@ +/* +Copyright 2023 The Vitess Authors. +Copyright 2023 Yiling-J +Copyright 2013 The Go Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package theine + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "runtime/debug" + "sync" + "sync/atomic" +) + +// errGoexit indicates the runtime.Goexit was called in +// the user given function. +var errGoexit = errors.New("runtime.Goexit was called") + +// A panicError is an arbitrary value recovered from a panic +// with the stack trace during the execution of given function. +type panicError struct { + value interface{} + stack []byte +} + +// Error implements error interface. +func (p *panicError) Error() string { + return fmt.Sprintf("%v\n\n%s", p.value, p.stack) +} + +func newPanicError(v interface{}) error { + stack := debug.Stack() + + // The first line of the stack trace is of the form "goroutine N [status]:" + // but by the time the panic reaches Do the goroutine may no longer exist + // and its status will have changed. Trim out the misleading line. + if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { + stack = stack[line+1:] + } + return &panicError{value: v, stack: stack} +} + +// call is an in-flight or completed singleflight.Do call +type call[V any] struct { + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val V + err error + + wg sync.WaitGroup + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups atomic.Int32 +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group[K comparable, V any] struct { + m map[K]*call[V] // lazily initialized + mu sync.Mutex // protects m + callPool sync.Pool +} + +func NewGroup[K comparable, V any]() *Group[K, V] { + return &Group[K, V]{ + callPool: sync.Pool{New: func() any { + return new(call[V]) + }}, + } +} + +// Result holds the results of Do, so they can be passed +// on a channel. 
+type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group[K, V]) Do(key K, fn func() (V, error)) (v V, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[K]*call[V]) + } + if c, ok := g.m[key]; ok { + _ = c.dups.Add(1) + g.mu.Unlock() + c.wg.Wait() + + if e, ok := c.err.(*panicError); ok { + panic(e) + } else if c.err == errGoexit { + runtime.Goexit() + } + // assign value/err before put back to pool to avoid race + v = c.val + err = c.err + n := c.dups.Add(-1) + if n == 0 { + g.callPool.Put(c) + } + return v, err, true + } + c := g.callPool.Get().(*call[V]) + defer func() { + n := c.dups.Add(-1) + if n == 0 { + g.callPool.Put(c) + } + }() + _ = c.dups.Add(1) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, true +} + +// doCall handles the single call for a key. +func (g *Group[K, V]) doCall(c *call[V], key K, fn func() (V, error)) { + normalReturn := false + recovered := false + + // use double-defer to distinguish panic from runtime.Goexit, + // more details see https://golang.org/cl/134395 + defer func() { + // the given function invoked runtime.Goexit + if !normalReturn && !recovered { + c.err = errGoexit + } + + g.mu.Lock() + defer g.mu.Unlock() + c.wg.Done() + if g.m[key] == c { + delete(g.m, key) + } + + if e, ok := c.err.(*panicError); ok { + panic(e) + } + }() + + func() { + defer func() { + if !normalReturn { + // Ideally, we would wait to take a stack trace until we've determined + // whether this is a panic or a runtime.Goexit. 
+ // + // Unfortunately, the only way we can distinguish the two is to see + // whether the recover stopped the goroutine from terminating, and by + // the time we know that, the part of the stack trace relevant to the + // panic has been discarded. + if r := recover(); r != nil { + c.err = newPanicError(r) + } + } + }() + + c.val, c.err = fn() + normalReturn = true + }() + + if !normalReturn { + recovered = true + } +} diff --git a/go/cache/theine/singleflight_test.go b/go/cache/theine/singleflight_test.go new file mode 100644 index 00000000000..60b28e69b4e --- /dev/null +++ b/go/cache/theine/singleflight_test.go @@ -0,0 +1,211 @@ +/* +Copyright 2023 The Vitess Authors. +Copyright 2023 Yiling-J +Copyright 2013 The Go Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package theine + +import ( + "crypto/rand" + "errors" + "fmt" + "io" + "runtime" + "sync" + "sync/atomic" + "testing" + "time" +) + +func TestDo(t *testing.T) { + g := NewGroup[string, string]() + v, err, _ := g.Do("key", func() (string, error) { + return "bar", nil + }) + if got, want := fmt.Sprintf("%v (%T)", v, v), "bar (string)"; got != want { + t.Errorf("Do = %v; want %v", got, want) + } + if err != nil { + t.Errorf("Do error = %v", err) + } +} + +func TestDoErr(t *testing.T) { + g := NewGroup[string, string]() + someErr := errors.New("Some error") + v, err, _ := g.Do("key", func() (string, error) { + return "", someErr + }) + if err != someErr { + t.Errorf("Do error = %v; want someErr %v", err, someErr) + } + if v != "" { + t.Errorf("unexpected non-nil value %#v", v) + } +} + +func TestDoDupSuppress(t *testing.T) { + g := NewGroup[string, string]() + var wg1, wg2 sync.WaitGroup + c := make(chan string, 1) + var calls int32 + fn := func() (string, error) { + if atomic.AddInt32(&calls, 1) == 1 { + // First invocation. + wg1.Done() + } + v := <-c + c <- v // pump; make available for any future calls + + time.Sleep(10 * time.Millisecond) // let more goroutines enter Do + + return v, nil + } + + const n = 10 + wg1.Add(1) + for i := 0; i < n; i++ { + wg1.Add(1) + wg2.Add(1) + go func() { + defer wg2.Done() + wg1.Done() + v, err, _ := g.Do("key", fn) + if err != nil { + t.Errorf("Do error: %v", err) + return + } + if s := v; s != "bar" { + t.Errorf("Do = %T %v; want %q", v, v, "bar") + } + }() + } + wg1.Wait() + // At least one goroutine is in fn now and all of them have at + // least reached the line before the Do. + c <- "bar" + wg2.Wait() + if got := atomic.LoadInt32(&calls); got <= 0 || got >= n { + t.Errorf("number of calls = %d; want over 0 and less than %d", got, n) + } +} + +// Test singleflight behaves correctly after Do panic. 
+// See https://github.com/golang/go/issues/41133 +func TestPanicDo(t *testing.T) { + g := NewGroup[string, string]() + fn := func() (string, error) { + panic("invalid memory address or nil pointer dereference") + } + + const n = 5 + waited := int32(n) + panicCount := int32(0) + done := make(chan struct{}) + for i := 0; i < n; i++ { + go func() { + defer func() { + if err := recover(); err != nil { + atomic.AddInt32(&panicCount, 1) + } + + if atomic.AddInt32(&waited, -1) == 0 { + close(done) + } + }() + + _, _, _ = g.Do("key", fn) + }() + } + + select { + case <-done: + if panicCount != n { + t.Errorf("Expect %d panic, but got %d", n, panicCount) + } + case <-time.After(time.Second): + t.Fatalf("Do hangs") + } +} + +func TestGoexitDo(t *testing.T) { + g := NewGroup[string, int]() + fn := func() (int, error) { + runtime.Goexit() + return 0, nil + } + + const n = 5 + waited := int32(n) + done := make(chan struct{}) + for i := 0; i < n; i++ { + go func() { + var err error + defer func() { + if err != nil { + t.Errorf("Error should be nil, but got: %v", err) + } + if atomic.AddInt32(&waited, -1) == 0 { + close(done) + } + }() + _, err, _ = g.Do("key", fn) + }() + } + + select { + case <-done: + case <-time.After(time.Second): + t.Fatalf("Do hangs") + } +} + +func BenchmarkDo(b *testing.B) { + keys := randKeys(b, 10240, 10) + benchDo(b, NewGroup[string, int](), keys) + +} + +func benchDo(b *testing.B, g *Group[string, int], keys []string) { + keyc := len(keys) + b.ReportAllocs() + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + for i := 0; pb.Next(); i++ { + _, _, _ = g.Do(keys[i%keyc], func() (int, error) { + return 0, nil + }) + } + }) +} + +func randKeys(b *testing.B, count, length uint) []string { + keys := make([]string, 0, count) + key := make([]byte, length) + + for i := uint(0); i < count; i++ { + if _, err := io.ReadFull(rand.Reader, key); err != nil { + b.Fatalf("Failed to generate random key %d of %d of length %d: %s", i+1, count, length, err) + } + 
		// (tail of a key-listing helper whose beginning precedes this chunk)
		keys = append(keys, string(key))
	}
	return keys
}

// ---- file: go/cache/theine/sketch.go (new) ----

/*
Copyright 2023 The Vitess Authors.
Copyright 2023 Yiling-J

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package theine

// CountMinSketch is a count-min sketch whose counters are 4 bits wide,
// packed sixteen per uint64 word. The table is addressed in blocks of
// 8 words (64 bytes) so the four counters touched for one key stay on a
// single cache line.
type CountMinSketch struct {
	Table      []uint64 // packed 4-bit counters; length is a power of two
	Additions  uint     // successful increments since the last halving reset
	SampleSize uint     // when Additions reaches this, every counter is halved
	BlockMask  uint     // number of 8-word blocks minus one
}

// NewCountMinSketch returns a sketch pre-sized for the minimum capacity (16).
func NewCountMinSketch() *CountMinSketch {
	new := &CountMinSketch{} // NOTE(review): local shadows the builtin `new`
	new.EnsureCapacity(16)
	return new
}

// indexOf return table index and counter index together
// (the word index for row `offset` within `block`, and the bit offset of
// the selected 4-bit counter inside that word).
func (s *CountMinSketch) indexOf(h uint64, block uint64, offset uint8) (uint, uint) {
	counterHash := h + uint64(1+offset)*(h>>32)
	// max block + 7(8 * 8 bytes), fit 64 bytes cache line
	index := block + counterHash&1 + uint64(offset<<1)
	return uint(index), uint((counterHash & 0xF) << 2)
}

// inc bumps the 4-bit counter at (index, offset). The counter saturates at
// 15 (0xF); inc reports whether the counter actually changed.
func (s *CountMinSketch) inc(index uint, offset uint) bool {
	mask := uint64(0xF << offset)
	if s.Table[index]&mask != mask {
		s.Table[index] += 1 << offset
		return true
	}
	return false
}

// Add records one occurrence of hash h in all four rows of the key's block.
// It returns true only when this addition pushed Additions up to SampleSize
// and therefore triggered the periodic halving reset.
func (s *CountMinSketch) Add(h uint64) bool {
	hn := spread(h)
	block := (hn & uint64(s.BlockMask)) << 3 // block start = block number * 8 words
	hc := rehash(h)
	index0, offset0 := s.indexOf(hc, block, 0)
	index1, offset1 := s.indexOf(hc, block, 1)
	index2, offset2 := s.indexOf(hc, block, 2)
	index3, offset3 := s.indexOf(hc, block, 3)

	// inc is evaluated before ||, so all four rows are always updated;
	// `added` is true if any row's counter was not already saturated.
	added := s.inc(index0, offset0)
	added = s.inc(index1, offset1) || added
	added = s.inc(index2, offset2) || added
	added = s.inc(index3, offset3) || added

	if added {
		s.Additions += 1
		if s.Additions == s.SampleSize {
			s.reset()
			return true
		}
	}
	return false
}

// reset halves every counter (and the addition count), aging out stale
// frequency information.
func (s *CountMinSketch) reset() {
	for i := range s.Table {
		s.Table[i] = s.Table[i] >> 1
	}
	s.Additions = s.Additions >> 1
}

// count extracts the 4-bit counter for row `offset` of hash h in `block`.
func (s *CountMinSketch) count(h uint64, block uint64, offset uint8) uint {
	index, off := s.indexOf(h, block, offset)
	count := (s.Table[index] >> off) & 0xF
	return uint(count)
}

// Estimate returns the minimum of the four row counters for hash h — the
// standard count-min upper-bound estimate (each counter is at most 15, so
// the 100 seed only starts the min fold).
func (s *CountMinSketch) Estimate(h uint64) uint {
	hn := spread(h)
	block := (hn & uint64(s.BlockMask)) << 3
	hc := rehash(h)
	m := min(s.count(hc, block, 0), 100)
	m = min(s.count(hc, block, 1), m)
	m = min(s.count(hc, block, 2), m)
	m = min(s.count(hc, block, 3), m)
	return m
}

// next2Power rounds x up to the next power of two (bit-smearing trick).
func next2Power(x uint) uint {
	x--
	x |= x >> 1
	x |= x >> 2
	x |= x >> 4
	x |= x >> 8
	x |= x >> 16
	x |= x >> 32
	x++
	return x
}

// EnsureCapacity grows the table to hold at least `size` words (minimum 16,
// rounded up to a power of two). Growing discards all existing counts.
func (s *CountMinSketch) EnsureCapacity(size uint) {
	if len(s.Table) >= int(size) {
		return
	}
	if size < 16 {
		size = 16
	}
	newSize := next2Power(size)
	s.Table = make([]uint64, newSize)
	s.SampleSize = 10 * size
	s.BlockMask = uint((len(s.Table) >> 3) - 1)
	s.Additions = 0
}

// spread mixes the hash bits used for block selection.
func spread(h uint64) uint64 {
	h ^= h >> 17
	h *= 0xed5ad4bb
	h ^= h >> 11
	h *= 0xac4c1b51
	h ^= h >> 15
	return h
}

// rehash derives the per-counter hash from the original key hash.
func rehash(h uint64) uint64 {
	h *= 0x31848bab
	h ^= h >> 14
	return h
}

// ---- file: go/cache/theine/sketch_test.go (new) ----

package theine

import (
	"fmt"
	"testing"

	"github.com/cespare/xxhash/v2"
	"github.com/stretchr/testify/require"
)

// TestEnsureCapacity checks the 16-word minimum table size.
func TestEnsureCapacity(t *testing.T) {
	sketch := NewCountMinSketch()
	sketch.EnsureCapacity(1)
	require.Equal(t, 16, len(sketch.Table))
}

// TestSketch exercises Add/Estimate: a key added 5 times must estimate >= 5,
// one added 3 times must estimate >= 3 (count-min never under-counts), and
// the more frequent key should rarely estimate lower than the less frequent.
func TestSketch(t *testing.T) {
	sketch := NewCountMinSketch()
	sketch.EnsureCapacity(100)
	require.Equal(t, 128, len(sketch.Table))
	require.Equal(t, uint(1000), sketch.SampleSize)
	// override sampleSize so test won't reset
	sketch.SampleSize = 5120

	failed := 0
	for i := 0; i < 500; i++ {
		key := fmt.Sprintf("key:%d", i)
		keyh := xxhash.Sum64String(key)
		sketch.Add(keyh)
		sketch.Add(keyh)
		sketch.Add(keyh)
		sketch.Add(keyh)
		sketch.Add(keyh)
		key = fmt.Sprintf("key:%d:b", i)
		keyh2 := xxhash.Sum64String(key)
		sketch.Add(keyh2)
		sketch.Add(keyh2)
		sketch.Add(keyh2)

		es1 := sketch.Estimate(keyh)
		es2 := sketch.Estimate(keyh2)
		if es2 > es1 {
			failed++
		}
		require.True(t, es1 >= 5)
		require.True(t, es2 >= 3)

	}
	require.True(t, float32(failed)/4000 < 0.1)
	require.True(t, sketch.Additions > 3500)
	a := sketch.Additions
	sketch.reset()
	require.Equal(t, a>>1, sketch.Additions)
}

// ---- file: go/cache/theine/slru.go (new) ----

/*
Copyright 2023 The Vitess Authors.
Copyright 2023 Yiling-J

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
+*/ + +package theine + +type Slru[K cachekey, V any] struct { + probation *List[K, V] + protected *List[K, V] + maxsize uint +} + +func NewSlru[K cachekey, V any](size uint) *Slru[K, V] { + return &Slru[K, V]{ + maxsize: size, + probation: NewList[K, V](size, LIST_PROBATION), + protected: NewList[K, V](uint(float32(size)*0.8), LIST_PROTECTED), + } +} + +func (s *Slru[K, V]) insert(entry *Entry[K, V]) *Entry[K, V] { + var evicted *Entry[K, V] + if s.probation.Len()+s.protected.Len() >= int(s.maxsize) { + evicted = s.probation.PopTail() + } + s.probation.PushFront(entry) + return evicted +} + +func (s *Slru[K, V]) victim() *Entry[K, V] { + if s.probation.Len()+s.protected.Len() < int(s.maxsize) { + return nil + } + return s.probation.Back() +} + +func (s *Slru[K, V]) access(entry *Entry[K, V]) { + switch entry.list { + case LIST_PROBATION: + s.probation.remove(entry) + evicted := s.protected.PushFront(entry) + if evicted != nil { + s.probation.PushFront(evicted) + } + case LIST_PROTECTED: + s.protected.MoveToFront(entry) + } +} + +func (s *Slru[K, V]) remove(entry *Entry[K, V]) { + switch entry.list { + case LIST_PROBATION: + s.probation.remove(entry) + case LIST_PROTECTED: + s.protected.remove(entry) + } +} + +func (s *Slru[K, V]) updateCost(entry *Entry[K, V], delta int64) { + switch entry.list { + case LIST_PROBATION: + s.probation.len += int(delta) + case LIST_PROTECTED: + s.protected.len += int(delta) + } +} diff --git a/go/cache/theine/store.go b/go/cache/theine/store.go new file mode 100644 index 00000000000..3d86e549867 --- /dev/null +++ b/go/cache/theine/store.go @@ -0,0 +1,615 @@ +/* +Copyright 2023 The Vitess Authors. +Copyright 2023 Yiling-J + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package theine

import (
	"runtime"
	"sync"
	"sync/atomic"
	"time"

	"github.com/gammazero/deque"

	"vitess.io/vitess/go/cache/theine/bf"
	"vitess.io/vitess/go/hack"
)

const (
	MaxReadBuffSize  = 64   // reads are buffered and drained into the policy in batches of this size
	MinWriteBuffSize = 4    // lower bound on the write-buffer channel capacity
	MaxWriteBuffSize = 1024 // upper bound on the write-buffer channel capacity
)

// RemoveReason tells an OnRemoval callback why an entry left the cache.
type RemoveReason uint8

const (
	REMOVED RemoveReason = iota // explicitly deleted via the Delete API
	EVICTED                     // pushed out by the eviction policy
	EXPIRED                     // aged out (see NOTE in processDeque below)
)

// Shard is one hash-partition of the store: its own map, admission
// doorkeeper, and a small FIFO deque that new entries pass through before
// being offered to the TinyLFU policy.
type Shard[K cachekey, V any] struct {
	hashmap   map[K]*Entry[K, V]
	dookeeper *bf.Bloomfilter // optional admission bloom filter (sic: "dookeeper")
	deque     *deque.Deque[*Entry[K, V]]
	group     *Group[K, V] // deduplicates concurrent loads of the same key
	qsize     uint         // capacity (total cost) of the deque
	qlen      int          // current total cost held in the deque
	counter   uint         // insertions since the doorkeeper was last reset
	mu        sync.RWMutex
}

// NewShard builds a shard with the given map/deque sizing; the bloom-filter
// doorkeeper is only allocated when requested.
func NewShard[K cachekey, V any](size uint, qsize uint, doorkeeper bool) *Shard[K, V] {
	s := &Shard[K, V]{
		hashmap: make(map[K]*Entry[K, V]),
		qsize:   qsize,
		deque:   deque.New[*Entry[K, V]](),
		group:   NewGroup[K, V](),
	}
	if doorkeeper {
		s.dookeeper = bf.New(0.01)
	}
	return s
}

// set stores the entry and grows the doorkeeper so it stays sized at
// roughly 20x the number of resident keys. Caller must hold s.mu.
func (s *Shard[K, V]) set(key K, entry *Entry[K, V]) {
	s.hashmap[key] = entry
	if s.dookeeper != nil {
		ds := 20 * len(s.hashmap)
		if ds > s.dookeeper.Capacity {
			s.dookeeper.EnsureCapacity(ds)
		}
	}
}

// get looks the key up in the shard map. Caller must hold s.mu (read lock).
func (s *Shard[K, V]) get(key K) (entry *Entry[K, V], ok bool) {
	entry, ok = s.hashmap[key]
	return
}

// delete removes the entry only if the map still holds this exact entry
// (pointer identity), guarding against deleting a newer entry that reused
// the key. Caller must hold s.mu.
func (s *Shard[K, V]) delete(entry *Entry[K, V]) bool {
	var deleted bool
	exist, ok := s.hashmap[entry.key]
	if ok && exist == entry {
		delete(s.hashmap, exist.key)
		deleted = true
	}
	return deleted
}

// len reports the number of resident entries. Caller must hold s.mu.
func (s *Shard[K, V]) len() int {
	return len(s.hashmap)
}

// Metrics exposes hit/miss/eviction counters for the store.
type Metrics struct {
	evicted atomic.Int64
	hits    atomic.Int64
	misses  atomic.Int64
}

func (m *Metrics) Evicted() int64 {
	return m.evicted.Load()
}

func (m *Metrics) Hits() int64 {
	return m.hits.Load()
}

func (m *Metrics) Misses() int64 {
	return m.misses.Load()
}

// Accesses is the total lookup count (hits + misses).
func (m *Metrics) Accesses() int64 {
	return m.Hits() + m.Misses()
}

// cachekey is the key contract: comparable (map key) plus two hash views —
// Hash() feeds the frequency sketch, Hash2() supplies (sketch hash, shard
// selector).
type cachekey interface {
	comparable
	Hash() uint64
	Hash2() (uint64, uint64)
}

// HashKey256 is a 32-byte (e.g. SHA-256) key; its hashes are read directly
// from the digest bytes, little-endian.
type HashKey256 [32]byte

func (h HashKey256) Hash() uint64 {
	return uint64(h[0]) | uint64(h[1])<<8 | uint64(h[2])<<16 | uint64(h[3])<<24 |
		uint64(h[4])<<32 | uint64(h[5])<<40 | uint64(h[6])<<48 | uint64(h[7])<<56
}

func (h HashKey256) Hash2() (uint64, uint64) {
	h0 := h.Hash()
	h1 := uint64(h[8]) | uint64(h[9])<<8 | uint64(h[10])<<16 | uint64(h[11])<<24 |
		uint64(h[12])<<32 | uint64(h[13])<<40 | uint64(h[14])<<48 | uint64(h[15])<<56
	return h0, h1
}

// StringKey hashes strings with the runtime's string hash; Hash2 derives a
// second value by integer mixing so shard choice is decorrelated.
type StringKey string

func (h StringKey) Hash() uint64 {
	return hack.RuntimeStrhash(string(h), 13850135847636357301)
}

func (h StringKey) Hash2() (uint64, uint64) {
	h0 := h.Hash()
	h1 := ((h0 >> 16) ^ h0) * 0x45d9f3b
	h1 = ((h1 >> 16) ^ h1) * 0x45d9f3b
	h1 = (h1 >> 16) ^ h1
	return h0, h1
}

// cacheval lets the store charge entries by their (self-reported) size.
type cacheval interface {
	CachedSize(alloc bool) int64
}

// Store is the sharded TinyLFU cache: per-shard maps guarded by RWMutex,
// a lock-free-ish read buffer, and a single maintenance goroutine that
// serializes all policy mutations via writebuf and mlock.
type Store[K cachekey, V cacheval] struct {
	Metrics   Metrics
	OnRemoval func(K, V, RemoveReason)

	entryPool    sync.Pool // recycles *Entry values after removal
	writebuf     chan WriteBufItem[K, V]
	policy       *TinyLfu[K, V]
	readbuf      *Queue[ReadBufItem[K, V]]
	shards       []*Shard[K, V]
	cap          uint
	shardCount   uint
	writebufsize int64
	tailUpdate   bool // set when the policy tail changed and threshold needs recomputing
	doorkeeper   bool

	mlock       sync.Mutex // guards policy + readbuf
	readCounter atomic.Uint32
	open        atomic.Bool
}

// NewStore sizes the write buffer, shard count (power of two, clamped to
// [16, 128]), per-shard deque and policy from maxsize, then starts the
// maintenance goroutine.
func NewStore[K cachekey, V cacheval](maxsize int64, doorkeeper bool) *Store[K, V] {
	writeBufSize := maxsize / 100
	if writeBufSize < MinWriteBuffSize {
		writeBufSize = MinWriteBuffSize
	}
	if writeBufSize > MaxWriteBuffSize {
		writeBufSize = MaxWriteBuffSize
	}
	shardCount := 1
	for shardCount < runtime.GOMAXPROCS(0)*2 {
		shardCount *= 2
	}
	if shardCount < 16 {
		shardCount = 16
	}
	if shardCount > 128 {
		shardCount = 128
	}
	dequeSize := int(maxsize) / 100 / shardCount
	shardSize := int(maxsize) / shardCount
	if shardSize < 50 {
		shardSize = 50
	}
	// policy manages whatever capacity is not reserved for the shard deques
	policySize := int(maxsize) - (dequeSize * shardCount)

	s := &Store[K, V]{
		cap:          uint(maxsize),
		policy:       NewTinyLfu[K, V](uint(policySize)),
		readbuf:      NewQueue[ReadBufItem[K, V]](),
		writebuf:     make(chan WriteBufItem[K, V], writeBufSize),
		entryPool:    sync.Pool{New: func() any { return &Entry[K, V]{} }},
		shardCount:   uint(shardCount),
		doorkeeper:   doorkeeper,
		writebufsize: writeBufSize,
	}
	s.shards = make([]*Shard[K, V], 0, s.shardCount)
	for i := 0; i < int(s.shardCount); i++ {
		s.shards = append(s.shards, NewShard[K, V](uint(shardSize), uint(dequeSize), doorkeeper))
	}

	go s.maintenance()
	s.open.Store(true)
	return s
}

// EnsureOpen re-opens a closed store (fresh write buffer + new maintenance
// goroutine); it is a no-op when the store is already open.
func (s *Store[K, V]) EnsureOpen() {
	if s.open.Swap(true) {
		return
	}
	s.writebuf = make(chan WriteBufItem[K, V], s.writebufsize)
	go s.maintenance()
}

// getFromShard performs the lookup under the shard read lock, then records
// the access in the read buffer; every MaxReadBuffSize-th read drains the
// buffer into the policy. Entries stamped with an older epoch are treated
// as misses (epoch acts as a wholesale-invalidation clock — the value is
// not returned, though the entry itself is still recorded for the policy).
func (s *Store[K, V]) getFromShard(key K, hash uint64, shard *Shard[K, V], epoch uint32) (V, bool) {
	new := s.readCounter.Add(1) // NOTE(review): shadows the builtin `new`
	shard.mu.RLock()
	entry, ok := shard.get(key)
	var value V
	if ok {
		if entry.epoch.Load() < epoch {
			s.Metrics.misses.Add(1)
			ok = false
		} else {
			s.Metrics.hits.Add(1)
			s.policy.hit.Add(1)
			value = entry.value
		}
	} else {
		s.Metrics.misses.Add(1)
	}
	shard.mu.RUnlock()
	switch {
	case new < MaxReadBuffSize:
		var send ReadBufItem[K, V]
		send.hash = hash
		if ok {
			send.entry = entry
		}
		s.readbuf.Push(send)
	case new == MaxReadBuffSize:
		// this reader is responsible for draining the full batch
		var send ReadBufItem[K, V]
		send.hash = hash
		if ok {
			send.entry = entry
		}
		s.readbuf.Push(send)
		s.drainRead()
	}
	return value, ok
}

// Get returns the cached value for key, honoring the epoch cutoff.
func (s *Store[K, V]) Get(key K, epoch uint32) (V, bool) {
	h, index := s.index(key)
	shard := s.shards[index]
	return s.getFromShard(key, h, shard, epoch)
}

// GetOrLoad returns the cached value, or invokes load() on a miss. The
// per-shard singleflight group collapses concurrent loads of the same key;
// a successful load is inserted with cost 0 (i.e. CachedSize-derived).
// The bool reports whether the value came from cache.
func (s *Store[K, V]) GetOrLoad(key K, epoch uint32, load func() (V, error)) (V, bool, error) {
	h, index := s.index(key)
	shard := s.shards[index]
	v, ok := s.getFromShard(key, h, shard, epoch)
	if !ok {
		loaded, err, _ := shard.group.Do(key, func() (V, error) {
			loaded, err := load()
			if err == nil {
				s.Set(key, loaded, 0, epoch)
			}
			return loaded, err
		})
		return loaded, false, err
	}
	return v, true, nil
}

// setEntry is called with shard.mu HELD and is responsible for releasing
// it (directly, or via processDeque). Oversized entries bypass the deque
// and go straight to the policy through the write buffer.
func (s *Store[K, V]) setEntry(shard *Shard[K, V], cost int64, epoch uint32, entry *Entry[K, V]) {
	shard.set(entry.key, entry)
	// cost larger than deque size, send to policy directly
	if cost > int64(shard.qsize) {
		shard.mu.Unlock()
		s.writebuf <- WriteBufItem[K, V]{entry: entry, code: NEW}
		return
	}
	entry.deque = true
	shard.deque.PushFront(entry)
	shard.qlen += int(cost)
	s.processDeque(shard, epoch) // unlocks shard.mu
}

// setInternal inserts or updates key. Updates mutate the existing entry in
// place and notify the policy of any cost change; inserts may be rejected
// by the doorkeeper (first sighting of a key only marks the bloom filter).
// Returns the shard, the affected entry (nil if rejected), and success.
func (s *Store[K, V]) setInternal(key K, value V, cost int64, epoch uint32) (*Shard[K, V], *Entry[K, V], bool) {
	h, index := s.index(key)
	shard := s.shards[index]
	shard.mu.Lock()
	exist, ok := shard.get(key)
	if ok {
		var costChange int64
		exist.value = value
		oldCost := exist.cost.Swap(cost)
		if oldCost != cost {
			costChange = cost - oldCost
			if exist.deque {
				shard.qlen += int(costChange)
			}
		}
		shard.mu.Unlock()
		exist.epoch.Store(epoch)
		if costChange != 0 {
			s.writebuf <- WriteBufItem[K, V]{
				entry: exist, code: UPDATE, costChange: costChange,
			}
		}
		return shard, exist, true
	}
	if s.doorkeeper {
		// reset the bloom filter periodically so it doesn't saturate
		if shard.counter > uint(shard.dookeeper.Capacity) {
			shard.dookeeper.Reset()
			shard.counter = 0
		}
		hit := shard.dookeeper.Insert(h)
		if !hit {
			shard.counter += 1
			shard.mu.Unlock()
			return shard, nil, false
		}
	}
	entry := s.entryPool.Get().(*Entry[K, V])
	entry.frequency.Store(-1) // -1 = frequency not yet sampled from the sketch
	entry.key = key
	entry.value = value
	entry.cost.Store(cost)
	entry.epoch.Store(epoch)
	s.setEntry(shard, cost, epoch, entry) // unlocks shard.mu
	return shard, entry, true

}

// Set inserts/updates key. cost == 0 means "use the value's CachedSize";
// entries costlier than the whole cache are rejected outright.
func (s *Store[K, V]) Set(key K, value V, cost int64, epoch uint32) bool {
	if cost == 0 {
		cost = value.CachedSize(true)
	}
	if cost > int64(s.cap) {
		return false
	}
	_, _, ok := s.setInternal(key, value, cost, epoch)
	return ok
}

// dequeKV snapshots a key/value pair so OnRemoval can run outside the lock.
type dequeKV[K cachekey, V cacheval] struct {
	k K
	v V
}

// processDeque is called with shard.mu HELD and releases it before invoking
// any callbacks. It pops deque entries while the deque is over capacity:
// stale-epoch entries are dropped, entries whose observed frequency clears
// the policy threshold (or is unsampled, -1) are forwarded to the policy,
// and the rest are evicted.
func (s *Store[K, V]) processDeque(shard *Shard[K, V], epoch uint32) {
	if shard.qlen <= int(shard.qsize) {
		shard.mu.Unlock()
		return
	}
	var evictedkv []dequeKV[K, V]
	var expiredkv []dequeKV[K, V]
	// NOTE(review): nothing in this function appends to expiredkv, so the
	// EXPIRED callback loop below can never fire on this path.

	// send to slru
	send := make([]*Entry[K, V], 0, 2)
	for shard.qlen > int(shard.qsize) {
		evicted := shard.deque.PopBack()
		evicted.deque = false
		shard.qlen -= int(evicted.cost.Load())

		if evicted.epoch.Load() < epoch {
			deleted := shard.delete(evicted)
			if deleted {
				if s.OnRemoval != nil {
					evictedkv = append(evictedkv, dequeKV[K, V]{evicted.key, evicted.value})
				}
				s.postDelete(evicted)
				s.Metrics.evicted.Add(1)
			}
		} else {
			count := evicted.frequency.Load()
			threshold := s.policy.threshold.Load()
			if count == -1 {
				send = append(send, evicted)
			} else {
				if int32(count) >= threshold {
					send = append(send, evicted)
				} else {
					deleted := shard.delete(evicted)
					// double check because entry maybe removed already by Delete API
					if deleted {
						if s.OnRemoval != nil {
							evictedkv = append(evictedkv, dequeKV[K, V]{evicted.key, evicted.value})
						}
						s.postDelete(evicted)
						s.Metrics.evicted.Add(1)
					}
				}
			}
		}
	}

	shard.mu.Unlock()
	// writebuf sends and user callbacks happen strictly after unlock
	for _, entry := range send {
		s.writebuf <- WriteBufItem[K, V]{entry: entry, code: NEW}
	}
	if s.OnRemoval != nil {
		for _, kv := range evictedkv {
			s.OnRemoval(kv.k, kv.v, EVICTED)
		}
		for _, kv := range expiredkv {
			s.OnRemoval(kv.k, kv.v, EXPIRED)
		}
	}
}

// Delete removes key from its shard map and tells the maintenance goroutine
// to drop the entry from the policy.
func (s *Store[K, V]) Delete(key K) {
	_, index := s.index(key)
	shard := s.shards[index]
	shard.mu.Lock()
	entry, ok := shard.get(key)
	if ok {
		shard.delete(entry)
	}
	shard.mu.Unlock()
	if ok {
		s.writebuf <- WriteBufItem[K, V]{entry: entry, code: REMOVE}
	}
}

// Len sums resident entries across shards (each shard locked briefly).
func (s *Store[K, V]) Len() int {
	total := 0
	for _, s := range s.shards {
		s.mu.RLock()
		total += s.len()
		s.mu.RUnlock()
	}
	return total
}

// UsedCapacity sums the cost currently held in the shard deques.
// NOTE(review): this counts only deque-resident cost, not policy-resident
// entries — presumably intentional; confirm against callers.
func (s *Store[K, V]) UsedCapacity() int {
	total := 0
	for _, s := range s.shards {
		s.mu.RLock()
		total += s.qlen
		s.mu.RUnlock()
	}
	return total
}

// MaxCapacity is the configured total cost budget.
func (s *Store[K, V]) MaxCapacity() int {
	return int(s.cap)
}

// spread hash before get index
// (returns the sketch hash and the shard index; shardCount is a power of
// two, so the mask picks a uniform shard from the second hash).
func (s *Store[K, V]) index(key K) (uint64, int) {
	h0, h1 := key.Hash2()
	return h0, int(h1 & uint64(s.shardCount-1))
}

// postDelete clears the value (so the pool doesn't pin it) and recycles
// the entry.
func (s *Store[K, V]) postDelete(entry *Entry[K, V]) {
	var zero V
	entry.value = zero
	s.entryPool.Put(entry)
}

// remove entry from cache/policy/timingwheel and add back to pool
// Must only run on the maintenance goroutine (under mlock).
func (s *Store[K, V]) removeEntry(entry *Entry[K, V], reason RemoveReason) {
	if prev := entry.meta.prev; prev != nil {
		// entry is linked into a policy list; unlink it first
		s.policy.Remove(entry)
	}
	switch reason {
	case EVICTED, EXPIRED:
		_, index := s.index(entry.key)
		shard := s.shards[index]
		shard.mu.Lock()
		deleted := shard.delete(entry)
		shard.mu.Unlock()
		if deleted {
			if s.OnRemoval != nil {
				s.OnRemoval(entry.key, entry.value, reason)
			}
			s.postDelete(entry)
			s.Metrics.evicted.Add(1)
		}
	case REMOVED:
		// already removed from shard map
		if s.OnRemoval != nil {
			s.OnRemoval(entry.key, entry.value, reason)
		}
	}
}

// drainRead flushes the buffered read accesses into the policy under mlock
// and re-arms the read counter.
func (s *Store[K, V]) drainRead() {
	s.policy.total.Add(MaxReadBuffSize)
	s.mlock.Lock()
	for {
		v, ok := s.readbuf.Pop()
		if !ok {
			break
		}
		s.policy.Access(v)
	}
	s.mlock.Unlock()
	s.readCounter.Store(0)
}

// maintenanceItem applies one write-buffer event to the policy under mlock:
// NEW inserts (and evicts as needed), REMOVE unlinks, UPDATE re-costs.
func (s *Store[K, V]) maintenanceItem(item WriteBufItem[K, V]) {
	s.mlock.Lock()
	defer s.mlock.Unlock()

	entry := item.entry
	if entry == nil {
		return
	}

	// lock free because store API never read/modify entry metadata
	switch item.code {
	case NEW:
		if entry.removed {
			return
		}
		evicted := s.policy.Set(entry)
		if evicted != nil {
			s.removeEntry(evicted, EVICTED)
			s.tailUpdate = true
		}
		removed := s.policy.EvictEntries()
		for _, e := range removed {
			s.tailUpdate = true
			s.removeEntry(e, EVICTED)
		}
	case REMOVE:
		entry.removed = true
		s.removeEntry(entry, REMOVED)
		s.policy.threshold.Store(-1)
	case UPDATE:
		if item.costChange != 0 {
			s.policy.UpdateCost(entry, item.costChange)
			removed := s.policy.EvictEntries()
			for _, e := range removed {
				s.tailUpdate = true
				s.removeEntry(e, EVICTED)
			}
		}
	}
	item.entry = nil // NOTE(review): item is a value copy, so this assignment has no effect outside this call
	if s.tailUpdate {
		s.policy.UpdateThreshold()
		s.tailUpdate = false
	}
}

// maintenance is the single background goroutine: it applies write-buffer
// events and refreshes the admission threshold every 500ms. It exits when
// writebuf is closed (see Close).
func (s *Store[K, V]) maintenance() {
	tick := time.NewTicker(500 * time.Millisecond)
	defer tick.Stop()

	for {
		select {
		case <-tick.C:
			s.mlock.Lock()
			s.policy.UpdateThreshold()
			s.mlock.Unlock()

		case item, ok := <-s.writebuf:
			if !ok {
				return
			}
			s.maintenanceItem(item)
		}
	}
}

// Range calls f for every entry whose epoch is current, shard by shard
// under the read lock; it stops early when f returns false.
func (s *Store[K, V]) Range(epoch uint32, f func(key K, value V) bool) {
	for _, shard := range s.shards {
		shard.mu.RLock()
		for _, entry := range shard.hashmap {
			if entry.epoch.Load() < epoch {
				continue
			}
			if !f(entry.key, entry.value) {
				shard.mu.RUnlock()
				return
			}
		}
		shard.mu.RUnlock()
	}
}

// Close clears all shards and closes the write buffer, stopping the
// maintenance goroutine. Panics on double close.
// NOTE(review): a Set/Delete racing with Close could still send on the
// closed channel — callers must stop writers before closing; confirm.
func (s *Store[K, V]) Close() {
	if !s.open.Swap(false) {
		panic("theine.Store: double close")
	}

	for _, s := range s.shards {
		s.mu.Lock()
		clear(s.hashmap)
		s.mu.Unlock()
	}
	close(s.writebuf)
}

// ---- file: go/cache/theine/store_test.go (new) ----

/*
Copyright 2023 The Vitess Authors.
Copyright 2023 Yiling-J

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package theine

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// cachedint is a trivial cacheval whose cost is always 1.
type cachedint int

func (ci cachedint) CachedSize(bool) int64 {
	return 1
}

// keyint is a trivial cachekey: the key itself is the hash, and the shard
// selector is a scaled copy so different keys spread across shards.
type keyint int

func (k keyint) Hash() uint64 {
	return uint64(k)
}

func (k keyint) Hash2() (uint64, uint64) {
	return uint64(k), uint64(k) * 333
}

// TestProcessDeque fills one shard's deque past its (shrunken) capacity and
// verifies that the overflow entries are popped oldest-first and handed to
// the policy, leaving the newest entries in the deque.
func TestProcessDeque(t *testing.T) {
	store := NewStore[keyint, cachedint](20000, false)

	evicted := map[keyint]cachedint{}
	store.OnRemoval = func(key keyint, value cachedint, reason RemoveReason) {
		if reason == EVICTED {
			evicted[key] = value
		}
	}
	_, index := store.index(123)
	shard := store.shards[index]
	shard.qsize = 10 // shrink so a single Set overflows the deque

	// seed 5 unit-cost entries directly into the shard's deque and map
	for i := keyint(0); i < 5; i++ {
		entry := &Entry[keyint, cachedint]{key: i}
		entry.cost.Store(1)
		store.shards[index].deque.PushFront(entry)
		store.shards[index].qlen += 1
		store.shards[index].hashmap[i] = entry
	}

	// move 0,1,2 entries to slru
	store.Set(123, 123, 8, 0)
	require.Equal(t, store.shards[index].deque.Len(), 3)
	var keys []keyint
	for store.shards[index].deque.Len() != 0 {
		e := store.shards[index].deque.PopBack()
		keys = append(keys, e.key)
	}
	require.Equal(t, []keyint{3, 4, 123}, keys)
}

// TestDoorKeeperDynamicSize verifies the bloom-filter doorkeeper grows as
// entries are inserted (it is resized to ~20x the resident key count).
func TestDoorKeeperDynamicSize(t *testing.T) {
	store := NewStore[keyint, cachedint](200000, true)
	shard := store.shards[0]
	require.True(t, shard.dookeeper.Capacity == 512)
	for i := keyint(0); i < 5000; i++ {
		shard.set(i, &Entry[keyint, cachedint]{})
	}
	require.True(t, shard.dookeeper.Capacity > 100000)
}

// ---- file: go/cache/theine/tlfu.go (new) begins in the next chunk ----
// ---- file: go/cache/theine/tlfu.go (new) ----

/*
Copyright 2023 The Vitess Authors.
Copyright 2023 Yiling-J

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package theine

import (
	"sync/atomic"
)

// TinyLfu combines a segmented LRU with a count-min frequency sketch and
// an admission threshold. lruFactor is a hill-climbed bonus added to a
// candidate's frequency, trading LFU strictness for LRU recency; climb()
// adjusts it based on the observed hit ratio.
type TinyLfu[K cachekey, V any] struct {
	slru      *Slru[K, V]
	sketch    *CountMinSketch
	size      uint
	counter   uint          // accesses since the last climb() adjustment
	total     atomic.Uint32 // accesses in the current hit-ratio window
	hit       atomic.Uint32 // hits in the current hit-ratio window
	hr        float32       // hit ratio of the previous window
	threshold atomic.Int32  // admission threshold; -1 admits everything
	lruFactor uint8         // recency bonus added to candidate frequency
	step      int8          // current hill-climbing step (direction + magnitude)
}

// NewTinyLfu builds a policy for `size` total cost.
func NewTinyLfu[K cachekey, V any](size uint) *TinyLfu[K, V] {
	tlfu := &TinyLfu[K, V]{
		size:   size,
		slru:   NewSlru[K, V](size),
		sketch: NewCountMinSketch(),
		step:   1,
	}
	// default threshold to -1 so all entries are admitted until cache is full
	tlfu.threshold.Store(-1)
	return tlfu
}

// climb is one hill-climbing iteration: if the hit ratio improved since the
// last window, keep moving lruFactor in the same direction (growing the
// step, clamped to ±13); if it regressed, reverse with a unit step.
// lruFactor stays in [0, 16], and the admission threshold is shifted by the
// applied delta so in-flight comparisons stay consistent.
func (t *TinyLfu[K, V]) climb() {
	total := t.total.Load()
	hit := t.hit.Load()
	current := float32(hit) / float32(total)
	delta := current - t.hr
	var diff int8
	if delta > 0.0 {
		// improving: accelerate in the current direction
		if t.step < 0 {
			t.step -= 1
		} else {
			t.step += 1
		}
		if t.step < -13 {
			t.step = -13
		} else if t.step > 13 {
			t.step = 13
		}
		newFactor := int8(t.lruFactor) + t.step
		if newFactor < 0 {
			newFactor = 0
		} else if newFactor > 16 {
			newFactor = 16
		}
		diff = newFactor - int8(t.lruFactor)
		t.lruFactor = uint8(newFactor)
	} else if delta < 0.0 {
		// reset
		if t.step > 0 {
			t.step = -1
		} else {
			t.step = 1
		}
		newFactor := int8(t.lruFactor) + t.step
		if newFactor < 0 {
			newFactor = 0
		} else if newFactor > 16 {
			newFactor = 16
		}
		diff = newFactor - int8(t.lruFactor)
		t.lruFactor = uint8(newFactor)
	}
	t.threshold.Add(-int32(diff))
	t.hr = current
	t.hit.Store(0)
	t.total.Store(0)
}

// Set offers a (re-)admission candidate to the policy. If the cache is full
// the candidate's (frequency + lruFactor) must beat the current victim's
// sketch estimate or the candidate itself is returned as the eviction;
// otherwise it is inserted into the SLRU, which may evict its own tail.
func (t *TinyLfu[K, V]) Set(entry *Entry[K, V]) *Entry[K, V] {
	t.counter++
	if t.counter > 10*t.size {
		t.climb()
		t.counter = 0
	}
	if entry.meta.prev == nil { // not yet linked into a policy list
		if victim := t.slru.victim(); victim != nil {
			freq := int(entry.frequency.Load())
			if freq == -1 {
				// frequency never sampled; fall back to the sketch
				freq = int(t.sketch.Estimate(entry.key.Hash()))
			}
			evictedCount := uint(freq) + uint(t.lruFactor)
			victimCount := t.sketch.Estimate(victim.key.Hash())
			if evictedCount <= uint(victimCount) {
				// candidate loses the duel: reject it
				return entry
			}
		} else {
			// still filling: keep the sketch sized ~1% above resident count
			count := t.slru.probation.count + t.slru.protected.count
			t.sketch.EnsureCapacity(uint(count + count/100))
		}
		evicted := t.slru.insert(entry)
		return evicted
	}

	return nil
}

// Access records one read: it feeds the sketch (halving the threshold when
// the sketch resets), promotes resident entries in the SLRU, and caches
// the sketch estimate on non-resident entries for later admission duels.
func (t *TinyLfu[K, V]) Access(item ReadBufItem[K, V]) {
	t.counter++
	if t.counter > 10*t.size {
		t.climb()
		t.counter = 0
	}
	if entry := item.entry; entry != nil {
		reset := t.sketch.Add(item.hash)
		if reset {
			t.threshold.Store(t.threshold.Load() / 2)
		}
		if entry.meta.prev != nil {
			var tail bool
			if entry == t.slru.victim() {
				tail = true
			}
			t.slru.access(entry)
			if tail {
				// the victim changed; recompute the admission threshold
				t.UpdateThreshold()
			}
		} else {
			entry.frequency.Store(int32(t.sketch.Estimate(item.hash)))
		}
	} else {
		reset := t.sketch.Add(item.hash)
		if reset {
			t.threshold.Store(t.threshold.Load() / 2)
		}
	}
}

// Remove unlinks entry from the SLRU.
func (t *TinyLfu[K, V]) Remove(entry *Entry[K, V]) {
	t.slru.remove(entry)
}

// UpdateCost adjusts entry's charged cost by delta in its SLRU segment.
func (t *TinyLfu[K, V]) UpdateCost(entry *Entry[K, V], delta int64) {
	t.slru.updateCost(entry, delta)
}

// EvictEntries pops entries while the SLRU is over capacity: probation
// tails first, then protected tails once probation is empty. Returns
// everything evicted.
func (t *TinyLfu[K, V]) EvictEntries() []*Entry[K, V] {
	removed := []*Entry[K, V]{}

	for t.slru.probation.Len()+t.slru.protected.Len() > int(t.slru.maxsize) {
		entry := t.slru.probation.PopTail()
		if entry == nil {
			break
		}
		removed = append(removed, entry)
	}
	for t.slru.probation.Len()+t.slru.protected.Len() > int(t.slru.maxsize) {
		entry := t.slru.protected.PopTail()
		if entry == nil {
			break
		}
		removed = append(removed, entry)
	}
	return removed
}

// UpdateThreshold recomputes the admission threshold from the current
// victim's sketch estimate minus the recency bonus; while the cache is not
// full the threshold is -1 (admit everything).
func (t *TinyLfu[K, V]) UpdateThreshold() {
	if t.slru.probation.Len()+t.slru.protected.Len() < int(t.slru.maxsize) {
		t.threshold.Store(-1)
	} else {
		tail := t.slru.victim()
		if tail != nil {
			// NOTE(review): when lruFactor exceeds the estimate this uint
			// subtraction wraps; the int32 truncation still produces the
			// intended negative value, but an explicit signed conversion
			// would be clearer — confirm upstream intent.
			t.threshold.Store(
				int32(t.sketch.Estimate(tail.key.Hash()) - uint(t.lruFactor)),
			)
		} else {
			// cache is not full
			t.threshold.Store(-1)
		}
	}
}

// ---- file: go/cache/theine/tlfu_test.go (new) ----

/*
Copyright 2023 The Vitess Authors.
Copyright 2023 Yiling-J

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package theine

import (
	"fmt"
	"strings"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestTlfu walks the TinyLFU lifecycle: initial segment sizing, probation
// to protected promotion on access, admission duels once the cache is full
// (a cold candidate loses to a warmer victim), and removal bookkeeping.
func TestTlfu(t *testing.T) {
	tlfu := NewTinyLfu[StringKey, string](1000)
	require.Equal(t, uint(1000), tlfu.slru.probation.capacity)
	require.Equal(t, uint(800), tlfu.slru.protected.capacity)
	require.Equal(t, 0, tlfu.slru.probation.len)
	require.Equal(t, 0, tlfu.slru.protected.len)

	var entries []*Entry[StringKey, string]
	for i := 0; i < 200; i++ {
		e := NewEntry(StringKey(fmt.Sprintf("%d", i)), "", 1)
		evicted := tlfu.Set(e)
		entries = append(entries, e)
		require.Nil(t, evicted)
	}

	require.Equal(t, 200, tlfu.slru.probation.len)
	require.Equal(t, 0, tlfu.slru.protected.len)

	// probation -> protected
	tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[11]})
	require.Equal(t, 199, tlfu.slru.probation.len)
	require.Equal(t, 1, tlfu.slru.protected.len)
	tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[11]})
	require.Equal(t, 199, tlfu.slru.probation.len)
	require.Equal(t, 1, tlfu.slru.protected.len)

	for i := 200; i < 1000; i++ {
		e := NewEntry(StringKey(fmt.Sprintf("%d", i)), "", 1)
		entries = append(entries, e)
		evicted := tlfu.Set(e)
		require.Nil(t, evicted)
	}
	// access protected
	tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[11]})
	require.Equal(t, 999, tlfu.slru.probation.len)
	require.Equal(t, 1, tlfu.slru.protected.len)

	// cold newcomer loses the admission duel and bounces straight back
	evicted := tlfu.Set(NewEntry(StringKey("0a"), "", 1))
	require.Equal(t, StringKey("0a"), evicted.key)
	require.Equal(t, 999, tlfu.slru.probation.len)
	require.Equal(t, 1, tlfu.slru.protected.len)

	victim := tlfu.slru.victim()
	require.Equal(t, StringKey("0"), victim.key)
	// warm up an existing entry's sketch counts, then a cold newcomer
	// still loses the duel
	tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[991]})
	tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[991]})
	tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[991]})
	tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[991]})
	evicted = tlfu.Set(NewEntry(StringKey("1a"), "", 1))
	require.Equal(t, StringKey("1a"), evicted.key)
	require.Equal(t, 998, tlfu.slru.probation.len)

	var entries2 []*Entry[StringKey, string]
	for i := 0; i < 1000; i++ {
		e := NewEntry(StringKey(fmt.Sprintf("%d*", i)), "", 1)
		tlfu.Set(e)
		entries2 = append(entries2, e)
	}
	require.Equal(t, 998, tlfu.slru.probation.len)
	require.Equal(t, 2, tlfu.slru.protected.len)

	// removal keeps both lists internally consistent (display helpers walk
	// the lists in both directions)
	for _, i := range []int{997, 998, 999} {
		tlfu.Remove(entries2[i])
		tlfu.slru.probation.display()
		tlfu.slru.probation.displayReverse()
		tlfu.slru.protected.display()
		tlfu.slru.protected.displayReverse()
	}

}

// TestEvictEntries verifies EvictEntries trims an over-capacity SLRU:
// high-frequency large entries are admitted, then the overflow is popped
// from probation (and protected once probation drains), and cost updates
// are reflected in subsequent evictions.
func TestEvictEntries(t *testing.T) {
	tlfu := NewTinyLfu[StringKey, string](500)
	require.Equal(t, uint(500), tlfu.slru.probation.capacity)
	require.Equal(t, uint(400), tlfu.slru.protected.capacity)
	require.Equal(t, 0, tlfu.slru.probation.len)
	require.Equal(t, 0, tlfu.slru.protected.len)

	for i := 0; i < 500; i++ {
		tlfu.Set(NewEntry(StringKey(fmt.Sprintf("%d:1", i)), "", 1))
	}
	require.Equal(t, 500, tlfu.slru.probation.len)
	require.Equal(t, 0, tlfu.slru.protected.len)
	new := NewEntry(StringKey("l:10"), "", 10)
	new.frequency.Store(10)
	tlfu.Set(new)
	require.Equal(t, 509, tlfu.slru.probation.len)
	require.Equal(t, 0, tlfu.slru.protected.len)
	// 2. probation length is 509, so remove 9 entries from probation
	removed := tlfu.EvictEntries()
	for _, rm := range removed {
		require.True(t, strings.HasSuffix(string(rm.key), ":1"))
	}
	require.Equal(t, 9, len(removed))
	require.Equal(t, 500, tlfu.slru.probation.len)
	require.Equal(t, 0, tlfu.slru.protected.len)

	// put l:450 to probation, this will remove 1 entry, probation len is 949 now
	// remove 449 entries from probation
	new = NewEntry(StringKey("l:450"), "", 450)
	new.frequency.Store(10)
	tlfu.Set(new)
	removed = tlfu.EvictEntries()
	require.Equal(t, 449, len(removed))
	require.Equal(t, 500, tlfu.slru.probation.len)
	require.Equal(t, 0, tlfu.slru.protected.len)

	// put l:460 to probation, this will remove 1 entry, probation len is 959 now
	// remove all entries except the new l:460 one
	new = NewEntry(StringKey("l:460"), "", 460)
	new.frequency.Store(10)
	tlfu.Set(new)
	removed = tlfu.EvictEntries()
	require.Equal(t, 41, len(removed))
	require.Equal(t, 460, tlfu.slru.probation.len)
	require.Equal(t, 0, tlfu.slru.protected.len)

	// access
	tlfu.Access(ReadBufItem[StringKey, string]{entry: new})
	require.Equal(t, 0, tlfu.slru.probation.len)
	require.Equal(t, 460, tlfu.slru.protected.len)
	new.cost.Store(600)
	tlfu.UpdateCost(new, 140)
	removed = tlfu.EvictEntries()
	require.Equal(t, 1, len(removed))
	require.Equal(t, 0, tlfu.slru.probation.len)
	require.Equal(t, 0, tlfu.slru.protected.len)

}

// ---- (the original patch continues with a diff hunk for
// go/cmd/internal/docgen/docgen.go, which is incomplete in this chunk) ----
func GenerateMarkdownTree(cmd *cobra.Command, dir string) error { + sha, err := getCommitID("HEAD") + if err != nil { + return fmt.Errorf("failed to get commit id for HEAD: %w", err) + } switch fi, err := os.Stat(dir); { case errors.Is(err, fs.ErrNotExist): if err := os.MkdirAll(dir, 0755); err != nil { @@ -69,7 +75,7 @@ func GenerateMarkdownTree(cmd *cobra.Command, dir string) error { } recursivelyDisableAutoGenTags(cmd) - if err := doc.GenMarkdownTreeCustom(cmd, dir, frontmatterFilePrepender, linkHandler); err != nil { + if err := doc.GenMarkdownTreeCustom(cmd, dir, frontmatterFilePrepender(sha), linkHandler); err != nil { return err } @@ -79,6 +85,120 @@ func GenerateMarkdownTree(cmd *cobra.Command, dir string) error { return fmt.Errorf("failed to index doc (generated at %s) into proper position (%s): %w", rootDocPath, indexDocPath, err) } + if err := anonymizeHomedir(indexDocPath); err != nil { + return fmt.Errorf("failed to anonymize homedir in help text for command %s: %w", indexDocPath, err) + } + + if err := restructure(dir, dir, cmd.Name(), cmd.Commands()); err != nil { + return err + } + + return nil +} + +/* +_index.md (aka vtctldclient.md) +vtctldclient_AddCellInfo.md +vtctldclient_movetables.md +vtctldclient_movetables_show.md + +becomes + +_index.md +vtctldclient_AddCellInfo.md +vtctldclient_movetables/ + _index.md + vtctldclient_movetables_show.md +*/ + +func restructure(rootDir string, dir string, name string, commands []*cobra.Command) error { + for _, cmd := range commands { + fullCmdFilename := strings.Join([]string{name, cmd.Name()}, "_") + + children := cmd.Commands() + + switch { + case len(children) > 0: + // Command (top-level or not) with children. + // 1. Set up a directory for its children. + // 2. Move its doc into that dir as "_index.md" + // 3. Restructure its children. 
+ cmdDir := filepath.Join(dir, fullCmdFilename) + if err := os.MkdirAll(cmdDir, 0755); err != nil { + return fmt.Errorf("failed to create subdir for %s: %w", fullCmdFilename, err) + } + + indexFile := filepath.Join(cmdDir, "_index.md") + if err := os.Rename(filepath.Join(rootDir, fullCmdFilename+".md"), indexFile); err != nil { + return fmt.Errorf("failed to move index doc for command %s with children: %w", fullCmdFilename, err) + } + + if err := anonymizeHomedir(indexFile); err != nil { + return fmt.Errorf("failed to anonymize homedir in help text for command %s: %w", indexFile, err) + } + + if err := restructure(rootDir, cmdDir, fullCmdFilename, children); err != nil { + return fmt.Errorf("failed to restructure child commands for %s: %w", fullCmdFilename, err) + } + case rootDir != dir: + // Sub-command without children. + // 1. Move its doc into the directory for its parent, name unchanged. + if cmd.Name() == "help" { + // all commands with children have their own "help" subcommand, + // which we do not generate docs for + continue + } + + oldName := filepath.Join(rootDir, fullCmdFilename+".md") + newName := filepath.Join(dir, fullCmdFilename+".md") + + if err := os.Rename(oldName, newName); err != nil { + return fmt.Errorf("failed to move child command %s to its parent's dir: %w", fullCmdFilename, err) + } + + sed := newParentLinkSedCommand(name, newName) + if out, err := sed.CombinedOutput(); err != nil { + return fmt.Errorf("failed to rewrite links to parent command in child %s: %w (extra: %s)", newName, err, out) + } + + if err := anonymizeHomedir(newName); err != nil { + return fmt.Errorf("failed to anonymize homedir in help text for command %s: %w", newName, err) + } + default: + // Top-level command without children. Nothing to restructure. 
+ continue + } + } + + return nil +} + +func newParentLinkSedCommand(parent string, file string) *exec.Cmd { + return exec.Command("sed", "-i", "", "-e", fmt.Sprintf("s:(./%s/):(../):i", parent), file) +} + +var ( + wd string + once sync.Once +) + +func anonymizeHomedir(file string) (err error) { + once.Do(func() { + // Only do this once per run. + wd, err = os.Getwd() + }) + if err != nil { + return err + } + + // We're replacing the stuff inside the square brackets in the example sed + // below: + // 's:Paths to search for config files in. (default \[.*\])$:Paths to search for config files in. (default \[\]):' + sed := exec.Command("sed", "-i", "", "-e", fmt.Sprintf("s:%s::i", wd), file) + if out, err := sed.CombinedOutput(); err != nil { + return fmt.Errorf("%w: %s", err, out) + } + return nil } @@ -91,31 +211,47 @@ func recursivelyDisableAutoGenTags(root *cobra.Command) { } } +func getCommitID(ref string) (string, error) { + gitShow := exec.Command("git", "show", "--pretty=format:%H", "--no-patch", ref) + out, err := gitShow.Output() + if err != nil { + return "", err + } + + return string(out), nil +} + const frontmatter = `--- title: %s series: %s +commit: %s --- ` -func frontmatterFilePrepender(filename string) string { - name := filepath.Base(filename) - base := strings.TrimSuffix(name, filepath.Ext(name)) +func frontmatterFilePrepender(sha string) func(filename string) string { + return func(filename string) string { + name := filepath.Base(filename) + base := strings.TrimSuffix(name, filepath.Ext(name)) - root, cmdName, ok := strings.Cut(base, "_") - if !ok { // no `_`, so not a subcommand - cmdName = root - } + root, cmdName, ok := strings.Cut(base, "_") + if !ok { // no `_`, so not a subcommand + cmdName = root + } - return fmt.Sprintf(frontmatter, cmdName, root) + cmdName = strings.ReplaceAll(cmdName, "_", " ") + + return fmt.Sprintf(frontmatter, cmdName, root, sha) + } } func linkHandler(filename string) string { - name := filepath.Base(filename) - 
base := strings.TrimSuffix(name, filepath.Ext(name)) + base := filepath.Base(filename) + name := strings.TrimSuffix(base, filepath.Ext(base)) - if _, _, ok := strings.Cut(base, "_"); !ok { + _, _, ok := strings.Cut(name, "_") + if !ok { return "../" } - return fmt.Sprintf("./%s/", strings.ToLower(base)) + return fmt.Sprintf("./%s/", strings.ToLower(name)) } diff --git a/go/cmd/mysqlctl/command/init.go b/go/cmd/mysqlctl/command/init.go new file mode 100644 index 00000000000..71a9661aa80 --- /dev/null +++ b/go/cmd/mysqlctl/command/init.go @@ -0,0 +1,71 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "context" + "fmt" + "time" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/vt/mysqlctl" +) + +var Init = &cobra.Command{ + Use: "init", + Short: "Initializes the directory structure and starts mysqld.", + Long: "Bootstraps a new `mysqld` instance, initializes its data directory, and starts the instance.\n" + + "The MySQL version and flavor will be auto-detected, with a minimal configuration file applied.", + Example: `mysqlctl \ + --alsologtostderr \ + --tablet_uid 101 \ + --mysql_port 12345 \ + init`, + Args: cobra.NoArgs, + RunE: commandInit, +} + +var initArgs = struct { + WaitTime time.Duration + InitDbSQLFile string +}{ + WaitTime: 5 * time.Minute, +} + +func commandInit(cmd *cobra.Command, args []string) error { + // Generate my.cnf from scratch and use it to find mysqld. 
+ mysqld, cnf, err := mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort) + if err != nil { + return fmt.Errorf("failed to initialize mysql config: %v", err) + } + defer mysqld.Close() + + ctx, cancel := context.WithTimeout(context.Background(), initArgs.WaitTime) + defer cancel() + if err := mysqld.Init(ctx, cnf, initArgs.InitDbSQLFile); err != nil { + return fmt.Errorf("failed init mysql: %v", err) + } + return nil +} + +func init() { + Init.Flags().DurationVar(&initArgs.WaitTime, "wait_time", initArgs.WaitTime, "How long to wait for mysqld startup.") + Init.Flags().StringVar(&initArgs.InitDbSQLFile, "init_db_sql_file", initArgs.InitDbSQLFile, "Path to .sql file to run after mysqld initialization.") + + Root.AddCommand(Init) +} diff --git a/go/cmd/mysqlctl/command/init_config.go b/go/cmd/mysqlctl/command/init_config.go new file mode 100644 index 00000000000..70e751e02cb --- /dev/null +++ b/go/cmd/mysqlctl/command/init_config.go @@ -0,0 +1,57 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package command + +import ( + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/vt/mysqlctl" +) + +var InitConfig = &cobra.Command{ + Use: "init_config", + Short: "Initializes the directory structure, creates my.cnf file, but does not start mysqld.", + Long: "Bootstraps the configuration for a new `mysqld` instance and initializes its data directory.\n" + + "This command is the same as `init` except the `mysqld` server will not be started.", + Example: `mysqlctl \ + --alsologtostderr \ + --tablet_uid 101 \ + --mysql_port 12345 \ + init_config`, + Args: cobra.NoArgs, + RunE: commandInitConfig, +} + +func commandInitConfig(cmd *cobra.Command, args []string) error { + // Generate my.cnf from scratch and use it to find mysqld. + mysqld, cnf, err := mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort) + if err != nil { + return fmt.Errorf("failed to initialize mysql config: %v", err) + } + defer mysqld.Close() + if err := mysqld.InitConfig(cnf); err != nil { + return fmt.Errorf("failed to init mysql config: %v", err) + } + + return nil +} + +func init() { + Root.AddCommand(InitConfig) +} diff --git a/go/cmd/mysqlctl/plugin_prometheusbackend.go b/go/cmd/mysqlctl/command/plugin_prometheusbackend.go similarity index 98% rename from go/cmd/mysqlctl/plugin_prometheusbackend.go rename to go/cmd/mysqlctl/command/plugin_prometheusbackend.go index 62853982f11..7376af743a4 100644 --- a/go/cmd/mysqlctl/plugin_prometheusbackend.go +++ b/go/cmd/mysqlctl/command/plugin_prometheusbackend.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package command // This plugin imports Prometheus to allow for instrumentation // with the Prometheus client library diff --git a/go/cmd/mysqlctl/command/position.go b/go/cmd/mysqlctl/command/position.go new file mode 100644 index 00000000000..46f848e1bbb --- /dev/null +++ b/go/cmd/mysqlctl/command/position.go @@ -0,0 +1,74 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/mysql/replication" +) + +var Position = &cobra.Command{ + Use: "position <operation> <pos1> <pos2|gtid>", + Short: "Compute operations on replication positions", + Args: cobra.MatchAll(cobra.ExactArgs(3), func(cmd *cobra.Command, args []string) error { + switch args[0] { + case "equal", "at_least", "append": + default: + return fmt.Errorf("invalid operation %s (choices are 'equal', 'at_least', 'append')", args[0]) + } + + return nil + }), + RunE: commandPosition, +} + +func commandPosition(cmd *cobra.Command, args []string) error { + pos1, err := replication.DecodePosition(args[1]) + if err != nil { + return err + } + + switch args[0] { + case "equal": + pos2, err := replication.DecodePosition(args[2]) + if err != nil { + return err + } + fmt.Println(pos1.Equal(pos2)) + case "at_least": + pos2, err := replication.DecodePosition(args[2]) + if err != nil { + return err + } + fmt.Println(pos1.AtLeast(pos2)) + case "append": + gtid, err := replication.DecodeGTID(args[2]) + if err != nil { + return 
err + } + fmt.Println(replication.AppendGTID(pos1, gtid)) + } + + return nil +} + +func init() { + Root.AddCommand(Position) +} diff --git a/go/cmd/mysqlctl/command/reinit_config.go b/go/cmd/mysqlctl/command/reinit_config.go new file mode 100644 index 00000000000..b06642c8203 --- /dev/null +++ b/go/cmd/mysqlctl/command/reinit_config.go @@ -0,0 +1,58 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/vt/mysqlctl" +) + +var ReinitConfig = &cobra.Command{ + Use: "reinit_config", + Short: "Reinitializes my.cnf file with new server_id.", + Long: "Regenerate new configuration files for an existing `mysqld` instance (generating new server_id and server_uuid values).\n" + + "This could be helpful to revert configuration changes, or to pick up changes made to the bundled config in newer Vitess versions.", + Example: `mysqlctl \ + --alsologtostderr \ + --tablet_uid 101 \ + --mysql_port 12345 \ + reinit_config`, + Args: cobra.NoArgs, + RunE: commandReinitConfig, +} + +func commandReinitConfig(cmd *cobra.Command, args []string) error { + // There ought to be an existing my.cnf, so use it to find mysqld. 
+ mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID) + if err != nil { + return fmt.Errorf("failed to find mysql config: %v", err) + } + defer mysqld.Close() + + if err := mysqld.ReinitConfig(context.TODO(), cnf); err != nil { + return fmt.Errorf("failed to reinit mysql config: %v", err) + } + return nil +} + +func init() { + Root.AddCommand(ReinitConfig) +} diff --git a/go/cmd/mysqlctl/command/root.go b/go/cmd/mysqlctl/command/root.go new file mode 100644 index 00000000000..4f5626ef7e6 --- /dev/null +++ b/go/cmd/mysqlctl/command/root.go @@ -0,0 +1,77 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package command + +import ( + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/acl" + vtcmd "vitess.io/vitess/go/cmd" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/servenv" +) + +var ( + mysqlPort = 3306 + tabletUID = uint32(41983) + mysqlSocket string + + Root = &cobra.Command{ + Use: "mysqlctl", + Short: "mysqlctl initializes and controls mysqld with Vitess-specific configuration.", + Long: "`mysqlctl` is a command-line client used for managing `mysqld` instances.\n\n" + + + "It is responsible for bootstrapping tasks such as generating a configuration file for `mysqld` and initializing the instance and its data directory.\n" + + "The `mysqld_safe` watchdog is utilized when present.\n" + + "This helps ensure that `mysqld` is automatically restarted after failures.", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if err := servenv.CobraPreRunE(cmd, args); err != nil { + return nil + } + + if vtcmd.IsRunningAsRoot() { + return fmt.Errorf("mysqlctl cannot be run as root. Please run as a different user") + } + + return nil + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + logutil.Flush() + }, + Version: servenv.AppVersion.String(), + } +) + +func init() { + servenv.RegisterDefaultSocketFileFlags() + servenv.RegisterFlags() + servenv.RegisterServiceMapFlag() + + // mysqlctl only starts and stops mysql, only needs dba. 
+ dbconfigs.RegisterFlags(dbconfigs.Dba) + + servenv.MovePersistentFlagsToCobraCommand(Root) + + Root.PersistentFlags().IntVar(&mysqlPort, "mysql_port", mysqlPort, "MySQL port.") + Root.PersistentFlags().Uint32Var(&tabletUID, "tablet_uid", tabletUID, "Tablet UID.") + Root.PersistentFlags().StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "Path to the mysqld socket file.") + + acl.RegisterFlags(Root.PersistentFlags()) +} diff --git a/go/cmd/mysqlctl/command/shutdown.go b/go/cmd/mysqlctl/command/shutdown.go new file mode 100644 index 00000000000..41c804856eb --- /dev/null +++ b/go/cmd/mysqlctl/command/shutdown.go @@ -0,0 +1,66 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "context" + "fmt" + "time" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/vt/mysqlctl" +) + +var Shutdown = &cobra.Command{ + Use: "shutdown", + Short: "Shuts down mysqld, without removing any files.", + Long: "Stop a `mysqld` instance that was previously started with `init` or `start`.\n\n" + + + "For large `mysqld` instances, you may need to extend the `wait_time` to shutdown cleanly.", + Example: `mysqlctl --tablet_uid 101 --alsologtostderr shutdown`, + Args: cobra.NoArgs, + RunE: commandShutdown, +} + +var shutdownArgs = struct { + WaitTime time.Duration +}{ + WaitTime: 5 * time.Minute, +} + +func commandShutdown(cmd *cobra.Command, args []string) error { + // There ought to be an existing my.cnf, so use it to find mysqld. 
+ mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID) + if err != nil { + return fmt.Errorf("failed to find mysql config: %v", err) + } + defer mysqld.Close() + + ctx, cancel := context.WithTimeout(context.Background(), shutdownArgs.WaitTime) + defer cancel() + if err := mysqld.Shutdown(ctx, cnf, true); err != nil { + return fmt.Errorf("failed shutdown mysql: %v", err) + } + return nil +} + +func init() { + Shutdown.Flags().DurationVar(&shutdownArgs.WaitTime, "wait_time", shutdownArgs.WaitTime, "How long to wait for mysqld shutdown.") + + Root.AddCommand(Shutdown) +} diff --git a/go/cmd/mysqlctl/command/start.go b/go/cmd/mysqlctl/command/start.go new file mode 100644 index 00000000000..397909e0966 --- /dev/null +++ b/go/cmd/mysqlctl/command/start.go @@ -0,0 +1,67 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package command + +import ( + "context" + "fmt" + "time" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/flagutil" + "vitess.io/vitess/go/vt/mysqlctl" +) + +var Start = &cobra.Command{ + Use: "start", + Short: "Starts mysqld on an already 'init'-ed directory.", + Long: "Resume an existing `mysqld` instance that was previously bootstrapped with `init` or `init_config`", + Example: `mysqlctl --tablet_uid 101 --alsologtostderr start`, + Args: cobra.NoArgs, + RunE: commandStart, +} + +var startArgs = struct { + WaitTime time.Duration + MySQLdArgs flagutil.StringListValue +}{ + WaitTime: 5 * time.Minute, +} + +func commandStart(cmd *cobra.Command, args []string) error { + // There ought to be an existing my.cnf, so use it to find mysqld. + mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID) + if err != nil { + return fmt.Errorf("failed to find mysql config: %v", err) + } + defer mysqld.Close() + + ctx, cancel := context.WithTimeout(context.Background(), startArgs.WaitTime) + defer cancel() + if err := mysqld.Start(ctx, cnf, startArgs.MySQLdArgs...); err != nil { + return fmt.Errorf("failed start mysql: %v", err) + } + return nil +} + +func init() { + Start.Flags().DurationVar(&startArgs.WaitTime, "wait_time", startArgs.WaitTime, "How long to wait for mysqld startup.") + Start.Flags().Var(&startArgs.MySQLdArgs, "mysqld_args", "List of comma-separated flags to pass additionally to mysqld.") + + Root.AddCommand(Start) +} diff --git a/go/cmd/mysqlctl/command/teardown.go b/go/cmd/mysqlctl/command/teardown.go new file mode 100644 index 00000000000..0d37a15cfdc --- /dev/null +++ b/go/cmd/mysqlctl/command/teardown.go @@ -0,0 +1,70 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "context" + "fmt" + "time" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/vt/mysqlctl" +) + +var Teardown = &cobra.Command{ + Use: "teardown", + Short: "Shuts mysqld down and removes the directory.", + Long: "{{< warning >}}\n" + + "This is a destructive operation.\n" + + "{{< /warning >}}\n\n" + + + "Shuts down a `mysqld` instance and removes its data directory.", + Example: `mysqlctl --tablet_uid 101 --alsologtostderr teardown`, + Args: cobra.NoArgs, + RunE: commandTeardown, +} + +var teardownArgs = struct { + WaitTime time.Duration + Force bool +}{ + WaitTime: 5 * time.Minute, +} + +func commandTeardown(cmd *cobra.Command, args []string) error { + // There ought to be an existing my.cnf, so use it to find mysqld. + mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID) + if err != nil { + return fmt.Errorf("failed to find mysql config: %v", err) + } + defer mysqld.Close() + + ctx, cancel := context.WithTimeout(context.Background(), teardownArgs.WaitTime) + defer cancel() + if err := mysqld.Teardown(ctx, cnf, teardownArgs.Force); err != nil { + return fmt.Errorf("failed teardown mysql (forced? 
%v): %v", teardownArgs.Force, err) + } + return nil +} + +func init() { + Teardown.Flags().DurationVar(&teardownArgs.WaitTime, "wait_time", teardownArgs.WaitTime, "How long to wait for mysqld shutdown.") + Teardown.Flags().BoolVarP(&teardownArgs.Force, "force", "f", teardownArgs.Force, "Remove the root directory even if mysqld shutdown fails.") + + Root.AddCommand(Teardown) +} diff --git a/go/cmd/mysqlctl/docgen/main.go b/go/cmd/mysqlctl/docgen/main.go new file mode 100644 index 00000000000..2162b5e8551 --- /dev/null +++ b/go/cmd/mysqlctl/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/mysqlctl/command" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d
]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(command.Root, dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/mysqlctl/mysqlctl.go b/go/cmd/mysqlctl/mysqlctl.go index 6873cc2bf56..72198c2c8c0 100644 --- a/go/cmd/mysqlctl/mysqlctl.go +++ b/go/cmd/mysqlctl/mysqlctl.go @@ -18,268 +18,12 @@ limitations under the License. package main import ( - "context" - "fmt" - "os" - "time" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/cmd" - "vitess.io/vitess/go/exit" - "vitess.io/vitess/go/flagutil" - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/cmd/mysqlctl/command" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/mysqlctl" - "vitess.io/vitess/go/vt/servenv" -) - -var ( - mysqlPort = 3306 - tabletUID = uint32(41983) - mysqlSocket string ) -func init() { - servenv.RegisterDefaultSocketFileFlags() - servenv.RegisterFlags() - servenv.RegisterServiceMapFlag() - // mysqlctl only starts and stops mysql, only needs dba. - dbconfigs.RegisterFlags(dbconfigs.Dba) - servenv.OnParse(func(fs *pflag.FlagSet) { - fs.IntVar(&mysqlPort, "mysql_port", mysqlPort, "MySQL port") - fs.Uint32Var(&tabletUID, "tablet_uid", tabletUID, "Tablet UID") - fs.StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "Path to the mysqld socket file") - - acl.RegisterFlags(fs) - }) -} - -func initConfigCmd(subFlags *pflag.FlagSet, args []string) error { - _ = subFlags.Parse(args) - - // Generate my.cnf from scratch and use it to find mysqld. 
- mysqld, cnf, err := mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort) - if err != nil { - return fmt.Errorf("failed to initialize mysql config: %v", err) - } - defer mysqld.Close() - if err := mysqld.InitConfig(cnf); err != nil { - return fmt.Errorf("failed to init mysql config: %v", err) - } - return nil -} - -func initCmd(subFlags *pflag.FlagSet, args []string) error { - waitTime := subFlags.Duration("wait_time", 5*time.Minute, "How long to wait for mysqld startup") - initDBSQLFile := subFlags.String("init_db_sql_file", "", "Path to .sql file to run after mysqld initiliaztion") - _ = subFlags.Parse(args) - - // Generate my.cnf from scratch and use it to find mysqld. - mysqld, cnf, err := mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort) - if err != nil { - return fmt.Errorf("failed to initialize mysql config: %v", err) - } - defer mysqld.Close() - - ctx, cancel := context.WithTimeout(context.Background(), *waitTime) - defer cancel() - if err := mysqld.Init(ctx, cnf, *initDBSQLFile); err != nil { - return fmt.Errorf("failed init mysql: %v", err) - } - return nil -} - -func reinitConfigCmd(subFlags *pflag.FlagSet, args []string) error { - _ = subFlags.Parse(args) - - // There ought to be an existing my.cnf, so use it to find mysqld. - mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID) - if err != nil { - return fmt.Errorf("failed to find mysql config: %v", err) - } - defer mysqld.Close() - - if err := mysqld.ReinitConfig(context.TODO(), cnf); err != nil { - return fmt.Errorf("failed to reinit mysql config: %v", err) - } - return nil -} - -func shutdownCmd(subFlags *pflag.FlagSet, args []string) error { - waitTime := subFlags.Duration("wait_time", 5*time.Minute, "How long to wait for mysqld shutdown") - _ = subFlags.Parse(args) - - // There ought to be an existing my.cnf, so use it to find mysqld. 
- mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID) - if err != nil { - return fmt.Errorf("failed to find mysql config: %v", err) - } - defer mysqld.Close() - - ctx, cancel := context.WithTimeout(context.Background(), *waitTime) - defer cancel() - if err := mysqld.Shutdown(ctx, cnf, true); err != nil { - return fmt.Errorf("failed shutdown mysql: %v", err) - } - return nil -} - -func startCmd(subFlags *pflag.FlagSet, args []string) error { - waitTime := subFlags.Duration("wait_time", 5*time.Minute, "How long to wait for mysqld startup") - var mysqldArgs flagutil.StringListValue - subFlags.Var(&mysqldArgs, "mysqld_args", "List of comma-separated flags to pass additionally to mysqld") - _ = subFlags.Parse(args) - - // There ought to be an existing my.cnf, so use it to find mysqld. - mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID) - if err != nil { - return fmt.Errorf("failed to find mysql config: %v", err) - } - defer mysqld.Close() - - ctx, cancel := context.WithTimeout(context.Background(), *waitTime) - defer cancel() - if err := mysqld.Start(ctx, cnf, mysqldArgs...); err != nil { - return fmt.Errorf("failed start mysql: %v", err) - } - return nil -} - -func teardownCmd(subFlags *pflag.FlagSet, args []string) error { - waitTime := subFlags.Duration("wait_time", 5*time.Minute, "How long to wait for mysqld shutdown") - force := subFlags.Bool("force", false, "Remove the root directory even if mysqld shutdown fails") - _ = subFlags.Parse(args) - - // There ought to be an existing my.cnf, so use it to find mysqld. - mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID) - if err != nil { - return fmt.Errorf("failed to find mysql config: %v", err) - } - defer mysqld.Close() - - ctx, cancel := context.WithTimeout(context.Background(), *waitTime) - defer cancel() - if err := mysqld.Teardown(ctx, cnf, *force); err != nil { - return fmt.Errorf("failed teardown mysql (forced? 
%v): %v", *force, err) - } - return nil -} - -func positionCmd(subFlags *pflag.FlagSet, args []string) error { - _ = subFlags.Parse(args) - if len(args) < 3 { - return fmt.Errorf("not enough arguments for position operation") - } - - pos1, err := mysql.DecodePosition(args[1]) - if err != nil { - return err - } - - switch args[0] { - case "equal": - pos2, err := mysql.DecodePosition(args[2]) - if err != nil { - return err - } - fmt.Println(pos1.Equal(pos2)) - case "at_least": - pos2, err := mysql.DecodePosition(args[2]) - if err != nil { - return err - } - fmt.Println(pos1.AtLeast(pos2)) - case "append": - gtid, err := mysql.DecodeGTID(args[2]) - if err != nil { - return err - } - fmt.Println(mysql.AppendGTID(pos1, gtid)) - } - - return nil -} - -type command struct { - name string - method func(*pflag.FlagSet, []string) error - params string - help string -} - -var commands = []command{ - {"init", initCmd, "[--wait_time=5m] [--init_db_sql_file=]", - "Initializes the directory structure and starts mysqld"}, - {"init_config", initConfigCmd, "", - "Initializes the directory structure, creates my.cnf file, but does not start mysqld"}, - {"reinit_config", reinitConfigCmd, "", - "Reinitializes my.cnf file with new server_id"}, - {"teardown", teardownCmd, "[--wait_time=5m] [--force]", - "Shuts mysqld down, and removes the directory"}, - {"start", startCmd, "[--wait_time=5m]", - "Starts mysqld on an already 'init'-ed directory"}, - {"shutdown", shutdownCmd, "[--wait_time=5m]", - "Shuts down mysqld, does not remove any file"}, - - {"position", positionCmd, - " ", - "Compute operations on replication positions"}, -} - func main() { - defer exit.Recover() - defer logutil.Flush() - - fs := pflag.NewFlagSet("mysqlctl", pflag.ExitOnError) - log.RegisterFlags(fs) - logutil.RegisterFlags(fs) - pflag.Usage = func() { - w := os.Stderr - fmt.Fprintf(w, "Usage: %s [global-flags] -- [command-flags]\n", os.Args[0]) - fmt.Fprintf(w, "\nThe commands are listed below. 
Use '%s -- {-h, --help}' for command help.\n\n", os.Args[0]) - for _, cmd := range commands { - fmt.Fprintf(w, " %s", cmd.name) - if cmd.params != "" { - fmt.Fprintf(w, " %s", cmd.params) - } - fmt.Fprintf(w, "\n") - } - fmt.Fprintf(w, "\nGlobal flags:\n") - pflag.PrintDefaults() - } - args := servenv.ParseFlagsWithArgs("mysqlctl") - - if cmd.IsRunningAsRoot() { - fmt.Fprintln(os.Stderr, "mysqlctl cannot be ran as root. Please run as a different user") - exit.Return(1) - } - - action := args[0] - for _, cmd := range commands { - if cmd.name == action { - subFlags := pflag.NewFlagSet(action, pflag.ExitOnError) - subFlags.Usage = func() { - w := os.Stderr - fmt.Fprintf(w, "Usage: %s %s %s\n\n", os.Args[0], cmd.name, cmd.params) - fmt.Fprintf(w, cmd.help) - fmt.Fprintf(w, "\n\n") - subFlags.PrintDefaults() - } - // This is logged and we want sentence capitalization and punctuation. - pflag.ErrHelp = fmt.Errorf("\nSee %s --help for more information.", os.Args[0]) // nolint:revive - if err := cmd.method(subFlags, args[1:]); err != nil { - log.Errorf("%v\n", err) - subFlags.Usage() - exit.Return(1) - } - return - } + if err := command.Root.Execute(); err != nil { + log.Exit(err) } - log.Errorf("invalid action: %v\n\n", action) - pflag.Usage() - exit.Return(1) } diff --git a/go/cmd/mysqlctld/cli/mysqlctld.go b/go/cmd/mysqlctld/cli/mysqlctld.go new file mode 100644 index 00000000000..6ebaa5dc422 --- /dev/null +++ b/go/cmd/mysqlctld/cli/mysqlctld.go @@ -0,0 +1,178 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// mysqlctld is a daemon that starts or initializes mysqld and provides an RPC +// interface for vttablet to stop and start mysqld from a different container +// without having to restart the container running mysqlctld. +package cli + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/servenv" +) + +var ( + // mysqld is used by the rpc implementation plugin. + mysqld *mysqlctl.Mysqld + cnf *mysqlctl.Mycnf + + mysqlPort = 3306 + tabletUID = uint32(41983) + mysqlSocket string + + // mysqlctl init flags + waitTime = 5 * time.Minute + initDBSQLFile string + + Main = &cobra.Command{ + Use: "mysqlctld", + Short: "mysqlctld is a daemon that starts or initializes mysqld.", + Long: "`mysqlctld` is a gRPC server that can be used instead of the `mysqlctl` client tool.\n" + + "If the target directories are empty when it is invoked, it automatically performs initialization operations to bootstrap the `mysqld` instance before starting it.\n" + + "The `mysqlctld` process can subsequently receive gRPC commands from a `vttablet` to perform housekeeping operations like shutting down and restarting the `mysqld` instance as needed.\n\n" + + "{{< warning >}}\n" + + "`mysqld_safe` is not used so the `mysqld` process will not be automatically restarted in case of a failure.\n" + + "{{}}\n\n" + + "To enable communication with a `vttablet`, the server must be configured to receive gRPC messages on a unix domain socket.", + Example: `mysqlctld \ + --log_dir=${VTDATAROOT}/logs \ + --tablet_uid=100 \ + --mysql_port=17100 \ + --socket_file=/path/to/socket_file`, + Args: cobra.NoArgs, + PreRunE: servenv.CobraPreRunE, + RunE: run, + } +) + +func init() { + 
servenv.RegisterDefaultFlags() + servenv.RegisterDefaultSocketFileFlags() + servenv.RegisterFlags() + servenv.RegisterGRPCServerFlags() + servenv.RegisterGRPCServerAuthFlags() + servenv.RegisterServiceMapFlag() + // mysqlctld only starts and stops mysql, only needs dba. + dbconfigs.RegisterFlags(dbconfigs.Dba) + + servenv.MoveFlagsToCobraCommand(Main) + + Main.Flags().IntVar(&mysqlPort, "mysql_port", mysqlPort, "MySQL port") + Main.Flags().Uint32Var(&tabletUID, "tablet_uid", tabletUID, "Tablet UID") + Main.Flags().StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "Path to the mysqld socket file") + Main.Flags().DurationVar(&waitTime, "wait_time", waitTime, "How long to wait for mysqld startup or shutdown") + Main.Flags().StringVar(&initDBSQLFile, "init_db_sql_file", initDBSQLFile, "Path to .sql file to run after mysqld initialization") + + acl.RegisterFlags(Main.Flags()) +} + +func run(cmd *cobra.Command, args []string) error { + defer logutil.Flush() + + // We'll register this OnTerm handler before mysqld starts, so we get notified + // if mysqld dies on its own without us (or our RPC client) telling it to. + mysqldTerminated := make(chan struct{}) + onTermFunc := func() { + close(mysqldTerminated) + } + + // Start or Init mysqld as needed. + ctx, cancel := context.WithTimeout(context.Background(), waitTime) + mycnfFile := mysqlctl.MycnfFile(tabletUID) + if _, statErr := os.Stat(mycnfFile); os.IsNotExist(statErr) { + // Generate my.cnf from scratch and use it to find mysqld. 
+ log.Infof("mycnf file (%s) doesn't exist, initializing", mycnfFile) + + var err error + mysqld, cnf, err = mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort) + if err != nil { + cancel() + return fmt.Errorf("failed to initialize mysql config: %w", err) + } + mysqld.OnTerm(onTermFunc) + + if err := mysqld.Init(ctx, cnf, initDBSQLFile); err != nil { + cancel() + return fmt.Errorf("failed to initialize mysql data dir and start mysqld: %w", err) + } + } else { + // There ought to be an existing my.cnf, so use it to find mysqld. + log.Infof("mycnf file (%s) already exists, starting without init", mycnfFile) + + var err error + mysqld, cnf, err = mysqlctl.OpenMysqldAndMycnf(tabletUID) + if err != nil { + cancel() + return fmt.Errorf("failed to find mysql config: %w", err) + } + mysqld.OnTerm(onTermFunc) + + err = mysqld.RefreshConfig(ctx, cnf) + if err != nil { + cancel() + return fmt.Errorf("failed to refresh config: %w", err) + } + + // check if we were interrupted during a previous restore + if !mysqlctl.RestoreWasInterrupted(cnf) { + if err := mysqld.Start(ctx, cnf); err != nil { + cancel() + return fmt.Errorf("failed to start mysqld: %w", err) + } + } else { + log.Infof("found interrupted restore, not starting mysqld") + } + } + cancel() + + servenv.Init() + + // Take mysqld down with us on SIGTERM before entering lame duck. + servenv.OnTermSync(func() { + log.Infof("mysqlctl received SIGTERM, shutting down mysqld first") + ctx := context.Background() + if err := mysqld.Shutdown(ctx, cnf, true); err != nil { + log.Errorf("failed to shutdown mysqld: %v", err) + } + }) + + // Start RPC server and wait for SIGTERM. 
+ mysqlctldTerminated := make(chan struct{}) + go func() { + servenv.RunDefault() + close(mysqlctldTerminated) + }() + + select { + case <-mysqldTerminated: + log.Infof("mysqld shut down on its own, exiting mysqlctld") + case <-mysqlctldTerminated: + log.Infof("mysqlctld shut down gracefully") + } + + return nil +} diff --git a/go/cmd/mysqlctld/plugin_grpcmysqlctlserver.go b/go/cmd/mysqlctld/cli/plugin_grpcmysqlctlserver.go similarity index 98% rename from go/cmd/mysqlctld/plugin_grpcmysqlctlserver.go rename to go/cmd/mysqlctld/cli/plugin_grpcmysqlctlserver.go index ee81ab77515..1186d5ed788 100644 --- a/go/cmd/mysqlctld/plugin_grpcmysqlctlserver.go +++ b/go/cmd/mysqlctld/cli/plugin_grpcmysqlctlserver.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Import and register the gRPC mysqlctl server diff --git a/go/cmd/mysqlctld/plugin_prometheusbackend.go b/go/cmd/mysqlctld/cli/plugin_prometheusbackend.go similarity index 98% rename from go/cmd/mysqlctld/plugin_prometheusbackend.go rename to go/cmd/mysqlctld/cli/plugin_prometheusbackend.go index 4ae114ceedd..e01ecf0bead 100644 --- a/go/cmd/mysqlctld/plugin_prometheusbackend.go +++ b/go/cmd/mysqlctld/cli/plugin_prometheusbackend.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports Prometheus to allow for instrumentation // with the Prometheus client library diff --git a/go/cmd/mysqlctld/docgen/main.go b/go/cmd/mysqlctld/docgen/main.go new file mode 100644 index 00000000000..4c920fa46e0 --- /dev/null +++ b/go/cmd/mysqlctld/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/mysqlctld/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.Main, dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/mysqlctld/mysqlctld.go b/go/cmd/mysqlctld/mysqlctld.go index 39b9ac11490..5843c5a15e1 100644 --- a/go/cmd/mysqlctld/mysqlctld.go +++ b/go/cmd/mysqlctld/mysqlctld.go @@ -20,140 +20,12 @@ limitations under the License. package main import ( - "context" - "os" - "time" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/exit" - "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/cmd/mysqlctld/cli" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/mysqlctl" - "vitess.io/vitess/go/vt/servenv" -) - -var ( - // mysqld is used by the rpc implementation plugin. 
- mysqld *mysqlctl.Mysqld - cnf *mysqlctl.Mycnf - - mysqlPort = 3306 - tabletUID = uint32(41983) - mysqlSocket string - - // mysqlctl init flags - waitTime = 5 * time.Minute - initDBSQLFile string ) -func init() { - servenv.RegisterDefaultFlags() - servenv.RegisterDefaultSocketFileFlags() - servenv.RegisterFlags() - servenv.RegisterGRPCServerFlags() - servenv.RegisterGRPCServerAuthFlags() - servenv.RegisterServiceMapFlag() - // mysqlctld only starts and stops mysql, only needs dba. - dbconfigs.RegisterFlags(dbconfigs.Dba) - servenv.OnParse(func(fs *pflag.FlagSet) { - fs.IntVar(&mysqlPort, "mysql_port", mysqlPort, "MySQL port") - fs.Uint32Var(&tabletUID, "tablet_uid", tabletUID, "Tablet UID") - fs.StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "Path to the mysqld socket file") - fs.DurationVar(&waitTime, "wait_time", waitTime, "How long to wait for mysqld startup or shutdown") - fs.StringVar(&initDBSQLFile, "init_db_sql_file", initDBSQLFile, "Path to .sql file to run after mysqld initialization") - - acl.RegisterFlags(fs) - }) -} - func main() { - defer exit.Recover() - defer logutil.Flush() - - servenv.ParseFlags("mysqlctld") - - // We'll register this OnTerm handler before mysqld starts, so we get notified - // if mysqld dies on its own without us (or our RPC client) telling it to. - mysqldTerminated := make(chan struct{}) - onTermFunc := func() { - close(mysqldTerminated) - } - - // Start or Init mysqld as needed. - ctx, cancel := context.WithTimeout(context.Background(), waitTime) - mycnfFile := mysqlctl.MycnfFile(tabletUID) - if _, statErr := os.Stat(mycnfFile); os.IsNotExist(statErr) { - // Generate my.cnf from scratch and use it to find mysqld. 
- log.Infof("mycnf file (%s) doesn't exist, initializing", mycnfFile) - - var err error - mysqld, cnf, err = mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort) - if err != nil { - log.Errorf("failed to initialize mysql config: %v", err) - exit.Return(1) - } - mysqld.OnTerm(onTermFunc) - - if err := mysqld.Init(ctx, cnf, initDBSQLFile); err != nil { - log.Errorf("failed to initialize mysql data dir and start mysqld: %v", err) - exit.Return(1) - } - } else { - // There ought to be an existing my.cnf, so use it to find mysqld. - log.Infof("mycnf file (%s) already exists, starting without init", mycnfFile) - - var err error - mysqld, cnf, err = mysqlctl.OpenMysqldAndMycnf(tabletUID) - if err != nil { - log.Errorf("failed to find mysql config: %v", err) - exit.Return(1) - } - mysqld.OnTerm(onTermFunc) - - err = mysqld.RefreshConfig(ctx, cnf) - if err != nil { - log.Errorf("failed to refresh config: %v", err) - exit.Return(1) - } - - // check if we were interrupted during a previous restore - if !mysqlctl.RestoreWasInterrupted(cnf) { - if err := mysqld.Start(ctx, cnf); err != nil { - log.Errorf("failed to start mysqld: %v", err) - exit.Return(1) - } - } else { - log.Infof("found interrupted restore, not starting mysqld") - } - } - cancel() - - servenv.Init() - defer servenv.Close() - - // Take mysqld down with us on SIGTERM before entering lame duck. - servenv.OnTermSync(func() { - log.Infof("mysqlctl received SIGTERM, shutting down mysqld first") - ctx := context.Background() - if err := mysqld.Shutdown(ctx, cnf, true); err != nil { - log.Errorf("failed to shutdown mysqld: %v", err) - } - }) - - // Start RPC server and wait for SIGTERM. 
- mysqlctldTerminated := make(chan struct{}) - go func() { - servenv.RunDefault() - close(mysqlctldTerminated) - }() - - select { - case <-mysqldTerminated: - log.Infof("mysqld shut down on its own, exiting mysqlctld") - case <-mysqlctldTerminated: - log.Infof("mysqlctld shut down gracefully") + if err := cli.Main.Execute(); err != nil { + log.Exit(err) } } diff --git a/go/cmd/query_analyzer/query_analyzer.go b/go/cmd/query_analyzer/query_analyzer.go deleted file mode 100644 index 2138bde2673..00000000000 --- a/go/cmd/query_analyzer/query_analyzer.go +++ /dev/null @@ -1,149 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "bufio" - "bytes" - "fmt" - "io" - "os" - "sort" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/exit" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/sqlparser" - - // Include deprecation warnings for soon-to-be-unsupported flag invocations. 
- _flag "vitess.io/vitess/go/internal/flag" -) - -var ( - ignores = [][]byte{ - []byte("#"), - []byte("/*"), - []byte("SET"), - []byte("use"), - []byte("BEGIN"), - []byte("COMMIT"), - []byte("ROLLBACK"), - } - bindIndex = 0 - queries = make(map[string]int) -) - -type stat struct { - Query string - Count int -} - -type stats []stat - -func (a stats) Len() int { return len(a) } -func (a stats) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a stats) Less(i, j int) bool { return a[i].Count > a[j].Count } - -func main() { - defer exit.Recover() - fs := pflag.NewFlagSet("query_analyzer", pflag.ExitOnError) - log.RegisterFlags(fs) - logutil.RegisterFlags(fs) - acl.RegisterFlags(fs) - servenv.RegisterMySQLServerFlags(fs) - _flag.Parse(fs) - logutil.PurgeLogs() - for _, filename := range _flag.Args() { - fmt.Printf("processing: %s\n", filename) - if err := processFile(filename); err != nil { - log.Errorf("processFile error: %v", err) - exit.Return(1) - } - } - var stats = make(stats, 0, 128) - for k, v := range queries { - stats = append(stats, stat{Query: k, Count: v}) - } - sort.Sort(stats) - for _, s := range stats { - fmt.Printf("%d: %s\n", s.Count, s.Query) - } -} - -func processFile(filename string) error { - f, err := os.Open(filename) - if err != nil { - return err - } - r := bufio.NewReader(f) - for { - line, err := r.ReadBytes('\n') - if err != nil { - if err == io.EOF { - break - } - return err - } - analyze(line) - } - return nil -} - -func analyze(line []byte) { - for _, ignore := range ignores { - if bytes.HasPrefix(line, ignore) { - return - } - } - dml := string(bytes.TrimRight(line, "\n")) - ast, err := sqlparser.Parse(dml) - if err != nil { - log.Errorf("Error parsing %s", dml) - return - } - bindIndex = 0 - buf := sqlparser.NewTrackedBuffer(formatWithBind) - buf.Myprintf("%v", ast) - addQuery(buf.ParsedQuery().Query) -} - -func formatWithBind(buf *sqlparser.TrackedBuffer, node sqlparser.SQLNode) { - v, ok := node.(*sqlparser.Literal) - if !ok { - 
node.Format(buf) - return - } - switch v.Type { - case sqlparser.StrVal, sqlparser.HexVal, sqlparser.IntVal: - buf.WriteArg(":", fmt.Sprintf("v%d", bindIndex)) - bindIndex++ - default: - node.Format(buf) - } -} - -func addQuery(query string) { - count, ok := queries[query] - if !ok { - count = 0 - } - queries[query] = count + 1 -} diff --git a/go/cmd/vtorc/plugin_consultopo.go b/go/cmd/topo2topo/cli/plugin_consultopo.go similarity index 98% rename from go/cmd/vtorc/plugin_consultopo.go rename to go/cmd/topo2topo/cli/plugin_consultopo.go index 59d6774fdbc..a128f294a42 100644 --- a/go/cmd/vtorc/plugin_consultopo.go +++ b/go/cmd/topo2topo/cli/plugin_consultopo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports consultopo to register the consul implementation of TopoServer. diff --git a/go/cmd/vtorc/plugin_etcd2topo.go b/go/cmd/topo2topo/cli/plugin_etcd2topo.go similarity index 98% rename from go/cmd/vtorc/plugin_etcd2topo.go rename to go/cmd/topo2topo/cli/plugin_etcd2topo.go index d99ef51d4af..5a51923cf00 100644 --- a/go/cmd/vtorc/plugin_etcd2topo.go +++ b/go/cmd/topo2topo/cli/plugin_etcd2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports etcd2topo to register the etcd2 implementation of TopoServer. diff --git a/go/cmd/topo2topo/plugin_zk2topo.go b/go/cmd/topo2topo/cli/plugin_zk2topo.go similarity index 98% rename from go/cmd/topo2topo/plugin_zk2topo.go rename to go/cmd/topo2topo/cli/plugin_zk2topo.go index 62dda455df7..66d14988c75 100644 --- a/go/cmd/topo2topo/plugin_zk2topo.go +++ b/go/cmd/topo2topo/cli/plugin_zk2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package cli import ( // Imports and register the zk2 TopologyServer diff --git a/go/cmd/topo2topo/cli/topo2topo.go b/go/cmd/topo2topo/cli/topo2topo.go new file mode 100644 index 00000000000..6e7e173872b --- /dev/null +++ b/go/cmd/topo2topo/cli/topo2topo.go @@ -0,0 +1,158 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/vt/grpccommon" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/helpers" +) + +var ( + fromImplementation string + fromServerAddress string + fromRoot string + toImplementation string + toServerAddress string + toRoot string + compare bool + doKeyspaces bool + doShards bool + doShardReplications bool + doTablets bool + doRoutingRules bool + + Main = &cobra.Command{ + Use: "topo2topo", + Short: "topo2topo copies Vitess topology data from one topo server to another.", + Long: `topo2topo copies Vitess topology data from one topo server to another. 
+It can also be used to compare data between two topologies.`, + Args: cobra.NoArgs, + PreRunE: servenv.CobraPreRunE, + RunE: run, + } +) + +func init() { + servenv.MoveFlagsToCobraCommand(Main) + + Main.Flags().StringVar(&fromImplementation, "from_implementation", fromImplementation, "topology implementation to copy data from") + Main.Flags().StringVar(&fromServerAddress, "from_server", fromServerAddress, "topology server address to copy data from") + Main.Flags().StringVar(&fromRoot, "from_root", fromRoot, "topology server root to copy data from") + Main.Flags().StringVar(&toImplementation, "to_implementation", toImplementation, "topology implementation to copy data to") + Main.Flags().StringVar(&toServerAddress, "to_server", toServerAddress, "topology server address to copy data to") + Main.Flags().StringVar(&toRoot, "to_root", toRoot, "topology server root to copy data to") + Main.Flags().BoolVar(&compare, "compare", compare, "compares data between topologies") + Main.Flags().BoolVar(&doKeyspaces, "do-keyspaces", doKeyspaces, "copies the keyspace information") + Main.Flags().BoolVar(&doShards, "do-shards", doShards, "copies the shard information") + Main.Flags().BoolVar(&doShardReplications, "do-shard-replications", doShardReplications, "copies the shard replication information") + Main.Flags().BoolVar(&doTablets, "do-tablets", doTablets, "copies the tablet information") + Main.Flags().BoolVar(&doRoutingRules, "do-routing-rules", doRoutingRules, "copies the routing rules") + + acl.RegisterFlags(Main.Flags()) + grpccommon.RegisterFlags(Main.Flags()) +} + +func run(cmd *cobra.Command, args []string) error { + defer logutil.Flush() + servenv.Init() + + fromTS, err := topo.OpenServer(fromImplementation, fromServerAddress, fromRoot) + if err != nil { + return fmt.Errorf("Cannot open 'from' topo %v: %w", fromImplementation, err) + } + toTS, err := topo.OpenServer(toImplementation, toServerAddress, toRoot) + if err != nil { + return fmt.Errorf("Cannot open 'to' topo 
%v: %w", toImplementation, err) + } + + ctx := context.Background() + + if compare { + return compareTopos(ctx, fromTS, toTS) + } + + return copyTopos(ctx, fromTS, toTS) +} + +func copyTopos(ctx context.Context, fromTS, toTS *topo.Server) error { + if doKeyspaces { + if err := helpers.CopyKeyspaces(ctx, fromTS, toTS); err != nil { + return err + } + } + if doShards { + if err := helpers.CopyShards(ctx, fromTS, toTS); err != nil { + return err + } + } + if doShardReplications { + if err := helpers.CopyShardReplications(ctx, fromTS, toTS); err != nil { + return err + } + } + if doTablets { + if err := helpers.CopyTablets(ctx, fromTS, toTS); err != nil { + return err + } + } + if doRoutingRules { + if err := helpers.CopyRoutingRules(ctx, fromTS, toTS); err != nil { + return err + } + } + + return nil +} + +func compareTopos(ctx context.Context, fromTS, toTS *topo.Server) (err error) { + if doKeyspaces { + err = helpers.CompareKeyspaces(ctx, fromTS, toTS) + if err != nil { + return fmt.Errorf("Compare keyspaces failed: %w", err) + } + } + if doShards { + err = helpers.CompareShards(ctx, fromTS, toTS) + if err != nil { + return fmt.Errorf("Compare shards failed: %w", err) + } + } + if doShardReplications { + err = helpers.CompareShardReplications(ctx, fromTS, toTS) + if err != nil { + return fmt.Errorf("Compare shard replications failed: %w", err) + } + } + if doTablets { + err = helpers.CompareTablets(ctx, fromTS, toTS) + if err != nil { + return fmt.Errorf("Compare tablets failed: %w", err) + } + } + + fmt.Println("Topologies are in sync") + return nil +} diff --git a/go/cmd/topo2topo/docgen/main.go b/go/cmd/topo2topo/docgen/main.go new file mode 100644 index 00000000000..c1d29fff086 --- /dev/null +++ b/go/cmd/topo2topo/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/topo2topo/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.Main, dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/topo2topo/plugin_kubernetestopo.go b/go/cmd/topo2topo/plugin_kubernetestopo.go deleted file mode 100644 index 671d0c8321f..00000000000 --- a/go/cmd/topo2topo/plugin_kubernetestopo.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// This plugin imports k8stopo to register the kubernetes implementation of TopoServer. 
- -import ( - _ "vitess.io/vitess/go/vt/topo/k8stopo" -) diff --git a/go/cmd/topo2topo/topo2topo.go b/go/cmd/topo2topo/topo2topo.go index 157960548b8..c1276ebf504 100644 --- a/go/cmd/topo2topo/topo2topo.go +++ b/go/cmd/topo2topo/topo2topo.go @@ -17,132 +17,15 @@ limitations under the License. package main import ( - "context" - "fmt" - "os" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/cmd/topo2topo/cli" "vitess.io/vitess/go/exit" - "vitess.io/vitess/go/vt/grpccommon" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/helpers" -) - -var ( - fromImplementation string - fromServerAddress string - fromRoot string - toImplementation string - toServerAddress string - toRoot string - compare bool - doKeyspaces bool - doShards bool - doShardReplications bool - doTablets bool - doRoutingRules bool ) -func init() { - servenv.OnParse(func(fs *pflag.FlagSet) { - fs.StringVar(&fromImplementation, "from_implementation", fromImplementation, "topology implementation to copy data from") - fs.StringVar(&fromServerAddress, "from_server", fromServerAddress, "topology server address to copy data from") - fs.StringVar(&fromRoot, "from_root", fromRoot, "topology server root to copy data from") - fs.StringVar(&toImplementation, "to_implementation", toImplementation, "topology implementation to copy data to") - fs.StringVar(&toServerAddress, "to_server", toServerAddress, "topology server address to copy data to") - fs.StringVar(&toRoot, "to_root", toRoot, "topology server root to copy data to") - fs.BoolVar(&compare, "compare", compare, "compares data between topologies") - fs.BoolVar(&doKeyspaces, "do-keyspaces", doKeyspaces, "copies the keyspace information") - fs.BoolVar(&doShards, "do-shards", doShards, "copies the shard information") - fs.BoolVar(&doShardReplications, "do-shard-replications", doShardReplications, "copies the shard 
replication information") - fs.BoolVar(&doTablets, "do-tablets", doTablets, "copies the tablet information") - fs.BoolVar(&doRoutingRules, "do-routing-rules", doRoutingRules, "copies the routing rules") - - acl.RegisterFlags(fs) - }) -} - func main() { defer exit.RecoverAll() - defer logutil.Flush() - fs := pflag.NewFlagSet("topo2topo", pflag.ExitOnError) - grpccommon.RegisterFlags(fs) - log.RegisterFlags(fs) - logutil.RegisterFlags(fs) - - servenv.ParseFlags("topo2topo") - servenv.Init() - - fromTS, err := topo.OpenServer(fromImplementation, fromServerAddress, fromRoot) - if err != nil { - log.Exitf("Cannot open 'from' topo %v: %v", fromImplementation, err) - } - toTS, err := topo.OpenServer(toImplementation, toServerAddress, toRoot) - if err != nil { - log.Exitf("Cannot open 'to' topo %v: %v", toImplementation, err) - } - - ctx := context.Background() - - if compare { - compareTopos(ctx, fromTS, toTS) - return - } - copyTopos(ctx, fromTS, toTS) -} - -func copyTopos(ctx context.Context, fromTS, toTS *topo.Server) { - if doKeyspaces { - helpers.CopyKeyspaces(ctx, fromTS, toTS) - } - if doShards { - helpers.CopyShards(ctx, fromTS, toTS) - } - if doShardReplications { - helpers.CopyShardReplications(ctx, fromTS, toTS) - } - if doTablets { - helpers.CopyTablets(ctx, fromTS, toTS) - } - if doRoutingRules { - helpers.CopyRoutingRules(ctx, fromTS, toTS) - } -} - -func compareTopos(ctx context.Context, fromTS, toTS *topo.Server) { - var err error - if doKeyspaces { - err = helpers.CompareKeyspaces(ctx, fromTS, toTS) - if err != nil { - log.Exitf("Compare keyspaces failed: %v", err) - } - } - if doShards { - err = helpers.CompareShards(ctx, fromTS, toTS) - if err != nil { - log.Exitf("Compare shards failed: %v", err) - } - } - if doShardReplications { - err = helpers.CompareShardReplications(ctx, fromTS, toTS) - if err != nil { - log.Exitf("Compare shard replications failed: %v", err) - } - } - if doTablets { - err = helpers.CompareTablets(ctx, fromTS, toTS) - if err != 
nil { - log.Exitf("Compare tablets failed: %v", err) - } - } - if err == nil { - fmt.Println("Topologies are in sync") - os.Exit(0) + if err := cli.Main.Execute(); err != nil { + log.Exitf("%s", err) } } diff --git a/go/cmd/vtaclcheck/cli/vtactlcheck.go b/go/cmd/vtaclcheck/cli/vtactlcheck.go new file mode 100644 index 00000000000..ebac94131e8 --- /dev/null +++ b/go/cmd/vtaclcheck/cli/vtactlcheck.go @@ -0,0 +1,67 @@ +/* +Copyright 2023 The Vitess Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vtaclcheck" +) + +var ( + aclFile string + staticAuthFile string + + Main = &cobra.Command{ + Use: "vtaclcheck", + Short: "vtaclcheck checks that the access-control list (ACL) rules in a given file are valid.", + Args: cobra.NoArgs, + Version: servenv.AppVersion.String(), + PreRunE: servenv.CobraPreRunE, + PostRun: func(cmd *cobra.Command, args []string) { + logutil.Flush() + }, + RunE: run, + } +) + +func run(cmd *cobra.Command, args []string) error { + servenv.Init() + + opts := &vtaclcheck.Options{ + ACLFile: aclFile, + StaticAuthFile: staticAuthFile, + } + + if err := vtaclcheck.Init(opts); err != nil { + return err + } + + return vtaclcheck.Run() +} + +func init() { + servenv.MoveFlagsToCobraCommand(Main) + + Main.Flags().StringVar(&aclFile, "acl-file", aclFile, "The path of the JSON ACL file to check") + 
Main.Flags().StringVar(&staticAuthFile, "static-auth-file", staticAuthFile, "The path of the auth_server_static JSON file to check") + + acl.RegisterFlags(Main.Flags()) +} diff --git a/go/cmd/vtaclcheck/docgen/main.go b/go/cmd/vtaclcheck/docgen/main.go new file mode 100644 index 00000000000..d3da8b76179 --- /dev/null +++ b/go/cmd/vtaclcheck/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/vtaclcheck/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.Main, dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/vtaclcheck/vtaclcheck.go b/go/cmd/vtaclcheck/vtaclcheck.go index 8b916a8cc0c..bec4cf95fe9 100644 --- a/go/cmd/vtaclcheck/vtaclcheck.go +++ b/go/cmd/vtaclcheck/vtaclcheck.go @@ -19,52 +19,21 @@ package main import ( "fmt" - "github.com/spf13/pflag" - - "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/cmd/vtaclcheck/cli" "vitess.io/vitess/go/exit" "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vtaclcheck" ) -var aclFile, staticAuthFile string - func init() { logger := logutil.NewConsoleLogger() - 
servenv.OnParse(func(fs *pflag.FlagSet) { - fs.StringVar(&aclFile, "acl-file", aclFile, "The path of the JSON ACL file to check") - fs.StringVar(&staticAuthFile, "static-auth-file", staticAuthFile, "The path of the auth_server_static JSON file to check") - - acl.RegisterFlags(fs) - - fs.SetOutput(logutil.NewLoggerWriter(logger)) - }) + cli.Main.SetOutput(logutil.NewLoggerWriter(logger)) } func main() { defer exit.RecoverAll() - defer logutil.Flush() - - servenv.ParseFlags("vtaclcheck") - servenv.Init() - err := run() - if err != nil { + if err := cli.Main.Execute(); err != nil { fmt.Printf("ERROR: %s\n", err) exit.Return(1) } } - -func run() error { - opts := &vtaclcheck.Options{ - ACLFile: aclFile, - StaticAuthFile: staticAuthFile, - } - - if err := vtaclcheck.Init(opts); err != nil { - return err - } - - return vtaclcheck.Run() -} diff --git a/go/cmd/vtctld/plugin_azblobbackupstorage.go b/go/cmd/vtbackup/cli/plugin_azblobbackupstorage.go similarity index 97% rename from go/cmd/vtctld/plugin_azblobbackupstorage.go rename to go/cmd/vtbackup/cli/plugin_azblobbackupstorage.go index a4ca64096a9..bdadc894aae 100644 --- a/go/cmd/vtctld/plugin_azblobbackupstorage.go +++ b/go/cmd/vtbackup/cli/plugin_azblobbackupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/azblobbackupstorage" diff --git a/go/cmd/vtbackup/plugin_cephbackupstorage.go b/go/cmd/vtbackup/cli/plugin_cephbackupstorage.go similarity index 97% rename from go/cmd/vtbackup/plugin_cephbackupstorage.go rename to go/cmd/vtbackup/cli/plugin_cephbackupstorage.go index 819cb108126..2f5a825f270 100644 --- a/go/cmd/vtbackup/plugin_cephbackupstorage.go +++ b/go/cmd/vtbackup/cli/plugin_cephbackupstorage.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/cephbackupstorage" diff --git a/go/cmd/vtbackup/plugin_consultopo.go b/go/cmd/vtbackup/cli/plugin_consultopo.go similarity index 97% rename from go/cmd/vtbackup/plugin_consultopo.go rename to go/cmd/vtbackup/cli/plugin_consultopo.go index 2b6f10e2b28..c2f8de3339e 100644 --- a/go/cmd/vtbackup/plugin_consultopo.go +++ b/go/cmd/vtbackup/cli/plugin_consultopo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/topo/consultopo" diff --git a/go/cmd/vtbackup/plugin_etcd2topo.go b/go/cmd/vtbackup/cli/plugin_etcd2topo.go similarity index 97% rename from go/cmd/vtbackup/plugin_etcd2topo.go rename to go/cmd/vtbackup/cli/plugin_etcd2topo.go index 97412e65755..e4d6d4129ff 100644 --- a/go/cmd/vtbackup/plugin_etcd2topo.go +++ b/go/cmd/vtbackup/cli/plugin_etcd2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/topo/etcd2topo" diff --git a/go/cmd/vtbackup/plugin_filebackupstorage.go b/go/cmd/vtbackup/cli/plugin_filebackupstorage.go similarity index 97% rename from go/cmd/vtbackup/plugin_filebackupstorage.go rename to go/cmd/vtbackup/cli/plugin_filebackupstorage.go index 31417781026..68bf790c827 100644 --- a/go/cmd/vtbackup/plugin_filebackupstorage.go +++ b/go/cmd/vtbackup/cli/plugin_filebackupstorage.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/filebackupstorage" diff --git a/go/cmd/vtbackup/plugin_gcsbackupstorage.go b/go/cmd/vtbackup/cli/plugin_gcsbackupstorage.go similarity index 97% rename from go/cmd/vtbackup/plugin_gcsbackupstorage.go rename to go/cmd/vtbackup/cli/plugin_gcsbackupstorage.go index 2319d0aa7fe..eff9339a318 100644 --- a/go/cmd/vtbackup/plugin_gcsbackupstorage.go +++ b/go/cmd/vtbackup/cli/plugin_gcsbackupstorage.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/gcsbackupstorage" diff --git a/go/vt/vtgr/controller/controller.go b/go/cmd/vtbackup/cli/plugin_opentsdb.go similarity index 74% rename from go/vt/vtgr/controller/controller.go rename to go/cmd/vtbackup/cli/plugin_opentsdb.go index 2b2c36cd320..597e426cc09 100644 --- a/go/vt/vtgr/controller/controller.go +++ b/go/cmd/vtbackup/cli/plugin_opentsdb.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,13 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controller +package cli -import ( - "math/rand" - "time" -) +import "vitess.io/vitess/go/stats/opentsdb" + +// This plugin imports opentsdb to register the opentsdb stats backend. 
func init() { - rand.Seed(time.Now().UnixNano()) + opentsdb.Init("vtbackup") } diff --git a/go/cmd/vtbackup/plugin_prometheusbackend.go b/go/cmd/vtbackup/cli/plugin_prometheusbackend.go similarity index 98% rename from go/cmd/vtbackup/plugin_prometheusbackend.go rename to go/cmd/vtbackup/cli/plugin_prometheusbackend.go index de4ecbb5e9f..3cf256e76c1 100644 --- a/go/cmd/vtbackup/plugin_prometheusbackend.go +++ b/go/cmd/vtbackup/cli/plugin_prometheusbackend.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports Prometheus to allow for instrumentation // with the Prometheus client library diff --git a/go/cmd/vtbackup/plugin_s3backupstorage.go b/go/cmd/vtbackup/cli/plugin_s3backupstorage.go similarity index 97% rename from go/cmd/vtbackup/plugin_s3backupstorage.go rename to go/cmd/vtbackup/cli/plugin_s3backupstorage.go index 917352f2469..27b4ef06dee 100644 --- a/go/cmd/vtbackup/plugin_s3backupstorage.go +++ b/go/cmd/vtbackup/cli/plugin_s3backupstorage.go @@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/s3backupstorage" diff --git a/go/cmd/vtbackup/plugin_zk2topo.go b/go/cmd/vtbackup/cli/plugin_zk2topo.go similarity index 97% rename from go/cmd/vtbackup/plugin_zk2topo.go rename to go/cmd/vtbackup/cli/plugin_zk2topo.go index 5819d2d39ed..914a9b924f9 100644 --- a/go/cmd/vtbackup/plugin_zk2topo.go +++ b/go/cmd/vtbackup/cli/plugin_zk2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package cli import ( _ "vitess.io/vitess/go/vt/topo/zk2topo" diff --git a/go/cmd/vtbackup/cli/vtbackup.go b/go/cmd/vtbackup/cli/vtbackup.go new file mode 100644 index 00000000000..121ba39b8c5 --- /dev/null +++ b/go/cmd/vtbackup/cli/vtbackup.go @@ -0,0 +1,875 @@ +/* +Copyright 2023 The Vitess Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "context" + "crypto/rand" + "fmt" + "math" + "math/big" + "os" + "strings" + "syscall" + "time" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/mysql/replication" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/cmd" + "vitess.io/vitess/go/exit" + "vitess.io/vitess/go/stats" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/mysqlctl/backupstats" + "vitess.io/vitess/go/vt/mysqlctl/backupstorage" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vterrors" + _ "vitess.io/vitess/go/vt/vttablet/grpctmclient" + "vitess.io/vitess/go/vt/vttablet/tmclient" +) + +const ( + // operationTimeout is the timeout for individual operations like fetching + // the primary position. This does not impose an overall timeout on + // long-running processes like taking the backup. It only applies to + // steps along the way that should complete quickly. 
This ensures we don't + // place a hard cap on the overall time for a backup, while also not waiting + // forever for things that should be quick. + operationTimeout = 1 * time.Minute + + phaseNameCatchupReplication = "CatchupReplication" + phaseNameInitialBackup = "InitialBackup" + phaseNameRestoreLastBackup = "RestoreLastBackup" + phaseNameTakeNewBackup = "TakeNewBackup" + phaseStatusCatchupReplicationStalled = "Stalled" + phaseStatusCatchupReplicationStopped = "Stopped" +) + +var ( + minBackupInterval time.Duration + minRetentionTime time.Duration + minRetentionCount = 1 + initialBackup bool + allowFirstBackup bool + restartBeforeBackup bool + upgradeSafe bool + + // vttablet-like flags + initDbNameOverride string + initKeyspace string + initShard string + concurrency = 4 + incrementalFromPos string + + // mysqlctld-like flags + mysqlPort = 3306 + mysqlSocket string + mysqlTimeout = 5 * time.Minute + initDBSQLFile string + detachedMode bool + keepAliveTimeout time.Duration + disableRedoLog bool + + // Deprecated, use "Phase" instead. + deprecatedDurationByPhase = stats.NewGaugesWithSingleLabel( + "DurationByPhaseSeconds", + "[DEPRECATED] How long it took vtbackup to perform each phase (in seconds).", + "phase", + ) + + // This gauge is updated 3*N times during the course of a vtbackup run, + // where N is the number of different phases vtbackup transitions through. + // Once to initialize to 0, another time to set the phase to active (1), + // and another to deactivate the phase (back to 0). + // + // At most a single phase is active at a given time. + // + // The sync gauge immediately reports changes to push-backed backends. + // The benefit of the sync gauge is that it makes verifying stats in + // integration tests a lot more tractable. 
+ phase = stats.NewSyncGaugesWithSingleLabel( + "Phase", + "Active phase.", + "phase", + ) + phaseNames = []string{ + phaseNameCatchupReplication, + phaseNameInitialBackup, + phaseNameRestoreLastBackup, + phaseNameTakeNewBackup, + } + phaseStatus = stats.NewGaugesWithMultiLabels( + "PhaseStatus", + "Internal state of vtbackup phase.", + []string{"phase", "status"}, + ) + phaseStatuses = map[string][]string{ + phaseNameCatchupReplication: { + phaseStatusCatchupReplicationStalled, + phaseStatusCatchupReplicationStopped, + }, + } + + Main = &cobra.Command{ + Use: "vtbackup", + Short: "vtbackup is a batch command to perform a single pass of backup maintenance for a shard.", + Long: `vtbackup is a batch command to perform a single pass of backup maintenance for a shard. + +When run periodically for each shard, vtbackup can ensure these configurable policies: + * There is always a recent backup for the shard. + * Old backups for the shard are removed. + +Whatever system launches vtbackup is responsible for the following: + - Running vtbackup with similar flags that would be used for a vttablet and + mysqlctld in the target shard to be backed up. + - Provisioning as much disk space for vtbackup as would be given to vttablet. + The data directory MUST be empty at startup. Do NOT reuse a persistent disk. + - Running vtbackup periodically for each shard, for each backup storage location. + - Ensuring that at most one instance runs at a time for a given pair of shard + and backup storage location. + - Retrying vtbackup if it fails. + - Alerting human operators if the failure is persistent. + +The process vtbackup follows to take a new backup has the following steps: + 1. Restore from the most recent backup. + 2. Start a mysqld instance (but no vttablet) from the restored data. + 3. Instruct mysqld to connect to the current shard primary and replicate any + transactions that are new since the last backup. + 4. 
Ask the primary for its current replication position and set that as the goal + for catching up on replication before taking the backup, so the goalposts + don't move. + 5. Wait until replication is caught up to the goal position or beyond. + 6. Stop mysqld and take a new backup. + +Aside from additional replication load while vtbackup's mysqld catches up on +new transactions, the shard should be otherwise unaffected. Existing tablets +will continue to serve, and no new tablets will appear in topology, meaning no +query traffic will ever be routed to vtbackup's mysqld. This silent operation +mode helps make backups minimally disruptive to serving capacity and orthogonal +to the handling of the query path. + +The command-line parameters to vtbackup specify a policy for when a new backup +is needed, and when old backups should be removed. If the existing backups +already satisfy the policy, then vtbackup will do nothing and return success +immediately.`, + Version: servenv.AppVersion.String(), + Args: cobra.NoArgs, + PreRunE: servenv.CobraPreRunE, + RunE: run, + } +) + +func init() { + servenv.RegisterDefaultFlags() + dbconfigs.RegisterFlags(dbconfigs.All...) + mysqlctl.RegisterFlags() + + servenv.MoveFlagsToCobraCommand(Main) + + Main.Flags().DurationVar(&minBackupInterval, "min_backup_interval", minBackupInterval, "Only take a new backup if it's been at least this long since the most recent backup.") + Main.Flags().DurationVar(&minRetentionTime, "min_retention_time", minRetentionTime, "Keep each old backup for at least this long before removing it. Set to 0 to disable pruning of old backups.") + Main.Flags().IntVar(&minRetentionCount, "min_retention_count", minRetentionCount, "Always keep at least this many of the most recent backups in this backup storage location, even if some are older than the min_retention_time. 
This must be at least 1 since a backup must always exist to allow new backups to be made") + Main.Flags().BoolVar(&initialBackup, "initial_backup", initialBackup, "Instead of restoring from backup, initialize an empty database with the provided init_db_sql_file and upload a backup of that for the shard, if the shard has no backups yet. This can be used to seed a brand new shard with an initial, empty backup. If any backups already exist for the shard, this will be considered a successful no-op. This can only be done before the shard exists in topology (i.e. before any tablets are deployed).") + Main.Flags().BoolVar(&allowFirstBackup, "allow_first_backup", allowFirstBackup, "Allow this job to take the first backup of an existing shard.") + Main.Flags().BoolVar(&restartBeforeBackup, "restart_before_backup", restartBeforeBackup, "Perform a mysqld clean/full restart after applying binlogs, but before taking the backup. Only makes sense to work around xtrabackup bugs.") + Main.Flags().BoolVar(&upgradeSafe, "upgrade-safe", upgradeSafe, "Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades.") + + // vttablet-like flags + Main.Flags().StringVar(&initDbNameOverride, "init_db_name_override", initDbNameOverride, "(init parameter) override the name of the db used by vttablet") + Main.Flags().StringVar(&initKeyspace, "init_keyspace", initKeyspace, "(init parameter) keyspace to use for this tablet") + Main.Flags().StringVar(&initShard, "init_shard", initShard, "(init parameter) shard to use for this tablet") + Main.Flags().IntVar(&concurrency, "concurrency", concurrency, "(init restore parameter) how many concurrent files to restore at once") + Main.Flags().StringVar(&incrementalFromPos, "incremental_from_pos", incrementalFromPos, "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. 
If value is 'auto', backup taken from last successful backup position") + + // mysqlctld-like flags + Main.Flags().IntVar(&mysqlPort, "mysql_port", mysqlPort, "mysql port") + Main.Flags().StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "path to the mysql socket") + Main.Flags().DurationVar(&mysqlTimeout, "mysql_timeout", mysqlTimeout, "how long to wait for mysqld startup") + Main.Flags().StringVar(&initDBSQLFile, "init_db_sql_file", initDBSQLFile, "path to .sql file to run after mysql_install_db") + Main.Flags().BoolVar(&detachedMode, "detach", detachedMode, "detached mode - run backups detached from the terminal") + Main.Flags().DurationVar(&keepAliveTimeout, "keep-alive-timeout", keepAliveTimeout, "Wait until timeout elapses after a successful backup before shutting down.") + Main.Flags().BoolVar(&disableRedoLog, "disable-redo-log", disableRedoLog, "Disable InnoDB redo log during replication-from-primary phase of backup.") + + acl.RegisterFlags(Main.Flags()) +} + +func run(_ *cobra.Command, args []string) error { + servenv.Init() + + ctx, cancel := context.WithCancel(context.Background()) + servenv.OnClose(func() { + cancel() + }) + + defer func() { + servenv.ExitChan <- syscall.SIGTERM + <-ctx.Done() + }() + + go servenv.RunDefault() + // Some stats plugins use OnRun to initialize. Wait for them to finish + // initializing before continuing, so we don't lose any stats. + if err := stats.AwaitBackend(ctx); err != nil { + return fmt.Errorf("failed to await stats backend: %w", err) + } + + if detachedMode { + // this method will call os.Exit and kill this process + cmd.DetachFromTerminalAndExit() + } + + defer logutil.Flush() + + if minRetentionCount < 1 { + log.Errorf("min_retention_count must be at least 1 to allow restores to succeed") + exit.Return(1) + } + + // Open connection backup storage. 
+ backupStorage, err := backupstorage.GetBackupStorage() + if err != nil { + return fmt.Errorf("Can't get backup storage: %w", err) + } + defer backupStorage.Close() + // Open connection to topology server. + topoServer := topo.Open() + defer topoServer.Close() + + // Initialize stats. + for _, phaseName := range phaseNames { + phase.Set(phaseName, int64(0)) + } + for phaseName, statuses := range phaseStatuses { + for _, status := range statuses { + phaseStatus.Set([]string{phaseName, status}, 0) + } + } + + // Try to take a backup, if it's been long enough since the last one. + // Skip pruning if backup wasn't fully successful. We don't want to be + // deleting things if the backup process is not healthy. + backupDir := mysqlctl.GetBackupDir(initKeyspace, initShard) + doBackup, err := shouldBackup(ctx, topoServer, backupStorage, backupDir) + if err != nil { + return fmt.Errorf("Can't take backup: %w", err) + } + if doBackup { + if err := takeBackup(ctx, topoServer, backupStorage); err != nil { + return fmt.Errorf("Failed to take backup: %w", err) + } + } + + // Prune old backups. + if err := pruneBackups(ctx, backupStorage, backupDir); err != nil { + return fmt.Errorf("Couldn't prune old backups: %w", err) + } + + if keepAliveTimeout > 0 { + log.Infof("Backup was successful, waiting %s before exiting (or until context expires).", keepAliveTimeout) + select { + case <-time.After(keepAliveTimeout): + case <-ctx.Done(): + } + } + log.Info("Exiting.") + + return nil +} + +func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage backupstorage.BackupStorage) error { + // This is an imaginary tablet alias. The value doesn't matter for anything, + // except that we generate a random UID to ensure the target backup + // directory is unique if multiple vtbackup instances are launched for the + // same shard, at exactly the same second, pointed at the same backup + // storage location. 
+ bigN, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32)) + if err != nil { + return fmt.Errorf("can't generate random tablet UID: %v", err) + } + tabletAlias := &topodatapb.TabletAlias{ + Cell: "vtbackup", + Uid: uint32(bigN.Uint64()), + } + + // Clean up our temporary data dir if we exit for any reason, to make sure + // every invocation of vtbackup starts with a clean slate, and it does not + // accumulate garbage (and run out of disk space) if it's restarted. + tabletDir := mysqlctl.TabletDir(tabletAlias.Uid) + defer func() { + log.Infof("Removing temporary tablet directory: %v", tabletDir) + if err := os.RemoveAll(tabletDir); err != nil { + log.Warningf("Failed to remove temporary tablet directory: %v", err) + } + }() + + // Start up mysqld as if we are mysqlctld provisioning a fresh tablet. + mysqld, mycnf, err := mysqlctl.CreateMysqldAndMycnf(tabletAlias.Uid, mysqlSocket, mysqlPort) + if err != nil { + return fmt.Errorf("failed to initialize mysql config: %v", err) + } + initCtx, initCancel := context.WithTimeout(ctx, mysqlTimeout) + defer initCancel() + initMysqldAt := time.Now() + if err := mysqld.Init(initCtx, mycnf, initDBSQLFile); err != nil { + return fmt.Errorf("failed to initialize mysql data dir and start mysqld: %v", err) + } + deprecatedDurationByPhase.Set("InitMySQLd", int64(time.Since(initMysqldAt).Seconds())) + // Shut down mysqld when we're done. + defer func() { + // Be careful not to use the original context, because we don't want to + // skip shutdown just because we timed out waiting for other things. 
+ mysqlShutdownCtx, mysqlShutdownCancel := context.WithTimeout(context.Background(), 30*time.Second) + defer mysqlShutdownCancel() + if err := mysqld.Shutdown(mysqlShutdownCtx, mycnf, false); err != nil { + log.Errorf("failed to shutdown mysqld: %v", err) + } + }() + + extraEnv := map[string]string{ + "TABLET_ALIAS": topoproto.TabletAliasString(tabletAlias), + } + dbName := initDbNameOverride + if dbName == "" { + dbName = fmt.Sprintf("vt_%s", initKeyspace) + } + + backupParams := mysqlctl.BackupParams{ + Cnf: mycnf, + Mysqld: mysqld, + Logger: logutil.NewConsoleLogger(), + Concurrency: concurrency, + IncrementalFromPos: incrementalFromPos, + HookExtraEnv: extraEnv, + TopoServer: topoServer, + Keyspace: initKeyspace, + Shard: initShard, + TabletAlias: topoproto.TabletAliasString(tabletAlias), + Stats: backupstats.BackupStats(), + UpgradeSafe: upgradeSafe, + } + // In initial_backup mode, just take a backup of this empty database. + if initialBackup { + // Take a backup of this empty DB without restoring anything. + // First, initialize it the way InitShardPrimary would, so this backup + // produces a result that can be used to skip InitShardPrimary entirely. + // This involves resetting replication (to erase any history) and then + // creating the main database and some Vitess system tables. + if err := mysqld.ResetReplication(ctx); err != nil { + return fmt.Errorf("can't reset replication: %v", err) + } + // We need to switch off super_read_only before we create the database. 
+ resetFunc, err := mysqld.SetSuperReadOnly(false) + if err != nil { + return fmt.Errorf("failed to disable super_read_only during backup: %v", err) + } + if resetFunc != nil { + defer func() { + err := resetFunc() + if err != nil { + log.Error("Failed to set super_read_only back to its original value during backup") + } + }() + } + cmd := mysqlctl.GenerateInitialBinlogEntry() + if err := mysqld.ExecuteSuperQueryList(ctx, []string{cmd}); err != nil { + return err + } + + backupParams.BackupTime = time.Now() + // Now we're ready to take the backup. + phase.Set(phaseNameInitialBackup, int64(1)) + defer phase.Set(phaseNameInitialBackup, int64(0)) + if err := mysqlctl.Backup(ctx, backupParams); err != nil { + return fmt.Errorf("backup failed: %v", err) + } + deprecatedDurationByPhase.Set("InitialBackup", int64(time.Since(backupParams.BackupTime).Seconds())) + log.Info("Initial backup successful.") + phase.Set(phaseNameInitialBackup, int64(0)) + return nil + } + + phase.Set(phaseNameRestoreLastBackup, int64(1)) + defer phase.Set(phaseNameRestoreLastBackup, int64(0)) + backupDir := mysqlctl.GetBackupDir(initKeyspace, initShard) + log.Infof("Restoring latest backup from directory %v", backupDir) + restoreAt := time.Now() + params := mysqlctl.RestoreParams{ + Cnf: mycnf, + Mysqld: mysqld, + Logger: logutil.NewConsoleLogger(), + Concurrency: concurrency, + HookExtraEnv: extraEnv, + DeleteBeforeRestore: true, + DbName: dbName, + Keyspace: initKeyspace, + Shard: initShard, + Stats: backupstats.RestoreStats(), + } + backupManifest, err := mysqlctl.Restore(ctx, params) + var restorePos replication.Position + switch err { + case nil: + // if err is nil, we expect backupManifest to be non-nil + restorePos = backupManifest.Position + log.Infof("Successfully restored from backup at replication position %v", restorePos) + case mysqlctl.ErrNoBackup: + // There is no backup found, but we may be taking the initial backup of a shard + if !allowFirstBackup { + return fmt.Errorf("no 
backup found; not starting up empty since --initial_backup flag was not enabled") + } + restorePos = replication.Position{} + default: + return fmt.Errorf("can't restore from backup: %v", err) + } + deprecatedDurationByPhase.Set("RestoreLastBackup", int64(time.Since(restoreAt).Seconds())) + phase.Set(phaseNameRestoreLastBackup, int64(0)) + + // As of MySQL 8.0.21, you can disable redo logging using the ALTER INSTANCE + // DISABLE INNODB REDO_LOG statement. This functionality is intended for + // loading data into a new MySQL instance. Disabling redo logging speeds up + // data loading by avoiding redo log writes and doublewrite buffering. + disabledRedoLog := false + if disableRedoLog { + if err := mysqld.DisableRedoLog(ctx); err != nil { + log.Warningf("Error disabling redo logging: %v", err) + } else { + disabledRedoLog = true + } + } + + // We have restored a backup. Now start replication. + if err := resetReplication(ctx, restorePos, mysqld); err != nil { + return fmt.Errorf("error resetting replication: %v", err) + } + if err := startReplication(ctx, mysqld, topoServer); err != nil { + return fmt.Errorf("error starting replication: %v", err) + } + + log.Info("get the current primary replication position, and wait until we catch up") + // Get the current primary replication position, and wait until we catch up + // to that point. We do this instead of looking at ReplicationLag + // because that value can + // sometimes lie and tell you there's 0 lag when actually replication is + // stopped. Also, if replication is making progress but is too slow to ever + // catch up to live changes, we'd rather take a backup of something rather + // than timing out. + tmc := tmclient.NewTabletManagerClient() + // Keep retrying if we can't contact the primary. The primary might be + // changing, moving, or down temporarily. 
+ var primaryPos replication.Position + err = retryOnError(ctx, func() error { + // Add a per-operation timeout so we re-read topo if the primary is unreachable. + opCtx, optCancel := context.WithTimeout(ctx, operationTimeout) + defer optCancel() + pos, err := getPrimaryPosition(opCtx, tmc, topoServer) + if err != nil { + return fmt.Errorf("can't get the primary replication position: %v", err) + } + primaryPos = pos + return nil + }) + if err != nil { + return fmt.Errorf("can't get the primary replication position after all retries: %v", err) + } + + log.Infof("takeBackup: primary position is: %s", primaryPos.String()) + + // Remember the time when we fetched the primary position, not when we caught + // up to it, so the timestamp on our backup is honest (assuming we make it + // to the goal position). + backupParams.BackupTime = time.Now() + + // Wait for replication to catch up. + phase.Set(phaseNameCatchupReplication, int64(1)) + defer phase.Set(phaseNameCatchupReplication, int64(0)) + + var ( + lastStatus replication.ReplicationStatus + status replication.ReplicationStatus + statusErr error + + waitStartTime = time.Now() + ) + for { + select { + case <-ctx.Done(): + return fmt.Errorf("error in replication catch up: %v", ctx.Err()) + case <-time.After(time.Second): + } + + lastStatus = status + status, statusErr = mysqld.ReplicationStatus() + if statusErr != nil { + log.Warningf("Error getting replication status: %v", statusErr) + continue + } + if status.Position.AtLeast(primaryPos) { + // We're caught up on replication to at least the point the primary + // was at when this vtbackup run started. 
+ log.Infof("Replication caught up to %v after %v", status.Position, time.Since(waitStartTime)) + deprecatedDurationByPhase.Set("CatchUpReplication", int64(time.Since(waitStartTime).Seconds())) + break + } + if !lastStatus.Position.IsZero() { + if status.Position.Equal(lastStatus.Position) { + phaseStatus.Set([]string{phaseNameCatchupReplication, phaseStatusCatchupReplicationStalled}, 1) + } else { + phaseStatus.Set([]string{phaseNameCatchupReplication, phaseStatusCatchupReplicationStalled}, 0) + } + } + if !status.Healthy() { + log.Warning("Replication has stopped before backup could be taken. Trying to restart replication.") + phaseStatus.Set([]string{phaseNameCatchupReplication, phaseStatusCatchupReplicationStopped}, 1) + if err := startReplication(ctx, mysqld, topoServer); err != nil { + log.Warningf("Failed to restart replication: %v", err) + } + } else { + phaseStatus.Set([]string{phaseNameCatchupReplication, phaseStatusCatchupReplicationStopped}, 0) + } + } + phase.Set(phaseNameCatchupReplication, int64(0)) + + // Stop replication and see where we are. + if err := mysqld.StopReplication(nil); err != nil { + return fmt.Errorf("can't stop replication: %v", err) + } + + // Did we make any progress? + status, statusErr = mysqld.ReplicationStatus() + if statusErr != nil { + return fmt.Errorf("can't get replication status: %v", err) + } + log.Infof("Replication caught up to %v", status.Position) + if !status.Position.AtLeast(primaryPos) && status.Position.Equal(restorePos) { + return fmt.Errorf("not taking backup: replication did not make any progress from restore point: %v", restorePos) + } + phaseStatus.Set([]string{phaseNameCatchupReplication, phaseStatusCatchupReplicationStalled}, 0) + phaseStatus.Set([]string{phaseNameCatchupReplication, phaseStatusCatchupReplicationStopped}, 0) + + // Re-enable redo logging. 
+ if disabledRedoLog { + if err := mysqld.EnableRedoLog(ctx); err != nil { + return fmt.Errorf("failed to re-enable redo log: %v", err) + } + } + + if restartBeforeBackup { + restartAt := time.Now() + log.Info("Proceeding with clean MySQL shutdown and startup to flush all buffers.") + // Prep for full/clean shutdown (not typically the default) + if err := mysqld.ExecuteSuperQuery(ctx, "SET GLOBAL innodb_fast_shutdown=0"); err != nil { + return fmt.Errorf("Could not prep for full shutdown: %v", err) + } + // Shutdown, waiting for it to finish + if err := mysqld.Shutdown(ctx, mycnf, true); err != nil { + return fmt.Errorf("Something went wrong during full MySQL shutdown: %v", err) + } + // Start MySQL, waiting for it to come up + if err := mysqld.Start(ctx, mycnf); err != nil { + return fmt.Errorf("Could not start MySQL after full shutdown: %v", err) + } + deprecatedDurationByPhase.Set("RestartBeforeBackup", int64(time.Since(restartAt).Seconds())) + } + + // Now we can take a new backup. + backupAt := time.Now() + phase.Set(phaseNameTakeNewBackup, int64(1)) + defer phase.Set(phaseNameTakeNewBackup, int64(0)) + if err := mysqlctl.Backup(ctx, backupParams); err != nil { + return fmt.Errorf("error taking backup: %v", err) + } + deprecatedDurationByPhase.Set("TakeNewBackup", int64(time.Since(backupAt).Seconds())) + phase.Set(phaseNameTakeNewBackup, int64(0)) + + // Return a non-zero exit code if we didn't meet the replication position + // goal, even though we took a backup that pushes the high-water mark up. 
+ if !status.Position.AtLeast(primaryPos) { + return fmt.Errorf("replication caught up to %v but didn't make it to the goal of %v; a backup was taken anyway to save partial progress, but the operation should still be retried since not all expected data is backed up", status.Position, primaryPos) + } + log.Info("Backup successful.") + return nil +} + +func resetReplication(ctx context.Context, pos replication.Position, mysqld mysqlctl.MysqlDaemon) error { + cmds := []string{ + "STOP SLAVE", + "RESET SLAVE ALL", // "ALL" makes it forget replication source host:port. + } + if err := mysqld.ExecuteSuperQueryList(ctx, cmds); err != nil { + return vterrors.Wrap(err, "failed to reset replication") + } + + // Check if we have a position to resume from, if not reset to the beginning of time + if !pos.IsZero() { + // Set the position at which to resume from the replication source. + if err := mysqld.SetReplicationPosition(ctx, pos); err != nil { + return vterrors.Wrap(err, "failed to set replica position") + } + } else { + if err := mysqld.ResetReplication(ctx); err != nil { + return vterrors.Wrap(err, "failed to reset replication") + } + } + return nil +} + +func startReplication(ctx context.Context, mysqld mysqlctl.MysqlDaemon, topoServer *topo.Server) error { + si, err := topoServer.GetShard(ctx, initKeyspace, initShard) + if err != nil { + return vterrors.Wrap(err, "can't read shard") + } + if topoproto.TabletAliasIsZero(si.PrimaryAlias) { + // Normal tablets will sit around waiting to be reparented in this case. + // Since vtbackup is a batch job, we just have to fail. + return fmt.Errorf("can't start replication after restore: shard %v/%v has no primary", initKeyspace, initShard) + } + // TODO(enisoc): Support replicating from another replica, preferably in the + // same cell, preferably rdonly, to reduce load on the primary. 
+ ti, err := topoServer.GetTablet(ctx, si.PrimaryAlias) + if err != nil { + return vterrors.Wrapf(err, "Cannot read primary tablet %v", si.PrimaryAlias) + } + + // Stop replication (in case we're restarting), set replication source, and start replication. + if err := mysqld.SetReplicationSource(ctx, ti.Tablet.MysqlHostname, ti.Tablet.MysqlPort, true /* stopReplicationBefore */, true /* startReplicationAfter */); err != nil { + return vterrors.Wrap(err, "MysqlDaemon.SetReplicationSource failed") + } + return nil +} + +func getPrimaryPosition(ctx context.Context, tmc tmclient.TabletManagerClient, ts *topo.Server) (replication.Position, error) { + si, err := ts.GetShard(ctx, initKeyspace, initShard) + if err != nil { + return replication.Position{}, vterrors.Wrap(err, "can't read shard") + } + if topoproto.TabletAliasIsZero(si.PrimaryAlias) { + // Normal tablets will sit around waiting to be reparented in this case. + // Since vtbackup is a batch job, we just have to fail. + return replication.Position{}, fmt.Errorf("shard %v/%v has no primary", initKeyspace, initShard) + } + ti, err := ts.GetTablet(ctx, si.PrimaryAlias) + if err != nil { + return replication.Position{}, fmt.Errorf("can't get primary tablet record %v: %v", topoproto.TabletAliasString(si.PrimaryAlias), err) + } + posStr, err := tmc.PrimaryPosition(ctx, ti.Tablet) + if err != nil { + return replication.Position{}, fmt.Errorf("can't get primary replication position: %v", err) + } + pos, err := replication.DecodePosition(posStr) + if err != nil { + return replication.Position{}, fmt.Errorf("can't decode primary replication position %q: %v", posStr, err) + } + return pos, nil +} + +// retryOnError keeps calling the given function until it succeeds, or the given +// Context is done. It waits an exponentially increasing amount of time between +// retries to avoid hot-looping. The only time this returns an error is if the +// Context is cancelled. 
+func retryOnError(ctx context.Context, fn func() error) error { + waitTime := 1 * time.Second + + for { + err := fn() + if err == nil { + return nil + } + log.Errorf("Waiting %v to retry after error: %v", waitTime, err) + + select { + case <-ctx.Done(): + log.Errorf("Not retrying after error: %v", ctx.Err()) + return ctx.Err() + case <-time.After(waitTime): + waitTime *= 2 + } + } +} + +func pruneBackups(ctx context.Context, backupStorage backupstorage.BackupStorage, backupDir string) error { + if minRetentionTime == 0 { + log.Info("Pruning of old backups is disabled.") + return nil + } + backups, err := backupStorage.ListBackups(ctx, backupDir) + if err != nil { + return fmt.Errorf("can't list backups: %v", err) + } + numBackups := len(backups) + if numBackups <= minRetentionCount { + log.Infof("Found %v backups. Not pruning any since this is within the min_retention_count of %v.", numBackups, minRetentionCount) + return nil + } + // We have more than the minimum retention count, so we could afford to + // prune some. See if any are beyond the minimum retention time. + // ListBackups returns them sorted by oldest first. + for _, backup := range backups { + backupTime, err := parseBackupTime(backup.Name()) + if err != nil { + return err + } + if time.Since(backupTime) < minRetentionTime { + // The oldest remaining backup is not old enough to prune. + log.Infof("Oldest backup taken at %v has not reached min_retention_time of %v. Nothing left to prune.", backupTime, minRetentionTime) + break + } + // Remove the backup. + log.Infof("Removing old backup %v from %v, since it's older than min_retention_time of %v", backup.Name(), backupDir, minRetentionTime) + if err := backupStorage.RemoveBackup(ctx, backupDir, backup.Name()); err != nil { + return fmt.Errorf("couldn't remove backup %v from %v: %v", backup.Name(), backupDir, err) + } + // We successfully removed one backup. Can we afford to prune any more? 
+ numBackups-- + if numBackups == minRetentionCount { + log.Infof("Successfully pruned backup count to min_retention_count of %v.", minRetentionCount) + break + } + } + return nil +} + +func parseBackupTime(name string) (time.Time, error) { + // Backup names are formatted as "date.time.tablet-alias". + parts := strings.Split(name, ".") + if len(parts) != 3 { + return time.Time{}, fmt.Errorf("backup name not in expected format (date.time.tablet-alias): %v", name) + } + backupTime, err := time.Parse(mysqlctl.BackupTimestampFormat, fmt.Sprintf("%s.%s", parts[0], parts[1])) + if err != nil { + return time.Time{}, fmt.Errorf("can't parse timestamp from backup %q: %v", name, err) + } + return backupTime, nil +} + +func shouldBackup(ctx context.Context, topoServer *topo.Server, backupStorage backupstorage.BackupStorage, backupDir string) (bool, error) { + // Look for the most recent, complete backup. + backups, err := backupStorage.ListBackups(ctx, backupDir) + if err != nil { + return false, fmt.Errorf("can't list backups: %v", err) + } + lastBackup := lastCompleteBackup(ctx, backups) + + // Check preconditions for initial_backup mode. + if initialBackup { + // Check if any backups for the shard already exist in this backup storage location. + if lastBackup != nil { + log.Infof("At least one complete backup already exists, so there's no need to seed an empty backup. Doing nothing.") + return false, nil + } + + // Check whether the shard exists. + _, shardErr := topoServer.GetShard(ctx, initKeyspace, initShard) + switch { + case shardErr == nil: + // If the shard exists, we should make sure none of the tablets are + // already in a serving state, because then they might have data + // that conflicts with the initial backup we're about to take. + tablets, err := topoServer.GetTabletMapForShard(ctx, initKeyspace, initShard) + if err != nil { + // We don't know for sure whether any tablets are serving, + // so it's not safe to continue. 
+ return false, fmt.Errorf("failed to check whether shard %v/%v has serving tablets before doing initial backup: %v", initKeyspace, initShard, err) + } + for tabletAlias, tablet := range tablets { + // Check if any tablet has its type set to one of the serving types. + // If so, it's too late to do an initial backup. + if tablet.IsInServingGraph() { + return false, fmt.Errorf("refusing to upload initial backup of empty database: the shard %v/%v already has at least one tablet that may be serving (%v); you must take a backup from a live tablet instead", initKeyspace, initShard, tabletAlias) + } + } + log.Infof("Shard %v/%v exists but has no serving tablets.", initKeyspace, initShard) + case topo.IsErrType(shardErr, topo.NoNode): + // The shard doesn't exist, so we know no tablets are running. + log.Infof("Shard %v/%v doesn't exist; assuming it has no serving tablets.", initKeyspace, initShard) + default: + // If we encounter any other error, we don't know for sure whether + // the shard exists, so it's not safe to continue. + return false, fmt.Errorf("failed to check whether shard %v/%v exists before doing initial backup: %v", initKeyspace, initShard, err) + } + + log.Infof("Shard %v/%v has no existing backups. Creating initial backup.", initKeyspace, initShard) + return true, nil + } + + // We need at least one backup so we can restore first, unless the user explicitly says we don't + if len(backups) == 0 && !allowFirstBackup { + return false, fmt.Errorf("no existing backups to restore from; backup is not possible since --initial_backup flag was not enabled") + } + if lastBackup == nil { + if allowFirstBackup { + // There's no complete backup, but we were told to take one from scratch anyway. + return true, nil + } + return false, fmt.Errorf("no complete backups to restore from; backup is not possible since --initial_backup flag was not enabled") + } + + // Has it been long enough since the last complete backup to need a new one? 
+ if minBackupInterval == 0 { + // No minimum interval is set, so always backup. + return true, nil + } + lastBackupTime, err := parseBackupTime(lastBackup.Name()) + if err != nil { + return false, fmt.Errorf("can't check last backup time: %v", err) + } + if elapsedTime := time.Since(lastBackupTime); elapsedTime < minBackupInterval { + // It hasn't been long enough yet. + log.Infof("Skipping backup since only %v has elapsed since the last backup at %v, which is less than the min_backup_interval of %v.", elapsedTime, lastBackupTime, minBackupInterval) + return false, nil + } + // It has been long enough. + log.Infof("The last backup was taken at %v, which is older than the min_backup_interval of %v.", lastBackupTime, minBackupInterval) + return true, nil +} + +func lastCompleteBackup(ctx context.Context, backups []backupstorage.BackupHandle) backupstorage.BackupHandle { + if len(backups) == 0 { + return nil + } + + // Backups are sorted in ascending order by start time. Start at the end. + for i := len(backups) - 1; i >= 0; i-- { + // Check if this backup is complete by looking for the MANIFEST file, + // which is written at the end after all files are uploaded. + backup := backups[i] + if err := checkBackupComplete(ctx, backup); err != nil { + log.Warningf("Ignoring backup %v because it's incomplete: %v", backup.Name(), err) + continue + } + return backup + } + + return nil +} + +func checkBackupComplete(ctx context.Context, backup backupstorage.BackupHandle) error { + manifest, err := mysqlctl.GetBackupManifest(ctx, backup) + if err != nil { + return fmt.Errorf("can't get backup MANIFEST: %v", err) + } + + log.Infof("Found complete backup %v taken at position %v", backup.Name(), manifest.Position.String()) + return nil +} diff --git a/go/cmd/vtbackup/docgen/main.go b/go/cmd/vtbackup/docgen/main.go new file mode 100644 index 00000000000..90aa90ffa98 --- /dev/null +++ b/go/cmd/vtbackup/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/vtbackup/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.Main, dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/vtbackup/vtbackup.go b/go/cmd/vtbackup/vtbackup.go index ff99ab249e1..37dcadc9b19 100644 --- a/go/cmd/vtbackup/vtbackup.go +++ b/go/cmd/vtbackup/vtbackup.go @@ -14,763 +14,19 @@ See the License for the specific language governing permissions and limitations under the License. */ -/* -vtbackup is a batch command to perform a single pass of backup maintenance for a shard. - -When run periodically for each shard, vtbackup can ensure these configurable policies: -* There is always a recent backup for the shard. -* Old backups for the shard are removed. - -Whatever system launches vtbackup is responsible for the following: - - Running vtbackup with similar flags that would be used for a vttablet and - mysqlctld in the target shard to be backed up. - - Provisioning as much disk space for vtbackup as would be given to vttablet. - The data directory MUST be empty at startup. Do NOT reuse a persistent disk. 
- - Running vtbackup periodically for each shard, for each backup storage location. - - Ensuring that at most one instance runs at a time for a given pair of shard - and backup storage location. - - Retrying vtbackup if it fails. - - Alerting human operators if the failure is persistent. - -The process vtbackup follows to take a new backup is as follows: - 1. Restore from the most recent backup. - 2. Start a mysqld instance (but no vttablet) from the restored data. - 3. Instruct mysqld to connect to the current shard primary and replicate any - transactions that are new since the last backup. - 4. Ask the primary for its current replication position and set that as the goal - for catching up on replication before taking the backup, so the goalposts - don't move. - 5. Wait until replication is caught up to the goal position or beyond. - 6. Stop mysqld and take a new backup. - -Aside from additional replication load while vtbackup's mysqld catches up on -new transactions, the shard should be otherwise unaffected. Existing tablets -will continue to serve, and no new tablets will appear in topology, meaning no -query traffic will ever be routed to vtbackup's mysqld. This silent operation -mode helps make backups minimally disruptive to serving capacity and orthogonal -to the handling of the query path. - -The command-line parameters to vtbackup specify a policy for when a new backup -is needed, and when old backups should be removed. If the existing backups -already satisfy the policy, then vtbackup will do nothing and return success -immediately. 
-*/ package main import ( - "context" - "crypto/rand" - "fmt" - "math" - "math/big" - "os" - "strings" - "syscall" - "time" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/cmd" + "vitess.io/vitess/go/cmd/vtbackup/cli" "vitess.io/vitess/go/exit" - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/stats" - "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/mysqlctl" - "vitess.io/vitess/go/vt/mysqlctl/backupstats" - "vitess.io/vitess/go/vt/mysqlctl/backupstorage" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/vterrors" - _ "vitess.io/vitess/go/vt/vttablet/grpctmclient" - "vitess.io/vitess/go/vt/vttablet/tmclient" ) -const ( - // operationTimeout is the timeout for individual operations like fetching - // the primary position. This does not impose an overall timeout on - // long-running processes like taking the backup. It only applies to - // steps along the way that should complete quickly. This ensures we don't - // place a hard cap on the overall time for a backup, while also not waiting - // forever for things that should be quick. 
- operationTimeout = 1 * time.Minute -) - -var ( - minBackupInterval time.Duration - minRetentionTime time.Duration - minRetentionCount = 1 - initialBackup bool - allowFirstBackup bool - restartBeforeBackup bool - // vttablet-like flags - initDbNameOverride string - initKeyspace string - initShard string - concurrency = 4 - incrementalFromPos string - // mysqlctld-like flags - mysqlPort = 3306 - mysqlSocket string - mysqlTimeout = 5 * time.Minute - initDBSQLFile string - detachedMode bool - keepAliveTimeout = 0 * time.Second - disableRedoLog = false - durationByPhase = stats.NewGaugesWithSingleLabel( - "DurationByPhaseSeconds", - "How long it took vtbackup to perform each phase (in seconds).", - "phase", - ) -) - -func registerFlags(fs *pflag.FlagSet) { - fs.DurationVar(&minBackupInterval, "min_backup_interval", minBackupInterval, "Only take a new backup if it's been at least this long since the most recent backup.") - fs.DurationVar(&minRetentionTime, "min_retention_time", minRetentionTime, "Keep each old backup for at least this long before removing it. Set to 0 to disable pruning of old backups.") - fs.IntVar(&minRetentionCount, "min_retention_count", minRetentionCount, "Always keep at least this many of the most recent backups in this backup storage location, even if some are older than the min_retention_time. This must be at least 1 since a backup must always exist to allow new backups to be made") - fs.BoolVar(&initialBackup, "initial_backup", initialBackup, "Instead of restoring from backup, initialize an empty database with the provided init_db_sql_file and upload a backup of that for the shard, if the shard has no backups yet. This can be used to seed a brand new shard with an initial, empty backup. If any backups already exist for the shard, this will be considered a successful no-op. This can only be done before the shard exists in topology (i.e. 
before any tablets are deployed).") - fs.BoolVar(&allowFirstBackup, "allow_first_backup", allowFirstBackup, "Allow this job to take the first backup of an existing shard.") - fs.BoolVar(&restartBeforeBackup, "restart_before_backup", restartBeforeBackup, "Perform a mysqld clean/full restart after applying binlogs, but before taking the backup. Only makes sense to work around xtrabackup bugs.") - // vttablet-like flags - fs.StringVar(&initDbNameOverride, "init_db_name_override", initDbNameOverride, "(init parameter) override the name of the db used by vttablet") - fs.StringVar(&initKeyspace, "init_keyspace", initKeyspace, "(init parameter) keyspace to use for this tablet") - fs.StringVar(&initShard, "init_shard", initShard, "(init parameter) shard to use for this tablet") - fs.IntVar(&concurrency, "concurrency", concurrency, "(init restore parameter) how many concurrent files to restore at once") - fs.StringVar(&incrementalFromPos, "incremental_from_pos", incrementalFromPos, "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. 
If value is 'auto', backup taken from last successful backup position") - // mysqlctld-like flags - fs.IntVar(&mysqlPort, "mysql_port", mysqlPort, "mysql port") - fs.StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "path to the mysql socket") - fs.DurationVar(&mysqlTimeout, "mysql_timeout", mysqlTimeout, "how long to wait for mysqld startup") - fs.StringVar(&initDBSQLFile, "init_db_sql_file", initDBSQLFile, "path to .sql file to run after mysql_install_db") - fs.BoolVar(&detachedMode, "detach", detachedMode, "detached mode - run backups detached from the terminal") - fs.DurationVar(&keepAliveTimeout, "keep-alive-timeout", keepAliveTimeout, "Wait until timeout elapses after a successful backup before shutting down.") - fs.BoolVar(&disableRedoLog, "disable-redo-log", disableRedoLog, "Disable InnoDB redo log during replication-from-primary phase of backup.") - - acl.RegisterFlags(fs) -} - -func init() { - servenv.RegisterDefaultFlags() - dbconfigs.RegisterFlags(dbconfigs.All...) - mysqlctl.RegisterFlags() - servenv.OnParse(registerFlags) -} - func main() { defer exit.Recover() - servenv.ParseFlags("vtbackup") - servenv.Init() - ctx, cancel := context.WithCancel(context.Background()) - servenv.OnClose(func() { - cancel() - }) - - defer func() { - servenv.ExitChan <- syscall.SIGTERM - <-ctx.Done() - }() - - go servenv.RunDefault() - - if detachedMode { - // this method will call os.Exit and kill this process - cmd.DetachFromTerminalAndExit() - } - - defer logutil.Flush() - - if minRetentionCount < 1 { - log.Errorf("min_retention_count must be at least 1 to allow restores to succeed") - exit.Return(1) - } - - // Open connection backup storage. - backupStorage, err := backupstorage.GetBackupStorage() - if err != nil { - log.Errorf("Can't get backup storage: %v", err) + if err := cli.Main.Execute(); err != nil { + log.Error(err) exit.Return(1) } - defer backupStorage.Close() - // Open connection to topology server. 
- topoServer := topo.Open() - defer topoServer.Close() - - // Try to take a backup, if it's been long enough since the last one. - // Skip pruning if backup wasn't fully successful. We don't want to be - // deleting things if the backup process is not healthy. - backupDir := mysqlctl.GetBackupDir(initKeyspace, initShard) - doBackup, err := shouldBackup(ctx, topoServer, backupStorage, backupDir) - if err != nil { - log.Errorf("Can't take backup: %v", err) - exit.Return(1) - } - if doBackup { - if err := takeBackup(ctx, topoServer, backupStorage); err != nil { - log.Errorf("Failed to take backup: %v", err) - exit.Return(1) - } - } - - // Prune old backups. - if err := pruneBackups(ctx, backupStorage, backupDir); err != nil { - log.Errorf("Couldn't prune old backups: %v", err) - exit.Return(1) - } - - if keepAliveTimeout > 0 { - log.Infof("Backup was successful, waiting %s before exiting (or until context expires).", keepAliveTimeout) - select { - case <-time.After(keepAliveTimeout): - case <-ctx.Done(): - } - } - log.Info("Exiting.") -} - -func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage backupstorage.BackupStorage) error { - // This is an imaginary tablet alias. The value doesn't matter for anything, - // except that we generate a random UID to ensure the target backup - // directory is unique if multiple vtbackup instances are launched for the - // same shard, at exactly the same second, pointed at the same backup - // storage location. - bigN, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32)) - if err != nil { - return fmt.Errorf("can't generate random tablet UID: %v", err) - } - tabletAlias := &topodatapb.TabletAlias{ - Cell: "vtbackup", - Uid: uint32(bigN.Uint64()), - } - - // Clean up our temporary data dir if we exit for any reason, to make sure - // every invocation of vtbackup starts with a clean slate, and it does not - // accumulate garbage (and run out of disk space) if it's restarted. 
- tabletDir := mysqlctl.TabletDir(tabletAlias.Uid) - defer func() { - log.Infof("Removing temporary tablet directory: %v", tabletDir) - if err := os.RemoveAll(tabletDir); err != nil { - log.Warningf("Failed to remove temporary tablet directory: %v", err) - } - }() - - // Start up mysqld as if we are mysqlctld provisioning a fresh tablet. - mysqld, mycnf, err := mysqlctl.CreateMysqldAndMycnf(tabletAlias.Uid, mysqlSocket, mysqlPort) - if err != nil { - return fmt.Errorf("failed to initialize mysql config: %v", err) - } - initCtx, initCancel := context.WithTimeout(ctx, mysqlTimeout) - defer initCancel() - initMysqldAt := time.Now() - if err := mysqld.Init(initCtx, mycnf, initDBSQLFile); err != nil { - return fmt.Errorf("failed to initialize mysql data dir and start mysqld: %v", err) - } - durationByPhase.Set("InitMySQLd", int64(time.Since(initMysqldAt).Seconds())) - // Shut down mysqld when we're done. - defer func() { - // Be careful not to use the original context, because we don't want to - // skip shutdown just because we timed out waiting for other things. 
- mysqlShutdownCtx, mysqlShutdownCancel := context.WithTimeout(context.Background(), 30*time.Second) - defer mysqlShutdownCancel() - if err := mysqld.Shutdown(mysqlShutdownCtx, mycnf, false); err != nil { - log.Errorf("failed to shutdown mysqld: %v", err) - } - }() - - extraEnv := map[string]string{ - "TABLET_ALIAS": topoproto.TabletAliasString(tabletAlias), - } - dbName := initDbNameOverride - if dbName == "" { - dbName = fmt.Sprintf("%s%s", topoproto.VtDbPrefix, initKeyspace) - } - - backupParams := mysqlctl.BackupParams{ - Cnf: mycnf, - Mysqld: mysqld, - Logger: logutil.NewConsoleLogger(), - Concurrency: concurrency, - IncrementalFromPos: incrementalFromPos, - HookExtraEnv: extraEnv, - TopoServer: topoServer, - Keyspace: initKeyspace, - Shard: initShard, - TabletAlias: topoproto.TabletAliasString(tabletAlias), - Stats: backupstats.BackupStats(), - } - // In initial_backup mode, just take a backup of this empty database. - if initialBackup { - // Take a backup of this empty DB without restoring anything. - // First, initialize it the way InitShardPrimary would, so this backup - // produces a result that can be used to skip InitShardPrimary entirely. - // This involves resetting replication (to erase any history) and then - // creating the main database and some Vitess system tables. - if err := mysqld.ResetReplication(ctx); err != nil { - return fmt.Errorf("can't reset replication: %v", err) - } - // We need to switch off super_read_only before we create the database. 
- resetFunc, err := mysqld.SetSuperReadOnly(false) - if err != nil { - return fmt.Errorf("failed to disable super_read_only during backup: %v", err) - } - if resetFunc != nil { - defer func() { - err := resetFunc() - if err != nil { - log.Error("Failed to set super_read_only back to its original value during backup") - } - }() - } - cmd := mysqlctl.GenerateInitialBinlogEntry() - if err := mysqld.ExecuteSuperQueryList(ctx, []string{cmd}); err != nil { - return err - } - - backupParams.BackupTime = time.Now() - // Now we're ready to take the backup. - if err := mysqlctl.Backup(ctx, backupParams); err != nil { - return fmt.Errorf("backup failed: %v", err) - } - durationByPhase.Set("InitialBackup", int64(time.Since(backupParams.BackupTime).Seconds())) - log.Info("Initial backup successful.") - return nil - } - - backupDir := mysqlctl.GetBackupDir(initKeyspace, initShard) - log.Infof("Restoring latest backup from directory %v", backupDir) - restoreAt := time.Now() - params := mysqlctl.RestoreParams{ - Cnf: mycnf, - Mysqld: mysqld, - Logger: logutil.NewConsoleLogger(), - Concurrency: concurrency, - HookExtraEnv: extraEnv, - DeleteBeforeRestore: true, - DbName: dbName, - Keyspace: initKeyspace, - Shard: initShard, - Stats: backupstats.RestoreStats(), - } - backupManifest, err := mysqlctl.Restore(ctx, params) - var restorePos mysql.Position - switch err { - case nil: - // if err is nil, we expect backupManifest to be non-nil - restorePos = backupManifest.Position - log.Infof("Successfully restored from backup at replication position %v", restorePos) - case mysqlctl.ErrNoBackup: - // There is no backup found, but we may be taking the initial backup of a shard - if !allowFirstBackup { - return fmt.Errorf("no backup found; not starting up empty since --initial_backup flag was not enabled") - } - restorePos = mysql.Position{} - default: - return fmt.Errorf("can't restore from backup: %v", err) - } - durationByPhase.Set("RestoreLastBackup", 
int64(time.Since(restoreAt).Seconds())) - - // As of MySQL 8.0.21, you can disable redo logging using the ALTER INSTANCE - // DISABLE INNODB REDO_LOG statement. This functionality is intended for - // loading data into a new MySQL instance. Disabling redo logging speeds up - // data loading by avoiding redo log writes and doublewrite buffering. - disabledRedoLog := false - if disableRedoLog { - if err := mysqld.DisableRedoLog(ctx); err != nil { - log.Warningf("Error disabling redo logging: %v", err) - } else { - disabledRedoLog = true - } - } - - // We have restored a backup. Now start replication. - if err := resetReplication(ctx, restorePos, mysqld); err != nil { - return fmt.Errorf("error resetting replication: %v", err) - } - if err := startReplication(ctx, mysqld, topoServer); err != nil { - return fmt.Errorf("error starting replication: %v", err) - } - - log.Info("get the current primary replication position, and wait until we catch up") - // Get the current primary replication position, and wait until we catch up - // to that point. We do this instead of looking at ReplicationLag - // because that value can - // sometimes lie and tell you there's 0 lag when actually replication is - // stopped. Also, if replication is making progress but is too slow to ever - // catch up to live changes, we'd rather take a backup of something rather - // than timing out. - tmc := tmclient.NewTabletManagerClient() - // Keep retrying if we can't contact the primary. The primary might be - // changing, moving, or down temporarily. - var primaryPos mysql.Position - err = retryOnError(ctx, func() error { - // Add a per-operation timeout so we re-read topo if the primary is unreachable. 
- opCtx, optCancel := context.WithTimeout(ctx, operationTimeout) - defer optCancel() - pos, err := getPrimaryPosition(opCtx, tmc, topoServer) - if err != nil { - return fmt.Errorf("can't get the primary replication position: %v", err) - } - primaryPos = pos - return nil - }) - if err != nil { - return fmt.Errorf("can't get the primary replication position after all retries: %v", err) - } - - log.Infof("takeBackup: primary position is: %s", primaryPos.String()) - - // Remember the time when we fetched the primary position, not when we caught - // up to it, so the timestamp on our backup is honest (assuming we make it - // to the goal position). - backupParams.BackupTime = time.Now() - - // Wait for replication to catch up. - waitStartTime := time.Now() - for { - select { - case <-ctx.Done(): - return fmt.Errorf("error in replication catch up: %v", ctx.Err()) - case <-time.After(time.Second): - } - - status, statusErr := mysqld.ReplicationStatus() - if statusErr != nil { - log.Warningf("Error getting replication status: %v", statusErr) - continue - } - if status.Position.AtLeast(primaryPos) { - // We're caught up on replication to at least the point the primary - // was at when this vtbackup run started. - log.Infof("Replication caught up to %v after %v", status.Position, time.Since(waitStartTime)) - durationByPhase.Set("CatchUpReplication", int64(time.Since(waitStartTime).Seconds())) - break - } - if !status.Healthy() { - log.Warning("Replication has stopped before backup could be taken. Trying to restart replication.") - if err := startReplication(ctx, mysqld, topoServer); err != nil { - log.Warningf("Failed to restart replication: %v", err) - } - } - } - - // Stop replication and see where we are. - if err := mysqld.StopReplication(nil); err != nil { - return fmt.Errorf("can't stop replication: %v", err) - } - - // Did we make any progress? 
- status, err := mysqld.ReplicationStatus() - if err != nil { - return fmt.Errorf("can't get replication status: %v", err) - } - log.Infof("Replication caught up to %v", status.Position) - if !status.Position.AtLeast(primaryPos) && status.Position.Equal(restorePos) { - return fmt.Errorf("not taking backup: replication did not make any progress from restore point: %v", restorePos) - } - - // Re-enable redo logging. - if disabledRedoLog { - if err := mysqld.EnableRedoLog(ctx); err != nil { - return fmt.Errorf("failed to re-enable redo log: %v", err) - } - } - - if restartBeforeBackup { - restartAt := time.Now() - log.Info("Proceeding with clean MySQL shutdown and startup to flush all buffers.") - // Prep for full/clean shutdown (not typically the default) - if err := mysqld.ExecuteSuperQuery(ctx, "SET GLOBAL innodb_fast_shutdown=0"); err != nil { - return fmt.Errorf("Could not prep for full shutdown: %v", err) - } - // Shutdown, waiting for it to finish - if err := mysqld.Shutdown(ctx, mycnf, true); err != nil { - return fmt.Errorf("Something went wrong during full MySQL shutdown: %v", err) - } - // Start MySQL, waiting for it to come up - if err := mysqld.Start(ctx, mycnf); err != nil { - return fmt.Errorf("Could not start MySQL after full shutdown: %v", err) - } - durationByPhase.Set("RestartBeforeBackup", int64(time.Since(restartAt).Seconds())) - } - - // Now we can take a new backup. - backupAt := time.Now() - if err := mysqlctl.Backup(ctx, backupParams); err != nil { - return fmt.Errorf("error taking backup: %v", err) - } - durationByPhase.Set("TakeNewBackup", int64(time.Since(backupAt).Seconds())) - - // Return a non-zero exit code if we didn't meet the replication position - // goal, even though we took a backup that pushes the high-water mark up. 
- if !status.Position.AtLeast(primaryPos) { - return fmt.Errorf("replication caught up to %v but didn't make it to the goal of %v; a backup was taken anyway to save partial progress, but the operation should still be retried since not all expected data is backed up", status.Position, primaryPos) - } - log.Info("Backup successful.") - return nil -} - -func resetReplication(ctx context.Context, pos mysql.Position, mysqld mysqlctl.MysqlDaemon) error { - cmds := []string{ - "STOP SLAVE", - "RESET SLAVE ALL", // "ALL" makes it forget replication source host:port. - } - if err := mysqld.ExecuteSuperQueryList(ctx, cmds); err != nil { - return vterrors.Wrap(err, "failed to reset replication") - } - - // Check if we have a position to resume from, if not reset to the beginning of time - if !pos.IsZero() { - // Set the position at which to resume from the replication source. - if err := mysqld.SetReplicationPosition(ctx, pos); err != nil { - return vterrors.Wrap(err, "failed to set replica position") - } - } else { - if err := mysqld.ResetReplication(ctx); err != nil { - return vterrors.Wrap(err, "failed to reset replication") - } - } - return nil -} - -func startReplication(ctx context.Context, mysqld mysqlctl.MysqlDaemon, topoServer *topo.Server) error { - si, err := topoServer.GetShard(ctx, initKeyspace, initShard) - if err != nil { - return vterrors.Wrap(err, "can't read shard") - } - if topoproto.TabletAliasIsZero(si.PrimaryAlias) { - // Normal tablets will sit around waiting to be reparented in this case. - // Since vtbackup is a batch job, we just have to fail. - return fmt.Errorf("can't start replication after restore: shard %v/%v has no primary", initKeyspace, initShard) - } - // TODO(enisoc): Support replicating from another replica, preferably in the - // same cell, preferably rdonly, to reduce load on the primary. 
- ti, err := topoServer.GetTablet(ctx, si.PrimaryAlias) - if err != nil { - return vterrors.Wrapf(err, "Cannot read primary tablet %v", si.PrimaryAlias) - } - - // Stop replication (in case we're restarting), set replication source, and start replication. - if err := mysqld.SetReplicationSource(ctx, ti.Tablet.MysqlHostname, ti.Tablet.MysqlPort, true /* stopReplicationBefore */, true /* startReplicationAfter */); err != nil { - return vterrors.Wrap(err, "MysqlDaemon.SetReplicationSource failed") - } - return nil -} - -func getPrimaryPosition(ctx context.Context, tmc tmclient.TabletManagerClient, ts *topo.Server) (mysql.Position, error) { - si, err := ts.GetShard(ctx, initKeyspace, initShard) - if err != nil { - return mysql.Position{}, vterrors.Wrap(err, "can't read shard") - } - if topoproto.TabletAliasIsZero(si.PrimaryAlias) { - // Normal tablets will sit around waiting to be reparented in this case. - // Since vtbackup is a batch job, we just have to fail. - return mysql.Position{}, fmt.Errorf("shard %v/%v has no primary", initKeyspace, initShard) - } - ti, err := ts.GetTablet(ctx, si.PrimaryAlias) - if err != nil { - return mysql.Position{}, fmt.Errorf("can't get primary tablet record %v: %v", topoproto.TabletAliasString(si.PrimaryAlias), err) - } - posStr, err := tmc.PrimaryPosition(ctx, ti.Tablet) - if err != nil { - return mysql.Position{}, fmt.Errorf("can't get primary replication position: %v", err) - } - pos, err := mysql.DecodePosition(posStr) - if err != nil { - return mysql.Position{}, fmt.Errorf("can't decode primary replication position %q: %v", posStr, err) - } - return pos, nil -} - -// retryOnError keeps calling the given function until it succeeds, or the given -// Context is done. It waits an exponentially increasing amount of time between -// retries to avoid hot-looping. The only time this returns an error is if the -// Context is cancelled. 
-func retryOnError(ctx context.Context, fn func() error) error { - waitTime := 1 * time.Second - - for { - err := fn() - if err == nil { - return nil - } - log.Errorf("Waiting %v to retry after error: %v", waitTime, err) - - select { - case <-ctx.Done(): - log.Errorf("Not retrying after error: %v", ctx.Err()) - return ctx.Err() - case <-time.After(waitTime): - waitTime *= 2 - } - } -} - -func pruneBackups(ctx context.Context, backupStorage backupstorage.BackupStorage, backupDir string) error { - if minRetentionTime == 0 { - log.Info("Pruning of old backups is disabled.") - return nil - } - backups, err := backupStorage.ListBackups(ctx, backupDir) - if err != nil { - return fmt.Errorf("can't list backups: %v", err) - } - numBackups := len(backups) - if numBackups <= minRetentionCount { - log.Infof("Found %v backups. Not pruning any since this is within the min_retention_count of %v.", numBackups, minRetentionCount) - return nil - } - // We have more than the minimum retention count, so we could afford to - // prune some. See if any are beyond the minimum retention time. - // ListBackups returns them sorted by oldest first. - for _, backup := range backups { - backupTime, err := parseBackupTime(backup.Name()) - if err != nil { - return err - } - if time.Since(backupTime) < minRetentionTime { - // The oldest remaining backup is not old enough to prune. - log.Infof("Oldest backup taken at %v has not reached min_retention_time of %v. Nothing left to prune.", backupTime, minRetentionTime) - break - } - // Remove the backup. - log.Infof("Removing old backup %v from %v, since it's older than min_retention_time of %v", backup.Name(), backupDir, minRetentionTime) - if err := backupStorage.RemoveBackup(ctx, backupDir, backup.Name()); err != nil { - return fmt.Errorf("couldn't remove backup %v from %v: %v", backup.Name(), backupDir, err) - } - // We successfully removed one backup. Can we afford to prune any more? 
- numBackups-- - if numBackups == minRetentionCount { - log.Infof("Successfully pruned backup count to min_retention_count of %v.", minRetentionCount) - break - } - } - return nil -} - -func parseBackupTime(name string) (time.Time, error) { - // Backup names are formatted as "date.time.tablet-alias". - parts := strings.Split(name, ".") - if len(parts) != 3 { - return time.Time{}, fmt.Errorf("backup name not in expected format (date.time.tablet-alias): %v", name) - } - backupTime, err := time.Parse(mysqlctl.BackupTimestampFormat, fmt.Sprintf("%s.%s", parts[0], parts[1])) - if err != nil { - return time.Time{}, fmt.Errorf("can't parse timestamp from backup %q: %v", name, err) - } - return backupTime, nil -} - -func shouldBackup(ctx context.Context, topoServer *topo.Server, backupStorage backupstorage.BackupStorage, backupDir string) (bool, error) { - // Look for the most recent, complete backup. - backups, err := backupStorage.ListBackups(ctx, backupDir) - if err != nil { - return false, fmt.Errorf("can't list backups: %v", err) - } - lastBackup := lastCompleteBackup(ctx, backups) - - // Check preconditions for initial_backup mode. - if initialBackup { - // Check if any backups for the shard already exist in this backup storage location. - if lastBackup != nil { - log.Infof("At least one complete backup already exists, so there's no need to seed an empty backup. Doing nothing.") - return false, nil - } - - // Check whether the shard exists. - _, shardErr := topoServer.GetShard(ctx, initKeyspace, initShard) - switch { - case shardErr == nil: - // If the shard exists, we should make sure none of the tablets are - // already in a serving state, because then they might have data - // that conflicts with the initial backup we're about to take. - tablets, err := topoServer.GetTabletMapForShard(ctx, initKeyspace, initShard) - if err != nil { - // We don't know for sure whether any tablets are serving, - // so it's not safe to continue. 
- return false, fmt.Errorf("failed to check whether shard %v/%v has serving tablets before doing initial backup: %v", initKeyspace, initShard, err) - } - for tabletAlias, tablet := range tablets { - // Check if any tablet has its type set to one of the serving types. - // If so, it's too late to do an initial backup. - if tablet.IsInServingGraph() { - return false, fmt.Errorf("refusing to upload initial backup of empty database: the shard %v/%v already has at least one tablet that may be serving (%v); you must take a backup from a live tablet instead", initKeyspace, initShard, tabletAlias) - } - } - log.Infof("Shard %v/%v exists but has no serving tablets.", initKeyspace, initShard) - case topo.IsErrType(shardErr, topo.NoNode): - // The shard doesn't exist, so we know no tablets are running. - log.Infof("Shard %v/%v doesn't exist; assuming it has no serving tablets.", initKeyspace, initShard) - default: - // If we encounter any other error, we don't know for sure whether - // the shard exists, so it's not safe to continue. - return false, fmt.Errorf("failed to check whether shard %v/%v exists before doing initial backup: %v", initKeyspace, initShard, err) - } - - log.Infof("Shard %v/%v has no existing backups. Creating initial backup.", initKeyspace, initShard) - return true, nil - } - - // We need at least one backup so we can restore first, unless the user explicitly says we don't - if len(backups) == 0 && !allowFirstBackup { - return false, fmt.Errorf("no existing backups to restore from; backup is not possible since --initial_backup flag was not enabled") - } - if lastBackup == nil { - if allowFirstBackup { - // There's no complete backup, but we were told to take one from scratch anyway. - return true, nil - } - return false, fmt.Errorf("no complete backups to restore from; backup is not possible since --initial_backup flag was not enabled") - } - - // Has it been long enough since the last complete backup to need a new one? 
- if minBackupInterval == 0 { - // No minimum interval is set, so always backup. - return true, nil - } - lastBackupTime, err := parseBackupTime(lastBackup.Name()) - if err != nil { - return false, fmt.Errorf("can't check last backup time: %v", err) - } - if elapsedTime := time.Since(lastBackupTime); elapsedTime < minBackupInterval { - // It hasn't been long enough yet. - log.Infof("Skipping backup since only %v has elapsed since the last backup at %v, which is less than the min_backup_interval of %v.", elapsedTime, lastBackupTime, minBackupInterval) - return false, nil - } - // It has been long enough. - log.Infof("The last backup was taken at %v, which is older than the min_backup_interval of %v.", lastBackupTime, minBackupInterval) - return true, nil -} - -func lastCompleteBackup(ctx context.Context, backups []backupstorage.BackupHandle) backupstorage.BackupHandle { - if len(backups) == 0 { - return nil - } - - // Backups are sorted in ascending order by start time. Start at the end. - for i := len(backups) - 1; i >= 0; i-- { - // Check if this backup is complete by looking for the MANIFEST file, - // which is written at the end after all files are uploaded. - backup := backups[i] - if err := checkBackupComplete(ctx, backup); err != nil { - log.Warningf("Ignoring backup %v because it's incomplete: %v", backup.Name(), err) - continue - } - return backup - } - - return nil -} - -func checkBackupComplete(ctx context.Context, backup backupstorage.BackupHandle) error { - manifest, err := mysqlctl.GetBackupManifest(ctx, backup) - if err != nil { - return fmt.Errorf("can't get backup MANIFEST: %v", err) - } - - log.Infof("Found complete backup %v taken at position %v", backup.Name(), manifest.Position.String()) - return nil } diff --git a/go/cmd/vtbench/cli/vtbench.go b/go/cmd/vtbench/cli/vtbench.go new file mode 100644 index 00000000000..69b866bb60d --- /dev/null +++ b/go/cmd/vtbench/cli/vtbench.go @@ -0,0 +1,246 @@ +/* +Copyright 2023 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/grpccommon" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vtbench" + + // Import and register the gRPC vtgateconn client + _ "vitess.io/vitess/go/vt/vtgate/grpcvtgateconn" + // Import and register the gRPC tabletconn client + _ "vitess.io/vitess/go/vt/vttablet/grpctabletconn" +) + +/* + + Vtbench is a simple load testing client to compare workloads in + Vitess across the various client/server protocols. 
+ + There are a number of command line options to control the behavior, + but as a basic example, the three supported client protocols are: + + Mysql protocol to vtgate: + vtbench \ + --protocol mysql \ + --host vtgate-host.my.domain \ + --port 15306 \ + --user db_username \ + --db-credentials-file ./vtbench_db_creds.json \ + --db @replica \ + --sql "select * from loadtest_table where id=123456789" \ + --threads 10 \ + --count 10 + + GRPC to vtgate: + vtbench \ + --protocol grpc-vtgate \ + --host vtgate-host.my.domain \ + --port 15999 \ + --db @replica \ + $VTTABLET_GRPC_ARGS \ + --sql "select * from loadtest_table where id=123456789" \ + --threads 10 \ + --count 10 + + GRPC to vttablet: + vtbench \ + --protocol grpc-vttablet \ + --host tablet-loadtest-00-80.my.domain \ + --port 15999 \ + --db loadtest/00-80@replica \ + --sql "select * from loadtest_table where id=123456789" \ + --threads 10 \ + --count 10 + +*/ + +var ( + host, unixSocket, user, db, sql string + port int + protocol = "mysql" + deadline = 5 * time.Minute + threads = 2 + count = 1000 + + Main = &cobra.Command{ + Use: "vtbench", + Short: "vtbench is a simple load testing client to compare workloads in Vitess across the various client/server protocols.", + Example: `There are a number of command line options to control the behavior, +but as a basic example, the three supported client protocols are: + +Mysql protocol to vtgate: +vtbench \ + --protocol mysql \ + --host vtgate-host.my.domain \ + --port 15306 \ + --user db_username \ + --db-credentials-file ./vtbench_db_creds.json \ + --db @replica \ + --sql "select * from loadtest_table where id=123456789" \ + --threads 10 \ + --count 10 + +GRPC to vtgate: +vtbench \ + --protocol grpc-vtgate \ + --host vtgate-host.my.domain \ + --port 15999 \ + --db @replica \ + $VTTABLET_GRPC_ARGS \ + --sql "select * from loadtest_table where id=123456789" \ + --threads 10 \ + --count 10 + +GRPC to vttablet: +vtbench \ + --protocol grpc-vttablet \ + --host 
tablet-loadtest-00-80.my.domain \ + --port 15999 \ + --db loadtest/00-80@replica \ + --sql "select * from loadtest_table where id=123456789" \ + --threads 10 \ + --count 10`, + Args: cobra.NoArgs, + Version: servenv.AppVersion.String(), + PreRunE: servenv.CobraPreRunE, + RunE: run, + } +) + +func init() { + servenv.MoveFlagsToCobraCommand(Main) + + Main.Flags().StringVar(&host, "host", host, "VTGate host(s) in the form 'host1,host2,...'") + Main.Flags().IntVar(&port, "port", port, "VTGate port") + Main.Flags().StringVar(&unixSocket, "unix_socket", unixSocket, "VTGate unix socket") + Main.Flags().StringVar(&protocol, "protocol", protocol, "Client protocol, either mysql (default), grpc-vtgate, or grpc-vttablet") + Main.Flags().StringVar(&user, "user", user, "Username to connect using mysql (password comes from the db-credentials-file)") + Main.Flags().StringVar(&db, "db", db, "Database name to use when connecting / running the queries (e.g. @replica, keyspace, keyspace/shard etc)") + + Main.Flags().DurationVar(&deadline, "deadline", deadline, "Maximum duration for the test run (default 5 minutes)") + Main.Flags().StringVar(&sql, "sql", sql, "SQL statement to execute") + Main.Flags().IntVar(&threads, "threads", threads, "Number of parallel threads to run") + Main.Flags().IntVar(&count, "count", count, "Number of queries per thread") + + Main.MarkFlagRequired("sql") + + grpccommon.RegisterFlags(Main.Flags()) + acl.RegisterFlags(Main.Flags()) + servenv.RegisterMySQLServerFlags(Main.Flags()) +} + +func run(cmd *cobra.Command, args []string) error { + logger := logutil.NewConsoleLogger() + cmd.SetOutput(logutil.NewLoggerWriter(logger)) + _ = cmd.Flags().Set("logtostderr", "true") + + servenv.Init() + + var clientProto vtbench.ClientProtocol + switch protocol { + case "", "mysql": + clientProto = vtbench.MySQL + case "grpc-vtgate": + clientProto = vtbench.GRPCVtgate + case "grpc-vttablet": + clientProto = vtbench.GRPCVttablet + default: + return fmt.Errorf("invalid client 
protocol %s", protocol) + } + + if (host != "" || port != 0) && unixSocket != "" { + return errors.New("can't specify both host:port and unix_socket") + } + + if host != "" && port == 0 { + return errors.New("must specify port when using host") + } + + if host == "" && port != 0 { + return errors.New("must specify host when using port") + } + + if host == "" && port == 0 && unixSocket == "" { + return errors.New("vtbench requires either host/port or unix_socket") + } + + var password string + if clientProto == vtbench.MySQL { + var err error + _, password, err = dbconfigs.GetCredentialsServer().GetUserAndPassword(user) + if err != nil { + return fmt.Errorf("error reading password for user %v from file: %w", user, err) + } + } + + connParams := vtbench.ConnParams{ + Hosts: strings.Split(host, ","), + Port: port, + UnixSocket: unixSocket, + Protocol: clientProto, + DB: db, + Username: user, + Password: password, + } + + b := vtbench.NewBench(threads, count, connParams, sql) + + ctx, cancel := context.WithTimeout(context.Background(), deadline) + defer cancel() + + fmt.Printf("Initializing test with %s protocol / %d threads / %d iterations\n", + b.ConnParams.Protocol.String(), b.Threads, b.Count) + err := b.Run(ctx) + if err != nil { + return fmt.Errorf("error in test: %w", err) + } + + fmt.Printf("Average Rows Returned: %d\n", b.Rows.Get()/int64(b.Threads*b.Count)) + fmt.Printf("Average Query Time: %v\n", time.Duration(b.Timings.Time()/b.Timings.Count())) + fmt.Printf("Total Test Time: %v\n", b.TotalTime) + fmt.Printf("QPS (Per Thread): %v\n", float64(b.Count)/b.TotalTime.Seconds()) + fmt.Printf("QPS (Total): %v\n", float64(b.Count*b.Threads)/b.TotalTime.Seconds()) + + last := int64(0) + + histograms := b.Timings.Histograms() + h := histograms["query"] + buckets := h.Buckets() + fmt.Printf("Query Timings:\n") + for i, bucket := range h.Cutoffs() { + count := buckets[i] + if count != 0 { + fmt.Printf("%v-%v: %v\n", time.Duration(last), time.Duration(bucket), count) + 
} + last = bucket + } + + return nil +} diff --git a/go/cmd/vtbench/docgen/main.go b/go/cmd/vtbench/docgen/main.go new file mode 100644 index 00000000000..5efe9e899a8 --- /dev/null +++ b/go/cmd/vtbench/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/vtbench/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.Main, dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/vtbench/vtbench.go b/go/cmd/vtbench/vtbench.go index 19044aae4ed..0d8bb85b536 100644 --- a/go/cmd/vtbench/vtbench.go +++ b/go/cmd/vtbench/vtbench.go @@ -17,194 +17,15 @@ limitations under the License. 
package main import ( - "context" - "fmt" - "strings" - "time" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/cmd/vtbench/cli" "vitess.io/vitess/go/exit" - "vitess.io/vitess/go/vt/dbconfigs" - "vitess.io/vitess/go/vt/grpccommon" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vtbench" - - // Import and register the gRPC vtgateconn client - _ "vitess.io/vitess/go/vt/vtgate/grpcvtgateconn" - // Import and register the gRPC tabletconn client - _ "vitess.io/vitess/go/vt/vttablet/grpctabletconn" -) - -/* - - Vtbench is a simple load testing client to compare workloads in - Vitess across the various client/server protocols. - - There are a number of command line options to control the behavior, - but as a basic example, the three supported client protocols are: - - Mysql protocol to vtgate: - vtbench \ - --protocol mysql \ - --host vtgate-host.my.domain \ - --port 15306 \ - --user db_username \ - --db-credentials-file ./vtbench_db_creds.json \ - --db @replica \ - --sql "select * from loadtest_table where id=123456789" \ - --threads 10 \ - --count 10 - - GRPC to vtgate: - vtbench \ - --protocol grpc-vtgate \ - --host vtgate-host.my.domain \ - --port 15999 \ - --db @replica \ - $VTTABLET_GRPC_ARGS \ - --sql "select * from loadtest_table where id=123456789" \ - --threads 10 \ - --count 10 - - GRPC to vttablet: - vtbench \ - --protocol grpc-vttablet \ - --host tablet-loadtest-00-80.my.domain \ - --port 15999 \ - --db loadtest/00-80@replica \ - --sql "select * from loadtest_table where id=123456789" \ - --threads 10 \ - --count 10 - -*/ - -var ( - host, unixSocket, user, db, sql string - port int - protocol = "mysql" - deadline = 5 * time.Minute - threads = 2 - count = 1000 ) -func initFlags(fs *pflag.FlagSet) { - fs.StringVar(&host, "host", host, "VTGate host(s) in the form 'host1,host2,...'") - fs.IntVar(&port, "port", port, "VTGate port") - fs.StringVar(&unixSocket, 
"unix_socket", unixSocket, "VTGate unix socket") - fs.StringVar(&protocol, "protocol", protocol, "Client protocol, either mysql (default), grpc-vtgate, or grpc-vttablet") - fs.StringVar(&user, "user", user, "Username to connect using mysql (password comes from the db-credentials-file)") - fs.StringVar(&db, "db", db, "Database name to use when connecting / running the queries (e.g. @replica, keyspace, keyspace/shard etc)") - - fs.DurationVar(&deadline, "deadline", deadline, "Maximum duration for the test run (default 5 minutes)") - fs.StringVar(&sql, "sql", sql, "SQL statement to execute") - fs.IntVar(&threads, "threads", threads, "Number of parallel threads to run") - fs.IntVar(&count, "count", count, "Number of queries per thread") - - grpccommon.RegisterFlags(fs) - log.RegisterFlags(fs) - logutil.RegisterFlags(fs) - acl.RegisterFlags(fs) - servenv.RegisterMySQLServerFlags(fs) -} - func main() { - servenv.OnParseFor("vtbench", func(fs *pflag.FlagSet) { - logger := logutil.NewConsoleLogger() - fs.SetOutput(logutil.NewLoggerWriter(logger)) - - initFlags(fs) - _ = fs.Set("logtostderr", "true") - }) - - servenv.ParseFlags("vtbench") - servenv.Init() - defer exit.Recover() - clientProto := vtbench.MySQL - switch protocol { - case "", "mysql": - clientProto = vtbench.MySQL - case "grpc-vtgate": - clientProto = vtbench.GRPCVtgate - case "grpc-vttablet": - clientProto = vtbench.GRPCVttablet - default: - log.Exitf("invalid client protocol %s", protocol) - } - - if (host != "" || port != 0) && unixSocket != "" { - log.Exitf("can't specify both host:port and unix_socket") - } - - if host != "" && port == 0 { - log.Exitf("must specify port when using host") - } - - if host == "" && port != 0 { - log.Exitf("must specify host when using port") - } - - if host == "" && port == 0 && unixSocket == "" { - log.Exitf("vtbench requires either host/port or unix_socket") - } - - if sql == "" { - log.Exitf("must specify sql") - } - - var password string - if clientProto == vtbench.MySQL 
{ - var err error - _, password, err = dbconfigs.GetCredentialsServer().GetUserAndPassword(user) - if err != nil { - log.Exitf("error reading password for user %v from file: %v", user, err) - } - } - - connParams := vtbench.ConnParams{ - Hosts: strings.Split(host, ","), - Port: port, - UnixSocket: unixSocket, - Protocol: clientProto, - DB: db, - Username: user, - Password: password, - } - - b := vtbench.NewBench(threads, count, connParams, sql) - - ctx, cancel := context.WithTimeout(context.Background(), deadline) - defer cancel() - - fmt.Printf("Initializing test with %s protocol / %d threads / %d iterations\n", - b.ConnParams.Protocol.String(), b.Threads, b.Count) - err := b.Run(ctx) - if err != nil { - log.Exitf("error in test: %v", err) - } - - fmt.Printf("Average Rows Returned: %d\n", b.Rows.Get()/int64(b.Threads*b.Count)) - fmt.Printf("Average Query Time: %v\n", time.Duration(b.Timings.Time()/b.Timings.Count())) - fmt.Printf("Total Test Time: %v\n", b.TotalTime) - fmt.Printf("QPS (Per Thread): %v\n", float64(b.Count)/b.TotalTime.Seconds()) - fmt.Printf("QPS (Total): %v\n", float64(b.Count*b.Threads)/b.TotalTime.Seconds()) - - last := int64(0) - - histograms := b.Timings.Histograms() - h := histograms["query"] - buckets := h.Buckets() - fmt.Printf("Query Timings:\n") - for i, bucket := range h.Cutoffs() { - count := buckets[i] - if count != 0 { - fmt.Printf("%v-%v: %v\n", time.Duration(last), time.Duration(bucket), count) - } - last = bucket + if err := cli.Main.Execute(); err != nil { + log.Exit(err) } } diff --git a/go/cmd/vtclient/plugin_opentracing.go b/go/cmd/vtclient/cli/plugin_opentracing.go similarity index 98% rename from go/cmd/vtclient/plugin_opentracing.go rename to go/cmd/vtclient/cli/plugin_opentracing.go index b48334531a3..a3466ca8c73 100644 --- a/go/cmd/vtclient/plugin_opentracing.go +++ b/go/cmd/vtclient/cli/plugin_opentracing.go @@ -14,11 +14,10 @@ See the License for the specific language governing permissions and limitations under the 
License. */ -package main +package cli import ( "vitess.io/vitess/go/trace" - "vitess.io/vitess/go/vt/servenv" ) diff --git a/go/cmd/vtclient/cli/vtclient.go b/go/cmd/vtclient/cli/vtclient.go new file mode 100644 index 00000000000..949af851ab4 --- /dev/null +++ b/go/cmd/vtclient/cli/vtclient.go @@ -0,0 +1,431 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "io" + "math/rand" + "os" + "sort" + "sync" + "time" + + "github.com/olekukonko/tablewriter" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/grpccommon" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vitessdriver" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vtgateconn" + + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + // Include deprecation warnings for soon-to-be-unsupported flag invocations. 
+) + +var ( + server string + streaming bool + targetString string + jsonOutput bool + useRandom bool + bindVariables *bindvars + + timeout = 30 * time.Second + parallel = 1 + count = 1 + minSeqID int + maxSeqID int + qps int + + Main = &cobra.Command{ + Use: "vtclient ", + Short: "vtclient connects to a vtgate server using the standard go driver API.", + Long: `vtclient connects to a vtgate server using the standard go driver API. + +For query bound variables, we assume place-holders in the query string +in the form of :v1, :v2, etc.`, + Example: `vtclient --server vtgate:15991 "SELECT * FROM messages" + +vtclient --server vtgate:15991 --target '@primary' --bind_variables '[ 12345, 1, "msg 12345" ]' "INSERT INTO messages (page,time_created_ns,message) VALUES (:v1, :v2, :v3)"`, + Args: cobra.ExactArgs(1), + Version: servenv.AppVersion.String(), + RunE: run, + } +) + +var ( + seqChan = make(chan int, 10) +) + +func init() { + servenv.MoveFlagsToCobraCommand(Main) + + Main.Flags().StringVar(&server, "server", server, "vtgate server to connect to") + Main.Flags().DurationVar(&timeout, "timeout", timeout, "timeout for queries") + Main.Flags().BoolVar(&streaming, "streaming", streaming, "use a streaming query") + Main.Flags().StringVar(&targetString, "target", targetString, "keyspace:shard@tablet_type") + Main.Flags().BoolVar(&jsonOutput, "json", jsonOutput, "Output JSON instead of human-readable table") + Main.Flags().IntVar(¶llel, "parallel", parallel, "DMLs only: Number of threads executing the same query in parallel. Useful for simple load testing.") + Main.Flags().IntVar(&count, "count", count, "DMLs only: Number of times each thread executes the query. Useful for simple, sustained load testing.") + Main.Flags().IntVar(&minSeqID, "min_sequence_id", minSeqID, "min sequence ID to generate. 
When max_sequence_id > min_sequence_id, for each query, a number is generated in [min_sequence_id, max_sequence_id) and attached to the end of the bind variables.") + Main.Flags().IntVar(&maxSeqID, "max_sequence_id", maxSeqID, "max sequence ID.") + Main.Flags().BoolVar(&useRandom, "use_random_sequence", useRandom, "use random sequence for generating [min_sequence_id, max_sequence_id)") + Main.Flags().IntVar(&qps, "qps", qps, "queries per second to throttle each thread at.") + + acl.RegisterFlags(Main.Flags()) + grpccommon.RegisterFlags(Main.Flags()) + servenv.RegisterMySQLServerFlags(Main.Flags()) + + bindVariables = newBindvars(Main.Flags(), "bind_variables", "bind variables as a json list") +} + +type bindvars []any + +func (bv *bindvars) String() string { + b, err := json.Marshal(bv) + if err != nil { + return err.Error() + } + return string(b) +} + +func (bv *bindvars) Set(s string) (err error) { + err = json.Unmarshal([]byte(s), &bv) + if err != nil { + return err + } + // json reads all numbers as float64 + // So, we just ditch floats for bindvars + for i, v := range *bv { + if f, ok := v.(float64); ok { + if f > 0 { + (*bv)[i] = uint64(f) + } else { + (*bv)[i] = int64(f) + } + } + } + + return nil +} + +// For internal flag compatibility +func (bv *bindvars) Get() any { + return bv +} + +// Type is part of the pflag.Value interface. bindvars.Set() expects all numbers as float64. 
+func (bv *bindvars) Type() string { + return "float64" +} + +func newBindvars(fs *pflag.FlagSet, name, usage string) *bindvars { + var bv bindvars + fs.Var(&bv, name, usage) + return &bv +} + +func run(cmd *cobra.Command, args []string) error { + defer logutil.Flush() + + qr, err := _run(cmd, args) + if jsonOutput && qr != nil { + data, err := json.MarshalIndent(qr, "", " ") + if err != nil { + return fmt.Errorf("cannot marshal data: %w", err) + } + fmt.Fprint(cmd.OutOrStdout(), string(data)) + return nil + } + + qr.print(cmd.OutOrStdout()) + return err +} + +func _run(cmd *cobra.Command, args []string) (*results, error) { + logutil.PurgeLogs() + + if maxSeqID > minSeqID { + go func() { + if useRandom { + for { + seqChan <- rand.Intn(maxSeqID-minSeqID) + minSeqID + } + } else { + for i := minSeqID; i < maxSeqID; i++ { + seqChan <- i + } + } + }() + } + + c := vitessdriver.Configuration{ + Protocol: vtgateconn.GetVTGateProtocol(), + Address: server, + Target: targetString, + Streaming: streaming, + } + db, err := vitessdriver.OpenWithConfiguration(c) + if err != nil { + return nil, fmt.Errorf("client error: %w", err) + } + + log.Infof("Sending the query...") + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + return execMulti(ctx, db, cmd.Flags().Arg(0)) +} + +func prepareBindVariables() []any { + bv := make([]any, 0, len(*bindVariables)+1) + bv = append(bv, (*bindVariables)...) 
+ if maxSeqID > minSeqID { + bv = append(bv, <-seqChan) + } + return bv +} + +func execMulti(ctx context.Context, db *sql.DB, sql string) (*results, error) { + all := newResults() + ec := concurrency.FirstErrorRecorder{} + wg := sync.WaitGroup{} + isDML := sqlparser.IsDML(sql) + + isThrottled := qps > 0 + + start := time.Now() + for i := 0; i < parallel; i++ { + wg.Add(1) + + go func() { + defer wg.Done() + + var ticker *time.Ticker + if isThrottled { + tickDuration := time.Second / time.Duration(qps) + ticker = time.NewTicker(tickDuration) + } + + for j := 0; j < count; j++ { + var qr *results + var err error + if isDML { + qr, err = execDml(ctx, db, sql) + } else { + qr, err = execNonDml(ctx, db, sql) + } + if count == 1 && parallel == 1 { + all = qr + } else { + all.merge(qr) + if err != nil { + all.recordError(err) + } + } + if err != nil { + ec.RecordError(err) + // We keep going and do not return early purpose. + } + + if ticker != nil { + <-ticker.C + } + } + }() + } + wg.Wait() + if all != nil { + all.duration = time.Since(start) + } + + return all, ec.Error() +} + +func execDml(ctx context.Context, db *sql.DB, sql string) (*results, error) { + start := time.Now() + tx, err := db.Begin() + if err != nil { + return nil, vterrors.Wrap(err, "BEGIN failed") + } + + result, err := tx.ExecContext(ctx, sql, []any(prepareBindVariables())...) + if err != nil { + return nil, vterrors.Wrap(err, "failed to execute DML") + } + + err = tx.Commit() + if err != nil { + return nil, vterrors.Wrap(err, "COMMIT failed") + } + + rowsAffected, _ := result.RowsAffected() + lastInsertID, _ := result.LastInsertId() + return &results{ + rowsAffected: rowsAffected, + lastInsertID: lastInsertID, + duration: time.Since(start), + }, nil +} + +func execNonDml(ctx context.Context, db *sql.DB, sql string) (*results, error) { + start := time.Now() + rows, err := db.QueryContext(ctx, sql, []any(prepareBindVariables())...) 
+ if err != nil { + return nil, vterrors.Wrap(err, "client error") + } + defer rows.Close() + + // get the headers + var qr results + cols, err := rows.Columns() + if err != nil { + return nil, vterrors.Wrap(err, "client error") + } + qr.Fields = cols + + // get the rows + for rows.Next() { + row := make([]any, len(cols)) + for i := range row { + var col string + row[i] = &col + } + if err := rows.Scan(row...); err != nil { + return nil, vterrors.Wrap(err, "client error") + } + + // unpack []*string into []string + vals := make([]string, 0, len(row)) + for _, value := range row { + vals = append(vals, *(value.(*string))) + } + qr.Rows = append(qr.Rows, vals) + } + qr.rowsAffected = int64(len(qr.Rows)) + + if err := rows.Err(); err != nil { + return nil, vterrors.Wrap(err, "Vitess returned an error") + } + + qr.duration = time.Since(start) + return &qr, nil +} + +type results struct { + mu sync.Mutex + Fields []string `json:"fields"` + Rows [][]string `json:"rows"` + rowsAffected int64 + lastInsertID int64 + duration time.Duration + cumulativeDuration time.Duration + + // Multi DML mode: Track total error count, error count per code and the first error. + totalErrorCount int + errorCount map[vtrpcpb.Code]int + firstError map[vtrpcpb.Code]error +} + +func newResults() *results { + return &results{ + errorCount: make(map[vtrpcpb.Code]int), + firstError: make(map[vtrpcpb.Code]error), + } +} + +// merge aggregates "other" into "r". +// This is only used for executing DMLs concurrently and repeatedly. +// Therefore, "Fields" and "Rows" are not merged. 
+func (r *results) merge(other *results) { + if other == nil { + return + } + + r.mu.Lock() + defer r.mu.Unlock() + + r.rowsAffected += other.rowsAffected + if other.lastInsertID > r.lastInsertID { + r.lastInsertID = other.lastInsertID + } + r.cumulativeDuration += other.duration +} + +func (r *results) recordError(err error) { + r.mu.Lock() + defer r.mu.Unlock() + + r.totalErrorCount++ + code := vterrors.Code(err) + r.errorCount[code]++ + + if r.errorCount[code] == 1 { + r.firstError[code] = err + } +} + +func (r *results) print(w io.Writer) { + if r == nil { + return + } + + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader(r.Fields) + table.SetAutoFormatHeaders(false) + table.AppendBulk(r.Rows) + table.Render() + fmt.Fprintf(w, "%v row(s) affected (%v, cum: %v)\n", r.rowsAffected, r.duration, r.cumulativeDuration) + if r.lastInsertID != 0 { + fmt.Fprintf(w, "Last insert ID: %v\n", r.lastInsertID) + } + + if r.totalErrorCount == 0 { + return + } + + fmt.Printf("%d error(s) were returned. Number of errors by error code:\n\n", r.totalErrorCount) + // Sort different error codes by count (descending). 
+ type errorCounts struct { + code vtrpcpb.Code + count int + } + var counts []errorCounts + for code, count := range r.errorCount { + counts = append(counts, errorCounts{code, count}) + } + sort.Slice(counts, func(i, j int) bool { return counts[i].count >= counts[j].count }) + for _, c := range counts { + fmt.Fprintf(w, "%- 30v= % 5d\n", c.code, c.count) + } + + fmt.Fprintf(w, "\nFirst error per code:\n\n") + for code, err := range r.firstError { + fmt.Fprintf(w, "Code: %v\nError: %v\n\n", code, err) + } +} diff --git a/go/cmd/vtclient/vtclient_test.go b/go/cmd/vtclient/cli/vtclient_test.go similarity index 90% rename from go/cmd/vtclient/vtclient_test.go rename to go/cmd/vtclient/cli/vtclient_test.go index 4711b1e0127..a5ee571cd0b 100644 --- a/go/cmd/vtclient/vtclient_test.go +++ b/go/cmd/vtclient/cli/vtclient_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( "fmt" @@ -22,7 +22,7 @@ import ( "strings" "testing" - "github.com/spf13/pflag" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/vttest" @@ -120,15 +120,16 @@ func TestVtclient(t *testing.T) { }, } - // Change ErrorHandling from ExitOnError to panicking. - pflag.CommandLine.Init("vtclient_test.go", pflag.PanicOnError) for _, q := range queries { // Run main function directly and not as external process. To achieve this, // overwrite os.Args which is used by pflag.Parse(). - os.Args = []string{"vtclient_test.go", "--server", vtgateAddr} - os.Args = append(os.Args, q.args...) + args := []string{"--server", vtgateAddr} + args = append(args, q.args...) 
- results, err := run() + err := Main.ParseFlags(args) + require.NoError(t, err) + + results, err := _run(Main, args) if q.errMsg != "" { if got, want := err.Error(), q.errMsg; !strings.Contains(got, want) { t.Fatalf("vtclient %v returned wrong error: got = %v, want contains = %v", os.Args[1:], got, want) @@ -137,7 +138,7 @@ func TestVtclient(t *testing.T) { } if err != nil { - t.Fatalf("vtclient %v failed: %v", os.Args[1:], err) + t.Fatalf("vtclient %v failed: %v", args[1:], err) } if got, want := results.rowsAffected, q.rowsAffected; got != want { t.Fatalf("wrong rows affected for query: %v got = %v, want = %v", os.Args[1:], got, want) diff --git a/go/cmd/vtclient/docgen/main.go b/go/cmd/vtclient/docgen/main.go new file mode 100644 index 00000000000..b740cbd67a7 --- /dev/null +++ b/go/cmd/vtclient/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/vtclient/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.Main, dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/vtclient/vtclient.go b/go/cmd/vtclient/vtclient.go index adc060d7737..4201d25c882 100644 --- a/go/cmd/vtclient/vtclient.go +++ b/go/cmd/vtclient/vtclient.go @@ -17,440 +17,12 @@ limitations under the License. package main import ( - "context" - "database/sql" - "encoding/json" - "errors" - "flag" - "fmt" - "io" - "math/rand" - "os" - "sort" - "sync" - "time" - - "github.com/olekukonko/tablewriter" - "github.com/spf13/pflag" - - "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/vt/concurrency" - "vitess.io/vitess/go/vt/grpccommon" + "vitess.io/vitess/go/cmd/vtclient/cli" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vitessdriver" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/vtgateconn" - - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - - // Include deprecation warnings for soon-to-be-unsupported flag invocations. - _flag "vitess.io/vitess/go/internal/flag" ) -var ( - usage = ` -vtclient connects to a vtgate server using the standard go driver API. -Version 3 of the API is used, we do not send any hint to the server. - -For query bound variables, we assume place-holders in the query string -in the form of :v1, :v2, etc. 
- -Examples: - - $ vtclient --server vtgate:15991 "SELECT * FROM messages" - - $ vtclient --server vtgate:15991 --target '@primary' --bind_variables '[ 12345, 1, "msg 12345" ]' "INSERT INTO messages (page,time_created_ns,message) VALUES (:v1, :v2, :v3)" - -` - server string - streaming bool - targetString string - jsonOutput bool - useRandom bool - bindVariables *bindvars - - timeout = 30 * time.Second - parallel = 1 - count = 1 - minSeqID = 0 - maxSeqID = 0 - qps = 0 -) - -var ( - seqChan = make(chan int, 10) -) - -func init() { - _flag.SetUsage(flag.CommandLine, _flag.UsageOptions{ - Epilogue: func(w io.Writer) { fmt.Fprint(w, usage) }, - }) -} - -func registerFlags(fs *pflag.FlagSet) { - fs.StringVar(&server, "server", server, "vtgate server to connect to") - fs.DurationVar(&timeout, "timeout", timeout, "timeout for queries") - fs.BoolVar(&streaming, "streaming", streaming, "use a streaming query") - fs.StringVar(&targetString, "target", targetString, "keyspace:shard@tablet_type") - fs.BoolVar(&jsonOutput, "json", jsonOutput, "Output JSON instead of human-readable table") - fs.IntVar(¶llel, "parallel", parallel, "DMLs only: Number of threads executing the same query in parallel. Useful for simple load testing.") - fs.IntVar(&count, "count", count, "DMLs only: Number of times each thread executes the query. Useful for simple, sustained load testing.") - fs.IntVar(&minSeqID, "min_sequence_id", minSeqID, "min sequence ID to generate. 
When max_sequence_id > min_sequence_id, for each query, a number is generated in [min_sequence_id, max_sequence_id) and attached to the end of the bind variables.") - fs.IntVar(&maxSeqID, "max_sequence_id", maxSeqID, "max sequence ID.") - fs.BoolVar(&useRandom, "use_random_sequence", useRandom, "use random sequence for generating [min_sequence_id, max_sequence_id)") - fs.IntVar(&qps, "qps", qps, "queries per second to throttle each thread at.") - - acl.RegisterFlags(fs) - - bindVariables = newBindvars(fs, "bind_variables", "bind variables as a json list") -} - -type bindvars []any - -func (bv *bindvars) String() string { - b, err := json.Marshal(bv) - if err != nil { - return err.Error() - } - return string(b) -} - -func (bv *bindvars) Set(s string) (err error) { - err = json.Unmarshal([]byte(s), &bv) - if err != nil { - return err - } - // json reads all numbers as float64 - // So, we just ditch floats for bindvars - for i, v := range *bv { - if f, ok := v.(float64); ok { - if f > 0 { - (*bv)[i] = uint64(f) - } else { - (*bv)[i] = int64(f) - } - } - } - - return nil -} - -// For internal flag compatibility -func (bv *bindvars) Get() any { - return bv -} - -// Type is part of the pflag.Value interface. bindvars.Set() expects all numbers as float64. 
-func (bv *bindvars) Type() string { - return "float64" -} - -func newBindvars(fs *pflag.FlagSet, name, usage string) *bindvars { - var bv bindvars - fs.Var(&bv, name, usage) - return &bv -} - func main() { - defer logutil.Flush() - - qr, err := run() - if jsonOutput && qr != nil { - data, err := json.MarshalIndent(qr, "", " ") - if err != nil { - log.Exitf("cannot marshal data: %v", err) - } - fmt.Print(string(data)) - return - } - - qr.print() - - if err != nil { + if err := cli.Main.Execute(); err != nil { log.Exit(err) } } - -func run() (*results, error) { - fs := pflag.NewFlagSet("vtclient", pflag.ExitOnError) - grpccommon.RegisterFlags(fs) - log.RegisterFlags(fs) - logutil.RegisterFlags(fs) - servenv.RegisterMySQLServerFlags(fs) - registerFlags(fs) - _flag.Parse(fs) - args := _flag.Args() - - logutil.PurgeLogs() - - if len(args) == 0 { - pflag.Usage() - return nil, errors.New("no arguments provided. See usage above") - } - if len(args) > 1 { - return nil, errors.New("no additional arguments after the query allowed") - } - - if maxSeqID > minSeqID { - go func() { - if useRandom { - rand.Seed(time.Now().UnixNano()) - for { - seqChan <- rand.Intn(maxSeqID-minSeqID) + minSeqID - } - } else { - for i := minSeqID; i < maxSeqID; i++ { - seqChan <- i - } - } - }() - } - - c := vitessdriver.Configuration{ - Protocol: vtgateconn.GetVTGateProtocol(), - Address: server, - Target: targetString, - Streaming: streaming, - } - db, err := vitessdriver.OpenWithConfiguration(c) - if err != nil { - return nil, fmt.Errorf("client error: %v", err) - } - - log.Infof("Sending the query...") - - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - return execMulti(ctx, db, args[0]) -} - -func prepareBindVariables() []any { - bv := make([]any, 0, len(*bindVariables)+1) - bv = append(bv, (*bindVariables)...) 
- if maxSeqID > minSeqID { - bv = append(bv, <-seqChan) - } - return bv -} - -func execMulti(ctx context.Context, db *sql.DB, sql string) (*results, error) { - all := newResults() - ec := concurrency.FirstErrorRecorder{} - wg := sync.WaitGroup{} - isDML := sqlparser.IsDML(sql) - - isThrottled := qps > 0 - - start := time.Now() - for i := 0; i < parallel; i++ { - wg.Add(1) - - go func() { - defer wg.Done() - - var ticker *time.Ticker - if isThrottled { - tickDuration := time.Second / time.Duration(qps) - ticker = time.NewTicker(tickDuration) - } - - for j := 0; j < count; j++ { - var qr *results - var err error - if isDML { - qr, err = execDml(ctx, db, sql) - } else { - qr, err = execNonDml(ctx, db, sql) - } - if count == 1 && parallel == 1 { - all = qr - } else { - all.merge(qr) - if err != nil { - all.recordError(err) - } - } - if err != nil { - ec.RecordError(err) - // We keep going and do not return early purpose. - } - - if ticker != nil { - <-ticker.C - } - } - }() - } - wg.Wait() - if all != nil { - all.duration = time.Since(start) - } - - return all, ec.Error() -} - -func execDml(ctx context.Context, db *sql.DB, sql string) (*results, error) { - start := time.Now() - tx, err := db.Begin() - if err != nil { - return nil, vterrors.Wrap(err, "BEGIN failed") - } - - result, err := tx.ExecContext(ctx, sql, []any(prepareBindVariables())...) - if err != nil { - return nil, vterrors.Wrap(err, "failed to execute DML") - } - - err = tx.Commit() - if err != nil { - return nil, vterrors.Wrap(err, "COMMIT failed") - } - - rowsAffected, _ := result.RowsAffected() - lastInsertID, _ := result.LastInsertId() - return &results{ - rowsAffected: rowsAffected, - lastInsertID: lastInsertID, - duration: time.Since(start), - }, nil -} - -func execNonDml(ctx context.Context, db *sql.DB, sql string) (*results, error) { - start := time.Now() - rows, err := db.QueryContext(ctx, sql, []any(prepareBindVariables())...) 
- if err != nil { - return nil, vterrors.Wrap(err, "client error") - } - defer rows.Close() - - // get the headers - var qr results - cols, err := rows.Columns() - if err != nil { - return nil, vterrors.Wrap(err, "client error") - } - qr.Fields = cols - - // get the rows - for rows.Next() { - row := make([]any, len(cols)) - for i := range row { - var col string - row[i] = &col - } - if err := rows.Scan(row...); err != nil { - return nil, vterrors.Wrap(err, "client error") - } - - // unpack []*string into []string - vals := make([]string, 0, len(row)) - for _, value := range row { - vals = append(vals, *(value.(*string))) - } - qr.Rows = append(qr.Rows, vals) - } - qr.rowsAffected = int64(len(qr.Rows)) - - if err := rows.Err(); err != nil { - return nil, vterrors.Wrap(err, "Vitess returned an error") - } - - qr.duration = time.Since(start) - return &qr, nil -} - -type results struct { - mu sync.Mutex - Fields []string `json:"fields"` - Rows [][]string `json:"rows"` - rowsAffected int64 - lastInsertID int64 - duration time.Duration - cumulativeDuration time.Duration - - // Multi DML mode: Track total error count, error count per code and the first error. - totalErrorCount int - errorCount map[vtrpcpb.Code]int - firstError map[vtrpcpb.Code]error -} - -func newResults() *results { - return &results{ - errorCount: make(map[vtrpcpb.Code]int), - firstError: make(map[vtrpcpb.Code]error), - } -} - -// merge aggregates "other" into "r". -// This is only used for executing DMLs concurrently and repeatedly. -// Therefore, "Fields" and "Rows" are not merged. 
-func (r *results) merge(other *results) { - if other == nil { - return - } - - r.mu.Lock() - defer r.mu.Unlock() - - r.rowsAffected += other.rowsAffected - if other.lastInsertID > r.lastInsertID { - r.lastInsertID = other.lastInsertID - } - r.cumulativeDuration += other.duration -} - -func (r *results) recordError(err error) { - r.mu.Lock() - defer r.mu.Unlock() - - r.totalErrorCount++ - code := vterrors.Code(err) - r.errorCount[code]++ - - if r.errorCount[code] == 1 { - r.firstError[code] = err - } -} - -func (r *results) print() { - if r == nil { - return - } - - table := tablewriter.NewWriter(os.Stdout) - table.SetHeader(r.Fields) - table.SetAutoFormatHeaders(false) - table.AppendBulk(r.Rows) - table.Render() - fmt.Printf("%v row(s) affected (%v, cum: %v)\n", r.rowsAffected, r.duration, r.cumulativeDuration) - if r.lastInsertID != 0 { - fmt.Printf("Last insert ID: %v\n", r.lastInsertID) - } - - if r.totalErrorCount == 0 { - return - } - - fmt.Printf("%d error(s) were returned. Number of errors by error code:\n\n", r.totalErrorCount) - // Sort different error codes by count (descending). - type errorCounts struct { - code vtrpcpb.Code - count int - } - var counts []errorCounts - for code, count := range r.errorCount { - counts = append(counts, errorCounts{code, count}) - } - sort.Slice(counts, func(i, j int) bool { return counts[i].count >= counts[j].count }) - for _, c := range counts { - fmt.Printf("%- 30v= % 5d\n", c.code, c.count) - } - - fmt.Printf("\nFirst error per code:\n\n") - for code, err := range r.firstError { - fmt.Printf("Code: %v\nError: %v\n\n", code, err) - } -} diff --git a/go/cmd/vtcombo/cli/main.go b/go/cmd/vtcombo/cli/main.go new file mode 100644 index 00000000000..bfc0ad894fe --- /dev/null +++ b/go/cmd/vtcombo/cli/main.go @@ -0,0 +1,358 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// vtcombo: a single binary that contains: +// - a ZK topology server based on an in-memory map. +// - one vtgate instance. +// - many vttablet instances. +// - a vtctld instance so it's easy to see the topology. +package cli + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtcombo" + "vitess.io/vitess/go/vt/vtctld" + "vitess.io/vitess/go/vt/vtgate" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttest" + "vitess.io/vitess/go/vt/wrangler" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vttestpb "vitess.io/vitess/go/vt/proto/vttest" +) + +var ( + Main = &cobra.Command{ + Use: "vtcombo", + Short: "vtcombo is a single binary containing several vitess components.", + Long: `vtcombo is a single binary containing several vitess components. + +In particular, it contains: +- A topology server based on an in-memory map. +- One vtgate instance. +- Many vttablet instances. 
+- A vtctld instance so it's easy to see the topology.`, + Args: cobra.NoArgs, + Version: servenv.AppVersion.String(), + PreRunE: servenv.CobraPreRunE, + RunE: run, + } + schemaDir string + startMysql bool + mysqlPort = 3306 + externalTopoServer bool + plannerName string + vschemaPersistenceDir string + + tpb vttestpb.VTTestTopology + ts *topo.Server + resilientServer *srvtopo.ResilientServer +) + +func init() { + servenv.RegisterDefaultFlags() + servenv.RegisterFlags() + servenv.RegisterGRPCServerFlags() + servenv.RegisterGRPCServerAuthFlags() + servenv.RegisterServiceMapFlag() + + dbconfigs.RegisterFlags(dbconfigs.All...) + mysqlctl.RegisterFlags() + + servenv.MoveFlagsToCobraCommand(Main) + + acl.RegisterFlags(Main.Flags()) + + Main.Flags().StringVar(&schemaDir, "schema_dir", schemaDir, "Schema base directory. Should contain one directory per keyspace, with a vschema.json file if necessary.") + Main.Flags().BoolVar(&startMysql, "start_mysql", startMysql, "Should vtcombo also start mysql") + Main.Flags().IntVar(&mysqlPort, "mysql_port", mysqlPort, "mysql port") + Main.Flags().BoolVar(&externalTopoServer, "external_topo_server", externalTopoServer, "Should vtcombo use an external topology server instead of starting its own in-memory topology server. "+ + "If true, vtcombo will use the flags defined in topo/server.go to open topo server") + Main.Flags().StringVar(&plannerName, "planner-version", plannerName, "Sets the default planner to use when the session has not changed it. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right") + Main.Flags().StringVar(&vschemaPersistenceDir, "vschema-persistence-dir", vschemaPersistenceDir, "If set, per-keyspace vschema will be persisted in this directory "+ + "and reloaded into the in-memory topology server across restarts. Bookkeeping is performed using a simple watcher goroutine. "+ + "This is useful when running vtcombo as an application development container (e.g. 
vttestserver) where you want to keep the same "+ + "vschema even if developer's machine reboots. This works in tandem with vttestserver's --persistent_mode flag. Needless to say, "+ + "this is neither a perfect nor a production solution for vschema persistence. Consider using the --external_topo_server flag if "+ + "you require a more complete solution. This flag is ignored if --external_topo_server is set.") + + Main.Flags().Var(vttest.TextTopoData(&tpb), "proto_topo", "vttest proto definition of the topology, encoded in compact text format. See vttest.proto for more information.") + Main.Flags().Var(vttest.JSONTopoData(&tpb), "json_topo", "vttest proto definition of the topology, encoded in json format. See vttest.proto for more information.") + + // We're going to force the value later, so don't even bother letting the + // user know about this flag. + Main.Flags().MarkHidden("tablet_protocol") +} + +func startMysqld(uid uint32) (mysqld *mysqlctl.Mysqld, cnf *mysqlctl.Mycnf, err error) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + mycnfFile := mysqlctl.MycnfFile(uid) + + if _, statErr := os.Stat(mycnfFile); os.IsNotExist(statErr) { + mysqld, cnf, err = mysqlctl.CreateMysqldAndMycnf(uid, "", mysqlPort) + if err != nil { + return nil, nil, fmt.Errorf("failed to initialize mysql config :%w", err) + } + if err := mysqld.Init(ctx, cnf, ""); err != nil { + return nil, nil, fmt.Errorf("failed to initialize mysql :%w", err) + } + } else { + mysqld, cnf, err = mysqlctl.OpenMysqldAndMycnf(uid) + if err != nil { + return nil, nil, fmt.Errorf("failed to find mysql config: %w", err) + } + err = mysqld.RefreshConfig(ctx, cnf) + if err != nil { + return nil, nil, fmt.Errorf("failed to refresh config: %w", err) + } + if err := mysqld.Start(ctx, cnf); err != nil { + return nil, nil, fmt.Errorf("Failed to start mysqld: %w", err) + } + } + + return mysqld, cnf, nil +} + +func run(cmd *cobra.Command, args []string) (err error) { + 
// Stash away a copy of the topology that vtcombo was started with. + // + // We will use this to determine the shard structure when keyspaces + // get recreated. + originalTopology := (&tpb).CloneVT() + + // default cell to "test" if unspecified + if len(tpb.Cells) == 0 { + tpb.Cells = append(tpb.Cells, "test") + } + + cmd.Flags().Set("cells_to_watch", strings.Join(tpb.Cells, ",")) + + // vtctld UI requires the cell flag + cmd.Flags().Set("cell", tpb.Cells[0]) + if cmd.Flags().Lookup("log_dir") == nil { + cmd.Flags().Set("log_dir", "$VTDATAROOT/tmp") + } + + if externalTopoServer { + // Open topo server based on the command line flags defined at topo/server.go + // do not create cell info as it should be done by whoever sets up the external topo server + ts = topo.Open() + } else { + // Create topo server. We use a 'memorytopo' implementation. + ts = memorytopo.NewServer(context.Background(), tpb.Cells...) + } + + // attempt to load any routing rules specified by tpb + if err := vtcombo.InitRoutingRules(context.Background(), ts, tpb.GetRoutingRules()); err != nil { + return fmt.Errorf("Failed to load routing rules: %w", err) + } + + servenv.Init() + tabletenv.Init() + + var ( + mysqld = &vtcomboMysqld{} + cnf *mysqlctl.Mycnf + ) + + if startMysql { + mysqld.Mysqld, cnf, err = startMysqld(1) + if err != nil { + return err + } + servenv.OnClose(func() { + mysqld.Shutdown(context.TODO(), cnf, true) + }) + // We want to ensure we can write to this database + mysqld.SetReadOnly(false) + + } else { + dbconfigs.GlobalDBConfigs.InitWithSocket("") + mysqld.Mysqld = mysqlctl.NewMysqld(&dbconfigs.GlobalDBConfigs) + servenv.OnClose(mysqld.Close) + } + + // Tablet configuration and init. + // Send mycnf as nil because vtcombo won't do backups and restores. + // + // Also force the `--tablet_manager_protocol` and `--tablet_protocol` flags + // to be the "internal" protocol that InitTabletMap registers. 
+ cmd.Flags().Set("tablet_manager_protocol", "internal") + cmd.Flags().Set("tablet_protocol", "internal") + uid, err := vtcombo.InitTabletMap(ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, schemaDir, startMysql) + if err != nil { + // ensure we start mysql in the event we fail here + if startMysql { + mysqld.Shutdown(context.TODO(), cnf, true) + } + + return fmt.Errorf("initTabletMapProto failed: %w", err) + } + + globalCreateDb = func(ctx context.Context, ks *vttestpb.Keyspace) error { + // Check if we're recreating a keyspace that was previously deleted by looking + // at the original topology definition. + // + // If we find a matching keyspace, we create it with the same sharding + // configuration. This ensures that dropping and recreating a keyspace + // will end up with the same number of shards. + for _, originalKs := range originalTopology.Keyspaces { + if originalKs.Name == ks.Name { + ks = originalKs.CloneVT() + } + } + + wr := wrangler.New(logutil.NewConsoleLogger(), ts, nil) + newUID, err := vtcombo.CreateKs(ctx, ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, schemaDir, ks, true, uid, wr) + if err != nil { + return err + } + uid = newUID + tpb.Keyspaces = append(tpb.Keyspaces, ks) + return nil + } + + globalDropDb = func(ctx context.Context, ksName string) error { + if err := vtcombo.DeleteKs(ctx, ts, ksName, mysqld, &tpb); err != nil { + return err + } + + // Rebuild the SrvVSchema object + if err := ts.RebuildSrvVSchema(ctx, tpb.Cells); err != nil { + return err + } + + return nil + } + + // Now that we have fully initialized the tablets, rebuild the keyspace graph. + for _, ks := range tpb.Keyspaces { + err := topotools.RebuildKeyspace(context.Background(), logutil.NewConsoleLogger(), ts, ks.GetName(), tpb.Cells, false) + if err != nil { + if startMysql { + mysqld.Shutdown(context.TODO(), cnf, true) + } + + return fmt.Errorf("Couldn't build srv keyspace for (%v: %v). 
Got error: %w", ks, tpb.Cells, err) + } + } + + // vtgate configuration and init + resilientServer = srvtopo.NewResilientServer(context.Background(), ts, "ResilientSrvTopoServer") + tabletTypesToWait := []topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_REPLICA, + topodatapb.TabletType_RDONLY, + } + plannerVersion, _ := plancontext.PlannerNameToVersion(plannerName) + + vtgate.QueryLogHandler = "/debug/vtgate/querylog" + vtgate.QueryLogzHandler = "/debug/vtgate/querylogz" + vtgate.QueryzHandler = "/debug/vtgate/queryz" + // pass nil for healthcheck, it will get created + vtg := vtgate.Init(context.Background(), nil, resilientServer, tpb.Cells[0], tabletTypesToWait, plannerVersion) + + // vtctld configuration and init + err = vtctld.InitVtctld(ts) + if err != nil { + return err + } + + if vschemaPersistenceDir != "" && !externalTopoServer { + startVschemaWatcher(vschemaPersistenceDir, tpb.Keyspaces, ts) + } + + servenv.OnRun(func() { + addStatusParts(vtg) + }) + + servenv.OnTerm(func() { + log.Error("Terminating") + // FIXME(alainjobart): stop vtgate + }) + servenv.OnClose(func() { + // We will still use the topo server during lameduck period + // to update our state, so closing it in OnClose() + ts.Close() + }) + servenv.RunDefault() + + return nil +} + +// vtcomboMysqld is a wrapper on top of mysqlctl.Mysqld. +// We need this wrapper because vtcombo runs with a single MySQL instance +// which all the tablets connect to. (replica, primary, all). This means that we shouldn't +// be trying to run any replication related commands on it, otherwise they fail. 
+type vtcomboMysqld struct { + *mysqlctl.Mysqld +} + +// SetReplicationSource implements the MysqlDaemon interface +func (mysqld *vtcomboMysqld) SetReplicationSource(ctx context.Context, host string, port int32, stopReplicationBefore bool, startReplicationAfter bool) error { + return nil +} + +// StartReplication implements the MysqlDaemon interface +func (mysqld *vtcomboMysqld) StartReplication(hookExtraEnv map[string]string) error { + return nil +} + +// RestartReplication implements the MysqlDaemon interface +func (mysqld *vtcomboMysqld) RestartReplication(hookExtraEnv map[string]string) error { + return nil +} + +// StartReplicationUntilAfter implements the MysqlDaemon interface +func (mysqld *vtcomboMysqld) StartReplicationUntilAfter(ctx context.Context, pos replication.Position) error { + return nil +} + +// StopReplication implements the MysqlDaemon interface +func (mysqld *vtcomboMysqld) StopReplication(hookExtraEnv map[string]string) error { + return nil +} + +// SetSemiSyncEnabled implements the MysqlDaemon interface +func (mysqld *vtcomboMysqld) SetSemiSyncEnabled(source, replica bool) error { + return nil +} + +// SemiSyncExtensionLoaded implements the MysqlDaemon interface +func (mysqld *vtcomboMysqld) SemiSyncExtensionLoaded() (bool, error) { + return true, nil +} diff --git a/go/cmd/vtcombo/plugin_dbddl.go b/go/cmd/vtcombo/cli/plugin_dbddl.go similarity index 93% rename from go/cmd/vtcombo/plugin_dbddl.go rename to go/cmd/vtcombo/cli/plugin_dbddl.go index 49a7a601fb1..b04af91af5c 100644 --- a/go/cmd/vtcombo/plugin_dbddl.go +++ b/go/cmd/vtcombo/cli/plugin_dbddl.go @@ -14,10 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package cli import ( "context" + "sync" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vtgate/engine" @@ -29,7 +30,9 @@ var globalCreateDb func(ctx context.Context, ks *vttestpb.Keyspace) error var globalDropDb func(ctx context.Context, ksName string) error // DBDDL doesn't need to store any state - we use the global variables above instead -type DBDDL struct{} +type DBDDL struct { + mu sync.Mutex +} // CreateDatabase implements the engine.DBDDLPlugin interface func (plugin *DBDDL) CreateDatabase(ctx context.Context, name string) error { @@ -39,6 +42,8 @@ func (plugin *DBDDL) CreateDatabase(ctx context.Context, name string) error { Name: "0", }}, } + plugin.mu.Lock() + defer plugin.mu.Unlock() return globalCreateDb(ctx, ks) } diff --git a/go/cmd/vtcombo/plugin_grpcvtctldserver.go b/go/cmd/vtcombo/cli/plugin_grpcvtctldserver.go similarity index 98% rename from go/cmd/vtcombo/plugin_grpcvtctldserver.go rename to go/cmd/vtcombo/cli/plugin_grpcvtctldserver.go index e5bba399072..2cf8eed8368 100644 --- a/go/cmd/vtcombo/plugin_grpcvtctldserver.go +++ b/go/cmd/vtcombo/cli/plugin_grpcvtctldserver.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( "vitess.io/vitess/go/vt/servenv" diff --git a/go/cmd/vtctld/plugin_grpcvtctlserver.go b/go/cmd/vtcombo/cli/plugin_grpcvtctlserver.go similarity index 98% rename from go/cmd/vtctld/plugin_grpcvtctlserver.go rename to go/cmd/vtcombo/cli/plugin_grpcvtctlserver.go index 4ec5323b075..8b7f918bc58 100644 --- a/go/cmd/vtctld/plugin_grpcvtctlserver.go +++ b/go/cmd/vtcombo/cli/plugin_grpcvtctlserver.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package cli import ( "vitess.io/vitess/go/vt/servenv" diff --git a/go/cmd/vtcombo/plugin_grpcvtgateservice.go b/go/cmd/vtcombo/cli/plugin_grpcvtgateservice.go similarity index 98% rename from go/cmd/vtcombo/plugin_grpcvtgateservice.go rename to go/cmd/vtcombo/cli/plugin_grpcvtgateservice.go index ff58dff616a..a980f063577 100644 --- a/go/cmd/vtcombo/plugin_grpcvtgateservice.go +++ b/go/cmd/vtcombo/cli/plugin_grpcvtgateservice.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC vtgateservice server diff --git a/go/cmd/vtcombo/plugin_opentracing.go b/go/cmd/vtcombo/cli/plugin_opentracing.go similarity index 98% rename from go/cmd/vtcombo/plugin_opentracing.go rename to go/cmd/vtcombo/cli/plugin_opentracing.go index c2ea8325e6a..0b9274b498d 100644 --- a/go/cmd/vtcombo/plugin_opentracing.go +++ b/go/cmd/vtcombo/cli/plugin_opentracing.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( "vitess.io/vitess/go/trace" diff --git a/go/cmd/vtcombo/status.go b/go/cmd/vtcombo/cli/status.go similarity index 96% rename from go/cmd/vtcombo/status.go rename to go/cmd/vtcombo/cli/status.go index 2b5e2696391..8069fc72606 100644 --- a/go/cmd/vtcombo/status.go +++ b/go/cmd/vtcombo/cli/status.go @@ -14,15 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package cli import ( "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/vtgate" - - _ "vitess.io/vitess/go/vt/status" ) func addStatusParts(vtg *vtgate.VTGate) { diff --git a/go/cmd/vtcombo/cli/vschema_watcher.go b/go/cmd/vtcombo/cli/vschema_watcher.go new file mode 100644 index 00000000000..c1c9f120b96 --- /dev/null +++ b/go/cmd/vtcombo/cli/vschema_watcher.go @@ -0,0 +1,117 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "context" + "encoding/json" + "os" + "path" + + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtgate/vindexes" + + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vttestpb "vitess.io/vitess/go/vt/proto/vttest" +) + +func startVschemaWatcher(vschemaPersistenceDir string, keyspaces []*vttestpb.Keyspace, ts *topo.Server) { + // Create the directory if it doesn't exist. + if err := createDirectoryIfNotExists(vschemaPersistenceDir); err != nil { + log.Fatalf("Unable to create vschema persistence directory %v: %v", vschemaPersistenceDir, err) + } + + // If there are keyspace files, load them. 
+ loadKeyspacesFromDir(vschemaPersistenceDir, keyspaces, ts) + + // Rebuild the SrvVSchema object in case we loaded vschema from file + if err := ts.RebuildSrvVSchema(context.Background(), tpb.Cells); err != nil { + log.Fatalf("RebuildSrvVSchema failed: %v", err) + } + + // Now watch for changes in the SrvVSchema object and persist them to disk. + go watchSrvVSchema(context.Background(), ts, tpb.Cells[0]) +} + +func loadKeyspacesFromDir(dir string, keyspaces []*vttestpb.Keyspace, ts *topo.Server) { + for _, ks := range tpb.Keyspaces { + ksFile := path.Join(dir, ks.Name+".json") + if _, err := os.Stat(ksFile); err == nil { + jsonData, err := os.ReadFile(ksFile) + if err != nil { + log.Fatalf("Unable to read keyspace file %v: %v", ksFile, err) + } + + keyspace := &vschemapb.Keyspace{} + err = json.Unmarshal(jsonData, keyspace) + if err != nil { + log.Fatalf("Unable to parse keyspace file %v: %v", ksFile, err) + } + + _, err = vindexes.BuildKeyspace(keyspace) + if err != nil { + log.Fatalf("Invalid keyspace definition: %v", err) + } + ts.SaveVSchema(context.Background(), ks.Name, keyspace) + log.Infof("Loaded keyspace %v from %v\n", ks.Name, ksFile) + } + } +} + +func watchSrvVSchema(ctx context.Context, ts *topo.Server, cell string) { + data, ch, err := ts.WatchSrvVSchema(context.Background(), tpb.Cells[0]) + if err != nil { + log.Fatalf("WatchSrvVSchema failed: %v", err) + } + + if data.Err != nil { + log.Fatalf("WatchSrvVSchema could not retrieve initial vschema: %v", data.Err) + } + persistNewSrvVSchema(data.Value) + + for update := range ch { + if update.Err != nil { + log.Errorf("WatchSrvVSchema returned an error: %v", update.Err) + } else { + persistNewSrvVSchema(update.Value) + } + } +} + +func persistNewSrvVSchema(srvVSchema *vschemapb.SrvVSchema) { + for ksName, ks := range srvVSchema.Keyspaces { + jsonBytes, err := json.MarshalIndent(ks, "", " ") + if err != nil { + log.Errorf("Error marshaling keyspace: %v", err) + continue + } + + err = 
os.WriteFile(path.Join(vschemaPersistenceDir, ksName+".json"), jsonBytes, 0644) + if err != nil { + log.Errorf("Error writing keyspace file: %v", err) + } + log.Infof("Persisted keyspace %v to %v", ksName, vschemaPersistenceDir) + } +} + +func createDirectoryIfNotExists(dir string) error { + if _, err := os.Stat(dir); os.IsNotExist(err) { + return os.Mkdir(dir, 0755) + } + return nil +} diff --git a/go/cmd/vtcombo/docgen/main.go b/go/cmd/vtcombo/docgen/main.go new file mode 100644 index 00000000000..31304296b3e --- /dev/null +++ b/go/cmd/vtcombo/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/vtcombo/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.Main, dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/vtcombo/main.go b/go/cmd/vtcombo/main.go index affbf0520e7..f5de215b617 100644 --- a/go/cmd/vtcombo/main.go +++ b/go/cmd/vtcombo/main.go @@ -22,331 +22,16 @@ limitations under the License. 
package main import ( - "context" - "os" - "strings" - "time" - - "github.com/spf13/pflag" - "google.golang.org/protobuf/proto" - - "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/cmd/vtcombo/cli" "vitess.io/vitess/go/exit" - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/mysqlctl" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/srvtopo" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/topotools" - "vitess.io/vitess/go/vt/vtcombo" - "vitess.io/vitess/go/vt/vtctld" - "vitess.io/vitess/go/vt/vtgate" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" - "vitess.io/vitess/go/vt/vttest" - "vitess.io/vitess/go/vt/wrangler" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - vttestpb "vitess.io/vitess/go/vt/proto/vttest" ) -var ( - flags = pflag.NewFlagSet("vtcombo", pflag.ContinueOnError) - schemaDir = flags.String("schema_dir", "", "Schema base directory. Should contain one directory per keyspace, with a vschema.json file if necessary.") - startMysql = flags.Bool("start_mysql", false, "Should vtcombo also start mysql") - mysqlPort = flags.Int("mysql_port", 3306, "mysql port") - externalTopoServer = flags.Bool("external_topo_server", false, "Should vtcombo use an external topology server instead of starting its own in-memory topology server. "+ - "If true, vtcombo will use the flags defined in topo/server.go to open topo server") - plannerName = flags.String("planner-version", "", "Sets the default planner to use when the session has not changed it. Valid values are: V3, V3Insert, Gen4, Gen4Greedy and Gen4Fallback. 
Gen4Fallback tries the gen4 planner and falls back to the V3 planner if the gen4 fails.") - - tpb vttestpb.VTTestTopology - ts *topo.Server - resilientServer *srvtopo.ResilientServer -) - -func init() { - flags.Var(vttest.TextTopoData(&tpb), "proto_topo", "vttest proto definition of the topology, encoded in compact text format. See vttest.proto for more information.") - flags.Var(vttest.JSONTopoData(&tpb), "json_topo", "vttest proto definition of the topology, encoded in json format. See vttest.proto for more information.") - - servenv.RegisterDefaultFlags() - servenv.RegisterFlags() - servenv.RegisterGRPCServerFlags() - servenv.RegisterGRPCServerAuthFlags() - servenv.RegisterServiceMapFlag() -} - -func startMysqld(uid uint32) (*mysqlctl.Mysqld, *mysqlctl.Mycnf) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - mycnfFile := mysqlctl.MycnfFile(uid) - - var mysqld *mysqlctl.Mysqld - var cnf *mysqlctl.Mycnf - var err error - - if _, statErr := os.Stat(mycnfFile); os.IsNotExist(statErr) { - mysqld, cnf, err = mysqlctl.CreateMysqldAndMycnf(uid, "", *mysqlPort) - if err != nil { - log.Errorf("failed to initialize mysql config :%v", err) - exit.Return(1) - } - if err := mysqld.Init(ctx, cnf, ""); err != nil { - log.Errorf("failed to initialize mysql :%v", err) - exit.Return(1) - } - } else { - mysqld, cnf, err = mysqlctl.OpenMysqldAndMycnf(uid) - if err != nil { - log.Errorf("failed to find mysql config: %v", err) - exit.Return(1) - } - err = mysqld.RefreshConfig(ctx, cnf) - if err != nil { - log.Errorf("failed to refresh config: %v", err) - exit.Return(1) - } - if err := mysqld.Start(ctx, cnf); err != nil { - log.Errorf("Failed to start mysqld: %v", err) - exit.Return(1) - } - } - cancel() - return mysqld, cnf -} - func main() { defer exit.Recover() - // flag parsing - var globalFlags *pflag.FlagSet - dbconfigs.RegisterFlags(dbconfigs.All...) 
- mysqlctl.RegisterFlags() - servenv.OnParseFor("vtcombo", func(fs *pflag.FlagSet) { - // We're going to force the value later, so don't even bother letting - // the user know about this flag. - fs.MarkHidden("tablet_protocol") - - // Add the vtcombo flags declared above in var/init sections to the - // global flags. - fs.AddFlagSet(flags) - // Save for later -- see comment directly after ParseFlags for why. - globalFlags = fs - - acl.RegisterFlags(fs) - }) - - servenv.ParseFlags("vtcombo") - - // At this point, servenv.ParseFlags has invoked _flag.Parse, which has - // combined all the flags everywhere into the globalFlags variable we - // stashed a reference to earlier in our OnParseFor callback function. - // - // We now take those flags and make them available to our `flags` instance, - // which we call `Set` on various flags to force their values further down - // in main(). - // - // N.B.: we could just as easily call Set on globalFlags on everything - // (including our local flags), but we need to save a reference either way, - // and that in particular (globalFlags.Set on a local flag) feels more - // potentially confusing than its inverse (flags.Set on a global flag), so - // we go this way. - flags.AddFlagSet(globalFlags) - - // Stash away a copy of the topology that vtcombo was started with. - // - // We will use this to determine the shard structure when keyspaces - // get recreated. 
- originalTopology := proto.Clone(&tpb).(*vttestpb.VTTestTopology) - - // default cell to "test" if unspecified - if len(tpb.Cells) == 0 { - tpb.Cells = append(tpb.Cells, "test") - } - - flags.Set("cells_to_watch", strings.Join(tpb.Cells, ",")) - - // vtctld UI requires the cell flag - flags.Set("cell", tpb.Cells[0]) - if flags.Lookup("log_dir") == nil { - flags.Set("log_dir", "$VTDATAROOT/tmp") - } - - if *externalTopoServer { - // Open topo server based on the command line flags defined at topo/server.go - // do not create cell info as it should be done by whoever sets up the external topo server - ts = topo.Open() - } else { - // Create topo server. We use a 'memorytopo' implementation. - ts = memorytopo.NewServer(tpb.Cells...) - } - - // attempt to load any routing rules specified by tpb - if err := vtcombo.InitRoutingRules(context.Background(), ts, tpb.GetRoutingRules()); err != nil { - log.Errorf("Failed to load routing rules: %v", err) - exit.Return(1) - } - - servenv.Init() - tabletenv.Init() - - mysqld := &vtcomboMysqld{} - var cnf *mysqlctl.Mycnf - if *startMysql { - mysqld.Mysqld, cnf = startMysqld(1) - servenv.OnClose(func() { - mysqld.Shutdown(context.TODO(), cnf, true) - }) - // We want to ensure we can write to this database - mysqld.SetReadOnly(false) - - } else { - dbconfigs.GlobalDBConfigs.InitWithSocket("") - mysqld.Mysqld = mysqlctl.NewMysqld(&dbconfigs.GlobalDBConfigs) - servenv.OnClose(mysqld.Close) - } - // Tablet configuration and init. - // Send mycnf as nil because vtcombo won't do backups and restores. - // - // Also force the `--tablet_manager_protocol` and `--tablet_protocol` flags - // to be the "internal" protocol that InitTabletMap registers. 
- flags.Set("tablet_manager_protocol", "internal") - flags.Set("tablet_protocol", "internal") - uid, err := vtcombo.InitTabletMap(ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, *schemaDir, *startMysql) - if err != nil { - log.Errorf("initTabletMapProto failed: %v", err) - // ensure we start mysql in the event we fail here - if *startMysql { - mysqld.Shutdown(context.TODO(), cnf, true) - } + if err := cli.Main.Execute(); err != nil { + log.Error(err) exit.Return(1) } - - globalCreateDb = func(ctx context.Context, ks *vttestpb.Keyspace) error { - // Check if we're recreating a keyspace that was previously deleted by looking - // at the original topology definition. - // - // If we find a matching keyspace, we create it with the same sharding - // configuration. This ensures that dropping and recreating a keyspace - // will end up with the same number of shards. - for _, originalKs := range originalTopology.Keyspaces { - if originalKs.Name == ks.Name { - ks = proto.Clone(originalKs).(*vttestpb.Keyspace) - } - } - - wr := wrangler.New(logutil.NewConsoleLogger(), ts, nil) - newUID, err := vtcombo.CreateKs(ctx, ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, *schemaDir, ks, true, uid, wr) - if err != nil { - return err - } - uid = newUID - tpb.Keyspaces = append(tpb.Keyspaces, ks) - return nil - } - - globalDropDb = func(ctx context.Context, ksName string) error { - if err := vtcombo.DeleteKs(ctx, ts, ksName, mysqld, &tpb); err != nil { - return err - } - - // Rebuild the SrvVSchema object - if err := ts.RebuildSrvVSchema(ctx, tpb.Cells); err != nil { - return err - } - - return nil - } - - // Now that we have fully initialized the tablets, rebuild the keyspace graph. - for _, ks := range tpb.Keyspaces { - err := topotools.RebuildKeyspace(context.Background(), logutil.NewConsoleLogger(), ts, ks.GetName(), tpb.Cells, false) - if err != nil { - if *startMysql { - mysqld.Shutdown(context.TODO(), cnf, true) - } - log.Fatalf("Couldn't build srv keyspace for (%v: %v). 
Got error: %v", ks, tpb.Cells, err) - } - } - - // vtgate configuration and init - resilientServer = srvtopo.NewResilientServer(ts, "ResilientSrvTopoServer") - tabletTypesToWait := []topodatapb.TabletType{ - topodatapb.TabletType_PRIMARY, - topodatapb.TabletType_REPLICA, - topodatapb.TabletType_RDONLY, - } - plannerVersion, _ := plancontext.PlannerNameToVersion(*plannerName) - - vtgate.QueryLogHandler = "/debug/vtgate/querylog" - vtgate.QueryLogzHandler = "/debug/vtgate/querylogz" - vtgate.QueryzHandler = "/debug/vtgate/queryz" - // pass nil for healthcheck, it will get created - vtg := vtgate.Init(context.Background(), nil, resilientServer, tpb.Cells[0], tabletTypesToWait, plannerVersion) - - // vtctld configuration and init - err = vtctld.InitVtctld(ts) - if err != nil { - exit.Return(1) - } - - servenv.OnRun(func() { - addStatusParts(vtg) - }) - - servenv.OnTerm(func() { - log.Error("Terminating") - // FIXME(alainjobart): stop vtgate - }) - servenv.OnClose(func() { - // We will still use the topo server during lameduck period - // to update our state, so closing it in OnClose() - ts.Close() - }) - servenv.RunDefault() -} - -// vtcomboMysqld is a wrapper on top of mysqlctl.Mysqld. -// We need this wrapper because vtcombo runs with a single MySQL instance -// which all the tablets connect to. (replica, primary, all). This means that we shouldn't -// be trying to run any replication related commands on it, otherwise they fail. 
-type vtcomboMysqld struct { - *mysqlctl.Mysqld -} - -// SetReplicationSource implements the MysqlDaemon interface -func (mysqld *vtcomboMysqld) SetReplicationSource(ctx context.Context, host string, port int32, stopReplicationBefore bool, startReplicationAfter bool) error { - return nil -} - -// StartReplication implements the MysqlDaemon interface -func (mysqld *vtcomboMysqld) StartReplication(hookExtraEnv map[string]string) error { - return nil -} - -// RestartReplication implements the MysqlDaemon interface -func (mysqld *vtcomboMysqld) RestartReplication(hookExtraEnv map[string]string) error { - return nil -} - -// StartReplicationUntilAfter implements the MysqlDaemon interface -func (mysqld *vtcomboMysqld) StartReplicationUntilAfter(ctx context.Context, pos mysql.Position) error { - return nil -} - -// StopReplication implements the MysqlDaemon interface -func (mysqld *vtcomboMysqld) StopReplication(hookExtraEnv map[string]string) error { - return nil -} - -// SetSemiSyncEnabled implements the MysqlDaemon interface -func (mysqld *vtcomboMysqld) SetSemiSyncEnabled(source, replica bool) error { - return nil -} - -// SemiSyncExtensionLoaded implements the MysqlDaemon interface -func (mysqld *vtcomboMysqld) SemiSyncExtensionLoaded() (bool, error) { - return true, nil -} } diff --git a/go/cmd/vtctld/cli/cli.go b/go/cmd/vtctld/cli/cli.go new file mode 100644 index 00000000000..e5124133adb --- /dev/null +++ b/go/cmd/vtctld/cli/cli.go @@ -0,0 +1,89 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtctld" +) + +var ( + ts *topo.Server + Main = &cobra.Command{ + Use: "vtctld", + Short: "The Vitess cluster management daemon.", + Long: `vtctld provides web and gRPC interfaces to manage a single Vitess cluster. +It is usually the first Vitess component to be started after a valid global topology service has been created. + +For the last several releases, vtctld has been transitioning to a newer gRPC service for well-typed cluster management requests. +This is **required** to use programs such as vtadmin and vtctldclient, and The old API and service are deprecated and will be removed in a future release. +To enable this newer service, include "grpc-vtctld" in the --service_map argument. +This is demonstrated in the example usage below.`, + Example: `vtctld \ + --topo_implementation etcd2 \ + --topo_global_server_address localhost:2379 \ + --topo_global_root /vitess/ \ + --service_map 'grpc-vtctl,grpc-vtctld' \ + --backup_storage_implementation file \ + --file_backup_storage_root $VTDATAROOT/backups \ + --port 15000 \ + --grpc_port 15999`, + Args: cobra.NoArgs, + Version: servenv.AppVersion.String(), + PreRunE: servenv.CobraPreRunE, + RunE: run, + } +) + +func run(cmd *cobra.Command, args []string) error { + servenv.Init() + + ts = topo.Open() + defer ts.Close() + + // Init the vtctld core + if err := vtctld.InitVtctld(ts); err != nil { + return err + } + + // Register http debug/health + vtctld.RegisterDebugHealthHandler(ts) + + // Start schema manager service. + initSchema() + + // And run the server. 
+ servenv.RunDefault() + + return nil +} + +func init() { + servenv.RegisterDefaultFlags() + servenv.RegisterFlags() + servenv.RegisterGRPCServerFlags() + servenv.RegisterGRPCServerAuthFlags() + servenv.RegisterServiceMapFlag() + + servenv.MoveFlagsToCobraCommand(Main) + + acl.RegisterFlags(Main.Flags()) +} diff --git a/go/cmd/vtbackup/plugin_azblobbackupstorage.go b/go/cmd/vtctld/cli/plugin_azblobbackupstorage.go similarity index 97% rename from go/cmd/vtbackup/plugin_azblobbackupstorage.go rename to go/cmd/vtctld/cli/plugin_azblobbackupstorage.go index a4ca64096a9..bdadc894aae 100644 --- a/go/cmd/vtbackup/plugin_azblobbackupstorage.go +++ b/go/cmd/vtctld/cli/plugin_azblobbackupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/azblobbackupstorage" diff --git a/go/cmd/vtctld/plugin_cephbackupstorage.go b/go/cmd/vtctld/cli/plugin_cephbackupstorage.go similarity index 97% rename from go/cmd/vtctld/plugin_cephbackupstorage.go rename to go/cmd/vtctld/cli/plugin_cephbackupstorage.go index 6cd2d5619d0..171198f5e29 100644 --- a/go/cmd/vtctld/plugin_cephbackupstorage.go +++ b/go/cmd/vtctld/cli/plugin_cephbackupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/cephbackupstorage" diff --git a/go/cmd/vtctld/plugin_consultopo.go b/go/cmd/vtctld/cli/plugin_consultopo.go similarity index 98% rename from go/cmd/vtctld/plugin_consultopo.go rename to go/cmd/vtctld/cli/plugin_consultopo.go index a0c53abe5ea..4617d753953 100644 --- a/go/cmd/vtctld/plugin_consultopo.go +++ b/go/cmd/vtctld/cli/plugin_consultopo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package cli // Imports and register the 'consul' topo.Server. diff --git a/go/cmd/vtctld/plugin_etcd2topo.go b/go/cmd/vtctld/cli/plugin_etcd2topo.go similarity index 98% rename from go/cmd/vtctld/plugin_etcd2topo.go rename to go/cmd/vtctld/cli/plugin_etcd2topo.go index 6ec507f910d..06e014fc19f 100644 --- a/go/cmd/vtctld/plugin_etcd2topo.go +++ b/go/cmd/vtctld/cli/plugin_etcd2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the 'etcd2' topo.Server. diff --git a/go/cmd/vttablet/plugin_filebackupstorage.go b/go/cmd/vtctld/cli/plugin_filebackupstorage.go similarity index 97% rename from go/cmd/vttablet/plugin_filebackupstorage.go rename to go/cmd/vtctld/cli/plugin_filebackupstorage.go index cf2ceb5150f..9edc82d6a1b 100644 --- a/go/cmd/vttablet/plugin_filebackupstorage.go +++ b/go/cmd/vtctld/cli/plugin_filebackupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/filebackupstorage" diff --git a/go/cmd/vtctld/plugin_gcsbackupstorage.go b/go/cmd/vtctld/cli/plugin_gcsbackupstorage.go similarity index 97% rename from go/cmd/vtctld/plugin_gcsbackupstorage.go rename to go/cmd/vtctld/cli/plugin_gcsbackupstorage.go index 82a22cef1da..655583c8ca2 100644 --- a/go/cmd/vtctld/plugin_gcsbackupstorage.go +++ b/go/cmd/vtctld/cli/plugin_gcsbackupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/gcsbackupstorage" diff --git a/go/cmd/vtctld/plugin_grpctabletconn.go b/go/cmd/vtctld/cli/plugin_grpctabletconn.go similarity index 98% rename from go/cmd/vtctld/plugin_grpctabletconn.go rename to go/cmd/vtctld/cli/plugin_grpctabletconn.go index 08291a7c916..4a97e36eec4 100644 --- a/go/cmd/vtctld/plugin_grpctabletconn.go +++ b/go/cmd/vtctld/cli/plugin_grpctabletconn.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC tabletconn client diff --git a/go/cmd/vtorc/plugin_grpctmclient.go b/go/cmd/vtctld/cli/plugin_grpctmclient.go similarity index 98% rename from go/cmd/vtorc/plugin_grpctmclient.go rename to go/cmd/vtctld/cli/plugin_grpctmclient.go index ce554da96df..8cd349c7f87 100644 --- a/go/cmd/vtorc/plugin_grpctmclient.go +++ b/go/cmd/vtctld/cli/plugin_grpctmclient.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC tabletmanager client diff --git a/go/cmd/vtctld/plugin_grpcvtctldserver.go b/go/cmd/vtctld/cli/plugin_grpcvtctldserver.go similarity index 98% rename from go/cmd/vtctld/plugin_grpcvtctldserver.go rename to go/cmd/vtctld/cli/plugin_grpcvtctldserver.go index ee5d0aba22a..ff283d91336 100644 --- a/go/cmd/vtctld/plugin_grpcvtctldserver.go +++ b/go/cmd/vtctld/cli/plugin_grpcvtctldserver.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package cli import ( "vitess.io/vitess/go/vt/servenv" diff --git a/go/cmd/vtcombo/plugin_grpcvtctlserver.go b/go/cmd/vtctld/cli/plugin_grpcvtctlserver.go similarity index 98% rename from go/cmd/vtcombo/plugin_grpcvtctlserver.go rename to go/cmd/vtctld/cli/plugin_grpcvtctlserver.go index 4ec5323b075..8b7f918bc58 100644 --- a/go/cmd/vtcombo/plugin_grpcvtctlserver.go +++ b/go/cmd/vtctld/cli/plugin_grpcvtctlserver.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( "vitess.io/vitess/go/vt/servenv" diff --git a/go/cmd/vtctld/plugin_grpcvtgateconn.go b/go/cmd/vtctld/cli/plugin_grpcvtgateconn.go similarity index 98% rename from go/cmd/vtctld/plugin_grpcvtgateconn.go rename to go/cmd/vtctld/cli/plugin_grpcvtgateconn.go index 87019ea4260..2f05e6d9a4e 100644 --- a/go/cmd/vtctld/plugin_grpcvtgateconn.go +++ b/go/cmd/vtctld/cli/plugin_grpcvtgateconn.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC vtgateconn client diff --git a/go/cmd/vtctld/plugin_opentracing.go b/go/cmd/vtctld/cli/plugin_opentracing.go similarity index 98% rename from go/cmd/vtctld/plugin_opentracing.go rename to go/cmd/vtctld/cli/plugin_opentracing.go index c35034d42a2..76423623493 100644 --- a/go/cmd/vtctld/plugin_opentracing.go +++ b/go/cmd/vtctld/cli/plugin_opentracing.go @@ -14,11 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package cli import ( "vitess.io/vitess/go/trace" - "vitess.io/vitess/go/vt/servenv" ) diff --git a/go/cmd/vtctld/plugin_opentsdb.go b/go/cmd/vtctld/cli/plugin_opentsdb.go similarity index 98% rename from go/cmd/vtctld/plugin_opentsdb.go rename to go/cmd/vtctld/cli/plugin_opentsdb.go index 38f464dd887..e4f76d29009 100644 --- a/go/cmd/vtctld/plugin_opentsdb.go +++ b/go/cmd/vtctld/cli/plugin_opentsdb.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports opentsdb to register the opentsdb stats backend. diff --git a/go/cmd/vtctld/plugin_prometheusbackend.go b/go/cmd/vtctld/cli/plugin_prometheusbackend.go similarity index 98% rename from go/cmd/vtctld/plugin_prometheusbackend.go rename to go/cmd/vtctld/cli/plugin_prometheusbackend.go index f3c33e5637b..3c66018fe75 100644 --- a/go/cmd/vtctld/plugin_prometheusbackend.go +++ b/go/cmd/vtctld/cli/plugin_prometheusbackend.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports Prometheus to allow for instrumentation // with the Prometheus client library diff --git a/go/cmd/vtctld/plugin_s3backupstorage.go b/go/cmd/vtctld/cli/plugin_s3backupstorage.go similarity index 97% rename from go/cmd/vtctld/plugin_s3backupstorage.go rename to go/cmd/vtctld/cli/plugin_s3backupstorage.go index a5b5c671ebb..4b3ecb33edb 100644 --- a/go/cmd/vtctld/plugin_s3backupstorage.go +++ b/go/cmd/vtctld/cli/plugin_s3backupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/s3backupstorage" diff --git a/go/cmd/vtctld/plugin_zk2topo.go b/go/cmd/vtctld/cli/plugin_zk2topo.go similarity index 98% rename from go/cmd/vtctld/plugin_zk2topo.go rename to go/cmd/vtctld/cli/plugin_zk2topo.go index 531d92c4cdd..77f86d98d52 100644 --- a/go/cmd/vtctld/plugin_zk2topo.go +++ b/go/cmd/vtctld/cli/plugin_zk2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the 'zk2' topo.Server. diff --git a/go/cmd/vtctld/schema.go b/go/cmd/vtctld/cli/schema.go similarity index 63% rename from go/cmd/vtctld/schema.go rename to go/cmd/vtctld/cli/schema.go index 3bd7ae091c2..480679a09e6 100644 --- a/go/cmd/vtctld/schema.go +++ b/go/cmd/vtctld/cli/schema.go @@ -14,19 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( "context" "time" - "github.com/spf13/pflag" - "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/schemamanager" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/wrangler" ) @@ -36,18 +35,16 @@ var ( schemaChangeController string schemaChangeUser string schemaChangeCheckInterval = time.Minute - schemaChangeReplicasTimeout = wrangler.DefaultWaitReplicasTimeout + schemaChangeReplicasTimeout = grpcvtctldserver.DefaultWaitReplicasTimeout ) func init() { - servenv.OnParse(func(fs *pflag.FlagSet) { - fs.StringVar(&schemaChangeDir, "schema_change_dir", schemaChangeDir, "Directory containing schema changes for all keyspaces. Each keyspace has its own directory, and schema changes are expected to live in '$KEYSPACE/input' dir. (e.g. 'test_keyspace/input/*sql'). 
Each sql file represents a schema change.") - fs.StringVar(&schemaChangeController, "schema_change_controller", schemaChangeController, "Schema change controller is responsible for finding schema changes and responding to schema change events.") - fs.StringVar(&schemaChangeUser, "schema_change_user", schemaChangeUser, "The user who schema changes are submitted on behalf of.") + Main.Flags().StringVar(&schemaChangeDir, "schema_change_dir", schemaChangeDir, "Directory containing schema changes for all keyspaces. Each keyspace has its own directory, and schema changes are expected to live in '$KEYSPACE/input' dir. (e.g. 'test_keyspace/input/*sql'). Each sql file represents a schema change.") + Main.Flags().StringVar(&schemaChangeController, "schema_change_controller", schemaChangeController, "Schema change controller is responsible for finding schema changes and responding to schema change events.") + Main.Flags().StringVar(&schemaChangeUser, "schema_change_user", schemaChangeUser, "The user who schema changes are submitted on behalf of.") - fs.DurationVar(&schemaChangeCheckInterval, "schema_change_check_interval", schemaChangeCheckInterval, "How often the schema change dir is checked for schema changes. This value must be positive; if zero or lower, the default of 1m is used.") - fs.DurationVar(&schemaChangeReplicasTimeout, "schema_change_replicas_timeout", schemaChangeReplicasTimeout, "How long to wait for replicas to receive a schema change.") - }) + Main.Flags().DurationVar(&schemaChangeCheckInterval, "schema_change_check_interval", schemaChangeCheckInterval, "How often the schema change dir is checked for schema changes. 
This value must be positive; if zero or lower, the default of 1m is used.") + Main.Flags().DurationVar(&schemaChangeReplicasTimeout, "schema_change_replicas_timeout", schemaChangeReplicasTimeout, "How long to wait for replicas to receive a schema change.") } func initSchema() { @@ -78,7 +75,7 @@ func initSchema() { _, err = schemamanager.Run( ctx, controller, - schemamanager.NewTabletExecutor("vtctld/schema", wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), schemaChangeReplicasTimeout), + schemamanager.NewTabletExecutor("vtctld/schema", wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), schemaChangeReplicasTimeout, 0), ) if err != nil { log.Errorf("Schema change failed, error: %v", err) diff --git a/go/cmd/vtctld/docgen/main.go b/go/cmd/vtctld/docgen/main.go new file mode 100644 index 00000000000..4243153859e --- /dev/null +++ b/go/cmd/vtctld/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/vtctld/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.Main, dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/vtctld/main.go b/go/cmd/vtctld/main.go index 26f9e100c19..6f9ab7384fc 100644 --- a/go/cmd/vtctld/main.go +++ b/go/cmd/vtctld/main.go @@ -17,52 +17,12 @@ limitations under the License. package main import ( - "github.com/spf13/pflag" - - "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/exit" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vtctld" -) - -func init() { - servenv.RegisterDefaultFlags() - servenv.RegisterFlags() - servenv.RegisterGRPCServerFlags() - servenv.RegisterGRPCServerAuthFlags() - servenv.RegisterServiceMapFlag() - - servenv.OnParse(func(fs *pflag.FlagSet) { - acl.RegisterFlags(fs) - }) -} - -// used at runtime by plug-ins -var ( - ts *topo.Server + "vitess.io/vitess/go/cmd/vtctld/cli" + "vitess.io/vitess/go/vt/log" ) func main() { - servenv.ParseFlags("vtctld") - servenv.Init() - defer servenv.Close() - - ts = topo.Open() - defer ts.Close() - - // Init the vtctld core - err := vtctld.InitVtctld(ts) - if err != nil { - exit.Return(1) + if err := cli.Main.Execute(); err != nil { + log.Fatal(err) } - - // Register http debug/health - vtctld.RegisterDebugHealthHandler(ts) - - // Start schema manager service. - initSchema() - - // And run the server. 
- servenv.RunDefault() } diff --git a/go/cmd/vtctldclient/cli/awk.go b/go/cmd/vtctldclient/cli/awk.go index c68b0fc0627..2916034a3ca 100644 --- a/go/cmd/vtctldclient/cli/awk.go +++ b/go/cmd/vtctldclient/cli/awk.go @@ -22,7 +22,7 @@ import ( "strings" "time" - "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" @@ -66,7 +66,7 @@ func MarshalTabletAWK(t *topodatapb.Tablet) string { // special case for old primary that hasn't been updated in the topo // yet. if t.PrimaryTermStartTime != nil && t.PrimaryTermStartTime.Seconds > 0 { - mtst = logutil.ProtoToTime(t.PrimaryTermStartTime).Format(time.RFC3339) + mtst = protoutil.TimeFromProto(t.PrimaryTermStartTime).UTC().Format(time.RFC3339) } return fmt.Sprintf("%v %v %v %v %v %v %v %v", topoproto.TabletAliasString(t.Alias), keyspace, shard, topoproto.TabletTypeLString(t.Type), ti.Addr(), ti.MysqlAddr(), MarshalMapAWK(t.Tags), mtst) diff --git a/go/cmd/vtctldclient/cli/json.go b/go/cmd/vtctldclient/cli/json.go index 80af6d80d72..fb9ed2c35ac 100644 --- a/go/cmd/vtctldclient/cli/json.go +++ b/go/cmd/vtctldclient/cli/json.go @@ -25,6 +25,19 @@ import ( "google.golang.org/protobuf/proto" ) +const ( + jsonIndent = " " + jsonPrefix = "" +) + +var DefaultMarshalOptions = protojson.MarshalOptions{ + Multiline: true, + Indent: jsonIndent, + UseEnumNumbers: false, + UseProtoNames: true, + EmitUnpopulated: true, // Can be set to false via the --compact flag +} + // MarshalJSON marshals obj to a JSON string. It uses the jsonpb marshaler for // proto.Message types, with some sensible defaults, and falls back to the // standard Go marshaler otherwise. In both cases, the marshaled JSON is @@ -34,19 +47,22 @@ import ( // either by being a proto message type or by anonymously embedding one, so for // other types that may have nested struct fields, we still use the standard Go // marshaler, which will result in different formattings. 
-func MarshalJSON(obj any) ([]byte, error) { +func MarshalJSON(obj any, marshalOptions ...protojson.MarshalOptions) ([]byte, error) { switch obj := obj.(type) { case proto.Message: - m := protojson.MarshalOptions{ - Multiline: true, - Indent: " ", - UseEnumNumbers: true, - UseProtoNames: true, - EmitUnpopulated: true, + m := DefaultMarshalOptions + switch len(marshalOptions) { + case 0: // Use default + case 1: // Use provided one + m = marshalOptions[0] + default: + return nil, fmt.Errorf("there should only be one optional MarshalOptions value but we had %d", + len(marshalOptions)) } + return m.Marshal(obj) default: - data, err := json.MarshalIndent(obj, "", " ") + data, err := json.MarshalIndent(obj, jsonPrefix, jsonIndent) if err != nil { return nil, fmt.Errorf("json.Marshal = %v", err) } @@ -54,3 +70,11 @@ func MarshalJSON(obj any) ([]byte, error) { return data, nil } } + +// MarshalJSONPretty works the same as MarshalJSON but uses ENUM names +// instead of numbers. +func MarshalJSONPretty(obj any) ([]byte, error) { + marshalOptions := DefaultMarshalOptions + marshalOptions.UseEnumNumbers = false + return MarshalJSON(obj, marshalOptions) +} diff --git a/go/cmd/vtctldclient/cli/shards.go b/go/cmd/vtctldclient/cli/shards.go index 93d7529d9a8..8ee38eff0d4 100644 --- a/go/cmd/vtctldclient/cli/shards.go +++ b/go/cmd/vtctldclient/cli/shards.go @@ -19,7 +19,7 @@ package cli import ( "sort" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/topo/topoproto" replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" @@ -83,12 +83,12 @@ func (rts rTablets) Less(i, j int) bool { } // then compare replication positions - lpos, err := mysql.DecodePosition(l.Status.Position) + lpos, err := replication.DecodePosition(l.Status.Position) if err != nil { return true } - rpos, err := mysql.DecodePosition(r.Status.Position) + rpos, err := replication.DecodePosition(r.Status.Position) if err != nil { return false } diff --git 
a/go/cmd/vtctldclient/command/backups.go b/go/cmd/vtctldclient/command/backups.go index c427a88f1df..e6314ed7d6e 100644 --- a/go/cmd/vtctldclient/command/backups.go +++ b/go/cmd/vtctldclient/command/backups.go @@ -35,7 +35,7 @@ import ( var ( // Backup makes a Backup gRPC call to a vtctld. Backup = &cobra.Command{ - Use: "Backup [--concurrency ] [--allow-primary] ", + Use: "Backup [--concurrency ] [--allow-primary] [--incremental-from-pos=|auto] [--upgrade-safe] ", Short: "Uses the BackupStorage service on the given tablet to create and store a new backup.", DisableFlagsInUseLine: true, Args: cobra.ExactArgs(1), @@ -43,7 +43,7 @@ var ( } // BackupShard makes a BackupShard gRPC call to a vtctld. BackupShard = &cobra.Command{ - Use: "BackupShard [--concurrency ] [--allow-primary] ", + Use: "BackupShard [--concurrency ] [--allow-primary] [--incremental-from-pos=|auto] [--upgrade-safe] ", Short: "Finds the most up-to-date REPLICA, RDONLY, or SPARE tablet in the given shard and uses the BackupStorage service on that tablet to create and store a new backup.", Long: `Finds the most up-to-date REPLICA, RDONLY, or SPARE tablet in the given shard and uses the BackupStorage service on that tablet to create and store a new backup. @@ -70,7 +70,7 @@ If no replica-type tablet can be found, the backup can be taken on the primary i } // RestoreFromBackup makes a RestoreFromBackup gRPC call to a vtctld. 
RestoreFromBackup = &cobra.Command{ - Use: "RestoreFromBackup [--backup-timestamp|-t ] ", + Use: "RestoreFromBackup [--backup-timestamp|-t ] [--restore-to-pos ] [--dry-run] ", Short: "Stops mysqld on the specified tablet and restores the data from either the latest backup or closest before `backup-timestamp`.", DisableFlagsInUseLine: true, Args: cobra.ExactArgs(1), @@ -79,8 +79,10 @@ If no replica-type tablet can be found, the backup can be taken on the primary i ) var backupOptions = struct { - AllowPrimary bool - Concurrency uint64 + AllowPrimary bool + Concurrency uint64 + IncrementalFromPos string + UpgradeSafe bool }{} func commandBackup(cmd *cobra.Command, args []string) error { @@ -92,9 +94,11 @@ func commandBackup(cmd *cobra.Command, args []string) error { cli.FinishedParsing(cmd) stream, err := client.Backup(commandCtx, &vtctldatapb.BackupRequest{ - TabletAlias: tabletAlias, - AllowPrimary: backupOptions.AllowPrimary, - Concurrency: backupOptions.Concurrency, + TabletAlias: tabletAlias, + AllowPrimary: backupOptions.AllowPrimary, + Concurrency: backupOptions.Concurrency, + IncrementalFromPos: backupOptions.IncrementalFromPos, + UpgradeSafe: backupOptions.UpgradeSafe, }) if err != nil { return err @@ -114,8 +118,10 @@ func commandBackup(cmd *cobra.Command, args []string) error { } var backupShardOptions = struct { - AllowPrimary bool - Concurrency uint64 + AllowPrimary bool + Concurrency uint64 + IncrementalFromPos string + UpgradeSafe bool }{} func commandBackupShard(cmd *cobra.Command, args []string) error { @@ -127,10 +133,12 @@ func commandBackupShard(cmd *cobra.Command, args []string) error { cli.FinishedParsing(cmd) stream, err := client.BackupShard(commandCtx, &vtctldatapb.BackupShardRequest{ - Keyspace: keyspace, - Shard: shard, - AllowPrimary: backupShardOptions.AllowPrimary, - Concurrency: backupShardOptions.Concurrency, + Keyspace: keyspace, + Shard: shard, + AllowPrimary: backupShardOptions.AllowPrimary, + Concurrency: 
backupShardOptions.Concurrency, + IncrementalFromPos: backupShardOptions.IncrementalFromPos, + UpgradeSafe: backupShardOptions.UpgradeSafe, }) if err != nil { return err @@ -210,7 +218,10 @@ func commandRemoveBackup(cmd *cobra.Command, args []string) error { } var restoreFromBackupOptions = struct { - BackupTimestamp string + BackupTimestamp string + RestoreToPos string + RestoreToTimestamp string + DryRun bool }{} func commandRestoreFromBackup(cmd *cobra.Command, args []string) error { @@ -219,8 +230,23 @@ func commandRestoreFromBackup(cmd *cobra.Command, args []string) error { return err } + if restoreFromBackupOptions.RestoreToPos != "" && restoreFromBackupOptions.RestoreToTimestamp != "" { + return fmt.Errorf("--restore-to-pos and --restore-to-timestamp are mutually exclusive") + } + + var restoreToTimestamp time.Time + if restoreFromBackupOptions.RestoreToTimestamp != "" { + restoreToTimestamp, err = mysqlctl.ParseRFC3339(restoreFromBackupOptions.RestoreToTimestamp) + if err != nil { + return err + } + } + req := &vtctldatapb.RestoreFromBackupRequest{ - TabletAlias: alias, + TabletAlias: alias, + RestoreToPos: restoreFromBackupOptions.RestoreToPos, + RestoreToTimestamp: protoutil.TimeToProto(restoreToTimestamp), + DryRun: restoreFromBackupOptions.DryRun, } if restoreFromBackupOptions.BackupTimestamp != "" { @@ -255,10 +281,15 @@ func commandRestoreFromBackup(cmd *cobra.Command, args []string) error { func init() { Backup.Flags().BoolVar(&backupOptions.AllowPrimary, "allow-primary", false, "Allow the primary of a shard to be used for the backup. WARNING: If using the builtin backup engine, this will shutdown mysqld on the primary and stop writes for the duration of the backup.") Backup.Flags().Uint64Var(&backupOptions.Concurrency, "concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously.") + Backup.Flags().StringVar(&backupOptions.IncrementalFromPos, "incremental-from-pos", "", "Position of previous backup. Default: empty. 
If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position") + + Backup.Flags().BoolVar(&backupOptions.UpgradeSafe, "upgrade-safe", false, "Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades.") Root.AddCommand(Backup) BackupShard.Flags().BoolVar(&backupShardOptions.AllowPrimary, "allow-primary", false, "Allow the primary of a shard to be used for the backup. WARNING: If using the builtin backup engine, this will shutdown mysqld on the primary and stop writes for the duration of the backup.") BackupShard.Flags().Uint64Var(&backupShardOptions.Concurrency, "concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously.") + BackupShard.Flags().StringVar(&backupShardOptions.IncrementalFromPos, "incremental-from-pos", "", "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position") + BackupShard.Flags().BoolVar(&backupOptions.UpgradeSafe, "upgrade-safe", false, "Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades.") Root.AddCommand(BackupShard) GetBackups.Flags().Uint32VarP(&getBackupsOptions.Limit, "limit", "l", 0, "Retrieve only the most recent N backups.") @@ -268,5 +299,8 @@ func init() { Root.AddCommand(RemoveBackup) RestoreFromBackup.Flags().StringVarP(&restoreFromBackupOptions.BackupTimestamp, "backup-timestamp", "t", "", "Use the backup taken at, or closest before, this timestamp. Omit to use the latest backup. Timestamp format is \"YYYY-mm-DD.HHMMSS\".") + RestoreFromBackup.Flags().StringVar(&restoreFromBackupOptions.RestoreToPos, "restore-to-pos", "", "Run a point in time recovery that ends with the given position. 
This will attempt to use one full backup followed by zero or more incremental backups") + RestoreFromBackup.Flags().StringVar(&restoreFromBackupOptions.RestoreToTimestamp, "restore-to-timestamp", "", "Run a point in time recovery that restores up to, and excluding, given timestamp in RFC3339 format (`2006-01-02T15:04:05Z07:00`). This will attempt to use one full backup followed by zero or more incremental backups") + RestoreFromBackup.Flags().BoolVar(&restoreFromBackupOptions.DryRun, "dry-run", false, "Only validate restore steps, do not actually restore data") Root.AddCommand(RestoreFromBackup) } diff --git a/go/cmd/vtctldclient/command/keyspaces.go b/go/cmd/vtctldclient/command/keyspaces.go index 393b4795d8f..420c274ddd5 100644 --- a/go/cmd/vtctldclient/command/keyspaces.go +++ b/go/cmd/vtctldclient/command/keyspaces.go @@ -24,10 +24,12 @@ import ( "github.com/spf13/cobra" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/topo" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -176,7 +178,7 @@ func commandCreateKeyspace(cmd *cobra.Command, args []string) error { return fmt.Errorf("--snapshot-time cannot be in the future; snapshot = %v, now = %v", t, now) } - snapshotTime = logutil.TimeToProto(t) + snapshotTime = protoutil.TimeToProto(t) } createKeyspaceOptions.SidecarDBName = strings.TrimSpace(createKeyspaceOptions.SidecarDBName) @@ -184,7 +186,7 @@ func commandCreateKeyspace(cmd *cobra.Command, args []string) error { return errors.New("--sidecar-db-name cannot be empty when creating a keyspace") } if len(createKeyspaceOptions.SidecarDBName) > mysql.MaxIdentifierLength { - return mysql.NewSQLError(mysql.ERTooLongIdent, mysql.SSDataTooLong, "--sidecar-db-name identifier value of %q is too long (%d chars), max length for database 
identifiers is %d characters", + return sqlerror.NewSQLError(sqlerror.ERTooLongIdent, sqlerror.SSDataTooLong, "--sidecar-db-name identifier value of %q is too long (%d chars), max length for database identifiers is %d characters", createKeyspaceOptions.SidecarDBName, len(createKeyspaceOptions.SidecarDBName), mysql.MaxIdentifierLength) } @@ -425,7 +427,7 @@ func init() { CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.BaseKeyspace, "base-keyspace", "", "The base keyspace for a snapshot keyspace.") CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.SnapshotTimestamp, "snapshot-timestamp", "", "The snapshot time for a snapshot keyspace, as a timestamp in RFC3339 format.") CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.DurabilityPolicy, "durability-policy", "none", "Type of durability to enforce for this keyspace. Default is none. Possible values include 'semi_sync' and others as dictated by registered plugins.") - CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.SidecarDBName, "sidecar-db-name", sidecardb.DefaultName, "(Experimental) Name of the Vitess sidecar database that tablets in this keyspace will use for internal metadata.") + CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.SidecarDBName, "sidecar-db-name", sidecar.DefaultName, "(Experimental) Name of the Vitess sidecar database that tablets in this keyspace will use for internal metadata.") Root.AddCommand(CreateKeyspace) DeleteKeyspace.Flags().BoolVarP(&deleteKeyspaceOptions.Recursive, "recursive", "r", false, "Recursively delete all shards in the keyspace, and all tablets in those shards.") diff --git a/go/cmd/vtctldclient/command/onlineddl.go b/go/cmd/vtctldclient/command/onlineddl.go new file mode 100644 index 00000000000..dbe927de2bf --- /dev/null +++ b/go/cmd/vtctldclient/command/onlineddl.go @@ -0,0 +1,404 @@ +/* +Copyright 2023 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package command + +import ( + "fmt" + "os" + "strings" + "time" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/vtctl/schematools" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +const ( + AllMigrationsIndicator = "all" +) + +var ( + OnlineDDL = &cobra.Command{ + Use: "OnlineDDL [args]", + Short: "Operates on online DDL (schema migrations).", + DisableFlagsInUseLine: true, + Args: cobra.MinimumNArgs(2), + } + OnlineDDLCancel = &cobra.Command{ + Use: "cancel ", + Short: "Cancel one or all migrations, terminating any running ones as needed.", + Example: "OnlineDDL cancel test_keyspace 82fa54ac_e83e_11ea_96b7_f875a4d24e90", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(2), + RunE: commandOnlineDDLCancel, + } + OnlineDDLCleanup = &cobra.Command{ + Use: "cleanup ", + Short: "Mark a given schema migration ready for artifact cleanup.", + Example: "OnlineDDL cleanup test_keyspace 82fa54ac_e83e_11ea_96b7_f875a4d24e90", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(2), + RunE: commandOnlineDDLCleanup, + } + OnlineDDLComplete = &cobra.Command{ + Use: "complete ", + Short: "Complete 
one or all migrations executed with --postpone-completion", + Example: "OnlineDDL complete test_keyspace 82fa54ac_e83e_11ea_96b7_f875a4d24e90", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(2), + RunE: commandOnlineDDLComplete, + } + OnlineDDLLaunch = &cobra.Command{ + Use: "launch ", + Short: "Launch one or all migrations executed with --postpone-launch", + Example: "OnlineDDL launch test_keyspace 82fa54ac_e83e_11ea_96b7_f875a4d24e90", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(2), + RunE: commandOnlineDDLLaunch, + } + OnlineDDLRetry = &cobra.Command{ + Use: "retry ", + Short: "Mark a given schema migration for retry.", + Example: "vtctl OnlineDDL retry test_keyspace 82fa54ac_e83e_11ea_96b7_f875a4d24e90", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(2), + RunE: commandOnlineDDLRetry, + } + OnlineDDLThrottle = &cobra.Command{ + Use: "throttle ", + Short: "Throttles one or all migrations", + Example: "OnlineDDL throttle all", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(2), + RunE: commandOnlineDDLThrottle, + } + OnlineDDLUnthrottle = &cobra.Command{ + Use: "unthrottle ", + Short: "Unthrottles one or all migrations", + Example: "OnlineDDL unthrottle all", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(2), + RunE: commandOnlineDDLUnthrottle, + } + OnlineDDLShow = &cobra.Command{ + Use: "show", + Short: "Display information about online DDL operations.", + Example: `OnlineDDL show test_keyspace 82fa54ac_e83e_11ea_96b7_f875a4d24e90 +OnlineDDL show test_keyspace all +OnlineDDL show --order descending test_keyspace all +OnlineDDL show --limit 10 test_keyspace all +OnlineDDL show --skip 5 --limit 10 test_keyspace all +OnlineDDL show test_keyspace running +OnlineDDL show test_keyspace complete +OnlineDDL show test_keyspace failed`, + DisableFlagsInUseLine: true, + Args: cobra.RangeArgs(1, 2), + RunE: commandOnlineDDLShow, + } +) + +// analyzeOnlineDDLCommandWithUuidOrAllArgument is a general helper function for OnlineDDL 
commands that +// accept either a valid UUID or the "all" argument. +func analyzeOnlineDDLCommandWithUuidOrAllArgument(cmd *cobra.Command) (keyspace, uuid string, err error) { + keyspace = cmd.Flags().Arg(0) + uuid = cmd.Flags().Arg(1) + + switch { + case strings.ToLower(uuid) == AllMigrationsIndicator: + case schema.IsOnlineDDLUUID(uuid): + default: + return "", "", fmt.Errorf("argument must be 'all' or a valid UUID. Got '%s'", uuid) + } + return keyspace, uuid, nil +} + +func commandOnlineDDLCancel(cmd *cobra.Command, args []string) error { + keyspace, uuid, err := analyzeOnlineDDLCommandWithUuidOrAllArgument(cmd) + if err != nil { + return err + } + cli.FinishedParsing(cmd) + + resp, err := client.CancelSchemaMigration(commandCtx, &vtctldatapb.CancelSchemaMigrationRequest{ + Keyspace: keyspace, + Uuid: uuid, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + return nil +} + +func commandOnlineDDLCleanup(cmd *cobra.Command, args []string) error { + keyspace := cmd.Flags().Arg(0) + uuid := cmd.Flags().Arg(1) + if !schema.IsOnlineDDLUUID(uuid) { + return fmt.Errorf("%s is not a valid UUID", uuid) + } + + cli.FinishedParsing(cmd) + + resp, err := client.CleanupSchemaMigration(commandCtx, &vtctldatapb.CleanupSchemaMigrationRequest{ + Keyspace: keyspace, + Uuid: uuid, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + return nil +} + +func commandOnlineDDLComplete(cmd *cobra.Command, args []string) error { + keyspace, uuid, err := analyzeOnlineDDLCommandWithUuidOrAllArgument(cmd) + if err != nil { + return err + } + cli.FinishedParsing(cmd) + + resp, err := client.CompleteSchemaMigration(commandCtx, &vtctldatapb.CompleteSchemaMigrationRequest{ + Keyspace: keyspace, + Uuid: uuid, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return 
err + } + + fmt.Printf("%s\n", data) + return nil +} + +func commandOnlineDDLLaunch(cmd *cobra.Command, args []string) error { + keyspace, uuid, err := analyzeOnlineDDLCommandWithUuidOrAllArgument(cmd) + if err != nil { + return err + } + cli.FinishedParsing(cmd) + + resp, err := client.LaunchSchemaMigration(commandCtx, &vtctldatapb.LaunchSchemaMigrationRequest{ + Keyspace: keyspace, + Uuid: uuid, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + return nil +} + +func commandOnlineDDLRetry(cmd *cobra.Command, args []string) error { + keyspace := cmd.Flags().Arg(0) + uuid := cmd.Flags().Arg(1) + if !schema.IsOnlineDDLUUID(uuid) { + return fmt.Errorf("%s is not a valid UUID", uuid) + } + + cli.FinishedParsing(cmd) + + resp, err := client.RetrySchemaMigration(commandCtx, &vtctldatapb.RetrySchemaMigrationRequest{ + Keyspace: keyspace, + Uuid: uuid, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + return nil +} + +// throttleCommandHelper is a helper function that implements the logic for both +// commandOnlineDDLThrottle and commandOnlineDDLUnthrottle ; the only difference between the two +// is the ThrottledApp *rule* sent in UpdateThrottlerConfigRequest. 
+// input: `throttleType`: true stands for "throttle", `false` stands for "unthrottle" +func throttleCommandHelper(cmd *cobra.Command, throttleType bool) error { + keyspace, uuid, err := analyzeOnlineDDLCommandWithUuidOrAllArgument(cmd) + if err != nil { + return err + } + cli.FinishedParsing(cmd) + + var rule topodatapb.ThrottledAppRule + if throttleType { + rule.Ratio = throttle.DefaultThrottleRatio + rule.ExpiresAt = protoutil.TimeToProto(time.Now().Add(throttle.DefaultAppThrottleDuration)) + } else { + rule.Ratio = 0 + rule.ExpiresAt = protoutil.TimeToProto(time.Now()) + } + + if strings.ToLower(uuid) == AllMigrationsIndicator { + rule.Name = throttlerapp.OnlineDDLName.String() + } else { + rule.Name = uuid + } + + updateThrottlerConfigOptions := vtctldatapb.UpdateThrottlerConfigRequest{ + Keyspace: keyspace, + ThrottledApp: &rule, + } + resp, err := client.UpdateThrottlerConfig(commandCtx, &updateThrottlerConfigOptions) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + return nil +} + +// commandOnlineDDLThrottle throttles one or multiple migrations. +// As opposed to *most* OnlineDDL functions, this functionality does not end up calling a gRPC on tablets. +// Instead, it updates Keyspace and SrvKeyspace entries, on which the tablets listen. +func commandOnlineDDLThrottle(cmd *cobra.Command, args []string) error { + return throttleCommandHelper(cmd, true) +} + +// commandOnlineDDLUnthrottle unthrottles one or multiple migrations. +// As opposed to *most* OnlineDDL functions, this functionality does not end up calling a gRPC on tablets. +// Instead, it updates Keyspace and SrvKeyspace entries, on which the tablets listen. 
+func commandOnlineDDLUnthrottle(cmd *cobra.Command, args []string) error { + return throttleCommandHelper(cmd, false) +} + +var onlineDDLShowArgs = struct { + JSON bool + OrderStr string + Limit uint64 + Skip uint64 +}{ + OrderStr: "ascending", +} + +func commandOnlineDDLShow(cmd *cobra.Command, args []string) error { + var order vtctldatapb.QueryOrdering + switch strings.ToLower(onlineDDLShowArgs.OrderStr) { + case "": + order = vtctldatapb.QueryOrdering_NONE + case "asc", "ascending": + order = vtctldatapb.QueryOrdering_ASCENDING + case "desc", "descending": + order = vtctldatapb.QueryOrdering_DESCENDING + default: + return fmt.Errorf("invalid ordering %s (choices are 'asc', 'ascending', 'desc', 'descending')", onlineDDLShowArgs.OrderStr) + } + + cli.FinishedParsing(cmd) + + req := &vtctldatapb.GetSchemaMigrationsRequest{ + Keyspace: cmd.Flags().Arg(0), + Order: order, + Limit: onlineDDLShowArgs.Limit, + Skip: onlineDDLShowArgs.Skip, + } + + switch arg := cmd.Flags().Arg(1); arg { + case "", "all": + case "recent": + req.Recent = protoutil.DurationToProto(7 * 24 * time.Hour) + default: + if status, err := schematools.ParseSchemaMigrationStatus(arg); err == nil { + // Argument is a status name. 
+ req.Status = status + } else if schema.IsOnlineDDLUUID(arg) { + req.Uuid = arg + } else { + req.MigrationContext = arg + } + } + + resp, err := client.GetSchemaMigrations(commandCtx, req) + if err != nil { + return err + } + + switch { + case onlineDDLShowArgs.JSON: + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + fmt.Printf("%s\n", data) + default: + res, err := sqltypes.MarshalResult(schematools.MarshallableSchemaMigrations(resp.Migrations)) + if err != nil { + return err + } + + cli.WriteQueryResultTable(os.Stdout, res) + } + return nil +} + +func init() { + OnlineDDL.AddCommand(OnlineDDLCancel) + OnlineDDL.AddCommand(OnlineDDLCleanup) + OnlineDDL.AddCommand(OnlineDDLComplete) + OnlineDDL.AddCommand(OnlineDDLLaunch) + OnlineDDL.AddCommand(OnlineDDLRetry) + OnlineDDL.AddCommand(OnlineDDLThrottle) + OnlineDDL.AddCommand(OnlineDDLUnthrottle) + + OnlineDDLShow.Flags().BoolVar(&onlineDDLShowArgs.JSON, "json", false, "Output JSON instead of human-readable table.") + OnlineDDLShow.Flags().StringVar(&onlineDDLShowArgs.OrderStr, "order", "asc", "Sort the results by `id` property of the Schema migration.") + OnlineDDLShow.Flags().Uint64Var(&onlineDDLShowArgs.Limit, "limit", 0, "Limit number of rows returned in output.") + OnlineDDLShow.Flags().Uint64Var(&onlineDDLShowArgs.Skip, "skip", 0, "Skip specified number of rows returned in output.") + + OnlineDDL.AddCommand(OnlineDDLShow) + Root.AddCommand(OnlineDDL) +} diff --git a/go/cmd/vtctldclient/command/reparents.go b/go/cmd/vtctldclient/command/reparents.go index 6483d699457..5c83016701a 100644 --- a/go/cmd/vtctldclient/command/reparents.go +++ b/go/cmd/vtctldclient/command/reparents.go @@ -94,6 +94,7 @@ var emergencyReparentShardOptions = struct { NewPrimaryAliasStr string IgnoreReplicaAliasStrList []string PreventCrossCellPromotion bool + WaitForAllTablets bool }{} func commandEmergencyReparentShard(cmd *cobra.Command, args []string) error { @@ -132,6 +133,7 @@ func 
commandEmergencyReparentShard(cmd *cobra.Command, args []string) error { IgnoreReplicas: ignoreReplicaAliases, WaitReplicasTimeout: protoutil.DurationToProto(emergencyReparentShardOptions.WaitReplicasTimeout), PreventCrossCellPromotion: emergencyReparentShardOptions.PreventCrossCellPromotion, + WaitForAllTablets: emergencyReparentShardOptions.WaitForAllTablets, }) if err != nil { return err @@ -281,6 +283,7 @@ func init() { EmergencyReparentShard.Flags().DurationVar(&emergencyReparentShardOptions.WaitReplicasTimeout, "wait-replicas-timeout", topo.RemoteOperationTimeout, "Time to wait for replicas to catch up in reparenting.") EmergencyReparentShard.Flags().StringVar(&emergencyReparentShardOptions.NewPrimaryAliasStr, "new-primary", "", "Alias of a tablet that should be the new primary. If not specified, the vtctld will select the best candidate to promote.") EmergencyReparentShard.Flags().BoolVar(&emergencyReparentShardOptions.PreventCrossCellPromotion, "prevent-cross-cell-promotion", false, "Only promotes a new primary from the same cell as the previous primary.") + EmergencyReparentShard.Flags().BoolVar(&emergencyReparentShardOptions.WaitForAllTablets, "wait-for-all-tablets", false, "Should ERS wait for all the tablets to respond. 
Useful when all the tablets are reachable.") EmergencyReparentShard.Flags().StringSliceVarP(&emergencyReparentShardOptions.IgnoreReplicaAliasStrList, "ignore-replicas", "i", nil, "Comma-separated, repeated list of replica tablet aliases to ignore during the emergency reparent.") Root.AddCommand(EmergencyReparentShard) diff --git a/go/cmd/vtctldclient/command/root.go b/go/cmd/vtctldclient/command/root.go index 9e59276993c..1194b49ec8f 100644 --- a/go/cmd/vtctldclient/command/root.go +++ b/go/cmd/vtctldclient/command/root.go @@ -30,19 +30,33 @@ import ( "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vtctl/vtctldclient" + + // These imports ensure init()s within them get called and they register their commands/subcommands. + "vitess.io/vitess/go/cmd/vtctldclient/cli" + vreplcommon "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/lookupvindex" + _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/materialize" + _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/migrate" + _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/mount" + _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/movetables" + _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/reshard" + _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/vdiff" + _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/workflow" ) var ( // VtctldClientProtocol is the protocol to use when creating the vtctldclient.VtctldClient. VtctldClientProtocol = "grpc" - client vtctldclient.VtctldClient - traceCloser io.Closer + client vtctldclient.VtctldClient + traceCloser io.Closer + commandCtx context.Context commandCancel func() server string actionTimeout time.Duration + compactOutput bool // Root is the main entrypoint to the vtctldclient CLI. 
Root = &cobra.Command{ @@ -59,6 +73,11 @@ var ( ctx = context.Background() } commandCtx, commandCancel = context.WithTimeout(ctx, actionTimeout) + if compactOutput { + cli.DefaultMarshalOptions.EmitUnpopulated = false + } + vreplcommon.SetClient(client) + vreplcommon.SetCommandCtx(commandCtx) return err }, // Similarly, PersistentPostRun cleans up the resources spawned by @@ -122,6 +141,13 @@ func getClientForCommand(cmd *cobra.Command) (vtctldclient.VtctldClient, error) } } + // Reserved cobra commands for shell completion that we don't want to fail + // here. + switch { + case cmd.Name() == "__complete", cmd.Parent() != nil && cmd.Parent().Name() == "completion": + return nil, nil + } + if VtctldClientProtocol != "local" && server == "" { return nil, errNoServer } @@ -130,6 +156,8 @@ func getClientForCommand(cmd *cobra.Command) (vtctldclient.VtctldClient, error) } func init() { - Root.PersistentFlags().StringVar(&server, "server", "", "server to use for connection (required)") - Root.PersistentFlags().DurationVar(&actionTimeout, "action_timeout", time.Hour, "timeout for the total command") + Root.PersistentFlags().StringVar(&server, "server", "", "server to use for the connection (required)") + Root.PersistentFlags().DurationVar(&actionTimeout, "action_timeout", time.Hour, "timeout to use for the command") + Root.PersistentFlags().BoolVar(&compactOutput, "compact", false, "use compact format for otherwise verbose outputs") + vreplcommon.RegisterCommands(Root) } diff --git a/go/cmd/vtctldclient/command/schema.go b/go/cmd/vtctldclient/command/schema.go index a2d7843756d..795b1315e89 100644 --- a/go/cmd/vtctldclient/command/schema.go +++ b/go/cmd/vtctldclient/command/schema.go @@ -31,7 +31,7 @@ import ( "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/wrangler" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" 
"vitess.io/vitess/go/vt/proto/vtrpc" @@ -40,7 +40,7 @@ import ( var ( // ApplySchema makes an ApplySchema gRPC call to a vtctld. ApplySchema = &cobra.Command{ - Use: "ApplySchema [--allow-long-unavailability] [--ddl-strategy ] [--uuid ...] [--migration-context ] [--wait-replicas-timeout ] [--caller-id ] {--sql-file | --sql } ", + Use: "ApplySchema [--ddl-strategy ] [--uuid ...] [--migration-context ] [--wait-replicas-timeout ] [--caller-id ] {--sql-file | --sql } ", Short: "Applies the schema change to the specified keyspace on every primary, running in parallel on all shards. The changes are then propagated to replicas via replication.", Long: `Applies the schema change to the specified keyspace on every primary, running in parallel on all shards. The changes are then propagated to replicas via replication. @@ -103,6 +103,7 @@ var applySchemaOptions = struct { WaitReplicasTimeout time.Duration SkipPreflight bool CallerID string + BatchSize int64 }{} func commandApplySchema(cmd *cobra.Command, args []string) error { @@ -137,15 +138,14 @@ func commandApplySchema(cmd *cobra.Command, args []string) error { ks := cmd.Flags().Arg(0) resp, err := client.ApplySchema(commandCtx, &vtctldatapb.ApplySchemaRequest{ - Keyspace: ks, - AllowLongUnavailability: applySchemaOptions.AllowLongUnavailability, - DdlStrategy: applySchemaOptions.DDLStrategy, - Sql: parts, - SkipPreflight: true, - UuidList: applySchemaOptions.UUIDList, - MigrationContext: applySchemaOptions.MigrationContext, - WaitReplicasTimeout: protoutil.DurationToProto(applySchemaOptions.WaitReplicasTimeout), - CallerId: cid, + Keyspace: ks, + DdlStrategy: applySchemaOptions.DDLStrategy, + Sql: parts, + UuidList: applySchemaOptions.UUIDList, + MigrationContext: applySchemaOptions.MigrationContext, + WaitReplicasTimeout: protoutil.DurationToProto(applySchemaOptions.WaitReplicasTimeout), + CallerId: cid, + BatchSize: applySchemaOptions.BatchSize, }) if err != nil { return err @@ -286,15 +286,16 @@ func 
commandReloadSchemaShard(cmd *cobra.Command, args []string) error { } func init() { - ApplySchema.Flags().MarkDeprecated("--skip-preflight", "Deprecated. Assumed to be always 'true'") - ApplySchema.Flags().BoolVar(&applySchemaOptions.AllowLongUnavailability, "allow-long-unavailability", false, "Allow large schema changes which incur a longer unavailability of the database.") + ApplySchema.Flags().Bool("allow-long-unavailability", false, "Deprecated and has no effect.") + ApplySchema.Flags().MarkDeprecated("--allow-long-unavailability", "") ApplySchema.Flags().StringVar(&applySchemaOptions.DDLStrategy, "ddl-strategy", string(schema.DDLStrategyDirect), "Online DDL strategy, compatible with @@ddl_strategy session variable (examples: 'gh-ost', 'pt-osc', 'gh-ost --max-load=Threads_running=100'.") ApplySchema.Flags().StringSliceVar(&applySchemaOptions.UUIDList, "uuid", nil, "Optional, comma-delimited, repeatable, explicit UUIDs for migration. If given, must match number of DDL changes.") ApplySchema.Flags().StringVar(&applySchemaOptions.MigrationContext, "migration-context", "", "For Online DDL, optionally supply a custom unique string used as context for the migration(s) in this command. 
By default a unique context is auto-generated by Vitess.") - ApplySchema.Flags().DurationVar(&applySchemaOptions.WaitReplicasTimeout, "wait-replicas-timeout", wrangler.DefaultWaitReplicasTimeout, "Amount of time to wait for replicas to receive the schema change via replication.") + ApplySchema.Flags().DurationVar(&applySchemaOptions.WaitReplicasTimeout, "wait-replicas-timeout", grpcvtctldserver.DefaultWaitReplicasTimeout, "Amount of time to wait for replicas to receive the schema change via replication.") ApplySchema.Flags().StringVar(&applySchemaOptions.CallerID, "caller-id", "", "Effective caller ID used for the operation and should map to an ACL name which grants this identity the necessary permissions to perform the operation (this is only necessary when strict table ACLs are used).") ApplySchema.Flags().StringArrayVar(&applySchemaOptions.SQL, "sql", nil, "Semicolon-delimited, repeatable SQL commands to apply. Exactly one of --sql|--sql-file is required.") ApplySchema.Flags().StringVar(&applySchemaOptions.SQLFile, "sql-file", "", "Path to a file containing semicolon-delimited SQL commands to apply. Exactly one of --sql|--sql-file is required.") + ApplySchema.Flags().Int64Var(&applySchemaOptions.BatchSize, "batch-size", 0, "How many queries to batch together. Only applicable when all queries are CREATE TABLE|VIEW") Root.AddCommand(ApplySchema) diff --git a/go/cmd/vtctldclient/command/throttler.go b/go/cmd/vtctldclient/command/throttler.go index b0dbd663013..9783f76720d 100644 --- a/go/cmd/vtctldclient/command/throttler.go +++ b/go/cmd/vtctldclient/command/throttler.go @@ -17,17 +17,23 @@ limitations under the License. 
package command import ( + "fmt" + "time" + "github.com/spf13/cobra" "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/protoutil" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" ) var ( // UpdateThrottlerConfig makes a UpdateThrottlerConfig gRPC call to a vtctld. UpdateThrottlerConfig = &cobra.Command{ - Use: "UpdateThrottlerConfig [--enable|--disable] [--threshold=] [--custom-query=] [--check-as-check-self|--check-as-check-shard] ", + Use: "UpdateThrottlerConfig [--enable|--disable] [--threshold=] [--custom-query=] [--check-as-check-self|--check-as-check-shard] [--throttle-app|--unthrottle-app=] [--throttle-app-ratio=] [--throttle-app-duration=] ", Short: "Update the tablet throttler configuration for all tablets in the given keyspace (across all cells)", DisableFlagsInUseLine: true, Args: cobra.ExactArgs(1), @@ -35,14 +41,32 @@ } ) -var updateThrottlerConfigOptions vtctldatapb.UpdateThrottlerConfigRequest +var ( + updateThrottlerConfigOptions vtctldatapb.UpdateThrottlerConfigRequest + throttledAppRule topodatapb.ThrottledAppRule + unthrottledAppRule topodatapb.ThrottledAppRule + throttledAppDuration time.Duration +) func commandUpdateThrottlerConfig(cmd *cobra.Command, args []string) error { keyspace := cmd.Flags().Arg(0) cli.FinishedParsing(cmd) + if throttledAppRule.Name != "" && unthrottledAppRule.Name != "" { + return fmt.Errorf("throttle-app and unthrottle-app are mutually exclusive") + } + updateThrottlerConfigOptions.CustomQuerySet = cmd.Flags().Changed("custom-query") updateThrottlerConfigOptions.Keyspace = keyspace + + if throttledAppRule.Name != "" { + throttledAppRule.ExpiresAt = protoutil.TimeToProto(time.Now().Add(throttledAppDuration)) + updateThrottlerConfigOptions.ThrottledApp = &throttledAppRule + } else if unthrottledAppRule.Name != "" { + unthrottledAppRule.ExpiresAt = protoutil.TimeToProto(time.Now()) + 
updateThrottlerConfigOptions.ThrottledApp = &unthrottledAppRule + } + + _, err := client.UpdateThrottlerConfig(commandCtx, &updateThrottlerConfigOptions) if err != nil { return err @@ -57,5 +81,12 @@ func init() { UpdateThrottlerConfig.Flags().StringVar(&updateThrottlerConfigOptions.CustomQuery, "custom-query", "", "custom throttler check query") UpdateThrottlerConfig.Flags().BoolVar(&updateThrottlerConfigOptions.CheckAsCheckSelf, "check-as-check-self", false, "/throttler/check requests behave as is /throttler/check-self was called") UpdateThrottlerConfig.Flags().BoolVar(&updateThrottlerConfigOptions.CheckAsCheckShard, "check-as-check-shard", false, "use standard behavior for /throttler/check requests") + + UpdateThrottlerConfig.Flags().StringVar(&unthrottledAppRule.Name, "unthrottle-app", "", "an app name to unthrottle") + UpdateThrottlerConfig.Flags().StringVar(&throttledAppRule.Name, "throttle-app", "", "an app name to throttle") + UpdateThrottlerConfig.Flags().Float64Var(&throttledAppRule.Ratio, "throttle-app-ratio", throttle.DefaultThrottleRatio, "ratio to throttle app (app specified in --throttle-app)") + UpdateThrottlerConfig.Flags().DurationVar(&throttledAppDuration, "throttle-app-duration", throttle.DefaultAppThrottleDuration, "duration after which throttled app rule expires (app specified in --throttle-app)") + UpdateThrottlerConfig.Flags().BoolVar(&throttledAppRule.Exempt, "throttle-app-exempt", throttledAppRule.Exempt, "exempt this app from being at all throttled. WARNING: use with extreme care, as this is likely to push metrics beyond the throttler's threshold, and starve other apps") + Root.AddCommand(UpdateThrottlerConfig) } diff --git a/go/cmd/vtctldclient/command/vreplication/common/cancel.go b/go/cmd/vtctldclient/command/vreplication/common/cancel.go new file mode 100644 index 00000000000..48abcc89584 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/common/cancel.go @@ -0,0 +1,83 @@ +/* +Copyright 2023 The Vitess Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "fmt" + "sort" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var CancelOptions = struct { + KeepData bool + KeepRoutingRules bool +}{} + +func GetCancelCommand(opts *SubCommandsOpts) *cobra.Command { + cmd := &cobra.Command{ + Use: "cancel", + Short: fmt.Sprintf("Cancel a %s VReplication workflow.", opts.SubCommand), + Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer cancel`, opts.SubCommand, opts.Workflow), + DisableFlagsInUseLine: true, + Aliases: []string{"Cancel"}, + Args: cobra.NoArgs, + RunE: commandCancel, + } + return cmd +} + +func commandCancel(cmd *cobra.Command, args []string) error { + format, err := GetOutputFormat(cmd) + if err != nil { + return err + } + + cli.FinishedParsing(cmd) + + req := &vtctldatapb.WorkflowDeleteRequest{ + Keyspace: BaseOptions.TargetKeyspace, + Workflow: BaseOptions.Workflow, + KeepData: CancelOptions.KeepData, + KeepRoutingRules: CancelOptions.KeepRoutingRules, + } + resp, err := GetClient().WorkflowDelete(GetCommandCtx(), req) + if err != nil { + return err + } + + var output []byte + if format == "json" { + // Sort the inner TabletInfo slice for deterministic output. 
+ sort.Slice(resp.Details, func(i, j int) bool { + return resp.Details[i].Tablet.String() < resp.Details[j].Tablet.String() + }) + output, err = cli.MarshalJSONPretty(resp) + if err != nil { + return err + } + } else { + output = []byte(resp.Summary + "\n") + } + fmt.Printf("%s\n", output) + + return nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/common/complete.go b/go/cmd/vtctldclient/command/vreplication/common/complete.go new file mode 100644 index 00000000000..6e210b188fe --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/common/complete.go @@ -0,0 +1,75 @@ +package common + +import ( + "bytes" + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var CompleteOptions = struct { + KeepData bool + KeepRoutingRules bool + RenameTables bool + DryRun bool +}{} + +func GetCompleteCommand(opts *SubCommandsOpts) *cobra.Command { + cmd := &cobra.Command{ + Use: "complete", + Short: fmt.Sprintf("Complete a %s VReplication workflow.", opts.SubCommand), + Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer complete`, + opts.SubCommand, opts.Workflow), + DisableFlagsInUseLine: true, + Aliases: []string{"Complete"}, + Args: cobra.NoArgs, + RunE: commandComplete, + } + return cmd +} + +func commandComplete(cmd *cobra.Command, args []string) error { + format, err := GetOutputFormat(cmd) + if err != nil { + return err + } + cli.FinishedParsing(cmd) + + req := &vtctldatapb.MoveTablesCompleteRequest{ + Workflow: BaseOptions.Workflow, + TargetKeyspace: BaseOptions.TargetKeyspace, + KeepData: CompleteOptions.KeepData, + KeepRoutingRules: CompleteOptions.KeepRoutingRules, + RenameTables: CompleteOptions.RenameTables, + DryRun: CompleteOptions.DryRun, + } + resp, err := GetClient().MoveTablesComplete(GetCommandCtx(), req) + if err != nil { + return err + } + + var output []byte + if format == "json" { + output, err = 
cli.MarshalJSONPretty(resp) + if err != nil { + return err + } + } else { + tout := bytes.Buffer{} + tout.WriteString(resp.Summary + "\n") + if len(resp.DryRunResults) > 0 { + tout.WriteString("\n") + for _, r := range resp.DryRunResults { + tout.WriteString(r + "\n") + } + } + output = tout.Bytes() + } + fmt.Println(string(output)) + + return nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/common/show.go b/go/cmd/vtctldclient/command/vreplication/common/show.go new file mode 100644 index 00000000000..71e6675f690 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/common/show.go @@ -0,0 +1,68 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package common + +import ( + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var showOptions = struct { + IncludeLogs bool +}{} + +func GetShowCommand(opts *SubCommandsOpts) *cobra.Command { + cmd := &cobra.Command{ + Use: "show", + Short: fmt.Sprintf("Show the details for a %s VReplication workflow.", opts.SubCommand), + Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer show`, opts.SubCommand, opts.Workflow), + DisableFlagsInUseLine: true, + Aliases: []string{"Show"}, + Args: cobra.NoArgs, + RunE: commandShow, + } + cmd.Flags().BoolVar(&showOptions.IncludeLogs, "include-logs", true, "Include recent logs for the workflow.") + return cmd +} + +func commandShow(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + req := &vtctldatapb.GetWorkflowsRequest{ + Keyspace: BaseOptions.TargetKeyspace, + Workflow: BaseOptions.Workflow, + IncludeLogs: showOptions.IncludeLogs, + } + resp, err := GetClient().GetWorkflows(GetCommandCtx(), req) + if err != nil { + return err + } + + data, err := cli.MarshalJSONPretty(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/common/status.go b/go/cmd/vtctldclient/command/vreplication/common/status.go new file mode 100644 index 00000000000..ad038c42536 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/common/status.go @@ -0,0 +1,63 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +func GetStatusCommand(opts *SubCommandsOpts) *cobra.Command { + cmd := &cobra.Command{ + Use: "status", + Short: fmt.Sprintf("Show the current status for a %s VReplication workflow.", opts.SubCommand), + Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer status`, opts.SubCommand, opts.Workflow), + DisableFlagsInUseLine: true, + Aliases: []string{"Status", "progress", "Progress"}, + Args: cobra.NoArgs, + RunE: commandStatus, + } + return cmd +} + +func commandStatus(cmd *cobra.Command, args []string) error { + format, err := GetOutputFormat(cmd) + if err != nil { + return err + } + cli.FinishedParsing(cmd) + + req := &vtctldatapb.WorkflowStatusRequest{ + Keyspace: BaseOptions.TargetKeyspace, + Workflow: BaseOptions.Workflow, + } + resp, err := GetClient().WorkflowStatus(GetCommandCtx(), req) + if err != nil { + return err + } + + if err = OutputStatusResponse(resp, format); err != nil { + return err + } + + return nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/common/switchtraffic.go b/go/cmd/vtctldclient/command/vreplication/common/switchtraffic.go new file mode 100644 index 00000000000..019367fe82b --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/common/switchtraffic.go @@ -0,0 +1,129 @@ +/* +Copyright 2023 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "bytes" + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/vt/vtctl/workflow" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +func GetSwitchTrafficCommand(opts *SubCommandsOpts) *cobra.Command { + cmd := &cobra.Command{ + Use: "switchtraffic", + Short: fmt.Sprintf("Switch traffic for a %s VReplication workflow.", opts.SubCommand), + Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer switchtraffic --tablet-types "replica,rdonly"`, opts.SubCommand, opts.Workflow), + DisableFlagsInUseLine: true, + Aliases: []string{"SwitchTraffic"}, + Args: cobra.NoArgs, + PreRunE: func(cmd *cobra.Command, args []string) error { + SwitchTrafficOptions.Direction = workflow.DirectionForward + if !cmd.Flags().Lookup("tablet-types").Changed { + // We switch traffic for all tablet types if none are provided. 
+ SwitchTrafficOptions.TabletTypes = []topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_REPLICA, + topodatapb.TabletType_RDONLY, + } + } + return nil + }, + RunE: commandSwitchTraffic, + } + return cmd +} + +func GetReverseTrafficCommand(opts *SubCommandsOpts) *cobra.Command { + cmd := &cobra.Command{ + Use: "reversetraffic", + Short: fmt.Sprintf("Reverse traffic for a %s VReplication workflow.", opts.SubCommand), + Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer reversetraffic`, opts.SubCommand, opts.Workflow), + DisableFlagsInUseLine: true, + Aliases: []string{"ReverseTraffic"}, + Args: cobra.NoArgs, + PreRunE: func(cmd *cobra.Command, args []string) error { + SwitchTrafficOptions.Direction = workflow.DirectionBackward + if !cmd.Flags().Lookup("tablet-types").Changed { + // We switch traffic for all tablet types if none are provided. + SwitchTrafficOptions.TabletTypes = []topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_REPLICA, + topodatapb.TabletType_RDONLY, + } + } + return nil + }, + RunE: commandSwitchTraffic, + } + return cmd +} + +func commandSwitchTraffic(cmd *cobra.Command, args []string) error { + format, err := GetOutputFormat(cmd) + if err != nil { + return err + } + + cli.FinishedParsing(cmd) + + req := &vtctldatapb.WorkflowSwitchTrafficRequest{ + Keyspace: BaseOptions.TargetKeyspace, + Workflow: BaseOptions.Workflow, + TabletTypes: SwitchTrafficOptions.TabletTypes, + MaxReplicationLagAllowed: protoutil.DurationToProto(SwitchTrafficOptions.MaxReplicationLagAllowed), + Timeout: protoutil.DurationToProto(SwitchTrafficOptions.Timeout), + DryRun: SwitchTrafficOptions.DryRun, + EnableReverseReplication: SwitchTrafficOptions.EnableReverseReplication, + InitializeTargetSequences: SwitchTrafficOptions.InitializeTargetSequences, + Direction: int32(SwitchTrafficOptions.Direction), + } + resp, err := 
GetClient().WorkflowSwitchTraffic(GetCommandCtx(), req) + if err != nil { + return err + } + + var output []byte + if format == "json" { + output, err = cli.MarshalJSONPretty(resp) + if err != nil { + return err + } + } else { + tout := bytes.Buffer{} + tout.WriteString(resp.Summary + "\n\n") + if req.DryRun { + for _, line := range resp.DryRunResults { + tout.WriteString(line + "\n") + } + } else { + tout.WriteString(fmt.Sprintf("Start State: %s\n", resp.StartState)) + tout.WriteString(fmt.Sprintf("Current State: %s\n", resp.CurrentState)) + } + output = tout.Bytes() + } + fmt.Printf("%s\n", output) + + return nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/common/update.go b/go/cmd/vtctldclient/command/vreplication/common/update.go new file mode 100644 index 00000000000..7875c9412ac --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/common/update.go @@ -0,0 +1,170 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package common + +import ( + "fmt" + "sort" + "strings" + + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/textutil" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +func bridgeToWorkflow(cmd *cobra.Command, args []string) { + workflowUpdateOptions.Workflow = BaseOptions.Workflow + workflowOptions.Keyspace = BaseOptions.TargetKeyspace +} + +var ( + workflowOptions = struct { + Keyspace string + }{} + + workflowUpdateOptions = struct { + Workflow string + Cells []string + TabletTypes []topodatapb.TabletType + TabletTypesInPreferenceOrder bool + OnDDL string + }{} +) + +func GetStartCommand(opts *SubCommandsOpts) *cobra.Command { + cmd := &cobra.Command{ + Use: "start", + Short: fmt.Sprintf("Start a %s workflow.", opts.SubCommand), + Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer start`, opts.SubCommand, opts.Workflow), + DisableFlagsInUseLine: true, + Aliases: []string{"Start"}, + Args: cobra.NoArgs, + PreRun: bridgeToWorkflow, + RunE: commandUpdateState, + } + return cmd +} + +func GetStopCommand(opts *SubCommandsOpts) *cobra.Command { + cmd := &cobra.Command{ + Use: "stop", + Short: fmt.Sprintf("Stop a %s workflow.", opts.SubCommand), + Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer stop`, opts.SubCommand, opts.Workflow), + DisableFlagsInUseLine: true, + Aliases: []string{"Stop"}, + Args: cobra.NoArgs, + PreRun: bridgeToWorkflow, + RunE: commandUpdateState, + } + return cmd +} + +func getWorkflow(keyspace, workflow string) (*vtctldatapb.GetWorkflowsResponse, error) { + resp, err := GetClient().GetWorkflows(GetCommandCtx(), 
&vtctldatapb.GetWorkflowsRequest{ + Keyspace: keyspace, + Workflow: workflow, + }) + if err != nil { + return &vtctldatapb.GetWorkflowsResponse{}, err + } + return resp, nil +} + +// CanRestartWorkflow validates that, for an atomic copy workflow, none of the streams are still in the copy phase. +// Since we copy all tables in a single snapshot, we cannot restart a workflow which broke before all tables were copied. +func CanRestartWorkflow(keyspace, workflow string) error { + resp, err := getWorkflow(keyspace, workflow) + if err != nil { + return err + } + if len(resp.Workflows) == 0 { + return fmt.Errorf("workflow %s not found", workflow) + } + if len(resp.Workflows) > 1 { + return vterrors.Errorf(vtrpc.Code_INTERNAL, "multiple results found for workflow %s", workflow) + } + wf := resp.Workflows[0] + if wf.WorkflowSubType != binlogdatapb.VReplicationWorkflowSubType_AtomicCopy.String() { + return nil + } + // If we're here, we have an atomic copy workflow. + for _, shardStream := range wf.ShardStreams { + for _, stream := range shardStream.Streams { + if len(stream.CopyStates) > 0 { + return fmt.Errorf("stream %d is still in the copy phase: can only start workflow %s if all streams have completed the copy phase", stream.Id, workflow) + } + } + } + return nil +} + +func commandUpdateState(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + var state binlogdatapb.VReplicationWorkflowState + switch strings.ToLower(cmd.Name()) { + case "start": + if err := CanRestartWorkflow(workflowOptions.Keyspace, workflowUpdateOptions.Workflow); err != nil { + return err + } + state = binlogdatapb.VReplicationWorkflowState_Running + case "stop": + state = binlogdatapb.VReplicationWorkflowState_Stopped + default: + return fmt.Errorf("invalid workflow state: %s", args[0]) + } + + // The only thing we're updating is the state. 
+ req := &vtctldatapb.WorkflowUpdateRequest{ + Keyspace: workflowOptions.Keyspace, + TabletRequest: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ + Workflow: workflowUpdateOptions.Workflow, + Cells: textutil.SimulatedNullStringSlice, + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)}, + OnDdl: binlogdatapb.OnDDLAction(textutil.SimulatedNullInt), + State: state, + }, + } + + resp, err := GetClient().WorkflowUpdate(GetCommandCtx(), req) + if err != nil { + return err + } + + // Sort the inner TabletInfo slice for deterministic output. + sort.Slice(resp.Details, func(i, j int) bool { + return resp.Details[i].Tablet.String() < resp.Details[j].Tablet.String() + }) + + data, err := cli.MarshalJSONPretty(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/common/utils.go b/go/cmd/vtctldclient/command/vreplication/common/utils.go new file mode 100644 index 00000000000..da6e3329579 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/common/utils.go @@ -0,0 +1,245 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package common + +import ( + "bytes" + "context" + "fmt" + "strings" + "time" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/vtctldclient" + "vitess.io/vitess/go/vt/vtctl/workflow" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + client vtctldclient.VtctldClient + commandCtx context.Context + // The generic default for most commands. + tabletTypesDefault = []topodatapb.TabletType{ + topodatapb.TabletType_REPLICA, + topodatapb.TabletType_PRIMARY, + } + onDDLDefault = binlogdatapb.OnDDLAction_IGNORE.String() + MaxReplicationLagDefault = 30 * time.Second + TimeoutDefault = 30 * time.Second + + BaseOptions = struct { + Workflow string + TargetKeyspace string + Format string + }{} + + CreateOptions = struct { + Cells []string + AllCells bool + TabletTypes []topodatapb.TabletType + TabletTypesInPreferenceOrder bool + OnDDL string + DeferSecondaryKeys bool + AutoStart bool + StopAfterCopy bool + }{} +) + +var commandHandlers = make(map[string]func(cmd *cobra.Command)) + +func RegisterCommandHandler(command string, handler func(cmd *cobra.Command)) { + commandHandlers[command] = handler +} + +func RegisterCommands(root *cobra.Command) { + for _, handler := range commandHandlers { + handler(root) + } +} + +type SubCommandsOpts struct { + SubCommand string + Workflow string // Used to specify an example workflow name for the Examples section of the help output. 
+} + +func SetClient(c vtctldclient.VtctldClient) { + client = c +} + +func GetClient() vtctldclient.VtctldClient { + return client +} + +func SetCommandCtx(ctx context.Context) { + commandCtx = ctx +} + +func GetCommandCtx() context.Context { + return commandCtx +} + +func ParseCells(cmd *cobra.Command) error { + cf := cmd.Flags().Lookup("cells") + af := cmd.Flags().Lookup("all-cells") + if cf != nil && cf.Changed && af != nil && af.Changed { + return fmt.Errorf("cannot specify both --cells and --all-cells") + } + if cf.Changed { // Validate the provided value(s) + for i, cell := range CreateOptions.Cells { // Which only means trimming whitespace + CreateOptions.Cells[i] = strings.TrimSpace(cell) + } + } + if CreateOptions.AllCells { // Use all current cells + ctx, cancel := context.WithTimeout(commandCtx, topo.RemoteOperationTimeout) + defer cancel() + resp, err := client.GetCellInfoNames(ctx, &vtctldatapb.GetCellInfoNamesRequest{}) + if err != nil { + return fmt.Errorf("failed to get current cells: %v", err) + } + CreateOptions.Cells = make([]string, len(resp.Names)) + copy(CreateOptions.Cells, resp.Names) + } + return nil +} + +func ParseTabletTypes(cmd *cobra.Command) error { + ttf := cmd.Flags().Lookup("tablet-types") + if ttf == nil { + return fmt.Errorf("no tablet-types flag found") + } + if !ttf.Changed { + CreateOptions.TabletTypes = tabletTypesDefault + } else if strings.TrimSpace(ttf.Value.String()) == "" { + return fmt.Errorf("invalid tablet-types value, at least one valid tablet type must be specified") + } + return nil +} + +func validateOnDDL(cmd *cobra.Command) error { + if _, ok := binlogdatapb.OnDDLAction_value[strings.ToUpper(CreateOptions.OnDDL)]; !ok { + return fmt.Errorf("invalid on-ddl value: %s", CreateOptions.OnDDL) + } + return nil +} + +func ParseAndValidateCreateOptions(cmd *cobra.Command) error { + if err := validateOnDDL(cmd); err != nil { + return err + } + if err := ParseCells(cmd); err != nil { + return err + } + if err := 
ParseTabletTypes(cmd); err != nil { + return err + } + return nil +} + +func GetOutputFormat(cmd *cobra.Command) (string, error) { + format := strings.ToLower(strings.TrimSpace(BaseOptions.Format)) + switch format { + case "text", "json": + return format, nil + default: + return "", fmt.Errorf("invalid output format, got %s", BaseOptions.Format) + } +} + +func GetTabletSelectionPreference(cmd *cobra.Command) tabletmanagerdatapb.TabletSelectionPreference { + tsp := tabletmanagerdatapb.TabletSelectionPreference_ANY + if CreateOptions.TabletTypesInPreferenceOrder { + tsp = tabletmanagerdatapb.TabletSelectionPreference_INORDER + } + return tsp +} + +func OutputStatusResponse(resp *vtctldatapb.WorkflowStatusResponse, format string) error { + var output []byte + var err error + if format == "json" { + output, err = cli.MarshalJSONPretty(resp) + if err != nil { + return err + } + } else { + tout := bytes.Buffer{} + tout.WriteString(fmt.Sprintf("The following vreplication streams exist for workflow %s.%s:\n\n", + BaseOptions.TargetKeyspace, BaseOptions.Workflow)) + for _, shardstreams := range resp.ShardStreams { + for _, shardstream := range shardstreams.Streams { + tablet := fmt.Sprintf("%s-%d", shardstream.Tablet.Cell, shardstream.Tablet.Uid) + tout.WriteString(fmt.Sprintf("id=%d on %s/%s: Status: %s. 
%s.\n", + shardstream.Id, BaseOptions.TargetKeyspace, tablet, shardstream.Status, shardstream.Info)) + } + } + tout.WriteString("\nTraffic State: ") + tout.WriteString(resp.TrafficState) + output = tout.Bytes() + } + fmt.Println(string(output)) + return nil +} + +func AddCommonFlags(cmd *cobra.Command) { + cmd.PersistentFlags().StringVar(&BaseOptions.TargetKeyspace, "target-keyspace", "", "Target keyspace for this workflow.") + cmd.MarkPersistentFlagRequired("target-keyspace") + cmd.PersistentFlags().StringVarP(&BaseOptions.Workflow, "workflow", "w", "", "The workflow you want to perform the command on.") + cmd.MarkPersistentFlagRequired("workflow") + cmd.PersistentFlags().StringVar(&BaseOptions.Format, "format", "text", "The format of the output; supported formats are: text,json.") +} + +func AddCommonCreateFlags(cmd *cobra.Command) { + cmd.Flags().StringSliceVarP(&CreateOptions.Cells, "cells", "c", nil, "Cells and/or CellAliases to copy table data from.") + cmd.Flags().BoolVarP(&CreateOptions.AllCells, "all-cells", "a", false, "Copy table data from any existing cell.") + cmd.Flags().Var((*topoproto.TabletTypeListFlag)(&CreateOptions.TabletTypes), "tablet-types", "Source tablet types to replicate table data from (e.g. PRIMARY,REPLICA,RDONLY).") + cmd.Flags().BoolVar(&CreateOptions.TabletTypesInPreferenceOrder, "tablet-types-in-preference-order", true, "When performing source tablet selection, look for candidates in the type order as they are listed in the tablet-types flag.") + cmd.Flags().StringVar(&CreateOptions.OnDDL, "on-ddl", onDDLDefault, "What to do when DDL is encountered in the VReplication stream. 
Possible values are IGNORE, STOP, EXEC, and EXEC_IGNORE.") + cmd.Flags().BoolVar(&CreateOptions.DeferSecondaryKeys, "defer-secondary-keys", false, "Defer secondary index creation for a table until after it has been copied.") + cmd.Flags().BoolVar(&CreateOptions.AutoStart, "auto-start", true, "Start the workflow after creating it.") + cmd.Flags().BoolVar(&CreateOptions.StopAfterCopy, "stop-after-copy", false, "Stop the workflow after it's finished copying the existing rows and before it starts replicating changes.") +} + +var SwitchTrafficOptions = struct { + Cells []string + TabletTypes []topodatapb.TabletType + Timeout time.Duration + MaxReplicationLagAllowed time.Duration + EnableReverseReplication bool + DryRun bool + Direction workflow.TrafficSwitchDirection + InitializeTargetSequences bool +}{} + +func AddCommonSwitchTrafficFlags(cmd *cobra.Command, initializeTargetSequences bool) { + cmd.Flags().StringSliceVarP(&SwitchTrafficOptions.Cells, "cells", "c", nil, "Cells and/or CellAliases to switch traffic in.") + cmd.Flags().Var((*topoproto.TabletTypeListFlag)(&SwitchTrafficOptions.TabletTypes), "tablet-types", "Tablet types to switch traffic for.") + cmd.Flags().DurationVar(&SwitchTrafficOptions.Timeout, "timeout", TimeoutDefault, "Specifies the maximum time to wait, in seconds, for VReplication to catch up on primary tablets. 
The traffic switch will be cancelled on timeout.") + cmd.Flags().DurationVar(&SwitchTrafficOptions.MaxReplicationLagAllowed, "max-replication-lag-allowed", MaxReplicationLagDefault, "Allow traffic to be switched only if VReplication lag is below this.") + cmd.Flags().BoolVar(&SwitchTrafficOptions.EnableReverseReplication, "enable-reverse-replication", true, "Setup replication going back to the original source keyspace to support rolling back the traffic cutover.") + cmd.Flags().BoolVar(&SwitchTrafficOptions.DryRun, "dry-run", false, "Print the actions that would be taken and report any known errors that would have occurred.") + if initializeTargetSequences { + cmd.Flags().BoolVar(&SwitchTrafficOptions.InitializeTargetSequences, "initialize-target-sequences", false, "When moving tables from an unsharded keyspace to a sharded keyspace, initialize any sequences that are being used on the target when switching writes.") + } +} diff --git a/go/cmd/vtctldclient/command/vreplication/common/utils_test.go b/go/cmd/vtctldclient/command/vreplication/common/utils_test.go new file mode 100644 index 00000000000..0dc179060d6 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/common/utils_test.go @@ -0,0 +1,153 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package common_test + +import ( + "context" + "testing" + "time" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/cmd/vtctldclient/command" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" + "vitess.io/vitess/go/vt/vtctl/localvtctldclient" + "vitess.io/vitess/go/vt/vtctl/vtctldclient" + "vitess.io/vitess/go/vt/vttablet/tmclient" +) + +func TestParseAndValidateCreateOptions(t *testing.T) { + common.SetCommandCtx(context.Background()) + ctx, cancel := context.WithTimeout(common.GetCommandCtx(), 60*time.Second) + defer cancel() + cells := []string{"zone1", "zone2", "zone3"} + SetupLocalVtctldClient(t, ctx, cells...) + + tests := []struct { + name string + setFunc func(*cobra.Command) error + wantErr bool + checkFunc func() + }{ + { + name: "invalid tablet type", + setFunc: func(cmd *cobra.Command) error { + tabletTypesFlag := cmd.Flags().Lookup("tablet-types") + err := tabletTypesFlag.Value.Set("invalid") + tabletTypesFlag.Changed = true + return err + }, + wantErr: true, + }, + { + name: "no tablet types", + setFunc: func(cmd *cobra.Command) error { + tabletTypesFlag := cmd.Flags().Lookup("tablet-types") + err := tabletTypesFlag.Value.Set("") + tabletTypesFlag.Changed = true + return err + }, + wantErr: true, + }, + { + name: "valid tablet types", + setFunc: func(cmd *cobra.Command) error { + tabletTypesFlag := cmd.Flags().Lookup("tablet-types") + err := tabletTypesFlag.Value.Set("rdonly,replica") + tabletTypesFlag.Changed = true + return err + }, + wantErr: false, + }, + { + name: "cells and all-cells", + setFunc: func(cmd *cobra.Command) error { + cellsFlag := cmd.Flags().Lookup("cells") + allCellsFlag := cmd.Flags().Lookup("all-cells") + if err := cellsFlag.Value.Set("cella"); err != nil { + return err + } + cellsFlag.Changed = true + if err := 
allCellsFlag.Value.Set("true"); err != nil { + return err + } + allCellsFlag.Changed = true + return nil + }, + wantErr: true, + }, + { + name: "all cells", + setFunc: func(cmd *cobra.Command) error { + allCellsFlag := cmd.Flags().Lookup("all-cells") + if err := allCellsFlag.Value.Set("true"); err != nil { + return err + } + allCellsFlag.Changed = true + return nil + }, + wantErr: false, + checkFunc: func() { + require.Equal(t, cells, common.CreateOptions.Cells) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cmd := &cobra.Command{} + common.AddCommonCreateFlags(cmd) + test := func() error { + if tt.setFunc != nil { + if err := tt.setFunc(cmd); err != nil { + return err + } + } + if err := common.ParseAndValidateCreateOptions(cmd); err != nil { + return err + } + return nil + } + if err := test(); (err != nil) != tt.wantErr { + t.Errorf("ParseAndValidateCreateOptions() error = %v, wantErr %t", err, tt.wantErr) + } + if tt.checkFunc != nil { + tt.checkFunc() + } + }) + } +} + +// SetupLocalVtctldClient sets up a local or internal VtctldServer and +// VtctldClient for tests. It uses a memorytopo instance which contains +// the cells provided. +func SetupLocalVtctldClient(t *testing.T, ctx context.Context, cells ...string) { + ts, factory := memorytopo.NewServerAndFactory(ctx, cells...) 
+ topo.RegisterFactory("test", factory) + tmclient.RegisterTabletManagerClientFactory("grpc", func() tmclient.TabletManagerClient { + return nil + }) + vtctld := grpcvtctldserver.NewVtctldServer(ts) + localvtctldclient.SetServer(vtctld) + command.VtctldClientProtocol = "local" + client, err := vtctldclient.New(command.VtctldClientProtocol, "") + require.NoError(t, err, "failed to create local vtctld client which uses an internal vtctld server") + common.SetClient(client) +} diff --git a/go/cmd/vtctldclient/command/vreplication/lookupvindex/lookupvindex.go b/go/cmd/vtctldclient/command/vreplication/lookupvindex/lookupvindex.go new file mode 100644 index 00000000000..b703e873bd0 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/lookupvindex/lookupvindex.go @@ -0,0 +1,321 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lookupvindex + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + topoprotopb "vitess.io/vitess/go/vt/topo/topoproto" +) + +var ( + tabletTypesDefault = []topodatapb.TabletType{ + topodatapb.TabletType_REPLICA, + topodatapb.TabletType_PRIMARY, + } + + baseOptions = struct { + // This is where the lookup table and VReplicaiton workflow + // will be created. + TableKeyspace string + // This will be the name of the Lookup Vindex and the name + // of the VReplication workflow. + Name string + Vschema *vschemapb.Keyspace + }{} + + // base is the base command for all actions related to Lookup Vindexes. + base = &cobra.Command{ + Use: "LookupVindex --name --table-keyspace [command] [command-flags]", + Short: "Perform commands related to creating, backfilling, and externalizing Lookup Vindexes using VReplication workflows.", + DisableFlagsInUseLine: true, + Aliases: []string{"lookupvindex"}, + Args: cobra.NoArgs, + } + + createOptions = struct { + Keyspace string + Type string + TableOwner string + TableOwnerColumns []string + TableName string + TableVindexType string + Cells []string + TabletTypes []topodatapb.TabletType + TabletTypesInPreferenceOrder bool + IgnoreNulls bool + ContinueAfterCopyWithOwner bool + }{} + + externalizeOptions = struct { + Keyspace string + }{} + + parseAndValidateCreate = func(cmd *cobra.Command, args []string) error { + if createOptions.TableName == "" { // Use vindex name + createOptions.TableName = baseOptions.Name + } + if !strings.Contains(createOptions.Type, "lookup") { + return fmt.Errorf("vindex type must be a lookup vindex") + } + baseOptions.Vschema = &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + baseOptions.Name: { + 
Type: createOptions.Type, + Params: map[string]string{ + "table": baseOptions.TableKeyspace + "." + createOptions.TableName, + "from": strings.Join(createOptions.TableOwnerColumns, ","), + "to": "keyspace_id", + "ignore_nulls": fmt.Sprintf("%t", createOptions.IgnoreNulls), + }, + Owner: createOptions.TableOwner, + }, + }, + Tables: map[string]*vschemapb.Table{ + createOptions.TableOwner: { + ColumnVindexes: []*vschemapb.ColumnVindex{ + { + Name: baseOptions.Name, + Columns: createOptions.TableOwnerColumns, + }, + }, + }, + createOptions.TableName: { + ColumnVindexes: []*vschemapb.ColumnVindex{ + { + // If the vindex name/type is empty then we'll fill this in + // later using the defult for the column types. + Name: createOptions.TableVindexType, + Columns: createOptions.TableOwnerColumns, + }, + }, + }, + }, + } + + // VReplication specific flags. + ttFlag := cmd.Flags().Lookup("tablet-types") + if ttFlag != nil && ttFlag.Changed { + createOptions.TabletTypes = tabletTypesDefault + } + cFlag := cmd.Flags().Lookup("cells") + if cFlag != nil && cFlag.Changed { + for i, cell := range createOptions.Cells { + createOptions.Cells[i] = strings.TrimSpace(cell) + } + } + return nil + } + + // cancel makes a WorkflowDelete call to a vtctld. + cancel = &cobra.Command{ + Use: "cancel", + Short: "Cancel the VReplication workflow that backfills the Lookup Vindex.", + Example: `vtctldclient --server localhost:15999 LookupVindex --name corder_lookup_vdx --table-keyspace customer cancel`, + SilenceUsage: true, + DisableFlagsInUseLine: true, + Aliases: []string{"Cancel"}, + Args: cobra.NoArgs, + RunE: commandCancel, + } + + // create makes a LookupVindexCreate call to a vtctld. 
+ create = &cobra.Command{ + Use: "create", + Short: "Create the Lookup Vindex in the specified keyspace and backfill it with a VReplication workflow.", + Example: `vtctldclient --server localhost:15999 LookupVindex --name corder_lookup_vdx --table-keyspace customer create --keyspace customer --type consistent_lookup_unique --table-owner corder --table-owner-columns sku --table-name corder_lookup_tbl --table-vindex-type unicode_loose_xxhash`, + SilenceUsage: true, + DisableFlagsInUseLine: true, + Aliases: []string{"Create"}, + Args: cobra.NoArgs, + PreRunE: parseAndValidateCreate, + RunE: commandCreate, + } + + // externalize makes a LookupVindexExternalize call to a vtctld. + externalize = &cobra.Command{ + Use: "externalize", + Short: "Externalize the Lookup Vindex. If the Vindex has an owner the VReplication workflow will also be deleted.", + Example: `vtctldclient --server localhost:15999 LookupVindex --name corder_lookup_vdx --table-keyspace customer externalize`, + SilenceUsage: true, + DisableFlagsInUseLine: true, + Aliases: []string{"Externalize"}, + Args: cobra.NoArgs, + RunE: commandExternalize, + } + + // show makes a GetWorkflows call to a vtctld. 
+ show = &cobra.Command{ + Use: "show", + Short: "Show the status of the VReplication workflow that backfills the Lookup Vindex.", + Example: `vtctldclient --server localhost:15999 LookupVindex --name corder_lookup_vdx --table-keyspace customer show`, + SilenceUsage: true, + DisableFlagsInUseLine: true, + Aliases: []string{"Show"}, + Args: cobra.NoArgs, + RunE: commandShow, + } +) + +func commandCancel(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + req := &vtctldatapb.WorkflowDeleteRequest{ + Keyspace: baseOptions.TableKeyspace, + Workflow: baseOptions.Name, + } + _, err := common.GetClient().WorkflowDelete(common.GetCommandCtx(), req) + if err != nil { + return err + } + + output := fmt.Sprintf("LookupVindex %s left in place and the %s VReplication wokflow has been deleted", + baseOptions.Name, baseOptions.Name) + fmt.Println(output) + + return nil +} + +func commandCreate(cmd *cobra.Command, args []string) error { + tsp := common.GetTabletSelectionPreference(cmd) + cli.FinishedParsing(cmd) + + _, err := common.GetClient().LookupVindexCreate(common.GetCommandCtx(), &vtctldatapb.LookupVindexCreateRequest{ + Workflow: baseOptions.Name, + Keyspace: createOptions.Keyspace, + Vindex: baseOptions.Vschema, + ContinueAfterCopyWithOwner: createOptions.ContinueAfterCopyWithOwner, + Cells: createOptions.Cells, + TabletTypes: createOptions.TabletTypes, + TabletSelectionPreference: tsp, + }) + + if err != nil { + return err + } + + output := fmt.Sprintf("LookupVindex %s created in the %s keyspace and the %s VReplication wokflow scheduled on the %s shards, use show to view progress", + baseOptions.Name, createOptions.Keyspace, baseOptions.Name, baseOptions.TableKeyspace) + fmt.Println(output) + + return nil +} + +func commandExternalize(cmd *cobra.Command, args []string) error { + if externalizeOptions.Keyspace == "" { + externalizeOptions.Keyspace = baseOptions.TableKeyspace + } + cli.FinishedParsing(cmd) + + resp, err := 
common.GetClient().LookupVindexExternalize(common.GetCommandCtx(), &vtctldatapb.LookupVindexExternalizeRequest{ + Keyspace: externalizeOptions.Keyspace, + // The name of the workflow and lookup vindex. + Name: baseOptions.Name, + // Where the lookup table and VReplication workflow were created. + TableKeyspace: baseOptions.TableKeyspace, + }) + + if err != nil { + return err + } + + output := fmt.Sprintf("LookupVindex %s has been externalized", baseOptions.Name) + if resp.WorkflowDeleted { + output = output + fmt.Sprintf(" and the %s VReplication workflow has been deleted", baseOptions.Name) + } + fmt.Println(output) + + return nil +} + +func commandShow(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + req := &vtctldatapb.GetWorkflowsRequest{ + Keyspace: baseOptions.TableKeyspace, + Workflow: baseOptions.Name, + } + resp, err := common.GetClient().GetWorkflows(common.GetCommandCtx(), req) + if err != nil { + return err + } + + data, err := cli.MarshalJSONPretty(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} + +func registerCommands(root *cobra.Command) { + base.PersistentFlags().StringVar(&baseOptions.Name, "name", "", "The name of the Lookup Vindex to create. This will also be the name of the VReplication workflow created to backfill the Lookup Vindex.") + base.MarkPersistentFlagRequired("name") + base.PersistentFlags().StringVar(&baseOptions.TableKeyspace, "table-keyspace", "", "The keyspace to create the lookup table in. This is also where the VReplication workflow is created to backfill the Lookup Vindex.") + base.MarkPersistentFlagRequired("table-keyspace") + root.AddCommand(base) + + // This will create the lookup vindex in the specified keyspace + // and setup a VReplication workflow to backfill its lookup table. + create.Flags().StringVar(&createOptions.Keyspace, "keyspace", "", "The keyspace to create the Lookup Vindex in. 
This is also where the table-owner must exist.") + create.MarkFlagRequired("keyspace") + create.Flags().StringVar(&createOptions.Type, "type", "", "The type of Lookup Vindex to create.") + create.MarkFlagRequired("type") + create.Flags().StringVar(&createOptions.TableOwner, "table-owner", "", "The table holding the data which we should use to backfill the Lookup Vindex. This must exist in the same keyspace as the Lookup Vindex.") + create.MarkFlagRequired("table-owner") + create.Flags().StringSliceVar(&createOptions.TableOwnerColumns, "table-owner-columns", nil, "The columns to read from the owner table. These will be used to build the hash which gets stored as the keyspace_id value in the lookup table.") + create.MarkFlagRequired("table-owner-columns") + create.Flags().StringVar(&createOptions.TableName, "table-name", "", "The name of the lookup table. If not specified, then it will be created using the same name as the Lookup Vindex.") + create.Flags().StringVar(&createOptions.TableVindexType, "table-vindex-type", "", "The primary vindex name/type to use for the lookup table, if the table-keyspace is sharded. This must match the name of a vindex defined in the table-keyspace. If no value is provided then the default type will be used based on the table-owner-columns types.") + create.Flags().BoolVar(&createOptions.IgnoreNulls, "ignore-nulls", false, "Do not add corresponding records in the lookup table if any of the owner table's 'from' fields are NULL.") + create.Flags().BoolVar(&createOptions.ContinueAfterCopyWithOwner, "continue-after-copy-with-owner", true, "Vindex will continue materialization after the backfill completes when an owner is provided.") + // VReplication specific flags. 
+ create.Flags().StringSliceVar(&createOptions.Cells, "cells", nil, "Cells to look in for source tablets to replicate from.") + create.Flags().Var((*topoprotopb.TabletTypeListFlag)(&createOptions.TabletTypes), "tablet-types", "Source tablet types to replicate from.") + create.Flags().BoolVar(&createOptions.TabletTypesInPreferenceOrder, "tablet-types-in-preference-order", true, "When performing source tablet selection, look for candidates in the type order as they are listed in the tablet-types flag.") + base.AddCommand(create) + + // This will show the output of GetWorkflows client call + // for the VReplication workflow used. + base.AddCommand(show) + + // This will also delete the VReplication workflow if the + // vindex has an owner as the lookup vindex will then be + // managed by VTGate. + externalize.Flags().StringVar(&externalizeOptions.Keyspace, "keyspace", "", "The keyspace containing the Lookup Vindex. If no value is specified then the table-keyspace will be used.") + base.AddCommand(externalize) + + // The cancel command deletes the VReplication workflow used + // to backfill the lookup vindex. It ends up making a + // WorkflowDelete VtctldServer call. + base.AddCommand(cancel) +} + +func init() { + common.RegisterCommandHandler("LookupVindex", registerCommands) +} diff --git a/go/cmd/vtctldclient/command/vreplication/materialize/create.go b/go/cmd/vtctldclient/command/vreplication/materialize/create.go new file mode 100644 index 00000000000..d835b0f3426 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/materialize/create.go @@ -0,0 +1,189 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package materialize + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo/topoproto" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + createOptions = struct { + SourceKeyspace string + TableSettings tableSettings + }{} + + // create makes a MaterializeCreate gRPC call to a vtctld. + create = &cobra.Command{ + Use: "create", + Short: "Create and run a Materialize VReplication workflow.", + Example: `vtctldclient --server localhost:15999 materialize --workflow product_sales --target-keyspace commerce create --source-keyspace commerce --table-settings '[{"target_table": "sales_by_sku", "create_ddl": "create table sales_by_sku (sku varbinary(128) not null primary key, orders bigint, revenue bigint)", "source_expression": "select sku, count(*) as orders, sum(price) as revenue from corder group by sku"}]' --cells zone1 --cells zone2 --tablet-types replica`, + Long: `Materialize is a lower level VReplication command that allows for generalized materialization +of tables. The target tables can be copies, aggregations, or views. The target tables are kept +in sync in near-realtime. The primary flag used to define the materializations (you can have +multiple per workflow) is table-settings which is a JSON array where each value must contain +two key/value pairs. 
The first required key is 'target_table' and it is the name of the table +in the target-keyspace to store the results in. The second required key is 'source_expression' +and its value is the select query to run against the source table. An optional key/value pair +can also be specified for 'create_ddl' which provides the DDL to create the target table if it +does not exist -- you can alternatively specify a value of 'copy' if the target table schema +should be copied as-is from the source keyspace. Here's an example value for table-settings: +[ + { + "target_table": "customer_one_email", + "source_expression": "select email from customer where customer_id = 1" + }, + { + "target_table": "states", + "source_expression": "select * from states", + "create_ddl": "copy" + }, + { + "target_table": "sales_by_sku", + "source_expression": "select sku, count(*) as orders, sum(price) as revenue from corder group by sku", + "create_ddl": "create table sales_by_sku (sku varbinary(128) not null primary key, orders bigint, revenue bigint)" + } +] +`, + SilenceUsage: true, + DisableFlagsInUseLine: true, + Aliases: []string{"Create"}, + Args: cobra.NoArgs, + PreRunE: func(cmd *cobra.Command, args []string) error { + if err := common.ParseAndValidateCreateOptions(cmd); err != nil { + return err + } + return nil + }, + RunE: commandCreate, + } +) + +func commandCreate(cmd *cobra.Command, args []string) error { + format, err := common.GetOutputFormat(cmd) + if err != nil { + return err + } + tsp := common.GetTabletSelectionPreference(cmd) + cli.FinishedParsing(cmd) + + ms := &vtctldatapb.MaterializeSettings{ + Workflow: common.BaseOptions.Workflow, + TargetKeyspace: common.BaseOptions.TargetKeyspace, + SourceKeyspace: createOptions.SourceKeyspace, + TableSettings: createOptions.TableSettings.val, + StopAfterCopy: common.CreateOptions.StopAfterCopy, + Cell: strings.Join(common.CreateOptions.Cells, ","), + TabletTypes: topoproto.MakeStringTypeCSV(common.CreateOptions.TabletTypes), + 
TabletSelectionPreference: tsp, + } + + req := &vtctldatapb.MaterializeCreateRequest{ + Settings: ms, + } + + _, err = common.GetClient().MaterializeCreate(common.GetCommandCtx(), req) + if err != nil { + return err + } + + if format == "json" { + resp := struct { + Action string + Status string + }{ + Action: "create", + Status: "success", + } + jsonText, _ := cli.MarshalJSONPretty(resp) + fmt.Println(string(jsonText)) + } else { + fmt.Printf("Materialization workflow %s successfully created in the %s keyspace. Use show to view the status.\n", + common.BaseOptions.Workflow, common.BaseOptions.TargetKeyspace) + } + + return nil +} + +// tableSettings is a wrapper around a slice of TableMaterializeSettings +// proto messages that implements the pflag.Value interface. +type tableSettings struct { + val []*vtctldatapb.TableMaterializeSettings +} + +func (ts *tableSettings) String() string { + tsj, _ := json.Marshal(ts.val) + return string(tsj) +} + +func (ts *tableSettings) Set(v string) error { + ts.val = make([]*vtctldatapb.TableMaterializeSettings, 0) + err := json.Unmarshal([]byte(v), &ts.val) + if err != nil { + return fmt.Errorf("table-settings is not valid JSON") + } + if len(ts.val) == 0 { + return fmt.Errorf("empty table-settings") + } + + // Validate the provided queries. + seenSourceTables := make(map[string]bool) + for _, tms := range ts.val { + if tms.TargetTable == "" || tms.SourceExpression == "" { + return fmt.Errorf("missing target_table or source_expression") + } + // Validate that the query is valid. + stmt, err := sqlparser.Parse(tms.SourceExpression) + if err != nil { + return fmt.Errorf("invalid source_expression: %q", tms.SourceExpression) + } + // Validate that each source-expression uses a different table. + // If any of them query the same table the materialize workflow + // will fail. 
+ err = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + case sqlparser.TableName: + if !node.Name.IsEmpty() { + if seenSourceTables[node.Name.String()] { + return false, fmt.Errorf("multiple source_expression queries use the same table: %q", node.Name.String()) + } + seenSourceTables[node.Name.String()] = true + } + } + return true, nil + }, stmt) + if err != nil { + return err + } + } + + return nil +} + +func (ts *tableSettings) Type() string { + return "JSON" +} diff --git a/go/cmd/vtctldclient/command/vreplication/materialize/materialize.go b/go/cmd/vtctldclient/command/vreplication/materialize/materialize.go new file mode 100644 index 00000000000..58be1ec4433 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/materialize/materialize.go @@ -0,0 +1,64 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package materialize + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + "vitess.io/vitess/go/vt/topo/topoproto" +) + +var ( + // base is the base command for all actions related to Materialize. 
+ base = &cobra.Command{ + Use: "Materialize --workflow --target-keyspace [command] [command-flags]", + Short: "Perform commands related to materializing query results from the source keyspace into tables in the target keyspace.", + DisableFlagsInUseLine: true, + Aliases: []string{"materialize"}, + Args: cobra.ExactArgs(1), + } +) + +func registerCommands(root *cobra.Command) { + common.AddCommonFlags(base) + root.AddCommand(base) + + create.Flags().StringSliceVarP(&common.CreateOptions.Cells, "cells", "c", nil, "Cells and/or CellAliases to copy table data from.") + create.Flags().Var((*topoproto.TabletTypeListFlag)(&common.CreateOptions.TabletTypes), "tablet-types", "Source tablet types to replicate table data from (e.g. PRIMARY,REPLICA,RDONLY).") + create.Flags().BoolVar(&common.CreateOptions.TabletTypesInPreferenceOrder, "tablet-types-in-preference-order", true, "When performing source tablet selection, look for candidates in the type order as they are listed in the tablet-types flag.") + create.Flags().StringVar(&createOptions.SourceKeyspace, "source-keyspace", "", "Keyspace where the tables queried in the 'source_expression' values within table-settings live.") + create.MarkFlagRequired("source-keyspace") + create.Flags().Var(&createOptions.TableSettings, "table-settings", "A JSON array defining what tables to materialize using what select statements. See the --help output for more details.") + create.MarkFlagRequired("table-settings") + create.Flags().BoolVar(&common.CreateOptions.StopAfterCopy, "stop-after-copy", false, "Stop the workflow after it's finished copying the existing rows and before it starts replicating changes.") + base.AddCommand(create) + + // Generic workflow commands. 
+ opts := &common.SubCommandsOpts{ + SubCommand: "Materialize", + Workflow: "product_sales", + } + base.AddCommand(common.GetCancelCommand(opts)) + base.AddCommand(common.GetShowCommand(opts)) + base.AddCommand(common.GetStartCommand(opts)) + base.AddCommand(common.GetStopCommand(opts)) +} + +func init() { + common.RegisterCommandHandler("Materialize", registerCommands) +} diff --git a/go/cmd/vtctldclient/command/vreplication/migrate/migrate.go b/go/cmd/vtctldclient/command/vreplication/migrate/migrate.go new file mode 100644 index 00000000000..25f54ec71af --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/migrate/migrate.go @@ -0,0 +1,134 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package migrate + +import ( + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + // migrate is the base command for all actions related to the migrate command. 
+ migrate = &cobra.Command{ + Use: "Migrate --workflow --target-keyspace [command] [command-flags]", + Short: "Migrate is used to import data from an external cluster into the current cluster.", + DisableFlagsInUseLine: true, + Aliases: []string{"migrate"}, + Args: cobra.ExactArgs(1), + } +) + +var createOptions = struct { + MountName string + SourceKeyspace string + AllTables bool + IncludeTables []string + ExcludeTables []string + SourceTimeZone string + NoRoutingRules bool +}{} + +var createCommand = &cobra.Command{ + Use: "create", + Short: "Create and optionally run a Migrate VReplication workflow.", + Example: `vtctldclient --server localhost:15999 migrate --workflow import --target-keyspace customer create --source-keyspace commerce --mount-name ext1 --tablet-types replica`, + SilenceUsage: true, + DisableFlagsInUseLine: true, + Aliases: []string{"Create"}, + Args: cobra.NoArgs, + PreRunE: func(cmd *cobra.Command, args []string) error { + // Either specific tables or the all tables flags are required. 
+ if !cmd.Flags().Lookup("tables").Changed && !cmd.Flags().Lookup("all-tables").Changed { + return fmt.Errorf("tables or all-tables are required to specify which tables to move") + } + if err := common.ParseAndValidateCreateOptions(cmd); err != nil { + return err + } + return nil + }, + RunE: commandCreate, +} + +func commandCreate(cmd *cobra.Command, args []string) error { + tsp := common.GetTabletSelectionPreference(cmd) + cli.FinishedParsing(cmd) + + req := &vtctldatapb.MigrateCreateRequest{ + Workflow: common.BaseOptions.Workflow, + TargetKeyspace: common.BaseOptions.TargetKeyspace, + SourceKeyspace: createOptions.SourceKeyspace, + MountName: createOptions.MountName, + SourceTimeZone: createOptions.SourceTimeZone, + Cells: common.CreateOptions.Cells, + TabletTypes: common.CreateOptions.TabletTypes, + TabletSelectionPreference: tsp, + AllTables: createOptions.AllTables, + IncludeTables: createOptions.IncludeTables, + ExcludeTables: createOptions.ExcludeTables, + OnDdl: common.CreateOptions.OnDDL, + DeferSecondaryKeys: common.CreateOptions.DeferSecondaryKeys, + AutoStart: common.CreateOptions.AutoStart, + StopAfterCopy: common.CreateOptions.StopAfterCopy, + NoRoutingRules: createOptions.NoRoutingRules, + } + + _, err := common.GetClient().MigrateCreate(common.GetCommandCtx(), req) + if err != nil { + return err + } + + return nil +} + +func addCreateFlags(cmd *cobra.Command) { + common.AddCommonCreateFlags(cmd) + cmd.Flags().StringVar(&createOptions.SourceKeyspace, "source-keyspace", "", "Keyspace where the tables are being moved from.") + cmd.MarkFlagRequired("source-keyspace") + cmd.Flags().StringVar(&createOptions.MountName, "mount-name", "", "Name external cluster is mounted as.") + cmd.MarkFlagRequired("mount-name") + cmd.Flags().StringVar(&createOptions.SourceTimeZone, "source-time-zone", "", "Specifying this causes any DATETIME fields to be converted from the given time zone into UTC.") + cmd.Flags().BoolVar(&createOptions.AllTables, "all-tables", false, 
"Copy all tables from the source.") + cmd.Flags().StringSliceVar(&createOptions.IncludeTables, "tables", nil, "Source tables to copy.") + cmd.Flags().StringSliceVar(&createOptions.ExcludeTables, "exclude-tables", nil, "Source tables to exclude from copying.") + cmd.Flags().BoolVar(&createOptions.NoRoutingRules, "no-routing-rules", false, "(Advanced) Do not create routing rules while creating the workflow. See the reference documentation for limitations if you use this flag.") + +} + +func registerCommands(root *cobra.Command) { + common.AddCommonFlags(migrate) + root.AddCommand(migrate) + addCreateFlags(createCommand) + migrate.AddCommand(createCommand) + opts := &common.SubCommandsOpts{ + SubCommand: "Migrate", + Workflow: "import", + } + migrate.AddCommand(common.GetCompleteCommand(opts)) + migrate.AddCommand(common.GetCancelCommand(opts)) + migrate.AddCommand(common.GetShowCommand(opts)) + migrate.AddCommand(common.GetStatusCommand(opts)) +} + +func init() { + common.RegisterCommandHandler("Migrate", registerCommands) +} diff --git a/go/cmd/vtctldclient/command/vreplication/mount/mount.go b/go/cmd/vtctldclient/command/vreplication/mount/mount.go new file mode 100644 index 00000000000..95ce3961e71 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/mount/mount.go @@ -0,0 +1,183 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mount + +import ( + "encoding/json" + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + // base is the base command for all actions related to the mount action. + base = &cobra.Command{ + Use: "Mount [command] [command-flags]", + Short: "Mount is used to link an external Vitess cluster in order to migrate data from it.", + DisableFlagsInUseLine: true, + Aliases: []string{"mount"}, + Args: cobra.ExactArgs(1), + } +) + +var mountOptions struct { + Name string + TopoType string + TopoServer string + TopoRoot string +} + +var register = &cobra.Command{ + Use: "register", + Short: "Register an external Vitess Cluster.", + Example: `vtctldclient --server localhost:15999 mount register --name ext1 --topo-type etcd2 --topo-server localhost:12379 --topo-root /vitess/global`, + DisableFlagsInUseLine: true, + Aliases: []string{"Register"}, + Args: cobra.NoArgs, + RunE: commandRegister, +} + +func commandRegister(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + req := &vtctldatapb.MountRegisterRequest{ + Name: mountOptions.Name, + TopoType: mountOptions.TopoType, + TopoServer: mountOptions.TopoServer, + TopoRoot: mountOptions.TopoRoot, + } + _, err := common.GetClient().MountRegister(common.GetCommandCtx(), req) + if err != nil { + return err + } + fmt.Printf("Mount %s registered successfully\n", req.Name) + return nil +} + +var unregister = &cobra.Command{ + Use: "unregister", + Short: "Unregister a previously mounted external Vitess Cluster.", + Example: `vtctldclient --server localhost:15999 mount unregister --name ext1`, + DisableFlagsInUseLine: true, + Aliases: []string{"Unregister"}, + Args: cobra.NoArgs, + RunE: commandUnregister, +} + +func commandUnregister(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + req := 
&vtctldatapb.MountUnregisterRequest{ + Name: mountOptions.Name, + } + _, err := common.GetClient().MountUnregister(common.GetCommandCtx(), req) + if err != nil { + return err + } + fmt.Printf("Mount %s unregistered successfully\n", req.Name) + return nil +} + +var show = &cobra.Command{ + Use: "show", + Short: "Show attributes of a previously mounted external Vitess Cluster.", + Example: `vtctldclient --server localhost:15999 mount show --name ext1`, + DisableFlagsInUseLine: true, + Aliases: []string{"Show"}, + Args: cobra.NoArgs, + RunE: commandShow, +} + +func commandShow(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + req := &vtctldatapb.MountShowRequest{ + Name: mountOptions.Name, + } + resp, err := common.GetClient().MountShow(common.GetCommandCtx(), req) + if err != nil { + return err + } + data, err := json.Marshal(resp) + if err != nil { + return err + } + fmt.Printf("%s\n", string(data)) + return nil +} + +var list = &cobra.Command{ + Use: "list", + Short: "List all mounted external Vitess Clusters.", + Example: `vtctldclient --server localhost:15999 mount list`, + DisableFlagsInUseLine: true, + Aliases: []string{"List"}, + Args: cobra.NoArgs, + RunE: commandList, +} + +func commandList(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + req := &vtctldatapb.MountListRequest{} + resp, err := common.GetClient().MountList(common.GetCommandCtx(), req) + if err != nil { + return err + } + if err != nil { + return err + } + data, err := json.Marshal(resp) + if err != nil { + return err + } + fmt.Printf("%s\n", string(data)) + return nil +} + +func registerCommands(root *cobra.Command) { + root.AddCommand(base) + + register.Flags().StringVar(&mountOptions.Name, "name", "", "Name to use for the mount.") + register.MarkFlagRequired("name") + register.Flags().StringVar(&mountOptions.TopoType, "topo-type", "", "Topo server implementation to use.") + register.MarkFlagRequired("topo-type") + 
register.Flags().StringVar(&mountOptions.TopoServer, "topo-server", "", "Topo server address.") + register.MarkFlagRequired("topo-server") + register.Flags().StringVar(&mountOptions.TopoRoot, "topo-root", "", "Topo server root path.") + register.MarkFlagRequired("topo-root") + base.AddCommand(register) + + unregister.Flags().StringVar(&mountOptions.Name, "name", "", "Name of the mount.") + unregister.MarkFlagRequired("name") + base.AddCommand(unregister) + + show.Flags().StringVar(&mountOptions.Name, "name", "", "Name of the mount.") + show.MarkFlagRequired("name") + base.AddCommand(show) + + base.AddCommand(list) +} + +func init() { + common.RegisterCommandHandler("Mount", registerCommands) +} diff --git a/go/cmd/vtctldclient/command/vreplication/movetables/create.go b/go/cmd/vtctldclient/command/vreplication/movetables/create.go new file mode 100644 index 00000000000..95c50f4f97e --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/movetables/create.go @@ -0,0 +1,122 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package movetables + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + createOptions = struct { + SourceKeyspace string + SourceShards []string + ExternalClusterName string + AllTables bool + IncludeTables []string + ExcludeTables []string + SourceTimeZone string + NoRoutingRules bool + AtomicCopy bool + }{} + + // create makes a MoveTablesCreate gRPC call to a vtctld. + create = &cobra.Command{ + Use: "create", + Short: "Create and optionally run a MoveTables VReplication workflow.", + Example: `vtctldclient --server localhost:15999 movetables --workflow commerce2customer --target-keyspace customer create --source-keyspace commerce --cells zone1 --cells zone2 --tablet-types replica`, + SilenceUsage: true, + DisableFlagsInUseLine: true, + Aliases: []string{"Create"}, + Args: cobra.NoArgs, + PreRunE: func(cmd *cobra.Command, args []string) error { + // Either specific tables or the all tables flags are required. 
+ if !cmd.Flags().Lookup("tables").Changed && !cmd.Flags().Lookup("all-tables").Changed { + return fmt.Errorf("tables or all-tables are required to specify which tables to move") + } + if err := common.ParseAndValidateCreateOptions(cmd); err != nil { + return err + } + checkAtomicCopyOptions := func() error { + var errors []string + if !createOptions.AtomicCopy { + return nil + } + if !createOptions.AllTables { + errors = append(errors, "atomic copy requires --all-tables") + } + if len(createOptions.IncludeTables) > 0 || len(createOptions.ExcludeTables) > 0 { + errors = append(errors, "atomic copy does not support specifying tables") + } + if len(errors) > 0 { + return fmt.Errorf("found options incompatible with atomic copy: %s", strings.Join(errors, ", ")) + } + return nil + } + if err := checkAtomicCopyOptions(); err != nil { + return err + } + return nil + }, + RunE: commandCreate, + } +) + +func commandCreate(cmd *cobra.Command, args []string) error { + format, err := common.GetOutputFormat(cmd) + if err != nil { + return err + } + tsp := common.GetTabletSelectionPreference(cmd) + cli.FinishedParsing(cmd) + + req := &vtctldatapb.MoveTablesCreateRequest{ + Workflow: common.BaseOptions.Workflow, + TargetKeyspace: common.BaseOptions.TargetKeyspace, + SourceKeyspace: createOptions.SourceKeyspace, + SourceShards: createOptions.SourceShards, + SourceTimeZone: createOptions.SourceTimeZone, + Cells: common.CreateOptions.Cells, + TabletTypes: common.CreateOptions.TabletTypes, + TabletSelectionPreference: tsp, + AllTables: createOptions.AllTables, + IncludeTables: createOptions.IncludeTables, + ExcludeTables: createOptions.ExcludeTables, + OnDdl: common.CreateOptions.OnDDL, + DeferSecondaryKeys: common.CreateOptions.DeferSecondaryKeys, + AutoStart: common.CreateOptions.AutoStart, + StopAfterCopy: common.CreateOptions.StopAfterCopy, + NoRoutingRules: createOptions.NoRoutingRules, + AtomicCopy: createOptions.AtomicCopy, + } + + resp, err := 
common.GetClient().MoveTablesCreate(common.GetCommandCtx(), req) + if err != nil { + return err + } + if err = common.OutputStatusResponse(resp, format); err != nil { + return err + } + return nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/movetables/movetables.go b/go/cmd/vtctldclient/command/vreplication/movetables/movetables.go new file mode 100644 index 00000000000..e2c7daed223 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/movetables/movetables.go @@ -0,0 +1,85 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package movetables + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" +) + +var ( + // base is the base command for all actions related to MoveTables. 
+ base = &cobra.Command{ + Use: "MoveTables --workflow --target-keyspace [command] [command-flags]", + Short: "Perform commands related to moving tables from a source keyspace to a target keyspace.", + DisableFlagsInUseLine: true, + Aliases: []string{"movetables"}, + Args: cobra.ExactArgs(1), + } +) + +func registerCommands(root *cobra.Command) { + common.AddCommonFlags(base) + root.AddCommand(base) + + common.AddCommonCreateFlags(create) + create.PersistentFlags().StringVar(&createOptions.SourceKeyspace, "source-keyspace", "", "Keyspace where the tables are being moved from.") + create.MarkPersistentFlagRequired("source-keyspace") + create.Flags().StringSliceVar(&createOptions.SourceShards, "source-shards", nil, "Source shards to copy data from when performing a partial MoveTables (experimental).") + create.Flags().StringVar(&createOptions.SourceTimeZone, "source-time-zone", "", "Specifying this causes any DATETIME fields to be converted from the given time zone into UTC.") + create.Flags().BoolVar(&createOptions.AllTables, "all-tables", false, "Copy all tables from the source.") + create.Flags().StringSliceVar(&createOptions.IncludeTables, "tables", nil, "Source tables to copy.") + create.Flags().StringSliceVar(&createOptions.ExcludeTables, "exclude-tables", nil, "Source tables to exclude from copying.") + create.Flags().BoolVar(&createOptions.NoRoutingRules, "no-routing-rules", false, "(Advanced) Do not create routing rules while creating the workflow. See the reference documentation for limitations if you use this flag.") + create.Flags().BoolVar(&createOptions.AtomicCopy, "atomic-copy", false, "(EXPERIMENTAL) A single copy phase is run for all tables from the source. 
Use this, for example, if your source keyspace has tables which use foreign key constraints.") + base.AddCommand(create) + + opts := &common.SubCommandsOpts{ + SubCommand: "MoveTables", + Workflow: "commerce2customer", + } + base.AddCommand(common.GetShowCommand(opts)) + base.AddCommand(common.GetStatusCommand(opts)) + + base.AddCommand(common.GetStartCommand(opts)) + base.AddCommand(common.GetStopCommand(opts)) + + switchTrafficCommand := common.GetSwitchTrafficCommand(opts) + common.AddCommonSwitchTrafficFlags(switchTrafficCommand, true) + base.AddCommand(switchTrafficCommand) + + reverseTrafficCommand := common.GetReverseTrafficCommand(opts) + common.AddCommonSwitchTrafficFlags(reverseTrafficCommand, false) + base.AddCommand(reverseTrafficCommand) + + complete := common.GetCompleteCommand(opts) + complete.Flags().BoolVar(&common.CompleteOptions.KeepData, "keep-data", false, "Keep the original source table data that was copied by the MoveTables workflow.") + complete.Flags().BoolVar(&common.CompleteOptions.KeepRoutingRules, "keep-routing-rules", false, "Keep the routing rules in place that direct table traffic from the source keyspace to the target keyspace of the MoveTables workflow.") + complete.Flags().BoolVar(&common.CompleteOptions.RenameTables, "rename-tables", false, "Keep the original source table data that was copied by the MoveTables workflow, but rename each table to '__old'.") + complete.Flags().BoolVar(&common.CompleteOptions.DryRun, "dry-run", false, "Print the actions that would be taken and report any known errors that would have occurred.") + base.AddCommand(complete) + + cancel := common.GetCancelCommand(opts) + cancel.Flags().BoolVar(&common.CancelOptions.KeepData, "keep-data", false, "Keep the partially copied table data from the MoveTables workflow in the target keyspace.") + cancel.Flags().BoolVar(&common.CancelOptions.KeepRoutingRules, "keep-routing-rules", false, "Keep the routing rules created for the MoveTables workflow.") + 
base.AddCommand(cancel) +} + +func init() { + common.RegisterCommandHandler("MoveTables", registerCommands) +} diff --git a/go/cmd/vtctldclient/command/vreplication/reshard/create.go b/go/cmd/vtctldclient/command/vreplication/reshard/create.go new file mode 100644 index 00000000000..b8506ae61d0 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/reshard/create.go @@ -0,0 +1,94 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reshard + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + reshardCreateOptions = struct { + sourceShards []string + targetShards []string + skipSchemaCopy bool + }{} + + // reshardCreate makes a ReshardCreate gRPC call to a vtctld. 
+ reshardCreate = &cobra.Command{ + Use: "create", + Short: "Create and optionally run a Reshard VReplication workflow.", + Example: `vtctldclient --server localhost:15999 reshard --workflow customer2customer --target-keyspace customer create --source-shards="0" --target-shards="-80,80-" --cells zone1 --cells zone2 --tablet-types replica`, + SilenceUsage: true, + DisableFlagsInUseLine: true, + Aliases: []string{"Create"}, + Args: cobra.NoArgs, + PreRunE: func(cmd *cobra.Command, args []string) error { + if err := common.ParseAndValidateCreateOptions(cmd); err != nil { + return err + } + return nil + }, + RunE: commandReshardCreate, + } +) + +func commandReshardCreate(cmd *cobra.Command, args []string) error { + format, err := common.GetOutputFormat(cmd) + if err != nil { + return err + } + tsp := common.GetTabletSelectionPreference(cmd) + cli.FinishedParsing(cmd) + + req := &vtctldatapb.ReshardCreateRequest{ + Workflow: common.BaseOptions.Workflow, + Keyspace: common.BaseOptions.TargetKeyspace, + + TabletTypes: common.CreateOptions.TabletTypes, + TabletSelectionPreference: tsp, + Cells: common.CreateOptions.Cells, + OnDdl: common.CreateOptions.OnDDL, + DeferSecondaryKeys: common.CreateOptions.DeferSecondaryKeys, + AutoStart: common.CreateOptions.AutoStart, + StopAfterCopy: common.CreateOptions.StopAfterCopy, + + SourceShards: reshardCreateOptions.sourceShards, + TargetShards: reshardCreateOptions.targetShards, + SkipSchemaCopy: reshardCreateOptions.skipSchemaCopy, + } + resp, err := common.GetClient().ReshardCreate(common.GetCommandCtx(), req) + if err != nil { + return err + } + if err = common.OutputStatusResponse(resp, format); err != nil { + return err + } + return nil +} + +func registerCreateCommand(root *cobra.Command) { + common.AddCommonCreateFlags(reshardCreate) + reshardCreate.Flags().StringSliceVar(&reshardCreateOptions.sourceShards, "source-shards", nil, "Source shards.") + reshardCreate.Flags().StringSliceVar(&reshardCreateOptions.targetShards, 
"target-shards", nil, "Target shards.") + reshardCreate.Flags().BoolVar(&reshardCreateOptions.skipSchemaCopy, "skip-schema-copy", false, "Skip copying the schema from the source shards to the target shards.") + root.AddCommand(reshardCreate) +} diff --git a/go/cmd/vtctldclient/command/vreplication/reshard/reshard.go b/go/cmd/vtctldclient/command/vreplication/reshard/reshard.go new file mode 100644 index 00000000000..4b266dbb370 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/reshard/reshard.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reshard + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" +) + +var ( + // reshard is the base command for all actions related to reshard. 
+ reshard = &cobra.Command{ + Use: "Reshard --workflow --target-keyspace [command] [command-flags]", + Short: "Perform commands related to resharding a keyspace.", + DisableFlagsInUseLine: true, + Aliases: []string{"reshard"}, + Args: cobra.ExactArgs(1), + } +) + +func registerReshardCommands(root *cobra.Command) { + common.AddCommonFlags(reshard) + root.AddCommand(reshard) + + registerCreateCommand(reshard) + opts := &common.SubCommandsOpts{ + SubCommand: "Reshard", + Workflow: "cust2cust", + } + reshard.AddCommand(common.GetShowCommand(opts)) + reshard.AddCommand(common.GetStatusCommand(opts)) + + reshard.AddCommand(common.GetStartCommand(opts)) + reshard.AddCommand(common.GetStopCommand(opts)) + + switchTrafficCommand := common.GetSwitchTrafficCommand(opts) + common.AddCommonSwitchTrafficFlags(switchTrafficCommand, false) + reshard.AddCommand(switchTrafficCommand) + + reverseTrafficCommand := common.GetReverseTrafficCommand(opts) + common.AddCommonSwitchTrafficFlags(reverseTrafficCommand, false) + reshard.AddCommand(reverseTrafficCommand) + + reshard.AddCommand(common.GetCompleteCommand(opts)) + reshard.AddCommand(common.GetCancelCommand(opts)) +} + +func init() { + common.RegisterCommandHandler("Reshard", registerReshardCommands) +} diff --git a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff.go b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff.go new file mode 100644 index 00000000000..a98cf3ad743 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff.go @@ -0,0 +1,887 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vdiff + +import ( + "encoding/json" + "fmt" + "html/template" + "io" + "math" + "reflect" + "sort" + "strings" + "time" + + "github.com/bndr/gotabulate" + "github.com/google/uuid" + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + topoprotopb "vitess.io/vitess/go/vt/topo/topoproto" +) + +var ( + tabletTypesDefault = []topodatapb.TabletType{ + topodatapb.TabletType_RDONLY, + topodatapb.TabletType_REPLICA, + topodatapb.TabletType_PRIMARY, + } + + createOptions = struct { + UUID uuid.UUID + SourceCells []string + TargetCells []string + TabletTypes []topodatapb.TabletType + Tables []string + Limit uint32 // We only accept positive values but pass on an int64 + FilteredReplicationWaitTime time.Duration + DebugQuery bool + OnlyPKs bool + UpdateTableStats bool + MaxExtraRowsToCompare uint32 // We only accept positive values but pass on an int64 + Wait bool + WaitUpdateInterval time.Duration + AutoRetry bool + }{} + + deleteOptions = struct { + Arg string + }{} + + resumeOptions = struct { + UUID uuid.UUID + }{} + + showOptions = struct { + Arg string + Verbose bool + }{} + + stopOptions = struct { + UUID uuid.UUID + }{} + + parseAndValidateCreate = func(cmd 
*cobra.Command, args []string) error { + var err error + if len(args) == 1 { // Validate UUID if provided + if createOptions.UUID, err = uuid.Parse(args[0]); err != nil { + return fmt.Errorf("invalid UUID provided: %v", err) + } + } else { // Generate a UUID + createOptions.UUID = uuid.New() + } + if !cmd.Flags().Lookup("tablet-types").Changed { + createOptions.TabletTypes = tabletTypesDefault + } + if cmd.Flags().Lookup("source-cells").Changed { + for i, cell := range createOptions.SourceCells { + createOptions.SourceCells[i] = strings.TrimSpace(cell) + } + } + if cmd.Flags().Lookup("target-cells").Changed { + for i, cell := range createOptions.TargetCells { + createOptions.TargetCells[i] = strings.TrimSpace(cell) + } + } + if cmd.Flags().Lookup("tables").Changed { + for i, table := range createOptions.Tables { + createOptions.Tables[i] = strings.TrimSpace(table) + } + } + return nil + } + + // base is the base command for all actions related to VDiff. + base = &cobra.Command{ + Use: "VDiff --workflow --target-keyspace [command] [command-flags]", + Short: "Perform commands related to diffing tables involved in a VReplication workflow between the source and target.", + DisableFlagsInUseLine: true, + Aliases: []string{"vdiff"}, + Args: cobra.NoArgs, + } + + // create makes a VDiffCreate gRPC call to a vtctld. + create = &cobra.Command{ + Use: "create", + Short: "Create and run a VDiff to compare the tables involved in a VReplication workflow between the source and target.", + Example: `vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace customer create +vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace customer create b3f59678-5241-11ee-be56-0242ac120002`, + SilenceUsage: true, + DisableFlagsInUseLine: true, + Aliases: []string{"Create"}, + Args: cobra.MaximumNArgs(1), + PreRunE: parseAndValidateCreate, + RunE: commandCreate, + } + + // delete makes a VDiffDelete gRPC call to a vtctld. 
+ delete = &cobra.Command{ + Use: "delete", + Short: "Delete VDiffs.", + Example: `vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace delete a037a9e2-5628-11ee-8c99-0242ac120002 +vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace delete all`, + DisableFlagsInUseLine: true, + Aliases: []string{"Delete"}, + Args: cobra.ExactArgs(1), + PreRunE: func(cmd *cobra.Command, args []string) error { + larg := strings.ToLower(args[0]) + switch larg { + case "all": + default: + if _, err := uuid.Parse(args[0]); err != nil { + return fmt.Errorf("invalid argument provided (%s), valid arguments are 'all' or a valid UUID", + args[0]) + } + } + deleteOptions.Arg = larg + return nil + }, + RunE: commandDelete, + } + + // resume makes a VDiffResume gRPC call to a vtctld. + resume = &cobra.Command{ + Use: "resume", + Short: "Resume a VDiff.", + Example: `vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace resume a037a9e2-5628-11ee-8c99-0242ac120002`, + DisableFlagsInUseLine: true, + Aliases: []string{"Resume"}, + Args: cobra.ExactArgs(1), + PreRunE: func(cmd *cobra.Command, args []string) error { + uuid, err := uuid.Parse(args[0]) + if err != nil { + return fmt.Errorf("invalid UUID provided: %v", err) + } + resumeOptions.UUID = uuid + return nil + }, + RunE: commandResume, + } + + // show makes a VDiffShow gRPC call to a vtctld. 
+ show = &cobra.Command{ + Use: "show", + Short: "Show the status of a VDiff.", + Example: `vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace show last +vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace show a037a9e2-5628-11ee-8c99-0242ac120002 +vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace show all`, + DisableFlagsInUseLine: true, + Aliases: []string{"Show"}, + Args: cobra.ExactArgs(1), + PreRunE: func(cmd *cobra.Command, args []string) error { + larg := strings.ToLower(args[0]) + switch larg { + case "last", "all": + default: + if _, err := uuid.Parse(args[0]); err != nil { + return fmt.Errorf("invalid argument provided (%s), valid arguments are 'all', 'last', or a valid UUID", + args[0]) + } + } + showOptions.Arg = larg + return nil + }, + RunE: commandShow, + } + + // stop makes a VDiffStop gRPC call to a vtctld. + stop = &cobra.Command{ + Use: "stop", + Short: "Stop a running VDiff.", + Example: `vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace stop a037a9e2-5628-11ee-8c99-0242ac120002`, + DisableFlagsInUseLine: true, + Aliases: []string{"Stop"}, + Args: cobra.ExactArgs(1), + PreRunE: func(cmd *cobra.Command, args []string) error { + uuid, err := uuid.Parse(args[0]) + if err != nil { + return fmt.Errorf("invalid UUID provided: %v", err) + } + stopOptions.UUID = uuid + return nil + }, + RunE: commandStop, + } +) + +type simpleResponse struct { + Action vdiff.VDiffAction + Status string +} + +// displaySimpleResponse displays a simple standard response for the +// resume, stop, and delete commands after the client command completes +// without an error. 
+func displaySimpleResponse(out io.Writer, format string, action vdiff.VDiffAction) { + status := "completed" + if action == vdiff.ResumeAction { + status = "scheduled" + } + if format == "json" { + resp := &simpleResponse{ + Action: action, + Status: status, + } + jsonText, _ := cli.MarshalJSONPretty(resp) + fmt.Fprintln(out, string(jsonText)) + } else { + fmt.Fprintf(out, "VDiff %s %s\n", action, status) + } +} + +func commandCreate(cmd *cobra.Command, args []string) error { + format, err := common.GetOutputFormat(cmd) + if err != nil { + return err + } + tsp := common.GetTabletSelectionPreference(cmd) + cli.FinishedParsing(cmd) + + resp, err := common.GetClient().VDiffCreate(common.GetCommandCtx(), &vtctldatapb.VDiffCreateRequest{ + Workflow: common.BaseOptions.Workflow, + TargetKeyspace: common.BaseOptions.TargetKeyspace, + Uuid: createOptions.UUID.String(), + SourceCells: createOptions.SourceCells, + TargetCells: createOptions.TargetCells, + TabletTypes: createOptions.TabletTypes, + TabletSelectionPreference: tsp, + Tables: createOptions.Tables, + Limit: int64(createOptions.Limit), + FilteredReplicationWaitTime: protoutil.DurationToProto(createOptions.FilteredReplicationWaitTime), + DebugQuery: createOptions.DebugQuery, + OnlyPKs: createOptions.OnlyPKs, + UpdateTableStats: createOptions.UpdateTableStats, + MaxExtraRowsToCompare: int64(createOptions.MaxExtraRowsToCompare), + Wait: createOptions.Wait, + WaitUpdateInterval: protoutil.DurationToProto(createOptions.WaitUpdateInterval), + AutoRetry: createOptions.AutoRetry, + }) + + if err != nil { + return err + } + + if createOptions.Wait { + tkr := time.NewTicker(createOptions.WaitUpdateInterval) + defer tkr.Stop() + var state vdiff.VDiffState + ctx := common.GetCommandCtx() + vtctldClient := common.GetClient() + uuidStr := createOptions.UUID.String() + for { + select { + case <-ctx.Done(): + return vterrors.Errorf(vtrpcpb.Code_CANCELED, "context has expired") + case <-tkr.C: + resp, err := 
vtctldClient.VDiffShow(ctx, &vtctldatapb.VDiffShowRequest{ + Workflow: common.BaseOptions.Workflow, + TargetKeyspace: common.BaseOptions.TargetKeyspace, + Arg: uuidStr, + }) + if err != nil { + return err + } + if state, err = displayShowSingleSummary(cmd.OutOrStdout(), format, common.BaseOptions.TargetKeyspace, common.BaseOptions.Workflow, uuidStr, resp, false); err != nil { + return err + } + if state == vdiff.CompletedState { + return nil + } + } + } + } else { + var data []byte + if format == "json" { + data, err = cli.MarshalJSONPretty(resp) + if err != nil { + return err + } + } else { + data = []byte(fmt.Sprintf("VDiff %s scheduled on target shards, use show to view progress", resp.UUID)) + } + fmt.Println(string(data)) + } + + return nil +} + +func commandDelete(cmd *cobra.Command, args []string) error { + format, err := common.GetOutputFormat(cmd) + if err != nil { + return err + } + cli.FinishedParsing(cmd) + + _, err = common.GetClient().VDiffDelete(common.GetCommandCtx(), &vtctldatapb.VDiffDeleteRequest{ + Workflow: common.BaseOptions.Workflow, + TargetKeyspace: common.BaseOptions.TargetKeyspace, + Arg: deleteOptions.Arg, + }) + + if err != nil { + return err + } + + displaySimpleResponse(cmd.OutOrStdout(), format, vdiff.DeleteAction) + + return nil +} + +func commandResume(cmd *cobra.Command, args []string) error { + format, err := common.GetOutputFormat(cmd) + if err != nil { + return err + } + cli.FinishedParsing(cmd) + + _, err = common.GetClient().VDiffResume(common.GetCommandCtx(), &vtctldatapb.VDiffResumeRequest{ + Workflow: common.BaseOptions.Workflow, + TargetKeyspace: common.BaseOptions.TargetKeyspace, + Uuid: resumeOptions.UUID.String(), + }) + + if err != nil { + return err + } + + displaySimpleResponse(cmd.OutOrStdout(), format, vdiff.ResumeAction) + + return nil +} + +// tableSummary aggregates the current state of the table diff from all shards. 
+type tableSummary struct { + TableName string + State vdiff.VDiffState + RowsCompared int64 + MatchingRows int64 + MismatchedRows int64 + ExtraRowsSource int64 + ExtraRowsTarget int64 + LastUpdated string `json:"LastUpdated,omitempty"` +} + +// summary aggregates the current state of the vdiff from all shards. +type summary struct { + Workflow, Keyspace string + State vdiff.VDiffState + UUID string + RowsCompared int64 + HasMismatch bool + Shards string + StartedAt string `json:"StartedAt,omitempty"` + CompletedAt string `json:"CompletedAt,omitempty"` + TableSummaryMap map[string]tableSummary `json:"TableSummary,omitempty"` + Reports map[string]map[string]vdiff.DiffReport `json:"Reports,omitempty"` + Errors map[string]string `json:"Errors,omitempty"` + Progress *vdiff.ProgressReport `json:"Progress,omitempty"` +} + +const summaryTextTemplate = ` +VDiff Summary for {{.Keyspace}}.{{.Workflow}} ({{.UUID}}) +State: {{.State}} +{{if .Errors}} +{{- range $shard, $error := .Errors}} + Error: (shard {{$shard}}) {{$error}} +{{- end}} +{{end}} +RowsCompared: {{.RowsCompared}} +HasMismatch: {{.HasMismatch}} +StartedAt: {{.StartedAt}} +{{if (eq .State "started")}}Progress: {{printf "%.2f" .Progress.Percentage}}%%{{if .Progress.ETA}}, ETA: {{.Progress.ETA}}{{end}}{{end}} +{{if .CompletedAt}}CompletedAt: {{.CompletedAt}}{{end}} +{{range $table := .TableSummaryMap}} +Table {{$table.TableName}}: + State: {{$table.State}} + ProcessedRows: {{$table.RowsCompared}} + MatchingRows: {{$table.MatchingRows}} +{{if $table.MismatchedRows}} MismatchedRows: {{$table.MismatchedRows}}{{end}} +{{if $table.ExtraRowsSource}} ExtraRowsSource: {{$table.ExtraRowsSource}}{{end}} +{{if $table.ExtraRowsTarget}} ExtraRowsTarget: {{$table.ExtraRowsTarget}}{{end}} +{{end}} + +Use "--format=json" for more detailed output. 
+` + +type listing struct { + UUID, Workflow, Keyspace, Shard, State string +} + +func (vdl *listing) String() string { + return fmt.Sprintf("UUID: %s, Workflow: %s, Keyspace: %s, Shard: %s, State: %s", + vdl.UUID, vdl.Workflow, vdl.Keyspace, vdl.Shard, vdl.State) +} + +func getStructFieldNames(s any) []string { + t := reflect.TypeOf(s) + + names := make([]string, t.NumField()) + for i := range names { + names[i] = t.Field(i).Name + } + + return names +} + +func buildListings(listings []*listing) string { + var values []string + var lines [][]string + var result string + + if len(listings) == 0 { + return "" + } + // Get the column headers. + fields := getStructFieldNames(listing{}) + // The header is the first row. + lines = append(lines, fields) + for _, listing := range listings { + v := reflect.ValueOf(*listing) + for _, field := range fields { + values = append(values, v.FieldByName(field).String()) + } + lines = append(lines, values) + } + t := gotabulate.Create(lines) + result = t.Render("grid") + return result +} + +func displayShowResponse(out io.Writer, format, keyspace, workflowName, actionArg string, resp *vtctldatapb.VDiffShowResponse, verbose bool) error { + var vdiffUUID uuid.UUID + var err error + switch actionArg { + case vdiff.AllActionArg: + return displayShowRecent(out, format, keyspace, workflowName, actionArg, resp) + case vdiff.LastActionArg: + for _, resp := range resp.TabletResponses { + vdiffUUID, err = uuid.Parse(resp.VdiffUuid) + if err != nil { + if format == "json" { + fmt.Fprintln(out, "{}") + } else { + fmt.Fprintf(out, "No previous vdiff found for %s.%s\n", keyspace, workflowName) + } + return nil + } + break + } + fallthrough + default: + if vdiffUUID == uuid.Nil { // Then it must be passed as the action arg + vdiffUUID, err = uuid.Parse(actionArg) + if err != nil { + return err + } + } + if len(resp.TabletResponses) == 0 { + return fmt.Errorf("no response received for vdiff show of %s.%s (%s)", keyspace, workflowName, 
vdiffUUID.String()) + } + _, err = displayShowSingleSummary(out, format, keyspace, workflowName, vdiffUUID.String(), resp, verbose) + return err + } +} + +func displayShowRecent(out io.Writer, format, keyspace, workflowName, subCommand string, resp *vtctldatapb.VDiffShowResponse) error { + output := "" + recentListings, err := buildRecentListings(resp) + if err != nil { + return err + } + if format == "json" { + jsonText, err := cli.MarshalJSONPretty(recentListings) + if err != nil { + return err + } + output = string(jsonText) + if output == "null" { + output = "[]" + } + } else { + output = buildListings(recentListings) + if output == "" { + output = fmt.Sprintf("No vdiffs found for %s.%s", keyspace, workflowName) + } + } + fmt.Fprintln(out, output) + return nil +} + +func buildRecentListings(resp *vtctldatapb.VDiffShowResponse) ([]*listing, error) { + var listings []*listing + for _, resp := range resp.TabletResponses { + if resp != nil && resp.Output != nil { + qr := sqltypes.Proto3ToResult(resp.Output) + for _, row := range qr.Named().Rows { + listings = append(listings, &listing{ + UUID: row["vdiff_uuid"].ToString(), + Workflow: row["workflow"].ToString(), + Keyspace: row["keyspace"].ToString(), + Shard: row["shard"].ToString(), + State: row["state"].ToString(), + }) + } + } + } + return listings, nil +} + +func displayShowSingleSummary(out io.Writer, format, keyspace, workflowName, uuid string, resp *vtctldatapb.VDiffShowResponse, verbose bool) (vdiff.VDiffState, error) { + state := vdiff.UnknownState + var output string + summary, err := buildSingleSummary(keyspace, workflowName, uuid, resp, verbose) + if err != nil { + return state, err + } + if summary == nil { // Should never happen + return state, fmt.Errorf("no report to show for vdiff %s.%s (%s)", keyspace, workflowName, uuid) + } + state = summary.State + if format == "json" { + jsonText, err := cli.MarshalJSONPretty(summary) + if err != nil { + return state, err + } + output = string(jsonText) + } 
else { + tmpl, err := template.New("summary").Parse(summaryTextTemplate) + if err != nil { + return state, err + } + sb := new(strings.Builder) + err = tmpl.Execute(sb, summary) + if err != nil { + return state, err + } + output = sb.String() + for { + str := strings.Replace(output, "\n\n", "\n", -1) + if output == str { + break + } + output = str + } + } + fmt.Fprintln(out, output) + return state, nil +} + +func buildSingleSummary(keyspace, workflow, uuid string, resp *vtctldatapb.VDiffShowResponse, verbose bool) (*summary, error) { + summary := &summary{ + Workflow: workflow, + Keyspace: keyspace, + UUID: uuid, + State: vdiff.UnknownState, + RowsCompared: 0, + StartedAt: "", + CompletedAt: "", + HasMismatch: false, + Shards: "", + Reports: make(map[string]map[string]vdiff.DiffReport), + Errors: make(map[string]string), + Progress: nil, + } + + var tableSummaryMap map[string]tableSummary + var reports map[string]map[string]vdiff.DiffReport + // Keep a tally of the states across all tables in all shards. + tableStateCounts := map[vdiff.VDiffState]int{ + vdiff.UnknownState: 0, + vdiff.PendingState: 0, + vdiff.StartedState: 0, + vdiff.StoppedState: 0, + vdiff.ErrorState: 0, + vdiff.CompletedState: 0, + } + // Keep a tally of the summary states across all shards. + shardStateCounts := map[vdiff.VDiffState]int{ + vdiff.UnknownState: 0, + vdiff.PendingState: 0, + vdiff.StartedState: 0, + vdiff.StoppedState: 0, + vdiff.ErrorState: 0, + vdiff.CompletedState: 0, + } + // Keep a tally of the approximate total rows to process as we'll use this for our progress + // report. 
+ totalRowsToCompare := int64(0) + var shards []string + for shard, resp := range resp.TabletResponses { + first := true + if resp != nil && resp.Output != nil { + shards = append(shards, shard) + qr := sqltypes.Proto3ToResult(resp.Output) + if tableSummaryMap == nil { + tableSummaryMap = make(map[string]tableSummary, 0) + reports = make(map[string]map[string]vdiff.DiffReport, 0) + } + for _, row := range qr.Named().Rows { + // Update the global VDiff summary based on the per shard level summary. + // Since these values will be the same for all subsequent rows we only use + // the first row. + if first { + first = false + // Our timestamps are strings in `2022-06-26 20:43:25` format so we sort + // them lexicographically. + // We should use the earliest started_at across all shards. + if sa := row.AsString("started_at", ""); summary.StartedAt == "" || sa < summary.StartedAt { + summary.StartedAt = sa + } + // And we should use the latest completed_at across all shards. + if ca := row.AsString("completed_at", ""); summary.CompletedAt == "" || ca > summary.CompletedAt { + summary.CompletedAt = ca + } + // If we had an error on the shard, then let's add that to the summary. + if le := row.AsString("last_error", ""); le != "" { + summary.Errors[shard] = le + } + // Keep track of how many shards are marked as a specific state. We check + // this combined with the shard.table states to determine the VDiff summary + // state. + shardStateCounts[vdiff.VDiffState(strings.ToLower(row.AsString("vdiff_state", "")))]++ + } + + // Global VDiff summary updates that take into account the per table details + // per shard. + { + summary.RowsCompared += row.AsInt64("rows_compared", 0) + totalRowsToCompare += row.AsInt64("table_rows", 0) + + // If we had a mismatch on any table on any shard then the global VDiff + // summary does too. 
+ if mm, _ := row.ToBool("has_mismatch"); mm { + summary.HasMismatch = true + } + } + + // Table summary information that must be accounted for across all shards. + { + table := row.AsString("table_name", "") + // Create the global VDiff table summary object if it doesn't exist. + if _, ok := tableSummaryMap[table]; !ok { + tableSummaryMap[table] = tableSummary{ + TableName: table, + State: vdiff.UnknownState, + } + + } + ts := tableSummaryMap[table] + // This is the shard level VDiff table state. + sts := vdiff.VDiffState(strings.ToLower(row.AsString("table_state", ""))) + tableStateCounts[sts]++ + + // The error state must be sticky, and we should not override any other + // known state with completed. + switch sts { + case vdiff.CompletedState: + if ts.State == vdiff.UnknownState { + ts.State = sts + } + case vdiff.ErrorState: + ts.State = sts + default: + if ts.State != vdiff.ErrorState { + ts.State = sts + } + } + + diffReport := row.AsString("report", "") + dr := vdiff.DiffReport{} + if diffReport != "" { + err := json.Unmarshal([]byte(diffReport), &dr) + if err != nil { + return nil, err + } + ts.RowsCompared += dr.ProcessedRows + ts.MismatchedRows += dr.MismatchedRows + ts.MatchingRows += dr.MatchingRows + ts.ExtraRowsTarget += dr.ExtraRowsTarget + ts.ExtraRowsSource += dr.ExtraRowsSource + } + if _, ok := reports[table]; !ok { + reports[table] = make(map[string]vdiff.DiffReport) + } + + reports[table][shard] = dr + tableSummaryMap[table] = ts + } + } + } + } + + // The global VDiff summary should progress from pending->started->completed with + // stopped for any shard and error for any table being sticky for the global summary. + // We should only consider the VDiff to be complete if it's completed for every table + // on every shard. 
+ if shardStateCounts[vdiff.StoppedState] > 0 { + summary.State = vdiff.StoppedState + } else if shardStateCounts[vdiff.ErrorState] > 0 || tableStateCounts[vdiff.ErrorState] > 0 { + summary.State = vdiff.ErrorState + } else if tableStateCounts[vdiff.StartedState] > 0 { + summary.State = vdiff.StartedState + } else if tableStateCounts[vdiff.PendingState] > 0 { + summary.State = vdiff.PendingState + } else if tableStateCounts[vdiff.CompletedState] == (len(tableSummaryMap) * len(shards)) { + // When doing shard consolidations/merges, we cannot rely solely on the + // vdiff_table state as there are N sources that we process rows from sequentially + // with each one writing to the shared _vt.vdiff_table record for the target shard. + // So we only mark the vdiff for the shard as completed when we've finished + // processing rows from all of the sources -- which is recorded by marking the + // vdiff done for the shard by setting _vt.vdiff.state = completed. + if shardStateCounts[vdiff.CompletedState] == len(shards) { + summary.State = vdiff.CompletedState + } else { + summary.State = vdiff.StartedState + } + } else { + summary.State = vdiff.UnknownState + } + + // If the vdiff has been started then we can calculate the progress. + if summary.State == vdiff.StartedState { + buildProgressReport(summary, totalRowsToCompare) + } + + sort.Strings(shards) // Sort for predictable output + summary.Shards = strings.Join(shards, ",") + summary.TableSummaryMap = tableSummaryMap + summary.Reports = reports + if !summary.HasMismatch && !verbose { + summary.Reports = nil + summary.TableSummaryMap = nil + } + // If we haven't completed the global VDiff then be sure to reflect that with no + // CompletedAt value. + if summary.State != vdiff.CompletedState { + summary.CompletedAt = "" + } + return summary, nil +} + +func buildProgressReport(summary *summary, rowsToCompare int64) { + report := &vdiff.ProgressReport{} + if summary.RowsCompared >= 1 { + // Round to 2 decimal points. 
+ report.Percentage = math.Round(math.Min((float64(summary.RowsCompared)/float64(rowsToCompare))*100, 100.00)*100) / 100 + } + if math.IsNaN(report.Percentage) { + report.Percentage = 0 + } + pctToGo := math.Abs(report.Percentage - 100.00) + startTime, _ := time.Parse(vdiff.TimestampFormat, summary.StartedAt) + curTime := time.Now().UTC() + runTime := curTime.Unix() - startTime.Unix() + if report.Percentage >= 1 { + // Calculate how long 1% took, on avg, and multiply that by the % left. + eta := time.Unix(((int64(runTime)/int64(report.Percentage))*int64(pctToGo))+curTime.Unix(), 1).UTC() + // Cap the ETA at 1 year out to prevent providing nonsensical ETAs. + if eta.Before(time.Now().UTC().AddDate(1, 0, 0)) { + report.ETA = eta.Format(vdiff.TimestampFormat) + } + } + summary.Progress = report +} + +func commandShow(cmd *cobra.Command, args []string) error { + format, err := common.GetOutputFormat(cmd) + if err != nil { + return err + } + cli.FinishedParsing(cmd) + + resp, err := common.GetClient().VDiffShow(common.GetCommandCtx(), &vtctldatapb.VDiffShowRequest{ + Workflow: common.BaseOptions.Workflow, + TargetKeyspace: common.BaseOptions.TargetKeyspace, + Arg: showOptions.Arg, + }) + + if err != nil { + return err + } + + if err = displayShowResponse(cmd.OutOrStdout(), format, common.BaseOptions.TargetKeyspace, common.BaseOptions.Workflow, showOptions.Arg, resp, showOptions.Verbose); err != nil { + return err + } + + return nil +} + +func commandStop(cmd *cobra.Command, args []string) error { + format, err := common.GetOutputFormat(cmd) + if err != nil { + return err + } + cli.FinishedParsing(cmd) + + _, err = common.GetClient().VDiffStop(common.GetCommandCtx(), &vtctldatapb.VDiffStopRequest{ + Workflow: common.BaseOptions.Workflow, + TargetKeyspace: common.BaseOptions.TargetKeyspace, + Uuid: stopOptions.UUID.String(), + }) + + if err != nil { + return err + } + + displaySimpleResponse(cmd.OutOrStdout(), format, vdiff.StopAction) + + return nil +} + +func 
registerCommands(root *cobra.Command) { + common.AddCommonFlags(base) + root.AddCommand(base) + + create.Flags().StringSliceVar(&createOptions.SourceCells, "source-cells", nil, "The source cell(s) to compare from; default is any available cell.") + create.Flags().StringSliceVar(&createOptions.TargetCells, "target-cells", nil, "The target cell(s) to compare with; default is any available cell.") + create.Flags().Var((*topoprotopb.TabletTypeListFlag)(&createOptions.TabletTypes), "tablet-types", "Tablet types to use on the source and target.") + create.Flags().BoolVar(&common.CreateOptions.TabletTypesInPreferenceOrder, "tablet-types-in-preference-order", true, "When performing source tablet selection, look for candidates in the type order as they are listed in the tablet-types flag.") + create.Flags().DurationVar(&createOptions.FilteredReplicationWaitTime, "filtered-replication-wait-time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for replication to catch up when syncing tablet streams.") + create.Flags().Uint32Var(&createOptions.Limit, "limit", math.MaxUint32, "Max rows to stop comparing after.") + create.Flags().BoolVar(&createOptions.DebugQuery, "debug-query", false, "Adds a mysql query to the report that can be used for further debugging.") + create.Flags().BoolVar(&createOptions.OnlyPKs, "only-pks", false, "When reporting missing rows, only show primary keys in the report.") + create.Flags().StringSliceVar(&createOptions.Tables, "tables", nil, "Only run vdiff for these tables in the workflow.") + create.Flags().Uint32Var(&createOptions.MaxExtraRowsToCompare, "max-extra-rows-to-compare", 1000, "If there are collation differences between the source and target, you can have rows that are identical but simply returned in a different order from MySQL. 
We will do a second pass to compare the rows for any actual differences in this case and this flag allows you to control the resources used for this operation.") + create.Flags().BoolVar(&createOptions.Wait, "wait", false, "When creating or resuming a vdiff, wait for it to finish before exiting.") + create.Flags().DurationVar(&createOptions.WaitUpdateInterval, "wait-update-interval", time.Duration(1*time.Minute), "When waiting on a vdiff to finish, check and display the current status this often.") + create.Flags().BoolVar(&createOptions.AutoRetry, "auto-retry", true, "Should this vdiff automatically retry and continue in case of recoverable errors.") + create.Flags().BoolVar(&createOptions.UpdateTableStats, "update-table-stats", false, "Update the table statistics, using ANALYZE TABLE, on each table involved in the VDiff during initialization. This will ensure that progress estimates are as accurate as possible -- but it does involve locks and can potentially impact query processing on the target keyspace.") + base.AddCommand(create) + + base.AddCommand(delete) + + base.AddCommand(resume) + + show.Flags().BoolVar(&showOptions.Verbose, "verbose", false, "Show verbose output in summaries") + base.AddCommand(show) + + base.AddCommand(stop) +} + +func init() { + common.RegisterCommandHandler("VDiff", registerCommands) +} diff --git a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_env_test.go b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_env_test.go new file mode 100644 index 00000000000..1a2a374cf81 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_env_test.go @@ -0,0 +1,351 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vdiff + +import ( + "bytes" + "context" + "fmt" + "io" + "math/rand" + "sync" + "testing" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/grpcclient" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtctl/workflow" + "vitess.io/vitess/go/vt/vttablet/queryservice" + "vitess.io/vitess/go/vt/vttablet/queryservice/fakes" + "vitess.io/vitess/go/vt/vttablet/tabletconn" + "vitess.io/vitess/go/vt/vttablet/tabletconntest" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +const ( + // vdiffStopPosition is the default stop position for the target vreplication. + // It can be overridden with the positions argument to newTestVDiffEnv. + vdiffStopPosition = "MySQL56/d834e6b8-7cbf-11ed-a1eb-0242ac120002:1-892" + // vdiffSourceGtid should be the position reported by the source side VStreamResults. + // It's expected to be higher than the vdiffStopPosition. + vdiffSourceGtid = "MySQL56/d834e6b8-7cbf-11ed-a1eb-0242ac120002:1-893" + // vdiffTargetPrimaryPosition is the primary position of the target after + // vreplication has been synchronized. 
+ vdiffTargetPrimaryPosition = "MySQL56/e34d6fb6-7cbf-11ed-a1eb-0242ac120002:1-892" +) + +type testVDiffEnv struct { + ws *workflow.Server + sourceKeyspace string + targetKeyspace string + workflow string + topoServ *topo.Server + cell string + tabletType topodatapb.TabletType + tmc *testVDiffTMClient + out io.Writer // Capture command output + + mu sync.Mutex + tablets map[int]*testVDiffTablet +} + +//---------------------------------------------- +// testVDiffEnv + +func newTestVDiffEnv(t testing.TB, ctx context.Context, sourceShards, targetShards []string, query string, positions map[string]string) *testVDiffEnv { + env := &testVDiffEnv{ + sourceKeyspace: "sourceks", + targetKeyspace: "targetks", + workflow: "vdiffTest", + tablets: make(map[int]*testVDiffTablet), + topoServ: memorytopo.NewServer(ctx, "cell"), + cell: "cell", + tabletType: topodatapb.TabletType_REPLICA, + tmc: newTestVDiffTMClient(), + } + env.ws = workflow.NewServer(env.topoServ, env.tmc) + env.tmc.testEnv = env + + // Generate a unique dialer name. 
+ dialerName := fmt.Sprintf("VDiffTest-%s-%d", t.Name(), rand.Intn(1000000000)) + tabletconn.RegisterDialer(dialerName, func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + env.mu.Lock() + defer env.mu.Unlock() + if qs, ok := env.tablets[int(tablet.Alias.Uid)]; ok { + return qs, nil + } + return nil, fmt.Errorf("tablet %d not found", tablet.Alias.Uid) + }) + tabletconntest.SetProtocol("go.cmd.vtctldclient.vreplication.vdiff_env_test", dialerName) + + tabletID := 100 + for _, shard := range sourceShards { + _ = env.addTablet(tabletID, env.sourceKeyspace, shard, topodatapb.TabletType_PRIMARY) + env.tmc.waitpos[tabletID+1] = vdiffStopPosition + + tabletID += 10 + } + tabletID = 200 + for _, shard := range targetShards { + primary := env.addTablet(tabletID, env.targetKeyspace, shard, topodatapb.TabletType_PRIMARY) + + var rows []string + var posRows []string + for j, sourceShard := range sourceShards { + bls := &binlogdatapb.BinlogSource{ + Keyspace: env.sourceKeyspace, + Shard: sourceShard, + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: query, + }}, + }, + } + rows = append(rows, fmt.Sprintf("%d|%v|||", j+1, bls)) + position := vdiffStopPosition + if pos := positions[sourceShard+shard]; pos != "" { + position = pos + } + posRows = append(posRows, fmt.Sprintf("%v|%s", bls, position)) + + // vdiff.syncTargets. This actually happens after stopTargets. + // But this is one statement per stream. 
+ env.tmc.setVRResults( + primary.tablet, + fmt.Sprintf("update _vt.vreplication set state='Running', stop_pos='%s', message='synchronizing for vdiff' where id=%d", vdiffSourceGtid, j+1), + &sqltypes.Result{}, + ) + } + // migrater buildMigrationTargets + env.tmc.setVRResults( + primary.tablet, + "select id, source, message, cell, tablet_types, workflow_type, workflow_sub_type, defer_secondary_keys from _vt.vreplication where workflow='vdiffTest' and db_name='vt_target'", + sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys", + "int64|varchar|varchar|varchar|varchar|int64|int64|int64"), + rows..., + ), + ) + + // vdiff.stopTargets + env.tmc.setVRResults(primary.tablet, "update _vt.vreplication set state='Stopped', message='for vdiff' where db_name='vt_target' and workflow='vdiffTest'", &sqltypes.Result{}) + env.tmc.setVRResults( + primary.tablet, + "select source, pos from _vt.vreplication where db_name='vt_target' and workflow='vdiffTest'", + sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "source|pos", + "varchar|varchar"), + posRows..., + ), + ) + + // vdiff.syncTargets (continued) + env.tmc.vrpos[tabletID] = vdiffSourceGtid + env.tmc.pos[tabletID] = vdiffTargetPrimaryPosition + + // vdiff.startQueryStreams + env.tmc.waitpos[tabletID+1] = vdiffTargetPrimaryPosition + + // vdiff.restartTargets + env.tmc.setVRResults(primary.tablet, "update _vt.vreplication set state='Running', message='', stop_pos='' where db_name='vt_target' and workflow='vdiffTest'", &sqltypes.Result{}) + + tabletID += 10 + } + env.resetOutput() + return env +} + +func (env *testVDiffEnv) getOutput() string { + env.mu.Lock() + defer env.mu.Unlock() + bb, ok := env.out.(*bytes.Buffer) + if !ok { + panic(fmt.Sprintf("unexpected output type for test env: %T", env.out)) + } + return bb.String() +} + +func (env *testVDiffEnv) resetOutput() { + env.mu.Lock() + defer env.mu.Unlock() + env.out = &bytes.Buffer{} 
+} + +func (env *testVDiffEnv) close() { + env.mu.Lock() + defer env.mu.Unlock() + for _, t := range env.tablets { + _ = env.topoServ.DeleteTablet(context.Background(), t.tablet.Alias) + } + env.tablets = nil + env.topoServ.Close() + env.ws = nil +} + +func (env *testVDiffEnv) addTablet(id int, keyspace, shard string, tabletType topodatapb.TabletType) *testVDiffTablet { + env.mu.Lock() + defer env.mu.Unlock() + tablet := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: env.cell, + Uid: uint32(id), + }, + Keyspace: keyspace, + Shard: shard, + KeyRange: &topodatapb.KeyRange{}, + Type: tabletType, + PortMap: map[string]int32{ + "test": int32(id), + }, + } + env.tablets[id] = newTestVDiffTablet(tablet) + if err := env.topoServ.InitTablet(context.Background(), tablet, false /* allowPrimaryOverride */, true /* createShardAndKeyspace */, false /* allowUpdate */); err != nil { + panic(err) + } + if tabletType == topodatapb.TabletType_PRIMARY { + _, err := env.topoServ.UpdateShardFields(context.Background(), keyspace, shard, func(si *topo.ShardInfo) error { + si.PrimaryAlias = tablet.Alias + return nil + }) + if err != nil { + panic(err) + } + } + return env.tablets[id] +} + +//---------------------------------------------- +// testVDiffTablet + +type testVDiffTablet struct { + queryservice.QueryService + tablet *topodatapb.Tablet +} + +func newTestVDiffTablet(tablet *topodatapb.Tablet) *testVDiffTablet { + return &testVDiffTablet{ + QueryService: fakes.ErrorQueryService, + tablet: tablet, + } +} + +func (tvt *testVDiffTablet) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error { + return callback(&querypb.StreamHealthResponse{ + Serving: true, + Target: &querypb.Target{ + Keyspace: tvt.tablet.Keyspace, + Shard: tvt.tablet.Shard, + TabletType: tvt.tablet.Type, + }, + RealtimeStats: &querypb.RealtimeStats{}, + }) +} + +//---------------------------------------------- +// testVDiffTMClient + +type testVDiffTMClient struct 
{ + tmclient.TabletManagerClient + vrQueries map[int]map[string]*querypb.QueryResult + vdRequests map[int]map[string]*tabletmanagerdatapb.VDiffResponse + waitpos map[int]string + vrpos map[int]string + pos map[int]string + + testEnv *testVDiffEnv // For access to the test environment +} + +func newTestVDiffTMClient() *testVDiffTMClient { + return &testVDiffTMClient{ + vrQueries: make(map[int]map[string]*querypb.QueryResult), + vdRequests: make(map[int]map[string]*tabletmanagerdatapb.VDiffResponse), + waitpos: make(map[int]string), + vrpos: make(map[int]string), + pos: make(map[int]string), + } +} + +func (tmc *testVDiffTMClient) setVRResults(tablet *topodatapb.Tablet, query string, result *sqltypes.Result) { + queries, ok := tmc.vrQueries[int(tablet.Alias.Uid)] + if !ok { + queries = make(map[string]*querypb.QueryResult) + tmc.vrQueries[int(tablet.Alias.Uid)] = queries + } + queries[query] = sqltypes.ResultToProto3(result) +} + +func (tmc *testVDiffTMClient) VReplicationExec(ctx context.Context, tablet *topodatapb.Tablet, query string) (*querypb.QueryResult, error) { + result, ok := tmc.vrQueries[int(tablet.Alias.Uid)][query] + if !ok { + return nil, fmt.Errorf("query %q not found for tablet %d", query, tablet.Alias.Uid) + } + return result, nil +} + +func (tmc *testVDiffTMClient) setVDResults(tablet *topodatapb.Tablet, req *tabletmanagerdatapb.VDiffRequest, res *tabletmanagerdatapb.VDiffResponse) { + reqs, ok := tmc.vdRequests[int(tablet.Alias.Uid)] + if !ok { + reqs = make(map[string]*tabletmanagerdatapb.VDiffResponse) + tmc.vdRequests[int(tablet.Alias.Uid)] = reqs + } + reqs[req.VdiffUuid] = res +} + +func (tmc *testVDiffTMClient) VDiff(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.VDiffRequest) (*tabletmanagerdatapb.VDiffResponse, error) { + resp, ok := tmc.vdRequests[int(tablet.Alias.Uid)][req.VdiffUuid] + if !ok { + return nil, fmt.Errorf("request %+v not found for tablet %d", req, tablet.Alias.Uid) + } + return resp, nil +} + +func 
(tmc *testVDiffTMClient) ReadVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { + id := int32(1) + resp := &tabletmanagerdatapb.ReadVReplicationWorkflowResponse{ + Workflow: "vdiffTest", + } + + sourceShards, _ := tmc.testEnv.topoServ.GetShardNames(ctx, tmc.testEnv.sourceKeyspace) + streams := make([]*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream, 0, len(sourceShards)) + for _, shard := range sourceShards { + streams = append(streams, &tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{ + Id: id, + Bls: &binlogdatapb.BinlogSource{ + Keyspace: tmc.testEnv.sourceKeyspace, + Shard: shard, + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{ + { + Match: ".*", + }, + }, + }, + }, + }) + id++ + } + resp.Streams = streams + + return resp, nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_test.go b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_test.go new file mode 100644 index 00000000000..fd535bb2aad --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_test.go @@ -0,0 +1,530 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vdiff + +import ( + "context" + "fmt" + "math" + "testing" + "time" + + "github.com/google/uuid" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" + + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + fields = sqltypes.MakeTestFields( + "vdiff_state|last_error|table_name|uuid|table_state|table_rows|started_at|rows_compared|completed_at|has_mismatch|report", + "varbinary|varbinary|varbinary|varchar|varbinary|int64|timestamp|int64|timestamp|int64|json", + ) + options = &tabletmanagerdatapb.VDiffOptions{ + PickerOptions: &tabletmanagerdatapb.VDiffPickerOptions{ + TabletTypes: "primary", + }, + CoreOptions: &tabletmanagerdatapb.VDiffCoreOptions{ + Tables: "t1", + }, + ReportOptions: &tabletmanagerdatapb.VDiffReportOptions{ + Format: "json", + }, + } +) + +func TestVDiffUnsharded(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestVDiffEnv(t, ctx, []string{"0"}, []string{"0"}, "", nil) + defer env.close() + + now := time.Now() + UUID := uuid.New().String() + req := &tabletmanagerdatapb.VDiffRequest{ + Keyspace: env.targetKeyspace, + Workflow: env.workflow, + Action: string(vdiff.ShowAction), + ActionArg: UUID, + } + starttime := now.UTC().Format(vdiff.TimestampFormat) + comptime := now.Add(1 * time.Second).UTC().Format(vdiff.TimestampFormat) + goodReportfmt := `{ + "Workflow": "vdiffTest", + "Keyspace": "%s", + "State": "completed", + "UUID": "%s", + "RowsCompared": %d, + "HasMismatch": %t, + "Shards": "0", + "StartedAt": "%s", + "CompletedAt": "%s" +} +` + + badReportfmt := `{ + "Workflow": "vdiffTest", + "Keyspace": "%s", + "State": "completed", + "UUID": "%s", + "RowsCompared": %d, + "HasMismatch": %t, + "Shards": "0", + "StartedAt": "%s", + "CompletedAt": "%s", + "TableSummary": { + "t1": { + "TableName": "t1", + "State": 
"completed", + "RowsCompared": %d, + "MatchingRows": %d, + "MismatchedRows": %d, + "ExtraRowsSource": %d, + "ExtraRowsTarget": %d + } + }, + "Reports": { + "t1": { + "0": { + "TableName": "t1", + "ProcessedRows": %d, + "MatchingRows": %d, + "MismatchedRows": %d, + "ExtraRowsSource": %d, + "ExtraRowsTarget": %d, + %s + } + } + } +} +` + + testcases := []struct { + id string + result *sqltypes.Result + report string + }{{ + id: "1", + result: sqltypes.MakeTestResult(fields, + "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|0|"+ + `{"TableName": "t1", "MatchingRows": 3, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 0, `+ + `"ExtraRowsTarget": 0}`), + report: fmt.Sprintf(goodReportfmt, + env.targetKeyspace, UUID, 3, false, starttime, comptime, + ), + }, { + id: "2", + result: sqltypes.MakeTestResult(fields, + "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+ + `{"TableName": "t1", "MatchingRows": 1, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 0, `+ + `"ExtraRowsTarget": 2, "ExtraRowsTargetSample": [{"Row": {"c1": "2", "c2": "4"}}]}`), + report: fmt.Sprintf(badReportfmt, + env.targetKeyspace, UUID, 3, true, starttime, comptime, 3, 1, 0, 0, 2, 3, 1, 0, 0, 2, + `"ExtraRowsTargetSample": [ + { + "Row": { + "c1": "2", + "c2": "4" + } + } + ]`), + }, { + id: "3", + result: sqltypes.MakeTestResult(fields, + "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+ + `{"TableName": "t1", "MatchingRows": 1, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 2, `+ + `"ExtraRowsTarget": 0, "ExtraRowsSourceSample": [{"Row": {"c1": "2", "c2": "4"}}]}`), + report: fmt.Sprintf(badReportfmt, + env.targetKeyspace, UUID, 3, true, starttime, comptime, 3, 1, 0, 2, 0, 3, 1, 0, 2, 0, + `"ExtraRowsSourceSample": [ + { + "Row": { + "c1": "2", + "c2": "4" + } + } + ]`), + }, { + id: "4", + result: sqltypes.MakeTestResult(fields, + "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+ + 
`{"TableName": "t1", "MatchingRows": 2, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 1, `+ + `"ExtraRowsTarget": 0, "ExtraRowsSourceSample": [{"Row": {"c1": "2", "c2": "4"}}]}`), + report: fmt.Sprintf(badReportfmt, + env.targetKeyspace, UUID, 3, true, starttime, comptime, 3, 2, 0, 1, 0, 3, 2, 0, 1, 0, + `"ExtraRowsSourceSample": [ + { + "Row": { + "c1": "2", + "c2": "4" + } + } + ]`), + }, { + id: "5", + result: sqltypes.MakeTestResult(fields, + "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+ + `{"TableName": "t1", "MatchingRows": 2, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 1, `+ + `"ExtraRowsTarget": 0, "ExtraRowsSourceSample": [{"Row": {"c1": "2", "c2": "4"}}]}`), + report: fmt.Sprintf(badReportfmt, + env.targetKeyspace, UUID, 3, true, starttime, comptime, 3, 2, 0, 1, 0, 3, 2, 0, 1, 0, + `"ExtraRowsSourceSample": [ + { + "Row": { + "c1": "2", + "c2": "4" + } + } + ]`), + }, { + id: "6", + result: sqltypes.MakeTestResult(fields, + "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+ + `{"TableName": "t1", "MatchingRows": 2, "ProcessedRows": 3, "MismatchedRows": 1, "ExtraRowsSource": 0, `+ + `"ExtraRowsTarget": 0, "MismatchedRowsSample": [{"Source": {"Row": {"c1": "2", "c2": "3"}}, `+ + `"Target": {"Row": {"c1": "2", "c2": "4"}}}]}`), + report: fmt.Sprintf(badReportfmt, + env.targetKeyspace, UUID, 3, true, starttime, comptime, 3, 2, 1, 0, 0, 3, 2, 1, 0, 0, + `"MismatchedRowsSample": [ + { + "Source": { + "Row": { + "c1": "2", + "c2": "3" + } + }, + "Target": { + "Row": { + "c1": "2", + "c2": "4" + } + } + } + ]`), + }, { + id: "7", // --only_pks + result: sqltypes.MakeTestResult(fields, + "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+ + `{"TableName": "t1", "MatchingRows": 2, "ProcessedRows": 3, "MismatchedRows": 1, "ExtraRowsSource": 0, `+ + `"ExtraRowsTarget": 0, "MismatchedRowsSample": [{"Source": {"Row": {"c1": "2"}}, `+ + `"Target": {"Row": {"c1": "2"}}}]}`), + 
report: fmt.Sprintf(badReportfmt, + env.targetKeyspace, UUID, 3, true, starttime, comptime, 3, 2, 1, 0, 0, 3, 2, 1, 0, 0, + `"MismatchedRowsSample": [ + { + "Source": { + "Row": { + "c1": "2" + } + }, + "Target": { + "Row": { + "c1": "2" + } + } + } + ]`), + }, { + id: "8", // --debug_query + result: sqltypes.MakeTestResult(fields, + "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+ + `{"TableName": "t1", "MatchingRows": 2, "ProcessedRows": 3, "MismatchedRows": 1, "ExtraRowsSource": 0, `+ + `"ExtraRowsTarget": 0, "MismatchedRowsSample": [{"Source": {"Row": {"c1": "2", "c2": "3"}, "Query": "select c1, c2 from t1 where c1=2;"}, `+ + `"Target": {"Row": {"c1": "2", "c2": "4"}, "Query": "select c1, c2 from t1 where c1=2;"}}]}`), + report: fmt.Sprintf(badReportfmt, + env.targetKeyspace, UUID, 3, true, starttime, comptime, 3, 2, 1, 0, 0, 3, 2, 1, 0, 0, + `"MismatchedRowsSample": [ + { + "Source": { + "Row": { + "c1": "2", + "c2": "3" + }, + "Query": "select c1, c2 from t1 where c1=2;" + }, + "Target": { + "Row": { + "c1": "2", + "c2": "4" + }, + "Query": "select c1, c2 from t1 where c1=2;" + } + } + ]`), + }, + } + + for _, tcase := range testcases { + t.Run(tcase.id, func(t *testing.T) { + res := &tabletmanagerdatapb.VDiffResponse{ + Id: 1, + Output: sqltypes.ResultToProto3(tcase.result), + } + env.tmc.setVDResults(env.tablets[200].tablet, req, res) + req := &vtctldatapb.VDiffShowRequest{ + TargetKeyspace: env.targetKeyspace, + Workflow: env.workflow, + Arg: UUID, + } + + resp, err := env.ws.VDiffShow(context.Background(), req) + require.NoError(t, err) + vds, err := displayShowSingleSummary(env.out, options.ReportOptions.Format, env.targetKeyspace, env.workflow, UUID, resp, false) + require.NoError(t, err) + require.Equal(t, vdiff.CompletedState, vds) + + require.Equal(t, tcase.report, env.getOutput()) + env.resetOutput() + }) + } +} + +func TestVDiffSharded(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + 
env := newTestVDiffEnv(t, ctx, []string{"-40", "40-"}, []string{"-80", "80-"}, "", map[string]string{ + "-80": "MySQL56/0e45e704-7cb9-11ed-a1eb-0242ac120002:1-890", + "80-": "MySQL56/1497ddb0-7cb9-11ed-a1eb-0242ac120002:1-891", + }) + defer env.close() + + now := time.Now() + UUID := uuid.New().String() + req := &tabletmanagerdatapb.VDiffRequest{ + Keyspace: env.targetKeyspace, + Workflow: env.workflow, + Action: string(vdiff.ShowAction), + ActionArg: UUID, + } + starttime := now.UTC().Format(vdiff.TimestampFormat) + comptime := now.Add(1 * time.Second).UTC().Format(vdiff.TimestampFormat) + verbosefmt := `{ + "Workflow": "vdiffTest", + "Keyspace": "%s", + "State": "completed", + "UUID": "%s", + "RowsCompared": %d, + "HasMismatch": %t, + "Shards": "-80,80-", + "StartedAt": "%s", + "CompletedAt": "%s", + "TableSummary": { + "t1": { + "TableName": "t1", + "State": "completed", + "RowsCompared": %d, + "MatchingRows": %d, + "MismatchedRows": %d, + "ExtraRowsSource": %d, + "ExtraRowsTarget": %d + } + }, + "Reports": { + "t1": { + "-80": { + "TableName": "t1", + "ProcessedRows": %d, + "MatchingRows": %d, + "MismatchedRows": %d, + "ExtraRowsSource": %d, + "ExtraRowsTarget": %d + }, + "80-": { + "TableName": "t1", + "ProcessedRows": %d, + "MatchingRows": %d, + "MismatchedRows": %d, + "ExtraRowsSource": %d, + "ExtraRowsTarget": %d + } + } + } +} +` + + testcases := []struct { + id string + shard1Res *sqltypes.Result + shard2Res *sqltypes.Result + report string + }{{ + id: "1", + shard1Res: sqltypes.MakeTestResult(fields, + "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|0|"+ + `{"TableName": "t1", "MatchingRows": 3, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 0, `+ + `"ExtraRowsTarget": 0}`), + shard2Res: sqltypes.MakeTestResult(fields, + "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|0|"+ + `{"TableName": "t1", "MatchingRows": 3, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 0, `+ + `"ExtraRowsTarget": 0}`), + 
report: fmt.Sprintf(verbosefmt, + env.targetKeyspace, UUID, 6, false, starttime, comptime, 6, 6, 0, 0, 0, 3, 3, 0, 0, 0, 3, 3, 0, 0, 0, + ), + }} + + for _, tcase := range testcases { + t.Run(tcase.id, func(t *testing.T) { + shard1Res := &tabletmanagerdatapb.VDiffResponse{ + Id: 1, + Output: sqltypes.ResultToProto3(tcase.shard1Res), + } + shard2Res := &tabletmanagerdatapb.VDiffResponse{ + Id: 1, + Output: sqltypes.ResultToProto3(tcase.shard2Res), + } + env.tmc.setVDResults(env.tablets[200].tablet, req, shard1Res) + env.tmc.setVDResults(env.tablets[210].tablet, req, shard2Res) + req := &vtctldatapb.VDiffShowRequest{ + TargetKeyspace: env.targetKeyspace, + Workflow: env.workflow, + Arg: UUID, + } + + resp, err := env.ws.VDiffShow(context.Background(), req) + require.NoError(t, err) + vds, err := displayShowSingleSummary(env.out, options.ReportOptions.Format, env.targetKeyspace, env.workflow, UUID, resp, true) + require.NoError(t, err) + require.Equal(t, vdiff.CompletedState, vds) + + require.Equal(t, tcase.report, env.getOutput()) + env.resetOutput() + }) + } +} + +func TestGetStructNames(t *testing.T) { + type s struct { + A string + B int64 + } + got := getStructFieldNames(s{}) + want := []string{"A", "B"} + require.EqualValues(t, want, got) +} + +func TestBuildProgressReport(t *testing.T) { + now := time.Now() + type args struct { + summary *summary + rowsToCompare int64 + } + tests := []struct { + name string + args args + want *vdiff.ProgressReport + }{ + { + name: "no progress", + args: args{ + summary: &summary{RowsCompared: 0}, + rowsToCompare: 100, + }, + want: &vdiff.ProgressReport{ + Percentage: 0, + ETA: "", // no ETA + }, + }, + { + name: "one third of the way", + args: args{ + summary: &summary{ + RowsCompared: 33, + StartedAt: now.Add(-10 * time.Second).UTC().Format(vdiff.TimestampFormat), + }, + rowsToCompare: 100, + }, + want: &vdiff.ProgressReport{ + Percentage: 33, + ETA: now.Add(20 * time.Second).UTC().Format(vdiff.TimestampFormat), + }, + }, + { 
+ name: "half way", + args: args{ + summary: &summary{ + RowsCompared: 5000000000, + StartedAt: now.Add(-10 * time.Hour).UTC().Format(vdiff.TimestampFormat), + }, + rowsToCompare: 10000000000, + }, + want: &vdiff.ProgressReport{ + Percentage: 50, + ETA: now.Add(10 * time.Hour).UTC().Format(vdiff.TimestampFormat), + }, + }, + { + name: "full progress", + args: args{ + summary: &summary{ + RowsCompared: 100, + CompletedAt: now.UTC().Format(vdiff.TimestampFormat), + }, + rowsToCompare: 100, + }, + want: &vdiff.ProgressReport{ + Percentage: 100, + ETA: now.UTC().Format(vdiff.TimestampFormat), + }, + }, + { + name: "more than in I_S", + args: args{ + summary: &summary{ + RowsCompared: 100, + CompletedAt: now.UTC().Format(vdiff.TimestampFormat), + }, + rowsToCompare: 50, + }, + want: &vdiff.ProgressReport{ + Percentage: 100, + ETA: now.UTC().Format(vdiff.TimestampFormat), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + buildProgressReport(tt.args.summary, tt.args.rowsToCompare) + // We always check the percentage + require.Equal(t, int(tt.want.Percentage), int(tt.args.summary.Progress.Percentage)) + + // We only check the ETA if there is one. + if tt.want.ETA != "" { + // Let's check that we're within 1 second to avoid flakes. 
+ wantTime, err := time.Parse(vdiff.TimestampFormat, tt.want.ETA) + require.NoError(t, err) + var timeDiff float64 + if tt.want.Percentage == 100 { + completedTime, err := time.Parse(vdiff.TimestampFormat, tt.args.summary.CompletedAt) + require.NoError(t, err) + timeDiff = math.Abs(completedTime.Sub(wantTime).Seconds()) + } else { + startTime, err := time.Parse(vdiff.TimestampFormat, tt.args.summary.StartedAt) + require.NoError(t, err) + completedTimeUnix := float64(now.UTC().Unix()-startTime.UTC().Unix()) * (100 / tt.want.Percentage) + estimatedTime, err := time.Parse(vdiff.TimestampFormat, tt.want.ETA) + require.NoError(t, err) + timeDiff = math.Abs(estimatedTime.Sub(startTime).Seconds() - completedTimeUnix) + } + require.LessOrEqual(t, timeDiff, 1.0) + } + }) + } +} diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/delete.go b/go/cmd/vtctldclient/command/vreplication/workflow/delete.go new file mode 100644 index 00000000000..4eae8076fec --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/workflow/delete.go @@ -0,0 +1,76 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workflow + +import ( + "fmt" + "sort" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + deleteOptions = struct { + KeepData bool + KeepRoutingRules bool + }{} + + // delete makes a WorkflowDelete gRPC call to a vtctld. + delete = &cobra.Command{ + Use: "delete", + Short: "Delete a VReplication workflow.", + Example: `vtctldclient --server localhost:15999 workflow --keyspace customer delete --workflow commerce2customer`, + DisableFlagsInUseLine: true, + Aliases: []string{"Delete"}, + Args: cobra.NoArgs, + RunE: commandDelete, + } +) + +func commandDelete(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + req := &vtctldatapb.WorkflowDeleteRequest{ + Keyspace: baseOptions.Keyspace, + Workflow: baseOptions.Workflow, + KeepData: deleteOptions.KeepData, + KeepRoutingRules: deleteOptions.KeepRoutingRules, + } + resp, err := common.GetClient().WorkflowDelete(common.GetCommandCtx(), req) + if err != nil { + return err + } + + // Sort the inner TabletInfo slice for deterministic output. + sort.Slice(resp.Details, func(i, j int) bool { + return resp.Details[i].Tablet.String() < resp.Details[j].Tablet.String() + }) + + data, err := cli.MarshalJSONPretty(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/get.go b/go/cmd/vtctldclient/command/vreplication/workflow/get.go new file mode 100644 index 00000000000..69acc535158 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/workflow/get.go @@ -0,0 +1,67 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + getWorkflowsOptions = struct { + ShowAll bool + }{} + // GetWorkflows makes a GetWorkflows gRPC call to a vtctld. + getWorkflows = &cobra.Command{ + Use: "GetWorkflows ", + Short: "Gets all vreplication workflows (Reshard, MoveTables, etc) in the given keyspace.", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(1), + RunE: commandGetWorkflows, + } +) + +func commandGetWorkflows(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + ks := cmd.Flags().Arg(0) + + resp, err := common.GetClient().GetWorkflows(common.GetCommandCtx(), &vtctldatapb.GetWorkflowsRequest{ + Keyspace: ks, + ActiveOnly: !getWorkflowsOptions.ShowAll, + IncludeLogs: workflowShowOptions.IncludeLogs, + }) + + if err != nil { + return err + } + + data, err := cli.MarshalJSONPretty(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/show.go b/go/cmd/vtctldclient/command/vreplication/workflow/show.go new file mode 100644 index 00000000000..ebc18ea250d --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/workflow/show.go @@ -0,0 +1,85 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + // WorkflowList makes a GetWorkflows gRPC call to a vtctld. + workflowList = &cobra.Command{ + Use: "list", + Short: "List the VReplication workflows in the given keyspace.", + Example: `vtctldclient --server localhost:15999 workflow --keyspace customer list`, + DisableFlagsInUseLine: true, + Aliases: []string{"List"}, + Args: cobra.NoArgs, + RunE: commandShow, + } + + // show makes a GetWorkflows gRPC call to a vtctld. + show = &cobra.Command{ + Use: "show", + Short: "Show the details for a VReplication workflow.", + Example: `vtctldclient --server localhost:15999 workflow --keyspace customer show --workflow commerce2customer`, + DisableFlagsInUseLine: true, + Aliases: []string{"Show"}, + Args: cobra.NoArgs, + RunE: commandShow, + } +) + +func commandShow(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + req := &vtctldatapb.GetWorkflowsRequest{ + Keyspace: baseOptions.Keyspace, + Workflow: baseOptions.Workflow, + IncludeLogs: workflowShowOptions.IncludeLogs, + } + resp, err := common.GetClient().GetWorkflows(common.GetCommandCtx(), req) + if err != nil { + return err + } + + var data []byte + if strings.ToLower(cmd.Name()) == "list" { + // We only want the names. 
+ Names := make([]string, len(resp.Workflows)) + for i, wf := range resp.Workflows { + Names[i] = wf.Name + } + data, err = cli.MarshalJSONPretty(Names) + } else { + data, err = cli.MarshalJSONPretty(resp) + } + if err != nil { + return err + } + fmt.Println(string(data)) + + return nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/state.go b/go/cmd/vtctldclient/command/vreplication/workflow/state.go new file mode 100644 index 00000000000..89e75312ab2 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/workflow/state.go @@ -0,0 +1,106 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "fmt" + "sort" + "strings" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + "vitess.io/vitess/go/textutil" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + // start makes a WorkflowUpdate gRPC call to a vtctld. 
+ start = &cobra.Command{ + Use: "start", + Short: "Start a VReplication workflow.", + Example: `vtctldclient --server localhost:15999 workflow --keyspace customer start --workflow commerce2customer`, + DisableFlagsInUseLine: true, + Aliases: []string{"Start"}, + Args: cobra.NoArgs, + RunE: commandUpdateState, + } + + // stop makes a WorkflowUpdate gRPC call to a vtctld. + stop = &cobra.Command{ + Use: "stop", + Short: "Stop a VReplication workflow.", + Example: `vtctldclient --server localhost:15999 workflow --keyspace customer stop --workflow commerce2customer`, + DisableFlagsInUseLine: true, + Aliases: []string{"Stop"}, + Args: cobra.NoArgs, + RunE: commandUpdateState, + } +) + +func commandUpdateState(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + var state binlogdatapb.VReplicationWorkflowState + switch strings.ToLower(cmd.Name()) { + case "start": + if err := common.CanRestartWorkflow(baseOptions.Keyspace, baseOptions.Workflow); err != nil { + return err + } + state = binlogdatapb.VReplicationWorkflowState_Running + case "stop": + state = binlogdatapb.VReplicationWorkflowState_Stopped + default: + return fmt.Errorf("invalid workflow state: %s", args[0]) + } + + // The only thing we're updating is the state. + req := &vtctldatapb.WorkflowUpdateRequest{ + Keyspace: baseOptions.Keyspace, + TabletRequest: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ + Workflow: baseOptions.Workflow, + Cells: textutil.SimulatedNullStringSlice, + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)}, + OnDdl: binlogdatapb.OnDDLAction(textutil.SimulatedNullInt), + State: state, + }, + } + + resp, err := common.GetClient().WorkflowUpdate(common.GetCommandCtx(), req) + if err != nil { + return err + } + + // Sort the inner TabletInfo slice for deterministic output. 
+ sort.Slice(resp.Details, func(i, j int) bool { + return resp.Details[i].Tablet.String() < resp.Details[j].Tablet.String() + }) + + data, err := cli.MarshalJSONPretty(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/update.go b/go/cmd/vtctldclient/command/vreplication/workflow/update.go new file mode 100644 index 00000000000..466d81e8be4 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/workflow/update.go @@ -0,0 +1,135 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "fmt" + "sort" + "strings" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + "vitess.io/vitess/go/textutil" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + updateOptions = struct { + Cells []string + TabletTypes []topodatapb.TabletType + TabletTypesInPreferenceOrder bool + OnDDL string + }{} + + // update makes a WorkflowUpdate gRPC call to a vtctld. 
+ update = &cobra.Command{ + Use: "update", + Short: "Update the configuration parameters for a VReplication workflow.", + Example: `vtctldclient --server localhost:15999 workflow --keyspace customer update --workflow commerce2customer --cells zone1 --cells zone2 -c "zone3,zone4" -c zone5`, + DisableFlagsInUseLine: true, + Aliases: []string{"Update"}, + Args: cobra.NoArgs, + PreRunE: func(cmd *cobra.Command, args []string) error { + changes := false + if cmd.Flags().Lookup("cells").Changed { // Validate the provided value(s) + changes = true + for i, cell := range updateOptions.Cells { // Which only means trimming whitespace + updateOptions.Cells[i] = strings.TrimSpace(cell) + } + } else { + updateOptions.Cells = textutil.SimulatedNullStringSlice + } + if cmd.Flags().Lookup("tablet-types").Changed { + if err := common.ParseTabletTypes(cmd); err != nil { + return err + } + changes = true + } else { + updateOptions.TabletTypes = []topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)} + } + if cmd.Flags().Lookup("on-ddl").Changed { // Validate the provided value + changes = true + if _, ok := binlogdatapb.OnDDLAction_value[strings.ToUpper(updateOptions.OnDDL)]; !ok { + return fmt.Errorf("invalid on-ddl value: %s", updateOptions.OnDDL) + } + } // Simulated NULL will need to be handled in command + if !changes { + return fmt.Errorf("no configuration options specified to update") + } + return nil + }, + RunE: commandUpdate, + } +) + +func commandUpdate(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + // We've already validated any provided value, if one WAS provided. + // Now we need to do the mapping from the string representation to + // the enum value. + onddl := int32(textutil.SimulatedNullInt) // Simulated NULL when no value provided + if val, ok := binlogdatapb.OnDDLAction_value[strings.ToUpper(updateOptions.OnDDL)]; ok { + onddl = val + } + + // Simulated NULL when no value is provided. 
+ tsp := tabletmanagerdatapb.TabletSelectionPreference_UNKNOWN + if cmd.Flags().Lookup("tablet-types-in-order").Changed { + if updateOptions.TabletTypesInPreferenceOrder { + tsp = tabletmanagerdatapb.TabletSelectionPreference_INORDER + } else { + tsp = tabletmanagerdatapb.TabletSelectionPreference_ANY + } + } + + req := &vtctldatapb.WorkflowUpdateRequest{ + Keyspace: baseOptions.Keyspace, + TabletRequest: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ + Workflow: baseOptions.Workflow, + Cells: updateOptions.Cells, + TabletTypes: updateOptions.TabletTypes, + TabletSelectionPreference: tsp, + OnDdl: binlogdatapb.OnDDLAction(onddl), + }, + } + + resp, err := common.GetClient().WorkflowUpdate(common.GetCommandCtx(), req) + if err != nil { + return err + } + + // Sort the inner TabletInfo slice for deterministic output. + sort.Slice(resp.Details, func(i, j int) bool { + return resp.Details[i].Tablet.String() < resp.Details[j].Tablet.String() + }) + + data, err := cli.MarshalJSONPretty(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/workflow.go b/go/cmd/vtctldclient/command/vreplication/workflow/workflow.go new file mode 100644 index 00000000000..e552b61d476 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/workflow/workflow.go @@ -0,0 +1,90 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workflow + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + "vitess.io/vitess/go/vt/topo/topoproto" +) + +var ( + // base is a parent command for Workflow commands. + base = &cobra.Command{ + Use: "Workflow --keyspace [command] [command-flags]", + Short: "Administer VReplication workflows (Reshard, MoveTables, etc) in the given keyspace.", + DisableFlagsInUseLine: true, + Aliases: []string{"workflow"}, + Args: cobra.ExactArgs(1), + RunE: commandGetWorkflows, + } +) + +var ( + baseOptions = struct { + Keyspace string + Workflow string + }{} + + workflowShowOptions = struct { + IncludeLogs bool + }{} +) + +func registerCommands(root *cobra.Command) { + base.PersistentFlags().StringVarP(&baseOptions.Keyspace, "keyspace", "k", "", "Keyspace context for the workflow.") + base.MarkPersistentFlagRequired("keyspace") + root.AddCommand(base) + + getWorkflows.Flags().BoolVar(&workflowShowOptions.IncludeLogs, "include-logs", true, "Include recent logs for the workflows.") + getWorkflows.Flags().BoolVarP(&getWorkflowsOptions.ShowAll, "show-all", "a", false, "Show all workflows instead of just active workflows.") + root.AddCommand(getWorkflows) // Yes this is supposed to be root as GetWorkflows is a top-level command. 
+ + delete.Flags().StringVarP(&baseOptions.Workflow, "workflow", "w", "", "The workflow you want to delete.") + delete.MarkFlagRequired("workflow") + delete.Flags().BoolVar(&deleteOptions.KeepData, "keep-data", false, "Keep the partially copied table data from the workflow in the target keyspace.") + delete.Flags().BoolVar(&deleteOptions.KeepRoutingRules, "keep-routing-rules", false, "Keep the routing rules created for the workflow.") + base.AddCommand(delete) + + base.AddCommand(workflowList) + + show.Flags().StringVarP(&baseOptions.Workflow, "workflow", "w", "", "The workflow you want the details for.") + show.MarkFlagRequired("workflow") + show.Flags().BoolVar(&workflowShowOptions.IncludeLogs, "include-logs", true, "Include recent logs for the workflow.") + base.AddCommand(show) + + start.Flags().StringVarP(&baseOptions.Workflow, "workflow", "w", "", "The workflow you want to start.") + start.MarkFlagRequired("workflow") + base.AddCommand(start) + + stop.Flags().StringVarP(&baseOptions.Workflow, "workflow", "w", "", "The workflow you want to stop.") + stop.MarkFlagRequired("workflow") + base.AddCommand(stop) + + update.Flags().StringVarP(&baseOptions.Workflow, "workflow", "w", "", "The workflow you want to update.") + update.MarkFlagRequired("workflow") + update.Flags().StringSliceVarP(&updateOptions.Cells, "cells", "c", nil, "New Cell(s) or CellAlias(es) (comma-separated) to replicate from.") + update.Flags().VarP((*topoproto.TabletTypeListFlag)(&updateOptions.TabletTypes), "tablet-types", "t", "New source tablet types to replicate from (e.g. PRIMARY,REPLICA,RDONLY).") + update.Flags().BoolVar(&updateOptions.TabletTypesInPreferenceOrder, "tablet-types-in-order", true, "When performing source tablet selection, look for candidates in the type order as they are listed in the tablet-types flag.") + update.Flags().StringVar(&updateOptions.OnDDL, "on-ddl", "", "New instruction on what to do when DDL is encountered in the VReplication stream. 
Possible values are IGNORE, STOP, EXEC, and EXEC_IGNORE.") + base.AddCommand(update) +} + +func init() { + common.RegisterCommandHandler("Workflow", registerCommands) +} diff --git a/go/cmd/vtctldclient/command/workflows.go b/go/cmd/vtctldclient/command/workflows.go deleted file mode 100644 index f783ce9c307..00000000000 --- a/go/cmd/vtctldclient/command/workflows.go +++ /dev/null @@ -1,193 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package command - -import ( - "fmt" - "sort" - "strings" - - "github.com/spf13/cobra" - - "vitess.io/vitess/go/cmd/vtctldclient/cli" - "vitess.io/vitess/go/textutil" - "vitess.io/vitess/go/vt/topo/topoproto" - - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" - vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" -) - -var ( - // GetWorkflows makes a GetWorkflows gRPC call to a vtctld. - GetWorkflows = &cobra.Command{ - Use: "GetWorkflows ", - Short: "Gets all vreplication workflows (Reshard, MoveTables, etc) in the given keyspace.", - DisableFlagsInUseLine: true, - Args: cobra.ExactArgs(1), - RunE: commandGetWorkflows, - } - - // Workflow is a parent command for Workflow* sub commands. 
- Workflow = &cobra.Command{ - Use: "workflow", - Short: "Administer VReplication workflows (Reshard, MoveTables, etc) in the given keyspace", - DisableFlagsInUseLine: true, - Aliases: []string{"Workflow"}, - Args: cobra.ExactArgs(1), - RunE: commandGetWorkflows, - } - - // WorkflowUpdate makes a WorkflowUpdate gRPC call to a vtctld. - WorkflowUpdate = &cobra.Command{ - Use: "update", - Short: "Update the configuration parameters for a VReplication workflow", - Example: `vtctldclient --server=localhost:15999 workflow --keyspace=customer update --workflow=commerce2customer --cells "zone1" --cells "zone2" -c "zone3,zone4" -c "zone5"`, - DisableFlagsInUseLine: true, - Aliases: []string{"Update"}, - Args: cobra.NoArgs, - PreRunE: func(cmd *cobra.Command, args []string) error { - changes := false - if cmd.Flags().Lookup("cells").Changed { // Validate the provided value(s) - changes = true - for i, cell := range workflowUpdateOptions.Cells { // Which only means trimming whitespace - workflowUpdateOptions.Cells[i] = strings.TrimSpace(cell) - } - } else { - workflowUpdateOptions.Cells = textutil.SimulatedNullStringSlice - } - if cmd.Flags().Lookup("tablet-types").Changed { // Validate the provided value(s) - changes = true - for i, tabletType := range workflowUpdateOptions.TabletTypes { - workflowUpdateOptions.TabletTypes[i] = strings.ToUpper(strings.TrimSpace(tabletType)) - if _, err := topoproto.ParseTabletType(workflowUpdateOptions.TabletTypes[i]); err != nil { - return err - } - } - } else { - workflowUpdateOptions.TabletTypes = textutil.SimulatedNullStringSlice - } - if cmd.Flags().Lookup("on-ddl").Changed { // Validate the provided value - changes = true - if _, ok := binlogdatapb.OnDDLAction_value[strings.ToUpper(workflowUpdateOptions.OnDDL)]; !ok { - return fmt.Errorf("invalid on-ddl value: %s", workflowUpdateOptions.OnDDL) - } - } // Simulated NULL will need to be handled in command - if !changes { - return fmt.Errorf("no configuration options specified to 
update") - } - return nil - }, - RunE: commandWorkflowUpdate, - } -) - -var getWorkflowsOptions = struct { - ShowAll bool -}{} - -func commandGetWorkflows(cmd *cobra.Command, args []string) error { - cli.FinishedParsing(cmd) - - ks := cmd.Flags().Arg(0) - - resp, err := client.GetWorkflows(commandCtx, &vtctldatapb.GetWorkflowsRequest{ - Keyspace: ks, - ActiveOnly: !getWorkflowsOptions.ShowAll, - }) - - if err != nil { - return err - } - - data, err := cli.MarshalJSON(resp) - if err != nil { - return err - } - - fmt.Printf("%s\n", data) - - return nil -} - -var ( - workflowOptions = struct { - Keyspace string - }{} - workflowUpdateOptions = struct { - Workflow string - Cells []string - TabletTypes []string - OnDDL string - }{} -) - -func commandWorkflowUpdate(cmd *cobra.Command, args []string) error { - cli.FinishedParsing(cmd) - - // We've already validated any provided value, if one WAS provided. - // Now we need to do the mapping from the string representation to - // the enum value. - onddl := int32(textutil.SimulatedNullInt) // Simulated NULL when no value provided - if val, ok := binlogdatapb.OnDDLAction_value[strings.ToUpper(workflowUpdateOptions.OnDDL)]; ok { - onddl = val - } - - req := &vtctldatapb.WorkflowUpdateRequest{ - Keyspace: workflowOptions.Keyspace, - TabletRequest: &tabletmanagerdatapb.UpdateVRWorkflowRequest{ - Workflow: workflowUpdateOptions.Workflow, - Cells: workflowUpdateOptions.Cells, - TabletTypes: workflowUpdateOptions.TabletTypes, - OnDdl: binlogdatapb.OnDDLAction(onddl), - }, - } - - resp, err := client.WorkflowUpdate(commandCtx, req) - if err != nil { - return err - } - - // Sort the inner TabletInfo slice for deterministic output. 
- sort.Slice(resp.Details, func(i, j int) bool { - return resp.Details[i].Tablet < resp.Details[j].Tablet - }) - - data, err := cli.MarshalJSON(resp) - if err != nil { - return err - } - - fmt.Printf("%s\n", data) - - return nil -} - -func init() { - GetWorkflows.Flags().BoolVarP(&getWorkflowsOptions.ShowAll, "show-all", "a", false, "Show all workflows instead of just active workflows.") - Root.AddCommand(GetWorkflows) - - Workflow.PersistentFlags().StringVarP(&workflowOptions.Keyspace, "keyspace", "k", "", "Keyspace context for the workflow (required)") - Workflow.MarkPersistentFlagRequired("keyspace") - Root.AddCommand(Workflow) - WorkflowUpdate.Flags().StringVarP(&workflowUpdateOptions.Workflow, "workflow", "w", "", "The workflow you want to update (required)") - WorkflowUpdate.MarkFlagRequired("workflow") - WorkflowUpdate.Flags().StringSliceVarP(&workflowUpdateOptions.Cells, "cells", "c", nil, "New Cell(s) or CellAlias(es) (comma-separated) to replicate from") - WorkflowUpdate.Flags().StringSliceVarP(&workflowUpdateOptions.TabletTypes, "tablet-types", "t", nil, "New source tablet types to replicate from (e.g. PRIMARY,REPLICA,RDONLY)") - WorkflowUpdate.Flags().StringVar(&workflowUpdateOptions.OnDDL, "on-ddl", "", "New instruction on what to do when DDL is encountered in the VReplication stream. Possible values are IGNORE, STOP, EXEC, and EXEC_IGNORE") - Workflow.AddCommand(WorkflowUpdate) -} diff --git a/go/cmd/vtexplain/cli/vtexplain.go b/go/cmd/vtexplain/cli/vtexplain.go new file mode 100644 index 00000000000..8b0622cf8a3 --- /dev/null +++ b/go/cmd/vtexplain/cli/vtexplain.go @@ -0,0 +1,196 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "context" + "fmt" + "os" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vtexplain" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + + "github.com/spf13/cobra" + + querypb "vitess.io/vitess/go/vt/proto/query" +) + +var ( + sqlFlag string + sqlFileFlag string + schemaFlag string + schemaFileFlag string + vschemaFlag string + vschemaFileFlag string + ksShardMapFlag string + ksShardMapFileFlag string + normalize bool + dbName string + plannerVersionStr string + + numShards = 2 + replicationMode = "ROW" + executionMode = "multi" + outputMode = "text" + + Main = &cobra.Command{ + Use: "vtexplain", + Short: "vtexplain is a command line tool which provides information on how Vitess plans to execute a particular query.", + Long: `vtexplain is a command line tool which provides information on how Vitess plans to execute a particular query. + +It can be used to validate queries for compatibility with Vitess. + +For a user guide that describes how to use the vtexplain tool to explain how Vitess executes a particular SQL statement, see Analyzing a SQL statement. + +## Limitations + +### The VSchema must use a keyspace name. + +VTExplain requires a keyspace name for each keyspace in an input VSchema: +` + + "```\n" + + `"keyspace_name": { + "_comment": "Keyspace definition goes here." 
+} +` + "```" + ` + +If no keyspace name is present, VTExplain will return the following error: +` + + "```\n" + + `ERROR: initVtgateExecutor: json: cannot unmarshal bool into Go value of type map[string]json.RawMessage +` + "```\n", + Example: "Explain how Vitess will execute the query `SELECT * FROM users` using the VSchema contained in `vschemas.json` and the database schema `schema.sql`:\n\n" + + "```\nvtexplain --vschema-file vschema.json --schema-file schema.sql --sql \"SELECT * FROM users\"\n```\n\n" + + + "Explain how the example will execute on 128 shards using Row-based replication:\n\n" + + + "```\nvtexplain -- -shards 128 --vschema-file vschema.json --schema-file schema.sql --replication-mode \"ROW\" --output-mode text --sql \"INSERT INTO users (user_id, name) VALUES(1, 'john')\"\n```\n", + Args: cobra.NoArgs, + PreRunE: servenv.CobraPreRunE, + RunE: run, + } +) + +func init() { + servenv.MoveFlagsToCobraCommand(Main) + Main.Flags().StringVar(&sqlFlag, "sql", sqlFlag, "A list of semicolon-delimited SQL commands to analyze") + Main.Flags().StringVar(&sqlFileFlag, "sql-file", sqlFileFlag, "Identifies the file that contains the SQL commands to analyze") + Main.Flags().StringVar(&schemaFlag, "schema", schemaFlag, "The SQL table schema") + Main.Flags().StringVar(&schemaFileFlag, "schema-file", schemaFileFlag, "Identifies the file that contains the SQL table schema") + Main.Flags().StringVar(&vschemaFlag, "vschema", vschemaFlag, "Identifies the VTGate routing schema") + Main.Flags().StringVar(&vschemaFileFlag, "vschema-file", vschemaFileFlag, "Identifies the VTGate routing schema file") + Main.Flags().StringVar(&ksShardMapFlag, "ks-shard-map", ksShardMapFlag, "JSON map of keyspace name -> shard name -> ShardReference object. 
The inner map is the same as the output of FindAllShardsInKeyspace") + Main.Flags().StringVar(&ksShardMapFileFlag, "ks-shard-map-file", ksShardMapFileFlag, "File containing json blob of keyspace name -> shard name -> ShardReference object") + Main.Flags().StringVar(&replicationMode, "replication-mode", replicationMode, "The replication mode to simulate -- must be set to either ROW or STATEMENT") + Main.Flags().BoolVar(&normalize, "normalize", normalize, "Whether to enable vtgate normalization") + Main.Flags().StringVar(&dbName, "dbname", dbName, "Optional database target to override normal routing") + Main.Flags().StringVar(&plannerVersionStr, "planner-version", plannerVersionStr, "Sets the default planner to use. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right") + Main.Flags().IntVar(&numShards, "shards", numShards, "Number of shards per keyspace. Passing --ks-shard-map/--ks-shard-map-file causes this flag to be ignored.") + Main.Flags().StringVar(&executionMode, "execution-mode", executionMode, "The execution mode to simulate -- must be set to multi, legacy-autocommit, or twopc") + Main.Flags().StringVar(&outputMode, "output-mode", outputMode, "Output in human-friendly text or json") + + acl.RegisterFlags(Main.Flags()) +} + +// getFileParam returns a string containing either flag is not "", +// or the content of the file named flagFile +func getFileParam(flag, flagFile, name string, required bool) (string, error) { + if flag != "" { + if flagFile != "" { + return "", fmt.Errorf("action requires only one of %v or %v-file", name, name) + } + return flag, nil + } + + if flagFile == "" { + if required { + return "", fmt.Errorf("action requires one of %v or %v-file", name, name) + } + + return "", nil + } + data, err := os.ReadFile(flagFile) + if err != nil { + return "", fmt.Errorf("cannot read file %v: %v", flagFile, err) + } + return string(data), nil +} + +func run(cmd *cobra.Command, args []string) error { + defer logutil.Flush() + + servenv.Init() + return 
parseAndRun() +} + +func parseAndRun() error { + plannerVersion, _ := plancontext.PlannerNameToVersion(plannerVersionStr) + if plannerVersionStr != "" && plannerVersion != querypb.ExecuteOptions_Gen4 { + return fmt.Errorf("invalid value specified for planner-version of '%s' -- valid value is Gen4 or an empty value to use the default planner", plannerVersionStr) + } + + sql, err := getFileParam(sqlFlag, sqlFileFlag, "sql", true) + if err != nil { + return err + } + + schema, err := getFileParam(schemaFlag, schemaFileFlag, "schema", true) + if err != nil { + return err + } + + vschema, err := getFileParam(vschemaFlag, vschemaFileFlag, "vschema", true) + if err != nil { + return err + } + + ksShardMap, err := getFileParam(ksShardMapFlag, ksShardMapFileFlag, "ks-shard-map", false) + if err != nil { + return err + } + + opts := &vtexplain.Options{ + ExecutionMode: executionMode, + PlannerVersion: plannerVersion, + ReplicationMode: replicationMode, + NumShards: numShards, + Normalize: normalize, + Target: dbName, + } + + vte, err := vtexplain.Init(context.Background(), vschema, schema, ksShardMap, opts) + if err != nil { + return err + } + defer vte.Stop() + + plans, err := vte.Run(sql) + if err != nil { + return err + } + + if outputMode == "text" { + fmt.Print(vte.ExplainsAsText(plans)) + } else { + fmt.Print(vtexplain.ExplainsAsJSON(plans)) + } + + return nil +} diff --git a/go/cmd/vtexplain/docgen/main.go b/go/cmd/vtexplain/docgen/main.go new file mode 100644 index 00000000000..15ea92b53bb --- /dev/null +++ b/go/cmd/vtexplain/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/vtexplain/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.Main, dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/vtexplain/vtexplain.go b/go/cmd/vtexplain/vtexplain.go index d5f60a893ba..37774076382 100644 --- a/go/cmd/vtexplain/vtexplain.go +++ b/go/cmd/vtexplain/vtexplain.go @@ -18,151 +18,16 @@ package main import ( "fmt" - "os" - "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/cmd/vtexplain/cli" "vitess.io/vitess/go/exit" - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vtexplain" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - "github.com/spf13/pflag" - - querypb "vitess.io/vitess/go/vt/proto/query" -) - -var ( - sqlFlag string - sqlFileFlag string - schemaFlag string - schemaFileFlag string - vschemaFlag string - vschemaFileFlag string - ksShardMapFlag string - ksShardMapFileFlag string - normalize bool - dbName string - plannerVersionStr string - - numShards = 2 - replicationMode = "ROW" - executionMode = "multi" - outputMode = "text" ) -func registerFlags(fs *pflag.FlagSet) { - fs.StringVar(&sqlFlag, "sql", sqlFlag, "A list of semicolon-delimited SQL commands to analyze") - fs.StringVar(&sqlFileFlag, "sql-file", sqlFileFlag, "Identifies 
the file that contains the SQL commands to analyze") - fs.StringVar(&schemaFlag, "schema", schemaFlag, "The SQL table schema") - fs.StringVar(&schemaFileFlag, "schema-file", schemaFileFlag, "Identifies the file that contains the SQL table schema") - fs.StringVar(&vschemaFlag, "vschema", vschemaFlag, "Identifies the VTGate routing schema") - fs.StringVar(&vschemaFileFlag, "vschema-file", vschemaFileFlag, "Identifies the VTGate routing schema file") - fs.StringVar(&ksShardMapFlag, "ks-shard-map", ksShardMapFlag, "JSON map of keyspace name -> shard name -> ShardReference object. The inner map is the same as the output of FindAllShardsInKeyspace") - fs.StringVar(&ksShardMapFileFlag, "ks-shard-map-file", ksShardMapFileFlag, "File containing json blob of keyspace name -> shard name -> ShardReference object") - fs.StringVar(&replicationMode, "replication-mode", replicationMode, "The replication mode to simulate -- must be set to either ROW or STATEMENT") - fs.BoolVar(&normalize, "normalize", normalize, "Whether to enable vtgate normalization") - fs.StringVar(&dbName, "dbname", dbName, "Optional database target to override normal routing") - fs.StringVar(&plannerVersionStr, "planner-version", plannerVersionStr, "Sets the query planner version to use when generating the explain output. Valid values are V3 and Gen4. An empty value will use VTGate's default planner") - fs.IntVar(&numShards, "shards", numShards, "Number of shards per keyspace. 
Passing --ks-shard-map/--ks-shard-map-file causes this flag to be ignored.") - fs.StringVar(&executionMode, "execution-mode", executionMode, "The execution mode to simulate -- must be set to multi, legacy-autocommit, or twopc") - fs.StringVar(&outputMode, "output-mode", outputMode, "Output in human-friendly text or json") - - acl.RegisterFlags(fs) -} - -func init() { - servenv.OnParse(registerFlags) -} - -// getFileParam returns a string containing either flag is not "", -// or the content of the file named flagFile -func getFileParam(flag, flagFile, name string, required bool) (string, error) { - if flag != "" { - if flagFile != "" { - return "", fmt.Errorf("action requires only one of %v or %v-file", name, name) - } - return flag, nil - } - - if flagFile == "" { - if required { - return "", fmt.Errorf("action requires one of %v or %v-file", name, name) - } - - return "", nil - } - data, err := os.ReadFile(flagFile) - if err != nil { - return "", fmt.Errorf("cannot read file %v: %v", flagFile, err) - } - return string(data), nil -} - func main() { defer exit.RecoverAll() - defer logutil.Flush() - servenv.ParseFlags("vtexplain") - servenv.Init() - err := parseAndRun() - if err != nil { + if err := cli.Main.Execute(); err != nil { fmt.Printf("ERROR: %s\n", err) exit.Return(1) } } - -func parseAndRun() error { - plannerVersion, _ := plancontext.PlannerNameToVersion(plannerVersionStr) - if plannerVersionStr != "" && plannerVersion != querypb.ExecuteOptions_V3 && plannerVersion != querypb.ExecuteOptions_Gen4 { - return fmt.Errorf("invalid value specified for planner-version of '%s' -- valid values are V3 and Gen4 or an empty value to use the default planner", plannerVersionStr) - } - - sql, err := getFileParam(sqlFlag, sqlFileFlag, "sql", true) - if err != nil { - return err - } - - schema, err := getFileParam(schemaFlag, schemaFileFlag, "schema", true) - if err != nil { - return err - } - - vschema, err := getFileParam(vschemaFlag, vschemaFileFlag, "vschema", true) - 
if err != nil { - return err - } - - ksShardMap, err := getFileParam(ksShardMapFlag, ksShardMapFileFlag, "ks-shard-map", false) - if err != nil { - return err - } - - opts := &vtexplain.Options{ - ExecutionMode: executionMode, - PlannerVersion: plannerVersion, - ReplicationMode: replicationMode, - NumShards: numShards, - Normalize: normalize, - Target: dbName, - } - - vte, err := vtexplain.Init(vschema, schema, ksShardMap, opts) - if err != nil { - return err - } - defer vte.Stop() - - plans, err := vte.Run(sql) - if err != nil { - return err - } - - if outputMode == "text" { - fmt.Print(vte.ExplainsAsText(plans)) - } else { - fmt.Print(vtexplain.ExplainsAsJSON(plans)) - } - - return nil -} diff --git a/go/cmd/vtgate/cli/cli.go b/go/cmd/vtgate/cli/cli.go new file mode 100644 index 00000000000..9182bfcf9a4 --- /dev/null +++ b/go/cmd/vtgate/cli/cli.go @@ -0,0 +1,192 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cli + +import ( + "context" + "fmt" + "strings" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/exit" + "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vtrpc" +) + +var ( + cell string + tabletTypesToWait []topodatapb.TabletType + plannerName string + resilientServer *srvtopo.ResilientServer + + Main = &cobra.Command{ + Use: "vtgate", + Short: "VTGate is a stateless proxy responsible for accepting requests from applications and routing them to the appropriate tablet server(s) for query execution. It speaks both the MySQL Protocol and a gRPC protocol.", + Long: `VTGate is a stateless proxy responsible for accepting requests from applications and routing them to the appropriate tablet server(s) for query execution. It speaks both the MySQL Protocol and a gRPC protocol. 
+ +### Key Options +` + + "\n* `--srv_topo_cache_ttl`: There may be instances where you will need to increase the cached TTL from the default of 1 second to a higher number:\n" + + ` * You may want to increase this option if you see that your topo leader goes down and keeps your queries waiting for a few seconds.`, + Example: `vtgate \ + --topo_implementation etcd2 \ + --topo_global_server_address localhost:2379 \ + --topo_global_root /vitess/global \ + --log_dir $VTDATAROOT/tmp \ + --port 15001 \ + --grpc_port 15991 \ + --mysql_server_port 15306 \ + --cell test \ + --cells_to_watch test \ + --tablet_types_to_wait PRIMARY,REPLICA \ + --service_map 'grpc-vtgateservice' \ + --pid_file $VTDATAROOT/tmp/vtgate.pid \ + --mysql_auth_server_impl none`, + Args: cobra.NoArgs, + Version: servenv.AppVersion.String(), + PreRunE: servenv.CobraPreRunE, + RunE: run, + } +) + +// CheckCellFlags will check validation of cell and cells_to_watch flag +// it will help to avoid strange behaviors when vtgate runs but actually does not work +func CheckCellFlags(ctx context.Context, serv srvtopo.Server, cell string, cellsToWatch string) error { + // topo check + var topoServer *topo.Server + if serv != nil { + var err error + topoServer, err = serv.GetTopoServer() + if err != nil { + return fmt.Errorf("Unable to create gateway: %w", err) + } + } else { + return fmt.Errorf("topo server cannot be nil") + } + cellsInTopo, err := topoServer.GetKnownCells(ctx) + if err != nil { + return err + } + if len(cellsInTopo) == 0 { + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "topo server should have at least one cell") + } + + // cell valid check + if cell == "" { + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell flag must be set") + } + hasCell := false + for _, v := range cellsInTopo { + if v == cell { + hasCell = true + break + } + } + if !hasCell { + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell:[%v] does not exist in topo", cell) + } + + // cells_to_watch valid check 
+ cells := make([]string, 0, 1) + for _, c := range strings.Split(cellsToWatch, ",") { + if c == "" { + continue + } + // cell should contained in cellsInTopo + if exists := topo.InCellList(c, cellsInTopo); !exists { + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell: [%v] is not valid. Available cells: [%v]", c, strings.Join(cellsInTopo, ",")) + } + cells = append(cells, c) + } + if len(cells) == 0 { + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cells_to_watch flag cannot be empty") + } + + return nil +} + +func run(cmd *cobra.Command, args []string) error { + defer exit.Recover() + + servenv.Init() + + ts := topo.Open() + defer ts.Close() + + resilientServer = srvtopo.NewResilientServer(context.Background(), ts, "ResilientSrvTopoServer") + + tabletTypes := make([]topodatapb.TabletType, 0, 1) + for _, tt := range tabletTypesToWait { + if topoproto.IsServingType(tt) { + tabletTypes = append(tabletTypes, tt) + } + } + + if len(tabletTypes) == 0 { + return fmt.Errorf("tablet_types_to_wait must contain at least one serving tablet type") + } + + err := CheckCellFlags(context.Background(), resilientServer, cell, vtgate.CellsToWatch) + if err != nil { + return fmt.Errorf("cells_to_watch validation failed: %v", err) + } + + plannerVersion, _ := plancontext.PlannerNameToVersion(plannerName) + + // pass nil for HealthCheck and it will be created + vtg := vtgate.Init(context.Background(), nil, resilientServer, cell, tabletTypes, plannerVersion) + + servenv.OnRun(func() { + // Flags are parsed now. Parse the template using the actual flag value and overwrite the current template. 
+ discovery.ParseTabletURLTemplateFromFlag() + addStatusParts(vtg) + }) + servenv.OnClose(func() { + _ = vtg.Gateway().Close(context.Background()) + }) + servenv.RunDefault() + + return nil +} + +func init() { + servenv.RegisterDefaultFlags() + servenv.RegisterFlags() + servenv.RegisterGRPCServerFlags() + servenv.RegisterGRPCServerAuthFlags() + servenv.RegisterServiceMapFlag() + + servenv.MoveFlagsToCobraCommand(Main) + + acl.RegisterFlags(Main.Flags()) + Main.Flags().StringVar(&cell, "cell", cell, "cell to use") + Main.Flags().Var((*topoproto.TabletTypeListFlag)(&tabletTypesToWait), "tablet_types_to_wait", "Wait till connected for specified tablet types during Gateway initialization. Should be provided as a comma-separated set of tablet types.") + Main.Flags().StringVar(&plannerName, "planner-version", plannerName, "Sets the default planner to use when the session has not changed it. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right") + + Main.MarkFlagRequired("tablet_types_to_wait") +} diff --git a/go/cmd/vtgate/plugin_auth_clientcert.go b/go/cmd/vtgate/cli/plugin_auth_clientcert.go similarity index 98% rename from go/cmd/vtgate/plugin_auth_clientcert.go rename to go/cmd/vtgate/cli/plugin_auth_clientcert.go index 4f3d65ef626..1a1334e71ba 100644 --- a/go/cmd/vtgate/plugin_auth_clientcert.go +++ b/go/cmd/vtgate/cli/plugin_auth_clientcert.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports clientcert to register the client certificate implementation of AuthServer. 
diff --git a/go/cmd/vtgate/plugin_auth_ldap.go b/go/cmd/vtgate/cli/plugin_auth_ldap.go similarity index 98% rename from go/cmd/vtgate/plugin_auth_ldap.go rename to go/cmd/vtgate/cli/plugin_auth_ldap.go index 257f0742733..7dc5b246f72 100644 --- a/go/cmd/vtgate/plugin_auth_ldap.go +++ b/go/cmd/vtgate/cli/plugin_auth_ldap.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports ldapauthserver to register the LDAP implementation of AuthServer. diff --git a/go/cmd/vtgate/plugin_auth_static.go b/go/cmd/vtgate/cli/plugin_auth_static.go similarity index 98% rename from go/cmd/vtgate/plugin_auth_static.go rename to go/cmd/vtgate/cli/plugin_auth_static.go index 8e4a552cecf..9ffd60a79f2 100644 --- a/go/cmd/vtgate/plugin_auth_static.go +++ b/go/cmd/vtgate/cli/plugin_auth_static.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports staticauthserver to register the flat-file implementation of AuthServer. diff --git a/go/cmd/vtgate/plugin_auth_vault.go b/go/cmd/vtgate/cli/plugin_auth_vault.go similarity index 98% rename from go/cmd/vtgate/plugin_auth_vault.go rename to go/cmd/vtgate/cli/plugin_auth_vault.go index ca271b496ca..2aee32e3940 100644 --- a/go/cmd/vtgate/plugin_auth_vault.go +++ b/go/cmd/vtgate/cli/plugin_auth_vault.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports InitAuthServerVault to register the HashiCorp Vault implementation of AuthServer. 
diff --git a/go/cmd/topo2topo/plugin_consultopo.go b/go/cmd/vtgate/cli/plugin_consultopo.go similarity index 98% rename from go/cmd/topo2topo/plugin_consultopo.go rename to go/cmd/vtgate/cli/plugin_consultopo.go index 59d6774fdbc..a128f294a42 100644 --- a/go/cmd/topo2topo/plugin_consultopo.go +++ b/go/cmd/vtgate/cli/plugin_consultopo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports consultopo to register the consul implementation of TopoServer. diff --git a/go/cmd/vttablet/plugin_etcd2topo.go b/go/cmd/vtgate/cli/plugin_etcd2topo.go similarity index 98% rename from go/cmd/vttablet/plugin_etcd2topo.go rename to go/cmd/vtgate/cli/plugin_etcd2topo.go index d99ef51d4af..5a51923cf00 100644 --- a/go/cmd/vttablet/plugin_etcd2topo.go +++ b/go/cmd/vtgate/cli/plugin_etcd2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports etcd2topo to register the etcd2 implementation of TopoServer. diff --git a/go/cmd/vtgate/plugin_grpctabletconn.go b/go/cmd/vtgate/cli/plugin_grpctabletconn.go similarity index 98% rename from go/cmd/vtgate/plugin_grpctabletconn.go rename to go/cmd/vtgate/cli/plugin_grpctabletconn.go index 08291a7c916..4a97e36eec4 100644 --- a/go/cmd/vtgate/plugin_grpctabletconn.go +++ b/go/cmd/vtgate/cli/plugin_grpctabletconn.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package cli // Imports and register the gRPC tabletconn client diff --git a/go/cmd/vtgate/plugin_grpcvtgateservice.go b/go/cmd/vtgate/cli/plugin_grpcvtgateservice.go similarity index 98% rename from go/cmd/vtgate/plugin_grpcvtgateservice.go rename to go/cmd/vtgate/cli/plugin_grpcvtgateservice.go index 4ee159710ca..bbbc6e3039e 100644 --- a/go/cmd/vtgate/plugin_grpcvtgateservice.go +++ b/go/cmd/vtgate/cli/plugin_grpcvtgateservice.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC vtgateservice server diff --git a/go/cmd/vtgate/plugin_opentracing.go b/go/cmd/vtgate/cli/plugin_opentracing.go similarity index 98% rename from go/cmd/vtgate/plugin_opentracing.go rename to go/cmd/vtgate/cli/plugin_opentracing.go index 9a6786d3d64..7ec15423f5a 100644 --- a/go/cmd/vtgate/plugin_opentracing.go +++ b/go/cmd/vtgate/cli/plugin_opentracing.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( "vitess.io/vitess/go/trace" diff --git a/go/cmd/vtgate/plugin_opentsdb.go b/go/cmd/vtgate/cli/plugin_opentsdb.go similarity index 98% rename from go/cmd/vtgate/plugin_opentsdb.go rename to go/cmd/vtgate/cli/plugin_opentsdb.go index 0988f3b9a64..37c81f271c9 100644 --- a/go/cmd/vtgate/plugin_opentsdb.go +++ b/go/cmd/vtgate/cli/plugin_opentsdb.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports opentsdb to register the opentsdb stats backend. 
diff --git a/go/cmd/vtgate/plugin_prometheusbackend.go b/go/cmd/vtgate/cli/plugin_prometheusbackend.go similarity index 98% rename from go/cmd/vtgate/plugin_prometheusbackend.go rename to go/cmd/vtgate/cli/plugin_prometheusbackend.go index 6bffd133332..a1797abdcd1 100644 --- a/go/cmd/vtgate/plugin_prometheusbackend.go +++ b/go/cmd/vtgate/cli/plugin_prometheusbackend.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports Prometheus to allow for instrumentation // with the Prometheus client library diff --git a/go/cmd/vtgate/cli/plugin_statsd.go b/go/cmd/vtgate/cli/plugin_statsd.go new file mode 100644 index 00000000000..fc42fa4f447 --- /dev/null +++ b/go/cmd/vtgate/cli/plugin_statsd.go @@ -0,0 +1,23 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import "vitess.io/vitess/go/stats/statsd" + +func init() { + statsd.Init("vtgate") +} diff --git a/go/cmd/vtgate/plugin_zk2topo.go b/go/cmd/vtgate/cli/plugin_zk2topo.go similarity index 98% rename from go/cmd/vtgate/plugin_zk2topo.go rename to go/cmd/vtgate/cli/plugin_zk2topo.go index d75a1c6bcb4..1870a3b2bb3 100644 --- a/go/cmd/vtgate/plugin_zk2topo.go +++ b/go/cmd/vtgate/cli/plugin_zk2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package cli import ( // Imports and register the zk2 TopologyServer diff --git a/go/cmd/vtgate/status.go b/go/cmd/vtgate/cli/status.go similarity index 96% rename from go/cmd/vtgate/status.go rename to go/cmd/vtgate/cli/status.go index 436a1301438..2fdab073d5a 100644 --- a/go/cmd/vtgate/status.go +++ b/go/cmd/vtgate/cli/status.go @@ -14,13 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/srvtopo" - _ "vitess.io/vitess/go/vt/status" "vitess.io/vitess/go/vt/vtgate" ) diff --git a/go/cmd/vtgate/docgen/main.go b/go/cmd/vtgate/docgen/main.go new file mode 100644 index 00000000000..763d38b7e7b --- /dev/null +++ b/go/cmd/vtgate/docgen/main.go @@ -0,0 +1,42 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/vtgate/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.Main, dir) + }, + } + + // Here because we inadvertently transfer the required "tablet-types-to-wait" + // flag during vtgate/cli's init func. 
+ pflag.CommandLine = cmd.Flags() + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/vtgate/plugin_kubernetestopo.go b/go/cmd/vtgate/plugin_kubernetestopo.go deleted file mode 100644 index 671d0c8321f..00000000000 --- a/go/cmd/vtgate/plugin_kubernetestopo.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// This plugin imports k8stopo to register the kubernetes implementation of TopoServer. - -import ( - _ "vitess.io/vitess/go/vt/topo/k8stopo" -) diff --git a/go/cmd/vtgate/plugin_statsd.go b/go/cmd/vtgate/plugin_statsd.go deleted file mode 100644 index ae2ecb5b2e0..00000000000 --- a/go/cmd/vtgate/plugin_statsd.go +++ /dev/null @@ -1,7 +0,0 @@ -package main - -import "vitess.io/vitess/go/stats/statsd" - -func init() { - statsd.Init("vtgate") -} diff --git a/go/cmd/vtgate/vtgate.go b/go/cmd/vtgate/vtgate.go index f4fc21000a2..fd81fe85a68 100644 --- a/go/cmd/vtgate/vtgate.go +++ b/go/cmd/vtgate/vtgate.go @@ -17,153 +17,12 @@ limitations under the License. 
package main import ( - "context" - "math/rand" - "strings" - "time" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/exit" - "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/cmd/vtgate/cli" "vitess.io/vitess/go/vt/log" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/srvtopo" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) -var ( - cell = "" - tabletTypesToWait []topodatapb.TabletType - plannerName string -) - -func registerFlags(fs *pflag.FlagSet) { - fs.StringVar(&cell, "cell", cell, "cell to use") - fs.Var((*topoproto.TabletTypeListFlag)(&tabletTypesToWait), "tablet_types_to_wait", "Wait till connected for specified tablet types during Gateway initialization. Should be provided as a comma-separated set of tablet types.") - fs.StringVar(&plannerName, "planner-version", plannerName, "Sets the default planner to use when the session has not changed it. Valid values are: V3, V3Insert, Gen4, Gen4Greedy and Gen4Fallback. 
Gen4Fallback tries the gen4 planner and falls back to the V3 planner if the gen4 fails.") - - acl.RegisterFlags(fs) -} - -var resilientServer *srvtopo.ResilientServer - -func init() { - rand.Seed(time.Now().UnixNano()) - servenv.RegisterDefaultFlags() - servenv.RegisterFlags() - servenv.RegisterGRPCServerFlags() - servenv.RegisterGRPCServerAuthFlags() - servenv.RegisterServiceMapFlag() - servenv.OnParse(registerFlags) -} - -// CheckCellFlags will check validation of cell and cells_to_watch flag -// it will help to avoid strange behaviors when vtgate runs but actually does not work -func CheckCellFlags(ctx context.Context, serv srvtopo.Server, cell string, cellsToWatch string) error { - // topo check - var topoServer *topo.Server - if serv != nil { - var err error - topoServer, err = serv.GetTopoServer() - if err != nil { - log.Exitf("Unable to create gateway: %v", err) - } - } else { - log.Exitf("topo server cannot be nil") - } - cellsInTopo, err := topoServer.GetKnownCells(ctx) - if err != nil { - return err - } - if len(cellsInTopo) == 0 { - return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "topo server should have at least one cell") - } - - // cell valid check - if cell == "" { - return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell flag must be set") - } - hasCell := false - for _, v := range cellsInTopo { - if v == cell { - hasCell = true - break - } - } - if !hasCell { - return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell:[%v] does not exist in topo", cell) - } - - // cells_to_watch valid check - cells := make([]string, 0, 1) - for _, c := range strings.Split(cellsToWatch, ",") { - if c == "" { - continue - } - // cell should contained in cellsInTopo - if exists := topo.InCellList(c, cellsInTopo); !exists { - return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell: [%v] is not valid. 
Available cells: [%v]", c, strings.Join(cellsInTopo, ",")) - } - cells = append(cells, c) - } - if len(cells) == 0 { - return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cells_to_watch flag cannot be empty") - } - - return nil -} - func main() { - defer exit.Recover() - - servenv.ParseFlags("vtgate") - servenv.Init() - - ts := topo.Open() - defer ts.Close() - - resilientServer = srvtopo.NewResilientServer(ts, "ResilientSrvTopoServer") - - tabletTypes := make([]topodatapb.TabletType, 0, 1) - if len(tabletTypesToWait) != 0 { - for _, tt := range tabletTypesToWait { - if topoproto.IsServingType(tt) { - tabletTypes = append(tabletTypes, tt) - } - } - } else { - log.Exitf("tablet_types_to_wait flag must be set") - } - - if len(tabletTypes) == 0 { - log.Exitf("tablet_types_to_wait should contain at least one serving tablet type") - } - - err := CheckCellFlags(context.Background(), resilientServer, cell, vtgate.CellsToWatch) - if err != nil { - log.Exitf("cells_to_watch validation failed: %v", err) + if err := cli.Main.Execute(); err != nil { + log.Exit(err) } - - plannerVersion, _ := plancontext.PlannerNameToVersion(plannerName) - - // pass nil for HealthCheck and it will be created - vtg := vtgate.Init(context.Background(), nil, resilientServer, cell, tabletTypes, plannerVersion) - - servenv.OnRun(func() { - // Flags are parsed now. Parse the template using the actual flag value and overwrite the current template. - discovery.ParseTabletURLTemplateFromFlag() - addStatusParts(vtg) - }) - servenv.OnClose(func() { - _ = vtg.Gateway().Close(context.Background()) - }) - servenv.RunDefault() } diff --git a/go/cmd/vtgateclienttest/cli/main.go b/go/cmd/vtgateclienttest/cli/main.go new file mode 100644 index 00000000000..a30cebe418d --- /dev/null +++ b/go/cmd/vtgateclienttest/cli/main.go @@ -0,0 +1,64 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cli is the implementation of vtgateclienttest. +// This program has a chain of vtgateservice.VTGateService implementations, +// each one being responsible for one test scenario. +package cli + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/cmd/vtgateclienttest/services" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vtgate" +) + +var Main = &cobra.Command{ + Use: "vtgateclienttest", + Short: "vtgateclienttest is a chain of vtgateservice.VTGateService implementations, each one being responsible for one test scenario.", + Args: cobra.NoArgs, + PreRunE: servenv.CobraPreRunE, + RunE: run, +} + +func init() { + servenv.RegisterDefaultFlags() + servenv.RegisterFlags() + servenv.RegisterGRPCServerFlags() + servenv.RegisterGRPCServerAuthFlags() + servenv.RegisterServiceMapFlag() + + servenv.MoveFlagsToCobraCommand(Main) + + acl.RegisterFlags(Main.Flags()) +} + +func run(cmd *cobra.Command, args []string) error { + servenv.Init() + + // The implementation chain. 
+ servenv.OnRun(func() { + s := services.CreateServices() + for _, f := range vtgate.RegisterVTGates { + f(s) + } + }) + + servenv.RunDefault() + return nil +} diff --git a/go/cmd/vtgateclienttest/plugin_grpcvtgateservice.go b/go/cmd/vtgateclienttest/cli/plugin_grpcvtgateservice.go similarity index 98% rename from go/cmd/vtgateclienttest/plugin_grpcvtgateservice.go rename to go/cmd/vtgateclienttest/cli/plugin_grpcvtgateservice.go index 4ee159710ca..bbbc6e3039e 100644 --- a/go/cmd/vtgateclienttest/plugin_grpcvtgateservice.go +++ b/go/cmd/vtgateclienttest/cli/plugin_grpcvtgateservice.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC vtgateservice server diff --git a/go/cmd/vtgateclienttest/docgen/main.go b/go/cmd/vtgateclienttest/docgen/main.go new file mode 100644 index 00000000000..3a18cd6feeb --- /dev/null +++ b/go/cmd/vtgateclienttest/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/vtgateclienttest/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.Main, dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/vtgateclienttest/main.go b/go/cmd/vtgateclienttest/main.go index 2623ab84893..313b27de04a 100644 --- a/go/cmd/vtgateclienttest/main.go +++ b/go/cmd/vtgateclienttest/main.go @@ -14,46 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package main is the implementation of vtgateclienttest. -// This program has a chain of vtgateservice.VTGateService implementations, -// each one being responsible for one test scenario. package main import ( - "github.com/spf13/pflag" - - "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/cmd/vtgateclienttest/services" + "vitess.io/vitess/go/cmd/vtgateclienttest/cli" "vitess.io/vitess/go/exit" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vtgate" + "vitess.io/vitess/go/vt/log" ) -func init() { - servenv.RegisterDefaultFlags() - servenv.RegisterFlags() - servenv.RegisterGRPCServerFlags() - servenv.RegisterGRPCServerAuthFlags() - servenv.RegisterServiceMapFlag() - - servenv.OnParse(func(fs *pflag.FlagSet) { - acl.RegisterFlags(fs) - }) -} - func main() { defer exit.Recover() - servenv.ParseFlags("vtgateclienttest") - servenv.Init() - - // The implementation chain. 
- servenv.OnRun(func() { - s := services.CreateServices() - for _, f := range vtgate.RegisterVTGates { - f(s) - } - }) - - servenv.RunDefault() + if err := cli.Main.Execute(); err != nil { + log.Exitf("%s", err) + } } diff --git a/go/cmd/vtgateclienttest/services/callerid.go b/go/cmd/vtgateclienttest/services/callerid.go index d5334e2272a..db3fa07acf5 100644 --- a/go/cmd/vtgateclienttest/services/callerid.go +++ b/go/cmd/vtgateclienttest/services/callerid.go @@ -17,23 +17,21 @@ limitations under the License. package services import ( + "context" "encoding/json" "fmt" "strings" "vitess.io/vitess/go/mysql" - "context" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" - "vitess.io/vitess/go/vt/vtgate/vtgateservice" - querypb "vitess.io/vitess/go/vt/proto/query" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vtgate/vtgateservice" ) // CallerIDPrefix is the prefix to send with queries so they go @@ -79,11 +77,11 @@ func (c *callerIDClient) checkCallerID(ctx context.Context, received string) (bo return true, fmt.Errorf("SUCCESS: callerid matches") } -func (c *callerIDClient) Execute(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { +func (c *callerIDClient) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { if ok, err := c.checkCallerID(ctx, sql); ok { return session, nil, err } - return c.fallbackClient.Execute(ctx, session, sql, bindVariables) + return c.fallbackClient.Execute(ctx, mysqlCtx, conn, session, sql, bindVariables) } func (c *callerIDClient) ExecuteBatch(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sqlList []string, 
bindVariablesList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) { @@ -95,9 +93,9 @@ func (c *callerIDClient) ExecuteBatch(ctx context.Context, conn *mysql.Conn, ses return c.fallbackClient.ExecuteBatch(ctx, session, sqlList, bindVariablesList) } -func (c *callerIDClient) StreamExecute(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { +func (c *callerIDClient) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { if ok, err := c.checkCallerID(ctx, sql); ok { return session, err } - return c.fallbackClient.StreamExecute(ctx, session, sql, bindVariables, callback) + return c.fallbackClient.StreamExecute(ctx, mysqlCtx, nil, session, sql, bindVariables, callback) } diff --git a/go/cmd/vtgateclienttest/services/echo.go b/go/cmd/vtgateclienttest/services/echo.go index 27b77cda3d8..9b9f12c039b 100644 --- a/go/cmd/vtgateclienttest/services/echo.go +++ b/go/cmd/vtgateclienttest/services/echo.go @@ -100,7 +100,7 @@ func echoQueryResult(vals map[string]any) *sqltypes.Result { return qr } -func (c *echoClient) Execute(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { +func (c *echoClient) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { if strings.HasPrefix(sql, EchoPrefix) { return session, echoQueryResult(map[string]any{ "callerId": callerid.EffectiveCallerIDFromContext(ctx), @@ -109,10 +109,10 @@ func (c 
*echoClient) Execute(ctx context.Context, conn *mysql.Conn, session *vtg "session": session, }), nil } - return c.fallbackClient.Execute(ctx, session, sql, bindVariables) + return c.fallbackClient.Execute(ctx, mysqlCtx, conn, session, sql, bindVariables) } -func (c *echoClient) StreamExecute(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { +func (c *echoClient) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { if strings.HasPrefix(sql, EchoPrefix) { callback(echoQueryResult(map[string]any{ "callerId": callerid.EffectiveCallerIDFromContext(ctx), @@ -122,7 +122,7 @@ func (c *echoClient) StreamExecute(ctx context.Context, conn *mysql.Conn, sessio })) return session, nil } - return c.fallbackClient.StreamExecute(ctx, session, sql, bindVariables, callback) + return c.fallbackClient.StreamExecute(ctx, mysqlCtx, conn, session, sql, bindVariables, callback) } func (c *echoClient) ExecuteBatch(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sqlList []string, bindVariablesList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) { diff --git a/go/cmd/vtgateclienttest/services/errors.go b/go/cmd/vtgateclienttest/services/errors.go index d62c87ec776..611acbe6b8e 100644 --- a/go/cmd/vtgateclienttest/services/errors.go +++ b/go/cmd/vtgateclienttest/services/errors.go @@ -113,14 +113,14 @@ func trimmedRequestToError(received string) error { } } -func (c *errorClient) Execute(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { +func (c *errorClient) Execute(ctx 
context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { if err := requestToPartialError(sql, session); err != nil { return session, nil, err } if err := requestToError(sql); err != nil { return session, nil, err } - return c.fallbackClient.Execute(ctx, session, sql, bindVariables) + return c.fallbackClient.Execute(ctx, mysqlCtx, conn, session, sql, bindVariables) } func (c *errorClient) ExecuteBatch(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sqlList []string, bindVariablesList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) { @@ -135,11 +135,11 @@ func (c *errorClient) ExecuteBatch(ctx context.Context, conn *mysql.Conn, sessio return c.fallbackClient.ExecuteBatch(ctx, session, sqlList, bindVariablesList) } -func (c *errorClient) StreamExecute(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { +func (c *errorClient) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { if err := requestToError(sql); err != nil { return session, err } - return c.fallbackClient.StreamExecute(ctx, session, sql, bindVariables, callback) + return c.fallbackClient.StreamExecute(ctx, mysqlCtx, conn, session, sql, bindVariables, callback) } func (c *errorClient) Prepare(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, []*querypb.Field, error) { diff --git a/go/cmd/vtgateclienttest/services/fallback.go b/go/cmd/vtgateclienttest/services/fallback.go index 
08665e0e2eb..1892c8943f4 100644 --- a/go/cmd/vtgateclienttest/services/fallback.go +++ b/go/cmd/vtgateclienttest/services/fallback.go @@ -19,6 +19,8 @@ package services import ( "context" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/vtgate/vtgateservice" @@ -40,16 +42,16 @@ func newFallbackClient(fallback vtgateservice.VTGateService) fallbackClient { return fallbackClient{fallback: fallback} } -func (c fallbackClient) Execute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { - return c.fallback.Execute(ctx, nil, session, sql, bindVariables) +func (c fallbackClient) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { + return c.fallback.Execute(ctx, mysqlCtx, nil, session, sql, bindVariables) } func (c fallbackClient) ExecuteBatch(ctx context.Context, session *vtgatepb.Session, sqlList []string, bindVariablesList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) { return c.fallback.ExecuteBatch(ctx, nil, session, sqlList, bindVariablesList) } -func (c fallbackClient) StreamExecute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { - return c.fallback.StreamExecute(ctx, nil, session, sql, bindVariables, callback) +func (c fallbackClient) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { + return c.fallback.StreamExecute(ctx, mysqlCtx, nil, session, sql, bindVariables, callback) } func (c 
fallbackClient) Prepare(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, []*querypb.Field, error) { diff --git a/go/cmd/vtgateclienttest/services/terminal.go b/go/cmd/vtgateclienttest/services/terminal.go index d3ca1f49902..0ce1375bb27 100644 --- a/go/cmd/vtgateclienttest/services/terminal.go +++ b/go/cmd/vtgateclienttest/services/terminal.go @@ -17,12 +17,13 @@ limitations under the License. package services import ( + "context" "errors" "fmt" "vitess.io/vitess/go/mysql" - "context" + "vitess.io/vitess/go/vt/vtgate/vtgateservice" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/tb" @@ -44,7 +45,7 @@ func newTerminalClient() *terminalClient { return &terminalClient{} } -func (c *terminalClient) Execute(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { +func (c *terminalClient) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { if sql == "quit://" { log.Fatal("Received quit:// query. 
Going down.") } @@ -60,7 +61,7 @@ func (c *terminalClient) ExecuteBatch(ctx context.Context, conn *mysql.Conn, ses return session, nil, errTerminal } -func (c *terminalClient) StreamExecute(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { +func (c *terminalClient) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { return session, errTerminal } diff --git a/go/cmd/vtgr/main.go b/go/cmd/vtgr/main.go deleted file mode 100644 index bc403f2aa67..00000000000 --- a/go/cmd/vtgr/main.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "context" - "fmt" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vtgr" -) - -const deprecationMsg = "vtgr is deprecated and will be removed in Vitess 18. We recommend using VTOrc with semi-sync replication instead." 
- -func main() { - fmt.Println(deprecationMsg) - - var clustersToWatch []string - servenv.OnParseFor("vtgr", func(fs *pflag.FlagSet) { - fs.StringSliceVar(&clustersToWatch, "clusters_to_watch", nil, `Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. Example: "ks1,ks2/-80"`) - - acl.RegisterFlags(fs) - }) - servenv.ParseFlags("vtgr") - - log.Warning(deprecationMsg) - - // openTabletDiscovery will open up a connection to topo server - // and populate the tablets in memory - vtgr := vtgr.OpenTabletDiscovery(context.Background(), nil, clustersToWatch) - vtgr.RefreshCluster() - vtgr.ScanAndRepair() - - // block here so that we don't exit directly - select {} -} diff --git a/go/cmd/vtorc/cli/cli.go b/go/cmd/vtorc/cli/cli.go new file mode 100644 index 00000000000..f521ae05e57 --- /dev/null +++ b/go/cmd/vtorc/cli/cli.go @@ -0,0 +1,103 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cli + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vtorc/config" + "vitess.io/vitess/go/vt/vtorc/inst" + "vitess.io/vitess/go/vt/vtorc/logic" + "vitess.io/vitess/go/vt/vtorc/server" +) + +var ( + configFile string + Main = &cobra.Command{ + Use: "vtorc", + Short: "VTOrc is the automated fault detection and repair tool in Vitess.", + Example: `vtorc \ + --topo_implementation etcd2 \ + --topo_global_server_address localhost:2379 \ + --topo_global_root /vitess/global \ + --log_dir $VTDATAROOT/tmp \ + --port 15000 \ + --recovery-period-block-duration "10m" \ + --instance-poll-time "1s" \ + --topo-information-refresh-duration "30s" \ + --alsologtostderr`, + Args: cobra.NoArgs, + Version: servenv.AppVersion.String(), + PreRunE: servenv.CobraPreRunE, + Run: run, + } +) + +func run(cmd *cobra.Command, args []string) { + servenv.Init() + config.UpdateConfigValuesFromFlags() + inst.RegisterStats() + + log.Info("starting vtorc") + if len(configFile) > 0 { + config.ForceRead(configFile) + } else { + config.Read("/etc/vtorc.conf.json", "conf/vtorc.conf.json", "vtorc.conf.json") + } + if config.Config.AuditToSyslog { + inst.EnableAuditSyslog() + } + config.MarkConfigurationLoaded() + + // Log final config values to debug if something goes wrong. + config.LogConfigValues() + server.StartVTOrcDiscovery() + + server.RegisterVTOrcAPIEndpoints() + servenv.OnRun(func() { + addStatusParts() + }) + + // For backward compatibility, we require that VTOrc functions even when the --port flag is not provided. + // In this case, it should function like before but without the servenv pages. + // Therefore, currently we don't check for the --port flag to be necessary, but release 16+ that check + // can be added to always have the servenv page running in VTOrc. 
+ servenv.RunDefault() +} + +// addStatusParts adds UI parts to the /debug/status page of VTOrc +func addStatusParts() { + servenv.AddStatusPart("Recent Recoveries", logic.TopologyRecoveriesTemplate, func() any { + recoveries, _ := logic.ReadRecentRecoveries(false, 0) + return recoveries + }) +} + +func init() { + servenv.RegisterDefaultFlags() + servenv.RegisterFlags() + + servenv.MoveFlagsToCobraCommand(Main) + + logic.RegisterFlags(Main.Flags()) + config.RegisterFlags(Main.Flags()) + acl.RegisterFlags(Main.Flags()) + Main.Flags().StringVar(&configFile, "config", "", "config file name") +} diff --git a/go/cmd/vtgate/plugin_consultopo.go b/go/cmd/vtorc/cli/plugin_consultopo.go similarity index 98% rename from go/cmd/vtgate/plugin_consultopo.go rename to go/cmd/vtorc/cli/plugin_consultopo.go index 59d6774fdbc..a128f294a42 100644 --- a/go/cmd/vtgate/plugin_consultopo.go +++ b/go/cmd/vtorc/cli/plugin_consultopo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports consultopo to register the consul implementation of TopoServer. diff --git a/go/cmd/vtgate/plugin_etcd2topo.go b/go/cmd/vtorc/cli/plugin_etcd2topo.go similarity index 98% rename from go/cmd/vtgate/plugin_etcd2topo.go rename to go/cmd/vtorc/cli/plugin_etcd2topo.go index d99ef51d4af..5a51923cf00 100644 --- a/go/cmd/vtgate/plugin_etcd2topo.go +++ b/go/cmd/vtorc/cli/plugin_etcd2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports etcd2topo to register the etcd2 implementation of TopoServer. 
diff --git a/go/cmd/vtctld/plugin_grpctmclient.go b/go/cmd/vtorc/cli/plugin_grpctmclient.go similarity index 98% rename from go/cmd/vtctld/plugin_grpctmclient.go rename to go/cmd/vtorc/cli/plugin_grpctmclient.go index ce554da96df..8cd349c7f87 100644 --- a/go/cmd/vtctld/plugin_grpctmclient.go +++ b/go/cmd/vtorc/cli/plugin_grpctmclient.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC tabletmanager client diff --git a/go/cmd/vtorc/plugin_prometheusbackend.go b/go/cmd/vtorc/cli/plugin_prometheusbackend.go similarity index 98% rename from go/cmd/vtorc/plugin_prometheusbackend.go rename to go/cmd/vtorc/cli/plugin_prometheusbackend.go index 868e097ade2..8cb6e034d8a 100644 --- a/go/cmd/vtorc/plugin_prometheusbackend.go +++ b/go/cmd/vtorc/cli/plugin_prometheusbackend.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports Prometheus to allow for instrumentation // with the Prometheus client library diff --git a/go/cmd/vtorc/plugin_zk2topo.go b/go/cmd/vtorc/cli/plugin_zk2topo.go similarity index 98% rename from go/cmd/vtorc/plugin_zk2topo.go rename to go/cmd/vtorc/cli/plugin_zk2topo.go index ebf385ec1af..d71a7e2e196 100644 --- a/go/cmd/vtorc/plugin_zk2topo.go +++ b/go/cmd/vtorc/cli/plugin_zk2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the zk2 TopologyServer diff --git a/go/cmd/vtorc/docgen/main.go b/go/cmd/vtorc/docgen/main.go new file mode 100644 index 00000000000..22daccab302 --- /dev/null +++ b/go/cmd/vtorc/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/vtorc/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.Main, dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/vtorc/main.go b/go/cmd/vtorc/main.go index c80f0573948..be6dfbb84c6 100644 --- a/go/cmd/vtorc/main.go +++ b/go/cmd/vtorc/main.go @@ -17,119 +17,17 @@ package main import ( - "strings" - _ "github.com/go-sql-driver/mysql" - "github.com/spf13/pflag" _ "modernc.org/sqlite" - "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/cmd/vtorc/cli" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vtorc/config" - "vitess.io/vitess/go/vt/vtorc/inst" - "vitess.io/vitess/go/vt/vtorc/logic" - "vitess.io/vitess/go/vt/vtorc/server" ) -// transformArgsForPflag turns a slice of raw args passed on the command line, -// possibly incompatible with pflag (because the user is expecting stdlib flag -// parsing behavior) and transforms them into the arguments that should have -// been passed to conform to pflag parsing behavior. -// -// the primary function is to catch any cases where the user specified a longopt -// with only a single hyphen (e.g. 
`-myflag`) and correct it to be -// double-hyphenated. -// -// note that this transformation does _not_ actually validate the arguments; for -// example if the user specifies `--myflag`, but the FlagSet has no such flag -// defined, that will still appear in the returned result and will (correctly) -// cause a parse error later on in `main`, at which point the CLI usage will -// be printed. -// -// note also that this transformation is incomplete. pflag allows interspersing -// of flag and positional arguments, whereas stdlib flag does not. however, for -// vtorc specifically, with the exception of `vtorc help `, the CLI only -// consumes flag arguments (in other words, there are no supported subcommands), -// so this is a non-issue, and is not implemented here in order to make this -// function a bit simpler. -func transformArgsForPflag(fs *pflag.FlagSet, args []string) (result []string) { - for i, arg := range args { - switch { - case arg == "--": - // pflag stops parsing at `--`, so we're done transforming the CLI - // arguments. Just append everything remaining and be done. - result = append(result, args[i:]...) - return result - case strings.HasPrefix(arg, "--"): - // Long-hand flag. Append it and continue. - result = append(result, arg) - case strings.HasPrefix(arg, "-"): - // Most complex case. This is either: - // 1. A legacy long-hand flag that needs a double-dash (e.g. `-myflag` => `--myflag`). - // 2. One _or more_ pflag shortopts all shoved together (think `rm -rf` as `rm -r -f`). - // - // In the latter case, we don't need to do any transformations, but - // in the former, we do. - name := strings.SplitN(arg[1:], "=", 2)[0] // discard any potential value (`-myflag` and `-myflag=10` both have the name of `myflag`) - if fs.Lookup(name) != nil || name == "help" { - // Case 1: We have a long opt with this name, so we need to - // prepend an additional hyphen. - result = append(result, "-"+arg) - } else { - // Case 2: No transformation needed. 
- result = append(result, arg) - } - default: - // Just a flag argument. Nothing to transform. - result = append(result, arg) - } - } - - return result -} - // main is the application's entry point. It will spawn an HTTP interface. func main() { - servenv.RegisterDefaultFlags() - servenv.RegisterFlags() - - var configFile string - servenv.OnParseFor("vtorc", func(fs *pflag.FlagSet) { - logic.RegisterFlags(fs) - server.RegisterFlags(fs) - config.RegisterFlags(fs) - acl.RegisterFlags(fs) - - fs.StringVar(&configFile, "config", "", "config file name") - }) - servenv.ParseFlags("vtorc") - servenv.Init() - config.UpdateConfigValuesFromFlags() - - log.Info("starting vtorc") - if len(configFile) > 0 { - config.ForceRead(configFile) - } else { - config.Read("/etc/vtorc.conf.json", "conf/vtorc.conf.json", "vtorc.conf.json") - } - if config.Config.AuditToSyslog { - inst.EnableAuditSyslog() + // TODO: viperutil.BindFlags() + if err := cli.Main.Execute(); err != nil { + log.Exit(err) } - config.MarkConfigurationLoaded() - - // Log final config values to debug if something goes wrong. - config.LogConfigValues() - server.StartVTOrcDiscovery() - - server.RegisterVTOrcAPIEndpoints() - servenv.OnRun(func() { - addStatusParts() - }) - - // For backward compatability, we require that VTOrc functions even when the --port flag is not provided. - // In this case, it should function like before but without the servenv pages. - // Therefore, currently we don't check for the --port flag to be necessary, but release 16+ that check - // can be added to always have the serenv page running in VTOrc. 
- servenv.RunDefault() } diff --git a/go/cmd/vtorc/main_test.go b/go/cmd/vtorc/main_test.go deleted file mode 100644 index 5bbdcdaf981..00000000000 --- a/go/cmd/vtorc/main_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package main - -import ( - "strings" - "testing" - - "github.com/spf13/pflag" - "github.com/stretchr/testify/assert" -) - -func Test_transformArgsForPflag(t *testing.T) { - fs := pflag.NewFlagSet("test", pflag.ContinueOnError) - fs.String("foobar", "baz", "") - fs.StringP("name", "n", "", "") - fs.BoolP("debug", "d", true, "") - - tests := []struct { - args []string - transformed []string - }{ - { - args: []string{"--foobar=hello", "--name", "myname", "-d"}, - transformed: []string{"--foobar=hello", "--name", "myname", "-d"}, - }, - { - args: []string{"-foobar=hello", "-name", "myname", "-d"}, - transformed: []string{"--foobar=hello", "--name", "myname", "-d"}, - }, - { - args: []string{"--", "-foobar=hello"}, - transformed: []string{"--", "-foobar=hello"}, - }, - { - args: []string{"-dn"}, // combined shortopts - transformed: []string{"-dn"}, - }, - } - - for _, tt := range tests { - tt := tt - name := strings.Join(tt.args, " ") - - t.Run(name, func(t *testing.T) { - got := transformArgsForPflag(fs, tt.args) - assert.Equal(t, tt.transformed, got) - }) - } -} diff --git a/go/cmd/vtorc/plugin_kubernetestopo.go b/go/cmd/vtorc/plugin_kubernetestopo.go deleted file mode 100644 index 671d0c8321f..00000000000 --- a/go/cmd/vtorc/plugin_kubernetestopo.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// This plugin imports k8stopo to register the kubernetes implementation of TopoServer. - -import ( - _ "vitess.io/vitess/go/vt/topo/k8stopo" -) diff --git a/go/cmd/vttablet/cli/cli.go b/go/cmd/vttablet/cli/cli.go new file mode 100644 index 00000000000..1efa35613d7 --- /dev/null +++ b/go/cmd/vttablet/cli/cli.go @@ -0,0 +1,276 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cli + +import ( + "bytes" + "context" + "fmt" + "os" + "time" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/vt/binlog" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/tableacl" + "vitess.io/vitess/go/vt/tableacl/simpleacl" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vttablet/onlineddl" + "vitess.io/vitess/go/vt/vttablet/tabletmanager" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + "vitess.io/vitess/go/vt/vttablet/tabletserver" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/yaml2" + "vitess.io/vitess/resources" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +var ( + enforceTableACLConfig bool + tableACLConfig string + tableACLConfigReloadInterval time.Duration + tabletPath string + tabletConfig string + + tm *tabletmanager.TabletManager + + Main = &cobra.Command{ + Use: "vttablet", + Short: "The VTTablet server controls a running MySQL server.", + Long: `The VTTablet server _controls_ a running MySQL server. VTTablet supports two primary types of deployments: + +* Managed MySQL (most common) +* External MySQL + +In addition to these deployment types, a partially managed VTTablet is also possible by setting ` + "`--disable_active_reparents`." + ` + +### Managed MySQL + +In this mode, Vitess actively manages MySQL. + +### External MySQL. + +In this mode, an external MySQL can be used such as AWS RDS, AWS Aurora, Google CloudSQL; or just an existing (vanilla) MySQL installation. + +See "Unmanaged Tablet" for the full guide. + +Even if a MySQL is external, you can still make vttablet perform some management functions. 
They are as follows: + +` + + "* `--disable_active_reparents`: If this flag is set, then any reparent or replica commands will not be allowed. These are InitShardPrimary, PlannedReparentShard, EmergencyReparentShard, and ReparentTablet. In this mode, you should use the TabletExternallyReparented command to inform vitess of the current primary.\n" + + "* `--replication_connect_retry`: This value is give to mysql when it connects a replica to the primary as the retry duration parameter.\n" + + "* `--enable_replication_reporter`: If this flag is set, then vttablet will transmit replica lag related information to the vtgates, which will allow it to balance load better. Additionally, enabling this will also cause vttablet to restart replication if it was stopped. However, it will do this only if `--disable_active_reparents` was not turned on.\n" + + "* `--heartbeat_enable` and `--heartbeat_interval duration`: cause vttablet to write heartbeats to the sidecar database. This information is also used by the replication reporter to assess replica lag.\n", + Example: ` +vttablet \ + --topo_implementation etcd2 \ + --topo_global_server_address localhost:2379 \ + --topo_global_root /vitess/ \ + --tablet-path $alias \ + --init_keyspace $keyspace \ + --init_shard $shard \ + --init_tablet_type $tablet_type \ + --port $port \ + --grpc_port $grpc_port \ + --service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream'` + "\n\n`$alias` needs to be of the form: `-id`, and the cell should match one of the local cells that was created in the topology. 
The id can be left padded with zeroes: `cell-100` and `cell-000000100` are synonymous.", + Args: cobra.NoArgs, + Version: servenv.AppVersion.String(), + PreRunE: servenv.CobraPreRunE, + RunE: run, + } +) + +func run(cmd *cobra.Command, args []string) error { + servenv.Init() + + tabletAlias, err := topoproto.ParseTabletAlias(tabletPath) + if err != nil { + return fmt.Errorf("failed to parse --tablet-path: %w", err) + } + + // config and mycnf initializations are intertwined. + config, mycnf, err := initConfig(tabletAlias) + if err != nil { + return err + } + + ts := topo.Open() + qsc, err := createTabletServer(context.Background(), config, ts, tabletAlias) + if err != nil { + ts.Close() + return err + } + + mysqld := mysqlctl.NewMysqld(config.DB) + servenv.OnClose(mysqld.Close) + + if err := extractOnlineDDL(); err != nil { + ts.Close() + return fmt.Errorf("failed to extract online DDL binaries: %w", err) + } + + // Initialize and start tm. + gRPCPort := int32(0) + if servenv.GRPCPort() != 0 { + gRPCPort = int32(servenv.GRPCPort()) + } + tablet, err := tabletmanager.BuildTabletFromInput(tabletAlias, int32(servenv.Port()), gRPCPort, config.DB) + if err != nil { + return fmt.Errorf("failed to parse --tablet-path: %w", err) + } + tm = &tabletmanager.TabletManager{ + BatchCtx: context.Background(), + TopoServer: ts, + Cnf: mycnf, + MysqlDaemon: mysqld, + DBConfigs: config.DB.Clone(), + QueryServiceControl: qsc, + UpdateStream: binlog.NewUpdateStream(ts, tablet.Keyspace, tabletAlias.Cell, qsc.SchemaEngine()), + VREngine: vreplication.NewEngine(config, ts, tabletAlias.Cell, mysqld, qsc.LagThrottler()), + VDiffEngine: vdiff.NewEngine(config, ts, tablet), + } + if err := tm.Start(tablet, config.Healthcheck.IntervalSeconds.Get()); err != nil { + ts.Close() + return fmt.Errorf("failed to parse --tablet-path or initialize DB credentials: %w", err) + } + servenv.OnClose(func() { + // Close the tm so that our topo entry gets pruned properly and any + // background goroutines 
that use the topo connection are stopped. + tm.Close() + + // tm uses ts. So, it should be closed after tm. + ts.Close() + }) + + servenv.RunDefault() + + return nil +} + +func initConfig(tabletAlias *topodatapb.TabletAlias) (*tabletenv.TabletConfig, *mysqlctl.Mycnf, error) { + tabletenv.Init() + // Load current config after tabletenv.Init, because it changes it. + config := tabletenv.NewCurrentConfig() + if err := config.Verify(); err != nil { + return nil, nil, fmt.Errorf("invalid config: %w", err) + } + + if tabletConfig != "" { + bytes, err := os.ReadFile(tabletConfig) + if err != nil { + return nil, nil, fmt.Errorf("error reading config file %s: %w", tabletConfig, err) + } + if err := yaml2.Unmarshal(bytes, config); err != nil { + return nil, nil, fmt.Errorf("error parsing config file %s: %w", bytes, err) + } + } + gotBytes, _ := yaml2.Marshal(config) + log.Infof("Loaded config file %s successfully:\n%s", tabletConfig, gotBytes) + + var ( + mycnf *mysqlctl.Mycnf + socketFile string + ) + // If no connection parameters were specified, load the mycnf file + // and use the socket from it. If connection parameters were specified, + // we assume that the mysql is not local, and we skip loading mycnf. + // This also means that backup and restore will not be allowed. + if !config.DB.HasGlobalSettings() { + var err error + if mycnf, err = mysqlctl.NewMycnfFromFlags(tabletAlias.Uid); err != nil { + return nil, nil, fmt.Errorf("mycnf read failed: %w", err) + } + + socketFile = mycnf.SocketFile + } else { + log.Info("connection parameters were specified. Not loading my.cnf.") + } + + // If connection parameters were specified, socketFile will be empty. + // Otherwise, the socketFile (read from mycnf) will be used to initialize + // dbconfigs. + config.DB.InitWithSocket(socketFile) + for _, cfg := range config.ExternalConnections { + cfg.InitWithSocket("") + } + return config, mycnf, nil +} + +// extractOnlineDDL extracts the gh-ost binary from this executable. 
gh-ost is appended +// to vttablet executable by `make build` with a go:embed +func extractOnlineDDL() error { + if binaryFileName, isOverride := onlineddl.GhostBinaryFileName(); !isOverride { + if err := os.WriteFile(binaryFileName, resources.GhostBinary, 0755); err != nil { + // One possibility of failure is that gh-ost is up and running. In that case, + // let's pause and check if the running gh-ost is exact same binary as the one we wish to extract. + foundBytes, _ := os.ReadFile(binaryFileName) + if bytes.Equal(resources.GhostBinary, foundBytes) { + // OK, it's the same binary, there is no need to extract the file anyway + return nil + } + return err + } + } + + return nil +} + +func createTabletServer(ctx context.Context, config *tabletenv.TabletConfig, ts *topo.Server, tabletAlias *topodatapb.TabletAlias) (*tabletserver.TabletServer, error) { + if tableACLConfig != "" { + // To override default simpleacl, other ACL plugins must set themselves to be default ACL factory + tableacl.Register("simpleacl", &simpleacl.Factory{}) + } else if enforceTableACLConfig { + return nil, fmt.Errorf("table acl config has to be specified with table-acl-config flag because enforce-tableacl-config is set.") + } + // creates and registers the query service + qsc := tabletserver.NewTabletServer(ctx, "", config, ts, tabletAlias) + servenv.OnRun(func() { + qsc.Register() + addStatusParts(qsc) + }) + servenv.OnClose(qsc.StopService) + qsc.InitACL(tableACLConfig, enforceTableACLConfig, tableACLConfigReloadInterval) + return qsc, nil +} + +func init() { + servenv.RegisterDefaultFlags() + servenv.RegisterFlags() + servenv.RegisterGRPCServerFlags() + servenv.RegisterGRPCServerAuthFlags() + servenv.RegisterServiceMapFlag() + + dbconfigs.RegisterFlags(dbconfigs.All...) 
+ mysqlctl.RegisterFlags() + + servenv.MoveFlagsToCobraCommand(Main) + + acl.RegisterFlags(Main.Flags()) + Main.Flags().BoolVar(&enforceTableACLConfig, "enforce-tableacl-config", enforceTableACLConfig, "if this flag is true, vttablet will fail to start if a valid tableacl config does not exist") + Main.Flags().StringVar(&tableACLConfig, "table-acl-config", tableACLConfig, "path to table access checker config file; send SIGHUP to reload this file") + Main.Flags().DurationVar(&tableACLConfigReloadInterval, "table-acl-config-reload-interval", tableACLConfigReloadInterval, "Ticker to reload ACLs. Duration flag, format e.g.: 30s. Default: do not reload") + Main.Flags().StringVar(&tabletPath, "tablet-path", tabletPath, "tablet alias") + Main.Flags().StringVar(&tabletConfig, "tablet_config", tabletConfig, "YAML file config for tablet") +} diff --git a/go/cmd/vttablet/plugin_azblobbackupstorage.go b/go/cmd/vttablet/cli/plugin_azblobbackupstorage.go similarity index 97% rename from go/cmd/vttablet/plugin_azblobbackupstorage.go rename to go/cmd/vttablet/cli/plugin_azblobbackupstorage.go index a4ca64096a9..bdadc894aae 100644 --- a/go/cmd/vttablet/plugin_azblobbackupstorage.go +++ b/go/cmd/vttablet/cli/plugin_azblobbackupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/azblobbackupstorage" diff --git a/go/cmd/vttablet/plugin_cephbackupstorage.go b/go/cmd/vttablet/cli/plugin_cephbackupstorage.go similarity index 97% rename from go/cmd/vttablet/plugin_cephbackupstorage.go rename to go/cmd/vttablet/cli/plugin_cephbackupstorage.go index 6cd2d5619d0..171198f5e29 100644 --- a/go/cmd/vttablet/plugin_cephbackupstorage.go +++ b/go/cmd/vttablet/cli/plugin_cephbackupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/cephbackupstorage" diff --git a/go/cmd/vttablet/plugin_consultopo.go b/go/cmd/vttablet/cli/plugin_consultopo.go similarity index 98% rename from go/cmd/vttablet/plugin_consultopo.go rename to go/cmd/vttablet/cli/plugin_consultopo.go index 59d6774fdbc..a128f294a42 100644 --- a/go/cmd/vttablet/plugin_consultopo.go +++ b/go/cmd/vttablet/cli/plugin_consultopo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports consultopo to register the consul implementation of TopoServer. diff --git a/go/cmd/topo2topo/plugin_etcd2topo.go b/go/cmd/vttablet/cli/plugin_etcd2topo.go similarity index 98% rename from go/cmd/topo2topo/plugin_etcd2topo.go rename to go/cmd/vttablet/cli/plugin_etcd2topo.go index d99ef51d4af..5a51923cf00 100644 --- a/go/cmd/topo2topo/plugin_etcd2topo.go +++ b/go/cmd/vttablet/cli/plugin_etcd2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports etcd2topo to register the etcd2 implementation of TopoServer. diff --git a/go/cmd/vtctld/plugin_filebackupstorage.go b/go/cmd/vttablet/cli/plugin_filebackupstorage.go similarity index 97% rename from go/cmd/vtctld/plugin_filebackupstorage.go rename to go/cmd/vttablet/cli/plugin_filebackupstorage.go index cf2ceb5150f..9edc82d6a1b 100644 --- a/go/cmd/vtctld/plugin_filebackupstorage.go +++ b/go/cmd/vttablet/cli/plugin_filebackupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/filebackupstorage" diff --git a/go/cmd/vttablet/plugin_filecustomrule.go b/go/cmd/vttablet/cli/plugin_filecustomrule.go similarity index 98% rename from go/cmd/vttablet/plugin_filecustomrule.go rename to go/cmd/vttablet/cli/plugin_filecustomrule.go index 854c484d3c1..1bf3c4297d5 100644 --- a/go/cmd/vttablet/plugin_filecustomrule.go +++ b/go/cmd/vttablet/cli/plugin_filecustomrule.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the file custom rule source diff --git a/go/cmd/vttablet/plugin_filelogger.go b/go/cmd/vttablet/cli/plugin_filelogger.go similarity index 98% rename from go/cmd/vttablet/plugin_filelogger.go rename to go/cmd/vttablet/cli/plugin_filelogger.go index bc5d968d2f7..fd5104f69a8 100644 --- a/go/cmd/vttablet/plugin_filelogger.go +++ b/go/cmd/vttablet/cli/plugin_filelogger.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the file-based query logger diff --git a/go/cmd/vttablet/plugin_gcsbackupstorage.go b/go/cmd/vttablet/cli/plugin_gcsbackupstorage.go similarity index 97% rename from go/cmd/vttablet/plugin_gcsbackupstorage.go rename to go/cmd/vttablet/cli/plugin_gcsbackupstorage.go index 82a22cef1da..655583c8ca2 100644 --- a/go/cmd/vttablet/plugin_gcsbackupstorage.go +++ b/go/cmd/vttablet/cli/plugin_gcsbackupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/gcsbackupstorage" diff --git a/go/cmd/vttablet/plugin_grpcbinlogplayer.go b/go/cmd/vttablet/cli/plugin_grpcbinlogplayer.go similarity index 98% rename from go/cmd/vttablet/plugin_grpcbinlogplayer.go rename to go/cmd/vttablet/cli/plugin_grpcbinlogplayer.go index f8b2380c7c7..31920b97fae 100644 --- a/go/cmd/vttablet/plugin_grpcbinlogplayer.go +++ b/go/cmd/vttablet/cli/plugin_grpcbinlogplayer.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC binlog player diff --git a/go/cmd/vttablet/plugin_grpcbinlogstreamer.go b/go/cmd/vttablet/cli/plugin_grpcbinlogstreamer.go similarity index 98% rename from go/cmd/vttablet/plugin_grpcbinlogstreamer.go rename to go/cmd/vttablet/cli/plugin_grpcbinlogstreamer.go index 26683ea7ccf..716dd499785 100644 --- a/go/cmd/vttablet/plugin_grpcbinlogstreamer.go +++ b/go/cmd/vttablet/cli/plugin_grpcbinlogstreamer.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC binlog streamer diff --git a/go/cmd/vttablet/plugin_grpcqueryservice.go b/go/cmd/vttablet/cli/plugin_grpcqueryservice.go similarity index 98% rename from go/cmd/vttablet/plugin_grpcqueryservice.go rename to go/cmd/vttablet/cli/plugin_grpcqueryservice.go index 073c2009151..a46701d16aa 100644 --- a/go/cmd/vttablet/plugin_grpcqueryservice.go +++ b/go/cmd/vttablet/cli/plugin_grpcqueryservice.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package cli // Imports and register the gRPC queryservice server diff --git a/go/cmd/vttablet/plugin_grpctabletconn.go b/go/cmd/vttablet/cli/plugin_grpctabletconn.go similarity index 98% rename from go/cmd/vttablet/plugin_grpctabletconn.go rename to go/cmd/vttablet/cli/plugin_grpctabletconn.go index 08291a7c916..4a97e36eec4 100644 --- a/go/cmd/vttablet/plugin_grpctabletconn.go +++ b/go/cmd/vttablet/cli/plugin_grpctabletconn.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC tabletconn client diff --git a/go/cmd/vttablet/plugin_grpcthrottlerserver.go b/go/cmd/vttablet/cli/plugin_grpcthrottlerserver.go similarity index 98% rename from go/cmd/vttablet/plugin_grpcthrottlerserver.go rename to go/cmd/vttablet/cli/plugin_grpcthrottlerserver.go index 40cce4bd51c..f25fdb73df3 100644 --- a/go/cmd/vttablet/plugin_grpcthrottlerserver.go +++ b/go/cmd/vttablet/cli/plugin_grpcthrottlerserver.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC throttler server. diff --git a/go/cmd/vttablet/plugin_grpctmclient.go b/go/cmd/vttablet/cli/plugin_grpctmclient.go similarity index 98% rename from go/cmd/vttablet/plugin_grpctmclient.go rename to go/cmd/vttablet/cli/plugin_grpctmclient.go index ce554da96df..8cd349c7f87 100644 --- a/go/cmd/vttablet/plugin_grpctmclient.go +++ b/go/cmd/vttablet/cli/plugin_grpctmclient.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package cli // Imports and register the gRPC tabletmanager client diff --git a/go/cmd/vttablet/plugin_grpctmserver.go b/go/cmd/vttablet/cli/plugin_grpctmserver.go similarity index 98% rename from go/cmd/vttablet/plugin_grpctmserver.go rename to go/cmd/vttablet/cli/plugin_grpctmserver.go index 094d273fe39..6dee0146c21 100644 --- a/go/cmd/vttablet/plugin_grpctmserver.go +++ b/go/cmd/vttablet/cli/plugin_grpctmserver.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC tabletmanager server diff --git a/go/cmd/vttablet/plugin_opentracing.go b/go/cmd/vttablet/cli/plugin_opentracing.go similarity index 98% rename from go/cmd/vttablet/plugin_opentracing.go rename to go/cmd/vttablet/cli/plugin_opentracing.go index 942bb25c895..f836daf4036 100644 --- a/go/cmd/vttablet/plugin_opentracing.go +++ b/go/cmd/vttablet/cli/plugin_opentracing.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( "vitess.io/vitess/go/trace" diff --git a/go/cmd/vttablet/plugin_opentsdb.go b/go/cmd/vttablet/cli/plugin_opentsdb.go similarity index 98% rename from go/cmd/vttablet/plugin_opentsdb.go rename to go/cmd/vttablet/cli/plugin_opentsdb.go index 494dbbee20d..328628c2a3d 100644 --- a/go/cmd/vttablet/plugin_opentsdb.go +++ b/go/cmd/vttablet/cli/plugin_opentsdb.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports opentsdb to register the opentsdb stats backend. 
diff --git a/go/cmd/vttablet/plugin_prometheusbackend.go b/go/cmd/vttablet/cli/plugin_prometheusbackend.go similarity index 98% rename from go/cmd/vttablet/plugin_prometheusbackend.go rename to go/cmd/vttablet/cli/plugin_prometheusbackend.go index 4066b5ba6ec..a169c6d9777 100644 --- a/go/cmd/vttablet/plugin_prometheusbackend.go +++ b/go/cmd/vttablet/cli/plugin_prometheusbackend.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports Prometheus to allow for instrumentation // with the Prometheus client library diff --git a/go/cmd/vttablet/plugin_s3backupstorage.go b/go/cmd/vttablet/cli/plugin_s3backupstorage.go similarity index 97% rename from go/cmd/vttablet/plugin_s3backupstorage.go rename to go/cmd/vttablet/cli/plugin_s3backupstorage.go index a5b5c671ebb..4b3ecb33edb 100644 --- a/go/cmd/vttablet/plugin_s3backupstorage.go +++ b/go/cmd/vttablet/cli/plugin_s3backupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/s3backupstorage" diff --git a/go/cmd/vttablet/cli/plugin_statsd.go b/go/cmd/vttablet/cli/plugin_statsd.go new file mode 100644 index 00000000000..189e0367eb0 --- /dev/null +++ b/go/cmd/vttablet/cli/plugin_statsd.go @@ -0,0 +1,22 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +package cli + +import "vitess.io/vitess/go/stats/statsd" + +func init() { + statsd.Init("vttablet") +} diff --git a/go/cmd/vttablet/plugin_sysloglogger.go b/go/cmd/vttablet/cli/plugin_sysloglogger.go similarity index 98% rename from go/cmd/vttablet/plugin_sysloglogger.go rename to go/cmd/vttablet/cli/plugin_sysloglogger.go index 4c57ad006c3..a7260d6f8cc 100644 --- a/go/cmd/vttablet/plugin_sysloglogger.go +++ b/go/cmd/vttablet/cli/plugin_sysloglogger.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the syslog-based query logger diff --git a/go/cmd/vttablet/plugin_topocustomrule.go b/go/cmd/vttablet/cli/plugin_topocustomrule.go similarity index 98% rename from go/cmd/vttablet/plugin_topocustomrule.go rename to go/cmd/vttablet/cli/plugin_topocustomrule.go index cef81458155..9fce319558e 100644 --- a/go/cmd/vttablet/plugin_topocustomrule.go +++ b/go/cmd/vttablet/cli/plugin_topocustomrule.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the topo custom rule source diff --git a/go/cmd/vttablet/plugin_zk2topo.go b/go/cmd/vttablet/cli/plugin_zk2topo.go similarity index 98% rename from go/cmd/vttablet/plugin_zk2topo.go rename to go/cmd/vttablet/cli/plugin_zk2topo.go index ebf385ec1af..d71a7e2e196 100644 --- a/go/cmd/vttablet/plugin_zk2topo.go +++ b/go/cmd/vttablet/cli/plugin_zk2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package cli // Imports and register the zk2 TopologyServer diff --git a/go/cmd/vttablet/status.go b/go/cmd/vttablet/cli/status.go similarity index 97% rename from go/cmd/vttablet/status.go rename to go/cmd/vttablet/cli/status.go index ff3b65134c7..762a9fa646e 100644 --- a/go/cmd/vttablet/status.go +++ b/go/cmd/vttablet/cli/status.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,11 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( "vitess.io/vitess/go/vt/servenv" - _ "vitess.io/vitess/go/vt/status" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" "vitess.io/vitess/go/vt/vttablet/tabletserver" diff --git a/go/cmd/vttablet/docgen/main.go b/go/cmd/vttablet/docgen/main.go new file mode 100644 index 00000000000..9915d641352 --- /dev/null +++ b/go/cmd/vttablet/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/vttablet/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.Main, dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/vttablet/plugin_kubernetestopo.go b/go/cmd/vttablet/plugin_kubernetestopo.go deleted file mode 100644 index 671d0c8321f..00000000000 --- a/go/cmd/vttablet/plugin_kubernetestopo.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// This plugin imports k8stopo to register the kubernetes implementation of TopoServer. - -import ( - _ "vitess.io/vitess/go/vt/topo/k8stopo" -) diff --git a/go/cmd/vttablet/plugin_statsd.go b/go/cmd/vttablet/plugin_statsd.go deleted file mode 100644 index 51761e6c406..00000000000 --- a/go/cmd/vttablet/plugin_statsd.go +++ /dev/null @@ -1,7 +0,0 @@ -package main - -import "vitess.io/vitess/go/stats/statsd" - -func init() { - statsd.Init("vttablet") -} diff --git a/go/cmd/vttablet/vttablet.go b/go/cmd/vttablet/vttablet.go index f879d2728e1..0f91f48b649 100644 --- a/go/cmd/vttablet/vttablet.go +++ b/go/cmd/vttablet/vttablet.go @@ -18,206 +18,12 @@ limitations under the License. 
package main import ( - "bytes" - "context" - "os" - "time" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/vt/binlog" - "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/cmd/vttablet/cli" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/mysqlctl" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/tableacl" - "vitess.io/vitess/go/vt/tableacl/simpleacl" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/vttablet/onlineddl" - "vitess.io/vitess/go/vt/vttablet/tabletmanager" - "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" - "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" - "vitess.io/vitess/go/vt/vttablet/tabletserver" - "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" - "vitess.io/vitess/go/yaml2" - "vitess.io/vitess/resources" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -var ( - enforceTableACLConfig bool - tableACLConfig string - tableACLConfigReloadInterval time.Duration - tabletPath string - tabletConfig string - - tm *tabletmanager.TabletManager ) -func registerFlags(fs *pflag.FlagSet) { - fs.BoolVar(&enforceTableACLConfig, "enforce-tableacl-config", enforceTableACLConfig, "if this flag is true, vttablet will fail to start if a valid tableacl config does not exist") - fs.StringVar(&tableACLConfig, "table-acl-config", tableACLConfig, "path to table access checker config file; send SIGHUP to reload this file") - fs.DurationVar(&tableACLConfigReloadInterval, "table-acl-config-reload-interval", tableACLConfigReloadInterval, "Ticker to reload ACLs. Duration flag, format e.g.: 30s. 
Default: do not reload") - fs.StringVar(&tabletPath, "tablet-path", tabletPath, "tablet alias") - fs.StringVar(&tabletConfig, "tablet_config", tabletConfig, "YAML file config for tablet") - - acl.RegisterFlags(fs) -} - -func init() { - servenv.RegisterDefaultFlags() - servenv.RegisterFlags() - servenv.RegisterGRPCServerFlags() - servenv.RegisterGRPCServerAuthFlags() - servenv.RegisterServiceMapFlag() - servenv.OnParseFor("vttablet", registerFlags) -} - func main() { - dbconfigs.RegisterFlags(dbconfigs.All...) - mysqlctl.RegisterFlags() - - servenv.ParseFlags("vttablet") - servenv.Init() - - if tabletPath == "" { - log.Exit("--tablet-path required") - } - tabletAlias, err := topoproto.ParseTabletAlias(tabletPath) - if err != nil { - log.Exitf("failed to parse --tablet-path: %v", err) - } - - // config and mycnf initializations are intertwined. - config, mycnf := initConfig(tabletAlias) - - ts := topo.Open() - qsc := createTabletServer(config, ts, tabletAlias) - - mysqld := mysqlctl.NewMysqld(config.DB) - servenv.OnClose(mysqld.Close) - - if err := extractOnlineDDL(); err != nil { - log.Exitf("failed to extract online DDL binaries: %v", err) - } - - // Initialize and start tm. 
- gRPCPort := int32(0) - if servenv.GRPCPort() != 0 { - gRPCPort = int32(servenv.GRPCPort()) - } - tablet, err := tabletmanager.BuildTabletFromInput(tabletAlias, int32(servenv.Port()), gRPCPort, config.DB) - if err != nil { - log.Exitf("failed to parse --tablet-path: %v", err) - } - tm = &tabletmanager.TabletManager{ - BatchCtx: context.Background(), - TopoServer: ts, - Cnf: mycnf, - MysqlDaemon: mysqld, - DBConfigs: config.DB.Clone(), - QueryServiceControl: qsc, - UpdateStream: binlog.NewUpdateStream(ts, tablet.Keyspace, tabletAlias.Cell, qsc.SchemaEngine()), - VREngine: vreplication.NewEngine(config, ts, tabletAlias.Cell, mysqld, qsc.LagThrottler()), - VDiffEngine: vdiff.NewEngine(config, ts, tablet), - } - if err := tm.Start(tablet, config.Healthcheck.IntervalSeconds.Get()); err != nil { - log.Exitf("failed to parse --tablet-path or initialize DB credentials: %v", err) - } - servenv.OnClose(func() { - // Close the tm so that our topo entry gets pruned properly and any - // background goroutines that use the topo connection are stopped. - tm.Close() - - // tm uses ts. So, it should be closed after tm. - ts.Close() - }) - - servenv.RunDefault() -} - -func initConfig(tabletAlias *topodatapb.TabletAlias) (*tabletenv.TabletConfig, *mysqlctl.Mycnf) { - tabletenv.Init() - // Load current config after tabletenv.Init, because it changes it. 
- config := tabletenv.NewCurrentConfig() - if err := config.Verify(); err != nil { - log.Exitf("invalid config: %v", err) - } - - if tabletConfig != "" { - bytes, err := os.ReadFile(tabletConfig) - if err != nil { - log.Exitf("error reading config file %s: %v", tabletConfig, err) - } - if err := yaml2.Unmarshal(bytes, config); err != nil { - log.Exitf("error parsing config file %s: %v", bytes, err) - } - } - gotBytes, _ := yaml2.Marshal(config) - log.Infof("Loaded config file %s successfully:\n%s", tabletConfig, gotBytes) - - var mycnf *mysqlctl.Mycnf - var socketFile string - // If no connection parameters were specified, load the mycnf file - // and use the socket from it. If connection parameters were specified, - // we assume that the mysql is not local, and we skip loading mycnf. - // This also means that backup and restore will not be allowed. - if !config.DB.HasGlobalSettings() { - var err error - if mycnf, err = mysqlctl.NewMycnfFromFlags(tabletAlias.Uid); err != nil { - log.Exitf("mycnf read failed: %v", err) - } - socketFile = mycnf.SocketFile - } else { - log.Info("connection parameters were specified. Not loading my.cnf.") - } - - // If connection parameters were specified, socketFile will be empty. - // Otherwise, the socketFile (read from mycnf) will be used to initialize - // dbconfigs. - config.DB.InitWithSocket(socketFile) - for _, cfg := range config.ExternalConnections { - cfg.InitWithSocket("") - } - return config, mycnf -} - -// extractOnlineDDL extracts the gh-ost binary from this executable. gh-ost is appended -// to vttablet executable by `make build` with a go:embed -func extractOnlineDDL() error { - if binaryFileName, isOverride := onlineddl.GhostBinaryFileName(); !isOverride { - if err := os.WriteFile(binaryFileName, resources.GhostBinary, 0755); err != nil { - // One possibility of failure is that gh-ost is up and running. In that case, - // let's pause and check if the running gh-ost is exact same binary as the one we wish to extract. 
- foundBytes, _ := os.ReadFile(binaryFileName) - if bytes.Equal(resources.GhostBinary, foundBytes) { - // OK, it's the same binary, there is no need to extract the file anyway - return nil - } - return err - } - } - - return nil -} - -func createTabletServer(config *tabletenv.TabletConfig, ts *topo.Server, tabletAlias *topodatapb.TabletAlias) *tabletserver.TabletServer { - if tableACLConfig != "" { - // To override default simpleacl, other ACL plugins must set themselves to be default ACL factory - tableacl.Register("simpleacl", &simpleacl.Factory{}) - } else if enforceTableACLConfig { - log.Exit("table acl config has to be specified with table-acl-config flag because enforce-tableacl-config is set.") + if err := cli.Main.Execute(); err != nil { + log.Exit(err) } - // creates and registers the query service - qsc := tabletserver.NewTabletServer("", config, ts, tabletAlias) - servenv.OnRun(func() { - qsc.Register() - addStatusParts(qsc) - }) - servenv.OnClose(qsc.StopService) - qsc.InitACL(tableACLConfig, enforceTableACLConfig, tableACLConfigReloadInterval) - return qsc } diff --git a/go/cmd/vttestserver/data/schema/app_customer/v001__create_customer_table.sql b/go/cmd/vttestserver/cli/data/schema/app_customer/v001__create_customer_table.sql similarity index 100% rename from go/cmd/vttestserver/data/schema/app_customer/v001__create_customer_table.sql rename to go/cmd/vttestserver/cli/data/schema/app_customer/v001__create_customer_table.sql diff --git a/go/cmd/vttestserver/data/schema/app_customer/v002__add_customer_vschema.sql b/go/cmd/vttestserver/cli/data/schema/app_customer/v002__add_customer_vschema.sql similarity index 100% rename from go/cmd/vttestserver/data/schema/app_customer/v002__add_customer_vschema.sql rename to go/cmd/vttestserver/cli/data/schema/app_customer/v002__add_customer_vschema.sql diff --git a/go/cmd/vttestserver/data/schema/app_customer/vschema.json b/go/cmd/vttestserver/cli/data/schema/app_customer/vschema.json similarity index 100% rename 
from go/cmd/vttestserver/data/schema/app_customer/vschema.json rename to go/cmd/vttestserver/cli/data/schema/app_customer/vschema.json diff --git a/go/cmd/vttestserver/data/schema/test_keyspace/v001__create_test_table.sql b/go/cmd/vttestserver/cli/data/schema/test_keyspace/v001__create_test_table.sql similarity index 100% rename from go/cmd/vttestserver/data/schema/test_keyspace/v001__create_test_table.sql rename to go/cmd/vttestserver/cli/data/schema/test_keyspace/v001__create_test_table.sql diff --git a/go/cmd/vttestserver/data/schema/test_keyspace/v002__create_hash_vindex.sql b/go/cmd/vttestserver/cli/data/schema/test_keyspace/v002__create_hash_vindex.sql similarity index 100% rename from go/cmd/vttestserver/data/schema/test_keyspace/v002__create_hash_vindex.sql rename to go/cmd/vttestserver/cli/data/schema/test_keyspace/v002__create_hash_vindex.sql diff --git a/go/cmd/vttestserver/data/schema/test_keyspace/v003__add_table_vschema.sql b/go/cmd/vttestserver/cli/data/schema/test_keyspace/v003__add_table_vschema.sql similarity index 100% rename from go/cmd/vttestserver/data/schema/test_keyspace/v003__add_table_vschema.sql rename to go/cmd/vttestserver/cli/data/schema/test_keyspace/v003__add_table_vschema.sql diff --git a/go/cmd/vttestserver/data/schema/test_keyspace/v004__create_test_table1.sql b/go/cmd/vttestserver/cli/data/schema/test_keyspace/v004__create_test_table1.sql similarity index 100% rename from go/cmd/vttestserver/data/schema/test_keyspace/v004__create_test_table1.sql rename to go/cmd/vttestserver/cli/data/schema/test_keyspace/v004__create_test_table1.sql diff --git a/go/cmd/vttestserver/cli/main.go b/go/cmd/vttestserver/cli/main.go new file mode 100644 index 00000000000..f9a2f16cd87 --- /dev/null +++ b/go/cmd/vttestserver/cli/main.go @@ -0,0 +1,308 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// vttestserver allows users to spawn a self-contained Vitess server for local testing/CI. +package cli + +import ( + "encoding/json" + "fmt" + "os" + "os/signal" + "strconv" + "strings" + "syscall" + "time" + + "github.com/spf13/cobra" + "google.golang.org/protobuf/encoding/prototext" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vttest" + + vttestpb "vitess.io/vitess/go/vt/proto/vttest" +) + +type topoFlags struct { + cells []string + keyspaces []string + shards []string + replicas int + rdonly int +} + +var ( + basePort int + config vttest.Config + doSeed bool + mycnf string + protoTopo string + seed vttest.SeedConfig + topo topoFlags +) + +func (t *topoFlags) buildTopology() (*vttestpb.VTTestTopology, error) { + topo := &vttestpb.VTTestTopology{} + topo.Cells = t.cells + + keyspaces := t.keyspaces + shardCounts := t.shards + if len(keyspaces) != len(shardCounts) { + return nil, fmt.Errorf("--keyspaces must be same length as --shards") + } + + for i := range keyspaces { + name := keyspaces[i] + numshards, err := strconv.ParseInt(shardCounts[i], 10, 32) + if err != nil { + return nil, err + } + + ks := &vttestpb.Keyspace{ + Name: name, + ReplicaCount: int32(t.replicas), + RdonlyCount: int32(t.rdonly), + } + + for _, shardname := range vttest.GetShardNames(int(numshards)) { + ks.Shards = append(ks.Shards, &vttestpb.Shard{ + Name: shardname, + }) + } + + topo.Keyspaces = append(topo.Keyspaces, ks) + } + + return topo, nil +} + +func init() { + servenv.RegisterFlags() + 
servenv.RegisterGRPCServerFlags() + servenv.RegisterGRPCServerAuthFlags() + servenv.RegisterServiceMapFlag() +} + +func New() (cmd *cobra.Command) { + cmd = &cobra.Command{ + Use: "vttestserver", + Short: "vttestserver allows users to spawn a self-contained Vitess server for local testing/CI.", + Args: cobra.NoArgs, + PreRunE: servenv.CobraPreRunE, + RunE: run, + } + + servenv.MoveFlagsToCobraCommand(cmd) + + cmd.Flags().IntVar(&basePort, "port", 0, + "Port to use for vtcombo. If this is 0, a random port will be chosen.") + + cmd.Flags().StringVar(&protoTopo, "proto_topo", "", + "Define the fake cluster topology as a compact text format encoded"+ + " vttest proto. See vttest.proto for more information.") + + cmd.Flags().StringVar(&config.SchemaDir, "schema_dir", "", + "Directory for initial schema files. Within this dir,"+ + " there should be a subdir for each keyspace. Within"+ + " each keyspace dir, each file is executed as SQL"+ + " after the database is created on each shard."+ + " If the directory contains a vschema.json file, it"+ + " will be used as the vschema for the V3 API.") + + cmd.Flags().StringVar(&config.DefaultSchemaDir, "default_schema_dir", "", + "Default directory for initial schema files. If no schema is found"+ + " in schema_dir, default to this location.") + + cmd.Flags().StringVar(&config.DataDir, "data_dir", "", + "Directory where the data files will be placed, defaults to a random "+ + "directory under /vt/vtdataroot") + + cmd.Flags().BoolVar(&config.OnlyMySQL, "mysql_only", false, + "If this flag is set only mysql is initialized."+ + " The rest of the vitess components are not started."+ + " Also, the output specifies the mysql unix socket"+ + " instead of the vtgate port.") + + cmd.Flags().BoolVar(&config.PersistentMode, "persistent_mode", false, + "If this flag is set, the MySQL data directory is not cleaned up"+ + " when LocalCluster.TearDown() is called. 
This is useful for running"+ + " vttestserver as a database container in local developer environments. Note"+ + " that db migration files (--schema_dir option) and seeding of"+ + " random data (--initialize_with_random_data option) will only run during"+ + " cluster startup if the data directory does not already exist. "+ + " Changes to VSchema are persisted across cluster restarts using a simple"+ + " watcher if the --data_dir argument is specified.") + + cmd.Flags().BoolVar(&doSeed, "initialize_with_random_data", false, + "If this flag is each table-shard will be initialized"+ + " with random data. See also the 'rng_seed' and 'min_shard_size'"+ + " and 'max_shard_size' flags.") + + cmd.Flags().IntVar(&seed.RngSeed, "rng_seed", 123, + "The random number generator seed to use when initializing"+ + " with random data (see also --initialize_with_random_data)."+ + " Multiple runs with the same seed will result with the same"+ + " initial data.") + + cmd.Flags().IntVar(&seed.MinSize, "min_table_shard_size", 1000, + "The minimum number of initial rows in a table shard. Ignored if"+ + "--initialize_with_random_data is false. The actual number is chosen"+ + " randomly.") + + cmd.Flags().IntVar(&seed.MaxSize, "max_table_shard_size", 10000, + "The maximum number of initial rows in a table shard. Ignored if"+ + "--initialize_with_random_data is false. The actual number is chosen"+ + " randomly") + + cmd.Flags().Float64Var(&seed.NullProbability, "null_probability", 0.1, + "The probability to initialize a field with 'NULL' "+ + " if --initialize_with_random_data is true. 
Only applies to fields"+ + " that can contain NULL values.") + + cmd.Flags().StringVar(&config.MySQLBindHost, "mysql_bind_host", "localhost", + "which host to bind vtgate mysql listener to") + + cmd.Flags().StringVar(&mycnf, "extra_my_cnf", "", + "extra files to add to the config, separated by ':'") + + cmd.Flags().StringSliceVar(&topo.cells, "cells", []string{"test"}, "Comma separated list of cells") + cmd.Flags().StringSliceVar(&topo.keyspaces, "keyspaces", []string{"test_keyspace"}, + "Comma separated list of keyspaces") + cmd.Flags().StringSliceVar(&topo.shards, "num_shards", []string{"2"}, + "Comma separated shard count (one per keyspace)") + cmd.Flags().IntVar(&topo.replicas, "replica_count", 2, + "Replica tablets per shard (includes primary)") + cmd.Flags().IntVar(&topo.rdonly, "rdonly_count", 1, + "Rdonly tablets per shard") + + cmd.Flags().StringVar(&config.Charset, "charset", "utf8mb4", "MySQL charset") + + cmd.Flags().StringVar(&config.PlannerVersion, "planner-version", "", "Sets the default planner to use when the session has not changed it. 
Valid values are: Gen4, Gen4Greedy, Gen4Left2Right") + + cmd.Flags().StringVar(&config.SnapshotFile, "snapshot_file", "", + "A MySQL DB snapshot file") + + cmd.Flags().BoolVar(&config.EnableSystemSettings, "enable_system_settings", true, "This will enable the system settings to be changed per session at the database connection level") + + cmd.Flags().StringVar(&config.TransactionMode, "transaction_mode", "MULTI", "Transaction mode MULTI (default), SINGLE or TWOPC ") + cmd.Flags().Float64Var(&config.TransactionTimeout, "queryserver-config-transaction-timeout", 0, "query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value") + + cmd.Flags().StringVar(&config.TabletHostName, "tablet_hostname", "localhost", "The hostname to use for the tablet otherwise it will be derived from OS' hostname") + + cmd.Flags().StringVar(&config.VSchemaDDLAuthorizedUsers, "vschema_ddl_authorized_users", "", "Comma separated list of users authorized to execute vschema ddl operations via vtgate") + + cmd.Flags().StringVar(&config.ForeignKeyMode, "foreign_key_mode", "allow", "This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow") + cmd.Flags().BoolVar(&config.EnableOnlineDDL, "enable_online_ddl", true, "Allow users to submit, review and control Online DDL") + cmd.Flags().BoolVar(&config.EnableDirectDDL, "enable_direct_ddl", true, "Allow users to submit direct DDL statements") + + // flags for using an actual topo implementation for vtcombo instead of in-memory topo. 
useful for test setup where an external topo server is shared across multiple vtcombo processes or other components + cmd.Flags().StringVar(&config.ExternalTopoImplementation, "external_topo_implementation", "", "the topology implementation to use for vtcombo process") + cmd.Flags().StringVar(&config.ExternalTopoGlobalServerAddress, "external_topo_global_server_address", "", "the address of the global topology server for vtcombo process") + cmd.Flags().StringVar(&config.ExternalTopoGlobalRoot, "external_topo_global_root", "", "the path of the global topology data in the global topology server for vtcombo process") + + cmd.Flags().DurationVar(&config.VtgateTabletRefreshInterval, "tablet_refresh_interval", 10*time.Second, "Interval at which vtgate refreshes tablet information from topology server.") + acl.RegisterFlags(cmd.Flags()) + + return cmd +} + +func newEnv() (env vttest.Environment, err error) { + if basePort != 0 { + if config.DataDir == "" { + env, err = vttest.NewLocalTestEnv("", basePort) + if err != nil { + return + } + } else { + env, err = vttest.NewLocalTestEnvWithDirectory("", basePort, config.DataDir) + if err != nil { + return + } + } + } + + if protoTopo == "" { + config.Topology, err = topo.buildTopology() + if err != nil { + return + } + } else { + var topology vttestpb.VTTestTopology + err = prototext.Unmarshal([]byte(protoTopo), &topology) + if err != nil { + return + } + if len(topology.Cells) == 0 { + topology.Cells = append(topology.Cells, "test") + } + config.Topology = &topology + } + + if doSeed { + config.Seed = &seed + } + + if mycnf != "" { + config.ExtraMyCnf = strings.Split(mycnf, ":") + } + + return +} + +func run(cmd *cobra.Command, args []string) error { + cluster, err := runCluster() + if err != nil { + return err + } + defer cluster.TearDown() + + servenv.Init() + + kvconf := cluster.JSONConfig() + if err := json.NewEncoder(os.Stdout).Encode(kvconf); err != nil { + return err + } + + c := make(chan os.Signal, 1) + 
signal.Notify(c, os.Interrupt, syscall.SIGTERM) + <-c + + return nil +} + +func runCluster() (cluster vttest.LocalCluster, err error) { + env, err := newEnv() + if err != nil { + return + } + + log.Infof("Starting local cluster...") + log.Infof("config: %#v", config) + cluster = vttest.LocalCluster{ + Config: config, + Env: env, + } + err = cluster.Setup() + if err != nil { + return cluster, err + } + + log.Info("Local cluster started.") + + return cluster, nil +} diff --git a/go/cmd/vttestserver/vttestserver_test.go b/go/cmd/vttestserver/cli/main_test.go similarity index 89% rename from go/cmd/vttestserver/vttestserver_test.go rename to go/cmd/vttestserver/cli/main_test.go index 0665d5f9c46..39dc8e4ea78 100644 --- a/go/cmd/vttestserver/vttestserver_test.go +++ b/go/cmd/vttestserver/cli/main_test.go @@ -14,14 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( "context" "fmt" "io" "math/rand" - "os" "os/exec" "path" "strings" @@ -54,9 +53,8 @@ type columnVindex struct { } func TestRunsVschemaMigrations(t *testing.T) { - args := os.Args conf := config - defer resetFlags(args, conf) + defer resetConfig(conf) cluster, err := startCluster() defer cluster.TearDown() @@ -72,17 +70,22 @@ func TestRunsVschemaMigrations(t *testing.T) { } func TestPersistentMode(t *testing.T) { - args := os.Args conf := config - defer resetFlags(args, conf) + defer resetConfig(conf) dir := t.TempDir() cluster, err := startPersistentCluster(dir) assert.NoError(t, err) - // basic sanity checks similar to TestRunsVschemaMigrations + // Add a new "ad-hoc" vindex via vtgate once the cluster is up, to later make sure it is persisted across teardowns + err = addColumnVindex(cluster, "test_keyspace", "alter vschema on persistence_test add vindex my_vdx(id)") + assert.NoError(t, err) + + // Basic sanity checks similar to TestRunsVschemaMigrations + // See go/cmd/vttestserver/data/schema/app_customer/* and 
go/cmd/vttestserver/data/schema/test_keyspace/* assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table", vindex: "my_vdx", vindexType: "hash", column: "id"}) + assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "persistence_test", vindex: "my_vdx", vindexType: "hash", column: "id"}) assertColumnVindex(t, cluster, columnVindex{keyspace: "app_customer", table: "customers", vindex: "hash", vindexType: "hash", column: "id"}) // insert some data to ensure persistence across teardowns @@ -108,11 +111,15 @@ func TestPersistentMode(t *testing.T) { // reboot the persistent cluster cluster.TearDown() cluster, err = startPersistentCluster(dir) - defer cluster.TearDown() + defer func() { + cluster.PersistentMode = false // Cleanup the tmpdir as we're done + cluster.TearDown() + }() assert.NoError(t, err) - // rerun our sanity checks to make sure vschema migrations are run during every startup + // rerun our sanity checks to make sure vschema is persisted correctly assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table", vindex: "my_vdx", vindexType: "hash", column: "id"}) + assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "persistence_test", vindex: "my_vdx", vindexType: "hash", column: "id"}) assertColumnVindex(t, cluster, columnVindex{keyspace: "app_customer", table: "customers", vindex: "hash", vindexType: "hash", column: "id"}) // ensure previous data was successfully persisted @@ -125,9 +132,8 @@ func TestPersistentMode(t *testing.T) { } func TestForeignKeysAndDDLModes(t *testing.T) { - args := os.Args conf := config - defer resetFlags(args, conf) + defer resetConfig(conf) cluster, err := startCluster("--foreign_key_mode=allow", "--enable_online_ddl=true", "--enable_direct_ddl=true") assert.NoError(t, err) @@ -180,9 +186,8 @@ func TestForeignKeysAndDDLModes(t *testing.T) { } func TestCanGetKeyspaces(t *testing.T) { - args := os.Args conf := config 
- defer resetFlags(args, conf) + defer resetConfig(conf) cluster, err := startCluster() assert.NoError(t, err) @@ -192,9 +197,8 @@ func TestCanGetKeyspaces(t *testing.T) { } func TestExternalTopoServerConsul(t *testing.T) { - args := os.Args conf := config - defer resetFlags(args, conf) + defer resetConfig(conf) // Start a single consul in the background. cmd, serverAddr := startConsul(t) @@ -218,9 +222,8 @@ func TestExternalTopoServerConsul(t *testing.T) { } func TestMtlsAuth(t *testing.T) { - args := os.Args conf := config - defer resetFlags(args, conf) + defer resetConfig(conf) // Our test root. root := t.TempDir() @@ -249,7 +252,10 @@ func TestMtlsAuth(t *testing.T) { fmt.Sprintf("--vtctld_grpc_ca=%s", caCert), fmt.Sprintf("--grpc_auth_mtls_allowed_substrings=%s", "CN=ClientApp")) assert.NoError(t, err) - defer cluster.TearDown() + defer func() { + cluster.PersistentMode = false // Cleanup the tmpdir as we're done + cluster.TearDown() + }() // startCluster will apply vschema migrations using vtctl grpc and the clientCert. assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table", vindex: "my_vdx", vindexType: "hash", column: "id"}) @@ -257,9 +263,8 @@ func TestMtlsAuth(t *testing.T) { } func TestMtlsAuthUnauthorizedFails(t *testing.T) { - args := os.Args conf := config - defer resetFlags(args, conf) + defer resetConfig(conf) // Our test root. 
root := t.TempDir() @@ -309,15 +314,21 @@ var clusterKeyspaces = []string{ "app_customer", } -func startCluster(flags ...string) (vttest.LocalCluster, error) { - os.Args = []string{"vttestserver"} +func startCluster(flags ...string) (cluster vttest.LocalCluster, err error) { + args := []string{"vttestserver"} schemaDirArg := "--schema_dir=data/schema" tabletHostname := "--tablet_hostname=localhost" keyspaceArg := "--keyspaces=" + strings.Join(clusterKeyspaces, ",") numShardsArg := "--num_shards=2,2" vschemaDDLAuthorizedUsers := "--vschema_ddl_authorized_users=%" - os.Args = append(os.Args, []string{schemaDirArg, keyspaceArg, numShardsArg, tabletHostname, vschemaDDLAuthorizedUsers}...) - os.Args = append(os.Args, flags...) + alsoLogToStderr := "--alsologtostderr" // better debugging + args = append(args, []string{schemaDirArg, keyspaceArg, numShardsArg, tabletHostname, vschemaDDLAuthorizedUsers, alsoLogToStderr}...) + args = append(args, flags...) + + if err = New().ParseFlags(args); err != nil { + return + } + return runCluster() } @@ -370,8 +381,7 @@ func assertEqual(t *testing.T, actual string, expected string, message string) { } } -func resetFlags(args []string, conf vttest.Config) { - os.Args = args +func resetConfig(conf vttest.Config) { config = conf } diff --git a/go/cmd/vttestserver/docgen/main.go b/go/cmd/vttestserver/docgen/main.go new file mode 100644 index 00000000000..61f982e2e56 --- /dev/null +++ b/go/cmd/vttestserver/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/vttestserver/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.New(), dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/vttestserver/main.go b/go/cmd/vttestserver/main.go index d15d26c2894..95e63fa8019 100644 --- a/go/cmd/vttestserver/main.go +++ b/go/cmd/vttestserver/main.go @@ -14,293 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -// vttestserver allows users to spawn a self-contained Vitess server for local testing/CI. package main import ( - "encoding/json" - "fmt" - "os" - "os/signal" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "github.com/spf13/pflag" - "google.golang.org/protobuf/encoding/prototext" - - "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/cmd/vttestserver/cli" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vttest" - - vttestpb "vitess.io/vitess/go/vt/proto/vttest" ) -type topoFlags struct { - cells []string - keyspaces []string - shards []string - replicas int - rdonly int -} - -var ( - basePort int - config vttest.Config - doSeed bool - mycnf string - protoTopo string - seed vttest.SeedConfig - topo topoFlags -) - -func registerFlags(fs *pflag.FlagSet) { - fs.IntVar(&basePort, "port", 0, - "Port to use for vtcombo. If this is 0, a random port will be chosen.") - - fs.StringVar(&protoTopo, "proto_topo", "", - "Define the fake cluster topology as a compact text format encoded"+ - " vttest proto. 
See vttest.proto for more information.") - - fs.StringVar(&config.SchemaDir, "schema_dir", "", - "Directory for initial schema files. Within this dir,"+ - " there should be a subdir for each keyspace. Within"+ - " each keyspace dir, each file is executed as SQL"+ - " after the database is created on each shard."+ - " If the directory contains a vschema.json file, it"+ - " will be used as the vschema for the V3 API.") - - fs.StringVar(&config.DefaultSchemaDir, "default_schema_dir", "", - "Default directory for initial schema files. If no schema is found"+ - " in schema_dir, default to this location.") - - fs.StringVar(&config.DataDir, "data_dir", "", - "Directory where the data files will be placed, defaults to a random "+ - "directory under /vt/vtdataroot") - - fs.BoolVar(&config.OnlyMySQL, "mysql_only", false, - "If this flag is set only mysql is initialized."+ - " The rest of the vitess components are not started."+ - " Also, the output specifies the mysql unix socket"+ - " instead of the vtgate port.") - - fs.BoolVar(&config.PersistentMode, "persistent_mode", false, - "If this flag is set, the MySQL data directory is not cleaned up"+ - " when LocalCluster.TearDown() is called. This is useful for running"+ - " vttestserver as a database container in local developer environments. Note"+ - " that db migration files (--schema_dir option) and seeding of"+ - " random data (--initialize_with_random_data option) will only run during"+ - " cluster startup if the data directory does not already exist. vschema"+ - " migrations are run every time the cluster starts, since persistence"+ - " for the topology server has not been implemented yet") - - fs.BoolVar(&doSeed, "initialize_with_random_data", false, - "If this flag is each table-shard will be initialized"+ - " with random data. 
See also the 'rng_seed' and 'min_shard_size'"+ - " and 'max_shard_size' flags.") - - fs.IntVar(&seed.RngSeed, "rng_seed", 123, - "The random number generator seed to use when initializing"+ - " with random data (see also --initialize_with_random_data)."+ - " Multiple runs with the same seed will result with the same"+ - " initial data.") - - fs.IntVar(&seed.MinSize, "min_table_shard_size", 1000, - "The minimum number of initial rows in a table shard. Ignored if"+ - "--initialize_with_random_data is false. The actual number is chosen"+ - " randomly.") - - fs.IntVar(&seed.MaxSize, "max_table_shard_size", 10000, - "The maximum number of initial rows in a table shard. Ignored if"+ - "--initialize_with_random_data is false. The actual number is chosen"+ - " randomly") - - fs.Float64Var(&seed.NullProbability, "null_probability", 0.1, - "The probability to initialize a field with 'NULL' "+ - " if --initialize_with_random_data is true. Only applies to fields"+ - " that can contain NULL values.") - - fs.StringVar(&config.MySQLBindHost, "mysql_bind_host", "localhost", - "which host to bind vtgate mysql listener to") - - fs.StringVar(&mycnf, "extra_my_cnf", "", - "extra files to add to the config, separated by ':'") - - fs.StringSliceVar(&topo.cells, "cells", []string{"test"}, "Comma separated list of cells") - fs.StringSliceVar(&topo.keyspaces, "keyspaces", []string{"test_keyspace"}, - "Comma separated list of keyspaces") - fs.StringSliceVar(&topo.shards, "num_shards", []string{"2"}, - "Comma separated shard count (one per keyspace)") - fs.IntVar(&topo.replicas, "replica_count", 2, - "Replica tablets per shard (includes primary)") - fs.IntVar(&topo.rdonly, "rdonly_count", 1, - "Rdonly tablets per shard") - - fs.StringVar(&config.Charset, "charset", "utf8mb4", "MySQL charset") - - fs.StringVar(&config.PlannerVersion, "planner-version", "", "Sets the default planner to use when the session has not changed it. Valid values are: V3, V3Insert, Gen4, Gen4Greedy and Gen4Fallback. 
Gen4Fallback tries the new gen4 planner and falls back to the V3 planner if the gen4 fails.") - - fs.StringVar(&config.SnapshotFile, "snapshot_file", "", - "A MySQL DB snapshot file") - - fs.BoolVar(&config.EnableSystemSettings, "enable_system_settings", true, "This will enable the system settings to be changed per session at the database connection level") - - fs.StringVar(&config.TransactionMode, "transaction_mode", "MULTI", "Transaction mode MULTI (default), SINGLE or TWOPC ") - fs.Float64Var(&config.TransactionTimeout, "queryserver-config-transaction-timeout", 0, "query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value") - - fs.StringVar(&config.TabletHostName, "tablet_hostname", "localhost", "The hostname to use for the tablet otherwise it will be derived from OS' hostname") - - fs.StringVar(&config.VSchemaDDLAuthorizedUsers, "vschema_ddl_authorized_users", "", "Comma separated list of users authorized to execute vschema ddl operations via vtgate") - - fs.StringVar(&config.ForeignKeyMode, "foreign_key_mode", "allow", "This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow") - fs.BoolVar(&config.EnableOnlineDDL, "enable_online_ddl", true, "Allow users to submit, review and control Online DDL") - fs.BoolVar(&config.EnableDirectDDL, "enable_direct_ddl", true, "Allow users to submit direct DDL statements") - - // flags for using an actual topo implementation for vtcombo instead of in-memory topo. 
useful for test setup where an external topo server is shared across multiple vtcombo processes or other components - fs.StringVar(&config.ExternalTopoImplementation, "external_topo_implementation", "", "the topology implementation to use for vtcombo process") - fs.StringVar(&config.ExternalTopoGlobalServerAddress, "external_topo_global_server_address", "", "the address of the global topology server for vtcombo process") - fs.StringVar(&config.ExternalTopoGlobalRoot, "external_topo_global_root", "", "the path of the global topology data in the global topology server for vtcombo process") - - fs.DurationVar(&config.VtgateTabletRefreshInterval, "tablet_refresh_interval", 10*time.Second, "Interval at which vtgate refreshes tablet information from topology server.") - acl.RegisterFlags(fs) -} - -func init() { - servenv.OnParseFor("vttestserver", registerFlags) -} - -func (t *topoFlags) buildTopology() (*vttestpb.VTTestTopology, error) { - topo := &vttestpb.VTTestTopology{} - topo.Cells = t.cells - - keyspaces := t.keyspaces - shardCounts := t.shards - if len(keyspaces) != len(shardCounts) { - return nil, fmt.Errorf("--keyspaces must be same length as --shards") - } - - for i := range keyspaces { - name := keyspaces[i] - numshards, err := strconv.ParseInt(shardCounts[i], 10, 32) - if err != nil { - return nil, err - } - - ks := &vttestpb.Keyspace{ - Name: name, - ReplicaCount: int32(t.replicas), - RdonlyCount: int32(t.rdonly), - } - - for _, shardname := range vttest.GetShardNames(int(numshards)) { - ks.Shards = append(ks.Shards, &vttestpb.Shard{ - Name: shardname, - }) - } - - topo.Keyspaces = append(topo.Keyspaces, ks) - } - - return topo, nil -} - -// Annoying, but in unit tests, parseFlags gets called multiple times per process -// (anytime startCluster is called), so we need to guard against the second test -// to run failing with, for example: -// -// flag redefined: log_rotate_max_size -var flagsOnce sync.Once - -func parseFlags() (env vttest.Environment, err 
error) { - flagsOnce.Do(func() { - servenv.RegisterFlags() - servenv.RegisterGRPCServerFlags() - servenv.RegisterGRPCServerAuthFlags() - servenv.RegisterServiceMapFlag() - }) - - servenv.ParseFlags("vttestserver") - - if basePort != 0 { - if config.DataDir == "" { - env, err = vttest.NewLocalTestEnv("", basePort) - if err != nil { - return - } - } else { - env, err = vttest.NewLocalTestEnvWithDirectory("", basePort, config.DataDir) - if err != nil { - return - } - } - } - - if protoTopo == "" { - config.Topology, err = topo.buildTopology() - if err != nil { - return - } - } else { - var topology vttestpb.VTTestTopology - err = prototext.Unmarshal([]byte(protoTopo), &topology) - if err != nil { - return - } - if len(topology.Cells) == 0 { - topology.Cells = append(topology.Cells, "test") - } - config.Topology = &topology - } - - if doSeed { - config.Seed = &seed - } - - if mycnf != "" { - config.ExtraMyCnf = strings.Split(mycnf, ":") - } - - return -} - func main() { - cluster, err := runCluster() - servenv.Init() - if err != nil { + if err := cli.New().Execute(); err != nil { log.Fatal(err) } - defer cluster.TearDown() - - kvconf := cluster.JSONConfig() - if err := json.NewEncoder(os.Stdout).Encode(kvconf); err != nil { - log.Fatal(err) - } - - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - <-c -} - -func runCluster() (vttest.LocalCluster, error) { - env, err := parseFlags() - if err != nil { - log.Fatal(err) - } - log.Infof("Starting local cluster...") - log.Infof("config: %#v", config) - cluster := vttest.LocalCluster{ - Config: config, - Env: env, - } - err = cluster.Setup() - if err != nil { - return cluster, err - } - - log.Info("Local cluster started.") - - return cluster, nil } diff --git a/go/cmd/vttlstest/cli/vttlstest.go b/go/cmd/vttlstest/cli/vttlstest.go new file mode 100644 index 00000000000..4e0f9c2b95e --- /dev/null +++ b/go/cmd/vttlstest/cli/vttlstest.go @@ -0,0 +1,135 @@ +/* +Copyright 2023 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/vt/tlstest" +) + +var ( + root = "." + parent = "ca" + serial = "01" + commonName string + + Root = &cobra.Command{ + Use: "vttlstest", + Short: "vttlstest is a tool for generating test certificates, keys, and related artifacts for TLS tests.", + Long: "vttlstest is a tool for generating test certificates, keys, and related artifacts for TLS tests.", + } + + createCACmd = &cobra.Command{ + Use: "CreateCA [--root ]", + DisableFlagsInUseLine: true, + Example: "CreateCA --root /tmp", + Short: "Create certificate authority", + Long: "Create certificate authority", + Args: cobra.NoArgs, + Run: runCreateCA, + } + + createIntermediateCACmd = &cobra.Command{ + Use: "CreateIntermediateCA [--root ] [--parent ] [--serial ] [--common-name ] ", + DisableFlagsInUseLine: true, + Example: "CreateIntermediateCA --root /tmp --parent ca mail.mycoolsite.com", + Short: "Create intermediate certificate authority", + Long: "Create intermediate certificate authority", + Args: cobra.ExactArgs(1), + Run: runCreateIntermediateCA, + } + + createCRLCmd = &cobra.Command{ + Use: "CreateCRL [--root ] ", + DisableFlagsInUseLine: true, + Example: "CreateCRL --root /tmp mail.mycoolsite.com", + Short: "Create certificate revocation list", + Long: "Create certificate revocation list", + Args: cobra.ExactArgs(1), + Run: runCreateCRL, + } + + createSignedCertCmd = &cobra.Command{ + Use: 
"CreateSignedCert [--root ] [--parent ] [--serial ] [--common-name ] ", + DisableFlagsInUseLine: true, + Example: "CreateSignedCert --root /tmp --common-name mail.mysite.com --parent mail.mycoolsite.com postman1", + Short: "Create signed certificate", + Long: "Create signed certificate", + Args: cobra.ExactArgs(1), + Run: runCreateSignedCert, + } + + revokeCertCmd = &cobra.Command{ + Use: "RevokeCert [--root ] [--parent ] ", + DisableFlagsInUseLine: true, + Example: "RevokeCert --root /tmp --parent mail.mycoolsite.com postman1", + Short: "Revoke a certificate", + Long: "Revoke a certificate", + Args: cobra.ExactArgs(1), + Run: runRevokeCert, + } +) + +func init() { + Root.PersistentFlags().StringVar(&root, "root", root, "root directory for all artifacts") + + Root.AddCommand(createCACmd) + Root.AddCommand(createIntermediateCACmd) + Root.AddCommand(createCRLCmd) + Root.AddCommand(createSignedCertCmd) + Root.AddCommand(revokeCertCmd) + + for _, cmd := range []*cobra.Command{createIntermediateCACmd, createSignedCertCmd} { + cmd.Flags().StringVar(&parent, "parent", parent, "Parent cert name to use. Use 'ca' for the toplevel CA.") + cmd.Flags().StringVar(&serial, "serial", serial, "Serial number for the certificate to create. Should be different for two certificates with the same parent.") + cmd.Flags().StringVar(&commonName, "common-name", commonName, "Common name for the certificate. If empty, uses the name.") + } + revokeCertCmd.Flags().StringVar(&parent, "parent", parent, "Parent cert name to use. 
Use 'ca' for the toplevel CA.") +} + +func runCreateCA(cmd *cobra.Command, args []string) { + tlstest.CreateCA(root) +} + +func runCreateIntermediateCA(cmd *cobra.Command, args []string) { + name := args[0] + if commonName == "" { + commonName = name + } + + tlstest.CreateIntermediateCA(root, parent, serial, name, commonName) +} + +func runCreateCRL(cmd *cobra.Command, args []string) { + ca := args[0] + tlstest.CreateCRL(root, ca) +} + +func runCreateSignedCert(cmd *cobra.Command, args []string) { + name := args[0] + if commonName == "" { + commonName = name + } + + tlstest.CreateSignedCert(root, parent, serial, name, commonName) +} + +func runRevokeCert(cmd *cobra.Command, args []string) { + name := args[0] + tlstest.RevokeCertAndRegenerateCRL(root, parent, name) +} diff --git a/go/cmd/vttlstest/docgen/main.go b/go/cmd/vttlstest/docgen/main.go new file mode 100644 index 00000000000..2354dceb493 --- /dev/null +++ b/go/cmd/vttlstest/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/vttlstest/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.Root, dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/vttlstest/vttlstest.go b/go/cmd/vttlstest/vttlstest.go index 78bffb813a3..08e994c096d 100644 --- a/go/cmd/vttlstest/vttlstest.go +++ b/go/cmd/vttlstest/vttlstest.go @@ -19,126 +19,14 @@ package main import ( "github.com/spf13/cobra" + "vitess.io/vitess/go/cmd/vttlstest/cli" "vitess.io/vitess/go/exit" "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/tlstest" ) -var ( - root = "." - parent = "ca" - serial = "01" - commonName string - - rootCmd = &cobra.Command{ - Use: "vttlstest", - Short: "vttlstest is a tool for generating test certificates, keys, and related artifacts for TLS tests.", - Long: "vttlstest is a tool for generating test certificates, keys, and related artifacts for TLS tests.", - } - - createCACmd = &cobra.Command{ - Use: "CreateCA [--root ]", - DisableFlagsInUseLine: true, - Example: "CreateCA --root /tmp", - Short: "Create certificate authority", - Long: "Create certificate authority", - Args: cobra.NoArgs, - Run: runCreateCA, - } - - createIntermediateCACmd = &cobra.Command{ - Use: "CreateIntermediateCA [--root ] [--parent ] [--serial ] [--common-name ] ", - DisableFlagsInUseLine: true, - Example: "CreateIntermediateCA --root /tmp --parent ca mail.mycoolsite.com", - Short: "Create intermediate certificate authority", - Long: "Create intermediate certificate authority", - Args: cobra.ExactArgs(1), - Run: runCreateIntermediateCA, - } - - createCRLCmd = &cobra.Command{ - Use: "CreateCRL [--root ] ", - DisableFlagsInUseLine: true, - Example: "CreateCRL --root /tmp 
mail.mycoolsite.com", - Short: "Create certificate revocation list", - Long: "Create certificate revocation list", - Args: cobra.ExactArgs(1), - Run: runCreateCRL, - } - - createSignedCertCmd = &cobra.Command{ - Use: "CreateSignedCert [--root ] [--parent ] [--serial ] [--common-name ] ", - DisableFlagsInUseLine: true, - Example: "CreateSignedCert --root /tmp --common-name mail.mysite.com --parent mail.mycoolsite.com postman1", - Short: "Create signed certificate", - Long: "Create signed certificate", - Args: cobra.ExactArgs(1), - Run: runCreateSignedCert, - } - - revokeCertCmd = &cobra.Command{ - Use: "RevokeCert [--root ] [--parent ] ", - DisableFlagsInUseLine: true, - Example: "RevokeCert --root /tmp --parent mail.mycoolsite.com postman1", - Short: "Revoke a certificate", - Long: "Revoke a certificate", - Args: cobra.ExactArgs(1), - Run: runRevokeCert, - } -) - -func init() { - rootCmd.PersistentFlags().StringVar(&root, "root", root, "root directory for all artifacts") - - rootCmd.AddCommand(createCACmd) - rootCmd.AddCommand(createIntermediateCACmd) - rootCmd.AddCommand(createCRLCmd) - rootCmd.AddCommand(createSignedCertCmd) - rootCmd.AddCommand(revokeCertCmd) - - for _, cmd := range []*cobra.Command{createIntermediateCACmd, createSignedCertCmd} { - cmd.Flags().StringVar(&parent, "parent", parent, "Parent cert name to use. Use 'ca' for the toplevel CA.") - cmd.Flags().StringVar(&serial, "serial", serial, "Serial number for the certificate to create. Should be different for two certificates with the same parent.") - cmd.Flags().StringVar(&commonName, "common-name", commonName, "Common name for the certificate. If empty, uses the name.") - } - revokeCertCmd.Flags().StringVar(&parent, "parent", parent, "Parent cert name to use. 
Use 'ca' for the toplevel CA.") -} - -func runCreateCA(cmd *cobra.Command, args []string) { - tlstest.CreateCA(root) -} - -func runCreateIntermediateCA(cmd *cobra.Command, args []string) { - name := args[0] - if commonName == "" { - commonName = name - } - - tlstest.CreateIntermediateCA(root, parent, serial, name, commonName) -} - -func runCreateCRL(cmd *cobra.Command, args []string) { - ca := args[0] - tlstest.CreateCRL(root, ca) -} - -func runCreateSignedCert(cmd *cobra.Command, args []string) { - name := args[0] - if commonName == "" { - commonName = name - } - - tlstest.CreateSignedCert(root, parent, serial, name, commonName) -} - -func runRevokeCert(cmd *cobra.Command, args []string) { - name := args[0] - tlstest.RevokeCertAndRegenerateCRL(root, parent, name) -} - func main() { defer exit.Recover() defer logutil.Flush() - cobra.CheckErr(rootCmd.Execute()) + cobra.CheckErr(cli.Root.Execute()) } diff --git a/go/cmd/vtorc/status.go b/go/cmd/zk/command/add_auth.go similarity index 56% rename from go/cmd/vtorc/status.go rename to go/cmd/zk/command/add_auth.go index a4d8a59d3fc..566c463f4a8 100644 --- a/go/cmd/vtorc/status.go +++ b/go/cmd/zk/command/add_auth.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,17 +14,23 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package main +package command import ( - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vtorc/logic" + "github.com/spf13/cobra" ) -// addStatusParts adds UI parts to the /debug/status page of VTOrc -func addStatusParts() { - servenv.AddStatusPart("Recent Recoveries", logic.TopologyRecoveriesTemplate, func() any { - recoveries, _ := logic.ReadRecentRecoveries(false, 0) - return recoveries - }) +var AddAuth = &cobra.Command{ + Use: "addAuth ", + Args: cobra.ExactArgs(2), + RunE: commandAddAuth, +} + +func commandAddAuth(cmd *cobra.Command, args []string) error { + scheme, auth := cmd.Flags().Arg(0), cmd.Flags().Arg(1) + return fs.Conn.AddAuth(cmd.Context(), scheme, []byte(auth)) +} + +func init() { + Root.AddCommand(AddAuth) } diff --git a/go/cmd/zk/command/cat.go b/go/cmd/zk/command/cat.go new file mode 100644 index 00000000000..1d5460f7006 --- /dev/null +++ b/go/cmd/zk/command/cat.go @@ -0,0 +1,103 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package command + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + "github.com/z-division/go-zookeeper/zk" + "golang.org/x/term" + + "vitess.io/vitess/go/cmd/zk/internal/zkfilepath" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/zk2topo" +) + +var ( + catArgs = struct { + LongListing bool + Force bool + DecodeProto bool + }{} + + Cat = &cobra.Command{ + Use: "cat [ ...]", + Example: `zk cat /zk/path + +# List filename before file data +zk cat -l /zk/path1 /zk/path2`, + Args: cobra.MinimumNArgs(1), + RunE: commandCat, + } +) + +func commandCat(cmd *cobra.Command, args []string) error { + resolved, err := zk2topo.ResolveWildcards(cmd.Context(), fs.Conn, cmd.Flags().Args()) + if err != nil { + return fmt.Errorf("cat: invalid wildcards: %w", err) + } + if len(resolved) == 0 { + // the wildcards didn't result in anything, we're done + return nil + } + + hasError := false + for _, arg := range resolved { + zkPath := zkfilepath.Clean(arg) + data, _, err := fs.Conn.Get(cmd.Context(), zkPath) + if err != nil { + hasError = true + if !catArgs.Force || err != zk.ErrNoNode { + log.Warningf("cat: cannot access %v: %v", zkPath, err) + } + continue + } + + if catArgs.LongListing { + fmt.Printf("%v:\n", zkPath) + } + decoded := "" + if catArgs.DecodeProto { + decoded, err = topo.DecodeContent(zkPath, data, false) + if err != nil { + log.Warningf("cat: cannot proto decode %v: %v", zkPath, err) + decoded = string(data) + } + } else { + decoded = string(data) + } + fmt.Print(decoded) + if len(decoded) > 0 && decoded[len(decoded)-1] != '\n' && (term.IsTerminal(int(os.Stdout.Fd())) || catArgs.LongListing) { + fmt.Print("\n") + } + } + if hasError { + return fmt.Errorf("cat: some paths had errors") + } + return nil +} + +func init() { + Cat.Flags().BoolVarP(&catArgs.LongListing, "longListing", "l", false, "long listing") + Cat.Flags().BoolVarP(&catArgs.Force, "force", "f", false, "no warning on nonexistent node") + 
Cat.Flags().BoolVarP(&catArgs.DecodeProto, "decodeProto", "p", false, "decode proto files and display them as text") + + Root.AddCommand(Cat) +} diff --git a/go/cmd/zk/command/chmod.go b/go/cmd/zk/command/chmod.go new file mode 100644 index 00000000000..39125d618c4 --- /dev/null +++ b/go/cmd/zk/command/chmod.go @@ -0,0 +1,91 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/zk/internal/zkfilepath" + "vitess.io/vitess/go/cmd/zk/internal/zkfs" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/topo/zk2topo" +) + +var Chmod = &cobra.Command{ + Use: "chmod ", + Example: `zk chmod n-mode /zk/path +zk chmod n+mode /zk/path`, + Args: cobra.MinimumNArgs(2), + RunE: commandChmod, +} + +func commandChmod(cmd *cobra.Command, args []string) error { + mode := cmd.Flags().Arg(0) + if mode[0] != 'n' { + return fmt.Errorf("chmod: invalid mode") + } + + addPerms := false + if mode[1] == '+' { + addPerms = true + } else if mode[1] != '-' { + return fmt.Errorf("chmod: invalid mode") + } + + permMask := zkfs.ParsePermMode(mode[2:]) + + resolved, err := zk2topo.ResolveWildcards(cmd.Context(), fs.Conn, cmd.Flags().Args()[1:]) + if err != nil { + return fmt.Errorf("chmod: invalid wildcards: %w", err) + } + if len(resolved) == 0 { + // the wildcards didn't result in anything, we're done + return nil + } + + hasError := false + for _, arg := range resolved { + zkPath 
:= zkfilepath.Clean(arg) + aclv, _, err := fs.Conn.GetACL(cmd.Context(), zkPath) + if err != nil { + hasError = true + log.Warningf("chmod: cannot set access %v: %v", zkPath, err) + continue + } + if addPerms { + aclv[0].Perms |= permMask + } else { + aclv[0].Perms &= ^permMask + } + err = fs.Conn.SetACL(cmd.Context(), zkPath, aclv, -1) + if err != nil { + hasError = true + log.Warningf("chmod: cannot set access %v: %v", zkPath, err) + continue + } + } + if hasError { + return fmt.Errorf("chmod: some paths had errors") + } + return nil +} + +func init() { + Root.AddCommand(Chmod) +} diff --git a/go/cmd/zk/command/cp.go b/go/cmd/zk/command/cp.go new file mode 100644 index 00000000000..e89486413ea --- /dev/null +++ b/go/cmd/zk/command/cp.go @@ -0,0 +1,43 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import "github.com/spf13/cobra" + +var Cp = &cobra.Command{ + Use: "cp ", + Example: `zk cp /zk/path . 
+zk cp ./config /zk/path/config + +# Trailing slash indicates directory +zk cp ./config /zk/path/`, + Args: cobra.MinimumNArgs(2), + RunE: commandCp, +} + +func commandCp(cmd *cobra.Command, args []string) error { + switch cmd.Flags().NArg() { + case 2: + return fs.CopyContext(cmd.Context(), cmd.Flags().Arg(0), cmd.Flags().Arg(1)) + default: + return fs.MultiCopyContext(cmd.Context(), cmd.Flags().Args()) + } +} + +func init() { + Root.AddCommand(Cp) +} diff --git a/go/cmd/zk/command/edit.go b/go/cmd/zk/command/edit.go new file mode 100644 index 00000000000..ec4b74c4b62 --- /dev/null +++ b/go/cmd/zk/command/edit.go @@ -0,0 +1,101 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package command + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path" + "time" + + "github.com/spf13/cobra" + "github.com/z-division/go-zookeeper/zk" + + "vitess.io/vitess/go/cmd/zk/internal/zkfilepath" + "vitess.io/vitess/go/vt/log" +) + +var ( + editArgs = struct { + Force bool + }{} + + Edit = &cobra.Command{ + Use: "edit ", + Short: "Create a local copy, edit, and write changes back to cell.", + Args: cobra.ExactArgs(1), + RunE: commandEdit, + } +) + +func commandEdit(cmd *cobra.Command, args []string) error { + arg := cmd.Flags().Arg(0) + zkPath := zkfilepath.Clean(arg) + data, stat, err := fs.Conn.Get(cmd.Context(), zkPath) + if err != nil { + if !editArgs.Force || err != zk.ErrNoNode { + log.Warningf("edit: cannot access %v: %v", zkPath, err) + } + return fmt.Errorf("edit: cannot access %v: %v", zkPath, err) + } + + name := path.Base(zkPath) + tmpPath := fmt.Sprintf("/tmp/zk-edit-%v-%v", name, time.Now().UnixNano()) + f, err := os.Create(tmpPath) + if err == nil { + _, err = f.Write(data) + f.Close() + } + if err != nil { + return fmt.Errorf("edit: cannot write file %v", err) + } + + editor := exec.Command(os.Getenv("EDITOR"), tmpPath) + editor.Stdin = os.Stdin + editor.Stdout = os.Stdout + editor.Stderr = os.Stderr + err = editor.Run() + if err != nil { + os.Remove(tmpPath) + return fmt.Errorf("edit: cannot start $EDITOR: %v", err) + } + + fileData, err := os.ReadFile(tmpPath) + if err != nil { + os.Remove(tmpPath) + return fmt.Errorf("edit: cannot read file %v", err) + } + + if !bytes.Equal(fileData, data) { + // data changed - update if we can + _, err = fs.Conn.Set(cmd.Context(), zkPath, fileData, stat.Version) + if err != nil { + os.Remove(tmpPath) + return fmt.Errorf("edit: cannot write zk file %v", err) + } + } + os.Remove(tmpPath) + return nil +} + +func init() { + Edit.Flags().BoolVarP(&editArgs.Force, "force", "f", false, "no warning on nonexistent node") + + Root.AddCommand(Edit) +} diff --git a/go/cmd/zk/command/ls.go 
b/go/cmd/zk/command/ls.go new file mode 100644 index 00000000000..83c1d31363b --- /dev/null +++ b/go/cmd/zk/command/ls.go @@ -0,0 +1,153 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "fmt" + "path" + "sort" + "sync" + + "github.com/spf13/cobra" + "github.com/z-division/go-zookeeper/zk" + + "vitess.io/vitess/go/cmd/zk/internal/zkfilepath" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/topo/zk2topo" +) + +var ( + lsArgs = struct { + LongListing bool + DirectoryListing bool + Force bool + RecursiveListing bool + }{} + + Ls = &cobra.Command{ + Use: "ls ", + Example: `zk ls /zk +zk ls -l /zk + +# List directory node itself) +zk ls -ld /zk + +# Recursive (expensive) +zk ls -R /zk`, + Args: cobra.MinimumNArgs(1), + RunE: commandLs, + } +) + +func commandLs(cmd *cobra.Command, args []string) error { + resolved, err := zk2topo.ResolveWildcards(cmd.Context(), fs.Conn, cmd.Flags().Args()) + if err != nil { + return fmt.Errorf("ls: invalid wildcards: %v", err) + } + if len(resolved) == 0 { + // the wildcards didn't result in anything, we're + // done. 
+ return nil + } + + hasError := false + needsHeader := len(resolved) > 1 && !lsArgs.DirectoryListing + for _, arg := range resolved { + zkPath := zkfilepath.Clean(arg) + var children []string + var err error + isDir := true + if lsArgs.DirectoryListing { + children = []string{""} + isDir = false + } else if lsArgs.RecursiveListing { + children, err = zk2topo.ChildrenRecursive(cmd.Context(), fs.Conn, zkPath) + } else { + children, _, err = fs.Conn.Children(cmd.Context(), zkPath) + // Assume this is a file node if it has no children. + if len(children) == 0 { + children = []string{""} + isDir = false + } + } + if err != nil { + hasError = true + if !lsArgs.Force || err != zk.ErrNoNode { + log.Warningf("ls: cannot access %v: %v", zkPath, err) + } + } + + // Show the full path when it helps. + showFullPath := false + if lsArgs.RecursiveListing { + showFullPath = true + } else if lsArgs.LongListing && (lsArgs.DirectoryListing || !isDir) { + showFullPath = true + } + if needsHeader { + fmt.Printf("%v:\n", zkPath) + } + if len(children) > 0 { + if lsArgs.LongListing && isDir { + fmt.Printf("total: %v\n", len(children)) + } + sort.Strings(children) + stats := make([]*zk.Stat, len(children)) + wg := sync.WaitGroup{} + f := func(i int) { + localPath := path.Join(zkPath, children[i]) + _, stat, err := fs.Conn.Exists(cmd.Context(), localPath) + if err != nil { + if !lsArgs.Force || err != zk.ErrNoNode { + log.Warningf("ls: cannot access: %v: %v", localPath, err) + } + } else { + stats[i] = stat + } + wg.Done() + } + for i := range children { + wg.Add(1) + go f(i) + } + wg.Wait() + + for i, child := range children { + localPath := path.Join(zkPath, child) + if stat := stats[i]; stat != nil { + fmt.Println(zkfilepath.Format(stat, localPath, showFullPath, lsArgs.LongListing)) + } + } + } + if needsHeader { + fmt.Println() + } + } + if hasError { + return fmt.Errorf("ls: some paths had errors") + } + return nil +} + +func init() { + Ls.Flags().BoolVarP(&lsArgs.LongListing, 
"longlisting", "l", false, "long listing") + Ls.Flags().BoolVarP(&lsArgs.DirectoryListing, "directorylisting", "d", false, "list directory instead of contents") + Ls.Flags().BoolVarP(&lsArgs.Force, "force", "f", false, "no warning on nonexistent node") + Ls.Flags().BoolVarP(&lsArgs.RecursiveListing, "recursivelisting", "R", false, "recursive listing") + + Root.AddCommand(Ls) +} diff --git a/go/cmd/zk/command/rm.go b/go/cmd/zk/command/rm.go new file mode 100644 index 00000000000..5e5b5f4c494 --- /dev/null +++ b/go/cmd/zk/command/rm.go @@ -0,0 +1,97 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + "github.com/z-division/go-zookeeper/zk" + + "vitess.io/vitess/go/cmd/zk/internal/zkfilepath" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/topo/zk2topo" +) + +var ( + rmArgs = struct { + Force bool + RecursiveDelete bool + }{} + + Rm = &cobra.Command{ + Use: "rm ", + Example: `zk rm /zk/path + +# Recursive. +zk rm -R /zk/path + +# No error on nonexistent node. 
+zk rm -f /zk/path`, + Args: cobra.MinimumNArgs(1), + RunE: commandRm, + } +) + +func commandRm(cmd *cobra.Command, args []string) error { + if rmArgs.RecursiveDelete { + for _, arg := range cmd.Flags().Args() { + zkPath := zkfilepath.Clean(arg) + if strings.Count(zkPath, "/") < 2 { + return fmt.Errorf("rm: overly general path: %v", zkPath) + } + } + } + + resolved, err := zk2topo.ResolveWildcards(cmd.Context(), fs.Conn, cmd.Flags().Args()) + if err != nil { + return fmt.Errorf("rm: invalid wildcards: %v", err) + } + if len(resolved) == 0 { + // the wildcards didn't result in anything, we're done + return nil + } + + hasError := false + for _, arg := range resolved { + zkPath := zkfilepath.Clean(arg) + var err error + if rmArgs.RecursiveDelete { + err = zk2topo.DeleteRecursive(cmd.Context(), fs.Conn, zkPath, -1) + } else { + err = fs.Conn.Delete(cmd.Context(), zkPath, -1) + } + if err != nil && (!rmArgs.Force || err != zk.ErrNoNode) { + hasError = true + log.Warningf("rm: cannot delete %v: %v", zkPath, err) + } + } + if hasError { + // to be consistent with the command line 'rm -f', return + // 0 if using 'zk rm -f' and the file doesn't exist. + return fmt.Errorf("rm: some paths had errors") + } + return nil +} + +func init() { + Rm.Flags().BoolVarP(&rmArgs.Force, "force", "f", false, "no warning on nonexistent node") + Rm.Flags().BoolVarP(&rmArgs.RecursiveDelete, "recursivedelete", "r", false, "recursive delete") + + Root.AddCommand(Rm) +} diff --git a/go/cmd/zk/command/root.go b/go/cmd/zk/command/root.go new file mode 100644 index 00000000000..f3f02e7d4f2 --- /dev/null +++ b/go/cmd/zk/command/root.go @@ -0,0 +1,66 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/cmd/zk/internal/zkfs" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/topo/zk2topo" +) + +var ( + fs *zkfs.FS + server string + + Root = &cobra.Command{ + Use: "zk", + Short: "zk is a tool for wrangling zookeeper.", + Long: `zk is a tool for wrangling zookeeper. + +It tries to mimic unix file system commands wherever possible, but +there are some slight differences in flag handling. + +The zk tool looks for the address of the cluster in /etc/zookeeper/zk_client.conf, +or the file specified in the ZK_CLIENT_CONFIG environment variable. + +The local cell may be overridden with the ZK_CLIENT_LOCAL_CELL environment +variable.`, + PersistentPreRun: func(cmd *cobra.Command, args []string) { + logutil.PurgeLogs() + + // Connect to the server. + fs = &zkfs.FS{ + Conn: zk2topo.Connect(server), + } + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + logutil.Flush() + }, + } +) + +func init() { + Root.Flags().StringVar(&server, "server", server, "server(s) to connect to") + + log.RegisterFlags(Root.Flags()) + logutil.RegisterFlags(Root.Flags()) + acl.RegisterFlags(Root.Flags()) +} diff --git a/go/cmd/zk/command/stat.go b/go/cmd/zk/command/stat.go new file mode 100644 index 00000000000..713a68a3d4e --- /dev/null +++ b/go/cmd/zk/command/stat.go @@ -0,0 +1,88 @@ +/* +Copyright 2023 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "fmt" + + "github.com/spf13/cobra" + "github.com/z-division/go-zookeeper/zk" + + "vitess.io/vitess/go/cmd/zk/internal/zkfilepath" + "vitess.io/vitess/go/cmd/zk/internal/zkfs" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/topo/zk2topo" +) + +var ( + statArgs = struct { + Force bool + }{} + Stat = &cobra.Command{ + Use: "stat ", + Args: cobra.MinimumNArgs(1), + RunE: commandStat, + } +) + +func commandStat(cmd *cobra.Command, args []string) error { + resolved, err := zk2topo.ResolveWildcards(cmd.Context(), fs.Conn, cmd.Flags().Args()) + if err != nil { + return fmt.Errorf("stat: invalid wildcards: %v", err) + } + if len(resolved) == 0 { + // the wildcards didn't result in anything, we're done + return nil + } + + hasError := false + for _, arg := range resolved { + zkPath := zkfilepath.Clean(arg) + acls, stat, err := fs.Conn.GetACL(cmd.Context(), zkPath) + if stat == nil { + err = fmt.Errorf("no such node") + } + if err != nil { + hasError = true + if !statArgs.Force || err != zk.ErrNoNode { + log.Warningf("stat: cannot access %v: %v", zkPath, err) + } + continue + } + fmt.Printf("Path: %s\n", zkPath) + fmt.Printf("Created: %s\n", zk2topo.Time(stat.Ctime).Format(zkfilepath.TimeFmtMicro)) + fmt.Printf("Modified: %s\n", zk2topo.Time(stat.Mtime).Format(zkfilepath.TimeFmtMicro)) + fmt.Printf("Size: %v\n", stat.DataLength) + fmt.Printf("Children: %v\n", stat.NumChildren) + fmt.Printf("Version: %v\n", 
stat.Version) + fmt.Printf("Ephemeral: %v\n", stat.EphemeralOwner) + fmt.Printf("ACL:\n") + for _, acl := range acls { + fmt.Printf(" %v:%v %v\n", acl.Scheme, acl.ID, zkfs.FormatACL(acl)) + } + } + if hasError { + return fmt.Errorf("stat: some paths had errors") + } + return nil +} + +func init() { + Stat.Flags().BoolVarP(&statArgs.Force, "force", "f", false, "no warning on nonexistent node") + + Root.AddCommand(Stat) +} diff --git a/go/cmd/zk/command/touch.go b/go/cmd/zk/command/touch.go new file mode 100644 index 00000000000..76c390cf169 --- /dev/null +++ b/go/cmd/zk/command/touch.go @@ -0,0 +1,93 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "fmt" + + "github.com/spf13/cobra" + "github.com/z-division/go-zookeeper/zk" + + "vitess.io/vitess/go/cmd/zk/internal/zkfilepath" + "vitess.io/vitess/go/vt/topo/zk2topo" +) + +var ( + touchArgs = struct { + CreateParents bool + TouchOnly bool + }{} + + Touch = &cobra.Command{ + Use: "touch ", + Short: "Change node access time.", + Long: `Change node access time. + +NOTE: There is no mkdir - just touch a node. +The disntinction between file and directory is not relevant in zookeeper.`, + Example: `zk touch /zk/path + +# Don't create, just touch timestamp. +zk touch -c /zk/path + +# Create all parts necessary (think mkdir -p). 
+zk touch -p /zk/path`, + Args: cobra.ExactArgs(1), + RunE: commandTouch, + } +) + +func commandTouch(cmd *cobra.Command, args []string) error { + zkPath := zkfilepath.Clean(cmd.Flags().Arg(0)) + var ( + version int32 = -1 + create = false + ) + + data, stat, err := fs.Conn.Get(cmd.Context(), zkPath) + switch { + case err == nil: + version = stat.Version + case err == zk.ErrNoNode: + create = true + default: + return fmt.Errorf("touch: cannot access %v: %v", zkPath, err) + } + + switch { + case !create: + _, err = fs.Conn.Set(cmd.Context(), zkPath, data, version) + case touchArgs.TouchOnly: + return fmt.Errorf("touch: no such path %v", zkPath) + case touchArgs.CreateParents: + _, err = zk2topo.CreateRecursive(cmd.Context(), fs.Conn, zkPath, data, 0, zk.WorldACL(zk.PermAll), 10) + default: + _, err = fs.Conn.Create(cmd.Context(), zkPath, data, 0, zk.WorldACL(zk.PermAll)) + } + + if err != nil { + return fmt.Errorf("touch: cannot modify %v: %v", zkPath, err) + } + return nil +} + +func init() { + Touch.Flags().BoolVarP(&touchArgs.CreateParents, "createparent", "p", false, "create parents") + Touch.Flags().BoolVarP(&touchArgs.TouchOnly, "touchonly", "c", false, "touch only - don't create") + + Root.AddCommand(Touch) +} diff --git a/go/cmd/zk/command/unzip.go b/go/cmd/zk/command/unzip.go new file mode 100644 index 00000000000..f4c800e0533 --- /dev/null +++ b/go/cmd/zk/command/unzip.go @@ -0,0 +1,81 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package command + +import ( + "archive/zip" + "fmt" + "io" + "path" + "strings" + + "github.com/spf13/cobra" + "github.com/z-division/go-zookeeper/zk" + + "vitess.io/vitess/go/vt/topo/zk2topo" +) + +var Unzip = &cobra.Command{ + Use: "unzip ", + Example: `zk unzip zktree.zip / +zk unzip zktree.zip /zk/prefix`, + Args: cobra.ExactArgs(1), + RunE: commandUnzip, +} + +func commandUnzip(cmd *cobra.Command, args []string) error { + srcPath, dstPath := cmd.Flags().Arg(0), cmd.Flags().Arg(1) + + if !strings.HasSuffix(srcPath, ".zip") { + return fmt.Errorf("zip: need to specify src .zip path: %v", srcPath) + } + + zipReader, err := zip.OpenReader(srcPath) + if err != nil { + return fmt.Errorf("zip: error %v", err) + } + defer zipReader.Close() + + for _, zf := range zipReader.File { + rc, err := zf.Open() + if err != nil { + return fmt.Errorf("unzip: error %v", err) + } + data, err := io.ReadAll(rc) + if err != nil { + return fmt.Errorf("unzip: failed reading archive: %v", err) + } + zkPath := zf.Name + if dstPath != "/" { + zkPath = path.Join(dstPath, zkPath) + } + _, err = zk2topo.CreateRecursive(cmd.Context(), fs.Conn, zkPath, data, 0, zk.WorldACL(zk.PermAll), 10) + if err != nil && err != zk.ErrNodeExists { + return fmt.Errorf("unzip: zk create failed: %v", err) + } + _, err = fs.Conn.Set(cmd.Context(), zkPath, data, -1) + if err != nil { + return fmt.Errorf("unzip: zk set failed: %v", err) + } + rc.Close() + } + return nil +} + +func init() { + Root.AddCommand(Unzip) +} diff --git a/go/cmd/zk/command/wait.go b/go/cmd/zk/command/wait.go new file mode 100644 index 00000000000..864f6e83626 --- /dev/null +++ b/go/cmd/zk/command/wait.go @@ -0,0 +1,78 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "fmt" + + "github.com/spf13/cobra" + "github.com/z-division/go-zookeeper/zk" + + "vitess.io/vitess/go/cmd/zk/internal/zkfilepath" +) + +var ( + waitArgs = struct { + ExitIfExists bool + }{} + + Wait = &cobra.Command{ + Use: "wait ", + Short: "Sets a watch on the node and then waits for an event to fire.", + Example: ` # Wait for node change or creation. +zk wait /zk/path + +# Trailing slash waits on children. +zk wait /zk/path/children/`, + Args: cobra.ExactArgs(1), + RunE: commandWait, + } +) + +func commandWait(cmd *cobra.Command, args []string) error { + zkPath := cmd.Flags().Arg(0) + isDir := zkPath[len(zkPath)-1] == '/' + zkPath = zkfilepath.Clean(zkPath) + + var wait <-chan zk.Event + var err error + if isDir { + _, _, wait, err = fs.Conn.ChildrenW(cmd.Context(), zkPath) + } else { + _, _, wait, err = fs.Conn.GetW(cmd.Context(), zkPath) + } + if err != nil { + if err == zk.ErrNoNode { + _, _, wait, _ = fs.Conn.ExistsW(cmd.Context(), zkPath) + } else { + return fmt.Errorf("wait: error %v: %v", zkPath, err) + } + } else { + if waitArgs.ExitIfExists { + return fmt.Errorf("already exists: %v", zkPath) + } + } + event := <-wait + fmt.Printf("event: %v\n", event) + return nil +} + +func init() { + Wait.Flags().BoolVarP(&waitArgs.ExitIfExists, "exit", "e", false, "exit if the path already exists") + + Root.AddCommand(Wait) +} diff --git a/go/cmd/zk/command/watch.go b/go/cmd/zk/command/watch.go new file mode 100644 index 00000000000..eb28cc29ca2 --- /dev/null +++ b/go/cmd/zk/command/watch.go @@ -0,0 +1,86 @@ +/* +Copyright 2023 The Vitess 
Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "fmt" + + "github.com/spf13/cobra" + "github.com/z-division/go-zookeeper/zk" + + "vitess.io/vitess/go/cmd/zk/internal/zkfilepath" + "vitess.io/vitess/go/vt/log" +) + +var Watch = &cobra.Command{ + Use: "watch ", + Short: "Watches for changes to nodes and prints events as they occur.", + Example: `watch /zk/path`, + Args: cobra.MinimumNArgs(1), + RunE: commandWatch, +} + +func commandWatch(cmd *cobra.Command, args []string) error { + eventChan := make(chan zk.Event, 16) + for _, arg := range cmd.Flags().Args() { + zkPath := zkfilepath.Clean(arg) + _, _, watch, err := fs.Conn.GetW(cmd.Context(), zkPath) + if err != nil { + return fmt.Errorf("watch error: %v", err) + } + go func() { + eventChan <- <-watch + }() + } + + for { + select { + case <-cmd.Context().Done(): + return nil + case event := <-eventChan: + log.Infof("watch: event %v: %v", event.Path, event) + if event.Type == zk.EventNodeDataChanged { + data, stat, watch, err := fs.Conn.GetW(cmd.Context(), event.Path) + if err != nil { + return fmt.Errorf("ERROR: failed to watch %v", err) + } + log.Infof("watch: %v %v\n", event.Path, stat) + println(data) + go func() { + eventChan <- <-watch + }() + } else if event.State == zk.StateDisconnected { + return nil + } else if event.Type == zk.EventNodeDeleted { + log.Infof("watch: %v deleted\n", event.Path) + } else { + // Most likely a session event - try t + _, _, watch, err := 
fs.Conn.GetW(cmd.Context(), event.Path) + if err != nil { + return fmt.Errorf("ERROR: failed to watch %v", err) + } + go func() { + eventChan <- <-watch + }() + } + } + } +} + +func init() { + Root.AddCommand(Watch) +} diff --git a/go/cmd/zk/command/zip.go b/go/cmd/zk/command/zip.go new file mode 100644 index 00000000000..b765f5bb00e --- /dev/null +++ b/go/cmd/zk/command/zip.go @@ -0,0 +1,116 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "archive/zip" + "fmt" + "os" + "path" + "strings" + "sync" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/zk/internal/zkfilepath" + "vitess.io/vitess/go/cmd/zk/internal/zkfs" + "vitess.io/vitess/go/vt/topo/zk2topo" +) + +var Zip = &cobra.Command{ + Use: "zip [ ...] ", + Short: "Store a zk tree in a zip archive.", + Long: `Store a zk tree in a zip archive. 
+ +Note this won't be immediately useful to the local filesystem since znodes can have data and children; +that is, even "directories" can contain data.`, + Args: cobra.MinimumNArgs(2), + RunE: commandZip, +} + +func commandZip(cmd *cobra.Command, args []string) error { + posargs := cmd.Flags().Args() + dstPath := posargs[len(posargs)-1] + paths := posargs[:len(posargs)-1] + if !strings.HasSuffix(dstPath, ".zip") { + return fmt.Errorf("zip: need to specify destination .zip path: %v", dstPath) + } + zipFile, err := os.Create(dstPath) + if err != nil { + return fmt.Errorf("zip: error %v", err) + } + + wg := sync.WaitGroup{} + items := make(chan *zkfs.Item, 64) + for _, arg := range paths { + zkPath := zkfilepath.Clean(arg) + children, err := zk2topo.ChildrenRecursive(cmd.Context(), fs.Conn, zkPath) + if err != nil { + return fmt.Errorf("zip: error %v", err) + } + for _, child := range children { + toAdd := path.Join(zkPath, child) + wg.Add(1) + go func() { + data, stat, err := fs.Conn.Get(cmd.Context(), toAdd) + items <- &zkfs.Item{ + Path: toAdd, + Data: data, + Stat: stat, + Err: err, + } + wg.Done() + }() + } + } + go func() { + wg.Wait() + close(items) + }() + + zipWriter := zip.NewWriter(zipFile) + for item := range items { + path, data, stat, err := item.Path, item.Data, item.Stat, item.Err + if err != nil { + return fmt.Errorf("zip: get failed: %v", err) + } + // Skip ephemerals - not sure why you would archive them. 
+ if stat.EphemeralOwner > 0 { + continue + } + fi := &zip.FileHeader{Name: path, Method: zip.Deflate} + fi.Modified = zk2topo.Time(stat.Mtime) + f, err := zipWriter.CreateHeader(fi) + if err != nil { + return fmt.Errorf("zip: create failed: %v", err) + } + _, err = f.Write(data) + if err != nil { + return fmt.Errorf("zip: create failed: %v", err) + } + } + err = zipWriter.Close() + if err != nil { + return fmt.Errorf("zip: close failed: %v", err) + } + zipFile.Close() + return nil +} + +func init() { + Root.AddCommand(Zip) +} diff --git a/go/cmd/zk/docgen/main.go b/go/cmd/zk/docgen/main.go new file mode 100644 index 00000000000..b8a7bde3d14 --- /dev/null +++ b/go/cmd/zk/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/zk/command" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(command.Root, dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/zk/internal/zkfilepath/zkfilepath.go b/go/cmd/zk/internal/zkfilepath/zkfilepath.go new file mode 100644 index 00000000000..7febc7a9677 --- /dev/null +++ b/go/cmd/zk/internal/zkfilepath/zkfilepath.go @@ -0,0 +1,75 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package zkfilepath provides filepath utilities specialized to zookeeper. +package zkfilepath + +import ( + "fmt" + "path" + "strings" + + "github.com/z-division/go-zookeeper/zk" + + "vitess.io/vitess/go/vt/topo/zk2topo" +) + +const ( + TimeFmt = "2006-01-02 15:04:05" + TimeFmtMicro = "2006-01-02 15:04:05.000000" +) + +// Clean returns the shortest path name of a zookeeper path after trimming +// trailing slashes. +func Clean(zkPath string) string { + if zkPath != "/" { + zkPath = strings.TrimSuffix(zkPath, "/") + } + + return path.Clean(zkPath) +} + +// Format returns a path formatted to a canonical string. 
+func Format(stat *zk.Stat, zkPath string, showFullPath bool, longListing bool) string { + var name, perms string + + if !showFullPath { + name = path.Base(zkPath) + } else { + name = zkPath + } + + if longListing { + if stat.NumChildren > 0 { + // FIXME(msolomon) do permissions check? + perms = "drwxrwxrwx" + if stat.DataLength > 0 { + // give a visual indication that this node has data as well as children + perms = "nrw-rw-rw-" + } + } else if stat.EphemeralOwner != 0 { + perms = "erw-rw-rw-" + } else { + perms = "-rw-rw-rw-" + } + // always print the Local version of the time. zookeeper's + // go / C library would return a local time anyway, but + // might as well be sure. + return fmt.Sprintf("%v %v %v % 8v % 20v %v\n", perms, "zk", "zk", stat.DataLength, zk2topo.Time(stat.Mtime).Local().Format(TimeFmt), name) + } else { + return fmt.Sprintf("%v\n", name) + } +} diff --git a/go/cmd/zk/internal/zkfs/zkfs.go b/go/cmd/zk/internal/zkfs/zkfs.go new file mode 100644 index 00000000000..9bab19ec1e4 --- /dev/null +++ b/go/cmd/zk/internal/zkfs/zkfs.go @@ -0,0 +1,174 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package zkfs provides utilities for working with zookeepr in a filesystem-like manner. 
+package zkfs + +import ( + "context" + "fmt" + "io" + "os" + "path" + "strings" + "syscall" + + "github.com/z-division/go-zookeeper/zk" + + "vitess.io/vitess/go/cmd/zk/internal/zkfilepath" + "vitess.io/vitess/go/vt/topo/zk2topo" +) + +// FS wraps a zk2topo connection to provide FS utility methods. +type FS struct { + Conn *zk2topo.ZkConn +} + +// CopyContext copies the contents of src to dst. +func (fs *FS) CopyContext(ctx context.Context, src, dst string) error { + dstIsDir := dst[len(dst)-1] == '/' + src = zkfilepath.Clean(src) + dst = zkfilepath.Clean(dst) + + if !IsFile(src) && !IsFile(dst) { + return fmt.Errorf("cp: neither src nor dst is a /zk file") + } + + data, err := fs.ReadContext(ctx, src) + if err != nil { + return fmt.Errorf("cp: cannot read %v: %v", src, err) + } + + // If we are copying to a local directory - say '.', make the filename + // the same as the source. + if !IsFile(dst) { + fileInfo, err := os.Stat(dst) + if err != nil { + if err.(*os.PathError).Err != syscall.ENOENT { + return fmt.Errorf("cp: cannot stat %v: %v", dst, err) + } + } else if fileInfo.IsDir() { + dst = path.Join(dst, path.Base(src)) + } + } else if dstIsDir { + // If we are copying into zk, interpret trailing slash as treating the + // dst as a directory. + dst = path.Join(dst, path.Base(src)) + } + if err := fs.WriteContext(ctx, dst, data); err != nil { + return fmt.Errorf("cp: cannot write %v: %v", dst, err) + } + return nil +} + +// MultiCopyContext copies the contents of multiple sources to a single dst directory. +func (fs *FS) MultiCopyContext(ctx context.Context, args []string) error { + dstPath := args[len(args)-1] + if dstPath[len(dstPath)-1] != '/' { + // In multifile context, dstPath must be a directory. + dstPath += "/" + } + + for _, srcPath := range args[:len(args)-1] { + if err := fs.CopyContext(ctx, srcPath, dstPath); err != nil { + return err + } + } + return nil +} + +// ReadContext reads the data stored at path. 
+func (fs *FS) ReadContext(ctx context.Context, path string) (data []byte, err error) { + if !IsFile(path) { + data, _, err = fs.Conn.Get(ctx, path) + return data, err + } + + file, err := os.Open(path) + if err != nil { + return nil, err + } + + data, err = io.ReadAll(file) + return data, err +} + +// WriteContext writes the given data to path. +func (fs *FS) WriteContext(ctx context.Context, path string, data []byte) (err error) { + if IsFile(path) { + _, err = fs.Conn.Set(ctx, path, data, -1) + if err == zk.ErrNoNode { + _, err = zk2topo.CreateRecursive(ctx, fs.Conn, path, data, 0, zk.WorldACL(zk.PermAll), 10) + } + return err + } + return os.WriteFile(path, []byte(data), 0666) +} + +var ( + charPermMap map[string]int32 + permCharMap map[int32]string +) + +func init() { + charPermMap = map[string]int32{ + "r": zk.PermRead, + "w": zk.PermWrite, + "d": zk.PermDelete, + "c": zk.PermCreate, + "a": zk.PermAdmin, + } + permCharMap = make(map[int32]string) + for c, p := range charPermMap { + permCharMap[p] = c + } +} + +// FormatACL returns a string representation of a zookeeper ACL permission. +func FormatACL(acl zk.ACL) string { + s := "" + + for _, perm := range []int32{zk.PermRead, zk.PermWrite, zk.PermDelete, zk.PermCreate, zk.PermAdmin} { + if acl.Perms&perm != 0 { + s += permCharMap[perm] + } else { + s += "-" + } + } + return s +} + +// IsFile returns true if the path is a zk type of file. +func IsFile(path string) bool { + return strings.HasPrefix(path, "/zk") +} + +// ParsePermMode parses the mode string as a perm mask. +func ParsePermMode(mode string) (mask int32) { + for _, c := range mode[2:] { + mask |= charPermMap[string(c)] + } + + return mask +} + +// Item represents an item in a zookeeper filesystem. 
+type Item struct { + Path string + Data []byte + Stat *zk.Stat + Err error +} diff --git a/go/cmd/zk/zkcmd.go b/go/cmd/zk/zkcmd.go index 8d456f6b081..f03ac41c6ef 100644 --- a/go/cmd/zk/zkcmd.go +++ b/go/cmd/zk/zkcmd.go @@ -17,156 +17,17 @@ limitations under the License. package main import ( - "archive/zip" - "bytes" "context" - "fmt" - "io" "os" - "os/exec" "os/signal" - "path" - "sort" - "strings" - "sync" - "syscall" - "time" - "github.com/spf13/pflag" - "github.com/z-division/go-zookeeper/zk" - "golang.org/x/term" - - "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/cmd/zk/command" "vitess.io/vitess/go/exit" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/zk2topo" -) - -var doc = ` -zk is a tool for wrangling the zookeeper - -It tries to mimic unix file system commands wherever possible, but -there are some slight differences in flag handling. - -zk -h - provide help on overriding cell selection - -zk addAuth digest user:pass - -zk cat /zk/path -zk cat -l /zk/path1 /zk/path2 (list filename before file data) - -zk chmod n-mode /zk/path -zk chmod n+mode /zk/path - -zk cp /zk/path . -zk cp ./config /zk/path/config -zk cp ./config /zk/path/ (trailing slash indicates directory) - -zk edit /zk/path (create a local copy, edit and write changes back to cell) - -zk ls /zk -zk ls -l /zk -zk ls -ld /zk (list directory node itself) -zk ls -R /zk (recursive, expensive) - -zk stat /zk/path - -zk touch /zk/path -zk touch -c /zk/path (don't create, just touch timestamp) -zk touch -p /zk/path (create all parts necessary, think mkdir -p) -NOTE: there is no mkdir - just touch a node. The distinction -between file and directory is just not relevant in zookeeper. 
- -zk rm /zk/path -zk rm -r /zk/path (recursive) -zk rm -f /zk/path (no error on nonexistent node) - -zk wait /zk/path (wait for node change or creation) -zk wait /zk/path/children/ (trailing slash waits on children) - -zk watch /zk/path (print changes) - -zk unzip zktree.zip / -zk unzip zktree.zip /zk/prefix - -zk zip /zk/root zktree.zip -NOTE: zip file can't be dumped to the file system since znodes -can have data and children. - -The zk tool looks for the address of the cluster in /etc/zookeeper/zk_client.conf, -or the file specified in the ZK_CLIENT_CONFIG environment variable. - -The local cell may be overridden with the ZK_CLIENT_LOCAL_CELL environment -variable. -` - -const ( - timeFmt = "2006-01-02 15:04:05" - timeFmtMicro = "2006-01-02 15:04:05.000000" ) -type cmdFunc func(ctx context.Context, subFlags *pflag.FlagSet, args []string) error - -var cmdMap map[string]cmdFunc -var zconn *zk2topo.ZkConn -var server string - -func init() { - cmdMap = map[string]cmdFunc{ - "addAuth": cmdAddAuth, - "cat": cmdCat, - "chmod": cmdChmod, - "cp": cmdCp, - "edit": cmdEdit, - "ls": cmdLs, - "rm": cmdRm, - "stat": cmdStat, - "touch": cmdTouch, - "unzip": cmdUnzip, - "wait": cmdWait, - "watch": cmdWatch, - "zip": cmdZip, - } -} - func main() { defer exit.Recover() - defer logutil.Flush() - pflag.StringVar(&server, "server", server, "server(s) to connect to") - // handling case of --help & -h - var help bool - pflag.BoolVarP(&help, "help", "h", false, "display usage and exit") - log.RegisterFlags(pflag.CommandLine) - logutil.RegisterFlags(pflag.CommandLine) - acl.RegisterFlags(pflag.CommandLine) - pflag.CommandLine.Usage = func() { - fmt.Fprint(os.Stderr, doc) - pflag.Usage() - } - - pflag.Parse() - logutil.PurgeLogs() - - if help || pflag.Arg(0) == "help" { - pflag.Usage() - os.Exit(0) - } - - // if no zk command is provided after --server then we need to print doc & usage both - args := pflag.Args() - if len(args) == 0 { - pflag.CommandLine.Usage() - exit.Return(1) - } - 
cmdName := args[0] - args = args[1:] - cmd, ok := cmdMap[cmdName] - if !ok { - log.Exitf("Unknown command %v", cmdName) - } - subFlags := pflag.NewFlagSet(cmdName, pflag.ContinueOnError) // Create a context for the command, cancel it if we get a signal. ctx, cancel := context.WithCancel(context.Background()) @@ -177,848 +38,9 @@ func main() { cancel() }() - // Connect to the server. - zconn = zk2topo.Connect(server) - // Run the command. - if err := cmd(ctx, subFlags, args); err != nil { + if err := command.Root.ExecuteContext(ctx); err != nil { log.Error(err) exit.Return(1) } } - -func fixZkPath(zkPath string) string { - if zkPath != "/" { - zkPath = strings.TrimSuffix(zkPath, "/") - } - return path.Clean(zkPath) -} - -func isZkFile(path string) bool { - return strings.HasPrefix(path, "/zk") -} - -func cmdWait(ctx context.Context, subFlags *pflag.FlagSet, args []string) error { - var exitIfExists bool - subFlags.BoolVarP(&exitIfExists, "exit", "e", false, "exit if the path already exists") - - if err := subFlags.Parse(args); err != nil { - return err - } - - if subFlags.NArg() != 1 { - return fmt.Errorf("wait: can only wait for one path") - } - zkPath := subFlags.Arg(0) - isDir := zkPath[len(zkPath)-1] == '/' - zkPath = fixZkPath(zkPath) - - var wait <-chan zk.Event - var err error - if isDir { - _, _, wait, err = zconn.ChildrenW(ctx, zkPath) - } else { - _, _, wait, err = zconn.GetW(ctx, zkPath) - } - if err != nil { - if err == zk.ErrNoNode { - _, _, wait, _ = zconn.ExistsW(ctx, zkPath) - } else { - return fmt.Errorf("wait: error %v: %v", zkPath, err) - } - } else { - if exitIfExists { - return fmt.Errorf("already exists: %v", zkPath) - } - } - event := <-wait - fmt.Printf("event: %v\n", event) - return nil -} - -// Watch for changes to the node. 
-func cmdWatch(ctx context.Context, subFlags *pflag.FlagSet, args []string) error { - if err := subFlags.Parse(args); err != nil { - return err - } - - eventChan := make(chan zk.Event, 16) - for _, arg := range subFlags.Args() { - zkPath := fixZkPath(arg) - _, _, watch, err := zconn.GetW(ctx, zkPath) - if err != nil { - return fmt.Errorf("watch error: %v", err) - } - go func() { - eventChan <- <-watch - }() - } - - for { - select { - case <-ctx.Done(): - return nil - case event := <-eventChan: - log.Infof("watch: event %v: %v", event.Path, event) - if event.Type == zk.EventNodeDataChanged { - data, stat, watch, err := zconn.GetW(ctx, event.Path) - if err != nil { - return fmt.Errorf("ERROR: failed to watch %v", err) - } - log.Infof("watch: %v %v\n", event.Path, stat) - println(data) - go func() { - eventChan <- <-watch - }() - } else if event.State == zk.StateDisconnected { - return nil - } else if event.Type == zk.EventNodeDeleted { - log.Infof("watch: %v deleted\n", event.Path) - } else { - // Most likely a session event - try t - _, _, watch, err := zconn.GetW(ctx, event.Path) - if err != nil { - return fmt.Errorf("ERROR: failed to watch %v", err) - } - go func() { - eventChan <- <-watch - }() - } - } - } -} - -func cmdLs(ctx context.Context, subFlags *pflag.FlagSet, args []string) error { - var ( - longListing bool - directoryListing bool - force bool - recursiveListing bool - ) - subFlags.BoolVarP(&longListing, "longlisting", "l", false, "long listing") - subFlags.BoolVarP(&directoryListing, "directorylisting", "d", false, "list directory instead of contents") - subFlags.BoolVarP(&force, "force", "f", false, "no warning on nonexistent node") - subFlags.BoolVarP(&recursiveListing, "recursivelisting", "R", false, "recursive listing") - - if err := subFlags.Parse(args); err != nil { - return err - } - if subFlags.NArg() == 0 { - return fmt.Errorf("ls: no path specified") - } - resolved, err := zk2topo.ResolveWildcards(ctx, zconn, subFlags.Args()) - if err != nil 
{ - return fmt.Errorf("ls: invalid wildcards: %v", err) - } - if len(resolved) == 0 { - // the wildcards didn't result in anything, we're - // done. - return nil - } - - hasError := false - needsHeader := len(resolved) > 1 && !directoryListing - for _, arg := range resolved { - zkPath := fixZkPath(arg) - var children []string - var err error - isDir := true - if directoryListing { - children = []string{""} - isDir = false - } else if recursiveListing { - children, err = zk2topo.ChildrenRecursive(ctx, zconn, zkPath) - } else { - children, _, err = zconn.Children(ctx, zkPath) - // Assume this is a file node if it has no children. - if len(children) == 0 { - children = []string{""} - isDir = false - } - } - if err != nil { - hasError = true - if !force || err != zk.ErrNoNode { - log.Warningf("ls: cannot access %v: %v", zkPath, err) - } - } - - // Show the full path when it helps. - showFullPath := false - if recursiveListing { - showFullPath = true - } else if longListing && (directoryListing || !isDir) { - showFullPath = true - } - if needsHeader { - fmt.Printf("%v:\n", zkPath) - } - if len(children) > 0 { - if longListing && isDir { - fmt.Printf("total: %v\n", len(children)) - } - sort.Strings(children) - stats := make([]*zk.Stat, len(children)) - wg := sync.WaitGroup{} - f := func(i int) { - localPath := path.Join(zkPath, children[i]) - _, stat, err := zconn.Exists(ctx, localPath) - if err != nil { - if !force || err != zk.ErrNoNode { - log.Warningf("ls: cannot access: %v: %v", localPath, err) - } - } else { - stats[i] = stat - } - wg.Done() - } - for i := range children { - wg.Add(1) - go f(i) - } - wg.Wait() - - for i, child := range children { - localPath := path.Join(zkPath, child) - if stat := stats[i]; stat != nil { - fmtPath(stat, localPath, showFullPath, longListing) - } - } - } - if needsHeader { - fmt.Println() - } - } - if hasError { - return fmt.Errorf("ls: some paths had errors") - } - return nil -} - -func fmtPath(stat *zk.Stat, zkPath string, 
showFullPath bool, longListing bool) { - var name, perms string - - if !showFullPath { - name = path.Base(zkPath) - } else { - name = zkPath - } - - if longListing { - if stat.NumChildren > 0 { - // FIXME(msolomon) do permissions check? - perms = "drwxrwxrwx" - if stat.DataLength > 0 { - // give a visual indication that this node has data as well as children - perms = "nrw-rw-rw-" - } - } else if stat.EphemeralOwner != 0 { - perms = "erw-rw-rw-" - } else { - perms = "-rw-rw-rw-" - } - // always print the Local version of the time. zookeeper's - // go / C library would return a local time anyway, but - // might as well be sure. - fmt.Printf("%v %v %v % 8v % 20v %v\n", perms, "zk", "zk", stat.DataLength, zk2topo.Time(stat.Mtime).Local().Format(timeFmt), name) - } else { - fmt.Printf("%v\n", name) - } -} - -func cmdTouch(ctx context.Context, subFlags *pflag.FlagSet, args []string) error { - var ( - createParents bool - touchOnly bool - ) - - subFlags.BoolVarP(&createParents, "createparent", "p", false, "create parents") - subFlags.BoolVarP(&touchOnly, "touchonly", "c", false, "touch only - don't create") - - if err := subFlags.Parse(args); err != nil { - return err - } - if subFlags.NArg() != 1 { - return fmt.Errorf("touch: need to specify exactly one path") - } - - zkPath := fixZkPath(subFlags.Arg(0)) - - var ( - version int32 = -1 - create = false - ) - - data, stat, err := zconn.Get(ctx, zkPath) - switch { - case err == nil: - version = stat.Version - case err == zk.ErrNoNode: - create = true - default: - return fmt.Errorf("touch: cannot access %v: %v", zkPath, err) - } - - switch { - case !create: - _, err = zconn.Set(ctx, zkPath, data, version) - case touchOnly: - return fmt.Errorf("touch: no such path %v", zkPath) - case createParents: - _, err = zk2topo.CreateRecursive(ctx, zconn, zkPath, data, 0, zk.WorldACL(zk.PermAll), 10) - default: - _, err = zconn.Create(ctx, zkPath, data, 0, zk.WorldACL(zk.PermAll)) - } - - if err != nil { - return fmt.Errorf("touch: 
cannot modify %v: %v", zkPath, err) - } - return nil -} - -func cmdRm(ctx context.Context, subFlags *pflag.FlagSet, args []string) error { - var ( - force bool - recursiveDelete bool - ) - subFlags.BoolVarP(&force, "force", "f", false, "no warning on nonexistent node") - subFlags.BoolVarP(&recursiveDelete, "recursivedelete", "r", false, "recursive delete") - - if err := subFlags.Parse(args); err != nil { - return err - } - - if subFlags.NArg() == 0 { - return fmt.Errorf("rm: no path specified") - } - - if recursiveDelete { - for _, arg := range subFlags.Args() { - zkPath := fixZkPath(arg) - if strings.Count(zkPath, "/") < 2 { - return fmt.Errorf("rm: overly general path: %v", zkPath) - } - } - } - - resolved, err := zk2topo.ResolveWildcards(ctx, zconn, subFlags.Args()) - if err != nil { - return fmt.Errorf("rm: invalid wildcards: %v", err) - } - if len(resolved) == 0 { - // the wildcards didn't result in anything, we're done - return nil - } - - hasError := false - for _, arg := range resolved { - zkPath := fixZkPath(arg) - var err error - if recursiveDelete { - err = zk2topo.DeleteRecursive(ctx, zconn, zkPath, -1) - } else { - err = zconn.Delete(ctx, zkPath, -1) - } - if err != nil && (!force || err != zk.ErrNoNode) { - hasError = true - log.Warningf("rm: cannot delete %v: %v", zkPath, err) - } - } - if hasError { - // to be consistent with the command line 'rm -f', return - // 0 if using 'zk rm -f' and the file doesn't exist. 
- return fmt.Errorf("rm: some paths had errors") - } - return nil -} - -func cmdAddAuth(ctx context.Context, subFlags *pflag.FlagSet, args []string) error { - if err := subFlags.Parse(args); err != nil { - return err - } - if subFlags.NArg() < 2 { - return fmt.Errorf("addAuth: expected args ") - } - scheme, auth := subFlags.Arg(0), subFlags.Arg(1) - return zconn.AddAuth(ctx, scheme, []byte(auth)) -} - -func cmdCat(ctx context.Context, subFlags *pflag.FlagSet, args []string) error { - var ( - longListing bool - force bool - decodeProto bool - ) - subFlags.BoolVarP(&longListing, "longListing", "l", false, "long listing") - subFlags.BoolVarP(&force, "force", "f", false, "no warning on nonexistent node") - subFlags.BoolVarP(&decodeProto, "decodeProto", "p", false, "decode proto files and display them as text") - - if err := subFlags.Parse(args); err != nil { - return err - } - if subFlags.NArg() == 0 { - return fmt.Errorf("cat: no path specified") - } - resolved, err := zk2topo.ResolveWildcards(ctx, zconn, subFlags.Args()) - if err != nil { - return fmt.Errorf("cat: invalid wildcards: %v", err) - } - if len(resolved) == 0 { - // the wildcards didn't result in anything, we're done - return nil - } - - hasError := false - for _, arg := range resolved { - zkPath := fixZkPath(arg) - data, _, err := zconn.Get(ctx, zkPath) - if err != nil { - hasError = true - if !force || err != zk.ErrNoNode { - log.Warningf("cat: cannot access %v: %v", zkPath, err) - } - continue - } - - if longListing { - fmt.Printf("%v:\n", zkPath) - } - decoded := "" - if decodeProto { - decoded, err = topo.DecodeContent(zkPath, data, false) - if err != nil { - log.Warningf("cat: cannot proto decode %v: %v", zkPath, err) - decoded = string(data) - } - } else { - decoded = string(data) - } - fmt.Print(decoded) - if len(decoded) > 0 && decoded[len(decoded)-1] != '\n' && (term.IsTerminal(int(os.Stdout.Fd())) || longListing) { - fmt.Print("\n") - } - } - if hasError { - return fmt.Errorf("cat: some paths 
had errors") - } - return nil -} - -func cmdEdit(ctx context.Context, subFlags *pflag.FlagSet, args []string) error { - var force bool - subFlags.BoolVarP(&force, "force", "f", false, "no warning on nonexistent node") - - if err := subFlags.Parse(args); err != nil { - return err - } - if subFlags.NArg() == 0 { - return fmt.Errorf("edit: no path specified") - } - arg := subFlags.Arg(0) - zkPath := fixZkPath(arg) - data, stat, err := zconn.Get(ctx, zkPath) - if err != nil { - if !force || err != zk.ErrNoNode { - log.Warningf("edit: cannot access %v: %v", zkPath, err) - } - return fmt.Errorf("edit: cannot access %v: %v", zkPath, err) - } - - name := path.Base(zkPath) - tmpPath := fmt.Sprintf("/tmp/zk-edit-%v-%v", name, time.Now().UnixNano()) - f, err := os.Create(tmpPath) - if err == nil { - _, err = f.Write(data) - f.Close() - } - if err != nil { - return fmt.Errorf("edit: cannot write file %v", err) - } - - cmd := exec.Command(os.Getenv("EDITOR"), tmpPath) - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err = cmd.Run() - if err != nil { - os.Remove(tmpPath) - return fmt.Errorf("edit: cannot start $EDITOR: %v", err) - } - - fileData, err := os.ReadFile(tmpPath) - if err != nil { - os.Remove(tmpPath) - return fmt.Errorf("edit: cannot read file %v", err) - } - - if !bytes.Equal(fileData, data) { - // data changed - update if we can - _, err = zconn.Set(ctx, zkPath, fileData, stat.Version) - if err != nil { - os.Remove(tmpPath) - return fmt.Errorf("edit: cannot write zk file %v", err) - } - } - os.Remove(tmpPath) - return nil -} - -func cmdStat(ctx context.Context, subFlags *pflag.FlagSet, args []string) error { - var force bool - subFlags.BoolVarP(&force, "force", "f", false, "no warning on nonexistent node") - - if err := subFlags.Parse(args); err != nil { - return err - } - - if subFlags.NArg() == 0 { - return fmt.Errorf("stat: no path specified") - } - - resolved, err := zk2topo.ResolveWildcards(ctx, zconn, subFlags.Args()) - if err != nil 
{ - return fmt.Errorf("stat: invalid wildcards: %v", err) - } - if len(resolved) == 0 { - // the wildcards didn't result in anything, we're done - return nil - } - - hasError := false - for _, arg := range resolved { - zkPath := fixZkPath(arg) - acls, stat, err := zconn.GetACL(ctx, zkPath) - if stat == nil { - err = fmt.Errorf("no such node") - } - if err != nil { - hasError = true - if !force || err != zk.ErrNoNode { - log.Warningf("stat: cannot access %v: %v", zkPath, err) - } - continue - } - fmt.Printf("Path: %s\n", zkPath) - fmt.Printf("Created: %s\n", zk2topo.Time(stat.Ctime).Format(timeFmtMicro)) - fmt.Printf("Modified: %s\n", zk2topo.Time(stat.Mtime).Format(timeFmtMicro)) - fmt.Printf("Size: %v\n", stat.DataLength) - fmt.Printf("Children: %v\n", stat.NumChildren) - fmt.Printf("Version: %v\n", stat.Version) - fmt.Printf("Ephemeral: %v\n", stat.EphemeralOwner) - fmt.Printf("ACL:\n") - for _, acl := range acls { - fmt.Printf(" %v:%v %v\n", acl.Scheme, acl.ID, fmtACL(acl)) - } - } - if hasError { - return fmt.Errorf("stat: some paths had errors") - } - return nil -} - -var charPermMap map[string]int32 -var permCharMap map[int32]string - -func init() { - charPermMap = map[string]int32{ - "r": zk.PermRead, - "w": zk.PermWrite, - "d": zk.PermDelete, - "c": zk.PermCreate, - "a": zk.PermAdmin, - } - permCharMap = make(map[int32]string) - for c, p := range charPermMap { - permCharMap[p] = c - } -} - -func fmtACL(acl zk.ACL) string { - s := "" - - for _, perm := range []int32{zk.PermRead, zk.PermWrite, zk.PermDelete, zk.PermCreate, zk.PermAdmin} { - if acl.Perms&perm != 0 { - s += permCharMap[perm] - } else { - s += "-" - } - } - return s -} - -func cmdChmod(ctx context.Context, subFlags *pflag.FlagSet, args []string) error { - if err := subFlags.Parse(args); err != nil { - return err - } - if subFlags.NArg() < 2 { - return fmt.Errorf("chmod: no permission specified") - } - mode := subFlags.Arg(0) - if mode[0] != 'n' { - return fmt.Errorf("chmod: invalid mode") - } - 
- addPerms := false - if mode[1] == '+' { - addPerms = true - } else if mode[1] != '-' { - return fmt.Errorf("chmod: invalid mode") - } - - var permMask int32 - for _, c := range mode[2:] { - permMask |= charPermMap[string(c)] - } - - resolved, err := zk2topo.ResolveWildcards(ctx, zconn, subFlags.Args()[1:]) - if err != nil { - return fmt.Errorf("chmod: invalid wildcards: %v", err) - } - if len(resolved) == 0 { - // the wildcards didn't result in anything, we're done - return nil - } - - hasError := false - for _, arg := range resolved { - zkPath := fixZkPath(arg) - aclv, _, err := zconn.GetACL(ctx, zkPath) - if err != nil { - hasError = true - log.Warningf("chmod: cannot set access %v: %v", zkPath, err) - continue - } - if addPerms { - aclv[0].Perms |= permMask - } else { - aclv[0].Perms &= ^permMask - } - err = zconn.SetACL(ctx, zkPath, aclv, -1) - if err != nil { - hasError = true - log.Warningf("chmod: cannot set access %v: %v", zkPath, err) - continue - } - } - if hasError { - return fmt.Errorf("chmod: some paths had errors") - } - return nil -} - -func cmdCp(ctx context.Context, subFlags *pflag.FlagSet, args []string) error { - if err := subFlags.Parse(args); err != nil { - return err - } - switch { - case subFlags.NArg() < 2: - return fmt.Errorf("cp: need to specify source and destination paths") - case subFlags.NArg() == 2: - return fileCp(ctx, args[0], args[1]) - default: - return multiFileCp(ctx, args) - } -} - -func getPathData(ctx context.Context, filePath string) ([]byte, error) { - if isZkFile(filePath) { - data, _, err := zconn.Get(ctx, filePath) - return data, err - } - var err error - file, err := os.Open(filePath) - if err == nil { - data, err := io.ReadAll(file) - if err == nil { - return data, err - } - } - return nil, err -} - -func setPathData(ctx context.Context, filePath string, data []byte) error { - if isZkFile(filePath) { - _, err := zconn.Set(ctx, filePath, data, -1) - if err == zk.ErrNoNode { - _, err = zk2topo.CreateRecursive(ctx, 
zconn, filePath, data, 0, zk.WorldACL(zk.PermAll), 10) - } - return err - } - return os.WriteFile(filePath, []byte(data), 0666) -} - -func fileCp(ctx context.Context, srcPath, dstPath string) error { - dstIsDir := dstPath[len(dstPath)-1] == '/' - srcPath = fixZkPath(srcPath) - dstPath = fixZkPath(dstPath) - - if !isZkFile(srcPath) && !isZkFile(dstPath) { - return fmt.Errorf("cp: neither src nor dst is a /zk file: exitting") - } - - data, err := getPathData(ctx, srcPath) - if err != nil { - return fmt.Errorf("cp: cannot read %v: %v", srcPath, err) - } - - // If we are copying to a local directory - say '.', make the filename - // the same as the source. - if !isZkFile(dstPath) { - fileInfo, err := os.Stat(dstPath) - if err != nil { - if err.(*os.PathError).Err != syscall.ENOENT { - return fmt.Errorf("cp: cannot stat %v: %v", dstPath, err) - } - } else if fileInfo.IsDir() { - dstPath = path.Join(dstPath, path.Base(srcPath)) - } - } else if dstIsDir { - // If we are copying into zk, interpret trailing slash as treating the - // dstPath as a directory. - dstPath = path.Join(dstPath, path.Base(srcPath)) - } - if err := setPathData(ctx, dstPath, data); err != nil { - return fmt.Errorf("cp: cannot write %v: %v", dstPath, err) - } - return nil -} - -func multiFileCp(ctx context.Context, args []string) error { - dstPath := args[len(args)-1] - if dstPath[len(dstPath)-1] != '/' { - // In multifile context, dstPath must be a directory. - dstPath += "/" - } - - for _, srcPath := range args[:len(args)-1] { - if err := fileCp(ctx, srcPath, dstPath); err != nil { - return err - } - } - return nil -} - -type zkItem struct { - path string - data []byte - stat *zk.Stat - err error -} - -// Store a zk tree in a zip archive. This won't be immediately useful to -// zip tools since even "directories" can contain data. 
-func cmdZip(ctx context.Context, subFlags *pflag.FlagSet, args []string) error { - if err := subFlags.Parse(args); err != nil { - return err - } - if subFlags.NArg() < 2 { - return fmt.Errorf("zip: need to specify source and destination paths") - } - - dstPath := subFlags.Arg(subFlags.NArg() - 1) - paths := subFlags.Args()[:len(args)-1] - if !strings.HasSuffix(dstPath, ".zip") { - return fmt.Errorf("zip: need to specify destination .zip path: %v", dstPath) - } - zipFile, err := os.Create(dstPath) - if err != nil { - return fmt.Errorf("zip: error %v", err) - } - - wg := sync.WaitGroup{} - items := make(chan *zkItem, 64) - for _, arg := range paths { - zkPath := fixZkPath(arg) - children, err := zk2topo.ChildrenRecursive(ctx, zconn, zkPath) - if err != nil { - return fmt.Errorf("zip: error %v", err) - } - for _, child := range children { - toAdd := path.Join(zkPath, child) - wg.Add(1) - go func() { - data, stat, err := zconn.Get(ctx, toAdd) - items <- &zkItem{toAdd, data, stat, err} - wg.Done() - }() - } - } - go func() { - wg.Wait() - close(items) - }() - - zipWriter := zip.NewWriter(zipFile) - for item := range items { - path, data, stat, err := item.path, item.data, item.stat, item.err - if err != nil { - return fmt.Errorf("zip: get failed: %v", err) - } - // Skip ephemerals - not sure why you would archive them. 
- if stat.EphemeralOwner > 0 { - continue - } - fi := &zip.FileHeader{Name: path, Method: zip.Deflate} - fi.Modified = zk2topo.Time(stat.Mtime) - f, err := zipWriter.CreateHeader(fi) - if err != nil { - return fmt.Errorf("zip: create failed: %v", err) - } - _, err = f.Write(data) - if err != nil { - return fmt.Errorf("zip: create failed: %v", err) - } - } - err = zipWriter.Close() - if err != nil { - return fmt.Errorf("zip: close failed: %v", err) - } - zipFile.Close() - return nil -} - -func cmdUnzip(ctx context.Context, subFlags *pflag.FlagSet, args []string) error { - if err := subFlags.Parse(args); err != nil { - return err - } - if subFlags.NArg() != 2 { - return fmt.Errorf("zip: need to specify source and destination paths") - } - - srcPath, dstPath := subFlags.Arg(0), subFlags.Arg(1) - - if !strings.HasSuffix(srcPath, ".zip") { - return fmt.Errorf("zip: need to specify src .zip path: %v", srcPath) - } - - zipReader, err := zip.OpenReader(srcPath) - if err != nil { - return fmt.Errorf("zip: error %v", err) - } - defer zipReader.Close() - - for _, zf := range zipReader.File { - rc, err := zf.Open() - if err != nil { - return fmt.Errorf("unzip: error %v", err) - } - data, err := io.ReadAll(rc) - if err != nil { - return fmt.Errorf("unzip: failed reading archive: %v", err) - } - zkPath := zf.Name - if dstPath != "/" { - zkPath = path.Join(dstPath, zkPath) - } - _, err = zk2topo.CreateRecursive(ctx, zconn, zkPath, data, 0, zk.WorldACL(zk.PermAll), 10) - if err != nil && err != zk.ErrNodeExists { - return fmt.Errorf("unzip: zk create failed: %v", err) - } - _, err = zconn.Set(ctx, zkPath, data, -1) - if err != nil { - return fmt.Errorf("unzip: zk set failed: %v", err) - } - rc.Close() - } - return nil -} diff --git a/go/cmd/zkctl/command/init.go b/go/cmd/zkctl/command/init.go new file mode 100644 index 00000000000..518b4a6239d --- /dev/null +++ b/go/cmd/zkctl/command/init.go @@ -0,0 +1,32 @@ +/* +Copyright 2023 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import "github.com/spf13/cobra" + +var Init = &cobra.Command{ + Use: "init", + Short: "Generates a new config and then starts zookeeper.", + Args: cobra.ExactArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + return zkd.Init() + }, +} + +func init() { + Root.AddCommand(Init) +} diff --git a/go/cmd/zkctl/command/root.go b/go/cmd/zkctl/command/root.go new file mode 100644 index 00000000000..3399ed8c4cb --- /dev/null +++ b/go/cmd/zkctl/command/root.go @@ -0,0 +1,63 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package command + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/zkctl" +) + +var ( + zkCfg = "6@:3801:3802:3803" + myID uint + zkExtra []string + + zkd *zkctl.Zkd + + Root = &cobra.Command{ + Use: "zkctl", + Short: "Initializes and controls zookeeper with Vitess-specific configuration.", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if err := servenv.CobraPreRunE(cmd, args); err != nil { + return err + } + + zkConfig := zkctl.MakeZkConfigFromString(zkCfg, uint32(myID)) + zkConfig.Extra = zkExtra + zkd = zkctl.NewZkd(zkConfig) + + return nil + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + logutil.Flush() + }, + } +) + +func init() { + Root.PersistentFlags().StringVar(&zkCfg, "zk.cfg", zkCfg, + "zkid@server1:leaderPort1:electionPort1:clientPort1,...)") + Root.PersistentFlags().UintVar(&myID, "zk.myid", myID, + "which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname") + Root.PersistentFlags().StringArrayVar(&zkExtra, "zk.extra", zkExtra, + "extra config line(s) to append verbatim to config (flag can be specified more than once)") + + servenv.MovePersistentFlagsToCobraCommand(Root) +} diff --git a/go/cmd/zkctl/command/shutdown.go b/go/cmd/zkctl/command/shutdown.go new file mode 100644 index 00000000000..7237841e9c1 --- /dev/null +++ b/go/cmd/zkctl/command/shutdown.go @@ -0,0 +1,32 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import "github.com/spf13/cobra" + +var Shutdown = &cobra.Command{ + Use: "shutdown", + Short: "Terminates a zookeeper server but keeps its data dir intact.", + Args: cobra.ExactArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + return zkd.Shutdown() + }, +} + +func init() { + Root.AddCommand(Shutdown) +} diff --git a/go/cmd/zkctl/command/start.go b/go/cmd/zkctl/command/start.go new file mode 100644 index 00000000000..1ed31d0ed54 --- /dev/null +++ b/go/cmd/zkctl/command/start.go @@ -0,0 +1,32 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import "github.com/spf13/cobra" + +var Start = &cobra.Command{ + Use: "start", + Short: "Runs an already initialized zookeeper server.", + Args: cobra.ExactArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + return zkd.Start() + }, +} + +func init() { + Root.AddCommand(Start) +} diff --git a/go/cmd/zkctl/command/teardown.go b/go/cmd/zkctl/command/teardown.go new file mode 100644 index 00000000000..14fe7278835 --- /dev/null +++ b/go/cmd/zkctl/command/teardown.go @@ -0,0 +1,32 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import "github.com/spf13/cobra" + +var Teardown = &cobra.Command{ + Use: "teardown", + Short: "Shuts down the zookeeper server and removes its data dir.", + Args: cobra.ExactArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + return zkd.Teardown() + }, +} + +func init() { + Root.AddCommand(Teardown) +} diff --git a/go/cmd/zkctl/docgen/main.go b/go/cmd/zkctl/docgen/main.go new file mode 100644 index 00000000000..c35da8930e4 --- /dev/null +++ b/go/cmd/zkctl/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/zkctl/command" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(command.Root, dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/zkctl/zkctl.go b/go/cmd/zkctl/zkctl.go index 85ddb3e7e56..b00e3eb4812 100644 --- a/go/cmd/zkctl/zkctl.go +++ b/go/cmd/zkctl/zkctl.go @@ -14,71 +14,19 @@ See the License for the specific language governing permissions and limitations under the License. */ -// zkctl initializes and controls ZooKeeper with Vitess-specific configuration. package main import ( - "github.com/spf13/pflag" - + "vitess.io/vitess/go/cmd/zkctl/command" "vitess.io/vitess/go/exit" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/zkctl" ) -var usage = ` -Commands: - - init | start | shutdown | teardown -` - -var ( - zkCfg = "6@:3801:3802:3803" - myID uint -) - -func registerZkctlFlags(fs *pflag.FlagSet) { - fs.StringVar(&zkCfg, "zk.cfg", zkCfg, - "zkid@server1:leaderPort1:electionPort1:clientPort1,...)") - fs.UintVar(&myID, "zk.myid", myID, - "which server do you want to be? 
only needed when running multiple instance on one box, otherwise myid is implied by hostname") - -} -func init() { - servenv.OnParse(registerZkctlFlags) -} - func main() { defer exit.Recover() - defer logutil.Flush() - - fs := pflag.NewFlagSet("zkctl", pflag.ExitOnError) - log.RegisterFlags(fs) - logutil.RegisterFlags(fs) - args := servenv.ParseFlagsWithArgs("zkctl") - zkConfig := zkctl.MakeZkConfigFromString(zkCfg, uint32(myID)) - zkd := zkctl.NewZkd(zkConfig) - - action := args[0] - var err error - switch action { - case "init": - err = zkd.Init() - case "shutdown": - err = zkd.Shutdown() - case "start": - err = zkd.Start() - case "teardown": - err = zkd.Teardown() - default: - log.Errorf("invalid action: %v", action) - log.Errorf(usage) - exit.Return(1) - } - if err != nil { - log.Errorf("failed %v: %v", action, err) + if err := command.Root.Execute(); err != nil { + log.Error(err) exit.Return(1) } } diff --git a/go/cmd/zkctld/cli/zkctld.go b/go/cmd/zkctld/cli/zkctld.go new file mode 100644 index 00000000000..101f1013722 --- /dev/null +++ b/go/cmd/zkctld/cli/zkctld.go @@ -0,0 +1,100 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cli + +import ( + "fmt" + "os" + "os/signal" + "syscall" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/zkctl" +) + +var ( + zkCfg = "6@:3801:3802:3803" + myID uint + zkExtra []string + + Main = &cobra.Command{ + Use: "zkctld", + Short: "zkctld is a daemon that starts or initializes ZooKeeper with Vitess-specific configuration. It will stay running as long as the underlying ZooKeeper server, and will pass along SIGTERM.", + Args: cobra.NoArgs, + PersistentPreRunE: servenv.CobraPreRunE, + PostRun: func(cmd *cobra.Command, args []string) { + logutil.Flush() + }, + RunE: run, + } +) + +func init() { + servenv.OnParse(registerFlags) +} + +func registerFlags(fs *pflag.FlagSet) { + fs.StringVar(&zkCfg, "zk.cfg", zkCfg, + "zkid@server1:leaderPort1:electionPort1:clientPort1,...)") + fs.UintVar(&myID, "zk.myid", myID, + "which server do you want to be? 
only needed when running multiple instance on one box, otherwise myid is implied by hostname") + fs.StringArrayVar(&zkExtra, "zk.extra", zkExtra, + "extra config line(s) to append verbatim to config (flag can be specified more than once)") + acl.RegisterFlags(fs) +} + +func run(cmd *cobra.Command, args []string) error { + servenv.Init() + zkConfig := zkctl.MakeZkConfigFromString(zkCfg, uint32(myID)) + zkConfig.Extra = zkExtra + zkd := zkctl.NewZkd(zkConfig) + + if zkd.Inited() { + log.Infof("already initialized, starting without init...") + if err := zkd.Start(); err != nil { + return fmt.Errorf("failed start: %v", err) + } + } else { + log.Infof("initializing...") + if err := zkd.Init(); err != nil { + return fmt.Errorf("failed init: %v", err) + } + } + + log.Infof("waiting for signal or server shutdown...") + sig := make(chan os.Signal, 1) + signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM) + select { + case <-zkd.Done(): + log.Infof("server shut down on its own") + case <-sig: + log.Infof("signal received, shutting down server") + + // Action to perform if there is an error + if err := zkd.Shutdown(); err != nil { + return fmt.Errorf("error during shutdown:%v", err) + } + } + + return nil +} diff --git a/go/cmd/zkctld/docgen/main.go b/go/cmd/zkctld/docgen/main.go new file mode 100644 index 00000000000..9cf989f37b7 --- /dev/null +++ b/go/cmd/zkctld/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/zkctld/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.Main, dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/zkctld/zkctld.go b/go/cmd/zkctld/zkctld.go index dac1866f60f..211b63325eb 100644 --- a/go/cmd/zkctld/zkctld.go +++ b/go/cmd/zkctld/zkctld.go @@ -20,74 +20,15 @@ limitations under the License. package main import ( - "os" - "os/signal" - "syscall" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/cmd/zkctld/cli" "vitess.io/vitess/go/exit" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/zkctl" -) - -var ( - zkCfg = "6@:3801:3802:3803" - myID uint ) -func init() { - servenv.OnParse(registerFlags) -} - -func registerFlags(fs *pflag.FlagSet) { - fs.StringVar(&zkCfg, "zk.cfg", zkCfg, - "zkid@server1:leaderPort1:electionPort1:clientPort1,...)") - fs.UintVar(&myID, "zk.myid", myID, - "which server do you want to be? 
only needed when running multiple instance on one box, otherwise myid is implied by hostname") - - acl.RegisterFlags(fs) -} - func main() { defer exit.Recover() - defer logutil.Flush() - - servenv.ParseFlags("zkctld") - servenv.Init() - zkConfig := zkctl.MakeZkConfigFromString(zkCfg, uint32(myID)) - zkd := zkctl.NewZkd(zkConfig) - - if zkd.Inited() { - log.Infof("already initialized, starting without init...") - if err := zkd.Start(); err != nil { - log.Errorf("failed start: %v", err) - exit.Return(255) - } - } else { - log.Infof("initializing...") - if err := zkd.Init(); err != nil { - log.Errorf("failed init: %v", err) - exit.Return(255) - } - } - - log.Infof("waiting for signal or server shutdown...") - sig := make(chan os.Signal, 1) - signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM) - select { - case <-zkd.Done(): - log.Infof("server shut down on its own") - case <-sig: - log.Infof("signal received, shutting down server") - - // Action to perform if there is an error - if err := zkd.Shutdown(); err != nil { - log.Errorf("error during shutdown:%v", err) - exit.Return(1) - } + if err := cli.Main.Execute(); err != nil { + log.Error(err) + exit.Return(1) } } diff --git a/go/constants/sidecar/name.go b/go/constants/sidecar/name.go new file mode 100644 index 00000000000..063452782b7 --- /dev/null +++ b/go/constants/sidecar/name.go @@ -0,0 +1,42 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sidecar + +import ( + "sync/atomic" +) + +const ( + DefaultName = "_vt" +) + +var ( + // This should be accessed via GetName() + sidecarDBName atomic.Value +) + +func init() { + sidecarDBName.Store(DefaultName) +} + +func SetName(name string) { + sidecarDBName.Store(name) +} + +func GetName() string { + return sidecarDBName.Load().(string) +} diff --git a/go/constants/sidecar/queries.go b/go/constants/sidecar/queries.go new file mode 100644 index 00000000000..97fa30ebecc --- /dev/null +++ b/go/constants/sidecar/queries.go @@ -0,0 +1,55 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sidecar + +import "vitess.io/vitess/go/vt/sqlparser" + +// region unit-test-only +// This section uses helpers used in tests, but also in +// go/vt/vtexplain/vtexplain_vttablet.go. +// Hence, it is here and not in the _test.go file. +const ( + createDBQuery = "create database if not exists %s" + createTableRegexp = "(?i)CREATE TABLE .* `?\\_vt\\`?..*" + alterTableRegexp = "(?i)ALTER TABLE `?\\_vt\\`?..*" +) + +var ( + DBInitQueries = []string{ + "use %s", + createDBQuery, + } + // Query patterns to handle in mocks. + DBInitQueryPatterns = []string{ + createTableRegexp, + alterTableRegexp, + } +) + +// GetCreateQuery returns the CREATE DATABASE SQL statement +// used to create the sidecar database. 
+func GetCreateQuery() string { + return sqlparser.BuildParsedQuery(createDBQuery, GetIdentifier()).Query +} + +// GetIdentifier returns the sidecar database name as an SQL +// identifier string, most importantly this means that it will +// be properly escaped if/as needed. +func GetIdentifier() string { + ident := sqlparser.NewIdentifierCS(GetName()) + return sqlparser.String(ident) +} diff --git a/go/flags/endtoend/flags_test.go b/go/flags/endtoend/flags_test.go index 61bc1dacfc3..25cca54caf9 100644 --- a/go/flags/endtoend/flags_test.go +++ b/go/flags/endtoend/flags_test.go @@ -41,18 +41,21 @@ var ( //go:embed mysqlctld.txt mysqlctldTxt string + //go:embed topo2topo.txt + topo2topoTxt string + //go:embed vtaclcheck.txt vtaclcheckTxt string + //go:embed vtcombo.txt + vtcomboTxt string + //go:embed vtexplain.txt vtexplainTxt string //go:embed vtgate.txt vtgateTxt string - //go:embed vtgr.txt - vtgrTxt string - //go:embed vttablet.txt vttabletTxt string @@ -71,6 +74,9 @@ var ( //go:embed vtctldclient.txt vtctldclientTxt string + //go:embed vtgateclienttest.txt + vtgateclienttestTxt string + //go:embed vttestserver.txt vttestserverTxt string @@ -87,23 +93,25 @@ var ( zkTxt string helpOutput = map[string]string{ - "mysqlctl": mysqlctlTxt, - "mysqlctld": mysqlctldTxt, - "vtaclcheck": vtaclcheckTxt, - "vtexplain": vtexplainTxt, - "vtgate": vtgateTxt, - "vtgr": vtgrTxt, - "vttablet": vttabletTxt, - "vttlstest": vttlstestTxt, - "vtctld": vtctldTxt, - "vtctlclient": vtctlclientTxt, - "vtctldclient": vtctldclientTxt, - "vtorc": vtorcTxt, - "vttestserver": vttestserverTxt, - "zkctld": zkctldTxt, - "vtbackup": vtbackupTxt, - "zk": zkTxt, - "zkctl": zkctlTxt, + "mysqlctl": mysqlctlTxt, + "mysqlctld": mysqlctldTxt, + "topo2topo": topo2topoTxt, + "vtaclcheck": vtaclcheckTxt, + "vtbackup": vtbackupTxt, + "vtcombo": vtcomboTxt, + "vtctlclient": vtctlclientTxt, + "vtctld": vtctldTxt, + "vtctldclient": vtctldclientTxt, + "vtexplain": vtexplainTxt, + "vtgate": vtgateTxt, + 
"vtgateclienttest": vtgateclienttestTxt, + "vtorc": vtorcTxt, + "vttablet": vttabletTxt, + "vttestserver": vttestserverTxt, + "vttlstest": vttlstestTxt, + "zk": zkTxt, + "zkctl": zkctlTxt, + "zkctld": zkctldTxt, } ) diff --git a/go/flags/endtoend/mysqlctl.txt b/go/flags/endtoend/mysqlctl.txt index 4af44804749..a8f832d3345 100644 --- a/go/flags/endtoend/mysqlctl.txt +++ b/go/flags/endtoend/mysqlctl.txt @@ -1,16 +1,24 @@ -Usage: mysqlctl [global-flags] -- [command-flags] +`mysqlctl` is a command-line client used for managing `mysqld` instances. -The commands are listed below. Use 'mysqlctl -- {-h, --help}' for command help. +It is responsible for bootstrapping tasks such as generating a configuration file for `mysqld` and initializing the instance and its data directory. +The `mysqld_safe` watchdog is utilized when present. +This helps ensure that `mysqld` is automatically restarted after failures. - init [--wait_time=5m] [--init_db_sql_file=] - init_config - reinit_config - teardown [--wait_time=5m] [--force] - start [--wait_time=5m] - shutdown [--wait_time=5m] - position +Usage: + mysqlctl [command] -Global flags: +Available Commands: + completion Generate the autocompletion script for the specified shell + help Help about any command + init Initializes the directory structure and starts mysqld. + init_config Initializes the directory structure, creates my.cnf file, but does not start mysqld. + position Compute operations on replication positions + reinit_config Reinitializes my.cnf file with new server_id. + shutdown Shuts down mysqld, without removing any files. + start Starts mysqld on an already 'init'-ed directory. + teardown Shuts mysqld down and removes the directory. 
+ +Flags: --alsologtostderr log to standard error as well as files --app_idle_timeout duration Idle timeout for app connections (default 1m0s) --app_pool_size int Size of the connection pool for app connections (default 40) @@ -52,7 +60,7 @@ Global flags: --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3. --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s) --dba_pool_size int Size of the connection pool for dba connections (default 20) - -h, --help display usage and exit + -h, --help help for mysqlctl --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) @@ -62,9 +70,9 @@ Global flags: --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) --logtostderr log to standard error instead of files --max-stack-size int configure the maximum stack size in bytes (default 67108864) - --mysql_port int MySQL port (default 3306) + --mysql_port int MySQL port. (default 3306) --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess") - --mysql_socket string Path to the mysqld socket file + --mysql_socket string Path to the mysqld socket file. 
--mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default "grpc") --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions) @@ -81,7 +89,9 @@ Global flags: --stderrthreshold severity logs at or above this threshold go to stderr (default 1) --table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid. - --tablet_uid uint32 Tablet UID (default 41983) + --tablet_uid uint32 Tablet UID. (default 41983) --v Level log level for V logs -v, --version print binary version --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + +Use "mysqlctl [command] --help" for more information about a command. diff --git a/go/flags/endtoend/mysqlctld.txt b/go/flags/endtoend/mysqlctld.txt index 6fbbd059492..06b48347bf6 100644 --- a/go/flags/endtoend/mysqlctld.txt +++ b/go/flags/endtoend/mysqlctld.txt @@ -1,7 +1,28 @@ -Usage of mysqlctld: +`mysqlctld` is a gRPC server that can be used instead of the `mysqlctl` client tool. +If the target directories are empty when it is invoked, it automatically performs initialization operations to bootstrap the `mysqld` instance before starting it. +The `mysqlctld` process can subsequently receive gRPC commands from a `vttablet` to perform housekeeping operations like shutting down and restarting the `mysqld` instance as needed. + +{{ "{{< warning >}}" }} +`mysqld_safe` is not used so the `mysqld` process will not be automatically restarted in case of a failure. +{{ "{{}}" }} + +To enable communication with a `vttablet`, the server must be configured to receive gRPC messages on a unix domain socket. 
+ +Usage: + mysqlctld [flags] + +Examples: +mysqlctld \ + --log_dir=${VTDATAROOT}/logs \ + --tablet_uid=100 \ + --mysql_port=17100 \ + --socket_file=/path/to/socket_file + +Flags: --alsologtostderr log to standard error as well as files --app_idle_timeout duration Idle timeout for app connections (default 1m0s) --app_pool_size int Size of the connection pool for app connections (default 40) + --bind-address string Bind address for the server. If empty, the server will listen on all available unicast and anycast IP addresses of the local system. --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) @@ -44,6 +65,7 @@ Usage of mysqlctld: --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon). --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server. --grpc_auth_static_password_file string JSON File to read the users/passwords from. + --grpc_bind_address string Bind address for gRPC calls. If empty, listen on all addresses. --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. 
Supported: snappy @@ -62,7 +84,7 @@ Usage of mysqlctld: --grpc_server_initial_window_size int gRPC server initial window size --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s) --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs) - -h, --help display usage and exit + -h, --help help for mysqlctld --init_db_sql_file string Path to .sql file to run after mysqld initialization --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) diff --git a/go/flags/endtoend/topo2topo.txt b/go/flags/endtoend/topo2topo.txt new file mode 100644 index 00000000000..4391a32a1a8 --- /dev/null +++ b/go/flags/endtoend/topo2topo.txt @@ -0,0 +1,44 @@ +topo2topo copies Vitess topology data from one topo server to another. +It can also be used to compare data between two topologies. + +Usage: + topo2topo [flags] + +Flags: + --alsologtostderr log to standard error as well as files + --compare compares data between topologies + --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. + --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) + --config-name string Name of the config file (without extension) to search for. (default "vtconfig") + --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) + --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) + --config-type string Config file type (omit to infer config type from file extension). 
+ --do-keyspaces copies the keyspace information + --do-routing-rules copies the routing rules + --do-shard-replications copies the shard replication information + --do-shards copies the shard information + --do-tablets copies the tablet information + --from_implementation string topology implementation to copy data from + --from_root string topology server root to copy data from + --from_server string topology server address to copy data from + --grpc_enable_tracing Enable gRPC tracing. + --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216) + --grpc_prometheus Enable gRPC monitoring with Prometheus. + -h, --help help for topo2topo + --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) + --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --log_err_stacks log stack traces for errors + --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --logtostderr log to standard error instead of files + --pprof strings enable profiling + --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) + --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) + --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --to_implementation string topology implementation to copy data to + --to_root string topology server root to copy data to + --to_server string topology server address to copy data to + --v Level log level for V logs + -v, --version print binary version + --vmodule moduleSpec comma-separated list of pattern=N 
settings for file-filtered logging diff --git a/go/flags/endtoend/vtaclcheck.txt b/go/flags/endtoend/vtaclcheck.txt index 001d3a5b192..34bef9a05f9 100644 --- a/go/flags/endtoend/vtaclcheck.txt +++ b/go/flags/endtoend/vtaclcheck.txt @@ -1,4 +1,9 @@ -Usage of vtaclcheck: +vtaclcheck checks that the access-control list (ACL) rules in a given file are valid. + +Usage: + vtaclcheck [flags] + +Flags: --acl-file string The path of the JSON ACL file to check --alsologtostderr log to standard error as well as files --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. @@ -7,7 +12,7 @@ Usage of vtaclcheck: --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) --config-type string Config file type (omit to infer config type from file extension). - -h, --help display usage and exit + -h, --help help for vtaclcheck --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) diff --git a/go/flags/endtoend/vtbackup.txt b/go/flags/endtoend/vtbackup.txt index 3678872ef65..2429f631d68 100644 --- a/go/flags/endtoend/vtbackup.txt +++ b/go/flags/endtoend/vtbackup.txt @@ -1,4 +1,47 @@ -Usage of vtbackup: +vtbackup is a batch command to perform a single pass of backup maintenance for a shard. + +When run periodically for each shard, vtbackup can ensure these configurable policies: + * There is always a recent backup for the shard. + * Old backups for the shard are removed. 
+ +Whatever system launches vtbackup is responsible for the following: + - Running vtbackup with similar flags that would be used for a vttablet and + mysqlctld in the target shard to be backed up. + - Provisioning as much disk space for vtbackup as would be given to vttablet. + The data directory MUST be empty at startup. Do NOT reuse a persistent disk. + - Running vtbackup periodically for each shard, for each backup storage location. + - Ensuring that at most one instance runs at a time for a given pair of shard + and backup storage location. + - Retrying vtbackup if it fails. + - Alerting human operators if the failure is persistent. + +The process vtbackup follows to take a new backup has the following steps: + 1. Restore from the most recent backup. + 2. Start a mysqld instance (but no vttablet) from the restored data. + 3. Instruct mysqld to connect to the current shard primary and replicate any + transactions that are new since the last backup. + 4. Ask the primary for its current replication position and set that as the goal + for catching up on replication before taking the backup, so the goalposts + don't move. + 5. Wait until replication is caught up to the goal position or beyond. + 6. Stop mysqld and take a new backup. + +Aside from additional replication load while vtbackup's mysqld catches up on +new transactions, the shard should be otherwise unaffected. Existing tablets +will continue to serve, and no new tablets will appear in topology, meaning no +query traffic will ever be routed to vtbackup's mysqld. This silent operation +mode helps make backups minimally disruptive to serving capacity and orthogonal +to the handling of the query path. + +The command-line parameters to vtbackup specify a policy for when a new backup +is needed, and when old backups should be removed. If the existing backups +already satisfy the policy, then vtbackup will do nothing and return success +immediately. 
+ +Usage: + vtbackup [flags] + +Flags: --allow_first_backup Allow this job to take the first backup of an existing shard. --alsologtostderr log to standard error as well as files --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path). @@ -12,6 +55,7 @@ Usage of vtbackup: --backup_storage_compress if set, the backup files will be compressed. (default true) --backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups. --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, in parallel, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2) + --bind-address string Bind address for the server. If empty, the server will listen on all available unicast and anycast IP addresses of the local system. --builtinbackup-file-read-buffer-size uint read files using an IO buffer of this many bytes. Golang defaults are used when set to 0. --builtinbackup-file-write-buffer-size uint write files using an IO buffer of this many bytes. Golang defaults are used when set to 0. (default 2097152) --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s) @@ -92,7 +136,7 @@ Usage of vtbackup: --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s) --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 268435456) --grpc_prometheus Enable gRPC monitoring with Prometheus. 
- -h, --help display usage and exit + -h, --help help for vtbackup --incremental_from_pos string Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position --init_db_name_override string (init parameter) override the name of the db used by vttablet --init_db_sql_file string path to .sql file to run after mysql_install_db @@ -134,6 +178,7 @@ Usage of vtbackup: --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess") --mysql_socket string path to the mysql socket --mysql_timeout duration how long to wait for mysqld startup (default 5m0s) + --opentsdb_uri string URI of opentsdb /api/put method --port int port for the server --pprof strings enable profiling --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) @@ -184,6 +229,7 @@ Usage of vtbackup: --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS + --upgrade-safe Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades. --v Level log level for V logs -v, --version print binary version --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging diff --git a/go/flags/endtoend/vtbench.txt b/go/flags/endtoend/vtbench.txt new file mode 100644 index 00000000000..d74dc13ebc8 --- /dev/null +++ b/go/flags/endtoend/vtbench.txt @@ -0,0 +1,97 @@ +vtbench is a simple load testing client to compare workloads in Vitess across the various client/server protocols. 
+ +Usage: + vtbench [flags] + +Examples: +There are a number of command line options to control the behavior, +but as a basic example, the three supported client protocols are: + +Mysql protocol to vtgate: +vtbench \ + --protocol mysql \ + --host vtgate-host.my.domain \ + --port 15306 \ + --user db_username \ + --db-credentials-file ./vtbench_db_creds.json \ + --db @replica \ + --sql "select * from loadtest_table where id=123456789" \ + --threads 10 \ + --count 10 + +GRPC to vtgate: +vtbench \ + --protocol grpc-vtgate \ + --host vtgate-host.my.domain \ + --port 15999 \ + --db @replica \ + $VTTABLET_GRPC_ARGS \ + --sql "select * from loadtest_table where id=123456789" \ + --threads 10 \ + --count 10 + +GRPC to vttablet: +vtbench \ + --protocol grpc-vttablet \ + --host tablet-loadtest-00-80.my.domain \ + --port 15999 \ + --db loadtest/00-80@replica \ + --sql "select * from loadtest_table where id=123456789" \ + --threads 10 \ + --count 10 + +Flags: + --alsologtostderr log to standard error as well as files + --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. + --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) + --config-name string Name of the config file (without extension) to search for. (default "vtconfig") + --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) + --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) + --config-type string Config file type (omit to infer config type from file extension). + --count int Number of queries per thread (default 1000) + --db string Database name to use when connecting / running the queries (e.g. 
@replica, keyspace, keyspace/shard etc) + --deadline duration Maximum duration for the test run (default 5 minutes) (default 5m0s) + --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server. + --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy + --grpc_enable_tracing Enable gRPC tracing. + --grpc_initial_conn_window_size int gRPC initial connection window size + --grpc_initial_window_size int gRPC initial window size + --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s) + --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s) + --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216) + --grpc_prometheus Enable gRPC monitoring with Prometheus. + -h, --help help for vtbench + --host string VTGate host(s) in the form 'host1,host2,...' + --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) + --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --log_err_stacks log stack traces for errors + --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --logtostderr log to standard error instead of files + --mysql_server_version string MySQL server version to advertise. 
(default "8.0.30-Vitess") + --port int VTGate port + --pprof strings enable profiling + --protocol string Client protocol, either mysql (default), grpc-vtgate, or grpc-vttablet (default "mysql") + --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) + --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) + --sql string SQL statement to execute + --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited) + --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512) + --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --tablet_grpc_ca string the server ca to use to validate servers when connecting + --tablet_grpc_cert string the cert to use to connect + --tablet_grpc_crl string the server crl to use to validate server certificates when connecting + --tablet_grpc_key string the key to use to connect + --tablet_grpc_server_name string the server name to use to validate server certificate + --threads int Number of parallel threads to run (default 2) + --unix_socket string VTGate unix socket + --user string Username to connect using mysql (password comes from the db-credentials-file) + --v Level log level for V logs + -v, --version print binary version + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vtgate_grpc_ca string the server ca to use to validate servers when connecting + --vtgate_grpc_cert string the cert to use to connect + --vtgate_grpc_crl string the server crl to use to validate server certificates when connecting + --vtgate_grpc_key string the key to use to connect + --vtgate_grpc_server_name string the server name to use to validate server certificate diff --git a/go/flags/endtoend/vtclient.txt b/go/flags/endtoend/vtclient.txt new file mode 
100644 index 00000000000..3d17734168c --- /dev/null +++ b/go/flags/endtoend/vtclient.txt @@ -0,0 +1,52 @@ +vtclient connects to a vtgate server using the standard go driver API. + +For query bound variables, we assume place-holders in the query string +in the form of :v1, :v2, etc. + +Usage: + vtclient [flags] + +Examples: +vtclient --server vtgate:15991 "SELECT * FROM messages" + +vtclient --server vtgate:15991 --target '@primary' --bind_variables '[ 12345, 1, "msg 12345" ]' "INSERT INTO messages (page,time_created_ns,message) VALUES (:v1, :v2, :v3)" + +Flags: + --alsologtostderr log to standard error as well as files + --bind_variables float bind variables as a json list (default null) + --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. + --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) + --config-name string Name of the config file (without extension) to search for. (default "vtconfig") + --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) + --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) + --config-type string Config file type (omit to infer config type from file extension). + --count int DMLs only: Number of times each thread executes the query. Useful for simple, sustained load testing. (default 1) + --grpc_enable_tracing Enable gRPC tracing. + --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216) + --grpc_prometheus Enable gRPC monitoring with Prometheus. 
+ -h, --help help for vtclient + --json Output JSON instead of human-readable table + --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) + --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --log_err_stacks log stack traces for errors + --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --logtostderr log to standard error instead of files + --max_sequence_id int max sequence ID. + --min_sequence_id int min sequence ID to generate. When max_sequence_id > min_sequence_id, for each query, a number is generated in [min_sequence_id, max_sequence_id) and attached to the end of the bind variables. + --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess") + --parallel int DMLs only: Number of threads executing the same query in parallel. Useful for simple load testing. (default 1) + --pprof strings enable profiling + --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) + --qps int queries per second to throttle each thread at. 
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) + --server string vtgate server to connect to + --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --streaming use a streaming query + --target string keyspace:shard@tablet_type + --timeout duration timeout for queries (default 30s) + --use_random_sequence use random sequence for generating [min_sequence_id, max_sequence_id) + --v Level log level for V logs + -v, --version print binary version + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging diff --git a/go/flags/endtoend/vtcombo.txt b/go/flags/endtoend/vtcombo.txt new file mode 100644 index 00000000000..a2273cbd25d --- /dev/null +++ b/go/flags/endtoend/vtcombo.txt @@ -0,0 +1,437 @@ +vtcombo is a single binary containing several vitess components. + +In particular, it contains: +- A topology server based on an in-memory map. +- One vtgate instance. +- Many vttablet instances. +- A vtctld instance so it's easy to see the topology. + +Usage: + vtcombo [flags] + +Flags: + --action_timeout duration time to wait for an action before resorting to force (default 1m0s) + --allow-kill-statement Allows the execution of kill statement + --allowed_tablet_types strings Specifies the tablet types this vtgate is allowed to route queries to. Should be provided as a comma-separated set of tablet types. + --alsologtostderr log to standard error as well as files + --app_idle_timeout duration Idle timeout for app connections (default 1m0s) + --app_pool_size int Size of the connection pool for app connections (default 40) + --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. 
(default "builtin") + --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000) + --backup_storage_compress if set, the backup files will be compressed. (default true) + --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, in parallel, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2) + --bind-address string Bind address for the server. If empty, the server will listen on all available unicast and anycast IP addresses of the local system. + --binlog_host string PITR restore parameter: hostname/IP of binlog server. + --binlog_password string PITR restore parameter: password of binlog server. + --binlog_player_protocol string the protocol to download binlogs from a vttablet (default "grpc") + --binlog_port int PITR restore parameter: port of binlog server. + --binlog_ssl_ca string PITR restore parameter: Filename containing TLS CA certificate to verify binlog server TLS certificate against. + --binlog_ssl_cert string PITR restore parameter: Filename containing mTLS client certificate to present to binlog server as authentication. + --binlog_ssl_key string PITR restore parameter: Filename containing mTLS client private key for use in binlog server authentication. + --binlog_ssl_server_name string PITR restore parameter: TLS server name (common name) to verify against for the binlog server we are connecting to (If not set: use the hostname or IP supplied in --binlog_host). + --binlog_user string PITR restore parameter: username of binlog server. + --buffer_drain_concurrency int Maximum number of requests retried simultaneously. More concurrency will increase the load on the PRIMARY vttablet when draining the buffer. 
(default 1) + --buffer_keyspace_shards string If not empty, limit buffering to these entries (comma separated). Entry format: keyspace or keyspace/shard. Requires --enable_buffer=true. + --buffer_max_failover_duration duration Stop buffering completely if a failover takes longer than this duration. (default 20s) + --buffer_min_time_between_failovers duration Minimum time between the end of a failover and the start of the next one (tracked per shard). Faster consecutive failovers will not trigger buffering. (default 1m0s) + --buffer_size int Maximum number of buffered requests in flight (across all ongoing failovers). (default 1000) + --buffer_window duration Duration for how long a request should be buffered at most. (default 10s) + --builtinbackup-file-read-buffer-size uint read files using an IO buffer of this many bytes. Golang defaults are used when set to 0. + --builtinbackup-file-write-buffer-size uint write files using an IO buffer of this many bytes. Golang defaults are used when set to 0. (default 2097152) + --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s) + --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s) + --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified + --cell string cell to use + --compression-engine-name string compressor engine used for compression. (default "pargzip") + --compression-level int what level to pass to the compressor. (default 1) + --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. + --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) + --config-name string Name of the config file (without extension) to search for. 
(default "vtconfig") + --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) + --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) + --config-type string Config file type (omit to infer config type from file extension). + --consolidator-stream-query-size int Configure the stream consolidator query size in bytes. Setting to 0 disables the stream consolidator. (default 2097152) + --consolidator-stream-total-size int Configure the stream consolidator total size in bytes. Setting to 0 disables the stream consolidator. (default 134217728) + --consul_auth_static_file string JSON File to read the topos/tokens from. + --datadog-agent-host string host to send spans to. if empty, no tracing will be done + --datadog-agent-port string port to send spans to. if empty, no tracing will be done + --db-credentials-file string db credentials file; send SIGHUP to reload this file + --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file") + --db-credentials-vault-addr string URL to Vault server + --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds + --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle") + --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable + --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable + --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s) + --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate + --db-credentials-vault-tokenfile string 
Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable + --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s) + --db_allprivs_password string db allprivs password + --db_allprivs_use_ssl Set this flag to false to make the allprivs connection to not use ssl (default true) + --db_allprivs_user string db allprivs user userKey (default "vt_allprivs") + --db_app_password string db app password + --db_app_use_ssl Set this flag to false to make the app connection to not use ssl (default true) + --db_app_user string db app user userKey (default "vt_app") + --db_appdebug_password string db appdebug password + --db_appdebug_use_ssl Set this flag to false to make the appdebug connection to not use ssl (default true) + --db_appdebug_user string db appdebug user userKey (default "vt_appdebug") + --db_charset string Character set used for this tablet. (default "utf8mb4") + --db_conn_query_info enable parsing and processing of QUERY_OK info fields + --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout) + --db_dba_password string db dba password + --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true) + --db_dba_user string db dba user userKey (default "vt_dba") + --db_erepl_password string db erepl password + --db_erepl_use_ssl Set this flag to false to make the erepl connection to not use ssl (default true) + --db_erepl_user string db erepl user userKey (default "vt_erepl") + --db_filtered_password string db filtered password + --db_filtered_use_ssl Set this flag to false to make the filtered connection to not use ssl (default true) + --db_filtered_user string db filtered user userKey (default "vt_filtered") + --db_flags uint Flag values as defined by MySQL. + --db_flavor string Flavor override. Valid value is FilePos. + --db_host string The host name for the tcp connection. 
+ --db_port int tcp port + --db_repl_password string db repl password + --db_repl_use_ssl Set this flag to false to make the repl connection to not use ssl (default true) + --db_repl_user string db repl user userKey (default "vt_repl") + --db_server_name string server name of the DB we are connecting to. + --db_socket string The unix socket to connect on. If this is specified, host and port will not be used. + --db_ssl_ca string connection ssl ca + --db_ssl_ca_path string connection ssl ca path + --db_ssl_cert string connection ssl certificate + --db_ssl_key string connection ssl key + --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity. + --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3. + --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s) + --dba_pool_size int Size of the connection pool for dba connections (default 20) + --dbddl_plugin string controls how to handle CREATE/DROP DATABASE. use it if you are using your own database provisioning service (default "fail") + --ddl_strategy string Set default strategy for DDL statements. Override with @@ddl_strategy session variable (default "direct") + --default_tablet_type topodatapb.TabletType The default tablet type to set for queries, when one is not explicitly selected. (default PRIMARY) + --degraded_threshold duration replication lag after which a replica is considered degraded (default 30s) + --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents. 
+ --emit_stats If set, emit stats to push-based monitoring and stats backends + --enable-consolidator Synonym to -enable_consolidator (default true) + --enable-consolidator-replicas Synonym to -enable_consolidator_replicas + --enable-partial-keyspace-migration (Experimental) Follow shard routing rules: enable only while migrating a keyspace shard by shard. See documentation on Partial MoveTables for more. (default false) + --enable-per-workload-table-metrics If true, query counts and query error metrics include a label that identifies the workload + --enable-tx-throttler Synonym to -enable_tx_throttler + --enable-views Enable views support in vtgate. + --enable_buffer Enable buffering (stalling) of primary traffic during failovers. + --enable_buffer_dry_run Detect and log failover events, but do not actually buffer requests. + --enable_consolidator This option enables the query consolidator. (default true) + --enable_consolidator_replicas This option enables the query consolidator only on replicas. + --enable_direct_ddl Allow users to submit direct DDL statements (default true) + --enable_hot_row_protection If true, incoming transactions for the same row (range) will be queued and cannot consume all txpool slots. + --enable_hot_row_protection_dry_run If true, hot row protection is not enforced but logs if transactions would have been queued. + --enable_online_ddl Allow users to submit, review and control Online DDL (default true) + --enable_replication_reporter Use polling to track replication lag. + --enable_set_var This will enable the use of MySQL's SET_VAR query hint for certain system variables instead of using reserved connections (default true) + --enable_system_settings This will enable the system settings to be changed per session at the database connection level (default true) + --enable_transaction_limit If true, limit on number of transactions open at the same time will be enforced for all users. 
User trying to open a new transaction after exhausting their limit will receive an error immediately, regardless of whether there are available slots or not. + --enable_transaction_limit_dry_run If true, limit on number of transactions open at the same time will be tracked for all users, but not enforced. + --enable_tx_throttler If true replication-lag-based throttling on transactions will be enabled. + --enforce_strict_trans_tables If true, vttablet requires MySQL to run with STRICT_TRANS_TABLES or STRICT_ALL_TABLES on. It is recommended to not turn this flag off. Otherwise MySQL may alter your supplied values before saving them to the database. (default true) + --external-compressor string command with arguments to use when compressing a backup. + --external-compressor-extension string extension to use when using an external compressor. + --external-decompressor string command with arguments to use when decompressing a backup. + --external_topo_server Should vtcombo use an external topology server instead of starting its own in-memory topology server. If true, vtcombo will use the flags defined in topo/server.go to open topo server + --foreign_key_mode string This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow (default "allow") + --gate_query_cache_memory int gate server query cache size in bytes, maximum amount of memory to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. 
(default 33554432) + --gc_check_interval duration Interval between garbage collection checks (default 1h0m0s) + --gc_purge_check_interval duration Interval between purge discovery checks (default 1m0s) + --gh-ost-path string override default gh-ost binary full path + --grpc-send-session-in-streaming If set, will send the session as last packet in streaming api to support transactions in streaming + --grpc-use-effective-groups If set, and SSL is not used, will set the immediate caller's security groups from the effective caller id's groups. + --grpc-use-static-authentication-callerid If set, will set the immediate caller id to the username authenticated by the static auth plugin. + --grpc_auth_mode string Which auth plugin implementation to use (eg: static) + --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon). + --grpc_auth_static_password_file string JSON File to read the users/passwords from. + --grpc_bind_address string Bind address for gRPC calls. If empty, listen on all addresses. + --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check + --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS + --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake + --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port + --grpc_enable_tracing Enable gRPC tracing. + --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS + --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s) + --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. 
(default 2562047h47m16.854775807s) + --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216) + --grpc_port int Port to listen on for gRPC calls. If zero, do not listen. + --grpc_prometheus Enable gRPC monitoring with Prometheus. + --grpc_server_ca string path to server CA in PEM format, which will be combined with the server cert, returning the full certificate chain to clients + --grpc_server_initial_conn_window_size int gRPC server initial connection window size + --grpc_server_initial_window_size int gRPC server initial window size + --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s) + --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs) + --grpc_use_effective_callerid If set, and SSL is not used, will set the immediate caller id from the effective caller id's principal. + --health_check_interval duration Interval between health checks (default 20s) + --healthcheck_retry_delay duration health check retry delay (default 2ms) + --healthcheck_timeout duration the health check timeout period (default 1m0s) + --heartbeat_enable If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the sidecar database's heartbeat table. The result is used to inform the serving state of the vttablet via healthchecks. + --heartbeat_interval duration How frequently to read and write replication heartbeat. (default 1s) + --heartbeat_on_demand_duration duration If non-zero, heartbeats are only written upon consumer request, and only run for up to given duration following the request. 
Frequent requests can keep the heartbeat running consistently; when requests are infrequent heartbeat may completely stop between requests + -h, --help help for vtcombo + --hot_row_protection_concurrent_transactions int Number of concurrent transactions let through to the txpool/MySQL for the same hot row. Should be > 1 to have enough 'ready' transactions in MySQL and benefit from a pipelining effect. (default 5) + --hot_row_protection_max_global_queue_size int Global queue limit across all row (ranges). Useful to prevent that the queue can grow unbounded. (default 1000) + --hot_row_protection_max_queue_size int Maximum number of BeginExecute RPCs which will be queued for the same row (range). (default 20) + --init_db_name_override string (init parameter) override the name of the db used by vttablet. Without this flag, the db name defaults to vt_ + --init_keyspace string (init parameter) keyspace to use for this tablet + --init_shard string (init parameter) shard to use for this tablet + --init_tablet_type string (init parameter) the tablet type to use for this tablet. + --init_tags StringMap (init parameter) comma separated list of key:value pairs used to tag the tablet + --init_timeout duration (init parameter) timeout to use for the init phase. (default 1m0s) + --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done + --json_topo vttest.TopoData vttest proto definition of the topology, encoded in json format. See vttest.proto for more information. + --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) + --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) + --keyspaces_to_watch strings Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema. 
+ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) + --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s) + --lock_heartbeat_time duration If there is lock function used. This will keep the lock connection active by using this heartbeat (default 5s) + --lock_tables_timeout duration How long to keep the table locked before timing out (default 1m0s) + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --log_err_stacks log stack traces for errors + --log_queries_to_file string Enable query logging to the specified file + --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --logtostderr log to standard error instead of files + --manifest-external-decompressor string command with arguments to store in the backup manifest when compressing a backup with an external compression engine. + --max-stack-size int configure the maximum stack size in bytes (default 67108864) + --max_concurrent_online_ddl int Maximum number of online DDL changes that may run concurrently (default 256) + --max_memory_rows int Maximum number of rows that will be held in memory for intermediate results as well as the final result. (default 300000) + --max_payload_size int The threshold for query payloads in bytes. A payload greater than this threshold will result in a failure to handle the query. + --message_stream_grace_period duration the amount of time to give for a vttablet to resume if it ends a message stream, usually because of a reparent. 
(default 30s) + --migration_check_interval duration Interval between migration checks (default 1m0s) + --mycnf-file string path to my.cnf, if reading all config params from there + --mycnf_bin_log_path string mysql binlog path + --mycnf_data_dir string data directory for mysql + --mycnf_error_log_path string mysql error log path + --mycnf_general_log_path string mysql general log path + --mycnf_innodb_data_home_dir string Innodb data home directory + --mycnf_innodb_log_group_home_dir string Innodb log group home directory + --mycnf_master_info_file string mysql master.info file + --mycnf_mysql_port int port mysql is listening on + --mycnf_pid_file string mysql pid file + --mycnf_relay_log_index_path string mysql relay log index path + --mycnf_relay_log_info_path string mysql relay log info path + --mycnf_relay_log_path string mysql relay log path + --mycnf_secure_file_priv string mysql path for loading secure files + --mycnf_server_id int mysql server id of the server (if specified, mycnf-file will be ignored) + --mycnf_slow_log_path string mysql slow query log path + --mycnf_socket_file string mysql socket file + --mycnf_tmp_dir string mysql tmp directory + --mysql-server-keepalive-period duration TCP period between keep-alives + --mysql-server-pool-conn-read-buffers If set, the server will pool incoming connection read buffers + --mysql_allow_clear_text_without_tls If set, the server will allow the use of a clear text password over non-SSL connections. + --mysql_auth_server_impl string Which auth server implementation to use. Options: none, ldap, clientcert, static, vault. (default "static") + --mysql_default_workload string Default session workload (OLTP, OLAP, DBA) (default "OLTP") + --mysql_port int mysql port (default 3306) + --mysql_server_bind_address string Binds on this address when listening to MySQL binary protocol. Useful to restrict listening to 'localhost' only for instance. 
+ --mysql_server_port int If set, also listen for MySQL binary protocol connections on this port. (default -1) + --mysql_server_query_timeout duration mysql query timeout + --mysql_server_read_timeout duration connection read timeout + --mysql_server_require_secure_transport Reject insecure connections but only if mysql_server_ssl_cert and mysql_server_ssl_key are provided + --mysql_server_socket_path string This option specifies the Unix socket file to use when listening for local connections. By default it will be empty and it won't listen to a unix socket + --mysql_server_ssl_ca string Path to ssl CA for mysql server plugin SSL. If specified, server will require and validate client certs. + --mysql_server_ssl_cert string Path to the ssl cert for mysql server plugin SSL + --mysql_server_ssl_crl string Path to ssl CRL for mysql server plugin SSL + --mysql_server_ssl_key string Path to ssl key for mysql server plugin SSL + --mysql_server_ssl_server_ca string path to server CA in PEM format, which will be combined with the server cert, returning the full certificate chain to clients + --mysql_server_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3. + --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess") + --mysql_server_write_timeout duration connection write timeout + --mysql_slow_connect_warn_threshold duration Warn if it takes more than the given threshold for a mysql connection to establish + --mysql_tcp_version string Select tcp, tcp4, or tcp6 to control the socket type. 
(default "tcp") + --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init + --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions) + --no_scatter when set to true, the planner will fail instead of producing a plan that includes scatter queries + --normalize_queries Rewrite queries with bind vars. Turn this off if the app itself sends normalized queries with bind vars. (default true) + --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s) + --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s) + --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. + --pitr_gtid_lookup_timeout duration PITR restore parameter: timeout for fetching gtid from timestamp. (default 1m0s) + --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right + --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled) + --port int port for the server + --pprof strings enable profiling + --proto_topo vttest.TopoData vttest proto definition of the topology, encoded in compact text format. See vttest.proto for more information. 
+ --proxy_protocol Enable HAProxy PROXY protocol on MySQL listener socket + --proxy_tablets Setting this true will make vtctld proxy the tablet status instead of redirecting to them + --pt-osc-path string override default pt-online-schema-change binary full path + --publish_retry_interval duration how long vttablet waits to retry publishing the tablet record (default 30s) + --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) + --query-log-stream-handler string URL handler for streaming queries log (default "/debug/querylog") + --query-timeout int Sets the default query timeout (in ms). Can be overridden by session variable (query_timeout) or comment directive (QUERY_TIMEOUT_MS) + --querylog-buffer-size int Maximum number of buffered query logs before throttling log output (default 10) + --querylog-filter-tag string string that must be present in the query for it to be logged; if using a value as the tag, you need to disable query normalization + --querylog-format string format for query logs ("text" or "json") (default "text") + --querylog-row-threshold uint Number of rows a query has to return or affect before being logged; not useful for streaming queries. 0 means all queries will be logged. + --queryserver-config-acl-exempt-acl string an acl that exempt from table acl checking (this acl is free to access any vitess tables). + --queryserver-config-annotate-queries prefix queries to MySQL backend with comment indicating vtgate principal (user) and target tablet type + --queryserver-config-enable-table-acl-dry-run If this flag is enabled, tabletserver will emit monitoring metrics and let the request pass regardless of table acl check results + --queryserver-config-idle-timeout duration query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. 
This effectively manages number of connection objects and optimize the pool performance. (default 30m0s) + --queryserver-config-max-result-size int query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries. (default 10000) + --queryserver-config-message-postpone-cap int query server message postpone cap is the maximum number of messages that can be postponed at any given time. Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem. (default 4) + --queryserver-config-olap-transaction-timeout duration query server transaction timeout (in seconds), after which a transaction in an OLAP session will be killed (default 30s) + --queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting + --queryserver-config-pool-conn-max-lifetime duration query server connection max lifetime (in seconds), vttablet manages various mysql connection pools. This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool. (default 0s) + --queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16) + --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432) + --queryserver-config-query-pool-timeout duration query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead. 
(default 0s) + --queryserver-config-query-pool-waiter-cap int query server query pool waiter limit, this is the maximum number of queries that can be queued waiting to get a connection (default 5000) + --queryserver-config-query-timeout duration query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30s) + --queryserver-config-schema-change-signal query server schema signal, will signal connected vtgates that schema has changed whenever this is detected. VTGates will need to have -schema_change_signal enabled for this to work (default true) + --queryserver-config-schema-reload-time duration query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 30m0s) + --queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768) + --queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200) + --queryserver-config-stream-pool-timeout duration query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout. 
(default 0s) + --queryserver-config-stream-pool-waiter-cap int query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection + --queryserver-config-strict-table-acl only allow queries that pass table acl checks + --queryserver-config-terse-errors prevent bind vars from escaping in client error messages + --queryserver-config-transaction-cap int query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout) (default 20) + --queryserver-config-transaction-timeout duration query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value (default 30s) + --queryserver-config-truncate-error-len int truncate errors sent to client if they are longer than this value (0 means do not truncate) + --queryserver-config-txpool-timeout duration query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1s) + --queryserver-config-txpool-waiter-cap int query server transaction pool waiter limit, this is the maximum number of transactions that can be queued waiting to get a connection (default 5000) + --queryserver-config-warn-result-size int query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this + --queryserver-enable-settings-pool Enable pooling of connections with modified system settings (default true) + --queryserver-enable-views Enable views support in vttablet. + --queryserver_enable_online_ddl Enable online DDL. 
(default true) + --redact-debug-ui-queries redact full queries and bind variables from debug UI + --relay_log_max_items int Maximum number of rows for VReplication target buffering. (default 5000) + --relay_log_max_size int Maximum buffer size (in bytes) for VReplication target buffering. If single rows are larger than this, a single row is buffered at a time. (default 250000) + --remote_operation_timeout duration time to wait for a remote operation (default 15s) + --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s) + --restore-to-pos string (init incremental restore parameter) if set, run a point in time recovery that ends with the given position. This will attempt to use one full backup followed by zero or more incremental backups + --restore-to-timestamp string (init incremental restore parameter) if set, run a point in time recovery that restores up to the given timestamp, if possible. Given timestamp in RFC3339 format. Example: '2006-01-02T15:04:05Z07:00' + --restore_concurrency int (init restore parameter) how many concurrent files to restore at once (default 4) + --restore_from_backup (init restore parameter) will check BackupStorage for a recent backup at startup and start there + --restore_from_backup_ts string (init restore parameter) if set, restore the latest backup taken at or before this timestamp. Example: '2021-04-29.133050' + --retain_online_ddl_tables duration How long should vttablet keep an old migrated table before purging it (default 24h0m0s) + --sanitize_log_messages Remove potentially sensitive information in tablet INFO, WARNING, and ERROR log messages such as query parameters. 
+ --schema-change-reload-timeout duration query server schema change reload timeout, this is how long to wait for the signaled schema reload operation to complete before giving up (default 30s) + --schema-version-max-age-seconds int max age of schema version records to kept in memory by the vreplication historian + --schema_change_signal Enable the schema tracker; requires queryserver-config-schema-change-signal to be enabled on the underlying vttablets for this to work (default true) + --schema_dir string Schema base directory. Should contain one directory per keyspace, with a vschema.json file if necessary. + --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) + --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice + --serving_state_grace_period duration how long to pause after broadcasting health to vtgate, before enforcing a new serving state + --shard_sync_retry_delay duration delay between retries of updates to keep the tablet and its shard record in sync (default 30s) + --shutdown_grace_period duration how long to wait (in seconds) for queries and transactions to complete during graceful shutdown. 
(default 0s) + --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited) + --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512) + --srv_topo_cache_refresh duration how frequently to refresh the topology for cached entries (default 1s) + --srv_topo_cache_ttl duration how long to use cached entries for topology (default 1s) + --srv_topo_timeout duration topo server timeout (default 5s) + --start_mysql Should vtcombo also start mysql + --stats_backend string The name of the registered push-based monitoring/stats backend to use + --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars + --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2 + --stats_drop_variables string Variables to be dropped from the list of exported variables. + --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s) + --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stream_buffer_size int the number of bytes sent from vtgate for each stream call. It's recommended to keep this value in sync with vttablet's query-server-config-stream-buffer-size. (default 32768) + --stream_health_buffer_size uint max streaming health entries to buffer per streaming health client (default 20) + --table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class + --table_gc_lifecycle string States for a DROP TABLE garbage collection cycle. Default is 'hold,purge,evac,drop', use any subset ('drop' implicitly always included) (default "hold,purge,evac,drop") + --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid. 
+ --tablet_filters strings Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch. + --tablet_health_keep_alive duration close streaming tablet health connection if there are no requests for this long (default 5m0s) + --tablet_hostname string if not empty, this hostname will be assumed instead of trying to resolve it + --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting + --tablet_manager_grpc_cert string the cert to use to connect + --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8) + --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100) + --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting + --tablet_manager_grpc_key string the key to use to connect + --tablet_manager_grpc_server_name string the server name to use to validate server certificate + --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc") + --tablet_refresh_interval duration Tablet refresh interval. (default 1m0s) + --tablet_refresh_known_tablets Whether to reload the tablet's address/port map from topo in case they change. (default true) + --tablet_url_template string Format string describing debug tablet url formatting. See getTabletDebugURL() for how to customize this. (default "http://{{ "{{.GetTabletHostPort}}" }}") + --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' always implicitly included (default "replica") + --topo_consul_lock_delay duration LockDelay for consul session. (default 15s) + --topo_consul_lock_session_checks string List of checks for consul session. 
(default "serfHealth") + --topo_consul_lock_session_ttl string TTL for consul session. + --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s) + --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30) + --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server + --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS + --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS + --topo_global_root string the path of the global topology data in the global topology server + --topo_global_server_address string the address of the global topology server + --topo_implementation string the topology implementation to use + --topo_read_concurrency int Concurrency of topo reads. (default 32) + --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass + --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s) + --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64) + --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server + --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS + --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS + --tracer string tracing service to use (default "noop") + --tracing-enable-logging whether to enable logging in the tracing service + --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1) + --tracing-sampling-type string sampling strategy to use for jaeger. 
possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const") + --track_schema_versions When enabled, vttablet will store versions of schemas at each position that a DDL is applied and allow retrieval of the schema corresponding to a position + --transaction-log-stream-handler string URL handler for streaming transactions log (default "/debug/txlog") + --transaction_limit_by_component Include CallerID.component when considering who the user is for the purpose of transaction limit. + --transaction_limit_by_principal Include CallerID.principal when considering who the user is for the purpose of transaction limit. (default true) + --transaction_limit_by_subcomponent Include CallerID.subcomponent when considering who the user is for the purpose of transaction limit. + --transaction_limit_by_username Include VTGateCallerID.username when considering who the user is for the purpose of transaction limit. (default true) + --transaction_limit_per_user float Maximum number of transactions a single user is allowed to use at any time, represented as fraction of -transaction_cap. (default 0.4) + --transaction_mode string SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit (default "MULTI") + --truncate-error-len int truncate errors sent to client if they are longer than this value (0 means do not truncate) + --twopc_abandon_age float time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved. + --twopc_coordinator_address string address of the (VTGate) process(es) that will be used to notify of abandoned transactions. + --twopc_enable if the flag is on, 2pc is enabled. Other 2pc flags must be supplied. 
+ --tx-throttler-config string Synonym to -tx_throttler_config (default "target_replication_lag_sec:2 max_replication_lag_sec:10 initial_rate:100 max_increase:1 emergency_decrease:0.5 min_duration_between_increases_sec:40 max_duration_between_increases_sec:62 min_duration_between_decreases_sec:20 spread_backlog_across_sec:20 age_bad_rate_after_sec:180 bad_rate_increase:0.1 max_rate_approach_threshold:0.9") + --tx-throttler-default-priority int Default priority assigned to queries that lack priority information (default 100) + --tx-throttler-dry-run If present, the transaction throttler only records metrics about requests received and throttled, but does not actually throttle any requests. + --tx-throttler-healthcheck-cells strings Synonym to -tx_throttler_healthcheck_cells + --tx-throttler-tablet-types strings A comma-separated list of tablet types. Only tablets of this type are monitored for replication lag by the transaction throttler. Supported types are replica and/or rdonly. (default replica) + --tx-throttler-topo-refresh-interval duration The rate that the transaction throttler will refresh the topology to find cells. (default 5m0s) + --tx_throttler_config string The configuration of the transaction throttler as a text-formatted throttlerdata.Configuration protocol buffer message. (default "target_replication_lag_sec:2 max_replication_lag_sec:10 initial_rate:100 max_increase:1 emergency_decrease:0.5 min_duration_between_increases_sec:40 max_duration_between_increases_sec:62 min_duration_between_decreases_sec:20 spread_backlog_across_sec:20 age_bad_rate_after_sec:180 bad_rate_increase:0.1 max_rate_approach_threshold:0.9") + --tx_throttler_healthcheck_cells strings A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler. 
+ --unhealthy_threshold duration replication lag after which a replica is considered unhealthy (default 2h0m0s) + --v Level log level for V logs + -v, --version print binary version + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vreplication-parallel-insert-workers int Number of parallel insertion workers to use during copy phase. Set <= 1 to disable parallelism, or > 1 to enable concurrent insertion during copy phase. (default 1) + --vreplication_copy_phase_duration duration Duration for each copy phase loop (before running the next catchup: default 1h) (default 1h0m0s) + --vreplication_copy_phase_max_innodb_history_list_length int The maximum InnoDB transaction history that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 1000000) + --vreplication_copy_phase_max_mysql_replication_lag int The maximum MySQL replication lag (in seconds) that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. 
(default 43200) + --vreplication_healthcheck_retry_delay duration healthcheck retry delay (default 5s) + --vreplication_healthcheck_timeout duration healthcheck timeout (default 1m0s) + --vreplication_healthcheck_topology_refresh duration refresh interval for re-reading the topology (default 30s) + --vreplication_heartbeat_update_interval int Frequency (in seconds, default 1, max 60) at which the time_updated column of a vreplication stream is updated when idling (default 1) + --vreplication_max_time_to_retry_on_error duration stop automatically retrying when we've had consecutive failures with the same error for this long after the first occurrence + --vreplication_replica_lag_tolerance duration Replica lag threshold duration: once lag is below this we switch from copy phase to the replication (streaming) phase (default 1m0s) + --vreplication_retry_delay duration delay before retrying a failed workflow event in the replication phase (default 5s) + --vreplication_store_compressed_gtid Store compressed gtids in the pos column of the sidecar database's vreplication table + --vreplication_tablet_type string comma separated list of tablet types used as a source (default "in_order:REPLICA,PRIMARY") + --vschema-persistence-dir string If set, per-keyspace vschema will be persisted in this directory and reloaded into the in-memory topology server across restarts. Bookkeeping is performed using a simple watcher goroutine. This is useful when running vtcombo as an application development container (e.g. vttestserver) where you want to keep the same vschema even if developer's machine reboots. This works in tandem with vttestserver's --persistent_mode flag. Needless to say, this is neither a perfect nor a production solution for vschema persistence. Consider using the --external_topo_server flag if you require a more complete solution. This flag is ignored if --external_topo_server is set. 
+ --vschema_ddl_authorized_users string List of users authorized to execute vschema ddl operations, or '%' to allow all users. + --vstream-binlog-rotation-threshold int Byte size at which a VStreamer will attempt to rotate the source's open binary log before starting a GTID snapshot based stream (e.g. a ResultStreamer or RowStreamer) (default 67108864) + --vstream_dynamic_packet_size Enable dynamic packet sizing for VReplication. This will adjust the packet size during replication to improve performance. (default true) + --vstream_packet_size int Suggested packet size for VReplication streamer. This is used only as a recommendation. The actual packet size may be more or less than this amount. (default 250000) + --vtctld_sanitize_log_messages When true, vtctld sanitizes logging. + --vtgate-config-terse-errors prevent bind vars from escaping in returned errors + --vtgate_grpc_ca string the server ca to use to validate servers when connecting + --vtgate_grpc_cert string the cert to use to connect + --vtgate_grpc_crl string the server crl to use to validate server certificates when connecting + --vtgate_grpc_key string the key to use to connect + --vtgate_grpc_server_name string the server name to use to validate server certificate + --vttablet_skip_buildinfo_tags string comma-separated list of buildinfo tags to skip from merging with --init_tags. each tag is either an exact match or a regular expression of the form '/regexp/'. (default "/.*/") + --wait_for_backup_interval duration (init restore parameter) if this is greater than 0, instead of starting up empty when no backups are found, keep checking at this interval for a backup to appear + --warn_memory_rows int Warning threshold for in-memory results. A row count higher than this amount will cause the VtGateWarnings.ResultsExceeded counter to be incremented. (default 30000) + --warn_payload_size int The warning threshold for query payloads in bytes. 
A payload greater than this threshold will cause the VtGateWarnings.WarnPayloadSizeExceeded counter to be incremented. + --warn_sharded_only If any features that are only available in unsharded mode are used, query execution warnings will be added to the session + --watch_replication_stream When enabled, vttablet will stream the MySQL replication stream from the local server, and use it to update schema when it sees a DDL. + --xbstream_restore_flags string Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt + --xtrabackup_backup_flags string Flags to pass to backup command. These should be space separated and will be added to the end of the command + --xtrabackup_prepare_flags string Flags to pass to prepare command. These should be space separated and will be added to the end of the command + --xtrabackup_root_path string Directory location of the xtrabackup and xbstream executables, e.g., /usr/bin + --xtrabackup_stream_mode string Which mode to use if streaming, valid values are tar and xbstream. Please note that tar is not supported in XtraBackup 8.0 (default "tar") + --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400) + --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression + --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation. 
diff --git a/go/flags/endtoend/vtctld.txt b/go/flags/endtoend/vtctld.txt index 0c3c9f07c3b..eb83fc80a7b 100644 --- a/go/flags/endtoend/vtctld.txt +++ b/go/flags/endtoend/vtctld.txt @@ -1,4 +1,26 @@ -Usage of vtctld: +vtctld provides web and gRPC interfaces to manage a single Vitess cluster. +It is usually the first Vitess component to be started after a valid global topology service has been created. + +For the last several releases, vtctld has been transitioning to a newer gRPC service for well-typed cluster management requests. +This is **required** to use programs such as vtadmin and vtctldclient, and the old API and service are deprecated and will be removed in a future release. +To enable this newer service, include "grpc-vtctld" in the --service_map argument. +This is demonstrated in the example usage below. + +Usage: + vtctld [flags] + +Examples: +vtctld \ + --topo_implementation etcd2 \ + --topo_global_server_address localhost:2379 \ + --topo_global_root /vitess/ \ + --service_map 'grpc-vtctl,grpc-vtctld' \ + --backup_storage_implementation file \ + --file_backup_storage_root $VTDATAROOT/backups \ + --port 15000 \ + --grpc_port 15999 + +Flags: --action_timeout duration time to wait for an action before resorting to force (default 1m0s) --alsologtostderr log to standard error as well as files --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path). @@ -12,6 +34,7 @@ Usage of vtctld: --backup_storage_compress if set, the backup files will be compressed. (default true) --backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups. --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, in parallel, before the writer blocks, during compression (default is 2). 
It should be equal to the number of CPUs available for compression. (default 2) + --bind-address string Bind address for the server. If empty, the server will listen on all available unicast and anycast IP addresses of the local system. --builtinbackup-file-read-buffer-size uint read files using an IO buffer of this many bytes. Golang defaults are used when set to 0. --builtinbackup-file-write-buffer-size uint write files using an IO buffer of this many bytes. Golang defaults are used when set to 0. (default 2097152) --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s) @@ -37,6 +60,7 @@ Usage of vtctld: --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon). --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server. --grpc_auth_static_password_file string JSON File to read the users/passwords from. + --grpc_bind_address string Bind address for gRPC calls. If empty, listen on all addresses. --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy @@ -58,7 +82,7 @@ Usage of vtctld: --grpc_server_initial_window_size int gRPC server initial window size --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s) --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs) - -h, --help display usage and exit + -h, --help help for vtctld --jaeger-agent-host string host and port to send spans to. 
if empty, no tracing will be done --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) @@ -136,9 +160,6 @@ Usage of vtctld: --topo_global_root string the path of the global topology data in the global topology server --topo_global_server_address string the address of the global topology server --topo_implementation string the topology implementation to use - --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config - --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod - --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config --topo_read_concurrency int Concurrency of topo reads. (default 32) --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s) diff --git a/go/flags/endtoend/vtctldclient.txt b/go/flags/endtoend/vtctldclient.txt index 0be37194bc0..74c74b93cb0 100644 --- a/go/flags/endtoend/vtctldclient.txt +++ b/go/flags/endtoend/vtctldclient.txt @@ -51,6 +51,12 @@ Available Commands: GetVSchema Prints a JSON representation of a keyspace's topo record. GetWorkflows Gets all vreplication workflows (Reshard, MoveTables, etc) in the given keyspace. LegacyVtctlCommand Invoke a legacy vtctlclient command. Flag parsing is best effort. + LookupVindex Perform commands related to creating, backfilling, and externalizing Lookup Vindexes using VReplication workflows. 
+ Materialize Perform commands related to materializing query results from the source keyspace into tables in the target keyspace. + Migrate Migrate is used to import data from an external cluster into the current cluster. + Mount Mount is used to link an external Vitess cluster in order to migrate data from it. + MoveTables Perform commands related to moving tables from a source keyspace to a target keyspace. + OnlineDDL Operates on online DDL (schema migrations). PingTablet Checks that the specified tablet is awake and responding to RPCs. This command can be blocked by other in-flight operations. PlannedReparentShard Reparents the shard to a new primary, or away from an old primary. Both the old and new primaries must be up and running. RebuildKeyspaceGraph Rebuilds the serving data for the keyspace(s). This command may trigger an update to all connected clients. @@ -64,6 +70,7 @@ Available Commands: RemoveKeyspaceCell Removes the specified cell from the Cells list for all shards in the specified keyspace (by calling RemoveShardCell on every shard). It also removes the SrvKeyspace for that keyspace in that cell. RemoveShardCell Remove the specified cell from the specified shard's Cells list. ReparentTablet Reparent a tablet to the current primary in the shard. + Reshard Perform commands related to resharding a keyspace. RestoreFromBackup Stops mysqld on the specified tablet and restores the data from either the latest backup or closest before `backup-timestamp`. RunHealthCheck Runs a healthcheck on the remote tablet. SetKeyspaceDurabilityPolicy Sets the durability-policy used by the specified keyspace. @@ -81,19 +88,21 @@ Available Commands: UpdateCellInfo Updates the content of a CellInfo with the provided parameters, creating the CellInfo if it does not exist. UpdateCellsAlias Updates the content of a CellsAlias with the provided parameters, creating the CellsAlias if it does not exist. 
UpdateThrottlerConfig Update the tablet throttler configuration for all tablets in the given keyspace (across all cells) + VDiff Perform commands related to diffing tables involved in a VReplication workflow between the source and target. Validate Validates that all nodes reachable from the global replication graph, as well as all tablets in discoverable cells, are consistent. ValidateKeyspace Validates that all nodes reachable from the specified keyspace are consistent. ValidateSchemaKeyspace Validates that the schema on the primary tablet for shard 0 matches the schema on all other tablets in the keyspace. ValidateShard Validates that all nodes reachable from the specified shard are consistent. ValidateVersionKeyspace Validates that the version on the primary tablet of shard 0 matches all of the other tablets in the keyspace. ValidateVersionShard Validates that the version on the primary matches all of the replicas. + Workflow Administer VReplication workflows (Reshard, MoveTables, etc) in the given keyspace. completion Generate the autocompletion script for the specified shell help Help about any command - workflow Administer VReplication workflows (Reshard, MoveTables, etc) in the given keyspace Flags: - --action_timeout duration timeout for the total command (default 1h0m0s) + --action_timeout duration timeout to use for the command (default 1h0m0s) --alsologtostderr log to standard error as well as files + --compact use compact format for otherwise verbose outputs --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server. --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy --grpc_enable_tracing Enable gRPC tracing. @@ -113,7 +122,7 @@ Flags: --mysql_server_version string MySQL server version to advertise. 
(default "8.0.30-Vitess") --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) - --server string server to use for connection (required) + --server string server to use for the connection (required) --stderrthreshold severity logs at or above this threshold go to stderr (default 1) -v, --v Level log level for V logs --version version for vtctldclient diff --git a/go/flags/endtoend/vtexplain.txt b/go/flags/endtoend/vtexplain.txt index dff8d2c2bbe..d9c1ded1538 100644 --- a/go/flags/endtoend/vtexplain.txt +++ b/go/flags/endtoend/vtexplain.txt @@ -1,4 +1,43 @@ -Usage of vtexplain: +vtexplain is a command line tool which provides information on how Vitess plans to execute a particular query. + +It can be used to validate queries for compatibility with Vitess. + +For a user guide that describes how to use the vtexplain tool to explain how Vitess executes a particular SQL statement, see Analyzing a SQL statement. + +## Limitations + +### The VSchema must use a keyspace name. + +VTExplain requires a keyspace name for each keyspace in an input VSchema: +``` +"keyspace_name": { + "_comment": "Keyspace definition goes here." 
+} +``` + +If no keyspace name is present, VTExplain will return the following error: +``` +ERROR: initVtgateExecutor: json: cannot unmarshal bool into Go value of type map[string]json.RawMessage +``` + +Usage: + vtexplain [flags] + +Examples: +Explain how Vitess will execute the query `SELECT * FROM users` using the VSchema contained in `vschemas.json` and the database schema `schema.sql`: + +``` +vtexplain --vschema-file vschema.json --schema-file schema.sql --sql "SELECT * FROM users" +``` + +Explain how the example will execute on 128 shards using Row-based replication: + +``` +vtexplain -- -shards 128 --vschema-file vschema.json --schema-file schema.sql --replication-mode "ROW" --output-mode text --sql "INSERT INTO users (user_id, name) VALUES(1, 'john')" +``` + + +Flags: --alsologtostderr log to standard error as well as files --batch-interval duration Interval between logical time slots. (default 10ms) --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. @@ -10,7 +49,7 @@ Usage of vtexplain: --dbname string Optional database target to override normal routing --default_tablet_type topodatapb.TabletType The default tablet type to set for queries, when one is not explicitly selected. (default PRIMARY) --execution-mode string The execution mode to simulate -- must be set to multi, legacy-autocommit, or twopc (default "multi") - -h, --help display usage and exit + -h, --help help for vtexplain --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) --ks-shard-map string JSON map of keyspace name -> shard name -> ShardReference object. The inner map is the same as the output of FindAllShardsInKeyspace @@ -24,7 +63,7 @@ Usage of vtexplain: --mysql_server_version string MySQL server version to advertise. 
(default "8.0.30-Vitess") --normalize Whether to enable vtgate normalization --output-mode string Output in human-friendly text or json (default "text") - --planner-version string Sets the query planner version to use when generating the explain output. Valid values are V3 and Gen4. An empty value will use VTGate's default planner + --planner-version string Sets the default planner to use. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right --pprof strings enable profiling --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --replication-mode string The replication mode to simulate -- must be set to either ROW or STATEMENT (default "ROW") diff --git a/go/flags/endtoend/vtgate.txt b/go/flags/endtoend/vtgate.txt index 9240ee0682b..0611a387abd 100644 --- a/go/flags/endtoend/vtgate.txt +++ b/go/flags/endtoend/vtgate.txt @@ -1,8 +1,35 @@ -Usage of vtgate: +VTGate is a stateless proxy responsible for accepting requests from applications and routing them to the appropriate tablet server(s) for query execution. It speaks both the MySQL Protocol and a gRPC protocol. + +### Key Options + +* `--srv_topo_cache_ttl`: There may be instances where you will need to increase the cached TTL from the default of 1 second to a higher number: + * You may want to increase this option if you see that your topo leader goes down and keeps your queries waiting for a few seconds. 
+ +Usage: + vtgate [flags] + +Examples: +vtgate \ + --topo_implementation etcd2 \ + --topo_global_server_address localhost:2379 \ + --topo_global_root /vitess/global \ + --log_dir $VTDATAROOT/tmp \ + --port 15001 \ + --grpc_port 15991 \ + --mysql_server_port 15306 \ + --cell test \ + --cells_to_watch test \ + --tablet_types_to_wait PRIMARY,REPLICA \ + --service_map 'grpc-vtgateservice' \ + --pid_file $VTDATAROOT/tmp/vtgate.pid \ + --mysql_auth_server_impl none + +Flags: + --allow-kill-statement Allows the execution of kill statement --allowed_tablet_types strings Specifies the tablet types this vtgate is allowed to route queries to. Should be provided as a comma-separated set of tablet types. --alsologtostderr log to standard error as well as files + --bind-address string Bind address for the server. If empty, the server will listen on all available unicast and anycast IP addresses of the local system. --buffer_drain_concurrency int Maximum number of requests retried simultaneously. More concurrency will increase the load on the PRIMARY vttablet when draining the buffer. (default 1) - --buffer_implementation string Allowed values: healthcheck (legacy implementation), keyspace_events (default) (default "keyspace_events") --buffer_keyspace_shards string If not empty, limit buffering to these entries (comma separated). Entry format: keyspace or keyspace/shard. Requires --enable_buffer=true. --buffer_max_failover_duration duration Stop buffering completely if a failover takes longer than this duration. (default 20s) --buffer_min_time_between_failovers duration Minimum time between the end of a failover and the start of the next one (tracked per shard). Faster consecutive failovers will not trigger buffering. 
(default 1m0s) @@ -35,16 +62,16 @@ Usage of vtgate: --enable_set_var This will enable the use of MySQL's SET_VAR query hint for certain system variables instead of using reserved connections (default true) --enable_system_settings This will enable the system settings to be changed per session at the database connection level (default true) --foreign_key_mode string This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow (default "allow") - --gate_query_cache_lfu gate server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries (default true) --gate_query_cache_memory int gate server query cache size in bytes, maximum amount of memory to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432) - --gate_query_cache_size int gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a cache. This config controls the expected amount of unique entries in the cache. (default 5000) --gateway_initial_tablet_timeout duration At startup, the tabletGateway will wait up to this duration to get at least one tablet per keyspace/shard/tablet type (default 30s) + --grpc-send-session-in-streaming If set, will send the session as last packet in streaming api to support transactions in streaming --grpc-use-effective-groups If set, and SSL is not used, will set the immediate caller's security groups from the effective caller id's groups. --grpc-use-static-authentication-callerid If set, will set the immediate caller id to the username authenticated by the static auth plugin. 
--grpc_auth_mode string Which auth plugin implementation to use (eg: static) --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon). --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server. --grpc_auth_static_password_file string JSON File to read the users/passwords from. + --grpc_bind_address string Bind address for gRPC calls. If empty, listen on all addresses. --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy @@ -69,7 +96,7 @@ Usage of vtgate: --grpc_use_effective_callerid If set, and SSL is not used, will set the immediate caller id from the effective caller id's principal. --healthcheck_retry_delay duration health check retry delay (default 2ms) --healthcheck_timeout duration the health check timeout period (default 1m0s) - -h, --help display usage and exit + -h, --help help for vtgate --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) @@ -90,6 +117,7 @@ Usage of vtgate: --max_payload_size int The threshold for query payloads in bytes. A payload greater than this threshold will result in a failure to handle the query. --message_stream_grace_period duration the amount of time to give for a vttablet to resume if it ends a message stream, usually because of a reparent. (default 30s) --min_number_serving_vttablets int The minimum number of vttablets for each replicating tablet_type (e.g. 
replica, rdonly) that will continue to be used even with replication lag above discovery_low_replication_lag, but still below discovery_high_replication_lag_minimum_serving. (default 2) + --mysql-server-keepalive-period duration TCP period between keep-alives --mysql-server-pool-conn-read-buffers If set, the server will pool incoming connection read buffers --mysql_allow_clear_text_without_tls If set, the server will allow the use of a clear text password over non-SSL connections. --mysql_auth_server_config_file string JSON File to read the users/passwords from. @@ -136,7 +164,7 @@ Usage of vtgate: --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s) --opentsdb_uri string URI of opentsdb /api/put method --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. - --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: V3, V3Insert, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the gen4 planner and falls back to the V3 planner if the gen4 fails. + --planner-version string Sets the default planner to use when the session has not changed it. 
Valid values are: Gen4, Gen4Greedy, Gen4Left2Right --port int port for the server --pprof strings enable profiling --proxy_protocol Enable HAProxy PROXY protocol on MySQL listener socket @@ -150,7 +178,6 @@ Usage of vtgate: --remote_operation_timeout duration time to wait for a remote operation (default 15s) --retry-count int retry count (default 2) --schema_change_signal Enable the schema tracker; requires queryserver-config-schema-change-signal to be enabled on the underlying vttablets for this to work (default true) - --schema_change_signal_user string User to be used to send down query to vttablet to retrieve schema changes --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited) @@ -192,9 +219,6 @@ Usage of vtgate: --topo_global_root string the path of the global topology data in the global topology server --topo_global_server_address string the address of the global topology server --topo_implementation string the topology implementation to use - --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config - --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod - --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config --topo_read_concurrency int Concurrency of topo reads. 
(default 32) --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s) diff --git a/go/flags/endtoend/vtgateclienttest.txt b/go/flags/endtoend/vtgateclienttest.txt new file mode 100644 index 00000000000..4580d4d6ce7 --- /dev/null +++ b/go/flags/endtoend/vtgateclienttest.txt @@ -0,0 +1,67 @@ +vtgateclienttest is a chain of vtgateservice.VTGateService implementations, each one being responsible for one test scenario. + +Usage: + vtgateclienttest [flags] + +Flags: + --alsologtostderr log to standard error as well as files + --bind-address string Bind address for the server. If empty, the server will listen on all available unicast and anycast IP addresses of the local system. + --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified + --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. + --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) + --config-name string Name of the config file (without extension) to search for. (default "vtconfig") + --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) + --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) + --config-type string Config file type (omit to infer config type from file extension). + --default_tablet_type topodatapb.TabletType The default tablet type to set for queries, when one is not explicitly selected. 
(default PRIMARY) + --grpc_auth_mode string Which auth plugin implementation to use (eg: static) + --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon). + --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server. + --grpc_auth_static_password_file string JSON File to read the users/passwords from. + --grpc_bind_address string Bind address for gRPC calls. If empty, listen on all addresses. + --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check + --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS + --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy + --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake + --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port + --grpc_enable_tracing Enable gRPC tracing. + --grpc_initial_conn_window_size int gRPC initial connection window size + --grpc_initial_window_size int gRPC initial window size + --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s) + --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s) + --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS + --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. 
(default 2562047h47m16.854775807s) + --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s) + --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216) + --grpc_port int Port to listen on for gRPC calls. If zero, do not listen. + --grpc_prometheus Enable gRPC monitoring with Prometheus. + --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients + --grpc_server_initial_conn_window_size int gRPC server initial connection window size + --grpc_server_initial_window_size int gRPC server initial window size + --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s) + --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs) + -h, --help help for vtgateclienttest + --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) + --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) + --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --log_err_stacks log stack traces for errors + --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --logtostderr log to standard error instead of files + --max-stack-size int configure the maximum stack size in bytes (default 67108864) + --mysql_server_version string MySQL server version to advertise. 
(default "8.0.30-Vitess") + --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s) + --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s) + --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. + --port int port for the server + --pprof strings enable profiling + --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) + --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) + --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice + --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class + --v Level log level for V logs + -v, --version print binary version + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vschema_ddl_authorized_users string List of users authorized to execute vschema ddl operations, or '%' to allow all users. diff --git a/go/flags/endtoend/vtgr.txt b/go/flags/endtoend/vtgr.txt deleted file mode 100644 index 56698f88e23..00000000000 --- a/go/flags/endtoend/vtgr.txt +++ /dev/null @@ -1,93 +0,0 @@ -vtgr is deprecated and will be removed in Vitess 18. We recommend using VTOrc with semi-sync replication instead. -Usage of vtgr: - --abort_rebootstrap Don't allow vtgr to rebootstrap an existing group. - --alsologtostderr log to standard error as well as files - --clusters_to_watch strings Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. 
Example: "ks1,ks2/-80" - --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. - --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) - --config-name string Name of the config file (without extension) to search for. (default "vtconfig") - --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) - --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) - --config-type string Config file type (omit to infer config type from file extension). - --consul_auth_static_file string JSON File to read the topos/tokens from. - --db-credentials-file string db credentials file; send SIGHUP to reload this file - --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file") - --db-credentials-vault-addr string URL to Vault server - --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds - --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle") - --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable - --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable - --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s) - --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate - --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN 
environment variable - --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s) - --db_config string Full path to db config file that will be used by VTGR. - --db_flavor string MySQL flavor override. (default "MySQL56") - --db_port int Local mysql port, set this to enable local fast check. - --emit_stats If set, emit stats to push-based monitoring and stats backends - --enable_heartbeat_check Enable heartbeat checking, set together with --group_heartbeat_threshold. - --gr_port int Port to bootstrap a MySQL group. (default 33061) - --group_heartbeat_threshold int VTGR will trigger backoff on inconsistent state if the group heartbeat staleness exceeds this threshold (in seconds). Should be used along with --enable_heartbeat_check. - --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server. - --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy - --grpc_enable_tracing Enable gRPC tracing. - --grpc_initial_conn_window_size int gRPC initial connection window size - --grpc_initial_window_size int gRPC initial window size - --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s) - --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s) - --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 268435456) - --grpc_prometheus Enable gRPC monitoring with Prometheus. 
- -h, --help display usage and exit - --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) - --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_err_stacks log stack traces for errors - --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) - --logtostderr log to standard error instead of files - --ping_tablet_timeout duration time to wait when we ping a tablet (default 2s) - --pprof strings enable profiling - --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) - --refresh_interval duration Refresh interval to load tablets. (default 10s) - --remote_operation_timeout duration time to wait for a remote operation (default 15s) - --scan_interval duration Scan interval to diagnose and repair. (default 3s) - --scan_repair_timeout duration Time to wait for a Diagnose and repair operation. (default 3s) - --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) - --stats_backend string The name of the registered push-based monitoring/stats backend to use - --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars - --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2 - --stats_drop_variables string Variables to be dropped from the list of exported variables. 
- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) - --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting - --tablet_manager_grpc_cert string the cert to use to connect - --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8) - --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100) - --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting - --tablet_manager_grpc_key string the key to use to connect - --tablet_manager_grpc_server_name string the server name to use to validate server certificate - --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc") - --topo_consul_lock_delay duration LockDelay for consul session. (default 15s) - --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth") - --topo_consul_lock_session_ttl string TTL for consul session. - --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s) - --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. 
(default 30) - --topo_etcd_password string password to use to validate the server cert when connecting to the etcd topo server - --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server - --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS - --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS - --topo_etcd_username string username to use to validate the server cert when connecting to the etcd topo server - --topo_global_root string the path of the global topology data in the global topology server - --topo_global_server_address string the address of the global topology server - --topo_implementation string the topology implementation to use - --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass - --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s) - --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64) - --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server - --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS - --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS - --v Level log level for V logs - -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging - --vtgr_config string Config file for vtgr. diff --git a/go/flags/endtoend/vtorc.txt b/go/flags/endtoend/vtorc.txt index e8ab0515343..495df9b3e65 100644 --- a/go/flags/endtoend/vtorc.txt +++ b/go/flags/endtoend/vtorc.txt @@ -1,10 +1,30 @@ -Usage of vtorc: +VTOrc is the automated fault detection and repair tool in Vitess. 
+ +Usage: + vtorc [flags] + +Examples: +vtorc \ + --topo_implementation etcd2 \ + --topo_global_server_address localhost:2379 \ + --topo_global_root /vitess/global \ + --log_dir $VTDATAROOT/tmp \ + --port 15000 \ + --recovery-period-block-duration "10m" \ + --instance-poll-time "1s" \ + --topo-information-refresh-duration "30s" \ + --alsologtostderr + +Flags: + --allow-emergency-reparent Whether VTOrc should be allowed to run emergency reparent operation when it detects a dead primary (default true) --alsologtostderr log to standard error as well as files --audit-file-location string File location where the audit logs are to be stored --audit-purge-duration duration Duration for which audit logs are held before being purged. Should be in multiples of days (default 168h0m0s) --audit-to-backend Whether to store the audit log in the VTOrc database --audit-to-syslog Whether to store the audit log in the syslog + --bind-address string Bind address for the server. If empty, the server will listen on all available unicast and anycast IP addresses of the local system. --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified + --change-tablets-with-errant-gtid-to-drained Whether VTOrc should be changing the type of tablets with errant GTIDs to DRAINED --clusters_to_watch strings Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. Example: "ks1,ks2/-80" --config string config file name --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. @@ -24,7 +44,7 @@ Usage of vtorc: --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s) --grpc_max_message_size int Maximum allowed RPC message size. 
Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 268435456) --grpc_prometheus Enable gRPC monitoring with Prometheus. - -h, --help display usage and exit + -h, --help help for vtorc --instance-poll-time duration Timer duration on which VTOrc refreshes MySQL information (default 5s) --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) @@ -80,9 +100,6 @@ Usage of vtorc: --topo_global_root string the path of the global topology data in the global topology server --topo_global_server_address string the address of the global topology server --topo_implementation string the topology implementation to use - --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config - --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod - --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s) --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64) diff --git a/go/flags/endtoend/vttablet.txt b/go/flags/endtoend/vttablet.txt index 168c0222adc..2052b0f3ef4 100644 --- a/go/flags/endtoend/vttablet.txt +++ b/go/flags/endtoend/vttablet.txt @@ -1,4 +1,47 @@ -Usage of vttablet: +The VTTablet server _controls_ a running MySQL server. 
VTTablet supports two primary types of deployments: + +* Managed MySQL (most common) +* External MySQL + +In addition to these deployment types, a partially managed VTTablet is also possible by setting `--disable_active_reparents`. + +### Managed MySQL + +In this mode, Vitess actively manages MySQL. + +### External MySQL. + +In this mode, an external MySQL can be used such as AWS RDS, AWS Aurora, Google CloudSQL; or just an existing (vanilla) MySQL installation. + +See "Unmanaged Tablet" for the full guide. + +Even if a MySQL is external, you can still make vttablet perform some management functions. They are as follows: + +* `--disable_active_reparents`: If this flag is set, then any reparent or replica commands will not be allowed. These are InitShardPrimary, PlannedReparentShard, EmergencyReparentShard, and ReparentTablet. In this mode, you should use the TabletExternallyReparented command to inform vitess of the current primary. +* `--replication_connect_retry`: This value is given to mysql when it connects a replica to the primary as the retry duration parameter. +* `--enable_replication_reporter`: If this flag is set, then vttablet will transmit replica lag related information to the vtgates, which will allow it to balance load better. Additionally, enabling this will also cause vttablet to restart replication if it was stopped. However, it will do this only if `--disable_active_reparents` was not turned on. +* `--heartbeat_enable` and `--heartbeat_interval duration`: cause vttablet to write heartbeats to the sidecar database. This information is also used by the replication reporter to assess replica lag. 
+ +Usage: + vttablet [flags] + +Examples: + +vttablet \ + --topo_implementation etcd2 \ + --topo_global_server_address localhost:2379 \ + --topo_global_root /vitess/ \ + --tablet-path $alias \ + --init_keyspace $keyspace \ + --init_shard $shard \ + --init_tablet_type $tablet_type \ + --port $port \ + --grpc_port $grpc_port \ + --service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream' + +`$alias` needs to be of the form: `-id`, and the cell should match one of the local cells that was created in the topology. The id can be left padded with zeroes: `cell-100` and `cell-000000100` are synonymous. + +Flags: --alsologtostderr log to standard error as well as files --app_idle_timeout duration Idle timeout for app connections (default 1m0s) --app_pool_size int Size of the connection pool for app connections (default 40) @@ -13,6 +56,7 @@ Usage of vttablet: --backup_storage_compress if set, the backup files will be compressed. (default true) --backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups. --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, in parallel, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2) + --bind-address string Bind address for the server. If empty, the server will listen on all available unicast and anycast IP addresses of the local system. --binlog_host string PITR restore parameter: hostname/IP of binlog server. --binlog_password string PITR restore parameter: password of binlog server. 
--binlog_player_grpc_ca string the server ca to use to validate servers when connecting @@ -100,14 +144,12 @@ Usage of vttablet: --emit_stats If set, emit stats to push-based monitoring and stats backends --enable-consolidator Synonym to -enable_consolidator (default true) --enable-consolidator-replicas Synonym to -enable_consolidator_replicas - --enable-lag-throttler Synonym to -enable_lag_throttler --enable-per-workload-table-metrics If true, query counts and query error metrics include a label that identifies the workload --enable-tx-throttler Synonym to -enable_tx_throttler --enable_consolidator This option enables the query consolidator. (default true) --enable_consolidator_replicas This option enables the query consolidator only on replicas. --enable_hot_row_protection If true, incoming transactions for the same row (range) will be queued and cannot consume all txpool slots. --enable_hot_row_protection_dry_run If true, hot row protection is not enforced but logs if transactions would have been queued. - --enable_lag_throttler If true, vttablet will run a throttler service, and will implicitly enable heartbeats --enable_replication_reporter Use polling to track replication lag. --enable_transaction_limit If true, limit on number of transactions open at the same time will be enforced for all users. User trying to open a new transaction after exhausting their limit will receive an error immediately, regardless of whether there are available slots or not. --enable_transaction_limit_dry_run If true, limit on number of transactions open at the same time will be tracked for all users, but not enforced. @@ -129,6 +171,7 @@ Usage of vttablet: --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon). --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server. 
--grpc_auth_static_password_file string JSON File to read the users/passwords from. + --grpc_bind_address string Bind address for gRPC calls. If empty, listen on all addresses. --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy @@ -154,7 +197,7 @@ Usage of vttablet: --heartbeat_enable If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the sidecar database's heartbeat table. The result is used to inform the serving state of the vttablet via healthchecks. --heartbeat_interval duration How frequently to read and write replication heartbeat. (default 1s) --heartbeat_on_demand_duration duration If non-zero, heartbeats are only written upon consumer request, and only run for up to given duration following the request. Frequent requests can keep the heartbeat running consistently; when requests are infrequent heartbeat may completely stop between requests - -h, --help display usage and exit + -h, --help help for vttablet --hot_row_protection_concurrent_transactions int Number of concurrent transactions let through to the txpool/MySQL for the same hot row. Should be > 1 to have enough 'ready' transactions in MySQL and benefit from a pipelining effect. (default 5) --hot_row_protection_max_global_queue_size int Global queue limit across all row (ranges). Useful to prevent that the queue can grow unbounded. (default 1000) --hot_row_protection_max_queue_size int Maximum number of BeginExecute RPCs which will be queued for the same row (range). 
(default 20) @@ -227,9 +270,7 @@ Usage of vttablet: --queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting --queryserver-config-pool-conn-max-lifetime duration query server connection max lifetime (in seconds), vttablet manages various mysql connection pools. This config means if a connection has lived at least this long, the connection will be removed from the pool upon the next time it is returned to the pool. (default 0s) --queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16) - --queryserver-config-query-cache-lfu query server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries (default true) --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432) - --queryserver-config-query-cache-size int query server query cache size, maximum number of queries to be cached. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 5000) --queryserver-config-query-pool-timeout duration query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead. 
(default 0s) --queryserver-config-query-pool-waiter-cap int query server query pool waiter limit, this is the maximum number of queries that can be queued waiting to get a connection (default 5000) --queryserver-config-query-timeout duration query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30s) @@ -255,6 +296,8 @@ Usage of vttablet: --relay_log_max_size int Maximum buffer size (in bytes) for VReplication target buffering. If single rows are larger than this, a single row is buffered at a time. (default 250000) --remote_operation_timeout duration time to wait for a remote operation (default 15s) --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s) + --restore-to-pos string (init incremental restore parameter) if set, run a point in time recovery that ends with the given position. This will attempt to use one full backup followed by zero or more incremental backups + --restore-to-timestamp string (init incremental restore parameter) if set, run a point in time recovery that restores up to the given timestamp, if possible. Given timestamp in RFC3339 format. Example: '2006-01-02T15:04:05Z07:00' --restore_concurrency int (init restore parameter) how many concurrent files to restore at once (default 4) --restore_from_backup (init restore parameter) will check BackupStorage for a recent backup at startup and start there --restore_from_backup_ts string (init restore parameter) if set, restore the latest backup taken at or before this timestamp. Example: '2021-04-29.133050' @@ -312,9 +355,6 @@ Usage of vttablet: --tablet_manager_grpc_server_name string the server name to use to validate server certificate --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc") --tablet_protocol string Protocol to use to make queryservice RPCs to vttablets. 
(default "grpc") - --throttle_check_as_check_self Should throttler/check return a throttler/check-self result (changes throttler behavior for writes) - --throttle_metrics_query SELECT Override default heartbeat/lag metric. Use either SELECT (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. Set -throttle_metrics_threshold respectively. - --throttle_metrics_threshold float Override default throttle threshold, respective to --throttle_metrics_query (default 1.7976931348623157e+308) --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' aways implicitly included (default "replica") --throttle_threshold duration Replication lag threshold for default lag throttling (default 1s) --throttler-config-via-topo When 'true', read config from topo service and ignore throttle_threshold, throttle_metrics_threshold, throttle_metrics_query, throttle_check_as_check_self (default true) @@ -331,9 +371,6 @@ Usage of vttablet: --topo_global_root string the path of the global topology data in the global topology server --topo_global_server_address string the address of the global topology server --topo_implementation string the topology implementation to use - --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config - --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod - --topo_k8s_namespace string The kubernetes namespace to use for all objects. 
Default comes from the context or in-cluster config --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s) --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64) @@ -356,11 +393,13 @@ Usage of vttablet: --twopc_abandon_age float time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved. --twopc_coordinator_address string address of the (VTGate) process(es) that will be used to notify of abandoned transactions. --twopc_enable if the flag is on, 2pc is enabled. Other 2pc flags must be supplied. - --tx-throttler-config string Synonym to -tx_throttler_config (default "target_replication_lag_sec: 2\nmax_replication_lag_sec: 10\ninitial_rate: 100\nmax_increase: 1\nemergency_decrease: 0.5\nmin_duration_between_increases_sec: 40\nmax_duration_between_increases_sec: 62\nmin_duration_between_decreases_sec: 20\nspread_backlog_across_sec: 20\nage_bad_rate_after_sec: 180\nbad_rate_increase: 0.1\nmax_rate_approach_threshold: 0.9\n") + --tx-throttler-config string Synonym to -tx_throttler_config (default "target_replication_lag_sec:2 max_replication_lag_sec:10 initial_rate:100 max_increase:1 emergency_decrease:0.5 min_duration_between_increases_sec:40 max_duration_between_increases_sec:62 min_duration_between_decreases_sec:20 spread_backlog_across_sec:20 age_bad_rate_after_sec:180 bad_rate_increase:0.1 max_rate_approach_threshold:0.9") --tx-throttler-default-priority int Default priority assigned to queries that lack priority information (default 100) + --tx-throttler-dry-run If present, the transaction throttler only records metrics about requests received and throttled, but does not actually throttle any requests. 
--tx-throttler-healthcheck-cells strings Synonym to -tx_throttler_healthcheck_cells --tx-throttler-tablet-types strings A comma-separated list of tablet types. Only tablets of this type are monitored for replication lag by the transaction throttler. Supported types are replica and/or rdonly. (default replica) - --tx_throttler_config string The configuration of the transaction throttler as a text-formatted throttlerdata.Configuration protocol buffer message. (default "target_replication_lag_sec: 2\nmax_replication_lag_sec: 10\ninitial_rate: 100\nmax_increase: 1\nemergency_decrease: 0.5\nmin_duration_between_increases_sec: 40\nmax_duration_between_increases_sec: 62\nmin_duration_between_decreases_sec: 20\nspread_backlog_across_sec: 20\nage_bad_rate_after_sec: 180\nbad_rate_increase: 0.1\nmax_rate_approach_threshold: 0.9\n") + --tx-throttler-topo-refresh-interval duration The rate that the transaction throttler will refresh the topology to find cells. (default 5m0s) + --tx_throttler_config string The configuration of the transaction throttler as a text-formatted throttlerdata.Configuration protocol buffer message. (default "target_replication_lag_sec:2 max_replication_lag_sec:10 initial_rate:100 max_increase:1 emergency_decrease:0.5 min_duration_between_increases_sec:40 max_duration_between_increases_sec:62 min_duration_between_decreases_sec:20 spread_backlog_across_sec:20 age_bad_rate_after_sec:180 bad_rate_increase:0.1 max_rate_approach_threshold:0.9") --tx_throttler_healthcheck_cells strings A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler. 
--unhealthy_threshold duration replication lag after which a replica is considered unhealthy (default 2h0m0s) --v Level log level for V logs diff --git a/go/flags/endtoend/vttestserver.txt b/go/flags/endtoend/vttestserver.txt index c93bdc4ff4f..f1b666eb93c 100644 --- a/go/flags/endtoend/vttestserver.txt +++ b/go/flags/endtoend/vttestserver.txt @@ -1,4 +1,9 @@ -Usage of vttestserver: +vttestserver allows users to spawn a self-contained Vitess server for local testing/CI. + +Usage: + vttestserver [flags] + +Flags: --alsologtostderr log to standard error as well as files --app_idle_timeout duration Idle timeout for app connections (default 1m0s) --app_pool_size int Size of the connection pool for app connections (default 40) @@ -42,6 +47,7 @@ Usage of vttestserver: --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon). --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server. --grpc_auth_static_password_file string JSON File to read the users/passwords from. + --grpc_bind_address string Bind address for gRPC calls. If empty, listen on all addresses. --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. 
Supported: snappy @@ -63,7 +69,7 @@ Usage of vttestserver: --grpc_server_initial_window_size int gRPC server initial window size --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s) --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs) - -h, --help display usage and exit + -h, --help help for vttestserver --initialize_with_random_data If this flag is each table-shard will be initialized with random data. See also the 'rng_seed' and 'min_shard_size' and 'max_shard_size' flags. --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) @@ -87,9 +93,9 @@ Usage of vttestserver: --num_shards strings Comma separated shard count (one per keyspace) (default [2]) --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s) --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s) - --persistent_mode If this flag is set, the MySQL data directory is not cleaned up when LocalCluster.TearDown() is called. This is useful for running vttestserver as a database container in local developer environments. Note that db migration files (--schema_dir option) and seeding of random data (--initialize_with_random_data option) will only run during cluster startup if the data directory does not already exist. vschema migrations are run every time the cluster starts, since persistence for the topology server has not been implemented yet + --persistent_mode If this flag is set, the MySQL data directory is not cleaned up when LocalCluster.TearDown() is called. This is useful for running vttestserver as a database container in local developer environments. 
Note that db migration files (--schema_dir option) and seeding of random data (--initialize_with_random_data option) will only run during cluster startup if the data directory does not already exist. Changes to VSchema are persisted across cluster restarts using a simple watcher if the --data_dir argument is specified. --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. - --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: V3, V3Insert, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the new gen4 planner and falls back to the V3 planner if the gen4 fails. + --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled) --port int Port to use for vtcombo. If this is 0, a random port will be chosen. --pprof strings enable profiling diff --git a/go/flags/endtoend/zk.txt b/go/flags/endtoend/zk.txt index 443bf0b9ca2..add1b6b6803 100644 --- a/go/flags/endtoend/zk.txt +++ b/go/flags/endtoend/zk.txt @@ -1,8 +1,41 @@ -Usage of zk: - -h, --help display usage and exit +zk is a tool for wrangling zookeeper. + +It tries to mimic unix file system commands wherever possible, but +there are some slight differences in flag handling. + +The zk tool looks for the address of the cluster in /etc/zookeeper/zk_client.conf, +or the file specified in the ZK_CLIENT_CONFIG environment variable. + +The local cell may be overridden with the ZK_CLIENT_LOCAL_CELL environment +variable. + +Usage: + zk [command] + +Available Commands: + addAuth + cat + chmod + completion Generate the autocompletion script for the specified shell + cp + edit Create a local copy, edit, and write changes back to cell. 
+ help Help about any command + ls + rm + stat + touch Change node access time. + unzip + wait Sets a watch on the node and then waits for an event to fire. + watch Watches for changes to nodes and prints events as they occur. + zip Store a zk tree in a zip archive. + +Flags: + -h, --help help for zk --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --server string server(s) to connect to + +Use "zk [command] --help" for more information about a command. diff --git a/go/flags/endtoend/zkctl.txt b/go/flags/endtoend/zkctl.txt index 36ddace46db..d1aea061ea5 100644 --- a/go/flags/endtoend/zkctl.txt +++ b/go/flags/endtoend/zkctl.txt @@ -1,4 +1,17 @@ -Usage of zkctl: +Initializes and controls zookeeper with Vitess-specific configuration. + +Usage: + zkctl [command] + +Available Commands: + completion Generate the autocompletion script for the specified shell + help Help about any command + init Generates a new config and then starts zookeeper. + shutdown Terminates a zookeeper server but keeps its data dir intact. + start Runs an already initialized zookeeper server. + teardown Shuts down the zookeeper server and removes its data dir. + +Flags: --alsologtostderr log to standard error as well as files --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. 
(Options: error, exit, ignore, warn) (default warn) @@ -6,7 +19,7 @@ Usage of zkctl: --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) --config-type string Config file type (omit to infer config type from file extension). - -h, --help display usage and exit + -h, --help help for zkctl --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) @@ -21,4 +34,7 @@ Usage of zkctl: -v, --version print binary version --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging --zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default "6@:3801:3802:3803") + --zk.extra stringArray extra config line(s) to append verbatim to config (flag can be specified more than once) --zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname + +Use "zkctl [command] --help" for more information about a command. diff --git a/go/flags/endtoend/zkctld.txt b/go/flags/endtoend/zkctld.txt index 76f19523660..d808bd7ce67 100644 --- a/go/flags/endtoend/zkctld.txt +++ b/go/flags/endtoend/zkctld.txt @@ -1,25 +1,7 @@ -Usage of zkctld: - --alsologtostderr log to standard error as well as files - --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. - --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. 
(Options: error, exit, ignore, warn) (default warn) - --config-name string Name of the config file (without extension) to search for. (default "vtconfig") - --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) - --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) - --config-type string Config file type (omit to infer config type from file extension). - -h, --help display usage and exit - --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) - --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_err_stacks log stack traces for errors - --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) - --logtostderr log to standard error instead of files - --pprof strings enable profiling - --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) - --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) - --v Level log level for V logs - -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging - --zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default "6@:3801:3802:3803") - --zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname +zkctld is a daemon that starts or initializes ZooKeeper with Vitess-specific configuration. 
It will stay running as long as the underlying ZooKeeper server, and will pass along SIGTERM. + +Usage: + zkctld [flags] + +Flags: + -h, --help help for zkctld diff --git a/go/flagutil/flagutil.go b/go/flagutil/flagutil.go index f6604295730..ebf4ccef485 100644 --- a/go/flagutil/flagutil.go +++ b/go/flagutil/flagutil.go @@ -189,6 +189,17 @@ func DualFormatBoolVar(fs *pflag.FlagSet, p *bool, name string, value bool, usag } } +// DualFormatVar creates a flag which supports both dashes and underscores +func DualFormatVar(fs *pflag.FlagSet, val pflag.Value, name string, usage string) { + dashes := strings.Replace(name, "_", "-", -1) + underscores := strings.Replace(name, "-", "_", -1) + + fs.Var(val, underscores, usage) + if dashes != underscores { + fs.Var(val, dashes, fmt.Sprintf("Synonym to -%s", underscores)) + } +} + type Value[T any] interface { pflag.Value Get() T diff --git a/go/hack/hack.go b/go/hack/hack.go index 8b042950d1e..95bf11f5530 100644 --- a/go/hack/hack.go +++ b/go/hack/hack.go @@ -21,7 +21,6 @@ limitations under the License. package hack import ( - "reflect" "unsafe" ) @@ -37,10 +36,5 @@ func String(b []byte) (s string) { // StringBytes returns the underlying bytes for a string. Modifying this byte slice // will lead to undefined behavior. func StringBytes(s string) []byte { - var b []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - hdr.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - hdr.Cap = len(s) - hdr.Len = len(s) - return b + return unsafe.Slice(unsafe.StringData(s), len(s)) } diff --git a/go/hack/runtime.go b/go/hack/runtime.go index d1ccb699460..c80ac1d38e5 100644 --- a/go/hack/runtime.go +++ b/go/hack/runtime.go @@ -19,7 +19,6 @@ limitations under the License. package hack import ( - "reflect" "unsafe" ) @@ -35,8 +34,7 @@ func strhash(p unsafe.Pointer, h uintptr) uintptr // This is an optimal hash function which takes an input seed and is potentially implemented in hardware // for most architectures. 
This is the same hash function that the language's `map` uses. func RuntimeMemhash(b []byte, seed uint64) uint64 { - pstring := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - return uint64(memhash(unsafe.Pointer(pstring.Data), uintptr(seed), uintptr(pstring.Len))) + return uint64(memhash(unsafe.Pointer(unsafe.SliceData(b)), uintptr(seed), uintptr(len(b)))) } // RuntimeStrhash provides access to the Go runtime's default hash function for strings. diff --git a/go/internal/flag/flag.go b/go/internal/flag/flag.go index 6f087143610..ade4907e573 100644 --- a/go/internal/flag/flag.go +++ b/go/internal/flag/flag.go @@ -42,7 +42,7 @@ import ( // // See VEP-4, phase 1 for details: https://github.com/vitessio/enhancements/blob/c766ea905e55409cddeb666d6073cd2ac4c9783e/veps/vep-4.md#phase-1-preparation func Parse(fs *flag.FlagSet) { - preventGlogVFlagFromClobberingVersionFlagShorthand(fs) + PreventGlogVFlagFromClobberingVersionFlagShorthand(fs) fs.AddGoFlagSet(goflag.CommandLine) if fs.Lookup("help") == nil { @@ -115,7 +115,7 @@ func TrickGlog() { // // IMPORTANT: This must be called prior to AddGoFlagSet in both Parse and // ParseFlagsForTest. -func preventGlogVFlagFromClobberingVersionFlagShorthand(fs *flag.FlagSet) { +func PreventGlogVFlagFromClobberingVersionFlagShorthand(fs *flag.FlagSet) { // N.B. we use goflag.Lookup instead of this package's Lookup, because we // explicitly want to check only the goflags. 
if f := goflag.Lookup("v"); f != nil { @@ -178,7 +178,7 @@ func ParseFlagsForTest() { } // parse remaining flags including the log-related ones like --alsologtostderr - preventGlogVFlagFromClobberingVersionFlagShorthand(flag.CommandLine) + PreventGlogVFlagFromClobberingVersionFlagShorthand(flag.CommandLine) flag.CommandLine.AddGoFlagSet(goflag.CommandLine) flag.Parse() } diff --git a/go/ioutil/timeout_closer.go b/go/ioutil/timeout_closer.go new file mode 100644 index 00000000000..1f025fbdb44 --- /dev/null +++ b/go/ioutil/timeout_closer.go @@ -0,0 +1,59 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ioutil + +import ( + "context" + "io" + "time" +) + +// TimeoutCloser is an io.Closer that has a timeout for executing the Close() function. 
+type TimeoutCloser struct { + ctx context.Context + closer io.Closer + timeout time.Duration +} + +func NewTimeoutCloser(ctx context.Context, closer io.Closer, timeout time.Duration) *TimeoutCloser { + return &TimeoutCloser{ + ctx: ctx, + closer: closer, + timeout: timeout, + } +} + +func (c *TimeoutCloser) Close() error { + done := make(chan error) + + ctx, cancel := context.WithTimeout(c.ctx, c.timeout) + defer cancel() + + go func() { + defer close(done) + select { + case done <- c.closer.Close(): + case <-ctx.Done(): + } + }() + select { + case err := <-done: + return err + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/go/ioutil/timeout_closer_test.go b/go/ioutil/timeout_closer_test.go new file mode 100644 index 00000000000..9aabe307c85 --- /dev/null +++ b/go/ioutil/timeout_closer_test.go @@ -0,0 +1,53 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ioutil + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type hangCloser struct { + hang bool +} + +func (c hangCloser) Close() error { + if c.hang { + ch := make(chan bool) + ch <- true // hang forever + } + return nil +} + +func TestTimeoutCloser(t *testing.T) { + ctx := context.Background() + { + closer := NewTimeoutCloser(ctx, &hangCloser{hang: false}, time.Second) + err := closer.Close() + require.NoError(t, err) + } + { + closer := NewTimeoutCloser(ctx, &hangCloser{hang: true}, time.Second) + err := closer.Close() + require.Error(t, err) + assert.ErrorIs(t, err, context.DeadlineExceeded) + } +} diff --git a/go/maps2/maps.go b/go/maps2/maps.go new file mode 100644 index 00000000000..56191bea1a7 --- /dev/null +++ b/go/maps2/maps.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package maps2 + +// Keys returns the keys of the map m. +// The keys will be in an indeterminate order. +func Keys[M ~map[K]V, K comparable, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + return r +} + +// Values returns the values of the map m. +// The values will be in an indeterminate order. 
+func Values[M ~map[K]V, K comparable, V any](m M) []V { + r := make([]V, 0, len(m)) + for _, v := range m { + r = append(r, v) + } + return r +} diff --git a/go/mysql/auth_server.go b/go/mysql/auth_server.go index 64ff2beaa11..a01fdc59971 100644 --- a/go/mysql/auth_server.go +++ b/go/mysql/auth_server.go @@ -26,6 +26,7 @@ import ( "net" "sync" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -442,7 +443,7 @@ func (n *mysqlNativePasswordAuthMethod) AllowClearTextWithoutTLS() bool { func (n *mysqlNativePasswordAuthMethod) HandleAuthPluginData(conn *Conn, user string, serverAuthPluginData []byte, clientAuthPluginData []byte, remoteAddr net.Addr) (Getter, error) { if serverAuthPluginData[len(serverAuthPluginData)-1] != 0x00 { - return nil, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return nil, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } salt := serverAuthPluginData[:len(serverAuthPluginData)-1] @@ -534,7 +535,7 @@ func (n *mysqlCachingSha2AuthMethod) AllowClearTextWithoutTLS() bool { func (n *mysqlCachingSha2AuthMethod) HandleAuthPluginData(c *Conn, user string, serverAuthPluginData []byte, clientAuthPluginData []byte, remoteAddr net.Addr) (Getter, error) { if serverAuthPluginData[len(serverAuthPluginData)-1] != 0x00 { - return nil, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return nil, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } salt := serverAuthPluginData[:len(serverAuthPluginData)-1] @@ -546,7 +547,7 @@ func (n *mysqlCachingSha2AuthMethod) HandleAuthPluginData(c *Conn, user string, switch cacheState { case AuthRejected: - return nil, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + 
return nil, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) case AuthAccepted: // We need to write a more data packet to indicate the // handshake completed properly. This will be followed @@ -561,7 +562,7 @@ func (n *mysqlCachingSha2AuthMethod) HandleAuthPluginData(c *Conn, user string, return result, nil case AuthNeedMoreData: if !c.TLSEnabled() && !c.IsUnixSocket() { - return nil, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return nil, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } data, pos := c.startEphemeralPacketWithHeader(2) @@ -577,7 +578,7 @@ func (n *mysqlCachingSha2AuthMethod) HandleAuthPluginData(c *Conn, user string, return n.storage.UserEntryWithPassword(c, user, password, remoteAddr) default: // Somehow someone returned an unknown state, let's error with access denied. - return nil, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return nil, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } } diff --git a/go/mysql/auth_server_clientcert_test.go b/go/mysql/auth_server_clientcert_test.go index 560be63bab6..8b0530da253 100644 --- a/go/mysql/auth_server_clientcert_test.go +++ b/go/mysql/auth_server_clientcert_test.go @@ -45,7 +45,7 @@ func testValidCert(t *testing.T) { authServer := newAuthServerClientCert() // Create the listener, so we can get its host. 
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -114,7 +114,7 @@ func testNoCert(t *testing.T) { authServer := newAuthServerClientCert() // Create the listener, so we can get its host. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() diff --git a/go/mysql/auth_server_config.go b/go/mysql/auth_server_config.go index ec2edb28a44..68deee34584 100644 --- a/go/mysql/auth_server_config.go +++ b/go/mysql/auth_server_config.go @@ -10,6 +10,8 @@ import ( "os" "sync" + "vitess.io/vitess/go/mysql/sqlerror" + "github.com/spf13/pflag" "vitess.io/vitess/go/ipfilters" @@ -249,13 +251,13 @@ func (asc *AuthServerConfig) UserEntryWithHash(conn *Conn, salt []byte, user str entry, ok := asc.Entries[user] asc.mu.Unlock() if !ok { - return &ConfigUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &ConfigUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } if entry.MysqlNativePassword != "" { hash, err := DecodeMysqlNativePasswordHex(entry.MysqlNativePassword) if err != nil { - return &ConfigUserData{username: entry.UserData, groups: entry.Groups}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &ConfigUserData{username: entry.UserData, groups: entry.Groups}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } isPass := VerifyHashedMysqlNativePassword(authResponse, salt, 
hash) if isPass { @@ -274,17 +276,17 @@ func (asc *AuthServerConfig) UserEntryWithHash(conn *Conn, salt []byte, user str } else if encryptFromEnt.UserData == "v_0001" { ecnPass, err := aseV001(entry.Password, []byte("akArIfh/a28N8w==")) if err != nil { - return &ConfigUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v' error ses descryption", user) + return &ConfigUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v' error ses descryption", user) } ecnPassStr = ecnPass } else { - return &ConfigUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v' error desencryption verssion", user) + return &ConfigUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v' error desencryption verssion", user) } computedAuthResponse := ScrambleMysqlNativePassword(salt, []byte(ecnPassStr)) if !bytes.Equal(authResponse, computedAuthResponse) { - return &ConfigUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &ConfigUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } return &ConfigUserData{entry.UserData, entry.Groups}, nil } @@ -301,12 +303,12 @@ func (asc *AuthServerConfig) ValidateClearText(user, password string) (string, e entry, ok := asc.Entries[user] asc.mu.Unlock() if !ok { - return "", NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return "", sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } // Validate the password. 
if entry.Password != password { - return "", NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return "", sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } return entry.UserData, nil @@ -319,7 +321,7 @@ func (asc *AuthServerConfig) GetPrivilege(user string) (uint16, error) { entry, ok := asc.Entries[user] asc.mu.Unlock() if !ok { - return 0, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return 0, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } return entry.Privilege, nil } @@ -332,7 +334,7 @@ func (asc *AuthServerConfig) GetUserKeyspaces(user string) ([]string, error) { entry, ok := asc.Entries[user] asc.mu.Unlock() if !ok { - return nil, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return nil, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } for _, v := range entry.KeySpaces { userKeyspaces = append(userKeyspaces, v.Name) @@ -344,7 +346,7 @@ func (asc *AuthServerConfig) GetUserKeyspaces(user string) ([]string, error) { func (asc *AuthServerConfig) GetKeyspace(user string) ([]string, error) { entry, ok := asc.Entries[user] if !ok { - return nil, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return nil, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } if len(entry.KeySpaces) == 0 { return nil, nil @@ -361,7 +363,7 @@ func (asc *AuthServerConfig) GetKeyspace(user string) ([]string, error) { func (asc *AuthServerConfig) GetRoleType(user string) (int8, error) { entry, ok := asc.Entries[user] if !ok { - return 0, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + 
return 0, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } if len(entry.KeySpaces) == 0 { return 0, nil @@ -374,7 +376,7 @@ func (asc *AuthServerConfig) GetPassword(user string) (string, error) { // Find the entry. entry, ok := asc.Entries[user] if !ok { - return "", NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return "", sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } encryptFromEnt := asc.Entries["encrypt_version"] @@ -385,12 +387,12 @@ func (asc *AuthServerConfig) GetPassword(user string) (string, error) { if encryptFromEnt.UserData == "v_0001" { ecnPass, err := aseV001(entry.Password, []byte("akArIfh/a28N8w==")) if err != nil { - return "", NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v' error ses descryption", user) + return "", sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v' error ses descryption", user) } return ecnPass, nil } - return "", NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "unsupported encrypt version for user :%v", user) + return "", sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "unsupported encrypt version for user :%v", user) } // ConfigUserData holds the username and groups diff --git a/go/mysql/auth_server_static.go b/go/mysql/auth_server_static.go index 15d155ce061..c8a43968625 100644 --- a/go/mysql/auth_server_static.go +++ b/go/mysql/auth_server_static.go @@ -29,6 +29,8 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vterrors" @@ -179,7 +181,7 @@ func (a *AuthServerStatic) UserEntryWithPassword(conn *Conn, user string, passwo a.mu.Unlock() if !ok { - return &StaticUserData{}, 
NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &StaticUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } for _, entry := range entries { @@ -188,7 +190,7 @@ func (a *AuthServerStatic) UserEntryWithPassword(conn *Conn, user string, passwo return &StaticUserData{entry.UserData, entry.Groups}, nil } } - return &StaticUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &StaticUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } // UserEntryWithHash implements password lookup based on a @@ -199,14 +201,14 @@ func (a *AuthServerStatic) UserEntryWithHash(conn *Conn, salt []byte, user strin a.mu.Unlock() if !ok { - return &StaticUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &StaticUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } for _, entry := range entries { if entry.MysqlNativePassword != "" { hash, err := DecodeMysqlNativePasswordHex(entry.MysqlNativePassword) if err != nil { - return &StaticUserData{entry.UserData, entry.Groups}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &StaticUserData{entry.UserData, entry.Groups}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } isPass := VerifyHashedMysqlNativePassword(authResponse, salt, hash) @@ -221,7 +223,7 @@ func (a *AuthServerStatic) UserEntryWithHash(conn *Conn, salt []byte, user strin } } } - return &StaticUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &StaticUserData{}, 
sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } // UserEntryWithCacheHash implements password lookup based on a @@ -232,7 +234,7 @@ func (a *AuthServerStatic) UserEntryWithCacheHash(conn *Conn, salt []byte, user a.mu.Unlock() if !ok { - return &StaticUserData{}, AuthRejected, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &StaticUserData{}, AuthRejected, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } for _, entry := range entries { @@ -243,7 +245,7 @@ func (a *AuthServerStatic) UserEntryWithCacheHash(conn *Conn, salt []byte, user return &StaticUserData{entry.UserData, entry.Groups}, AuthAccepted, nil } } - return &StaticUserData{}, AuthRejected, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &StaticUserData{}, AuthRejected, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } // AuthMethods returns the AuthMethod instances this auth server can handle. 
diff --git a/go/mysql/auth_server_static_flaky_test.go b/go/mysql/auth_server_static_flaky_test.go index 52e8fee8ab4..12ae74e0d60 100644 --- a/go/mysql/auth_server_static_flaky_test.go +++ b/go/mysql/auth_server_static_flaky_test.go @@ -126,9 +126,7 @@ func TestStaticConfigHUP(t *testing.T) { mu.Lock() defer mu.Unlock() // delete registered Auth server - for auth := range authServers { - delete(authServers, auth) - } + clear(authServers) } func TestStaticConfigHUPWithRotation(t *testing.T) { diff --git a/go/mysql/binlog/binlog_json_test.go b/go/mysql/binlog/binlog_json_test.go index f6d4fe7fcf2..5652b58567e 100644 --- a/go/mysql/binlog/binlog_json_test.go +++ b/go/mysql/binlog/binlog_json_test.go @@ -265,7 +265,7 @@ func TestMarshalJSONToSQL(t *testing.T) { { name: "null", data: []byte{}, - expected: "CAST(null as JSON)", + expected: "CAST(_utf8mb4'null' as JSON)", }, { name: `object {"a": "b"}`, @@ -330,17 +330,17 @@ func TestMarshalJSONToSQL(t *testing.T) { { name: `true`, data: []byte{4, 1}, - expected: `CAST(true as JSON)`, + expected: `CAST(_utf8mb4'true' as JSON)`, }, { name: `false`, data: []byte{4, 2}, - expected: `CAST(false as JSON)`, + expected: `CAST(_utf8mb4'false' as JSON)`, }, { name: `null`, data: []byte{4, 0}, - expected: `CAST(null as JSON)`, + expected: `CAST(_utf8mb4'null' as JSON)`, }, { name: `-1`, diff --git a/go/mysql/binlog_dump.go b/go/mysql/binlog_dump.go index 8383a590c5e..d6768056974 100644 --- a/go/mysql/binlog_dump.go +++ b/go/mysql/binlog_dump.go @@ -20,6 +20,7 @@ import ( "encoding/binary" "io" + "vitess.io/vitess/go/mysql/replication" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -51,7 +52,7 @@ func (c *Conn) parseComBinlogDump(data []byte) (logFile string, binlogPos uint32 return logFile, binlogPos, nil } -func (c *Conn) parseComBinlogDumpGTID(data []byte) (logFile string, logPos uint64, position Position, err error) { +func (c *Conn) parseComBinlogDumpGTID(data []byte) (logFile string, logPos 
uint64, position replication.Position, err error) { // see https://dev.mysql.com/doc/internals/en/com-binlog-dump-gtid.html pos := 1 @@ -80,7 +81,7 @@ func (c *Conn) parseComBinlogDumpGTID(data []byte) (logFile string, logPos uint6 return logFile, logPos, position, readPacketErr } if gtid := string(data[pos : pos+int(dataSize)]); gtid != "" { - position, err = DecodePosition(gtid) + position, err = replication.DecodePosition(gtid) if err != nil { return logFile, logPos, position, err } diff --git a/go/mysql/binlog_event.go b/go/mysql/binlog_event.go index 0e9bfc1f155..e58cb9b254c 100644 --- a/go/mysql/binlog_event.go +++ b/go/mysql/binlog_event.go @@ -19,6 +19,7 @@ package mysql import ( "fmt" + "vitess.io/vitess/go/mysql/replication" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) @@ -95,7 +96,7 @@ type BinlogEvent interface { // GTID returns the GTID from the event, and if this event // also serves as a BEGIN statement. // This is only valid if IsGTID() returns true. - GTID(BinlogFormat) (GTID, bool, error) + GTID(BinlogFormat) (replication.GTID, bool, error) // Query returns a Query struct representing data from a QUERY_EVENT. // This is only valid if IsQuery() returns true. Query(BinlogFormat) (Query, error) @@ -107,7 +108,7 @@ type BinlogEvent interface { Rand(BinlogFormat) (uint64, uint64, error) // PreviousGTIDs returns the Position from the event. // This is only valid if IsPreviousGTIDs() returns true. - PreviousGTIDs(BinlogFormat) (Position, error) + PreviousGTIDs(BinlogFormat) (replication.Position, error) // TableID returns the table ID for a TableMap, UpdateRows, // WriteRows or DeleteRows event. 
diff --git a/go/mysql/binlog_event_filepos.go b/go/mysql/binlog_event_filepos.go index 8cf7ea11db9..4edc4bb91ff 100644 --- a/go/mysql/binlog_event_filepos.go +++ b/go/mysql/binlog_event_filepos.go @@ -19,6 +19,8 @@ package mysql import ( "encoding/binary" "fmt" + + "vitess.io/vitess/go/mysql/replication" ) // filePosBinlogEvent wraps a raw packet buffer and provides methods to examine @@ -38,7 +40,7 @@ func newFilePosBinlogEvent(buf []byte) *filePosBinlogEvent { return &filePosBinlogEvent{binlogEvent: binlogEvent(buf)} } -func (*filePosBinlogEvent) GTID(BinlogFormat) (GTID, bool, error) { +func (*filePosBinlogEvent) GTID(BinlogFormat) (replication.GTID, bool, error) { return nil, false, nil } @@ -51,8 +53,8 @@ func (*filePosBinlogEvent) IsGTID() bool { return false } -func (*filePosBinlogEvent) PreviousGTIDs(BinlogFormat) (Position, error) { - return Position{}, fmt.Errorf("filePos should not provide PREVIOUS_GTIDS_EVENT events") +func (*filePosBinlogEvent) PreviousGTIDs(BinlogFormat) (replication.Position, error) { + return replication.Position{}, fmt.Errorf("filePos should not provide PREVIOUS_GTIDS_EVENT events") } // StripChecksum implements BinlogEvent.StripChecksum(). @@ -213,7 +215,7 @@ func (ev filePosFakeEvent) Format() (BinlogFormat, error) { return BinlogFormat{}, nil } -func (ev filePosFakeEvent) GTID(BinlogFormat) (GTID, bool, error) { +func (ev filePosFakeEvent) GTID(BinlogFormat) (replication.GTID, bool, error) { return nil, false, nil } @@ -229,8 +231,8 @@ func (ev filePosFakeEvent) Rand(BinlogFormat) (uint64, uint64, error) { return 0, 0, nil } -func (ev filePosFakeEvent) PreviousGTIDs(BinlogFormat) (Position, error) { - return Position{}, nil +func (ev filePosFakeEvent) PreviousGTIDs(BinlogFormat) (replication.Position, error) { + return replication.Position{}, nil } func (ev filePosFakeEvent) TableID(BinlogFormat) uint64 { @@ -270,7 +272,7 @@ func (ev filePosFakeEvent) Bytes() []byte { // filePosGTIDEvent is a fake GTID event for filePos. 
type filePosGTIDEvent struct { filePosFakeEvent - gtid filePosGTID + gtid replication.FilePosGTID } func newFilePosGTIDEvent(file string, pos uint32, timestamp uint32) filePosGTIDEvent { @@ -278,9 +280,9 @@ func newFilePosGTIDEvent(file string, pos uint32, timestamp uint32) filePosGTIDE filePosFakeEvent: filePosFakeEvent{ timestamp: timestamp, }, - gtid: filePosGTID{ - file: file, - pos: pos, + gtid: replication.FilePosGTID{ + File: file, + Pos: pos, }, } } @@ -293,6 +295,6 @@ func (ev filePosGTIDEvent) StripChecksum(f BinlogFormat) (BinlogEvent, []byte, e return ev, nil, nil } -func (ev filePosGTIDEvent) GTID(BinlogFormat) (GTID, bool, error) { +func (ev filePosGTIDEvent) GTID(BinlogFormat) (replication.GTID, bool, error) { return ev.gtid, false, nil } diff --git a/go/mysql/binlog_event_make.go b/go/mysql/binlog_event_make.go index 0688fa9540b..52a8c453517 100644 --- a/go/mysql/binlog_event_make.go +++ b/go/mysql/binlog_event_make.go @@ -19,6 +19,8 @@ package mysql import ( "encoding/binary" "hash/crc32" + + "vitess.io/vitess/go/mysql/replication" ) const ( @@ -292,7 +294,7 @@ func NewIntVarEvent(f BinlogFormat, s *FakeBinlogStream, typ byte, value uint64) // NewMariaDBGTIDEvent returns a MariaDB specific GTID event. // It ignores the Server in the gtid, instead uses the FakeBinlogStream.ServerID. 
-func NewMariaDBGTIDEvent(f BinlogFormat, s *FakeBinlogStream, gtid MariadbGTID, hasBegin bool) BinlogEvent { +func NewMariaDBGTIDEvent(f BinlogFormat, s *FakeBinlogStream, gtid replication.MariadbGTID, hasBegin bool) BinlogEvent { length := 8 + // sequence 4 + // domain 1 // flags2 diff --git a/go/mysql/binlog_event_make_test.go b/go/mysql/binlog_event_make_test.go index 7eb94fef848..12d8a54ff97 100644 --- a/go/mysql/binlog_event_make_test.go +++ b/go/mysql/binlog_event_make_test.go @@ -23,6 +23,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/binlog" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) @@ -149,7 +151,7 @@ func TestMariadDBGTIDEVent(t *testing.T) { s.ServerID = 0x87654321 // With built-in begin. - event := NewMariaDBGTIDEvent(f, s, MariadbGTID{Domain: 0, Sequence: 0x123456789abcdef0}, true) + event := NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0x123456789abcdef0}, true) require.True(t, event.IsValid(), "NewMariaDBGTIDEvent().IsValid() is false") require.True(t, event.IsGTID(), "NewMariaDBGTIDEvent().IsGTID() if false") @@ -160,7 +162,7 @@ func TestMariadDBGTIDEVent(t *testing.T) { require.NoError(t, err, "NewMariaDBGTIDEvent().GTID() returned error: %v", err) require.True(t, hasBegin, "NewMariaDBGTIDEvent() didn't store hasBegin properly.") - mgtid, ok := gtid.(MariadbGTID) + mgtid, ok := gtid.(replication.MariadbGTID) require.True(t, ok, "NewMariaDBGTIDEvent().GTID() returned a non-MariaDBGTID GTID") if mgtid.Domain != 0 || mgtid.Server != 0x87654321 || mgtid.Sequence != 0x123456789abcdef0 { @@ -168,7 +170,7 @@ func TestMariadDBGTIDEVent(t *testing.T) { } // Without built-in begin. 
- event = NewMariaDBGTIDEvent(f, s, MariadbGTID{Domain: 0, Sequence: 0x123456789abcdef0}, false) + event = NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0x123456789abcdef0}, false) require.True(t, event.IsValid(), "NewMariaDBGTIDEvent().IsValid() is false") require.True(t, event.IsGTID(), "NewMariaDBGTIDEvent().IsGTID() if false") @@ -179,7 +181,7 @@ func TestMariadDBGTIDEVent(t *testing.T) { require.NoError(t, err, "NewMariaDBGTIDEvent().GTID() returned error: %v", err) require.False(t, hasBegin, "NewMariaDBGTIDEvent() didn't store hasBegin properly.") - mgtid, ok = gtid.(MariadbGTID) + mgtid, ok = gtid.(replication.MariadbGTID) require.True(t, ok, "NewMariaDBGTIDEvent().GTID() returned a non-MariaDBGTID GTID") if mgtid.Domain != 0 || mgtid.Server != 0x87654321 || mgtid.Sequence != 0x123456789abcdef0 { diff --git a/go/mysql/binlog_event_mariadb.go b/go/mysql/binlog_event_mariadb.go index 33f858c2f36..f2c0ec8f369 100644 --- a/go/mysql/binlog_event_mariadb.go +++ b/go/mysql/binlog_event_mariadb.go @@ -19,6 +19,7 @@ package mysql import ( "encoding/binary" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -59,13 +60,13 @@ func (ev mariadbBinlogEvent) IsGTID() bool { // 8 sequence number // 4 domain ID // 1 flags2 -func (ev mariadbBinlogEvent) GTID(f BinlogFormat) (GTID, bool, error) { +func (ev mariadbBinlogEvent) GTID(f BinlogFormat) (replication.GTID, bool, error) { const FLStandalone = 1 data := ev.Bytes()[f.HeaderLength:] flags2 := data[8+4] - return MariadbGTID{ + return replication.MariadbGTID{ Sequence: binary.LittleEndian.Uint64(data[:8]), Domain: binary.LittleEndian.Uint32(data[8 : 8+4]), Server: ev.ServerID(), @@ -73,8 +74,8 @@ func (ev mariadbBinlogEvent) GTID(f BinlogFormat) (GTID, bool, error) { } // PreviousGTIDs implements BinlogEvent.PreviousGTIDs(). 
-func (ev mariadbBinlogEvent) PreviousGTIDs(f BinlogFormat) (Position, error) { - return Position{}, vterrors.Errorf(vtrpc.Code_INTERNAL, "MariaDB should not provide PREVIOUS_GTIDS_EVENT events") +func (ev mariadbBinlogEvent) PreviousGTIDs(f BinlogFormat) (replication.Position, error) { + return replication.Position{}, vterrors.Errorf(vtrpc.Code_INTERNAL, "MariaDB should not provide PREVIOUS_GTIDS_EVENT events") } // StripChecksum implements BinlogEvent.StripChecksum(). diff --git a/go/mysql/binlog_event_mariadb_test.go b/go/mysql/binlog_event_mariadb_test.go index 1464da0e573..c4eeac39c38 100644 --- a/go/mysql/binlog_event_mariadb_test.go +++ b/go/mysql/binlog_event_mariadb_test.go @@ -22,6 +22,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/replication" ) // sample event data @@ -99,7 +101,7 @@ func TestMariadbStandaloneBinlogEventGTID(t *testing.T) { } input := mariadbBinlogEvent{binlogEvent: binlogEvent(mariadbStandaloneGTIDEvent)} - want := MariadbGTID{Domain: 0, Server: 62344, Sequence: 9} + want := replication.MariadbGTID{Domain: 0, Server: 62344, Sequence: 9} got, hasBegin, err := input.GTID(f) assert.NoError(t, err, "unexpected error: %v", err) assert.False(t, hasBegin, "unexpected hasBegin") @@ -115,7 +117,7 @@ func TestMariadbBinlogEventGTID(t *testing.T) { } input := mariadbBinlogEvent{binlogEvent: binlogEvent(mariadbBeginGTIDEvent)} - want := MariadbGTID{Domain: 0, Server: 62344, Sequence: 10} + want := replication.MariadbGTID{Domain: 0, Server: 62344, Sequence: 10} got, hasBegin, err := input.GTID(f) assert.NoError(t, err, "unexpected error: %v", err) assert.True(t, hasBegin, "unexpected !hasBegin") diff --git a/go/mysql/binlog_event_mysql56.go b/go/mysql/binlog_event_mysql56.go index 2e6cfec2dfa..3f931310ba9 100644 --- a/go/mysql/binlog_event_mysql56.go +++ b/go/mysql/binlog_event_mysql56.go @@ -19,6 +19,7 @@ package mysql import ( "encoding/binary" + 
"vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -59,22 +60,22 @@ func (ev mysql56BinlogEvent) IsGTID() bool { // 1 flags // 16 SID (server UUID) // 8 GNO (sequence number, signed int) -func (ev mysql56BinlogEvent) GTID(f BinlogFormat) (GTID, bool, error) { +func (ev mysql56BinlogEvent) GTID(f BinlogFormat) (replication.GTID, bool, error) { data := ev.Bytes()[f.HeaderLength:] - var sid SID + var sid replication.SID copy(sid[:], data[1:1+16]) gno := int64(binary.LittleEndian.Uint64(data[1+16 : 1+16+8])) - return Mysql56GTID{Server: sid, Sequence: gno}, false /* hasBegin */, nil + return replication.Mysql56GTID{Server: sid, Sequence: gno}, false /* hasBegin */, nil } // PreviousGTIDs implements BinlogEvent.PreviousGTIDs(). -func (ev mysql56BinlogEvent) PreviousGTIDs(f BinlogFormat) (Position, error) { +func (ev mysql56BinlogEvent) PreviousGTIDs(f BinlogFormat) (replication.Position, error) { data := ev.Bytes()[f.HeaderLength:] - set, err := NewMysql56GTIDSetFromSIDBlock(data) + set, err := replication.NewMysql56GTIDSetFromSIDBlock(data) if err != nil { - return Position{}, err + return replication.Position{}, err } - return Position{ + return replication.Position{ GTIDSet: set, }, nil } diff --git a/go/mysql/binlog_event_mysql56_test.go b/go/mysql/binlog_event_mysql56_test.go index 86b58862ef9..e5fa3545278 100644 --- a/go/mysql/binlog_event_mysql56_test.go +++ b/go/mysql/binlog_event_mysql56_test.go @@ -23,6 +23,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/replication" ) // Sample event data for MySQL 5.6. 
@@ -79,25 +81,14 @@ func TestMysql56GTID(t *testing.T) { require.NoError(t, err, "StripChecksum() error: %v", err) require.True(t, input.IsGTID(), "IsGTID() = false, want true") - want, _ := parseMysql56GTID("439192bd-f37c-11e4-bbeb-0242ac11035a:4") + want := replication.Mysql56GTID{ + Server: replication.SID{0x43, 0x91, 0x92, 0xbd, 0xf3, 0x7c, 0x11, 0xe4, 0xbb, 0xeb, 0x2, 0x42, 0xac, 0x11, 0x3, 0x5a}, + Sequence: 4, + } got, hasBegin, err := input.GTID(format) require.NoError(t, err, "GTID() error: %v", err) assert.False(t, hasBegin, "GTID() returned hasBegin") assert.Equal(t, want, got, "GTID() = %#v, want %#v", got, want) - -} - -func TestMysql56ParseGTID(t *testing.T) { - input := "00010203-0405-0607-0809-0A0B0C0D0E0F:56789" - want := Mysql56GTID{ - Server: SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - Sequence: 56789, - } - - got, err := parseMysql56GTID(input) - require.NoError(t, err, "unexpected error: %v", err) - assert.Equal(t, want, got, "(&mysql56{}).ParseGTID(%#v) = %#v, want %#v", input, got, want) - } func TestMysql56DecodeTransactionPayload(t *testing.T) { @@ -148,13 +139,13 @@ func TestMysql56DecodeTransactionPayload(t *testing.T) { func TestMysql56ParsePosition(t *testing.T) { input := "00010203-0405-0607-0809-0a0b0c0d0e0f:1-2" - sid := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - var set GTIDSet = Mysql56GTIDSet{} - set = set.AddGTID(Mysql56GTID{Server: sid, Sequence: 1}) - set = set.AddGTID(Mysql56GTID{Server: sid, Sequence: 2}) - want := Position{GTIDSet: set} + sid := replication.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + var set replication.GTIDSet = replication.Mysql56GTIDSet{} + set = set.AddGTID(replication.Mysql56GTID{Server: sid, Sequence: 1}) + set = set.AddGTID(replication.Mysql56GTID{Server: sid, Sequence: 2}) + want := replication.Position{GTIDSet: set} - got, err := ParsePosition(Mysql56FlavorID, input) + got, err := replication.ParsePosition(replication.Mysql56FlavorID, input) 
assert.NoError(t, err, "unexpected error: %v", err) assert.True(t, got.Equal(want), "(&mysql56{}).ParsePosition(%#v) = %#v, want %#v", input, got, want) diff --git a/go/mysql/client.go b/go/mysql/client.go index 487f1d0fe52..f6c4e17cbfc 100644 --- a/go/mysql/client.go +++ b/go/mysql/client.go @@ -27,6 +27,7 @@ import ( "time" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/sqlerror" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttls" @@ -94,11 +95,11 @@ func Connect(ctx context.Context, params *ConnParams) (*Conn, error) { // should return a 2003. if netProto == "tcp" { status <- connectResult{ - err: NewSQLError(CRConnHostError, SSUnknownSQLState, "net.Dial(%v) failed: %v", addr, err), + err: sqlerror.NewSQLError(sqlerror.CRConnHostError, sqlerror.SSUnknownSQLState, "net.Dial(%v) failed: %v", addr, err), } } else { status <- connectResult{ - err: NewSQLError(CRConnectionError, SSUnknownSQLState, "net.Dial(%v) to local server failed: %v", addr, err), + err: sqlerror.NewSQLError(sqlerror.CRConnectionError, sqlerror.SSUnknownSQLState, "net.Dial(%v) to local server failed: %v", addr, err), } } return @@ -178,11 +179,11 @@ func (c *Conn) Ping() error { data[pos] = ComPing if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err) } data, err := c.readEphemeralPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } defer c.recycleReadPacket() switch data[0] { @@ -207,7 +208,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error { // Wait for the server initial handshake packet, and parse it. 
data, err := c.readPacket() if err != nil { - return NewSQLError(CRServerLost, "", "initial packet read failed: %v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, "", "initial packet read failed: %v", err) } capabilities, salt, err := c.parseInitialHandshakePacket(data) if err != nil { @@ -218,7 +219,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error { // Sanity check. if capabilities&CapabilityClientProtocol41 == 0 { - return NewSQLError(CRVersionError, SSUnknownSQLState, "cannot connect to servers earlier than 4.1") + return sqlerror.NewSQLError(sqlerror.CRVersionError, sqlerror.SSUnknownSQLState, "cannot connect to servers earlier than 4.1") } // Remember a subset of the capabilities, so we can use them @@ -238,7 +239,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error { // If client asked for SSL, but server doesn't support it, // stop right here. if params.SslRequired() && capabilities&CapabilityClientSSL == 0 { - return NewSQLError(CRSSLConnectionError, SSUnknownSQLState, "server doesn't support SSL but client asked for it") + return sqlerror.NewSQLError(sqlerror.CRSSLConnectionError, sqlerror.SSUnknownSQLState, "server doesn't support SSL but client asked for it") } // The ServerName to verify depends on what the hostname is. @@ -259,13 +260,13 @@ func (c *Conn) clientHandshake(params *ConnParams) error { tlsVersion, err := vttls.TLSVersionToNumber(params.TLSMinVersion) if err != nil { - return NewSQLError(CRSSLConnectionError, SSUnknownSQLState, "error parsing minimal TLS version: %v", err) + return sqlerror.NewSQLError(sqlerror.CRSSLConnectionError, sqlerror.SSUnknownSQLState, "error parsing minimal TLS version: %v", err) } // Build the TLS config. 
clientConfig, err := vttls.ClientConfig(params.EffectiveSslMode(), params.SslCert, params.SslKey, params.SslCa, params.SslCrl, serverName, tlsVersion) if err != nil { - return NewSQLError(CRSSLConnectionError, SSUnknownSQLState, "error loading client cert and ca: %v", err) + return sqlerror.NewSQLError(sqlerror.CRSSLConnectionError, sqlerror.SSUnknownSQLState, "error loading client cert and ca: %v", err) } // Send the SSLRequest packet. @@ -296,7 +297,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error { } else if params.Flags&CapabilityClientSessionTrack == CapabilityClientSessionTrack { // If client asked for ClientSessionTrack, but server doesn't support it, // stop right here. - return NewSQLError(CRSSLConnectionError, SSUnknownSQLState, "server doesn't support ClientSessionTrack but client asked for it") + return sqlerror.NewSQLError(sqlerror.CRSSLConnectionError, sqlerror.SSUnknownSQLState, "server doesn't support ClientSessionTrack but client asked for it") } // Build and send our handshake response 41. @@ -321,7 +322,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error { // Wait for response, should be OK. response, err := c.readPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } switch response[0] { case OKPacket: @@ -331,7 +332,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error { return ParseErrorPacket(response) default: // FIXME(alainjobart) handle extra auth cases and so on. 
- return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "initial server response is asking for more information, not implemented yet: %v", response) + return sqlerror.NewSQLError(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "initial server response is asking for more information, not implemented yet: %v", response) } } @@ -346,7 +347,7 @@ func (c *Conn) parseInitialHandshakePacket(data []byte) (uint32, []byte, error) // Protocol version. pver, pos, ok := readByte(data, pos) if !ok { - return 0, nil, NewSQLError(CRVersionError, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no protocol version") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRVersionError, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no protocol version") } // Server is allowed to immediately send ERR packet @@ -355,41 +356,41 @@ func (c *Conn) parseInitialHandshakePacket(data []byte) (uint32, []byte, error) // Normally there would be a 1-byte sql_state_marker field and a 5-byte // sql_state field here, but docs say these will not be present in this case. errorMsg, _, _ := readEOFString(data, pos) - return 0, nil, NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "immediate error from server errorCode=%v errorMsg=%v", errorCode, errorMsg) + return 0, nil, sqlerror.NewSQLError(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "immediate error from server errorCode=%v errorMsg=%v", errorCode, errorMsg) } if pver != protocolVersion { - return 0, nil, NewSQLError(CRVersionError, SSUnknownSQLState, "bad protocol version: %v", pver) + return 0, nil, sqlerror.NewSQLError(sqlerror.CRVersionError, sqlerror.SSUnknownSQLState, "bad protocol version: %v", pver) } // Read the server version. 
c.ServerVersion, pos, ok = readNullString(data, pos) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no server version") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no server version") } // Read the connection id. c.ConnectionID, pos, ok = readUint32(data, pos) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no connection id") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no connection id") } // Read the first part of the auth-plugin-data authPluginData, pos, ok := readBytes(data, pos, 8) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no auth-plugin-data-part-1") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no auth-plugin-data-part-1") } // One byte filler, 0. We don't really care about the value. _, pos, ok = readByte(data, pos) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no filler") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no filler") } // Lower 2 bytes of the capability flags. 
capLower, pos, ok := readUint16(data, pos) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no capability flags (lower 2 bytes)") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no capability flags (lower 2 bytes)") } var capabilities = uint32(capLower) @@ -401,20 +402,20 @@ func (c *Conn) parseInitialHandshakePacket(data []byte) (uint32, []byte, error) // Character set. characterSet, pos, ok := readByte(data, pos) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no character set") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no character set") } c.CharacterSet = collations.ID(characterSet) // Status flags. Ignored. _, pos, ok = readUint16(data, pos) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no status flags") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no status flags") } // Upper 2 bytes of the capability flags. 
capUpper, pos, ok := readUint16(data, pos) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no capability flags (upper 2 bytes)") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no capability flags (upper 2 bytes)") } capabilities += uint32(capUpper) << 16 @@ -424,13 +425,13 @@ func (c *Conn) parseInitialHandshakePacket(data []byte) (uint32, []byte, error) if capabilities&CapabilityClientPluginAuth != 0 { authPluginDataLength, pos, ok = readByte(data, pos) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no length of auth-plugin-data") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no length of auth-plugin-data") } } else { // One byte filler, 0. We don't really care about the value. _, pos, ok = readByte(data, pos) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no length of auth-plugin-data filler") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no length of auth-plugin-data filler") } } @@ -447,12 +448,12 @@ func (c *Conn) parseInitialHandshakePacket(data []byte) (uint32, []byte, error) var authPluginDataPart2 []byte authPluginDataPart2, pos, ok = readBytes(data, pos, l) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no auth-plugin-data-part-2") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no auth-plugin-data-part-2") } // The last byte has to be 0, and is not part of the data. 
if authPluginDataPart2[l-1] != 0 { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: auth-plugin-data-part-2 is not 0 terminated") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: auth-plugin-data-part-2 is not 0 terminated") } authPluginData = append(authPluginData, authPluginDataPart2[0:l-1]...) } @@ -510,7 +511,7 @@ func (c *Conn) writeSSLRequest(capabilities uint32, characterSet uint8, params * // And send it as is. if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "cannot send SSLRequest: %v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "cannot send SSLRequest: %v", err) } return nil } @@ -608,11 +609,11 @@ func (c *Conn) writeHandshakeResponse41(capabilities uint32, scrambledPassword [ // Sanity-check the length. if pos != len(data) { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "writeHandshakeResponse41: only packed %v bytes, out of %v allocated", pos, len(data)) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "writeHandshakeResponse41: only packed %v bytes, out of %v allocated", pos, len(data)) } if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "cannot send HandshakeResponse41: %v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "cannot send HandshakeResponse41: %v", err) } return nil } @@ -622,7 +623,7 @@ func (c *Conn) writeHandshakeResponse41(capabilities uint32, scrambledPassword [ func (c *Conn) handleAuthResponse(params *ConnParams) error { response, err := c.readPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } switch response[0] { @@ -642,7 +643,7 @@ func (c 
*Conn) handleAuthResponse(params *ConnParams) error { case ErrPacket: return ParseErrorPacket(response) default: - return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "initial server response cannot be parsed: %v", response) + return sqlerror.NewSQLError(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "initial server response cannot be parsed: %v", response) } return nil @@ -654,7 +655,7 @@ func (c *Conn) handleAuthSwitchPacket(params *ConnParams, response []byte) error var salt []byte c.authPluginName, salt, err = parseAuthSwitchRequest(response) if err != nil { - return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "cannot parse auth switch request: %v", err) + return sqlerror.NewSQLError(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "cannot parse auth switch request: %v", err) } if salt != nil { c.salt = salt @@ -675,7 +676,7 @@ func (c *Conn) handleAuthSwitchPacket(params *ConnParams, response []byte) error return err } default: - return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "server asked for unsupported auth method: %v", c.authPluginName) + return sqlerror.NewSQLError(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "server asked for unsupported auth method: %v", c.authPluginName) } // The response could be an OKPacket, AuthMoreDataPacket or ErrPacket @@ -717,7 +718,7 @@ func (c *Conn) handleAuthMoreDataPacket(data byte, params *ConnParams) error { // Next packet should either be an OKPacket or ErrPacket return c.handleAuthResponse(params) default: - return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "cannot parse AuthMoreDataPacket: %v", data) + return sqlerror.NewSQLError(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "cannot parse AuthMoreDataPacket: %v", data) } } @@ -747,7 +748,7 @@ func (c *Conn) requestPublicKey() (rsaKey *rsa.PublicKey, err error) { response, err := c.readPacket() if err != nil { - return nil, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + 
return nil, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } // Server should respond with a AuthMoreDataPacket containing the public key diff --git a/go/mysql/client_test.go b/go/mysql/client_test.go index 73d48df1d35..d7424b29516 100644 --- a/go/mysql/client_test.go +++ b/go/mysql/client_test.go @@ -32,16 +32,18 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/vt/tlstest" "vitess.io/vitess/go/vt/vttls" ) // assertSQLError makes sure we get the right error. -func assertSQLError(t *testing.T, err error, code ErrorCode, sqlState, subtext, query, pattern string) { +func assertSQLError(t *testing.T, err error, code sqlerror.ErrorCode, sqlState, subtext, query, pattern string) { t.Helper() require.Error(t, err, "was expecting SQLError %v / %v / %v but got no error.", code, sqlState, subtext) - serr, ok := err.(*SQLError) + serr, ok := err.(*sqlerror.SQLError) require.True(t, ok, "was expecting SQLError %v / %v / %v but got: %v", code, sqlState, subtext, err) require.Equal(t, code, serr.Num, "was expecting SQLError %v / %v / %v but got code %v", code, sqlState, subtext, serr.Num) require.Equal(t, sqlState, serr.State, "was expecting SQLError %v / %v / %v but got state %v", code, sqlState, subtext, serr.State) @@ -110,14 +112,14 @@ func TestConnectTimeout(t *testing.T) { }() ctx = context.Background() _, err = Connect(ctx, params) - assertSQLError(t, err, CRServerLost, SSUnknownSQLState, "initial packet read failed", "", "") + assertSQLError(t, err, sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "initial packet read failed", "", "") // Now close the listener. Connect should fail right away, // check the error. 
listener.Close() wg.Wait() _, err = Connect(ctx, params) - assertSQLError(t, err, CRConnHostError, SSUnknownSQLState, "connection refused", "", "") + assertSQLError(t, err, sqlerror.CRConnHostError, sqlerror.SSUnknownSQLState, "connection refused", "", "") // Tests a connection where Dial to a unix socket fails // properly returns the right error. To simulate exactly the @@ -131,7 +133,7 @@ func TestConnectTimeout(t *testing.T) { _, err = Connect(ctx, params) os.Remove(name) t.Log(err) - assertSQLError(t, err, CRConnectionError, SSUnknownSQLState, "connection refused", "", "net\\.Dial\\(([a-z0-9A-Z_\\/]*)\\) to local server failed:") + assertSQLError(t, err, sqlerror.CRConnectionError, sqlerror.SSUnknownSQLState, "connection refused", "", "net\\.Dial\\(([a-z0-9A-Z_\\/]*)\\) to local server failed:") } // TestTLSClientDisabled creates a Server with TLS support, then connects @@ -149,7 +151,7 @@ func TestTLSClientDisabled(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() @@ -221,7 +223,7 @@ func TestTLSClientPreferredDefault(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() @@ -294,7 +296,7 @@ func TestTLSClientRequired(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. 
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() @@ -341,7 +343,7 @@ func testTLSClientVerifyCA(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() @@ -424,7 +426,7 @@ func TestTLSClientVerifyIdentity(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() diff --git a/go/mysql/collations/charset/convert.go b/go/mysql/collations/charset/convert.go index 3904329654c..bc51e9b8377 100644 --- a/go/mysql/collations/charset/convert.go +++ b/go/mysql/collations/charset/convert.go @@ -19,6 +19,8 @@ package charset import ( "fmt" "unicode/utf8" + + "vitess.io/vitess/go/hack" ) func failedConversionError(from, to Charset, input []byte) error { @@ -128,6 +130,79 @@ func Convert(dst []byte, dstCharset Charset, src []byte, srcCharset Charset) ([] } } +func Expand(dst []rune, src []byte, srcCharset Charset) []rune { + switch srcCharset := srcCharset.(type) { + case Charset_utf8mb3, Charset_utf8mb4: + if dst == nil { + return []rune(string(src)) + } + dst = make([]rune, 0, len(src)) + for _, cp := range string(src) { + dst = append(dst, cp) + } + return dst + case Charset_binary: + if dst == nil { + dst = make([]rune, 0, len(src)) + } + for _, c := range src { + dst = append(dst, 
rune(c)) + } + return dst + default: + if dst == nil { + dst = make([]rune, 0, len(src)) + } + for len(src) > 0 { + cp, width := srcCharset.DecodeRune(src) + src = src[width:] + dst = append(dst, cp) + } + return dst + } +} + +func Collapse(dst []byte, src []rune, dstCharset Charset) []byte { + switch dstCharset := dstCharset.(type) { + case Charset_utf8mb3, Charset_utf8mb4: + if dst == nil { + return hack.StringBytes(string(src)) + } + return append(dst, hack.StringBytes(string(src))...) + case Charset_binary: + if dst == nil { + dst = make([]byte, 0, len(src)) + } + for _, b := range src { + dst = append(dst, byte(b)) + } + return dst + default: + nDst := 0 + if dst == nil { + dst = make([]byte, len(src)*dstCharset.MaxWidth()) + } else { + nDst = len(dst) + dst = dst[:cap(dst)] + } + for _, c := range src { + if len(dst)-nDst < 4 { + newDst := make([]byte, len(dst)*2) + copy(newDst, dst[:nDst]) + dst = newDst + } + w := dstCharset.EncodeRune(dst[nDst:], c) + if w < 0 { + if w = dstCharset.EncodeRune(dst[nDst:], '?'); w < 0 { + break + } + } + nDst += w + } + return dst[:nDst] + } +} + func ConvertFromUTF8(dst []byte, dstCharset Charset, src []byte) ([]byte, error) { return Convert(dst, dstCharset, src, Charset_utf8mb4{}) } diff --git a/go/mysql/collations/charset/korean/tables.go b/go/mysql/collations/charset/korean/tables.go index 0480e85c4aa..7f7ad3e4264 100644 --- a/go/mysql/collations/charset/korean/tables.go +++ b/go/mysql/collations/charset/korean/tables.go @@ -17056,8 +17056,6 @@ var decode = [...]uint16{ 17629: 0x8A70, } -const numEncodeTables = 7 - // encodeX are the encoding tables from Unicode to EUC-KR code, // sorted by decreasing length. // encode0: 20893 entries for runes in [19968, 40861). 
diff --git a/go/mysql/collations/charset/simplifiedchinese/tables.go b/go/mysql/collations/charset/simplifiedchinese/tables.go index 415f52a1116..645127580f6 100644 --- a/go/mysql/collations/charset/simplifiedchinese/tables.go +++ b/go/mysql/collations/charset/simplifiedchinese/tables.go @@ -22091,8 +22091,6 @@ var decode = [...]uint16{ 23844: 0x4DAE, } -const numEncodeTables = 5 - // encodeX are the encoding tables from Unicode to GBK code, // sorted by decreasing length. // encode0: 28965 entries for runes in [11905, 40870). diff --git a/go/mysql/collations/coercion.go b/go/mysql/collations/coercion.go index 8e72ebf3c37..8b66c818cc0 100644 --- a/go/mysql/collations/coercion.go +++ b/go/mysql/collations/coercion.go @@ -19,8 +19,6 @@ package collations import ( "fmt" "unsafe" - - "vitess.io/vitess/go/mysql/collations/charset" ) func init() { @@ -95,11 +93,6 @@ const ( RepertoireUnicode ) -// Coercion is a function that will transform either the given argument -// arguments of the function into a specific character set. The `dst` argument -// will be used as the destination of the coerced argument, but it can be nil. -type Coercion func(dst, in []byte) ([]byte, error) - // TypedCollation is the Collation of a SQL expression, including its coercibility // and repertoire. 
type TypedCollation struct { @@ -112,208 +105,13 @@ func (tc TypedCollation) Valid() bool { return tc.Collation != Unknown } -func checkCompatibleCollations( - left Collation, leftCoercibility Coercibility, leftRepertoire Repertoire, - right Collation, rightCoercibility Coercibility, rightRepertoire Repertoire, -) bool { - leftCS := left.Charset() - rightCS := right.Charset() - - switch leftCS.(type) { - case charset.Charset_utf8mb4: - if leftCoercibility <= rightCoercibility { - return true - } - - case charset.Charset_utf32: - switch { - case leftCoercibility < rightCoercibility: - return true - case leftCoercibility == rightCoercibility: - if !charset.IsUnicode(rightCS) { - return true - } - if !left.IsBinary() { - return true - } - } - - case charset.Charset_utf8mb3, charset.Charset_ucs2, charset.Charset_utf16, charset.Charset_utf16le: - switch { - case leftCoercibility < rightCoercibility: - return true - case leftCoercibility == rightCoercibility: - if !charset.IsUnicode(rightCS) { - return true - } - } - } - - if rightRepertoire == RepertoireASCII { - switch { - case leftCoercibility < rightCoercibility: - return true - case leftCoercibility == rightCoercibility: - if leftRepertoire == RepertoireUnicode { - return true - } - } - } - - return false -} - -// CoercionOptions is used to configure how aggressive the algorithm can be -// when merging two different collations by transcoding them. -type CoercionOptions struct { - // ConvertToSuperset allows merging two different collations as long - // as the charset of one of them is a strict superset of the other. In - // order to operate on the two expressions, one of them will need to - // be transcoded. This transcoding will always be safe because the string - // with the smallest repertoire will be transcoded to its superset, which - // cannot fail. 
- ConvertToSuperset bool - - // ConvertWithCoercion allows merging two different collations by forcing - // a coercion as long as the coercibility of the two sides is lax enough. - // This will force a transcoding of one of the expressions even if their - // respective charsets are not a strict superset, so the resulting transcoding - // CAN fail depending on the content of their strings. - ConvertWithCoercion bool -} - -// MergeCollations returns a Coercion function for a pair of TypedCollation based -// on their coercibility. -// -// The function takes the typed collations for the two sides of a text operation -// (namely, a comparison or concatenation of two textual expressions). These typed -// collations includes the actual collation for the expression on each size, their -// coercibility values (see: Coercibility) and their respective repertoires, -// and returns the target collation (i.e. the collation into which the two expressions -// must be coerced, and a Coercion function. The Coercion function can be called repeatedly -// with the different values for the two expressions and will transcode either -// the left-hand or right-hand value to the appropriate charset so it can be -// collated against the other value. -// -// If the collations for both sides of the expressions are the same, the returned -// Coercion function will be a no-op. Likewise, if the two collations are not the same, -// but they are compatible and have the same charset, the Coercion function will also -// be a no-op. -// -// If the collations for both sides of the expression are not compatible, an error -// will be returned and the returned TypedCollation and Coercion will be nil. 
-func (env *Environment) MergeCollations(left, right TypedCollation, opt CoercionOptions) (TypedCollation, Coercion, Coercion, error) { - leftColl := left.Collation.Get() - rightColl := right.Collation.Get() - if leftColl == nil || rightColl == nil { - return TypedCollation{}, nil, nil, fmt.Errorf("unsupported TypeCollationID: %v / %v", left.Collation, right.Collation) - } - - leftCS := leftColl.Charset() - rightCS := rightColl.Charset() - - if left.Coercibility == CoerceExplicit && right.Coercibility == CoerceExplicit { - if left.Collation != right.Collation { - goto cannotCoerce - } - } - - if leftCS.Name() == rightCS.Name() { - switch { - case left.Coercibility < right.Coercibility: - left.Repertoire |= right.Repertoire - return left, nil, nil, nil - - case left.Coercibility > right.Coercibility: - right.Repertoire |= left.Repertoire - return right, nil, nil, nil - - case left.Collation == right.Collation: - left.Repertoire |= right.Repertoire - return left, nil, nil, nil - } - - if left.Coercibility == CoerceExplicit { - goto cannotCoerce - } - - leftCsBin := leftColl.IsBinary() - rightCsBin := rightColl.IsBinary() - - switch { - case leftCsBin && rightCsBin: - left.Coercibility = CoerceNone - return left, nil, nil, nil - - case leftCsBin: - return left, nil, nil, nil - - case rightCsBin: - return right, nil, nil, nil - } - - defaults := env.byCharset[leftCS.Name()] - return TypedCollation{ - Collation: defaults.Binary.ID(), - Coercibility: CoerceNone, - Repertoire: left.Repertoire | right.Repertoire, - }, nil, nil, nil - } - - if _, leftIsBinary := leftColl.(*Collation_binary); leftIsBinary { - if left.Coercibility <= right.Coercibility { - return left, nil, nil, nil - } - goto coerceToRight - } - if _, rightIsBinary := rightColl.(*Collation_binary); rightIsBinary { - if left.Coercibility >= right.Coercibility { - return right, nil, nil, nil - } - goto coerceToLeft - } - - if opt.ConvertToSuperset { - if checkCompatibleCollations(leftColl, left.Coercibility, 
left.Repertoire, rightColl, right.Coercibility, right.Repertoire) { - goto coerceToLeft - } - if checkCompatibleCollations(rightColl, right.Coercibility, right.Repertoire, leftColl, left.Coercibility, left.Repertoire) { - goto coerceToRight - } - } - - if opt.ConvertWithCoercion { - if left.Coercibility < right.Coercibility && right.Coercibility > CoerceImplicit { - goto coerceToLeft - } - if right.Coercibility < left.Coercibility && left.Coercibility > CoerceImplicit { - goto coerceToRight - } - } - -cannotCoerce: - return TypedCollation{}, nil, nil, fmt.Errorf("Illegal mix of collations (%s,%s) and (%s,%s)", - leftColl.Name(), left.Coercibility, rightColl.Name(), right.Coercibility) - -coerceToLeft: - return left, nil, - func(dst, in []byte) ([]byte, error) { - return charset.Convert(dst, leftCS, in, rightCS) - }, nil - -coerceToRight: - return right, - func(dst, in []byte) ([]byte, error) { - return charset.Convert(dst, rightCS, in, leftCS) - }, nil, nil -} - func (env *Environment) EnsureCollate(fromID, toID ID) error { // these two lookups should never fail - from := fromID.Get() - to := toID.Get() - if from.Charset().Name() != to.Charset().Name() { - return fmt.Errorf("COLLATION '%s' is not valid for CHARACTER SET '%s'", to.Name(), from.Charset().Name()) + fromCharsetName := env.LookupCharsetName(fromID) + toCharsetName := env.LookupCharsetName(toID) + if fromCharsetName != toCharsetName { + toCollName := env.LookupName(toID) + return fmt.Errorf("COLLATION '%s' is not valid for CHARACTER SET '%s'", toCollName, fromCharsetName) } return nil } diff --git a/go/mysql/collations/collation.go b/go/mysql/collations/collation.go index 172f5d4552f..aebc4dc9646 100644 --- a/go/mysql/collations/collation.go +++ b/go/mysql/collations/collation.go @@ -16,167 +16,10 @@ limitations under the License. 
package collations -import ( - "math" - - "vitess.io/vitess/go/mysql/collations/charset" - "vitess.io/vitess/go/vt/vthash" -) - //go:generate go run ./tools/makecolldata/ --embed=true -// CaseAwareCollation implements lowercase and uppercase conventions for collations. -type CaseAwareCollation interface { - Collation - ToUpper(dst []byte, src []byte) []byte - ToLower(dst []byte, src []byte) []byte -} - // ID is a numeric identifier for a collation. These identifiers are defined by MySQL, not by Vitess. type ID uint16 -// Get returns the Collation identified by this ID. If the ID is invalid, this returns nil -func (i ID) Get() Collation { - if int(i) < len(collationsById) { - return collationsById[i] - } - return nil -} - -// Valid returns whether this Collation ID is valid (i.e. identifies a valid collation) -func (i ID) Valid() bool { - return int(i) < len(collationsById) && collationsById[i] != nil -} - // Unknown is the default ID for an unknown collation. const Unknown ID = 0 - -// Collation implements a MySQL-compatible collation. It defines how to compare -// for sorting order and equality two strings with the same encoding. -type Collation interface { - // ID returns the numerical identifier for this collation. This is the same - // value that is returned by MySQL in a query's headers to identify the collation - // for a given column - ID() ID - - // Name is the full name of this collation, in the form of "ENCODING_LANG_SENSITIVITY" - Name() string - - // Collate compares two strings using this collation. `left` and `right` must be the - // two strings encoded in the proper encoding for this collation. If `isPrefix` is true, - // the function instead behaves equivalently to `strings.HasPrefix(left, right)`, but - // being collation-aware. 
- // It returns a numeric value like a normal comparison function: <0 if left < right, - // 0 if left == right, >0 if left > right - Collate(left, right []byte, isPrefix bool) int - - // WeightString returns a weight string for the given `src` string. A weight string - // is a binary representation of the weights for the given string, that can be - // compared byte-wise to return identical results to collating this string. - // - // This means: - // bytes.Compare(WeightString(left), WeightString(right)) == Collate(left, right) - // - // The semantics of this API have been carefully designed to match MySQL's behavior - // in its `strnxfrm` API. Most notably, the `numCodepoints` argument implies different - // behaviors depending on the collation's padding mode: - // - // - For collations that pad WITH SPACE (this is, all legacy collations in MySQL except - // for the newly introduced UCA v9.0.0 utf8mb4 collations in MySQL 8.0), `numCodepoints` - // can have the following values: - // - // - if `numCodepoints` is any integer greater than zero, this treats the `src` string - // as if it were in a `CHAR(numCodepoints)` column in MySQL, meaning that the resulting - // weight string will be padded with the weight for the SPACE character until it becomes - // wide enough to fill the `CHAR` column. This is necessary to perform weight comparisons - // in fixed-`CHAR` columns. If `numCodepoints` is smaller than the actual amount of - // codepoints stored in `src`, the result is unspecified. - // - // - if `numCodepoints` is zero, this is equivalent to `numCodepoints = RuneCount(src)`, - // meaning that the resulting weight string will have no padding at the end: it'll only have - // the weight values for the exact amount of codepoints contained in `src`. This is the - // behavior required to sort `VARCHAR` columns. 
- // - // - if `numCodepoints` is the special constant PadToMax, then the `dst` slice must be - // pre-allocated to a zero-length slice with enough capacity to hold the complete weight - // string, and any remaining capacity in `dst` will be filled by the weights for the - // padding character, repeatedly. This is a special flag used by MySQL when performing - // filesorts, where all the sorting keys must have identical sizes, even for `VARCHAR` - // columns. - // - // - For collations that have NO PAD (this is, the newly introduced UCA v9.0.0 utf8mb4 collations - // in MySQL 8.0), `numCodepoints` can only have the special constant `PadToMax`, which will make - // the weight string padding equivalent to a PAD SPACE collation (as explained in the previous - // section). All other values for `numCodepoints` are ignored, because NO PAD collations always - // return the weights for the codepoints in their strings, with no further padding at the end. - // - // The resulting weight string is written to `dst`, which can be pre-allocated to - // WeightStringLen() bytes to prevent growing the slice. `dst` can also be nil, in which - // case it will grow dynamically. If `numCodepoints` has the special PadToMax value explained - // earlier, `dst` MUST be pre-allocated to the target size or the function will return an - // empty slice. - WeightString(dst, src []byte, numCodepoints int) []byte - - // WeightStringLen returns a size (in bytes) that would fit any weight strings for a string - // with `numCodepoints` using this collation. Note that this is a higher bound for the size - // of the string, and in practice weight strings can be significantly smaller than the - // returned value. - WeightStringLen(numCodepoints int) int - - // Hash returns a 32 or 64 bit identifier (depending on the platform) that uniquely identifies - // the given string based on this collation. It is functionally equivalent to calling WeightString - // and then hashing the result. 
- // - // Consequently, if the hashes for two strings are different, then the two strings are considered - // different according to this collation. If the hashes for two strings are equal, the two strings - // may or may not be considered equal according to this collation, because hashes can collide unlike - // weight strings. - // - // The numCodepoints argument has the same behavior as in WeightString: if this collation uses PAD SPACE, - // the hash will interpret the source string as if it were stored in a `CHAR(n)` column. If the value of - // numCodepoints is 0, this is equivalent to setting `numCodepoints = RuneCount(src)`. - // For collations with NO PAD, the numCodepoint argument is ignored. - Hash(hasher *vthash.Hasher, src []byte, numCodepoints int) - - // Wildcard returns a matcher for the given wildcard pattern. The matcher can be used to repeatedly - // test different strings to check if they match the pattern. The pattern must be a traditional wildcard - // pattern, which may contain the provided special characters for matching one character or several characters. - // The provided `escape` character will be used as an escape sequence in front of the other special characters. - // - // This method is fully collation aware; the matching will be performed according to the underlying collation. - // I.e. if this is a case-insensitive collation, matching will be case-insensitive. - // - // The returned WildcardPattern is always valid, but if the provided special characters do not exist in this - // collation's repertoire, the returned pattern will not match any strings. Likewise, if the provided pattern - // has invalid syntax, the returned pattern will not match any strings. - // - // If the provided special characters are 0, the defaults to parse an SQL 'LIKE' statement will be used. - // This is, '_' for matching one character, '%' for matching many and '\\' for escape. 
- // - // This method can also be used for Shell-like matching with '?', '*' and '\\' as their respective special - // characters. - Wildcard(pat []byte, matchOne, matchMany, escape rune) WildcardPattern - - // Charset returns the Charset with which this collation is encoded - Charset() Charset - - // IsBinary returns whether this collation is a binary collation - IsBinary() bool -} - -// WildcardPattern is a matcher for a wildcard pattern, constructed from a given collation -type WildcardPattern interface { - // Match returns whether the given string matches this pattern - Match(in []byte) bool -} - -type Charset = charset.Charset - -const PadToMax = math.MaxInt32 - -func minInt(i1, i2 int) int { - if i1 < i2 { - return i1 - } - return i2 -} diff --git a/go/mysql/collations/8bit.go b/go/mysql/collations/colldata/8bit.go similarity index 92% rename from go/mysql/collations/8bit.go rename to go/mysql/collations/colldata/8bit.go index 7a22ed1d0e1..2355888bbab 100644 --- a/go/mysql/collations/8bit.go +++ b/go/mysql/collations/colldata/8bit.go @@ -14,9 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -package collations +package colldata import ( + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" "vitess.io/vitess/go/vt/vthash" ) @@ -42,7 +43,7 @@ type simpletables struct { } type Collation_8bit_bin struct { - id ID + id collations.ID name string simpletables charset charset.Charset @@ -52,7 +53,7 @@ func (c *Collation_8bit_bin) Name() string { return c.name } -func (c *Collation_8bit_bin) ID() ID { +func (c *Collation_8bit_bin) ID() collations.ID { return c.id } @@ -78,7 +79,7 @@ func (c *Collation_8bit_bin) WeightString(dst, src []byte, numCodepoints int) [] case PadToMax: padToMax = true default: - copyCodepoints = minInt(copyCodepoints, numCodepoints) + copyCodepoints = min(copyCodepoints, numCodepoints) } dst = append(dst, src[:copyCodepoints]...) 
@@ -92,7 +93,7 @@ func (c *Collation_8bit_bin) Hash(hasher *vthash.Hasher, src []byte, numCodepoin return } - tocopy := minInt(len(src), numCodepoints) + tocopy := min(len(src), numCodepoints) hasher.Write(src[:tocopy]) numCodepoints -= tocopy @@ -129,7 +130,7 @@ func (c *Collation_8bit_bin) ToUpper(dst, src []byte) []byte { } type Collation_8bit_simple_ci struct { - id ID + id collations.ID name string simpletables charset charset.Charset @@ -139,7 +140,7 @@ func (c *Collation_8bit_simple_ci) Name() string { return c.name } -func (c *Collation_8bit_simple_ci) ID() ID { +func (c *Collation_8bit_simple_ci) ID() collations.ID { return c.id } @@ -153,7 +154,7 @@ func (c *Collation_8bit_simple_ci) IsBinary() bool { func (c *Collation_8bit_simple_ci) Collate(left, right []byte, rightIsPrefix bool) int { sortOrder := c.sort - cmpLen := minInt(len(left), len(right)) + cmpLen := min(len(left), len(right)) for i := 0; i < cmpLen; i++ { sortL, sortR := sortOrder[left[i]], sortOrder[right[i]] @@ -178,7 +179,7 @@ func (c *Collation_8bit_simple_ci) WeightString(dst, src []byte, numCodepoints i case PadToMax: padToMax = true default: - copyCodepoints = minInt(copyCodepoints, numCodepoints) + copyCodepoints = min(copyCodepoints, numCodepoints) } for _, ch := range src[:copyCodepoints] { @@ -192,7 +193,7 @@ func (c *Collation_8bit_simple_ci) Hash(hasher *vthash.Hasher, src []byte, numCo var tocopy = len(src) if numCodepoints > 0 { - tocopy = minInt(tocopy, numCodepoints) + tocopy = min(tocopy, numCodepoints) } hasher.Write64(uint64(c.id)) @@ -251,8 +252,8 @@ func (c *Collation_8bit_simple_ci) ToUpper(dst, src []byte) []byte { type Collation_binary struct{} -func (c *Collation_binary) ID() ID { - return CollationBinaryID +func (c *Collation_binary) ID() collations.ID { + return collations.CollationBinaryID } func (c *Collation_binary) Name() string { @@ -280,7 +281,7 @@ func (c *Collation_binary) WeightString(dst, src []byte, numCodepoints int) []by case PadToMax: padToMax = true 
default: - copyCodepoints = minInt(copyCodepoints, numCodepoints) + copyCodepoints = min(copyCodepoints, numCodepoints) } dst = append(dst, src[:copyCodepoints]...) diff --git a/go/mysql/collations/cached_size.go b/go/mysql/collations/colldata/cached_size.go similarity index 98% rename from go/mysql/collations/cached_size.go rename to go/mysql/collations/colldata/cached_size.go index 6b5e901dffd..36167c69d6d 100644 --- a/go/mysql/collations/cached_size.go +++ b/go/mysql/collations/colldata/cached_size.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by Sizegen. DO NOT EDIT. -package collations +package colldata import hack "vitess.io/vitess/go/hack" diff --git a/go/mysql/collations/colldata/collation.go b/go/mysql/collations/colldata/collation.go new file mode 100644 index 00000000000..ec66fc09b58 --- /dev/null +++ b/go/mysql/collations/colldata/collation.go @@ -0,0 +1,374 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package colldata + +import ( + "fmt" + "math" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/vt/vthash" +) + +type Charset = charset.Charset + +// Collation implements a MySQL-compatible collation. It defines how to compare +// for sorting order and equality two strings with the same encoding. +type Collation interface { + // ID returns the numerical identifier for this collation. 
This is the same + // value that is returned by MySQL in a query's headers to identify the collation + // for a given column + ID() collations.ID + + // Name is the full name of this collation, in the form of "ENCODING_LANG_SENSITIVITY" + Name() string + + // Collate compares two strings using this collation. `left` and `right` must be the + // two strings encoded in the proper encoding for this collation. If `isPrefix` is true, + // the function instead behaves equivalently to `strings.HasPrefix(left, right)`, but + // being collation-aware. + // It returns a numeric value like a normal comparison function: <0 if left < right, + // 0 if left == right, >0 if left > right + Collate(left, right []byte, isPrefix bool) int + + // WeightString returns a weight string for the given `src` string. A weight string + // is a binary representation of the weights for the given string, that can be + // compared byte-wise to return identical results to collating this string. + // + // This means: + // bytes.Compare(WeightString(left), WeightString(right)) == Collate(left, right) + // + // The semantics of this API have been carefully designed to match MySQL's behavior + // in its `strnxfrm` API. Most notably, the `numCodepoints` argument implies different + // behaviors depending on the collation's padding mode: + // + // - For collations that pad WITH SPACE (this is, all legacy collations in MySQL except + // for the newly introduced UCA v9.0.0 utf8mb4 collations in MySQL 8.0), `numCodepoints` + // can have the following values: + // + // - if `numCodepoints` is any integer greater than zero, this treats the `src` string + // as if it were in a `CHAR(numCodepoints)` column in MySQL, meaning that the resulting + // weight string will be padded with the weight for the SPACE character until it becomes + // wide enough to fill the `CHAR` column. This is necessary to perform weight comparisons + // in fixed-`CHAR` columns. 
If `numCodepoints` is smaller than the actual amount of + // codepoints stored in `src`, the result is unspecified. + // + // - if `numCodepoints` is zero, this is equivalent to `numCodepoints = RuneCount(src)`, + // meaning that the resulting weight string will have no padding at the end: it'll only have + // the weight values for the exact amount of codepoints contained in `src`. This is the + // behavior required to sort `VARCHAR` columns. + // + // - if `numCodepoints` is the special constant PadToMax, then the `dst` slice must be + // pre-allocated to a zero-length slice with enough capacity to hold the complete weight + // string, and any remaining capacity in `dst` will be filled by the weights for the + // padding character, repeatedly. This is a special flag used by MySQL when performing + // filesorts, where all the sorting keys must have identical sizes, even for `VARCHAR` + // columns. + // + // - For collations that have NO PAD (this is, the newly introduced UCA v9.0.0 utf8mb4 collations + // in MySQL 8.0), `numCodepoints` can only have the special constant `PadToMax`, which will make + // the weight string padding equivalent to a PAD SPACE collation (as explained in the previous + // section). All other values for `numCodepoints` are ignored, because NO PAD collations always + // return the weights for the codepoints in their strings, with no further padding at the end. + // + // The resulting weight string is written to `dst`, which can be pre-allocated to + // WeightStringLen() bytes to prevent growing the slice. `dst` can also be nil, in which + // case it will grow dynamically. If `numCodepoints` has the special PadToMax value explained + // earlier, `dst` MUST be pre-allocated to the target size or the function will return an + // empty slice. + WeightString(dst, src []byte, numCodepoints int) []byte + + // WeightStringLen returns a size (in bytes) that would fit any weight strings for a string + // with `numCodepoints` using this collation. 
Note that this is a higher bound for the size + // of the string, and in practice weight strings can be significantly smaller than the + // returned value. + WeightStringLen(numCodepoints int) int + + // Hash returns a 32 or 64 bit identifier (depending on the platform) that uniquely identifies + // the given string based on this collation. It is functionally equivalent to calling WeightString + // and then hashing the result. + // + // Consequently, if the hashes for two strings are different, then the two strings are considered + // different according to this collation. If the hashes for two strings are equal, the two strings + // may or may not be considered equal according to this collation, because hashes can collide unlike + // weight strings. + // + // The numCodepoints argument has the same behavior as in WeightString: if this collation uses PAD SPACE, + // the hash will interpret the source string as if it were stored in a `CHAR(n)` column. If the value of + // numCodepoints is 0, this is equivalent to setting `numCodepoints = RuneCount(src)`. + // For collations with NO PAD, the numCodepoint argument is ignored. + Hash(hasher *vthash.Hasher, src []byte, numCodepoints int) + + // Wildcard returns a matcher for the given wildcard pattern. The matcher can be used to repeatedly + // test different strings to check if they match the pattern. The pattern must be a traditional wildcard + // pattern, which may contain the provided special characters for matching one character or several characters. + // The provided `escape` character will be used as an escape sequence in front of the other special characters. + // + // This method is fully collation aware; the matching will be performed according to the underlying collation. + // I.e. if this is a case-insensitive collation, matching will be case-insensitive. 
+ // + // The returned WildcardPattern is always valid, but if the provided special characters do not exist in this + // collation's repertoire, the returned pattern will not match any strings. Likewise, if the provided pattern + // has invalid syntax, the returned pattern will not match any strings. + // + // If the provided special characters are 0, the defaults to parse an SQL 'LIKE' statement will be used. + // This is, '_' for matching one character, '%' for matching many and '\\' for escape. + // + // This method can also be used for Shell-like matching with '?', '*' and '\\' as their respective special + // characters. + Wildcard(pat []byte, matchOne, matchMany, escape rune) WildcardPattern + + // Charset returns the Charset with which this collation is encoded + Charset() Charset + + // IsBinary returns whether this collation is a binary collation + IsBinary() bool +} + +// WildcardPattern is a matcher for a wildcard pattern, constructed from a given collation +type WildcardPattern interface { + // Match returns whether the given string matches this pattern + Match(in []byte) bool +} + +const PadToMax = math.MaxInt32 + +// CaseAwareCollation implements lowercase and uppercase conventions for collations. +type CaseAwareCollation interface { + Collation + ToUpper(dst []byte, src []byte) []byte + ToLower(dst []byte, src []byte) []byte +} + +func Lookup(id collations.ID) Collation { + if int(id) >= len(collationsById) { + return nil + } + return collationsById[id] +} + +// All returns a slice with all known collations in Vitess. 
+func All(env *collations.Environment) []Collation { + allCols := env.AllCollationIDs() + all := make([]Collation, 0, len(allCols)) + for _, col := range allCols { + all = append(all, collationsById[col]) + } + return all +} + +func checkCompatibleCollations( + left Collation, leftCoercibility collations.Coercibility, leftRepertoire collations.Repertoire, + right Collation, rightCoercibility collations.Coercibility, rightRepertoire collations.Repertoire, +) bool { + leftCS := left.Charset() + rightCS := right.Charset() + + switch leftCS.(type) { + case charset.Charset_utf8mb4: + if leftCoercibility <= rightCoercibility { + return true + } + + case charset.Charset_utf32: + switch { + case leftCoercibility < rightCoercibility: + return true + case leftCoercibility == rightCoercibility: + if !charset.IsUnicode(rightCS) { + return true + } + if !left.IsBinary() { + return true + } + } + + case charset.Charset_utf8mb3, charset.Charset_ucs2, charset.Charset_utf16, charset.Charset_utf16le: + switch { + case leftCoercibility < rightCoercibility: + return true + case leftCoercibility == rightCoercibility: + if !charset.IsUnicode(rightCS) { + return true + } + } + } + + if rightRepertoire == collations.RepertoireASCII { + switch { + case leftCoercibility < rightCoercibility: + return true + case leftCoercibility == rightCoercibility: + if leftRepertoire == collations.RepertoireUnicode { + return true + } + } + } + + return false +} + +// CoercionOptions is used to configure how aggressive the algorithm can be +// when merging two different collations by transcoding them. +type CoercionOptions struct { + // ConvertToSuperset allows merging two different collations as long + // as the charset of one of them is a strict superset of the other. In + // order to operate on the two expressions, one of them will need to + // be transcoded. 
This transcoding will always be safe because the string + // with the smallest repertoire will be transcoded to its superset, which + // cannot fail. + ConvertToSuperset bool + + // ConvertWithCoercion allows merging two different collations by forcing + // a coercion as long as the coercibility of the two sides is lax enough. + // This will force a transcoding of one of the expressions even if their + // respective charsets are not a strict superset, so the resulting transcoding + // CAN fail depending on the content of their strings. + ConvertWithCoercion bool +} + +// Coercion is a function that will transform either the given argument +// arguments of the function into a specific character set. The `dst` argument +// will be used as the destination of the coerced argument, but it can be nil. +type Coercion func(dst, in []byte) ([]byte, error) + +// Merge returns a Coercion function for a pair of TypedCollation based +// on their coercibility. +// +// The function takes the typed collations for the two sides of a text operation +// (namely, a comparison or concatenation of two textual expressions). These typed +// collations includes the actual collation for the expression on each size, their +// coercibility values (see: Coercibility) and their respective repertoires, +// and returns the target collation (i.e. the collation into which the two expressions +// must be coerced, and a Coercion function. The Coercion function can be called repeatedly +// with the different values for the two expressions and will transcode either +// the left-hand or right-hand value to the appropriate charset so it can be +// collated against the other value. +// +// If the collations for both sides of the expressions are the same, the returned +// Coercion function will be a no-op. Likewise, if the two collations are not the same, +// but they are compatible and have the same charset, the Coercion function will also +// be a no-op. 
+// +// If the collations for both sides of the expression are not compatible, an error +// will be returned and the returned TypedCollation and Coercion will be nil. +func Merge(env *collations.Environment, left, right collations.TypedCollation, opt CoercionOptions) (collations.TypedCollation, Coercion, Coercion, error) { + leftColl := Lookup(left.Collation) + rightColl := Lookup(right.Collation) + if leftColl == nil || rightColl == nil { + return collations.TypedCollation{}, nil, nil, fmt.Errorf("unsupported TypeCollationID: %v / %v", left.Collation, right.Collation) + } + + leftCS := leftColl.Charset() + rightCS := rightColl.Charset() + + if left.Coercibility == collations.CoerceExplicit && right.Coercibility == collations.CoerceExplicit { + if left.Collation != right.Collation { + goto cannotCoerce + } + } + + if leftCS.Name() == rightCS.Name() { + switch { + case left.Coercibility < right.Coercibility: + left.Repertoire |= right.Repertoire + return left, nil, nil, nil + + case left.Coercibility > right.Coercibility: + right.Repertoire |= left.Repertoire + return right, nil, nil, nil + + case left.Collation == right.Collation: + left.Repertoire |= right.Repertoire + return left, nil, nil, nil + } + + if left.Coercibility == collations.CoerceExplicit { + goto cannotCoerce + } + + leftCsBin := leftColl.IsBinary() + rightCsBin := rightColl.IsBinary() + + switch { + case leftCsBin && rightCsBin: + left.Coercibility = collations.CoerceNone + return left, nil, nil, nil + + case leftCsBin: + return left, nil, nil, nil + + case rightCsBin: + return right, nil, nil, nil + } + + defaults := env.LookupByCharset(leftCS.Name()) + return collations.TypedCollation{ + Collation: defaults.Binary, + Coercibility: collations.CoerceNone, + Repertoire: left.Repertoire | right.Repertoire, + }, nil, nil, nil + } + + if _, leftIsBinary := leftColl.(*Collation_binary); leftIsBinary { + if left.Coercibility <= right.Coercibility { + return left, nil, nil, nil + } + goto coerceToRight + 
} + if _, rightIsBinary := rightColl.(*Collation_binary); rightIsBinary { + if left.Coercibility >= right.Coercibility { + return right, nil, nil, nil + } + goto coerceToLeft + } + + if opt.ConvertToSuperset { + if checkCompatibleCollations(leftColl, left.Coercibility, left.Repertoire, rightColl, right.Coercibility, right.Repertoire) { + goto coerceToLeft + } + if checkCompatibleCollations(rightColl, right.Coercibility, right.Repertoire, leftColl, left.Coercibility, left.Repertoire) { + goto coerceToRight + } + } + + if opt.ConvertWithCoercion { + if left.Coercibility < right.Coercibility && right.Coercibility > collations.CoerceImplicit { + goto coerceToLeft + } + if right.Coercibility < left.Coercibility && left.Coercibility > collations.CoerceImplicit { + goto coerceToRight + } + } + +cannotCoerce: + return collations.TypedCollation{}, nil, nil, fmt.Errorf("Illegal mix of collations (%s,%s) and (%s,%s)", + leftColl.Name(), left.Coercibility, rightColl.Name(), right.Coercibility) + +coerceToLeft: + return left, nil, + func(dst, in []byte) ([]byte, error) { + return charset.Convert(dst, leftCS, in, rightCS) + }, nil + +coerceToRight: + return right, + func(dst, in []byte) ([]byte, error) { + return charset.Convert(dst, rightCS, in, leftCS) + }, nil, nil +} diff --git a/go/mysql/collations/fuzz.go b/go/mysql/collations/colldata/fuzz.go similarity index 98% rename from go/mysql/collations/fuzz.go rename to go/mysql/collations/colldata/fuzz.go index e71eae3fbdc..c5ebf50698b 100644 --- a/go/mysql/collations/fuzz.go +++ b/go/mysql/collations/colldata/fuzz.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package collations +package colldata import ( fuzz "github.com/AdaLogics/go-fuzz-headers" diff --git a/go/mysql/collations/fuzz_test.go b/go/mysql/collations/colldata/fuzz_test.go similarity index 96% rename from go/mysql/collations/fuzz_test.go rename to go/mysql/collations/colldata/fuzz_test.go index 1f36fd34ff3..0c11116f580 100644 --- a/go/mysql/collations/fuzz_test.go +++ b/go/mysql/collations/colldata/fuzz_test.go @@ -18,9 +18,11 @@ limitations under the License. // The fuzzing tests for collations use the new Fuzz implementation in Go 1.18+ -package collations +package colldata -import "testing" +import ( + "testing" +) func FuzzUCACollate(f *testing.F) { for _, left := range AllTestStrings { diff --git a/go/mysql/collations/colldata/golden_test.go b/go/mysql/collations/colldata/golden_test.go new file mode 100644 index 00000000000..2b41ebcddc6 --- /dev/null +++ b/go/mysql/collations/colldata/golden_test.go @@ -0,0 +1,82 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package colldata + +import ( + "bytes" + "fmt" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/internal/testutil" +) + +func TestGoldenWeights(t *testing.T) { + gllGoldenTests, err := filepath.Glob("testdata/wiki_*.gob.gz") + if err != nil { + t.Fatal(err) + } + + for _, goldenPath := range gllGoldenTests { + golden := &testutil.GoldenTest{} + if err := golden.DecodeFromFile(goldenPath); err != nil { + t.Fatal(err) + } + + for _, goldenCase := range golden.Cases { + t.Run(fmt.Sprintf("%s (%s)", golden.Name, goldenCase.Lang), func(t *testing.T) { + for coll, expected := range goldenCase.Weights { + coll := testcollation(t, coll) + + input, err := charset.ConvertFromUTF8(nil, coll.Charset(), goldenCase.Text) + if err != nil { + t.Fatal(err) + } + + result := coll.WeightString(nil, input, 0) + assert.True(t, bytes.Equal(expected, result), "mismatch for collation=%s\noriginal: %s\ninput: %#v\nexpected: %v\nactual: %v", coll.Name(), string(goldenCase.Text), input, expected, result) + + } + }) + } + } +} + +func TestCollationsForLanguage(t *testing.T) { + allCollations := testall() + langCounts := make(map[testutil.Lang][]string) + + for lang := range testutil.KnownLanguages { + var matched []string + for _, coll := range allCollations { + name := coll.Name() + if lang.MatchesCollation(name) { + matched = append(matched, name) + } + } + langCounts[lang] = matched + } + + for lang := range testutil.KnownLanguages { + assert.NotEqual(t, 0, len(langCounts[lang]), "no collations found for %q", lang) + + t.Logf("%s: %v", lang, langCounts[lang]) + } +} diff --git a/go/mysql/collations/multibyte.go b/go/mysql/collations/colldata/multibyte.go similarity index 95% rename from go/mysql/collations/multibyte.go rename to go/mysql/collations/colldata/multibyte.go index f9d13df2d1f..cc123a25a1a 100644 --- a/go/mysql/collations/multibyte.go +++ 
b/go/mysql/collations/colldata/multibyte.go @@ -14,23 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -package collations +package colldata import ( "math" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" "vitess.io/vitess/go/vt/vthash" ) type Collation_multibyte struct { - id ID + id collations.ID name string sort *[256]byte charset charset.Charset } -func (c *Collation_multibyte) ID() ID { +func (c *Collation_multibyte) ID() collations.ID { return c.id } @@ -51,7 +52,7 @@ func (c *Collation_multibyte) Collate(left, right []byte, isPrefix bool) int { return collationBinary(left, right, isPrefix) } - cmpLen := minInt(len(left), len(right)) + cmpLen := min(len(left), len(right)) cs := c.charset sortOrder := c.sort for i := 0; i < cmpLen; i++ { @@ -62,7 +63,7 @@ func (c *Collation_multibyte) Collate(left, right []byte, isPrefix bool) int { } _, widthL := cs.DecodeRune(left[i:]) _, widthR := cs.DecodeRune(right[i:]) - switch minInt(widthL, widthR) { + switch min(widthL, widthR) { case 4: i++ if left[i] != right[i] { diff --git a/go/mysql/collations/mysqldata.go b/go/mysql/collations/colldata/mysqldata.go similarity index 99% rename from go/mysql/collations/mysqldata.go rename to go/mysql/collations/colldata/mysqldata.go index 0b3d10372d0..f626028cb95 100644 --- a/go/mysql/collations/mysqldata.go +++ b/go/mysql/collations/colldata/mysqldata.go @@ -1,6 +1,22 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + // Code generated by makecolldata DO NOT EDIT -package collations +package colldata import ( charset "vitess.io/vitess/go/mysql/collations/charset" diff --git a/go/mysql/collations/mysqlucadata.bin b/go/mysql/collations/colldata/mysqlucadata.bin similarity index 100% rename from go/mysql/collations/mysqlucadata.bin rename to go/mysql/collations/colldata/mysqlucadata.bin diff --git a/go/mysql/collations/mysqlucadata.go b/go/mysql/collations/colldata/mysqlucadata.go similarity index 99% rename from go/mysql/collations/mysqlucadata.go rename to go/mysql/collations/colldata/mysqlucadata.go index ae8e2d48642..0affc45d11f 100644 --- a/go/mysql/collations/mysqlucadata.go +++ b/go/mysql/collations/colldata/mysqlucadata.go @@ -1,10 +1,25 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + // Code generated by makecolldata DO NOT EDIT -package collations +package colldata import ( _ "embed" - reflect "reflect" unsafe "unsafe" ) @@ -1402,5 +1417,5 @@ var weightTable_uca520 = []*[]uint16{ var weightsUCA_embed_data string func weightsUCA_embed(pos, length int) []uint16 { - return (*[0x7fff0000]uint16)(unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&weightsUCA_embed_data)).Data))[pos : pos+length] + return (*[0x7fff0000]uint16)(unsafe.Pointer(unsafe.StringData(weightsUCA_embed_data)))[pos : pos+length] } diff --git a/go/mysql/collations/uca.go b/go/mysql/collations/colldata/uca.go similarity index 96% rename from go/mysql/collations/uca.go rename to go/mysql/collations/colldata/uca.go index 444fd3c295c..4b7272bfbc3 100644 --- a/go/mysql/collations/uca.go +++ b/go/mysql/collations/colldata/uca.go @@ -14,12 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package collations +package colldata import ( "bytes" "math/bits" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" "vitess.io/vitess/go/mysql/collations/internal/uca" "vitess.io/vitess/go/vt/vthash" @@ -27,7 +28,7 @@ import ( type Collation_utf8mb4_uca_0900 struct { name string - id ID + id collations.ID uca *uca.Collation900 } @@ -35,7 +36,7 @@ func (c *Collation_utf8mb4_uca_0900) Name() string { return c.name } -func (c *Collation_utf8mb4_uca_0900) ID() ID { +func (c *Collation_utf8mb4_uca_0900) ID() collations.ID { return c.id } @@ -213,7 +214,7 @@ func (c *Collation_utf8mb4_uca_0900) ToUpper(dst, src []byte) []byte { type Collation_utf8mb4_0900_bin struct{} -func (c *Collation_utf8mb4_0900_bin) ID() ID { +func (c *Collation_utf8mb4_0900_bin) ID() collations.ID { return 309 } @@ -271,11 +272,11 @@ func (c *Collation_utf8mb4_0900_bin) ToUpper(dst, src []byte) []byte { type Collation_uca_legacy struct { name string - id ID + id collations.ID uca *uca.CollationLegacy } -func (c 
*Collation_uca_legacy) ID() ID { +func (c *Collation_uca_legacy) ID() collations.ID { return c.id } diff --git a/go/mysql/collations/uca_contraction_test.go b/go/mysql/collations/colldata/uca_contraction_test.go similarity index 99% rename from go/mysql/collations/uca_contraction_test.go rename to go/mysql/collations/colldata/uca_contraction_test.go index 7d59b6fa4a8..d17ff21e255 100644 --- a/go/mysql/collations/uca_contraction_test.go +++ b/go/mysql/collations/colldata/uca_contraction_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package collations +package colldata import ( "encoding/json" diff --git a/go/mysql/collations/uca_tables_test.go b/go/mysql/collations/colldata/uca_tables_test.go similarity index 95% rename from go/mysql/collations/uca_tables_test.go rename to go/mysql/collations/colldata/uca_tables_test.go index 011095e1cf6..40c2f3bbed3 100644 --- a/go/mysql/collations/uca_tables_test.go +++ b/go/mysql/collations/colldata/uca_tables_test.go @@ -14,13 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package collations +package colldata import ( "encoding/json" "fmt" "os" - "reflect" "strconv" "testing" "unsafe" @@ -95,12 +94,12 @@ func TestWeightsForAllCodepoints(t *testing.T) { } func TestWeightTablesAreDeduplicated(t *testing.T) { - sliceptr := func(table uca.Weights) uintptr { - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&table)) - return hdr.Data + sliceptr := func(table uca.Weights) unsafe.Pointer { + data := unsafe.SliceData(table) + return unsafe.Pointer(data) } - uniqueTables := make(map[uintptr]int) + uniqueTables := make(map[unsafe.Pointer]int) for _, col := range testall() { var weights uca.Weights switch col := col.(type) { diff --git a/go/mysql/collations/uca_test.go b/go/mysql/collations/colldata/uca_test.go similarity index 99% rename from go/mysql/collations/uca_test.go rename to go/mysql/collations/colldata/uca_test.go index 5e3f22929c8..70c9312636e 100644 --- a/go/mysql/collations/uca_test.go +++ b/go/mysql/collations/colldata/uca_test.go @@ -14,12 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package collations +package colldata import ( "bytes" "fmt" "math/rand" + "slices" "sort" "strings" "sync" @@ -28,7 +29,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "golang.org/x/exp/slices" "vitess.io/vitess/go/mysql/collations/charset" "vitess.io/vitess/go/vt/vthash" @@ -949,8 +949,8 @@ func TestUCACollationOrder(t *testing.T) { j := rand.Intn(i + 1) ary[i], ary[j] = ary[j], ary[i] } - slices.SortFunc(ary, func(a, b string) bool { - return col.Collate([]byte(a), []byte(b), false) < 0 + slices.SortFunc(ary, func(a, b string) int { + return col.Collate([]byte(a), []byte(b), false) }) require.Equal(t, sorted, ary) } diff --git a/go/mysql/collations/unicase.go b/go/mysql/collations/colldata/unicase.go similarity index 99% rename from go/mysql/collations/unicase.go rename to go/mysql/collations/colldata/unicase.go index c669c2368ad..964d48d7107 100644 --- a/go/mysql/collations/unicase.go +++ b/go/mysql/collations/colldata/unicase.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package collations +package colldata import ( "vitess.io/vitess/go/mysql/collations/charset" diff --git a/go/mysql/collations/unicode.go b/go/mysql/collations/colldata/unicode.go similarity index 96% rename from go/mysql/collations/unicode.go rename to go/mysql/collations/colldata/unicode.go index 8168595cd34..c0495b0474f 100644 --- a/go/mysql/collations/unicode.go +++ b/go/mysql/collations/colldata/unicode.go @@ -14,25 +14,26 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package collations +package colldata import ( "bytes" "math" "math/bits" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" "vitess.io/vitess/go/vt/vthash" ) type Collation_unicode_general_ci struct { - id ID + id collations.ID name string unicase *UnicaseInfo charset charset.Charset } -func (c *Collation_unicode_general_ci) ID() ID { +func (c *Collation_unicode_general_ci) ID() collations.ID { return c.id } @@ -164,12 +165,12 @@ func (c *Collation_unicode_general_ci) Wildcard(pat []byte, matchOne rune, match } type Collation_unicode_bin struct { - id ID + id collations.ID name string charset charset.Charset } -func (c *Collation_unicode_bin) ID() ID { +func (c *Collation_unicode_bin) ID() collations.ID { return c.id } @@ -352,7 +353,7 @@ func (c *Collation_unicode_bin) Wildcard(pat []byte, matchOne rune, matchMany ru } func collationBinary(left, right []byte, rightPrefix bool) int { - minLen := minInt(len(left), len(right)) + minLen := min(len(left), len(right)) if diff := bytes.Compare(left[:minLen], right[:minLen]); diff != 0 { return diff } diff --git a/go/mysql/collations/wildcard.go b/go/mysql/collations/colldata/wildcard.go similarity index 99% rename from go/mysql/collations/wildcard.go rename to go/mysql/collations/colldata/wildcard.go index 5d8fd012375..01f4807b7df 100644 --- a/go/mysql/collations/wildcard.go +++ b/go/mysql/collations/colldata/wildcard.go @@ -38,7 +38,7 @@ limitations under the License. // // Because of this, we intend to enable the recursive algorithm by default. 
-package collations +package colldata import ( "unicode/utf8" diff --git a/go/mysql/collations/wildcard_test.go b/go/mysql/collations/colldata/wildcard_test.go similarity index 99% rename from go/mysql/collations/wildcard_test.go rename to go/mysql/collations/colldata/wildcard_test.go index dc6a44c644c..fff08f35c22 100644 --- a/go/mysql/collations/wildcard_test.go +++ b/go/mysql/collations/colldata/wildcard_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package collations +package colldata import ( "testing" diff --git a/go/mysql/collations/env.go b/go/mysql/collations/env.go index d5b113fb204..91fc2a8bd8c 100644 --- a/go/mysql/collations/env.go +++ b/go/mysql/collations/env.go @@ -18,26 +18,29 @@ package collations import ( "fmt" + "slices" "strings" "sync" ) type colldefaults struct { - Default Collation - Binary Collation + Default ID + Binary ID } // Environment is a collation environment for a MySQL version, which contains // a database of collations and defaults for that specific version. type Environment struct { - version collver - byName map[string]Collation - byCharset map[string]*colldefaults - unsupported map[string]ID + version collver + byName map[string]ID + byCharset map[string]*colldefaults + byCharsetName map[ID]string + unsupported map[string]ID + byID map[ID]string } // LookupByName returns the collation with the given name. -func (env *Environment) LookupByName(name string) Collation { +func (env *Environment) LookupByName(name string) ID { return env.byName[name] } @@ -45,37 +48,34 @@ func (env *Environment) LookupByName(name string) Collation { // the collation is supported by this package. 
func (env *Environment) LookupID(name string) (ID, bool) { if supported, ok := env.byName[name]; ok { - return supported.ID(), true + return supported, true } - if unsupported, ok := env.unsupported[name]; ok { - return unsupported, false + if unsup, ok := env.unsupported[name]; ok { + return unsup, false } return Unknown, false } +// LookupName returns the name of the collation with the given ID, or an +// empty string if the ID is unknown to this environment. +func (env *Environment) LookupName(id ID) string { + return env.byID[id] +} + // DefaultCollationForCharset returns the default collation for a charset -func (env *Environment) DefaultCollationForCharset(charset string) Collation { +func (env *Environment) DefaultCollationForCharset(charset string) ID { if defaults, ok := env.byCharset[charset]; ok { return defaults.Default } - return nil + return Unknown } // BinaryCollationForCharset returns the default binary collation for a charset -func (env *Environment) BinaryCollationForCharset(charset string) Collation { +func (env *Environment) BinaryCollationForCharset(charset string) ID { if defaults, ok := env.byCharset[charset]; ok { return defaults.Binary } - return nil -} - -// AllCollations returns a slice with all known collations in Vitess. 
-func (env *Environment) AllCollations() (all []Collation) { - all = make([]Collation, 0, len(env.byName)) - for _, col := range env.byName { - all = append(all, col) - } - return + return Unknown } var globalEnvironments = make(map[collver]*Environment) @@ -109,7 +109,7 @@ func NewEnvironment(serverVersion string) *Environment { case strings.HasSuffix(serverVersion, "-ripple"): // the ripple binlog server can mask the actual version of mysqld; // assume we have the highest - version = collverMySQL80 + version = collverMySQL8 case strings.Contains(serverVersion, "mariadb"): switch { case strings.Contains(serverVersion, "10.0."): @@ -125,66 +125,62 @@ func NewEnvironment(serverVersion string) *Environment { version = collverMySQL56 case strings.HasPrefix(serverVersion, "5.7."): version = collverMySQL57 - case strings.HasPrefix(serverVersion, "8.0."): - version = collverMySQL80 + case strings.HasPrefix(serverVersion, "8."): + version = collverMySQL8 } return fetchCacheEnvironment(version) } func makeEnv(version collver) *Environment { env := &Environment{ - version: version, - byName: make(map[string]Collation), - byCharset: make(map[string]*colldefaults), - unsupported: make(map[string]ID), + version: version, + byName: make(map[string]ID), + byCharset: make(map[string]*colldefaults), + byCharsetName: make(map[ID]string), + byID: make(map[ID]string), + unsupported: make(map[string]ID), } for collid, vi := range globalVersionInfo { var ournames []string + var ourcharsets []string for _, alias := range vi.alias { if alias.mask&version != 0 { ournames = append(ournames, alias.name) + ourcharsets = append(ourcharsets, alias.charset) } } if len(ournames) == 0 { continue } - var collation Collation - if int(collid) < len(collationsById) { - collation = collationsById[collid] - } - if collation == nil { + if int(collid) >= len(supported) || supported[collid] == "" { for _, name := range ournames { env.unsupported[name] = collid } continue } - for _, name := range ournames 
{ - env.byName[name] = collation - } - - csname := collation.Charset().Name() - if _, ok := env.byCharset[csname]; !ok { - env.byCharset[csname] = &colldefaults{} - } - defaults := env.byCharset[csname] - if vi.isdefault&version != 0 { - defaults.Default = collation - } - if collation.IsBinary() { - if defaults.Binary != nil && defaults.Binary.ID() > collation.ID() { - // If there's more than one binary collation, the one with the - // highest ID (i.e. the newest one) takes precedence. This applies - // to utf8mb4_bin vs utf8mb4_0900_bin - continue + for i, name := range ournames { + cs := ourcharsets[i] + env.byName[name] = collid + env.byID[collid] = name + env.byCharsetName[collid] = cs + defaults := env.byCharset[cs] + if defaults == nil { + defaults = &colldefaults{} + env.byCharset[cs] = defaults + } + if vi.isdefault&version != 0 { + defaults.Default = collid + } + if strings.HasSuffix(name, "_bin") && defaults.Binary < collid { + defaults.Binary = collid } - defaults.Binary = collation } } - for from, to := range version.charsetAliases() { + for from, to := range charsetAliases() { env.byCharset[from] = env.byCharset[to] } @@ -194,15 +190,13 @@ func makeEnv(version collver) *Environment { // A few interesting character set values. // See http://dev.mysql.com/doc/internals/en/character-set.html#packet-Protocol::CharacterSet const ( - CollationUtf8mb3ID = 33 - CollationUtf8mb4ID = 255 - CollationBinaryID = 63 - CollationUtf8mb4BinID = 46 + CollationUtf8mb3ID = 33 + CollationUtf8mb4ID = 255 + CollationBinaryID = 63 + CollationUtf8mb4BinID = 46 + CollationLatin1Swedish = 8 ) -// Binary is the default Binary collation -var Binary = ID(CollationBinaryID).Get() - // SystemCollation is the default collation for the system tables // such as the information schema. This is still utf8mb3 to match // MySQLs behavior. 
This means that you can't use utf8mb4 in table @@ -218,7 +212,7 @@ var SystemCollation = TypedCollation{ // this mapping will change, so it's important to use this helper so that // Vitess code has a consistent mapping for the active collations environment. func (env *Environment) CharsetAlias(charset string) (alias string, ok bool) { - alias, ok = env.version.charsetAliases()[charset] + alias, ok = charsetAliases()[charset] return } @@ -228,10 +222,10 @@ func (env *Environment) CharsetAlias(charset string) (alias string, ok bool) { // Vitess code has a consistent mapping for the active collations environment. func (env *Environment) CollationAlias(collation string) (string, bool) { col := env.LookupByName(collation) - if col == nil { + if col == Unknown { return collation, false } - allCols, ok := globalVersionInfo[col.ID()] + allCols, ok := globalVersionInfo[col] if !ok { return collation, false } @@ -239,7 +233,7 @@ func (env *Environment) CollationAlias(collation string) (string, bool) { return collation, false } for _, alias := range allCols.alias { - for source, dest := range env.version.charsetAliases() { + for source, dest := range charsetAliases() { if strings.HasPrefix(collation, fmt.Sprintf("%s_", source)) && strings.HasPrefix(alias.name, fmt.Sprintf("%s_", dest)) { return alias.name, true @@ -256,7 +250,7 @@ func (env *Environment) CollationAlias(collation string) (string, bool) { // For older MySQL environments, the default charset is `utf8mb4_general_ci`. 
func (env *Environment) DefaultConnectionCharset() uint8 { switch env.version { - case collverMySQL80: + case collverMySQL8: return uint8(CollationUtf8mb4ID) default: return 45 @@ -281,12 +275,29 @@ func (env *Environment) ParseConnectionCharset(csname string) (uint8, error) { var collid ID = 0 csname = strings.ToLower(csname) if defaults, ok := env.byCharset[csname]; ok { - collid = defaults.Default.ID() + collid = defaults.Default } else if coll, ok := env.byName[csname]; ok { - collid = coll.ID() + collid = coll } if collid == 0 || collid > 255 { return 0, fmt.Errorf("unsupported connection charset: %q", csname) } return uint8(collid), nil } + +func (env *Environment) AllCollationIDs() []ID { + all := make([]ID, 0, len(env.byID)) + for v := range env.byID { + all = append(all, v) + } + slices.Sort(all) + return all +} + +func (env *Environment) LookupByCharset(name string) *colldefaults { + return env.byCharset[name] +} + +func (env *Environment) LookupCharsetName(coll ID) string { + return env.byCharsetName[coll] +} diff --git a/go/mysql/collations/golden_test.go b/go/mysql/collations/golden_test.go index 32b9e90394f..099f77268b7 100644 --- a/go/mysql/collations/golden_test.go +++ b/go/mysql/collations/golden_test.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,71 +17,58 @@ limitations under the License. 
package collations import ( - "bytes" "fmt" "os" - "path/filepath" "sort" "strings" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/mysql/collations/charset" - "vitess.io/vitess/go/mysql/collations/internal/testutil" ) -func TestGoldenWeights(t *testing.T) { - gllGoldenTests, err := filepath.Glob("testdata/wiki_*.gob.gz") - if err != nil { - t.Fatal(err) +func TestAllCollationsByCharset(t *testing.T) { + var defaults1 = map[string][2]string{ + "utf8mb4": {"utf8mb4_general_ci", "utf8mb4_bin"}, } - - for _, goldenPath := range gllGoldenTests { - golden := &testutil.GoldenTest{} - if err := golden.DecodeFromFile(goldenPath); err != nil { - t.Fatal(err) - } - - for _, goldenCase := range golden.Cases { - t.Run(fmt.Sprintf("%s (%s)", golden.Name, goldenCase.Lang), func(t *testing.T) { - for coll, expected := range goldenCase.Weights { - coll := testcollation(t, coll) - - input, err := charset.ConvertFromUTF8(nil, coll.Charset(), goldenCase.Text) - if err != nil { - t.Fatal(err) - } - - result := coll.WeightString(nil, input, 0) - assert.True(t, bytes.Equal(expected, result), "mismatch for collation=%s\noriginal: %s\ninput: %#v\nexpected: %v\nactual: %v", coll.Name(), string(goldenCase.Text), input, expected, result) - - } - }) - } + var defaults2 = map[string][2]string{ + "utf8mb4": {"utf8mb4_0900_ai_ci", "utf8mb4_0900_bin"}, } -} -func TestCollationsForLanguage(t *testing.T) { - allCollations := testall() - langCounts := make(map[testutil.Lang][]string) + for _, tc := range []struct { + version collver + defaults map[string][2]string + }{ + {collverMariaDB100, defaults1}, + {collverMariaDB101, defaults1}, + {collverMariaDB102, defaults1}, + {collverMariaDB103, defaults1}, + {collverMySQL56, defaults1}, + {collverMySQL57, defaults1}, + {collverMySQL8, defaults2}, + } { + t.Run(tc.version.String(), func(t *testing.T) { + env := makeEnv(tc.version) + for csname, cset := range env.byCharset { + switch csname { 
+ case "gb18030": + // this doesn't work yet + continue + } + require.NotNil(t, cset.Default, "charset %s has no default", csname) + require.NotNil(t, cset.Binary, "charset %s has no binary", csname) - for lang := range testutil.KnownLanguages { - var matched []string - for _, coll := range allCollations { - name := coll.Name() - if lang.MatchesCollation(name) { - matched = append(matched, name) } - } - langCounts[lang] = matched - } - for lang := range testutil.KnownLanguages { - assert.NotEqual(t, 0, len(langCounts[lang]), "no collations found for %q", lang) - - t.Logf("%s: %v", lang, langCounts[lang]) + for charset, expected := range tc.defaults { + expectedDefault, expectedBinary := expected[0], expected[1] + if def := env.DefaultCollationForCharset(charset); env.LookupName(def) != expectedDefault { + t.Fatalf("bad default for utf8mb4: %s (expected %s)", env.LookupName(def), expectedDefault) + } + if def := env.BinaryCollationForCharset(charset); env.LookupName(def) != expectedBinary { + t.Fatalf("bad binary for utf8mb4: %s (expected %s)", env.LookupName(def), expectedBinary) + } + } + }) } } @@ -89,7 +76,7 @@ func TestCollationsForLanguage(t *testing.T) { // table with Collation support information for the current build of Vitess. 
func XTestSupportTables(t *testing.T) { var versions = []collver{ - collverMySQL80, + collverMySQL8, collverMySQL57, collverMySQL56, collverMariaDB103, @@ -120,8 +107,8 @@ func XTestSupportTables(t *testing.T) { fmt.Fprintf(out, " |\n|%s\n", strings.Repeat("---|", len(envs)+2)) for _, id := range all { - coll := collationsById[id] - if coll == nil { + name := envs[0].LookupName(id) + if name == "" { vdata := globalVersionInfo[id] var collnames []string @@ -148,9 +135,9 @@ func XTestSupportTables(t *testing.T) { } } } else { - fmt.Fprintf(out, "| %s | %s", coll.Name(), coll.Charset().Name()) + fmt.Fprintf(out, "| %s | %s", name, envs[0].LookupCharsetName(id)) for _, env := range envs { - _, supported := env.byName[coll.Name()] + _, supported := env.LookupID(name) if supported { fmt.Fprintf(out, " | ✅") } else { @@ -162,49 +149,3 @@ func XTestSupportTables(t *testing.T) { fmt.Fprintf(out, " |\n") } } - -func TestAllCollationsByCharset(t *testing.T) { - var defaults1 = map[string][2]string{ - "utf8mb4": {"utf8mb4_general_ci", "utf8mb4_bin"}, - } - var defaults2 = map[string][2]string{ - "utf8mb4": {"utf8mb4_0900_ai_ci", "utf8mb4_0900_bin"}, - } - - for _, tc := range []struct { - version collver - defaults map[string][2]string - }{ - {collverMariaDB100, defaults1}, - {collverMariaDB101, defaults1}, - {collverMariaDB102, defaults1}, - {collverMariaDB103, defaults1}, - {collverMySQL56, defaults1}, - {collverMySQL57, defaults1}, - {collverMySQL80, defaults2}, - } { - t.Run(tc.version.String(), func(t *testing.T) { - env := makeEnv(tc.version) - for csname, cset := range env.byCharset { - switch csname { - case "gb18030": - // this doesn't work yet - continue - } - require.NotNil(t, cset.Default, "charset %s has no default", csname) - require.NotNil(t, cset.Binary, "charset %s has no binary", csname) - - } - - for charset, expected := range tc.defaults { - expectedDefault, expectedBinary := expected[0], expected[1] - if def := env.DefaultCollationForCharset(charset); 
def.Name() != expectedDefault { - t.Fatalf("bad default for utf8mb4: %s (expected %s)", def.Name(), expectedDefault) - } - if def := env.BinaryCollationForCharset(charset); def.Name() != expectedBinary { - t.Fatalf("bad binary for utf8mb4: %s (expected %s)", def.Name(), expectedBinary) - } - } - }) - } -} diff --git a/go/mysql/collations/integration/charset_test.go b/go/mysql/collations/integration/charset_test.go index 2705dc29f5d..8a4d12a0e4d 100644 --- a/go/mysql/collations/integration/charset_test.go +++ b/go/mysql/collations/integration/charset_test.go @@ -23,6 +23,8 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" "vitess.io/vitess/go/mysql/collations/remote" @@ -45,7 +47,7 @@ func TestLocalEncodings(t *testing.T) { for _, tc := range cases { local := collations.Local().LookupByName(tc.collation) remote := remote.NewCollation(conn, tc.collation) - verifyTranscoding(t, local, remote, tc.input) + verifyTranscoding(t, colldata.Lookup(local), remote, tc.input) } } diff --git a/go/mysql/collations/integration/coercion_test.go b/go/mysql/collations/integration/coercion_test.go index 7ad31f78852..dad55bcafad 100644 --- a/go/mysql/collations/integration/coercion_test.go +++ b/go/mysql/collations/integration/coercion_test.go @@ -26,6 +26,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/remote" "vitess.io/vitess/go/sqltypes" @@ -33,18 +35,18 @@ import ( type TextWithCollation struct { Text []byte - Collation collations.Collation + Collation collations.ID } type RemoteCoercionResult struct { Expr sqltypes.Value - Collation collations.Collation + Collation collations.ID Coercibility collations.Coercibility } type RemoteCoercionTest interface { Expression() string - 
Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coerce1, coerce2 collations.Coercion) + Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coerce1, coerce2 colldata.Coercion) } type testConcat struct { @@ -52,15 +54,17 @@ type testConcat struct { } func (tc *testConcat) Expression() string { + env := collations.Local() return fmt.Sprintf("CONCAT((_%s X'%x' COLLATE %q), (_%s X'%x' COLLATE %q))", - tc.left.Collation.Charset().Name(), tc.left.Text, tc.left.Collation.Name(), - tc.right.Collation.Charset().Name(), tc.right.Text, tc.right.Collation.Name(), + colldata.Lookup(tc.left.Collation).Charset().Name(), tc.left.Text, env.LookupName(tc.left.Collation), + colldata.Lookup(tc.right.Collation).Charset().Name(), tc.right.Text, env.LookupName(tc.right.Collation), ) } -func (tc *testConcat) Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coercion1, coercion2 collations.Coercion) { - localCollation := local.Collation.Get() - assert.Equal(t, remote.Collation.Name(), localCollation.Name(), "bad collation resolved: local is %s, remote is %s", localCollation.Name(), remote.Collation.Name()) +func (tc *testConcat) Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coercion1, coercion2 colldata.Coercion) { + localCollation := colldata.Lookup(local.Collation) + remoteName := collations.Local().LookupName(remote.Collation) + assert.Equal(t, remoteName, localCollation.Name(), "bad collation resolved: local is %s, remote is %s", localCollation.Name(), remoteName) assert.Equal(t, remote.Coercibility, local.Coercibility, "bad coercibility resolved: local is %d, remote is %d", local.Coercibility, remote.Coercibility) leftText, err := coercion1(nil, tc.left.Text) @@ -81,8 +85,8 @@ func (tc *testConcat) Test(t *testing.T, remote *RemoteCoercionResult, local col rEBytes, err := remote.Expr.ToBytes() require.NoError(t, err) - assert.True(t, 
bytes.Equal(concat.Bytes(), rEBytes), "failed to concatenate text;\n\tCONCAT(%v COLLATE %s, %v COLLATE %s) = \n\tCONCAT(%v, %v) COLLATE %s = \n\t\t%v\n\n\texpected: %v", tc.left.Text, tc.left.Collation.Name(), - tc.right.Text, tc.right.Collation.Name(), leftText, rightText, localCollation.Name(), + assert.True(t, bytes.Equal(concat.Bytes(), rEBytes), "failed to concatenate text;\n\tCONCAT(%v COLLATE %s, %v COLLATE %s) = \n\tCONCAT(%v, %v) COLLATE %s = \n\t\t%v\n\n\texpected: %v", tc.left.Text, collations.Local().LookupName(tc.left.Collation), + tc.right.Text, collations.Local().LookupName(tc.right.Collation), leftText, rightText, localCollation.Name(), concat.Bytes(), rEBytes) } @@ -92,14 +96,15 @@ type testComparison struct { } func (tc *testComparison) Expression() string { + env := collations.Local() return fmt.Sprintf("(_%s X'%x' COLLATE %q) = (_%s X'%x' COLLATE %q)", - tc.left.Collation.Charset().Name(), tc.left.Text, tc.left.Collation.Name(), - tc.right.Collation.Charset().Name(), tc.right.Text, tc.right.Collation.Name(), + env.LookupCharsetName(tc.left.Collation), tc.left.Text, env.LookupName(tc.left.Collation), + env.LookupCharsetName(tc.right.Collation), tc.right.Text, env.LookupName(tc.right.Collation), ) } -func (tc *testComparison) Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coerce1, coerce2 collations.Coercion) { - localCollation := local.Collation.Get() +func (tc *testComparison) Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coerce1, coerce2 colldata.Coercion) { + localCollation := colldata.Lookup(local.Collation) leftText, err := coerce1(nil, tc.left.Text) if err != nil { t.Errorf("failed to transcode left: %v", err) @@ -130,12 +135,12 @@ func TestComparisonSemantics(t *testing.T) { t.Skipf("The behavior of Coercion Semantics is not correct before 8.0.31") } - for _, coll := range collations.Local().AllCollations() { + for _, coll := range colldata.All(collations.Local()) { 
text := verifyTranscoding(t, coll, remote.NewCollation(conn, coll.Name()), []byte(BaseString)) - testInputs = append(testInputs, &TextWithCollation{Text: text, Collation: coll}) + testInputs = append(testInputs, &TextWithCollation{Text: text, Collation: coll.ID()}) } sort.Slice(testInputs, func(i, j int) bool { - return testInputs[i].Collation.ID() < testInputs[j].Collation.ID() + return testInputs[i].Collation < testInputs[j].Collation }) var testCases = []struct { @@ -161,17 +166,17 @@ func TestComparisonSemantics(t *testing.T) { for _, collA := range testInputs { for _, collB := range testInputs { left := collations.TypedCollation{ - Collation: collA.Collation.ID(), + Collation: collA.Collation, Coercibility: 0, Repertoire: collations.RepertoireASCII, } right := collations.TypedCollation{ - Collation: collB.Collation.ID(), + Collation: collB.Collation, Coercibility: 0, Repertoire: collations.RepertoireASCII, } - resultLocal, coercionLocal1, coercionLocal2, errLocal := collations.Local().MergeCollations(left, right, - collations.CoercionOptions{ + resultLocal, coercionLocal1, coercionLocal2, errLocal := colldata.Merge(collations.Local(), left, right, + colldata.CoercionOptions{ ConvertToSuperset: true, ConvertWithCoercion: true, }) @@ -189,11 +194,12 @@ func TestComparisonSemantics(t *testing.T) { query := fmt.Sprintf("SELECT CAST((%s) AS BINARY), COLLATION(%s), COERCIBILITY(%s)", expr, expr, expr) resultRemote, errRemote := conn.ExecuteFetch(query, 1, false) + env := collations.Local() if errRemote != nil { require.True(t, strings.Contains(errRemote.Error(), "Illegal mix of collations"), "query %s failed: %v", query, errRemote) if errLocal == nil { - t.Errorf("expected %s vs %s to fail coercion: %v", collA.Collation.Name(), collB.Collation.Name(), errRemote) + t.Errorf("expected %s vs %s to fail coercion: %v", env.LookupName(collA.Collation), env.LookupName(collB.Collation), errRemote) continue } require.True(t, 
strings.HasPrefix(normalizeCollationInError(errRemote.Error()), normalizeCollationInError(errLocal.Error())), "bad error message: expected %q, got %q", errRemote, errLocal) @@ -202,7 +208,7 @@ func TestComparisonSemantics(t *testing.T) { } if errLocal != nil { - t.Errorf("expected %s vs %s to coerce, but they failed: %v", collA.Collation.Name(), collB.Collation.Name(), errLocal) + t.Errorf("expected %s vs %s to coerce, but they failed: %v", env.LookupName(collA.Collation), env.LookupName(collB.Collation), errLocal) continue } diff --git a/go/mysql/collations/integration/collations_test.go b/go/mysql/collations/integration/collations_test.go index 32ffb81a498..3b33e23e2d3 100644 --- a/go/mysql/collations/integration/collations_test.go +++ b/go/mysql/collations/integration/collations_test.go @@ -31,6 +31,8 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/text/encoding/unicode/utf32" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/remote" @@ -140,7 +142,7 @@ func (u *uca900CollationTest) Test(t *testing.T, result *sqltypes.Result) { continue } - weightString := coll.WeightString(make([]byte, 0, 128), utf8Input, 0) + weightString := colldata.Lookup(coll).WeightString(make([]byte, 0, 128), utf8Input, 0) if !bytes.Equal(weightString, expectedWeightString) { t.Errorf("[%s] mismatch for %s (%v): \n\twant: %v\n\tgot: %v", u.collation, row[2].ToString(), utf8Input, expectedWeightString, weightString) errors++ @@ -227,7 +229,7 @@ func TestCollationWithSpace(t *testing.T) { remote := remote.NewCollation(conn, collName) for _, size := range []int{0, codepoints, codepoints + 1, codepoints + 2, 20, 32} { - localWeight := local.WeightString(nil, []byte(ExampleString), size) + localWeight := colldata.Lookup(local).WeightString(nil, []byte(ExampleString), size) remoteWeight := remote.WeightString(nil, []byte(ExampleString), size) require.True(t, 
bytes.Equal(localWeight, remoteWeight), "mismatch at len=%d\ninput: %#v\nexpected: %#v\nactual: %#v", size, []byte(ExampleString), remoteWeight, localWeight) diff --git a/go/mysql/collations/integration/helpers_test.go b/go/mysql/collations/integration/helpers_test.go index 95410fbb74a..d436280f04b 100644 --- a/go/mysql/collations/integration/helpers_test.go +++ b/go/mysql/collations/integration/helpers_test.go @@ -27,6 +27,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" @@ -52,7 +54,7 @@ func testRemoteWeights(t *testing.T, golden io.Writer, cases []testweight) { t.Run(tc.collation, func(t *testing.T) { local := collations.Local().LookupByName(tc.collation) remote := remote.NewCollation(conn, tc.collation) - localResult := local.WeightString(nil, tc.input, 0) + localResult := colldata.Lookup(local).WeightString(nil, tc.input, 0) remoteResult := remote.WeightString(nil, tc.input, 0) if err := remote.LastError(); err != nil { @@ -85,7 +87,7 @@ func testRemoteComparison(t *testing.T, golden io.Writer, cases []testcmp) { t.Run(tc.collation, func(t *testing.T) { local := collations.Local().LookupByName(tc.collation) remote := remote.NewCollation(conn, tc.collation) - localResult := normalizecmp(local.Collate(tc.left, tc.right, false)) + localResult := normalizecmp(colldata.Lookup(local).Collate(tc.left, tc.right, false)) remoteResult := remote.Collate(tc.left, tc.right, false) if err := remote.LastError(); err != nil { @@ -101,7 +103,7 @@ func testRemoteComparison(t *testing.T, golden io.Writer, cases []testcmp) { } } -func verifyTranscoding(t *testing.T, local collations.Collation, remote *remote.Collation, text []byte) []byte { +func verifyTranscoding(t *testing.T, local colldata.Collation, remote *remote.Collation, text []byte) []byte { transRemote, err := 
charset.ConvertFromUTF8(nil, remote.Charset(), text) require.NoError(t, err, "remote transcoding failed: %v", err) @@ -112,7 +114,7 @@ func verifyTranscoding(t *testing.T, local collations.Collation, remote *remote. return transLocal } -func verifyWeightString(t *testing.T, local collations.Collation, remote *remote.Collation, text []byte) { +func verifyWeightString(t *testing.T, local colldata.Collation, remote *remote.Collation, text []byte) { localResult := local.WeightString(nil, text, 0) remoteResult := remote.WeightString(nil, text, 0) diff --git a/go/mysql/collations/integration/weight_string_test.go b/go/mysql/collations/integration/weight_string_test.go index c93a9ed586e..170da4f5987 100644 --- a/go/mysql/collations/integration/weight_string_test.go +++ b/go/mysql/collations/integration/weight_string_test.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/mysql/collations/internal/testutil" "vitess.io/vitess/go/mysql/collations/remote" ) @@ -46,7 +47,7 @@ func TestFastIterators(t *testing.T) { func TestWeightStringsComprehensive(t *testing.T) { type collationsForCharset struct { charset charset.Charset - locals []collations.Collation + locals []colldata.Collation remotes []*remote.Collation } var charsetMap = make(map[string]*collationsForCharset) @@ -59,7 +60,7 @@ func TestWeightStringsComprehensive(t *testing.T) { conn := mysqlconn(t) defer conn.Close() - allCollations := collations.Local().AllCollations() + allCollations := colldata.All(collations.Local()) sort.Slice(allCollations, func(i, j int) bool { return allCollations[i].ID() < allCollations[j].ID() }) @@ -103,16 +104,16 @@ func TestCJKWeightStrings(t *testing.T) { conn := mysqlconn(t) defer conn.Close() - allCollations := collations.Local().AllCollations() + allCollations := colldata.All(collations.Local()) testdata, _ := 
filepath.Glob("../internal/charset/testdata/*.txt") for _, testfile := range testdata { - charset := filepath.Base(testfile) - charset = strings.TrimSuffix(charset, ".txt") - charset = charset[strings.LastIndexByte(charset, '-')+1:] + cs := filepath.Base(testfile) + cs = strings.TrimSuffix(cs, ".txt") + cs = cs[strings.LastIndexByte(cs, '-')+1:] - var valid []collations.Collation + var valid []colldata.Collation for _, coll := range allCollations { - if coll.Charset().Name() == charset { + if coll.Charset().Name() == cs { valid = append(valid, coll) t.Logf("%s -> %s", testfile, coll.Name()) } diff --git a/go/mysql/collations/integration/wildcard_test.go b/go/mysql/collations/integration/wildcard_test.go index a848e5b7867..6475a35dd21 100644 --- a/go/mysql/collations/integration/wildcard_test.go +++ b/go/mysql/collations/integration/wildcard_test.go @@ -22,6 +22,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/mysql/collations/remote" ) @@ -78,7 +79,7 @@ func TestRemoteWildcardMatches(t *testing.T) { {"Ǎḅeçd", "a%bd"}, } - for _, local := range collations.Local().AllCollations() { + for _, local := range colldata.All(collations.Local()) { t.Run(local.Name(), func(t *testing.T) { var remote = remote.NewCollation(conn, local.Name()) var err error diff --git a/go/mysql/collations/internal/uca/contractions.go b/go/mysql/collations/internal/uca/contractions.go index c4ff99d42e2..d894b0e206e 100644 --- a/go/mysql/collations/internal/uca/contractions.go +++ b/go/mysql/collations/internal/uca/contractions.go @@ -18,7 +18,6 @@ package uca import ( "fmt" - "unicode/utf8" "vitess.io/vitess/go/mysql/collations/charset" ) @@ -28,19 +27,6 @@ type trie struct { weights []uint16 } -func (t *trie) walkUTF8(remainder []byte) ([]uint16, []byte) { - if len(remainder) > 0 { - cp, width := utf8.DecodeRune(remainder) - if cp == utf8.RuneError && width < 3 { - return 
nil, nil - } - if ch := t.children[cp]; ch != nil { - return ch.walkUTF8(remainder[width:]) - } - } - return t.weights, remainder -} - func (t *trie) walkCharset(cs charset.Charset, remainder []byte, depth int) ([]uint16, []byte, int) { if len(remainder) > 0 { cp, width := cs.DecodeRune(remainder) diff --git a/go/mysql/collations/internal/uca/fasttables.go b/go/mysql/collations/internal/uca/fasttables.go index 1995a78a664..40f3718babe 100644 --- a/go/mysql/collations/internal/uca/fasttables.go +++ b/go/mysql/collations/internal/uca/fasttables.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + // Code generated by makecolldata DO NOT EDIT package uca diff --git a/go/mysql/collations/internal/uca/layout.go b/go/mysql/collations/internal/uca/layout.go index a5ee45a0ece..35a2749eb21 100644 --- a/go/mysql/collations/internal/uca/layout.go +++ b/go/mysql/collations/internal/uca/layout.go @@ -17,7 +17,6 @@ limitations under the License. 
package uca import ( - "reflect" "sync" "unsafe" ) @@ -287,29 +286,29 @@ func (Layout_uca_legacy) applyPatches(page []uint16, offset int, weights []uint1 } type tableWithPatch struct { - tableptr uintptr - patchptr uintptr + tableptr unsafe.Pointer + patchptr unsafe.Pointer } var cachedTables = make(map[tableWithPatch]Weights) var cachedTablesMu sync.Mutex func lookupCachedTable(table Weights, patch []Patch) (Weights, bool) { - hdr1 := (*reflect.SliceHeader)(unsafe.Pointer(&table)) - hdr2 := (*reflect.SliceHeader)(unsafe.Pointer(&patch)) + data1 := unsafe.Pointer(unsafe.SliceData(table)) + data2 := unsafe.Pointer(unsafe.SliceData(patch)) cachedTablesMu.Lock() defer cachedTablesMu.Unlock() - tbl, ok := cachedTables[tableWithPatch{hdr1.Data, hdr2.Data}] + tbl, ok := cachedTables[tableWithPatch{tableptr: data1, patchptr: data2}] return tbl, ok } func storeCachedTable(table Weights, patch []Patch, result Weights) { - hdr1 := (*reflect.SliceHeader)(unsafe.Pointer(&table)) - hdr2 := (*reflect.SliceHeader)(unsafe.Pointer(&patch)) + data1 := unsafe.Pointer(unsafe.SliceData(table)) + data2 := unsafe.Pointer(unsafe.SliceData(patch)) cachedTablesMu.Lock() - cachedTables[tableWithPatch{hdr1.Data, hdr2.Data}] = result + cachedTables[tableWithPatch{tableptr: data1, patchptr: data2}] = result cachedTablesMu.Unlock() } diff --git a/go/mysql/collations/mysqlversion.go b/go/mysql/collations/mysqlversion.go index 2a1409fbb7e..93d1add9b6a 100644 --- a/go/mysql/collations/mysqlversion.go +++ b/go/mysql/collations/mysqlversion.go @@ -1,11 +1,28 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + // Code generated by makecolldata DO NOT EDIT package collations type collver byte type collalias struct { - mask collver - name string + mask collver + name string + charset string } const ( @@ -16,7 +33,7 @@ const ( collverMariaDB103 collver = 1 << 3 collverMySQL56 collver = 1 << 4 collverMySQL57 collver = 1 << 5 - collverMySQL80 collver = 1 << 6 + collverMySQL8 collver = 1 << 6 ) func (v collver) String() string { @@ -35,405 +52,405 @@ func (v collver) String() string { return "MySQL 5.6" case collverMySQL57: return "MySQL 5.7" - case collverMySQL80: - return "MySQL 8.0" + case collverMySQL8: + return "MySQL 8.0" default: panic("invalid version identifier") } } -func (v collver) charsetAliases() map[string]string { return map[string]string{"utf8": "utf8mb3"} } +func charsetAliases() map[string]string { return map[string]string{"utf8": "utf8mb3"} } var globalVersionInfo = map[ID]struct { alias []collalias isdefault collver }{ - 1: {alias: []collalias{{0b01111111, "big5_chinese_ci"}}, isdefault: 0b01111111}, - 2: {alias: []collalias{{0b01111111, "latin2_czech_cs"}}, isdefault: 0b00000000}, - 3: {alias: []collalias{{0b01111111, "dec8_swedish_ci"}}, isdefault: 0b01111111}, - 4: {alias: []collalias{{0b01111111, "cp850_general_ci"}}, isdefault: 0b01111111}, - 5: {alias: []collalias{{0b01111111, "latin1_german1_ci"}}, isdefault: 0b00000000}, - 6: {alias: []collalias{{0b01111111, "hp8_english_ci"}}, isdefault: 0b01111111}, - 7: {alias: []collalias{{0b01111111, "koi8r_general_ci"}}, isdefault: 0b01111111}, - 8: {alias: []collalias{{0b01111111, "latin1_swedish_ci"}},
isdefault: 0b01111111}, - 9: {alias: []collalias{{0b01111111, "latin2_general_ci"}}, isdefault: 0b01111111}, - 10: {alias: []collalias{{0b01111111, "swe7_swedish_ci"}}, isdefault: 0b01111111}, - 11: {alias: []collalias{{0b01111111, "ascii_general_ci"}}, isdefault: 0b01111111}, - 12: {alias: []collalias{{0b01111111, "ujis_japanese_ci"}}, isdefault: 0b01111111}, - 13: {alias: []collalias{{0b01111111, "sjis_japanese_ci"}}, isdefault: 0b01111111}, - 14: {alias: []collalias{{0b01111111, "cp1251_bulgarian_ci"}}, isdefault: 0b00000000}, - 15: {alias: []collalias{{0b01111111, "latin1_danish_ci"}}, isdefault: 0b00000000}, - 16: {alias: []collalias{{0b01111111, "hebrew_general_ci"}}, isdefault: 0b01111111}, - 18: {alias: []collalias{{0b01111111, "tis620_thai_ci"}}, isdefault: 0b01111111}, - 19: {alias: []collalias{{0b01111111, "euckr_korean_ci"}}, isdefault: 0b01111111}, - 20: {alias: []collalias{{0b01111111, "latin7_estonian_cs"}}, isdefault: 0b00000000}, - 21: {alias: []collalias{{0b01111111, "latin2_hungarian_ci"}}, isdefault: 0b00000000}, - 22: {alias: []collalias{{0b01111111, "koi8u_general_ci"}}, isdefault: 0b01111111}, - 23: {alias: []collalias{{0b01111111, "cp1251_ukrainian_ci"}}, isdefault: 0b00000000}, - 24: {alias: []collalias{{0b01111111, "gb2312_chinese_ci"}}, isdefault: 0b01111111}, - 25: {alias: []collalias{{0b01111111, "greek_general_ci"}}, isdefault: 0b01111111}, - 26: {alias: []collalias{{0b01111111, "cp1250_general_ci"}}, isdefault: 0b01111111}, - 27: {alias: []collalias{{0b01111111, "latin2_croatian_ci"}}, isdefault: 0b00000000}, - 28: {alias: []collalias{{0b01111111, "gbk_chinese_ci"}}, isdefault: 0b01111111}, - 29: {alias: []collalias{{0b01111111, "cp1257_lithuanian_ci"}}, isdefault: 0b00000000}, - 30: {alias: []collalias{{0b01111111, "latin5_turkish_ci"}}, isdefault: 0b01111111}, - 31: {alias: []collalias{{0b01111111, "latin1_german2_ci"}}, isdefault: 0b00000000}, - 32: {alias: []collalias{{0b01111111, "armscii8_general_ci"}}, isdefault: 0b01111111}, - 
33: {alias: []collalias{{0b01111111, "utf8_general_ci"}, {0b01111111, "utf8mb3_general_ci"}}, isdefault: 0b01111111}, - 34: {alias: []collalias{{0b01111111, "cp1250_czech_cs"}}, isdefault: 0b00000000}, - 35: {alias: []collalias{{0b01111111, "ucs2_general_ci"}}, isdefault: 0b01111111}, - 36: {alias: []collalias{{0b01111111, "cp866_general_ci"}}, isdefault: 0b01111111}, - 37: {alias: []collalias{{0b01111111, "keybcs2_general_ci"}}, isdefault: 0b01111111}, - 38: {alias: []collalias{{0b01111111, "macce_general_ci"}}, isdefault: 0b01111111}, - 39: {alias: []collalias{{0b01111111, "macroman_general_ci"}}, isdefault: 0b01111111}, - 40: {alias: []collalias{{0b01111111, "cp852_general_ci"}}, isdefault: 0b01111111}, - 41: {alias: []collalias{{0b01111111, "latin7_general_ci"}}, isdefault: 0b01111111}, - 42: {alias: []collalias{{0b01111111, "latin7_general_cs"}}, isdefault: 0b00000000}, - 43: {alias: []collalias{{0b01111111, "macce_bin"}}, isdefault: 0b00000000}, - 44: {alias: []collalias{{0b01111111, "cp1250_croatian_ci"}}, isdefault: 0b00000000}, - 45: {alias: []collalias{{0b01111111, "utf8mb4_general_ci"}}, isdefault: 0b00111111}, - 46: {alias: []collalias{{0b01111111, "utf8mb4_bin"}}, isdefault: 0b00000000}, - 47: {alias: []collalias{{0b01111111, "latin1_bin"}}, isdefault: 0b00000000}, - 48: {alias: []collalias{{0b01111111, "latin1_general_ci"}}, isdefault: 0b00000000}, - 49: {alias: []collalias{{0b01111111, "latin1_general_cs"}}, isdefault: 0b00000000}, - 50: {alias: []collalias{{0b01111111, "cp1251_bin"}}, isdefault: 0b00000000}, - 51: {alias: []collalias{{0b01111111, "cp1251_general_ci"}}, isdefault: 0b01111111}, - 52: {alias: []collalias{{0b01111111, "cp1251_general_cs"}}, isdefault: 0b00000000}, - 53: {alias: []collalias{{0b01111111, "macroman_bin"}}, isdefault: 0b00000000}, - 54: {alias: []collalias{{0b01111111, "utf16_general_ci"}}, isdefault: 0b01111111}, - 55: {alias: []collalias{{0b01111111, "utf16_bin"}}, isdefault: 0b00000000}, - 56: {alias: 
[]collalias{{0b01111111, "utf16le_general_ci"}}, isdefault: 0b01111111}, - 57: {alias: []collalias{{0b01111111, "cp1256_general_ci"}}, isdefault: 0b01111111}, - 58: {alias: []collalias{{0b01111111, "cp1257_bin"}}, isdefault: 0b00000000}, - 59: {alias: []collalias{{0b01111111, "cp1257_general_ci"}}, isdefault: 0b01111111}, - 60: {alias: []collalias{{0b01111111, "utf32_general_ci"}}, isdefault: 0b01111111}, - 61: {alias: []collalias{{0b01111111, "utf32_bin"}}, isdefault: 0b00000000}, - 62: {alias: []collalias{{0b01111111, "utf16le_bin"}}, isdefault: 0b00000000}, - 63: {alias: []collalias{{0b01111111, "binary"}}, isdefault: 0b01111111}, - 64: {alias: []collalias{{0b01111111, "armscii8_bin"}}, isdefault: 0b00000000}, - 65: {alias: []collalias{{0b01111111, "ascii_bin"}}, isdefault: 0b00000000}, - 66: {alias: []collalias{{0b01111111, "cp1250_bin"}}, isdefault: 0b00000000}, - 67: {alias: []collalias{{0b01111111, "cp1256_bin"}}, isdefault: 0b00000000}, - 68: {alias: []collalias{{0b01111111, "cp866_bin"}}, isdefault: 0b00000000}, - 69: {alias: []collalias{{0b01111111, "dec8_bin"}}, isdefault: 0b00000000}, - 70: {alias: []collalias{{0b01111111, "greek_bin"}}, isdefault: 0b00000000}, - 71: {alias: []collalias{{0b01111111, "hebrew_bin"}}, isdefault: 0b00000000}, - 72: {alias: []collalias{{0b01111111, "hp8_bin"}}, isdefault: 0b00000000}, - 73: {alias: []collalias{{0b01111111, "keybcs2_bin"}}, isdefault: 0b00000000}, - 74: {alias: []collalias{{0b01111111, "koi8r_bin"}}, isdefault: 0b00000000}, - 75: {alias: []collalias{{0b01111111, "koi8u_bin"}}, isdefault: 0b00000000}, - 76: {alias: []collalias{{0b01000000, "utf8_tolower_ci"}, {0b01000000, "utf8mb3_tolower_ci"}}, isdefault: 0b00000000}, - 77: {alias: []collalias{{0b01111111, "latin2_bin"}}, isdefault: 0b00000000}, - 78: {alias: []collalias{{0b01111111, "latin5_bin"}}, isdefault: 0b00000000}, - 79: {alias: []collalias{{0b01111111, "latin7_bin"}}, isdefault: 0b00000000}, - 80: {alias: []collalias{{0b01111111, "cp850_bin"}}, 
isdefault: 0b00000000}, - 81: {alias: []collalias{{0b01111111, "cp852_bin"}}, isdefault: 0b00000000}, - 82: {alias: []collalias{{0b01111111, "swe7_bin"}}, isdefault: 0b00000000}, - 83: {alias: []collalias{{0b01111111, "utf8_bin"}, {0b01111111, "utf8mb3_bin"}}, isdefault: 0b00000000}, - 84: {alias: []collalias{{0b01111111, "big5_bin"}}, isdefault: 0b00000000}, - 85: {alias: []collalias{{0b01111111, "euckr_bin"}}, isdefault: 0b00000000}, - 86: {alias: []collalias{{0b01111111, "gb2312_bin"}}, isdefault: 0b00000000}, - 87: {alias: []collalias{{0b01111111, "gbk_bin"}}, isdefault: 0b00000000}, - 88: {alias: []collalias{{0b01111111, "sjis_bin"}}, isdefault: 0b00000000}, - 89: {alias: []collalias{{0b01111111, "tis620_bin"}}, isdefault: 0b00000000}, - 90: {alias: []collalias{{0b01111111, "ucs2_bin"}}, isdefault: 0b00000000}, - 91: {alias: []collalias{{0b01111111, "ujis_bin"}}, isdefault: 0b00000000}, - 92: {alias: []collalias{{0b01111111, "geostd8_general_ci"}}, isdefault: 0b01111111}, - 93: {alias: []collalias{{0b01111111, "geostd8_bin"}}, isdefault: 0b00000000}, - 94: {alias: []collalias{{0b01111111, "latin1_spanish_ci"}}, isdefault: 0b00000000}, - 95: {alias: []collalias{{0b01111111, "cp932_japanese_ci"}}, isdefault: 0b01111111}, - 96: {alias: []collalias{{0b01111111, "cp932_bin"}}, isdefault: 0b00000000}, - 97: {alias: []collalias{{0b01111111, "eucjpms_japanese_ci"}}, isdefault: 0b01111111}, - 98: {alias: []collalias{{0b01111111, "eucjpms_bin"}}, isdefault: 0b00000000}, - 99: {alias: []collalias{{0b01111111, "cp1250_polish_ci"}}, isdefault: 0b00000000}, - 101: {alias: []collalias{{0b01111111, "utf16_unicode_ci"}}, isdefault: 0b00000000}, - 102: {alias: []collalias{{0b01111111, "utf16_icelandic_ci"}}, isdefault: 0b00000000}, - 103: {alias: []collalias{{0b01111111, "utf16_latvian_ci"}}, isdefault: 0b00000000}, - 104: {alias: []collalias{{0b01111111, "utf16_romanian_ci"}}, isdefault: 0b00000000}, - 105: {alias: []collalias{{0b01111111, "utf16_slovenian_ci"}}, isdefault: 
0b00000000}, - 106: {alias: []collalias{{0b01111111, "utf16_polish_ci"}}, isdefault: 0b00000000}, - 107: {alias: []collalias{{0b01111111, "utf16_estonian_ci"}}, isdefault: 0b00000000}, - 108: {alias: []collalias{{0b01111111, "utf16_spanish_ci"}}, isdefault: 0b00000000}, - 109: {alias: []collalias{{0b01111111, "utf16_swedish_ci"}}, isdefault: 0b00000000}, - 110: {alias: []collalias{{0b01111111, "utf16_turkish_ci"}}, isdefault: 0b00000000}, - 111: {alias: []collalias{{0b01111111, "utf16_czech_ci"}}, isdefault: 0b00000000}, - 112: {alias: []collalias{{0b01111111, "utf16_danish_ci"}}, isdefault: 0b00000000}, - 113: {alias: []collalias{{0b01111111, "utf16_lithuanian_ci"}}, isdefault: 0b00000000}, - 114: {alias: []collalias{{0b01111111, "utf16_slovak_ci"}}, isdefault: 0b00000000}, - 115: {alias: []collalias{{0b01111111, "utf16_spanish2_ci"}}, isdefault: 0b00000000}, - 116: {alias: []collalias{{0b01111111, "utf16_roman_ci"}}, isdefault: 0b00000000}, - 117: {alias: []collalias{{0b01111111, "utf16_persian_ci"}}, isdefault: 0b00000000}, - 118: {alias: []collalias{{0b01111111, "utf16_esperanto_ci"}}, isdefault: 0b00000000}, - 119: {alias: []collalias{{0b01111111, "utf16_hungarian_ci"}}, isdefault: 0b00000000}, - 120: {alias: []collalias{{0b01111111, "utf16_sinhala_ci"}}, isdefault: 0b00000000}, - 121: {alias: []collalias{{0b01111111, "utf16_german2_ci"}}, isdefault: 0b00000000}, - 122: {alias: []collalias{{0b01110000, "utf16_croatian_ci"}, {0b00001111, "utf16_croatian_mysql561_ci"}}, isdefault: 0b00000000}, - 123: {alias: []collalias{{0b01111111, "utf16_unicode_520_ci"}}, isdefault: 0b00000000}, - 124: {alias: []collalias{{0b01111111, "utf16_vietnamese_ci"}}, isdefault: 0b00000000}, - 128: {alias: []collalias{{0b01111111, "ucs2_unicode_ci"}}, isdefault: 0b00000000}, - 129: {alias: []collalias{{0b01111111, "ucs2_icelandic_ci"}}, isdefault: 0b00000000}, - 130: {alias: []collalias{{0b01111111, "ucs2_latvian_ci"}}, isdefault: 0b00000000}, - 131: {alias: []collalias{{0b01111111, 
"ucs2_romanian_ci"}}, isdefault: 0b00000000}, - 132: {alias: []collalias{{0b01111111, "ucs2_slovenian_ci"}}, isdefault: 0b00000000}, - 133: {alias: []collalias{{0b01111111, "ucs2_polish_ci"}}, isdefault: 0b00000000}, - 134: {alias: []collalias{{0b01111111, "ucs2_estonian_ci"}}, isdefault: 0b00000000}, - 135: {alias: []collalias{{0b01111111, "ucs2_spanish_ci"}}, isdefault: 0b00000000}, - 136: {alias: []collalias{{0b01111111, "ucs2_swedish_ci"}}, isdefault: 0b00000000}, - 137: {alias: []collalias{{0b01111111, "ucs2_turkish_ci"}}, isdefault: 0b00000000}, - 138: {alias: []collalias{{0b01111111, "ucs2_czech_ci"}}, isdefault: 0b00000000}, - 139: {alias: []collalias{{0b01111111, "ucs2_danish_ci"}}, isdefault: 0b00000000}, - 140: {alias: []collalias{{0b01111111, "ucs2_lithuanian_ci"}}, isdefault: 0b00000000}, - 141: {alias: []collalias{{0b01111111, "ucs2_slovak_ci"}}, isdefault: 0b00000000}, - 142: {alias: []collalias{{0b01111111, "ucs2_spanish2_ci"}}, isdefault: 0b00000000}, - 143: {alias: []collalias{{0b01111111, "ucs2_roman_ci"}}, isdefault: 0b00000000}, - 144: {alias: []collalias{{0b01111111, "ucs2_persian_ci"}}, isdefault: 0b00000000}, - 145: {alias: []collalias{{0b01111111, "ucs2_esperanto_ci"}}, isdefault: 0b00000000}, - 146: {alias: []collalias{{0b01111111, "ucs2_hungarian_ci"}}, isdefault: 0b00000000}, - 147: {alias: []collalias{{0b01111111, "ucs2_sinhala_ci"}}, isdefault: 0b00000000}, - 148: {alias: []collalias{{0b01111111, "ucs2_german2_ci"}}, isdefault: 0b00000000}, - 149: {alias: []collalias{{0b01110000, "ucs2_croatian_ci"}, {0b00001111, "ucs2_croatian_mysql561_ci"}}, isdefault: 0b00000000}, - 150: {alias: []collalias{{0b01111111, "ucs2_unicode_520_ci"}}, isdefault: 0b00000000}, - 151: {alias: []collalias{{0b01111111, "ucs2_vietnamese_ci"}}, isdefault: 0b00000000}, - 159: {alias: []collalias{{0b01111111, "ucs2_general_mysql500_ci"}}, isdefault: 0b00000000}, - 160: {alias: []collalias{{0b01111111, "utf32_unicode_ci"}}, isdefault: 0b00000000}, - 161: {alias: 
[]collalias{{0b01111111, "utf32_icelandic_ci"}}, isdefault: 0b00000000}, - 162: {alias: []collalias{{0b01111111, "utf32_latvian_ci"}}, isdefault: 0b00000000}, - 163: {alias: []collalias{{0b01111111, "utf32_romanian_ci"}}, isdefault: 0b00000000}, - 164: {alias: []collalias{{0b01111111, "utf32_slovenian_ci"}}, isdefault: 0b00000000}, - 165: {alias: []collalias{{0b01111111, "utf32_polish_ci"}}, isdefault: 0b00000000}, - 166: {alias: []collalias{{0b01111111, "utf32_estonian_ci"}}, isdefault: 0b00000000}, - 167: {alias: []collalias{{0b01111111, "utf32_spanish_ci"}}, isdefault: 0b00000000}, - 168: {alias: []collalias{{0b01111111, "utf32_swedish_ci"}}, isdefault: 0b00000000}, - 169: {alias: []collalias{{0b01111111, "utf32_turkish_ci"}}, isdefault: 0b00000000}, - 170: {alias: []collalias{{0b01111111, "utf32_czech_ci"}}, isdefault: 0b00000000}, - 171: {alias: []collalias{{0b01111111, "utf32_danish_ci"}}, isdefault: 0b00000000}, - 172: {alias: []collalias{{0b01111111, "utf32_lithuanian_ci"}}, isdefault: 0b00000000}, - 173: {alias: []collalias{{0b01111111, "utf32_slovak_ci"}}, isdefault: 0b00000000}, - 174: {alias: []collalias{{0b01111111, "utf32_spanish2_ci"}}, isdefault: 0b00000000}, - 175: {alias: []collalias{{0b01111111, "utf32_roman_ci"}}, isdefault: 0b00000000}, - 176: {alias: []collalias{{0b01111111, "utf32_persian_ci"}}, isdefault: 0b00000000}, - 177: {alias: []collalias{{0b01111111, "utf32_esperanto_ci"}}, isdefault: 0b00000000}, - 178: {alias: []collalias{{0b01111111, "utf32_hungarian_ci"}}, isdefault: 0b00000000}, - 179: {alias: []collalias{{0b01111111, "utf32_sinhala_ci"}}, isdefault: 0b00000000}, - 180: {alias: []collalias{{0b01111111, "utf32_german2_ci"}}, isdefault: 0b00000000}, - 181: {alias: []collalias{{0b01110000, "utf32_croatian_ci"}, {0b00001111, "utf32_croatian_mysql561_ci"}}, isdefault: 0b00000000}, - 182: {alias: []collalias{{0b01111111, "utf32_unicode_520_ci"}}, isdefault: 0b00000000}, - 183: {alias: []collalias{{0b01111111, "utf32_vietnamese_ci"}}, 
isdefault: 0b00000000}, - 192: {alias: []collalias{{0b01111111, "utf8_unicode_ci"}, {0b01111111, "utf8mb3_unicode_ci"}}, isdefault: 0b00000000}, - 193: {alias: []collalias{{0b01111111, "utf8_icelandic_ci"}, {0b01111111, "utf8mb3_icelandic_ci"}}, isdefault: 0b00000000}, - 194: {alias: []collalias{{0b01111111, "utf8_latvian_ci"}, {0b01111111, "utf8mb3_latvian_ci"}}, isdefault: 0b00000000}, - 195: {alias: []collalias{{0b01111111, "utf8_romanian_ci"}, {0b01111111, "utf8mb3_romanian_ci"}}, isdefault: 0b00000000}, - 196: {alias: []collalias{{0b01111111, "utf8_slovenian_ci"}, {0b01111111, "utf8mb3_slovenian_ci"}}, isdefault: 0b00000000}, - 197: {alias: []collalias{{0b01111111, "utf8_polish_ci"}, {0b01111111, "utf8mb3_polish_ci"}}, isdefault: 0b00000000}, - 198: {alias: []collalias{{0b01111111, "utf8_estonian_ci"}, {0b01111111, "utf8mb3_estonian_ci"}}, isdefault: 0b00000000}, - 199: {alias: []collalias{{0b01111111, "utf8_spanish_ci"}, {0b01111111, "utf8mb3_spanish_ci"}}, isdefault: 0b00000000}, - 200: {alias: []collalias{{0b01111111, "utf8_swedish_ci"}, {0b01111111, "utf8mb3_swedish_ci"}}, isdefault: 0b00000000}, - 201: {alias: []collalias{{0b01111111, "utf8_turkish_ci"}, {0b01111111, "utf8mb3_turkish_ci"}}, isdefault: 0b00000000}, - 202: {alias: []collalias{{0b01111111, "utf8_czech_ci"}, {0b01111111, "utf8mb3_czech_ci"}}, isdefault: 0b00000000}, - 203: {alias: []collalias{{0b01111111, "utf8_danish_ci"}, {0b01111111, "utf8mb3_danish_ci"}}, isdefault: 0b00000000}, - 204: {alias: []collalias{{0b01111111, "utf8_lithuanian_ci"}, {0b01111111, "utf8mb3_lithuanian_ci"}}, isdefault: 0b00000000}, - 205: {alias: []collalias{{0b01111111, "utf8_slovak_ci"}, {0b01111111, "utf8mb3_slovak_ci"}}, isdefault: 0b00000000}, - 206: {alias: []collalias{{0b01111111, "utf8_spanish2_ci"}, {0b01111111, "utf8mb3_spanish2_ci"}}, isdefault: 0b00000000}, - 207: {alias: []collalias{{0b01111111, "utf8_roman_ci"}, {0b01111111, "utf8mb3_roman_ci"}}, isdefault: 0b00000000}, - 208: {alias: 
[]collalias{{0b01111111, "utf8_persian_ci"}, {0b01111111, "utf8mb3_persian_ci"}}, isdefault: 0b00000000}, - 209: {alias: []collalias{{0b01111111, "utf8_esperanto_ci"}, {0b01111111, "utf8mb3_esperanto_ci"}}, isdefault: 0b00000000}, - 210: {alias: []collalias{{0b01111111, "utf8_hungarian_ci"}, {0b01111111, "utf8mb3_hungarian_ci"}}, isdefault: 0b00000000}, - 211: {alias: []collalias{{0b01111111, "utf8_sinhala_ci"}, {0b01111111, "utf8mb3_sinhala_ci"}}, isdefault: 0b00000000}, - 212: {alias: []collalias{{0b01111111, "utf8_german2_ci"}, {0b01111111, "utf8mb3_german2_ci"}}, isdefault: 0b00000000}, - 213: {alias: []collalias{{0b01110000, "utf8_croatian_ci"}, {0b00001111, "utf8_croatian_mysql561_ci"}, {0b01110000, "utf8mb3_croatian_ci"}, {0b00001111, "utf8mb3_croatian_mysql561_ci"}}, isdefault: 0b00000000}, - 214: {alias: []collalias{{0b01111111, "utf8_unicode_520_ci"}, {0b01111111, "utf8mb3_unicode_520_ci"}}, isdefault: 0b00000000}, - 215: {alias: []collalias{{0b01111111, "utf8_vietnamese_ci"}, {0b01111111, "utf8mb3_vietnamese_ci"}}, isdefault: 0b00000000}, - 223: {alias: []collalias{{0b01111111, "utf8_general_mysql500_ci"}, {0b01111111, "utf8mb3_general_mysql500_ci"}}, isdefault: 0b00000000}, - 224: {alias: []collalias{{0b01111111, "utf8mb4_unicode_ci"}}, isdefault: 0b00000000}, - 225: {alias: []collalias{{0b01111111, "utf8mb4_icelandic_ci"}}, isdefault: 0b00000000}, - 226: {alias: []collalias{{0b01111111, "utf8mb4_latvian_ci"}}, isdefault: 0b00000000}, - 227: {alias: []collalias{{0b01111111, "utf8mb4_romanian_ci"}}, isdefault: 0b00000000}, - 228: {alias: []collalias{{0b01111111, "utf8mb4_slovenian_ci"}}, isdefault: 0b00000000}, - 229: {alias: []collalias{{0b01111111, "utf8mb4_polish_ci"}}, isdefault: 0b00000000}, - 230: {alias: []collalias{{0b01111111, "utf8mb4_estonian_ci"}}, isdefault: 0b00000000}, - 231: {alias: []collalias{{0b01111111, "utf8mb4_spanish_ci"}}, isdefault: 0b00000000}, - 232: {alias: []collalias{{0b01111111, "utf8mb4_swedish_ci"}}, isdefault: 
0b00000000}, - 233: {alias: []collalias{{0b01111111, "utf8mb4_turkish_ci"}}, isdefault: 0b00000000}, - 234: {alias: []collalias{{0b01111111, "utf8mb4_czech_ci"}}, isdefault: 0b00000000}, - 235: {alias: []collalias{{0b01111111, "utf8mb4_danish_ci"}}, isdefault: 0b00000000}, - 236: {alias: []collalias{{0b01111111, "utf8mb4_lithuanian_ci"}}, isdefault: 0b00000000}, - 237: {alias: []collalias{{0b01111111, "utf8mb4_slovak_ci"}}, isdefault: 0b00000000}, - 238: {alias: []collalias{{0b01111111, "utf8mb4_spanish2_ci"}}, isdefault: 0b00000000}, - 239: {alias: []collalias{{0b01111111, "utf8mb4_roman_ci"}}, isdefault: 0b00000000}, - 240: {alias: []collalias{{0b01111111, "utf8mb4_persian_ci"}}, isdefault: 0b00000000}, - 241: {alias: []collalias{{0b01111111, "utf8mb4_esperanto_ci"}}, isdefault: 0b00000000}, - 242: {alias: []collalias{{0b01111111, "utf8mb4_hungarian_ci"}}, isdefault: 0b00000000}, - 243: {alias: []collalias{{0b01111111, "utf8mb4_sinhala_ci"}}, isdefault: 0b00000000}, - 244: {alias: []collalias{{0b01111111, "utf8mb4_german2_ci"}}, isdefault: 0b00000000}, - 245: {alias: []collalias{{0b01110000, "utf8mb4_croatian_ci"}, {0b00001111, "utf8mb4_croatian_mysql561_ci"}}, isdefault: 0b00000000}, - 246: {alias: []collalias{{0b01111111, "utf8mb4_unicode_520_ci"}}, isdefault: 0b00000000}, - 247: {alias: []collalias{{0b01111111, "utf8mb4_vietnamese_ci"}}, isdefault: 0b00000000}, - 248: {alias: []collalias{{0b01100000, "gb18030_chinese_ci"}}, isdefault: 0b01100000}, - 249: {alias: []collalias{{0b01100000, "gb18030_bin"}}, isdefault: 0b00000000}, - 250: {alias: []collalias{{0b01100000, "gb18030_unicode_520_ci"}}, isdefault: 0b00000000}, - 255: {alias: []collalias{{0b01000000, "utf8mb4_0900_ai_ci"}}, isdefault: 0b01000000}, - 256: {alias: []collalias{{0b01000000, "utf8mb4_de_pb_0900_ai_ci"}}, isdefault: 0b00000000}, - 257: {alias: []collalias{{0b01000000, "utf8mb4_is_0900_ai_ci"}}, isdefault: 0b00000000}, - 258: {alias: []collalias{{0b01000000, "utf8mb4_lv_0900_ai_ci"}}, 
isdefault: 0b00000000}, - 259: {alias: []collalias{{0b01000000, "utf8mb4_ro_0900_ai_ci"}}, isdefault: 0b00000000}, - 260: {alias: []collalias{{0b01000000, "utf8mb4_sl_0900_ai_ci"}}, isdefault: 0b00000000}, - 261: {alias: []collalias{{0b01000000, "utf8mb4_pl_0900_ai_ci"}}, isdefault: 0b00000000}, - 262: {alias: []collalias{{0b01000000, "utf8mb4_et_0900_ai_ci"}}, isdefault: 0b00000000}, - 263: {alias: []collalias{{0b01000000, "utf8mb4_es_0900_ai_ci"}}, isdefault: 0b00000000}, - 264: {alias: []collalias{{0b01000000, "utf8mb4_sv_0900_ai_ci"}}, isdefault: 0b00000000}, - 265: {alias: []collalias{{0b01000000, "utf8mb4_tr_0900_ai_ci"}}, isdefault: 0b00000000}, - 266: {alias: []collalias{{0b01000000, "utf8mb4_cs_0900_ai_ci"}}, isdefault: 0b00000000}, - 267: {alias: []collalias{{0b01000000, "utf8mb4_da_0900_ai_ci"}}, isdefault: 0b00000000}, - 268: {alias: []collalias{{0b01000000, "utf8mb4_lt_0900_ai_ci"}}, isdefault: 0b00000000}, - 269: {alias: []collalias{{0b01000000, "utf8mb4_sk_0900_ai_ci"}}, isdefault: 0b00000000}, - 270: {alias: []collalias{{0b01000000, "utf8mb4_es_trad_0900_ai_ci"}}, isdefault: 0b00000000}, - 271: {alias: []collalias{{0b01000000, "utf8mb4_la_0900_ai_ci"}}, isdefault: 0b00000000}, - 273: {alias: []collalias{{0b01000000, "utf8mb4_eo_0900_ai_ci"}}, isdefault: 0b00000000}, - 274: {alias: []collalias{{0b01000000, "utf8mb4_hu_0900_ai_ci"}}, isdefault: 0b00000000}, - 275: {alias: []collalias{{0b01000000, "utf8mb4_hr_0900_ai_ci"}}, isdefault: 0b00000000}, - 277: {alias: []collalias{{0b01000000, "utf8mb4_vi_0900_ai_ci"}}, isdefault: 0b00000000}, - 278: {alias: []collalias{{0b01000000, "utf8mb4_0900_as_cs"}}, isdefault: 0b00000000}, - 279: {alias: []collalias{{0b01000000, "utf8mb4_de_pb_0900_as_cs"}}, isdefault: 0b00000000}, - 280: {alias: []collalias{{0b01000000, "utf8mb4_is_0900_as_cs"}}, isdefault: 0b00000000}, - 281: {alias: []collalias{{0b01000000, "utf8mb4_lv_0900_as_cs"}}, isdefault: 0b00000000}, - 282: {alias: []collalias{{0b01000000, 
"utf8mb4_ro_0900_as_cs"}}, isdefault: 0b00000000}, - 283: {alias: []collalias{{0b01000000, "utf8mb4_sl_0900_as_cs"}}, isdefault: 0b00000000}, - 284: {alias: []collalias{{0b01000000, "utf8mb4_pl_0900_as_cs"}}, isdefault: 0b00000000}, - 285: {alias: []collalias{{0b01000000, "utf8mb4_et_0900_as_cs"}}, isdefault: 0b00000000}, - 286: {alias: []collalias{{0b01000000, "utf8mb4_es_0900_as_cs"}}, isdefault: 0b00000000}, - 287: {alias: []collalias{{0b01000000, "utf8mb4_sv_0900_as_cs"}}, isdefault: 0b00000000}, - 288: {alias: []collalias{{0b01000000, "utf8mb4_tr_0900_as_cs"}}, isdefault: 0b00000000}, - 289: {alias: []collalias{{0b01000000, "utf8mb4_cs_0900_as_cs"}}, isdefault: 0b00000000}, - 290: {alias: []collalias{{0b01000000, "utf8mb4_da_0900_as_cs"}}, isdefault: 0b00000000}, - 291: {alias: []collalias{{0b01000000, "utf8mb4_lt_0900_as_cs"}}, isdefault: 0b00000000}, - 292: {alias: []collalias{{0b01000000, "utf8mb4_sk_0900_as_cs"}}, isdefault: 0b00000000}, - 293: {alias: []collalias{{0b01000000, "utf8mb4_es_trad_0900_as_cs"}}, isdefault: 0b00000000}, - 294: {alias: []collalias{{0b01000000, "utf8mb4_la_0900_as_cs"}}, isdefault: 0b00000000}, - 296: {alias: []collalias{{0b01000000, "utf8mb4_eo_0900_as_cs"}}, isdefault: 0b00000000}, - 297: {alias: []collalias{{0b01000000, "utf8mb4_hu_0900_as_cs"}}, isdefault: 0b00000000}, - 298: {alias: []collalias{{0b01000000, "utf8mb4_hr_0900_as_cs"}}, isdefault: 0b00000000}, - 300: {alias: []collalias{{0b01000000, "utf8mb4_vi_0900_as_cs"}}, isdefault: 0b00000000}, - 303: {alias: []collalias{{0b01000000, "utf8mb4_ja_0900_as_cs"}}, isdefault: 0b00000000}, - 304: {alias: []collalias{{0b01000000, "utf8mb4_ja_0900_as_cs_ks"}}, isdefault: 0b00000000}, - 305: {alias: []collalias{{0b01000000, "utf8mb4_0900_as_ci"}}, isdefault: 0b00000000}, - 306: {alias: []collalias{{0b01000000, "utf8mb4_ru_0900_ai_ci"}}, isdefault: 0b00000000}, - 307: {alias: []collalias{{0b01000000, "utf8mb4_ru_0900_as_cs"}}, isdefault: 0b00000000}, - 308: {alias: 
[]collalias{{0b01000000, "utf8mb4_zh_0900_as_cs"}}, isdefault: 0b00000000}, - 309: {alias: []collalias{{0b01000000, "utf8mb4_0900_bin"}}, isdefault: 0b00000000}, - 310: {alias: []collalias{{0b01000000, "utf8mb4_nb_0900_ai_ci"}}, isdefault: 0b00000000}, - 311: {alias: []collalias{{0b01000000, "utf8mb4_nb_0900_as_cs"}}, isdefault: 0b00000000}, - 312: {alias: []collalias{{0b01000000, "utf8mb4_nn_0900_ai_ci"}}, isdefault: 0b00000000}, - 313: {alias: []collalias{{0b01000000, "utf8mb4_nn_0900_as_cs"}}, isdefault: 0b00000000}, - 314: {alias: []collalias{{0b01000000, "utf8mb4_sr_latn_0900_ai_ci"}}, isdefault: 0b00000000}, - 315: {alias: []collalias{{0b01000000, "utf8mb4_sr_latn_0900_as_cs"}}, isdefault: 0b00000000}, - 316: {alias: []collalias{{0b01000000, "utf8mb4_bs_0900_ai_ci"}}, isdefault: 0b00000000}, - 317: {alias: []collalias{{0b01000000, "utf8mb4_bs_0900_as_cs"}}, isdefault: 0b00000000}, - 318: {alias: []collalias{{0b01000000, "utf8mb4_bg_0900_ai_ci"}}, isdefault: 0b00000000}, - 319: {alias: []collalias{{0b01000000, "utf8mb4_bg_0900_as_cs"}}, isdefault: 0b00000000}, - 320: {alias: []collalias{{0b01000000, "utf8mb4_gl_0900_ai_ci"}}, isdefault: 0b00000000}, - 321: {alias: []collalias{{0b01000000, "utf8mb4_gl_0900_as_cs"}}, isdefault: 0b00000000}, - 322: {alias: []collalias{{0b01000000, "utf8mb4_mn_cyrl_0900_ai_ci"}}, isdefault: 0b00000000}, - 323: {alias: []collalias{{0b01000000, "utf8mb4_mn_cyrl_0900_as_cs"}}, isdefault: 0b00000000}, - 576: {alias: []collalias{{0b00001111, "utf8_croatian_ci"}, {0b00001111, "utf8mb3_croatian_ci"}}, isdefault: 0b00000000}, - 577: {alias: []collalias{{0b00001111, "utf8_myanmar_ci"}, {0b00001111, "utf8mb3_myanmar_ci"}}, isdefault: 0b00000000}, - 578: {alias: []collalias{{0b00001110, "utf8_thai_520_w2"}, {0b00001110, "utf8mb3_thai_520_w2"}}, isdefault: 0b00000000}, - 608: {alias: []collalias{{0b00001111, "utf8mb4_croatian_ci"}}, isdefault: 0b00000000}, - 609: {alias: []collalias{{0b00001111, "utf8mb4_myanmar_ci"}}, isdefault: 0b00000000}, 
- 610: {alias: []collalias{{0b00001110, "utf8mb4_thai_520_w2"}}, isdefault: 0b00000000}, - 640: {alias: []collalias{{0b00001111, "ucs2_croatian_ci"}}, isdefault: 0b00000000}, - 641: {alias: []collalias{{0b00001111, "ucs2_myanmar_ci"}}, isdefault: 0b00000000}, - 642: {alias: []collalias{{0b00001110, "ucs2_thai_520_w2"}}, isdefault: 0b00000000}, - 672: {alias: []collalias{{0b00001111, "utf16_croatian_ci"}}, isdefault: 0b00000000}, - 673: {alias: []collalias{{0b00001111, "utf16_myanmar_ci"}}, isdefault: 0b00000000}, - 674: {alias: []collalias{{0b00001110, "utf16_thai_520_w2"}}, isdefault: 0b00000000}, - 736: {alias: []collalias{{0b00001111, "utf32_croatian_ci"}}, isdefault: 0b00000000}, - 737: {alias: []collalias{{0b00001111, "utf32_myanmar_ci"}}, isdefault: 0b00000000}, - 738: {alias: []collalias{{0b00001110, "utf32_thai_520_w2"}}, isdefault: 0b00000000}, - 1025: {alias: []collalias{{0b00001100, "big5_chinese_nopad_ci"}}, isdefault: 0b00000000}, - 1027: {alias: []collalias{{0b00001100, "dec8_swedish_nopad_ci"}}, isdefault: 0b00000000}, - 1028: {alias: []collalias{{0b00001100, "cp850_general_nopad_ci"}}, isdefault: 0b00000000}, - 1030: {alias: []collalias{{0b00001100, "hp8_english_nopad_ci"}}, isdefault: 0b00000000}, - 1031: {alias: []collalias{{0b00001100, "koi8r_general_nopad_ci"}}, isdefault: 0b00000000}, - 1032: {alias: []collalias{{0b00001100, "latin1_swedish_nopad_ci"}}, isdefault: 0b00000000}, - 1033: {alias: []collalias{{0b00001100, "latin2_general_nopad_ci"}}, isdefault: 0b00000000}, - 1034: {alias: []collalias{{0b00001100, "swe7_swedish_nopad_ci"}}, isdefault: 0b00000000}, - 1035: {alias: []collalias{{0b00001100, "ascii_general_nopad_ci"}}, isdefault: 0b00000000}, - 1036: {alias: []collalias{{0b00001100, "ujis_japanese_nopad_ci"}}, isdefault: 0b00000000}, - 1037: {alias: []collalias{{0b00001100, "sjis_japanese_nopad_ci"}}, isdefault: 0b00000000}, - 1040: {alias: []collalias{{0b00001100, "hebrew_general_nopad_ci"}}, isdefault: 0b00000000}, - 1042: {alias: 
[]collalias{{0b00001100, "tis620_thai_nopad_ci"}}, isdefault: 0b00000000}, - 1043: {alias: []collalias{{0b00001100, "euckr_korean_nopad_ci"}}, isdefault: 0b00000000}, - 1046: {alias: []collalias{{0b00001100, "koi8u_general_nopad_ci"}}, isdefault: 0b00000000}, - 1048: {alias: []collalias{{0b00001100, "gb2312_chinese_nopad_ci"}}, isdefault: 0b00000000}, - 1049: {alias: []collalias{{0b00001100, "greek_general_nopad_ci"}}, isdefault: 0b00000000}, - 1050: {alias: []collalias{{0b00001100, "cp1250_general_nopad_ci"}}, isdefault: 0b00000000}, - 1052: {alias: []collalias{{0b00001100, "gbk_chinese_nopad_ci"}}, isdefault: 0b00000000}, - 1054: {alias: []collalias{{0b00001100, "latin5_turkish_nopad_ci"}}, isdefault: 0b00000000}, - 1056: {alias: []collalias{{0b00001100, "armscii8_general_nopad_ci"}}, isdefault: 0b00000000}, - 1057: {alias: []collalias{{0b00001100, "utf8_general_nopad_ci"}, {0b00001100, "utf8mb3_general_nopad_ci"}}, isdefault: 0b00000000}, - 1059: {alias: []collalias{{0b00001100, "ucs2_general_nopad_ci"}}, isdefault: 0b00000000}, - 1060: {alias: []collalias{{0b00001100, "cp866_general_nopad_ci"}}, isdefault: 0b00000000}, - 1061: {alias: []collalias{{0b00001100, "keybcs2_general_nopad_ci"}}, isdefault: 0b00000000}, - 1062: {alias: []collalias{{0b00001100, "macce_general_nopad_ci"}}, isdefault: 0b00000000}, - 1063: {alias: []collalias{{0b00001100, "macroman_general_nopad_ci"}}, isdefault: 0b00000000}, - 1064: {alias: []collalias{{0b00001100, "cp852_general_nopad_ci"}}, isdefault: 0b00000000}, - 1065: {alias: []collalias{{0b00001100, "latin7_general_nopad_ci"}}, isdefault: 0b00000000}, - 1067: {alias: []collalias{{0b00001100, "macce_nopad_bin"}}, isdefault: 0b00000000}, - 1069: {alias: []collalias{{0b00001100, "utf8mb4_general_nopad_ci"}}, isdefault: 0b00000000}, - 1070: {alias: []collalias{{0b00001100, "utf8mb4_nopad_bin"}}, isdefault: 0b00000000}, - 1071: {alias: []collalias{{0b00001100, "latin1_nopad_bin"}}, isdefault: 0b00000000}, - 1074: {alias: 
[]collalias{{0b00001100, "cp1251_nopad_bin"}}, isdefault: 0b00000000}, - 1075: {alias: []collalias{{0b00001100, "cp1251_general_nopad_ci"}}, isdefault: 0b00000000}, - 1077: {alias: []collalias{{0b00001100, "macroman_nopad_bin"}}, isdefault: 0b00000000}, - 1078: {alias: []collalias{{0b00001100, "utf16_general_nopad_ci"}}, isdefault: 0b00000000}, - 1079: {alias: []collalias{{0b00001100, "utf16_nopad_bin"}}, isdefault: 0b00000000}, - 1080: {alias: []collalias{{0b00001100, "utf16le_general_nopad_ci"}}, isdefault: 0b00000000}, - 1081: {alias: []collalias{{0b00001100, "cp1256_general_nopad_ci"}}, isdefault: 0b00000000}, - 1082: {alias: []collalias{{0b00001100, "cp1257_nopad_bin"}}, isdefault: 0b00000000}, - 1083: {alias: []collalias{{0b00001100, "cp1257_general_nopad_ci"}}, isdefault: 0b00000000}, - 1084: {alias: []collalias{{0b00001100, "utf32_general_nopad_ci"}}, isdefault: 0b00000000}, - 1085: {alias: []collalias{{0b00001100, "utf32_nopad_bin"}}, isdefault: 0b00000000}, - 1086: {alias: []collalias{{0b00001100, "utf16le_nopad_bin"}}, isdefault: 0b00000000}, - 1088: {alias: []collalias{{0b00001100, "armscii8_nopad_bin"}}, isdefault: 0b00000000}, - 1089: {alias: []collalias{{0b00001100, "ascii_nopad_bin"}}, isdefault: 0b00000000}, - 1090: {alias: []collalias{{0b00001100, "cp1250_nopad_bin"}}, isdefault: 0b00000000}, - 1091: {alias: []collalias{{0b00001100, "cp1256_nopad_bin"}}, isdefault: 0b00000000}, - 1092: {alias: []collalias{{0b00001100, "cp866_nopad_bin"}}, isdefault: 0b00000000}, - 1093: {alias: []collalias{{0b00001100, "dec8_nopad_bin"}}, isdefault: 0b00000000}, - 1094: {alias: []collalias{{0b00001100, "greek_nopad_bin"}}, isdefault: 0b00000000}, - 1095: {alias: []collalias{{0b00001100, "hebrew_nopad_bin"}}, isdefault: 0b00000000}, - 1096: {alias: []collalias{{0b00001100, "hp8_nopad_bin"}}, isdefault: 0b00000000}, - 1097: {alias: []collalias{{0b00001100, "keybcs2_nopad_bin"}}, isdefault: 0b00000000}, - 1098: {alias: []collalias{{0b00001100, "koi8r_nopad_bin"}}, 
isdefault: 0b00000000}, - 1099: {alias: []collalias{{0b00001100, "koi8u_nopad_bin"}}, isdefault: 0b00000000}, - 1101: {alias: []collalias{{0b00001100, "latin2_nopad_bin"}}, isdefault: 0b00000000}, - 1102: {alias: []collalias{{0b00001100, "latin5_nopad_bin"}}, isdefault: 0b00000000}, - 1103: {alias: []collalias{{0b00001100, "latin7_nopad_bin"}}, isdefault: 0b00000000}, - 1104: {alias: []collalias{{0b00001100, "cp850_nopad_bin"}}, isdefault: 0b00000000}, - 1105: {alias: []collalias{{0b00001100, "cp852_nopad_bin"}}, isdefault: 0b00000000}, - 1106: {alias: []collalias{{0b00001100, "swe7_nopad_bin"}}, isdefault: 0b00000000}, - 1107: {alias: []collalias{{0b00001100, "utf8_nopad_bin"}, {0b00001100, "utf8mb3_nopad_bin"}}, isdefault: 0b00000000}, - 1108: {alias: []collalias{{0b00001100, "big5_nopad_bin"}}, isdefault: 0b00000000}, - 1109: {alias: []collalias{{0b00001100, "euckr_nopad_bin"}}, isdefault: 0b00000000}, - 1110: {alias: []collalias{{0b00001100, "gb2312_nopad_bin"}}, isdefault: 0b00000000}, - 1111: {alias: []collalias{{0b00001100, "gbk_nopad_bin"}}, isdefault: 0b00000000}, - 1112: {alias: []collalias{{0b00001100, "sjis_nopad_bin"}}, isdefault: 0b00000000}, - 1113: {alias: []collalias{{0b00001100, "tis620_nopad_bin"}}, isdefault: 0b00000000}, - 1114: {alias: []collalias{{0b00001100, "ucs2_nopad_bin"}}, isdefault: 0b00000000}, - 1115: {alias: []collalias{{0b00001100, "ujis_nopad_bin"}}, isdefault: 0b00000000}, - 1116: {alias: []collalias{{0b00001100, "geostd8_general_nopad_ci"}}, isdefault: 0b00000000}, - 1117: {alias: []collalias{{0b00001100, "geostd8_nopad_bin"}}, isdefault: 0b00000000}, - 1119: {alias: []collalias{{0b00001100, "cp932_japanese_nopad_ci"}}, isdefault: 0b00000000}, - 1120: {alias: []collalias{{0b00001100, "cp932_nopad_bin"}}, isdefault: 0b00000000}, - 1121: {alias: []collalias{{0b00001100, "eucjpms_japanese_nopad_ci"}}, isdefault: 0b00000000}, - 1122: {alias: []collalias{{0b00001100, "eucjpms_nopad_bin"}}, isdefault: 0b00000000}, - 1125: {alias: 
[]collalias{{0b00001100, "utf16_unicode_nopad_ci"}}, isdefault: 0b00000000}, - 1147: {alias: []collalias{{0b00001100, "utf16_unicode_520_nopad_ci"}}, isdefault: 0b00000000}, - 1152: {alias: []collalias{{0b00001100, "ucs2_unicode_nopad_ci"}}, isdefault: 0b00000000}, - 1174: {alias: []collalias{{0b00001100, "ucs2_unicode_520_nopad_ci"}}, isdefault: 0b00000000}, - 1184: {alias: []collalias{{0b00001100, "utf32_unicode_nopad_ci"}}, isdefault: 0b00000000}, - 1206: {alias: []collalias{{0b00001100, "utf32_unicode_520_nopad_ci"}}, isdefault: 0b00000000}, - 1216: {alias: []collalias{{0b00001100, "utf8_unicode_nopad_ci"}, {0b00001100, "utf8mb3_unicode_nopad_ci"}}, isdefault: 0b00000000}, - 1238: {alias: []collalias{{0b00001100, "utf8_unicode_520_nopad_ci"}, {0b00001100, "utf8mb3_unicode_520_nopad_ci"}}, isdefault: 0b00000000}, - 1248: {alias: []collalias{{0b00001100, "utf8mb4_unicode_nopad_ci"}}, isdefault: 0b00000000}, - 1270: {alias: []collalias{{0b00001100, "utf8mb4_unicode_520_nopad_ci"}}, isdefault: 0b00000000}, + 1: {alias: []collalias{{0b01111111, "big5_chinese_ci", "big5"}}, isdefault: 0b01111111}, + 2: {alias: []collalias{{0b01111111, "latin2_czech_cs", "latin2"}}, isdefault: 0b00000000}, + 3: {alias: []collalias{{0b01111111, "dec8_swedish_ci", "dec8"}}, isdefault: 0b01111111}, + 4: {alias: []collalias{{0b01111111, "cp850_general_ci", "cp850"}}, isdefault: 0b01111111}, + 5: {alias: []collalias{{0b01111111, "latin1_german1_ci", "latin1"}}, isdefault: 0b00000000}, + 6: {alias: []collalias{{0b01111111, "hp8_english_ci", "hp8"}}, isdefault: 0b01111111}, + 7: {alias: []collalias{{0b01111111, "koi8r_general_ci", "koi8r"}}, isdefault: 0b01111111}, + 8: {alias: []collalias{{0b01111111, "latin1_swedish_ci", "latin1"}}, isdefault: 0b01111111}, + 9: {alias: []collalias{{0b01111111, "latin2_general_ci", "latin2"}}, isdefault: 0b01111111}, + 10: {alias: []collalias{{0b01111111, "swe7_swedish_ci", "swe7"}}, isdefault: 0b01111111}, + 11: {alias: []collalias{{0b01111111, 
"ascii_general_ci", "ascii"}}, isdefault: 0b01111111}, + 12: {alias: []collalias{{0b01111111, "ujis_japanese_ci", "ujis"}}, isdefault: 0b01111111}, + 13: {alias: []collalias{{0b01111111, "sjis_japanese_ci", "sjis"}}, isdefault: 0b01111111}, + 14: {alias: []collalias{{0b01111111, "cp1251_bulgarian_ci", "cp1251"}}, isdefault: 0b00000000}, + 15: {alias: []collalias{{0b01111111, "latin1_danish_ci", "latin1"}}, isdefault: 0b00000000}, + 16: {alias: []collalias{{0b01111111, "hebrew_general_ci", "hebrew"}}, isdefault: 0b01111111}, + 18: {alias: []collalias{{0b01111111, "tis620_thai_ci", "tis620"}}, isdefault: 0b01111111}, + 19: {alias: []collalias{{0b01111111, "euckr_korean_ci", "euckr"}}, isdefault: 0b01111111}, + 20: {alias: []collalias{{0b01111111, "latin7_estonian_cs", "latin7"}}, isdefault: 0b00000000}, + 21: {alias: []collalias{{0b01111111, "latin2_hungarian_ci", "latin2"}}, isdefault: 0b00000000}, + 22: {alias: []collalias{{0b01111111, "koi8u_general_ci", "koi8u"}}, isdefault: 0b01111111}, + 23: {alias: []collalias{{0b01111111, "cp1251_ukrainian_ci", "cp1251"}}, isdefault: 0b00000000}, + 24: {alias: []collalias{{0b01111111, "gb2312_chinese_ci", "gb2312"}}, isdefault: 0b01111111}, + 25: {alias: []collalias{{0b01111111, "greek_general_ci", "greek"}}, isdefault: 0b01111111}, + 26: {alias: []collalias{{0b01111111, "cp1250_general_ci", "cp1250"}}, isdefault: 0b01111111}, + 27: {alias: []collalias{{0b01111111, "latin2_croatian_ci", "latin2"}}, isdefault: 0b00000000}, + 28: {alias: []collalias{{0b01111111, "gbk_chinese_ci", "gbk"}}, isdefault: 0b01111111}, + 29: {alias: []collalias{{0b01111111, "cp1257_lithuanian_ci", "cp1257"}}, isdefault: 0b00000000}, + 30: {alias: []collalias{{0b01111111, "latin5_turkish_ci", "latin5"}}, isdefault: 0b01111111}, + 31: {alias: []collalias{{0b01111111, "latin1_german2_ci", "latin1"}}, isdefault: 0b00000000}, + 32: {alias: []collalias{{0b01111111, "armscii8_general_ci", "armscii8"}}, isdefault: 0b01111111}, + 33: {alias: 
[]collalias{{0b01111111, "utf8_general_ci", "utf8"}, {0b01111111, "utf8mb3_general_ci", "utf8mb3"}}, isdefault: 0b01111111}, + 34: {alias: []collalias{{0b01111111, "cp1250_czech_cs", "cp1250"}}, isdefault: 0b00000000}, + 35: {alias: []collalias{{0b01111111, "ucs2_general_ci", "ucs2"}}, isdefault: 0b01111111}, + 36: {alias: []collalias{{0b01111111, "cp866_general_ci", "cp866"}}, isdefault: 0b01111111}, + 37: {alias: []collalias{{0b01111111, "keybcs2_general_ci", "keybcs2"}}, isdefault: 0b01111111}, + 38: {alias: []collalias{{0b01111111, "macce_general_ci", "macce"}}, isdefault: 0b01111111}, + 39: {alias: []collalias{{0b01111111, "macroman_general_ci", "macroman"}}, isdefault: 0b01111111}, + 40: {alias: []collalias{{0b01111111, "cp852_general_ci", "cp852"}}, isdefault: 0b01111111}, + 41: {alias: []collalias{{0b01111111, "latin7_general_ci", "latin7"}}, isdefault: 0b01111111}, + 42: {alias: []collalias{{0b01111111, "latin7_general_cs", "latin7"}}, isdefault: 0b00000000}, + 43: {alias: []collalias{{0b01111111, "macce_bin", "macce"}}, isdefault: 0b00000000}, + 44: {alias: []collalias{{0b01111111, "cp1250_croatian_ci", "cp1250"}}, isdefault: 0b00000000}, + 45: {alias: []collalias{{0b01111111, "utf8mb4_general_ci", "utf8mb4"}}, isdefault: 0b00111111}, + 46: {alias: []collalias{{0b01111111, "utf8mb4_bin", "utf8mb4"}}, isdefault: 0b00000000}, + 47: {alias: []collalias{{0b01111111, "latin1_bin", "latin1"}}, isdefault: 0b00000000}, + 48: {alias: []collalias{{0b01111111, "latin1_general_ci", "latin1"}}, isdefault: 0b00000000}, + 49: {alias: []collalias{{0b01111111, "latin1_general_cs", "latin1"}}, isdefault: 0b00000000}, + 50: {alias: []collalias{{0b01111111, "cp1251_bin", "cp1251"}}, isdefault: 0b00000000}, + 51: {alias: []collalias{{0b01111111, "cp1251_general_ci", "cp1251"}}, isdefault: 0b01111111}, + 52: {alias: []collalias{{0b01111111, "cp1251_general_cs", "cp1251"}}, isdefault: 0b00000000}, + 53: {alias: []collalias{{0b01111111, "macroman_bin", "macroman"}}, isdefault: 
0b00000000}, + 54: {alias: []collalias{{0b01111111, "utf16_general_ci", "utf16"}}, isdefault: 0b01111111}, + 55: {alias: []collalias{{0b01111111, "utf16_bin", "utf16"}}, isdefault: 0b00000000}, + 56: {alias: []collalias{{0b01111111, "utf16le_general_ci", "utf16le"}}, isdefault: 0b01111111}, + 57: {alias: []collalias{{0b01111111, "cp1256_general_ci", "cp1256"}}, isdefault: 0b01111111}, + 58: {alias: []collalias{{0b01111111, "cp1257_bin", "cp1257"}}, isdefault: 0b00000000}, + 59: {alias: []collalias{{0b01111111, "cp1257_general_ci", "cp1257"}}, isdefault: 0b01111111}, + 60: {alias: []collalias{{0b01111111, "utf32_general_ci", "utf32"}}, isdefault: 0b01111111}, + 61: {alias: []collalias{{0b01111111, "utf32_bin", "utf32"}}, isdefault: 0b00000000}, + 62: {alias: []collalias{{0b01111111, "utf16le_bin", "utf16le"}}, isdefault: 0b00000000}, + 63: {alias: []collalias{{0b01111111, "binary", "binary"}}, isdefault: 0b01111111}, + 64: {alias: []collalias{{0b01111111, "armscii8_bin", "armscii8"}}, isdefault: 0b00000000}, + 65: {alias: []collalias{{0b01111111, "ascii_bin", "ascii"}}, isdefault: 0b00000000}, + 66: {alias: []collalias{{0b01111111, "cp1250_bin", "cp1250"}}, isdefault: 0b00000000}, + 67: {alias: []collalias{{0b01111111, "cp1256_bin", "cp1256"}}, isdefault: 0b00000000}, + 68: {alias: []collalias{{0b01111111, "cp866_bin", "cp866"}}, isdefault: 0b00000000}, + 69: {alias: []collalias{{0b01111111, "dec8_bin", "dec8"}}, isdefault: 0b00000000}, + 70: {alias: []collalias{{0b01111111, "greek_bin", "greek"}}, isdefault: 0b00000000}, + 71: {alias: []collalias{{0b01111111, "hebrew_bin", "hebrew"}}, isdefault: 0b00000000}, + 72: {alias: []collalias{{0b01111111, "hp8_bin", "hp8"}}, isdefault: 0b00000000}, + 73: {alias: []collalias{{0b01111111, "keybcs2_bin", "keybcs2"}}, isdefault: 0b00000000}, + 74: {alias: []collalias{{0b01111111, "koi8r_bin", "koi8r"}}, isdefault: 0b00000000}, + 75: {alias: []collalias{{0b01111111, "koi8u_bin", "koi8u"}}, isdefault: 0b00000000}, + 76: {alias: 
[]collalias{{0b01000000, "utf8_tolower_ci", "utf8"}, {0b01000000, "utf8mb3_tolower_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 77: {alias: []collalias{{0b01111111, "latin2_bin", "latin2"}}, isdefault: 0b00000000}, + 78: {alias: []collalias{{0b01111111, "latin5_bin", "latin5"}}, isdefault: 0b00000000}, + 79: {alias: []collalias{{0b01111111, "latin7_bin", "latin7"}}, isdefault: 0b00000000}, + 80: {alias: []collalias{{0b01111111, "cp850_bin", "cp850"}}, isdefault: 0b00000000}, + 81: {alias: []collalias{{0b01111111, "cp852_bin", "cp852"}}, isdefault: 0b00000000}, + 82: {alias: []collalias{{0b01111111, "swe7_bin", "swe7"}}, isdefault: 0b00000000}, + 83: {alias: []collalias{{0b01111111, "utf8_bin", "utf8"}, {0b01111111, "utf8mb3_bin", "utf8mb3"}}, isdefault: 0b00000000}, + 84: {alias: []collalias{{0b01111111, "big5_bin", "big5"}}, isdefault: 0b00000000}, + 85: {alias: []collalias{{0b01111111, "euckr_bin", "euckr"}}, isdefault: 0b00000000}, + 86: {alias: []collalias{{0b01111111, "gb2312_bin", "gb2312"}}, isdefault: 0b00000000}, + 87: {alias: []collalias{{0b01111111, "gbk_bin", "gbk"}}, isdefault: 0b00000000}, + 88: {alias: []collalias{{0b01111111, "sjis_bin", "sjis"}}, isdefault: 0b00000000}, + 89: {alias: []collalias{{0b01111111, "tis620_bin", "tis620"}}, isdefault: 0b00000000}, + 90: {alias: []collalias{{0b01111111, "ucs2_bin", "ucs2"}}, isdefault: 0b00000000}, + 91: {alias: []collalias{{0b01111111, "ujis_bin", "ujis"}}, isdefault: 0b00000000}, + 92: {alias: []collalias{{0b01111111, "geostd8_general_ci", "geostd8"}}, isdefault: 0b01111111}, + 93: {alias: []collalias{{0b01111111, "geostd8_bin", "geostd8"}}, isdefault: 0b00000000}, + 94: {alias: []collalias{{0b01111111, "latin1_spanish_ci", "latin1"}}, isdefault: 0b00000000}, + 95: {alias: []collalias{{0b01111111, "cp932_japanese_ci", "cp932"}}, isdefault: 0b01111111}, + 96: {alias: []collalias{{0b01111111, "cp932_bin", "cp932"}}, isdefault: 0b00000000}, + 97: {alias: []collalias{{0b01111111, "eucjpms_japanese_ci", 
"eucjpms"}}, isdefault: 0b01111111}, + 98: {alias: []collalias{{0b01111111, "eucjpms_bin", "eucjpms"}}, isdefault: 0b00000000}, + 99: {alias: []collalias{{0b01111111, "cp1250_polish_ci", "cp1250"}}, isdefault: 0b00000000}, + 101: {alias: []collalias{{0b01111111, "utf16_unicode_ci", "utf16"}}, isdefault: 0b00000000}, + 102: {alias: []collalias{{0b01111111, "utf16_icelandic_ci", "utf16"}}, isdefault: 0b00000000}, + 103: {alias: []collalias{{0b01111111, "utf16_latvian_ci", "utf16"}}, isdefault: 0b00000000}, + 104: {alias: []collalias{{0b01111111, "utf16_romanian_ci", "utf16"}}, isdefault: 0b00000000}, + 105: {alias: []collalias{{0b01111111, "utf16_slovenian_ci", "utf16"}}, isdefault: 0b00000000}, + 106: {alias: []collalias{{0b01111111, "utf16_polish_ci", "utf16"}}, isdefault: 0b00000000}, + 107: {alias: []collalias{{0b01111111, "utf16_estonian_ci", "utf16"}}, isdefault: 0b00000000}, + 108: {alias: []collalias{{0b01111111, "utf16_spanish_ci", "utf16"}}, isdefault: 0b00000000}, + 109: {alias: []collalias{{0b01111111, "utf16_swedish_ci", "utf16"}}, isdefault: 0b00000000}, + 110: {alias: []collalias{{0b01111111, "utf16_turkish_ci", "utf16"}}, isdefault: 0b00000000}, + 111: {alias: []collalias{{0b01111111, "utf16_czech_ci", "utf16"}}, isdefault: 0b00000000}, + 112: {alias: []collalias{{0b01111111, "utf16_danish_ci", "utf16"}}, isdefault: 0b00000000}, + 113: {alias: []collalias{{0b01111111, "utf16_lithuanian_ci", "utf16"}}, isdefault: 0b00000000}, + 114: {alias: []collalias{{0b01111111, "utf16_slovak_ci", "utf16"}}, isdefault: 0b00000000}, + 115: {alias: []collalias{{0b01111111, "utf16_spanish2_ci", "utf16"}}, isdefault: 0b00000000}, + 116: {alias: []collalias{{0b01111111, "utf16_roman_ci", "utf16"}}, isdefault: 0b00000000}, + 117: {alias: []collalias{{0b01111111, "utf16_persian_ci", "utf16"}}, isdefault: 0b00000000}, + 118: {alias: []collalias{{0b01111111, "utf16_esperanto_ci", "utf16"}}, isdefault: 0b00000000}, + 119: {alias: []collalias{{0b01111111, "utf16_hungarian_ci", 
"utf16"}}, isdefault: 0b00000000}, + 120: {alias: []collalias{{0b01111111, "utf16_sinhala_ci", "utf16"}}, isdefault: 0b00000000}, + 121: {alias: []collalias{{0b01111111, "utf16_german2_ci", "utf16"}}, isdefault: 0b00000000}, + 122: {alias: []collalias{{0b01110000, "utf16_croatian_ci", "utf16"}, {0b00001111, "utf16_croatian_mysql561_ci", "utf16"}}, isdefault: 0b00000000}, + 123: {alias: []collalias{{0b01111111, "utf16_unicode_520_ci", "utf16"}}, isdefault: 0b00000000}, + 124: {alias: []collalias{{0b01111111, "utf16_vietnamese_ci", "utf16"}}, isdefault: 0b00000000}, + 128: {alias: []collalias{{0b01111111, "ucs2_unicode_ci", "ucs2"}}, isdefault: 0b00000000}, + 129: {alias: []collalias{{0b01111111, "ucs2_icelandic_ci", "ucs2"}}, isdefault: 0b00000000}, + 130: {alias: []collalias{{0b01111111, "ucs2_latvian_ci", "ucs2"}}, isdefault: 0b00000000}, + 131: {alias: []collalias{{0b01111111, "ucs2_romanian_ci", "ucs2"}}, isdefault: 0b00000000}, + 132: {alias: []collalias{{0b01111111, "ucs2_slovenian_ci", "ucs2"}}, isdefault: 0b00000000}, + 133: {alias: []collalias{{0b01111111, "ucs2_polish_ci", "ucs2"}}, isdefault: 0b00000000}, + 134: {alias: []collalias{{0b01111111, "ucs2_estonian_ci", "ucs2"}}, isdefault: 0b00000000}, + 135: {alias: []collalias{{0b01111111, "ucs2_spanish_ci", "ucs2"}}, isdefault: 0b00000000}, + 136: {alias: []collalias{{0b01111111, "ucs2_swedish_ci", "ucs2"}}, isdefault: 0b00000000}, + 137: {alias: []collalias{{0b01111111, "ucs2_turkish_ci", "ucs2"}}, isdefault: 0b00000000}, + 138: {alias: []collalias{{0b01111111, "ucs2_czech_ci", "ucs2"}}, isdefault: 0b00000000}, + 139: {alias: []collalias{{0b01111111, "ucs2_danish_ci", "ucs2"}}, isdefault: 0b00000000}, + 140: {alias: []collalias{{0b01111111, "ucs2_lithuanian_ci", "ucs2"}}, isdefault: 0b00000000}, + 141: {alias: []collalias{{0b01111111, "ucs2_slovak_ci", "ucs2"}}, isdefault: 0b00000000}, + 142: {alias: []collalias{{0b01111111, "ucs2_spanish2_ci", "ucs2"}}, isdefault: 0b00000000}, + 143: {alias: 
[]collalias{{0b01111111, "ucs2_roman_ci", "ucs2"}}, isdefault: 0b00000000}, + 144: {alias: []collalias{{0b01111111, "ucs2_persian_ci", "ucs2"}}, isdefault: 0b00000000}, + 145: {alias: []collalias{{0b01111111, "ucs2_esperanto_ci", "ucs2"}}, isdefault: 0b00000000}, + 146: {alias: []collalias{{0b01111111, "ucs2_hungarian_ci", "ucs2"}}, isdefault: 0b00000000}, + 147: {alias: []collalias{{0b01111111, "ucs2_sinhala_ci", "ucs2"}}, isdefault: 0b00000000}, + 148: {alias: []collalias{{0b01111111, "ucs2_german2_ci", "ucs2"}}, isdefault: 0b00000000}, + 149: {alias: []collalias{{0b01110000, "ucs2_croatian_ci", "ucs2"}, {0b00001111, "ucs2_croatian_mysql561_ci", "ucs2"}}, isdefault: 0b00000000}, + 150: {alias: []collalias{{0b01111111, "ucs2_unicode_520_ci", "ucs2"}}, isdefault: 0b00000000}, + 151: {alias: []collalias{{0b01111111, "ucs2_vietnamese_ci", "ucs2"}}, isdefault: 0b00000000}, + 159: {alias: []collalias{{0b01111111, "ucs2_general_mysql500_ci", "ucs2"}}, isdefault: 0b00000000}, + 160: {alias: []collalias{{0b01111111, "utf32_unicode_ci", "utf32"}}, isdefault: 0b00000000}, + 161: {alias: []collalias{{0b01111111, "utf32_icelandic_ci", "utf32"}}, isdefault: 0b00000000}, + 162: {alias: []collalias{{0b01111111, "utf32_latvian_ci", "utf32"}}, isdefault: 0b00000000}, + 163: {alias: []collalias{{0b01111111, "utf32_romanian_ci", "utf32"}}, isdefault: 0b00000000}, + 164: {alias: []collalias{{0b01111111, "utf32_slovenian_ci", "utf32"}}, isdefault: 0b00000000}, + 165: {alias: []collalias{{0b01111111, "utf32_polish_ci", "utf32"}}, isdefault: 0b00000000}, + 166: {alias: []collalias{{0b01111111, "utf32_estonian_ci", "utf32"}}, isdefault: 0b00000000}, + 167: {alias: []collalias{{0b01111111, "utf32_spanish_ci", "utf32"}}, isdefault: 0b00000000}, + 168: {alias: []collalias{{0b01111111, "utf32_swedish_ci", "utf32"}}, isdefault: 0b00000000}, + 169: {alias: []collalias{{0b01111111, "utf32_turkish_ci", "utf32"}}, isdefault: 0b00000000}, + 170: {alias: []collalias{{0b01111111, "utf32_czech_ci", 
"utf32"}}, isdefault: 0b00000000}, + 171: {alias: []collalias{{0b01111111, "utf32_danish_ci", "utf32"}}, isdefault: 0b00000000}, + 172: {alias: []collalias{{0b01111111, "utf32_lithuanian_ci", "utf32"}}, isdefault: 0b00000000}, + 173: {alias: []collalias{{0b01111111, "utf32_slovak_ci", "utf32"}}, isdefault: 0b00000000}, + 174: {alias: []collalias{{0b01111111, "utf32_spanish2_ci", "utf32"}}, isdefault: 0b00000000}, + 175: {alias: []collalias{{0b01111111, "utf32_roman_ci", "utf32"}}, isdefault: 0b00000000}, + 176: {alias: []collalias{{0b01111111, "utf32_persian_ci", "utf32"}}, isdefault: 0b00000000}, + 177: {alias: []collalias{{0b01111111, "utf32_esperanto_ci", "utf32"}}, isdefault: 0b00000000}, + 178: {alias: []collalias{{0b01111111, "utf32_hungarian_ci", "utf32"}}, isdefault: 0b00000000}, + 179: {alias: []collalias{{0b01111111, "utf32_sinhala_ci", "utf32"}}, isdefault: 0b00000000}, + 180: {alias: []collalias{{0b01111111, "utf32_german2_ci", "utf32"}}, isdefault: 0b00000000}, + 181: {alias: []collalias{{0b01110000, "utf32_croatian_ci", "utf32"}, {0b00001111, "utf32_croatian_mysql561_ci", "utf32"}}, isdefault: 0b00000000}, + 182: {alias: []collalias{{0b01111111, "utf32_unicode_520_ci", "utf32"}}, isdefault: 0b00000000}, + 183: {alias: []collalias{{0b01111111, "utf32_vietnamese_ci", "utf32"}}, isdefault: 0b00000000}, + 192: {alias: []collalias{{0b01111111, "utf8_unicode_ci", "utf8"}, {0b01111111, "utf8mb3_unicode_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 193: {alias: []collalias{{0b01111111, "utf8_icelandic_ci", "utf8"}, {0b01111111, "utf8mb3_icelandic_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 194: {alias: []collalias{{0b01111111, "utf8_latvian_ci", "utf8"}, {0b01111111, "utf8mb3_latvian_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 195: {alias: []collalias{{0b01111111, "utf8_romanian_ci", "utf8"}, {0b01111111, "utf8mb3_romanian_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 196: {alias: []collalias{{0b01111111, "utf8_slovenian_ci", "utf8"}, {0b01111111, 
"utf8mb3_slovenian_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 197: {alias: []collalias{{0b01111111, "utf8_polish_ci", "utf8"}, {0b01111111, "utf8mb3_polish_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 198: {alias: []collalias{{0b01111111, "utf8_estonian_ci", "utf8"}, {0b01111111, "utf8mb3_estonian_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 199: {alias: []collalias{{0b01111111, "utf8_spanish_ci", "utf8"}, {0b01111111, "utf8mb3_spanish_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 200: {alias: []collalias{{0b01111111, "utf8_swedish_ci", "utf8"}, {0b01111111, "utf8mb3_swedish_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 201: {alias: []collalias{{0b01111111, "utf8_turkish_ci", "utf8"}, {0b01111111, "utf8mb3_turkish_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 202: {alias: []collalias{{0b01111111, "utf8_czech_ci", "utf8"}, {0b01111111, "utf8mb3_czech_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 203: {alias: []collalias{{0b01111111, "utf8_danish_ci", "utf8"}, {0b01111111, "utf8mb3_danish_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 204: {alias: []collalias{{0b01111111, "utf8_lithuanian_ci", "utf8"}, {0b01111111, "utf8mb3_lithuanian_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 205: {alias: []collalias{{0b01111111, "utf8_slovak_ci", "utf8"}, {0b01111111, "utf8mb3_slovak_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 206: {alias: []collalias{{0b01111111, "utf8_spanish2_ci", "utf8"}, {0b01111111, "utf8mb3_spanish2_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 207: {alias: []collalias{{0b01111111, "utf8_roman_ci", "utf8"}, {0b01111111, "utf8mb3_roman_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 208: {alias: []collalias{{0b01111111, "utf8_persian_ci", "utf8"}, {0b01111111, "utf8mb3_persian_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 209: {alias: []collalias{{0b01111111, "utf8_esperanto_ci", "utf8"}, {0b01111111, "utf8mb3_esperanto_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 210: {alias: []collalias{{0b01111111, "utf8_hungarian_ci", "utf8"}, {0b01111111, "utf8mb3_hungarian_ci", 
"utf8mb3"}}, isdefault: 0b00000000}, + 211: {alias: []collalias{{0b01111111, "utf8_sinhala_ci", "utf8"}, {0b01111111, "utf8mb3_sinhala_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 212: {alias: []collalias{{0b01111111, "utf8_german2_ci", "utf8"}, {0b01111111, "utf8mb3_german2_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 213: {alias: []collalias{{0b01110000, "utf8_croatian_ci", "utf8"}, {0b00001111, "utf8_croatian_mysql561_ci", "utf8"}, {0b01110000, "utf8mb3_croatian_ci", "utf8mb3"}, {0b00001111, "utf8mb3_croatian_mysql561_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 214: {alias: []collalias{{0b01111111, "utf8_unicode_520_ci", "utf8"}, {0b01111111, "utf8mb3_unicode_520_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 215: {alias: []collalias{{0b01111111, "utf8_vietnamese_ci", "utf8"}, {0b01111111, "utf8mb3_vietnamese_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 223: {alias: []collalias{{0b01111111, "utf8_general_mysql500_ci", "utf8"}, {0b01111111, "utf8mb3_general_mysql500_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 224: {alias: []collalias{{0b01111111, "utf8mb4_unicode_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 225: {alias: []collalias{{0b01111111, "utf8mb4_icelandic_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 226: {alias: []collalias{{0b01111111, "utf8mb4_latvian_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 227: {alias: []collalias{{0b01111111, "utf8mb4_romanian_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 228: {alias: []collalias{{0b01111111, "utf8mb4_slovenian_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 229: {alias: []collalias{{0b01111111, "utf8mb4_polish_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 230: {alias: []collalias{{0b01111111, "utf8mb4_estonian_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 231: {alias: []collalias{{0b01111111, "utf8mb4_spanish_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 232: {alias: []collalias{{0b01111111, "utf8mb4_swedish_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 233: {alias: []collalias{{0b01111111, "utf8mb4_turkish_ci", "utf8mb4"}}, 
isdefault: 0b00000000}, + 234: {alias: []collalias{{0b01111111, "utf8mb4_czech_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 235: {alias: []collalias{{0b01111111, "utf8mb4_danish_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 236: {alias: []collalias{{0b01111111, "utf8mb4_lithuanian_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 237: {alias: []collalias{{0b01111111, "utf8mb4_slovak_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 238: {alias: []collalias{{0b01111111, "utf8mb4_spanish2_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 239: {alias: []collalias{{0b01111111, "utf8mb4_roman_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 240: {alias: []collalias{{0b01111111, "utf8mb4_persian_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 241: {alias: []collalias{{0b01111111, "utf8mb4_esperanto_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 242: {alias: []collalias{{0b01111111, "utf8mb4_hungarian_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 243: {alias: []collalias{{0b01111111, "utf8mb4_sinhala_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 244: {alias: []collalias{{0b01111111, "utf8mb4_german2_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 245: {alias: []collalias{{0b01110000, "utf8mb4_croatian_ci", "utf8mb4"}, {0b00001111, "utf8mb4_croatian_mysql561_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 246: {alias: []collalias{{0b01111111, "utf8mb4_unicode_520_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 247: {alias: []collalias{{0b01111111, "utf8mb4_vietnamese_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 248: {alias: []collalias{{0b01100000, "gb18030_chinese_ci", "gb18030"}}, isdefault: 0b01100000}, + 249: {alias: []collalias{{0b01100000, "gb18030_bin", "gb18030"}}, isdefault: 0b00000000}, + 250: {alias: []collalias{{0b01100000, "gb18030_unicode_520_ci", "gb18030"}}, isdefault: 0b00000000}, + 255: {alias: []collalias{{0b01000000, "utf8mb4_0900_ai_ci", "utf8mb4"}}, isdefault: 0b01000000}, + 256: {alias: []collalias{{0b01000000, "utf8mb4_de_pb_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 257: {alias: 
[]collalias{{0b01000000, "utf8mb4_is_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 258: {alias: []collalias{{0b01000000, "utf8mb4_lv_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 259: {alias: []collalias{{0b01000000, "utf8mb4_ro_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 260: {alias: []collalias{{0b01000000, "utf8mb4_sl_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 261: {alias: []collalias{{0b01000000, "utf8mb4_pl_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 262: {alias: []collalias{{0b01000000, "utf8mb4_et_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 263: {alias: []collalias{{0b01000000, "utf8mb4_es_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 264: {alias: []collalias{{0b01000000, "utf8mb4_sv_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 265: {alias: []collalias{{0b01000000, "utf8mb4_tr_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 266: {alias: []collalias{{0b01000000, "utf8mb4_cs_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 267: {alias: []collalias{{0b01000000, "utf8mb4_da_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 268: {alias: []collalias{{0b01000000, "utf8mb4_lt_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 269: {alias: []collalias{{0b01000000, "utf8mb4_sk_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 270: {alias: []collalias{{0b01000000, "utf8mb4_es_trad_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 271: {alias: []collalias{{0b01000000, "utf8mb4_la_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 273: {alias: []collalias{{0b01000000, "utf8mb4_eo_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 274: {alias: []collalias{{0b01000000, "utf8mb4_hu_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 275: {alias: []collalias{{0b01000000, "utf8mb4_hr_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 277: {alias: []collalias{{0b01000000, "utf8mb4_vi_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 278: {alias: []collalias{{0b01000000, "utf8mb4_0900_as_cs", 
"utf8mb4"}}, isdefault: 0b00000000}, + 279: {alias: []collalias{{0b01000000, "utf8mb4_de_pb_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 280: {alias: []collalias{{0b01000000, "utf8mb4_is_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 281: {alias: []collalias{{0b01000000, "utf8mb4_lv_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 282: {alias: []collalias{{0b01000000, "utf8mb4_ro_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 283: {alias: []collalias{{0b01000000, "utf8mb4_sl_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 284: {alias: []collalias{{0b01000000, "utf8mb4_pl_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 285: {alias: []collalias{{0b01000000, "utf8mb4_et_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 286: {alias: []collalias{{0b01000000, "utf8mb4_es_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 287: {alias: []collalias{{0b01000000, "utf8mb4_sv_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 288: {alias: []collalias{{0b01000000, "utf8mb4_tr_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 289: {alias: []collalias{{0b01000000, "utf8mb4_cs_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 290: {alias: []collalias{{0b01000000, "utf8mb4_da_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 291: {alias: []collalias{{0b01000000, "utf8mb4_lt_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 292: {alias: []collalias{{0b01000000, "utf8mb4_sk_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 293: {alias: []collalias{{0b01000000, "utf8mb4_es_trad_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 294: {alias: []collalias{{0b01000000, "utf8mb4_la_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 296: {alias: []collalias{{0b01000000, "utf8mb4_eo_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 297: {alias: []collalias{{0b01000000, "utf8mb4_hu_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 298: {alias: []collalias{{0b01000000, "utf8mb4_hr_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 300: {alias: 
[]collalias{{0b01000000, "utf8mb4_vi_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 303: {alias: []collalias{{0b01000000, "utf8mb4_ja_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 304: {alias: []collalias{{0b01000000, "utf8mb4_ja_0900_as_cs_ks", "utf8mb4"}}, isdefault: 0b00000000}, + 305: {alias: []collalias{{0b01000000, "utf8mb4_0900_as_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 306: {alias: []collalias{{0b01000000, "utf8mb4_ru_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 307: {alias: []collalias{{0b01000000, "utf8mb4_ru_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 308: {alias: []collalias{{0b01000000, "utf8mb4_zh_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 309: {alias: []collalias{{0b01000000, "utf8mb4_0900_bin", "utf8mb4"}}, isdefault: 0b00000000}, + 310: {alias: []collalias{{0b01000000, "utf8mb4_nb_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 311: {alias: []collalias{{0b01000000, "utf8mb4_nb_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 312: {alias: []collalias{{0b01000000, "utf8mb4_nn_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 313: {alias: []collalias{{0b01000000, "utf8mb4_nn_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 314: {alias: []collalias{{0b01000000, "utf8mb4_sr_latn_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 315: {alias: []collalias{{0b01000000, "utf8mb4_sr_latn_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 316: {alias: []collalias{{0b01000000, "utf8mb4_bs_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 317: {alias: []collalias{{0b01000000, "utf8mb4_bs_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 318: {alias: []collalias{{0b01000000, "utf8mb4_bg_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 319: {alias: []collalias{{0b01000000, "utf8mb4_bg_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 320: {alias: []collalias{{0b01000000, "utf8mb4_gl_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 321: {alias: []collalias{{0b01000000, "utf8mb4_gl_0900_as_cs", 
"utf8mb4"}}, isdefault: 0b00000000}, + 322: {alias: []collalias{{0b01000000, "utf8mb4_mn_cyrl_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 323: {alias: []collalias{{0b01000000, "utf8mb4_mn_cyrl_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 576: {alias: []collalias{{0b00001111, "utf8_croatian_ci", "utf8"}, {0b00001111, "utf8mb3_croatian_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 577: {alias: []collalias{{0b00001111, "utf8_myanmar_ci", "utf8"}, {0b00001111, "utf8mb3_myanmar_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 578: {alias: []collalias{{0b00001110, "utf8_thai_520_w2", "utf8"}, {0b00001110, "utf8mb3_thai_520_w2", "utf8mb3"}}, isdefault: 0b00000000}, + 608: {alias: []collalias{{0b00001111, "utf8mb4_croatian_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 609: {alias: []collalias{{0b00001111, "utf8mb4_myanmar_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 610: {alias: []collalias{{0b00001110, "utf8mb4_thai_520_w2", "utf8mb4"}}, isdefault: 0b00000000}, + 640: {alias: []collalias{{0b00001111, "ucs2_croatian_ci", "ucs2"}}, isdefault: 0b00000000}, + 641: {alias: []collalias{{0b00001111, "ucs2_myanmar_ci", "ucs2"}}, isdefault: 0b00000000}, + 642: {alias: []collalias{{0b00001110, "ucs2_thai_520_w2", "ucs2"}}, isdefault: 0b00000000}, + 672: {alias: []collalias{{0b00001111, "utf16_croatian_ci", "utf16"}}, isdefault: 0b00000000}, + 673: {alias: []collalias{{0b00001111, "utf16_myanmar_ci", "utf16"}}, isdefault: 0b00000000}, + 674: {alias: []collalias{{0b00001110, "utf16_thai_520_w2", "utf16"}}, isdefault: 0b00000000}, + 736: {alias: []collalias{{0b00001111, "utf32_croatian_ci", "utf32"}}, isdefault: 0b00000000}, + 737: {alias: []collalias{{0b00001111, "utf32_myanmar_ci", "utf32"}}, isdefault: 0b00000000}, + 738: {alias: []collalias{{0b00001110, "utf32_thai_520_w2", "utf32"}}, isdefault: 0b00000000}, + 1025: {alias: []collalias{{0b00001100, "big5_chinese_nopad_ci", "big5"}}, isdefault: 0b00000000}, + 1027: {alias: []collalias{{0b00001100, "dec8_swedish_nopad_ci", 
"dec8"}}, isdefault: 0b00000000}, + 1028: {alias: []collalias{{0b00001100, "cp850_general_nopad_ci", "cp850"}}, isdefault: 0b00000000}, + 1030: {alias: []collalias{{0b00001100, "hp8_english_nopad_ci", "hp8"}}, isdefault: 0b00000000}, + 1031: {alias: []collalias{{0b00001100, "koi8r_general_nopad_ci", "koi8r"}}, isdefault: 0b00000000}, + 1032: {alias: []collalias{{0b00001100, "latin1_swedish_nopad_ci", "latin1"}}, isdefault: 0b00000000}, + 1033: {alias: []collalias{{0b00001100, "latin2_general_nopad_ci", "latin2"}}, isdefault: 0b00000000}, + 1034: {alias: []collalias{{0b00001100, "swe7_swedish_nopad_ci", "swe7"}}, isdefault: 0b00000000}, + 1035: {alias: []collalias{{0b00001100, "ascii_general_nopad_ci", "ascii"}}, isdefault: 0b00000000}, + 1036: {alias: []collalias{{0b00001100, "ujis_japanese_nopad_ci", "ujis"}}, isdefault: 0b00000000}, + 1037: {alias: []collalias{{0b00001100, "sjis_japanese_nopad_ci", "sjis"}}, isdefault: 0b00000000}, + 1040: {alias: []collalias{{0b00001100, "hebrew_general_nopad_ci", "hebrew"}}, isdefault: 0b00000000}, + 1042: {alias: []collalias{{0b00001100, "tis620_thai_nopad_ci", "tis620"}}, isdefault: 0b00000000}, + 1043: {alias: []collalias{{0b00001100, "euckr_korean_nopad_ci", "euckr"}}, isdefault: 0b00000000}, + 1046: {alias: []collalias{{0b00001100, "koi8u_general_nopad_ci", "koi8u"}}, isdefault: 0b00000000}, + 1048: {alias: []collalias{{0b00001100, "gb2312_chinese_nopad_ci", "gb2312"}}, isdefault: 0b00000000}, + 1049: {alias: []collalias{{0b00001100, "greek_general_nopad_ci", "greek"}}, isdefault: 0b00000000}, + 1050: {alias: []collalias{{0b00001100, "cp1250_general_nopad_ci", "cp1250"}}, isdefault: 0b00000000}, + 1052: {alias: []collalias{{0b00001100, "gbk_chinese_nopad_ci", "gbk"}}, isdefault: 0b00000000}, + 1054: {alias: []collalias{{0b00001100, "latin5_turkish_nopad_ci", "latin5"}}, isdefault: 0b00000000}, + 1056: {alias: []collalias{{0b00001100, "armscii8_general_nopad_ci", "armscii8"}}, isdefault: 0b00000000}, + 1057: {alias: 
[]collalias{{0b00001100, "utf8_general_nopad_ci", "utf8"}, {0b00001100, "utf8mb3_general_nopad_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 1059: {alias: []collalias{{0b00001100, "ucs2_general_nopad_ci", "ucs2"}}, isdefault: 0b00000000}, + 1060: {alias: []collalias{{0b00001100, "cp866_general_nopad_ci", "cp866"}}, isdefault: 0b00000000}, + 1061: {alias: []collalias{{0b00001100, "keybcs2_general_nopad_ci", "keybcs2"}}, isdefault: 0b00000000}, + 1062: {alias: []collalias{{0b00001100, "macce_general_nopad_ci", "macce"}}, isdefault: 0b00000000}, + 1063: {alias: []collalias{{0b00001100, "macroman_general_nopad_ci", "macroman"}}, isdefault: 0b00000000}, + 1064: {alias: []collalias{{0b00001100, "cp852_general_nopad_ci", "cp852"}}, isdefault: 0b00000000}, + 1065: {alias: []collalias{{0b00001100, "latin7_general_nopad_ci", "latin7"}}, isdefault: 0b00000000}, + 1067: {alias: []collalias{{0b00001100, "macce_nopad_bin", "macce"}}, isdefault: 0b00000000}, + 1069: {alias: []collalias{{0b00001100, "utf8mb4_general_nopad_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 1070: {alias: []collalias{{0b00001100, "utf8mb4_nopad_bin", "utf8mb4"}}, isdefault: 0b00000000}, + 1071: {alias: []collalias{{0b00001100, "latin1_nopad_bin", "latin1"}}, isdefault: 0b00000000}, + 1074: {alias: []collalias{{0b00001100, "cp1251_nopad_bin", "cp1251"}}, isdefault: 0b00000000}, + 1075: {alias: []collalias{{0b00001100, "cp1251_general_nopad_ci", "cp1251"}}, isdefault: 0b00000000}, + 1077: {alias: []collalias{{0b00001100, "macroman_nopad_bin", "macroman"}}, isdefault: 0b00000000}, + 1078: {alias: []collalias{{0b00001100, "utf16_general_nopad_ci", "utf16"}}, isdefault: 0b00000000}, + 1079: {alias: []collalias{{0b00001100, "utf16_nopad_bin", "utf16"}}, isdefault: 0b00000000}, + 1080: {alias: []collalias{{0b00001100, "utf16le_general_nopad_ci", "utf16le"}}, isdefault: 0b00000000}, + 1081: {alias: []collalias{{0b00001100, "cp1256_general_nopad_ci", "cp1256"}}, isdefault: 0b00000000}, + 1082: {alias: 
[]collalias{{0b00001100, "cp1257_nopad_bin", "cp1257"}}, isdefault: 0b00000000}, + 1083: {alias: []collalias{{0b00001100, "cp1257_general_nopad_ci", "cp1257"}}, isdefault: 0b00000000}, + 1084: {alias: []collalias{{0b00001100, "utf32_general_nopad_ci", "utf32"}}, isdefault: 0b00000000}, + 1085: {alias: []collalias{{0b00001100, "utf32_nopad_bin", "utf32"}}, isdefault: 0b00000000}, + 1086: {alias: []collalias{{0b00001100, "utf16le_nopad_bin", "utf16le"}}, isdefault: 0b00000000}, + 1088: {alias: []collalias{{0b00001100, "armscii8_nopad_bin", "armscii8"}}, isdefault: 0b00000000}, + 1089: {alias: []collalias{{0b00001100, "ascii_nopad_bin", "ascii"}}, isdefault: 0b00000000}, + 1090: {alias: []collalias{{0b00001100, "cp1250_nopad_bin", "cp1250"}}, isdefault: 0b00000000}, + 1091: {alias: []collalias{{0b00001100, "cp1256_nopad_bin", "cp1256"}}, isdefault: 0b00000000}, + 1092: {alias: []collalias{{0b00001100, "cp866_nopad_bin", "cp866"}}, isdefault: 0b00000000}, + 1093: {alias: []collalias{{0b00001100, "dec8_nopad_bin", "dec8"}}, isdefault: 0b00000000}, + 1094: {alias: []collalias{{0b00001100, "greek_nopad_bin", "greek"}}, isdefault: 0b00000000}, + 1095: {alias: []collalias{{0b00001100, "hebrew_nopad_bin", "hebrew"}}, isdefault: 0b00000000}, + 1096: {alias: []collalias{{0b00001100, "hp8_nopad_bin", "hp8"}}, isdefault: 0b00000000}, + 1097: {alias: []collalias{{0b00001100, "keybcs2_nopad_bin", "keybcs2"}}, isdefault: 0b00000000}, + 1098: {alias: []collalias{{0b00001100, "koi8r_nopad_bin", "koi8r"}}, isdefault: 0b00000000}, + 1099: {alias: []collalias{{0b00001100, "koi8u_nopad_bin", "koi8u"}}, isdefault: 0b00000000}, + 1101: {alias: []collalias{{0b00001100, "latin2_nopad_bin", "latin2"}}, isdefault: 0b00000000}, + 1102: {alias: []collalias{{0b00001100, "latin5_nopad_bin", "latin5"}}, isdefault: 0b00000000}, + 1103: {alias: []collalias{{0b00001100, "latin7_nopad_bin", "latin7"}}, isdefault: 0b00000000}, + 1104: {alias: []collalias{{0b00001100, "cp850_nopad_bin", "cp850"}}, 
isdefault: 0b00000000}, + 1105: {alias: []collalias{{0b00001100, "cp852_nopad_bin", "cp852"}}, isdefault: 0b00000000}, + 1106: {alias: []collalias{{0b00001100, "swe7_nopad_bin", "swe7"}}, isdefault: 0b00000000}, + 1107: {alias: []collalias{{0b00001100, "utf8_nopad_bin", "utf8"}, {0b00001100, "utf8mb3_nopad_bin", "utf8mb3"}}, isdefault: 0b00000000}, + 1108: {alias: []collalias{{0b00001100, "big5_nopad_bin", "big5"}}, isdefault: 0b00000000}, + 1109: {alias: []collalias{{0b00001100, "euckr_nopad_bin", "euckr"}}, isdefault: 0b00000000}, + 1110: {alias: []collalias{{0b00001100, "gb2312_nopad_bin", "gb2312"}}, isdefault: 0b00000000}, + 1111: {alias: []collalias{{0b00001100, "gbk_nopad_bin", "gbk"}}, isdefault: 0b00000000}, + 1112: {alias: []collalias{{0b00001100, "sjis_nopad_bin", "sjis"}}, isdefault: 0b00000000}, + 1113: {alias: []collalias{{0b00001100, "tis620_nopad_bin", "tis620"}}, isdefault: 0b00000000}, + 1114: {alias: []collalias{{0b00001100, "ucs2_nopad_bin", "ucs2"}}, isdefault: 0b00000000}, + 1115: {alias: []collalias{{0b00001100, "ujis_nopad_bin", "ujis"}}, isdefault: 0b00000000}, + 1116: {alias: []collalias{{0b00001100, "geostd8_general_nopad_ci", "geostd8"}}, isdefault: 0b00000000}, + 1117: {alias: []collalias{{0b00001100, "geostd8_nopad_bin", "geostd8"}}, isdefault: 0b00000000}, + 1119: {alias: []collalias{{0b00001100, "cp932_japanese_nopad_ci", "cp932"}}, isdefault: 0b00000000}, + 1120: {alias: []collalias{{0b00001100, "cp932_nopad_bin", "cp932"}}, isdefault: 0b00000000}, + 1121: {alias: []collalias{{0b00001100, "eucjpms_japanese_nopad_ci", "eucjpms"}}, isdefault: 0b00000000}, + 1122: {alias: []collalias{{0b00001100, "eucjpms_nopad_bin", "eucjpms"}}, isdefault: 0b00000000}, + 1125: {alias: []collalias{{0b00001100, "utf16_unicode_nopad_ci", "utf16"}}, isdefault: 0b00000000}, + 1147: {alias: []collalias{{0b00001100, "utf16_unicode_520_nopad_ci", "utf16"}}, isdefault: 0b00000000}, + 1152: {alias: []collalias{{0b00001100, "ucs2_unicode_nopad_ci", "ucs2"}}, 
isdefault: 0b00000000}, + 1174: {alias: []collalias{{0b00001100, "ucs2_unicode_520_nopad_ci", "ucs2"}}, isdefault: 0b00000000}, + 1184: {alias: []collalias{{0b00001100, "utf32_unicode_nopad_ci", "utf32"}}, isdefault: 0b00000000}, + 1206: {alias: []collalias{{0b00001100, "utf32_unicode_520_nopad_ci", "utf32"}}, isdefault: 0b00000000}, + 1216: {alias: []collalias{{0b00001100, "utf8_unicode_nopad_ci", "utf8"}, {0b00001100, "utf8mb3_unicode_nopad_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 1238: {alias: []collalias{{0b00001100, "utf8_unicode_520_nopad_ci", "utf8"}, {0b00001100, "utf8mb3_unicode_520_nopad_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 1248: {alias: []collalias{{0b00001100, "utf8mb4_unicode_nopad_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 1270: {alias: []collalias{{0b00001100, "utf8mb4_unicode_520_nopad_ci", "utf8mb4"}}, isdefault: 0b00000000}, } diff --git a/go/mysql/collations/remote/collation.go b/go/mysql/collations/remote/collation.go index 1e81c429794..dcc2acfee61 100644 --- a/go/mysql/collations/remote/collation.go +++ b/go/mysql/collations/remote/collation.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/vthash" ) @@ -53,22 +54,22 @@ type Collation struct { err error } -var _ collations.Collation = (*Collation)(nil) +var _ colldata.Collation = (*Collation)(nil) func makeRemoteCollation(conn *mysql.Conn, collid collations.ID, collname string) *Collation { - charset := collname + cs := collname if idx := strings.IndexByte(collname, '_'); idx >= 0 { - charset = collname[:idx] + cs = collname[:idx] } coll := &Collation{ name: collname, id: collid, conn: conn, - charset: charset, + charset: cs, } - coll.prefix = fmt.Sprintf("_%s X'", charset) + coll.prefix = fmt.Sprintf("_%s X'", cs) coll.suffix = fmt.Sprintf("' COLLATE %q", collname) coll.hex = 
hex.NewEncoder(&coll.sql) return coll @@ -204,7 +205,7 @@ func (rp *remotePattern) Match(in []byte) bool { return match } -func (c *Collation) Wildcard(pat []byte, _ rune, _ rune, escape rune) collations.WildcardPattern { +func (c *Collation) Wildcard(pat []byte, _ rune, _ rune, escape rune) colldata.WildcardPattern { return &remotePattern{ pattern: fmt.Sprintf("_%s X'%x'", c.charset, pat), remote: c, diff --git a/go/mysql/collations/supported.go b/go/mysql/collations/supported.go new file mode 100644 index 00000000000..4404af2d4fb --- /dev/null +++ b/go/mysql/collations/supported.go @@ -0,0 +1,294 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by makecolldata DO NOT EDIT + +package collations + +var supported = [...]string{ + 0x3: "dec8_swedish_ci", + 0x4: "cp850_general_ci", + 0x5: "latin1_german1_ci", + 0x6: "hp8_english_ci", + 0x7: "koi8r_general_ci", + 0x8: "latin1_swedish_ci", + 0x9: "latin2_general_ci", + 0xa: "swe7_swedish_ci", + 0xb: "ascii_general_ci", + 0xc: "ujis_japanese_ci", + 0xd: "sjis_japanese_ci", + 0xe: "cp1251_bulgarian_ci", + 0xf: "latin1_danish_ci", + 0x10: "hebrew_general_ci", + 0x13: "euckr_korean_ci", + 0x14: "latin7_estonian_cs", + 0x15: "latin2_hungarian_ci", + 0x16: "koi8u_general_ci", + 0x17: "cp1251_ukrainian_ci", + 0x18: "gb2312_chinese_ci", + 0x19: "greek_general_ci", + 0x1a: "cp1250_general_ci", + 0x1b: "latin2_croatian_ci", + 0x1d: "cp1257_lithuanian_ci", + 0x1e: "latin5_turkish_ci", + 0x20: "armscii8_general_ci", + 0x21: "utf8mb3_general_ci", + 0x23: "ucs2_general_ci", + 0x24: "cp866_general_ci", + 0x25: "keybcs2_general_ci", + 0x26: "macce_general_ci", + 0x27: "macroman_general_ci", + 0x28: "cp852_general_ci", + 0x29: "latin7_general_ci", + 0x2a: "latin7_general_cs", + 0x2b: "macce_bin", + 0x2c: "cp1250_croatian_ci", + 0x2d: "utf8mb4_general_ci", + 0x2e: "utf8mb4_bin", + 0x2f: "latin1_bin", + 0x30: "latin1_general_ci", + 0x31: "latin1_general_cs", + 0x32: "cp1251_bin", + 0x33: "cp1251_general_ci", + 0x34: "cp1251_general_cs", + 0x35: "macroman_bin", + 0x36: "utf16_general_ci", + 0x37: "utf16_bin", + 0x38: "utf16le_general_ci", + 0x39: "cp1256_general_ci", + 0x3a: "cp1257_bin", + 0x3b: "cp1257_general_ci", + 0x3c: "utf32_general_ci", + 0x3d: "utf32_bin", + 0x3e: "utf16le_bin", + 0x3f: "binary", + 0x40: "armscii8_bin", + 0x41: "ascii_bin", + 0x42: "cp1250_bin", + 0x43: "cp1256_bin", + 0x44: "cp866_bin", + 0x45: "dec8_bin", + 0x46: "greek_bin", + 0x47: "hebrew_bin", + 0x48: "hp8_bin", + 0x49: "keybcs2_bin", + 0x4a: "koi8r_bin", + 0x4b: "koi8u_bin", + 0x4d: "latin2_bin", + 0x4e: "latin5_bin", + 0x4f: "latin7_bin", + 0x50: "cp850_bin", + 0x51: 
"cp852_bin", + 0x52: "swe7_bin", + 0x53: "utf8mb3_bin", + 0x55: "euckr_bin", + 0x56: "gb2312_bin", + 0x58: "sjis_bin", + 0x5a: "ucs2_bin", + 0x5b: "ujis_bin", + 0x5c: "geostd8_general_ci", + 0x5d: "geostd8_bin", + 0x5e: "latin1_spanish_ci", + 0x5f: "cp932_japanese_ci", + 0x60: "cp932_bin", + 0x61: "eucjpms_japanese_ci", + 0x62: "eucjpms_bin", + 0x63: "cp1250_polish_ci", + 0x65: "utf16_unicode_ci", + 0x66: "utf16_icelandic_ci", + 0x67: "utf16_latvian_ci", + 0x68: "utf16_romanian_ci", + 0x69: "utf16_slovenian_ci", + 0x6a: "utf16_polish_ci", + 0x6b: "utf16_estonian_ci", + 0x6c: "utf16_spanish_ci", + 0x6d: "utf16_swedish_ci", + 0x6e: "utf16_turkish_ci", + 0x6f: "utf16_czech_ci", + 0x70: "utf16_danish_ci", + 0x71: "utf16_lithuanian_ci", + 0x72: "utf16_slovak_ci", + 0x73: "utf16_spanish2_ci", + 0x74: "utf16_roman_ci", + 0x75: "utf16_persian_ci", + 0x76: "utf16_esperanto_ci", + 0x77: "utf16_hungarian_ci", + 0x78: "utf16_sinhala_ci", + 0x79: "utf16_german2_ci", + 0x7a: "utf16_croatian_ci", + 0x7b: "utf16_unicode_520_ci", + 0x7c: "utf16_vietnamese_ci", + 0x80: "ucs2_unicode_ci", + 0x81: "ucs2_icelandic_ci", + 0x82: "ucs2_latvian_ci", + 0x83: "ucs2_romanian_ci", + 0x84: "ucs2_slovenian_ci", + 0x85: "ucs2_polish_ci", + 0x86: "ucs2_estonian_ci", + 0x87: "ucs2_spanish_ci", + 0x88: "ucs2_swedish_ci", + 0x89: "ucs2_turkish_ci", + 0x8a: "ucs2_czech_ci", + 0x8b: "ucs2_danish_ci", + 0x8c: "ucs2_lithuanian_ci", + 0x8d: "ucs2_slovak_ci", + 0x8e: "ucs2_spanish2_ci", + 0x8f: "ucs2_roman_ci", + 0x90: "ucs2_persian_ci", + 0x91: "ucs2_esperanto_ci", + 0x92: "ucs2_hungarian_ci", + 0x93: "ucs2_sinhala_ci", + 0x94: "ucs2_german2_ci", + 0x95: "ucs2_croatian_ci", + 0x96: "ucs2_unicode_520_ci", + 0x97: "ucs2_vietnamese_ci", + 0xa0: "utf32_unicode_ci", + 0xa1: "utf32_icelandic_ci", + 0xa2: "utf32_latvian_ci", + 0xa3: "utf32_romanian_ci", + 0xa4: "utf32_slovenian_ci", + 0xa5: "utf32_polish_ci", + 0xa6: "utf32_estonian_ci", + 0xa7: "utf32_spanish_ci", + 0xa8: "utf32_swedish_ci", + 0xa9: 
"utf32_turkish_ci", + 0xaa: "utf32_czech_ci", + 0xab: "utf32_danish_ci", + 0xac: "utf32_lithuanian_ci", + 0xad: "utf32_slovak_ci", + 0xae: "utf32_spanish2_ci", + 0xaf: "utf32_roman_ci", + 0xb0: "utf32_persian_ci", + 0xb1: "utf32_esperanto_ci", + 0xb2: "utf32_hungarian_ci", + 0xb3: "utf32_sinhala_ci", + 0xb4: "utf32_german2_ci", + 0xb5: "utf32_croatian_ci", + 0xb6: "utf32_unicode_520_ci", + 0xb7: "utf32_vietnamese_ci", + 0xc0: "utf8mb3_unicode_ci", + 0xc1: "utf8mb3_icelandic_ci", + 0xc2: "utf8mb3_latvian_ci", + 0xc3: "utf8mb3_romanian_ci", + 0xc4: "utf8mb3_slovenian_ci", + 0xc5: "utf8mb3_polish_ci", + 0xc6: "utf8mb3_estonian_ci", + 0xc7: "utf8mb3_spanish_ci", + 0xc8: "utf8mb3_swedish_ci", + 0xc9: "utf8mb3_turkish_ci", + 0xca: "utf8mb3_czech_ci", + 0xcb: "utf8mb3_danish_ci", + 0xcc: "utf8mb3_lithuanian_ci", + 0xcd: "utf8mb3_slovak_ci", + 0xce: "utf8mb3_spanish2_ci", + 0xcf: "utf8mb3_roman_ci", + 0xd0: "utf8mb3_persian_ci", + 0xd1: "utf8mb3_esperanto_ci", + 0xd2: "utf8mb3_hungarian_ci", + 0xd3: "utf8mb3_sinhala_ci", + 0xd4: "utf8mb3_german2_ci", + 0xd5: "utf8mb3_croatian_ci", + 0xd6: "utf8mb3_unicode_520_ci", + 0xd7: "utf8mb3_vietnamese_ci", + 0xe0: "utf8mb4_unicode_ci", + 0xe1: "utf8mb4_icelandic_ci", + 0xe2: "utf8mb4_latvian_ci", + 0xe3: "utf8mb4_romanian_ci", + 0xe4: "utf8mb4_slovenian_ci", + 0xe5: "utf8mb4_polish_ci", + 0xe6: "utf8mb4_estonian_ci", + 0xe7: "utf8mb4_spanish_ci", + 0xe8: "utf8mb4_swedish_ci", + 0xe9: "utf8mb4_turkish_ci", + 0xea: "utf8mb4_czech_ci", + 0xeb: "utf8mb4_danish_ci", + 0xec: "utf8mb4_lithuanian_ci", + 0xed: "utf8mb4_slovak_ci", + 0xee: "utf8mb4_spanish2_ci", + 0xef: "utf8mb4_roman_ci", + 0xf0: "utf8mb4_persian_ci", + 0xf1: "utf8mb4_esperanto_ci", + 0xf2: "utf8mb4_hungarian_ci", + 0xf3: "utf8mb4_sinhala_ci", + 0xf4: "utf8mb4_german2_ci", + 0xf5: "utf8mb4_croatian_ci", + 0xf6: "utf8mb4_unicode_520_ci", + 0xf7: "utf8mb4_vietnamese_ci", + 0xfa: "gb18030_unicode_520_ci", + 0xff: "utf8mb4_0900_ai_ci", + 0x100: "utf8mb4_de_pb_0900_ai_ci", + 
0x101: "utf8mb4_is_0900_ai_ci", + 0x102: "utf8mb4_lv_0900_ai_ci", + 0x103: "utf8mb4_ro_0900_ai_ci", + 0x104: "utf8mb4_sl_0900_ai_ci", + 0x105: "utf8mb4_pl_0900_ai_ci", + 0x106: "utf8mb4_et_0900_ai_ci", + 0x107: "utf8mb4_es_0900_ai_ci", + 0x108: "utf8mb4_sv_0900_ai_ci", + 0x109: "utf8mb4_tr_0900_ai_ci", + 0x10a: "utf8mb4_cs_0900_ai_ci", + 0x10b: "utf8mb4_da_0900_ai_ci", + 0x10c: "utf8mb4_lt_0900_ai_ci", + 0x10d: "utf8mb4_sk_0900_ai_ci", + 0x10e: "utf8mb4_es_trad_0900_ai_ci", + 0x10f: "utf8mb4_la_0900_ai_ci", + 0x111: "utf8mb4_eo_0900_ai_ci", + 0x112: "utf8mb4_hu_0900_ai_ci", + 0x113: "utf8mb4_hr_0900_ai_ci", + 0x115: "utf8mb4_vi_0900_ai_ci", + 0x116: "utf8mb4_0900_as_cs", + 0x117: "utf8mb4_de_pb_0900_as_cs", + 0x118: "utf8mb4_is_0900_as_cs", + 0x119: "utf8mb4_lv_0900_as_cs", + 0x11a: "utf8mb4_ro_0900_as_cs", + 0x11b: "utf8mb4_sl_0900_as_cs", + 0x11c: "utf8mb4_pl_0900_as_cs", + 0x11d: "utf8mb4_et_0900_as_cs", + 0x11e: "utf8mb4_es_0900_as_cs", + 0x11f: "utf8mb4_sv_0900_as_cs", + 0x120: "utf8mb4_tr_0900_as_cs", + 0x121: "utf8mb4_cs_0900_as_cs", + 0x122: "utf8mb4_da_0900_as_cs", + 0x123: "utf8mb4_lt_0900_as_cs", + 0x124: "utf8mb4_sk_0900_as_cs", + 0x125: "utf8mb4_es_trad_0900_as_cs", + 0x126: "utf8mb4_la_0900_as_cs", + 0x128: "utf8mb4_eo_0900_as_cs", + 0x129: "utf8mb4_hu_0900_as_cs", + 0x12a: "utf8mb4_hr_0900_as_cs", + 0x12c: "utf8mb4_vi_0900_as_cs", + 0x12f: "utf8mb4_ja_0900_as_cs", + 0x130: "utf8mb4_ja_0900_as_cs_ks", + 0x131: "utf8mb4_0900_as_ci", + 0x132: "utf8mb4_ru_0900_ai_ci", + 0x133: "utf8mb4_ru_0900_as_cs", + 0x134: "utf8mb4_zh_0900_as_cs", + 0x135: "utf8mb4_0900_bin", + 0x136: "utf8mb4_nb_0900_ai_ci", + 0x137: "utf8mb4_nb_0900_as_cs", + 0x138: "utf8mb4_nn_0900_ai_ci", + 0x139: "utf8mb4_nn_0900_as_cs", + 0x13a: "utf8mb4_sr_latn_0900_ai_ci", + 0x13b: "utf8mb4_sr_latn_0900_as_cs", + 0x13c: "utf8mb4_bs_0900_ai_ci", + 0x13d: "utf8mb4_bs_0900_as_cs", + 0x13e: "utf8mb4_bg_0900_ai_ci", + 0x13f: "utf8mb4_bg_0900_as_cs", + 0x140: "utf8mb4_gl_0900_ai_ci", + 0x141: 
"utf8mb4_gl_0900_as_cs", + 0x142: "utf8mb4_mn_cyrl_0900_ai_ci", + 0x143: "utf8mb4_mn_cyrl_0900_as_cs", +} diff --git a/go/mysql/collations/testdata/versions/collations_MySQL80.csv b/go/mysql/collations/testdata/versions/collations_MySQL8.csv similarity index 100% rename from go/mysql/collations/testdata/versions/collations_MySQL80.csv rename to go/mysql/collations/testdata/versions/collations_MySQL8.csv diff --git a/go/mysql/collations/tools/colldump/Dockerfile b/go/mysql/collations/tools/colldump/Dockerfile new file mode 100644 index 00000000000..3e5acf4d9a6 --- /dev/null +++ b/go/mysql/collations/tools/colldump/Dockerfile @@ -0,0 +1,20 @@ +FROM debian:latest + +ARG MYSQL_VERSION=8.0.34 + +RUN apt-get update && apt-get -y install curl cmake build-essential libssl-dev libncurses5-dev pkg-config rapidjson-dev + +RUN cd /tmp && \ + curl -OL https://dev.mysql.com/get/Downloads/MySQL-8.0/mysql-${MYSQL_VERSION}.tar.gz && \ + tar zxvf mysql-${MYSQL_VERSION}.tar.gz + +ADD colldump.cc /tmp/mysql-${MYSQL_VERSION}/strings/colldump.cc +RUN echo "MYSQL_ADD_EXECUTABLE(colldump colldump.cc SKIP_INSTALL)\nTARGET_LINK_LIBRARIES(colldump strings)\n" >> /tmp/mysql-${MYSQL_VERSION}/strings/CMakeLists.txt + +RUN cd /tmp/mysql-${MYSQL_VERSION} && \ + mkdir build && \ + cd build && \ + cmake -DDOWNLOAD_BOOST=1 -DWITH_BOOST=dist/boost .. && \ + make colldump + +RUN mkdir /mysql-collations && /tmp/mysql-${MYSQL_VERSION}/build/runtime_output_directory/colldump /mysql-collations diff --git a/go/mysql/collations/tools/colldump/colldump.cc b/go/mysql/collations/tools/colldump/colldump.cc new file mode 100644 index 00000000000..7668ae1dc70 --- /dev/null +++ b/go/mysql/collations/tools/colldump/colldump.cc @@ -0,0 +1,418 @@ +/* Copyright (c) 2023, The Vitess Authors + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License, version 2.0, + as published by the Free Software Foundation. 
+ + This program is also distributed with certain software (including + but not limited to OpenSSL) that is licensed under separate terms, + as designated in a particular file or component or in included license + documentation. The authors of MySQL hereby grant you an additional + permission to link the program and your derivative works with the + separately licensed software that they have included with MySQL. + + Without limiting anything contained in the foregoing, this file, + which is part of C Driver for MySQL (Connector/C), is also subject to the + Universal FOSS Exception, version 1.0, a copy of which can be found at + http://oss.oracle.com/licenses/universal-foss-exception. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License, version 2.0, for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ + +#include +#include +#include +#include +#include +#include + +#include "m_ctype.h" + +#ifdef HAVE_UNISTD_H +#include +#endif + +#include "my_sys.h" +#include "my_config.h" +#include "my_compiler.h" +#include "my_inttypes.h" +#include "my_io.h" +#include "my_loglevel.h" +#include "my_macros.h" +#include "str_uca_type.h" + +#include "rapidjson/rapidjson.h" +#include "rapidjson/filewritestream.h" +#include "rapidjson/writer.h" + +template +static void print_contractions_1(J &json, my_wc_t *path, size_t depth, bool contextual, const MY_CONTRACTION &contraction) +{ + path[depth] = contraction.ch; + + if (contraction.is_contraction_tail) + { + json.StartObject(); + + json.Key("Path"); + json.StartArray(); + for (size_t i = 0; i <= depth; i++) + { + json.Uint((unsigned int)path[i]); + } + json.EndArray(); + + json.Key("Weights"); + 
json.StartArray(); + for (size_t i = 0; i < MY_UCA_MAX_WEIGHT_SIZE; i++) + { + json.Uint(contraction.weight[i]); + } + json.EndArray(); + + if (contextual) + { + json.Key("Contextual"); + json.Bool(true); + } + + json.EndObject(); + } + + for (const MY_CONTRACTION &ctr : contraction.child_nodes) + { + print_contractions_1(json, path, depth + 1, false, ctr); + } + for (const MY_CONTRACTION &ctr : contraction.child_nodes_context) + { + print_contractions_1(json, path, depth + 1, true, ctr); + } +} + +template +static void print_contractions(J &json, std::vector *contractions) +{ + my_wc_t path[256]; + json.StartArray(); + for (const MY_CONTRACTION &ctr : *contractions) + { + print_contractions_1(json, path, 0, false, ctr); + } + json.EndArray(); +} + +template +static void print_reorder_params(J &json, struct Reorder_param *reorder) +{ + json.StartArray(); + for (int i = 0; i < reorder->wt_rec_num; i++) + { + struct Reorder_wt_rec &r = reorder->wt_rec[i]; + json.StartArray(); + json.Uint(r.old_wt_bdy.begin); + json.Uint(r.old_wt_bdy.end); + json.Uint(r.new_wt_bdy.begin); + json.Uint(r.new_wt_bdy.end); + json.EndArray(); + } + json.EndArray(); +} + +template +static void print_unipages(J &json, const MY_UNI_IDX *unicodeidx) +{ + json.StartArray(); + for (const MY_UNI_IDX *idx = unicodeidx; idx->tab != NULL; idx++) + { + json.StartObject(); + json.Key("From"); + json.Uint(idx->from); + json.Key("To"); + json.Uint(idx->to); + json.Key("Tab"); + json.StartArray(); + const size_t entries = idx->to - idx->from; + for (size_t i = 0; i <= entries; i++) + { + json.Uint(idx->tab[i]); + } + json.EndArray(); + json.EndObject(); + } + json.EndArray(); +} + +template +static void print_uca_weights_900(J &json, int codepoint, uint16 **weights) +{ + uint16 *page = weights[codepoint >> 8]; + if (page == NULL) + return; + + int offset = codepoint & 0xFF; + int cecount = page[offset]; + char key[32]; + snprintf(key, sizeof(key), "U+%04X", codepoint); + + json.Key(key); + 
json.StartArray(); + for (int ce = 0; ce < cecount; ce++) + { + json.Uint(page[256 + (ce * 3 + 0) * 256 + offset]); + json.Uint(page[256 + (ce * 3 + 1) * 256 + offset]); + json.Uint(page[256 + (ce * 3 + 2) * 256 + offset]); + } + json.EndArray(); +} + +template +static void print_uca_weights_legacy(J &json, int codepoint, uint16 **weights, uchar *lengths) +{ + uint16 *page = weights[codepoint >> 8]; + if (page == NULL) + return; + + int offset = codepoint & 0xFF; + uint16 *w = page + offset * lengths[codepoint >> 8]; + if (!w[0]) + return; + + char key[32]; + snprintf(key, sizeof(key), "U+%04X", codepoint); + + json.Key(key); + json.StartArray(); + for (; w[0]; w++) + { + json.Uint(w[0]); + } + json.EndArray(); +} + +template +static void print_array_uchar(J &json, const uchar *arr, size_t len) +{ + json.StartArray(); + for (size_t i = 0; i < len; ++i) + { + json.Uint(arr[i]); + } + json.EndArray(); +} + +template +static void print_array_uint16(J &json, const uint16 *arr, size_t len) +{ + json.StartArray(); + for (size_t i = 0; i < len; ++i) + { + json.Uint(arr[i]); + } + json.EndArray(); +} + +static CHARSET_INFO *init_collation(const char *name) +{ + MY_CHARSET_LOADER loader; + return my_collation_get_by_name(&loader, name, MYF(0)); +} + +#define MY_UCA_MAXCHAR (0x10FFFF + 1) +#define MY_UCA_CHARS_PER_PAGE 256 + +extern MY_COLLATION_HANDLER my_collation_uca_900_handler; +extern MY_COLLATION_HANDLER my_collation_any_uca_handler; +extern MY_COLLATION_HANDLER my_collation_utf16_uca_handler; +extern MY_COLLATION_HANDLER my_collation_utf32_uca_handler; +extern MY_COLLATION_HANDLER my_collation_ucs2_uca_handler; + +struct KNOWN_HANDLER +{ + const char *name; + const MY_COLLATION_HANDLER *h; +}; + +static KNOWN_HANDLER known_handlers[] = { + {"8bit_bin", &my_collation_8bit_bin_handler}, + {"8bit_simple_ci", &my_collation_8bit_simple_ci_handler}, + {"any_uca", &my_collation_any_uca_handler}, + {"uca_900", &my_collation_uca_900_handler}, + {"utf16_uca", 
&my_collation_utf16_uca_handler}, + {"utf32_uca", &my_collation_utf32_uca_handler}, + {"ucs2_uca", &my_collation_ucs2_uca_handler}, +}; + +static int dumpall(const char *dumppath) +{ + char pathbuf[4096]; + char jsonbuf[4096 * 4]; + + // bootstrap the `all_charsets` collation array + init_collation("utf8mb4_0900_ai_ci"); + + for (const CHARSET_INFO *charset : all_charsets) + { + if (!charset || (charset->state & MY_CS_AVAILABLE) == 0) + continue; + + charset = init_collation(charset->m_coll_name); + snprintf(pathbuf, sizeof(pathbuf), "%s/%s.json", dumppath, charset->m_coll_name); + + FILE *jsonfile = fopen(pathbuf, "w"); + if (jsonfile == NULL) + { + fprintf(stderr, "failed to create '%s'\n", pathbuf); + return 1; + } + + rapidjson::FileWriteStream os(jsonfile, jsonbuf, sizeof(jsonbuf)); + rapidjson::Writer, rapidjson::ASCII<>> json(os); + + json.StartObject(); + json.Key("Name"); + json.String(charset->m_coll_name); + json.Key("Charset"); + json.String(charset->csname); + json.Key("Number"); + json.Uint(charset->number); + + json.Key("Flags"); + json.StartObject(); + + json.Key("Binary"); + json.Bool((charset->state & MY_CS_BINSORT) != 0); + json.Key("ASCII"); + json.Bool((charset->state & MY_CS_PUREASCII) != 0); + json.Key("Default"); + json.Bool((charset->state & MY_CS_PRIMARY) != 0); + + json.EndObject(); + + for (const KNOWN_HANDLER &handler : known_handlers) + { + if (charset->coll == handler.h) + { + json.Key("CollationImpl"); + json.String(handler.name); + break; + } + } + + if (charset->ctype != NULL) + { + json.Key("CType"); + print_array_uchar(json, charset->ctype, 256); + } + + if (charset->to_lower != NULL) + { + json.Key("ToLower"); + print_array_uchar(json, charset->to_lower, 256); + } + + if (charset->to_upper != NULL) + { + json.Key("ToUpper"); + print_array_uchar(json, charset->to_upper, 256); + } + + if (charset->tab_to_uni != NULL) + { + json.Key("TabToUni"); + print_array_uint16(json, charset->tab_to_uni, 256); + } + + if (charset->tab_from_uni 
!= NULL) + { + json.Key("TabFromUni"); + print_unipages(json, charset->tab_from_uni); + } + + if (charset->sort_order != NULL) + { + json.Key("SortOrder"); + print_array_uchar(json, charset->sort_order, 256); + } + + if (charset->uca != NULL) + { + MY_UCA_INFO *uca = charset->uca; + + json.Key("UCAVersion"); + + switch (uca->version) + { + case UCA_V400: + json.Uint(400); + break; + case UCA_V520: + json.Uint(520); + break; + case UCA_V900: + json.Uint(900); + break; + default: + json.Uint(0); + break; + } + + json.Key("Weights"); + json.StartObject(); + if (uca->version == UCA_V900) + { + for (my_wc_t cp = 0; cp < MY_UCA_MAXCHAR; cp++) + { + print_uca_weights_900(json, cp, uca->weights); + } + } + else + { + for (my_wc_t cp = 0; cp < uca->maxchar; cp++) + { + print_uca_weights_legacy(json, cp, uca->weights, uca->lengths); + } + } + json.EndObject(); + + if (uca->have_contractions) + { + json.Key("Contractions"); + print_contractions(json, uca->contraction_nodes); + } + } + + if (charset->coll_param != NULL) + { + json.Key("UppercaseFirst"); + json.Bool(charset->coll_param->case_first == CASE_FIRST_UPPER); + + if (charset->coll_param->reorder_param != NULL) + { + json.Key("Reorder"); + print_reorder_params(json, charset->coll_param->reorder_param); + } + } + + json.EndObject(); + os.Flush(); + fclose(jsonfile); + } + return 0; +} + +int main(int argc, char **argv) +{ + if (argc < 2) + { + fprintf(stderr, "usage: %s \n", argv[0]); + return 1; + } + + return dumpall(argv[1]); +} \ No newline at end of file diff --git a/go/mysql/collations/tools/colldump/colldump.sh b/go/mysql/collations/tools/colldump/colldump.sh new file mode 100755 index 00000000000..fe6d1d9d7d2 --- /dev/null +++ b/go/mysql/collations/tools/colldump/colldump.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +cd "$(dirname "$0")" +docker build --tag mysql-collation-data . + +imgid=$(docker create mysql-collation-data) +docker cp $imgid:/mysql-collations/. 
../../testdata/mysqldata +docker rm -v $imgid \ No newline at end of file diff --git a/go/mysql/collations/tools/makecolldata/codegen/codegen.go b/go/mysql/collations/tools/makecolldata/codegen/codegen.go index cc2d5ad3a90..4fa98f2afd1 100644 --- a/go/mysql/collations/tools/makecolldata/codegen/codegen.go +++ b/go/mysql/collations/tools/makecolldata/codegen/codegen.go @@ -24,6 +24,7 @@ import ( "path" "reflect" "sort" + "time" "vitess.io/vitess/go/tools/codegen" ) @@ -64,10 +65,29 @@ func Merge(gens ...*Generator) *Generator { return result } +const licenseFileHeader = `/* +Copyright %d The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +` + func (g *Generator) WriteToFile(out string) { var file, fmtfile bytes.Buffer file.Grow(g.Buffer.Len() + 1024) + fmt.Fprintf(&file, licenseFileHeader, time.Now().Year()) fmt.Fprintf(&file, "// Code generated by %s DO NOT EDIT\n\n", path.Base(os.Args[0])) fmt.Fprintf(&file, "package %s\n\n", g.local.Name()) fmt.Fprintf(&file, "import (\n") diff --git a/go/mysql/collations/tools/makecolldata/codegen/tablegen.go b/go/mysql/collations/tools/makecolldata/codegen/tablegen.go index 787d41293be..b12d32f59d7 100644 --- a/go/mysql/collations/tools/makecolldata/codegen/tablegen.go +++ b/go/mysql/collations/tools/makecolldata/codegen/tablegen.go @@ -24,7 +24,6 @@ import ( "log" "math/bits" "os" - "reflect" "vitess.io/vitess/go/mysql/collations/internal/uca" ) @@ -91,7 +90,6 @@ func (pg *EmbedPageGenerator) WritePage16(g *Generator, varname string, values [ func (pg *EmbedPageGenerator) WriteTrailer(g *Generator, embedfile string) { unsafe := Package("unsafe") - reflect := Package("reflect") g.UsePackage("embed") g.P() @@ -99,7 +97,7 @@ func (pg *EmbedPageGenerator) WriteTrailer(g *Generator, embedfile string) { g.P("var weightsUCA_embed_data string") g.P() g.P("func weightsUCA_embed(pos, length int) []uint16 {") - g.P("return (*[0x7fff0000]uint16)(", unsafe, ".Pointer((*", reflect, ".StringHeader)(", unsafe, ".Pointer(&weightsUCA_embed_data)).Data))[pos:pos+length]") + g.P("return (*[0x7fff0000]uint16)(", unsafe, ".Pointer(", unsafe, ".StringData(weightsUCA_embed_data)))[pos:pos+length]") g.P("}") } @@ -126,23 +124,12 @@ type entry struct { weights []uint16 } -func (e *entry) adjustHangulWeights(tb *TableGenerator, jamos []rune) { - for _, jamo := range jamos { - _, entry := tb.entryForCodepoint(jamo) - e.weights = append(e.weights, entry.weights[0], entry.weights[1], entry.weights[2]+1) - } -} - type page struct { n int entryCount int entries [uca.CodepointsPerPage]entry } -func (p *page) equals(other *page) bool { - return reflect.DeepEqual(p, other) -} - func (p 
*page) name(uca string) string { if p.entryCount == 0 { panic("cannot name empty page") diff --git a/go/mysql/collations/tools/makecolldata/main.go b/go/mysql/collations/tools/makecolldata/main.go index 0bcbd1ecb2b..ee559a886b5 100644 --- a/go/mysql/collations/tools/makecolldata/main.go +++ b/go/mysql/collations/tools/makecolldata/main.go @@ -106,7 +106,7 @@ func (all AllMetadata) get(name string) *CollationMetadata { return nil } -const PkgCollations codegen.Package = "vitess.io/vitess/go/mysql/collations" +const PkgCollationsData codegen.Package = "vitess.io/vitess/go/mysql/collations/colldata" const PkgCharset codegen.Package = "vitess.io/vitess/go/mysql/collations/charset" func main() { @@ -114,5 +114,5 @@ func main() { metadata := loadMysqlMetadata() maketables(*Embed, ".", metadata) makeversions(".") - makemysqldata(".", metadata) + makemysqldata("colldata", ".", metadata) } diff --git a/go/mysql/collations/tools/makecolldata/maketables.go b/go/mysql/collations/tools/makecolldata/maketables.go index 8ac2f9049ce..055162401bb 100644 --- a/go/mysql/collations/tools/makecolldata/maketables.go +++ b/go/mysql/collations/tools/makecolldata/maketables.go @@ -39,7 +39,7 @@ func maketable(g *codegen.Generator, table string, collation *CollationMetadata, func maketables(embed bool, output string, metadata AllMetadata) { var pages = codegen.NewPageGenerator(embed) - var g = codegen.NewGenerator("vitess.io/vitess/go/mysql/collations") + var g = codegen.NewGenerator("vitess.io/vitess/go/mysql/collations/colldata") var fastg = codegen.NewGenerator("vitess.io/vitess/go/mysql/collations/internal/uca") tablegen := maketable(g, "uca900", metadata.get("utf8mb4_0900_ai_ci"), pages, uca.Layout_uca900{}) @@ -53,9 +53,9 @@ func maketables(embed bool, output string, metadata AllMetadata) { if pages, ok := pages.(*codegen.EmbedPageGenerator); ok { pages.WriteTrailer(g, "mysqlucadata.bin") - pages.WriteToFile(path.Join(output, "mysqlucadata.bin")) + pages.WriteToFile(path.Join(output, 
"colldata/mysqlucadata.bin")) } - g.WriteToFile(path.Join(output, "mysqlucadata.go")) + g.WriteToFile(path.Join(output, "colldata/mysqlucadata.go")) fastg.WriteToFile(path.Join(output, "internal/uca/fasttables.go")) } diff --git a/go/mysql/collations/tools/makecolldata/mysqldata.go b/go/mysql/collations/tools/makecolldata/mysqldata.go index 351e578d2af..567f04362de 100644 --- a/go/mysql/collations/tools/makecolldata/mysqldata.go +++ b/go/mysql/collations/tools/makecolldata/mysqldata.go @@ -353,12 +353,12 @@ func (g *Generator) printCollationMultibyte(meta *CollationMetadata) { g.P("},") } -func makemysqldata(output string, metadata AllMetadata) { +func makemysqldata(output string, supportedOutput string, metadata AllMetadata) { var unsupportedByCharset = make(map[string][]string) var g = Generator{ - Generator: codegen.NewGenerator(PkgCollations), + Generator: codegen.NewGenerator(PkgCollationsData), Tables: TableGenerator{ - Generator: codegen.NewGenerator(PkgCollations), + Generator: codegen.NewGenerator(PkgCollationsData), dedup: make(map[string]string), baseWeightsUca400: metadata.get("utf8mb4_unicode_ci").Weights, baseWeightsUca520: metadata.get("utf8mb4_unicode_520_ci").Weights, @@ -366,15 +366,22 @@ func makemysqldata(output string, metadata AllMetadata) { }, } + var h = Generator{ + Generator: codegen.NewGenerator("vitess.io/vitess/go/mysql/collations"), + } + g.P("var collationsById = [...]Collation{") + h.P("var supported = [...]string{") for _, meta := range metadata { switch { case meta.Name == "utf8mb4_0900_bin": g.P(uint(309), ": &Collation_utf8mb4_0900_bin{},") + h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",") case meta.Name == "binary": g.P(uint(63), ": &Collation_binary{},") + h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",") case meta.Name == "tis620_bin": // explicitly unsupported for now because of not accurate results @@ -384,24 +391,31 @@ func makemysqldata(output string, metadata AllMetadata) { meta.CollationImpl == "utf32_uca" || 
meta.CollationImpl == "ucs2_uca": g.printCollationUcaLegacy(meta) + h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",") case meta.CollationImpl == "uca_900": g.printCollationUca900(meta) + h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",") case meta.CollationImpl == "8bit_bin" || meta.CollationImpl == "8bit_simple_ci": g.printCollation8bit(meta) + h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",") case meta.Name == "gb18030_unicode_520_ci": g.printCollationUcaLegacy(meta) + h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",") case charset.IsMultibyteByName(meta.Charset): g.printCollationMultibyte(meta) + h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",") case strings.HasSuffix(meta.Name, "_bin") && charset.IsUnicodeByName(meta.Charset): g.printCollationUnicode(meta) + h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",") case strings.HasSuffix(meta.Name, "_general_ci"): g.printCollationUnicode(meta) + h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",") default: unsupportedByCharset[meta.Charset] = append(unsupportedByCharset[meta.Charset], meta.Name) @@ -409,7 +423,9 @@ func makemysqldata(output string, metadata AllMetadata) { } g.P("}") + h.P("}") codegen.Merge(g.Tables.Generator, g.Generator).WriteToFile(path.Join(output, "mysqldata.go")) + h.WriteToFile(path.Join(supportedOutput, "supported.go")) var unhandledCount int for impl, collations := range unsupportedByCharset { diff --git a/go/mysql/collations/tools/makecolldata/mysqlversions.go b/go/mysql/collations/tools/makecolldata/mysqlversions.go index 5bdd3165e53..f0578ecd95b 100644 --- a/go/mysql/collations/tools/makecolldata/mysqlversions.go +++ b/go/mysql/collations/tools/makecolldata/mysqlversions.go @@ -60,6 +60,7 @@ func makeversions(output string) { } sort.Strings(versionfiles) + charsets := make(map[string]string) versioninfo := make(map[uint]*versionInfo) for v, versionCsv := range versionfiles { f, err := os.Open(versionCsv) @@ -89,14 +90,17 @@ func makeversions(output string) { collname 
:= cols[0] vi.alias[collname] |= 1 << v + charsets[collname] = cols[1] for from, to := range CharsetAliases { if strings.HasPrefix(collname, from+"_") { aliased := strings.Replace(collname, from+"_", to+"_", 1) + charsets[aliased] = to vi.alias[aliased] |= 1 << v } if strings.HasPrefix(collname, to+"_") { aliased := strings.Replace(collname, to+"_", from+"_", 1) + charsets[aliased] = from vi.alias[aliased] |= 1 << v } } @@ -123,7 +127,7 @@ func makeversions(output string) { var g = codegen.NewGenerator("vitess.io/vitess/go/mysql/collations") g.P("type collver byte") - g.P("type collalias struct { mask collver; name string }") + g.P("type collalias struct { mask collver; name string; charset string }") g.P("const (") g.P("collverInvalid collver = 0") for n, version := range versions { @@ -150,7 +154,7 @@ func makeversions(output string) { // all MySQL versions, but this is implemented as a method on `collver` so when // MySQL maps utf8 to utfmb4, we can perform the mapping only for the specific // MySQL version onwards. 
- g.P("func (v collver) charsetAliases() map[string]string { return ", fmt.Sprintf("%#v", CharsetAliases), "}") + g.P("func charsetAliases() map[string]string { return ", fmt.Sprintf("%#v", CharsetAliases), "}") g.P() g.P("var globalVersionInfo = map[ID]struct{alias []collalias; isdefault collver}{") @@ -164,14 +168,14 @@ func makeversions(output string) { for _, vi := range sorted { var reverse []alias for a, m := range vi.alias { - reverse = append(reverse, alias{m, a}) + reverse = append(reverse, alias{mask: m, name: a}) } sort.Slice(reverse, func(i, j int) bool { return reverse[i].name < reverse[j].name }) fmt.Fprintf(g, "%d: {alias: []collalias{", vi.id) for _, a := range reverse { - fmt.Fprintf(g, "{0b%08b, %q},", a.mask, a.name) + fmt.Fprintf(g, "{0b%08b, %q, %q},", a.mask, a.name, charsets[a.name]) } fmt.Fprintf(g, "}, isdefault: 0b%08b},\n", vi.isdefault) } diff --git a/go/mysql/collations/tools/maketestdata/maketestdata.go b/go/mysql/collations/tools/maketestdata/maketestdata.go index e8cb0daee5d..67d5a4739f6 100644 --- a/go/mysql/collations/tools/maketestdata/maketestdata.go +++ b/go/mysql/collations/tools/maketestdata/maketestdata.go @@ -30,6 +30,8 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" @@ -166,17 +168,17 @@ func main() { flag.Parse(fs) var defaults = collations.Local() - var collationsForLanguage = make(map[testutil.Lang][]collations.Collation) - var allcollations = defaults.AllCollations() + var collationsForLanguage = make(map[testutil.Lang][]collations.ID) + var allcollations = colldata.All(defaults) for lang := range testutil.KnownLanguages { for _, coll := range allcollations { if lang.MatchesCollation(coll.Name()) { - collationsForLanguage[lang] = append(collationsForLanguage[lang], coll) + collationsForLanguage[lang] = append(collationsForLanguage[lang], coll.ID()) } } } - var 
rootCollations = []collations.Collation{ + var rootCollations = []collations.ID{ defaults.LookupByName("utf8mb4_0900_as_cs"), defaults.LookupByName("utf8mb4_0900_as_ci"), defaults.LookupByName("utf8mb4_0900_ai_ci"), @@ -211,21 +213,22 @@ func main() { var total int var collationNames []string - var interestingCollations []collations.Collation + var interestingCollations []collations.ID interestingCollations = append(interestingCollations, rootCollations...) interestingCollations = append(interestingCollations, collationsForLanguage[lang]...) for _, collation := range interestingCollations { - transcoded, err := charset.ConvertFromUTF8(nil, collation.Charset(), []byte(snippet)) + transcoded, err := charset.ConvertFromUTF8(nil, colldata.Lookup(collation).Charset(), []byte(snippet)) if err != nil { - log.Printf("[%s] skip collation %s", lang, collation.Name()) + log.Printf("[%s] skip collation %s", lang, defaults.LookupName(collation)) continue } - weights := colldump(collation.Name(), transcoded) - gcase.Weights[collation.Name()] = weights + colName := defaults.LookupName(collation) + weights := colldump(colName, transcoded) + gcase.Weights[colName] = weights total += len(weights) - collationNames = append(collationNames, collation.Name()) + collationNames = append(collationNames, colName) } log.Printf("[%s] written samples for %d collations (%.02fkb): %s", diff --git a/go/mysql/conn.go b/go/mysql/conn.go index b12f4907c0c..1a8efe8978d 100644 --- a/go/mysql/conn.go +++ b/go/mysql/conn.go @@ -30,6 +30,8 @@ import ( "sync/atomic" "time" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/mysql/collations" @@ -216,6 +218,19 @@ type Conn struct { // See: ConnParams.EnableQueryInfo enableQueryInfo bool + // keepAliveOn marks when keep alive is active on the connection. + // This is currently used for testing. 
+ keepAliveOn bool + + // mu protects the fields below + mu sync.Mutex + // cancel keep the cancel function for the current executing query. + // this is used by `kill [query|connection] ID` command from other connection. + cancel context.CancelFunc + // this is used to mark the connection to be closed so that the command phase for the connection can be stopped and + // the connection gets closed. + closing bool + // AccountType is a flag about account authority, inlude rw ro rs AccountType int8 @@ -281,10 +296,21 @@ func newConn(conn net.Conn) *Conn { // the server is shutting down, and has the ability to control buffer // size for reads. func newServerConn(conn net.Conn, listener *Listener) *Conn { + // Enable KeepAlive on TCP connections and change keep-alive period if provided. + enabledKeepAlive := false + if tcpConn, ok := conn.(*net.TCPConn); ok { + if err := setTcpConnProperties(tcpConn, listener.connKeepAlivePeriod); err != nil { + log.Errorf("error in setting tcp properties: %v", err) + } else { + enabledKeepAlive = true + } + } + c := &Conn{ conn: conn, listener: listener, PrepareData: make(map[uint32]*PrepareData), + keepAliveOn: enabledKeepAlive, } if listener.connReadBufferSize > 0 { @@ -302,6 +328,22 @@ func newServerConn(conn net.Conn, listener *Listener) *Conn { return c } +func setTcpConnProperties(conn *net.TCPConn, keepAlivePeriod time.Duration) error { + if err := conn.SetKeepAlive(true); err != nil { + return vterrors.Wrapf(err, "unable to enable keepalive on tcp connection") + } + + if keepAlivePeriod <= 0 { + return nil + } + + if err := conn.SetKeepAlivePeriod(keepAlivePeriod); err != nil { + return vterrors.Wrapf(err, "unable to set keepalive period on tcp connection") + } + + return nil +} + // startWriterBuffering starts using buffered writes. This should // be terminated by a call to endWriteBuffering. 
func (c *Conn) startWriterBuffering() { @@ -594,7 +636,7 @@ func (c *Conn) readPacket() ([]byte, error) { func (c *Conn) ReadPacket() ([]byte, error) { result, err := c.readPacket() if err != nil { - return nil, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return nil, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } return result, err } @@ -718,7 +760,7 @@ func (c *Conn) writeComQuit() error { data, pos := c.startEphemeralPacketWithHeader(1) data[pos] = ComQuit if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, err.Error()) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, err.Error()) } return nil } @@ -806,7 +848,7 @@ func (c *Conn) writeOKPacketWithHeader(packetOk *PacketOK, headerType byte) erro bytes, pos := c.startEphemeralPacketWithHeader(length) data := &coder{data: bytes, pos: pos} - data.writeByte(headerType) //header - OK or EOF + data.writeByte(headerType) // header - OK or EOF data.writeLenEncInt(packetOk.affectedRows) data.writeLenEncInt(packetOk.lastInsertID) data.writeUint16(packetOk.statusFlags) @@ -856,10 +898,10 @@ func getLenEncInt(i uint64) []byte { } func (c *Conn) WriteErrorAndLog(format string, args ...interface{}) bool { - return c.writeErrorAndLog(ERUnknownComError, SSNetError, format, args...) + return c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, format, args...) } -func (c *Conn) writeErrorAndLog(errorCode ErrorCode, sqlState string, format string, args ...any) bool { +func (c *Conn) writeErrorAndLog(errorCode sqlerror.ErrorCode, sqlState string, format string, args ...any) bool { if err := c.writeErrorPacket(errorCode, sqlState, format, args...); err != nil { log.Errorf("Error writing error to %s: %v", c, err) return false @@ -879,7 +921,7 @@ func (c *Conn) writeErrorPacketFromErrorAndLog(err error) bool { // writeErrorPacket writes an error packet. // Server -> Client. 
// This method returns a generic error, not a SQLError. -func (c *Conn) writeErrorPacket(errorCode ErrorCode, sqlState string, format string, args ...any) error { +func (c *Conn) writeErrorPacket(errorCode sqlerror.ErrorCode, sqlState string, format string, args ...any) error { errorMessage := fmt.Sprintf(format, args...) length := 1 + 2 + 1 + 5 + len(errorMessage) data, pos := c.startEphemeralPacketWithHeader(length) @@ -887,7 +929,7 @@ func (c *Conn) writeErrorPacket(errorCode ErrorCode, sqlState string, format str pos = writeUint16(data, pos, uint16(errorCode)) pos = writeByte(data, pos, '#') if sqlState == "" { - sqlState = SSUnknownSQLState + sqlState = sqlerror.SSUnknownSQLState } if len(sqlState) != 5 { panic("sqlState has to be 5 characters long") @@ -901,11 +943,11 @@ func (c *Conn) writeErrorPacket(errorCode ErrorCode, sqlState string, format str // writeErrorPacketFromError writes an error packet, from a regular error. // See writeErrorPacket for other info. func (c *Conn) writeErrorPacketFromError(err error) error { - if se, ok := err.(*SQLError); ok { + if se, ok := err.(*sqlerror.SQLError); ok { return c.writeErrorPacket(se.Num, se.State, "%v", se.Message) } - return c.writeErrorPacket(ERUnknownError, SSUnknownSQLState, "unknown error: %v", err) + return c.writeErrorPacket(sqlerror.ERUnknownError, sqlerror.SSUnknownSQLState, "unknown error: %v", err) } // writeEOFPacket writes an EOF packet, through the buffer, and @@ -938,6 +980,10 @@ func (c *Conn) handleNextCommand(handler Handler) bool { if len(data) == 0 { return false } + // before continue to process the packet, check if the connection should be closed or not. 
+ if c.IsMarkedForClose() { + return false + } if c.CrossEnable || c.AttachEnable { if err = handler.CheckAttachedHost(c); err != nil { @@ -1033,7 +1079,7 @@ func (c *Conn) handleNextCommand(handler Handler) bool { default: log.Errorf("Got unhandled packet (default) from %s, returning error: %v", c, data) c.recycleReadPacket() - if !c.writeErrorAndLog(ERUnknownComError, SSNetError, "command handling not implemented yet: %v", data[0]) { + if !c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, "command handling not implemented yet: %v", data[0]) { return false } } @@ -1136,7 +1182,7 @@ func (c *Conn) handleComStmtReset(data []byte) bool { c.recycleReadPacket() if !ok { log.Error("Got unhandled packet from client %v, returning error: %v", c.ConnectionID, data) - if !c.writeErrorAndLog(ERUnknownComError, SSNetError, "error handling packet: %v", data) { + if !c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, "error handling packet: %v", data) { return false } } @@ -1144,7 +1190,7 @@ func (c *Conn) handleComStmtReset(data []byte) bool { prepare, ok := c.PrepareData[stmtID] if !ok { log.Error("Commands were executed in an improper order from client %v, packet: %v", c.ConnectionID, data) - if !c.writeErrorAndLog(CRCommandsOutOfSync, SSNetError, "commands were executed in an improper order: %v", data) { + if !c.writeErrorAndLog(sqlerror.CRCommandsOutOfSync, sqlerror.SSNetError, "commands were executed in an improper order: %v", data) { return false } } @@ -1166,7 +1212,7 @@ func (c *Conn) handleComStmtSendLongData(data []byte) bool { if c.CrossEnable || c.AttachEnable { defer c.recycleReadPacket() if err := c.crossTabletConn.writePacketNoHeader(data); err != nil { - if err := c.writeErrorPacket(ERUnknownComError, SSNetError, "command handling not implemented yet: %v", data[0]); err != nil { + if err := c.writeErrorPacket(sqlerror.ERUnknownComError, sqlerror.SSNetError, "command handling not implemented yet: %v", data[0]); err != nil { 
log.Errorf("Error writing error packet to client: %v", err) return false } @@ -1174,7 +1220,7 @@ func (c *Conn) handleComStmtSendLongData(data []byte) bool { } if err := c.crossTabletConn.endWriterBuffering(); err != nil { - if err := c.writeErrorPacket(ERUnknownComError, SSNetError, "command handling not implemented yet: %v", data[0]); err != nil { + if err := c.writeErrorPacket(sqlerror.ERUnknownComError, sqlerror.SSNetError, "command handling not implemented yet: %v", data[0]); err != nil { log.Errorf("Error writing error packet to client: %v", err) return false } @@ -1224,7 +1270,7 @@ func (c *Conn) handleComStmtExecute(handler Handler, data []byte) (kontinue bool c.recycleReadPacket() err := c.crossTabletConn.ptComStmtExecute(edata, c) if err != nil { - if err := c.writeErrorPacket(ERUnknownComError, SSNetError, "command handling not implemented yet: %v", edata[0]); err != nil { + if err := c.writeErrorPacket(sqlerror.ERUnknownComError, sqlerror.SSNetError, "command handling not implemented yet: %v", edata[0]); err != nil { log.Errorf("Error writing error packet to client: %v", err) return false } @@ -1285,7 +1331,7 @@ func (c *Conn) handleComStmtExecute(handler Handler, data []byte) (kontinue bool if !fieldSent { // This is just a failsafe. Should never happen. 
if err == nil || err == io.EOF { - err = NewSQLErrorFromError(errors.New("unexpected: query ended without no results and no error")) + err = sqlerror.NewSQLErrorFromError(errors.New("unexpected: query ended without no results and no error")) } if !c.writeErrorPacketFromErrorAndLog(err) { return false @@ -1331,7 +1377,7 @@ func (c *Conn) handleComPrepare(handler Handler, data []byte) (kontinue bool) { data[0] = ComPrepare copy(data[1:], query) if err := c.crossTabletConn.ptComPrepare(data, c); err != nil { - if err := c.writeErrorPacket(ERUnknownComError, SSNetError, "command handling not implemented yet: %v", data[0]); err != nil { + if err := c.writeErrorPacket(sqlerror.ERUnknownComError, sqlerror.SSNetError, "command handling not implemented yet: %v", data[0]); err != nil { log.Errorf("Error writing error packet to client: %v", err) return false } @@ -1427,7 +1473,7 @@ func (c *Conn) handleComSetOption(data []byte) bool { c.Capabilities &^= CapabilityClientMultiStatements default: log.Errorf("Got unhandled packet (ComSetOption default) from client %v, returning error: %v", c.ConnectionID, data) - if !c.writeErrorAndLog(ERUnknownComError, SSNetError, "error handling packet: %v", data) { + if !c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, "error handling packet: %v", data) { return false } } @@ -1437,7 +1483,7 @@ func (c *Conn) handleComSetOption(data []byte) bool { } } else { log.Errorf("Got unhandled packet (ComSetOption else) from client %v, returning error: %v", c.ConnectionID, data) - if !c.writeErrorAndLog(ERUnknownComError, SSNetError, "error handling packet: %v", data) { + if !c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, "error handling packet: %v", data) { return false } } @@ -1448,7 +1494,7 @@ func (c *Conn) handleComPing() bool { c.recycleReadPacket() if c.CrossEnable || c.AttachEnable { if err := c.crossTabletConn.Ping(); err != nil { - if err := c.writeErrorPacket(ERUnknownComError, SSNetError, "command 
handling not implemented yet: %v", err); err != nil { + if err := c.writeErrorPacket(sqlerror.ERUnknownComError, sqlerror.SSNetError, "command handling not implemented yet: %v", err); err != nil { log.Errorf("Error writing error packet to client: %v", err) return false } @@ -1461,7 +1507,7 @@ func (c *Conn) handleComPing() bool { } // Return error if listener was shut down and OK otherwise if c.listener.shutdown.Load() { - if !c.writeErrorAndLog(ERServerShutdown, SSNetError, "Server shutdown in progress") { + if !c.writeErrorAndLog(sqlerror.ERServerShutdown, sqlerror.SSNetError, "Server shutdown in progress") { return false } } else { @@ -1473,7 +1519,7 @@ func (c *Conn) handleComPing() bool { return true } -var errEmptyStatement = NewSQLError(EREmptyQuery, SSClientError, "Query was empty") +var errEmptyStatement = sqlerror.NewSQLError(sqlerror.EREmptyQuery, sqlerror.SSClientError, "Query was empty") func (c *Conn) handleComQuery(handler Handler, data []byte) (kontinue bool) { c.startWriterBuffering() @@ -1494,7 +1540,7 @@ func (c *Conn) handleComQuery(handler Handler, data []byte) (kontinue bool) { copy(data[1:], query) if err := c.crossTabletConn.passthroughComQuery(data, c); err != nil { - if IsConnErrByCross(err) { + if sqlerror.IsConnErrByCross(err) { return false } } @@ -1580,7 +1626,7 @@ func (c *Conn) execQuery(query string, handler Handler, more bool) execResult { if !callbackCalled { // This is just a failsafe. Should never happen. if err == nil || err == io.EOF { - err = NewSQLErrorFromError(errors.New("unexpected: query ended without no results and no error")) + err = sqlerror.NewSQLErrorFromError(errors.New("unexpected: query ended without no results and no error")) } if !c.writeErrorPacketFromErrorAndLog(err) { return connErr @@ -1719,7 +1765,6 @@ func (c *Conn) parseOKPacket(in []byte) (*PacketOK, error) { // we can return the packet. return packetOK, nil } - // Alright, now we need to read each sub packet from the session state change. 
for { sscType, ok := data.readByte() @@ -1771,7 +1816,7 @@ func ParseErrorPacket(data []byte) error { // Error code is 2 bytes. code, pos, ok := readUint16(data, pos) if !ok { - return NewSQLError(CRUnknownError, SSUnknownSQLState, "invalid error packet code: %v", data) + return sqlerror.NewSQLError(sqlerror.CRUnknownError, sqlerror.SSUnknownSQLState, "invalid error packet code: %v", data) } // '#' marker of the SQL state is 1 byte. Ignored. @@ -1780,13 +1825,13 @@ func ParseErrorPacket(data []byte) error { // SQL state is 5 bytes sqlState, pos, ok := readBytes(data, pos, 5) if !ok { - return NewSQLError(CRUnknownError, SSUnknownSQLState, "invalid error packet sqlState: %v", data) + return sqlerror.NewSQLError(sqlerror.CRUnknownError, sqlerror.SSUnknownSQLState, "invalid error packet sqlState: %v", data) } // Human readable error message is the rest. msg := string(data[pos:]) - return NewSQLError(ErrorCode(code), string(sqlState), "%v", msg) + return sqlerror.NewSQLError(sqlerror.ErrorCode(code), string(sqlState), "%v", msg) } // GetTLSClientCerts gets TLS certificates. @@ -1879,7 +1924,7 @@ func (c *Conn) ptComStmtClose(data []byte, clientConn *Conn) error { // This is a new command, need to reset the sequence. c.sequence = 0 if err := c.writePacketNoHeader(data); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err) } if err := c.endWriterBuffering(); err != nil { return err @@ -1891,7 +1936,7 @@ func (c *Conn) ptOnePacket(data []byte, clientConn *Conn) error { // This is a new command, need to reset the sequence. 
c.sequence = 0 if err := c.writePacketNoHeader(data); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err) } if err := c.endWriterBuffering(); err != nil { return err @@ -1919,13 +1964,13 @@ func (c *Conn) passthroughComQuery(data []byte, clientConn *Conn) error { rdata, err := c.readEphemeralPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } wdata := make([]byte, len(rdata)) copy(wdata, rdata) c.recycleReadPacket() if len(wdata) == 0 { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "invalid empty COM_QUERY response packet") + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "invalid empty COM_QUERY response packet") } return c.processData(wdata, clientConn) @@ -1936,7 +1981,7 @@ func (c *Conn) processData(data []byte, clientConn *Conn) error { switch data[0] { case ErrPacket: err := ParseErrorPacket(data) - if IsConnErr(err) { + if sqlerror.IsConnErr(err) { return err } if err := clientConn.writePacketNoHeader(data); err != nil { @@ -1957,13 +2002,13 @@ func (c *Conn) processData(data []byte, clientConn *Conn) error { if packetOk.statusFlags&ServerMoreResultsExists == ServerMoreResultsExists { data, err := c.readEphemeralPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } wdata := make([]byte, len(data)) copy(wdata, data) c.recycleReadPacket() if len(wdata) == 0 { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "invalid empty COM_QUERY response packet") + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "invalid empty COM_QUERY response packet") } //clientConn.sequence=0 return c.processData(wdata, 
clientConn) @@ -2012,10 +2057,10 @@ func (c *Conn) processData(data []byte, clientConn *Conn) error { colNumber, pos, ok := readLenEncInt(data, 0) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "cannot get column number") + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "cannot get column number") } if pos != len(data) { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extra data in COM_QUERY response") + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extra data in COM_QUERY response") } if colNumber == 0 { @@ -2045,7 +2090,7 @@ func (c *Conn) processData(data []byte, clientConn *Conn) error { // EOF is only present here if it's not deprecated. eofData, err := c.readEphemeralPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } if c.isEOFPacket(eofData) { // This is what we expect. 
@@ -2087,11 +2132,11 @@ func (c *Conn) processData(data []byte, clientConn *Conn) error { if statusFlag&ServerMoreResultsExists == ServerMoreResultsExists { data, err := c.readEphemeralPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } if len(data) == 0 { c.recycleReadPacket() - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "invalid empty COM_QUERY response packet") + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "invalid empty COM_QUERY response packet") } wdata := make([]byte, len(data)) copy(wdata, data) @@ -2112,11 +2157,11 @@ func (c *Conn) processData(data []byte, clientConn *Conn) error { data, err := c.readEphemeralPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } if len(data) == 0 { c.recycleReadPacket() - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "invalid empty COM_QUERY response packet") + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "invalid empty COM_QUERY response packet") } wdata := make([]byte, len(data)) copy(wdata, data) @@ -2142,7 +2187,7 @@ func (c *Conn) ptComPrepare(data []byte, clientConn *Conn) error { // This is a new command, need to reset the sequence. 
c.sequence = 0 if err := c.writePacketNoHeader(data); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err) } if err := c.endWriterBuffering(); err != nil { return err @@ -2156,10 +2201,10 @@ func (c *Conn) ptComPrepare(data []byte, clientConn *Conn) error { wdata := make([]byte, len(rdata)) copy(wdata, rdata) if len(wdata) == 0 { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "invalid empty COM_STMT_PREPARE response packet") + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "invalid empty COM_STMT_PREPARE response packet") } if len(wdata) == 0 { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "invalid empty COM_STMT_PREPARE response packet") + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "invalid empty COM_STMT_PREPARE response packet") } c.recycleReadPacket() @@ -2196,7 +2241,7 @@ func (c *Conn) ptComPrepare(data []byte, clientConn *Conn) error { // EOF is only present here if it's not deprecated. _, err := c.readEphemeralPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } c.recycleReadPacket() } @@ -2223,7 +2268,7 @@ func (c *Conn) ptComPrepare(data []byte, clientConn *Conn) error { // EOF is only present here if it's not deprecated. _, err := c.readEphemeralPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } c.recycleReadPacket() } @@ -2253,7 +2298,7 @@ func (c *Conn) ptComStmtExecute(data []byte, clientConn *Conn) error { // This is a new command, need to reset the sequence. 
c.sequence = 0 if err := c.writePacketNoHeader(data); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err) } if err := c.endWriterBuffering(); err != nil { return err @@ -2261,13 +2306,13 @@ func (c *Conn) ptComStmtExecute(data []byte, clientConn *Conn) error { rdata, err := c.readEphemeralPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } wdata := make([]byte, len(rdata)) copy(wdata, rdata) c.recycleReadPacket() if len(wdata) == 0 { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "invalid empty COM_QUERY response packet") + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "invalid empty COM_QUERY response packet") } switch wdata[0] { @@ -2288,10 +2333,10 @@ func (c *Conn) ptComStmtExecute(data []byte, clientConn *Conn) error { colNumber, pos, ok := readLenEncInt(wdata, 0) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "cannot get column number") + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "cannot get column number") } if pos != len(wdata) { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extra data in COM_QUERY response") + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extra data in COM_QUERY response") } if colNumber == 0 { @@ -2321,7 +2366,7 @@ func (c *Conn) ptComStmtExecute(data []byte, clientConn *Conn) error { // EOF is only present here if it's not deprecated. eofData, err := c.readEphemeralPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } if c.isEOFPacket(eofData) { // This is what we expect. 
@@ -2390,7 +2435,7 @@ func (c *Conn) handleComFieldList(handler Handler, data []byte) bool { c.recycleReadPacket() if tableName == "" || err != nil { log.Error("Got unhandled packet from client %v, returning error: %s", c.ConnectionID, data) - if err := c.writeErrorPacket(ERUnknownComError, SSUnknownComError, "command handling not implemented yet: %v", data[0]); err != nil { + if err := c.writeErrorPacket(sqlerror.ERUnknownComError, sqlerror.SSNetError, "command handling not implemented yet: %v", data[0]); err != nil { log.Error("Error writing error packet to client: %v", err) return false } @@ -2469,3 +2514,38 @@ func (c *Conn) handleComFieldList(handler Handler, data []byte) bool { } return true } + +// CancelCtx aborts an existing running query +func (c *Conn) CancelCtx() { + c.mu.Lock() + defer c.mu.Unlock() + if c.cancel != nil { + c.cancel() + } +} + +// UpdateCancelCtx updates the cancel function on the connection. +func (c *Conn) UpdateCancelCtx(cancel context.CancelFunc) { + c.mu.Lock() + defer c.mu.Unlock() + c.cancel = cancel +} + +// MarkForClose marks the connection for close. +func (c *Conn) MarkForClose() { + c.mu.Lock() + defer c.mu.Unlock() + c.closing = true +} + +// IsMarkedForClose return true if the connection should be closed. +func (c *Conn) IsMarkedForClose() bool { + c.mu.Lock() + defer c.mu.Unlock() + return c.closing +} + +// GetTestConn returns a conn for testing purpose only. +func GetTestConn() *Conn { + return newConn(testConn{}) +} diff --git a/go/mysql/conn_fake.go b/go/mysql/conn_fake.go new file mode 100644 index 00000000000..72d944c2f3b --- /dev/null +++ b/go/mysql/conn_fake.go @@ -0,0 +1,83 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mysql + +import ( + "fmt" + "net" + "time" +) + +// testConn to be used for testing only as net.Conn interface implementation. +type testConn struct { + writeToPass []bool + pos int + queryPacket []byte +} + +func (t testConn) Read(b []byte) (n int, err error) { + copy(b, t.queryPacket) + return len(b), nil +} + +func (t testConn) Write(b []byte) (n int, err error) { + t.pos = t.pos + 1 + if t.writeToPass[t.pos] { + return 0, nil + } + return 0, fmt.Errorf("error in writing to connection") +} + +func (t testConn) Close() error { + return nil +} + +func (t testConn) LocalAddr() net.Addr { + panic("implement me") +} + +func (t testConn) RemoteAddr() net.Addr { + return mockAddress{s: "a"} +} + +func (t testConn) SetDeadline(t1 time.Time) error { + panic("implement me") +} + +func (t testConn) SetReadDeadline(t1 time.Time) error { + panic("implement me") +} + +func (t testConn) SetWriteDeadline(t1 time.Time) error { + panic("implement me") +} + +var _ net.Conn = (*testConn)(nil) + +type mockAddress struct { + s string +} + +func (m mockAddress) Network() string { + return m.s +} + +func (m mockAddress) String() string { + return m.s +} + +var _ net.Addr = (*mockAddress)(nil) diff --git a/go/mysql/conn_flaky_test.go b/go/mysql/conn_flaky_test.go index 4111b32872c..538ab1d622e 100644 --- a/go/mysql/conn_flaky_test.go +++ b/go/mysql/conn_flaky_test.go @@ -31,6 +31,8 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/sqlparser" "github.com/stretchr/testify/assert" @@ -304,7 +306,7 
@@ func TestBasicPackets(t *testing.T) { assert.EqualValues(78, packetOk.warnings) // Write error packet, read it, compare. - err = sConn.writeErrorPacket(ERAccessDeniedError, SSAccessDeniedError, "access denied: %v", "reason") + err = sConn.writeErrorPacket(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "access denied: %v", "reason") require.NoError(err) data, err = cConn.ReadPacket() require.NoError(err) @@ -312,10 +314,10 @@ func TestBasicPackets(t *testing.T) { assert.EqualValues(data[0], ErrPacket, "ErrPacket") err = ParseErrorPacket(data) - utils.MustMatch(t, err, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "access denied: reason"), "") + utils.MustMatch(t, err, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "access denied: reason"), "") // Write error packet from error, read it, compare. - err = sConn.writeErrorPacketFromError(NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "access denied")) + err = sConn.writeErrorPacketFromError(sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "access denied")) require.NoError(err) data, err = cConn.ReadPacket() @@ -324,7 +326,7 @@ func TestBasicPackets(t *testing.T) { assert.EqualValues(data[0], ErrPacket, "ErrPacket") err = ParseErrorPacket(data) - utils.MustMatch(t, err, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "access denied"), "") + utils.MustMatch(t, err, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "access denied"), "") // Write EOF packet, read it, compare first byte. Payload is always ignored. err = sConn.writeEOFPacket(0x8912, 0xabba) @@ -840,9 +842,9 @@ func TestMultiStatement(t *testing.T) { // this handler will return results according to the query. 
In case the query contains "error" it will return an error // panic if the query contains "panic" and it will return selectRowsResult in case of any other query - handler := &testRun{t: t, err: NewSQLError(CRMalformedPacket, SSUnknownSQLState, "cannot get column number")} + handler := &testRun{t: t, err: sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "cannot get column number")} res := sConn.handleNextCommand(handler) - //The queries run will be select 1; and select 2; These queries do not return any errors, so the connection should still be open + // The queries run will be select 1; and select 2; These queries do not return any errors, so the connection should still be open require.True(t, res, "we should not break the connection in case of no errors") // Read the result of the query and assert that it is indeed what we want. This will contain the result of the first query. data, more, _, err := cConn.ReadQueryResult(100, true) @@ -992,67 +994,6 @@ func TestConnectionErrorWhileWritingComStmtExecute(t *testing.T) { require.False(t, res, "we should beak the connection in case of error writing error packet") } -var _ Handler = (*testRun)(nil) - -type testConn struct { - writeToPass []bool - pos int - queryPacket []byte -} - -func (t testConn) Read(b []byte) (n int, err error) { - copy(b, t.queryPacket) - return len(b), nil -} - -func (t testConn) Write(b []byte) (n int, err error) { - t.pos = t.pos + 1 - if t.writeToPass[t.pos] { - return 0, nil - } - return 0, fmt.Errorf("error in writing to connection") -} - -func (t testConn) Close() error { - panic("implement me") -} - -func (t testConn) LocalAddr() net.Addr { - panic("implement me") -} - -func (t testConn) RemoteAddr() net.Addr { - return mockAddress{s: "a"} -} - -func (t testConn) SetDeadline(t1 time.Time) error { - panic("implement me") -} - -func (t testConn) SetReadDeadline(t1 time.Time) error { - panic("implement me") -} - -func (t testConn) SetWriteDeadline(t1 time.Time) 
error { - panic("implement me") -} - -var _ net.Conn = (*testConn)(nil) - -type mockAddress struct { - s string -} - -func (m mockAddress) Network() string { - return m.s -} - -func (m mockAddress) String() string { - return m.s -} - -var _ net.Addr = (*mockAddress)(nil) - var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") func randSeq(n int) string { @@ -1177,7 +1118,7 @@ func (t testRun) ComBinlogDump(c *Conn, logFile string, binlogPos uint32) error panic("implement me") } -func (t testRun) ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet GTIDSet) error { +func (t testRun) ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error { panic("implement me") } diff --git a/go/mysql/constants.go b/go/mysql/constants.go index e57097a80cf..37649cbeea4 100644 --- a/go/mysql/constants.go +++ b/go/mysql/constants.go @@ -17,13 +17,6 @@ limitations under the License. package mysql import ( - "strconv" - "strings" - - "golang.org/x/text/encoding" - "golang.org/x/text/encoding/charmap" - "golang.org/x/text/encoding/simplifiedchinese" - "vitess.io/vitess/go/mysql/binlog" ) @@ -281,390 +274,6 @@ const ( AuthSwitchRequestPacket = 0xfe ) -// Error codes for client-side errors. -// Originally found in include/mysql/errmsg.h and -// https://dev.mysql.com/doc/mysql-errors/en/client-error-reference.html -const ( - // CRUnknownError is CR_UNKNOWN_ERROR - CRUnknownError = ErrorCode(2000) - - // CRConnectionError is CR_CONNECTION_ERROR - // This is returned if a connection via a Unix socket fails. - CRConnectionError = ErrorCode(2002) - - // CRConnHostError is CR_CONN_HOST_ERROR - // This is returned if a connection via a TCP socket fails. - CRConnHostError = ErrorCode(2003) - - // CRUnknownHost is CR_UNKNOWN_HOST - // This is returned if the host name cannot be resolved. - CRUnknownHost = ErrorCode(2005) - - // CRServerGone is CR_SERVER_GONE_ERROR. 
- // This is returned if the client tries to send a command but it fails. - CRServerGone = ErrorCode(2006) - - // CRVersionError is CR_VERSION_ERROR - // This is returned if the server versions don't match what we support. - CRVersionError = ErrorCode(2007) - - // CRServerHandshakeErr is CR_SERVER_HANDSHAKE_ERR - CRServerHandshakeErr = ErrorCode(2012) - - // CRServerLost is CR_SERVER_LOST. - // Used when: - // - the client cannot write an initial auth packet. - // - the client cannot read an initial auth packet. - // - the client cannot read a response from the server. - // This happens when a running query is killed. - CRServerLost = ErrorCode(2013) - - // CRCommandsOutOfSync is CR_COMMANDS_OUT_OF_SYNC - // Sent when the streaming calls are not done in the right order. - CRCommandsOutOfSync = ErrorCode(2014) - - // CRNamedPipeStateError is CR_NAMEDPIPESETSTATE_ERROR. - // This is the highest possible number for a connection error. - CRNamedPipeStateError = ErrorCode(2018) - - // CRCantReadCharset is CR_CANT_READ_CHARSET - CRCantReadCharset = ErrorCode(2019) - - // CRSSLConnectionError is CR_SSL_CONNECTION_ERROR - CRSSLConnectionError = ErrorCode(2026) - - // CRMalformedPacket is CR_MALFORMED_PACKET - CRMalformedPacket = ErrorCode(2027) -) - -type ErrorCode uint16 - -func (e ErrorCode) ToString() string { - return strconv.FormatUint(uint64(e), 10) -} - -// Error codes for server-side errors. -// Originally found in include/mysql/mysqld_error.h and -// https://dev.mysql.com/doc/mysql-errors/en/server-error-reference.html -// The below are in sorted order by value, grouped by vterror code they should be bucketed into. -// See above reference for more information on each code. 
-const ( - // Vitess specific errors, (100-999) - ERNotReplica = ErrorCode(100) - - // unknown - ERUnknownError = ErrorCode(1105) - - // internal - ERInternalError = ErrorCode(1815) - - // unimplemented - ERNotSupportedYet = ErrorCode(1235) - ERUnsupportedPS = ErrorCode(1295) - - // resource exhausted - ERDiskFull = ErrorCode(1021) - EROutOfMemory = ErrorCode(1037) - EROutOfSortMemory = ErrorCode(1038) - ERConCount = ErrorCode(1040) - EROutOfResources = ErrorCode(1041) - ERRecordFileFull = ErrorCode(1114) - ERHostIsBlocked = ErrorCode(1129) - ERCantCreateThread = ErrorCode(1135) - ERTooManyDelayedThreads = ErrorCode(1151) - ERNetPacketTooLarge = ErrorCode(1153) - ERTooManyUserConnections = ErrorCode(1203) - ERLockTableFull = ErrorCode(1206) - ERUserLimitReached = ErrorCode(1226) - - // deadline exceeded - ERLockWaitTimeout = ErrorCode(1205) - - // unavailable - ERServerShutdown = ErrorCode(1053) - - // not found - ERDbDropExists = ErrorCode(1008) - ERCantFindFile = ErrorCode(1017) - ERFormNotFound = ErrorCode(1029) - ERKeyNotFound = ErrorCode(1032) - ERBadFieldError = ErrorCode(1054) - ERNoSuchThread = ErrorCode(1094) - ERUnknownTable = ErrorCode(1109) - ERCantFindUDF = ErrorCode(1122) - ERNonExistingGrant = ErrorCode(1141) - ERNoSuchTable = ErrorCode(1146) - ERNonExistingTableGrant = ErrorCode(1147) - ERKeyDoesNotExist = ErrorCode(1176) - - // permissions - ERDBAccessDenied = ErrorCode(1044) - ERAccessDeniedError = ErrorCode(1045) - ERKillDenied = ErrorCode(1095) - ERNoPermissionToCreateUsers = ErrorCode(1211) - ERSpecifiedAccessDenied = ErrorCode(1227) - - // failed precondition - ERNoDb = ErrorCode(1046) - ERNoSuchIndex = ErrorCode(1082) - ERCantDropFieldOrKey = ErrorCode(1091) - ERTableNotLockedForWrite = ErrorCode(1099) - ERTableNotLocked = ErrorCode(1100) - ERTooBigSelect = ErrorCode(1104) - ERNotAllowedCommand = ErrorCode(1148) - ERTooLongString = ErrorCode(1162) - ERDelayedInsertTableLocked = ErrorCode(1165) - ERDupUnique = ErrorCode(1169) - 
ERRequiresPrimaryKey = ErrorCode(1173) - ERCantDoThisDuringAnTransaction = ErrorCode(1179) - ERReadOnlyTransaction = ErrorCode(1207) - ERCannotAddForeign = ErrorCode(1215) - ERNoReferencedRow = ErrorCode(1216) - ERRowIsReferenced = ErrorCode(1217) - ERCantUpdateWithReadLock = ErrorCode(1223) - ERNoDefault = ErrorCode(1230) - ERMasterFatalReadingBinlog = ErrorCode(1236) - EROperandColumns = ErrorCode(1241) - ERSubqueryNo1Row = ErrorCode(1242) - ERUnknownStmtHandler = ErrorCode(1243) - ERWarnDataOutOfRange = ErrorCode(1264) - ERNonUpdateableTable = ErrorCode(1288) - ERFeatureDisabled = ErrorCode(1289) - EROptionPreventsStatement = ErrorCode(1290) - ERDuplicatedValueInType = ErrorCode(1291) - ERSPDoesNotExist = ErrorCode(1305) - ERNoDefaultForField = ErrorCode(1364) - ErSPNotVarArg = ErrorCode(1414) - ERRowIsReferenced2 = ErrorCode(1451) - ErNoReferencedRow2 = ErrorCode(1452) - ERDupIndex = ErrorCode(1831) - ERInnodbReadOnly = ErrorCode(1874) - - // already exists - ERDbCreateExists = ErrorCode(1007) - ERTableExists = ErrorCode(1050) - ERDupEntry = ErrorCode(1062) - ERFileExists = ErrorCode(1086) - ERUDFExists = ErrorCode(1125) - - // aborted - ERGotSignal = ErrorCode(1078) - ERForcingClose = ErrorCode(1080) - ERAbortingConnection = ErrorCode(1152) - ERLockDeadlock = ErrorCode(1213) - - // invalid arg - ERUnknownComError = ErrorCode(1047) - ERBadNullError = ErrorCode(1048) - ERBadDb = ErrorCode(1049) - ERBadTable = ErrorCode(1051) - ERNonUniq = ErrorCode(1052) - ERWrongFieldWithGroup = ErrorCode(1055) - ERWrongGroupField = ErrorCode(1056) - ERWrongSumSelect = ErrorCode(1057) - ERWrongValueCount = ErrorCode(1058) - ERTooLongIdent = ErrorCode(1059) - ERDupFieldName = ErrorCode(1060) - ERDupKeyName = ErrorCode(1061) - ERWrongFieldSpec = ErrorCode(1063) - ERParseError = ErrorCode(1064) - EREmptyQuery = ErrorCode(1065) - ERNonUniqTable = ErrorCode(1066) - ERInvalidDefault = ErrorCode(1067) - ERMultiplePriKey = ErrorCode(1068) - ERTooManyKeys = ErrorCode(1069) - 
ERTooManyKeyParts = ErrorCode(1070) - ERTooLongKey = ErrorCode(1071) - ERKeyColumnDoesNotExist = ErrorCode(1072) - ERBlobUsedAsKey = ErrorCode(1073) - ERTooBigFieldLength = ErrorCode(1074) - ERWrongAutoKey = ErrorCode(1075) - ERWrongFieldTerminators = ErrorCode(1083) - ERBlobsAndNoTerminated = ErrorCode(1084) - ERTextFileNotReadable = ErrorCode(1085) - ERWrongSubKey = ErrorCode(1089) - ERCantRemoveAllFields = ErrorCode(1090) - ERUpdateTableUsed = ErrorCode(1093) - ERNoTablesUsed = ErrorCode(1096) - ERTooBigSet = ErrorCode(1097) - ERBlobCantHaveDefault = ErrorCode(1101) - ERWrongDbName = ErrorCode(1102) - ERWrongTableName = ErrorCode(1103) - ERUnknownProcedure = ErrorCode(1106) - ERWrongParamCountToProcedure = ErrorCode(1107) - ERWrongParametersToProcedure = ErrorCode(1108) - ERFieldSpecifiedTwice = ErrorCode(1110) - ERInvalidGroupFuncUse = ErrorCode(1111) - ERTableMustHaveColumns = ErrorCode(1113) - ERUnknownCharacterSet = ErrorCode(1115) - ERTooManyTables = ErrorCode(1116) - ERTooManyFields = ErrorCode(1117) - ERTooBigRowSize = ErrorCode(1118) - ERWrongOuterJoin = ErrorCode(1120) - ERNullColumnInIndex = ErrorCode(1121) - ERFunctionNotDefined = ErrorCode(1128) - ERWrongValueCountOnRow = ErrorCode(1136) - ERInvalidUseOfNull = ErrorCode(1138) - ERRegexpError = ErrorCode(1139) - ERMixOfGroupFuncAndFields = ErrorCode(1140) - ERIllegalGrantForTable = ErrorCode(1144) - ERSyntaxError = ErrorCode(1149) - ERWrongColumnName = ErrorCode(1166) - ERWrongKeyColumn = ErrorCode(1167) - ERBlobKeyWithoutLength = ErrorCode(1170) - ERPrimaryCantHaveNull = ErrorCode(1171) - ERTooManyRows = ErrorCode(1172) - ERLockOrActiveTransaction = ErrorCode(1192) - ERUnknownSystemVariable = ErrorCode(1193) - ERSetConstantsOnly = ErrorCode(1204) - ERWrongArguments = ErrorCode(1210) - ERWrongUsage = ErrorCode(1221) - ERWrongNumberOfColumnsInSelect = ErrorCode(1222) - ERDupArgument = ErrorCode(1225) - ERLocalVariable = ErrorCode(1228) - ERGlobalVariable = ErrorCode(1229) - ERWrongValueForVar = 
ErrorCode(1231) - ERWrongTypeForVar = ErrorCode(1232) - ERVarCantBeRead = ErrorCode(1233) - ERCantUseOptionHere = ErrorCode(1234) - ERIncorrectGlobalLocalVar = ErrorCode(1238) - ERWrongFKDef = ErrorCode(1239) - ERKeyRefDoNotMatchTableRef = ErrorCode(1240) - ERCyclicReference = ErrorCode(1245) - ERIllegalReference = ErrorCode(1247) - ERDerivedMustHaveAlias = ErrorCode(1248) - ERTableNameNotAllowedHere = ErrorCode(1250) - ERCollationCharsetMismatch = ErrorCode(1253) - ERWarnDataTruncated = ErrorCode(1265) - ERCantAggregate2Collations = ErrorCode(1267) - ERCantAggregate3Collations = ErrorCode(1270) - ERCantAggregateNCollations = ErrorCode(1271) - ERVariableIsNotStruct = ErrorCode(1272) - ERUnknownCollation = ErrorCode(1273) - ERWrongNameForIndex = ErrorCode(1280) - ERWrongNameForCatalog = ErrorCode(1281) - ERBadFTColumn = ErrorCode(1283) - ERTruncatedWrongValue = ErrorCode(1292) - ERTooMuchAutoTimestampCols = ErrorCode(1293) - ERInvalidOnUpdate = ErrorCode(1294) - ERUnknownTimeZone = ErrorCode(1298) - ERInvalidCharacterString = ErrorCode(1300) - ERQueryInterrupted = ErrorCode(1317) - ERTruncatedWrongValueForField = ErrorCode(1366) - ERIllegalValueForType = ErrorCode(1367) - ERDataTooLong = ErrorCode(1406) - ErrWrongValueForType = ErrorCode(1411) - ERNoSuchUser = ErrorCode(1449) - ERForbidSchemaChange = ErrorCode(1450) - ERWrongValue = ErrorCode(1525) - ERDataOutOfRange = ErrorCode(1690) - ERInvalidJSONText = ErrorCode(3140) - ERInvalidJSONTextInParams = ErrorCode(3141) - ERInvalidJSONBinaryData = ErrorCode(3142) - ERInvalidJSONCharset = ErrorCode(3144) - ERInvalidCastToJSON = ErrorCode(3147) - ERJSONValueTooBig = ErrorCode(3150) - ERJSONDocumentTooDeep = ErrorCode(3157) - - // max execution time exceeded - ERQueryTimeout = ErrorCode(3024) - - ErrCantCreateGeometryObject = ErrorCode(1416) - ErrGISDataWrongEndianess = ErrorCode(3055) - ErrNotImplementedForCartesianSRS = ErrorCode(3704) - ErrNotImplementedForProjectedSRS = ErrorCode(3705) - ErrNonPositiveRadius = 
ErrorCode(3706) - - // server not available - ERServerIsntAvailable = ErrorCode(3168) -) - -// Sql states for errors. -// Originally found in include/mysql/sql_state.h -const ( - // SSUnknownSqlstate is ER_SIGNAL_EXCEPTION in - // include/mysql/sql_state.h, but: - // const char *unknown_sqlstate= "HY000" - // in client.c. So using that one. - SSUnknownSQLState = "HY000" - - // SSNetError is network related error - SSNetError = "08S01" - - // SSUnknownComError is ER_UNKNOWN_COM_ERROR - SSUnknownComError = "08S01" - - // SSWrongNumberOfColumns is related to columns error - SSWrongNumberOfColumns = "21000" - - // SSWrongValueCountOnRow is related to columns count mismatch error - SSWrongValueCountOnRow = "21S01" - - // SSDataTooLong is ER_DATA_TOO_LONG - SSDataTooLong = "22001" - - // SSDataOutOfRange is ER_DATA_OUT_OF_RANGE - SSDataOutOfRange = "22003" - - // SSConstraintViolation is constraint violation - SSConstraintViolation = "23000" - - // SSCantDoThisDuringAnTransaction is - // ER_CANT_DO_THIS_DURING_AN_TRANSACTION - SSCantDoThisDuringAnTransaction = "25000" - - // SSAccessDeniedError is ER_ACCESS_DENIED_ERROR - SSAccessDeniedError = "28000" - - // SSNoDB is ER_NO_DB_ERROR - SSNoDB = "3D000" - - // SSLockDeadlock is ER_LOCK_DEADLOCK - SSLockDeadlock = "40001" - - // SSClientError is the state on client errors - SSClientError = "42000" - - // SSDupFieldName is ER_DUP_FIELD_NAME - SSDupFieldName = "42S21" - - // SSBadFieldError is ER_BAD_FIELD_ERROR - SSBadFieldError = "42S22" - - // SSUnknownTable is ER_UNKNOWN_TABLE - SSUnknownTable = "42S02" - - // SSQueryInterrupted is ER_QUERY_INTERRUPTED; - SSQueryInterrupted = "70100" -) - -// CharacterSetEncoding maps a charset name to a golang encoder. -// golang does not support encoders for all MySQL charsets. -// A charset not in this map is unsupported. -// A trivial encoding (e.g. 
utf8) has a `nil` encoder -var CharacterSetEncoding = map[string]encoding.Encoding{ - "cp850": charmap.CodePage850, - "koi8r": charmap.KOI8R, - "latin1": charmap.Windows1252, - "latin2": charmap.ISO8859_2, - "ascii": nil, - "hebrew": charmap.ISO8859_8, - "greek": charmap.ISO8859_7, - "cp1250": charmap.Windows1250, - "gbk": simplifiedchinese.GBK, - "latin5": charmap.ISO8859_9, - "utf8": nil, - "utf8mb3": nil, - "cp866": charmap.CodePage866, - "cp852": charmap.CodePage852, - "latin7": charmap.ISO8859_13, - "utf8mb4": nil, - "cp1251": charmap.Windows1251, - "cp1256": charmap.Windows1256, - "cp1257": charmap.Windows1257, - "binary": nil, -} - // IsNum returns true if a MySQL type is a numeric value. // It is the same as IS_NUM defined in mysql.h. func IsNum(typ uint8) bool { @@ -672,128 +281,3 @@ func IsNum(typ uint8) bool { typ == binlog.TypeYear || typ == binlog.TypeNewDecimal } - -// IsConnErr returns true if the error is a connection error. -func IsConnErr(err error) bool { - if IsTooManyConnectionsErr(err) { - return false - } - if sqlErr, ok := err.(*SQLError); ok { - num := sqlErr.Number() - return (num >= CRUnknownError && num <= CRNamedPipeStateError) || num == ERQueryInterrupted - } - return false -} - -// IsConnLostDuringQuery returns true if the error is a CRServerLost error. -// Happens most commonly when a query is killed MySQL server-side. -func IsConnLostDuringQuery(err error) bool { - if sqlErr, ok := err.(*SQLError); ok { - num := sqlErr.Number() - return (num == CRServerLost) - } - return false -} - -// IsEphemeralError returns true if the error is ephemeral and the caller should -// retry if possible. Note: non-SQL errors are always treated as ephemeral. 
-func IsEphemeralError(err error) bool { - if sqlErr, ok := err.(*SQLError); ok { - en := sqlErr.Number() - switch en { - case - CRConnectionError, - CRConnHostError, - CRMalformedPacket, - CRNamedPipeStateError, - CRServerHandshakeErr, - CRServerGone, - CRServerLost, - CRSSLConnectionError, - CRUnknownError, - CRUnknownHost, - ERCantCreateThread, - ERDiskFull, - ERForcingClose, - ERGotSignal, - ERHostIsBlocked, - ERLockTableFull, - ERInnodbReadOnly, - ERInternalError, - ERLockDeadlock, - ERLockWaitTimeout, - ERQueryTimeout, - EROutOfMemory, - EROutOfResources, - EROutOfSortMemory, - ERQueryInterrupted, - ERServerIsntAvailable, - ERServerShutdown, - ERTooManyUserConnections, - ERUnknownError, - ERUserLimitReached: - return true - default: - return false - } - } - // If it's not an sqlError then we assume it's ephemeral - return true -} - -// IsTooManyConnectionsErr returns true if the error is due to too many connections. -func IsTooManyConnectionsErr(err error) bool { - if sqlErr, ok := err.(*SQLError); ok { - if sqlErr.Number() == CRServerHandshakeErr && strings.Contains(sqlErr.Message, "Too many connections") { - return true - } - } - return false -} - -// IsSchemaApplyError returns true when given error is a MySQL error applying schema change -func IsSchemaApplyError(err error) bool { - merr, isSQLErr := err.(*SQLError) - if !isSQLErr { - return false - } - switch merr.Num { - case - ERDupKeyName, - ERCantDropFieldOrKey, - ERTableExists, - ERDupFieldName: - return true - } - return false -} - -type ReplicationState int32 - -const ( - ReplicationStateUnknown ReplicationState = iota - ReplicationStateStopped - ReplicationStateConnecting - ReplicationStateRunning -) - -// ReplicationStatusToState converts a value you have for the IO thread(s) or SQL -// thread(s) or Group Replication applier thread(s) from MySQL or intermediate -// layers to a mysql.ReplicationState. 
-// on,yes,true == ReplicationStateRunning -// off,no,false == ReplicationStateStopped -// connecting == ReplicationStateConnecting -// anything else == ReplicationStateUnknown -func ReplicationStatusToState(s string) ReplicationState { - // Group Replication uses ON instead of Yes - switch strings.ToLower(s) { - case "yes", "on", "true": - return ReplicationStateRunning - case "no", "off", "false": - return ReplicationStateStopped - case "connecting": - return ReplicationStateConnecting - default: - return ReplicationStateUnknown - } -} diff --git a/go/mysql/constants_test.go b/go/mysql/constants_test.go index 34d8c09ca54..1a54aad4c02 100644 --- a/go/mysql/constants_test.go +++ b/go/mysql/constants_test.go @@ -21,6 +21,8 @@ import ( "testing" "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/mysql/sqlerror" ) func TestIsConnErr(t *testing.T) { @@ -31,23 +33,23 @@ func TestIsConnErr(t *testing.T) { in: errors.New("t"), want: false, }, { - in: NewSQLError(5, "", ""), + in: sqlerror.NewSQLError(5, "", ""), want: false, }, { - in: NewSQLError(CRServerGone, "", ""), + in: sqlerror.NewSQLError(sqlerror.CRServerGone, "", ""), want: true, }, { - in: NewSQLError(CRServerLost, "", ""), + in: sqlerror.NewSQLError(sqlerror.CRServerLost, "", ""), want: true, }, { - in: NewSQLError(ERQueryInterrupted, "", ""), + in: sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, "", ""), want: true, }, { - in: NewSQLError(CRCantReadCharset, "", ""), + in: sqlerror.NewSQLError(sqlerror.CRCantReadCharset, "", ""), want: false, }} for _, tcase := range testcases { - got := IsConnErr(tcase.in) + got := sqlerror.IsConnErr(tcase.in) assert.Equal(t, tcase.want, got, "IsConnErr(%#v): %v, want %v", tcase.in, got, tcase.want) } @@ -61,23 +63,23 @@ func TestIsConnLostDuringQuery(t *testing.T) { in: errors.New("t"), want: false, }, { - in: NewSQLError(5, "", ""), + in: sqlerror.NewSQLError(5, "", ""), want: false, }, { - in: NewSQLError(CRServerGone, "", ""), + in: 
sqlerror.NewSQLError(sqlerror.CRServerGone, "", ""), want: false, }, { - in: NewSQLError(CRServerLost, "", ""), + in: sqlerror.NewSQLError(sqlerror.CRServerLost, "", ""), want: true, }, { - in: NewSQLError(ERQueryInterrupted, "", ""), + in: sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, "", ""), want: false, }, { - in: NewSQLError(CRCantReadCharset, "", ""), + in: sqlerror.NewSQLError(sqlerror.CRCantReadCharset, "", ""), want: false, }} for _, tcase := range testcases { - got := IsConnLostDuringQuery(tcase.in) + got := sqlerror.IsConnLostDuringQuery(tcase.in) assert.Equal(t, tcase.want, got, "IsConnLostDuringQuery(%#v): %v, want %v", tcase.in, got, tcase.want) } diff --git a/go/mysql/datetime/types.go b/go/mysql/datetime/datetime.go similarity index 70% rename from go/mysql/datetime/types.go rename to go/mysql/datetime/datetime.go index 559b44d2d6a..cc1fc92e091 100644 --- a/go/mysql/datetime/types.go +++ b/go/mysql/datetime/datetime.go @@ -17,6 +17,7 @@ limitations under the License. 
package datetime import ( + "encoding/binary" "time" "vitess.io/vitess/go/mysql/decimal" @@ -94,7 +95,7 @@ func (t Time) FormatDecimal() decimal.Decimal { } func (t Time) ToDateTime() (out DateTime) { - return FromStdTime(t.ToStdTime(time.Local)) + return NewDateTimeFromStd(t.ToStdTime(time.Local)) } func (t Time) IsZero() bool { @@ -243,12 +244,12 @@ func (d Date) Hash(h *vthash.Hasher) { h.Write8(d.day) } -func (dt Date) Weekday() time.Weekday { - return dt.ToStdTime(time.Local).Weekday() +func (d Date) Weekday() time.Weekday { + return d.ToStdTime(time.Local).Weekday() } -func (dt Date) Yearday() int { - return dt.ToStdTime(time.Local).YearDay() +func (d Date) Yearday() int { + return d.ToStdTime(time.Local).YearDay() } func (d Date) ISOWeek() (int, int) { @@ -405,7 +406,19 @@ func (t Time) ToDuration() time.Duration { } func (t Time) toStdTime(year int, month time.Month, day int, loc *time.Location) (out time.Time) { - return time.Date(year, month, day, 0, 0, 0, 0, loc).Add(t.ToDuration()) + hours := t.Hour() + minutes := t.Minute() + secs := t.Second() + nsecs := t.Nanosecond() + + if t.Neg() { + hours = -hours + minutes = -minutes + secs = -secs + nsecs = -nsecs + } + + return time.Date(year, month, day, hours, minutes, secs, nsecs, loc) } func (t Time) ToStdTime(loc *time.Location) (out time.Time) { @@ -413,6 +426,20 @@ func (t Time) ToStdTime(loc *time.Location) (out time.Time) { return t.toStdTime(year, month, day, loc) } +func (t Time) AddInterval(itv *Interval, stradd bool) (Time, uint8, bool) { + dt := DateTime{Time: t} + ok := dt.addInterval(itv) + return dt.Time, itv.precision(stradd), ok +} + +func (t Time) toSeconds() int { + tsecs := t.Hour()*secondsPerHour + t.Minute()*secondsPerMinute + t.Second() + if t.Neg() { + return -tsecs + } + return tsecs +} + func (d Date) ToStdTime(loc *time.Location) (out time.Time) { return time.Date(d.Year(), time.Month(d.Month()), d.Day(), 0, 0, 0, 0, loc) } @@ -471,6 +498,12 @@ func (d Date) Compare(d2 Date) int { 
return 0 } +func (d Date) AddInterval(itv *Interval) (Date, bool) { + dt := DateTime{Date: d} + ok := dt.addInterval(itv) + return dt.Date, ok +} + func (dt DateTime) FormatInt64() int64 { d := dt.Round(0) return d.Date.FormatInt64()*1000000 + d.Time.FormatInt64() @@ -493,7 +526,7 @@ func (dt DateTime) Compare(dt2 DateTime) int { case zerodate1 || zerodate2: // if we're comparing a time to a datetime, we need to normalize them // both into datetimes; this normalization is not trivial because negative - // times result in a date change, to let the standard library handle this + // times result in a date change, so let the standard library handle this return dt.ToStdTime(time.Local).Compare(dt2.ToStdTime(time.Local)) } if cmp := dt.Date.Compare(dt2.Date); cmp != 0 { @@ -502,6 +535,11 @@ func (dt DateTime) Compare(dt2 DateTime) int { return dt.Time.Compare(dt2.Time) } +func (dt DateTime) AddInterval(itv *Interval, stradd bool) (DateTime, uint8, bool) { + ok := dt.addInterval(itv) + return dt, itv.precision(stradd), ok +} + func (dt DateTime) Round(p int) (r DateTime) { if dt.Time.nanosecond == 0 { return dt @@ -521,28 +559,138 @@ func (dt DateTime) Round(p int) (r DateTime) { r = dt if n == 1e9 { r.Time.nanosecond = 0 - return FromStdTime(r.ToStdTime(time.Local).Add(time.Second)) + return NewDateTimeFromStd(r.ToStdTime(time.Local).Add(time.Second)) } r.Time.nanosecond = uint32(n) return r } -func FromStdTime(t time.Time) DateTime { +func (dt DateTime) toSeconds() int { + return (dt.Date.Day()-1)*secondsPerDay + dt.Time.toSeconds() +} + +func (dt *DateTime) addInterval(itv *Interval) bool { + switch { + case itv.unit.HasTimeParts(): + if !itv.inRange() { + return false + } + + nsec := dt.Time.Nanosecond() + itv.nsec + sec := dt.toSeconds() + itv.toSeconds() + (nsec / int(time.Second)) + nsec = nsec % int(time.Second) + + if nsec < 0 { + nsec += int(time.Second) + sec-- + } + + days := sec / secondsPerDay + sec -= days * secondsPerDay + + if sec < 0 { + sec += 
secondsPerDay + days-- + } + + dt.Time.nanosecond = uint32(nsec) + dt.Time.second = uint8(sec % secondsPerMinute) + dt.Time.minute = uint8((sec / secondsPerMinute) % secondsPerMinute) + dt.Time.hour = uint16(sec / secondsPerHour) + + daynum := mysqlDayNumber(dt.Date.Year(), dt.Date.Month(), 1) + days + if daynum < 0 || daynum > maxDay { + return false + } + + dt.Date.year, dt.Date.month, dt.Date.day = mysqlDateFromDayNumber(daynum) + return true + + case itv.unit.HasDayParts(): + daynum := mysqlDayNumber(dt.Date.Year(), dt.Date.Month(), dt.Date.Day()) + daynum += itv.day + dt.Date.year, dt.Date.month, dt.Date.day = mysqlDateFromDayNumber(daynum) + return true + + case itv.unit.HasMonthParts(): + months := dt.Date.Year()*12 + itv.year*12 + (dt.Date.Month() - 1) + itv.month + if months < 0 || months >= 120000 { + return false + } + + year := months / 12 + month := (months % 12) + 1 + + dt.Date.year = uint16(year) + dt.Date.month = uint8(month) + + // MySQL quirk: if the original date was in a day that the new month + // doesn't have, the date is offset backwards to the last day of + // the new month. This is the opposite to normal date handling where + // we'd offset days into the next month. + if dim := daysIn(time.Month(month), year); dt.Date.Day() > dim { + dt.Date.day = uint8(dim) + } + return true + + case itv.unit == IntervalYear: + if itv.year > 10000 { + return false + } + + year := dt.Date.Year() + itv.year + dt.Date.year = uint16(year) + + // MySQL quirk: if the original date was Feb 29th on a leap year, and + // the resulting year is not a leap year, the date is offset backwards. + // This is the opposite to what normal date handling does. + if dt.Date.Month() == 2 && dt.Date.Day() == 29 && !isLeap(year) { + dt.Date.day = 28 + } + return true + + default: + panic("unexpected IntervalType") + } +} + +func (dt DateTime) WeightString(dst []byte) []byte { + // This logic does the inverse of what we do in the binlog parser for the datetime2 type. 
+ year, month, day := dt.Date.Year(), dt.Date.Month(), dt.Date.Day() + ymd := uint64(year*13+month)<<5 | uint64(day) + hms := uint64(dt.Time.Hour())<<12 | uint64(dt.Time.Minute())<<6 | uint64(dt.Time.Second()) + raw := (ymd<<17|hms)<<24 + uint64(dt.Time.Nanosecond()/1000) + if dt.Time.Neg() { + raw = -raw + } + + raw = raw ^ (1 << 63) + return binary.BigEndian.AppendUint64(dst, raw) +} + +func NewDateFromStd(t time.Time) Date { year, month, day := t.Date() + return Date{ + year: uint16(year), + month: uint8(month), + day: uint8(day), + } +} + +func NewTimeFromStd(t time.Time) Time { hour, min, sec := t.Clock() nsec := t.Nanosecond() + return Time{ + hour: uint16(hour), + minute: uint8(min), + second: uint8(sec), + nanosecond: uint32(nsec), + } +} +func NewDateTimeFromStd(t time.Time) DateTime { return DateTime{ - Date: Date{ - year: uint16(year), - month: uint8(month), - day: uint8(day), - }, - Time: Time{ - hour: uint16(hour), - minute: uint8(min), - second: uint8(sec), - nanosecond: uint32(nsec), - }, + Date: NewDateFromStd(t), + Time: NewTimeFromStd(t), } } diff --git a/go/mysql/datetime/helpers.go b/go/mysql/datetime/helpers.go index 8b53a376d7c..33d673782fc 100644 --- a/go/mysql/datetime/helpers.go +++ b/go/mysql/datetime/helpers.go @@ -198,7 +198,7 @@ func getnuml(s string, l int) (int, string, bool) { } func getnumn(s string) (int, string, bool) { - if !isDigit(s, 0) { + if len(s) == 0 || !('0' <= s[0] && s[0] <= '9') { return 0, s, false } @@ -229,6 +229,14 @@ var daysBefore = [...]int32{ 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31, } +var daysInMonth = [...]int{ + 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31, +} + +var daysInMonthLeap = [...]int{ + 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31, +} + func daysIn(m time.Month, year int) int { if m == time.February && isLeap(year) { return 29 @@ -240,6 +248,13 @@ func isLeap(year int) bool { return year%4 == 0 && (year%100 != 0 || year%400 == 0) } +func daysInYear(year int) int { + if 
isLeap(year) { + return 366 + } + return 365 +} + func parseNanoseconds[bytes []byte | string](value bytes, nbytes int) (ns int, l int, ok bool) { if value[0] != '.' { return 0, 0, false @@ -268,3 +283,9 @@ func parseNanoseconds[bytes []byte | string](value bytes, nbytes int) (ns int, l return } + +const ( + secondsPerMinute = 60 + secondsPerHour = 60 * secondsPerMinute + secondsPerDay = 24 * secondsPerHour +) diff --git a/go/mysql/datetime/interval.go b/go/mysql/datetime/interval.go new file mode 100644 index 00000000000..21395f2174d --- /dev/null +++ b/go/mysql/datetime/interval.go @@ -0,0 +1,425 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datetime + +import ( + "math" + "math/bits" + "strconv" + "strings" + + "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/mysql/decimal" +) + +// IntervalType represents the temporal elements contained in an Interval. +// Intervals in MySQL can contain more than one temporal element. We define their types as +// a bitset to let us efficiently query the temporal elements that form each interval. +// There are two kinds of IntervalTypes: unary and compound. Unary interval types contain +// a single temporal element (e.g. SECONDS, or DAYS) and hence contain only one bit set. +// Compount interval types are the logical combination of several unary interval types. +type IntervalType uint8 + +// IntervalType constants. 
+const ( + // Unary interval types + IntervalNone IntervalType = 0 + IntervalMicrosecond IntervalType = 1 << 0 + IntervalSecond IntervalType = 1 << 1 + IntervalMinute IntervalType = 1 << 2 + IntervalHour IntervalType = 1 << 3 + IntervalDay IntervalType = 1 << 4 + IntervalMonth IntervalType = 1 << 5 + IntervalYear IntervalType = 1 << 6 + intervalMulti IntervalType = 1 << 7 + + // IntervalWeek and IntervalQuarter are an exception for unary interval types, + // which are not unique temporal elements but instead a modifier on a unary element + // - WEEK is just a count of DAYS multiplied by 7 + // - QUARTER is just a count of MONTHS multiplied by 3 + IntervalWeek = IntervalDay | intervalMulti + IntervalQuarter = IntervalMonth | intervalMulti + + // Compound interval types + IntervalSecondMicrosecond = IntervalSecond | IntervalMicrosecond + IntervalMinuteMicrosecond = IntervalMinute | IntervalSecond | IntervalMicrosecond + IntervalMinuteSecond = IntervalMinute | IntervalSecond + IntervalHourMicrosecond = IntervalHour | IntervalMinute | IntervalSecond | IntervalMicrosecond + IntervalHourSecond = IntervalHour | IntervalMinute | IntervalSecond + IntervalHourMinute = IntervalHour | IntervalMinute + IntervalDayMicrosecond = IntervalDay | IntervalHour | IntervalMinute | IntervalSecond | IntervalMicrosecond + IntervalDaySecond = IntervalDay | IntervalHour | IntervalMinute | IntervalSecond + IntervalDayMinute = IntervalDay | IntervalHour | IntervalMinute + IntervalDayHour = IntervalDay | IntervalHour + IntervalYearMonth = IntervalYear | IntervalMonth +) + +type intervalSetter func(tp *Interval, val int) + +var intervalSet = [...]intervalSetter{ + intervalSetMicrosecond, + intervalSetSecond, + intervalSetMinute, + intervalSetHour, + intervalSetDay, + intervalSetMonth, + intervalSetYear, +} + +// setter returns the setter method for this interval's type. +// If this is a unary interval, it'll return the setter for the interval's unary type. 
+// If this is a compound interval, it'll return the setter for the smallest unary type +// in the interval. +func (itv IntervalType) setter() intervalSetter { + // find the lowest bit set in the interval, this is the smallest unary type + unary := itv & -itv + + // map from an unary interval type to its offset by counting the trailing + // zeroes. e.g. for HOUR(1 << 3), this will return 3, which the position + // for the HOUR setter in intervalSet + return intervalSet[bits.TrailingZeros8(uint8(unary))] +} + +func (itv IntervalType) PartCount() int { + return bits.OnesCount8(uint8(itv & ^intervalMulti)) +} + +func (itv IntervalType) HasTimeParts() bool { + return itv&(IntervalHour|IntervalMinute|IntervalSecond|IntervalMicrosecond) != 0 +} + +func (itv IntervalType) HasDateParts() bool { + return itv&(IntervalYear|IntervalMonth|IntervalDay) != 0 +} + +func (itv IntervalType) HasDayParts() bool { + return (itv & IntervalDay) != 0 +} + +func (itv IntervalType) HasMonthParts() bool { + return (itv & IntervalMonth) != 0 +} + +func (itv IntervalType) NeedsPrecision() bool { + return itv&IntervalMicrosecond != 0 +} + +// ToString returns the type as a string +func (itv IntervalType) ToString() string { + switch itv { + case IntervalYear: + return "year" + case IntervalQuarter: + return "quarter" + case IntervalMonth: + return "month" + case IntervalWeek: + return "week" + case IntervalDay: + return "day" + case IntervalHour: + return "hour" + case IntervalMinute: + return "minute" + case IntervalSecond: + return "second" + case IntervalMicrosecond: + return "microsecond" + case IntervalYearMonth: + return "year_month" + case IntervalDayHour: + return "day_hour" + case IntervalDayMinute: + return "day_minute" + case IntervalDaySecond: + return "day_second" + case IntervalHourMinute: + return "hour_minute" + case IntervalHourSecond: + return "hour_second" + case IntervalMinuteSecond: + return "minute_second" + case IntervalDayMicrosecond: + return "day_microsecond" + case 
IntervalHourMicrosecond: + return "hour_microsecond" + case IntervalMinuteMicrosecond: + return "minute_microsecond" + case IntervalSecondMicrosecond: + return "second_microsecond" + default: + return "[unknown IntervalType]" + } +} + +func intervalSetYear(tp *Interval, val int) { + tp.year = val +} + +func intervalSetMonth(tp *Interval, val int) { + // if the intervalMulti flag is set, this interval expects QUARTERS instead of months + if tp.unit&intervalMulti != 0 { + val = val * 3 + } + tp.month = val +} + +func intervalSetDay(tp *Interval, val int) { + // if the intervalMulti flag is set, this interval expects WEEKS instead of days + if tp.unit&intervalMulti != 0 { + val = val * 7 + } + tp.day = val +} + +func intervalSetHour(tp *Interval, val int) { + tp.hour = val +} + +func intervalSetMinute(tp *Interval, val int) { + tp.min = val +} + +func intervalSetSecond(tp *Interval, val int) { + tp.sec = val +} + +func intervalSetMicrosecond(tp *Interval, val int) { + // if we are setting the Microseconds in this interval, but the + // interval's type isn't explicitly MICROSECOND (i.e. it's an interval + // with several values besides MICROSECOND), the value being passed + // here won't be a fixed number of microseconds, but a fractional part. + // We need to scale it into microseconds. + // E.g. when parsing a SECOND:MICROSECOND value of '1.5', the input + // to this setter will be 5, but the interval doesn't contain 5 microseconds, + // it contains 500000. We perform the scaling into 6 digits using base10 log. + if tp.unit != IntervalMicrosecond { + digits := int(math.Log10(float64(val)) + 1) + val = val * int(math.Pow10(6-digits)) + } + // we store nsec internally, so convert from microseconds to nanoseconds + tp.nsec = val * 1000 +} + +// parseIntervalFields parses a internal string into separate numeric fields. +// The parsing is extremely lax according to MySQL. Any contiguous run of numbers +// is considered a field, and any non-numeric character is ignored. 
+func parseIntervalFields(itv string, negate *bool) (fields []int) { + if len(itv) > 0 && itv[0] == '-' { + *negate = !*negate + itv = itv[1:] + } + + for { + for len(itv) > 0 && !('0' <= itv[0] && itv[0] <= '9') { + itv = itv[1:] + } + if len(itv) == 0 { + break + } + + var n int + for len(itv) > 0 && '0' <= itv[0] && itv[0] <= '9' { + n = n*10 + int(itv[0]-'0') + itv = itv[1:] + } + + fields = append(fields, n) + } + return +} + +type Interval struct { + timeparts + unit IntervalType +} + +func (itv *Interval) Unit() IntervalType { + return itv.unit +} + +const maxDay = 3652424 + +func (itv *Interval) inRange() bool { + if itv.day > maxDay { + return false + } + if itv.hour > maxDay*24 { + return false + } + if itv.min > maxDay*24*60 { + return false + } + if itv.sec > maxDay*24*60*60 { + return false + } + return true +} + +// setFromFields sets the duration of interval from a slice of fields and +// the given interval type. +// This follow's MySQL's behavior: if there are fewer fields than the ones +// we'd expect to see in the interval's type, we pick the RIGHTMOST as +// the values for the interval. +// E.g. 
if our interval type wants HOUR:MINUTE:SECOND and we have [1, 1] +// as input fields, the resulting interval is '1min1sec' +func (itv *Interval) setFromFields(fields []int, unit IntervalType) bool { + parts := unit.PartCount() + if parts == 1 { + unit.setter()(itv, fields[0]) + return true + } + if len(fields) > 3 && parts < 4 { + return false + } + + for f, set := range intervalSet { + if len(fields) == 0 { + break + } + if unit&(1<= 3652500 { + return 0, 0, 0 + } + + year := daynr * 100 / 36525 + leapAdjust := (((year-1)/100 + 1) * 3) / 4 + yday := (daynr - year*365) - (year-1)/4 + leapAdjust + + if diy := daysInYear(year); yday > diy { + yday -= diy + year++ + } + + daycount := daysInMonth + if isLeap(year) { + daycount = daysInMonthLeap + } + for month, dim := range daycount { + if yday <= dim { + return uint16(year), uint8(month + 1), uint8(yday) + } + yday -= dim + } + + panic("unreachable: yday is too large?") +} diff --git a/go/mysql/datetime/mydate_test.go b/go/mysql/datetime/mydate_test.go new file mode 100644 index 00000000000..29ecd2df9d2 --- /dev/null +++ b/go/mysql/datetime/mydate_test.go @@ -0,0 +1,59 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package datetime + +import ( + "encoding/json" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDayNumber(t *testing.T) { + td, err := os.Open("testdata/year_to_daynr.json") + require.NoError(t, err) + defer td.Close() + + var expected []int + err = json.NewDecoder(td).Decode(&expected) + require.NoError(t, err) + + for year, daynr := range expected { + assert.Equal(t, daynr, mysqlDayNumber(year, 1, 1)) + } +} + +func TestDayNumberFields(t *testing.T) { + td, err := os.Open("testdata/daynr_to_date.json") + require.NoError(t, err) + defer td.Close() + + var expected [][4]int + err = json.NewDecoder(td).Decode(&expected) + require.NoError(t, err) + + for _, tc := range expected { + y, m, d := mysqlDateFromDayNumber(tc[0]) + assert.Equal(t, tc[1], int(y)) + assert.Equal(t, tc[2], int(m)) + assert.Equal(t, tc[3], int(d)) + + assert.Equalf(t, tc[0], mysqlDayNumber(tc[1], tc[2], tc[3]), "date %d-%d-%d", tc[1], tc[2], tc[3]) + } +} diff --git a/go/mysql/datetime/parse.go b/go/mysql/datetime/parse.go index 1d94a9ba8a5..e8f17191f4c 100644 --- a/go/mysql/datetime/parse.go +++ b/go/mysql/datetime/parse.go @@ -321,7 +321,7 @@ func ParseDateTimeInt64(i int64) (dt DateTime, ok bool) { if i == 0 { return dt, true } - if t == 0 || d == 0 { + if d == 0 { return dt, false } dt.Time, ok = ParseTimeInt64(t) @@ -399,5 +399,10 @@ func ParseTimeDecimal(d decimal.Decimal, l int32, prec int) (Time, int, bool) { } else { t = t.Round(prec) } + // We only support a maximum of nanosecond precision, + // so if the decimal has any larger precision we truncate it. + if prec > 9 { + prec = 9 + } return t, prec, ok } diff --git a/go/mysql/datetime/parse_test.go b/go/mysql/datetime/parse_test.go index 6b5b489d167..6ed342edfb3 100644 --- a/go/mysql/datetime/parse_test.go +++ b/go/mysql/datetime/parse_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package datetime import ( + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -235,6 +236,7 @@ func TestParseDateTime(t *testing.T) { {input: "20221012111213.123456", output: datetime{2022, 10, 12, 11, 12, 13, 123456000}, l: 6}, {input: "221012111213.123456", output: datetime{2022, 10, 12, 11, 12, 13, 123456000}, l: 6}, {input: "2022101211121321321312", output: datetime{2022, 10, 12, 11, 12, 13, 0}, err: true}, + {input: "3284004416225113510", output: datetime{}, err: true}, {input: "2012-12-31 11:30:45", output: datetime{2012, 12, 31, 11, 30, 45, 0}}, {input: "2012^12^31 11+30+45", output: datetime{2012, 12, 31, 11, 30, 45, 0}}, {input: "2012/12/31 11*30*45", output: datetime{2012, 12, 31, 11, 30, 45, 0}}, @@ -290,3 +292,53 @@ func TestParseDateTime(t *testing.T) { }) } } + +func TestParseDateTimeInt64(t *testing.T) { + type datetime struct { + year int + month int + day int + hour int + minute int + second int + nanosecond int + } + tests := []struct { + input int64 + output datetime + l int + err bool + }{ + {input: 1, output: datetime{}, err: true}, + {input: 20221012000000, output: datetime{2022, 10, 12, 0, 0, 0, 0}}, + {input: 20221012112233, output: datetime{2022, 10, 12, 11, 22, 33, 0}}, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%d", test.input), func(t *testing.T) { + got, ok := ParseDateTimeInt64(test.input) + if test.err { + if !got.IsZero() { + assert.Equal(t, test.output.year, got.Date.Year()) + assert.Equal(t, test.output.month, got.Date.Month()) + assert.Equal(t, test.output.day, got.Date.Day()) + assert.Equal(t, test.output.hour, got.Time.Hour()) + assert.Equal(t, test.output.minute, got.Time.Minute()) + assert.Equal(t, test.output.second, got.Time.Second()) + assert.Equal(t, test.output.nanosecond, got.Time.Nanosecond()) + } + assert.Falsef(t, ok, "did not fail to parse %s", test.input) + return + } + + require.True(t, ok) + assert.Equal(t, test.output.year, got.Date.Year()) + assert.Equal(t, test.output.month, 
got.Date.Month()) + assert.Equal(t, test.output.day, got.Date.Day()) + assert.Equal(t, test.output.hour, got.Time.Hour()) + assert.Equal(t, test.output.minute, got.Time.Minute()) + assert.Equal(t, test.output.second, got.Time.Second()) + assert.Equal(t, test.output.nanosecond, got.Time.Nanosecond()) + }) + } +} diff --git a/go/mysql/datetime/testdata/daynr_to_date.json b/go/mysql/datetime/testdata/daynr_to_date.json new file mode 100644 index 00000000000..3bb175d97e2 --- /dev/null +++ b/go/mysql/datetime/testdata/daynr_to_date.json @@ -0,0 +1,8188 @@ +[[456, 1, 4, 1], +[559, 1, 7, 13], +[572, 1, 7, 26], +[618, 1, 9, 10], +[785, 2, 2, 24], +[911, 2, 6, 30], +[1067, 2, 12, 3], +[1173, 3, 3, 19], +[1214, 3, 4, 29], +[1402, 3, 11, 3], +[1518, 4, 2, 27], +[1680, 4, 8, 7], +[1706, 4, 9, 2], +[1805, 4, 12, 10], +[1829, 5, 1, 3], +[1861, 5, 2, 4], +[1891, 5, 3, 6], +[1983, 5, 6, 6], +[2043, 5, 8, 5], +[2223, 6, 2, 1], +[2260, 6, 3, 10], +[2367, 6, 6, 25], +[2449, 6, 9, 15], +[2533, 6, 12, 8], +[2682, 7, 5, 6], +[2769, 7, 8, 1], +[2881, 7, 11, 21], +[2962, 8, 2, 10], +[3014, 8, 4, 2], +[3206, 8, 10, 11], +[3253, 8, 11, 27], +[3416, 9, 5, 9], +[3422, 9, 5, 15], +[3601, 9, 11, 10], +[3637, 9, 12, 16], +[3794, 10, 5, 22], +[3885, 10, 8, 21], +[3966, 10, 11, 10], +[3978, 10, 11, 22], +[4012, 10, 12, 26], +[4087, 11, 3, 11], +[4101, 11, 3, 25], +[4138, 11, 5, 1], +[4317, 11, 10, 27], +[4421, 12, 2, 8], +[4440, 12, 2, 27], +[4450, 12, 3, 8], +[4615, 12, 8, 20], +[4671, 12, 10, 15], +[4855, 13, 4, 17], +[4879, 13, 5, 11], +[5057, 13, 11, 5], +[5257, 14, 5, 24], +[5272, 14, 6, 8], +[5313, 14, 7, 19], +[5323, 14, 7, 29], +[5409, 14, 10, 23], +[5525, 15, 2, 16], +[5656, 15, 6, 27], +[5829, 15, 12, 17], +[5914, 16, 3, 11], +[6010, 16, 6, 15], +[6104, 16, 9, 17], +[6241, 17, 2, 1], +[6303, 17, 4, 4], +[6397, 17, 7, 7], +[6516, 17, 11, 3], +[6695, 18, 5, 1], +[6833, 18, 9, 16], +[6994, 19, 2, 24], +[7061, 19, 5, 2], +[7110, 19, 6, 20], +[7229, 19, 10, 17], +[7314, 20, 1, 10], +[7374, 
20, 3, 10], +[7462, 20, 6, 6], +[7555, 20, 9, 7], +[7736, 21, 3, 7], +[7862, 21, 7, 11], +[7926, 21, 9, 13], +[7938, 21, 9, 25], +[8021, 21, 12, 17], +[8087, 22, 2, 21], +[8159, 22, 5, 4], +[8218, 22, 7, 2], +[8233, 22, 7, 17], +[8337, 22, 10, 29], +[8488, 23, 3, 29], +[8612, 23, 7, 31], +[8677, 23, 10, 4], +[8705, 23, 11, 1], +[8783, 24, 1, 18], +[8815, 24, 2, 19], +[8944, 24, 6, 27], +[9077, 24, 11, 7], +[9218, 25, 3, 28], +[9376, 25, 9, 2], +[9512, 26, 1, 16], +[9628, 26, 5, 12], +[9764, 26, 9, 25], +[9862, 27, 1, 1], +[10027, 27, 6, 15], +[10161, 27, 10, 27], +[10273, 28, 2, 16], +[10373, 28, 5, 26], +[10542, 28, 11, 11], +[10700, 29, 4, 18], +[10875, 29, 10, 10], +[10995, 30, 2, 7], +[11121, 30, 6, 13], +[11157, 30, 7, 19], +[11314, 30, 12, 23], +[11498, 31, 6, 25], +[11603, 31, 10, 8], +[11779, 32, 4, 1], +[11931, 32, 8, 31], +[12026, 32, 12, 4], +[12063, 33, 1, 10], +[12127, 33, 3, 15], +[12306, 33, 9, 10], +[12337, 33, 10, 11], +[12491, 34, 3, 14], +[12657, 34, 8, 27], +[12832, 35, 2, 18], +[12877, 35, 4, 4], +[13005, 35, 8, 10], +[13038, 35, 9, 12], +[13198, 36, 2, 19], +[13346, 36, 7, 16], +[13532, 37, 1, 18], +[13701, 37, 7, 6], +[13727, 37, 8, 1], +[13829, 37, 11, 11], +[13849, 37, 12, 1], +[13969, 38, 3, 31], +[14112, 38, 8, 21], +[14173, 38, 10, 21], +[14177, 38, 10, 25], +[14363, 39, 4, 29], +[14513, 39, 9, 26], +[14678, 40, 3, 9], +[14846, 40, 8, 24], +[15036, 41, 3, 2], +[15159, 41, 7, 3], +[15266, 41, 10, 18], +[15450, 42, 4, 20], +[15618, 42, 10, 5], +[15683, 42, 12, 9], +[15754, 43, 2, 18], +[15883, 43, 6, 27], +[16058, 43, 12, 19], +[16082, 44, 1, 12], +[16198, 44, 5, 7], +[16375, 44, 10, 31], +[16467, 45, 1, 31], +[16486, 45, 2, 19], +[16511, 45, 3, 16], +[16642, 45, 7, 25], +[16751, 45, 11, 11], +[16949, 46, 5, 28], +[17077, 46, 10, 3], +[17116, 46, 11, 11], +[17206, 47, 2, 9], +[17388, 47, 8, 10], +[17562, 48, 1, 31], +[17741, 48, 7, 28], +[17873, 48, 12, 7], +[17963, 49, 3, 7], +[17983, 49, 3, 27], +[18069, 49, 6, 21], +[18144, 49, 9, 4], 
+[18343, 50, 3, 22], +[18387, 50, 5, 5], +[18519, 50, 9, 14], +[18645, 51, 1, 18], +[18729, 51, 4, 12], +[18742, 51, 4, 25], +[18839, 51, 7, 31], +[18890, 51, 9, 20], +[19027, 52, 2, 4], +[19042, 52, 2, 19], +[19128, 52, 5, 15], +[19296, 52, 10, 30], +[19416, 53, 2, 27], +[19566, 53, 7, 27], +[19624, 53, 9, 23], +[19685, 53, 11, 23], +[19821, 54, 4, 8], +[19909, 54, 7, 5], +[20006, 54, 10, 10], +[20139, 55, 2, 20], +[20336, 55, 9, 5], +[20505, 56, 2, 21], +[20655, 56, 7, 20], +[20841, 57, 1, 22], +[20906, 57, 3, 28], +[20931, 57, 4, 22], +[21114, 57, 10, 22], +[21158, 57, 12, 5], +[21246, 58, 3, 3], +[21414, 58, 8, 18], +[21528, 58, 12, 10], +[21550, 59, 1, 1], +[21582, 59, 2, 2], +[21731, 59, 7, 1], +[21903, 59, 12, 20], +[22062, 60, 5, 27], +[22128, 60, 8, 1], +[22212, 60, 10, 24], +[22411, 61, 5, 11], +[22568, 61, 10, 15], +[22591, 61, 11, 7], +[22647, 62, 1, 2], +[22710, 62, 3, 6], +[22840, 62, 7, 14], +[22850, 62, 7, 24], +[23046, 63, 2, 5], +[23231, 63, 8, 9], +[23248, 63, 8, 26], +[23273, 63, 9, 20], +[23347, 63, 12, 3], +[23444, 64, 3, 9], +[23482, 64, 4, 16], +[23682, 64, 11, 2], +[23806, 65, 3, 6], +[23957, 65, 8, 4], +[24061, 65, 11, 16], +[24120, 66, 1, 14], +[24160, 66, 2, 23], +[24298, 66, 7, 11], +[24450, 66, 12, 10], +[24567, 67, 4, 6], +[24704, 67, 8, 21], +[24773, 67, 10, 29], +[24973, 68, 5, 16], +[25079, 68, 8, 30], +[25170, 68, 11, 29], +[25350, 69, 5, 28], +[25496, 69, 10, 21], +[25611, 70, 2, 13], +[25732, 70, 6, 14], +[25782, 70, 8, 3], +[25937, 71, 1, 5], +[26109, 71, 6, 26], +[26300, 72, 1, 3], +[26319, 72, 1, 22], +[26325, 72, 1, 28], +[26448, 72, 5, 30], +[26627, 72, 11, 25], +[26752, 73, 3, 30], +[26775, 73, 4, 22], +[26836, 73, 6, 22], +[26870, 73, 7, 26], +[26967, 73, 10, 31], +[27013, 73, 12, 16], +[27028, 73, 12, 31], +[27164, 74, 5, 16], +[27350, 74, 11, 18], +[27412, 75, 1, 19], +[27503, 75, 4, 20], +[27692, 75, 10, 26], +[27700, 75, 11, 3], +[27875, 76, 4, 26], +[27937, 76, 6, 27], +[27984, 76, 8, 13], +[28058, 76, 10, 26], 
+[28217, 77, 4, 3], +[28264, 77, 5, 20], +[28436, 77, 11, 8], +[28620, 78, 5, 11], +[28741, 78, 9, 9], +[28878, 79, 1, 24], +[28916, 79, 3, 3], +[29032, 79, 6, 27], +[29084, 79, 8, 18], +[29096, 79, 8, 30], +[29136, 79, 10, 9], +[29143, 79, 10, 16], +[29218, 79, 12, 30], +[29335, 80, 4, 25], +[29361, 80, 5, 21], +[29388, 80, 6, 17], +[29468, 80, 9, 5], +[29482, 80, 9, 19], +[29665, 81, 3, 21], +[29666, 81, 3, 22], +[29672, 81, 3, 28], +[29759, 81, 6, 23], +[29861, 81, 10, 3], +[30050, 82, 4, 10], +[30101, 82, 5, 31], +[30293, 82, 12, 9], +[30338, 83, 1, 23], +[30513, 83, 7, 17], +[30667, 83, 12, 18], +[30859, 84, 6, 27], +[30925, 84, 9, 1], +[31078, 85, 2, 1], +[31101, 85, 2, 24], +[31134, 85, 3, 29], +[31284, 85, 8, 26], +[31400, 85, 12, 20], +[31470, 86, 2, 28], +[31628, 86, 8, 5], +[31803, 87, 1, 27], +[31850, 87, 3, 15], +[32031, 87, 9, 12], +[32093, 87, 11, 13], +[32293, 88, 5, 31], +[32411, 88, 9, 26], +[32565, 89, 2, 27], +[32710, 89, 7, 22], +[32889, 90, 1, 17], +[33066, 90, 7, 13], +[33243, 91, 1, 6], +[33370, 91, 5, 13], +[33462, 91, 8, 13], +[33619, 92, 1, 17], +[33730, 92, 5, 7], +[33874, 92, 9, 28], +[33925, 92, 11, 18], +[34037, 93, 3, 10], +[34229, 93, 9, 18], +[34411, 94, 3, 19], +[34448, 94, 4, 25], +[34602, 94, 9, 26], +[34747, 95, 2, 18], +[34914, 95, 8, 4], +[35003, 95, 11, 1], +[35106, 96, 2, 12], +[35159, 96, 4, 5], +[35182, 96, 4, 28], +[35382, 96, 11, 14], +[35387, 96, 11, 19], +[35555, 97, 5, 6], +[35636, 97, 7, 26], +[35710, 97, 10, 8], +[35876, 98, 3, 23], +[36055, 98, 9, 18], +[36182, 99, 1, 23], +[36336, 99, 6, 26], +[36510, 99, 12, 17], +[36523, 99, 12, 30], +[36646, 100, 5, 2], +[36803, 100, 10, 6], +[36881, 100, 12, 23], +[37053, 101, 6, 13], +[37159, 101, 9, 27], +[37316, 102, 3, 3], +[37388, 102, 5, 14], +[37545, 102, 10, 18], +[37624, 103, 1, 5], +[37666, 103, 2, 16], +[37705, 103, 3, 27], +[37809, 103, 7, 9], +[37836, 103, 8, 5], +[37868, 103, 9, 6], +[38031, 104, 2, 16], +[38175, 104, 7, 9], +[38269, 104, 10, 11], +[38361, 105, 
1, 11], +[38483, 105, 5, 13], +[38642, 105, 10, 19], +[38714, 105, 12, 30], +[38795, 106, 3, 21], +[38893, 106, 6, 27], +[38983, 106, 9, 25], +[39116, 107, 2, 5], +[39262, 107, 7, 1], +[39336, 107, 9, 13], +[39456, 108, 1, 11], +[39521, 108, 3, 16], +[39529, 108, 3, 24], +[39719, 108, 9, 30], +[39888, 109, 3, 18], +[39988, 109, 6, 26], +[40092, 109, 10, 8], +[40152, 109, 12, 7], +[40244, 110, 3, 9], +[40410, 110, 8, 22], +[40480, 110, 10, 31], +[40508, 110, 11, 28], +[40514, 110, 12, 4], +[40662, 111, 5, 1], +[40850, 111, 11, 5], +[40854, 111, 11, 9], +[40951, 112, 2, 14], +[41039, 112, 5, 12], +[41166, 112, 9, 16], +[41269, 112, 12, 28], +[41427, 113, 6, 4], +[41575, 113, 10, 30], +[41633, 113, 12, 27], +[41641, 114, 1, 4], +[41682, 114, 2, 14], +[41694, 114, 2, 26], +[41774, 114, 5, 17], +[41890, 114, 9, 10], +[41893, 114, 9, 13], +[41933, 114, 10, 23], +[41963, 114, 11, 22], +[42012, 115, 1, 10], +[42013, 115, 1, 11], +[42117, 115, 4, 25], +[42164, 115, 6, 11], +[42331, 115, 11, 25], +[42481, 116, 4, 23], +[42521, 116, 6, 2], +[42525, 116, 6, 6], +[42557, 116, 7, 8], +[42604, 116, 8, 24], +[42612, 116, 9, 1], +[42744, 117, 1, 11], +[42898, 117, 6, 14], +[42914, 117, 6, 30], +[42961, 117, 8, 16], +[43134, 118, 2, 5], +[43222, 118, 5, 4], +[43346, 118, 9, 5], +[43386, 118, 10, 15], +[43495, 119, 2, 1], +[43683, 119, 8, 8], +[43812, 119, 12, 15], +[43950, 120, 5, 1], +[44027, 120, 7, 17], +[44156, 120, 11, 23], +[44242, 121, 2, 17], +[44379, 121, 7, 4], +[44392, 121, 7, 17], +[44588, 122, 1, 29], +[44763, 122, 7, 23], +[44828, 122, 9, 26], +[44837, 122, 10, 5], +[44905, 122, 12, 12], +[45032, 123, 4, 18], +[45066, 123, 5, 22], +[45114, 123, 7, 9], +[45243, 123, 11, 15], +[45268, 123, 12, 10], +[45286, 123, 12, 28], +[45436, 124, 5, 26], +[45571, 124, 10, 8], +[45572, 124, 10, 9], +[45713, 125, 2, 27], +[45771, 125, 4, 26], +[45919, 125, 9, 21], +[46098, 126, 3, 19], +[46221, 126, 7, 20], +[46403, 127, 1, 18], +[46492, 127, 4, 17], +[46599, 127, 8, 2], +[46604, 127, 
8, 7], +[46613, 127, 8, 16], +[46672, 127, 10, 14], +[46773, 128, 1, 23], +[46825, 128, 3, 15], +[46848, 128, 4, 7], +[47037, 128, 10, 13], +[47075, 128, 11, 20], +[47225, 129, 4, 19], +[47235, 129, 4, 29], +[47401, 129, 10, 12], +[47567, 130, 3, 27], +[47735, 130, 9, 11], +[47768, 130, 10, 14], +[47873, 131, 1, 27], +[47977, 131, 5, 11], +[48124, 131, 10, 5], +[48274, 132, 3, 3], +[48351, 132, 5, 19], +[48373, 132, 6, 10], +[48474, 132, 9, 19], +[48497, 132, 10, 12], +[48619, 133, 2, 11], +[48631, 133, 2, 23], +[48745, 133, 6, 17], +[48793, 133, 8, 4], +[48935, 133, 12, 24], +[49011, 134, 3, 10], +[49058, 134, 4, 26], +[49108, 134, 6, 15], +[49174, 134, 8, 20], +[49242, 134, 10, 27], +[49278, 134, 12, 2], +[49366, 135, 2, 28], +[49435, 135, 5, 8], +[49606, 135, 10, 26], +[49786, 136, 4, 23], +[49931, 136, 9, 15], +[50044, 137, 1, 6], +[50127, 137, 3, 30], +[50258, 137, 8, 8], +[50315, 137, 10, 4], +[50438, 138, 2, 4], +[50572, 138, 6, 18], +[50630, 138, 8, 15], +[50633, 138, 8, 18], +[50799, 139, 1, 31], +[50981, 139, 8, 1], +[51027, 139, 9, 16], +[51084, 139, 11, 12], +[51200, 140, 3, 7], +[51229, 140, 4, 5], +[51389, 140, 9, 12], +[51464, 140, 11, 26], +[51654, 141, 6, 4], +[51801, 141, 10, 29], +[51925, 142, 3, 2], +[52074, 142, 7, 29], +[52191, 142, 11, 23], +[52284, 143, 2, 24], +[52431, 143, 7, 21], +[52504, 143, 10, 2], +[52587, 143, 12, 24], +[52661, 144, 3, 7], +[52728, 144, 5, 13], +[52771, 144, 6, 25], +[52914, 144, 11, 15], +[52978, 145, 1, 18], +[53001, 145, 2, 10], +[53085, 145, 5, 5], +[53244, 145, 10, 11], +[53296, 145, 12, 2], +[53330, 146, 1, 5], +[53372, 146, 2, 16], +[53385, 146, 3, 1], +[53401, 146, 3, 17], +[53502, 146, 6, 26], +[53516, 146, 7, 10], +[53574, 146, 9, 6], +[53654, 146, 11, 25], +[53702, 147, 1, 12], +[53846, 147, 6, 5], +[53892, 147, 7, 21], +[54067, 148, 1, 12], +[54105, 148, 2, 19], +[54304, 148, 9, 5], +[54374, 148, 11, 14], +[54559, 149, 5, 18], +[54586, 149, 6, 14], +[54745, 149, 11, 20], +[54843, 150, 2, 26], +[54932, 
150, 5, 26], +[54937, 150, 5, 31], +[54972, 150, 7, 5], +[54981, 150, 7, 14], +[54991, 150, 7, 24], +[55008, 150, 8, 10], +[55063, 150, 10, 4], +[55095, 150, 11, 5], +[55279, 151, 5, 8], +[55308, 151, 6, 6], +[55312, 151, 6, 10], +[55406, 151, 9, 12], +[55441, 151, 10, 17], +[55491, 151, 12, 6], +[55590, 152, 3, 14], +[55756, 152, 8, 27], +[55776, 152, 9, 16], +[55834, 152, 11, 13], +[55935, 153, 2, 22], +[55986, 153, 4, 14], +[56105, 153, 8, 11], +[56139, 153, 9, 14], +[56315, 154, 3, 9], +[56343, 154, 4, 6], +[56406, 154, 6, 8], +[56550, 154, 10, 30], +[56706, 155, 4, 4], +[56906, 155, 10, 21], +[56964, 155, 12, 18], +[57118, 156, 5, 20], +[57256, 156, 10, 5], +[57419, 157, 3, 17], +[57474, 157, 5, 11], +[57608, 157, 9, 22], +[57719, 158, 1, 11], +[57725, 158, 1, 17], +[57814, 158, 4, 16], +[57878, 158, 6, 19], +[57881, 158, 6, 22], +[58019, 158, 11, 7], +[58026, 158, 11, 14], +[58084, 159, 1, 11], +[58105, 159, 2, 1], +[58135, 159, 3, 3], +[58292, 159, 8, 7], +[58483, 160, 2, 14], +[58557, 160, 4, 28], +[58639, 160, 7, 19], +[58665, 160, 8, 14], +[58812, 161, 1, 8], +[58822, 161, 1, 18], +[58961, 161, 6, 6], +[59055, 161, 9, 8], +[59235, 162, 3, 7], +[59304, 162, 5, 15], +[59372, 162, 7, 22], +[59407, 162, 8, 26], +[59488, 162, 11, 15], +[59627, 163, 4, 3], +[59690, 163, 6, 5], +[59870, 163, 12, 2], +[59876, 163, 12, 8], +[59954, 164, 2, 24], +[60106, 164, 7, 25], +[60153, 164, 9, 10], +[60179, 164, 10, 6], +[60315, 165, 2, 19], +[60353, 165, 3, 29], +[60517, 165, 9, 9], +[60615, 165, 12, 16], +[60668, 166, 2, 7], +[60729, 166, 4, 9], +[60760, 166, 5, 10], +[60766, 166, 5, 16], +[60876, 166, 9, 3], +[60948, 166, 11, 14], +[60987, 166, 12, 23], +[61185, 167, 7, 9], +[61341, 167, 12, 12], +[61521, 168, 6, 9], +[61576, 168, 8, 3], +[61714, 168, 12, 19], +[61836, 169, 4, 20], +[61890, 169, 6, 13], +[62060, 169, 11, 30], +[62153, 170, 3, 3], +[62239, 170, 5, 28], +[62305, 170, 8, 2], +[62352, 170, 9, 18], +[62444, 170, 12, 19], +[62625, 171, 6, 18], +[62628, 171, 6, 
21], +[62782, 171, 11, 22], +[62793, 171, 12, 3], +[62808, 171, 12, 18], +[62888, 172, 3, 7], +[62901, 172, 3, 20], +[62948, 172, 5, 6], +[63060, 172, 8, 26], +[63242, 173, 2, 24], +[63425, 173, 8, 26], +[63587, 174, 2, 4], +[63733, 174, 6, 30], +[63752, 174, 7, 19], +[63927, 175, 1, 10], +[63970, 175, 2, 22], +[64083, 175, 6, 15], +[64176, 175, 9, 16], +[64214, 175, 10, 24], +[64361, 176, 3, 19], +[64497, 176, 8, 2], +[64528, 176, 9, 2], +[64721, 177, 3, 14], +[64783, 177, 5, 15], +[64914, 177, 9, 23], +[64926, 177, 10, 5], +[65059, 178, 2, 15], +[65107, 178, 4, 4], +[65209, 178, 7, 15], +[65377, 178, 12, 30], +[65489, 179, 4, 21], +[65532, 179, 6, 3], +[65596, 179, 8, 6], +[65784, 180, 2, 10], +[65917, 180, 6, 22], +[65995, 180, 9, 8], +[66102, 180, 12, 24], +[66228, 181, 4, 29], +[66232, 181, 5, 3], +[66296, 181, 7, 6], +[66429, 181, 11, 16], +[66529, 182, 2, 24], +[66708, 182, 8, 22], +[66846, 183, 1, 7], +[66911, 183, 3, 13], +[66977, 183, 5, 18], +[67144, 183, 11, 1], +[67165, 183, 11, 22], +[67289, 184, 3, 25], +[67305, 184, 4, 10], +[67425, 184, 8, 8], +[67517, 184, 11, 8], +[67706, 185, 5, 16], +[67715, 185, 5, 25], +[67885, 185, 11, 11], +[68064, 186, 5, 9], +[68194, 186, 9, 16], +[68385, 187, 3, 26], +[68545, 187, 9, 2], +[68680, 188, 1, 15], +[68687, 188, 1, 22], +[68852, 188, 7, 5], +[68943, 188, 10, 4], +[68948, 188, 10, 9], +[69103, 189, 3, 13], +[69160, 189, 5, 9], +[69167, 189, 5, 16], +[69236, 189, 7, 24], +[69254, 189, 8, 11], +[69400, 190, 1, 4], +[69489, 190, 4, 3], +[69573, 190, 6, 26], +[69726, 190, 11, 26], +[69803, 191, 2, 11], +[69806, 191, 2, 14], +[69830, 191, 3, 10], +[70029, 191, 9, 25], +[70211, 192, 3, 25], +[70404, 192, 10, 4], +[70529, 193, 2, 6], +[70715, 193, 8, 11], +[70774, 193, 10, 9], +[70883, 194, 1, 26], +[71004, 194, 5, 27], +[71022, 194, 6, 14], +[71067, 194, 7, 29], +[71172, 194, 11, 11], +[71286, 195, 3, 5], +[71466, 195, 9, 1], +[71575, 195, 12, 19], +[71616, 196, 1, 29], +[71618, 196, 1, 31], +[71812, 196, 8, 12], 
+[71836, 196, 9, 5], +[72026, 197, 3, 14], +[72157, 197, 7, 23], +[72163, 197, 7, 29], +[72351, 198, 2, 2], +[72466, 198, 5, 28], +[72549, 198, 8, 19], +[72578, 198, 9, 17], +[72620, 198, 10, 29], +[72745, 199, 3, 3], +[72859, 199, 6, 25], +[72964, 199, 10, 8], +[73117, 200, 3, 10], +[73247, 200, 7, 18], +[73252, 200, 7, 23], +[73418, 201, 1, 5], +[73440, 201, 1, 27], +[73460, 201, 2, 16], +[73543, 201, 5, 10], +[73599, 201, 7, 5], +[73759, 201, 12, 12], +[73783, 202, 1, 5], +[73959, 202, 6, 30], +[74041, 202, 9, 20], +[74079, 202, 10, 28], +[74095, 202, 11, 13], +[74277, 203, 5, 14], +[74459, 203, 11, 12], +[74476, 203, 11, 29], +[74559, 204, 2, 20], +[74650, 204, 5, 21], +[74815, 204, 11, 2], +[74829, 204, 11, 16], +[74922, 205, 2, 17], +[75008, 205, 5, 14], +[75164, 205, 10, 17], +[75352, 206, 4, 23], +[75484, 206, 9, 2], +[75592, 206, 12, 19], +[75605, 207, 1, 1], +[75771, 207, 6, 16], +[75843, 207, 8, 27], +[75857, 207, 9, 10], +[75905, 207, 10, 28], +[75950, 207, 12, 12], +[76098, 208, 5, 8], +[76206, 208, 8, 24], +[76383, 209, 2, 17], +[76523, 209, 7, 7], +[76680, 209, 12, 11], +[76728, 210, 1, 28], +[76925, 210, 8, 13], +[77075, 211, 1, 10], +[77263, 211, 7, 17], +[77453, 212, 1, 23], +[77460, 212, 1, 30], +[77486, 212, 2, 25], +[77487, 212, 2, 26], +[77544, 212, 4, 23], +[77587, 212, 6, 5], +[77711, 212, 10, 7], +[77730, 212, 10, 26], +[77771, 212, 12, 6], +[77875, 213, 3, 20], +[77885, 213, 3, 30], +[77947, 213, 5, 31], +[78112, 213, 11, 12], +[78192, 214, 1, 31], +[78288, 214, 5, 7], +[78382, 214, 8, 9], +[78522, 214, 12, 27], +[78604, 215, 3, 19], +[78778, 215, 9, 9], +[78787, 215, 9, 18], +[78972, 216, 3, 21], +[78975, 216, 3, 24], +[79175, 216, 10, 10], +[79249, 216, 12, 23], +[79306, 217, 2, 18], +[79489, 217, 8, 20], +[79676, 218, 2, 23], +[79762, 218, 5, 20], +[79857, 218, 8, 23], +[79961, 218, 12, 5], +[80134, 219, 5, 27], +[80236, 219, 9, 6], +[80321, 219, 11, 30], +[80472, 220, 4, 29], +[80541, 220, 7, 7], +[80657, 220, 10, 31], +[80830, 221, 4, 
22], +[80985, 221, 9, 24], +[81176, 222, 4, 3], +[81360, 222, 10, 4], +[81378, 222, 10, 22], +[81409, 222, 11, 22], +[81593, 223, 5, 25], +[81786, 223, 12, 4], +[81965, 224, 5, 31], +[81979, 224, 6, 14], +[81999, 224, 7, 4], +[82070, 224, 9, 13], +[82130, 224, 11, 12], +[82276, 225, 4, 7], +[82413, 225, 8, 22], +[82578, 226, 2, 3], +[82722, 226, 6, 27], +[82730, 226, 7, 5], +[82734, 226, 7, 9], +[82844, 226, 10, 27], +[82955, 227, 2, 15], +[83127, 227, 8, 6], +[83254, 227, 12, 11], +[83351, 228, 3, 17], +[83503, 228, 8, 16], +[83667, 229, 1, 27], +[83822, 229, 7, 1], +[83927, 229, 10, 14], +[84028, 230, 1, 23], +[84114, 230, 4, 19], +[84149, 230, 5, 24], +[84226, 230, 8, 9], +[84354, 230, 12, 15], +[84489, 231, 4, 29], +[84507, 231, 5, 17], +[84684, 231, 11, 10], +[84763, 232, 1, 28], +[84845, 232, 4, 19], +[85006, 232, 9, 27], +[85018, 232, 10, 9], +[85155, 233, 2, 23], +[85290, 233, 7, 8], +[85486, 234, 1, 20], +[85528, 234, 3, 3], +[85670, 234, 7, 23], +[85710, 234, 9, 1], +[85782, 234, 11, 12], +[85830, 234, 12, 30], +[85992, 235, 6, 10], +[86076, 235, 9, 2], +[86099, 235, 9, 25], +[86281, 236, 3, 25], +[86316, 236, 4, 29], +[86456, 236, 9, 16], +[86500, 236, 10, 30], +[86629, 237, 3, 8], +[86711, 237, 5, 29], +[86818, 237, 9, 13], +[86915, 237, 12, 19], +[86977, 238, 2, 19], +[87128, 238, 7, 20], +[87270, 238, 12, 9], +[87469, 239, 6, 26], +[87557, 239, 9, 22], +[87642, 239, 12, 16], +[87783, 240, 5, 5], +[87970, 240, 11, 8], +[88020, 240, 12, 28], +[88024, 241, 1, 1], +[88128, 241, 4, 15], +[88207, 241, 7, 3], +[88336, 241, 11, 9], +[88345, 241, 11, 18], +[88378, 241, 12, 21], +[88519, 242, 5, 11], +[88556, 242, 6, 17], +[88730, 242, 12, 8], +[88880, 243, 5, 7], +[89041, 243, 10, 15], +[89059, 243, 11, 2], +[89167, 244, 2, 18], +[89245, 244, 5, 6], +[89417, 244, 10, 25], +[89614, 245, 5, 10], +[89628, 245, 5, 24], +[89696, 245, 7, 31], +[89735, 245, 9, 8], +[89793, 245, 11, 5], +[89858, 246, 1, 9], +[90055, 246, 7, 25], +[90210, 246, 12, 27], +[90246, 247, 2, 
1], +[90301, 247, 3, 28], +[90379, 247, 6, 14], +[90464, 247, 9, 7], +[90653, 248, 3, 14], +[90792, 248, 7, 31], +[90886, 248, 11, 2], +[90930, 248, 12, 16], +[91126, 249, 6, 30], +[91260, 249, 11, 11], +[91340, 250, 1, 30], +[91392, 250, 3, 23], +[91507, 250, 7, 16], +[91661, 250, 12, 17], +[91680, 251, 1, 5], +[91722, 251, 2, 16], +[91893, 251, 8, 6], +[92022, 251, 12, 13], +[92078, 252, 2, 7], +[92277, 252, 8, 24], +[92404, 252, 12, 29], +[92448, 253, 2, 11], +[92621, 253, 8, 3], +[92696, 253, 10, 17], +[92889, 254, 4, 28], +[93011, 254, 8, 28], +[93165, 255, 1, 29], +[93364, 255, 8, 16], +[93451, 255, 11, 11], +[93651, 256, 5, 29], +[93749, 256, 9, 4], +[93817, 256, 11, 11], +[93980, 257, 4, 23], +[94112, 257, 9, 2], +[94146, 257, 10, 6], +[94172, 257, 11, 1], +[94178, 257, 11, 7], +[94255, 258, 1, 23], +[94437, 258, 7, 24], +[94501, 258, 9, 26], +[94528, 258, 10, 23], +[94661, 259, 3, 5], +[94725, 259, 5, 8], +[94802, 259, 7, 24], +[94990, 260, 1, 28], +[95114, 260, 5, 31], +[95164, 260, 7, 20], +[95211, 260, 9, 5], +[95333, 261, 1, 5], +[95409, 261, 3, 22], +[95572, 261, 9, 1], +[95669, 261, 12, 7], +[95757, 262, 3, 5], +[95768, 262, 3, 16], +[95938, 262, 9, 2], +[96123, 263, 3, 6], +[96141, 263, 3, 24], +[96285, 263, 8, 15], +[96383, 263, 11, 21], +[96486, 264, 3, 3], +[96544, 264, 4, 30], +[96727, 264, 10, 30], +[96805, 265, 1, 16], +[96870, 265, 3, 22], +[96950, 265, 6, 10], +[96989, 265, 7, 19], +[97108, 265, 11, 15], +[97167, 266, 1, 13], +[97246, 266, 4, 2], +[97281, 266, 5, 7], +[97391, 266, 8, 25], +[97415, 266, 9, 18], +[97508, 266, 12, 20], +[97670, 267, 5, 31], +[97835, 267, 11, 12], +[98002, 268, 4, 27], +[98083, 268, 7, 17], +[98180, 268, 10, 22], +[98364, 269, 4, 24], +[98394, 269, 5, 24], +[98418, 269, 6, 17], +[98549, 269, 10, 26], +[98697, 270, 3, 23], +[98720, 270, 4, 15], +[98912, 270, 10, 24], +[99005, 271, 1, 25], +[99074, 271, 4, 4], +[99150, 271, 6, 19], +[99346, 272, 1, 1], +[99513, 272, 6, 16], +[99569, 272, 8, 11], +[99672, 272, 11, 
22], +[99844, 273, 5, 13], +[99891, 273, 6, 29], +[99982, 273, 9, 28], +[100180, 274, 4, 14], +[100331, 274, 9, 12], +[100477, 275, 2, 5], +[100627, 275, 7, 5], +[100659, 275, 8, 6], +[100741, 275, 10, 27], +[100847, 276, 2, 10], +[101009, 276, 7, 21], +[101066, 276, 9, 16], +[101123, 276, 11, 12], +[101252, 277, 3, 21], +[101375, 277, 7, 22], +[101443, 277, 9, 28], +[101504, 277, 11, 28], +[101680, 278, 5, 23], +[101746, 278, 7, 28], +[101849, 278, 11, 8], +[101969, 279, 3, 8], +[102076, 279, 6, 23], +[102157, 279, 9, 12], +[102206, 279, 10, 31], +[102291, 280, 1, 24], +[102432, 280, 6, 13], +[102502, 280, 8, 22], +[102608, 280, 12, 6], +[102617, 280, 12, 15], +[102808, 281, 6, 24], +[102839, 281, 7, 25], +[102884, 281, 9, 8], +[102988, 281, 12, 21], +[103109, 282, 4, 21], +[103276, 282, 10, 5], +[103470, 283, 4, 17], +[103595, 283, 8, 20], +[103739, 284, 1, 11], +[103795, 284, 3, 7], +[103935, 284, 7, 25], +[104118, 285, 1, 24], +[104198, 285, 4, 14], +[104280, 285, 7, 5], +[104454, 285, 12, 26], +[104532, 286, 3, 14], +[104679, 286, 8, 8], +[104716, 286, 9, 14], +[104718, 286, 9, 16], +[104876, 287, 2, 21], +[104934, 287, 4, 20], +[105117, 287, 10, 20], +[105315, 288, 5, 5], +[105405, 288, 8, 3], +[105602, 289, 2, 16], +[105692, 289, 5, 17], +[105877, 289, 11, 18], +[106025, 290, 4, 15], +[106159, 290, 8, 27], +[106305, 291, 1, 20], +[106455, 291, 6, 19], +[106536, 291, 9, 8], +[106716, 292, 3, 6], +[106816, 292, 6, 14], +[106820, 292, 6, 18], +[106834, 292, 7, 2], +[106918, 292, 9, 24], +[107071, 293, 2, 24], +[107141, 293, 5, 5], +[107187, 293, 6, 20], +[107242, 293, 8, 14], +[107299, 293, 10, 10], +[107499, 294, 4, 28], +[107640, 294, 9, 16], +[107833, 295, 3, 28], +[107874, 295, 5, 8], +[107980, 295, 8, 22], +[108078, 295, 11, 28], +[108102, 295, 12, 22], +[108188, 296, 3, 17], +[108193, 296, 3, 22], +[108232, 296, 4, 30], +[108292, 296, 6, 29], +[108308, 296, 7, 15], +[108423, 296, 11, 7], +[108509, 297, 2, 1], +[108658, 297, 6, 30], +[108837, 297, 12, 26], 
+[108863, 298, 1, 21], +[108978, 298, 5, 16], +[109095, 298, 9, 10], +[109286, 299, 3, 20], +[109461, 299, 9, 11], +[109488, 299, 10, 8], +[109511, 299, 10, 31], +[109598, 300, 1, 26], +[109599, 300, 1, 27], +[109763, 300, 7, 10], +[109852, 300, 10, 7], +[109896, 300, 11, 20], +[109944, 301, 1, 7], +[110047, 301, 4, 20], +[110174, 301, 8, 25], +[110308, 302, 1, 6], +[110340, 302, 2, 7], +[110486, 302, 7, 3], +[110606, 302, 10, 31], +[110667, 302, 12, 31], +[110809, 303, 5, 22], +[110811, 303, 5, 24], +[110929, 303, 9, 19], +[111107, 304, 3, 15], +[111259, 304, 8, 14], +[111298, 304, 9, 22], +[111469, 305, 3, 12], +[111610, 305, 7, 31], +[111720, 305, 11, 18], +[111751, 305, 12, 19], +[111804, 306, 2, 10], +[111822, 306, 2, 28], +[111953, 306, 7, 9], +[112135, 307, 1, 7], +[112285, 307, 6, 6], +[112296, 307, 6, 17], +[112457, 307, 11, 25], +[112493, 307, 12, 31], +[112665, 308, 6, 20], +[112686, 308, 7, 11], +[112783, 308, 10, 16], +[112967, 309, 4, 18], +[113063, 309, 7, 23], +[113158, 309, 10, 26], +[113344, 310, 4, 30], +[113374, 310, 5, 30], +[113457, 310, 8, 21], +[113612, 311, 1, 23], +[113667, 311, 3, 19], +[113840, 311, 9, 8], +[113902, 311, 11, 9], +[114074, 312, 4, 29], +[114190, 312, 8, 23], +[114261, 312, 11, 2], +[114386, 313, 3, 7], +[114467, 313, 5, 27], +[114581, 313, 9, 18], +[114663, 313, 12, 9], +[114790, 314, 4, 15], +[114894, 314, 7, 28], +[114986, 314, 10, 28], +[115062, 315, 1, 12], +[115082, 315, 2, 1], +[115083, 315, 2, 2], +[115113, 315, 3, 4], +[115268, 315, 8, 6], +[115390, 315, 12, 6], +[115484, 316, 3, 9], +[115635, 316, 8, 7], +[115791, 317, 1, 10], +[115848, 317, 3, 8], +[116035, 317, 9, 11], +[116122, 317, 12, 7], +[116230, 318, 3, 25], +[116362, 318, 8, 4], +[116416, 318, 9, 27], +[116435, 318, 10, 16], +[116626, 319, 4, 25], +[116761, 319, 9, 7], +[116900, 320, 1, 24], +[117069, 320, 7, 11], +[117088, 320, 7, 30], +[117206, 320, 11, 25], +[117365, 321, 5, 3], +[117514, 321, 9, 29], +[117520, 321, 10, 5], +[117692, 322, 3, 26], 
+[117886, 322, 10, 6], +[117968, 322, 12, 27], +[118103, 323, 5, 11], +[118268, 323, 10, 23], +[118333, 323, 12, 27], +[118339, 324, 1, 2], +[118448, 324, 4, 20], +[118573, 324, 8, 23], +[118591, 324, 9, 10], +[118724, 325, 1, 21], +[118894, 325, 7, 10], +[118946, 325, 8, 31], +[119091, 326, 1, 23], +[119273, 326, 7, 24], +[119380, 326, 11, 8], +[119579, 327, 5, 26], +[119602, 327, 6, 18], +[119640, 327, 7, 26], +[119702, 327, 9, 26], +[119790, 327, 12, 23], +[119926, 328, 5, 7], +[120054, 328, 9, 12], +[120239, 329, 3, 16], +[120436, 329, 9, 29], +[120598, 330, 3, 10], +[120679, 330, 5, 30], +[120824, 330, 10, 22], +[120961, 331, 3, 8], +[121143, 331, 9, 6], +[121162, 331, 9, 25], +[121216, 331, 11, 18], +[121230, 331, 12, 2], +[121419, 332, 6, 8], +[121608, 332, 12, 14], +[121639, 333, 1, 14], +[121664, 333, 2, 8], +[121679, 333, 2, 23], +[121709, 333, 3, 25], +[121783, 333, 6, 7], +[121823, 333, 7, 17], +[121858, 333, 8, 21], +[121939, 333, 11, 10], +[121991, 334, 1, 1], +[122133, 334, 5, 23], +[122200, 334, 7, 29], +[122345, 334, 12, 21], +[122507, 335, 6, 1], +[122539, 335, 7, 3], +[122684, 335, 11, 25], +[122730, 336, 1, 10], +[122813, 336, 4, 2], +[122862, 336, 5, 21], +[123009, 336, 10, 15], +[123097, 337, 1, 11], +[123293, 337, 7, 26], +[123323, 337, 8, 25], +[123330, 337, 9, 1], +[123500, 338, 2, 18], +[123535, 338, 3, 25], +[123696, 338, 9, 2], +[123713, 338, 9, 19], +[123852, 339, 2, 5], +[123930, 339, 4, 24], +[123985, 339, 6, 18], +[123994, 339, 6, 27], +[124090, 339, 10, 1], +[124237, 340, 2, 25], +[124427, 340, 9, 2], +[124613, 341, 3, 7], +[124644, 341, 4, 7], +[124671, 341, 5, 4], +[124766, 341, 8, 7], +[124837, 341, 10, 17], +[124969, 342, 2, 26], +[125075, 342, 6, 12], +[125217, 342, 11, 1], +[125385, 343, 4, 18], +[125477, 343, 7, 19], +[125663, 344, 1, 21], +[125854, 344, 7, 30], +[125987, 344, 12, 10], +[126079, 345, 3, 12], +[126241, 345, 8, 21], +[126386, 346, 1, 13], +[126528, 346, 6, 4], +[126701, 346, 11, 24], +[126878, 347, 5, 20], 
+[126990, 347, 9, 9], +[127151, 348, 2, 17], +[127292, 348, 7, 7], +[127376, 348, 9, 29], +[127451, 348, 12, 13], +[127507, 349, 2, 7], +[127661, 349, 7, 11], +[127737, 349, 9, 25], +[127787, 349, 11, 14], +[127874, 350, 2, 9], +[128042, 350, 7, 27], +[128140, 350, 11, 2], +[128327, 351, 5, 8], +[128362, 351, 6, 12], +[128537, 351, 12, 4], +[128613, 352, 2, 18], +[128623, 352, 2, 28], +[128694, 352, 5, 9], +[128799, 352, 8, 22], +[128895, 352, 11, 26], +[129061, 353, 5, 11], +[129067, 353, 5, 17], +[129208, 353, 10, 5], +[129403, 354, 4, 18], +[129524, 354, 8, 17], +[129719, 355, 2, 28], +[129809, 355, 5, 29], +[129849, 355, 7, 8], +[129985, 355, 11, 21], +[130177, 356, 5, 31], +[130363, 356, 12, 3], +[130558, 357, 6, 16], +[130666, 357, 10, 2], +[130782, 358, 1, 26], +[130833, 358, 3, 18], +[130861, 358, 4, 15], +[131027, 358, 9, 28], +[131159, 359, 2, 7], +[131340, 359, 8, 7], +[131380, 359, 9, 16], +[131548, 360, 3, 2], +[131655, 360, 6, 17], +[131776, 360, 10, 16], +[131825, 360, 12, 4], +[131883, 361, 1, 31], +[132061, 361, 7, 28], +[132186, 361, 11, 30], +[132201, 361, 12, 15], +[132295, 362, 3, 19], +[132337, 362, 4, 30], +[132481, 362, 9, 21], +[132504, 362, 10, 14], +[132639, 363, 2, 26], +[132747, 363, 6, 14], +[132784, 363, 7, 21], +[132933, 363, 12, 17], +[132962, 364, 1, 15], +[133090, 364, 5, 22], +[133119, 364, 6, 20], +[133197, 364, 9, 6], +[133292, 364, 12, 10], +[133409, 365, 4, 6], +[133453, 365, 5, 20], +[133571, 365, 9, 15], +[133679, 366, 1, 1], +[133720, 366, 2, 11], +[133914, 366, 8, 24], +[133964, 366, 10, 13], +[134091, 367, 2, 17], +[134286, 367, 8, 31], +[134424, 368, 1, 16], +[134527, 368, 4, 28], +[134553, 368, 5, 24], +[134709, 368, 10, 27], +[134798, 369, 1, 24], +[134885, 369, 4, 21], +[134904, 369, 5, 10], +[134927, 369, 6, 2], +[134994, 369, 8, 8], +[135098, 369, 11, 20], +[135172, 370, 2, 2], +[135220, 370, 3, 22], +[135353, 370, 8, 2], +[135467, 370, 11, 24], +[135665, 371, 6, 10], +[135811, 371, 11, 3], +[135934, 372, 3, 5], 
+[136045, 372, 6, 24], +[136061, 372, 7, 10], +[136106, 372, 8, 24], +[136163, 372, 10, 20], +[136202, 372, 11, 28], +[136297, 373, 3, 3], +[136317, 373, 3, 23], +[136509, 373, 10, 1], +[136552, 373, 11, 13], +[136671, 374, 3, 12], +[136809, 374, 7, 28], +[137003, 375, 2, 7], +[137163, 375, 7, 17], +[137259, 375, 10, 21], +[137345, 376, 1, 15], +[137418, 376, 3, 28], +[137484, 376, 6, 2], +[137627, 376, 10, 23], +[137664, 376, 11, 29], +[137795, 377, 4, 9], +[137834, 377, 5, 18], +[137906, 377, 7, 29], +[137983, 377, 10, 14], +[138110, 378, 2, 18], +[138265, 378, 7, 23], +[138332, 378, 9, 28], +[138377, 378, 11, 12], +[138382, 378, 11, 17], +[138580, 379, 6, 3], +[138774, 379, 12, 14], +[138938, 380, 5, 26], +[138947, 380, 6, 4], +[138997, 380, 7, 24], +[139176, 381, 1, 19], +[139234, 381, 3, 18], +[139321, 381, 6, 13], +[139521, 381, 12, 30], +[139708, 382, 7, 5], +[139828, 382, 11, 2], +[139908, 383, 1, 21], +[139960, 383, 3, 14], +[139997, 383, 4, 20], +[140028, 383, 5, 21], +[140046, 383, 6, 8], +[140233, 383, 12, 12], +[140257, 384, 1, 5], +[140282, 384, 1, 30], +[140463, 384, 7, 29], +[140464, 384, 7, 30], +[140604, 384, 12, 17], +[140738, 385, 4, 30], +[140773, 385, 6, 4], +[140835, 385, 8, 5], +[140850, 385, 8, 20], +[141042, 386, 2, 28], +[141183, 386, 7, 19], +[141260, 386, 10, 4], +[141324, 386, 12, 7], +[141333, 386, 12, 16], +[141448, 387, 4, 10], +[141639, 387, 10, 18], +[141767, 388, 2, 23], +[141781, 388, 3, 8], +[141826, 388, 4, 22], +[141951, 388, 8, 25], +[142005, 388, 10, 18], +[142068, 388, 12, 20], +[142186, 389, 4, 17], +[142195, 389, 4, 26], +[142380, 389, 10, 28], +[142479, 390, 2, 4], +[142484, 390, 2, 9], +[142660, 390, 8, 4], +[142838, 391, 1, 29], +[142926, 391, 4, 27], +[142994, 391, 7, 4], +[142996, 391, 7, 6], +[143058, 391, 9, 6], +[143123, 391, 11, 10], +[143152, 391, 12, 9], +[143320, 392, 5, 25], +[143507, 392, 11, 28], +[143547, 393, 1, 7], +[143726, 393, 7, 5], +[143744, 393, 7, 23], +[143817, 393, 10, 4], +[143921, 394, 1, 
16], +[144046, 394, 5, 21], +[144077, 394, 6, 21], +[144166, 394, 9, 18], +[144190, 394, 10, 12], +[144245, 394, 12, 6], +[144309, 395, 2, 8], +[144488, 395, 8, 6], +[144610, 395, 12, 6], +[144630, 395, 12, 26], +[144690, 396, 2, 24], +[144820, 396, 7, 3], +[144871, 396, 8, 23], +[144961, 396, 11, 21], +[144995, 396, 12, 25], +[145093, 397, 4, 2], +[145165, 397, 6, 13], +[145286, 397, 10, 12], +[145393, 398, 1, 27], +[145540, 398, 6, 23], +[145700, 398, 11, 30], +[145795, 399, 3, 5], +[145808, 399, 3, 18], +[145913, 399, 7, 1], +[145983, 399, 9, 9], +[146105, 400, 1, 9], +[146118, 400, 1, 22], +[146307, 400, 7, 29], +[146418, 400, 11, 17], +[146453, 400, 12, 22], +[146628, 401, 6, 15], +[146824, 401, 12, 28], +[146907, 402, 3, 21], +[147037, 402, 7, 29], +[147106, 402, 10, 6], +[147130, 402, 10, 30], +[147199, 403, 1, 7], +[147209, 403, 1, 17], +[147404, 403, 7, 31], +[147585, 404, 1, 28], +[147697, 404, 5, 19], +[147812, 404, 9, 11], +[147817, 404, 9, 16], +[147962, 405, 2, 8], +[148019, 405, 4, 6], +[148136, 405, 8, 1], +[148159, 405, 8, 24], +[148297, 406, 1, 9], +[148371, 406, 3, 24], +[148447, 406, 6, 8], +[148580, 406, 10, 19], +[148747, 407, 4, 4], +[148938, 407, 10, 12], +[149061, 408, 2, 12], +[149227, 408, 7, 27], +[149371, 408, 12, 18], +[149452, 409, 3, 9], +[149521, 409, 5, 17], +[149621, 409, 8, 25], +[149686, 409, 10, 29], +[149749, 409, 12, 31], +[149823, 410, 3, 15], +[149877, 410, 5, 8], +[149944, 410, 7, 14], +[150134, 411, 1, 20], +[150318, 411, 7, 23], +[150380, 411, 9, 23], +[150525, 412, 2, 15], +[150716, 412, 8, 24], +[150741, 412, 9, 18], +[150819, 412, 12, 5], +[150884, 413, 2, 8], +[151017, 413, 6, 21], +[151030, 413, 7, 4], +[151183, 413, 12, 4], +[151280, 414, 3, 11], +[151374, 414, 6, 13], +[151393, 414, 7, 2], +[151506, 414, 10, 23], +[151601, 415, 1, 26], +[151746, 415, 6, 20], +[151767, 415, 7, 11], +[151853, 415, 10, 5], +[151958, 416, 1, 18], +[152090, 416, 5, 29], +[152149, 416, 7, 27], +[152219, 416, 10, 5], +[152370, 417, 3, 
5], +[152555, 417, 9, 6], +[152670, 417, 12, 30], +[152695, 418, 1, 24], +[152760, 418, 3, 30], +[152802, 418, 5, 11], +[153002, 418, 11, 27], +[153200, 419, 6, 13], +[153380, 419, 12, 10], +[153567, 420, 6, 14], +[153680, 420, 10, 5], +[153782, 421, 1, 15], +[153814, 421, 2, 16], +[153924, 421, 6, 6], +[153964, 421, 7, 16], +[154100, 421, 11, 29], +[154109, 421, 12, 8], +[154258, 422, 5, 6], +[154307, 422, 6, 24], +[154382, 422, 9, 7], +[154412, 422, 10, 7], +[154612, 423, 4, 25], +[154737, 423, 8, 28], +[154904, 424, 2, 11], +[155052, 424, 7, 8], +[155106, 424, 8, 31], +[155255, 425, 1, 27], +[155362, 425, 5, 14], +[155471, 425, 8, 31], +[155506, 425, 10, 5], +[155535, 425, 11, 3], +[155717, 426, 5, 4], +[155746, 426, 6, 2], +[155776, 426, 7, 2], +[155961, 427, 1, 3], +[155986, 427, 1, 28], +[155996, 427, 2, 7], +[156146, 427, 7, 7], +[156280, 427, 11, 18], +[156446, 428, 5, 2], +[156535, 428, 7, 30], +[156669, 428, 12, 11], +[156734, 429, 2, 14], +[156747, 429, 2, 27], +[156857, 429, 6, 17], +[157014, 429, 11, 21], +[157169, 430, 4, 25], +[157183, 430, 5, 9], +[157380, 430, 11, 22], +[157531, 431, 4, 22], +[157680, 431, 9, 18], +[157805, 432, 1, 21], +[157894, 432, 4, 19], +[157916, 432, 5, 11], +[158081, 432, 10, 23], +[158137, 432, 12, 18], +[158263, 433, 4, 23], +[158385, 433, 8, 23], +[158443, 433, 10, 20], +[158606, 434, 4, 1], +[158739, 434, 8, 12], +[158892, 435, 1, 12], +[159012, 435, 5, 12], +[159142, 435, 9, 19], +[159274, 436, 1, 29], +[159384, 436, 5, 18], +[159385, 436, 5, 19], +[159583, 436, 12, 3], +[159697, 437, 3, 27], +[159844, 437, 8, 21], +[160005, 438, 1, 29], +[160083, 438, 4, 17], +[160096, 438, 4, 30], +[160221, 438, 9, 2], +[160362, 439, 1, 21], +[160488, 439, 5, 27], +[160506, 439, 6, 14], +[160589, 439, 9, 5], +[160774, 440, 3, 8], +[160812, 440, 4, 15], +[160931, 440, 8, 12], +[161086, 441, 1, 14], +[161277, 441, 7, 24], +[161334, 441, 9, 19], +[161493, 442, 2, 25], +[161574, 442, 5, 17], +[161701, 442, 9, 21], +[161836, 443, 2, 3], 
+[162014, 443, 7, 31], +[162031, 443, 8, 17], +[162205, 444, 2, 7], +[162370, 444, 7, 21], +[162375, 444, 7, 26], +[162432, 444, 9, 21], +[162513, 444, 12, 11], +[162552, 445, 1, 19], +[162579, 445, 2, 15], +[162633, 445, 4, 10], +[162636, 445, 4, 13], +[162688, 445, 6, 4], +[162874, 445, 12, 7], +[162909, 446, 1, 11], +[162967, 446, 3, 10], +[162999, 446, 4, 11], +[163056, 446, 6, 7], +[163253, 446, 12, 21], +[163392, 447, 5, 9], +[163490, 447, 8, 15], +[163614, 447, 12, 17], +[163782, 448, 6, 2], +[163956, 448, 11, 23], +[164091, 449, 4, 7], +[164272, 449, 10, 5], +[164426, 450, 3, 8], +[164472, 450, 4, 23], +[164488, 450, 5, 9], +[164536, 450, 6, 26], +[164723, 450, 12, 30], +[164863, 451, 5, 19], +[164915, 451, 7, 10], +[164920, 451, 7, 15], +[164937, 451, 8, 1], +[165090, 452, 1, 1], +[165113, 452, 1, 24], +[165123, 452, 2, 3], +[165130, 452, 2, 10], +[165157, 452, 3, 8], +[165194, 452, 4, 14], +[165273, 452, 7, 2], +[165440, 452, 12, 16], +[165634, 453, 6, 28], +[165790, 453, 12, 1], +[165828, 454, 1, 8], +[166008, 454, 7, 7], +[166175, 454, 12, 21], +[166320, 455, 5, 15], +[166455, 455, 9, 27], +[166640, 456, 3, 30], +[166801, 456, 9, 7], +[166877, 456, 11, 22], +[167018, 457, 4, 12], +[167191, 457, 10, 2], +[167369, 458, 3, 29], +[167473, 458, 7, 11], +[167558, 458, 10, 4], +[167684, 459, 2, 7], +[167741, 459, 4, 5], +[167854, 459, 7, 27], +[167906, 459, 9, 17], +[168002, 459, 12, 22], +[168149, 460, 5, 17], +[168267, 460, 9, 12], +[168433, 461, 2, 25], +[168469, 461, 4, 2], +[168658, 461, 10, 8], +[168805, 462, 3, 4], +[168939, 462, 7, 16], +[169003, 462, 9, 18], +[169104, 462, 12, 28], +[169164, 463, 2, 26], +[169196, 463, 3, 30], +[169229, 463, 5, 2], +[169341, 463, 8, 22], +[169362, 463, 9, 12], +[169372, 463, 9, 22], +[169382, 463, 10, 2], +[169483, 464, 1, 11], +[169660, 464, 7, 6], +[169837, 464, 12, 30], +[169937, 465, 4, 9], +[170074, 465, 8, 24], +[170180, 465, 12, 8], +[170334, 466, 5, 11], +[170490, 466, 10, 14], +[170645, 467, 3, 18], +[170829, 
467, 9, 18], +[171022, 468, 3, 29], +[171059, 468, 5, 5], +[171150, 468, 8, 4], +[171202, 468, 9, 25], +[171208, 468, 10, 1], +[171347, 469, 2, 17], +[171351, 469, 2, 21], +[171419, 469, 4, 30], +[171433, 469, 5, 14], +[171553, 469, 9, 11], +[171559, 469, 9, 17], +[171562, 469, 9, 20], +[171678, 470, 1, 14], +[171798, 470, 5, 14], +[171967, 470, 10, 30], +[172141, 471, 4, 22], +[172266, 471, 8, 25], +[172386, 471, 12, 23], +[172462, 472, 3, 8], +[172600, 472, 7, 24], +[172789, 473, 1, 29], +[172870, 473, 4, 20], +[172911, 473, 5, 31], +[172972, 473, 7, 31], +[173098, 473, 12, 4], +[173258, 474, 5, 13], +[173360, 474, 8, 23], +[173486, 474, 12, 27], +[173610, 475, 4, 30], +[173687, 475, 7, 16], +[173828, 475, 12, 4], +[174024, 476, 6, 17], +[174047, 476, 7, 10], +[174064, 476, 7, 27], +[174169, 476, 11, 9], +[174214, 476, 12, 24], +[174400, 477, 6, 28], +[174437, 477, 8, 4], +[174483, 477, 9, 19], +[174613, 478, 1, 27], +[174634, 478, 2, 17], +[174816, 478, 8, 18], +[174881, 478, 10, 22], +[175045, 479, 4, 4], +[175221, 479, 9, 27], +[175252, 479, 10, 28], +[175445, 480, 5, 8], +[175517, 480, 7, 19], +[175683, 481, 1, 1], +[175780, 481, 4, 8], +[175962, 481, 10, 7], +[176002, 481, 11, 16], +[176087, 482, 2, 9], +[176146, 482, 4, 9], +[176285, 482, 8, 26], +[176481, 483, 3, 10], +[176607, 483, 7, 14], +[176636, 483, 8, 12], +[176785, 484, 1, 8], +[176880, 484, 4, 12], +[177013, 484, 8, 23], +[177210, 485, 3, 8], +[177308, 485, 6, 14], +[177504, 485, 12, 27], +[177515, 486, 1, 7], +[177562, 486, 2, 23], +[177598, 486, 3, 31], +[177723, 486, 8, 3], +[177809, 486, 10, 28], +[177961, 487, 3, 29], +[178083, 487, 7, 29], +[178241, 488, 1, 3], +[178349, 488, 4, 20], +[178387, 488, 5, 28], +[178520, 488, 10, 8], +[178591, 488, 12, 18], +[178791, 489, 7, 6], +[178857, 489, 9, 10], +[179018, 490, 2, 18], +[179113, 490, 5, 24], +[179149, 490, 6, 29], +[179163, 490, 7, 13], +[179253, 490, 10, 11], +[179390, 491, 2, 25], +[179537, 491, 7, 22], +[179716, 492, 1, 17], +[179896, 
492, 7, 15], +[180079, 493, 1, 14], +[180257, 493, 7, 11], +[180358, 493, 10, 20], +[180363, 493, 10, 25], +[180509, 494, 3, 20], +[180564, 494, 5, 14], +[180753, 494, 11, 19], +[180854, 495, 2, 28], +[180965, 495, 6, 19], +[181131, 495, 12, 2], +[181264, 496, 4, 13], +[181356, 496, 7, 14], +[181469, 496, 11, 4], +[181516, 496, 12, 21], +[181570, 497, 2, 13], +[181674, 497, 5, 28], +[181761, 497, 8, 23], +[181846, 497, 11, 16], +[181905, 498, 1, 14], +[182076, 498, 7, 4], +[182185, 498, 10, 21], +[182248, 498, 12, 23], +[182313, 499, 2, 26], +[182320, 499, 3, 5], +[182496, 499, 8, 28], +[182566, 499, 11, 6], +[182745, 500, 5, 4], +[182900, 500, 10, 6], +[182914, 500, 10, 20], +[182978, 500, 12, 23], +[183149, 501, 6, 12], +[183332, 501, 12, 12], +[183482, 502, 5, 11], +[183616, 502, 9, 22], +[183793, 503, 3, 18], +[183873, 503, 6, 6], +[184014, 503, 10, 25], +[184108, 504, 1, 27], +[184250, 504, 6, 17], +[184288, 504, 7, 25], +[184411, 504, 11, 25], +[184611, 505, 6, 13], +[184737, 505, 10, 17], +[184928, 506, 4, 26], +[185097, 506, 10, 12], +[185267, 507, 3, 31], +[185323, 507, 5, 26], +[185356, 507, 6, 28], +[185539, 507, 12, 28], +[185652, 508, 4, 19], +[185781, 508, 8, 26], +[185911, 509, 1, 3], +[186005, 509, 4, 7], +[186177, 509, 9, 26], +[186256, 509, 12, 14], +[186447, 510, 6, 23], +[186563, 510, 10, 17], +[186593, 510, 11, 16], +[186729, 511, 4, 1], +[186757, 511, 4, 29], +[186913, 511, 10, 2], +[187047, 512, 2, 13], +[187184, 512, 6, 29], +[187353, 512, 12, 15], +[187460, 513, 4, 1], +[187501, 513, 5, 12], +[187610, 513, 8, 29], +[187759, 514, 1, 25], +[187911, 514, 6, 26], +[187944, 514, 7, 29], +[187960, 514, 8, 14], +[188019, 514, 10, 12], +[188080, 514, 12, 12], +[188130, 515, 1, 31], +[188153, 515, 2, 23], +[188248, 515, 5, 29], +[188439, 515, 12, 6], +[188522, 516, 2, 27], +[188528, 516, 3, 4], +[188636, 516, 6, 20], +[188694, 516, 8, 17], +[188713, 516, 9, 5], +[188899, 517, 3, 10], +[188952, 517, 5, 2], +[188957, 517, 5, 7], +[188996, 517, 6, 15], 
+[189106, 517, 10, 3], +[189225, 518, 1, 30], +[189284, 518, 3, 30], +[189330, 518, 5, 15], +[189402, 518, 7, 26], +[189433, 518, 8, 26], +[189625, 519, 3, 6], +[189721, 519, 6, 10], +[189847, 519, 10, 14], +[190026, 520, 4, 10], +[190091, 520, 6, 14], +[190213, 520, 10, 14], +[190318, 521, 1, 27], +[190362, 521, 3, 12], +[190545, 521, 9, 11], +[190581, 521, 10, 17], +[190690, 522, 2, 3], +[190842, 522, 7, 5], +[190889, 522, 8, 21], +[191086, 523, 3, 6], +[191206, 523, 7, 4], +[191207, 523, 7, 5], +[191283, 523, 9, 19], +[191329, 523, 11, 4], +[191404, 524, 1, 18], +[191479, 524, 4, 2], +[191624, 524, 8, 25], +[191800, 525, 2, 17], +[191842, 525, 3, 31], +[191985, 525, 8, 21], +[192184, 526, 3, 8], +[192197, 526, 3, 21], +[192371, 526, 9, 11], +[192567, 527, 3, 26], +[192707, 527, 8, 13], +[192773, 527, 10, 18], +[192935, 528, 3, 28], +[193080, 528, 8, 20], +[193093, 528, 9, 2], +[193216, 529, 1, 3], +[193385, 529, 6, 21], +[193573, 529, 12, 26], +[193722, 530, 5, 24], +[193751, 530, 6, 22], +[193880, 530, 10, 29], +[194063, 531, 4, 30], +[194110, 531, 6, 16], +[194174, 531, 8, 19], +[194280, 531, 12, 3], +[194461, 532, 6, 1], +[194574, 532, 9, 22], +[194670, 532, 12, 27], +[194737, 533, 3, 4], +[194853, 533, 6, 28], +[194875, 533, 7, 20], +[194911, 533, 8, 25], +[194978, 533, 10, 31], +[195036, 533, 12, 28], +[195098, 534, 2, 28], +[195112, 534, 3, 14], +[195242, 534, 7, 22], +[195296, 534, 9, 14], +[195365, 534, 11, 22], +[195434, 535, 1, 30], +[195521, 535, 4, 27], +[195544, 535, 5, 20], +[195601, 535, 7, 16], +[195699, 535, 10, 22], +[195721, 535, 11, 13], +[195750, 535, 12, 12], +[195785, 536, 1, 16], +[195853, 536, 3, 24], +[195994, 536, 8, 12], +[196176, 537, 2, 10], +[196294, 537, 6, 8], +[196435, 537, 10, 27], +[196620, 538, 4, 30], +[196759, 538, 9, 16], +[196774, 538, 10, 1], +[196969, 539, 4, 14], +[197036, 539, 6, 20], +[197165, 539, 10, 27], +[197263, 540, 2, 2], +[197421, 540, 7, 9], +[197527, 540, 10, 23], +[197623, 541, 1, 27], +[197750, 541, 6, 
3], +[197767, 541, 6, 20], +[197786, 541, 7, 9], +[197986, 542, 1, 25], +[198133, 542, 6, 21], +[198281, 542, 11, 16], +[198449, 543, 5, 3], +[198543, 543, 8, 5], +[198599, 543, 9, 30], +[198643, 543, 11, 13], +[198791, 544, 4, 9], +[198906, 544, 8, 2], +[198957, 544, 9, 22], +[198978, 544, 10, 13], +[198995, 544, 10, 30], +[199049, 544, 12, 23], +[199082, 545, 1, 25], +[199170, 545, 4, 23], +[199307, 545, 9, 7], +[199485, 546, 3, 4], +[199512, 546, 3, 31], +[199608, 546, 7, 5], +[199748, 546, 11, 22], +[199775, 546, 12, 19], +[199848, 547, 3, 2], +[199896, 547, 4, 19], +[199969, 547, 7, 1], +[200087, 547, 10, 27], +[200201, 548, 2, 18], +[200291, 548, 5, 18], +[200425, 548, 9, 29], +[200547, 549, 1, 29], +[200601, 549, 3, 24], +[200748, 549, 8, 18], +[200776, 549, 9, 15], +[200809, 549, 10, 18], +[200837, 549, 11, 15], +[201017, 550, 5, 14], +[201023, 550, 5, 20], +[201187, 550, 10, 31], +[201277, 551, 1, 29], +[201433, 551, 7, 4], +[201526, 551, 10, 5], +[201541, 551, 10, 20], +[201658, 552, 2, 14], +[201830, 552, 8, 4], +[201986, 553, 1, 7], +[202156, 553, 6, 26], +[202352, 554, 1, 8], +[202530, 554, 7, 5], +[202550, 554, 7, 25], +[202601, 554, 9, 14], +[202662, 554, 11, 14], +[202736, 555, 1, 27], +[202898, 555, 7, 8], +[202909, 555, 7, 19], +[202989, 555, 10, 7], +[203162, 556, 3, 28], +[203204, 556, 5, 9], +[203226, 556, 5, 31], +[203346, 556, 9, 28], +[203431, 556, 12, 22], +[203594, 557, 6, 3], +[203615, 557, 6, 24], +[203803, 557, 12, 29], +[203857, 558, 2, 21], +[204012, 558, 7, 26], +[204032, 558, 8, 15], +[204107, 558, 10, 29], +[204153, 558, 12, 14], +[204236, 559, 3, 7], +[204241, 559, 3, 12], +[204367, 559, 7, 16], +[204502, 559, 11, 28], +[204503, 559, 11, 29], +[204654, 560, 4, 28], +[204813, 560, 10, 4], +[204874, 560, 12, 4], +[204913, 561, 1, 12], +[204927, 561, 1, 26], +[205101, 561, 7, 19], +[205266, 561, 12, 31], +[205283, 562, 1, 17], +[205404, 562, 5, 18], +[205550, 562, 10, 11], +[205611, 562, 12, 11], +[205795, 563, 6, 13], +[205863, 563, 
8, 20], +[205884, 563, 9, 10], +[205930, 563, 10, 26], +[205936, 563, 11, 1], +[206066, 564, 3, 10], +[206205, 564, 7, 27], +[206222, 564, 8, 13], +[206277, 564, 10, 7], +[206350, 564, 12, 19], +[206521, 565, 6, 8], +[206709, 565, 12, 13], +[206898, 566, 6, 20], +[207062, 566, 12, 1], +[207092, 566, 12, 31], +[207147, 567, 2, 24], +[207197, 567, 4, 15], +[207204, 567, 4, 22], +[207355, 567, 9, 20], +[207413, 567, 11, 17], +[207515, 568, 2, 27], +[207517, 568, 2, 29], +[207674, 568, 8, 4], +[207806, 568, 12, 14], +[207846, 569, 1, 23], +[207943, 569, 4, 30], +[207975, 569, 6, 1], +[208151, 569, 11, 24], +[208233, 570, 2, 14], +[208261, 570, 3, 14], +[208360, 570, 6, 21], +[208482, 570, 10, 21], +[208496, 570, 11, 4], +[208624, 571, 3, 12], +[208771, 571, 8, 6], +[208901, 571, 12, 14], +[208926, 572, 1, 8], +[208985, 572, 3, 7], +[209172, 572, 9, 10], +[209211, 572, 10, 19], +[209396, 573, 4, 22], +[209580, 573, 10, 23], +[209680, 574, 1, 31], +[209751, 574, 4, 12], +[209884, 574, 8, 23], +[210029, 575, 1, 15], +[210150, 575, 5, 16], +[210173, 575, 6, 8], +[210182, 575, 6, 17], +[210291, 575, 10, 4], +[210337, 575, 11, 19], +[210469, 576, 3, 30], +[210637, 576, 9, 14], +[210696, 576, 11, 12], +[210878, 577, 5, 13], +[210881, 577, 5, 16], +[210950, 577, 7, 24], +[210975, 577, 8, 18], +[211030, 577, 10, 12], +[211061, 577, 11, 12], +[211256, 578, 5, 26], +[211318, 578, 7, 27], +[211369, 578, 9, 16], +[211542, 579, 3, 8], +[211590, 579, 4, 25], +[211732, 579, 9, 14], +[211758, 579, 10, 10], +[211843, 580, 1, 3], +[211992, 580, 5, 31], +[212100, 580, 9, 16], +[212155, 580, 11, 10], +[212203, 580, 12, 28], +[212397, 581, 7, 10], +[212438, 581, 8, 20], +[212562, 581, 12, 22], +[212611, 582, 2, 9], +[212715, 582, 5, 24], +[212765, 582, 7, 13], +[212828, 582, 9, 14], +[212880, 582, 11, 5], +[212894, 582, 11, 19], +[213041, 583, 4, 15], +[213047, 583, 4, 21], +[213082, 583, 5, 26], +[213126, 583, 7, 9], +[213164, 583, 8, 16], +[213174, 583, 8, 26], +[213372, 584, 3, 11], 
+[213537, 584, 8, 23], +[213737, 585, 3, 11], +[213848, 585, 6, 30], +[214033, 586, 1, 1], +[214115, 586, 3, 24], +[214118, 586, 3, 27], +[214158, 586, 5, 6], +[214202, 586, 6, 19], +[214285, 586, 9, 10], +[214324, 586, 10, 19], +[214360, 586, 11, 24], +[214474, 587, 3, 18], +[214552, 587, 6, 4], +[214750, 587, 12, 19], +[214877, 588, 4, 24], +[215036, 588, 9, 30], +[215082, 588, 11, 15], +[215229, 589, 4, 11], +[215241, 589, 4, 23], +[215433, 589, 11, 1], +[215454, 589, 11, 22], +[215499, 590, 1, 6], +[215625, 590, 5, 12], +[215744, 590, 9, 8], +[215815, 590, 11, 18], +[215979, 591, 5, 1], +[216083, 591, 8, 13], +[216252, 592, 1, 29], +[216316, 592, 4, 2], +[216358, 592, 5, 14], +[216491, 592, 9, 24], +[216568, 592, 12, 10], +[216702, 593, 4, 23], +[216847, 593, 9, 15], +[216858, 593, 9, 26], +[216884, 593, 10, 22], +[217064, 594, 4, 20], +[217104, 594, 5, 30], +[217220, 594, 9, 23], +[217299, 594, 12, 11], +[217491, 595, 6, 21], +[217498, 595, 6, 28], +[217502, 595, 7, 2], +[217657, 595, 12, 4], +[217825, 596, 5, 20], +[218012, 596, 11, 23], +[218157, 597, 4, 17], +[218199, 597, 5, 29], +[218366, 597, 11, 12], +[218405, 597, 12, 21], +[218439, 598, 1, 24], +[218474, 598, 2, 28], +[218514, 598, 4, 9], +[218538, 598, 5, 3], +[218603, 598, 7, 7], +[218625, 598, 7, 29], +[218711, 598, 10, 23], +[218803, 599, 1, 23], +[218871, 599, 4, 1], +[219071, 599, 10, 18], +[219207, 600, 3, 3], +[219243, 600, 4, 8], +[219356, 600, 7, 30], +[219379, 600, 8, 22], +[219476, 600, 11, 27], +[219493, 600, 12, 14], +[219675, 601, 6, 14], +[219844, 601, 11, 30], +[220040, 602, 6, 14], +[220136, 602, 9, 18], +[220158, 602, 10, 10], +[220296, 603, 2, 25], +[220450, 603, 7, 29], +[220506, 603, 9, 23], +[220530, 603, 10, 17], +[220633, 604, 1, 28], +[220638, 604, 2, 2], +[220715, 604, 4, 19], +[220808, 604, 7, 21], +[220820, 604, 8, 2], +[220860, 604, 9, 11], +[220891, 604, 10, 12], +[221030, 605, 2, 28], +[221145, 605, 6, 23], +[221339, 606, 1, 3], +[221366, 606, 1, 30], +[221478, 606, 5, 
22], +[221612, 606, 10, 3], +[221726, 607, 1, 25], +[221876, 607, 6, 24], +[222020, 607, 11, 15], +[222091, 608, 1, 25], +[222167, 608, 4, 10], +[222224, 608, 6, 6], +[222380, 608, 11, 9], +[222484, 609, 2, 21], +[222644, 609, 7, 31], +[222802, 610, 1, 5], +[222883, 610, 3, 27], +[223045, 610, 9, 5], +[223120, 610, 11, 19], +[223171, 611, 1, 9], +[223228, 611, 3, 7], +[223324, 611, 6, 11], +[223362, 611, 7, 19], +[223427, 611, 9, 22], +[223444, 611, 10, 9], +[223619, 612, 4, 1], +[223637, 612, 4, 19], +[223672, 612, 5, 24], +[223720, 612, 7, 11], +[223876, 612, 12, 14], +[223943, 613, 2, 19], +[223975, 613, 3, 23], +[224077, 613, 7, 3], +[224248, 613, 12, 21], +[224427, 614, 6, 18], +[224615, 614, 12, 23], +[224797, 615, 6, 23], +[224841, 615, 8, 6], +[224890, 615, 9, 24], +[225053, 616, 3, 5], +[225242, 616, 9, 10], +[225273, 616, 10, 11], +[225299, 616, 11, 6], +[225409, 617, 2, 24], +[225557, 617, 7, 22], +[225590, 617, 8, 24], +[225625, 617, 9, 28], +[225666, 617, 11, 8], +[225825, 618, 4, 16], +[225859, 618, 5, 20], +[225973, 618, 9, 11], +[226097, 619, 1, 13], +[226216, 619, 5, 12], +[226380, 619, 10, 23], +[226473, 620, 1, 24], +[226506, 620, 2, 26], +[226562, 620, 4, 22], +[226577, 620, 5, 7], +[226663, 620, 8, 1], +[226859, 621, 2, 13], +[226959, 621, 5, 24], +[227154, 621, 12, 5], +[227183, 622, 1, 3], +[227251, 622, 3, 12], +[227273, 622, 4, 3], +[227364, 622, 7, 3], +[227488, 622, 11, 4], +[227578, 623, 2, 2], +[227594, 623, 2, 18], +[227691, 623, 5, 26], +[227705, 623, 6, 9], +[227813, 623, 9, 25], +[227957, 624, 2, 16], +[228052, 624, 5, 21], +[228125, 624, 8, 2], +[228226, 624, 11, 11], +[228231, 624, 11, 16], +[228384, 625, 4, 18], +[228532, 625, 9, 13], +[228715, 626, 3, 15], +[228898, 626, 9, 14], +[229047, 627, 2, 10], +[229153, 627, 5, 27], +[229284, 627, 10, 5], +[229432, 628, 3, 1], +[229559, 628, 7, 6], +[229742, 629, 1, 5], +[229930, 629, 7, 12], +[230041, 629, 10, 31], +[230074, 629, 12, 3], +[230163, 630, 3, 2], +[230299, 630, 7, 16], 
+[230394, 630, 10, 19], +[230590, 631, 5, 3], +[230693, 631, 8, 14], +[230736, 631, 9, 26], +[230908, 632, 3, 16], +[231021, 632, 7, 7], +[231141, 632, 11, 4], +[231178, 632, 12, 11], +[231312, 633, 4, 24], +[231330, 633, 5, 12], +[231349, 633, 5, 31], +[231536, 633, 12, 4], +[231672, 634, 4, 19], +[231813, 634, 9, 7], +[231980, 635, 2, 21], +[232112, 635, 7, 3], +[232119, 635, 7, 10], +[232130, 635, 7, 21], +[232175, 635, 9, 4], +[232320, 636, 1, 27], +[232334, 636, 2, 10], +[232338, 636, 2, 14], +[232518, 636, 8, 12], +[232567, 636, 9, 30], +[232656, 636, 12, 28], +[232798, 637, 5, 19], +[232906, 637, 9, 4], +[233081, 638, 2, 26], +[233211, 638, 7, 6], +[233391, 639, 1, 2], +[233542, 639, 6, 2], +[233639, 639, 9, 7], +[233815, 640, 3, 1], +[233941, 640, 7, 5], +[234130, 641, 1, 10], +[234214, 641, 4, 4], +[234249, 641, 5, 9], +[234270, 641, 5, 30], +[234291, 641, 6, 20], +[234455, 641, 12, 1], +[234504, 642, 1, 19], +[234536, 642, 2, 20], +[234674, 642, 7, 8], +[234852, 643, 1, 2], +[234955, 643, 4, 15], +[235132, 643, 10, 9], +[235206, 643, 12, 22], +[235302, 644, 3, 27], +[235479, 644, 9, 20], +[235563, 644, 12, 13], +[235584, 645, 1, 3], +[235760, 645, 6, 28], +[235781, 645, 7, 19], +[235891, 645, 11, 6], +[235900, 645, 11, 15], +[236028, 646, 3, 23], +[236050, 646, 4, 14], +[236152, 646, 7, 25], +[236275, 646, 11, 25], +[236331, 647, 1, 20], +[236373, 647, 3, 3], +[236567, 647, 9, 13], +[236596, 647, 10, 12], +[236760, 648, 3, 24], +[236829, 648, 6, 1], +[236857, 648, 6, 29], +[237048, 649, 1, 6], +[237241, 649, 7, 18], +[237304, 649, 9, 19], +[237463, 650, 2, 25], +[237615, 650, 7, 27], +[237768, 650, 12, 27], +[237889, 651, 4, 27], +[237977, 651, 7, 24], +[238082, 651, 11, 6], +[238153, 652, 1, 16], +[238295, 652, 6, 6], +[238338, 652, 7, 19], +[238535, 653, 2, 1], +[238578, 653, 3, 16], +[238673, 653, 6, 19], +[238694, 653, 7, 10], +[238784, 653, 10, 8], +[238915, 654, 2, 16], +[239102, 654, 8, 22], +[239157, 654, 10, 16], +[239338, 655, 4, 15], +[239425, 
655, 7, 11], +[239604, 656, 1, 6], +[239768, 656, 6, 18], +[239776, 656, 6, 26], +[239888, 656, 10, 16], +[239890, 656, 10, 18], +[240084, 657, 4, 30], +[240220, 657, 9, 13], +[240375, 658, 2, 15], +[240379, 658, 2, 19], +[240473, 658, 5, 24], +[240562, 658, 8, 21], +[240591, 658, 9, 19], +[240638, 658, 11, 5], +[240803, 659, 4, 19], +[240891, 659, 7, 16], +[241060, 660, 1, 1], +[241100, 660, 2, 10], +[241199, 660, 5, 19], +[241366, 660, 11, 2], +[241510, 661, 3, 26], +[241563, 661, 5, 18], +[241663, 661, 8, 26], +[241784, 661, 12, 25], +[241790, 661, 12, 31], +[241857, 662, 3, 8], +[241915, 662, 5, 5], +[242028, 662, 8, 26], +[242087, 662, 10, 24], +[242249, 663, 4, 4], +[242431, 663, 10, 3], +[242605, 664, 3, 25], +[242775, 664, 9, 11], +[242953, 665, 3, 8], +[243056, 665, 6, 19], +[243206, 665, 11, 16], +[243218, 665, 11, 28], +[243275, 666, 1, 24], +[243321, 666, 3, 11], +[243480, 666, 8, 17], +[243666, 667, 2, 19], +[243708, 667, 4, 2], +[243766, 667, 5, 30], +[243785, 667, 6, 18], +[243887, 667, 9, 28], +[243953, 667, 12, 3], +[243971, 667, 12, 21], +[243981, 667, 12, 31], +[244144, 668, 6, 11], +[244249, 668, 9, 24], +[244445, 669, 4, 8], +[244605, 669, 9, 15], +[244691, 669, 12, 10], +[244869, 670, 6, 6], +[244904, 670, 7, 11], +[245001, 670, 10, 16], +[245084, 671, 1, 7], +[245252, 671, 6, 24], +[245332, 671, 9, 12], +[245353, 671, 10, 3], +[245475, 672, 2, 2], +[245599, 672, 6, 5], +[245769, 672, 11, 22], +[245924, 673, 4, 26], +[246070, 673, 9, 19], +[246086, 673, 10, 5], +[246260, 674, 3, 28], +[246383, 674, 7, 29], +[246573, 675, 2, 4], +[246650, 675, 4, 22], +[246733, 675, 7, 14], +[246743, 675, 7, 24], +[246891, 675, 12, 19], +[246929, 676, 1, 26], +[247016, 676, 4, 22], +[247086, 676, 7, 1], +[247126, 676, 8, 10], +[247225, 676, 11, 17], +[247364, 677, 4, 5], +[247393, 677, 5, 4], +[247446, 677, 6, 26], +[247513, 677, 9, 1], +[247520, 677, 9, 8], +[247711, 678, 3, 18], +[247822, 678, 7, 7], +[247916, 678, 10, 9], +[248050, 679, 2, 20], +[248072, 
679, 3, 14], +[248087, 679, 3, 29], +[248209, 679, 7, 29], +[248373, 680, 1, 9], +[248567, 680, 7, 21], +[248599, 680, 8, 22], +[248725, 680, 12, 26], +[248789, 681, 2, 28], +[248834, 681, 4, 14], +[248845, 681, 4, 25], +[248994, 681, 9, 21], +[249010, 681, 10, 7], +[249139, 682, 2, 13], +[249187, 682, 4, 2], +[249372, 682, 10, 4], +[249376, 682, 10, 8], +[249551, 683, 4, 1], +[249674, 683, 8, 2], +[249680, 683, 8, 8], +[249707, 683, 9, 4], +[249812, 683, 12, 18], +[249999, 684, 6, 22], +[250155, 684, 11, 25], +[250311, 685, 4, 30], +[250499, 685, 11, 4], +[250670, 686, 4, 24], +[250848, 686, 10, 19], +[250898, 686, 12, 8], +[250937, 687, 1, 16], +[250973, 687, 2, 21], +[251003, 687, 3, 23], +[251193, 687, 9, 29], +[251364, 688, 3, 18], +[251473, 688, 7, 5], +[251525, 688, 8, 26], +[251535, 688, 9, 5], +[251636, 688, 12, 15], +[251667, 689, 1, 15], +[251822, 689, 6, 19], +[251844, 689, 7, 11], +[251954, 689, 10, 29], +[252034, 690, 1, 17], +[252051, 690, 2, 3], +[252162, 690, 5, 25], +[252189, 690, 6, 21], +[252236, 690, 8, 7], +[252414, 691, 2, 1], +[252509, 691, 5, 7], +[252520, 691, 5, 18], +[252658, 691, 10, 3], +[252664, 691, 10, 9], +[252679, 691, 10, 24], +[252780, 692, 2, 2], +[252836, 692, 3, 29], +[252912, 692, 6, 13], +[253089, 692, 12, 7], +[253132, 693, 1, 19], +[253308, 693, 7, 14], +[253445, 693, 11, 28], +[253446, 693, 11, 29], +[253464, 693, 12, 17], +[253577, 694, 4, 9], +[253631, 694, 6, 2], +[253774, 694, 10, 23], +[253963, 695, 4, 30], +[254105, 695, 9, 19], +[254151, 695, 11, 4], +[254224, 696, 1, 16], +[254247, 696, 2, 8], +[254310, 696, 4, 11], +[254445, 696, 8, 24], +[254607, 697, 2, 2], +[254632, 697, 2, 27], +[254826, 697, 9, 9], +[254857, 697, 10, 10], +[255010, 698, 3, 12], +[255198, 698, 9, 16], +[255226, 698, 10, 14], +[255281, 698, 12, 8], +[255443, 699, 5, 19], +[255466, 699, 6, 11], +[255589, 699, 10, 12], +[255647, 699, 12, 9], +[255758, 700, 3, 30], +[255958, 700, 10, 16], +[255985, 700, 11, 12], +[256185, 701, 5, 31], +[256186, 
701, 6, 1], +[256335, 701, 10, 28], +[256388, 701, 12, 20], +[256466, 702, 3, 8], +[256581, 702, 7, 1], +[256601, 702, 7, 21], +[256791, 703, 1, 27], +[256975, 703, 7, 30], +[256985, 703, 8, 9], +[257133, 704, 1, 4], +[257224, 704, 4, 4], +[257381, 704, 9, 8], +[257492, 704, 12, 28], +[257541, 705, 2, 15], +[257628, 705, 5, 13], +[257711, 705, 8, 4], +[257819, 705, 11, 20], +[257910, 706, 2, 19], +[258056, 706, 7, 15], +[258188, 706, 11, 24], +[258262, 707, 2, 6], +[258306, 707, 3, 22], +[258349, 707, 5, 4], +[258535, 707, 11, 6], +[258544, 707, 11, 15], +[258554, 707, 11, 25], +[258635, 708, 2, 14], +[258656, 708, 3, 6], +[258748, 708, 6, 6], +[258880, 708, 10, 16], +[258979, 709, 1, 23], +[259071, 709, 4, 25], +[259112, 709, 6, 5], +[259301, 709, 12, 11], +[259309, 709, 12, 19], +[259490, 710, 6, 18], +[259584, 710, 9, 20], +[259689, 711, 1, 3], +[259887, 711, 7, 20], +[259970, 711, 10, 11], +[260145, 712, 4, 3], +[260340, 712, 10, 15], +[260408, 712, 12, 22], +[260477, 713, 3, 1], +[260608, 713, 7, 10], +[260703, 713, 10, 13], +[260888, 714, 4, 16], +[260949, 714, 6, 16], +[260956, 714, 6, 23], +[261027, 714, 9, 2], +[261108, 714, 11, 22], +[261297, 715, 5, 30], +[261460, 715, 11, 9], +[261654, 716, 5, 21], +[261672, 716, 6, 8], +[261774, 716, 9, 18], +[261919, 717, 2, 10], +[262069, 717, 7, 10], +[262263, 718, 1, 20], +[262395, 718, 6, 1], +[262534, 718, 10, 18], +[262590, 718, 12, 13], +[262750, 719, 5, 22], +[262779, 719, 6, 20], +[262954, 719, 12, 12], +[263036, 720, 3, 3], +[263072, 720, 4, 8], +[263198, 720, 8, 12], +[263303, 720, 11, 25], +[263361, 721, 1, 22], +[263362, 721, 1, 23], +[263552, 721, 8, 1], +[263746, 722, 2, 11], +[263890, 722, 7, 5], +[264078, 723, 1, 9], +[264254, 723, 7, 4], +[264314, 723, 9, 2], +[264508, 724, 3, 14], +[264673, 724, 8, 26], +[264830, 725, 1, 30], +[264910, 725, 4, 20], +[264941, 725, 5, 21], +[265038, 725, 8, 26], +[265203, 726, 2, 7], +[265308, 726, 5, 23], +[265416, 726, 9, 8], +[265542, 727, 1, 12], +[265659, 727, 5, 
9], +[265759, 727, 8, 17], +[265883, 727, 12, 19], +[266018, 728, 5, 2], +[266030, 728, 5, 14], +[266132, 728, 8, 24], +[266177, 728, 10, 8], +[266237, 728, 12, 7], +[266307, 729, 2, 15], +[266483, 729, 8, 10], +[266501, 729, 8, 28], +[266512, 729, 9, 8], +[266605, 729, 12, 10], +[266634, 730, 1, 8], +[266756, 730, 5, 10], +[266867, 730, 8, 29], +[267036, 731, 2, 14], +[267139, 731, 5, 28], +[267287, 731, 10, 23], +[267332, 731, 12, 7], +[267418, 732, 3, 2], +[267613, 732, 9, 13], +[267756, 733, 2, 3], +[267829, 733, 4, 17], +[267834, 733, 4, 22], +[267914, 733, 7, 11], +[268059, 733, 12, 3], +[268198, 734, 4, 21], +[268240, 734, 6, 2], +[268293, 734, 7, 25], +[268320, 734, 8, 21], +[268433, 734, 12, 12], +[268459, 735, 1, 7], +[268537, 735, 3, 26], +[268648, 735, 7, 15], +[268756, 735, 10, 31], +[268801, 735, 12, 15], +[268805, 735, 12, 19], +[268998, 736, 6, 29], +[269162, 736, 12, 10], +[269292, 737, 4, 19], +[269387, 737, 7, 23], +[269466, 737, 10, 10], +[269513, 737, 11, 26], +[269657, 738, 4, 19], +[269796, 738, 9, 5], +[269960, 739, 2, 16], +[270156, 739, 8, 31], +[270251, 739, 12, 4], +[270276, 739, 12, 29], +[270380, 740, 4, 11], +[270473, 740, 7, 13], +[270614, 740, 12, 1], +[270724, 741, 3, 21], +[270807, 741, 6, 12], +[270881, 741, 8, 25], +[271014, 742, 1, 5], +[271027, 742, 1, 18], +[271058, 742, 2, 18], +[271119, 742, 4, 20], +[271206, 742, 7, 16], +[271358, 742, 12, 15], +[271496, 743, 5, 2], +[271681, 743, 11, 3], +[271803, 744, 3, 4], +[271929, 744, 7, 8], +[272071, 744, 11, 27], +[272175, 745, 3, 11], +[272257, 745, 6, 1], +[272419, 745, 11, 10], +[272491, 746, 1, 21], +[272588, 746, 4, 28], +[272711, 746, 8, 29], +[272738, 746, 9, 25], +[272758, 746, 10, 15], +[272927, 747, 4, 2], +[273076, 747, 8, 29], +[273258, 748, 2, 27], +[273379, 748, 6, 27], +[273459, 748, 9, 15], +[273636, 749, 3, 11], +[273756, 749, 7, 9], +[273829, 749, 9, 20], +[274000, 750, 3, 10], +[274146, 750, 8, 3], +[274148, 750, 8, 5], +[274235, 750, 10, 31], +[274368, 751, 3, 
13], +[274393, 751, 4, 7], +[274574, 751, 10, 5], +[274667, 752, 1, 6], +[274736, 752, 3, 15], +[274784, 752, 5, 2], +[274934, 752, 9, 29], +[274980, 752, 11, 14], +[275006, 752, 12, 10], +[275030, 753, 1, 3], +[275202, 753, 6, 24], +[275333, 753, 11, 2], +[275433, 754, 2, 10], +[275607, 754, 8, 3], +[275774, 755, 1, 17], +[275837, 755, 3, 21], +[275843, 755, 3, 27], +[276018, 755, 9, 18], +[276165, 756, 2, 12], +[276226, 756, 4, 13], +[276397, 756, 10, 1], +[276526, 757, 2, 7], +[276698, 757, 7, 29], +[276878, 758, 1, 25], +[276911, 758, 2, 27], +[277080, 758, 8, 15], +[277280, 759, 3, 3], +[277325, 759, 4, 17], +[277432, 759, 8, 2], +[277583, 759, 12, 31], +[277723, 760, 5, 19], +[277895, 760, 11, 7], +[277962, 761, 1, 13], +[277974, 761, 1, 25], +[278109, 761, 6, 9], +[278257, 761, 11, 4], +[278313, 761, 12, 30], +[278363, 762, 2, 18], +[278533, 762, 8, 7], +[278590, 762, 10, 3], +[278784, 763, 4, 15], +[278927, 763, 9, 5], +[279125, 764, 3, 21], +[279254, 764, 7, 28], +[279321, 764, 10, 3], +[279471, 765, 3, 2], +[279641, 765, 8, 19], +[279841, 766, 3, 7], +[279975, 766, 7, 19], +[279992, 766, 8, 5], +[280138, 766, 12, 29], +[280183, 767, 2, 12], +[280358, 767, 8, 6], +[280412, 767, 9, 29], +[280467, 767, 11, 23], +[280622, 768, 4, 26], +[280716, 768, 7, 29], +[280914, 769, 2, 12], +[281027, 769, 6, 5], +[281110, 769, 8, 27], +[281186, 769, 11, 11], +[281299, 770, 3, 4], +[281353, 770, 4, 27], +[281384, 770, 5, 28], +[281466, 770, 8, 18], +[281643, 771, 2, 11], +[281666, 771, 3, 6], +[281739, 771, 5, 18], +[281756, 771, 6, 4], +[281822, 771, 8, 9], +[281865, 771, 9, 21], +[281873, 771, 9, 29], +[281915, 771, 11, 10], +[281931, 771, 11, 26], +[281989, 772, 1, 23], +[282160, 772, 7, 12], +[282242, 772, 10, 2], +[282396, 773, 3, 5], +[282481, 773, 5, 29], +[282585, 773, 9, 10], +[282746, 774, 2, 18], +[282924, 774, 8, 15], +[283005, 774, 11, 4], +[283146, 775, 3, 25], +[283235, 775, 6, 22], +[283363, 775, 10, 28], +[283460, 776, 2, 2], +[283562, 776, 5, 14], 
+[283645, 776, 8, 5], +[283696, 776, 9, 25], +[283827, 777, 2, 3], +[283998, 777, 7, 24], +[284129, 777, 12, 2], +[284156, 777, 12, 29], +[284326, 778, 6, 17], +[284394, 778, 8, 24], +[284474, 778, 11, 12], +[284615, 779, 4, 2], +[284641, 779, 4, 28], +[284644, 779, 5, 1], +[284801, 779, 10, 5], +[284949, 780, 3, 1], +[285065, 780, 6, 25], +[285197, 780, 11, 4], +[285234, 780, 12, 11], +[285399, 781, 5, 25], +[285400, 781, 5, 26], +[285444, 781, 7, 9], +[285640, 782, 1, 21], +[285686, 782, 3, 8], +[285862, 782, 8, 31], +[286005, 783, 1, 21], +[286107, 783, 5, 3], +[286117, 783, 5, 13], +[286130, 783, 5, 26], +[286226, 783, 8, 30], +[286250, 783, 9, 23], +[286392, 784, 2, 12], +[286525, 784, 6, 24], +[286713, 784, 12, 29], +[286746, 785, 1, 31], +[286819, 785, 4, 14], +[286830, 785, 4, 25], +[286948, 785, 8, 21], +[287106, 786, 1, 26], +[287219, 786, 5, 19], +[287227, 786, 5, 27], +[287359, 786, 10, 6], +[287401, 786, 11, 17], +[287485, 787, 2, 9], +[287643, 787, 7, 17], +[287759, 787, 11, 10], +[287819, 788, 1, 9], +[287991, 788, 6, 29], +[288064, 788, 9, 10], +[288191, 789, 1, 15], +[288352, 789, 6, 25], +[288517, 789, 12, 7], +[288685, 790, 5, 24], +[288808, 790, 9, 24], +[288854, 790, 11, 9], +[288868, 790, 11, 23], +[288965, 791, 2, 28], +[289163, 791, 9, 14], +[289279, 792, 1, 8], +[289307, 792, 2, 5], +[289444, 792, 6, 21], +[289540, 792, 9, 25], +[289579, 792, 11, 3], +[289708, 793, 3, 12], +[289711, 793, 3, 15], +[289733, 793, 4, 6], +[289870, 793, 8, 21], +[289983, 793, 12, 12], +[290158, 794, 6, 5], +[290356, 794, 12, 20], +[290511, 795, 5, 24], +[290609, 795, 8, 30], +[290641, 795, 10, 1], +[290715, 795, 12, 14], +[290905, 796, 6, 21], +[291014, 796, 10, 8], +[291101, 797, 1, 3], +[291158, 797, 3, 1], +[291187, 797, 3, 30], +[291349, 797, 9, 8], +[291410, 797, 11, 8], +[291455, 797, 12, 23], +[291623, 798, 6, 9], +[291657, 798, 7, 13], +[291687, 798, 8, 12], +[291769, 798, 11, 2], +[291808, 798, 12, 11], +[291943, 799, 4, 25], +[291974, 799, 5, 26], 
+[292076, 799, 9, 5], +[292242, 800, 2, 18], +[292272, 800, 3, 19], +[292348, 800, 6, 3], +[292416, 800, 8, 10], +[292581, 801, 1, 22], +[292647, 801, 3, 29], +[292782, 801, 8, 11], +[292825, 801, 9, 23], +[292868, 801, 11, 5], +[292887, 801, 11, 24], +[292970, 802, 2, 15], +[293001, 802, 3, 18], +[293131, 802, 7, 26], +[293229, 802, 11, 1], +[293285, 802, 12, 27], +[293332, 803, 2, 12], +[293391, 803, 4, 12], +[293407, 803, 4, 28], +[293457, 803, 6, 17], +[293633, 803, 12, 10], +[293740, 804, 3, 26], +[293892, 804, 8, 25], +[293893, 804, 8, 26], +[293896, 804, 8, 29], +[293948, 804, 10, 20], +[294038, 805, 1, 18], +[294158, 805, 5, 18], +[294310, 805, 10, 17], +[294433, 806, 2, 17], +[294603, 806, 8, 6], +[294678, 806, 10, 20], +[294756, 807, 1, 6], +[294885, 807, 5, 15], +[294978, 807, 8, 16], +[295003, 807, 9, 10], +[295198, 808, 3, 23], +[295344, 808, 8, 16], +[295466, 808, 12, 16], +[295646, 809, 6, 14], +[295829, 809, 12, 14], +[295911, 810, 3, 6], +[295953, 810, 4, 17], +[296052, 810, 7, 25], +[296225, 811, 1, 14], +[296312, 811, 4, 11], +[296455, 811, 9, 1], +[296521, 811, 11, 6], +[296700, 812, 5, 3], +[296866, 812, 10, 16], +[296892, 812, 11, 11], +[296983, 813, 2, 10], +[297158, 813, 8, 4], +[297259, 813, 11, 13], +[297407, 814, 4, 10], +[297426, 814, 4, 29], +[297620, 814, 11, 9], +[297625, 814, 11, 14], +[297814, 815, 5, 22], +[297938, 815, 9, 23], +[298079, 816, 2, 11], +[298204, 816, 6, 15], +[298277, 816, 8, 27], +[298408, 817, 1, 5], +[298510, 817, 4, 17], +[298656, 817, 9, 10], +[298840, 818, 3, 13], +[298876, 818, 4, 18], +[298993, 818, 8, 13], +[299145, 819, 1, 12], +[299245, 819, 4, 22], +[299389, 819, 9, 13], +[299564, 820, 3, 6], +[299595, 820, 4, 6], +[299623, 820, 5, 4], +[299742, 820, 8, 31], +[299911, 821, 2, 16], +[300100, 821, 8, 24], +[300224, 821, 12, 26], +[300395, 822, 6, 15], +[300487, 822, 9, 15], +[300546, 822, 11, 13], +[300697, 823, 4, 13], +[300753, 823, 6, 8], +[300819, 823, 8, 13], +[301017, 824, 2, 27], +[301102, 824, 5, 
22], +[301259, 824, 10, 26], +[301352, 825, 1, 27], +[301426, 825, 4, 11], +[301500, 825, 6, 24], +[301612, 825, 10, 14], +[301639, 825, 11, 10], +[301667, 825, 12, 8], +[301683, 825, 12, 24], +[301870, 826, 6, 29], +[301901, 826, 7, 30], +[301986, 826, 10, 23], +[302035, 826, 12, 11], +[302100, 827, 2, 14], +[302201, 827, 5, 26], +[302333, 827, 10, 5], +[302500, 828, 3, 20], +[302666, 828, 9, 2], +[302712, 828, 10, 18], +[302811, 829, 1, 25], +[302889, 829, 4, 13], +[302986, 829, 7, 19], +[303163, 830, 1, 12], +[303313, 830, 6, 11], +[303471, 830, 11, 16], +[303510, 830, 12, 25], +[303536, 831, 1, 20], +[303712, 831, 7, 15], +[303852, 831, 12, 2], +[303953, 832, 3, 12], +[304021, 832, 5, 19], +[304060, 832, 6, 27], +[304085, 832, 7, 22], +[304164, 832, 10, 9], +[304299, 833, 2, 21], +[304336, 833, 3, 30], +[304367, 833, 4, 30], +[304447, 833, 7, 19], +[304585, 833, 12, 4], +[304624, 834, 1, 12], +[304789, 834, 6, 26], +[304959, 834, 12, 13], +[305001, 835, 1, 24], +[305191, 835, 8, 2], +[305228, 835, 9, 8], +[305402, 836, 2, 29], +[305451, 836, 4, 18], +[305503, 836, 6, 9], +[305554, 836, 7, 30], +[305563, 836, 8, 8], +[305618, 836, 10, 2], +[305652, 836, 11, 5], +[305680, 836, 12, 3], +[305719, 837, 1, 11], +[305751, 837, 2, 12], +[305799, 837, 4, 1], +[305898, 837, 7, 9], +[306069, 837, 12, 27], +[306113, 838, 2, 9], +[306236, 838, 6, 12], +[306292, 838, 8, 7], +[306464, 839, 1, 26], +[306555, 839, 4, 27], +[306568, 839, 5, 10], +[306669, 839, 8, 19], +[306845, 840, 2, 11], +[307042, 840, 8, 26], +[307225, 841, 2, 25], +[307354, 841, 7, 4], +[307361, 841, 7, 11], +[307422, 841, 9, 10], +[307542, 842, 1, 8], +[307705, 842, 6, 20], +[307887, 842, 12, 19], +[307933, 843, 2, 3], +[308063, 843, 6, 13], +[308235, 843, 12, 2], +[308392, 844, 5, 7], +[308419, 844, 6, 3], +[308559, 844, 10, 21], +[308583, 844, 11, 14], +[308639, 845, 1, 9], +[308792, 845, 6, 11], +[308893, 845, 9, 20], +[309020, 846, 1, 25], +[309057, 846, 3, 3], +[309130, 846, 5, 15], +[309175, 846, 6, 
29], +[309373, 847, 1, 13], +[309472, 847, 4, 22], +[309541, 847, 6, 30], +[309571, 847, 7, 30], +[309748, 848, 1, 23], +[309923, 848, 7, 16], +[310015, 848, 10, 16], +[310104, 849, 1, 13], +[310209, 849, 4, 28], +[310218, 849, 5, 7], +[310314, 849, 8, 11], +[310352, 849, 9, 18], +[310438, 849, 12, 13], +[310463, 850, 1, 7], +[310468, 850, 1, 12], +[310597, 850, 5, 21], +[310754, 850, 10, 25], +[310837, 851, 1, 16], +[310994, 851, 6, 22], +[311169, 851, 12, 14], +[311357, 852, 6, 19], +[311438, 852, 9, 8], +[311635, 853, 3, 24], +[311816, 853, 9, 21], +[311823, 853, 9, 28], +[311961, 854, 2, 13], +[312065, 854, 5, 28], +[312227, 854, 11, 6], +[312406, 855, 5, 4], +[312493, 855, 7, 30], +[312554, 855, 9, 29], +[312602, 855, 11, 16], +[312759, 856, 4, 21], +[312906, 856, 9, 15], +[312912, 856, 9, 21], +[312962, 856, 11, 10], +[313086, 857, 3, 14], +[313206, 857, 7, 12], +[313298, 857, 10, 12], +[313362, 857, 12, 15], +[313497, 858, 4, 29], +[313617, 858, 8, 27], +[313796, 859, 2, 22], +[313962, 859, 8, 7], +[314041, 859, 10, 25], +[314077, 859, 11, 30], +[314178, 860, 3, 10], +[314253, 860, 5, 24], +[314377, 860, 9, 25], +[314391, 860, 10, 9], +[314455, 860, 12, 12], +[314614, 861, 5, 20], +[314785, 861, 11, 7], +[314863, 862, 1, 24], +[314996, 862, 6, 6], +[315049, 862, 7, 29], +[315109, 862, 9, 27], +[315251, 863, 2, 16], +[315437, 863, 8, 21], +[315569, 863, 12, 31], +[315624, 864, 2, 24], +[315778, 864, 7, 27], +[315887, 864, 11, 13], +[316055, 865, 4, 30], +[316174, 865, 8, 27], +[316210, 865, 10, 2], +[316322, 866, 1, 22], +[316455, 866, 6, 4], +[316485, 866, 7, 4], +[316538, 866, 8, 26], +[316621, 866, 11, 17], +[316748, 867, 3, 24], +[316939, 867, 10, 1], +[317056, 868, 1, 26], +[317239, 868, 7, 27], +[317316, 868, 10, 12], +[317367, 868, 12, 2], +[317454, 869, 2, 27], +[317504, 869, 4, 18], +[317560, 869, 6, 13], +[317577, 869, 6, 30], +[317675, 869, 10, 6], +[317733, 869, 12, 3], +[317759, 869, 12, 29], +[317950, 870, 7, 8], +[317959, 870, 7, 17], +[318098, 
870, 12, 3], +[318233, 871, 4, 17], +[318329, 871, 7, 22], +[318511, 872, 1, 20], +[318561, 872, 3, 10], +[318589, 872, 4, 7], +[318767, 872, 10, 2], +[318900, 873, 2, 12], +[318973, 873, 4, 26], +[319011, 873, 6, 3], +[319113, 873, 9, 13], +[319249, 874, 1, 27], +[319367, 874, 5, 25], +[319503, 874, 10, 8], +[319528, 874, 11, 2], +[319648, 875, 3, 2], +[319670, 875, 3, 24], +[319822, 875, 8, 23], +[320012, 876, 2, 29], +[320122, 876, 6, 18], +[320300, 876, 12, 13], +[320481, 877, 6, 12], +[320532, 877, 8, 2], +[320712, 878, 1, 29], +[320876, 878, 7, 12], +[320880, 878, 7, 16], +[320901, 878, 8, 6], +[321082, 879, 2, 3], +[321236, 879, 7, 7], +[321326, 879, 10, 5], +[321377, 879, 11, 25], +[321381, 879, 11, 29], +[321390, 879, 12, 8], +[321498, 880, 3, 25], +[321650, 880, 8, 24], +[321659, 880, 9, 2], +[321809, 881, 1, 30], +[321894, 881, 4, 25], +[322072, 881, 10, 20], +[322109, 881, 11, 26], +[322248, 882, 4, 14], +[322268, 882, 5, 4], +[322321, 882, 6, 26], +[322386, 882, 8, 30], +[322397, 882, 9, 10], +[322488, 882, 12, 10], +[322623, 883, 4, 24], +[322786, 883, 10, 4], +[322844, 883, 12, 1], +[323028, 884, 6, 2], +[323088, 884, 8, 1], +[323204, 884, 11, 25], +[323402, 885, 6, 11], +[323487, 885, 9, 4], +[323665, 886, 3, 1], +[323856, 886, 9, 8], +[323900, 886, 10, 22], +[323957, 886, 12, 18], +[324135, 887, 6, 14], +[324333, 887, 12, 29], +[324511, 888, 6, 24], +[324671, 888, 12, 1], +[324763, 889, 3, 3], +[324870, 889, 6, 18], +[324996, 889, 10, 22], +[325049, 889, 12, 14], +[325054, 889, 12, 19], +[325059, 889, 12, 24], +[325079, 890, 1, 13], +[325214, 890, 5, 28], +[325364, 890, 10, 25], +[325449, 891, 1, 18], +[325512, 891, 3, 22], +[325581, 891, 5, 30], +[325717, 891, 10, 13], +[325784, 891, 12, 19], +[325832, 892, 2, 5], +[325843, 892, 2, 16], +[326021, 892, 8, 12], +[326188, 893, 1, 26], +[326273, 893, 4, 21], +[326355, 893, 7, 12], +[326363, 893, 7, 20], +[326523, 893, 12, 27], +[326545, 894, 1, 18], +[326636, 894, 4, 19], +[326766, 894, 8, 27], 
+[326918, 895, 1, 26], +[326924, 895, 2, 1], +[327104, 895, 7, 31], +[327195, 895, 10, 30], +[327364, 896, 4, 16], +[327547, 896, 10, 16], +[327708, 897, 3, 26], +[327894, 897, 9, 28], +[328063, 898, 3, 16], +[328129, 898, 5, 21], +[328287, 898, 10, 26], +[328465, 899, 4, 22], +[328471, 899, 4, 28], +[328539, 899, 7, 5], +[328601, 899, 9, 5], +[328730, 900, 1, 12], +[328903, 900, 7, 4], +[329011, 900, 10, 20], +[329075, 900, 12, 23], +[329097, 901, 1, 14], +[329256, 901, 6, 22], +[329315, 901, 8, 20], +[329502, 902, 2, 23], +[329618, 902, 6, 19], +[329812, 902, 12, 30], +[329815, 903, 1, 2], +[329958, 903, 5, 25], +[330143, 903, 11, 26], +[330288, 904, 4, 19], +[330367, 904, 7, 7], +[330438, 904, 9, 16], +[330566, 905, 1, 22], +[330755, 905, 7, 30], +[330759, 905, 8, 3], +[330781, 905, 8, 25], +[330934, 906, 1, 25], +[331037, 906, 5, 8], +[331040, 906, 5, 11], +[331196, 906, 10, 14], +[331219, 906, 11, 6], +[331237, 906, 11, 24], +[331384, 907, 4, 20], +[331548, 907, 10, 1], +[331677, 908, 2, 7], +[331817, 908, 6, 26], +[331843, 908, 7, 22], +[331873, 908, 8, 21], +[331886, 908, 9, 3], +[331972, 908, 11, 28], +[332145, 909, 5, 20], +[332223, 909, 8, 6], +[332371, 910, 1, 1], +[332570, 910, 7, 19], +[332766, 911, 1, 31], +[332892, 911, 6, 6], +[333046, 911, 11, 7], +[333066, 911, 11, 27], +[333100, 911, 12, 31], +[333188, 912, 3, 28], +[333343, 912, 8, 30], +[333484, 913, 1, 18], +[333630, 913, 6, 13], +[333759, 913, 10, 20], +[333871, 914, 2, 9], +[333993, 914, 6, 11], +[334088, 914, 9, 14], +[334202, 915, 1, 6], +[334211, 915, 1, 15], +[334382, 915, 7, 5], +[334450, 915, 9, 11], +[334568, 916, 1, 7], +[334670, 916, 4, 18], +[334697, 916, 5, 15], +[334790, 916, 8, 16], +[334982, 917, 2, 24], +[335177, 917, 9, 7], +[335198, 917, 9, 28], +[335254, 917, 11, 23], +[335427, 918, 5, 15], +[335530, 918, 8, 26], +[335683, 919, 1, 26], +[335813, 919, 6, 5], +[335972, 919, 11, 11], +[336045, 920, 1, 23], +[336046, 920, 1, 24], +[336160, 920, 5, 17], +[336230, 920, 7, 26], 
+[336292, 920, 9, 26], +[336357, 920, 11, 30], +[336382, 920, 12, 25], +[336409, 921, 1, 21], +[336451, 921, 3, 4], +[336472, 921, 3, 25], +[336505, 921, 4, 27], +[336682, 921, 10, 21], +[336707, 921, 11, 15], +[336764, 922, 1, 11], +[336901, 922, 5, 28], +[336948, 922, 7, 14], +[336960, 922, 7, 26], +[336972, 922, 8, 7], +[337029, 922, 10, 3], +[337072, 922, 11, 15], +[337200, 923, 3, 23], +[337389, 923, 9, 28], +[337534, 924, 2, 20], +[337707, 924, 8, 11], +[337719, 924, 8, 23], +[337755, 924, 9, 28], +[337796, 924, 11, 8], +[337861, 925, 1, 12], +[338051, 925, 7, 21], +[338134, 925, 10, 12], +[338218, 926, 1, 4], +[338325, 926, 4, 21], +[338345, 926, 5, 11], +[338425, 926, 7, 30], +[338575, 926, 12, 27], +[338696, 927, 4, 27], +[338758, 927, 6, 28], +[338893, 927, 11, 10], +[338981, 928, 2, 6], +[339179, 928, 8, 22], +[339281, 928, 12, 2], +[339344, 929, 2, 3], +[339476, 929, 6, 15], +[339522, 929, 7, 31], +[339633, 929, 11, 19], +[339692, 930, 1, 17], +[339846, 930, 6, 20], +[339857, 930, 7, 1], +[340027, 930, 12, 18], +[340135, 931, 4, 5], +[340167, 931, 5, 7], +[340190, 931, 5, 30], +[340385, 931, 12, 11], +[340506, 932, 4, 10], +[340553, 932, 5, 27], +[340699, 932, 10, 20], +[340770, 932, 12, 30], +[340811, 933, 2, 9], +[340976, 933, 7, 24], +[341153, 934, 1, 17], +[341232, 934, 4, 6], +[341345, 934, 7, 28], +[341456, 934, 11, 16], +[341469, 934, 11, 29], +[341549, 935, 2, 17], +[341656, 935, 6, 4], +[341703, 935, 7, 21], +[341895, 936, 1, 29], +[342028, 936, 6, 10], +[342072, 936, 7, 24], +[342167, 936, 10, 27], +[342317, 937, 3, 26], +[342412, 937, 6, 29], +[342480, 937, 9, 5], +[342663, 938, 3, 7], +[342664, 938, 3, 8], +[342854, 938, 9, 14], +[343032, 939, 3, 11], +[343067, 939, 4, 15], +[343082, 939, 4, 30], +[343135, 939, 6, 22], +[343157, 939, 7, 14], +[343305, 939, 12, 9], +[343346, 940, 1, 19], +[343512, 940, 7, 3], +[343682, 940, 12, 20], +[343775, 941, 3, 23], +[343785, 941, 4, 2], +[343960, 941, 9, 24], +[344005, 941, 11, 8], +[344156, 942, 4, 
8], +[344189, 942, 5, 11], +[344348, 942, 10, 17], +[344521, 943, 4, 8], +[344538, 943, 4, 25], +[344614, 943, 7, 10], +[344791, 944, 1, 3], +[344827, 944, 2, 8], +[344957, 944, 6, 17], +[345107, 944, 11, 14], +[345158, 945, 1, 4], +[345303, 945, 5, 29], +[345469, 945, 11, 11], +[345556, 946, 2, 6], +[345558, 946, 2, 8], +[345737, 946, 8, 6], +[345756, 946, 8, 25], +[345770, 946, 9, 8], +[345798, 946, 10, 6], +[345972, 947, 3, 29], +[346053, 947, 6, 18], +[346171, 947, 10, 14], +[346336, 948, 3, 27], +[346533, 948, 10, 10], +[346570, 948, 11, 16], +[346649, 949, 2, 3], +[346798, 949, 7, 2], +[346919, 949, 10, 31], +[347015, 950, 2, 4], +[347130, 950, 5, 30], +[347214, 950, 8, 22], +[347344, 950, 12, 30], +[347439, 951, 4, 4], +[347442, 951, 4, 7], +[347633, 951, 10, 15], +[347753, 952, 2, 12], +[347843, 952, 5, 12], +[347872, 952, 6, 10], +[347884, 952, 6, 22], +[348036, 952, 11, 21], +[348059, 952, 12, 14], +[348096, 953, 1, 20], +[348143, 953, 3, 8], +[348310, 953, 8, 22], +[348374, 953, 10, 25], +[348506, 954, 3, 6], +[348639, 954, 7, 17], +[348670, 954, 8, 17], +[348839, 955, 2, 2], +[348954, 955, 5, 28], +[348959, 955, 6, 2], +[349059, 955, 9, 10], +[349141, 955, 12, 1], +[349293, 956, 5, 1], +[349361, 956, 7, 8], +[349412, 956, 8, 28], +[349593, 957, 2, 25], +[349631, 957, 4, 4], +[349810, 957, 9, 30], +[349841, 957, 10, 31], +[349994, 958, 4, 2], +[350133, 958, 8, 19], +[350271, 959, 1, 4], +[350353, 959, 3, 27], +[350358, 959, 4, 1], +[350420, 959, 6, 2], +[350431, 959, 6, 13], +[350607, 959, 12, 6], +[350752, 960, 4, 29], +[350894, 960, 9, 18], +[350934, 960, 10, 28], +[350937, 960, 10, 31], +[351125, 961, 5, 7], +[351211, 961, 8, 1], +[351257, 961, 9, 16], +[351405, 962, 2, 11], +[351541, 962, 6, 27], +[351629, 962, 9, 23], +[351742, 963, 1, 14], +[351791, 963, 3, 4], +[351916, 963, 7, 7], +[351921, 963, 7, 12], +[352086, 963, 12, 24], +[352117, 964, 1, 24], +[352193, 964, 4, 9], +[352343, 964, 9, 6], +[352442, 964, 12, 14], +[352632, 965, 6, 22], 
+[352816, 965, 12, 23], +[352893, 966, 3, 10], +[352979, 966, 6, 4], +[353176, 966, 12, 18], +[353259, 967, 3, 11], +[353410, 967, 8, 9], +[353427, 967, 8, 26], +[353430, 967, 8, 29], +[353432, 967, 8, 31], +[353472, 967, 10, 10], +[353646, 968, 4, 1], +[353807, 968, 9, 9], +[353841, 968, 10, 13], +[353944, 969, 1, 24], +[354131, 969, 7, 30], +[354220, 969, 10, 27], +[354399, 970, 4, 24], +[354598, 970, 11, 9], +[354760, 971, 4, 20], +[354874, 971, 8, 12], +[354901, 971, 9, 8], +[355070, 972, 2, 24], +[355228, 972, 7, 31], +[355361, 972, 12, 11], +[355371, 972, 12, 21], +[355481, 973, 4, 10], +[355614, 973, 8, 21], +[355694, 973, 11, 9], +[355789, 974, 2, 12], +[355867, 974, 5, 1], +[355957, 974, 7, 30], +[356009, 974, 9, 20], +[356096, 974, 12, 16], +[356247, 975, 5, 16], +[356259, 975, 5, 28], +[356370, 975, 9, 16], +[356461, 975, 12, 16], +[356586, 976, 4, 19], +[356660, 976, 7, 2], +[356779, 976, 10, 29], +[356957, 977, 4, 25], +[357029, 977, 7, 6], +[357151, 977, 11, 5], +[357203, 977, 12, 27], +[357230, 978, 1, 23], +[357328, 978, 5, 1], +[357367, 978, 6, 9], +[357499, 978, 10, 19], +[357567, 978, 12, 26], +[357748, 979, 6, 25], +[357946, 980, 1, 9], +[358095, 980, 6, 6], +[358214, 980, 10, 3], +[358260, 980, 11, 18], +[358442, 981, 5, 19], +[358565, 981, 9, 19], +[358710, 982, 2, 11], +[358877, 982, 7, 28], +[358982, 982, 11, 10], +[359136, 983, 4, 13], +[359298, 983, 9, 22], +[359343, 983, 11, 6], +[359504, 984, 4, 15], +[359506, 984, 4, 17], +[359603, 984, 7, 23], +[359735, 984, 12, 2], +[359848, 985, 3, 25], +[359919, 985, 6, 4], +[360090, 985, 11, 22], +[360176, 986, 2, 16], +[360208, 986, 3, 20], +[360338, 986, 7, 28], +[360510, 987, 1, 16], +[360684, 987, 7, 9], +[360732, 987, 8, 26], +[360765, 987, 9, 28], +[360876, 988, 1, 17], +[361047, 988, 7, 6], +[361084, 988, 8, 12], +[361136, 988, 10, 3], +[361317, 989, 4, 2], +[361488, 989, 9, 20], +[361661, 990, 3, 12], +[361828, 990, 8, 26], +[362005, 991, 2, 19], +[362182, 991, 8, 15], +[362331, 992, 1, 
11], +[362370, 992, 2, 19], +[362416, 992, 4, 5], +[362497, 992, 6, 25], +[362534, 992, 8, 1], +[362615, 992, 10, 21], +[362795, 993, 4, 19], +[362941, 993, 9, 12], +[363086, 994, 2, 4], +[363190, 994, 5, 19], +[363227, 994, 6, 25], +[363390, 994, 12, 5], +[363524, 995, 4, 18], +[363686, 995, 9, 27], +[363882, 996, 4, 10], +[364026, 996, 9, 1], +[364218, 997, 3, 12], +[364257, 997, 4, 20], +[364328, 997, 6, 30], +[364391, 997, 9, 1], +[364416, 997, 9, 26], +[364569, 998, 2, 26], +[364576, 998, 3, 5], +[364670, 998, 6, 7], +[364708, 998, 7, 15], +[364898, 999, 1, 21], +[365086, 999, 7, 28], +[365282, 1000, 2, 9], +[365425, 1000, 7, 2], +[365572, 1000, 11, 26], +[365751, 1001, 5, 24], +[365873, 1001, 9, 23], +[365876, 1001, 9, 26], +[366023, 1002, 2, 20], +[366047, 1002, 3, 16], +[366071, 1002, 4, 9], +[366215, 1002, 8, 31], +[366342, 1003, 1, 5], +[366463, 1003, 5, 6], +[366663, 1003, 11, 22], +[366740, 1004, 2, 7], +[366801, 1004, 4, 8], +[366823, 1004, 4, 30], +[366961, 1004, 9, 15], +[367055, 1004, 12, 18], +[367166, 1005, 4, 8], +[367304, 1005, 8, 24], +[367395, 1005, 11, 23], +[367402, 1005, 11, 30], +[367511, 1006, 3, 19], +[367559, 1006, 5, 6], +[367671, 1006, 8, 26], +[367708, 1006, 10, 2], +[367803, 1007, 1, 5], +[367910, 1007, 4, 22], +[367962, 1007, 6, 13], +[368095, 1007, 10, 24], +[368108, 1007, 11, 6], +[368139, 1007, 12, 7], +[368178, 1008, 1, 15], +[368324, 1008, 6, 9], +[368362, 1008, 7, 17], +[368495, 1008, 11, 27], +[368642, 1009, 4, 23], +[368715, 1009, 7, 5], +[368734, 1009, 7, 24], +[368757, 1009, 8, 16], +[368850, 1009, 11, 17], +[368986, 1010, 4, 2], +[369078, 1010, 7, 3], +[369263, 1011, 1, 4], +[369288, 1011, 1, 29], +[369435, 1011, 6, 25], +[369560, 1011, 10, 28], +[369748, 1012, 5, 3], +[369767, 1012, 5, 22], +[369915, 1012, 10, 17], +[370074, 1013, 3, 25], +[370209, 1013, 8, 7], +[370279, 1013, 10, 16], +[370284, 1013, 10, 21], +[370331, 1013, 12, 7], +[370452, 1014, 4, 7], +[370561, 1014, 7, 25], +[370751, 1015, 1, 31], +[370766, 1015, 
2, 15], +[370788, 1015, 3, 9], +[370972, 1015, 9, 9], +[371037, 1015, 11, 13], +[371158, 1016, 3, 13], +[371162, 1016, 3, 17], +[371238, 1016, 6, 1], +[371253, 1016, 6, 16], +[371310, 1016, 8, 12], +[371368, 1016, 10, 9], +[371517, 1017, 3, 7], +[371535, 1017, 3, 25], +[371605, 1017, 6, 3], +[371640, 1017, 7, 8], +[371676, 1017, 8, 13], +[371686, 1017, 8, 23], +[371801, 1017, 12, 16], +[371903, 1018, 3, 28], +[372077, 1018, 9, 18], +[372236, 1019, 2, 24], +[372322, 1019, 5, 21], +[372333, 1019, 6, 1], +[372450, 1019, 9, 26], +[372480, 1019, 10, 26], +[372680, 1020, 5, 13], +[372757, 1020, 7, 29], +[372881, 1020, 11, 30], +[373058, 1021, 5, 26], +[373163, 1021, 9, 8], +[373256, 1021, 12, 10], +[373405, 1022, 5, 8], +[373457, 1022, 6, 29], +[373498, 1022, 8, 9], +[373519, 1022, 8, 30], +[373708, 1023, 3, 7], +[373724, 1023, 3, 23], +[373895, 1023, 9, 10], +[373941, 1023, 10, 26], +[374102, 1024, 4, 4], +[374301, 1024, 10, 20], +[374342, 1024, 11, 30], +[374479, 1025, 4, 16], +[374661, 1025, 10, 15], +[374696, 1025, 11, 19], +[374711, 1025, 12, 4], +[374806, 1026, 3, 9], +[374931, 1026, 7, 12], +[375121, 1027, 1, 18], +[375213, 1027, 4, 20], +[375360, 1027, 9, 14], +[375373, 1027, 9, 27], +[375567, 1028, 4, 8], +[375642, 1028, 6, 22], +[375705, 1028, 8, 24], +[375898, 1029, 3, 5], +[376013, 1029, 6, 28], +[376144, 1029, 11, 6], +[376164, 1029, 11, 26], +[376239, 1030, 2, 9], +[376312, 1030, 4, 23], +[376430, 1030, 8, 19], +[376593, 1031, 1, 29], +[376769, 1031, 7, 24], +[376837, 1031, 9, 30], +[376902, 1031, 12, 4], +[376970, 1032, 2, 10], +[377122, 1032, 7, 11], +[377261, 1032, 11, 27], +[377392, 1033, 4, 7], +[377528, 1033, 8, 21], +[377690, 1034, 1, 30], +[377732, 1034, 3, 13], +[377793, 1034, 5, 13], +[377902, 1034, 8, 30], +[377998, 1034, 12, 4], +[378005, 1034, 12, 11], +[378125, 1035, 4, 10], +[378283, 1035, 9, 15], +[378339, 1035, 11, 10], +[378487, 1036, 4, 6], +[378547, 1036, 6, 5], +[378730, 1036, 12, 5], +[378892, 1037, 5, 16], +[379084, 1037, 11, 24], 
+[379182, 1038, 3, 2], +[379210, 1038, 3, 30], +[379340, 1038, 8, 7], +[379387, 1038, 9, 23], +[379572, 1039, 3, 27], +[379606, 1039, 4, 30], +[379789, 1039, 10, 30], +[379858, 1040, 1, 7], +[380020, 1040, 6, 17], +[380096, 1040, 9, 1], +[380199, 1040, 12, 13], +[380337, 1041, 4, 30], +[380374, 1041, 6, 6], +[380395, 1041, 6, 27], +[380572, 1041, 12, 21], +[380657, 1042, 3, 16], +[380826, 1042, 9, 1], +[380952, 1043, 1, 5], +[380961, 1043, 1, 14], +[381110, 1043, 6, 12], +[381170, 1043, 8, 11], +[381319, 1044, 1, 7], +[381344, 1044, 2, 1], +[381356, 1044, 2, 13], +[381502, 1044, 7, 8], +[381685, 1045, 1, 7], +[381690, 1045, 1, 12], +[381862, 1045, 7, 3], +[381951, 1045, 9, 30], +[382116, 1046, 3, 14], +[382170, 1046, 5, 7], +[382253, 1046, 7, 29], +[382393, 1046, 12, 16], +[382572, 1047, 6, 13], +[382602, 1047, 7, 13], +[382605, 1047, 7, 16], +[382776, 1048, 1, 3], +[382797, 1048, 1, 24], +[382976, 1048, 7, 21], +[383153, 1049, 1, 14], +[383213, 1049, 3, 15], +[383383, 1049, 9, 1], +[383571, 1050, 3, 8], +[383717, 1050, 8, 1], +[383741, 1050, 8, 25], +[383792, 1050, 10, 15], +[383892, 1051, 1, 23], +[383987, 1051, 4, 28], +[384088, 1051, 8, 7], +[384230, 1051, 12, 27], +[384404, 1052, 6, 18], +[384470, 1052, 8, 23], +[384491, 1052, 9, 13], +[384537, 1052, 10, 29], +[384688, 1053, 3, 29], +[384729, 1053, 5, 9], +[384800, 1053, 7, 19], +[384803, 1053, 7, 22], +[384959, 1053, 12, 25], +[385021, 1054, 2, 25], +[385187, 1054, 8, 10], +[385295, 1054, 11, 26], +[385451, 1055, 5, 1], +[385499, 1055, 6, 18], +[385667, 1055, 12, 3], +[385758, 1056, 3, 3], +[385917, 1056, 8, 9], +[386005, 1056, 11, 5], +[386129, 1057, 3, 9], +[386130, 1057, 3, 10], +[386205, 1057, 5, 24], +[386208, 1057, 5, 27], +[386361, 1057, 10, 27], +[386418, 1057, 12, 23], +[386464, 1058, 2, 7], +[386468, 1058, 2, 11], +[386636, 1058, 7, 29], +[386811, 1059, 1, 20], +[386909, 1059, 4, 28], +[386973, 1059, 7, 1], +[387133, 1059, 12, 8], +[387145, 1059, 12, 20], +[387171, 1060, 1, 15], +[387269, 1060, 4, 
22], +[387467, 1060, 11, 6], +[387572, 1061, 2, 19], +[387702, 1061, 6, 29], +[387747, 1061, 8, 13], +[387800, 1061, 10, 5], +[387972, 1062, 3, 26], +[388073, 1062, 7, 5], +[388150, 1062, 9, 20], +[388155, 1062, 9, 25], +[388319, 1063, 3, 8], +[388472, 1063, 8, 8], +[388611, 1063, 12, 25], +[388631, 1064, 1, 14], +[388796, 1064, 6, 27], +[388962, 1064, 12, 10], +[389101, 1065, 4, 28], +[389292, 1065, 11, 5], +[389417, 1066, 3, 10], +[389571, 1066, 8, 11], +[389754, 1067, 2, 10], +[389922, 1067, 7, 28], +[390023, 1067, 11, 6], +[390197, 1068, 4, 28], +[390203, 1068, 5, 4], +[390348, 1068, 9, 26], +[390493, 1069, 2, 18], +[390647, 1069, 7, 22], +[390703, 1069, 9, 16], +[390706, 1069, 9, 19], +[390748, 1069, 10, 31], +[390853, 1070, 2, 13], +[390961, 1070, 6, 1], +[391050, 1070, 8, 29], +[391106, 1070, 10, 24], +[391239, 1071, 3, 6], +[391279, 1071, 4, 15], +[391302, 1071, 5, 8], +[391364, 1071, 7, 9], +[391562, 1072, 1, 23], +[391565, 1072, 1, 26], +[391745, 1072, 7, 24], +[391871, 1072, 11, 27], +[391983, 1073, 3, 19], +[392131, 1073, 8, 14], +[392323, 1074, 2, 22], +[392420, 1074, 5, 30], +[392430, 1074, 6, 9], +[392596, 1074, 11, 22], +[392637, 1075, 1, 2], +[392748, 1075, 4, 23], +[392856, 1075, 8, 9], +[392918, 1075, 10, 10], +[392947, 1075, 11, 8], +[393123, 1076, 5, 2], +[393312, 1076, 11, 7], +[393373, 1077, 1, 7], +[393442, 1077, 3, 17], +[393599, 1077, 8, 21], +[393619, 1077, 9, 10], +[393770, 1078, 2, 8], +[393794, 1078, 3, 4], +[393932, 1078, 7, 20], +[394107, 1079, 1, 11], +[394265, 1079, 6, 18], +[394345, 1079, 9, 6], +[394496, 1080, 2, 4], +[394589, 1080, 5, 7], +[394620, 1080, 6, 7], +[394773, 1080, 11, 7], +[394811, 1080, 12, 15], +[394923, 1081, 4, 6], +[395109, 1081, 10, 9], +[395192, 1081, 12, 31], +[395200, 1082, 1, 8], +[395315, 1082, 5, 3], +[395337, 1082, 5, 25], +[395490, 1082, 10, 25], +[395573, 1083, 1, 16], +[395657, 1083, 4, 10], +[395722, 1083, 6, 14], +[395760, 1083, 7, 22], +[395790, 1083, 8, 21], +[395869, 1083, 11, 8], +[395989, 
1084, 3, 7], +[396070, 1084, 5, 27], +[396262, 1084, 12, 5], +[396340, 1085, 2, 21], +[396385, 1085, 4, 7], +[396450, 1085, 6, 11], +[396500, 1085, 7, 31], +[396557, 1085, 9, 26], +[396735, 1086, 3, 23], +[396747, 1086, 4, 4], +[396894, 1086, 8, 29], +[396943, 1086, 10, 17], +[396978, 1086, 11, 21], +[397103, 1087, 3, 26], +[397241, 1087, 8, 11], +[397370, 1087, 12, 18], +[397460, 1088, 3, 17], +[397650, 1088, 9, 23], +[397825, 1089, 3, 17], +[397970, 1089, 8, 9], +[398088, 1089, 12, 5], +[398214, 1090, 4, 10], +[398228, 1090, 4, 24], +[398262, 1090, 5, 28], +[398457, 1090, 12, 9], +[398579, 1091, 4, 10], +[398695, 1091, 8, 4], +[398779, 1091, 10, 27], +[398844, 1091, 12, 31], +[398955, 1092, 4, 20], +[398984, 1092, 5, 19], +[399054, 1092, 7, 28], +[399239, 1093, 1, 29], +[399250, 1093, 2, 9], +[399385, 1093, 6, 24], +[399409, 1093, 7, 18], +[399480, 1093, 9, 27], +[399574, 1093, 12, 30], +[399596, 1094, 1, 21], +[399717, 1094, 5, 22], +[399723, 1094, 5, 28], +[399783, 1094, 7, 27], +[399789, 1094, 8, 2], +[399861, 1094, 10, 13], +[399912, 1094, 12, 3], +[400030, 1095, 3, 31], +[400228, 1095, 10, 15], +[400406, 1096, 4, 10], +[400544, 1096, 8, 26], +[400653, 1096, 12, 13], +[400756, 1097, 3, 26], +[400911, 1097, 8, 28], +[400919, 1097, 9, 5], +[400924, 1097, 9, 10], +[400975, 1097, 10, 31], +[401166, 1098, 5, 10], +[401263, 1098, 8, 15], +[401463, 1099, 3, 3], +[401609, 1099, 7, 27], +[401802, 1100, 2, 5], +[401835, 1100, 3, 10], +[401880, 1100, 4, 24], +[402018, 1100, 9, 9], +[402177, 1101, 2, 15], +[402200, 1101, 3, 10], +[402399, 1101, 9, 25], +[402542, 1102, 2, 15], +[402668, 1102, 6, 21], +[402706, 1102, 7, 29], +[402807, 1102, 11, 7], +[402864, 1103, 1, 3], +[403043, 1103, 7, 1], +[403162, 1103, 10, 28], +[403203, 1103, 12, 8], +[403221, 1103, 12, 26], +[403416, 1104, 7, 8], +[403605, 1105, 1, 13], +[403766, 1105, 6, 23], +[403847, 1105, 9, 12], +[403913, 1105, 11, 17], +[404056, 1106, 4, 9], +[404064, 1106, 4, 17], +[404135, 1106, 6, 27], +[404239, 1106, 10, 
9], +[404392, 1107, 3, 11], +[404447, 1107, 5, 5], +[404527, 1107, 7, 24], +[404600, 1107, 10, 5], +[404774, 1108, 3, 27], +[404813, 1108, 5, 5], +[404985, 1108, 10, 24], +[405086, 1109, 2, 2], +[405217, 1109, 6, 13], +[405351, 1109, 10, 25], +[405373, 1109, 11, 16], +[405456, 1110, 2, 7], +[405597, 1110, 6, 28], +[405754, 1110, 12, 2], +[405859, 1111, 3, 17], +[405928, 1111, 5, 25], +[406109, 1111, 11, 22], +[406274, 1112, 5, 5], +[406362, 1112, 8, 1], +[406474, 1112, 11, 21], +[406624, 1113, 4, 20], +[406697, 1113, 7, 2], +[406745, 1113, 8, 19], +[406824, 1113, 11, 6], +[406851, 1113, 12, 3], +[406863, 1113, 12, 15], +[406886, 1114, 1, 7], +[406922, 1114, 2, 12], +[407084, 1114, 7, 24], +[407124, 1114, 9, 2], +[407251, 1115, 1, 7], +[407307, 1115, 3, 4], +[407383, 1115, 5, 19], +[407401, 1115, 6, 6], +[407508, 1115, 9, 21], +[407694, 1116, 3, 25], +[407867, 1116, 9, 14], +[407941, 1116, 11, 27], +[407989, 1117, 1, 14], +[408064, 1117, 3, 30], +[408145, 1117, 6, 19], +[408189, 1117, 8, 2], +[408316, 1117, 12, 7], +[408501, 1118, 6, 10], +[408655, 1118, 11, 11], +[408713, 1119, 1, 8], +[408813, 1119, 4, 18], +[408938, 1119, 8, 21], +[409015, 1119, 11, 6], +[409076, 1120, 1, 6], +[409193, 1120, 5, 2], +[409369, 1120, 10, 25], +[409527, 1121, 4, 1], +[409584, 1121, 5, 28], +[409713, 1121, 10, 4], +[409890, 1122, 3, 30], +[410038, 1122, 8, 25], +[410056, 1122, 9, 12], +[410110, 1122, 11, 5], +[410263, 1123, 4, 7], +[410272, 1123, 4, 16], +[410370, 1123, 7, 23], +[410544, 1124, 1, 13], +[410718, 1124, 7, 5], +[410806, 1124, 10, 1], +[410833, 1124, 10, 28], +[410960, 1125, 3, 4], +[411031, 1125, 5, 14], +[411134, 1125, 8, 25], +[411209, 1125, 11, 8], +[411300, 1126, 2, 7], +[411373, 1126, 4, 21], +[411378, 1126, 4, 26], +[411397, 1126, 5, 15], +[411556, 1126, 10, 21], +[411739, 1127, 4, 22], +[411923, 1127, 10, 23], +[411968, 1127, 12, 7], +[412052, 1128, 2, 29], +[412066, 1128, 3, 14], +[412149, 1128, 6, 5], +[412308, 1128, 11, 11], +[412367, 1129, 1, 9], +[412515, 
1129, 6, 6], +[412674, 1129, 11, 12], +[412861, 1130, 5, 18], +[412867, 1130, 5, 24], +[412956, 1130, 8, 21], +[413015, 1130, 10, 19], +[413190, 1131, 4, 12], +[413353, 1131, 9, 22], +[413466, 1132, 1, 13], +[413542, 1132, 3, 29], +[413670, 1132, 8, 4], +[413828, 1133, 1, 9], +[414001, 1133, 7, 1], +[414030, 1133, 7, 30], +[414109, 1133, 10, 17], +[414227, 1134, 2, 12], +[414301, 1134, 4, 27], +[414341, 1134, 6, 6], +[414540, 1134, 12, 22], +[414577, 1135, 1, 28], +[414626, 1135, 3, 18], +[414648, 1135, 4, 9], +[414829, 1135, 10, 7], +[414929, 1136, 1, 15], +[415050, 1136, 5, 15], +[415134, 1136, 8, 7], +[415333, 1137, 2, 22], +[415377, 1137, 4, 7], +[415474, 1137, 7, 13], +[415536, 1137, 9, 13], +[415646, 1138, 1, 1], +[415753, 1138, 4, 18], +[415904, 1138, 9, 16], +[415946, 1138, 10, 28], +[416016, 1139, 1, 6], +[416034, 1139, 1, 24], +[416160, 1139, 5, 30], +[416191, 1139, 6, 30], +[416308, 1139, 10, 25], +[416419, 1140, 2, 13], +[416453, 1140, 3, 18], +[416540, 1140, 6, 13], +[416732, 1140, 12, 22], +[416810, 1141, 3, 10], +[416995, 1141, 9, 11], +[417001, 1141, 9, 17], +[417102, 1141, 12, 27], +[417277, 1142, 6, 20], +[417312, 1142, 7, 25], +[417317, 1142, 7, 30], +[417412, 1142, 11, 2], +[417518, 1143, 2, 16], +[417523, 1143, 2, 21], +[417690, 1143, 8, 7], +[417841, 1144, 1, 5], +[417854, 1144, 1, 18], +[417984, 1144, 5, 27], +[418036, 1144, 7, 18], +[418054, 1144, 8, 5], +[418148, 1144, 11, 7], +[418331, 1145, 5, 9], +[418381, 1145, 6, 28], +[418563, 1145, 12, 27], +[418641, 1146, 3, 15], +[418806, 1146, 8, 27], +[418849, 1146, 10, 9], +[418864, 1146, 10, 24], +[419037, 1147, 4, 15], +[419132, 1147, 7, 19], +[419290, 1147, 12, 24], +[419374, 1148, 3, 17], +[419413, 1148, 4, 25], +[419457, 1148, 6, 8], +[419487, 1148, 7, 8], +[419645, 1148, 12, 13], +[419792, 1149, 5, 9], +[419873, 1149, 7, 29], +[419968, 1149, 11, 1], +[419991, 1149, 11, 24], +[420104, 1150, 3, 17], +[420173, 1150, 5, 25], +[420347, 1150, 11, 15], +[420394, 1151, 1, 1], +[420549, 1151, 6, 
5], +[420600, 1151, 7, 26], +[420646, 1151, 9, 10], +[420726, 1151, 11, 29], +[420891, 1152, 5, 12], +[421025, 1152, 9, 23], +[421199, 1153, 3, 16], +[421201, 1153, 3, 18], +[421279, 1153, 6, 4], +[421316, 1153, 7, 11], +[421460, 1153, 12, 2], +[421508, 1154, 1, 19], +[421562, 1154, 3, 14], +[421607, 1154, 4, 28], +[421774, 1154, 10, 12], +[421776, 1154, 10, 14], +[421899, 1155, 2, 14], +[421955, 1155, 4, 11], +[422006, 1155, 6, 1], +[422200, 1155, 12, 12], +[422215, 1155, 12, 27], +[422345, 1156, 5, 5], +[422463, 1156, 8, 31], +[422628, 1157, 2, 12], +[422741, 1157, 6, 5], +[422921, 1157, 12, 2], +[423079, 1158, 5, 9], +[423256, 1158, 11, 2], +[423418, 1159, 4, 13], +[423565, 1159, 9, 7], +[423677, 1159, 12, 28], +[423691, 1160, 1, 11], +[423818, 1160, 5, 17], +[423850, 1160, 6, 18], +[424013, 1160, 11, 28], +[424044, 1160, 12, 29], +[424064, 1161, 1, 18], +[424194, 1161, 5, 28], +[424314, 1161, 9, 25], +[424427, 1162, 1, 16], +[424431, 1162, 1, 20], +[424631, 1162, 8, 8], +[424681, 1162, 9, 27], +[424757, 1162, 12, 12], +[424929, 1163, 6, 2], +[424980, 1163, 7, 23], +[425155, 1164, 1, 14], +[425337, 1164, 7, 14], +[425454, 1164, 11, 8], +[425464, 1164, 11, 18], +[425494, 1164, 12, 18], +[425690, 1165, 7, 2], +[425875, 1166, 1, 3], +[426058, 1166, 7, 5], +[426226, 1166, 12, 20], +[426386, 1167, 5, 29], +[426572, 1167, 12, 1], +[426603, 1168, 1, 1], +[426706, 1168, 4, 13], +[426849, 1168, 9, 3], +[426942, 1168, 12, 5], +[426992, 1169, 1, 24], +[427125, 1169, 6, 6], +[427152, 1169, 7, 3], +[427350, 1170, 1, 17], +[427402, 1170, 3, 10], +[427537, 1170, 7, 23], +[427678, 1170, 12, 11], +[427686, 1170, 12, 19], +[427842, 1171, 5, 24], +[427929, 1171, 8, 19], +[428115, 1172, 2, 21], +[428287, 1172, 8, 11], +[428378, 1172, 11, 10], +[428427, 1172, 12, 29], +[428542, 1173, 4, 23], +[428551, 1173, 5, 2], +[428687, 1173, 9, 15], +[428843, 1174, 2, 18], +[428922, 1174, 5, 8], +[428960, 1174, 6, 15], +[429052, 1174, 9, 15], +[429082, 1174, 10, 15], +[429247, 1175, 3, 29], 
+[429391, 1175, 8, 20], +[429554, 1176, 1, 30], +[429732, 1176, 7, 26], +[429930, 1177, 2, 9], +[429974, 1177, 3, 25], +[430004, 1177, 4, 24], +[430022, 1177, 5, 12], +[430044, 1177, 6, 3], +[430122, 1177, 8, 20], +[430277, 1178, 1, 22], +[430333, 1178, 3, 19], +[430392, 1178, 5, 17], +[430403, 1178, 5, 28], +[430521, 1178, 9, 23], +[430627, 1179, 1, 7], +[430703, 1179, 3, 24], +[430734, 1179, 4, 24], +[430771, 1179, 5, 31], +[430884, 1179, 9, 21], +[430947, 1179, 11, 23], +[431015, 1180, 1, 30], +[431075, 1180, 3, 30], +[431194, 1180, 7, 27], +[431369, 1181, 1, 18], +[431407, 1181, 2, 25], +[431503, 1181, 6, 1], +[431649, 1181, 10, 25], +[431778, 1182, 3, 3], +[431845, 1182, 5, 9], +[431983, 1182, 9, 24], +[432093, 1183, 1, 12], +[432234, 1183, 6, 2], +[432269, 1183, 7, 7], +[432461, 1184, 1, 15], +[432614, 1184, 6, 16], +[432741, 1184, 10, 21], +[432934, 1185, 5, 2], +[433049, 1185, 8, 25], +[433233, 1186, 2, 25], +[433267, 1186, 3, 31], +[433358, 1186, 6, 30], +[433557, 1187, 1, 15], +[433754, 1187, 7, 31], +[433914, 1188, 1, 7], +[434104, 1188, 7, 15], +[434134, 1188, 8, 14], +[434164, 1188, 9, 13], +[434267, 1188, 12, 25], +[434402, 1189, 5, 9], +[434601, 1189, 11, 24], +[434694, 1190, 2, 25], +[434700, 1190, 3, 3], +[434841, 1190, 7, 22], +[435020, 1191, 1, 17], +[435117, 1191, 4, 24], +[435166, 1191, 6, 12], +[435325, 1191, 11, 18], +[435400, 1192, 2, 1], +[435509, 1192, 5, 20], +[435679, 1192, 11, 6], +[435865, 1193, 5, 11], +[436063, 1193, 11, 25], +[436131, 1194, 2, 1], +[436300, 1194, 7, 20], +[436326, 1194, 8, 15], +[436487, 1195, 1, 23], +[436687, 1195, 8, 11], +[436736, 1195, 9, 29], +[436919, 1196, 3, 30], +[437009, 1196, 6, 28], +[437178, 1196, 12, 14], +[437353, 1197, 6, 7], +[437379, 1197, 7, 3], +[437489, 1197, 10, 21], +[437492, 1197, 10, 24], +[437512, 1197, 11, 13], +[437562, 1198, 1, 2], +[437758, 1198, 7, 17], +[437866, 1198, 11, 2], +[437985, 1199, 3, 1], +[438078, 1199, 6, 2], +[438157, 1199, 8, 20], +[438180, 1199, 9, 12], +[438351, 1200, 
3, 1], +[438424, 1200, 5, 13], +[438618, 1200, 11, 23], +[438657, 1201, 1, 1], +[438812, 1201, 6, 5], +[438991, 1201, 12, 1], +[439057, 1202, 2, 5], +[439059, 1202, 2, 7], +[439098, 1202, 3, 18], +[439286, 1202, 9, 22], +[439370, 1202, 12, 15], +[439504, 1203, 4, 28], +[439569, 1203, 7, 2], +[439684, 1203, 10, 25], +[439801, 1204, 2, 19], +[439856, 1204, 4, 14], +[439950, 1204, 7, 17], +[440002, 1204, 9, 7], +[440034, 1204, 10, 9], +[440060, 1204, 11, 4], +[440078, 1204, 11, 22], +[440150, 1205, 2, 2], +[440153, 1205, 2, 5], +[440350, 1205, 8, 21], +[440488, 1206, 1, 6], +[440605, 1206, 5, 3], +[440658, 1206, 6, 25], +[440683, 1206, 7, 20], +[440776, 1206, 10, 21], +[440934, 1207, 3, 28], +[441118, 1207, 9, 28], +[441145, 1207, 10, 25], +[441297, 1208, 3, 25], +[441479, 1208, 9, 23], +[441527, 1208, 11, 10], +[441689, 1209, 4, 21], +[441738, 1209, 6, 9], +[441746, 1209, 6, 17], +[441900, 1209, 11, 18], +[442094, 1210, 5, 31], +[442259, 1210, 11, 12], +[442418, 1211, 4, 20], +[442555, 1211, 9, 4], +[442616, 1211, 11, 4], +[442787, 1212, 4, 23], +[442792, 1212, 4, 28], +[442903, 1212, 8, 17], +[442921, 1212, 9, 4], +[442988, 1212, 11, 10], +[443143, 1213, 4, 14], +[443158, 1213, 4, 29], +[443343, 1213, 10, 31], +[443521, 1214, 4, 27], +[443637, 1214, 8, 21], +[443827, 1215, 2, 27], +[443918, 1215, 5, 29], +[443938, 1215, 6, 18], +[443989, 1215, 8, 8], +[444161, 1216, 1, 27], +[444209, 1216, 3, 15], +[444213, 1216, 3, 19], +[444308, 1216, 6, 22], +[444348, 1216, 8, 1], +[444442, 1216, 11, 3], +[444498, 1216, 12, 29], +[444677, 1217, 6, 26], +[444852, 1217, 12, 18], +[445012, 1218, 5, 27], +[445020, 1218, 6, 4], +[445173, 1218, 11, 4], +[445242, 1219, 1, 12], +[445377, 1219, 5, 27], +[445476, 1219, 9, 3], +[445545, 1219, 11, 11], +[445669, 1220, 3, 14], +[445695, 1220, 4, 9], +[445708, 1220, 4, 22], +[445809, 1220, 8, 1], +[445814, 1220, 8, 6], +[445967, 1221, 1, 6], +[445973, 1221, 1, 12], +[446099, 1221, 5, 18], +[446254, 1221, 10, 20], +[446355, 1222, 1, 29], 
+[446464, 1222, 5, 18], +[446602, 1222, 10, 3], +[446727, 1223, 2, 5], +[446812, 1223, 5, 1], +[446898, 1223, 7, 26], +[447024, 1223, 11, 29], +[447084, 1224, 1, 28], +[447254, 1224, 7, 16], +[447365, 1224, 11, 4], +[447475, 1225, 2, 22], +[447668, 1225, 9, 3], +[447865, 1226, 3, 19], +[447904, 1226, 4, 27], +[448059, 1226, 9, 29], +[448210, 1227, 2, 27], +[448211, 1227, 2, 28], +[448280, 1227, 5, 8], +[448353, 1227, 7, 20], +[448529, 1228, 1, 12], +[448558, 1228, 2, 10], +[448695, 1228, 6, 26], +[448719, 1228, 7, 20], +[448772, 1228, 9, 11], +[448773, 1228, 9, 12], +[448923, 1229, 2, 9], +[449085, 1229, 7, 21], +[449247, 1229, 12, 30], +[449386, 1230, 5, 18], +[449483, 1230, 8, 23], +[449538, 1230, 10, 17], +[449645, 1231, 2, 1], +[449686, 1231, 3, 14], +[449794, 1231, 6, 30], +[449994, 1232, 1, 16], +[450139, 1232, 6, 9], +[450155, 1232, 6, 25], +[450286, 1232, 11, 3], +[450406, 1233, 3, 3], +[450419, 1233, 3, 16], +[450617, 1233, 9, 30], +[450744, 1234, 2, 4], +[450891, 1234, 7, 1], +[451088, 1235, 1, 14], +[451186, 1235, 4, 22], +[451356, 1235, 10, 9], +[451447, 1236, 1, 8], +[451566, 1236, 5, 6], +[451642, 1236, 7, 21], +[451744, 1236, 10, 31], +[451874, 1237, 3, 10], +[451892, 1237, 3, 28], +[451994, 1237, 7, 8], +[452148, 1237, 12, 9], +[452219, 1238, 2, 18], +[452254, 1238, 3, 25], +[452374, 1238, 7, 23], +[452393, 1238, 8, 11], +[452509, 1238, 12, 5], +[452569, 1239, 2, 3], +[452721, 1239, 7, 5], +[452902, 1240, 1, 2], +[452964, 1240, 3, 4], +[452971, 1240, 3, 11], +[453150, 1240, 9, 6], +[453326, 1241, 3, 1], +[453514, 1241, 9, 5], +[453634, 1242, 1, 3], +[453829, 1242, 7, 17], +[453900, 1242, 9, 26], +[454044, 1243, 2, 17], +[454126, 1243, 5, 10], +[454129, 1243, 5, 13], +[454222, 1243, 8, 14], +[454326, 1243, 11, 26], +[454524, 1244, 6, 11], +[454536, 1244, 6, 23], +[454680, 1244, 11, 14], +[454829, 1245, 4, 12], +[454875, 1245, 5, 28], +[454974, 1245, 9, 4], +[454985, 1245, 9, 15], +[455148, 1246, 2, 25], +[455192, 1246, 4, 10], +[455266, 1246, 6, 23], 
+[455289, 1246, 7, 16], +[455291, 1246, 7, 18], +[455398, 1246, 11, 2], +[455529, 1247, 3, 13], +[455614, 1247, 6, 6], +[455647, 1247, 7, 9], +[455685, 1247, 8, 16], +[455794, 1247, 12, 3], +[455870, 1248, 2, 17], +[455919, 1248, 4, 6], +[456077, 1248, 9, 11], +[456251, 1249, 3, 4], +[456308, 1249, 4, 30], +[456489, 1249, 10, 28], +[456519, 1249, 11, 27], +[456608, 1250, 2, 24], +[456647, 1250, 4, 4], +[456810, 1250, 9, 14], +[456865, 1250, 11, 8], +[456995, 1251, 3, 18], +[457006, 1251, 3, 29], +[457202, 1251, 10, 11], +[457265, 1251, 12, 13], +[457308, 1252, 1, 25], +[457477, 1252, 7, 12], +[457504, 1252, 8, 8], +[457593, 1252, 11, 5], +[457688, 1253, 2, 8], +[457707, 1253, 2, 27], +[457738, 1253, 3, 30], +[457793, 1253, 5, 24], +[457960, 1253, 11, 7], +[457962, 1253, 11, 9], +[458069, 1254, 2, 24], +[458115, 1254, 4, 11], +[458158, 1254, 5, 24], +[458201, 1254, 7, 6], +[458205, 1254, 7, 10], +[458325, 1254, 11, 7], +[458503, 1255, 5, 4], +[458543, 1255, 6, 13], +[458703, 1255, 11, 20], +[458841, 1256, 4, 6], +[459027, 1256, 10, 9], +[459111, 1257, 1, 1], +[459271, 1257, 6, 10], +[459456, 1257, 12, 12], +[459473, 1257, 12, 29], +[459534, 1258, 2, 28], +[459698, 1258, 8, 11], +[459782, 1258, 11, 3], +[459831, 1258, 12, 22], +[459921, 1259, 3, 22], +[460083, 1259, 8, 31], +[460102, 1259, 9, 19], +[460256, 1260, 2, 20], +[460295, 1260, 3, 30], +[460446, 1260, 8, 28], +[460461, 1260, 9, 12], +[460469, 1260, 9, 20], +[460474, 1260, 9, 25], +[460628, 1261, 2, 26], +[460677, 1261, 4, 16], +[460687, 1261, 4, 26], +[460690, 1261, 4, 29], +[460722, 1261, 5, 31], +[460915, 1261, 12, 10], +[461062, 1262, 5, 6], +[461090, 1262, 6, 3], +[461171, 1262, 8, 23], +[461196, 1262, 9, 17], +[461255, 1262, 11, 15], +[461402, 1263, 4, 11], +[461563, 1263, 9, 19], +[461711, 1264, 2, 14], +[461846, 1264, 6, 28], +[461945, 1264, 10, 5], +[462137, 1265, 4, 15], +[462192, 1265, 6, 9], +[462266, 1265, 8, 22], +[462320, 1265, 10, 15], +[462482, 1266, 3, 26], +[462500, 1266, 4, 13], +[462695, 
1266, 10, 25], +[462853, 1267, 4, 1], +[462981, 1267, 8, 7], +[463181, 1268, 2, 23], +[463340, 1268, 7, 31], +[463434, 1268, 11, 2], +[463561, 1269, 3, 9], +[463734, 1269, 8, 29], +[463925, 1270, 3, 8], +[463951, 1270, 4, 3], +[464104, 1270, 9, 3], +[464201, 1270, 12, 9], +[464303, 1271, 3, 21], +[464476, 1271, 9, 10], +[464643, 1272, 2, 24], +[464834, 1272, 9, 2], +[464990, 1273, 2, 5], +[465130, 1273, 6, 25], +[465315, 1273, 12, 27], +[465350, 1274, 1, 31], +[465437, 1274, 4, 28], +[465482, 1274, 6, 12], +[465557, 1274, 8, 26], +[465735, 1275, 2, 20], +[465935, 1275, 9, 8], +[466005, 1275, 11, 17], +[466185, 1276, 5, 15], +[466289, 1276, 8, 27], +[466345, 1276, 10, 22], +[466470, 1277, 2, 24], +[466561, 1277, 5, 26], +[466680, 1277, 9, 22], +[466850, 1278, 3, 11], +[466958, 1278, 6, 27], +[467106, 1278, 11, 22], +[467113, 1278, 11, 29], +[467187, 1279, 2, 11], +[467346, 1279, 7, 20], +[467508, 1279, 12, 29], +[467540, 1280, 1, 30], +[467689, 1280, 6, 27], +[467803, 1280, 10, 19], +[467932, 1281, 2, 25], +[467953, 1281, 3, 18], +[468028, 1281, 6, 1], +[468224, 1281, 12, 14], +[468393, 1282, 6, 1], +[468488, 1282, 9, 4], +[468504, 1282, 9, 20], +[468551, 1282, 11, 6], +[468631, 1283, 1, 25], +[468702, 1283, 4, 6], +[468877, 1283, 9, 28], +[468982, 1284, 1, 11], +[469141, 1284, 6, 18], +[469150, 1284, 6, 27], +[469216, 1284, 9, 1], +[469284, 1284, 11, 8], +[469471, 1285, 5, 14], +[469603, 1285, 9, 23], +[469630, 1285, 10, 20], +[469671, 1285, 11, 30], +[469863, 1286, 6, 10], +[469903, 1286, 7, 20], +[470035, 1286, 11, 29], +[470222, 1287, 6, 4], +[470330, 1287, 9, 20], +[470488, 1288, 2, 25], +[470657, 1288, 8, 12], +[470775, 1288, 12, 8], +[470919, 1289, 5, 1], +[470950, 1289, 6, 1], +[470978, 1289, 6, 29], +[471032, 1289, 8, 22], +[471186, 1290, 1, 23], +[471260, 1290, 4, 7], +[471459, 1290, 10, 23], +[471630, 1291, 4, 12], +[471778, 1291, 9, 7], +[471977, 1292, 3, 24], +[472167, 1292, 9, 30], +[472269, 1293, 1, 10], +[472378, 1293, 4, 29], +[472482, 1293, 8, 11], 
+[472620, 1293, 12, 27], +[472640, 1294, 1, 16], +[472822, 1294, 7, 17], +[472840, 1294, 8, 4], +[472994, 1295, 1, 5], +[473081, 1295, 4, 2], +[473159, 1295, 6, 19], +[473214, 1295, 8, 13], +[473309, 1295, 11, 16], +[473486, 1296, 5, 11], +[473657, 1296, 10, 29], +[473682, 1296, 11, 23], +[473825, 1297, 4, 15], +[473961, 1297, 8, 29], +[474108, 1298, 1, 23], +[474193, 1298, 4, 18], +[474243, 1298, 6, 7], +[474279, 1298, 7, 13], +[474299, 1298, 8, 2], +[474459, 1299, 1, 9], +[474525, 1299, 3, 16], +[474685, 1299, 8, 23], +[474694, 1299, 9, 1], +[474742, 1299, 10, 19], +[474854, 1300, 2, 8], +[474944, 1300, 5, 9], +[475070, 1300, 9, 12], +[475254, 1301, 3, 15], +[475441, 1301, 9, 18], +[475573, 1302, 1, 28], +[475749, 1302, 7, 23], +[475935, 1303, 1, 25], +[476007, 1303, 4, 7], +[476162, 1303, 9, 9], +[476174, 1303, 9, 21], +[476357, 1304, 3, 22], +[476539, 1304, 9, 20], +[476609, 1304, 11, 29], +[476703, 1305, 3, 3], +[476815, 1305, 6, 23], +[476990, 1305, 12, 15], +[477007, 1306, 1, 1], +[477144, 1306, 5, 18], +[477190, 1306, 7, 3], +[477375, 1307, 1, 4], +[477475, 1307, 4, 14], +[477498, 1307, 5, 7], +[477544, 1307, 6, 22], +[477656, 1307, 10, 12], +[477838, 1308, 4, 11], +[478032, 1308, 10, 22], +[478148, 1309, 2, 15], +[478316, 1309, 8, 2], +[478416, 1309, 11, 10], +[478474, 1310, 1, 7], +[478667, 1310, 7, 19], +[478681, 1310, 8, 2], +[478714, 1310, 9, 4], +[478723, 1310, 9, 13], +[478826, 1310, 12, 25], +[478956, 1311, 5, 4], +[479065, 1311, 8, 21], +[479226, 1312, 1, 29], +[479412, 1312, 8, 2], +[479552, 1312, 12, 20], +[479679, 1313, 4, 26], +[479786, 1313, 8, 11], +[479834, 1313, 9, 28], +[480005, 1314, 3, 18], +[480055, 1314, 5, 7], +[480143, 1314, 8, 3], +[480278, 1314, 12, 16], +[480391, 1315, 4, 8], +[480399, 1315, 4, 16], +[480558, 1315, 9, 22], +[480595, 1315, 10, 29], +[480780, 1316, 5, 1], +[480865, 1316, 7, 25], +[480965, 1316, 11, 2], +[481132, 1317, 4, 18], +[481209, 1317, 7, 4], +[481406, 1318, 1, 17], +[481522, 1318, 5, 13], +[481633, 1318, 9, 
1], +[481821, 1319, 3, 8], +[481830, 1319, 3, 17], +[481922, 1319, 6, 17], +[482104, 1319, 12, 16], +[482238, 1320, 4, 28], +[482405, 1320, 10, 12], +[482522, 1321, 2, 6], +[482575, 1321, 3, 31], +[482693, 1321, 7, 27], +[482734, 1321, 9, 6], +[482870, 1322, 1, 20], +[482956, 1322, 4, 16], +[483063, 1322, 8, 1], +[483174, 1322, 11, 20], +[483351, 1323, 5, 16], +[483394, 1323, 6, 28], +[483415, 1323, 7, 19], +[483579, 1323, 12, 30], +[483611, 1324, 1, 31], +[483699, 1324, 4, 28], +[483741, 1324, 6, 9], +[483922, 1324, 12, 7], +[483996, 1325, 2, 19], +[484196, 1325, 9, 7], +[484383, 1326, 3, 13], +[484430, 1326, 4, 29], +[484448, 1326, 5, 17], +[484607, 1326, 10, 23], +[484799, 1327, 5, 3], +[484937, 1327, 9, 18], +[485001, 1327, 11, 21], +[485194, 1328, 6, 1], +[485199, 1328, 6, 6], +[485226, 1328, 7, 3], +[485279, 1328, 8, 25], +[485427, 1329, 1, 20], +[485611, 1329, 7, 23], +[485622, 1329, 8, 3], +[485668, 1329, 9, 18], +[485681, 1329, 10, 1], +[485729, 1329, 11, 18], +[485873, 1330, 4, 11], +[486014, 1330, 8, 30], +[486127, 1330, 12, 21], +[486307, 1331, 6, 19], +[486415, 1331, 10, 5], +[486515, 1332, 1, 13], +[486700, 1332, 7, 16], +[486717, 1332, 8, 2], +[486726, 1332, 8, 11], +[486891, 1333, 1, 23], +[487034, 1333, 6, 15], +[487055, 1333, 7, 6], +[487148, 1333, 10, 7], +[487334, 1334, 4, 11], +[487404, 1334, 6, 20], +[487432, 1334, 7, 18], +[487446, 1334, 8, 1], +[487618, 1335, 1, 20], +[487741, 1335, 5, 23], +[487925, 1335, 11, 23], +[488107, 1336, 5, 23], +[488298, 1336, 11, 30], +[488307, 1336, 12, 9], +[488321, 1336, 12, 23], +[488430, 1337, 4, 11], +[488517, 1337, 7, 7], +[488651, 1337, 11, 18], +[488770, 1338, 3, 17], +[488904, 1338, 7, 29], +[488927, 1338, 8, 21], +[489121, 1339, 3, 3], +[489200, 1339, 5, 21], +[489233, 1339, 6, 23], +[489306, 1339, 9, 4], +[489436, 1340, 1, 12], +[489567, 1340, 5, 22], +[489706, 1340, 10, 8], +[489728, 1340, 10, 30], +[489733, 1340, 11, 4], +[489891, 1341, 4, 11], +[489944, 1341, 6, 3], +[489951, 1341, 6, 10], 
+[489990, 1341, 7, 19], +[490066, 1341, 10, 3], +[490226, 1342, 3, 12], +[490232, 1342, 3, 18], +[490398, 1342, 8, 31], +[490531, 1343, 1, 11], +[490685, 1343, 6, 14], +[490869, 1343, 12, 15], +[490988, 1344, 4, 12], +[491150, 1344, 9, 21], +[491181, 1344, 10, 22], +[491218, 1344, 11, 28], +[491228, 1344, 12, 8], +[491242, 1344, 12, 22], +[491386, 1345, 5, 15], +[491421, 1345, 6, 19], +[491520, 1345, 9, 26], +[491653, 1346, 2, 6], +[491765, 1346, 5, 29], +[491911, 1346, 10, 22], +[492055, 1347, 3, 15], +[492237, 1347, 9, 13], +[492376, 1348, 1, 30], +[492496, 1348, 5, 29], +[492601, 1348, 9, 11], +[492799, 1349, 3, 28], +[492802, 1349, 3, 31], +[492926, 1349, 8, 2], +[493022, 1349, 11, 6], +[493169, 1350, 4, 2], +[493237, 1350, 6, 9], +[493417, 1350, 12, 6], +[493425, 1350, 12, 14], +[493580, 1351, 5, 18], +[493693, 1351, 9, 8], +[493783, 1351, 12, 7], +[493856, 1352, 2, 18], +[493922, 1352, 4, 24], +[494108, 1352, 10, 27], +[494284, 1353, 4, 21], +[494381, 1353, 7, 27], +[494430, 1353, 9, 14], +[494536, 1353, 12, 29], +[494640, 1354, 4, 12], +[494785, 1354, 9, 4], +[494938, 1355, 2, 4], +[494976, 1355, 3, 14], +[495142, 1355, 8, 27], +[495267, 1355, 12, 30], +[495452, 1356, 7, 2], +[495575, 1356, 11, 2], +[495637, 1357, 1, 3], +[495789, 1357, 6, 4], +[495848, 1357, 8, 2], +[495853, 1357, 8, 7], +[495952, 1357, 11, 14], +[496004, 1358, 1, 5], +[496137, 1358, 5, 18], +[496155, 1358, 6, 5], +[496228, 1358, 8, 17], +[496373, 1359, 1, 9], +[496438, 1359, 3, 15], +[496630, 1359, 9, 23], +[496694, 1359, 11, 26], +[496820, 1360, 3, 31], +[497001, 1360, 9, 28], +[497065, 1360, 12, 1], +[497242, 1361, 5, 27], +[497441, 1361, 12, 12], +[497639, 1362, 6, 28], +[497742, 1362, 10, 9], +[497788, 1362, 11, 24], +[497960, 1363, 5, 15], +[498037, 1363, 7, 31], +[498152, 1363, 11, 23], +[498187, 1363, 12, 28], +[498195, 1364, 1, 5], +[498205, 1364, 1, 15], +[498229, 1364, 2, 8], +[498371, 1364, 6, 29], +[498466, 1364, 10, 2], +[498568, 1365, 1, 12], +[498580, 1365, 1, 24], +[498771, 
1365, 8, 3], +[498782, 1365, 8, 14], +[498942, 1366, 1, 21], +[499100, 1366, 6, 28], +[499199, 1366, 10, 5], +[499258, 1366, 12, 3], +[499417, 1367, 5, 11], +[499521, 1367, 8, 23], +[499528, 1367, 8, 30], +[499640, 1367, 12, 20], +[499645, 1367, 12, 25], +[499698, 1368, 2, 16], +[499814, 1368, 6, 11], +[499970, 1368, 11, 14], +[500016, 1368, 12, 30], +[500065, 1369, 2, 17], +[500231, 1369, 8, 2], +[500286, 1369, 9, 26], +[500404, 1370, 1, 22], +[500486, 1370, 4, 14], +[500667, 1370, 10, 12], +[500798, 1371, 2, 20], +[500824, 1371, 3, 18], +[500986, 1371, 8, 27], +[501151, 1372, 2, 8], +[501323, 1372, 7, 29], +[501496, 1373, 1, 18], +[501580, 1373, 4, 12], +[501684, 1373, 7, 25], +[501764, 1373, 10, 13], +[501810, 1373, 11, 28], +[501893, 1374, 2, 19], +[501954, 1374, 4, 21], +[502011, 1374, 6, 17], +[502101, 1374, 9, 15], +[502110, 1374, 9, 24], +[502163, 1374, 11, 16], +[502317, 1375, 4, 19], +[502496, 1375, 10, 15], +[502550, 1375, 12, 8], +[502570, 1375, 12, 28], +[502767, 1376, 7, 12], +[502944, 1377, 1, 5], +[503082, 1377, 5, 23], +[503244, 1377, 11, 1], +[503401, 1378, 4, 7], +[503587, 1378, 10, 10], +[503767, 1379, 4, 8], +[503909, 1379, 8, 28], +[504060, 1380, 1, 26], +[504136, 1380, 4, 11], +[504280, 1380, 9, 2], +[504347, 1380, 11, 8], +[504501, 1381, 4, 11], +[504590, 1381, 7, 9], +[504643, 1381, 8, 31], +[504645, 1381, 9, 2], +[504770, 1382, 1, 5], +[504954, 1382, 7, 8], +[505143, 1383, 1, 13], +[505166, 1383, 2, 5], +[505253, 1383, 5, 3], +[505282, 1383, 6, 1], +[505415, 1383, 10, 12], +[505521, 1384, 1, 26], +[505719, 1384, 8, 11], +[505888, 1385, 1, 27], +[506078, 1385, 8, 5], +[506089, 1385, 8, 16], +[506282, 1386, 2, 25], +[506346, 1386, 4, 30], +[506413, 1386, 7, 6], +[506562, 1386, 12, 2], +[506741, 1387, 5, 30], +[506835, 1387, 9, 1], +[506919, 1387, 11, 24], +[506960, 1388, 1, 4], +[507060, 1388, 4, 13], +[507225, 1388, 9, 25], +[507289, 1388, 11, 28], +[507446, 1389, 5, 4], +[507503, 1389, 6, 30], +[507609, 1389, 10, 14], +[507634, 1389, 11, 
8], +[507783, 1390, 4, 6], +[507789, 1390, 4, 12], +[507982, 1390, 10, 22], +[508101, 1391, 2, 18], +[508203, 1391, 5, 31], +[508270, 1391, 8, 6], +[508326, 1391, 10, 1], +[508449, 1392, 2, 1], +[508520, 1392, 4, 12], +[508695, 1392, 10, 4], +[508728, 1392, 11, 6], +[508909, 1393, 5, 6], +[509040, 1393, 9, 14], +[509176, 1394, 1, 28], +[509178, 1394, 1, 30], +[509216, 1394, 3, 9], +[509306, 1394, 6, 7], +[509310, 1394, 6, 11], +[509482, 1394, 11, 30], +[509636, 1395, 5, 3], +[509788, 1395, 10, 2], +[509804, 1395, 10, 18], +[509976, 1396, 4, 7], +[510054, 1396, 6, 24], +[510139, 1396, 9, 17], +[510316, 1397, 3, 13], +[510336, 1397, 4, 2], +[510348, 1397, 4, 14], +[510527, 1397, 10, 10], +[510613, 1398, 1, 4], +[510777, 1398, 6, 17], +[510956, 1398, 12, 13], +[510990, 1399, 1, 16], +[511123, 1399, 5, 29], +[511247, 1399, 9, 30], +[511308, 1399, 11, 30], +[511369, 1400, 1, 30], +[511546, 1400, 7, 26], +[511609, 1400, 9, 27], +[511702, 1400, 12, 29], +[511793, 1401, 3, 30], +[511854, 1401, 5, 30], +[511878, 1401, 6, 23], +[511944, 1401, 8, 28], +[512130, 1402, 3, 2], +[512205, 1402, 5, 16], +[512354, 1402, 10, 12], +[512502, 1403, 3, 9], +[512663, 1403, 8, 17], +[512819, 1404, 1, 20], +[512875, 1404, 3, 16], +[512921, 1404, 5, 1], +[513118, 1404, 11, 14], +[513175, 1405, 1, 10], +[513227, 1405, 3, 3], +[513384, 1405, 8, 7], +[513555, 1406, 1, 25], +[513623, 1406, 4, 3], +[513653, 1406, 5, 3], +[513709, 1406, 6, 28], +[513885, 1406, 12, 21], +[514081, 1407, 7, 5], +[514173, 1407, 10, 5], +[514320, 1408, 2, 29], +[514413, 1408, 6, 1], +[514603, 1408, 12, 8], +[514638, 1409, 1, 12], +[514709, 1409, 3, 24], +[514735, 1409, 4, 19], +[514863, 1409, 8, 25], +[514901, 1409, 10, 2], +[515000, 1410, 1, 9], +[515026, 1410, 2, 4], +[515092, 1410, 4, 11], +[515140, 1410, 5, 29], +[515231, 1410, 8, 28], +[515311, 1410, 11, 16], +[515368, 1411, 1, 12], +[515408, 1411, 2, 21], +[515551, 1411, 7, 14], +[515624, 1411, 9, 25], +[515700, 1411, 12, 10], +[515756, 1412, 2, 4], +[515802, 
1412, 3, 21], +[515943, 1412, 8, 9], +[516065, 1412, 12, 9], +[516249, 1413, 6, 11], +[516402, 1413, 11, 11], +[516430, 1413, 12, 9], +[516511, 1414, 2, 28], +[516668, 1414, 8, 4], +[516682, 1414, 8, 18], +[516850, 1415, 2, 2], +[516875, 1415, 2, 27], +[516913, 1415, 4, 6], +[517096, 1415, 10, 6], +[517278, 1416, 4, 5], +[517314, 1416, 5, 11], +[517388, 1416, 7, 24], +[517419, 1416, 8, 24], +[517556, 1417, 1, 8], +[517676, 1417, 5, 8], +[517844, 1417, 10, 23], +[517917, 1418, 1, 4], +[518013, 1418, 4, 10], +[518094, 1418, 6, 30], +[518156, 1418, 8, 31], +[518185, 1418, 9, 29], +[518252, 1418, 12, 5], +[518388, 1419, 4, 20], +[518390, 1419, 4, 22], +[518508, 1419, 8, 18], +[518651, 1420, 1, 8], +[518695, 1420, 2, 21], +[518841, 1420, 7, 16], +[518852, 1420, 7, 27], +[519041, 1421, 2, 1], +[519052, 1421, 2, 12], +[519118, 1421, 4, 19], +[519313, 1421, 10, 31], +[519438, 1422, 3, 5], +[519513, 1422, 5, 19], +[519602, 1422, 8, 16], +[519650, 1422, 10, 3], +[519817, 1423, 3, 19], +[519892, 1423, 6, 2], +[520047, 1423, 11, 4], +[520177, 1424, 3, 13], +[520178, 1424, 3, 14], +[520293, 1424, 7, 7], +[520318, 1424, 8, 1], +[520342, 1424, 8, 25], +[520385, 1424, 10, 7], +[520555, 1425, 3, 26], +[520669, 1425, 7, 18], +[520846, 1426, 1, 11], +[520921, 1426, 3, 27], +[521020, 1426, 7, 4], +[521182, 1426, 12, 13], +[521244, 1427, 2, 13], +[521354, 1427, 6, 3], +[521439, 1427, 8, 27], +[521506, 1427, 11, 2], +[521527, 1427, 11, 23], +[521585, 1428, 1, 20], +[521691, 1428, 5, 5], +[521780, 1428, 8, 2], +[521941, 1429, 1, 10], +[521984, 1429, 2, 22], +[522025, 1429, 4, 4], +[522054, 1429, 5, 3], +[522119, 1429, 7, 7], +[522294, 1429, 12, 29], +[522477, 1430, 6, 30], +[522614, 1430, 11, 14], +[522763, 1431, 4, 12], +[522921, 1431, 9, 17], +[523032, 1432, 1, 6], +[523074, 1432, 2, 17], +[523247, 1432, 8, 8], +[523422, 1433, 1, 30], +[523474, 1433, 3, 23], +[523565, 1433, 6, 22], +[523600, 1433, 7, 27], +[523633, 1433, 8, 29], +[523666, 1433, 10, 1], +[523768, 1434, 1, 11], +[523939, 
1434, 7, 1], +[523979, 1434, 8, 10], +[524053, 1434, 10, 23], +[524133, 1435, 1, 11], +[524297, 1435, 6, 24], +[524354, 1435, 8, 20], +[524478, 1435, 12, 22], +[524504, 1436, 1, 17], +[524534, 1436, 2, 16], +[524661, 1436, 6, 22], +[524718, 1436, 8, 18], +[524837, 1436, 12, 15], +[524874, 1437, 1, 21], +[524889, 1437, 2, 5], +[525011, 1437, 6, 7], +[525069, 1437, 8, 4], +[525222, 1438, 1, 4], +[525252, 1438, 2, 3], +[525420, 1438, 7, 21], +[525569, 1438, 12, 17], +[525585, 1439, 1, 2], +[525614, 1439, 1, 31], +[525799, 1439, 8, 4], +[525920, 1439, 12, 3], +[526008, 1440, 2, 29], +[526117, 1440, 6, 17], +[526175, 1440, 8, 14], +[526328, 1441, 1, 14], +[526365, 1441, 2, 20], +[526441, 1441, 5, 7], +[526590, 1441, 10, 3], +[526679, 1441, 12, 31], +[526789, 1442, 4, 20], +[526938, 1442, 9, 16], +[526956, 1442, 10, 4], +[527084, 1443, 2, 9], +[527096, 1443, 2, 21], +[527135, 1443, 4, 1], +[527257, 1443, 8, 1], +[527452, 1444, 2, 12], +[527506, 1444, 4, 6], +[527574, 1444, 6, 13], +[527593, 1444, 7, 2], +[527768, 1444, 12, 24], +[527869, 1445, 4, 4], +[527961, 1445, 7, 5], +[528126, 1445, 12, 17], +[528168, 1446, 1, 28], +[528272, 1446, 5, 12], +[528412, 1446, 9, 29], +[528572, 1447, 3, 8], +[528576, 1447, 3, 12], +[528712, 1447, 7, 26], +[528866, 1447, 12, 27], +[528896, 1448, 1, 26], +[529060, 1448, 7, 8], +[529249, 1449, 1, 13], +[529265, 1449, 1, 29], +[529391, 1449, 6, 4], +[529504, 1449, 9, 25], +[529596, 1449, 12, 26], +[529682, 1450, 3, 22], +[529850, 1450, 9, 6], +[529913, 1450, 11, 8], +[530019, 1451, 2, 22], +[530177, 1451, 7, 30], +[530213, 1451, 9, 4], +[530318, 1451, 12, 18], +[530424, 1452, 4, 2], +[530498, 1452, 6, 15], +[530656, 1452, 11, 20], +[530854, 1453, 6, 6], +[531009, 1453, 11, 8], +[531176, 1454, 4, 24], +[531217, 1454, 6, 4], +[531275, 1454, 8, 1], +[531323, 1454, 9, 18], +[531337, 1454, 10, 2], +[531356, 1454, 10, 21], +[531501, 1455, 3, 15], +[531671, 1455, 9, 1], +[531791, 1455, 12, 30], +[531793, 1456, 1, 1], +[531873, 1456, 3, 21], 
+[531894, 1456, 4, 11], +[532018, 1456, 8, 13], +[532056, 1456, 9, 20], +[532192, 1457, 2, 3], +[532220, 1457, 3, 3], +[532319, 1457, 6, 10], +[532450, 1457, 10, 19], +[532560, 1458, 2, 6], +[532567, 1458, 2, 13], +[532616, 1458, 4, 3], +[532744, 1458, 8, 9], +[532928, 1459, 2, 9], +[533128, 1459, 8, 28], +[533285, 1460, 2, 1], +[533325, 1460, 3, 12], +[533396, 1460, 5, 22], +[533508, 1460, 9, 11], +[533522, 1460, 9, 25], +[533668, 1461, 2, 18], +[533778, 1461, 6, 8], +[533793, 1461, 6, 23], +[533874, 1461, 9, 12], +[533913, 1461, 10, 21], +[534090, 1462, 4, 16], +[534217, 1462, 8, 21], +[534354, 1463, 1, 5], +[534409, 1463, 3, 1], +[534563, 1463, 8, 2], +[534697, 1463, 12, 14], +[534875, 1464, 6, 9], +[534993, 1464, 10, 5], +[535144, 1465, 3, 5], +[535300, 1465, 8, 8], +[535457, 1466, 1, 12], +[535483, 1466, 2, 7], +[535554, 1466, 4, 19], +[535655, 1466, 7, 29], +[535730, 1466, 10, 12], +[535821, 1467, 1, 11], +[536013, 1467, 7, 22], +[536157, 1467, 12, 13], +[536271, 1468, 4, 5], +[536440, 1468, 9, 21], +[536567, 1469, 1, 26], +[536748, 1469, 7, 26], +[536825, 1469, 10, 11], +[536973, 1470, 3, 8], +[537039, 1470, 5, 13], +[537185, 1470, 10, 6], +[537380, 1471, 4, 19], +[537545, 1471, 10, 1], +[537715, 1472, 3, 19], +[537854, 1472, 8, 5], +[538019, 1473, 1, 17], +[538077, 1473, 3, 16], +[538117, 1473, 4, 25], +[538205, 1473, 7, 22], +[538401, 1474, 2, 3], +[538595, 1474, 8, 16], +[538794, 1475, 3, 3], +[538951, 1475, 8, 7], +[538956, 1475, 8, 12], +[539022, 1475, 10, 17], +[539052, 1475, 11, 16], +[539213, 1476, 4, 25], +[539276, 1476, 6, 27], +[539446, 1476, 12, 14], +[539572, 1477, 4, 19], +[539695, 1477, 8, 20], +[539841, 1478, 1, 13], +[539913, 1478, 3, 26], +[540003, 1478, 6, 24], +[540052, 1478, 8, 12], +[540214, 1479, 1, 21], +[540378, 1479, 7, 4], +[540534, 1479, 12, 7], +[540595, 1480, 2, 6], +[540745, 1480, 7, 5], +[540929, 1481, 1, 5], +[540982, 1481, 2, 27], +[541021, 1481, 4, 7], +[541180, 1481, 9, 13], +[541286, 1481, 12, 28], +[541391, 1482, 4, 12], 
+[541395, 1482, 4, 16], +[541527, 1482, 8, 26], +[541559, 1482, 9, 27], +[541628, 1482, 12, 5], +[541769, 1483, 4, 25], +[541840, 1483, 7, 5], +[542037, 1484, 1, 18], +[542167, 1484, 5, 27], +[542293, 1484, 9, 30], +[542326, 1484, 11, 2], +[542464, 1485, 3, 20], +[542489, 1485, 4, 14], +[542648, 1485, 9, 20], +[542728, 1485, 12, 9], +[542744, 1485, 12, 25], +[542886, 1486, 5, 16], +[542978, 1486, 8, 16], +[543039, 1486, 10, 16], +[543141, 1487, 1, 26], +[543213, 1487, 4, 8], +[543336, 1487, 8, 9], +[543445, 1487, 11, 26], +[543526, 1488, 2, 15], +[543656, 1488, 6, 24], +[543684, 1488, 7, 22], +[543819, 1488, 12, 4], +[543933, 1489, 3, 28], +[543981, 1489, 5, 15], +[544007, 1489, 6, 10], +[544074, 1489, 8, 16], +[544111, 1489, 9, 22], +[544129, 1489, 10, 10], +[544303, 1490, 4, 2], +[544371, 1490, 6, 9], +[544460, 1490, 9, 6], +[544606, 1491, 1, 30], +[544608, 1491, 2, 1], +[544633, 1491, 2, 26], +[544790, 1491, 8, 2], +[544825, 1491, 9, 6], +[545025, 1492, 3, 24], +[545186, 1492, 9, 1], +[545275, 1492, 11, 29], +[545336, 1493, 1, 29], +[545424, 1493, 4, 27], +[545452, 1493, 5, 25], +[545505, 1493, 7, 17], +[545640, 1493, 11, 29], +[545660, 1493, 12, 19], +[545736, 1494, 3, 5], +[545871, 1494, 7, 18], +[546005, 1494, 11, 29], +[546015, 1494, 12, 9], +[546171, 1495, 5, 14], +[546316, 1495, 10, 6], +[546505, 1496, 4, 12], +[546576, 1496, 6, 22], +[546671, 1496, 9, 25], +[546780, 1497, 1, 12], +[546818, 1497, 2, 19], +[546905, 1497, 5, 17], +[546918, 1497, 5, 30], +[546933, 1497, 6, 14], +[547104, 1497, 12, 2], +[547151, 1498, 1, 18], +[547194, 1498, 3, 2], +[547375, 1498, 8, 30], +[547398, 1498, 9, 22], +[547475, 1498, 12, 8], +[547636, 1499, 5, 18], +[547647, 1499, 5, 29], +[547826, 1499, 11, 24], +[547857, 1499, 12, 25], +[547929, 1500, 3, 7], +[548025, 1500, 6, 11], +[548169, 1500, 11, 2], +[548316, 1501, 3, 29], +[548399, 1501, 6, 20], +[548536, 1501, 11, 4], +[548634, 1502, 2, 10], +[548825, 1502, 8, 20], +[548921, 1502, 11, 24], +[548963, 1503, 1, 5], +[549051, 
1503, 4, 3], +[549130, 1503, 6, 21], +[549251, 1503, 10, 20], +[549259, 1503, 10, 28], +[549409, 1504, 3, 26], +[549524, 1504, 7, 19], +[549723, 1505, 2, 3], +[549817, 1505, 5, 8], +[549885, 1505, 7, 15], +[550044, 1505, 12, 21], +[550139, 1506, 3, 26], +[550176, 1506, 5, 2], +[550271, 1506, 8, 5], +[550385, 1506, 11, 27], +[550488, 1507, 3, 10], +[550594, 1507, 6, 24], +[550736, 1507, 11, 13], +[550848, 1508, 3, 4], +[550888, 1508, 4, 13], +[551079, 1508, 10, 21], +[551164, 1509, 1, 14], +[551272, 1509, 5, 2], +[551433, 1509, 10, 10], +[551548, 1510, 2, 2], +[551581, 1510, 3, 7], +[551667, 1510, 6, 1], +[551797, 1510, 10, 9], +[551938, 1511, 2, 27], +[551967, 1511, 3, 28], +[552128, 1511, 9, 5], +[552229, 1511, 12, 15], +[552318, 1512, 3, 13], +[552406, 1512, 6, 9], +[552513, 1512, 9, 24], +[552560, 1512, 11, 10], +[552589, 1512, 12, 9], +[552654, 1513, 2, 12], +[552699, 1513, 3, 29], +[552750, 1513, 5, 19], +[552865, 1513, 9, 11], +[552944, 1513, 11, 29], +[552990, 1514, 1, 14], +[553149, 1514, 6, 22], +[553312, 1514, 12, 2], +[553436, 1515, 4, 5], +[553476, 1515, 5, 15], +[553620, 1515, 10, 6], +[553679, 1515, 12, 4], +[553691, 1515, 12, 16], +[553720, 1516, 1, 14], +[553852, 1516, 5, 25], +[553874, 1516, 6, 16], +[553891, 1516, 7, 3], +[553912, 1516, 7, 24], +[554029, 1516, 11, 18], +[554132, 1517, 3, 1], +[554214, 1517, 5, 22], +[554384, 1517, 11, 8], +[554420, 1517, 12, 14], +[554476, 1518, 2, 8], +[554536, 1518, 4, 9], +[554659, 1518, 8, 10], +[554810, 1519, 1, 8], +[554879, 1519, 3, 18], +[555004, 1519, 7, 21], +[555035, 1519, 8, 21], +[555232, 1520, 3, 5], +[555276, 1520, 4, 18], +[555430, 1520, 9, 19], +[555589, 1521, 2, 25], +[555769, 1521, 8, 24], +[555893, 1521, 12, 26], +[555928, 1522, 1, 30], +[555994, 1522, 4, 6], +[556034, 1522, 5, 16], +[556046, 1522, 5, 28], +[556081, 1522, 7, 2], +[556144, 1522, 9, 3], +[556184, 1522, 10, 13], +[556285, 1523, 1, 22], +[556429, 1523, 6, 15], +[556567, 1523, 10, 31], +[556604, 1523, 12, 7], +[556707, 1524, 3, 19], 
+[556866, 1524, 8, 25], +[556992, 1524, 12, 29], +[557175, 1525, 6, 30], +[557265, 1525, 9, 28], +[557317, 1525, 11, 19], +[557399, 1526, 2, 9], +[557504, 1526, 5, 25], +[557527, 1526, 6, 17], +[557587, 1526, 8, 16], +[557783, 1527, 2, 28], +[557862, 1527, 5, 18], +[557906, 1527, 7, 1], +[558062, 1527, 12, 4], +[558107, 1528, 1, 18], +[558114, 1528, 1, 25], +[558138, 1528, 2, 18], +[558332, 1528, 8, 30], +[558418, 1528, 11, 24], +[558483, 1529, 1, 28], +[558657, 1529, 7, 21], +[558822, 1530, 1, 2], +[558892, 1530, 3, 13], +[559075, 1530, 9, 12], +[559111, 1530, 10, 18], +[559258, 1531, 3, 14], +[559293, 1531, 4, 18], +[559380, 1531, 7, 14], +[559390, 1531, 7, 24], +[559425, 1531, 8, 28], +[559542, 1531, 12, 23], +[559661, 1532, 4, 20], +[559667, 1532, 4, 26], +[559847, 1532, 10, 23], +[559946, 1533, 1, 30], +[560102, 1533, 7, 5], +[560104, 1533, 7, 7], +[560144, 1533, 8, 16], +[560332, 1534, 2, 20], +[560499, 1534, 8, 6], +[560631, 1534, 12, 16], +[560810, 1535, 6, 13], +[560853, 1535, 7, 26], +[560951, 1535, 11, 1], +[561057, 1536, 2, 15], +[561210, 1536, 7, 17], +[561352, 1536, 12, 6], +[561450, 1537, 3, 14], +[561511, 1537, 5, 14], +[561650, 1537, 9, 30], +[561659, 1537, 10, 9], +[561824, 1538, 3, 23], +[561847, 1538, 4, 15], +[562047, 1538, 11, 1], +[562058, 1538, 11, 12], +[562070, 1538, 11, 24], +[562116, 1539, 1, 9], +[562302, 1539, 7, 14], +[562378, 1539, 9, 28], +[562432, 1539, 11, 21], +[562437, 1539, 11, 26], +[562582, 1540, 4, 19], +[562593, 1540, 4, 30], +[562686, 1540, 8, 1], +[562777, 1540, 10, 31], +[562883, 1541, 2, 14], +[562971, 1541, 5, 13], +[563061, 1541, 8, 11], +[563076, 1541, 8, 26], +[563155, 1541, 11, 13], +[563315, 1542, 4, 22], +[563411, 1542, 7, 27], +[563434, 1542, 8, 19], +[563543, 1542, 12, 6], +[563579, 1543, 1, 11], +[563623, 1543, 2, 24], +[563676, 1543, 4, 18], +[563787, 1543, 8, 7], +[563828, 1543, 9, 17], +[563840, 1543, 9, 29], +[564020, 1544, 3, 27], +[564203, 1544, 9, 26], +[564377, 1545, 3, 19], +[564421, 1545, 5, 2], 
+[564455, 1545, 6, 5], +[564605, 1545, 11, 2], +[564740, 1546, 3, 17], +[564850, 1546, 7, 5], +[565033, 1547, 1, 4], +[565145, 1547, 4, 26], +[565233, 1547, 7, 23], +[565349, 1547, 11, 16], +[565359, 1547, 11, 26], +[565449, 1548, 2, 24], +[565640, 1548, 9, 2], +[565819, 1549, 2, 28], +[565827, 1549, 3, 8], +[565874, 1549, 4, 24], +[566023, 1549, 9, 20], +[566136, 1550, 1, 11], +[566250, 1550, 5, 5], +[566450, 1550, 11, 21], +[566459, 1550, 11, 30], +[566506, 1551, 1, 16], +[566523, 1551, 2, 2], +[566606, 1551, 4, 26], +[566612, 1551, 5, 2], +[566622, 1551, 5, 12], +[566699, 1551, 7, 28], +[566850, 1551, 12, 26], +[567020, 1552, 6, 13], +[567028, 1552, 6, 21], +[567154, 1552, 10, 25], +[567274, 1553, 2, 22], +[567474, 1553, 9, 10], +[567574, 1553, 12, 19], +[567592, 1554, 1, 6], +[567662, 1554, 3, 17], +[567770, 1554, 7, 3], +[567864, 1554, 10, 5], +[567945, 1554, 12, 25], +[567999, 1555, 2, 17], +[568182, 1555, 8, 19], +[568277, 1555, 11, 22], +[568462, 1556, 5, 25], +[568518, 1556, 7, 20], +[568693, 1557, 1, 11], +[568832, 1557, 5, 30], +[568913, 1557, 8, 19], +[569071, 1558, 1, 24], +[569085, 1558, 2, 7], +[569089, 1558, 2, 11], +[569195, 1558, 5, 28], +[569320, 1558, 9, 30], +[569346, 1558, 10, 26], +[569409, 1558, 12, 28], +[569439, 1559, 1, 27], +[569455, 1559, 2, 12], +[569459, 1559, 2, 16], +[569518, 1559, 4, 16], +[569662, 1559, 9, 7], +[569811, 1560, 2, 3], +[569970, 1560, 7, 11], +[570148, 1561, 1, 5], +[570236, 1561, 4, 3], +[570286, 1561, 5, 23], +[570412, 1561, 9, 26], +[570424, 1561, 10, 8], +[570609, 1562, 4, 11], +[570763, 1562, 9, 12], +[570809, 1562, 10, 28], +[570923, 1563, 2, 19], +[571052, 1563, 6, 28], +[571205, 1563, 11, 28], +[571252, 1564, 1, 14], +[571380, 1564, 5, 21], +[571464, 1564, 8, 13], +[571645, 1565, 2, 10], +[571775, 1565, 6, 20], +[571841, 1565, 8, 25], +[571848, 1565, 9, 1], +[571902, 1565, 10, 25], +[572014, 1566, 2, 14], +[572104, 1566, 5, 15], +[572228, 1566, 9, 16], +[572345, 1567, 1, 11], +[572358, 1567, 1, 24], +[572441, 
1567, 4, 17], +[572545, 1567, 7, 30], +[572706, 1568, 1, 7], +[572769, 1568, 3, 10], +[572806, 1568, 4, 16], +[572883, 1568, 7, 2], +[572886, 1568, 7, 5], +[573086, 1569, 1, 21], +[573091, 1569, 1, 26], +[573242, 1569, 6, 26], +[573352, 1569, 10, 14], +[573508, 1570, 3, 19], +[573557, 1570, 5, 7], +[573677, 1570, 9, 4], +[573864, 1571, 3, 10], +[573887, 1571, 4, 2], +[573891, 1571, 4, 6], +[574013, 1571, 8, 6], +[574093, 1571, 10, 25], +[574264, 1572, 4, 13], +[574331, 1572, 6, 19], +[574482, 1572, 11, 17], +[574569, 1573, 2, 12], +[574750, 1573, 8, 12], +[574751, 1573, 8, 13], +[574807, 1573, 10, 8], +[574968, 1574, 3, 18], +[575146, 1574, 9, 12], +[575293, 1575, 2, 6], +[575400, 1575, 5, 24], +[575459, 1575, 7, 22], +[575647, 1576, 1, 26], +[575783, 1576, 6, 10], +[575916, 1576, 10, 21], +[575935, 1576, 11, 9], +[576012, 1577, 1, 25], +[576114, 1577, 5, 7], +[576242, 1577, 9, 12], +[576378, 1578, 1, 26], +[576408, 1578, 2, 25], +[576591, 1578, 8, 27], +[576599, 1578, 9, 4], +[576615, 1578, 9, 20], +[576748, 1579, 1, 31], +[576937, 1579, 8, 8], +[577112, 1580, 1, 30], +[577256, 1580, 6, 22], +[577356, 1580, 9, 30], +[577529, 1581, 3, 22], +[577661, 1581, 8, 1], +[577720, 1581, 9, 29], +[577892, 1582, 3, 20], +[578049, 1582, 8, 24], +[578096, 1582, 10, 10], +[578195, 1583, 1, 17], +[578223, 1583, 2, 14], +[578293, 1583, 4, 25], +[578357, 1583, 6, 28], +[578380, 1583, 7, 21], +[578570, 1584, 1, 27], +[578690, 1584, 5, 26], +[578830, 1584, 10, 13], +[578992, 1585, 3, 24], +[579190, 1585, 10, 8], +[579310, 1586, 2, 5], +[579376, 1586, 4, 12], +[579420, 1586, 5, 26], +[579496, 1586, 8, 10], +[579603, 1586, 11, 25], +[579724, 1587, 3, 26], +[579807, 1587, 6, 17], +[579813, 1587, 6, 23], +[580012, 1588, 1, 8], +[580209, 1588, 7, 23], +[580360, 1588, 12, 21], +[580452, 1589, 3, 23], +[580572, 1589, 7, 21], +[580650, 1589, 10, 7], +[580766, 1590, 1, 31], +[580957, 1590, 8, 10], +[581011, 1590, 10, 3], +[581114, 1591, 1, 14], +[581191, 1591, 4, 1], +[581308, 1591, 7, 27], 
+[581489, 1592, 1, 24], +[581681, 1592, 8, 3], +[581862, 1593, 1, 31], +[581982, 1593, 5, 31], +[582006, 1593, 6, 24], +[582184, 1593, 12, 19], +[582293, 1594, 4, 7], +[582421, 1594, 8, 13], +[582562, 1595, 1, 1], +[582679, 1595, 4, 28], +[582704, 1595, 5, 23], +[582896, 1595, 12, 1], +[583024, 1596, 4, 7], +[583106, 1596, 6, 28], +[583291, 1596, 12, 30], +[583453, 1597, 6, 10], +[583519, 1597, 8, 15], +[583649, 1597, 12, 23], +[583785, 1598, 5, 8], +[583914, 1598, 9, 14], +[584084, 1599, 3, 3], +[584222, 1599, 7, 19], +[584247, 1599, 8, 13], +[584446, 1600, 2, 28], +[584597, 1600, 7, 28], +[584612, 1600, 8, 12], +[584666, 1600, 10, 5], +[584774, 1601, 1, 21], +[584779, 1601, 1, 26], +[584861, 1601, 4, 18], +[584914, 1601, 6, 10], +[585101, 1601, 12, 14], +[585153, 1602, 2, 4], +[585302, 1602, 7, 3], +[585335, 1602, 8, 5], +[585505, 1603, 1, 22], +[585671, 1603, 7, 7], +[585736, 1603, 9, 10], +[585913, 1604, 3, 5], +[585937, 1604, 3, 29], +[585943, 1604, 4, 4], +[586108, 1604, 9, 16], +[586249, 1605, 2, 4], +[586443, 1605, 8, 17], +[586498, 1605, 10, 11], +[586601, 1606, 1, 22], +[586677, 1606, 4, 8], +[586733, 1606, 6, 3], +[586777, 1606, 7, 17], +[586817, 1606, 8, 26], +[586863, 1606, 10, 11], +[586925, 1606, 12, 12], +[586991, 1607, 2, 16], +[587150, 1607, 7, 25], +[587265, 1607, 11, 17], +[587346, 1608, 2, 6], +[587415, 1608, 4, 15], +[587450, 1608, 5, 20], +[587615, 1608, 11, 1], +[587645, 1608, 12, 1], +[587755, 1609, 3, 21], +[587869, 1609, 7, 13], +[588007, 1609, 11, 28], +[588053, 1610, 1, 13], +[588067, 1610, 1, 27], +[588092, 1610, 2, 21], +[588250, 1610, 7, 29], +[588294, 1610, 9, 11], +[588400, 1610, 12, 26], +[588540, 1611, 5, 15], +[588657, 1611, 9, 9], +[588735, 1611, 11, 26], +[588856, 1612, 3, 26], +[588976, 1612, 7, 24], +[589087, 1612, 11, 12], +[589240, 1613, 4, 14], +[589433, 1613, 10, 24], +[589609, 1614, 4, 18], +[589643, 1614, 5, 22], +[589796, 1614, 10, 22], +[589950, 1615, 3, 25], +[590068, 1615, 7, 21], +[590164, 1615, 10, 25], +[590346, 
1616, 4, 24], +[590525, 1616, 10, 20], +[590680, 1617, 3, 24], +[590828, 1617, 8, 19], +[590844, 1617, 9, 4], +[590920, 1617, 11, 19], +[591114, 1618, 6, 1], +[591234, 1618, 9, 29], +[591339, 1619, 1, 12], +[591457, 1619, 5, 10], +[591502, 1619, 6, 24], +[591680, 1619, 12, 19], +[591708, 1620, 1, 16], +[591785, 1620, 4, 2], +[591838, 1620, 5, 25], +[591890, 1620, 7, 16], +[592015, 1620, 11, 18], +[592027, 1620, 11, 30], +[592183, 1621, 5, 5], +[592333, 1621, 10, 2], +[592387, 1621, 11, 25], +[592562, 1622, 5, 19], +[592685, 1622, 9, 19], +[592691, 1622, 9, 25], +[592717, 1622, 10, 21], +[592841, 1623, 2, 22], +[592887, 1623, 4, 9], +[592898, 1623, 4, 20], +[592908, 1623, 4, 30], +[592927, 1623, 5, 19], +[593104, 1623, 11, 12], +[593272, 1624, 4, 28], +[593411, 1624, 9, 14], +[593529, 1625, 1, 10], +[593692, 1625, 6, 22], +[593700, 1625, 6, 30], +[593855, 1625, 12, 2], +[594044, 1626, 6, 9], +[594227, 1626, 12, 9], +[594253, 1627, 1, 4], +[594267, 1627, 1, 18], +[594374, 1627, 5, 5], +[594483, 1627, 8, 22], +[594514, 1627, 9, 22], +[594677, 1628, 3, 3], +[594873, 1628, 9, 15], +[595012, 1629, 2, 1], +[595015, 1629, 2, 4], +[595054, 1629, 3, 15], +[595230, 1629, 9, 7], +[595343, 1629, 12, 29], +[595355, 1630, 1, 10], +[595405, 1630, 3, 1], +[595522, 1630, 6, 26], +[595525, 1630, 6, 29], +[595650, 1630, 11, 1], +[595677, 1630, 11, 28], +[595762, 1631, 2, 21], +[595879, 1631, 6, 18], +[596060, 1631, 12, 16], +[596140, 1632, 3, 5], +[596214, 1632, 5, 18], +[596301, 1632, 8, 13], +[596381, 1632, 11, 1], +[596550, 1633, 4, 19], +[596694, 1633, 9, 10], +[596759, 1633, 11, 14], +[596820, 1634, 1, 14], +[597007, 1634, 7, 20], +[597169, 1634, 12, 29], +[597190, 1635, 1, 19], +[597241, 1635, 3, 11], +[597313, 1635, 5, 22], +[597504, 1635, 11, 29], +[597536, 1635, 12, 31], +[597656, 1636, 4, 29], +[597836, 1636, 10, 26], +[597897, 1636, 12, 26], +[597995, 1637, 4, 3], +[598026, 1637, 5, 4], +[598171, 1637, 9, 26], +[598324, 1638, 2, 26], +[598453, 1638, 7, 5], +[598497, 1638, 
8, 18], +[598504, 1638, 8, 25], +[598647, 1639, 1, 15], +[598689, 1639, 2, 26], +[598787, 1639, 6, 4], +[598956, 1639, 11, 20], +[599124, 1640, 5, 6], +[599163, 1640, 6, 14], +[599167, 1640, 6, 18], +[599266, 1640, 9, 25], +[599385, 1641, 1, 22], +[599555, 1641, 7, 11], +[599639, 1641, 10, 3], +[599734, 1642, 1, 6], +[599828, 1642, 4, 10], +[600002, 1642, 10, 1], +[600022, 1642, 10, 21], +[600026, 1642, 10, 25], +[600074, 1642, 12, 12], +[600169, 1643, 3, 17], +[600284, 1643, 7, 10], +[600359, 1643, 9, 23], +[600429, 1643, 12, 2], +[600569, 1644, 4, 20], +[600677, 1644, 8, 6], +[600722, 1644, 9, 20], +[600817, 1644, 12, 24], +[600922, 1645, 4, 8], +[600940, 1645, 4, 26], +[600966, 1645, 5, 22], +[601163, 1645, 12, 5], +[601297, 1646, 4, 18], +[601373, 1646, 7, 3], +[601467, 1646, 10, 5], +[601560, 1647, 1, 6], +[601586, 1647, 2, 1], +[601664, 1647, 4, 20], +[601731, 1647, 6, 26], +[601770, 1647, 8, 4], +[601867, 1647, 11, 9], +[602033, 1648, 4, 23], +[602174, 1648, 9, 11], +[602199, 1648, 10, 6], +[602289, 1649, 1, 4], +[602363, 1649, 3, 19], +[602511, 1649, 8, 14], +[602572, 1649, 10, 14], +[602702, 1650, 2, 21], +[602785, 1650, 5, 15], +[602832, 1650, 7, 1], +[602875, 1650, 8, 13], +[603070, 1651, 2, 24], +[603202, 1651, 7, 6], +[603328, 1651, 11, 9], +[603352, 1651, 12, 3], +[603506, 1652, 5, 5], +[603564, 1652, 7, 2], +[603603, 1652, 8, 10], +[603768, 1653, 1, 22], +[603816, 1653, 3, 11], +[603945, 1653, 7, 18], +[604106, 1653, 12, 26], +[604293, 1654, 7, 1], +[604311, 1654, 7, 19], +[604470, 1654, 12, 25], +[604646, 1655, 6, 19], +[604667, 1655, 7, 10], +[604669, 1655, 7, 12], +[604731, 1655, 9, 12], +[604855, 1656, 1, 14], +[604960, 1656, 4, 28], +[605020, 1656, 6, 27], +[605124, 1656, 10, 9], +[605211, 1657, 1, 4], +[605317, 1657, 4, 20], +[605407, 1657, 7, 19], +[605463, 1657, 9, 13], +[605517, 1657, 11, 6], +[605690, 1658, 4, 28], +[605701, 1658, 5, 9], +[605787, 1658, 8, 3], +[605905, 1658, 11, 29], +[605956, 1659, 1, 19], +[606113, 1659, 6, 25], 
+[606291, 1659, 12, 20], +[606466, 1660, 6, 12], +[606630, 1660, 11, 23], +[606672, 1661, 1, 4], +[606784, 1661, 4, 26], +[606870, 1661, 7, 21], +[606879, 1661, 7, 30], +[606960, 1661, 10, 19], +[607158, 1662, 5, 5], +[607353, 1662, 11, 16], +[607370, 1662, 12, 3], +[607499, 1663, 4, 11], +[607527, 1663, 5, 9], +[607549, 1663, 5, 31], +[607689, 1663, 10, 18], +[607752, 1663, 12, 20], +[607928, 1664, 6, 13], +[608127, 1664, 12, 29], +[608207, 1665, 3, 19], +[608311, 1665, 7, 1], +[608357, 1665, 8, 16], +[608530, 1666, 2, 5], +[608641, 1666, 5, 27], +[608709, 1666, 8, 3], +[608779, 1666, 10, 12], +[608880, 1667, 1, 21], +[609008, 1667, 5, 29], +[609182, 1667, 11, 19], +[609226, 1668, 1, 2], +[609285, 1668, 3, 1], +[609458, 1668, 8, 21], +[609619, 1669, 1, 29], +[609647, 1669, 2, 26], +[609718, 1669, 5, 8], +[609898, 1669, 11, 4], +[609921, 1669, 11, 27], +[610069, 1670, 4, 24], +[610108, 1670, 6, 2], +[610175, 1670, 8, 8], +[610335, 1671, 1, 15], +[610528, 1671, 7, 27], +[610655, 1671, 12, 1], +[610727, 1672, 2, 11], +[610734, 1672, 2, 18], +[610780, 1672, 4, 4], +[610966, 1672, 10, 7], +[611032, 1672, 12, 12], +[611168, 1673, 4, 27], +[611268, 1673, 8, 5], +[611404, 1673, 12, 19], +[611405, 1673, 12, 20], +[611438, 1674, 1, 22], +[611633, 1674, 8, 5], +[611787, 1675, 1, 6], +[611815, 1675, 2, 3], +[611907, 1675, 5, 6], +[611963, 1675, 7, 1], +[611994, 1675, 8, 1], +[612192, 1676, 2, 15], +[612374, 1676, 8, 15], +[612484, 1676, 12, 3], +[612664, 1677, 6, 1], +[612754, 1677, 8, 30], +[612839, 1677, 11, 23], +[612849, 1677, 12, 3], +[612949, 1678, 3, 13], +[613057, 1678, 6, 29], +[613187, 1678, 11, 6], +[613282, 1679, 2, 9], +[613301, 1679, 2, 28], +[613415, 1679, 6, 22], +[613471, 1679, 8, 17], +[613539, 1679, 10, 24], +[613716, 1680, 4, 18], +[613752, 1680, 5, 24], +[613787, 1680, 6, 28], +[613967, 1680, 12, 25], +[613999, 1681, 1, 26], +[614135, 1681, 6, 11], +[614285, 1681, 11, 8], +[614290, 1681, 11, 13], +[614443, 1682, 4, 15], +[614529, 1682, 7, 10], +[614650, 
1682, 11, 8], +[614838, 1683, 5, 15], +[614879, 1683, 6, 25], +[614946, 1683, 8, 31], +[615036, 1683, 11, 29], +[615091, 1684, 1, 23], +[615246, 1684, 6, 26], +[615286, 1684, 8, 5], +[615345, 1684, 10, 3], +[615443, 1685, 1, 9], +[615540, 1685, 4, 16], +[615694, 1685, 9, 17], +[615849, 1686, 2, 19], +[615987, 1686, 7, 7], +[616138, 1686, 12, 5], +[616255, 1687, 4, 1], +[616434, 1687, 9, 27], +[616624, 1688, 4, 4], +[616650, 1688, 4, 30], +[616703, 1688, 6, 22], +[616752, 1688, 8, 10], +[616759, 1688, 8, 17], +[616844, 1688, 11, 10], +[616929, 1689, 2, 3], +[617067, 1689, 6, 21], +[617099, 1689, 7, 23], +[617182, 1689, 10, 14], +[617240, 1689, 12, 11], +[617368, 1690, 4, 18], +[617444, 1690, 7, 3], +[617469, 1690, 7, 28], +[617596, 1690, 12, 2], +[617726, 1691, 4, 11], +[617918, 1691, 10, 20], +[617974, 1691, 12, 15], +[617998, 1692, 1, 8], +[618182, 1692, 7, 10], +[618279, 1692, 10, 15], +[618337, 1692, 12, 12], +[618437, 1693, 3, 22], +[618595, 1693, 8, 27], +[618789, 1694, 3, 9], +[618955, 1694, 8, 22], +[619135, 1695, 2, 18], +[619274, 1695, 7, 7], +[619419, 1695, 11, 29], +[619608, 1696, 6, 5], +[619670, 1696, 8, 6], +[619740, 1696, 10, 15], +[619820, 1697, 1, 3], +[619917, 1697, 4, 10], +[619936, 1697, 4, 29], +[619942, 1697, 5, 5], +[619987, 1697, 6, 19], +[620073, 1697, 9, 13], +[620218, 1698, 2, 5], +[620316, 1698, 5, 14], +[620378, 1698, 7, 15], +[620493, 1698, 11, 7], +[620602, 1699, 2, 24], +[620630, 1699, 3, 24], +[620753, 1699, 7, 25], +[620898, 1699, 12, 17], +[620995, 1700, 3, 24], +[621080, 1700, 6, 17], +[621232, 1700, 11, 16], +[621312, 1701, 2, 4], +[621381, 1701, 4, 14], +[621481, 1701, 7, 23], +[621513, 1701, 8, 24], +[621614, 1701, 12, 3], +[621666, 1702, 1, 24], +[621732, 1702, 3, 31], +[621854, 1702, 7, 31], +[622030, 1703, 1, 23], +[622055, 1703, 2, 17], +[622124, 1703, 4, 27], +[622290, 1703, 10, 10], +[622426, 1704, 2, 23], +[622451, 1704, 3, 19], +[622522, 1704, 5, 29], +[622714, 1704, 12, 7], +[622901, 1705, 6, 12], +[622938, 1705, 7, 
19], +[622956, 1705, 8, 6], +[623054, 1705, 11, 12], +[623227, 1706, 5, 4], +[623239, 1706, 5, 16], +[623241, 1706, 5, 18], +[623346, 1706, 8, 31], +[623496, 1707, 1, 28], +[623497, 1707, 1, 29], +[623690, 1707, 8, 10], +[623871, 1708, 2, 7], +[623970, 1708, 5, 16], +[624064, 1708, 8, 18], +[624116, 1708, 10, 9], +[624136, 1708, 10, 29], +[624252, 1709, 2, 22], +[624436, 1709, 8, 25], +[624608, 1710, 2, 13], +[624699, 1710, 5, 15], +[624741, 1710, 6, 26], +[624865, 1710, 10, 28], +[624952, 1711, 1, 23], +[625054, 1711, 5, 5], +[625141, 1711, 7, 31], +[625333, 1712, 2, 8], +[625378, 1712, 3, 24], +[625506, 1712, 7, 30], +[625514, 1712, 8, 7], +[625666, 1713, 1, 6], +[625745, 1713, 3, 26], +[625872, 1713, 7, 31], +[625991, 1713, 11, 27], +[626099, 1714, 3, 15], +[626108, 1714, 3, 24], +[626268, 1714, 8, 31], +[626283, 1714, 9, 15], +[626385, 1714, 12, 26], +[626525, 1715, 5, 15], +[626647, 1715, 9, 14], +[626779, 1716, 1, 24], +[626849, 1716, 4, 3], +[626897, 1716, 5, 21], +[626952, 1716, 7, 15], +[627065, 1716, 11, 5], +[627156, 1717, 2, 4], +[627308, 1717, 7, 6], +[627405, 1717, 10, 11], +[627474, 1717, 12, 19], +[627548, 1718, 3, 3], +[627745, 1718, 9, 16], +[627771, 1718, 10, 12], +[627948, 1719, 4, 7], +[628099, 1719, 9, 5], +[628168, 1719, 11, 13], +[628254, 1720, 2, 7], +[628382, 1720, 6, 14], +[628445, 1720, 8, 16], +[628560, 1720, 12, 9], +[628645, 1721, 3, 4], +[628768, 1721, 7, 5], +[628868, 1721, 10, 13], +[628913, 1721, 11, 27], +[628993, 1722, 2, 15], +[629006, 1722, 2, 28], +[629084, 1722, 5, 17], +[629164, 1722, 8, 5], +[629229, 1722, 10, 9], +[629393, 1723, 3, 22], +[629421, 1723, 4, 19], +[629592, 1723, 10, 7], +[629688, 1724, 1, 11], +[629774, 1724, 4, 6], +[629926, 1724, 9, 5], +[630094, 1725, 2, 20], +[630266, 1725, 8, 11], +[630461, 1726, 2, 22], +[630569, 1726, 6, 10], +[630761, 1726, 12, 19], +[630918, 1727, 5, 25], +[631035, 1727, 9, 19], +[631217, 1728, 3, 19], +[631283, 1728, 5, 24], +[631442, 1728, 10, 30], +[631501, 1728, 12, 28], 
+[631549, 1729, 2, 14], +[631738, 1729, 8, 22], +[631833, 1729, 11, 25], +[632004, 1730, 5, 15], +[632117, 1730, 9, 5], +[632244, 1731, 1, 10], +[632443, 1731, 7, 28], +[632570, 1731, 12, 2], +[632622, 1732, 1, 23], +[632679, 1732, 3, 20], +[632786, 1732, 7, 5], +[632969, 1733, 1, 4], +[633139, 1733, 6, 23], +[633178, 1733, 8, 1], +[633350, 1734, 1, 20], +[633424, 1734, 4, 4], +[633437, 1734, 4, 17], +[633615, 1734, 10, 12], +[633737, 1735, 2, 11], +[633913, 1735, 8, 6], +[634107, 1736, 2, 16], +[634226, 1736, 6, 14], +[634348, 1736, 10, 14], +[634409, 1736, 12, 14], +[634440, 1737, 1, 14], +[634516, 1737, 3, 31], +[634621, 1737, 7, 14], +[634722, 1737, 10, 23], +[634837, 1738, 2, 15], +[635001, 1738, 7, 29], +[635082, 1738, 10, 18], +[635084, 1738, 10, 20], +[635229, 1739, 3, 14], +[635307, 1739, 5, 31], +[635391, 1739, 8, 23], +[635419, 1739, 9, 20], +[635512, 1739, 12, 22], +[635590, 1740, 3, 9], +[635628, 1740, 4, 16], +[635808, 1740, 10, 13], +[635880, 1740, 12, 24], +[636040, 1741, 6, 2], +[636066, 1741, 6, 28], +[636178, 1741, 10, 18], +[636302, 1742, 2, 19], +[636451, 1742, 7, 18], +[636500, 1742, 9, 5], +[636570, 1742, 11, 14], +[636727, 1743, 4, 20], +[636767, 1743, 5, 30], +[636870, 1743, 9, 10], +[637061, 1744, 3, 19], +[637226, 1744, 8, 31], +[637258, 1744, 10, 2], +[637407, 1745, 2, 28], +[637570, 1745, 8, 10], +[637725, 1746, 1, 12], +[637876, 1746, 6, 12], +[637928, 1746, 8, 3], +[637977, 1746, 9, 21], +[638143, 1747, 3, 6], +[638297, 1747, 8, 7], +[638486, 1748, 2, 12], +[638614, 1748, 6, 19], +[638700, 1748, 9, 13], +[638787, 1748, 12, 9], +[638926, 1749, 4, 27], +[639112, 1749, 10, 30], +[639272, 1750, 4, 8], +[639433, 1750, 9, 16], +[639620, 1751, 3, 22], +[639645, 1751, 4, 16], +[639665, 1751, 5, 6], +[639724, 1751, 7, 4], +[639881, 1751, 12, 8], +[640032, 1752, 5, 7], +[640184, 1752, 10, 6], +[640310, 1753, 2, 9], +[640430, 1753, 6, 9], +[640507, 1753, 8, 25], +[640602, 1753, 11, 28], +[640775, 1754, 5, 20], +[640898, 1754, 9, 20], +[641079, 
1755, 3, 20], +[641150, 1755, 5, 30], +[641202, 1755, 7, 21], +[641236, 1755, 8, 24], +[641318, 1755, 11, 14], +[641435, 1756, 3, 10], +[641587, 1756, 8, 9], +[641745, 1757, 1, 14], +[641747, 1757, 1, 16], +[641887, 1757, 6, 5], +[642035, 1757, 10, 31], +[642049, 1757, 11, 14], +[642067, 1757, 12, 2], +[642241, 1758, 5, 25], +[642427, 1758, 11, 27], +[642505, 1759, 2, 13], +[642604, 1759, 5, 23], +[642666, 1759, 7, 24], +[642717, 1759, 9, 13], +[642810, 1759, 12, 15], +[642821, 1759, 12, 26], +[642877, 1760, 2, 20], +[643039, 1760, 7, 31], +[643229, 1761, 2, 6], +[643397, 1761, 7, 24], +[643429, 1761, 8, 25], +[643476, 1761, 10, 11], +[643486, 1761, 10, 21], +[643565, 1762, 1, 8], +[643650, 1762, 4, 3], +[643750, 1762, 7, 12], +[643889, 1762, 11, 28], +[644080, 1763, 6, 7], +[644093, 1763, 6, 20], +[644223, 1763, 10, 28], +[644322, 1764, 2, 4], +[644388, 1764, 4, 10], +[644572, 1764, 10, 11], +[644626, 1764, 12, 4], +[644766, 1765, 4, 23], +[644773, 1765, 4, 30], +[644928, 1765, 10, 2], +[645069, 1766, 2, 20], +[645247, 1766, 8, 17], +[645376, 1766, 12, 24], +[645399, 1767, 1, 16], +[645508, 1767, 5, 5], +[645596, 1767, 8, 1], +[645778, 1768, 1, 30], +[645876, 1768, 5, 7], +[645988, 1768, 8, 27], +[646175, 1769, 3, 2], +[646255, 1769, 5, 21], +[646313, 1769, 7, 18], +[646445, 1769, 11, 27], +[646514, 1770, 2, 4], +[646558, 1770, 3, 20], +[646715, 1770, 8, 24], +[646771, 1770, 10, 19], +[646925, 1771, 3, 22], +[646940, 1771, 4, 6], +[647015, 1771, 6, 20], +[647043, 1771, 7, 18], +[647225, 1772, 1, 16], +[647425, 1772, 8, 3], +[647508, 1772, 10, 25], +[647628, 1773, 2, 22], +[647712, 1773, 5, 17], +[647911, 1773, 12, 2], +[648003, 1774, 3, 4], +[648140, 1774, 7, 19], +[648217, 1774, 10, 4], +[648293, 1774, 12, 19], +[648381, 1775, 3, 17], +[648398, 1775, 4, 3], +[648417, 1775, 4, 22], +[648480, 1775, 6, 24], +[648677, 1776, 1, 7], +[648688, 1776, 1, 18], +[648819, 1776, 5, 28], +[648901, 1776, 8, 18], +[649002, 1776, 11, 27], +[649075, 1777, 2, 8], +[649133, 1777, 4, 
7], +[649165, 1777, 5, 9], +[649175, 1777, 5, 19], +[649209, 1777, 6, 22], +[649292, 1777, 9, 13], +[649409, 1778, 1, 8], +[649513, 1778, 4, 22], +[649692, 1778, 10, 18], +[649836, 1779, 3, 11], +[649974, 1779, 7, 27], +[650166, 1780, 2, 4], +[650334, 1780, 7, 21], +[650478, 1780, 12, 12], +[650521, 1781, 1, 24], +[650569, 1781, 3, 13], +[650657, 1781, 6, 9], +[650679, 1781, 7, 1], +[650837, 1781, 12, 6], +[650900, 1782, 2, 7], +[650911, 1782, 2, 18], +[651087, 1782, 8, 13], +[651232, 1783, 1, 5], +[651288, 1783, 3, 2], +[651421, 1783, 7, 13], +[651621, 1784, 1, 29], +[651649, 1784, 2, 26], +[651776, 1784, 7, 2], +[651935, 1784, 12, 8], +[651952, 1784, 12, 25], +[652132, 1785, 6, 23], +[652228, 1785, 9, 27], +[652301, 1785, 12, 9], +[652398, 1786, 3, 16], +[652449, 1786, 5, 6], +[652545, 1786, 8, 10], +[652616, 1786, 10, 20], +[652696, 1787, 1, 8], +[652745, 1787, 2, 26], +[652913, 1787, 8, 13], +[652949, 1787, 9, 18], +[652997, 1787, 11, 5], +[653051, 1787, 12, 29], +[653249, 1788, 7, 14], +[653275, 1788, 8, 9], +[653368, 1788, 11, 10], +[653444, 1789, 1, 25], +[653606, 1789, 7, 6], +[653803, 1790, 1, 19], +[653874, 1790, 3, 31], +[653926, 1790, 5, 22], +[653979, 1790, 7, 14], +[654093, 1790, 11, 5], +[654134, 1790, 12, 16], +[654232, 1791, 3, 24], +[654280, 1791, 5, 11], +[654355, 1791, 7, 25], +[654455, 1791, 11, 2], +[654485, 1791, 12, 2], +[654662, 1792, 5, 27], +[654723, 1792, 7, 27], +[654818, 1792, 10, 30], +[654928, 1793, 2, 17], +[654995, 1793, 4, 25], +[655042, 1793, 6, 11], +[655103, 1793, 8, 11], +[655264, 1794, 1, 19], +[655286, 1794, 2, 10], +[655359, 1794, 4, 24], +[655426, 1794, 6, 30], +[655519, 1794, 10, 1], +[655679, 1795, 3, 10], +[655755, 1795, 5, 25], +[655943, 1795, 11, 29], +[655945, 1795, 12, 1], +[656034, 1796, 2, 28], +[656148, 1796, 6, 21], +[656218, 1796, 8, 30], +[656221, 1796, 9, 2], +[656352, 1797, 1, 11], +[656540, 1797, 7, 18], +[656634, 1797, 10, 20], +[656804, 1798, 4, 8], +[656843, 1798, 5, 17], +[656875, 1798, 6, 18], 
+[656952, 1798, 9, 3], +[657057, 1798, 12, 17], +[657156, 1799, 3, 26], +[657314, 1799, 8, 31], +[657315, 1799, 9, 1], +[657476, 1800, 2, 9], +[657505, 1800, 3, 10], +[657546, 1800, 4, 20], +[657703, 1800, 9, 24], +[657712, 1800, 10, 3], +[657771, 1800, 12, 1], +[657926, 1801, 5, 5], +[657966, 1801, 6, 14], +[658080, 1801, 10, 6], +[658259, 1802, 4, 3], +[658439, 1802, 9, 30], +[658610, 1803, 3, 20], +[658628, 1803, 4, 7], +[658794, 1803, 9, 20], +[658806, 1803, 10, 2], +[658969, 1804, 3, 13], +[659032, 1804, 5, 15], +[659219, 1804, 11, 18], +[659262, 1804, 12, 31], +[659457, 1805, 7, 14], +[659495, 1805, 8, 21], +[659659, 1806, 2, 1], +[659669, 1806, 2, 11], +[659765, 1806, 5, 18], +[659932, 1806, 11, 1], +[660098, 1807, 4, 16], +[660154, 1807, 6, 11], +[660262, 1807, 9, 27], +[660439, 1808, 3, 22], +[660543, 1808, 7, 4], +[660548, 1808, 7, 9], +[660681, 1808, 11, 19], +[660746, 1809, 1, 23], +[660793, 1809, 3, 11], +[660825, 1809, 4, 12], +[661007, 1809, 10, 11], +[661177, 1810, 3, 30], +[661199, 1810, 4, 21], +[661259, 1810, 6, 20], +[661452, 1810, 12, 30], +[661644, 1811, 7, 10], +[661844, 1812, 1, 26], +[661934, 1812, 4, 25], +[662055, 1812, 8, 24], +[662206, 1813, 1, 22], +[662363, 1813, 6, 28], +[662447, 1813, 9, 20], +[662636, 1814, 3, 28], +[662666, 1814, 4, 27], +[662802, 1814, 9, 10], +[662974, 1815, 3, 1], +[663146, 1815, 8, 20], +[663275, 1815, 12, 27], +[663328, 1816, 2, 18], +[663451, 1816, 6, 20], +[663547, 1816, 9, 24], +[663576, 1816, 10, 23], +[663604, 1816, 11, 20], +[663794, 1817, 5, 29], +[663846, 1817, 7, 20], +[663897, 1817, 9, 9], +[663899, 1817, 9, 11], +[664048, 1818, 2, 7], +[664145, 1818, 5, 15], +[664206, 1818, 7, 15], +[664358, 1818, 12, 14], +[664550, 1819, 6, 24], +[664636, 1819, 9, 18], +[664782, 1820, 2, 11], +[664919, 1820, 6, 27], +[664968, 1820, 8, 15], +[665126, 1821, 1, 20], +[665298, 1821, 7, 11], +[665415, 1821, 11, 5], +[665428, 1821, 11, 18], +[665617, 1822, 5, 26], +[665634, 1822, 6, 12], +[665683, 1822, 7, 31], 
+[665729, 1822, 9, 15], +[665796, 1822, 11, 21], +[665972, 1823, 5, 16], +[666069, 1823, 8, 21], +[666114, 1823, 10, 5], +[666177, 1823, 12, 7], +[666337, 1824, 5, 15], +[666524, 1824, 11, 18], +[666697, 1825, 5, 10], +[666782, 1825, 8, 3], +[666873, 1825, 11, 2], +[666957, 1826, 1, 25], +[667032, 1826, 4, 10], +[667178, 1826, 9, 3], +[667193, 1826, 9, 18], +[667386, 1827, 3, 30], +[667546, 1827, 9, 6], +[667678, 1828, 1, 16], +[667722, 1828, 2, 29], +[667809, 1828, 5, 26], +[667941, 1828, 10, 5], +[667983, 1828, 11, 16], +[668108, 1829, 3, 21], +[668198, 1829, 6, 19], +[668247, 1829, 8, 7], +[668425, 1830, 2, 1], +[668622, 1830, 8, 17], +[668746, 1830, 12, 19], +[668927, 1831, 6, 18], +[669116, 1831, 12, 24], +[669234, 1832, 4, 20], +[669268, 1832, 5, 24], +[669405, 1832, 10, 8], +[669499, 1833, 1, 10], +[669681, 1833, 7, 11], +[669780, 1833, 10, 18], +[669933, 1834, 3, 20], +[670093, 1834, 8, 27], +[670111, 1834, 9, 14], +[670225, 1835, 1, 6], +[670263, 1835, 2, 13], +[670264, 1835, 2, 14], +[670436, 1835, 8, 5], +[670550, 1835, 11, 27], +[670603, 1836, 1, 19], +[670737, 1836, 6, 1], +[670837, 1836, 9, 9], +[671025, 1837, 3, 16], +[671127, 1837, 6, 26], +[671275, 1837, 11, 21], +[671412, 1838, 4, 7], +[671416, 1838, 4, 11], +[671544, 1838, 8, 17], +[671702, 1839, 1, 22], +[671814, 1839, 5, 14], +[671966, 1839, 10, 13], +[672158, 1840, 4, 22], +[672358, 1840, 11, 8], +[672437, 1841, 1, 26], +[672478, 1841, 3, 8], +[672561, 1841, 5, 30], +[672653, 1841, 8, 30], +[672811, 1842, 2, 4], +[672977, 1842, 7, 20], +[673077, 1842, 10, 28], +[673144, 1843, 1, 3], +[673193, 1843, 2, 21], +[673328, 1843, 7, 6], +[673348, 1843, 7, 26], +[673395, 1843, 9, 11], +[673548, 1844, 2, 11], +[673665, 1844, 6, 7], +[673863, 1844, 12, 22], +[674062, 1845, 7, 9], +[674250, 1846, 1, 13], +[674315, 1846, 3, 19], +[674368, 1846, 5, 11], +[674453, 1846, 8, 4], +[674549, 1846, 11, 8], +[674685, 1847, 3, 24], +[674697, 1847, 4, 5], +[674784, 1847, 7, 1], +[674887, 1847, 10, 12], +[675083, 
1848, 4, 25], +[675267, 1848, 10, 26], +[675274, 1848, 11, 2], +[675297, 1848, 11, 25], +[675333, 1848, 12, 31], +[675378, 1849, 2, 14], +[675550, 1849, 8, 5], +[675694, 1849, 12, 27], +[675775, 1850, 3, 18], +[675961, 1850, 9, 20], +[676069, 1851, 1, 6], +[676212, 1851, 5, 29], +[676295, 1851, 8, 20], +[676451, 1852, 1, 23], +[676644, 1852, 8, 3], +[676698, 1852, 9, 26], +[676795, 1853, 1, 1], +[676971, 1853, 6, 26], +[677014, 1853, 8, 8], +[677080, 1853, 10, 13], +[677111, 1853, 11, 13], +[677169, 1854, 1, 10], +[677195, 1854, 2, 5], +[677338, 1854, 6, 28], +[677443, 1854, 10, 11], +[677640, 1855, 4, 26], +[677715, 1855, 7, 10], +[677816, 1855, 10, 19], +[677823, 1855, 10, 26], +[677934, 1856, 2, 14], +[678019, 1856, 5, 9], +[678154, 1856, 9, 21], +[678303, 1857, 2, 17], +[678345, 1857, 3, 31], +[678410, 1857, 6, 4], +[678459, 1857, 7, 23], +[678608, 1857, 12, 19], +[678793, 1858, 6, 22], +[678983, 1858, 12, 29], +[679074, 1859, 3, 30], +[679163, 1859, 6, 27], +[679215, 1859, 8, 18], +[679251, 1859, 9, 23], +[679280, 1859, 10, 22], +[679426, 1860, 3, 16], +[679441, 1860, 3, 31], +[679610, 1860, 9, 16], +[679646, 1860, 10, 22], +[679763, 1861, 2, 16], +[679873, 1861, 6, 6], +[680067, 1861, 12, 17], +[680142, 1862, 3, 2], +[680249, 1862, 6, 17], +[680420, 1862, 12, 5], +[680517, 1863, 3, 12], +[680541, 1863, 4, 5], +[680721, 1863, 10, 2], +[680809, 1863, 12, 29], +[680888, 1864, 3, 17], +[680943, 1864, 5, 11], +[680975, 1864, 6, 12], +[681040, 1864, 8, 16], +[681086, 1864, 10, 1], +[681186, 1865, 1, 9], +[681198, 1865, 1, 21], +[681390, 1865, 8, 1], +[681581, 1866, 2, 8], +[681653, 1866, 4, 21], +[681820, 1866, 10, 5], +[681926, 1867, 1, 19], +[682064, 1867, 6, 6], +[682149, 1867, 8, 30], +[682222, 1867, 11, 11], +[682307, 1868, 2, 4], +[682318, 1868, 2, 15], +[682330, 1868, 2, 27], +[682385, 1868, 4, 22], +[682478, 1868, 7, 24], +[682591, 1868, 11, 14], +[682787, 1869, 5, 29], +[682866, 1869, 8, 16], +[683056, 1870, 2, 22], +[683183, 1870, 6, 29], +[683338, 1870, 
12, 1], +[683428, 1871, 3, 1], +[683617, 1871, 9, 6], +[683808, 1872, 3, 15], +[683918, 1872, 7, 3], +[684116, 1873, 1, 17], +[684241, 1873, 5, 22], +[684394, 1873, 10, 22], +[684529, 1874, 3, 6], +[684674, 1874, 7, 29], +[684865, 1875, 2, 5], +[684994, 1875, 6, 14], +[685034, 1875, 7, 24], +[685101, 1875, 9, 29], +[685111, 1875, 10, 9], +[685153, 1875, 11, 20], +[685199, 1876, 1, 5], +[685271, 1876, 3, 17], +[685367, 1876, 6, 21], +[685491, 1876, 10, 23], +[685561, 1877, 1, 1], +[685753, 1877, 7, 12], +[685936, 1878, 1, 11], +[686107, 1878, 7, 1], +[686220, 1878, 10, 22], +[686343, 1879, 2, 22], +[686491, 1879, 7, 20], +[686621, 1879, 11, 27], +[686633, 1879, 12, 9], +[686795, 1880, 5, 19], +[686928, 1880, 9, 29], +[687071, 1881, 2, 19], +[687180, 1881, 6, 8], +[687181, 1881, 6, 9], +[687221, 1881, 7, 19], +[687227, 1881, 7, 25], +[687312, 1881, 10, 18], +[687478, 1882, 4, 2], +[687677, 1882, 10, 18], +[687772, 1883, 1, 21], +[687879, 1883, 5, 8], +[688020, 1883, 9, 26], +[688028, 1883, 10, 4], +[688033, 1883, 10, 9], +[688119, 1884, 1, 3], +[688269, 1884, 6, 1], +[688357, 1884, 8, 28], +[688439, 1884, 11, 18], +[688578, 1885, 4, 6], +[688660, 1885, 6, 27], +[688752, 1885, 9, 27], +[688769, 1885, 10, 14], +[688968, 1886, 5, 1], +[689166, 1886, 11, 15], +[689211, 1886, 12, 30], +[689337, 1887, 5, 5], +[689352, 1887, 5, 20], +[689490, 1887, 10, 5], +[689554, 1887, 12, 8], +[689574, 1887, 12, 28], +[689760, 1888, 7, 1], +[689917, 1888, 12, 5], +[690038, 1889, 4, 5], +[690202, 1889, 9, 16], +[690205, 1889, 9, 19], +[690222, 1889, 10, 6], +[690299, 1889, 12, 22], +[690472, 1890, 6, 13], +[690508, 1890, 7, 19], +[690604, 1890, 10, 23], +[690701, 1891, 1, 28], +[690860, 1891, 7, 6], +[691054, 1892, 1, 16], +[691154, 1892, 4, 25], +[691288, 1892, 9, 6], +[691348, 1892, 11, 5], +[691440, 1893, 2, 5], +[691538, 1893, 5, 14], +[691560, 1893, 6, 5], +[691660, 1893, 9, 13], +[691694, 1893, 10, 17], +[691890, 1894, 5, 1], +[692028, 1894, 9, 16], +[692073, 1894, 10, 31], 
+[692268, 1895, 5, 14], +[692341, 1895, 7, 26], +[692448, 1895, 11, 10], +[692450, 1895, 11, 12], +[692580, 1896, 3, 21], +[692662, 1896, 6, 11], +[692680, 1896, 6, 29], +[692793, 1896, 10, 20], +[692917, 1897, 2, 21], +[692948, 1897, 3, 24], +[692995, 1897, 5, 10], +[693024, 1897, 6, 8], +[693214, 1897, 12, 15], +[693279, 1898, 2, 18], +[693388, 1898, 6, 7], +[693432, 1898, 7, 21], +[693449, 1898, 8, 7], +[693613, 1899, 1, 18], +[693686, 1899, 4, 1], +[693767, 1899, 6, 21], +[693890, 1899, 10, 22], +[693924, 1899, 11, 25], +[694083, 1900, 5, 3], +[694139, 1900, 6, 28], +[694253, 1900, 10, 20], +[694369, 1901, 2, 13], +[694561, 1901, 8, 24], +[694728, 1902, 2, 7], +[694855, 1902, 6, 14], +[694942, 1902, 9, 9], +[695080, 1903, 1, 25], +[695180, 1903, 5, 5], +[695269, 1903, 8, 2], +[695369, 1903, 11, 10], +[695560, 1904, 5, 19], +[695570, 1904, 5, 29], +[695637, 1904, 8, 4], +[695690, 1904, 9, 26], +[695854, 1905, 3, 9], +[695888, 1905, 4, 12], +[695985, 1905, 7, 18], +[696007, 1905, 8, 9], +[696016, 1905, 8, 18], +[696124, 1905, 12, 4], +[696159, 1906, 1, 8], +[696203, 1906, 2, 21], +[696214, 1906, 3, 4], +[696323, 1906, 6, 21], +[696332, 1906, 6, 30], +[696405, 1906, 9, 11], +[696585, 1907, 3, 10], +[696667, 1907, 5, 31], +[696723, 1907, 7, 26], +[696804, 1907, 10, 15], +[696919, 1908, 2, 7], +[697010, 1908, 5, 8], +[697048, 1908, 6, 15], +[697107, 1908, 8, 13], +[697290, 1909, 2, 12], +[697471, 1909, 8, 12], +[697524, 1909, 10, 4], +[697571, 1909, 11, 20], +[697723, 1910, 4, 21], +[697892, 1910, 10, 7], +[697917, 1910, 11, 1], +[698001, 1911, 1, 24], +[698093, 1911, 4, 26], +[698258, 1911, 10, 8], +[698268, 1911, 10, 18], +[698269, 1911, 10, 19], +[698457, 1912, 4, 24], +[698628, 1912, 10, 12], +[698684, 1912, 12, 7], +[698860, 1913, 6, 1], +[698919, 1913, 7, 30], +[698990, 1913, 10, 9], +[699065, 1913, 12, 23], +[699161, 1914, 3, 29], +[699213, 1914, 5, 20], +[699368, 1914, 10, 22], +[699523, 1915, 3, 26], +[699705, 1915, 9, 24], +[699797, 1915, 12, 25], 
+[699881, 1916, 3, 18], +[699891, 1916, 3, 28], +[700003, 1916, 7, 18], +[700158, 1916, 12, 20], +[700185, 1917, 1, 16], +[700338, 1917, 6, 18], +[700426, 1917, 9, 14], +[700499, 1917, 11, 26], +[700505, 1917, 12, 2], +[700663, 1918, 5, 9], +[700729, 1918, 7, 14], +[700785, 1918, 9, 8], +[700807, 1918, 9, 30], +[700825, 1918, 10, 18], +[700872, 1918, 12, 4], +[701045, 1919, 5, 26], +[701200, 1919, 10, 28], +[701237, 1919, 12, 4], +[701411, 1920, 5, 26], +[701418, 1920, 6, 2], +[701459, 1920, 7, 13], +[701592, 1920, 11, 23], +[701620, 1920, 12, 21], +[701691, 1921, 3, 2], +[701807, 1921, 6, 26], +[701885, 1921, 9, 12], +[701960, 1921, 11, 26], +[702029, 1922, 2, 3], +[702155, 1922, 6, 9], +[702244, 1922, 9, 6], +[702291, 1922, 10, 23], +[702481, 1923, 5, 1], +[702621, 1923, 9, 18], +[702821, 1924, 4, 5], +[702911, 1924, 7, 4], +[703008, 1924, 10, 9], +[703133, 1925, 2, 11], +[703251, 1925, 6, 9], +[703291, 1925, 7, 19], +[703418, 1925, 11, 23], +[703445, 1925, 12, 20], +[703501, 1926, 2, 14], +[703569, 1926, 4, 23], +[703644, 1926, 7, 7], +[703664, 1926, 7, 27], +[703697, 1926, 8, 29], +[703754, 1926, 10, 25], +[703848, 1927, 1, 27], +[703965, 1927, 5, 24], +[704071, 1927, 9, 7], +[704219, 1928, 2, 2], +[704235, 1928, 2, 18], +[704377, 1928, 7, 9], +[704546, 1928, 12, 25], +[704581, 1929, 1, 29], +[704620, 1929, 3, 9], +[704804, 1929, 9, 9], +[704833, 1929, 10, 8], +[705011, 1930, 4, 4], +[705059, 1930, 5, 22], +[705062, 1930, 5, 25], +[705188, 1930, 9, 28], +[705332, 1931, 2, 19], +[705513, 1931, 8, 19], +[705572, 1931, 10, 17], +[705715, 1932, 3, 8], +[705836, 1932, 7, 7], +[705959, 1932, 11, 7], +[706103, 1933, 3, 31], +[706246, 1933, 8, 21], +[706342, 1933, 11, 25], +[706347, 1933, 11, 30], +[706439, 1934, 3, 2], +[706531, 1934, 6, 2], +[706669, 1934, 10, 18], +[706763, 1935, 1, 20], +[706765, 1935, 1, 22], +[706872, 1935, 5, 9], +[707027, 1935, 10, 11], +[707076, 1935, 11, 29], +[707208, 1936, 4, 9], +[707244, 1936, 5, 15], +[707355, 1936, 9, 3], +[707493, 
1937, 1, 19], +[707524, 1937, 2, 19], +[707644, 1937, 6, 19], +[707771, 1937, 10, 24], +[707916, 1938, 3, 18], +[708059, 1938, 8, 8], +[708085, 1938, 9, 3], +[708229, 1939, 1, 25], +[708232, 1939, 1, 28], +[708288, 1939, 3, 25], +[708469, 1939, 9, 22], +[708643, 1940, 3, 14], +[708793, 1940, 8, 11], +[708954, 1941, 1, 19], +[709052, 1941, 4, 27], +[709080, 1941, 5, 25], +[709154, 1941, 8, 7], +[709309, 1942, 1, 9], +[709419, 1942, 4, 29], +[709426, 1942, 5, 6], +[709435, 1942, 5, 15], +[709507, 1942, 7, 26], +[709535, 1942, 8, 23], +[709563, 1942, 9, 20], +[709662, 1942, 12, 28], +[709835, 1943, 6, 19], +[709987, 1943, 11, 18], +[710131, 1944, 4, 10], +[710164, 1944, 5, 13], +[710205, 1944, 6, 23], +[710214, 1944, 7, 2], +[710279, 1944, 9, 5], +[710319, 1944, 10, 15], +[710400, 1945, 1, 4], +[710521, 1945, 5, 5], +[710529, 1945, 5, 13], +[710691, 1945, 10, 22], +[710743, 1945, 12, 13], +[710832, 1946, 3, 12], +[710966, 1946, 7, 24], +[711012, 1946, 9, 8], +[711018, 1946, 9, 14], +[711204, 1947, 3, 19], +[711285, 1947, 6, 8], +[711452, 1947, 11, 22], +[711531, 1948, 2, 9], +[711710, 1948, 8, 6], +[711907, 1949, 2, 19], +[711981, 1949, 5, 4], +[712109, 1949, 9, 9], +[712117, 1949, 9, 17], +[712240, 1950, 1, 18], +[712371, 1950, 5, 29], +[712436, 1950, 8, 2], +[712474, 1950, 9, 9], +[712656, 1951, 3, 10], +[712754, 1951, 6, 16], +[712845, 1951, 9, 15], +[712946, 1951, 12, 25], +[713102, 1952, 5, 29], +[713299, 1952, 12, 12], +[713498, 1953, 6, 29], +[713561, 1953, 8, 31], +[713656, 1953, 12, 4], +[713845, 1954, 6, 11], +[713907, 1954, 8, 12], +[713910, 1954, 8, 15], +[713974, 1954, 10, 18], +[714017, 1954, 11, 30], +[714132, 1955, 3, 25], +[714175, 1955, 5, 7], +[714311, 1955, 9, 20], +[714416, 1956, 1, 3], +[714511, 1956, 4, 7], +[714581, 1956, 6, 16], +[714693, 1956, 10, 6], +[714732, 1956, 11, 14], +[714875, 1957, 4, 6], +[715048, 1957, 9, 26], +[715090, 1957, 11, 7], +[715127, 1957, 12, 14], +[715220, 1958, 3, 17], +[715368, 1958, 8, 12], +[715415, 1958, 9, 28], 
+[715419, 1958, 10, 2], +[715450, 1958, 11, 2], +[715633, 1959, 5, 4], +[715682, 1959, 6, 22], +[715712, 1959, 7, 22], +[715750, 1959, 8, 29], +[715896, 1960, 1, 22], +[715957, 1960, 3, 23], +[716119, 1960, 9, 1], +[716260, 1961, 1, 20], +[716378, 1961, 5, 18], +[716400, 1961, 6, 9], +[716487, 1961, 9, 4], +[716575, 1961, 12, 1], +[716676, 1962, 3, 12], +[716682, 1962, 3, 18], +[716801, 1962, 7, 15], +[716978, 1963, 1, 8], +[717008, 1963, 2, 7], +[717030, 1963, 3, 1], +[717120, 1963, 5, 30], +[717179, 1963, 7, 28], +[717215, 1963, 9, 2], +[717394, 1964, 2, 28], +[717495, 1964, 6, 8], +[717507, 1964, 6, 20], +[717559, 1964, 8, 11], +[717598, 1964, 9, 19], +[717680, 1964, 12, 10], +[717859, 1965, 6, 7], +[717953, 1965, 9, 9], +[718070, 1966, 1, 4], +[718210, 1966, 5, 24], +[718265, 1966, 7, 18], +[718353, 1966, 10, 14], +[718450, 1967, 1, 19], +[718506, 1967, 3, 16], +[718529, 1967, 4, 8], +[718703, 1967, 9, 29], +[718900, 1968, 4, 13], +[718986, 1968, 7, 8], +[719029, 1968, 8, 20], +[719228, 1969, 3, 7], +[719362, 1969, 7, 19], +[719558, 1970, 1, 31], +[719580, 1970, 2, 22], +[719623, 1970, 4, 6], +[719732, 1970, 7, 24], +[719795, 1970, 9, 25], +[719824, 1970, 10, 24], +[719955, 1971, 3, 4], +[720091, 1971, 7, 18], +[720225, 1971, 11, 29], +[720282, 1972, 1, 25], +[720299, 1972, 2, 11], +[720395, 1972, 5, 17], +[720402, 1972, 5, 24], +[720570, 1972, 11, 8], +[720684, 1973, 3, 2], +[720756, 1973, 5, 13], +[720941, 1973, 11, 14], +[721122, 1974, 5, 14], +[721154, 1974, 6, 15], +[721283, 1974, 10, 22], +[721438, 1975, 3, 26], +[721616, 1975, 9, 20], +[721745, 1976, 1, 27], +[721792, 1976, 3, 14], +[721829, 1976, 4, 20], +[721984, 1976, 9, 22], +[722045, 1976, 11, 22], +[722143, 1977, 2, 28], +[722288, 1977, 7, 23], +[722371, 1977, 10, 14], +[722561, 1978, 4, 22], +[722700, 1978, 9, 8], +[722722, 1978, 9, 30], +[722866, 1979, 2, 21], +[723044, 1979, 8, 18], +[723054, 1979, 8, 28], +[723084, 1979, 9, 27], +[723148, 1979, 11, 30], +[723334, 1980, 6, 3], +[723509, 1980, 
11, 25], +[723652, 1981, 4, 17], +[723832, 1981, 10, 14], +[724028, 1982, 4, 28], +[724042, 1982, 5, 12], +[724178, 1982, 9, 25], +[724320, 1983, 2, 14], +[724438, 1983, 6, 12], +[724596, 1983, 11, 17], +[724693, 1984, 2, 22], +[724742, 1984, 4, 11], +[724865, 1984, 8, 12], +[724912, 1984, 9, 28], +[724926, 1984, 10, 12], +[724930, 1984, 10, 16], +[724938, 1984, 10, 24], +[725062, 1985, 2, 25], +[725067, 1985, 3, 2], +[725242, 1985, 8, 24], +[725265, 1985, 9, 16], +[725385, 1986, 1, 14], +[725555, 1986, 7, 3], +[725615, 1986, 9, 1], +[725747, 1987, 1, 11], +[725754, 1987, 1, 18], +[725932, 1987, 7, 15], +[726014, 1987, 10, 5], +[726138, 1988, 2, 6], +[726288, 1988, 7, 5], +[726390, 1988, 10, 15], +[726574, 1989, 4, 17], +[726719, 1989, 9, 9], +[726802, 1989, 12, 1], +[726953, 1990, 5, 1], +[727099, 1990, 9, 24], +[727157, 1990, 11, 21], +[727250, 1991, 2, 22], +[727394, 1991, 7, 16], +[727581, 1992, 1, 19], +[727729, 1992, 6, 15], +[727926, 1992, 12, 29], +[728032, 1993, 4, 14], +[728082, 1993, 6, 3], +[728210, 1993, 10, 9], +[728274, 1993, 12, 12], +[728344, 1994, 2, 20], +[728540, 1994, 9, 4], +[728546, 1994, 9, 10], +[728624, 1994, 11, 27], +[728629, 1994, 12, 2], +[728647, 1994, 12, 20], +[728649, 1994, 12, 22], +[728671, 1995, 1, 13], +[728859, 1995, 7, 20], +[728967, 1995, 11, 5], +[729141, 1996, 4, 27], +[729278, 1996, 9, 11], +[729461, 1997, 3, 13], +[729539, 1997, 5, 30], +[729563, 1997, 6, 23], +[729634, 1997, 9, 2], +[729786, 1998, 2, 1], +[729882, 1998, 5, 8], +[730022, 1998, 9, 25], +[730115, 1998, 12, 27], +[730184, 1999, 3, 6], +[730383, 1999, 9, 21], +[730469, 1999, 12, 16], +[730554, 2000, 3, 10], +[730745, 2000, 9, 17], +[730891, 2001, 2, 10], +[730995, 2001, 5, 25], +[731002, 2001, 6, 1], +[731050, 2001, 7, 19], +[731154, 2001, 10, 31], +[731299, 2002, 3, 25], +[731348, 2002, 5, 13], +[731541, 2002, 11, 22], +[731692, 2003, 4, 22], +[731779, 2003, 7, 18], +[731800, 2003, 8, 8], +[731978, 2004, 2, 2], +[732056, 2004, 4, 20], +[732099, 2004, 6, 2], 
+[732108, 2004, 6, 11], +[732296, 2004, 12, 16], +[732435, 2005, 5, 4], +[732612, 2005, 10, 28], +[732654, 2005, 12, 9], +[732841, 2006, 6, 14], +[732965, 2006, 10, 16], +[733043, 2007, 1, 2], +[733206, 2007, 6, 14], +[733349, 2007, 11, 4], +[733459, 2008, 2, 22], +[733620, 2008, 8, 1], +[733661, 2008, 9, 11], +[733798, 2009, 1, 26], +[733832, 2009, 3, 1], +[733851, 2009, 3, 20], +[734010, 2009, 8, 26], +[734202, 2010, 3, 6], +[734298, 2010, 6, 10], +[734317, 2010, 6, 29], +[734516, 2011, 1, 14], +[734665, 2011, 6, 12], +[734857, 2011, 12, 21], +[734884, 2012, 1, 17], +[734939, 2012, 3, 12], +[735073, 2012, 7, 24], +[735241, 2013, 1, 8], +[735419, 2013, 7, 5], +[735489, 2013, 9, 13], +[735604, 2014, 1, 6], +[735750, 2014, 6, 1], +[735839, 2014, 8, 29], +[736006, 2015, 2, 12], +[736040, 2015, 3, 18], +[736132, 2015, 6, 18], +[736176, 2015, 8, 1], +[736181, 2015, 8, 6], +[736354, 2016, 1, 26], +[736482, 2016, 6, 2], +[736485, 2016, 6, 5], +[736522, 2016, 7, 12], +[736523, 2016, 7, 13], +[736549, 2016, 8, 8], +[736603, 2016, 10, 1], +[736641, 2016, 11, 8], +[736647, 2016, 11, 14], +[736688, 2016, 12, 25], +[736765, 2017, 3, 12], +[736914, 2017, 8, 8], +[736932, 2017, 8, 26], +[737066, 2018, 1, 7], +[737113, 2018, 2, 23], +[737233, 2018, 6, 23], +[737382, 2018, 11, 19], +[737557, 2019, 5, 13], +[737586, 2019, 6, 11], +[737700, 2019, 10, 3], +[737724, 2019, 10, 27], +[737735, 2019, 11, 7], +[737736, 2019, 11, 8], +[737810, 2020, 1, 21], +[737885, 2020, 4, 5], +[738021, 2020, 8, 19], +[738116, 2020, 11, 22], +[738306, 2021, 5, 31], +[738374, 2021, 8, 7], +[738521, 2022, 1, 1], +[738546, 2022, 1, 26], +[738739, 2022, 8, 7], +[738904, 2023, 1, 19], +[738965, 2023, 3, 21], +[739009, 2023, 5, 4], +[739127, 2023, 8, 30], +[739243, 2023, 12, 24], +[739401, 2024, 5, 30], +[739573, 2024, 11, 18], +[739581, 2024, 11, 26], +[739611, 2024, 12, 26], +[739684, 2025, 3, 9], +[739755, 2025, 5, 19], +[739896, 2025, 10, 7], +[740083, 2026, 4, 12], +[740134, 2026, 6, 2], +[740317, 2026, 
12, 2], +[740396, 2027, 2, 19], +[740536, 2027, 7, 9], +[740576, 2027, 8, 18], +[740650, 2027, 10, 31], +[740796, 2028, 3, 25], +[740850, 2028, 5, 18], +[740965, 2028, 9, 10], +[740999, 2028, 10, 14], +[741100, 2029, 1, 23], +[741125, 2029, 2, 17], +[741266, 2029, 7, 8], +[741434, 2029, 12, 23], +[741541, 2030, 4, 9], +[741615, 2030, 6, 22], +[741666, 2030, 8, 12], +[741863, 2031, 2, 25], +[741880, 2031, 3, 14], +[741987, 2031, 6, 29], +[742020, 2031, 8, 1], +[742143, 2031, 12, 2], +[742233, 2032, 3, 1], +[742359, 2032, 7, 5], +[742518, 2032, 12, 11], +[742590, 2033, 2, 21], +[742761, 2033, 8, 11], +[742953, 2034, 2, 19], +[743092, 2034, 7, 8], +[743279, 2035, 1, 11], +[743302, 2035, 2, 3], +[743467, 2035, 7, 18], +[743515, 2035, 9, 4], +[743552, 2035, 10, 11], +[743661, 2036, 1, 28], +[743812, 2036, 6, 27], +[743891, 2036, 9, 14], +[743997, 2036, 12, 29], +[744108, 2037, 4, 19], +[744155, 2037, 6, 5], +[744320, 2037, 11, 17], +[744520, 2038, 6, 5], +[744598, 2038, 8, 22], +[744695, 2038, 11, 27], +[744854, 2039, 5, 5], +[744904, 2039, 6, 24], +[744923, 2039, 7, 13], +[745072, 2039, 12, 9], +[745085, 2039, 12, 22], +[745171, 2040, 3, 17], +[745371, 2040, 10, 3], +[745539, 2041, 3, 20], +[745585, 2041, 5, 5], +[745678, 2041, 8, 6], +[745856, 2042, 1, 31], +[745915, 2042, 3, 31], +[745964, 2042, 5, 19], +[746020, 2042, 7, 14], +[746148, 2042, 11, 19], +[746202, 2043, 1, 12], +[746343, 2043, 6, 2], +[746483, 2043, 10, 20], +[746608, 2044, 2, 22], +[746699, 2044, 5, 23], +[746844, 2044, 10, 15], +[747028, 2045, 4, 17], +[747035, 2045, 4, 24], +[747174, 2045, 9, 10], +[747256, 2045, 12, 1], +[747428, 2046, 5, 22], +[747510, 2046, 8, 12], +[747701, 2047, 2, 19], +[747703, 2047, 2, 21], +[747766, 2047, 4, 25], +[747940, 2047, 10, 16], +[748093, 2048, 3, 17], +[748225, 2048, 7, 27], +[748280, 2048, 9, 20], +[748293, 2048, 10, 3], +[748467, 2049, 3, 26], +[748641, 2049, 9, 16], +[748698, 2049, 11, 12], +[748827, 2050, 3, 21], +[748870, 2050, 5, 3], +[749041, 2050, 10, 21], 
+[749130, 2051, 1, 18], +[749283, 2051, 6, 20], +[749328, 2051, 8, 4], +[749486, 2052, 1, 9], +[749633, 2052, 6, 4], +[749791, 2052, 11, 9], +[749810, 2052, 11, 28], +[749834, 2052, 12, 22], +[749884, 2053, 2, 10], +[749993, 2053, 5, 30], +[750002, 2053, 6, 8], +[750093, 2053, 9, 7], +[750275, 2054, 3, 8], +[750399, 2054, 7, 10], +[750550, 2054, 12, 8], +[750663, 2055, 3, 31], +[750856, 2055, 10, 10], +[751008, 2056, 3, 10], +[751118, 2056, 6, 28], +[751134, 2056, 7, 14], +[751193, 2056, 9, 11], +[751268, 2056, 11, 25], +[751440, 2057, 5, 16], +[751530, 2057, 8, 14], +[751534, 2057, 8, 18], +[751637, 2057, 11, 29], +[751652, 2057, 12, 14], +[751669, 2057, 12, 31], +[751692, 2058, 1, 23], +[751780, 2058, 4, 21], +[751803, 2058, 5, 14], +[751976, 2058, 11, 3], +[752056, 2059, 1, 22], +[752177, 2059, 5, 23], +[752253, 2059, 8, 7], +[752256, 2059, 8, 10], +[752276, 2059, 8, 30], +[752345, 2059, 11, 7], +[752465, 2060, 3, 6], +[752487, 2060, 3, 28], +[752578, 2060, 6, 27], +[752626, 2060, 8, 14], +[752778, 2061, 1, 13], +[752870, 2061, 4, 15], +[752964, 2061, 7, 18], +[753007, 2061, 8, 30], +[753070, 2061, 11, 1], +[753114, 2061, 12, 15], +[753264, 2062, 5, 14], +[753379, 2062, 9, 6], +[753495, 2062, 12, 31], +[753523, 2063, 1, 28], +[753593, 2063, 4, 8], +[753628, 2063, 5, 13], +[753810, 2063, 11, 11], +[754001, 2064, 5, 20], +[754199, 2064, 12, 4], +[754248, 2065, 1, 22], +[754302, 2065, 3, 17], +[754312, 2065, 3, 27], +[754503, 2065, 10, 4], +[754566, 2065, 12, 6], +[754748, 2066, 6, 6], +[754750, 2066, 6, 8], +[754774, 2066, 7, 2], +[754862, 2066, 9, 28], +[754986, 2067, 1, 30], +[755042, 2067, 3, 27], +[755082, 2067, 5, 6], +[755250, 2067, 10, 21], +[755437, 2068, 4, 25], +[755602, 2068, 10, 7], +[755692, 2069, 1, 5], +[755825, 2069, 5, 18], +[755928, 2069, 8, 29], +[755971, 2069, 10, 11], +[756112, 2070, 3, 1], +[756152, 2070, 4, 10], +[756331, 2070, 10, 6], +[756504, 2071, 3, 28], +[756593, 2071, 6, 25], +[756751, 2071, 11, 30], +[756755, 2071, 12, 4], +[756759, 
2071, 12, 8], +[756902, 2072, 4, 29], +[756945, 2072, 6, 11], +[757006, 2072, 8, 11], +[757052, 2072, 9, 26], +[757135, 2072, 12, 18], +[757312, 2073, 6, 13], +[757314, 2073, 6, 15], +[757466, 2073, 11, 14], +[757612, 2074, 4, 9], +[757704, 2074, 7, 10], +[757834, 2074, 11, 17], +[757889, 2075, 1, 11], +[757921, 2075, 2, 12], +[757925, 2075, 2, 16], +[758045, 2075, 6, 16], +[758065, 2075, 7, 6], +[758263, 2076, 1, 20], +[758402, 2076, 6, 7], +[758530, 2076, 10, 13], +[758615, 2077, 1, 6], +[758674, 2077, 3, 6], +[758761, 2077, 6, 1], +[758853, 2077, 9, 1], +[759002, 2078, 1, 28], +[759004, 2078, 1, 30], +[759135, 2078, 6, 10], +[759156, 2078, 7, 1], +[759248, 2078, 10, 1], +[759412, 2079, 3, 14], +[759515, 2079, 6, 25], +[759636, 2079, 10, 24], +[759736, 2080, 2, 1], +[759912, 2080, 7, 26], +[760080, 2081, 1, 10], +[760143, 2081, 3, 14], +[760250, 2081, 6, 29], +[760282, 2081, 7, 31], +[760473, 2082, 2, 7], +[760586, 2082, 5, 31], +[760767, 2082, 11, 28], +[760836, 2083, 2, 5], +[761013, 2083, 8, 1], +[761053, 2083, 9, 10], +[761134, 2083, 11, 30], +[761154, 2083, 12, 20], +[761252, 2084, 3, 27], +[761423, 2084, 9, 14], +[761586, 2085, 2, 24], +[761780, 2085, 9, 6], +[761979, 2086, 3, 24], +[762126, 2086, 8, 18], +[762282, 2087, 1, 21], +[762427, 2087, 6, 15], +[762506, 2087, 9, 2], +[762564, 2087, 10, 30], +[762597, 2087, 12, 2], +[762731, 2088, 4, 14], +[762823, 2088, 7, 15], +[762905, 2088, 10, 5], +[762996, 2089, 1, 4], +[763115, 2089, 5, 3], +[763244, 2089, 9, 9], +[763308, 2089, 11, 12], +[763364, 2090, 1, 7], +[763458, 2090, 4, 11], +[763539, 2090, 7, 1], +[763596, 2090, 8, 27], +[763634, 2090, 10, 4], +[763683, 2090, 11, 22], +[763854, 2091, 5, 12], +[763871, 2091, 5, 29], +[763946, 2091, 8, 12], +[764027, 2091, 11, 1], +[764041, 2091, 11, 15], +[764102, 2092, 1, 15], +[764172, 2092, 3, 25], +[764182, 2092, 4, 4], +[764250, 2092, 6, 11], +[764348, 2092, 9, 17], +[764401, 2092, 11, 9], +[764542, 2093, 3, 30], +[764543, 2093, 3, 31], +[764571, 2093, 4, 28], 
+[764572, 2093, 4, 29], +[764604, 2093, 5, 31], +[764631, 2093, 6, 27], +[764680, 2093, 8, 15], +[764690, 2093, 8, 25], +[764757, 2093, 10, 31], +[764857, 2094, 2, 8], +[764993, 2094, 6, 24], +[765101, 2094, 10, 10], +[765258, 2095, 3, 16], +[765307, 2095, 5, 4], +[765469, 2095, 10, 13], +[765629, 2096, 3, 21], +[765725, 2096, 6, 25], +[765742, 2096, 7, 12], +[765752, 2096, 7, 22], +[765888, 2096, 12, 5], +[766068, 2097, 6, 3], +[766139, 2097, 8, 13], +[766211, 2097, 10, 24], +[766233, 2097, 11, 15], +[766346, 2098, 3, 8], +[766418, 2098, 5, 19], +[766528, 2098, 9, 6], +[766588, 2098, 11, 5], +[766755, 2099, 4, 21], +[766774, 2099, 5, 10], +[766863, 2099, 8, 7], +[766943, 2099, 10, 26], +[766953, 2099, 11, 5], +[766989, 2099, 12, 11], +[767145, 2100, 5, 16], +[767151, 2100, 5, 22], +[767217, 2100, 7, 27], +[767286, 2100, 10, 4], +[767305, 2100, 10, 23], +[767429, 2101, 2, 24], +[767508, 2101, 5, 14], +[767579, 2101, 7, 24], +[767751, 2102, 1, 12], +[767919, 2102, 6, 29], +[767958, 2102, 8, 7], +[768090, 2102, 12, 17], +[768251, 2103, 5, 27], +[768405, 2103, 10, 28], +[768543, 2104, 3, 14], +[768714, 2104, 9, 1], +[768857, 2105, 1, 22], +[769001, 2105, 6, 15], +[769084, 2105, 9, 6], +[769104, 2105, 9, 26], +[769167, 2105, 11, 28], +[769340, 2106, 5, 20], +[769452, 2106, 9, 9], +[769529, 2106, 11, 25], +[769718, 2107, 6, 2], +[769741, 2107, 6, 25], +[769779, 2107, 8, 2], +[769847, 2107, 10, 9], +[769961, 2108, 1, 31], +[770048, 2108, 4, 27], +[770098, 2108, 6, 16], +[770295, 2108, 12, 30], +[770378, 2109, 3, 23], +[770461, 2109, 6, 14], +[770653, 2109, 12, 23], +[770822, 2110, 6, 10], +[770919, 2110, 9, 15], +[771047, 2111, 1, 21], +[771208, 2111, 7, 1], +[771319, 2111, 10, 20], +[771446, 2112, 2, 24], +[771574, 2112, 7, 1], +[771742, 2112, 12, 16], +[771765, 2113, 1, 8], +[771808, 2113, 2, 20], +[771904, 2113, 5, 27], +[771934, 2113, 6, 26], +[772040, 2113, 10, 10], +[772058, 2113, 10, 28], +[772212, 2114, 3, 31], +[772261, 2114, 5, 19], +[772349, 2114, 8, 15], 
+[772472, 2114, 12, 16], +[772578, 2115, 4, 1], +[772617, 2115, 5, 10], +[772741, 2115, 9, 11], +[772761, 2115, 10, 1], +[772854, 2116, 1, 2], +[772951, 2116, 4, 8], +[773117, 2116, 9, 21], +[773266, 2117, 2, 17], +[773299, 2117, 3, 22], +[773388, 2117, 6, 19], +[773507, 2117, 10, 16], +[773575, 2117, 12, 23], +[773750, 2118, 6, 16], +[773946, 2118, 12, 29], +[774040, 2119, 4, 2], +[774195, 2119, 9, 4], +[774342, 2120, 1, 29], +[774509, 2120, 7, 14], +[774603, 2120, 10, 16], +[774624, 2120, 11, 6], +[774815, 2121, 5, 16], +[774984, 2121, 11, 1], +[775029, 2121, 12, 16], +[775164, 2122, 4, 30], +[775191, 2122, 5, 27], +[775349, 2122, 11, 1], +[775526, 2123, 4, 27], +[775673, 2123, 9, 21], +[775789, 2124, 1, 15], +[775873, 2124, 4, 8], +[775884, 2124, 4, 19], +[775885, 2124, 4, 20], +[775964, 2124, 7, 8], +[776076, 2124, 10, 28], +[776096, 2124, 11, 17], +[776107, 2124, 11, 28], +[776110, 2124, 12, 1], +[776228, 2125, 3, 29], +[776292, 2125, 6, 1], +[776420, 2125, 10, 7], +[776511, 2126, 1, 6], +[776548, 2126, 2, 12], +[776648, 2126, 5, 23], +[776733, 2126, 8, 16], +[776741, 2126, 8, 24], +[776810, 2126, 11, 1], +[776915, 2127, 2, 14], +[776982, 2127, 4, 22], +[777049, 2127, 6, 28], +[777104, 2127, 8, 22], +[777209, 2127, 12, 5], +[777227, 2127, 12, 23], +[777295, 2128, 2, 29], +[777384, 2128, 5, 28], +[777555, 2128, 11, 15], +[777731, 2129, 5, 10], +[777847, 2129, 9, 3], +[777898, 2129, 10, 24], +[777926, 2129, 11, 21], +[778017, 2130, 2, 20], +[778063, 2130, 4, 7], +[778233, 2130, 9, 24], +[778361, 2131, 1, 30], +[778452, 2131, 5, 1], +[778555, 2131, 8, 12], +[778733, 2132, 2, 6], +[778823, 2132, 5, 6], +[778888, 2132, 7, 10], +[778945, 2132, 9, 5], +[779049, 2132, 12, 18], +[779062, 2132, 12, 31], +[779157, 2133, 4, 5], +[779356, 2133, 10, 21], +[779520, 2134, 4, 3], +[779676, 2134, 9, 6], +[779768, 2134, 12, 7], +[779918, 2135, 5, 6], +[780004, 2135, 7, 31], +[780161, 2136, 1, 4], +[780329, 2136, 6, 20], +[780496, 2136, 12, 4], +[780530, 2137, 1, 7], +[780706, 
2137, 7, 2], +[780750, 2137, 8, 15], +[780764, 2137, 8, 29], +[780846, 2137, 11, 19], +[781025, 2138, 5, 17], +[781091, 2138, 7, 22], +[781096, 2138, 7, 27], +[781198, 2138, 11, 6], +[781226, 2138, 12, 4], +[781348, 2139, 4, 5], +[781547, 2139, 10, 21], +[781562, 2139, 11, 5], +[781597, 2139, 12, 10], +[781764, 2140, 5, 25], +[781808, 2140, 7, 8], +[781941, 2140, 11, 18], +[782103, 2141, 4, 29], +[782239, 2141, 9, 12], +[782396, 2142, 2, 16], +[782579, 2142, 8, 18], +[782698, 2142, 12, 15], +[782719, 2143, 1, 5], +[782860, 2143, 5, 26], +[782990, 2143, 10, 3], +[783027, 2143, 11, 9], +[783202, 2144, 5, 2], +[783259, 2144, 6, 28], +[783319, 2144, 8, 27], +[783489, 2145, 2, 13], +[783608, 2145, 6, 12], +[783679, 2145, 8, 22], +[783741, 2145, 10, 23], +[783936, 2146, 5, 6], +[784029, 2146, 8, 7], +[784033, 2146, 8, 11], +[784135, 2146, 11, 21], +[784181, 2147, 1, 6], +[784340, 2147, 6, 14], +[784420, 2147, 9, 2], +[784516, 2147, 12, 7], +[784518, 2147, 12, 9], +[784632, 2148, 4, 1], +[784783, 2148, 8, 30], +[784787, 2148, 9, 3], +[784968, 2149, 3, 3], +[785067, 2149, 6, 10], +[785243, 2149, 12, 3], +[785399, 2150, 5, 8], +[785531, 2150, 9, 17], +[785696, 2151, 3, 1], +[785840, 2151, 7, 23], +[786033, 2152, 2, 1], +[786098, 2152, 4, 6], +[786184, 2152, 7, 1], +[786202, 2152, 7, 19], +[786385, 2153, 1, 18], +[786463, 2153, 4, 6], +[786577, 2153, 7, 29], +[786697, 2153, 11, 26], +[786848, 2154, 4, 26], +[787023, 2154, 10, 18], +[787153, 2155, 2, 25], +[787166, 2155, 3, 10], +[787295, 2155, 7, 17], +[787421, 2155, 11, 20], +[787448, 2155, 12, 17], +[787615, 2156, 6, 1], +[787759, 2156, 10, 23], +[787800, 2156, 12, 3], +[787846, 2157, 1, 18], +[788022, 2157, 7, 13], +[788063, 2157, 8, 23], +[788261, 2158, 3, 9], +[788277, 2158, 3, 25], +[788425, 2158, 8, 20], +[788602, 2159, 2, 13], +[788734, 2159, 6, 25], +[788872, 2159, 11, 10], +[788903, 2159, 12, 11], +[789025, 2160, 4, 11], +[789094, 2160, 6, 19], +[789215, 2160, 10, 18], +[789320, 2161, 1, 31], +[789433, 2161, 5, 
24], +[789504, 2161, 8, 3], +[789681, 2162, 1, 27], +[789685, 2162, 1, 31], +[789786, 2162, 5, 12], +[789901, 2162, 9, 4], +[789981, 2162, 11, 23], +[790123, 2163, 4, 14], +[790198, 2163, 6, 28], +[790237, 2163, 8, 6], +[790353, 2163, 11, 30], +[790474, 2164, 3, 30], +[790508, 2164, 5, 3], +[790589, 2164, 7, 23], +[790707, 2164, 11, 18], +[790865, 2165, 4, 25], +[790984, 2165, 8, 22], +[791138, 2166, 1, 23], +[791308, 2166, 7, 12], +[791493, 2167, 1, 13], +[791518, 2167, 2, 7], +[791636, 2167, 6, 5], +[791666, 2167, 7, 5], +[791737, 2167, 9, 14], +[791898, 2168, 2, 22], +[792069, 2168, 8, 11], +[792234, 2169, 1, 23], +[792259, 2169, 2, 17], +[792263, 2169, 2, 21], +[792317, 2169, 4, 16], +[792492, 2169, 10, 8], +[792658, 2170, 3, 23], +[792681, 2170, 4, 15], +[792780, 2170, 7, 23], +[792890, 2170, 11, 10], +[792965, 2171, 1, 24], +[793165, 2171, 8, 12], +[793349, 2172, 2, 12], +[793351, 2172, 2, 14], +[793531, 2172, 8, 12], +[793577, 2172, 9, 27], +[793749, 2173, 3, 18], +[793867, 2173, 7, 14], +[793909, 2173, 8, 25], +[794082, 2174, 2, 14], +[794143, 2174, 4, 16], +[794207, 2174, 6, 19], +[794296, 2174, 9, 16], +[794362, 2174, 11, 21], +[794414, 2175, 1, 12], +[794552, 2175, 5, 30], +[794571, 2175, 6, 18], +[794672, 2175, 9, 27], +[794797, 2176, 1, 30], +[794930, 2176, 6, 11], +[795127, 2176, 12, 25], +[795251, 2177, 4, 28], +[795350, 2177, 8, 5], +[795463, 2177, 11, 26], +[795477, 2177, 12, 10], +[795558, 2178, 3, 1], +[795609, 2178, 4, 21], +[795759, 2178, 9, 18], +[795933, 2179, 3, 11], +[795938, 2179, 3, 16], +[796130, 2179, 9, 24], +[796288, 2180, 2, 29], +[796455, 2180, 8, 14], +[796598, 2181, 1, 4], +[796742, 2181, 5, 28], +[796905, 2181, 11, 7], +[796925, 2181, 11, 27], +[796970, 2182, 1, 11], +[797022, 2182, 3, 4], +[797175, 2182, 8, 4], +[797237, 2182, 10, 5], +[797282, 2182, 11, 19], +[797343, 2183, 1, 19], +[797394, 2183, 3, 11], +[797564, 2183, 8, 28], +[797701, 2184, 1, 12], +[797714, 2184, 1, 25], +[797744, 2184, 2, 24], +[797811, 2184, 5, 1], 
+[797972, 2184, 10, 9], +[798049, 2184, 12, 25], +[798182, 2185, 5, 7], +[798312, 2185, 9, 14], +[798337, 2185, 10, 9], +[798396, 2185, 12, 7], +[798511, 2186, 4, 1], +[798585, 2186, 6, 14], +[798705, 2186, 10, 12], +[798831, 2187, 2, 15], +[799003, 2187, 8, 6], +[799126, 2187, 12, 7], +[799323, 2188, 6, 21], +[799359, 2188, 7, 27], +[799540, 2189, 1, 24], +[799706, 2189, 7, 9], +[799762, 2189, 9, 3], +[799773, 2189, 9, 14], +[799951, 2190, 3, 11], +[799991, 2190, 4, 20], +[800085, 2190, 7, 23], +[800121, 2190, 8, 28], +[800259, 2191, 1, 13], +[800364, 2191, 4, 28], +[800549, 2191, 10, 30], +[800728, 2192, 4, 26], +[800892, 2192, 10, 7], +[800938, 2192, 11, 22], +[801129, 2193, 6, 1], +[801232, 2193, 9, 12], +[801265, 2193, 10, 15], +[801447, 2194, 4, 15], +[801532, 2194, 7, 9], +[801646, 2194, 10, 31], +[801705, 2194, 12, 29], +[801892, 2195, 7, 4], +[801973, 2195, 9, 23], +[801995, 2195, 10, 15], +[802139, 2196, 3, 7], +[802243, 2196, 6, 19], +[802406, 2196, 11, 29], +[802480, 2197, 2, 11], +[802559, 2197, 5, 1], +[802655, 2197, 8, 5], +[802735, 2197, 10, 24], +[802830, 2198, 1, 27], +[802833, 2198, 1, 30], +[802839, 2198, 2, 5], +[803037, 2198, 8, 22], +[803139, 2198, 12, 2], +[803207, 2199, 2, 8], +[803341, 2199, 6, 22], +[803479, 2199, 11, 7], +[803679, 2200, 5, 26], +[803737, 2200, 7, 23], +[803775, 2200, 8, 30], +[803914, 2201, 1, 16], +[803976, 2201, 3, 19], +[804027, 2201, 5, 9], +[804144, 2201, 9, 3], +[804257, 2201, 12, 25], +[804373, 2202, 4, 20], +[804402, 2202, 5, 19], +[804482, 2202, 8, 7], +[804603, 2202, 12, 6], +[804736, 2203, 4, 18], +[804747, 2203, 4, 29], +[804881, 2203, 9, 10], +[805059, 2204, 3, 6], +[805077, 2204, 3, 24], +[805146, 2204, 6, 1], +[805250, 2204, 9, 13], +[805268, 2204, 10, 1], +[805442, 2205, 3, 24], +[805592, 2205, 8, 21], +[805702, 2205, 12, 9], +[805748, 2206, 1, 24], +[805848, 2206, 5, 4], +[805868, 2206, 5, 24], +[806052, 2206, 11, 24], +[806095, 2207, 1, 6], +[806200, 2207, 4, 21], +[806321, 2207, 8, 20], +[806503, 2208, 
2, 18], +[806673, 2208, 8, 6], +[806686, 2208, 8, 19], +[806759, 2208, 10, 31], +[806785, 2208, 11, 26], +[806846, 2209, 1, 26], +[806914, 2209, 4, 4], +[807038, 2209, 8, 6], +[807226, 2210, 2, 10], +[807365, 2210, 6, 29], +[807460, 2210, 10, 2], +[807474, 2210, 10, 16], +[807584, 2211, 2, 3], +[807756, 2211, 7, 25], +[807825, 2211, 10, 2], +[807913, 2211, 12, 29], +[808060, 2212, 5, 24], +[808223, 2212, 11, 3], +[808282, 2213, 1, 1], +[808412, 2213, 5, 11], +[808437, 2213, 6, 5], +[808532, 2213, 9, 8], +[808560, 2213, 10, 6], +[808658, 2214, 1, 12], +[808742, 2214, 4, 6], +[808860, 2214, 8, 2], +[808939, 2214, 10, 20], +[809027, 2215, 1, 16], +[809192, 2215, 6, 30], +[809354, 2215, 12, 9], +[809474, 2216, 4, 7], +[809525, 2216, 5, 28], +[809649, 2216, 9, 29], +[809757, 2217, 1, 15], +[809780, 2217, 2, 7], +[809857, 2217, 4, 25], +[809958, 2217, 8, 4], +[810126, 2218, 1, 19], +[810162, 2218, 2, 24], +[810188, 2218, 3, 22], +[810269, 2218, 6, 11], +[810378, 2218, 9, 28], +[810422, 2218, 11, 11], +[810508, 2219, 2, 5], +[810540, 2219, 3, 9], +[810707, 2219, 8, 23], +[810761, 2219, 10, 16], +[810888, 2220, 2, 20], +[811066, 2220, 8, 16], +[811178, 2220, 12, 6], +[811205, 2221, 1, 2], +[811391, 2221, 7, 7], +[811533, 2221, 11, 26], +[811691, 2222, 5, 3], +[811775, 2222, 7, 26], +[811895, 2222, 11, 23], +[812019, 2223, 3, 27], +[812144, 2223, 7, 30], +[812274, 2223, 12, 7], +[812275, 2223, 12, 8], +[812406, 2224, 4, 17], +[812554, 2224, 9, 12], +[812721, 2225, 2, 26], +[812897, 2225, 8, 21], +[813053, 2226, 1, 24], +[813252, 2226, 8, 11], +[813360, 2226, 11, 27], +[813385, 2226, 12, 22], +[813529, 2227, 5, 15], +[813548, 2227, 6, 3], +[813566, 2227, 6, 21], +[813693, 2227, 10, 26], +[813808, 2228, 2, 18], +[813818, 2228, 2, 28], +[813879, 2228, 4, 29], +[813972, 2228, 7, 31], +[814080, 2228, 11, 16], +[814132, 2229, 1, 7], +[814248, 2229, 5, 3], +[814268, 2229, 5, 23], +[814331, 2229, 7, 25], +[814451, 2229, 11, 22], +[814455, 2229, 11, 26], +[814592, 2230, 4, 12], 
+[814713, 2230, 8, 11], +[814788, 2230, 10, 25], +[814899, 2231, 2, 13], +[814978, 2231, 5, 3], +[815028, 2231, 6, 22], +[815083, 2231, 8, 16], +[815151, 2231, 10, 23], +[815248, 2232, 1, 28], +[815333, 2232, 4, 22], +[815429, 2232, 7, 27], +[815609, 2233, 1, 23], +[815772, 2233, 7, 5], +[815878, 2233, 10, 19], +[815911, 2233, 11, 21], +[815942, 2233, 12, 22], +[816116, 2234, 6, 14], +[816195, 2234, 9, 1], +[816218, 2234, 9, 24], +[816318, 2235, 1, 2], +[816511, 2235, 7, 14], +[816521, 2235, 7, 24], +[816536, 2235, 8, 8], +[816605, 2235, 10, 16], +[816631, 2235, 11, 11], +[816702, 2236, 1, 21], +[816900, 2236, 8, 6], +[817062, 2237, 1, 15], +[817223, 2237, 6, 25], +[817385, 2237, 12, 4], +[817504, 2238, 4, 2], +[817532, 2238, 4, 30], +[817675, 2238, 9, 20], +[817689, 2238, 10, 4], +[817854, 2239, 3, 18], +[818017, 2239, 8, 28], +[818153, 2240, 1, 11], +[818255, 2240, 4, 22], +[818416, 2240, 9, 30], +[818607, 2241, 4, 9], +[818781, 2241, 9, 30], +[818900, 2242, 1, 27], +[818975, 2242, 4, 12], +[819127, 2242, 9, 11], +[819130, 2242, 9, 14], +[819171, 2242, 10, 25], +[819280, 2243, 2, 11], +[819333, 2243, 4, 5], +[819452, 2243, 8, 2], +[819571, 2243, 11, 29], +[819678, 2244, 3, 15], +[819702, 2244, 4, 8], +[819799, 2244, 7, 14], +[819937, 2244, 11, 29], +[820005, 2245, 2, 5], +[820148, 2245, 6, 28], +[820299, 2245, 11, 26], +[820337, 2246, 1, 3], +[820379, 2246, 2, 14], +[820441, 2246, 4, 17], +[820531, 2246, 7, 16], +[820565, 2246, 8, 19], +[820736, 2247, 2, 6], +[820895, 2247, 7, 15], +[820999, 2247, 10, 27], +[821186, 2248, 5, 1], +[821205, 2248, 5, 20], +[821217, 2248, 6, 1], +[821358, 2248, 10, 20], +[821393, 2248, 11, 24], +[821532, 2249, 4, 12], +[821568, 2249, 5, 18], +[821735, 2249, 11, 1], +[821867, 2250, 3, 13], +[821884, 2250, 3, 30]] diff --git a/go/mysql/datetime/testdata/year_to_daynr.json b/go/mysql/datetime/testdata/year_to_daynr.json new file mode 100644 index 00000000000..43914806d21 --- /dev/null +++ b/go/mysql/datetime/testdata/year_to_daynr.json 
@@ -0,0 +1 @@ +[1, 366, 731, 1096, 1461, 1827, 2192, 2557, 2922, 3288, 3653, 4018, 4383, 4749, 5114, 5479, 5844, 6210, 6575, 6940, 7305, 7671, 8036, 8401, 8766, 9132, 9497, 9862, 10227, 10593, 10958, 11323, 11688, 12054, 12419, 12784, 13149, 13515, 13880, 14245, 14610, 14976, 15341, 15706, 16071, 16437, 16802, 17167, 17532, 17898, 18263, 18628, 18993, 19359, 19724, 20089, 20454, 20820, 21185, 21550, 21915, 22281, 22646, 23011, 23376, 23742, 24107, 24472, 24837, 25203, 25568, 25933, 26298, 26664, 27029, 27394, 27759, 28125, 28490, 28855, 29220, 29586, 29951, 30316, 30681, 31047, 31412, 31777, 32142, 32508, 32873, 33238, 33603, 33969, 34334, 34699, 35064, 35430, 35795, 36160, 36525, 36890, 37255, 37620, 37985, 38351, 38716, 39081, 39446, 39812, 40177, 40542, 40907, 41273, 41638, 42003, 42368, 42734, 43099, 43464, 43829, 44195, 44560, 44925, 45290, 45656, 46021, 46386, 46751, 47117, 47482, 47847, 48212, 48578, 48943, 49308, 49673, 50039, 50404, 50769, 51134, 51500, 51865, 52230, 52595, 52961, 53326, 53691, 54056, 54422, 54787, 55152, 55517, 55883, 56248, 56613, 56978, 57344, 57709, 58074, 58439, 58805, 59170, 59535, 59900, 60266, 60631, 60996, 61361, 61727, 62092, 62457, 62822, 63188, 63553, 63918, 64283, 64649, 65014, 65379, 65744, 66110, 66475, 66840, 67205, 67571, 67936, 68301, 68666, 69032, 69397, 69762, 70127, 70493, 70858, 71223, 71588, 71954, 72319, 72684, 73049, 73414, 73779, 74144, 74509, 74875, 75240, 75605, 75970, 76336, 76701, 77066, 77431, 77797, 78162, 78527, 78892, 79258, 79623, 79988, 80353, 80719, 81084, 81449, 81814, 82180, 82545, 82910, 83275, 83641, 84006, 84371, 84736, 85102, 85467, 85832, 86197, 86563, 86928, 87293, 87658, 88024, 88389, 88754, 89119, 89485, 89850, 90215, 90580, 90946, 91311, 91676, 92041, 92407, 92772, 93137, 93502, 93868, 94233, 94598, 94963, 95329, 95694, 96059, 96424, 96790, 97155, 97520, 97885, 98251, 98616, 98981, 99346, 99712, 100077, 100442, 100807, 101173, 101538, 101903, 102268, 102634, 102999, 103364, 103729, 104095, 
104460, 104825, 105190, 105556, 105921, 106286, 106651, 107017, 107382, 107747, 108112, 108478, 108843, 109208, 109573, 109938, 110303, 110668, 111033, 111399, 111764, 112129, 112494, 112860, 113225, 113590, 113955, 114321, 114686, 115051, 115416, 115782, 116147, 116512, 116877, 117243, 117608, 117973, 118338, 118704, 119069, 119434, 119799, 120165, 120530, 120895, 121260, 121626, 121991, 122356, 122721, 123087, 123452, 123817, 124182, 124548, 124913, 125278, 125643, 126009, 126374, 126739, 127104, 127470, 127835, 128200, 128565, 128931, 129296, 129661, 130026, 130392, 130757, 131122, 131487, 131853, 132218, 132583, 132948, 133314, 133679, 134044, 134409, 134775, 135140, 135505, 135870, 136236, 136601, 136966, 137331, 137697, 138062, 138427, 138792, 139158, 139523, 139888, 140253, 140619, 140984, 141349, 141714, 142080, 142445, 142810, 143175, 143541, 143906, 144271, 144636, 145002, 145367, 145732, 146097, 146463, 146828, 147193, 147558, 147924, 148289, 148654, 149019, 149385, 149750, 150115, 150480, 150846, 151211, 151576, 151941, 152307, 152672, 153037, 153402, 153768, 154133, 154498, 154863, 155229, 155594, 155959, 156324, 156690, 157055, 157420, 157785, 158151, 158516, 158881, 159246, 159612, 159977, 160342, 160707, 161073, 161438, 161803, 162168, 162534, 162899, 163264, 163629, 163995, 164360, 164725, 165090, 165456, 165821, 166186, 166551, 166917, 167282, 167647, 168012, 168378, 168743, 169108, 169473, 169839, 170204, 170569, 170934, 171300, 171665, 172030, 172395, 172761, 173126, 173491, 173856, 174222, 174587, 174952, 175317, 175683, 176048, 176413, 176778, 177144, 177509, 177874, 178239, 178605, 178970, 179335, 179700, 180066, 180431, 180796, 181161, 181527, 181892, 182257, 182622, 182987, 183352, 183717, 184082, 184448, 184813, 185178, 185543, 185909, 186274, 186639, 187004, 187370, 187735, 188100, 188465, 188831, 189196, 189561, 189926, 190292, 190657, 191022, 191387, 191753, 192118, 192483, 192848, 193214, 193579, 193944, 194309, 194675, 195040, 195405, 
195770, 196136, 196501, 196866, 197231, 197597, 197962, 198327, 198692, 199058, 199423, 199788, 200153, 200519, 200884, 201249, 201614, 201980, 202345, 202710, 203075, 203441, 203806, 204171, 204536, 204902, 205267, 205632, 205997, 206363, 206728, 207093, 207458, 207824, 208189, 208554, 208919, 209285, 209650, 210015, 210380, 210746, 211111, 211476, 211841, 212207, 212572, 212937, 213302, 213668, 214033, 214398, 214763, 215129, 215494, 215859, 216224, 216590, 216955, 217320, 217685, 218051, 218416, 218781, 219146, 219511, 219876, 220241, 220606, 220972, 221337, 221702, 222067, 222433, 222798, 223163, 223528, 223894, 224259, 224624, 224989, 225355, 225720, 226085, 226450, 226816, 227181, 227546, 227911, 228277, 228642, 229007, 229372, 229738, 230103, 230468, 230833, 231199, 231564, 231929, 232294, 232660, 233025, 233390, 233755, 234121, 234486, 234851, 235216, 235582, 235947, 236312, 236677, 237043, 237408, 237773, 238138, 238504, 238869, 239234, 239599, 239965, 240330, 240695, 241060, 241426, 241791, 242156, 242521, 242887, 243252, 243617, 243982, 244348, 244713, 245078, 245443, 245809, 246174, 246539, 246904, 247270, 247635, 248000, 248365, 248731, 249096, 249461, 249826, 250192, 250557, 250922, 251287, 251653, 252018, 252383, 252748, 253114, 253479, 253844, 254209, 254575, 254940, 255305, 255670, 256035, 256400, 256765, 257130, 257496, 257861, 258226, 258591, 258957, 259322, 259687, 260052, 260418, 260783, 261148, 261513, 261879, 262244, 262609, 262974, 263340, 263705, 264070, 264435, 264801, 265166, 265531, 265896, 266262, 266627, 266992, 267357, 267723, 268088, 268453, 268818, 269184, 269549, 269914, 270279, 270645, 271010, 271375, 271740, 272106, 272471, 272836, 273201, 273567, 273932, 274297, 274662, 275028, 275393, 275758, 276123, 276489, 276854, 277219, 277584, 277950, 278315, 278680, 279045, 279411, 279776, 280141, 280506, 280872, 281237, 281602, 281967, 282333, 282698, 283063, 283428, 283794, 284159, 284524, 284889, 285255, 285620, 285985, 286350, 286716, 
287081, 287446, 287811, 288177, 288542, 288907, 289272, 289638, 290003, 290368, 290733, 291099, 291464, 291829, 292194, 292560, 292925, 293290, 293655, 294021, 294386, 294751, 295116, 295482, 295847, 296212, 296577, 296943, 297308, 297673, 298038, 298404, 298769, 299134, 299499, 299865, 300230, 300595, 300960, 301326, 301691, 302056, 302421, 302787, 303152, 303517, 303882, 304248, 304613, 304978, 305343, 305709, 306074, 306439, 306804, 307170, 307535, 307900, 308265, 308631, 308996, 309361, 309726, 310092, 310457, 310822, 311187, 311553, 311918, 312283, 312648, 313014, 313379, 313744, 314109, 314475, 314840, 315205, 315570, 315936, 316301, 316666, 317031, 317397, 317762, 318127, 318492, 318858, 319223, 319588, 319953, 320319, 320684, 321049, 321414, 321780, 322145, 322510, 322875, 323241, 323606, 323971, 324336, 324702, 325067, 325432, 325797, 326163, 326528, 326893, 327258, 327624, 327989, 328354, 328719, 329084, 329449, 329814, 330179, 330545, 330910, 331275, 331640, 332006, 332371, 332736, 333101, 333467, 333832, 334197, 334562, 334928, 335293, 335658, 336023, 336389, 336754, 337119, 337484, 337850, 338215, 338580, 338945, 339311, 339676, 340041, 340406, 340772, 341137, 341502, 341867, 342233, 342598, 342963, 343328, 343694, 344059, 344424, 344789, 345155, 345520, 345885, 346250, 346616, 346981, 347346, 347711, 348077, 348442, 348807, 349172, 349538, 349903, 350268, 350633, 350999, 351364, 351729, 352094, 352460, 352825, 353190, 353555, 353921, 354286, 354651, 355016, 355382, 355747, 356112, 356477, 356843, 357208, 357573, 357938, 358304, 358669, 359034, 359399, 359765, 360130, 360495, 360860, 361226, 361591, 361956, 362321, 362687, 363052, 363417, 363782, 364148, 364513, 364878, 365243, 365608, 365973, 366338, 366703, 367069, 367434, 367799, 368164, 368530, 368895, 369260, 369625, 369991, 370356, 370721, 371086, 371452, 371817, 372182, 372547, 372913, 373278, 373643, 374008, 374374, 374739, 375104, 375469, 375835, 376200, 376565, 376930, 377296, 377661, 378026, 
378391, 378757, 379122, 379487, 379852, 380218, 380583, 380948, 381313, 381679, 382044, 382409, 382774, 383140, 383505, 383870, 384235, 384601, 384966, 385331, 385696, 386062, 386427, 386792, 387157, 387523, 387888, 388253, 388618, 388984, 389349, 389714, 390079, 390445, 390810, 391175, 391540, 391906, 392271, 392636, 393001, 393367, 393732, 394097, 394462, 394828, 395193, 395558, 395923, 396289, 396654, 397019, 397384, 397750, 398115, 398480, 398845, 399211, 399576, 399941, 400306, 400672, 401037, 401402, 401767, 402132, 402497, 402862, 403227, 403593, 403958, 404323, 404688, 405054, 405419, 405784, 406149, 406515, 406880, 407245, 407610, 407976, 408341, 408706, 409071, 409437, 409802, 410167, 410532, 410898, 411263, 411628, 411993, 412359, 412724, 413089, 413454, 413820, 414185, 414550, 414915, 415281, 415646, 416011, 416376, 416742, 417107, 417472, 417837, 418203, 418568, 418933, 419298, 419664, 420029, 420394, 420759, 421125, 421490, 421855, 422220, 422586, 422951, 423316, 423681, 424047, 424412, 424777, 425142, 425508, 425873, 426238, 426603, 426969, 427334, 427699, 428064, 428430, 428795, 429160, 429525, 429891, 430256, 430621, 430986, 431352, 431717, 432082, 432447, 432813, 433178, 433543, 433908, 434274, 434639, 435004, 435369, 435735, 436100, 436465, 436830, 437196, 437561, 437926, 438291, 438657, 439022, 439387, 439752, 440118, 440483, 440848, 441213, 441579, 441944, 442309, 442674, 443040, 443405, 443770, 444135, 444501, 444866, 445231, 445596, 445962, 446327, 446692, 447057, 447423, 447788, 448153, 448518, 448884, 449249, 449614, 449979, 450345, 450710, 451075, 451440, 451806, 452171, 452536, 452901, 453267, 453632, 453997, 454362, 454728, 455093, 455458, 455823, 456189, 456554, 456919, 457284, 457650, 458015, 458380, 458745, 459111, 459476, 459841, 460206, 460572, 460937, 461302, 461667, 462033, 462398, 462763, 463128, 463494, 463859, 464224, 464589, 464955, 465320, 465685, 466050, 466416, 466781, 467146, 467511, 467877, 468242, 468607, 468972, 469338, 
469703, 470068, 470433, 470799, 471164, 471529, 471894, 472260, 472625, 472990, 473355, 473721, 474086, 474451, 474816, 475181, 475546, 475911, 476276, 476642, 477007, 477372, 477737, 478103, 478468, 478833, 479198, 479564, 479929, 480294, 480659, 481025, 481390, 481755, 482120, 482486, 482851, 483216, 483581, 483947, 484312, 484677, 485042, 485408, 485773, 486138, 486503, 486869, 487234, 487599, 487964, 488330, 488695, 489060, 489425, 489791, 490156, 490521, 490886, 491252, 491617, 491982, 492347, 492713, 493078, 493443, 493808, 494174, 494539, 494904, 495269, 495635, 496000, 496365, 496730, 497096, 497461, 497826, 498191, 498557, 498922, 499287, 499652, 500018, 500383, 500748, 501113, 501479, 501844, 502209, 502574, 502940, 503305, 503670, 504035, 504401, 504766, 505131, 505496, 505862, 506227, 506592, 506957, 507323, 507688, 508053, 508418, 508784, 509149, 509514, 509879, 510245, 510610, 510975, 511340, 511705, 512070, 512435, 512800, 513166, 513531, 513896, 514261, 514627, 514992, 515357, 515722, 516088, 516453, 516818, 517183, 517549, 517914, 518279, 518644, 519010, 519375, 519740, 520105, 520471, 520836, 521201, 521566, 521932, 522297, 522662, 523027, 523393, 523758, 524123, 524488, 524854, 525219, 525584, 525949, 526315, 526680, 527045, 527410, 527776, 528141, 528506, 528871, 529237, 529602, 529967, 530332, 530698, 531063, 531428, 531793, 532159, 532524, 532889, 533254, 533620, 533985, 534350, 534715, 535081, 535446, 535811, 536176, 536542, 536907, 537272, 537637, 538003, 538368, 538733, 539098, 539464, 539829, 540194, 540559, 540925, 541290, 541655, 542020, 542386, 542751, 543116, 543481, 543847, 544212, 544577, 544942, 545308, 545673, 546038, 546403, 546769, 547134, 547499, 547864, 548229, 548594, 548959, 549324, 549690, 550055, 550420, 550785, 551151, 551516, 551881, 552246, 552612, 552977, 553342, 553707, 554073, 554438, 554803, 555168, 555534, 555899, 556264, 556629, 556995, 557360, 557725, 558090, 558456, 558821, 559186, 559551, 559917, 560282, 560647, 
561012, 561378, 561743, 562108, 562473, 562839, 563204, 563569, 563934, 564300, 564665, 565030, 565395, 565761, 566126, 566491, 566856, 567222, 567587, 567952, 568317, 568683, 569048, 569413, 569778, 570144, 570509, 570874, 571239, 571605, 571970, 572335, 572700, 573066, 573431, 573796, 574161, 574527, 574892, 575257, 575622, 575988, 576353, 576718, 577083, 577449, 577814, 578179, 578544, 578910, 579275, 579640, 580005, 580371, 580736, 581101, 581466, 581832, 582197, 582562, 582927, 583293, 583658, 584023, 584388, 584754, 585119, 585484, 585849, 586215, 586580, 586945, 587310, 587676, 588041, 588406, 588771, 589137, 589502, 589867, 590232, 590598, 590963, 591328, 591693, 592059, 592424, 592789, 593154, 593520, 593885, 594250, 594615, 594981, 595346, 595711, 596076, 596442, 596807, 597172, 597537, 597903, 598268, 598633, 598998, 599364, 599729, 600094, 600459, 600825, 601190, 601555, 601920, 602286, 602651, 603016, 603381, 603747, 604112, 604477, 604842, 605208, 605573, 605938, 606303, 606669, 607034, 607399, 607764, 608130, 608495, 608860, 609225, 609591, 609956, 610321, 610686, 611052, 611417, 611782, 612147, 612513, 612878, 613243, 613608, 613974, 614339, 614704, 615069, 615435, 615800, 616165, 616530, 616896, 617261, 617626, 617991, 618357, 618722, 619087, 619452, 619818, 620183, 620548, 620913, 621278, 621643, 622008, 622373, 622739, 623104, 623469, 623834, 624200, 624565, 624930, 625295, 625661, 626026, 626391, 626756, 627122, 627487, 627852, 628217, 628583, 628948, 629313, 629678, 630044, 630409, 630774, 631139, 631505, 631870, 632235, 632600, 632966, 633331, 633696, 634061, 634427, 634792, 635157, 635522, 635888, 636253, 636618, 636983, 637349, 637714, 638079, 638444, 638810, 639175, 639540, 639905, 640271, 640636, 641001, 641366, 641732, 642097, 642462, 642827, 643193, 643558, 643923, 644288, 644654, 645019, 645384, 645749, 646115, 646480, 646845, 647210, 647576, 647941, 648306, 648671, 649037, 649402, 649767, 650132, 650498, 650863, 651228, 651593, 651959, 
652324, 652689, 653054, 653420, 653785, 654150, 654515, 654881, 655246, 655611, 655976, 656342, 656707, 657072, 657437, 657802, 658167, 658532, 658897, 659263, 659628, 659993, 660358, 660724, 661089, 661454, 661819, 662185, 662550, 662915, 663280, 663646, 664011, 664376, 664741, 665107, 665472, 665837, 666202, 666568, 666933, 667298, 667663, 668029, 668394, 668759, 669124, 669490, 669855, 670220, 670585, 670951, 671316, 671681, 672046, 672412, 672777, 673142, 673507, 673873, 674238, 674603, 674968, 675334, 675699, 676064, 676429, 676795, 677160, 677525, 677890, 678256, 678621, 678986, 679351, 679717, 680082, 680447, 680812, 681178, 681543, 681908, 682273, 682639, 683004, 683369, 683734, 684100, 684465, 684830, 685195, 685561, 685926, 686291, 686656, 687022, 687387, 687752, 688117, 688483, 688848, 689213, 689578, 689944, 690309, 690674, 691039, 691405, 691770, 692135, 692500, 692866, 693231, 693596, 693961, 694326, 694691, 695056, 695421, 695787, 696152, 696517, 696882, 697248, 697613, 697978, 698343, 698709, 699074, 699439, 699804, 700170, 700535, 700900, 701265, 701631, 701996, 702361, 702726, 703092, 703457, 703822, 704187, 704553, 704918, 705283, 705648, 706014, 706379, 706744, 707109, 707475, 707840, 708205, 708570, 708936, 709301, 709666, 710031, 710397, 710762, 711127, 711492, 711858, 712223, 712588, 712953, 713319, 713684, 714049, 714414, 714780, 715145, 715510, 715875, 716241, 716606, 716971, 717336, 717702, 718067, 718432, 718797, 719163, 719528, 719893, 720258, 720624, 720989, 721354, 721719, 722085, 722450, 722815, 723180, 723546, 723911, 724276, 724641, 725007, 725372, 725737, 726102, 726468, 726833, 727198, 727563, 727929, 728294, 728659, 729024, 729390, 729755, 730120, 730485, 730851, 731216, 731581, 731946, 732312, 732677, 733042, 733407, 733773, 734138, 734503, 734868, 735234, 735599, 735964, 736329, 736695, 737060, 737425, 737790, 738156, 738521, 738886, 739251, 739617, 739982, 740347, 740712, 741078, 741443, 741808, 742173, 742539, 742904, 743269, 
743634, 744000, 744365, 744730, 745095, 745461, 745826, 746191, 746556, 746922, 747287, 747652, 748017, 748383, 748748, 749113, 749478, 749844, 750209, 750574, 750939, 751305, 751670, 752035, 752400, 752766, 753131, 753496, 753861, 754227, 754592, 754957, 755322, 755688, 756053, 756418, 756783, 757149, 757514, 757879, 758244, 758610, 758975, 759340, 759705, 760071, 760436, 760801, 761166, 761532, 761897, 762262, 762627, 762993, 763358, 763723, 764088, 764454, 764819, 765184, 765549, 765915, 766280, 766645] \ No newline at end of file diff --git a/go/mysql/datetime/time_zone_test.go b/go/mysql/datetime/time_zone_test.go index 94745d0c71e..4bd1572755f 100644 --- a/go/mysql/datetime/time_zone_test.go +++ b/go/mysql/datetime/time_zone_test.go @@ -18,10 +18,50 @@ package datetime import ( "testing" + "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) +func TestDST(t *testing.T) { + testCases := []struct { + time Time + year int + month time.Month + day int + tz string + expected string + }{ + { + time: Time{hour: 130, minute: 34, second: 58}, + year: 2023, month: 10, day: 24, + tz: "Europe/Madrid", + expected: "2023-10-29T10:34:58+01:00", + }, + { + time: Time{hour: 130, minute: 34, second: 58}, + year: 2023, month: 10, day: 29, + tz: "Europe/Madrid", + expected: "2023-11-03T10:34:58+01:00", + }, + { + time: Time{hour: 130 | negMask, minute: 34, second: 58}, + year: 2023, month: 11, day: 03, + tz: "Europe/Madrid", + expected: "2023-10-28T13:25:02+02:00", + }, + } + + for _, tc := range testCases { + tz, err := ParseTimeZone(tc.tz) + require.NoError(t, err) + + got := tc.time.toStdTime(tc.year, tc.month, tc.day, tz) + assert.Equal(t, tc.expected, got.Format(time.RFC3339)) + } +} + func TestParseTimeZone(t *testing.T) { testCases := []struct { tz string diff --git a/go/mysql/datetime/timeparts.go b/go/mysql/datetime/timeparts.go index 32bda00ef43..a774099a93a 100644 --- a/go/mysql/datetime/timeparts.go +++ 
b/go/mysql/datetime/timeparts.go @@ -48,7 +48,7 @@ func (tp *timeparts) toDateTime(prec int) (DateTime, int, bool) { if tp.yday > 0 { return DateTime{}, 0, false } else { - if tp.month < 0 { + if tp.month < 1 { tp.month = int(time.January) } if tp.day < 0 { @@ -86,3 +86,7 @@ func (tp *timeparts) toDateTime(prec int) (DateTime, int, bool) { func (tp *timeparts) isZero() bool { return tp.year == 0 && tp.month == 0 && tp.day == 0 && tp.hour == 0 && tp.min == 0 && tp.sec == 0 && tp.nsec == 0 } + +func (tp *timeparts) toSeconds() int { + return tp.day*secondsPerDay + tp.hour*3600 + tp.min*60 + tp.sec +} diff --git a/go/mysql/decimal/decimal.go b/go/mysql/decimal/decimal.go index 7293360ee52..a2b505a1232 100644 --- a/go/mysql/decimal/decimal.go +++ b/go/mysql/decimal/decimal.go @@ -677,6 +677,10 @@ func (d *Decimal) ensureInitialized() { } } +func (d Decimal) IsInitialized() bool { + return d.value != nil +} + // RescalePair rescales two decimals to common exponential value (minimal exp of both decimals) func RescalePair(d1 Decimal, d2 Decimal) (Decimal, Decimal) { d1.ensureInitialized() @@ -693,13 +697,6 @@ func RescalePair(d1 Decimal, d2 Decimal) (Decimal, Decimal) { return d1, d2.rescale(baseScale) } -func min(x, y int32) int32 { - if x >= y { - return y - } - return x -} - // largestForm returns the largest decimal that can be represented // with the given amount of integral and fractional digits // Example: diff --git a/go/mysql/decimal/weights.go b/go/mysql/decimal/weights.go new file mode 100644 index 00000000000..9b8f43a0c65 --- /dev/null +++ b/go/mysql/decimal/weights.go @@ -0,0 +1,56 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package decimal + +// Our weight string format is normalizing the weight string to a fixed length, +// so it becomes byte-ordered. The byte lengths are pre-computed based on +// https://dev.mysql.com/doc/refman/8.0/en/fixed-point-types.html +// and generated empirically with a manual loop: +// +// for i := 1; i <= 65; i++ { +// dec, err := NewFromMySQL(bytes.Repeat([]byte("9"), i)) +// if err != nil { +// t.Fatal(err) +// } +// +// byteLengths = append(byteLengths, len(dec.value.Bytes())) +// } +var weightStringLengths = []int{ + 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7, 7, 8, 8, 8, + 9, 9, 10, 10, 10, 11, 11, 12, 12, 13, 13, 13, 14, 14, 15, 15, 15, + 16, 16, 17, 17, 18, 18, 18, 19, 19, 20, 20, 20, 21, 21, 22, 22, + 23, 23, 23, 24, 24, 25, 25, 25, 26, 26, 27, 27, 27, +} + +func (d Decimal) WeightString(dst []byte, length, precision int32) []byte { + dec := d.rescale(-precision) + dec = dec.Clamp(length-precision, precision) + + buf := make([]byte, weightStringLengths[length]+1) + dec.value.FillBytes(buf[:]) + + if dec.value.Sign() < 0 { + for i := range buf { + buf[i] ^= 0xff + } + } + // Use the same trick as used for signed numbers on the first byte. + buf[0] ^= 0x80 + + dst = append(dst, buf[:]...) 
+ return dst +} diff --git a/go/mysql/endtoend/client_test.go b/go/mysql/endtoend/client_test.go index a48c9629d51..6591c454e8a 100644 --- a/go/mysql/endtoend/client_test.go +++ b/go/mysql/endtoend/client_test.go @@ -25,6 +25,8 @@ import ( "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/mysql/sqlerror" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" @@ -73,9 +75,9 @@ func TestKill(t *testing.T) { // will differ. err = <-errChan if strings.Contains(err.Error(), "EOF") { - assertSQLError(t, err, mysql.CRServerLost, mysql.SSUnknownSQLState, "EOF", "select sleep(10) from dual") + assertSQLError(t, err, sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "EOF", "select sleep(10) from dual") } else { - assertSQLError(t, err, mysql.CRServerLost, mysql.SSUnknownSQLState, "", "connection reset by peer") + assertSQLError(t, err, sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "", "connection reset by peer") } } @@ -104,7 +106,7 @@ func TestKill2006(t *testing.T) { // unix socket, we will get a broken pipe when the server // closes the connection and we are trying to write the command. _, err = conn.ExecuteFetch("select sleep(10) from dual", 1000, false) - assertSQLError(t, err, mysql.CRServerGone, mysql.SSUnknownSQLState, "broken pipe", "select sleep(10) from dual") + assertSQLError(t, err, sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "broken pipe", "select sleep(10) from dual") } // TestDupEntry tests a duplicate key is properly raised. 
@@ -123,7 +125,7 @@ func TestDupEntry(t *testing.T) { t.Fatalf("first insert failed: %v", err) } _, err = conn.ExecuteFetch("insert into dup_entry(id, name) values(2, 10)", 0, false) - assertSQLError(t, err, mysql.ERDupEntry, mysql.SSConstraintViolation, "Duplicate entry", "insert into dup_entry(id, name) values(2, 10)") + assertSQLError(t, err, sqlerror.ERDupEntry, sqlerror.SSConstraintViolation, "Duplicate entry", "insert into dup_entry(id, name) values(2, 10)") } // TestClientFoundRows tests if the CLIENT_FOUND_ROWS flag works. diff --git a/go/mysql/endtoend/main_test.go b/go/mysql/endtoend/main_test.go index ef7cb671c33..466735c02e4 100644 --- a/go/mysql/endtoend/main_test.go +++ b/go/mysql/endtoend/main_test.go @@ -27,6 +27,8 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql" vtenv "vitess.io/vitess/go/vt/env" "vitess.io/vitess/go/vt/mysqlctl" @@ -41,11 +43,11 @@ var ( ) // assertSQLError makes sure we get the right error. 
-func assertSQLError(t *testing.T, err error, code mysql.ErrorCode, sqlState string, subtext string, query string) { +func assertSQLError(t *testing.T, err error, code sqlerror.ErrorCode, sqlState string, subtext string, query string) { t.Helper() require.Error(t, err, "was expecting SQLError %v / %v / %v but got no error.", code, sqlState, subtext) - serr, ok := err.(*mysql.SQLError) + serr, ok := err.(*sqlerror.SQLError) require.True(t, ok, "was expecting SQLError %v / %v / %v but got: %v", code, sqlState, subtext, err) require.Equal(t, code, serr.Num, "was expecting SQLError %v / %v / %v but got code %v", code, sqlState, subtext, serr.Num) require.Equal(t, sqlState, serr.State, "was expecting SQLError %v / %v / %v but got state %v", code, sqlState, subtext, serr.State) diff --git a/go/mysql/endtoend/query_test.go b/go/mysql/endtoend/query_test.go index 7565c2913e9..576960f2acb 100644 --- a/go/mysql/endtoend/query_test.go +++ b/go/mysql/endtoend/query_test.go @@ -26,6 +26,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" @@ -39,7 +41,7 @@ const ( func columnSize(cs collations.ID, size uint32) uint32 { // utf8_general_ci results in smaller max column sizes because MySQL 5.7 is silly - if cs.Get().Charset().Name() == "utf8mb3" { + if colldata.Lookup(cs).Charset().Name() == "utf8mb3" { return size * 3 / 4 } return size @@ -321,6 +323,5 @@ func TestSysInfo(t *testing.T) { func getDefaultCollationID() collations.ID { collationHandler := collations.Local() - collation := collationHandler.DefaultCollationForCharset(charsetName) - return collation.ID() + return collationHandler.DefaultCollationForCharset(charsetName) } diff --git a/go/mysql/endtoend/replication_test.go b/go/mysql/endtoend/replication_test.go index 15b966e1feb..0c1fa006347 100644 --- 
a/go/mysql/endtoend/replication_test.go +++ b/go/mysql/endtoend/replication_test.go @@ -29,11 +29,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/binlog" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/vtgate/evalengine" ) // connectForReplication is a helper method to connect for replication @@ -70,7 +71,7 @@ func connectForReplication(t *testing.T, rbr bool) (*mysql.Conn, mysql.BinlogFor t.Fatalf("SHOW MASTER STATUS returned unexpected result: %v", result) } file := result.Rows[0][0].ToString() - position, err := evalengine.ToUint64(result.Rows[0][1]) + position, err := result.Rows[0][1].ToCastUint64() require.NoError(t, err, "SHOW MASTER STATUS returned invalid position: %v", result.Rows[0][1]) // Tell the server that we understand the format of events @@ -126,9 +127,9 @@ func TestReplicationConnectionClosing(t *testing.T) { for { data, err := conn.ReadPacket() if err != nil { - serr, ok := err.(*mysql.SQLError) - assert.True(t, ok, "Got a non mysql.SQLError error: %v", err) - assert.Equal(t, mysql.CRServerLost, serr.Num, "Got an unexpected mysql.SQLError error: %v", serr) + serr, ok := err.(*sqlerror.SQLError) + assert.True(t, ok, "Got a non sqlerror.SQLError error: %v", err) + assert.Equal(t, sqlerror.CRServerLost, serr.Num, "Got an unexpected sqlerror.SQLError error: %v", serr) // we got the right error, all good. 
return diff --git a/go/mysql/endtoend/schema_change_test.go b/go/mysql/endtoend/schema_change_test.go index 5fc90e37935..a9e72aaef5b 100644 --- a/go/mysql/endtoend/schema_change_test.go +++ b/go/mysql/endtoend/schema_change_test.go @@ -22,7 +22,7 @@ import ( "strings" "testing" - "vitess.io/vitess/go/vt/sidecardb" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/vt/sqlparser" "github.com/stretchr/testify/require" @@ -42,9 +42,9 @@ func TestChangeSchemaIsNoticed(t *testing.T) { require.NoError(t, err) defer conn.Close() - clearQuery := sqlparser.BuildParsedQuery(mysql.ClearSchemaCopy, sidecardb.GetIdentifier()).Query - insertQuery := sqlparser.BuildParsedQuery(mysql.InsertIntoSchemaCopy, sidecardb.GetIdentifier()).Query - detectQuery := sqlparser.BuildParsedQuery(mysql.DetectSchemaChange, sidecardb.GetIdentifier()).Query + clearQuery := sqlparser.BuildParsedQuery(mysql.ClearSchemaCopy, sidecar.GetIdentifier()).Query + insertQuery := sqlparser.BuildParsedQuery(mysql.InsertIntoSchemaCopy, sidecar.GetIdentifier()).Query + detectQuery := sqlparser.BuildParsedQuery(mysql.DetectSchemaChange, sidecar.GetIdentifier()).Query tests := []struct { name string diff --git a/go/mysql/fakesqldb/server.go b/go/mysql/fakesqldb/server.go index b305842cfe9..7d3a79eeb85 100644 --- a/go/mysql/fakesqldb/server.go +++ b/go/mysql/fakesqldb/server.go @@ -29,6 +29,7 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/log" @@ -188,7 +189,7 @@ func New(t testing.TB) *DB { authServer := mysql.NewAuthServerNone() // Start listening. 
- db.listener, err = mysql.NewListener("unix", socketFile, authServer, db, 0, 0, false, false) + db.listener, err = mysql.NewListener("unix", socketFile, authServer, db, 0, 0, false, false, 0) if err != nil { t.Fatalf("NewListener failed: %v", err) } @@ -382,7 +383,7 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R if db.shouldClose.Load() { c.Close() - //log error + // log error if err := callback(&sqltypes.Result{}); err != nil { log.Errorf("callback failed : %v", err) } @@ -393,7 +394,7 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R // The driver may send this at connection time, and we don't want it to // interfere. if key == "set names utf8" || strings.HasPrefix(key, "set collation_connection = ") { - //log error + // log error if err := callback(&sqltypes.Result{}); err != nil { log.Errorf("callback failed : %v", err) } @@ -527,7 +528,7 @@ func (db *DB) ComBinlogDump(c *mysql.Conn, logFile string, binlogPos uint32) err } // ComBinlogDumpGTID is part of the mysql.Handler interface. -func (db *DB) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet mysql.GTIDSet) error { +func (db *DB) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error { return nil } diff --git a/go/mysql/flavor.go b/go/mysql/flavor.go index a8ab1dbbcb7..edb64913c31 100644 --- a/go/mysql/flavor.go +++ b/go/mysql/flavor.go @@ -23,6 +23,8 @@ import ( "strconv" "strings" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -31,7 +33,7 @@ import ( var ( // ErrNotReplica means there is no replication status. // Returned by ShowReplicationStatus(). 
- ErrNotReplica = NewSQLError(ERNotReplica, SSUnknownSQLState, "no replication status") + ErrNotReplica = sqlerror.NewSQLError(sqlerror.ERNotReplica, sqlerror.SSUnknownSQLState, "no replication status") // ErrNoPrimaryStatus means no status was returned by ShowPrimaryStatus(). ErrNoPrimaryStatus = errors.New("no master status") @@ -75,10 +77,10 @@ const ( // 2. MariaDB 10.X type flavor interface { // primaryGTIDSet returns the current GTIDSet of a server. - primaryGTIDSet(c *Conn) (GTIDSet, error) + primaryGTIDSet(c *Conn) (replication.GTIDSet, error) // purgedGTIDSet returns the purged GTIDSet of a server. - purgedGTIDSet(c *Conn) (GTIDSet, error) + purgedGTIDSet(c *Conn) (replication.GTIDSet, error) // gtidMode returns the gtid mode of a server. gtidMode(c *Conn) (string, error) @@ -94,11 +96,11 @@ type flavor interface { // startReplicationUntilAfter will start replication, but only allow it // to run until `pos` is reached. After reaching pos, replication will be stopped again - startReplicationUntilAfter(pos Position) string + startReplicationUntilAfter(pos replication.Position) string // startSQLThreadUntilAfter will start replication's sql thread(s), but only allow it // to run until `pos` is reached. After reaching pos, it will be stopped again - startSQLThreadUntilAfter(pos Position) string + startSQLThreadUntilAfter(pos replication.Position) string // stopReplicationCommand returns the command to stop the replication. stopReplicationCommand() string @@ -114,7 +116,7 @@ type flavor interface { // sendBinlogDumpCommand sends the packet required to start // dumping binlogs from the specified location. - sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos Position) error + sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position) error // readBinlogEvent reads the next BinlogEvent from the connection. 
readBinlogEvent(c *Conn) (BinlogEvent, error) @@ -129,7 +131,7 @@ type flavor interface { // setReplicationPositionCommands returns the commands to set the // replication position at which the replica will resume. - setReplicationPositionCommands(pos Position) []string + setReplicationPositionCommands(pos replication.Position) []string // changeReplicationSourceArg returns the specific parameter to add to // a "change primary" command. @@ -137,17 +139,17 @@ type flavor interface { // status returns the result of the appropriate status command, // with parsed replication position. - status(c *Conn) (ReplicationStatus, error) + status(c *Conn) (replication.ReplicationStatus, error) // primaryStatus returns the result of 'SHOW MASTER STATUS', // with parsed executed position. - primaryStatus(c *Conn) (PrimaryStatus, error) + primaryStatus(c *Conn) (replication.PrimaryStatus, error) // waitUntilPositionCommand returns the SQL command to issue // to wait until the given position, until the context // expires. The command returns -1 if it times out. It // returns NULL if GTIDs are not enabled. - waitUntilPositionCommand(ctx context.Context, pos Position) (string, error) + waitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error) baseShowTables() string baseShowTablesWithSizes() string @@ -265,23 +267,23 @@ func (c *Conn) IsMariaDB() bool { } // PrimaryPosition returns the current primary's replication position. -func (c *Conn) PrimaryPosition() (Position, error) { +func (c *Conn) PrimaryPosition() (replication.Position, error) { gtidSet, err := c.flavor.primaryGTIDSet(c) if err != nil { - return Position{}, err + return replication.Position{}, err } - return Position{ + return replication.Position{ GTIDSet: gtidSet, }, nil } // GetGTIDPurged returns the tablet's GTIDs which are purged. 
-func (c *Conn) GetGTIDPurged() (Position, error) { +func (c *Conn) GetGTIDPurged() (replication.Position, error) { gtidSet, err := c.flavor.purgedGTIDSet(c) if err != nil { - return Position{}, err + return replication.Position{}, err } - return Position{ + return replication.Position{ GTIDSet: gtidSet, }, nil } @@ -297,13 +299,13 @@ func (c *Conn) GetServerUUID() (string, error) { } // PrimaryFilePosition returns the current primary's file based replication position. -func (c *Conn) PrimaryFilePosition() (Position, error) { +func (c *Conn) PrimaryFilePosition() (replication.Position, error) { filePosFlavor := filePosFlavor{} gtidSet, err := filePosFlavor.primaryGTIDSet(c) if err != nil { - return Position{}, err + return replication.Position{}, err } - return Position{ + return replication.Position{ GTIDSet: gtidSet, }, nil } @@ -319,14 +321,14 @@ func (c *Conn) RestartReplicationCommands() []string { } // StartReplicationUntilAfterCommand returns the command to start replication. -func (c *Conn) StartReplicationUntilAfterCommand(pos Position) string { +func (c *Conn) StartReplicationUntilAfterCommand(pos replication.Position) string { return c.flavor.startReplicationUntilAfter(pos) } // StartSQLThreadUntilAfterCommand returns the command to start the replica's SQL // thread(s) and have it run until it has reached the given position, at which point // it will stop. -func (c *Conn) StartSQLThreadUntilAfterCommand(pos Position) string { +func (c *Conn) StartSQLThreadUntilAfterCommand(pos replication.Position) string { return c.flavor.startSQLThreadUntilAfter(pos) } @@ -353,7 +355,7 @@ func (c *Conn) StartSQLThreadCommand() string { // SendBinlogDumpCommand sends the flavor-specific version of // the COM_BINLOG_DUMP command to start dumping raw binlog // events over a server connection, starting at a given GTID. 
-func (c *Conn) SendBinlogDumpCommand(serverID uint32, binlogFilename string, startPos Position) error { +func (c *Conn) SendBinlogDumpCommand(serverID uint32, binlogFilename string, startPos replication.Position) error { return c.flavor.sendBinlogDumpCommand(c, serverID, binlogFilename, startPos) } @@ -378,7 +380,7 @@ func (c *Conn) ResetReplicationParametersCommands() []string { // SetReplicationPositionCommands returns the commands to set the // replication position at which the replica will resume // when it is later reparented with SetReplicationSourceCommand. -func (c *Conn) SetReplicationPositionCommands(pos Position) []string { +func (c *Conn) SetReplicationPositionCommands(pos replication.Position) []string { return c.flavor.setReplicationPositionCommands(pos) } @@ -433,107 +435,15 @@ func resultToMap(qr *sqltypes.Result) (map[string]string, error) { return result, nil } -// parseReplicationStatus parses the common (non-flavor-specific) fields of ReplicationStatus -func parseReplicationStatus(fields map[string]string) ReplicationStatus { - // The field names in the map are identical to what we receive from the database - // Hence the names still contain Master - status := ReplicationStatus{ - SourceHost: fields["Master_Host"], - SourceUser: fields["Master_User"], - SSLAllowed: fields["Master_SSL_Allowed"] == "Yes", - AutoPosition: fields["Auto_Position"] == "1", - UsingGTID: fields["Using_Gtid"] != "No" && fields["Using_Gtid"] != "", - HasReplicationFilters: (fields["Replicate_Do_DB"] != "") || (fields["Replicate_Ignore_DB"] != "") || (fields["Replicate_Do_Table"] != "") || (fields["Replicate_Ignore_Table"] != "") || (fields["Replicate_Wild_Do_Table"] != "") || (fields["Replicate_Wild_Ignore_Table"] != ""), - // These fields are returned from the underlying DB and cannot be renamed - IOState: ReplicationStatusToState(fields["Slave_IO_Running"]), - LastIOError: fields["Last_IO_Error"], - SQLState: ReplicationStatusToState(fields["Slave_SQL_Running"]), - 
LastSQLError: fields["Last_SQL_Error"], - } - parseInt, _ := strconv.ParseInt(fields["Master_Port"], 10, 32) - status.SourcePort = int32(parseInt) - parseInt, _ = strconv.ParseInt(fields["Connect_Retry"], 10, 32) - status.ConnectRetry = int32(parseInt) - parseUint, err := strconv.ParseUint(fields["Seconds_Behind_Master"], 10, 32) - if err != nil { - // we could not parse the value into a valid uint32 -- most commonly because the value is NULL from the - // database -- so let's reflect that the underlying value was unknown on our last check - status.ReplicationLagUnknown = true - } else { - status.ReplicationLagUnknown = false - status.ReplicationLagSeconds = uint32(parseUint) - } - parseUint, _ = strconv.ParseUint(fields["Master_Server_Id"], 10, 32) - status.SourceServerID = uint32(parseUint) - parseUint, _ = strconv.ParseUint(fields["SQL_Delay"], 10, 32) - status.SQLDelay = uint32(parseUint) - - executedPosStr := fields["Exec_Master_Log_Pos"] - file := fields["Relay_Master_Log_File"] - if file != "" && executedPosStr != "" { - filePos, err := strconv.ParseUint(executedPosStr, 10, 32) - if err == nil { - status.FilePosition.GTIDSet = filePosGTID{ - file: file, - pos: uint32(filePos), - } - } - } - - readPosStr := fields["Read_Master_Log_Pos"] - file = fields["Master_Log_File"] - if file != "" && readPosStr != "" { - fileRelayPos, err := strconv.ParseUint(readPosStr, 10, 32) - if err == nil { - status.RelayLogSourceBinlogEquivalentPosition.GTIDSet = filePosGTID{ - file: file, - pos: uint32(fileRelayPos), - } - } - } - - relayPosStr := fields["Relay_Log_Pos"] - file = fields["Relay_Log_File"] - if file != "" && relayPosStr != "" { - relayFilePos, err := strconv.ParseUint(relayPosStr, 10, 32) - if err == nil { - status.RelayLogFilePosition.GTIDSet = filePosGTID{ - file: file, - pos: uint32(relayFilePos), - } - } - } - return status -} - // ShowReplicationStatus executes the right command to fetch replication status, // and returns a parsed Position with other fields. 
-func (c *Conn) ShowReplicationStatus() (ReplicationStatus, error) { +func (c *Conn) ShowReplicationStatus() (replication.ReplicationStatus, error) { return c.flavor.status(c) } -// parsePrimaryStatus parses the common fields of SHOW MASTER STATUS. -func parsePrimaryStatus(fields map[string]string) PrimaryStatus { - status := PrimaryStatus{} - - fileExecPosStr := fields["Position"] - file := fields["File"] - if file != "" && fileExecPosStr != "" { - filePos, err := strconv.ParseUint(fileExecPosStr, 10, 32) - if err == nil { - status.FilePosition.GTIDSet = filePosGTID{ - file: file, - pos: uint32(filePos), - } - } - } - - return status -} - // ShowPrimaryStatus executes the right SHOW MASTER STATUS command, // and returns a parsed executed Position, as well as file based Position. -func (c *Conn) ShowPrimaryStatus() (PrimaryStatus, error) { +func (c *Conn) ShowPrimaryStatus() (replication.PrimaryStatus, error) { return c.flavor.primaryStatus(c) } @@ -541,7 +451,7 @@ func (c *Conn) ShowPrimaryStatus() (PrimaryStatus, error) { // to wait until the given position, until the context // expires. The command returns -1 if it times out. It // returns NULL if GTIDs are not enabled. -func (c *Conn) WaitUntilPositionCommand(ctx context.Context, pos Position) (string, error) { +func (c *Conn) WaitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error) { return c.flavor.waitUntilPositionCommand(ctx, pos) } @@ -549,7 +459,7 @@ func (c *Conn) WaitUntilPositionCommand(ctx context.Context, pos Position) (stri // to wait until the given position, until the context // expires for the file position flavor. The command returns -1 if it times out. It // returns NULL if GTIDs are not enabled. 
-func (c *Conn) WaitUntilFilePositionCommand(ctx context.Context, pos Position) (string, error) { +func (c *Conn) WaitUntilFilePositionCommand(ctx context.Context, pos replication.Position) (string, error) { filePosFlavor := filePosFlavor{} return filePosFlavor.waitUntilPositionCommand(ctx, pos) } @@ -568,3 +478,7 @@ func (c *Conn) BaseShowTablesWithSizes() string { func (c *Conn) SupportsCapability(capability FlavorCapability) (bool, error) { return c.flavor.supportsCapability(c.ServerVersion, capability) } + +func init() { + flavors[replication.FilePosFlavorID] = newFilePosFlavor +} diff --git a/go/mysql/flavor_filepos.go b/go/mysql/flavor_filepos.go index 9c2bdeb7407..bf4076b85b1 100644 --- a/go/mysql/flavor_filepos.go +++ b/go/mysql/flavor_filepos.go @@ -20,10 +20,11 @@ import ( "context" "fmt" "io" - "strconv" "strings" "time" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -40,7 +41,7 @@ func newFilePosFlavor() flavor { } // primaryGTIDSet is part of the Flavor interface. -func (flv *filePosFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) { +func (flv *filePosFlavor) primaryGTIDSet(c *Conn) (replication.GTIDSet, error) { qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */) if err != nil { return nil, err @@ -53,19 +54,11 @@ func (flv *filePosFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) { if err != nil { return nil, err } - pos, err := strconv.ParseUint(resultMap["Position"], 0, 32) - if err != nil { - return nil, fmt.Errorf("invalid FilePos GTID (%v): expecting pos to be an integer", resultMap["Position"]) - } - - return filePosGTID{ - file: resultMap["File"], - pos: uint32(pos), - }, nil + return replication.ParseFilePosGTIDSet(fmt.Sprintf("%s:%s", resultMap["File"], resultMap["Position"])) } // purgedGTIDSet is part of the Flavor interface. 
-func (flv *filePosFlavor) purgedGTIDSet(c *Conn) (GTIDSet, error) { +func (flv *filePosFlavor) purgedGTIDSet(c *Conn) (replication.GTIDSet, error) { return nil, nil } @@ -119,14 +112,14 @@ func (flv *filePosFlavor) startSQLThreadCommand() string { } // sendBinlogDumpCommand is part of the Flavor interface. -func (flv *filePosFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos Position) error { - rpos, ok := startPos.GTIDSet.(filePosGTID) +func (flv *filePosFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position) error { + rpos, ok := startPos.GTIDSet.(replication.FilePosGTID) if !ok { return fmt.Errorf("startPos.GTIDSet is wrong type - expected filePosGTID, got: %#v", startPos.GTIDSet) } - flv.file = rpos.file - return c.WriteComBinlogDump(serverID, rpos.file, rpos.pos, 0) + flv.file = rpos.File + return c.WriteComBinlogDump(serverID, rpos.File, rpos.Pos, 0) } // readBinlogEvent is part of the Flavor interface. @@ -143,7 +136,7 @@ func (flv *filePosFlavor) readBinlogEvent(c *Conn) (BinlogEvent, error) { } switch result[0] { case EOFPacket: - return nil, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", io.EOF) + return nil, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", io.EOF) case ErrPacket: return nil, ParseErrorPacket(result) } @@ -223,7 +216,7 @@ func (flv *filePosFlavor) resetReplicationParametersCommands(c *Conn) []string { } // setReplicationPositionCommands is part of the Flavor interface. -func (flv *filePosFlavor) setReplicationPositionCommands(pos Position) []string { +func (flv *filePosFlavor) setReplicationPositionCommands(pos replication.Position) []string { return []string{ "unsupported", } @@ -235,64 +228,47 @@ func (flv *filePosFlavor) changeReplicationSourceArg() string { } // status is part of the Flavor interface. 
-func (flv *filePosFlavor) status(c *Conn) (ReplicationStatus, error) { +func (flv *filePosFlavor) status(c *Conn) (replication.ReplicationStatus, error) { qr, err := c.ExecuteFetch("SHOW SLAVE STATUS", 100, true /* wantfields */) if err != nil { - return ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } if len(qr.Rows) == 0 { // The query returned no data, meaning the server // is not configured as a replica. - return ReplicationStatus{}, ErrNotReplica + return replication.ReplicationStatus{}, ErrNotReplica } resultMap, err := resultToMap(qr) if err != nil { - return ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } - return parseFilePosReplicationStatus(resultMap) -} - -func parseFilePosReplicationStatus(resultMap map[string]string) (ReplicationStatus, error) { - status := parseReplicationStatus(resultMap) - - status.Position = status.FilePosition - status.RelayLogPosition = status.RelayLogSourceBinlogEquivalentPosition - - return status, nil + return replication.ParseFilePosReplicationStatus(resultMap) } // primaryStatus is part of the Flavor interface. -func (flv *filePosFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) { +func (flv *filePosFlavor) primaryStatus(c *Conn) (replication.PrimaryStatus, error) { qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */) if err != nil { - return PrimaryStatus{}, err + return replication.PrimaryStatus{}, err } if len(qr.Rows) == 0 { // The query returned no data. We don't know how this could happen. 
- return PrimaryStatus{}, ErrNoPrimaryStatus + return replication.PrimaryStatus{}, ErrNoPrimaryStatus } resultMap, err := resultToMap(qr) if err != nil { - return PrimaryStatus{}, err + return replication.PrimaryStatus{}, err } - return parseFilePosPrimaryStatus(resultMap) -} - -func parseFilePosPrimaryStatus(resultMap map[string]string) (PrimaryStatus, error) { - status := parsePrimaryStatus(resultMap) - - status.Position = status.FilePosition - - return status, nil + return replication.ParseFilePosPrimaryStatus(resultMap) } // waitUntilPositionCommand is part of the Flavor interface. -func (flv *filePosFlavor) waitUntilPositionCommand(ctx context.Context, pos Position) (string, error) { - filePosPos, ok := pos.GTIDSet.(filePosGTID) +func (flv *filePosFlavor) waitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error) { + filePosPos, ok := pos.GTIDSet.(replication.FilePosGTID) if !ok { return "", fmt.Errorf("Position is not filePos compatible: %#v", pos.GTIDSet) } @@ -302,17 +278,17 @@ func (flv *filePosFlavor) waitUntilPositionCommand(ctx context.Context, pos Posi if timeout <= 0 { return "", fmt.Errorf("timed out waiting for position %v", pos) } - return fmt.Sprintf("SELECT MASTER_POS_WAIT('%s', %d, %.6f)", filePosPos.file, filePosPos.pos, timeout.Seconds()), nil + return fmt.Sprintf("SELECT MASTER_POS_WAIT('%s', %d, %.6f)", filePosPos.File, filePosPos.Pos, timeout.Seconds()), nil } - return fmt.Sprintf("SELECT MASTER_POS_WAIT('%s', %d)", filePosPos.file, filePosPos.pos), nil + return fmt.Sprintf("SELECT MASTER_POS_WAIT('%s', %d)", filePosPos.File, filePosPos.Pos), nil } -func (*filePosFlavor) startReplicationUntilAfter(pos Position) string { +func (*filePosFlavor) startReplicationUntilAfter(pos replication.Position) string { return "unsupported" } -func (*filePosFlavor) startSQLThreadUntilAfter(pos Position) string { +func (*filePosFlavor) startSQLThreadUntilAfter(pos replication.Position) string { return "unsupported" } diff --git 
a/go/mysql/flavor_filepos_test.go b/go/mysql/flavor_filepos_test.go deleted file mode 100644 index be60f6a95a6..00000000000 --- a/go/mysql/flavor_filepos_test.go +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mysql - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestFilePosRetrieveSourceServerId(t *testing.T) { - resultMap := map[string]string{ - "Master_Server_Id": "1", - } - - want := ReplicationStatus{SourceServerID: 1} - got, err := parseFilePosReplicationStatus(resultMap) - require.NoError(t, err) - assert.Equalf(t, got.SourceServerID, want.SourceServerID, "got SourceServerID: %v; want SourceServerID: %v", got.SourceServerID, want.SourceServerID) -} - -func TestFilePosRetrieveExecutedPosition(t *testing.T) { - resultMap := map[string]string{ - "Exec_Master_Log_Pos": "1307", - "Relay_Master_Log_File": "master-bin.000002", - "Read_Master_Log_Pos": "1308", - "Master_Log_File": "master-bin.000003", - "Relay_Log_Pos": "1309", - "Relay_Log_File": "relay-bin.000004", - } - - want := ReplicationStatus{ - Position: Position{GTIDSet: filePosGTID{file: "master-bin.000002", pos: 1307}}, - RelayLogPosition: Position{GTIDSet: filePosGTID{file: "master-bin.000003", pos: 1308}}, - FilePosition: Position{GTIDSet: filePosGTID{file: "master-bin.000002", pos: 1307}}, - RelayLogSourceBinlogEquivalentPosition: Position{GTIDSet: filePosGTID{file: 
"master-bin.000003", pos: 1308}}, - RelayLogFilePosition: Position{GTIDSet: filePosGTID{file: "relay-bin.000004", pos: 1309}}, - } - got, err := parseFilePosReplicationStatus(resultMap) - require.NoError(t, err) - assert.Equalf(t, got.Position.GTIDSet, want.Position.GTIDSet, "got Position: %v; want Position: %v", got.Position.GTIDSet, want.Position.GTIDSet) - assert.Equalf(t, got.RelayLogPosition.GTIDSet, want.RelayLogPosition.GTIDSet, "got RelayLogPosition: %v; want RelayLogPosition: %v", got.RelayLogPosition.GTIDSet, want.RelayLogPosition.GTIDSet) - assert.Equalf(t, got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet, "got RelayLogFilePosition: %v; want RelayLogFilePosition: %v", got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet) - assert.Equalf(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) - assert.Equalf(t, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet, "got RelayLogSourceBinlogEquivalentPosition: %v; want RelayLogSourceBinlogEquivalentPosition: %v", got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet) - assert.Equalf(t, got.Position.GTIDSet, got.FilePosition.GTIDSet, "FilePosition and Position don't match when they should for the FilePos flavor") - assert.Equalf(t, got.RelayLogPosition.GTIDSet, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, "RelayLogPosition and RelayLogSourceBinlogEquivalentPosition don't match when they should for the FilePos flavor") -} - -func TestFilePosShouldGetPosition(t *testing.T) { - resultMap := map[string]string{ - "Position": "1307", - "File": "source-bin.000003", - } - - want := PrimaryStatus{ - Position: Position{GTIDSet: filePosGTID{file: "source-bin.000003", pos: 1307}}, - FilePosition: Position{GTIDSet: filePosGTID{file: "source-bin.000003", pos: 1307}}, - } - got, err := 
parseFilePosPrimaryStatus(resultMap) - require.NoError(t, err) - assert.Equalf(t, got.Position.GTIDSet, want.Position.GTIDSet, "got Position: %v; want Position: %v", got.Position.GTIDSet, want.Position.GTIDSet) - assert.Equalf(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) - assert.Equalf(t, got.Position.GTIDSet, got.FilePosition.GTIDSet, "FilePosition and Position don't match when they should for the FilePos flavor") -} diff --git a/go/mysql/flavor_mariadb.go b/go/mysql/flavor_mariadb.go index 377ede1ecc8..15718542b45 100644 --- a/go/mysql/flavor_mariadb.go +++ b/go/mysql/flavor_mariadb.go @@ -18,12 +18,13 @@ limitations under the License. package mysql import ( + "context" "fmt" "io" "time" - "context" - + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -41,7 +42,7 @@ var _ flavor = (*mariadbFlavor101)(nil) var _ flavor = (*mariadbFlavor102)(nil) // primaryGTIDSet is part of the Flavor interface. -func (mariadbFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) { +func (mariadbFlavor) primaryGTIDSet(c *Conn) (replication.GTIDSet, error) { qr, err := c.ExecuteFetch("SELECT @@GLOBAL.gtid_binlog_pos", 1, false) if err != nil { return nil, err @@ -50,11 +51,11 @@ func (mariadbFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) { return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected result format for gtid_binlog_pos: %#v", qr) } - return parseMariadbGTIDSet(qr.Rows[0][0].ToString()) + return replication.ParseMariadbGTIDSet(qr.Rows[0][0].ToString()) } // purgedGTIDSet is part of the Flavor interface. 
-func (mariadbFlavor) purgedGTIDSet(c *Conn) (GTIDSet, error) { +func (mariadbFlavor) purgedGTIDSet(c *Conn) (replication.GTIDSet, error) { return nil, nil } @@ -68,11 +69,11 @@ func (mariadbFlavor) gtidMode(c *Conn) (string, error) { return "", nil } -func (mariadbFlavor) startReplicationUntilAfter(pos Position) string { +func (mariadbFlavor) startReplicationUntilAfter(pos replication.Position) string { return fmt.Sprintf("START SLAVE UNTIL master_gtid_pos = \"%s\"", pos) } -func (mariadbFlavor) startSQLThreadUntilAfter(pos Position) string { +func (mariadbFlavor) startSQLThreadUntilAfter(pos replication.Position) string { return fmt.Sprintf("START SLAVE SQL_THREAD UNTIL master_gtid_pos = \"%s\"", pos) } @@ -105,7 +106,7 @@ func (mariadbFlavor) startSQLThreadCommand() string { } // sendBinlogDumpCommand is part of the Flavor interface. -func (mariadbFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos Position) error { +func (mariadbFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position) error { // Tell the server that we understand GTIDs by setting // mariadb_slave_capability to MARIA_SLAVE_CAPABILITY_GTID = 4 (MariaDB >= 10.0.1). if _, err := c.ExecuteFetch("SET @mariadb_slave_capability=4", 0, false); err != nil { @@ -154,7 +155,7 @@ func (mariadbFlavor) resetReplicationParametersCommands(c *Conn) []string { } // setReplicationPositionCommands is part of the Flavor interface. -func (mariadbFlavor) setReplicationPositionCommands(pos Position) []string { +func (mariadbFlavor) setReplicationPositionCommands(pos replication.Position) []string { return []string{ // RESET MASTER will clear out gtid_binlog_pos, // which then guarantees that gtid_current_pos = gtid_slave_pos, @@ -182,54 +183,42 @@ func (mariadbFlavor) changeReplicationSourceArg() string { } // status is part of the Flavor interface. 
-func (mariadbFlavor) status(c *Conn) (ReplicationStatus, error) { +func (mariadbFlavor) status(c *Conn) (replication.ReplicationStatus, error) { qr, err := c.ExecuteFetch("SHOW ALL SLAVES STATUS", 100, true /* wantfields */) if err != nil { - return ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } if len(qr.Rows) == 0 { // The query returned no data, meaning the server // is not configured as a replica. - return ReplicationStatus{}, ErrNotReplica + return replication.ReplicationStatus{}, ErrNotReplica } resultMap, err := resultToMap(qr) if err != nil { - return ReplicationStatus{}, err - } - - return parseMariadbReplicationStatus(resultMap) -} - -func parseMariadbReplicationStatus(resultMap map[string]string) (ReplicationStatus, error) { - status := parseReplicationStatus(resultMap) - - var err error - status.Position.GTIDSet, err = parseMariadbGTIDSet(resultMap["Gtid_Slave_Pos"]) - if err != nil { - return ReplicationStatus{}, vterrors.Wrapf(err, "ReplicationStatus can't parse MariaDB GTID (Gtid_Slave_Pos: %#v)", resultMap["Gtid_Slave_Pos"]) + return replication.ReplicationStatus{}, err } - return status, nil + return replication.ParseMariadbReplicationStatus(resultMap) } // primaryStatus is part of the Flavor interface. -func (m mariadbFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) { +func (m mariadbFlavor) primaryStatus(c *Conn) (replication.PrimaryStatus, error) { qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */) if err != nil { - return PrimaryStatus{}, err + return replication.PrimaryStatus{}, err } if len(qr.Rows) == 0 { // The query returned no data. We don't know how this could happen. 
- return PrimaryStatus{}, ErrNoPrimaryStatus + return replication.PrimaryStatus{}, ErrNoPrimaryStatus } resultMap, err := resultToMap(qr) if err != nil { - return PrimaryStatus{}, err + return replication.PrimaryStatus{}, err } - status := parsePrimaryStatus(resultMap) + status := replication.ParsePrimaryStatus(resultMap) status.Position.GTIDSet, err = m.primaryGTIDSet(c) return status, err } @@ -238,7 +227,7 @@ func (m mariadbFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) { // // Note: Unlike MASTER_POS_WAIT(), MASTER_GTID_WAIT() will continue waiting even // if the sql thread stops. If that is a problem, we'll have to change this. -func (mariadbFlavor) waitUntilPositionCommand(ctx context.Context, pos Position) (string, error) { +func (mariadbFlavor) waitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error) { if deadline, ok := ctx.Deadline(); ok { timeout := time.Until(deadline) if timeout <= 0 { @@ -260,7 +249,7 @@ func (mariadbFlavor) readBinlogEvent(c *Conn) (BinlogEvent, error) { } switch result[0] { case EOFPacket: - return nil, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", io.EOF) + return nil, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", io.EOF) case ErrPacket: return nil, ParseErrorPacket(result) } diff --git a/go/mysql/flavor_mariadb_test.go b/go/mysql/flavor_mariadb_test.go index a2741c27148..250d664e4af 100644 --- a/go/mysql/flavor_mariadb_test.go +++ b/go/mysql/flavor_mariadb_test.go @@ -17,11 +17,9 @@ limitations under the License. 
package mysql import ( - "fmt" "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestMariadbSetReplicationSourceCommand(t *testing.T) { @@ -77,51 +75,3 @@ func TestMariadbSetReplicationSourceCommandSSL(t *testing.T) { assert.Equal(t, want, got, "mariadbFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, connectRetry, got, want) } - -func TestMariadbRetrieveSourceServerId(t *testing.T) { - resultMap := map[string]string{ - "Master_Server_Id": "1", - "Gtid_Slave_Pos": "0-101-2320", - } - - want := ReplicationStatus{SourceServerID: 1} - got, err := parseMariadbReplicationStatus(resultMap) - require.NoError(t, err) - assert.Equal(t, got.SourceServerID, want.SourceServerID, fmt.Sprintf("got SourceServerID: %v; want SourceServerID: %v", got.SourceServerID, want.SourceServerID)) -} - -func TestMariadbRetrieveFileBasedPositions(t *testing.T) { - resultMap := map[string]string{ - "Exec_Master_Log_Pos": "1307", - "Relay_Master_Log_File": "master-bin.000002", - "Read_Master_Log_Pos": "1308", - "Master_Log_File": "master-bin.000003", - "Gtid_Slave_Pos": "0-101-2320", - "Relay_Log_Pos": "1309", - "Relay_Log_File": "relay-bin.000004", - } - - want := ReplicationStatus{ - FilePosition: Position{GTIDSet: filePosGTID{file: "master-bin.000002", pos: 1307}}, - RelayLogSourceBinlogEquivalentPosition: Position{GTIDSet: filePosGTID{file: "master-bin.000003", pos: 1308}}, - RelayLogFilePosition: Position{GTIDSet: filePosGTID{file: "relay-bin.000004", pos: 1309}}, - } - got, err := parseMariadbReplicationStatus(resultMap) - require.NoError(t, err) - assert.Equalf(t, got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet, "got RelayLogFilePosition: %v; want RelayLogFilePosition: %v", got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet) - assert.Equal(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, fmt.Sprintf("got FilePosition: %v; want FilePosition: %v", 
got.FilePosition.GTIDSet, want.FilePosition.GTIDSet)) - assert.Equal(t, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet, fmt.Sprintf("got RelayLogSourceBinlogEquivalentPosition: %v; want RelayLogSourceBinlogEquivalentPosition: %v", got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet)) -} - -func TestMariadbShouldGetNilRelayLogPosition(t *testing.T) { - resultMap := map[string]string{ - "Exec_Master_Log_Pos": "1307", - "Relay_Master_Log_File": "master-bin.000002", - "Read_Master_Log_Pos": "1308", - "Master_Log_File": "master-bin.000003", - "Gtid_Slave_Pos": "0-101-2320", - } - got, err := parseMariadbReplicationStatus(resultMap) - require.NoError(t, err) - assert.Truef(t, got.RelayLogPosition.IsZero(), "Got a filled in RelayLogPosition. For MariaDB we should get back nil, because MariaDB does not return the retrieved GTIDSet. got: %#v", got.RelayLogPosition) -} diff --git a/go/mysql/flavor_mysql.go b/go/mysql/flavor_mysql.go index 388986e96fe..bc5f31006e5 100644 --- a/go/mysql/flavor_mysql.go +++ b/go/mysql/flavor_mysql.go @@ -22,6 +22,8 @@ import ( "io" "time" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -43,7 +45,7 @@ var _ flavor = (*mysqlFlavor57)(nil) var _ flavor = (*mysqlFlavor80)(nil) // primaryGTIDSet is part of the Flavor interface. 
-func (mysqlFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) { +func (mysqlFlavor) primaryGTIDSet(c *Conn) (replication.GTIDSet, error) { // keep @@global as lowercase, as some servers like the Ripple binlog server only honors a lowercase `global` value qr, err := c.ExecuteFetch("SELECT @@global.gtid_executed", 1, false) if err != nil { @@ -52,11 +54,11 @@ func (mysqlFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) { if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 { return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected result format for gtid_executed: %#v", qr) } - return ParseMysql56GTIDSet(qr.Rows[0][0].ToString()) + return replication.ParseMysql56GTIDSet(qr.Rows[0][0].ToString()) } // purgedGTIDSet is part of the Flavor interface. -func (mysqlFlavor) purgedGTIDSet(c *Conn) (GTIDSet, error) { +func (mysqlFlavor) purgedGTIDSet(c *Conn) (replication.GTIDSet, error) { // keep @@global as lowercase, as some servers like the Ripple binlog server only honors a lowercase `global` value qr, err := c.ExecuteFetch("SELECT @@global.gtid_purged", 1, false) if err != nil { @@ -65,7 +67,7 @@ func (mysqlFlavor) purgedGTIDSet(c *Conn) (GTIDSet, error) { if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 { return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected result format for gtid_purged: %#v", qr) } - return ParseMysql56GTIDSet(qr.Rows[0][0].ToString()) + return replication.ParseMysql56GTIDSet(qr.Rows[0][0].ToString()) } // serverUUID is part of the Flavor interface. 
@@ -105,11 +107,11 @@ func (mysqlFlavor) restartReplicationCommands() []string { } } -func (mysqlFlavor) startReplicationUntilAfter(pos Position) string { +func (mysqlFlavor) startReplicationUntilAfter(pos replication.Position) string { return fmt.Sprintf("START SLAVE UNTIL SQL_AFTER_GTIDS = '%s'", pos) } -func (mysqlFlavor) startSQLThreadUntilAfter(pos Position) string { +func (mysqlFlavor) startSQLThreadUntilAfter(pos replication.Position) string { return fmt.Sprintf("START SLAVE SQL_THREAD UNTIL SQL_AFTER_GTIDS = '%s'", pos) } @@ -130,8 +132,8 @@ func (mysqlFlavor) startSQLThreadCommand() string { } // sendBinlogDumpCommand is part of the Flavor interface. -func (mysqlFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos Position) error { - gtidSet, ok := startPos.GTIDSet.(Mysql56GTIDSet) +func (mysqlFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position) error { + gtidSet, ok := startPos.GTIDSet.(replication.Mysql56GTIDSet) if !ok { return vterrors.Errorf(vtrpc.Code_INTERNAL, "startPos.GTIDSet is wrong type - expected Mysql56GTIDSet, got: %#v", startPos.GTIDSet) } @@ -163,7 +165,7 @@ func (mysqlFlavor) resetReplicationParametersCommands(c *Conn) []string { } // setReplicationPositionCommands is part of the Flavor interface. -func (mysqlFlavor) setReplicationPositionCommands(pos Position) []string { +func (mysqlFlavor) setReplicationPositionCommands(pos replication.Position) []string { return []string{ "RESET MASTER", // We must clear gtid_executed before setting gtid_purged. fmt.Sprintf("SET GLOBAL gtid_purged = '%s'", pos), @@ -176,88 +178,46 @@ func (mysqlFlavor) changeReplicationSourceArg() string { } // status is part of the Flavor interface. 
-func (mysqlFlavor) status(c *Conn) (ReplicationStatus, error) { +func (mysqlFlavor) status(c *Conn) (replication.ReplicationStatus, error) { qr, err := c.ExecuteFetch("SHOW SLAVE STATUS", 100, true /* wantfields */) if err != nil { - return ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } if len(qr.Rows) == 0 { // The query returned no data, meaning the server // is not configured as a replica. - return ReplicationStatus{}, ErrNotReplica + return replication.ReplicationStatus{}, ErrNotReplica } resultMap, err := resultToMap(qr) if err != nil { - return ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } - return parseMysqlReplicationStatus(resultMap) -} - -func parseMysqlReplicationStatus(resultMap map[string]string) (ReplicationStatus, error) { - status := parseReplicationStatus(resultMap) - uuidString := resultMap["Master_UUID"] - if uuidString != "" { - sid, err := ParseSID(uuidString) - if err != nil { - return ReplicationStatus{}, vterrors.Wrapf(err, "cannot decode SourceUUID") - } - status.SourceUUID = sid - } - - var err error - status.Position.GTIDSet, err = ParseMysql56GTIDSet(resultMap["Executed_Gtid_Set"]) - if err != nil { - return ReplicationStatus{}, vterrors.Wrapf(err, "ReplicationStatus can't parse MySQL 5.6 GTID (Executed_Gtid_Set: %#v)", resultMap["Executed_Gtid_Set"]) - } - relayLogGTIDSet, err := ParseMysql56GTIDSet(resultMap["Retrieved_Gtid_Set"]) - if err != nil { - return ReplicationStatus{}, vterrors.Wrapf(err, "ReplicationStatus can't parse MySQL 5.6 GTID (Retrieved_Gtid_Set: %#v)", resultMap["Retrieved_Gtid_Set"]) - } - // We take the union of the executed and retrieved gtidset, because the retrieved gtidset only represents GTIDs since - // the relay log has been reset. To get the full Position, we need to take a union of executed GTIDSets, since these would - // have been in the relay log's GTIDSet in the past, prior to a reset. 
- status.RelayLogPosition.GTIDSet = status.Position.GTIDSet.Union(relayLogGTIDSet) - - return status, nil + return replication.ParseMysqlReplicationStatus(resultMap) } // primaryStatus is part of the Flavor interface. -func (mysqlFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) { +func (mysqlFlavor) primaryStatus(c *Conn) (replication.PrimaryStatus, error) { qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */) if err != nil { - return PrimaryStatus{}, err + return replication.PrimaryStatus{}, err } if len(qr.Rows) == 0 { // The query returned no data. We don't know how this could happen. - return PrimaryStatus{}, ErrNoPrimaryStatus + return replication.PrimaryStatus{}, ErrNoPrimaryStatus } resultMap, err := resultToMap(qr) if err != nil { - return PrimaryStatus{}, err + return replication.PrimaryStatus{}, err } - return parseMysqlPrimaryStatus(resultMap) + return replication.ParseMysqlPrimaryStatus(resultMap) } -func parseMysqlPrimaryStatus(resultMap map[string]string) (PrimaryStatus, error) { - status := parsePrimaryStatus(resultMap) - - var err error - status.Position.GTIDSet, err = ParseMysql56GTIDSet(resultMap["Executed_Gtid_Set"]) - if err != nil { - return PrimaryStatus{}, vterrors.Wrapf(err, "PrimaryStatus can't parse MySQL 5.6 GTID (Executed_Gtid_Set: %#v)", resultMap["Executed_Gtid_Set"]) - } - - return status, nil -} - -// waitUntilPositionCommand is part of the Flavor interface. - // waitUntilPositionCommand is part of the Flavor interface. -func (mysqlFlavor) waitUntilPositionCommand(ctx context.Context, pos Position) (string, error) { +func (mysqlFlavor) waitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error) { // A timeout of 0 means wait indefinitely. 
timeoutSeconds := 0 if deadline, ok := ctx.Deadline(); ok { @@ -285,7 +245,7 @@ func (mysqlFlavor) readBinlogEvent(c *Conn) (BinlogEvent, error) { } switch result[0] { case EOFPacket: - return nil, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", io.EOF) + return nil, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", io.EOF) case ErrPacket: return nil, ParseErrorPacket(result) } @@ -361,7 +321,7 @@ const TablesWithSize80 = `SELECT t.table_name, i.allocated_size FROM information_schema.tables t LEFT JOIN information_schema.innodb_tablespaces i - ON i.name = CONCAT(t.table_schema, '/', t.table_name) COLLATE utf8_general_ci + ON i.name = CONCAT(t.table_schema, '/', t.table_name) COLLATE utf8mb3_general_ci WHERE t.table_schema = database() AND not t.create_options <=> 'partitioned' UNION ALL @@ -374,7 +334,7 @@ UNION ALL SUM(i.allocated_size) FROM information_schema.tables t LEFT JOIN information_schema.innodb_tablespaces i - ON i.name LIKE (CONCAT(t.table_schema, '/', t.table_name, '#p#%') COLLATE utf8_general_ci ) + ON i.name LIKE (CONCAT(t.table_schema, '/', t.table_name, '#p#%') COLLATE utf8mb3_general_ci ) WHERE t.table_schema = database() AND t.create_options <=> 'partitioned' GROUP BY diff --git a/go/mysql/flavor_mysql_test.go b/go/mysql/flavor_mysql_test.go index 75d6a3ebc65..0e1b749633a 100644 --- a/go/mysql/flavor_mysql_test.go +++ b/go/mysql/flavor_mysql_test.go @@ -20,7 +20,6 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestMysql56SetReplicationSourceCommand(t *testing.T) { @@ -76,74 +75,3 @@ func TestMysql56SetReplicationSourceCommandSSL(t *testing.T) { assert.Equal(t, want, got, "mysqlFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, connectRetry, got, want) } - -func TestMysqlRetrieveSourceServerId(t *testing.T) { - resultMap := map[string]string{ - "Master_Server_Id": "1", - } - - want := ReplicationStatus{SourceServerID: 
1} - got, err := parseMysqlReplicationStatus(resultMap) - require.NoError(t, err) - assert.Equalf(t, got.SourceServerID, want.SourceServerID, "got SourceServerID: %v; want SourceServerID: %v", got.SourceServerID, want.SourceServerID) -} - -func TestMysqlRetrieveFileBasedPositions(t *testing.T) { - resultMap := map[string]string{ - "Exec_Master_Log_Pos": "1307", - "Relay_Master_Log_File": "master-bin.000002", - "Read_Master_Log_Pos": "1308", - "Master_Log_File": "master-bin.000003", - "Relay_Log_Pos": "1309", - "Relay_Log_File": "relay-bin.000004", - } - - want := ReplicationStatus{ - FilePosition: Position{GTIDSet: filePosGTID{file: "master-bin.000002", pos: 1307}}, - RelayLogSourceBinlogEquivalentPosition: Position{GTIDSet: filePosGTID{file: "master-bin.000003", pos: 1308}}, - RelayLogFilePosition: Position{GTIDSet: filePosGTID{file: "relay-bin.000004", pos: 1309}}, - } - got, err := parseMysqlReplicationStatus(resultMap) - require.NoError(t, err) - assert.Equalf(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) - assert.Equalf(t, got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet, "got RelayLogFilePosition: %v; want RelayLogFilePosition: %v", got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet) - assert.Equalf(t, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet, "got RelayLogSourceBinlogEquivalentPosition: %v; want RelayLogSourceBinlogEquivalentPosition: %v", got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet) -} - -func TestMysqlShouldGetRelayLogPosition(t *testing.T) { - resultMap := map[string]string{ - "Executed_Gtid_Set": "3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", - "Retrieved_Gtid_Set": "3e11fa47-71ca-11e1-9e33-c80aa9429562:6-9", - "Exec_Master_Log_Pos": "1307", - "Relay_Master_Log_File": "master-bin.000002", - 
"Read_Master_Log_Pos": "1308", - "Master_Log_File": "master-bin.000003", - } - - sid, _ := ParseSID("3e11fa47-71ca-11e1-9e33-c80aa9429562") - want := ReplicationStatus{ - Position: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 5}}}}, - RelayLogPosition: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 9}}}}, - } - got, err := parseMysqlReplicationStatus(resultMap) - require.NoError(t, err) - assert.Equalf(t, got.RelayLogPosition.GTIDSet.String(), want.RelayLogPosition.GTIDSet.String(), "got RelayLogPosition: %v; want RelayLogPosition: %v", got.RelayLogPosition.GTIDSet, want.RelayLogPosition.GTIDSet) -} - -func TestMysqlShouldGetPosition(t *testing.T) { - resultMap := map[string]string{ - "Executed_Gtid_Set": "3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", - "Position": "1307", - "File": "source-bin.000003", - } - - sid, _ := ParseSID("3e11fa47-71ca-11e1-9e33-c80aa9429562") - want := PrimaryStatus{ - Position: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 5}}}}, - FilePosition: Position{GTIDSet: filePosGTID{file: "source-bin.000003", pos: 1307}}, - } - got, err := parseMysqlPrimaryStatus(resultMap) - require.NoError(t, err) - assert.Equalf(t, got.Position.GTIDSet.String(), want.Position.GTIDSet.String(), "got Position: %v; want Position: %v", got.Position.GTIDSet, want.Position.GTIDSet) - assert.Equalf(t, got.FilePosition.GTIDSet.String(), want.FilePosition.GTIDSet.String(), "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) -} diff --git a/go/mysql/flavor_mysqlgr.go b/go/mysql/flavor_mysqlgr.go index 33bd1e6e3e1..e96a6433f73 100644 --- a/go/mysql/flavor_mysqlgr.go +++ b/go/mysql/flavor_mysqlgr.go @@ -21,6 +21,7 @@ import ( "fmt" "math" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -57,12 +58,12 @@ func (mysqlGRFlavor) restartReplicationCommands() []string { } // startReplicationUntilAfter is 
disabled in mysqlGRFlavor -func (mysqlGRFlavor) startReplicationUntilAfter(pos Position) string { +func (mysqlGRFlavor) startReplicationUntilAfter(pos replication.Position) string { return "" } // startSQLThreadUntilAfter is disabled in mysqlGRFlavor -func (mysqlGRFlavor) startSQLThreadUntilAfter(pos Position) string { +func (mysqlGRFlavor) startSQLThreadUntilAfter(pos replication.Position) string { return "" } @@ -99,7 +100,7 @@ func (mysqlGRFlavor) resetReplicationParametersCommands(c *Conn) []string { } // setReplicationPositionCommands is disabled in mysqlGRFlavor -func (mysqlGRFlavor) setReplicationPositionCommands(pos Position) []string { +func (mysqlGRFlavor) setReplicationPositionCommands(pos replication.Position) []string { return []string{} } @@ -110,8 +111,8 @@ func (mysqlGRFlavor) setReplicationPositionCommands(pos Position) []string { // TODO: Right now the GR's lag is defined as the lag between a node processing a txn // and the time the txn was committed. We should consider reporting lag between current queueing txn timestamp // from replication_connection_status and the current processing txn's commit timestamp -func (mysqlGRFlavor) status(c *Conn) (ReplicationStatus, error) { - res := ReplicationStatus{} +func (mysqlGRFlavor) status(c *Conn) (replication.ReplicationStatus, error) { + res := replication.ReplicationStatus{} // Get primary node information query := `SELECT MEMBER_HOST, @@ -125,7 +126,7 @@ func (mysqlGRFlavor) status(c *Conn) (ReplicationStatus, error) { return nil }) if err != nil { - return ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } query = `SELECT @@ -148,7 +149,7 @@ func (mysqlGRFlavor) status(c *Conn) (ReplicationStatus, error) { return nil }) if err != nil { - return ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } // if chanel is not set, it means the state is not ONLINE or RECOVERING // return partial result early @@ -160,26 +161,26 @@ func (mysqlGRFlavor) status(c *Conn) 
(ReplicationStatus, error) { query = fmt.Sprintf(`SELECT SERVICE_STATE FROM performance_schema.replication_connection_status WHERE CHANNEL_NAME='%s'`, chanel) - var connectionState ReplicationState + var connectionState replication.ReplicationState err = fetchStatusForGroupReplication(c, query, func(values []sqltypes.Value) error { - connectionState = ReplicationStatusToState(values[0].ToString()) + connectionState = replication.ReplicationStatusToState(values[0].ToString()) return nil }) if err != nil { - return ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } res.IOState = connectionState // Populate SQLState from replication_connection_status - var applierState ReplicationState + var applierState replication.ReplicationState query = fmt.Sprintf(`SELECT SERVICE_STATE FROM performance_schema.replication_applier_status_by_coordinator WHERE CHANNEL_NAME='%s'`, chanel) err = fetchStatusForGroupReplication(c, query, func(values []sqltypes.Value) error { - applierState = ReplicationStatusToState(values[0].ToString()) + applierState = replication.ReplicationStatusToState(values[0].ToString()) return nil }) if err != nil { - return ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } res.SQLState = applierState @@ -197,17 +198,17 @@ func (mysqlGRFlavor) status(c *Conn) (ReplicationStatus, error) { return nil }) if err != nil { - return ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } return res, nil } -func parsePrimaryGroupMember(res *ReplicationStatus, row []sqltypes.Value) { +func parsePrimaryGroupMember(res *replication.ReplicationStatus, row []sqltypes.Value) { res.SourceHost = row[0].ToString() /* MEMBER_HOST */ res.SourcePort, _ = row[1].ToInt32() /* MEMBER_PORT */ } -func parseReplicationApplierLag(res *ReplicationStatus, row []sqltypes.Value) { +func parseReplicationApplierLag(res *replication.ReplicationStatus, row []sqltypes.Value) { lagSec, err := row[0].ToUint32() // if the error is not nil, 
ReplicationLagSeconds will remain to be MaxUint32 if err == nil { @@ -234,7 +235,7 @@ func fetchStatusForGroupReplication(c *Conn, query string, onResult func([]sqlty // primaryStatus returns the result of 'SHOW MASTER STATUS', // with parsed executed position. -func (mysqlGRFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) { +func (mysqlGRFlavor) primaryStatus(c *Conn) (replication.PrimaryStatus, error) { return mysqlFlavor{}.primaryStatus(c) } diff --git a/go/mysql/flavor_mysqlgr_test.go b/go/mysql/flavor_mysqlgr_test.go index 6b15ee5048e..df7876eca1c 100644 --- a/go/mysql/flavor_mysqlgr_test.go +++ b/go/mysql/flavor_mysqlgr_test.go @@ -20,12 +20,14 @@ import ( "gotest.tools/assert" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" ) func TestMysqlGRParsePrimaryGroupMember(t *testing.T) { - res := ReplicationStatus{} + res := replication.ReplicationStatus{} rows := []sqltypes.Value{ sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("host1")), sqltypes.MakeTrusted(querypb.Type_INT32, []byte("10")), @@ -33,12 +35,12 @@ func TestMysqlGRParsePrimaryGroupMember(t *testing.T) { parsePrimaryGroupMember(&res, rows) assert.Equal(t, "host1", res.SourceHost) assert.Equal(t, int32(10), res.SourcePort) - assert.Equal(t, ReplicationStateUnknown, res.IOState) - assert.Equal(t, ReplicationStateUnknown, res.SQLState) + assert.Equal(t, replication.ReplicationStateUnknown, res.IOState) + assert.Equal(t, replication.ReplicationStateUnknown, res.SQLState) } func TestMysqlGRReplicationApplierLagParse(t *testing.T) { - res := ReplicationStatus{} + res := replication.ReplicationStatus{} row := []sqltypes.Value{ sqltypes.MakeTrusted(querypb.Type_INT32, []byte("NULL")), } diff --git a/go/mysql/handshake_test.go b/go/mysql/handshake_test.go index b6532f830b3..c2b27d6f6d4 100644 --- a/go/mysql/handshake_test.go +++ b/go/mysql/handshake_test.go @@ -45,7 +45,7 @@ func TestClearTextClientAuth(t *testing.T) { defer 
authServer.close() // Create the listener. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -99,7 +99,7 @@ func TestSSLConnection(t *testing.T) { defer authServer.close() // Create the listener, so we can get its host. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() diff --git a/go/mysql/hex/hex.go b/go/mysql/hex/hex.go index 941cc08e5b5..d2aa00d592e 100644 --- a/go/mysql/hex/hex.go +++ b/go/mysql/hex/hex.go @@ -71,10 +71,10 @@ func DecodedLen(src []byte) int { return (len(src) + 1) / 2 } -func DecodeBytes(dst, src []byte) bool { +func DecodeBytes(dst, src []byte) error { if len(src)&1 == 1 { src = append([]byte{'0'}, src...) } _, err := hex.Decode(dst, src) - return err == nil + return err } diff --git a/go/mysql/icuregex/compiler.go b/go/mysql/icuregex/compiler.go new file mode 100644 index 00000000000..971cd439fb3 --- /dev/null +++ b/go/mysql/icuregex/compiler.go @@ -0,0 +1,3646 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex + +import ( + "math" + "slices" + "strings" + "unicode/utf8" + + "vitess.io/vitess/go/mysql/icuregex/internal/pattern" + "vitess.io/vitess/go/mysql/icuregex/internal/ucase" + "vitess.io/vitess/go/mysql/icuregex/internal/uchar" + "vitess.io/vitess/go/mysql/icuregex/internal/unames" + "vitess.io/vitess/go/mysql/icuregex/internal/uprops" + "vitess.io/vitess/go/mysql/icuregex/internal/uset" + "vitess.io/vitess/go/mysql/icuregex/internal/utf16" +) + +const BreakIteration = false +const stackSize = 100 + +type reChar struct { + char rune + quoted bool +} + +const ( + parenPlain = -1 + parenCapturing = -2 + parenAtomic = -3 + parenLookahead = -4 + parenNegLookahead = -5 + parenFlags = -6 + parenLookBehind = -7 + parenLookBehindN = -8 +) + +type setOperation uint32 + +const ( + setStart setOperation = 0<<16 | 1 + setEnd setOperation = 1<<16 | 2 + setNegation setOperation = 2<<16 | 3 + setCaseClose setOperation = 2<<16 | 9 + setDifference2 setOperation = 3<<16 | 4 // '--' set difference operator + setIntersection2 setOperation = 3<<16 | 5 // '&&' set intersection operator + setUnion setOperation = 4<<16 | 6 // implicit union of adjacent items + setDifference1 setOperation = 4<<16 | 7 // '-', single dash difference op, for compatibility with old UnicodeSet. + setIntersection1 setOperation = 4<<16 | 8 // '&', single amp intersection op, for compatibility with old UnicodeSet. 
+) + +type compiler struct { + err error + out *Pattern + p []rune + + scanIndex int + quoteMode bool + inBackslashQuote bool + eolComments bool + + lineNum int + charNum int + lastChar rune + peekChar rune + + c reChar + stack [stackSize]uint16 + stackPtr int + + modeFlags RegexpFlag + newModeFlags RegexpFlag + setModeFlag bool + + literalChars []rune + + parenStack []int + matchOpenParen int + matchCloseParen int + + intervalLow int + intervalUpper int + + setStack []*uset.UnicodeSet + setOpStack []setOperation + + lastSetLiteral rune + captureName *strings.Builder +} + +func newCompiler(pat *Pattern) *compiler { + return &compiler{ + out: pat, + scanIndex: 0, + eolComments: true, + lineNum: 1, + charNum: 0, + lastChar: -1, + peekChar: -1, + modeFlags: RegexpFlag(uint32(pat.flags) | 0x80000000), + matchOpenParen: -1, + matchCloseParen: -1, + lastSetLiteral: -1, + } +} + +func (c *compiler) nextCharLL() (ch rune) { + if c.peekChar != -1 { + ch, c.peekChar = c.peekChar, -1 + return + } + if len(c.p) == 0 { + return -1 + } + + ch = c.p[0] + c.p = c.p[1:] + if ch == utf8.RuneError { + return -1 + } + + if ch == chCR || ch == chNEL || ch == chLS || (ch == chLF && c.lastChar != chCR) { + c.lineNum++ + c.charNum = 0 + } else { + if ch != chLF { + c.charNum++ + } + } + c.lastChar = ch + return +} + +func (c *compiler) peekCharLL() rune { + if c.peekChar == -1 { + c.peekChar = c.nextCharLL() + } + return c.peekChar +} + +func (c *compiler) nextChar(ch *reChar) { + c.scanIndex++ + ch.char = c.nextCharLL() + ch.quoted = false + + if c.quoteMode { + ch.quoted = true + if (ch.char == chBackSlash && c.peekCharLL() == chE && ((c.modeFlags & Literal) == 0)) || + ch.char == -1 { + c.quoteMode = false // Exit quote mode, + c.nextCharLL() // discard the E + c.nextChar(ch) + return + } + } else if c.inBackslashQuote { + // The current character immediately follows a '\' + // Don't check for any further escapes, just return it as-is. 
+ // Don't set c.fQuoted, because that would prevent the state machine from + // dispatching on the character. + c.inBackslashQuote = false + } else { + // We are not in a \Q quoted region \E of the source. + // + if (c.modeFlags & Comments) != 0 { + // + // We are in free-spacing and comments mode. + // Scan through any white space and comments, until we + // reach a significant character or the end of input. + for { + if ch.char == -1 { + break // End of Input + } + if ch.char == chPound && c.eolComments { + // Start of a comment. Consume the rest of it, until EOF or a new line + for { + ch.char = c.nextCharLL() + if ch.char == -1 || // EOF + ch.char == chCR || + ch.char == chLF || + ch.char == chNEL || + ch.char == chLS { + break + } + } + } + // TODO: check what Java & Perl do with non-ASCII white spaces. Ticket 6061. + if !pattern.IsWhitespace(ch.char) { + break + } + ch.char = c.nextCharLL() + } + } + + // + // check for backslash escaped characters. + // + if ch.char == chBackSlash { + beforeEscape := c.p + if staticSetUnescape.ContainsRune(c.peekCharLL()) { + // + // A '\' sequence that is handled by ICU's standard unescapeAt function. + // Includes \uxxxx, \n, \r, many others. + // Return the single equivalent character. + // + c.nextCharLL() // get & discard the peeked char. + ch.quoted = true + + ch.char, c.p = pattern.UnescapeAtRunes(beforeEscape) + if ch.char < 0 { + c.error(BadEscapeSequence) + } + c.charNum += len(beforeEscape) - len(c.p) + } else if c.peekCharLL() == chDigit0 { + // Octal Escape, using Java Regexp Conventions + // which are \0 followed by 1-3 octal digits. + // Different from ICU Unescape handling of Octal, which does not + // require the leading 0. + // Java also has the convention of only consuming 2 octal digits if + // the three digit number would be > 0xff + // + ch.char = 0 + c.nextCharLL() // Consume the initial 0. 
+ for index := 0; index < 3; index++ { + ch2 := c.peekCharLL() + if ch2 < chDigit0 || ch2 > chDigit7 { + if index == 0 { + // \0 is not followed by any octal digits. + c.error(BadEscapeSequence) + } + break + } + ch.char <<= 3 + ch.char += ch2 & 7 + if ch.char <= 255 { + c.nextCharLL() + } else { + // The last digit made the number too big. Forget we saw it. + ch.char >>= 3 + } + } + ch.quoted = true + } else if c.peekCharLL() == chQ { + // "\Q" enter quote mode, which will continue until "\E" + c.quoteMode = true + c.nextCharLL() // discard the 'Q'. + c.nextChar(ch) // recurse to get the real next char. + return + } else { + // We are in a '\' escape that will be handled by the state table scanner. + // Just return the backslash, but remember that the following char is to + // be taken literally. + c.inBackslashQuote = true + } + } + } + + // re-enable # to end-of-line comments, in case they were disabled. + // They are disabled by the parser upon seeing '(?', but this lasts for + // the fetching of the next character only. + c.eolComments = true +} + +const ( + chCR = 0x0d // New lines, for terminating comments. + chLF = 0x0a // Line Feed + chPound = 0x23 // '#', introduces a comment. 
+ chDigit0 = 0x30 // '0' + chDigit7 = 0x37 // '7' + chColon = 0x3A // ':' + chE = 0x45 // 'E' + chQ = 0x51 // 'Q' + chN = 0x4E // 'N' + chP = 0x50 // 'P' + chBackSlash = 0x5c // '\' introduces a char escape + chLBracket = 0x5b // '[' + chRBracket = 0x5d // ']' + chUp = 0x5e // '^' + chLowerP = 0x70 + chLBrace = 0x7b // '{' + chRBrace = 0x7d // '}' + chNEL = 0x85 // NEL newline variant + chLS = 0x2028 // Unicode Line Separator + chAmp = 0x26 // '&' + chDash = 0x2d // '-' +) + +func (c *compiler) compile(pat []rune) error { + if c.err != nil { + return c.err + } + if c.out.pattern != "" { + panic("cannot reuse pattern") + } + + c.out.pattern = string(pat) + c.p = pat + + var state uint16 = 1 + var table []regexTableEl + + // UREGEX_LITERAL force entire pattern to be treated as a literal string. + if c.modeFlags&Literal != 0 { + c.quoteMode = true + } + + c.nextChar(&c.c) + + // Main loop for the regex pattern parsing state machine. + // Runs once per state transition. + // Each time through optionally performs, depending on the state table, + // - an advance to the next pattern char + // - an action to be performed. + // - pushing or popping a state to/from the local state return stack. + // file regexcst.txt is the source for the state table. The logic behind + // recognizing the pattern syntax is there, not here. 
+ for { + if c.err != nil { + break + } + + if state == 0 { + panic("bad state?") + } + + table = parseStateTable[state:] + for len(table) > 0 { + if table[0].charClass < 127 && !c.c.quoted && rune(table[0].charClass) == c.c.char { + break + } + if table[0].charClass == 255 { + break + } + if table[0].charClass == 254 && c.c.quoted { + break + } + if table[0].charClass == 253 && c.c.char == -1 { + break + } + if table[0].charClass >= 128 && table[0].charClass < 240 && !c.c.quoted && c.c.char != -1 { + if staticRuleSet[table[0].charClass-128].ContainsRune(c.c.char) { + break + } + } + + table = table[1:] + } + + if !c.doParseActions(table[0].action) { + break + } + + if table[0].pushState != 0 { + c.stackPtr++ + if c.stackPtr >= stackSize { + c.error(InternalError) + c.stackPtr-- + } + c.stack[c.stackPtr] = uint16(table[0].pushState) + } + + if table[0].nextChar { + c.nextChar(&c.c) + } + + if table[0].nextState != 255 { + state = uint16(table[0].nextState) + } else { + state = c.stack[c.stackPtr] + c.stackPtr-- + if c.stackPtr < 0 { + c.stackPtr++ + c.error(MismatchedParen) + } + } + } + + if c.err != nil { + return c.err + } + + c.allocateStackData(restackframeHdrCount) + c.stripNOPs() + + c.out.minMatchLen = c.minMatchLength(3, len(c.out.compiledPat)-1) + + c.matchStartType() + return c.err +} + +func (c *compiler) doParseActions(action patternParseAction) bool { + switch action { + case doPatStart: + // Start of pattern compiles to: + //0 SAVE 2 Fall back to position of FAIL + //1 jmp 3 + //2 FAIL Stop if we ever reach here. + //3 NOP Dummy, so start of pattern looks the same as + // the start of an ( grouping. + //4 NOP Reserved, will be replaced by a save if there are + // OR | operators at the top level + c.appendOp(urxStateSave, 2) + c.appendOp(urxJmp, 3) + c.appendOp(urxFail, 0) + + // Standard open nonCapture paren action emits the two NOPs and + // sets up the paren stack frame. 
+ c.doParseActions(doOpenNonCaptureParen) + + case doPatFinish: + // We've scanned to the end of the pattern + // The end of pattern compiles to: + // URX_END + // which will stop the runtime match engine. + // Encountering end of pattern also behaves like a close paren, + // and forces fixups of the State Save at the beginning of the compiled pattern + // and of any OR operations at the top level. + // + c.handleCloseParen() + if len(c.parenStack) > 0 { + // Missing close paren in pattern. + c.error(MismatchedParen) + } + + // add the END operation to the compiled pattern. + c.appendOp(urxEnd, 0) + + // Terminate the pattern compilation state machine. + return false + + case doOrOperator: + // Scanning a '|', as in (A|B) + // Generate code for any pending literals preceding the '|' + c.fixLiterals(false) + + // Insert a SAVE operation at the start of the pattern section preceding + // this OR at this level. This SAVE will branch the match forward + // to the right hand side of the OR in the event that the left hand + // side fails to match and backtracks. Locate the position for the + // save from the location on the top of the parentheses stack. + var savePosition int + savePosition, c.parenStack = stackPop(c.parenStack) + op := c.out.compiledPat[savePosition] + + if op.typ() != urxNop { + panic("expected a NOP placeholder") + } + + op = c.buildOp(urxStateSave, len(c.out.compiledPat)+1) + c.out.compiledPat[savePosition] = op + + // Append an JMP operation into the compiled pattern. The operand for + // the JMP will eventually be the location following the ')' for the + // group. This will be patched in later, when the ')' is encountered. + c.appendOp(urxJmp, 0) + + // Push the position of the newly added JMP op onto the parentheses stack. + // This registers if for fixup when this block's close paren is encountered. + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1) + + // Append a NOP to the compiled pattern. 
This is the slot reserved + // for a SAVE in the event that there is yet another '|' following + // this one. + c.appendOp(urxNop, 0) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1) + + case doBeginNamedCapture: + // Scanning (? + // Compile to a + // - NOP, which later may be replaced if the parenthesized group + // has a quantifier, followed by + // - STO_SP save state stack position, so it can be restored at the ")" + // - NOP, which may later be replaced by a save-state if there + // is an '|' alternation within the parens. + c.fixLiterals(false) + c.appendOp(urxNop, 0) + varLoc := c.allocateData(1) // Reserve a data location for saving the state stack ptr. + c.appendOp(urxStoSp, varLoc) + c.appendOp(urxNop, 0) + + // On the Parentheses stack, start a new frame and add the postions + // of the two NOPs. Depending on what follows in the pattern, the + // NOPs may be changed to SAVE_STATE or JMP ops, with a target + // address of the end of the parenthesized group. + c.parenStack = append(c.parenStack, int(c.modeFlags)) + c.parenStack = append(c.parenStack, parenAtomic) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-3) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1) + + case doOpenLookAhead: + // Positive Look-ahead (?= stuff ) + // + // Note: Addition of transparent input regions, with the need to + // restore the original regions when failing out of a lookahead + // block, complicated this sequence. Some combined opcodes + // might make sense - or might not, lookahead aren't that common. + // + // Caution: min match length optimization knows about this + // sequence; don't change without making updates there too. + // + // Compiles to + // 1 LA_START dataLoc Saves SP, Input Pos, Active input region. + // 2. STATE_SAVE 4 on failure of lookahead, goto 4 + // 3 JMP 6 continue ... + // + // 4. LA_END Look Ahead failed. Restore regions. + // 5. BACKTRACK and back track again. + // + // 6. 
NOP reserved for use by quantifiers on the block. + // Look-ahead can't have quantifiers, but paren stack + // compile time conventions require the slot anyhow. + // 7. NOP may be replaced if there is are '|' ops in the block. + // 8. code for parenthesized stuff. + // 9. LA_END + // + // Four data slots are reserved, for saving state on entry to the look-around + // 0: stack pointer on entry. + // 1: input position on entry. + // 2: fActiveStart, the active bounds start on entry. + // 3: fActiveLimit, the active bounds limit on entry. + c.fixLiterals(false) + dataLoc := c.allocateData(4) + c.appendOp(urxLaStart, dataLoc) + c.appendOp(urxStateSave, len(c.out.compiledPat)+2) + c.appendOp(urxJmp, len(c.out.compiledPat)+3) + c.appendOp(urxLaEnd, dataLoc) + c.appendOp(urxBacktrack, 0) + c.appendOp(urxNop, 0) + c.appendOp(urxNop, 0) + + // On the Parentheses stack, start a new frame and add the postions + // of the NOPs. + c.parenStack = append(c.parenStack, int(c.modeFlags)) + c.parenStack = append(c.parenStack, parenLookahead) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-2) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1) + + case doOpenLookAheadNeg: + // Negated Lookahead. (?! stuff ) + // Compiles to + // 1. LA_START dataloc + // 2. SAVE_STATE 7 // Fail within look-ahead block restores to this state, + // // which continues with the match. + // 3. NOP // Std. Open Paren sequence, for possible '|' + // 4. code for parenthesized stuff. + // 5. LA_END // Cut back stack, remove saved state from step 2. + // 6. BACKTRACK // code in block succeeded, so neg. lookahead fails. + // 7. END_LA // Restore match region, in case look-ahead was using + // an alternate (transparent) region. + // Four data slots are reserved, for saving state on entry to the look-around + // 0: stack pointer on entry. + // 1: input position on entry. + // 2: fActiveStart, the active bounds start on entry. + // 3: fActiveLimit, the active bounds limit on entry. 
+ c.fixLiterals(false) + dataLoc := c.allocateData(4) + c.appendOp(urxLaStart, dataLoc) + c.appendOp(urxStateSave, 0) // dest address will be patched later. + c.appendOp(urxNop, 0) + + // On the Parentheses stack, start a new frame and add the postions + // of the StateSave and NOP. + c.parenStack = append(c.parenStack, int(c.modeFlags)) + c.parenStack = append(c.parenStack, parenNegLookahead) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-2) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1) + + // Instructions #5 - #7 will be added when the ')' is encountered. + + case doOpenLookBehind: + // Compile a (?<= look-behind open paren. + // + // Compiles to + // 0 URX_LB_START dataLoc + // 1 URX_LB_CONT dataLoc + // 2 MinMatchLen + // 3 MaxMatchLen + // 4 URX_NOP Standard '(' boilerplate. + // 5 URX_NOP Reserved slot for use with '|' ops within (block). + // 6 + // 7 URX_LB_END dataLoc # Check match len, restore input len + // 8 URX_LA_END dataLoc # Restore stack, input pos + // + // Allocate a block of matcher data, to contain (when running a match) + // 0: Stack ptr on entry + // 1: Input Index on entry + // 2: fActiveStart, the active bounds start on entry. + // 3: fActiveLimit, the active bounds limit on entry. + // 4: Start index of match current match attempt. + // The first four items must match the layout of data for LA_START / LA_END + + // Generate match code for any pending literals. + c.fixLiterals(false) + + // Allocate data space + dataLoc := c.allocateData(5) + + // Emit URX_LB_START + c.appendOp(urxLbStart, dataLoc) + + // Emit URX_LB_CONT + c.appendOp(urxLbCont, dataLoc) + c.appendOp(urxReservedOp, 0) // MinMatchLength. To be filled later. + c.appendOp(urxReservedOp, 0) // MaxMatchLength. To be filled later. + + // Emit the NOPs + c.appendOp(urxNop, 0) + c.appendOp(urxNop, 0) + + // On the Parentheses stack, start a new frame and add the postions + // of the URX_LB_CONT and the NOP. 
+ c.parenStack = append(c.parenStack, int(c.modeFlags)) + c.parenStack = append(c.parenStack, parenLookBehind) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-2) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1) + + // The final two instructions will be added when the ')' is encountered. + + case doOpenLookBehindNeg: + // Compile a (? + // 8 URX_LBN_END dataLoc # Check match len, cause a FAIL + // 9 ... + // + // Allocate a block of matcher data, to contain (when running a match) + // 0: Stack ptr on entry + // 1: Input Index on entry + // 2: fActiveStart, the active bounds start on entry. + // 3: fActiveLimit, the active bounds limit on entry. + // 4: Start index of match current match attempt. + // The first four items must match the layout of data for LA_START / LA_END + + // Generate match code for any pending literals. + c.fixLiterals(false) + + // Allocate data space + dataLoc := c.allocateData(5) + + // Emit URX_LB_START + c.appendOp(urxLbStart, dataLoc) + + // Emit URX_LBN_CONT + c.appendOp(urxLbnCount, dataLoc) + c.appendOp(urxReservedOp, 0) // MinMatchLength. To be filled later. + c.appendOp(urxReservedOp, 0) // MaxMatchLength. To be filled later. + c.appendOp(urxReservedOp, 0) // Continue Loc. To be filled later. + + // Emit the NOPs + c.appendOp(urxNop, 0) + c.appendOp(urxNop, 0) + + // On the Parentheses stack, start a new frame and add the postions + // of the URX_LB_CONT and the NOP. + c.parenStack = append(c.parenStack, int(c.modeFlags)) + c.parenStack = append(c.parenStack, parenLookBehindN) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-2) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1) + + // The final two instructions will be added when the ')' is encountered. + + case doConditionalExpr, doPerlInline: + // Conditionals such as (?(1)a:b) + // Perl inline-condtionals. (?{perl code}a|b) We're not perl, no way to do them. 
+ c.error(Unimplemented) + + case doCloseParen: + c.handleCloseParen() + if len(c.parenStack) == 0 { + // Extra close paren, or missing open paren. + c.error(MismatchedParen) + } + + case doNOP: + + case doBadOpenParenType, doRuleError: + c.error(RuleSyntax) + + case doMismatchedParenErr: + c.error(MismatchedParen) + + case doPlus: + // Normal '+' compiles to + // 1. stuff to be repeated (already built) + // 2. jmp-sav 1 + // 3. ... + // + // Or, if the item to be repeated can match a zero length string, + // 1. STO_INP_LOC data-loc + // 2. body of stuff to be repeated + // 3. JMP_SAV_X 2 + // 4. ... + + // + // Or, if the item to be repeated is simple + // 1. Item to be repeated. + // 2. LOOP_SR_I set number (assuming repeated item is a set ref) + // 3. LOOP_C stack location + topLoc := c.blockTopLoc(false) // location of item #1 + + // Check for simple constructs, which may get special optimized code. + if topLoc == len(c.out.compiledPat)-1 { + repeatedOp := c.out.compiledPat[topLoc] + + if repeatedOp.typ() == urxSetref { + // Emit optimized code for [char set]+ + c.appendOp(urxLoopSrI, repeatedOp.value()) + frameLoc := c.allocateStackData(1) + c.appendOp(urxLoopC, frameLoc) + break + } + + if repeatedOp.typ() == urxDotany || repeatedOp.typ() == urxDotanyAll || repeatedOp.typ() == urxDotanyUnix { + // Emit Optimized code for .+ operations. + loopOpI := c.buildOp(urxLoopDotI, 0) + if repeatedOp.typ() == urxDotanyAll { + // URX_LOOP_DOT_I operand is a flag indicating ". matches any" mode. + loopOpI |= 1 + } + if c.modeFlags&UnixLines != 0 { + loopOpI |= 2 + } + c.appendIns(loopOpI) + frameLoc := c.allocateStackData(1) + c.appendOp(urxLoopC, frameLoc) + break + } + } + + // General case. + + // Check for minimum match length of zero, which requires + // extra loop-breaking code. + if c.minMatchLength(topLoc, len(c.out.compiledPat)-1) == 0 { + // Zero length match is possible. + // Emit the code sequence that can handle it. 
+ c.insertOp(topLoc) + frameLoc := c.allocateStackData(1) + op := c.buildOp(urxStoInpLoc, frameLoc) + c.out.compiledPat[topLoc] = op + + c.appendOp(urxJmpSavX, topLoc+1) + } else { + // Simpler code when the repeated body must match something non-empty + c.appendOp(urxJmpSav, topLoc) + } + + case doNGPlus: + // Non-greedy '+?' compiles to + // 1. stuff to be repeated (already built) + // 2. state-save 1 + // 3. ... + topLoc := c.blockTopLoc(false) + c.appendOp(urxStateSave, topLoc) + + case doOpt: + // Normal (greedy) ? quantifier. + // Compiles to + // 1. state save 3 + // 2. body of optional block + // 3. ... + // Insert the state save into the compiled pattern, and we're done. + saveStateLoc := c.blockTopLoc(true) + saveStateOp := c.buildOp(urxStateSave, len(c.out.compiledPat)) + c.out.compiledPat[saveStateLoc] = saveStateOp + + case doNGOpt: + // Non-greedy ?? quantifier + // compiles to + // 1. jmp 4 + // 2. body of optional block + // 3 jmp 5 + // 4. state save 2 + // 5 ... + // This code is less than ideal, with two jmps instead of one, because we can only + // insert one instruction at the top of the block being iterated. + jmp1Loc := c.blockTopLoc(true) + jmp2Loc := len(c.out.compiledPat) + + jmp1Op := c.buildOp(urxJmp, jmp2Loc+1) + c.out.compiledPat[jmp1Loc] = jmp1Op + + c.appendOp(urxJmp, jmp2Loc+2) + c.appendOp(urxStateSave, jmp1Loc+1) + + case doStar: + // Normal (greedy) * quantifier. + // Compiles to + // 1. STATE_SAVE 4 + // 2. body of stuff being iterated over + // 3. JMP_SAV 2 + // 4. ... + // + // Or, if the body is a simple [Set], + // 1. LOOP_SR_I set number + // 2. LOOP_C stack location + // ... + // + // Or if this is a .* + // 1. LOOP_DOT_I (. matches all mode flag) + // 2. LOOP_C stack location + // + // Or, if the body can match a zero-length string, to inhibit infinite loops, + // 1. STATE_SAVE 5 + // 2. STO_INP_LOC data-loc + // 3. body of stuff + // 4. JMP_SAV_X 2 + // 5. ... 
+ // location of item #1, the STATE_SAVE + topLoc := c.blockTopLoc(false) + + // Check for simple *, where the construct being repeated + // compiled to single opcode, and might be optimizable. + if topLoc == len(c.out.compiledPat)-1 { + repeatedOp := c.out.compiledPat[topLoc] + + if repeatedOp.typ() == urxSetref { + // Emit optimized code for a [char set]* + loopOpI := c.buildOp(urxLoopSrI, repeatedOp.value()) + c.out.compiledPat[topLoc] = loopOpI + dataLoc := c.allocateStackData(1) + c.appendOp(urxLoopC, dataLoc) + break + } + + if repeatedOp.typ() == urxDotany || repeatedOp.typ() == urxDotanyAll || repeatedOp.typ() == urxDotanyUnix { + // Emit Optimized code for .* operations. + loopOpI := c.buildOp(urxLoopDotI, 0) + if repeatedOp.typ() == urxDotanyAll { + // URX_LOOP_DOT_I operand is a flag indicating . matches any mode. + loopOpI |= 1 + } + if (c.modeFlags & UnixLines) != 0 { + loopOpI |= 2 + } + c.out.compiledPat[topLoc] = loopOpI + dataLoc := c.allocateStackData(1) + c.appendOp(urxLoopC, dataLoc) + break + } + } + + // Emit general case code for this * + // The optimizations did not apply. + + saveStateLoc := c.blockTopLoc(true) + jmpOp := c.buildOp(urxJmpSav, saveStateLoc+1) + + // Check for minimum match length of zero, which requires + // extra loop-breaking code. + if c.minMatchLength(saveStateLoc, len(c.out.compiledPat)-1) == 0 { + c.insertOp(saveStateLoc) + dataLoc := c.allocateStackData(1) + + op := c.buildOp(urxStoInpLoc, dataLoc) + c.out.compiledPat[saveStateLoc+1] = op + jmpOp = c.buildOp(urxJmpSavX, saveStateLoc+2) + } + + // Locate the position in the compiled pattern where the match will continue + // after completing the *. (4 or 5 in the comment above) + continueLoc := len(c.out.compiledPat) + 1 + + // Put together the save state op and store it into the compiled code. 
+ saveStateOp := c.buildOp(urxStateSave, continueLoc) + c.out.compiledPat[saveStateLoc] = saveStateOp + + // Append the URX_JMP_SAV or URX_JMPX operation to the compiled pattern. + c.appendIns(jmpOp) + + case doNGStar: + // Non-greedy *? quantifier + // compiles to + // 1. JMP 3 + // 2. body of stuff being iterated over + // 3. STATE_SAVE 2 + // 4 ... + jmpLoc := c.blockTopLoc(true) // loc 1. + saveLoc := len(c.out.compiledPat) // loc 3. + jmpOp := c.buildOp(urxJmp, saveLoc) + c.out.compiledPat[jmpLoc] = jmpOp + c.appendOp(urxStateSave, jmpLoc+1) + + case doIntervalInit: + // The '{' opening an interval quantifier was just scanned. + // Init the counter varaiables that will accumulate the values as the digits + // are scanned. + c.intervalLow = 0 + c.intervalUpper = -1 + + case doIntevalLowerDigit: + // Scanned a digit from the lower value of an {lower,upper} interval + digitValue := uCharDigitValue(c.c.char) + val := int64(c.intervalLow)*10 + digitValue + if val > math.MaxInt32 { + c.error(NumberTooBig) + } else { + c.intervalLow = int(val) + } + + case doIntervalUpperDigit: + // Scanned a digit from the upper value of an {lower,upper} interval + if c.intervalUpper < 0 { + c.intervalUpper = 0 + } + digitValue := uCharDigitValue(c.c.char) + val := int64(c.intervalUpper)*10 + digitValue + if val > math.MaxInt32 { + c.error(NumberTooBig) + } else { + c.intervalUpper = int(val) + } + + case doIntervalSame: + // Scanned a single value interval like {27}. Upper = Lower. + c.intervalUpper = c.intervalLow + + case doInterval: + // Finished scanning a normal {lower,upper} interval. Generate the code for it. + if !c.compileInlineInterval() { + c.compileInterval(urxCtrInit, utxCtrLoop) + } + + case doPossessiveInterval: + // Finished scanning a Possessive {lower,upper}+ interval. Generate the code for it. + + // Remember the loc for the top of the block being looped over. 
+ // (Can not reserve a slot in the compiled pattern at this time, because + // compileInterval needs to reserve also, and blockTopLoc can only reserve + // once per block.) + topLoc := c.blockTopLoc(false) + + // Produce normal looping code. + c.compileInterval(urxCtrInit, utxCtrLoop) + + // Surround the just-emitted normal looping code with a STO_SP ... LD_SP + // just as if the loop was inclosed in atomic parentheses. + + // First the STO_SP before the start of the loop + c.insertOp(topLoc) + + varLoc := c.allocateData(1) // Reserve a data location for saving the + op := c.buildOp(urxStoSp, varLoc) + c.out.compiledPat[topLoc] = op + + var loopOp instruction + loopOp, c.out.compiledPat = stackPop(c.out.compiledPat) + if loopOp.typ() != utxCtrLoop || loopOp.value() != topLoc { + panic("bad instruction at the end of compiled pattern") + } + + loopOp++ // point LoopOp after the just-inserted STO_SP + c.appendIns(loopOp) + + // Then the LD_SP after the end of the loop + c.appendOp(urxLdSp, varLoc) + + case doNGInterval: + // Finished scanning a non-greedy {lower,upper}? interval. Generate the code for it. + c.compileInterval(urxCtrInitNg, urxCtrLoopNg) + + case doIntervalError: + c.error(BadInterval) + + case doLiteralChar: + // We've just scanned a "normal" character from the pattern, + c.literalChar(c.c.char) + + case doEscapedLiteralChar: + // We've just scanned an backslashed escaped character with no + // special meaning. It represents itself. + if (c.modeFlags&ErrorOnUnknownEscapes) != 0 && ((c.c.char >= 0x41 && c.c.char <= 0x5A) || /* in [A-Z] */ (c.c.char >= 0x61 && c.c.char <= 0x7a)) { // in [a-z] + c.error(BadEscapeSequence) + } + c.literalChar(c.c.char) + + case doDotAny: + // scanned a ".", match any single character. 
+ c.fixLiterals(false) + if (c.modeFlags & DotAll) != 0 { + c.appendOp(urxDotanyAll, 0) + } else if (c.modeFlags & UnixLines) != 0 { + c.appendOp(urxDotanyUnix, 0) + } else { + c.appendOp(urxDotany, 0) + } + + case doCaret: + c.fixLiterals(false) + if (c.modeFlags&Multiline) == 0 && (c.modeFlags&UnixLines) == 0 { + c.appendOp(urxCaret, 0) + } else if (c.modeFlags&Multiline) != 0 && (c.modeFlags&UnixLines) == 0 { + c.appendOp(urxCaretM, 0) + } else if (c.modeFlags&Multiline) == 0 && (c.modeFlags&UnixLines) != 0 { + c.appendOp(urxCaret, 0) // Only testing true start of input. + } else if (c.modeFlags&Multiline) != 0 && (c.modeFlags&UnixLines) != 0 { + c.appendOp(urxCaretMUnix, 0) + } + + case doDollar: + c.fixLiterals(false) + if (c.modeFlags&Multiline) == 0 && (c.modeFlags&UnixLines) == 0 { + c.appendOp(urxDollar, 0) + } else if (c.modeFlags&Multiline) != 0 && (c.modeFlags&UnixLines) == 0 { + c.appendOp(urxDollarM, 0) + } else if (c.modeFlags&Multiline) == 0 && (c.modeFlags&UnixLines) != 0 { + c.appendOp(urxDollarD, 0) + } else if (c.modeFlags&Multiline) != 0 && (c.modeFlags&UnixLines) != 0 { + c.appendOp(urxDollarMd, 0) + } + + case doBackslashA: + c.fixLiterals(false) + c.appendOp(urxCaret, 0) + + case doBackslashB: + if !BreakIteration { + if (c.modeFlags & UWord) != 0 { + c.error(Unimplemented) + } + } + c.fixLiterals(false) + if c.modeFlags&UWord != 0 { + c.appendOp(urxBackslashBu, 1) + } else { + c.appendOp(urxBackslashB, 1) + } + + case doBackslashb: + if !BreakIteration { + if (c.modeFlags & UWord) != 0 { + c.error(Unimplemented) + } + } + c.fixLiterals(false) + if c.modeFlags&UWord != 0 { + c.appendOp(urxBackslashBu, 0) + } else { + c.appendOp(urxBackslashB, 0) + } + + case doBackslashD: + c.fixLiterals(false) + c.appendOp(urxBackslashD, 1) + + case doBackslashd: + c.fixLiterals(false) + c.appendOp(urxBackslashD, 0) + + case doBackslashG: + c.fixLiterals(false) + c.appendOp(urxBackslashG, 0) + + case doBackslashH: + c.fixLiterals(false) + 
c.appendOp(urxBackslashH, 1) + + case doBackslashh: + c.fixLiterals(false) + c.appendOp(urxBackslashH, 0) + + case doBackslashR: + c.fixLiterals(false) + c.appendOp(urxBackslashR, 0) + + case doBackslashS: + c.fixLiterals(false) + c.appendOp(urxStatSetrefN, urxIsspaceSet) + + case doBackslashs: + c.fixLiterals(false) + c.appendOp(urxStaticSetref, urxIsspaceSet) + + case doBackslashV: + c.fixLiterals(false) + c.appendOp(urxBackslashV, 1) + + case doBackslashv: + c.fixLiterals(false) + c.appendOp(urxBackslashV, 0) + + case doBackslashW: + c.fixLiterals(false) + c.appendOp(urxStatSetrefN, urxIswordSet) + + case doBackslashw: + c.fixLiterals(false) + c.appendOp(urxStaticSetref, urxIswordSet) + + case doBackslashX: + if !BreakIteration { + // Grapheme Cluster Boundary requires ICU break iteration. + c.error(Unimplemented) + } + c.fixLiterals(false) + c.appendOp(urxBackslashX, 0) + + case doBackslashZ: + c.fixLiterals(false) + c.appendOp(urxDollar, 0) + + case doBackslashz: + c.fixLiterals(false) + c.appendOp(urxBackslashZ, 0) + + case doEscapeError: + c.error(BadEscapeSequence) + + case doExit: + c.fixLiterals(false) + return false + + case doProperty: + c.fixLiterals(false) + theSet := c.scanProp() + c.compileSet(theSet) + + case doNamedChar: + ch := c.scanNamedChar() + c.literalChar(ch) + + case doBackRef: + // BackReference. Somewhat unusual in that the front-end can not completely parse + // the regular expression, because the number of digits to be consumed + // depends on the number of capture groups that have been defined. So + // we have to do it here instead. + numCaptureGroups := len(c.out.groupMap) + groupNum := int64(0) + ch := c.c.char + + for { + // Loop once per digit, for max allowed number of digits in a back reference. 
+ digit := uCharDigitValue(ch) + groupNum = groupNum*10 + digit + if groupNum >= int64(numCaptureGroups) { + break + } + ch = c.peekCharLL() + if !staticRuleSet[ruleSetDigitChar-128].ContainsRune(ch) { + break + } + c.nextCharLL() + } + + // Scan of the back reference in the source regexp is complete. Now generate + // the compiled code for it. + // Because capture groups can be forward-referenced by back-references, + // we fill the operand with the capture group number. At the end + // of compilation, it will be changed to the variable's location. + if groupNum == 0 { + panic("\\0 begins an octal escape sequence, and shouldn't enter this code path at all") + } + c.fixLiterals(false) + if (c.modeFlags & CaseInsensitive) != 0 { + c.appendOp(urxBackrefI, int(groupNum)) + } else { + c.appendOp(urxBackref, int(groupNum)) + } + + case doBeginNamedBackRef: + if c.captureName != nil { + panic("should not replace capture name") + } + c.captureName = &strings.Builder{} + + case doContinueNamedBackRef: + c.captureName.WriteRune(c.c.char) + + case doCompleteNamedBackRef: + { + groupNumber := c.out.namedCaptureMap[c.captureName.String()] + if groupNumber == 0 { + // Group name has not been defined. + // Could be a forward reference. If we choose to support them at some + // future time, extra mechanism will be required at this point. + c.error(InvalidCaptureGroupName) + } else { + // Given the number, handle identically to a \n numbered back reference. + // See comments above, under doBackRef + c.fixLiterals(false) + if (c.modeFlags & CaseInsensitive) != 0 { + c.appendOp(urxBackrefI, groupNumber) + } else { + c.appendOp(urxBackref, groupNumber) + } + } + c.captureName = nil + } + + case doPossessivePlus: + // Possessive ++ quantifier. + // Compiles to + // 1. STO_SP + // 2. body of stuff being iterated over + // 3. STATE_SAVE 5 + // 4. JMP 2 + // 5. LD_SP + // 6. ... + // + // Note: TODO: This is pretty inefficient. 
A mass of saved state is built up + // then unconditionally discarded. Perhaps introduce a new opcode. Ticket 6056 + // + // Emit the STO_SP + topLoc := c.blockTopLoc(true) + stoLoc := c.allocateData(1) // Reserve the data location for storing save stack ptr. + op := c.buildOp(urxStoSp, stoLoc) + c.out.compiledPat[topLoc] = op + + // Emit the STATE_SAVE + c.appendOp(urxStateSave, len(c.out.compiledPat)+2) + + // Emit the JMP + c.appendOp(urxJmp, topLoc+1) + + // Emit the LD_SP + c.appendOp(urxLdSp, stoLoc) + + case doPossessiveStar: + // Possessive *+ quantifier. + // Compiles to + // 1. STO_SP loc + // 2. STATE_SAVE 5 + // 3. body of stuff being iterated over + // 4. JMP 2 + // 5. LD_SP loc + // 6 ... + // TODO: do something to cut back the state stack each time through the loop. + // Reserve two slots at the top of the block. + topLoc := c.blockTopLoc(true) + c.insertOp(topLoc) + + // emit STO_SP loc + stoLoc := c.allocateData(1) // Reserve the data location for storing save stack ptr. + op := c.buildOp(urxStoSp, stoLoc) + c.out.compiledPat[topLoc] = op + + // Emit the SAVE_STATE 5 + L7 := len(c.out.compiledPat) + 1 + op = c.buildOp(urxStateSave, L7) + c.out.compiledPat[topLoc+1] = op + + // Append the JMP operation. + c.appendOp(urxJmp, topLoc+1) + + // Emit the LD_SP loc + c.appendOp(urxLdSp, stoLoc) + + case doPossessiveOpt: + // Possessive ?+ quantifier. + // Compiles to + // 1. STO_SP loc + // 2. SAVE_STATE 5 + // 3. body of optional block + // 4. LD_SP loc + // 5. ... + // + // Reserve two slots at the top of the block. + topLoc := c.blockTopLoc(true) + c.insertOp(topLoc) + + // Emit the STO_SP + stoLoc := c.allocateData(1) // Reserve the data location for storing save stack ptr. 
+ op := c.buildOp(urxStoSp, stoLoc) + c.out.compiledPat[topLoc] = op + + // Emit the SAVE_STATE + continueLoc := len(c.out.compiledPat) + 1 + op = c.buildOp(urxStateSave, continueLoc) + c.out.compiledPat[topLoc+1] = op + + // Emit the LD_SP + c.appendOp(urxLdSp, stoLoc) + + case doBeginMatchMode: + c.newModeFlags = c.modeFlags + c.setModeFlag = true + case doMatchMode: // (?i) and similar + var bit RegexpFlag + switch c.c.char { + case 0x69: /* 'i' */ + bit = CaseInsensitive + case 0x64: /* 'd' */ + bit = UnixLines + case 0x6d: /* 'm' */ + bit = Multiline + case 0x73: /* 's' */ + bit = DotAll + case 0x75: /* 'u' */ + bit = 0 /* Unicode casing */ + case 0x77: /* 'w' */ + bit = UWord + case 0x78: /* 'x' */ + bit = Comments + case 0x2d: /* '-' */ + c.setModeFlag = false + default: + // Should never happen. Other chars are filtered out by the scanner. + panic("unreachable") + } + if c.setModeFlag { + c.newModeFlags |= bit + } else { + c.newModeFlags &= ^bit + } + + case doSetMatchMode: + // Emit code to match any pending literals, using the not-yet changed match mode. + c.fixLiterals(false) + + // We've got a (?i) or similar. The match mode is being changed, but + // the change is not scoped to a parenthesized block. + if c.newModeFlags >= 0 { + panic("cNewModeFlags not properly initialized") + } + c.modeFlags = c.newModeFlags + + case doMatchModeParen: + // We've got a (?i: or similar. Begin a parenthesized block, save old + // mode flags so they can be restored at the close of the block. + // + // Compile to a + // - NOP, which later may be replaced by a save-state if the + // parenthesized group gets a * quantifier, followed by + // - NOP, which may later be replaced by a save-state if there + // is an '|' alternation within the parens. 
+ c.fixLiterals(false) + c.appendOp(urxNop, 0) + c.appendOp(urxNop, 0) + + // On the Parentheses stack, start a new frame and add the postions + // of the two NOPs (a normal non-capturing () frame, except for the + // saving of the orignal mode flags.) + c.parenStack = append(c.parenStack, int(c.modeFlags)) + c.parenStack = append(c.parenStack, parenFlags) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-2) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1) + + // Set the current mode flags to the new values. + if c.newModeFlags >= 0 { + panic("cNewModeFlags not properly initialized") + } + c.modeFlags = c.newModeFlags + + case doBadModeFlag: + c.error(InvalidFlag) + + case doSuppressComments: + // We have just scanned a '(?'. We now need to prevent the character scanner from + // treating a '#' as a to-the-end-of-line comment. + // (This Perl compatibility just gets uglier and uglier to do...) + c.eolComments = false + + case doSetAddAmp: + set := c.setStack[len(c.setStack)-1] + set.AddRune(chAmp) + + case doSetAddDash: + set := c.setStack[len(c.setStack)-1] + set.AddRune(chDash) + + case doSetBackslashs: + set := c.setStack[len(c.setStack)-1] + set.AddAll(staticPropertySets[urxIsspaceSet]) + + case doSetBackslashS: + sset := uset.New() + sset.AddAll(staticPropertySets[urxIsspaceSet]) // TODO: add latin1 spaces + sset.Complement() + + set := c.setStack[len(c.setStack)-1] + set.AddAll(sset) + + case doSetBackslashd: + set := c.setStack[len(c.setStack)-1] + c.err = uprops.AddCategory(set, uchar.GcNdMask) + + case doSetBackslashD: + digits := uset.New() + c.err = uprops.ApplyIntPropertyValue(digits, uprops.UCharGeneralCategoryMask, int32(uchar.GcNdMask)) + digits.Complement() + set := c.setStack[len(c.setStack)-1] + set.AddAll(digits) + + case doSetBackslashh: + h := uset.New() + c.err = uprops.ApplyIntPropertyValue(h, uprops.UCharGeneralCategoryMask, int32(uchar.GcZsMask)) + h.AddRune(9) // Tab + + set := c.setStack[len(c.setStack)-1] + 
set.AddAll(h) + + case doSetBackslashH: + h := uset.New() + c.err = uprops.ApplyIntPropertyValue(h, uprops.UCharGeneralCategoryMask, int32(uchar.GcZsMask)) + h.AddRune(9) // Tab + h.Complement() + + set := c.setStack[len(c.setStack)-1] + set.AddAll(h) + + case doSetBackslashv: + set := c.setStack[len(c.setStack)-1] + set.AddRuneRange(0x0a, 0x0d) // add range + set.AddRune(0x85) + set.AddRuneRange(0x2028, 0x2029) + + case doSetBackslashV: + v := uset.New() + v.AddRuneRange(0x0a, 0x0d) // add range + v.AddRune(0x85) + v.AddRuneRange(0x2028, 0x2029) + v.Complement() + + set := c.setStack[len(c.setStack)-1] + set.AddAll(v) + + case doSetBackslashw: + set := c.setStack[len(c.setStack)-1] + set.AddAll(staticPropertySets[urxIswordSet]) + + case doSetBackslashW: + sset := uset.New() + sset.AddAll(staticPropertySets[urxIswordSet]) + sset.Complement() + + set := c.setStack[len(c.setStack)-1] + set.AddAll(sset) + + case doSetBegin: + c.fixLiterals(false) + c.setStack = append(c.setStack, uset.New()) + c.setOpStack = append(c.setOpStack, setStart) + if (c.modeFlags & CaseInsensitive) != 0 { + c.setOpStack = append(c.setOpStack, setCaseClose) + } + + case doSetBeginDifference1: + // We have scanned something like [[abc]-[ + // Set up a new UnicodeSet for the set beginning with the just-scanned '[' + // Push a Difference operator, which will cause the new set to be subtracted from what + // went before once it is created. + c.setPushOp(setDifference1) + c.setOpStack = append(c.setOpStack, setStart) + if (c.modeFlags & CaseInsensitive) != 0 { + c.setOpStack = append(c.setOpStack, setCaseClose) + } + + case doSetBeginIntersection1: + // We have scanned something like [[abc]&[ + // Need both the '&' operator and the open '[' operator. 
+ c.setPushOp(setIntersection1) + c.setOpStack = append(c.setOpStack, setStart) + if (c.modeFlags & CaseInsensitive) != 0 { + c.setOpStack = append(c.setOpStack, setCaseClose) + } + + case doSetBeginUnion: + // We have scanned something like [[abc][ + // Need to handle the union operation explicitly [[abc] | [ + c.setPushOp(setUnion) + c.setOpStack = append(c.setOpStack, setStart) + if (c.modeFlags & CaseInsensitive) != 0 { + c.setOpStack = append(c.setOpStack, setCaseClose) + } + + case doSetDifference2: + // We have scanned something like [abc-- + // Consider this to unambiguously be a set difference operator. + c.setPushOp(setDifference2) + + case doSetEnd: + // Have encountered the ']' that closes a set. + // Force the evaluation of any pending operations within this set, + // leave the completed set on the top of the set stack. + c.setEval(setEnd) + var start setOperation + start, c.setOpStack = stackPop(c.setOpStack) + if start != setStart { + panic("bad set operation in stack") + } + + case doSetFinish: + // Finished a complete set expression, including all nested sets. + // The close bracket has already triggered clearing out pending set operators, + // the operator stack should be empty and the operand stack should have just + // one entry, the result set. + if len(c.setOpStack) > 0 { + panic("expected setOpStack to be empty") + } + var set *uset.UnicodeSet + set, c.setStack = stackPop(c.setStack) + c.compileSet(set) + + case doSetIntersection2: + // Have scanned something like [abc&& + c.setPushOp(setIntersection2) + + case doSetLiteral: + // Union the just-scanned literal character into the set being built. + // This operation is the highest precedence set operation, so we can always do + // it immediately, without waiting to see what follows. 
It is necessary to perform + // any pending '-' or '&' operation first, because these have the same precedence + // as union-ing in a literal' + c.setEval(setUnion) + set := c.setStack[len(c.setStack)-1] + set.AddRune(c.c.char) + c.lastSetLiteral = c.c.char + + case doSetLiteralEscaped: + // A back-slash escaped literal character was encountered. + // Processing is the same as with setLiteral, above, with the addition of + // the optional check for errors on escaped ASCII letters. + if (c.modeFlags&ErrorOnUnknownEscapes) != 0 && + ((c.c.char >= 0x41 && c.c.char <= 0x5A) || // in [A-Z] + (c.c.char >= 0x61 && c.c.char <= 0x7a)) { // in [a-z] + c.error(BadEscapeSequence) + } + c.setEval(setUnion) + set := c.setStack[len(c.setStack)-1] + set.AddRune(c.c.char) + c.lastSetLiteral = c.c.char + + case doSetNamedChar: + // Scanning a \N{UNICODE CHARACTER NAME} + // Aside from the source of the character, the processing is identical to doSetLiteral, + // above. + ch := c.scanNamedChar() + c.setEval(setUnion) + set := c.setStack[len(c.setStack)-1] + set.AddRune(ch) + c.lastSetLiteral = ch + + case doSetNamedRange: + // We have scanned literal-\N{CHAR NAME}. Add the range to the set. + // The left character is already in the set, and is saved in fLastSetLiteral. + // The right side needs to be picked up, the scan is at the 'N'. + // Lower Limit > Upper limit being an error matches both Java + // and ICU UnicodeSet behavior. + ch := c.scanNamedChar() + if c.err == nil && (c.lastSetLiteral == -1 || c.lastSetLiteral > ch) { + c.error(InvalidRange) + } + set := c.setStack[len(c.setStack)-1] + set.AddRuneRange(c.lastSetLiteral, ch) + c.lastSetLiteral = ch + + case doSetNegate: + // Scanned a '^' at the start of a set. + // Push the negation operator onto the set op stack. + // A twist for case-insensitive matching: + // the case closure operation must happen _before_ negation. + // But the case closure operation will already be on the stack if it's required. 
+ // This requires checking for case closure, and swapping the stack order + // if it is present. + tosOp := c.setOpStack[len(c.setOpStack)-1] + if tosOp == setCaseClose { + _, c.setOpStack = stackPop(c.setOpStack) + c.setOpStack = append(c.setOpStack, setNegation) + c.setOpStack = append(c.setOpStack, setCaseClose) + } else { + c.setOpStack = append(c.setOpStack, setNegation) + } + + case doSetNoCloseError: + c.error(MissingCloseBracket) + + case doSetOpError: + c.error(RuleSyntax) // -- or && at the end of a set. Illegal. + + case doSetPosixProp: + if set := c.scanPosixProp(); set != nil { + c.setStack[len(c.setStack)-1].AddAll(set) + } + + case doSetProp: + // Scanned a \p \P within [brackets]. + if set := c.scanProp(); set != nil { + c.setStack[len(c.setStack)-1].AddAll(set) + } + + case doSetRange: + // We have scanned literal-literal. Add the range to the set. + // The left character is already in the set, and is saved in fLastSetLiteral. + // The right side is the current character. + // Lower Limit > Upper limit being an error matches both Java + // and ICU UnicodeSet behavior. 
+ + if c.lastSetLiteral == -1 || c.lastSetLiteral > c.c.char { + c.error(InvalidRange) + } + c.setStack[len(c.setStack)-1].AddRuneRange(c.lastSetLiteral, c.c.char) + + default: + panic("unexpected OP in parser") + } + + return c.err == nil +} + +func uCharDigitValue(char rune) int64 { + if char >= '0' && char <= '9' { + return int64(char - '0') + } + return -1 +} + +func stackPop[T any](stack []T) (T, []T) { + var out T + if len(stack) > 0 { + out = stack[len(stack)-1] + stack = stack[:len(stack)-1] + } + return out, stack +} + +func (c *compiler) error(e CompileErrorCode) { + c.err = &CompileError{ + Code: e, + Line: c.lineNum, + Offset: c.charNum, + Context: c.out.pattern, + } +} + +func (c *compiler) stripNOPs() { + if c.err != nil { + return + } + + end := len(c.out.compiledPat) + deltas := make([]int, 0, end) + + // Make a first pass over the code, computing the amount that things + // will be offset at each location in the original code. + var loc, d int + for loc = 0; loc < end; loc++ { + deltas = append(deltas, d) + op := c.out.compiledPat[loc] + if op.typ() == urxNop { + d++ + } + } + + // Make a second pass over the code, removing the NOPs by moving following + // code up, and patching operands that refer to code locations that + // are being moved. The array of offsets from the first step is used + // to compute the new operand values. + var src, dst int + for src = 0; src < end; src++ { + op := c.out.compiledPat[src] + opType := op.typ() + + switch opType { + case urxNop: + // skip + + case urxStateSave, + urxJmp, + utxCtrLoop, + urxCtrLoopNg, + urxRelocOprnd, + urxJmpx, + urxJmpSav, + urxJmpSavX: + // These are instructions with operands that refer to code locations. 
+ operandAddress := op.value() + fixedOperandAddress := operandAddress - deltas[operandAddress] + op = c.buildOp(opType, fixedOperandAddress) + c.out.compiledPat[dst] = op + dst++ + + case urxBackref, urxBackrefI: + where := op.value() + if where > len(c.out.groupMap) { + c.error(InvalidBackRef) + break + } + + where = int(c.out.groupMap[where-1]) + op = c.buildOp(opType, where) + c.out.compiledPat[dst] = op + dst++ + c.out.needsAltInput = true + + case urxReservedOp, + urxReservedOpN, + urxBacktrack, + urxEnd, + urxOnechar, + urxString, + urxStringLen, + urxStartCapture, + urxEndCapture, + urxStaticSetref, + urxStatSetrefN, + urxSetref, + urxDotany, + urxFail, + urxBackslashB, + urxBackslashBu, + urxBackslashG, + urxBackslashX, + urxBackslashZ, + urxDotanyAll, + urxBackslashD, + urxCaret, + urxDollar, + urxCtrInit, + urxCtrInitNg, + urxDotanyUnix, + urxStoSp, + urxLdSp, + urxStoInpLoc, + urxLaStart, + urxLaEnd, + urcOnecharI, + urxStringI, + urxDollarM, + urxCaretM, + urxCaretMUnix, + urxLbStart, + urxLbCont, + urxLbEnd, + urxLbnCount, + urxLbnEnd, + urxLoopSrI, + urxLoopDotI, + urxLoopC, + urxDollarD, + urxDollarMd, + urxBackslashH, + urxBackslashR, + urxBackslashV: + // These instructions are unaltered by the relocation. + c.out.compiledPat[dst] = op + dst++ + + default: + // Some op is unaccounted for. + panic("unreachable") + } + } + + c.out.compiledPat = c.out.compiledPat[:dst] +} + +func (c *compiler) matchStartType() { + var loc int // Location in the pattern of the current op being processed. + var currentLen int32 // Minimum length of a match to this point (loc) in the pattern + var numInitialStrings int // Number of strings encountered that could match at start. + var atStart = true // True if no part of the pattern yet encountered + // could have advanced the position in a match. 
// matchStartType analyzes the compiled pattern to determine how candidate
// match start positions can be found quickly at run time. It computes the
// set of possible first characters (initialChars), possibly a literal
// starting string, and classifies the result into startStart / startLine /
// startString / startChar / startSet / startNoInfo on c.out.startType.
func (c *compiler) matchStartType() {
	var loc int               // Location in the pattern of the current op being processed.
	var currentLen int32      // Minimum length of a match to this point (loc) in the pattern
	var numInitialStrings int // Number of strings encountered that could match at start.
	var atStart = true        // True if no part of the pattern yet encountered
	// could have advanced the position in a match.
	// (Maximum match length so far == 0)

	// forwardedLength is a vector holding minimum-match-length values that
	// are propagated forward in the pattern by JMP or STATE_SAVE operations.
	// It must be one longer than the pattern being checked because some ops
	// will jmp to a end-of-block+1 location from within a block, and we must
	// count those when checking the block.
	end := len(c.out.compiledPat)
	forwardedLength := make([]int32, end+1)

	for loc = 3; loc < end; loc++ {
		forwardedLength[loc] = math.MaxInt32
	}

	for loc = 3; loc < end; loc++ {
		op := c.out.compiledPat[loc]
		opType := op.typ()

		// The loop is advancing linearly through the pattern.
		// If the op we are now at was the destination of a branch in the pattern,
		// and that path has a shorter minimum length than the current accumulated value,
		// replace the current accumulated value.
		if forwardedLength[loc] < currentLen {
			currentLen = forwardedLength[loc]
		}

		switch opType {
		// Ops that don't change the total length matched
		case urxReservedOp,
			urxEnd,
			urxFail,
			urxStringLen,
			urxNop,
			urxStartCapture,
			urxEndCapture,
			urxBackslashB,
			urxBackslashBu,
			urxBackslashG,
			urxBackslashZ,
			urxDollar,
			urxDollarM,
			urxDollarD,
			urxDollarMd,
			urxRelocOprnd,
			urxStoInpLoc,
			urxBackref, // BackRef. Must assume that it might be a zero length match
			urxBackrefI,
			urxStoSp, // Setup for atomic or possessive blocks. Doesn't change what can match.
			urxLdSp:
			// skip

		case urxCaret:
			// '^' anchored to absolute start of input.
			if atStart {
				c.out.startType = startStart
			}

		case urxCaretM, urxCaretMUnix:
			// '^' in multi-line mode: anchored to start of a line.
			if atStart {
				c.out.startType = startLine
			}

		case urxOnechar:
			if currentLen == 0 {
				// This character could appear at the start of a match.
				// Add it to the set of possible starting characters.
				c.out.initialChars.AddRune(op.value32())
				numInitialStrings += 2
			}
			currentLen = safeIncrement(currentLen, 1)
			atStart = false

		case urxSetref:
			if currentLen == 0 {
				sn := op.value()
				set := c.out.sets[sn]
				c.out.initialChars.AddAll(set)
				numInitialStrings += 2
			}
			currentLen = safeIncrement(currentLen, 1)
			atStart = false

		case urxLoopSrI:
			// [Set]*, like a SETREF, above, in what it can match,
			// but may not match at all, so currentLen is not incremented.
			if currentLen == 0 {
				sn := op.value()
				set := c.out.sets[sn]
				c.out.initialChars.AddAll(set)
				numInitialStrings += 2
			}
			atStart = false

		case urxLoopDotI:
			if currentLen == 0 {
				// .* at the start of a pattern.
				// Any character can begin the match.
				c.out.initialChars.Clear()
				c.out.initialChars.Complement()
				numInitialStrings += 2
			}
			atStart = false

		case urxStaticSetref:
			if currentLen == 0 {
				sn := op.value()
				c.out.initialChars.AddAll(staticPropertySets[sn])
				numInitialStrings += 2
			}
			currentLen = safeIncrement(currentLen, 1)
			atStart = false

		case urxStatSetrefN:
			// Negated static set: complement before adding.
			if currentLen == 0 {
				sn := op.value()
				sc := uset.New()
				sc.AddAll(staticPropertySets[sn])
				sc.Complement()

				c.out.initialChars.AddAll(sc)
				numInitialStrings += 2
			}
			currentLen = safeIncrement(currentLen, 1)
			atStart = false

		case urxBackslashD:
			// Digit Char
			if currentLen == 0 {
				s := uset.New()
				c.err = uprops.ApplyIntPropertyValue(s, uprops.UCharGeneralCategoryMask, int32(uchar.GcNdMask))
				if op.value() != 0 {
					// Non-zero operand means the negated form (\D).
					s.Complement()
				}
				c.out.initialChars.AddAll(s)
				numInitialStrings += 2
			}
			currentLen = safeIncrement(currentLen, 1)
			atStart = false

		case urxBackslashH:
			// Horiz white space
			if currentLen == 0 {
				s := uset.New()
				c.err = uprops.ApplyIntPropertyValue(s, uprops.UCharGeneralCategoryMask, int32(uchar.GcZsMask))
				s.AddRune(9) // Tab
				if op.value() != 0 {
					// Negated form (\H).
					s.Complement()
				}
				c.out.initialChars.AddAll(s)
				numInitialStrings += 2
			}
			currentLen = safeIncrement(currentLen, 1)
			atStart = false

		case urxBackslashR, // Any line ending sequence
			urxBackslashV: // Any line ending code point, with optional negation
			if currentLen == 0 {
				s := uset.New()
				s.AddRuneRange(0x0a, 0x0d) // add range
				s.AddRune(0x85)
				s.AddRuneRange(0x2028, 0x2029)
				if op.value() != 0 {
					// Complement option applies to URX_BACKSLASH_V only.
					s.Complement()
				}
				c.out.initialChars.AddAll(s)
				numInitialStrings += 2
			}
			currentLen = safeIncrement(currentLen, 1)
			atStart = false

		case urcOnecharI:
			// Case Insensitive Single Character.
			if currentLen == 0 {
				ch := op.value32()
				if uprops.HasBinaryProperty(ch, uprops.UCharCaseSensitive) {
					starters := uset.New()
					starters.AddRuneRange(ch, ch)
					starters.CloseOver(uset.CaseInsensitive)
					// findCaseInsensitiveStarters(c, &starters);
					// For ONECHAR_I, no need to worry about text chars that expand on folding into
					// strings. The expanded folding can't match the pattern.
					c.out.initialChars.AddAll(starters)
				} else {
					// Char has no case variants. Just add it as-is to the
					// set of possible starting chars.
					c.out.initialChars.AddRune(ch)
				}
				numInitialStrings += 2
			}
			currentLen = safeIncrement(currentLen, 1)
			atStart = false

		case urxBackslashX, // Grapheme Cluster. Minimum is 1, max unbounded.
			urxDotanyAll, // . matches one or two.
			urxDotany,
			urxDotanyUnix:
			if currentLen == 0 {
				// These constructs are all bad news when they appear at the start
				// of a match. Any character can begin the match.
				c.out.initialChars.Clear()
				c.out.initialChars.Complement()
				numInitialStrings += 2
			}
			currentLen = safeIncrement(currentLen, 1)
			atStart = false

		case urxJmpx:
			loc++ // Except for extra operand on URX_JMPX, same as URX_JMP.
			fallthrough

		case urxJmp:
			jmpDest := op.value()
			if jmpDest < loc {
				// Loop of some kind. Can safely ignore, the worst that will happen
				// is that we understate the true minimum length
				currentLen = forwardedLength[loc+1]
			} else {
				// Forward jump. Propagate the current min length to the target loc of the jump.
				if forwardedLength[jmpDest] > currentLen {
					forwardedLength[jmpDest] = currentLen
				}
			}
			atStart = false

		case urxJmpSav,
			urxJmpSavX:
			// Combo of state save to the next loc, + jmp backwards.
			// Net effect on min. length computation is nothing.
			atStart = false

		case urxBacktrack:
			// Fails are kind of like a branch, except that the min length was
			// propagated already, by the state save.
			currentLen = forwardedLength[loc+1]
			atStart = false

		case urxStateSave:
			// State Save, for forward jumps, propagate the current minimum.
			// of the state save.
			jmpDest := op.value()
			if jmpDest > loc {
				if currentLen < forwardedLength[jmpDest] {
					forwardedLength[jmpDest] = (currentLen)
				}
			}
			atStart = false

		case urxString:
			loc++
			stringLenOp := c.out.compiledPat[loc]
			stringLen := stringLenOp.value()
			if currentLen == 0 {
				// Add the starting character of this string to the set of possible starting
				// characters for this pattern.
				stringStartIdx := op.value()
				ch := c.out.literalText[stringStartIdx]
				c.out.initialChars.AddRune(ch)

				// Remember this string. After the entire pattern has been checked,
				// if nothing else is identified that can start a match, we'll use it.
				numInitialStrings++
				c.out.initialStringIdx = stringStartIdx
				c.out.initialStringLen = stringLen
			}

			currentLen = safeIncrement(currentLen, stringLen)
			atStart = false

		case urxStringI:
			// Case-insensitive string. Unlike exact-match strings, we won't
			// attempt a string search for possible match positions. But we
			// do update the set of possible starting characters.
			loc++
			stringLenOp := c.out.compiledPat[loc]
			stringLen := stringLenOp.value()
			if currentLen == 0 {
				// Add the starting character of this string to the set of possible starting
				// characters for this pattern.
				stringStartIdx := op.value()
				ch := c.out.literalText[stringStartIdx]
				s := uset.New()
				c.findCaseInsensitiveStarters(ch, s)
				c.out.initialChars.AddAll(s)
				numInitialStrings += 2 // Matching on an initial string not possible.
			}
			currentLen = safeIncrement(currentLen, stringLen)
			atStart = false

		case urxCtrInit,
			urxCtrInitNg:
			// Loop Init Ops. These don't change the min length, but they are 4 word ops
			// so location must be updated accordingly.
			// Loop Init Ops.
			// If the min loop count == 0
			// move loc forwards to the end of the loop, skipping over the body.
			// If the min count is > 0,
			// continue normal processing of the body of the loop.
			loopEndLoc := c.out.compiledPat[loc+1].value()
			minLoopCount := int(c.out.compiledPat[loc+2])
			if minLoopCount == 0 {
				// Min Loop Count of 0, treat like a forward branch and
				// move the current minimum length up to the target
				// (end of loop) location.
				if forwardedLength[loopEndLoc] > currentLen {
					forwardedLength[loopEndLoc] = currentLen
				}
			}
			loc += 3 // Skips over operands of CTR_INIT
			atStart = false

		case utxCtrLoop,
			urxCtrLoopNg:
			// Loop ops.
			// The jump is conditional, backwards only.
			atStart = false

		case urxLoopC:
			// More loop ops. These state-save to themselves.
			// don't change the minimum match
			atStart = false

		case urxLaStart,
			urxLbStart:
			// Look-around. Scan forward until the matching look-ahead end,
			// without processing the look-around block. This is overly pessimistic.

			// Keep track of the nesting depth of look-around blocks. Boilerplate code for
			// lookahead contains two LA_END instructions, so count goes up by two
			// for each LA_START.
			var depth int
			if opType == urxLaStart {
				depth = 2
			} else {
				depth = 1
			}
			for {
				loc++
				op = c.out.compiledPat[loc]
				if op.typ() == urxLaStart {
					depth += 2
				}
				if op.typ() == urxLbStart {
					depth++
				}
				if op.typ() == urxLaEnd || op.typ() == urxLbnEnd {
					depth--
					if depth == 0 {
						break
					}
				}
				if op.typ() == urxStateSave {
					// Need this because neg lookahead blocks will FAIL to outside
					// of the block.
					jmpDest := op.value()
					if jmpDest > loc {
						if currentLen < forwardedLength[jmpDest] {
							forwardedLength[jmpDest] = (currentLen)
						}
					}
				}
			}

		case urxLaEnd,
			urxLbCont,
			urxLbEnd,
			urxLbnCount,
			urxLbnEnd:
			// These are skipped over by the URX_LA_START / URX_LB_START scan above.
			panic("should be consumed in URX_LA_START")

		default:
			panic("unreachable")
		}
	}

	// Sort out what we should check for when looking for candidate match start positions.
	// In order of preference,
	// 1. Start of input text buffer.
	// 2. A literal string.
	// 3. Start of line in multi-line mode.
	// 4. A single literal character.
	// 5. A character from a set of characters.
	//
	if c.out.startType == startStart {
		// Match only at the start of an input text string.
		// start type is already set. We're done.
	} else if numInitialStrings == 1 && c.out.minMatchLen > 0 {
		// Match beginning only with a literal string.
		ch := c.out.literalText[c.out.initialStringIdx]
		c.out.startType = startString
		c.out.initialChar = ch
	} else if c.out.startType == startLine {
		// Match at start of line in Multi-Line mode.
		// Nothing to do here; everything is already set.
	} else if c.out.minMatchLen == 0 {
		// Zero length match possible. We could start anywhere.
		c.out.startType = startNoInfo
	} else if c.out.initialChars.Len() == 1 {
		// All matches begin with the same char.
		c.out.startType = startChar
		c.out.initialChar = c.out.initialChars.RuneAt(0)
	} else if !c.out.initialChars.ContainsRuneRange(0, 0x10ffff) && c.out.minMatchLen > 0 {
		// Matches start with a set of character smaller than the set of all chars.
		c.out.startType = startSet
	} else {
		// Matches can start with anything
		c.out.startType = startNoInfo
	}
}
+ c.out.startType = startChar + c.out.initialChar = c.out.initialChars.RuneAt(0) + } else if !c.out.initialChars.ContainsRuneRange(0, 0x10ffff) && c.out.minMatchLen > 0 { + // Matches start with a set of character smaller than the set of all chars. + c.out.startType = startSet + } else { + // Matches can start with anything + c.out.startType = startNoInfo + } +} + +func (c *compiler) appendOp(typ opcode, arg int) { + c.appendIns(c.buildOp(typ, arg)) +} + +func (c *compiler) appendIns(ins instruction) { + if c.err != nil { + return + } + c.out.compiledPat = append(c.out.compiledPat, ins) +} + +func (c *compiler) buildOp(typ opcode, val int) instruction { + if c.err != nil { + return 0 + } + if val > 0x00ffffff { + panic("bad argument to buildOp") + } + if val < 0 { + if !(typ == urxReservedOpN || typ == urxReservedOp) { + panic("bad value to buildOp") + } + typ = urxReservedOpN + } + return instruction(int32(typ)<<24 | int32(val)) +} + +func (c *compiler) handleCloseParen() { + if len(c.parenStack) == 0 { + c.error(MismatchedParen) + return + } + + c.fixLiterals(false) + + var patIdx int + var patOp instruction + + for { + patIdx, c.parenStack = stackPop(c.parenStack) + if patIdx < 0 { + break + } + + patOp = c.out.compiledPat[patIdx] + if patOp.value() != 0 { + panic("branch target for JMP should not be set") + } + patOp |= instruction(len(c.out.compiledPat)) + c.out.compiledPat[patIdx] = patOp + c.matchOpenParen = patIdx + } + + var modeFlags int + modeFlags, c.parenStack = stackPop(c.parenStack) + if modeFlags >= 0 { + panic("modeFlags in paren stack was not negated") + } + + c.modeFlags = RegexpFlag(modeFlags) + + switch patIdx { + case parenPlain, parenFlags: + // No additional fixups required. + // (Grouping-only parentheses) + case parenCapturing: + // Capturing Parentheses. + // Insert a End Capture op into the pattern. + // The frame offset of the variables for this cg is obtained from the + // start capture op and put it into the end-capture op. 
// handleCloseParen performs the fix-ups required when the scanner reaches a
// close parenthesis: it patches pending alternation JMPs to point here, pops
// the saved mode flags, and emits the end-of-block code appropriate to the
// kind of open paren (capturing, atomic, look-ahead, look-behind, ...).
func (c *compiler) handleCloseParen() {
	if len(c.parenStack) == 0 {
		c.error(MismatchedParen)
		return
	}

	c.fixLiterals(false)

	var patIdx int
	var patOp instruction

	// Pop pattern locations (positive entries) off the paren stack; each is
	// an un-patched JMP from an '|' alternation whose target is the current
	// end of the block. The loop stops at the first negative entry, which is
	// the frame marker identifying the kind of paren.
	for {
		patIdx, c.parenStack = stackPop(c.parenStack)
		if patIdx < 0 {
			break
		}

		patOp = c.out.compiledPat[patIdx]
		if patOp.value() != 0 {
			panic("branch target for JMP should not be set")
		}
		patOp |= instruction(len(c.out.compiledPat))
		c.out.compiledPat[patIdx] = patOp
		c.matchOpenParen = patIdx
	}

	// Restore the match mode flags that were saved when the paren opened.
	// They are stored negated so they can't be confused with locations.
	var modeFlags int
	modeFlags, c.parenStack = stackPop(c.parenStack)
	if modeFlags >= 0 {
		panic("modeFlags in paren stack was not negated")
	}

	c.modeFlags = RegexpFlag(modeFlags)

	switch patIdx {
	case parenPlain, parenFlags:
		// No additional fixups required.
		// (Grouping-only parentheses)
	case parenCapturing:
		// Capturing Parentheses.
		// Insert a End Capture op into the pattern.
		// The frame offset of the variables for this cg is obtained from the
		// start capture op and put it into the end-capture op.
		captureOp := c.out.compiledPat[c.matchOpenParen+1]
		if captureOp.typ() != urxStartCapture {
			panic("bad type in capture op (expected URX_START_CAPTURE)")
		}
		frameVarLocation := captureOp.value()
		c.appendOp(urxEndCapture, frameVarLocation)

	case parenAtomic:
		// Atomic Parenthesis.
		// Insert a LD_SP operation to restore the state stack to the position
		// it was when the atomic parens were entered.
		stoOp := c.out.compiledPat[c.matchOpenParen+1]
		if stoOp.typ() != urxStoSp {
			panic("bad type in capture op (expected URX_STO_SP)")
		}
		stoLoc := stoOp.value()
		c.appendOp(urxLdSp, stoLoc)

	case parenLookahead:
		// Positive look-ahead: LA_START is 5 ops before the block top.
		startOp := c.out.compiledPat[c.matchOpenParen-5]
		if startOp.typ() != urxLaStart {
			panic("bad type in capture op (expected URX_LA_START)")
		}
		dataLoc := startOp.value()
		c.appendOp(urxLaEnd, dataLoc)

	case parenNegLookahead:
		// Negative look-ahead: emit LA_END / BACKTRACK / LA_END.
		startOp := c.out.compiledPat[c.matchOpenParen-1]
		if startOp.typ() != urxLaStart {
			panic("bad type in capture op (expected URX_LA_START)")
		}
		dataLoc := startOp.value()
		c.appendOp(urxLaEnd, dataLoc)
		c.appendOp(urxBacktrack, 0)
		c.appendOp(urxLaEnd, dataLoc)

		// Patch the URX_SAVE near the top of the block.
		// The destination of the SAVE is the final LA_END that was just added.
		saveOp := c.out.compiledPat[c.matchOpenParen]
		if saveOp.typ() != urxStateSave {
			panic("bad type in capture op (expected URX_STATE_SAVE)")
		}
		saveOp = c.buildOp(urxStateSave, len(c.out.compiledPat)-1)
		c.out.compiledPat[c.matchOpenParen] = saveOp

	case parenLookBehind:
		// Positive look-behind.
		startOp := c.out.compiledPat[c.matchOpenParen-4]
		if startOp.typ() != urxLbStart {
			panic("bad type in capture op (expected URX_LB_START)")
		}
		dataLoc := startOp.value()
		c.appendOp(urxLbEnd, dataLoc)
		c.appendOp(urxLaEnd, dataLoc)

		// Determine the min and max bounds for the length of the
		// string that the pattern can match.
		// An unbounded upper limit is an error.
		patEnd := len(c.out.compiledPat) - 1
		minML := c.minMatchLength(c.matchOpenParen, patEnd)
		maxML := c.maxMatchLength(c.matchOpenParen, patEnd)

		if maxML == math.MaxInt32 {
			c.error(LookBehindLimit)
			break
		}
		if minML == math.MaxInt32 {
			// This condition happens when no match is possible, such as with a
			// [set] expression containing no elements.
			// In principle, the generated code to evaluate the expression could be deleted,
			// but it's probably not worth the complication.
			minML = 0
		}

		// Stash the computed bounds into the reserved slots before the block.
		c.out.compiledPat[c.matchOpenParen-2] = instruction(minML)
		c.out.compiledPat[c.matchOpenParen-1] = instruction(maxML)

	case parenLookBehindN:
		// Negative look-behind.
		startOp := c.out.compiledPat[c.matchOpenParen-5]
		if startOp.typ() != urxLbStart {
			panic("bad type in capture op (expected URX_LB_START)")
		}
		dataLoc := startOp.value()
		c.appendOp(urxLbnEnd, dataLoc)

		// Determine the min and max bounds for the length of the
		// string that the pattern can match.
		// An unbounded upper limit is an error.
		patEnd := len(c.out.compiledPat) - 1
		minML := c.minMatchLength(c.matchOpenParen, patEnd)
		maxML := c.maxMatchLength(c.matchOpenParen, patEnd)

		// NOTE(review): a maxML that spills into the opcode bits would corrupt
		// the stored instruction, so it is rejected alongside the MaxInt32 case.
		if instruction(maxML).typ() != 0 {
			c.error(LookBehindLimit)
			break
		}
		if maxML == math.MaxInt32 {
			c.error(LookBehindLimit)
			break
		}
		if minML == math.MaxInt32 {
			// This condition happens when no match is possible, such as with a
			// [set] expression containing no elements.
			// In principle, the generated code to evaluate the expression could be deleted,
			// but it's probably not worth the complication.
			minML = 0
		}

		c.out.compiledPat[c.matchOpenParen-3] = instruction(minML)
		c.out.compiledPat[c.matchOpenParen-2] = instruction(maxML)

		// The continue location, for when the look-behind fails to not-match.
		op := c.buildOp(urxRelocOprnd, len(c.out.compiledPat))
		c.out.compiledPat[c.matchOpenParen-1] = op

	default:
		panic("unexpected opcode in parenStack")
	}

	c.matchCloseParen = len(c.out.compiledPat)
}
+ minML = 0 + } + + c.out.compiledPat[c.matchOpenParen-3] = instruction(minML) + c.out.compiledPat[c.matchOpenParen-2] = instruction(maxML) + + op := c.buildOp(urxRelocOprnd, len(c.out.compiledPat)) + c.out.compiledPat[c.matchOpenParen-1] = op + + default: + panic("unexpected opcode in parenStack") + } + + c.matchCloseParen = len(c.out.compiledPat) +} + +func (c *compiler) fixLiterals(split bool) { + if len(c.literalChars) == 0 { + return + } + + lastCodePoint := c.literalChars[len(c.literalChars)-1] + + // Split: We need to ensure that the last item in the compiled pattern + // refers only to the last literal scanned in the pattern, so that + // quantifiers (*, +, etc.) affect only it, and not a longer string. + // Split before case folding for case insensitive matches. + if split { + c.literalChars = c.literalChars[:len(c.literalChars)-1] + c.fixLiterals(false) + + c.literalChar(lastCodePoint) + c.fixLiterals(false) + return + } + + if c.modeFlags&CaseInsensitive != 0 { + c.literalChars = ucase.FoldRunes(c.literalChars) + lastCodePoint = c.literalChars[len(c.literalChars)-1] + } + + if len(c.literalChars) == 1 { + if c.modeFlags&CaseInsensitive != 0 && uprops.HasBinaryProperty(lastCodePoint, uprops.UCharCaseSensitive) { + c.appendOp(urcOnecharI, int(lastCodePoint)) + } else { + c.appendOp(urxOnechar, int(lastCodePoint)) + } + } else { + if len(c.literalChars) > 0x00ffffff || len(c.out.literalText) > 0x00ffffff { + c.error(PatternTooBig) + } + if c.modeFlags&CaseInsensitive != 0 { + c.appendOp(urxStringI, len(c.out.literalText)) + } else { + c.appendOp(urxString, len(c.out.literalText)) + } + c.appendOp(urxStringLen, len(c.literalChars)) + c.out.literalText = append(c.out.literalText, c.literalChars...) 
	}

	// All pending literal characters have been emitted; reset the accumulator.
	c.literalChars = c.literalChars[:0]
}

// literalChar buffers a single literal code point. Buffered literals are
// flushed into the compiled pattern later by fixLiterals.
func (c *compiler) literalChar(point rune) {
	c.literalChars = append(c.literalChars, point)
}

// allocateData reserves "size" slots in the matcher's data area and returns
// the index of the first reserved slot. Sizes are bounded to 1..0x100 and the
// total area is capped below 0x00fffff0; violations report InternalError.
func (c *compiler) allocateData(size int) int {
	if c.err != nil {
		// Already in an error state; don't allocate anything.
		return 0
	}
	if size <= 0 || size > 0x100 || c.out.dataSize < 0 {
		c.error(InternalError)
		return 0
	}

	dataIndex := c.out.dataSize
	c.out.dataSize += size
	if c.out.dataSize >= 0x00fffff0 {
		c.error(InternalError)
	}
	return dataIndex
}

// allocateStackData reserves "size" slots in the back-tracking stack frame
// and returns the index of the first reserved slot. Same bounds and error
// behavior as allocateData, applied to the per-frame area (frameSize).
func (c *compiler) allocateStackData(size int) int {
	if c.err != nil {
		return 0
	}
	if size <= 0 || size > 0x100 || c.out.frameSize < 0 {
		c.error(InternalError)
		return 0
	}
	dataIndex := c.out.frameSize
	c.out.frameSize += size
	if c.out.frameSize >= 0x00fffff0 {
		c.error(InternalError)
	}
	return dataIndex
}

// insertOp inserts a NOP at position "where" in the compiled pattern, then
// repairs every absolute branch target (and the parentheses bookkeeping)
// that was shifted down by the insertion. The NOP is typically overwritten
// later by the caller with a real instruction.
func (c *compiler) insertOp(where int) {
	if where < 0 || where >= len(c.out.compiledPat) {
		panic("insertOp: out of bounds")
	}

	nop := c.buildOp(urxNop, 0)
	c.out.compiledPat = slices.Insert(c.out.compiledPat, where, nop)

	// Walk through the pattern, looking for any ops with targets that
	// were moved down by the insert. Fix them.
	// Only targets strictly greater than "where" moved; a target equal to
	// "where" now lands on the inserted NOP first, and is left unchanged.
	for loc, op := range c.out.compiledPat {
		switch op.typ() {
		case urxJmp, urxJmpx, urxStateSave, utxCtrLoop, urxCtrLoopNg, urxJmpSav, urxJmpSavX, urxRelocOprnd:
			if op.value() > where {
				op = c.buildOp(op.typ(), op.value()+1)
				c.out.compiledPat[loc] = op
			}
		}
	}

	// Now fix up the parentheses stack. All positive values in it are locations in
	// the compiled pattern. (Negative values are frame boundaries, and don't need fixing.)
	for loc, x := range c.parenStack {
		if x > where {
			c.parenStack[loc] = x + 1
		}
	}

	if c.matchCloseParen > where {
		c.matchCloseParen++
	}
	if c.matchOpenParen > where {
		c.matchOpenParen++
	}
}

// blockTopLoc returns the compiled-pattern index of the start of the most
// recently compiled item — either a whole parenthesized block or a single
// thing (a char, a set reference, a "."). If reserve is true and the item was
// a single op with no pre-reserved slot, a NOP is inserted at that location
// so a later instruction (e.g. a STATE_SAVE) can be patched in.
func (c *compiler) blockTopLoc(reserve bool) int {
	var loc int
	c.fixLiterals(true)

	if len(c.out.compiledPat) == c.matchCloseParen {
		// The item just processed is a parenthesized block.
		loc = c.matchOpenParen
	} else {
		// Item just compiled is a single thing, a ".", or a single char, a string or a set reference.
		// No slot for STATE_SAVE was pre-reserved in the compiled code.
		// We need to make space now.
		loc = len(c.out.compiledPat) - 1
		op := c.out.compiledPat[loc]
		if op.typ() == urxStringLen {
			// Strings take two opcodes; we want the position of the first one.
			// We can have a string at this point if a single character case-folded to two.
			loc--
		}
		if reserve {
			nop := c.buildOp(urxNop, 0)
			c.out.compiledPat = slices.Insert(c.out.compiledPat, loc, nop)
		}
	}
	return loc
}

// compileInlineInterval compiles a small bounded {lo,hi} quantifier by
// repeating the quantified op inline instead of emitting a counted loop.
// Returns false when inlining is unsuitable (upper bound > 10, or the body
// is a multi-op block repeated more than once); the caller then falls back
// to the counted-loop form (compileInterval).
func (c *compiler) compileInlineInterval() bool {
	if c.intervalUpper > 10 || c.intervalUpper < c.intervalLow {
		return false
	}

	topOfBlock := c.blockTopLoc(false)
	if c.intervalUpper == 0 {
		// Pathological case. Attempt no matches, as if the block doesn't exist.
		// Discard the generated code for the block.
		// If the block included parens, discard the info pertaining to them as well.
		c.out.compiledPat = c.out.compiledPat[:topOfBlock]
		if c.matchOpenParen >= topOfBlock {
			c.matchOpenParen = -1
		}
		if c.matchCloseParen >= topOfBlock {
			c.matchCloseParen = -1
		}
		return true
	}

	if topOfBlock != len(c.out.compiledPat)-1 && c.intervalUpper != 1 {
		// The thing being repeated is not a single op, but some
		// more complex block. Do it as a loop, not inlines.
		// Note that things "repeated" a max of once are handled as inline, because
		// the one copy of the code already generated is just fine.
+ return false + } + + // Pick up the opcode that is to be repeated + // + op := c.out.compiledPat[topOfBlock] + + // Compute the pattern location where the inline sequence + // will end, and set up the state save op that will be needed. + // + endOfSequenceLoc := len(c.out.compiledPat) - 1 + c.intervalUpper + (c.intervalUpper - c.intervalLow) + + saveOp := c.buildOp(urxStateSave, endOfSequenceLoc) + if c.intervalLow == 0 { + c.insertOp(topOfBlock) + c.out.compiledPat[topOfBlock] = saveOp + } + + // Loop, emitting the op for the thing being repeated each time. + // Loop starts at 1 because one instance of the op already exists in the pattern, + // it was put there when it was originally encountered. + for i := 1; i < c.intervalUpper; i++ { + if i >= c.intervalLow { + c.appendIns(saveOp) + } + c.appendIns(op) + } + return true +} + +func (c *compiler) compileInterval(init opcode, loop opcode) { + // The CTR_INIT op at the top of the block with the {n,m} quantifier takes + // four slots in the compiled code. Reserve them. + topOfBlock := c.blockTopLoc(true) + c.insertOp(topOfBlock) + c.insertOp(topOfBlock) + c.insertOp(topOfBlock) + + // The operands for the CTR_INIT opcode include the index in the matcher data + // of the counter. Allocate it now. There are two data items + // counterLoc --> Loop counter + // +1 --> Input index (for breaking non-progressing loops) + // (Only present if unbounded upper limit on loop) + var dataSize int + if c.intervalUpper < 0 { + dataSize = 2 + } else { + dataSize = 1 + } + counterLoc := c.allocateStackData(dataSize) + + op := c.buildOp(init, counterLoc) + c.out.compiledPat[topOfBlock] = op + + // The second operand of CTR_INIT is the location following the end of the loop. + // Must put in as a URX_RELOC_OPRND so that the value will be adjusted if the + // compilation of something later on causes the code to grow and the target + // position to move. 
+ loopEnd := len(c.out.compiledPat) + op = c.buildOp(urxRelocOprnd, loopEnd) + c.out.compiledPat[topOfBlock+1] = op + + // Followed by the min and max counts. + c.out.compiledPat[topOfBlock+2] = instruction(c.intervalLow) + c.out.compiledPat[topOfBlock+3] = instruction(c.intervalUpper) + + // Append the CTR_LOOP op. The operand is the location of the CTR_INIT op. + // Goes at end of the block being looped over, so just append to the code so far. + c.appendOp(loop, topOfBlock) + + if (c.intervalLow&0xff000000) != 0 || (c.intervalUpper > 0 && (c.intervalUpper&0xff000000) != 0) { + c.error(NumberTooBig) + } + + if c.intervalLow > c.intervalUpper && c.intervalUpper != -1 { + c.error(MaxLtMin) + } +} + +func (c *compiler) scanNamedChar() rune { + c.nextChar(&c.c) + if c.c.char != chLBrace { + c.error(PropertySyntax) + return 0 + } + + var charName []rune + for { + c.nextChar(&c.c) + if c.c.char == chRBrace { + break + } + if c.c.char == -1 { + c.error(PropertySyntax) + return 0 + } + charName = append(charName, c.c.char) + } + + if !isInvariantUString(charName) { + // All Unicode character names have only invariant characters. 
		// The API to get a character, given a name, accepts only char *, forcing us to convert,
		// which requires this error check
		c.error(PropertySyntax)
		return 0
	}

	theChar := unames.CharForName(unames.UnicodeCharName, string(charName))
	if c.err != nil {
		c.error(PropertySyntax)
	}

	c.nextChar(&c.c) // Continue overall regex pattern processing with char after the '}'
	return theChar
}

// isInvariantUString reports whether every rune in name is in the invariant
// character repertoire (see invariantChars below). Unicode character names
// contain only invariant characters.
func isInvariantUString(name []rune) bool {
	for _, c := range name {
		/*
		 * no assertions here because these functions are legitimately called
		 * for strings with variant characters
		 */
		if !ucharIsInvariant(c) {
			return false /* found a variant char */
		}
	}
	return true
}

// invariantChars is a 128-bit set (4 × 32 bits) marking which ASCII code
// points are "invariant": bit (c & 0x1f) of word (c >> 5) is set when c is
// invariant. The excluded characters are listed per word below.
var invariantChars = [...]uint32{
	0xfffffbff, /* 00..1f but not 0a */
	0xffffffe5, /* 20..3f but not 21 23 24 */
	0x87fffffe, /* 40..5f but not 40 5b..5e */
	0x87fffffe, /* 60..7f but not 60 7b..7e */
}

// ucharIsInvariant reports whether c is an invariant ASCII character,
// using the invariantChars bitmap above.
func ucharIsInvariant(c rune) bool {
	return c <= 0x7f && (invariantChars[(c)>>5]&(uint32(1)<<(c&0x1f))) != 0
}

// setPushOp evaluates any pending higher-precedence set operators, then
// pushes the new operator and a fresh empty set onto the [set] parsing stacks.
func (c *compiler) setPushOp(op setOperation) {
	c.setEval(op)
	c.setOpStack = append(c.setOpStack, op)
	c.setStack = append(c.setStack, uset.New())
}

// setEval pops and applies pending [set] operators whose precedence (encoded
// in the high 16 bits of the setOperation value) is >= that of nextOp,
// combining operand sets in place on the set stack.
func (c *compiler) setEval(nextOp setOperation) {
	var rightOperand *uset.UnicodeSet
	var leftOperand *uset.UnicodeSet

	for {
		pendingSetOp := c.setOpStack[len(c.setOpStack)-1]
		// High 16 bits carry the operator precedence; stop once the pending
		// operator binds less tightly than the incoming one.
		if (pendingSetOp & 0xffff0000) < (nextOp & 0xffff0000) {
			break
		}

		c.setOpStack = c.setOpStack[:len(c.setOpStack)-1]
		rightOperand = c.setStack[len(c.setStack)-1]

		switch pendingSetOp {
		case setNegation:
			// Unary: complement the top set in place.
			rightOperand.Complement()

		case setCaseClose:
			// Unary: close the top set over case-insensitive equivalents.
			rightOperand.CloseOver(uset.CaseInsensitive)

		case setDifference1, setDifference2:
			// Binary: pop right operand, subtract it from the new top (left).
			c.setStack = c.setStack[:len(c.setStack)-1]
			leftOperand = c.setStack[len(c.setStack)-1]
			leftOperand.RemoveAll(rightOperand)

		case setIntersection1, setIntersection2:
			// Binary: pop right operand, intersect into the new top (left).
			c.setStack = c.setStack[:len(c.setStack)-1]
			leftOperand = c.setStack[len(c.setStack)-1]
			leftOperand.RetainAll(rightOperand)

		case setUnion:
			// Binary: pop right operand, union into the new top (left).
			c.setStack = c.setStack[:len(c.setStack)-1]
			leftOperand = c.setStack[len(c.setStack)-1]
			leftOperand.AddAll(rightOperand)

		default:
			panic("unreachable")
		}
	}
}

// safeIncrement returns val+delta, saturating at math.MaxInt32 instead of
// overflowing. math.MaxInt32 is used throughout the match-length analysis
// as the "unbounded / no-match-possible" sentinel.
func safeIncrement(val int32, delta int) int32 {
	if delta <= math.MaxInt32 && math.MaxInt32-val > int32(delta) {
		return val + int32(delta)
	}
	return math.MaxInt32
}

// minMatchLength computes the minimum length of input that the compiled
// pattern between ops start and end (inclusive) can match. A result of
// math.MaxInt32 means no match is possible at all (callers treat it that
// way, e.g. when sizing look-behind blocks).
func (c *compiler) minMatchLength(start, end int) int32 {
	if c.err != nil {
		return 0
	}

	var loc int
	var currentLen int32

	// forwardedLength is a vector holding minimum-match-length values that
	// are propagated forward in the pattern by JMP or STATE_SAVE operations.
	// It must be one longer than the pattern being checked because some ops
	// will jmp to a end-of-block+1 location from within a block, and we must
	// count those when checking the block.
	forwardedLength := make([]int32, end+2)
	for i := range forwardedLength {
		forwardedLength[i] = math.MaxInt32
	}

	for loc = start; loc <= end; loc++ {
		op := c.out.compiledPat[loc]
		opType := op.typ()

		// The loop is advancing linearly through the pattern.
		// If the op we are now at was the destination of a branch in the pattern,
		// and that path has a shorter minimum length than the current accumulated value,
		// replace the current accumulated value.
		// no-match-possible cases.
		if forwardedLength[loc] < currentLen {
			currentLen = forwardedLength[loc]
		}

		switch opType {
		// Ops that don't change the total length matched
		case urxReservedOp,
			urxEnd,
			urxStringLen,
			urxNop,
			urxStartCapture,
			urxEndCapture,
			urxBackslashB,
			urxBackslashBu,
			urxBackslashG,
			urxBackslashZ,
			urxCaret,
			urxDollar,
			urxDollarM,
			urxDollarD,
			urxDollarMd,
			urxRelocOprnd,
			urxStoInpLoc,
			urxCaretM,
			urxCaretMUnix,
			urxBackref, // BackRef. Must assume that it might be a zero length match
			urxBackrefI,
			urxStoSp, // Setup for atomic or possessive blocks.
Doesn't change what can match. + urxLdSp, + urxJmpSav, + urxJmpSavX: + // no-op + + // Ops that match a minimum of one character (one or two 16 bit code units.) + // + case urxOnechar, + urxStaticSetref, + urxStatSetrefN, + urxSetref, + urxBackslashD, + urxBackslashH, + urxBackslashR, + urxBackslashV, + urcOnecharI, + urxBackslashX, // Grapheme Cluster. Minimum is 1, max unbounded. + urxDotanyAll, // . matches one or two. + urxDotany, + urxDotanyUnix: + currentLen = safeIncrement(currentLen, 1) + + case urxJmpx: + loc++ // URX_JMPX has an extra operand, ignored here, otherwise processed identically to URX_JMP. + fallthrough + + case urxJmp: + jmpDest := op.value() + if jmpDest < loc { + // Loop of some kind. Can safely ignore, the worst that will happen + // is that we understate the true minimum length + currentLen = forwardedLength[loc+1] + } else { + // Forward jump. Propagate the current min length to the target loc of the jump. + if forwardedLength[jmpDest] > currentLen { + forwardedLength[jmpDest] = currentLen + } + } + + case urxBacktrack: + // Back-tracks are kind of like a branch, except that the min length was + // propagated already, by the state save. + currentLen = forwardedLength[loc+1] + + case urxStateSave: + // State Save, for forward jumps, propagate the current minimum. + // of the state save. + jmpDest := op.value() + if jmpDest > loc { + if currentLen < forwardedLength[jmpDest] { + forwardedLength[jmpDest] = currentLen + } + } + + case urxString: + loc++ + stringLenOp := c.out.compiledPat[loc] + currentLen = safeIncrement(currentLen, stringLenOp.value()) + + case urxStringI: + loc++ + // TODO: with full case folding, matching input text may be shorter than + // the string we have here. More smarts could put some bounds on it. + // Assume a min length of one for now. 
A min length of zero causes + // optimization failures for a pattern like "string"+ + // currentLen += URX_VAL(stringLenOp); + currentLen = safeIncrement(currentLen, 1) + + case urxCtrInit, urxCtrInitNg: + // Loop Init Ops. + // If the min loop count == 0 + // move loc forwards to the end of the loop, skipping over the body. + // If the min count is > 0, + // continue normal processing of the body of the loop. + loopEndOp := c.out.compiledPat[loc+1] + loopEndLoc := loopEndOp.value() + minLoopCount := c.out.compiledPat[loc+2] + if minLoopCount == 0 { + loc = loopEndLoc + } else { + loc += 3 // Skips over operands of CTR_INIT + } + + case utxCtrLoop, urxCtrLoopNg: + // Loop ops. The jump is conditional, backwards only. + + case urxLoopSrI, urxLoopDotI, urxLoopC: + // More loop ops. These state-save to themselves. don't change the minimum match - could match nothing at all. + + case urxLaStart, urxLbStart: + // Look-around. Scan forward until the matching look-ahead end, + // without processing the look-around block. This is overly pessimistic for look-ahead, + // it assumes that the look-ahead match might be zero-length. + // TODO: Positive lookahead could recursively do the block, then continue + // with the longer of the block or the value coming in. Ticket 6060 + var depth int32 + if opType == urxLaStart { + depth = 2 + } else { + depth = 1 + } + + for { + loc++ + op = c.out.compiledPat[loc] + if op.typ() == urxLaStart { + // The boilerplate for look-ahead includes two LA_END instructions, + // Depth will be decremented by each one when it is seen. + depth += 2 + } + if op.typ() == urxLbStart { + depth++ + } + if op.typ() == urxLaEnd { + depth-- + if depth == 0 { + break + } + } + if op.typ() == urxLbnEnd { + depth-- + if depth == 0 { + break + } + } + if op.typ() == urxStateSave { + // Need this because neg lookahead blocks will FAIL to outside of the block. 
+ jmpDest := op.value() + if jmpDest > loc { + if currentLen < forwardedLength[jmpDest] { + forwardedLength[jmpDest] = currentLen + } + } + } + } + + case urxLaEnd, urxLbCont, urxLbEnd, urxLbnCount, urxLbnEnd: + // Only come here if the matching URX_LA_START or URX_LB_START was not in the + // range being sized, which happens when measuring size of look-behind blocks. + + default: + panic("unreachable") + } + } + + // We have finished walking through the ops. Check whether some forward jump + // propagated a shorter length to location end+1. + if forwardedLength[end+1] < currentLen { + currentLen = forwardedLength[end+1] + } + + return currentLen +} + +func (c *compiler) maxMatchLength(start, end int) int32 { + if c.err != nil { + return 0 + } + var loc int + var currentLen int32 + + forwardedLength := make([]int32, end+1) + + for loc = start; loc <= end; loc++ { + op := c.out.compiledPat[loc] + opType := op.typ() + + // The loop is advancing linearly through the pattern. + // If the op we are now at was the destination of a branch in the pattern, + // and that path has a longer maximum length than the current accumulated value, + // replace the current accumulated value. + if forwardedLength[loc] > currentLen { + currentLen = forwardedLength[loc] + } + + switch opType { + // Ops that don't change the total length matched + case urxReservedOp, + urxEnd, + urxStringLen, + urxNop, + urxStartCapture, + urxEndCapture, + urxBackslashB, + urxBackslashBu, + urxBackslashG, + urxBackslashZ, + urxCaret, + urxDollar, + urxDollarM, + urxDollarD, + urxDollarMd, + urxRelocOprnd, + urxStoInpLoc, + urxCaretM, + urxCaretMUnix, + urxStoSp, // Setup for atomic or possessive blocks. Doesn't change what can match. + urxLdSp, + urxLbEnd, + urxLbCont, + urxLbnCount, + urxLbnEnd: + // no-op + + // Ops that increase that cause an unbounded increase in the length + // of a matched string, or that increase it a hard to characterize way. 
+ // Call the max length unbounded, and stop further checking. + case urxBackref, // BackRef. Must assume that it might be a zero length match + urxBackrefI, + urxBackslashX: // Grapheme Cluster. Minimum is 1, max unbounded. + currentLen = math.MaxInt32 + + // Ops that match a max of one character (possibly two 16 bit code units.) + // + case urxStaticSetref, + urxStatSetrefN, + urxSetref, + urxBackslashD, + urxBackslashH, + urxBackslashR, + urxBackslashV, + urcOnecharI, + urxDotanyAll, + urxDotany, + urxDotanyUnix: + currentLen = safeIncrement(currentLen, 2) + + // Single literal character. Increase current max length by one or two, + // depending on whether the char is in the supplementary range. + case urxOnechar: + currentLen = safeIncrement(currentLen, 1) + if op.value() > 0x10000 { + currentLen = safeIncrement(currentLen, 1) + } + + // Jumps. + // + case urxJmp, urxJmpx, urxJmpSav, urxJmpSavX: + jmpDest := op.value() + if jmpDest < loc { + // Loop of some kind. Max match length is unbounded. + currentLen = math.MaxInt32 + } else { + // Forward jump. Propagate the current min length to the target loc of the jump. + if forwardedLength[jmpDest] < currentLen { + forwardedLength[jmpDest] = currentLen + } + currentLen = 0 + } + + case urxBacktrack: + // back-tracks are kind of like a branch, except that the max length was + // propagated already, by the state save. + currentLen = forwardedLength[loc+1] + + case urxStateSave: + // State Save, for forward jumps, propagate the current minimum. + // of the state save. + // For backwards jumps, they create a loop, maximum + // match length is unbounded. 
+ jmpDest := op.value() + if jmpDest > loc { + if currentLen > forwardedLength[jmpDest] { + forwardedLength[jmpDest] = currentLen + } + } else { + currentLen = math.MaxInt32 + } + + case urxString: + loc++ + stringLenOp := c.out.compiledPat[loc] + currentLen = safeIncrement(currentLen, stringLenOp.value()) + + case urxStringI: + // TODO: This code assumes that any user string that matches will be no longer + // than our compiled string, with case insensitive matching. + // Our compiled string has been case-folded already. + // + // Any matching user string will have no more code points than our + // compiled (folded) string. Folding may add code points, but + // not remove them. + // + // There is a potential problem if a supplemental code point + // case-folds to a BMP code point. In this case our compiled string + // could be shorter (in code units) than a matching user string. + // + // At this time (Unicode 6.1) there are no such characters, and this case + // is not being handled. A test, intltest regex/Bug9283, will fail if + // any problematic characters are added to Unicode. + // + // If this happens, we can make a set of the BMP chars that the + // troublesome supplementals fold to, scan our string, and bump the + // currentLen one extra for each that is found. + // + loc++ + stringLenOp := c.out.compiledPat[loc] + currentLen = safeIncrement(currentLen, stringLenOp.value()) + + case urxCtrInit, urxCtrInitNg: + // For Loops, recursively call this function on the pattern for the loop body, + // then multiply the result by the maximum loop count. + loopEndLoc := c.out.compiledPat[loc+1].value() + if loopEndLoc == loc+4 { + // Loop has an empty body. No affect on max match length. + // Continue processing with code after the loop end. + loc = loopEndLoc + break + } + + maxLoopCount := int(c.out.compiledPat[loc+3]) + if maxLoopCount == -1 { + // Unbounded Loop. No upper bound on match length. 
+ currentLen = math.MaxInt32 + break + } + + blockLen := c.maxMatchLength(loc+4, loopEndLoc-1) // Recursive call. + updatedLen := int(currentLen) + int(blockLen)*maxLoopCount + if updatedLen >= math.MaxInt32 { + currentLen = math.MaxInt32 + break + } + currentLen = int32(updatedLen) + loc = loopEndLoc + + case utxCtrLoop, urxCtrLoopNg: + panic("should not encounter this opcode") + + case urxLoopSrI, urxLoopDotI, urxLoopC: + // For anything to do with loops, make the match length unbounded. + currentLen = math.MaxInt32 + + case urxLaStart, urxLaEnd: + // Look-ahead. Just ignore, treat the look-ahead block as if + // it were normal pattern. Gives a too-long match length, + // but good enough for now. + + case urxLbStart: + // Look-behind. Scan forward until the matching look-around end, + // without processing the look-behind block. + dataLoc := op.value() + for loc = loc + 1; loc <= end; loc++ { + op = c.out.compiledPat[loc] + if (op.typ() == urxLaEnd || op.typ() == urxLbnEnd) && (op.value() == dataLoc) { + break + } + } + + default: + panic("unreachable") + } + + if currentLen == math.MaxInt32 { + // The maximum length is unbounded. + // Stop further processing of the pattern. + break + } + } + + return currentLen +} + +// Machine Generated below. +// It may need updating with new versions of Unicode. +// Intltest test RegexTest::TestCaseInsensitiveStarters will fail if an update is needed. +// The update tool is here: +// svn+ssh://source.icu-project.org/repos/icu/tools/trunk/unicode/c/genregexcasing + +// Machine Generated Data. Do not hand edit. 
+var reCaseFixCodePoints = [...]rune{ + 0x61, 0x66, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x77, 0x79, 0x2bc, + 0x3ac, 0x3ae, 0x3b1, 0x3b7, 0x3b9, 0x3c1, 0x3c5, 0x3c9, 0x3ce, 0x565, + 0x574, 0x57e, 0x1f00, 0x1f01, 0x1f02, 0x1f03, 0x1f04, 0x1f05, 0x1f06, 0x1f07, + 0x1f20, 0x1f21, 0x1f22, 0x1f23, 0x1f24, 0x1f25, 0x1f26, 0x1f27, 0x1f60, 0x1f61, + 0x1f62, 0x1f63, 0x1f64, 0x1f65, 0x1f66, 0x1f67, 0x1f70, 0x1f74, 0x1f7c, 0x110000} + +var reCaseFixStringOffsets = [...]int16{ + 0x0, 0x1, 0x6, 0x7, 0x8, 0x9, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, + 0x17, 0x1b, 0x20, 0x21, 0x2a, 0x2e, 0x2f, 0x30, 0x34, 0x35, 0x37, 0x39, 0x3b, + 0x3d, 0x3f, 0x41, 0x43, 0x45, 0x47, 0x49, 0x4b, 0x4d, 0x4f, 0x51, 0x53, 0x55, + 0x57, 0x59, 0x5b, 0x5d, 0x5f, 0x61, 0x63, 0x65, 0x66, 0x67, 0} + +var reCaseFixCounts = [...]int16{ + 0x1, 0x5, 0x1, 0x1, 0x1, 0x4, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x4, 0x4, 0x5, 0x1, 0x9, + 0x4, 0x1, 0x1, 0x4, 0x1, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, + 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x1, 0x1, 0x1, 0} + +var reCaseFixData = [...]uint16{ + 0x1e9a, 0xfb00, 0xfb01, 0xfb02, 0xfb03, 0xfb04, 0x1e96, 0x130, 0x1f0, 0xdf, 0x1e9e, 0xfb05, + 0xfb06, 0x1e97, 0x1e98, 0x1e99, 0x149, 0x1fb4, 0x1fc4, 0x1fb3, 0x1fb6, 0x1fb7, 0x1fbc, 0x1fc3, + 0x1fc6, 0x1fc7, 0x1fcc, 0x390, 0x1fd2, 0x1fd3, 0x1fd6, 0x1fd7, 0x1fe4, 0x3b0, 0x1f50, 0x1f52, + 0x1f54, 0x1f56, 0x1fe2, 0x1fe3, 0x1fe6, 0x1fe7, 0x1ff3, 0x1ff6, 0x1ff7, 0x1ffc, 0x1ff4, 0x587, + 0xfb13, 0xfb14, 0xfb15, 0xfb17, 0xfb16, 0x1f80, 0x1f88, 0x1f81, 0x1f89, 0x1f82, 0x1f8a, 0x1f83, + 0x1f8b, 0x1f84, 0x1f8c, 0x1f85, 0x1f8d, 0x1f86, 0x1f8e, 0x1f87, 0x1f8f, 0x1f90, 0x1f98, 0x1f91, + 0x1f99, 0x1f92, 0x1f9a, 0x1f93, 0x1f9b, 0x1f94, 0x1f9c, 0x1f95, 0x1f9d, 0x1f96, 0x1f9e, 0x1f97, + 0x1f9f, 0x1fa0, 0x1fa8, 0x1fa1, 0x1fa9, 0x1fa2, 0x1faa, 0x1fa3, 0x1fab, 0x1fa4, 0x1fac, 0x1fa5, + 0x1fad, 0x1fa6, 0x1fae, 0x1fa7, 0x1faf, 0x1fb2, 0x1fc2, 0x1ff2, 0} + +func (c *compiler) findCaseInsensitiveStarters(ch rune, 
starterChars *uset.UnicodeSet) { + if uprops.HasBinaryProperty(ch, uprops.UCharCaseSensitive) { + caseFoldedC := ucase.Fold(ch) + starterChars.Clear() + starterChars.AddRune(caseFoldedC) + + var i int + for i = 0; reCaseFixCodePoints[i] < ch; i++ { + // Simple linear search through the sorted list of interesting code points. + } + + if reCaseFixCodePoints[i] == ch { + data := reCaseFixData[reCaseFixStringOffsets[i]:] + numCharsToAdd := reCaseFixCounts[i] + for j := int16(0); j < numCharsToAdd; j++ { + var cpToAdd rune + cpToAdd, data = utf16.NextUnsafe(data) + starterChars.AddRune(cpToAdd) + } + } + + starterChars.CloseOver(uset.CaseInsensitive) + } else { + // Not a cased character. Just return it alone. + starterChars.Clear() + starterChars.AddRune(ch) + } +} + +func (c *compiler) scanProp() *uset.UnicodeSet { + if c.err != nil { + return nil + } + negated := c.c.char == chP + + c.nextChar(&c.c) + if c.c.char != chLBrace { + c.error(PropertySyntax) + return nil + } + + var propertyName strings.Builder + for { + c.nextChar(&c.c) + if c.c.char == chRBrace { + break + } + if c.c.char == -1 { + c.error(PropertySyntax) + return nil + } + propertyName.WriteRune(c.c.char) + } + + ss := c.createSetForProperty(propertyName.String(), negated) + c.nextChar(&c.c) + return ss +} + +func (c *compiler) createSetForProperty(propName string, negated bool) *uset.UnicodeSet { + if c.err != nil { + return nil + } + + var set *uset.UnicodeSet + + var usetFlags uset.USet + if c.modeFlags&CaseInsensitive != 0 { + usetFlags |= uset.CaseInsensitive + } + + var err error + set, err = uprops.NewUnicodeSetFomPattern("\\p{"+propName+"}", usetFlags) + if err == nil { + goto done + } + + // + // The incoming property wasn't directly recognized by ICU. + + // Check [:word:] and [:all:]. These are not recognized as a properties by ICU UnicodeSet. + // Java accepts 'word' with mixed case. + // Java accepts 'all' only in all lower case. 
+ if strings.EqualFold(propName, "word") { + set = staticPropertySets[urxIswordSet].Clone() + goto done + } + if propName == "all" { + set = uset.New() + set.AddRuneRange(0, 0x10ffff) + goto done + } + + // Do Java InBlock expressions + // + if strings.HasPrefix(propName, "In") && len(propName) >= 3 { + set = uset.New() + if uprops.ApplyPropertyAlias(set, "Block", propName[2:]) != nil { + c.error(PropertySyntax) + } + goto done + } + + // Check for the Java form "IsBooleanPropertyValue", which we will recast + // as "BooleanPropertyValue". The property value can be either a + // a General Category or a Script Name. + if strings.HasPrefix(propName, "Is") && len(propName) >= 3 { + mPropName := propName[2:] + if strings.IndexByte(mPropName, '=') >= 0 { + c.error(PropertySyntax) + goto done + } + + if strings.EqualFold(mPropName, "assigned") { + mPropName = "unassigned" + negated = !negated + } else if strings.EqualFold(mPropName, "TitleCase") { + mPropName = "Titlecase_Letter" + } + + set, err = uprops.NewUnicodeSetFomPattern("\\p{"+mPropName+"}", 0) + if err != nil { + c.error(PropertySyntax) + } else if !set.IsEmpty() && (usetFlags&uset.CaseInsensitive) != 0 { + set.CloseOver(uset.CaseInsensitive) + } + goto done + } + + if strings.HasPrefix(propName, "java") { + set = uset.New() + + // + // Try the various Java specific properties. 
+ // These all begin with "java" + // + if propName == "javaDefined" { + c.err = uprops.AddCategory(set, uchar.GcCnMask) + set.Complement() + } else if propName == "javaDigit" { + c.err = uprops.AddCategory(set, uchar.GcNdMask) + } else if propName == "javaIdentifierIgnorable" { + c.err = addIdentifierIgnorable(set) + } else if propName == "javaISOControl" { + set.AddRuneRange(0, 0x1F) + set.AddRuneRange(0x7F, 0x9F) + } else if propName == "javaJavaIdentifierPart" { + c.err = uprops.AddCategory(set, uchar.GcLMask) + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcScMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcPcMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcNdMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcNlMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcMcMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcMnMask) + } + if c.err == nil { + c.err = addIdentifierIgnorable(set) + } + } else if propName == "javaJavaIdentifierStart" { + c.err = uprops.AddCategory(set, uchar.GcLMask) + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcNlMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcScMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcPcMask) + } + } else if propName == "javaLetter" { + c.err = uprops.AddCategory(set, uchar.GcLMask) + } else if propName == "javaLetterOrDigit" { + c.err = uprops.AddCategory(set, uchar.GcLMask) + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcNdMask) + } + } else if propName == "javaLowerCase" { + c.err = uprops.AddCategory(set, uchar.GcLlMask) + } else if propName == "javaMirrored" { + c.err = uprops.ApplyIntPropertyValue(set, uprops.UCharBidiMirrored, 1) + } else if propName == "javaSpaceChar" { + c.err = uprops.AddCategory(set, uchar.GcZMask) + } else if propName == "javaSupplementaryCodePoint" { + set.AddRuneRange(0x10000, 
uset.MaxValue) + } else if propName == "javaTitleCase" { + c.err = uprops.AddCategory(set, uchar.GcLtMask) + } else if propName == "javaUnicodeIdentifierStart" { + c.err = uprops.AddCategory(set, uchar.GcLMask) + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcNlMask) + } + } else if propName == "javaUnicodeIdentifierPart" { + c.err = uprops.AddCategory(set, uchar.GcLMask) + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcPcMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcNdMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcNlMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcMcMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcMnMask) + } + if c.err == nil { + c.err = addIdentifierIgnorable(set) + } + } else if propName == "javaUpperCase" { + c.err = uprops.AddCategory(set, uchar.GcLuMask) + } else if propName == "javaValidCodePoint" { + set.AddRuneRange(0, uset.MaxValue) + } else if propName == "javaWhitespace" { + c.err = uprops.AddCategory(set, uchar.GcZMask) + excl := uset.New() + excl.AddRune(0x0a) + excl.AddRune(0x2007) + excl.AddRune(0x202f) + set.RemoveAll(excl) + set.AddRuneRange(9, 0x0d) + set.AddRuneRange(0x1c, 0x1f) + } else { + c.error(PropertySyntax) + } + + if c.err == nil && !set.IsEmpty() && (usetFlags&uset.CaseInsensitive) != 0 { + set.CloseOver(uset.CaseInsensitive) + } + goto done + } + + // Unrecognized property. ICU didn't like it as it was, and none of the Java compatibility + // extensions matched it. 
+ c.error(PropertySyntax) + +done: + if c.err != nil { + return nil + } + if negated { + set.Complement() + } + return set +} + +func addIdentifierIgnorable(set *uset.UnicodeSet) error { + set.AddRuneRange(0, 8) + set.AddRuneRange(0x0e, 0x1b) + set.AddRuneRange(0x7f, 0x9f) + + return uprops.AddCategory(set, uchar.GcCfMask) +} + +func (c *compiler) scanPosixProp() *uset.UnicodeSet { + var set *uset.UnicodeSet + + if !(c.c.char == chColon) { + panic("assertion failed: c.lastChar == ':'") + } + + savedScanIndex := c.scanIndex + savedScanPattern := c.p + savedQuoteMode := c.quoteMode + savedInBackslashQuote := c.inBackslashQuote + savedEOLComments := c.eolComments + savedLineNum := c.lineNum + savedCharNum := c.charNum + savedLastChar := c.lastChar + savedPeekChar := c.peekChar + savedC := c.c + + // Scan for a closing ]. A little tricky because there are some perverse + // edge cases possible. "[:abc\Qdef:] \E]" is a valid non-property expression, + // ending on the second closing ]. + var propName []rune + negated := false + + // Check for and consume the '^' in a negated POSIX property, e.g. [:^Letter:] + c.nextChar(&c.c) + if c.c.char == chUp { + negated = true + c.nextChar(&c.c) + } + + // Scan for the closing ":]", collecting the property name along the way. + sawPropSetTerminator := false + for { + propName = append(propName, c.c.char) + c.nextChar(&c.c) + if c.c.quoted || c.c.char == -1 { + // Escaped characters or end of input - either says this isn't a [:Property:] + break + } + if c.c.char == chColon { + c.nextChar(&c.c) + if c.c.char == chRBracket { + sawPropSetTerminator = true + break + } + } + } + + if sawPropSetTerminator { + set = c.createSetForProperty(string(propName), negated) + } else { + // No closing ']' - not a [:Property:] + // Restore the original scan position. + // The main scanner will retry the input as a normal set expression, + // not a [:Property:] expression. 
+ c.scanIndex = savedScanIndex + c.p = savedScanPattern + c.quoteMode = savedQuoteMode + c.inBackslashQuote = savedInBackslashQuote + c.eolComments = savedEOLComments + c.lineNum = savedLineNum + c.charNum = savedCharNum + c.lastChar = savedLastChar + c.peekChar = savedPeekChar + c.c = savedC + } + + return set +} + +func (c *compiler) compileSet(set *uset.UnicodeSet) { + if set == nil { + return + } + // Remove any strings from the set. + // There shoudn't be any, but just in case. + // (Case Closure can add them; if we had a simple case closure available that + // ignored strings, that would be better.) + setSize := set.Len() + + switch setSize { + case 0: + // Set of no elements. Always fails to match. + c.appendOp(urxBacktrack, 0) + + case 1: + // The set contains only a single code point. Put it into + // the compiled pattern as a single char operation rather + // than a set, and discard the set itself. + c.literalChar(set.RuneAt(0)) + + default: + // The set contains two or more chars. (the normal case) + // Put it into the compiled pattern as a set. + // theSet->freeze(); + setNumber := len(c.out.sets) + c.out.sets = append(c.out.sets, set) + c.appendOp(urxSetref, setNumber) + } +} diff --git a/go/mysql/icuregex/compiler_table.go b/go/mysql/icuregex/compiler_table.go new file mode 100644 index 00000000000..e8cfe0d5e55 --- /dev/null +++ b/go/mysql/icuregex/compiler_table.go @@ -0,0 +1,357 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex + +type patternParseAction uint8 + +const ( + doSetBackslashD patternParseAction = iota + doBackslashh + doBackslashH + doSetLiteralEscaped + doOpenLookAheadNeg + doCompleteNamedBackRef + doPatStart + doBackslashS + doBackslashD + doNGStar + doNOP + doBackslashX + doSetLiteral + doContinueNamedCapture + doBackslashG + doBackslashR + doSetBegin + doSetBackslashv + doPossessivePlus + doPerlInline + doBackslashZ + doSetAddAmp + doSetBeginDifference1 + doIntervalError + doSetNegate + doIntervalInit + doSetIntersection2 + doPossessiveInterval + doRuleError + doBackslashW + doContinueNamedBackRef + doOpenNonCaptureParen + doExit + doSetNamedChar + doSetBackslashV + doConditionalExpr + doEscapeError + doBadOpenParenType + doPossessiveStar + doSetAddDash + doEscapedLiteralChar + doSetBackslashw + doIntervalUpperDigit + doBackslashv + doSetBackslashS + doSetNoCloseError + doSetProp + doBackslashB + doSetEnd + doSetRange + doMatchModeParen + doPlus + doBackslashV + doSetMatchMode + doBackslashz + doSetNamedRange + doOpenLookBehindNeg + doInterval + doBadNamedCapture + doBeginMatchMode + doBackslashd + doPatFinish + doNamedChar + doNGPlus + doSetDifference2 + doSetBackslashH + doCloseParen + doDotAny + doOpenCaptureParen + doEnterQuoteMode + doOpenAtomicParen + doBadModeFlag + doSetBackslashd + doSetFinish + doProperty + doBeginNamedBackRef + doBackRef + doOpt + doDollar + doBeginNamedCapture + doNGInterval + doSetOpError + doSetPosixProp + doSetBeginIntersection1 + doBackslashb + doSetBeginUnion + doIntevalLowerDigit + doSetBackslashh + doStar + doMatchMode 
+ doBackslashA + doOpenLookBehind + doPossessiveOpt + doOrOperator + doBackslashw + doBackslashs + doLiteralChar + doSuppressComments + doCaret + doIntervalSame + doNGOpt + doOpenLookAhead + doSetBackslashW + doMismatchedParenErr + doSetBackslashs + rbbiLastAction +) + +// ------------------------------------------------------------------------------- +// +// RegexTableEl represents the structure of a row in the transition table +// for the pattern parser state machine. +// +// ------------------------------------------------------------------------------- +type regexTableEl struct { + action patternParseAction + charClass uint8 + nextState uint8 + pushState uint8 + nextChar bool +} + +var parseStateTable = []regexTableEl{ + {doNOP, 0, 0, 0, true}, + {doPatStart, 255, 2, 0, false}, // 1 start + {doLiteralChar, 254, 14, 0, true}, // 2 term + {doLiteralChar, 130, 14, 0, true}, // 3 + {doSetBegin, 91 /* [ */, 123, 205, true}, // 4 + {doNOP, 40 /* ( */, 27, 0, true}, // 5 + {doDotAny, 46 /* . */, 14, 0, true}, // 6 + {doCaret, 94 /* ^ */, 14, 0, true}, // 7 + {doDollar, 36 /* $ */, 14, 0, true}, // 8 + {doNOP, 92 /* \ */, 89, 0, true}, // 9 + {doOrOperator, 124 /* | */, 2, 0, true}, // 10 + {doCloseParen, 41 /* ) */, 255, 0, true}, // 11 + {doPatFinish, 253, 2, 0, false}, // 12 + {doRuleError, 255, 206, 0, false}, // 13 + {doNOP, 42 /* * */, 68, 0, true}, // 14 expr-quant + {doNOP, 43 /* + */, 71, 0, true}, // 15 + {doNOP, 63 /* ? */, 74, 0, true}, // 16 + {doIntervalInit, 123 /* { */, 77, 0, true}, // 17 + {doNOP, 40 /* ( */, 23, 0, true}, // 18 + {doNOP, 255, 20, 0, false}, // 19 + {doOrOperator, 124 /* | */, 2, 0, true}, // 20 expr-cont + {doCloseParen, 41 /* ) */, 255, 0, true}, // 21 + {doNOP, 255, 2, 0, false}, // 22 + {doSuppressComments, 63 /* ? */, 25, 0, true}, // 23 open-paren-quant + {doNOP, 255, 27, 0, false}, // 24 + {doNOP, 35 /* # */, 50, 14, true}, // 25 open-paren-quant2 + {doNOP, 255, 29, 0, false}, // 26 + {doSuppressComments, 63 /* ? 
*/, 29, 0, true}, // 27 open-paren + {doOpenCaptureParen, 255, 2, 14, false}, // 28 + {doOpenNonCaptureParen, 58 /* : */, 2, 14, true}, // 29 open-paren-extended + {doOpenAtomicParen, 62 /* > */, 2, 14, true}, // 30 + {doOpenLookAhead, 61 /* = */, 2, 20, true}, // 31 + {doOpenLookAheadNeg, 33 /* ! */, 2, 20, true}, // 32 + {doNOP, 60 /* < */, 46, 0, true}, // 33 + {doNOP, 35 /* # */, 50, 2, true}, // 34 + {doBeginMatchMode, 105 /* i */, 53, 0, false}, // 35 + {doBeginMatchMode, 100 /* d */, 53, 0, false}, // 36 + {doBeginMatchMode, 109 /* m */, 53, 0, false}, // 37 + {doBeginMatchMode, 115 /* s */, 53, 0, false}, // 38 + {doBeginMatchMode, 117 /* u */, 53, 0, false}, // 39 + {doBeginMatchMode, 119 /* w */, 53, 0, false}, // 40 + {doBeginMatchMode, 120 /* x */, 53, 0, false}, // 41 + {doBeginMatchMode, 45 /* - */, 53, 0, false}, // 42 + {doConditionalExpr, 40 /* ( */, 206, 0, true}, // 43 + {doPerlInline, 123 /* { */, 206, 0, true}, // 44 + {doBadOpenParenType, 255, 206, 0, false}, // 45 + {doOpenLookBehind, 61 /* = */, 2, 20, true}, // 46 open-paren-lookbehind + {doOpenLookBehindNeg, 33 /* ! 
*/, 2, 20, true}, // 47 + {doBeginNamedCapture, 129, 64, 0, false}, // 48 + {doBadOpenParenType, 255, 206, 0, false}, // 49 + {doNOP, 41 /* ) */, 255, 0, true}, // 50 paren-comment + {doMismatchedParenErr, 253, 206, 0, false}, // 51 + {doNOP, 255, 50, 0, true}, // 52 + {doMatchMode, 105 /* i */, 53, 0, true}, // 53 paren-flag + {doMatchMode, 100 /* d */, 53, 0, true}, // 54 + {doMatchMode, 109 /* m */, 53, 0, true}, // 55 + {doMatchMode, 115 /* s */, 53, 0, true}, // 56 + {doMatchMode, 117 /* u */, 53, 0, true}, // 57 + {doMatchMode, 119 /* w */, 53, 0, true}, // 58 + {doMatchMode, 120 /* x */, 53, 0, true}, // 59 + {doMatchMode, 45 /* - */, 53, 0, true}, // 60 + {doSetMatchMode, 41 /* ) */, 2, 0, true}, // 61 + {doMatchModeParen, 58 /* : */, 2, 14, true}, // 62 + {doBadModeFlag, 255, 206, 0, false}, // 63 + {doContinueNamedCapture, 129, 64, 0, true}, // 64 named-capture + {doContinueNamedCapture, 128, 64, 0, true}, // 65 + {doOpenCaptureParen, 62 /* > */, 2, 14, true}, // 66 + {doBadNamedCapture, 255, 206, 0, false}, // 67 + {doNGStar, 63 /* ? */, 20, 0, true}, // 68 quant-star + {doPossessiveStar, 43 /* + */, 20, 0, true}, // 69 + {doStar, 255, 20, 0, false}, // 70 + {doNGPlus, 63 /* ? */, 20, 0, true}, // 71 quant-plus + {doPossessivePlus, 43 /* + */, 20, 0, true}, // 72 + {doPlus, 255, 20, 0, false}, // 73 + {doNGOpt, 63 /* ? */, 20, 0, true}, // 74 quant-opt + {doPossessiveOpt, 43 /* + */, 20, 0, true}, // 75 + {doOpt, 255, 20, 0, false}, // 76 + {doNOP, 128, 79, 0, false}, // 77 interval-open + {doIntervalError, 255, 206, 0, false}, // 78 + {doIntevalLowerDigit, 128, 79, 0, true}, // 79 interval-lower + {doNOP, 44 /* , */, 83, 0, true}, // 80 + {doIntervalSame, 125 /* } */, 86, 0, true}, // 81 + {doIntervalError, 255, 206, 0, false}, // 82 + {doIntervalUpperDigit, 128, 83, 0, true}, // 83 interval-upper + {doNOP, 125 /* } */, 86, 0, true}, // 84 + {doIntervalError, 255, 206, 0, false}, // 85 + {doNGInterval, 63 /* ? 
*/, 20, 0, true}, // 86 interval-type + {doPossessiveInterval, 43 /* + */, 20, 0, true}, // 87 + {doInterval, 255, 20, 0, false}, // 88 + {doBackslashA, 65 /* A */, 2, 0, true}, // 89 backslash + {doBackslashB, 66 /* B */, 2, 0, true}, // 90 + {doBackslashb, 98 /* b */, 2, 0, true}, // 91 + {doBackslashd, 100 /* d */, 14, 0, true}, // 92 + {doBackslashD, 68 /* D */, 14, 0, true}, // 93 + {doBackslashG, 71 /* G */, 2, 0, true}, // 94 + {doBackslashh, 104 /* h */, 14, 0, true}, // 95 + {doBackslashH, 72 /* H */, 14, 0, true}, // 96 + {doNOP, 107 /* k */, 115, 0, true}, // 97 + {doNamedChar, 78 /* N */, 14, 0, false}, // 98 + {doProperty, 112 /* p */, 14, 0, false}, // 99 + {doProperty, 80 /* P */, 14, 0, false}, // 100 + {doBackslashR, 82 /* R */, 14, 0, true}, // 101 + {doEnterQuoteMode, 81 /* Q */, 2, 0, true}, // 102 + {doBackslashS, 83 /* S */, 14, 0, true}, // 103 + {doBackslashs, 115 /* s */, 14, 0, true}, // 104 + {doBackslashv, 118 /* v */, 14, 0, true}, // 105 + {doBackslashV, 86 /* V */, 14, 0, true}, // 106 + {doBackslashW, 87 /* W */, 14, 0, true}, // 107 + {doBackslashw, 119 /* w */, 14, 0, true}, // 108 + {doBackslashX, 88 /* X */, 14, 0, true}, // 109 + {doBackslashZ, 90 /* Z */, 2, 0, true}, // 110 + {doBackslashz, 122 /* z */, 2, 0, true}, // 111 + {doBackRef, 128, 14, 0, true}, // 112 + {doEscapeError, 253, 206, 0, false}, // 113 + {doEscapedLiteralChar, 255, 14, 0, true}, // 114 + {doBeginNamedBackRef, 60 /* < */, 117, 0, true}, // 115 named-backref + {doBadNamedCapture, 255, 206, 0, false}, // 116 + {doContinueNamedBackRef, 129, 119, 0, true}, // 117 named-backref-2 + {doBadNamedCapture, 255, 206, 0, false}, // 118 + {doContinueNamedBackRef, 129, 119, 0, true}, // 119 named-backref-3 + {doContinueNamedBackRef, 128, 119, 0, true}, // 120 + {doCompleteNamedBackRef, 62 /* > */, 14, 0, true}, // 121 + {doBadNamedCapture, 255, 206, 0, false}, // 122 + {doSetNegate, 94 /* ^ */, 126, 0, true}, // 123 set-open + {doSetPosixProp, 58 /* : */, 128, 0, 
false}, // 124 + {doNOP, 255, 126, 0, false}, // 125 + {doSetLiteral, 93 /* ] */, 141, 0, true}, // 126 set-open2 + {doNOP, 255, 131, 0, false}, // 127 + {doSetEnd, 93 /* ] */, 255, 0, true}, // 128 set-posix + {doNOP, 58 /* : */, 131, 0, false}, // 129 + {doRuleError, 255, 206, 0, false}, // 130 + {doSetEnd, 93 /* ] */, 255, 0, true}, // 131 set-start + {doSetBeginUnion, 91 /* [ */, 123, 148, true}, // 132 + {doNOP, 92 /* \ */, 191, 0, true}, // 133 + {doNOP, 45 /* - */, 137, 0, true}, // 134 + {doNOP, 38 /* & */, 139, 0, true}, // 135 + {doSetLiteral, 255, 141, 0, true}, // 136 + {doRuleError, 45 /* - */, 206, 0, false}, // 137 set-start-dash + {doSetAddDash, 255, 141, 0, false}, // 138 + {doRuleError, 38 /* & */, 206, 0, false}, // 139 set-start-amp + {doSetAddAmp, 255, 141, 0, false}, // 140 + {doSetEnd, 93 /* ] */, 255, 0, true}, // 141 set-after-lit + {doSetBeginUnion, 91 /* [ */, 123, 148, true}, // 142 + {doNOP, 45 /* - */, 178, 0, true}, // 143 + {doNOP, 38 /* & */, 169, 0, true}, // 144 + {doNOP, 92 /* \ */, 191, 0, true}, // 145 + {doSetNoCloseError, 253, 206, 0, false}, // 146 + {doSetLiteral, 255, 141, 0, true}, // 147 + {doSetEnd, 93 /* ] */, 255, 0, true}, // 148 set-after-set + {doSetBeginUnion, 91 /* [ */, 123, 148, true}, // 149 + {doNOP, 45 /* - */, 171, 0, true}, // 150 + {doNOP, 38 /* & */, 166, 0, true}, // 151 + {doNOP, 92 /* \ */, 191, 0, true}, // 152 + {doSetNoCloseError, 253, 206, 0, false}, // 153 + {doSetLiteral, 255, 141, 0, true}, // 154 + {doSetEnd, 93 /* ] */, 255, 0, true}, // 155 set-after-range + {doSetBeginUnion, 91 /* [ */, 123, 148, true}, // 156 + {doNOP, 45 /* - */, 174, 0, true}, // 157 + {doNOP, 38 /* & */, 176, 0, true}, // 158 + {doNOP, 92 /* \ */, 191, 0, true}, // 159 + {doSetNoCloseError, 253, 206, 0, false}, // 160 + {doSetLiteral, 255, 141, 0, true}, // 161 + {doSetBeginUnion, 91 /* [ */, 123, 148, true}, // 162 set-after-op + {doSetOpError, 93 /* ] */, 206, 0, false}, // 163 + {doNOP, 92 /* \ */, 191, 0, true}, // 
164 + {doSetLiteral, 255, 141, 0, true}, // 165 + {doSetBeginIntersection1, 91 /* [ */, 123, 148, true}, // 166 set-set-amp + {doSetIntersection2, 38 /* & */, 162, 0, true}, // 167 + {doSetAddAmp, 255, 141, 0, false}, // 168 + {doSetIntersection2, 38 /* & */, 162, 0, true}, // 169 set-lit-amp + {doSetAddAmp, 255, 141, 0, false}, // 170 + {doSetBeginDifference1, 91 /* [ */, 123, 148, true}, // 171 set-set-dash + {doSetDifference2, 45 /* - */, 162, 0, true}, // 172 + {doSetAddDash, 255, 141, 0, false}, // 173 + {doSetDifference2, 45 /* - */, 162, 0, true}, // 174 set-range-dash + {doSetAddDash, 255, 141, 0, false}, // 175 + {doSetIntersection2, 38 /* & */, 162, 0, true}, // 176 set-range-amp + {doSetAddAmp, 255, 141, 0, false}, // 177 + {doSetDifference2, 45 /* - */, 162, 0, true}, // 178 set-lit-dash + {doSetAddDash, 91 /* [ */, 141, 0, false}, // 179 + {doSetAddDash, 93 /* ] */, 141, 0, false}, // 180 + {doNOP, 92 /* \ */, 183, 0, true}, // 181 + {doSetRange, 255, 155, 0, true}, // 182 + {doSetOpError, 115 /* s */, 206, 0, false}, // 183 set-lit-dash-escape + {doSetOpError, 83 /* S */, 206, 0, false}, // 184 + {doSetOpError, 119 /* w */, 206, 0, false}, // 185 + {doSetOpError, 87 /* W */, 206, 0, false}, // 186 + {doSetOpError, 100 /* d */, 206, 0, false}, // 187 + {doSetOpError, 68 /* D */, 206, 0, false}, // 188 + {doSetNamedRange, 78 /* N */, 155, 0, false}, // 189 + {doSetRange, 255, 155, 0, true}, // 190 + {doSetProp, 112 /* p */, 148, 0, false}, // 191 set-escape + {doSetProp, 80 /* P */, 148, 0, false}, // 192 + {doSetNamedChar, 78 /* N */, 141, 0, false}, // 193 + {doSetBackslashs, 115 /* s */, 155, 0, true}, // 194 + {doSetBackslashS, 83 /* S */, 155, 0, true}, // 195 + {doSetBackslashw, 119 /* w */, 155, 0, true}, // 196 + {doSetBackslashW, 87 /* W */, 155, 0, true}, // 197 + {doSetBackslashd, 100 /* d */, 155, 0, true}, // 198 + {doSetBackslashD, 68 /* D */, 155, 0, true}, // 199 + {doSetBackslashh, 104 /* h */, 155, 0, true}, // 200 + {doSetBackslashH, 
72 /* H */, 155, 0, true}, // 201 + {doSetBackslashv, 118 /* v */, 155, 0, true}, // 202 + {doSetBackslashV, 86 /* V */, 155, 0, true}, // 203 + {doSetLiteralEscaped, 255, 141, 0, true}, // 204 + {doSetFinish, 255, 14, 0, false}, // 205 set-finish + {doExit, 255, 206, 0, true}, // 206 errorDeath +} diff --git a/go/mysql/icuregex/debug.go b/go/mysql/icuregex/debug.go new file mode 100644 index 00000000000..92c43e704d7 --- /dev/null +++ b/go/mysql/icuregex/debug.go @@ -0,0 +1,151 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package icuregex + +import ( + "fmt" + "io" +) + +func (pat *Pattern) Dump(w io.Writer) { + fmt.Fprintf(w, "Original Pattern: \"%s\"\n", pat.pattern) + fmt.Fprintf(w, " Min Match Length: %d\n", pat.minMatchLen) + fmt.Fprintf(w, " Match Start Type: %v\n", pat.startType) + if pat.startType == startString { + fmt.Fprintf(w, " Initial match string: \"%s\"\n", string(pat.literalText[pat.initialStringIdx:pat.initialStringIdx+pat.initialStringLen])) + } else if pat.startType == startSet { + fmt.Fprintf(w, " Match First Chars: %s\n", pat.initialChars.String()) + } else if pat.startType == startChar { + fmt.Fprintf(w, " First char of Match: ") + if pat.initialChar > 0x20 { + fmt.Fprintf(w, "'%c'\n", pat.initialChar) + } else { + fmt.Fprintf(w, "%#x\n", pat.initialChar) + } + } + + fmt.Fprintf(w, "Named Capture Groups:\n") + if len(pat.namedCaptureMap) == 0 { + fmt.Fprintf(w, " None\n") + } else { + for name, number := range pat.namedCaptureMap { + fmt.Fprintf(w, " %d\t%s\n", number, name) + } + } + + fmt.Fprintf(w, "\nIndex Binary Type Operand\n-------------------------------------------\n") + for idx := range pat.compiledPat { + pat.dumpOp(w, idx) + } + fmt.Fprintf(w, "\n\n") +} + +func (pat *Pattern) dumpOp(w io.Writer, index int) { + op := pat.compiledPat[index] + val := op.value() + opType := op.typ() + pinnedType := opType + if int(pinnedType) >= len(urxOpcodeNames) { + pinnedType = 0 + } + + fmt.Fprintf(w, "%4d %08x %-15s ", index, op, urxOpcodeNames[pinnedType]) + + switch opType { + case urxNop, + urxDotany, + urxDotanyAll, + urxFail, + urxCaret, + urxDollar, + urxBackslashG, + urxBackslashX, + urxEnd, + urxDollarM, + urxCaretM: + // Types with no operand field of interest. 
+ + case urxReservedOp, + urxStartCapture, + urxEndCapture, + urxStateSave, + urxJmp, + urxJmpSav, + urxJmpSavX, + urxBackslashB, + urxBackslashBu, + urxBackslashD, + urxBackslashZ, + urxStringLen, + urxCtrInit, + urxCtrInitNg, + utxCtrLoop, + urxCtrLoopNg, + urxRelocOprnd, + urxStoSp, + urxLdSp, + urxBackref, + urxStoInpLoc, + urxJmpx, + urxLaStart, + urxLaEnd, + urxBackrefI, + urxLbStart, + urxLbCont, + urxLbEnd, + urxLbnCount, + urxLbnEnd, + urxLoopC, + urxLoopDotI, + urxBackslashH, + urxBackslashR, + urxBackslashV: + // types with an integer operand field. + fmt.Fprintf(w, "%d", val) + + case urxOnechar, urcOnecharI: + if val < 0x20 { + fmt.Fprintf(w, "%#x", val) + } else { + fmt.Fprintf(w, "'%c'", rune(val)) + } + + case urxString, urxStringI: + lengthOp := pat.compiledPat[index+1] + length := lengthOp.value() + fmt.Fprintf(w, "%q", string(pat.literalText[val:val+length])) + + case urxSetref, urxLoopSrI: + fmt.Fprintf(w, "%s", pat.sets[val].String()) + + case urxStaticSetref, urxStatSetrefN: + if (val & urxNegSet) != 0 { + fmt.Fprintf(w, "NOT ") + val &= ^urxNegSet + } + fmt.Fprintf(w, "%s", staticPropertySets[val].String()) + + default: + fmt.Fprintf(w, "??????") + } + fmt.Fprintf(w, "\n") +} diff --git a/go/mysql/icuregex/error.go b/go/mysql/icuregex/error.go new file mode 100644 index 00000000000..39c92399aa9 --- /dev/null +++ b/go/mysql/icuregex/error.go @@ -0,0 +1,152 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex + +import ( + "fmt" + "strings" +) + +type CompileError struct { + Code CompileErrorCode + Line int + Offset int + Context string +} + +func (e *CompileError) Error() string { + var out strings.Builder + switch e.Code { + case InternalError: + out.WriteString("Internal error") + case RuleSyntax: + out.WriteString("Syntax error") + case BadEscapeSequence: + out.WriteString("Bad escape sequence") + case PropertySyntax: + out.WriteString("Property syntax error") + case Unimplemented: + out.WriteString("Unimplemented") + case MismatchedParen: + out.WriteString("Mismatched parentheses") + case NumberTooBig: + out.WriteString("Number too big") + case BadInterval: + out.WriteString("Bad interval") + case MaxLtMin: + out.WriteString("Max less than min") + case InvalidBackRef: + out.WriteString("Invalid back reference") + case InvalidFlag: + out.WriteString("Invalid flag") + case LookBehindLimit: + out.WriteString("Look behind limit") + case MissingCloseBracket: + out.WriteString("Missing closing ]") + case InvalidRange: + out.WriteString("Invalid range") + case PatternTooBig: + out.WriteString("Pattern too big") + case InvalidCaptureGroupName: + out.WriteString("Invalid capture group name") + } + _, _ = fmt.Fprintf(&out, " in regular expression on line %d, character %d: `%s`", e.Line, e.Offset, e.Context) + + return out.String() +} + +type MatchError struct { + Code MatchErrorCode + Pattern string + Position int + Input []rune +} + +const maxMatchInputLength = 20 + +func (e *MatchError) Error() string { + var out strings.Builder + switch e.Code { + case 
StackOverflow: + out.WriteString("Stack overflow") + case TimeOut: + out.WriteString("Timeout") + case InternalMatchError: + out.WriteString("Internal error") + } + + input := e.Input + if len(input) > maxMatchInputLength { + var b []rune + start := e.Position - maxMatchInputLength/2 + if start < 0 { + start = 0 + } else { + b = append(b, '.', '.', '.') + } + end := start + maxMatchInputLength + trailing := true + if end > len(input) { + end = len(input) + trailing = false + } + b = append(b, input[start:end]...) + if trailing { + b = append(b, '.', '.', '.') + } + input = b + } + _, _ = fmt.Fprintf(&out, " for expression `%s` at position %d in: %q", e.Pattern, e.Position, string(input)) + + return out.String() +} + +type Code int32 + +type CompileErrorCode int32 + +const ( + InternalError CompileErrorCode = iota + 1 /**< An internal error (bug) was detected. */ + RuleSyntax /**< Syntax error in regexp pattern. */ + BadEscapeSequence /**< Unrecognized backslash escape sequence in pattern */ + PropertySyntax /**< Incorrect Unicode property */ + Unimplemented /**< Use of regexp feature that is not yet implemented. */ + MismatchedParen /**< Incorrectly nested parentheses in regexp pattern. */ + NumberTooBig /**< Decimal number is too large. */ + BadInterval /**< Error in {min,max} interval */ + MaxLtMin /**< In {min,max}, max is less than min. */ + InvalidBackRef /**< Back-reference to a non-existent capture group. */ + InvalidFlag /**< Invalid value for match mode flags. */ + LookBehindLimit /**< Look-Behind pattern matches must have a bounded maximum length. */ + MissingCloseBracket /**< Missing closing bracket on a bracket expression. */ + InvalidRange /**< In a character range [x-y], x is greater than y. */ + PatternTooBig /**< Pattern exceeds limits on size or complexity. @stable ICU 55 */ + InvalidCaptureGroupName /**< Invalid capture group name. 
@stable ICU 55 */ +) + +type MatchErrorCode int32 + +const ( + StackOverflow MatchErrorCode = iota /**< Regular expression backtrack stack overflow. */ + TimeOut /**< Maximum allowed match time exceeded */ + InternalMatchError /**< Internal error (bug) was detected. */ +) diff --git a/go/vt/vtgr/controller/error.go b/go/mysql/icuregex/errors/error.go similarity index 55% rename from go/vt/vtgr/controller/error.go rename to go/mysql/icuregex/errors/error.go index 5613c802524..f03a5157acf 100644 --- a/go/vt/vtgr/controller/error.go +++ b/go/mysql/icuregex/errors/error.go @@ -1,5 +1,10 @@ /* -Copyright 2021 The Vitess Authors. +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +19,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controller +package errors import "errors" -var ( - errMissingPrimaryTablet = errors.New("no primary tablet available") - errMissingGroup = errors.New("no mysql group") - errForceAbortBootstrap = errors.New("force abort bootstrap") -) +var ErrIllegalArgument = errors.New("illegal argument") +var ErrUnsupported = errors.New("unsupported") diff --git a/go/mysql/icuregex/icu_test.go b/go/mysql/icuregex/icu_test.go new file mode 100644 index 00000000000..9e9be505df7 --- /dev/null +++ b/go/mysql/icuregex/icu_test.go @@ -0,0 +1,415 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. 
+License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex_test + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "regexp" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/icuregex" + "vitess.io/vitess/go/mysql/icuregex/internal/pattern" +) + +var ErrSkip = errors.New("ignored test") + +type Matcher int8 + +const ( + FuncFind Matcher = iota + FuncMatches + FuncLookingAt +) + +type Expectation int8 + +const ( + Unknown Expectation = iota + Expected + NotExpected +) + +type TestPattern struct { + Line string + Lineno int + + Pattern string + Flags icuregex.RegexpFlag + Options struct { + MatchFunc Matcher + FindCount int + MatchOnly bool + MustError bool + Dump bool + HitEnd Expectation + RequireEnd Expectation + } + Input string + Groups []TestGroup +} + +type TestGroup struct { + Start, End int +} + +var parsePattern = regexp.MustCompile(`<(/?)(r|[0-9]+)>`) + +func (tp *TestPattern) parseFlags(line string) (string, error) { + for len(line) > 0 { + switch line[0] { + case '"', '\'', '/': + return line, nil + case ' ', '\t': + case 'i': + tp.Flags |= icuregex.CaseInsensitive + case 'x': + tp.Flags |= icuregex.Comments + case 's': + tp.Flags |= icuregex.DotAll + case 'm': + tp.Flags |= icuregex.Multiline + case 'e': + tp.Flags |= icuregex.ErrorOnUnknownEscapes + case 'D': + tp.Flags |= icuregex.UnixLines + case 'Q': + tp.Flags |= 
icuregex.Literal + case '2', '3', '4', '5', '6', '7', '8', '9': + tp.Options.FindCount = int(line[0] - '0') + case 'G': + tp.Options.MatchOnly = true + case 'E': + tp.Options.MustError = true + case 'd': + tp.Options.Dump = true + case 'L': + tp.Options.MatchFunc = FuncLookingAt + case 'M': + tp.Options.MatchFunc = FuncMatches + case 'v': + tp.Options.MustError = !icuregex.BreakIteration + case 'a', 'b': + return "", ErrSkip + case 'z': + tp.Options.HitEnd = Expected + case 'Z': + tp.Options.HitEnd = NotExpected + case 'y': + tp.Options.RequireEnd = Expected + case 'Y': + tp.Options.RequireEnd = NotExpected + default: + return "", fmt.Errorf("unexpected modifier '%c'", line[0]) + } + line = line[1:] + } + return "", io.ErrUnexpectedEOF +} + +func (tp *TestPattern) parseMatch(orig string) error { + input, ok := pattern.Unescape(orig) + if !ok { + return fmt.Errorf("failed to unquote input: %s", orig) + } + + var detagged []rune + var last int + + m := parsePattern.FindAllStringSubmatchIndex(input, -1) + for _, g := range m { + detagged = append(detagged, []rune(input[last:g[0]])...) + last = g[1] + + closing := input[g[2]:g[3]] == "/" + groupNum := input[g[4]:g[5]] + if groupNum == "r" { + return ErrSkip + } + num, err := strconv.Atoi(groupNum) + if err != nil { + return fmt.Errorf("bad group number %q: %w", groupNum, err) + } + + if num >= len(tp.Groups) { + grp := make([]TestGroup, num+1) + for i := range grp { + grp[i].Start = -1 + grp[i].End = -1 + } + copy(grp, tp.Groups) + tp.Groups = grp + } + + if closing { + tp.Groups[num].End = len(detagged) + } else { + tp.Groups[num].Start = len(detagged) + } + } + + detagged = append(detagged, []rune(input[last:])...) 
+ tp.Input = string(detagged) + return nil +} + +func ParseTestFile(t testing.TB, filename string) []TestPattern { + f, err := os.Open(filename) + require.NoError(t, err) + + defer f.Close() + scanner := bufio.NewScanner(f) + var lineno int + var patterns []TestPattern + + errFunc := func(err error) { + if err == ErrSkip { + return + } + t.Errorf("Parse error: %v\n%03d: %s", err, lineno, scanner.Text()) + } + + for scanner.Scan() { + lineno++ + line := scanner.Text() + line = strings.TrimSpace(line) + + if len(line) == 0 || line[0] == '#' { + continue + } + + var tp TestPattern + tp.Line = line + tp.Lineno = lineno + + idx := strings.IndexByte(line[1:], line[0]) + + tp.Pattern = line[1 : idx+1] + line, err = tp.parseFlags(line[idx+2:]) + if err != nil { + errFunc(err) + continue + } + + idx = strings.IndexByte(line[1:], line[0]) + err = tp.parseMatch(line[1 : idx+1]) + if err != nil { + errFunc(err) + continue + } + + patterns = append(patterns, tp) + } + + err = scanner.Err() + require.NoError(t, err) + return patterns +} + +func (tp *TestPattern) fail(t testing.TB, msg string, args ...any) bool { + t.Helper() + msg = fmt.Sprintf(msg, args...) 
+ t.Errorf("%s (in line %d)\nregexp: %s\ninput: %q\noriginal: %s", msg, tp.Lineno, tp.Pattern, tp.Input, tp.Line) + return false +} + +func (tp *TestPattern) Test(t testing.TB) bool { + re, err := func() (re *icuregex.Pattern, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("PANIC: %v", r) + } + }() + re, err = icuregex.CompileString(tp.Pattern, tp.Flags) + return + }() + if err != nil { + if tp.Options.MustError { + return true + } + + return tp.fail(t, "unexpected parser failure: %v", err) + } + if tp.Options.MustError { + return tp.fail(t, "parse failure expected") + } + + matcher := re.Match(tp.Input) + var isMatch bool + var findCount = tp.Options.FindCount + if findCount == 0 { + findCount = 1 + } + + for i := 0; i < findCount; i++ { + isMatch, err = func() (bool, error) { + defer func() { + if r := recover(); r != nil { + tp.fail(t, "unexpected match failure: %v", r) + } + }() + switch tp.Options.MatchFunc { + case FuncMatches: + return matcher.Matches() + case FuncLookingAt: + return matcher.LookingAt() + case FuncFind: + return matcher.Find() + default: + panic("invalid MatchFunc") + } + }() + } + + require.NoError(t, err) + + if !isMatch && len(tp.Groups) > 0 { + return tp.fail(t, "Match expected, but none found.") + } + if isMatch && len(tp.Groups) == 0 { + return tp.fail(t, "No match expected, but found one at position %d", matcher.Start()) + } + if tp.Options.MatchOnly { + return true + } + + for i := 0; i < matcher.GroupCount(); i++ { + expectedStart := -1 + expectedEnd := -1 + + if i < len(tp.Groups) { + expectedStart = tp.Groups[i].Start + expectedEnd = tp.Groups[i].End + } + if gotStart := matcher.StartForGroup(i); gotStart != expectedStart { + return tp.fail(t, "Incorrect start position for group %d. Expected %d, got %d", i, expectedStart, gotStart) + } + if gotEnd := matcher.EndForGroup(i); gotEnd != expectedEnd { + return tp.fail(t, "Incorrect end position for group %d. 
Expected %d, got %d", i, expectedEnd, gotEnd) + } + } + + if matcher.GroupCount()+1 < len(tp.Groups) { + return tp.fail(t, "Expected %d capture groups, found %d", len(tp.Groups)-1, matcher.GroupCount()) + } + + if tp.Options.HitEnd == Expected && !matcher.HitEnd() { + return tp.fail(t, "HitEnd() returned false. Expected true") + } + if tp.Options.HitEnd == NotExpected && matcher.HitEnd() { + return tp.fail(t, "HitEnd() returned true. Expected false") + } + + if tp.Options.RequireEnd == Expected && !matcher.RequireEnd() { + return tp.fail(t, "RequireEnd() returned false. Expected true") + } + if tp.Options.RequireEnd == NotExpected && matcher.RequireEnd() { + return tp.fail(t, "RequireEnd() returned true. Expected false") + } + + return true +} + +func TestICU(t *testing.T) { + pats := ParseTestFile(t, "testdata/regextst.txt") + + var valid int + + for _, p := range pats { + if p.Test(t) { + valid++ + } + } + + t.Logf("%d/%d (%.02f)", valid, len(pats), float64(valid)/float64(len(pats))) +} + +func TestICUExtended(t *testing.T) { + // This tests additional cases that aren't covered in the + // copied ICU test suite. 
+ pats := ParseTestFile(t, "testdata/regextst_extended.txt") + + var valid int + + for _, p := range pats { + if p.Test(t) { + valid++ + } + } + + t.Logf("%d/%d (%.02f)", valid, len(pats), float64(valid)/float64(len(pats))) +} + +func TestCornerCases(t *testing.T) { + var cases = []struct { + Pattern string + Input string + Flags icuregex.RegexpFlag + Match bool + }{ + {`xyz$`, "xyz\n", 0, true}, + {`a*+`, "abbxx", 0, true}, + {`(ABC){1,2}+ABC`, "ABCABCABC", 0, true}, + {`(ABC){2,3}+ABC`, "ABCABCABC", 0, false}, + {`(abc)*+a`, "abcabcabc", 0, false}, + {`(abc)*+a`, "abcabcab", 0, true}, + {`a\N{LATIN SMALL LETTER B}c`, "abc", 0, true}, + {`a.b`, "a\rb", icuregex.UnixLines, true}, + {`a.b`, "a\rb", 0, false}, + {`(?d)abc$`, "abc\r", 0, false}, + {`[ \b]`, "b", 0, true}, + {`[abcd-\N{LATIN SMALL LETTER G}]+`, "xyz-abcdefghij-", 0, true}, + {`[[abcd]&&[ac]]+`, "bacacd", 0, true}, + } + + for _, tc := range cases { + t.Run(tc.Pattern, func(t *testing.T) { + _, err := icuregex.CompileString(tc.Pattern, tc.Flags) + require.NoError(t, err) + }) + } +} + +func TestOne(t *testing.T) { + const Pattern = `\p{CaseIgnorable}` + const Input = "foo.bar" + const Flags = 0 + + re, err := icuregex.CompileString(Pattern, Flags) + require.NoError(t, err) + + re.Dump(os.Stderr) + + m := icuregex.NewMatcher(re) + m.Dumper(os.Stderr) + m.ResetString(Input) + found, err := m.Find() + require.NoError(t, err) + t.Logf("match = %v", found) +} diff --git a/go/mysql/icuregex/internal/bytestrie/bytes_trie.go b/go/mysql/icuregex/internal/bytestrie/bytes_trie.go new file mode 100644 index 00000000000..aff80dc3e69 --- /dev/null +++ b/go/mysql/icuregex/internal/bytestrie/bytes_trie.go @@ -0,0 +1,354 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. 
+License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bytestrie + +type BytesTrie struct { + pos []byte + original []byte + remainingMatchLength int32 +} + +func New(pos []byte) BytesTrie { + return BytesTrie{pos: pos, original: pos, remainingMatchLength: -1} +} + +type result int32 + +const ( /** + * The input unit(s) did not continue a matching string. + * Once current()/next() return NO_MATCH, + * all further calls to current()/next() will also return NO_MATCH, + * until the trie is reset to its original state or to a saved state. + * @stable ICU 4.8 + */ + noMatch result = iota + /** + * The input unit(s) continued a matching string + * but there is no value for the string so far. + * (It is a prefix of a longer string.) + * @stable ICU 4.8 + */ + noValue + /** + * The input unit(s) continued a matching string + * and there is a value for the string so far. + * This value will be returned by getValue(). + * No further input byte/unit can continue a matching string. + * @stable ICU 4.8 + */ + finalValue + /** + * The input unit(s) continued a matching string + * and there is a value for the string so far. + * This value will be returned by getValue(). + * Another input byte/unit can continue a matching string. + * @stable ICU 4.8 + */ + intermediateValue +) + +const ( + maxBranchLinearSubNodeLength = 5 + + // 10..1f: Linear-match node, match 1..16 bytes and continue reading the next node. 
+ minLinearMatch = 0x10 + maxLinearMatchLength = 0x10 + + // 20..ff: Variable-length value node. + // If odd, the value is final. (Otherwise, intermediate value or jump delta.) + // Then shift-right by 1 bit. + // The remaining lead byte value indicates the number of following bytes (0..4) + // and contains the value's top bits. + minValueLead = minLinearMatch + maxLinearMatchLength // 0x20 + // It is a final value if bit 0 is set. + valueIsFinal = 1 + + // Compact value: After testing bit 0, shift right by 1 and then use the following thresholds. + minOneByteValueLead = minValueLead / 2 // 0x10 + maxOneByteValue = 0x40 // At least 6 bits in the first byte. + + minTwoByteValueLead = minOneByteValueLead + maxOneByteValue + 1 // 0x51 + maxTwoByteValue = 0x1aff + minThreeByteValueLead = minTwoByteValueLead + (maxTwoByteValue >> 8) + 1 // 0x6c + fourByteValueLead = 0x7e + + // Compact delta integers. + maxOneByteDelta = 0xbf + minTwoByteDeltaLead = maxOneByteDelta + 1 // 0xc0 + minThreeByteDeltaLead = 0xf0 + fourByteDeltaLead = 0xfe +) + +func (bt *BytesTrie) ContainsName(name string) bool { + result := noValue + for _, c := range []byte(name) { + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + if c == 0x2d || c == 0x5f || c == 0x20 || (0x09 <= c && c <= 0x0d) { + continue + } + if result&1 == 0 { + return false + } + result = bt.next(int32(c)) + } + return result >= finalValue +} + +func (bt *BytesTrie) next(inByte int32) result { + pos := bt.pos + if pos == nil { + return noMatch + } + if inByte < 0 { + inByte += 0x100 + } + length := bt.remainingMatchLength // Actual remaining match length minus 1. + if length >= 0 { + match := inByte == int32(pos[0]) + pos = pos[1:] + // Remaining part of a linear-match node. 
+ if match { + length = length - 1 + bt.remainingMatchLength = length + bt.pos = pos + if length < 0 { + node := int32(pos[0]) + if node >= minValueLead { + return bt.valueResult(node) + } + } + return noValue + } + bt.stop() + return noMatch + } + return bt.nextImpl(pos, inByte) +} + +func (bt *BytesTrie) nextImpl(pos []byte, inByte int32) result { + for { + node := int32(pos[0]) + pos = pos[1:] + if node < minLinearMatch { + return bt.branchNext(pos, node, inByte) + } else if node < minValueLead { + // Match the first of length+1 bytes. + length := node - minLinearMatch // Actual match length minus 1. + match := inByte == int32(pos[0]) + pos = pos[1:] + if match { + length = length - 1 + bt.remainingMatchLength = length + bt.pos = pos + if length < 0 { + node = int32(pos[0]) + if node >= minValueLead { + return bt.valueResult(node) + } + } + return noValue + } + // No match. + break + } else if (node & valueIsFinal) != 0 { + // No further matching bytes. + break + } else { + // Skip intermediate value. + pos = bt.skipValue2(pos, node) + // The next node must not also be a value node. + } + } + bt.stop() + return noMatch +} + +func (bt *BytesTrie) stop() { + bt.pos = nil +} + +func (bt *BytesTrie) valueResult(node int32) result { + return intermediateValue - result(node&valueIsFinal) +} + +func (bt *BytesTrie) branchNext(pos []byte, length int32, inByte int32) result { + // Branch according to the current unit. + if length == 0 { + length = int32(pos[0]) + pos = pos[1:] + } + length++ + // The length of the branch is the number of units to select from. + // The data structure encodes a binary search. + for length > maxBranchLinearSubNodeLength { + p := int32(pos[0]) + pos = pos[1:] + if inByte < p { + length >>= 1 + pos = bt.jumpByDelta(pos) + } else { + length = length - (length >> 1) + pos = bt.skipDelta(pos) + } + } + // Drop down to linear search for the last few bytes. 
+ // length>=2 because the loop body above sees length>kMaxBranchLinearSubNodeLength>=3 + // and divides length by 2. + for { + p := int32(pos[0]) + pos = pos[1:] + if inByte == p { + var result result + node := int32(pos[0]) + if (node & valueIsFinal) != 0 { + // Leave the final value for getValue() to read. + result = finalValue + } else { + // Use the non-final value as the jump delta. + pos = pos[1:] + // int32_t delta=readValue(pos, node>>1); + node >>= 1 + var delta int32 + if node < minTwoByteValueLead { + delta = node - minOneByteValueLead + } else if node < minThreeByteValueLead { + delta = ((node - minTwoByteValueLead) << 8) | int32(pos[0]) + pos = pos[1:] + } else if node < fourByteValueLead { + delta = ((node - minThreeByteValueLead) << 16) | (int32(pos[0]) << 8) | int32(pos[1]) + pos = pos[2:] + } else if node == fourByteValueLead { + delta = (int32(pos[0]) << 16) | (int32(pos[1]) << 8) | int32(pos[2]) + pos = pos[3:] + } else { + delta = (int32(pos[0]) << 24) | (int32(pos[1]) << 16) | (int32(pos[2]) << 8) | int32(pos[3]) + pos = pos[4:] + } + // end readValue() + pos = pos[delta:] + node = int32(pos[0]) + if node >= minValueLead { + result = bt.valueResult(node) + } else { + result = noValue + } + } + bt.pos = pos + return result + } + length-- + pos = bt.skipValue1(pos) + if length <= 1 { + break + } + } + p := int32(pos[0]) + pos = pos[1:] + if inByte == p { + bt.pos = pos + node := int32(pos[0]) + if node >= minValueLead { + return bt.valueResult(node) + } + return noValue + } + bt.stop() + return noMatch +} + +func (bt *BytesTrie) skipValue1(pos []byte) []byte { + leadByte := int32(pos[0]) + return bt.skipValue2(pos[1:], leadByte) +} + +func (bt *BytesTrie) skipValue2(pos []byte, leadByte int32) []byte { + if leadByte >= (minTwoByteValueLead << 1) { + if leadByte < (minThreeByteValueLead << 1) { + pos = pos[1:] + } else if leadByte < (fourByteValueLead << 1) { + pos = pos[2:] + } else { + pos = pos[3+((leadByte>>1)&1):] + } + } + return pos +} + 
+func (bt *BytesTrie) skipDelta(pos []byte) []byte { + delta := int32(pos[0]) + pos = pos[1:] + if delta >= minTwoByteDeltaLead { + if delta < minThreeByteDeltaLead { + pos = pos[1:] + } else if delta < fourByteDeltaLead { + pos = pos[2:] + } else { + pos = pos[3+(delta&1):] + } + } + return pos +} + +func (bt *BytesTrie) jumpByDelta(pos []byte) []byte { + delta := int32(pos[0]) + pos = pos[1:] + if delta < minTwoByteDeltaLead { + // nothing to do + } else if delta < minThreeByteDeltaLead { + delta = ((delta - minTwoByteDeltaLead) << 8) | int32(pos[0]) + pos = pos[1:] + } else if delta < fourByteDeltaLead { + delta = ((delta - minThreeByteDeltaLead) << 16) | (int32(pos[0]) << 8) | int32(pos[1]) + pos = pos[2:] + } else if delta == fourByteDeltaLead { + delta = (int32(pos[0]) << 16) | (int32(pos[1]) << 8) | int32(pos[2]) + pos = pos[3:] + } else { + delta = (int32(pos[0]) << 24) | (int32(pos[1]) << 16) | (int32(pos[2]) << 8) | int32(pos[3]) + pos = pos[4:] + } + return pos[delta:] +} + +func (bt *BytesTrie) GetValue() int32 { + pos := bt.pos + leadByte := int32(pos[0]) + return bt.readValue(pos[1:], leadByte>>1) +} + +func (bt *BytesTrie) readValue(pos []byte, leadByte int32) int32 { + var value int32 + if leadByte < minTwoByteValueLead { + value = leadByte - minOneByteValueLead + } else if leadByte < minThreeByteValueLead { + value = ((leadByte - minTwoByteValueLead) << 8) | int32(pos[0]) + } else if leadByte < fourByteValueLead { + value = ((leadByte - minThreeByteValueLead) << 16) | (int32(pos[0]) << 8) | int32(pos[1]) + } else if leadByte == fourByteValueLead { + value = (int32(pos[0]) << 16) | (int32(pos[1]) << 8) | int32(pos[2]) + } else { + value = (int32(pos[0]) << 24) | (int32(pos[1]) << 16) | (int32(pos[2]) << 8) | int32(pos[3]) + } + return value +} diff --git a/go/mysql/icuregex/internal/icudata/README.md b/go/mysql/icuregex/internal/icudata/README.md new file mode 100644 index 00000000000..070633b555e --- /dev/null +++ 
b/go/mysql/icuregex/internal/icudata/README.md @@ -0,0 +1,46 @@ +# ICU data files + +These are files copied from the ICU project that contain various types +of data, like character properties. + +## How to update + +Not all data files are immediately available in the source code, but +need to be built first. This applies to the character / word break +tables. + +### Copy from source data + +The `icu4c/source/data/in` directory in the source distribution contains +the following ICU data files we use: + +``` +pnames.icu +ubidi.icu +ucase.icu +unames.icu +ulayout.icu +uprops.icu +nfc.nrm +nfkc.nrm +nfkc_cf.nrm +``` + +The character and word break table need to be compiled before they can +be copied. + +In `icu4c/source` run: + +```bash +./configure --with-data-packaging=files +make +``` + +This will compile the character and word break data into a binary file +that we can use. Once built, the following files we use are available in +`icu4c/source/data/out/build/icudtl/brkitr`: + +``` +char.brk +word.brk +``` diff --git a/go/mysql/icuregex/internal/icudata/char.brk b/go/mysql/icuregex/internal/icudata/char.brk new file mode 100644 index 0000000000000000000000000000000000000000..a243ae6580ac2a4271f4ed78d252ca11bae801b1 GIT binary patch literal 13680 zcmeHOeT-CB6@UBY&6_tf`^~~KJ1B3-Qh~s3LHpru*CN}}0!yT@yGH1;4zttUfnjEu zofTKvstHZ{MKDGZZ9;5}k{JKNkl2O7_c6eCm8+lWP_U`Ww(=2E2TE&D6mzIFGhfb6O;^lyL+i~g zw{O1F%oHchLZ*@{_nV`|e6}>1+i316W_w9cs!Zp~v%ThUKAS7f=FHnn03Mw+OOKdy zv$=kAx>A|x@9R5${CMviQ|>L5r~0zOP49H&Xu(vT{e_}n3~zrMgWYONqx2`AbDRV4~%cvIDg7S%Lei)ltYp~VwPaO>MR zUvyrwC5KuzJ4_|YhEFqTG{AM7`VR+<1Clks4d>>F_f;*{-kzYxD4UQvSh7-zE` zC)5ErKJhmJH-_=x*d#<$pP0J$3Gjq@TJ`Uq(%w1}!jbzReAI?Ri17|9SkiNgF- zQho+_Ah-st7VK9?JBW4QQ4#Ir#p%J;MR@`$Po8EaN8DyCw< za@eYT3n=$-S-EcUHZ?Hm!wt_gTy1DkO=YWkOg*Q*r2bXyi;PE1<;ln^k+`-)dtCdq zrbq9Oo{YX6)%0!pxAm9wPxLRu9*td!{U>&hQ8vyQe>AQeo8qJKGx68r?TNLC$;6Kn zrt+tRnp~6InY810Qlf3Hpq@{@pX_d&X#9Sosk{p3KjFmVj;60RJ==7xX?62>^V#Ob 
z=1-cDdshpS9_Kw9;q|$e_kh>cx~tVx9)nXH=YjrhYeO3w-%I^y+f!|eZP(gT?bq7& zwV!Uk*e=kfvWp`H=cD$XWr8-9exM6*?&g)WCEo$=WeJIrcU)r|eT%wPfm;>$|5iZl z*xE4|7co5AVJeq8KJMsWKD+$F@(-7*@Q?88*v_SIz?hkPu&pm^g#^mVfHma3$1bY5A6Hz-qS zO?{firt&GsW@GB}sUK0z?opY1duo)+bEyiam~VEyqMiiqr5KJ*r_QDZ^`EEGp1RzZ zdNH+Gs^wzps*f#U_lo*nYIVbgC>wvAx>K^3=<4bcI8%8;-5`x@KBID}{tzi*c6ROe zAo5-FzW4t2aA6$90>s@J#$bWRj(oCZzy_!?91(|-4 z>ML{HbSGF`+L7kfO-WS_w{%7AP4~fZSNa2MPPgj2G_TeJ=~Z<_HI|-CAA?#fNcE`D zI`!JF#`z$Q$Eh@4qltU&UjSjp z)9~eRkbg;>YJD|5(fVfk=~kxkm-L6}x7yxqIoPVM>JGIMo5Jfk&jr-@nv8c*@8(AFuUe^1{>ZrjeW|LoaZC%XPoTYIz8tgto5 zt~iztS>~l>`V0CS`bT<4Y<+A;Y-h~2wKVE*>c% zM6H2E=r^}negX)~dkm-uUYyOi!4|Kk?{ufbk5|y*aW}iRaW!WSIk=)fwVC#!64Y$I zWL%Hf>_AGw+oT(9xEXBB74Ww{Y*;oTy=Z`VnKFl2u&=VLW;$-SnsxYGv-r7IK&*tZ zLc~m2;(@*pQA4v#G9uv`JBN^B(qpr+mKIMMdNL_Wl!VrlOi~8G69#nMf7M&^IOw>W z5X!yI2YMr*k%mI+adNLi7^0(8%Tts#kC6CHD|JnsL{jSU5r6+SANgK~yc4 zg!GaubXamw23UroNwpO${~q*{96p%JhTV7qif6+POqH`wHuJ+!@$JtDzi4=yqC_P%mQoxM7+V0@TWSz-aO+bc{o&NXj_ZP7iMK6XK= zmUmUBW-UQi^$nisc7#_nPgx)*cq(YMJ45jwK|y#m@YM$&L#&~BEEV2gu;JT5osB7q ztMyE&(~1ShxV%poOYBRqI}xOaxzu_y6A?QSuIii0h!uldp1rPoqY_L5(HIzJR#R#? 
zC)n5=p_aF-I9XN-NZ>NThHiOI&;ggl!Gue>dcG_o@wQ0d9^s$WB6;e)U(v*Ne}Ox6 zVBiQAYk~7DGu;Z_ae!xEc8r?KM$8w=Ku%c*&}Gv(4Ax@-Sy2H&n!yTA!~P>V=o_$v zQRx(#=zIo@Ro~T)x;{P3OVWUU`NOlrP9nK|xeO>K1lIvGLW9s`){e_bAy_n zzUl6c=KRW6+YOut!#j2}XIJTE#0f%wp5}lA#p?j^N4xLb<1LgoXBwgJt6(0KYI<%- zk6J-KRp=GB-Tiv|Otr>gV}whYu8tEDJ~A4<}L7pIF*(D^MCe+v{eiKL$%{GrH& zB%!T`d+TLU{eK}i!3VuUzs721R>Hna~STSft*i~_}V54Qx$ zloSvwxu$n_6v*}&oOB4wRXlNG6cY zzmF&YjFSD$23N3N#`#!)@i|3}{SpLm(jO~JE`)G!<>1?_VI-uP!qVd}gAlh=9O6nB zZcfqz4qs064FE$ZkRcW?&7Spav4Ww^ft6tqqYZ3gu6d7zW(I)bmH;YHXwCWlV!wf* zjK7d|xSHV8dzP&sB2gL?*1Efo&6O&-$%#T9{(nHGFgsBy7UlP3M{xN9D?Y5Gns6@ zIJG-dJ`(0if}z$}zMMIlVG47H%M0LYHaj?yE6j0ncVYf0C%;sl&v0^Et~kZX`*L5) zEZhcqlCgSRh?1BC)UgTrA}9H!cZA;u6f5{4L|l8l8xhRMqCSg0Kk z^AusRz^>Yp&q8JUAwQ)0#|OH*v(uS;5&A-@bmWWExyc*ahgj84vO8i!Dp-bu=;#>bsHJz9l$%_e*x8GX$=4X literal 0 HcmV?d00001 diff --git a/go/mysql/icuregex/internal/icudata/embed.go b/go/mysql/icuregex/internal/icudata/embed.go new file mode 100644 index 00000000000..12dbd5d0322 --- /dev/null +++ b/go/mysql/icuregex/internal/icudata/embed.go @@ -0,0 +1,101 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package icudata + +import _ "embed" + +// PNames is the list of property names. It is used +// for example by usage of Unicode propery name aliases +// in regular expressions. +// +//go:embed pnames.icu +var PNames []byte + +// UBidi is the list of bidi properties. These are used +// by Bidi class aliases in regular expressions. +// +//go:embed ubidi.icu +var UBidi []byte + +// UCase is the list of case properties. These are used +// for case folding internally for case insensitive matching. +// +//go:embed ucase.icu +var UCase []byte + +// UEmoji is the list of Emoji properties. +// +//go:embed uemoji.icu +var UEmoji []byte + +// ULayout is used for property checks agains the InPC, InSC +// and VO properties. +// +//go:embed ulayout.icu +var ULayout []byte + +// UNames is used for named character references in regular +// expressions. +// +//go:embed unames.icu +var UNames []byte + +// UProps is used for all the character properties. These +// are used to retrieve properties of characters for character +// classes, like letters, whitespace, digits etc. +// +//go:embed uprops.icu +var UProps []byte + +// Nfc is the table for character normalization where canonical +// decomposition is done followed by canonical composition. +// This is used for property checks of characters about composition. +// +//go:embed nfc.nrm +var Nfc []byte + +// Nfkc is the table for character normalization where compatibility +// decomposition is done followed by canonical composition. +// This is used for property checks of characters about composition. +// +//go:embed nfkc.nrm +var Nfkc []byte + +// NfkcCf is the table for character normalization where compatibility +// decomposition is done followed by canonical composition with +// case folding. +// This is used for property checks of characters about composition. +// +//Unused: go:embed nfkc_cf.nrm +//var NfkcCf []byte + +// BrkChar is used for matching against character break +// characters in regular expressions. 
+// +//Unused: go:embed char.brk +//var BrkChar []byte + +// BrkWord is used for matching against word break +// characters in regular expressions. +// +//Unused: go:embed word.brk +///var BrkWord []byte diff --git a/go/mysql/icuregex/internal/icudata/nfc.nrm b/go/mysql/icuregex/internal/icudata/nfc.nrm new file mode 100644 index 0000000000000000000000000000000000000000..2b0e972807e446bdca88a79e378a516bf0325c46 GIT binary patch literal 35392 zcmeIb2Y3@#(+0Y`l4Z%g_io%!w}@0RyHt(@B7ULJ0}I1p*|24-z0i z=p_((hfsnbriM6>kc1FYAYkA3j4at0laTWL|Gm%s*=OD}XJ*dKoY}KyS8LfsaaR)j zIQCEAnhfcmEC%OEeH%1Gv%L|=MNI`wd!F_&GZDw#a^bieNgUTR9sMU9H^qnJmZWi< z4t4bmE?#(r=eU~O7;YE$J7?xc^PBhwe7w*~h!^Gx7X)8XOVJ$B zIgzKhi8vmzDdMBz-^3cHcwvy!GN)^3e{#}Fx=3D^?2$Z`n4Q}@FLJgz`%87w!P555 zA4sjzK$%Y3T-IB*TDDhKB8wMvF0EYNa(Ub3bC(jAG}ke%TU^h&#tW`)@j^qlK5on1 zu7a~4oV%T?s#TCSkMLFCnlkIJ+?_nKJtleV^(gg7@$BMhlCsfrJZ(PCd-{5{^qS*! z-pkj!rS}}~^WMHbOq1m^4DI_qmwfzuTl&uNJ?8tXZ&guWRu<#;tmXHZS0-eyR+7gH zd;K2yrTUNXf7bs0djD5}|Es|NJ1el&|DnHMfIYV+#0?Kv9q@TThV(A=9tD^JZTjYZ zqXH)cP7RzDI4*E<;Ecfk>;0RRID{%Y1=S3)pNs!Lx8sBM1#!VOgU1K&3+6&$b0Brct4`yfl@@h0&8kO-P%Cb_y+UX#J4$u+T|meQ&a2-shG{ zmN_gd=C{fr-59zrG(mPc)aSXm7lj^kE+2C*^moUYa;cqjmWjfA!)(28@nlu;2rrY{ zWPy$hE85ujkY_BP*DJ%y=U1+MrOac5G}Pfc*ujg-6e=EjH%G^R}R`mB*d8s&2e2`;DeQ_IcfALiDD)D~tb@3B%sFTsDg_BM9GS~~YWB!8u^FH5x zZuBar1JB65bb93EDmlnSfd1K0L(*BobN@Ssn3HmD6x)ky#&zTdaZ|XH+*$4(C*|w# z_4$tcFn&D$K7X6%xqG~ukSNp_^ z#C60S#l6LEh);@n?t=KC*vm=dG}dX7({iWPPG_Aej{i6KJAcvRG@qG1w_WD@Ja!Sg zK6LVMUE=euPmt?spY=YQeRle&TzPJv&tadFK1SCju3gc4;&jf(>T}JfkLzt8mhU${ z_kG5@{^B#=b-61i`M~vwkKnu6RqE^E>*pKddmJO8eB*r;z874-aeacGRFdp#^sV75 zajWCYa}9i(`L^~Ab4zyX7vq=cr}ESLrTNt) zDbLmOYwXw3uN``w{d)NI^&9Lr+;41|Zld25zqkD6`7QQaUMA_`HpFd}-&(&-e%nd% ziQhrL6MmoheePG}H{NZU+bzF4en0sA>^Il#9k<{8cz=n%n?H-b+HI5DZnwj3KK?=e z5&qTutNUyGQ~fjiYx~#tZ{pv|zrBA~|DOK+{D=6D^nb;Qs`(lLUALgalLzPzIy~ zFt5gd8Ub|z8Uz^KYrD4$=oFA2&^KT(X?SjUz*yoY222T<8L&9uy?`|V9|dd)*cq@d z;BdgnfO7%XfNKG_1HK8kAMi`SlK>%58t4(|7Z?&46&N3=2uuz%2G$6y6WAcISzznH 
zj)5#*x4_=i+Go_ry|sH+_df1}0!If<3VbtgnENa43j^N`{2*|{Gj?|S|H;LCli2C$+0yN0$Jx>iPwo8n(z!UcKeZimpUy4UFqut*aY9Mi@x(q7 zo^cq`)6-8rt({&!{RU#$bT$p+d9Ivi{gb7(EKZi5E4_fjjy9{R&|aULNp`Nh{i=d& zTi~9+yGaKEj|HB=`MwDHBJgJ5-N2x-+UwBP0ky4gE{1`hRMwwT)-IR)82D@86!)ir zv)x5Ou0fm2zVH0dxkHdxYMX(ytaU4E!^+wywAF~u3+ja&`cqbk(xHy*Mg_eZG}+Pn z^Li+x8tL?)S)ug>yo)MmH4^eD#pl?0Rom4uXpl|+<8mBf@(D~ZRESW>-2 zQNnX7kftOV$h_eEg8QF6%4cEY(o1TT)U1?6x8UBv1A_~L#{}0YTZ=EBV>66sB{h^7 zOUxxU-?dBXl&~F@?SkdeZ9J>NA0Krqwj`qJ4$E3qN}t#I^&opyM$$9 z@AKTt=Eg?U|C9VG=lg7*tpb*V9c_EN9qk&XwUaH+XTAKldiLtrT9(1{k^cM)RvcsZ zSYCO%g!#7d_LlW3j(s`ac2&Nd?Vnq*y4%i%J-0s}^G6otv$grH=(}83&Xuo4CbO*) zdrr1%%w}uTJnQX`^7uQt9@06_TxY$di_q^UUG|KW=RBUg*t<{J)ncEE&GKH-gQVY* zN|MCYU8;Lk52*g)@&8?o2eRWTWk0BF2V5EaVX!^>idmG~hgFY3=869(`@gf|D`xYu zyo$2aRmhdi{Y6Jr^Jcc@>NTs^B_E9{czThS|Bn3c#rmh#J|!#{dzLnDtEC(0 zY(5PZg~&oG=gMrJwg0C%u<<^QvsIPv%~H=0yt{J`42cY>O8;+U@M0gQOHNajp0)VA z#CAL@{qOhgmE5Z$n};Q~uKA9oz^9S2>Sy zl{7-BxU^Ziw6tydL};df#E`IqjTD!5O=nL=Y@M^VvNv1*tx7(bbe==@wu4*Y;Qr&_ zDtlb_jOOpwv9i~{(%&bI!}V10c>bzlyO?cvsAyM_|DVZRz_;N3{HU1MzsqwaFMIsE zak0z3bYz?5za9VO{nxz2xOR z{B_IS^x5yPjbop%{@RE?v#z?vRgJQ(oN{C~|8_Rd$1RUn{@L38xw!muwEZ(Qvo7bK z_x5i~4oc-AN2S`3lhV|Xv(k)^3(_pwt*z3WO1t*oiR@8QzSkE?n}*zw=7!vs-jQ|) z$qTtB?N#OT-rtF18&k1zwyOVqUoRf{Pj`;Wo?rAy@nz!JXDO7FuPxgOvh~?cSI-;vl@y9H})zHCwKXj`2ZO4nkY&V~KIb)3a}>Z9_@y>K5u38WI{6 z8Xu|-HHFp+tryxTv_)v!(9WSfLi>ac3LPFgF7&m~DWPwM&JA50`fljj(9NMc@olKX zp(jJngZ+>V#5-{RAIU>b6Cx=x?zpNT7w$03j0~F?_D0zJu;pQE!afSy61FRBU)bTW2kuYYPls8%z_W&PmbFR1x}!w1`>}^&%QYw1{XM z(K&+W@5l3z%lvA5JGz^9r|vQJH)Mn3PP z9g10%*4k%%+KHI0X`jV>l6EQPa9UTN(`hW;wU|3Gzs5Mn2F51DrpDHbZ4uiswr}j1 z*y*uv$9@>QEB0{g+1Tr`Kg5>CdRB|9rmkkLR;OCaYTc_1t@c{AIn~~)w!Yf-YDcPF zs`hoYU#mIC1;)k1DdN)N8pd^q>l-&FZhG9jxVPipi~At%<2at{zpN*zjOW}yULgDNdpw_D=l`g6ehDl01mA?<1V4BW z0)Dx01#E1?H|U#XvL z5ZNzCSeh_QHd6Lp!kUD!vhlKy61G&D!z9_1N}kJSZR6gkpr0w5i{3)nu7pn$jwGBS zUrS`mW$)uRq$-d5tNrr{mlLiheDzFz-zNN+@N2@;XJmiXZ`Jlks-Jk~%|_K$RZGwO z?U7ac`>WEI|HSel&-VT3@3t@U`%mPX@$`1Kba&}K9Dn9#tz)%qkckpE%Vde$WuA$< 
zW&Vl#WTA-%WzmU8WeJHVWy-{}vXsONGE<^emYG;2%TDCE8?uIpw`I)}@5tJqXFu-A zIwf{bd?+hO9GLh-R+u;@QS34y@%2PEml=t3D%jcWt8N!LEWMt1(OY6amt_u_ZA_4h zeQcP^%ETy_4->1oY)nja*_Nnq*^{VoIgqGxIhJU2Ig^;~axt-{%NL1tTy7@TcljEA z8o7Lz*v#dp#8xhkDtK+{Qkv+L)YZi`$vdgHOJGuX(jb@Eq@<*gF6tzGl0Djs+wm@w zT&B3ZfxOblx>i!Xq{c}tiJ$4x?j=0D*!Q1n%I|Sit;Rne|6+@Z_t_U){WHy<#rWsf zYI%Nt*H=|_tt=~#@^5ms=a`rC_;+Xba^C;kimgLd727AUXPuSN%sEKUaTL{b_Yx?j)DV-R0hLe|fMxTplftlPAfQa;;o1H_0>PwdC3I9C>4T zb9t`3oxGDgPo6I?koT7lmKVxL$;Zhj$S2FE%4f)D$>+%z$(PDk$XCkO$k)j?%D2dO z$oI%Ul^>EHlb@1*Cch}ZEWawhDgR1-SN@&+NBPh4NAjm~Ug4yWDclv_3V%hgB3u!z zh*KmflnSjvuP`Yx6w&fpifl!WqOqd6B3IE)(Mge~$X667`YQ%23KgRi;}jDVlND1H zGZeEF^Aw8|OBE{=D-~-L>l7OmTNFDKdla844k?Z)PANW9TvS|ETvgmue5JUn_)hVo z;%CJp#Zv{ZbW+Nc?n-Z^zcN@Eu8da3DU*~+rBKBGppW3e`&08r3@0M%5P84%Hsjr>aA$W2#fC&r}yxmsM9)H&tJ$?yA01{itGV z@@L>9;-0E_wUb(=c2|3={nf$haCNkr=i)#~Af8jIwbb&QUTsomsB5XS)j8_M>gMWP zbvtz@b)Gt3U7+r-9;_}@k5Z3QPf$-*PgT!Q&r;7*FH$d6uTZa4uTig4Z&Ytl?@;ei zf2uyDKBhjU{!D#QeOY}~eN+9FI!<|4{hj(p_0Q@@>Zdq(jgv;Eao2cj{58RvaE($O zt%=hlX_OkRCRM|-O4DR&vNZKH4K+GX7 zUemm;nXY+DGeYMj(xYrfU|p!rGjtLAsjSL#xYNE@eg*8Z$^)p}}uwSn4D ztxOZCO;X2d-8Hoo30iNBTpO%WYg4qT+B9vZHd+&`&eFzd>S>cS4Yfa3Z>r7JcF^W& z^R)%q{@S71QQB9vuWA3yy~3i^mAp}<$JyG2+U430w41cswV!AYYL9ErYCqQ&X>Y;$ zj`n-)L+xWNmyF-g0J$am0E3bvfYp+#Cu@=o$?3_plj|opNp1z%fBSg$Da8KP!~X91 z&+oVFUj4ikdxui~e!Bc#YL8ZVRxF-P$J+8|7TeDQ$~{#aSMIf<)XrC>o$JyiX|YQ| zg{SG|j!yzSH?)Gh+)L%}9CGxT!mb~e^g2eZa#`#0;&=U3Wm54c20kSfdv?zrReWp1 zrt={CXY;_jzOtFjN_so#*@*U<+gh6syG2!zD|_a-Ph7YsE0aD-;-2iF8daUK>9O zWtIa;r^;&iN0#jvuzb)$6SbQ5%w zbyIaSbaQkIbZ_gH>E6@*NB5y_qi&mSmu|1_fbNLygzk*)yzUZ?FLXC`U+HobcXi+C ze$xG_`(0P66X~7x-`_4Ez(P4&6@cKS~G zJpEvKzP_)1AdW))C>-PT6ZEg^XXt0?=jj*em+DvOSL)a3*XcLvC#bjRcj@=)59r_0 z9MB)pPgS4LpV6P!U((M|JITM$U)O)B|62d8{s;X}`bYYwdfwnKSZY{d$kVPgtTC)JY&2{!>@w^%955U)oHBf7xM;X+n4!38xM}#> z@SWi&!>@*?20qm(RhH_Wx7DAI8k`!Q8k?GsDo<6X?$xBErlw}3)=JGzZJ63T zH8=Hurd?{M)V$RE)PmIhse@BTq>f3QkUBYadg=+yTd8wW7o{#uU6J~q)OD#FQ@5n< 
zNZo_u)6_$$=QYPtPo;jAdNK8K>h;twQ*#w}Q-4VPDfLlmX{wV^X8cy;ZuB(<8zYTz z#v~j+XbQASqt>W5nv9voJ@Q{QS;l(CQ|iXXmPUVdJ7Xu~Rdt>*-&kNAh@;Rr$~eyW zn(=kxbmLpbdB#P?rN$M;mBuy3kBlE1w;6XC_Zklvj~GuG&l@ipuNrR}zczkr{8{~j z@n_@j#!{n`NoMji`I-Vvp{7VvtSP~yG^LnKrb2zDDa%yP)X>z_)Y8<()WOun)ZNs} z)Yr6BG1ydS8f6-1nqZo2;x$uEGfcBg^Gu6O%S^rdNy{A<&Zt~TZnnY%2v#Z(D>}w7*hngeJvF0SR(yTS>%_eh( zIm?`5ZftIDPSDD=x#o7}F6Qp$Ugo~$f#yQ<81t*<*UYb*XPDL6S>^@irREjpmF6|( zkIWyNx0!dD_nHrwkC;!GKQmu6Up8Mge`)^O{H^&1^H1hS=2EjL%{k3A%{$FMt)8}? zHZ(0VEiNr7O_`=m)2ErzGSX_LHPqHiYnawFtz}x9v<_)`Y58e=(*~!FNE?&(YT9dQ zucu8CG%s*oo8YN~G!CEN`HHZjyA2uZh?v zMNKSqkV}2^>!f>08X<>t_^6K@nx%KNv_kxah`$)|mmq#KjBkbbZDHRP@s}a~OvHZ& zbM``fKa8E5(M-}&=xS-3F1GX*1W8x;JS(az=>ebpgl?AJ$Yls(kH%d4LpB_Fj>KGt zK|VI!Lozp|i+p!OzXdhg4x4?j`4o8^N^d05h?`h;Lv{hOqmWr4(}|l%4kEuB$gc?bZ9;yV zk>6J2_Zez;5&2nBw;jmsB-Trj5FojM{8k~qM8s7fzb?q{Xhwa>?eu1n+wgS{=kQLt zMDiQz@&rD)3_&8!5KHbLwp)gmM4D08;+28*li_6v%HSloq2Gy^_fWh0ST8-q9+EKF z-4}c$_h5Sm`UjX#6vq65*pHEWwTy0-M2y{seD`DQA>?})^%Rh=GvYKwJ&z&hy2$wi z*7y?SDXd~wE?|wjz)pfS?t-~{pk^M3;R9I!VuWCgpTT(yK&>JWF9fxX!5WW; zZ3O0Y7IDuZ?nT5Mjhqt@w>rkEP^-@ocR1FV0q4dAF+C7-DAssOtnp;jCmHoI;5^nq z%x|#9&8Ty2tnnHcUmLMcV~y8EE)CGHi#6T^In;oU2FRgBdPhlXThiCe$(pcJY|YHpE(wd_RGH8|t(ZHv3_70C^n2noh<#{{*tnQ9Bjta2)b0kQ=bp z44s&<}?FP}tvxE(rZu=+6OJVSfR# zFxa1i{aM(zc22Z>4BLm0N5J+o_&yKczrprq4MEZlw%uUc6}j=yiE2!e^v|4T=?VR9 z=-XpVGPXveG1tD#QP5MiZPE>&R&+) zAP*_x$-FG@%LI!=ioFi~wdf0IC!*aB+AZTGAyTR2pv+B@EcLP+g>4YplQOBK2(mD# zm*j>lz;YY5cR=?+R>81bqU(JK^^*Y=^+#aoBE0dm4VP zLAD!yZ^3UdY`+0L09^sCmqu9bfxd_BX7sm$E`UClt#Yg5w%V<(+Xrtna9jOGeNZFN zB=^;CtOAX9A33{)&&b)`K)rp&`p)-VKWCxu*ns%~Yw=ew*3VxUuzo>e;Ml;qf$JA6 z3>>>)e&E`LGlM=@G(LDz@Vwv;g5L~YyJ%+c*pRs)>lZH!8M}CX$Xt9M?VZpap`X0H zJGA5SMp1pXx*7Ux^#b{Uf0lf*D4Y~q42)Y5f1-b+J26P|v0K{bv zvIwAkp#2~z$OYsD@&O$L9R{@pwFh+sbp~AkeGamMu4E_QOw3EZxv*>U%_X4ap!Y$m zL2E%9K$}6^LAycwKnFp`L8n3Ix+dq{=&HMU3vDslJ818r{T}TDv=7n#hW0UR(-G$m zs4b`?s4J)ks5ht|Xb@-^Xe4MXXgp{VXd38E(CobAyybbioA07sg?2UCwP@F)-Gp{C 
z+U;m}!uEU6eb58YL(r4FshcONbkPirgsp-5b2`Q_>>M*yH}TB;c<>h&#!uG?ijhgv zbrV&?lcsO3tsV>9-*=dt;c)qk%|fF=$@o2m;#!=59jy^|$NCu(NvjMWOS23nm16JP zEn=JtcEE=s;dmC80M092D9)xa<1uC|#!SJONf?uZF@mKd^i4_sKJ?3>Ukm+e=$mpP z?5PXWrINN{FUu0LV^La(h2mU{i`hlch1^v6wuZR8v=m%16i;-J<4HT; z8M0n=^i=3ICEarL-lwQc_bz&?&}+%@xbqwZHF9+Hf4zoZM+rd%kzZ%1~Uk=?!wAIa{Fx|V|(f^{Hic0#iin*Gpx0?lD)a!GRm znzPUpL1TsH1~i4F`5c;a&|HJ&3N*JegyT)QMAXiT^D>aQD^xFM$oA1$W88jdvvJnk zEXH)MSNxWjJy-9A_}KUm-{yPt8N`}WRIp{NlTi%5ggg`$4M4FT7Jxb5JE z5VsRtOK={Rv*3mjcMe=FaAHZ@3^z-^4528GdhO90fL=E1S>6DbPu!c}ni97G+(6zcED0tgO& zK%^ZMzXu}q#xqqvRAdLuy(c7nAQ=G3BC;6<$w)}Xl7!_q9FozHyaLHyvYCXcPN5#t zThN<^-ZIk7MQzmv z$uZRD6zWqGb2&zKM<6``=^03CXLd!M$QtvllbJ_7_(!j$P|2 zSj3JF9qV;Qq>VI#9^keSHwoM^;%<|c*$p5qX1^U8QODxW0zB*`cL1tcjT zNdfsEK>i1kd?3jSi7zbUng3D5k0KAXaIa(cZ{YD-xHAaH^B5Lqkxh5PEb`o)Fbj78 zp}0F?7UxDh!#c$GAk5|@kmM6)lVv_b+*_FceDa@7midJB$a62kdNisRVLck#i^kSt zPZP&`F)Xf6mc0pcXubu6IW(67!W{BY!0>nu#Vw$?IpnhejqStmcti5smpnJ3`SxRY zyb;O!mC4y0`%$({$Yuc9G$ES-WYdJS0|=W^^nrv;IaiDwNZ5kL4xzCvXzUOg+k!?7 zA#6#x4W-;#lFd-UR)oXIz7<&xBg~~Jg@n26$q-K*409=BA;aUjRDnXOKpXNug0LOo zs4||d*!Gm+Xu|eXtI>q*Db{GhT7-FoIfMm-O$i4Q<`NbHi?}R$mK2MgfMZW?Vuy@( z$asg$%^@ov%QrpY~$J4o-Hu!L$>v(9=!;g(DgI`c!)A_!4u6_lnLWlREEWb zR>I#1YjK{C=MmQ8yoqO+O_F@VY?9{_=8&X-Foz@sgiT2@kgzF91`@U)$q>R8BpE`O zOOisuT#^(r#4}Vpa42Cb!eIOM4nC(uRiL!K=;H)7`VaOekeL{E%SkJY!E5jOuZGf{~F98p`egZr~c$i5#)Wu(j z6}Y`_7lDV%@N>80Zdvq6fl2GS*Kmg~!n*El+y&WX_v7wG?pbKhfMcH|t^?z|R9*t- z6sivuIA=l`p(~*~p*NwKFrBalVJ2a1hWJDghbttHVR3D&YN5Cr!=l>wydV^HBdpD7 ziPwsb;Cf)6$9d7$;Mk{Mw&v>4xE?gF4vpwRSck^-V0gR^?d&}m7S-XDz#c#!>`3$} zRpbjUAJ-A*kns)~V&Kya873YQ zyhFz3+>EjtMA(dS8ARBEa0swHX39R>ibWm3vCp<_&MhdjAq?@Zfz4qkvqd!a=D}Ii z0UUet;4JD&GBjwbBQuy|NmME1-yik_TaZh>QOz<7s@bI3S{Oy-c4 zj}$k}nL{RX$V3hq>da^1O;V^R zmdWrYiE%_d=bljvi`r2hqbQGdl*cH-c69DX0ln~9n%??|yoqC&0s(9#RI5yTU;~g?yR3BP;lOoC?j@j8|yhFx|nn6o%RYc8+V|I2K?~s+-)pp2g zi`roXy=`%Dyo0NabF&4$*c&2-dkGH#xyY>h?EPc52yYX?`GFH6L0lb@bLfj4eU?wQ zNbKnQVl8EhoE&{W>PsAbUr}x7ogIBY>Pr!cy+zCx;SC}<7SpB7#`bnGTjc8K+hV#o 
z`nH(vj=n9Xhof(c>FMa(Vn!hndjps)!rMP^EM`oZjqS}~wkX!ox5cdH=-Xn(Ir_Gk z@s7SNW`d({i?5$t62ygtrv6$7%Y;11>vqf@8-xgEh=-Xl{9erC&m7{Nqsdn^j zF}orXduNs{!uv9CEM~Vd8{50IY*BYd-xjlnqi>6u@95iN_H^`ZF?%`swwS#UiM=Pw z7U3NkI2N-{nT_rJS+=OJqi>7Z&(XKV?C1v8(&t7{Y*;TfKqU=Bv`{zk!u>e$Z8SgvWUEv zkf##zR6?E(k9B_=oix1X1I(-tzn4s2wQ$;v=KO)a4z9l!V84w2t_f>MoQ>P7)uyOm`<2Q zm`zxpFoR>G@`!Itm`;)!ggr=-PdJ2dDB*Cz5rm@&#}G~=e2vgT_&VXcgsTbH5PnEF zmT(;5EW+7@n+QK9+)TKIp|qi9kw%aeU*psj~ z;lMIn4F5xjAEIG=A@PO8k0gF1@neV|LwsH0mlA$RxQ_56!i|I<6K)~gM!17;7vUbl zy@a0<9watu?MQ8ROc3bF%Iza$;V}xD5_&4{_VC zI$1x07uc0?6082I#HzWbI_Lz|fe+?cp+6=*k@}i+OGVYI7R&Hf#^~ zKcK7NYh|F&?ouV{Y3azlkvx__)#xIiS{=ICqOM$J5BO_He8vfY8ua5an$6V^5 zHi4+Q1^OiT^u&Dg+1f#zCh)x)y037~B=CI;zDHqBXHbiqpp(e|7RF`beD6g3aEv{R zoEO6OF|LpIplh>)=a%8TEP$^Qh1WgSq%vupeNZ8ByP-pdV1PC!oYdh_w`cAlrsG>v7J$Ziblf zdkneV1~tdLmm;R5HP7`1)kN&opdKB0ZUAU5&TDVf_f60PP*a@KTcBCUZ!+pS0`mz$ z-C_p99(9us0gu|v8i}=qJaa(xLCrv|L7hM|KyQL-qJ|Sd`%p*OD_8@dA()R7*4uXz zaGr4ew86UZLVf0gc+}SfI*&akCxn1Snz<&;19xh#r;s2s9YgkrX0hQ221{ zx}YyW4?zuJR}*v+)!T^5jKLZ1fUBSes?q|h>IcxM*HI}{!vflivwi~f9cUUVV?;%U z;A}6Nh5yfMHm*-7wz;Iz$22)gU^sgn?V*J-~~&hJ_1?M5FQB)cIKyw5T# zpR-Qeodo~0kO}^u5uPJFPk4dwBH`zRR>I4KR|vl#EF!!{c!TgJ;g^KB35yB8CcHz) zauNK$Vd#6&X@`>#_?gpQ;1`6qfnux9+UBaxnvdriowY4-I%^lS1Ms9$w}NZ{WK2G= zl6+v9d{8C%pfdU3O7g*F@*$Pv=p)(=Xl=RdBu;1Dh1Q;nP0RW^>+TA2yDgJ5dwY&{ zdzK@!x6AGJ1Fzyq0oR8SRI7==)wISy(iK<9E|7pdVl-$G=nUu`oxs(ynz0^Q;ri(T z8qu6d!Z2osj^}zoGJ_-yAz6Ul`=FEXy%y3Q*6Wb|42cL=-F4Wfp{)xV0vZAG*W;SU zJ|qF%=#1l0%i2E7aV1>}XR zyCtYQXcFi{(8r+tpnx{GZ-AP&#(oR>9=pJ4^dExU!B0nt!w1UqlbF7Surq7>cqHWa05EwZV4I-x(T`kQX6q6 zK%3YZ=NVK0>Sh$U`qtO3!V=mIkbDf<+F9UwTW2F~3Ap-JEjS*#XAGhr!H7oI?U+?t z>lKXa23cRwml(eo?J3YD&?C?U?2cv=c0-fE?Cp!Y!EfL8Zm7VlyHU-ZGwnhuYB1+IzpKN!;r zcZDX93_-gLw2L$|P~Uo1*Q?CxF~&9NCvZ)zyJ5c<&n%}vy+8rD&(;7HfJTBQ_h(iS zI4e5z)VR+K0HtK0?gNjtr_ zIu1Uw2eUhAclcOB+=m#MI0WZsh`=?sN+BPQ-aP8*;k$*^5BZKBifab6wg%6wM7ter zy9A8a4ta8L3MF2n-AMuYdA(W8Hp7@y?9hP*SeVS5c2IzJ z1-9MC;p)LEI)YYs1$6}Jve@;LNLNukcyCAj%iu=8$}}}FZr-a{W9YTBUcubjSz|G) 
zuHzZE8r&dojjUZ@cM-lfVWo;D;GSLw^+UU*J+u1{s@2ZA1=z?shj0n(#S^hJ;+dc? zY-fR=4q6KO7`yI9)`IqciYE$OduuvIgujMafYyOxL3==7gIa=)gHD2qUK6+uIDZWL zqG}zi7f}^?Hg@Al*vUWvlLfA$^*H?X0N25)0M`lnylmFni2OQQ&tXho$eLNO!a)U~ zHJ~XVkJqt}p-n{F7i0llg6&PTJf4~#f$xg8`xJreWbK7oUPA90NM9F~M7s<3tj^Z; z$g#7vCu;Z!8{*@)1?8iH93Xn^N9&<)7*X+PW4fN}GYP0~Wf zZH0$h3vuN)gk%w(G(r18C7{y{1+J%6gQzu^;C=u)-3ZT$pk7Pau74U{erv=yCq(RR z^}rhKZJmX?!0LAD6(29nvn z`ST`Z>^nn~(X#Isy@8f}ljtq9c+Vm_#MLflzbR2%JfWB)2 literal 0 HcmV?d00001 diff --git a/go/mysql/icuregex/internal/icudata/nfkc.nrm b/go/mysql/icuregex/internal/icudata/nfkc.nrm new file mode 100644 index 0000000000000000000000000000000000000000..deffa3daa81fa8df31d1bfcdfb254e65c879c78b GIT binary patch literal 55112 zcmeEv2Y8f4*Y>l!$!0f=klq{VyJ_3f+t&2nH$8+T(o5*5AjB6D0R;h-rhuY=fFPhC zB2AhUK|nx61wlbUK|vw>_sp}qNhpHo`+e7cT_1Cud(ND5=A1M0%+qFfgD>-Cup>kM zahcp9{Uv7LdeAlt`KZkLGfc=Apkw#aLg)#tv1FK*P=@huMdKF3l-omI&oE<~4Kg0& z_$O^0Qp_;#SuBV+$cVU4au_C+8N+j|72FrOhq?B=0^SVXA)cLC zj@k2OBCd$L(~NJfFdtwp;;uA5ZSKhzaijSI`SbWY_2b>iNltPF1%6JSx=(c4=5f7a&1{%vTcp+eB0Buu06_oJl|tOj}ZbQztkhhu8-XU zyEAq^J!^X|=y|56kA1EE0{b)eJ`Obw^Bq2OuyxFJe8F+6V~1ms(>SO1oX$EKV_7?A zJ3sGiY(1?0*m*x}&0IR|{J`swVJ+$fRDQod^!gPj;(B=J zc#rWOBY44^wB~z%h1$tS#Qom8#HZS)$)~?hg-^XttIz+n{>_tCBZ7@q z6Daqhscn>#I^Xb2&{cLX|N4FVvUX$W*-iLmc-mph@?drxo9^mYNuMxeZt_UYwwfyk#*H~=YI(KBSaAD6e{BS zg+_*^pk~W3`uuBbJ$gOC`9S-^PSl~-8FxPT7`LpiNX@XdWQLhMTc30xr7CW#e~^}d4+|C z@xmO!dWB_$>BH*7s>23{jS71=4$lp*2_F#t zWcUl=^TSt$e-!>%_*daSg?EJ8MtDa=L?lP#M$|+Mh}lM1Rc z8)!DkY?0YVW?!29Zsuq%HP1JH!u%lfsO2fxzVtWj@AdoMJ-t6NKWYA_xd-29o4_yN zH=;G1N?ztKQF!Prf0Y}YA z=Tvcqah~QZ=6uKDFqb&i+z_ssJDod+yNbJkdy3mN=WARUFO^r!>&IKgTgyAhyTS`G zi!xK2RhhM5cVM3xhdE+)&CJd`%6zQ(MDvB_ZvqI;9n-{tybOq{4`#SrzmVNB^SRS#wVE&b5Kl>r}7c58GKWRVF{yF;& zOAa&L{$=~u>;+cdR#8?Q=8F02_HWs*w2!x1YfmD7VE?gwzSU0qK30RQu9=Uv+Gl^* zexlWJ`>*W3v;V>V71G20H~U-mi>*Ge+K1LHbFPEH!PemmD@O+oA4=8x9=~=8jg5c8(loZRiJ~E{%ESL9KUcpMeRmfPq2R8`eo~H94|QjxhdPgPe#&{0^K;JAamRcOci6X_S30kC{=oTT=bg^`oDVx6cmB%xJLeyquQ>na 
ze9M{ZB5<*FaddHa@pTDu5xWpy5|>z78^_{l6J!%*6K|91lIc?HQsbhw$+c;58R9b9 z<>_vAM*IKC+U%YgfAr|qcuH^#a}D#m5qhKGhG&>>*rWGlEd#>B!lJ?yVToZ%+BYLC zJM3_1ept!h_Qd+zciU9%m^fB;XCILm>Gr`p8w};(omIDu{DwO-Q8wIW%goy|J2gbc zYS53{VJNqZJ>ePU$>xd8`))tiJiYmA#A0=c2#OI|tNWpG zek;1nPnCghG|480wuC+FGB|9i%S$eEvBDOBmbkp*@}7&paI^zZ%RuQ}j(%_O!wl__ zhI%^xLzk^C~ueEG10=ob*3THIfk0Uyk_8*8|qc4*HV51#gBp8Ee={7 zw>V|t=6Z?Ba2GMu*9^6pfXz^V+M4n{u7Suw1mZAuf@oS3KE^f4Rb^_iz5WUv1+Ib& z*Svdrbgq}b>iyr)mALBf$)+<-y=${;f7hYZVib1FJq5mkNv=~|XSmLGo#*-nl`nIB z*L9uihpt;)cew@#LInF=KX*OhddBsf>yNHiU4M7I?aFf#y7h1)UY*=LXx$lMT(?!g z%E{Tq79+Qbo^eL0@$Bq8M_Ic~cRocw#}4NX*ADj%&kpYn-wywdfQ}%PkdCkpaR=_u zpr{TBka*!R0pR{;OXnz9F0muIBlWHi~py-J0i0@#1 zD?8L3p3K2t7p*C`XTm0jO$(cK*AYu`OBHDDI;wYjR130TkuKy^ zl#pzUZ4UFOxse{}ru^=X`Y3I(3P=t{WUuW;G-`;}NNk>u;`^W57-xsANe1`&eE1RU z+Q;azbLNc_;+y4-HEDJ2`)E9SRz8~T!>!1?vq!_2+r#_(l||=hS-)L?$$l#GYfUEvxD7?XU8999_1cpxb$tFDOI1SWw+f%3*A8J=nXw;cPLEg|&PoY+=~a zu;pQE!Zw6`6t?}YGyK8c{%wsdvSaSbzT9vHY;rf+cS6p#6%?d%(N1?Rw<@UaWs=ZQnt1F=okn zn=Y6`kLHJNTitg34;@W=)6wdl3!Bkvg8i7a#WqLXPP^St|4(P(e*JAs=ig@?pV3+B zZuLb6Te?gCdo2f3K9@Q!-6x-G9c+1625urBlc$H-{+0gf&HHP~lg0ft50jF9gjRLy zA9$Z^MJ8FaiHk9DNha>@h^o6Z+&gA><}?fLENo5}6^p7wEg~~WNFhDV?!47Zo|(uQ zLh8G{vF*Wk`6SX2CfPU>_l${~Y~t?rIIWxJap&=FuYafgmf%~QX-;V$2{@C}CN4LMfCt;^qHm0kZci-9d)eRX@i6QA&++|w zu`8z&iOs*A&Ao9u<8{7~8{eio-@c7+-o(0-zt`KpEmG2_J@V=;ZWZ?z4;HtHM~TOY=iV4Eo+LhWW3qUvc!qeEc&>P! 
zc#(LCc$s*Gc(r()c%yi;c&m7a_*3zI@geaM@d+_|#MonLMD`kRk?+e&rdw}d4A#fwdW6>zj)s8?C>=6vh=d^a`y7{^7jhyitv(o z#e1cCrF#{6ReAOC8sIg|YmC=;uXeBJy~3an<}_bHm2pJH$K6JKkIEo$Foh-QeBN zd${-0-p_f@^q%kimiHR(P2M}b4|;#;{hjwu-h5j-+uyuzd2@XPK6XCtJ^?-vKCwPZ zpKPB}pE{o>e1`ap^m)=}qK~_6fbDZW(|zXnyzaBy=Y5}#efIbq@%hTgZ;+%P4b)O_nh5pc60sS@LS=x-fyelUcZ%go9vGIo$OX7_rt*ZN+&dL8Ta zeXn17-Rfl(;1M7SNC?OZ&eSz_fq`0q+KE2{;h&Wx#g%aVcy{3Yz&D{++piAX7#P*C zB`~&OdtirH5N;p7H}Fv4i9jXI^=#n9z^j432i^|MM%yzyC_E~h7bFbo8C2Tf7UUBY z7!)2P4@wA13#x0-1;vMJ!}G$)81sTkgKC2~Os;)v(BPn!h7m!78pZ|{hgU+|6y86) zHT3R6dKpXCxAsBQpHb7b9uYn!eEdCH}7G-09{Zmmg z&Hac-6LB1O;^TB*Asye?EQP!ZRA;C+iAFZOCUQpW7VhS~QE$f_wMO)h7#z_SLFRj8 z#Q2Cf@uY~!5mTXM%R+iDPZv9O+smI3F*jmS#5)o1MSK{sEn;uP=Mg6(zK!@X;@5~j zBY2Tkk!wYEkNgc38RihRE?gM`>=PF(XnR$FAim(ARYLlGQweUoUjEc@RJC5v*nfy&AMA z=sS470J_vA%7eT$=TW zXAd(W%kMxRfpBk-HQI-Q?1aY*J{%4D&K~#UP6xRPzeNvEq3>Px7lVEdx*qgr5GOc5 z7$UR?wh0ysql6B@Zox8PtT0iSO6`4u1B2DVbfG9XIyhUHFN_IJ3ho}KSXh3S=k9u} z>@NB`VQ;irgsR|-;Jn}x>Z_k{kZ_oAWEZQuJv>@lA6$>$mh|tI-_YPu!A}KG>Lziek6QNKq0 z8O4jXigt+hi0&0Fik3zvM605+qKl&S(T&l4qlZS1iGC(}O7u(7uSUNS{Z90I(H}-{ zi{2akdGyKXZ=-*V{x$l~Xr9DM;vkucw+nJbAlIY2-!Asy&4OIh+$H4gA|UcRkv7s> zQWY^I^qPoVC2om&ykkU(J;U}a~j?)gtg;}{n3k5}nm=P>yJpEwwM{qsEt z9lL@D#wn3hNUA0Ek|s&3q`zdaq)jqPGEOpHGD$L7GF37|GD|X7GEcHdvP7~>vO=<2 zvQDy5vRSfKvO}_4vR`sYa#V6ca$0g$a!ztla#?a!a$Ry$a$CZYnoEUJ8);9elhjS> zCH0dAN<*dL(rBq%8YfMXrb#u@3~7$EKw2WLkXB3UrA^XSX@BWpX`6JEbewd&bdq$k zbgFcQbe43kbe?pPbcuACbcJ-abe(jgbhC7;bcb}ebieeF^r-ZN^tAM>^qlmf^s@A- z^t$w>^tO~EGnWZvHnN^FCz+efOXepFl!eN|WzjOZEKZgrOOt728L}K%fviMUA*+_v z%bH}Zvi`EcvNqW$**MvF*(BLy*;LsK*(}*y**w`I*%H|@*$UZe**e)q*=E^R*$&xm z*?!p}*-_aE*=gBX**V!o*=5;P*>%}X*=-p|ZY~$fZR9=WPI5Q7m)uVtC=Zo~%cJFT zd7L~+o+j7GGvqn)0(ptNLS8Memp92<<^AP@%k^@_F(_ z@+IrII3;+Z za7OTdgtLR67tRZwE_@?+rf^yC9O1jcuL;)$FBEgSh zF~rfbHN@3&AbfjTwuSgwjtL2{9B+!mVL~jQ4VfAeW%*Lb+>lty1tCj9QZ3&JSsjvY zxjtlb7rO^#o^M%fS#DWn$zirp>)jy-LXL)d_=yc z`TgeO@>BA!Kw+)0Q#dMI6`qR2 z&0jQs+5A=WH_hjpe`x-x`IqM3n*V6-Xy&%?TP#~_TkKn$Tijc`Tl`!66oHCRMYtkb 
zAy>pHk`!qQjUq#lqbN|6C@K`yih4zpqE*pfF<8;27^N7e7_XS5n5>wpn4y@Zn5&ql zSfp5@Sf*H^Sgly6*r?d7*s9o}*sWkkUV>}ke#IfhQN;0rX;31=Bb9s4KFp!Yk0F^b;E}ZI~oo& zoM`yA;c~<84V*@+MyJNWM!fOI^p9za85c7tW@^l=n0YZvVphbgi`g8rBW8ch(U{XQ z=VC6$T#vaOV;*Z0>lEu18yFiNE00Zz)x_q+mc&-aHpTXjZHpZjJ1KT@Y_)W1?A>`! zkDVL4Aa+UYJF%-{*T-&--4?q$_CV~>*psnmW518R9J^0`M1CB{lpMty^!RliJxOD1 zV{)UqF|#qhv9z(Ov7xc0aX{m+#?g&WH?}vv*!UXqH_q*^vA@Uu89QEbCw8@D6t3<| zaNWNwJ0uw_<;7XVS;h5;bBJ?^^N90_>lGIqcTpmWi;R=T#l$7VrNpV?baCvl$%@O1 zD~cnen;th4H2FmGL$4 z4e@>ApNJn2KO}y5{OI^6<0r&F8$Ts}TKr4#v*TZhpCA8v{L=WhwT>^r>RGiPg7WvyeYlui-d}%vk4ayTAGG5T}ybfX?oNA zrnj3mG~G_v)5J@(N_0r{N(@L8C(0Ak60;J&ZThWA*ry=TvyVQpF|luATViA%Ri815 z`aVx5PEMSbI4kkh#6^j3Cay?)FL7hymc$*2`xB2Qo=!ZMcscQU;_XE9B%36sB(J2v zr0^tpQeu)iDKjZQsXVDBsdv)Qq-TT;%t@J_vN+}K zlvOG3r)*03B<0hT&r*)0e3^1K<;RpODZi!MO0h__O?6ClPxVa=N)@L{Qe#t-Q`M=N zsfDTKsWqv6QU|2Ar9PS3p89<1i>Y%`=cg`BeK&Po>W8UYQ+K8APyIagMCzH;bE%h8 zuczKlHBYlib4v3{3rq`7lcy!6Y0`4iO46#+n$r5GwWW?7+v>6AC}MK>zyNdM`#SYXG_wSbvx?+{H>)kugCRu zztwTKtTW2L$+3H%kLK}lNB3ynA8tj?CNeARC$8OBC)?^g-yZJmpIJQqT&zgjePean zjQhMxuj{rhAC})5%L@C)DgVFpd)W9Nr1x(|RJ?&=N?KFtoW zopPgcvvR9)hjO=azw(gssPcsJwDPR-obsabvhu3(y7H#-wvwYVR|!=%s+(y&RZc25 zm6ys-6{reTg{z`fa#frvNtLG3s4`SJssdGsszOz*Ix4GIHK|%v{Z)fiZK_eKajNmE zNvg@Jsj3;OS*p3Ji;{V&MXDvLWvUgb)v9%>jjDN)&8n@c9je``{i;K%qpB0C)2g$o zbE=D~%c`rY>#Cco+bWLQTrE`FsC%j_R8DF)wU^pY9jFdfhpVI2ZYsGtPMxGqQ)|>2 z>Kt`}xEcdPfS52=r;PpD6;&#KR<{Ztp#m(^F**VQ-Gx78etxkjk5 z(e%_fY1}jyWnLOTO`s-J6RwHY$Te}ABu$z|qsh?ZXaZ#gni5Tgrdm_4Y0|W6`fCPj z+BBmy<22(nlQfexQ#CVWGc@yLvov!x^E6~#Ednl~+%nAy&1%g$%|^{;%~s70&2CMa zjKl1Q?hw@<)tsO;hdHe|t2w8+sJX1Us=2PYskyD;Xw9`kt&O&))=BH8_0sxj&830b zP;Iz2S}WJaX_K^RT8%bCo1-nzmS`)q)mp2#dTo=oRoh?d5jR-drX8gnryZ}osG6jm ztevWzp`E3ji!x8UNV`P4OuIt6TDwlWQF~OgS-VxcL%UnMU;DY1k72Bx<*~IZiS|=ZlG?cZiH@(Znfqq-7~spbyIZH zbT8?~YiH|T)m>CA(9O`kp?g#Jj_zIEd%7js^}0iv4|U75JylzDE415m>$JOcdvynN zpX-k4HfwiiPU^O5&**k&ztt^~f3Lf&`&D;ScU#9vH&3@p@0sqB?vehu=llKod8Y@Y zhowiQ$EIu3vmfZWD7_-RHoYmmZx@R}>9UUD>EqH{@wwyp^hxQH)2F7-NS~E{F?>$? 
z{Pd;itI{{5f0VvGeNXzq^e@s+rGJxt0lj}pzn1<-I+MZAz%QUd_8G1~?~DLoSVmNa zA|o+FnUS86n^BxmnNgq7jMCRo24=8jXa-wGWUysS23wxWV9PTZYoeH$VFp{aWUysh#(Q^Qp{9nNu=nWX{f!qx@SqrijXT6iPI%|E_=B#a5pJpA%I+}Gd>+7uVvo2@-nsqblPL^4=Wwu?m zbGB!8K(;trmYtZb&i=K}ZH&XBnT#YSyC~Z&vm%>}q&B-LJ1MhoHW|s_>=D@|nd7p_ zXh>FUM#fy&EGA^n&6=D&Eqim;tZWk9Is4V@McFsA-pnR($!zs(KGVme**IG(vW;`~ z9(;GsQP&yTnB8@b?wuhrJDu}loRclt#u+ir#*S<<7kjdeGjS-JWa5^&0wW~Rk7d7+ zc{=;s?2FlCOl_FI9mxDp_D|WrWp`v-=V?p^2I zCGX~}N7<6IE9XGYv79qG-=qAL^Lx&n9E;o@xh^O^xxpxrC^5MyDB9dSlrogs+-8)4 zC?j&8%zZX@8p`b41-VOe-_2c*vL$y{?t$Dda?j*`kMc9h?Z<){A-cfkIkMnu{y9LK^ly?-4 zDaZ?doX_^3D%f54Ou_!bLxo2Rbp>Sgs0vOL{uF<@@ND6^!i$BM3$GSlFT7cJyO2}# z-`e9fFA8aiXt61BEb=T;v;-6-w`g0$MY5vAqTH6!mYSA6EdyGHw`hv8i;9b?ikgc0 z6%8vITQsrg`J$OcuN5sWT3)oah#Y|@TiRP*XnDD1e#_Dp(q7r}e#=KKJ6k?$`GU5; zYB}HXQ_F8H9WDG;qPJ~zZuO=d-p7hI7i}%tS+u|CP|;C%J5hA1=xot^ z*<<`5yk2y(=vEP@*t{5jS*O^hxM#6bv0JfMv0rguacFUPadfe~IIcLUI3XgfSW}!) zoKsv-TvA+7TwB~&++5thcyRIX;xWZf759=nQ~Yf4)Z!V%vx?^y&nsS3yrg(p@d|id zUA(S%WAWzVt;IWvcNgz3{v5Ig6|(bk?;Z>J7G3AJ;5&Clj8~nzy^-q0UFlwLH$_m@#1fae=D{v2`Wi0DJ@xbXBGWMsIkv>v`M);X6GH0 zXgcSd$nUo8YH>duhv{81pkzYHDiVyfk7{-Thj&}b>7>Bw4H^N9h?C)C0H_wRw z&fm4%KMwg;@ilz!)@Vn5{ld;x_k38p&iUz{k8xg4l$@qV;OmleC6gj9mRu^iT5_!f ze~GN*b_u7{yi{0fQ)*W_Il{5jt8`SDUui&TXlZz9bZK3Lyfn6y{nbojX7`pIwiEAw4}7WbbLg%xVp5yw5hbEl>Dxyf9asow$ibslS-$S&MJMabV=!7#k`;W zveFfF|M7nJ|G$!Tu2#ZzUA;yPnLi_MwCFBjXBgl3C|y^&v2;`E*3#{zpO)?`Jy?3A z^hD|D(yvR;m0m2pTza+iTItQw+ohZ`voc|sb(vk6Q<+S&&y7heOvZp*{@}PmhsB1${or*%6pZI%BAHA z<*M?m@}hEmd1HCsa%X%Sbx8That?F9@1;Ij{&vd5^5@E@m(PL4y=8v+;_|l*npO9; z=WXz08!K*Wwp47Z*j2H&;!wrWito!$RGg{!w&MGWA1h93f3BD%xn6Oz z;&uh6GP1(FQds%3mh2E?ceb);WvI%jQmA#S^s4l$45THrGO|)y8B>{1nO3Q(%&5$% zET}B0tf;K6tgmdUY_05HIT+1uRZgy)S~*xVqjFZ|+{$^Ciz=5? 
zE~{Koxw>+F<%gA9D)*LetK3z&xAIWsvC5N`XDYv~{J!$X%AYHLt^6J3&q|KoT<@k5 z>TUE6dKbNi{U4c3eFk$Sm4PM@Mz>2>-neV)EZU#8dVYxRx#W_@4%V11i@ zlzyB(Lpfd_E`3)2tbVF~2Ffh`T>S$568$p$3jJ#RI{ildX8l(E4*hO@pmx9hbNw;> zNxevSQh!DtuKia3z5Yl2&-!TX>9k+=XL?^La>T35BP>ZWdU)z+#VRlBS9SAAY}tmEw4_fPN`N^>#ECiS=D*f zCDj$x)zyvFt=0Xj^}4~;ZPlZy$5oH7o>V=#dRp~M)pM)oRliZ)sC%>eo$A%q>#8?a zZ>iouNUEY_8c_v#Vxr&4HTFYmU{NtT|J2uI9&@ zpKGqy+^o4%!>gU4wWzhJb*Ocz^{Vx&4Xzc{M%GGeV`>v>Q))G}S+xbVQ}t!F`r6vs z#@goEzO@5uht`g$9aH;M?K8Els>!ueYiHEXs-0UquNI$O)Gn!AR=c8hb?y4v&9&QV zch&B#Jy833?TOmcwcpl$U;AV2)!N@{|EwLSyi?1ov#7JG>rv-W_m0k`&ZEw!u2)@f zov1FdPFgoh7E_m0msY2#%c#q#E2t~0tFEiBYpUC#+ooGBYpoktH?(d<-I%(k>L%4q zshd{!Qr+yjd3C#Vi|XF2TT!>VZe88Rx-E6v>UP!btvgWndEK$PlXYk7&edJ4yIl8c z-S2gO*4?S&)eGx;)H~F>)O*zX)d$v}(VfwW>Lcsr^>Ot{^=b8*`i%OV`hxmzb!GMX z`r7)&`sVt+^@Ho%>c`ZNuOBRbwtl>PO8vC@m+EKN&ytf*9!a@_e}mdjsRy-x4EjpV z@So;z`4c#1{7D=>znvrCGhA!FIoFn-V`j&n%(3UIlN|X|IIjFxc;0-*%%5*&rsbz4 zW%8NC{B{BC%o4SHL1II@bt0E<%{AlOLFUMH<-2lo+oy5BC%W=Ixmvz!VlLk|v63GE zThBy>KOJ#jMBG`3J2FYY4@BG$^c8dM_%9>wuq0Q$jO)!8Af`294o=GD7ntSqqqvp) zsKmNza1hGeBtolM{d2IN3H#}=S&ZD4BDZD8?HwGGxybD`RWLeO4jHD$JuV>;f>3b%?bb`EG)K9p-2QY(9p~R^+iGF`FM{ zmfOAwvLldv3fXbUWM;Yi?a1$IC5s?-1thDDpdwdE1EG_F=u8;yUxc zMt)0?UkKugkzXzH`!p$?|6O7}|2z1)gkyLikpS zw%slX>nF*s-8G5fe+T^sh_Ud{D#zm}e!{cryAc5&JW&aV>Jm zL|cnBo{Jok;Ug0{6ed>jOOb;ZV?T}@zC;eEkV7GSlp=>p#6N@hFCzXzjM*OXozeGY ztnnIdU3+Ds8UG2cYx~z&OAYXQ7{{&;{0`tYv_FA-h9Uk~jCUYpBa!Q9#2OCylUU~y za6H>F_FvHVKR8z3V1Ay1kLj>~0rt=egyq` z%+m+3`2;rGkjE~pX$jW(N05Dixr@L&?1B7C$Q4-YJCWlz$nh&O4&-KVYr@fw}2`PcGIf zA8Y*w#J9y7wuGN3tYLesU1zL!cO19#u=xXVe}d^RSUY{N-o0Vlfw(`xUIP0cU~>&) z^GClM$l(^o5QH@!2Kf!1pnW@!+kOl4^auEz;5&G(?WaIr@fiNF6kM+@1pFZ>*8D*! 
zLHwS{A$*7A=h{c6;MxqGokb8oIyt0Wn#}NLqHT|M0c?t3GY2+nA#+6kHR!)8#g1PL z{b1M+h5Z%iT+yC^_Di7Sus;HsC+rWy{vhm2EkfGggY7lQysqbzkSf( zgnk}mTOr$qc$X3He2QJWtpIU^cI}IW+;+YI*EF=3qs>J<0rdvZCLzOj7YO*>~Hh@~}$2z?1u=1rj4rB4ZH7v*f z*|75EInFC*hq#P&nd!1}_8gb7vtM>uK4-e?+jF0Go9H&n?QORg+?LOs?l#taru)iQ z=D3f2jI^SAeQOuYl%*UIPj6oMR7i1*LE-&=;WNpf5F&vmy18vvcYsXI}#?1T6-= z1zHYT1zHQ*0NMol7_=R<2lN@}aGj+7>pI!lZ&05{eF60))IXvA1@$%5H&Fip+eE~< z04fJnf$BiLK`o$uph2Kvppl@lpr=6-LC=9+0KHf*sb5$xJNr87rKsORy&UyQ)N4?$ zMZE#_2eADKbOrPa=o;u&{p7V1B4mC9V=0ybKgp=_Cs^Y@SvKLo%R%6e&Iy_#5vc&z zwM{(2S>oz;jmO=iVuHchC34SeXrHIiXDs^s2Yn`@PX_vM+pD0@qx!|rFNA(M^lw3* z$MA5yoRcWvmz&wOzeepylwvdP`4aSH`0qo%k|*H5jqB(d#IC1)a^WWveoEn|5Ps@u zZXZ!S$?Z4f)<|=+htDS3X9D^>i9XMvPdoaw(%fpGA4v5}p?@9vmC(Np{Xm-AE6A-9 zxy`3`B)3xJ)<$#NfZSFgx8=xfEu9xWGOmN4D)?!EpWZY#B5i=Q2GS=W?E|TXF@rQD zk=rh&8Io2QS`ldFQQbnc7SpIi_c~fj(JEp%Jlpn9!40L{9xAhjY#)^o?lW*DB>VO& z;Myqn3zeDi?AotU8R33QR&8k#n$`2w25(0m2Wm(YBZ#NCs}gkbKDW6+z@bul-mGq2<1KiR|L+s{UErZlsgPA4V)RjJjuGfUlR9JJ#AH@H2^IQX|+EO zu8DFlfXk!YDsTfSw+5?XHO~PfFvtB)i2Ixk?#~QzUd?la)B;i~NP9raO<{agtTkrA zEQLc`utJM((vcYwm~^CNY0@dtvNq{R%NCJl(D+V>}lCLM|BY0{CFHzKX0@dJ{%?IFpeMdAjb6^7OXdrgf`7EM zCLNilbd!#>GEF)%PuV6NY2_l)G8+FVBAq~_(}|1(X%+E zmA2UUr6wJTSZ>mhR;5WtB37Anq*a4RtLX@OgIiCziQslq?mMa_b_1xEa4%3T;eJZt z(YrK5ub~`@xZ2~MLL?g87YO3ci~BE;WKc;4?sDAox#VtuD=LxaQF#gFOAI`*si1s4 zmDd}1(yNI|ny93S`fsI@Rw`+w{s&P11F3ucwR+q8Y!x%=SGSu+yS`f8!4(7YswQ;Q@%Gv4a0||iK2#DHW9?Vh4^ox{x#IH ziK32rZl=4$w$iv6 z)MqB``vk!~S=4i1>N%T^w;#bh*;L-oASYw&N3+ePHUp?lF0~mzZE~r00L45SeIUg= z#tMB0QY@r>htR%-wC@nww~+Q4La~VEHk9U8L~VvrET%Y&+80yHVH8Vfls1YbnUbXY^9h- zaUjJKifzDCjEX)>n(=OdBTsH-CK<;hN;pqZNAqc>m_@NK@CS|xXJH8X{Roac8IiMLJ%=3a7r6wVptd?X zAI%hV>G?DOxPxY5i6@$KG!w#+s07bbJWlZj#Wbb|rC3aH7(tv}WZW&l zFEFY+I={bx8%VjMRK~+RU8gd_MPSa!Gol$U5*&F(B(qpS=Y15#8FXf5QJhV2F2w~D zmrz^=+{07Rr${n`bMfp*?pg%bQHzbhy|5rpmBeBT#T^u#%*Ykh$&6S#m=P_X^6r#( zrP^L**7py*6dLD7X`3`JKnGH0;yhhi_U% zAl@GWaX!=&Jg>wW=ALgLcuI-41MaB?ib^J$^3l9qSpDSfo5TAa9C_CzYfer3^``yQ 
zv`24>YTBl$r?#_6FMH`a|DIc@E&3aMmy;8OJ0;47|&bK1~!gOcdp# zcp*6R$h#Y#7YdHNyOFi7rTO%ssH10MGsSc&X`y+h(+pb(;@l@IxP{=Jbed-i%`=^e zK%5qe88l`q@Bl_mb{IIkpTUv$c`^rC^lI0a;HfO?yD!BoItP7$3UAeU@}_6TivdU8 z^sx5v?nmafAH{4sx_$)D=TLwBDdy0;`_sI0sMr1k@l-{Q*8nQdrSbt3bE&rh6mzN1 z0YKw5jAOcnaZECSNhaWB<4DlG0A3Ebf%KRdWgL@?j5(iXH;7_B&1De9LW)Cxl^7{` zlQrY%!I3vvGUh^>*${%*Ss-H=N^B90>@!&K^x()og9WdS%B;cFQyJkJ;EB8mTksme zkvC!Dp_uv^M(|WIWZYB3C>GOL!zdQhxfn*Vgb9F68^scOY}+W7P;DEq1rf=928Y)Q zj_hY}cu!E78Lq^Asf=*k^QH7TY&gMFr8Merg6GTVJ#z%bGCE5mD3;OnHiBXq&3gpR zyNvc7LHm|d`ACZ8)N&-n3Mv^zv64Q)j3#)hlJ*@<2$294TE@KgqoooXeBGl|Ue zK;RL?B>RyZ-ZgM!_m5+eF(w&fk_k<+&b~a8jK|xD=wvsC%i9l*?B;NJ3o%z@Z;iuS z1di;baZEDCBx6i6p-Cn*$#^Ci=9#0yj*$ni7m;Ddh;ZCfB_tNk9U#sSGQKw8yNF2k z=s3Jp;K-gF$0TD+GR7nmnq-}Qc_tZ;*dY>$2!Du1W8?XXJn@Voc&dWtF^c92I)McK|iKUZyr1Gr-hlV+NYqY|J22n~fQ4YO^sz5Q*&WX?WP#14m+p8En{H zKMhZ0YO^uLrZyWh+|*`cMwr@c%t%w4jai3CWN%Ev!@d|e60^Zz!|s`Bc#Wnu8?(2m z&Bkmpwb__`Ol>x1v#HI-Y(XTlAEx19FAN-s`GmoS-6zxV`kLBo%zmae8?(Qu&Bh#H zYO^s1n%ZnkvR_8-Evxb5G6+%0?ip4sY7K8WkF?3ke4lE_8YH{f#C`^O(gETuz{=C` zhQWgDw~@ZWGgew8s+aV+r-K2zFzU9oZcudG7{(hP=tA z85-ViRKw=IfZ8u3h~olV&UC+6;ENPzQaniU2*tw`dH%#kK+%d~FN%Q_6Dg`FYAB{t zOkzl{ddlZeOr(-zioL0%iQ*87Ln#iYID+D6ieo5Fp!f{Mc8bqZe4XN36qiwahvHa@ z<0#Ic_#(wM6yKw`mf|{sf~=^yQCvX|#XO1y6pJX9P%NWZK~Yb!nqn=*dWyX%_MzB9 zai9TTf&U?t9}-3QHp;hAekA2bQhp5O$538N`FRxIp}2zLyA)Sbe2?Nfit8zEr1&Al z%@ns#+)8mf#hnyCrFg)AB&)-eKTM+^rTkIKAE*3r%AchCNy;y!{Ld7xP`pa<7mBxm zW)^10=bkb<{=q52y=vl?upDD{Jo>cR@!->D;3OtacgE~^@);r{Tp2j?;~P(z6E6mC zm5JL-xeceykG~5qurqM{@GwKkN%LBSEvkZ{%{X_%gVMGYQOFc#5TCM{7Owb5Os5`+1Z31amGE%pb<*GU*g| zQOqQ0;Y9aPoGH3c^rYxV(VwD-VkE^y!cEwDd7a|B6eX64mRyTiip~^WD0)&9QS`G) zwBlO$6J&C1CAM50L1m;Fb!z;1~q{y zAk%}YK-j-!urI-2FM-KIykf}k&dv-2;qwt@Alk!Gufg$q540Av6SND2&sdnPsFTbP z8+Q0)gegIN1$7@#E%#Lsmh)dQj?&r)#x>A)q<62bmw&3ZM=ok~nF>y*> z2i1}%10%;Vah+Zm-X!?$T-|ldT1E-4WY5Ew!M)R$98F(_XQ;WSd>JWvMBto@K;cm& zEs3whmq$_J%Vp%ga(9^6cmDxfKnJULmvquL}3nOAUQ|@iUGxf;y@yh zy+4i*>JU&UXaIZ^;(EcuOhw{|u{HKn7<{6|tOD{NjilEiQj;}+?`cD$1s4o17dosf 
zSQlXx^04AGSdm=RWKH6;5(c~0)cT+~{!S6-Fz9o09%N}uIrJYx{}N~+sHZ=B&5MC2 za_x)5=tv!nk+BjfY6&EyCK|b+O=@y=B(=;?6TJd$xuGWQSc4s@iJa6iwB}NOl@r9LXtYH*VnIex4z3(Lk0Oz`qVOoR3Y^huRGNNiIf?_+(|oE6WkT zEJr-E9P!O^#5>Co|13u$u$(al$vKpcfSx(@Y?4xxQIu0uP>i7%OEC^e)=iY5jyBX1 zLrr`TJ@Gb6Fw~8Py0@WjGSqzxb+e&vG1RSw z`Uykb*HHH})cpQzlhI+W69$~0Q8tPGodNgYMt`US! z+L^(aeRlnkl^Bnbgt<>b;Za13b2=;*=kNZ$xQ;L2FibA4F(3sf1yli=1bQB{5cB~k z1%ElD3Dg%f0rWD6{I!y=LDxYHek)0SM|l(EgulDs4vGb(fvP~Qpb4P&L7#%Y1s%ih zYklzh#2@jyKrVh+X9;ow1%rly^7S0%Y0wQ&k1E_NL9w6#puwP%)%c5mpsSz`(2yG3 zS3pmJu7Ns0R<#_)0rdZRJJ0Ycs;vP}<|M};2n>P^OMok2fN)4o0s$|kHwY~u0qG$$ zLqZ5mdJzz%Xi!QJREm^K6Cxslf}nzcbabS56)v`Ti(amN?^-iC_dfplp6|~G&%4&z zb@tvfvuE#p_DniKKNtb~AT*EqLOWrlS-1!b%YuwD*Z^yn@OPBpB0T;Y=N=;ANq8ErFJ%n#I&A`P!A{r* zpTlwZ1^x}y-{87}FlYlA&;{ngQg{=RxW<#S%+N4HW{P13vn6Ae??q&}?*PvN!=kKU zf(L>j1Req}+y$Xf52}FtExD>t4eo*JPy=d$1+}0r)P_2+9}d7_I07f(OZW;NhVyU{ z9)U+84C+GzXar9{6KD#}AOf00B*Z`*q(Xc65q^SP=mywmn| zaX*&p=<5R<9X{C6Ror-O$2#tNvK>9P#L>&=IJ%b`{!SOiebp_;1ULS`92i9PUbx@s!rhH-$yGFj-XxrK#oUG84CT(EC$Kuw6X4gM-ueblj0(cqz@i?^ zdf@74t~}!GC%;-gYB>bcESg+8$N=M;Zu;F>mVG0!|*vAfunE&PJ<0+ z;R0NQOK=&kz%{r5-@vzU6K=s@;Wm5^KfsUh6Mf~i4s)`DjTOFI{4K8KJ`dI%Y!CIR z@O^|{&EM;)@C8$^JHGYcp>`F%9jfp6E}_@^R{3rcW4-TN{;JmjwQl2c2WzA6x>_dI zZJ$@=WkL>cj1YBw@21&I_#NNpQ1+Oq7P7y189?$ zJ3`7Mnx}_0Tjl$h98UXI>M=c7-x4o`7ToOHji1!iMt)4%vxIn%j<0B`_XR!rZ9>G7 z_!r3isOnqf5`w$Jx1GOXCb2K#BY(@x#F93ttgDCf;^ce7w?bpy)H2>8=1t0Ea(<*O zoz^h926q(8Tt#H0$|#k7mC-6=RJK&vN@Xn4xq=-n>76Us(UOO_B^S|>k7&tBwB#jP zaue<3CnI`sOCRKDDUrCH{G=xm_h5Rfe%u@RD&tve@i6x9#~oubFU~<0Gk=!wLS-xA ziE2fvEDyM)k4h)KX^iMyGtV@d4V;tB%2@aQ1*TDb;Lf4J^innYJk+u6SjjVJmcpjS$YeXvyV2)>dBoOZhLiDud9DavH+C72RL5JTq43C!e7+C;9%{**JA zYq3s!Ioono4%0aK#9~jGtom|w%e9?G9c6_=){5HkUt}h-fK@?Rcb0QFlu!?Hm|i(U zbv2`qJ3-oX2>zmnp`{(uIBv09CVAR-(%d?+4#l;vcTSeBmE>yr97pEK=@kpQ|OkxFrYCl80!?MYQB2T5=LCd5MdofSd5v&VNXFax*r(_xPYVsOIUcroN>M|N?M%qW2qjX|~b88q+=o!qt zn3sK)$CG&~^EvW7$oTDkMp4g@=N-}<;TSU+`D|x&bb(RS$N9_`IA%@4zNSokT*BxH 
z-!pfMWR5kAxzex8SR;6rxe_C;Hz$(+bCltgAma<>e;2!9_hCN5{Bsg>%RKTqNZh^D z{~rq}2jO`=nL}}mGnAnQNJe-6Qns>%K8UoO<*MYSK9Ow z^8byxgwrM)7tp@UGZ%3#j=-k_na^^JyqB1#6Zd7#x52TN(AEt|lRlhzIO_lhN&7Qp z97g>c68AgG*MRerLEGNXu~xzd9Pa?v!d}khXXN!a>NuYAZX*A-ly4O6aE)^Rl0&_E z(SEeeR{VyL{}QgR6!P}Z=bED}(>R|GET+9XlAdF~L!I8k|1xQ2lE;}&l!5krit|-Q z{flREY0I1$wOS>iV# z-v=q%P2z@A&)L)=i8i^PHl9iN6XaKedJd55gky9d-?fBY=Nx&-_X_!rr7lNlgUfJ* zG*59}-{-iGknR}eTui)Qx&BuYmb;YoF3!Uu@;SsYzu{g88_&Fy_S#cI-DuBlTvvOj za}3v6YmR>xtfqZWQsys+H-mO~l(g?qo;H*@lYCDS9@m+4U0Flmni@&i1>E7Z_rcMu zEzqXDVF0`YIkfkOa2?&A%6Z^^$mM#dNk6j?f@sHB zILZAUI+wK@+UhEG`JKAAp^l^a@^rDkyTFx4%7(P~HflE*rcm2t(XgMI{t8{GX&GFB za%!2>o%tE$JxyzIMow@BzJeiKK^wuQ1v*gkQ0NPO&fdq|9S?J-b%7jOqb^rL4xFVW z4#A_vd{d6zAez?L48v&=?=XqEAt%^qCcZE2A%Tshyv z*ym{_T4E;b<}4qA+wdZ-(u&q7;p~=`vbr~)yJ#VK(=t_QnOd)zMuwenPGl0Yy?y#3 z=ei9o6-S%K@oSBpe!(>hf{DC&cS*325yF!4}2T) zQHc5Aw;_8&(*Ko+YZq=Te0JeyTF1!2taU8py%4j`G5pLr$5ozCc~a#mm8VsHq0(0Q zmC7?J&#FA9@`B1sDleI#zA?fzPPkX%gnK1UxL4wY zdnHb|SK>6$I8D^QiTXED|E8L%sron5IL*|KP&-0HBh)8KeWKJSN{zjT58(E!C%`+O1T^YRp)*TdUn#?Rd50HGaJ25byd+UE;OO2^yN9{s~%_1ocn! 
zHX=@<=8&jy5;ccJZ<6{;nq&YoMerY ztZ`B_PKw4!(Ksm@Cq?7%r5}!)s&P`)KUMux)jvc1Gc;|6rp?ee85$=;;xzDii;*o< zrXxM|!i@Ib&Z;wA%{RBy-O<&ZL^lXCI!h|9q82J6Ri+@r)hFC5KH*;RX`((&)TfF1 zBx`%7sDFz3r>K91`edk2hWa%2rfNPSqurJiJ4O5(q-q&5G)@zhQ7ZjfQ$Mz+UYysd zL7dmAL7Z1=U>Ys89a?Hz#i|{vc5Ag;s~x3wl-ho^{c1N;yP4We)o!YGgxV2mN2?vJ zc8uCFYWwxP`1QQ_wMKreb(G3zl`$ggHuARPGYxVNCaadaNZfKCiI%%Zv|LG!ZszI; zw4@TZ#LjSaI$G{Ci7$7Zgo}3ko%nL^IpK0W@|!uaB%a)d;+B#-TIwurxq2Nf{&J5> zesY(JTkcIsFS^n#>Eu2Yx7?|fTGGkgDsIsdPqg^UJuCilcS?Su#VvPPrIz^OmiTfH zNqEv1!kaf_CoQ?#5f377LETF!@P zX%9y`>7{27xAYT^cKoGu;+FOmE%g&E=Sg&8z`wG+obpQkQZES?Euj)$T3fWVnrP{D zMK^ZCrQG6{b`u>3N-8)4jDce!7=Eclb-LVS(6Tf!LvxJ&6#J5>Ik!;XSN2zhoJVOnuCzp*40r zcna>reZQ|^nCzsHeLfM!<6#M^`85sSd!Y~U#=}IYP5e4g6D+tJszNom2dYC2=t10G zM%ysiV-(_H7KghbghMlU652vf_!B%1afVqp%xFg+5=S3mJ}kYu`AC?N=`|k>Gddup zS2Q0BGdd!rXEejYj7~`D9ZjAB^pNzN$ogSM7i65b3*&}*&A1;^&M&K9Z-D^hikKGOa<+g5gb>@1mE(3Rnz#f*AS2-XJ;V-4{hZ z%6?tHoqOJI4`4U1-|nWC-|mAhVt;PRfB6;RC;r3!;6KduFaCpnvFktl5B|el|B^rW zFGXAjh+;Ox?OWKDw z%Z>T)D)4@}u@HE}+~ECkV+p(lOJNzj4!m)0Ecg73jMsg-tFYdJ)vyNE!d>;5d7ydg z+;|swJHdD#cEShnA@J_Gu^0Bi$M6Xp0Nw~N%=&*r#*_ZI>i0Z9sD9sbTJ@)%A60+k z`9ihq5j+Ry!52*3AQE^Z-S9&+#6U}E1-zYZw1Fp~Ej$Hr5Dy8E2uY9(DUb?j&<--8 z1H7=B6##S&bc1hUGaQ9d=n8#cJ#;4S7IYE)(sCF^FC}}zL^egXP#LK*N~K?Aw8|Kj zEmgKs8HtF-o$czbuFi3FuB*Gby1T2NR$bed#WRk%5x=ak+Mv77)ATouEcMA^$C>ksfc)~2 zOFa1v!;z9AQ-32!(tYgXeeKAp+$X;==dxaNnANL?SiiZ-+Q=%_e!>Rwd>YQ#WUXgT zALi1X*%dT^br;+TtgEbJ73l}oePUVFtIev_f;`S>f1WQdb9JM$a8G5GY8-1Zp~RWO zdQKGUHYKEgmo=kIUe~GZYdWZoPjrD3Oo#PQ4twFgTt-UJIfr#l_>Qx3 z1ozKy5B6+yB)SD`=w=$p_O)}YLgM-dcsb|wW!;W_70LEmRoV=A6P1$ZN$fY4&sHiAm_&_>{xDz4?{Rp3gHY&2GVw zgQig%j=G=N?d|udO*i`tY4Y$ZfUBfkg8m%7gkNB4U;4XPRw85h1=&7zfzNqJP?KxBF7$#ea0)v0m({^)7b!7U{~de=qo0Fo8uB9{_4Db)@mU8Et*IC8 zNw5sYz)<{#4v>=H%^AAN-S$uJ!gP4B4XqB>h%uk5aRC^tlrAM~;}rVYMDAAHy&;Re zEA8!l950;|zv43kx(w$2w+qR49_!7ILG2;j$*>Y`!rEsf#!Bjc_8C?-pCm_qJMUnx zAx(AxeL6lR=r>@ehRmgXQ|+qfCDyN`>oC+bI@-I4znk9!K8HbYA8XrfVF)}6GYTcv zqns5#uIBU$MG*C8+Pz3Zj+4_bgcJ=ES8vX3M|&Q%`IKHJmb<4eEF^XYdLoR0jqG`d 
zw;w0ZZ)UzlY3brI_o0G@)_gsvu^z z{gcWEX|ZfOid2t|;atFMNQt9f#O-Q7Ldp)$as{ZXA+6liUZV07%71Gttrk!H$MHMD zIQG@s6UZ;eUPIV-_?Zdfl6rqSp3wp{!Y9|BLarw!FosQFjTn6u7%&ntleqmSa`$i* z?MIuFXh-lT%Kg(=@1j(4@2>8zu*OZ6khY{-Fqvx%S9kjib?a_7pjLgSh_x207^}VA zhj^#Sdn;F}XDaiuB-#(X?P-a(hSutCZ$q}XUs1V~_`%cY4PWA}C2lG9Y*+^6^t4+< z!v}CUeoIjBTv|11QG_BGknI3pLJs8|K z!!&x@2gxrVtB2hbs~6$@lEt-!^7XV&kfs2?PBXc}VF;{)Sy26X`Y?23bOFqSFNu2@ z9mFqUzhL*BWg5NgLA2wS_*{VK6j~9zljl-zdlTjBZ4adVKE!`Ny!oR1uCSKluIIV? z5!9W_l|NT}n^UF)JD9LT`0aup@K3NW;B#so@9@|ssA)g@Hu=5!GBpLORD2?+TisHg zIP)n9*f0Tpd4;FhtEQ1>|4NM63pmT^j8PY`>%ksFE;n%PTgdtBMN2P|XJRr($hRM& zMnxISOW+dz1GFF8njzLgO7p~GvEC(zD~q|~+u>8jm>BlLZ*ZiYX$-V|95rq!&j2{m zp5JI-&@$=ikC4kh+lv*#5eM7Vxi$yer5raWlN?^>3H%20e7Nro_FUKoe~@7!^O{7y znT@*Xb>84jxWRbcGZS5ko`qh3egVA*{j!H=At^(# zU&N0&h39ScbG|?|4?ii-95nL^&r9fa=uaq5Jns?8TOEegk3F(tnE}fautEZsH(=cr zu&M;CssXE7z^WdwY6PsB0m}+lwE|W(tBA7PMOms^L#%4n0Bb#Nuj{U*p{ACYuMm^U zWS6x_DW%B{Ymrhqlik%KrBo)nt3^s_OmEj>IQ!^ik6|)96_4`9tGHxG-5I3gTj7(pVpp(R?-2fPi*Kl7%Uil@*;}}oRG+bP zFD10nCGXvC@FlT_u}t^xRoWH43%*T+u+9=HZ~993CgO>Gh+H7D&3cDhu)tr5k$BFN_KlUB-C1XqJ z|4PhL#FV#=SI}OvTk{k##Vw^eMSfzJ`%bDQIm_-*+5hRZYo$w$`UN@3o6Gws$r@sw zA&s1&53rKdI^;VMuw`%Q8Orb;wM`C{cy~xw=KD(HO6hjf63Oh-Ttdsp-p_M{$Q#hN zu+QW3nWmI*2`%%2)HEdadDlKmCLT){l0`yxk_)` zn0mwThzwGBFLK%q=}V2N)20otQBngveRz$LqH#~D!3vwjC=WKm+pq<;LOHwx+n^uh zL4U}H0Wc5-!C)8y&p-hTg+eHTVNeXip#(<2NEl@qp;JT4*d#tRbcrvj1v6C25v_WI z6{C8i)mru2RvXn@tS41(wf?NS+-j@(9qTF8+pLbN`&pe-=UJUq_qV#J&bP8u53sUT z545_f9%SXH9&F{R9%6M<{fyOJb%FJ?>Y-K-)rD41)kRh>)x)gbs*9~Ys)t*BRhL-( zRFAOoRFAa!tETS8rSB6bUo1ZQ{FgTdrtrT#{x661{O^$)oiks5`F{YGj?e=D literal 0 HcmV?d00001 diff --git a/go/mysql/icuregex/internal/icudata/nfkc_cf.nrm b/go/mysql/icuregex/internal/icudata/nfkc_cf.nrm new file mode 100644 index 0000000000000000000000000000000000000000..3f8d756a0f4fe8738b790f82cc4f0321a6cb4947 GIT binary patch literal 52432 zcmeEv33yFc*Z;X=5=lhHn|m|eA#-wb=gia1%{+*RP(=xXh?pWs@tUKG)=))NOVvCV zMNw7MJXcXvRZ&${MTsH#|MoujW)efS@Ap0b=lSgCx7ONgueJ8t`%L?soO?)_?*m*I 
z@{h~399XL5Lvo;Xmb3yJUd}LU4+4jth7s)qtAz~H#f@R~ZBcp5Fg--P+-|_{5c)@=Js|8mDKElqzcZ7$9KEj`bQmd9$gRNFt{bm(ponif!^5IYPP!B&1ODAg{{fzp;I28(}xZZk^pBJ0GEy zeVY9Q`=$0j*n2ehK~%QHLw=)P-J)mthiEz8+{z)=!AF=6i~va4;&9eM+~TzsGg_Q% zVdbcE47BkPmN~9>yy>WP>fyB5>8g{za}Vdm&R3oNU3$1IcDd@}=UU`C)Af|At6PEF zTW)*Y_@ZRdo1OMfRt&hGi{mfDN>1o!t~d$88tRvaT9CY~ks5gq`VNr-=@q_`~S_r|INVv z;K=idlgo`X}%MD_xf7-rTIGg0hNN=Y-RsEe*uoVhGkW7!@7@fd+_$)Jxyx&Kc)2%o((=5e6t|5LQ z7Ok2}iM4f$p74X_6-#|xW4TobDV-Yl7dNOi^!TNAz3{q~rKK!{ot?3b{A{r@@)6E} zc45fckZrZCS@7H&a@t&Hs1>oiX4d>!Yia3$@TyuZKh!zYt(hz|qc#JJoho#5y}ckI zv}LHJg=8^HO}~HOHS?nT-V3bRt+`$TAzk`OKrY+bc~}#)0jU!1C1@3s;_M{YsBWW z(Dawse=X5=h8_>S82TIHbPIV9YOS>B+dp%CZ?R~aXSla_&vozYUf}Mo?BJf~Ug++t z)GL#fZIo)H~J3 z)tA(_)f|nZMyAndjG8=6p=N+)v}T%Sfo7d%r{=iklIFICqjl8Ev>L5Z`)Yl&UMa5s ztQhzO{Br)g{A0`-{$BnCeih$MpcUi_`UoZomI}TW9AjR#oQCb4zhVD$TxVfpowqPX z*g-g~RzFv`PPhxT@2KW4q0q|TiuDOK^TrWgzMgH>hsKy})v$1w|DD2TtQkAn_k)Me zFI7#n*Uz~H(~9ZH^k?2;4qkKGy7Hi& zY|&OV&~cdhHdPN-y7jW@YqM53$mSd2F(IG#z3@$&Q8qsdD{Us*Ot+bB^H9iPKC=1L zX1NXDN@5ji#bNAtYi&Na*=7@N^|cL$*=NIlcm&9{;y*kE{yp#`a25Cs_|t~-ki+mF zc18OG)T;qOGycP7KnuVP@B|b46+`G28_)7o70j*zIIg3P8Pue7`MD}&m*piq-inoXqUt0JNe?oS@ z0KWrwfyaQ*7Bdb=Y{?AzLJ9*SY)K^k!&t~k!1ow=7I?mGd)r_UDVSwjjVS5?|6zCN z`vB#@Fklo=2}}m21G9mTfKP$tz*^vQU>oo?un#z5?(bV$0~se71?~T5^!qz-9yV8j z2ax|D9NE^DC_4T)kiqvbsRi%VW3>_$h+Y$Q6v-Y0J;-~| zMbw?$;(sIKUZTFDL3pM)N>oXGZ@Ent9dMf}n)$aQv7iyPE{`pqh>SA4U`)ZGCvO$JQ}BW4kU2iv zLhQ6{>ts?tRKsR*sOCh?wb(t7swI0#;+Ga~M*uvm@tU8#@O(S33_og`JCee^G-c0&VK;KZ78T z9b%`ki?EA&Xj7%Bnp$ z;6Ce}@qq1byJ)9qk0>*zbp6!-$3@?Yy5^h}T@YOoT@m%p8JP2C&UFzxDt6rL z74~txk=CDwhI3$KG_1?g~}$o3_A{! 
z3vd{BegWk~?qFAF*OSytCFc#cSfp+ATw>8$&O7XxiD+57Z8iHKyHvYXyH`u9 zJ*&N|rPcCk-)eu9!0O=Y&}t472B@pGpu~&A_(J-hEp?+{rP%8D>O^Eg){vHR#_k8Z zD|R>RZrcT#k1ik^J!L$m-~7Hg-~Vay7YDkn?+8@2pJb;Q1xBc82bVszfy+ zf&26A|NE`??PB>)YR@fA_m{}$*>jj=e%*M9L~Jb@GJZ=P>sC55FP;tJkzP9^P51qL ztz~bu^vGT(EtF(OTIz$%^Z8N#tyatIu$*M^bmW(tC5xXWV%^MJG$b}FTR5p%{9c~3 z=Yf~Ef4(J|dvBK*f$m}M`v0|aN^@k# zG|Ik>`JC%vFSEB~Uw>3}_R;Q%$UOBQW&bLBeEn=*R?enbwyu{e8T+$JQ{yJKuerbB z-krwiT`$tJqWpLCuaft_n@LB006hYf_PQqa5WE9ayGQkRAN;S(Q!nh3QH_qee`8E_ znUZLI_=D)apJ1P2pVh?3o<3Ioc@9I-V_xmZ8t->Yt3R)u%YWY2-;2QRXT$mJkey|o zM)5n@cSGIZx&*Xww)MCC-|Oq2T02#fTr63#(I)fXqg%7gzQTUw|IpTSs@+-*b73=@ z%^#0hQ(I55pK0Gz`#+t9ru;|L9)CxwkI-3aX!Tt+TN-Nrd$qB(`TSP>TNC-*t!B%M zGMMQ8Zf&F&**>rRHhD_XL?J7Srt~k?5kG>p4g6;|k?q`C-GW+aNv*W1R%+bm`bI6> z8h*|D1=ckm6>N6@()}CvL+*TNNFkp5n#BcV%|wn6!W&0p>!%vUB-(Sex=XdvwOZ+B zt<*T;odzwhI**N`{+;@#{JA*N>X)aFH*$Vs&l~ma>dP;ed6oMUcaB&fwh`Nl9mTF< zvDiy26Z?sS#7eP7Y!F9@e)Zyx60xjuM-HJDaEbt?RGuDT(C?N8J+< z%hM2IT_-;s?cdfcmcPn=1Ao2!X8sm>b=}6_(da7sDm^!^uDdpW#ouGU zn}5)LAO8^lxczDSWBdzEuH0XxAJ(V-%(1in>am{f`A^S)jUzwnp8t9M%n{EM&lfKe zFGE=+UMJqzXf?4#yj{FgyhprWd{}&3d`f&)d_jCkd_{a+d{cZ|d{0~@ej?^b1QHvG zy~I)CDiKS(Br=JgBuJu^Xe0(nlq61Kl%z;9Bsr2iNr9xjq?4pjLNYIs6iZ5})OZ~A z>z3K)R@d$G?4Hfm$=S8`@38dmu@>)!$D`#eOnfaC|2FUK)?YFRBN{3hF1hG7O43R+ zPV$=QEy-ljWXV+Mr@GCQ%$9T%eJGhP=_2awwn(x}vI-u=p1fPJPBIEsu$OFAk{35w2xh=UTx$kzztx7WI!4nC`BU@|Zr zh_>cG`~dtDU@5Q~*Z^z=z5>1h4gn{C?}49z-+>1J$ASOQ3a|r`5yu7G6OaQfQ4a>! 
z1MxsAkPWm3UIX3$x&ys|fxvKJ954x(3cL?|2z&x81J(c=fiHlsfW5$BhZ7EG9DZ>4 z*&)g!l&<|(;>{k!N8i7q{ZHUAAZ&qo10;Yi5C%j7NkBHx1NLpeUkADZeSpEhDBx{i zIxrjP54(@R7Xxd7FM!>^5#S8)6Yv}G7r;33A6f&=0XIMj1Os{?9!LdpfY!ikj`jbZ z{igL~>q5s8$8yJ!juRbcIBu(ISU$2|X+6(ziQ_uQt&ZeasNYKJUsxZq-s5=G@vP%z z{8I1!Q~p0#|6$E>vUPHG@^K1s(mB=lCot(tP8*$eIPG^j`BeOKPGfQ|IlY~8&1rhhO{WiX?l^stQ{}Wg zr^e~C9GZX5?wk`jKjr+H!)@u*Qr_~5M)k0kku6hOwr$y?<=~c;E#Gh1xc*7Yl`X$$ zd7$O@Eq`tKuw}E{#&v$z{|bj_Wf)6t_Veq%V$!^>IW9OB#utsF+@9OF#<2j_l(+~4#V5bD9(Q@{N853a)3yE=A^luiW24>h zxb1Pjk*&86RGuiXJBpX?^_I%wF?{<;n4~@pAMMd&#_l zyv9kwy!2kNUYjLJUg=(~yk7U3D(d3Z!)vnWJmO78_RnuO_w}%@&wgHYGKcvVPq@zj z=joGfwq=;}Xy^Ji&ypv+gw+)18P0Q@e?;`3fnVzN@~o(@qW_Kmu{oDN_=_Oj<*#^& z-0FIIz+dP5n7^r>&k(N>n5XexlN!ucod+Xe&Z8;m zl(UQA96a0wo{j7;IbU3qkz%2_J#71THjTml850$UeH7qvhyh!n(9dy$KmOQIlI zpl}ItNf%@bR4xXWhW)e>v}qK%VLSdJS+BjLpbKi<1u-s0mo%3g8mkxH`|mFpRL`n$ zgqN%5yR>)d>{8SqzY>=LE<;>KG|;_Vv=>>;@S63EE%zeNy7s5fjB~vfcrEc-<+a{x zi`NdXJzfXBj(eT!#N|uP0stZ+mZ7Z!d2@ZAF+?jC&)+R6Xj#{ z$?(bZY420$Q|vRqr^08n&qSYTK688)_$>2T=d;CUr_X+$<34A7F8N&d>Fq|&NaSwe z7u+c{yt|m{#-7v2S)1G!bP-L(o4A1dp@h1r)NpGeV*0wZ|fiWaHZDLSB>iU z%AO5-{5x*~zKU(b-d>@GtAI%AA@yO$ORfoKER)!HERvj+m^G{(*DSWYCfY1fNCTu6 z>;E-gn-R^+oIAU6pgA>c|5s}hvF3hWdHLsq&x&GM0lg}JR@|4Ae2D?KPZDm^JZjV~NrlwOuzlirZtlHQTtmp+o#NO>|V znXSx0<}AYl37L;fAq$X&$W$_&EJ79|OOTmlX|gO?t}I{HPS!!zS=LR~Q&uADCo7W; zkqwiLl#P{5kWG@kE1MykC7UbzShi5MM7Bb@0xe)2N;5cx3qNcmX#1o2w%gm>b6qrovwRb54)bEa!27=*H0Vd|EwtgOut;QTCuX=GsW714F#VozEFIr z*j%u!;H!e&1^WsP73@-cqd1^AqByDePVv3s2gOf{tBPM0zbpP!{H1uPI9Bj&!I^^d z1wR)2T<}Z5Zv}r8+%0%e@VJ0!%WrGl)~;=fwk~bm+j_Q@;%{#?3ZAc(udT0xuQPs_ z=Hct(tMCo*4e?d^>U<-7V|){QP58n^mT#_azHd9<4!)gzyZQF?E%EK=Tjo2&cbM-; z-?8}q#U$T%eP{U2@}2AZvF}1(cH}yo3zzt=@Ll7(-glGlR$r5Jhwm{ zFFR*PeNX#d^u6YL%lE!-4ZgkM;3x9)@eA-%`9=60&ApU+Cs%;m$q zmpqR8o%Xxvcg^pX-+jLteAmOlU*zxOAK5Eoz!ND0UY$O*^~XdloypeUduU;w^kG9qAXz{G%e17-%y4OkGcBw$s* z`hYC~I|B9u91J)fa5~^Zz?FcT0e1r)25vR4IB_SC~#=t@W4@l;{qoJP6?bAI5Tig;Jm;E zfr|o{1+EHQhcBpX3EUR=Rp9QxeSwDqm*<@f+?jVF@VmU918)ZYo_8;>I?yV}A;>LA 
z7F3gG+gj2(pmj)4Olw0>LF<^H^q`{F16ogNT@ds}P|u+Gt@{O41Z`+NE@*Pl*R7AV zKHGX`(A=Q;K}&;vYW-*Hn%4IDUipFfy8P7qwL$Ijw+8JB>XF|+|47jA{CDy{%wL-S zdH(63efbxHt_0l-x*t>>Bn);4_6YV1zLbAA->OYWu%wM4I4(FPI5$|?#@MDHxP6<~ zgS!P62lopu4;~ggI(S0xl;9b`bAmq(UJ|?}cvJ9>;Jv{|gHH!v489h8EBJnJO|Vsn zLx@|5cZhFDaELY}G9*4EB_t=LO-P53!jPUJr6FY@!$Zb|ycP0J$a^6lgv<|F9I`TG zL&%pQyF(6y91Hm_LS@uB}wj#IvkZ$Qje&JX!WxmdYc zxl#EgzV2`cUvxO5{6TqDc}sau`AEqLvkJ2ha}E=S`Gom}DZ>n5abYQ8Ibj81ox+O3 zO2f*-Mud$In-VrNY+l%+uvK9j!?uU*2|JAMDg0O0@px0ray&P+qNQ#1?m+%dE~~^B z!bnU@{QnmJ*ZO$gILTeb^VArw%_-Gc)dhU-=8EdN>L$K=b5B*JdV+7?2-G%edwc=MRV`L~sby+Eb&y(# z@7WmCQR+B+)n>J@r_HOB|6ly-Z;QVp>)SRgFXKx*UdE1t@8VkQ^ljr!L}q*Y+BB$u z!}$D-C$g3}{~lN3@35A?JpOx8|6ZgNb%uI{JO|%gDp0pqkHr&;PU?g5LUob4SY3** zFqNw-)FbdMrt#{D>M8gl(@gao^*nr+X_0!FdKJFTv{Ai9y&d0Z+N0jDK8!Clol>7w zU%>a8uBfl8Z{n*>_taJDC-`=gLN3tQXzcL~Cs&PFRPI^i2nMVewwDZcbnuBp(B!1tcUYbI)@;HytFHFGrM)${Q6r$w4& znpOA))JDw~&31eVYL8~W<}khobxLzqa{*t4x}v$RSuekduS4C_RB4{z8&LwSjn*Dt zigMM8wO;sMl%IC5G)Sw&ccTp2C~X|R9+jdUCe6_1X!EoM+V5RG*U4|}4cicZuSD!d5x73qp~rMdySRhn{Lg>Hmyv~Ij^qHcS(q`RWKuDhwbt-GhI(ml~} z^a8z&-d^vhch!sa{p4QyVRD(?PamWwdy*1VLnVVgN*|{;>QnR?`W$_pzCiCI=P>Qj z)`_+k>We7nFva>({Q!NrzCu4jKUzOtKT$tLKTSVVKSw`Lzd*l8zf8YMe_6Utzfr$M zzg@pmzem4ce^`H9e@cH=e?fmqe?@;?f5rc%{_J(q;qruf6 zHh39i20xS_gVLZe7z|N{ID^rUVkp#S7;+4Gh5|!-LuUiYznh__p~TS7P-Yln7-kr0 zn4=$Sm|&P>Q0m_`%rML{%r$&$SZG*cSYcRWSZ~;5*lO6K-(lFU-(}cqIA}O(IBC%6 z&#F%w&KY{ju4@dsi-yaFv-)d>3;G*|TZTJ^DE)oIBSVdW7j6|Er?(Av2zL$_g?ohi zgxebw;Q`_0+K_NBgDPAX9uXcBo)E4vn8G{h)4~mgd77;7C_`?z(U2eBF1$l{=kRXf z8HPN4&+r^WNqC;2U-(!>S$IYG$nf#u6T_#3PYa(FJ}-P>_>%CycZ5IRdU^QT@Xy1y zh3^hOgz_xgli_E>FNR+UzfsTPk8pkUU*S)}-^JhUIT3;gn+W>|#|YO5g{OOjcZ6?5 zXoNl@IwCP5H6lBrb;N5CZ$xyDz&jBU{UZiP438KC`7Pj`i1$E0h?oz$IAUeQXAzqt zzKqx%aUkMY#CH)FA}*s`GnX3?Y`GP|mOBw_xgWunM-gnPiC_yak}Xz|Y_W}Gi$f$^ zoFknhMJOI9J}3&502JKtMykw37s-~0NVdd8vLzuhA<|?nz1s9@lN!mEtVp)t#h=Lj zZTh!)){HjP=SQ-oU1Yn+4k(>bYDC>4yG8ayDM9IHE@hErkwe=IZ8HXCGRiEJ1t=>} 
zHqx@A4Jkt+*|NXQ$u<{IzuIP4jJy+B6Hrs$05+~_vZ9ij`Pdq$T=mqiba{!jF{=(nS%M$e9( zAH6hsZS>~o+if0U9D)KelAY0eqo+h3jwT~H8GSZ-UF5}RGLkFNH=_4P-i{`tAz85* zS#n{s_zM|4iRQ&`78Pl@Ro(>rEh%&?eIF%x1Y$IOVC9rJO_r!gyH*2Zj#*%q@aW?#(Fm~Ugw z#rzm^E#|kFJ26!;HHadJwT*R*6~(@)pCwKn@=1$g_ZL=2_7cxn&)A0h_|r8-YzT@j zHYU~-n-!ZM+X1C(Y)Ncc?6BCev6E0{#Lh)oh_WJfJ<1ocyHF0IoQyq(av9}D?4PlZ zVtH}AINLbqI7yr$E(Apv7ZYcS%Z$s9>wwY?r39rcZdlxyxJhv{;^xLJj3XJXh+7}G zHEvhj!MKws=i+{fyAgK>|3$p*Uec!mNa4=AJBT%V+WR37ZnOChSO9Cfb#-H{oEy(FDso zcx+w`@24$qjal9gKUA~P<7C3=gmWn5eYlGWmlLif+(@{Ua3|q@!lMN87D-J4FVQN| zHj%`0NOVs8*@G>_=Uf8me;wWpTPE6p{&yvuY!FA3=#l7?sPI%I`X>e_24khJO4KDr zB*xHaBw~DGa$+WGt$?tTux-mwM4euNMy^cM7G>XWXt_TwmeE?OHCqMxJIs#TnB_kwzMp0SOGb5QB^6urX1V^KY&Y4G|tC4IYGN0=c6p8CR#KxxHUL^YhX4^A3ac@FQ;;X#l z;*hAqcJ?x!OPKpg@5o$CkQpy0_!)zYO5@f9vj1#K&=~gx8jMlKIHS>+V$3k+81sw; z#`ebl)-(Q2M*p^<_{-Ek`3o4?2KE06M(Xwd5=QFv{~AW>>|ex)gg0xAMaE)dU$;`@ z0Ao3}XoYdOakO!qaiVdGaT@ARAJeR#h1MOr77f1P@aXxa&a$DTdDK+FeSuleN)3J3 z+DzjdV}Hp!BffsgEHExIE;Ft&k}qGbGj23)F>W{RH109paNBPz_B?DnZaif?YrJ5* z5_ZXW&3MCj%XrUNWvn*xlB|--JZ+O4l6<7DNme3pl2?)}$uG%46qJPflO#=&At@>; zF3Fgbl9Z8@3*EB{c@FmU6EE^kY~62KoT{rHJ-<9Fdfj(~8e5PvCc8ZduX}#T{u-*T z@A`hve_qy9YZu`~*B-*_^l!v$q(1ojv8CLpr?=>wsvCbhr|!34#O8^s6}HSxj$L9d$c%ZNCh3i&UP*(K#wATpnxC|`W-WbV+~Ttvby6Cied^W* zBud>jA^OJovGvtSo0GmyI+pZ9(r?c>Vu@|to0<6jWxd~hJ^Rf})`$3#-+kdNUA%oy z_*A@UPTuC^Fwc6M6X$4HHhcq=!#w*9Q1&-z3ah;a;W=0DG$?W3W6f+>MMdu?jX@jPAKp2QyA z4eP9~{{F_w4K?fxbC@JknrV(W(=<=q($w12*7UmR4O2H$Pt$yHiK(B7{8DqEX^3f< zX{2e4X@co((>taardg&BOy7#mias(eG%YbT9f9~SYT&y{{I+QY{EmyRi`JOdn>Nw0 zTk@#SkI`?`TCNF}qo}?GE89GJetFuXPE&u*>Yt6s3U~$9v-q8-aTnIPrxS(7TUdGC z$7*vgR;t&qmc4;>?m?`Lmtg(76)W0vSh;RBB}DEp?ZTLdKZmISILsQ*ru;dK#UEe3 zhRtPaO+1J%TUcLw`5GFwZ+tHseG^||u^!aon-Gb0?NfcTxpSC?xi{>a&7H&SfVXAr zHHmd&C;d0{e|qeejGvyl`rdm@hfOCC_p2;iS}B^M^|^({&+PA*Nh*9}N6Pp(KFkvuwieDV(2#N;W-(~@T<&q>MC0|Isl6*7yUhlGx~23?sYotK8IV$*QjszurKe$ZN||&*%A}NcQ)Z;>OrDi8H)WWCJh>ie_&8-@ 
z%0|tSl$rV!DQi;Jr);8SYs$`)Jt_NB4yT+V6aNHO?60hPMxP0rFx|Lq(0FpQUg*$QdOzC)QHrW)Pz)1YDQ{KYF=umdn;KsrOQ=QlF&q(yY>K(;U)Hsh!hoW#Tk( znopVnB_J&%O_LUp7L%5cW=cy-%Sy{l%TH^U)*)@3zH?epT5(!w+9pG3+JLkz`tr1j zv=M2e)3)o2)#KA9rcFtkmNqkOPTIV*g=tIDR-~;-Tc5TmZF}0zv^{D2(+;N{Pdk-% zHtj;%PW`2{D`_{h4=bWOS; zJu3aMJ}%vqo|c}KK0Z7*JwLr&dWZDR=|$80rd(ubrEOCOm&HvPJ0Li(iiY3Z}l z=cO-5Uy{BeeNFoM^iAo<^#&q_#>I@w8P_wsG#=qMGj3;8>F;Me$;i?3GOaQN2HQ-BOy^87ich8@Gayr$ zsmU~CMr9^snljTevodot^E2CLcFHWwEXpj-EX^E{IV5vL=IG1`nUgZ7WzNj>GR(>R zICD|vvdlG^>od1xZqMAAxhHdf=HblanWr-^WM0nnNxPnTGxK)lz09i2Cz+fqL6%LH zeU@XEYt}uDN0v{PA}b&(BukYQWYA?rWW{79WSO!uvU0Nuvf5{L$|}q%$|}j~msOrs zku@S~Y}UlADOom}X<0L~=48#wT9CCUDrU4FtgP_d@OR~pvN+j-Y@2NRY{zU-wpX?++b=sPTa}$3uFHea ztFqT;Z^|wSFA3kCy)%1n_QC9<*(bA4XP?Wyn0-0BU-;a424@M{iVxQ8PYe#5aA za=CUwD{gaPF299vKgUU!8s{QB$Z;2b$nzF*`EsFC+OU`?m`K-W#v&0w7B)kl&CzFBTuWg-zm-tSeO;)H?NS*Y+q^O!eg4Yh z3!}N#!lYO`VLbek(DxBu%gR*bl8Jh1YztvavCKOOzg$C#bazZ-ntk834-gWIL@^;o{JH`l%LM;=$$6>(4SQiVMb zw=cJAWpCs&2>lPoc>6&&6uG{Mz6L}8pV;QYF|nzYl^FY<@O_Ib6#j(yc^fgN!u}oD zPlo;bu%8Z_pOM>Db|H_wvDrc`zh&i@ z(0zxw3&T7dfc|^v!}%?R-yp}IkmC_FCBg3Y)tan;d=*ki%n) z!9T8RWiavOSyz6;<5pJl+$(?JF~T3?xx&}1tc6_@?1Wts{DqwOKp`)FTxFjGYzMTx zZsjkO#Rpa@;u+yDsCPu&2pbb@Cd1}8=-z<;C-9%2&_Xy9?S*LXhW3?c{{!u_p&N}D zW8?jW`w{{x4?uqx`VZjuHT=GU-z?ZHfZsy+9YXs9w0{WQPUv<)_Z#d!v1w7c*oIrV z*qRZ}x3&|mM*S1;ao}G7U)nIjFRZPF-`Lm*2U@qNJOJCx;74q%g+D;I&ANr~CmXlQ ztFZkQ_!IaZSZ(cH`8(`Bhy0z*V!LF!rFN-y%O_{rEuEYWWCP>umrh;`jItj(y|vTO z>0Np|lGPu~mJI*keaU+s8$3Sp*y8c!+-)8m=VwcMe{L7v`|}on z3*ZiT0=~dt;7wo@Fb1d$?_a4m^{)&EIszX69|0c%);PyG0q#ILkPVCh-U8kMrUTyt zyMZ5ppMYP0Ux7b>KLIACeiRn^a1(BqQ1NalT3p_5IxM56~UPfpnrENjK&gdv7^bPq${g|Wg z`$Il8+kcXt8#**-l739s;Gjtxl2pS%zb+XRLg?U-DI2)i{k8ad;MGEgizBopj@ryP zp|EY7Q)TNoqP4~`{3VY_&2S|D#p536#soleisfD{qCR8b^B?%U4WCN*6vKyG`3Bni z(e}k?{{-!;(Y_q*{TLq3n;*nl3t#8AsQif9kzU&JxmU~K%Lq54eJ#&gxDw}$4YY3| zv|SPB4aDhp96%_|R3$qv#HNadv8%0H>Z=e4N( zE0%kvAL9Y9FA({2ME*L?N%$qsY2U@a@6d7rEkB^;d$jx%$34)G2}HKv)4Z%8 
zeT_LO!kpMuMk97VhUmItEL(VFET1ND@jso9t6ZJHy}E~Lc0ujna?WxdYLemnOE!3tvG`*ndLp8*v2Q>buc6-LSQZ z#tyVQrA3SoauLyEuORkCRNs@@_oS^oX=_h%99`{2^~E%wV$uupM1%vy)VDWjJy1d| z`;dMQ^fAk%$G%k4mumXbUi#7a{TN%c4y3IEY3o4RT1KOl5zT=?R6mI7%c)m6Il^(2 z!1q8o?R^N%t%A}w>8RhN<9(BQ6;fJEX+LuuJKkf)M2}*At;DI7>RK7z&#)tB7d|h5 zOC&2u+^bjV4D_VYN$>q)cei*0s=#%abQTl*^2KA^Oaae$_f zaRe=*auJn_sa#CuepK#9<$+WlNab=WmlGLBob4el+iURN^5w6-Gdg zdoqFwjNkyzM2~)w(>j{jMoKqPx&`zgEXdh_SnQy552gDljpUPk$iOFir-4uQU>%=W zwxx13l_P1ZiCUKP$ymdwtTt2PWuWp|>J?6D1f`LDl4&HB<0-vPX%3|(+G;Y(WKT+? zau%fl!h9k3Y5;y)$i13Q==lI)0pv`oIYsGCN>31K6=)M{!?jXVsw0%yYx59(gqzp?!ibT6gf5KV_vyS8>*W{2G=&~ME2JG%pRCVCDf+En{^d&HtN z)xMoQ*XFGK0sHgzCh((>$T{{Rs9%e)7F)+0cg5M4Pz~=I_6l-^B-^VyZR<|kx}%MIpgTG0&Ua^&JZJ3L^a@rh z!2{=WC8cU!5cVl@RUvyq4-)5UPkLmk5^0SF;hxom(tjMsoa-(_NA76VQJatIkl2C zFNYqZxcb&gr04!LVt<oC-amq1TLY2H?c$4cphgU)+K8`K!)xqSfdVVnRzB+`S--pnwhLCxK zhHSYZv~?(vuMVa93M#)z&U)wHq&>e$d+tTE9%S|<=gJCt#w=uPct2sz$qI^7D>1cF zT`P~b0AnLpa4u3Mk`NviiKNf!Y-*)C53(N=p(jFlykF5DS$%P8C8k!gsg-!VE$EM| z5yP26#MDZ4tvudg#2~8+E^h(yBP%9Ot;EzyHnkFR=9sV^ z@!-J+TZi;lj{ertEknF55>qSHwepA#Uep^ijC0`m3L2w=j=ln;XM9ZTDu>6R5>^GZ zI!>*QQ>#m=)g|#-z=N)Sc#c#ez7`#)R>$E<(MnfBJQG=V;%m`yYIU4iT~e(siI)fux^m(fsYHA&I!>*Q!)t|Bx{BiEQHj`DbevjUon2C` zE{WF#9&~lZE2I+fwdgptI!>)FsaBVSZMhECWCcm+CQ7$Zx`Wa^ll604n96%y(~Es|>6h{t{soikMy2|At9 z_b5F^>9>@gpp+*kHrABdQmUZTkJ4C5O_ZiknoemPL%a&9oI`0W)x=ZUoob3G9Y|>z zrGqIQLg|~74x@AorDG|rq;vwMpHRA#(q)vcpmaE;BPg9l>2yjzqjWu`8z|jKsC5>8 zXTi13p){A$JWBH^Eugd=rR^#0KxrpRJ5yRnX?IF{P}+;qerEas;t!1iL+Lt7*HgNY(oK|Zp>!*y+bP{a=}t;_Q~C|1`zSqX zrX;HqR6ap_KSkwJR6axHGgLlD<#SYCOyygY{z2(&O8=zv2`Jx+e`d~k{+Tb%6KQU( zw1|}${u#{${+WOad`Q|_DeWTvO#DTnBT_p^f-_sr3rG~Rw6<2-PNmHk1ZUPD3hd01 z@QmSt@QnHbhmqpmM@p{p46Zwn_>i=C@~opZoCTR}SQ|*mN`%oskGpR0WU1cFwct8h z=Q3p|f6X z1>guc0nUI6;0m|_BETIG0}{Xk@C3X7Z@>qT0y01jC;(r;4+sF1fEL?K55xmTAPL9> zx&c{>@y2Bfd?yq*0h|QB1x^9q0jGg8z**pX;2dxsxB&bBTm*guE&)FQmw}&wE5KFY z8t@Bn9rzWv0sIEs1bzo@0e=9ufj@yez+K=T@E33&cmPxZ4}m8@HBbZK-k0eLbO-S4 
zgTWm#j%5ymI{*f&dmL++4;(*OmotL^Jn3Lsp`HhD@vXj&;8{4{21EZD_WSj~2H+cD zFR&Ac!yL7S9iD(N1>k>x_W(LWE(D5z-oQMd6!;hz0Q3jSfI+}eU<|MVeR-l^a_mn< zeHySC=bu96OB|c)cw6Yv>x^DmDam|{;tJ1fLMu69@tn)dIcCmUk@XB|cfned#atBCVbMh9&ev|%VbN=Z#C)k=AkYNcGpAT^*yr~x$@3OVZtkWB&#q0j&c z4BzadmE!-3!szsX0SE^ofJh(;2mzerm{ssVAPD{gakk}Q4pf+9mgCIKtfjSX(2}z_ z;pJ3I=01Q!&ZBsS#SDP14BMKA?OTL%9v7T!c|22K@ZBTkIB)_uDd1tN71A>z`Tvp~ zW=`6}Q8$=5sYjaa2q${NBPi$6 zc;u>sCt#G~`5gFQ(7{wkD4vVa8j(orP|%^^JW2^yBO0!JObEW^#iKMtj_oNALygcd z%(Jx`XC5`_Q4LEC3ZYs`iLVA@Ak;udIr3%O5#R|xB5=3_ zXUdOpy_$SEJW(C1{T;GvEpY0A)a{4p@l+4*-XbxUvF~Kwn@0 zaIO=saKLS#8W{Kn&g#HO;4V-N*mlO%9B2g;0%gE1z`85O3uFPRBAhFM3BWAiQ(z15 zBX9-yqX+DJVvYbQ=Bz7F0jviO0^b7X0Ea$UH2`{`q(6rV9*91G7$5;i1KI-}fR(_w zGORLz^Mh~|1>DOqA3z|W1||X1fx&~Z-GJ4=4&Zm-+z?#pfjdBS1+HMgm%yMmkrPk_ zlnuw+0pTNYwFTY=wga3|Sp5J_fDRZ5Oav~E#_9mL13U)o#$crb_yB!@fxu88rxN1> z65rx5*+4hoKNFEJZ~|c7#`R`0he-trfT6%Vp!*bDk%9HVig)nlFfjLB{C{`AMqt`h z4wEqrSAO6Sa2yyi6Z-(*`9Ag?U<>dIkT;9Nya99tih+Z`P2di|nayFA0_T8>!25GJ z%p71Pu=E40q=1Wn-&|~aKnuhIuLD;;#0upj%n7gv*b3|d4g<%5D&Psw{A27_fE0)U zGJ)2>6yQT(K9G!kJSCrNEoI34u#~|)A@i2{ZP4}VeYhqtj>r|@0z7~Z2mlEn1grpS zKnBY}#4zLHB0}en7z!7i)TmV;KFR%|d1RMcQ0;hqqfG2PPxCnRw-hdR40}3Dj z2m(TYP(TTU0V+TXgae5{8jua#2L1%v0r)=_nD)RpU_9^^Fb$Xvya#*)EC6h9PC3^W zV**zKwtyD%ft$VV(q(95`eOMl7)*qk7 z)_vNs{7MnagL|>u#%vc~!Pf2Du)Ja*%NI;$c_(xGhSqHTvN@08X8Rr;%=*DB=j5_{ zui3wiIp0!q97l8eucKMJJK!89w*YsjZ{RDM!+2QHll2~1n~)Wa7KKoaZM(atY~z&9urD@b>P%z+d{{sKQymF&Ae6=(^!deHB-WJ7|jm zW_7^51aNsU_B`0^MZ7l9d$uRJklb}NcOA`LM|0QH-1RhfJo~N>Z_-| zdg`mEz6R>6!4-wf1fe=g^^_VY4W~4M(nw09K*?H0ZRW(E)Y%BVaM#7dwipPGrvg~B z@~{pcMDNKy!I|b$U@@>1SPrZJRsm~(b-;RHBd`hB0&E4g13Q47z;56hfSk1s00)6X zz+vDBa1=NJoC3}O=YSu8i@=Y-CEzFEXW%OE3veCy6}SQX2HXU00e=9ufj@Dc{JG`i zHhkt2^$mQRWr3Op=`my;#&ZYXZQ)@|%OLSFuE**X01u>R(~8 z0^fAmg|jY^ZbI`I(kguEg-BdTH`PK)FQL8 z8R;3&-S9n!79J?8C7g`=2ip5hv=E8d|A6R6DZhaz0@UxQH>s~t>xLd!`Bo5&u65%8k;UpU2Y&^26Cv|cTVmZl()Y*9COhoFvaJDMM zvC*B3wh68!ftW8D)>jZSc&k zeYX-W)4a#lK75>&96C#K<10uE(nnuPxABNvO63wW?MgL-_C)XGj=B)XDId>DScydF 
zK_d{ljmJ`QOqbFlyM)qScy_^q56erJMmI_Ki-|v5=whOM{*=|8;{UZ zDtDu_1e8ZLWc)-&&dls=kT#+v_T-M7w369jYowKEOBq*r8OFqNVqZe%g|v|Q;b9Cc zABb9aMu2TVIPntJlS#k*$@5|4O6WEop`}#rPq$cKO1JUI)+?oQiFr%n{SkN(x{XI@ zDV0mibU&V*l;Y_GAN__wBF|j9(x({RFeWb09r<@dMr6LpIQvnX?yw?T4o`nE$7Ji0 zy^8D|eL+i52pt642T=*_3-3PgCR?x%o*9riD27CMKX5X_Ug)d8IS+D8?N5FC(>=4y zOvwm`&^=}_vLaikkaBVqUMV6+A)z`-^^_VY4W~4M(nw09K*`agHgn=n>Ldc;BnIIm z3gILU;Up5_Bo^T$8sTg_aukv}ISN_M#v?}|TW8~u6#-jkw=fwE@f7 zcs(&*@+>_Y&jBmE@LZjXl_W3$$X|gk@j#c)!OC?BR^ZUhcgDH^_ykx8dC$`Ol?b(ja%Z%X)j%{4|jGC z=i!OQP~6#JjW~Y<;*UiRGcc#PTfO)in7_AR zQ;oP+#$Zg{@Pr1jR>QX#IqU~UW4(M0W9onPZeN072BIZUmswu74XRzfTsp{YHcMM*n z9Q#=``nLiWV}4H}=kH)Q9`oQ0-wnto207;-?n$&K=E1K3PduM;`qis6p1-}0^GvfJO%>iApShWfNs+Rg5tElchKAdol zN1?(&uvL5n5Qs?#2lIfS#c*CB0TBoS%0mL-CCVcpSVRmcYO8P!76q$P(F$Aw1gK&Z zHHrcXV&tJCZ-ru&s;zYORNuUZP_$7_H9GB7dl%;*uk#nszfeuv`#F!kT1Y=I9)@#1 zwB|bVI5eXji{Sv{J-CGHJZ*J~y8KApuceN6j`cZ%+^>&w=CNlNTKq+7HyLWEZCny| zQPc0?CThA8K7*~)GIldhlrZiVT8kdpPY-+oQ#gY*fJ+Muq~<{w3kCGv+l-FO7->Ub z7_HHsvtSq;p(Xaf6*Krt#?_#Z*4P9yX_3I4lol?Z!&MWGz~7;qxDwb)i*2S=?xr_K za(4HjMTT(Zd;`CFkXE849)LIKkyDMvX7rBg4^f-CbGA(GCHox;Z z=r7_Y0?x7R(^|iEA32j>9bB4NQ$9>Llw~4K{x(8^(=qh??m5)1J-QP~+j!3EQ zisYG+l)5{bWp`7v{d4%0d$xaqrBg{4Bt3WdeHVV>|D)d*`qsbAPm9ArI08rE82lN& z^!tACh5t>z?~*V4Z}~IOw_{~sZO8V7n%LXdrpf=Z-`B1QpRe7AmhQLofTag5J!I)e zmb#XHV(DQ^k63!t(&Lt%wDeO;Pg#1}(leHRZt0&bRat!PzEpbAA%BD4_wx_^Z;&3b z^t4ibO;fFA2Ixc3mqlt|9^4DRg6H7{xF7C=2jD?i0*}B_cnDTNEv$o0uoX7LEAT43 z39rL`*Z{j=C+vnj@D98SO;84VnSqcM$hEYarFoXdEiJQ@HHA*L#bgH*lO0e@c0e)N z0mWno6q92yIp)tXe~$TcZLeJOce9vorstWSXIt~klW(4U^W@u6^37jpo$4@=8PGdg5C0|J9h z-jI|7lk&!-93y}~S+r03U_ zO18JZ{Hmu$n*5TA)96e|9am%4v%PlJh*kcQ+OAQk?OEg|zxp>NwO#${**crNe)Vi` zf8{P)rPX%XlDcwAs=u^d{UoXWkW_nk(%WBG2HCn!c+&H$bh6dHlB%Di`bjdH_NUs* zE3fiby|i6YTNSU?mQ<@r>Z&W5mE5j!%T~Kd7IID9kBpdX?sP@Q7|GJ20bOsJlb71O zJDQ?ZGUlQL@&o;_np09;VYNp;e7f>_xt0amSbSN4`HuLNYi2q+GaVhI>@l_|Rh)9_ z5!h=wzp**ofrfc%hTiBM;Ci?i`{&R$lP3ze72n-(546SK4q8J9E`S!$5`G33LMs?a 
z>}co&T_78}!FA9FM!{9k8A=>q`%I@l*NGCY6TZuI<@Q~k>D&)8+aJk(y!a~yIEtb-~T2fu@-U=rL9{|W1T{FQN=bBDcj4NpU6`DK~LXAK2?~u>JYFGh{&A9V}x0>lV7o1Rf1#^ZA+~LOx+zHGm zE^tSf7D#Ia?lfiwr~C!(^q*KWeqzo1iB)a+HKcQXC7rY9OXckOQ&wu5cbwF=>Bl&C zD0xq6!OAy>b#Kl~z{)r06?he1gV%v|Z_W;Q8(973?1u9?(t~geJ=~nUfVFQwDqf$-}Wc;-WX0h{0hsDPn7h3TBT znyaKVm$Zka*I1fwX@RAMmKIss)6!m+7L#Uhg_y+^Lhp@PQN&wjy+fYGn{DqMv-d_> zZ@cf{{kGC7OZ9t5mCcU6JKUeWw}$oG;BcN=_wo#XDNnJdctU)hC*aJ9jO>}zkSE?p zZsiGsC*b-CJbhwEc)DE2v-Dqi`Yq<&Mq8fUmyhGgcswKXQCb^WfxVDt^?5vz28pTR zsWzXd*J}2Ei6`e9YE_5aN$tXt=MVJcBT4z|k)GW!!;=-`Jo)h)PaYfM$>9@pm3)Ho zeM9+wNx6GL-+?}-#O=sAGnVJ~SHU-klE6y%0G=t}4&Lp})9O&_RRwbg%X1m~G!!t3 z%<7Nz1lyj6z2sYmx5_<%H-lQ7AnsaZ0_H-yLf$}7iU5?sEieo2fqN^MCCxt?#{0k- zyobSa!aU;{y=Srh1_4^>8R!N5U^1+Re?V(mtOCZv{jeUk!dr0haQ=?K;9PGmSxO@62?_#k23j*|cN4>q^_wcy8mS_36`z7W43%>Te(dY^; z`b6Z{MO-H#$obM|q|ZsXyJ?FZST3A1&o8Lak5~cT{Z!B|3wzQF=#8*s6i*-SY4(YZ zmUaeh+GyU#!F>1>K7;PPcn5{d8bhDL6d2cwIq>d%$B1NWJ)W)b(iqNtcNtf*er^wr z+-woG;i$Wa9^k%AZAQ3<*=HQSsc?#YpG3Y7AH#RBa4h4#nD;ToKBvq*c$~fQ^sD6e z2$%<3;Ca{t$DuXnc6%5No8ce~8n1UGEl*Hl&i)_pOh(=Zr)|sM*wV3zD-xb{kk^}f zVK0EEVJ_T`@Ae5Q`33aQDMs5r8HHtV$+fgPoF-x!XXA2kcwh7s+cwm2?TIp4vBy9K za}EZ$J2+k$dwh>)F$|f^{C)Qh@?FYXw=1FT6h<;U2VcV4+Z6E}^*?eO?{}^vM}7kx z=&oU(p;Nie;i*Qhf>&+J656-aZE;Ld-?QJq+kMWB?snqe;H~KUFbOW^4PPIa0(ZgU zI}~*Vy;6YHo$JCh$p0nnK22LbB&YA#GHtr7G4$Jvh`$DFxmkteJ;ZHGdFRbyoIqV4{&FK< zMwZUz{s|Vtd(i%R?xpVZITdaZd-O4F8E?J%xvMyC>p9x;EIC%VFOv@CeHrHg`vgm5 zog;Fn`>myy&|*W~eD=CxE`0$HL!yLw5qpz+IeQMgi!;EiF0}GZ?vs|jL;27CidO4O z{payEb)L@|=FTU-VeT5XeT~l-k)?X?yqmW;&=t>cw}xEz&*!cs!do-sDPY1VTOJ^G z{5^~w&Z1pN-vZhZ3Zfc6SvHDF$^B-tKS7&!ueS7IzvcIGj$z&G9;R+LyPc@j*cxeT z(Pp3xaBn5<5P5ImOwCxxootNuL%w*6;?~ewH@h#A4sai{^eN(-FXC$WABBr3IaI)VuosTr=W|BVe@ds)Y9rl4v`S8# zEAahX!QkS>K4+Bs9{E+FjdXL-Mzg&#E^9O88|Ch2pQ-o;J-`_bQ(zrD1Q$NYH4K@B zoC*)X$Hab$Y{u^b-=U9v$mfi9C((`{<2eq630e{PDu0*8xEm?o7NyPm(hZ$kSeocT-S?M|5@ZgaNn!M7d$ia+8W$8&HgD<9nb)U?w5iu`{2C^dyp ztvq?ut$i(jah6dMaA7`t_ZWZ8e#@`s?)OAIyqsPx<6dhyzn!~t$>j{zj>qZG(X{jv 
z`kNT%2vzQ-)M#2ccc^d@{{*{^Z7G-bIHkFIg|wH*;jX^!M|W%f1fka z4Rh3zr}!HH`v&l)8YVrhEB!ul`R4#>evUZVy^wQrvRlh>huuI9&+r#`6;DQR@hX0~ zcb(%uIuTa;1~aOh1>E&USv#+gx-ekox|@UZlRe@IF#sUf74!w-=l%qujS%#Oe@M7NfP* zI*}{EiJ9Ng%9FIMbtbH0P1~$i!S<(Ztv_L{YudgjBZ7Ti+HPxhCP&gr;|8rDM$<7n zCS$x6$PF2t(zbWZtJ1c2%+6`sJ7$-(?H#jg+V+mgYF}nv=$NbkM$<9JC1bpm!3`PX z)3$fasE7fC8#%w%VRopKap?^OK&~J1I|#r03CHDNoC!Cu?uYb79gmdtb`aD(Pu#N_ko* zJuMHUJfWm##G#a@P15tj?v$rxXqwgis-4a8HFrvBhZdnJp_ZWuq4mivZ-1mMiR6|x zwue&}Vcu|MXhnU5+0~Vy(_13Uu&xZv-xgtpb!BLGLxdUDm7%uVBh0d{4AnJ8m|&j5C zy(jCqFSgiiA%_zFuzSQVja}8CMQC=Yey4Jt7Fx}}Gp8Abym4YS;|Hw!m8DsChe6s5qu5+X9b0xid;eQcq;<`4)J(|j zl=~~&OO{qn%icN_CL@q;Cx$FikmYhN$O--j0L56t#}YkSC_MtTje_oACYQL>8kBnT2aT}LaO6>me$Fi3pWNL zXY|FL14w;sr%NEp8@HjnZ%b*1(ptZ?wkxgeOl!N-T2oppOKZ7lty@})r?v95)+4Q5 zlh*heWU^FwX{|KSkrL}mLY+(w4P1rP*MvF;BKAF@F4(F`j7Z%L=pAAOR;RS3ZJlKu z)wY`gyTXy6&eAhy7M^i3ls2>UBGN@?f?V;Pg^L!=Y*pO~dH>8-vn8=>TJcUV#1jQ< zfO^;rTVN~v5nhB!7zg8_3MRlrm;{qy3fu-$;dZzKronWW0W+Z*X2EQ@Gvovp23InX zd|~iOmWkH;P#tG{2j7&9Z#Dk-&7SHvF!f)FQ}dAH j{4403=h^?C=?Z7Z#FI7umm_94R*(N}m##;BtLpy;3C)wy literal 0 HcmV?d00001 diff --git a/go/mysql/icuregex/internal/icudata/pnames.icu b/go/mysql/icuregex/internal/icudata/pnames.icu new file mode 100644 index 0000000000000000000000000000000000000000..c960dc00b4918971d770fe91ce61c5f33451f2a0 GIT binary patch literal 44272 zcmb5X37p*IegCh~8Uq26nLTX-HpVs>FlN0rzOaq=@Oo{p*Y@uEnziP205nwfVoFN79UDZPMT0 zFMsr08tKtKTVcxxCw7>3V= z;rqhyGhz7GVfc$MJnO9B`Im;_+A!P}hBt+w8-{m>;p1WW!7%(%7=9}Z|0N9P(lEeJgl}0-TQhb( zdA*|CNw{*mqP&^#r9MUZ458AmD5nT_j3~;(jDb^xPZR!>@EwHeK1G=&Tt@hNgj)&4 z`!Zp8KjBBn@7b>?Uncwj;dcpdI>3E|j}!h6VRcMVS{VbsKCCF436)C}B>~2cC`zA5 zcasL+CoTVNSCrR+Uxw-b48!)LLGg+(TpxzRVR$eMoiKcB82&*Rel-k#6o&s6hUXs( zo~u--2UroN2k58Pu!!cC@k)Gun1fTE&>;W#b61z1S|!Yf@NSi7za0i0w{q=a00v$+zIXm_kb^f zuYhlYm%+Eecft3-55SMWPrxhSr{M3w&%iIhYOoy)gPTDS%mDH1N5J#o1K^Y33*hVE z`{3u`Kfrt%uMJ!Rt_GXIPB0EMV1ZkK7}IY99{3>mBk*PLP4Hvz3-I6Ie3kYASAtDo 
z2pk4^Py-JGAAACQ9sCf8dGVh>WxSsU7J+48Ef@d?K?@znc6e3*HVs3|<0%1%3%GpyQW-E#L^Kf(O9!;FI7R;Ge)Oh~70sq?5t246Fw?gWJF} z;KSf=z}XC>C7=iF0OKGF9Pmc)6nG!_Bk&jCyWsD^Z@{?>z{TJiuo?7&aiD`~@E~|Q z_z?JK@PEO%4A@R^6}SP^z!Ts_@DtF-U^c+r;4$!N@FQ>*1AHYI1tstx_PB!c zcn|m*_%Y~Yl3fk9fW2TEJPcj{{{+_1;aflr+zXxrC&4GcFF^;Ba}#jE2f#PMKY>L| z*g-G`GT?UbF7OxNpFt;+c`LXP*x(NE2>3Yo4p5os>p>R05j+llAG`wo6?8xZHh`m` z0`3DJ1%C!!0bLM?UT`C@!9(Cl@P6Sc$5BwugFK0Z0wO|n31Wtf^z(e3e;0xeq zAhD7*0Xx7k;DX1&m%+Eec~>wG!FF&IxY+F7R$}3VaRx0$gx4{Q?eyBDfE{6MPDM1+=YZ zo`J2v1W$v{0_7TLBDf0lfeLs4d>DKcoZU^kgLPmW*x)hnLGWkbAAq`swGdncjsg=r z0X`0X0?z4S9D;-37VtjCT7p-Lf34tybu!M_7#1N;P73@!%)U@tfZvfw`O5O@;2AAAga7JM1}2>b*1PjJ>o#uQixE(KSB z9I0-ghZ2)+dV5_}u{3-}*!_9ogETn+}nK5z>t0SDX;-V8nk{v7-t z@GH=#BVNN$0~gK;neo&ui$FN5ELX`%@crOZ;1|%fgcAIl zNAL~uz6X8=ehoUIYf<@^C_k?sUKd;qHiKQ@Fvx=%xDPx5PJ)kt&x3D(AAp~O{{pWW zfL4N~U^UnZ_JCs`4=Ug`@MiEFct7|!_#*fQ_&)eY@SkA*4rm<^{Lv+ZtHBUB0Meig zX2AX632+j82z(Cw8TbzPDfoBLI!K*B59kMnKoLxV2f$O{B={f@YwRBpei3{f{0;ai zXxYhp0}H`&umS7=hky>M;CApJcoMuDd=z{Rd>i}({41D0M4y7oz#6ax>;$9WCQt+} zcs;luJOzC4hv2i|%iv}3x8PSGv5R_vC14Hc1;Zc>Cc)j{5%3&%ANVx*Cipq{chJ6@ z?*VJUHn0mEy#yLhcng>Y_k$)>)ivDU36tg!w>^JPBsB~6|K z?gVcI?*e}azRG>Al#74j{_l|YL-4Iv$$usG+?Lq=i}u0u|BkX&*0HEQ;(aH%{|~^Y z!HeLl;4i_;;QQdG;2*)SfV#gyFJ4Pr(2T`|f^K{!_T1Nn&)q@UK5!J|fS_F#;R$dj zcmO;GqIZk;zMXU*>t>bO(5u+pl_EE z-v9)Sn_K50^2J&ly-T#?t>oPW-UOy&Wl73DO5SI{SHM4j*N!r;!A7tT+z51VH~0kj zI!KJsMxY<0Sbw8;E$4ZUlJ^eqVemQdbC4RR&R_?~gU7&E!9Ro6gN#W~18y1@{* zn00$S`QjcGhQ+^jom$Y6In`e5vHRS%ik8*WT}D-RWvtW5 zWgJ~AooaWzUDcD<)mpOO+J3$ks5`HStTIX9fW*r@XH@!y;y z-g|cDR9kjk?yzp^&dTX;Rg_kvQhn()3-#%$Zn>iU$V(I^tb{vWZPTr+ zp2)1LcIr;qur#yKCflg3-KZLqx}GSnc4fb~3u$6k^pNhjE!n*87*9D1HN(tmmTei? zC7PMF%NfI>>RD5B-BYQTx4$Np&7Mlp5Jwh^XIE-gwpx?@boAo6-|?EVH)(m(m6O|! 
zpM0y+qOH=`7sidZx93C;A6#6Z;+kn#B1zxBy~EJU_JrZ>Y{?IlZ>-*SV&T+;s~@k4 z@9_s0nOdQq8dz-SjJ!d;qw@ZRvEm)ATD7=uzP-ursh&EwHa_{SZl%@A7MEy_mgQ@+ z^mfHItm=XGGGCodRC^}dG_$6Q`(`TosdF`ND(l^uSF=y$ys=UudnlJM`i>{Er!J^< z8QD^{sAo&3E@-oKr&*G9hb8C7O3s%hrI)v#l~}_lEGHR@ zXSEhJ=YUE_c*9OYztTuq)!REOx|P#&*(J6`!|9f6{j%ni-jS@T?y73{RMMSF%~-Wo z-C;y*oLW$fX3SLk46|ywmMp&`QQlHpWCv4B{CKQYT=Fme#&~gy;n@=>&o$>|PMwo| zv;LIf9kNw@Phof}r8~oNQVlmI)x4A(Dz=l;9ep~hSHy(g^YZq2Vx%M&Xt`G%PRUM7 z59aSVYHqjX-QbF8)?#-u?8=Ny;%fE|{jKiyL}s))n9#O67cuCZ8Jk%qhRn{CQ|%pS z&fBpqqBhqj?Y>Sqy^UZ!9+hLyPHJ}~MnwZ#Y z)vPz5dwZBY>oRxi51OrpRoaxQ6mM(Q%X&-Z=4`h<6l5;q7mV$oL0j7eX~=D8g|1wi znyzkm>8wQNRBQQLmnau8Xd7Sn(pha~%`K6yd0xi*TK43=+`BK*&nnl-R>dn#C#&5F zeO0B^$X4Ig%CJl1mOE=MtdI;Sr;&YE(oW=_oJbmvwqyrNKy^PkFkf zFi_lWj<}1mhEdd~WeYA7Get@1)5{Xp#!9NFUDBS>Sw)tnYUP=;%uCJn<dV_NXp?=` zu{>dR+wHQOS5O=GlyX_S;W#!kbEDB|6$<7|rO4xwxy!p;BP*$y-#fo}TXEdD$M)>0 zdD-sLdh@>On`#dv@^2|WoXX}OS)f@3-JN2X%*zg#ruByEQ?&)rGLm~he03ti4vrnf|NH2DL@&*4G(cZ}n75$}L`@W;f@LR91C5 ztYNH&S6?jpI~c{X)ml2VnprgpOTuT@Y4256CbiWaS%+F=uW8Azv$j?HW-d+X?y^gu z#jd{elBB)#(pIOYFFUVvgT2h&;tWqFjOEVSHb*OKYm<|gw(E|m%`EeN1Yc3C`adci zF{@Tf=^DG+Sz2o?RkdptRvkT{Ou3~AZTaMy1(r5x8(EgLHQoi*AKHOpDmp=)As zzgqKd&3J#6h3d^)v!%!DTk9R#=;byg*Z4P_d+lqky?%Y~hW?E^Htp>0rnqnI$hxiT z5^Md!2EVjExzV58nCS6OZ1Uf@S?%`k?D6kj>)#`OHn#rYhQk|=Zn~j+*P1;&d)Mw; zcVPb7bJk|p>DLz48{IdpN%v^Ub^f^bN=xRQrvDSi|7lg#s+rrW?Ha38E}?A;D(!ux z%@m*eC0Tdxo9&KlvzNVZCLRA}?<+~u|57Vo<-NivcY+4}scfL1c;8j^Ri^)K$Nyd{ zpYMIQamV-M9p66Bdf_Wh+8K1cp{Yc1xx2Q`qpiI(VRW0SzSY=cU%!A}s@kF&m#R#e zA-i)@cO1B|>$}PsJtvxRYlmx_45U5Xi}iw71YEtsstvJUV|l%{H&#)#eC}TR!b*-g znhnTxBHv?#zuVhxJ7ulfyTs8X>97C%nnb?W*qg9dy2*0?g_d9-d7W7K`V-c*)rCbC zOTAWwG+wLUuRoRdCd^9`dbgwM8;o7{rUiLwE}QI<=BjV1SA9cRgKG)yX zcEZRRv+Js!UOTyOvPIjX-(h(BUC(TFYZd?X3B!Mfv$$C>pl~6Du-alwa8GWFvrtM3 zdHUP+A8Ci)PFKATc7_bG|G|!Gu~a*5%wB(%alLWOezS9tMz7mWRkL!i=iXm*MpgR@ zHM1_i#c3d9mxyWLIIDks-=h83M%yC7Qr+}Tu8o86*nS7v%#{}v`v!Oyg@x@o@N 
zVvbf1P2N1+S<|cu(R{bn>vmgG_bYILW&aOa4ZZ4rBIOwVcT}R5S|Ye(!Fkc>Tr&0&#;#!411`kKx|uF1J+PX1zfZ-sr8GCwDwU@1B}>az-`xmorYQp=CGAigeX0($!@n?9umz|1h-1 zbQ9We9^T3^^@5h25o}J48w#^MORQXjk#Ko;LYuu(Zx_rh5w)<+**4RL1Y*y2wu#S} zUDle+;;r*L za+(Ey;yo+s?R_VzxBs1an)hD)#`5zOZ;#t1M(q;QWvs-iID1yoNoXT(E6c-@N>!BB z7Yo;=;v#h+vPZN&Z6}au90?bHB5Ars5cr(7T$eGtn4femUb? zE!vIc>5A9;s`Y4x?U-^|o(dVinDzg&jhE_6Abe7I4%uVgzOW&m`44IL7ri0FJ6>+p zOf!)mEOjt6X!es=bOf_z_KH?!Q^NJfCp#Gc8r0VxPn6f!Ivvd|LL2>e$$_C+{_iEU zQM=8l7BzZN%t5W14LD%e|M!HmdXk}G`7^CrPV>KoM!;$-YNodTV$I1GH6v?D66pH> z(Uucq`!{Df*E$1kj+XLA8clV)D{32Wq)jxBKXRdF(zz9_Xlj#&|8Ls%;+^I~ZHAuB zYEI7ezp55Dn!9TqjEJIE^Z&I=n<^F|ody42I?6MSZt4E76WT*|La$n#f=d)b9tLie zn7IC1Is$RpbK=y7TsQ_k=gCB2o6+W0ZP$A@BhtHYiq{f=LB)vSUXDYNt1sStM^X)78f!+(3yxkr#KeZGCKZ91M-^D;B- zn#F>9M35PV?#DC!Kk02^w$tk(k;si&9R=Iu-4l|iTo6sp*$ZgZpSKB#KVfdEE<~UR zXSx4iM>yS+w*QX_bHL?Wyc-uZ+WtnA9hNcMqIu8iCx;D~{fChQ&#Ek_c*ouK)h<0- zqnaHR4XITVAK7cozX_lD z5S$`kxuvqOE;zaVp9PcHcd7ZyXCp=oxFglf%tDAoIC^F;X@iHeX3t+hmy`=Os*>6E zWKq>N>$~l)XqMmJX;z9FzwvKEDwmZ=XzOF;DYLRf-XL@x(U82UxWDuU^L{IlTW54I z)L>KuLrV|-2|VLOD^tdMe|yz1b=QBt7`_HY)2PN zmE;C))dn(4_2d-G$!lSQ4HP^6o7Bvf!fvYz9#az|cJ{IkttvnIpF7KvRWfFG7CwK| z#EU~i!+Jr>YSV^?~~W}B@rGqj0(@etH7;<88etxYhP;bH>O)i>p zYb+TBRvOpe3nx?VnPfJy4*DNU+Bb@>;0Yhi_@82a^cUx8+l~EY%f7e9YB>cD;lI-9 z*en4>!++pn=961y2??a6*=|TlvCgm@Z=GQJ|Ls}TYpd&~PRz7HsSV$2*GjB4&nNWZ zO0tOJDx|y-vsR)J{Zi7H9W=a=l6Qaw5Oqi4K})P3dlR~3tGqvrzT5v(?pL+hw#@9t zh=RyOs%H6pscGYhE+^zRpKKL%e(|EJJ!zEc^J+)I`&jYh-9}4pxa=LYyiBD{EQD7^ zgYl|F_G!H<(r_elhum-?pFHnk!4fo9)!FmfblPDF;v&>pkZeh@-KQ^NdcvKEi7O`6 zrxQ6?11VNY4O2x+6nmU@D#yJ0<2JdPeO9QZtZnY1_+qelp1^ zFa>*2a{W=5PQ%+#Yqf0K&!%dow_D9_nYcM7-|S|+uF%pMgYTc2(8{*o+sgIEIzJsw zq=k9Iv5PP!GurIMZK&m@HfSf$&%FC+_PwifzxQKQZ0{>Sj!awaC>l0Q=FL(6+#9u} ze=iN^-NRD(rOfPxV4!>V@OY8zgv*BGa{WK=$eOmR=Tz-j_Icx?$fQ9}!rp$q{;bbO ztHiw#vvaCVboadr8e-L{)I`Y}SQ0L+40ISLRzZJ2+g+G66PfNAl!Zoic7<37EdNJs zjyaQCGGl2jb;;H&-4Lxgn^-bK4{KJjz8EC(=vH33sEJLSU4FLp!mk*>-c7Fex0CHU 
z>ORZ=qlETowoMp+5`}fqtn+6vVd$kqd7ae}GLE;m38~e4LriM*-yoPN!96ry?E3$M z#;@$X!D)qGetj$Q^2Qc@clnMA0w)}wcSpT}?x3+-H2>?F*=w`2y+Lc!^kG_96ufUN zh*o)RiCA-`nlV~XzmdrIIg1;bPdTvYtOFVUAcLQ3U5lJzEXzHwdwoT3FN|5H(iJ|W zlF0i=19N`u@9%`K5X_19wK{?QS}SYs?AeP7tk@yP=uf5Wvj5*_+sEykVd1 zr{}C<{?nTGlj6zOP9#g-&Ux5;ygMB4S=amh+7e4@66%&x*W0;(l`>ek&(=%&tdgH? zEzGVi&h{~u7%kq8ME(gQkr~K!h1ONq+b6o+Pt$SGOmV}w0k;>duaw4xt#lhy!u7}K z#IyDDhCP@Y%z*kbTSCHG6fUbXnOynJR1kUnLwkEjMi7 z*@bO9UB5++?cYY#@qgQ*Z!;dUz2mixifPaAo*yRMTac^+4s@HGb02|89SBMLN6^Cz zv|&Z7`p0;;b?0QK?!pf{wb^SHM59QIpzi#7DX!i{Bl$mA&=_s)1tAVTf|y!H4QG2F z2pRYfq#SK_(K+RGd9yX-o|yFZOtsRb{V6@y!?&7iRc(7_gPq8%w37N#X)oT8bk?<^ zWVlu>Z#+I&OJufIw!!FKj}FY-lrqgt3Fo>R^G&}FGh=moo1jv?*(3U+qVuiw2sPd9 zLXn$WlQOES#Y?VHwe{$HlkmL^lNCwpQfy}8XUp20tf^m~va*+}#%6oKNouRwrE2%; zq_r}onO9);ylS4dIX_&y*{V!kDe8XJr6^vFLJfIr*=1eP+J5V39I)M_kven&mmc7zGTv>;bBVdqsq_k{8N2465hu18q zt6eu`$4Upw2QH3mEWtu~dBWOW5$v8Y!vtAEQ6-d4dTy!ppzZCfoRe?Q-(VcI_c_bm zU3INSzqTs7H@8#YbDFk#tD0MDUTd#!QdD1^sN7aiZnXL;!*08jSPutk->U*8 z@P?SmXWvD-7311=<)6>rZ+Lr5@5b^x?Uw8s{Tl0PyCblqqec#&$7&@={uN!30DK@} ztVY*^Y}tD;lP31W8CC zYZr-SB-CnOmmSD?x9AsIf}w$;3QBm*d~1_6Xpgu%Y8N^&sy22>Yt=EY?@L+NuzzSq1M<|vlAM`cv(Hd-;guD!@D#!7Z#R79)^XyjU@&3f^5)yeAc zK9Vl)C^;*mdX|HNEP+FlEo9aYD{$+)uOoJcDQ&ro!495aK$r@dxqoL z?pf7Ks~e|=XF8E5vQyfWy};3g?S`m-umi?lk;9Zxc=3yclcU9xM@uK4Hcx)Itn${A+l*v+EMaJU6Kz62=a2J>-0E_xUMl*-RMFqlf@rX%xO)nZmFkRtAUWmlO<4X2jp*-dM|G4d z`uk}xf3(#mI%aLQw^s&_?{->sx9IN{vff|{ecnz~Z$(w{j)Zw78VBpiR=rYuTdQ6z zJ`E8m26w(416sBCY^eR1Z>_S{IXAnRY6`1CRxPZydT3(*7H|`6-^~`0G|*D6=4Ucn zCi={zNx=;!nKxpl_Gxtg81+0L8h=9>LbnMY2Yp*GkaldB1E^muURS=+TEsFOi4%#^ zl@)%fcNmt1D#p((I-%KCQ7_5?$I4M0uu#H7d&%3A$nT_IcZqL)Af=Uu5vhy&G%Mqc z=e&x(1~F+H^I@T|4d`VtVy^8#ZA0Hbd~dS2R?TlQDpY%=p3Gm4-@yv(hL{!9e7Cj! 
zcuLFrM^a3HL#?)L`G=F%vV^|7h7(r}G14uGr)1=aeoLZw1^s#z-?*xicQ}Syyyk3c zwYAFGRy|Z(y1=$3fa(bx+588{^aNM)d7cV#LxeiSBJbjyagFOhT z8Vvl3c6Llxad6Ua=(0jTs~yS8&>W(iu(vtGi}buP4doFUy1l#cd}t*1Bx@rGd$7i~ zwXb+%6ZzkWfz61}R4r_^L2k?&_A)bqNY>xW0EWW9sB_nku0 z+?cBAn^a>NoPKAt4lnPDgrXJmdM?+l$Xr#rn%SzApq80!ku-O>EiaZA+?i}<^i*F{Y;Xg*IU>^}mp;_&K!g*`(oT z&n|8&t}}+Ld+90L|2%XG3ut))6KDw`D8Jj`ch9#9vni^*t~X+VyaLRK3Y#F3O!c4 zoXuk4bslUJ3ad9Af3fYj_dUn^u{+;*5Sr=@*Sy+H!W?wwWp&+88>=S#TP(;nWFcuk z-iZXLVa$A+n%hzsW?=e{6f+pmv}`ixy<5Kf-CdD&(XXh5&Ccjd(neK_B;tQ4k-N!aGcSS zSuHR`Su~GtC5pVv_HUxuK5lt;*(Vc~v&_ZjfU_GTX57qnYs$&7WMTM!y>^@KjpN?7 z%^b0Aw$Yacma!Y?z+=)Dc1$(=6BYl~N&mLww10zI+-B~_k=C1KSO|;UY{MeACt3F1 z+8!9?-ik}9e{_Cned&;E)v9My`=dlHthT+w)cJ5S0YlmZhO}OElSiuehK4l%C22@oB@JoM3Om}-)9h&XNIRPU$@rV&x_pj3Xb(Bn>K(OvXIQ_>N#|&rFuxte71HW~4fGu*m!B*g z!l{fAMzeZvqiNrk(hc8DnN{y3Rx86lDdup&@c!6L&6E$$w>DZm&W)}qc%6fZ+(yHj ziOfec9&0HrFiv;B4>`s1-`6l)`5!E9`ycBJtxNvL>eeNUYB}%CU3G)fn^T(Y-#Fh| zWUaF|R<>1B7zL(NIc-nM(b1mirvISUQ}hOm6xP(womE|i@#0Wuynq)K#*5*Fwo^24 zDMC*dy%>)8y|H#*KCup&mVuG7QN+MEHtv-9O+JeTg;2$uSTVqN7YyQOk zqA%TcwwT8QB_|E^@_j%4kP=rYGLcj*v9CW$oARH7sucV^75~$zQuQr(@p%0P zdWNAk(L%ct*19Ux$`il#`sUlOMgQvWs`|U7a(yb}{~osVzL=8S|M&n3_Z_K>Gnmq+ zc8conMuBMFG{=%sRde~`#H-p;ULV*}P9(8&b-+%+1ux6JctOrPp?fbD=IM8u&)D9e z;~m6&vc(#~2)?}5+a^pXy)16d^*9$+jq4j`6#pa4=aN6SpEK%&fnTmiS27|Df!to#(+im`_7xL@v@8^ymZjs6#LQu~fVDiQzh#bXG}K z+tra$&i~{2#ofgr`f+j+hSG(9sNtZ&BWgk={w`sfKg}fY90u;PiZ{JqwJvWH8oahp(}zuxN=it}FdSKpa*y?X=o_`S@#mGB+L6)k3;_13C4IJvwFH((gzvc2Ncgg!pG zQgCutb_HroQ@^S+bjrH|CwzEmapSUtw$#SQ73DKx98RrEFI$8*0U<>eqsm$p`rsz* zrA-O7+-+?R%9bP0XY>`qKljYJ=Q*1u&!1d7edUSN)I>kBle5y@UtJ@lrcHCDrcH_R zwUq^RXUuEOCFT};cO_BT;I!*vr^AMoiTqWQtCH1guq^7%((JIFD0 z47j({9-pR~$=a0xPk&XyM5!d+7U;sRZpp6K2P*luAb#QUVuU}vlOt!t)RorG>y&ybU^}W;{Kld%xq#d+U zLSL4@wk5N{SZQ8iUBz}RbDf0&=E_#&!408;Pu16(n`*0qPrkZ+N{FhP^4I6L7eqaS7fj z`qKHfZtr$(tmY;!He8ed($8Bs7jIpFTQQEbwQT8X>BxLd%C2<7%;eNekC_Rao3Cp} 
zLt>YEM0Ubl9b{fB+?lTzm-SubOMl-@nN{YED07wuKe)8%2bYU`SBT5X`VX$Uza+^`Ju=7aR+pTH=|KA{{<1dF1%qeIXu*zEcVjTxy2DV!? z#P~iOY*oRw77p8^;`14y`0+WVn}W`~fi}9Wg3TP32eRWFOxVMPN`|poxQ<8DQqk3HgXepmM<1h7mjp6SM0e6%VT42(T>H|B(gHS0(HgG=j_Dk z2hGSX&HHr5dm)S4h-Y_NF1u%}>Xqk|3Z)@-UEJt;eNzijZiYYV7p6O=N;`Y+G;>43 zgu?6i*5uoaX+5`fA=#MQ(7kEbK@#+$*zMqKN@9K3mc0<^tX|w3d{OTD+;D!hc+6NV zyyH+q+fs9)=hmWi$kF*|wXRt}H29`97h})Ei%W?0LPKyx=Om(?R@9ay3}jhtNwI8% zs-!>*d^uZQYAaM{V|B20SqH9~Xr?Z|q(f+9Q0*t2<+T-;vVlVw{Lm#PoE}U)@}?Ex zJx=X3OiQVKk`3fgXh`y7W=niJ7{Hd&BDTt&+KnS zzBEJNoE0d-XO)H>;*=7ptA5B455EQYzv%YtXOlpv0_Et zSh39V4BLC4@;}|bVXL^Y*7&X1d}{yq9c=0zcmIPe$?T%5%F>Qt)8B8!oppQ0-*BMB zQ1-vKvmg5JsnUO6!Y1F}a#2pRZ}9n_Ra$cIQOpl;=1Pr^)}7BOA6A;|9q&q19+g!1 zl{*qG@Y=g8he8vDY_4G7Yk}G0BTCHfAu8OgWuxx-7e1f&b{byJ^k&K@Uzo=ps98n# z@37w0FoA50o-dj*cz!RwC)Li-6;L*AvNZcb!srPsBHv-(aI;NBe()LWBC;)?I~(Ij zFUFBIwH3EC8%M6lZpA#(XI{}J%p=bUl1Za{K#5yO7&+O&ocBbtjl{o$20Gh1Y>nDC zyNrltONksFmkMjix9$+E3xi#sVvfwdpdHC_U!E_WYfBLw z%%}`!*H%0&^!?JZ@*btxw8ChJ8&}?=sJ+5}LXlVLMWM_aQATWKP#F|@Xl33}<*4p* zWZXFWe-w3Ol)`FyG}_CfsQV5n`=Ih{|54PTUdo3wi*m{6aQ_}kIK zpAwSD9EN6KjH!9+~I~5^H5*ZIDx0Awnb;5{Hp;}tAhAg%HpxVftN~Y1gC1D}t}-EO&=b_4N7jI=ERi+n32M+IYrs{O z$QtwnHRzEw;3`XG4LA!&CS(nIf*P!qHQ*{r*2)@il?hpcwLuNm${KK$C9(!6OXiKv z$xM%mRY6gCbo_~ZL5lp*Fh9I+SP5P^s)QN6)JZ%&s!EXJUA#%=hxZNiDFe3Pi-MXf z>OgOl5!P3Y&oS)Y`{tPD?t#r?T49rgUr2h zU!$JmC1@KJc~J*;DFb?5Z0Aewina=-``JsK-VZOpV+ZsKcT~cyfTRsg0fFm+qVAZk zDnKwj4qb(}NP5S#GKlS-r_^^1k{51%Rn(nMx)0^MvQuo*$G0d*iEQz#dT#JgdHXOX z&_;nwsiFyDA3ww{P2Ez$K;1Rauk0G22EiEx3K=xxKzdBkg^ zSnTnnIJBTE>R#+-ioB>J{WzLJ5*rSuP5p3SFN9k;fQTSjowQ7=io6Q<2c_lkSOv~aIV|>^ zr-QhZP!#ouXlX97rY1Eev%G(l#j)bCGd3Z%k@5;#FKzf&qkyol`?uq5HS0uQzKXzw5b5A{22wQScj0Ia0gCQcu z@_wzF9-FZg!Ya;kBF;AZAph`KJv-hgsv=ztIB6uqwt^C&9AJcSkYX|^!R}@z3Qdud z4Z?Kn-xV}XeFBnocvxXPh;78_!;I{7eawja4QcY7he|Vr0|7SH$CLP`N#F&pYEXIL0EZ*W6rnbmy+%8I7;0 z|KxX-#eO)5+jrwVjUP4V$9^z==KGtnW49kWy!%X#Z^~}muGA}_sF`fI(1_ z9+ZC9aK&SL#hM|=F=BQ&zWYX$K0DYe-1w5gv0cibfer-{C-x(!cLf`g#b@lA^BMJ` 
zQ4Uy>tV+Gk^`iZ-4s&Z&FU*)FrB7_-p@w}_FdZf&*P;ZIY8Yx@DJ?QcuYtj-D6x3B z+cwIgmg087Bcyx7%z#CJ>+6k|_RTHrV-erqE4hm>47aGT{D#zrK`My6tBA!-w&UCa z!S~eNHN~h4q$&EOo-G@n$*@!SwZpG)P4`-Y0*Sp5kk=VUIyo!#h}w&aLFlR(Dq#f1 zMtFF@+sJ@B#E1`vlA@0B3@xjKfjVB)5QLO4fLEI&DYinvvelz%A6uXo%kkVae76(qt7gd$&XaLj zpTp8dmb$ONUPvXls-rcm=1Oo?_Y`sZQGzRD%nJ5DDveltFxU5v(E{NJ7u+|7f`N%i&q!`IB2ycA|!JG_rU;n7GPfU3F zL2&eP>lj*$`fgtqIOz;4dN|zVshpN0e8wOpeKz7vxQfA^krD2BL_ zn9t3KD7v5*g=s^O3xfJ@gs6qo){x5517a6munDixl;NAAI*ONsaXFmh(?J~yP5Vkc z!n*_e2X7A}G;&35n4yKkh?HYt|Ko&3@SV)hV2fBtX83|k%@Q9Xg8GBxJdGX}o2vC* zrqYcr7he!AKk^Hp66n^0_6%OCh+CT8ACv}bp(u=qSiSY!;I-noDX}_=7oPb(MLlZJ zfP&6p22h$3qA2Q7QBseq0kMr}ZsX^)c%ew=hhFzC4d0v&ZrFeDw0rgkM<%2X3K6mK zdm%B$({qw7`w#ZQ>&p`bU$XuL2`WJ(ogdjDWdq1hzHgcPSG z7XHYIE4H|gSUuf%GnhRf$+Vje}P~L88>5iDw^RLtR+&z6TDGWQ|``?!&ejpsjxo#%v^ex zJV!u@Mb(|Q*i)*60j!HFyfxulf>7<733t1t_ljMC%A7RRH6j6OaD%)yWuseZkXWH) zsgwSQ@Cq{9c#ddNF{k=oWv;~L6s^d{p5#y$0AKy+z+DM;@x2D{iB9nEw(qcTd&GhPk(Y5sClhjYmhH`MQKYJoG|%O?#R zVWJtx_^||naUKnR zp@?K0M$|*$F0^p1pOeI1YI8h2GIbxae=sVdNKE|fpDT6t$8{+Kb4v#hIEyS;@codO zFd~{HbQ%|QCyF9Yh8~2-kx{IXS)c0l!YJo-M!p|y7P4_6J&NKuo)ABghcXEo%g!13 zosAd*WhIMEIt=Jk1PR=)>CrG!*UE_T3#qOzvm`??oFxDqg+TMounEyRUB6i@Nm?yD zSxbpUS#f5xVtQDJJz+#0)x`-?=&)sgh5~hhywqtz#NtHMAWDl)z@5l;MJLDziYONy z)9{Qq)Jh2h%nYcDEVLIgpj2o$D3t-iad86W1X8gK)8pb`AS`^g*g}sPQUp=vn1&Wc zY)asxqL?ZaD01FM@<&}wawa88u&PQ+gP3t>ey~hM5k@l#eEG0?tiEmy;|s>3!G*!h zDYhsO#Z{F`|8UeqY;m9uK0`@NJ0@S#(2&I{CL}(QWF*8CO7$DWTE3f;4dgJZMbKYRd8DL2kYW# z8IppH*)_og3yzkpi8&(!bw?pMg(N*xKZXPg)0r|)SF!{P&|K9^ zAOXavbzMlW%hEzbV7+~{f;yd##E1U>U3`GZ?!^~T^jj1%vQbDdvc$k--=^?b{@*>z_M`+=rP#xpoXQsP6TN9^%Sfa63%0W?I zFPp~9xK@G-39bw-*TvyVD+=397-My*W#yq8NZG-u8>}>^WuZ(H>d^F#3aZd(iDwZ% zLuNBmDw?sO42db814T)lzOrJ~g2OnJdPLnpjo25VMB!;FQQ1p#)uORy;)o&+vl4zM zGKAZ-IRy<)QwAh!!7*y&M6noOh1IZGo4-Ryc2W|;d=*8+60EWM8<2r8A_zmY=ROu? 
zP+$~D?+FgmW6;D?lI4{gF5}B#K!B!232{2GqtrhTC5JkK&?e8zQyMWzqesMfkWH&b zbB??T=9puQBHma;u^5Rzw;@ikgdGVr#%IdIx1z`yp6MY>w*}0#u>?hIcE$>!lOM&% ztfl+0{tan0vKw)AzbiL2vQ7m{%-l4yLY{3YG*;4LDjiigSpq%5tCnD@%84UPF;xX& zWV@nC;3gT%kvtaKvQEpF0)MCj>2KCZ)N^6!9dSA_9DNX~hnc{F6eH&<@wnPsw9AUT zz!~C}B)0Z7ErDmwt=qihERXD7O_Mw#q&xn1LdpQ%p>a4^dLTR;34ry#_ zwUjVGRmY(jVz`H)97#jbfunLTHBzz_Uu}_TlA>PiCZ52C0NevOx8<*&US{p(}f`HWvql2q#_6# zmPPrxx^=OsgOO=9A=t7;?Q_`?k8(s@(RyGm73`=C!lX1ZARY@faZnP^k-7x2h~h%@ zzu6oH@zNEnMzH2a(fW$J7uqazNjt^yr+iR6g>{eZznJyJl|^huoEFXc6@;M$Tz+w2 z1#kM?W4u)Eu1}B2Q_hvsGQ^~C(b_~7&ZZQy4>~SHA91alnxmyc+MVkZl=7M-PG$_h zqB+ID7mjrZtiib}%z)-}D7=h`b0O>WBoJGg^H>l{qJe0pAS44~9`VY;sM(ZcrO4sY zDbHqQooK}6TpP0sQuqr*fgnyJ2azZ|6Q6w={X7HPecC-N!!uGki9*&KdOY+|h{x5D z8AB6i!lDt;#G$a^;_xcj>OQg2hgN8At25_@CXr|qDuGobN(NFUBP|#a!Mv;cgGoag zV&Cs5t&eKy9U(_%JyxIfhc)~4fGi;}D#YK2a_lm>B zu+TQ*@Fqf1PY2&QCT*y2DW~Vbk+?Nk;V4qCZ}h#anyApkA@7nD$Wz|yjd8}ph?WR( zBP1dXTCJ!@b@7)1*a;wmIB#CJMnN=!HibtK3R^q9SDxty)r}>ZWu(5kt4Do;3Kl2o zWa3e@{^&Y`20^RGgHp&>Jtw_$ZYfPKrq4ln>b6|7!l?XomlYkVN}!J0Ra=plx~hbs z^#4y;B2*5CkbcEM9_(@mBkD29X<0L&b*woF{}t0O2j)IOiTfk*rryG=S`iB{^uLd} zp*b2b#IMuVLFBDw%?cmToKc@Id$f`~!nZ-nPS2qSr-b~^1=@jwTX4{W5|69dY*u*a z1SijlaT+>j^#|UGIbl}J#=OzT`EyN53vP4he8q)dlI97GrzlxcEj&tyAQ=7?ey_Bk z15o-6ak8DF6Kxs9feqk*jZ&5*rh7$R@*>tU$SMM|Qqi9$(Xxv@ZD>c}MEDJ-+Hey- zb4utpms1Q9Ef8%%xWz4HqAerBFI*EIEeam!q%%}o9-*UH8Kq^cYz8Dvaf!8z9e0#) z@pFl`1YSQGg}-AU5*5LY;+^CYYY{qQB~lzKlOok}*uZ^JUNGarDPU0F!C|oJUZDz4 zQ-GALj=)pMQ}~dsmEP#kK1Cg3oeQnOips{H$QUd-FZu@caPWAwfcZD{Sn9_`SNz2; z2AIVJLB^CP?J*5&4M+4)mrthI@S$&H!p%gk&W%i5&?3cu;AFwC?a;|H%Ghg(VbJVC zxmOPrK0-MH$AwO2FMfB(u=R|Vk(?M;;$)z30axVTuHsv%pL8gU@>RA$g^s16@9VL^ z*HHMw*F9H{@iQdDrmV^ty;)oO8Ha)Ek&?v&$)w%#j689kA5nXb3xqI=hAtWC@2NHW zKbkEM`(2!ta5HZ*N9@O0PVCK{lPh0s$%Fakltdrd^pr8-1z0yOv(EDSW{xIxTUhld zrS8=7iU1MD6;W}8nJhA~`0cn}6r)j`MF`tT(U4X?p{4gB4pJg>)FB=3QGUa_1Vkjf zH;Rx&?d1jfnuomd!ad@P0oI1TME@-5P0h$}@ zro6z>6B8a%j41xA2Y8muZ6?)XBMU+(>Gn;H}QsXkG(d>A=*1JIT z2MsANNbggycl`V)Um+kOY+w+FKZe3U 
zm}ql8Da?sKWv`fojSmvZc(Is)v0^zPW9fbHKy#AuUo#tHrD7Jxk_R}cGhQsy@f$`t z-ZOrKOvi5sW^?S3;q-3GDRa(f{3)0sMNa%VV!FnDBWG_sE%hgnf)&@^<^d>Ya?|aM zwx-9QKDLA%Q}j*KZByt_W|}GzUPmPX@s3i0V8`QBCG_o2qbH*t@l>pEcf5>5tZ10+ z0FB>?R8!%c(&plkLj1QP75nW7n<5$y7OAGfIi+)op?R_23JTarj>gMK#EQm{_!>8g zRIG3uJ#V8>q~{td`0A%6LI{( z3Zza{>#UNLjMRgIAC8zM_{ngKYj6lFd=7RNc;1kif*DZc4yk%N+Wb||5L^E1C%$r6 zMke5M`*7BUuRd}Fj$a<>2;a3UE=`C%RQ12f0MFHfv6G*q`|uql*&8N%#fd7U*`6H~ zv!l_0K=tjR&BNnV)8Whd@!EiKh6xmYT2UsOEl+23#b(I5C>e+V`1WHc#qM6vap|ve zwkS&RBkUv0ni{OP^yk_+jFXZ$;9%%zY(H|`u^pLjM8TjgIe*-)gE($1BCd)J-xf#3 z22!yws|=%`5OTpFho!(*jY(-mY+4mxJwj&%XUw9N*ncK2ejnSk3C+;5=J0zQ4 za^PTNb1_^Mo-oM0oWY7XEe~NOZ)aSL7~3zUAu2p}G;H@!X9S~iSUDE%8CFMz5f#xr zh%Zpo!@HHkoarOSj;Q@mkHUX-XsyD3b@#Bse|7h`!hd!5A%*{H|Nf|RwEFq3ju~DnBTTARDi0@6{IVcUIO-#a&%A4dCGoDN}(8-u}#SC?Ik?QMi)j}0R zY7#xU+$ScrXm#CLpz}s55BFF}FYUUIHeQY4zBZxzo^WD&#n?zzi|r;8R7sE~N#zP6 z#@;LvY-$U8g#?>3;u|xVs6s~<>>v}$I#T#ThzNXcxxSxjSi`xQix5Jp7BMC>q!vYU zgTj)Qj!pHgS6D-NLVXk2z5+r_xN}SRrb{KBhy+$lXnwfujSXE+w5v-P`x?8~Fh0cg z;b~}Iq|#T21ZHDv9Lxdgz+ji2*xV*`6dAe0O-0>=OD3Nu^zpG>XUGkPfr6AVGF~v7 z9lM*ox|^K3d41^FE%)d!C4-GRYJY|P5B*+gGrWtTCc?#7*rNkVEWCJyeF#s6y6-G* z1V}}pi!CA%q)9k24Ks;_3vSF0cgQKle5m(1SSEuLHKD-xp~jq1g+=T%4^APzL~fi& zWG1$rX3s~B_H1&^9M3a78h(&RDxBufDX zGfD3mw0g9XxQO{f$B2utgRoIuTtu`eqAWaIaR)l1*AX*^DIzXnv3AAC=P_KXBFe*u z%BNYpg~4;K4c|>|2k&^6=V>%9-6vMoP>%E({U}hFdgtnP|lY zJw0T&74+j=)FW zxsVriFTR0vM$AP}ED~tI=KB5N!RJ;R?tA_CBh|f<_6RmeUerBtH##9@2-Ya_Gr^sd zK{eOKFLc_Y3?<|oKMV1TNxGGbx}QIWaoUeVw?X<9x*KyD9oHs>)j9NrRF9S6{K<>-wW0BwU2OPUFpb#}Id7rX4(l|>LuMp{CN}0)Bb-^{9xW^;xwz+$@b{SG z^pS@-?iCkxca3+;i*WB@k_!Kxk+_w-$ee0&{F>wNCEU1ZcHz+_ja0+~!~bFabUQ4W zU_6J++7$Z^&g5Uh0&GuE-N5xpIIKW_L$8(ILNUW6I^FL}xD_+XBEKf#0s;|~?jX_+ zq~QdDOHa1huSa;^G~91+%!qksl<4fbiv|Qt^h@LtvCnO|I6!bSg6cAIc5CbqmlrY2 zbTQ2MV7w0m&wy>c?usD&4I0h{cn@UG;6fn$1L{r!g3*{``xlma9vqqLIY5+=6Eno4 zM3YQWlY@Y?!t?5eO+HxF>N&!O&NvPX>S7_Opr1RV(Og>D8GQ{?yL!0U`YcwsnB5uY zy3!mIvVjlC8EwYGa$7gw_E%tBo6WNOYBWyFKB|seF_WmUV8+atnDNR~v|-m%IaX41 
zn%K(^A0W4-M!a}0f)946`Z>4ESr$3?Cv^Nl=N?iHiQ{v_loUJVIk;iQ@F8XRQ2nv| z{LnZV`+d&^~^@e`eMm+u?$EcXoEYKF!W)e}KISE**Pp7ZdN!A=9V44Z!PK z*Wds-r^mA05X98gkczweERy*qCT!_vwl93nb%~LSgmZMQ$@X2iqs7$4*tS3vYR*g+ zl>YY|W$fDTVah7@Zg-X+49^SQChk&uw-6v;_|@EMB=WR`#N0lf&t+E%d56KyF2mls zzijUK+W3)Sd~fFZ{oP+J>THO^4kwlh448WZnq}22rBl?Cu*!*;T(_|;U{VYUiRqkI z*Cm3tIARryhx zrZd&B_tLGRMl%FUS_nuXv@Gpl6st9w5o}nzOyovJ1cAAd+Sl+OqgXq0jJA9wn}k1dD%f+%P%+g(bc3Xf32$lsk|o!9!qk*Vd-v!TY6TK;8D5(i6yH?ZsSPMCwwcDED( zj*u*_$5bRfEnL|^mqliSCnK+(4h3r_PU6%u2H-n?l#?&~>nfKCVmsV7_$!9@ja4`x z$kpN6P3wmoQ;-MkW>v&rSM~;5bdBuGZ-C$yEL`M^0LWz(+p#v~$WD}Be{NnzIYPWL2aKXgQDme93cU-JV1gl{)PW$JNbe$bXXwVCm8W}xzLM~h_A0YN z<7W{55sqQ06_^KxKeziRu3k36n1`P*K-DOldi{dGR`{!qv1%zMTOdhUh@WgqWX5i# zr&hJLX?|T?D@#pVC5VQg_7y--E^WUzHXIPMhZnNa4vesPmMb2)PA*Ov=60e-TT=dn9KU61tHJ)A0iaU*=#Q}!t{!J1)?^k47S$J7-$ O$^V6y+!Hl@ThxC|@-8L- literal 0 HcmV?d00001 diff --git a/go/mysql/icuregex/internal/icudata/ubidi.icu b/go/mysql/icuregex/internal/icudata/ubidi.icu new file mode 100644 index 0000000000000000000000000000000000000000..cfde07406cc885273c45a396a4e88b201770a844 GIT binary patch literal 27616 zcmeHQ3y@uPdH>%%&wK8@yUA`6Vo0)^o0KHWE|4f$3jsF7h(V)-M;o9@LV3kb1$>lt zW+BP6NSQWf)fy~P+>|j3O$Cz9Ahjx)cBrC)8Ae;mL}y2FicTMqk_xqie&7E)-~J!x zdGF;hL-+rmbH3N_d;GuixUaLxfOvNK9D$=toWK2D+q=4i=#et*W&t_;J3<_}R)`hA z5BNfSHw0Z&5z87C4Vh#g>AGI5-u}@`g;?0Vsyb1*r+c954rh;Zm-9vEE6!uiH=XY} zlg>k_+Yg){IzM&(-8t^O;=Jrs+^Rd+??b7JAFP zA@3bP*Lv$D-RNxw{pH>^uy6M!otwQ+c(-~xygR%-pufxeBIp;p_kkb!PdW!+n{*C( ze=F(Ny~lw*;T?hfGv1WVLV~~B!KR>uI zxHz~XxGK0lxG^{@xFy&D*4u+WjqPLp=e*AVe>Z3+oj;I1!&lwV3wC)XX^}&2(U<12Y|X zD|XVoRx>VEkAoHb9Yu8HkuSI?_nQ2jvllIoSQ=Bn!T)w`Sa-w*PQ#S zru<;_(6r|7t6v5F+fshI`pmRm_jtZKm7+fex|iS{ETXk;81+PRqean@XeCG|d#j>% zM(0I~-3uc8PP5{@Kbmwd1^M!58*r1(brF7-iE~Z(5zyb~-R6BlT1dMpx>ZWIN4quc zr1RnSV%V7Ip{c4#nE%pxo!}X8VZ>jI7-zfbMa|dvr z3HE3;a;>eaVb4$mA{15F+ZJ68 zz)rYGM8cI5xoB1NLL?mF%gu*hKa7~~M(@u2d)rC=n);}o=J#9S zOb&ZV-G?_N@f0E&kHjG_;T0)L_=(XZFf930i|sdP@Kxb8k-KsNY?L`GUq9# 
zs(Qt_a!rjm64lU{u8}hlCu_ATUM3Kwn^(9S{tyLtM}k|=o;)k%1VoV$yed7 zxWjz3kY>#<%dB>1c*RsZON`nCKD6OpuP~d({AtlJ=59GDde$B>Y(q}TiX(g0O54$= zuO~lJ+O@n+bUKrjU>>}FaW0W}5WKgjyBBjzF+y5xc{@H&`!CC^&M>y&HPDb?G6{HR6A zwCa-L(`(anCHeH%ud>>M0K|=k9bM_u>}=I3MreO@;+wV(yOQ$pi`Iefwaq~?WhnAC zJ(H5|&pcADpJ~!l*A(L^CgBuX@1!0tjn}=BcDqLySqS=0Sf3mBzk#?j! zIomhel8nc9Y^38{#glM$4K6Ee2P!vPek%go>^vH-Ts;cD|Ea@MW^#1 zL?}PZ<98t(uSg8abN?WGzg&C{wO*}V8}_C3DAY}kg;H{uI@5#COI&ZK!|Y*uQfoEv zdd(FU#XvN5f%kOQQ54oO)uXXar)V$EuY08NdX_xiti336GE;tR!XpMb+83{lW3mUy zEam{0WY=BVkEE4BTV%+<4o-DC^-*_UikaFiEv+7R1f}McL1Kw zemMN=@S)I`-_~ls%3+=}qGKwGUI+7fJydP>o_}*{m-5+@MX^$KRESHfMxm5dt&*(h zO&ekCciHSoIZ29UaTK=d)hM1(HdS8C5yF&gFHOdym9_EF9Eace#-UQ$6Ga|%0ydh6 z7ST;|J{;jB%`8WLwv5rJQyvs$9CWy0wAg1Fqp-?`NUWJ{SP^ZyhVgVp8fT@Ehw|Dz zKyO=@CR5F9Q8{rv_Cyrws9LjTj=)s+$5q*}RCR>p*ofm8I<@`r_jBBAeU_Z8A=8`B zz1C^zYM{!zYomN-J0sttiYRBW{aMbdjxmz*aY&UnacgbLn&9wyFOMc< z>mF7Hs80>u##(w;!xB*)r~8|zauWW!3zNI4B+}(|1bx_-uOX&+en}g9F_op8t1KtA zdOXVXC@VAjrX2x(FNzr5jfk7-M?QTpiV-$V z^Ge1_YSR4Z44{l?=Ujv}CDT96mX#BaZTUe_R4m2i#o*^a6m?p9E=7N~BaP3?oo~aZ zTq?%1BWdyTbbM{((L8@;i*ldQ#wUdp*5)WXoh#YNsPWP6e&&0p8y^ULs0U&(!} z*IqH{nP&ODlziWUIJ_gN^Uq#YYw{tPnV+DR`Q>Z$8}|GPm81US8^f_CG(w5*{lj@=F=s@=o*tJ`$6Ly(V|cH%C0IX=F~Z=uZoF#y;L^6d)fYsS9N4#bxV1? 
zUYXuC)KiUm+*WyxuKRTwnf&#Lc|2FZ6tT3jaVo_Aip>#!t!|>|SMo}5X7Nip}O)I1FsU8E7EZbhV8hbF7^O9_%P}Y$qZ?7rM zn{;V@tP}C-Sw@OOm+5$@Jw?smoBpPh?}=6&PiU20w~@9uo{7K+dQ{Vj*=e)&Wo7l( zuzdX{Y1vqWv;Fb&R@RdQDl(qgd6T<99%f%{Nkz@(lutHBwKo(Pn5?9GokAOT5Qzum=ckEdXj^}0hfoO`~ikFISUj_NH z@@tKY@SjI7Iamt>&ZhihH3hhm9AwUo2q>Fd8#bL*`Bs+;)|jo z8b_L!uC=pd%N!{iftKw1bUA-f`1z0H5arK~imsENF5BOm@%I(h!Bjc!i;EJ*^jAhPm>Xg`n-|m$2#_B*`#Jv(}t2clJ@c%`)HVvZL1lVv8Fal z%8tcxvS`|MmxV2>$KNgZomaaQBVe3Pr+HsaN=5N`%?HGQ_&X5hHV54P=9D|ooN|^l zC!A*Uu(Par*j?Wo=-SX6t-P-}Q2BgQxci%i(9=Qajs&+Zp{@7e0A@sr=GED&Pi`Od07-L9B3{Mer=^!4{W?4pZSirPp=9(wX{-!@}c7S^P$*z z(78rqX@A3UMn;B*hSscUEM3}X_`d~(4_>kLw?A~%<=d|Ot*hng;PBf6^w$Zops`{_ z?fOIq;H+CwT{Lh0@NoaouM2-k?Xvz#-y-O8|L|{Y`AwjWT4hV4F|P)mXRTd3I(m+{ zSX>JKMbf$tT(W-s`gfgs(S~=w@9eYR^Pcx^+_>>V0l#C&alv`--+X=|*^J`?=4kd{r6SX z-PJWSQ3}b{qdOC9dF0}f5od}(b)q~25kPycB4q6(LT(5Ua;49r%s>jDKR$8gNJ2(- zxL&^1x3R-2-zZ_6Fj1HyTr1ol+%DWD%ogSej|xu-%Y+w&b;6s%JHj?$r?6Z2Q6Qou zM#L^+q1Z?4Ck_zL5YH0J#4%!}c&T`W_(yS?c&j*FyhFTOY!K&)kBE!JrQ%9)t+-Kq zN8Bb>3x5)}i(iU+#C>9(KjfGFQUBro?*60v$NBsFtAzpnQNmgNGyG-#;ri6yNQ?59AN|AM-!sUk*Mm!&vX%;NJo~%HIdP-M<6;kgpcLhEXki z=l_Xf5|9Gbf)Y>x76f_*iUR$Csuq3|I4v+Za9-g2z_`FfDnV(wK80HSIq>Jee`RHF zjdfGt_P|vCRH&#f&;Z_ZVKl;66nHKf>xCp{UaNsyAJ`1=y}+jc=lORAb_c!>Z1?{R zoIhACgkVI1U4mNh$l$TT6NAOUQ-bl}$ly7_s^A5|*}`NP)xoO)UI*ir;GMx4!3Tm5 z2cHZs39bmf65Ig(TY?_|{4DrY@cZD;!GP3JI$W|=oeiBY=~C~Uy|Rg%&gM9&zjTsx zsx%w!+}XmJ(z((|sG&Sr$9TXCV4b}CM1O+XUP0j>lYZXzhct&);Lr*jT7g3=aA*bo zKdivCgZjpcz$wDsZ-)4PlkSpc(>(K}h0@dTu3jpwl-5YENt>njq)&_rK9}|Yy-)JV z)k2v+Aa|4(!MnS&tjUES=_B`(2LL`z9weVFSIEPGo&e)eKePgeR^ZSI{CljxWyX$o zrCdfo*QTHM)$(=n4TjHX__Tk!e3v{+4pZL;4Ndwe-rCI_Hh1`tQ?+{v_N}nV{UKY*OqNhqyy4@b9()Y6tIU6E%&F zCwy^|f5wn6mtU0EP;Q;PLEfB{zbk(z?=WoKa{M5FomP)NzsN$!^bK2RQ;L)tIwGWp zdV#*LhfP0_pGbA50E{QCCiw@4&J7If!f>}!fyhxj~1qb ze20H#=z*j+N(YA?rreXEB^GJ5@Oa!&GRp@(4{TvE} zSvEPmSdNAd3wHzl$fS&Etne7Bxl%qMX;B<50qRUw`yt`&{$b&ASDiIxW_nzB;sHdt z9DM&XJTHrPkF^rMLR^O5%3V#Xxb6UI;n5BR-+0jS-;wjG97x^FccFB5c(%t39t=Mk 
zUS#N&8dPf5mEpDF*TS2^@4~xvSNP@d-thjgpoEmwVbr_?nl<57;h6GrxIpQt^i}#R z-IU(SamtBGp>m3n8qY3SD8-e9(hz01QjyWt?&;25rd(-@GeNmV`KvNbxyg{;V^H=8 z3*jm4V|#+_dgT7@;hSp7C3C@}QCX}k&DoPj$Q>^auT)klYqH9Cd4?Hh&OXV%5I(st zG(M>_j>8RERgpQ1x5Y^O>FOXh{8i0tcFs#4i`aXJRFG?kUAD}@vsf5kB+=dW-bVWG zF7?@EwGQ5_<^P((Ik2A^*=%?{B54M(2%YPh#$1&C|tToSv4 zQj_W@A%+T%j!mqen3P>o*RMWNe<^Zx!$6m)zbsl3yPRs1YZse>Tx^ODt~{ftc5?k> z!-`tqeSs1miE8}5xJ`UsY!mzX$NC%m>-_uu#R0=78Qsa(z2lU)@rl6afv&;wAl1Dd z`_#=Q7g=nD_m#88!8PFhHkSQ~M}y7Ksnz}UJ?nch`^C2Vt~Dr2MN)J|w82N}8tMQ> zX9G2>RASNU8WKR&&GeC4sS&8fGwSYbl$xbhs;|4(M`XF~9<+^8$ZPPCBAH6>@{v+G z3J`k&sJi#-?t=cFX<1i~SIFcURVz2r2u``3_Y*(q4a4mCtc#>`a-f_^+6SF+bB!{R zj3jZRua}YI!<3_>s-3;s!_OW)PTrS!4yM6;VzM%x+qB>khRs+`ir<5{E=c3^rbhjK~J8)^FHqsnvqg-(mWi=E=x79(9iA~Yg zXde76OCBkXC1TN^_HzF4WscA0t_!$)E(XzvqC-*L`rEV2xSv+tv_ zft|1EY}c%_@Hiv+u;}hm7|vcxWzC1lhubyfR{ou+(fb31!`g8jW*y#gc(&`|W=D+s zKTe-TH=BQ%jO~A3d-{+6vb@QRaHn_4z2j4ZCl=mB++TMdc+WoqcUpU^GOiA%@R_$h z=hnRdY4Sfwlf` zHhw?Z@xQVHHg67b{+OFLM&-WlJf28bm2sTr7^UrNc+FC4VzrtPt@A)BURmszmv=GA zGbw}I&T7r|+#(JUQk!VRwZ=eOx6ZC{YLr|gIA?PuSVR-@NZbOe!b-d|qsF_X(|LPw z4X=WIW_HVDT5umP?B?xdYn8_-(_U{L$?DH(#Jqly8>YqF)BZlTx}XK?{Cftb<$0{O z?zEZ1UGJ3ge7X8r3Uy#_3fcTK2w)@#m>SCJ00Sur1+RcbEOF?m(WT6#xz zi;Iy`8_IHzZ;z8&wOyaeRne@etCwoYB$z>@180F9CD>X0B7HkxRD4UAja%C>zboz1 zTsSiWr!aT!jpwvhgpqJ&*ifH)vwE=%nPQlAcv-SKtdY@@Rq5q}^<%3z;=nsbG4n9G z=7`24T8@yr6n9B{zm4F}EYFq+ zV_tK`S#6k^)nnH%8IOoM_MDgyb?9wvb}!VJ_P9^-j?yZKi9DkjMjAB?$E~3=&{&B% z13o9_gjH2CvYpUc6RRyEU9b+&*d$d%JuQuK(yrayd*&CSi=8R1WqsEh= z(bwASzsBUwnZK{JpXzP-^@m0At^?k8y_mLn(wxl1ZF(5LahXxs z^~nyJ*N9y|gy_kj8G}oiy(xF2yubgp|4!&-_j#|PBfWIl$`ePn9+~Lf+BruE? 
z1xI`R=9yhhm^Uigd+aBBw`%czwrY{L>XEkUkRhBZr5>RfA#9GPX6`6 zUw7?yd%w1RiT*A9OOJ0lzUYL86XX-GJ#p8GV^4bZq?3QM@Hbt5d;4$Oew!H3GN81$ zskmrh!$A4uYfs*F^4L>eJ>}$67oOVnwA)X^`r1ybIlUFKCC+F)gRalJv3F;eh(sR5 zcER#NErUvbM@aMUO5?G&Sl=IKI`mJRnLik7Z5^EW59+@mwt?@}sLwyE`K*T0IwAk; zhO?=DZES7*Ps|_vN{2KLDL#jgrgMtVjje>fDz<9ZxqRnE3uj}#ZWc}%To7m+azAIh zRGwR+r%9U>fnTe=o8McN)R;Smlgj8y4R8;@eSz-<<{rUoj3?`6R%pp=xMk)%d4>t2 zeDwF#Ji7a_-&*lkC~n0h2(1mdiu0J&tn;>>$Mh^xtRu$ajZwI7*kiDKJqZ1HL5-t( zxQ{oEN$Gp^$Ml^=#vRBzMQXjK!2OW6)PAzaA?KQnxPFt zt#6KHL$?myH?*LPy%RX(@6_J#?jKk-u#9_{_XW<5OWECyoORAty1eYbJAA4wGh8Aj!?-1OPWScL3snN|8)(`(H< z{Eo5WYs#98`^zf#qVcrM>vy|O_-63(%_PTNA7f@}nQZtKIuM+$R(w9w>AB8N-wOO` zwtDSzU2|41dKbW71)5KIw2ODXfx{|>T{CRfuz}#QY}nRe`-T+^=lPvDwsYX{3PWqI zGTO1fAT{%t+Vm8AQ?m$2+xIAb%CLSLknP=yd2vO|R%McDsVbXST2q_S?S6L3Yz{WY zTB-GX#C9T^XsVsr=_oeuL_III95aYT-Ke!o30sd*NMhA1a>)c?C(Q))`sED!`p@z zjVK>cJECbs>xlf3r6X%bHjZo=DUT{1l^E49ws~yZ*rE%{FQ~ns>4Me^^2e2qs~Oig zu4SBjVey5D3mYzMzOe1WqKnEes=cV`qSlM@D@!YDDjO?XD&_IT;}hc>Mm3LW8&!0E z`T4cyH=W;le*Wmv(KVwRN4JcY#}toAjA|-QF(cJZFy69Yk7V}X+=#%V?|4a zJhpgjVl1zdUwLztvspbt8ph{OXqb?Har4DBRrwRmXm+Y;V)3M=NyV2mU1F7Imw`P? 
zctz877T2uang?n7p2ANm4BPKgr>DuY=w3POe^Z$q$6{l|)c;<>{x_EHoLQR})qeKNqfw8Z(%SU#hKnO-)^_^MUo~7rAvavpbb-HQYP5?4~vM_rmq6TZi+G zP0n~i>b`L6UUF{h)H9w{#Lg_~ApUmf?B;CPuFCXomQGJ6&vy}q&A1}{bP+cNSyK3h zGw)%O!kxy-Wf>0se*Iu{?`-ftB{&G(J3fCFy)DvdvfY8x#lY)fUwPzNU5J_9Zs8se zZE>`xxxJ&>6N&j?G`E&*L|zTX!1-e{BbpRdB!RosYJoIcSEn>rp4W#tO3w1d7oyGj| z&T4;SF|0Ao=()yq&IfC?-_O~dgT1ZX&%N18+qX_QFY7Ajx-Mo`zCwJxQ#ITIajuQ; zQQViua>jA1pWE7fou0^BE$uYw++wFjN%b=&c+Z#|qf+y-Iyg+t;FMS+G99ijN*QEp z$L>~?<%et(+sY$-FQ7iEp)Wy9oFOD`JNnRPD#x=Gqo2d;qi3L($CGY7wx z9AiH7ilH=@#yzw3VfNWt+wII>hqI@g-|^vZz)qQyPZ!;8OCVZmymbCvI(uB7Yh0%d z{w|L3uyJT&rg7%uG5GHn*7{TbyL)EsjN#;YOnXLFmED%rYsykHTI=C)o!FVj8jsEY z!19>!(o@d5oP2t3_BRRK$8EJT`Qu!0cbnJ?2D!RhJb`QFR-R6Fm+VFXj|cJ1t|pK8&=xZ|wV zSnaIx^fXq&>akX3`Z&J_bn?9|-#MCInJCPezB%)7zPIE5F2dzboIbjBuDAl`{eYBr z6O(WF%=Pm(JHx3FW6=Ze(XZRz%{wp-b4oX7d+pBs0(on5(>rBW`%JX`%A|6c{JFkt zZ6Q?J&)44^!LT3QlJu$ahg=vb9vtH$@0AO=mm*xTY zBH#sp-vZnS_^2KMm{+|p7-lmQ_;0~_jj#aVN?{JbWx_0gtAx2wE6#VO@CNYD3ikki zyyzp-v=Zq=z<&XJ8sO^zmjJ#2@E|dd+^F3I)Y%}pS&K_Cz|#PC2K*Pm-2q<@xCh`H z0QXFK#HGFCQv*L)^DQ!dVfT!)D&sDwlSheb=n&M>$Nul-lc5h`gE5O;>y8!Rg-UIk|?R|jvYaal7K>HBj9BmuG zx!OknAJjeuI8XZo;6vJ{0OxDl0WQ!!1NgAE1K=atPJoSCE5Jv!&jBvfb^&}$`vTzO z+Lr*I(7porr1mwyMcQtFi?we6KBese__Vec;4|8{0H4*q1NfZwJ-{W}4*-{HKLTvh z+5j%o_5u8d_D_H>Xg>p7q5Tqxgf`BIge3T9op!6c)qwPk`VGK6>K=f5)x7|}Rlf!J zo%$WX^#IID0`vSs9K< z;%yP>vyKsIz-=Yc`0hS38+M9Hit+iql=K(QN)F_!i6o3n4&<*UAz^fKAYTJ?d2%2> zp1hD(k{khuU)g=SvZ7ld>`YS$>Q@V@Fh4n_N%;Jz0-+KPxztCc9CGO(D&=sxB$2{A zqQkSVHwcXL0Vf+=9&jOp6MzdFoCus^aDL!A7+e6jjs_P5t`l;KEg$8Ke3UctQO?Lm zIU^tCjC_5N;Ccer*Wh{qcZ|Uu3EZ&;*BiLw4DKl4er0e+ z1NUp>Xm%qX<&1okGxAZ+$VWLNALWdElr!>C&d5jSR|(lKHEJ0TT*BZc0C$+k zxtBriW%`+LhstojE*6@EI(So|_A#Ju2Kr{p@_H5Mn6S7=z(G zhHpXUT@AU`L9TU1t|j0-Bbn<7plgAyP3Bqx@n$3k=0YB=F&9Ff%Z$O^vIO)JJhcpZ zy?v+C`83gJ7;zBr%pW<4$`P_B8{^?yltUIZ_3gTdMqLHF#@ys!y9(|IZ18xQAsb`(SnU8LQI_^KFD`P`d%>JAuA4xjN0@+n5{}6M3}8>JjqQ;W5gTaov2RQ7BVZ z6#B>zQl<lvI1MS4@a`P4z(;O{ 
zcjws@M-J`C^?^$~!LY9qi$)kgs?R2Kq#OnnUCYD&Jsha?9RyPBDOMMIA7Ih22x7D`+zN5YautjYFxK-T>@Lly?fbXgA0eoM5AK(Y- z2LL}*KLof<-3IU@^&@~Es~-dWMEwNdr|PEwx2xL$ex`l~aEH1B;7)ZXz*e;t;OFY+ z0C%ao0Dhr<0q{%pOMqXgUjh7D{aVF)brP9G2Gc%?zEvl|iGDDg?*{{B`pH0_17kAq z{Z2r8v|u(!b1B*}%Sm9%KSxx!iFfOP3e}mn5V1J%rKLvEB!kFSOg-`1O{YZr82SR3bCr~l`)1ZXxXH zxf5~=jB2>9gmi{=u7)d0$YGS40@M+dx(X;2D7?;u91awLIT12Vzg729>Mwe&o=2(c z_1koTQa9+g>msFY)bG&!l)6d(s~(`#&HCT;Af;~6@6_>JLdbM|rjBP4_$xHMLC5n5 z{B@f?OUJVaA$RGsbv%a_`TFBJzV8WHpg*DGyPlAT^(S?F Y(-ZO(tQx+D33(AHeCNQwbPLq~0HNIr`Tzg` literal 0 HcmV?d00001 diff --git a/go/mysql/icuregex/internal/icudata/uemoji.icu b/go/mysql/icuregex/internal/icudata/uemoji.icu new file mode 100644 index 0000000000000000000000000000000000000000..11fdf50ff187d7774f74e149007a23e820a0fc77 GIT binary patch literal 13024 zcmeI1e{@{camQzWyxp}x{(*&J;~(rIELjqzvMdWbNQ!U+u7k02T9ULUY1|VTTg1j- zY;Zz}Q42|Wz?_f-%cV}*(9#pfBorE(lLm7R#czyQ|pAAk! z>{GFsvCqcl#%_x(HPZFY3iI@EYzL~nR!_=be_T(E@n@pgNs-pF904rv&9>ni`QUB&%+Wib%>Y9 zq$K?-l}x6R1g$r&TD_+{p)b*A|Dv7ufZn@nN9|=Gdh;9eookp^B1+*&FUegh)0m87 zivX)p`3CjwUAaJ$9bN-t&8q#o@u9utF_o$X#h+Rfy1#H?ZLKmX(T{iO%{&ImCS^nx z;;|j#A?UgP;2XefGMO~b{cZ}jSI{Q1yut(cEZMN|z&&6jAT4{DX-y(DdX1RL5|DVkVueU17g{k1Q>+Eq2VdmjjY>V` zwECzYE+aE(^!n`eQ+u568_!+}JY%Vaw1@QuOC_p8#N`?EhLlI&rYaE&7q{G{yyQ4v z9~60d@5=|}c@bjqV)O%*T$rN1ylW{J81GL7d3X-&12r`BWCcFlh8AdRMK++}1(z4% z?J~bQ6H3%P5bCJ;lTf^VdT4dyRH#xOYpjr#^pnz>7HN`A?#*)Ct&`vOR>(WOWsoDg zdLy#8HzWso8)VpN>H01F+wlKw_|4!pgWH1qzmV@V^5bqqegpri;a#^v-ge6%N8UsC zk&8{&x9P(7!tXTvYi>ebL-sndjFD}Ee+ho8;lJ)qlt-}n3^t#^=D(odi2g;Re+2%Y z;MW@dA#6T`Y$dXXb2cLKZTJTbe+TyN!roojyBq9sbYIKqBC-^Inc-(*Zzh<}AiLej zK9z~cb(xTy%J}`Nmo?exlFlZ@&0Zo0vI!_6hq56#k~KYt?}g7AKAnxrTUkeTAn!q* zG4fYwzm4|WkiUt1drlsa&G2s+ehuyaj`n{K?l;I^G4hGT(2R}M*jSD3F?4@xbj|SJ zfbY&><5G)_I^^}puQKv$dM}X)w4Z>y8hK4l9+4FMm4?5=jmr`;xPt85M|Q%d0^;=% zX=n&ZYlGkSrrI)Ts&!;aZA6^X-wVGF{$RBq z!xnH`z->kTI`Umc{xtkE@c(Z3Zfvdry9VqJ(ESMAk8`?+tcHKg@GHQr0QWs`k0M`X z;s3($FHHEnR1xndu|1bIb7}MEU^>9uVlbbCZ-<|5_^ZKP4es}mPene>$gAM1 
z;p+?^#pWomqri?smqd3(P8X4(@FNT_-~?Oonn zk;hUI`AI4y>r%drRq);Ls|`OhStc`+j$DC!8uCYt{3jBTrz9jTNxz-{eGzM4g%Mp2oSthS#E zwWe2u+E{U?%5&~o>0#B~*IOrtSalDvk{)IyJ;KU)lzG#Vj>?Iwbe)7wL8qZJ&^~bc z!5sj1umo-sHaCHN3GB;Y_m;r+FkdswSC`eSnBO;8b+@xhZfAa{!M_E5M@f7{wt;!G z1ZE5NwqS27_Fe<~dI{`vU|t2Yxddji`$y7BoExya0lUxA<`=a2*Ai`7!L)%{UINpM zy=LrP19mFdX(h0e!8C!HQUWuM7^|^ajm-w|lfXBY#7AU2n9ECGMq_U@_AbNT7_ehY zU@rzU49ut!n7I5ZTLC%pYgT=`qZF%-Z-RRf+$MH|m)H%8;Y6;Ldo$~#o64U}r3EfU~>l88^O*3ds7K)H<`E|%nc zj@$$O5i+-`MCQJeapb$a53J*TK;IdP<>LNKh1?JJn_#~M_S?KW6vN&N<}bngRS8UI z=4M%1;jl?+z zn{%-FS@840FDQw3WEQxaz|Ag!n}N+4*!)AVGr@kQ1a>-@KLB$>3CuWly<_ZpWxPRL z#2Z4f@hPnt&HL`fp=0hcIq6;j4V6>wNIC6ZDrda<2+QwW>DmvSgw$3@KH%vnGzo5R zPWnDi$2=O&`}#CF;kA#+Zud1vy7oW^pyQC*87}*%+#|f%9zmilPeAJB473kYuiD}u zw3|0uwelhK5u|=KviG1}kjA7|WAukNS~!jHG^ExPsjb!GVfT7D>^9=HNsf9kR(O#d zrQInv6+Q*VwkwQ6Bi&8JHWKYqFq(JaZA6+6&5-)`$3P|87mZA_pnW~SdVA2FA&0z3 z%4NShoTw6zpA*GsG&UE01o>QO9yChzqV{bbE}~CE>0`{xu6nas)d&uG$e1^hc5G}m zZQSZ{xBD(|+e5JzgYvzz zc`Y2>V9z3=oJMD@X*^|&`B3UN2IXVXI?%pq*0hziuj8f`{FX<^_i=YD)@uc!pMDTDlo?TD-YU}b3!e#gvv*Znh2+}ds=BUj04Q3m;QB3Ml}pLP`0p7mqL z#pc%fvFm~QQV+wa)e1R=2b&SC5%r?|&?>Td)~M}Dqt>nE;7HmTRzc1#V+8wGo$b5! 
z$L2lgWuO&gpwU!M&BT8&+upMv)86wHC@i;RE@Dp{!Tvc#+ItouTMFHmkuK#I^Q>6Q zcksN-@KN@QFGl9o{4m^-UK?&r_k`Qhqj|%v5ASf7^M?JW(3kklqf@fX^L@GZj!Wfa zHYKOBzbB`&)8tHcvXm#=())W`(wwn)?+=~>g1Vqw&j&%BwhNve3U%7no+}2@XPp9 zPQA2;{0Q5La5)h^MTGB|2xI)haNLVQN@+)1(ORKo=%+hK<`?xD=;0W!Eafs?y(U$ZWBAb2Aku_2+T1nECknd;9AnAGn zS_^#ueF(KdOQCzA2cTBBh4bxr$l-j?oPRl`pA zAIZg;Ti6fBLk@eQJ&O)5mc_Kaojvsq&Rk2N!Q^mo=>GBd-IuP-L?hQ`z7o1Fb4#c* z^VN`^R?Cy)&8{((U1I^eMmM|0%Yj`Z8=hCQhYbCYU40(Ay6zfhUu`CNBM_2KeBA1t zH2d%8c0GFv56|Oa5gsO=mxp4z!Z+}+5)aqnA$(pQitPfk@bE=El;L43zjdB}mh2mf z&C(b=)ZyVR_R=5yuXwPlcB8lBlNm;_ zp~Clypcm=7m3bHR;!6^;kq9=Tt6|l<3hH7F{2nwv(;ZrnSxL>_A9@bG&fZVg=&Wus z(ZA;^rxl$v72QNd&!wUtq@p(jD*A5X-9aXsi1!5Xp4(XIO46F{%nbB4WXEVGmVbuj zIF>hmEX&0zcr2E?u&nPjkAE!7#VYqBM(~fZ{4*>s``DHTdQW)~%hg!!!t#vsvRqiH zx~`0%O7mkfaCPblZD5TfSFsBOD@?1(R-RUM2c-YKND+M_(Q8%rU-yDP5ky2aEON!O`IS z@U!7q_-fdQ&W=_`_eL*8ees9mf76Hb>o{uxQJscp1pfRUT}PjSL#BPAPY zF+{UR@6(47Q#t4l76yxgrNPy~O2qfq!RFurL}}z751t8L4ki$BDV!U=BRoI6BwQA* zK=fY>?+Ldd2Tz92g|8qJKM8fz6LCH+iY~L)<;czY=*!X9k)iF;W6{&d(~qKGM8&u- zUJzdpzaKgKgZRey)|f}^_INYO?eU}W?s#u}P1VBiNbSjUVfU6qTj?D7iSkgs7@^|1X43SS~#aD8`s2j|G|P_1XNvyF+)}*5)7)Hqz-##!T>nn-sV==NvA1}zP2y+8s6;di zC((RdN2Io>bg`7ooeoXtD?RxeN~=nr_u7nbdug-fe53SGOJA#nouzfc+EZHJ+GX4C zr4PvQQTjZ6m7bs<;I5$QFcsD5>Jp`C1~Ar) zCUN%Hz)sV&ZGV}b*)&m2WgiL}5r2Mfsxh zFuR#g>4c*vvgX;>6q{7afrHtVSkRX-*@JF{kiAZ-ley0+2inP{4?_q-`mn5`va`?L zWHgz5BVae@O!{~#Nn)H2F?dlFb;sj5m#cM`ZS7S&^dJ-koH8&hBcteqy9Zw} z`jrY*Dw%%MbZ*mc3XL6bmX5Q8IT#1LOJ0@+53$X9laUiQE-+#cKJc{Xl~4+!a^x9z zwvInUo)$bHGBW{FW#Vx%N zPQJ#wPA-hQsR(qKmwdHwdmPkQAGsswiXF-+mQ{*^yM@sq`g7eo*hHJ0V#R)Nln&e09 zaE6p(X9F!y#GWSWsT5sFm?>M=SgI*~$N_VpP_3qEHNjBXxo-eN0C9wzEFN2AlQ<`o zn%`{fl#v9CwI-O3iBkMwL8gq4n%9B_eyHJL$$W-1LQJtT*`oee&FyT0c_x#anFs?%kBWq z-!G97fwK$1v=VSR{nC;U*a<7gU6|(1FU~2*_AuA@6qKe^*Y>QR@eP3m7%MK9HV zrJvSmaB;99*cIsTqHtZfGkhyt9IcI>h<+I@ia!_M7w?PvlV!;<-4hH1J;C`{t&Fn$OZrFp7l><4a9VI4qPsGns7`zs| zj(B^*MtC-&zARiGu14&e!u!Jqk%8yKABIPehiWtsof$2TE{(2AUF*qP-?Kkp3jq1=bv=o&qB_3TG8AEc|X^MBWVMm>($|MDHILZbT1rZ-w+- 
zg})V@M+#3#xwmkj@KZs{^+qWu&L|EReNKCQLGfM1i#?f-Bv*K?QM$gV)viA|XN}Fv zn&LR!43u&DQt@lWvEp9(FVvqbK3yE4mx_mqZ=gRejnnMX+e?ggF6!g7q;$6)p({$) zmkywHZ@Q+mp)`Wp9nw2a{ElIizFT@hbUJx)p!Cxcl}D&v9x9L1g7R-!e>%x_<6ThZ z{BKU*U%slmyu7-+9%$qAeoqxE&P?@Pa3{-^TyQQKesS2~3LAD558_T%xn^dB@4 z+@01cgB2rxR^?CB;>v!=Tw1xNGD52=yxP`Q(qLm{OQqVD@0TGtPGglFl|7Yxz?i6T zd$e#Ax}E%H`D0Y7&aDntx!>r&v-+N@k!Q=7SFf`z9+yv5*Ht%F?-%KIM3%oheQ=WA zQQBGEQyr&$)x#O$D7^{JIF)L1Yoqjz8rRROy{~qa)UL0s0(bhyT4)`kjkPVc9r0N0 z%4nQ+)D|XtGK_tKI#D}X8=+EtoOPzaEL-yMGs!3$uOCOTs6Z8f8 zCVh|o6F0t)enzv@IqC`((i)r_aGu<;Rqg7C@tWc4Q}q&kz22a=>Ro!j&Td}Qso>uw zb26Ha!qhT=Y@z=C7KUrKi@KhcQfoal+)8-OFf}zAy{h5UV51>7bMTz@@Gy8yc`&A2 ze;L-vStr!^be(?5Z$6laUj<8rtaTzb(P#{2|B^tHWhiR34qir@`YAWg?CBT>sQghc zEQ%d#9Qy9TEOV1Vvl%c)_ms=zy_U_5BYw`ol##P#5V^=3;sYXQL(SA0dDa`@5umrX z*9+Z*J$2q&t!H_+*ft-X@-Bmxxq$x#ox<^Q$ZS@EH^T}+86$gfV=ZAzo`&_7QSV?~ zz~iXh`ivRckAK>+dr{kJU!Pg)nRWB(6F;t9=8;AonogOwKf(Rj-3TBHW<9r?jP({q#CUbA_rLmDacyJ5}+A*G*U3 zxRxXZ9(tPTKy9fdt$q9q(~qqw8S=tspk7aY&>_P}E@_y+Hj;faYqiK5afSXQVWHvS z94tw^F|$59ga*f}b-!H{E!!D@@N$#?myi($ol_i#GiIRAfBMRXi^XQ+1Di5au0#Dz z4{`1(!U z)D3VpfiQrX6K=4OzK;Lq-+ryP+I+@YBnym7)^;cotHASN9&4IoIi<`yybzaduRG`n%Z69XQ# zKr?acn=!+i-UcRrd{I6978sG z&HTZZ;5{qRFsIdkAgy>Bu`ssOc&KBZoxtc6d;v6*x4P=KF^Rr=e3jclEC*+rorA086v_WF#U3s6y-6@k?q)m zIc#S5gq-AF+9uRF~*n>@q1%T3+iJ=b;B{isdR z-(9=jI{mvl!&%AfO(pvGbQcY%g=&npH;D#n5v(p%SE(CRN&TtC+tA|4YUuo5lb+w$ z1g(2AjR37v&^k=p)FY}Yh)-GE+~2(BBe{CsTUWEY==ySXHAfX8`~A!lht^)QSInJl zWw)W8g7>PLWqXXyT~ah#nY#7A_&-k_-v!Upb+rKJZ|jTom0A5`sMXafxpu-@J86%R zuHhSFcl;$}H%=kn$wo8J#5%nzYqm-=U{X8b3swGUtL>VJlZ|BiE9ztPgq|Ltnm z=+5cnL3a&*otVg{lJC}kBKDi{j1Qfz&2r7H>ByfJbds7ruSac=J$D=SU32Wl@9jgl zcb}rlsFmp)bvgdi@+Gt$!1;`tP$fNllE+ctzDJj7G8|)h^8R7{1`w75!5r(eTAA*X z`kS`LNtlxDJ>tm@YqC)`%eA+5v_*WRqXl1cA9UDH|Cj(T#j^m%_RFRoCpD~33)s)y zWOOIX(e(JIi!FzJB4zWO{Pnj2k_`hFZ{r98uhHbwWPB$yRlk!%+1Un6H&Lb_;sTF; z!-emzDhxENZr-$#ENBiejJgqJH>?uNWOddPKEBGz>X~3BCORVNv#fut$9+v*fz}pR zz9LaUU`p4@Y;Z+qhp>&Nl`wrC-vG;1wJKVhnX0SsFA$F`n+xGe+kXFox(P>^XjpHM 
qpa5UJTP_Oug2ihGN)BJZeB~nPu@}~33CGK%!C*7fUL!80z@4o;2|7b(1q0mram}=-XI1JMaqlQBba}1{# z78#Zr)*7xb++?`d@QC4A!*;{IeIFP;GkjzC)v(VH6P6s75mp#h88$VnJ#62;{xA#t z-~V&+?)0#O!;TC)G3?a;*H8TK@BE+dED2i?c464XVVlEl2)i}x-mu5Ro)3F1?Cr2m z!oCXoA?&ZPePMCox#4Bub>S`H1L2l%Px$okgTiNrpA>#d_~P)D;TMH(3coh|mhdg% z4~0Jw{!;i`;h%=@3I9EOUwCvxT0}uaSwwY2Ys6s0a6}+tJmS!ZV!jXE)=9oeW0MY7)xMZJKoLq+2K5GwHEO&rRAs>HSHcPWo!no=Ja93XhD9%!n+EtdDGs?2oiZ z`XgsV9u+w+a$)4zk>^FOjl4MW%E%id?~QyY@~Oy|BHxPqF!J-rA0q#V+!q-$IeBv4 z&X8Gx?&)mruTK^6ir!oczS(=O@25`Q6E% zPX220kCXqH93B-Hl@gT~RTaQ71;79Cdos@~DfVu86uJ z>aM6qqn?f09`#Pt$5CHK{Tj6|YI1Z^bWZe?=!WR7=)q`vv?qEr`oQQTqK}Q98+}If z(&#nO8=^Ny-x0kf`myL|qF;`FGy0?GFQdPY{w>-N6BUyflNOU5QyH^wUqeh+%s`Aa z#v3yuW@gOnnB!v>#4L`vAZBCCRWUcl+!gas%+{C}V%~^(FJ@=V7coD^{1Fot8ylMz zn-g0cTNT?B+Y#FzYl$6>ofbPic4q7`v2$Y=#x9CIH+F68rLkAV-Vl3F?AF+qVt2&8 z7rQfdckG_n-(&xcjfzW-%Z$s9tBRW%*BaLyHyCG)^To}GJ3Q{_xRc^ejawAAGHyfM zm2o%5-5z&u+{1BC#%+t+9`|0{=W*Z0{TVkoJ~2KozC6A*zByitx5xY9XT%>GKRf=! z_*3H-#h)9$D*od5jq%sS-xhy={NwRY#lIN;M*N5IU&MbK|8x95@lgpW3Hb?A5*iXZ z5_%I%3DXj$C(KMZGU23zg$auj&P!OIaCyQt3AZNPo$zqNlL^}rUQhTS;q!#=6Mj$F zmoPaoDKR54FR?VSF0nnaCs9kZCOQ-Si8B%pOFSxZPU6Xlrzf79xH@q|;^m1~C0?I+ zd*XeGPbR*Y_-f+&iC-jsm-tKK-o&t^sHCK%tfZ2pDM|H7ZAtw}#w2T!D`|St!AVCY zotU&BX;ISBq?Ji)lQt*am~?m2gGpPHwk5rp^l8%Wq@R-ZCQV9?Nlr@6NX}25lH8o! 
zo;;X5oE%6VPd+U9=;XP{rzfA2yej#k-veft_y+EThxOexNkV9Eh0vr<0;Riypr;6%FdK8Q@&66IpvR(e^SCyBU4jTb5l!Ft5O?M+fw^dEvcT=vD8CS zk4QZ>b$;q;spq7gmwG|!C8<}Y-kSPA>SL+fQeRDdEA{=6?V+^C)1FRyDeaB4 z57NF!`zGzjwBOVINsCI4PftnDPA^KYO0P?APajCPrTfwkN3K;{9NM`RwCIWP0n%*B~YGgoA;%e*x6>dad*@6CKP^Qp}3nQvx( znE83;cbUIt{*xJ&6_b^mm6es3Rhm_w)tS|oWy*48`Lo8dW@R0dbxPJ5S_MG=} zcIAAV^Ha_rIbpf6xoNrCxdpkEx%IiNxjnhYTzjrJcQp5a+(UDZ$~`uBUhd-D^K#eb zUYff(_om#ta_`ICn)`I_w%pfqKgj(e_lMlSawp{_=4I!V=GEo3=MCl!=Xvr*@(##5 zG;em^@p%jKmgKF-Tb;Kd@AABB@@~$%Bk!TSXY$_2+m-iy-tT$g`BC}F`8oMT`Q`aF z`3?DP`91l@e0#nte_H-X{*3&?@{h?sC4Wi&n*0sw=#O_7+SkOejn%%qlD>EG?W;*i_hEI9O;d^c0R19$0u( z;rzn03ePXRtnli>n+xwQe5mk=!siRO7rtBgN#WOpKNkL37*-TrlvI>nlwDL%R9e(r zWGu23xr+it<3)!Q9Z_^_(Md%Miq0rHyJ%U_s-g>vHWpo5baT;NMfVpyR`hhyi$&Xu z-Y)vEXjjqhq92NWFESKI6(<*G6&DxR6?Yc*6&s7K#qQ!@@yy~m#S4p<7q2Z|U%avS z>f)P-kspQX+e@enj zV@i`tGfMMI%SvlXn@W32wNhKDt8{wlA*Dx^o=|#n>FK3smo6{8pmcrdrqXLlZ!O(Y z`cUcC(q~IwDt)8$z0!|MKQH~R^ykt)O8+gLR2E;BURF|8Q`S<}TV^S9myMPkQZ~En zq_We>mY1z9ySQv)*|lXimfc4V_o$~yYH>P|%<*O+_Px*UFR8?|SUR6oe zl&bowma5LGzA9stwaQiHubN(UP}Sj8M^~Ltb#m2dRp(W$u3A@hN!7l6o2ss^x}oZp zst2nct$L#BxvH0|c2s>>^<~wMReP%`bDt6Qr3s!i4QYEN~rdPeoE>eO=pdz z##eK2%@H+oYfi0MT(i7pP0htMn`*ADxuxdrn#XINt$DfT^_ur;KCRhZ^Ha@VHQ}|f zwW+nawWYN+wGFi$wY{}kt-W?y?O5&1+S#=y)-J4FT)VV(W$otL>uPVRy`y$Z?PImi z)xK8yZtbVF-_-t6`&X@@ZgO3GU20u+U142$T}@qMU2olBouzKL&Qmv|?#Q|m>K4|W zUAMaKvbyW*?yh^R?uEKn>Rzw=sBU-Np1NP^{;d1AE~-AcKBvC4zP7%#zOUX`@2L0I zkJTSqe@y+n`ZMa6)~~5wSHGctWBrx&*VNxoe@p!x_4m|2R{wnc8}%R8@2>x~K6+}} z)aRYtnrfcvnL0jo*3=WG&Yybf)WuVmPhB(h;;EaaUOV-csdrC(aO&2n&rN-0 z>YGz{PW@);Z&Ux78r~4oklc{fP}oq>P}k7X(A_Z5U~aHAxEg#7qYVc%9MW)j!|aCR z8s;`EXgIUsoQ4$*7c^YlaCyVk4L3I2-f(ZjLk(LSo^E)l;kAZ$8a`_HtYLS<4-LOH z{M``V7~PoInBJJ%Sln3ISl763Uvpz;<6xt$(c3t^@sP%&8c%3k(0F>|IgP6tFKWEJ z@!G~)8}Ds=r19y-ZH=!rzSFp~@vFuk8vkhgyV1}T-IUyv-BjFE)l}cq(WEswnu1LS zH_dLE+jK_LvZl378=J0by1D6&rhA$mXnM5ixu)$+?>Fsi+THX^)1OTd&C$&{&4tZn z&GpUA%^l5Jv!mJDJl=e8^X%s1n&&nzXg<4nRrAHo8=J3dzPb7K=Es_!Y<{-+h2~e9 
zUvK`j`HSY=&EGfw)cjlXU(K;CDJ@wo1ubPQ)h&%JT`hwx))sHeXv;w@N4CsqncK3k z<*b%vEvsABw_M(Gb<0gHceXs(@_5U(mRDQeYWc9`vzFa0KeznZVrY$OO>WIx2XrCs+>n!W6?QH4n=`?jZI{lq9IuGfb)p>O1@tyNJ7j~Z6 zxukPt=eo{~o!4~U(s@tk!<|oczSy~=^Zm|Uoj-K`(;3^9)m79rrK_Q1EG=XamceQx*p-RrwIbzj$gTlbdkN4uZte!2V2?hm^^@BXg)*Y1D1!+WB7;(C&M zGJ0}*ih9a>s(R{sntR%NdU^(XEIsxfcaN`UwC8}HnLS7J9Mf|`&-|WKdlvO9=~>pZ zvS&@t`kswFn|rS7xuxf>o-I8O_dMS7V$b%Tw|hS9`MPIM&)%M}-h|$?-lE>x-j3eB zUQ@5V*V7y9ozXk9cTVrZ-bKADd)M}^@7>&cTkk!+5B5IZ`%LePz1w@=?)|X$litsJ zf9(CWcW>{$-pIbVzLdVKzJk88zUsb)zP7%uzJWeVpQCSD-+_IH_08@(zHfftX?=_O zR`#vwTi17K-<5sW_1)F?Xy5aFZ}xrC_g&wgeUbes{RREi{cZhPzpH<&e^&pA{R{dR z^)Kx|zyG5COZ%_tzoGy3{=54h?BCk|O#ci0ulB#y|6%{9{a^Qg-~Vg>-~AB-u>&as z83V-wzJ4xBZxY+&`k`hm*_t{J#_;I4uD z1|A)Fdf@qi*9P7mcz@vIfn5W;2Ywv*ZQ!4Q@WH6TxWUxHoWa7uiov?Umcfp}{z1#2 zb8y<=$l!s4hYrpjoHKah;DW(32G1TmfAEsQD+g~L+%mXz@R`Av26qg8H2C@84}*IL zCu=EMrdFU;X$@MpHlPh@)3oW@!P?>4G1^JmLhVfLTy2%MPPtYMyC6#ysD=$b6o8oq3b_2J>C! zht1EJUp2pL-evyI{JS~K5@*S<6j`b*EtWou)iTX8!!pZqyk((fiRFCD2Fqs4EtV~o z$1TrWc33{Nd};a7veyzhlr)qxR5nyU)G?$DIfjBm2M^62nmcsH(6XVmLmP*#8@glY z!J(&yULJaT=#!yuhJGFTbLgL;Fl(eW#+qPFv1V9vtOeE*YlXGiT5oN#wpqKZeOAqC zvD&RJ>ojZ7I>S2Cx^Lf+);ZR>)`iwZ*5%gK)(zH8)@!UcS?{oJu|8~l!up)`W$O;> zyVj4bpIg7N{%HNnYOqbV#oJPC*|tJkxvj?5Xlu9i+AKDQEnpkB9cG(tJKi?mcAD)R z+X~wSwo7eS+OD(RV!O+Bziq4SdE2YDH*N3RKCyji`@!~`?QdJ8J=UIP&$buXEA0*T zR(qGd-)^?M>?8IA?1$QqvL9!kXFt_`mVK#xm3_T^qy1X@&Gvij585BMZ?nH*f5ZNv zeV6@f`yTtR_J8e@h7*P}hl_@*hMR`FhfTxI;o$H=!$%IEFuZX1?BSKe7Y%P3zJBa)~JnDGX@v7q;$48FO9J?JqIR12mJL8<`&H`tpv%%Tv)SOnQ%jtKHIS+Qu zavtM6(RqsV4CfN(D(8jHjm~SGw>j@~KJMJ+-0pnOxy$*j^H=A;&L~&1E5}vps&%!x z`dmXUk88SXrt4_eT-WKY#jd5Um97h1>s^<*u5?}Ny2*9B>mJtwu18#3T~E8Vxn6Q@ zcfI9$-?h{Axofv;kLwrLpRRvh5$~^|3hp8I_FTK9VQrS47c&F<^mH@R!IUw4Ej+LPc(^<;VSJtdw>PpzlX z^FJD%|NqPX4+%Iefq=zVZ19gcotn{UDK^;b7N5uJ9P`?E*QGg~#Rik+wFfj_xU_)H z0_-OF$>ec)d2O^i?SU~qY`40L4Wsq|Uo-{X7Wk(GsxPJzlxLY0PJLI_1@fXVfCs9Ck3x zpkJS%-{iA<`KnE>#Bbd}mrFhgYi=2Y z{B~6HnVE%`i@RB0zkOVO&(g9WG6J85jJVm5-7U**wv2FPD?>yOmLV=aV)2Dk!ZoJ3 
zT_PF3=JwY!ZMQYFE)Kgxa|HQ{n$Ngfb6MPOw8rHbvG9@=Mw2W`SS%k}!kvsx&4gq| z5hAy@T0B0hT4SUkD&+Hw>LUnEz+^*n9TxehQw{O zEW?b{Zvjl)V;AZ2O5MVs85ts~@6^Wl!#ZnD?&Q)J&+OAi`7Giy2fa=Y(`VQgr)L!F zK-amYzG?{6ra6a3?Pf8L5f3Y`#$$1t8O|tX=5^vF5wg?c@r7Qac4mZVF6gh*en`iv}tySKj>gY!(1|SV|wV+S)+X6mb@Ant1EjGzY+bl+(g{9W*9>Kc(48)D%VUOrL zLLL(Lq3p$N!OHC;OgLb}uw>pArWf>jm7BA>SqAQ+`A|9~0Q09O_{IBeVw?FtSvlK|5WSQs+SjT;PWScMWaK1N-IA)2um-B5;Pl|q-1 z*~^F>e3Gkq_!z?UX*QR>wxM~+9fRJG@EmwZ4-&HR5w9~SUQm3dnW-2pET@zAeHIMS zuO^_m?0j1{dVFWlZ4M1icRzgFKBi4$0-^?1C!WfVB2F(h;8`@IHZCTNjo@Epbub#6 z%+VV3xq`Sf2IvV)Xk5_eSK@Zqf@Wvb}V(L6hcJ>g@LT#KRb!9_2du zx=eD+>mdc_Qlj+;LtMGc!$L?wQJ5Vr4Ur>LgyFaQMfH)acxLqhq%9#%c|@^)BB$5q z!8vJo05My9Bszn!1uV$Uq)g8zS|alhFTk6`78G|v=j3W+0WxE2&^@fo(rNJ`1reVS zfyl4C3`Yi`TlbLN&q{d$0gp?5iH{oN)4H$W%0Zt)gh@o@zLHE>rV~+!Exw?VGS_yv z_=3-4LpRh{JwXwdW>%hk%);xyDC+ZH79F^>d;UZ=8F??Y9qWrlYAas zFD#<+A*Urn8L_yP)a)Lnpp97kD%c@K|NmGT%_%{b(Ti=MTn?LOSR5?I?)6AMKrGaQ zlT!~?_+Wp)gIHYwZNwK8KVZ~+XrK!1Dpa8D*mOWwE|)eY*#_QF*B1msV!|4ExUDR= z!Q&N8z!=7n)TrhrrtqV##$soqvObQWPqGcPk;&kvMRk}Gm+@2V$!`#cED`2qk|5_Z zXq2Gx&X7mk-mr4ZL&|k97`q=wpmdXrOAJW(Xm_U5y z<9a3H)18L$N`zwkr-rXo@-_yop)^Uw2N|HOu+OABc>^jOkknhop~8vAYB)#;#djD< zPf&K+fbUMFCJ+E{UI}q9Ee%7NQ7v)KdFs;^>Kx*n#qZQsIaUoD#g%_(BsDxu_bC z>qzc4PWFkhDEB4~8vlpeuI#;z+r+mkTJ>K)L*4aY3IX(EF1CnLkE7TUe&!C5e3Y91$1 znUxglnsDwpQ6Ey%b4*9AYH%lZwIyh#vN#x|ke$fO<&Xqfx0?~mS`ucxtQ0oucChXkuGooupRSfi-qA0>mF;5| zvb<0}&Q(0_iPFM^Y13&D2R7kB#d}!jo0!SM{dNzQM-M~PRGKfm4u+~xzlC*Gt`kv` z9*v?F;>D*+uBJN<@#drhK7@(_iejru6H~IP7-W(X(yO6nHi77)-#%T%XlZmP|6w8# z5T~K4ScFfNhTOTg33&|RLz$mjcQBIkk!FjdA&SXrklTcUvE=7Lm%PC+{X{nOOa(LK zDGM+D%PwwT1j{%Ab_d=XgCXBwXiiP(=tMg7fB(SmiAm&f6N#Z7hC+TC*MyR?9L_N= z%bjT~jH)&C7@c-pB}?L?6(w=hrD6;TXpp1;#n8iz*qf-NXSzMKR>)2I0!TFIBu^z3((WTioy!7{EAH2EeC6Wuh zaevCk@5oL_x|xcbYY-Y;{N<7u;CImcLlDfEDj^`Xj)<0_N`ugcb}BcyU&Rd$-&oM$ z86h?@dZ%~43^1}03;Bw2E@GN^OBuBu{FyHGnx8uAPf$u5Gp^n$Hwc-fA6c`=l1SmT{?FCYe_~Ep3|F&` z(*TfVK?$7V+g!v&I)`#GPrzUd+U&ekd5IV|#>!Y33AwfINR)J#y~`uPLtkrkWxh7* 
zkxWtrZ;=7FA)<&cvzv4yMwECwYy_o{LQrC)(k&SuZdJ9*5WU4TX3{;m-(r=_UB*YJ zOj;9SR-r^p-eo%6$f1!fiH9L4V|l$c92?q?db#%}z3rON54Tz9+5~Ikg zR0B(beyFpBBjsui8=}?IH1ZY;mynevB{pFtp=^k1LgI>kM~4I{F-@-IMSvztC@ztk z^OnS2)qeDPM#gYv6L|#=*E4K4;__uKM&3t3akoLY3i6U{OPa2v$C{`BBK3lDH1q_` zL4U|UtK22z-v~nF*$jsO;tTj`4`|2_zs$HMdeeRaCF?0QJGo1+S;C-Pbt0ox>dxIw zGJ8=ADP8q;jPwK~5%!G{a#ccVAHu2ngXA+7>AgzZ)vGd5qP^Q13Lw;mI13h&VQ?{G zhg4X+B0$NqTJc^QO)y75lY#S&S;Qi?Ob>No?vvfgZjJynGmnbBckpf_dD5a$Bu=}hw`?##8TWQ&d$Ig(E z;D1HV#1E{dT-Y2$$0dPPRv(hSn9@)H7v&x$iNYd72Eo*cz815MjGNf4m#@-lk{${J zVan1Mljf*_C2(6Q`}Of-II1D2ly)MDqLmKPJ-y2?M$be4iez%C=Z4XfldBGW0HrXq z?ow>IRnQP2>pem-L@6(1q867|@|jTH%~(`INX5#qu@9AE@hLrf#Q2?5EFxXn6(VK4 zUBDg)>ekD0n1iMOtE;z2#_iT|2@|2HDb7qZiX{0zFCi;u9;1wx(1q7=5=#6iFanm9 zqGf^YQv>o9RKZUxix;|0AU-|uWSzwNl9NZJJ~tl+V-I#x=eUH{FxESOA9=d70Mv35};86vl+MG z^bqX`^XfHDskKM7Kp8lWab?WptAMBMXTMq8+$ks%0ey6|yYsDg3wZgK{!%%NVuf zIO1j7PHrG_mmG>aAbNd)P$NApAYY``*LNyHHp}NFNl@X_Im9(ZpIJc}s1$v6AA05U zNamuvoW-kW5Td9-6v)PUB6l>-N2Y`ek+up>R_}y`nqLN{$FkuLxT*s~;M(MWl4z)2 zh+)`n8(}NXDOWNheR3gE^B^wCqw;N3+l*h3wUE5YNa;swniX_g90v1<$AY#yMTGor zj7=P0kj8^XHpEVcL4^miOpMe$6k;iGB>w1oZMe&TY&A&E$O1EVRTm*zBRf^fGnp;& z;n0eZQ0&i~^{#*@qspnpBkBz>zNX3~`BiB3#13WaP?{(KPiTL@KVk{U2GvB3s`Nwl z2E}JdQb;vmN2+3|LAnB54)2CzWWXxLSMR9pHVK0)AVE%QOv5zE8ffkj+(nyEJGBjD zpzbCEW19S7TSRY}&`p#NsUcG|Os8&lseq!F3SOML_#BCGdbl342gccH#L<#wss7Fo zgO*k^Zp-N;3)9O~8Is#9tya=vHGEY_oApA#kFn?}zCqe}bn5Z0Yy*h9Q~RxS{6dor zIcL2NXE9ZUNGd?-h<&NlgW=)18G`b5s`Vqb8d`HJB@nw7BObG^DSnS^fl8kqZ^VAE zQ%|AAmW_VB!7gzNA0`S9f7Hx?9iuYt+h)r?=tCO5U^lSUtr+AyE_)mprEF^Q8?ixUbq4kln7i`Hx@wK-yQoMLo%f*=`mU>#ovQ!s zb5JQ!9$*1#-+(mP!8SFWA$sg?N$AM$Xc!WpLTbR`OL3>t3iKHL5mTUCa2m$6Vd)^~ zO)S!5i;orrR;|`OROqR*0RL4sY}eqjOn$>KrY5y8b0nW&s?zyDHzb@ZgS zuHE9A#V{s{I-lKW6#Wiu%kgb)f)Zi(UbQC|G-3e(Np5sSm;Dc)U-EplA;LZ_O*Bav zyf|`MMg%C%noNbusB4K;N%2G0WWs1ebLA(Z+r&?#yk5~KNe2UCUi>x&B`SbDiyIQx zO|r>FB)JzG@<~*d5QyEX%{93_#w$q+`Lx)csu^Y9N~(ORboKH}xd#TPdi{hU#z2N3 zvu5-*k^$09?q(#^v*#W(O4m~KTS^)dX}txj`a&q{;ay6?u!r6adDO|Nb4B!P9!r33sI53(BgIL 
zr6xL#juBUpwA(%|&R|rum#<3(C+;Mhvf8)Vb?}-*GHlPx8*-W)bf=X8j*&`-jGs-LH ziI{+j`0{3pEUTP4P?fagSX@42TBw^YPqGocl_Eyzx2er=X$a}w(qtQzE|gQ#qy;#H zF)<@q0qS7XP_hQSxc1{d@%eH`r$}3GOW>7MKT}+`DtsuxN$tdy`GP0{uUI+6B)t|f zpotoe`77_Hmk4U*#N_>|sMZS)e49@jmj)VMk&Q+ygPG#gl?=raRY;OfGL7~LTlG>6 zab`m{-9)O`y__cC3>Fmy&0{V`>r*@zQAqWSc*nx#j*%=}Oq2nO$T<@flq-qRhBiQk zq%>9fCXgVJ{;X_?vOxo?xLxnDAVyA={F<=%6ZKsVGVAduMnAlNv&d5lM;6JU= zvmQCTfVPs((j$>AXzFqDcg8HYRHu^&i6m(x;CN+ODb+cl3Y%!qGCt+(MV0qwmU?7W zX9Spq+DVq+K}E(CRgp~TZJ}gAq1@7e2xSUGB4_SS)D{^b{Dhh>W7nSEci5RAxBZ=aSy)t;lLK|M+F_m-xj~Kd^ ze^h1cY}4pF&ghVGoLpFZi#R9@7aI~EVOOGXc{s4j(1tk3Dt}|&Y8yvyZlG^tEC%^S zsM<215Ng}VDaR&gMS3JMTBR3;VWAy*+mg76pOl|Tfn_Wv`p_bIeHYy)=e^uAHChuY z5a_8gR}#DSn2<}T@%n!i^Kx3s$H4^JHrSa+nop8bD36J6G|}3Utc=@}m!VvGj);3z zsjCcxj9z?Y$OTABBi$ehB8EqzfSvLg$wAdN8WBS!)lP1#q5(;ibBG#P?a5NOn>nnf zraGee4L&&>BIjLb%rwhV(S+bcptO5rmyQ6B&kC)%toj&niy0#qcJ);+WxwyEUewpsFiQ<1^_hnM;8)6?86E*`j16KmiAkvAKC(+aM%u_hB-Wbp z1mA^-kt;PDDXJ8}oZFC8RIWr5ma<$O?3I2q7Rqd~EGIERsRPOWI46eeqURDmbq_Yt zF;{QtbvW7LgacvAMy@O=xDw)j>xgTfh;76If`Oc^QFb6bFxd|xp_aCQ3T=9wE>`WQ zat*aoWh+$*R(8JBK_8^Xog`nPO~fZh?LPkUu{lZ_DrG_o%EJZ48A%&R?3ntPJ4t>- zTSM}Heur#v-*&fGV&M=CK-5d}8nkbsp`d0fZ9WoZtXNcuvkTHWWFr^JQsT-MKF6f! zk0C2Yg%Oiiku0ft#Z8eSf@F7|n;7LzvY}z%ERf8a|;|YcN2Y{^~#HoO)Hv-vykMhavnw%h0-2jR`^O; z1gR()2>~l4dkO4AL+Zpj@2D)AGmV;y`#9X3zC?U_0OD9z=m@RelR(nJfLap0{e)L# z5h<6rN|4z|`NBM|!p|2I?3m_t3j8 zq^;uHsf$PTVo4v0T+k))OSN5@8In*jK)PMZKqs2&d>03;@6kwCjlDhwCwt_y#F;Yo z>f_M5pM>4*8AW?II<2Zs${#$RXn;RRV*$fZORK6F@(0c*k=ThhwOI8d(SD}|A1$iH z@0hQBBK{He!&4$W*?{Ks6?LX(lwQ4bwe)OV-}xi0)DP+Db*M~R>{e8sAS-7` zNL>-wL>kYsNeajmWQ!PKS*34?PsBjwc%oH0rXq_N1cR^B znV++>F5pcHcRr)T;lAK2q9d2x|H#D~Lr}2NyM2=BP57W@t#P8eq(=4Y} zhSWJWv`#WK!l^yG&r>#LZ+8C6& zkkDh|v#NX-+8ODKi>swUHbKzP-`Ku%yG+dOg;|gBcf+03~Wp9zx2uZ2! 
z5TkexleDWbDtaoS3@DduIwE`h{2`&yW3n+~TER?w!a}$ekKF`u*u)^tXW_l9;IDVhK_qqfH@~Med?sUDS=Hg($PxLK8zS z=w}yAH9gcOoiq>R91bhXheL@xZl5P^nptCMeW^ZyR3exkA%r?snZ8T&Vz@s2+$aSB zJ9hL~B$7!8pf4eYs`^%pOGVJEi{_sMGNC+t+6px)IusUlHb?J-M=Bqtvc z+yB}3)(Z==S6_>oywm~|a>|m_9f-vY3^yFA5qNrMVnbCdm)$aaIa6RT4!N5=_QHyg zPUAbppQ+t$N-P>DGIX{8Q6KbN0t2pw@sU&UmgHQx0ZC-Z#Y8OPxl~Q;RDC0HOHznR zM^$#wP)?Gh5z#|#?;!xCN5sH@WcP(!P&ANM zoGd+FguPmoEzwD&r7D-!t5tnxY9g&vdxhvNfqegl0`Us3!8N52Mg;O8k!(D9Z!pMis83&=oiw{$0dC(#%CN?Zd3-tT2kz-MZjY2=uyEIGU#nCOL z&Y?#S*znaqKjhTkH->(uAi^UNBrqeyBu}nLvt9K}^`3+LAk~8^OL6LQW?etwiH}u< zwvp{(scXEbsmP2ObKRizuU{gb$jAKzHCHJe}NXjn_J)3M4=_|wf%dPbcC1ho) z!z5j3@d@fa#A&rhjCS!@ROn+YA)*XjK=jwb!E$vJA6=6sm7Z0QhUyPI>5tRVbKx^2 znz%D{K$>J<9sQ@(uAj3B(%L3ukrk^I!jUlrR$yWS28Zw9_o4Gjl1i%2={vCEJMl?0 zE%n@9%0ZTnDNC%NJ1~98U1Zb<-66>d(aFwa87XPBHYZqjmswFC>4S9b4c z(us^YCqWj=n(%&r52>Ia2TN!SVFi?=tPxFrDFdw3zueaNrV&X*P6rp{CZt$a;#5x-S>0e&oA*sb<{(eLwH5mx+Y&>apc*3zON3Bn#b8aWvtTm8$&|uWOL#MMZ!hOX#@3y zi0T=t;*a&E(zb{soIHQPc@%U9iK{Y6I+?W4RIt^LA`!5WzpQJ3Jr^vIQ*6vP*wmzJ zlVnGFN-42|hs;D)O2eefP2)4bu}Xv1(b$FGBotB6TC6m;Bo%ZsS{LpCqY>>wX!u~2 zF)M$r_vB=MpOhY-%v_{E;@pJlWur1w4z%P6lx!-n0CbwTJBA<~U~VBNFw`cvawrmb zy_&vuthR%sQWZk-M1~ZJk_ECQ$g?WCR~3(@!W^2(b>yNd0*RCH+IVn<&r@myc!*J@ z@T{!Vf@)>e_{FUt6A}f#+L)uH6vJ@q$KQxWW@b-+x!BO!+JzM4j23)2vcQDGb<1pfR9=36Cgot_7`D zZ7TDyh5Z=xkD7sFRQ6}7|9ipmTP&;jGz3HXPuaS zXsd^`3MJu6bS=Foxujk$wj-ILN)N=fORdON5wk2PeQ76u;gXTOoW+t>4dc>G&q@)fRptnvurPMD03QSH=p(PWgGHJOy6vbHt?&$TH)jqOa zy1#5Q>#-Mgk;brRy2_y#r?^hF`-1Rg9|K+CZ?sJcBmGE){*D|$;rUL~iMF#$m?$qJ z#+nQ^R*+}>`7RS@c9gW&o@59UU zk$PPqx+{sWYS&0sB-_1on0cIAx`(QOkpr+eA40mNSsW&ZKpkVGd-S$Bs?9B=kR#aO zX_Zc5iL(4knbe}~?|QIM(ht?Mb6!Zs?PgwXD-9`rlHba1AGO6qS!3cMd->NQc$e-w z(y)&avDFz>q(OjZpFk9vQkKa{I;AHv(qxfSn4E*+be|Nta-ttEhN7uKsMaHngPIVp zPs2wBO)G?nhznG?B%kNgw88b`MiYk!P-yZ|c1`#k+8WwrmoplY-4i7#Vpu)ygs*3s z^0|o?27-})I6!Tn@{AVsBI1+xBu!)l;<{+olYdM+ye6s|mT$t*=JnE~^V)~!E4rHB*YER)h7;v6ZX(nY?9sM(4{DLF7EDvJA=Q)aN(=D<`Lz+r+~u(p;5FL+$Qi 
z3wqi<(Q#oOlufvEwm&Ae68Nsn3{_+!RCW6$%avn2(wmg~k^!hDEk{deoQOfPcGBqP zmBu+K%5GMH94nxAtE70>kE^NNT5VfXfr*K;zDAGeuS6a>k)d}FrEX`%#C-T?59mS4 zZ>7r=P<0P^lfHL~V+w7s$irk5j&>LEQ$3o%mXhv^Ma~lq2Bj~m&mp<0)DNOzdc7p= z#fg0v3{=kjknT`#FcO+C68v#?r~8Y!K)jvO@ND%ieAn)k;DH!(h|)fwH4dIeog~0NSa+zZgF7Spz;F5E@cZXdaV6P}d=0byUdC0ad7-&CJ6~1UsMSn^2fQ#q}UTvRf(J9 z(tKCkzPK1^v7rx=?eIk&%bG@T#O8>xlJav+DO*UR?PfiONSVr(5Rpi9n-P;M;G@~b zQyp~XY?tN+LC40!8{9}vYGac7U9e>OdM361pJdWZI6~R^VM{aAchReEISPbrvZugh zIVLG?Lexrnyxf;SD)~CupzgETxT0spXK?mBLP{cvcj5b785L#CP$5ooPPsd$wx|WgI+bzI zjCWB9m)aMRZ>bmj>l#iT|8SGd5tt10b@{Sz%Dh?Hd##Yh&- z9+Gw9j_jmR%gSSmjFv4CX>gK9tE}Gc#Gpnc+^I)EM965P++95yE4Dfj*VKj$#v^vY z=9#2TC=i#yL$ZZg&dDU2pW&ySnz~aPNT88 zET1726=^UGr+(5X5G1RXonHCZXDH;<$scTzyxWIROT0(#5*$xKJ>25U(QbK&Q zJWAov%q`Rh%uEs(ZZO8-ZK4`>!%XD722!=l)-gZIVjIMZv7tzK;$RFaKEkCO;$i}H zi5My~V4vB}rG~IaspU9o;3C6NcVqLH*zqTI zRoZ8l;*~BUBSt|8iE>tqe{X_?MC14z2g{izf~Q@;UC0C&F1vp!#N*ECh2PefqcVsN|PmY6! zMa9Fp(FySQm_&F=Y!bXUE*U-=p8|hQNQFsBX)q=^U6_&qBji8eV)!LB3qFvR4ew3Q zfzvZ`VQyv~{46scZptcv?(9OCl2ZiV&nbpi=a#@(d8IHdzYM;UUk)ECsDMutR>BvH zrodNf2%I)DBqK&KGLbdm0dg9%L|lOOC+jc@`154FzG5bp&2+9fiBC)8TB+t{Du!*~Z~&`waNE{Q$Up_&~U8 z_#imwI9T3s90LDv%!G@ahr&s&!{G6*!(p|17QEek1Z?me2{(9-g1>lX!_&M+!?bC~ zz-GfetryqO4+KpmyR# z@ck3l!+9rN43p<>fVa%O1p4M(3ir&r3|=ySBkVr;a`@uOo8aZATmh#nxDr0G;40`^ zxEUs#dNus))N9~lr(Fv#JN-I%#u?Ycz?nC|{6#mye;3^Zzc}k=xMT4xaKqWR!ULAv z2J6qc9mbz~2YmJ1JK@@;cR|;(yJ5-ld*J)a?}aa&w*_upaUYCZbw6Bj{sZvP)epkY zRzC!1t$7%}wdN7ncfq6Z^$Q+@H>`aezI@?UIA`4xFy^8s;rff7f(NgE8pdAy476={ z7Uo{^9Q@>xZE)SC&qLQ`FTh_fdlBBS@g-Pu`OEO=O|QU}SG)=jyz(_T^Q!GIee>(^ z*UdZNn^(U9AHL>I=(_eTIB?xN@S*G8g)d$I9(?wO_u-}+KY)MT^dY?T=8xdwTRw)P zx9)@)w|xR%y6scA`SxA#usc433-A0Kj@|VI^xgd>oO#bz@XC9?hKsiBhF$l41HZiQ zTln?;-@%3lzlVhn?SbDv^aK3p;U8h@qd&o($9{$M{B!fDU{4d=h`5A1sBU%39|eej%D3}N`^SHqy=wQv}*Jp#VGU7Y?U zuSde%9g|_*8&UAGH=<$1n=!EMtyuW`TXAs5+wt(ycM@RlyNR&$y(IX_d&%(D_fz1C z4^rX4hiUMLkJ90)k2BzzJ2T;tpJYMrr`hnCT{&>^XSwj9&-37(FY;mLR|U}bbs-$w zT?D6pQw*1VTLNGCt`ru0Uj`rjz8t#tRKV^ZD&g)QrohX7tb+ERs^OBKYv6mo)WYW9 
z>fk}Y*TddFrozNO8{nQl8{zAJHNkazo8jueTcGEkR(R;YZSbId?TGIfLkB!FtP|cF z-UR~@-Ow_r2YMoV;jGDh@Ytw+xH@`3UdIf=V5|l+c};2FL1q1JaYtnWVs)(k9w+XoiHRfDI(W3|(u*?2lkH=P0Zn9hXT z&5Pie&9QuvO28Js=597Z|LgU2~mzVNNMHlp9J~Z}j9dz9 zMlXYj(>KC@r(X{L9@_*Ljb8z)XIu#%m~kBD`pyBz!^H;~Fx1LJ!r)7Xgu_#3M!>d1 zC&BQ;BH@n1Cc_I4kAi)(qT#o*V&G**#KORlaj^8Lc=-KM3Gk-biSWduljQwllHnW2 zq`)=Drow~fq`~In(&6{VWxy+r&xD>6vS8wg+3B8;B}`}!rg*Tqt2A3^uhgHiu;A_h| z;q2vI@Qw4j;T9>z=PKe!fk6b`1}P%xMr;ho_?Vj9=y&1 zzg;&3S6pO;pIu~w8`s<6s*8u=@f#d)&ju%a&e|T&sT)OoW`0^8QL{ODhJ_Q(6+A0HbCne%ss!8bn% zhvmB_LDy%IFyV{I(EMc-y!oqWxb5p0c=GO882Ba*UiNJ~eB!$V_|^A`(6A>7-mxbg ze*QxSJn_d&nDtW@T>4WsbpM)15Tqn$n?}D`n-SFpx9{6=)FYHR{gP!C; z*q)-n*Het}@Klq$pJs-W(k<|TbSn(Yu)!-c>~JL03E$0h!F#jZFqrLui8)@lH)k3= zF4qTt$o0d~yZ}5ae*|7yFba<^oDP2~9D~;tjl&hiGoYvB0GLpEAbhd(5P4lT6PA`A z3I{6=hnH8(g7YhnfKgM9ggd7k1=m*1hAXP)!iJi8a8J#AxTW?KSXj3JI_eg}lKNBO zrux(1@YK`clT*)t`3+~na~l@H*Bj1)Ga47euNu#W7d9<{+0Ey`4bA65OUqIi*}4qg z*t#4V+s=b2?JM9*?JJ?bV-#s<*TN6GFN7ENTm*0LSr4tf z7sF?IH^7m;OW@;um%{G;%izQP8)5FiRq)h-&9HXxYWU^gHE@G=Exb#+4h|TvhaVYl zg&n5b;M1ntVZeMRJjQYt%pSTM?ijiUc3JO*H(9s9fbBl`o$UeGV1E$qwm$?f8r}+@ z8-4VI-iB@uIJ#0dmH@G{XD$e^8#GweHj)_dj(!I?N!*~dks$aZHEc|*WnKT z4tQkX4fuQDO}I7q7W9w24bL2X2X33b6V{A<0`DIC6k5l3!AHkGgEMA)4igXf3T`~$ zYdC)3ZkTbbT!5fSi{BPYS%j*5iu&YleS9vuZckBx?=IWf?9Tr4a-J`N_I5DzO(On^-% zCBnA3NpSkSWO(@e6gcnXROme=4Ia559iF=|10Hi~COr1EEV$eVQ^IqTy*~F z@c#49fYVo>374-~1RuQMEI4oNVrakcY*@H%3H)i@Iq>O=&V?J+FNO0iUIyJ8mc#N( z&V#>QvI4$(=}LIrWvk%Ajpsx6<*VV8O>1Dp6&Jv_u2>6iyYfPK?Ws z`>wedrd+!Les=98@S*E2g%@0Z89e-kjj-d!%VG9So8a#^T>)Rc`AT@>Emy&FZ`}+} zzU^wb`1a!1aKq_$ltA~LrLgR-GWgqF<#7Am74U|8D&c~Ar$F15Dp+`5HQaMw4cvNv zExhP~I(W>3^|0@usj%ST2Dta(M)=YrO>onr&2Y|RgK=E*F%1T`8sT$Km|*QwX86q0 z7HEHV2;Tdg74F++lXqUQ!z*4Kh9AG=fZ?w=Vg0KvxZ*W8e0jSEM(*&!#y6(Hwl{q+ z`7J+u@vQ(n;_V=8eP;yjdS?`Fes?-_zBdM^zCR9oK9~XD`QQNf=Z7c3bsx=zy&unm zpMSgn93k3_~a+2$vdB(4y$)9g3s<6jAu1J)8IXy8R21{n_$`(X86<>7C86I zAz1d674G=T2A6zohb_B@;jY~dxb7P#G=1xWKY#0n*L>%J{_njoV$U>q&mJE<><2$g 
z`!N8Y`Y{OS{xky1ejbH8ex44O{4xgb`E?xb`fUbG`{Mv;`tv|I_pgKCHG2<+JN`Zd zM*KSyw(L7JKEiOA;V`%^>~Q#0_$>Hy#1XJ8a#2Er;m^pk;7OAg!}_SR;WtrB;Puhx zzyo8>g{iSi;d8Oe;AwHoVQ2h#Fg#%etWH=7T?wn;&V=*fYl*Aj)}%G?&g2W=RVi!X z1*sRpv(nbV6Vfk&hh(gW-pq^PVAclMn7tED%J~GY$@vr>p1TW%=Y0lW&ifpmm%kt} z!tiYVLO5D@D!i)bG2Q6?8L+49On7VgS@7-h#jw2cYR=i2Xt!Qs1Lh2w7cr{f;@wewzh zlWPm?a^D9(a^DZvcpiWQ-Us2Y-iP2b(;kMKeUHGk{zu`uz+>>*;N$Sbk*!c0eF6ri zKM5nppMn?8cp6@Mz%%gs1D}N@2R{doIAj}qe&+M=nnPcJPapOoeC6<$;Cr)PhVe(f z0*^iFRrvJm*I@lI+u`W3ufyZ!?0^p+_Xhm*_&4Fq6aIj&pYSJaILVNNcAXRk`{#zk z&*nzJE%PS9ruoruczz7DpBxKYPDzKCost3nIwcd1FUW#X3oGH4g;QYbsRJlkE%4j3hv32I*kIzhc6sOAVfg&g!{8Ci4u?-Hn*}#4KMGpT zn+;1=oB(rIo(OMWc@ivJH5aa3H4iqPKOgQo{}g!M>a*ZIs~5xnhpe{&jH){G|L5MB z$t01u8@5KRFN{d7FJ-&yOQo&~lguPDWRgiTGYJ#apWQ-0aQ}^9h^DUXS|LGFrCSvd ztgRYfK(|GK{Ykf4cYm#I;rIDI=T5@zA9B9WbDr~@bDr~h z&%JZ+IX}GP7I19!*TC1maVz+Xe_jsW{>>HOLw62>&)qcy&b@mjShwaj@P}(wfnU34 z80=pAb}X-+$~Ouwvr~_{)tC3m<<39DCwXaLRWd z1JByD0X+T5jbQ3~kAt^8^#u6B)BggFZvHO#)Rs-)hVMTK{?)VJ1OMTT*|ulE7e}83tG7P~-mqgU`1a1{!Qb!tSMc>0UjUzYX&d;p-J{_C z-DBWeFK-85du0cB@t&RFjj!$k|JM(8gMa+N%iy`My#hY+qdnkjufGaLfBXaR{U84j z{K-#V1K-{IBk&JzybjL$>Hh}b{I?&2FTVK`aO+!p!G%A21DyBsePH+(KLywR;@`j< z_rD2#^X<35Iq&=oyziGk2cQ1;Ux3+n_k%CKJ9=U`RC#a=Jn6md;Ms?EfHQu*6HNb~ zUErMeUj!E(ehIwcH@m^1|9Ba^=eMtbkAAQReCESf!QH?60r=C8eh9wzpRa+x|F0i` zmA?=5f`9P9PyN9MU;jfGy#M1$u=kHuVCYZP;Oaje2TuRW)7fiii>a_X`^lxfDAC7z9+~a*?&v_$}{*mY>MR!spME^zXDY6U zEb~6{FY$jr)q8uYcW|o51Kg^q-jb=_@~Qrusb0fWFEQ17z<DTng8SYr>^*8Qg4KGcJp3!L{OYxHeont^=3H zDZL`D6W4{CgDc^>ulvHge!c(u1z&iVCn|C8s_D?va&n$p=FU|kr1r6t2bK&Tjw3AaNaO@wMYea&Qpxy; z_fSRih_=p{9=>SAyVg65s|ugu$L-x%Quyu2a(|njM0}9f5%~^B@1R+#ua{H(%Cql> zKlX)>f?FeZVBVfRLqjX)4Dk>6+6_ZPs&y|q*?T9Ts{*AwJo+ zwvW5mDX0QP6s_?4M0qG9P0YY z3>WUtz@Mb=$nU86RTjKzQq`QSan$@~4Sur*YN8r=5gY+CVzpTeA>(#DmDo34PrIG(37jC)Zx6(k( zdPhy)a`!K17Cke=#3f)p4RUpqxz(&;Rx8sE&B|G{RP&CSADhi@wj;N?an!<@NY*eD z49}l4wDK;S8-~8>Vi4kzRPl(R;;E-914qqo8Q`}JL{<3{H2CZ`8IO&HJyM9hKrrA& zwW>zFTvY?Ls&Ul(3oSDQVvavE$H)_K`3UTA)cj@#ezQYVb~wtVCeU}(`PF6RSC@ra 
ze4|09Si9B@krhF>(o?v)a3Rol)cF-Ie-N%(!&{!E)G0JNHIIp^DV)XjVvY7W0qFPpz% zsDI&#p%ul2L&dJ4;pn>6M6Y)xgLsw1f>ylZ#89S;%ltWbjo|CjrC-xTB|=A!4@FPW zaAMS33Q$X-Om~MLB=ik6cI3~1*Cn)I8 zUfwJVp}8$@M~@Hr?~-Jvsk=5-G_|#Ag*H?#G{QvLokmu%yy8V<<7Bdb_K6{HhSe7~ zEho%Ki1oq~kt>jgx=O9BJw3xGm5O(1L~P~X+)XEjVymMUoEWP5zlLFDHm;cMS~=jgGF}>eOC) z+}VJdW3_ssaWwb?#`>^YiR+8{tS&PuiKA|!mtPZoP;wjhl-w?N(Ad{qF5|3P*`-pZ zTsACrvYRYiyO5^&$jEBMQLblQypV{NS{u|)FU~IVPK(+TV-gE<3k9b|?{HcH<1W80Ekr?D z7~`}8##!pLWT;DvD~Hnx7bf5UBPkIz%KYziMhl+%q1;HG#@1B`o{2B=1x z;vKcBvx}WpNScBvbh$r|v#wHpHr}y3V@+ylp=g&2(V&x$vu+|%AJ;@A%6P{{>qJH= zwNd|a-I*opBE1H0;B;>G8>UMO4@{yyP)KT7JN)OGoi z{YJoWgL%GV=8sg=Us!L454pEMFi(+TxmJ@l;J=j1457j_d zvKA3j@s&mX*_VfE(sHr}Ivdx4+wCBbs(lc4;YyeDllkSLg@WCF=JHV3yFB!Um<68i zKXZBLI*|p%p1&{jEE`(5wkI|9{EN0+ZuN?y>QOqUN+0k)DfG@XI!808YFA^c3?k4H%Iq7lFGwN)(H z!`o`SmlnEI*ke(R&L_(?4|M)s#M53uJM5+#Fm`pvbq6 zvT~+=0#b34^Lke~8u@mai#;8E$GdFL<)Mlx0izRg_!Q*w5Qjy$kCanMDp>VOBv&(6 z1@&~5)tq;g)tz^h>U+7XiscK(snr@$Um}73QpH40JQ}{AQlD!iaw64dUPtxiT_dVp zW#+2uZY$<5xZuLZi+V09boLbU%jQi{;!{*jyj?1IX^QvIl<<$H_>EIuTQ$Wyd5ZTt z$Ze~pSY1x>XG~e*y@Jo7#r9TA(baXzZh!l#DROT(=5eo$$!a`wX#Z-Joel#-oX&kp^ zuS9-~(1>@!+?qGSPevZ!GPlOt9x!i|IUcN{U*a38&_%0;zWUXn%|QX?hD%o8%HsLS zc=$9auFa$~mg^rS;vViB0i74ng^qgv8qk+TxrWV~>|ejgqJ*Qwi{yZCt>8hyY#@3r z5c>=e`GnvcL6@K!Xek1%f;kx=++K`gDXYtIgSlr_Nc0y9UIbL60RLUd#sr;`)k*e> zVBX|t8j7h(S?D*`JBh6e@l ztqsiY47WBozUs?c=IVrv_?Efhf88<{ZSQwR!lx_>I4z}-#h%u<@# zNT_lpED*Bp1%xcH;%`&M9`&1^CBtUf#Uc$Fmlr1!@ybLrwDB=-y%1IIf( z9*`wkkqD#TV;zM~FdU9A4}#;%!{M%;zU7@=3m2p)3nMx8DQ?49VSDe0$pL)!4K^8u zFU!bNnFqpS_@E_$g=P(_;`p^=FQnNdx|vgQ-jzi#$>-IO1g>68ry%wmkQ8Sb#LkE7 zHZP2KrLt&Nj8s<8B4`CX3@eEGQM_n`#8oN~EGoa3bLE0IGfk zL~b3C@D#BTv5Hnwyi(#*Okg;yWl9oYEwh$5ti^jifVFsWvX%z`ih;F2i=Y+A;U=st zSG+E2b zth`(U5=Hh}s2Y$Q;(#?E#W0C$Kow0{r~&Es{cHQzY6NU8_7AC^{8tDA=pOBG0J6VV zf_Rm|rg0xc|DFJjGRSbZvTe5Drs-I1^1rh#n zmOjvoAE4*XFk6ja*UOWueo2s{^r-?Kwm?Yk?0Ka7;cM<0yz!ou_Y4j8DvQ4*2#8jV zO!-vRFQiT)wA^H9%P{RGwDzk$%I?^x$HLjIS#v>ZMD%;@DB9gZ9ys%{Ld3m7W@ 
z0_)XAC@l{L%(j3b^{GzaDE0NK`kc^sZhf6s3}rpbX*+uSnN?FYTpXdjCc98a&2Qzz zZ{_6t5e*k3vdl)A5%UGH5r|83;T;;hVL8>XmW%M(PlYO8mjw^-D*%Z&>gra_p}Mu! z__=HCm{C_>)?Cz^!l^gw4UUrDPt6J=_$~-y2h_6U$&h8~DAmG)x06*XMvp(!i)vbO z#LecWu2}(wk@jbl0V?D}7t>KIjHRTmb28!9W0bUG^whLx1EAkpwhr~95x&69B_iZS zYd(a!qt37B@+&FZ(LL&OnPxTEIh_ZBA* zDzerqk3T(W2o9;K|t+Ls|i50!x%SK9JRv=AO}!8j9zMoCy6qLbv$-( z-zHsZ2c$MwKFDviL6{22(%(X<4Q>Fa4Tkv+fZBlj2vBVh(J50KbOV#9Hprdauz&9S zzo{IozT(u+oHKOK%AHrl388|>L&;bL>HdKVk~Ks(a$_p3XzWIn5$#TNw2QJ4j7zV?y?LiJQRslO$r4)=jWh9@PxTE((`2&vLp;w7Fi zAYj`KFq18oAW1fPhIF%2X2<4Dyw}7gH*xQp&Hl~QT4w@Uc?uON68oi*$}EJfTQluY zj$Y_UK~2ReSTF@^k(wwaLM#52&O|4oc8*poOFql3;g#yw#`o zuPZ+PH+;bzDO}GjL9)=c`92yUrTn7x{x%4^$HZaoV-r1UIA3hvyy;)@Z+cq7$QNA| z%6PZIX3*rJNY7^Gl)~mvq++u+F%g?iZD!YzPGl#^gM_`#Hgk`b+#LE+#l4$Da#d^b zMBm;VlKWElU23h5Z6>0thGt=BD6}~geixTU;j<{Tk*3_2*ySymAw@^pbB$#4DvIAh zMxK>D-X)vQj6Sna20$S+Qqn}C_XS<-zN){txa-zOven;QbT0<;Or-0oY9gN(->%;jl8QF=#AMeMTDGpXg4!?h zbTb}B@q~=-#sfSyRV4fpDQdUuL~FWOSIdEc3`kzGtE)L0dk8qwi%j+}og96OU-JX6 zcQM<(u(7LD;&nB#YFw!++Ay>ZS)Am4hua8$2zfX3A;MC~5_O@-f7OM;f1b>D^X+5|x7BMW z$M6@G=cLXu7X(X+!m3-7$y8$l@0or+nY!l2!iu`!C?e9HiVb(;?u9=OdK>v%R7bB( zli8ChW9)w+daJJDXkE;&tN5Uf%A@Y6y2^X&Dn72WjhEB$RduQY75}#s+na>SChxt< z?RDLpLd=#}rMk0fYdTRotJa$oFt4qydjq}5KTw|V=jtLi6TL_OQe8dQZ0NdS*@`cB z(VeDxZyO#oJZxBHxWsU|(4S*`gJHt(fjVBJt*dy4UdKmW**dSsTjO6tb6jqXa=FUw zU8_d&BDN>1n8~t&stq{m_Nbcts5eK7B2pLb-SA11Yt)xc zvh*NI&xCQuDB;&I4q4C@X9w|ubo|d-=F)+6ozyYJRF@>~V-{BlJ{A-!e2Fo4kM*s& zaan(+Yv7vxwafa~UyRC*K*hgYT{is9$*<|0;-uMwhg8@F$^u+w)VBHG;pc6{?*!59 zENANGW_5UfpKGIirqkMF5*M8lY8VW+ShyXzVm_6XoU}W-`CUPnYgAhhEAFLbON~L@ zd(Z5%(csS1FqygsK|va;k2&%c`>rRgp$*F#8XsEF|M?At?b{c0md1L?Wc6h4VpZyD zoyR0vagpmX{!qo!PR4Ykw8NjuF=@yZ>v#erfl!l`n;tp|3V%@@m}$a4+lUH+h6+Nc zn?6*8^85)G$y$b5HPwX@no`fC0V%yr74p4vjPLm`UyX);DWj+6D6?M8(X9;inxjl| znsnJGFkKft!P{7MbL{8oOn1GMAN1bdv1NX;`0TLW>*Qbmmw)h80yy2z-n@>@_uiJ? 
z<>9~bHZp_O9DV*8a`CPrQ6_l87^F26jBE@ps*9qTPt2zO*M(Ew_xX+N5@U`+{RfZo zew3Ylos9cSl#oq>3<{f3>En|+z5i=-%G&`w7|_E3tqSOpfG&5`pA+~S0-6Zu12kR8 z4Xd3-{6C4*vIKH(A;;FEMRrK3_yQ}g2@iXmvcVkhvlGN!2m5bu@oq~)y=%){L|d3}KyE@? zu^5e4X=e{+GIn)_@h^95oRs8Gr+~&C z+tPcUEw7qY_}{RjUPfE${DzdQrhe1NRO0_yGo4P)=%x#?UMp$pMyg9mFbA5-%GaG7 z*~-@m3nnWx#Rb>R5Uix?2$9C@6DcHW9>$Yd)NJY}sR5WPJ;AcY=eQo5=*o7Ug^4hjM6^?~+0$)aZm3MePQ|nbrD2Ou)j^^@!L^V|Ouz*!I~+S%D#h~FvD|b! zCdV;!xgfYc2;?ZdR4E9q4+3)uHg$EfijT>cJ^JmzD0{kRKOwHO*ydlxYF}_ra9B_! zSRz;s_;bWH2oiz^{L4Zxp;QXUh`|+cxtsvUxl2ry?8+~-R$RT9HbFr!Ti|~LB*k?I zqU(XYIO2khf@T4aMvBENPV8T~cIDj*h9`aEiqNF$)yS*Q>tg5ryfSQ&o__?eEYtFK z2}%O?T8)pZRxP&Ns}@`SRg0B5FOcv;j#0gdV~qbx$7G{h113<(mQ}KWO6+rch_Uds zRsLnXbQJdd%k+wo=&89}oR_{h^cFmioJvgubh_3IAM|i8OaKx_Vtw-E-{75KMLYzn( z;dcUr3THpyBV3{Z4Z(&=|hT?OT#;kCTjU0h%ztGML~wn3fc{dD*8p(;|+0__9 zlqA{Hnyq`?;bVKefA7q(AA9-yJN%K00q-~VSm0{*@vK}q`uwEhcw}{H_Agmv4EwJN zXR~I3KOzh-HhfB0slD-r!`p=&rW0Yw6B>@4Zt|mc`SYuJq@nAeU+phl5bz6@vpPU_ zT|oV6o_IJVZ*0LhAD*FGC5yX&UyZi~{B_6hsOa(>vCDorm&iZZ{WIPL%lRp3JQ324 z9G>uE51QB6tzw{xXXl2YyZhJOI67-C)BE$MPC9veX~Hz3m-YSX1;OJgzxq13=>MKv z`H3>+772f_`iQ;HjZ)>yX0`WUf$^+OSSyh-SLgDg$vs+d_vw_1WsO^ltgc(a9c0CK zHm#~_<75{0)sDk*kM?pRJAHPsTQ^ny%M4w5drRPf%_bbrfg4=OvUQOsxCqV8)YtJv zY=r&FdCbcpPj!(GxvBH~$P?a_*xUDP7`tKk(oc`}tX=WgO=quM-`La7>fqNbn}X$_ zdww(DEf=)t8RvLVFuvO4E>-Qqed|K4w4M)c#PuAF)s+^W_84f~t0rO<72WA>kh?PM ze9g_6ccI-tU!|u*tS(vqsaouT1`JOSyh-Xeuj;W;^l!D4;Tc%)I78X<>_HJQhAgA& z?8%4<^E_HHmUN>2(Bv9_$mL(-y$3U4rSnHm(NmG=DSD(8Jw*@j6j!SzqIUM*`0To# zn@;MwzI$+_)DPKOkAI|nxNrj@UQmr zLm>2{Ue;H=p~(60M+jS7ex`1;Gg&~s4tYQHN$69(T-2c_^s<0}-rmb1sy9^m|MjwB z!=LYEH38k;%QCB%6;&?_wqDk2y{se1XIpP5c6u+rzL4Gy^p5tjjO*o^*UN=DB>-!~ zlY49Z+wf6lF4~1+&%Dlgh54VGKc};C-t|(W7Pz*{38)BxP<6qofy!VRfulMk%$ZOq zN0roiAHYN>n@(46uBA6RSX(()82w4F-hA%Wd(-HI&tXvlEgFC<$f;4*{nV#nHyrgP zLFM9J7NZorlqhyyXqDK+gXhsyao#L7n6+4z)Mz(Bw%5P0cOi~tHSQqpFs=%>1h>4G z1vRt*m%u&H>unwBtvy=tC!d-OpF>X67i(!Xd>6d}%A0_ye4Z0CU z*=ldS&)*xW{vz(ALNZJroE4rGc5S~m(KL*5U468h9$ 
zdK&bEz4SEb?R)8Id+BBH`MvZq==Qx7bT7SZFTHFpJ&d?JkRRPkui8tm+Dlj3OIO;f zt_0APu8y=t3Y)kv$3mfGRI&_EloL=Y>q=k>N6F|iVv1D72b7P6EuXYvT7pkk_V3wC z-`-2JV}+Vr{UDhfO7{2vaMumo;%`R7N4W_aUIzc_0eS`WqXYDc1M~{`BZN)#iezL~ zGBm4zd>!(B=#$W=4$v#0Cmf(xKyN=luQ))jfX^SGS3tKPpjRBAR~(>M9H3W_{vj9R+j=$j`Gro zyEt4r7_~`KiX?{?s+z2xs3-=lL3S6dhdYQnjH|*e!7blK>p>fE3ETs_Xqa6L(p^lu zySUHXRa<%Qu3GmaEaf=;waPnI?W%p#FuYJST8tD5#WtoT7CQX%rqhmsWrB~r>7j(2 zAR{M0iS`8U>=0yu6fU8#oEU{QD~hiKn#6@kj|K%AEZ`$J?#rgrs0ug&gcr+zHgnNp zF;twdkaY^_7x$#Nr+|VbcmiC~xZB~lEuBsS1K~VSZ`^jc7I8eko=!6ZSpm0s(`jkI z-vNY=0^Gn(r?n|{crxJM=1o_F3p4Y3dZSlI=hH$_4y!0un2^%a-Uf!M3~P1~>O)N~ zq(;=-s?HCT`j5QnoEpaD4Yf&}e531u%EbWlc+UcAmw(H-tyTX*N|z93scLl!hY-4j z;Gp2Jph~brupIE`h-(l~5c-EV-A%hB)`diy2N^yJYV#l@6+^-(4f5>PXf&izJ}Y$L ztP7K&$gEk(eRK@m$NNGF{%{%oa7F%bDgJN?BsuO(HkcSwL@UI@CO`sngqFoL#6yc8$Ha@?a4Xlok zuS4DseG>ZAA=(Lg!XZW=^!7uv*&*5sK7WY82i<;%wmU@I9ir_H(RPPuJM!3eh_*XK z+mY4|^o|~)?GDj)hZuN=7Q_2Y_ zHO+a5!FI@vIShCi1J1#y4w+T8gF~UR+~T=rrnw{A$F`60*Q?sJ>dcZ5l`6;{qWy6P zaffkLxFxvdhiHFj11^Dk;81PkokPz~jqLhL<;{_Q@;0LN?9}&<^>{Om^=&=Y^Zr+= z(&LfPJI1$l!c)tuYAwZi$%g;dVfNQn{K;FfpALgthWmIwrtA+T_`_xR!xj0%-PwW5 z?hmE-!zBpG@rO$g+Kj&c@qW6?e!2_%tNZCL(2w@hUG~#m;Exb?Om~Uco_D*Ndw`{^qC)m4E1)>Sz6W|L$YpeQGxq}Elyl$?O#(oN(9l$frvpRTgsb(Q_) z&at266hCzDr{9+QNhGaFB@|K>S{-Idfts{FvY$SJJBT}stHLe8E#FTcfi~b0xCbc3 zGq``*uMy4XlK*q;58Fr|rYUjDa32rbDtS1R;18GK4_D+5cV`DKI~+>!hf5HW;}4f0 zv>AP$1e&1XW$>>K)0ohYhH1=U8Wa8qVb+*#@&OmjDMQ6MDihjS0Pd zn8qAtz8w+*X}fpQx!;{x12#^v#92{S-ZPCyMt(80BDwfIn4SAmx zcL&X~GLtFyjTCQNbj_bXUoB!yk|kpr#`?xEeFJw8cNkZNTY_6YOy7Vu;1ak8hPi$W z>-qs~CzJ5Uf=9_SatB<+5D-0rHyRzbmnZkg`qoxMfwHDJ28PX%(M>DwUAc1O#+CQ1 z4IQ`T+KLzGSufP`>p;GfdMG7csO8LDz19oM8{hhk06Na3%92!>({Gm2;XG@{6w9>C zWIB%F##vqj4k9x125dRa@x&A5M00^guFNFLar}pQbI_&a-#WoQ9Gm?@$iG#{sj>xr zwu)Ci{X!_ZFE)E?&$Tc1^laJMvuW{`Yu2vb6hiUghKWQb*oR|RTOwdAtmv^YZ@!jt zxu!&%-%j|b%+3$DWTp9V)g#h3e7ItZcwzK6Iy~8GDZ0Mf*wLHM**i2p=f8ng!s!XI0 zQn@*iN+zZoAAMwkuiTu;m!)N{a&uc^Ty>mn;(Sn!ZnPAdb1n77P9-0GWP&%2A1s>{ 
zP%*;q-ti!J=ZQY@-@Tg^Uq`FiPeq~aH)-|`gvSWBaiL^r>1A9caKbC%v)b?;-Nk8N1F@sS;)D{p)5rm9ERdi~#R&c!)27$$Zq z4|B>489?L_YCV1X7jPrgET=g7Y1yHaUeh?iOiOc8W`CK5$rC>@q zzGpBc&ZQ?JH%Y)f;tYC?Q;9lr!aiB65f zcz-HbK*f!PbkR~4$D2gb5}Pw2@nmg%E?%GDpmG(gEf?qLRG7+U(l@QfT)eR?u>?%e zI=b?%kqwWn`ts`H_nUe91_J5Za#z1vPm9mwHi8Q|f_kWjj*ZRN93As)D{O|QM zpacg6hXqxFC4%Ju_oQ$Q0utp?M_f9dEOeGMvoA%l5bf<<*7a3(ed;gv_Z4p%-0i)^ zdB{NQ)Ywx-sxO2@KFNcHw?YgJut5;{r2i3w77Y}5!0=Y6a)t{!VuT-oO>v#`^*i5_ z&ZpFqN{M%3-U?ORXxUZVSkCTtgslpx7qkf~n@xv)TYaJ4hKR5NpX71JTOsLH&yY3YH@&x53JJn5a)mUXdFikSGX{nwu_eQzSf2X4RU7#v z4>Ir&S!JwztX7slDraji?@mrUA zpHaG>(E`De^V`3A-&>L59`CKz3_l{!=kU|#-cnBk4hjwnssu{}%K>^CT!VlJ>S=Fj z5DTr2o6{}DxOE)kDraPGdj5*fTsnNo*!CA+YFxN``-_X8fBe+h&u!c^a9plsU{l5K zqnAc6<4EdOj7lZriLz*V_3VyTK0B)C=x4^^F=C?pQGzckW=pczYQd3vdhPd zio8sR9dRCsyA&je{&C!!ukR>kY^u-mW==eMX-iWMPjg~=ylQ6PWBN#SO-dv7XJPb@j(3!k%(*u3%@)6Awl?K0uZ||( zM&d`@7V|~FGCkQ{OvWo7j6LFIV`r$CWKtEX)ios^{ghbVqH7ZCf4w$O%y?4+Zv;(8 z0TWC*y*bSbxbe1xsU(>+;%pgEY|0PRMqY^hp{XI$6mYGjOj(#Own*r&EC<6)1V>(I zO*GjZi(T9wzjk==`AL(q&6@`3Ah?6L!?-Hk65R3ux(2iXm%u%M^G(Y)J>N8a)Agmz z=2X)6ErxG#{2P+0-%b6+E6?NQT^3wx6Y#iW;^Z=iV%7P1*&k99TsZNd_ z-??cZG@ZZ_t^-GEcA%A%YcW!@Z=-P^1yM|A^OS709QQlAaY#0E!5AJ3w2Hsj>0q0h$|bJ3xbvsl|cs0<`&< z8huQyKBi_Lqul}8c}xvGrj{O4Q)9ypfW{tEYmce9$7pZjp9QGJEe$aiG3W!$MHX&x zitQN2L%ck`){bspJNnYy&!0D``lq|DegFRsJbrC&C^Qi1tX*H9N)*H#EHg9!L-L8D zg!%eny-ifvfm4$mnQXkiIB;qQD_*+Ez^QHe$~E4kW-bfWQf_u6SaUa3b87TFMT?#n z`#-TiPp=}P*e0g0__(j?Qn5ChMP5kqqgpVtX^zwEZZmolz2~y32CnntlYAf=*wn&H zm@QlZ#s#rIYp(xteFs0Jb}%hcsQ+=f`JX|-rAOa1SIn_bnnlc4J30J0edAm)N8G@s zY>KxlNwI}FywzqozVP4$;A2EKsmwDpSW2&4N~c^(pIl0pgnxA@9TNJ{Qo7?(9yY-r zSsH4kC}=Fkw+`A5eG>ZAQhFrxgr#&p=eO* zRb1*_wn-ef5$R0X)0~R8rD+2HH_;jXA+@~Y`1F^EjwfoFEvA+RLhs-Hc7i>Bg1umZ zy>NoP2=m{t9Oz??E2-N!wkd77HKpnkasQB7*?TUh7WbZ0*4}d|`iXzK_3qQvzx~tI z!~N5(Z=c?6y!y3&y85kux_YgDy85htdZY36iJrlQ{rxwtzhY9I>S@8UWmlVyN_;vU zFOqw^mOZk{^_y+q>qVysYG#3_x?>~g{Yj6iyZl!NX4&$P%#!>@vD^Xn)PXQ z_u*z_W;c_8;Q92EV2}63Me}QWX$ORZ0y-H;O 
zGfPj!z=OirZtit@I(atAyL6pJn48&nqZOz(tWLnPBm`4}&Ww;(!$I~I08PLx3v4!# z2P;+st%(lM;_*Zmwz?bWNTfg>+DY(PpeU~DpX6;9NRv$y!0>nEUkJp`0A^>hV1r?G z4eT73$XOU3s0e0L>*^0>|A4QNodrkGF=6Bz0MYWQupem<|sZy$tRz?+3 zB~$^ed{(>=rL)3?s?WOFio$nvFcd`PKtgntf;a7NRzcXAj88po4((P6qKTWMb$<5p z(9LQ9S41`#ljZf9L^eKR^P8Ztv1i`Ac|GTT%TJv9t#Hk`--^WG7ePOZ|NFQH@UMlw z5B(|h=5xOlZG~1qJ!Jomdk6PT+`r*&$K9vYqEn3{E&Qb5|3Xsl>&l_>IdRgBT}X)K z5S@wax2E&kNUUdQ#R}f;@99~n!MOdUU9VUusiA`%VA|!9SZGb?93v>JQ3LaOdK!1^esNFFuHEyvX3nGX&%#|Sqd*ED zzF6kMQvslw>0;?b4;n`mAjMRWDgdd1=nrUy`&F2-p$%BlFyZA5Zp;g06PEq|CC4gO z{alp+9#a8U#D+8}ACroTEm`CbGCJ}@jCl!)(f%J)B>D`+@b6Rm!IPZ-3$;un-oR~2 zLn?VuGTFeZzx~V3-580VyD`CE>fDVbXyrxcZcOEHu#X~w;_D)W=Zx@DLr}yowJ!W! z8Dt1jxDq^niRpwTrXz^G2N!!!aZ))+LFwkEo4S*goDf;Av)D7gc+LFI&iQPmRP1Ra z%jwE@x>=$GEPnJ&85@05=7LxneUq^0n`B;`P7pw}A}Lx%MT=il))nUxTyd_f2L*~= zajr6`IM)LS-RjFKYbpDEYjg1WO@EUIbx57zU**_CD!F+-&w{hj;1Uyu+hq36PPR`2KWo5$#f=Olc=L+JJUv) zqC@!agt8YT4GVS`uCFOqJT(BSgqm=LXZ1Alf8LHAuk0{J^MG_zVokb3OrJwc zcdAWIaYCfZGbpL1H+aiI6Wfq%U@p)v=rEVdc&3e-z^hh^9yE`&cPW);$hJ0FoOjA` zC;$_QVyh`GSCTfOhZxaA6_sl6ib`b((sUJ-%Ge702kQ(P{b~#q;}5D1P=%SAOtM^N z$(&*oiB3B%oxl}wnQ6yWO@pQ+l*`Exnw3K%iv}NL0aD4zp_1)FQuz?NmWq<$%}iuz zmy#uAX|dRxc3g`biM61|7cYPkYmpX1W+ceGKIQ(8(&D%F?4C>f<94S&b9<6>vyQCWB* ziKyyp(~gU*NBoQ=T?tiPTj%@`#LEd_W}acQKxtW~Yff)<{3Ts;c)Bw0_2 zd~*mWy?QxPt5;&3a;S8o!Z*KE#>p^fbfVEIje;B+1vIKpgend7n*tgIX*9^8(SSyS z(rA!MgB&Ui6P0ECu5mssItQ&eM43aBIf^nzQRc`IWsWLqj*_0EqQV(#DypnjysgA; zC6I?wfPz}(h~27){t=iGI+YKKF{P4el~AjkA>l+Uhns znBEfx zpsCTO!KSfhL#M1{A#0cD<)LnWy)hWlYm$CR9wQ!_nNM6+ySFiyLs z9XFfUvq^k5iO*Kzvz7R4ITD|(#GB+$X+nk1ZULz@$)UoAGJxr+mf_5ADynFHtPv=_ zCE!wUNmv?|h@z#qM@Tiw&F@S#&Z7>)CorB=ht;2a1;svrJ68e9)c=U_B$X)o zJf1>ST{Z2v3WjAh5gJS>oS$|akLOSt6C)+IwZ}M6Krz}(G4e9$Y9_3179&OWnnS08 zsw!wUEHE9q>DVTgRI#mUK2mj%f_g|nT?DWB6|eE|#_ZL(7u9qKhLf<|CU%ve`L+bJkhXf6=4Yn)Z?ezJG>+|SL>h->TeE4potwtpKkhKD3bzEed>Z%s z&<0!r_rNsXPMUU}e-7@7Y3D^w!(Rk9wLa5<^+_9G^USx`SGp5PHE%y3zNDfl(;1XW^M`P zZV44{33cAWlh0eYi-#g_BA|(oCW4v>OCwJsPa{twPa{tw&mzwv&mzwv&mzwvZ$aLI 
zyajm+@)qQ+N&~AI`NbmlD)z5v0ck?fZbiEl?N$j%g~M9XaScbynsuhlSC&e9ob%RAhm3!SV;0`|9H$=3`t0Yf(lho zsUk1m!WQMXgsPhnC_e_DfS@iC>mr#ha_J(Zi-0cVCFCXKCFCXKCFCXKEuFWpsb@-% z5#$6#fdX1Oc^sTBT$oOm(xvN{uW6cp+9{C}PCF%a+9`=d=Co7vhs*JYD^2FJbr zK8QRawbM@N!t3>VXNYzoHXhogxXvRVG0g+*QZmj1` z5yWS5r=9X8Z&m4@&g~1jG)1QFE)?&&``ePfBN+2IHwVUF8ZdT*u%^wNfMT?j>&TcvW>RV?tw3x? zA1K$9F^o=EMq_{pZ>F@UwCKU>tx&1E?tW;6;s#Y_3^3tU-sQ?O1|T<8pW{?%j%y3V zvh1`|3RuWa5ae4hCm^vxxgf?2a=}DBsX$V#J`q|jhcSh6IgA0&RNeu1*!etY0jo#u ze)OVZa$cd7DyfjAvaa%Eky!S^;G6Km`S&|XfgrQojF!RS#iyOpjFBv^$h+B`fS~4b zS&V5W8I*u>35;njm%tbxD1qae%QbHdK(Z>I<5;lMe`jakD)W2WY ztjte4rQ*4Y5?ks8Os34_%1i>Lfg-U?#+U|k0`hJs7s;3gvPTIh7s;50a*>Pyf+9Jt zpEG#X$x7BIU7v$4SW;w2niG&6+atM2$e30VMhPgF!WeEWTrS1{6J9v~ekWlj8EkDL zE_P(ejT>e%$(j?8QMR0$F%e!7f%c?>6YuzaoKVm8UqA%=s0fC zo#n;=6JF&Tgq?QE2e5f;W-W-Rd2<31%a?67CQme!fU?cT5kYuir1zj3w24^kcg}&8O`j~ou{1=<8qQu%S3;NIOSfs&=p6NhXCcm87KcS z0$3#F0vacOZp$N22c2@{ZsyFt3+hKf7NcB77U<4@jKCyxQyPeyCvqs66A+mfwE+t5 z7?e>0$_->pscf_{0Fl)|jw_W7HU#ro}#yUeuUOH_qb=hn{82=081; z-tie1Cd%)*Bq>eD#m<#XsfSr_RZc(cl&agDL86*Q3xzZ`fyAp`Kvk?WAE0(Es9p+c z&O!;x!mmgfL@IPdslVbfc_nRHRnts_CA^m2SuDiKC{6$8ITTUaGlfFFr)OxWZ~nYw z=dy)ELy{?gl-gx%JE@=xka5-w%qmZKnZhe>L9JqqT1xF>lu)CDYm+L(txXOL3FbXj z(0`TE&Xo@*@Hc9-`5SG#`5QIb{EZrE;eS@GJE|HsKeZ~VIu$LXJ3AZGx7{|hidM0I zOk%PZFeFDT2dh3UP9@7yB~_nuX^SdtQP*x3LM~2XF$q(Cl2|*Mu2!p^ybHpI`51gM z)#0B^j{eCipRDl)npEQn-sHOS_$TYC1Dafy8UJLha6pr5X#=02OkM21qMwX= z7Vx77(v4~rP}WNCsoi8EXr>CTR|amjNBp&`288NCONNtkE>srC?b) zDANH$13AXZs>QgELl9v4j~}gRP3Fb{tg00)|Rh z%{thkV6Be^wQd3o6|5!FU`fFm=7SpbfT4ml><4EP>~9nf&X&?_1@o**h!R0koo%X4 z%c(g%6nk9Hp$+gUlSt$xS07QWFWNQp2m{xJ@>cQ3cBrbZSnE2T{e8hK2sk@0{O< zVz=USD@wOic}^*mez(%>#(i19TfBzStYnRVz?H{zW9J~rOG68tK`np=t1lC$l(f_t zjGZlDk{Z;sHCXjEz^1N2%~`Mjck_lyqk`oI0?VYjnStEZKiD^)E6A5OZhZ7E!XhsN zJjw&Bxk?OHoG7jeh_d(?^gjZawJk-S6*!+&N02IkvJQfM)%g)7VxDtp8b{1rEOc`* z=scu?NRu0cSVD-KQEf~IEb3)$f+boYR0;tbMAyYlk}v5B$Aov!4gLLtg9B@pb#It^ z)7hP&ke|0_aq&HVo~_;4Dh!$}X7Z3mz7B7N=6S~s(#rc=5Izr*PLddrK|+eY+UD;@IcX$6ylOn~JR@`~Tr*=HI 
zW5@0>?|cfFF2);Exik=OM9_(2tHSO$uTU1@OC17S7mk^f9UT+haWVOL>~&;Sp9K=F zd7>oXa`|{pf(G*?Quzja(MKZ(a_mB*kjSFw-;{az`6dcz1*jLj;K|GD9jOd}s5!x! z0ouT)Le3X&hH1jZUJ#SwYo?}BBOeani|z-yimkKxxTw_H8kfnu0H4L+w6UqCtZ}l3 z05&!@uS`0|+nOjQGu`plQZgj4d_R|Dgut_@dJ`>Wd`O*>{m-#lT|0VL z^q+HTAu*XUlTx@O2^$(>7$r#-@xeb8D(0UNO5ifMB985B&)_LG;J*s67w#EU6u_>v zXYeK(z}~lK&>8?+*Pg*>0wtNU{nT-%79TBFe(E^Wx%Km7?lB| zGGJ5&jLLvf889jXMr8m{bQ!ksO+k=h3mLYMVG9|yD8m+I*rE(ulwpf9Y*B_S%CJQl zwkX3EW!R#;T9j9d@@i2w0NL1+cL6`FB?~)Ac_%MqLvy$^z?E+&{T1m)Q26TUJNZa% zC;1^;2bF9AimfmOgy9pQ@Ue3dLhB{V@8k*^S=*3HwKgdwW;pf&wwPSQCF#iUpagxC5-qL8hIs6R z=3+6uvo<=+19CKYlAXxztZm~*Cw2qVm!@Q!<4etAVJ9D~c4Sh7COQ+%9Lhw}mM>q9 z@-9Ah>K{G+-nAP>w~oT>bS``A&Xkgf`$3xCeHg8Xb;Yl+U#~rlpuG zIj)Viu`rfT0aGk%wei84lSSrE#E8rtj}zf+=O7j=@h-UNqMI)2AG-Bc**OdQ`eIxA z!f*EZnLaHe`~3I%ym{fz_4#Wek;n;s{)fCV!dKpjj#51G`96O!dj4Y5Y-PiPrea_0 z&8wnK-obz#4ro>1S$;O)m-JBuxP!RExGJ1v%loJTXag>Rd*Ff#E|gW}f2qDuqAvty z`a-$BP!Ysa#HDbpgvB=YF*f^{F8UZ)eat6)Oc;F(v_3{*A49T_@!ZEihySRL?=kwA zOZu3*`j|=j__Du`>4LD2`}mEnk56~{LRHhCyotxw@$LyjPtUS`j<4oONCMK>62;@M z@v>))h+>Mr2C2UW8~rs{31>oC>aUTF{u){7uaTYp8d>VEk*)q3S?jNnjs6<5a*gct z*T_Kjcw&35go@Fqb)Zj?X zv=3$Lhi2c0mhOjU(}!uL_o3PPp=!|o(Cqur?EBCx`%pFKqi@9Q_|S~~P*v!E$b&i& zE6az`S@4Tx&0@=jz1_uI5{nhc`wSc-#bFH_M#pxHj_Cwd{kv_$h7I3FbG`pJ{#*1O zWJ)fzW=>%;()*~yB$uNkS2K8Tc{)Qkpyx95u zb&iJn1NxMsRyV{eXJPe*Kb#Nrjyh_!fZQqwnn)vGqnusqi*q$4F1(D;#p28!uznO{VMxe`}pkY>i7Ho&$$!)cE7;<{*UuM_uO;uJ&*rkwsf@8 zzJwi5-_516Obsh*+?#y}VUabyOva?mNN9fe$1w^V;*ZcmNKge!9x~$zgdGsh*%8DY z_B`-GN>4Bag3&rcief6%mRp%=DYO)pF6Fm>HiLl}49wFBW;%#16xsx+fc+1|A_H*0 z7XrX%tVMwz!ZIiaf#1P0#7!4|(G3%{==aH{F| znDw!KkC6ocK7lt4gY}V67Q3Ybx!74eM$R|*Hm45rZ5e|X=5JM*T9J*nL}D{AVuS&^q*{S~ z69AisE70o#V448pD*)I$1ouY(Y#y#CgGquF=yaQb*vk%&=K)}r1Kd9V@CCX8{WrxF z0JaRl9Rk4K1;YVg%Wws{bO1KTBkIEdZ2n(?E*=0ghRA|RAc?V}9L62B?GvePwF0jq zgeW0HfISK-MS*V#@>)+8GIwqgnJ{;DsuegnRgeWNFHAct9}QeHXc{J<>xLsrjH6oQx_*qdV*;iOgLY&Bx^4()9MvM% z^6b0QaS4a37kO zTsO7IbySO7*N+?B*gtM{In?hsThe|^lQSkpm%U{2CFr-3=}0p=6uRL))Smb~Vl)|- 
zx=h6N>N0Voh`V87cE-fk<}xtDp;Z}|iIhe(NdV3n+{U#+3K zytwm4{=kiBattG1^hqW=iv36@t1d!h6AkxJirf{zoupBMg@L}#IP zcNS_NpyCI5@f8jwg7oKkQTy|}sQrP8Kj_6DWQi}0!vosnV+;v})Tt!f{;^L>19?c+0RABG4- zf!l2#A3^*0^Z|&Bn|B`{Snwr)r@}ryOq9}mEctexhk_o|B9WtDql6591U!vC;1h^(lI5QC zZ6C;Hbs#Dj|}H+>1{M3A6hnvfMyJG*3h+ z0HNS!p>0I*$%4T_oLPWNCV!v_{Pvg10|vX71!n^NAwF$2on`23(#4oPZvw-7l44dh zg7pq>!;C|ydD{n7R=4#$OjdPwqra557`_vH;EbVT)8w=!XEX_D(xu6IAy%uFYBfRm z8T^rLAD}ZsB<#`z5e0a}07L{pg#Q4qK*A$5GzbiQWRri@w#RTTPFeAsBoBFE?1ZNXMY_e{sI={TECAfLR>P^mfGIAMk1@K4x zQ8-j}TIcSzF0IOgs01Zzp%$%$kQdkB_|nEmIu-1%x@>ZPm6})xC2Mhhb=7N-3I>p^ z0*~ilq*lCMlcXl;XMG!Pzy0>!-RE!k&XRkgFi?8s_Q{zT43=Qjze%$k5=(}*Pp*yB zMjE$IPQk=}bNzN4Yk<>mwwGZn5007y?Ps@_VJwgeA)>%&Ar(SI;a?1UrqK8o2d-xb z+_EveVMF6`;tkdkC1w+0T#G`)y%7Pdr|sf)JO%iJaV+|`UfGVmoj>#eWai5+$T=ps zezd(Ta2Zb6i8hk){|&?Y+x>qNiX}oAi)#+`H8h0p84ZV{I1%o^=)kknH){{{IN>sk z)?q+$J3bv4oW{Pkqsyfa&UkD`ce)*)5=iHFjUhY`w&PjEUl8987!s}Dj>pn=e4A`X z7s=vo$G6FLJYa$;e25!{!BhA(pw5$Ma+r47j=ttu0IrNi_)kW`rZSDxkOZT!@p6IX&ZuAvvo zSTHYAA0(j6KlRjZ=YV}WNCq2BlEOslgk&Q^7GSSEokOWMv?O8G#`ze5-Hsjuzti}g z!7qSc7k=xvqn7}!#gBd-*`C1IaukRf4{dM3A;X#N{u>}-BPPbG!DBo+Dk@fA^Znh$ zFZ6B-Y};|@(AH7>jq&f$;WEa67gtokv`Sj`A~6zTqQncP2`n#3tB1rIqtbZg_D_*r zJ??g#h!=sbDjlhhG?0ljha(9x{DjQJTrL(R=iBPaFq&2|F2$m*RKxJ}RIjrM+c!s5 zOE|-X4Ie7;!|6<;s~?8VO!6PmKdMbUX+dXOT!vxevg8ed4zInjR8rr#nP8k&B-Xnt z?)qMJ?b$=VCt;-+r@W)55Dt^eF!80?7IYZllVzckWzmym88DHPW$}|`zzC#9=&!@C z9zW}qlem3P;?6yZ0bR%+ox}}y5;xjO++Zim68P2Om&LCJzb5?J@Jr$s!>9v4UG7a0r`$8)tnHU=d3ytN}k%p6)NJP~BA0eA0%$@Zg$RVpw znOoFDcw>F2%vw*xdn;KXp67+&75SmEz|BJNj!b7EctL)s%=5euyvtHABoS)F|Df=A z>84%?UZ5Z1tMx;ue9;QQyElCZ@&6lMzaJ{||3V1frRgHX^Sltea#N+rE8y|wO$BC? 
zbK&))OINPkpC08?arE$GbodB>clC$LFsA`TzY2`vmr)O4qNynXp6})WlWXBIWCV@4 z1#T7+ry_8(5QIhd8eF5Zc3?6-4<9-(xj{V?_$T!c+5)a~9JBxGp-Ow>gv)S1I4nSt z$0S%NS%U{pO2qk@a3&^P;Gbj!mt$~x@N=n}#%~q?$~O%y0Dvk4X!#YeC>C7)2td7p z`v`z)o5mrxX{_dHX#W6QDu9H;sKPYd3-eSw9J2WFbX$ZC* zf*G=~!3m70X$K}Z5}F7xLNg&jND|VbgFUbrFlP!GkG*;|Z8ExDpk1M+JvgB$8Ar$m z(Z8gz#0FDdHKL$CAI*Y8vi5!fC=8LV(akSP#Ib zI=ET_V#O_pV#5wAYT*HL-S%VZ1w^6AX-&>(640bell4NZRxQz1_I#t8vXY^mgyO#lPuOqNlAWx?|#6`}?90SW==E#*`%Rb(-j?XgP{n7*-O; zf)JQjqC^W)q_iO=uH`~#M@qy?gwS>rMc7;t0<%k`u)HLqNr3j3D8ud&39K-YXhTfo zXp4yy=9oxmX^NDVm`Gh?OoQia>)nj&Ix+BU;HBNZS6AP)_2h&0M!bgMsR@RUXLT2E{0Z`ZUY1P66Q{SSOkU=|AfDR?mG$-rQXF$_C{#lnY% zR6`a+UErM=fo=Am>F&%3)x%P3R!r)udtBu>=bPm6_Vmp<1XVn=0G5D**xQ+mfJ*+XMIxIdKAKY?&S^1=pK!jh~p$K{F@fwm^LYNTvB2j#(79mk8<`WtTcwOH^+C(_f zOpFs$OPt@@muoCZ^o`Wwl3F}NFW3l6Puv+RArlf)l6X`wO;)^KuF)NaSip`+QaRd` zX?dv*wr|o)xPB^}PG8GFSwah80U@%W;rufD3o$GM;bL1XgiJ*7Y(@N@&_cM|lZnm* zVj@cP{DSEH^u@pk?50f$@fQKq;D_1*hVi531fd?k2K=J<&Bw11KZa|0F84hLK%($Qlp3eR8V5x#%8_C@QY^PFCmv;}EPfs)?9kIfdos{| z6m!_Q+|q%8fx*B)W_fS-2D*91fS&C6@0OQE2}weRkX;CH;zZOg5H(616U^e`W!S1} zf)(r4i(tYoQ|B^GF4OEX371Kp#dz1W>ljk5GVL-8T;?ch=Hf^*TTHwjh*h%{7@Ue? 
z_y|2xroQ3)ay+GU6SbB|OGR2}u&_j^wd!ES9<1k(zg@d0PT4(Sc**)(bOF?_m0y>@ za43S|>PuW>iNlR!Tv0;9p@@b%&-H?ODS3{Uc;-Y$ff_QLcfD|E5=W)Y)`F`IR@nar z8u%iBDMT|XTZf1Gw`E%wtZYs1Tncp^e%6=gqo%=3msX5hshk%C#F-A@eFe}O__PPC zY0$7Pkqg7&I?xwxyK+`7g`;F*Uiw8HcZ!actpoASEVmwKGnbakie@O^7XWBRtW9vR zU(6fQdK{Fyas}4i1W&KsY zzwfWI6Pv5BwtRCH79aOl!O*z`a5_|r@(3G>FosZEFiWfrT47QvE+p_p3K%6tIc2Q0 zk#bCAjZN7?3+&j9WWPUfU#_)d^#Xcm77rR_Y!yRa%jl3IsF)D(X?v)tAvlr^=Re8U z5pXRFxRc4EnPCWd57`94_XQ#vnyYQ0%troMGHjK?uql4F%9>*TqQA;h;HWVWW5%x% zoN6`E`Pt?woX-vk_Vyz*xf?u&3Y=(tyt&HyfjVD(h+$`(&RvB=xZBlKoX$Pa+dD9V zA8=#<-1(Cv-M-Bx17Pr49K;FVP-v=UdZ29FF+#2tWbn2btneIdR}55Q@jh+h(NxO} zMwy{_3rjqP5sekrK0vjPG806B!P_TZLWbMbFX5@gmb6UYX#2Ha2GSjb8(O{LYQRWH zrAklleR`iGGQ2eYC-dcu9y;A(z4c#mK6`yZ9gTpq-9BrmhFcRFRC;ZS@H@Q6n1`1 z-Zj-S^G=z0Cv_`!oEN<=6O>?jdq+k_M_G0FW8EPrx^78IQ!O)U%8VM+zM$RpqFK@U zgC(tIby#G>^~@|u_VGjzou>G{mALUbVbTEA*))v!&{a>3+)(JEtj{oGhLJZ6-EP@? zNxLN@7tAomkG^2yi>zpQX?Ve))hxck5t?sQnu?4X5cyS9o%=Vib>NxdrHG{k{@FWd zs%2h8%1ninNoXpv)BRO!81@aKVXeAZ>tmh~Cq8|^;MpMdb;!otwjtb3euoJ6@gC&H^I;0I{V4nQ(NW%t zM08vua6~MU6-{-|{|6Jj0;4r$`oi#WBy35`S~c2Vj{7Y|0MBQ~!E0uGzgY6^CXbk< zGT_0&lgZ>kvyvWJlb&WvS~oqa-So(t^fc@GcGH7a(!;|+RGLcf&q(wWLSlo9M5U?r zVQPdbvQnk;AT3of3TRx-gNYtlNk&^r!>uHvXQoT6>)XwtTSig+~nOn!-HL^FKmewztpmh zjFxp|ST{ZT_(MM%t!cb`TaixDcr+QHKejBbbu$31R0(V8rF?~p3jbhIWk_DBh9+WyE)>`%xE>$9S1uab{y=|sh0!{qcx103dZXV zc^`|#pfHTq@Z*z;td7HTM2GMk!DD5_{K`Pf7UG6{w!(hG&<&cZZ?s$I8TQt37TV&t z)@8rZuJR=_1+1w8rL~g2*1D{l712t4!>=?gQpu7I%cjL> zBu34;x1k%Ot97H%g_Ewr?H6?QeNdrgQ04<|Q)E(+mlzpTr3OVVbT^<`(-XM9bsmv~1Qd#ZLUoeTS?KPGh@@Dm-jQAu^RAo9bR)Ww z6@1WCIQm+taC9RkX*iK((w--E$4ct<@ut&uv!WwH=_Mkek`>0n((9l)ZXKF+d^3ydIj3z4L~Asin5&a72Ca+l8v&&n?Q ze(QG6m;JrIYJaci%XZLLZ3peYfsKqA4@oDAAQ=LP2? 
zjack5!#P0NIiNoBX7k6?ac{Qsm~$RoF!CB|#M-H#bDr}X=Tgs?olAYy&ZRr7mO__( zc>&AHFYkoSg1e991=UmD?!OH}1apO_RoKn~oe2fwSSYzo{d{Crxcf-=#;#ejlEeEh z3i`?mzH<9Hb=ZFHomc&R>MFnv`;_mtz(eX~D@G&Gl1cwhC71r6CgO>pZ#Cx_F`0>s zifdq(xCVAPCwvF|*V;jUO%sj81c&@J_9DA>$TO?pnWgYMf&V1reFss7k2b1y$-g?T zIas0Qy29E>@K^zxz5D_Dhk?iIHqN~jCWLF7F8w`CIQDid2{Du~sa^*Aprh&_NfD+{|1+MFxw*+VIqc)``VR82&#V%YPS3_f^l zM?c5x;^m2F76pu7o2McV}Bj?-Er6QiGjm)vJ^CvwO(=# z_+Jkt5-}vh5o6C=-VZ!)q4MAIv`h7Rr%UyEr#oLYE1td6^>y%053Kd?4czWOK@6<@ zw0Z)Q?N~Nd?>})C|K>&1MpDgjtjojfq!HC}EYn$zkCQ04F8tQ3R;bgr{7g)-c^BjRoA;} z{kvA{yQ=nG6?#`a^6qOx?>ejAMLh2!ns*TkXYoRW2q8{rB{=^{Y#=leVuU6_Ga*4p z5z=?=z9fI=f?Vr0mkiu@9B zieY9pIK=iD9BRhh2?13T!OZ-S^XO0uM(l^wn>aP~I#_%el0#{S7;A8-F`5Vtr8D)I z)(Z{=)(^p&4(!jsN=|cgo5TS>^%*R6XpRPlLK)gxLJSSbwk-CZ2Zw60WPqt>lQ;l0 zj#U+L%%u#akT&vypxEbCk2?k{Zm}9ah^(YD$;N1^0Y{fJkIgXKjwJ_A3?a=>IyjWf zUTy|ittd)Ly9_k?w#SXysyJ>}F? zbuuD*tN)9D#za7!>Ix88rpQFoZNdFmN)J;C5uCJ{ z#$<}~v;L}eYnj(rLA-aU`4uW|-CMQJWjfbxMxHUbb$x&Ao_kks7_CkZKAM`_@Y3<{ z>{v4JpYb}JtB7p{Dcr~EL;G{7OwIgAZTj{1Ce?ZyVR$DM&7{-;_45AO5H3o*4rexp zf-w5TMW)#Kdn$!Cgku!FjddYzO>KZ-tLf$S(7c4oS0$p;Cq?NMG&a2)4X!3=9ovs^C~;GSZOX1nMrwIw(W%tN6Kz;=(vY4$DS;)Z zn90Nu&S*q1eLa;5rmnbB$n;70Uw3p;-SkOK)63(^*BdOVnUw^MW5e3+jkr#nPbUl|9e(xH4;eGjh zz9ZL-Qsc>az5ka-Z(Ta6o)9}hAlLT#dtaFp29yh+L|-XKd|xS_pGd}O&8jVEUw+h? z=dT$GHR92c!nKE)l%wT!m1{oow^(bQekX+~PaJ7awi&ipgbCbD(f4mab(QBf3W5JA zg1;VQL{Kde@=jm{U2>^tYq}vtubz+S$0q@DLM)H(e?D!WR;OjfWG1^(?{3cDrT*!M z>NMdDAwcLNtOuY+23Jc65gq~9Nq}<|VGhL!fOnJ^sB%KyIaE&JwBQ-RfMA#4dZN`T ze63(e@DZY&Bsy0K&JoQ7A`&2B3L+l5FTZd!f9thn)B+9G4TiclJTXqN5Y{H_4~YbCpyS`ec2=y-@bvhOT5t%J=Dgs!?I>1`;ul!gcf7BoZ*N~` zzR#MXruL}{^*-UXJ~dmz`+e)yR_y7(Z=H3zuLHkz>Z-^4juW`1WC)q#!qjIkDfF)G zpmr|-fvjl=Wlcjbv&d>WF4!TsSCHwgt*8m5BJivH@T>gbJV(HJj&$ac5_-CdJ$vm$ z-|@8-&Zq2onrz#vB~7+5kpu0KxbAnYX|IXKx*A{zL&pIlc!VLS804#K)Z-|<3UN1<r;>mB={N-mq_sa0tx}Y_^tCa_I2Qgsw(V3ofP)qT;A&H?vaMu?#oBAxs};<_GkN! 
z*+h*)7R0{x0j5*a6N17bl_1)cG~||1>Tiiwu>w>*;5c79rW+* zF;#9aVeU}Z_7Yy`o<3a4a%RwS#txIkd_ndFZmww&!d0uZ^@Nwg^f+I=;`8h>>=%D=_h}sdrQ2t z;-xQ>nh{UVcx~uaPrLn={cB&!S>?H2J*@iF!!;SaspEn5@M7PiwV9eo(D$hGQ{SWP zII;O0PZYexq~o4f^3H|2}#)^pAR&wI{-Fs%QEBPr%g zv(B~YxHXrk-tx^*Z^3lroz5Su9~P`1VrSnEy>H<6j`hQAJcdVM(E1_n2mGj3+lW## zJi53hQj3=<>@P!J{NH@lSruu;Qzum)iDRPxcG3m?-)x9C!BQV;u2#LF9zLO7#M^mq zDiv*x1ihcSr|z!N(ZYAWwY)NSXf%A+p~25D4u}ej?r+PT}IX58+e<=egh+oxxinuh|vepZNb?0wHqWSU3W6ojCg$c0`}6X~X$1 zcy{wlXvy%Bc`fTz$>jM?LVP95hr{>2c+&nrr^1Z;lLw4&Fgd}2a+y1 z*RSElulUe;L~IO?+#+_@uSrA4%bZO1{?R^Hm&iWHP{)`@{9!R0&|>Eyf;1y02H8z*G2X@(*Jcv)Jni$GRr9we@#j)}U?IaVy&5IjV6}qfrd3!ZU0aVvlJR7m*(i27T zIHAn-VV~=K-}8ulj^KBlFY8{<{%&BO4aqGBJ3h*~Hd$U4-1|FpuDGlC359p@`I&mY zY~Teo8EwT|T_lMwd1zYWSk&Ge#y3@7wO3$UZ(d#9o2v~kyElVJ(f!@MhYk(*vj1e? zr71!{w(caNAG=mPj5_dKp&qvJxn2`*!CPgg{axRk&NX&(-eo#PBAiffcz~4XA}ib$+|_m}s>X*7Iz#Al6!arwCG}Jrt8qTp%=X4Z zk&814eC2baTQWH0$@DeE@r1)$NF|m8R^kc4uheK3yB?Dm9}w3021foooAaK{oxXGP zzH`-BKJZO7)*1Mw^>uL8*TJ2z<_;~kyU?6{{V4>CC+%7K^D>fj9<+a)jYe9q3!qs?gry@aqe5*uj5RWjA=R)tj z-bZBM?`;|MdwblEb!lLW^;B&ec20-!q`{~FpQj-#p>7V(K)uxAYcbW<#0P&{I*uLI z!DOa6j&BcqU^e57>*in_U(k4giZ^r24&TD8!WcdQYY_!%8=DDT7Ow}5)?dF|8J$rB z`_<%9czR;I#otT8p5X*E)$cQ!w`OZPKc3c%T1I-xdsv83r?3O50c(1X^bU9T-rm>0 zdie1JoBVxujNU?DH83mS`p}Wszt+^L(Nvy+k_X=5)UBH!>2+vg&sf)|eRc@NmkQd5)KuR=?bUwN^<#2*+>PwGM8Z|AX#EI{4m{|Fx2Cw^p`|0enF#i} z5$u%+aEu)KS~mllTGwid5XR+{ahMW#-9)3q=~_Q!E$iF)*yDYDgA?cX?ZC+#OE*Pblqge?rYNkeqlxJPAVYKA77{o_-)gF7jrMCzaXDu?T|B=aS}%*QXLWe>fvJck zMt;e}>p{Gq$M}AG1-~n?;WC5VB>9%s;=MOew;uy{73;0%uem%mwqe!K;pG$0d3Iu8 zblHJJhnD6Np5N_9+s7wF20cp9fAjuuH1$b^|6k#wDG^V}i(qhn&*BPo0CNlYI6-H$ z-_wZ!yWoD9yKV~ZpSQz0R?!?s$1qP_Y=71HU#B2pGf@kBjXvCy@!v>GpTqTjvz`# zX82#<&+78`dVgtOBYDDGRyN9F`TnXo(wYWIB+-Xq*B1`%Xb$ejNETjuT?ytq4k|y; zz7w!d`C;Y*3ZQ*wKEG;pZ=p9bI+_me!x+pkwD7@cgM;=G5(EP7vzO$xjs)Sft`pDk z2PJaQ^`4~22v0RU8CRij6-w6;`_3KeW&6&9o!0h!qIJRt^WY`C6ZZTz9Zz_E>nU1a zU;MTjlb?D}RZ+aQ(o^j0QVsUydBGjN*L*+vl3=yqT*1G!5|yXB+R-;BG>85<(f`y) 
z|I^zZotu8@(BT*EX}~y*xK*I*f^RjoeTQMKDJ8b6LR~%XDy()DwyWLysgkQ^#kHhW z|C`RxX@)tnE~TogU5#0nE}rn$zMJ+(_>>E>J==HM*IIS7x3AN z)GpxDl@tjIMT+Wb##vmv#=5$AYK?w5@V>j>`!41l<8^gGaCRDy!Vx2h;Qn|G$BmKP zgr)nJe`eFN@b}kmTCls&yZ#3+^^V@P>xRlT&A6Q_*LcoTIEE+0VQekSde-7Oq^=CBhS0 zz#u(rS7VL@=dxn@q;d`3tLo4V{H@DHp_BI;ygoDrF(i(wSGmTU@sz34hMqArU}#t6 znrE1aR2yc9U`&YJOB83i=-i*3>!A8e>kffQWW)t zu?Htse(hp+1_snKp!}Wz7!8glJp=rzfMTMiI4>X_tjW|i#v+~pe6$2PjXF@niLr(t z_Fgxl@My9)*27Fi4uqQc;uOS{^m+!IOEE8j#~^03)2OKZNCnee7>7Z7{7+12;9b}C zV4Z<$!KrbLRl9T^tODS72EPD)UHGk^2ZI2hwfKebdt@Gt6rOjPa}|Db!2cY-pMkFh zeGk7L{9eWHNAqy(@N&e5-x>S@_;um8emUX;t;H{d-y_Qr-*Uu<-yG&6pXYG!p+nt= zi(klO;QDj?eg@ZC(D(4`!S7Z4ezY7ZT=Wjjd0&Jb!z3eGSfWWzh_y)*k5-OqDNlk! zkA?Ui5aRr+5FD)qvyOOXiByLhd0g^5D$2k!LiDUe$!owXT1-%&vAE%_7pdfwY=}@p zs3q_`;7|Nf86ngW>In^mC}BRKkq{#^5t<2cLV|D|AxTIP(u52lOK2gq650q0=3)73 zh)_eQC4>nPLLH%=&_IY1V0`zJe^fRSVuU6_Ga*h$5UwL62`NIFkRfCVEreD=8v!{& z9TAWt)DZzWLY)whBh(22IYONf<`a+u)CmDOK%Eef1JsG+HZq=PDv=}95dk?u9TAWt z)DZzWLLCv1Bi0ejTs*_JPM8C*e@^%r#aeRj5qb!(5`F}*rWX+GG~o;(K{~I_cc-nfEyx?gJ(w;GHE+;zehpyV6 zwcGVq;!cw@ak(7dGgJc6tEs2k^Y;0|4Yud4t&+TPq%Z4I2ml*j$6FFbWSSloPRz}WBq?3v07lu%>7p^{p9X>a1eR zRW$`i3C$%#JSgLM0a&Ams8pj!KzLS5mvAhD=6Kzh^V4q2A9NXNbjx;`g7_?m5~DXy zp&lFrRZ|P7#eC0El*OJS`krLb;e#?XozmaZnO+laeLLAxYm-&WRS z|H!jAUj={LwlN!SLoY=Hza}2jJSF=(N*UZy%AgccvY=g=1>@>oY-T|_B3k3-)}nLF zt%B3S>K5!AbF&YvTh7xv59ZZ}`?Qap2Rp&*n|Y^oJ1>ZK$jJSz9qg|{KPdV^(GQA# zQ1que+qKBntyP1a?WeU5s7~8o&E{J2_4#ZrRiDc&?EilML1n4G?0ZN3T=1WPBi6e> zQLCKrBX%7$mWP7+qxwsyW;;dSsXcWvK&OQ5d`BT3AmYTPG4+?z&IRfM=K`D>(in6u zIH(;2t;Hxi0N-zm50X3B*}f0|MINUI^X=jfu4Khkz}16_@9Nc=7lv=GomD$0zbd~f zm%pW`w)Eit;tK0g=k1=w*1r_X)t1E-J3Mn2S9B5heIKVeu2|6b3Ken@^1|`bbduXi zCx-07uju?^kMobkE)e{RzS%v#+5E^+SNG^I4`8_#K=?5P!F-z>7Z(V|#d8l`Jogmk zTct;kJi}{}>&{i*KQ(e^clV}Cmb7jh*v!u+MyNk}KGG2ip$j7<7OmMm{C_P=Y|XC2 zKwqRSVt4hZW$KVxc8Z1POlS()B4u1Z>x-vklOG5!KP5B{A$e;tDF-LL=i+-F68s|^ zE-%oYYCmUBMbz*w6v4F&ug*RU(BivaGZ+XkjO&EgiMh88Mn~%Lj(Tf2le?BJav9s- 
zvf@6>Qxx}g0&rCT5PR(B7VFF|)|urM+sS+`uGp6s23NZa21PlLjE_Y7a=9!QV`hPu z=VBdLv&g>nMTIq92y(E~{uhvahU7&$NP>Ex618H8QGzgA%oX$4gD=o6{0v2lyMXOP zkVUxh3#pl-mvl^>wd(RInT?rjw&?v##e_+|AA3i!MzIAe+%QBHNf)hUJ=QYs9XRlh z%fj%bk!+e!UdOQ)k(R;7ms2VE#ULmdaF8_a2;Jdv! z7{-)#YQmBltn^9ZJF>_7oF`c%q;LjOGG5yVtG2$c*+WbRtjejrtO>dBg!B$ zizQ6Hwe48Dh3~jT1fR{p{l0y^r2WPk|K$J}oQ!~YHvvrDQJg}U3h;am;H+uEX``5| z-i2cxv5>LHyGXs>V^yfvi-aDzBF5iDW$AScWVB%9c8H>bm zB2|4n7R5&>Mk3sb=P5KB@_vaXGT&z>hs^XML8Z^b4H83`4|7A^} z2;nblFl~d8q@dOrK^y`_=z6^86}{)7Nhi>xITnGLtJhL3SQnW>o58S@+QnlEv8+Lw z2S(T;a&y!~YrQYR-V(Y7?TeGYaKey8aDN@FlqF|`BQs*K&$kvG$p}YNFnatml_;^+ zrQ0$;$c=Vq+6Okz&=RG#__t4B`(kzC)0t51% zyV{}gT+$9l&m|S=3XM=w)1_5AG*K7uNpk@n4(J8J;%p{8n1vDgEGNow6`Dnfh<)7{ zqp!najXK;uTt$pqsyTvDCivsB4_wkvYCdMZvb?8iF`5u>F4}es=Wk=}A;!izO@_P7 zuo!G>uFW*JrXrl-!i@xr)9FYmj+F{9q_UO^+-eZzmI+s9GKX&5^y&N^_l(?i$1tq< zuD-_Z@x;8F2`h@^2rEn{9rRR^^M1w`_x;%S-J(&OE+^>8xBI?(RC?K?(#_(|!UW(J zwG*RUkrZYoIBO6NPWW;>(Gbnx%wOEywT&3&3#MXGjwNyy0!x!BdaT>$nNV!8%_+|V zXvN7kRqq?}{bQ%^AAOBQyI{DU5?AbGLYRb2qO(7X$J8SFWK0XT1cwIoJ4UA(mKe9# zyX?>JwBPJGWxt76rDEVW!!JEKHda4&6hGzruv5Ix_)>|RiLC=1 zK*aDEE?DIn-31QfrnsV=*5>*>^VM;fVWZ7M|7Rh5bha?jGrwq!Vf|VLgPJ(p54}A4 zmI&$uCdd7#9zOhVmt)ojW{*)rp2~(yjc^r=2iLm^XFgP}!LEe{JXtW{?fIPlKkT#_ z0OuecDlV|heiNtka*IW>jpqv2#6#)DYKil9su8P={a?eX*mT7DKNX&}SdN~yZ<8@W z%m#(R$N=h+*7oRzHBr;72=T1A#*UG@MiXH2VsD)@E()PEr7hDt$*Mw`) z$m+hnwCG#m+;b4ao9Jg9yxrZi&*giZdwK>j;bqUVUl`(iEgsXjJkEVih5GY^bJd@n z8v;+^H{{%~FYn#q-Ek1u(;TfS;)ieyYx!FT@v|FS2l2DlF0^hR#80)T1K>0{-@4tp zq6cWN1w7vSK=+3FrAtz|)weyCd+ggyp&0H6%wFD($;4gZ5Eipx%SOnWtk$U|-d~Xp zx{O-FC%_EqifW0vA8#p|;KoeiiJbCmkp5;K`W&W>*O;O@0Kr}hc2Lm1(prY>KtuxY z100paM%DfBq(H>WrjqE(F)$s3!$=}MVwmuLwa#P|@n~*P6Kj+Ic(J|5^KjAg@ZUat z`QNQ>|6XQv6sMl2)6YCU3cVWjA4hgPSo*^}Aa3RlrM9nB>%0a3O}LhIcs8R=1ighp zEL+Q>iGwvI9W+!|kQ!xB|0Arb5^IeeLpjy5XSRUbm**?k3@Wx9Jd4LmuvQ5vs$);6 zY>&ZjgU55%p54$gO80cI9eR$YMU zLlURM$WDjR@S~(1b9iv%2am9WTB-9aO3k`n2TMLrcXAqv!B6L1j%i|=QKzbzqa55j zt%RJ>B%n!`ChLV@9l3a^)g+|JBP19gl&#QW@8+iPP0K68S#@;YtxF|dp@>*0Vit<1 
zg(7ZJMlLd97mDbGB7RULz;sN6nvtwhBbg;5*(DL2gu z8+>hO5Nllr?YD*oxv;ly5IQXNeXS2qyE^&uvAzM7UQ3dWCKBj~JNn*WP=ftg;@8KH zG;G{(+rYN@3+~%?`?FK_CkC}c5kCGVj761+8O{S8s**wtZ1KjMv|NO4uxRbv+m84^ zkAvcAi=8?MIq2DryS1ZF9k+*jKt2)@c))ob4Zfq#`#r-9?t{U3Y6cG)#=gN-Sra`+ z>>FJDyg~SC!O^ji8TLKM+K2W%QfM+pWYGsp4#FW~QP9Z_;iT6ZsRP{d_^4>+D-9xz zgzF-gb8_w0f zxvIq};CDwSPWAXRmqU5m^W&_wz2a5-Ms5Z2?{*5-h5p^v`&e65v18A<)?4rIC2eJSnzQjp7{rwYid4l?*;8w0s z#AhwmDt5N#3-)RtSnuyt3{=IijuC^Gzol72wb9>{%_JpMlOfC$PH+}= zgqgw#RtqaQQ}de4reDHL;RLIN6`ZMgO=gK+aHi%psn&ufh*rl0>Smmo&#zv+5y{M9 zdW4z6304a$I8*bQ%o4reOwDUjtp!bxo{mY=*&+ze)VwCuTF?Z=F=2$x(vl`9l6KP+ zr4Z3jmd*qP^WT(jF3U%lDV$)nu!1u+ugNUY3(nNMCe>Qd1S#s6G@UJi;7rYHQmq9| zP(Bkz=qxR1g7RrMO;H{Z4Q2UEP%!RwzxEsnVtAiVc*HTB|`Y37S zI?w%YuJ2X6zJ!^=304a$I8*bQ%o4reOwDUjtp!bxl8#B!*&+ze)VwCuTF?XqG+~6! z(vl`9rFPR4B@)q4mdgYM^WR-xah*e)DflH#s&eXgn z)mqR5x35k}(^*>51h=rsgizeICJRE5u5mWZY%OYnTUm$I6nC)>Yg}EKw>KSdhl;zQ zyt8$1!=SV#0@p`L!}8C)%Du07Z*SbTZ5QI4qeEzdTU$q{Deh_!4dq>~oehIKR%AnY zr)zh^@ZQ!DxIRi6whM8?9VuQ6P9f#|qq>S3ydd-Xu%Rpgkk&+P2&A9;F^8;kt-%;+BOmiH6w z$Q^m@^8U&fc5<@C^L=i(q(&$=>rsZI(Z9d8U0(3TiwRF(CIWh-hxhxew|1(Xe2c`` zj@l`2m-MH0jt2+Nj|TA|S_xCillE(JWx`pf*KYK`uM%rFb~yjX`Ov8|jCJqYjeDuF z=ef)_y!mg(EN6u8{$SL$KJ5M^QDY?u+wd!#Ra%b?rtSOko;e7C8Mis`nCo7d>Ej7o z`j3VViTuqFA0M8!|NfhgR3j0Y+_{K4XuHv1V$J{y;;z#^{!jINVSjNM!;vZqnuGJ}Q zYi38P@Ot+u<`mHcWrM1)+BcFa`PRJljR|4bx3F&9yeHha$(y*znz-ezkz}w`pyXTg zBQKc0u|s2rUU=cI?%P(wc0$f1IUY&pcqG=8-8yaitHztXLNhAiW@NW!?62x9blNL4 zqY`emjgO2`sHvqOr8uNe! 
z0}h?e(zikKnsf$^hy)^*Uo49YwJR)3B;QZ$H?6PhmE08+)RkXf54EsF3 z+JtuE{zNI?&V!rzJaYK7WjJ;ycu z*wxiufz6*c+7~zrdaJX&NAdR}{_?%+Yg0#$9KH2Q&wDzcrkTW1XTb?)0mE7+1g%x{ zkV$B2f0Zd!;6uC2nQyZmc;~QGQDAOybm)yVEPERa@`?uz$UUvjM-}qSkFeN4W)a7gu?& z_10t3{MCMdW#LM`sjdc#*{x_@D(KwkDdyG1yD|OUi7gW=kZFu**efvUt@j89^>n){ z$b@@QOIK*8EA%vek2hcR<~yCAc!%?Z&VsOl+~Cv%!z37%^}K>5yjTwJdBy+p31wVH z{<=O_$@{w(M|Wb$+64#ZV7Y72+fp~HH@vuln?bPN6t#zKQ=AZ!-R779g2kg$sv%Q{ zX)|n##?9ZZ-toMwsdFt1NB4XD>O)c1$0AXz7sk|6vXR@go8sX%oDmeQ<#G+PoTcDo72zqX^9>*{+2__FjvSy2<3C4`I+cFNmU>*;L9b#WChKsP}@+)Xe zWbwx1yllOX9~w;oLnHisw9(p$!4?do3dPW08z!9Fdi$Tgf5T|@`{?7x#*Xg3Dw9ce zb2BHmz6yQKQ0(cXZ2!h~aJ!aRPdnMg)-&39vHctMFeY~1YS+ZtuJs7r6vsJr@gO!U zWSTs`t-!wa-PVYDSTiv;#?i()dYGe+5lm%r%My}D zns^J~K9q_ph5*)`0PJKPVO>JBFkH43jaEI3p4Hdud-K^jIG%Y`@4#5@aNpzK%HMkE z$P40p`3vgf=u*?yBtrc^XA*oPf{;y&J z44dx6i-`i>OdRtVf(H$Ys(|hRJq1&c+>nTay4wGS-7GeEIH?w-gmUuSG+qcUiFy+U zA;szea;2>8{$FJ?-I?Cuk?>3TOmFV74I6H-oB8r)m6T0DQFr`PVlR(8fl`z+HU_&ToHJ^FD;>kwpZ+c#WO}Gw7sHWPhkp{ zwYqn7Roi0^j4j+zSh)MD-ht84BXlTOR(UpphHY6AO#CA_^r+8p9EV6_8tDP>-U;3Z z(8-`Bc>K(9fm_b-d7&A+5$xd`2wh0<>ORLt%MrUlSVo0|Be91Z3mWL*PZesNK4U{J zHO(jBrlctXyve+V%e|XM%)s*%rr<1c0;q;>s6VA(=q)u;;+PxU*VpZqY1_@#Wfhp+ zyRM&)`ifKbjePbCrHnbMeo+Vm4HA4ul7z1kU(%}&Iplrh@ZlF;=skRR;P7F*!XG~T z;)|#??8!cSxc3Q0ro(B!Om$qAw9Xbk;fmu(J{cT`lIZZxv)Ta^A=;gjumM&w$8fet zS8J~Jt{G@{zph?Z=kInmo4X>OqKs{~-OruaOu%Kec07RHzgyl8sTY5U(1ef}v?#8q zJncojuiXQJuMg1!XvN_8lFUk3_m<2$1S_yxhjWLrp)AW)a@-@xJz02?!tSTx)b+He zaXsg$mN#^7x^3{hZ1D&AmP2=;w-~#(L0!*zEW7~20PxJc94{z+O{N-@0Po@nYZJBAs6od>;r;g5W#zMX_pCtuirgL^_b%BM$(FyIJAh7S1-iJ?Ol{ZHB^mL4wD zK`(XUUEr}i&xUV~1xAd-A!vkZ7`Ba8UgBlrakO3^b&`a0S z!LK>A4N~Whb(k9Mz|Z*X;E0#&^Q^EE5B zp`w=c;K#M;s9C{5*6N^hdxdkmeIu4};n6<`#XNf#gJXQ|o8D!NfgFZ3M7P75rd43P z2W9Y2RYb5bOkMv8F&u8`?#{uwL2uc}$cvj+XV$#|dv*AA;D=nI-2pI$HWdVI^Lq3|m*d<)fSxGWOTtfb|#NKaVC zRZ-Zn2Rl7A+qH+$QpISElV-d2Fa@f1_&z(~>Mp{rUEST8%&uMG@K3IM;qchuv0P5F zSA`%f%39c8V;x8ytKh^kQ3*E|n3n0llE4cI+)36I=fNf%p@XoOzzWD`u>3$*#aM~$ zEOFM?h4tNJUFy3@B?2G92+44k6@Dsxg9mPIuBA%l>dfzuK 
zj-p?`)o3UZYG|vE zMnVa!&E&HAR4S9|v29o>jWmb2_$_VSPGecY)3%Kz-Jzv*-MzV9EMWV}Wf;piwEBDB zdI=W2qG9YYfDOI{kxZ(QOHosyL_7wIC801@(Z*BRObmsT+-u_KJJpjw;V5{gh*qQDuPq9BYcnb|9o=>w(Br%wLcZXI!g>~fOX!(8h0`8 zyZ;326Jot^jAuYcqS%QZOwep#aDRR!mbWiy$z)-tdR0p%b0>BsFCf-r8rwi{lyd~c zw+;~a9pSeWze#u-;J=Yv0&s_5l)A3~8bgh7It>6k8v%jSfOs?G_J5f&g9m;}@ftv; zkuHG~01TRukEe+VCZB3Zhd?GY0=(w{ti$9kXj$3OdRt~kw_6JOxe$Qko0*Zo6AcX3 z#(2t|Cx~s>nGd6;)G~X{-CFOwEY8l|hVh*bNACgvlg7epaQ?}REHFWr=0A`R?3YGe zVZ$`*3VWDYS2!+n9RPI&_$|e665a-&uD~S#tgG^}D0N=}NL|5c0K8|Tdnw$)L5aG< zu4>dB0(_Y=qeI<6yavF!gM0!YbqBwwJ8Z#bcBJmgS$8;hiFrodfx7^Jy24g#))hjt zt{^cS>jdc56|`~4x~@Q3R}>9{xzDQ3wfwR{ zZ+aLz&SOCgd9+}ow_!xX(}d;d!aX23a8ORn#kp%dHx45iI6n>zBv_$tgsiDPQ<8F{ z=&_aExNC}&=RFA21f?JJXovunoXM9L9S+Zv<9c0+!~5)igEwX2Y+{4x#$XB*21Pt$ z&|&mOO#gsA>gEzBdtoJQpM4__a6Kw^+j$l>54u95^LztEk-U+^Q@%Hs%XAO)!Welb z7rq;SORUJY#3M<2;r!8C^SXLix~1Egp7so6$L?4RACeX0EPz00Z{iTvP9gm;Nm!N z7hOs&lN-QC27b^d=mmfaN-qH96^60F3&E|%90AY|Pz!(&1cm}2jRr{r2%n#(&rgGk zOKFLypVt#XCkGe9Oe*WX+ryZIpugzW^`xoxFXg0vDJvN%m0=i0WFd@CrzbSS*A6U@ z9_YoF6pG1wvDN^H`!(-f%n1p@i6n%Q?+cPE5cDPI__jk5qVwc?m?4oBFyfms0%@s1 zmk0#c2Cr=*8-~7Nf?d3mQl8KtA_yhV9U_5H3eVS(ky7@4QCvYF*>Noj6}h=yqP8)n z80{TsS=I~FK+EpGrL1h@k@a6m=rI0A8HF7Ol`t(KHb*GPR5pg9cULw=gSh&Yo}Uvj zMS`OPQg>H^iW=8hD0X=?g|M3gErgdrDo_X|DTa_L93{|ycU53w2zCIIWbj51O(9Ic zuPc@W>C*xTsjmS?T#(^00*mO?`8nL`hgM- zk8H3L769I55sPE%Wm6xh(P{Hj&A6?*E%&GYJ~)V^25#xKPjTQ)K>1WB!@2T<=m?>m*V?Bg=hA&@tyQa5)mpW7?W(SmKnSjOc3g{3~d6 zqkQRDnDS|<)6w(emjRwgJfYo5T3gk;XRp>GN$*@#ss`^|R2E<<=65ct8^{atHPky7 z@vZ7R7o`L$1||igUlN=WNE?iV3m}2SamBi3)a4K(dJ3AZk{CP=1DRVwYaivo-}}hqJ)YP zDoThb@xf_Ph!iIgnUKhYL?$Fc)Z!{T3kXo3FRgkhxxPRcP29J z=IpH(yo>V`&=Tl-3XnExHPn=-VgSgL1j+&xK$TEaVrE>R4rnNvmuzoa;EjOTNx;D0 z1Ns!r)Gsk92^Azh@IFA#KQ)V>{>^~NCLs0)5{TZLcod|IN#lvQHQm`ZlkR-;qA&LE zzB&J3r^(A9@p59L-WE9_Vn@9EqglcZMSnDFF#0cjixL~7|H1^E@QI!!$n`Aw(JTS( z$9F{krT^Q=3bNabJ{b&3tb@$WV4>dglxCT^YwCIGg$0UTlUSgfn@~OqjGm~nK3KK> zWqp1>9{i;vCMN&#j>Ocq*sjDIqKo?ndVf7?%Y}veEwMjx^uKCPs1{pX`w5RM+$ 
z@lcO>i%$B`Oe(h~bzptxo6Wmf|K+r{W^%-8v(ea(SZ)@`db0*a?}%O8O#W!rAYV?r zf0m$Ua?uO^fqj(-?5;xWA3xsRaq096jB`7qr=oWb+!uQ`ei;j7J`*cM|Ap6W{v}p; zCiY_A_2=!-<~(VLStFk58`2$r^ji<`!6ofD71kRl0Lf6cW58t`FX2%9m{NeR&SbaE9NY6yXItma zL)nva_skiavIkmtWsUT0Wv+*3~k6civ>@tJkXvs8`x*Gc4;%QonU5A9lJj}x(MgG38;Kx#vRO#o6QkxR= zeafSW|KPYcEGAn*2er2{3{!&Pfluto&s}%up}9SC8tEeu&uE_Ny*4c6ZB*z=48O+X z{kX;4a8-!6sEPyAf|Fcy=x8gQC9ZQEawzQ+-$IN2rHiWiN}R|`i-$?8m;J@F;X~L@w;`b9?-8aJL@cTx3?}o_a z>ib5RCCiMH5)hX4esbT)Ky%*+yF2ak(+(~RNWyLG8>wlFx_u*UP7)a8*c_~VBSDUtO(cFmKr->+PdwSk&TQMW>z>wiv+Yk6X2`{v z$8+a>Be|-AeO6zZ#75l2ocX?y(E^}%H6?G~H=;dCFqI`9sO%W^VG;CwoKxeqI+z7D zINkS+^t>HOJ_48Fj9L3e`1G8~P;#=k2)88rM$+S`h+ftIyTB7NIjTJeSiBNyL+>y} z<*QXn;>|i}bt2HrRW?de)5zeA`0wF`pu}Z8zD-xFC`(+ucdpTBWOH+$oK0^lE#EqI zwX$hN3Jj75eVZ-h_5CP0f(dE-_`VUQO7_wFAQ50$-!iKFdQ~rl^|AMofmTQ%Z6Sx#Mw7c1+edjJc%s5 zla67Pg(cO9)KBf&)qZNvOnc4kyKc&|+1l29bRf(O<51>TDWjdqV1KEQCBm#J#T7Co z@-Fml$gnnprl!$V4UgM6k+)BEAmOQ;m*BbtKoT@TLXuiKs9l}*oW-P6o& z(_v!i1XU{_w-%VuQq+`d%|?rDNU8UiG%!l9)-(~WDk=w8KsCMACKs|04^3GcuVF_^ z%~fkBAz7+cF=Sw?&V|1HI+}IZgO^j_MHFHE=Dv|uz`Y8MfMw;WZB5O8G~lCJ3LU^u zX0-uLk_KNd?P)~SwL-J%O=kR4o#UU#?%p$-{_~qMjrnZuTV}SOOX_-&rn^NsFK?@2p97W+!(cpmnDeDEsLzk)_OT}tBm5F?}ChxsY> zAz}Zk?k0mirrUI=690h)NuhMWItCcrdo3}$SW$E6eH)6eRQ?kENZ;dXL@Y)@{(#Nt zvMlG%#i*=Kr?c6)Qt!UTOnSunX&iH$a1G4?cgwiJEjne)Wz(FdnKI{|cgH%o_?Kd1 zmtsbFbiB%GukJuywAxyMOImV|H#v!TPC#Kx5TrZ8sDGpyKf%;K)cvFB2Hf-1i35)u zITnfRiA{cm&kCY75nqh{^6rqamo*|o*52!k8U0zEMW5vt@e`NFw%pYvEM1(~iY|+l z+!jbLN$ITMCN7J(jF*B!dbs^`I?XG+&3b2R`=<1+kL-yj_rwZ&G+)EDJZplSj^E0( zjy9H*C;MOd^F6V@KD{S#&*?qyiTcyC^Wz>W{azWbwD zf!_S-oVL6?ZA%Q0QOjNv|Tvc6~5=ghH}6DwZW)_twDs|7i64v7_ujC-ADkfWS_Hy@1$+sFXk;aQ~j3UqvrJy(ji}f;}$@ z+Ayc*u0&Quzy6%iHo?RnC06Y0{r?h`(+1CrFOOf~EH%9^OG;&j!A0^v9bR z;ooDCq2RFyzoEio5q?9>$0GcOGLJ?04OJhD@GFv@BNP$H5sFCU2t`D4gd#FILLHeY zLLHeYLLHeYLLHeYLLHeYLWBOiYk=d&m@|h@o-|>oK_-Q?Fd5T?1{u?Y3W61cDx_85 zSK#I0jS^~vJZYnZ8X=F&D4|BkBRfi{5%T02CDaII@{~CNS*|;sW{2KPZQ$$kv+&4M zMz)O1&?G_%Ja2&@3{ApT7>7HKs&Sa(h>uIW4buV%ZJ2GCmB%72Wt20AikS=IryjGT 
zE@EK6kzLkV&SM->!~|paPjWXr^Y4xyqTIQ4IEtMJzo9}W!f&YAiSQfBbRztQs+|bG zBI!9o5rG_`h(wN1L?lNjB9kN3k(naYk(naYk(naYk(naYk(na=&quMtQG`#PG=ZZ? zgupj~97QmpLB=$pf?x%q3TYMi6?l1gql6kEPueJbp5?$?Z+s+~B{}!inhojc9W7mn@U#vE&kw{$o4G#~mNgFs589fuJor#ps zM6zch=`)eWnMmbKq;w{dI}<6MiR8~j#?C}GpNUMIiA)hm5$c3-qQr=ti3DdN)iaR{ zKN5lhxIi;baf;uR$Yz2%;Xks<_?o+0!-Lij(RDTyqk6bLP}qd12^fx>uF_S{{t zDk~zfU_~T81pe39_g6#`eNdl^_X@^72D}d5rAR!nA`<-*$wvPqe!tNN43Y=_euF=l zNzdq%Po>jq*l}!a+PIXW95LkM{{ZJF(xVXnqZN@JEReRID7+sMULsIh><0vLqA+*xbY3=lF1WC)Xm6_LV< zL=xTaK-cF~Q=n}7T>L__+&n_TMqg|2dx9*;#q~^5WY2E|d#^gp#X~03%yiN% zwUBh0^P@>qFK`+Va?lwy^a>5MN~5oeDA0tt+Jg{i8pZik$>rc1FEC~Rv#Ovb4!?5d zdQQFTIpwbBRJ&g1T8>a6OidC;D%a*LBvervuy9ms>+acIH(?|5@y*Wi=Z^jt&6I+4XU)d zm^I1N#A~RCx|vaL=Wc2Eg`!>M07%PEgleazET_O`b!qC{)bkIi+aFSQJ)}N*NS*VL zdgUQ?$3yCehtvTNsfQg>|2d=%bVxnukh;(zK5g;8haWz$x7nPW{K)ndv0(d(L?8IN z?JMF>5blRQK=}Ul6+IQ=z0f}-yhPYf_yOT#+gB6|lbLEUN$yG^)2xVS6pAb;D2jzS znvNz?fy*B;9*>rmQVfJe{%zh1r*{|VnbSGi&_QUMgaG3oX4zpjwVfJe{%zh1r z`<87#-1|`ixXq2%_QO2?2};`!CqBIW@Zjk7!`!l^irWw8ITeUhQ-w@pa{FOEL{pR< zzqm-=et6(bl!PXp4^=6Y$x9Ycg|WK-0CqYBK0+LaaXCrSbK5P;eivI4s&{Q6T*Ww?u<7b)|^PS1ealp z#kIEB`gkndf2vFTA|&FSF7YQJ(Q|WG<{e$m9~;rP`vH}}?iqrw8XPdV)8Jk~_R&x( zJ7oX^gY2deyOQt=%lQk-`3v^m5Jx$G5#9US4ejCPCK}o%8XEYzO*AyZ{m=&p-`_+- zBi;-BL&8gh{e&M7KDKG0p>66mv`q^QZBw_QZR$3(P2GmJDQsw))X)H*1JKX_TLfMe zI0XnB+9ui<%RrWkRZ(L+0V*hGv*%B_B|5jg-LmbG4{oBp5nd$>5OxywZlb+`Q-pwU z|0deoCfeI3+8Y6^_O{8}+a}uECfXZERqbt)wKpQ(-Zoi#BkJvq#_%ReLX)*O_!(C3 zZBP_TmTsaEIj%DWqqQ$L9nP?qEqAjOL~JvwO^2D!C(74mGuO^E#tw3?tQJT!+;o`N z-^!R%+jLl4#S60UP>QU1?z5>Bm1BMcVg6!+VkXsN6VNtvny?Hx1UxQX@- z{x;!ROnyO4!J5x6toi)Hn$It^8B5}KX~P{gA0Q16Nb>>Ge1J5(ct<&*wM9!uIf3Mi z4;~%(^xPNUdhFPm%WfPVwqrB0=kJ$(c#QWD`657MHp9$KNVEQFDlev1{Vj@bV{*u#D8&s9iR15gI+6&rNfZPG?%S^DcADJOUWi zqN%*Qxv3Lu>J&D0nwvVAO`YndPLX&KMiEXC7R{|whgXMJhgXMJhsTBdZ~u+KL#BaD z0~xx2PKA6GcoldYk4^<%9$p?^9$p?^9$pz<8D1G)8D1IQB)m!DlhEVPvhxYCI$aGK(SQ>Xa7;omA!E{hz zu5LOYHqC4qQ%ImjVY*qZOlRu6Z`Vv$rcH6<`r$6-o%Us?BpDGnl 
zR>y8tQC)w*V09X!D2Eqj3)5*CA#LmmTb*WR7E4%Yw$Yr%Wy(_(L39*<@$Tkyjc2B; zYHgG>!zfm#N2%&GzGdleoR@DFcmz8=%7WIsBB2CklBe^TVzXghudxL0G@tSm@&&l# zO=)8nOF=drn;ykC!E}u`c~l6hWK*@$ndscI_kp?2L~HM-c;9di3HIS5MaBRt(|lZs zNmXH3j2Mm#kaxw;_Hx#IJ^UAjCUEyw`DTB8;b0$GN#X56yGa z*ZE(L4ju~=f)L;DIBsWs?1jVzk0;iJ{QoP&|7XZgh4KF$;y(=Wb0PjOj$;>v`1WXM za_shKXf#x4Hq_4Pd@Z0YO$W{CSbMtKny#g$(`A0i*#9fo`>)f5rk$3&w8op)MsgII z?Z8Ohx>U^Qa;CtM*W(Z#z-th=F*Q7#8BTN0%T4c^;dRro9|z@Hopw6i-=o9INjO~` zoi2|~=SHWwq?u;@HO4mkx(Y&2n|AiPieYbnog6eG@UI^ zSIV%;)8+DXsytm3K^?Ysws!WX({~N0hEp5(k*1m%C^~7CgiTcqz&4WI{9pi;0WTE^ zJnxv!5jaN_l-k86McwLHLB{%Ytu63shQLoTFZ1XSKsJ6zv*I;YA5YM zzg*dvE|sTCm1*9Sn=Vb@th@o|2-R-UdYj8E73wWiyn(=CcFPe*%z-f{&G z@%PZY8dUuC-kiYJn84PQz;*_J?F@*&-p>Hy)?mTb;KA15gfx*bnD_qq>b8rLy3 zUub|h&w?-~*1LjtvDvTiETrVQCEK3qEOI!TyTCkt}3vx-uS~gCE7mSiMG#+)d8&>R>PvUQ9NaXQzviB_(8>`Y# z>q+g^L7c~UJWmeIw5wC|JSiMh-mk<#yg*4awPVP7#_)Q62xBzEyId>-^TYI-W+uI+ z(MY8)>uK-?Z|$l@g$7}Qz)#VP;u$eA3a|n=0gQkj(PkC8SydobB~c9GiYg=d+D$(E zyJ``$vsW#mkqVG7ijGk!88t0HVqLWITRN|z95+{`iylN3IatRQvVK@t#2&izN z0Wr89&9-E$EbX**&vwL=5GMDl6tfs;q_W83ntWChPbSx7WKBlaFtR2iYfae!1jx#o ztgK;WO;*;-N+71Xf8D|6VRv!prTuhZ9 zkFlu)q?f2l5HSN~e!h(P082IViI8g6oAI)}tVZsZo8bU4WVTr?quD|Nz&7p%DcbCI zvw4+{xQyg=f=r{u?TY^G^bk)u<(4u|Da&-Tyo}*yH}+b2vP+G=EX!rM*`1Z6v+{Em zQ?oLa7hVL!!m_L^%Sw9|4`rn-Dm^X!W9*e1f zbW2KGQrbdkOG;bTdjX9}O(4aVEiKv7!j_h7X~`We8Pvj{mJDiHE|ZsOEt%HBw3bY3 znQ1^snbuPMTbR-^Q|z#{Ci%uwEusOUjXSeR`TAFfHVOOh#~vZvM-H&Y1x;y zh6kjp{svK5nkJH#rD@Br?6Nj3YtynejkRf6%gAq0tYv#+0iq4`vu?hC@&mo`S{u-5 zIb%!-$exDmX<$!7_B510O1qQHOYz;a^dk$M3f+sPo0R*_9G330j!GVw_BQvb%YE3IZ$>uEc zm0V>kbFm6Ax0qscihQDm#kOTw3dCG4vJJ}gk{nW!6+o*1$RoxxPn3cQPJwQtvNJ;> z@(kNn3A$%M9xxwE9?e0Lp1x)kqcM zG8v%il&B5+)B#kNl3ZS*CRPzDU{RTqXO)u0I&)G#nkZth^-3bj47V6OoeKZQ-P{QPPNFX7CCB> z)8SyY(xTL&C_9U?vxuEV*;%wU12j?$xfq-ExORr^N4d5XQ+cqkC<}{b8_-BGWLr_T z6~&WngBu}aT2Up-G-ehB#KN?qOk*aHXzvn?bQii0&51aPH2A6-5OKa0Q|;Vc<}Y_x z=rOqrbN!l9T|U>C5;q@)PINE28*}obJCfJUM7u^)==#i??oRWkJ2dnth${rQQ%DgR 
z2*h%r0qiczGDi-LC!dwg5}=XBmJMYcPe4t5rmpQR(BX(Hn}A-Rk*-3ngpW=63F{vw zbfBRPAOopt3V8xb6tN`hCMINfegeY*#>KOy^pmc#fu?D+{V+lp4n ze6K2wki#0wA2FuUlZI!q*wPL}F!mjzET<`FVq7|e1f>;d9A}uQ>o65oA9H}$Q;G-k zm}Zqz-K=U}Gq0G{%d2IzvP!8=Rwb*ED#YhzTmgCSZ`VS!W2TV>u%}JYZ57?NLV-q# zq4L^_s=T(!Yg1lZmQKmiDIKXP4%C!R$tklNXrvg@F(n;S=$Mj@DboQoQVbbBB}G%> zNzqhB7nr)P^d87A<)>7tDONjTryKzVD^zW0N=;}={oRy0?J4@ZDRtRX)?)*&TEsiP z0;HxeIwhm^fZL!kQ36A3nVypADNJYX5@4nSjTA$sr(}AHi+8s;Z1ME&JWT}1XDO9x zh064lOiy7ti;D=zbRa;cr(}8x(^E1%Wu^nEg*)6mZZ3D9<8HE%DosKT$;_vUmSX<5 z1K>x%oo(KBcjIp^3eMgMb~J;)$BhQ8AoI4l+x?BJO`Cb#UG6?Nr@Plfx0~N9W;~Ew zKm!yEDXWDX8c$YNtknRGG`4xaVlAg)RRCIo7hxsHPDE6ft!WJgG#!qNF})DkjRX#@ znB73*;BiQkF`pAbhK&I^vk|Ccb6w4)ZWaTL6hr3L<>NZWvN#ODOa&S#h8jv;4W%xg z%w`iw0R_t;fhH3S;6z)G?mrnFVAz5TJ%qm)&*DuFLAW84ZLqqk&2{yWQ~6 zayK2QTyDHsAODTTjcew*y=Jgm>?WJdZZyy8lt)en8nBMvW=UqeJ&9}Py1izwTO68f zHczq01g*__x}FiEFk~w4odw7v#%_?=9|YtN_4h0 zUmhKbWj#jTx{SQ~48FlMI6l#tndSbAHw84B?V4eRghcZ2o&@Cr(H8reQDk9TN$U{Y zYQ>yqNuDCB2QLxZItMeFPlMt1zENE3B=Rzs>;}ACnX6qZ1+AAbbD@o4GTu{|d2D!$ ze9vdES~Nax9ux|%FRUth_@qP`1w}JFJsu^x$vijG=m6-)J(#2#J z98)Q?D&uyh%PdHf@iX1bVyiY}faHpuGzt-x?zXemj-?{TliTwCAjb}DluVGtSTaCAEWIoV=Dr0`Be(g%G`svz+L5#aspBXU@RNi!oubCn9r2nJIHrDb zOg+vR9p#uh$}t<4fW|Z#X&tk31Sqzw1&A$c$BZeSv1H$v>>I;AHo_K=1wer88_t8Rf{&up3SmnGxlsQ&@G)cSVOd!I$%H(!ahg}*=+U!#g=^lv1Q+8V~S@i z*|%BtZN|RMvTw852ZWS;n`Pf-?At8+Hk*AwOm&C3$INB!v(Raw*FYwksp*zLtsbyT z7olRi3*CpI6WxpM#xeQP9XV#6bXS@$-I<{`-JR}Fcc^*PTv{yDDZ^#S8*kb_{}a8Sd)U6J>dOl@8V}@W>(o`4MPmubEac(|}e8&V)1>b0-n} zS((%U2O`t|1*0S~>1KkI7v#{A+{{TG;tJ4sI>?ntS=+zCD3>&@4m5%WVB4LF-Kb0y zn4ZwAEt9vJsMw15gw6oJYw?>FpfSWY&2~#-w<2~YGJzQrGGapQW4a4j zc-11SrqyzigE1*BUPrH@)es=IwPPS9StY%Wld7WCP!DuvaV-Mcnq_`S36@hLs;+7~ zspG*bM;4U@vTsuMO=91q?3*aGc0?Be!lhnXFzlqQ^TVQ-$VCC(WD9&Ggn*8Y8dbGdPmCLmknDC}k64 zLXjePJcR-dCu&s=6PtXDc-06e3NpIO7}0Z~5j_kTksn7iZOj*~lTb2=Yt0c(nh~xJ zEgO8@v{3-ZjxT0WM3L&WRYh1_rH^nofUs^Pu(w*hV)&(b|WEtp3 zbeRs<9?pyuGqFBCF#*&B;&nh;RKucgfXay~3dH^mFeWNx(Vs!}T?9xxEASOSIS94$ 
z@ld9D;Nj1-VOAtv5~vIGi~y#rgzrG*MNJ6w&jYfe+Ln3}YEo2u7eGWHb1gI>3WO>_ zbW_}BUK~-;%ljW_C_Q@l;CpHk5+`M)i5l&)%rBelc zmF6nGeQ;4Gqfj}>9W4}J*BT(+KYVb}s4;oJ>)@inWd|1(dEe_G^9sgrI9MX-%?FtR zC#C=02N$I+OFD0euQCrZG6Hy={vcDhf`gYITr}{%9^}rG$iN>SoCXIdy>G3(kW)Csj2B z2xWnySX2UNAPdnVGzfzis|FBM09Yc7D@I_-`yjU}1f@(Vh~ULW2;01L0$4J+Tsj4?B40lO-rQqggs@c59PV_{`K(6GH4H6#@5*P#5qRp&~Fz zC|W>Y|53CNDHAG$*sl*Rs#X<9F*vWcDB&9=Wi0_)Ild%POumG$Zu=FXBQ%0sljtMYA0w)D{36nNg#sCpz zfr0rOr3b%JP%s6mX1JxVAG6l=|`9B{sG%%mIHbY{&AEtATTWFQOCB6vL@ z(#lS&0+tBliV=9KK?MkXoI6usZl^d9gfRkxHii?5iqI8PI-#tXf|96$4g&LFGMyN& zUg~h*!mTsu3@42a{|wEtt%Mp992)?EBcrN_;)vK`F@Ri_Xp7Jw^sTVNVD=L&5ylB= z0!Kgx0GY32I~eCo~ zGcz3ARHL(dSF17G=};%q29i_>TqLN6g0m5&U^BTawt{3TIZmv^!C2v-I^Tn=Z*y=_ z?~u;eUcM|8RNGyOBamE5rzfOWrVcLUz2t*SSwR9a8w3ZJ4!-f=QeM{~N}CrdHU)YA z)mWEM%ouc9e;xk}Nbul0qJK+xneZxMfUuLW_aIxAgHwcnaKCKP$=y4Yt{z~;$mZ@N zb4OSc>A>zI&3Ca2C`V$V`XAJZ>tg!WVzZkq6J?U+2#p`_-wVryIsCP0_%IVrucH`f_KHY^t2zi z;9b!vL-7^YfSeT23SNmlWMM5?kqMEpZ(s1P-X|}3S552Q zRg2JFQkC3&z^cj@Zk-lZrQ52OAoP?+ggEqhiy3-dM60eLXh@P%mQ4sZzvH~U(SA}x zf(1@3s~|K>caUaCvAV}V#Jp5PERt%1f_QElc>wrLE(B;%Pm#bYBJZNsf>z;7R<-_v z%$hZsdMe$zDVu7fcco`;zBCeC%B9|=ktT?c$z00Ezko&ns9qZB|H7q_!lhixUCQ+$ zX$2yDY(^p%FhTi7QquabHDXX(dcRgBWJWXo2mN?Te9CGyt@r7ETr zt6LlCY%_byEnn=f509ufLCdI$;>CGL1LSIuqev*@B`#FN$xtV*R4$Fg$7Q)ZRS!r* zUl5edwkm5YOGz$|1s6P8tZrs3sX#TFMY{S%p?)dXA#^u|OVxQ^8hho^SmDyxiDmG4r0UmaiIiAwz0Z=xrnm&e*yL>7qN4|Q3rH5?`;{_I>@qQ8zmb2i^X#PTl0 zEYCtLEaRN`Z_X`cflnC=G$kz1mVY7k9D9vi5qU2D_W)M1zasKg*4ju+T@hiyxwqUY z6kX(-F(P8mr zLyGkT76WMk@k)-EAx`uuXaJqX&!GTNOGPb(>i@=hu>ujtEZm&c>dkdh0+k4Ws*0+L zYKm%#YKhX?&H<}MQ%8$B1#eQ(Nm5@(v4-_Gh&i!)?nr0u*s-}L;Shl(gb`xlYOQZ= z$YLyWZBbk9TFYAh-gRA;lT`Uk2^m3KH~Tlr!9v<#mPRH=iT!D=hIf`We$Ug+18WIo zMO3kxbg-F-meD>-8^7nx&I4O~;CF3JZlsKQU1ak)dC$_uH>2TMJ$NbiT8c8nQqX)( z>v1Po^4jEVS?-6v#L|Jsj?oG_S+?2Z!}HxJZ^^3wj@qCOsvzU`bLKol7F(zYtdQ11 z_La193$bc;0~)~U*L5fW!%VdT3A`fG6f@l{0|!;IJqA*&>9J;pkA@E%xP4-4^8v2) 
z@B*nhbnjBr4GC-g&Y{_hFngVY!Oqc&>y|4dY=L#>XnB9jCEI2B3(>aBxJX9TR!!xApxcdbYJED-|^Dt@HT1o>W6yAZbqjj~GCW!1kBU?cUXy zeS-DuQ-mx*-h07U$Tt-KQY3b9fdoANr7I#s%Q*Zv`I+(?NqHe_uqGH?Y^*|(HP&*0 zaqcL0j(4{A-G2M-TW-H&_bt02qJI|sn@C`=Ah;ymD%4wiJ!q_^!HmJGCFcWu&*-T| zW3g_$<6MY5x+J>GvZb6mc!|ZvJT?|);Uhj{56&9m&BhRS(>>m|!r5VJm{{>>Ekq%A z)T7)8Tb{C`aYv=g6c6+a*ByD@3zt|mFI-}dxp0X&i-P6Fn|0J0Bf*_lza`D9HMyJ)Eo&DR@iGDuzW3Gb@#s1AuE{uK0VyQ6pGmDLd zv0qz^qp9riHyBELtm3E^rqwNN)MI5wiD!+O_==&viww0rt!KnyQyv>~!~*A1MB5T7 zIf|XMSlwfJM-B^?!Vq=KEw}H!^_!oa_|~nre?+H=@iPXi1~+?#2}fB+yg}xICV8Ao zVMUK~A#UvW`whkJ_1JDlhYdwp>e+}^m4yv$KZ?-K`0}oaT3C>!?b1nCcRZ|@?GDXr zm=6 zNBG3Jf@0{N2%?-mqkwBjBsX8W5CFjkP5CqC2>ue`6wfL*IYCp=a?8OA;^L zFvg-d$!Zx@lQ&S?$%TI47N??5#UI3QM=v{i)zN^XosRYzicNSdi!wwk0_$`;Bqmz`wYQ`c3gLK%WT-Y?;g$^z2g&B`ndh~ME z_pIeDA$mFAF}p1xnLL}gB-OTZm+(w1h(0f0JueqM&sheb>d&ik&(jD1)ctu?@_Eh? z09HQF83>5|8h|gKSKXgim7k~j&8@SnQN+$QzU)sAOuXSB-S8Lu8*=!l$N!0^i+$MNzE0=liO#nokrnsF{%;oe z0-gbn-Zf3-_!H% zY&Jb}Q~DFRn@R(pKK`MLj?X=WEX53vh9p$~b;?(Psu5y87Jn3K)b*4hCkf+({^=mc zMD!`zLn3~ z0tCxk=0cPV=^6_tlrs5K&yX`DRRUiN=Xe4B6kRXjWx}h30m4qg-cyVg;1nSs+)ts6 z!Z^o~ds`M`4spYck=Wbfy98rD6HKf&&S8TmZd?+3TRmvs$errj z`ZnAc`Alqk?AL~}fuWoul|b5<$5aNf_-;e7$2@lUMtIb~(aVlrbu{2;r=z`wViO)q zISL%zZ-phkVJIDH|Cq%_!`KPtMw`u>+s$1!YqOtbD|#D!_HB^@p-IRnu1J+Y*Ls@< zq1)8I-o}*-fEUFjLthK{86d?HBOrRh+qkX)RPrWKF?J;-V; z0F@HJ9@=#Dw$JtN0K{$uaNz}HyWU%UBOt6|25 zt!)ph0mp{p?-z{SYcvfNnsy4GwiFw#n0(z}-eCVcpM@()O)oY~^XHrx);SRyo-m0i zgX6)C@jbEOYA|VdbHz};eB;udEq%`jMqf7g>Wz_}El4vuRFkp)7XSQ>OE^EGKUf{& zwKwu)pv{->f1@gN$&{+(EYph)Iv7(eDRZcZI>RIBOBt$!x-v_7mZJFJhI~~vlFmg( zwP4TnZn@)*V^7cBbG1}NlhHc8q+rz`ouDFIPYQ8!Yq5Jhw%cQmdF-&qPI~M&Le?7@ z=5pF_Ko{i10>cQN=M{=PZBZf{d$I0Bkrft$vc-8dMtD(T`Dx+UvmtRI_p)L{f7;7C zU%l)()zg}>O}E&)ij^)5vcuBAj8WQP2{v$Wmd&ns!NMQ{(At9uBpD%t?A4Ukrc-Hc zI+eCfr_#KH^v*^7b;Ya9ciq7I2(?=~cOH58(6MXpwoRt;l*HrCa2u*{AFA#~RHi_w zlp=(R!8frz*C=XO6hT0ijPk~mK>^J*#vZm!FWFCcBJnHYJs;4#Z0un{wzIV9UE4q} zsmbe$n2@#UP@ha*JTRvjLG9GJmmNBd+8g^i1p8`+a!k*L6wPXZ?)iX}1AXo^+7@xk 
zfn&$M_=ZO&KX|2~9$akW)fYAgAP<(sL5XNg^kayi)T@%@@p6E)uvt5pCNJr{ zQIal6x+Lk6$|@#Qp-xMm=c4PATNf5DiW$lsA57Yf@w>GN@v?; zgOb?Xl*s0$Wa<*BD@dvynvKOP|^zMe)Yr!zfV;JUYQ({aN2&M9ck`b)-WSDCO>wCHC z4GCVLa}EnjnF!k@Dr>&!ikj9xb2%+$jfF~$U_Bo$BgM*v290%-*cQbSxY|`ZyIdMh zhnPqCu8(*2oEYuqq;AQ?{U%Mdx+FF0lCd9$$%F4n+<$c7&?Cpb_?BZ|{Ey7Qb8pif z%BX5rUX|?9-SW6B<{Nt{VWe81f(2@lP0GV$?G)0b&r%w;7T6VT?^KqLWn6(n5B@$2 z6r1w`_B%^AcOIWBI zSiPjM;*p*g0|OaBmNb8)X{pUi3QFx=!K&v#_?hMu%u>yqI(B2E9O`h{Ru&qX`f>jXU?Wh ze4*w`n17kG`5(dW8?9x{i7}TW9(jQKSIpUL5Rm2OKNgl@OFUn~bdK2fiEXQ`Ul-yZ z2(#Qd@SKSn&Fjqdt)bZaSUDZo^NwTtw$1DdZz($2tC^W{8(B-HS(XU1MY8Qa*#s>G zKS8Aw{Dc{)IcB27*COoVxRflvv zzrxDDLj3;Ei{JlwX=B;@Ii{nq3b_B=TYJ&V|Ksbk8&|G$w93(HN5hWRI9lsyoul=R zHaOboC~!3DDCH>aDB~z=XqD?*<@#2+zE!SomFrvO`c}EVRjzN9>s#geR=K`au5Xp= zTjlyzxxQ7|*R1bq*T35JuXg>bUH@v=zuNV$cKxee|7zF2+V!t?{i|L7YS+Ko^{;mQ ztG&L%u5Z}&4ZFT!*Ej6?hF#yV>l=1`!>(`G^$okeVb?e8`i5QKut*T2T~uW|irT>l!^zsB{ias6vt{~Fi7#_PM*^{sV%YhB-3*SFU7 zt#y5CUEf;Qx7PKob$x4H-&)tV*7dD*eQRCc+Sjh{I@iC>^{;dN>sm=Pzs~iqbN%aF|2o&d&h@Wz{p-BG>s{Y^*SFsFt#^IvUEg}wx8C)wcYW(!-+I@# z-u10_ed}G{de^t!^{s#H`fhOj8(jYe*T2E_Z*ct^T>l2wzrpoyaQz!x{|48;!S!!& z{Tp2W2G_sA>$}nQZFGGbUEfC6x6$=&bbT9L-$vKB(e-U~eH&fhM%TB|^=)*08(rVV z*RF5i`UBS=xciR}q->B;wb$z3*Z`AdTy1r4@H|qLEUEiqd8-4Bird)r@^`~5a%JrvQf6DczTz|^- zr(A!^^`~5a%JrvQf6DczTz|^zn|6I^*OzvEY1fx_eQDR1c718rmv((=*OzvEY1fx_ zeQDR1c75sBu5ZTmXIy{A^=Djv#`R}hf5!D^Tz|&(XIy{A^=Djv#`R}hf5!D^yuMl2 zmvwzv*OzsDS=X0!eOcF+b$waamvwzv*OzsDS=X0!eOcF+ea-p?KK=$PO@FY`^am?V zf3VW@2P;i~u+sDgD@}i}()0%_O@FY`^am?Vf3VW@2P;i~u+sDgE3LkPkH3MBzk!dx zfsem|kH3MBzk!dxfsem|kH3MBzk!dxfsem|kH3MBzk!dx!E2Adfsem|kH3MBzk!dx zfsem|kH3MBzk!dxfsem|kH3MBzk!dxfsem|kH3MBzk!dxfsem|kH3MBzk!dxfsem| zkH3MBzk!dxfsem|kH3MBzk!dxfsem|kH3MBzrkydzk!dxfsem|kH3MBzk!dxfsem| zkH3MBzk!dxfsem|kH3MBzk!dxfsem|kH3MBzk!dxfsem|kH3MBzk!dxfsem|kH3MB zzk!dxfsem|kH3MBzk!dxfsem|kH5jf_#5e7XX*Mb@bNeB@i*}CH}LT{@bNeB@i*}C zH}LT{@bNeB@i*}CH}LT{@bNeB@i*`hH}DZR@DVrg5jXG=H}DZR@DVrg5jXG=H}DZR 
z@DVrg5jXG=H}DZR@DVq7?GZQd5jXG=H}DZR@DVrg5jXG=H}DZR@DVrg5jXG=H}DZR z@DVrg5jXG=H}DZR@DVrg5jXG=H}DZR@DVrg5jXG=H}DZR@DVrg5jXG=H}DZR@DVrg z5jXG=H}DZRc&G;s!qA20r2jKH>&G;s!qA20r2jKH>&G;s!qA20r2jKH>&8 z;zqykYP>)8@iy@BHt_K_@bNbA@iy@BHt_K_@bNbA@is_rY__)9e0HRN?x9Fz@bsU( zacEhVIm%^O<};UNdGVO2kw&vsaFRETm(?5J7+TiPn%Ql`?0IdNy(_@2Ogq_4naJe7 z7U}PtmuI7d&0mZC(O5H`sa{g&$+KQ^<%Lb&6#Ck-p0D?J`a6Ap*ZEXu*RFeyo!EWr zkvks!^zDz{{k2HXj-E@u7D)^zI)Z8Tf@z4zX9RmcBRFtC@cN#=6-so2jXfQ7A{0F!W;B5%)HF@ssdA!^ilUBgp0S9$ANA}rk<3gZDW@QJ6Go!WW299C`c zZC{IEkProemPHc-2hPU}{kLCxwd3o-egC9b-#c262Ipr z<3C~iC#cVnLvsh_4s@$?{5T>{nBWr<BvsToOiMOiGZX_`oc7ku=bW)j z-WbA?U=<%{7$HvlS5g%JmD92!{wrD0dz<*Zw~60-oA|xAiQjix{JzuT_nj8M@3hH3 zYVwbo{G%rSsQA%Wa(t9r3CxUzhz{x@+rQ!1vGnfF-0oXHK|xU~I%-8nt>~zVh~0f& zbp02V=9JQ7clWM!PVd@oTsg$|Jc&V*r4mLo}a7o$={%bbi^ z({oobnBs|1GXAb;BKkHZ{5uvlFN?01J?BLe*m5>PS=~rr<)0HfC#~n=b8_`ud`^nF zqMOM)(v4VU&PLA3W7X=WSlzmj-!j2!dNzf6E$g|9I(91(eVdH>cPy#-cPu$1eob_} z4uUWuhP(WqUVZ%dv6IIS-18(q{_i<($AOP5xFx#voT^9Fk#jwcaGRd) z8VO-##38H<2p4&8H|4#~=zD9JaI%~Cq%&f_>>BjTP-1^q0>Vp&@Y46673n&KH22&~ zUFnxXAsAgDNIxpygJ*{h!rBBQa_9yKcLUcBmxVG`7m0L_6Qmb(ndLul*FAIpDfHD{ z_a1%o-Lw}{+^-N`;ALS=VRXxZv~rGyW`Hdd*zz3pEJVW`P?jP3N|-~0Wf5U{Ak1Dd z!dxQVUWNd#@7FuKNQcu<9PQZAe*UN#&rdwp0ojBeSGmTh3{yy$j!Yy3?Mq{J2? 
z@g3(4oF^3`e&^b&58TVydYsesjwk*1REM8 zGstoXU}Tvo@{l&ilE31_IkHU|B*VfEJ`;bo|s z79*x^x=Xkkx@j@2TkaB;qW;+!>YuxWWw!n~d zQI`j4Je?$6+aR6C>Cxpu8V~n{<0FI*-w-~0<5|@gk@%a8*ccKN?i^09kZ|{?lZwkd zM!0*rr1!J#9`VdQM!0*rq|0FhrWy~@dp~!Nc;*fx+#Oxg<=h?DU+qrBk+i=9a|aHQ zPwtU$=$=rOP4-*@|({-{Wm9{{npXNi;f&Qa&+n9#qT+P*?5pIuwiL4$==*S zkUxJ}?|T!!4k~AfAXwlv%jb~i^0}Koe_356SRu`1Z((3Xz$Y#$h8_7X>5ZJfEZ`F( zbR~Z~8U$U22~vo5eNDDQsm&}eQ{WpMc9X@@vq~_ymmPOaM<$aTSYg_+&$OH^lue)c z_%bL4h_DB&N+0X#jTeXqhbAl!z9ULdk%a<)EC*x5H5?44Z!bo$AGtDiK-9}?%H=PdBS{2N&axUq86&D6!IumNt zQ<`)^(Y=~8vg=JHgq{z{t~i=Qf096n-oTCQ@)?@FrW&Nfb4b^!-Gjmz=S}Oxu@gt% zdgAzrWBk!$J#y^Gkz+?5A%e4AsCzaLJ2yM@*_5skWP$7&VJD&+CujY;gl=xmxwB=3 z=Z9s|;6Wy%)p)yfczj*P4w>uFrB&A%y52fVcg@2S-SVu-c|QEy&Bx6V5>Lli`SvY}R=o(s$p$Gl?sg=#pkLnf@oHdV8DfUKSKbgK^D&E-P1gvzKorEh3#| zqxhD!>T~JKdTWwnKvH&3HPYnzcJ{LGsVAMkY)m46pj2)0=J0rd+4l#|Up88(v-_%a z2jiD8jIk$pNM$k%S@kAw7xQv2Rc-TyWOtMb*rP)X70MpV+K@VZ$Jg(;_pk286I-rX zwdI=CTdo=2a?P48*R0)g&AKhutlx6YhAr1@Bxxmq^i>F~LShvntB_f>F1jN6=!bX4 z3Xbyi-#||)99Xax67cAQmf9Qf`vw&^U|0)`!SnQA@u|uN!-xQrkFdDlXNkJqXSqxFd zkQM5GQ|!>)7rcwLLQ2)7$_Pmc3QgbeT>qU;_6;XiIMs7Smtyg+i=CJFqq8&@P|oD~ z$%!4Y#oc-@P&=Y8#5Q-i=g>Pobk<2{$sOCG*L8)@k~?4?dgRH6Kap;A*3^bs<+Occ zS9Mo>M{KH_ae?AMEHFi~DKgLccvAU?3sh+MbT{wxkK%8RKi~IU{Q1Nm#h>3M5P!4k z-1nTI*MBHh7@}zmsZk8YUL1n1H)@O zBaKWX(tcmfzQ`C#Y#NIG@z7sAFw}cxV%JdgucIG&U?{rM;Qx4FC^j1V`A}l_14BJ; zd0;5w>5F^WN0yzYY^qr~gE zxXW{DK`!>bu$bsX?0v?ii|;-*5_OhGcgJHT@vsano>A$sGooW>oR(bdjAUYSyxMC6 zmv=z3UpFChb13>+W1c!|LNDNPyA_Ug|e(aax$9^e(?3X6L-{kk3{C<<)Z~T{x|B~@vGX6_e z&iAaG?^!wDvvR&?{L74gnei|C;MH6CzL`JT==P=jU3}5qgQ&brqPk^|XrPX02#@IA zk4LFu+PorA?C2|2?A=!E-Bzq_K6wf%zCADlim!_y%k7stG)#+W9EP^C)|1DsY`f>k z*$7f$#M}DW$hj$YZY(6(>ESU{IdT2fiEk#}(f36ENIaQHrH9kQYwNbU!SKw?JUz~Q z^iNOEzvqE@dWU&>hxx>&`FM1m9%!B(Xg>PK^MCcge5^39Ze?Em)qL#d^L)fYc$x4j zVSuocuy>wMxWOqxK)8QC_R4(h#d+(D=2`G=-a4dty1MWyGDj>wJ+D4#o|VZ0fl+~! 
zKw2OpkQK-YV**8il0aFYB2X2m32YV^7pM!Ej;5%VKwDr!U{YXee#i8W=G9lu z&q735u5VtQ_HS9+%&eV{ZzP2;@35A&^#rh%Ps0STc1YIp?VJGC^39z9*7Bj90M-u4 zT0UwMz*<&s6u??Od=$W1J{A+eT0Ybhz*;_06u?^6SDx4KFuyZ?@w|qJ`RJ+n9e)^o zzm`SUw|SokKP#M{g}Hx^4&8o@ar;@)^GEv`efD?u#=fz?!xv|Op+Ep0pC<}r1T10F zQYMW%X`DuLfBe*A{ojgTa2hEkE1v|EOCm)vi{ci=E{b1NLR|@UCDfHrR|1!TzxCI+ z6=p+%4GE$RAi;)8s3@VLgo+X>N~kCyuY|l3@=C}nA+Ln863R*_E1|4}vJ$LjRxzvB zxa7tqH!itx$&E{{Z7TP7;>)eZ1N)80wA(i~dBg1H=FKv{wtqz;xqn3@769TyqWD4w zl;{)vxp=Rk*vEvf6aP{`{0RUb6DgC{xAE8kN6Jmwen&rWr2HlqlT`d4#N`7ekb(Fg zLG@IWB!xXuCHzp}B>|NcQ%SLpMOfB`O(V8O8e5x@=12sltOzmyJhLLwzZQ_G3Mlpv zU{aJgxJ1CBmO;#>c$T3pUb_Imz2p~|RJ3qYs&VxfhS!YfR%3AcdqyJjDXut@56t)e z?{qWk=_2Om6K7)W`N7|LV4hXzlMn3AR>#Zr;%0*QkzHN5?X$MzZh2{r^pY3lyg1G&Sdc7mp?v zjjmj|AsIL|ydjx!YW0R>!Krntl2xZxtxPtZTDdBj4zsLTn@op!R<2K`!%VBzB-3H8 zwaA9q)~@GH!%JARE;$xv8-@&J*R4&Cg|h3{CyQO#HOXQqyAE!sV*N@!onl0_!2WmaR>e!)&YJb}O_lSqZbPUX!eJEnAtagxS`v=EXrTZ0*`) zrJHRySqrnRU!AOF@am&o-$E_&48t3fwXXJ6$y&FVmC0II%&K+C&D~-)BsYiJSFcKL z4zmrfPHqmfty{?=NM7#h^~v#2_W!5rZNQ_t&U;_&`OrwRlc#b@L)s)vzMQy@9i-6+ zaoXA*X$EPmkw(_cmXL5#asv|nc!EuhMmRnv$#X3uY(nU%3FgDSv1@8VoYWke+7#E8 zgyV*qkOnyMz5PhZG1qQxjk$iRTl&xwe1HFUtu07~bC!-QAY9rK?fB zEK66TyJgeWC}+ftE^Fhd(p|PJn;wqtwlqELFKcT}5BtknmZe9dxNYfCAGdXBdbE6m z!>aw?Rdu#;YkIoQmpz-#wK}#flg?!v+kw<)VHs){-FW#@cz?ULj&y&Nw^kY^^r^Jd zeo^_BQEyS|8EQ64zP*F0jpDacGoJ6DJ{IoPk`B(1n-6v_ZRzlIxudO$jy%)mXxlPR zeR#Ir`P)?*;Xq5qS6-fFHS2}n{`99ypDta!_~_ix z=Vzx$>yJ8HmM%@#Z})6#x?U+sN;fSO2E8YnF7RveeB@g}Pcr>d#B5)>!huqyV!7Xk zx35SStk4Q>@1vIbe8{pDX^wZlp}U-`Mv}gC<0r20cMzjI>;pUc(w*g8l!@gl(p?Kd z%LxigB^MEuX#BiqThc79lnNu>+nVNR+99b9nW0;Al4Mup%cl7PRADfz5E}0!Scb(q zq4_%S_2n1GQqk-n^I-&Q5T5Zd;n>I}QD&!z4!U zYVtX3^A(P@G<3uLLbtSPG09(pF&oRuZyG{%0U)I z44qm^ES(aS?ujl>H~tUDwlDRx#naYwcWzM6yWX#o^|7Sny}AJ@v}Zl_xo!7%X!mz$ zmv$0wnNNP1Pkx!tuxxL+zuMzt`Bdp*^}p&xKEY)^K~r}U?C=S8_ykS;CHFPZ(Kh|a zD=*LOpPT;67rspePH$bz$rE0hTpd+t*8^<)TvhZoQ`Xc*48Xq zRqd&Uoh}B-P#GwA&1^*-9`X9j4WK^&fS%W?VhBq71_vc~WB0u-->O 
zAr{!yG~e$WES9TjRvm1d;#y2uW4F6Wn=3)9DGBY|+NROfnJC3{mDSqj3fP);mDHN` zdGl)0nx%*523-rcW__AwXh_5>ORHCw)~qEKdk<=3G2vR5E1WUeKRgd4-RZ)J~LnOf+984fLpZW4?kaRZf<+On%KI zZw0Y^s{l?Crz{<*;=WZ!;w$#88o}JB_pM^4V-IDk8mIQHDtDH7#X_w5|3)l9um-$q zX3MT!vxOHLuiBgBt12J-^rG2&>Q)6~iogMsoUU1@a9QsNLrS@lZv5~2R+Wlm0~zP3 z^cKaeEL2=9OfVzl zVi#XV;!&}`+{b$$t_lwFwd`uHpBw}S!~QPaO|rfPzVQfxe6g>XC(JyRqB@sF%%dHH z#n7&%zg_mQ>d(Vv)d%InfigFD@lt183YNX-! zjdbV2{wjUJJTyF~JSy>r4Li%jMjJvSU8S(o7~YxXx;UELd$?s+ukjoPx_RdCw`X?l zUHtIwtsj`#^z;iKI9z8zrDEP_W6DUwpA3|8{Vrp9=A>btiMz^u?iLjti0Va@u-{k? zJnM-{!v_-wOqjgRNKY~EQqw%RlCJ-hyEx}chK`uOkn1i*H}%z0<@YZm7D)0&EoaQS z%rnHy)=6^%d{NnzIrU#nR4SI-mF1P`)P55+zHJ1hr1_0ik0K4{c~5SoOD*wVIyuA4 zA}rx^7JTqe|MMXq)(fk*)Wv>;lA*8_Cp^0*>y7&nTmD&SUJFYY#63D<|~ z7d1v)h#SE5D%v2ff~(32a*9$AC^e3cU&BV9>QFaKwYv}o)0~ewT{qAQaUNKEXEy{8h4Z|0$+uI~3^pG`Kt3bMFuc+RlVaD(BLFt*b8F2kJRlOPipVeC4? z+l7e_nY_vPhm2ou{D+PIxsXvnL{k`11_Sdli80EUAgXDMQRXowXWn9dCN2fmd^!G> z|KSS0v^`Y2y)K9AEC9WNyg+V9Fe>N~q<$4B;(8UoP!jYB`hnCZfiiADQ4@QC-Vi7Z z3MxQV0Ve8|cfiu@H7u-9q(O=6-wzaZ7rq~(EBPFn+BaFdzqY6Ljp;4pwcGzXcvCP1 z#7X|I<4wdIQSuExtsfToS^aSA<8`^7N)FPQD-QDD3fEoilpvq$t|++JGZc!sjR;Hy z6jTiPV(-UUAMP$eIM18oykQDO#p?|jFrX#Qrj{Qg$Omyh&N4HxB)GLO2!_~*@9Du5 z#T=`x{Kk&WyQUEjT)s>E=lHP+d^`B#EOUoNyIG+)tVDQ_wmhsLeU14KZ*qtkiTOfgGb|kxK>h@}OHb ze}BaAHB}4YOTER~#$C^?wc~WhChz}BU7b=HbQe}=Ieb-@jZl0S6Rr_4d{fz_QR!g` z*0CclsQxz;p=@A?LLlg|3|2XiPre~k3BpGQMVdY&Y`9HWw7ia3tPxkJ+!6)k`i)Q> zMw>oj#0q5`+1)r5h7)VU;c#kraz}MVeXy@CSK>sxeS8-P=+5W5*I_-CN29`!rSys?AhGwVB4KHd8It zX1c7<_-^C7jbCYem*K4r?IP($wYh3>^2X;)o;OK+kwOPd-f6hPu-~D@=`+61uIe+1 znx@)3e5WRE*rJr*irHEcZJdp%E=V4FE`2u z6o1)wL+da5UF?-qDktmHuU%&N(e6kt$G0vNdBNkZ?aaA~>I_3>ii)AShVXo9cut4` z0%O;iq<)NZhR68`c43UK%ZK6Gi+q}?R5UCY<{b7IDLXXsS&Q{qi}hKH_1T=o`m75s zi{wUt@-Q$Q0(_wwRIG*!z~lwwbB-WOm0(jNz!$K=Uc0v2u&_d59{?IZ12lXJC{zGW ztp!t`WM~MlEDFNL|Ex%Zf|8(5&<~_u1j@>NVfZ2kk5XJ=!7%5r$4J?s5sFJJiYv5O zDy}>(nBwZ=dH}`M8JB>H%R5pW#pPA1c~x9qrW#UQ-kcgsaaI2+$i&8}O*G2{VJbc^ zT~`1UpO0UC2B7%JDu9a5$7eqYP+WwAf|8(5&<{{t@}%4shJ(*D+7$*M)p8Z=EiwuI 
zJYUb0q^H>31LdPm0N)dUbaC`QN1=b7lH@>scHIr>EOvtVLM{*K&Pg=HtXoMGMj(89 zdjB|!5yRoev9;qU) zK#osceZH>T4`_)Ml3FgY^2T>`2O(H0E@AGB*pg?J7(P^K%Q+RZTppE#HDE9|ES}X~ zNpwA96>j}WZdWUpAUvRK$9@WWip72?N9k|p%M~NyIi-Jl9bZM-&aj?a0qJ#wt>y~ub650- z98${t4gyrz_BxIx-OeO;dtCun#FcR&jx+NSN?LaBo-s)rrnzHcq=du)r8;-h%lkB|<0B7hmw$^$Z~v z*-@UKT%wFNND4iUQmy!6M};(jg*cix=6x;- zAKoI6iU&ghpNIizCy6J7e3l2~V_d80$_C__W1<~Tnf0k=^Qg>pC|70})$NsKuux@} z?r#@Z9k5kG5|v#SJ8E^nt_c&W%$C&JSIfpg-#=*yoz#uI;-TPI&N4@HCHi;F33Al9?lzs{>Wb(bP3Q*^0IQ^ z#C)V?*QPCxZP_%vTXa7^L$ZLC1%CrRyW*lY-DClL{coi)y;vhlYJ`PVF_>G&+=jK1 zAcrLrWxDDF$sy+-@e%rRx0&EJpA_fibeSMO8bx8k(QBexWuBB8W>=&d#aO|@gan)% z@9sj+clqFZGZxZK&RpfNreKmvVZ^06R2Ug`$rXoPW|?NYWQxP?>aLO8@UF*fB6ao0 z+0j-FlhxIBjaf|Hnyk3W9*9!uEeZYQ;b1z z6+vPzP!*T@RUpI-$+uE4f*URTPwWp)dDpZ7AnzQs{eFPF1LU24UKCI}nl?bmWn!QX z8D-Jj0iY~Q7@h(Ym$HBc(fE9nd7mRwnR*8#AG6-En>0}(;hL)BBmo^mV1Wx zNG@ewBY7rDkk>?B5?$UJMdrmxlpyaGy(CJIw~Jm9CCHm-Cuxro%c8q4i|)QGx@uW;)v^xiYHuSt2GzR7uIzcIokg*)vh&^7`0IqJ%qK z!tA5Cp=GI$*1~&5i`L?G2kM)$g_nvx$Y-`C(~|R|OpA+!sLPAk>fxgE_S`X1?pivc zn=E%h8Q)DZJ{y^(?hcuyE)CvES`wM1F7KJ8F6)`4E}_iQ*PKcyooixueyxybs1lA znO0w_R-az0PtW(G3~y-N^*%k`&YItMos91}88&V>xlfPR^v>_o<8{6B`}BM_%4C*D zRS{7a&v&Yf?^79ecG&f8K7HTCGQNjpdxG%r&ahZ0Xp6_oNwsp98K0V*-GRs;P zHnu%~g^IM%A(98}lD4HSm)X|KZ00iCcA3pyX4@~b%PzAWm)YeDY|G`lwOqbi%jLVZ zT)tb&<-4_9zFW)XyR}@tTg$@Tvc7v|efP@x?v?f3E9<*g)_1S0?^fBRuEMgu&t!d{ z$@)H%W!r)(HrtYo6e^-#?iwQNJ4DuZh^+4jS>F+|z9VFPN67k)kY%HSrSCgJ)^~)g z?+97n5wgAmWHU~ovc3ak{XmlS9U$w6k*rtvtXKG~SNN<~@T^zttRFYBUa_-Yv9n&W zvtF^Y>`bwIdd1FW|9|Sk4`Suycm>XS%bI0h zl9T&<_(3GQ%-4q>OR|0_$+9iU#rNrX!<_Y_MwV?!7S6ULM_mWY`Y|NSEu!M$v6xR>1OEc-b4{c*hwkm$v(rnsIIhQSMPxp8kJE}d$Mn=yq zPnX0s1g+_kDOE#|NtaAL8-liU$rQ99Xit}%DlKDU_prORr6s-6bFJwS&t=lFzw&Hb znv-We&u+PhWow$xZfkqGFf!t=WZUws1+|`))`MN$;kCBIYi)o*}8ozQ!`Ibe{bi$EgN50JiYh*vwTtNy}8^?b-~@i(bzv4jXCZ;xq<>afzI?b zK?9WUMt$FrC++i55g-2fF;$;f2~YqW^rSiGHF4g9ASahQ&1gCyUQ&eT9vh z2;QqtUhxevyX?IY!)d4AQD6_C&tm!T*iE;>-FT-(=HJLtlJY3%|ZICOI_mZ_xJ+0@W^kZOiIQAKp@JiY`=V>XMVWj1kJ=uH&vk 
z$WQQ{#O=z*Pa>xNzGUX6y3}@I;|0UGFu0|w#M<*hDiuD5_A}O0d9if)FUVh&!xeBv zTp1VQSYrj`V*w3Nc&{S#noLnxOal~9Qb0)oB?Yho1Sp_S0euSSQvgeDfCBmz(64}g z1+d}0{Cb)pa2$?0m)dg1Il*%vje^jH8~l4D>zXXpEi=X+mU)FJZyw@FGWcl zbY!t3?z-fUoo}}zn~lUa5!UN0%tcAQV!p(2Cw#(4eOV-UWuJ(4 zN*X!mNTU&UL`uk>ND(#+I6CJ7KR4%Uo|VZk9Nq{7UsgfCTt_j1dU+266cF!0!D7KA zK*8X43pN833E3H7J?{(f8}vx{8Gzy-Q!qv0lH>9|0Z@o92bZ>_?t=W-h)VKh?fC>6 z1seoY0HygdWm&scZ*$h_k1Vep2nM<5G-$A%ybr1XK)r;Af%TMNP{lxaP_S4q2~Z4z zDF=SRdP)N;SWlU_Ua+2$5m%t%AvNfc@G}6#LuPSafunee_=JRl`ZAU!8Qz`^S9^0_ z7YwSLz&SyqU_&t2H5hgk^~0sKc~$H|6?;%kIH(2;Z%^uN2{k}(VX<~~tc`L5iMs{$ zfSqLBb3@`dd{05rg8)m4{o*HqJx0f z`b6Cju814K_2NpnK3qSpj2pyNDnJz%;#S~p!=r+TuVN;(4S1ma89p|y^jw%3T_jTp=b!GPT zx?qbD^7=aK#sE2eop%TV$*Q^m`Fj0uYEOI^@>YER6R*b~9PF!#6#IAq=>F=)`QG`) zwPR1bj?M_q2^s|(1XDn4rMNBuNwV)8hz|m>>jbw0i4RHEB<>+`>jC-(sI6KMc52DP zMlBKcX+gA2ON3oo60%84KKAuM-kOuBLwLm4HHKqzZl8y3LVJ#^Q?uG^E=*dC8RN%= zY&MttFAe|ABx8nO6SCP{p&vE=tHys!$o^^(@@@2p_pb4jjoTJ?tEWjXpYi^2qle$( zXI`Zh)gJ0T=&&X@Vi>G(WXwoxt0PG#oN>N!Bk|A0huaE?TX=^_Urq2YpFdfbJIO~F zaUt%}L7;#upS(8q53inVUKc#P{bX}6fxCDT#W;!HoWug1+6R6&~?Px68pcMjKx+i())JBjX{l!gIwC#BveACJ9s^6~cscbt4&F0n5*eDZO& zsCDPCr6(V6#24FiQlCaY`8Zo^3l)1-e2kw^?QMAE;n|0upRNVj;94xSqj7f7$e);W zG*&TRDg`02-{ zr}y*kVG;e+f@`dv$gG&u^`5dJNdEeVJ^hBG2`WnRLPZ;k#5MhzqYW|dzi}Z(i{t9a zp<*Vp-cMQOPyO6c(#m+rM;%pF*%fSr5^WfDw3|bX9M^QSqltfUH1(9HX-6BjI-2}j zFJJ6v6ZK7sRxzU5@+2C&E}YsChL7zCho=sN;Z!&rT&3yQRho!h#g1jFRGxaEo1+E` zB^YLAS7{P}{9tfZbC?&m7#OAXcsqX;z0=c3z?AJOP1%UJTd)~enHx~rd;!^LycG{k z%szODX;;rB&){YL04RWY+g0r7a5rEUag|=Q6Q)?qQAxKv&@;7iXf+eJq42R`^}ag9 zxAh>0CXNG4GOl9C@3Gu6mAxuB^6Vpq+k>k#@c^cP_#nXS<0_kdT=jVDj^L`tne$%t z__`k@PV;l8s~(T2c`%dh$b!F0qzgksLk0dqHs~@*2!jIK2XF>u;|%+aH7tl_pGu+5 zoIp{^f})&22{GX>u+bOb`3D#=Us=E zOl)9rc^E8c(?fx&Ai#!%g0?Y`tyal;EGEq&xga?5YU*DY#BK|gy!E5t?e|<}gU7Dq z30z|SwFOy#MaIL3zdhh9Tv-*-C~as8 ztJNNOf35bM`+D=_5xVEkl-#rt43B)691|Wkf-=Y#mBGbW?8u}eyB*nV1Y@H3t_)7{ zh9f;jPEOR^_gUl7%wf@Cs`oO7hnkqt&L zA_~Kdh|q?$&9%vCHoQExg%`EFudM~2sSBRL=tE9M1dA;xn`|ZZ#0wdg6{Zc|3iR}u 
zT&MGU`b@462|V*muG`0Olo*k~GtcDmK8B;j@JxZhNH_EJnH)R9<@c0`k-#%ZdVLhn zkYO*0m}hb&AEm^x!>S86JWoINn8``QMRsctexQF;(CDHxNdXpNj3@@7@|88^cg91$)oxd>8cpX3sW~3FKF0QFy7$` z80irf4Gasnnz&b(Gvp_OCMr2}m-U&a-=T@h;>(7Kzn~l7OQswx7YgI+crYBA9{bX2 zUugu>n8H3s2IXN7vr%eWL6uP>RYO!svZ~8Kmt{PZh@ZBAiN6T_O&u)^S#+1es7tTE zdPD4A_SrR+|5(-RYpOj77^ccy z8A6WPDLGwgJPg>K1MlopBKWrVeM`MMloE315L9~D{Q ze0fLWH50N5uCU}@BP>{frU(nC#LFf0q7t>n0kaZT6v_LRA!Jp5Q= zu#?ZCjhLsZ52QIczq^f?3hVhq)sAW-Z#r_$kw!;0I5K4}2Eh~%TPdzfkP|!^`zq}Xu(%1_4kSJ#S(CVj#H|T-<|*H&5*Vi&3?in@a(4uf>iy&9(e8-m>c4`j+{ zcW#ApzqWpD-6PI?)*($@5Ac%&kVmo|b^PLYhaLlW@FNePAdrsiU?I3F0k9-k`6`(B z4w%{maQ@;oKYGN);f2>i`u;Xo?f-B*)^>M?;bb*GJbTAi$i&-haHR-}5r0D2f19VnD%bFH@t(JX zM-eXh(e}6PnG5oxM;r@@z287#|19Z0iz^wi?_kP%<93Smrb?wkS#jQeJm}caG;E|Z zH>j7N?QcK+H`l~ok|_RT^9C^qR7v_9n_VXXtUnFz`Y^pF{?Th-44r7@Mi$b5tkfSoF@RDgEs4d1~?~Z6l?%^klC!q zn9b5aOU%KuN;0;DOar{n0_x>G4Dccg??J(0!6d+IEV$i*%|N_WvNPh|5cJ6V8GyG~ zcoo1~EFvG5_X&VkSxY`gvi@D_y?poM;Xn!cE{Vo@kCgkNfV4x*Y#; zMgHSL{^K4UloOQXfCAA7FGC3L<%-zF6WGENtUkjZKf%Zkoj-xqJyDlvg}+4DUC=$y zTGV~>`4wZgbI|E(0uB;z=mf?Xy66PU(9mrs7}-zM4T!ItU>O=ZcY;Cx1k2DTu<$3C z2b{o0bNx}m-aWyb;RL3Yo4$0SuJ&5$!@}fVVZ#?sT-!|Pu5Iq*cL&wtz+kx)rcF>p zNP$^vmlIHC!61uM9E`nY!Qx0-G7uJUR0PZNV!{)_J11hBPO$KNf+gw`tUv?tPoJJuzXpFm-7=Wva<4Y(=ZU$2CA;c~bqPoO}^uEX7af-xC?lfAs7(#9trk`g75KZJZe z^uy4f+skiY?gWzx+?%*_xJKLt+|&uie`psjhkNowbL}-UORX@icSJ>SUPo!%+H2I} z^@-;~mhx#O_cz6|+D#~rU_x*)xQQ7dPEeE+5J?MgH`NtzIZ4W5Bw-?VlO}^Vp)mlz zC&GIiV7dr54=`BYgx<(|35ee%ZjZPcKus{EL~;;dghzY`U>t{AuShzWWndD7q6~M&dCe1Hz(%{csjso#_0p_DOpxGA=Y5P?*Z*v*f`)&}1tJ3U5IPZ?p_X|&5ze&uWaZcq8zj`?{{;ut;*-~^ry zc{7~-n9qrHJ9HjSCCZ?blJip~ky{-)4+WL1%9j%$jcb5>-gxP!WIPmzG`$~a zxIy7A*oT^Dz@Za$ng{!&%ldLT#_o_6@}aeBwwqcb9dUPcIA{_1#U}xLl?bdDHhw(F zpm+j`YB~y1V$)HOf}4(lWn(R@OyqL<)gY4#h3IUCOt?GhaV4W4fCA?Pje-q=DS$^5 zxGn*=#l%#@36@=DI14`O((nvwoRKbPIPaS5maM2`#q!Fp0yA{FTyQTIdT8 z`>{&TVDaCLz4_B9C2j(D@pN5||F|OmaUuV4j}FQSN^(GfXoQy`g!ghqjElRW@d^0j zr>P6*{Ap_9G!+4ViLkq%d!RMwLFl1{!I#%Adiipi8bH42H06hGJ5A|NQ+W8wX^IY= 
zJ59k)V;xUZ_R|!d^j|tn=}Ge_;qRWN5>8VGr`3``QBDxb2`G;ZsKSzguz;gbD4`gB z9i7Ry<)0t3ELMNJeKi&I>gZ!>5^{;J$mh+;=**=`;#-nkqgWE1izN^C|(?oWAy^ zS5I@g)nG0ieD>N`Pgg>*uaau=ngXd@O@*VuxHoa1m!##Pr(3|T! z-JJLzr;}Ho?&6e6PSx_3`gHJ0qPfMel@|)`Z+x}Xyza$d3yB+^Gi)^6U^pd=tu(&N zFlYEA$apV|U1xZ^F!3RiHyQts@#~HMu<<_^GPyJ|SK!y;_6V(6F@Hgk&Vv&aaS{|H zkf$t068ch6eW|Fv1Q^nz5FQ8UR&etG-3lc2 z1w|>EkV2Vx14&Va4Ny^K*u)~0^#|So=$ysohTTCC6I7HPDr$#RP*h(nYV?Jr?-rZu zRW-Ne%!FC{IzSkF)>40TkexpR1L2Ub_AnXjrQ-Ke=XQURkWY zs#;|80F??DQndu%rI0=1Y5+Be_n^Fo0O}WR5kURIZ3C#|OL3u# z&Qe(DwzCxVEG31noTZ$^UkpEYmXe*NU}q`US&DU*Qjx~dvlQ$sz2eV z30M5{%}Zi``^b5WJ#GSb@qAs5|F|OmaUuV4j}GEW=UJd86mhu>Mczxy7#DRz;}h`5 z&r_Mu`SVofdFm1V5@C1ghPRkYXa`Xh`5xpo=t1bA^VBDF(Ru0yY{%k$LbdFt{!bxHa!ou@uY<0#?po~KUFQ>RPfTehF4YR+Sr&#NwhA`fxNfn>FA zXWfprEQ~s^*>Z<-YSwJJ{WHzL|sml-=NEY7JZ$OoT&J zR5-w|mItgOs?My^N+dwl2}(=O(@=sp1?L2ff(?QxAhvP|6^rW?^b3ZHzySLRY%JuZ zzWon_RrFvt^V-OJR0Wb(~2gMx%I4)7kd;DibY>4+ILkd|0U>o4J0oVq> z(!oL?tN>UDxH$le09Y_~VU_H{Ds}%=G7hWi8b1lJgw9=$0zCh$lC4-JTd_)wE~Kz( zW+BB@Lkm?q*3?F*8EYI7YDf*w01&Qu(0+XmmmHRqI+G>@t7NBvhXmA@o-bF)Y_CGw zfb|lV`vMNTWR}gXvEi#^$XD@Eu~iMr6X{@;Echx^LGY&FoS;##K`;d{Uxn)uP!u|^ zQXz%Z{jUbAlK(^k%q%FbbkNRaGqrP|3_UOSZsnKORPy-?x&B*=7wU5S$H5hGA^&lY z4$28iazKG-gqIi$8vX)Jeu0iodPfO+_W~__ zfrh@Ib_R-af>2ICS*(SHB?Dmr$9e)tv1n5B3Mn2f%4$!SD$w!>MK*!EhVn`%K87V9)b;0Ma{3*t=(FfHO3} z88rYI1(z zBEAv{gSq%ujZYr9^OyN`R|W*$w=VX)VzY#n2-f^JpY0)`6)UnZ2lU>gP>Cn(AZB()$+ z+BWHquw)=C;4t{xF!(TvjKSX~gTGA%f13MYK>g+l z!)7#XmaMTGX>O49kuJ`JRpv0=+ho7D$!c$F?g^Do8E@cC!8t*rV1r-^z<9%T2`uuH zYbPgn?AY>6yOUsJqV1YsXk+lv)RB$rJnt#Kbruvwi3C>@H@MpSlK+Y?`L8aHQC;+d zh(mjL=(_wDtVerrf})%tloJ$i)X{qM1V$3{WWDrcz4TDrV?EC4%ae@ujdC1 zDmRU7`C4c(WlXIOyeT**XcTM^OaZhyT$g}6VfWosjdTk9`r@9{1GfY(s%BqQ%>u^( zYT`vz(Tf-WBh<=^>Mbv-7G6{Zyr`-s`aD2AzNo5rQC0DxDu94X0JZQUP5h!NnP}Sp zT284Np#hQ30koK;O8mLzca#c5Dau#uW+8!}dJg8SJ?_v?obLOcd zLs)eTfg$Cwjsapt4UJH0Vp|l-4NFAzFo1%qzid^1*-A&zDa3-MfQd z4gNF7TCQ?5=Lo;}H1SC2sh5wGouAxTI8|=6;D`@RecO4dZwuuou`U+Jg&Q*x-s+gJ 
zD+=j%OgW!-j5bMI+WAa9eEo17mru^skM^{=mITbM&5MfoFk2nY;a`C2sXoF zTES#D%=e^-Y=;Qe!^rJM_%exwH96ly&bQwAKJ0uy7b*9RkB_gex7SFidz8)KcLNUT zcIZgaJQPPEp+iRs<|!LuF%*u?5TTVsup1^kZUoz5zIh|q5A$6zg88ru-2Jg0<~w8r z3u3-SMrbkfZ8O3#z%{lh3pQ_VxF=P+!^GGYi&Qa!Ju%;$5p0TlsGku}u`lvsVMMSo zB3Ky_?2HJOMg&_U!kkTnjdBn(J4UUV%=DPoD$2}{d9B9G>X_H6PBsUh9=JtPJ4LWQ zVEk8-7xuA*`&p@t3R#0H;2oFiFS%T6bkn6+D~P6Y5PJ}Z#@*9{L{d2;Z#r_$kw!;0 zI5K4fm9v;#j#&EsIgdrn=rJ#UtS-lYT#^5{kpH+x2jzeQp@_>6#Jxm}T|9<%978qW zj~`<|fzBUeI61~>0)L6HyP$iZHRwU;p<@gq&_&1Su+hyKyj7z4*KH6T!w6NGXCiemF_uz;g9 z4EBG@V!mrNt#bJf**nL0kUqxa-Ld4=#N`D@fkk&6(;WT%l%r~Y`gv|92n&V+ zBJ1FiDP_=6p z!R3wH2G=icz`TBFwk%JF)6)+T(rF=caDCzu#{_=@q^<)RJ_#gm0Y(*igJ483C@2Yr z1hJz){9T|TZ;zni+Mj-Y{c{wK>JxCc=J=<0h7i5Z(SUMN<3Ig;{Oez`G~-{_e0G3@k3ypA?Ix%`O^Qc z-z?_S7ANu8mA-}_&DZ7TX*OI~5a2}1a$<4{%1Qj_prACbCYz_l=4r5b+H0QXigY-l zgdLx!@u2hbG~GOX0sazk?}F}u)}RNWhvsQF=%RUA4Z3ZfHk+r(;4AYq5Oi*y<{^z^ zxIdYvZRTl}c^YM&Hkqf!=BcfD8f2bYBHg3pqgK;m=Fm{SD?j|o%OvvdJgqZNl1STSt<&~L>4qNnK4k+ONpj^yNN{J2o>jw2c|7Ks2C8^+>`!tiN20dXq>M63+t z^m7sS$B#Q<7pp=-q?2CBCO5A>hZy}}lJ6EK1#noK#J5l`PeDW7jETuEVSDSME zzL*NU^0v+&U0Pp@fBbT`<-43vlx-L1a>kuMO2K8kyZ7#2|CXC=x(;y<)(rRTHOZ^* zX|Df2?$H)J*7P_M>7M4`f&{^ZD@3uk8}~MDmKg3n$NeN?_=<5p)RONlS4;&C)aCe( zEAk%~@*nr;pq!v|psqj+g31trdWjsncmR`l0FwxR`~W5pI)4BYcmNX!e~GZWpnIS- z=t1bA1DHVQq63$w1DXwRLViAveA@v`;sJUXeB}Tp5juAOlXw8rcL39O0MmB>(?@zo z348YdmhJ$S?f|A+P?QsdasrBB(*R)!DBvgxMqf;Lfbj}9fxC!%r>nrP1^J8Y`vK+y z2V$EJFgH7(*WCvimank&f*V>cv#kf1pyA%cox?TaHsGcXuxbG9!sT#J9$*tA?i{WW zw*fbGfK80!BZp{`>&n3CIr?@|ASr zE3XENQJG*dCOBA3TLDb&7t>nejswi+;pPG6@^F^`=IV>}w7-~^0eH|~Ogl&x%_ge) zHLLy*GBICFvjB->f&$E{4 zS<5v@wr4HhvzBohfSk`-)@Lo-vzG5!%Q*KY=d+geS&76qZ8ASefm^?U$4 z#DdkpT5=Qp1@HMG7YYoEVtGmfLj%KtMKEdESd-G4M58%D+7skSO_Jk6+Ex6#kd_5m z=8@!*kS8_qdyKCcf6(|tLfT((775V+@!N#`d~C)3*44(x@2Oiok&+b6P>hO@dKEt> zL?u8P9HhlTnjGY@45a=*9?C!z07L^op2)z4YlXM6HBgj2ND!-B5mkh`wh>i@y0;OQ zhU8Oysb?Eejj4B!-MRK*^=2yo6tNmbs4CQR7wDJM;dq35&kuQmd(RK~B(dd<^Kdu= zURlvInD9-*bB2wE8w{s}Jbfunm!Z4rNt(ZYwXZ~x;FD^8fniZB1p`9^Lw=;M7$)ZD 
z`QlAZ<-k*&AQcZ%@gNlsq8=dX0n(r#4GPi-APOVNgpei{zfDMA2T>jn<&pd@8wpf ztn}8J@>%)SoJ3Lc2~|WI>XSb#_gVJT%Ki_b+DiDQ;W@)b!wrU0LfT4kx(t;gTNZcE zdDV^K?F~vV7Eq}GbF_fM0Zjh_svcmr7f{Ur)4hOd1em7F|#4 zjo+T!s63l$D~#CDOMyu?G2Y4tL3lqf$?D9cmS`p)XDj1!k#5h_|0O;i7`h5+_E1dn z0|tO!Fer{Ecfi}kA)gM!H{1&(9utwW0&fb=2^s|(1XBQN3fCo|SmXHj3++6 z_NRETMC>JXk{seUl&=jJs_E1N`(D#$l12H92%m+0t^N}v3Qhgz)Mtg)?|ZHBJNsU{ zv*Ew&dyQwu{wiU7;wi_sZC{K3!$6tO_|l2>BZb#0RlY~7630Hb@3rPWJ_JYwjO3J& z-W&|MdF^ra^M*UjavxX$-A*MI%S`mZ>0t)k)Q)F-1ZIRr`$l6bF=z~^lH-6hA3 zaBxbc$G9+kO8>K!94LHVCPRUid8zHyQE%DMdX zl&jSmzMNmTDu3VPQwJWLSy7ldylZyX_V+b+l0|7mXRe#N)o-T<$wQ}V3P=Y_j8&V& zh9w%5E_6~woyD{iN+kIYj=DDQq(xKzL>~W1g>qa-sp0l3tZ@Ue!&2By;^G^#^6Rd7 z(&L+%6lp4U?Ttb5RO;6}@9N#e%8D_{gfXgR&!jJ3(j5gi{iY92smObadwIAGYlY#p zbNQVcsR6rGDqu`;aDhpmDv@22Hr?WxQWS3?h##??>f?Y4@^4Wa=|{%&7d(^vzfM^@ zsZfIh3;HbA1A`0Z89P+8|t+FH)muFO?aZ{y9y~OR0c#a%Y%M-j?_j zN`%Xk`+P_(dq$lMf8tFMB@}k1Z%i#&yK!jU*s{6O`}qxfe$(VbYb->~5z-Vuh_dEG z`fOg!2#~3I*Z;3g-sV#6R}QTaDRm)zqY7N$){JIfKqcS6M~V%HtW~c)rIyAc#ewuu zG)NP-gQ#CY$b7*HR_Ion6i`T+f#gj$HZ3vY3kexrUELTnY8R(Rmw-xB6f3 z3(M-Ar*v26xy+d%|HX}oFIM@PTly3qF}FA>0#RipAX3=$DKp;ql7IK~Ol|-D4>Z;u zdFE3y-`d+fIWu!VRYXwi)l-A+ChDgM=&I>Rq-I1U)r5GRltAhQRii23?@y_;61Ysz zXny$l#brjyKos>`BWRSP=~F%f-#k^JY7nNbrEAjHX+eC9gb}67y12=oolDqf3w3UV(@>D!A{Fjv+O$D@m?0A`@BRjUR3fmi_@&zB=QfXj>+VH! 
zw~tp>Ot0ADs!m!@c+>%42VCZ)==dt?FEl=RzO_Yr4M*g z7fz{L2fqlGm`ZdrNJu*vDuVryhp6=r221K6Onxy~qWZN42h#sREUUu_y63 zs_E_`11hG0K|V1Z?03}qaAMl(G%>9Kwp%&EOzw9K%98rtjnWB7uVxocaqy`@A*7l< z)$|+Um9Z49lX_O!s&BqO+%f*(ShZH0uB{p0yz>hdq9H!@ECb@RssCHIB2V3FS-{OA zy4CE!GD8_zJbWzneH>@PrPnH#&ajxO_fT`mpIJ|aXlRGXG)aivhjeR#0p*MoqsqBL z(KHAJ?lGsHQU8xOsu>}cI8+@0Qlg(2BaQ0`H8m+#?|iEJ?uTxzZrQn}umWpZLzX6w z8kdT0U^+`d&&pP$jFZ(XqVH;=#CvT3OT2fG5+vTsl;yqke}j7dP3l6hM0asDU8I7F zSVh;oMY}Q8Ra9+J*U(_U#z2_({hqPg*i2I;)_`HT?8B9P-+?cs^q(;L6HW^A^1X| z{mXkSdhDnXX&4vFfI(7dBcvh)5Dob-tqcE*5tp_Nbcb!ABU0moW+ugb#3GH7kzS2@ z04?uX6s|XyN2*yn93}(II>eqhk1FhnKHMvN05D~xyNFSGCesrEKRhHEsef{3uUYQoaCJMc>BQiZa*3wVJ z)xU{vILx$kysufqSp9JZkX`mzxa-OZg?Vf^c4DSPibmohBnw%1LZNfqKl!aMJvY7e zlRGy*{iWF%^nq{{AEHS*#OAY58~=A%r%@dG2t)={ks&`0B#E8SWTRess)U=oLX(kui9}meTw-36@bszfirgaeHtYnS#wLI(P$5Mn+eoc zkTYgiY>?h{BhQ|6X&DFP=CS8%uK4c$&ELOc=IM#rpUpk^=mUwBYo;)pL~5KomHMu6 zQ5sb}{3uyhBaI(D#i&wYrqb^+>&l1Ud!q+vE@@iirZea(nx@o0A|kt3;yILV7k|j? zzlhIu+@AI04<$LeRH)9&sRR`b4x0Eb%z#tZqn*EP&#Dk9)G65o8J?1?b*b3br(W!8 zQB}xLY${NaI36?Qfh7NK)9y_Vy#G1$WMbD?b$V@eYudtPO$l$@Xpg)xZ%1{78`|Jx z10k;0!>HpsKa(?i_$81WAuHt;Bx+wIz5(Ivu?#{c@l}coF)PL1*7PYop(5At9*?=q z1p9p{;`e(-wHlAlpWp#_xC31=GJxm-JZJ^t(LqdfN26Zcf&-G34ar zo|V%pYFj@w@${O3_ciy?x_vn}*g-YN=#%~91PK96spn$<%qa!!D=?{-Nw)#8j}kN_ zgC(MUdV~X9B4K|;6Z5`suv^U{RZiTe?hnd7Kt|0+Q7nuR`aT&3sbAv0V85h&OiUlp9xvh32m7} zYbGv8NEumtOZSIWUX)S}JO<=i?bpu}rfva|wnJ6f{bZxxO*H$9iYbi&qM3xT49ISb z?&m$WYBknsCJiDTg2XSGHo?gpi9GEufwwV|I2t3lf(|3Hi+(D*=y;zF(#uKt?n#8k zkKDIr*WNAbwhr(5*5O?pBF3KnO{8it#+hGZ zt@A0G#u1ZPzmlXE=jgc z1ChShKNr8>N6@40pl+iXs!W{bJ@GvYdT5Ol-=qA1nqV2>0;q;9h~^v+bts4}2ko&5 z7e2LvqI7eD%ceEYSI26#hrV&&iXZ%7!pW~OVvp=Zv)Y5W#4ZgMiCw7Su3vtpbh6Y3uq7Xe9zR+8?Ov`l3jLE$?H=2_dDG-%L;U z(90&*e8^UaX*{Fs^P*(b3+JG-TuRm*qrMFXCVtNf^m{kjN+rI=4JNe3BfQpue$Qso z_?!;IXfgyb;|w)R3AIY($aSd;=~H#}e-nG|w|?ulu$LFrt=tofRzWc4Ssy?lFGeD} zu1ZEV`;~cP7J0RJ)h&MFLD4V99yOcIjsd1~LZTZAq|h&|F_Jerv8>%pV(qjm=_ ztWxyn!c9NOhqwRU_z%AS{rrmB%+axLbbnJpbkM3QzTppSBGm8)!4mHemNfjqFKbzq 
zqU&zeuvdUOnCb%LT{y~m$P)E(B8F+1vM|j*5uQ2IF&3(0{}kz`lTl5CS&cbCnEr|4 z*1y74)UW!Zik7+{9ZFrWo&-@PLg+2}eI-oU9`PFPFK(XO{F&qbVi=Ml&^kIEc=MGL--(pG6CdlFcYU z8&dAGr)hk)b_~_vZ-g5`-B=8h{0APhCu=*OzV9hUfXyS-O)j2G(M;=o)`y8}GeL+H z8?R0J?sJy*K3aj&Bh?TCkzztz|ALXiKxiQw*5GJK1oeMQ*Tzu>Uc9J+Wnr6E%eH`e z9)rlJLo`2U8nn_-8bP=2lMgjiZH%fiMl(#|Xq0f2Yo&pxq6`S+zmm{Ub;s?C)~wt( zJMlvOrrO-Z5CtC99AGr3q(-$~u1DWd^+Lyx>Zpb|AieNXXjGPJv^c8hx>1-lFjG0| zjLoQ2q&GK^E?`ZI>WRFdvnjzBRXn|1NmD}7iQY(PuECoaZkU=srA}?{3T(Q=8aJh9 zVcNR!xkymn%yHak0a^yQZcJK|!jab(QLXL6v@8#X+s^$9DM{K1%sDVBT`%Y-w| zH*Vyu`nMIa!^jbb7`{Plu}{fG7&FrFASQ+NSgVo9BO5^38-8F389CyR!S`}HE_}>| z1V0L%MNwMXx#o!Xw6-XLHS*ab?l1e=e%YJO{X2grwRF^K>iduF`o;t&c+br2-ZJ+> z1MxVmnMCOh%jHSRqZj3wU-DUsmjlzCy1|U%hWys-mppdixFr+il(V0es(Jr?u(Ez- zxXXJ-{1OI=;(N*`w45Lq^9-NSa+0?iKCAn87&+oRYdm4kQESvzV@@`9g^Z~c@3vhN zIbQvf=7JQI(V!#Z-b&ozG`%BLbj(<#xU$;A7FG2Mf>lJTcVa~RTfyHaDqD6vw)6f2 z2fq2oW3z74hT;c5w2oG*|LR+fUqm1#e$j6zQ>z4#QqW7@38T&#ord@@?HGOuz%+x zn|WqFPDTk9uR#{7sRmTb zUJx7HAu`8Y{Y_1HU}W4OHi)|^CqDl$3BwqP-=3EsZqkfP~5n}74hdox!YmU;c_x74-vhFay-5-(d=AXc?JUAO_>zrXRiX(K+j-gVCwrDa@-zqf~v23)qc9<{YNV^lZ^ZXpE&$L;-Gi}S9 z&!S{HoUnsQ3Q?9@Vr^}fpSCu;L&FmHlBgcW-VTXji;1m7#=Pg3{F<#xC+pi>b+l#e zmTk*i>h6|p{$9(iP;JXCA8pGm$Jy-C+OFAe?b@~T{#}P(etB1I=jORhN{|oN8)>lw zli&Y;>3S3BxUTa|5GX8#f{8eC$D>$LN0m`45iQEW+pE;_^6J$qpa`G}tOX!odQrzB zWm|KcNESg_^kJX2Ws#y1X(nlj6xk`_h@Nfw;|q{P?>ymY!xCvogPGo6_@ z9$Aw!QD)LT$w}t>zW-g28dH?sfB(Du%l+?nmnzhI&qi+&jQ*lVX0rH+NQoKyvl}^0 z5;FX3s5<>+iY5HCp%R|V#qeAXV}as^m&G$T`A@L;;4CbTlR=CH1a*crm2t9*K~OHo z1#R^G0{#~0>dhW1%wFPa5E6!ctTeIDmcf$##EO%p{#uHZs|1oOlq1Iv2ut{Cc|A>D zc>KYqKK8+*_a8lZ`l(Z2KJ*l3#YgqMDxEG^{ap`VG-~eSBsy^#F5^I|)2^=%>B#C& zPO5Ooh6oPo6tJHdfg37sA3%|1SH{l5c`d$Y^#>T9WX}{!Ias*LuXgm=L|@wIYn}gK zznj39nE37$Bf`HK@QWn;(V|#D!C{f)_jg!1rDl*WgjF=d!5<&R-Pf*SdA7Y~;`57_Nqwq29L^^amj0u&KPeyY)=rv*5I4?5Miy->!k=LB@0 z-4kcdjQ-PCuE~~WXFbRGjtLzTIVN^Y;+UFa>ILJ?I>ssVoI=lUxVpeGPPOMWdUd-2 z@G>o$`c9wkgnj3r@3i>NLEowKorAt}FmMhAPEX+U1Wr%j^aM^%;PeDePw4c7&au$x 
z37vT8#6u?)IjP7=MJ_y%^Cxn8BIi%!6h_XC$SI6xi%4O?v42IpVrL+B24ZI*b_Qan zH+GK3&Oq!8B+lc+DNLNg#3@Xi!o+!;L~g7TXCrZ{6Q{c7RM(v9np0hKs%uVl&8e>l<3q`={&OqH6s5=97XP{mf@Ma4Go|`w`Y+=Bg zEnN3z3)j8bLbEqpDD-9vh2E@F=;n>*ri}%=&mQ;30l?K7{}H@6EeX%v@1l zG!P9%BcrL3*l3YyD3^%VMC+v9tf&-vQs_ycCxu=pX-|4RDfFbmORPegJZbW!$(JTy zntW;UrOB5HUn=~B-NC=ZxDi>e|DX@PbotU1NLL_Tfpi7F35An}EZJDHv1DV(#*$4S+Yk~!AqxswP{@KpmK0q< z0u`W80SXnMP?1R(6vCuv$^ap0N|tVdm#n!-*rP3lNd83DMa~&i$gK!V1^%&XOb=V#R(5p9mUf|0b<#}Sp@lRPci=fZNMt~ZFEX-26y`!9sag??$jDWK5xf)< zjNqk^U<5A}zPU;;<^m(AR1k&WrGh8~&mlr09mF4a=x8ZCq{W@Au3Ma6D25(T=mCWu zP`CmLHK0%fYHHv^OY;#FK7dM=^AS8(Ya~*^@PL;J#s$1oFd!`j1XL<~&M9Q1i2(sG z6$}V?sbE0BO9dm*QY1h<^acAA2`IkDR-LJ2fY3xL86fad$@qYm%7B1FMw$bL8W|bj z>}+Xvf=Ux-CwOV%>;%sx9ElWak|HC$q3PvZMUfP8lD1~KH<3y(@_=*ZfO90W!Z|mG zv0|6F3bJpUTq;Ei)L9Go0UmyU!U0k!MFIy%iwrCv2R}gJ$DGCw6n>DZ6^9WS>E-x= zmtKw^cq!!gftL!d{&NOsB+|t311}XEKk!n)@dIzJ@oZ6y7uG`lQ>8R>Sm!jXpi;^4 z122^vKk!n?@dIxvIgH3yZ#j&}NE63zPU8nEO&mY)geej!HgFd~s&jvshw;=s*m zbp@4A93$}3#C37rMvA@Sicy@GP>Ob-Xa{QTU+3a7bOo0c~Y1{0yUse0}3Oc5F@o}NFxJje#SM53TA**B+vy4C8SmZ zUC2NeD0G2B38*PS1eVlwQV9^-$Vih%ub7c`&(HYBE)5hB2-a$`!>|!{7$(?Zm|%xt zf*l5lJYwv?%N37ff{Z-U7J!T#(-we?{Ns#S(uxNv$2e8Mn`4|Y$Vjix^&1(f=fnXo zgT1U;_O1D3G| z&FYAbM2>SBftNzgpN>Wl6y=~O2eopj1`pMsPz?&zpite>V1Ys>sAL&k@RDV8!AsUl zcDlp`9$izNba@O18rIMyB+|@W;YDKF6ky~`n?j77X;YAqGcAHG?z*L&RGYJ1Ic1`2Pu7JQe@4Fd`78Rk+vZ z6D%1_uw)=X*FkGMh~OW#1D;9_A66+`>uPLBrP1R_ojGY_43LpW>^XRO#D0U9E9^CR zbQ;tYa_R4Clt?WjP3$&`q>1#6<%`AQ1 zM7PvJD)nU;8it4*5*c6z!Ar9iH)Ldh9qen)fFfp~GQbXkmqKzvYFBO4(>%p}`I~>8+*AxJibDRR;rIJ$syi{@u z3>0fnv;jpMQkX)5rl4pFYE5AlJP*rAV4c)5hzK&A^aEiE6t+pNNb0#Pp-6feV(`){ z3?U=+3^DlPyBscnK%jQ+fkF-{%?vqssbt8(OC>`NUb+}$@N$kp9w^A5lGWFa$Vi2j z9%Q76AqNjl*nwaT6cV6l3W_$MXaj0(2tsBsz(`;l6t+QO8~X}ugW^LO|DiL*7rrW$ zW`+hWq?w@sFU^bzcTh621)Fcjc9PD344K&6Qh z0nfRHL<$)S@X}&qs zq-y1vKc4x+RRUFJMJImb^erj7$aBAK#we9>juUtp=Qx3vIgZnc#)(vhd~VguspCGs zRAWvZ_h;2{e^wp$XVr0kRvq_e_gc#ZOBR)=3&cNkC>R=hWaJjd9=zP**n^i_I{zbM 
z-Vp*TfWYD6bBs`_C6T38DLeXffHIjQGQbmRNMw!x0WWg|2zVO;;shC)fV_p4i=XatHzplC!2t4sg^ zB-R$j!E@O_0v|wOoD@ZC(nV?+s6+-TLHU;m%(vpZ0!%)YLMiH?6cpM(p>17iKp{^G zWlZ1(6CjZZkcb4jNv#Dx}3OfP}RoayDPXU@Fkyl2j|a05h+ z>pc@_!Ggsl_@41+-@NOyB;!Z%%ZPg?|BZ2@TaGK;JN0Q-My|V(_ZHK&>iAnwdTsgC zn|F=9m6Y--Lpta#TfxLJHX8dn8>!r7-6f8xb+mCemh z>yNqk!l2#jb$eC3aiG19_n|^^?ajMJ|EBUkfR;$hzxO0rC6l)+84r}aqx9RZJ7TRr zV%f2xn*9+ghl_?2Z~l<=S$X8HtlW_(-HV#ht(`o3>I8Q9o!Zj9AKKwZw~WV6LDRXs z>5c_Tu?YX9{thhaiw2^hXe1hoCZaXbI;l5nG*#k>N`)sCo>X{J;Yo!j6`oXhQsGO5 zFIiu*zGQvL`jYh}i*qdv_)_6Zg)bGsEPKM^sQrE0dK5@yAeDhs22vSFWgwM-bOq8C zn6A2vgz_qsiqNwnX$qw&l%`Oc@O>=Z3Z)QVt6Qd_MCwDS52Zem-bi{QDU75rlEO#| zBWbcfSK8eRr zUApShRhO>1bk(J+E?sr`QI~8zQPAsBQLkBsn!H(R0%Z|?Y+=+g)Qg{8ScdBHiwmPx z6f#yQW30Sc+61+#$g1c8ew1M}-O$Gd90GI<5ClZ1OaXJWV6~gp(~be>}|}PxHsq z#PLGKQB%d!RPi)bJWUl(Q^nI%@ibLDO%+d5#fv1Xso`mAc$ylXriQ1f;c04k@fE+U zux`;OO%+d5#nY_tG%GyK3Qx1b)2#3`D?H5#PqV_)tnf4|Jk1JEv%=G?@H8tt%?eMm z!qcqqG%GyK3a_TvX%={z1)gSsr&-`>7I>Nko@RlkS>S0Fcy-PwED`!Bt`D#5k7k9Z z>EP7~H(&F>*97oqIS!y4I3$FgKg$W=YXbPfx<5;=e4!iE+7iFM@C&LU!Pxf*TwlQT z1zcai^|kPWT2m>+*-33VV!rT;AM=tIetqHB_xVeuD^bTC$`_t};n^3SeF51QkU^zb zc=m;7UwHO~XJ2^swGH^%27KYy*EZk_xW0hv3%I_3>kGKPfa?pmzJTisxW0hv3%I_3 z>kGKPfa?pmzJTjT{8-0-JF|%$5|DiX*%y#~0ofOjeF51QkbME!7m$4c+1DEH3%I_3 z>kGJ`3WMkGfW@aqe|zVPb{zrOJ63%|bb>kGfW@aqe|zVPb{zkUt#h#$h; zI33mq3D3Up>pd>!7T9`2*04T69~V7@Eh<~6!s6lf5A~=gMA8w=RkN4gy%p9 znLsNrp6;SHL~tMk2SRY56*v%L10gmLVgn&I5Ml#CH4s!m<&F>=2(baa9HG`g6BG%z zkU#(q1mHjb4g_FOYpe|(Icv;G7T9lkT*o+MDwWGZc_5SrLOFgY&sw285Xu9g9KW1b zxwXGs9|-$_upbEf zpi-eJfcpljQ+Jd;-D}foEwCEyiGe0Spa}qK6$+f@L7;hn8x7c06Clt82dgHY2T)HDb+4MK0W ziqA@){6lyIKCR*-Q<3449%@MsH3dRVflyN*)D#Fc1wu`MP>Xn|DG+K3gqi}Ora-7E z5NZm9KDVr$e_Rl0NmC-!ln6B?Ld}Ixb0O5~9SZ!Rz#j_yp}-$%^$xXqhg!Wu;TpGc zP_u9y3fFj(hDwO2P`D0->rksV?)hN3a1F`<2?g&^@D2ssoBf&BfEF-}(5-cOZG7>B!Ez6OXtC$ zNEnQS!AKa4guzG{jC}6C@3&uT8->S_Fc}Gxk(T917>tC$NEnQQ*{usNoWK1IFI~KN z@e5m<7dJOAK6mlyU3=beVe?$|XGd-yoxn5Uhk14C6Uxjr+Ie1Wbvw#U|7ayG|K}aH 
z^1KP8scVkA?(z=FKsAH*V5h2s8`le^xbq7S&%whJ@VfOvnfU6CZp#+>m(qNxAxBK< zusStwhUf5%lA&0(S;>d{iwvGrl;*~xG|fABHl?!te26Et8{Zjp_u;A8DqqNl_|u207%GeH z!mxYUPYA0OuvUymcL_RF*22?z#PqsWx3FAD_MHaAW*NYCI@)e9IDWtX)$_Z`pMU$0UauQOJkrKG!A0D#beiI9QG9 zG9E}^4q<&V1|Z_1Q(ao=a@wFDL_e%qpjV5DfI)zDWYh)|+@!Hs6vH<+w-X4l)zU}Q zbg6~s{1-hOn)FfwBVz#uh#AMu=H`n}-2dX%smD)lo!C0|*7Mi>!Pe8*EX<)bfWdVD zufe^-Ho^yQi=ebzhvB8xmJqFgAG`lvQaN?J;w1LOw z=eJJ&nCcK4L*DY)tGTbrNz+^5wDz7ZT5ab*)ct4?Q2!>)}B zoG5lE`mRw%5jH(6uM*qw51?fHph&$iR&yq+qCEi%$tE7PAPzVyb}5wZ9crn|s@mVz z3Nzett?z&W4OTTlvud#~Lon^&sbWQxG2TLh@P|6vs=yGm<}CJD8HdxCLufmg`QE|Q z>|j{TMQtdB<4|JJZ@C~~unX_B|G^MCwC~(cY#iJ5&yRlL$eD*L=hn{s_!H0FFJ%qr z6{F*W?Qw#GEo51>$61L{G*8R7mMryPps297sR8Xk>N{W0srN=I6JbI&*GU`^1TpCqDMX)7L)rF|Hay>JFed zD{O=?_GZ5FOUv!dmQzmGRV^6s6KjLDT*BQ z1<)Me77HP~Lqh^?PzOS7lk=sEiYC?ycD)G~*vqD!T9uU}&FZ3g#3BO~I80qS2Tn_t zJZ;(xfJRP-q81O<6<6T_o6YfLC2^V!1Fj}#Pams1^mOI9%@;S1p5MLs{MoY)KBFc) zGl{WP@kuG;w%P6RcRApQ>9cVg&2>-c+P?QTo3K#W;m zj6+lF0X*z>3isxKBMam5u;|FnDFy9XHjtHBGfZ?ZKXLY$1;Lm>F~N%{*pY$GP|amWEbIuJb#A#` zq=>j{W-BUVP>hk=1*vTZdvkMe*@Cd<#&@bGL&&?(X_5oOJffi=&J~A3?%Kup^ZZc^ z!~mnJ_Ql>t1noPs`S~w@;n>uJ4;_B+V;e^|PFn!6%(?CtBZB1&^Ww7S<_X5r+G%|+ zSe~UVGR2nC)OoFl!-4?_FX52V40!}wR45$FBp+O1e<_f!jMN!EQOZ%Vr^6AYdwl+_0o|}63?hk(c*Pr^r z=N~oMIRzM!+#Zl zZ4pcakziy?oMbil;G5bt&{rO4G>c$S=w<{9ZB2Xw((kqgoxI<7DiB?sm~0Ekt7|SG z^oX+-D%}9!EGEc9qs1G~yeJCfvs!cN7IJKE^9UNB^ zoW!kQtqAVaE!kq8;zuv4#;0bbTpJ5j&32)bpUbTb%b+pZ3rw*c=W?9IU4MD5x7nbL zkK<{m;m(TZgvk(sZWi^e28Pu+3aA&3y1AtlG0o4MIeqr@6DQ9;ef`woPu;b3YYF_%nw z`dUd1oO=ZO(i(nGH0+zH;y4D3V@#fU>A92VY7cH4J+*P}XO0{>^5DOeG`^=RhGlIB z$7}dHrN4@2zvI1hYQ%zyK@{I@Sy3@pxMZkcXKlhBlUMWQuGUq~MRR7gNcDjevyb0- zV1YEP1sVWzQ|6Wnd1v7)ZA+m1oRVU-=a^P|`BL6oQ*p;axLic7Sg;l>luN~I(dSgc z(RzZ@gk%?rVefX1JbviNgGU}ediX)Saqn{*2Cy-kA+17cZeC;8!UH=Y-KtUtGL~qK zYQZsfy~QO;sW!zHHVNfyZ$v9wfLc{;jjgJNwZ(15+Ub)_t4gG_a857_+A5Ky&qyq8 zkiA2BO*p8qp(UeYd_F8Z!GNknTWSkOQmeNwq*_?Qhx<=&9DTTa^uZ%XA3S>E^ZY8? 
zn%L&s!VXmQiJLGwQCKj4ig83d6mmE)$738<$yRmhyb=v0OgWcqEcpo96GRU)h6kXv9qUQ8$oV)lA!pD*xbq{ESsTq#KDF}xy_?7F^lCmsiS<14MWh*ROVc81H zR#=v^EN5BHvV1>X;vaJEI>Ddu;-#|Xkv7}3*{01lZORQ8jdF!B858QZbZ{k96kHW^{3wv(@N(hH_@~J%yCKvNDV|T3EYb+ibVZdfV)`<-rmi zSfc$U3M{EN=+FxO8{UT(z@iZEzb*6qw`IQn7Th)nUNnnWtdigjv}Fu62wr_#9>F_n z%Y0`obdlg?wq?GMwv3@AhZo+K0Xh)8@V3ks*`kf=@e6&Wi^!qpFv$EsP4R-< z^2mNxwMg)mTp0TskoA!&PfFuvVChRIy@(YdNMC^O#&rR?{ZtDLJDi zgGt9C6UohKqw@ELe;oT&Hn?H*y#+IQ%rP66*|)js(Z4C(w!xo3@u$s7?>e~ot3QqJ zAsd#gFlj5(7M~84Nr%c=#`r?STA4^{GTp9Zx?QVVH4Uq#AysaeDpxcV-?o~z)mX-O zQ(*FGQNfyLSJEM9XV6!=C_10w!N-RVJ$T~IgAblOedgj*PaJuajjXn35(`T9p)IP` zOwJlCDU(@Er%2~JDqUVIit&nD85R}UFPf1)Gya1~(K(2)Xg(fX4nmpGT>}TMwnLIb~pOp+4d1PB@ zyK%1axIJf96RfH-GgOd|wDHw^aTG$k=-)5RzLa{{m!meey51 zHm43>dUoT!3tKC9pKo8-f~Rjdt#%E(UIp(LrsSZv!^6o%#`c>1Yh5V1W`3~^I>6fy znz!OT#B+JoO4e8RRxwC>*`yzh=J$4~KNtaX79S>_=?(GSoW}86Z5gGMR%c zb{Pb`Tn2RfH?f)ZapvpeoZp=jQE`S5tzyi&UFt=|_7koJQ_bd!+%jHl+DwUORCRiJ zA0NS?cWnI{$ifS;_$mvoq6{xBdg3HLE#O;EEh->Zp3-|XU=)m7U^q}RpRQT~J{S~= z=ZhwoC9sP&%RrVGoCJ)S=WwHDg%LyAD$=zg9rWokDi&6YhO2v<&5sV6{FgU3x0-|I z;LMMVEaB?oTEVr73*VUzk*wodSO($6c5}E|xZ1cDaHY8R;>vL~aAmkCxJI&wYag!p zWxUdYDXz|%@f~W~kE@BROYsh_9y7R7>R=+-qcd=H#vGl2qce1L29D0a z(HS^814n1zC@OHJxc1`8aW!ydxQ4jk=nNd4ful2pqe#Jv6W1S|`R{))I`@7^)ReqHT56x+o^a-REZymu(O>S)H;4HL^5(Faue9>k z!Hwq2*q~mEd^Yzx;4FERsMO^{hs(Qchx zSzqrnS;dvk!|tyk*u6Hit${SxNeB7MBa^EykMzJ?^XxlHU^{#zcY7TZtlN3{nyKer zzGkes(rst>yh|1ZKYdNgOJshYYzL(L1&}T;l2s8fIWN`!?gv)x8?NMsRz5I1_`rR` z!3x{vYoKjjRM(a!&^czPqD}d>8Vt#)hTM?thiVVX_CpotY>U5xfl@U(3IC`h&E-n3 z2fp<0a;3S;u%0a?ol;Skgnpau?%bVs4*BMWiI4L01RE!e%%LF@G-ZOuOlr-8 znAP}qR&-?Kr%M0v4?lowV{HHEtEAxHKC^qgI`yW}xyqkClcfcN39_c``wH??*WnIA z5}za>2}vT7m?R;ok<>xFSrSTkl<+9wQNp8yM+uJ-9wj_V_>}M|;Zwq=gii^d5(o%EhB`FhMjjGqz>Pa3(11I1NT9(ZMsUjw2_$f%4G1yf5hHjqDhVZs5sw)0 zh!Kw%!L2kTlpscM9}Nj5h!NaGLqZ8+1h>wR)PC4s+(?5OVg|R-kWd3L;t?abS%!of zh!NZ+)0A;Z_$C zXu!QKB#^+3EhLb@Z7n2_@QD%J#sWf&;2stdN)RKsfrW$;#E4If;C2-fN)RKsSA~QU 
z#0YLo`QiBDJ{8tb12Ka~)RRyHF@jrDNT`7r!F?$tlpscMQwj+sh!Nb8LP7~*1h=D* zP=Xl2y(lDVgz@YkWhjc!7U~vlpscMe+daCh!NaOLPCk! zRpuM+DM1Y}gBwansDT*4eIz8*K#brX5)#@ZMsVi{2_$ge2ni%`*9ZwDaL)({Byh(F z2_=XT+$Tap31S2{g^*B!7{MJOB$OaVa68Bk;{kVvu!b6l8QdB|LJhq2=2`wp#(94J2FTp zL5$#b3=&EZBe)j>gc!j+7$lS+MuOT^<{NInU=1}8GXd8X+;ss$jNqON5^5ku0%8O= zTaZwK7{QGdB#^*u6(o?rO%)`Nz%3OdkiZQUB#^-E6eN@&MnYl)_f3#cf*8R~6C_ue zZ@7hmHPk@N;Qk2`Y9K~%^8^Vs5F@yAf`k&p2yT-AAx3bM1PLXG5!@j`LJ497w?~jr zf*8TQ5hRo#MsQ;U2_=XT+!aAWiSR1(4Yx?3hM2*f5hT<=jNrBi5^5kua8CpYZ4x86 z^MQmC#0YMCAfW^?f_olFC_#+ih6fO01h+eoP=Xl2y$&RlAVzSb0|_OF5!~hQ!}AR{ zJ+Ouvh#B1RKtc_~2yS;Ep$1|E_coAFf*8S#4J4ExMsQaH2_=Y;kQj-G5!~WH0twvU zKmrNe-arBg+}uC{3EbKMLX6X5)mU2F%l6Y5it@GBM~tY5hD>X5)mU&c$N7U z5i=1n6A?2J*OiDE!COp7sDT)Xh>?gGiHMPi7>S6Hh!}~8k%$?gGiHMPi7>S6H zh!}~8k%$?gGiHMPi7>S6Hh!}~8ktn&!e2a*gh?t3pnTYF3M2tklNJNZ8#7IPp zM8rr$j6}ppM2tklNJNZ8#7IPpM8rr$j6}ppM2tklNJNap#7Inx#KcHUjKsu9OpL_D zNKB02^)5e*-+9rPy;a%6C*J(5)&gaF%lCaF)-NKA~x#7Inx#KcHUjKsu9OpL_DNKA~x z#7Inx#KcHUjKsu9OpL_DNKA~x#7Inx#KcHUjKsu9LX0HDNJ5Mx#7HuGmHCzsGYK)1 z5Hktam4p~ch>?UCNr;hz7)gkcgcwPPk%SmYh>?UCNr;hz7)gkcgc!kFct|Kgj3mTJ zLX0HDNJ5Mx#7IJnB*aKUj3mTJ5?p1zCB#fZ%p}B2!gVDfMiOEqAx08nBq2r;Vk99( z5@H1J0sVTE@#A+Gy!JjeKs2^6%rQ;JZ)%Xj|y&OhJz=R5y= z=b!KV^PPXb^Urtw`OZJz`R6tf%7MD{shjS!1)t6e*))E z;QR@kKY{ZnaQ+0&pTPMOIDdlIi&yCU3!Q(V^DlJ%h0edw`4>9>Lg!!T{0p6bq4O_v z{)NuJ(D@fS|3Vk9$oUgFeC?V&`A%{EMA`vGXr>{>9F}*!dSb|6=D~?EH(Jf3fo~cK*dKUWxN3asDLE zpTzl-IDZo7PvZPZoIi>4CvpBH&Y#5jlQ@48=TGANNnS5rHRoT=`B!uP)trAd=U>hF zS9AW=oPRavU(NYfbNLY{&G}bz{?%N(>dv3K^QZ3osXKq_&Y!yTr|$fzJAdlV zpSts>?)<4cf9lSky7Q;*{HecQyi(_1>ikQcf2s2?b^fK!zts7cI{#AVU+VlzoqwtG zFLnN<&cD?8m%4a0oIefcPs91saQ-x$KMm(k!}-&2{xqCF4d+k8`O|RzG@L&T=TF1= z(|EmjWzN6M`IkBWGUs3B{L7qwne#7m{$fBBKH0$suW)iq;a@ge_?OKV{$;a;f7xu|Up8C# zm(3RbWwV8U*=*roHe2|Y%@+P;vxR@zY~f!vTf{4K=daA2zcP3J%G~)YbLX$joxd`7 z{>t3>D|6?s%$>h7cmB%U`73khugsmlGI##UUhn*sx${@%&R>~3e`W6cmAUg*=FVT4 zJAY;F{FS-$SLV)NnLB@F?);Ux^H=81Uzt09W$yfyx${@%&R>~3e`W6cmAUg*=FVT4 
zJAY;F{FS-$SLV)NnLB@F?);Ux^H=81Uzt09Wv_Ss%G~)YbLX$joxd`7{>t3>D|6?s z%$>h7cmB%U`73khugsmlGI##U-1#eW=daA2zcP3J%G~)YbLX$joxd`7{>t3>D|6?s z%$>h7cmB%U`73khugsmlGI##U-1#eW=daA2zp~dme`W6cmAUg*=FVT4JAY;F{FS-$ zSLV)NnLB@F?);Ux^H=81Uzt09W$yfyx${@%&R>~3e`W6cmAUg*=FVT4JAY;F{FS-$ zSLV)NnLB@F?);Ux^H=81Uzt09W$yfyx${@%&R^Nt3>D|6?s%$>h7cmB%U z`73khugsmlGI##U-1#eW=daA2zcP3J%G~)YbLX$joxd`7{>t3>D|6?s%$>h7cmB%U z`73khugsmlGI##U-1#eW=daA2zcP3J%G~)YbLX$@_0C_JJAY;F{FS-$SLV)NnLB@F z?);Ux^H=81Uzt09W$yfyx$_t99WQngcmB%U`73khugsmlGI##U-1#eW=daA2zcP3J z%G~)YbLX$joxd`7{>t3>D|6?s%$>h7cmB%U`73khuk1DFFa5lt=z}|dW$yfyx${@% z&R>~3e`W6cmAUg*=FVT4JAY;F{FS-$SLV)NnLB@F?);Ux^H=81Uzt09W$yfyx${@% z&R>~3e`U?6_@&6m#zy(*nKP9~4~~reclbq$C4nWOC6OhGB{e2v595a^j*$>cBt}Mp zWF$&P!enX(ZW`Wsa{Tu0wvqf+@3xWDh)*scS?%67(q>`%Rur~b z*uFKqZDh!@A7? ztL%ZgX%FqDJ+zzl&~Dm8yJ-*YraiQq_RwxYkpM*_P_zO?Gf=bxMZ*R429#~oAJBz1 ze7Z=NE~2=6k=?q8?=j&uBY)8P!9wUMUt~8gvYQv-S6@K}g(N6ML17dWjX~K9`d*>$ z75ZMG?-lx9q3;#?Uh$|)9RQ^+bpVvQ)PZTA5%5tr?L*77kD6&8TBdzynf9S&dWlVz z^p)`nBd&idLl#nGi6Q~p1XL5SEKnKM1XL41%|IXlMI%tO0!1^>0bo`BA~cl0h|2O8 zQCI#Vs>)x4hVmDoq5MUL=8HtY7wfl;Os=5ux6zBY(Tlf1 za9*y1q7f)t-9(|AD0Gvu6uOB*H&N&&_BvscgiR7QN!TP|lPmwmbnrg-K4hUp;NbKu z84&q8i`7OJOb(QZ0slbB6nHi_9JW|NprVm67`Bw~|@O(HZ|623uE z3c7@$uMmwDqOn3WR*1$*KqraD@?WC1{FkUL|7Bpy;ELkR1%-uZUH(h7F8}2k#%l7d zXg>K?G@pDcluf=B)f3-F*~GU|Ht}tgO?+Er5SjQk4D_^af}$Q24M5Rinz5T^?4}vJ zX~u4vv71iVLB^`5F#)Bh#sn12K+z7A>eXRTJ);vqq2CDi+GDDtIZg1MRwYpC(y9bX zU0vzw3d!tTg`KOga}{>3@>ZBy)d~cPDp1scq7sy~asiaJT2nw#JAFHQcsoj`Z)Xp0 zR~dVFJ9~ILdf2&jI@0(U<8#kh9Ugptu@%}+C`Y&*t+hpS($(Q_jLa-_-F`V-o)C+| z)iw#Xu9?;DL2EM{u#)${@E;0Sp$TnB*rxM=ANLyg1(zkBCB7wrC7~seC9x%mB{fUx zmZX+6EXgcsT9RARGDj{`71zvu&Ft6Ae$DLH%zn-6*UWy+?AOeG&Ft6Ae$DLH%zn-6 z*UWycC3|(VS2ufgvsX8Jb+cDDdv&u{H+yxnS2ufgvsX8Jb+cDDdv&u{zj}XDv!9y% z)a<8bKQ;TQ*-y=WYW7pJpPK#D?5AcwHT$XAPtAU6{cV`NhS_VFy@uIqn7xMCYnZ); z*=v}+hS_VFy@uIqn7xMCYnZ*p)%%;7{mkrVW=X7)4dZ`15G&0f>&HO*er>^03^)9f|PUeoL~&0f>&HO*er>^03^)9f{`-rwBp 
z=Vm`Q`?=ZA&3z9`?6{m|@(W_uiTx_W&iP=lcUSjqVvzM5?#Ox(zFEM+`)%$DvFMe$+`?mk$7q^x(`?mk$ zcehq(_HF;guWhZ+?A!k9+y3j@{_ETR>)ZZ|U*<}{_Fw!u*K%gh_Fw!)*9y&^?Z5b? zt`(X++kbuAe|_73ecOM1+kgG5@4vq7zrO9izU{xh?Z3Y5zrO9izU{xh?Z3Y5zrO9i zzU{xh?Z3Y5zrO9izU{xh?Z3Y5zrO9izU{xh?Z3Y5zrO9izU{xh?Z3Y5zrO9izU{xh z?Z3Y5zrO9izU{yM)%RcD_Fv!jU*Gm$-}YbM_Fv!jU*Gm$-}YbM_Fv!jU*Gm$-}YbM z_Fv!jU*Gm$-}YbM_Fv!jU*Gm$-}YbM_Fv!jU*Gm$-}YbM_Fv!jU*Gm$-}YbM_Fv!j zU*Gm$|Np=Lj*JK(w*UIJ|N6H7`nLc2w*UIJ|N6H7`nLc2w*UIJ|N6H7`nLc2w*UIJ z|N6H7`nLc2w*UIJ|N6H7`nLc2w*UIJ|N6H7`nLc2w*UIJ|N6H7`mF|j(v5$3CL8Yc z!(T#%kBrPeGP3`Xk>y8529J!i9vNA9WMuJ?k;Wq<%|}L79vPW?WTc1MY6k_?K3b4l zs4k(H*#V@-?`VSZT7dBQ9ebe@Di%@D!-b|0K_QwiQ5p~1LcY4Ry7BCN!_|$WtN0fV zSMNiMDw-`--&FcNwoE-R3m`YVtjB*yOm)9r$jA!JrzY!%j4A-|fphr@UO&cSz$jNq|9$99#^TpSr; zHIz;KEgDb!Em}_eEt*dJE##K-9v)m$O<~5d=rJ5Z!T234Mu$LuW$BSp{0pBQ{~VIZ zW0oGWWc0oF6vdYQqa~$jm6oPQ|6y|FP{9vyYh&ISKuKx3bhDDYIyxn$?U6@UV@0G~tdkb0Q zDU8IVN8c-Py;&s8owbL56+CT7N;>Q~4A>)4?5R%Bn3nQ9iLY_!v$VEVnh*~zP}#nYv~ zD%c%n;uo)|gpG8k!4;W_U%WE2D?fBU7UKm7dGOOHZqJzwl1!E>Z;c&3-GbeJj6fi$|k^>i%-H*BW8 z1v1OsCZ4@E5RZrSDaB*cI@W9<&E{3Kp09Kl(7eCUZBVS2uJ?0m+*@0pF9vYzj>+j~ zmUI?#D>Dq)MMrp9^Ju51yjy-W_xLGhT4 zBd;#>)`km3g6BFe%Jl@C*1SZoiDyxLqzt1Eb@M84P=qS4|So&b5|)%BFV=Md}T zg^2AzKRxp5)IavC>_>MA@^~KgO1FtVU1qCekFn94cKZG5)JwFq$W~+j>d33(AK{S0 z`uIoW#+UJul@lk1pZ}GE2S0b$i4)Ilvc?8XjWjyeycjBE#*R-sPdfR2(y{+UI)0I~ z^c|}yeaGrb-?7@#cUXTHJ@`-bsG0Zh^lSDA&#YK#0}rpd5q%W{{%Yw9zZ9KjF$g2~7(XUQ? 
zi|Klb;Td%EPYzC=czD;Y>p%PlU)wx*>B36~zk2tv#qH<5VQn$(Ud4#Ms_`se3(vca zwMFz`eJNj4SNel=fVuSQ*zcHTrj%kj_4n-^e*C|-WqIs(%ra*d8tr7Nm&IX>Lro3M zmEqD_T3t_<*I;I;eOWJNY%w{AitggV5Kj_V;yC>d`#$=8Mr8E+#Wduw%i`$w(QN>1 zrN}{Yh~=l-)HI^lyD4TWEbMrO-j($T{OI=!eRyozzzK4O3C)G(oqYGT7B82-Mz0Ok z==ZNuU7h&d7Y=^*#Q8mUpTGOq#=#R0-@LVTV)LP`N9jpr;!UH^5n`j?A0OLcD}Td+ z@?=pg432((@>iIjJipKVurso=#Z!|HX3Q-q}H!v_cLCj805bu){r6(AF!XJ!!*K zdh&|ym!7;L0H{$2N>5r4R9S?f^rQs>sX{UO{nC^2j_xcBTkZZ*`l-X0PY2|VyvhX@ z%j>InstB5O@MaGr%js}oZFS@R%?q0sUpTY5^|6P`XCK;l;rIW=&DW1T1JX?Uc{>F& zdI)6d%=o5~$q-5T-O82Ut=vM2Q%ir)Mn!+Ew}fY6ERc8WrRCO^0j4%yR)U%xzH0J# z{92luhR;4`OUoZolh2#hx1vqF!J)KH9jJY!{iipdt{+=Hap>t?$>y;SZ$A6$AA{EJez@N@O)F{da;EYY8G$?=<0rot zfI~6ZDU>d^Szxj_+of|V#e??oOeRbMYm9L+X7bOaaPrTwBjTyG@^1(ararE6XJs*8 z>T-aZYh6%iQpoUpTfDdi1q(fl;7*u19vq4Z4u9#C6t$PyEj%bottU^a2pCAQD`H5N z+6xNLa*oanjP199Oyf~GC^}cUPNKbZx$wVyuL{cdn-z>uv)%dFi|1~?ZS!O2@4kEY znPcmhp1N>sSAFZ(e;(f8odfh2udFGe=5$x_bX0b2@?^pAKsGl=D}u#b4^Ak0%4*ov zzXC7fr_Lymc42#=YxB#Q9N+-AQ({58vc&b@A~#W;yl=V*(TOJuW{pVXFqFR~L*;J( zzsvjlHBoB|4E`ym z73LQ5kXu|+oT%Y`^XuM;&96OuXzR%Rn-_oo(o0|agO@J7bfA3V;!B&GPdzsGX5VazC0l@DW9A=RYB8nW0Oa@dgVS1b41n!vTqCYFUV?=-REbu_+fEj+Zkd=pxT z6Sj&KiBeifs?*J9AKzH}?U&A9zj*TE<_qU{O>I5B=Pl2F8ZDab<;7xAn*3wUx5+!mY2__S;2P1T6&7oSLI99c*s9>kZpJNM{-^mz*_0?9*@#mKCf)BN%h4zJu z7k>ZTo;|ytKX+`;+_CFFymjHy`F~nDU%B+?PMgc>WJzMEEdAT%UKbCetBzl<9LAzu zeQia$D%i`+->C%e4_jzs>BeVHJXUY&?;u$I z#+A9rl7@Rn?PO_+2ZYH|<-f@YHnp`Kzvc34N-BR>crkS?z7ldkXNDLw*yo+Jw;vDo ztK45YwITpr^f#EG8YRn#qLl#)k$}v zisB|mt8{AOYZ!CPRI0I(v46dgpWF3@uRQ$EzwpA>H;( zoikR-+cx(KUWhB5s{B6_zicrPt0=YR@ik6bo%q_sFR!FH{@}NJg#$&wPL7L7%Y5k| z9v#}6x_f;M>xwvj_EZR_?(Q%1$R$a9O!-0icCid1-=l%Cp>xI*FwS4!YJIh_3vUrZ5p zp~z#sviwA={5@`3?1=b$4$o9mMyt&P4jDPu1{96mcO^ghjV6wgOWom_^SjUOiH>Y+ z-2eQ!ijKW%JNb>uuNBPH5ix0RM^U*|2&vCYKn!Wm*VXz)wwy;H*wJE&S3eVTGo$Yz{e|() zlQ&;~NOZh27Q^gq{d}&|1)x5ZVQ=Afs%+M6rkKg6%Bg3GZ%J_T^(}6^1Jo8w^--(~ z%|FD618+YV_E6El_eA;4*Z&2|pB(*z$vQ^qt|I+OOUsumt(>!T`ev76>*u#Pm2n7P 
zx*~(GF3$le)$-qs|NH6b&wl8sPftCFA5VRG>$3+xgl}zK)l?aNsA%qGJWezWv-Y^fUyr2pvoLWJ((71YoGbeUSM&?6ll_R6#B@dPCmY|ak=tVsel~3Hy-C$U<8<82$*0Dm_!>JSS&a1I`z`(;k$qOb4L!II{2v7U@c^W#FCmNSh-l} z%@!$C;{nsl1AHg3#EIR>5eAnTd+X>I96z~0w{94{vC&;cTDs0stS#J!GZf)#CBBbz zeZ9@WLdborpi_S7IwYed%Y8*-@fD86n_5^E@yD~Yz1T;@(j|-qDL>I7wE7+bpV9Ed zOs&89N*m24&U2bhoae;B3peoD-KA9)O?*YwUm@Qb_S5S44LIj~`0ic1ZpZ((Cnx4k zfB6fSE`9aIhl+YOl!yz>0FLLE+;F}4Y)3j8bCpCjs@D&z}&rgm`-ZSLzdu@Cu?BZh=f8%-t zX9@fF;KsX4z5g=W@GS8y2`mXMi7bgNNi3;ZQn$pLEz&}dS19rdOYftca^V#LP#CN@vLvyjW{Kw& zX(5XqnMS9%XpSCOdEHWYY9@EajI&xFZ^!{xxdvqFm3QBa zuVisZ<~;=V&s3DI{}$dQ0`fhQS4b)(_mDgQf_I64%Scj^&wz~eK*nn%F|*$#`7>tO zZtSl~K2GvYl0R}5^|A_>ExkyaAm1Z-g``4q56J@{un8_BNl88f0-G<=CP>WecS-(? zS+;{skdKpmljM&M-14!(p~tp9dEr}|7Z$H={mPlmJ2#&hYaAaxKK9>^;}t9`d1rC0 zYB{{$t7Gi+job6Aa<@%-L&Af$W3abR)OI9s?8Xsw} z7V&_BIu!TtVJpio9bfAX2lK7rBAda)H!mH>yD|CYjWzA%_;c!|iMR={S}-7_veu+1rJmbednz(>gYL1O&&76Bo8lKmX;2 z&i~kj&HJ}bKYZ$mV?Vw1et0*;F$likLp-)R_9Vk+IqS%!}*H z)`lsx;q){&j|zO$S9{o5<)1d_9fG9L!7UuF@DU1%`Q2$po(?T(;>(UsyFYLGN1qz| zQ&=CSy(WKU!6BUbjdthUp5*99pWpoQubtW2Jb%kGoO{rj;$%oWIPMMcL8BPqe5sM+ zt0bqf*&R0E-)cJGxI@4K-0pM;!Kr`1c;I&h_~1@WaE34Nf}8ts9zr)bzJ2@^olu1L zh|cmdUixt9I8Ho;e-r;3dWvCE_wfD;oOMx;Z{P7dhQcL$Xu?;ArebU`_ESPDt+iU% znDGgT@K{V6z>uutk30wy*qui@;nqCfhEiG>-#7j*`R1a6dBrgm$K2zX2MPwOLD4vK zOzN1=keT?ILLnI`BqN1nq>zjhf-!|GkwP+3Nai!2oEUp%^n0SOh*m`J5q&_FBoADE z^?-42eBY0xxEi=JTuod#t`@F2T=Tfvxc1^&z_o~L30DVK7uPbbeYkqK`nU$ThPYO6 zt>W5`YYo@>8{anrJ2S8|13NRYGXpy_tsnMh26ks)cLsK6V0Q*~XJB^*c4uIB26ks) zcLsK6V0Q*~XJB^*c4uIB26ks)cLsK6*57yl{X2mE9YFsMpnnI@zXRys0rc+x`gZ{R zJAnQjK>rT>$l_&x-X#`gOSn3?y115c?Zef>)yFl!HN>@oYZcdiTx+;s7yUhe{vJSo z51_vX(BA_yu#5g3K;I6aZwJt~1L)fU^z8upb^v`lfW93--ww?5=)mPoXb3y#>jCuj z0Q!0WeLaA_9zb6Ypsxqe*Y~lrAc!)%3xYVa!yp(@b{Pb*Wv4+9U3MD;@nuIr5Mg!| z1Tki3K@eqj7X)!;hd~f&b{S;#TEhjs=-6)bYd5;J8@<|%{_I9i zcB3P^XYkULapA|s24%2;UhPJwcB4_$I!qZ_-?i{0qN zZjLKE0fH#9Du!!Kdk2CjqJPdOm$DF1-OX`k zw?Pn9b{qszW!FIvRdybP9&udRe-L`aaox>vWgI~05yzF`0HH@5S4IGY9&ucEb6gn- 
z5PHCIWiUYK0ml{b7!DCkj9gP=M|?3rAoPgi%ouGWMB9kaHe$1l&}<_z+X&1yqOpx& zY$F!i2*oxcv5i1%W1P3q?QL{;8~xozcel~oZFF`UeceV^x6#S%nT7_kAj63vlO!i;k<5|I zle9_pk}Qxck}Q#QNV+7;B>PBuBz=+r$&h4)WR+w;$r{Nz2x0<44-gX&dVrXKw49&z zqQ!#;SxAo%BM^Fo7=h3u#0Z2QAx0qd2r&YoM~D#!Jwl8?=n-NBLXQw55PF1(^U*tb zoFS!8JkpTTD;{e|85tgJNa+cWH>C81M;y{c-bH=oKb6uw9(+jYAP+yJbdd)jQaZ^) z5Gmc{L5P%&@-Re7S9u@;Rrq))A}?PRK^~1*B3~6W9*9^XUllhVi&(kkVHkGe{Xn9yCblD~}qa^o@rNQu@Z@1}Wo+1IKIov$3I4`pP2*sN%{) z2YLCbxbob=68WmQ^60@5`Kq|`^uZGOs<`q1!V>waxbh6b68S1%@)*Jr`KmbbB*GHL zk!KN7jkyI9ha-);g%Y_Yzb?D-A0$vLAXs#f6KpNM$O#Mz}8HUnAEhG?ZE3^=S z&_ZhSr${M(NideDb(gE)a5A{dkXb<3TB={9iD=Lr%->VVA?6v z-6&CB$-*Ws)_#s`GZVGjC3PziP#in4c zDb&d+7-|YunnG=yLS39fO`L)yreKCC)WIp#z$xT@3bk(v`JO_ir%>;vQ0u0ux?`~( z_6p<=Jtp#yE^ zqbh1t^$99dC4d@LQKKqqR7H)d+{?*(Y2GU53!ab7TV?$g`b#Ff+d%ogaZ>H6%HpsT zlJPzTAMYC{G020)D``Vemk>Ngz>y?0UP&9m_cDST2sk{1#&|Y4V1&kausHOD#&|SI zL1Q>!L%==&XuOg(gwF&42bs_qyqD&!gw0mcW-DnkP|yH2Q+Lyha5L+pFm>K@0nG-+iDz#Ni4O(=S3MGHPj4mNBQ*?u(A~J+sF#TI3 zV*tvo6oiss6x1Xl=nRToo@zq>NNl9SI z1|zjx=>t1nhO>fT=Jv<^H~96;lwVkEp~9N{JBz>;-f^HSH$%hMy_HT&`eX?muckV))C*7uaN4~OcbCP2EXJP0v@ zJY6C{%^**hxYZEyl&Qa%f=?Qqrby*!Q@0eVJaOvQ{F!o<#!UDof2L^p^r<|PKU2E$ z%t?4HU&z(`v7HmX$xpdV<@pm2Eme62HQ)qp>BY$?ecQ6VG6K34Ye~zq$?)Z}mgkd< zvcXJo%Tr3eDR6BkFLGYRn~4Bd!xEe)c#9xFu!&$0z{U^}6T$=-pJE~yqi9AUJTLGq zfdPS=1P&4+BEv+P0n<#$%R{)F4xp@0t)(o&^8()z7!bHg;20L~M^P z?Av$b^wB-1_tYG;Zn9G~2VW;}J|}P+q3t2G`UKwehUY$n5oq0Hd7N(<@(t@9_g2by z!+(t+yHkmFmMnHX?8afCObdbV`5&2ES(P;LSgVO>A3mNsZA2&E!PISP zCS@>9+(GX?rY6X`Y0!Pny4vJ|rz&cs!cf!5OaSx1J>IVo7*r#~3v)?f3c*>cvJkdlJ z`{rV058SBTIpfltBI0IP(?4 zMLX()2z7z*4bI5&4H{YCFEwl-O}Rke>%RAu)1%At=8S7;nWXsXqLetTWg}pv5+<3P z93S6z^z8A<}U>4X8D?f-oIcFHLy@oiRdYF5xmE~)O~>weQ(duq)_5(G#T}oajWki z`Wb~Zo*3TwTv?t{o}?ERvOOA`w6m=oLiO+@O=d(&xtOQ@ev2JncpA8slKR@4-QTr^ zd4+P0PTa?yB!*#8y(K$_V)egw|J>`>4Xs&w7i@ z%CerB;gQyw$-wr;nuAfS3X6Fq4ewv5YB0CfxnH@jq9mcL^EqZT%8Cpt6jdZBiAIh_ zL@>#lhM`srL+q}l01>yo<{-`mB9pM2#@}XQ!7VxQ7z-mEy4k9sfm}wYY46)Vr{nJ3 
zM~|F4_tJjf(2*C0MkmL`S51XbD$(|oF*EM}Kibqn>gGZFStzhVt2$}4U_`b@vDH_0 z(@fHQ?ql6hN~$PATKi0$eAu&!e%8a*0&FPEB=KngX(whe{K&V4;K8>nj*ZMTIo8A4 zM8F>3Qu=j8Fp+xoO$T|n{1UJpR)wsG^JyH##{Mf-D<(kl&Qa^Iezz0VFEJbfmp(r7fOV{9jwkxU~$Nc+%uKw><>G^Lo(7|)(4 z2NMEH?W4t@}9GiKR0o#%Zb2X=>G7)LO)Q4Faf26)gE z%)aT@9>VYy%am!RUm1MU8yfj_gWa_ttcTMK`zs;EN=hbXxCLzZ$c}z{%a(_?ZoPN( z^h0On&%bKx*!+uG<&7wqqpaYJpuRHira`rUYAnt1>!wkr&hjRod3pV6Pz#sMwb)b) z&PG-(Vz9dwglg}6&hTuHA*?>})KV8x@6~sJu3}5b( zkdELv3eNU3y&uctzVU{6O)IU>EozxOA4pi*A!=Y>0ZDf-o3k9E8-1jVjUpBvUz99M zfdy=usM&=53CXNJYZQkD3(r9LF$1CI)`##WElyj?^^YAzID1B4(*P-mQ9>B+F7ie z$AF@te~tCa8vh#4hX~%T@n1>jm2_SSr*9vezI|{4yj|nYh0~o2C&1e^fltF3_%xgV z@V$^Lbw$$294I)Q0qY?Aoq;NW)3|0u0-b-`c4R+xmEt4i z>-tI~&-C{XT!FaWJM53wY{e_%O6=hO#5A=`DaeE$w(1L(Fdv&0>SHK3_@i$Am*)}Z zftsxcJil=Mx@PN$q65~m*7q1hiI0{EBqJQpe+c2}^?nn?I@Ci#e0hI#=J})T(F+CurX0H$?5Sc*A#CbZ|aRi=ot*ukru+M>`PhjF; z>!`9B1HPhkhvue3#8@4|^kjqy#E?J?>AhQ+Kx6`aqQ-vWfc4Y?6pRwKQzkyH#7Nm| zr0g|U5zpU>9}xY3=;!%c@v}FZ zm_pEBh)>8qDpZrvXZkn86#d4-#4!@>%|@mWluCkOM#=@R1mr4#FhZCxT81N1@PJbA zBT}G%raVVj@FV4d2VsJ-#90-=D1quu^_RqLP%~h?XA_nOc4$ zT7EoG%B+Ht zNb$UV#X3z2-I&?aZ~xq~}Z1q5oIKQL~dMq>)4_&L%-01ejC9 zpARf&`18hvX!0QUY<=?h{6&lAFS_yg+`!oUEBfy~e#@QPYUv4+65^dS64>Z{iIu40 zb;%tG>ndhVSn?DJOG_D^oz8dF4rIfjp-0LtHH&c>GnWI-mZ4TZ;OTwc>6PS!k`SOo zuv8hPMGwy}Jg;a_5E_yk>L5Xl13sQ^>-VsRpicm`VNWs;zOGaR!q)9v92mR2!^>e% zcqDjIF`SCyMEHMm24N*nH}?RSf--!QBadKVgKrFbpljUqy(9gT-#PmdUa=1rKKQ|B z`X@JMYCM-_6J3C4lCBYyPb8tjrKtDn_WfR5g1WHjnj4^l=xSQqy%#%w;XUlW!5Eku znC945+K*u1P!2$+wV)ix^Ro3l44H_Wje-!={Q%rwD-#RgK@LtwDq(#MG2AtDtwBEr z;JO-TueTK-cY7foUjf@Vd;P0wD%VDm*f|}9nad>mXnQSN0z37BaJ*|3m_=?ohu%TF z&nnjz5wFJk3nb+Ig=f{oy4^!P;}4GBUHIS=6Q@V}H-8=MHf(>_{mwO>F)`QJ0d%xb zih^POF)`ZMh~W~GdjJD#_z659V``eq`G-uQt&OVx19Im4L#`0zgr4^u=N~$-SzQQp zqv>XKz_GtvjrTBu0{k^V5j(Ft@mUX&iFV9`C0~SFVpBda%m> zYr_}PhYz@P;G-ZlK75No2z!oo6vBznj~CZp0|iO|Ied@@hXSvA_H_T&hqpe{zvoPH z{(Jh5*PL1O^a_dgzJ6B7!_wH@%>L6HOR$dLGNGHz#V+r zx*A&p<5_WF9vD`F!=B^(9Kin$>JSl&_b&h2zW23-v!Igr3M38d!}EEUbFsbI5dQz; 
z9rFAbJnxV_XZm@)iw_JA{kMm4C?d}f)5SXx#(D+Oem#r?=me_pTCzG}Vn{$(DQx3#UEf)N1R2WtcD9IzpI=M>}hLPu0Yi z{^dEGhl>k^Z#Cgm364M19H0latr?l7oW}$A*&oKi6SPVG5UM(lao~6Y z9rh9F_4z|1`6B#TpP!4QW2GRY_?Iuk!Hqm>W=>xQ+S~W$oO?syIrr8HtQXiIuu))> zz~&k~Y+2*~j{~0I`k{ko8|!O%mI8k`Y7X{@Cs^Y-*AeHL4iI>fVJpwM7(QAt~_{csT8n5B3>1P6Htw^%I?xgm&&{ z%3*xJn~CoZmP4yl&0GR9mXx!C&$OCMX+#YI)eINk2=@GaMaA6t{JXeZoD~(&G#0G( z{)h9wHPocukR;b=KbHD-^E&Tt#>VVFVv_1L}^) z;y}J#0D*rc_-8sl3*f(lR0d$Xzyx_K0Xp%GIC_l&c=rMX&I6=V6zjj0?w=F%@FM`s;*!pFp!CN~Cy_bi9i&O^}fg z@vN|O+~m@|`#?YO+Me#Up!CN~Cy_bebR|e(@Y> z>@(_Je_-GEBh!_T+;|&dheULXq}?kB?X!f=J%@yK6`}VF)$U)C5ba6$MQ{usp`_k; zaUtK^_whkTz#Dp)4zxNHum=4y%ogWLnFVjF8*>wS6Gnq zmxA!61;GY&s4Jytxp8$Wjw(tj$|+i*C}lkHicp4$6H}M{J54RC&TbPaZ&E1ZHsza8 zx2vmU0vl&Woy*H?@^PF@%}c76EH4I^OUiS+4syuX*%Rg3$0*n}py<4!w-f~wZBjHS z$X>3ls3OVxHP)kadG1M-!jhMz(QsM3nba-VpRLG*0U`u>f*3(3K|4VgL5jfs9zag0 z{{U4YRU}wO&_RHlt^HJ6G5)E$%IScaq)!4r=idnvOcNv#e{pdV#?wj?@fTJvL6C8Y z{STfmPw&0tKE`~!-6~at!vy$62%-crf_8!ufhPx$B$Xn_5ah+bx_QQ-&M`AYzt zCkDhWEcJwHH#sL**jt?cd$Mr+3d5d#|+<dLd~s+W1=?%i$-K)uxNNq!J>!^)qzrxp->A*i5)_evPi^Hk*#1BL{O-q z9wAj`Qz%&MR_1l{|TY7KT3l-UR}t|U^xCu~H6Mv#TD z(k%>Zf`n>=5lt-=3PvKn@xip%7t zxg}W6OKUKvY1Y*SbDC&f2%mA0xJ`Az$`1$?Y6|8w*?Lf>Tu{I{Q!Zd8Ozg&B(nxFy zCJnDSm=uY%Ee*k>u}*V6NXa)0oitf#1XD%A%2v_@Kci+VkVJRd?Hb`ZK(~$jOdmSNC{b2hq*hY>VtWsVnZ;mv0FpHn>cU*Vr6fX0$-TE>k&8@Gi>%Q?uIKh^#7Ii-Ieu%=)@u z$;hk^mQ-eKYp`T=L(WPnvjI^|9#M29)vdlE*kE{A45|Bs~(>_=QP&fkI&`sctS$9w7_4C(b^0-A~J-wqD?DREh-rWtq7}X)izKC6S%cDn8HVtB_diK z{}~lgN3Ir2sFAcOY(%9)l5A`W2D*g8;{~p2jphb)STkD(vp`OGa(wdWG39Rc_F{9RKZ4G85m0GlV88tW_Gk?7b2n920hT1yZdZFcn zlrySqLt_vN84?ohOqVvKAv~L1+LG3yS*u(i$)oBiRohH&6)L2u2hC`zwGj=WXdyt$ z1uq6GSq@*zrwpM?>UHW8`FPYiGl|pVXXbEy5AVV`R;7+Rck2g!dI_F-LRHYyqA7(xZ3*i)uytZhQEYugyY|8JHUbkENt&$2ADw)OvM#Qkk)I&T)CG+66l5grNn$=W^brn54g0~0)1e*v3 z0qo_Zq69Dzddm-Px%wtF^r@T@iUW7hgip(wtGv@+TH7L zoTlGux~KwQZfzrug+Y->hQnRNU5SKX3})c+j}vC%gpm$G7(2irBt|YEC3>2a=L>`- z4d;AASU)Fp9}*74J^qsr6^&>` zIJxD&WshuBUkQ5FYQjv7WEE30npI3iN>-~(RxyhMxtJ_LRyiX>C|RxIKno7xFsoI} 
zDrRm7nAIxHDyCtORj`m%OwS;z@Q~n8zRF}3(=*5_XL1NNt5p@uD&}&K)hfwqRfS{~ zQ%dUbpZ!}N%&)vQQ{m+hE>q#IW=P39CYHiUx+Jfh5VD$>)1cR}n*Nl)X@1#ia{t6? zdM5Bas|nvg!i5A_4G@{AZy;C+;JlB#N>aNE-8>rA zEjMvves3#%K0%-Nllo_X3ziFLwFo+`jx~yU1lju)d6hY?u3kYGCM7Q6X(87L>>;%G z3-k(STmI08cyr-@89?9%G_byB1ix7FoOG(ZcmARLX_n<*%3LmU?@wMTl@- z;9CL%0yha9Bt>n5e+8z()x^Swi~;fh!4}_X)pJ=-onpLg@Dk{m+CbVImeWG0Iq+ zC}hDXWnvbyV3f1)QPAQ>NfTd~6mE;4)9P3=$#THovpkS>bhN)h!t{tQ%3tDpHSD;$ zdIiBg;`d!ezg4EjLp`7$DuM*LMqRtqwSWkpfW-tA=2RB_#A?S=Vld4}Hw{TlQv!=I z>83e}X;9)v6^l{nR)c^trXWwU3qln}@gG5?Z(j&}y!=HSJ#gLAbNM_6KfitXJa^mW z^D39Z{f*1#`GWYph2P_s&-4BSeh-1Z33wmejrjcvzc6^$;P=nqA3)s8!8-=L1ibfx zha+76{rIf_FAllyTt3gfl_}Y`QigpiW81g72fUfpUTxqC1uyQQzkJ?y2HQ@Jws*uk z0IAG&YO)0Q=jb=xt$tqr%KzY&vq)bof>VYO53T=cITan)fF!O zdfe5kf&6}`GRy?+VWwpdGcnLe2sG6lW-5S427Dxe@JIt3Bmx?;?O_VHhb61SCY!+S z0eAIXZqI5wP58MzJ9l0_&w8GDw4P_0*7K4}>v<*(2&-GqQvrBCBoJ8z2WkL94M3=Y zkjO3|ltD-+13r|oo@bW9N1nllVn_^%fsb;qo~L@&^I8_fzH4yWi+Y(qmVmvG^}=2# z<+V@@vyipJUMQ-<8`Z>K$ch3;i!G$u_Cm4SLb2OImJm3!-a@h7Lb2XLDfoqA!-ZnQ zg{(XXLrX5C@{k7Q?S*31g{($Ug|=PDEFkrFP;&oG<=1{mWncRxm3{4(RQ9!BQp?wV zNi6}PB_OnX?U&RNG)foYp(Qxb5)j4(gmK{u;{w9C2m|9nCbWbvv;++;;R`L{3lk%C zXbA`{;R`L1`a9)UhgIW=S;PAxEgz1F?*Dx8?MTwXpPn##7@t>dq+nPg0MEf zW4-ELNz|z5T}9P`oUHQ;_<83E+9B|Qb<9)22rY&5-oG>U(2#O;i;)I~McSpCbOpHE= zP7qsb{OzO^%9kLNPa=~nbOGVhz#WMg2!AgCZa697`{6jZ(j~t5rZ^|LH)96~J{6q> z5yoU|R|*8S`d}Lm2;cV!j{MU*hY75C?iXuo4GtMxXK=m24F)$F+$4C!#2cyAcq6qM zZ=_b^jnrzqky?#6QmgSsYil*5Gf?D>8abmz&ZvIip6-=S}*bH}O7i;(gx4 z`@D(wc@yvRCf=}#H*De!n|Q+}-mr-`Y~l@@cn3|qgC^cV6YrpjchJN;XyP3-@$NVA z?l?*5{T5dX zi&*1nVbQJew6N&bcv@JuxF{@%uZ2Umrl*BNx2C6sL${`#Fmg+r|Iv~cLwcv?7g zYdjRrj48BHk@U5&ZgI7+h*fVbEV@;1Ev#EyEi7WquND^F8cz$0ZjGmfb&IQoMXd3( zu;|u!C@j%O3y1us65F-W(fb05N8Yx!`Hlco7Vzk<1tAR<2{sABV3QyOn*) z8a<@(Lnq%uM(B&1U6R7zcFb39%sEI)t?w)O8C=N$Kq0ojRbF5qXi0GuYV zhM)()-cR7AbDUJKov>b9$^-sCwO%}>#8T&6wdZF`t%vq4b^0%<4!p;S+<67&X$XXh zl8O<;3EBwS2|5T81j`6I36ca|1Sx_vfqv@n@JDYht4`+a1X+R{L7t#M&`nSzSWZwP 
zSYiFhzTN&?P`RZm0iy!5OQW%+#TC(Xri2Sz^1+kC@Cp+|2%-crf;d4NK|4VQL4sfz zK_@|ypo<_ykS0*Q>+8)&7J7QfyqzFRkR!+w6bQNriUi9EN(3v0JGzIvx`%VI;aqMw zlj$BVMo%q;M_6D)pmJhD#|5?tY!}!eFd=Z6z)pclfn5Sq0@DIB+L!+5{>zof0uCyTA^C34zN5b_z@i>=Kv~m=>tnx+fN^FErG**XLF4?c$dem=l;6 zSP>ffo>v8DP7NoZWd>U3@C!rlV)P@iWy8_ra1_t}a9cJWA5O%EQ}N<( zs(m=!Ih;%9hV3h>F%vc1of|$K8}>Zy|FdDMrE}O?gWq^({{x#F*2g-_-S3KFJcpmQ z&JW`u6to9V>8Gu?hP|6J!@PLryP09`_#5ts4tK<|!+|B(6g-U2V&|}bIyam{^(l13 zhH*JBKHJ@iW!-FN@FaxGBtjWj|I7N;d~5^}6j2ma6jKye)TXFiQHP?0qGgIY6(tpQ zDM~3yE6OOkT~St1PElS_K~cA&qN3%BN{UwW+@?kveL!B)qaXQ32QJGU?&-wkn8Q7} z@bE&ba9LAbLqv$iNMo}S_0i~XPb@BuhImU4fXI#w5Y*))Hzs5+*EWhU3$SfDLFcYu2Db_sWYOIUJW?W5;jm=BH zDHjmr@r8MixWp)v@7X;*y%S4xv>%&~kNguR1?F%QAYIE7ggGu=wksVCCpIVN_P_AV znyu%?zcaql$N%l4n;(gm@V)v}ybbrfm8_%gc#7M-gVp#}e>RsZqQ+Wg5e?{=O@k`z8^Wq+>5)@N!&Vvn{kt! zrC>2!^8Vg^HeSf4Gx5c^#uYaMkrBr55oAti>>^mqFYaeC10Qyut)^cLpUlr$Z+bi@ zi?{~}JDZ(Dpq*9L_h(pvqoA$-5QZE&5k{cJgwb;-677(00cBNNuc-tO8Vm#l0J-l4 zXvcX4CT6dH>T~^vHb3@Y`_TA!+u&r!$g{V>*lF&`ho|qy12JSM8PD&ijO;+nD&G%* zd})Kcz16<2l|e3*E`oNq17PobD$Hw5v1un23kL48UaMRaOSj{;t8}D*t1;!5{2_jm4P(J(LXsa%tZ^UOsaH>+n5J&VIi zIG&QrrMqxrbG#!FN#t%ji(})|H*!$w{11la}-yh#M zR&(4pI`QDRwFVbrv;|Rdpi37nwL~?T*M>`)pp-LD{AP~v1U+xMS9TS0xEwXzmB%%^ z`Eylpd9wwqY5A6HB_i&;e61OuUwf)Gs298t?c1bWQ z{5x<)EaSct4R^+|)BRmshMZ_mIgbBhxHv4_h0FJ_tsR$r#tj2^S+sYcUQpQtZgxvY z)A*81JZKkr$5Sbp#=RGrbOr^DM>&Qq4e=6FN3G4~nFfxQ)s?(qw@9@^EkM#IF_Rawt=Dc6xkT6rGjrm`B?6HXNUVy)Pp)70?A>>k&K|!rd1m3*17q{uiQWi$t?_&W4PLMhMawo8C5QWrg6@0R zx<}Gb2R1D5UK!j41AFFhpIRQZ@k!jKo#`m~ccH<7{;WE9?WKZ&SF?ro2u^mrnk(|! 
zy{z+hSbc`k+tW!FhlIwaL|(auH#Y8|Vf0|-tx?2>GT4HMgFoJ#c-bc7C92kzh$my% z$^>6r&U6BP{*TSLGB~@K4@XdtChwVW439ZQFbAxfeA2vs62<*{NC|g2qLHh1$0LPA z62}_(yyCQ7wt#Dx(kXW$aQSqX06CAN&CLhh3Gd4&XVkoCI2rW5j1~uuq?iU6cItH_i-|Iaxw38y-iRyORGnM8IIcT3>_H1?MtHb|S zWvU~M$1@*J;(gJHNoc-jpg%5hW(D@8I&krHGVqUrpgAsFO7J=)Jin+5`henK8!Dij zycI4!#s$w(Vnw(!Tnb|sqqHoZt^SPvul$)ExJe3cru=}xzCAnK_xSb<01OZKhH-&U z9J=7aLo18oJ=cfc7+X)an5ewLzI`w(+TyUY=2p*JPARmw&QSFRHyGS#a8un4bzABl z%r1y5{A9zzXveh020GK<$|Xwit!8?ptY?AXCUWZwA&y z3hkWv=X(S#doqk;h``8^zq{k6j)mQ4C+}YOz}U41dY`zvZ>Rr)zndpw*<`rB4z_%( zGl7-vBlvyOew(=RL$Dp5(Nt$TjOG9@M0{p0o9>F^5+*k4=nX_r2DodSO(k4tb~BKJ z&L}&juOYvH`2=3_z?x$a-oLh9L;kUA4GuhaxK0a6OXmH%tk=*Y^NB#1bUPrj3Wy`M zMCdfo5TJ^9?0``1>B^w0hKUil@{$a0(Bi^J9`z=X!S>Hl>pX) zL()<*2mbkotReu)vzT(bDm#z__7>o@ZH^}~adA|Bv9xK+;^->Jcn-G{m!;y(fUWSU zgxlr&A}$6){|}jogxg`5Xa{a)%LOa1!y6J!bs`G#*=-LsTyy7p$HzW+CJ^h`{n>%z zm%2;%2eEQK8Lx)yjmiHZU%aK-k*`c7syDZOYMnl(!I~EYeVNL zS#m$rSHyFPKH{)jZ?*VSios*Q5j;1mPjyyKj~_b+kp!>=w9j+A^MzVG8%>KKJRJX_P~I(xRZr+I`u z@KN^>_BC<8C9i}-D~p~sYFiTbjiPlBzQ84f?JcDie2DqAyo$YGRl43l@Rv7hDYPV;J^Dmwy zn1z|rE)DC8X9?cl(h>12K{sQc_iLUd-g%xSzI*W7Ej<5y{t3pZtyhaptD1s(DlZ&A%V~sw*!&1PBfw$B{u2?pF?JylUn+=RaKi3a`V9 zq4R#}p&XR*|HgeAwJ6?(K_>fD7*OV-?oIwu;LD^M*52{-=+;FI>-g-oCyJ>%?RRMcRgAJgcm>7+oMkTmU=p*1?$r~=Xx@IB~h z@jaMBJWF2&Ttky#E&ODzeKsSdDQW-V|~zYM@{p z^>tfCU$=K2eqZx-J3sJsGkDIe#R~@)Jfnd~CcDt}^fdS`<_&e(J6B%V@Wj3AM@J$9 zx7>7W(R&X~Or9NY3oplu5xr90kcWW--*0%OWeRvUqCs>A*n(gqej%2_7z!c*r!>hcyA#RjW5PfG8sIHCl6zP#B!?rKd(Nq^Iq$y_qFQ4 zXWaJ)fe(zm2cKztt$LcE7tYL6vfJUk!T-13K}pP@VD*Nb&K>rR-57q!$vixwv3S`N z<>GzVIJi>H90jW@i|C`WuLT-Cc}ze`FVp`O!Yr3@U+|U z4Sl<;;_EXMb}glgI6J!Zg1y>~pXhr|qPqt@N?x=Fg?8(Ad;OQXU$$NmgpL5tm*cI9jn^sbs5&ceuDqF)RV1vuBhW_S z<;sV~5Hql1f}sBc-Y-?}+Ig?%MuB-u3xB`I`1qYIDzJPJ>UD0U^F>xVHL z*3d@0OR5L$=RI5Je6#!Bu6Pf}ifSm+Y|CKc{Hy`q8to2gbX{1{RtxlJ8tNCafQ8Y48JIj^W#~xx23z#sA9H- z^BCxf$2-Fqns%l6VU}Deg}x3p^5ZRvMgpC5yH{^ZjpMaZOd?7LUc~W4*xq4nzu-Ad zWVI+hZJcWi?on*-H`r_V2)>NNz=1IhRC&E(Ua^^4th9W5!q94mhm3R2MIdy`H!iFQ6!K_L=7}!4`?|BS( 
zAM`$)Nnqer!amTr`ytPp=<8tMi$*esfhNu|b<5q=c>N^fxOWz}pXM?pUML#$4`OhF zagjnCS&QJ}Sm_?PA7Xa_$#1k@WOMClz!lhWUSS)o_FdYm%pLw`yzv2oiF0f6$8*VB z7Ws>lXM0xO(u&oRAy0!RZ+{X~cW4K@&|Sn_5t{U1x!L>@mV>JUe<#hayWRH@ryA`7 zr)t6CgO7%4`rVWp@ICnSM)&a38##oOH!@q1r#H5l31D?)FvpvZWHEX5^hQjbVblG_ zz#(V#i`sX!-mzb-wl8I`pcIe&=)XCoXZs`R+Wsi;th4$@2t4Td1^QrkUB_b*I`AOA z%!{wNI)m&o!!&lvUbJK5UO~Nr2Glh`7ie~`K@%Y)`rCpL*-i}&HLTooZgl_k-KG2@ z|KQ;AkJ^g{Oo#yqL6-+wjZGZ*vVA$R`-y7*S^(d>eZB{XAMmFY;6qUH92|i^ zT|VIa{@Rld^j@29zvhDj*ALxr%l>=+d^I~Z_-%V5hI`>zF|BRxH{1u^Z$!iVtXSR? zcXr_SS>L6Em>Y0*&>^HJ9{7PLP9OFya)BTCE){}T{l;}_#CcOAy$Rc(R56Bvh``r4 zNtQ0M^VNk(<}S?R6Z`J6&?!@@64Ku+wPq-qEmM{MQ@>m$`py1Wr zUwN}kRK*tCSNi`JeYK_jwNEcombecM?p3js9yv5p-eb8gx$j2mXNY zbXpzNEX{P@(|N;kaT2XEH1K)n+LwjD%I~6*VZH{l>9W>>vvtcb`QKSe2lM=765en5 z0-OJ20`E^Oz9i6sVYU$cHtX23Fb1ey=~6h6m>gTQDDn8%3um_{Ke+A8W#>l6ZizkO zJb`tNC#>z&=utcc#2##p@zN!`8g8rT`LQAFIeiQ?;|e4LowHmoa&zhgw9dBk<#^2sMGzkJPlExx>5 zwy4I?^tD1Nj3op-H1OYEO`Xsx@!o7O_+S`6l4z*VjDYX`G6Uza?jVC8%gccjm}&+g zm>IbJH~QY^U1bQ*U44NFhSsy3l4;)(&I<6hatgmExoG)V<=*{swvK;i;_=;Q&vc$U z-Gjj-CN8ja^zYVdz7J!%0OiKz8rne=#{pM-Hx17@X1g*Sz9sHFn#BDu(qb7PegWe| zNuUiT!u{Qdhar_n#aktwq4>NsA6T5is8pu`t=HI1$0h7c)d=fA>s&S%J_a_Yk5;p` zV{~AaGCXfG4Lr1IUb(G_Cd-lBjhG;jK^g^m=X-t(v6z{WsXVUQJ5OMq#h8-BAJi%F z(pn|tOnHm5i2MXf-Y;t~l!5#2AYn4X!y5)x&c7L-fP87d*MIlW-qDASKB6X|uXhEk z1&F%#S|P(r%@B+r4LN?Cdz=E_PC4HKpVSctH=aoE2`e@N?OJd-S8L2a?`nZWJt z%T&*h(kPVKm;)^Z?Oz9edBIstBww2L&-c8Eewlpt1C5XUYxx`-#A0HC1rOB5b0TQq z>#b_*-M!8{t5H$4^{TpvUNoLB8eh7V->AVD*PA!7g%~58gRvqI>hFR!hvl0*MC1IN~BwvBXqd-O8uHzykDtFt-=o3H6&Mjn^*kZ(i~KW`p-PC%nJ8VF8yqy2lJ&U=`IpP58H2-QG_VeroO5 zx)<$9>;A7RJwsX_US}L*-VO9598+ZP9JAjgh}iFX9}irxBZN2JGx5DbvAyx|)_d06 zk-YcZ-J{;$ptpp#Df``cfrYz*{=zN89V)BM6fx`cweKTKzptwkhiREZKuoqLIc=Nj1%3GbU z5a#iEbiP99|Elv9|5wp3#FI<$80mCl6_@vvKy$P&c$)qR4JxmX$7nu=F*}wmp#aCN zoB*V1?jAxezfT(C__bRLfByIvSMGn$$fJ>obL&30Zne7y?|)1bW4;@$7SdrZ;KJk% z2Ets|$cs@(91eWnKDdd%7dqr-(YeV5F`IPIIvZH;$#VdYgQZ#T!&vfxf`Jn4NH 
z06i#t8i0dWOcG*!C+NKtBb%VzY?vnl$DLy*aWH(b5ST9Fa)TEYymJg|*BInuF%<9C z_7?`;JK*g-)AZQs2k-Rktnk0^rx;#$@Ru({GMEnF;GYW~EJZG;rn_>OpK3?<2aDk0 zMAt?P!s)WEtF|9=ehyo4k)1_xXsP!jei*Ijzgh@OsrSzeno^dx~V+RI`SP*8x z1_F?p!@>dH?HP&6S{hFQY6i25kv- z1z|+hncJ}fk%xu29t0=mW&z#(0M2JfEgj|Azsvlz~c!)j(t;D4YR-?%(G)1;+&}gmVAhzmpM(85Lti zE=!9vsm7op(J@M{d5K^8BVAiK0_`1$`V-UL!$8jk;r0Zr!rBdup*{8 zy3;yFFYw);Dmf4sYd$kSaQud``K{*;_dGm)_UQQJ#C;5L`?P??hW1`$9rJg=&j&%% z1X^13izET}cK=0;dYyI5kc>j7gobkXnj}`D5eBo|sv&llU?GCKVvx?|jgq<;(|fg% zCPZ15I7~U9^QD21pJ}2jNG%p`FQ6AhK|d6`|BzO9f7rkjlbu);m?k7V=4~)xh-V#a zn0GPetSgPO(=yoI!AyL7e_wu|ldH3{23*SpwXeQluLSbztCQu&_1=4EI!uamB5^{5|}DAUMs3Ft9sQc zOpx+7h97!ifS!LL-0`~oSGeJ0y{ES0}X8eeHT2(&U=noS9y-D@*ErU9D}N! zWAFnT`=T$gc0obGs}zkXLNFx|REhIA$soZMuTnIo2*H&=a3#*;xPwX;yh@#8iYQ&! zcdy^O?b*FU2cI1JVzeE*M9-hLMh0w@h~il;Pp!TFJkC3-cbS;&W(YeXVKi`$1cv%1 z66f0?91031Vn7&R+=Nst+Rbo8nDbL*%*OOsYTbMazophN(6HTWjqI@3>$d-jirbJQ z{&Cu#llQ)RWFmi_(7Or!{e+mVAC?JzK!gNDNOI;0y_*mrhsUh94-E}*6Ldwz&=AK- zN008>SzTpaXU%`b;0*?!Fc|*GpaxI^A(TK6B^c&lIgAhtBLu?;A?X}2=>SVQz>*Fy zWvDk1B?Pbp02co-;}0zUz~T=q{-`x3gisPj zC%KmEnCGhWDKq-nYx++xH578}K`UU!hQ6&pH&t{q{xR zU1JPDuz}zN!LBiI;D&48u1~Gq_X>U+@H>GYht7&#CBm3k5t$o%0c_ki1T-{s`gDK) z(W9fI{J;%ecKegN&Rgkyz^@p*!Qc}HQ}_(|YMDeLGh{iyE+b0O21O?nQO-2=s-Yld znlip@tW=>gv7t0M(+q`?~uK4CEB&5*B_Nl2R^CBV_6 z6Zm6U6$+!HxS?j07DT}t`P0;fY3jr@#Z+cHUY&6urP&S>Y?nN*1RDrW5bWBq)A^zB z%fyBPZ-hIPVXiMAA~Wu#k*B0d_uwyzP}&GBF@&)gOALS zoU%f4Z*012vFGTHeH*L^qFweU#ieMM^*i-@Mf@y58#@UZh5)+hIEC=X8?mv~y@2V`7&iQ1A1Q`9MQ$)#hN57u+c*%% zxF0?J6qg&(bM4>(0nXxJ-%QRqhV4{o-Nlx~UQ|viVBI9wIac#%q36!I`zCjf4E9V8 zEW5>baAy(Mra*W1yMb6WCo*KwHRu1=cHKCK!2eu3P^zk{srF&;v5uCL~zS(F5M>kgnb|3 zCL{P@B!S^3R!5>Z_7N>aWjh3RIi^#3G62@V#ch}d(zP8qVc?yIfhG2tV8w;gu2}2A zc%gD3=6-myNznf{@pf1alP4TT;FJw^2=Kf}XQGIs1z4Fz1n!W}#jxl=Ehmo--Q9fT z;qH}Jj?G^P7cUC8xzW>x3tGg%X{);{-I6R+%SG;GLV-IHxZN)63pg`SZU-O z2Toq3V`*;B$DV4$$IJs(uW)7pyCqVc*kr{$Uuo`gz!{=A&aPxJ!GXP$oC!gOu!$er zjL{0y#Hb%}XN}i&vc9K??YWAvcp9U7hwrJ%mG1SGm-+rp#`$PP(n568Y>AZb^Xes^tSA_@w3O*6u;Qb 
z?QM8&v9q#0juS4dneKJc)FT7XpR>L0gD@;t3sNX;?70rN#WAPehBiYBl#_OtFvVL3 zr*v@2DN=}_eZj13SSD;S9fHp^vcetiL{lzJhI2T^!&J^;R7g(NW40StNhR}vJN-+9 zfc3EZ9dE$HSX$ycj3-ap>|cA{WS+Z9K_0BKe~mFcW)!##<#`jOf>nm{#WP*dybaTu zrEc!!$B7bbT|h}<4kp=#?|dfWD|ne$Jb!3S=}cnZmdQ=M6USC`kMAC+KvhemoF90W z@cac-!1RwKF6|)ZWKpY8wQ>#)bK#`AQ_X`l<|x&&1ga()tHO#EFn|{y%Ety_OsB`O zZ!4P!I@iarF>f80UY2tyAFUT=H8JOaS>fe;>&QZA7N6TqqU{YFL>|yOn`w4rkI$W+ zAj0H44%cAH@O@ls2T=-VQ6tbzKyx6Q)UnN$Cep15oDoJK+3{k(Y#qyC4hkYT?aYlg znS3gYr!a`e1nUGCbxur7o|ZrM;m5~cc;Q?RR;aIaKThb)2>mfa=ag{7*LQS>b*;V1 zy2g)_($*&jW;+;a#tY$`7p-gT1=c4?-ZV|Ri>68EpQlMiG-2JvNo9fkggOX$rA{V_u4lyERp#V|||fdpw$z356=A)GR$ zv=9<9nIn9qV}-qEmhLl*vhp(=#G&DZam{q`&2rLqvz#o1S=@&o#wS!Cn|>IKs~p!j5!8DDI(skB!+U}5lhgIQOp2djvlM>j%?NT?Pn@4gB_NZg z`I*a#LFUPxzEg$5LlxsM^o$n==5}^hRM_7{(-p^HA)F)`c#hD0OW^wj#t7Me!Ad_U zOao)NDr}qub0rn%k!)gPIO_NFyKSmicO|?vB!`Mzo>|& znlC9i<%r&H7z-SMf8%QvuK=tgq9MjMsss+Dt>AhC`u=h}sw)FcmZjZK`|oGZ0&6$v zl!ay4LRS%F6vcZI7Q=F-q=D_h zvIvpDDKgzSPJipe=$oOB!UMxOtnumLHv2_<(42Y1N3Nrtog8e0(OJVvEVeK6&=b2< zIN2NRK$nW$PMQV#1k*9>so^Lmfc`ORl?Rmzr?>g;t41$9*HPkzoqWeKtnrq3IEwpG z&`rw4CstnD!HJIUXR|$bUe}685yww42_PG8JWFT?EWPuLIlf6QV^$2n?sM;wUI7Lc zMQi|rn3$YyiX;m--^;y9xj>s921ggYJMRA*^#8E*fW37w3@~t{8s~SguE5exCfLzL zXCswc7Ut<}*gfcb4Bba`Bx1)VCwm^f4f=hv=}xBrT!HggQcG~g3QJ3dL>~ql3ptRq zu9kr$wophfS_h&>z?-yQLibljmPj+>>k4425eL(rU-aQR^?e(h)w`V4?(eJxgt(q^ z^&5MgJ3N2FQ?>D|=a5^!%bjvoZy>;RESr0_^bhuIxx4r7yZ3fmms+6^L7h+n>xC3p zr?jF5;R~!&T2Z|^6xA7*!Hp6}V4c#6>XoG^RHwK>Lnx{lyhO^*Qu|fdUYtOGcJQ0)X$(g#r4K%Fp@Un4AH1gMRmqyaJ_LF zjEt7!D$!uv1|ze^-w?>1GRg!q-t_cF>(V1n4{o&n>uGz_EQgg5$8)oN)L~`DX^ip7 z$%#pH&=im{&DEGjWH01P#OEM?#sT?sNXlg&b(DP&{|`L|%RbgK(01Czbfz8jdT!c9 z5z`LwDrajt9JBviIh;7ArI)=#`ixgugjfdBpPSAHEI7+ye99ISpRxtjLCq0GL{l2F zJIjIi^eJ0tmZR)5D~g7qHWH_t2X!%>Kk%Ya%04No!=l(iH^@F4kTC-8d3zgPTw)xTH# zd)1$FwO2V;^(uN!(bI}N3+P4OrLdVj?IeNGGe-JkLx1%%EQ}6ZAHF7R=CN zhrP`x_Wydi7q}OABQT3Yq_8YVAjDZom~JS5M_IAqqcIpihrbtJeCegX{L8y1i(5~R z77izFvpT+T{>XEzLxc8>vm91N9Hy;Nnu*(+W(A^Ff54a(LSl;CS)w!{!%)Y@zQI|s 
zl-JugJ6*#=TK}(_)Zl+kWR_4SU=o{A7>AsY^~2ShzRKAj%(!AQ+?>EM4Kw74oqeb`!Mz5b8pbE` zofG<5Hs^%%Pt};n$(5(R;SRiF-$;Jo6)PkDa(;0Kb}C?EryJXt$_dK%`rxo%<%M7X z=Ly~-2oP){7zDs2V4$J|VS-0zz7(p|!iz7fYip^kH8^B&ox$}6HyGS#aFfB!2Dcd8 zYH--#h`~{VV+O|!Zc`jG>4!}EA(MW{q#rWrhfMk*lYYpgA2R8OO!^^{e#oRBGUC(sWbZ28GY)EK6OT) zI%AJ|lYYHPzuu%@Z_=+f>DQa|>rMLgCjEMoe!WS*-lSh|(yuq^*PHa~P5SkJ#2yVs zp9Z5(gVCqK=+j{IX)yXU7=0RyJ`F~n2BS}d(Wk-a(_r*zF#0qYeHx5C8cq6*CjCZ} zexpgh(WKvK(r+~BH=6VtP5O-{{YI01qe;Kfq~B=LZ#3yQ{tUfqfd*`r^V>g zV)SV-`m`8*T8us|MxPd=Pm9r~#n_|Oq~B`NZ#C(+n)F*u`mHAYR+E0KNx#*k-)hot zHR-pS^jl5(ttS0elYZ+Tu}9eG6E^yUjXq(cPuS=aHu{8(K4GIz*ys~B`h<->VWUsj z=o2>jgpEF7V~>bQKVs64nDiqi{fJ3FV$zS8^dlzyh)F+U(vO(*BPRWbNk3xJkC^l$ zf5aY9qfgZ66E*rojXqJMPt@oWHTp!2K2f7j)aVm6`b3RBQKL`P=o2;iM2$USCjFR6 zKW5U8ne<~O{g_EVX3~$D^kXLdm`Oio(vO++V0f~u=Y%~y8*@mpL@GlS*6&gWz zdRH!81Vm5}+~`ULajiRjo>RYZ$VYdEuD5Hdo^#H7>V2yFN8fwvjN~(t&qzKa`HbW< zlFvv!Bl(QvGm_6pJ|p>zSX~Hcn7C<)d!ON8OZ-55XUTKLmdW{t)~j_(SlA;19tcfuZK zA^1b^hu{yvAG(4+1b+zr5d0zdL-2>-55XUTKLmdW{t)~j_(SlA;19tcfuZK zA^1b^hu{yvAA&ywe+d2%{2};5@Q2_J!5_MUKLmdW{t)~j_(SlA;19tcfuZK zA^1b^hu{yvAA&ywe+d2%{2};5@Q2_J!5@M@1b+zr5d0zdL-2>-55XV0fuZKA^1b^hu{yvAA&ywe+d2%{2};5@Q2_J!5@M@ z1b+zr5d0zdL-2>L;19tcfuZKA^1b^hu{yvAA&ywe+d2%{2};5@Q2_J!5@M@ z1b+zr5d0zdL-2>-55XUTKLmdW{t)~j_(SlAuHX;BAA&ywe+d2%{2};5@Q2_J!5@M@ z1b+zr5d0zdL-2>-55XUTKLmdW{t)~j_(SlA;19tcfuZKA^1b^hpylc!5@M@ z1b_Gu-P8R8e+d2%{2};5@Q2_J!5@M@1b+zr5d0zdL-2>-55XUTKLmdW{t)~j_(SlA z;19tcfuZKp%3v#Y4aW6j{*J|;Ew_R7~qcq{utno0sa`^j{*J|;Ew_R7~qcq z{utno0sa`^j{*J|;Ew_R7~qcq{utno0sa`^j{*J|;E#d7e+=-)P!FGb@+YJx-~FPW zAN%I_1?4H-ek$wP;r3hY&r64IzB$|N-<<8Ty5ny4eTOgpWVZ7|%VxW0Ka;%|eSS9E zeRuZyZ1?R1e{5v;2cPj`7Q0V;D#NXzUF+sJUp81ve-e32Q9isA31Ds zY;oJ-QH!@*bhg$Uw|KY3lNRr_c)!I5Ek11Vt1Z6N;%ST1xMywu%dEoL@Zurx8V@yequW;QoUX5)27Tg+^{>}ZRb&5e@Tcp=jk zGn>8VJcl>-U+`?&?(DaI&wxL;6)PQn;NRm{>e}1Jn;yT?ciyVE-rL;U!rRi@%A0k) zb-i_S>*m(Yt(#jnw{C9T+`74SbL-~TEv#Eux3F$u-NL$sbqnhj)-5(4NaxVU+-~W1 zOSfCP-O}xrZnt#1rQ0ptZt4D))~&2tS+}xoW!=iUm31rYR@SYoTUocZZf)J#y0vv{ 
z>(ttQ+EcBY@36|E*+S*xMJF9DFxz}b+aD&>VMNNyE7PX&JdKz;uJ=FA2(?d-U zt)i`@scB77y?{eP#!Ovn3R_cIzv7#aF;m={>eiIEroJ@=uBmWMiT!#{LRzHCHD#`; zb4{UZDqU0Rnp)Sz;%TPSUJj;*l)t9_b&(!Y@tTs?)Z8mXC!~jzy{7Ipg|Df+m%L6$ zixj`6`ZeXRseg?DY8+5wff^6gn7}K-JH!SxKBzH5jT36DP~(NVUVTKqaYv0kYWz_* z=^>8r+SCc@A*S#G)d?9hzNj%qjWcSj;U%&Y(jxAtu}6(RY7A22kQ$5Bc%+X<`go*| zNBVfAk4O4=q>o4Xc%<((AI`UrXZm=ik7xRt9^#Qc9_izeJ|5}gkv<;j+kM||{OQh5 zi+1m~o4?)t?e=f?fBOLtkM!|KACL6$NFR^*759XU5s&ooNFR^%@kqbup5*)FcU%sp zhj^xsXZrkr@Jss%=^-BJid>En?;9_ix|FL{xW7V(JRM{s$0*?x~MO8Lz_5Aw_x z%B|zO?Tz$#w2ogs@-tffc*!r7%i`B;{`-TE(fquWAOFc82;BdDdA@wQeAa71`1ip)<@mVVDtF7H^03@4Lm5UdHtnym zj1P+U9WTc6pqvl>@{SKZC}-u|$Ir)dey3cVmGP`x+$nd;G|E1+@65`--2m%Z|rpYHkmHP`iLdquyxUh@^7|J<*y*WC2^AO7xo%`biaum8MWQ_6lhet!Sj NoFDr9FFt?0=6^2U9a2G+~myeGXgaA>kJ6RC!<(6l5QiaV9oaN_SjqK*bM-CK(fr(Y|zRIhK z_WsL**9B(==L8oApAWtkd^fl%xH|ZK@V8()o4LUFOURD4-{Q~W^uRQyK#MHHm46qC|Y zqtss7Q0gl6lD3g{l=@4%N&89%OUFq^N~cKUq;n)ex>RbBZkA?BbEIpeh0>E!t29?y zE`2C{CcPr9k^YiGp>L&Zs4=u&NDuW0Z5`?#>Jr*FbWrGs&~BlVLt{b{LdS$I4qXwt zJ~TOWS7>f%Vd&OSKWTC3`Ow_pYoT{SD@pog=!eknAzxSy$HKYr+@K(}4{sRm8txUI zD()Qa8x}}Atn5jo2ZRTQhmv#zSxyX(4v#1HQ^iT)X^tM&2$zSiGwf!D=Y;2m7ls#; z((~ch!taJxliZKtpTmEJSCO1w&d7?Kkeg&dT3_B+?k@Ksxn1Rf@?d!f`B-^waFl$S ze3m>^zDPcwXs#g3O!<0wjyzXhAU`fIk$Z?Q$*;<9%O8^UV6wGVku3^*Cew6{>{DbV zuH=Z&S8Z z`;oPo?(#0`p6WpLz>+8Ie?7z@D(&3}wTC!T6{OMXBy~JtusBVfuFg>B24@@gcbn>) zr!G{VR~M`O$QFN1g)RB6x=Q_w_zTikWZhlpNAOUh85TL#RS)Ads-!)` z)5<=gXO^{}Sk}TGwGzn%rMQleK-Ni?!+6PvM)T z+1d>49-^DCEz+JP=`!t2?E~#o?Hla}?RTvQ`P3JrVANdOf2IFM;Qz`9M54K9EIKz> zAMK;{v0?N-{+C98?!8^faqaWS7STSXLcI(4vY?n?roHYL`Rgg<~{y|XfJXU zK8@(cMJGfjM=y>}kIpDr-Wq*0Iy3lm^o8gv(Tsdo^uap!p7zmK6R##x$$`nslFub0 zsa;d&rk+ZDlj@p2F+D!LDE&jaduC|nmdwh`519?Khh=AEKghPv4a|+tEz13pOXUaV z$195zLG4SvtKFe~q4rgxk-iFL_sDYJa^HoK7fO<&FhLD*E{Iz=^N=-!%yVb zq0RK(x*+w_chPs#_a(W5iFT@ZrFbOC57$r9C+K5HZZcV>=`-}{hOhb945dk`U!JYc z*Y7d(&+3cxR{a${BQGcAkMu9}?@NAxm>T;{FUHo1C1agpo5$t`w~K8V6QusJ-D3O3 z4vrm3B(&Yfk=#`AlvodO9MMlSq@`tg{VqeEX@tc_e0Sz-KY=V&aX&`-x{vaXzuM(b|f?ulzpQ_XO!{Q)_DL 
z-_MD^>h47)HS_y|bzI8({~XT9w~+6K899`UBn7Dg$v2 zm)yxH??K}Io9w?<>-UrW6^;}CCI>t8=*Z-VX#&FRuIi5sSlEy5l!z@ztk?NJyU&BJCHNMC`rjLJ*j(47S#WJ zeNl>hdN^oU{P*=`o_c19xi?8n#s|cEs`yRn(<-CP4gQh}r-d{`&81^TiZyJQ?wanE z?wj5@y(f{kH|fBpyL>P(djeN=cX@c^W(Qoclok5 zYqQ+BG%kH@$^I52y(4{p8ft%-EKjDNHR^EkyjxwCek=WMdR6+hl7Dx3HOalFJmT=_ ziaoC^{V@&W|GTj!zvN{EDMFTij%mg(pDD*W8#3!U%4=`k+WJ+x^IC3~TaPwh%k}hL z%kkXWjdDDr$MQds`b@um4>!)m{)u{>>*!bKi0_iA?0xKRh3(qQr6t3SZ#wS&^=h#G zYSM9Q@d#ASd8m6|56=uTKnu95gE74Bt%Pyi(tr2Y(UUTl1p!^CkBE>p$d}TTkS4ht zS(1ii7h$rnM38;E_$K?7_+gsu$QeM+R|6^Hmlg^_hAjRXNRispWZ~}sv)G479%+*<%H%yO8~|RM!O)W0a0iaTDv!QZ|zg<=;tik zs@k#7*|wFBNdC#iUYlLAcH6^8gYkm>9qXhyDxn(Gnl;Re`7#-e%E}l6C4RC;fFB^1 z@R7S4LC>$D?7p~bcrO+ca(qiF^XFq#lc&9-YpfOgu~xUX@P2pvcIQ{_1BtK+D|XLV%t`ijLRZkwrgMK z6%mgve_!gf50>B3^xZ(u%;>t-?Z^D!j1FE!LY}05-w^lUp&TDNT_-sgAtHEV z>Qpn;n>FA?Unqx?y}ux(HPaL#({Y3n+B)?Yemyb2%zF&2)7oEJ2lf+Ftl6Ae zwY1Djp?>@Jg?>THp>7xn+E4p725pJ(_u9?(f*2q19|upeEX1sEck( z+B@_9j4h%mjs-12G>FN+oVIUksJW?mG@KJ4d6F}~=sSnC52I2EYdQ5|HPakO%{j9_ zjl!?7cDhxu&$z#1>a@Mt`x#?ET(A8b?P!`6n#C5eRa7OqhI1;YpB_*7Hqy}r3G2UE zkEL>YIvOy>bW79vpeMA9DeW(7FYOhr1*9|%l)d{47WQ$%xZn*fpl89@2S~9WnDRP4 z@2uygP#4?3`Tjy4-_6$kQXTlx7i*JOf9d|szoX$D&H!JC%p{n@7GsL?YEp=9{T#;q z7gMlc?>HJgEU3?^OPgEOCCxpXmykb~VyBJN>3s|e=!f%X5@=~ZE9(u|S7r(Ab+QDR zJ%%A7Ex{MG7>{t!LL3t+hjsbUhVjGq4vaT|FE|fj*w5tL&e8Z$1b^Ba7Rjl{Ov~r2 zo=oS|Zo0of>zN(=ZpV%R*gv;@fceqmx%vBmwFKWds2#+`PXI`r;x~71?$f+O^RCSU zn+G=!Z9cYnRP)Aggv14|ZNcKS-S{4{HhSe?6yQaB?7YT(2ekCJ5Ejwd!#kd{k6B88 z;o-;1JaSaeMj9v;N8mCOG~JC{(jHuG#|a07p;rN#p4plz=FYB(Y=&! 
zBg@kx3PfYR&`yZwwEs{mI$G>^vubki?TOm+zwyyut#NGWZ`y zliBrPRZee?=xtv~eSADtH!n5knT>P%&DPE(@T7YWzpv4}8i4DltpPRI9Bc`;233)K z_A0HBX1z7uoNCQ9=Ua=-O)aA#25XCRJ@AQWhkDXAzx~VQ!Gqr)pq#X9VYha;%i-AR zu0>6x0vohtDI7)Vi^=#L9q}phQdv=WIpobRtJbMZx2V%Op_8>;Mvnl{Ci}~7ZI^(* z+skf^65NOA9$CFV#-SG8bWJo5J1%z7xn772QwUp+}(Q!Ye2QBiW!Ove zrwc7nTmJLY-ajhg$D#JTTXcWh_ZW8zFAZyv*WDLZoi!rpt+4Gy?`neWL2YP?`T)6a zsL-ZkQBaR5)6jd6P!1`J&8&DmOV2YU>m~K<*givHI{j z$TPiteEr54;~l(qJ!}E=$F^Yqg_3W^_&k97iRqok*7qBf;cjHKGwUP9^;UYH2fg-+ z_k3X5>Zez$`R{%7eh{cHNbbLN0=;A8dl&=iZXoyGrpOvob9=IaCPcY5;oa4|6?kGf zBeeSM-%DBh;I}n5U_=FSCSNZ!j@ufKTJ{wGAW!e-Q)t0Sw!@Cw1tK6-OZDjgtyqOo~-1>=n@3ivXd&hCt@-?_yRjR?gu=3O?GC9VD zo5L;P*03r!%Pn%NtSZfl{kYc}aylBehAQ=TvU6&Wmy91j`@ZAl2|A~}!%0>yj#Xbi z2EH$xQm2uwRAy}nmf1K#jjU8n&Z(`H`8#>K$tsIzwZdptd8K9W2U{F3x3qjb@Z_!B zB00<9o$IV|mpXaCClviGrk{#X!Ug3Zp)yD*aQ0%>^Yvm3dgcHn_QG?d=I)Tgc#y)G zCVSC5eyegznLXs-yQA03{N0Sd7DD6mzlVXOT-mKvD7Uu-`V41^>}-+dSQ*C$Qu@m& ztWggs?!%Ze5A*L;D2Fp$md_G@EZI}oid2fxF=cm|^IN)-ywiT?=4T%j|X0=6aCC^50jxH8g+}qzEkoZ@U_g zwNE7TlaTQc$hxhiwdTCu1F(fFrW#u=>ckrTuwB;oMkQMreK9qTU|gF}?v%1IXE`T3 zkd?P^IEGZ%4y2n<* z=UpHD22JDfd3$`bmW>Md((ld8hiU14jo*4PrT6lb+mm@cUgZswZ!8qNmT$hp9sYsE zzC%Gf_$Xd^+^6`qLmq0z+O0b9aeBY;cHrw(Z#f;-Z%y&LpjXPrru~C&hahEJf|krt zz?S!Q&zt?Q1Z{PGWr+qy+MdGiCtl-OO}n5Ce0v}dGS(M~)W%AfbE+S8_TOVbgSB7{ zd}<#B8p}NXGzuLvr}`lZjuvZ+bI#l+`d*>;j_`eV_NDE>9?=rtQs^1ockKQ0mOw1} z3Fcfg-Bz%D=r)EUEXbT*d<4*!E1P5R*7og5uQjt{ZlyW)tQETlJu+h}?0U0=wvX<; zYz^-lIvQ^K2gb+#VcD9ZzSNk7z55s zYdJe@^Dq1D0!ACH-P4?h4et+mTISd8ah)~hy%+ZfYs##c#`-Db93Q0R&*oTE=bU@& zN;Gg~xSSo->t$nZm-!sUtfM8jSa5y=@Si$PdHlSfDg3f)zB1wD!^_~e|0KCPCS&`B z7=JN_$TTjk18nhb8uocbe^2Ip;J%w=>B(PyXN7_feOk0-Np%Rxl?%mm&7q?MJy zlHLcCBYPm6S;9ZU*)=oR3QjM2j}Oz*(W&)0i0n=$_8uyq!{->%?Sos1?>FYn=L;yU zE`?8PHxKh@yIG;UQfq2g&KkB3Ms`Ze>79J2jBR)I9(8{6=;rawlbWYBPj8;lJiGax z=J_q7TgJCcYMItDy=6wr?3R04=2H*)-sRVH@4$FYR}Jj{B_fmeOj7AYtFxDq`+`bD zmc}XM4xvfPbi}w5h};82{>4b{R%-3T*KW=?i>=+M#OsO~ZR^tR1G-NU$=ykDayQcS zDbq@!nRjuVB;` zkz5%HwZ^f`QmnsRy0mDuP}-t<@Apa& 
zB&(j5X-yc%oSN@nrgtdA+S+UWmp0lXbNkDe_}}`R^G^51vbemDlO?Zr%3HUCR~bL$ zpmumyS>M%EVwHMD#4*d3j$Uw;8J>gEV!uAv_6~N62gh6Pvx-D9n%A~Mo$>EJqmDL} zOU9`dyNn+Z2t<#f&&+G?ACi)=ePE3?$O%4rF-j+ou%N9`wwn^XG^P; z)?BMU^M)L@-z;S%a|@Z=IcIGtJ$^ddI!Vo+Pi`eAPb#y2ayd!N{&cS)PYobPWNbga zf6SKU&tLHU?EO>%+wq_~9G6;pIlnvUyN}+72kooxMSF(*tUks40nVe1BV7|539YY6 z;^G*aQ~LgHZo>W2{O*OgXpa`wy1mM*UDyx$&hzr&b?nI!&7+;pW$BR-T4)~=*pBdf za%EQpSWEdF_MN6U0yM>TdhLUJET=sD_D!z}Kn<9O`1mAUxQ6WXvj67e6yG{3PGiVN zH^*STXKh58^LrCVi2kG1TD$Z6jjxkN=Owk|>9uov?QoqF<6%p&KlIvz)A-PR$+O!difgZUPQMT_7YNjudry<=iKV&<>j|BSTHM= zL!3cc!H+fEe&%{*a?DCMyXn3FxW6wQXPsGEA*{EL%qb zy-*JkpceYhqqSj+`IPp-{NA84qX|Y^Sj_S{h;2VQFd67D3fmi<#v8P7e#Gq2&z{8+(ns#9TZ0*=uHftt5xhagV~XTWW3r?tLJIQGhb7)!uX3 z%X0m9Z!SUM&V4yv*_JCD@YI zm(%v(ZOrvn`mX1__;%39&knzzI}edoa%?I4dw3>pL1=mTjjt)u?X8mI&I4)crF zYTh6DQn~Si$gqF#?KJ1B+u!I&;F!|3(vdQ^+ghS~h*iqUc;AKBl}2W5&1D7kdGciA zR&|-WRDH3zwRxGkggj?`i{_rKJ(`!aZmW{tgW>vYYajJ_YYp@{9OQaykEPv>tFGOr z^ctX(YpvX~A=s~H*6o8Wu|7@0{A>w6mi?}4PC}kZVfLgtDx0!s%Hw>Q+72L3u^3=H z#XOUaXM+)5Q@tPU@i`D7nJPzdZ zj67jg9n&&K9osUtnj;3QgSNc6^c;NS%2nyPlul2Ipr6Uuerpu#D4?Dm>)?1)FBp-g zII1{$&MExA!u|uge|mQL#1Whf_6V%Qtp@ywzQoz$NOR)`+;Rl zQSP0hHP+)~&1@j&W~!p{ROWUC%}efd#mr4sc^s#^O5nds)#KI{FFCung+7Uo-uVJ!jrFq}N-!R@+}e6; z8Ew(mE#>W4jeI-MlWD2MtwxOKoPsZl#Bx*$_3|=m?Hq|pv2~TCUUA%X<>K;w_O_Sv z2EBdGYd)rP-t1FJEoK|DzjGZZHQSow*h|#Q>O-HQQ^_%++$W2%gLtg)SX^0(M{F-^ zw87Lo+l5(*=Yh`W)U4Uo`pWeh?BT`yaO?1C<#S21mYCngyxcoaTg2A1Wy}Z9>)cXi z>6Ej4=s1|T#RYjKFQSO4JO z49@kl8G^k1t@X1q&9Sm`J-iPaOix2EVtZJ8Hqg_8!OFe}V4Yn0ei)_$tvefYfH6YXgP{M@sq7~d+l zmdkw;ET?bvJ3Y&@S)@<5wbo`YV>=B`H{8&WrGwDBHwyFdD2>J%S!kXrS;&Pi{zhcen(efhGJ zAIi`l%jT4iVsEc?e9XSpm8{XtHCwgj*l25(x|Q5xk)vMO|6{uyD?#4h@{e*aTdS@f zU-GQDI%>w2n4f)S^ry`8&%BN#K*;F#bx4~;s@eK;uqrg;%{O=Ql+F+lsZbC zrEbzz()Q&1dvEDLX^1pJ8YzvH&L-DGFOsg5u9t3=?vfsq9+jS!UXWgw-j!BKtEC^M zKO`}vh0>wM(0ZXRp&p^$#VOi;(W&w0628V6O=}uHPX3g9FZoqc3CF^@aQpCv;Z4Fl z!`p;+4DT8q7#nRQYfQ<+pQRY;wmot!;So}9fJ#cS z>f8B?Bhw>2^V>ysDPFDJq}{IFryZsb)we8cTUZu(GjdO1ej(p*NOV!5Nn2lgwjjpm 
z#(U`xY5V23$?u-uG5=lu^Zc#(%?q7VlueAZXXUQ`e|Q(wn7wr~9RMNxz-jB!6z< z!{VUyq3NU2C#3gEpO#*c`z-fi*37p#%9@;z_vht&JWm4UbH=)ftPja|DqK)_QGHGA z-?&?2=cc9kNApkQcPiYK2lkFUm>(HY3X38KMurfU6@JZMR+t^RCvuSTS@yH+5z4n& z#xcs)p#hm=GbbyfGN&tNWX{c8kU59^75%czHJPc(nrxvkAac3#Q|6D%%^5K(XYa_| zoq3k9G&3*r7-3N+p7rJa<9~Amg1KS&VfmLc|MA}l{5Jysjlh2+U>yeQnnF+Q@SM*y z&U}?wKYw#FQAiYKCGSekRTe6Xl`GTNr*BO^uiTaXl5ke~{PctAkJ2mCKcpW`KbdY# zzmk4US)P7ZxhVZ?cHTqavz%XAaD4o!Ks~q;jZr9uex#x0`{LcAF`DgRtLZ8BD@}2dULa*Ye;seDW zirpJVH9XKTC^}wQqzGzX^%(Uky|40Cw68+BLtT+vk<9CF5h?J6I!Nyv>8tD&xj6Dt zB(CkHT@_uTt&^;bd~8T^|M->ho02!h*GU0`Vq;m*jx#gIgOpMLdwZxHDe`BD3#F&pb2 z8YJEu`#Ex|@tGWNjOjVOThrLcgvjK`<;B*W|Ioy!7Tq?s8?hfG&MeL> z?n`oqH7$u9(DYJl*LJOOd#lpXTiQL9*fF_N^4NCY$A69c5`knWd3qwA=+Mxy;ne83 z=$zu*Vq>(cE;s%X74-M@jT*Z&_GmmbcDLTrcrb~-Aoh6d(#CoE#rl={4f-q{>sZ$G zY|~cp?c>QNwdwpAN(YI*HSQGKBes9+$;O8pf7ZLjw{GmKA4%d|8M`5NoY8V>yFHJdwBe~_$l#)@h9WsxLVMDd3;3jgyd<4|Af#5?T$$tTBwDgg})+_1{_-e zqS~HKjL8l)V+;P?bZY34c8|1sHPN}D9228|>7m$D?et_exwHOJ7SN%>0@8%_Et% zGy5xZ*XOoZZ_G8T`=|%0*W`{MXF4aVGjg+Ypt(ACWA3)x^xO@(YjR`N<8!Cx#^uh< zos=7s8=pHrceZ*_Zgi73M>ieQbad0mrnh1X*2-9YllUd^8;2H8$KPqUNAmGf^mE&F zNi1=w>G@E*AY2!p8Q(3jPvW4&5s7QtQD%|coW#9}Z$tI)U1*AWZw~l1L^&_d z;}4_!-%8(ASE^sCKd8s&f#20&Bow(me?@-F$bQ;^+97T4yJ1?sc)WIizD}-D?Wnek z>5&!r8IeKa7P(HjO>?K_v*gFG=J;Om(;LU=$29J#Z;;&7*f(!(wDOGpqW*@yGV!Ug zZ+9r3sclr;sCc$EHF|mUy6CsXw~A|uzJ||=>qR$;c8&Im_KEHg-8H&E!sPf0GxIPW%3y?W*VXI`#wW{? 
zp9ju0{T=W7YGVGIM+B(*-#j0Kx}A$?qu<)~upSTp>2fczm$<@jWtIOc|EGj6 zY#OU2l-~}$8d?y#K6HC%7FjBfzluTA4A=wou$u0yG#2^y$Rb&Kl}Sj+Yxz z5vCHAn`HwfzI=pquyhz9P{d%27s52DW*C4d@bq;ZhkxM(!^@wyh4dpdO&1Ug$AgI>jQ911zp{8?o)o4x>5QqcxLCSCzHT6A^QU^Sp<1*>uXLcv@BzD~5*q|I2o%Y}*6RjFOI}Obhz>Pi8!*qO zxl&A!y(qsZzbro|^ZH8j;0Mc#@)9F$QO}J)9?Ph=rsqb0aT+k5fNMw}jXquia^|H| z;B=`|z>9KvFW*jqE}pS@-`8a1{K)x{?E+SM2Y7l0dIq`?CP>|g{9LIISpZ1E?tE!m zLk~8#5RIsf!Hju5wVd(kHT$ zX6EcDWny%I227KtNg;Itxq9c!vHEZwmfZvU2OyONSvWZGR21kRxH|=9;K0Dnfnx*B zWC3J>F2@Fr2wWkRLl(>y5aEb6E|to0Sio$-e2>*n2%HcYX|(Btz_7rF@@4A9>Tr_s z%fP^10k;MGhXoD`Y)|gIxKN!InHsq?0_$_M^9;U`Z<5XoObDE5fGJ46k)d>hWaTEC z#A?Bpuac}>Ym-`y-f|zgiI9n}lvm1E zt5>S4Zz#aA}}=B0@yCB5^vW(ve`s;gQ348aD;q`cDZ(mrVD273e*C| zSe>KZGXpmTP-aeNM<_2P=cZ24fsur3v|GbBMQ$Km!Eu6)+#J3$^+(})?Y8hYi6w#O z+E@~(jL|xBXV}SOUD?c*DsV;Q+Q?Or=^A5oVq5YrE-Oz476ldt9tvEiUZ-}DA^vM* zd9j33eSg1fjl9Na39V_3{2XZ!EY6_qbAj!M*8?W*B|V!*KP}mkANQNbXzJR7N79BQ_g=<%FZkwqn!VNMIN$QER^%##qUiy z|3mY3_V6>x`9EH0@q4j%mJ;$iQ!f6oMQ^srcvCL^lSRr5Kj!%I4F7`}%GW+;%EiB- zNSW(*rkwwG+IQO52H$C~33kf)C;cO`UX=5H*zd(p*2iVo!X*oKPV`eUg;>GnU=dte z^u}UtE3V(*jm^O#*w;TQ@5RY}Z!F=eDjqB!EC+=8fjIUj3sfiBQ}d==w1-pQ1pXrA z1Tkm^o|FX=OwC$WvU)-pe3%^a-v{oE(3-vpP(Vs+CQo$?`h!0OZqcY8TYp2^MF|JP zBx=}bBlv@KXW0(4>-WI#fi-PFe!X~eZs*)Lxg2?l%>xlNI4@EZvOxwuzXc?aXBvCX zs)cfac;T8xh!ROE2*8ga7$5VH#!J!(I%;WJAkhj&J0L1gvup<<(Kb%dzt8d3G~XmN z1{;Ix2b+YAf;@{N8@0)-I^K?1p_E$lK21WC(4ItJ-vH{Pu)0Chk<rHmX)lY==Ed-Z6Ly?B>KjL zt&H`?!E<$xZWY{y)Z9VXhNPPtIVe#MDu6wfb_nj!*2Z&n>fI}7r$_L~$nOaT`eVAT zu&%IEaG&71!aih;fD|P(-OsQ=DafdtS+X4H&=Mm;I!mJ^s)c%Y3&OHn$sTf$-!ljd zFr??}XbtI3B}-nmuJCYlploF?88}UQI)W@AIp+6h>c}AQQ|{=XnGFP5#?-83C1?HP zDc)iO@&Ko4`k){{>pL>IfdHk!rF2m6P?F;T?FPa$eVYD42IPSvtRd6v)7iOPIofJGNxF&a6&`c+RmN7MJS;<+?OFIdjgy$kJMotPI6C6eAI)*GK z1v?2)8bwlA9}_&D>`{O1jYsZ$&0gH1j=PuU~KT>EYC|B zh;yk9-d%+8K|5FI4C;B6el15=;qXF7rK8eS*hnc)R{1TtxhD3|_=>d9a&6xgdCZ8@B{! 
z1ux;ah~%dSuXo^@;5BU#FjBQ?PhmlDK@iqGg~fTu!;dmksC8(vR@I(;lDLh6b_XLqS!RLxSh5Lhc!0(aZBf&S4ZzLgK57)}T4aORB zy#*@aWlZaoLwihbkb5b!_1SuFfig@UCJ#53;qoaYcM8cPR~YgW$P=JO%BRc65-^II z6b+~)_z#meR)&%Gp)Zv|$t}0B0-!xjH&%v|I6!ZqhtfmYoMTfizd6z0pl`0+PM-L5 zlRlID*K5N#C7cW=v2V4f&juNNgm;sD1cc@n2fZ1h@GQ`VDBB7s*;ZhADuXhU+4bf8 zF|su+=T}L+d`#9xUdr<2>H~cxzz9T>(d7NwgW6!_NM%roBT4>H8~Z6vK#XmLqm@c| zBeR{bo$zXg0p+pExEeT38Kv|WfO8Zl#w!5TJfAYNv%qtU!Z^>!Pgbm4KqS`?u2*I$ zmn)Z$+;ozAInpdN3pXluK#sf?v4c8DA5ZIA5l813rPMk&)h?xHlgM&3qO#!)x~v_7Zq4lzBbm!Gd3`dfp%M zRy|*Kitp}E+rso8kk}s(-YwN()xS$LsDpN!^?M0sJ+_O*r+V~*2w=nXACbEE5^hy* zGk8%SDBPB!d>3SFEk3O7FYGV8DQztx+l%{(+lzaU=P53iwiI8JC|in1UvY1-uehtY zqqx8DgoebEW+3YRLZo=0;KfkUjl)ICQ1M;mUFC7|EWu~gr%F7ly%nV-;|B{~TqXh^ zD=VU}>68-^?`Gc3941hle&(Ga&JeFF@i=*QB;{-6Yvl-`TAmV}7$#Io4HvT1;R1ST z6tTdwQ`OX?2HuF2{(kTOGCIFkyf%a5IeI!m7o+$?ZSse4vUNwMhZRUp7I~z zKf>#gXKo%GIYmIW4A+J){$%d<{Agk8aCt@xk}SzyuN}z~Kx<>LHds4dSVwi^Oaa(X zEywT(wSaVjVCB}hH;ZJ-xq>srgg0b(IL;TG`B_Am{x{`UQIPDfKZw+g^SLQpAWzHu z+UyyS(&R!UED0&+n08EdOzOocf;W5wjF>j!(F>gk<8mjg69$+jB#298%!c>DJlH_q z+z!3Cqw38e;S%vooi^G?NXAmZZ&kB!eTh0id z5x$aewXwdE*j*W(NSv;mPgr6A+wScY_( zFimKZ4h;zr7MKVp`#7&L&^G> z(BRO1Bt0#3eCWu~c_ep^1EWGG5er~4nMHM{I=Dgr_z0DksqZc-rY_!8#98}rqqXQ(p; z@SQ0@O4lr<8f(iiKD30j=9>t}SsATpYK>tnqqVt4Y!7cu+shrOUpyKkdkMIqLyYI$Z0XREV?+98kZb|F_p4Y}PuB1O{0I|M7a zVj@zW2zlv|k%YShE4fH{(vfuJUcs5Bh#L*l=ar!ig>UKILiuv7>20B`CK>m91%Wx>jiniF58zDix4y*m4* z@IlO(lM5U9fKap={?z`|g3(~~Pi?udT<}GG(O8`Fp5V+05pVL*40)nmhM;jA6h9yi z%f1F5CqGJlC?Lsb%s?YQtj3}ZRk2dAV)kDtR9jvr>P@?-olgYDdQryb!e`0P1%&3$ zhE3M? 
z`$o}*Pya{-$SuG9b2GBTbBLM`=cC5bnm?)lbSic#u3PM2S~e*DB-GECC~zv_IKrvX zlS_;!tPz0c62OZFPbZ#EEFsHoJ_f%x{h0hQ`Mbb#R7EuP4&g@?9qGtDhruAeVL~|7nM zlq=(Bljnh-ZDI#sOZ+k-xy)FbbypH=1T>Q@?BLtMH!*&(1E4|5=`W1)To<2F0;CY* zh_pM0r6(3XOgxl$)5N00!o&lF_lz~ud-{Oa<4ZZ7kH14e$v(c<63gOt_VH1`?$`KNWZBPG9Y4o^ zj{gz=HvU6AVZ7i+q=4~55?!asAGM^!Am1RLoUj9O#C#QJu#Zyv6e}2E-9+Ua>g$kb zvd}G2IYWFvmqb^N-igXN(s!hA|Z(&LiV za-NT8eA0`_zR5l>eb2PF{+^__etfcLx^kd3Ru(6pXk)uHy|r~(7o_J z;Q}9J)DEQ(rCaVr!aaFD(mBb>c)18%FZ1sTK97wNo$F5>__nSc%zUoxH@J$F^exOu>@R z+dZj-`PY_fNHrKE?&jS+#j}nvvkj~_Bg=XOD%mi#Vd`?IVsaifoCOv(i_G*(**^_sD+i}3 zb9_!5n7+2?#C?90K9ae=ChqZ3CKmQDIx(;a-0!QG6Ubc0b{_O0b{$I4f~F|tQ_vin z9+n=S9+^JH!UCTe9A~$5bh=V{4HW0?u*#hJ%^Q_^N0 z^HCY5W-TjOJ<*G|cqIQsW=i_>^aNv>K+*|mh>PjD>2t|kY9MKsg*9@zA)}?KZS6y^ zGt)fOib*hrIlU=eo+o_nHV>qM*&KJI?@BNBA@`>3pbpY?vXn*Xh3Sv-ALUyL7ZxTF zp7l}o$a?d1x*A%2R_gO^^;!L`@ynIHKh;pXB>i0aMe+&wg3ro_Y2>x^Yw4GKlsD7J zd+GPm&t_o#UV3?(9_?Q8eV#^M^}Xu*ByD9SnNyDeHfC0*{~~;!{yqJM&xxPXc6d86 zQx9+Z{24oM`zYZ|I5V~AO+16BnN(XIWpTIwY=ddv^%XMp@PW_!?me@%V+Q%ihrrWF z`Vn>@^JjkSv$Ivk&c{CUOp_n`yq{^pYgr9cx?84NKJ)SHmnqMerf2r-lc7-QNf|E& zWsV^XD)F7~J0FyfCxGmj%%PdVnIV}`gd+(@8;r_y&bqTGf^z4AWt)TK?bz?KA>+a$YnTW+%~SO8mO?R3v} z&u(wfJ-ed`^0<*YIUrjJiXW`lqt^D#A_rx8H2*}R;80m@Gld~rhj>{(fUa%)7EzMFM&jJr;A2s+mj~{c- zWbL5tsq9nPRW|Z|yR4jKMRu8qyq_yWfpd{ipgjZGg2iXXbde@I#5NdAV19WR9_g9~em@fQ+t3=1w8`(S!+xjN0J( z^qd{kLHc2?Hn#LrrsXarZ0&dBilR3+<(%m2M~*0Za~GMBa1r6&+ylA0a(Cpm_nWzj z%$mu#B6mfpbXAV>b-pro^gGip?+$qEQJ{QV=+OYs63i4g#t-b1cV^E#u!D^~^8@lb z<#)^PQhM&A_tDi&yQ_a!Kh+(XXYApJJY6$s`P`5^vWI_Aeh`TO<{rH)AKyn+??Bn5WFjBSZb=Ik)J|C&kqqpBKG2%5Uel zd^L>lTPZz+h(3yltdH{_=eNqd%b^1BXPyH7kfYKNxy4T*kI_?z1!$_r{pX4kji+R4 z1s-OdFXRhgak9UmP%kHwD9)c=Ou9<}fb!;!pLV+zD9nTA%6+=v(ec{?07poAT*NgV zKOvH^Q%{>qqN;F;*3=$~JpYhDgADAToBV=m1o+}*}R z|J{ZAjA%0oGYWGHw-z2II#_NlJX*MoEKrk~I}3Jri=PXXGs#~+PZcWRLch1wh5kx; zreKHn{k34_V*lD;S;5X_{-uSbh35;G`7iT>7E(8?XLW~}&hxJH4=WBUUg>8XTr}hT zRA|r+CtU4!(~nyG4_ub~<9=tv29cbm#T&A&Jmq)S*)?$z3u029@&i!W={su|C>DujS>FpaR9UJUC<7X`O*T?eQJANSiza`BIiIB^4+Fa3tZ 
z^Zw`kmqn`w{OVwMWut~y{ETOlwe!0Fb^pZd#Ozyscb4b8`Jv#% z`+mfGt8dltNnp8!PXd$=1MT8JB|Z;qAa|1A47?d=h<~5>KCwDrrXddgp9JoTd>*J3 zh;VoKzHoop&dR`<`sw)T+H%y;;3SyGNTFo};~;1fJ!dKUz!vg&?p+ z9BB>KPxmOzF0Yapz#GAvx!d=oP;wIvkL8QAnPl|~zN-s<6iSs0QkZw?HN0=#f7B>{{kp2w* zNcg3V-%7c!gIkE*#mxv$N_lc)$|7l@^pNzN6cU$6OQZ%8;c4kYv5?~C0<~}F-v&B!y z`qYvHMx%#|W#jb!Iyk%7;5#PsU6 zCq788LLT zD2Bw)t>VoD%&{`a?i24ebYMZP&=%}z{#IfK3!;W@Bcn&_2tV> z^)T6;M*Z)o?#vaxROX6|N5zLlSeRt47!C2Ix#FLpKieep#rY=Nxnhv~>ECQ$d+AGM zh5Dre;L}oTO`UFne@5IkQW?8xjiJg|Dn9Rsn>Sj!=~3Q3Wy$o`T2BcH18 zh?KX)O+%Z8-V)yuH!{{V$MXAW`)OaOU#M?rKPup{ap--ryhH4(1?!`JP$#XImdPQ- z5w}{P)hkGZjR?^8joWfm{-g4v@|Eh$t6EERl~@^{iq|B8Pen6trM?m=R0sL%lh-GI z5Pz^(n*Fe3kBAy;+9PC@@MVa@VqsgaN>H0`MN0c*dt&zm;j2>Xeo((v-ImQmcKAA- z@~jQcT6n5ztwbb-Zahu3`nC7dR(DjPZ3oKc7DBB*t7A1XwPn%mIkT0@+0$j|b8E3& z&fofkTB|+i;aRtCU_BTI{dqZ`Pmpndr`)1_bt9c9Jtvo?<`H!&m8*fpf*iTI$OMdL zPA16k(TeZ5*l{s$Ce}KJ4-k zV|kE9*)?1b2NWD0Lt}X`%^qRK5RH2Ql{!*N3$q`-bc1;%Mara}8 zAQFWxu(o5>g9o#`GIeFj>8cYibK9>fmA{;zoSrZ$)2 zwep!IMjPogV|}`OzC5*!Y4T|KT(ST#=Nrw@^`){MUXOX0Ee^b4nIcaxmaF8elWe-J<5sqeyZKiqcbYw~8rdu|6W%Q4b^^`0 ztuIn8BD3qAX98y4bF;5h&gQSXU%9`^x$u~;HP>2mB>F3Qwd)ls@{3X*ICEI7Y2~wp zIa@m31mt{ckRiP>3A|E3+Nt&Pdcls{tXp>Zdag5c*=^k=Fh|$$2+q~zKBt$P#ja{S zyr0@vWvq^@j_j>g&*zcPBfT_lSS_{XDxaOm+43kAIYb?yo@hYlBsxw#yp4lYGt>^& z<>oJ{L=P=sHM7{&#z#;0Iix?7dgz>G|1sOGtK=zKfn)@C2)nR~pXW`FP6%(h-p z%{sH6cO7FYm`TWct|4>Xm6gi1iPu*a(QL*0SDMENXQ}luQC%CnQ{b6hoL#K_ZbRyc zpRbjq?~_mC_dWSAvNH0L=ESE_iqjRwPa)vUa!k4%hAysIG}X&-36Mkya8 zKi5ASz&o?b@NNvt@qUG<2$WBe$gt$X&$T}EB1D)Wqx#i zR3QjD&tLjqx<3Z%-^+mWTh*Gf*`aF7b&h(`RNOFH55=Na^h$L1Ma^s<^&+5C#2A8e zK-HS!yic{JheYe)fapOr!RkCJ>dw%pnZu(aqH*$bLLnB8ffuHQhno4vfvuX8lSYVbdkXK5F zwulg!&ehdb@2dCLw<8O%osokQT{eyZN5wmmb;lT$ZKro6d0_q6Mnrd1d}n=UeVbU1 znA_6BLkB*U&7S3Mb+EoRC_jhR5ypI1wXfl~{)hBM1o-X#p;8{oRPv;8J%C;lz~A?+ z=^ez2UN@kAbbS|zw@9Doz-IBy;+w=f#~;ui(3h8>XP>8#Gf?=J1e}tXo`f<;P9f)} zD5Vy-W_vrl7g*^hozBaO=T+b9Hzm$2K{a%pjb&pgGQlIisHLAk+s7J7ZfN|=g0_6G; zc8cv*2Rj)tXdId!5DOYpr$N*m9N=%A!uil@O^@N`Hmf9md<;0rSdWU0ik(&`Fn5|0 
z=fuXwCdbZm)H&I^$@(R?NnR7XJm$1akLBYR#DGg;O3KVdv5ScGdPhH}SZ=bWbTJJ} z2f~jTB?Ys;-ChScf1EX45M$gEL++0OTKo~yta1Dda+rTGW`{*ApQH1_)xIt_t6Did z&WonRF;`t$*{o^qR(K!EwFxtqrB2OSc{*ojOuTYnhPIOlI7b_l%`>Z2U%I;DdDomR zZ;jW^&GFkj=PdJ@&kS`B$IJ0a;_>+BWT8A72Of|AlKCyuG2cG_R2+CE{#IQ)Ln6Fl zM557X9%QS^@Tp1vejHd8UlHFR-6`FL05hC<@deR-VIU-g1muLo&t!r17i3N~FW*XZ zbY8XE*PRlzvOxli|4d>W=qe3K?-NRqf2`DRrZv`1xY&-5|mq# z=Ow2kZb{lUD&K{wdF>U zdDTWGPpg?Dxp~&c`#Wi_b2b?Y%vchE*NP+n;q4lE$aCc zqB3}mQ=VC}oRb}uF&lHPce;1l-K$bfpR_Y$vH%~YPhGY3Nt@%b(dFyFytPfxSCD~z zfx#^q$kFn)>1_=yZ8NN`{#eHN?kTmrB|}ph@r*3u?pKKh+TSM)?FVbx59_*o9IJ8RV+xP?K}HD}km zNw5u&^84@AsYjCDparD-0$!-25w1?LGPN|9o$I+hb9?5j>{;2zk^r@#d020h@rKRr zSWk|hCDJ+0;v`_#)&5i^>Un4O&g^YuqWesWkxVg|)G60Lolosd7AJO2GlJP*wz+s; z*36i~n8G^7OoRMfgw^HuNY#h;EPds)Zf?~qA*m6m6YFFIi7|pivN9rd9Ep4~ z;l$Lbgu_!qQwqHxp4~ltcHykTQ7Jpk>0~yOjb+KHQuboUdHyPw^1sPen*S>YxLH4& zw`z_s?5US^?RGq}%K76HJw{nA%w_sH2eMh2S*6b?-l9tt$41`IZJxbK<@`K~OWFzmu8ayjYV4R$HL5HR*3WeZAtMu6?Ff8Z&_R zr?pwTX$IKX05$yFX-@ggGdw$xdE9!DncNsoKGnzuWU67OjFsK0p5M*v;Way3CAzUL z^P5#Fzr>gs%zX{>hh2~v#{s2_2(TVk#@jtEYOcxD$Bmi#uzr4F`Qr=)%g*Vud9OK( zE79#}nxCw;(yrNE%0}^?CA7SD?`(aPpEJzc;;0H{E%UM&kzE@c*EV~Zdo;Acl+JX9 zrCL*X9&0_EkaK2LVN1f+g-Ka6E!o~V#%npI?U{4q+;^Izt#%~8(fW~wshugIOBo06NFyMeGxep+r?Y4$3NQhoYRt`cUF8MS7Y zn3r4FmVbbFqnuAGiAinjSE@lWCy%^UaARf>sQnjg>S=vHVwoL9wk)hDU}+V}KTw3@ z2};nOrYNmE^?sUldF^|-dYD%%H!n{)z4NSPl}lkBuzGNt2dq;0Mfv);Bwrub^}}N{hWR2^M@ikw;ovHC)u9Ry{Eq z#0LhIh+M@e;uA|$6yFT;Fh#AY_I2A@kD|5JKIrwfRjc0nTfg<&doR4XrS~@ZWb)0* z+H0@9_S(~%+ul;Zx3xjOF7s@&Lz0foB454gBQ5zaIEvW3~N~Ab(2Wrv`p{;AaGWX5d+4*?dlrUmEzbz?TPp zLExQ%uOfHs>y8(Z`DMq81OHCos{_9@@XG@KZs1o0er4cYfnOE))q!6Vcu(MK0$&^W z^?~0I_&RdOzV3JvnO}D74SapzHwS)8;2Q$(3;edgHwJ!t;F|)!Bk;QdzdP_Pf!`bW z*1+!z{Qkfn2>ijoAIjWMJ3buvw#W1 z82Il3|9#+p2>g$M|0(c~0{?U1e+m4rfqxwMCxL&O8SVYI%xLe=0{=YlF9J7B`Nxh{ z;CX@Dfd>Q64?Gn3h`_^v7X%&&d}QE-ffofH4ZI}q(!k3CFAscF;G+YNXGYwP$&9$I z2)r`zv4M{Ze0<6e0Jb-0-qPS8+bbK;{tC9{P@662z-9vG_0|J z7+x6o*@5p1{D*8(PJ?uQUYJevaIXCH=1$jH_)#UKGs{yu>VVs> 
zpG-&0vSOAkIuk{Qc8Y$r(=z)5!Diu!lm;Phr*_NQB~ESez}$TEfpW6+$rri&NYjB2 zRu=L_razR%MiIkiDWVKE`;FPByf@g4I-7ojvhz&`akjcDPo;$_YcbDkev$_g`-QBs znJreCEmoN=KA9VFNBwJZ=czPw1pI;jRxerhGn@U)W;3(doHwkph+$#0w{SYqSvDz` z51Gv-nQ@*VBc3Dat?g#X0@$DGwPq*xC9?c1jJgUVw#F^$bjj8))-LLp51TJ-!Lqft zH+$`>)G}V>_Q{ihRhDw&w{C0--xxT}NZuPvS+o!Xb2EypGbn-A^LYLhIRWZ5LkX62cNEc=&5pC%$!zVNTvqL5-j4hy%O*LwpJds0cALwVS36{tB}+fE`Qz+m zk2$KAC95o1^^#>Lv)OTWJ82wt7~9$A1Y_p&9_LBRVe$6B>I>8N@!L%-t9r>zZ~kMO z-EPOmS`r@-xam`V^f9$wvg#$vCRsL}-A?8f?2~2RQQkxige(tx0Q5p4tnoUE)PzNQ&I};c~PW`Yx}$(Ar+)&e%B=JI1uL&g)GUlftuw zTWPjwPJ#|9Fh7ku9UD9Fd3`R6{}i(?#Iz4K-`HQ2BX7+Z<$;ku16cPd2e**hm62Db zPaW*F%nq3QF|q9p(#h`6U_vhx?xb;J+QfcO%so|+qwFaxi+q4@V0?WTHzq$d$i-34 zwilg;g8pn{Wzjm=1aAm+flFP5cO}ztIr{FjpuaKj>E!mjG@8<*vOF>@oRQ_=Gat^S z-c3PXxSvwtUdy=XZ>B!_6m>a!tO_3pa@Z+y?B@&jTJvULNBy$IIPbf_sH;7Im{Sg& z2K%VYt-u4|@nEO5)ofy$i~Mn(o8JyRKXBAJ0`g`X%5ttoZvQ%%^VqkWmT|U8mQ68j zifL19Hg{Z%vIrA@wOV6s+MG@a3|X7GDqduY#yl@Xo^kmI=D<hr8bokKid zjJI8fWe2i8M#>;K$3g7pCdj#uUE(@CA1z7Ee8?wzoR>1&9wzhbyHLF$6$SBKo zYCXmm*!vXyTiDh+mvhZpOW&{-Ku2u3H*TyPz$(W4Y?+Rj`B~(!Sr~Z*?x5@d@@JvR zZOmd$9xuEMbF$m?`8l)A&#vFH+$#+IQ>cUXqDA1+u2cU{Zp=@~>GOtM7V^`*F3QE6 zGmV?no`-s=Lq@%i3T4j-eCETs__Ha<3->X920u4br|olQ*6*YpPa-G&x;W^pYcu{_ zFD`K&wm5+CwQaoELQd{FBR;;*gq%Ez%j(Qaoe?)57mz!5Y13Ig zw1ZAy*u;Jn4~E?4`gB{OedNZL^py8D0>WsJcp_91@oYF-t%6TrEXfiixqFH{P%G$USH-Xd0 zMJ>v$?1=Qi4S1ldvNo2dom+(uVy%1-y#qPMOF~-~i>-~B=Yf|?K5qnZc)aT$Y4XgF zkDU+synE*}4i_532a5^W<>?|XJ8Cf(GoD2b`-RaLV60QPj`=?2HV@>o&aPu)tz~1G zWqX68#!vRTMy#&FQI4`jj$A5?TmoCYBUUdMx!oSIIVbbOW9m3?^1Mn$!RGy~X{2xx&P!1j$?!7u4gnpXp&3o?T2@M(hqG9CQUFTe& zaRfVsVFwKR7#A*gb~$W94*SpnW3G60y3fnPy?$osIQ#x2a;Guo{i}Z#(sQm-qxP+wSom+vUhH#38fYk>cL0ldvw>xd8Vh9fSt< z@vhVp$@u5qX2-7u{x5-_2Ua|HJU{UBT4sm8c09lG;tPgZ*WK+OHJQKNp4Unjr&Ul! 
zy*uGUON_Hy8@B~{T^92^RpeN^iyS;q_0u32Yqy`RzxYN$uoyrV(8vhpEpTP`&0SV(W4|Qvfj3n?<8G2fy6UfP;5#$3FFv!T z%RFDO@?P8Xl(N`YlzHRLE9W5h{>9vTS_l5n3teMF(qEkyoCw^+@uGgf*ftp0yRm*x8uxhmAXo+bSDV_U2K3o&_C^3+mgmsu_Du zWWG&m3~od>atw~Ib^CnBW1ZPI|M~o7qlHY*U%)-T=R65^av!bF`cMSpE8Cu#F*kXR zb792!l;F>)fj7{m`w50k=QD!*%pl*CEb-&$7DhEbW^V^jB9Dd0l<+{5Q!~PXR>Lb)!81ml? z^1_gxIb8Y_cOyCecBere^?MJOz7cR%02JRwQubr(h2g*;(w-)Uy4 z-=$s4bIgHT(pS&9py-o5&UU`rXlMC^bKiXzQ3v_fM@(K-avo#6>th^i&_Lxh2-ZFp z~b?CoVe-{2S;x;!Q? zto5GuqFnDpI_<@k&$yaC!?$iS_8gu_r@y4~#+QP%?vvGbr@pM}q~A3iWIEc2xuW!S zWzHK_=jG7Jd%sP?CU>6;_iZejylwX(xJxA4ekrq!aoi;q9o*fKb-yQ`ZjSh!8M65{ zQn;HI748J?Cfr37ecV43#u%S2W4tiOZ;@kMi+RQ@a;)P;j{Az~yjGHxdt}D&fXxHb zIVZ`K^ZmN%oMU*mO*Z*HTVag5!Z_1SCogO7Cx9huC|la7V>P5 z`!}-MRk`bBRhP;0x|v=Fn_A-@^LjAL_8MoOGs#vK^PzK}e9jwa2Qlv^V{h+7TKGn1 z&Vgb+_X+ZJ9qaO(EZj|1j^|d8;|vR(ZaV8t&{uoOYA;#sC95Bu;WNg_*?X&RnHNEh z`K`pWGUmEX!xjhktuW^<#>eI^Jnxt;=Y$^AlBj$sa&=_pq*Z_FT`?F2vZo>j4r^L}B>C#K^u2X``W z3i4+{4*NROi7{7iuXZY9pKwL6b5*s2aXphXXCnV+64&L4bjh2moGhP>M~xAGmwP

?hMIWdOR zCil$aZ-qbfANf!iak~L>%?VH22c~Z`iLCY-yPZ4I)8CeLI^^s#<RiM0((+uIl3XdTaI#>#}&t*4C{#hGh56_Rm|9Gt4o{RWEW?Wx0Ren)@32H-Rfgd ze3{MpV%FQUyII)uJRu`bbe9X8mK)L^h;rnK-!YbY@oYkj+$fCPSYvS>!uD^K?FUL* zFqTVMoYPJ2W4UnO-i6y*#@#b&S&Z3IS7nZ0>r=OL1^XAg%f_|j(+&0pwJvbhZ}qZG z%=5d!nv2L9ca<^czYaPYzgTyw&B9$t0Byk-G@Vh4 z9-g~l+?BRxw*6RUuAxPTV{>aBn;56X=W%1^7|M!Sw#f0ksqj>~^1ne}a}ilNL)P3v z)_me}PaWBJt~R01dFgrYgnj9gHFn7ABeFin zWSq+m-CTXU^j(zG2jgs$ESqG_)zYUAmG4MH`%xB&W$hxXy=0Advf^KP!u2V~I%Rg` z{|nv?=J?&0$Z9WH@p1O}80R)#_#Tv1-;w1HS$4>3qj4_#{99^Sviv8jUb6b#ojjAZ5 zWquplUm0uAw%lK2#mCv>gAL<0kI8J4R%Gtj^E;L0yuHolaLwo2lIC!HE(3Dd@y}?G z?Qb9iZ!+Gc7 z%zoiHsOaPDfHuPa#PfyAvF>0#w6>!zoJrpbX4xs*ODHCt^orZS$_=vepDZ7Ykz;7D z^pzWuV_Z+BH6O|LC(?6&4@^6I8nWzEmVCCyb(V74@43Fl8s+Mj%(l)tqhHWQvduGM z9}BN;b|1(-caw4MBgr`zz3QWo%Qv#xXzcCnCd$=b$ys*z9vu}GEGJO-v zj>+A(p6kdC^=-a|O>(rU&sAOZsce5=B41{GEmr?ht~Qd}J?^nx?rk*AZCw0mwZ+Dx z*v8`vKLfeqX`JIpx%%QYpRGD%$zSxjEbk`cT$Xa_y!`W7r<2H%zw`@LPL}*BcR?<0 z%~hSN?*?mpk(Cc*#fPl6kQExB!KD&DU7po7IcLzB>zMr@L_Z2QCnZO<@E*6GQhekxT@+bz5|TDpq+auZ}a)G*zQr61|GZ5?D%u! 
zcii4B{rGNUJG&uPw~%|xRkHf}BSHW6z+e9u%BoK*&oti*@*f7qyKvOE`;g`nGu!ez zito6+SLUmC2EHZmt%2Vk_{SfgD`kIXobz)e&E}lM`EEALv4$2oS$XK}d5H7fY;M;v zN7b^KX9xIRRP z_RiSZdh-2Xt+U2H&vX*y>WlNfRCUOjE68dKS$#q7dLFvm`(?qw?88Kw{(Z3QkTuVc zWz#tO<8ohv$g0cP?MvU+&w&wmIB5 z`+Vb{qb|jYtiHJNFHG)z@%F#0{*cv=WR)e$2eN!{x%+_ieS01+Sl_`~-~Ry2y7~$0 za>)m*#f5Lay;t%FXR+tF{;a*vb7|6hQ_Du?3ga$xXJT9U{YG}?wxGw2IUce0Nr%rC z3r{YCT=_;;OmJ^scJ?fOl4T!rLea0>wYg%a&lS!?w8{Iroug!z zgV)>pB$z9TZ--cX!53+KVtoV>+$2iyZ+V7yO z~}xsP$O=Eiq^ui7NbCRu(O!#CtZCsD4t$g=Np zw=aFp+t>efwNI9P@?|~7imb6omVIZpiH|Amga`PI=PAaF5AKJ5nYAWf^fy^&BJKZM zu<9kNEtO|nM=^iN52`*{`jsC_#vYR{+5MSFZ}?#?OIBI3?BDWt&{02<6$i5PoqfI{ zD<;k!6XTpSH~c+pDu!g$Wt??Z{sVNl#$h}fOP_4|7>{J#ExhR;O$RYYzmsM2x`(Qq zEcsLa339cIEP47LRX=c3`O4;rKZ1_>h^%^z;XmghmwQaevQJh%OP{&7_kY%U$|d&mvDei}luPG*KZcIlMV6mr^&Odh&Lm^EbAXJck1?c<`Z2SOQ)ll}m8Jiy zeo}o)KdpT8zg2(8@`o&c$cmw}$Bir>GMf+1?!%jZR_k*1vSh|&b*elO82jO`4l8GD ze$LE&dzF7J%b`;kd-ZS4NC(f>|JB%@&0UZEMz7??*thl;KI*%NO23d52eR6@><5QR z{$Ow4%l(c$Lt*R_3J-QNuTGWGu62-m|JvQM828EBz^WJTbr(6-dgDnOJ6LmNpU<;O zT~$7nF26DB@HtB_=Ns-eWpgHxm6KQ#CGXpM*UNiXvUIT5l}^X@lfAqik>xX4&uSGN3{>f+EIKXO9%6CX(MvD=#Zsjob9~(=UHb@L%z(`0x|vRxm#%>N zZ}u^FnNGIS}xXQm)c`PlcWe32t-%qJ< zuZ4Z`-aHnN|9f4hW8W*R$gu}D`K0AA_Nzq)d(^!p=da5;ysJ^!DSHmMH|KErI(4|8 zM}F?jaUg46B`eO@Kh&}Vjq2^E_s<7QpR61vuebc!=QCh3VOFnEKuJwL!NCXOhXit^;IyUS{7* zkot<%iX%~0`X5?Y?T{58vf4$aekWD#l5@LU?sGm_^*VdIjG@nVeRvV-Rl6!r*&UR8 zgXX=%WV;JO4oe={_V7VIu%PM`?zL`O0J*RA%}t|VoKNj@`ZtYL9=xt=Ws$dC`;h&9 zFveHMS#iT!us`P#S+PR?7dcsR!~8EhvvxK#=D5HaaKE>m=er`u7}+1jh~!KVUnnz1F@3kSpKD7K3${xO55F{DGZ)%S`Tk=k4A7sqN$RQ@eX5 z2XNnbz~i&p-amB4=lGDH2Xd_N9@hc)vt!>|@<7f9%+-?f^N+H_d^7o^cagVYo&Ti+E$D*w670J>kD^_IPE2iV1FW)N9G%aY~uofD8Nwq*HC4m#N% zj7{|y$L4`NjxYufzeQON)BL*%z4=vyQ`l|?3X?EeL?4v`GB0k zKHPLTMv(J!@CVGjitE_>X*WpUzDMFc)aUnp#C`8v&Ly&9?(8wg-Kp&0KJELNSKB_M z$dR{&k+)=vD|+%2gNgK{4rL9tb?J zpvnv5%(x)SI}N$+4d!yrEL{s7<^w*Xml>bAOY3sketOpJ-O@+bo^*HV7d#ifJHcn$ zx8T|9p`?8#_6CP&6Z6%oqrvJ6(dC}EnQiPC!%yx#cAo|xa{g?re&UnH 
z@Yz2@J@=^Ur*TXEP_D9gjww6*Z2O@+N8v11`B{~btBOhA&Q^zE<|pFZ5_Ns^TPe4_>*N5pXHHFey&H1yuFTPk@LF(Push2eZ(2^ zF6Hofy}c)l_x6i##@-e_rzrL@M#y^ax73AwML*{oo)+>R8*HEP89yuQL#OaW`r>0Cmp^3LH1^m|rhCqYj^{WsDt_#a|Bw9#~N1*kdc^cxNE9y)&?`>QwHv zmT!jKVmLC9*6i*Te=rsv%KJ?04IWB7o9t=ujuzz5w>{?}Fxz7ORK{JzLpk5b(#Lvj z`gy$;bG;T%H`LKNh^+XKWe5EzA6BQ zANY*3*xCi&cKhsnW?z?!4*FM&x%9DM&5a9tm|{y;fSCrqU(p zlWEQwUl&8irSC8DC#RRBmnHl6o#~6Fi9b^;vliN)#J^5~lf%irr|U9&)BI!7@%UCd zlj+*@wsdpv@c$F#roW0M&O({%)7#U{X@9yseJp)EeKwS@dK=Q=G>6mGDEG16aC#y9 zIV-&wWnT)P-rE~ZuY`B+>@7;WQO~u#iF6G-{cLYpdVN}nZ+HFT-pceQERf^9b?GEb zyeIcoq^I?US@$yN591Hofxq8|Kg)6p!S3HOw7}YXv;7CJ8m|y-M)RW`=OJ(>!x1`}T0ga!+gO%yR s-lp^<^wK5PZ}>CNoQiLNa#7E|-fdqYo8Nm0`t~JhNpme?b4~Am14GH|j{pDw literal 0 HcmV?d00001 diff --git a/go/mysql/icuregex/internal/icudata/word.brk b/go/mysql/icuregex/internal/icudata/word.brk new file mode 100644 index 0000000000000000000000000000000000000000..80460c6012812b2ac0702a64ffd82695b7c7ace3 GIT binary patch literal 22232 zcmeHPdvILUc|W^%ckgOfTD>ewtC!a=d8PGB*v3>ILqUyW@Dm)H48nF6X^~aSl98nv z*Es1+pi`P@OKa#K&9nn;rg$iA$b%-+4wFoQU^g{!#*=1@At5Chm=+p_Ev9WINx$zq z=iKwS_wKIj;UAiFrF+i#zTfve-#K^pTH{C9D=QW-Y*BX0%;8)DMzIKEx#8)fCuSzb z56$NC!~MDI)(&2uD^88&CX2JBnW5a?sfp3)vC_)ity80`i7`EUs5EnIb#D8_Xld$L zDc6?+;E`jw>3ef?$4W!FL$k9-ht{k)e*F0AIWD|K_wiRv-*==oY z3Fa57snu>*{c#DD#SJ%Nk_V_TUXGO7cYy zg_TWZU6y5TmPlHC;5@L#R6azVM(sLQM;0j8Y4e?Q-f6N zy;SScg5EDm!CAzAfNaIW7({zrYMsm0Gxs{-hZl?74N-eHc(F(XTawy9`vuRdz*>(& z71HX5?uIDr2HH`fyCDj@!PR$zkq@%m$F8US(ii53s?(ra4_sT@ind?G3FAXVVMn>N zqrwot+(>&4wjG^PwhnF2g~bwuUFp)U3_}1Do-nX);RjlT`F2Cp+zk;t8AX$oY}cvb zDb=Z2g;pg8)|j**dY47*Dm1K+j`gWuqdHwrLYk+t3ZvDl5bA>}s88sytgw6()`eCs zbX}s5Q_8x`sigyAF+FVsTYO*eXubW#r{Q;eeml`q2G&MCh zwS)`$IO>@6u_U2puRs9&(4JdTgXohC9-8PWJRMVfFZ&Rv10SCJ$6{l&ES0XTgH^VY zV3e`SI&3Sj4}+UO3QTAX>k_ZY4>HR0(TFLj)^PO$Fpx2s(-4FuBr0g##LOq6ES?@( z`6*{Zdjy^czs^pwe`ICj>WYj+4oChb@^S>}_0clxj_!y)82u-x(Tz@-9dgb%uQ?w$ z8)L^~UyJ=)tTVnZ{%HJSyg9Ku@rQ}?iL|@Teb{};jVC{r{A}|1WTfuqy3f>oqwbe= 
zH>N(FdM@?z)aLZ5^w-nxr#IBk)qkb_oqA<|QNzB5M;o4Rc&j0w*_wIJ@C#ADmpPkx zE0b?L)Oe=x)yC$g!KV9~o@n|}Q&02$=F`m=n}5-KW6P1bCH|7DTkELC;Q= z;Ou;LY4@^UT_08kZdi8TGJe|r>aKS0mzKTI`=e!xmlv1cw|u=HFQjZ}q+1S6HzywLW!w>S*c_xc)r#52;sDKmJgzSo&MAU%t%_uh_wMFkCOW zqj-OmXo~(?%v{rxE6h2{?*xr_J?VS_&^UT3R%SUqqs%Aj#@`>p@9Tya(%Vad%IAo~OY|!SU1{I$5#{8SPPBi0 zy3F=QF#GZBJw51tHvdNa9Q%{}!|q?^pUFS2`M@O*S!Vkp z-^^biulal=@_IfvMy~nY{1;-ZpW8>uNTi{k<8ih-k^|4V{+m)g{R90&gq1$(J>qwP zr#21ggYKEwZTt6x4%%180M*nwcbbI87nqP7ib?xT( z97u1EoJ&9HpvO1C>~!>O8drl~A3!yg*-!g_0roQkP%X{912Q&=G5a!GJz!Gg>`C}8 z!7tQ#Etjf))Te90-J;L+anOF1^4r;*7qj%T)?V7U?5nQYG;n#W+U>CBx(uF9kBhx` zyyXW_RX4KBu=+Yi#cNU({fc`Rd{Y~V+&Iu0m6)9aAMZXeaBbJU732?54U|GEL3(lj zz*o8}UvG4C>_Y>8T)`T~aZeRnDj%5dqi`-=iajy##Jtw*nSqhWb71G3X3NRQXy(_; zswuPcVlTYNzCZAjftST>Ny9Acz21iR2EH4(SAGZT;JfT4;NdUoI1lxMHB@G}tMT`j z0}TbdaP>zbiDtaNm2OXVBtM8g>eWJZzt5$63WY*A4rAHLuC#0l#UNr=#H{WheJU&F z=(QL>lO%OpAy^k?@2oe=%OQJC>A(%wj1 zAU)^AxqQBT0gcYXeYs}g9F5P@dyOD}6wS0`&jwwEyt+|3t3Jcpqbu`x7jgwY{{~-I zLg>M0oBBVYdTnIT{I}t>d!d)dVdt*ad63>9+PlkwHFhO8wDa?V-))r6^N#@?+Z&sU zJr;X5_PyBqv4;4n_>TCI_#edoHhw<-PCS{&C)Ol(B|hcID-V=uWBy0lOrp#_pZFqR zzMc3F>w25!^{$Bynyqw)J=CZ-lE^9d_uZ%5Zvy>w;<02~5_ucwzBf6Sd@T8F@Y=_!N3A*A`2|zPdS!@;Pg+Tl>>G={NQl*x%D!NAU5#8aQ31*721($7`{8 zJmF2ef$Jo3Ot_SjcIv&>kRnhfv?06E31w$UJWlt@wc4a8lt&^MUtq- z4^gYg!VyIX88u&HBemcy#0xFOgLX0vFo6sZ*SZA~Elj{vi8;dM08kAirN;#G0J!HS z$C}9)=ao3{nSNvqwS+f>AU6Xw=Ofm zlaT_#DBv)bWBfmEg=y!SbV>*NeNV?8PL#PD76d%^;RVs84zY5?F-E|i3&kqJi&=!=YAWEDkB<0vIt29{PCM`|s`@hl@C7X~R|97m`G8hIxz zsmO7sWF!V)$~ZE)=`0{xJK(7uk!A<&Xr>x?YvXUgl$?Zb+}iuu7Z{$ zdDzL46T1!AGV?ezSKYJAl^8WA_4?LWF?b~jSqMaMKr5MqG*Bi`Ej4aVUwQOLwT^kV zY)40yH_{=b1u3hSx3W=T)Y0L+`s9PRL}-b&<;;=Yc&ckjo-hN9;O{Y(q4b8ggHZh4kF3WuWJ`%2+O~-gnq};W`k4 zBkX911ZbQ@EDn?<#JO3ejiUpzrxC@;sj6S&g$`+n)P9lYYbahYOtEmZi8E(Xil}l@ zsSg$pm7ub!w(PdFjko!=&1FJmUz=3W%DLg9Z!vq_-5=h&sP)@nS~C>{49;l9X&UG{j=@=N#{(kXlhAeb!A9{j)o2ye>ONu`($Lmmkh-sR1K&S`E{`zD20leiHd9uu)0f$Xm7A;ndQ&Yp$stcFnZhOAN 
zphm|mn3s~w0yV5SvJ_O9>ONhid#D|Qq?^}K*pkL4S94dc&sUl|C8VHMMReF{L!nY8x22K#|VSR)G~N)2VwAj;f8L>#96< zk=@zW6^iPTjiC^J%TD6ef|$XA`%20=jo6dq)uKfN7Jb(s(Gl1 zLxO7-^ML$J<9DS?EkCE}C_d&2fQ#QFv@hPC;(&qFqGeg2B!_zCh978X1t1C-wCa;k zTVTzOn_d`buX+`qIOaaBWZE#Q)*eJW_pyeRex3yLg@rMLcOS;QhKctco3Qv=JM^Cv z2%!Z;{?l@3TaEMOBB@?1pb(cylD($YHs>!mRwXpVFN7wQnjrI05Y zEe(yWlM-xrUrs;}y_f2^0kQ;=pi-T&JZSC@j>#svtNr^uk-1MGtL$)h(H4xzC5k|d zNE6yr#>fLwt|y!0Jq<<@NTG_ucvJ2e@4`(>^rF5)h9#DS)4FJ)!&%B2_ZO>)kH}{L$)o@kMTAvf7Qv*G zS?F;c2mU`EqbU}C0n?k}zzFxHBydOY4isUU+@>kGLKDp=BJf}_U=+j~Z=&dc1mcn; zd0O%CG7_p$IX4L-Y1=L}Zbe8?zR)oGA=P7#Q;ba9Xig3yg>&IfAPy%PAxJ<;KsFHQ z-PZ!+^_)IKTk!qPYd9CvcB}PxAU=>dm}v@{Wc&=9{C{Au9~1MlqJIR4_7=QY+AOpIJJ3X2<}7sH!T~!WAn(} zM^D~4Ju`OjmYGuV@aEw=P62)U7Ms3(3(|L%j!#ZZm29RUBjRKC&z7dfYyhX8T7}jD zbl07?*$5z^_#LJ3iRr20hcinBJBQ<1-|I9ohioGRJ~ zNk`1q(mgY!;|I5wW+7oV&|o6^=3ty4BVy0Q)c9oS;O)2!wmO}M*x#6*yC)2*^ANjZ zVr=K!5ioBn^%w#b>>;2yL+J`ugnTemnwc220fLMOzO=y(={&^Vxu-O~=V)=%mky7i zG(zR>o*3mZay^1FJw8)BdT3%4p<5?risQ7bLvn~^ed}0BqR4Y?j#9(ZQ?oPElf?Qs zDk~i+9UPvVI|l16w*iNcjpEe!+~l4UlaobQA`qG@j;>w1>h8I*jm34O?a0x|(*3Bk zbU*kzv>kSc*i6&JIw*MP+BWbOet&af^9XJRF9NSZ0Rxpki@RXv#PDs~0O!e|gyxF@ z1>7(cCp}!mC+VhEj3oimLp)0 zFQV9~cm(XKogD%k3|S z>$wh5R0dHiA)TI=dBm(fU>+g;99R*usUlY@@V4v&YV$u}oG-Q53eSe2ssc1) for quick FCC boundary-after tests. 
+ deltaTccc0 = 0 + deltaTccc1 = 2 + deltaTcccGt1 = 4 + deltaTcccMask = 6 + deltaShift = 3 + + maxDelta = 0x40 +) + +const ( + jamoLBase rune = 0x1100 /* "lead" jamo */ + jamoLEnd rune = 0x1112 + jamoVBase rune = 0x1161 /* "vowel" jamo */ + jamoVEnd rune = 0x1175 + jamoTBase rune = 0x11a7 /* "trail" jamo */ + jamoTEnd rune = 0x11c2 + + hangulBase rune = 0xac00 + hangulEnd rune = 0xd7a3 + + jamoLCount rune = 19 + jamoVCount rune = 21 + jamoTCount rune = 28 + + hangulCount = jamoLCount * jamoVCount * jamoTCount + hangulLimit = hangulBase + hangulCount +) + +const ( + mappingHasCccLcccWord = 0x80 + mappingHasRawMapping = 0x40 + // unused bit 0x20, + mappingLengthMask = 0x1f +) + +/** + * Constants for normalization modes. + * @deprecated ICU 56 Use unorm2.h instead. + */ +type Mode int32 + +const ( + /** No decomposition/composition. @deprecated ICU 56 Use unorm2.h instead. */ + NormNone Mode = 1 + /** Canonical decomposition. @deprecated ICU 56 Use unorm2.h instead. */ + NormNfd Mode = 2 + /** Compatibility decomposition. @deprecated ICU 56 Use unorm2.h instead. */ + NormNfkd Mode = 3 + /** Canonical decomposition followed by canonical composition. @deprecated ICU 56 Use unorm2.h instead. */ + NormNfc Mode = 4 + /** Default normalization. @deprecated ICU 56 Use unorm2.h instead. */ + NormDefault Mode = NormNfc + /** Compatibility decomposition followed by canonical composition. @deprecated ICU 56 Use unorm2.h instead. */ + NormNfkc Mode = 5 + /** "Fast C or D" form. @deprecated ICU 56 Use unorm2.h instead. */ + NormFcd Mode = 6 +) + +/** + * Result values for normalization quick check functions. + * For details see http://www.unicode.org/reports/tr15/#Detecting_Normalization_Forms + * @stable ICU 2.0 + */ +type CheckResult int + +const ( + /** + * The input string is not in the normalization form. + * @stable ICU 2.0 + */ + No CheckResult = iota + /** + * The input string is in the normalization form. 
+ * @stable ICU 2.0 + */ + Yes + /** + * The input string may or may not be in the normalization form. + * This value is only returned for composition forms like NFC and FCC, + * when a backward-combining character is found for which the surrounding text + * would have to be analyzed further. + * @stable ICU 2.0 + */ + Maybe +) diff --git a/go/mysql/icuregex/internal/normalizer/normalizer.go b/go/mysql/icuregex/internal/normalizer/normalizer.go new file mode 100644 index 00000000000..c13a4878deb --- /dev/null +++ b/go/mysql/icuregex/internal/normalizer/normalizer.go @@ -0,0 +1,482 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package normalizer + +import ( + "errors" + "sync" + + "vitess.io/vitess/go/mysql/icuregex/internal/icudata" + "vitess.io/vitess/go/mysql/icuregex/internal/udata" + "vitess.io/vitess/go/mysql/icuregex/internal/uset" + "vitess.io/vitess/go/mysql/icuregex/internal/utf16" + "vitess.io/vitess/go/mysql/icuregex/internal/utrie" +) + +type Normalizer struct { + minDecompNoCP rune + minCompNoMaybeCP rune + minLcccCP rune + + // Norm16 value thresholds for quick check combinations and types of extra data. 
+ minYesNo uint16 + minYesNoMappingsOnly uint16 + minNoNo uint16 + minNoNoCompBoundaryBefore uint16 + minNoNoCompNoMaybeCC uint16 + minNoNoEmpty uint16 + limitNoNo uint16 + centerNoNoDelta uint16 + minMaybeYes uint16 + + normTrie *utrie.UcpTrie + + maybeYesCompositions []uint16 + extraData []uint16 // mappings and/or compositions for yesYes, yesNo & noNo characters + smallFCD []uint8 // [0x100] one bit per 32 BMP code points, set if any FCD!=0 +} + +var nfc *Normalizer +var nfkc *Normalizer + +var normalizerOnce sync.Once + +func loadNormalizer() { + normalizerOnce.Do(func() { + nfc = &Normalizer{} + if err := nfc.load(icudata.Nfc); err != nil { + panic(err) + } + + nfkc = &Normalizer{} + if err := nfkc.load(icudata.Nfkc); err != nil { + panic(err) + } + }) +} + +const ixNormTrieOffset = 0 +const ixExtraDataOffset = 1 +const ixSmallFcdOffset = 2 +const ixReserved3Offset = 3 +const ixTotalSize = 7 + +const ixMinDecompNoCp = 8 +const ixMinCompNoMaybeCp = 9 + +/** Mappings & compositions in [minYesNo..minYesNoMappingsOnly[. */ +const ixMinYesNo = 10 + +/** Mappings are comp-normalized. */ +const ixMinNoNo = 11 +const ixLimitNoNo = 12 +const ixMinMaybeYes = 13 + +/** Mappings only in [minYesNoMappingsOnly..minNoNo[. */ +const ixMinYesNoMappingsOnly = 14 + +/** Mappings are not comp-normalized but have a comp boundary before. */ +const ixMinNoNoCompBoundaryBefore = 15 + +/** Mappings do not have a comp boundary before. */ +const ixMinNoNoCompNoMaybeCc = 16 + +/** Mappings to the empty string. 
*/ +const ixMinNoNoEmpty = 17 + +const ixMinLcccCp = 18 +const ixCount = 20 + +func (n *Normalizer) load(data []byte) error { + bytes := udata.NewBytes(data) + + err := bytes.ReadHeader(func(info *udata.DataInfo) bool { + return info.Size >= 20 && + info.IsBigEndian == 0 && + info.CharsetFamily == 0 && + info.DataFormat[0] == 0x4e && /* dataFormat="unam" */ + info.DataFormat[1] == 0x72 && + info.DataFormat[2] == 0x6d && + info.DataFormat[3] == 0x32 && + info.FormatVersion[0] == 4 + }) + if err != nil { + return err + } + + indexesLength := int32(bytes.Uint32()) / 4 + if indexesLength <= ixMinLcccCp { + return errors.New("normalizer2 data: not enough indexes") + } + indexes := make([]int32, indexesLength) + indexes[0] = indexesLength * 4 + for i := int32(1); i < indexesLength; i++ { + indexes[i] = bytes.Int32() + } + + n.minDecompNoCP = indexes[ixMinDecompNoCp] + n.minCompNoMaybeCP = indexes[ixMinCompNoMaybeCp] + n.minLcccCP = indexes[ixMinLcccCp] + + n.minYesNo = uint16(indexes[ixMinYesNo]) + n.minYesNoMappingsOnly = uint16(indexes[ixMinYesNoMappingsOnly]) + n.minNoNo = uint16(indexes[ixMinNoNo]) + n.minNoNoCompBoundaryBefore = uint16(indexes[ixMinNoNoCompBoundaryBefore]) + n.minNoNoCompNoMaybeCC = uint16(indexes[ixMinNoNoCompNoMaybeCc]) + n.minNoNoEmpty = uint16(indexes[ixMinNoNoEmpty]) + n.limitNoNo = uint16(indexes[ixLimitNoNo]) + n.minMaybeYes = uint16(indexes[ixMinMaybeYes]) + + n.centerNoNoDelta = uint16(indexes[ixMinMaybeYes]>>deltaShift) - maxDelta - 1 + + offset := indexes[ixNormTrieOffset] + nextOffset := indexes[ixExtraDataOffset] + triePosition := bytes.Position() + + n.normTrie, err = utrie.UcpTrieFromBytes(bytes) + if err != nil { + return err + } + + trieLength := bytes.Position() - triePosition + if trieLength > nextOffset-offset { + return errors.New("normalizer2 data: not enough bytes for normTrie") + } + bytes.Skip((nextOffset - offset) - trieLength) // skip padding after trie bytes + + // Read the composition and mapping data. 
+ offset = nextOffset + nextOffset = indexes[ixSmallFcdOffset] + numChars := (nextOffset - offset) / 2 + if numChars != 0 { + n.maybeYesCompositions = bytes.Uint16Slice(numChars) + n.extraData = n.maybeYesCompositions[((minNormalMaybeYes - n.minMaybeYes) >> offsetShift):] + } + + // smallFCD: new in formatVersion 2 + n.smallFCD = bytes.Uint8Slice(0x100) + return nil +} + +func Nfc() *Normalizer { + loadNormalizer() + return nfc +} + +func Nfkc() *Normalizer { + loadNormalizer() + return nfkc +} + +func (n *Normalizer) AddPropertyStarts(u *uset.UnicodeSet) { + var start, end rune + var value uint32 + for { + end, value = nfc.normTrie.GetRange(start, utrie.UcpMapRangeFixedLeadSurrogates, inert, nil) + if end < 0 { + break + } + u.AddRune(start) + if start != end && n.isAlgorithmicNoNo(uint16(value)) && (value&deltaTcccMask) > deltaTccc1 { + // Range of code points with same-norm16-value algorithmic decompositions. + // They might have different non-zero FCD16 values. + prevFCD16 := n.GetFCD16(start) + for { + start++ + if start > end { + break + } + fcd16 := n.GetFCD16(start) + if fcd16 != prevFCD16 { + u.AddRune(start) + prevFCD16 = fcd16 + } + } + } + start = end + 1 + } + + // add Hangul LV syllables and LV+1 because of skippables + for c := hangulBase; c < hangulLimit; c += jamoTCount { + u.AddRune(c) + u.AddRune(c + 1) + } + u.AddRune(hangulLimit) +} + +func (n *Normalizer) isAlgorithmicNoNo(norm16 uint16) bool { + return n.limitNoNo <= norm16 && norm16 < n.minMaybeYes +} + +func (n *Normalizer) GetFCD16(c rune) uint16 { + if c < n.minDecompNoCP { + return 0 + } else if c <= 0xffff { + if !n.singleLeadMightHaveNonZeroFCD16(c) { + return 0 + } + } + return n.getFCD16FromNormData(c) +} + +func (n *Normalizer) singleLeadMightHaveNonZeroFCD16(lead rune) bool { + // 0<=lead<=0xffff + bits := n.smallFCD[lead>>8] + if bits == 0 { + return false + } + return ((bits >> ((lead >> 5) & 7)) & 1) != 0 +} + +func (n *Normalizer) getFCD16FromNormData(c rune) uint16 { + norm16 
:= n.getNorm16(c) + if norm16 >= n.limitNoNo { + if norm16 >= minNormalMaybeYes { + // combining mark + norm16 = uint16(n.getCCFromNormalYesOrMaybe(norm16)) + return norm16 | (norm16 << 8) + } else if norm16 >= n.minMaybeYes { + return 0 + } else { // isDecompNoAlgorithmic(norm16) + deltaTrailCC := norm16 & deltaTcccMask + if deltaTrailCC <= deltaTccc1 { + return deltaTrailCC >> offsetShift + } + // Maps to an isCompYesAndZeroCC. + c = n.mapAlgorithmic(c, norm16) + norm16 = n.getRawNorm16(c) + } + } + + if norm16 <= n.minYesNo || n.isHangulLVT(norm16) { + // no decomposition or Hangul syllable, all zeros + return 0 + } + // c decomposes, get everything from the variable-length extra data + mapping := n.getMapping(norm16) + firstUnit := mapping[1] + if firstUnit&mappingHasCccLcccWord != 0 { + norm16 |= mapping[0] & 0xff00 + } + return norm16 +} + +func (n *Normalizer) getMapping(norm16 uint16) []uint16 { + return n.extraData[(norm16>>offsetShift)-1:] +} + +func (n *Normalizer) getNorm16(c rune) uint16 { + if utf16.IsLead(c) { + return inert + } + return n.getRawNorm16(c) +} + +func (n *Normalizer) getRawNorm16(c rune) uint16 { + return uint16(n.normTrie.Get(c)) +} + +func (n *Normalizer) getCCFromNormalYesOrMaybe(norm16 uint16) uint8 { + return uint8(norm16 >> offsetShift) +} + +func (n *Normalizer) mapAlgorithmic(c rune, norm16 uint16) rune { + return c + rune(norm16>>deltaShift) - rune(n.centerNoNoDelta) +} + +func (n *Normalizer) isHangulLV(norm16 uint16) bool { + return norm16 == n.minYesNo +} + +func (n *Normalizer) isHangulLVT(norm16 uint16) bool { + return norm16 == n.hangulLVT() +} + +func (n *Normalizer) hangulLVT() uint16 { + return n.minYesNoMappingsOnly | hasCompBoundaryAfter +} + +func (n *Normalizer) getComposeQuickCheck(c rune) CheckResult { + return n.getCompQuickCheck(n.getNorm16(c)) +} + +func (n *Normalizer) getDecomposeQuickCheck(c rune) CheckResult { + if n.isDecompYes(n.getNorm16(c)) { + return Yes + } + return No +} + +func QuickCheck(c rune, 
mode Mode) CheckResult { + if mode <= NormNone || NormFcd <= mode { + return Yes + } + switch mode { + case NormNfc: + return Nfc().getComposeQuickCheck(c) + case NormNfd: + return Nfc().getDecomposeQuickCheck(c) + case NormNfkc: + return Nfkc().getComposeQuickCheck(c) + case NormNfkd: + return Nfkc().getDecomposeQuickCheck(c) + default: + return Maybe + } +} + +func IsInert(c rune, mode Mode) bool { + switch mode { + case NormNfc: + return Nfc().isCompInert(c) + case NormNfd: + return Nfc().isDecompInert(c) + case NormNfkc: + return Nfkc().isCompInert(c) + case NormNfkd: + return Nfkc().isDecompInert(c) + default: + return true + } +} + +func (n *Normalizer) isDecompYes(norm16 uint16) bool { + return norm16 < n.minYesNo || n.minMaybeYes <= norm16 +} + +func (n *Normalizer) getCompQuickCheck(norm16 uint16) CheckResult { + if norm16 < n.minNoNo || minYesYesWithCC <= norm16 { + return Yes + } else if n.minMaybeYes <= norm16 { + return Maybe + } else { + return No + } +} + +func (n *Normalizer) isMaybeOrNonZeroCC(norm16 uint16) bool { + return norm16 >= n.minMaybeYes +} + +func (n *Normalizer) isDecompNoAlgorithmic(norm16 uint16) bool { + return norm16 >= n.limitNoNo +} + +func (n *Normalizer) IsCompNo(c rune) bool { + norm16 := n.getNorm16(c) + return n.minNoNo <= norm16 && norm16 < n.minMaybeYes +} + +func (n *Normalizer) Decompose(c rune) []rune { + norm16 := n.getNorm16(c) + if c < n.minDecompNoCP || n.isMaybeOrNonZeroCC(norm16) { + // c does not decompose + return nil + } + var decomp []rune + + if n.isDecompNoAlgorithmic(norm16) { + // Maps to an isCompYesAndZeroCC. + c = n.mapAlgorithmic(c, norm16) + decomp = append(decomp, c) + // The mapping might decompose further. 
+ norm16 = n.getRawNorm16(c) + } + if norm16 < n.minYesNo { + return decomp + } else if n.isHangulLV(norm16) || n.isHangulLVT(norm16) { + // Hangul syllable: decompose algorithmically + parts := hangulDecompose(c) + for len(parts) > 0 { + c = rune(parts[0]) + decomp = append(decomp, c) + parts = parts[1:] + } + return decomp + } + // c decomposes, get everything from the variable-length extra data + mapping := n.getMapping(norm16) + length := mapping[1] & mappingLengthMask + mapping = mapping[2 : 2+length] + + for len(mapping) > 0 { + c, mapping = utf16.NextUnsafe(mapping) + decomp = append(decomp, c) + } + + return decomp +} + +func hangulDecompose(c rune) []uint16 { + c -= hangulBase + c2 := c % jamoTCount + c /= jamoTCount + var buffer []uint16 + buffer = append(buffer, uint16(jamoLBase+c/jamoVCount)) + buffer = append(buffer, uint16(jamoVBase+c%jamoVCount)) + if c2 != 0 { + buffer = append(buffer, uint16(jamoTBase+c2)) + } + return buffer +} + +func (n *Normalizer) isCompInert(c rune) bool { + norm16 := n.getNorm16(c) + return n.isCompYesAndZeroCC(norm16) && (norm16&hasCompBoundaryAfter) != 0 +} + +func (n *Normalizer) isDecompInert(c rune) bool { + return n.isDecompYesAndZeroCC(n.getNorm16(c)) +} + +func (n *Normalizer) isCompYesAndZeroCC(norm16 uint16) bool { + return norm16 < n.minNoNo +} + +func (n *Normalizer) isDecompYesAndZeroCC(norm16 uint16) bool { + return norm16 < n.minYesNo || + norm16 == jamoVt || + (n.minMaybeYes <= norm16 && norm16 <= minNormalMaybeYes) +} + +func (n *Normalizer) CombiningClass(c rune) uint8 { + return n.getCC(n.getNorm16(c)) +} + +func (n *Normalizer) getCC(norm16 uint16) uint8 { + if norm16 >= minNormalMaybeYes { + return n.getCCFromNormalYesOrMaybe(norm16) + } + if norm16 < n.minNoNo || n.limitNoNo <= norm16 { + return 0 + } + return n.getCCFromNoNo(norm16) + +} + +func (n *Normalizer) getCCFromNoNo(norm16 uint16) uint8 { + mapping := n.getMapping(norm16) + if mapping[1]&mappingHasCccLcccWord != 0 { + return uint8(mapping[0]) 
+ } + return 0 +} diff --git a/go/mysql/icuregex/internal/pattern/unescape.go b/go/mysql/icuregex/internal/pattern/unescape.go new file mode 100644 index 00000000000..e4a554ff612 --- /dev/null +++ b/go/mysql/icuregex/internal/pattern/unescape.go @@ -0,0 +1,314 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pattern + +import ( + "strings" + "unicode/utf8" + + "vitess.io/vitess/go/mysql/icuregex/internal/utf16" +) + +/* Convert one octal digit to a numeric value 0..7, or -1 on failure */ +func _digit8(c rune) rune { + if c >= 0x0030 && c <= 0x0037 { + return (c - 0x0030) + } + return -1 +} + +/* Convert one hex digit to a numeric value 0..F, or -1 on failure */ +func _digit16(c rune) rune { + if c >= 0x0030 && c <= 0x0039 { + return (c - 0x0030) + } + if c >= 0x0041 && c <= 0x0046 { + return (c - (0x0041 - 10)) + } + if c >= 0x0061 && c <= 0x0066 { + return (c - (0x0061 - 10)) + } + return -1 +} + +var unscapeMap = []byte{ + /*" 0x22, 0x22 */ + /*' 0x27, 0x27 */ + /*? 
0x3F, 0x3F */ + /*\ 0x5C, 0x5C */ + /*a*/ 0x61, 0x07, + /*b*/ 0x62, 0x08, + /*e*/ 0x65, 0x1b, + /*f*/ 0x66, 0x0c, + /*n*/ 0x6E, 0x0a, + /*r*/ 0x72, 0x0d, + /*t*/ 0x74, 0x09, + /*v*/ 0x76, 0x0b, +} + +func Unescape(str string) (string, bool) { + var idx int + if idx = strings.IndexByte(str, '\\'); idx < 0 { + return str, true + } + + var result strings.Builder + result.WriteString(str[:idx]) + str = str[idx:] + + for len(str) > 0 { + if str[0] == '\\' { + var r rune + r, str = UnescapeAt(str[1:]) + if r < 0 { + return "", false + } + result.WriteRune(r) + } else { + result.WriteByte(str[0]) + str = str[1:] + } + } + return result.String(), true +} + +func UnescapeAt(str string) (rune, string) { + c, w := utf8.DecodeRuneInString(str) + str = str[w:] + if c == utf8.RuneError && (w == 0 || w == 1) { + return -1, str + } + + var minDig, maxDig, n int + var braces bool + var bitsPerDigit = 4 + var result rune + + switch c { + case 'u': + minDig = 4 + maxDig = 4 + case 'U': + minDig = 8 + maxDig = 8 + case 'x': + minDig = 1 + if len(str) > 0 && str[0] == '{' { + str = str[1:] + braces = true + maxDig = 8 + } else { + maxDig = 2 + } + default: + if dig := _digit8(c); dig >= 0 { + minDig = 1 + maxDig = 4 + n = 1 + bitsPerDigit = 3 + result = dig + } + } + + if minDig != 0 { + for n < maxDig && len(str) > 0 { + c, w = utf8.DecodeRuneInString(str) + if c == utf8.RuneError && w == 1 { + return -1, str + } + + var dig rune + if bitsPerDigit == 3 { + dig = _digit8(c) + } else { + dig = _digit16(c) + } + if dig < 0 { + break + } + result = (result << bitsPerDigit) | dig + str = str[w:] + n++ + } + if n < minDig { + return -1, str + } + if braces { + if c != '}' { + return -1, str + } + str = str[1:] + } + if result < 0 || result > utf8.MaxRune { + return -1, str + } + if len(str) > 0 && utf16.IsLead(result) { + c, w = utf8.DecodeRuneInString(str) + if c == utf8.RuneError && (w == 0 || w == 1) { + return -1, str + } + if c == '\\' { + var str2 string + c, str2 = 
UnescapeAt(str[1:]) + if utf16.IsTrail(c) { + result = utf16.DecodeRune(result, c) + str = str2 + } + } + } + return result, str + } + + if c < utf8.RuneSelf { + for i := 0; i < len(unscapeMap); i += 2 { + if byte(c) == unscapeMap[i] { + return rune(unscapeMap[i+1]), str + } + if byte(c) < unscapeMap[i] { + break + } + } + } + + if c == 'c' && len(str) > 0 { + c, w = utf8.DecodeRuneInString(str) + if c == utf8.RuneError && (w == 0 || w == 1) { + return -1, str + } + return 0x1f & c, str[w:] + } + + return c, str +} + +func UnescapeAtRunes(str []rune) (rune, []rune) { + if len(str) == 0 { + return -1, str + } + + c := str[0] + str = str[1:] + if c == utf8.RuneError { + return -1, str + } + + var minDig, maxDig, n int + var braces bool + var bitsPerDigit = 4 + var result rune + + switch c { + case 'u': + minDig = 4 + maxDig = 4 + case 'U': + minDig = 8 + maxDig = 8 + case 'x': + minDig = 1 + if len(str) > 0 && str[0] == '{' { + str = str[1:] + braces = true + maxDig = 8 + } else { + maxDig = 2 + } + default: + if dig := _digit8(c); dig >= 0 { + minDig = 1 + maxDig = 4 + n = 1 + bitsPerDigit = 3 + result = dig + } + } + + if minDig != 0 { + for n < maxDig && len(str) > 0 { + c = str[0] + if c == utf8.RuneError { + return -1, str + } + + var dig rune + if bitsPerDigit == 3 { + dig = _digit8(c) + } else { + dig = _digit16(c) + } + if dig < 0 { + break + } + result = (result << bitsPerDigit) | dig + str = str[1:] + n++ + } + if n < minDig { + return -1, str + } + if braces { + if c != '}' { + return -1, str + } + str = str[1:] + } + if result < 0 || result > utf8.MaxRune { + return -1, str + } + if len(str) > 0 && utf16.IsLead(result) { + c = str[0] + if c == utf8.RuneError { + return -1, str + } + if c == '\\' { + var str2 []rune + c, str2 = UnescapeAtRunes(str[1:]) + if utf16.IsTrail(c) { + result = utf16.DecodeRune(result, c) + str = str2 + } + } + } + return result, str + } + + if c < utf8.RuneSelf { + for i := 0; i < len(unscapeMap); i += 2 { + if byte(c) == 
unscapeMap[i] { + return rune(unscapeMap[i+1]), str + } + if byte(c) < unscapeMap[i] { + break + } + } + } + + if c == 'c' && len(str) > 0 { + c = str[0] + if c == utf8.RuneError { + return -1, str + } + return 0x1f & c, str[1:] + } + + return c, str +} diff --git a/go/mysql/icuregex/internal/pattern/unescape_test.go b/go/mysql/icuregex/internal/pattern/unescape_test.go new file mode 100644 index 00000000000..0bb76c2bfdb --- /dev/null +++ b/go/mysql/icuregex/internal/pattern/unescape_test.go @@ -0,0 +1,48 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package pattern + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestUnescapeAt(t *testing.T) { + r, str := UnescapeAt("ud800\\ud800\\udc00") + assert.Equal(t, rune(0xd800), r) + assert.Equal(t, "\\ud800\\udc00", str) + + r, str = UnescapeAt(str[1:]) + assert.Equal(t, rune(0x00010000), r) + assert.Equal(t, "", str) +} + +func TestUnescapeAtRunes(t *testing.T) { + r, str := UnescapeAtRunes([]rune("ud800\\ud800\\udc00")) + assert.Equal(t, rune(0xd800), r) + assert.Equal(t, []rune("\\ud800\\udc00"), str) + + r, str = UnescapeAtRunes(str[1:]) + assert.Equal(t, rune(0x00010000), r) + assert.Equal(t, []rune(""), str) +} diff --git a/go/mysql/icuregex/internal/pattern/utils.go b/go/mysql/icuregex/internal/pattern/utils.go new file mode 100644 index 00000000000..4dcf55e9f42 --- /dev/null +++ b/go/mysql/icuregex/internal/pattern/utils.go @@ -0,0 +1,111 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package pattern + +import ( + "strings" + "unicode/utf8" +) + +var patternPropsLatin1 = [256]uint8{ + // WS: 9..D + 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 5, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + // WS: 20 Syntax: 21..2F + 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + // Syntax: 3A..40 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, + 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + // Syntax: 5B..5E + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, + // Syntax: 60 + 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + // Syntax: 7B..7E + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, + // WS: 85 + 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + // Syntax: A1..A7, A9, AB, AC, AE + 0, 3, 3, 3, 3, 3, 3, 3, 0, 3, 0, 3, 3, 0, 3, 0, + // Syntax: B0, B1, B6, BB, BF + 3, 3, 0, 0, 0, 0, 3, 0, 0, 0, 0, 3, 0, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + // Syntax: D7 + 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + // Syntax: F7 + 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, +} + +func IsWhitespace(c rune) bool { + if c < 0 { + return false + } else if c <= 0xff { + return (patternPropsLatin1[c]>>2)&1 != 0 + } else if 0x200e <= c && c <= 0x2029 { + return c <= 0x200f || 0x2028 <= c + } else { + return false + } +} + +func SkipWhitespace(str string) string { + for { + r, w := utf8.DecodeRuneInString(str) + if r == utf8.RuneError && (w == 0 || w == 1) { + return str[w:] + } + if !IsWhitespace(r) { + return str + } + str = str[w:] + } +} + +func IsUnprintable(c rune) bool { + return !(c >= 0x20 && c <= 0x7E) +} + +// "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" +var digits = [...]byte{ + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, +} + +func EscapeUnprintable(w *strings.Builder, c rune) { + w.WriteByte('\\') + if (c & ^0xFFFF) 
!= 0 { + w.WriteByte('U') + w.WriteByte(digits[0xF&(c>>28)]) + w.WriteByte(digits[0xF&(c>>24)]) + w.WriteByte(digits[0xF&(c>>20)]) + w.WriteByte(digits[0xF&(c>>16)]) + } else { + w.WriteByte('u') + } + w.WriteByte(digits[0xF&(c>>12)]) + w.WriteByte(digits[0xF&(c>>8)]) + w.WriteByte(digits[0xF&(c>>4)]) + w.WriteByte(digits[0xF&c]) +} diff --git a/go/mysql/icuregex/internal/ubidi/loader.go b/go/mysql/icuregex/internal/ubidi/loader.go new file mode 100644 index 00000000000..e30ca402f81 --- /dev/null +++ b/go/mysql/icuregex/internal/ubidi/loader.go @@ -0,0 +1,125 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ubidi + +import ( + "errors" + "sync" + + "vitess.io/vitess/go/mysql/icuregex/internal/icudata" + "vitess.io/vitess/go/mysql/icuregex/internal/udata" + "vitess.io/vitess/go/mysql/icuregex/internal/utrie" +) + +var ubidiOnce sync.Once +var ubidi struct { + indexes []int32 + trie *utrie.UTrie2 + mirrors []uint32 + jg []uint8 + jg2 []uint8 +} + +func indexes() []int32 { + loadUBidi() + return ubidi.indexes +} + +func trie() *utrie.UTrie2 { + loadUBidi() + return ubidi.trie +} + +func mirrors() []uint32 { + loadUBidi() + return ubidi.mirrors +} + +func jg() []uint8 { + loadUBidi() + return ubidi.jg +} + +func jg2() []uint8 { + loadUBidi() + return ubidi.jg2 +} + +func loadUBidi() { + ubidiOnce.Do(func() { + b := udata.NewBytes(icudata.UBidi) + if err := readData(b); err != nil { + panic(err) + } + }) +} + +func readData(bytes *udata.Bytes) error { + err := bytes.ReadHeader(func(info *udata.DataInfo) bool { + return info.DataFormat[0] == 0x42 && + info.DataFormat[1] == 0x69 && + info.DataFormat[2] == 0x44 && + info.DataFormat[3] == 0x69 && + info.FormatVersion[0] == 2 + }) + if err != nil { + return err + } + + count := int32(bytes.Uint32()) + if count < ixTop { + return errors.New("indexes[0] too small in ucase.icu") + } + + ubidi.indexes = make([]int32, count) + ubidi.indexes[0] = count + + for i := int32(1); i < count; i++ { + ubidi.indexes[i] = int32(bytes.Uint32()) + } + + ubidi.trie, err = utrie.UTrie2FromBytes(bytes) + if err != nil { + return err + } + + expectedTrieLength := ubidi.indexes[ixTrieSize] + trieLength := ubidi.trie.SerializedLength() + + if trieLength > expectedTrieLength { + return errors.New("ucase.icu: not enough bytes for the trie") + } + + bytes.Skip(expectedTrieLength - trieLength) + + if n := ubidi.indexes[ixMirrorLength]; n > 0 { + ubidi.mirrors = bytes.Uint32Slice(n) + } + if n := ubidi.indexes[ixJgLimit] - ubidi.indexes[ixJgStart]; n > 0 { + ubidi.jg = bytes.Uint8Slice(n) + } + if n := ubidi.indexes[ixJgLimit2] - 
ubidi.indexes[ixJgStart2]; n > 0 { + ubidi.jg2 = bytes.Uint8Slice(n) + } + + return nil +} diff --git a/go/mysql/icuregex/internal/ubidi/ubidi.go b/go/mysql/icuregex/internal/ubidi/ubidi.go new file mode 100644 index 00000000000..79482dfbc8d --- /dev/null +++ b/go/mysql/icuregex/internal/ubidi/ubidi.go @@ -0,0 +1,390 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ubidi + +const ( + ixIndexTop = iota + ixLength + ixTrieSize + ixMirrorLength + + ixJgStart + ixJgLimit + ixJgStart2 /* new in format version 2.2, ICU 54 */ + ixJgLimit2 + + maxValuesIndex + ixTop +) + +const ( + /* UBIDI_CLASS_SHIFT=0, */ /* bidi class: 5 bits (4..0) */ + jtShift = 5 /* joining type: 3 bits (7..5) */ + + bptShift = 8 /* Bidi_Paired_Bracket_Type(bpt): 2 bits (9..8) */ + + joinControlShift = 10 + bidiControlShift = 11 + + isMirroredShift = 12 /* 'is mirrored' */ +) + +/** + * Bidi Paired Bracket Type constants. + * + * @see UCHAR_BIDI_PAIRED_BRACKET_TYPE + * @stable ICU 52 + */ +type UPairedBracketType int32 + +/* + * Note: UBidiPairedBracketType constants are parsed by preparseucd.py. + * It matches lines like + * U_BPT_ + */ +const ( + /** Not a paired bracket. 
/*
 * Bit masks for the 16-bit word stored per code point in the ubidi
 * properties trie: bits 0..4 hold the bidi class (see Class), bits 5..7
 * the joining type (see JoinType), bits 8..9 the paired bracket type
 * (see PairedBracketType).
 */
const classMask = 0x0000001f
const jtMask = 0x000000e0
const bptMask = 0x00000300

/**
 * Joining Type constants.
 *
 * @see UCHAR_JOINING_TYPE
 * @stable ICU 2.2
 */
type JoiningType int32

/*
 * Note: UJoiningType constants are parsed by preparseucd.py.
 * It matches lines like
 *     U_JT_<Unicode Joining_Type value name>
 */
const (
	JtNonJoining  JoiningType = iota /*[U]*/
	JtJoinCausing                    /*[C]*/
	JtDualJoining                    /*[D]*/
	JtLeftJoining                    /*[L]*/
	JtRightJoining                   /*[R]*/
	JtTransparent                    /*[T]*/
)

/**
 * Joining Group constants.
 *
 * @see UCHAR_JOINING_GROUP
 * @stable ICU 2.2
 */
type JoiningGroup int32

/*
 * Note: UJoiningGroup constants are parsed by preparseucd.py.
 * It matches lines like
 *     U_JG_<Unicode Joining_Group value name>
 *
 * The constants are ordered alphabetically within each "era" of additions;
 * their numeric values are the positions in this list, so the order must
 * never change (it must match the values stored in the data file).
 */
const (
	JgNoJoiningGroup JoiningGroup = iota
	JgAin
	JgAlaph
	JgAlef
	JgBeh
	JgBeth
	JgDal
	JgDalathRish
	JgE
	JgFeh
	JgFinalSemkath
	JgGaf
	JgGamal
	JgHah
	JgTehMarbutaGoal /**< @stable ICU 4.6 */
	JgHe
	JgHeh
	JgHehGoal
	JgHeth
	JgKaf
	JgKaph
	JgKnottedHeh
	JgLam
	JgLamadh
	JgMeem
	JgMim
	JgNoon
	JgNun
	JgPe
	JgQaf
	JgQaph
	JgReh
	JgReversedPe
	JgSad
	JgSadhe
	JgSeen
	JgSemkath
	JgShin
	JgSwashKaf
	JgSyriacWaw
	JgTah
	JgTaw
	JgTehMarbuta
	JgTeth
	JgWaw
	JgYeh
	JgYehBarree
	JgYehWithTail
	JgYudh
	JgYudhHe
	JgZain
	JgFe                    /**< @stable ICU 2.6 */
	JgKhaph                 /**< @stable ICU 2.6 */
	JgZhain                 /**< @stable ICU 2.6 */
	JgBurushashkiYehBarree  /**< @stable ICU 4.0 */
	JgFarsiYeh              /**< @stable ICU 4.4 */
	JgNya                   /**< @stable ICU 4.4 */
	JgRohingyaYeh           /**< @stable ICU 49 */
	JgManichaeanAleph       /**< @stable ICU 54 */
	JgManichaeanAyin        /**< @stable ICU 54 */
	JgManichaeanBeth        /**< @stable ICU 54 */
	JgManichaeanDaleth      /**< @stable ICU 54 */
	JgManichaeanDhamedh     /**< @stable ICU 54 */
	JgManichaeanFive        /**< @stable ICU 54 */
	JgManichaeanGimel       /**< @stable ICU 54 */
	JgManichaeanHeth        /**< @stable ICU 54 */
	JgManichaeanHundred     /**< @stable ICU 54 */
	JgManichaeanKaph        /**< @stable ICU 54 */
	JgManichaeanLamedh      /**< @stable ICU 54 */
	JgManichaeanMem         /**< @stable ICU 54 */
	JgManichaeanNun         /**< @stable ICU 54 */
	JgManichaeanOne         /**< @stable ICU 54 */
	JgManichaeanPe          /**< @stable ICU 54 */
	JgManichaeanQoph        /**< @stable ICU 54 */
	JgManichaeanResh        /**< @stable ICU 54 */
	JgManichaeanSadhe       /**< @stable ICU 54 */
	JgManichaeanSamekh      /**< @stable ICU 54 */
	JgManichaeanTaw         /**< @stable ICU 54 */
	JgManichaeanTen         /**< @stable ICU 54 */
	JgManichaeanTeth        /**< @stable ICU 54 */
	JgManichaeanThamedh     /**< @stable ICU 54 */
	JgManichaeanTwenty      /**< @stable ICU 54 */
	JgManichaeanWaw         /**< @stable ICU 54 */
	JgManichaeanYodh        /**< @stable ICU 54 */
	JgManichaeanZayin       /**< @stable ICU 54 */
	JgStraightWaw           /**< @stable ICU 54 */
	JgAfricanFeh            /**< @stable ICU 58 */
	JgAfricanNoon           /**< @stable ICU 58 */
	JgAfricanQaf            /**< @stable ICU 58 */

	JgMalayalamBha  /**< @stable ICU 60 */
	JgMalayalamJa   /**< @stable ICU 60 */
	JgMalayalamLla  /**< @stable ICU 60 */
	JgMalayalamLlla /**< @stable ICU 60 */
	JgMalayalamNga  /**< @stable ICU 60 */
	JgMalayalamNna  /**< @stable ICU 60 */
	JgMalayalamNnna /**< @stable ICU 60 */
	JgMalayalamNya  /**< @stable ICU 60 */
	JgMalayalamRa   /**< @stable ICU 60 */
	JgMalayalamSsa  /**< @stable ICU 60 */
	JgMalayalamTta  /**< @stable ICU 60 */

	JgHanafiRohingyaKinnaYa /**< @stable ICU 62 */
	JgHanafiRohingyaPa      /**< @stable ICU 62 */

	JgThinYeh      /**< @stable ICU 70 */
	JgVerticalTail /**< @stable ICU 70 */
)

/**
 * This specifies the language directional property of a character set.
 * @stable ICU 2.0
 */
type CharDirection int32
/*
 * Note: UCharDirection constants and their API comments are parsed by preparseucd.py.
 * It matches pairs of lines like
 *     / ** comment... * /
 *     U_<[A-Z_]+> = <value>,
 */

const (
	/** L @stable ICU 2.0 */
	LeftToRight CharDirection = 0
	/** R @stable ICU 2.0 */
	RightToLeft CharDirection = 1
	/** EN @stable ICU 2.0 */
	EuropeanNumber CharDirection = 2
	/** ES @stable ICU 2.0 */
	EuropeanNumberSeparator CharDirection = 3
	/** ET @stable ICU 2.0 */
	EuropeanNumberTerminator CharDirection = 4
	/** AN @stable ICU 2.0 */
	ArabicNumber CharDirection = 5
	/** CS @stable ICU 2.0 */
	CommonNumberSeparator CharDirection = 6
	/** B @stable ICU 2.0 */
	BlockSeparator CharDirection = 7
	/** S @stable ICU 2.0 */
	SegmentSeparator CharDirection = 8
	/** WS @stable ICU 2.0 */
	WhiteSpaceNeutral CharDirection = 9
	/** ON @stable ICU 2.0 */
	OtherNeutral CharDirection = 10
	/** LRE @stable ICU 2.0 */
	LeftToRightEmbedding CharDirection = 11
	/** LRO @stable ICU 2.0 */
	LeftToRightOverride CharDirection = 12
	/** AL @stable ICU 2.0 */
	RightToLeftArabic CharDirection = 13
	/** RLE @stable ICU 2.0 */
	RightToLeftEmbedding CharDirection = 14
	/** RLO @stable ICU 2.0 */
	RightToLeftOverride CharDirection = 15
	/** PDF @stable ICU 2.0 */
	PopDirectionalFormat CharDirection = 16
	/** NSM @stable ICU 2.0 */
	DirNonSpacingMark CharDirection = 17
	/** BN @stable ICU 2.0 */
	BoundaryNeutral CharDirection = 18
	/** FSI @stable ICU 52 */
	StrongIsolate CharDirection = 19
	/** LRI @stable ICU 52 */
	LeftToRightIsolate CharDirection = 20
	/** RLI @stable ICU 52 */
	RightToLeftIsolate CharDirection = 21
	/** PDI @stable ICU 52 */
	PopDirectionalIsolate CharDirection = 22
)

// propertySet is the sink that AddPropertyStarts feeds: an accumulator of
// code points (and ranges) at which some bidi property value changes.
type propertySet interface {
	AddRune(ch rune)
	AddRuneRange(from rune, to rune)
}

// AddPropertyStarts adds to sa every code point at which a bidi property
// value may change: the start of each same-value trie range, each entry of
// the bidi mirroring table, and each boundary in the two Joining_Group
// arrays.
func AddPropertyStarts(sa propertySet) {
	/* add the start code point of each same-value range of the trie */
	trie().Enum(nil, func(start, _ rune, _ uint32) bool {
		sa.AddRune(start)
		return true
	})

	idxs := indexes()
	mrs := mirrors()
	/* add the code points from the bidi mirroring table */
	length := idxs[ixMirrorLength]
	for i := int32(0); i < length; i++ {
		c := mirrorCodePoint(rune(mrs[i]))
		sa.AddRuneRange(c, c+1)
	}

	/* add the code points from the Joining_Group array where the value changes */
	start := idxs[ixJgStart]
	limit := idxs[ixJgLimit]
	jgArray := jg()
	// Two passes: the first over [ixJgStart, ixJgLimit) with jg(), the
	// second over [ixJgStart2, ixJgLimit2) with jg2(). prev is reset to 0
	// at the top of each pass.
	for {
		prev := uint8(0)
		for start < limit {
			jg := jgArray[0]
			jgArray = jgArray[1:]
			if jg != prev {
				sa.AddRune(start)
				prev = jg
			}
			start++
		}
		if prev != 0 {
			/* add the limit code point if the last value was not 0 (it is now start==limit) */
			sa.AddRune(limit)
		}
		if limit == idxs[ixJgLimit] {
			/* switch to the second Joining_Group range */
			start = idxs[ixJgStart2]
			limit = idxs[ixJgLimit2]
			jgArray = jg2()
		} else {
			break
		}
	}

	/* add code points with hardcoded properties, plus the ones following them */

	/* (none right now) */
}

// HasFlag reports whether bit `shift` is set in props.
func HasFlag(props uint16, shift int) bool {
	return ((props >> shift) & 1) != 0
}

// mirrorCodePoint extracts the code point from a mirroring-table entry,
// which stores the code point in its low 21 bits.
func mirrorCodePoint(m rune) rune {
	return m & 0x1fffff
}

// IsJoinControl reports whether c has the Join_Control binary property.
func IsJoinControl(c rune) bool {
	props := trie().Get16(c)
	return HasFlag(props, joinControlShift)
}

// JoinType returns the Joining_Type of c, stored in the bits selected by
// jtMask of the trie word.
func JoinType(c rune) JoiningType {
	props := trie().Get16(c)
	return JoiningType((props & jtMask) >> jtShift)
}

// JoinGroup returns the Joining_Group of c. The values are stored in two
// dense arrays covering the ranges [ixJgStart, ixJgLimit) and
// [ixJgStart2, ixJgLimit2); anything outside both is JgNoJoiningGroup.
func JoinGroup(c rune) JoiningGroup {
	idxs := indexes()
	start := idxs[ixJgStart]
	limit := idxs[ixJgLimit]
	if start <= c && c < limit {
		return JoiningGroup(jg()[c-start])
	}
	start = idxs[ixJgStart2]
	limit = idxs[ixJgLimit2]
	if start <= c && c < limit {
		return JoiningGroup(jg2()[c-start])
	}
	return JgNoJoiningGroup
}

// IsMirrored reports whether c has the Bidi_Mirrored binary property.
func IsMirrored(c rune) bool {
	props := trie().Get16(c)
	return HasFlag(props, isMirroredShift)
}

// IsBidiControl reports whether c has the Bidi_Control binary property.
func IsBidiControl(c rune) bool {
	props := trie().Get16(c)
	return HasFlag(props, bidiControlShift)
}

// PairedBracketType returns the Bidi_Paired_Bracket_Type of c, stored in
// the bits selected by bptMask of the trie word.
func PairedBracketType(c rune) UPairedBracketType {
	props := trie().Get16(c)
	return UPairedBracketType((props & bptMask) >> bptShift)
}

// Class returns the bidi class (general direction) of c, stored in the
// low five bits (classMask) of the trie word.
func Class(c rune) CharDirection {
	props := trie().Get16(c)
	return CharDirection(props & classMask)
}
trie().Get16(c) + return CharDirection(props & classMask) +} diff --git a/go/mysql/icuregex/internal/ucase/fold.go b/go/mysql/icuregex/internal/ucase/fold.go new file mode 100644 index 00000000000..728142042ba --- /dev/null +++ b/go/mysql/icuregex/internal/ucase/fold.go @@ -0,0 +1,243 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ucase + +import ( + "math/bits" + + "vitess.io/vitess/go/mysql/icuregex/internal/utf16" +) + +func FoldRunes(str []rune) []rune { + out := make([]rune, 0, len(str)) + for _, c := range str { + r, exp := FullFolding(c) + if exp == nil { + out = append(out, r) + continue + } + + for len(exp) > 0 { + r, exp = utf16.NextUnsafe(exp) + out = append(out, r) + } + } + return out +} + +/* + - Case folding is similar to lowercasing. + - The result may be a simple mapping, i.e., a single code point, or + - a full mapping, i.e., a string. + - If the case folding for a code point is the same as its simple (1:1) lowercase mapping, + - then only the lowercase mapping is stored. + * + - Some special cases are hardcoded because their conditions cannot be + - parsed and processed from CaseFolding.txt. 
+ * + - Unicode 3.2 CaseFolding.txt specifies for its status field: + +# C: common case folding, common mappings shared by both simple and full mappings. +# F: full case folding, mappings that cause strings to grow in length. Multiple characters are separated by spaces. +# S: simple case folding, mappings to single characters where different from F. +# T: special case for uppercase I and dotted uppercase I +# - For non-Turkic languages, this mapping is normally not used. +# - For Turkic languages (tr, az), this mapping can be used instead of the normal mapping for these characters. +# +# Usage: +# A. To do a simple case folding, use the mappings with status C + S. +# B. To do a full case folding, use the mappings with status C + F. +# +# The mappings with status T can be used or omitted depending on the desired case-folding +# behavior. (The default option is to exclude them.) + + - Unicode 3.2 has 'T' mappings as follows: + +0049; T; 0131; # LATIN CAPITAL LETTER I +0130; T; 0069; # LATIN CAPITAL LETTER I WITH DOT ABOVE + + - while the default mappings for these code points are: + +0049; C; 0069; # LATIN CAPITAL LETTER I +0130; F; 0069 0307; # LATIN CAPITAL LETTER I WITH DOT ABOVE + + - U+0130 has no simple case folding (simple-case-folds to itself). 
// Fold returns the simple (1:1) case folding of c, per the C+S mappings of
// CaseFolding.txt (see the comment above). If c has no simple folding, c
// itself is returned.
func Fold(c rune) rune {
	props := trie().Get16(c)
	if !hasException(props) {
		// Fast path: no exception word; uppercase/titlecase characters
		// fold via the same signed delta used for lowercasing.
		if isUpperOrTitle(props) {
			c += getDelta(props)
		}
	} else {
		pe := getExceptions(props)
		excWord := pe[0]
		pe = pe[1:]
		if (excWord & excConditionalFold) != 0 {
			/* special case folding mappings, hardcoded */
			/* default mappings */
			if c == 0x49 {
				/* 0049; C; 0069; # LATIN CAPITAL LETTER I */
				return 0x69
			} else if c == 0x130 {
				/* no simple case folding for U+0130 */
				return c
			}
		}
		if (excWord & excNoSimpleCaseFolding) != 0 {
			return c
		}
		if hasSlot(excWord, excDelta) && isUpperOrTitle(props) {
			// Delta stored in an exception slot; sign is carried in
			// the excDeltaIsNegative flag.
			var delta int32
			delta, _ = getSlotValue(excWord, excDelta, pe)
			if excWord&excDeltaIsNegative == 0 {
				return c + delta
			}
			return c - delta
		}

		// Prefer an explicit fold slot; fall back to the lower slot.
		var idx int32
		if hasSlot(excWord, excFold) {
			idx = excFold
		} else if hasSlot(excWord, excLower) {
			idx = excLower
		} else {
			return c
		}
		c, _ = getSlotValue(excWord, idx, pe)
	}
	return c
}

// FullFolding returns the full case folding of c (the C+F mappings of
// CaseFolding.txt). If the folding is a single code point, it is returned
// with a nil slice; if it is a string, -1 is returned together with the
// UTF-16 units of the expansion.
func FullFolding(c rune) (rune, []uint16) {
	result := c
	props := trie().Get16(c)

	if !hasException(props) {
		if isUpperOrTitle(props) {
			result = c + getDelta(props)
		}
		return result, nil
	}

	pe := getExceptions(props)
	excWord := pe[0]
	pe = pe[1:]
	var idx int32

	if excWord&excConditionalFold != 0 {
		/* use hardcoded conditions and mappings */
		/* default mappings */
		if c == 0x49 {
			/* 0049; C; 0069; # LATIN CAPITAL LETTER I */
			return 0x69, nil
		} else if c == 0x130 {
			/* 0130; F; 0069 0307; # LATIN CAPITAL LETTER I WITH DOT ABOVE */
			return -1, []uint16{0x69, 0x307}
		}
	} else if hasSlot(excWord, excFullMappings) {
		full, pe := getSlotValue(excWord, excFullMappings, pe)

		/* start of full case mapping strings */
		pe = pe[1:]

		/* skip the lowercase result string */
		pe = pe[full&fullLower:]
		// The folding string length lives in the second nibble.
		full = (full >> 4) & 0xf

		if full != 0 {
			/* set the output pointer to the result string */
			return -1, pe[:full]
		}
	}

	if excWord&excNoSimpleCaseFolding != 0 {
		return result, nil
	}
	if hasSlot(excWord, excDelta) && isUpperOrTitle(props) {
		delta, _ := getSlotValue(excWord, excDelta, pe)
		if excWord&excDeltaIsNegative == 0 {
			return c + delta, nil
		}
		return c - delta, nil
	}
	if hasSlot(excWord, excFold) {
		idx = excFold
	} else if hasSlot(excWord, excLower) {
		idx = excLower
	} else {
		return c, nil
	}
	result, _ = getSlotValue(excWord, idx, pe)
	return result, nil
}

// Exception slot indexes. Each bit idx set in an exception word means the
// corresponding optional value slot is present after the word.
const (
	excLower = iota
	excFold
	excUpper
	excTitle
	excDelta
	exc5 /* reserved */
	excClosure
	excFullMappings
)

// Flag bits of the exception word (beyond the slot-presence bits).
const (
	/* complex/conditional mappings */
	excConditionalSpecial  = 0x4000
	excConditionalFold     = 0x8000
	excNoSimpleCaseFolding = 0x200
	excDeltaIsNegative     = 0x400
	excSensitive           = 0x800

	excDoubleSlots = 0x100
)

// isUpperOrTitle reports whether the trie word marks c as uppercase or
// titlecase (bit 1).
func isUpperOrTitle(props uint16) bool {
	return props&2 != 0
}

// getDelta extracts the signed case-mapping delta stored in the top nine
// bits of the trie word (arithmetic shift preserves the sign).
func getDelta(props uint16) rune {
	return rune(int16(props) >> 7)
}

// getExceptions returns the exception data starting at the index stored in
// bits 4..15 of the trie word; element 0 is the exception word itself.
func getExceptions(props uint16) []uint16 {
	return exceptions()[props>>4:]
}

// hasSlot reports whether slot idx is present in the exception word flags.
func hasSlot(flags uint16, idx int32) bool {
	return (flags & (1 << idx)) != 0
}

// slotOffset counts how many slots precede slot idx, i.e. the number of
// presence bits set below idx.
func slotOffset(flags uint16, idx int32) int {
	return bits.OnesCount8(uint8(flags & ((1 << idx) - 1)))
}

// getSlotValue reads the value of slot idx from pExc16 (which starts just
// after the exception word). Slots are one uint16 each, or two (a 32-bit
// big-endian pair) when excDoubleSlots is set. It returns the value and
// the slice positioned at the slot's last unit.
func getSlotValue(excWord uint16, idx int32, pExc16 []uint16) (int32, []uint16) {
	if excWord&excDoubleSlots == 0 {
		pExc16 = pExc16[slotOffset(excWord, idx):]
		return int32(pExc16[0]), pExc16
	}
	pExc16 = pExc16[2*slotOffset(excWord, idx):]
	return (int32(pExc16[0]) << 16) | int32(pExc16[1]), pExc16[1:]
}
// Lazily-loaded ucase data: the case-properties trie and the shared
// exceptions array, parsed once from the embedded ucase.icu payload.
var ucaseOnce sync.Once
var ucase struct {
	trie       *utrie.UTrie2
	exceptions []uint16
}

// trie returns the case-properties trie, loading the data on first use.
func trie() *utrie.UTrie2 {
	loadUCase()
	return ucase.trie
}

// exceptions returns the exception array, loading the data on first use.
func exceptions() []uint16 {
	loadUCase()
	return ucase.exceptions
}

// loadUCase parses the embedded ucase.icu data exactly once; a parse
// failure is a programming/packaging error and panics.
func loadUCase() {
	ucaseOnce.Do(func() {
		b := udata.NewBytes(icudata.UCase)
		if err := readData(b); err != nil {
			panic(err)
		}
	})
}

// readData parses a ucase.icu payload: header, index array, serialized
// trie, then the exceptions array. It validates the data format tag
// ("cASE" = 0x63 0x41 0x53 0x45) and major format version 4.
func readData(bytes *udata.Bytes) error {
	err := bytes.ReadHeader(func(info *udata.DataInfo) bool {
		return info.DataFormat[0] == 0x63 &&
			info.DataFormat[1] == 0x41 &&
			info.DataFormat[2] == 0x53 &&
			info.DataFormat[3] == 0x45 &&
			info.FormatVersion[0] == 4
	})
	if err != nil {
		return err
	}

	// indexes[0] is the number of indexes, including itself.
	count := int32(bytes.Uint32())
	if count < ixTop {
		return errors.New("indexes[0] too small in ucase.icu")
	}

	indexes := make([]int32, count)
	indexes[0] = count

	for i := int32(1); i < count; i++ {
		indexes[i] = int32(bytes.Uint32())
	}

	ucase.trie, err = utrie.UTrie2FromBytes(bytes)
	if err != nil {
		return err
	}

	// The serialized trie may be shorter than the space reserved for it;
	// skip any padding so the exceptions array starts in the right place.
	expectedTrieLength := indexes[ixTrieSize]
	trieLength := ucase.trie.SerializedLength()

	if trieLength > expectedTrieLength {
		return errors.New("ucase.icu: not enough bytes for the trie")
	}

	bytes.Skip(expectedTrieLength - trieLength)

	if n := indexes[ixExcLength]; n > 0 {
		ucase.exceptions = bytes.Uint16Slice(n)
	}

	return nil
}
// Positions in the ucase.icu index array (see readData).
const (
	ixIndexTop      = 0
	ixLength        = 1
	ixTrieSize      = 2
	ixExcLength     = 3
	ixUnfoldLength  = 4
	ixMaxFullLength = 15
	ixTop           = 16
)

// propertySet is the sink for AddPropertyStarts/AddCaseClosure: an
// accumulator of code points. (Unlike the ubidi variant, only single
// runes are added here.)
type propertySet interface {
	AddRune(ch rune)
}

// AddPropertyStarts adds to sa the start code point of every same-value
// range of the case-properties trie.
func AddPropertyStarts(sa propertySet) {
	/* add the start code point of each same-value range of the trie */
	trie().Enum(nil, func(start, _ rune, _ uint32) bool {
		sa.AddRune(start)
		return true
	})

	/* add code points with hardcoded properties, plus the ones following them */

	/* (none right now, see comment below) */

	/*
	 * Omit code points with hardcoded specialcasing properties
	 * because we do not build property UnicodeSets for them right now.
	 */
}

// Nibble layout of the excFullMappings slot value: each nibble holds the
// length of one full-mapping string, in the order lower, fold, upper,
// title.
const (
	fullMappingsMaxLength = (4 * 0xf)
	closureMaxLength      = 0xf

	fullLower   = 0xf
	fullFolding = 0xf0
	fullUpper   = 0xf00
	fullTitle   = 0xf000
)

// AddCaseClosure adds to sa every single code point in the case closure
// of c: all simple case mappings plus the closure string from the
// exception data. (Multi-code-point closure strings are not added; see
// the comment for U+0130.)
func AddCaseClosure(c rune, sa propertySet) {
	/*
	 * Hardcode the case closure of i and its relatives and ignore the
	 * data file data for these characters.
	 * The Turkic dotless i and dotted I with their case mapping conditions
	 * and case folding option make the related characters behave specially.
	 * This code matches their closure behavior to their case folding behavior.
	 */

	switch c {
	case 0x49:
		/* regular i and I are in one equivalence class */
		sa.AddRune(0x69)
		return
	case 0x69:
		sa.AddRune(0x49)
		return
	case 0x130:
		/* dotted I is in a class with <0069 0307> (for canonical equivalence with <0049 0307>) */
		// the Regex engine calls removeAllStrings() on all UnicodeSets, so we don't need to insert them
		// sa->addString(sa->set, iDot, 2);
		return
	case 0x131:
		/* dotless i is in a class by itself */
		return
	default:
		/* otherwise use the data file data */
		break
	}

	props := trie().Get16(c)
	if !hasException(props) {
		if getPropsType(props) != None {
			/* add the one simple case mapping, no matter what type it is */
			delta := getDelta(props)
			if delta != 0 {
				sa.AddRune(c + delta)
			}
		}
	} else {
		/*
		 * c has exceptions, so there may be multiple simple and/or
		 * full case mappings. Add them all.
		 */
		pe := getExceptions(props)
		excWord := pe[0]
		pe = pe[1:]
		var idx int32
		var closure []uint16

		/* add all simple case mappings */
		for idx = excLower; idx <= excTitle; idx++ {
			if hasSlot(excWord, idx) {
				c, _ = getSlotValue(excWord, idx, pe)
				sa.AddRune(c)
			}
		}
		if hasSlot(excWord, excDelta) {
			delta, _ := getSlotValue(excWord, excDelta, pe)
			if excWord&excDeltaIsNegative == 0 {
				sa.AddRune(c + delta)
			} else {
				sa.AddRune(c - delta)
			}
		}

		/* get the closure string pointer & length */
		if hasSlot(excWord, excClosure) {
			closureLength, pe1 := getSlotValue(excWord, excClosure, pe)
			closureLength &= closureMaxLength /* higher bits are reserved */
			closure = pe1[1 : 1+closureLength] /* behind this slot, unless there are full case mappings */
		}

		/* add the full case folding */
		if hasSlot(excWord, excFullMappings) {
			fullLength, pe1 := getSlotValue(excWord, excFullMappings, pe)

			/* start of full case mapping strings */
			pe1 = pe1[1:]

			fullLength &= 0xffff /* bits 16 and higher are reserved */

			/* skip the lowercase result string */
			pe1 = pe1[fullLength&fullLower:]
			fullLength >>= 4

			/* skip adding the case folding strings */
			length := fullLength & 0xf
			pe1 = pe1[length:]

			/* skip the uppercase and titlecase strings */
			fullLength >>= 4
			pe1 = pe1[fullLength&0xf:]
			fullLength >>= 4
			pe1 = pe1[fullLength:]

			// The closure string actually sits after the full-mapping
			// strings; re-derive it from the skipped position, keeping
			// the length found in the closure slot.
			closure = pe1[:len(closure)]
		}

		/* add each code point in the closure string */
		for len(closure) > 0 {
			c, closure = utf16.NextUnsafe(closure)
			sa.AddRune(c)
		}
	}
}
pe1[fullLength&fullLower:] + fullLength >>= 4 + + /* skip adding the case folding strings */ + length := fullLength & 0xf + pe1 = pe1[length:] + + /* skip the uppercase and titlecase strings */ + fullLength >>= 4 + pe1 = pe1[fullLength&0xf:] + fullLength >>= 4 + pe1 = pe1[fullLength:] + + closure = pe1[:len(closure)] + } + + /* add each code point in the closure string */ + for len(closure) > 0 { + c, closure = utf16.NextUnsafe(closure) + sa.AddRune(c) + } + } +} + +const dotMask = 0x60 + +const ( + noDot = 0 /* normal characters with cc=0 */ + softDotted = 0x20 /* soft-dotted characters with cc=0 */ + above = 0x40 /* "above" accents with cc=230 */ + otherAccent = 0x60 /* other accent character (0> excDotShift) & dotMask) +} + +func IsCaseSensitive(c rune) bool { + props := trie().Get16(c) + if !hasException(props) { + return (props & sensitive) != 0 + } + pe := getExceptions(props) + return (pe[0] & excSensitive) != 0 +} + +func ToFullLower(c rune) rune { + // The sign of the result has meaning, input must be non-negative so that it can be returned as is. 
// ToFullLower returns the full lowercase mapping of c encoded in a single
// rune: the mapped code point for a simple mapping, the length of the
// mapping string (a small positive value) for a full mapping, or the
// bitwise complement of c (negative) when there is no mapping.
func ToFullLower(c rune) rune {
	// The sign of the result has meaning, input must be non-negative so that it can be returned as is.
	result := c
	props := trie().Get16(c)
	if !hasException(props) {
		if isUpperOrTitle(props) {
			result = c + getDelta(props)
		}
	} else {
		pe := getExceptions(props)
		excWord := pe[0]
		pe = pe[1:]

		if excWord&excConditionalSpecial != 0 {
			/* use hardcoded conditions and mappings */
			if c == 0x130 {
				// U+0130 lowercases to the two-unit string <0069 0307>;
				// return its length.
				return 2
			}
			/* no known conditional special case mapping, use a normal mapping */
		} else if hasSlot(excWord, excFullMappings) {
			full, _ := getSlotValue(excWord, excFullMappings, pe)
			// The lowercase string length is in the lowest nibble.
			full = full & fullLower
			if full != 0 {
				/* return the string length */
				return full
			}
		}

		if hasSlot(excWord, excDelta) && isUpperOrTitle(props) {
			delta, _ := getSlotValue(excWord, excDelta, pe)
			if (excWord & excDeltaIsNegative) == 0 {
				return c + delta
			}
			return c - delta
		}
		if hasSlot(excWord, excLower) {
			result, _ = getSlotValue(excWord, excLower, pe)
		}
	}

	// No mapping found: signal it with the complement (negative value).
	if result == c {
		return ^result
	}
	return result
}

// ToFullUpper is ToFullLower's uppercase counterpart; see toUpperOrTitle
// for the result encoding.
func ToFullUpper(c rune) rune {
	return toUpperOrTitle(c, true)
}

// ToFullTitle is ToFullLower's titlecase counterpart; see toUpperOrTitle
// for the result encoding.
func ToFullTitle(c rune) rune {
	return toUpperOrTitle(c, false)
}

// toUpperOrTitle implements ToFullUpper (upperNotTitle=true) and
// ToFullTitle (upperNotTitle=false). Result encoding matches ToFullLower:
// mapped code point, full-mapping string length, or ^c for no mapping.
func toUpperOrTitle(c rune, upperNotTitle bool) rune {
	result := c
	props := trie().Get16(c)
	if !hasException(props) {
		if getPropsType(props) == Lower {
			result = c + getDelta(props)
		}
	} else {
		pe := getExceptions(props)
		excWord := pe[0]
		pe = pe[1:]

		if excWord&excConditionalSpecial != 0 {
			if c == 0x0587 {
				// U+0587 has a two-unit full mapping; return its length.
				return 2
			}
			/* no known conditional special case mapping, use a normal mapping */
		} else if hasSlot(excWord, excFullMappings) {
			full, _ := getSlotValue(excWord, excFullMappings, pe)

			/* skip the lowercase and case-folding result strings */
			full >>= 8

			if upperNotTitle {
				full &= 0xf
			} else {
				/* skip the uppercase result string */
				full = (full >> 4) & 0xf
			}

			if full != 0 {
				/* return the string length */
				return full
			}
		}

		if hasSlot(excWord, excDelta) && getPropsType(props) == Lower {
			delta, _ := getSlotValue(excWord, excDelta, pe)
			if (excWord & excDeltaIsNegative) == 0 {
				return c + delta
			}
			return c - delta
		}
		var idx int32
		if !upperNotTitle && hasSlot(excWord, excTitle) {
			idx = excTitle
		} else if hasSlot(excWord, excUpper) {
			/* here, titlecase is same as uppercase */
			idx = excUpper
		} else {
			return ^c
		}
		result, _ = getSlotValue(excWord, idx, pe)
	}

	if result == c {
		return ^result
	}
	return result
}

// GetTypeOrIgnorable returns the low three bits of the trie word: the
// case type plus the case-ignorable bit.
func GetTypeOrIgnorable(c rune) int32 {
	props := trie().Get16(c)
	return int32(props & 7)
}

// Type is the case type of a character.
type Type int32

const (
	None Type = iota
	Lower
	Upper
	Title
)

// typeMask selects the two case-type bits of the trie word.
const typeMask = 3

// GetType returns the case type of c.
func GetType(c rune) Type {
	props := trie().Get16(c)
	return getPropsType(props)
}

// getPropsType extracts the case type from a trie word.
func getPropsType(props uint16) Type {
	return Type(props & typeMask)
}
// Mask returns the single-bit mask 1<<x for a category (or similar small
// enum) value, for combining into multi-category bit sets.
func Mask[T ~int | ~int8](x T) uint32 {
	return 1 << x
}

// Category is a Unicode general category value (UCharCategory).
type Category int8

const (
	/*
	 * Note: UCharCategory constants and their API comments are parsed by preparseucd.py.
	 * It matches pairs of lines like
	 *     / ** comment... * /
	 *     U_<[A-Z_]+> = <value>,
	 */

	/** Non-category for unassigned and non-character code points. @stable ICU 2.0 */
	Unassigned Category = 0
	/** Cn "Other, Not Assigned (no characters in [UnicodeData.txt] have this property)" (same as U_UNASSIGNED!) @stable ICU 2.0 */
	// iota is 1 on this ConstSpec, so `iota - 1` makes GeneralOtherTypes
	// equal to Unassigned (0), and every following constant continues the
	// sequence 1, 2, 3, ... matching ICU's numeric values.
	GeneralOtherTypes Category = iota - 1
	/** Lu @stable ICU 2.0 */
	UppercaseLetter
	/** Ll @stable ICU 2.0 */
	LowercaseLetter
	/** Lt @stable ICU 2.0 */
	TitlecaseLetter
	/** Lm @stable ICU 2.0 */
	ModifierLetter
	/** Lo @stable ICU 2.0 */
	OtherLetter
	/** Mn @stable ICU 2.0 */
	// NOTE(review): Mn is the "nonspacing mark" category; the "Mask"
	// suffix in this identifier (and CombiningSpacingMask) looks like a
	// typo for "Mark", but the name is kept as-is for compatibility with
	// existing callers.
	NonSpacingMask
	/** Me @stable ICU 2.0 */
	EnclosingMark
	/** Mc @stable ICU 2.0 */
	CombiningSpacingMask
	/** Nd @stable ICU 2.0 */
	DecimalDigitNumber
	/** Nl @stable ICU 2.0 */
	LetterNumber
	/** No @stable ICU 2.0 */
	OtherNumber
	/** Zs @stable ICU 2.0 */
	SpaceSeparator
	/** Zl @stable ICU 2.0 */
	LineSeparator
	/** Zp @stable ICU 2.0 */
	ParagraphSeparator
	/** Cc @stable ICU 2.0 */
	ControlChar
	/** Cf @stable ICU 2.0 */
	FormatChar
	/** Co @stable ICU 2.0 */
	PrivateUseChar
	/** Cs @stable ICU 2.0 */
	Surrogate
	/** Pd @stable ICU 2.0 */
	DashPunctuation
	/** Ps @stable ICU 2.0 */
	StartPunctuation
	/** Pe @stable ICU 2.0 */
	EndPunctuation
	/** Pc @stable ICU 2.0 */
	ConnectorPunctuation
	/** Po @stable ICU 2.0 */
	OtherPunctuation
	/** Sm @stable ICU 2.0 */
	MathSymbol
	/** Sc @stable ICU 2.0 */
	CurrencySymbol
	/** Sk @stable ICU 2.0 */
	ModifierSymbol
	/** So @stable ICU 2.0 */
	OtherSymbol
	/** Pi @stable ICU 2.0 */
	InitialPunctuation
	/** Pf @stable ICU 2.0 */
	FinalPunctuation
	/**
	 * One higher than the last enum UCharCategory constant.
	 * This numeric value is stable (will not change), see
	 * http://www.unicode.org/policies/stability_policy.html#Property_Value
	 *
	 * @stable ICU 2.0
	 */
	CharCategoryCount
)

var (
	GcCnMask = Mask(GeneralOtherTypes)

	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcLuMask = Mask(UppercaseLetter)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcLlMask = Mask(LowercaseLetter)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcLtMask = Mask(TitlecaseLetter)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcLmMask = Mask(ModifierLetter)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcLoMask = Mask(OtherLetter)

	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcMnMask = Mask(NonSpacingMask)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcMeMask = Mask(EnclosingMark)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcMcMask = Mask(CombiningSpacingMask)

	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcNdMask = Mask(DecimalDigitNumber)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcNlMask = Mask(LetterNumber)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcNoMask = Mask(OtherNumber)

	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcZsMask = Mask(SpaceSeparator)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcZlMask = Mask(LineSeparator)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcZpMask = Mask(ParagraphSeparator)

	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcCcMask = Mask(ControlChar)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcCfMask = Mask(FormatChar)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcCoMask = Mask(PrivateUseChar)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcCsMask = Mask(Surrogate)

	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcPdMask = Mask(DashPunctuation)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcPsMask = Mask(StartPunctuation)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcPeMask = Mask(EndPunctuation)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcPcMask = Mask(ConnectorPunctuation)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcPoMask = Mask(OtherPunctuation)

	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcSmMask = Mask(MathSymbol)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcScMask = Mask(CurrencySymbol)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcSkMask = Mask(ModifierSymbol)
	/** Mask constant for a UCharCategory. @stable ICU 2.1 */
	GcSoMask = Mask(OtherSymbol)

	/** Mask constant for multiple UCharCategory bits (L Letters). @stable ICU 2.1 */
	GcLMask = (GcLuMask | GcLlMask | GcLtMask | GcLmMask | GcLoMask)

	/** Mask constant for multiple UCharCategory bits (LC Cased Letters). @stable ICU 2.1 */
	GcLcMask = (GcLuMask | GcLlMask | GcLtMask)

	/** Mask constant for multiple UCharCategory bits (M Marks). @stable ICU 2.1 */
	GcMMask = (GcMnMask | GcMeMask | GcMcMask)

	/** Mask constant for multiple UCharCategory bits (N Numbers). @stable ICU 2.1 */
	GcNMask = (GcNdMask | GcNlMask | GcNoMask)

	/** Mask constant for multiple UCharCategory bits (Z Separators). @stable ICU 2.1 */
	GcZMask = (GcZsMask | GcZlMask | GcZpMask)
)

// The character-age (Unicode version) value is stored in the top bits of
// the 32-bit property word.
const upropsAgeShift = 24
const maxVersionLength = 4
const versionDelimiter = '.'

// UVersionInfo holds a dotted Unicode/ICU version (e.g. 15.0.0.0).
type UVersionInfo [maxVersionLength]uint8
*/ + UPropsNtvNone = 0 + /** Decimal digits: nv=0..9 */ + UPropsNtvDecimalStart = 1 + /** Other digits: nv=0..9 */ + UPropsNtvDigitStart = 11 + /** Small integers: nv=0..154 */ + UPropsNtvNumericStart = 21 + /** Fractions: ((ntv>>4)-12) / ((ntv&0xf)+1) = -1..17 / 1..16 */ + UPropsNtvFractionStart = 0xb0 + /** + * Large integers: + * ((ntv>>5)-14) * 10^((ntv&0x1f)+2) = (1..9)*(10^2..10^33) + * (only one significant decimal digit) + */ + UPropsNtvLargeStart = 0x1e0 + /** + * Sexagesimal numbers: + * ((ntv>>2)-0xbf) * 60^((ntv&3)+1) = (1..9)*(60^1..60^4) + */ + UPropsNtvBase60Start = 0x300 + /** + * Fraction-20 values: + * frac20 = ntv-0x324 = 0..0x17 -> 1|3|5|7 / 20|40|80|160|320|640 + * numerator: num = 2*(frac20&3)+1 + * denominator: den = 20<<(frac20>>2) + */ + UPropsNtvFraction20Start = UPropsNtvBase60Start + 36 // 0x300+9*4=0x324 + /** + * Fraction-32 values: + * frac32 = ntv-0x34c = 0..15 -> 1|3|5|7 / 32|64|128|256 + * numerator: num = 2*(frac32&3)+1 + * denominator: den = 32<<(frac32>>2) + */ + UPropsNtvFraction32Start = UPropsNtvFraction20Start + 24 // 0x324+6*4=0x34c + /** No numeric value (yet). */ + UPropsNtvReservedStart = UPropsNtvFraction32Start + 16 // 0x34c+4*4=0x35c + + UPropsNtvMaxSmallInt = UPropsNtvFractionStart - UPropsNtvNumericStart - 1 +) + +const noNumericValue = -123456789.0 diff --git a/go/mysql/icuregex/internal/uchar/loader.go b/go/mysql/icuregex/internal/uchar/loader.go new file mode 100644 index 00000000000..fab54f85e0a --- /dev/null +++ b/go/mysql/icuregex/internal/uchar/loader.go @@ -0,0 +1,139 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. 
+License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uchar + +import ( + "errors" + "sync" + + "vitess.io/vitess/go/mysql/icuregex/internal/icudata" + "vitess.io/vitess/go/mysql/icuregex/internal/udata" + "vitess.io/vitess/go/mysql/icuregex/internal/utrie" +) + +var upropsOnce sync.Once +var uprops struct { + trie *utrie.UTrie2 + trie2 *utrie.UTrie2 + vectorsColumns int32 + vectors []uint32 + scriptExtensions []uint16 +} + +func trie() *utrie.UTrie2 { + loadUProps() + return uprops.trie +} + +func trie2() *utrie.UTrie2 { + loadUProps() + return uprops.trie2 +} + +func vectorsColumns() int32 { + loadUProps() + return uprops.vectorsColumns +} + +func vectors() []uint32 { + loadUProps() + return uprops.vectors +} + +func scriptExtensions() []uint16 { + loadUProps() + return uprops.scriptExtensions +} + +func loadUProps() { + upropsOnce.Do(func() { + b := udata.NewBytes(icudata.UProps) + if err := readData(b); err != nil { + panic(err) + } + }) +} + +func readData(bytes *udata.Bytes) error { + err := bytes.ReadHeader(func(info *udata.DataInfo) bool { + return info.DataFormat[0] == 0x55 && + info.DataFormat[1] == 0x50 && + info.DataFormat[2] == 0x72 && + info.DataFormat[3] == 0x6f && + info.FormatVersion[0] == 7 + }) + if err != nil { + return err + } + + propertyOffset := bytes.Int32() + /* exceptionOffset = */ bytes.Int32() + /* caseOffset = */ bytes.Int32() + additionalOffset := bytes.Int32() + 
additionalVectorsOffset := bytes.Int32() + uprops.vectorsColumns = bytes.Int32() + scriptExtensionsOffset := bytes.Int32() + reservedOffset7 := bytes.Int32() + /* reservedOffset8 = */ bytes.Int32() + /* dataTopOffset = */ bytes.Int32() + _ = bytes.Int32() + _ = bytes.Int32() + bytes.Skip((16 - 12) << 2) + + uprops.trie, err = utrie.UTrie2FromBytes(bytes) + if err != nil { + return err + } + + expectedTrieLength := (propertyOffset - 16) * 4 + trieLength := uprops.trie.SerializedLength() + + if trieLength > expectedTrieLength { + return errors.New("ucase.icu: not enough bytes for the trie") + } + + bytes.Skip(expectedTrieLength - trieLength) + bytes.Skip((additionalOffset - propertyOffset) * 4) + + if uprops.vectorsColumns > 0 { + uprops.trie2, err = utrie.UTrie2FromBytes(bytes) + if err != nil { + return err + } + + expectedTrieLength = (additionalVectorsOffset - additionalOffset) * 4 + trieLength = uprops.trie2.SerializedLength() + + if trieLength > expectedTrieLength { + return errors.New("ucase.icu: not enough bytes for the trie") + } + + bytes.Skip(expectedTrieLength - trieLength) + uprops.vectors = bytes.Uint32Slice(scriptExtensionsOffset - additionalVectorsOffset) + } + + if n := (reservedOffset7 - scriptExtensionsOffset) * 2; n > 0 { + uprops.scriptExtensions = bytes.Uint16Slice(n) + } + + return nil +} diff --git a/go/mysql/icuregex/internal/uchar/uchar.go b/go/mysql/icuregex/internal/uchar/uchar.go new file mode 100644 index 00000000000..e93b51d9bb4 --- /dev/null +++ b/go/mysql/icuregex/internal/uchar/uchar.go @@ -0,0 +1,316 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. 
+License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uchar + +import ( + "strconv" +) + +type PropertySet interface { + AddRune(ch rune) +} + +func VecAddPropertyStarts(sa PropertySet) { + trie2().Enum(nil, func(start, _ rune, _ uint32) bool { + sa.AddRune(start) + return true + }) +} + +const ( + tab = 0x0009 + lf = 0x000a + ff = 0x000c + cr = 0x000d + nbsp = 0x00a0 + cgj = 0x034f + figuresp = 0x2007 + hairsp = 0x200a + zwnj = 0x200c + zwj = 0x200d + rlm = 0x200f + nnbsp = 0x202f + zwnbsp = 0xfef +) + +func AddPropertyStarts(sa PropertySet) { + /* add the start code point of each same-value range of the main trie */ + trie().Enum(nil, func(start, _ rune, _ uint32) bool { + sa.AddRune(start) + return true + }) + + /* add code points with hardcoded properties, plus the ones following them */ + + /* add for u_isblank() */ + sa.AddRune(tab) + sa.AddRune(tab + 1) + + /* add for IS_THAT_CONTROL_SPACE() */ + sa.AddRune(cr + 1) /* range TAB..CR */ + sa.AddRune(0x1c) + sa.AddRune(0x1f + 1) + sa.AddRune(0x85) // NEXT LINE (NEL) + sa.AddRune(0x85 + 1) + + /* add for u_isIDIgnorable() what was not added above */ + sa.AddRune(0x7f) /* range DEL..NBSP-1, NBSP added below */ + sa.AddRune(hairsp) + sa.AddRune(rlm + 1) + sa.AddRune(0x206a) // INHIBIT SYMMETRIC SWAPPING + sa.AddRune(0x206f + 1) // NOMINAL DIGIT SHAPES + sa.AddRune(zwnbsp) + sa.AddRune(zwnbsp + 1) + + /* add no-break spaces for u_isWhitespace() 
what was not added above */ + sa.AddRune(nbsp) + sa.AddRune(nbsp + 1) + sa.AddRune(figuresp) + sa.AddRune(figuresp + 1) + sa.AddRune(nnbsp) + sa.AddRune(nnbsp + 1) + + /* add for u_digit() */ + sa.AddRune('a') + sa.AddRune('z' + 1) + sa.AddRune('A') + sa.AddRune('Z' + 1) + // fullwidth + sa.AddRune('a') + sa.AddRune('z' + 1) + sa.AddRune('A') + sa.AddRune('Z' + 1) + + /* add for u_isxdigit() */ + sa.AddRune('f' + 1) + sa.AddRune('F' + 1) + // fullwidth + sa.AddRune('f' + 1) + sa.AddRune('F' + 1) + + /* add for UCHAR_DEFAULT_IGNORABLE_CODE_POINT what was not added above */ + sa.AddRune(0x2060) /* range 2060..206f */ + sa.AddRune(0xfff0) + sa.AddRune(0xfffb + 1) + sa.AddRune(0xe0000) + sa.AddRune(0xe0fff + 1) + + /* add for UCHAR_GRAPHEME_BASE and others */ + sa.AddRune(cgj) + sa.AddRune(cgj + 1) +} + +func CharType(c rune) Category { + props := trie().Get16(c) + return getCategory(props) +} + +func getCategory(props uint16) Category { + return Category(props & 0x1f) +} + +func GetUnicodeProperties(c rune, column int) uint32 { + if column >= int(vectorsColumns()) { + return 0 + } + vecIndex := trie2().Get16(c) + return vectors()[int(vecIndex)+column] +} + +func ScriptExtension(idx uint32) uint16 { + return scriptExtensions()[idx] +} + +func ScriptExtensions(idx uint32) []uint16 { + return scriptExtensions()[idx:] +} + +func IsDigit(c rune) bool { + return CharType(c) == DecimalDigitNumber +} + +func IsPOSIXPrint(c rune) bool { + return CharType(c) == SpaceSeparator || IsGraphPOSIX(c) +} + +func IsGraphPOSIX(c rune) bool { + props := trie().Get16(c) + /* \p{space}\p{gc=Control} == \p{gc=Z}\p{Control} */ + /* comparing ==0 returns FALSE for the categories mentioned */ + return Mask(getCategory(props))&(GcCcMask|GcCsMask|GcCnMask|GcZMask) == 0 +} + +func IsXDigit(c rune) bool { + /* check ASCII and Fullwidth ASCII a-fA-F */ + if (c <= 0x66 && c >= 0x41 && (c <= 0x46 || c >= 0x61)) || + (c >= 0xff21 && c <= 0xff46 && (c <= 0xff26 || c >= 0xff41)) { + return true + } + 
return IsDigit(c) +} + +func IsBlank(c rune) bool { + if c <= 0x9f { + return c == 9 || c == 0x20 /* TAB or SPACE */ + } + /* Zs */ + return CharType(c) == SpaceSeparator +} + +func CharAge(c rune) UVersionInfo { + version := GetUnicodeProperties(c, 0) >> upropsAgeShift + return UVersionInfo{uint8(version >> 4), uint8(version & 0xf), 0, 0} +} + +func VersionFromString(str string) (version UVersionInfo) { + part := 0 + for len(str) > 0 && part < maxVersionLength { + if str[0] == versionDelimiter { + str = str[1:] + } + str, version[part] = parseInt(str) + part++ + } + return +} + +// parseInt is simplified but aims to mimic strtoul usage +// as it is used for ICU version parsing. +func parseInt(str string) (string, uint8) { + if str == "" { + return str, 0 + } + + start := 0 + end := 0 +whitespace: + for i := 0; i < len(str); i++ { + switch str[i] { + case ' ', '\f', '\n', '\r', '\t', '\v': + start++ + continue + default: + break whitespace + } + } + str = str[start:] + + for i := 0; i < len(str); i++ { + if str[i] < '0' || str[i] > '9' { + end = i + break + } + end++ + } + + val, err := strconv.ParseUint(str[start:end], 10, 8) + if err != nil { + return str[end:], 0 + } + return str[end:], uint8(val) +} + +const upropsNumericTypeValueShift = 6 + +func NumericTypeValue(c rune) uint16 { + props := trie().Get16(c) + return props >> upropsNumericTypeValueShift +} + +func NumericValue(c rune) float64 { + ntv := int32(NumericTypeValue(c)) + + if ntv == UPropsNtvNone { + return noNumericValue + } else if ntv < UPropsNtvDigitStart { + /* decimal digit */ + return float64(ntv - UPropsNtvDecimalStart) + } else if ntv < UPropsNtvNumericStart { + /* other digit */ + return float64(ntv - UPropsNtvDigitStart) + } else if ntv < UPropsNtvFractionStart { + /* small integer */ + return float64(ntv - UPropsNtvNumericStart) + } else if ntv < UPropsNtvLargeStart { + /* fraction */ + numerator := (ntv >> 4) - 12 + denominator := (ntv & 0xf) + 1 + return float64(numerator) / 
float64(denominator) + } else if ntv < UPropsNtvBase60Start { + /* large, single-significant-digit integer */ + mant := (ntv >> 5) - 14 + exp := (ntv & 0x1f) + 2 + numValue := float64(mant) + + /* multiply by 10^exp without math.h */ + for exp >= 4 { + numValue *= 10000. + exp -= 4 + } + switch exp { + case 3: + numValue *= 1000.0 + case 2: + numValue *= 100.0 + case 1: + numValue *= 10.0 + case 0: + default: + } + + return numValue + } else if ntv < UPropsNtvFraction20Start { + /* sexagesimal (base 60) integer */ + numValue := (ntv >> 2) - 0xbf + exp := (ntv & 3) + 1 + + switch exp { + case 4: + numValue *= 60 * 60 * 60 * 60 + case 3: + numValue *= 60 * 60 * 60 + case 2: + numValue *= 60 * 60 + case 1: + numValue *= 60 + case 0: + default: + } + + return float64(numValue) + } else if ntv < UPropsNtvFraction32Start { + // fraction-20 e.g. 3/80 + frac20 := ntv - UPropsNtvFraction20Start // 0..0x17 + numerator := 2*(frac20&3) + 1 + denominator := 20 << (frac20 >> 2) + return float64(numerator) / float64(denominator) + } else if ntv < UPropsNtvReservedStart { + // fraction-32 e.g. 3/64 + frac32 := ntv - UPropsNtvFraction32Start // 0..15 + numerator := 2*(frac32&3) + 1 + denominator := 32 << (frac32 >> 2) + return float64(numerator) / float64(denominator) + } else { + /* reserved */ + return noNumericValue + } +} diff --git a/go/mysql/icuregex/internal/udata/udata.go b/go/mysql/icuregex/internal/udata/udata.go new file mode 100644 index 00000000000..f20f8be1efa --- /dev/null +++ b/go/mysql/icuregex/internal/udata/udata.go @@ -0,0 +1,155 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. 
+License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package udata + +import ( + "encoding/binary" + "errors" + "unsafe" +) + +type DataInfo struct { + /** sizeof(UDataInfo) + * @stable ICU 2.0 */ + Size uint16 + + /** unused, set to 0 + * @stable ICU 2.0*/ + ReservedWord uint16 + + /* platform data properties */ + /** 0 for little-endian machine, 1 for big-endian + * @stable ICU 2.0 */ + IsBigEndian uint8 + + /** see U_CHARSET_FAMILY values in utypes.h + * @stable ICU 2.0*/ + CharsetFamily uint8 + + /** sizeof(UChar), one of { 1, 2, 4 } + * @stable ICU 2.0*/ + SizeofUChar uint8 + + /** unused, set to 0 + * @stable ICU 2.0*/ + ReservedByte uint8 + + /** data format identifier + * @stable ICU 2.0*/ + DataFormat [4]uint8 + + /** versions: [0] major [1] minor [2] milli [3] micro + * @stable ICU 2.0*/ + FormatVersion [4]uint8 + + /** versions: [0] major [1] minor [2] milli [3] micro + * @stable ICU 2.0*/ + DataVersion [4]uint8 +} + +type Bytes struct { + buf []byte + orig []byte + enc binary.ByteOrder +} + +func NewBytes(b []byte) *Bytes { + return &Bytes{buf: b, orig: b, enc: binary.LittleEndian} +} + +func (b *Bytes) ReadHeader(isValid func(info *DataInfo) bool) error { + type MappedData struct { + headerSize uint16 + magic1 uint8 + magic2 uint8 + } + + type DataHeader struct { + dataHeader MappedData + info DataInfo + } + + data := unsafe.SliceData(b.buf) + header := (*DataHeader)(unsafe.Pointer(data)) + 
+ if header.dataHeader.magic1 != 0xda || header.dataHeader.magic2 != 0x27 { + return errors.New("invalid magic number") + } + + if header.info.IsBigEndian != 0 { + return errors.New("unsupported: BigEndian data source") + } + + if !isValid(&header.info) { + return errors.New("failed to validate data header") + } + + b.buf = b.buf[header.dataHeader.headerSize:] + return nil +} + +func (b *Bytes) Uint8() uint8 { + u := b.buf[0] + b.buf = b.buf[1:] + return u +} +func (b *Bytes) Uint16() uint16 { + u := b.enc.Uint16(b.buf) + b.buf = b.buf[2:] + return u +} + +func (b *Bytes) Uint16Slice(size int32) []uint16 { + s := unsafe.Slice((*uint16)(unsafe.Pointer(unsafe.SliceData(b.buf))), size) + b.buf = b.buf[2*size:] + return s +} + +func (b *Bytes) Uint32Slice(size int32) []uint32 { + s := unsafe.Slice((*uint32)(unsafe.Pointer(unsafe.SliceData(b.buf))), size) + b.buf = b.buf[4*size:] + return s +} + +func (b *Bytes) Uint32() uint32 { + u := b.enc.Uint32(b.buf) + b.buf = b.buf[4:] + return u +} + +func (b *Bytes) Int32() int32 { + return int32(b.Uint32()) +} + +func (b *Bytes) Skip(size int32) { + b.buf = b.buf[size:] +} + +func (b *Bytes) Uint8Slice(n int32) []uint8 { + s := b.buf[:n] + b.buf = b.buf[n:] + return s +} + +func (b *Bytes) Position() int32 { + return int32(len(b.orig) - len(b.buf)) +} diff --git a/go/mysql/icuregex/internal/uemoji/loader.go b/go/mysql/icuregex/internal/uemoji/loader.go new file mode 100644 index 00000000000..7015491d069 --- /dev/null +++ b/go/mysql/icuregex/internal/uemoji/loader.go @@ -0,0 +1,69 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. 
+License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uemoji + +import ( + "sync" + + "vitess.io/vitess/go/mysql/icuregex/internal/icudata" + "vitess.io/vitess/go/mysql/icuregex/internal/udata" + "vitess.io/vitess/go/mysql/icuregex/internal/utrie" +) + +var uemojiOnce sync.Once +var uemoji struct { + trie *utrie.UcpTrie +} + +func loadUEmoji() { + uemojiOnce.Do(func() { + b := udata.NewBytes(icudata.UEmoji) + if err := readData(b); err != nil { + panic(err) + } + }) +} + +func trie() *utrie.UcpTrie { + loadUEmoji() + return uemoji.trie +} + +func readData(bytes *udata.Bytes) error { + err := bytes.ReadHeader(func(info *udata.DataInfo) bool { + return info.DataFormat[0] == 0x45 && + info.DataFormat[1] == 0x6d && + info.DataFormat[2] == 0x6f && + info.DataFormat[3] == 0x6a && + info.FormatVersion[0] == 1 + }) + if err != nil { + return err + } + + bytes.Skip(bytes.Int32() - 4) + uemoji.trie, err = utrie.UcpTrieFromBytes(bytes) + if err != nil { + return err + } + return nil +} diff --git a/go/mysql/icuregex/internal/uemoji/uemoji.go b/go/mysql/icuregex/internal/uemoji/uemoji.go new file mode 100644 index 00000000000..5cc89acd69a --- /dev/null +++ b/go/mysql/icuregex/internal/uemoji/uemoji.go @@ -0,0 +1,82 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. 
+ +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uemoji + +import ( + "vitess.io/vitess/go/mysql/icuregex/internal/utrie" +) + +type propertySet interface { + AddRune(ch rune) + AddRuneRange(from rune, to rune) +} + +func AddPropertyStarts(sa propertySet) { + // Add the start code point of each same-value range of the trie. + var start, end rune + for { + end, _ = trie().GetRange(start, utrie.UcpMapRangeNormal, 0, nil) + if end < 0 { + break + } + sa.AddRune(start) + start = end + 1 + } +} + +const ( + bitEmoji = 0 + bitEmojiPresentation = 1 + bitEmojiModifier = 2 + bitEmojiModifierBase = 3 + bitEmojiComponent = 4 + bitExtendedPictographic = 5 + bitBasicEmoji = 6 +) + +// Note: REGIONAL_INDICATOR is a single, hardcoded range implemented elsewhere. 
+var bitFlags = []int8{ + bitEmoji, + bitEmojiPresentation, + bitEmojiModifier, + bitEmojiModifierBase, + bitEmojiComponent, + -1, + -1, + bitExtendedPictographic, + bitBasicEmoji, + -1, + -1, + -1, + -1, + -1, + bitBasicEmoji, +} + +func HasBinaryProperty(c rune, which int) bool { + bit := bitFlags[which] + if bit < 0 { + return false // not a property that we support in this function + } + bits := trie().Get(c) + return ((bits >> bit) & 1) != 0 +} diff --git a/go/mysql/icuregex/internal/ulayout/ulayout.go b/go/mysql/icuregex/internal/ulayout/ulayout.go new file mode 100644 index 00000000000..dbf21d9460b --- /dev/null +++ b/go/mysql/icuregex/internal/ulayout/ulayout.go @@ -0,0 +1,128 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ulayout + +import ( + "errors" + "sync" + + "vitess.io/vitess/go/mysql/icuregex/internal/icudata" + "vitess.io/vitess/go/mysql/icuregex/internal/udata" + "vitess.io/vitess/go/mysql/icuregex/internal/utrie" +) + +var inpcTrie *utrie.UcpTrie +var inscTrie *utrie.UcpTrie +var voTrie *utrie.UcpTrie + +const ( + ixInpcTrieTop = 1 + ixInscTrieTop = 2 + ixVoTrieTop = 3 + + ixCount = 12 +) + +func InpcTrie() *utrie.UcpTrie { + loadLayouts() + return inpcTrie +} + +func InscTrie() *utrie.UcpTrie { + loadLayouts() + return inscTrie +} + +func VoTrie() *utrie.UcpTrie { + loadLayouts() + return voTrie +} + +var layoutsOnce sync.Once + +func loadLayouts() { + layoutsOnce.Do(func() { + b := udata.NewBytes(icudata.ULayout) + if err := readData(b); err != nil { + panic(err) + } + }) +} + +func readData(bytes *udata.Bytes) error { + err := bytes.ReadHeader(func(info *udata.DataInfo) bool { + return info.DataFormat[0] == 0x4c && + info.DataFormat[1] == 0x61 && + info.DataFormat[2] == 0x79 && + info.DataFormat[3] == 0x6f && + info.FormatVersion[0] == 1 + }) + if err != nil { + return err + } + + startPos := bytes.Position() + indexesLength := int32(bytes.Uint32()) // inIndexes[IX_INDEXES_LENGTH] + if indexesLength < ixCount { + return errors.New("text layout properties data: not enough indexes") + } + index := make([]int32, indexesLength) + index[0] = indexesLength + for i := int32(1); i < indexesLength; i++ { + index[i] = int32(bytes.Uint32()) + } + + offset := indexesLength * 4 + top := index[ixInpcTrieTop] + trieSize := top - offset + if trieSize >= 16 { + inpcTrie, err = utrie.UcpTrieFromBytes(bytes) + if err != nil { + return err + } + } + + pos := bytes.Position() - startPos + bytes.Skip(top - pos) + offset = top + top = index[ixInscTrieTop] + trieSize = top - offset + if trieSize >= 16 { + inscTrie, err = utrie.UcpTrieFromBytes(bytes) + if err != nil { + return err + } + } + + pos = bytes.Position() - startPos + bytes.Skip(top - pos) + offset = top + top = 
index[ixVoTrieTop] + trieSize = top - offset + if trieSize >= 16 { + voTrie, err = utrie.UcpTrieFromBytes(bytes) + if err != nil { + return err + } + } + return nil +} diff --git a/go/mysql/icuregex/internal/unames/loader.go b/go/mysql/icuregex/internal/unames/loader.go new file mode 100644 index 00000000000..296670b1c66 --- /dev/null +++ b/go/mysql/icuregex/internal/unames/loader.go @@ -0,0 +1,90 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unames + +import ( + "sync" + + "vitess.io/vitess/go/mysql/icuregex/internal/icudata" + "vitess.io/vitess/go/mysql/icuregex/internal/udata" +) + +var charNamesOnce sync.Once +var charNames *unames + +type unames struct { + tokens []uint16 + tokenStrings []uint8 + groups []uint16 + groupNames []uint8 + algNames []algorithmicRange +} + +func loadCharNames() { + charNamesOnce.Do(func() { + b := udata.NewBytes(icudata.UNames) + if err := b.ReadHeader(func(info *udata.DataInfo) bool { + return info.Size >= 20 && + info.IsBigEndian == 0 && + info.CharsetFamily == 0 && + info.DataFormat[0] == 0x75 && /* dataFormat="unam" */ + info.DataFormat[1] == 0x6e && + info.DataFormat[2] == 0x61 && + info.DataFormat[3] == 0x6d && + info.FormatVersion[0] == 1 + }); err != nil { + panic(err) + } + + tokenStringOffset := int32(b.Uint32() - 16) + groupsOffset := int32(b.Uint32() - 16) + groupStringOffset := int32(b.Uint32() - 16) + algNamesOffset := int32(b.Uint32() - 16) + charNames = &unames{ + tokens: b.Uint16Slice(tokenStringOffset / 2), + tokenStrings: b.Uint8Slice(groupsOffset - tokenStringOffset), + groups: b.Uint16Slice((groupStringOffset - groupsOffset) / 2), + groupNames: b.Uint8Slice(algNamesOffset - groupStringOffset), + } + + algCount := b.Uint32() + charNames.algNames = make([]algorithmicRange, 0, algCount) + + for i := uint32(0); i < algCount; i++ { + ar := algorithmicRange{ + start: b.Uint32(), + end: b.Uint32(), + typ: b.Uint8(), + variant: b.Uint8(), + } + size := b.Uint16() + switch ar.typ { + case 0: + ar.s = b.Uint8Slice(int32(size) - 12) + case 1: + ar.factors = b.Uint16Slice(int32(ar.variant)) + ar.s = b.Uint8Slice(int32(size) - 12 - int32(ar.variant)*2) + } + charNames.algNames = append(charNames.algNames, ar) + } + }) +} diff --git a/go/mysql/icuregex/internal/unames/unames.go b/go/mysql/icuregex/internal/unames/unames.go new file mode 100644 index 00000000000..66e8ba15615 --- /dev/null +++ b/go/mysql/icuregex/internal/unames/unames.go @@ -0,0 
+1,406 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unames + +import ( + "bytes" + "strconv" + "strings" +) + +func (names *unames) getGroupName(group []uint16) []uint8 { + return names.groupNames[names.getGroupOffset(group):] +} + +type NameChoice int32 + +const ( + UnicodeCharName NameChoice = iota + /** + * The Unicode_1_Name property value which is of little practical value. + * Beginning with ICU 49, ICU APIs return an empty string for this name choice. + * @deprecated ICU 49 + */ + Unicode10CharName + /** Standard or synthetic character name. @stable ICU 2.0 */ + ExtendedCharName + /** Corrected name from NameAliases.txt. 
@stable ICU 4.4 */ + CharNameAlias +) + +type algorithmicRange struct { + start, end uint32 + typ, variant uint8 + factors []uint16 + s []uint8 +} + +func (ar *algorithmicRange) findAlgName(otherName string) rune { + switch ar.typ { + case 0: + s := ar.s + + for s[0] != 0 && len(otherName) > 0 { + if s[0] != otherName[0] { + return -1 + } + s = s[1:] + otherName = otherName[1:] + } + + var code rune + count := int(ar.variant) + for i := 0; i < count && len(otherName) > 0; i++ { + c := rune(otherName[0]) + otherName = otherName[1:] + if '0' <= c && c <= '9' { + code = (code << 4) | (c - '0') + } else if 'A' <= c && c <= 'F' { + code = (code << 4) | (c - 'A' + 10) + } else { + return -1 + } + } + + if len(otherName) == 0 && ar.start <= uint32(code) && uint32(code) <= ar.end { + return code + } + case 1: + factors := ar.factors + s := ar.s + + for s[0] != 0 && len(otherName) > 0 { + if s[0] != otherName[0] { + return -1 + } + s = s[1:] + otherName = otherName[1:] + } + s = s[1:] + + start := rune(ar.start) + limit := rune(ar.end + 1) + + var indexes [8]uint16 + var buf strings.Builder + var elements [8][]byte + var elementBases [8][]byte + + ar.writeFactorSuffix0(factors, s, &buf, &elements, &elementBases) + if buf.String() == otherName { + return start + } + + for start+1 < limit { + start++ + i := len(factors) + + for { + i-- + idx := indexes[i] + 1 + if idx < factors[i] { + indexes[i] = idx + s = elements[i] + s = s[bytes.IndexByte(s, 0)+1:] + elements[i] = s + break + } + + indexes[i] = 0 + elements[i] = elementBases[i] + } + + t := otherName + for i = 0; i < len(factors); i++ { + s = elements[i] + + for s[0] != 0 && len(t) > 0 { + if s[0] != t[0] { + s = nil + i = 99 + break + } + s = s[1:] + t = t[1:] + } + } + if i < 99 && len(t) == 0 { + return start + } + } + } + return -1 +} + +func (ar *algorithmicRange) writeFactorSuffix0(factors []uint16, s []uint8, buf *strings.Builder, elements, elementBases *[8][]byte) { + /* write each element */ + for i := 0; i < 
len(factors); i++ { + (*elements)[i] = s + (*elementBases)[i] = s + + nul := bytes.IndexByte(s, 0) + buf.Write(s[:nul]) + s = s[nul+1:] + + factor := int(factors[i] - 1) + for factor > 0 { + s = s[bytes.IndexByte(s, 0)+1:] + factor-- + } + } +} + +func CharForName(nameChoice NameChoice, name string) rune { + loadCharNames() + + lower := strings.ToLower(name) + upper := strings.ToUpper(name) + + if lower[0] == '<' { + if nameChoice == ExtendedCharName && lower[len(lower)-1] == '>' { + if limit := strings.LastIndexByte(lower, '-'); limit >= 2 { + cp, err := strconv.ParseUint(lower[limit+1:len(lower)-1], 16, 32) + if err != nil || cp > 0x10ffff { + return -1 + } + return rune(cp) + } + } + return -1 + } + + for _, ar := range charNames.algNames { + if cp := ar.findAlgName(upper); cp != -1 { + return cp + } + } + + return charNames.enumNames(0, 0x10ffff+1, upper, nameChoice) +} + +const groupShift = 5 +const linesPerGroup = 1 << groupShift +const groupMask = linesPerGroup - 1 + +const ( + groupMsb = iota + groupOffsetHigh + groupOffsetLow + groupLength +) + +func (names *unames) enumNames(start, limit rune, otherName string, nameChoice NameChoice) rune { + startGroupMSB := uint16(start >> groupShift) + endGroupMSB := uint16((limit - 1) >> groupShift) + + group := names.getGroup(start) + + if startGroupMSB < group[groupMsb] && nameChoice == ExtendedCharName { + extLimit := rune(group[groupMsb]) << groupShift + if extLimit > limit { + extLimit = limit + } + start = extLimit + } + + if startGroupMSB == endGroupMSB { + if startGroupMSB == group[groupMsb] { + return names.enumGroupNames(group, start, limit-1, otherName, nameChoice) + } + } else { + if startGroupMSB == group[groupMsb] { + if start&groupMask != 0 { + if cp := names.enumGroupNames(group, start, (rune(startGroupMSB)< group[groupMsb] { + group = group[groupLength:] + } + + for len(group) > 0 && group[groupMsb] < endGroupMSB { + start = rune(group[groupMsb]) << groupShift + if cp := names.enumGroupNames(group, 
start, start+linesPerGroup-1, otherName, nameChoice); cp != -1 { + return cp + } + group = group[groupLength:] + } + + if len(group) > 0 && group[groupMsb] == endGroupMSB { + return names.enumGroupNames(group, (limit-1)&^groupMask, limit-1, otherName, nameChoice) + } + } + + return -1 +} + +func (names *unames) getGroup(code rune) []uint16 { + groups := names.groups + groupMSB := uint16(code >> groupShift) + + start := 0 + groupCount := int(groups[0]) + limit := groupCount + groups = groups[1:] + + for start < limit-1 { + number := (start + limit) / 2 + if groupMSB < groups[number*groupLength+groupMsb] { + limit = number + } else { + start = number + } + } + + return groups[start*groupLength : (groupCount-start)*groupLength] +} + +func (names *unames) getGroupOffset(group []uint16) uint32 { + return (uint32(group[groupOffsetHigh]) << 16) | uint32(group[groupOffsetLow]) +} + +func (names *unames) enumGroupNames(group []uint16, start, end rune, otherName string, choice NameChoice) rune { + var offsets [linesPerGroup + 2]uint16 + var lengths [linesPerGroup + 2]uint16 + + s := names.getGroupName(group) + s = expandGroupLengths(s, offsets[:0], lengths[:0]) + + for start < end { + name := s[offsets[start&groupMask]:] + nameLen := lengths[start&groupMask] + if names.compareName(name[:nameLen], choice, otherName) { + return start + } + start++ + } + return -1 +} + +func expandGroupLengths(s []uint8, offsets []uint16, lengths []uint16) []uint8 { + /* read the lengths of the 32 strings in this group and get each string's offset */ + var i, offset, length uint16 + var lengthByte uint8 + + /* all 32 lengths must be read to get the offset of the first group string */ + for i < linesPerGroup { + lengthByte = s[0] + s = s[1:] + + /* read even nibble - MSBs of lengthByte */ + if length >= 12 { + /* double-nibble length spread across two bytes */ + length = ((length&0x3)<<4 | uint16(lengthByte)>>4) + 12 + lengthByte &= 0xf + } else if (lengthByte /* &0xf0 */) >= 0xc0 { + /* 
double-nibble length spread across this one byte */ + length = (uint16(lengthByte) & 0x3f) + 12 + } else { + /* single-nibble length in MSBs */ + length = uint16(lengthByte) >> 4 + lengthByte &= 0xf + } + + offsets = append(offsets, offset) + lengths = append(lengths, length) + + offset += length + i++ + + /* read odd nibble - LSBs of lengthByte */ + if (lengthByte & 0xf0) == 0 { + /* this nibble was not consumed for a double-nibble length above */ + length = uint16(lengthByte) + if length < 12 { + /* single-nibble length in LSBs */ + offsets = append(offsets, offset) + lengths = append(lengths, length) + + offset += length + i++ + } + } else { + length = 0 /* prevent double-nibble detection in the next iteration */ + } + } + + /* now, s is at the first group string */ + return s +} + +func (names *unames) compareName(name []byte, choice NameChoice, otherName string) bool { + tokens := names.tokens + + tokenCount := tokens[0] + tokens = tokens[1:] + + otherNameLen := len(otherName) + + for len(name) > 0 && len(otherName) > 0 { + c := name[0] + name = name[1:] + + if uint16(c) >= tokenCount { + if c != ';' { + if c != otherName[0] { + return false + } + otherName = otherName[1:] + } else { + break + } + } else { + token := tokens[c] + if int16(token) == -2 { + token = tokens[int(c)<<8|int(name[0])] + name = name[1:] + } + if int16(token) == -1 { + if c != ';' { + if c != otherName[0] { + return false + } + otherName = otherName[1:] + } else { + if len(otherName) == otherNameLen && choice == ExtendedCharName { + if ';' >= tokenCount || int16(tokens[';']) == -1 { + continue + } + } + break + } + } else { + tokenString := names.tokenStrings[token:] + for tokenString[0] != 0 && len(otherName) > 0 { + if tokenString[0] != otherName[0] { + return false + } + tokenString = tokenString[1:] + otherName = otherName[1:] + } + } + } + } + + return len(otherName) == 0 +} diff --git a/go/mysql/icuregex/internal/unames/unames_test.go 
b/go/mysql/icuregex/internal/unames/unames_test.go new file mode 100644 index 00000000000..f15353eef8d --- /dev/null +++ b/go/mysql/icuregex/internal/unames/unames_test.go @@ -0,0 +1,64 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unames + +import ( + "testing" +) + +func TestCharForName(t *testing.T) { + var TestNames = []struct { + code rune + name, oldName, extName string + }{ + {0x0061, "LATIN SMALL LETTER A", "", "LATIN SMALL LETTER A"}, + {0x01a2, "LATIN CAPITAL LETTER OI", "", "LATIN CAPITAL LETTER OI"}, + {0x0284, "LATIN SMALL LETTER DOTLESS J WITH STROKE AND HOOK", "", "LATIN SMALL LETTER DOTLESS J WITH STROKE AND HOOK"}, + {0x0fd0, "TIBETAN MARK BSKA- SHOG GI MGO RGYAN", "", "TIBETAN MARK BSKA- SHOG GI MGO RGYAN"}, + {0x3401, "CJK UNIFIED IDEOGRAPH-3401", "", "CJK UNIFIED IDEOGRAPH-3401"}, + {0x7fed, "CJK UNIFIED IDEOGRAPH-7FED", "", "CJK UNIFIED IDEOGRAPH-7FED"}, + {0xac00, "HANGUL SYLLABLE GA", "", "HANGUL SYLLABLE GA"}, + {0xd7a3, "HANGUL SYLLABLE HIH", "", "HANGUL SYLLABLE HIH"}, + {0xd800, "", "", ""}, + {0xdc00, "", "", ""}, + {0xff08, "FULLWIDTH LEFT PARENTHESIS", "", "FULLWIDTH LEFT PARENTHESIS"}, + {0xffe5, "FULLWIDTH YEN SIGN", "", "FULLWIDTH YEN SIGN"}, + {0xffff, "", "", ""}, + {0x1d0c5, "BYZANTINE MUSICAL SYMBOL FHTORA SKLIRON CHROMA VASIS", "", "BYZANTINE MUSICAL SYMBOL FHTORA SKLIRON CHROMA VASIS"}, + {0x23456, "CJK UNIFIED IDEOGRAPH-23456", "", "CJK UNIFIED IDEOGRAPH-23456"}, + } + + for _, tn := range TestNames { + if tn.name != "" { + r := CharForName(UnicodeCharName, tn.name) + if r != tn.code { + t.Errorf("CharFromName(U_UNICODE_CHAR_NAME, %q) = '%c' (U+%d), expected %c (U+%d)", tn.name, r, r, tn.code, tn.code) + } + } + if tn.extName != "" { + r := CharForName(ExtendedCharName, tn.extName) + if r != tn.code { + t.Errorf("CharFromName(U_EXTENDED_CHAR_NAME, %q) = '%c' (U+%d), expected %c (U+%d)", tn.extName, r, r, tn.code, tn.code) + } + } + } +} diff --git a/go/mysql/icuregex/internal/uprops/constants.go b/go/mysql/icuregex/internal/uprops/constants.go new file mode 100644 index 00000000000..4cdf1ef8a0b --- /dev/null +++ b/go/mysql/icuregex/internal/uprops/constants.go @@ -0,0 +1,664 @@ +/* +© 2016 and later: Unicode, Inc. and others. 
+Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uprops + +type Property int32 + +const ( + /* + * Note: UProperty constants are parsed by preparseucd.py. + * It matches lines like + * UCHAR_=, + */ + + /* Note: Place UCHAR_ALPHABETIC before UCHAR_BINARY_START so that + debuggers display UCHAR_ALPHABETIC as the symbolic name for 0, + rather than UCHAR_BINARY_START. Likewise for other *_START + identifiers. */ + + /** Binary property Alphabetic. Same as u_isUAlphabetic, different from u_isalpha. + Lu+Ll+Lt+Lm+Lo+Nl+Other_Alphabetic @stable ICU 2.1 */ + UCharAlphabetic Property = 0 + /** First constant for binary Unicode properties. @stable ICU 2.1 */ + UCharBinaryStart = UCharAlphabetic + /** Binary property ASCII_Hex_Digit. 0-9 A-F a-f @stable ICU 2.1 */ + UCharASCIIHexDigit Property = 1 + /** Binary property Bidi_Control. + Format controls which have specific functions + in the Bidi Algorithm. @stable ICU 2.1 */ + UCharBidiControl Property = 2 + /** Binary property Bidi_Mirrored. + Characters that may change display in RTL text. + Same as u_isMirrored. + See Bidi Algorithm, UTR 9. @stable ICU 2.1 */ + UCharBidiMirrored Property = 3 + /** Binary property Dash. Variations of dashes. 
@stable ICU 2.1 */ + UCharDash Property = 4 + /** Binary property Default_Ignorable_Code_Point (new in Unicode 3.2). + Ignorable in most processing. + <2060..206F, FFF0..FFFB, E0000..E0FFF>+Other_Default_Ignorable_Code_Point+(Cf+Cc+Cs-White_Space) @stable ICU 2.1 */ + UCharDefaultIgnorableCodePoint Property = 5 + /** Binary property Deprecated (new in Unicode 3.2). + The usage of deprecated characters is strongly discouraged. @stable ICU 2.1 */ + UCharDeprecated Property = 6 + /** Binary property Diacritic. Characters that linguistically modify + the meaning of another character to which they apply. @stable ICU 2.1 */ + UCharDiacritic Property = 7 + /** Binary property Extender. + Extend the value or shape of a preceding alphabetic character, + e.g., length and iteration marks. @stable ICU 2.1 */ + UCharExtender Property = 8 + /** Binary property Full_Composition_Exclusion. + CompositionExclusions.txt+Singleton Decompositions+ + Non-Starter Decompositions. @stable ICU 2.1 */ + UCharFullCompositionExclusion Property = 9 + /** Binary property Grapheme_Base (new in Unicode 3.2). + For programmatic determination of grapheme cluster boundaries. + [0..10FFFF]-Cc-Cf-Cs-Co-Cn-Zl-Zp-Grapheme_Link-Grapheme_Extend-CGJ @stable ICU 2.1 */ + UCharGraphemeBase Property = 10 + /** Binary property Grapheme_Extend (new in Unicode 3.2). + For programmatic determination of grapheme cluster boundaries. + Me+Mn+Mc+Other_Grapheme_Extend-Grapheme_Link-CGJ @stable ICU 2.1 */ + UCharGraphemeExtend Property = 11 + /** Binary property Grapheme_Link (new in Unicode 3.2). + For programmatic determination of grapheme cluster boundaries. @stable ICU 2.1 */ + UCharGraphemeLink Property = 12 + /** Binary property Hex_Digit. + Characters commonly used for hexadecimal numbers. @stable ICU 2.1 */ + UCharHexDigit Property = 13 + /** Binary property Hyphen. Dashes used to mark connections + between pieces of words, plus the Katakana middle dot. 
@stable ICU 2.1 */ + UCharHyphen Property = 14 + /** Binary property ID_Continue. + Characters that can continue an identifier. + DerivedCoreProperties.txt also says "NOTE: Cf characters should be filtered out." + ID_Start+Mn+Mc+Nd+Pc @stable ICU 2.1 */ + UCharIDContinue Property = 15 + /** Binary property ID_Start. + Characters that can start an identifier. + Lu+Ll+Lt+Lm+Lo+Nl @stable ICU 2.1 */ + UCharIDStart Property = 16 + /** Binary property Ideographic. + CJKV ideographs. @stable ICU 2.1 */ + UCharIdeographic Property = 17 + /** Binary property IDS_Binary_Operator (new in Unicode 3.2). + For programmatic determination of + Ideographic Description Sequences. @stable ICU 2.1 */ + UCharIdsBinaryOperator Property = 18 + /** Binary property IDS_Trinary_Operator (new in Unicode 3.2). + For programmatic determination of + Ideographic Description Sequences. @stable ICU 2.1 */ + UCharIdsTrinaryOperator Property = 19 + /** Binary property Join_Control. + Format controls for cursive joining and ligation. @stable ICU 2.1 */ + UCharJoinControl Property = 20 + /** Binary property Logical_Order_Exception (new in Unicode 3.2). + Characters that do not use logical order and + require special handling in most processing. @stable ICU 2.1 */ + UCharLogicalOrderException Property = 21 + /** Binary property Lowercase. Same as u_isULowercase, different from u_islower. + Ll+Other_Lowercase @stable ICU 2.1 */ + UCharLowercase Property = 22 + /** Binary property Math. Sm+Other_Math @stable ICU 2.1 */ + UCharMath Property = 23 + /** Binary property Noncharacter_Code_Point. + Code points that are explicitly defined as illegal + for the encoding of characters. @stable ICU 2.1 */ + UCharNoncharacterCodePoint Property = 24 + /** Binary property Quotation_Mark. @stable ICU 2.1 */ + UCharQuotationMark Property = 25 + /** Binary property Radical (new in Unicode 3.2). + For programmatic determination of + Ideographic Description Sequences. 
@stable ICU 2.1 */ + UCharRadical Property = 26 + /** Binary property Soft_Dotted (new in Unicode 3.2). + Characters with a "soft dot", like i or j. + An accent placed on these characters causes + the dot to disappear. @stable ICU 2.1 */ + UCharSoftDotted Property = 27 + /** Binary property Terminal_Punctuation. + Punctuation characters that generally mark + the end of textual units. @stable ICU 2.1 */ + UCharTerminalPunctuation Property = 28 + /** Binary property Unified_Ideograph (new in Unicode 3.2). + For programmatic determination of + Ideographic Description Sequences. @stable ICU 2.1 */ + UCharUnifiedIdeograph Property = 29 + /** Binary property Uppercase. Same as u_isUUppercase, different from u_isupper. + Lu+Other_Uppercase @stable ICU 2.1 */ + UCharUppercase Property = 30 + /** Binary property White_Space. + Same as u_isUWhiteSpace, different from u_isspace and u_isWhitespace. + Space characters+TAB+CR+LF-ZWSP-ZWNBSP @stable ICU 2.1 */ + UCharWhiteSpace Property = 31 + /** Binary property XID_Continue. + ID_Continue modified to allow closure under + normalization forms NFKC and NFKD. @stable ICU 2.1 */ + UCharXidContinue Property = 32 + /** Binary property XID_Start. ID_Start modified to allow + closure under normalization forms NFKC and NFKD. @stable ICU 2.1 */ + UCharXidStart Property = 33 + /** Binary property Case_Sensitive. Either the source of a case + mapping or _in_ the target of a case mapping. Not the same as + the general category Cased_Letter. @stable ICU 2.6 */ + UCharCaseSensitive Property = 34 + /** Binary property STerm (new in Unicode 4.0.1). + Sentence Terminal. Used in UAX #29: Text Boundaries + (http://www.unicode.org/reports/tr29/) + @stable ICU 3.0 */ + UCharSTerm Property = 35 + /** Binary property Variation_Selector (new in Unicode 4.0.1). + Indicates all those characters that qualify as Variation Selectors. + For details on the behavior of these characters, + see StandardizedVariants.html and 15.6 Variation Selectors. 
+ @stable ICU 3.0 */ + UCharVariationSelector Property = 36 + /** Binary property NFD_Inert. + ICU-specific property for characters that are inert under NFD, + i.e., they do not interact with adjacent characters. + See the documentation for the Normalizer2 class and the + Normalizer2::isInert() method. + @stable ICU 3.0 */ + UCharNfdInert Property = 37 + /** Binary property NFKD_Inert. + ICU-specific property for characters that are inert under NFKD, + i.e., they do not interact with adjacent characters. + See the documentation for the Normalizer2 class and the + Normalizer2::isInert() method. + @stable ICU 3.0 */ + UCharNfkdInert Property = 38 + /** Binary property NFC_Inert. + ICU-specific property for characters that are inert under NFC, + i.e., they do not interact with adjacent characters. + See the documentation for the Normalizer2 class and the + Normalizer2::isInert() method. + @stable ICU 3.0 */ + UCharNfcInert Property = 39 + /** Binary property NFKC_Inert. + ICU-specific property for characters that are inert under NFKC, + i.e., they do not interact with adjacent characters. + See the documentation for the Normalizer2 class and the + Normalizer2::isInert() method. + @stable ICU 3.0 */ + UCharNfkcInert Property = 40 + /** Binary Property Segment_Starter. + ICU-specific property for characters that are starters in terms of + Unicode normalization and combining character sequences. + They have ccc=0 and do not occur in non-initial position of the + canonical decomposition of any character + (like a-umlaut in NFD and a Jamo T in an NFD(Hangul LVT)). + ICU uses this property for segmenting a string for generating a set of + canonically equivalent strings, e.g. for canonical closure while + processing collation tailoring rules. + @stable ICU 3.0 */ + UCharSegmentStarter Property = 41 + /** Binary property Pattern_Syntax (new in Unicode 4.1). 
+ See UAX #31 Identifier and Pattern Syntax + (http://www.unicode.org/reports/tr31/) + @stable ICU 3.4 */ + UCharPatternSyntax Property = 42 + /** Binary property Pattern_White_Space (new in Unicode 4.1). + See UAX #31 Identifier and Pattern Syntax + (http://www.unicode.org/reports/tr31/) + @stable ICU 3.4 */ + UCharPatternWhiteSpace Property = 43 + /** Binary property alnum (a C/POSIX character class). + Implemented according to the UTS #18 Annex C Standard Recommendation. + See the uchar.h file documentation. + @stable ICU 3.4 */ + UCharPosixAlnum Property = 44 + /** Binary property blank (a C/POSIX character class). + Implemented according to the UTS #18 Annex C Standard Recommendation. + See the uchar.h file documentation. + @stable ICU 3.4 */ + UCharPosixBlank Property = 45 + /** Binary property graph (a C/POSIX character class). + Implemented according to the UTS #18 Annex C Standard Recommendation. + See the uchar.h file documentation. + @stable ICU 3.4 */ + UCharPosixGraph Property = 46 + /** Binary property print (a C/POSIX character class). + Implemented according to the UTS #18 Annex C Standard Recommendation. + See the uchar.h file documentation. + @stable ICU 3.4 */ + UCharPosixPrint Property = 47 + /** Binary property xdigit (a C/POSIX character class). + Implemented according to the UTS #18 Annex C Standard Recommendation. + See the uchar.h file documentation. + @stable ICU 3.4 */ + UCharPosixXdigit Property = 48 + /** Binary property Cased. For Lowercase, Uppercase and Titlecase characters. @stable ICU 4.4 */ + UCharCased Property = 49 + /** Binary property Case_Ignorable. Used in context-sensitive case mappings. @stable ICU 4.4 */ + UCharCaseIgnorable Property = 50 + /** Binary property Changes_When_Lowercased. @stable ICU 4.4 */ + UCharChangesWhenLowercased Property = 51 + /** Binary property Changes_When_Uppercased. @stable ICU 4.4 */ + UCharChangesWhenUppercased Property = 52 + /** Binary property Changes_When_Titlecased. 
@stable ICU 4.4 */ + UCharChangesWhenTitlecased Property = 53 + /** Binary property Changes_When_Casefolded. @stable ICU 4.4 */ + UCharChangesWhenCasefolded Property = 54 + /** Binary property Changes_When_Casemapped. @stable ICU 4.4 */ + UCharChangesWhenCasemapped Property = 55 + /** Binary property Changes_When_NFKC_Casefolded. @stable ICU 4.4 */ + UCharChangesWhenNfkcCasefolded Property = 56 + /** + * Binary property Emoji. + * See http://www.unicode.org/reports/tr51/#Emoji_Properties + * + * @stable ICU 57 + */ + UCharEmoji Property = 57 + /** + * Binary property Emoji_Presentation. + * See http://www.unicode.org/reports/tr51/#Emoji_Properties + * + * @stable ICU 57 + */ + UCharEmojiPresentation Property = 58 + /** + * Binary property Emoji_Modifier. + * See http://www.unicode.org/reports/tr51/#Emoji_Properties + * + * @stable ICU 57 + */ + UCharEmojiModifier Property = 59 + /** + * Binary property Emoji_Modifier_Base. + * See http://www.unicode.org/reports/tr51/#Emoji_Properties + * + * @stable ICU 57 + */ + UCharEmojiModifierBase Property = 60 + /** + * Binary property Emoji_Component. + * See http://www.unicode.org/reports/tr51/#Emoji_Properties + * + * @stable ICU 60 + */ + UCharEmojiComponent Property = 61 + /** + * Binary property Regional_Indicator. + * @stable ICU 60 + */ + UCharRegionalIndicator Property = 62 + /** + * Binary property Prepended_Concatenation_Mark. + * @stable ICU 60 + */ + UCharPrependedConcatenationMark Property = 63 + /** + * Binary property Extended_Pictographic. + * See http://www.unicode.org/reports/tr51/#Emoji_Properties + * + * @stable ICU 62 + */ + UCharExtendedPictographic Property = 64 + + /** + * Binary property of strings Basic_Emoji. + * See https://www.unicode.org/reports/tr51/#Emoji_Sets + * + * @stable ICU 70 + */ + UCharBasicEmoji Property = 65 + /** + * Binary property of strings Emoji_Keycap_Sequence. 
+ * See https://www.unicode.org/reports/tr51/#Emoji_Sets + * + * @stable ICU 70 + */ + UCharEmojiKeycapSequence Property = 66 + /** + * Binary property of strings RGI_Emoji_Modifier_Sequence. + * See https://www.unicode.org/reports/tr51/#Emoji_Sets + * + * @stable ICU 70 + */ + UCharRgiEmojiModifierSequence Property = 67 + /** + * Binary property of strings RGI_Emoji_Flag_Sequence. + * See https://www.unicode.org/reports/tr51/#Emoji_Sets + * + * @stable ICU 70 + */ + UCharRgiEmojiFlagSequence Property = 68 + /** + * Binary property of strings RGI_Emoji_Tag_Sequence. + * See https://www.unicode.org/reports/tr51/#Emoji_Sets + * + * @stable ICU 70 + */ + UCharRgiEmojiTagSequence Property = 69 + /** + * Binary property of strings RGI_Emoji_ZWJ_Sequence. + * See https://www.unicode.org/reports/tr51/#Emoji_Sets + * + * @stable ICU 70 + */ + UCharRgiEmojiZwjSequence Property = 70 + /** + * Binary property of strings RGI_Emoji. + * See https://www.unicode.org/reports/tr51/#Emoji_Sets + * + * @stable ICU 70 + */ + UCharRgiEmoji Property = 71 + + /** Enumerated property Bidi_Class. + Same as u_charDirection, returns UCharDirection values. @stable ICU 2.2 */ + UCharBidiClass Property = 0x1000 + /** First constant for enumerated/integer Unicode properties. @stable ICU 2.2 */ + UCharIntStart = UCharBidiClass + /** Enumerated property Block. + Same as ublock_getCode, returns UBlockCode values. @stable ICU 2.2 */ + UCharBlock Property = 0x1001 + /** Enumerated property Canonical_Combining_Class. + Same as u_getCombiningClass, returns 8-bit numeric values. @stable ICU 2.2 */ + UCharCanonicalCombiningClass Property = 0x1002 + /** Enumerated property Decomposition_Type. + Returns UDecompositionType values. @stable ICU 2.2 */ + UCharDecompositionType Property = 0x1003 + /** Enumerated property East_Asian_Width. + See http://www.unicode.org/reports/tr11/ + Returns UEastAsianWidth values. 
@stable ICU 2.2 */ + UCharEastAsianWidth Property = 0x1004 + /** Enumerated property General_Category. + Same as u_charType, returns UCharCategory values. @stable ICU 2.2 */ + UCharGeneralCategory Property = 0x1005 + /** Enumerated property Joining_Group. + Returns UJoiningGroup values. @stable ICU 2.2 */ + UCharJoiningGroup Property = 0x1006 + /** Enumerated property Joining_Type. + Returns UJoiningType values. @stable ICU 2.2 */ + UCharJoiningType Property = 0x1007 + /** Enumerated property Line_Break. + Returns ULineBreak values. @stable ICU 2.2 */ + UCharLineBreak Property = 0x1008 + /** Enumerated property Numeric_Type. + Returns UNumericType values. @stable ICU 2.2 */ + UCharNumericType Property = 0x1009 + /** Enumerated property Script. + Same as uscript_getScript, returns UScriptCode values. @stable ICU 2.2 */ + UCharScript Property = 0x100A + /** Enumerated property Hangul_Syllable_Type, new in Unicode 4. + Returns UHangulSyllableType values. @stable ICU 2.6 */ + UCharHangulSyllableType Property = 0x100B + /** Enumerated property NFD_Quick_Check. + Returns UNormalizationCheckResult values. @stable ICU 3.0 */ + UCharNfdQuickCheck Property = 0x100C + /** Enumerated property NFKD_Quick_Check. + Returns UNormalizationCheckResult values. @stable ICU 3.0 */ + UCharNfkdQuickCheck Property = 0x100D + /** Enumerated property NFC_Quick_Check. + Returns UNormalizationCheckResult values. @stable ICU 3.0 */ + UCharNfcQuickCheck Property = 0x100E + /** Enumerated property NFKC_Quick_Check. + Returns UNormalizationCheckResult values. @stable ICU 3.0 */ + UCharNfkcQuickCheck Property = 0x100F + /** Enumerated property Lead_Canonical_Combining_Class. + ICU-specific property for the ccc of the first code point + of the decomposition, or lccc(c)=ccc(NFD(c)[0]). + Useful for checking for canonically ordered text; + see UNORM_FCD and http://www.unicode.org/notes/tn5/#FCD . + Returns 8-bit numeric values like UCHAR_CANONICAL_COMBINING_CLASS. 
@stable ICU 3.0 */ + UCharLeadCanonicalCombiningClass Property = 0x1010 + /** Enumerated property Trail_Canonical_Combining_Class. + ICU-specific property for the ccc of the last code point + of the decomposition, or tccc(c)=ccc(NFD(c)[last]). + Useful for checking for canonically ordered text; + see UNORM_FCD and http://www.unicode.org/notes/tn5/#FCD . + Returns 8-bit numeric values like UCHAR_CANONICAL_COMBINING_CLASS. @stable ICU 3.0 */ + UCharTrailCanonicalCombiningClass Property = 0x1011 + /** Enumerated property Grapheme_Cluster_Break (new in Unicode 4.1). + Used in UAX #29: Text Boundaries + (http://www.unicode.org/reports/tr29/) + Returns UGraphemeClusterBreak values. @stable ICU 3.4 */ + UCharGraphemeClusterBreak Property = 0x1012 + /** Enumerated property Sentence_Break (new in Unicode 4.1). + Used in UAX #29: Text Boundaries + (http://www.unicode.org/reports/tr29/) + Returns USentenceBreak values. @stable ICU 3.4 */ + UCharSentenceBreak Property = 0x1013 + /** Enumerated property Word_Break (new in Unicode 4.1). + Used in UAX #29: Text Boundaries + (http://www.unicode.org/reports/tr29/) + Returns UWordBreakValues values. @stable ICU 3.4 */ + UCharWordBreak Property = 0x1014 + /** Enumerated property Bidi_Paired_Bracket_Type (new in Unicode 6.3). + Used in UAX #9: Unicode Bidirectional Algorithm + (http://www.unicode.org/reports/tr9/) + Returns UBidiPairedBracketType values. @stable ICU 52 */ + UCharBidiPairedBracketType Property = 0x1015 + /** + * Enumerated property Indic_Positional_Category. + * New in Unicode 6.0 as provisional property Indic_Matra_Category; + * renamed and changed to informative in Unicode 8.0. + * See http://www.unicode.org/reports/tr44/#IndicPositionalCategory.txt + * @stable ICU 63 + */ + UCharIndicPositionalCategory Property = 0x1016 + /** + * Enumerated property Indic_Syllabic_Category. + * New in Unicode 6.0 as provisional; informative since Unicode 8.0. 
+ * See http://www.unicode.org/reports/tr44/#IndicSyllabicCategory.txt + * @stable ICU 63 + */ + UCharIndicSyllableCategory Property = 0x1017 + /** + * Enumerated property Vertical_Orientation. + * Used for UAX #50 Unicode Vertical Text Layout (https://www.unicode.org/reports/tr50/). + * New as a UCD property in Unicode 10.0. + * @stable ICU 63 + */ + UCharVerticalOrientation Property = 0x1018 + + /** Bitmask property General_Category_Mask. + This is the General_Category property returned as a bit mask. + When used in u_getIntPropertyValue(c), same as U_MASK(u_charType(c)), + returns bit masks for UCharCategory values where exactly one bit is set. + When used with u_getPropertyValueName() and u_getPropertyValueEnum(), + a multi-bit mask is used for sets of categories like "Letters". + Mask values should be cast to uint32_t. + @stable ICU 2.4 */ + UCharGeneralCategoryMask Property = 0x2000 + /** First constant for bit-mask Unicode properties. @stable ICU 2.4 */ + UCharMaskStart = UCharGeneralCategoryMask + /** Double property Numeric_Value. + Corresponds to u_getNumericValue. @stable ICU 2.4 */ + UCharNumericValue Property = 0x3000 + /** First constant for double Unicode properties. @stable ICU 2.4 */ + UCharDoubleStart = UCharNumericValue + /** String property Age. + Corresponds to u_charAge. @stable ICU 2.4 */ + UCharAge Property = 0x4000 + /** First constant for string Unicode properties. @stable ICU 2.4 */ + UCharStringStart = UCharAge + /** String property Bidi_Mirroring_Glyph. + Corresponds to u_charMirror. @stable ICU 2.4 */ + UCharBidiMirroringGlyph Property = 0x4001 + /** String property Case_Folding. + Corresponds to u_strFoldCase in ustring.h. @stable ICU 2.4 */ + UCharCaseFolding Property = 0x4002 + /** String property Lowercase_Mapping. + Corresponds to u_strToLower in ustring.h. @stable ICU 2.4 */ + UCharLowercaseMapping Property = 0x4004 + /** String property Name. + Corresponds to u_charName. 
@stable ICU 2.4 */ + UCharName Property = 0x4005 + /** String property Simple_Case_Folding. + Corresponds to u_foldCase. @stable ICU 2.4 */ + UCharSimpleCaseFolding Property = 0x4006 + /** String property Simple_Lowercase_Mapping. + Corresponds to u_tolower. @stable ICU 2.4 */ + UCharSimpleLowercaseMapping Property = 0x4007 + /** String property Simple_Titlecase_Mapping. + Corresponds to u_totitle. @stable ICU 2.4 */ + UcharSimpleTitlecaseMapping Property = 0x4008 + /** String property Simple_Uppercase_Mapping. + Corresponds to u_toupper. @stable ICU 2.4 */ + UCharSimpleUppercaseMapping Property = 0x4009 + /** String property Titlecase_Mapping. + Corresponds to u_strToTitle in ustring.h. @stable ICU 2.4 */ + UCharTitlecaseMapping Property = 0x400A + /** String property Uppercase_Mapping. + Corresponds to u_strToUpper in ustring.h. @stable ICU 2.4 */ + UCharUppercaseMapping Property = 0x400C + /** String property Bidi_Paired_Bracket (new in Unicode 6.3). + Corresponds to u_getBidiPairedBracket. @stable ICU 52 */ + UCharBidiPairedBracket Property = 0x400D + + /** Miscellaneous property Script_Extensions (new in Unicode 6.0). + Some characters are commonly used in multiple scripts. + For more information, see UAX #24: http://www.unicode.org/reports/tr24/. + Corresponds to uscript_hasScript and uscript_getScriptExtensions in uscript.h. + @stable ICU 4.6 */ + UCharScriptExtensions Property = 0x7000 + /** First constant for Unicode properties with unusual value types. @stable ICU 4.6 */ + UCharOtherPropertyStart = UCharScriptExtensions + + /** Represents a nonexistent or invalid property or property value. @stable ICU 2.4 */ + UCharInvalidCode Property = -1 +) + +const ( + uCharBinaryLimit = 72 + uCharIntLimit = 0x1019 + uCharMaskLimit = 0x2001 + uCharStringLimit = 0x400E +) + +/* + * Properties in vector word 1 + * Each bit encodes one binary property. 
+ * The following constants represent the bit number, use 1<= 0 { + set.AddRuneRange(startHasProperty, c-1) + startHasProperty = -1 + } + } + } + if startHasProperty >= 0 { + set.AddRuneRange(startHasProperty, uset.MaxValue) + } + + inclusionsForProperty[prop] = set + return set, nil +} + +func getInclusionsForIntProperty(prop Property) (*uset.UnicodeSet, error) { + if inc, ok := inclusionsForProperty[prop]; ok { + return inc, nil + } + + src := prop.source() + incl, err := getInclusionsForSource(src) + if err != nil { + return nil, err + } + + intPropIncl := uset.New() + intPropIncl.AddRune(0) + + numRanges := incl.RangeCount() + prevValue := int32(0) + + for i := 0; i < numRanges; i++ { + rangeEnd := incl.RangeEnd(i) + for c := incl.RangeStart(i); c <= rangeEnd; c++ { + value := getIntPropertyValue(c, prop) + if value != prevValue { + intPropIncl.AddRune(c) + prevValue = value + } + } + } + + inclusionsForProperty[prop] = intPropIncl + return intPropIncl, nil +} + +func ApplyIntPropertyValue(u *uset.UnicodeSet, prop Property, value int32) error { + switch { + case prop == UCharGeneralCategoryMask: + inclusions, err := getInclusionsForProperty(prop) + if err != nil { + return err + } + u.ApplyFilter(inclusions, func(ch rune) bool { + return (uchar.Mask(uchar.CharType(ch)) & uint32(value)) != 0 + }) + case prop == UCharScriptExtensions: + inclusions, err := getInclusionsForProperty(prop) + if err != nil { + return err + } + u.ApplyFilter(inclusions, func(ch rune) bool { + return uscriptHasScript(ch, code(value)) + }) + case 0 <= prop && prop < uCharBinaryLimit: + if value == 0 || value == 1 { + set, err := getInclusionsForBinaryProperty(prop) + if err != nil { + return err + } + u.CopyFrom(set) + if value == 0 { + u.Complement() + } + } else { + u.Clear() + } + + case UCharIntStart <= prop && prop < uCharIntLimit: + inclusions, err := getInclusionsForProperty(prop) + if err != nil { + return err + } + u.ApplyFilter(inclusions, func(ch rune) bool { + return 
getIntPropertyValue(ch, prop) == value + }) + default: + return errors.ErrUnsupported + } + return nil +} + +func mungeCharName(charname string) string { + out := make([]byte, 0, len(charname)) + for _, ch := range []byte(charname) { + j := len(out) + if ch == ' ' && (j == 0 || out[j-1] == ' ') { + continue + } + out = append(out, ch) + } + return string(out) +} + +func ApplyPropertyPattern(u *uset.UnicodeSet, pat string) error { + if len(pat) < 5 { + return errors.ErrIllegalArgument + } + + var posix, isName, invert bool + + if isPOSIXOpen(pat) { + posix = true + pat = pattern.SkipWhitespace(pat[2:]) + if len(pat) > 0 && pat[0] == '^' { + pat = pat[1:] + invert = true + } + } else if isPerlOpen(pat) || isNameOpen(pat) { + c := pat[1] + invert = c == 'P' + isName = c == 'N' + pat = pattern.SkipWhitespace(pat[2:]) + if len(pat) == 0 || pat[0] != '{' { + return errors.ErrIllegalArgument + } + pat = pat[1:] + } else { + return errors.ErrIllegalArgument + } + + var closePos int + if posix { + closePos = strings.Index(pat, ":]") + } else { + closePos = strings.IndexByte(pat, '}') + } + if closePos < 0 { + return errors.ErrIllegalArgument + } + + equals := strings.IndexByte(pat, '=') + var propName, valueName string + if equals >= 0 && equals < closePos && !isName { + propName = pat[:equals] + valueName = pat[equals+1 : closePos] + } else { + propName = pat[:closePos] + if isName { + valueName = propName + propName = "na" + } + } + + if err := ApplyPropertyAlias(u, propName, valueName); err != nil { + return err + } + if invert { + u.Complement() + } + return nil +} + +func isPOSIXOpen(pattern string) bool { + return pattern[0] == '[' && pattern[1] == ':' +} + +func isNameOpen(pattern string) bool { + return pattern[0] == '\\' && pattern[1] == 'N' +} + +func isPerlOpen(pattern string) bool { + return pattern[0] == '\\' && (pattern[1] == 'p' || pattern[1] == 'P') +} + +func ApplyPropertyAlias(u *uset.UnicodeSet, prop, value string) error { + var p Property + var v int32 + 
var invert bool + + if len(value) > 0 { + p = getPropertyEnum(prop) + if p == -1 { + return errors.ErrIllegalArgument + } + if p == UCharGeneralCategory { + p = UCharGeneralCategoryMask + } + + if (p >= UCharBinaryStart && p < uCharBinaryLimit) || + (p >= UCharIntStart && p < uCharIntLimit) || + (p >= UCharMaskStart && p < uCharMaskLimit) { + v = getPropertyValueEnum(p, value) + if v == -1 { + // Handle numeric CCC + if p == UCharCanonicalCombiningClass || + p == UCharTrailCanonicalCombiningClass || + p == UCharLeadCanonicalCombiningClass { + val, err := strconv.ParseUint(value, 10, 8) + if err != nil { + return errors.ErrIllegalArgument + } + v = int32(val) + } else { + return errors.ErrIllegalArgument + } + } + } else { + switch p { + case UCharNumericValue: + val, err := strconv.ParseFloat(value, 64) + if err != nil { + return errors.ErrIllegalArgument + } + incl, err := getInclusionsForProperty(p) + if err != nil { + return err + } + u.ApplyFilter(incl, func(ch rune) bool { + return uchar.NumericValue(ch) == val + }) + return nil + case UCharName: + // Must munge name, since u_charFromName() does not do + // 'loose' matching. + charName := mungeCharName(value) + ch := unames.CharForName(unames.ExtendedCharName, charName) + if ch < 0 { + return errors.ErrIllegalArgument + } + u.Clear() + u.AddRune(ch) + return nil + case UCharAge: + // Must munge name, since u_versionFromString() does not do + // 'loose' matching. + charName := mungeCharName(value) + version := uchar.VersionFromString(charName) + incl, err := getInclusionsForProperty(p) + if err != nil { + return err + } + u.ApplyFilter(incl, func(ch rune) bool { + return uchar.CharAge(ch) == version + }) + return nil + case UCharScriptExtensions: + v = getPropertyValueEnum(UCharScript, value) + if v == -1 { + return errors.ErrIllegalArgument + } + default: + // p is a non-binary, non-enumerated property that we + // don't support (yet). + return errors.ErrIllegalArgument + } + } + } else { + // value is empty. 
Interpret as General Category, Script, or + // Binary property. + p = UCharGeneralCategoryMask + v = getPropertyValueEnum(p, prop) + if v == -1 { + p = UCharScript + v = getPropertyValueEnum(p, prop) + if v == -1 { + p = getPropertyEnum(prop) + if p >= UCharBinaryStart && p < uCharBinaryLimit { + v = 1 + } else if 0 == comparePropertyNames("ANY", prop) { + u.Clear() + u.AddRuneRange(uset.MinValue, uset.MaxValue) + return nil + } else if 0 == comparePropertyNames("ASCII", prop) { + u.Clear() + u.AddRuneRange(0, 0x7F) + return nil + } else if 0 == comparePropertyNames("Assigned", prop) { + // [:Assigned:]=[:^Cn:] + p = UCharGeneralCategoryMask + v = int32(uchar.GcCnMask) + invert = true + } else { + return errors.ErrIllegalArgument + } + } + } + } + + err := ApplyIntPropertyValue(u, p, v) + if err != nil { + return err + } + if invert { + u.Complement() + } + return nil +} + +func AddULayoutPropertyStarts(src propertySource, u *uset.UnicodeSet) { + var trie *utrie.UcpTrie + switch src { + case srcInpc: + trie = ulayout.InpcTrie() + case srcInsc: + trie = ulayout.InscTrie() + case srcVo: + trie = ulayout.VoTrie() + default: + panic("unreachable") + } + + // Add the start code point of each same-value range of the trie. 
+ var start, end rune + for { + end, _ = trie.GetRange(start, utrie.UcpMapRangeNormal, 0, nil) + if end < 0 { + break + } + u.AddRune(start) + start = end + 1 + } +} + +func AddCategory(u *uset.UnicodeSet, mask uint32) error { + set := uset.New() + err := ApplyIntPropertyValue(set, UCharGeneralCategoryMask, int32(mask)) + if err != nil { + return err + } + u.AddAll(set) + return nil +} + +func NewUnicodeSetFomPattern(pattern string, flags uset.USet) (*uset.UnicodeSet, error) { + u := uset.New() + if err := ApplyPropertyPattern(u, pattern); err != nil { + return nil, err + } + if flags&uset.CaseInsensitive != 0 { + u.CloseOver(uset.CaseInsensitive) + } + return u, nil +} + +func MustNewUnicodeSetFomPattern(pattern string, flags uset.USet) *uset.UnicodeSet { + u, err := NewUnicodeSetFomPattern(pattern, flags) + if err != nil { + panic(err) + } + return u +} diff --git a/go/mysql/icuregex/internal/uprops/uprops.go b/go/mysql/icuregex/internal/uprops/uprops.go new file mode 100644 index 00000000000..0589938c29c --- /dev/null +++ b/go/mysql/icuregex/internal/uprops/uprops.go @@ -0,0 +1,217 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package uprops + +import ( + "vitess.io/vitess/go/mysql/icuregex/internal/bytestrie" + "vitess.io/vitess/go/mysql/icuregex/internal/uchar" +) + +const ( + ixValueMapsOffset = 0 + ixByteTriesOffset = 1 + ixNameGroupsOffset = 2 + ixReserved3Offset = 3 +) + +func (prop Property) source() propertySource { + if prop < UCharBinaryStart { + return srcNone /* undefined */ + } else if prop < uCharBinaryLimit { + bprop := binProps[prop] + if bprop.mask != 0 { + return srcPropsvec + } + return bprop.column + } else if prop < UCharIntStart { + return srcNone /* undefined */ + } else if prop < uCharIntLimit { + iprop := intProps[prop-UCharIntStart] + if iprop.mask != 0 { + return srcPropsvec + } + return iprop.column + } else if prop < UCharStringStart { + switch prop { + case UCharGeneralCategoryMask, + UCharNumericValue: + return srcChar + + default: + return srcNone + } + } else if prop < uCharStringLimit { + switch prop { + case UCharAge: + return srcPropsvec + + case UCharBidiMirroringGlyph: + return srcBidi + + case UCharCaseFolding, + UCharLowercaseMapping, + UCharSimpleCaseFolding, + UCharSimpleLowercaseMapping, + UcharSimpleTitlecaseMapping, + UCharSimpleUppercaseMapping, + UCharTitlecaseMapping, + UCharUppercaseMapping: + return srcCase + + /* UCHAR_ISO_COMMENT, UCHAR_UNICODE_1_NAME (deprecated) */ + case UCharName: + return srcNames + + default: + return srcNone + } + } else { + switch prop { + case UCharScriptExtensions: + return srcPropsvec + default: + return srcNone /* undefined */ + } + } +} + +func getPropertyEnum(alias string) Property { + return Property(getPropertyOrValueEnum(0, alias)) +} + +func getPropertyValueEnum(prop Property, alias string) int32 { + valueMapIdx := findProperty(prop) + if valueMapIdx == 0 { + return -1 + } + + valueMps := valueMaps() + valueMapIdx = int32(valueMps[valueMapIdx+1]) + if valueMapIdx == 0 { + return -1 + } + // valueMapIndex is the start of the property's valueMap, + // where the first word is the BytesTrie offset. 
+ return getPropertyOrValueEnum(int32(valueMps[valueMapIdx]), alias) +} + +func findProperty(prop Property) int32 { + var i = int32(1) + valueMps := valueMaps() + for numRanges := int32(valueMps[0]); numRanges > 0; numRanges-- { + start := int32(valueMps[i]) + limit := int32(valueMps[i+1]) + i += 2 + if int32(prop) < start { + break + } + if int32(prop) < limit { + return i + (int32(prop)-start)*2 + } + i += (limit - start) * 2 + } + return 0 +} + +func getPropertyOrValueEnum(offset int32, alias string) int32 { + trie := bytestrie.New(byteTrie()[offset:]) + if trie.ContainsName(alias) { + return trie.GetValue() + } + return -1 +} + +func comparePropertyNames(name1, name2 string) int { + next := func(s string) (byte, string) { + for len(s) > 0 && (s[0] == 0x2d || s[0] == 0x5f || s[0] == 0x20 || (0x09 <= s[0] && s[0] <= 0x0d)) { + s = s[1:] + } + if len(s) == 0 { + return 0, "" + } + c := s[0] + s = s[1:] + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + return c, s + } + + var r1, r2 byte + for { + r1, name1 = next(name1) + r2, name2 = next(name2) + + if r1 == 0 && r2 == 0 { + return 0 + } + + /* Compare the lowercased characters */ + if r1 != r2 { + return int(r1) - int(r2) + } + } +} + +func getIntPropertyValue(c rune, which Property) int32 { + if which < UCharIntStart { + if UCharBinaryStart <= which && which < uCharBinaryLimit { + prop := binProps[which] + if prop.contains == nil { + return 0 + } + if prop.contains(prop, c, which) { + return 1 + } + return 0 + } + } else if which < uCharIntLimit { + iprop := intProps[which-UCharIntStart] + return iprop.getValue(iprop, c, which) + } else if which == UCharGeneralCategoryMask { + return int32(uchar.Mask(uchar.CharType(c))) + } + return 0 // undefined +} + +func mergeScriptCodeOrIndex(scriptX uint32) uint32 { + return ((scriptX & scriptHighMask) >> scriptHighShift) | + (scriptX & scriptLowMask) +} + +func script(c rune) int32 { + if c > 0x10ffff { + return -1 + } + scriptX := uchar.GetUnicodeProperties(c, 0) & 
scriptXMask + codeOrIndex := mergeScriptCodeOrIndex(scriptX) + + if scriptX < scriptXWithCommon { + return int32(codeOrIndex) + } else if scriptX < scriptXWithInherited { + return 0 + } else if scriptX < scriptXWithOther { + return 1 + } else { + return int32(uchar.ScriptExtension(codeOrIndex)) + } +} diff --git a/go/mysql/icuregex/internal/uprops/uprops_binary.go b/go/mysql/icuregex/internal/uprops/uprops_binary.go new file mode 100644 index 00000000000..5d4aaaec1b5 --- /dev/null +++ b/go/mysql/icuregex/internal/uprops/uprops_binary.go @@ -0,0 +1,249 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package uprops + +import ( + "slices" + + "vitess.io/vitess/go/mysql/icuregex/internal/normalizer" + "vitess.io/vitess/go/mysql/icuregex/internal/ubidi" + "vitess.io/vitess/go/mysql/icuregex/internal/ucase" + "vitess.io/vitess/go/mysql/icuregex/internal/uchar" + "vitess.io/vitess/go/mysql/icuregex/internal/uemoji" +) + +type binaryProperty struct { + column propertySource + mask uint32 + contains func(prop *binaryProperty, c rune, which Property) bool +} + +func defaultContains(prop *binaryProperty, c rune, _ Property) bool { + return (uchar.GetUnicodeProperties(c, int(prop.column)) & prop.mask) != 0 +} + +var binProps = [uCharBinaryLimit]*binaryProperty{ + /* + * column and mask values for binary properties from u_getUnicodeProperties(). + * Must be in order of corresponding UProperty, + * and there must be exactly one entry per binary UProperty. + * + * Properties with mask==0 are handled in code. + * For them, column is the UPropertySource value. + * + * See also https://unicode-org.github.io/icu/userguide/strings/properties.html + */ + {1, uchar.Mask(pAlphabetic), defaultContains}, + {1, uchar.Mask(pASCIIHexDigit), defaultContains}, + {srcBidi, 0, isBidiControl}, + {srcBidi, 0, isMirrored}, + {1, uchar.Mask(pDash), defaultContains}, + {1, uchar.Mask(pDefaultIgnorableCodePoint), defaultContains}, + {1, uchar.Mask(pDeprecated), defaultContains}, + {1, uchar.Mask(pDiacritic), defaultContains}, + {1, uchar.Mask(pExtender), defaultContains}, + {srcNfc, 0, hasFullCompositionExclusion}, + {1, uchar.Mask(pGraphemeBase), defaultContains}, + {1, uchar.Mask(pGraphemeExtend), defaultContains}, + {1, uchar.Mask(pGraphemeLink), defaultContains}, + {1, uchar.Mask(pHexDigit), defaultContains}, + {1, uchar.Mask(pHyphen), defaultContains}, + {1, uchar.Mask(pIDContinue), defaultContains}, + {1, uchar.Mask(pIDStart), defaultContains}, + {1, uchar.Mask(pIdeographic), defaultContains}, + {1, uchar.Mask(pIdsBinaryOperator), defaultContains}, + {1, 
uchar.Mask(pIdsTrinaryOperator), defaultContains}, + {srcBidi, 0, isJoinControl}, + {1, uchar.Mask(pLogicalOrderException), defaultContains}, + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_LOWERCASE + {1, uchar.Mask(pMath), defaultContains}, + {1, uchar.Mask(pNoncharacterCodePoint), defaultContains}, + {1, uchar.Mask(pQuotationMark), defaultContains}, + {1, uchar.Mask(pRadical), defaultContains}, + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_SOFT_DOTTED + {1, uchar.Mask(pTerminalPunctuation), defaultContains}, + {1, uchar.Mask(pUnifiedIdeograph), defaultContains}, + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_UPPERCASE + {1, uchar.Mask(pWhiteSpace), defaultContains}, + {1, uchar.Mask(pXidContinue), defaultContains}, + {1, uchar.Mask(pXidStart), defaultContains}, + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CASE_SENSITIVE + {1, uchar.Mask(pSTerm), defaultContains}, + {1, uchar.Mask(pVariationSelector), defaultContains}, + {srcNfc, 0, isNormInert}, // UCHAR_NFD_INERT + {srcNfkc, 0, isNormInert}, // UCHAR_NFKD_INERT + {srcNfc, 0, isNormInert}, // UCHAR_NFC_INERT + {srcNfkc, 0, isNormInert}, // UCHAR_NFKC_INERT + {srcNfcCanonIter, 0, nil}, // Segment_Starter is currently unsupported + {1, uchar.Mask(pPatternSyntax), defaultContains}, + {1, uchar.Mask(pPatternWhiteSpace), defaultContains}, + {srcCharAndPropsvec, 0, isPOSIXAlnum}, + {srcChar, 0, isPOSIXBlank}, + {srcChar, 0, isPOSIXGraph}, + {srcChar, 0, isPOSIXPrint}, + {srcChar, 0, isPOSIXXdigit}, + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CASED + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CASE_IGNORABLE + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CHANGES_WHEN_LOWERCASED + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CHANGES_WHEN_UPPERCASED + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CHANGES_WHEN_TITLECASED + {srcCaseAndNorm, 0, changesWhenCasefolded}, + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CHANGES_WHEN_CASEMAPPED + {srcNfkcCf, 0, nil}, 
// Changes_When_NFKC_Casefolded is currently unsupported + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EMOJI + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EMOJI_PRESENTATION + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EMOJI_MODIFIER + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EMOJI_MODIFIER_BASE + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EMOJI_COMPONENT + {2, 0, isRegionalIndicator}, + {1, uchar.Mask(pPrependedConcatenationMark), defaultContains}, + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EXTENDED_PICTOGRAPHIC + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_BASIC_EMOJI + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EMOJI_KEYCAP_SEQUENCE + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_RGI_EMOJI_MODIFIER_SEQUENCE + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_RGI_EMOJI_FLAG_SEQUENCE + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_RGI_EMOJI_TAG_SEQUENCE + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_RGI_EMOJI_ZWJ_SEQUENCE + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_RGI_EMOJI +} + +func isBidiControl(_ *binaryProperty, c rune, _ Property) bool { + return ubidi.IsBidiControl(c) +} + +func isMirrored(_ *binaryProperty, c rune, _ Property) bool { + return ubidi.IsMirrored(c) +} + +func isRegionalIndicator(_ *binaryProperty, c rune, _ Property) bool { + return 0x1F1E6 <= c && c <= 0x1F1FF +} + +func changesWhenCasefolded(_ *binaryProperty, c rune, _ Property) bool { + if c < 0 { + return false + } + + nfd := normalizer.Nfc().Decompose(c) + if nfd == nil { + nfd = []rune{c} + } + folded := ucase.FoldRunes(nfd) + return !slices.Equal(nfd, folded) +} + +func isPOSIXXdigit(_ *binaryProperty, c rune, _ Property) bool { + return uchar.IsXDigit(c) +} + +func isPOSIXPrint(_ *binaryProperty, c rune, _ Property) bool { + return uchar.IsPOSIXPrint(c) +} + +func isPOSIXGraph(_ *binaryProperty, c rune, _ Property) bool { + return uchar.IsGraphPOSIX(c) +} + +func isPOSIXBlank(_ *binaryProperty, c rune, _ Property) bool { + return uchar.IsBlank(c) +} + +func isPOSIXAlnum(_ *binaryProperty, c rune, _ Property) 
bool { + return (uchar.GetUnicodeProperties(c, 1)&uchar.Mask(pAlphabetic)) != 0 || uchar.IsDigit(c) +} + +func isJoinControl(_ *binaryProperty, c rune, _ Property) bool { + return ubidi.IsJoinControl(c) +} + +func hasFullCompositionExclusion(_ *binaryProperty, c rune, _ Property) bool { + impl := normalizer.Nfc() + return impl.IsCompNo(c) +} + +func caseBinaryPropertyContains(_ *binaryProperty, c rune, which Property) bool { + return HasBinaryPropertyUcase(c, which) +} + +func HasBinaryPropertyUcase(c rune, which Property) bool { + /* case mapping properties */ + switch which { + case UCharLowercase: + return ucase.Lower == ucase.GetType(c) + case UCharUppercase: + return ucase.Upper == ucase.GetType(c) + case UCharSoftDotted: + return ucase.IsSoftDotted(c) + case UCharCaseSensitive: + return ucase.IsCaseSensitive(c) + case UCharCased: + return ucase.None != ucase.GetType(c) + case UCharCaseIgnorable: + return (ucase.GetTypeOrIgnorable(c) >> 2) != 0 + /* + * Note: The following Changes_When_Xyz are defined as testing whether + * the NFD form of the input changes when Xyz-case-mapped. + * However, this simpler implementation of these properties, + * ignoring NFD, passes the tests. + * The implementation needs to be changed if the tests start failing. + * When that happens, optimizations should be used to work with the + * per-single-code point ucase_toFullXyz() functions unless + * the NFD form has more than one code point, + * and the property starts set needs to be the union of the + * start sets for normalization and case mappings. 
+ */ + case UCharChangesWhenLowercased: + return ucase.ToFullLower(c) >= 0 + case UCharChangesWhenUppercased: + return ucase.ToFullUpper(c) >= 0 + case UCharChangesWhenTitlecased: + return ucase.ToFullTitle(c) >= 0 + /* case UCHAR_CHANGES_WHEN_CASEFOLDED: -- in uprops.c */ + case UCharChangesWhenCasemapped: + return ucase.ToFullLower(c) >= 0 || ucase.ToFullUpper(c) >= 0 || ucase.ToFullTitle(c) >= 0 + default: + return false + } +} + +func isNormInert(_ *binaryProperty, c rune, which Property) bool { + mode := normalizer.Mode(int32(which) - int32(UCharNfdInert) + int32(normalizer.NormNfd)) + return normalizer.IsInert(c, mode) +} + +func HasBinaryProperty(c rune, which Property) bool { + if which < UCharBinaryStart || uCharBinaryLimit <= which { + return false + } + prop := binProps[which] + if prop.contains == nil { + return false + } + return prop.contains(prop, c, which) +} + +func hasEmojiProperty(_ *binaryProperty, c rune, which Property) bool { + if which < UCharEmoji || UCharRgiEmoji < which { + return false + } + return uemoji.HasBinaryProperty(c, int(which-UCharEmoji)) +} diff --git a/go/mysql/icuregex/internal/uprops/uprops_int.go b/go/mysql/icuregex/internal/uprops/uprops_int.go new file mode 100644 index 00000000000..3e62d31184f --- /dev/null +++ b/go/mysql/icuregex/internal/uprops/uprops_int.go @@ -0,0 +1,265 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uprops + +import ( + "vitess.io/vitess/go/mysql/icuregex/internal/normalizer" + "vitess.io/vitess/go/mysql/icuregex/internal/ubidi" + "vitess.io/vitess/go/mysql/icuregex/internal/uchar" + "vitess.io/vitess/go/mysql/icuregex/internal/ulayout" +) + +type intPropertyGetValue func(prop *intProperty, c rune, which Property) int32 + +type intProperty struct { + column propertySource + mask uint32 + shift int32 + getValue intPropertyGetValue +} + +const ( + blockMask = 0x0001ff00 + blockShift = 8 + + eaMask = 0x000e0000 + eaShift = 17 + + lbMask = 0x03f00000 + lbShift = 20 + + sbMask = 0x000f8000 + sbShift = 15 + + wbMask = 0x00007c00 + wbShift = 10 + + gcbMask = 0x000003e0 + gcbShift = 5 + + dtMask = 0x0000001f +) + +type numericType int32 + +/** + * Numeric Type constants. + * + * @see UCHAR_NUMERIC_TYPE + * @stable ICU 2.2 + */ +const ( + /* + * Note: UNumericType constants are parsed by preparseucd.py. + * It matches lines like + * U_NT_ + */ + + ntNone numericType = iota /*[None]*/ + ntDecimal /*[de]*/ + ntDigit /*[di]*/ + ntNumeric /*[nu]*/ + /** + * One more than the highest normal UNumericType value. + * The highest value is available via u_getIntPropertyMaxValue(UCHAR_NUMERIC_TYPE). + * + * @deprecated ICU 58 The numeric value may change over time, see ICU ticket #12420. + */ + ntCount +) + +/** + * Hangul Syllable Type constants. + * + * @see UCHAR_HANGUL_SYLLABLE_TYPE + * @stable ICU 2.6 + */ + +type hangunSyllableType int32 + +const ( + /* + * Note: UHangulSyllableType constants are parsed by preparseucd.py. 
+ * It matches lines like + * U_HST_ + */ + + hstNotApplicable hangunSyllableType = iota /*[NA]*/ + hstLeadingJamo /*[L]*/ + hstVowelJamo /*[V]*/ + hstTrailingJamo /*[T]*/ + hstLvSyllable /*[LV]*/ + hstLvtSyllable /*[LVT]*/ + /** + * One more than the highest normal UHangulSyllableType value. + * The highest value is available via u_getIntPropertyMaxValue(UCHAR_HANGUL_SYLLABLE_TYPE). + * + * @deprecated ICU 58 The numeric value may change over time, see ICU ticket #12420. + */ + hstCount +) + +var intProps = [uCharIntLimit - UCharIntStart]*intProperty{ + /* + * column, mask and shift values for int-value properties from u_getUnicodeProperties(). + * Must be in order of corresponding UProperty, + * and there must be exactly one entry per int UProperty. + * + * Properties with mask==0 are handled in code. + * For them, column is the UPropertySource value. + */ + {srcBidi, 0, 0, getBiDiClass}, + {0, blockMask, blockShift, defaultGetValue}, + {srcNfc, 0, 0xff, getCombiningClass}, + {2, dtMask, 0, defaultGetValue}, + {0, eaMask, eaShift, defaultGetValue}, + {srcChar, 0, int32(uchar.CharCategoryCount - 1), getGeneralCategory}, + {srcBidi, 0, 0, getJoiningGroup}, + {srcBidi, 0, 0, getJoiningType}, + {2, lbMask, lbShift, defaultGetValue}, + {srcChar, 0, int32(ntCount - 1), getNumericType}, + {srcPropsvec, 0, 0, getScript}, + {srcPropsvec, 0, int32(hstCount - 1), getHangulSyllableType}, + // UCHAR_NFD_QUICK_CHECK: max=1=YES -- never "maybe", only "no" or "yes" + {srcNfc, 0, int32(normalizer.Yes), getNormQuickCheck}, + // UCHAR_NFKD_QUICK_CHECK: max=1=YES -- never "maybe", only "no" or "yes" + {srcNfkc, 0, int32(normalizer.Yes), getNormQuickCheck}, + // UCHAR_NFC_QUICK_CHECK: max=2=MAYBE + {srcNfc, 0, int32(normalizer.Maybe), getNormQuickCheck}, + // UCHAR_NFKC_QUICK_CHECK: max=2=MAYBE + {srcNfkc, 0, int32(normalizer.Maybe), getNormQuickCheck}, + {srcNfc, 0, 0xff, getLeadCombiningClass}, + {srcNfc, 0, 0xff, getTrailCombiningClass}, + {2, gcbMask, gcbShift, defaultGetValue}, 
+ {2, sbMask, sbShift, defaultGetValue}, + {2, wbMask, wbShift, defaultGetValue}, + {srcBidi, 0, 0, getBiDiPairedBracketType}, + {srcInpc, 0, 0, getInPC}, + {srcInsc, 0, 0, getInSC}, + {srcVo, 0, 0, getVo}, +} + +func getVo(_ *intProperty, c rune, _ Property) int32 { + return int32(ulayout.VoTrie().Get(c)) +} + +func getInSC(_ *intProperty, c rune, _ Property) int32 { + return int32(ulayout.InscTrie().Get(c)) +} + +func getInPC(_ *intProperty, c rune, _ Property) int32 { + return int32(ulayout.InpcTrie().Get(c)) +} + +func getBiDiPairedBracketType(_ *intProperty, c rune, _ Property) int32 { + return int32(ubidi.PairedBracketType(c)) +} + +func getTrailCombiningClass(_ *intProperty, c rune, _ Property) int32 { + return int32(normalizer.Nfc().GetFCD16(c) & 0xff) +} + +func getLeadCombiningClass(_ *intProperty, c rune, _ Property) int32 { + val := int32(normalizer.Nfc().GetFCD16(c) >> 8) + return val +} + +func getNormQuickCheck(_ *intProperty, c rune, which Property) int32 { + return int32(normalizer.QuickCheck(c, normalizer.Mode(int32(which)-int32(UCharNfdQuickCheck)+int32(normalizer.NormNfd)))) +} + +/* + * Map some of the Grapheme Cluster Break values to Hangul Syllable Types. + * Hangul_Syllable_Type is fully redundant with a subset of Grapheme_Cluster_Break. + */ +var gcbToHst = []hangunSyllableType{ + hstNotApplicable, /* U_GCB_OTHER */ + hstNotApplicable, /* U_GCB_CONTROL */ + hstNotApplicable, /* U_GCB_CR */ + hstNotApplicable, /* U_GCB_EXTEND */ + hstLeadingJamo, /* U_GCB_L */ + hstNotApplicable, /* U_GCB_LF */ + hstLvSyllable, /* U_GCB_LV */ + hstLvtSyllable, /* U_GCB_LVT */ + hstTrailingJamo, /* U_GCB_T */ + hstVowelJamo, /* U_GCB_V */ + /* + * Omit GCB values beyond what we need for hst. + * The code below checks for the array length. 
+ */ +} + +func getHangulSyllableType(_ *intProperty, c rune, _ Property) int32 { + /* see comments on gcbToHst[] above */ + gcb := (int32(uchar.GetUnicodeProperties(c, 2)) & gcbMask) >> gcbShift + + if gcb < int32(len(gcbToHst)) { + return int32(gcbToHst[gcb]) + } + return int32(hstNotApplicable) +} + +func getScript(_ *intProperty, c rune, _ Property) int32 { + return script(c) +} + +func getNumericType(_ *intProperty, c rune, _ Property) int32 { + ntv := uchar.NumericTypeValue(c) + return int32(ntvGetType(ntv)) +} + +func getJoiningType(_ *intProperty, c rune, _ Property) int32 { + return int32(ubidi.JoinType(c)) +} + +func getJoiningGroup(_ *intProperty, c rune, _ Property) int32 { + return int32(ubidi.JoinGroup(c)) +} + +func getGeneralCategory(_ *intProperty, c rune, _ Property) int32 { + return int32(uchar.CharType(c)) +} + +func getCombiningClass(_ *intProperty, c rune, _ Property) int32 { + return int32(normalizer.Nfc().CombiningClass(c)) +} + +func defaultGetValue(prop *intProperty, c rune, _ Property) int32 { + return int32(uchar.GetUnicodeProperties(c, int(prop.column))&prop.mask) >> prop.shift +} + +func getBiDiClass(_ *intProperty, c rune, _ Property) int32 { + return int32(ubidi.Class(c)) +} + +func ntvGetType(ntv uint16) numericType { + switch { + case ntv == uchar.UPropsNtvNone: + return ntNone + case ntv < uchar.UPropsNtvDigitStart: + return ntDecimal + case ntv < uchar.UPropsNtvNumericStart: + return ntDigit + default: + return ntNumeric + } +} diff --git a/go/mysql/icuregex/internal/uprops/uscript.go b/go/mysql/icuregex/internal/uprops/uscript.go new file mode 100644 index 00000000000..8a4423849df --- /dev/null +++ b/go/mysql/icuregex/internal/uprops/uscript.go @@ -0,0 +1,505 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. 
+License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uprops + +import "vitess.io/vitess/go/mysql/icuregex/internal/uchar" + +/** + * Constants for ISO 15924 script codes. + * + * The current set of script code constants supports at least all scripts + * that are encoded in the version of Unicode which ICU currently supports. + * The names of the constants are usually derived from the + * Unicode script property value aliases. + * See UAX #24 Unicode Script Property (http://www.unicode.org/reports/tr24/) + * and http://www.unicode.org/Public/UCD/latest/ucd/PropertyValueAliases.txt . + * + * In addition, constants for many ISO 15924 script codes + * are included, for use with language tags, CLDR data, and similar. + * Some of those codes are not used in the Unicode Character Database (UCD). + * For example, there are no characters that have a UCD script property value of + * Hans or Hant. All Han ideographs have the Hani script property value in Unicode. + * + * Private-use codes Qaaa..Qabx are not included, except as used in the UCD or in CLDR. + * + * Starting with ICU 55, script codes are only added when their scripts + * have been or will certainly be encoded in Unicode, + * and have been assigned Unicode script property value aliases, + * to ensure that their script names are stable and match the names of the constants. 
+ * Script codes like Latf and Aran that are not subject to separate encoding
+ * may be added at any time.
+ *
+ * @stable ICU 2.2
+ */
+type code int32
+
+/*
+ * Note: UScriptCode constants and their ISO script code comments
+ * are parsed by preparseucd.py.
+ * It matches lines like
+ * USCRIPT_<Unicode Script value name> = <integer>, / * <ISO script code> * /
+ */
+
+const (
+ /** @stable ICU 2.2 */
+ invalidCode code = -1
+ /** @stable ICU 2.2 */
+ common code = 0 /* Zyyy */
+ /** @stable ICU 2.2 */
+ inherited code = 1 /* Zinh */ /* "Code for inherited script", for non-spacing combining marks; also Qaai */
+ /** @stable ICU 2.2 */
+ arabic code = 2 /* Arab */
+ /** @stable ICU 2.2 */
+ armenian code = 3 /* Armn */
+ /** @stable ICU 2.2 */
+ bengali code = 4 /* Beng */
+ /** @stable ICU 2.2 */
+ bopomofo code = 5 /* Bopo */
+ /** @stable ICU 2.2 */
+ cherokee code = 6 /* Cher */
+ /** @stable ICU 2.2 */
+ coptic code = 7 /* Copt */
+ /** @stable ICU 2.2 */
+ cyrillic code = 8 /* Cyrl */
+ /** @stable ICU 2.2 */
+ deseret code = 9 /* Dsrt */
+ /** @stable ICU 2.2 */
+ devanagari code = 10 /* Deva */
+ /** @stable ICU 2.2 */
+ ethiopic code = 11 /* Ethi */
+ /** @stable ICU 2.2 */
+ georgian code = 12 /* Geor */
+ /** @stable ICU 2.2 */
+ gothic code = 13 /* Goth */
+ /** @stable ICU 2.2 */
+ greek code = 14 /* Grek */
+ /** @stable ICU 2.2 */
+ gujarati code = 15 /* Gujr */
+ /** @stable ICU 2.2 */
+ gurmukhi code = 16 /* Guru */
+ /** @stable ICU 2.2 */
+ han code = 17 /* Hani */
+ /** @stable ICU 2.2 */
+ hangul code = 18 /* Hang */
+ /** @stable ICU 2.2 */
+ hebrew code = 19 /* Hebr */
+ /** @stable ICU 2.2 */
+ hiragana code = 20 /* Hira */
+ /** @stable ICU 2.2 */
+ kannada code = 21 /* Knda */
+ /** @stable ICU 2.2 */
+ katakana code = 22 /* Kana */
+ /** @stable ICU 2.2 */
+ khmer code = 23 /* Khmr */
+ /** @stable ICU 2.2 */
+ lao code = 24 /* Laoo */
+ /** @stable ICU 2.2 */
+ latin code = 25 /* Latn */
+ /** @stable ICU 2.2 */
+ malayalam code = 26 /* Mlym */
+ /** @stable ICU 2.2 */
+ mongolian code =
27 /* Mong */ + /** @stable ICU 2.2 */ + myanmar code = 28 /* Mymr */ + /** @stable ICU 2.2 */ + ogham code = 29 /* Ogam */ + /** @stable ICU 2.2 */ + oldItalic code = 30 /* Ital */ + /** @stable ICU 2.2 */ + oriya code = 31 /* Orya */ + /** @stable ICU 2.2 */ + runic code = 32 /* Runr */ + /** @stable ICU 2.2 */ + sinhala code = 33 /* Sinh */ + /** @stable ICU 2.2 */ + syriac code = 34 /* Syrc */ + /** @stable ICU 2.2 */ + tamil code = 35 /* Taml */ + /** @stable ICU 2.2 */ + telugu code = 36 /* Telu */ + /** @stable ICU 2.2 */ + thaana code = 37 /* Thaa */ + /** @stable ICU 2.2 */ + thai code = 38 /* Thai */ + /** @stable ICU 2.2 */ + tibetan code = 39 /* Tibt */ + /** Canadian_Aboriginal script. @stable ICU 2.6 */ + canadianAboriginal code = 40 /* Cans */ + /** Canadian_Aboriginal script (alias). @stable ICU 2.2 */ + ucas code = canadianAboriginal + /** @stable ICU 2.2 */ + yi code = 41 /* Yiii */ + /* New scripts in Unicode 3.2 */ + /** @stable ICU 2.2 */ + tagalog code = 42 /* Tglg */ + /** @stable ICU 2.2 */ + hanunoo code = 43 /* Hano */ + /** @stable ICU 2.2 */ + buhid code = 44 /* Buhd */ + /** @stable ICU 2.2 */ + tagbanwa code = 45 /* Tagb */ + + /* New scripts in Unicode 4 */ + /** @stable ICU 2.6 */ + braille code = 46 /* Brai */ + /** @stable ICU 2.6 */ + cypriot code = 47 /* Cprt */ + /** @stable ICU 2.6 */ + limbu code = 48 /* Limb */ + /** @stable ICU 2.6 */ + linearB code = 49 /* Linb */ + /** @stable ICU 2.6 */ + osmanya code = 50 /* Osma */ + /** @stable ICU 2.6 */ + shavian code = 51 /* Shaw */ + /** @stable ICU 2.6 */ + taiLe code = 52 /* Tale */ + /** @stable ICU 2.6 */ + ugaratic code = 53 /* Ugar */ + + /** New script code in Unicode 4.0.1 @stable ICU 3.0 */ + katakanaOrHiragana = 54 /*Hrkt */ + + /* New scripts in Unicode 4.1 */ + /** @stable ICU 3.4 */ + buginese code = 55 /* Bugi */ + /** @stable ICU 3.4 */ + glagolitic code = 56 /* Glag */ + /** @stable ICU 3.4 */ + kharoshthi code = 57 /* Khar */ + /** @stable ICU 3.4 */ + sylotiNagri 
code = 58 /* Sylo */ + /** @stable ICU 3.4 */ + newTaiLue code = 59 /* Talu */ + /** @stable ICU 3.4 */ + tifinagh code = 60 /* Tfng */ + /** @stable ICU 3.4 */ + oldPersian code = 61 /* Xpeo */ + + /* New script codes from Unicode and ISO 15924 */ + /** @stable ICU 3.6 */ + balinese code = 62 /* Bali */ + /** @stable ICU 3.6 */ + batak code = 63 /* Batk */ + /** @stable ICU 3.6 */ + blissymbols code = 64 /* Blis */ + /** @stable ICU 3.6 */ + brahmi code = 65 /* Brah */ + /** @stable ICU 3.6 */ + cham code = 66 /* Cham */ + /** @stable ICU 3.6 */ + cirth code = 67 /* Cirt */ + /** @stable ICU 3.6 */ + oldChurchSlavonicCyrillic code = 68 /* Cyrs */ + /** @stable ICU 3.6 */ + demoticEgyptian code = 69 /* Egyd */ + /** @stable ICU 3.6 */ + hieraticEgyptian code = 70 /* Egyh */ + /** @stable ICU 3.6 */ + egyptianHieroglyphs code = 71 /* Egyp */ + /** @stable ICU 3.6 */ + khutsuri code = 72 /* Geok */ + /** @stable ICU 3.6 */ + simplfiedHan code = 73 /* Hans */ + /** @stable ICU 3.6 */ + traditionalHan code = 74 /* Hant */ + /** @stable ICU 3.6 */ + pahawhHmong code = 75 /* Hmng */ + /** @stable ICU 3.6 */ + oldHungarian code = 76 /* Hung */ + /** @stable ICU 3.6 */ + harappanIndus code = 77 /* Inds */ + /** @stable ICU 3.6 */ + javanese code = 78 /* Java */ + /** @stable ICU 3.6 */ + kayahLi code = 79 /* Kali */ + /** @stable ICU 3.6 */ + latinFraktur code = 80 /* Latf */ + /** @stable ICU 3.6 */ + latinGaelic code = 81 /* Latg */ + /** @stable ICU 3.6 */ + lepcha code = 82 /* Lepc */ + /** @stable ICU 3.6 */ + linearA code = 83 /* Lina */ + /** @stable ICU 4.6 */ + mandaic code = 84 /* Mand */ + /** @stable ICU 3.6 */ + mandaean code = mandaic + /** @stable ICU 3.6 */ + mayanHieroglyphs code = 85 /* Maya */ + /** @stable ICU 4.6 */ + meroiticHieroglyphs code = 86 /* Mero */ + /** @stable ICU 3.6 */ + meroitic code = meroiticHieroglyphs + /** @stable ICU 3.6 */ + nko code = 87 /* Nkoo */ + /** @stable ICU 3.6 */ + orkhon code = 88 /* Orkh */ + /** @stable ICU 3.6 */ + 
oldPermic code = 89 /* Perm */ + /** @stable ICU 3.6 */ + phagsPa code = 90 /* Phag */ + /** @stable ICU 3.6 */ + phoenician code = 91 /* Phnx */ + /** @stable ICU 52 */ + miao code = 92 /* Plrd */ + /** @stable ICU 3.6 */ + phoneticPollard code = miao + /** @stable ICU 3.6 */ + rongoRongo code = 93 /* Roro */ + /** @stable ICU 3.6 */ + sarati code = 94 /* Sara */ + /** @stable ICU 3.6 */ + extrangeloSyriac code = 95 /* Syre */ + /** @stable ICU 3.6 */ + westernSyriac code = 96 /* Syrj */ + /** @stable ICU 3.6 */ + easternSyriac code = 97 /* Syrn */ + /** @stable ICU 3.6 */ + tengwar code = 98 /* Teng */ + /** @stable ICU 3.6 */ + vai code = 99 /* Vaii */ + /** @stable ICU 3.6 */ + visibleSpeech code = 100 /* Visp */ + /** @stable ICU 3.6 */ + cuneiform code = 101 /* Xsux */ + /** @stable ICU 3.6 */ + unwrittenLanguages code = 102 /* Zxxx */ + /** @stable ICU 3.6 */ + unknown code = 103 /* Zzzz */ /* Unknown="Code for uncoded script", for unassigned code points */ + + /** @stable ICU 3.8 */ + carian code = 104 /* Cari */ + /** @stable ICU 3.8 */ + japanese code = 105 /* Jpan */ + /** @stable ICU 3.8 */ + lanna code = 106 /* Lana */ + /** @stable ICU 3.8 */ + lycian code = 107 /* Lyci */ + /** @stable ICU 3.8 */ + lydian code = 108 /* Lydi */ + /** @stable ICU 3.8 */ + olChiki code = 109 /* Olck */ + /** @stable ICU 3.8 */ + rejang code = 110 /* Rjng */ + /** @stable ICU 3.8 */ + saurashtra code = 111 /* Saur */ + /** Sutton SignWriting @stable ICU 3.8 */ + signWriting code = 112 /* Sgnw */ + /** @stable ICU 3.8 */ + sundanese code = 113 /* Sund */ + /** @stable ICU 3.8 */ + moon code = 114 /* Moon */ + /** @stable ICU 3.8 */ + meiteiMayek code = 115 /* Mtei */ + + /** @stable ICU 4.0 */ + imperialAramaic code = 116 /* Armi */ + /** @stable ICU 4.0 */ + avestan code = 117 /* Avst */ + /** @stable ICU 4.0 */ + chakma code = 118 /* Cakm */ + /** @stable ICU 4.0 */ + korean code = 119 /* Kore */ + /** @stable ICU 4.0 */ + kaithi code = 120 /* Kthi */ + /** @stable ICU 
4.0 */ + manichaean code = 121 /* Mani */ + /** @stable ICU 4.0 */ + inscriptionalPahlavi code = 122 /* Phli */ + /** @stable ICU 4.0 */ + psalterPahlavi code = 123 /* Phlp */ + /** @stable ICU 4.0 */ + bookPahlavi code = 124 /* Phlv */ + /** @stable ICU 4.0 */ + inscriptionalParthian code = 125 /* Prti */ + /** @stable ICU 4.0 */ + samaritan code = 126 /* Samr */ + /** @stable ICU 4.0 */ + taiViet code = 127 /* Tavt */ + /** @stable ICU 4.0 */ + mathematicalNotation code = 128 /* Zmth */ + /** @stable ICU 4.0 */ + symbols code = 129 /* Zsym */ + + /** @stable ICU 4.4 */ + bamum code = 130 /* Bamu */ + /** @stable ICU 4.4 */ + lisu code = 131 /* Lisu */ + /** @stable ICU 4.4 */ + nakhiGeba code = 132 /* Nkgb */ + /** @stable ICU 4.4 */ + oldSouthArabian code = 133 /* Sarb */ + + /** @stable ICU 4.6 */ + bassaVah code = 134 /* Bass */ + /** @stable ICU 54 */ + duployan code = 135 /* Dupl */ + /** @stable ICU 4.6 */ + elbasan code = 136 /* Elba */ + /** @stable ICU 4.6 */ + grantha code = 137 /* Gran */ + /** @stable ICU 4.6 */ + kpelle code = 138 /* Kpel */ + /** @stable ICU 4.6 */ + loma code = 139 /* Loma */ + /** Mende Kikakui @stable ICU 4.6 */ + mende code = 140 /* Mend */ + /** @stable ICU 4.6 */ + meroiticCursive code = 141 /* Merc */ + /** @stable ICU 4.6 */ + oldNorthArabian code = 142 /* Narb */ + /** @stable ICU 4.6 */ + nabataean code = 143 /* Nbat */ + /** @stable ICU 4.6 */ + palmyrene code = 144 /* Palm */ + /** @stable ICU 54 */ + khudawadi code = 145 /* Sind */ + /** @stable ICU 4.6 */ + sindhi code = khudawadi + /** @stable ICU 4.6 */ + warangCiti code = 146 /* Wara */ + + /** @stable ICU 4.8 */ + afaka code = 147 /* Afak */ + /** @stable ICU 4.8 */ + jurchen code = 148 /* Jurc */ + /** @stable ICU 4.8 */ + mro code = 149 /* Mroo */ + /** @stable ICU 4.8 */ + nushu code = 150 /* Nshu */ + /** @stable ICU 4.8 */ + sharada code = 151 /* Shrd */ + /** @stable ICU 4.8 */ + soraSompeng code = 152 /* Sora */ + /** @stable ICU 4.8 */ + takri code = 153 /* 
Takr */ + /** @stable ICU 4.8 */ + tangut code = 154 /* Tang */ + /** @stable ICU 4.8 */ + woleai code = 155 /* Wole */ + + /** @stable ICU 49 */ + anatolianHieroglyphs code = 156 /* Hluw */ + /** @stable ICU 49 */ + khojki code = 157 /* Khoj */ + /** @stable ICU 49 */ + tirhuta code = 158 /* Tirh */ + + /** @stable ICU 52 */ + caucasianAlbanian code = 159 /* Aghb */ + /** @stable ICU 52 */ + mahajani code = 160 /* Mahj */ + + /** @stable ICU 54 */ + ahom code = 161 /* Ahom */ + /** @stable ICU 54 */ + hatran code = 162 /* Hatr */ + /** @stable ICU 54 */ + modi code = 163 /* Modi */ + /** @stable ICU 54 */ + multani code = 164 /* Mult */ + /** @stable ICU 54 */ + pauCinHau code = 165 /* Pauc */ + /** @stable ICU 54 */ + siddham code = 166 /* Sidd */ + + /** @stable ICU 58 */ + adlam code = 167 /* Adlm */ + /** @stable ICU 58 */ + bhaiksuki code = 168 /* Bhks */ + /** @stable ICU 58 */ + marchen code = 169 /* Marc */ + /** @stable ICU 58 */ + newa code = 170 /* Newa */ + /** @stable ICU 58 */ + osage code = 171 /* Osge */ + + /** @stable ICU 58 */ + hanWithBopomofo code = 172 /* Hanb */ + /** @stable ICU 58 */ + jamo code = 173 /* Jamo */ + /** @stable ICU 58 */ + symbolsEmoji code = 174 /* Zsye */ + + /** @stable ICU 60 */ + masaramGondi code = 175 /* Gonm */ + /** @stable ICU 60 */ + soyombo code = 176 /* Soyo */ + /** @stable ICU 60 */ + zanabazarSquare code = 177 /* Zanb */ + + /** @stable ICU 62 */ + dogra code = 178 /* Dogr */ + /** @stable ICU 62 */ + gunjalaGondi code = 179 /* Gong */ + /** @stable ICU 62 */ + makasar code = 180 /* Maka */ + /** @stable ICU 62 */ + medefaidrin code = 181 /* Medf */ + /** @stable ICU 62 */ + hanifiRohingya code = 182 /* Rohg */ + /** @stable ICU 62 */ + sogdian code = 183 /* Sogd */ + /** @stable ICU 62 */ + oldSogdian code = 184 /* Sogo */ + + /** @stable ICU 64 */ + elymaic code = 185 /* Elym */ + /** @stable ICU 64 */ + nyiakengPuachueHmong code = 186 /* Hmnp */ + /** @stable ICU 64 */ + nandinagari code = 187 /* Nand */ + 
/** @stable ICU 64 */ + wancho code = 188 /* Wcho */ + + /** @stable ICU 66 */ + chorasmian code = 189 /* Chrs */ + /** @stable ICU 66 */ + divesAkuru code = 190 /* Diak */ + /** @stable ICU 66 */ + khitanSmallScript code = 191 /* Kits */ + /** @stable ICU 66 */ + yezedi code = 192 /* Yezi */ +) + +func uscriptHasScript(c rune, sc code) bool { + scriptX := uchar.GetUnicodeProperties(c, 0) & scriptXMask + codeOrIndex := mergeScriptCodeOrIndex(scriptX) + if scriptX < scriptXWithCommon { + return sc == code(codeOrIndex) + } + + scx := uchar.ScriptExtensions(codeOrIndex) + if scriptX >= scriptXWithOther { + scx = uchar.ScriptExtensions(uint32(scx[1])) + } + sc32 := uint32(sc) + if sc32 > 0x7fff { + /* Guard against bogus input that would make us go past the Script_Extensions terminator. */ + return false + } + for sc32 > uint32(scx[0]) { + scx = scx[1:] + } + return sc32 == uint32(scx[0]&0x7fff) +} diff --git a/go/mysql/icuregex/internal/uset/close.go b/go/mysql/icuregex/internal/uset/close.go new file mode 100644 index 00000000000..bd3f9f0f7e3 --- /dev/null +++ b/go/mysql/icuregex/internal/uset/close.go @@ -0,0 +1,96 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package uset + +import "vitess.io/vitess/go/mysql/icuregex/internal/ucase" + +type USet uint32 + +const ( + /** + * Ignore white space within patterns unless quoted or escaped. + * @stable ICU 2.4 + */ + IgnoreSpace USet = 1 + + /** + * Enable case insensitive matching. E.g., "[ab]" with this flag + * will match 'a', 'A', 'b', and 'B'. "[^ab]" with this flag will + * match all except 'a', 'A', 'b', and 'B'. This performs a full + * closure over case mappings, e.g. U+017F for s. + * + * The resulting set is a superset of the input for the code points but + * not for the strings. + * It performs a case mapping closure of the code points and adds + * full case folding strings for the code points, and reduces strings of + * the original set to their full case folding equivalents. + * + * This is designed for case-insensitive matches, for example + * in regular expressions. The full code point case closure allows checking of + * an input character directly against the closure set. + * Strings are matched by comparing the case-folded form from the closure + * set with an incremental case folding of the string in question. + * + * The closure set will also contain single code points if the original + * set contained case-equivalent strings (like U+00DF for "ss" or "Ss" etc.). + * This is not necessary (that is, redundant) for the above matching method + * but results in the same closure sets regardless of whether the original + * set contained the code point or a string. + * + * @stable ICU 2.4 + */ + CaseInsensitive USet = 2 + + /** + * Enable case insensitive matching. E.g., "[ab]" with this flag + * will match 'a', 'A', 'b', and 'B'. "[^ab]" with this flag will + * match all except 'a', 'A', 'b', and 'B'. This adds the lower-, + * title-, and uppercase mappings as well as the case folding + * of each existing element in the set. 
+ * @stable ICU 3.2 + */ + AddCaseMappings USet = 4 +) + +func (u *UnicodeSet) CloseOver(attribute USet) { + if attribute&AddCaseMappings != 0 { + panic("USET_ADD_CASE_MAPPINGS is unsupported") + } + if (attribute & CaseInsensitive) == 0 { + return + } + + foldSet := u.Clone() + n := u.RangeCount() + + for i := 0; i < n; i++ { + start := u.RangeStart(i) + end := u.RangeEnd(i) + + // full case closure + for cp := start; cp <= end; cp++ { + ucase.AddCaseClosure(cp, foldSet) + } + } + + *u = *foldSet +} diff --git a/go/mysql/icuregex/internal/uset/frozen.go b/go/mysql/icuregex/internal/uset/frozen.go new file mode 100644 index 00000000000..2703a4f6975 --- /dev/null +++ b/go/mysql/icuregex/internal/uset/frozen.go @@ -0,0 +1,339 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uset + +type frozen struct { + // One byte 0 or 1 per Latin-1 character. + latin1Contains [0x100]byte + + // true if contains(U+FFFD) + containsFFFD bool + + /* + * One bit per code point from U+0000..U+07FF. + * The bits are organized vertically; consecutive code points + * correspond to the same bit positions in consecutive table words. 
+ * With code point parts + * lead=c{10..6} + * trail=c{5..0} + * it is set.contains(c)==(table7FF[trail] bit lead) + * + * Bits for 0..7F (non-shortest forms) are set to the result of contains(FFFD) + * for faster validity checking at runtime. + */ + table7FF [64]uint32 + + /* + * One bit per 64 BMP code points. + * The bits are organized vertically; consecutive 64-code point blocks + * correspond to the same bit position in consecutive table words. + * With code point parts + * lead=c{15..12} + * t1=c{11..6} + * test bits (lead+16) and lead in bmpBlockBits[t1]. + * If the upper bit is 0, then the lower bit indicates if contains(c) + * for all code points in the 64-block. + * If the upper bit is 1, then the block is mixed and set.contains(c) + * must be called. + * + * Bits for 0..7FF (non-shortest forms) and D800..DFFF are set to + * the result of contains(FFFD) for faster validity checking at runtime. + */ + bmpBlockBits [64]uint32 + + /* + * Inversion list indexes for restricted binary searches in + * findCodePoint(), from + * findCodePoint(U+0800, U+1000, U+2000, .., U+F000, U+10000). + * U+0800 is the first 3-byte-UTF-8 code point. Code points below U+0800 are + * always looked up in the bit tables. + * The last pair of indexes is for finding supplementary code points. 
+ */ + list4kStarts [18]int32 +} + +func freeze(list []rune) *frozen { + f := &frozen{} + + listEnd := int32(len(list) - 1) + + f.list4kStarts[0] = f.findCodePoint(list, 0x800, 0, listEnd) + for i := 1; i <= 0x10; i++ { + f.list4kStarts[i] = f.findCodePoint(list, rune(i)<<12, f.list4kStarts[i-1], listEnd) + } + f.list4kStarts[0x11] = listEnd + f.containsFFFD = f.containsSlow(list, 0xfffd, f.list4kStarts[0xf], f.list4kStarts[0x10]) + + f.initBits(list) + f.overrideIllegal() + + return f +} + +func (f *frozen) containsSlow(list []rune, c rune, lo, hi int32) bool { + return (f.findCodePoint(list, c, lo, hi) & 1) != 0 +} + +func (f *frozen) findCodePoint(list []rune, c rune, lo, hi int32) int32 { + /* Examples: + findCodePoint(c) + set list[] c=0 1 3 4 7 8 + === ============== =========== + [] [110000] 0 0 0 0 0 0 + [\u0000-\u0003] [0, 4, 110000] 1 1 1 2 2 2 + [\u0004-\u0007] [4, 8, 110000] 0 0 0 1 1 2 + [:Any:] [0, 110000] 1 1 1 1 1 1 + */ + + // Return the smallest i such that c < list[i]. Assume + // list[len - 1] == HIGH and that c is legal (0..HIGH-1). + if c < list[lo] { + return lo + } + // High runner test. c is often after the last range, so an + // initial check for this condition pays off. + if lo >= hi || c >= list[hi-1] { + return hi + } + // invariant: c >= list[lo] + // invariant: c < list[hi] + for { + i := (lo + hi) >> 1 + if i == lo { + break // Found! + } else if c < list[i] { + hi = i + } else { + lo = i + } + } + return hi +} + +func (f *frozen) set32x64bits(table *[64]uint32, start, limit int32) { + lead := start >> 6 // Named for UTF-8 2-byte lead byte with upper 5 bits. + trail := start & 0x3f // Named for UTF-8 2-byte trail byte with lower 6 bits. + + // Set one bit indicating an all-one block. + bits := uint32(1) << lead + if (start + 1) == limit { // Single-character shortcut. + table[trail] |= bits + return + } + + limitLead := limit >> 6 + limitTrail := limit & 0x3f + + if lead == limitLead { + // Partial vertical bit column. 
+ for trail < limitTrail {
+ table[trail] |= bits
+ trail++
+ }
+ } else {
+ // Partial vertical bit column,
+ // followed by a bit rectangle,
+ // followed by another partial vertical bit column.
+ if trail > 0 {
+ for {
+ table[trail] |= bits
+ trail++
+ if trail >= 64 {
+ break
+ }
+ }
+ lead++
+ }
+ if lead < limitLead {
+ bits = ^((uint32(1) << lead) - 1)
+ if limitLead < 0x20 {
+ bits &= (uint32(1) << limitLead) - 1
+ }
+ for trail = 0; trail < 64; trail++ {
+ table[trail] |= bits
+ }
+ }
+ // limit<=0x800. If limit==0x800 then limitLead=32 and limitTrail=0.
+ // In that case, bits=1<<limitLead is undefined but the bits value
+ // to be ORed is 0 so it does not matter.
+ if limitTrail > 0 {
+ bits = uint32(1) << limitLead
+ for trail = 0; trail < limitTrail; trail++ {
+ table[trail] |= bits
+ }
+ }
+ }
+}
+
+// NOTE(review): the span from here through the head of initBits was lost in
+// extraction; reconstructed from ICU4C BMPSet — verify against the upstream file.
+func (f *frozen) overrideIllegal() {
+ // Override some bits and bytes to the result of contains(FFFD)
+ // for faster validity checking at runtime.
+ var bits, mask uint32
+ var i int
+
+ if f.containsFFFD {
+ bits = 3 // Lead bytes 0xC0 and 0xC1.
+ for i = 0; i < 64; i++ {
+ f.table7FF[i] |= bits
+ }
+
+ bits = 1 // Lead byte 0xE0.
+ for i = 0; i < 32; i++ { // First half of 4k block.
+ f.bmpBlockBits[i] |= bits
+ }
+
+ mask = ^(uint32(0x10001) << 0xd) // Lead byte 0xED.
+ bits = uint32(1) << 0xd
+ for i = 32; i < 64; i++ { // Second half of 4k block.
+ f.bmpBlockBits[i] = (f.bmpBlockBits[i] & mask) | bits
+ }
+ } else {
+ mask = ^(uint32(0x10001) << 0xd) // Lead byte 0xED.
+ for i = 32; i < 64; i++ { // Second half of 4k block.
+ f.bmpBlockBits[i] &= mask
+ }
+ }
+}
+
+func (f *frozen) initBits(list []rune) {
+ var start, limit rune
+ listIndex := 0
+
+ // Set latin1Contains[].
+ for {
+ start = list[listIndex]
+ listIndex++
+ if listIndex < len(list) {
+ limit = list[listIndex]
+ listIndex++
+ } else {
+ limit = 0x110000
+ }
+ if start >= 0x100 {
+ break
+ }
+ for {
+ f.latin1Contains[start] = 1
+ start++
+ if start >= limit || start >= 0x100 {
+ break
+ }
+ }
+ if limit > 0x100 {
+ break
+ }
+ }
+
+ // Find the first range overlapping with (or after) 80..FF again,
+ // to include them in table7FF as well.
+ listIndex = 0
+ for {
+ start = list[listIndex]
+ listIndex++
+ if listIndex < len(list) {
+ limit = list[listIndex]
+ listIndex++
+ } else {
+ limit = 0x110000
+ }
+ if limit > 0x80 {
+ if start < 0x80 {
+ start = 0x80
+ }
+ break
+ }
+ }
+
+ // Set table7FF[].
+ for start < 0x800 {
+ var end rune
+ if limit <= 0x800 {
+ end = limit
+ } else {
+ end = 0x800
+ }
+ f.set32x64bits(&f.table7FF, start, end)
+ if limit > 0x800 {
+ start = 0x800
+ break
+ }
+
+ start = list[listIndex]
+ listIndex++
+ if listIndex < len(list) {
+ limit = list[listIndex]
+ listIndex++
+ } else {
+ limit = 0x110000
+ }
+ }
+
+ // Set bmpBlockBits[].
+ minStart := rune(0x800)
+ for start < 0x10000 {
+ if limit > 0x10000 {
+ limit = 0x10000
+ }
+
+ if start < minStart {
+ start = minStart
+ }
+ if start < limit { // Else: Another range entirely in a known mixed-value block.
+ if (start & 0x3f) != 0 {
+ // Mixed-value block of 64 code points.
+ start >>= 6
+ f.bmpBlockBits[start&0x3f] |= 0x10001 << (start >> 6)
+ start = (start + 1) << 6 // Round up to the next block boundary.
+ minStart = start // Ignore further ranges in this block.
+ } + if start < limit { + if start < (limit &^ 0x3f) { + // Multiple all-ones blocks of 64 code points each. + f.set32x64bits(&f.bmpBlockBits, start>>6, limit>>6) + } + + if (limit & 0x3f) != 0 { + // Mixed-value block of 64 code points. + limit >>= 6 + f.bmpBlockBits[limit&0x3f] |= 0x10001 << (limit >> 6) + limit = (limit + 1) << 6 // Round up to the next block boundary. + minStart = limit // Ignore further ranges in this block. + } + } + } + + if limit == 0x10000 { + break + } + + start = list[listIndex] + listIndex++ + if listIndex < len(list) { + limit = list[listIndex] + listIndex++ + } else { + limit = 0x110000 + } + } +} diff --git a/go/mysql/icuregex/internal/uset/pattern.go b/go/mysql/icuregex/internal/uset/pattern.go new file mode 100644 index 00000000000..20b44da9c6d --- /dev/null +++ b/go/mysql/icuregex/internal/uset/pattern.go @@ -0,0 +1,107 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package uset
+
+import (
+ "strings"
+
+ "vitess.io/vitess/go/mysql/icuregex/internal/pattern"
+)
+
+func (u *UnicodeSet) String() string {
+ var buf strings.Builder
+ u.ToPattern(&buf, true)
+ return buf.String()
+}
+
+func (u *UnicodeSet) ToPattern(w *strings.Builder, escapeUnprintable bool) {
+ w.WriteByte('[')
+
+ // // Check against the predefined categories. We implicitly build
+ // // up ALL category sets the first time toPattern() is called.
+ // for (int8_t cat=0; cat<Unicode::CATEGORY_COUNT; ++cat) {
+ // ...
+ // }
+
+ count := u.RangeCount()
+
+ // If the set contains at least 2 intervals and includes both
+ // MIN_VALUE and MAX_VALUE, then the inverse representation will
+ // be more economical.
+ if count > 1 && u.RangeStart(0) == MinValue && u.RangeEnd(count-1) == MaxValue {
+
+ // Emit the inverse
+ w.WriteByte('^')
+
+ for i := 1; i < count; i++ {
+ start := u.RangeEnd(i-1) + 1
+ end := u.RangeStart(i) - 1
+ u.appendToPattern(w, start, escapeUnprintable)
+ if start != end {
+ if (start + 1) != end {
+ w.WriteByte('-')
+ }
+ u.appendToPattern(w, end, escapeUnprintable)
+ }
+ }
+ } else {
+ // Default; emit the ranges as pairs
+ for i := 0; i < count; i++ {
+ start := u.RangeStart(i)
+ end := u.RangeEnd(i)
+ u.appendToPattern(w, start, escapeUnprintable)
+ if start != end {
+ if (start + 1) != end {
+ w.WriteByte('-')
+ }
+ u.appendToPattern(w, end, escapeUnprintable)
+ }
+ }
+ }
+
+ w.WriteByte(']')
+}
+
+func (u *UnicodeSet) appendToPattern(w *strings.Builder, c rune, escapeUnprintable bool) {
+ if escapeUnprintable && pattern.IsUnprintable(c) {
+ // Use hex escape notation (\uxxxx or \Uxxxxxxxx) for anything
+ // unprintable
+ pattern.EscapeUnprintable(w, c)
+ return
+ }
+
+ // Okay to let ':' pass through
+ switch c {
+ case '[', ']', '-', '^', '&', '\\', '{', '}', ':', '$':
+ w.WriteByte('\\')
+ default:
+ // Escape whitespace
+ if pattern.IsWhitespace(c) {
+ w.WriteByte('\\')
+ }
+ }
+ w.WriteRune(c)
+}
diff --git a/go/mysql/icuregex/internal/uset/unicode_set.go b/go/mysql/icuregex/internal/uset/unicode_set.go
new file mode 100644
index 00000000000..e2f7bd8cbca
--- /dev/null
+++ b/go/mysql/icuregex/internal/uset/unicode_set.go
@@ -0,0 +1,686 @@
+/*
+© 2016 and later: Unicode, Inc.
and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uset + +import ( + "fmt" + "slices" +) + +// HIGH_VALUE > all valid values. 110000 for codepoints +const unicodeSetHigh = 0x0110000 + +// LOW <= all valid values. ZERO for codepoints +const unicodeSetLow = 0x000000 + +const ( + /** + * Minimum value that can be stored in a UnicodeSet. + * @stable ICU 2.4 + */ + MinValue = 0 + + /** + * Maximum value that can be stored in a UnicodeSet. 
 * @stable ICU 2.4
 */
MaxValue = 0x10ffff
)

// UnicodeSet is a mutable set of Unicode code points, stored as a sorted
// inversion list of range boundaries:
//   [start_0, limit_0, start_1, limit_1, ..., HIGH]
// where HIGH is unicodeSetHigh (0x110000). A code point c is in the set iff
// the smallest i with c < list[i] is odd. buffer is scratch space reused by
// the merge operations; frozen, when non-nil, holds lookup tables for fast
// read-only membership tests (see ContainsRune).
type UnicodeSet struct {
	list   []rune
	buffer []rune
	frozen *frozen
}

// New returns an empty set. The initial capacity of 25 matches ICU's default.
func New() *UnicodeSet {
	buf := make([]rune, 1, 25)
	buf[0] = unicodeSetHigh
	return &UnicodeSet{list: buf}
}

// FromRunes wraps an existing inversion list (must be sorted and terminated
// with unicodeSetHigh) without copying it.
func FromRunes(list []rune) *UnicodeSet {
	return &UnicodeSet{list: list}
}

// ensureBufferCapacity makes u.buffer at least c elements long, reusing the
// existing allocation when possible.
func (u *UnicodeSet) ensureBufferCapacity(c int) {
	if cap(u.buffer) < c {
		u.buffer = make([]rune, c)
		return
	}
	u.buffer = u.buffer[:cap(u.buffer)]
}

// addbuffer merges the inversion list `other` into u.list (set union),
// writing the result into u.buffer and then swapping list/buffer.
// polarity bit 1 means the cursor into u.list is currently on a range end
// ("second" boundary); bit 2 means the same for `other`. Port of ICU's
// UnicodeSet::add(const UChar32* other, ...).
func (u *UnicodeSet) addbuffer(other []rune, polarity int8) {
	if u.frozen != nil {
		panic("UnicodeSet is frozen")
	}
	u.ensureBufferCapacity(len(u.list) + len(other))

	i := 1
	j := 1
	k := 0

	a := u.list[0]
	b := other[0]

	for {
		switch polarity {
		case 0:
			if a < b {
				if k > 0 && a <= u.buffer[k-1] {
					k--
					a = max(u.list[i], u.buffer[k])
				} else {
					u.buffer[k] = a
					k++
					a = u.list[i]
				}
				i++
				polarity ^= 1
			} else if b < a {
				if k > 0 && b <= u.buffer[k-1] {
					k--
					b = max(other[j], u.buffer[k])
				} else {
					u.buffer[k] = b
					k++
					b = other[j]
				}
				j++
				polarity ^= 2
			} else {
				if a == unicodeSetHigh {
					goto loopEnd
				}
				if k > 0 && a <= u.buffer[k-1] {
					k--
					a = max(u.list[i], u.buffer[k])
				} else {
					u.buffer[k] = a
					k++
					a = u.list[i]
				}
				i++
				polarity ^= 1
				b = other[j]
				j++
				polarity ^= 2
			}
		case 3:
			if b <= a {
				if a == unicodeSetHigh {
					goto loopEnd
				}
				u.buffer[k] = a
				k++
			} else {
				if b == unicodeSetHigh {
					goto loopEnd
				}
				u.buffer[k] = b
				k++
			}
			a = u.list[i]
			i++
			polarity ^= 1
			b = other[j]
			j++
			polarity ^= 2
		case 1:
			if a < b {
				u.buffer[k] = a
				k++
				a = u.list[i]
				i++
				polarity ^= 1
			} else if b < a {
				b = other[j]
				j++
				polarity ^= 2
			} else {
				if a == unicodeSetHigh {
					goto loopEnd
				}
				a = u.list[i]
				i++
				polarity ^= 1
				b = other[j]
				j++
				polarity ^= 2
			}
		case 2:
			if b < a {
				u.buffer[k] = b
				k++
				b = other[j]
				j++
				polarity ^= 2
			} else if a < b {
				a = u.list[i]
				i++
				polarity ^= 1
			} else {
				if a == unicodeSetHigh {
					goto loopEnd
				}
				a = u.list[i]
				i++
				polarity ^= 1
				b = other[j]
				j++
				polarity ^= 2
			}
		}
	}

loopEnd:
	u.buffer[k] = unicodeSetHigh
	k++

	// Swap: the merged result becomes the live list, the old list becomes scratch.
	u.list, u.buffer = u.buffer[:k], u.list
}

// pinCodePoint clamps *c into the valid range [unicodeSetLow, unicodeSetHigh-1]
// in place and returns the clamped value.
func pinCodePoint(c *rune) rune {
	if *c < unicodeSetLow {
		*c = unicodeSetLow
	} else if *c > (unicodeSetHigh - 1) {
		*c = unicodeSetHigh - 1
	}
	return *c
}

// AddRune adds a single code point to the set, merging with adjacent ranges
// where possible. Panics if the set is frozen.
func (u *UnicodeSet) AddRune(c rune) {
	if u.frozen != nil {
		panic("UnicodeSet is frozen")
	}

	// find smallest i such that c < list[i]
	// if odd, then it is IN the set
	// if even, then it is OUT of the set
	i := u.findCodePoint(pinCodePoint(&c))

	// already in set?
	if (i & 1) != 0 {
		return
	}

	// HIGH is 0x110000
	// assert(list[len-1] == HIGH);

	// empty = [HIGH]
	// [start_0, limit_0, start_1, limit_1, HIGH]

	// [..., start_k-1, limit_k-1, start_k, limit_k, ..., HIGH]
	//                             ^
	//                             list[i]

	// i == 0 means c is before the first range
	if c == u.list[i]-1 {
		// c is before start of next range
		u.list[i] = c
		// if we touched the HIGH mark, then add a new one
		if c == (unicodeSetHigh - 1) {
			u.list = append(u.list, unicodeSetHigh)
		}
		if i > 0 && c == u.list[i-1] {
			// collapse adjacent ranges

			// [..., start_k-1, c, c, limit_k, ..., HIGH]
			//                     ^
			//                     list[i]
			for k := i - 1; k < len(u.list)-2; k++ {
				u.list[k] = u.list[k+2]
			}
			u.list = u.list[:len(u.list)-2]
		}
	} else if i > 0 && c == u.list[i-1] {
		// c is after end of prior range
		u.list[i-1]++
		// no need to check for collapse here
	} else {
		// At this point we know the new char is not adjacent to
		// any existing ranges, and it is not 10FFFF.

		// [..., start_k-1, limit_k-1, start_k, limit_k, ..., HIGH]
		//                             ^
		//                             list[i]

		// [..., start_k-1, limit_k-1, c, c+1, start_k, limit_k, ..., HIGH]
		//                                     ^
		//                                     list[i]
		u.list = slices.Insert(u.list, i, c, c+1)
	}
}

// AddRuneRange adds the inclusive range [start, end] to the set.
// A reversed range (start > end) is ignored except for start == end,
// which degenerates to AddRune.
func (u *UnicodeSet) AddRuneRange(start, end rune) {
	if pinCodePoint(&start) < pinCodePoint(&end) {
		limit := end + 1
		// Fast path for adding a new range after the last one.
		// Odd list length: [..., lastStart, lastLimit, HIGH]
		if (len(u.list) & 1) != 0 {
			// If the list is empty, set lastLimit low enough to not be adjacent to 0.
			var lastLimit rune
			if len(u.list) == 1 {
				lastLimit = -2
			} else {
				lastLimit = u.list[len(u.list)-2]
			}
			if lastLimit <= start {
				if lastLimit == start {
					// Extend the last range.
					u.list[len(u.list)-2] = limit
					if limit == unicodeSetHigh {
						u.list = u.list[:len(u.list)-1]
					}
				} else {
					u.list[len(u.list)-1] = start
					if limit < unicodeSetHigh {
						u.list = append(u.list, limit)
						u.list = append(u.list, unicodeSetHigh)
					} else { // limit == UNICODESET_HIGH
						u.list = append(u.list, unicodeSetHigh)
					}
				}
				return
			}
		}
		// This is slow. Could be much faster using findCodePoint(start)
		// and modifying the list, dealing with adjacent & overlapping ranges.
		addRange := [3]rune{start, limit, unicodeSetHigh}
		u.addbuffer(addRange[:], 0)
	} else if start == end {
		u.AddRune(start)
	}
}

// AddAll unions u2 into u.
func (u *UnicodeSet) AddAll(u2 *UnicodeSet) {
	if len(u2.list) > 0 {
		u.addbuffer(u2.list, 0)
	}
}

// Complement inverts the set in place. In inversion-list form this is just
// inserting or removing a leading boundary at unicodeSetLow.
func (u *UnicodeSet) Complement() {
	if u.frozen != nil {
		panic("UnicodeSet is frozen")
	}
	if u.list[0] == unicodeSetLow {
		copy(u.list, u.list[1:])
		u.list = u.list[:len(u.list)-1]
	} else {
		u.list = slices.Insert(u.list, 0, unicodeSetLow)
	}
}

// RemoveRuneRange removes the inclusive range [start, end] from the set.
func (u *UnicodeSet) RemoveRuneRange(start, end rune) {
	if pinCodePoint(&start) < pinCodePoint(&end) {
		r := [3]rune{start, end + 1, unicodeSetHigh}
		u.retain(r[:], 2)
	}
}

// RemoveAll removes every code point of c from u (set difference).
func (u *UnicodeSet) RemoveAll(c *UnicodeSet) {
	u.retain(c.list, 2)
}

// RetainAll keeps only the code points also present in c (set intersection).
func (u *UnicodeSet) RetainAll(c *UnicodeSet) {
	u.retain(c.list, 0)
}

// retain intersects u.list with `other` (polarity 0) or subtracts `other`
// from u.list (polarity 2), writing into u.buffer and swapping.
// Port of ICU's UnicodeSet::retain(const UChar32* other, ...).
func (u *UnicodeSet) retain(other []rune, polarity int8) {
	if u.frozen != nil {
		panic("UnicodeSet is frozen")
	}

	u.ensureBufferCapacity(len(u.list) + len(other))

	i := 1
	j := 1
	k := 0

	a := u.list[0]
	b := other[0]

	// change from xor is that we have to check overlapping pairs
	// polarity bit 1 means a is second, bit 2 means b is.
	for {
		switch polarity {
		case 0: // both first; drop the smaller
			if a < b { // drop a
				a = u.list[i]
				i++
				polarity ^= 1
			} else if b < a { // drop b
				b = other[j]
				j++
				polarity ^= 2
			} else { // a == b, take one, drop other
				if a == unicodeSetHigh {
					goto loop_end
				}
				u.buffer[k] = a
				k++
				a = u.list[i]
				i++
				polarity ^= 1
				b = other[j]
				j++
				polarity ^= 2
			}
		case 3: // both second; take lower if unequal
			if a < b { // take a
				u.buffer[k] = a
				k++
				a = u.list[i]
				i++
				polarity ^= 1
			} else if b < a { // take b
				u.buffer[k] = b
				k++
				b = other[j]
				j++
				polarity ^= 2
			} else { // a == b, take one, drop other
				if a == unicodeSetHigh {
					goto loop_end
				}
				u.buffer[k] = a
				k++
				a = u.list[i]
				i++
				polarity ^= 1
				b = other[j]
				j++
				polarity ^= 2
			}
		case 1: // a second, b first;
			if a < b { // NO OVERLAP, drop a
				a = u.list[i]
				i++
				polarity ^= 1
			} else if b < a { // OVERLAP, take b
				u.buffer[k] = b
				k++
				b = other[j]
				j++
				polarity ^= 2
			} else { // a == b, drop both!
				if a == unicodeSetHigh {
					goto loop_end
				}
				a = u.list[i]
				i++
				polarity ^= 1
				b = other[j]
				j++
				polarity ^= 2
			}
		case 2: // a first, b second; if a < b, overlap
			if b < a { // no overlap, drop b
				b = other[j]
				j++
				polarity ^= 2
			} else if a < b { // OVERLAP, take a
				u.buffer[k] = a
				k++
				a = u.list[i]
				i++
				polarity ^= 1
			} else { // a == b, drop both!
				if a == unicodeSetHigh {
					goto loop_end
				}
				a = u.list[i]
				i++
				polarity ^= 1
				b = other[j]
				j++
				polarity ^= 2
			}
		}
	}

loop_end:
	u.buffer[k] = unicodeSetHigh // terminate
	k++
	u.list, u.buffer = u.buffer[:k], u.list
}

// Clear empties the set.
func (u *UnicodeSet) Clear() {
	if u.frozen != nil {
		panic("UnicodeSet is frozen")
	}
	u.list = u.list[:1]
	u.list[0] = unicodeSetHigh
}

// Len returns the number of code points contained in the set.
func (u *UnicodeSet) Len() (n int) {
	count := u.RangeCount()
	for i := 0; i < count; i++ {
		n += int(u.RangeEnd(i)) - int(u.RangeStart(i)) + 1
	}
	return
}

// RangeCount returns the number of contiguous ranges in the set.
func (u *UnicodeSet) RangeCount() int {
	return len(u.list) / 2
}

// RangeStart returns the first code point of range idx.
func (u *UnicodeSet) RangeStart(idx int) rune {
	return u.list[idx*2]
}

// RangeEnd returns the last (inclusive) code point of range idx.
func (u *UnicodeSet) RangeEnd(idx int) rune {
	return u.list[idx*2+1] - 1
}

// RuneAt returns the idx-th code point of the set in ascending order,
// or -1 if idx is negative or out of range.
func (u *UnicodeSet) RuneAt(idx int) rune {
	if idx >= 0 {
		// len2 is the largest even integer <= len, that is, it is len
		// for even values and len-1 for odd values. With odd values
		// the last entry is UNICODESET_HIGH.
		len2 := len(u.list)
		if (len2 & 0x1) != 0 {
			len2--
		}

		var i int
		for i < len2 {
			start := u.list[i]
			count := int(u.list[i+1] - start)
			i += 2
			if idx < count {
				return start + rune(idx)
			}
			idx -= count
		}
	}
	return -1
}

// ContainsRune reports whether c is in the set. Frozen sets use the
// precomputed bitmap tables for O(1)-ish lookups; mutable sets fall back to
// binary search over the inversion list.
func (u *UnicodeSet) ContainsRune(c rune) bool {
	if f := u.frozen; f != nil {
		if c < 0 {
			return false
		} else if c <= 0xff {
			return f.latin1Contains[c] != 0
		} else if c <= 0x7ff {
			return (f.table7FF[c&0x3f] & (uint32(1) << (c >> 6))) != 0
		} else if c < 0xd800 || (c >= 0xe000 && c <= 0xffff) {
			lead := c >> 12
			twoBits := (f.bmpBlockBits[(c>>6)&0x3f] >> lead) & 0x10001
			if twoBits <= 1 {
				// All 64 code points with the same bits 15..6
				// are either in the set or not.
				return twoBits != 0
			}
			// Look up the code point in its 4k block of code points.
			return f.containsSlow(u.list, c, f.list4kStarts[lead], f.list4kStarts[lead+1])
		} else if c <= 0x10ffff {
			// surrogate or supplementary code point
			return f.containsSlow(u.list, c, f.list4kStarts[0xd], f.list4kStarts[0x11])
		}
		// Out-of-range code points get FALSE, consistent with long-standing
		// behavior of UnicodeSet::contains(c).
		return false
	}

	if c >= unicodeSetHigh {
		return false
	}
	i := u.findCodePoint(c)
	return (i & 1) != 0
}

// ContainsRuneRange reports whether every code point in [from, to] is in the
// set, i.e. the whole range falls inside one contained range.
func (u *UnicodeSet) ContainsRuneRange(from, to rune) bool {
	i := u.findCodePoint(from)
	return (i&1) != 0 && to < u.list[i]
}

func (u *UnicodeSet) findCodePoint(c rune) int {
	/* Examples:
	                                   findCodePoint(c)
	   set              list[]         c=0 1 3 4 7 8
	   ===              ==============   ===========
	   []               [110000]         0 0 0 0 0 0
	   [\u0000-\u0003]  [0, 4, 110000]   1 1 1 2 2 2
	   [\u0004-\u0007]  [4, 8, 110000]   0 0 0 1 1 2
	   [:Any:]          [0, 110000]      1 1 1 1 1 1
	*/

	// Return the smallest i such that c < list[i]. Assume
	// list[len - 1] == HIGH and that c is legal (0..HIGH-1).
	if c < u.list[0] {
		return 0
	}

	// High runner test. c is often after the last range, so an
	// initial check for this condition pays off.
	lo := 0
	hi := len(u.list) - 1
	if lo >= hi || c >= u.list[hi-1] {
		return hi
	}

	// invariant: c >= list[lo]
	// invariant: c < list[hi]
	for {
		i := (lo + hi) >> 1
		if i == lo {
			break // Found!
		} else if c < u.list[i] {
			hi = i
		} else {
			lo = i
		}
	}
	return hi
}

// AddString adds every code point of chars to the set (not the string as a
// sequence — ICU "addAll(string)" semantics differ; this is per-rune add).
func (u *UnicodeSet) AddString(chars string) {
	for _, c := range chars {
		u.AddRune(c)
	}
}

// Filter is a predicate over code points, used by ApplyFilter.
type Filter func(ch rune) bool

// ApplyFilter rebuilds u as the set of all code points for which filter
// returns true, scanning only the candidate ranges in `inclusions`.
func (u *UnicodeSet) ApplyFilter(inclusions *UnicodeSet, filter Filter) {
	// Logically, walk through all Unicode characters, noting the start
	// and end of each range for which filter.contain(c) is
	// true. Add each range to a set.
	//
	// To improve performance, use an inclusions set which
	// encodes information about character ranges that are known
	// to have identical properties.
	// inclusions contains the first characters of
	// same-value ranges for the given property.

	u.Clear()

	startHasProperty := rune(-1)
	limitRange := inclusions.RangeCount()

	for j := 0; j < limitRange; j++ {
		// get current range
		start := inclusions.RangeStart(j)
		end := inclusions.RangeEnd(j)

		// for all the code points in the range, process
		for ch := start; ch <= end; ch++ {
			// only add to this UnicodeSet on inflection points --
			// where the hasProperty value changes to false
			if filter(ch) {
				if startHasProperty < 0 {
					startHasProperty = ch
				}
			} else if startHasProperty >= 0 {
				u.AddRuneRange(startHasProperty, ch-1)
				startHasProperty = -1
			}
		}
	}
	if startHasProperty >= 0 {
		u.AddRuneRange(startHasProperty, 0x10FFFF)
	}
}

// Clone returns a deep copy of the set (never frozen, even if u is).
func (u *UnicodeSet) Clone() *UnicodeSet {
	return &UnicodeSet{list: slices.Clone(u.list)}
}

// IsEmpty reports whether the set contains no code points.
func (u *UnicodeSet) IsEmpty() bool {
	return len(u.list) == 1
}

// CopyFrom replaces u's contents with a copy of set's inversion list.
func (u *UnicodeSet) CopyFrom(set *UnicodeSet) {
	if u.frozen != nil {
		panic("UnicodeSet is frozen")
	}
	u.list = slices.Clone(set.list)
}

// Equals reports whether the two sets contain exactly the same code points.
func (u *UnicodeSet) Equals(other *UnicodeSet) bool {
	return slices.Equal(u.list, other.list)
}

// Freeze builds the fast read-only lookup tables and marks the set immutable.
// Returns u for chaining.
func (u *UnicodeSet) Freeze() *UnicodeSet {
	u.frozen = freeze(u.list)
	return u
}

// FreezeCheck_ is a test/debug helper: it verifies that the frozen lookup
// tables agree with the inversion list for every code point.
func (u *UnicodeSet) FreezeCheck_() error {
	if u == nil {
		return nil
	}
	if u.frozen == nil {
		return fmt.Errorf("UnicodeSet is not frozen")
	}
	for r := rune(0); r <= 0x10ffff; r++ {
		want := (u.findCodePoint(r) & 1) != 0
		got := u.ContainsRune(r)
		if want != got {
			return fmt.Errorf("rune '%c' (U+%04X) did not freeze", r, r)
		}
	}
	return nil
}
diff --git a/go/mysql/icuregex/internal/uset/unicode_set_test.go b/go/mysql/icuregex/internal/uset/unicode_set_test.go
new file mode 100644
index 00000000000..908abd8889d
--- /dev/null
+++ b/go/mysql/icuregex/internal/uset/unicode_set_test.go
@@ -0,0 +1,43 @@
/*
© 2016 and later: Unicode, Inc. and others.
Copyright (C) 2004-2015, International Business Machines Corporation and others.
Copyright 2023 The Vitess Authors.

This file contains code derived from the Unicode Project's ICU library.
License & terms of use for the original code: http://www.unicode.org/copyright.html

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package uset

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestSimpleBelong checks basic membership: a set built from a literal string
// of metacharacters, its complement, and a small per-rune set.
func TestSimpleBelong(t *testing.T) {
	ss1 := New()
	ss1.AddString("*?+[(){}^$|\\.")
	ss2 := New()
	ss2.AddString("*?+[(){}^$|\\.")
	ss2.Complement()
	ss3 := New()
	ss3.AddRune('*')
	ss3.AddRune('?')

	assert.True(t, ss1.ContainsRune('('))
	assert.False(t, ss2.ContainsRune('('))
	assert.True(t, ss3.ContainsRune('*'))
}
diff --git a/go/mysql/icuregex/internal/utf16/helpers.go b/go/mysql/icuregex/internal/utf16/helpers.go
new file mode 100644
index 00000000000..bdf53ae731c
--- /dev/null
+++ b/go/mysql/icuregex/internal/utf16/helpers.go
@@ -0,0 +1,65 @@
/*
© 2016 and later: Unicode, Inc. and others.
Copyright (C) 2004-2015, International Business Machines Corporation and others.
Copyright 2023 The Vitess Authors.

This file contains code derived from the Unicode Project's ICU library.
License & terms of use for the original code: http://www.unicode.org/copyright.html

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utf16 + +import "unicode/utf16" + +func IsLead(c rune) bool { + return (uint32(c) & 0xfffffc00) == 0xd800 +} + +func IsTrail(c rune) bool { + return (uint32(c) & 0xfffffc00) == 0xdc00 +} + +/** + * Is this code point a surrogate (U+d800..U+dfff)? + * @param c 32-bit code point + * @return true or false + * @stable ICU 2.4 + */ +func IsSurrogate(c rune) bool { + return (uint32(c) & 0xfffff800) == 0xd800 +} + +/** + * Assuming c is a surrogate code point (U_IS_SURROGATE(c)), + * is it a lead surrogate? + * @param c 32-bit code point + * @return true or false + * @stable ICU 2.4 + */ +func IsSurrogateLead(c rune) bool { + return (uint32(c) & 0x400) == 0 +} + +func DecodeRune(a, b rune) rune { + return utf16.DecodeRune(a, b) +} + +func NextUnsafe(s []uint16) (rune, []uint16) { + c := rune(s[0]) + if !IsLead(c) { + return c, s[1:] + } + return DecodeRune(c, rune(s[1])), s[2:] +} diff --git a/go/mysql/icuregex/internal/utrie/ucptrie.go b/go/mysql/icuregex/internal/utrie/ucptrie.go new file mode 100644 index 00000000000..74e4eb9b2fa --- /dev/null +++ b/go/mysql/icuregex/internal/utrie/ucptrie.go @@ -0,0 +1,708 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utrie

import (
	"errors"
	"fmt"

	"vitess.io/vitess/go/mysql/icuregex/internal/udata"
)

// UcpTrie is an immutable Unicode code point trie (ICU UCPTrie, "Tri3"
// serialized form): a multi-stage lookup structure mapping each code point
// U+0000..U+10FFFF to an 8/16/32-bit value.
type UcpTrie struct {
	index  []uint16
	data8  []uint8
	data16 []uint16
	data32 []uint32

	indexLength, dataLength int32
	/** Start of the last range which ends at U+10FFFF. @internal */
	highStart          rune
	shifted12HighStart uint16

	typ        ucpTrieType
	valueWidth ucpTrieValueWidth

	/**
	 * Internal index-3 null block offset.
	 * Set to an impossibly high value (e.g., 0xffff) if there is no dedicated index-3 null block.
	 * @internal
	 */
	index3NullOffset uint16
	/**
	 * Internal data null block offset, not shifted.
	 * Set to an impossibly high value (e.g., 0xfffff) if there is no dedicated data null block.
	 * @internal
	 */
	dataNullOffset int32

	nullValue uint32
}

// ucpTrieType selects the trie layout: different trade-offs for size vs. speed.
// See ICU ucptrie.h (UCPTrieType). @stable ICU 63
type ucpTrieType int8

const (
	// typeAny: for openFromBinary to accept any type. @stable ICU 63
	typeAny ucpTrieType = iota - 1
	// typeFast: fast/simple/larger BMP data structure. @stable ICU 63
	typeFast
	// typeSmall: small/slower BMP data structure. @stable ICU 63
	typeSmall
)

// ucpTrieValueWidth selects the number of bits in a data value.
// See ICU ucptrie.h (UCPTrieValueWidth). @stable ICU 63
type ucpTrieValueWidth int8

const (
	// valueBitsAny: accept any data value width. @stable ICU 63
	valueBitsAny ucpTrieValueWidth = iota - 1
	// valueBits16: 16 bits per value (unsigned 0..0xffff). @stable ICU 63
	valueBits16
	// valueBits32: 32 bits per value. @stable ICU 63
	valueBits32
	// valueBits8: 8 bits per value (unsigned 0..0xff). @stable ICU 63
	valueBits8
)

// "Tri3" signature, little- and byte-swapped (big-endian) forms.
const ucpTrieSig = 0x54726933
const ucpTrieOESig = 0x33697254

/**
 * Constants for use with UCPTrieHeader.options.
 * @internal
 */
const (
	optionsDataLengthMask     = 0xf000
	optionsDataNullOffsetMask = 0xf00
	optionsReservedMask       = 0x38
	optionsValueBitsMask      = 7
)

const (
	/** @internal */
	fastShift = 6

	/** Number of entries in a data block for code points below the fast limit. 64=0x40 @internal */
	fastDataBlockLength = 1 << fastShift

	/** Mask for getting the lower bits for the in-fast-data-block offset. @internal */
	fastDataMask = fastDataBlockLength - 1

	/** @internal */
	smallMax = 0xfff

	/**
	 * Offset from dataLength (to be subtracted) for fetching the
	 * value returned for out-of-range code points and ill-formed UTF-8/16.
	 * @internal
	 */
	errorValueNegDataOffset = 1
	/**
	 * Offset from dataLength (to be subtracted) for fetching the
	 * value returned for code points highStart..U+10FFFF.
	 * @internal
	 */
	highValueNegDataOffset = 2
)

// Internal constants.
const (
	/** The length of the BMP index table. 1024=0x400 */
	bmpIndexLength = 0x10000 >> fastShift

	smallLimit       = 0x1000
	smallIndexLength = smallLimit >> fastShift

	/** Shift size for getting the index-3 table offset. */
	ucpShift3 = 4

	/** Shift size for getting the index-2 table offset. */
	ucpShift2 = 5 + ucpShift3

	/** Shift size for getting the index-1 table offset. */
	ucpShift1 = 5 + ucpShift2

	/**
	 * Difference between two shift sizes,
	 * for getting an index-2 offset from an index-3 offset. 5=9-4
	 */
	ucpShift2Min3 = ucpShift2 - ucpShift3

	/**
	 * Difference between two shift sizes,
	 * for getting an index-1 offset from an index-2 offset. 5=14-9
	 */
	ucpShift1Min2 = ucpShift1 - ucpShift2

	/**
	 * Number of index-1 entries for the BMP. (4)
	 * This part of the index-1 table is omitted from the serialized form.
	 */
	ucpOmittedBmpIndex1Length = 0x10000 >> ucpShift1

	/** Number of entries in an index-2 block. 32=0x20 */
	ucpIndex2BlockLength = 1 << ucpShift1Min2

	/** Mask for getting the lower bits for the in-index-2-block offset. */
	ucpIndex2Mask = ucpIndex2BlockLength - 1

	/** Number of code points per index-2 table entry. 512=0x200 */
	ucpCpPerIndex2Entry = 1 << ucpShift2

	/** Number of entries in an index-3 block. 32=0x20 */
	ucpIndex3BlockLength = 1 << ucpShift2Min3

	/** Mask for getting the lower bits for the in-index-3-block offset. */
	ucpIndex3Mask = ucpIndex3BlockLength - 1

	/** Number of entries in a small data block. 16=0x10 */
	ucpSmallDataBlockLength = 1 << ucpShift3

	/** Mask for getting the lower bits for the in-small-data-block offset. */
	ucpSmallDataMask = ucpSmallDataBlockLength - 1
)

// UcpTrieFromBytes deserializes a UCPTrie from its binary ("Tri3") form.
// Only little-endian data is supported; a byte-swapped signature is rejected.
func UcpTrieFromBytes(bytes *udata.Bytes) (*UcpTrie, error) {
	type ucpHeader struct {
		/** "Tri3" in big-endian US-ASCII (0x54726933) */
		signature uint32

		/**
		 * Options bit field:
		 * Bits 15..12: Data length bits 19..16.
		 * Bits 11..8: Data null block offset bits 19..16.
		 * Bits 7..6: UCPTrieType
		 * Bits 5..3: Reserved (0).
		 * Bits 2..0: UCPTrieValueWidth
		 */
		options uint16

		/** Total length of the index tables. */
		indexLength uint16

		/** Data length bits 15..0. */
		dataLength uint16

		/** Index-3 null block offset, 0x7fff or 0xffff if none. */
		index3NullOffset uint16

		/** Data null block offset bits 15..0, 0xfffff if none. */
		dataNullOffset uint16

		/**
		 * First code point of the single-value range ending with U+10ffff,
		 * rounded up and then shifted right by UCPTRIE_SHIFT_2.
		 */
		shiftedHighStart uint16
	}

	var header ucpHeader
	header.signature = bytes.Uint32()

	switch header.signature {
	case ucpTrieSig:
	case ucpTrieOESig:
		return nil, errors.New("unsupported: BigEndian encoding")
	default:
		return nil, fmt.Errorf("invalid signature for UcpTrie: 0x%08x", header.signature)
	}

	header.options = bytes.Uint16()
	header.indexLength = bytes.Uint16()
	header.dataLength = bytes.Uint16()
	header.index3NullOffset = bytes.Uint16()
	header.dataNullOffset = bytes.Uint16()
	header.shiftedHighStart = bytes.Uint16()

	typeInt := (header.options >> 6) & 3
	valueWidthInt := header.options & optionsValueBitsMask
	if typeInt > uint16(typeSmall) || valueWidthInt > uint16(valueBits8) ||
		(header.options&optionsReservedMask) != 0 {
		return nil, errors.New("invalid options for serialized UcpTrie")
	}
	actualType := ucpTrieType(typeInt)
	actualValueWidth := ucpTrieValueWidth(valueWidthInt)

	trie := &UcpTrie{
		indexLength:      int32(header.indexLength),
		dataLength:       int32(((header.options & optionsDataLengthMask) << 4) | header.dataLength),
		index3NullOffset: header.index3NullOffset,
		dataNullOffset:   int32(((header.options & optionsDataNullOffsetMask) << 8) | header.dataNullOffset),
		highStart:        rune(header.shiftedHighStart) << ucpShift2,
		typ:              actualType,
		valueWidth:       actualValueWidth,
	}
	// If there is no dedicated null block, fall back to the high value slot.
	nullValueOffset := trie.dataNullOffset
	if nullValueOffset >= trie.dataLength {
		nullValueOffset = trie.dataLength - highValueNegDataOffset
	}

	trie.shifted12HighStart = uint16((trie.highStart + 0xfff) >> 12)
	trie.index = bytes.Uint16Slice(int32(header.indexLength))
	switch actualValueWidth {
	case valueBits16:
		trie.data16 = bytes.Uint16Slice(trie.dataLength)
		trie.nullValue = uint32(trie.data16[nullValueOffset])
	case valueBits32:
		trie.data32 = bytes.Uint32Slice(trie.dataLength)
		trie.nullValue = trie.data32[nullValueOffset]
	case valueBits8:
		trie.data8 = bytes.Uint8Slice(trie.dataLength)
		trie.nullValue = uint32(trie.data8[nullValueOffset])
	}

	return trie, nil
}

// Get returns the value stored for code point c (the error value for
// out-of-range c).
func (t *UcpTrie) Get(c rune) uint32 {
	var dataIndex int32
	if c <= 0x7f {
		// linear ASCII
		dataIndex = c
	} else {
		var fastMax rune
		if t.typ == typeFast {
			fastMax = 0xffff
		} else {
			fastMax = smallMax
		}
		dataIndex = t.cpIndex(fastMax, c)
	}
	return t.getValue(dataIndex)
}

// getValue reads the data array that matches the trie's value width.
func (t *UcpTrie) getValue(dataIndex int32) uint32 {
	switch t.valueWidth {
	case valueBits16:
		return uint32(t.data16[dataIndex])
	case valueBits32:
		return t.data32[dataIndex]
	case valueBits8:
		return uint32(t.data8[dataIndex])
	default:
		// Unreachable if the trie is properly initialized.
		return 0xffffffff
	}
}

/** Internal trie getter for a code point below the fast limit. Returns the data index. @internal */
func (t *UcpTrie) fastIndex(c rune) int32 {
	return int32(t.index[c>>fastShift]) + (c & fastDataMask)
}

/** Internal trie getter for a code point at or above the fast limit. Returns the data index. @internal */
func (t *UcpTrie) smallIndex(c rune) int32 {
	if c >= t.highStart {
		return t.dataLength - highValueNegDataOffset
	}
	return t.internalSmallIndex(c)
}

// internalSmallIndex walks the index-1/2/3 stages for c < highStart.
func (t *UcpTrie) internalSmallIndex(c rune) int32 {
	i1 := c >> ucpShift1
	if t.typ == typeFast {
		i1 += bmpIndexLength - ucpOmittedBmpIndex1Length
	} else {
		i1 += smallIndexLength
	}
	i3Block := int32(t.index[int32(t.index[i1])+((c>>ucpShift2)&ucpIndex2Mask)])
	i3 := (c >> ucpShift3) & ucpIndex3Mask
	var dataBlock int32
	if (i3Block & 0x8000) == 0 {
		// 16-bit indexes
		dataBlock = int32(t.index[i3Block+i3])
	} else {
		// 18-bit indexes stored in groups of 9 entries per 8 indexes.
		i3Block = (i3Block & 0x7fff) + (i3 & ^7) + (i3 >> 3)
		i3 &= 7
		dataBlock = int32(t.index[i3Block]) << (2 + (2 * i3)) & 0x30000
		i3Block++
		dataBlock |= int32(t.index[i3Block+i3])
	}
	return dataBlock + (c & ucpSmallDataMask)
}

/**
 * Internal trie getter for a code point, with checking that c is in U+0000..10FFFF.
 * Returns the data index.
 * @internal
 */
func (t *UcpTrie) cpIndex(fastMax, c rune) int32 {
	if c <= fastMax {
		return t.fastIndex(c)
	}
	if c <= 0x10ffff {
		return t.smallIndex(c)
	}
	return t.dataLength - errorValueNegDataOffset
}

// UcpMapRangeOption selects how GetRange reports value ranges overlapping
// with surrogates. Most users should use UcpMapRangeNormal.
// See ICU ucpmap.h (UCPMapRangeOption). @stable ICU 63
type UcpMapRangeOption int8

const (
	// UcpMapRangeNormal enumerates all same-value ranges as stored in the map.
	// Most users should use this option. @stable ICU 63
	UcpMapRangeNormal UcpMapRangeOption = iota
	// UcpMapRangeFixedLeadSurrogates enumerates ranges as stored, except that
	// lead surrogates (U+D800..U+DBFF) are treated as having the
	// surrogateValue (passed to GetRange separately; not passed through
	// filter). Useful for maps that give surrogate code *units* special
	// values that must not apply to the lead surrogate code *points*.
	// @stable ICU 63
	UcpMapRangeFixedLeadSurrogates
	// UcpMapRangeFixedAllSurrogates is like the above but treats all
	// surrogates (U+D800..U+DFFF) as having the surrogateValue.
	// @stable ICU 63
	UcpMapRangeFixedAllSurrogates
)

/**
 * Callback function type: Modifies a map value.
 * Optionally called by ucpmap_getRange()/ucptrie_getRange()/umutablecptrie_getRange().
 * The modified value will be returned by the getRange function.
 *
 * Can be used to ignore some of the value bits,
 * make a filter for one of several values,
 * return a value index computed from the map value, etc.
 *
 * @param value a value from the map
 * @return the modified value
 * @stable ICU 63
 */
type UcpMapValueFilter func(value uint32) uint32

/**
 * GetRange returns the last code point such that all those from start to there have the same value.
 * Can be used to efficiently iterate over all same-value ranges in a trie.
 * (This is normally faster than iterating over code points and get()ting each value,
 * but much slower than a data structure that stores ranges directly.)
 *
 * If filter is not nil, the value to be delivered is passed through it, and the
 * returned end is the end of the range where all values are modified to the same
 * actual value. The value is unchanged if filter is nil.
 *
 * Typical iteration:
 *   for start := rune(0); start <= 0x10ffff; {
 *       end, value := trie.GetRange(start, UcpMapRangeNormal, 0, nil)
 *       if end < 0 { break }
 *       // work with start..end and value
 *       start = end + 1
 *   }
 *
 * @param start range start
 * @param option surrogate handling; usually UcpMapRangeNormal
 * @param surrogateValue value for surrogates; ignored if option==UcpMapRangeNormal
 * @param filter optional value modifier, or nil
 * @return the range end code point (or -1 if start is not a valid code point),
 *         and the (possibly filtered) value shared by start..end
 * @stable ICU 63
 */
func (t *UcpTrie) GetRange(start rune, option UcpMapRangeOption, surrogateValue uint32, filter UcpMapValueFilter) (rune, uint32) {
	if option == UcpMapRangeNormal {
		return t.getRange(start, filter)
	}

	var surrEnd rune
	if option == UcpMapRangeFixedAllSurrogates {
		surrEnd = 0xdfff
	} else {
		surrEnd = 0xdbff
	}
	end, value := t.getRange(start, filter)
	if end < 0xd7ff || start > surrEnd {
		return end, value
	}
	if value == surrogateValue {
		if end >= surrEnd {
			// Surrogates followed by a non-surrogateValue range,
			// or surrogates are part of a larger surrogateValue range.
			return end, value
		}
	} else {
		if start <= 0xd7ff {
			return 0xd7ff, value // Non-surrogateValue range ends before surrogateValue surrogates.
		}
		// Start is a surrogate with a non-surrogateValue code *unit* value.
		// Return a surrogateValue code *point* range.
		value = surrogateValue
		if end > surrEnd {
			return surrEnd, value // Surrogate range ends before non-surrogateValue rest of range.
		}
	}
	// See if the surrogateValue surrogate range can be merged with
	// an immediately following range.
	end2, value2 := t.getRange(surrEnd+1, filter)
	if value2 == surrogateValue {
		return end2, value
	}
	return surrEnd, value
}

const maxUnicode = 0x10ffff

// getRange is the UcpMapRangeNormal implementation of GetRange: it walks the
// index structure block by block, skipping repeated null/previous blocks, and
// stops at the first code point whose (filtered) value differs.
func (t *UcpTrie) getRange(start rune, filter UcpMapValueFilter) (rune, uint32) {
	if start > maxUnicode {
		return -1, 0
	}

	if start >= t.highStart {
		di := t.dataLength - highValueNegDataOffset
		value := t.getValue(di)
		if filter != nil {
			value = filter(value)
		}
		return maxUnicode, value
	}

	nullValue := t.nullValue
	if filter != nil {
		nullValue = filter(nullValue)
	}
	index := t.index

	prevI3Block := int32(-1)
	prevBlock := int32(-1)
	c := start
	var trieValue uint32
	value := nullValue
	haveValue := false
	for {
		var i3Block, i3, i3BlockLength, dataBlockLength int32
		if c <= 0xffff && (t.typ == typeFast || c <= smallMax) {
			i3Block = 0
			i3 = c >> fastShift
			if t.typ == typeFast {
				i3BlockLength = bmpIndexLength
			} else {
				i3BlockLength = smallIndexLength
			}
			dataBlockLength = fastDataBlockLength
		} else {
			// Use the multi-stage index.
			i1 := c >> ucpShift1
			if t.typ == typeFast {
				i1 += bmpIndexLength - ucpOmittedBmpIndex1Length
			} else {
				i1 += smallIndexLength
			}
			shft := c >> ucpShift2
			idx := int32(t.index[i1]) + (shft & ucpIndex2Mask)
			i3Block = int32(t.index[idx])
			if i3Block == prevI3Block && (c-start) >= ucpCpPerIndex2Entry {
				// The index-3 block is the same as the previous one, and filled with value.
				c += ucpCpPerIndex2Entry
				continue
			}
			prevI3Block = i3Block
			if i3Block == int32(t.index3NullOffset) {
				// This is the index-3 null block.
				if haveValue {
					if nullValue != value {
						return c - 1, value
					}
				} else {
					trieValue = t.nullValue
					value = nullValue
					haveValue = true
				}
				prevBlock = t.dataNullOffset
				c = (c + ucpCpPerIndex2Entry) & ^(ucpCpPerIndex2Entry - 1)
				continue
			}
			i3 = (c >> ucpShift3) & ucpIndex3Mask
			i3BlockLength = ucpIndex3BlockLength
			dataBlockLength = ucpSmallDataBlockLength
		}

		// Enumerate data blocks for one index-3 block.
		for {
			var block int32
			if (i3Block & 0x8000) == 0 {
				block = int32(index[i3Block+i3])
			} else {
				// 18-bit indexes stored in groups of 9 entries per 8 indexes.
				group := (i3Block & 0x7fff) + (i3 & ^7) + (i3 >> 3)
				gi := i3 & 7
				block = (int32(index[group]) << (2 + (2 * gi))) & 0x30000
				group++
				block |= int32(index[group+gi])
			}
			if block == prevBlock && (c-start) >= dataBlockLength {
				// The block is the same as the previous one, and filled with value.
				c += dataBlockLength
			} else {
				dataMask := dataBlockLength - 1
				prevBlock = block
				if block == t.dataNullOffset {
					// This is the data null block.
					if haveValue {
						if nullValue != value {
							return c - 1, value
						}
					} else {
						trieValue = t.nullValue
						value = nullValue
						haveValue = true
					}
					c = (c + dataBlockLength) & ^dataMask
				} else {
					di := block + (c & dataMask)
					trieValue2 := t.getValue(di)
					if haveValue {
						if trieValue2 != trieValue {
							if filter == nil || maybeFilterValue(trieValue2, t.nullValue, nullValue, filter) != value {
								return c - 1, value
							}
							trieValue = trieValue2 // may or may not help
						}
					} else {
						trieValue = trieValue2
						value = maybeFilterValue(trieValue2, t.nullValue, nullValue, filter)
						haveValue = true
					}
					for {
						c++
						if c&dataMask == 0 {
							break
						}
						di++
						trieValue2 = t.getValue(di)
						if trieValue2 != trieValue {
							if filter == nil || maybeFilterValue(trieValue2, t.nullValue, nullValue, filter) != value {
								return c - 1, value
							}
							trieValue = trieValue2 // may or may not help
						}
					}
				}
			}
			i3++
			if i3 >= i3BlockLength {
				break
			}
		}
		if c >= t.highStart {
			break
		}
	}

	di := t.dataLength - highValueNegDataOffset
	highValue := t.getValue(di)
	if maybeFilterValue(highValue, t.nullValue, nullValue, filter) != value {
		return c - 1, value
	}
	return maxUnicode, value
}

// maybeFilterValue maps the trie's null value to the (pre-filtered) nullValue
// and applies filter to anything else.
func maybeFilterValue(value uint32, trieNullValue uint32, nullValue uint32, filter UcpMapValueFilter) uint32 {
	if value == trieNullValue {
		value = nullValue
	} else if filter != nil {
		value = filter(value)
	}
	return value
}
diff --git a/go/mysql/icuregex/internal/utrie/utrie2.go b/go/mysql/icuregex/internal/utrie/utrie2.go
new file mode 100644
index 00000000000..2a474356b97
--- /dev/null
+++ b/go/mysql/icuregex/internal/utrie/utrie2.go
@@ -0,0 +1,433 @@
/*
© 2016 and later: Unicode, Inc. and others.
Copyright (C) 2004-2015, International Business Machines Corporation and others.
Copyright 2023 The Vitess Authors.

This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utrie + +import ( + "errors" + "fmt" + + "vitess.io/vitess/go/mysql/icuregex/internal/udata" + "vitess.io/vitess/go/mysql/icuregex/internal/utf16" +) + +type UTrie2 struct { + index []uint16 + data16 []uint16 + data32 []uint32 + + indexLength, dataLength int + index2NullOffset uint16 + dataNullOffset uint16 + InitialValue uint32 + ErrorValue uint32 + + HighStart rune + HighValueIndex int +} + +func (t *UTrie2) SerializedLength() int32 { + return 16 + int32(t.indexLength+t.dataLength)*2 +} + +func (t *UTrie2) getIndex(asciiOffset int, c rune) uint16 { + return t.index[t.indexFromCp(asciiOffset, c)] +} + +func (t *UTrie2) Get16(c rune) uint16 { + return t.getIndex(t.indexLength, c) +} + +func (t *UTrie2) indexFromCp(asciiOffset int, c rune) int { + switch { + case c < 0xd800: + return indexRaw(0, t.index, c) + case c <= 0xffff: + var offset int32 + if c <= 0xdbff { + offset = lscpIndex2Offset - (0xd800 >> shift2) + } + return indexRaw(offset, t.index, c) + case c > 0x10ffff: + return asciiOffset + badUtf8DataOffset + case c >= t.HighStart: + return t.HighValueIndex + default: + return indexFromSupp(t.index, c) + } +} + +type EnumRange func(start, end rune, value uint32) bool +type EnumValue func(value uint32) uint32 + +func (t *UTrie2) Enum(enumValue EnumValue, enumRange EnumRange) { + t.enumEitherTrie(0, 0x110000, enumValue, enumRange) +} + 
+func enumSameValue(value uint32) uint32 { + return value +} + +func (t *UTrie2) enumEitherTrie(start, limit rune, enumValue EnumValue, enumRange EnumRange) { + if enumRange == nil { + return + } + if enumValue == nil { + enumValue = enumSameValue + } + + /* frozen trie */ + var ( + idx = t.index + data32 = t.data32 + index2NullOffset = int(t.index2NullOffset) + nullBlock = int(t.dataNullOffset) + + c rune + prev = start + highStart = t.HighStart + + /* get the enumeration value that corresponds to an initial-value trie data entry */ + initialValue = enumValue(t.InitialValue) + + /* set variables for previous range */ + i2Block int + block int + prevI2Block = -1 + prevBlock = -1 + prevValue = uint32(0) + ) + + /* enumerate index-2 blocks */ + for c = start; c < limit && c < highStart; { + /* Code point limit for iterating inside this i2Block. */ + tempLimit := c + cpPerIndex1Entry + if limit < tempLimit { + tempLimit = limit + } + if c <= 0xffff { + if !utf16.IsSurrogate(c) { + i2Block = int(c >> shift2) + } else if utf16.IsSurrogateLead(c) { + /* + * Enumerate values for lead surrogate code points, not code units: + * This special block has half the normal length. + */ + i2Block = lscpIndex2Offset + tempLimit = min(0xdc00, limit) + } else { + /* + * Switch back to the normal part of the index-2 table. + * Enumerate the second half of the surrogates block. + */ + i2Block = 0xd800 >> shift2 + tempLimit = min(0xe000, limit) + } + } else { + /* supplementary code points */ + i2Block = int(idx[(index1Offset-omittedBmpIndex1Length)+(c>>shift1)]) + if i2Block == prevI2Block && (c-prev) >= cpPerIndex1Entry { + /* + * The index-2 block is the same as the previous one, and filled with prevValue. + * Only possible for supplementary code points because the linear-BMP index-2 + * table creates unique i2Block values. 
+ */ + c += cpPerIndex1Entry + continue + } + } + prevI2Block = i2Block + if i2Block == index2NullOffset { + /* this is the null index-2 block */ + if prevValue != initialValue { + if prev < c && !enumRange(prev, c-1, prevValue) { + return + } + prevBlock = nullBlock + prev = c + prevValue = initialValue + } + c += cpPerIndex1Entry + } else { + /* enumerate data blocks for one index-2 block */ + var i2Limit int + if (c >> shift1) == (tempLimit >> shift1) { + i2Limit = int(tempLimit>>shift2) & index2Mask + } else { + i2Limit = index2BlockLength + } + for i2 := int(c>>shift2) & index2Mask; i2 < i2Limit; i2++ { + block = int(idx[i2Block+i2] << indexShift) + if block == prevBlock && (c-prev) >= dataBlockLength { + /* the block is the same as the previous one, and filled with prevValue */ + c += dataBlockLength + continue + } + prevBlock = block + if block == nullBlock { + /* this is the null data block */ + if prevValue != initialValue { + if prev < c && !enumRange(prev, c-1, prevValue) { + return + } + prev = c + prevValue = initialValue + } + c += dataBlockLength + } else { + for j := 0; j < dataBlockLength; j++ { + var value uint32 + if data32 != nil { + value = data32[block+j] + } else { + value = uint32(idx[block+j]) + } + value = enumValue(value) + if value != prevValue { + if prev < c && !enumRange(prev, c-1, prevValue) { + return + } + prev = c + prevValue = value + } + c++ + } + } + } + } + } + + if c > limit { + c = limit /* could be higher if in the index2NullOffset */ + } else if c < limit { + /* c==highStart>shift1)]) + return (int(index[i1+int((c>>shift2)&index2Mask)]) << indexShift) + int(c&dataMask) +} + +func indexRaw(offset int32, index []uint16, c rune) int { + return int(index[offset+(c>>shift2)]<> shift1 + + /** Number of code points per index-1 table entry. 2048=0x800 */ + cpPerIndex1Entry = 1 << shift1 + + /** Number of entries in an index-2 block. 
64=0x40 */ + index2BlockLength = 1 << shift1min2 + + /** Mask for getting the lower bits for the in-index-2-block offset. */ + index2Mask = index2BlockLength - 1 + + /** Number of entries in a data block. 32=0x20 */ + dataBlockLength = 1 << shift2 + + /** Mask for getting the lower bits for the in-data-block offset. */ + dataMask = dataBlockLength - 1 + + /** + * Shift size for shifting left the index array values. + * Increases possible data size with 16-bit index values at the cost + * of compactability. + * This requires data blocks to be aligned by UTRIE2_DATA_GRANULARITY. + */ + indexShift = 2 + + /** The alignment size of a data block. Also the granularity for compaction. */ + dataGranularity = 1 << indexShift + + /* Fixed layout of the first part of the index array. ------------------- */ + + /** + * The part of the index-2 table for U+D800..U+DBFF stores values for + * lead surrogate code _units_ not code _points_. + * Values for lead surrogate code _points_ are indexed with this portion of the table. + * Length=32=0x20=0x400>>UTRIE2_SHIFT_2. (There are 1024=0x400 lead surrogates.) + */ + lscpIndex2Offset = 0x10000 >> shift2 + lscpIndex2Length = 0x400 >> shift2 + + /** Count the lengths of both BMP pieces. 2080=0x820 */ + index2BmpLength = lscpIndex2Offset + lscpIndex2Length + + /** + * The 2-byte UTF-8 version of the index-2 table follows at offset 2080=0x820. + * Length 32=0x20 for lead bytes C0..DF, regardless of UTRIE2_SHIFT_2. + */ + utf82BIndex2Offset = index2BmpLength + utf82BIndex2Length = 0x800 >> 6 /* U+0800 is the first code point after 2-byte UTF-8 */ + + /** + * The index-1 table, only used for supplementary code points, at offset 2112=0x840. + * Variable length, for code points up to highStart, where the last single-value range starts. + * Maximum length 512=0x200=0x100000>>UTRIE2_SHIFT_1. + * (For 0x100000 supplementary code points U+10000..U+10ffff.) 
+ * + * The part of the index-2 table for supplementary code points starts + * after this index-1 table. + * + * Both the index-1 table and the following part of the index-2 table + * are omitted completely if there is only BMP data. + */ + index1Offset = utf82BIndex2Offset + utf82BIndex2Length + maxIndex1Length = 0x100000 >> shift1 + + /* + * Fixed layout of the first part of the data array. ----------------------- + * Starts with 4 blocks (128=0x80 entries) for ASCII. + */ + + /** + * The illegal-UTF-8 data block follows the ASCII block, at offset 128=0x80. + * Used with linear access for single bytes 0..0xbf for simple error handling. + * Length 64=0x40, not UTRIE2_DATA_BLOCK_LENGTH. + */ + badUtf8DataOffset = 0x80 +) + +func UTrie2FromBytes(bytes *udata.Bytes) (*UTrie2, error) { + type utrie2Header struct { + /** "Tri2" in big-endian US-ASCII (0x54726932) */ + signature uint32 + + /** + * options bit field: + * 15.. 4 reserved (0) + * 3.. 0 UTrie2ValueBits valueBits + */ + options uint16 + + /** UTRIE2_INDEX_1_OFFSET..UTRIE2_MAX_INDEX_LENGTH */ + indexLength uint16 + + /** (UTRIE2_DATA_START_OFFSET..UTRIE2_MAX_DATA_LENGTH)>>UTRIE2_INDEX_SHIFT */ + shiftedDataLength uint16 + + /** Null index and data blocks, not shifted. */ + index2NullOffset, dataNullOffset uint16 + + /** + * First code point of the single-value range ending with U+10ffff, + * rounded up and then shifted right by UTRIE2_SHIFT_1. 
+ */ + shiftedHighStart uint16 + } + + var header utrie2Header + header.signature = bytes.Uint32() + + switch header.signature { + case 0x54726932: + case 0x32697254: + return nil, errors.New("unsupported: BigEndian encoding") + default: + return nil, fmt.Errorf("invalid signature for Trie2: 0x%08x", header.signature) + } + + header.options = bytes.Uint16() + header.indexLength = bytes.Uint16() + header.shiftedDataLength = bytes.Uint16() + header.index2NullOffset = bytes.Uint16() + header.dataNullOffset = bytes.Uint16() + header.shiftedHighStart = bytes.Uint16() + + var width int + switch header.options & 0xf { + case 0: + width = 16 + case 1: + width = 32 + default: + return nil, errors.New("invalid width for serialized UTrie2") + } + + trie := &UTrie2{ + indexLength: int(header.indexLength), + dataLength: int(header.shiftedDataLength) << indexShift, + index2NullOffset: header.index2NullOffset, + dataNullOffset: header.dataNullOffset, + HighStart: rune(header.shiftedHighStart) << shift1, + } + + trie.HighValueIndex = trie.dataLength - dataGranularity + if width == 16 { + trie.HighValueIndex += trie.indexLength + } + + indexArraySize := trie.indexLength + if width == 16 { + indexArraySize += trie.dataLength + } + + trie.index = bytes.Uint16Slice(int32(indexArraySize)) + + if width == 16 { + trie.data16 = trie.index[trie.indexLength:] + trie.InitialValue = uint32(trie.index[trie.dataNullOffset]) + trie.ErrorValue = uint32(trie.index[trie.indexLength+badUtf8DataOffset]) + } else { + trie.data32 = bytes.Uint32Slice(int32(trie.dataLength)) + trie.InitialValue = trie.data32[trie.dataNullOffset] + trie.ErrorValue = trie.data32[badUtf8DataOffset] + } + + return trie, nil +} diff --git a/go/mysql/icuregex/matcher.go b/go/mysql/icuregex/matcher.go new file mode 100644 index 00000000000..1b5495f495f --- /dev/null +++ b/go/mysql/icuregex/matcher.go @@ -0,0 +1,1671 @@ +/* +© 2016 and later: Unicode, Inc. and others. 
+Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex + +import ( + "fmt" + "io" + + "vitess.io/vitess/go/mysql/icuregex/internal/ucase" + "vitess.io/vitess/go/mysql/icuregex/internal/uchar" + "vitess.io/vitess/go/mysql/icuregex/internal/uprops" +) + +const timerInitialValue = 10000 +const defaultTimeout = 3 +const defaultStackLimit = 0 + +type Matcher struct { + pattern *Pattern + + input []rune + + regionStart int // Start of the input region, default = 0. + regionLimit int // End of input region, default to input.length. + + anchorStart int // Region bounds for anchoring operations (^ or $). + anchorLimit int // See useAnchoringBounds + + lookStart int // Region bounds for look-ahead/behind and + lookLimit int // and other boundary tests. See + // useTransparentBounds + + activeStart int // Currently active bounds for matching. + activeLimit int // Usually is the same as region, but + // is changed to fLookStart/Limit when + // entering look around regions. + + match bool // True if the last attempted match was successful. 
+ matchStart int // Position of the start of the most recent match + matchEnd int // First position after the end of the most recent match + // Zero if no previous match, even when a region + // is active. + lastMatchEnd int // First position after the end of the previous match, + // or -1 if there was no previous match. + appendPosition int // First position after the end of the previous + // appendReplacement(). As described by the + // JavaDoc for Java Matcher, where it is called + // "append position" + hitEnd bool // True if the last match touched the end of input. + requireEnd bool // True if the last match required end-of-input + // (matched $ or Z) + + stack stack + frame stackFrame // After finding a match, the last active stack frame, + // which will contain the capture group results. + // NOT valid while match engine is running. + + data []int // Data area for use by the compiled pattern. + + timeLimit int32 // Max time (in arbitrary steps) to let the + // match engine run. Zero for unlimited. + + time int32 // Match time, accumulates while matching. + tickCounter int32 // Low bits counter for time. Counts down StateSaves. + // Kept separately from fTime to keep as much + // code as possible out of the inline + // StateSave function. + + dumper io.Writer +} + +func NewMatcher(pat *Pattern) *Matcher { + m := &Matcher{ + pattern: pat, + data: make([]int, pat.dataSize), + stack: stack{ + frameSize: pat.frameSize, + stackLimit: defaultStackLimit, + }, + timeLimit: defaultTimeout, + } + m.reset() + return m +} + +func (m *Matcher) MatchAt(startIdx int, toEnd bool) error { + //-------------------------------------------------------------------------------- + // + // MatchAt This is the actual matching engine. + // + // startIdx: begin matching a this index. 
+ // toEnd: if true, match must extend to end of the input region + // + //-------------------------------------------------------------------------------- + var err error + var isMatch bool // True if the we have a match. + + if m.dumper != nil { + fmt.Fprintf(m.dumper, "MatchAt(startIdx=%d)\n", startIdx) + fmt.Fprintf(m.dumper, "Original Pattern: \"%s\"\n", m.pattern.pattern) + fmt.Fprintf(m.dumper, "Input String: \"%s\"\n\n", string(m.input)) + } + + pat := m.pattern.compiledPat + inputText := m.input + litText := m.pattern.literalText + sets := m.pattern.sets + + fp := m.resetStack() + *fp.inputIdx() = startIdx + *fp.patIdx() = 0 + for i := 0; i < len(m.data); i++ { + m.data[i] = 0 + } + + for { + op := pat[*fp.patIdx()] + + if m.dumper != nil { + fmt.Fprintf(m.dumper, "inputIdx=%d inputChar=%x sp=%3d activeLimit=%d ", *fp.inputIdx(), + charAt(inputText, *fp.inputIdx()), m.stack.sp(), m.activeLimit) + m.pattern.dumpOp(m.dumper, *fp.patIdx()) + } + + *fp.patIdx()++ + + switch op.typ() { + case urxNop: + // Nothing to do. + case urxBacktrack: + // Force a backtrack. In some circumstances, the pattern compiler + // will notice that the pattern can't possibly match anything, and will + // emit one of these at that point. + fp = m.stack.popFrame() + case urxOnechar: + if *fp.inputIdx() < m.activeLimit { + c := charAt(inputText, *fp.inputIdx()) + *fp.inputIdx()++ + if c == rune(op.value()) { + break + } + } else { + m.hitEnd = true + } + fp = m.stack.popFrame() + case urxString: + // Test input against a literal string. + // Strings require two slots in the compiled pattern, one for the + // offset to the string text, and one for the length. 
+ stringStartIdx := op.value() + nextOp := pat[*fp.patIdx()] // Fetch the second operand + *fp.patIdx()++ + stringLen := nextOp.value() + + patternString := litText[stringStartIdx:] + var patternStringIndex int + success := true + for patternStringIndex < stringLen { + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + success = false + break + } + if charAt(patternString, patternStringIndex) != charAt(inputText, *fp.inputIdx()) { + success = false + break + } + patternStringIndex++ + *fp.inputIdx()++ + } + + if !success { + fp = m.stack.popFrame() + } + case urxStateSave: + fp, err = m.stateSave(*fp.inputIdx(), op.value()) + if err != nil { + return err + } + case urxEnd: + // The match loop will exit via this path on a successful match, + // when we reach the end of the pattern. + if toEnd && *fp.inputIdx() != m.activeLimit { + // The pattern matched, but not to the end of input. Try some more. + fp = m.stack.popFrame() + break + } + isMatch = true + goto breakFromLoop + + // Start and End Capture stack frame variables are laid out out like this: + // fp->fExtra[opValue] - The start of a completed capture group + // opValue+1 - The end of a completed capture group + // opValue+2 - the start of a capture group whose end + // has not yet been reached (and might not ever be). + case urxStartCapture: + *fp.extra(op.value() + 2) = *fp.inputIdx() + case urxEndCapture: + *fp.extra(op.value()) = *fp.extra(op.value() + 2) // Tentative start becomes real. + *fp.extra(op.value() + 1) = *fp.inputIdx() // End position + + case urxDollar: // $, test for End of line + if *fp.inputIdx() < m.anchorLimit-2 { + fp = m.stack.popFrame() + break + } + // or for position before new line at end of input + if *fp.inputIdx() >= m.anchorLimit { + // We really are at the end of input. Success. 
+ m.hitEnd = true + m.requireEnd = true + break + } + + if *fp.inputIdx() == m.anchorLimit-1 { + c := m.input[*fp.inputIdx()] + if isLineTerminator(c) { + if !(c == 0x0a && *fp.inputIdx() > m.anchorStart && m.input[*fp.inputIdx()-1] == 0x0d) { + // At new-line at end of input. Success + m.hitEnd = true + m.requireEnd = true + break + } + } + } else if *fp.inputIdx() == m.anchorLimit-2 && m.input[*fp.inputIdx()] == 0x0d && m.input[*fp.inputIdx()+1] == 0x0a { + m.hitEnd = true + m.requireEnd = true + break // At CR/LF at end of input. Success + } + fp = m.stack.popFrame() + + case urxDollarD: // $, test for End of Line, in UNIX_LINES mode. + if *fp.inputIdx() >= m.anchorLimit { + // Off the end of input. Success. + m.hitEnd = true + m.requireEnd = true + break + } + c := charAt(inputText, *fp.inputIdx()) + *fp.inputIdx()++ + // Either at the last character of input, or off the end. + if c == 0x0a && *fp.inputIdx() == m.anchorLimit { + m.hitEnd = true + m.requireEnd = true + break + } + + // Not at end of input. Back-track out. + fp = m.stack.popFrame() + case urxDollarM: // $, test for End of line in multi-line mode + if *fp.inputIdx() >= m.anchorLimit { + // We really are at the end of input. Success. + m.hitEnd = true + m.requireEnd = true + break + } + // If we are positioned just before a new-line, succeed. + // It makes no difference where the new-line is within the input. + c := charAt(inputText, *fp.inputIdx()) + if isLineTerminator(c) { + // At a line end, except for the odd chance of being in the middle of a CR/LF sequence + // In multi-line mode, hitting a new-line just before the end of input does not + // set the hitEnd or requireEnd flags + if !(c == 0x0a && *fp.inputIdx() > m.anchorStart && charAt(inputText, *fp.inputIdx()-1) == 0x0d) { + break + } + } + // not at a new line. Fail. 
+ fp = m.stack.popFrame() + case urxDollarMd: // $, test for End of line in multi-line and UNIX_LINES mode + if *fp.inputIdx() >= m.anchorLimit { + // We really are at the end of input. Success. + m.hitEnd = true + m.requireEnd = true // Java set requireEnd in this case, even though + break // adding a new-line would not lose the match. + } + // If we are not positioned just before a new-line, the test fails; backtrack out. + // It makes no difference where the new-line is within the input. + if charAt(inputText, *fp.inputIdx()) != 0x0a { + fp = m.stack.popFrame() + } + case urxCaret: // ^, test for start of line + if *fp.inputIdx() != m.anchorStart { + fp = m.stack.popFrame() + } + case urxCaretM: // ^, test for start of line in mulit-line mode + if *fp.inputIdx() == m.anchorStart { + // We are at the start input. Success. + break + } + // Check whether character just before the current pos is a new-line + // unless we are at the end of input + c := charAt(inputText, *fp.inputIdx()-1) + if (*fp.inputIdx() < m.anchorLimit) && isLineTerminator(c) { + // It's a new-line. ^ is true. Success. + // TODO: what should be done with positions between a CR and LF? + break + } + // Not at the start of a line. Fail. + fp = m.stack.popFrame() + case urxCaretMUnix: // ^, test for start of line in mulit-line + Unix-line mode + if *fp.inputIdx() <= m.anchorStart { + // We are at the start input. Success. + break + } + + c := charAt(inputText, *fp.inputIdx()-1) + if c != 0x0a { + // Not at the start of a line. Back-track out. 
+ fp = m.stack.popFrame() + } + case urxBackslashB: // Test for word boundaries + success := m.isWordBoundary(*fp.inputIdx()) + success = success != (op.value() != 0) // flip sense for \B + if !success { + fp = m.stack.popFrame() + } + case urxBackslashBu: // Test for word boundaries, Unicode-style + success := m.isUWordBoundary(*fp.inputIdx()) + success = success != (op.value() != 0) // flip sense for \B + if !success { + fp = m.stack.popFrame() + } + case urxBackslashD: // Test for decimal digit + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + fp = m.stack.popFrame() + break + } + + c := charAt(inputText, *fp.inputIdx()) + + success := m.isDecimalDigit(c) + success = success != (op.value() != 0) // flip sense for \D + if success { + *fp.inputIdx()++ + } else { + fp = m.stack.popFrame() + } + + case urxBackslashG: // Test for position at end of previous match + if !((m.match && *fp.inputIdx() == m.matchEnd) || (!m.match && *fp.inputIdx() == m.activeStart)) { + fp = m.stack.popFrame() + } + + case urxBackslashH: // Test for \h, horizontal white space. + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + fp = m.stack.popFrame() + break + } + + c := charAt(inputText, *fp.inputIdx()) + success := m.isHorizWS(c) || c == 9 + success = success != (op.value() != 0) // flip sense for \H + if success { + *fp.inputIdx()++ + } else { + fp = m.stack.popFrame() + } + + case urxBackslashR: // Test for \R, any line break sequence. + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + fp = m.stack.popFrame() + break + } + c := charAt(inputText, *fp.inputIdx()) + if isLineTerminator(c) { + if c == 0x0d && charAt(inputText, *fp.inputIdx()+1) == 0x0a { + *fp.inputIdx()++ + } + *fp.inputIdx()++ + } else { + fp = m.stack.popFrame() + } + + case urxBackslashV: // \v, any single line ending character. 
+ if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + fp = m.stack.popFrame() + break + } + c := charAt(inputText, *fp.inputIdx()) + success := isLineTerminator(c) + success = success != (op.value() != 0) // flip sense for \V + if success { + *fp.inputIdx()++ + } else { + fp = m.stack.popFrame() + } + + case urxBackslashX: + // Match a Grapheme, as defined by Unicode UAX 29. + + // Fail if at end of input + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + fp = m.stack.popFrame() + break + } + + *fp.inputIdx() = m.followingGCBoundary(*fp.inputIdx()) + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + *fp.inputIdx() = m.activeLimit + } + + case urxBackslashZ: // Test for end of Input + if *fp.inputIdx() < m.anchorLimit { + fp = m.stack.popFrame() + } else { + m.hitEnd = true + m.requireEnd = true + } + case urxStaticSetref: + // Test input character against one of the predefined sets + // (Word Characters, for example) + // The high bit of the op value is a flag for the match polarity. + // 0: success if input char is in set. + // 1: success if input char is not in set. + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + fp = m.stack.popFrame() + break + } + + success := (op.value() & urxNegSet) == urxNegSet + negOp := op.value() & ^urxNegSet + + c := charAt(inputText, *fp.inputIdx()) + s := staticPropertySets[negOp] + if s.ContainsRune(c) { + success = !success + } + + if success { + *fp.inputIdx()++ + } else { + // the character wasn't in the set. + fp = m.stack.popFrame() + } + case urxStatSetrefN: + // Test input character for NOT being a member of one of + // the predefined sets (Word Characters, for example) + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + fp = m.stack.popFrame() + break + } + + c := charAt(inputText, *fp.inputIdx()) + s := staticPropertySets[op.value()] + if !s.ContainsRune(c) { + *fp.inputIdx()++ + break + } + // the character wasn't in the set. 
+ fp = m.stack.popFrame() + + case urxSetref: + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + fp = m.stack.popFrame() + break + } + + // There is input left. Pick up one char and test it for set membership. + c := charAt(inputText, *fp.inputIdx()) + + s := sets[op.value()] + if s.ContainsRune(c) { + *fp.inputIdx()++ + break + } + + // the character wasn't in the set. + fp = m.stack.popFrame() + + case urxDotany: + // . matches anything, but stops at end-of-line. + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + fp = m.stack.popFrame() + break + } + + c := charAt(inputText, *fp.inputIdx()) + if isLineTerminator(c) { + // End of line in normal mode. . does not match. + fp = m.stack.popFrame() + break + } + *fp.inputIdx()++ + + case urxDotanyAll: + // ., in dot-matches-all (including new lines) mode + if *fp.inputIdx() >= m.activeLimit { + // At end of input. Match failed. Backtrack out. + m.hitEnd = true + fp = m.stack.popFrame() + break + } + + c := charAt(inputText, *fp.inputIdx()) + *fp.inputIdx()++ + if c == 0x0d && *fp.inputIdx() < m.activeLimit { + // In the case of a CR/LF, we need to advance over both. + nextc := charAt(inputText, *fp.inputIdx()) + if nextc == 0x0a { + *fp.inputIdx()++ + } + } + + case urxDotanyUnix: + // '.' operator, matches all, but stops at end-of-line. + // UNIX_LINES mode, so 0x0a is the only recognized line ending. + if *fp.inputIdx() >= m.activeLimit { + // At end of input. Match failed. Backtrack out. + m.hitEnd = true + fp = m.stack.popFrame() + break + } + + // There is input left. Advance over one char, unless we've hit end-of-line + c := charAt(inputText, *fp.inputIdx()) + if c == 0x0a { + // End of line in normal mode. '.' 
does not match the \n + fp = m.stack.popFrame() + } else { + *fp.inputIdx()++ + } + case urxJmp: + *fp.patIdx() = op.value() + + case urxFail: + isMatch = false + goto breakFromLoop + + case urxJmpSav: + fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()) // State save to loc following current + if err != nil { + return err + } + *fp.patIdx() = op.value() // Then JMP. + + case urxJmpSavX: + // This opcode is used with (x)+, when x can match a zero length string. + // Same as JMP_SAV, except conditional on the match having made forward progress. + // Destination of the JMP must be a URX_STO_INP_LOC, from which we get the + // data address of the input position at the start of the loop. + stoOp := pat[op.value()-1] + frameLoc := stoOp.value() + + prevInputIdx := *fp.extra(frameLoc) + if prevInputIdx < *fp.inputIdx() { + // The match did make progress. Repeat the loop. + fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()) // State save to loc following current + if err != nil { + return err + } + *fp.patIdx() = op.value() // Then JMP. + *fp.extra(frameLoc) = *fp.inputIdx() + } + // If the input position did not advance, we do nothing here, + // execution will fall out of the loop. + + case urxCtrInit: + *fp.extra(op.value()) = 0 // Set the loop counter variable to zero + + // Pick up the three extra operands that CTR_INIT has, and + // skip the pattern location counter past + instOperandLoc := *fp.patIdx() + *fp.patIdx() += 3 // Skip over the three operands that CTR_INIT has. + + loopLoc := pat[instOperandLoc].value() + minCount := int(pat[instOperandLoc+1]) + maxCount := int(pat[instOperandLoc+2]) + + if minCount == 0 { + fp, err = m.stateSave(*fp.inputIdx(), loopLoc+1) + if err != nil { + return err + } + } + if maxCount == -1 { + *fp.extra(op.value() + 1) = *fp.inputIdx() // For loop breaking. 
+ } else if maxCount == 0 { + fp = m.stack.popFrame() + } + + case utxCtrLoop: + initOp := pat[op.value()] + opValue := initOp.value() + pCounter := fp.extra(opValue) + minCount := int(pat[op.value()+2]) + maxCount := int(pat[op.value()+3]) + *pCounter++ + if *pCounter >= maxCount && maxCount != -1 { + break + } + + if *pCounter >= minCount { + if maxCount == -1 { + // Loop has no hard upper bound. + // Check that it is progressing through the input, break if it is not. + pLastIntputIdx := fp.extra(opValue + 1) + if *pLastIntputIdx == *fp.inputIdx() { + break + } + *pLastIntputIdx = *fp.inputIdx() + } + fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()) + if err != nil { + return err + } + } else { + // Increment time-out counter. (StateSave() does it if count >= minCount) + m.tickCounter-- + if m.tickCounter <= 0 { + if err = m.incrementTime(*fp.inputIdx()); err != nil { + return err + } // Re-initializes fTickCounter + } + } + + *fp.patIdx() = op.value() + 4 // Loop back. + + case urxCtrInitNg: + *fp.extra(op.value()) = 0 // Set the loop counter variable to zero + + // Pick up the three extra operands that CTR_INIT_NG has, and + // skip the pattern location counter past + instrOperandLoc := *fp.patIdx() + *fp.patIdx() += 3 + loopLoc := pat[instrOperandLoc].value() + minCount := pat[instrOperandLoc+1].value() + maxCount := pat[instrOperandLoc+2].value() + + if maxCount == -1 { + *fp.extra(op.value() + 1) = *fp.inputIdx() // Save initial input index for loop breaking. + } + + if minCount == 0 { + if maxCount != 0 { + fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()) + if err != nil { + return err + } + } + *fp.patIdx() = loopLoc + 1 + } + + case urxCtrLoopNg: + initOp := pat[op.value()] + pCounter := fp.extra(initOp.value()) + minCount := int(pat[op.value()+2]) + maxCount := int(pat[op.value()+3]) + *pCounter++ + if *pCounter >= maxCount && maxCount != -1 { + // The loop has matched the maximum permitted number of times. + // Break out of here with no action. 
Matching will + // continue with the following pattern. + break + } + + if *pCounter < minCount { + // We haven't met the minimum number of matches yet. + // Loop back for another one. + *fp.patIdx() = op.value() + 4 // Loop back. + // Increment time-out counter. (StateSave() does it if count >= minCount) + m.tickCounter-- + if m.tickCounter <= 0 { + if err = m.incrementTime(*fp.inputIdx()); err != nil { + return err + } // Re-initializes fTickCounter + } + } else { + // We do have the minimum number of matches. + + // If there is no upper bound on the loop iterations, check that the input index + // is progressing, and stop the loop if it is not. + if maxCount == -1 { + lastInputIdx := fp.extra(initOp.value() + 1) + if *fp.inputIdx() == *lastInputIdx { + break + } + *lastInputIdx = *fp.inputIdx() + } + } + + // Loop Continuation: we will fall into the pattern following the loop + // (non-greedy, don't execute loop body first), but first do + // a state save to the top of the loop, so that a match failure + // in the following pattern will try another iteration of the loop. + fp, err = m.stateSave(*fp.inputIdx(), op.value()+4) + if err != nil { + return err + } + + case urxStoSp: + m.data[op.value()] = m.stack.len() + + case urxLdSp: + newStackSize := m.data[op.value()] + newFp := m.stack.offset(newStackSize) + if newFp.equals(fp) { + break + } + copy(newFp, fp) + fp = newFp + + m.stack.setSize(newStackSize) + case urxBackref: + groupStartIdx := *fp.extra(op.value()) + groupEndIdx := *fp.extra(op.value() + 1) + + if groupStartIdx < 0 { + // This capture group has not participated in the match thus far, + fp = m.stack.popFrame() // FAIL, no match. 
+ break + } + + success := true + for { + if groupStartIdx >= groupEndIdx { + success = true + break + } + + if *fp.inputIdx() >= m.activeLimit { + success = false + m.hitEnd = true + break + } + + captureGroupChar := charAt(inputText, groupStartIdx) + inputChar := charAt(inputText, *fp.inputIdx()) + groupStartIdx++ + *fp.inputIdx()++ + if inputChar != captureGroupChar { + success = false + break + } + } + + if !success { + fp = m.stack.popFrame() + } + case urxBackrefI: + groupStartIdx := *fp.extra(op.value()) + groupEndIdx := *fp.extra(op.value() + 1) + + if groupStartIdx < 0 { + // This capture group has not participated in the match thus far, + fp = m.stack.popFrame() // FAIL, no match. + break + } + + captureGroupItr := newCaseFoldIterator(m.input, groupStartIdx, groupEndIdx) + inputItr := newCaseFoldIterator(m.input, *fp.inputIdx(), m.activeLimit) + success := true + + for { + captureGroupChar := captureGroupItr.next() + if captureGroupChar == -1 { + success = true + break + } + inputChar := inputItr.next() + if inputChar == -1 { + success = false + m.hitEnd = true + break + } + if inputChar != captureGroupChar { + success = false + break + } + } + + if success && inputItr.inExpansion() { + // We otained a match by consuming part of a string obtained from + // case-folding a single code point of the input text. + // This does not count as an overall match. + success = false + } + + if success { + *fp.inputIdx() = inputItr.index + } else { + fp = m.stack.popFrame() + } + + case urxStoInpLoc: + *fp.extra(op.value()) = *fp.inputIdx() + + case urxJmpx: + instrOperandLoc := *fp.patIdx() + *fp.patIdx()++ + dataLoc := pat[instrOperandLoc].value() + + saveInputIdx := *fp.extra(dataLoc) + + if saveInputIdx < *fp.inputIdx() { + *fp.patIdx() = op.value() // JMP + } else { + fp = m.stack.popFrame() // FAIL, no progress in loop. 
+ } + + case urxLaStart: + m.data[op.value()] = m.stack.len() + m.data[op.value()+1] = *fp.inputIdx() + m.data[op.value()+2] = m.activeStart + m.data[op.value()+3] = m.activeLimit + m.activeStart = m.lookStart // Set the match region change for + m.activeLimit = m.lookLimit // transparent bounds. + + case urxLaEnd: + stackSize := m.stack.len() + newStackSize := m.data[op.value()] + if stackSize > newStackSize { + // Copy the current top frame back to the new (cut back) top frame. + // This makes the capture groups from within the look-ahead + // expression available. + newFp := m.stack.offset(newStackSize) + copy(newFp, fp) + fp = newFp + m.stack.setSize(newStackSize) + } + + *fp.inputIdx() = m.data[op.value()+1] + + m.activeStart = m.data[op.value()+2] + m.activeLimit = m.data[op.value()+3] + + case urcOnecharI: + // Case insensitive one char. The char from the pattern is already case folded. + // Input text is not, but case folding the input can not reduce two or more code + // points to one. + if *fp.inputIdx() < m.activeLimit { + c := charAt(inputText, *fp.inputIdx()) + if ucase.Fold(c) == op.value32() { + *fp.inputIdx()++ + break + } + } else { + m.hitEnd = true + } + + fp = m.stack.popFrame() + + case urxStringI: + // Case-insensitive test input against a literal string. + // Strings require two slots in the compiled pattern, one for the + // offset to the string text, and one for the length. + // The compiled string has already been case folded. 
+ patternString := litText[op.value():] + var patternStringIdx int + nextOp := pat[*fp.patIdx()] + *fp.patIdx()++ + patternStringLen := nextOp.value() + + success := true + + it := newCaseFoldIterator(inputText, *fp.inputIdx(), m.activeLimit) + for patternStringIdx < patternStringLen { + cText := it.next() + cPattern := patternString[patternStringIdx] + patternStringIdx++ + + if cText != cPattern { + success = false + if cText == -1 { + m.hitEnd = true + } + break + } + } + if it.inExpansion() { + success = false + } + + if success { + *fp.inputIdx() = it.index + } else { + fp = m.stack.popFrame() + } + + case urxLbStart: + // Entering a look-behind block. + // Save Stack Ptr, Input Pos and active input region. + // TODO: implement transparent bounds. Ticket #6067 + m.data[op.value()] = m.stack.len() + m.data[op.value()+1] = *fp.inputIdx() + // Save input string length, then reset to pin any matches to end at + // the current position. + m.data[op.value()+2] = m.activeStart + m.data[op.value()+3] = m.activeLimit + m.activeStart = m.regionStart + m.activeLimit = *fp.inputIdx() + // Init the variable containing the start index for attempted matches. + m.data[op.value()+4] = -1 + case urxLbCont: + // Positive Look-Behind, at top of loop checking for matches of LB expression + // at all possible input starting positions. + + // Fetch the min and max possible match lengths. They are the operands + // of this op in the pattern. + minML := pat[*fp.patIdx()] + *fp.patIdx()++ + maxML := pat[*fp.patIdx()] + *fp.patIdx()++ + + lbStartIdx := &m.data[op.value()+4] + if *lbStartIdx < 0 { + // First time through loop. + *lbStartIdx = *fp.inputIdx() - int(minML) + if *lbStartIdx > 0 { + *lbStartIdx = *fp.inputIdx() + } + } else { + // 2nd through nth time through the loop. + // Back up start position for match by one. 
+ *lbStartIdx-- + } + + if *lbStartIdx < 0 || *lbStartIdx < *fp.inputIdx()-int(maxML) { + // We have tried all potential match starting points without + // getting a match. Backtrack out, and out of the + // Look Behind altogether. + fp = m.stack.popFrame() + m.activeStart = m.data[op.value()+2] + m.activeLimit = m.data[op.value()+3] + break + } + + // Save state to this URX_LB_CONT op, so failure to match will repeat the loop. + // (successful match will fall off the end of the loop.) + fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()-3) + if err != nil { + return err + } + *fp.inputIdx() = *lbStartIdx + + case urxLbEnd: + // End of a look-behind block, after a successful match. + if *fp.inputIdx() != m.activeLimit { + // The look-behind expression matched, but the match did not + // extend all the way to the point that we are looking behind from. + // FAIL out of here, which will take us back to the LB_CONT, which + // will retry the match starting at another position or fail + // the look-behind altogether, whichever is appropriate. + fp = m.stack.popFrame() + break + } + + // Look-behind match is good. Restore the orignal input string region, + // which had been truncated to pin the end of the lookbehind match to the + // position being looked-behind. + m.activeStart = m.data[op.value()+2] + m.activeLimit = m.data[op.value()+3] + case urxLbnCount: + // Negative Look-Behind, at top of loop checking for matches of LB expression + // at all possible input starting positions. + + // Fetch the extra parameters of this op. + minML := pat[*fp.patIdx()] + *fp.patIdx()++ + maxML := pat[*fp.patIdx()] + *fp.patIdx()++ + + continueLoc := pat[*fp.patIdx()].value() + *fp.patIdx()++ + + lbStartIdx := &m.data[op.value()+4] + + if *lbStartIdx < 0 { + // First time through loop. + *lbStartIdx = *fp.inputIdx() - int(minML) + if *lbStartIdx > 0 { + // move index to a code point boundary, if it's not on one already. 
+ *lbStartIdx = *fp.inputIdx() + } + } else { + // 2nd through nth time through the loop. + // Back up start position for match by one. + *lbStartIdx-- + } + + if *lbStartIdx < 0 || *lbStartIdx < *fp.inputIdx()-int(maxML) { + // We have tried all potential match starting points without + // getting a match, which means that the negative lookbehind as + // a whole has succeeded. Jump forward to the continue location + m.activeStart = m.data[op.value()+2] + m.activeLimit = m.data[op.value()+3] + *fp.patIdx() = continueLoc + break + } + + // Save state to this URX_LB_CONT op, so failure to match will repeat the loop. + // (successful match will cause a FAIL out of the loop altogether.) + fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()-4) + if err != nil { + return err + } + *fp.inputIdx() = *lbStartIdx + case urxLbnEnd: + // End of a negative look-behind block, after a successful match. + + if *fp.inputIdx() != m.activeLimit { + // The look-behind expression matched, but the match did not + // extend all the way to the point that we are looking behind from. + // FAIL out of here, which will take us back to the LB_CONT, which + // will retry the match starting at another position or succeed + // the look-behind altogether, whichever is appropriate. + fp = m.stack.popFrame() + break + } + + // Look-behind expression matched, which means look-behind test as + // a whole Fails + + // Restore the orignal input string length, which had been truncated + // inorder to pin the end of the lookbehind match + // to the position being looked-behind. + m.activeStart = m.data[op.value()+2] + m.activeLimit = m.data[op.value()+3] + + // Restore original stack position, discarding any state saved + // by the successful pattern match. + newStackSize := m.data[op.value()] + m.stack.setSize(newStackSize) + + // FAIL, which will take control back to someplace + // prior to entering the look-behind test. 
+ fp = m.stack.popFrame() + case urxLoopSrI: + // Loop Initialization for the optimized implementation of + // [some character set]* + // This op scans through all matching input. + // The following LOOP_C op emulates stack unwinding if the following pattern fails. + s := sets[op.value()] + + // Loop through input, until either the input is exhausted or + // we reach a character that is not a member of the set. + ix := *fp.inputIdx() + + for { + if ix >= m.activeLimit { + m.hitEnd = true + break + } + c := charAt(inputText, ix) + if !s.ContainsRune(c) { + break + } + ix++ + } + + // If there were no matching characters, skip over the loop altogether. + // The loop doesn't run at all, a * op always succeeds. + if ix == *fp.inputIdx() { + *fp.patIdx()++ // skip the URX_LOOP_C op. + break + } + + // Peek ahead in the compiled pattern, to the URX_LOOP_C that + // must follow. It's operand is the stack location + // that holds the starting input index for the match of this [set]* + loopcOp := pat[*fp.patIdx()] + stackLoc := loopcOp.value() + *fp.extra(stackLoc) = *fp.inputIdx() + *fp.inputIdx() = ix + + // Save State to the URX_LOOP_C op that follows this one, + // so that match failures in the following code will return to there. + // Then bump the pattern idx so the LOOP_C is skipped on the way out of here. + fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()) + if err != nil { + return err + } + *fp.patIdx()++ + case urxLoopDotI: + // Loop Initialization for the optimized implementation of .* + // This op scans through all remaining input. + // The following LOOP_C op emulates stack unwinding if the following pattern fails. + + // Loop through input until the input is exhausted (we reach an end-of-line) + // In DOTALL mode, we can just go straight to the end of the input. + var ix int + if (op.value() & 1) == 1 { + // Dot-matches-All mode. Jump straight to the end of the string. + ix = m.activeLimit + m.hitEnd = true + } else { + // NOT DOT ALL mode. 
Line endings do not match '.' + // Scan forward until a line ending or end of input. + ix = *fp.inputIdx() + for { + if ix >= m.activeLimit { + m.hitEnd = true + break + } + c := charAt(inputText, ix) + if (c & 0x7f) <= 0x29 { // Fast filter of non-new-line-s + if (c == 0x0a) || // 0x0a is newline in both modes. + (((op.value() & 2) == 0) && // IF not UNIX_LINES mode + isLineTerminator(c)) { + // char is a line ending. Exit the scanning loop. + break + } + } + ix++ + } + } + + // If there were no matching characters, skip over the loop altogether. + // The loop doesn't run at all, a * op always succeeds. + if ix == *fp.inputIdx() { + *fp.patIdx()++ // skip the URX_LOOP_C op. + break + } + + // Peek ahead in the compiled pattern, to the URX_LOOP_C that + // must follow. It's operand is the stack location + // that holds the starting input index for the match of this .* + loopcOp := pat[*fp.patIdx()] + stackLoc := loopcOp.value() + *fp.extra(stackLoc) = *fp.inputIdx() + *fp.inputIdx() = ix + + // Save State to the URX_LOOP_C op that follows this one, + // so that match failures in the following code will return to there. + // Then bump the pattern idx so the LOOP_C is skipped on the way out of here. + fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()) + if err != nil { + return err + } + *fp.patIdx()++ + + case urxLoopC: + backSearchIndex := *fp.extra(op.value()) + + if backSearchIndex == *fp.inputIdx() { + // We've backed up the input idx to the point that the loop started. + // The loop is done. Leave here without saving state. + // Subsequent failures won't come back here. + break + } + // Set up for the next iteration of the loop, with input index + // backed up by one from the last time through, + // and a state save to this instruction in case the following code fails again. + // (We're going backwards because this loop emulates stack unwinding, not + // the initial scan forward.) 
+ + prevC := charAt(inputText, *fp.inputIdx()-1) + *fp.inputIdx()-- + twoPrevC := charAt(inputText, *fp.inputIdx()-1) + + if prevC == 0x0a && + *fp.inputIdx() > backSearchIndex && + twoPrevC == 0x0d { + prevOp := pat[*fp.patIdx()-2] + if prevOp.typ() == urxLoopDotI { + // .*, stepping back over CRLF pair. + *fp.inputIdx()-- + } + } + + fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()-1) + if err != nil { + return err + } + default: + // Trouble. The compiled pattern contains an entry with an + // unrecognized type tag. + // Unknown opcode type in opType = URX_TYPE(pat[fp->fPatIdx]). But we have + // reports of this in production code, don't use UPRV_UNREACHABLE_EXIT. + // See ICU-21669. + return &MatchError{ + Code: InternalMatchError, + Pattern: m.pattern.pattern, + Position: *fp.inputIdx(), + Input: m.input, + } + } + } + +breakFromLoop: + m.match = isMatch + if isMatch { + m.lastMatchEnd = m.matchEnd + m.matchStart = startIdx + m.matchEnd = *fp.inputIdx() + } + + if m.dumper != nil { + if isMatch { + fmt.Fprintf(m.dumper, "Match. start=%d end=%d\n\n", m.matchStart, m.matchEnd) + } else { + fmt.Fprintf(m.dumper, "No match\n\n") + } + } + + m.frame = fp // The active stack frame when the engine stopped. + // Contains the capture group results that we need to + // access later. 
+ return nil +} + +func charAt(str []rune, idx int) rune { + if idx >= 0 && idx < len(str) { + return str[idx] + } + return -1 +} + +func (m *Matcher) isWordBoundary(pos int) bool { + cIsWord := false + + if pos >= m.lookLimit { + m.hitEnd = true + } else { + c := charAt(m.input, pos) + if uprops.HasBinaryProperty(c, uprops.UCharGraphemeExtend) || uchar.CharType(c) == uchar.FormatChar { + return false + } + cIsWord = staticPropertySets[urxIswordSet].ContainsRune(c) + } + + prevCIsWord := false + for { + if pos <= m.lookStart { + break + } + prevChar := charAt(m.input, pos-1) + pos-- + if !(uprops.HasBinaryProperty(prevChar, uprops.UCharGraphemeExtend) || uchar.CharType(prevChar) == uchar.FormatChar) { + prevCIsWord = staticPropertySets[urxIswordSet].ContainsRune(prevChar) + break + } + } + return cIsWord != prevCIsWord +} + +func (m *Matcher) isUWordBoundary(pos int) bool { + // TODO: implement + /* + UBool returnVal = FALSE; + + #if UCONFIG_NO_BREAK_ITERATION==0 + // Note: this point will never be reached if break iteration is configured out. + // Regex patterns that would require this function will fail to compile. + + // If we haven't yet created a break iterator for this matcher, do it now. + if (fWordBreakItr == nullptr) { + fWordBreakItr = BreakIterator::createWordInstance(Locale::getEnglish(), status); + if (U_FAILURE(status)) { + return FALSE; + } + fWordBreakItr->setText(fInputText, status); + } + + // Note: zero width boundary tests like \b see through transparent region bounds, + // which is why fLookLimit is used here, rather than fActiveLimit. + if (pos >= fLookLimit) { + fHitEnd = TRUE; + returnVal = TRUE; // With Unicode word rules, only positions within the interior of "real" + // words are not boundaries. All non-word chars stand by themselves, + // with word boundaries on both sides. 
+ } else { + returnVal = fWordBreakItr->isBoundary((int32_t)pos); + } + #endif + return returnVal; + */ + return false +} + +func (m *Matcher) resetStack() stackFrame { + m.stack.reset() + frame, _ := m.stack.newFrame(0, nil, "") + frame.clearExtra() + return frame +} + +func (m *Matcher) stateSave(inputIdx, savePatIdx int) (stackFrame, error) { + // push storage for a new frame. + newFP, err := m.stack.newFrame(inputIdx, m.input, m.pattern.pattern) + if err != nil { + return nil, err + } + fp := m.stack.prevFromTop() + + // New stack frame = copy of old top frame. + copy(newFP, fp) + + m.tickCounter-- + if m.tickCounter <= 0 { + if err := m.incrementTime(*fp.inputIdx()); err != nil { + return nil, err + } + } + *fp.patIdx() = savePatIdx + return newFP, nil +} + +func (m *Matcher) incrementTime(inputIdx int) error { + m.tickCounter = timerInitialValue + m.time++ + if m.timeLimit > 0 && m.time >= m.timeLimit { + return &MatchError{ + Code: TimeOut, + Pattern: m.pattern.pattern, + Position: inputIdx, + Input: m.input, + } + } + return nil +} + +func (m *Matcher) isDecimalDigit(c rune) bool { + return uchar.IsDigit(c) +} + +func (m *Matcher) isHorizWS(c rune) bool { + return uchar.CharType(c) == uchar.SpaceSeparator || c == 9 +} + +func (m *Matcher) followingGCBoundary(pos int) int { + // TODO: implement + return pos + /* + // Note: this point will never be reached if break iteration is configured out. + // Regex patterns that would require this function will fail to compile. + + // If we haven't yet created a break iterator for this matcher, do it now. 
+ if (m.gcBreakItr == nil) { + m.gcBreakItr = BreakIterator::createCharacterInstance(Locale::getEnglish(), status); + if (U_FAILURE(status)) { + return pos; + } + fGCBreakItr->setText(fInputText, status); + } + result = fGCBreakItr->following(pos); + if (result == BreakIterator::DONE) { + result = pos; + } + */ +} + +func (m *Matcher) ResetString(input string) { + m.Reset([]rune(input)) +} + +func (m *Matcher) Reset(input []rune) { + m.input = input + m.reset() +} + +func (m *Matcher) Matches() (bool, error) { + err := m.MatchAt(m.activeStart, true) + return m.match, err +} + +func (m *Matcher) LookingAt() (bool, error) { + err := m.MatchAt(m.activeStart, false) + return m.match, err +} + +func (m *Matcher) Find() (bool, error) { + startPos := m.matchEnd + if startPos == 0 { + startPos = m.activeStart + } + + if m.match { + // Save the position of any previous successful match. + m.lastMatchEnd = m.matchEnd + if m.matchStart == m.matchEnd { + // Previous match had zero length. Move start position up one position + // to avoid sending find() into a loop on zero-length matches. + if startPos >= m.activeLimit { + m.match = false + m.hitEnd = true + return false, nil + } + startPos++ + } + } else { + if m.lastMatchEnd >= 0 { + // A previous find() failed to match. Don't try again. + // (without this test, a pattern with a zero-length match + // could match again at the end of an input string.) + m.hitEnd = true + return false, nil + } + } + + testStartLimit := m.activeLimit - int(m.pattern.minMatchLen) + if startPos > testStartLimit { + m.match = false + m.hitEnd = true + return false, nil + } + + switch m.pattern.startType { + case startNoInfo: + // No optimization was found. + // Try a match at each input position. 
+ for { + err := m.MatchAt(startPos, false) + if err != nil { + return false, err + } + if m.match { + return true, nil + } + if startPos >= testStartLimit { + m.hitEnd = true + return false, nil + } + startPos++ + } + case startSet: + // Match may start on any char from a pre-computed set. + for { + pos := startPos + c := charAt(m.input, startPos) + startPos++ + // c will be -1 (U_SENTINEL) at end of text, in which case we + // skip this next block (so we don't have a negative array index) + // and handle end of text in the following block. + if c >= 0 && m.pattern.initialChars.ContainsRune(c) { + err := m.MatchAt(pos, false) + if err != nil { + return false, err + } + if m.match { + return true, nil + } + } + + if startPos > testStartLimit { + m.match = false + m.hitEnd = true + return false, nil + } + } + case startStart: + // Matches are only possible at the start of the input string + // (pattern begins with ^ or \A) + if startPos > m.activeStart { + m.match = false + return false, nil + } + err := m.MatchAt(startPos, false) + return m.match, err + case startLine: + var ch rune + if startPos == m.anchorStart { + err := m.MatchAt(startPos, false) + if err != nil { + return false, err + } + if m.match { + return true, nil + } + ch = charAt(m.input, startPos) + startPos++ + } else { + ch = charAt(m.input, startPos-1) + } + + if m.pattern.flags&UnixLines != 0 { + for { + if ch == 0x0a { + err := m.MatchAt(startPos, false) + if err != nil { + return false, err + } + if m.match { + return true, nil + } + } + if startPos >= testStartLimit { + m.match = false + m.hitEnd = true + return false, nil + } + ch = charAt(m.input, startPos) + startPos++ + } + } else { + for { + if isLineTerminator(ch) { + if ch == 0x0d && startPos < m.activeLimit && charAt(m.input, startPos) == 0x0a { + startPos++ + } + err := m.MatchAt(startPos, false) + if err != nil { + return false, err + } + if m.match { + return true, nil + } + } + if startPos >= testStartLimit { + m.match = false + 
m.hitEnd = true + return false, nil + } + ch = charAt(m.input, startPos) + startPos++ + } + } + case startChar, startString: + // Match starts on exactly one char. + theChar := m.pattern.initialChar + for { + pos := startPos + c := charAt(m.input, startPos) + startPos++ + if c == theChar { + err := m.MatchAt(pos, false) + if err != nil { + return false, err + } + if m.match { + return true, nil + } + } + if startPos > testStartLimit { + m.match = false + m.hitEnd = true + return false, nil + } + } + default: + // Unknown value in fPattern->fStartType, should be from StartOfMatch enum. But + // we have reports of this in production code, don't use UPRV_UNREACHABLE_EXIT. + // See ICU-21669. + return false, &MatchError{ + Code: InternalMatchError, + Pattern: m.pattern.pattern, + Position: startPos, + Input: m.input, + } + } +} + +func (m *Matcher) Start() int { + if !m.match { + return -1 + } + + return m.matchStart +} + +func (m *Matcher) reset() { + m.regionStart = 0 + m.regionLimit = len(m.input) + m.activeStart = 0 + m.activeLimit = len(m.input) + m.anchorStart = 0 + m.anchorLimit = len(m.input) + m.lookStart = 0 + m.lookLimit = len(m.input) + m.resetPreserveRegion() +} + +func (m *Matcher) resetPreserveRegion() { + m.matchStart = 0 + m.matchEnd = 0 + m.lastMatchEnd = -1 + m.appendPosition = 0 + m.match = false + m.hitEnd = false + m.requireEnd = false + m.time = 0 + m.tickCounter = timerInitialValue +} + +func (m *Matcher) GroupCount() int { + return len(m.pattern.groupMap) +} + +func (m *Matcher) StartForGroup(group int) int { + if !m.match { + return -1 + } + if group < 0 || group > len(m.pattern.groupMap) { + return -1 + } + if group == 0 { + return m.matchStart + } + groupOffset := int(m.pattern.groupMap[group-1]) + return *m.frame.extra(groupOffset) +} + +func (m *Matcher) EndForGroup(group int) int { + if !m.match { + return -1 + } + if group < 0 || group > len(m.pattern.groupMap) { + return -1 + } + if group == 0 { + return m.matchEnd + } + groupOffset := 
int(m.pattern.groupMap[group-1]) + return *m.frame.extra(groupOffset + 1) +} + +func (m *Matcher) HitEnd() bool { + return m.hitEnd +} + +func (m *Matcher) RequireEnd() bool { + return m.requireEnd +} + +func (m *Matcher) Group(i int) (string, bool) { + start := m.StartForGroup(i) + end := m.EndForGroup(i) + if start == -1 || end == -1 { + return "", false + } + return string(m.input[start:end]), true +} + +func (m *Matcher) End() int { + if !m.match { + return -1 + } + + return m.matchEnd +} + +func (m *Matcher) Dumper(out io.Writer) { + m.dumper = out +} + +// Test for any of the Unicode line terminating characters. +func isLineTerminator(c rune) bool { + if (c & ^(0x0a | 0x0b | 0x0c | 0x0d | 0x85 | 0x2028 | 0x2029)) != 0 { + return false + } + return (c <= 0x0d && c >= 0x0a) || c == 0x85 || c == 0x2028 || c == 0x2029 +} diff --git a/go/mysql/icuregex/ops.go b/go/mysql/icuregex/ops.go new file mode 100644 index 00000000000..4150cf523d2 --- /dev/null +++ b/go/mysql/icuregex/ops.go @@ -0,0 +1,414 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package icuregex + +import ( + "slices" + + "vitess.io/vitess/go/mysql/icuregex/internal/ucase" + "vitess.io/vitess/go/mysql/icuregex/internal/utf16" +) + +type opcode uint8 + +const ( + urxReservedOp opcode = iota // For multi-operand ops, most non-first words. + urxBacktrack // Force a backtrack, as if a match test had failed. + urxEnd + urxOnechar // Value field is the 21 bit unicode char to match + urxString // Value field is index of string start + urxStringLen // Value field is string length (code units) + urxStateSave // Value field is pattern position to push + urxNop + urxStartCapture // Value field is capture group number. + urxEndCapture // Value field is capture group number + urxStaticSetref // Value field is index of set in array of sets. + urxSetref // Value field is index of set in array of sets. + urxDotany + urxJmp // Value field is destination position in the pattern. + urxFail // Stop match operation, No match. + + urxJmpSav // Operand: JMP destination location + urxBackslashB // Value field: 0: \b 1: \B + urxBackslashG + urxJmpSavX // Conditional JMP_SAV, + // Used in (x)+, breaks loop on zero length match. + // Operand: Jmp destination. + urxBackslashX + urxBackslashZ // \z Unconditional end of line. + + urxDotanyAll // ., in the . matches any mode. + urxBackslashD // Value field: 0: \d 1: \D + urxCaret // Value field: 1: multi-line mode. + urxDollar // Also for \Z + + urxCtrInit // Counter Inits for {Interval} loops. + urxCtrInitNg // 2 kinds, normal and non-greedy. + // These are 4 word opcodes. See description. + // First Operand: Data loc of counter variable + // 2nd Operand: Pat loc of the URX_CTR_LOOPx + // at the end of the loop. + // 3rd Operand: Minimum count. + // 4th Operand: Max count, -1 for unbounded. + + urxDotanyUnix // '.' operator in UNIX_LINES mode, only \n marks end of line. + + utxCtrLoop // Loop Ops for {interval} loops. + urxCtrLoopNg // Also in three flavors. + // Operand is loc of corresponding CTR_INIT. 
+ + urxCaretMUnix // '^' operator, test for start of line in multi-line + // plus UNIX_LINES mode. + + urxRelocOprnd // Operand value in multi-operand ops that refers + // back into compiled pattern code, and thus must + // be relocated when inserting/deleting ops in code. + + urxStoSp // Store the stack ptr. Operand is location within + // matcher data (not stack data) to store it. + urxLdSp // Load the stack pointer. Operand is location + // to load from. + urxBackref // Back Reference. Parameter is the index of the + // capture group variables in the state stack frame. + urxStoInpLoc // Store the input location. Operand is location + // within the matcher stack frame. + urxJmpx // Conditional JMP. + // First Operand: JMP target location. + // Second Operand: Data location containing an + // input position. If current input position == + // saved input position, FAIL rather than taking + // the JMP + urxLaStart // Starting a LookAround expression. + // Save InputPos, SP and active region in static data. + // Operand: Static data offset for the save + urxLaEnd // Ending a Lookaround expression. + // Restore InputPos and Stack to saved values. + // Operand: Static data offset for saved data. + urcOnecharI // Test for case-insensitive match of a literal character. + // Operand: the literal char. + urxStringI // Case insensitive string compare. + // First Operand: Index of start of string in string literals + // Second Operand (next word in compiled code): + // the length of the string. + urxBackrefI // Case insensitive back reference. + // Parameter is the index of the + // capture group variables in the state stack frame. + urxDollarM // $ in multi-line mode. + urxCaretM // ^ in multi-line mode. + urxLbStart // LookBehind Start. + // Parameter is data location + urxLbCont // LookBehind Continue. 
+ // Param 0: the data location + // Param 1: The minimum length of the look-behind match + // Param 2: The max length of the look-behind match + urxLbEnd // LookBehind End. + // Parameter is the data location. + // Check that match ended at the right spot, + // Restore original input string len. + urxLbnCount // Negative LookBehind Continue + // Param 0: the data location + // Param 1: The minimum length of the look-behind match + // Param 2: The max length of the look-behind match + // Param 3: The pattern loc following the look-behind block. + urxLbnEnd // Negative LookBehind end + // Parameter is the data location. + // Check that the match ended at the right spot. + urxStatSetrefN // Reference to a prebuilt set (e.g. \w), negated + // Operand is index of set in array of sets. + urxLoopSrI // Init a [set]* loop. + // Operand is the sets index in array of user sets. + urxLoopC // Continue a [set]* or OneChar* loop. + // Operand is a matcher static data location. + // Must always immediately follow LOOP_x_I instruction. + urxLoopDotI // .*, initialization of the optimized loop. + // Operand value: + // bit 0: + // 0: Normal (. doesn't match new-line) mode. + // 1: . matches new-line mode. + // bit 1: controls what new-lines are recognized by this operation. + // 0: All Unicode New-lines + // 1: UNIX_LINES, \u000a only. + urxBackslashBu // \b or \B in UREGEX_UWORD mode, using Unicode style + // word boundaries. + urxDollarD // $ end of input test, in UNIX_LINES mode. + urxDollarMd // $ end of input test, in MULTI_LINE and UNIX_LINES mode. + urxBackslashH // Value field: 0: \h 1: \H + urxBackslashR // Any line break sequence. + urxBackslashV // Value field: 0: \v 1: \V + + urxReservedOpN opcode = 255 // For multi-operand ops, negative operand values. +) + +// Keep this list of opcode names in sync with the above enum +// +// Used for debug printing only. 
+var urxOpcodeNames = []string{ + " ", + "BACKTRACK", + "END", + "ONECHAR", + "STRING", + "STRING_LEN", + "STATE_SAVE", + "NOP", + "START_CAPTURE", + "END_CAPTURE", + "URX_STATIC_SETREF", + "SETREF", + "DOTANY", + "JMP", + "FAIL", + "JMP_SAV", + "BACKSLASH_B", + "BACKSLASH_G", + "JMP_SAV_X", + "BACKSLASH_X", + "BACKSLASH_Z", + "DOTANY_ALL", + "BACKSLASH_D", + "CARET", + "DOLLAR", + "CTR_INIT", + "CTR_INIT_NG", + "DOTANY_UNIX", + "CTR_LOOP", + "CTR_LOOP_NG", + "URX_CARET_M_UNIX", + "RELOC_OPRND", + "STO_SP", + "LD_SP", + "BACKREF", + "STO_INP_LOC", + "JMPX", + "LA_START", + "LA_END", + "ONECHAR_I", + "STRING_I", + "BACKREF_I", + "DOLLAR_M", + "CARET_M", + "LB_START", + "LB_CONT", + "LB_END", + "LBN_CONT", + "LBN_END", + "STAT_SETREF_N", + "LOOP_SR_I", + "LOOP_C", + "LOOP_DOT_I", + "BACKSLASH_BU", + "DOLLAR_D", + "DOLLAR_MD", + "URX_BACKSLASH_H", + "URX_BACKSLASH_R", + "URX_BACKSLASH_V", +} + +type instruction int32 + +func (ins instruction) typ() opcode { + return opcode(uint32(ins) >> 24) +} + +func (ins instruction) value32() int32 { + return int32(ins) & 0xffffff +} + +func (ins instruction) value() int { + return int(ins.value32()) +} + +// Access to Unicode Sets composite character properties +// +// The sets are accessed by the match engine for things like \w (word boundary) +const ( + urxIswordSet = 1 + urxIsalnumSet = 2 + urxIsalphaSet = 3 + urxIsspaceSet = 4 + + urxGcNormal = iota + 1 // Sets for finding grapheme cluster boundaries. + urxGcExtend + urxGcControl + urxGcL + urxGcLv + urxGcLvt + urxGcV + urxGcT + + urxNegSet = 0x800000 // Flag bit to reverse sense of set + // membership test. 
+) + +type stack struct { + ary []int + frameSize int + stackLimit int +} + +type stackFrame []int + +func (f stackFrame) inputIdx() *int { + return &f[0] +} + +func (f stackFrame) patIdx() *int { + return &f[1] +} + +func (f stackFrame) extra(n int) *int { + return &f[2+n] +} + +func (f stackFrame) equals(f2 stackFrame) bool { + return &f[0] == &f2[0] +} + +func (s *stack) len() int { + return len(s.ary) +} + +func (s *stack) sp() int { + return len(s.ary) - s.frameSize +} + +func (s *stack) newFrame(inputIdx int, input []rune, pattern string) (stackFrame, error) { + if s.stackLimit != 0 && len(s.ary)+s.frameSize > s.stackLimit { + return nil, &MatchError{ + Code: StackOverflow, + Pattern: pattern, + Position: inputIdx, + Input: input, + } + } + s.ary = slices.Grow(s.ary, s.frameSize) + + f := s.ary[len(s.ary) : len(s.ary)+s.frameSize] + s.ary = s.ary[:len(s.ary)+s.frameSize] + return f, nil +} + +func (s *stack) prevFromTop() stackFrame { + return s.ary[len(s.ary)-2*s.frameSize:] +} + +func (s *stack) popFrame() stackFrame { + s.ary = s.ary[:len(s.ary)-s.frameSize] + return s.ary[len(s.ary)-s.frameSize:] +} + +func (s *stack) reset() { + s.ary = s.ary[:0] +} + +func (s *stack) offset(size int) stackFrame { + return s.ary[size-s.frameSize : size] +} + +func (s *stack) setSize(size int) { + s.ary = s.ary[:size] +} + +func (f stackFrame) clearExtra() { + for i := 2; i < len(f); i++ { + f[i] = -1 + } +} + +// number of UVector elements in the header +const restackframeHdrCount = 2 + +// Start-Of-Match type. Used by find() to quickly scan to positions where a +// +// match might start before firing up the full match engine. +type startOfMatch int8 + +const ( + startNoInfo startOfMatch = iota // No hint available. + startChar // Match starts with a literal code point. + startSet // Match starts with something matching a set. + startStart // Match starts at start of buffer only (^ or \A) + startLine // Match starts with ^ in multi-line mode. 
+ startString // Match starts with a literal string. +) + +func (som startOfMatch) String() string { + switch som { + case startNoInfo: + return "START_NO_INFO" + case startChar: + return "START_CHAR" + case startSet: + return "START_SET" + case startStart: + return "START_START" + case startLine: + return "START_LINE" + case startString: + return "START_STRING" + default: + panic("unknown StartOfMatch") + } +} + +type caseFoldIterator struct { + chars []rune + index int + limit int + + foldChars []uint16 +} + +func (it *caseFoldIterator) next() rune { + if len(it.foldChars) == 0 { + // We are not in a string folding of an earlier character. + // Start handling the next char from the input UText. + if it.index >= it.limit { + return -1 + } + + originalC := it.chars[it.index] + it.index++ + + originalC, it.foldChars = ucase.FullFolding(originalC) + if len(it.foldChars) == 0 { + // input code point folds to a single code point, possibly itself. + return originalC + } + } + + var res rune + res, it.foldChars = utf16.NextUnsafe(it.foldChars) + return res +} + +func (it *caseFoldIterator) inExpansion() bool { + return len(it.foldChars) > 0 +} + +func newCaseFoldIterator(chars []rune, start, limit int) caseFoldIterator { + return caseFoldIterator{ + chars: chars, + index: start, + limit: limit, + } +} diff --git a/go/mysql/icuregex/pattern.go b/go/mysql/icuregex/pattern.go new file mode 100644 index 00000000000..90e69b3f55d --- /dev/null +++ b/go/mysql/icuregex/pattern.go @@ -0,0 +1,136 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex + +import ( + "vitess.io/vitess/go/mysql/icuregex/internal/uset" +) + +type Pattern struct { + pattern string + flags RegexpFlag + + compiledPat []instruction + literalText []rune + + sets []*uset.UnicodeSet + + minMatchLen int32 + frameSize int + dataSize int + + groupMap []int32 + + startType startOfMatch + initialStringIdx int + initialStringLen int + initialChars *uset.UnicodeSet + initialChar rune + needsAltInput bool + + namedCaptureMap map[string]int +} + +func NewPattern(flags RegexpFlag) *Pattern { + return &Pattern{ + flags: flags, + initialChars: uset.New(), + // Slot zero of the vector of sets is reserved. Fill it here. + sets: []*uset.UnicodeSet{nil}, + } +} + +func Compile(in []rune, flags RegexpFlag) (*Pattern, error) { + pat := NewPattern(flags) + cmp := newCompiler(pat) + if err := cmp.compile(in); err != nil { + return nil, err + } + return pat, nil +} + +func CompileString(in string, flags RegexpFlag) (*Pattern, error) { + return Compile([]rune(in), flags) +} + +func (p *Pattern) Match(input string) *Matcher { + m := NewMatcher(p) + m.ResetString(input) + return m +} + +type RegexpFlag int32 + +const ( + /** Enable case insensitive matching. @stable ICU 2.4 */ + CaseInsensitive RegexpFlag = 2 + + /** Allow white space and comments within patterns @stable ICU 2.4 */ + Comments RegexpFlag = 4 + + /** If set, '.' matches line terminators, otherwise '.' matching stops at line end. + * @stable ICU 2.4 */ + DotAll RegexpFlag = 32 + + /** If set, treat the entire pattern as a literal string. 
+ * Metacharacters or escape sequences in the input sequence will be given + * no special meaning. + * + * The flag UREGEX_CASE_INSENSITIVE retains its impact + * on matching when used in conjunction with this flag. + * The other flags become superfluous. + * + * @stable ICU 4.0 + */ + Literal RegexpFlag = 16 + + /** Control behavior of "$" and "^" + * If set, recognize line terminators within string, + * otherwise, match only at start and end of input string. + * @stable ICU 2.4 */ + Multiline RegexpFlag = 8 + + /** Unix-only line endings. + * When this mode is enabled, only \\u000a is recognized as a line ending + * in the behavior of ., ^, and $. + * @stable ICU 4.0 + */ + UnixLines RegexpFlag = 1 + + /** Unicode word boundaries. + * If set, \b uses the Unicode TR 29 definition of word boundaries. + * Warning: Unicode word boundaries are quite different from + * traditional regular expression word boundaries. See + * http://unicode.org/reports/tr29/#Word_Boundaries + * @stable ICU 2.8 + */ + UWord RegexpFlag = 256 + + /** Error on Unrecognized backslash escapes. + * If set, fail with an error on patterns that contain + * backslash-escaped ASCII letters without a known special + * meaning. If this flag is not set, these + * escaped letters represent themselves. + * @stable ICU 4.0 + */ + ErrorOnUnknownEscapes RegexpFlag = 512 +) diff --git a/go/mysql/icuregex/perl_test.go b/go/mysql/icuregex/perl_test.go new file mode 100644 index 00000000000..e8dfc95d6b0 --- /dev/null +++ b/go/mysql/icuregex/perl_test.go @@ -0,0 +1,211 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. 
+License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex + +import ( + "bufio" + "os" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestPerl(t *testing.T) { + f, err := os.Open("testdata/re_tests.txt") + require.NoError(t, err) + defer f.Close() + + flagPat, err := CompileString(`('?)(.*)\1(.*)`, 0) + require.NoError(t, err) + flagMat := NewMatcher(flagPat) + + groupsPat, err := CompileString(`\$([+\-])\[(\d+)\]`, 0) + require.NoError(t, err) + groupsMat := NewMatcher(groupsPat) + + cgPat, err := CompileString(`\$(\d+)`, 0) + require.NoError(t, err) + cgMat := NewMatcher(cgPat) + + group := func(m *Matcher, idx int) string { + g, _ := m.Group(idx) + return g + } + + lookingAt := func(m *Matcher) bool { + ok, err := m.LookingAt() + require.NoError(t, err) + return ok + } + + replacer := strings.NewReplacer( + `${bang}`, "!", + `${nulnul}`, "\x00\x00", + `${ffff}`, "\uffff", + ) + + scanner := bufio.NewScanner(f) + var lineno int + + for scanner.Scan() { + lineno++ + fields := strings.Split(scanner.Text(), "\t") + + flagMat.ResetString(fields[0]) + ok, _ := flagMat.Matches() + require.Truef(t, ok, "could not match pattern+flags (line %d)", lineno) + + pattern, _ := flagMat.Group(2) + pattern = replacer.Replace(pattern) + + flagStr, _ := flagMat.Group(3) + var flags RegexpFlag + if strings.IndexByte(flagStr, 'i') >= 0 { + flags |= CaseInsensitive + } 
+ if strings.IndexByte(flagStr, 'm') >= 0 { + flags |= Multiline + } + if strings.IndexByte(flagStr, 'x') >= 0 { + flags |= Comments + } + + testPat, err := CompileString(pattern, flags) + if err != nil { + if cerr, ok := err.(*CompileError); ok && cerr.Code == Unimplemented { + continue + } + if strings.IndexByte(fields[2], 'c') == -1 && strings.IndexByte(fields[2], 'i') == -1 { + t.Errorf("line %d: ICU error %q", lineno, err) + } + continue + } + + if strings.IndexByte(fields[2], 'i') >= 0 { + continue + } + if strings.IndexByte(fields[2], 'c') >= 0 { + t.Errorf("line %d: expected error", lineno) + continue + } + + matchString := fields[1] + matchString = replacer.Replace(matchString) + matchString = strings.ReplaceAll(matchString, `\n`, "\n") + + testMat := testPat.Match(matchString) + found, _ := testMat.Find() + expected := strings.IndexByte(fields[2], 'y') >= 0 + + if expected != found { + t.Errorf("line %d: expected %v, found %v", lineno, expected, found) + continue + } + + if !found { + continue + } + + var result []byte + var perlExpr = fields[3] + + for len(perlExpr) > 0 { + groupsMat.ResetString(perlExpr) + cgMat.ResetString(perlExpr) + + switch { + case strings.HasPrefix(perlExpr, "$&"): + result = append(result, group(testMat, 0)...) + perlExpr = perlExpr[2:] + + case lookingAt(groupsMat): + groupNum, err := strconv.ParseInt(group(groupsMat, 2), 10, 32) + require.NoError(t, err) + + var matchPosition int + if group(groupsMat, 1) == "+" { + matchPosition = testMat.EndForGroup(int(groupNum)) + } else { + matchPosition = testMat.StartForGroup(int(groupNum)) + } + if matchPosition != -1 { + result = strconv.AppendInt(result, int64(matchPosition), 10) + } + + perlExpr = perlExpr[groupsMat.EndForGroup(0):] + + case lookingAt(cgMat): + groupNum, err := strconv.ParseInt(group(cgMat, 1), 10, 32) + require.NoError(t, err) + result = append(result, group(testMat, int(groupNum))...) 
+ perlExpr = perlExpr[cgMat.EndForGroup(0):] + + case strings.HasPrefix(perlExpr, "@-"): + for i := 0; i <= testMat.GroupCount(); i++ { + if i > 0 { + result = append(result, ' ') + } + result = strconv.AppendInt(result, int64(testMat.StartForGroup(i)), 10) + } + perlExpr = perlExpr[2:] + + case strings.HasPrefix(perlExpr, "@+"): + for i := 0; i <= testMat.GroupCount(); i++ { + if i > 0 { + result = append(result, ' ') + } + result = strconv.AppendInt(result, int64(testMat.EndForGroup(i)), 10) + } + perlExpr = perlExpr[2:] + + case strings.HasPrefix(perlExpr, "\\"): + if len(perlExpr) > 1 { + perlExpr = perlExpr[1:] + } + c := perlExpr[0] + switch c { + case 'n': + c = '\n' + } + result = append(result, c) + perlExpr = perlExpr[1:] + + default: + result = append(result, perlExpr[0]) + perlExpr = perlExpr[1:] + } + } + + var expectedS string + if len(fields) > 4 { + expectedS = fields[4] + expectedS = replacer.Replace(expectedS) + expectedS = strings.ReplaceAll(expectedS, `\n`, "\n") + } + + if expectedS != string(result) { + t.Errorf("line %d: Incorrect Perl expression results for %s\nwant: %q\ngot: %q", lineno, pattern, expectedS, result) + } + } +} diff --git a/go/mysql/icuregex/sets.go b/go/mysql/icuregex/sets.go new file mode 100644 index 00000000000..0f745b3374d --- /dev/null +++ b/go/mysql/icuregex/sets.go @@ -0,0 +1,104 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex + +import ( + "vitess.io/vitess/go/mysql/icuregex/internal/uprops" + "vitess.io/vitess/go/mysql/icuregex/internal/uset" +) + +var staticPropertySets [13]*uset.UnicodeSet + +func init() { + staticPropertySets[urxIswordSet] = func() *uset.UnicodeSet { + s := uset.New() + s.AddAll(uprops.MustNewUnicodeSetFomPattern(`\p{Alphabetic}`, 0)) + s.AddAll(uprops.MustNewUnicodeSetFomPattern(`\p{M}`, 0)) + s.AddAll(uprops.MustNewUnicodeSetFomPattern(`\p{Nd}`, 0)) + s.AddAll(uprops.MustNewUnicodeSetFomPattern(`\p{Pc}`, 0)) + s.AddRune(0x200c) + s.AddRune(0x200d) + return s.Freeze() + }() + + staticPropertySets[urxIsspaceSet] = uprops.MustNewUnicodeSetFomPattern(`\p{Whitespace}`, 0).Freeze() + + staticPropertySets[urxGcExtend] = uprops.MustNewUnicodeSetFomPattern(`\p{Grapheme_Extend}`, 0).Freeze() + staticPropertySets[urxGcControl] = func() *uset.UnicodeSet { + s := uset.New() + s.AddAll(uprops.MustNewUnicodeSetFomPattern(`[:Zl:]`, 0)) + s.AddAll(uprops.MustNewUnicodeSetFomPattern(`[:Zp:]`, 0)) + s.AddAll(uprops.MustNewUnicodeSetFomPattern(`[:Cc:]`, 0)) + s.AddAll(uprops.MustNewUnicodeSetFomPattern(`[:Cf:]`, 0)) + s.RemoveAll(uprops.MustNewUnicodeSetFomPattern(`[:Grapheme_Extend:]`, 0)) + return s.Freeze() + }() + staticPropertySets[urxGcL] = uprops.MustNewUnicodeSetFomPattern(`\p{Hangul_Syllable_Type=L}`, 0).Freeze() + staticPropertySets[urxGcLv] = uprops.MustNewUnicodeSetFomPattern(`\p{Hangul_Syllable_Type=LV}`, 0).Freeze() + staticPropertySets[urxGcLvt] = uprops.MustNewUnicodeSetFomPattern(`\p{Hangul_Syllable_Type=LVT}`, 0).Freeze() + 
staticPropertySets[urxGcV] = uprops.MustNewUnicodeSetFomPattern(`\p{Hangul_Syllable_Type=V}`, 0).Freeze() + staticPropertySets[urxGcT] = uprops.MustNewUnicodeSetFomPattern(`\p{Hangul_Syllable_Type=T}`, 0).Freeze() + + staticPropertySets[urxGcNormal] = func() *uset.UnicodeSet { + s := uset.New() + s.Complement() + s.RemoveRuneRange(0xac00, 0xd7a4) + s.RemoveAll(staticPropertySets[urxGcControl]) + s.RemoveAll(staticPropertySets[urxGcL]) + s.RemoveAll(staticPropertySets[urxGcV]) + s.RemoveAll(staticPropertySets[urxGcT]) + return s.Freeze() + }() +} + +var staticSetUnescape = func() *uset.UnicodeSet { + u := uset.New() + u.AddString("acefnrtuUx") + return u.Freeze() +}() + +const ( + ruleSetDigitChar = 128 + ruleSetASCIILetter = 129 + ruleSetRuleChar = 130 + ruleSetCount = 131 - 128 +) + +var staticRuleSet = [ruleSetCount]*uset.UnicodeSet{ + func() *uset.UnicodeSet { + u := uset.New() + u.AddRuneRange('0', '9') + return u.Freeze() + }(), + func() *uset.UnicodeSet { + u := uset.New() + u.AddRuneRange('A', 'Z') + u.AddRuneRange('a', 'z') + return u.Freeze() + }(), + func() *uset.UnicodeSet { + u := uset.New() + u.AddString("*?+[(){}^$|\\.") + u.Complement() + return u.Freeze() + }(), +} diff --git a/go/mysql/icuregex/sets_test.go b/go/mysql/icuregex/sets_test.go new file mode 100644 index 00000000000..58da9882701 --- /dev/null +++ b/go/mysql/icuregex/sets_test.go @@ -0,0 +1,66 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStaticSetContents(t *testing.T) { + // These are the number of codepoints contained in each of the static sets as of ICU73-2, + // as to sanity check that we're re-creating the sets properly. + // This table must be re-created when updating Unicode versions. + var ExpectedSetSizes = map[int]int{ + 1: 139612, + 4: 25, + 5: 1102442, + 6: 2125, + 7: 140, + 8: 125, + 9: 399, + 10: 10773, + 11: 95, + 12: 137, + } + + for setid, expected := range ExpectedSetSizes { + assert.Equalf(t, expected, staticPropertySets[setid].Len(), "static set [%d] has wrong size", setid) + } +} + +func TestStaticFreeze(t *testing.T) { + for _, s := range staticPropertySets { + if err := s.FreezeCheck_(); err != nil { + t.Error(err) + } + } + for _, s := range staticRuleSet { + if err := s.FreezeCheck_(); err != nil { + t.Error(err) + } + } + if err := staticSetUnescape.FreezeCheck_(); err != nil { + t.Error(err) + } +} diff --git a/go/mysql/icuregex/testdata/re_tests.txt b/go/mysql/icuregex/testdata/re_tests.txt new file mode 100644 index 00000000000..c18b638f9b3 --- /dev/null +++ b/go/mysql/icuregex/testdata/re_tests.txt @@ -0,0 +1,923 @@ +abc abc y $& abc +abc abc y $-[0] 0 +abc abc y $+[0] 3 +abc xbc n - - +abc axc n - - +abc abx n - - +abc xabcy y $& abc +abc xabcy y $-[0] 1 +abc xabcy y $+[0] 4 +abc ababc y $& abc +abc ababc y $-[0] 2 +abc ababc y $+[0] 5 +ab*c abc y $& abc +ab*c abc y $-[0] 0 +ab*c abc y $+[0] 3 +ab*bc abc y $& abc +ab*bc abc y $-[0] 0 +ab*bc abc y $+[0] 3 +ab*bc abbc y $& 
abbc +ab*bc abbc y $-[0] 0 +ab*bc abbc y $+[0] 4 +ab*bc abbbbc y $& abbbbc +ab*bc abbbbc y $-[0] 0 +ab*bc abbbbc y $+[0] 6 +.{1} abbbbc y $& a +.{1} abbbbc y $-[0] 0 +.{1} abbbbc y $+[0] 1 +.{3,4} abbbbc y $& abbb +.{3,4} abbbbc y $-[0] 0 +.{3,4} abbbbc y $+[0] 4 +ab{0,}bc abbbbc y $& abbbbc +ab{0,}bc abbbbc y $-[0] 0 +ab{0,}bc abbbbc y $+[0] 6 +ab+bc abbc y $& abbc +ab+bc abbc y $-[0] 0 +ab+bc abbc y $+[0] 4 +ab+bc abc n - - +ab+bc abq n - - +ab{1,}bc abq n - - +ab+bc abbbbc y $& abbbbc +ab+bc abbbbc y $-[0] 0 +ab+bc abbbbc y $+[0] 6 +ab{1,}bc abbbbc y $& abbbbc +ab{1,}bc abbbbc y $-[0] 0 +ab{1,}bc abbbbc y $+[0] 6 +ab{1,3}bc abbbbc y $& abbbbc +ab{1,3}bc abbbbc y $-[0] 0 +ab{1,3}bc abbbbc y $+[0] 6 +ab{3,4}bc abbbbc y $& abbbbc +ab{3,4}bc abbbbc y $-[0] 0 +ab{3,4}bc abbbbc y $+[0] 6 +ab{4,5}bc abbbbc n - - +ab?bc abbc y $& abbc +ab?bc abc y $& abc +ab{0,1}bc abc y $& abc +ab?bc abbbbc n - - +ab?c abc y $& abc +ab{0,1}c abc y $& abc +^abc$ abc y $& abc +^abc$ abcc n - - +^abc abcc y $& abc +^abc$ aabc n - - +abc$ aabc y $& abc +abc$ aabcd n - - +^ abc y $& +$ abc y $& +a.c abc y $& abc +a.c axc y $& axc +a.*c axyzc y $& axyzc +a.*c axyzd n - - +a[bc]d abc n - - +a[bc]d abd y $& abd +a[b-d]e abd n - - +a[b-d]e ace y $& ace +a[b-d] aac y $& ac +a[-b] a- y $& a- +a[b-] a- y $& a- +a[b-a] - c - Invalid [] range "b-a" +a[]b - ci - Unmatched [ +a[ - c - Unmatched [ +a] a] y $& a] +a[]]b a]b y $& a]b +a[^bc]d aed y $& aed +a[^bc]d abd n - - +a[^-b]c adc y $& adc +a[^-b]c a-c n - - +a[^]b]c a]c n - - +a[^]b]c adc y $& adc +\ba\b a- y - - +\ba\b -a y - - +\ba\b -a- y - - +\by\b xy n - - +\by\b yz n - - +\by\b xyz n - - +\Ba\B a- n - - +\Ba\B -a n - - +\Ba\B -a- n - - +\By\b xy y - - +\By\b xy y $-[0] 1 +\By\b xy y $+[0] 2 +\By\b xy y - - +\by\B yz y - - +\By\B xyz y - - +\w a y - - +\w - n - - +\W a n - - +\W - y - - +a\sb a b y - - +a\sb a-b n - - +a\Sb a b n - - +a\Sb a-b y - - +\d 1 y - - +\d - n - - +\D 1 n - - +\D - y - - +[\w] a y - - +[\w] - n - - +[\W] a n - - 
+[\W] - y - - +a[\s]b a b y - - +a[\s]b a-b n - - +a[\S]b a b n - - +a[\S]b a-b y - - +[\d] 1 y - - +[\d] - n - - +[\D] 1 n - - +[\D] - y - - +ab|cd abc y $& ab +ab|cd abcd y $& ab +()ef def y $&-$1 ef- +()ef def y $-[0] 1 +()ef def y $+[0] 3 +()ef def y $-[1] 1 +()ef def y $+[1] 1 +*a - c - Quantifier follows nothing +(*)b - c - Quantifier follows nothing +$b b n - - +a\ - c - Search pattern not terminated +a\(b a(b y $&-$1 a(b- +a\(*b ab y $& ab +a\(*b a((b y $& a((b +a\\b a\b y $& a\b +abc) - c - Unmatched ) +(abc - c - Unmatched ( +((a)) abc y $&-$1-$2 a-a-a +((a)) abc y $-[0]-$-[1]-$-[2] 0-0-0 +((a)) abc y $+[0]-$+[1]-$+[2] 1-1-1 +((a)) abc by @- 0 0 0 +((a)) abc by @+ 1 1 1 +(a)b(c) abc y $&-$1-$2 abc-a-c +(a)b(c) abc y $-[0]-$-[1]-$-[2] 0-0-2 +(a)b(c) abc y $+[0]-$+[1]-$+[2] 3-1-3 +a+b+c aabbabc y $& abc +a{1,}b{1,}c aabbabc y $& abc +a** - c - Nested quantifiers +a.+?c abcabc y $& abc +(a+|b)* ab y $&-$1 ab-b +(a+|b)* ab y $-[0] 0 +(a+|b)* ab y $+[0] 2 +(a+|b)* ab y $-[1] 1 +(a+|b)* ab y $+[1] 2 +(a+|b){0,} ab y $&-$1 ab-b +(a+|b)+ ab y $&-$1 ab-b +(a+|b){1,} ab y $&-$1 ab-b +(a+|b)? 
ab y $&-$1 a-a +(a+|b){0,1} ab y $&-$1 a-a +)( - c - Unmatched ) +[^ab]* cde y $& cde +abc n - - +a* y $& +([abc])*d abbbcd y $&-$1 abbbcd-c +([abc])*bcd abcd y $&-$1 abcd-a +a|b|c|d|e e y $& e +(a|b|c|d|e)f ef y $&-$1 ef-e +(a|b|c|d|e)f ef y $-[0] 0 +(a|b|c|d|e)f ef y $+[0] 2 +(a|b|c|d|e)f ef y $-[1] 0 +(a|b|c|d|e)f ef y $+[1] 1 +abcd*efg abcdefg y $& abcdefg +ab* xabyabbbz y $& ab +ab* xayabbbz y $& a +(ab|cd)e abcde y $&-$1 cde-cd +[abhgefdc]ij hij y $& hij +^(ab|cd)e abcde n x$1y xy +(abc|)ef abcdef y $&-$1 ef- +(a|b)c*d abcd y $&-$1 bcd-b +(ab|ab*)bc abc y $&-$1 abc-a +a([bc]*)c* abc y $&-$1 abc-bc +a([bc]*)(c*d) abcd y $&-$1-$2 abcd-bc-d +a([bc]*)(c*d) abcd y $-[0] 0 +a([bc]*)(c*d) abcd y $+[0] 4 +a([bc]*)(c*d) abcd y $-[1] 1 +a([bc]*)(c*d) abcd y $+[1] 3 +a([bc]*)(c*d) abcd y $-[2] 3 +a([bc]*)(c*d) abcd y $+[2] 4 +a([bc]+)(c*d) abcd y $&-$1-$2 abcd-bc-d +a([bc]*)(c+d) abcd y $&-$1-$2 abcd-b-cd +a([bc]*)(c+d) abcd y $-[0] 0 +a([bc]*)(c+d) abcd y $+[0] 4 +a([bc]*)(c+d) abcd y $-[1] 1 +a([bc]*)(c+d) abcd y $+[1] 2 +a([bc]*)(c+d) abcd y $-[2] 2 +a([bc]*)(c+d) abcd y $+[2] 4 +a[bcd]*dcdcde adcdcde y $& adcdcde +a[bcd]+dcdcde adcdcde n - - +(ab|a)b*c abc y $&-$1 abc-ab +(ab|a)b*c abc y $-[0] 0 +(ab|a)b*c abc y $+[0] 3 +(ab|a)b*c abc y $-[1] 0 +(ab|a)b*c abc y $+[1] 2 +((a)(b)c)(d) abcd y $1-$2-$3-$4 abc-a-b-d +((a)(b)c)(d) abcd y $-[0] 0 +((a)(b)c)(d) abcd y $+[0] 4 +((a)(b)c)(d) abcd y $-[1] 0 +((a)(b)c)(d) abcd y $+[1] 3 +((a)(b)c)(d) abcd y $-[2] 0 +((a)(b)c)(d) abcd y $+[2] 1 +((a)(b)c)(d) abcd y $-[3] 1 +((a)(b)c)(d) abcd y $+[3] 2 +((a)(b)c)(d) abcd y $-[4] 3 +((a)(b)c)(d) abcd y $+[4] 4 +[a-zA-Z_][a-zA-Z0-9_]* alpha y $& alpha +^a(bc+|b[eh])g|.h$ abh y $&-$1 bh- +(bc+d$|ef*g.|h?i(j|k)) effgz y $&-$1-$2 effgz-effgz- +(bc+d$|ef*g.|h?i(j|k)) ij y $&-$1-$2 ij-ij-j +(bc+d$|ef*g.|h?i(j|k)) effg n - - +(bc+d$|ef*g.|h?i(j|k)) bcdd n - - +(bc+d$|ef*g.|h?i(j|k)) reffgz y $&-$1-$2 effgz-effgz- +((((((((((a)))))))))) a y $10 a +((((((((((a)))))))))) a y $-[0] 0 
+((((((((((a)))))))))) a y $+[0] 1 +((((((((((a)))))))))) a y $-[10] 0 +((((((((((a)))))))))) a y $+[10] 1 +((((((((((a))))))))))\10 aa y $& aa +((((((((((a))))))))))${bang} aa n - - +((((((((((a))))))))))${bang} a! y $& a! +(((((((((a))))))))) a y $& a +multiple words of text uh-uh n - - +multiple words multiple words, yeah y $& multiple words +(.*)c(.*) abcde y $&-$1-$2 abcde-ab-de +\((.*), (.*)\) (a, b) y ($2, $1) (b, a) +[k] ab n - - +abcd abcd y $&-\$&-\\$& abcd-$&-\abcd +a(bc)d abcd y $1-\$1-\\$1 bc-$1-\bc +a[-]?c ac y $& ac +(abc)\1 abcabc y $1 abc +([a-c]*)\1 abcabc y $1 abc +\1 - c - Reference to nonexistent group +\2 - c - Reference to nonexistent group +(a)|\1 a y - - +(a)|\1 x n - - +(a)|\2 - c - Reference to nonexistent group +(([a-c])b*?\2)* ababbbcbc y $&-$1-$2 ababb-bb-b +(([a-c])b*?\2){3} ababbbcbc y $&-$1-$2 ababbbcbc-cbc-c +((\3|b)\2(a)x)+ aaxabxbaxbbx n - - +((\3|b)\2(a)x)+ aaaxabaxbaaxbbax y $&-$1-$2-$3 bbax-bbax-b-a +((\3|b)\2(a)){2,} bbaababbabaaaaabbaaaabba y $&-$1-$2-$3 bbaaaabba-bba-b-a +(a)|(b) b y $-[0] 0 +(a)|(b) b y $+[0] 1 +(a)|(b) b y x$-[1] x +(a)|(b) b y x$+[1] x +(a)|(b) b y $-[2] 0 +(a)|(b) b y $+[2] 1 +'abc'i ABC y $& ABC +'abc'i XBC n - - +'abc'i AXC n - - +'abc'i ABX n - - +'abc'i XABCY y $& ABC +'abc'i ABABC y $& ABC +'ab*c'i ABC y $& ABC +'ab*bc'i ABC y $& ABC +'ab*bc'i ABBC y $& ABBC +'ab*?bc'i ABBBBC y $& ABBBBC +'ab{0,}?bc'i ABBBBC y $& ABBBBC +'ab+?bc'i ABBC y $& ABBC +'ab+bc'i ABC n - - +'ab+bc'i ABQ n - - +'ab{1,}bc'i ABQ n - - +'ab+bc'i ABBBBC y $& ABBBBC +'ab{1,}?bc'i ABBBBC y $& ABBBBC +'ab{1,3}?bc'i ABBBBC y $& ABBBBC +'ab{3,4}?bc'i ABBBBC y $& ABBBBC +'ab{4,5}?bc'i ABBBBC n - - +'ab??bc'i ABBC y $& ABBC +'ab??bc'i ABC y $& ABC +'ab{0,1}?bc'i ABC y $& ABC +'ab??bc'i ABBBBC n - - +'ab??c'i ABC y $& ABC +'ab{0,1}?c'i ABC y $& ABC +'^abc$'i ABC y $& ABC +'^abc$'i ABCC n - - +'^abc'i ABCC y $& ABC +'^abc$'i AABC n - - +'abc$'i AABC y $& ABC +'^'i ABC y $& +'$'i ABC y $& +'a.c'i ABC y $& ABC +'a.c'i AXC y $& AXC 
+'a.*?c'i AXYZC y $& AXYZC +'a.*c'i AXYZD n - - +'a[bc]d'i ABC n - - +'a[bc]d'i ABD y $& ABD +'a[b-d]e'i ABD n - - +'a[b-d]e'i ACE y $& ACE +'a[b-d]'i AAC y $& AC +'a[-b]'i A- y $& A- +'a[b-]'i A- y $& A- +'a[b-a]'i - c - Invalid [] range "b-a" +'a[]b'i - ci - Unmatched [ +'a['i - c - Unmatched [ +'a]'i A] y $& A] +'a[]]b'i A]B y $& A]B +'a[^bc]d'i AED y $& AED +'a[^bc]d'i ABD n - - +'a[^-b]c'i ADC y $& ADC +'a[^-b]c'i A-C n - - +'a[^]b]c'i A]C n - - +'a[^]b]c'i ADC y $& ADC +'ab|cd'i ABC y $& AB +'ab|cd'i ABCD y $& AB +'()ef'i DEF y $&-$1 EF- +'*a'i - c - Quantifier follows nothing +'(*)b'i - c - Quantifier follows nothing +'$b'i B n - - +'a\'i - c - Search pattern not terminated +'a\(b'i A(B y $&-$1 A(B- +'a\(*b'i AB y $& AB +'a\(*b'i A((B y $& A((B +'a\\b'i A\B y $& A\B +'abc)'i - c - Unmatched ) +'(abc'i - c - Unmatched ( +'((a))'i ABC y $&-$1-$2 A-A-A +'(a)b(c)'i ABC y $&-$1-$2 ABC-A-C +'a+b+c'i AABBABC y $& ABC +'a{1,}b{1,}c'i AABBABC y $& ABC +'a**'i - c - Nested quantifiers +'a.+?c'i ABCABC y $& ABC +'a.*?c'i ABCABC y $& ABC +'a.{0,5}?c'i ABCABC y $& ABC +'(a+|b)*'i AB y $&-$1 AB-B +'(a+|b){0,}'i AB y $&-$1 AB-B +'(a+|b)+'i AB y $&-$1 AB-B +'(a+|b){1,}'i AB y $&-$1 AB-B +'(a+|b)?'i AB y $&-$1 A-A +'(a+|b){0,1}'i AB y $&-$1 A-A +'(a+|b){0,1}?'i AB y $&-$1 - +')('i - c - Unmatched ) +'[^ab]*'i CDE y $& CDE +'abc'i n - - +'a*'i y $& +'([abc])*d'i ABBBCD y $&-$1 ABBBCD-C +'([abc])*bcd'i ABCD y $&-$1 ABCD-A +'a|b|c|d|e'i E y $& E +'(a|b|c|d|e)f'i EF y $&-$1 EF-E +'abcd*efg'i ABCDEFG y $& ABCDEFG +'ab*'i XABYABBBZ y $& AB +'ab*'i XAYABBBZ y $& A +'(ab|cd)e'i ABCDE y $&-$1 CDE-CD +'[abhgefdc]ij'i HIJ y $& HIJ +'^(ab|cd)e'i ABCDE n x$1y XY +'(abc|)ef'i ABCDEF y $&-$1 EF- +'(a|b)c*d'i ABCD y $&-$1 BCD-B +'(ab|ab*)bc'i ABC y $&-$1 ABC-A +'a([bc]*)c*'i ABC y $&-$1 ABC-BC +'a([bc]*)(c*d)'i ABCD y $&-$1-$2 ABCD-BC-D +'a([bc]+)(c*d)'i ABCD y $&-$1-$2 ABCD-BC-D +'a([bc]*)(c+d)'i ABCD y $&-$1-$2 ABCD-B-CD +'a[bcd]*dcdcde'i ADCDCDE y $& ADCDCDE +'a[bcd]+dcdcde'i ADCDCDE n - 
- +'(ab|a)b*c'i ABC y $&-$1 ABC-AB +'((a)(b)c)(d)'i ABCD y $1-$2-$3-$4 ABC-A-B-D +'[a-zA-Z_][a-zA-Z0-9_]*'i ALPHA y $& ALPHA +'^a(bc+|b[eh])g|.h$'i ABH y $&-$1 BH- +'(bc+d$|ef*g.|h?i(j|k))'i EFFGZ y $&-$1-$2 EFFGZ-EFFGZ- +'(bc+d$|ef*g.|h?i(j|k))'i IJ y $&-$1-$2 IJ-IJ-J +'(bc+d$|ef*g.|h?i(j|k))'i EFFG n - - +'(bc+d$|ef*g.|h?i(j|k))'i BCDD n - - +'(bc+d$|ef*g.|h?i(j|k))'i REFFGZ y $&-$1-$2 EFFGZ-EFFGZ- +'((((((((((a))))))))))'i A y $10 A +'((((((((((a))))))))))\10'i AA y $& AA +'((((((((((a))))))))))${bang}'i AA n - - +'((((((((((a))))))))))${bang}'i A! y $& A! +'(((((((((a)))))))))'i A y $& A +'(?:(?:(?:(?:(?:(?:(?:(?:(?:(a))))))))))'i A y $1 A +'(?:(?:(?:(?:(?:(?:(?:(?:(?:(a|b|c))))))))))'i C y $1 C +'multiple words of text'i UH-UH n - - +'multiple words'i MULTIPLE WORDS, YEAH y $& MULTIPLE WORDS +'(.*)c(.*)'i ABCDE y $&-$1-$2 ABCDE-AB-DE +'\((.*), (.*)\)'i (A, B) y ($2, $1) (B, A) +'[k]'i AB n - - +'abcd'i ABCD y $&-\$&-\\$& ABCD-$&-\ABCD +'a(bc)d'i ABCD y $1-\$1-\\$1 BC-$1-\BC +'a[-]?c'i AC y $& AC +'(abc)\1'i ABCABC y $1 ABC +'([a-c]*)\1'i ABCABC y $1 ABC +a(?!b). abad y $& ad +a(?=d). abad y $& ad +a(?=c|d). abad y $& ad +a(?:b|c|d)(.) ace y $1 e +a(?:b|c|d)*(.) ace y $1 e +a(?:b|c|d)+?(.) ace y $1 e +a(?:b|c|d)+?(.) acdbcdbe y $1 d +a(?:b|c|d)+(.) acdbcdbe y $1 e +a(?:b|c|d){2}(.) acdbcdbe y $1 b +a(?:b|c|d){4,5}(.) acdbcdbe y $1 b +a(?:b|c|d){4,5}?(.) acdbcdbe y $1 d +((foo)|(bar))* foobar y $1-$2-$3 bar-foo-bar +:(?: - c - Sequence (? incomplete +a(?:b|c|d){6,7}(.) acdbcdbe y $1 e +a(?:b|c|d){6,7}?(.) acdbcdbe y $1 e +a(?:b|c|d){5,6}(.) acdbcdbe y $1 e +a(?:b|c|d){5,6}?(.) acdbcdbe y $1 b +a(?:b|c|d){5,7}(.) acdbcdbe y $1 e +a(?:b|c|d){5,7}?(.) acdbcdbe y $1 b +a(?:b|(c|e){1,2}?|d)+?(.) ace y $1$2 ce +^(.+)?B AB y $1 A +^([^a-z])|(\^)$ . y $1 . 
+^[<>]& <&OUT y $& <& +^(a\1?){4}$ aaaaaaaaaa y $1 aaaa +^(a\1?){4}$ aaaaaaaaa n - - +^(a\1?){4}$ aaaaaaaaaaa n - - +^(a(?(1)\1)){4}$ aaaaaaaaaa y $1 aaaa +^(a(?(1)\1)){4}$ aaaaaaaaa n - - +^(a(?(1)\1)){4}$ aaaaaaaaaaa n - - +((a{4})+) aaaaaaaaa y $1 aaaaaaaa +(((aa){2})+) aaaaaaaaaa y $1 aaaaaaaa +(((a{2}){2})+) aaaaaaaaaa y $1 aaaaaaaa +(?:(f)(o)(o)|(b)(a)(r))* foobar y $1:$2:$3:$4:$5:$6 f:o:o:b:a:r +(?<=a)b ab y $& b +(?<=a)b cb n - - +(?<=a)b b n - - +(?a+)ab aaab n - - +(?>a+)b aaab y - - +([[:]+) a:[b]: yi $1 :[ Java and ICU dont escape [[xyz +([[=]+) a=[b]= yi $1 =[ Java and ICU dont escape [[xyz +([[.]+) a.[b]. yi $1 .[ Java and ICU dont escape [[xyz +[a[:xyz: - c - Unmatched [ +[a[:xyz:] - c - POSIX class [:xyz:] unknown +[a[:]b[:c] abc yi $& abc Java and ICU embedded [ is nested set +([a[:xyz:]b]+) pbaq c - POSIX class [:xyz:] unknown +[a[:]b[:c] abc iy $& abc Java and ICU embedded [ is nested set +([[:alpha:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd +([[:alnum:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy +([[:ascii:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy__-- ${nulnul} +([[:cntrl:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ${nulnul} +([[:digit:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 01 +([[:graph:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy__-- +([[:lower:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 cd +([[:print:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy__-- +([[:punct:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 __-- +([[:space:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 +([[:word:]]+) ABcd01Xy__-- ${nulnul}${ffff} yi $1 ABcd01Xy__ +([[:upper:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 AB +([[:xdigit:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01 +([[:^alpha:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 01 +([[:^alnum:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 __-- ${nulnul}${ffff} +([[:^ascii:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ${ffff} +([[:^cntrl:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy__-- +([[:^digit:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 
ABcd +([[:^lower:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 AB +([[:^print:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ${nulnul}${ffff} +([[:^punct:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy +([[:^space:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy__-- +([[:^word:]]+) ABcd01Xy__-- ${nulnul}${ffff} yi $1 -- ${nulnul}${ffff} +([[:^upper:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 cd01 +([[:^xdigit:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 Xy__-- ${nulnul}${ffff} +[[:foo:]] - c - POSIX class [:foo:] unknown +[[:^foo:]] - c - POSIX class [:^foo:] unknown +((?>a+)b) aaab y $1 aaab +(?>(a+))b aaab y $1 aaa +((?>[^()]+)|\([^()]*\))+ ((abc(ade)ufh()()x y $& abc(ade)ufh()()x +(?<=x+)y - c - Variable length lookbehind not implemented +a{37,17} - c - Can't do {n,m} with n > m +\Z a\nb\n y $-[0] 3 +\z a\nb\n y $-[0] 4 +$ a\nb\n y $-[0] 3 +\Z b\na\n y $-[0] 3 +\z b\na\n y $-[0] 4 +$ b\na\n y $-[0] 3 +\Z b\na y $-[0] 3 +\z b\na y $-[0] 3 +$ b\na y $-[0] 3 +'\Z'm a\nb\n y $-[0] 3 +'\z'm a\nb\n y $-[0] 4 +'$'m a\nb\n y $-[0] 1 +'\Z'm b\na\n y $-[0] 3 +'\z'm b\na\n y $-[0] 4 +'$'m b\na\n y $-[0] 1 +'\Z'm b\na y $-[0] 3 +'\z'm b\na y $-[0] 3 +'$'m b\na y $-[0] 1 +a\Z a\nb\n n - - +a\z a\nb\n n - - +a$ a\nb\n n - - +a\Z b\na\n y $-[0] 2 +a\z b\na\n n - - +a$ b\na\n y $-[0] 2 +a\Z b\na y $-[0] 2 +a\z b\na y $-[0] 2 +a$ b\na y $-[0] 2 +'a\Z'm a\nb\n n - - +'a\z'm a\nb\n n - - +'a$'m a\nb\n y $-[0] 0 +'a\Z'm b\na\n y $-[0] 2 +'a\z'm b\na\n n - - +'a$'m b\na\n y $-[0] 2 +'a\Z'm b\na y $-[0] 2 +'a\z'm b\na y $-[0] 2 +'a$'m b\na y $-[0] 2 +aa\Z aa\nb\n n - - +aa\z aa\nb\n n - - +aa$ aa\nb\n n - - +aa\Z b\naa\n y $-[0] 2 +aa\z b\naa\n n - - +aa$ b\naa\n y $-[0] 2 +aa\Z b\naa y $-[0] 2 +aa\z b\naa y $-[0] 2 +aa$ b\naa y $-[0] 2 +'aa\Z'm aa\nb\n n - - +'aa\z'm aa\nb\n n - - +'aa$'m aa\nb\n y $-[0] 0 +'aa\Z'm b\naa\n y $-[0] 2 +'aa\z'm b\naa\n n - - +'aa$'m b\naa\n y $-[0] 2 +'aa\Z'm b\naa y $-[0] 2 +'aa\z'm b\naa y $-[0] 2 +'aa$'m b\naa y $-[0] 2 +aa\Z ac\nb\n n - - +aa\z ac\nb\n n - - +aa$ 
ac\nb\n n - - +aa\Z b\nac\n n - - +aa\z b\nac\n n - - +aa$ b\nac\n n - - +aa\Z b\nac n - - +aa\z b\nac n - - +aa$ b\nac n - - +'aa\Z'm ac\nb\n n - - +'aa\z'm ac\nb\n n - - +'aa$'m ac\nb\n n - - +'aa\Z'm b\nac\n n - - +'aa\z'm b\nac\n n - - +'aa$'m b\nac\n n - - +'aa\Z'm b\nac n - - +'aa\z'm b\nac n - - +'aa$'m b\nac n - - +aa\Z ca\nb\n n - - +aa\z ca\nb\n n - - +aa$ ca\nb\n n - - +aa\Z b\nca\n n - - +aa\z b\nca\n n - - +aa$ b\nca\n n - - +aa\Z b\nca n - - +aa\z b\nca n - - +aa$ b\nca n - - +'aa\Z'm ca\nb\n n - - +'aa\z'm ca\nb\n n - - +'aa$'m ca\nb\n n - - +'aa\Z'm b\nca\n n - - +'aa\z'm b\nca\n n - - +'aa$'m b\nca\n n - - +'aa\Z'm b\nca n - - +'aa\z'm b\nca n - - +'aa$'m b\nca n - - +ab\Z ab\nb\n n - - +ab\z ab\nb\n n - - +ab$ ab\nb\n n - - +ab\Z b\nab\n y $-[0] 2 +ab\z b\nab\n n - - +ab$ b\nab\n y $-[0] 2 +ab\Z b\nab y $-[0] 2 +ab\z b\nab y $-[0] 2 +ab$ b\nab y $-[0] 2 +'ab\Z'm ab\nb\n n - - +'ab\z'm ab\nb\n n - - +'ab$'m ab\nb\n y $-[0] 0 +'ab\Z'm b\nab\n y $-[0] 2 +'ab\z'm b\nab\n n - - +'ab$'m b\nab\n y $-[0] 2 +'ab\Z'm b\nab y $-[0] 2 +'ab\z'm b\nab y $-[0] 2 +'ab$'m b\nab y $-[0] 2 +ab\Z ac\nb\n n - - +ab\z ac\nb\n n - - +ab$ ac\nb\n n - - +ab\Z b\nac\n n - - +ab\z b\nac\n n - - +ab$ b\nac\n n - - +ab\Z b\nac n - - +ab\z b\nac n - - +ab$ b\nac n - - +'ab\Z'm ac\nb\n n - - +'ab\z'm ac\nb\n n - - +'ab$'m ac\nb\n n - - +'ab\Z'm b\nac\n n - - +'ab\z'm b\nac\n n - - +'ab$'m b\nac\n n - - +'ab\Z'm b\nac n - - +'ab\z'm b\nac n - - +'ab$'m b\nac n - - +ab\Z ca\nb\n n - - +ab\z ca\nb\n n - - +ab$ ca\nb\n n - - +ab\Z b\nca\n n - - +ab\z b\nca\n n - - +ab$ b\nca\n n - - +ab\Z b\nca n - - +ab\z b\nca n - - +ab$ b\nca n - - +'ab\Z'm ca\nb\n n - - +'ab\z'm ca\nb\n n - - +'ab$'m ca\nb\n n - - +'ab\Z'm b\nca\n n - - +'ab\z'm b\nca\n n - - +'ab$'m b\nca\n n - - +'ab\Z'm b\nca n - - +'ab\z'm b\nca n - - +'ab$'m b\nca n - - +abb\Z abb\nb\n n - - +abb\z abb\nb\n n - - +abb$ abb\nb\n n - - +abb\Z b\nabb\n y $-[0] 2 +abb\z b\nabb\n n - - +abb$ b\nabb\n y $-[0] 2 +abb\Z b\nabb y 
$-[0] 2 +abb\z b\nabb y $-[0] 2 +abb$ b\nabb y $-[0] 2 +'abb\Z'm abb\nb\n n - - +'abb\z'm abb\nb\n n - - +'abb$'m abb\nb\n y $-[0] 0 +'abb\Z'm b\nabb\n y $-[0] 2 +'abb\z'm b\nabb\n n - - +'abb$'m b\nabb\n y $-[0] 2 +'abb\Z'm b\nabb y $-[0] 2 +'abb\z'm b\nabb y $-[0] 2 +'abb$'m b\nabb y $-[0] 2 +abb\Z ac\nb\n n - - +abb\z ac\nb\n n - - +abb$ ac\nb\n n - - +abb\Z b\nac\n n - - +abb\z b\nac\n n - - +abb$ b\nac\n n - - +abb\Z b\nac n - - +abb\z b\nac n - - +abb$ b\nac n - - +'abb\Z'm ac\nb\n n - - +'abb\z'm ac\nb\n n - - +'abb$'m ac\nb\n n - - +'abb\Z'm b\nac\n n - - +'abb\z'm b\nac\n n - - +'abb$'m b\nac\n n - - +'abb\Z'm b\nac n - - +'abb\z'm b\nac n - - +'abb$'m b\nac n - - +abb\Z ca\nb\n n - - +abb\z ca\nb\n n - - +abb$ ca\nb\n n - - +abb\Z b\nca\n n - - +abb\z b\nca\n n - - +abb$ b\nca\n n - - +abb\Z b\nca n - - +abb\z b\nca n - - +abb$ b\nca n - - +'abb\Z'm ca\nb\n n - - +'abb\z'm ca\nb\n n - - +'abb$'m ca\nb\n n - - +'abb\Z'm b\nca\n n - - +'abb\z'm b\nca\n n - - +'abb$'m b\nca\n n - - +'abb\Z'm b\nca n - - +'abb\z'm b\nca n - - +'abb$'m b\nca n - - +(^|x)(c) ca y $2 c +a*abc?xyz+pqr{3}ab{2,}xy{4,5}pq{0,6}AB{0,}zz x n - - +a(?{$a=2;$b=3;($b)=$a})b yabz y $b 2 +round\(((?>[^()]+))\) _I(round(xs * sz),1) y $1 xs * sz +'((?x:.) )' x y $1- x - +'((?-x:.) )'x x y $1- x- +foo.bart foo.bart y - - +'^d[x][x][x]'m abcd\ndxxx y - - +.X(.+)+X bbbbXcXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - - # TODO: ICU doesn't optimize on trailing literals in pattern. 
+.X(.+)+XX bbbbXcXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - - +.XX(.+)+X bbbbXXcXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - - +.X(.+)+X bbbbXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - - +.X(.+)+XX bbbbXXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - - +.XX(.+)+X bbbbXXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - - +.X(.+)+[X] bbbbXcXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - - +.X(.+)+[X][X] bbbbXcXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - - +.XX(.+)+[X] bbbbXXcXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - - +.X(.+)+[X] bbbbXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - - +.X(.+)+[X][X] bbbbXXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - - +.XX(.+)+[X] bbbbXXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - - +.[X](.+)+[X] bbbbXcXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - - +.[X](.+)+[X][X] bbbbXcXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - - +.[X][X](.+)+[X] bbbbXXcXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - - +.[X](.+)+[X] bbbbXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - - +.[X](.+)+[X][X] bbbbXXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - - +.[X][X](.+)+[X] bbbbXXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - - +tt+$ xxxtt y - - +([a-\d]+) za-9z yi $1 a-9 +([\d-z]+) a0-za y $1 0-z +([\d-\s]+) a0- z y $1 0- +([a-[:digit:]]+) za-9z y $1 a-9 +([[:digit:]-z]+) =0-z= y $1 0-z +([[:digit:]-[:alpha:]]+) =0-z= iy $1 0-z Set difference in ICU +\GX.*X aaaXbX n - - +(\d+\.\d+) 3.1415926 y $1 3.1415926 +(\ba.{0,10}br) have a web browser y $1 a web br +'\.c(pp|xx|c)?$'i Changes n - - +'\.c(pp|xx|c)?$'i IO.c y - - +'(\.c(pp|xx|c)?$)'i IO.c y $1 .c +^([a-z]:) C:/ n - - +'^\S\s+aa$'m \nx aa y - - +(^|a)b ab y - - +^([ab]*?)(b)?(c)$ abac y -$2- -- +(\w)?(abc)\1b abcab n - - +^(?:.,){2}c a,b,c y - - +^(.,){2}c a,b,c y $1 b, +^(?:[^,]*,){2}c a,b,c y - - +^([^,]*,){2}c a,b,c y $1 b, +^([^,]*,){3}d aaa,b,c,d y $1 c, +^([^,]*,){3,}d aaa,b,c,d y $1 c, +^([^,]*,){0,3}d aaa,b,c,d y $1 c, +^([^,]{1,3},){3}d aaa,b,c,d y $1 c, +^([^,]{1,3},){3,}d aaa,b,c,d y $1 c, +^([^,]{1,3},){0,3}d aaa,b,c,d y $1 c, +^([^,]{1,},){3}d aaa,b,c,d y $1 c, +^([^,]{1,},){3,}d aaa,b,c,d y $1 c, +^([^,]{1,},){0,3}d 
aaa,b,c,d y $1 c, +^([^,]{0,3},){3}d aaa,b,c,d y $1 c, +^([^,]{0,3},){3,}d aaa,b,c,d y $1 c, +^([^,]{0,3},){0,3}d aaa,b,c,d y $1 c, +(?i) y - - +'(?!\A)x'm a\nxb\n y - - +^(a(b)?)+$ aba yi -$1-$2- -a-- Java disagrees. Not clear who is right. +'^.{9}abc.*\n'm 123\nabcabcabcabc\n y - - +^(a)?a$ a y -$1- -- +^(a)?(?(1)a|b)+$ a n - - +^(a\1?)(a\1?)(a\2?)(a\3?)$ aaaaaa y $1,$2,$3,$4 a,aa,a,aa +^(a\1?){4}$ aaaaaa y $1 aa +^(0+)?(?:x(1))? x1 y - - +^([0-9a-fA-F]+)(?:x([0-9a-fA-F]+)?)(?:x([0-9a-fA-F]+))? 012cxx0190 y - - +^(b+?|a){1,2}c bbbac y $1 a +^(b+?|a){1,2}c bbbbac y $1 a +\((\w\. \w+)\) cd. (A. Tw) y -$1- -A. Tw- +((?:aaaa|bbbb)cccc)? aaaacccc y - - +((?:aaaa|bbbb)cccc)? bbbbcccc y - - +(a)?(a)+ a y $1:$2 :a - +(ab)?(ab)+ ab y $1:$2 :ab - +(abc)?(abc)+ abc y $1:$2 :abc - +'b\s^'m a\nb\n n - - +\ba a y - - +^(a(??{"(?!)"})|(a)(?{1}))b ab yi $2 a # [ID 20010811.006] +ab(?i)cd AbCd n - - # [ID 20010809.023] +ab(?i)cd abCd y - - +(A|B)*(?(1)(CD)|(CD)) CD y $2-$3 -CD +(A|B)*(?(1)(CD)|(CD)) ABCD y $2-$3 CD- +(A|B)*?(?(1)(CD)|(CD)) CD y $2-$3 -CD # [ID 20010803.016] +(A|B)*?(?(1)(CD)|(CD)) ABCD y $2-$3 CD- +'^(o)(?!.*\1)'i Oo n - - +(.*)\d+\1 abc12bc y $1 bc +(?m:(foo\s*$)) foo\n bar y $1 foo +(.*)c abcd y $1 ab +(.*)(?=c) abcd y $1 ab +(.*)(?=c)c abcd yB $1 ab +(.*)(?=b|c) abcd y $1 ab +(.*)(?=b|c)c abcd y $1 ab +(.*)(?=c|b) abcd y $1 ab +(.*)(?=c|b)c abcd y $1 ab +(.*)(?=[bc]) abcd y $1 ab +(.*)(?=[bc])c abcd yB $1 ab +(.*)(?<=b) abcd y $1 ab +(.*)(?<=b)c abcd y $1 ab +(.*)(?<=b|c) abcd y $1 abc +(.*)(?<=b|c)c abcd y $1 ab +(.*)(?<=c|b) abcd y $1 abc +(.*)(?<=c|b)c abcd y $1 ab +(.*)(?<=[bc]) abcd y $1 abc +(.*)(?<=[bc])c abcd y $1 ab +(.*?)c abcd y $1 ab +(.*?)(?=c) abcd y $1 ab +(.*?)(?=c)c abcd yB $1 ab +(.*?)(?=b|c) abcd y $1 a +(.*?)(?=b|c)c abcd y $1 ab +(.*?)(?=c|b) abcd y $1 a +(.*?)(?=c|b)c abcd y $1 ab +(.*?)(?=[bc]) abcd y $1 a +(.*?)(?=[bc])c abcd yB $1 ab +(.*?)(?<=b) abcd y $1 ab +(.*?)(?<=b)c abcd y $1 ab +(.*?)(?<=b|c) abcd y $1 ab +(.*?)(?<=b|c)c abcd y 
$1 ab +(.*?)(?<=c|b) abcd y $1 ab +(.*?)(?<=c|b)c abcd y $1 ab +(.*?)(?<=[bc]) abcd y $1 ab +(.*?)(?<=[bc])c abcd y $1 ab +2(]*)?$\1 2 y $& 2 +(??{}) x yi - - diff --git a/go/mysql/icuregex/testdata/regextst.txt b/go/mysql/icuregex/testdata/regextst.txt new file mode 100644 index 00000000000..8d5d2c34a8e --- /dev/null +++ b/go/mysql/icuregex/testdata/regextst.txt @@ -0,0 +1,2793 @@ +# Copyright (C) 2016 and later: Unicode, Inc. and others. +# License & terms of use: http://www.unicode.org/copyright.html +# Copyright (c) 2001-2015 International Business Machines +# Corporation and others. All Rights Reserved. +# +# file: +# +# ICU regular expression test cases. +# +# format: one test case per line, +# = [# comment] +# = "" +# = "" +# the quotes on the pattern and match string can be " or ' or / +# = text, with the start and end of each +# capture group tagged with .... The overall match, +# if any, is group 0, as in <0>matched text +# A region can be specified with ... tags. +# Standard ICU unescape will be applied, allowing \u, \U, etc. to appear. +# +# = any combination of +# i case insensitive match +# x free spacing and comments +# s dot-matches-all mode +# m multi-line mode. +# ($ and ^ match at embedded new-lines) +# D Unix Lines mode (only recognize 0x0a as new-line) +# Q UREGEX_LITERAL flag. Entire pattern is literal string. +# v If icu configured without break iteration, this +# regex test pattern should not compile. +# e set the UREGEX_ERROR_ON_UNKNOWN_ESCAPES flag +# d dump the compiled pattern +# t trace operation of match engine. +# 2-9 a digit between 2 and 9, specifies the number of +# times to execute find(). The expected results are +# for the last find() in the sequence. +# G Only check match / no match. Do not check capture groups. +# E Pattern compilation error expected +# L Use LookingAt() rather than find() +# M Use matches() rather than find(). +# +# a Use non-Anchoring Bounds. +# b Use Transparent Bounds. 
+# The a and b options only make a difference if +# a region has been specified in the string. +# z|Z hitEnd was expected(z) or not expected (Z). +# With neither, hitEnd is not checked. +# y|Y Require End expected(y) or not expected (Y). +# +# White space must be present between the flags and the match string. +# + +# Look-ahead expressions +# +"(?!0{5})(\d{5})" "<0><1>00001zzzz" +"(?!0{5})(\d{5})z" "<0><1>00001zzzz" +"(?!0{5})(\d{5})(?!y)" "<0><1>00001zzzz" +"abc(?=def)" "<0>abcdef" +"(.*)(?=c)" "<0><1>abcdef" + +"(?:.*)(?=c)" "abcdef" +"(?:.*)(?=c)" b "<0>abcdef" # transparent bounds +"(?:.*)(?=c)" bM "<0>abcdef" # transparent bounds + +"(?:.*)(?=(c))" b "<0>ab<1>cdef" # Capture in look-ahead +"(?=(.)\1\1)\1" "abcc<0><1>dddefg" # Backrefs to look-ahead capture + +".(?!\p{L})" "abc<0>d " # Negated look-ahead +".(?!(\p{L}))" "abc<0>d " # Negated look-ahead, no capture + # visible outside of look-ahead +"and(?=roid)" L "<0>android" +"and(?=roid)" M "android" +"and(?=roid)" bM "<0>android" + +"and(?!roid)" L "<0>androix" +"and(?!roid)" L "android" + +"and(?!roid)" M "<0>android" # Opaque bounds +"and(?!roid)" bM "android" +"and(?!roid)" bM "<0>androix" + +# +# Negated Lookahead, various regions and region transparency +# +"abc(?!def)" "<0>abcxyz" +"abc(?!def)" "abcdef" +"abc(?!def)" "<0>abcdef" +"abc(?!def)" b "abcdef" +"abc(?!def)" b "<0>abcxyz" + +# +# Nested Lookahead / Behind +# +"one(?=(?:(?!).)*)" "<0>one stuff" +"one(?=(?:(?!).)*)" "one " + +# More nesting lookaround: pattern matches "qq" when not preceded by 'a' and followed by 'z' +"(?qqc" +"(?qqc" +"(?A<0>jk<2>B" +"(?=(?<=(\p{Lu})(?=..(\p{Lu})))).." "ajkB" +"(?=(?<=(\p{Lu})(?=..(\p{Lu})))).." 
"Ajkb" + +# Nested lookaround cases from bug ICU-20564 +"(?<=(?<=((?=)){0}+))" "<0>abc" +"(?<=c(?<=c((?=c)){1}+))" "c<0><1>cc" + +# +# Anchoring Bounds +# +"^def$" "abc<0>defghi" # anchoring (default) bounds +"^def$" a "abcdefghi" # non-anchoring bounds +"^def" a "<0>defghi" # non-anchoring bounds +"def$" a "abc<0>def" # non-anchoring bounds + +"^.*$" m "<0>line 1\n line 2" +"^.*$" m2 "line 1\n<0> line 2" +"^.*$" m3 "line 1\n line 2" +"^.*$" m "li<0>ne 1\n line 2" # anchoring bounds +"^.*$" m2 "line 1\n line 2" # anchoring bounds +"^.*$" am "line 1\n line 2" # non-anchoring bounds +"^.*$" am "li\n<0>ne \n1\n line 2" # non-anchoring bounds + +# +# HitEnd and RequireEnd for new-lines just before end-of-input +# +"xyz$" yz "<0>xyz\n" +"xyz$" yz "<0>xyz\x{d}\x{a}" + +"xyz$" myz "<0>xyz" # multi-line mode +"xyz$" mYZ "<0>xyz\n" +"xyz$" mYZ "<0>xyz\r\n" +"xyz$" mYZ "<0>xyz\x{85}abcd" + +"xyz$" Yz "xyz\nx" +"xyz$" Yz "xyza" +"xyz$" yz "<0>xyz" + +# +# HitEnd +# +"abcd" Lz "a" +"abcd" Lz "ab" +"abcd" Lz "abc" +"abcd" LZ "<0>abcd" +"abcd" LZ "<0>abcde" +"abcd" LZ "abcx" +"abcd" LZ "abx" +"abcd" Lzi "a" +"abcd" Lzi "ab" +"abcd" Lzi "abc" +"abcd" LZi "<0>abcd" +"abcd" LZi "<0>abcde" +"abcd" LZi "abcx" +"abcd" LZi "abx" + +# +# All Unicode line endings recognized. +# 0a, 0b, 0c, 0d, 0x85, 0x2028, 0x2029 +# Multi-line and non-multiline mode take different paths, so repeated tests. 
+# +"^def$" mYZ "abc\x{a}<0>def\x{a}ghi" +"^def$" mYZ "abc\x{b}<0>def\x{b}ghi" +"^def$" mYZ "abc\x{c}<0>def\x{c}ghi" +"^def$" mYZ "abc\x{d}<0>def\x{d}ghi" +"^def$" mYZ "abc\x{85}<0>def\x{85}ghi" +"^def$" mYZ "abc\x{2028}<0>def\x{2028}ghi" +"^def$" mYZ "abc\x{2029}<0>def\x{2029}ghi" +"^def$" mYZ "abc\r\n<0>def\r\nghi" + +"^def$" yz "<0>def\x{a}" +"^def$" yz "<0>def\x{b}" +"^def$" yz "<0>def\x{c}" +"^def$" yz "<0>def\x{d}" +"^def$" yz "<0>def\x{85}" +"^def$" yz "<0>def\x{2028}" +"^def$" yz "<0>def\x{2029}" +"^def$" yz "<0>def\r\n" +"^def$" yz "<0>def" + + +# "^def$" "<0>def\x{2028" #TODO: should be an error of some sort. + +# +# UNIX_LINES mode +# +"abc$" D "<0>abc\n" +"abc$" D "abc\r" +"abc$" D "abc\u0085" +"a.b" D "<0>a\rb" +"a.b" D "a\nb" +"(?d)abc$" "<0>abc\n" +"(?d)abc$" "abc\r" +"abc$" mD "<0>abc\ndef" +"abc$" mD "abc\rdef" + +".*def" L "abc\r def xyz" # Normal mode, LookingAt() stops at \r +".*def" DL "<0>abc\r def xyz" # Unix Lines mode, \r not line end. +".*def" DL "abc\n def xyz" + +"(?d)a.b" "a\nb" +"(?d)a.b" "<0>a\rb" + +"^abc" m "xyz\r<0>abc" +"^abc" Dm "xyz\rabc" +"^abc" Dm "xyz\n<0>abc" + + + +# Capturing parens +".(..)." "<0>a<1>bcd" + ".*\A( +hello)" "<0><1> hello" +"(hello)|(goodbye)" "<0><1>hello" +"(hello)|(goodbye)" "<0><2>goodbye" +"abc( +( inner(X?) +) xyz)" "leading cruft <0>abc<1> <2> inner<3> xyz cruft" +"\s*([ixsmdt]*)([:letter:]*)" "<0> <1>d<2> " +"(a|b)c*d" "a<0><1>bcd" + +# Non-capturing parens (?: stuff). Groups, but does not capture. +"(?:abc)*(tail)" "<0>abcabcabc<1>tail" + +# Non-greedy *? quantifier +".*?(abc)" "<0> abx <1>abc abc abc abc" +".*(abc)" "<0> abx abc abc abc <1>abc" + +"((?:abc |xyz )*?)abc " "<0><1>xyz abc abc abc " +"((?:abc |xyz )*)abc " "<0><1>xyz abc abc abc " + +# Non-greedy +? quantifier +"(a+?)(a*)" "<0><1>a<2>aaaaaaaaaaaa" +"(a+)(a*)" "<0><1>aaaaaaaaaaaaa<2>" + +"((ab)+?)((ab)*)" "<0><1><2>ab<3>ababababab<4>ab" +"((ab)+)((ab)*)" "<0><1>abababababab<2>ab<3>" + +# Non-greedy ?? 
quantifier +"(ab)(ab)??(ab)??(ab)??(ab)??c" "<0><1>ab<4>ab<5>abc" + +# Unicode Properties as naked elements in a pattern +"\p{Lu}+" "here we go ... <0>ABC and no more." +"(\p{L}+)(\P{L}*?) (\p{Zs}*)" "7999<0><1>letters<2>4949%^&*( <3> " + +# \w and \W +"\w+" " $%^&*( <0>hello123%^&*(" +"\W+" "<0> $%^&*( hello123%^&*(" + +# \A match at beginning of input only. + ".*\Ahello" "<0>hello hello" + ".*hello" "<0>hello hello" +".*\Ahello" "stuff\nhello" # don't match after embedded new-line. + +# \b \B +# +".*?\b(.).*" "<0> $%^&*( <1>hello123%^&*()gxx" +"\ba\b" "-<0>a" +"\by\b" "xy" +"[ \b]" "<0>b" # in a set, \b is a literal b. + +# Finds first chars of up to 5 words +"(?:.*?\b(\w))?(?:.*?\b(\w))?(?:.*?\b(\w))?(?:.*?\b(\w))?(?:.*?\b(\w))?" "<0><1>Tthe <2>qick <3>brown <4>fox" + +"H.*?((?:\B.)+)" "<0>H<1>ello " +".*?((?:\B.)+).*?((?:\B.)+).*?((?:\B.)+)" "<0>H<1>ello <2> g<3>oodbye " + +"(?:.*?\b(.))?(?:.*?\b(.))?(?:.*?\b(.))?(?:.*?\b(.))?(?:.*?\b(.))?.*" "<0> \u0301 \u0301<1>A\u0302BC\u0303\u0304<2> \u0305 \u0306<3>X\u0307Y\u0308" + + +# +# Unicode word boundary mode +# +"(?w).*?\b" v "<0>hello, world" +"(?w).*?(\b.+?\b).*" v "<0><1> 123.45 " +"(?w).*?(\b\d.*?\b).*" v "<0> <1>123.45 " +".*?(\b.+?\b).*" "<0> <1>123.45 " +"(?w:.*?(\b\d.*?\b).*)" v "<0> <1>123.45 " +"(?w:.*?(\b.+?\b).*)" v "<0><1>don't " +"(?w:.+?(\b\S.+?\b).*)" v "<0> <1>don't " +"(?w:(\b.+?)(\b.+?)(\b.+?)(\b.+?)(\b.+?)(\b.+?)(\b.+?).*)" v "<0><1>.<2> <3>,<4>:<5>$<6>37,000.50<7> " + +# +# Unicode word boundaries with Regions +# +"(?w).*?\b" v "abc<0>defghi" +"(?w).*?\b" v2 "abcdef<0>ghi" +"(?w).*?\b" v3 "abcdefghi" +#"(?w).*?\b" vb "abc<0>defghi" # TODO: bug. Ticket 6073 +#"(?w).*?\b" vb2 "abcdefghi" + + + +# . does not match new-lines +"." "\u000a\u000d\u0085\u000c\u000b\u2028\u2029<0>X\u000aY" +"A." 
"A\u000a "# no match + +# \d for decimal digits +"\d*" "<0>0123456789\u0660\u06F9\u0969\u0A66\u17E2\uFF10\U0001D7CE\U0001D7FFnon-digits" +"\D+" "<0>non digits" +"\D*(\d*)(\D*)" "<0>non-digits<1>3456666<2>more non digits" + +# \Q...\E quote mode +"hel\Qlo, worl\Ed" "<0>hello, world" +"\Q$*^^(*)?\A\E(a*)" "<0>$*^^(*)?\\A<1>aaaaaaaaaaaaaaa" +"[abc\Q]\r\E]+" "<0>aaaccc]]]\\\\\\\r..." # \Q ... \E escape in a [set] + +# UREGEX_LITERAL - entire pattern is a literal string, no escapes recognized. +# Note that data strings in test cases still get escape processing. +"abc\an\r\E\\abcd\u0031bye" Q "lead<0>abc\\an\\r\\E\\\\abcd\\u0031byeextra" +"case insensitive \\ (l)iteral" Qi "stuff!! <0>cAsE InSenSiTiVE \\\\ (L)ITeral" + +# \S and \s space characters +"\s+" "not_space<0> \t \r \n \u3000 \u2004 \u2028 \u2029xyz" +"(\S+).*?(\S+).*" "<0><1>Not-spaces <2>more-non-spaces " + +# \X consume one Grapheme Cluster. +"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>A<2>B<3> <4>\r\n" +"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>A\u0301<2>\n<3>\u0305<4>a\u0302\u0303\u0304" +"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>\u1100\u1161\u11a8<2>\u115f\u11a2\u11f9" +"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>\u1100\uac01<2>\uac02<3>\uac03\u11b0" +"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>\u1100\u1101\uac02\u0301<2>\u1100" +# Regional indicator pairs are grapheme clusters +"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>\U0001f1e6\U0001f1e8<2>\U0001f1ea\U0001f1ff" +# Grapheme Break rule 9b: Prepend x +"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>\U000111C2x" + +# Grapheme clusters that straddle a match region. Matching is pinned to the region limits, +# giving boundaries inside grapheme clusters +"(\X)?(\X)?(\X)?" 
v "a\u0301<0><1>\u0301\u0301<2>z\u0302\u0302\u0302" +# Same as previous test case, but without the region limits. +"(\X)?(\X)?(\X)?" v "<0><1>a\u0301\u0301\u0301<2>z\u0302\u0302\u0302" + +# ^ matches only at beginning of line +".*^(Hello)" "<0><1>Hello Hello Hello Hello Goodbye" +".*(Hello)" "<0>Hello Hello Hello <1>Hello Goodbye" +".*^(Hello)" " Hello Hello Hello Hello Goodbye"# No Match + +# $ matches only at end of line, or before a newline preceding the end of line +".*?(Goodbye)$" zy "<0>Hello Goodbye Goodbye <1>Goodbye" +".*?(Goodbye)" ZY "<0>Hello <1>Goodbye Goodbye Goodbye" +".*?(Goodbye)$" z "Hello Goodbye> Goodbye Goodbye "# No Match + +".*?(Goodbye)$" zy "<0>Hello Goodbye Goodbye <1>Goodbye\n" +".*?(Goodbye)$" zy "<0>Hello Goodbye Goodbye <1>Goodbye\n" +".*?(Goodbye)$" zy "<0>Hello Goodbye Goodbye <1>Goodbye\r\n" +".*?(Goodbye)$" z "Hello Goodbye Goodbye Goodbye\n\n"# No Match + +# \Z matches at end of input, like $ with default flags. +".*?(Goodbye)\Z" zy "<0>Hello Goodbye Goodbye <1>Goodbye" +".*?(Goodbye)" ZY "<0>Hello <1>Goodbye Goodbye Goodbye" +".*?(Goodbye)\Z" z "Hello Goodbye> Goodbye Goodbye "# No Match +"here$" z "here\nthe end"# No Match + +".*?(Goodbye)\Z" "<0>Hello Goodbye Goodbye <1>Goodbye\n" +".*?(Goodbye)\Z" "<0>Hello Goodbye Goodbye <1>Goodbye\n" +".*?(Goodbye)\Z" "<0>Hello Goodbye Goodbye <1>Goodbye\r\n" +".*?(Goodbye)\Z" "Hello Goodbye Goodbye Goodbye\n\n"# No Match + +# \z matches only at the end of string. +# no special treatment of new lines. +# no dependencies on flag settings. +".*?(Goodbye)\z" zy "<0>Hello Goodbye Goodbye <1>Goodbye" +".*?(Goodbye)\z" z "Hello Goodbye Goodbye Goodbye "# No Match +"here$" z "here\nthe end"# No Match + +".*?(Goodbye)\z" z "Hello Goodbye Goodbye Goodbye\n"# No Match +".*?(Goodbye)\n\z" zy "<0>Hello Goodbye Goodbye <1>Goodbye\n" +"abc\z|def" ZY "abc<0>def" + +# (?# comment) doesn't muck up pattern +"Hello (?# this is a comment) world" " <0>Hello world..." 
+ +# Check some implementation corner cases base on the way literal strings are compiled. +"A" "<0>A" +"AB" "<0>ABABABAB" +"AB+" "<0>ABBBA" +"AB+" "<0>ABABAB" +"ABC+" "<0>ABCABC" +"ABC+" "<0>ABCCCCABC" +"(?:ABC)+" "<0>ABCABCABCD" +"(?:ABC)DEF+" "<0>ABCDEFFFD" +"AB\.C\eD\u0666E" "<0>AB.C\u001BD\u0666EF" +"ab\Bde" "<0>abde" + +# loop breaking +"(a?)*" "<0><1>xyz" +"(a?)+" "<0><1>xyz" +"^(?:a?b?)*$" "a--" +"(x?)*xyz" "<0>xx<1>xyz" # Sligthtly weird, but correct. The "last" time through (x?), + # it matches the empty string. + +# Set expressions, basic operators and escapes work +# +"[\d]+" "<0>0123abc/.," +"[^\d]+" "0123<0>abc/.," +"[\D]+" "0123<0>abc/.," +"[^\D]+" "<0>0123abc/.," + +"[\s]+" "<0> \tabc/.," +"[^\s]+" " \t<0>abc/.," +"[\S]+" " \t<0>abc/.," +"[^\S]+" "<0> \tabc/.," + +"[\w]+" "<0>abc123 .,;" +"[^\w]+" "abc123<0> .,;" +"[\W]+" "abc123<0> .,;" +"[^\W]+" "<0>abc123 .,;" + +"[\z]+" "abc<0>zzzdef" # \z has no special meaning +"[^\z]+" "<0>abczzzdef" +"[\^]+" "abc<0>^^" +"[^\^]+" "<0>abc^^" + +"[\u0041c]+" "<0>AcAcdef" +"[\U00010002]+" "<0>\ud800\udc02\U00010003" +"[^\U00010002]+" "<0>Hello\x{10002}" +"[\x61b]+" "<0>ababcde" +#"[\x6z]+" "\x06" #TODO: single hex digits should fail +"[\x{9}\x{75}\x{6d6}\x{6ba6}\x{6146B}\x{10ffe3}]+" "<0>\u0009\u0075\u06d6\u6ba6\U0006146B\U0010ffe3abc" + +"[\N{LATIN CAPITAL LETTER TONE SIX}ab\N{VARIATION SELECTOR-70} ]+" "x<0> \u0184\U000E0135 abc" +"[\N{LATIN SMALL LETTER C}-\N{LATIN SMALL LETTER F}]+" "ab<0>cdefghi" + + + +# +# [set expressions], check the precedence of '-', '&', '--', '&&' +# '-' and '&', for compatibility with ICU UnicodeSet, have the same +# precedence as the implicit Union between adjacent items. +# '--' and '&&', for compatibility with Java, have lower precedence than +# the implicit Union operations. '--' and '&&' themselves +# have the same precedence, and group left to right. 
+# +"[[a-m]-[f-w]p]+" "<0>depfgwxyz" +"[^[a-m]-[f-w]p]+" "dep<0>fgwxyz" + +"[[a-m]--[f-w]p]+" "<0>depfgwxyz" +"[^[a-m]--[f-w]p]+" "de<0>pfgwxyz" + +"[[a-m]&[e-s]w]+" "<0>efmwadnst" +"[^[a-m]&[e-s]w]+" "efmw<0>adnst" + +"[[a-m]&[e-s]]+" "<0>efmadnst" + + + +# {min,max} iteration qualifier +"A{3}BC" "<0>AAABC" + +"(ABC){2,3}AB" "no matchAB" +"(ABC){2,3}AB" "ABCAB" +"(ABC){2,3}AB" "<0>ABC<1>ABCAB" +"(ABC){2,3}AB" "<0>ABCABC<1>ABCAB" +"(ABC){2,3}AB" "<0>ABCABC<1>ABCABCAB" + +"(ABC){2}AB" "ABCAB" +"(ABC){2}AB" "<0>ABC<1>ABCAB" +"(ABC){2}AB" "<0>ABC<1>ABCABCAB" +"(ABC){2}AB" "<0>ABC<1>ABCABCABCAB" + +"(ABC){2,}AB" "ABCAB" +"(ABC){2,}AB" "<0>ABC<1>ABCAB" +"(ABC){2,}AB" "<0>ABCABC<1>ABCAB" +"(ABC){2,}AB" "<0>ABCABCABC<1>ABCAB" + +"X{0,0}ABC" "<0>ABC" +"X{0,1}ABC" "<0>ABC" + +"(?:Hello(!{1,3}) there){1}" "Hello there" +"(?:Hello(!{1,3}) there){1}" "<0>Hello<1>! there" +"(?:Hello(!{1,3}) there){1}" "<0>Hello<1>!! there" +"(?:Hello(!{1,3}) there){1}" "<0>Hello<1>!!! there" +"(?:Hello(!{1,3}) there){1}" "Hello!!!! there" + +# Nongreedy {min,max}? intervals +"(ABC){2,3}?AB" "no matchAB" +"(ABC){2,3}?AB" "ABCAB" +"(ABC){2,3}?AB" "<0>ABC<1>ABCAB" +"(ABC){2,3}?AB" "<0>ABC<1>ABCABCAB" +"(ABC){2,3}?AB" "<0>ABC<1>ABCABCABCAB" +"(ABC){2,3}?AX" "<0>ABCABC<1>ABCAX" +"(ABC){2,3}?AX" "ABC<0>ABCABC<1>ABCAX" + +# Possessive {min,max}+ intervals +"(ABC){2,3}+ABC" "ABCABCABC" +"(ABC){1,2}+ABC" "<0>ABC<1>ABCABC" +"(?:(.)\1){2,5}+." "<0>aabbcc<1>ddex" + + +# Atomic Grouping +"(?>.*)abc" "abcabcabc" # no match. .* consumed entire string. 
+"(?>(abc{2,4}?))(c*)" "<0><1>abcc<2>cccddd" +"(\.\d\d(?>[1-9]?))\d+" "1.625" +"(\.\d\d(?>[1-9]?))\d+" "1<0><1>.6250" + +# Possessive *+ +"(abc)*+a" "abcabcabc" +"(abc)*+a" "<0>abc<1>abcab" +"(a*b)*+a" "<0><1>aaaabaaaa" + +# Possessive ?+ +"c?+ddd" "<0>cddd" +"c?+cddd" "cddd" +"c?cddd" "<0>cddd" + +# Back Reference +"(?:ab(..)cd\1)*" "<0>ab23cd23ab<1>wwcdwwabxxcdyy" +"ab(?:c|(d?))(\1)" "<0>ab<1><2>c" +"ab(?:c|(d?))(\1)" "<0>ab<1>d<2>d" +"ab(?:c|(d?))(\1)" "<0>ab<1><2>e" +"ab(?:c|(d?))(\1)" "<0>ab<1><2>" + +# Back References that hit/don't hit end +"(abcd) \1" z "abcd abc" +"(abcd) \1" Z "<0><1>abcd abcd" +"(abcd) \1" Z "<0><1>abcd abcd " + +# Case Insensitive back references that hit/don't hit end. +"(abcd) \1" zi "abcd abc" +"(abcd) \1" Zi "<0><1>abcd ABCD" +"(abcd) \1" Zi "<0><1>abcd ABCD " + +# Back references that hit/don't hit boundary limits. + +"(abcd) \1" z "abcd abcd " +"(abcd) \1" Z "<0><1>abcd abcd " +"(abcd) \1" Z "<0><1>abcd abcd " + +"(abcd) \1" zi "abcd abcd " +"(abcd) \1" Zi "<0><1>abcd abcd " +"(abcd) \1" Zi "<0><1>abcd abcd " + +# Back reference that fails match near the end of input without actually hitting the end. +"(abcd) \1" ZL "abcd abd" +"(abcd) \1" ZLi "abcd abd" + +# Back reference to a zero-length match. They are always a successful match. +"ab(x?)cd(\1)ef" "<0>ab<1>cd<2>ef" +"ab(x?)cd(\1)ef" i "<0>ab<1>cd<2>ef" + +# Back refs to capture groups that didn't participate in the match. +"ab(?:(c)|(d))\1" "abde" +"ab(?:(c)|(d))\1" "<0>ab<1>cce" +"ab(?:(c)|(d))\1" i "abde" +"ab(?:(c)|(d))\1" i "<0>ab<1>cce" + +# Named back references +"(?abcd)\k" "<0><1>abcdabcd" +"(no)?(?abcd)\k" "<0><2>abcdabcd" + +"(?...)" E " " # backref names are ascii letters & numbers only" +"(?<1a>...)" E " " # backref names must begin with a letter" +"(?.)(?.)" E " " # Repeated names are illegal. 
+ + +# Case Insensitive +"aBc" i "<0>ABC" +"a[^bc]d" i "ABD" +'((((((((((a))))))))))\10' i "<0><1><2><3><4><5><6><7><8><9><10>AA" + +"(?:(?i)a)b" "<0>Ab" +"ab(?i)cd" "<0>abCd" +"ab$cd" "abcd" + +"ssl" i "abc<0>ßlxyz" +"ssl" i "abc<0>ẞlxyz" +"FIND" i "can <0>find ?" # fi ligature, \ufb01 +"find" i "can <0>FIND ?" +"ῧ" i "xxx<0>ῧxxx" # Composed char (match string) decomposes when case-folded (pattern) + +# White space handling +"a b" "ab" +"abc " "abc" +"abc " "<0>abc " +"ab[cd e]z" "<0>ab z" +"ab\ c" "<0>ab c " +"ab c" "<0>ab c " +"ab c" x "ab c " +"ab\ c" x "<0>ab c " + +# +# Pattern Flags +# +"(?u)abc" "<0>abc" +"(?-u)abc" "<0>abc" + +# +# \c escapes (Control-whatever) +# +"\cA" "<0>\u0001" +"\ca" "<0>\u0001" +"\c\x" "<0>\u001cx" + + +#Multi-line mode +'b\s^' m "a\nb\n" +"(?m)^abc$" "abc \n abc\n<0>abc\nabc" +"(?m)^abc$" 2 "abc \n abc\nabc\n<0>abc" +"^abc$" 2 "abc \n abc\nabc\nabc" + +# Empty and full range +"[\u0000-\U0010ffff]+" "<0>abc\u0000\uffff\U00010000\U0010ffffzz" +"[^\u0000-\U0010ffff]" "abc\u0000\uffff\U00010000\U0010ffffzz" +"[^a--a]+" "<0>abc\u0000\uffff\U00010000\U0010ffffzz" + +# Free-spacing mode +"a b c # this is a comment" x "<0>abc " +'^a (?#xxx) (?#yyy) {3}c' x "<0>aaac" +"a b c [x y z]" x "abc " +"a b c [x y z]" x "a b c " +"a b c [x y z]" x "<0>abcxyz" +"a b c [x y z]" x "<0>abcyyz" + +# +# Look Behind +# +"(?<=a)b" "a<0>b" +"(.*)(?<=[bc])" "<0><1>abcd" +"(?<=(abc))def" "<1>abc<0>def" # lookbehind precedes main match. +"(?<=ab|abc)xyz" "abwxyz" # ab matches, but not far enough. 
+"(?<=abc)cde" "abcde" +"(?<=abc|ab)cde" "ab<0>cde" +"(?<=abc|ab)cde" "abc<0>cde" + +"(?<=bc?c?c?)cd" "ab<0>cd" +"(?<=bc?c?c?)cd" "abc<0>cd" +"(?<=bc?c?c?)cd" "abcc<0>cd" +"(?<=bc?c?c?)cd" "abccc<0>cd" +"(?<=bc?c?c?)cd" "abcccccd" +"(?<=bc?c?c?)c+d" "ab<0>cccccd" + +".*(?<=: ?)(\w*)" "<0>1:one 2: two 3:<1>three " + +# +# Named Characters +# +"a\N{LATIN SMALL LETTER B}c" "<0>abc" +"a\N{LATIN SMALL LETTER B}c" i "<0>abc" +"a\N{LATIN SMALL LETTER B}c" i "<0>aBc" +"a\N{LATIN SMALL LETTER B}c" "aBc" + +"\N{FULL STOP}*" "<0>...abc" + +"$" "abc<0>" + +# +# Optimizations of .* at end of patterns +# +"abc.*" "<0>abcdef" +"abc.*$" "<0>abcdef" +"abc(.*)" "<0>abc<1>def" +"abc(.*)" "<0>abc<1>" +"abc.*" "<0>abc\ndef" +"abc.*" s "<0>abc\ndef" +"abc.*$" s "<0>abc\ndef" +"abc.*$" "abc\ndef" +"abc.*$" m "<0>abc\ndef" +"abc.*\Z" m "abc\ndef" +"abc.*\Z" sm "<0>abc\ndef" + +"abc*" "<0>abcccd" +"abc*$" "<0>abccc" +"ab(?:ab[xyz]\s)*" "<0>ababy abx abc" + +"(?:(abc)|a)(?:bc)+" "<0>abc" +"(?:(abc)|a)(?:bc)*" "<0><1>abc" +"^[+\-]?[0-9]*\.?[0-9]*" "<0>123.456" + +"ab.+yz" "<0>abc12345xyzttt" +"ab.+yz" s "<0>abc12345xyzttt" + +"ab.+yz" "abc123\n45xyzttt" +"ab.+yz" s "<0>abc12\n345xyzttt" + +"ab[0-9]+yz" "---abyz+++" +"ab[0-9]+yz" "---<0>ab1yz+++" +"ab[0-9]+yz" "---<0>ab12yz+++" +"ab[0-9]+yz" "---<0>ab123456yz+++" + +"ab([0-9]+|[A-Z]+)yz" "---abyz+++" +"ab([0-9]+|[A-Z]+)yz" "---<0>ab<1>1yz+++" +"ab([0-9]+|[A-Z]+)yz" "---<0>ab<1>12yz+++" +"ab([0-9]+|[A-Z]+)yz" "---<0>ab<1>Ayz+++" +"ab([0-9]+|[A-Z]+)yz" "---<0>ab<1>AByz+++" +"ab([0-9]+|[A-Z]+)yz" "---<0>ab<1>ABCDEyz+++" + +# +# Hex format \x escaping +# +"ab\x63" "<0>abc" +"ab\x09w" "<0>ab\u0009w" +"ab\xabcdc" "<0>ab\u00abcdc" +"ab\x{abcd}c" "<0>ab\uabcdc" +"ab\x{101234}c" "<0>ab\U00101234c" +"abα" "<0>abα" + +# +# Octal Escaping. This conforms to Java conventions, not Perl. +"\0101\00\03\073\0154\01442" "<0>A\u0000\u0003\u003b\u006c\u0064\u0032" +"\0776" "<0>\u003f\u0036" # overflow, the 6 is literal. 
+"\0376xyz" "<0>\u00fexyz" +"\08" E "<0>\u00008" +"\0" E "x" + +# +# \u Surrogate Pairs +# +"\ud800\udc00" "<0>\U00010000" +"\ud800\udc00*" "<0>\U00010000\U00010000\U00010000\U00010001" +# TODO (Vitess): The next case has invalid UTF-8, so it's not supported right now for testing. It likely works in practice though! +# "\ud800\ud800\udc00" "<0>\ud800\U00010000\U00010000\U00010000\U00010001" +"(\ud800)(\udc00)" "\U00010000" +"\U00010001+" "<0>\U00010001\U00010001\udc01" + +# +# hitEnd with find() +# +"abc" Z "aa<0>abc abcab" +"abc" 2Z "aaabc <0>abcab" +"abc" 3z "aa>abc abcab" + +# +# \ escaping +# +"abc\jkl" "<0>abcjkl" # escape of a non-special letter is just itself. +"abc[ \j]kl" "<0>abcjkl" + +# +# \R all newline sequences. +# +"abc\Rxyz" "<0>abc\u000axyzgh" +"abc\Rxyz" "<0>abc\u000bxyzgh" +"abc\Rxyz" "<0>abc\u000cxyzgh" +"abc\Rxyz" "<0>abc\u000dxyzgh" +"abc\Rxyz" "<0>abc\u0085xyzgh" +"abc\Rxyz" "<0>abc\u2028xyzgh" +"abc\Rxyz" "<0>abc\u2029xyzgh" +"abc\Rxyz" "<0>abc\u000d\u000axyzgh" + +"abc\R\nxyz" "abc\u000d\u000axyzgh" # \R cannot match only the CR from a CR/LF sequence. +"abc\r\nxyz" "<0>abc\u000d\u000axyzgh" + +"abc\Rxyz" "abc\u0009xyz" # Assorted non-matches. +"abc\Rxyz" "abc\u000exyz" +"abc\Rxyz" "abc\u202axyz" + +# \v \V single character new line sequences. 
+ +"abc\vxyz" "<0>abc\u000axyzgh" +"abc\vxyz" "<0>abc\u000bxyzgh" +"abc\vxyz" "<0>abc\u000cxyzgh" +"abc\vxyz" "<0>abc\u000dxyzgh" +"abc\vxyz" "<0>abc\u0085xyzgh" +"abc\vxyz" "<0>abc\u2028xyzgh" +"abc\vxyz" "<0>abc\u2029xyzgh" +"abc\vxyz" "abc\u000d\u000axyzgh" +"abc\vxyz" "abc?xyzgh" + +"abc[\v]xyz" "<0>abc\u000axyzgh" +"abc[\v]xyz" "<0>abc\u000bxyzgh" +"abc[\v]xyz" "<0>abc\u000cxyzgh" +"abc[\v]xyz" "<0>abc\u000dxyzgh" +"abc[\v]xyz" "<0>abc\u0085xyzgh" +"abc[\v]xyz" "<0>abc\u2028xyzgh" +"abc[\v]xyz" "<0>abc\u2029xyzgh" +"abc[\v]xyz" "abc\u000d\u000axyzgh" +"abc[\v]xyz" "abc?xyzgh" + +"abc\Vxyz" "abc\u000axyzgh" +"abc\Vxyz" "abc\u000bxyzgh" +"abc\Vxyz" "abc\u000cxyzgh" +"abc\Vxyz" "abc\u000dxyzgh" +"abc\Vxyz" "abc\u0085xyzgh" +"abc\Vxyz" "abc\u2028xyzgh" +"abc\Vxyz" "abc\u2029xyzgh" +"abc\Vxyz" "abc\u000d\u000axyzgh" +"abc\Vxyz" "<0>abc?xyzgh" + +# \h \H horizontal white space. Defined as gc=space_separator plus ascii tab + +"abc\hxyz" "<0>abc xyzgh" +"abc\Hxyz" "abc xyzgh" +"abc\hxyz" "<0>abc\u2003xyzgh" +"abc\Hxyz" "abc\u2003xyzgh" +"abc\hxyz" "<0>abc\u0009xyzgh" +"abc\Hxyz" "abc\u0009xyzgh" +"abc\hxyz" "abc?xyzgh" +"abc\Hxyz" "<0>abc?xyzgh" + +"abc[\h]xyz" "<0>abc xyzgh" +"abc[\H]xyz" "abc xyzgh" +"abc[\h]xyz" "<0>abc\u2003xyzgh" +"abc[\H]xyz" "abc\u2003xyzgh" +"abc[\h]xyz" "<0>abc\u0009xyzgh" +"abc[\H]xyz" "abc\u0009xyzgh" +"abc[\h]xyz" "abc?xyzgh" +"abc[\H]xyz" "<0>abc?xyzgh" + + +# +# Bug xxxx +# +"(?:\-|(\-?\d+\d\d\d))?(?:\-|\-(\d\d))?(?:\-|\-(\d\d))?(T)?(?:(\d\d):(\d\d):(\d\d)(\.\d+)?)?(?:(?:((?:\+|\-)\d\d):(\d\d))|(Z))?" 
MG "<0>-1234-21-31T41:51:61.789+71:81" + + +# +# A random, complex, meaningless pattern that should at least compile +# +"(?![^\\G)(?![^|\]\070\ne\{\t\[\053\?\\\x51\a\075\0023-\[&&[|\022-\xEA\00-\u41C2&&[^|a-\xCC&&[^\037\uECB3\u3D9A\x31\|\[^\016\r\{\,\uA29D\034\02[\02-\[|\t\056\uF599\x62\e\<\032\uF0AC\0026\0205Q\|\\\06\0164[|\057-\u7A98&&[\061-g|\|\0276\n\042\011\e\xE8\x64B\04\u6D0EDW^\p{Lower}]]]]?)(?<=[^\n\\\t\u8E13\,\0114\u656E\xA5\]&&[\03-\026|\uF39D\01\{i\u3BC2\u14FE]])(?<=[^|\uAE62\054H\|\}&&^\p{Space}])(?sxx)(?<=[\f\006\a\r\xB4]{1,5})|(?x-xd:^{5}+)()" "<0>abc" + + +# +# Bug 3225 + +"1|9" "<0>1" +"1|9" "<0>9" +"1*|9" "<0>1" +"1*|9" "<0>9" + +"(?:a|ac)d" "<0>acd" +"a|ac" "<0>ac" + +# +# Bug 3320 +# +"(a([^ ]+)){0,} (c)" "<0><1>a<2>b <3>c " +"(a([^ ]+))* (c)" "<0><1>a<2>b <3>c " + +# +# Bug 3436 +# +"(.*?) *$" "<0><1>test " + +# +# Bug 4034 +# +"\D" "<0>ABC\u00ffDEF" +"\d" "ABC\u00ffDEF" +"\D" "<0>\u00ffDEF" +"\d" "\u00ffDEF" +"\D" "123<0>\u00ffDEF" +"\D" "<0>\u0100DEF" +"\D" "123<0>\u0100DEF" + +# +#bug 4024, new line sequence handling +# +"(?m)^" "<0>AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a" +"(?m)^" 2 "AA\u000d\u000a<0>BB\u000d\u000aCC\u000d\u000a" +"(?m)^" 3 "AA\u000d\u000aBB\u000d\u000a<0>CC\u000d\u000a" +"(?m)^" 4 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a" + +"(?m)$" "AA<0>\u000d\u000aBB\u000d\u000aCC\u000d\u000a" +"(?m)$" 2 "AA\u000d\u000aBB<0>\u000d\u000aCC\u000d\u000a" +"(?m)$" 3 "AA\u000d\u000aBB\u000d\u000aCC<0>\u000d\u000a" +"(?m)$" 4 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a<0>" +"(?m)$" 5 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a" + +"$" "AA\u000d\u000aBB\u000d\u000aCC<0>\u000d\u000a" +"$" 2 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a<0>" +"$" 3 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a" + +"$" "\u000a\u0000a<0>\u000a" +"$" 2 "\u000a\u0000a\u000a<0>" +"$" 3 "\u000a\u0000a\u000a" + +"$" "<0>" +"$" 2 "" + +"$" "<0>\u000a" +"$" 2 "\u000a<0>" +"$" 3 "\u000a" + +"^" "<0>" +"^" 2 "" + +"\Z" "<0>" +"\Z" 2 "" +"\Z" 2 "\u000a<0>" +"\Z" 
"<0>\u000d\u000a" +"\Z" 2 "\u000d\u000a<0>" + + +# No matching ^ at interior new-lines if not in multi-line mode. +"^" "<0>AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a" +"^" 2 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a" + +# +# Dot-matches-any mode, and stopping at new-lines if off. +# +"." "<0>123\u000aXYZ" +"." 2 "1<0>23\u000aXYZ" +"." 3 "12<0>3\u000aXYZ" +"." 4 "123\u000a<0>XYZ" # . doesn't match newlines +"." 4 "123\u000b<0>XYZ" +"." 4 "123\u000c<0>XYZ" +"." 4 "123\u000d<0>XYZ" +"." 4 "123\u000d\u000a<0>XYZ" +"." 4 "123\u0085<0>XYZ" +"." 4 "123\u2028<0>XYZ" +"." 4 "123\u2029<0>XYZ" +"." 4s "123<0>\u000aXYZ" # . matches any +"." 4s "123<0>\u000bXYZ" +"." 4s "123<0>\u000cXYZ" +"." 4s "123<0>\u000dXYZ" +"." 4s "123<0>\u000d\u000aXYZ" +"." 4s "123<0>\u0085XYZ" +"." 4s "123<0>\u2028XYZ" +"." 4s "123<0>\u2029XYZ" +".{6}" "123\u000a\u000dXYZ" +".{6}" s "<0>123\u000a\u000dXY" + + +# +# Ranges +# +".*" "abc<0>defghi" +"a" "aaa<0>aaaaaa" +"a" 2 "aaaa<0>aaaaa" +"a" 3 "aaaaa<0>aaaa" +"a" 4 "aaaaaaaaa" +"a" "aaa<0>aaaaaa" + +# +# [set] parsing, systematically run through all of the parser states. +# +# +"[def]+" "abc<0>ddeeffghi" # set-open +"[^def]+" "<0>abcdefghi" +"[:digit:]+" "abc<0>123def" +"[:^digit:]+" "<0>abc123def" +"[\u005edef]+" "abc<0>de^fghi" + +"[]]+" "abc<0>]]][def" # set-open2 +"[^]]+" "<0>abc]]][def" + +"[:Lu:]+" "abc<0>ABCdef" # set-posix +"[:Lu]+" "abc<0>uL::Lu" +"[:^Lu]+" "abc<0>uL:^:Lu" +"[:]+" "abc<0>:::def" +"[:whats this:]" E " " +"[--]+" dE "-------" + +"[[nested]]+" "xyz[<0>nnetsteed]abc" #set-start +"[\x{41}]+" "CB<0>AAZYX" +"[\[\]\\]+" "&*<0>[]\\..." 
+"[*({<]+" "^&<0>{{(<<*)))" + + +"[-def]+" "abc<0>def-ef-dxyz" # set-start-dash +"[abc[--def]]" E " " + +"[x[&def]]+" "abc<0>def&ghi" # set-start-amp +"[&& is bad at start]" E " " + +"[abc" E " " # set-after-lit +"[def]]" "abcdef" +"[def]]" "abcde<0>f]]" + +"[[def][ghi]]+" "abc]<0>defghi[xyz" # set-after-set +"[[def]ghi]+" "abc]<0>defghi[xyz" +"[[[[[[[[[[[abc]" E " " +"[[abc]\p{Lu}]+" "def<0>abcABCxyz" + +"[d-f]+" "abc<0>defghi" # set-after-range +"[d-f[x-z]]+" "abc<0>defxyzzzgw" +"[\s\d]+" "abc<0> 123def" +"[d-f\d]+" "abc<0>def123ghi" +"[d-fr-t]+" "abc<0>defrstuvw" + +"[abc--]" E " " # set-after-op +"[[def]&&]" E " " +"[-abcd---]+" "<0>abc--" #[-abcd]--[-] +"[&abcd&&&ac]+" "b<0>ac&&cad" #[&abcd]&&[&ac] + +"[[abcd]&[ac]]+" "b<0>acacd" # set-set-amp +"[[abcd]&&[ac]]+" "b<0>acacd" +"[[abcd]&&ac]+" "b<0>acacd" +"[[abcd]&ac]+" "<0>bacacd&&&" + +"[abcd&[ac]]+" "<0>bacacd&&&" #set-lit-amp +"[abcd&&[ac]]+" "b<0>acacd" +"[abcd&&ac]+" "b<0>acacd" + +"[[abcd]-[ac]]+" "a<0>bdbdc" # set-set-dash +"[[abcd]--[ac]]+" "a<0>bdbdc" +"[[abcd]--ac]+" "a<0>bdbdc" +"[[abcd]-ac]+" "<0>bacacd---" + +"[a-d--[b-c]]+" "b<0>adadc" # set-range-dash +"[a-d--b-c]+" "b<0>adadc" +"[a-d-[b-c]]+" "<0>bad-adc" +"[a-d-b-c]+" "<0>bad-adc" +"[\w--[b-c]]+" "b<0>adadc" +"[\w--b-c]+" "b<0>adadc" +"[\w-[b-c]]+" "<0>bad-adc" +"[\w-b-c]+" "<0>bad-adc" + +"[a-d&&[b-c]]+" "a<0>bcbcd" # set-range-amp +"[a-d&&b-c]+" "a<0>bcbcd" +"[a-d&[b-c]]+" "<0>abc&bcd" +"[a-d&b-c]+" "<0>abc&bcd" + +"[abcd--bc]+" "b<0>addac" # set-lit-dash +"[abcd--[bc]]+" "b<0>addac" +"[abcd-[bc]]+" "<0>bad--dacxyz" +"[abcd-]+" "<0>bad--dacxyz" + +"[abcd-\s]+" E "xyz<0>abcd --xyz" # set-lit-dash-esc +"[abcd-\N{LATIN SMALL LETTER G}]+" "xyz-<0>abcdefghij-" +"[bcd-\{]+" "a<0>bcdefyz{|}" + +"[\p{Ll}]+" "ABC<0>abc^&*&" # set-escape +"[\P{Ll}]+" "abc<0>ABC^&*&xyz" +"[\N{LATIN SMALL LETTER Q}]+" "mnop<0>qqqrst" +"[\sa]+" "cb<0>a a (*&" +"[\S]+" " <0>hello " +"[\w]+" " <0>hello_world! 
" +"[\W]+" "a<0> *$%#,hello " +"[\d]+" "abc<0>123def" +"[\D]+" "123<0>abc567" +"[\$\#]+" "123<0>$#$#\\" + +# +# Try each of the Java compatibility properties. +# These are checked here, while normal Unicode properties aren't, because +# these Java compatibility properties are implemented directly by regexp, while other +# properties are handled by ICU's Property and UnicodeSet APIs. +# +# These tests are only to verify that the names are recognized and the +# implementation isn't dead. They are not intended to verify that the +# function definitions are 100% correct. +# +"[:InBasic Latin:]+" "ΓΔΕΖΗΘ<0>hello, world.ニヌネノハバパ" +"[:^InBasic Latin:]+" "<0>ΓΔΕΖΗΘhello, world.ニヌネノハバパ" +"\p{InBasicLatin}+" "ΓΔΕΖΗΘ<0>hello, world.ニヌネノハバパ" +"\P{InBasicLatin}+" "<0>ΓΔΕΖΗΘhello, world.ニヌネノハバパ" +"\p{InGreek}+" "<0>ΓΔΕΖΗΘhello, world.ニヌネノハバパ" +"\p{InCombining Marks for Symbols}" "<0>\u20d0" +"\p{Incombiningmarksforsymbols}" "<0>\u20d0" + + +"\p{javaDefined}+" "\uffff<0>abcd\U00045678" +"\p{javaDigit}+" "abc<0>1234xyz" +"\p{javaIdentifierIgnorable}+" "abc<0>\u0000\u000e\u009fxyz" +"\p{javaISOControl}+" "abc<0>\u0000\u000d\u0083xyz" +"\p{javaJavaIdentifierPart}+" "#@!<0>abc123_$;" +"\p{javaJavaIdentifierStart}+" "123\u0301<0>abc$_%^&" +"\p{javaLetter}+" "123<0>abcDEF&*()(" +"\p{javaLetterOrDigit}+" "$%^&*<0>123abcகஙசஜஞ☺♘♚☔☎♬⚄⚡" +"\p{javaLowerCase}+" "ABC<0>def&^%#:=" +"\p{javaMirrored}+" "ab$%<0>(){}[]xyz" +"\p{javaSpaceChar}+" "abc<0> \u00a0\u2028!@#" +"\p{javaSupplementaryCodePoint}+" "abc\uffff<0>\U00010000\U0010ffff\u0000" +"\p{javaTitleCase}+" "abCE<0>Džῌᾨ123" +"\p{javaUnicodeIdentifierStart}+" "123<0>abcⅣ%^&&*" +"\p{javaUnicodeIdentifierPart}+" "%&&^<0>abc123\u0301\u0002..." 
+"\p{javaUpperCase}+" "abc<0>ABC123" +"\p{javaValidCodePoint}+" "<0>\u0000abc\ud800 unpaired \udfff |\U0010ffff" +"\p{javaWhitespace}+" "abc\u00a0\u2007\u202f<0> \u0009\u001c\u001f\u202842" +"\p{all}+" "<0>123\u0000\U0010ffff" +"\P{all}+" "123\u0000\U0010ffff" + +# [:word:] is implemented directly by regexp. Not a java compat property, but PCRE and others. + +"[:word:]+" ".??$<0>abc123ΓΔΕΖΗ_%%%" +"\P{WORD}+" "<0>.??$abc123ΓΔΕΖΗ_%%%" + +# +# Errors on unrecognized ASCII letter escape sequences. +# +"[abc\Y]+" "<0>abcY" +"[abc\Y]+" eE "<0>abcY" + +"(?:a|b|c|\Y)+" "<0>abcY" +"(?:a|b|c|\Y)+" eE "<0>abcY" + +"\Q\Y\E" e "<0>\\Y" + +# +# Reported problem +# +"[a-\w]" E "x" + +# +# Bug 4045 +# +"A*" "<0>AAAA" +"A*" 2 "AAAA<0>" +"A*" 3 "AAAA" +"A*" 4 "AAAA" +"A*" 5 "AAAA" +"A*" 6 "AAAA" +"A*" "<0>" +"A*" 2 "" +"A*" 3 "" +"A*" 4 "" +"A*" 5 "" + +# +# Bug 4046 +# +"(?m)^" "<0>AA\u000dBB\u000dCC\u000d" +"(?m)^" 2 "AA\u000d<0>BB\u000dCC\u000d" +"(?m)^" 3 "AA\u000dBB\u000d<0>CC\u000d" +"(?m)^" 4 "AA\u000dBB\u000dCC\u000d" +"(?m)^" 5 "AA\u000dBB\u000dCC\u000d" +"(?m)^" 6 "AA\u000dBB\u000dCC\u000d" + +"(?m)^" "<0>AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a" +"(?m)^" 2 "AA\u000d\u000a<0>BB\u000d\u000aCC\u000d\u000a" +"(?m)^" 3 "AA\u000d\u000aBB\u000d\u000a<0>CC\u000d\u000a" +"(?m)^" 4 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a" + +# +# Bug 4059 +# +"\w+" "<0>イチロー" +"\b....\b." "<0>イチロー?" + + +# +# Bug 4058 ICU Unicode Set patterns have an odd feature - +# A $ as the last character before the close bracket means match +# a \uffff, which means off the end of the string in transliterators. +# Didn't make sense for regular expressions, and is now fixed. +# +"[\$](P|C|D);" "<0>$<1>P;" +"[$](P|C|D);" "<0>$<1>P;" +"[$$](P|C|D);" "<0>$<1>P;" + +# +# bug 4888 Flag settings lost in some cases. 
+# +"((a){2})|(#)" is "no" +"((a){2})|(#)" is "<0><1>a<2>a#" +"((a){2})|(#)" is "a<0><3>#" + +"((a|b){2})|c" is "<0>c" +"((a|b){2})|c" is "<0>C" +"((a|b){2})|c" s "C" + +# +# bug 5617 ZWJ \u200d shouldn't cause word boundaries +# +".+?\b" "<0> \u0935\u0915\u094D\u200D\u0924\u0947 " +".+?\b" 2 " <0>\u0935\u0915\u094D\u200D\u0924\u0947 " +".+?\b" 3 " \u0935\u0915\u094D\u200D\u0924\u0947 " + +# +# bug 5386 "^.*$" should match empty input +# +"^.*$" "<0>" +"^.*$" m "<0>" +"^.*$" "<0>\n" +"(?s)^.*$" "<0>\n" + +# +# bug 5386 Empty pattern and empty input should match. +# +"" "<0>abc" +"" "<0>" + +# +# bug 5386 Range upper and lower bounds can be equal +# +"[a-a]" "<0>a" + +# +# bug 5386 $* should not fail, should match empty string. +# +"$*" "<0>abc" + +# +# bug 5386 \Q ... \E escaping problem +# +"[a-z\Q-$\E]+" "QE<0>abc-def$." + +# More reported 5386 Java comaptibility failures +# +"[^]*abb]*" "<0>kkkk" +"\xa" "huh" # Java would like to be warned. +"^.*$" "<0>" + +# +# bug 5386 Empty left alternation should produce a zero length match. +# +"|a" "<0>a" +"$|ab" "<0>ab" +"$|ba" "ab<0>" + +# +# bug 5386 Java compatibility for set expressions +# +"[a-z&&[cde]]+" "ab<0>cdefg" + +# +# bug 6019 matches() needs to backtrack and check for a longer match if the +# first match(es) found don't match the entire input. +# +"a?|b" "<0>b" +"a?|b" M "<0>b" +"a?|.*?u|stuff|d" M "<0>stuff" +"a?|.*?(u)|stuff|d" M "<0>stuff<1>u" +"a+?" "<0>aaaaaaaaaaaaa" +"a+?" M "<0>aaaaaaaaaaaaa" + +# +# Bug 7724. Expression to validate zip codes. +# +"(?!0{5})(\d{5})(?!-?0{4})(-?\d{4})?" "<0><1>94040<2>-3344" +"(?!0{5})(\d{5})(?!-?0{4})(-?\d{4})?" "94040-0000" +"(?!0{5})(\d{5})(?!-?0{4})(-?\d{4})?" "00000-3344" + +# +# Bug 8666. Assertion failure on match, bad operand to JMP_SAV_X opcode. +# +"((.??)+|A)*" "<0><1><2>AAAAABBBBBCCCCCDDDDEEEEE" + +# +# Bug 8826. Incorrect results with case insensitive matches. +# +"AS(X)" i "aßx" +"AS.*" i "aßx" # Expansion of sharp s can't split between pattern terms. 
+"ASßS" i "<0>aßß" # All one literal string, does match. +"ASß{1}S" i "aßß" # Pattern with terms, no match. +"aßx" i "<0>assx" +"aßx" i "<0>ASSX" +"aßx" i "<0>aßx" +"ASS(.)" i "<0>aß<1>x" + +# Case Insensitive, probe some corner cases. +"ass+" i "aß" # Second 's' in pattern is qualified, can't combine with first. +"as+" i "aß" +"aßs" i "as" # Can't match half of a ß +"aß+" i "<0>asssssssss" +"aß+" i "<0>assßSssSSSs" +"a(ß?)+" i "<0>assssssss<1>s" +"a(ß?)+" i "<0>a<1>zzzzzzzzs" + +"\U00010400" i "<0>\U00010428" # case folded supplemental code point. + +"sstuff" i "<0>ßtuff" # exercise optimizations on what chars can start a match. +"sstuff" i "s<0>ßtuff" # exercise optimizations on what chars can start a match. +"ßtuff" i "s<0>sstuff" +"ßtuff" i "s<0>Sstuff" + +"a(..)\1" i "<0>A<1>bcBCdef" +"(ß)\1" i "aa<0><1>ssßzz" # Case insensitive back reference +"..(.)\1" i "<0>aa<1>ßss" +"ab(..)\1" i "xx<0>ab<1>ssßss" + +" (ss) ((\1.*)|(.*))" i "<0> <1>ss <2><4>sß" # The back reference 'ss' must not match in 'sß' + +# Bug 9057 +# \u200c and \u200d should be word characters. +# +"\w+" " <0>abc\u200cdef\u200dghi " +"\w+" i " <0>abc\u200cdef\u200dghi " +"[\w]+" " <0>abc\u200cdef\u200dghi " +"[\w]+" i " <0>abc\u200cdef\u200dghi " + +# Bug 9283 +# uregex_open fails for look-behind assertion + case-insensitive + +"(ab)?(?<=ab)cd|ef" i "<0><1>abcd" + +# Bug 9719 Loop breaking on (zero length match){3,} (unlimited upper bound). +# + +"(?:abc){1,}abc" "<0>abcabcabcabcabc" +"(?:2*){2,}?a2\z" "<0>2a2" +"(?:2*){2,}?a2\z" "2a3" +"(?:x?+){3,}+yz" "w<0>yz" +"(2*){2,}?a2\\z" "2a3" +"(2*){2,}?a2\\z" "<0>2<1>a2\\z" +"(2*){2,}?a2\z" "<0>2<1>a2" + + +# Bug 10024 +# Incorrect (unbounded) longest match length with {1, 20} style quantifiers. +# Unbounded match is disallowed in look-behind expressions. +# Max match length is used to limit where to check for look-behind matches. 
+ +"(?<=a{1,5})bc" "aaaa<0>bcdef" +"(?<=(?:aa){3,20})bc" "aaaaaa<0>bcdef" +"(?jkl" +"(?<=a{11})bc" "aaaaaaaaaaa<0>bc" +"(?<=a{11})bc" "aaaaaaaaaabc" +"(?<=a{1,})bc" E "aaaa<0>bcdef" # U_REGEX_LOOK_BEHIND_LIMIT error. +"(?<=(?:){11})bc" "<0>bc" # Empty (?:) expression. + +# Bug 10835 +# Match Start Set not being correctly computed for case insensitive patterns. +# (Test here is to dump the compiled pattern & manually check the start set.) + +"(private|secret|confidential|classified|restricted)" i "hmm, <0><1>Classified stuff" +"(private|secret|confidential|classified|restricted)" "hmm, Classified stuff" + +# Bug 10844 + +"^([\w\d:]+)$" "<0><1>DiesIst1Beispiel:text" +"^([\w\d:]+)$" i "<0><1>DiesIst1Beispiel:text" +"^(\w+\d\w+:\w+)$" "<0><1>DiesIst1Beispiel:text" +"^(\w+\d\w+:\w+)$" i "<0><1>DiesIst1Beispiel:text" + +# Bug 11049 +# Edge cases in find() when pattern match begins with set of code points +# and the match begins at the end of the string. + +"A|B|C" "hello <0>A" +"A|B|C" "hello \U00011234" +"A|B|\U00012345" "hello <0>\U00012345" +"A|B|\U00010000" "hello \ud800" + +# Bug 11369 +# Incorrect optimization of patterns with a zero length quantifier {0} + +"(.|b)(|b){0}\$(?#xxx){3}(?>\D*)" "AAAAABBBBBCCCCCDDDDEEEEE" +"(|b)ab(c)" "<0><1>ab<2>c" +"(|b){0}a{3}(D*)" "<0>aaa<2>" +"(|b){0,1}a{3}(D*)" "<0><1>aaa<2>" +"((|b){0})a{3}(D*)" "<0><1>aaa<3>" + +# Bug 11370 +# Max match length computation of look-behind expression gives result that is too big to fit in the +# in the 24 bit operand portion of the compiled code. Expressions should fail to compile +# (Look-behind match length must be bounded. This case is treated as unbounded, an error.) + +"(?pre<1>\ud800post\ud800 fin" +"pre(.)post\1" i "pre\ud800post\ud800\udc00" # case insensiteve backrefs take a different code path +"pre(.)post\1" i "<0>pre<1>\ud800post\ud800 fin" + +# Bug 11554 +# +# Maximum match length computation was assuming UTF-16. +# Used in look-behind matches to constrain how far back to look. 
+ +"(?<=a\x{100000})spam" "***a\x{100000}<0>spam**" +"(?<=aą)spam" "**aą<0>spam**" +"(?<=ąabc)spam" "**ąabc<0>spam**" + +"(?<=a\x{100000})spam" "***a\x{100001}spam**" +"(?<=aą)spam" "**bąspam**" +"(?<=ąabc)spam" "**ąabxspam**" + +# with negative look-behind + +"(?spam**" +"(?spam**" +"(?spam**" + +# Bug #12930 +# +# Minimum Match Length computation, int32_t overflow on an empty set in the pattern. +# The empty set, with no match possible, has a min match length of INT32_MAX. +# Was incremented subsequently. Caused assertion failure on pattern compile. + +"[^\u0000-\U0010ffff]bc?" "bc no match" +"[^\u0000-\U0010ffff]?bc?" "<0>bc has a match" + +# Bug #12160 Hit End behavior after find fails to find. +# To match Java, should be true if find fails to find. +# +"abc" Z "<0>abc abc abc xyz" +"abc" Z2 "abc <0>abc abc xyz" +"abc" Z3 "abc abc <0>abc xyz" +"abc" z4 "abc abc abc xyz" + +# Bug #13844 Verify that non-standard Java property names are recognized. +"[\p{IsAlphabetic}]" " <0>A" +"[\P{IsAlphabetic}]" "A<0> " +"[\p{IsIdeographic}]" "A<0>〆" +"[\P{IsIdeographic}]" "〆<0>A" +"[\p{IsLetter}]" " <0>A" +"[\P{IsLetter}]" "A<0> " +"[\p{Letter}]" " <0>A" +"[\p{IsLowercase}]" "A<0>a" +"[\P{IsLowercase}]" "a<0>A" +"[\p{IsUppercase}]" "a<0>A" +"[\P{IsUppercase}]" "A<0>a" +"[\p{IsTitlecase}]" "D<0>Dz" +"[\P{IsTitlecase}]" "Dz<0>D" +"[\p{IsPunctuation}]" " <0>&" +"[\P{IsPunctuation}]" "&<0> " +"[\p{IsControl}]" " <0>\x{82}" +"[\P{IsControl}]" "\x{82}<0> " +"[\p{IsWhite_Space}]" "x<0> " +"[\P{IsWhite_Space}]" " <0>x" +"[\p{IsDigit}]" " <0>4" +"[\P{IsDigit}]" "4<0> " +"[\p{IsHex_Digit}]" " <0>F" +"[\P{IsHex_Digit}]" "F<0> " +"[\p{IsJoin_Control}]" " <0>\x{200d}" +"[\P{IsJoin_Control}]" "\x{200d}<0> " +"[\p{IsNoncharacter_Code_Point}]" "A<0>\x{5fffe}" +"[\p{IsAssigned}]" "\x{10ffff}<0>a" +"[\P{IsAssigned}]" "a<0>\x{10ffff}" + +"[\p{InBasic Latin}]" "〆<0>A" +"[\p{InBasicLatin}]" "〆<0>A" +"[\p{InBasic-Latin}]" "〆<0>A" # ICU accepts '-'; Java does not. 
+"[\p{InBasic_Latin}]" "〆<0>A" +"[\p{Inbasiclatin}]" "〆<0>A" +"[\p{inbasiclatin}]" E "〆<0>A" # "In" must be cased as shown. Property name part is case insensitive. +"[\p{InCombining_Marks_for_Symbols}]" "a<0>\x{20DD}" # COMBINING ENCLOSING CIRCLE + +"[\p{all}]*" "<0>\x{00}abc\x{10ffff}" +"[\p{javaBadProperty}]" E "whatever" +"[\p{IsBadProperty}]" E "whatever" +"[\p{InBadBlock}]" E "whatever" +"[\p{In}]" E "whatever" +"[\p{Is}]" E "whatever" +"[\p{java}]" "x<0>ꦉ" # Note: "java" is a valid script code. + +"[\p{javaLowerCase}]+" "A<0>a" +"[\p{javaLowerCase}]+" i "<0>Aa" +"[\P{javaLowerCase}]+" "<0>Aa" +"[\P{javaLowerCase}]+" i "Aa" # No Match because case fold of the set happens first, then negation. + # JDK is not case insensitive w named properties, even though + # the insensitive match flag is set. A JDK bug? + +"[a-z]+" i "<0>Aa" # Matches JDK behavior. +"[^a-z]+" i "Aa" # (no match) which is JDK behavior. Case fold first, then negation. + +# Bug 20385. Assertion failure while compiling a negative look-behind expression consisting of a set with +# no contents. Meaning the [set] can never match. There is no syntax to directly express +# an empty set, so generate it by negating (^) a set of all code points. +# Also check empty sets in other contexts. + +"(?abc" + +"(?abc" +"x(?xabc" +"x(?xabc" +"x(?xabc" + +"[^\u0000-\U0010ffff]" "a" +"[^[^\u0000-\U0010ffff]]" "<0>a" + +"This is a string with (?:one |two |three )endings" "<0>This is a string with two endings" + +# Bug ICU-20544. Similar to 20385, above. Assertion failure with a negative look-behind assertion containing +# a set with no contents. Look-behind pattern includes more than just the empty set. + +"(?abc" # note: first 'ⰿ' is \u2c3f, hence empty set. +"(?abc" +"(?<=[^[^]]†)" "abc" # Problem also exists w positive look-behind + +# Bug ICU-20391. Crash in computation of minimum match length with nested look-around patterns. 
+# +"(?<=(?<=((?=)){0}+)" E "aaa" +"(?<=(?<=((?=)){0}+))" "<0>" +"(?<=c(?<=b((?=a)){1}+))" "aaa" +"abc(?=de(?=f))...g" "<0>abcdefg" +"abc(?=de(?=f))...g" "abcdxfg" + +# Bug ICU-20618 Assertion failure with nested look-around expressions. +# +"(?<=(?<=b?(?=a)))" "hello, world." + +# Bug ICU-20939 +# Incorrect word \b boundaries w UTF-8 input and non-ASCII text +# +"(?w)\b" v2 "äää<0> äää" + +# Bug ICU-21492 Assertion failure with nested look-around expressions. +# +"(?<=(?:(?<=(?:(?<=(?:(?<=)){2})){3})){4}" E "<0>" # orig failure from bug report, w mismatched parens. +"(?:(?<=(?:(?<=)){2}))" "<0>" # Simplified case, with a valid pattern. + +# Random debugging, Temporary +# + +# +# Regexps from http://www.regexlib.com +# +"^[a-zA-Z]{1,2}[0-9][0-9A-Za-z]{0,1} {0,1}[0-9][A-Za-z]{2}$" G "<0>G1 1AA" +"^[a-zA-Z]{1,2}[0-9][0-9A-Za-z]{0,1} {0,1}[0-9][A-Za-z]{2}$" G "<0>EH10 2QQ" +"^[a-zA-Z]{1,2}[0-9][0-9A-Za-z]{0,1} {0,1}[0-9][A-Za-z]{2}$" G "<0>SW1 1ZZ" +"^[a-zA-Z]{1,2}[0-9][0-9A-Za-z]{0,1} {0,1}[0-9][A-Za-z]{2}$" "G111 1AA" +"^[a-zA-Z]{1,2}[0-9][0-9A-Za-z]{0,1} {0,1}[0-9][A-Za-z]{2}$" "X10 WW" +"^[a-zA-Z]{1,2}[0-9][0-9A-Za-z]{0,1} {0,1}[0-9][A-Za-z]{2}$" "DDD 5WW" +#"^[\w\-]+(?:\.[\w\-]+)*@(?:[\w\-]+\.)+[a-zA-Z]{2,7}$" dG "<0>joe.tillis@unit.army.mil" # TODO: \w in pattern +#"^[\w-]+(?:\.[\w-]+)*@(?:[\w-]+\.)+[a-zA-Z]{2,7}$" G "<0>jack_rabbit@slims.com" # TODO: \w in pattern +#"^[\w-]+(?:\.[\w-]+)*@(?:[\w-]+\.)+[a-zA-Z]{2,7}$" G "<0>foo99@foo.co.uk" # TODO: \w in pattern +#"^[\w-]+(?:\.[\w-]+)*@(?:[\w-]+\.)+[a-zA-Z]{2,7}$" "find_the_mistake.@foo.org" # TODO: \w in pattern +#"^[\w-]+(?:\.[\w-]+)*@(?:[\w-]+\.)+[a-zA-Z]{2,7}$" ".prefix.@some.net" +"^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$" G "<0>asmith@mactec.com" +"^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$" G "<0>foo12@foo.edu" 
+"^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$" G "<0>bob.smith@foo.tv" +"^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$" "joe" +"^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$" "@foo.com" +"^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$" "a@a" +"^\d{1,2}\/\d{1,2}\/\d{4}$" G "<0>4/1/2001" +"^\d{1,2}\/\d{1,2}\/\d{4}$" G "<0>12/12/2001" +"^\d{1,2}\/\d{1,2}\/\d{4}$" G "<0>55/5/3434" +"^\d{1,2}\/\d{1,2}\/\d{4}$" "1/1/01" +"^\d{1,2}\/\d{1,2}\/\d{4}$" "12 Jan 01" +"^\d{1,2}\/\d{1,2}\/\d{4}$" "1-1-2001" +"^(?:(?:(?:0?[13578]|1[02])(\/|-|\.)31)\1|(?:(?:0?[1,3-9]|1[0-2])(\/|-|\.)(?:29|30)\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:0?2(\/|-|\.)29\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:(?:0?[1-9])|(?:1[0-2]))(\/|-|\.)(?:0?[1-9]|1\d|2[0-8])\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" G "<0>01.1.02" +"^(?:(?:(?:0?[13578]|1[02])(\/|-|\.)31)\1|(?:(?:0?[1,3-9]|1[0-2])(\/|-|\.)(?:29|30)\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:0?2(\/|-|\.)29\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:(?:0?[1-9])|(?:1[0-2]))(\/|-|\.)(?:0?[1-9]|1\d|2[0-8])\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" G "<0>11-30-2001" +"^(?:(?:(?:0?[13578]|1[02])(\/|-|\.)31)\1|(?:(?:0?[1,3-9]|1[0-2])(\/|-|\.)(?:29|30)\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:0?2(\/|-|\.)29\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:(?:0?[1-9])|(?:1[0-2]))(\/|-|\.)(?:0?[1-9]|1\d|2[0-8])\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" G "<0>2/29/2000" 
+"^(?:(?:(?:0?[13578]|1[02])(\/|-|\.)31)\1|(?:(?:0?[1,3-9]|1[0-2])(\/|-|\.)(?:29|30)\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:0?2(\/|-|\.)29\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:(?:0?[1-9])|(?:1[0-2]))(\/|-|\.)(?:0?[1-9]|1\d|2[0-8])\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" "02/29/01" +"^(?:(?:(?:0?[13578]|1[02])(\/|-|\.)31)\1|(?:(?:0?[1,3-9]|1[0-2])(\/|-|\.)(?:29|30)\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:0?2(\/|-|\.)29\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:(?:0?[1-9])|(?:1[0-2]))(\/|-|\.)(?:0?[1-9]|1\d|2[0-8])\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" "13/01/2002" +"^(?:(?:(?:0?[13578]|1[02])(\/|-|\.)31)\1|(?:(?:0?[1,3-9]|1[0-2])(\/|-|\.)(?:29|30)\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:0?2(\/|-|\.)29\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:(?:0?[1-9])|(?:1[0-2]))(\/|-|\.)(?:0?[1-9]|1\d|2[0-8])\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" "11/00/02" +"^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$" G "<0>127.0.0.1" +"^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$" G "<0>255.255.255.0" +"^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$" G "<0>192.168.0.1" 
+"^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$" "1200.5.4.3" +"^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$" "abc.def.ghi.jkl" +"^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$" "255.foo.bar.1" +"(AUX|PRN|NUL|COM\d|LPT\d)+\s*$" G "<0>COM1" +"(AUX|PRN|NUL|COM\d|LPT\d)+\s*$" G "<0>AUX" +"(AUX|PRN|NUL|COM\d|LPT\d)+\s*$" G "<0>LPT1" +"(AUX|PRN|NUL|COM\d|LPT\d)+\s*$" "image.jpg" +"(AUX|PRN|NUL|COM\d|LPT\d)+\s*$" "index.html" +"(AUX|PRN|NUL|COM\d|LPT\d)+\s*$" "readme.txt" +"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" G "<0>29/02/1972" +"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" G "<0>5-9-98" 
+"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" G "<0>10-11-2002" +"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" "29/02/2003" +"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" "12/13/2002" +"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" "1-1-1500" +"^(user=([a-z0-9]+,)*(([a-z0-9]+){1});)?(group=([a-z0-9]+,)*(([a-z0-9]+){1});)?(level=[0-9]+;)?$" G "<0>user=foo,bar,quux;group=manager,admin;level=100;" +"^(user=([a-z0-9]+,)*(([a-z0-9]+){1});)?(group=([a-z0-9]+,)*(([a-z0-9]+){1});)?(level=[0-9]+;)?$" G "<0>group=nobody;level=24;" +"^(user=([a-z0-9]+,)*(([a-z0-9]+){1});)?(group=([a-z0-9]+,)*(([a-z0-9]+){1});)?(level=[0-9]+;)?$" "user=foo" +"^(user=([a-z0-9]+,)*(([a-z0-9]+){1});)?(group=([a-z0-9]+,)*(([a-z0-9]+){1});)?(level=[0-9]+;)?$" "blahh" +"^(\(?\+?[0-9]*\)?)?[0-9_\- \(\)]*$" G "<0>(+44)(0)20-12341234" +"^(\(?\+?[0-9]*\)?)?[0-9_\- \(\)]*$" G "<0>02012341234" 
+"^(\(?\+?[0-9]*\)?)?[0-9_\- \(\)]*$" G "<0>+44 (0) 1234-1234" +"^(\(?\+?[0-9]*\)?)?[0-9_\- \(\)]*$" "(44+)020-12341234" +"^(\(?\+?[0-9]*\)?)?[0-9_\- \(\)]*$" "12341234(+020)" +"\b(\w+)\s+\1\b" G "<0>Tell the the preacher" +"\b(\w+)\s+\1\b" G "<0>some some" +"\b(\w+)\s+\1\b" G "<0>hubba hubba" +"\b(\w+)\s+\1\b" "once an annual report" +"\b(\w+)\s+\1\b" "mandate dated submissions" +"\b(\w+)\s+\1\b" "Hubba hubba" +"(^\+[0-9]{2}|^\+[0-9]{2}\(0\)|^\(\+[0-9]{2}\)\(0\)|^00[0-9]{2}|^0)([0-9]{9}$|[0-9\-\s]{10}$)" G "<0>+31235256677" +"(^\+[0-9]{2}|^\+[0-9]{2}\(0\)|^\(\+[0-9]{2}\)\(0\)|^00[0-9]{2}|^0)([0-9]{9}$|[0-9\-\s]{10}$)" G "<0>+31(0)235256677" +"(^\+[0-9]{2}|^\+[0-9]{2}\(0\)|^\(\+[0-9]{2}\)\(0\)|^00[0-9]{2}|^0)([0-9]{9}$|[0-9\-\s]{10}$)" G "<0>023-5256677" +"(^\+[0-9]{2}|^\+[0-9]{2}\(0\)|^\(\+[0-9]{2}\)\(0\)|^00[0-9]{2}|^0)([0-9]{9}$|[0-9\-\s]{10}$)" "+3123525667788999" +"(^\+[0-9]{2}|^\+[0-9]{2}\(0\)|^\(\+[0-9]{2}\)\(0\)|^00[0-9]{2}|^0)([0-9]{9}$|[0-9\-\s]{10}$)" "3123525667788" +"(^\+[0-9]{2}|^\+[0-9]{2}\(0\)|^\(\+[0-9]{2}\)\(0\)|^00[0-9]{2}|^0)([0-9]{9}$|[0-9\-\s]{10}$)" "232-2566778" +"^[-+]?\d*\.?\d*$" G "<0>123" +"^[-+]?\d*\.?\d*$" G "<0>+3.14159" +"^[-+]?\d*\.?\d*$" G "<0>-3.14159" +"^[-+]?\d*\.?\d*$" "abc" +"^[-+]?\d*\.?\d*$" "3.4.5" +"^[-+]?\d*\.?\d*$" "$99.95" +"^\$?([1-9]{1}[0-9]{0,2}(\,[0-9]{3})*(\.[0-9]{0,2})?|[1-9]{1}[0-9]{0,}(\.[0-9]{0,2})?|0(\.[0-9]{0,2})?|(\.[0-9]{1,2})?)$" G "<0>$1,234.50" +"^\$?([1-9]{1}[0-9]{0,2}(\,[0-9]{3})*(\.[0-9]{0,2})?|[1-9]{1}[0-9]{0,}(\.[0-9]{0,2})?|0(\.[0-9]{0,2})?|(\.[0-9]{1,2})?)$" G "<0>$0.70" +"^\$?([1-9]{1}[0-9]{0,2}(\,[0-9]{3})*(\.[0-9]{0,2})?|[1-9]{1}[0-9]{0,}(\.[0-9]{0,2})?|0(\.[0-9]{0,2})?|(\.[0-9]{1,2})?)$" G "<0>.7" +"^\$?([1-9]{1}[0-9]{0,2}(\,[0-9]{3})*(\.[0-9]{0,2})?|[1-9]{1}[0-9]{0,}(\.[0-9]{0,2})?|0(\.[0-9]{0,2})?|(\.[0-9]{1,2})?)$" "$0,123.50" +"^\$?([1-9]{1}[0-9]{0,2}(\,[0-9]{3})*(\.[0-9]{0,2})?|[1-9]{1}[0-9]{0,}(\.[0-9]{0,2})?|0(\.[0-9]{0,2})?|(\.[0-9]{1,2})?)$" "$00.5" +"^[A-Z]{2}[0-9]{6}[A-DFM]{1}$" G 
"<0>AB123456D" +"^[A-Z]{2}[0-9]{6}[A-DFM]{1}$" G "<0>AB123456F" +"^[A-Z]{2}[0-9]{6}[A-DFM]{1}$" G "<0>AB123456M" +"^[A-Z]{2}[0-9]{6}[A-DFM]{1}$" "AB123456E" +"^[A-Z]{2}[0-9]{6}[A-DFM]{1}$" "ab123456d" +#"(http|ftp|https):\/\/[\w]+(.[\w]+)([\w\-\.,@?^=%&:/~\+#]*[\w\-\@?^=%&/~\+#])?" G "<0>http://regxlib.com/Default.aspx" # TODO: \w in pattern +#"(http|ftp|https):\/\/[\w]+(.[\w]+)([\w\-\.,@?^=%&:/~\+#]*[\w\-\@?^=%&/~\+#])?" G "<0>http://electronics.cnet.com/electronics/0-6342366-8-8994967-1.html" # TODO: \w in pattern +#"(http|ftp|https):\/\/[\w]+(.[\w]+)([\w\-\.,@?^=%&:/~\+#]*[\w\-\@?^=%&/~\+#])?" "www.yahoo.com" # TODO: \w in pattern +"^[0-9]{4}\s{0,1}[a-zA-Z]{2}$" G "<0>2034AK" +"^[0-9]{4}\s{0,1}[a-zA-Z]{2}$" G "<0>2034 AK" +"^[0-9]{4}\s{0,1}[a-zA-Z]{2}$" G "<0>2034 ak" +"^[0-9]{4}\s{0,1}[a-zA-Z]{2}$" "2034 AK" +"^[0-9]{4}\s{0,1}[a-zA-Z]{2}$" "321321 AKSSAA" +"((\d{2})|(\d))\/((\d{2})|(\d))\/((\d{4})|(\d{2}))" G "<0>4/5/91" +"((\d{2})|(\d))\/((\d{2})|(\d))\/((\d{4})|(\d{2}))" G "<0>04/5/1991" +"((\d{2})|(\d))\/((\d{2})|(\d))\/((\d{4})|(\d{2}))" G "<0>4/05/89" +"((\d{2})|(\d))\/((\d{2})|(\d))\/((\d{4})|(\d{2}))" "4/5/1" +#"(^|\s|\()((([1-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-]((2[0-9]){1}|(3[01]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:])|(^|\s|\()((([0-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-](([11-31]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:|$|\>])){1}){1}){1}){1}" G "<0>01/01/2001 " #TODO - \s in pattern. 
+"(^|\s|\()((([1-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-]((2[0-9]){1}|(3[01]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:])|(^|\s|\()((([0-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-](([11-31]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:|$|\>])){1}){1}){1}){1}" G "<0>01-01-2001:" +"(^|\s|\()((([1-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-]((2[0-9]){1}|(3[01]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:])|(^|\s|\()((([0-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-](([11-31]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:|$|\>])){1}){1}){1}){1}" G "<0>(1-1-01)" +"(^|\s|\()((([1-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-]((2[0-9]){1}|(3[01]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:])|(^|\s|\()((([0-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-](([11-31]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:|$|\>])){1}){1}){1}){1}" "13/1/2001" +"(^|\s|\()((([1-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-]((2[0-9]){1}|(3[01]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:])|(^|\s|\()((([0-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-](([11-31]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:|$|\>])){1}){1}){1}){1}" "1-32-2001" +"(^|\s|\()((([1-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-]((2[0-9]){1}|(3[01]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:])|(^|\s|\()((([0-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-](([11-31]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:|$|\>])){1}){1}){1}){1}" "1-1-1801" +"^\d{3}\s?\d{3}$" G "<0>400 099" +"^\d{3}\s?\d{3}$" G "<0>400099" +"^\d{3}\s?\d{3}$" G "<0>400050" +"^\d{3}\s?\d{3}$" 
"2345678" +"^\d{3}\s?\d{3}$" "12345" +"^\d{3}\s?\d{3}$" "asdf" +"^\D?(\d{3})\D?\D?(\d{3})\D?(\d{4})$" G "<0>(111) 222-3333" +"^\D?(\d{3})\D?\D?(\d{3})\D?(\d{4})$" G "<0>1112223333" +"^\D?(\d{3})\D?\D?(\d{3})\D?(\d{4})$" G "<0>111-222-3333" +"^\D?(\d{3})\D?\D?(\d{3})\D?(\d{4})$" "11122223333" +"^\D?(\d{3})\D?\D?(\d{3})\D?(\d{4})$" "11112223333" +"^\D?(\d{3})\D?\D?(\d{3})\D?(\d{4})$" "11122233333" +"^#?([a-f]|[A-F]|[0-9]){3}(([a-f]|[A-F]|[0-9]){3})?$" G "<0>#00ccff" +"^#?([a-f]|[A-F]|[0-9]){3}(([a-f]|[A-F]|[0-9]){3})?$" G "<0>#039" +"^#?([a-f]|[A-F]|[0-9]){3}(([a-f]|[A-F]|[0-9]){3})?$" G "<0>ffffcc" +"^#?([a-f]|[A-F]|[0-9]){3}(([a-f]|[A-F]|[0-9]){3})?$" "blue" +"^#?([a-f]|[A-F]|[0-9]){3}(([a-f]|[A-F]|[0-9]){3})?$" "0x000000" +"^#?([a-f]|[A-F]|[0-9]){3}(([a-f]|[A-F]|[0-9]){3})?$" "#ff000" +"^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$" G "<0>01:23:45:67:89:ab" +"^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$" G "<0>01:23:45:67:89:AB" +"^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$" G "<0>fE:dC:bA:98:76:54" +"^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$" "01:23:45:67:89:ab:cd" +"^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$" "01:23:45:67:89:Az" +"^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$" "01:23:45:56:" +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*$" G "<0>http://www.blah.com/~joe" +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*$" G "<0>ftp://ftp.blah.co.uk:2828/blah%20blah.gif" +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*$" G "<0>https://blah.gov/blah-blah.as" +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*$" "www.blah.com" +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*$" 
"http://www.blah.com/I have spaces!" +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*$" "ftp://blah_underscore/[nope]" +"^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2})$|^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2}\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" G "<0>12/01/2002" +"^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2})$|^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2}\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" G "<0>12/01/2002 12:32:10" +"^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2})$|^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2}\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" "32/12/2002" +"^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2})$|^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2}\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" "12/13/2001" +"^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2})$|^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2}\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" "12/02/06" +"^[0-9](\.[0-9]+)?$" G "<0>1.2345" +"^[0-9](\.[0-9]+)?$" G "<0>0.00001" +"^[0-9](\.[0-9]+)?$" G "<0>7" +"^[0-9](\.[0-9]+)?$" "12.2" +"^[0-9](\.[0-9]+)?$" "1.10.1" +"^[0-9](\.[0-9]+)?$" "15.98" +"^(?:[mM]{1,3})?(?:(?:[cC][dDmM])|(?:[dD]?(?:[cC]{1,3})?))?[lL]?(([xX])(?:\2{1,2}|[lL]|[cC])?)?((([iI])((\5{1,2})|[vV]|[xX]|[lL])?)|([vV]?([iI]{1,3})?))?$" G "<0>III" +"^(?:[mM]{1,3})?(?:(?:[cC][dDmM])|(?:[dD]?(?:[cC]{1,3})?))?[lL]?(([xX])(?:\2{1,2}|[lL]|[cC])?)?((([iI])((\5{1,2})|[vV]|[xX]|[lL])?)|([vV]?([iI]{1,3})?))?$" G "<0>xiv" +"^(?:[mM]{1,3})?(?:(?:[cC][dDmM])|(?:[dD]?(?:[cC]{1,3})?))?[lL]?(([xX])(?:\2{1,2}|[lL]|[cC])?)?((([iI])((\5{1,2})|[vV]|[xX]|[lL])?)|([vV]?([iI]{1,3})?))?$" G "<0>MCMLXLIX" +"^(?:[mM]{1,3})?(?:(?:[cC][dDmM])|(?:[dD]?(?:[cC]{1,3})?))?[lL]?(([xX])(?:\2{1,2}|[lL]|[cC])?)?((([iI])((\5{1,2})|[vV]|[xX]|[lL])?)|([vV]?([iI]{1,3})?))?$" "iiV" 
+"^(?:[mM]{1,3})?(?:(?:[cC][dDmM])|(?:[dD]?(?:[cC]{1,3})?))?[lL]?(([xX])(?:\2{1,2}|[lL]|[cC])?)?((([iI])((\5{1,2})|[vV]|[xX]|[lL])?)|([vV]?([iI]{1,3})?))?$" "MCCM" +"^(?:[mM]{1,3})?(?:(?:[cC][dDmM])|(?:[dD]?(?:[cC]{1,3})?))?[lL]?(([xX])(?:\2{1,2}|[lL]|[cC])?)?((([iI])((\5{1,2})|[vV]|[xX]|[lL])?)|([vV]?([iI]{1,3})?))?$" "XXXX" +"^[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?$" G "<0>123" +"^[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?$" G "<0>-123.35" +"^[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?$" G "<0>-123.35e-2" +"^[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?$" "abc" +"^[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?$" "123.32e" +"^[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?$" "123.32.3" +"^[a-zA-Z]+(([\'\,\.\- ][a-zA-Z ])?[a-zA-Z]*)*$" G "<0>T.F. Johnson" +"^[a-zA-Z]+(([\'\,\.\- ][a-zA-Z ])?[a-zA-Z]*)*$" G "<0>John O'Neil" +"^[a-zA-Z]+(([\'\,\.\- ][a-zA-Z ])?[a-zA-Z]*)*$" G "<0>Mary-Kate Johnson" +"^[a-zA-Z]+(([\'\,\.\- ][a-zA-Z ])?[a-zA-Z]*)*$" "sam_johnson" +"^[a-zA-Z]+(([\'\,\.\- ][a-zA-Z ])?[a-zA-Z]*)*$" "Joe--Bob Jones" +"^[a-zA-Z]+(([\'\,\.\- ][a-zA-Z ])?[a-zA-Z]*)*$" "dfjsd0rd" +"^(20|21|22|23|[0-1]\d)[0-5]\d$" G "<0>1200" +"^(20|21|22|23|[0-1]\d)[0-5]\d$" G "<0>1645" +"^(20|21|22|23|[0-1]\d)[0-5]\d$" G "<0>2359" +"^(20|21|22|23|[0-1]\d)[0-5]\d$" "2400" +"^(20|21|22|23|[0-1]\d)[0-5]\d$" "asbc" +"^(20|21|22|23|[0-1]\d)[0-5]\d$" "12:45" +/<[^>]*\n?.*=("|')?(.*\.jpg)("|')?.*\n?[^<]*>/ G '<0>' +/<[^>]*\n?.*=("|')?(.*\.jpg)("|')?.*\n?[^<]*>/ G "<0>" +/<[^>]*\n?.*=("|')?(.*\.jpg)("|')?.*\n?[^<]*>/ G "<0>" +/<[^>]*\n?.*=("|')?(.*\.jpg)("|')?.*\n?[^<]*>/ "= img.jpg" +/<[^>]*\n?.*=("|')?(.*\.jpg)("|')?.*\n?[^<]*>/ "img.jpg" +"^(\d{5}-\d{4}|\d{5})$|^([a-zA-Z]\d[a-zA-Z] \d[a-zA-Z]\d)$" G "<0>78754" +"^(\d{5}-\d{4}|\d{5})$|^([a-zA-Z]\d[a-zA-Z] \d[a-zA-Z]\d)$" G "<0>78754-1234" +"^(\d{5}-\d{4}|\d{5})$|^([a-zA-Z]\d[a-zA-Z] \d[a-zA-Z]\d)$" G "<0>G3H 6A3" +"^(\d{5}-\d{4}|\d{5})$|^([a-zA-Z]\d[a-zA-Z] \d[a-zA-Z]\d)$" "78754-12aA" +"^(\d{5}-\d{4}|\d{5})$|^([a-zA-Z]\d[a-zA-Z] \d[a-zA-Z]\d)$" "7875A" 
+"^(\d{5}-\d{4}|\d{5})$|^([a-zA-Z]\d[a-zA-Z] \d[a-zA-Z]\d)$" "g3h6a3" +#"^([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))$" G "<0>bob@somewhere.com" # TODO: \w in pattern +#"^([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))$" G "<0>bob.jones@[1.1.1.1]" +#"^([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))$" G "<0>bob@a.b.c.d.info" # TODO: \w in pattern +#"^([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))$" "bob@com" # TODO: \w in pattern +#"^([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))$" "bob.jones@some.where" # TODO: \w in pattern +#"^([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))$" "bob@1.1.1.123" # TODO: \w in pattern +#"^(([-\w \.]+)|(""[-\w \.]+"") )?<([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))>$" G "<0>" # TODO: \w in pattern +#"^(([-\w \.]+)|(""[-\w \.]+"") )?<([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))>$" G "<0>bob A. jones " # TODO: \w in pattern +#"^(([-\w \.]+)|(""[-\w \.]+"") )?<([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))>$" G "<0>bob A. jones " # TODO: \w in pattern +#"^(([-\w \.]+)|(""[-\w \.]+"") )?<([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))>$" "ab@cd.ef" # TODO: \w in pattern +#"^(([-\w \.]+)|(""[-\w \.]+"") )?<([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))>$" ""bob A. jones " # TODO: \w in pattern +#"^(([-\w \.]+)|(""[-\w \.]+"") )?<([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))>$" "bob A. 
jones " # TODO: \w in pattern +"^[A-Za-z]{1,2}[0-9A-Za-z]{1,2}[ ]?[0-9]{0,1}[A-Za-z]{2}$" G "<0>SW112LE" +"^[A-Za-z]{1,2}[0-9A-Za-z]{1,2}[ ]?[0-9]{0,1}[A-Za-z]{2}$" G "<0>SW11 2LE" +"^[A-Za-z]{1,2}[0-9A-Za-z]{1,2}[ ]?[0-9]{0,1}[A-Za-z]{2}$" G "<0>CR05LE" +"^[A-Za-z]{1,2}[0-9A-Za-z]{1,2}[ ]?[0-9]{0,1}[A-Za-z]{2}$" "12CR0LE" +"^[A-Za-z]{1,2}[0-9A-Za-z]{1,2}[ ]?[0-9]{0,1}[A-Za-z]{2}$" "12CR 0LE" +"^[A-Za-z]{1,2}[0-9A-Za-z]{1,2}[ ]?[0-9]{0,1}[A-Za-z]{2}$" "SWLE05" +"20\d{2}(-|\/)((0[1-9])|(1[0-2]))(-|\/)((0[1-9])|([1-2][0-9])|(3[0-1]))(T|\s)(([0-1][0-9])|(2[0-3])):([0-5][0-9]):([0-5][0-9])" G "<0>2099-12-31T23:59:59" +"20\d{2}(-|\/)((0[1-9])|(1[0-2]))(-|\/)((0[1-9])|([1-2][0-9])|(3[0-1]))(T|\s)(([0-1][0-9])|(2[0-3])):([0-5][0-9]):([0-5][0-9])" G "<0>2002/02/09 16:30:00" +"20\d{2}(-|\/)((0[1-9])|(1[0-2]))(-|\/)((0[1-9])|([1-2][0-9])|(3[0-1]))(T|\s)(([0-1][0-9])|(2[0-3])):([0-5][0-9]):([0-5][0-9])" G "<0>2000-01-01T00:00:00" +"20\d{2}(-|\/)((0[1-9])|(1[0-2]))(-|\/)((0[1-9])|([1-2][0-9])|(3[0-1]))(T|\s)(([0-1][0-9])|(2[0-3])):([0-5][0-9]):([0-5][0-9])" "2000-13-31T00:00:00" +"20\d{2}(-|\/)((0[1-9])|(1[0-2]))(-|\/)((0[1-9])|([1-2][0-9])|(3[0-1]))(T|\s)(([0-1][0-9])|(2[0-3])):([0-5][0-9]):([0-5][0-9])" "2002/02/33 24:00:00" +"20\d{2}(-|\/)((0[1-9])|(1[0-2]))(-|\/)((0[1-9])|([1-2][0-9])|(3[0-1]))(T|\s)(([0-1][0-9])|(2[0-3])):([0-5][0-9]):([0-5][0-9])" "2000-01-01 60:00:00" +"^((?:4\d{3})|(?:5[1-5]\d{2})|(?:6011)|(?:3[68]\d{2})|(?:30[012345]\d))[ -]?(\d{4})[ -]?(\d{4})[ -]?(\d{4}|3[4,7]\d{13})$" G "<0>6011567812345678" +"^((?:4\d{3})|(?:5[1-5]\d{2})|(?:6011)|(?:3[68]\d{2})|(?:30[012345]\d))[ -]?(\d{4})[ -]?(\d{4})[ -]?(\d{4}|3[4,7]\d{13})$" G "<0>6011 5678 1234 5678" +"^((?:4\d{3})|(?:5[1-5]\d{2})|(?:6011)|(?:3[68]\d{2})|(?:30[012345]\d))[ -]?(\d{4})[ -]?(\d{4})[ -]?(\d{4}|3[4,7]\d{13})$" G "<0>6011-5678-1234-5678" +"^((?:4\d{3})|(?:5[1-5]\d{2})|(?:6011)|(?:3[68]\d{2})|(?:30[012345]\d))[ -]?(\d{4})[ -]?(\d{4})[ -]?(\d{4}|3[4,7]\d{13})$" "1234567890123456" 
+"^((((0[13578])|(1[02]))[\/]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\/]?(([0-2][0-9])|(30)))|(02[\/]?[0-2][0-9]))[\/]?\d{4}$" G "<0>01/01/2001" +"^((((0[13578])|(1[02]))[\/]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\/]?(([0-2][0-9])|(30)))|(02[\/]?[0-2][0-9]))[\/]?\d{4}$" G "<0>02/29/2002" +"^((((0[13578])|(1[02]))[\/]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\/]?(([0-2][0-9])|(30)))|(02[\/]?[0-2][0-9]))[\/]?\d{4}$" G "<0>12/31/2002" +"^((((0[13578])|(1[02]))[\/]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\/]?(([0-2][0-9])|(30)))|(02[\/]?[0-2][0-9]))[\/]?\d{4}$" "1/1/02" +"^((((0[13578])|(1[02]))[\/]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\/]?(([0-2][0-9])|(30)))|(02[\/]?[0-2][0-9]))[\/]?\d{4}$" "02/30/2002" +"^((((0[13578])|(1[02]))[\/]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\/]?(([0-2][0-9])|(30)))|(02[\/]?[0-2][0-9]))[\/]?\d{4}$" "1/25/2002" +#"^(?=[^\&])(?:(?[^:/?#]+):)?(?://(?[^/?#]*))?(?[^?#]*)(?:\?(?[^#]*))?(?:#(?.*))?" G "<0>http://regexlib.com/REDetails.aspx?regexp_id=x#Details" # out of context, can't work stand-alone +#"^(?=[^\&])(?:(?[^:/?#]+):)?(?://(?[^/?#]*))?(?[^?#]*)(?:\?(?[^#]*))?(?:#(?.*))?" "&" # out of context, can't work stand-alone +"^[-+]?\d+(\.\d+)?$" G "<0>123" +"^[-+]?\d+(\.\d+)?$" G "<0>-123.45" +"^[-+]?\d+(\.\d+)?$" G "<0>+123.56" +"^[-+]?\d+(\.\d+)?$" "123x" +"^[-+]?\d+(\.\d+)?$" ".123" +"^[-+]?\d+(\.\d+)?$" "-123." 
+"^(\d{4}[- ]){3}\d{4}|\d{16}$" G "<0>1234-1234-1234-1234" +"^(\d{4}[- ]){3}\d{4}|\d{16}$" G "<0>1234 1234 1234 1234" +"^(\d{4}[- ]){3}\d{4}|\d{16}$" G "<0>1234123412341234" +"^(\d{4}[- ]){3}\d{4}|\d{16}$" "Visa" +"^(\d{4}[- ]){3}\d{4}|\d{16}$" "1234" +"^(\d{4}[- ]){3}\d{4}|\d{16}$" "123-1234-12345" +"^((4\d{3})|(5[1-5]\d{2})|(6011))-?\d{4}-?\d{4}-?\d{4}|3[4,7]\d{13}$" G "<0>6011-1111-1111-1111" +"^((4\d{3})|(5[1-5]\d{2})|(6011))-?\d{4}-?\d{4}-?\d{4}|3[4,7]\d{13}$" G "<0>5423-1111-1111-1111" +"^((4\d{3})|(5[1-5]\d{2})|(6011))-?\d{4}-?\d{4}-?\d{4}|3[4,7]\d{13}$" G "<0>341111111111111" +"^((4\d{3})|(5[1-5]\d{2})|(6011))-?\d{4}-?\d{4}-?\d{4}|3[4,7]\d{13}$" "4111-111-111-111" +"^((4\d{3})|(5[1-5]\d{2})|(6011))-?\d{4}-?\d{4}-?\d{4}|3[4,7]\d{13}$" "3411-1111-1111-111" +"^((4\d{3})|(5[1-5]\d{2})|(6011))-?\d{4}-?\d{4}-?\d{4}|3[4,7]\d{13}$" "Visa" +"^[A-Z0-9]{8}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{12}$" G "<0>4D28C5AD-6482-41CD-B84E-4573F384BB5C" +"^[A-Z0-9]{8}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{12}$" G "<0>B1E1282C-A35C-4D5A-BF8B-7A3A51D9E388" +"^[A-Z0-9]{8}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{12}$" G "91036A4A-A0F4-43F0-8CD" +"^[A-Z0-9]{8}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{12}$" "{B1E1282C-A35C-4D3A-BF8B-7A3A51D9E388}" +"^[A-Z0-9]{8}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{12}$" "AAAAAAAAAAAAAAAAA" +"^[A-Z0-9]{8}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{12}$" "B;E1282C-A35C-4D3A-BF8B-7A3A51D9E38" +"(^(4|5)\d{3}-?\d{4}-?\d{4}-?\d{4}|(4|5)\d{15})|(^(6011)-?\d{4}-?\d{4}-?\d{4}|(6011)-?\d{12})|(^((3\d{3}))-\d{6}-\d{5}|^((3\d{14})))" G "<0>4111-1234-1234-1234" +"(^(4|5)\d{3}-?\d{4}-?\d{4}-?\d{4}|(4|5)\d{15})|(^(6011)-?\d{4}-?\d{4}-?\d{4}|(6011)-?\d{12})|(^((3\d{3}))-\d{6}-\d{5}|^((3\d{14})))" G "<0>6011123412341234" +"(^(4|5)\d{3}-?\d{4}-?\d{4}-?\d{4}|(4|5)\d{15})|(^(6011)-?\d{4}-?\d{4}-?\d{4}|(6011)-?\d{12})|(^((3\d{3}))-\d{6}-\d{5}|^((3\d{14})))" G "<0>3711-123456-12345" 
+"(^(4|5)\d{3}-?\d{4}-?\d{4}-?\d{4}|(4|5)\d{15})|(^(6011)-?\d{4}-?\d{4}-?\d{4}|(6011)-?\d{12})|(^((3\d{3}))-\d{6}-\d{5}|^((3\d{14})))" "1234567890123456" +"(^(4|5)\d{3}-?\d{4}-?\d{4}-?\d{4}|(4|5)\d{15})|(^(6011)-?\d{4}-?\d{4}-?\d{4}|(6011)-?\d{12})|(^((3\d{3}))-\d{6}-\d{5}|^((3\d{14})))" "4111-123-1234-1234" +"(^(4|5)\d{3}-?\d{4}-?\d{4}-?\d{4}|(4|5)\d{15})|(^(6011)-?\d{4}-?\d{4}-?\d{4}|(6011)-?\d{12})|(^((3\d{3}))-\d{6}-\d{5}|^((3\d{14})))" "412-1234-1234-1234" +#'\[link="(?((.|\n)*?))"\](?((.|\n)*?))\[\/link\]' G '<0>[link="http://www.yahoo.com"]Yahoo[/link]' #named capture +#'\[link="(?((.|\n)*?))"\](?((.|\n)*?))\[\/link\]' "[link]http://www.yahoo.com[/link]" #named capture +#'\[link="(?((.|\n)*?))"\](?((.|\n)*?))\[\/link\]' "[link=http://www.yahoo.com]Yahoo[/link]" #named capture +"^[a-zA-Z0-9]+$" G "<0>10a" +"^[a-zA-Z0-9]+$" G "<0>ABC" +"^[a-zA-Z0-9]+$" G "<0>A3fg" +"^[a-zA-Z0-9]+$" "45.3" +"^[a-zA-Z0-9]+$" "this or that" +"^[a-zA-Z0-9]+$" "$23" +"((\(\d{3}\) ?)|(\d{3}-))?\d{3}-\d{4}" G "<0>(123) 456-7890" +"((\(\d{3}\) ?)|(\d{3}-))?\d{3}-\d{4}" G "<0>123-456-7890" +"((\(\d{3}\) ?)|(\d{3}-))?\d{3}-\d{4}" "1234567890" +"^[a-zA-Z]\w{3,14}$" G "<0>abcd" +"^[a-zA-Z]\w{3,14}$" G "<0>aBc45DSD_sdf" +"^[a-zA-Z]\w{3,14}$" G "<0>password" +"^[a-zA-Z]\w{3,14}$" "afv" +"^[a-zA-Z]\w{3,14}$" "1234" +"^[a-zA-Z]\w{3,14}$" "reallylongpassword" +"^[A-Z]{1,2}[1-9][0-9]?[A-Z]? [0-9][A-Z]{2,}|GIR 0AA$" G "<0>G1 1AA " +"^[A-Z]{1,2}[1-9][0-9]?[A-Z]? [0-9][A-Z]{2,}|GIR 0AA$" G "<0>GIR 0AA" +"^[A-Z]{1,2}[1-9][0-9]?[A-Z]? [0-9][A-Z]{2,}|GIR 0AA$" G "<0>SW1 1ZZ" +"^[A-Z]{1,2}[1-9][0-9]?[A-Z]? [0-9][A-Z]{2,}|GIR 0AA$" "BT01 3RT" +"^[A-Z]{1,2}[1-9][0-9]?[A-Z]? 
[0-9][A-Z]{2,}|GIR 0AA$" "G111 1AA" +"^0[23489]{1}(\-)?[^0\D]{1}\d{6}$" G "<0>03-6106666" +"^0[23489]{1}(\-)?[^0\D]{1}\d{6}$" G "<0>036106666" +"^0[23489]{1}(\-)?[^0\D]{1}\d{6}$" G "<0>02-5523344" +"^0[23489]{1}(\-)?[^0\D]{1}\d{6}$" "00-6106666" +"^0[23489]{1}(\-)?[^0\D]{1}\d{6}$" "03-0106666" +"^0[23489]{1}(\-)?[^0\D]{1}\d{6}$" "02-55812346" +"^0(5[012345678]|6[47]){1}(\-)?[^0\D]{1}\d{5}$" G "<0>050-346634" +"^0(5[012345678]|6[47]){1}(\-)?[^0\D]{1}\d{5}$" G "<0>058633633" +"^0(5[012345678]|6[47]){1}(\-)?[^0\D]{1}\d{5}$" G "<0>064-228226" +"^0(5[012345678]|6[47]){1}(\-)?[^0\D]{1}\d{5}$" "059-336622" +"^0(5[012345678]|6[47]){1}(\-)?[^0\D]{1}\d{5}$" "064-022663" +"^0(5[012345678]|6[47]){1}(\-)?[^0\D]{1}\d{5}$" "0545454545" +"^([A-Z]{1,2}[0-9]{1,2}|[A-Z]{3}|[A-Z]{1,2}[0-9][A-Z])( |-)[0-9][A-Z]{2}" G "<0>AA11 1AA" +"^([A-Z]{1,2}[0-9]{1,2}|[A-Z]{3}|[A-Z]{1,2}[0-9][A-Z])( |-)[0-9][A-Z]{2}" G "<0>AA1A 1AA" +"^([A-Z]{1,2}[0-9]{1,2}|[A-Z]{3}|[A-Z]{1,2}[0-9][A-Z])( |-)[0-9][A-Z]{2}" G "<0>A11-1AA" +"^([A-Z]{1,2}[0-9]{1,2}|[A-Z]{3}|[A-Z]{1,2}[0-9][A-Z])( |-)[0-9][A-Z]{2}" "111 AAA" +"^([A-Z]{1,2}[0-9]{1,2}|[A-Z]{3}|[A-Z]{1,2}[0-9][A-Z])( |-)[0-9][A-Z]{2}" "1AAA 1AA" +"^([A-Z]{1,2}[0-9]{1,2}|[A-Z]{3}|[A-Z]{1,2}[0-9][A-Z])( |-)[0-9][A-Z]{2}" "A1AA 1AA" +"@{2}((\S)+)@{2}" G "<0>@@test@@" +"@{2}((\S)+)@{2}" G "<0>@@name@@" +"@{2}((\S)+)@{2}" G "<0>@@2342@@" +"@{2}((\S)+)@{2}" "@test@" +"@{2}((\S)+)@{2}" "@@na me@@" +"@{2}((\S)+)@{2}" "@@ name@@" +"([0-1][0-9]|2[0-3]):[0-5][0-9]" G "<0>00:00" +"([0-1][0-9]|2[0-3]):[0-5][0-9]" G "<0>13:59" +"([0-1][0-9]|2[0-3]):[0-5][0-9]" G "<0>23:59" +"([0-1][0-9]|2[0-3]):[0-5][0-9]" "24:00" +"([0-1][0-9]|2[0-3]):[0-5][0-9]" "23:60" +"^[+-]?([0-9]*\.?[0-9]+|[0-9]+\.?[0-9]*)([eE][+-]?[0-9]+)?$" G "<0>23" +"^[+-]?([0-9]*\.?[0-9]+|[0-9]+\.?[0-9]*)([eE][+-]?[0-9]+)?$" G "<0>-17.e23" +"^[+-]?([0-9]*\.?[0-9]+|[0-9]+\.?[0-9]*)([eE][+-]?[0-9]+)?$" G "<0>+.23e+2" +"^[+-]?([0-9]*\.?[0-9]+|[0-9]+\.?[0-9]*)([eE][+-]?[0-9]+)?$" "+.e2" 
+"^[+-]?([0-9]*\.?[0-9]+|[0-9]+\.?[0-9]*)([eE][+-]?[0-9]+)?$" "23.17.5" +"^[+-]?([0-9]*\.?[0-9]+|[0-9]+\.?[0-9]*)([eE][+-]?[0-9]+)?$" "10e2.0" +"^([1-zA-Z0-1@.\s ]{1,255})$" G "<0>email@email.com" +"^([1-zA-Z0-1@.\s ]{1,255})$" G "<0>My Name" +"^([1-zA-Z0-1@.\s ]{1,255})$" G "<0>asdf12df" +"^([1-zA-Z0-1@.\s ]{1,255})$" "‘,\*&$<>" +"^([1-zA-Z0-1@.\s ]{1,255})$" "1001' string" +"^((0[1-9])|(1[0-2]))\/(\d{4})$" G "<0>12/2002" +"^((0[1-9])|(1[0-2]))\/(\d{4})$" G "<0>11/1900" +"^((0[1-9])|(1[0-2]))\/(\d{4})$" G "<0>02/1977" +"^((0[1-9])|(1[0-2]))\/(\d{4})$" "1/1977" +"^((0[1-9])|(1[0-2]))\/(\d{4})$" "00/000" +"^((0[1-9])|(1[0-2]))\/(\d{4})$" "15/2002" +"^\(\d{1,2}(\s\d{1,2}){1,2}\)\s(\d{1,2}(\s\d{1,2}){1,2})((-(\d{1,4})){0,1})$" G "<0>(0 34 56) 34 56 67" +"^\(\d{1,2}(\s\d{1,2}){1,2}\)\s(\d{1,2}(\s\d{1,2}){1,2})((-(\d{1,4})){0,1})$" G "<0>(03 45) 5 67 67" +"^\(\d{1,2}(\s\d{1,2}){1,2}\)\s(\d{1,2}(\s\d{1,2}){1,2})((-(\d{1,4})){0,1})$" G "<0>(0 45) 2 33 45-45" +"^\(\d{1,2}(\s\d{1,2}){1,2}\)\s(\d{1,2}(\s\d{1,2}){1,2})((-(\d{1,4})){0,1})$" "(2345) 34 34" +"^\(\d{1,2}(\s\d{1,2}){1,2}\)\s(\d{1,2}(\s\d{1,2}){1,2})((-(\d{1,4})){0,1})$" "(0 56) 456 456" +"^\(\d{1,2}(\s\d{1,2}){1,2}\)\s(\d{1,2}(\s\d{1,2}){1,2})((-(\d{1,4})){0,1})$" "(3 45) 2 34-45678" +"(?:\d|I{1,3})?\s?\w{2,}\.?\s*\d{1,}\:\d{1,}-?,?\d{0,2}(?:,\d{0,2}){0,2}" G "<0>Genesis 3:3-4,6" +"(?:\d|I{1,3})?\s?\w{2,}\.?\s*\d{1,}\:\d{1,}-?,?\d{0,2}(?:,\d{0,2}){0,2}" G "<0>II Sam 2:11,2" +"(?:\d|I{1,3})?\s?\w{2,}\.?\s*\d{1,}\:\d{1,}-?,?\d{0,2}(?:,\d{0,2}){0,2}" G "<0>2 Tim 3:16" +"(?:\d|I{1,3})?\s?\w{2,}\.?\s*\d{1,}\:\d{1,}-?,?\d{0,2}(?:,\d{0,2}){0,2}" "Genesis chap 3, verse 3" +"(?:\d|I{1,3})?\s?\w{2,}\.?\s*\d{1,}\:\d{1,}-?,?\d{0,2}(?:,\d{0,2}){0,2}" "2nd Samuel 2" +"(\[[Ii][Mm][Gg]\])(\S+?)(\[\/[Ii][Mm][Gg]\])" G "<0>[IMG]http://bleh.jpg[/IMG]" +"(\[[Ii][Mm][Gg]\])(\S+?)(\[\/[Ii][Mm][Gg]\])" G "<0>[ImG]bleh[/imG]" +"(\[[Ii][Mm][Gg]\])(\S+?)(\[\/[Ii][Mm][Gg]\])" G "<0>[img]ftp://login:pass@bleh.gif[/img]" 
+"(\[[Ii][Mm][Gg]\])(\S+?)(\[\/[Ii][Mm][Gg]\])" '' +"^([0-9]{1,2})[./-]+([0-9]{1,2})[./-]+([0-9]{2}|[0-9]{4})$" G "<0>10/03/1979" +"^([0-9]{1,2})[./-]+([0-9]{1,2})[./-]+([0-9]{2}|[0-9]{4})$" G "<0>1-1-02" +"^([0-9]{1,2})[./-]+([0-9]{1,2})[./-]+([0-9]{2}|[0-9]{4})$" G "<0>01.1.2003" +"^([0-9]{1,2})[./-]+([0-9]{1,2})[./-]+([0-9]{2}|[0-9]{4})$" "10/03/197" +"^([0-9]{1,2})[./-]+([0-9]{1,2})[./-]+([0-9]{2}|[0-9]{4})$" "01-02-003" +"^([0-9]{1,2})[./-]+([0-9]{1,2})[./-]+([0-9]{2}|[0-9]{4})$" "01 02 03" +#"^(?(^00000(|-0000))|(\d{5}(|-\d{4})))$" G "<0>12345" # No Conditionals? +#"^(?(^00000(|-0000))|(\d{5}(|-\d{4})))$" G "<0>12345-6789" # No Conditionals? +#"^(?(^00000(|-0000))|(\d{5}(|-\d{4})))$" "00000" # No Conditionals? +#"^(?(^00000(|-0000))|(\d{5}(|-\d{4})))$" "00000-0000" # No Conditionals? +#"^(?(^00000(|-0000))|(\d{5}(|-\d{4})))$" "a4650-465s" # No Conditionals? +"^((0?[1-9])|((1|2)[0-9])|30|31)$" G "<0>01" +"^((0?[1-9])|((1|2)[0-9])|30|31)$" G "<0>12" +"^((0?[1-9])|((1|2)[0-9])|30|31)$" G "<0>31" +"^((0?[1-9])|((1|2)[0-9])|30|31)$" "123" +"^((0?[1-9])|((1|2)[0-9])|30|31)$" "32" +"^((0?[1-9])|((1|2)[0-9])|30|31)$" "abc" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?(\d{3}([\s\-./\\])?\d{4}|[a-zA-Z0-9]{7})$" G "<0>1.222.333.1234" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?(\d{3}([\s\-./\\])?\d{4}|[a-zA-Z0-9]{7})$" G "<0>1-223-123-1232" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?(\d{3}([\s\-./\\])?\d{4}|[a-zA-Z0-9]{7})$" G "<0>12223334444" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?(\d{3}([\s\-./\\])?\d{4}|[a-zA-Z0-9]{7})$" "1.1.123123.123" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?(\d{3}([\s\-./\\])?\d{4}|[a-zA-Z0-9]{7})$" "12-1322-112-31" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?(\d{3}([\s\-./\\])?\d{4}|[a-zA-Z0-9]{7})$" "11231321131" +"^([A-PR-UWYZ0-9][A-HK-Y0-9][AEHMNPRTVXY0-9]?[ABEHMNPRVWXY0-9]? 
{1,2}[0-9][ABD-HJLN-UW-Z]{2}|GIR 0AA)$" G "<0>DN3 6GB" +"^([A-PR-UWYZ0-9][A-HK-Y0-9][AEHMNPRTVXY0-9]?[ABEHMNPRVWXY0-9]? {1,2}[0-9][ABD-HJLN-UW-Z]{2}|GIR 0AA)$" G "<0>SW42 4RG" +"^([A-PR-UWYZ0-9][A-HK-Y0-9][AEHMNPRTVXY0-9]?[ABEHMNPRVWXY0-9]? {1,2}[0-9][ABD-HJLN-UW-Z]{2}|GIR 0AA)$" G "<0>GIR 0AA" +"^([A-PR-UWYZ0-9][A-HK-Y0-9][AEHMNPRTVXY0-9]?[ABEHMNPRVWXY0-9]? {1,2}[0-9][ABD-HJLN-UW-Z]{2}|GIR 0AA)$" "SEW4 5TY" +"^([A-PR-UWYZ0-9][A-HK-Y0-9][AEHMNPRTVXY0-9]?[ABEHMNPRVWXY0-9]? {1,2}[0-9][ABD-HJLN-UW-Z]{2}|GIR 0AA)$" "AA2C 4FG" +"^([A-PR-UWYZ0-9][A-HK-Y0-9][AEHMNPRTVXY0-9]?[ABEHMNPRVWXY0-9]? {1,2}[0-9][ABD-HJLN-UW-Z]{2}|GIR 0AA)$" "AA2 4CV" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{4,8}$" G "<0>asD1" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{4,8}$" G "<0>asDF1234" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{4,8}$" G "<0>ASPgo123" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{4,8}$" "asdf" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{4,8}$" "1234" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{4,8}$" "ASDF12345" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?([0-9]{3}([\s\-./\\])?[0-9]{4}|[a-zA-Z0-9]{7}|([0-9]{3}[-][a-zA-Z0-9]{4}))" G "<0>1.222.333.1234" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?([0-9]{3}([\s\-./\\])?[0-9]{4}|[a-zA-Z0-9]{7}|([0-9]{3}[-][a-zA-Z0-9]{4}))" G "<0>1-223-123-1232" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?([0-9]{3}([\s\-./\\])?[0-9]{4}|[a-zA-Z0-9]{7}|([0-9]{3}[-][a-zA-Z0-9]{4}))" G "<0>1-888-425-DELL" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?([0-9]{3}([\s\-./\\])?[0-9]{4}|[a-zA-Z0-9]{7}|([0-9]{3}[-][a-zA-Z0-9]{4}))" "1.1.123123.123" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?([0-9]{3}([\s\-./\\])?[0-9]{4}|[a-zA-Z0-9]{7}|([0-9]{3}[-][a-zA-Z0-9]{4}))" "12-1322-112-31" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?([0-9]{3}([\s\-./\\])?[0-9]{4}|[a-zA-Z0-9]{7}|([0-9]{3}[-][a-zA-Z0-9]{4}))" "1-800-CALL-DEL" +"^(([0]?[1-9]|1[0-2])(:)([0-5][0-9]))$" G "<0>09:00" 
+"^(([0]?[1-9]|1[0-2])(:)([0-5][0-9]))$" G "<0>9:00" +"^(([0]?[1-9]|1[0-2])(:)([0-5][0-9]))$" G "<0>11:35" +"^(([0]?[1-9]|1[0-2])(:)([0-5][0-9]))$" "13:00" +"^(([0]?[1-9]|1[0-2])(:)([0-5][0-9]))$" "9.00" +"^(([0]?[1-9]|1[0-2])(:)([0-5][0-9]))$" "6:60" +"^([1-9]|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])$" G "<0>1" +"^([1-9]|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])$" G "<0>108" +"^([1-9]|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])$" G "<0>255" +"^([1-9]|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])$" "01" +"^([1-9]|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])$" "256" +"^((((0[13578])|([13578])|(1[02]))[\/](([1-9])|([0-2][0-9])|(3[01])))|(((0[469])|([469])|(11))[\/](([1-9])|([0-2][0-9])|(30)))|((2|02)[\/](([1-9])|([0-2][0-9]))))[\/]\d{4}$|^\d{4}$" G "<0>01/01/2001" +"^((((0[13578])|([13578])|(1[02]))[\/](([1-9])|([0-2][0-9])|(3[01])))|(((0[469])|([469])|(11))[\/](([1-9])|([0-2][0-9])|(30)))|((2|02)[\/](([1-9])|([0-2][0-9]))))[\/]\d{4}$|^\d{4}$" G "<0>1/01/2001" +"^((((0[13578])|([13578])|(1[02]))[\/](([1-9])|([0-2][0-9])|(3[01])))|(((0[469])|([469])|(11))[\/](([1-9])|([0-2][0-9])|(30)))|((2|02)[\/](([1-9])|([0-2][0-9]))))[\/]\d{4}$|^\d{4}$" G "<0>2002" +"^((((0[13578])|([13578])|(1[02]))[\/](([1-9])|([0-2][0-9])|(3[01])))|(((0[469])|([469])|(11))[\/](([1-9])|([0-2][0-9])|(30)))|((2|02)[\/](([1-9])|([0-2][0-9]))))[\/]\d{4}$|^\d{4}$" "2/30/2002" +"^((((0[13578])|([13578])|(1[02]))[\/](([1-9])|([0-2][0-9])|(3[01])))|(((0[469])|([469])|(11))[\/](([1-9])|([0-2][0-9])|(30)))|((2|02)[\/](([1-9])|([0-2][0-9]))))[\/]\d{4}$|^\d{4}$" "13/23/2002" +"^((((0[13578])|([13578])|(1[02]))[\/](([1-9])|([0-2][0-9])|(3[01])))|(((0[469])|([469])|(11))[\/](([1-9])|([0-2][0-9])|(30)))|((2|02)[\/](([1-9])|([0-2][0-9]))))[\/]\d{4}$|^\d{4}$" "12345" +"^[A-Za-z]{2}[0-9]{6}[A-Za-z]{1}$" G "<0>SP939393H" +"^[A-Za-z]{2}[0-9]{6}[A-Za-z]{1}$" G "<0>PX123456D" +"^[A-Za-z]{2}[0-9]{6}[A-Za-z]{1}$" G "<0>SW355667G" +"^[A-Za-z]{2}[0-9]{6}[A-Za-z]{1}$" "12SP9393H" +"^[A-Za-z]{2}[0-9]{6}[A-Za-z]{1}$" "S3P93930D" +"^[A-Za-z]{2}[0-9]{6}[A-Za-z]{1}$" 
"11223344SP00ddSS" +"(^0[78][2347][0-9]{7})" G "<0>0834128458" +"(^0[78][2347][0-9]{7})" G "<0>0749526308" +"(^0[78][2347][0-9]{7})" "0861212308" +"(^0[78][2347][0-9]{7})" "0892549851" +"^([A-HJ-TP-Z]{1}\d{4}[A-Z]{3}|[a-z]{1}\d{4}[a-hj-tp-z]{3})$" G "<0>C1406HHA" +"^([A-HJ-TP-Z]{1}\d{4}[A-Z]{3}|[a-z]{1}\d{4}[a-hj-tp-z]{3})$" G "<0>A4126AAB" +"^([A-HJ-TP-Z]{1}\d{4}[A-Z]{3}|[a-z]{1}\d{4}[a-hj-tp-z]{3})$" G "<0>c1406hha" +"^([A-HJ-TP-Z]{1}\d{4}[A-Z]{3}|[a-z]{1}\d{4}[a-hj-tp-z]{3})$" "c1406HHA" +"^([A-HJ-TP-Z]{1}\d{4}[A-Z]{3}|[a-z]{1}\d{4}[a-hj-tp-z]{3})$" "4126" +"^([A-HJ-TP-Z]{1}\d{4}[A-Z]{3}|[a-z]{1}\d{4}[a-hj-tp-z]{3})$" "C1406hha" +"^(((25[0-5]|2[0-4][0-9]|19[0-1]|19[3-9]|18[0-9]|17[0-1]|17[3-9]|1[0-6][0-9]|1[1-9]|[2-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9]))|(192\.(25[0-5]|2[0-4][0-9]|16[0-7]|169|1[0-5][0-9]|1[7-9][0-9]|[1-9][0-9]|[0-9]))|(172\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|1[0-5]|3[2-9]|[4-9][0-9]|[0-9])))\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])$" G "<0>66.129.71.120" +"^(((25[0-5]|2[0-4][0-9]|19[0-1]|19[3-9]|18[0-9]|17[0-1]|17[3-9]|1[0-6][0-9]|1[1-9]|[2-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9]))|(192\.(25[0-5]|2[0-4][0-9]|16[0-7]|169|1[0-5][0-9]|1[7-9][0-9]|[1-9][0-9]|[0-9]))|(172\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|1[0-5]|3[2-9]|[4-9][0-9]|[0-9])))\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])$" G "<0>207.46.230.218" +"^(((25[0-5]|2[0-4][0-9]|19[0-1]|19[3-9]|18[0-9]|17[0-1]|17[3-9]|1[0-6][0-9]|1[1-9]|[2-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9]))|(192\.(25[0-5]|2[0-4][0-9]|16[0-7]|169|1[0-5][0-9]|1[7-9][0-9]|[1-9][0-9]|[0-9]))|(172\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|1[0-5]|3[2-9]|[4-9][0-9]|[0-9])))\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])$" G "<0>64.58.76.225" 
+"^(((25[0-5]|2[0-4][0-9]|19[0-1]|19[3-9]|18[0-9]|17[0-1]|17[3-9]|1[0-6][0-9]|1[1-9]|[2-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9]))|(192\.(25[0-5]|2[0-4][0-9]|16[0-7]|169|1[0-5][0-9]|1[7-9][0-9]|[1-9][0-9]|[0-9]))|(172\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|1[0-5]|3[2-9]|[4-9][0-9]|[0-9])))\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])$" "10.0.5.4" +"^(((25[0-5]|2[0-4][0-9]|19[0-1]|19[3-9]|18[0-9]|17[0-1]|17[3-9]|1[0-6][0-9]|1[1-9]|[2-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9]))|(192\.(25[0-5]|2[0-4][0-9]|16[0-7]|169|1[0-5][0-9]|1[7-9][0-9]|[1-9][0-9]|[0-9]))|(172\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|1[0-5]|3[2-9]|[4-9][0-9]|[0-9])))\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])$" "192.168.0.1" +"^(((25[0-5]|2[0-4][0-9]|19[0-1]|19[3-9]|18[0-9]|17[0-1]|17[3-9]|1[0-6][0-9]|1[1-9]|[2-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9]))|(192\.(25[0-5]|2[0-4][0-9]|16[0-7]|169|1[0-5][0-9]|1[7-9][0-9]|[1-9][0-9]|[0-9]))|(172\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|1[0-5]|3[2-9]|[4-9][0-9]|[0-9])))\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])$" "my ip address" +"^([\w\d\-\.]+)@{1}(([\w\d\-]{1,67})|([\w\d\-]+\.[\w\d\-]{1,67}))\.(([a-zA-Z\d]{2,4})(\.[a-zA-Z\d]{2})?)$" G "<0>foo@foo.com" +"^([\w\d\-\.]+)@{1}(([\w\d\-]{1,67})|([\w\d\-]+\.[\w\d\-]{1,67}))\.(([a-zA-Z\d]{2,4})(\.[a-zA-Z\d]{2})?)$" G "<0>foo@foo-foo.com.au" +"^([\w\d\-\.]+)@{1}(([\w\d\-]{1,67})|([\w\d\-]+\.[\w\d\-]{1,67}))\.(([a-zA-Z\d]{2,4})(\.[a-zA-Z\d]{2})?)$" G "<0>foo@foo.foo.info" +"^([\w\d\-\.]+)@{1}(([\w\d\-]{1,67})|([\w\d\-]+\.[\w\d\-]{1,67}))\.(([a-zA-Z\d]{2,4})(\.[a-zA-Z\d]{2})?)$" "foo@.com" +"^([\w\d\-\.]+)@{1}(([\w\d\-]{1,67})|([\w\d\-]+\.[\w\d\-]{1,67}))\.(([a-zA-Z\d]{2,4})(\.[a-zA-Z\d]{2})?)$" "foo@foo..com" 
+"^([\w\d\-\.]+)@{1}(([\w\d\-]{1,67})|([\w\d\-]+\.[\w\d\-]{1,67}))\.(([a-zA-Z\d]{2,4})(\.[a-zA-Z\d]{2})?)$" "foo@me@.com" +"/\*[\d\D]*?\*/" G "<0>/* my comment */" +"/\*[\d\D]*?\*/" G "<0>/* my multiline comment */" +"/\*[\d\D]*?\*/" G "<0>/* my nested comment */" +"/\*[\d\D]*?\*/" "*/ anything here /*" +"/\*[\d\D]*?\*/" "anything between 2 separate comments" +"/\*[\d\D]*?\*/" "\* *\\" +"/\*[\p{N}\P{N}]*?\*/" G "<0>/* my comment */" +"/\*[\p{N}\P{N}]*?\*/" G "<0>/* my multiline comment */" +"/\*[\p{N}\P{N}]*?\*/" G "<0>/* my nested comment */" +"/\*[\p{N}\P{N}]*?\*/" "*/ anything here /*" +"/\*[\p{N}\P{N}]*?\*/" "anything between 2 separate comments" +"/\*[\p{N}\P{N}]*?\*/" "\* *\\" +"((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((\d{4})|(\d{2}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((\d{4}|\d{2})))" G "<0>1/31/2002" +"((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((\d{4})|(\d{2}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((\d{4}|\d{2})))" G "<0>04-30-02" +"((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((\d{4})|(\d{2}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((\d{4}|\d{2})))" G "<0>12-01/2002" +"((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((\d{4})|(\d{2}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((\d{4}|\d{2})))" "2/31/2002" +"((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((\d{4})|(\d{2}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((\d{4}|\d{2})))" "13/0/02" +"((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((\d{4})|(\d{2}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((\d{4}|\d{2})))" "Jan 1, 2001" 
+'^(([^<>;()\[\]\\.,;:@"]+(\.[^<>()\[\]\\.,;:@"]+)*)|(".+"))@((([a-z]([-a-z0-9]*[a-z0-9])?)|(#[0-9]+)|(\[((([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\.){3}(([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\]))\.)*(([a-z]([-a-z0-9]*[a-z0-9])?)|(#[0-9]+)|(\[((([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\.){3}(([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\]))$' G "<0>blah@[10.0.0.1]" +'^(([^<>;()\[\]\\.,;:@"]+(\.[^<>()\[\]\\.,;:@"]+)*)|(".+"))@((([a-z]([-a-z0-9]*[a-z0-9])?)|(#[0-9]+)|(\[((([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\.){3}(([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\]))\.)*(([a-z]([-a-z0-9]*[a-z0-9])?)|(#[0-9]+)|(\[((([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\.){3}(([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\]))$' G "<0>a@b.c" +'^(([^<>;()\[\]\\.,;:@"]+(\.[^<>()\[\]\\.,;:@"]+)*)|(".+"))@((([a-z]([-a-z0-9]*[a-z0-9])?)|(#[0-9]+)|(\[((([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\.){3}(([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\]))\.)*(([a-z]([-a-z0-9]*[a-z0-9])?)|(#[0-9]+)|(\[((([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\.){3}(([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\]))$' "non@match@." 
+"^\d{9}[\d|X]$" G "<0>1234123412" +"^\d{9}[\d|X]$" G "<0>123412341X" +"^\d{9}[\d|X]$" "not an isbn" +"^\d{9}(\d|X)$" G "<0>1234123412" +"^\d{9}(\d|X)$" G "<0>123412341X" +"^\d{9}(\d|X)$" "not an isbn" +"^(([1-9])|(0[1-9])|(1[0-2]))\/(([0-9])|([0-2][0-9])|(3[0-1]))\/(([0-9][0-9])|([1-2][0,9][0-9][0-9]))$" G "<0>01/01/2001" +"^(([1-9])|(0[1-9])|(1[0-2]))\/(([0-9])|([0-2][0-9])|(3[0-1]))\/(([0-9][0-9])|([1-2][0,9][0-9][0-9]))$" G "<0>1/1/1999" +"^(([1-9])|(0[1-9])|(1[0-2]))\/(([0-9])|([0-2][0-9])|(3[0-1]))\/(([0-9][0-9])|([1-2][0,9][0-9][0-9]))$" G "<0>10/20/2080" +"^(([1-9])|(0[1-9])|(1[0-2]))\/(([0-9])|([0-2][0-9])|(3[0-1]))\/(([0-9][0-9])|([1-2][0,9][0-9][0-9]))$" "13/01/2001" +"^(([1-9])|(0[1-9])|(1[0-2]))\/(([0-9])|([0-2][0-9])|(3[0-1]))\/(([0-9][0-9])|([1-2][0,9][0-9][0-9]))$" "1/1/1800" +"^(([1-9])|(0[1-9])|(1[0-2]))\/(([0-9])|([0-2][0-9])|(3[0-1]))\/(([0-9][0-9])|([1-2][0,9][0-9][0-9]))$" "10/32/2080" +"^\d*\.?((25)|(50)|(5)|(75)|(0)|(00))?$" G "<0>0.25" +"^\d*\.?((25)|(50)|(5)|(75)|(0)|(00))?$" G "<0>.75" +"^\d*\.?((25)|(50)|(5)|(75)|(0)|(00))?$" G "<0>123.50" +"^\d*\.?((25)|(50)|(5)|(75)|(0)|(00))?$" ".77" +"^\d*\.?((25)|(50)|(5)|(75)|(0)|(00))?$" "1.435" +"^(s-|S-){0,1}[0-9]{3}\s?[0-9]{2}$" G "<0>12345" +"^(s-|S-){0,1}[0-9]{3}\s?[0-9]{2}$" G "<0>932 68" +"^(s-|S-){0,1}[0-9]{3}\s?[0-9]{2}$" G "<0>S-621 46" +"^(s-|S-){0,1}[0-9]{3}\s?[0-9]{2}$" "5367" +"^(s-|S-){0,1}[0-9]{3}\s?[0-9]{2}$" "425611" +"^(s-|S-){0,1}[0-9]{3}\s?[0-9]{2}$" "31 545" +"^\d{5}(-\d{4})?$" G "<0>48222" +"^\d{5}(-\d{4})?$" G "<0>48222-1746" +"^\d{5}(-\d{4})?$" "4632" +"^\d{5}(-\d{4})?$" "Blake" +"^\d{5}(-\d{4})?$" "37333-32" +'^(?!^(PRN|AUX|CLOCK\$|NUL|CON|COM\d|LPT\d|\..*)(\..+)?$)[^\x00-\x1f\\?*:\";|/]+$' G "<0>test.txt" +'^(?!^(PRN|AUX|CLOCK\$|NUL|CON|COM\d|LPT\d|\..*)(\..+)?$)[^\x00-\x1f\\?*:\";|/]+$' G "<0>test.jpg.txt" +'^(?!^(PRN|AUX|CLOCK\$|NUL|CON|COM\d|LPT\d|\..*)(\..+)?$)[^\x00-\x1f\\?*:\";|/]+$' G "<0>a&b c.bmp" 
+'^(?!^(PRN|AUX|CLOCK\$|NUL|CON|COM\d|LPT\d|\..*)(\..+)?$)[^\x00-\x1f\\?*:\";|/]+$' "CON" +'^(?!^(PRN|AUX|CLOCK\$|NUL|CON|COM\d|LPT\d|\..*)(\..+)?$)[^\x00-\x1f\\?*:\";|/]+$' ".pdf" +'^(?!^(PRN|AUX|CLOCK\$|NUL|CON|COM\d|LPT\d|\..*)(\..+)?$)[^\x00-\x1f\\?*:\";|/]+$' "test:2.pdf" +"^(\d{1,3}'(\d{3}')*\d{3}(\.\d{1,3})?|\d{1,3}(\.\d{3})?)$" G "<0>1'235.140" +"^(\d{1,3}'(\d{3}')*\d{3}(\.\d{1,3})?|\d{1,3}(\.\d{3})?)$" G "<0>1'222'333.120" +"^(\d{1,3}'(\d{3}')*\d{3}(\.\d{1,3})?|\d{1,3}(\.\d{3})?)$" G "<0>456" +"^(\d{1,3}'(\d{3}')*\d{3}(\.\d{1,3})?|\d{1,3}(\.\d{3})?)$" "1234.500" +"^(\d{1,3}'(\d{3}')*\d{3}(\.\d{1,3})?|\d{1,3}(\.\d{3})?)$" "78'45.123" +"^(\d{1,3}'(\d{3}')*\d{3}(\.\d{1,3})?|\d{1,3}(\.\d{3})?)$" "123,0012" +"^[a-zA-Z][0-9][a-zA-Z]\s?[0-9][a-zA-Z][0-9]$" G "<0>T2p 3c7" +"^[a-zA-Z][0-9][a-zA-Z]\s?[0-9][a-zA-Z][0-9]$" G "<0>T3P3c7" +"^[a-zA-Z][0-9][a-zA-Z]\s?[0-9][a-zA-Z][0-9]$" G "<0>T2P 3C7" +"^[a-zA-Z][0-9][a-zA-Z]\s?[0-9][a-zA-Z][0-9]$" "123456" +"^[a-zA-Z][0-9][a-zA-Z]\s?[0-9][a-zA-Z][0-9]$" "3C7T2P" +"^[a-zA-Z][0-9][a-zA-Z]\s?[0-9][a-zA-Z][0-9]$" "11T21RWW" +"^\$[0-9]+(\.[0-9][0-9])?$" G "<0>$1.50" +"^\$[0-9]+(\.[0-9][0-9])?$" G "<0>$49" +"^\$[0-9]+(\.[0-9][0-9])?$" G "<0>$0.50" +"^\$[0-9]+(\.[0-9][0-9])?$" "1.5" +"^\$[0-9]+(\.[0-9][0-9])?$" "$1.333" +"^\$[0-9]+(\.[0-9][0-9])?$" "this $5.12 fails" +"\b((25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\.){3}(25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\b" G "<0>217.6.9.89" +"\b((25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\.){3}(25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\b" G "<0>0.0.0.0" +"\b((25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\.){3}(25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\b" G "<0>255.255.255.255" +"\b((25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\.){3}(25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\b" "256.0.0.0" +"\b((25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\.){3}(25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\b" "0978.3.3.3" +"\b((25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\.){3}(25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\b" "65.4t.54.3" +"((mailto\:|(news|(ht|f)tp(s?))\://){1}\S+)" G "<0>http://www.aspemporium.com" 
+"((mailto\:|(news|(ht|f)tp(s?))\://){1}\S+)" G "<0>mailto:dominionx@hotmail.com" +"((mailto\:|(news|(ht|f)tp(s?))\://){1}\S+)" G "<0>ftp://ftp.test.com" +"((mailto\:|(news|(ht|f)tp(s?))\://){1}\S+)" "www.aspemporium.com" +"((mailto\:|(news|(ht|f)tp(s?))\://){1}\S+)" "dominionx@hotmail.com" +"((mailto\:|(news|(ht|f)tp(s?))\://){1}\S+)" "bloggs" +"\(([0-9]{2}|0{1}((x|[0-9]){2}[0-9]{2}))\)\s*[0-9]{3,4}[- ]*[0-9]{4}" G "<0>(12) 123 1234" +"\(([0-9]{2}|0{1}((x|[0-9]){2}[0-9]{2}))\)\s*[0-9]{3,4}[- ]*[0-9]{4}" G "<0>(01512) 123 1234" +"\(([0-9]{2}|0{1}((x|[0-9]){2}[0-9]{2}))\)\s*[0-9]{3,4}[- ]*[0-9]{4}" G "<0>(0xx12) 1234 1234" +"\(([0-9]{2}|0{1}((x|[0-9]){2}[0-9]{2}))\)\s*[0-9]{3,4}[- ]*[0-9]{4}" "12 123 1234" +"\(([0-9]{2}|0{1}((x|[0-9]){2}[0-9]{2}))\)\s*[0-9]{3,4}[- ]*[0-9]{4}" "(012) 123/1234" +"\(([0-9]{2}|0{1}((x|[0-9]){2}[0-9]{2}))\)\s*[0-9]{3,4}[- ]*[0-9]{4}" "(012) 123 12345" +"^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$" G "<0>bob-smith@foo.com" +"^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$" G "<0>bob.smith@foo.com" +"^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$" G "<0>bob_smith@foo.com" +"^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$" "-smith@foo.com" +"^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$" ".smith@foo.com" +"^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$" "smith@foo_com" +"^(?=.*\d).{4,8}$" G "<0>1234" +"^(?=.*\d).{4,8}$" G "<0>asdf1234" +"^(?=.*\d).{4,8}$" G "<0>asp123" +"^(?=.*\d).{4,8}$" "asdf" +"^(?=.*\d).{4,8}$" "asdf12345" +"^(?=.*\d).{4,8}$" "password" +"[^A-Za-z0-9_@\.]|@{2,}|\.{5,}" G "<0>user name" +"[^A-Za-z0-9_@\.]|@{2,}|\.{5,}" G "<0>user#name" +"[^A-Za-z0-9_@\.]|@{2,}|\.{5,}" G "<0>....." 
+"[^A-Za-z0-9_@\.]|@{2,}|\.{5,}" "User_Name1" +"[^A-Za-z0-9_@\.]|@{2,}|\.{5,}" "username@foo.com" +"[^A-Za-z0-9_@\.]|@{2,}|\.{5,}" "user.name@mail.foo.com" +"^100$|^[0-9]{1,2}$|^[0-9]{1,2}\,[0-9]{1,3}$" G "<0>12,654" +"^100$|^[0-9]{1,2}$|^[0-9]{1,2}\,[0-9]{1,3}$" G "<0>1,987" +"^100$|^[0-9]{1,2}$|^[0-9]{1,2}\,[0-9]{1,3}$" "128,2" +"^100$|^[0-9]{1,2}$|^[0-9]{1,2}\,[0-9]{1,3}$" "12," +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*[^\.\,\)\(\s]$" G "<0>https://www.restrictd.com/~myhome/" +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*[^\.\,\)\(\s]$" "http://www.krumedia.com." +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*[^\.\,\)\(\s]$" "(http://www.krumedia.com)" +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*[^\.\,\)\(\s]$" "http://www.krumedia.com," +"(\d{1,3},(\d{3},)*\d{3}(\.\d{1,3})?|\d{1,3}(\.\d{3})?)$" G "<0>2&651.50" +"(\d{1,3},(\d{3},)*\d{3}(\.\d{1,3})?|\d{1,3}(\.\d{3})?)$" G "<0>987.895" +"(\d{1,3},(\d{3},)*\d{3}(\.\d{1,3})?|\d{1,3}(\.\d{3})?)$" "25$%787*" +"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9]?)?$" G "<0>$1,456,983.00" +"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9]?)?$" G "<0>$1,700.07" +"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9]?)?$" G "<0>$68,944.23" +"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9]?)?$" "$20,86.93" +"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9]?)?$" "$1098.84" +"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9]?)?$" "$150." 
+"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9])?$" G "<0>$28,009,987.88" +"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9])?$" G "<0>$23,099.05" +"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9])?$" G "<0>$.88" +"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9])?$" "$234,5.99" +"^((((31\/(0?[13578]|1[02]))|((29|30)\/(0?[1,3-9]|1[0-2])))\/(1[6-9]|[2-9]\d)?\d{2})|(29\/0?2\/(((1[6-9]|[2-9]\d)?(0[48]|[2468][048]|[13579][26])|((16|[2468][048]|[3579][26])00))))|(0?[1-9]|1\d|2[0-8])\/((0?[1-9])|(1[0-2]))\/((1[6-9]|[2-9]\d)?\d{2})) (20|21|22|23|[0-1]?\d):[0-5]?\d:[0-5]?\d$" G "<0>29/02/2004 20:15:27" +"^((((31\/(0?[13578]|1[02]))|((29|30)\/(0?[1,3-9]|1[0-2])))\/(1[6-9]|[2-9]\d)?\d{2})|(29\/0?2\/(((1[6-9]|[2-9]\d)?(0[48]|[2468][048]|[13579][26])|((16|[2468][048]|[3579][26])00))))|(0?[1-9]|1\d|2[0-8])\/((0?[1-9])|(1[0-2]))\/((1[6-9]|[2-9]\d)?\d{2})) (20|21|22|23|[0-1]?\d):[0-5]?\d:[0-5]?\d$" G "<0>29/2/04 8:9:5" +"^((((31\/(0?[13578]|1[02]))|((29|30)\/(0?[1,3-9]|1[0-2])))\/(1[6-9]|[2-9]\d)?\d{2})|(29\/0?2\/(((1[6-9]|[2-9]\d)?(0[48]|[2468][048]|[13579][26])|((16|[2468][048]|[3579][26])00))))|(0?[1-9]|1\d|2[0-8])\/((0?[1-9])|(1[0-2]))\/((1[6-9]|[2-9]\d)?\d{2})) (20|21|22|23|[0-1]?\d):[0-5]?\d:[0-5]?\d$" G "<0>31/3/2004 9:20:17" +"^((((31\/(0?[13578]|1[02]))|((29|30)\/(0?[1,3-9]|1[0-2])))\/(1[6-9]|[2-9]\d)?\d{2})|(29\/0?2\/(((1[6-9]|[2-9]\d)?(0[48]|[2468][048]|[13579][26])|((16|[2468][048]|[3579][26])00))))|(0?[1-9]|1\d|2[0-8])\/((0?[1-9])|(1[0-2]))\/((1[6-9]|[2-9]\d)?\d{2})) (20|21|22|23|[0-1]?\d):[0-5]?\d:[0-5]?\d$" "29/02/2003 20:15:15" +"^((((31\/(0?[13578]|1[02]))|((29|30)\/(0?[1,3-9]|1[0-2])))\/(1[6-9]|[2-9]\d)?\d{2})|(29\/0?2\/(((1[6-9]|[2-9]\d)?(0[48]|[2468][048]|[13579][26])|((16|[2468][048]|[3579][26])00))))|(0?[1-9]|1\d|2[0-8])\/((0?[1-9])|(1[0-2]))\/((1[6-9]|[2-9]\d)?\d{2})) (20|21|22|23|[0-1]?\d):[0-5]?\d:[0-5]?\d$" "2/29/04 20:15:15" 
+"^((((31\/(0?[13578]|1[02]))|((29|30)\/(0?[1,3-9]|1[0-2])))\/(1[6-9]|[2-9]\d)?\d{2})|(29\/0?2\/(((1[6-9]|[2-9]\d)?(0[48]|[2468][048]|[13579][26])|((16|[2468][048]|[3579][26])00))))|(0?[1-9]|1\d|2[0-8])\/((0?[1-9])|(1[0-2]))\/((1[6-9]|[2-9]\d)?\d{2})) (20|21|22|23|[0-1]?\d):[0-5]?\d:[0-5]?\d$" "31/3/4 9:20:17" +"^([a-zA-Z0-9_\-\.]+)@([a-zA-Z0-9_\-\.]+)\.([a-zA-Z]{2,5})$" G "<0>something@someserver.com" +"^([a-zA-Z0-9_\-\.]+)@([a-zA-Z0-9_\-\.]+)\.([a-zA-Z]{2,5})$" G "<0>firstname.lastname@mailserver.domain.com" +"^([a-zA-Z0-9_\-\.]+)@([a-zA-Z0-9_\-\.]+)\.([a-zA-Z]{2,5})$" G "<0>username-something@some-server.nl" +"^([a-zA-Z0-9_\-\.]+)@([a-zA-Z0-9_\-\.]+)\.([a-zA-Z]{2,5})$" "username@someserver.domain.c" +"^([a-zA-Z0-9_\-\.]+)@([a-zA-Z0-9_\-\.]+)\.([a-zA-Z]{2,5})$" "somename@server.domain-com" +"^([a-zA-Z0-9_\-\.]+)@([a-zA-Z0-9_\-\.]+)\.([a-zA-Z]{2,5})$" "someone@something.se_eo" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)|(^([0-9]|[1][0-9]|[2][0-3])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)" G "<0>8am" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)|(^([0-9]|[1][0-9]|[2][0-3])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)" G "<0>8 am" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)|(^([0-9]|[1][0-9]|[2][0-3])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)" G "<0>8:00 am" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)|(^([0-9]|[1][0-9]|[2][0-3])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)" "8a" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)|(^([0-9]|[1][0-9]|[2][0-3])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)" "8 a" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)|(^([0-9]|[1][0-9]|[2][0-3])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)" "8:00 a" +"^([0-9]{2})?(\([0-9]{2})\)([0-9]{3}|[0-9]{4})-[0-9]{4}$" G "<0>55(21)123-4567" 
+"^([0-9]{2})?(\([0-9]{2})\)([0-9]{3}|[0-9]{4})-[0-9]{4}$" G "<0>(11)1234-5678" +"^([0-9]{2})?(\([0-9]{2})\)([0-9]{3}|[0-9]{4})-[0-9]{4}$" G "<0>55(71)4562-2234" +"^([0-9]{2})?(\([0-9]{2})\)([0-9]{3}|[0-9]{4})-[0-9]{4}$" "3434-3432" +"^([0-9]{2})?(\([0-9]{2})\)([0-9]{3}|[0-9]{4})-[0-9]{4}$" "4(23)232-3232" +"^([0-9]{2})?(\([0-9]{2})\)([0-9]{3}|[0-9]{4})-[0-9]{4}$" "55(2)232-232" +"^((([0]?[1-9]|1[0-2])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?))$" G "<0>1:01 AM" +"^((([0]?[1-9]|1[0-2])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?))$" G "<0>23:52:01" +"^((([0]?[1-9]|1[0-2])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?))$" G "<0>03.24.36 AM" +"^((([0]?[1-9]|1[0-2])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?))$" "19:31 AM" +"^((([0]?[1-9]|1[0-2])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?))$" "9:9 PM" +"^((([0]?[1-9]|1[0-2])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?))$" "25:60:61" +"^\d{0,2}(\.\d{1,2})?$" G "<0>99.99" +"^\d{0,2}(\.\d{1,2})?$" G "<0>99" +"^\d{0,2}(\.\d{1,2})?$" G "<0>.99" +"^\d{0,2}(\.\d{1,2})?$" "999.999" +"^\d{0,2}(\.\d{1,2})?$" "999" +"^\d{0,2}(\.\d{1,2})?$" ".999" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?!.*\s).{4,8}$" G "<0>1agdA*$#" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?!.*\s).{4,8}$" G "<0>1agdA*$#" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?!.*\s).{4,8}$" G "<0>1agdA*$#" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?!.*\s).{4,8}$" "wyrn%@*&$# f" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?!.*\s).{4,8}$" "mbndkfh782" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?!.*\s).{4,8}$" "BNfhjdhfjd&*)%#$)" 
+"^([a-zA-Z0-9][-a-zA-Z0-9]*[a-zA-Z0-9]\.)+([a-zA-Z0-9]{3,5})$" G "<0>freshmeat.net" +"^([a-zA-Z0-9][-a-zA-Z0-9]*[a-zA-Z0-9]\.)+([a-zA-Z0-9]{3,5})$" G "<0>123.com" +"^([a-zA-Z0-9][-a-zA-Z0-9]*[a-zA-Z0-9]\.)+([a-zA-Z0-9]{3,5})$" G "<0>TempLate-toolkKt.orG" +"^([a-zA-Z0-9][-a-zA-Z0-9]*[a-zA-Z0-9]\.)+([a-zA-Z0-9]{3,5})$" "-dog.com" +"^([a-zA-Z0-9][-a-zA-Z0-9]*[a-zA-Z0-9]\.)+([a-zA-Z0-9]{3,5})$" "?boy.net" +"^([a-zA-Z0-9][-a-zA-Z0-9]*[a-zA-Z0-9]\.)+([a-zA-Z0-9]{3,5})$" "this.domain" +"^[^']*$" G "<0>asljas" +"^[^']*$" G "<0>%/&89uhuhadjkh" +"^[^']*$" G '<0>"hi there!"' +"^[^']*$" "'hi there!'" +"^[^']*$" "It's 9 o'clock" +"^[^']*$" "'''''" +"(^\(\)$|^\(((\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\),)*(\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\)){1}\)))$" G "<0>((24,((1,2,3),(3,4,5))))" +"(^\(\)$|^\(((\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\),)*(\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\)){1}\)))$" G "<0>((1,((2,3,4),(4,5,6),(96,34,26))),(12,((1,3,4),(4,5,6),(7,8,9))))" +"(^\(\)$|^\(((\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\),)*(\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\)){1}\)))$" G "<0>()" +"(^\(\)$|^\(((\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\),)*(\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\)){1}\)))$" "(24,((1,2,3),(3,4,5)))" +"(^\(\)$|^\(((\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\),)*(\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\)){1}\)))$" "( )" +"(^\(\)$|^\(((\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\),)*(\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\)){1}\)))$" "((23,(12,3,4),(4,5,6)))" +"^[a-zA-Z0-9\s .\-_']+$" G "<0>dony d'gsa" +"^[a-zA-Z0-9\s .\-_']+$" "^[a-zA-Z0-9\s.\-_']+$" 
+"^[_a-zA-Z0-9-]+(\.[_a-zA-Z0-9-]+)*@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.(([0-9]{1,3})|([a-zA-Z]{2,3})|(aero|coop|info|museum|name))$" G "<0>example@example.com" +"^[_a-zA-Z0-9-]+(\.[_a-zA-Z0-9-]+)*@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.(([0-9]{1,3})|([a-zA-Z]{2,3})|(aero|coop|info|museum|name))$" G "<0>foo@bar.info" +"^[_a-zA-Z0-9-]+(\.[_a-zA-Z0-9-]+)*@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.(([0-9]{1,3})|([a-zA-Z]{2,3})|(aero|coop|info|museum|name))$" G "<0>blah@127.0.0.1" +"^[_a-zA-Z0-9-]+(\.[_a-zA-Z0-9-]+)*@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.(([0-9]{1,3})|([a-zA-Z]{2,3})|(aero|coop|info|museum|name))$" "broken@@example.com" +"^[_a-zA-Z0-9-]+(\.[_a-zA-Z0-9-]+)*@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.(([0-9]{1,3})|([a-zA-Z]{2,3})|(aero|coop|info|museum|name))$" "foo@bar.infp" +"^[_a-zA-Z0-9-]+(\.[_a-zA-Z0-9-]+)*@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.(([0-9]{1,3})|([a-zA-Z]{2,3})|(aero|coop|info|museum|name))$" "blah@.nospam.biz" +"^\d{5}(-\d{3})?$" G "<0>13165-000" +"^\d{5}(-\d{3})?$" G "<0>38175-000" +"^\d{5}(-\d{3})?$" G "<0>81470-276" +"^\d{5}(-\d{3})?$" "13165-00" +"^\d{5}(-\d{3})?$" "38175-abc" +"^\d{5}(-\d{3})?$" "81470-2763" +"^\$(\d{1,3}(\,\d{3})*|(\d+))(\.\d{2})?$" G "<0>$0.84" +"^\$(\d{1,3}(\,\d{3})*|(\d+))(\.\d{2})?$" G "<0>$123458" +"^\$(\d{1,3}(\,\d{3})*|(\d+))(\.\d{2})?$" G "<0>$1,234,567.89" +"^\$(\d{1,3}(\,\d{3})*|(\d+))(\.\d{2})?$" "$12,3456.01" +"^\$(\d{1,3}(\,\d{3})*|(\d+))(\.\d{2})?$" "12345" +"^\$(\d{1,3}(\,\d{3})*|(\d+))(\.\d{2})?$" "$1.234" +"([A-Z]:\\[^/:\*\?<>\|]+\.\w{2,6})|(\\{2}[^/:\*\?<>\|]+\.\w{2,6})" G "<0>C:\\temp\\this allows spaces\\web.config" +"([A-Z]:\\[^/:\*\?<>\|]+\.\w{2,6})|(\\{2}[^/:\*\?<>\|]+\.\w{2,6})" G "<0>\\\\Andromeda\\share\\file name.123" +"([A-Z]:\\[^/:\*\?<>\|]+\.\w{2,6})|(\\{2}[^/:\*\?<>\|]+\.\w{2,6})" "tz:\temp\ fi*le?na:m.doc" +"([A-Z]:\\[^/:\*\?<>\|]+\.\w{2,6})|(\\{2}[^/:\*\?<>\|]+\.\w{2,6})" "\\Andromeda\share\filename.a" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])$)|(^([0-9]|[1][0-9]|[2][0-3])$)" G "<0>10:35" 
+"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])$)|(^([0-9]|[1][0-9]|[2][0-3])$)" G "<0>9:20" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])$)|(^([0-9]|[1][0-9]|[2][0-3])$)" G "<0>23" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])$)|(^([0-9]|[1][0-9]|[2][0-3])$)" "24:00" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])$)|(^([0-9]|[1][0-9]|[2][0-3])$)" "20 PM" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])$)|(^([0-9]|[1][0-9]|[2][0-3])$)" "20:15 PM" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(\.[0-9][0-9])?$" G "<0>$3,023,123.34" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(\.[0-9][0-9])?$" G "<0>9,876,453" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(\.[0-9][0-9])?$" G "<0>123456.78" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(\.[0-9][0-9])?$" "4,33,234.34" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(\.[0-9][0-9])?$" "$1.234" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(\.[0-9][0-9])?$" "abc" +"^\$?\d+(\.(\d{2}))?$" G "<0>$2.43" +"^\$?\d+(\.(\d{2}))?$" G "<0>2.02" +"^\$?\d+(\.(\d{2}))?$" G "<0>$2112" +"^\$?\d+(\.(\d{2}))?$" "2.1" +"^\$?\d+(\.(\d{2}))?$" "$.14" +"^\$?\d+(\.(\d{2}))?$" "$2,222.12" +/("[^"]*")|('[^\r]*)(\r\n)?/ G '<0>"my string"' +/("[^"]*")|('[^\r]*)(\r\n)?/ G '<0>"a string with \u0027 in it"' +/("[^"]*")|('[^\r]*)(\r\n)?/ G "<0>' comment" +/("[^"]*")|('[^\r]*)(\r\n)?/ /asd "/ +"^[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}$" G "<0>BFDB4D31-3E35-4DAB-AFCA-5E6E5C8F61EA" +"^[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}$" G "<0>BFDB4d31-3e35-4dab-afca-5e6e5c8f61ea" +"^[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}$" "qqqBFDB4D31-3E35-4DAB-AFCA-5E6E5C8F61EA" +"^[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}$" "BFDB4D31-3E-4DAB-AFCA-5E6E5C8F61EA" +"^[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}$" "BFDB4D31-3E35-4DAB-AF" +"^\d{2}(\x2e)(\d{3})(-\d{3})?$" G "<0>12.345-678" +"^\d{2}(\x2e)(\d{3})(-\d{3})?$" G 
"<0>23.345-123" +"^\d{2}(\x2e)(\d{3})(-\d{3})?$" G "<0>99.999" +"^\d{2}(\x2e)(\d{3})(-\d{3})?$" "41222-222" +"^\d{2}(\x2e)(\d{3})(-\d{3})?$" "3.444-233" +"^\d{2}(\x2e)(\d{3})(-\d{3})?$" "43.324444" +"^\d{2}(\u002e)(\d{3})(-\d{3})?$" G "<0>12.345-678" +"^\d{2}(\u002e)(\d{3})(-\d{3})?$" G "<0>23.345-123" +"^\d{2}(\u002e)(\d{3})(-\d{3})?$" G "<0>99.999" +"^\d{2}(\u002e)(\d{3})(-\d{3})?$" "41222-222" +"^\d{2}(\u002e)(\d{3})(-\d{3})?$" "3.444-233" +"^\d{2}(\u002e)(\d{3})(-\d{3})?$" "43.324444" +#"^(([a-zA-Z]:)|(\\{2}\w+)\$?)(\\(\w[\w ]*))+\.(txt|TXT)$" G "<0>c:\file.txt" # TODO: debug +#"^(([a-zA-Z]:)|(\\{2}\w+)\$?)(\\(\w[\w ]*))+\.(txt|TXT)$" G "<0>c:\folder\sub folder\file.txt" # TODO: debug +#"^(([a-zA-Z]:)|(\\{2}\w+)\$?)(\\(\w[\w ]*))+\.(txt|TXT)$" G "<0>\\network\folder\file.txt" # TODO: debug +"^(([a-zA-Z]:)|(\\{2}\w+)\$?)(\\(\w[\w ]*))+\.(txt|TXT)$" "C:" +"^(([a-zA-Z]:)|(\\{2}\w+)\$?)(\\(\w[\w ]*))+\.(txt|TXT)$" "C:\file.xls" +"^(([a-zA-Z]:)|(\\{2}\w+)\$?)(\\(\w[\w ]*))+\.(txt|TXT)$" "folder.txt" +"^[a-zA-Z0-9]+([a-zA-Z0-9\-\.]+)?\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" G "<0>my.domain.com" +"^[a-zA-Z0-9]+([a-zA-Z0-9\-\.]+)?\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" G "<0>regexlib.com" +"^[a-zA-Z0-9]+([a-zA-Z0-9\-\.]+)?\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" G "<0>big-reg.com" +"^[a-zA-Z0-9]+([a-zA-Z0-9\-\.]+)?\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" ".mydomain.com" +"^[a-zA-Z0-9]+([a-zA-Z0-9\-\.]+)?\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" "regexlib.comm" +"^[a-zA-Z0-9]+([a-zA-Z0-9\-\.]+)?\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" "-bigreg.com" +"^\d{4}[\-\/\s]?((((0[13578])|(1[02]))[\-\/\s]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\-\/\s]?(([0-2][0-9])|(30)))|(02[\-\/\s]?[0-2][0-9]))$" G "<0>0001-12-31" +"^\d{4}[\-\/\s ]?((((0[13578])|(1[02]))[\-\/\s ]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\-\/\s ]?(([0-2][0-9])|(30)))|(02[\-\/\s ]?[0-2][0-9]))$" G "<0>9999 09 30" 
+"^\d{4}[\-\/\s]?((((0[13578])|(1[02]))[\-\/\s]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\-\/\s]?(([0-2][0-9])|(30)))|(02[\-\/\s]?[0-2][0-9]))$" G "<0>2002/03/03" +"^\d{4}[\-\/\s]?((((0[13578])|(1[02]))[\-\/\s]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\-\/\s]?(([0-2][0-9])|(30)))|(02[\-\/\s]?[0-2][0-9]))$" "0001\\02\\30" +"^\d{4}[\-\/\s]?((((0[13578])|(1[02]))[\-\/\s]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\-\/\s]?(([0-2][0-9])|(30)))|(02[\-\/\s]?[0-2][0-9]))$" "9999.15.01" +"^\d{4}[\-\/\s]?((((0[13578])|(1[02]))[\-\/\s]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\-\/\s]?(([0-2][0-9])|(30)))|(02[\-\/\s]?[0-2][0-9]))$" "2002/3/3" +"^http\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(/\S*)?$" G "<0>http://psychopop.org" +"^http\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(/\S*)?$" G "<0>http://www.edsroom.com/newUser.asp" +"^http\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(/\S*)?$" G "<0>http://unpleasant.jarrin.net/markov/inde" +"^http\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(/\S*)?$" "ftp://psychopop.org" +"^http\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(/\S*)?$" "http://www.edsroom/" +"^http\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(/\S*)?$" "http://un/pleasant.jarrin.net/markov/index.asp" +"^( [1-9]|[1-9]|0[1-9]|10|11|12)[0-5]\d$" G "<0>1145" +"^( [1-9]|[1-9]|0[1-9]|10|11|12)[0-5]\d$" G "<0>933" +"^( [1-9]|[1-9]|0[1-9]|10|11|12)[0-5]\d$" G "<0> 801" +"^( [1-9]|[1-9]|0[1-9]|10|11|12)[0-5]\d$" "0000" +"^( [1-9]|[1-9]|0[1-9]|10|11|12)[0-5]\d$" "1330" +"^( [1-9]|[1-9]|0[1-9]|10|11|12)[0-5]\d$" "8:30" +"^\d{1,2}\/\d{2,4}$" G "<0>9/02" +"^\d{1,2}\/\d{2,4}$" G "<0>09/2002" +"^\d{1,2}\/\d{2,4}$" G "<0>09/02" +"^\d{1,2}\/\d{2,4}$" "Fall 2002" +"^\d{1,2}\/\d{2,4}$" "Sept 2002" +"^(|(0[1-9])|(1[0-2]))\/((0[1-9])|(1\d)|(2\d)|(3[0-1]))\/((\d{4}))$" G "<0>01/01/2001" +"^(|(0[1-9])|(1[0-2]))\/((0[1-9])|(1\d)|(2\d)|(3[0-1]))\/((\d{4}))$" G "<0>02/30/2001" +"^(|(0[1-9])|(1[0-2]))\/((0[1-9])|(1\d)|(2\d)|(3[0-1]))\/((\d{4}))$" G "<0>12/31/2002" +"^(|(0[1-9])|(1[0-2]))\/((0[1-9])|(1\d)|(2\d)|(3[0-1]))\/((\d{4}))$" "1/1/02" 
+"^(|(0[1-9])|(1[0-2]))\/((0[1-9])|(1\d)|(2\d)|(3[0-1]))\/((\d{4}))$" "1/1/2002" +"^(|(0[1-9])|(1[0-2]))\/((0[1-9])|(1\d)|(2\d)|(3[0-1]))\/((\d{4}))$" "1/25/2002" +"^(1?(-?\d{3})-?)?(\d{3})(-?\d{4})$" G "<0>15615552323" +"^(1?(-?\d{3})-?)?(\d{3})(-?\d{4})$" G "<0>1-561-555-1212" +"^(1?(-?\d{3})-?)?(\d{3})(-?\d{4})$" G "<0>5613333" +"^(1?(-?\d{3})-?)?(\d{3})(-?\d{4})$" "1-555-5555" +"^(1?(-?\d{3})-?)?(\d{3})(-?\d{4})$" "15553333" +"^(1?(-?\d{3})-?)?(\d{3})(-?\d{4})$" "0-561-555-1212" +'<[^>]*name[\s]*=[\s]*"?[^\w_]*"?[^>]*>' G '<0>' +'<[^>]*name[\s]*=[\s]*"?[^\w_]*"?[^>]*>' G '<0>" # TODO: \w in pattern +'<[^>]*name[\s]*=[\s]*"?[^\w_]*"?[^>]*>' '' # TODO: \w in pattern +'<[^>]*name[\s]*=[\s]*"?[^\w_]*"?[^>]*>' "The dirty brown fox stank like" +"^(1|01|2|02|3|03|4|04|5|05|6|06|7|07|8|08|9|09|10|11|12{1,2}):(([0-5]{1}[0-9]{1}\s{0,1})([AM|PM|am|pm]{2,2}))\W{0}$" G "<0>1:00 AM" +"^(1|01|2|02|3|03|4|04|5|05|6|06|7|07|8|08|9|09|10|11|12{1,2}):(([0-5]{1}[0-9]{1}\s{0,1})([AM|PM|am|pm]{2,2}))\W{0}$" G "<0>12:00 PM" +"^(1|01|2|02|3|03|4|04|5|05|6|06|7|07|8|08|9|09|10|11|12{1,2}):(([0-5]{1}[0-9]{1}\s{0,1})([AM|PM|am|pm]{2,2}))\W{0}$" G "<0>1:00am" +"^(1|01|2|02|3|03|4|04|5|05|6|06|7|07|8|08|9|09|10|11|12{1,2}):(([0-5]{1}[0-9]{1}\s{0,1})([AM|PM|am|pm]{2,2}))\W{0}$" "24:00" +"^\d*$" G "<0>123" +"^\d*$" G "<0>000" +"^\d*$" G "<0>43" +"^\d*$" "asbc" +"^\d*$" "-34" +"^\d*$" "3.1415" +"^[-+]?\d*$" G "<0>123" +"^[-+]?\d*$" G "<0>-123" +"^[-+]?\d*$" G "<0>+123" +"^[-+]?\d*$" "abc" +"^[-+]?\d*$" "3.14159" +"^[-+]?\d*$" "-3.14159" +"^\d*\.?\d*$" G "<0>123" +"^\d*\.?\d*$" G "<0>3.14159" +"^\d*\.?\d*$" G "<0>.234" +"^\d*\.?\d*$" "abc" +"^\d*\.?\d*$" "-3.14159" +"^\d*\.?\d*$" "3.4.2" +"^((\d{5}-\d{4})|(\d{5})|([A-Z]\d[A-Z]\s\d[A-Z]\d))$" G "<0>44240" +"^((\d{5}-\d{4})|(\d{5})|([A-Z]\d[A-Z]\s\d[A-Z]\d))$" G "<0>44240-5555" +"^((\d{5}-\d{4})|(\d{5})|([A-Z]\d[A-Z]\s\d[A-Z]\d))$" G "<0>T2P 3C7" +"^((\d{5}-\d{4})|(\d{5})|([A-Z]\d[A-Z]\s\d[A-Z]\d))$" "44240ddd" 
+"^((\d{5}-\d{4})|(\d{5})|([A-Z]\d[A-Z]\s\d[A-Z]\d))$" "t44240-55" +"^((\d{5}-\d{4})|(\d{5})|([A-Z]\d[A-Z]\s\d[A-Z]\d))$" "t2p3c7" +"^[\\(]{0,1}([0-9]){3}[\\)]{0,1}[ ]?([^0-1]){1}([0-9]){2}[ ]?[-]?[ ]?([0-9]){4}[ ]*((x){0,1}([0-9]){1,5}){0,1}$" G "<0>(910)456-7890" +"^[\\(]{0,1}([0-9]){3}[\\)]{0,1}[ ]?([^0-1]){1}([0-9]){2}[ ]?[-]?[ ]?([0-9]){4}[ ]*((x){0,1}([0-9]){1,5}){0,1}$" G "<0>(910)456-8970 x12" +"^[\\(]{0,1}([0-9]){3}[\\)]{0,1}[ ]?([^0-1]){1}([0-9]){2}[ ]?[-]?[ ]?([0-9]){4}[ ]*((x){0,1}([0-9]){1,5}){0,1}$" G "<0>(910)456-8970 1211" +"^[\\(]{0,1}([0-9]){3}[\\)]{0,1}[ ]?([^0-1]){1}([0-9]){2}[ ]?[-]?[ ]?([0-9]){4}[ ]*((x){0,1}([0-9]){1,5}){0,1}$" "(910) 156-7890" +"^[\\(]{0,1}([0-9]){3}[\\)]{0,1}[ ]?([^0-1]){1}([0-9]){2}[ ]?[-]?[ ]?([0-9]){4}[ ]*((x){0,1}([0-9]){1,5}){0,1}$" "(910) 056-7890" +"^[\\(]{0,1}([0-9]){3}[\\)]{0,1}[ ]?([^0-1]){1}([0-9]){2}[ ]?[-]?[ ]?([0-9]){4}[ ]*((x){0,1}([0-9]){1,5}){0,1}$" "(910) 556-7890 x" +"^((0?[1-9]|[12][1-9]|3[01])\.(0?[13578]|1[02])\.20[0-9]{2}|(0?[1-9]|[12][1-9]|30)\.(0?[13456789]|1[012])\.20[0-9]{2}|(0?[1-9]|1[1-9]|2[0-8])\.(0?[123456789]|1[012])\.20[0-9]{2}|(0?[1-9]|[12][1-9])\.(0?[123456789]|1[012])\.20(00|04|08|12|16|20|24|28|32|36|40|44|48|52|56|60|64|68|72|76|80|84|88|92|96))$" G "<0>31.01.2002" +"^((0?[1-9]|[12][1-9]|3[01])\.(0?[13578]|1[02])\.20[0-9]{2}|(0?[1-9]|[12][1-9]|30)\.(0?[13456789]|1[012])\.20[0-9]{2}|(0?[1-9]|1[1-9]|2[0-8])\.(0?[123456789]|1[012])\.20[0-9]{2}|(0?[1-9]|[12][1-9])\.(0?[123456789]|1[012])\.20(00|04|08|12|16|20|24|28|32|36|40|44|48|52|56|60|64|68|72|76|80|84|88|92|96))$" G "<0>29.2.2004" +"^((0?[1-9]|[12][1-9]|3[01])\.(0?[13578]|1[02])\.20[0-9]{2}|(0?[1-9]|[12][1-9]|30)\.(0?[13456789]|1[012])\.20[0-9]{2}|(0?[1-9]|1[1-9]|2[0-8])\.(0?[123456789]|1[012])\.20[0-9]{2}|(0?[1-9]|[12][1-9])\.(0?[123456789]|1[012])\.20(00|04|08|12|16|20|24|28|32|36|40|44|48|52|56|60|64|68|72|76|80|84|88|92|96))$" G "<0>09.02.2005" 
+"^((0?[1-9]|[12][1-9]|3[01])\.(0?[13578]|1[02])\.20[0-9]{2}|(0?[1-9]|[12][1-9]|30)\.(0?[13456789]|1[012])\.20[0-9]{2}|(0?[1-9]|1[1-9]|2[0-8])\.(0?[123456789]|1[012])\.20[0-9]{2}|(0?[1-9]|[12][1-9])\.(0?[123456789]|1[012])\.20(00|04|08|12|16|20|24|28|32|36|40|44|48|52|56|60|64|68|72|76|80|84|88|92|96))$" "31.11.2002" +"^((0?[1-9]|[12][1-9]|3[01])\.(0?[13578]|1[02])\.20[0-9]{2}|(0?[1-9]|[12][1-9]|30)\.(0?[13456789]|1[012])\.20[0-9]{2}|(0?[1-9]|1[1-9]|2[0-8])\.(0?[123456789]|1[012])\.20[0-9]{2}|(0?[1-9]|[12][1-9])\.(0?[123456789]|1[012])\.20(00|04|08|12|16|20|24|28|32|36|40|44|48|52|56|60|64|68|72|76|80|84|88|92|96))$" "29.2.2002" +"^((0?[1-9]|[12][1-9]|3[01])\.(0?[13578]|1[02])\.20[0-9]{2}|(0?[1-9]|[12][1-9]|30)\.(0?[13456789]|1[012])\.20[0-9]{2}|(0?[1-9]|1[1-9]|2[0-8])\.(0?[123456789]|1[012])\.20[0-9]{2}|(0?[1-9]|[12][1-9])\.(0?[123456789]|1[012])\.20(00|04|08|12|16|20|24|28|32|36|40|44|48|52|56|60|64|68|72|76|80|84|88|92|96))$" "33.06.2000" +"^(0[1-9]|1[0-2])\/((0[1-9]|2\d)|3[0-1])\/(19\d\d|200[0-3])$" G "<0>12/31/2003" +"^(0[1-9]|1[0-2])\/((0[1-9]|2\d)|3[0-1])\/(19\d\d|200[0-3])$" G "<0>01/01/1900" +"^(0[1-9]|1[0-2])\/((0[1-9]|2\d)|3[0-1])\/(19\d\d|200[0-3])$" G "<0>11/31/2002" +"^(0[1-9]|1[0-2])\/((0[1-9]|2\d)|3[0-1])\/(19\d\d|200[0-3])$" "1/1/2002" +"^(0[1-9]|1[0-2])\/((0[1-9]|2\d)|3[0-1])\/(19\d\d|200[0-3])$" "01/01/02" +"^(0[1-9]|1[0-2])\/((0[1-9]|2\d)|3[0-1])\/(19\d\d|200[0-3])$" "01/01/2004" +"^((((([13578])|(1[0-2]))[\-\/\s]?(([1-9])|([1-2][0-9])|(3[01])))|((([469])|(11))[\-\/\s]?(([1-9])|([1-2][0-9])|(30)))|(2[\-\/\s]?(([1-9])|([1-2][0-9]))))[\-\/\s]?\d{4})(\s((([1-9])|(1[02]))\:([0-5][0-9])((\s)|(\:([0-5][0-9])\s))([AM|PM|am|pm]{2,2})))?$" G "<0>3/3/2003" +"^((((([13578])|(1[0-2]))[\-\/\s]?(([1-9])|([1-2][0-9])|(3[01])))|((([469])|(11))[\-\/\s]?(([1-9])|([1-2][0-9])|(30)))|(2[\-\/\s]?(([1-9])|([1-2][0-9]))))[\-\/\s]?\d{4})(\s((([1-9])|(1[02]))\:([0-5][0-9])((\s)|(\:([0-5][0-9])\s))([AM|PM|am|pm]{2,2})))?$" G "<0>3/3/2002 3:33 pm" 
+"^((((([13578])|(1[0-2]))[\-\/\s]?(([1-9])|([1-2][0-9])|(3[01])))|((([469])|(11))[\-\/\s]?(([1-9])|([1-2][0-9])|(30)))|(2[\-\/\s]?(([1-9])|([1-2][0-9]))))[\-\/\s]?\d{4})(\s((([1-9])|(1[02]))\:([0-5][0-9])((\s)|(\:([0-5][0-9])\s))([AM|PM|am|pm]{2,2})))?$" G "<0>3/3/2003 3:33:33 am" +"^((((([13578])|(1[0-2]))[\-\/\s]?(([1-9])|([1-2][0-9])|(3[01])))|((([469])|(11))[\-\/\s]?(([1-9])|([1-2][0-9])|(30)))|(2[\-\/\s]?(([1-9])|([1-2][0-9]))))[\-\/\s]?\d{4})(\s((([1-9])|(1[02]))\:([0-5][0-9])((\s)|(\:([0-5][0-9])\s))([AM|PM|am|pm]{2,2})))?$" "13/1/2002" +"^((((([13578])|(1[0-2]))[\-\/\s]?(([1-9])|([1-2][0-9])|(3[01])))|((([469])|(11))[\-\/\s]?(([1-9])|([1-2][0-9])|(30)))|(2[\-\/\s]?(([1-9])|([1-2][0-9]))))[\-\/\s]?\d{4})(\s((([1-9])|(1[02]))\:([0-5][0-9])((\s)|(\:([0-5][0-9])\s))([AM|PM|am|pm]{2,2})))?$" "3/3/2002 3:33" +"^((((([13578])|(1[0-2]))[\-\/\s]?(([1-9])|([1-2][0-9])|(3[01])))|((([469])|(11))[\-\/\s]?(([1-9])|([1-2][0-9])|(30)))|(2[\-\/\s]?(([1-9])|([1-2][0-9]))))[\-\/\s]?\d{4})(\s((([1-9])|(1[02]))\:([0-5][0-9])((\s)|(\:([0-5][0-9])\s))([AM|PM|am|pm]{2,2})))?$" "31/3/2002" +"([a-zA-Z]:(\\w+)*\\[a-zA-Z0_9]+)?.xls" G "<0>E:\DyAGT\SD01A_specV2.xls" +"([a-zA-Z]:(\\w+)*\\[a-zA-Z0_9]+)?.xls" "E:\DyAGT\SD01A_specV2.txt" +"(((0[13578]|10|12)([-./])(0[1-9]|[12][0-9]|3[01])([-./])(\d{4}))|((0[469]|11)([-./])([0][1-9]|[12][0-9]|30)([-./])(\d{4}))|((2)([-./])(0[1-9]|1[0-9]|2[0-8])([-./])(\d{4}))|((2)(\.|-|\/)(29)([-./])([02468][048]00))|((2)([-./])(29)([-./])([13579][26]00))|((2)([-./])(29)([-./])([0-9][0-9][0][48]))|((2)([-./])(29)([-./])([0-9][0-9][2468][048]))|((2)([-./])(29)([-./])([0-9][0-9][13579][26])))" G "<0>02/29/2084" 
+"(((0[13578]|10|12)([-./])(0[1-9]|[12][0-9]|3[01])([-./])(\d{4}))|((0[469]|11)([-./])([0][1-9]|[12][0-9]|30)([-./])(\d{4}))|((2)([-./])(0[1-9]|1[0-9]|2[0-8])([-./])(\d{4}))|((2)(\.|-|\/)(29)([-./])([02468][048]00))|((2)([-./])(29)([-./])([13579][26]00))|((2)([-./])(29)([-./])([0-9][0-9][0][48]))|((2)([-./])(29)([-./])([0-9][0-9][2468][048]))|((2)([-./])(29)([-./])([0-9][0-9][13579][26])))" G "<0>01/31/2000" +"(((0[13578]|10|12)([-./])(0[1-9]|[12][0-9]|3[01])([-./])(\d{4}))|((0[469]|11)([-./])([0][1-9]|[12][0-9]|30)([-./])(\d{4}))|((2)([-./])(0[1-9]|1[0-9]|2[0-8])([-./])(\d{4}))|((2)(\.|-|\/)(29)([-./])([02468][048]00))|((2)([-./])(29)([-./])([13579][26]00))|((2)([-./])(29)([-./])([0-9][0-9][0][48]))|((2)([-./])(29)([-./])([0-9][0-9][2468][048]))|((2)([-./])(29)([-./])([0-9][0-9][13579][26])))" G "<0>11/30/2000" +"(((0[13578]|10|12)([-./])(0[1-9]|[12][0-9]|3[01])([-./])(\d{4}))|((0[469]|11)([-./])([0][1-9]|[12][0-9]|30)([-./])(\d{4}))|((2)([-./])(0[1-9]|1[0-9]|2[0-8])([-./])(\d{4}))|((2)(\.|-|\/)(29)([-./])([02468][048]00))|((2)([-./])(29)([-./])([13579][26]00))|((2)([-./])(29)([-./])([0-9][0-9][0][48]))|((2)([-./])(29)([-./])([0-9][0-9][2468][048]))|((2)([-./])(29)([-./])([0-9][0-9][13579][26])))" "02/29/2083" +"(((0[13578]|10|12)([-./])(0[1-9]|[12][0-9]|3[01])([-./])(\d{4}))|((0[469]|11)([-./])([0][1-9]|[12][0-9]|30)([-./])(\d{4}))|((2)([-./])(0[1-9]|1[0-9]|2[0-8])([-./])(\d{4}))|((2)(\.|-|\/)(29)([-./])([02468][048]00))|((2)([-./])(29)([-./])([13579][26]00))|((2)([-./])(29)([-./])([0-9][0-9][0][48]))|((2)([-./])(29)([-./])([0-9][0-9][2468][048]))|((2)([-./])(29)([-./])([0-9][0-9][13579][26])))" "11/31/2000" 
+"(((0[13578]|10|12)([-./])(0[1-9]|[12][0-9]|3[01])([-./])(\d{4}))|((0[469]|11)([-./])([0][1-9]|[12][0-9]|30)([-./])(\d{4}))|((2)([-./])(0[1-9]|1[0-9]|2[0-8])([-./])(\d{4}))|((2)(\.|-|\/)(29)([-./])([02468][048]00))|((2)([-./])(29)([-./])([13579][26]00))|((2)([-./])(29)([-./])([0-9][0-9][0][48]))|((2)([-./])(29)([-./])([0-9][0-9][2468][048]))|((2)([-./])(29)([-./])([0-9][0-9][13579][26])))" "01/32/2000" +"^[a-zA-Z0-9\s .\-]+$" G "<0>2222 Mock St." # TODO: \s in patterns not implemented +"^[a-zA-Z0-9\s .\-]+$" G "<0>1 A St." +"^[a-zA-Z0-9\s .\-]+$" G "<0>555-1212" +"^[a-zA-Z0-9\s.\-]+$" "[A Street]" +"^[a-zA-Z0-9\s.\-]+$" "(3 A St.)" +"^[a-zA-Z0-9\s.\-]+$" "{34 C Ave.}" +"^[a-zA-Z0-9\s.\-]+$" "Last.*?(\d+.?\d*)" +"^[a-zA-Z0-9\s .\-]+$" G " Last1-(123)-123-1234" +"^([0-9]( |-)?)?(\(?[0-9]{3}\)?|[0-9]{3})( |-)?([0-9]{3}( |-)?[0-9]{4}|[a-zA-Z0-9]{7})$" G "<0>123 123 1234" +"^([0-9]( |-)?)?(\(?[0-9]{3}\)?|[0-9]{3})( |-)?([0-9]{3}( |-)?[0-9]{4}|[a-zA-Z0-9]{7})$" G "<0>1-800-ALPHNUM" +"^([0-9]( |-)?)?(\(?[0-9]{3}\)?|[0-9]{3})( |-)?([0-9]{3}( |-)?[0-9]{4}|[a-zA-Z0-9]{7})$" "1.123.123.1234" +"^([0-9]( |-)?)?(\(?[0-9]{3}\)?|[0-9]{3})( |-)?([0-9]{3}( |-)?[0-9]{4}|[a-zA-Z0-9]{7})$" "(123)-1234-123" +"^([0-9]( |-)?)?(\(?[0-9]{3}\)?|[0-9]{3})( |-)?([0-9]{3}( |-)?[0-9]{4}|[a-zA-Z0-9]{7})$" "123-1234" +"^([0-1][0-9]|[2][0-3]):([0-5][0-9])$" G "<0>02:04" +"^([0-1][0-9]|[2][0-3]):([0-5][0-9])$" G "<0>16:56" +"^([0-1][0-9]|[2][0-3]):([0-5][0-9])$" G "<0>23:59" +"^([0-1][0-9]|[2][0-3]):([0-5][0-9])$" "02:00 PM" +"^([0-1][0-9]|[2][0-3]):([0-5][0-9])$" "PM2:00" +"^([0-1][0-9]|[2][0-3]):([0-5][0-9])$" "24:00" +"^[0,1]?\d{1}\/(([0-2]?\d{1})|([3][0,1]{1}))\/(([1]{1}[9]{1}[9]{1}\d{1})|([2-9]{1}\d{3}))$" G "<0>01/01/1990" +"^[0,1]?\d{1}\/(([0-2]?\d{1})|([3][0,1]{1}))\/(([1]{1}[9]{1}[9]{1}\d{1})|([2-9]{1}\d{3}))$" G "<0>12/12/9999" +"^[0,1]?\d{1}\/(([0-2]?\d{1})|([3][0,1]{1}))\/(([1]{1}[9]{1}[9]{1}\d{1})|([2-9]{1}\d{3}))$" G "<0>3/28/2001" 
+"^[0,1]?\d{1}\/(([0-2]?\d{1})|([3][0,1]{1}))\/(([1]{1}[9]{1}[9]{1}\d{1})|([2-9]{1}\d{3}))$" "3-8-01" +"^[0,1]?\d{1}\/(([0-2]?\d{1})|([3][0,1]{1}))\/(([1]{1}[9]{1}[9]{1}\d{1})|([2-9]{1}\d{3}))$" "13/32/1001" +"^[0,1]?\d{1}\/(([0-2]?\d{1})|([3][0,1]{1}))\/(([1]{1}[9]{1}[9]{1}\d{1})|([2-9]{1}\d{3}))$" "03/32/1989" +"((\(\d{3}\)?)|(\d{3}))([\s \-./]?)(\d{3})([\s \-./]?)(\d{4})" G "<0>1.2123644567" +"((\(\d{3}\)?)|(\d{3}))([\s \-./]?)(\d{3})([\s \-./]?)(\d{4})" G "<0>0-234.567/8912" +"((\(\d{3}\)?)|(\d{3}))([\s \-./]?)(\d{3})([\s \-./]?)(\d{4})" G "<0>1-(212)-123 4567" +"((\(\d{3}\)?)|(\d{3}))([\s \-./]?)(\d{3})([\s \-./]?)(\d{4})" "0-212364345" +"((\(\d{3}\)?)|(\d{3}))([\s \-./]?)(\d{3})([\s \-./]?)(\d{4})" "1212-364,4321" +"((\(\d{3}\)?)|(\d{3}))([\s \-./]?)(\d{3})([\s \-./]?)(\d{4})" "0212\345/6789" +"^([0-9]{6}[\s \-]{1}[0-9]{12}|[0-9]{18})$" G "<0>000000 000000000000" +"^([0-9]{6}[\s \-]{1}[0-9]{12}|[0-9]{18})$" G "<0>000000-000000000000" +"^([0-9]{6}[\s \-]{1}[0-9]{12}|[0-9]{18})$" G "<0>000000000000000000" +"^([0-9]{6}[\s \-]{1}[0-9]{12}|[0-9]{18})$" "000000_000000000000" +"^(([1-9])|(0[1-9])|(1[0-2]))\/((0[1-9])|([1-31]))\/((\d{2})|(\d{4}))$" G "<0>01/01/2001" +"^(([1-9])|(0[1-9])|(1[0-2]))\/((0[1-9])|([1-31]))\/((\d{2})|(\d{4}))$" G "<0>1/1/2001" +"^(([1-9])|(0[1-9])|(1[0-2]))\/((0[1-9])|([1-31]))\/((\d{2})|(\d{4}))$" G "<0>01/1/01" +"^(([1-9])|(0[1-9])|(1[0-2]))\/((0[1-9])|([1-31]))\/((\d{2})|(\d{4}))$" "13/01/2001" +"^(([1-9])|(0[1-9])|(1[0-2]))\/((0[1-9])|([1-31]))\/((\d{2})|(\d{4}))$" "1/2/100" +"^(([1-9])|(0[1-9])|(1[0-2]))\/((0[1-9])|([1-31]))\/((\d{2})|(\d{4}))$" "09/32/2001" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(.[0-9][0-9])?$" G "<0>$3,023,123.34" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(.[0-9][0-9])?$" G "<0>9,876,453" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(.[0-9][0-9])?$" G "<0>123456.78" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(.[0-9][0-9])?$" "4,33,234.34" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(.[0-9][0-9])?$" 
"$1.234" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(.[0-9][0-9])?$" "abc" +"^\d{5}$|^\d{5}-\d{4}$" G "<0>55555-5555" +"^\d{5}$|^\d{5}-\d{4}$" G "<0>34564-3342" +"^\d{5}$|^\d{5}-\d{4}$" G "<0>90210" +"^\d{5}$|^\d{5}-\d{4}$" "434454444" +"^\d{5}$|^\d{5}-\d{4}$" "645-32-2345" +"^\d{5}$|^\d{5}-\d{4}$" "abc" +"^\d{3}-\d{2}-\d{4}$" G "<0>333-22-4444" +"^\d{3}-\d{2}-\d{4}$" G "<0>123-45-6789" +"^\d{3}-\d{2}-\d{4}$" "123456789" +"^\d{3}-\d{2}-\d{4}$" "SSN" +"^[2-9]\d{2}-\d{3}-\d{4}$" G "<0>800-555-5555" +"^[2-9]\d{2}-\d{3}-\d{4}$" G "<0>333-444-5555" +"^[2-9]\d{2}-\d{3}-\d{4}$" G "<0>212-666-1234" +"^[2-9]\d{2}-\d{3}-\d{4}$" "000-000-0000" +"^[2-9]\d{2}-\d{3}-\d{4}$" "123-456-7890" +"^[2-9]\d{2}-\d{3}-\d{4}$" "2126661234" +"^\d{5}-\d{4}|\d{5}|[A-Z]\d[A-Z] \d[A-Z]\d$" G "<0>44240" +"^\d{5}-\d{4}|\d{5}|[A-Z]\d[A-Z] \d[A-Z]\d$" G "<0>44240-5555" +"^\d{5}-\d{4}|\d{5}|[A-Z]\d[A-Z] \d[A-Z]\d$" G "<0>G3H 6A3" +"^\d{5}-\d{4}|\d{5}|[A-Z]\d[A-Z] \d[A-Z]\d$" "Ohio" +"^\d{5}-\d{4}|\d{5}|[A-Z]\d[A-Z] \d[A-Z]\d$" "abc" +"^\d{5}-\d{4}|\d{5}|[A-Z]\d[A-Z] \d[A-Z]\d$" "g3h6a3" +"[0-9]{4}\s*[a-zA-Z]{2}" G "<0>1054 WD" +"[0-9]{4}\s*[a-zA-Z]{2}" G "<0>1054WD" +"[0-9]{4}\s*[a-zA-Z]{2}" G "<0>1054 wd" +"[0-9]{4}\s*[a-zA-Z]{2}" "10543" +"(^1300\d{6}$)|(^1800|1900|1902\d{6}$)|(^0[2|3|7|8]{1}[0-9]{8}$)|(^13\d{4}$)|(^04\d{2,3}\d{6}$)" G "<0>0732105432" +"(^1300\d{6}$)|(^1800|1900|1902\d{6}$)|(^0[2|3|7|8]{1}[0-9]{8}$)|(^13\d{4}$)|(^04\d{2,3}\d{6}$)" G "<0>1300333444" +"(^1300\d{6}$)|(^1800|1900|1902\d{6}$)|(^0[2|3|7|8]{1}[0-9]{8}$)|(^13\d{4}$)|(^04\d{2,3}\d{6}$)" G "<0>131313" +"(^1300\d{6}$)|(^1800|1900|1902\d{6}$)|(^0[2|3|7|8]{1}[0-9]{8}$)|(^13\d{4}$)|(^04\d{2,3}\d{6}$)" "32105432" +"(^1300\d{6}$)|(^1800|1900|1902\d{6}$)|(^0[2|3|7|8]{1}[0-9]{8}$)|(^13\d{4}$)|(^04\d{2,3}\d{6}$)" "13000456" +"^((https?|ftp)\://((\[?(\d{1,3}\.){3}\d{1,3}\]?)|(([\-a-zA-Z0-9]+\.)+[a-zA-Z]{2,4}))(\:\d+)?(/[\-a-zA-Z0-9._?,'+\&%$#=~\\]+)*/?)$" G "<0>http://207.68.172.254/home.ashx" 
+"^((https?|ftp)\://((\[?(\d{1,3}\.){3}\d{1,3}\]?)|(([\-a-zA-Z0-9]+\.)+[a-zA-Z]{2,4}))(\:\d+)?(/[\-a-zA-Z0-9._?,'+\&%$#=~\\]+)*/?)$" G "<0>ftp://ftp.netscape.com/" +"^((https?|ftp)\://((\[?(\d{1,3}\.){3}\d{1,3}\]?)|(([\-a-zA-Z0-9]+\.)+[a-zA-Z]{2,4}))(\:\d+)?(/[\-a-zA-Z0-9._?,'+\&%$#=~\\]+)*/?)$" G "<0>https://www.brinkster.com/login.asp" +"^((https?|ftp)\://((\[?(\d{1,3}\.){3}\d{1,3}\]?)|(([\-a-zA-Z0-9]+\.)+[a-zA-Z]{2,4}))(\:\d+)?(/[\-a-zA-Z0-9._?,'+\&%$#=~\\]+)*/?)$" "htp://mistake.com/" +"^((https?|ftp)\://((\[?(\d{1,3}\.){3}\d{1,3}\]?)|(([\-a-zA-Z0-9]+\.)+[a-zA-Z]{2,4}))(\:\d+)?(/[\-a-zA-Z0-9._?,'+\&%$#=~\\]+)*/?)$" "http://www_address.com/" +"^((https?|ftp)\://((\[?(\d{1,3}\.){3}\d{1,3}\]?)|(([\-a-zA-Z0-9]+\.)+[a-zA-Z]{2,4}))(\:\d+)?(/[\-a-zA-Z0-9._?,'+\&%$#=~\\]+)*/?)$" "ftp://www.files.com/file with spaces.txt" +"([0-9]{4})-([0-9]{1,2})-([0-9]{1,2})" G "<0>2002-11-03" +"([0-9]{4})-([0-9]{1,2})-([0-9]{1,2})" G "<0>2007-17-08" +"([0-9]{4})-([0-9]{1,2})-([0-9]{1,2})" G "<0>9999-99-99" +"([0-9]{4})-([0-9]{1,2})-([0-9]{1,2})" "2002/17/18" +"([0-9]{4})-([0-9]{1,2})-([0-9]{1,2})" "2002.18.45" +"([0-9]{4})-([0-9]{1,2})-([0-9]{1,2})" "18.45.2002" +"^\$?(\d{1,3}(\,\d{3})*|(\d+))(\.\d{0,2})?$" G "<0>$0,234.50" +"^\$?(\d{1,3}(\,\d{3})*|(\d+))(\.\d{0,2})?$" G "<0>0234.5" +"^\$?(\d{1,3}(\,\d{3})*|(\d+))(\.\d{0,2})?$" G "<0>0,234." 
+"^\$?(\d{1,3}(\,\d{3})*|(\d+))(\.\d{0,2})?$" "$1,23,50" +"^\$?(\d{1,3}(\,\d{3})*|(\d+))(\.\d{0,2})?$" "$123.123" +"(^\d{5}-\d{3}|^\d{2}.\d{3}-\d{3}|\d{8})" G "<0>12.345-678" +"(^\d{5}-\d{3}|^\d{2}.\d{3}-\d{3}|\d{8})" G "<0>12345-678" +"(^\d{5}-\d{3}|^\d{2}.\d{3}-\d{3}|\d{8})" G "<0>12345678" +"(^\d{5}-\d{3}|^\d{2}.\d{3}-\d{3}|\d{8})" "12.345678" +"(^\d{5}-\d{3}|^\d{2}.\d{3}-\d{3}|\d{8})" "12345-1" +"(^\d{5}-\d{3}|^\d{2}.\d{3}-\d{3}|\d{8})" "123" +'^([a-zA-Z]\:|\\)\\([^\\]+\\)*[^\/:*?"<>|]+\.htm(l)?$' G "<0>x:\\test\\testing.htm" +'^([a-zA-Z]\:|\\)\\([^\\]+\\)*[^\/:*?"<>|]+\.htm(l)?$' G "<0>x:\\test\\test#$ ing.html" +'^([a-zA-Z]\:|\\)\\([^\\]+\\)*[^\/:*?"<>|]+\.htm(l)?$' G "<0>\\\\test\testing.html" +'^([a-zA-Z]\:|\\)\\([^\\]+\\)*[^\/:*?"<>|]+\.htm(l)?$' "x:\test\test/ing.htm" +'^([a-zA-Z]\:|\\)\\([^\\]+\\)*[^\/:*?"<>|]+\.htm(l)?$' "x:\test\test*.htm" +'^([a-zA-Z]\:|\\)\\([^\\]+\\)*[^\/:*?"<>|]+\.htm(l)?$' "\\test?<.htm" +"^[1-9]{1}[0-9]{3}$" G "<0>1234" +"^[1-9]{1}[0-9]{3}$" "123" +"^[1-9]{1}[0-9]{3}$" "123A" +"^[A-Z]{1}( |-)?[1-9]{1}[0-9]{3}$" G "<0>A-1234" +"^[A-Z]{1}( |-)?[1-9]{1}[0-9]{3}$" G "<0>A 1234" +"^[A-Z]{1}( |-)?[1-9]{1}[0-9]{3}$" G "<0>A1234" +"^[A-Z]{1}( |-)?[1-9]{1}[0-9]{3}$" "AA-1234" +"^[A-Z]{1}( |-)?[1-9]{1}[0-9]{3}$" "A12345" +"^(F-)?[0-9]{5}$" G "<0>12345" +"^(F-)?[0-9]{5}$" G "<0>F-12345" +"^(F-)?[0-9]{5}$" "F12345" +"^(F-)?[0-9]{5}$" "F-123456" +"^(F-)?[0-9]{5}$" "123456" +"^(V-|I-)?[0-9]{4}$" G "<0>1234" +"^(V-|I-)?[0-9]{4}$" G "<0>V-1234" +"^(V-|I-)?[0-9]{4}$" "12345" +"^[1-9]{1}[0-9]{3} ?[A-Z]{2}$" G "<0>1234 AB" +"^[1-9]{1}[0-9]{3} ?[A-Z]{2}$" G "<0>1234AB" +"^[1-9]{1}[0-9]{3} ?[A-Z]{2}$" "123AB" +"^[1-9]{1}[0-9]{3} ?[A-Z]{2}$" "1234AAA" +"^([1-9]{2}|[0-9][1-9]|[1-9][0-9])[0-9]{3}$" G "<0>12345" +"^([1-9]{2}|[0-9][1-9]|[1-9][0-9])[0-9]{3}$" G "<0>10234" +"^([1-9]{2}|[0-9][1-9]|[1-9][0-9])[0-9]{3}$" G "<0>01234" +"^([1-9]{2}|[0-9][1-9]|[1-9][0-9])[0-9]{3}$" "00123" +"^(/w|/W|[^<>+?$%\{}\&])+$" G "<0>John Doe Sr." 
+"^(/w|/W|[^<>+?$%\{}\&])+$" G "<0>100 Elm St., Suite 25" +"^(/w|/W|[^<>+?$%\{}\&])+$" G "<0>Valerie's Gift Shop" +"^(/w|/W|[^<>+?$%\{}\&])+$" "

Hey

" +/<[a-zA-Z][^>]*\son\w+=(\w+|'[^']*'|"[^"]*")[^>]*>/ G '<0>' +/<[a-zA-Z][^>]*\son\w+=(\w+|'[^']*'|"[^"]*")[^>]*>/ '' +"(?!^0*$)(?!^0*\.0*$)^\d{1,5}(\.\d{1,3})?$" G "<0>1" +"(?!^0*$)(?!^0*\.0*$)^\d{1,5}(\.\d{1,3})?$" G "<0>12345.123" +"(?!^0*$)(?!^0*\.0*$)^\d{1,5}(\.\d{1,3})?$" G "<0>0.5" +"(?!^0*$)(?!^0*\.0*$)^\d{1,5}(\.\d{1,3})?$" "0" +"(?!^0*$)(?!^0*\.0*$)^\d{1,5}(\.\d{1,3})?$" "0.0" +"(?!^0*$)(?!^0*\.0*$)^\d{1,5}(\.\d{1,3})?$" "123456.1234" +"^.+@[^\.].*\.[a-z]{2,}$" G "<0>whatever@somewhere.museum" +"^.+@[^\.].*\.[a-z]{2,}$" G "<0>foreignchars@myforeigncharsdomain.nu" +"^.+@[^\.].*\.[a-z]{2,}$" G "<0>me+mysomething@mydomain.com" +"^.+@[^\.].*\.[a-z]{2,}$" "a@b.c" +"^.+@[^\.].*\.[a-z]{2,}$" "me@.my.com" +"^.+@[^\.].*\.[a-z]{2,}$" "a@b.comFOREIGNCHAR" +"^(\d{5}-\d{4}|\d{5})$" G "<0>12345" +"^(\d{5}-\d{4}|\d{5})$" G "<0>12345-1234" +"^(\d{5}-\d{4}|\d{5})$" "12345-12345" +"^(\d{5}-\d{4}|\d{5})$" "123" +"^(\d{5}-\d{4}|\d{5})$" "12345-abcd" +"^(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])$" G "<0>0.0.0.0" +"^(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])$" G "<0>255.255.255.02" +"^(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])$" G "<0>192.168.0.136" +"^(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])$" "256.1.3.4" +"^(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])$" "023.44.33.22" +"^(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])$" "10.57.98.23." 
+"]*[^/])>" G '<0>' +"]*[^/])>" '' +"" G "<0>" +"" G "<0>" +"" "this is a comment" +"" G "<0>" +"" G "<0>" +"" "this is a comment" +/<\u002f?(\w+)(\s+\w+=(\w+|"[^"]*"|'[^']*'))*>/ G "<0>" +/<\u002f?(\w+)(\s+\w+=(\w+|"[^"]*"|'[^']*'))*>/ G '<0>' +/<\u002f?(\w+)(\s+\w+=(\w+|"[^"]*"|'[^']*'))*>/ G "<0>" +/<\u002f?(\w+)(\s+\w+=(\w+|"[^"]*"|'[^']*'))*>/ "No Tag Here ..." +"(\{\\f\d*)\\([^;]+;)" G "<0>{\\f0\\Some Font names here;" +"(\{\\f\d*)\\([^;]+;)" G "<0>{\\f1\\fswiss\\fcharset0\\fprq2{\\*\\panose 020b0604020202020204}Arial;" +"(\{\\f\d*)\\([^;]+;)" G "{\\f" +"(\{\\f\d*)\\([^;]+;)" "{f0fs20 some text}" +#"" G '<0>space' # TODO: Can't quote this pattern with the test syntax! +#"" "this is not a tag" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0-1]\d|[2][0-3])(\:[0-5]\d){1,2})?$" G "<0>12/30/2002" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0-1]\d|[2][0-3])(\:[0-5]\d){1,2})?$" G "<0>01/12/1998 13:30" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0-1]\d|[2][0-3])(\:[0-5]\d){1,2})?$" G "<0>01/28/2002 22:35:00" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0-1]\d|[2][0-3])(\:[0-5]\d){1,2})?$" "13/30/2002" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0-1]\d|[2][0-3])(\:[0-5]\d){1,2})?$" "01/12/1998 24:30" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0-1]\d|[2][0-3])(\:[0-5]\d){1,2})?$" "01/28/2002 22:35:64" +#"((?(^[A-Z0-9-;=]*:))(?(.*)))" G "<0>BEGIN:" #named capture +#"((?(^[A-Z0-9-;=]*:))(?(.*)))" G "<0>TEL;WORK;VOICE:" #named capture +#"((?(^[A-Z0-9-;=]*:))(?(.*)))" G "<0>TEL:" #named capture +#"((?(^[A-Z0-9-;=]*:))(?(.*)))" "begin:" #named capture +#"((?(^[A-Z0-9-;=]*:))(?(.*)))" "TEL;PREF;" #named capture +'^]*)>(.*?(?=<\/a>))<\/a>$' G '<0>my external link' +'^]*)>(.*?(?=<\/a>))<\/a>$' G ']*)>(.*?(?=<\/a>))<\/a>$' 'my internal link' 
+"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0]\d|[1][0-2])(\:[0-5]\d){1,2})*\s*([aApP][mM]{0,2})?$" G "<0>12/31/2002" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0]\d|[1][0-2])(\:[0-5]\d){1,2})*\s*([aApP][mM]{0,2})?$" G "<0>12/31/2002 08:00" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0]\d|[1][0-2])(\:[0-5]\d){1,2})*\s*([aApP][mM]{0,2})?$" G "<0>12/31/2002 08:00 AM" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0]\d|[1][0-2])(\:[0-5]\d){1,2})*\s*([aApP][mM]{0,2})?$" "12/31/02" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0]\d|[1][0-2])(\:[0-5]\d){1,2})*\s*([aApP][mM]{0,2})?$" "12/31/2002 14:00" +"
(?:\s*([^<]+)
\s*)+
" G "<0>
string1
string2
string3
" +"
(?:\s*([^<]+)
\s*)+
" ".." +"^((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1})))$" G "<0>1/2/03" +"^((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1})))$" G "<0>2/30/1999" +"^((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1})))$" G "<0>03/04/19" +"^((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1})))$" "3/4/2020" +"^((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1})))$" "3/4/1919" +']*))*|/?>' G '<0>' +']*))*|/?>' G "<0>" +']*))*|/?>' G "<0>
" +']*))*|/?>' "this is a test..." +"^ *(1[0-2]|[1-9]):[0-5][0-9] *(a|p|A|P)(m|M) *$" G "<0>12:00am" +"^ *(1[0-2]|[1-9]):[0-5][0-9] *(a|p|A|P)(m|M) *$" G "<0>1:00 PM" +"^ *(1[0-2]|[1-9]):[0-5][0-9] *(a|p|A|P)(m|M) *$" G "<0> 12:59 pm" +"^ *(1[0-2]|[1-9]):[0-5][0-9] *(a|p|A|P)(m|M) *$" "0:00" +"^ *(1[0-2]|[1-9]):[0-5][0-9] *(a|p|A|P)(m|M) *$" "0:01 am" +"^ *(1[0-2]|[1-9]):[0-5][0-9] *(a|p|A|P)(m|M) *$" "13:00 pm" +"\({1}[0-9]{3}\){1}\-{1}[0-9]{3}\-{1}[0-9]{4}" G "<0>(111)-111-1111" +"\({1}[0-9]{3}\){1}\-{1}[0-9]{3}\-{1}[0-9]{4}" "11111111111" +"[^abc]" G "<0>def" +"[^abc]" "abc" +"^(([0]?[1-9]|[1][0-2])[\/|\-|\.]([0-2]\d|[3][0-1]|[1-9])[\/|\-|\.]([2][0])?\d{2}\s+((([0][0-9]|[1][0-2]|[0-9])[\:|\-|\.]([0-5]\d)\s*([aApP][mM])?)|(([0-1][0-9]|[2][0-3]|[0-9])[\:|\-|\.]([0-5]\d))))$" G "<0>01/01/2002 04:42" +"^(([0]?[1-9]|[1][0-2])[\/|\-|\.]([0-2]\d|[3][0-1]|[1-9])[\/|\-|\.]([2][0])?\d{2}\s+((([0][0-9]|[1][0-2]|[0-9])[\:|\-|\.]([0-5]\d)\s*([aApP][mM])?)|(([0-1][0-9]|[2][0-3]|[0-9])[\:|\-|\.]([0-5]\d))))$" G "<0>5-12-02 04:42 AM" +"^(([0]?[1-9]|[1][0-2])[\/|\-|\.]([0-2]\d|[3][0-1]|[1-9])[\/|\-|\.]([2][0])?\d{2}\s+((([0][0-9]|[1][0-2]|[0-9])[\:|\-|\.]([0-5]\d)\s*([aApP][mM])?)|(([0-1][0-9]|[2][0-3]|[0-9])[\:|\-|\.]([0-5]\d))))$" G "<0>01.01/02 04-42aM" +"^(([0]?[1-9]|[1][0-2])[\/|\-|\.]([0-2]\d|[3][0-1]|[1-9])[\/|\-|\.]([2][0])?\d{2}\s+((([0][0-9]|[1][0-2]|[0-9])[\:|\-|\.]([0-5]\d)\s*([aApP][mM])?)|(([0-1][0-9]|[2][0-3]|[0-9])[\:|\-|\.]([0-5]\d))))$" "01-12-1999 4:50PM" +"^(([0]?[1-9]|[1][0-2])[\/|\-|\.]([0-2]\d|[3][0-1]|[1-9])[\/|\-|\.]([2][0])?\d{2}\s+((([0][0-9]|[1][0-2]|[0-9])[\:|\-|\.]([0-5]\d)\s*([aApP][mM])?)|(([0-1][0-9]|[2][0-3]|[0-9])[\:|\-|\.]([0-5]\d))))$" "01-12-2002 15:10PM" +"^(([0]?[1-9]|[1][0-2])[\/|\-|\.]([0-2]\d|[3][0-1]|[1-9])[\/|\-|\.]([2][0])?\d{2}\s+((([0][0-9]|[1][0-2]|[0-9])[\:|\-|\.]([0-5]\d)\s*([aApP][mM])?)|(([0-1][0-9]|[2][0-3]|[0-9])[\:|\-|\.]([0-5]\d))))$" "01-12-002 8:20PM" 
+"^([1][12]|[0]?[1-9])[\/-]([3][01]|[12]\d|[0]?[1-9])[\/-](\d{4}|\d{2})$" G "<0>11-02-02" +"^([1][12]|[0]?[1-9])[\/-]([3][01]|[12]\d|[0]?[1-9])[\/-](\d{4}|\d{2})$" G "<0>1-25-2002" +"^([1][12]|[0]?[1-9])[\/-]([3][01]|[12]\d|[0]?[1-9])[\/-](\d{4}|\d{2})$" G "<0>01/25/2002" +"^([1][12]|[0]?[1-9])[\/-]([3][01]|[12]\d|[0]?[1-9])[\/-](\d{4}|\d{2})$" "13-02-02" +"^([1][12]|[0]?[1-9])[\/-]([3][01]|[12]\d|[0]?[1-9])[\/-](\d{4}|\d{2})$" "11.02.02" +"^([1][12]|[0]?[1-9])[\/-]([3][01]|[12]\d|[0]?[1-9])[\/-](\d{4}|\d{2})$" "11/32/2002" +"(([0-1][0-9])|([2][0-3])):([0-5][0-9]):([0-5][0-9])" G "<0>09:30:00" +"(([0-1][0-9])|([2][0-3])):([0-5][0-9]):([0-5][0-9])" G "<0>17:45:20" +"(([0-1][0-9])|([2][0-3])):([0-5][0-9]):([0-5][0-9])" G "<0>23:59:59" +"(([0-1][0-9])|([2][0-3])):([0-5][0-9]):([0-5][0-9])" "24:00:00" +"(((0[1-9]|[12][0-9]|3[01])([-./])(0[13578]|10|12)([-./])(\d{4}))|(([0][1-9]|[12][0-9]|30)([-./])(0[469]|11)([-./])(\d{4}))|((0[1-9]|1[0-9]|2[0-8])([-./])(02)([-./])(\d{4}))|((29)(\.|-|\/)(02)([-./])([02468][048]00))|((29)([-./])(02)([-./])([13579][26]00))|((29)([-./])(02)([-./])([0-9][0-9][0][48]))|((29)([-./])(02)([-./])([0-9][0-9][2468][048]))|((29)([-./])(02)([-./])([0-9][0-9][13579][26])))" G "<0>29/02/2000" +"(((0[1-9]|[12][0-9]|3[01])([-./])(0[13578]|10|12)([-./])(\d{4}))|(([0][1-9]|[12][0-9]|30)([-./])(0[469]|11)([-./])(\d{4}))|((0[1-9]|1[0-9]|2[0-8])([-./])(02)([-./])(\d{4}))|((29)(\.|-|\/)(02)([-./])([02468][048]00))|((29)([-./])(02)([-./])([13579][26]00))|((29)([-./])(02)([-./])([0-9][0-9][0][48]))|((29)([-./])(02)([-./])([0-9][0-9][2468][048]))|((29)([-./])(02)([-./])([0-9][0-9][13579][26])))" G "<0>31/01/2000" 
+"(((0[1-9]|[12][0-9]|3[01])([-./])(0[13578]|10|12)([-./])(\d{4}))|(([0][1-9]|[12][0-9]|30)([-./])(0[469]|11)([-./])(\d{4}))|((0[1-9]|1[0-9]|2[0-8])([-./])(02)([-./])(\d{4}))|((29)(\.|-|\/)(02)([-./])([02468][048]00))|((29)([-./])(02)([-./])([13579][26]00))|((29)([-./])(02)([-./])([0-9][0-9][0][48]))|((29)([-./])(02)([-./])([0-9][0-9][2468][048]))|((29)([-./])(02)([-./])([0-9][0-9][13579][26])))" G "<0>30-01-2000" +"(((0[1-9]|[12][0-9]|3[01])([-./])(0[13578]|10|12)([-./])(\d{4}))|(([0][1-9]|[12][0-9]|30)([-./])(0[469]|11)([-./])(\d{4}))|((0[1-9]|1[0-9]|2[0-8])([-./])(02)([-./])(\d{4}))|((29)(\.|-|\/)(02)([-./])([02468][048]00))|((29)([-./])(02)([-./])([13579][26]00))|((29)([-./])(02)([-./])([0-9][0-9][0][48]))|((29)([-./])(02)([-./])([0-9][0-9][2468][048]))|((29)([-./])(02)([-./])([0-9][0-9][13579][26])))" "29/02/2002" +"(((0[1-9]|[12][0-9]|3[01])([-./])(0[13578]|10|12)([-./])(\d{4}))|(([0][1-9]|[12][0-9]|30)([-./])(0[469]|11)([-./])(\d{4}))|((0[1-9]|1[0-9]|2[0-8])([-./])(02)([-./])(\d{4}))|((29)(\.|-|\/)(02)([-./])([02468][048]00))|((29)([-./])(02)([-./])([13579][26]00))|((29)([-./])(02)([-./])([0-9][0-9][0][48]))|((29)([-./])(02)([-./])([0-9][0-9][2468][048]))|((29)([-./])(02)([-./])([0-9][0-9][13579][26])))" "32/01/2002" +"(((0[1-9]|[12][0-9]|3[01])([-./])(0[13578]|10|12)([-./])(\d{4}))|(([0][1-9]|[12][0-9]|30)([-./])(0[469]|11)([-./])(\d{4}))|((0[1-9]|1[0-9]|2[0-8])([-./])(02)([-./])(\d{4}))|((29)(\.|-|\/)(02)([-./])([02468][048]00))|((29)([-./])(02)([-./])([13579][26]00))|((29)([-./])(02)([-./])([0-9][0-9][0][48]))|((29)([-./])(02)([-./])([0-9][0-9][2468][048]))|((29)([-./])(02)([-./])([0-9][0-9][13579][26])))" "10/2/2002" +"^0[1-6]{1}(([0-9]{2}){4})|((\s[0-9]{2}){4})|((-[0-9]{2}){4})$" G "<0>01 46 70 89 12" +"^0[1-6]{1}(([0-9]{2}){4})|((\s[0-9]{2}){4})|((-[0-9]{2}){4})$" G "<0>01-46-70-89-12" +"^0[1-6]{1}(([0-9]{2}){4})|((\s[0-9]{2}){4})|((-[0-9]{2}){4})$" G "<0>0146708912" +"^0[1-6]{1}(([0-9]{2}){4})|((\s[0-9]{2}){4})|((-[0-9]{2}){4})$" "01-46708912" 
+"^0[1-6]{1}(([0-9]{2}){4})|((\s[0-9]{2}){4})|((-[0-9]{2}){4})$" "01 46708912" +"^0[1-6]{1}(([0-9]{2}){4})|((\s[0-9]{2}){4})|((-[0-9]{2}){4})$" "+33235256677" +"^[0-9A-Za-z_ ]+(.[jJ][pP][gG]|.[gG][iI][fF])$" G "<0>good.gif" +"^[0-9A-Za-z_ ]+(.[jJ][pP][gG]|.[gG][iI][fF])$" G "<0>go d.GIf" +"^[0-9A-Za-z_ ]+(.[jJ][pP][gG]|.[gG][iI][fF])$" G "<0>goo_d.jPg" +"^[0-9A-Za-z_ ]+(.[jJ][pP][gG]|.[gG][iI][fF])$" "junk" +"^[0-9A-Za-z_ ]+(.[jJ][pP][gG]|.[gG][iI][fF])$" "bad.bad.gif" +"^[0-9A-Za-z_ ]+(.[jJ][pP][gG]|.[gG][iI][fF])$" "slash\gif." +"<[^>\s]*\bauthor\b[^>]*>" G '<0>' +"<[^>\s]*\bauthor\b[^>]*>" G "<0>" +# "<[^>\s]*\bauthor\b[^>]*>" G '<0>' #Debug should work +"<[^> ]*\bauthor\b[^>]*>" G "<0>" +"<[^> ]*\bauthor\b[^>]*>" G '<0>' +"<[^>\s]*\bauthor\b[^>]*>" "" +"<[^>\s]*\bauthor\b[^>]*>" "" +"<[^>\s]*\bauthor\b[^>]*>" "author" +"^(?:(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00)))(\/|-|\.)(?:0?2\1(?:29))$)|(?:(?:1[6-9]|[2-9]\d)?\d{2})(\/|-|\.)(?:(?:(?:0?[13578]|1[02])\2(?:31))|(?:(?:0?[1,3-9]|1[0-2])\2(29|30))|(?:(?:0?[1-9])|(?:1[0-2]))\2(?:0?[1-9]|1\d|2[0-8]))$" G "<0>04/2/29" +"^(?:(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00)))(\/|-|\.)(?:0?2\1(?:29))$)|(?:(?:1[6-9]|[2-9]\d)?\d{2})(\/|-|\.)(?:(?:(?:0?[13578]|1[02])\2(?:31))|(?:(?:0?[1,3-9]|1[0-2])\2(29|30))|(?:(?:0?[1-9])|(?:1[0-2]))\2(?:0?[1-9]|1\d|2[0-8]))$" G "<0>2002-4-30" +"^(?:(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00)))(\/|-|\.)(?:0?2\1(?:29))$)|(?:(?:1[6-9]|[2-9]\d)?\d{2})(\/|-|\.)(?:(?:(?:0?[13578]|1[02])\2(?:31))|(?:(?:0?[1,3-9]|1[0-2])\2(29|30))|(?:(?:0?[1-9])|(?:1[0-2]))\2(?:0?[1-9]|1\d|2[0-8]))$" G "<0>02.10.31" 
+"^(?:(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00)))(\/|-|\.)(?:0?2\1(?:29))$)|(?:(?:1[6-9]|[2-9]\d)?\d{2})(\/|-|\.)(?:(?:(?:0?[13578]|1[02])\2(?:31))|(?:(?:0?[1,3-9]|1[0-2])\2(29|30))|(?:(?:0?[1-9])|(?:1[0-2]))\2(?:0?[1-9]|1\d|2[0-8]))$" "2003/2/29" +"^(?:(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00)))(\/|-|\.)(?:0?2\1(?:29))$)|(?:(?:1[6-9]|[2-9]\d)?\d{2})(\/|-|\.)(?:(?:(?:0?[13578]|1[02])\2(?:31))|(?:(?:0?[1,3-9]|1[0-2])\2(29|30))|(?:(?:0?[1-9])|(?:1[0-2]))\2(?:0?[1-9]|1\d|2[0-8]))$" "02.4.31" +"^(?:(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00)))(\/|-|\.)(?:0?2\1(?:29))$)|(?:(?:1[6-9]|[2-9]\d)?\d{2})(\/|-|\.)(?:(?:(?:0?[13578]|1[02])\2(?:31))|(?:(?:0?[1,3-9]|1[0-2])\2(29|30))|(?:(?:0?[1-9])|(?:1[0-2]))\2(?:0?[1-9]|1\d|2[0-8]))$" "00/00/00" +'(\d*)\u0027*-*(\d*)/*(\d*)"' G '<0>5\u0027-3/16"' +'(\d*)\u0027*-*(\d*)/*(\d*)"' G '<0>1\u0027-2"' +'(\d*)\u0027*-*(\d*)/*(\d*)"' G '<0>5/16"' +'(\d*)\u0027*-*(\d*)/*(\d*)"' '1 3/16' +"^[1-9]{1}$|^[1-4]{1}[0-9]{1}$|^50$" G "<0>1" +"^[1-9]{1}$|^[1-4]{1}[0-9]{1}$|^50$" G "<0>23" +"^[1-9]{1}$|^[1-4]{1}[0-9]{1}$|^50$" G "<0>50" +"^[1-9]{1}$|^[1-4]{1}[0-9]{1}$|^50$" "0" +"^[1-9]{1}$|^[1-4]{1}[0-9]{1}$|^50$" "111" +"^[1-9]{1}$|^[1-4]{1}[0-9]{1}$|^50$" "xyz" +"^([ \u00c0-\u01ffa-zA-Z'])+$" G "<0>Jon Doe" +"^([ \u00c0-\u01ffa-zA-Z'])+$" G "<0>J\u00f8rn" +"^([ \u00c0-\u01ffa-zA-Z'])+$" G "<0>Mc'Neelan" +"^([ \u00c0-\u01ffa-zA-Z'])+$" "Henry); hacking attempt" +"^((([0]?[1-9]|1[0-2])(:|\.)(00|15|30|45)?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)(00|15|30|45)?))$" G "<0>1:00 PM" +"^((([0]?[1-9]|1[0-2])(:|\.)(00|15|30|45)?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)(00|15|30|45)?))$" G "<0>6:45 am" +"^((([0]?[1-9]|1[0-2])(:|\.)(00|15|30|45)?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)(00|15|30|45)?))$" G "<0>17:30" 
+"^((([0]?[1-9]|1[0-2])(:|\.)(00|15|30|45)?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)(00|15|30|45)?))$" "4:32 am" +"^((([0]?[1-9]|1[0-2])(:|\.)(00|15|30|45)?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)(00|15|30|45)?))$" "5:30:00 am" +"^((([0]?[1-9]|1[0-2])(:|\.)(00|15|30|45)?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)(00|15|30|45)?))$" "17:01" +"(^\d*\.?\d*[1-9]+\d*$)|(^[1-9]+\d*\.\d*$)" G "<0>0.050" +"(^\d*\.?\d*[1-9]+\d*$)|(^[1-9]+\d*\.\d*$)" G "<0>5.0000" +"(^\d*\.?\d*[1-9]+\d*$)|(^[1-9]+\d*\.\d*$)" G "<0>5000" +"(^\d*\.?\d*[1-9]+\d*$)|(^[1-9]+\d*\.\d*$)" "0" +"(^\d*\.?\d*[1-9]+\d*$)|(^[1-9]+\d*\.\d*$)" "0.0" +"(^\d*\.?\d*[1-9]+\d*$)|(^[1-9]+\d*\.\d*$)" ".0" +"^([A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^$" G "<0>Sacramento" +"^([A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^$" "<0><2>San Francisco" +"^([A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^$" "<0><3>San Luis Obispo" +"^([A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^$" "SanFrancisco" +"^([A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^$" "SanLuisObispo" +"^([A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^$" "San francisco" +"^\{?[a-fA-F\d]{8}-([a-fA-F\d]{4}-){3}[a-fA-F\d]{12}\}?$" G "<0>{e02ff0e4-00ad-090A-c030-0d00a0008ba0}" +"^\{?[a-fA-F\d]{8}-([a-fA-F\d]{4}-){3}[a-fA-F\d]{12}\}?$" G "<0>e02ff0e4-00ad-090A-c030-0d00a0008ba0" +"^\{?[a-fA-F\d]{8}-([a-fA-F\d]{4}-){3}[a-fA-F\d]{12}\}?$" "0xe02ff0e400ad090Ac0300d00a0008ba0" 
+"^\{?[a-fA-F0-9]{8}-([a-fA-F0-9]{4}-){3}[a-fA-F0-9]{12}\}?$" G "<0>{e02ff0e4-00ad-090A-c030-0d00a0008ba0}" +"^\{?[a-fA-F0-9]{8}-([a-fA-F0-9]{4}-){3}[a-fA-F0-9]{12}\}?$" G "<0>e02ff0e4-00ad-090A-c030-0d00a0008ba0" +"^\{?[a-fA-F0-9]{8}-([a-fA-F0-9]{4}-){3}[a-fA-F0-9]{12}\}?$" "0xe02ff0e400ad090Ac0300d00a0008ba0" +"^([a-zA-Z0-9@*#]{8,15})$" G "<0>@12X*567" +"^([a-zA-Z0-9@*#]{8,15})$" G "<0>1#Zv96g@*Yfasd4" +"^([a-zA-Z0-9@*#]{8,15})$" G "<0>#67jhgt@erd" +"^([a-zA-Z0-9@*#]{8,15})$" "$12X*567" +"^([a-zA-Z0-9@*#]{8,15})$" "1#Zv_96" +"^([a-zA-Z0-9@*#]{8,15})$" "+678jhgt@erd" +'(("|\u0027)[a-z0-9\/\.\?\=\&]*(\.htm|\.asp|\.php|\.jsp)[a-z0-9\/\.\?\=\&]*("|\u0027))|(href=*?[a-z0-9\/\.\?\=\&"\u0027]*)' G '<0>href="produktsida.asp?kategori2=218"' +'(("|\u0027)[a-z0-9\/\.\?\=\&]*(\.htm|\.asp|\.php|\.jsp)[a-z0-9\/\.\?\=\&]*("|\u0027))|(href=*?[a-z0-9\/\.\?\=\&"\u0027]*)' G '<0>href="NuclearTesting.htm"' +'(("|\u0027)[a-z0-9\/\.\?\=\&]*(\.htm|\.asp|\.php|\.jsp)[a-z0-9\/\.\?\=\&]*("|\u0027))|(href=*?[a-z0-9\/\.\?\=\&"\u0027]*)' 'U Suck' +"^(((((0[1-9])|(1\d)|(2[0-8]))-((0[1-9])|(1[0-2])))|((31-((0[13578])|(1[02])))|((29|30)-((0[1,3-9])|(1[0-2])))))-((20[0-9][0-9]))|(29-02-20(([02468][048])|([13579][26]))))$" G "<0>05-01-2002" +"^(((((0[1-9])|(1\d)|(2[0-8]))-((0[1-9])|(1[0-2])))|((31-((0[13578])|(1[02])))|((29|30)-((0[1,3-9])|(1[0-2])))))-((20[0-9][0-9]))|(29-02-20(([02468][048])|([13579][26]))))$" G "<0>29-02-2004" +"^(((((0[1-9])|(1\d)|(2[0-8]))-((0[1-9])|(1[0-2])))|((31-((0[13578])|(1[02])))|((29|30)-((0[1,3-9])|(1[0-2])))))-((20[0-9][0-9]))|(29-02-20(([02468][048])|([13579][26]))))$" G "<0>31-12-2002" +"^(((((0[1-9])|(1\d)|(2[0-8]))-((0[1-9])|(1[0-2])))|((31-((0[13578])|(1[02])))|((29|30)-((0[1,3-9])|(1[0-2])))))-((20[0-9][0-9]))|(29-02-20(([02468][048])|([13579][26]))))$" "1-1-02" +"^(((((0[1-9])|(1\d)|(2[0-8]))-((0[1-9])|(1[0-2])))|((31-((0[13578])|(1[02])))|((29|30)-((0[1,3-9])|(1[0-2])))))-((20[0-9][0-9]))|(29-02-20(([02468][048])|([13579][26]))))$" "29-02-2002" 
+"^(((((0[1-9])|(1\d)|(2[0-8]))-((0[1-9])|(1[0-2])))|((31-((0[13578])|(1[02])))|((29|30)-((0[1,3-9])|(1[0-2])))))-((20[0-9][0-9]))|(29-02-20(([02468][048])|([13579][26]))))$" "31-11-2002" +"^\d*[0-9](|.\d*[0-9]|,\d*[0-9])?$" G "<0>123456.123456" +"^\d*[0-9](|.\d*[0-9]|,\d*[0-9])?$" G "<0>123456,123456" +"^\d*[0-9](|.\d*[0-9]|,\d*[0-9])?$" G "<0>123456" +"^\d*[0-9](|.\d*[0-9]|,\d*[0-9])?$" "123a.123" +"^\d*[0-9](|.\d*[0-9]|,\d*[0-9])?$" "123a,123" +"^\d*[0-9](|.\d*[0-9]|,\d*[0-9])?$" "a" +"^(ac|AC|al|AL|am|AM|ap|AP|ba|BA|ce|CE|df|DF|es|ES|go|GO|ma|MA|mg|MG|ms|MS|mt|MT|pa|PA|pb|PB|pe|PE|pi|PI|pr|PR|rj|RJ|rn|RN|ro|RO|rr|RR|rs|RS|sc|SC|se|SE|sp|SP|to|TO)$" G "<0>AC" +"^(ac|AC|al|AL|am|AM|ap|AP|ba|BA|ce|CE|df|DF|es|ES|go|GO|ma|MA|mg|MG|ms|MS|mt|MT|pa|PA|pb|PB|pe|PE|pi|PI|pr|PR|rj|RJ|rn|RN|ro|RO|rr|RR|rs|RS|sc|SC|se|SE|sp|SP|to|TO)$" G "<0>RJ" +"^(ac|AC|al|AL|am|AM|ap|AP|ba|BA|ce|CE|df|DF|es|ES|go|GO|ma|MA|mg|MG|ms|MS|mt|MT|pa|PA|pb|PB|pe|PE|pi|PI|pr|PR|rj|RJ|rn|RN|ro|RO|rr|RR|rs|RS|sc|SC|se|SE|sp|SP|to|TO)$" G "<0>SP" +"^(ac|AC|al|AL|am|AM|ap|AP|ba|BA|ce|CE|df|DF|es|ES|go|GO|ma|MA|mg|MG|ms|MS|mt|MT|pa|PA|pb|PB|pe|PE|pi|PI|pr|PR|rj|RJ|rn|RN|ro|RO|rr|RR|rs|RS|sc|SC|se|SE|sp|SP|to|TO)$" "XX" +"^(ac|AC|al|AL|am|AM|ap|AP|ba|BA|ce|CE|df|DF|es|ES|go|GO|ma|MA|mg|MG|ms|MS|mt|MT|pa|PA|pb|PB|pe|PE|pi|PI|pr|PR|rj|RJ|rn|RN|ro|RO|rr|RR|rs|RS|sc|SC|se|SE|sp|SP|to|TO)$" "AB" +"^(ac|AC|al|AL|am|AM|ap|AP|ba|BA|ce|CE|df|DF|es|ES|go|GO|ma|MA|mg|MG|ms|MS|mt|MT|pa|PA|pb|PB|pe|PE|pi|PI|pr|PR|rj|RJ|rn|RN|ro|RO|rr|RR|rs|RS|sc|SC|se|SE|sp|SP|to|TO)$" "HJ" +"^[+]?\d*$" G "<0>0123456789" +"^[+]?\d*$" G "<0>1234" +"^[+]?\d*$" G "<0>1" +"^[+]?\d*$" "1.0?&" +"^[+]?\d*$" "a1" +"^[+]?\d*$" "2a-" +#/<[aA][ ]{0,}([a-zA-Z0-9"'_,.:;!?@$\&()%=\u002f ]|[\-]|[ \f]){0,}>((<(([a-zA-Z0-9"'_,.:;!?@$\&()%=\u002f ]|[\-]|[ \f]){0,})>([a-zA-Z0-9"'_,.:;!?@$\&()%=\u002f ]|[\-]|[ \f]){0,})|(([a-zA-Z0-9"'_,.:;!?@$\&()%=\u002f ]|[\-]|[ \f]){0,})){1,}/ G "<0>this text is italicized" #TODO: Need infinite loop breaking 
+#/<[aA][ ]{0,}([a-zA-Z0-9"'_,.:;!?@$\&()%=\u002f ]|[\-]|[ \f]){0,}>((<(([a-zA-Z0-9"'_,.:;!?@$\&()%=\u002f ]|[\-]|[ \f]){0,})>([a-zA-Z0-9"'_,.:;!?@$\&()%=\u002f ]|[\-]|[ \f]){0,})|(([a-zA-Z0-9"'_,.:;!?@$\&()%=\u002f ]|[\-]|[ \f]){0,})){1,}/ "

" #TODO: need infinite loop breaking. +"^([0-1]?[0-9]|[2][0-3]):([0-5][0-9])$" G "<0>0:00" +"^([0-1]?[0-9]|[2][0-3]):([0-5][0-9])$" G "<0>23:00" +"^([0-1]?[0-9]|[2][0-3]):([0-5][0-9])$" G "<0>00:59" +"^([0-1]?[0-9]|[2][0-3]):([0-5][0-9])$" "0:0" +"^([0-1]?[0-9]|[2][0-3]):([0-5][0-9])$" "24:00" +"^([0-1]?[0-9]|[2][0-3]):([0-5][0-9])$" "00:60" +"^((0[1-9])|(1[0-2]))\/(\d{2})$" G "<0>11/03" +"^((0[1-9])|(1[0-2]))\/(\d{2})$" G "<0>01/04" +"^((0[1-9])|(1[0-2]))\/(\d{2})$" "13/03" +"^((0[1-9])|(1[0-2]))\/(\d{2})$" "10/2003" +"]*>[\w|\t|\r|\W]*" G '<0>' +"]*>[\w|\t|\r|\W]*" "--" +"]*>[\w|\t|\r|\W]*" "A-Z][a-z]+" +#"]*>[\w|\t|\r|\W]*" G "<0>strFirstName" # Test Case damaged? +#"]*>[\w|\t|\r|\W]*" G "<0>intAgeInYears" # Test Case damaged? +#"]*>[\w|\t|\r|\W]*" G "<0>Where the Wild Things Are" # Test Case damaged? +"]*>[\w|\t|\r|\W]*" "123" +"]*>[\w|\t|\r|\W]*" "abc" +"]*>[\w|\t|\r|\W]*" "this has no caps in it" +"(^-\d*\.?\d*[1-9]+\d*$)|(^-[1-9]+\d*\.\d*$)" G "<0>-0.050" +"(^-\d*\.?\d*[1-9]+\d*$)|(^-[1-9]+\d*\.\d*$)" G "<0>-5.000" +"(^-\d*\.?\d*[1-9]+\d*$)|(^-[1-9]+\d*\.\d*$)" G "<0>-5" +"(^-\d*\.?\d*[1-9]+\d*$)|(^-[1-9]+\d*\.\d*$)" "0" +"(^-\d*\.?\d*[1-9]+\d*$)|(^-[1-9]+\d*\.\d*$)" "0.0" +"(^-\d*\.?\d*[1-9]+\d*$)|(^-[1-9]+\d*\.\d*$)" ".0" +"^([2][0]\d{2}\/([0]\d|[1][0-2])\/([0-2]\d|[3][0-1]))$|^([2][0]\d{2}\/([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" G "<0>2002/02/03" +"^([2][0]\d{2}\/([0]\d|[1][0-2])\/([0-2]\d|[3][0-1]))$|^([2][0]\d{2}\/([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" G "<0>2002/02/03 12:12:18" +"^([2][0]\d{2}\/([0]\d|[1][0-2])\/([0-2]\d|[3][0-1]))$|^([2][0]\d{2}\/([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" "2002/02/36" +"^([2][0]\d{2}\/([0]\d|[1][0-2])\/([0-2]\d|[3][0-1]))$|^([2][0]\d{2}\/([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" "02/03/2002" +"^(\d|,)*\.?\d*$" G "<0>1,000" +"^(\d|,)*\.?\d*$" G "<0>3,000.05" 
+"^(\d|,)*\.?\d*$" G "<0>5,000,000" +"^(\d|,)*\.?\d*$" "abc" +"^(\d|,)*\.?\d*$" "$100,000" +"^(\d|,)*\.?\d*$" "Forty" +"^\d$" G "<0>1" +"^\d$" G "<0>2" +"^\d$" G "<0>3" +"^\d$" "a" +"^\d$" "324" +"^\d$" "num" +"^[0-9]+$" G "<0>1234567890" +"^[0-9]+$" G "<0>1234567890" +"^[0-9]+$" G "<0>1234567890" +"^[0-9]+$" "http://none" +"^[0-9]+$" "http://none" +"^[0-9]+$" "http://none" +"^.{4,8}$" G "<0>asdf" +"^.{4,8}$" G "<0>1234" +"^.{4,8}$" G "<0>asdf1234" +"^.{4,8}$" "asd" +"^.{4,8}$" "123" +"^.{4,8}$" "asdfe12345" +"^[\w\.=-]+@[\w\.-]+\.[\w]{2,3}$" G "<0>a@a.com" +"^[\w\.=-]+@[\w\.-]+\.[\w]{2,3}$" G "<0>a@a.com.au" +"^[\w\.=-]+@[\w\.-]+\.[\w]{2,3}$" G "<0>a@a.au" +"^[\w\.=-]+@[\w\.-]+\.[\w]{2,3}$" "word" +"^[\w\.=-]+@[\w\.-]+\.[\w]{2,3}$" "word@" +"^[\w\.=-]+@[\w\.-]+\.[\w]{2,3}$" "@word" +"^\d{5}-\d{4}$" G "<0>22222-3333" +"^\d{5}-\d{4}$" G "<0>34545-2367" +"^\d{5}-\d{4}$" G "<0>56334-2343" +"^\d{5}-\d{4}$" "123456789" +"^\d{5}-\d{4}$" "A3B 4C5" +"^\d{5}-\d{4}$" "55335" +"(a|b|c).(a.b)*.b+.c" G "<0>autbfc" +"(a|b|c).(a.b)*.b+.c" "attc" +'"((\\")|[^"(\\")])+"' G '<0>"test"' +'"((\\")|[^"(\\")])+"' G '<0>"escape\"quote"' +'"((\\")|[^"(\\")])+"' G '<0>"\\""' +'"((\\")|[^"(\\")])+"' "test" +'"((\\")|[^"(\\")])+"' '"test' +'"((\\")|[^"(\\")])+"' '""test\\"' +"((0[1-9])|(1[02]))/\d{2}" G "<0>01/00" +"((0[1-9])|(1[02]))/\d{2}" G "<0>12/99" +"((0[1-9])|(1[02]))/\d{2}" "13/00" +"((0[1-9])|(1[02]))/\d{2}" "12/AS" +"^[a-zA-Z]$" G "<0>a" +"^[a-zA-Z]$" G "<0>B" +"^[a-zA-Z]$" G "<0>c" +"^[a-zA-Z]$" "0" +"^[a-zA-Z]$" "&" +"^[a-zA-Z]$" "AbC" +"^[a-zA-Z]+$" G "<0>abc" +"^[a-zA-Z]+$" G "<0>ABC" +"^[a-zA-Z]+$" G "<0>aBcDeF" +"^[a-zA-Z]+$" "abc123" +"^[a-zA-Z]+$" "mr." +"^[a-zA-Z]+$" "a word" +"^\s*[a-zA-Z,\p{Zs}]+\s*$" G "<0>Smith, Ed" +"^\s*[a-zA-Z,\p{Zs}]+\s*$" G "<0>Ed Smith" +"^\s*[a-zA-Z,\p{Zs}]+\s*$" G "<0>aBcDeFgH" +"^\s*[a-zA-Z,\p{Zs}]+\s*$" "a123" +"^\s*[a-zA-Z,\p{Zs}]+\s*$" "AB5" +"^\s*[a-zA-Z,\p{Zs}]+\s*$" "Mr. 
Ed" +"(\w+?@\w+?\u002E.+)" G "<0>bob@vsnl.com" +"(\w+?@\w+?\u002E.+)" "[AABB]" +"^\d+$" G "<0>123" +"^\d+$" G "<0>10" +"^\d+$" G "<0>54" +"^\d+$" "-54" +"^\d+$" "54.234" +"^\d+$" "abc" +"^(\+|-)?\d+$" G "<0>-34" +"^(\+|-)?\d+$" G "<0>34" +"^(\+|-)?\d+$" G "<0>+5" +"^(\+|-)?\d+$" "abc" +"^(\+|-)?\d+$" "3.1415" +"^(\+|-)?\d+$" "-5.3" +"foo" G "<0>foo" +"foo" "bar" +"^[1-5]$" G "<0>1" +"^[1-5]$" G "<0>3" +"^[1-5]$" G "<0>4" +"^[1-5]$" "6" +"^[1-5]$" "23" +"^[1-5]$" "a" +"^[12345]$" G "<0>1" +"^[12345]$" G "<0>2" +"^[12345]$" G "<0>4" +"^[12345]$" "6" +"^[12345]$" "-1" +"^[12345]$" "abc" +"^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$" G "<0>joe@aol.com" +"^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$" G "<0>joe@wrox.co.uk" +"^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$" G "<0>joe@domain.info" +"^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$" "a@b" +"^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$" "notanemail" +"^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$" "joe@@." +"^\w+@[a-zA-Z_]+?\.[a-zA-Z]{2,3}$" G "<0>joe@aol.com" +"^\w+@[a-zA-Z_]+?\.[a-zA-Z]{2,3}$" G "<0>ssmith@aspalliance.com" +"^\w+@[a-zA-Z_]+?\.[a-zA-Z]{2,3}$" G "<0>a@b.cc" +"^\w+@[a-zA-Z_]+?\.[a-zA-Z]{2,3}$" "joe@123aspx.com" +"^\w+@[a-zA-Z_]+?\.[a-zA-Z]{2,3}$" "joe@web.info" +"^\w+@[a-zA-Z_]+?\.[a-zA-Z]{2,3}$" "joe@company.co.uk" +"[\w-]+@([\w-]+\.)+[\w-]+" G "<0>joe@aol.com" +"[\w-]+@([\w-]+\.)+[\w-]+" G "<0>a@b.c" +"[\w-]+@([\w-]+\.)+[\w-]+" "asdf" +"[\w-]+@([\w-]+\.)+[\w-]+" "1234" +"\d{4}-?\d{4}-?\d{4}-?\d{4}" G "<0>1234-1234-1234-1234" +"\d{4}-?\d{4}-?\d{4}-?\d{4}" G "<0>1234123412341234" +"\d{4}-?\d{4}-?\d{4}-?\d{4}" "1234123412345" +"^\d{5}$" G "<0>33333" +"^\d{5}$" G "<0>55555" +"^\d{5}$" G "<0>23445" +"^\d{5}$" "abcd" +"^\d{5}$" "1324" +"^\d{5}$" "as;lkjdf" +"(\w+)\s+\1" G "<0>hubba hubba" +"(\w+)\s+\1" G "<0>mandate dated" +"(\w+)\s+\1" G "<0>an annual" +"(\w+)\s+\1" "may day" +"(\w+)\s+\1" "gogo" +"(\w+)\s+\1" "1212" +"^[a-zA-Z0-9\-\.]+\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" G "<0>3SquareBand.com" +"^[a-zA-Z0-9\-\.]+\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" G 
"<0>asp.net" +"^[a-zA-Z0-9\-\.]+\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" G "<0>army.mil" +"^[a-zA-Z0-9\-\.]+\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" "$SquareBand.com" +"^[a-zA-Z0-9\-\.]+\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" "asp/dot.net" +"^[a-zA-Z0-9\-\.]+\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" "army.military" + diff --git a/go/mysql/icuregex/testdata/regextst_extended.txt b/go/mysql/icuregex/testdata/regextst_extended.txt new file mode 100644 index 00000000000..c6b567931e3 --- /dev/null +++ b/go/mysql/icuregex/testdata/regextst_extended.txt @@ -0,0 +1,128 @@ +# Copyright (C) 2016 and later: Unicode, Inc. and others. +# License & terms of use: http://www.unicode.org/copyright.html +# Copyright (c) 2001-2015 International Business Machines +# Corporation and others. All Rights Reserved. +# +# file: +# +# ICU regular expression test cases. +# +# format: one test case per line, +# = [# comment] +# = "" +# = "" +# the quotes on the pattern and match string can be " or ' or / +# = text, with the start and end of each +# capture group tagged with .... The overall match, +# if any, is group 0, as in <0>matched text +# A region can be specified with ... tags. +# Standard ICU unescape will be applied, allowing \u, \U, etc. to appear. +# +# = any combination of +# i case insensitive match +# x free spacing and comments +# s dot-matches-all mode +# m multi-line mode. +# ($ and ^ match at embedded new-lines) +# D Unix Lines mode (only recognize 0x0a as new-line) +# Q UREGEX_LITERAL flag. Entire pattern is literal string. +# v If icu configured without break iteration, this +# regex test pattern should not compile. +# e set the UREGEX_ERROR_ON_UNKNOWN_ESCAPES flag +# d dump the compiled pattern +# t trace operation of match engine. +# 2-9 a digit between 2 and 9, specifies the number of +# times to execute find(). The expected results are +# for the last find() in the sequence. +# G Only check match / no match. Do not check capture groups. 
+# E Pattern compilation error expected +# L Use LookingAt() rather than find() +# M Use matches() rather than find(). +# +# a Use non-Anchoring Bounds. +# b Use Transparent Bounds. +# The a and b options only make a difference if +# a region has been specified in the string. +# z|Z hitEnd was expected(z) or not expected (Z). +# With neither, hitEnd is not checked. +# y|Y Require End expected(y) or not expected (Y). +# +# White space must be present between the flags and the match string. +# + +"[:xdigit:]" " <0>4f" +"\P{XDIGIT}+" "4f<0> " + +"[:blank:]" "<0> 4f" +"\P{BLANK}+" "<0>4f " + +"[:print:]" "<0> 4f\x07" +"\P{PRINT}+" " 4f<0>\x07" + +"\p{Age=1.1}" "<0>4f🥱" +"\p{Age=11}" "4f🥱" +"\p{Age=12}" "4f<0>🥱" + +"\p{Name=LATIN SMALL LETTER B}" "Good<0>bye" + +"\p{Numeric_Value=3}" "Good<0>3ye" +"\p{Numeric_Value=14}" "Good<0>⑭ye" + +"\p{Script_Extensions=Greek}" "Good<0>βye" + +"\p{Bidi_Control}" "Good<0>\u200Eye" +"\p{Bidi_Class=LeftToRight}" "<0>Goodbye" +"\p{Bidi_Class=RightToLeft}" "Goodbye" +"\p{Bidi_Class=LeftToRight}" "؈" +"\p{Bidi_Paired_Bracket_Type=Open}" "Good<0>(ye" + +"\p{Soft_Dotted}" "Good<0>iye" + +"\p{Changes_When_Lowercased}" "<0>Goodbye" +"\p{Changes_When_Titlecased}" "<0>goodbye" +"\p{Changes_When_Uppercased}" "G<0>oodbye" +"\p{Changes_When_CaseMapped}" " <0>Goodbye3" +"\p{Cased}" " <0>Goodbye3" +"\p{CaseIgnorable}" "foo<0>.bar" + +"\p{Indic_Syllabic_Category=Avagraha}" "foo<0>\u09BDbar" +"\p{IndicPositionalCategory=Top_And_Left_And_Right}" "foo<0>\u0B4Cbar" +"\p{VerticalOrientation=U}" "foo<0>\uA015bar" + +"\p{Canonical_Combining_Class=Nukta}" "foo<0>\u093Cbar" +"\p{Lead_Canonical_Combining_Class=Above}" "foo<0>\u0300bar" +"\p{Trail_Canonical_Combining_Class=Above}" "foo<0>\u0300bar" + +"\p{Changes_When_Casefolded}" "<0>\uFB03Goodbye" +"\p{Changes_When_Casefolded}" 2 "\uFB03<0>Goodbye" + +"\p{NFC_Inert}" "foo<0>\uFB03bar" +"\p{NFKC_Inert}" "foo<0>\uFB03bar" +"\P{NFD_Inert}" "foo<0>Àbar" +"\P{NFKD_Inert}" "foo<0>Àbar" + +"\p{NFC_Quick_Check=No}" 
"foo<0>\u0340bar" +"\p{NFKC_Quick_Check=No}" "foo<0>\u0340bar" +"\p{NFD_Quick_Check=No}" "foo<0>\u00C0bar" +"\p{NFKD_Quick_Check=No}" "foo<0>\u00C0bar" + +"\p{Full_Composition_Exclusion}" "foo<0>\u0374bar" + +"\p{Numeric_Type=Decimal}" "foo<0>3bar" +"\p{Joining_Type=Dual_Joining}" "foo<0>\u0626bar" +"\p{Joining_Group=African_Feh}" "foo<0>\u08BBbar" +"\p{General_Category=Close_Punctuation}" "foo[bar" +"\p{General_Category=Close_Punctuation}" "foo<0>]]bar" +"\p{General_Category=Close_Punctuation}" 2 "foo]<0>]bar" + +"\p{Hangul_Syllable_Type=Not_Applicable}" "<0>f" +"\p{Hangul_Syllable_Type=Leading_Jamo}" "foo<0>\u1100bar" + +"\p{Regional_Indicator=Yes}" "foo<0>\U0001F1E6bar" + +# Currently unsupported property classes below. They require +# significant additional code to support. +"\p{Changes_When_NFKC_Casefolded}" E "foo<0>\uFB03bar" +"\p{Segment_Starter}" E "<0>\uFB03Goodbye" + +"\p{Emoji}" "foo<0>😀bar" \ No newline at end of file diff --git a/go/mysql/json/helpers.go b/go/mysql/json/helpers.go index bc9995b48cb..1df38b2d769 100644 --- a/go/mysql/json/helpers.go +++ b/go/mysql/json/helpers.go @@ -18,6 +18,8 @@ package json import ( "vitess.io/vitess/go/sqltypes" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vthash" ) @@ -25,7 +27,7 @@ const hashPrefixJSON = 0xCCBB func (v *Value) Hash(h *vthash.Hasher) { h.Write16(hashPrefixJSON) - _, _ = h.Write(v.ToRawBytes()) + _, _ = h.Write(v.WeightString(nil)) } func (v *Value) ToRawBytes() []byte { @@ -81,14 +83,35 @@ func NewOpaqueValue(raw string) *Value { return &Value{s: raw, t: TypeOpaque} } -func (v *Value) Depth() int { - max := func(a, b int) int { - if a > b { - return a - } - return b +func NewFromSQL(v sqltypes.Value) (*Value, error) { + switch { + case v.Type() == sqltypes.TypeJSON: + var p Parser + return p.ParseBytes(v.Raw()) + case v.IsSigned(): + return NewNumber(v.RawStr(), NumberTypeSigned), nil + case v.IsUnsigned(): + return 
NewNumber(v.RawStr(), NumberTypeUnsigned), nil + case v.IsDecimal(): + return NewNumber(v.RawStr(), NumberTypeDecimal), nil + case v.IsFloat(): + return NewNumber(v.RawStr(), NumberTypeFloat), nil + case v.IsText(): + return NewString(v.RawStr()), nil + case v.IsBinary(): + return NewBlob(v.RawStr()), nil + case v.IsDateTime(), v.IsTimestamp(): + return NewDateTime(v.RawStr()), nil + case v.IsDate(): + return NewDate(v.RawStr()), nil + case v.IsTime(): + return NewTime(v.RawStr()), nil + default: + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "cannot coerce %v as a JSON type", v) } +} +func (v *Value) Depth() int { var depth int switch v.t { case TypeObject: diff --git a/go/mysql/json/json_path_test.go b/go/mysql/json/json_path_test.go index 7dc7e7f58ba..63313b55ac3 100644 --- a/go/mysql/json/json_path_test.go +++ b/go/mysql/json/json_path_test.go @@ -17,9 +17,8 @@ limitations under the License. package json import ( + "slices" "testing" - - "golang.org/x/exp/slices" ) func TestParseJSONPath(t *testing.T) { diff --git a/go/mysql/json/marshal.go b/go/mysql/json/marshal.go index 77d30285a69..8e63cddb171 100644 --- a/go/mysql/json/marshal.go +++ b/go/mysql/json/marshal.go @@ -42,9 +42,9 @@ func (v *Value) marshalSQLInternal(top bool, dst []byte) []byte { if i != 0 { dst = append(dst, ", "...) } - dst = append(dst, "_utf8mb4'"...) - dst = append(dst, vv.k...) - dst = append(dst, "', "...) + dst = append(dst, "_utf8mb4"...) + dst = append(dst, sqltypes.EncodeStringSQL(vv.k)...) + dst = append(dst, ", "...) dst = vv.v.marshalSQLInternal(false, dst) } dst = append(dst, ')') @@ -137,7 +137,7 @@ func (v *Value) marshalSQLInternal(top bool, dst []byte) []byte { return dst case TypeBoolean: if top { - dst = append(dst, "CAST("...) + dst = append(dst, "CAST(_utf8mb4'"...) } if v == ValueTrue { dst = append(dst, "true"...) @@ -145,16 +145,16 @@ func (v *Value) marshalSQLInternal(top bool, dst []byte) []byte { dst = append(dst, "false"...) 
} if top { - dst = append(dst, " as JSON)"...) + dst = append(dst, "' as JSON)"...) } return dst case TypeNull: if top { - dst = append(dst, "CAST("...) + dst = append(dst, "CAST(_utf8mb4'"...) } dst = append(dst, "null"...) if top { - dst = append(dst, " as JSON)"...) + dst = append(dst, "' as JSON)"...) } return dst default: diff --git a/go/mysql/json/marshal_test.go b/go/mysql/json/marshal_test.go new file mode 100644 index 00000000000..9329c3cd49a --- /dev/null +++ b/go/mysql/json/marshal_test.go @@ -0,0 +1,57 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package json + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMarshalSQLTo(t *testing.T) { + testcases := []struct { + input string + expected string + }{ + { + input: "null", + expected: "CAST(_utf8mb4'null' as JSON)", + }, + { + input: `{}`, + expected: `JSON_OBJECT()`, + }, + { + input: `{"a": 1}`, + expected: `JSON_OBJECT(_utf8mb4'a', 1)`, + }, + { + input: `{"key with ' in it": []}`, + expected: `JSON_OBJECT(_utf8mb4'key with \' in it', JSON_ARRAY())`, + }, + } + for _, tc := range testcases { + t.Run(tc.input, func(t *testing.T) { + var p Parser + + v, err := p.Parse(tc.input) + require.NoError(t, err) + buf := v.MarshalSQLTo(nil) + require.Equal(t, tc.expected, string(buf)) + }) + } +} diff --git a/go/mysql/json/parser.go b/go/mysql/json/parser.go index b660884508d..322c623058e 100644 --- a/go/mysql/json/parser.go +++ b/go/mysql/json/parser.go @@ -21,13 +21,12 @@ import ( "bytes" "encoding/base64" "fmt" + "slices" "strconv" "strings" "time" "unicode/utf16" - "golang.org/x/exp/slices" - "vitess.io/vitess/go/mysql/fastparse" "vitess.io/vitess/go/hack" @@ -585,8 +584,18 @@ func (o *Object) sort() { return } - slices.SortStableFunc(o.kvs, func(a, b kv) bool { - return a.k < b.k + slices.SortStableFunc(o.kvs, func(a, b kv) int { + // TODO: switch to cmp.Compare for Go 1.21+. + // + // https://pkg.go.dev/cmp@master#Compare. + switch { + case a.k < b.k: + return -1 + case a.k > b.k: + return 1 + default: + return 0 + } }) uniq := o.kvs[:1] for _, kv := range o.kvs[1:] { @@ -704,6 +713,15 @@ func (v *Value) MarshalTime() string { return "" } +func (v *Value) marshalFloat(dst []byte) []byte { + f, _ := v.Float64() + buf := format.FormatFloat(f) + if bytes.IndexByte(buf, '.') == -1 && bytes.IndexByte(buf, 'e') == -1 { + buf = append(buf, '.', '0') + } + return append(dst, buf...) +} + // MarshalTo appends marshaled v to dst and returns the result. 
func (v *Value) MarshalTo(dst []byte) []byte { switch v.t { @@ -744,12 +762,7 @@ func (v *Value) MarshalTo(dst []byte) []byte { return dst case TypeNumber: if v.NumberType() == NumberTypeFloat { - f, _ := v.Float64() - buf := format.FormatFloat(f) - if bytes.IndexByte(buf, '.') == -1 && bytes.IndexByte(buf, 'e') == -1 { - buf = append(buf, '.', '0') - } - return append(dst, buf...) + return v.marshalFloat(dst) } return append(dst, v.s...) case TypeBoolean: diff --git a/go/mysql/json/update.go b/go/mysql/json/update.go index 6c86797e11f..eb74af46f49 100644 --- a/go/mysql/json/update.go +++ b/go/mysql/json/update.go @@ -17,7 +17,7 @@ limitations under the License. package json -import "golang.org/x/exp/slices" +import "slices" // Del deletes the entry with the given key from o. func (o *Object) Del(key string) { diff --git a/go/mysql/json/weights.go b/go/mysql/json/weights.go new file mode 100644 index 00000000000..262fe96e9cf --- /dev/null +++ b/go/mysql/json/weights.go @@ -0,0 +1,169 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package json + +import ( + "encoding/binary" + "strings" + + "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/mysql/fastparse" +) + +const ( + JSON_KEY_NULL = '\x00' + JSON_KEY_NUMBER_NEG = '\x01' + JSON_KEY_NUMBER_ZERO = '\x02' + JSON_KEY_NUMBER_POS = '\x03' + JSON_KEY_STRING = '\x04' + JSON_KEY_OBJECT = '\x05' + JSON_KEY_ARRAY = '\x06' + JSON_KEY_FALSE = '\x07' + JSON_KEY_TRUE = '\x08' + JSON_KEY_DATE = '\x09' + JSON_KEY_TIME = '\x0A' + JSON_KEY_DATETIME = '\x0B' + JSON_KEY_OPAQUE = '\x0C' +) + +// numericWeightString generates a fixed-width weight string for any JSON +// number. It requires the `num` representation to be normalized, otherwise +// the resulting string will not sort. +func (v *Value) numericWeightString(dst []byte, num string) []byte { + const MaxPadLength = 30 + + var ( + exponent string + exp int64 + significant string + negative bool + original = len(dst) + ) + + if num[0] == '-' { + negative = true + num = num[1:] + } + + if i := strings.IndexByte(num, 'e'); i >= 0 { + exponent = num[i+1:] + num = num[:i] + } + + significant = num + for len(significant) > 0 { + if significant[0] >= '1' && significant[0] <= '9' { + break + } + significant = significant[1:] + } + if len(significant) == 0 { + return append(dst, JSON_KEY_NUMBER_ZERO) + } + + if len(exponent) > 0 { + exp, _ = fastparse.ParseInt64(exponent, 10) + } else { + dec := strings.IndexByte(num, '.') + ofs := len(num) - len(significant) + if dec < 0 { + exp = int64(len(significant) - 1) + } else if ofs < dec { + exp = int64(dec - ofs - 1) + } else { + exp = int64(dec - ofs) + } + } + + if negative { + dst = append(dst, JSON_KEY_NUMBER_NEG) + dst = binary.BigEndian.AppendUint16(dst, uint16(-exp)^(1<<15)) + + for _, ch := range []byte(significant) { + if ch >= '0' && ch <= '9' { + dst = append(dst, '9'-ch+'0') + } + } + for len(dst)-original < MaxPadLength { + dst = append(dst, '9') + } + } else { + dst = append(dst, JSON_KEY_NUMBER_POS) + dst = binary.BigEndian.AppendUint16(dst, 
uint16(exp)^(1<<15)) + + for _, ch := range []byte(significant) { + if ch >= '0' && ch <= '9' { + dst = append(dst, ch) + } + } + for len(dst)-original < MaxPadLength { + dst = append(dst, '0') + } + } + + return dst +} + +func (v *Value) WeightString(dst []byte) []byte { + switch v.Type() { + case TypeNull: + dst = append(dst, JSON_KEY_NULL) + case TypeNumber: + if v.NumberType() == NumberTypeFloat { + f := v.marshalFloat(nil) + dst = v.numericWeightString(dst, hack.String(f)) + } else { + dst = v.numericWeightString(dst, v.s) + } + case TypeString: + dst = append(dst, JSON_KEY_STRING) + dst = append(dst, v.s...) + case TypeObject: + // MySQL compat: we follow the same behavior as MySQL does for weight strings in JSON, + // where Objects and Arrays are only sorted by their length and not by the values + // of their contents. + // Note that in MySQL, generating the weight string of a JSON Object or Array will actually + // print a warning in the logs! We're not printing anything. + dst = append(dst, JSON_KEY_OBJECT) + dst = binary.BigEndian.AppendUint32(dst, uint32(v.o.Len())) + case TypeArray: + dst = append(dst, JSON_KEY_ARRAY) + dst = binary.BigEndian.AppendUint32(dst, uint32(len(v.a))) + case TypeBoolean: + switch v { + case ValueTrue: + dst = append(dst, JSON_KEY_TRUE) + case ValueFalse: + dst = append(dst, JSON_KEY_FALSE) + default: + panic("invalid JSON Boolean") + } + case TypeDate: + dst = append(dst, JSON_KEY_DATE) + dst = append(dst, v.MarshalDate()...) + case TypeDateTime: + dst = append(dst, JSON_KEY_DATETIME) + dst = append(dst, v.MarshalDateTime()...) + case TypeTime: + dst = append(dst, JSON_KEY_TIME) + dst = append(dst, v.MarshalTime()...) + case TypeOpaque, TypeBit, TypeBlob: + dst = append(dst, JSON_KEY_OPAQUE) + dst = append(dst, v.s...) 
+ } + return dst +} diff --git a/go/mysql/json/weights_test.go b/go/mysql/json/weights_test.go new file mode 100644 index 00000000000..9bbcd548e50 --- /dev/null +++ b/go/mysql/json/weights_test.go @@ -0,0 +1,44 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package json + +import ( + "bytes" + "testing" + + "vitess.io/vitess/go/mysql/format" +) + +func TestWeightStrings(t *testing.T) { + var cases = []struct { + l, r *Value + }{ + {NewNumber("-2.3742940301417033", NumberTypeFloat), NewNumber("-0.024384053736998118", NumberTypeFloat)}, + {NewNumber("2.3742940301417033", NumberTypeFloat), NewNumber("20.3742940301417033", NumberTypeFloat)}, + {NewNumber(string(format.FormatFloat(1000000000000000.0)), NumberTypeFloat), NewNumber("100000000000000000", NumberTypeDecimal)}, + } + + for _, tc := range cases { + l := tc.l.WeightString(nil) + r := tc.r.WeightString(nil) + + if bytes.Compare(l, r) >= 0 { + t.Errorf("expected %s < %s\nl = %v\n = %v\nr = %v\n = %v", + tc.l.String(), tc.r.String(), l, string(l), r, string(r)) + } + } +} diff --git a/go/mysql/query.go b/go/mysql/query.go index 5085bc687a9..0213878b49d 100644 --- a/go/mysql/query.go +++ b/go/mysql/query.go @@ -23,6 +23,7 @@ import ( "strings" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -48,7 +49,7 @@ func (c *Conn) WriteComQuery(query string) error { 
pos++ copy(data[pos:], query) if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, err.Error()) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, err.Error()) } return nil } @@ -62,7 +63,7 @@ func (c *Conn) writeComInitDB(db string) error { pos++ copy(data[pos:], db) if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, err.Error()) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, err.Error()) } return nil } @@ -75,7 +76,7 @@ func (c *Conn) writeComSetOption(operation uint16) error { pos++ writeUint16(data, pos, operation) if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, err.Error()) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, err.Error()) } return nil } @@ -85,36 +86,36 @@ func (c *Conn) writeComSetOption(operation uint16) error { func (c *Conn) readColumnDefinition(field *querypb.Field, index int) error { colDef, err := c.readEphemeralPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } defer c.recycleReadPacket() // Catalog is ignored, always set to "def" pos, ok := skipLenEncString(colDef, 0) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "skipping col %v catalog failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "skipping col %v catalog failed", index) } // schema, table, orgTable, name and OrgName are strings. 
field.Database, pos, ok = readLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v schema failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v schema failed", index) } field.Table, pos, ok = readLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v table failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v table failed", index) } field.OrgTable, pos, ok = readLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v org_table failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v org_table failed", index) } field.Name, pos, ok = readLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v name failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v name failed", index) } field.OrgName, pos, ok = readLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v org_name failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v org_name failed", index) } // Skip length of fixed-length fields. @@ -123,37 +124,37 @@ func (c *Conn) readColumnDefinition(field *querypb.Field, index int) error { // characterSet is a uint16. 
characterSet, pos, ok := readUint16(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v characterSet failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v characterSet failed", index) } field.Charset = uint32(characterSet) // columnLength is a uint32. field.ColumnLength, pos, ok = readUint32(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v columnLength failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v columnLength failed", index) } // type is one byte. t, pos, ok := readByte(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v type failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v type failed", index) } // flags is 2 bytes. flags, pos, ok := readUint16(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v flags failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v flags failed", index) } // Convert MySQL type to Vitess type. field.Type, err = sqltypes.MySQLToType(int64(t), int64(flags)) if err != nil { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "MySQLToType(%v,%v) failed for column %v: %v", t, flags, index, err) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "MySQLToType(%v,%v) failed for column %v: %v", t, flags, index, err) } // Decimals is a byte. 
decimals, _, ok := readByte(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v decimals failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v decimals failed", index) } field.Decimals = uint32(decimals) @@ -183,7 +184,7 @@ func (c *Conn) readColumnDefinition(field *querypb.Field, index int) error { func (c *Conn) readColumnDefinitionType(field *querypb.Field, index int) error { colDef, err := c.readEphemeralPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } defer c.recycleReadPacket() @@ -191,27 +192,27 @@ func (c *Conn) readColumnDefinitionType(field *querypb.Field, index int) error { // strings, all skipped. pos, ok := skipLenEncString(colDef, 0) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "skipping col %v catalog failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "skipping col %v catalog failed", index) } pos, ok = skipLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "skipping col %v schema failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "skipping col %v schema failed", index) } pos, ok = skipLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "skipping col %v table failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "skipping col %v table failed", index) } pos, ok = skipLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "skipping col %v org_table failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "skipping col %v org_table failed", index) } pos, ok = 
skipLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "skipping col %v name failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "skipping col %v name failed", index) } pos, ok = skipLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "skipping col %v org_name failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "skipping col %v org_name failed", index) } // Skip length of fixed-length fields. @@ -220,31 +221,31 @@ func (c *Conn) readColumnDefinitionType(field *querypb.Field, index int) error { // characterSet is a uint16. _, pos, ok = readUint16(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v characterSet failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v characterSet failed", index) } // columnLength is a uint32. _, pos, ok = readUint32(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v columnLength failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v columnLength failed", index) } // type is one byte t, pos, ok := readByte(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v type failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v type failed", index) } // flags is 2 bytes flags, _, ok := readUint16(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v flags failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v flags failed", index) } // Convert MySQL type to Vitess type. 
field.Type, err = sqltypes.MySQLToType(int64(t), int64(flags)) if err != nil { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "MySQLToType(%v,%v) failed for column %v: %v", t, flags, index, err) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "MySQLToType(%v,%v) failed for column %v: %v", t, flags, index, err) } // skip decimals @@ -270,7 +271,7 @@ func (c *Conn) parseRow(data []byte, fields []*querypb.Field, reader func([]byte var ok bool s, pos, ok = reader(data, pos) if !ok { - return nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "decoding string failed") + return nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "decoding string failed") } result = append(result, sqltypes.MakeTrusted(fields[i].Type, s)) } @@ -311,7 +312,7 @@ func (c *Conn) ExecuteFetch(query string, maxrows int, wantfields bool) (result func (c *Conn) ExecuteFetchMulti(query string, maxrows int, wantfields bool) (result *sqltypes.Result, more bool, err error) { defer func() { if err != nil { - if sqlerr, ok := err.(*SQLError); ok { + if sqlerr, ok := err.(*sqlerror.SQLError); ok { sqlerr.Query = query } } @@ -335,7 +336,7 @@ func (c *Conn) ExecuteFetchMulti(query string, maxrows int, wantfields bool) (re func (c *Conn) ExecuteFetchWithWarningCount(query string, maxrows int, wantfields bool) (result *sqltypes.Result, warnings uint16, err error) { defer func() { if err != nil { - if sqlerr, ok := err.(*SQLError); ok { + if sqlerr, ok := err.(*sqlerror.SQLError); ok { sqlerr.Query = query } } @@ -395,7 +396,7 @@ func (c *Conn) ReadQueryResult(maxrows int, wantfields bool) (*sqltypes.Result, // EOF is only present here if it's not deprecated. 
data, err := c.readEphemeralPacket() if err != nil { - return nil, false, 0, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return nil, false, 0, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } if c.isEOFPacket(data) { @@ -417,7 +418,7 @@ func (c *Conn) ReadQueryResult(maxrows int, wantfields bool) (*sqltypes.Result, for { data, err := c.readEphemeralPacket() if err != nil { - return nil, false, 0, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return nil, false, 0, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } if c.isEOFPacket(data) { @@ -482,7 +483,7 @@ func (c *Conn) drainResults() error { for { data, err := c.readEphemeralPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } if c.isEOFPacket(data) { c.recycleReadPacket() @@ -498,11 +499,11 @@ func (c *Conn) drainResults() error { func (c *Conn) readComQueryResponse() (int, *PacketOK, error) { data, err := c.readEphemeralPacket() if err != nil { - return 0, nil, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return 0, nil, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } defer c.recycleReadPacket() if len(data) == 0 { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "invalid empty COM_QUERY response packet") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "invalid empty COM_QUERY response packet") } switch data[0] { @@ -519,10 +520,10 @@ func (c *Conn) readComQueryResponse() (int, *PacketOK, error) { } n, pos, ok := readLenEncInt(data, 0) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "cannot get column number") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "cannot get column number") } if pos != 
len(data) { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extra data in COM_QUERY response") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extra data in COM_QUERY response") } return int(n), &PacketOK{}, nil } @@ -552,32 +553,32 @@ func (c *Conn) parseComStmtExecute(prepareData map[uint32]*PrepareData, data []b // statement ID stmtID, pos, ok := readUint32(payload, 0) if !ok { - return 0, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading statement ID failed") + return 0, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "reading statement ID failed") } prepare, ok := prepareData[stmtID] if !ok { - return 0, 0, NewSQLError(CRCommandsOutOfSync, SSUnknownSQLState, "statement ID is not found from record") + return 0, 0, sqlerror.NewSQLError(sqlerror.CRCommandsOutOfSync, sqlerror.SSUnknownSQLState, "statement ID is not found from record") } // cursor type flags cursorType, pos, ok := readByte(payload, pos) if !ok { - return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading cursor type flags failed") + return stmtID, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "reading cursor type flags failed") } // iteration count iterCount, pos, ok := readUint32(payload, pos) if !ok { - return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading iteration count failed") + return stmtID, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "reading iteration count failed") } if iterCount != uint32(1) { - return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "iteration count is not equal to 1") + return stmtID, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "iteration count is not equal to 1") } if prepare.ParamsCount > 0 { bitMap, pos, ok = readBytes(payload, pos, (int(prepare.ParamsCount)+7)/8) if !ok { - return stmtID, 0, 
NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading NULL-bitmap failed") + return stmtID, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "reading NULL-bitmap failed") } } @@ -587,18 +588,18 @@ func (c *Conn) parseComStmtExecute(prepareData map[uint32]*PrepareData, data []b for i := uint16(0); i < prepare.ParamsCount; i++ { mysqlType, pos, ok = readByte(payload, pos) if !ok { - return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading parameter type failed") + return stmtID, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "reading parameter type failed") } flags, pos, ok = readByte(payload, pos) if !ok { - return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading parameter flags failed") + return stmtID, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "reading parameter flags failed") } // convert MySQL type to internal type. valType, err := sqltypes.MySQLToType(int64(mysqlType), int64(flags)) if err != nil { - return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "MySQLToType(%v,%v) failed: %v", mysqlType, flags, err) + return stmtID, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "MySQLToType(%v,%v) failed: %v", mysqlType, flags, err) } prepare.ParamsType[i] = int32(valType) @@ -620,7 +621,7 @@ func (c *Conn) parseComStmtExecute(prepareData map[uint32]*PrepareData, data []b val, pos, ok = c.parseStmtArgs(payload, querypb.Type(prepare.ParamsType[i]), pos) } if !ok { - return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "decoding parameter value failed: %v", prepare.ParamsType[i]) + return stmtID, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "decoding parameter value failed: %v", prepare.ParamsType[i]) } prepare.BindVars[parameterID] = sqltypes.ValueBindVariable(val) diff --git a/go/mysql/query_test.go b/go/mysql/query_test.go 
index bf902b5165f..07012f83b9f 100644 --- a/go/mysql/query_test.go +++ b/go/mysql/query_test.go @@ -24,6 +24,8 @@ import ( "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql/collations" "github.com/stretchr/testify/assert" @@ -378,7 +380,7 @@ func TestSQLErrorOnServerClose(t *testing.T) { // We should be getting a Connection lost error. _, _, _, err = cConn.ReadQueryResult(100, true) require.Error(t, err) - require.True(t, IsConnLostDuringQuery(err), err.Error()) + require.True(t, sqlerror.IsConnLostDuringQuery(err), err.Error()) } func TestQueries(t *testing.T) { diff --git a/go/mysql/replication.go b/go/mysql/replication.go index 33f24860266..399698d6a2a 100644 --- a/go/mysql/replication.go +++ b/go/mysql/replication.go @@ -17,6 +17,7 @@ limitations under the License. package mysql import ( + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -45,7 +46,7 @@ func (c *Conn) WriteComBinlogDump(serverID uint32, binlogFilename string, binlog pos = writeUint32(data, pos, serverID) _ = writeEOFString(data, pos, binlogFilename) if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err) } return nil } @@ -92,7 +93,7 @@ func (c *Conn) WriteComBinlogDumpGTID(serverID uint32, binlogFilename string, bi pos = writeUint32(data, pos, uint32(len(gtidSet))) //nolint pos += copy(data[pos:], gtidSet) //nolint if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err) } return nil } @@ -110,7 +111,7 @@ func (c *Conn) SendSemiSyncAck(binlogFilename string, binlogPos uint64) error { pos = writeUint64(data, pos, binlogPos) _ = writeEOFString(data, pos, binlogFilename) if err := 
c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err) } return nil @@ -132,7 +133,7 @@ func (c *Conn) WriteBinlogEvent(ev BinlogEvent, semiSyncEnabled bool) error { } _ = writeEOFString(data, pos, string(ev.Bytes())) if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err) } return nil } diff --git a/go/mysql/filepos_gtid.go b/go/mysql/replication/filepos_gtid.go similarity index 68% rename from go/mysql/filepos_gtid.go rename to go/mysql/replication/filepos_gtid.go index e5bfd055bee..850fb421915 100644 --- a/go/mysql/filepos_gtid.go +++ b/go/mysql/replication/filepos_gtid.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "fmt" @@ -38,9 +38,9 @@ func parseFilePosGTID(s string) (GTID, error) { return nil, fmt.Errorf("invalid FilePos GTID (%v): expecting pos to be an integer", s) } - return filePosGTID{ - file: parts[0], - pos: uint32(pos), + return FilePosGTID{ + File: parts[0], + Pos: uint32(pos), }, nil } @@ -50,69 +50,69 @@ func ParseFilePosGTIDSet(s string) (GTIDSet, error) { if err != nil { return nil, err } - return gtid.(filePosGTID), err + return gtid.(FilePosGTID), err } -// filePosGTID implements GTID. -type filePosGTID struct { - file string - pos uint32 +// FilePosGTID implements GTID. +type FilePosGTID struct { + File string + Pos uint32 } // String implements GTID.String(). -func (gtid filePosGTID) String() string { - return fmt.Sprintf("%s:%d", gtid.file, gtid.pos) +func (gtid FilePosGTID) String() string { + return fmt.Sprintf("%s:%d", gtid.File, gtid.Pos) } // Flavor implements GTID.Flavor(). 
-func (gtid filePosGTID) Flavor() string { +func (gtid FilePosGTID) Flavor() string { return FilePosFlavorID } // SequenceDomain implements GTID.SequenceDomain(). -func (gtid filePosGTID) SequenceDomain() any { +func (gtid FilePosGTID) SequenceDomain() any { return nil } // SourceServer implements GTID.SourceServer(). -func (gtid filePosGTID) SourceServer() any { +func (gtid FilePosGTID) SourceServer() any { return nil } // SequenceNumber implements GTID.SequenceNumber(). -func (gtid filePosGTID) SequenceNumber() any { +func (gtid FilePosGTID) SequenceNumber() any { return nil } // GTIDSet implements GTID.GTIDSet(). -func (gtid filePosGTID) GTIDSet() GTIDSet { +func (gtid FilePosGTID) GTIDSet() GTIDSet { return gtid } // ContainsGTID implements GTIDSet.ContainsGTID(). -func (gtid filePosGTID) ContainsGTID(other GTID) bool { +func (gtid FilePosGTID) ContainsGTID(other GTID) bool { if other == nil { return true } - filePosOther, ok := other.(filePosGTID) + filePosOther, ok := other.(FilePosGTID) if !ok { return false } - if filePosOther.file < gtid.file { + if filePosOther.File < gtid.File { return true } - if filePosOther.file > gtid.file { + if filePosOther.File > gtid.File { return false } - return filePosOther.pos <= gtid.pos + return filePosOther.Pos <= gtid.Pos } // Contains implements GTIDSet.Contains(). -func (gtid filePosGTID) Contains(other GTIDSet) bool { +func (gtid FilePosGTID) Contains(other GTIDSet) bool { if other == nil { return false } - filePosOther, ok := other.(filePosGTID) + filePosOther, ok := other.(FilePosGTID) if !ok { return false } @@ -120,8 +120,8 @@ func (gtid filePosGTID) Contains(other GTIDSet) bool { } // Equal implements GTIDSet.Equal(). 
-func (gtid filePosGTID) Equal(other GTIDSet) bool { - filePosOther, ok := other.(filePosGTID) +func (gtid FilePosGTID) Equal(other GTIDSet) bool { + filePosOther, ok := other.(FilePosGTID) if !ok { return false } @@ -129,8 +129,8 @@ func (gtid filePosGTID) Equal(other GTIDSet) bool { } // AddGTID implements GTIDSet.AddGTID(). -func (gtid filePosGTID) AddGTID(other GTID) GTIDSet { - filePosOther, ok := other.(filePosGTID) +func (gtid FilePosGTID) AddGTID(other GTID) GTIDSet { + filePosOther, ok := other.(FilePosGTID) if !ok { return gtid } @@ -138,8 +138,8 @@ func (gtid filePosGTID) AddGTID(other GTID) GTIDSet { } // Union implements GTIDSet.Union(). -func (gtid filePosGTID) Union(other GTIDSet) GTIDSet { - filePosOther, ok := other.(filePosGTID) +func (gtid FilePosGTID) Union(other GTIDSet) GTIDSet { + filePosOther, ok := other.(FilePosGTID) if !ok || gtid.Contains(other) { return gtid } @@ -150,12 +150,11 @@ func (gtid filePosGTID) Union(other GTIDSet) GTIDSet { // Last returns last filePosition // For filePos based GTID we have only one position // here we will just return the current filePos -func (gtid filePosGTID) Last() string { +func (gtid FilePosGTID) Last() string { return gtid.String() } func init() { gtidParsers[FilePosFlavorID] = parseFilePosGTID gtidSetParsers[FilePosFlavorID] = ParseFilePosGTIDSet - flavors[FilePosFlavorID] = newFilePosFlavor } diff --git a/go/mysql/filepos_gtid_test.go b/go/mysql/replication/filepos_gtid_test.go similarity index 77% rename from go/mysql/filepos_gtid_test.go rename to go/mysql/replication/filepos_gtid_test.go index ec7f9d33142..174aed6ccf9 100644 --- a/go/mysql/filepos_gtid_test.go +++ b/go/mysql/replication/filepos_gtid_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package mysql +package replication import ( "testing" @@ -38,12 +38,12 @@ func Test_filePosGTID_String(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - gtid := filePosGTID{ - file: tt.fields.file, - pos: tt.fields.pos, + gtid := FilePosGTID{ + File: tt.fields.file, + Pos: tt.fields.pos, } if got := gtid.String(); got != tt.want { - t.Errorf("filePosGTID.String() = %v, want %v", got, tt.want) + t.Errorf("FilePosGTID.String() = %v, want %v", got, tt.want) } }) } @@ -66,36 +66,36 @@ func Test_filePosGTID_ContainsGTID(t *testing.T) { { "returns true when the position is equal", fields{file: "testfile", pos: 1234}, - args{other: filePosGTID{file: "testfile", pos: 1234}}, + args{other: FilePosGTID{File: "testfile", Pos: 1234}}, true, }, { "returns true when the position is less than equal", fields{file: "testfile", pos: 1234}, - args{other: filePosGTID{file: "testfile", pos: 1233}}, + args{other: FilePosGTID{File: "testfile", Pos: 1233}}, true, }, { "returns false when the position is less than equal", fields{file: "testfile", pos: 1234}, - args{other: filePosGTID{file: "testfile", pos: 1235}}, + args{other: FilePosGTID{File: "testfile", Pos: 1235}}, false, }, { "it uses integer value for comparison (it is not lexicographical order)", fields{file: "testfile", pos: 99761227}, - args{other: filePosGTID{file: "testfile", pos: 103939867}}, + args{other: FilePosGTID{File: "testfile", Pos: 103939867}}, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - gtid := filePosGTID{ - file: tt.fields.file, - pos: tt.fields.pos, + gtid := FilePosGTID{ + File: tt.fields.file, + Pos: tt.fields.pos, } if got := gtid.ContainsGTID(tt.args.other); got != tt.want { - t.Errorf("filePosGTID.ContainsGTID() = %v, want %v", got, tt.want) + t.Errorf("FilePosGTID.ContainsGTID() = %v, want %v", got, tt.want) } }) } diff --git a/go/mysql/gtid.go b/go/mysql/replication/gtid.go similarity index 99% rename from go/mysql/gtid.go rename to 
go/mysql/replication/gtid.go index d5f6a44df74..14e781a714f 100644 --- a/go/mysql/gtid.go +++ b/go/mysql/replication/gtid.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "fmt" diff --git a/go/mysql/gtid_set.go b/go/mysql/replication/gtid_set.go similarity index 99% rename from go/mysql/gtid_set.go rename to go/mysql/replication/gtid_set.go index 812b7f33caf..1e4ca29b42e 100644 --- a/go/mysql/gtid_set.go +++ b/go/mysql/replication/gtid_set.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication // GTIDSet represents the set of transactions received or applied by a server. // In some flavors, a single GTID is enough to specify the set of all diff --git a/go/mysql/gtid_test.go b/go/mysql/replication/gtid_test.go similarity index 99% rename from go/mysql/gtid_test.go rename to go/mysql/replication/gtid_test.go index 8dfea641727..8713f94b115 100644 --- a/go/mysql/gtid_test.go +++ b/go/mysql/replication/gtid_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "strings" diff --git a/go/mysql/mariadb_gtid.go b/go/mysql/replication/mariadb_gtid.go similarity index 97% rename from go/mysql/mariadb_gtid.go rename to go/mysql/replication/mariadb_gtid.go index 713ef2c72b4..ff63964bbf1 100644 --- a/go/mysql/mariadb_gtid.go +++ b/go/mysql/replication/mariadb_gtid.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "fmt" @@ -62,8 +62,8 @@ func parseMariadbGTID(s string) (GTID, error) { }, nil } -// parseMariadbGTIDSet is registered as a GTIDSet parser. 
-func parseMariadbGTIDSet(s string) (GTIDSet, error) { +// ParseMariadbGTIDSet is registered as a GTIDSet parser. +func ParseMariadbGTIDSet(s string) (GTIDSet, error) { gtidStrings := strings.Split(s, ",") gtidSet := make(MariadbGTIDSet, len(gtidStrings)) for _, gtidString := range gtidStrings { @@ -272,5 +272,5 @@ func (gtidSet MariadbGTIDSet) addGTID(otherGTID MariadbGTID) { func init() { gtidParsers[MariadbFlavorID] = parseMariadbGTID - gtidSetParsers[MariadbFlavorID] = parseMariadbGTIDSet + gtidSetParsers[MariadbFlavorID] = ParseMariadbGTIDSet } diff --git a/go/mysql/mariadb_gtid_test.go b/go/mysql/replication/mariadb_gtid_test.go similarity index 98% rename from go/mysql/mariadb_gtid_test.go rename to go/mysql/replication/mariadb_gtid_test.go index 49472ab8d33..3fe02b31822 100644 --- a/go/mysql/mariadb_gtid_test.go +++ b/go/mysql/replication/mariadb_gtid_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "strings" @@ -81,9 +81,9 @@ func TestParseMariaGTIDSet(t *testing.T) { 11: MariadbGTID{Domain: 11, Server: 22, Sequence: 3333}, } - got, err := parseMariadbGTIDSet(input) + got, err := ParseMariadbGTIDSet(input) assert.NoError(t, err, "%v", err) - assert.True(t, got.Equal(want), "parseMariadbGTIDSet(%#v) = %#v, want %#v", input, got, want) + assert.True(t, got.Equal(want), "ParseMariadbGTIDSet(%#v) = %#v, want %#v", input, got, want) } @@ -91,13 +91,13 @@ func TestParseInvalidMariaGTIDSet(t *testing.T) { input := "12-34-5678,11-22-33e33" want := "invalid MariaDB GTID Sequence number" - _, err := parseMariadbGTIDSet(input) + _, err := ParseMariadbGTIDSet(input) if err == nil { t.Errorf("expected error for invalid input (%#v)", input) return } if got := err.Error(); !strings.HasPrefix(got, want) { - t.Errorf("parseMariadbGTIDSet(%#v) error = %#v, want %#v", input, got, want) + t.Errorf("ParseMariadbGTIDSet(%#v) error = %#v, want %#v", input, 
got, want) } } @@ -621,7 +621,7 @@ func TestMariaGTIDSetLast(t *testing.T) { "12-34-5678": "12-34-5678", } for input, want := range testCases { - got, err := parseMariadbGTIDSet(input) + got, err := ParseMariadbGTIDSet(input) require.NoError(t, err) assert.Equal(t, want, got.Last()) } diff --git a/go/mysql/mysql56_gtid.go b/go/mysql/replication/mysql56_gtid.go similarity index 99% rename from go/mysql/mysql56_gtid.go rename to go/mysql/replication/mysql56_gtid.go index 0aae3d54336..4ec861b84e5 100644 --- a/go/mysql/mysql56_gtid.go +++ b/go/mysql/replication/mysql56_gtid.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "encoding/hex" diff --git a/go/mysql/mysql56_gtid_set.go b/go/mysql/replication/mysql56_gtid_set.go similarity index 97% rename from go/mysql/mysql56_gtid_set.go rename to go/mysql/replication/mysql56_gtid_set.go index 63e778f3527..1d46176b19a 100644 --- a/go/mysql/mysql56_gtid_set.go +++ b/go/mysql/replication/mysql56_gtid_set.go @@ -14,16 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "bytes" "encoding/binary" + "slices" "strconv" "strings" - "golang.org/x/exp/slices" - "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -126,8 +125,18 @@ func ParseMysql56GTIDSet(s string) (Mysql56GTIDSet, error) { intervals = append(sidIntervals, intervals...) } // Internally we expect intervals to be stored in order. - slices.SortFunc(intervals, func(a, b interval) bool { - return a.start < b.start + slices.SortFunc(intervals, func(a, b interval) int { + // TODO: switch to cmp.Compare for Go 1.21+. + // + // https://pkg.go.dev/cmp@master#Compare. 
+ switch { + case a.start < b.start: + return -1 + case a.start > b.start: + return 1 + default: + return 0 + } }) set[sid] = intervals } @@ -149,8 +158,8 @@ func (set Mysql56GTIDSet) SIDs() []SID { } func sortSIDs(sids []SID) { - slices.SortFunc(sids, func(a, b SID) bool { - return bytes.Compare(a[:], b[:]) < 0 + slices.SortFunc(sids, func(a, b SID) int { + return bytes.Compare(a[:], b[:]) }) } diff --git a/go/mysql/mysql56_gtid_set_test.go b/go/mysql/replication/mysql56_gtid_set_test.go similarity index 99% rename from go/mysql/mysql56_gtid_set_test.go rename to go/mysql/replication/mysql56_gtid_set_test.go index 03082bc736e..323baae3885 100644 --- a/go/mysql/mysql56_gtid_set_test.go +++ b/go/mysql/replication/mysql56_gtid_set_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "fmt" diff --git a/go/mysql/mysql56_gtid_test.go b/go/mysql/replication/mysql56_gtid_test.go similarity index 90% rename from go/mysql/mysql56_gtid_test.go rename to go/mysql/replication/mysql56_gtid_test.go index 335835d8199..7a4bc9862a8 100644 --- a/go/mysql/mysql56_gtid_test.go +++ b/go/mysql/replication/mysql56_gtid_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package mysql +package replication import ( "strings" @@ -141,3 +141,15 @@ func TestMysql56GTIDGTIDSet(t *testing.T) { t.Errorf("%#v.GTIDSet() = %#v, want %#v", input, got, want) } } + +func TestMysql56ParseGTID(t *testing.T) { + input := "00010203-0405-0607-0809-0A0B0C0D0E0F:56789" + want := Mysql56GTID{ + Server: SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, + Sequence: 56789, + } + + got, err := parseMysql56GTID(input) + require.NoError(t, err, "unexpected error: %v", err) + assert.Equal(t, want, got, "(&mysql56{}).ParseGTID(%#v) = %#v, want %#v", input, got, want) +} diff --git a/go/mysql/primary_status.go b/go/mysql/replication/primary_status.go similarity index 53% rename from go/mysql/primary_status.go rename to go/mysql/replication/primary_status.go index e8524862917..679b152f9d4 100644 --- a/go/mysql/primary_status.go +++ b/go/mysql/replication/primary_status.go @@ -14,10 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( + "fmt" + + "vitess.io/vitess/go/vt/log" replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" + "vitess.io/vitess/go/vt/vterrors" ) // PrimaryStatus holds replication information from SHOW MASTER STATUS. @@ -35,3 +39,32 @@ func PrimaryStatusToProto(s PrimaryStatus) *replicationdatapb.PrimaryStatus { FilePosition: EncodePosition(s.FilePosition), } } + +func ParseMysqlPrimaryStatus(resultMap map[string]string) (PrimaryStatus, error) { + status := ParsePrimaryStatus(resultMap) + + var err error + status.Position.GTIDSet, err = ParseMysql56GTIDSet(resultMap["Executed_Gtid_Set"]) + if err != nil { + return PrimaryStatus{}, vterrors.Wrapf(err, "PrimaryStatus can't parse MySQL 5.6 GTID (Executed_Gtid_Set: %#v)", resultMap["Executed_Gtid_Set"]) + } + + return status, nil +} + +// ParsePrimaryStatus parses the common fields of SHOW MASTER STATUS. 
+func ParsePrimaryStatus(fields map[string]string) PrimaryStatus { + status := PrimaryStatus{} + + fileExecPosStr := fields["Position"] + file := fields["File"] + if file != "" && fileExecPosStr != "" { + var err error + status.FilePosition.GTIDSet, err = ParseFilePosGTIDSet(fmt.Sprintf("%s:%s", file, fileExecPosStr)) + if err != nil { + log.Warningf("Error parsing GTID set %s:%s: %v", file, fileExecPosStr, err) + } + } + + return status +} diff --git a/go/mysql/replication_position.go b/go/mysql/replication/replication_position.go similarity index 88% rename from go/mysql/replication_position.go rename to go/mysql/replication/replication_position.go index d32d99e8011..240321f2c6f 100644 --- a/go/mysql/replication_position.go +++ b/go/mysql/replication/replication_position.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "encoding/json" @@ -145,6 +145,25 @@ func DecodePosition(s string) (rp Position, err error) { return ParsePosition(flav, gtid) } +// DecodePositionDefaultFlavor converts a string in the format returned by +// EncodePosition back into a Position value with the +// correct underlying flavor. If the string does not indicate a flavor, then the 'flavor' argument +// is used. For example: +// - DecodePositionDefaultFlavor("MySQL56/16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615", "foo"): "MySQL56" explicitly indicated, this is the flavor. +// - DecodePositionDefaultFlavor("16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615", "MySQL56"): No flavor indicated in `s`, therefore using "MySQL56" +func DecodePositionDefaultFlavor(s string, flavor string) (rp Position, err error) { + if s == "" { + return rp, nil + } + + flav, gtid, ok := strings.Cut(s, "/") + if !ok { + gtid = s + flav = flavor + } + return ParsePosition(flav, gtid) +} + // ParsePosition calls the parser for the specified flavor. 
func ParsePosition(flavor, value string) (rp Position, err error) { parser := gtidSetParsers[flavor] @@ -190,7 +209,7 @@ func (rp *Position) MatchesFlavor(flavor string) bool { _, matches := rp.GTIDSet.(MariadbGTIDSet) return matches case FilePosFlavorID: - _, matches := rp.GTIDSet.(filePosGTID) + _, matches := rp.GTIDSet.(FilePosGTID) return matches } return false diff --git a/go/mysql/replication_position_test.go b/go/mysql/replication/replication_position_test.go similarity index 94% rename from go/mysql/replication_position_test.go rename to go/mysql/replication/replication_position_test.go index 5bb2e5385d0..125f5929bbe 100644 --- a/go/mysql/replication_position_test.go +++ b/go/mysql/replication/replication_position_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "encoding/json" @@ -272,6 +272,24 @@ func TestDecodePosition(t *testing.T) { } +func TestDecodePositionDefaultFlavor(t *testing.T) { + gtidSetParsers[Mysql56FlavorID] = func(s string) (GTIDSet, error) { + return ParseMysql56GTIDSet(s) + } + { + pos := "MySQL56/16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615" + rp, err := DecodePositionDefaultFlavor(pos, "foo") + assert.NoError(t, err) + assert.Equal(t, "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615", rp.GTIDSet.String()) + } + { + pos := "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615" + rp, err := DecodePositionDefaultFlavor(pos, Mysql56FlavorID) + assert.NoError(t, err) + assert.Equal(t, "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615", rp.GTIDSet.String()) + } +} + func TestDecodePositionZero(t *testing.T) { input := "" want := Position{} diff --git a/go/mysql/replication_status.go b/go/mysql/replication/replication_status.go similarity index 63% rename from go/mysql/replication_status.go rename to go/mysql/replication/replication_status.go index ff06d559a56..6b3d1bf2214 100644 --- a/go/mysql/replication_status.go +++ 
b/go/mysql/replication/replication_status.go @@ -14,11 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "fmt" + "strconv" + "vitess.io/vitess/go/vt/log" replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" "vitess.io/vitess/go/vt/vterrors" ) @@ -219,3 +221,124 @@ func (s *ReplicationStatus) FindErrantGTIDs(otherReplicaStatuses []*ReplicationS return diffSet, nil } + +func ParseMysqlReplicationStatus(resultMap map[string]string) (ReplicationStatus, error) { + status := ParseReplicationStatus(resultMap) + uuidString := resultMap["Master_UUID"] + if uuidString != "" { + sid, err := ParseSID(uuidString) + if err != nil { + return ReplicationStatus{}, vterrors.Wrapf(err, "cannot decode SourceUUID") + } + status.SourceUUID = sid + } + + var err error + status.Position.GTIDSet, err = ParseMysql56GTIDSet(resultMap["Executed_Gtid_Set"]) + if err != nil { + return ReplicationStatus{}, vterrors.Wrapf(err, "ReplicationStatus can't parse MySQL 5.6 GTID (Executed_Gtid_Set: %#v)", resultMap["Executed_Gtid_Set"]) + } + relayLogGTIDSet, err := ParseMysql56GTIDSet(resultMap["Retrieved_Gtid_Set"]) + if err != nil { + return ReplicationStatus{}, vterrors.Wrapf(err, "ReplicationStatus can't parse MySQL 5.6 GTID (Retrieved_Gtid_Set: %#v)", resultMap["Retrieved_Gtid_Set"]) + } + // We take the union of the executed and retrieved gtidset, because the retrieved gtidset only represents GTIDs since + // the relay log has been reset. To get the full Position, we need to take a union of executed GTIDSets, since these would + // have been in the relay log's GTIDSet in the past, prior to a reset. 
+ status.RelayLogPosition.GTIDSet = status.Position.GTIDSet.Union(relayLogGTIDSet) + + return status, nil +} + +func ParseMariadbReplicationStatus(resultMap map[string]string) (ReplicationStatus, error) { + status := ParseReplicationStatus(resultMap) + + var err error + status.Position.GTIDSet, err = ParseMariadbGTIDSet(resultMap["Gtid_Slave_Pos"]) + if err != nil { + return ReplicationStatus{}, vterrors.Wrapf(err, "ReplicationStatus can't parse MariaDB GTID (Gtid_Slave_Pos: %#v)", resultMap["Gtid_Slave_Pos"]) + } + + return status, nil +} + +func ParseFilePosReplicationStatus(resultMap map[string]string) (ReplicationStatus, error) { + status := ParseReplicationStatus(resultMap) + + status.Position = status.FilePosition + status.RelayLogPosition = status.RelayLogSourceBinlogEquivalentPosition + + return status, nil +} + +func ParseFilePosPrimaryStatus(resultMap map[string]string) (PrimaryStatus, error) { + status := ParsePrimaryStatus(resultMap) + + status.Position = status.FilePosition + + return status, nil +} + +// ParseReplicationStatus parses the common (non-flavor-specific) fields of ReplicationStatus +func ParseReplicationStatus(fields map[string]string) ReplicationStatus { + // The field names in the map are identical to what we receive from the database + // Hence the names still contain Master + status := ReplicationStatus{ + SourceHost: fields["Master_Host"], + SourceUser: fields["Master_User"], + SSLAllowed: fields["Master_SSL_Allowed"] == "Yes", + AutoPosition: fields["Auto_Position"] == "1", + UsingGTID: fields["Using_Gtid"] != "No" && fields["Using_Gtid"] != "", + HasReplicationFilters: (fields["Replicate_Do_DB"] != "") || (fields["Replicate_Ignore_DB"] != "") || (fields["Replicate_Do_Table"] != "") || (fields["Replicate_Ignore_Table"] != "") || (fields["Replicate_Wild_Do_Table"] != "") || (fields["Replicate_Wild_Ignore_Table"] != ""), + // These fields are returned from the underlying DB and cannot be renamed + IOState: 
ReplicationStatusToState(fields["Slave_IO_Running"]), + LastIOError: fields["Last_IO_Error"], + SQLState: ReplicationStatusToState(fields["Slave_SQL_Running"]), + LastSQLError: fields["Last_SQL_Error"], + } + parseInt, _ := strconv.ParseInt(fields["Master_Port"], 10, 32) + status.SourcePort = int32(parseInt) + parseInt, _ = strconv.ParseInt(fields["Connect_Retry"], 10, 32) + status.ConnectRetry = int32(parseInt) + parseUint, err := strconv.ParseUint(fields["Seconds_Behind_Master"], 10, 32) + if err != nil { + // we could not parse the value into a valid uint32 -- most commonly because the value is NULL from the + // database -- so let's reflect that the underlying value was unknown on our last check + status.ReplicationLagUnknown = true + } else { + status.ReplicationLagUnknown = false + status.ReplicationLagSeconds = uint32(parseUint) + } + parseUint, _ = strconv.ParseUint(fields["Master_Server_Id"], 10, 32) + status.SourceServerID = uint32(parseUint) + parseUint, _ = strconv.ParseUint(fields["SQL_Delay"], 10, 32) + status.SQLDelay = uint32(parseUint) + + executedPosStr := fields["Exec_Master_Log_Pos"] + file := fields["Relay_Master_Log_File"] + if file != "" && executedPosStr != "" { + status.FilePosition.GTIDSet, err = ParseFilePosGTIDSet(fmt.Sprintf("%s:%s", file, executedPosStr)) + if err != nil { + log.Warningf("Error parsing GTID set %s:%s: %v", file, executedPosStr, err) + } + } + + readPosStr := fields["Read_Master_Log_Pos"] + file = fields["Master_Log_File"] + if file != "" && readPosStr != "" { + status.RelayLogSourceBinlogEquivalentPosition.GTIDSet, err = ParseFilePosGTIDSet(fmt.Sprintf("%s:%s", file, readPosStr)) + if err != nil { + log.Warningf("Error parsing GTID set %s:%s: %v", file, readPosStr, err) + } + } + + relayPosStr := fields["Relay_Log_Pos"] + file = fields["Relay_Log_File"] + if file != "" && relayPosStr != "" { + status.RelayLogFilePosition.GTIDSet, err = ParseFilePosGTIDSet(fmt.Sprintf("%s:%s", file, relayPosStr)) + if err != nil { + 
log.Warningf("Error parsing GTID set %s:%s: %v", file, relayPosStr, err) + } + } + return status +} diff --git a/go/mysql/replication/replication_status_test.go b/go/mysql/replication/replication_status_test.go new file mode 100644 index 00000000000..c1f5991f253 --- /dev/null +++ b/go/mysql/replication/replication_status_test.go @@ -0,0 +1,292 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package replication + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestStatusReplicationRunning(t *testing.T) { + input := &ReplicationStatus{ + IOState: ReplicationStatusToState("yes"), + SQLState: ReplicationStatusToState("yes"), + } + want := true + if got := input.Running(); got != want { + t.Errorf("%#v.Running() = %v, want %v", input, got, want) + } +} + +func TestStatusIOThreadNotRunning(t *testing.T) { + input := &ReplicationStatus{ + IOState: ReplicationStatusToState("no"), + SQLState: ReplicationStatusToState("yes"), + } + want := false + if got := input.Running(); got != want { + t.Errorf("%#v.Running() = %v, want %v", input, got, want) + } +} + +func TestStatusSQLThreadNotRunning(t *testing.T) { + input := &ReplicationStatus{ + IOState: ReplicationStatusToState("yes"), + SQLState: ReplicationStatusToState("no"), + } + want := false + if got := input.Running(); got != want { + t.Errorf("%#v.Running() = %v, want %v", input, got, want) + } +} + +func 
TestFindErrantGTIDs(t *testing.T) { + sid1 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + sid2 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16} + sid3 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 17} + sid4 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 18} + sourceSID := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 19} + + set1 := Mysql56GTIDSet{ + sid1: []interval{{20, 30}, {35, 39}, {40, 53}, {55, 75}}, + sid2: []interval{{1, 7}, {20, 50}, {60, 70}}, + sid4: []interval{{1, 30}}, + sourceSID: []interval{{1, 7}, {20, 30}}, + } + + set2 := Mysql56GTIDSet{ + sid1: []interval{{20, 30}, {35, 37}, {50, 60}}, + sid2: []interval{{3, 5}, {22, 25}, {32, 37}, {67, 70}}, + sid3: []interval{{1, 45}}, + sourceSID: []interval{{2, 6}, {15, 40}}, + } + + set3 := Mysql56GTIDSet{ + sid1: []interval{{20, 30}, {35, 38}, {50, 70}}, + sid2: []interval{{3, 5}, {22, 25}, {32, 37}, {67, 70}}, + sid3: []interval{{1, 45}}, + sourceSID: []interval{{2, 6}, {15, 45}}, + } + + testcases := []struct { + mainRepStatus *ReplicationStatus + otherRepStatuses []*ReplicationStatus + want Mysql56GTIDSet + }{{ + mainRepStatus: &ReplicationStatus{SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set1}}, + otherRepStatuses: []*ReplicationStatus{ + {SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set2}}, + {SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set3}}, + }, + want: Mysql56GTIDSet{ + sid1: []interval{{39, 39}, {40, 49}, {71, 75}}, + sid2: []interval{{1, 2}, {6, 7}, {20, 21}, {26, 31}, {38, 50}, {60, 66}}, + sid4: []interval{{1, 30}}, + }, + }, { + mainRepStatus: &ReplicationStatus{SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set1}}, + otherRepStatuses: []*ReplicationStatus{{SourceUUID: sid1, RelayLogPosition: Position{GTIDSet: set1}}}, + // servers with the same GTID sets should not be diagnosed with errant GTIDs + want: nil, + }} + + for _, testcase := range testcases { + t.Run("", func(t 
*testing.T) { + got, err := testcase.mainRepStatus.FindErrantGTIDs(testcase.otherRepStatuses) + require.NoError(t, err) + require.Equal(t, testcase.want, got) + }) + } +} + +func TestMysqlShouldGetPosition(t *testing.T) { + resultMap := map[string]string{ + "Executed_Gtid_Set": "3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", + "Position": "1307", + "File": "source-bin.000003", + } + + sid, _ := ParseSID("3e11fa47-71ca-11e1-9e33-c80aa9429562") + want := PrimaryStatus{ + Position: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 5}}}}, + FilePosition: Position{GTIDSet: FilePosGTID{File: "source-bin.000003", Pos: 1307}}, + } + got, err := ParseMysqlPrimaryStatus(resultMap) + require.NoError(t, err) + assert.Equalf(t, got.Position.GTIDSet.String(), want.Position.GTIDSet.String(), "got Position: %v; want Position: %v", got.Position.GTIDSet, want.Position.GTIDSet) + assert.Equalf(t, got.FilePosition.GTIDSet.String(), want.FilePosition.GTIDSet.String(), "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) +} + +func TestMysqlRetrieveSourceServerId(t *testing.T) { + resultMap := map[string]string{ + "Master_Server_Id": "1", + } + + want := ReplicationStatus{SourceServerID: 1} + got, err := ParseMysqlReplicationStatus(resultMap) + require.NoError(t, err) + assert.Equalf(t, got.SourceServerID, want.SourceServerID, "got SourceServerID: %v; want SourceServerID: %v", got.SourceServerID, want.SourceServerID) +} + +func TestMysqlRetrieveFileBasedPositions(t *testing.T) { + resultMap := map[string]string{ + "Exec_Master_Log_Pos": "1307", + "Relay_Master_Log_File": "master-bin.000002", + "Read_Master_Log_Pos": "1308", + "Master_Log_File": "master-bin.000003", + "Relay_Log_Pos": "1309", + "Relay_Log_File": "relay-bin.000004", + } + + want := ReplicationStatus{ + FilePosition: Position{GTIDSet: FilePosGTID{File: "master-bin.000002", Pos: 1307}}, + RelayLogSourceBinlogEquivalentPosition: Position{GTIDSet: FilePosGTID{File: 
"master-bin.000003", Pos: 1308}}, + RelayLogFilePosition: Position{GTIDSet: FilePosGTID{File: "relay-bin.000004", Pos: 1309}}, + } + got, err := ParseMysqlReplicationStatus(resultMap) + require.NoError(t, err) + assert.Equalf(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) + assert.Equalf(t, got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet, "got RelayLogFilePosition: %v; want RelayLogFilePosition: %v", got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet) + assert.Equalf(t, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet, "got RelayLogSourceBinlogEquivalentPosition: %v; want RelayLogSourceBinlogEquivalentPosition: %v", got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet) +} + +func TestMysqlShouldGetRelayLogPosition(t *testing.T) { + resultMap := map[string]string{ + "Executed_Gtid_Set": "3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", + "Retrieved_Gtid_Set": "3e11fa47-71ca-11e1-9e33-c80aa9429562:6-9", + "Exec_Master_Log_Pos": "1307", + "Relay_Master_Log_File": "master-bin.000002", + "Read_Master_Log_Pos": "1308", + "Master_Log_File": "master-bin.000003", + } + + sid, _ := ParseSID("3e11fa47-71ca-11e1-9e33-c80aa9429562") + want := ReplicationStatus{ + Position: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 5}}}}, + RelayLogPosition: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 9}}}}, + } + got, err := ParseMysqlReplicationStatus(resultMap) + require.NoError(t, err) + assert.Equalf(t, got.RelayLogPosition.GTIDSet.String(), want.RelayLogPosition.GTIDSet.String(), "got RelayLogPosition: %v; want RelayLogPosition: %v", got.RelayLogPosition.GTIDSet, want.RelayLogPosition.GTIDSet) +} + +func TestMariadbRetrieveSourceServerId(t *testing.T) { + resultMap := map[string]string{ + 
"Master_Server_Id": "1", + "Gtid_Slave_Pos": "0-101-2320", + } + + want := ReplicationStatus{SourceServerID: 1} + got, err := ParseMariadbReplicationStatus(resultMap) + require.NoError(t, err) + assert.Equal(t, got.SourceServerID, want.SourceServerID, fmt.Sprintf("got SourceServerID: %v; want SourceServerID: %v", got.SourceServerID, want.SourceServerID)) +} + +func TestMariadbRetrieveFileBasedPositions(t *testing.T) { + resultMap := map[string]string{ + "Exec_Master_Log_Pos": "1307", + "Relay_Master_Log_File": "master-bin.000002", + "Read_Master_Log_Pos": "1308", + "Master_Log_File": "master-bin.000003", + "Gtid_Slave_Pos": "0-101-2320", + "Relay_Log_Pos": "1309", + "Relay_Log_File": "relay-bin.000004", + } + + want := ReplicationStatus{ + FilePosition: Position{GTIDSet: FilePosGTID{File: "master-bin.000002", Pos: 1307}}, + RelayLogSourceBinlogEquivalentPosition: Position{GTIDSet: FilePosGTID{File: "master-bin.000003", Pos: 1308}}, + RelayLogFilePosition: Position{GTIDSet: FilePosGTID{File: "relay-bin.000004", Pos: 1309}}, + } + got, err := ParseMariadbReplicationStatus(resultMap) + require.NoError(t, err) + assert.Equalf(t, got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet, "got RelayLogFilePosition: %v; want RelayLogFilePosition: %v", got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet) + assert.Equal(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, fmt.Sprintf("got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet)) + assert.Equal(t, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet, fmt.Sprintf("got RelayLogSourceBinlogEquivalentPosition: %v; want RelayLogSourceBinlogEquivalentPosition: %v", got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet)) +} + +func TestMariadbShouldGetNilRelayLogPosition(t *testing.T) { + resultMap := map[string]string{ + "Exec_Master_Log_Pos": 
"1307", + "Relay_Master_Log_File": "master-bin.000002", + "Read_Master_Log_Pos": "1308", + "Master_Log_File": "master-bin.000003", + "Gtid_Slave_Pos": "0-101-2320", + } + got, err := ParseMariadbReplicationStatus(resultMap) + require.NoError(t, err) + assert.Truef(t, got.RelayLogPosition.IsZero(), "Got a filled in RelayLogPosition. For MariaDB we should get back nil, because MariaDB does not return the retrieved GTIDSet. got: %#v", got.RelayLogPosition) +} + +func TestFilePosRetrieveSourceServerId(t *testing.T) { + resultMap := map[string]string{ + "Master_Server_Id": "1", + } + + want := ReplicationStatus{SourceServerID: 1} + got, err := ParseFilePosReplicationStatus(resultMap) + require.NoError(t, err) + assert.Equalf(t, got.SourceServerID, want.SourceServerID, "got SourceServerID: %v; want SourceServerID: %v", got.SourceServerID, want.SourceServerID) +} + +func TestFilePosRetrieveExecutedPosition(t *testing.T) { + resultMap := map[string]string{ + "Exec_Master_Log_Pos": "1307", + "Relay_Master_Log_File": "master-bin.000002", + "Read_Master_Log_Pos": "1308", + "Master_Log_File": "master-bin.000003", + "Relay_Log_Pos": "1309", + "Relay_Log_File": "relay-bin.000004", + } + + want := ReplicationStatus{ + Position: Position{GTIDSet: FilePosGTID{File: "master-bin.000002", Pos: 1307}}, + RelayLogPosition: Position{GTIDSet: FilePosGTID{File: "master-bin.000003", Pos: 1308}}, + FilePosition: Position{GTIDSet: FilePosGTID{File: "master-bin.000002", Pos: 1307}}, + RelayLogSourceBinlogEquivalentPosition: Position{GTIDSet: FilePosGTID{File: "master-bin.000003", Pos: 1308}}, + RelayLogFilePosition: Position{GTIDSet: FilePosGTID{File: "relay-bin.000004", Pos: 1309}}, + } + got, err := ParseFilePosReplicationStatus(resultMap) + require.NoError(t, err) + assert.Equalf(t, got.Position.GTIDSet, want.Position.GTIDSet, "got Position: %v; want Position: %v", got.Position.GTIDSet, want.Position.GTIDSet) + assert.Equalf(t, got.RelayLogPosition.GTIDSet, want.RelayLogPosition.GTIDSet, 
"got RelayLogPosition: %v; want RelayLogPosition: %v", got.RelayLogPosition.GTIDSet, want.RelayLogPosition.GTIDSet) + assert.Equalf(t, got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet, "got RelayLogFilePosition: %v; want RelayLogFilePosition: %v", got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet) + assert.Equalf(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) + assert.Equalf(t, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet, "got RelayLogSourceBinlogEquivalentPosition: %v; want RelayLogSourceBinlogEquivalentPosition: %v", got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet) + assert.Equalf(t, got.Position.GTIDSet, got.FilePosition.GTIDSet, "FilePosition and Position don't match when they should for the FilePos flavor") + assert.Equalf(t, got.RelayLogPosition.GTIDSet, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, "RelayLogPosition and RelayLogSourceBinlogEquivalentPosition don't match when they should for the FilePos flavor") +} + +func TestFilePosShouldGetPosition(t *testing.T) { + resultMap := map[string]string{ + "Position": "1307", + "File": "source-bin.000003", + } + + want := PrimaryStatus{ + Position: Position{GTIDSet: FilePosGTID{File: "source-bin.000003", Pos: 1307}}, + FilePosition: Position{GTIDSet: FilePosGTID{File: "source-bin.000003", Pos: 1307}}, + } + got, err := ParseFilePosPrimaryStatus(resultMap) + require.NoError(t, err) + assert.Equalf(t, got.Position.GTIDSet, want.Position.GTIDSet, "got Position: %v; want Position: %v", got.Position.GTIDSet, want.Position.GTIDSet) + assert.Equalf(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) + assert.Equalf(t, got.Position.GTIDSet, 
got.FilePosition.GTIDSet, "FilePosition and Position don't match when they should for the FilePos flavor") +} diff --git a/go/mysql/replication/state.go b/go/mysql/replication/state.go new file mode 100644 index 00000000000..d08965a6fb6 --- /dev/null +++ b/go/mysql/replication/state.go @@ -0,0 +1,49 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package replication + +import "strings" + +type ReplicationState int32 + +const ( + ReplicationStateUnknown ReplicationState = iota + ReplicationStateStopped + ReplicationStateConnecting + ReplicationStateRunning +) + +// ReplicationStatusToState converts a value you have for the IO thread(s) or SQL +// thread(s) or Group Replication applier thread(s) from MySQL or intermediate +// layers to a ReplicationState. 
+// on,yes,true == ReplicationStateRunning +// off,no,false == ReplicationStateStopped +// connecting == ReplicationStateConnecting +// anything else == ReplicationStateUnknown +func ReplicationStatusToState(s string) ReplicationState { + // Group Replication uses ON instead of Yes + switch strings.ToLower(s) { + case "yes", "on", "true": + return ReplicationStateRunning + case "no", "off", "false": + return ReplicationStateStopped + case "connecting": + return ReplicationStateConnecting + default: + return ReplicationStateUnknown + } +} diff --git a/go/mysql/replication_status_test.go b/go/mysql/replication_status_test.go deleted file mode 100644 index 556f2cfaaeb..00000000000 --- a/go/mysql/replication_status_test.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mysql - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestStatusReplicationRunning(t *testing.T) { - input := &ReplicationStatus{ - IOState: ReplicationStatusToState("yes"), - SQLState: ReplicationStatusToState("yes"), - } - want := true - if got := input.Running(); got != want { - t.Errorf("%#v.Running() = %v, want %v", input, got, want) - } -} - -func TestStatusIOThreadNotRunning(t *testing.T) { - input := &ReplicationStatus{ - IOState: ReplicationStatusToState("no"), - SQLState: ReplicationStatusToState("yes"), - } - want := false - if got := input.Running(); got != want { - t.Errorf("%#v.Running() = %v, want %v", input, got, want) - } -} - -func TestStatusSQLThreadNotRunning(t *testing.T) { - input := &ReplicationStatus{ - IOState: ReplicationStatusToState("yes"), - SQLState: ReplicationStatusToState("no"), - } - want := false - if got := input.Running(); got != want { - t.Errorf("%#v.Running() = %v, want %v", input, got, want) - } -} - -func TestFindErrantGTIDs(t *testing.T) { - sid1 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - sid2 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16} - sid3 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 17} - sid4 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 18} - sourceSID := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 19} - - set1 := Mysql56GTIDSet{ - sid1: []interval{{20, 30}, {35, 39}, {40, 53}, {55, 75}}, - sid2: []interval{{1, 7}, {20, 50}, {60, 70}}, - sid4: []interval{{1, 30}}, - sourceSID: []interval{{1, 7}, {20, 30}}, - } - - set2 := Mysql56GTIDSet{ - sid1: []interval{{20, 30}, {35, 37}, {50, 60}}, - sid2: []interval{{3, 5}, {22, 25}, {32, 37}, {67, 70}}, - sid3: []interval{{1, 45}}, - sourceSID: []interval{{2, 6}, {15, 40}}, - } - - set3 := Mysql56GTIDSet{ - sid1: []interval{{20, 30}, {35, 38}, {50, 70}}, - sid2: []interval{{3, 5}, {22, 25}, {32, 37}, {67, 70}}, - sid3: []interval{{1, 45}}, - sourceSID: 
[]interval{{2, 6}, {15, 45}}, - } - - testcases := []struct { - mainRepStatus *ReplicationStatus - otherRepStatuses []*ReplicationStatus - want Mysql56GTIDSet - }{{ - mainRepStatus: &ReplicationStatus{SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set1}}, - otherRepStatuses: []*ReplicationStatus{ - {SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set2}}, - {SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set3}}, - }, - want: Mysql56GTIDSet{ - sid1: []interval{{39, 39}, {40, 49}, {71, 75}}, - sid2: []interval{{1, 2}, {6, 7}, {20, 21}, {26, 31}, {38, 50}, {60, 66}}, - sid4: []interval{{1, 30}}, - }, - }, { - mainRepStatus: &ReplicationStatus{SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set1}}, - otherRepStatuses: []*ReplicationStatus{{SourceUUID: sid1, RelayLogPosition: Position{GTIDSet: set1}}}, - // servers with the same GTID sets should not be diagnosed with errant GTIDs - want: nil, - }} - - for _, testcase := range testcases { - t.Run("", func(t *testing.T) { - got, err := testcase.mainRepStatus.FindErrantGTIDs(testcase.otherRepStatuses) - require.NoError(t, err) - require.Equal(t, testcase.want, got) - }) - } -} diff --git a/go/mysql/schema.go b/go/mysql/schema.go index c2f6c12e1f7..933ce657c3a 100644 --- a/go/mysql/schema.go +++ b/go/mysql/schema.go @@ -51,24 +51,6 @@ FROM ( ) _inner GROUP BY table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key HAVING COUNT(*) = 1 -` - - // DetectSchemaChangeOnlyBaseTable query detects if there is any schema change from previous copy excluding view tables. 
- DetectSchemaChangeOnlyBaseTable = ` -SELECT DISTINCT table_name -FROM ( - SELECT table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key - FROM information_schema.columns - WHERE table_schema = database() and table_name in (select table_name from information_schema.tables where table_schema = database() and table_type = 'BASE TABLE') - - UNION ALL - - SELECT table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key - FROM %s.schemacopy - WHERE table_schema = database() -) _inner -GROUP BY table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key -HAVING COUNT(*) = 1 ` // ClearSchemaCopy query clears the schemacopy table. @@ -80,22 +62,6 @@ select table_schema, table_name, column_name, ordinal_position, character_set_na from information_schema.columns where table_schema = database()` - // fetchColumns are the columns we fetch - fetchColumns = "table_name, column_name, data_type, collation_name" - - // FetchUpdatedTables queries fetches all information about updated tables - FetchUpdatedTables = `select ` + fetchColumns + ` -from %s.schemacopy -where table_schema = database() and - table_name in ::tableNames -order by table_name, ordinal_position` - - // FetchTables queries fetches all information about tables - FetchTables = `select ` + fetchColumns + ` -from %s.schemacopy -where table_schema = database() -order by table_name, ordinal_position` - // GetColumnNamesQueryPatternForTable is used for mocking queries in unit tests GetColumnNamesQueryPatternForTable = `SELECT COLUMN_NAME.*TABLE_NAME.*%s.*` ) diff --git a/go/mysql/server.go b/go/mysql/server.go index 82897b724b2..10219e9467d 100644 --- a/go/mysql/server.go +++ b/go/mysql/server.go @@ -26,20 +26,21 @@ import ( "sync/atomic" "time" - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/servenv" - - "vitess.io/vitess/go/sqlescape" + "github.com/pires/go-proxyproto" - 
proxyproto "github.com/pires/go-proxyproto" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/netutil" + "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/tb" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vterrors" ) @@ -123,7 +124,7 @@ type Handler interface { ComBinlogDump(c *Conn, logFile string, binlogPos uint32) error // ComBinlogDumpGTID is called when a connection receives a ComBinlogDumpGTID request - ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet GTIDSet) error + ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error // WarningCount is called at the end of each query to obtain // the value to be returned to the client in the EOF packet. @@ -212,6 +213,9 @@ type Listener struct { // connBufferPooling configures if vtgate server pools connection buffers connBufferPooling bool + // connKeepAlivePeriod is period between tcp keep-alives. + connKeepAlivePeriod time.Duration + // shutdown indicates that Shutdown method was called. 
shutdown atomic.Bool @@ -234,15 +238,17 @@ func NewFromListener( connReadTimeout time.Duration, connWriteTimeout time.Duration, connBufferPooling bool, + keepAlivePeriod time.Duration, ) (*Listener, error) { cfg := ListenerConfig{ - Listener: l, - AuthServer: authServer, - Handler: handler, - ConnReadTimeout: connReadTimeout, - ConnWriteTimeout: connWriteTimeout, - ConnReadBufferSize: connBufferSize, - ConnBufferPooling: connBufferPooling, + Listener: l, + AuthServer: authServer, + Handler: handler, + ConnReadTimeout: connReadTimeout, + ConnWriteTimeout: connWriteTimeout, + ConnReadBufferSize: connBufferSize, + ConnBufferPooling: connBufferPooling, + ConnKeepAlivePeriod: keepAlivePeriod, } return NewListenerWithConfig(cfg) } @@ -256,6 +262,7 @@ func NewListener( connWriteTimeout time.Duration, proxyProtocol bool, connBufferPooling bool, + keepAlivePeriod time.Duration, ) (*Listener, error) { listener, err := net.Listen(protocol, address) if err != nil { @@ -263,24 +270,25 @@ func NewListener( } if proxyProtocol { proxyListener := &proxyproto.Listener{Listener: listener} - return NewFromListener(proxyListener, authServer, handler, connReadTimeout, connWriteTimeout, connBufferPooling) + return NewFromListener(proxyListener, authServer, handler, connReadTimeout, connWriteTimeout, connBufferPooling, keepAlivePeriod) } - return NewFromListener(listener, authServer, handler, connReadTimeout, connWriteTimeout, connBufferPooling) + return NewFromListener(listener, authServer, handler, connReadTimeout, connWriteTimeout, connBufferPooling, keepAlivePeriod) } // ListenerConfig should be used with NewListenerWithConfig to specify listener parameters. 
type ListenerConfig struct { // Protocol-Address pair and Listener are mutually exclusive parameters - Protocol string - Address string - Listener net.Listener - AuthServer AuthServer - Handler Handler - ConnReadTimeout time.Duration - ConnWriteTimeout time.Duration - ConnReadBufferSize int - ConnBufferPooling bool + Protocol string + Address string + Listener net.Listener + AuthServer AuthServer + Handler Handler + ConnReadTimeout time.Duration + ConnWriteTimeout time.Duration + ConnReadBufferSize int + ConnBufferPooling bool + ConnKeepAlivePeriod time.Duration } // NewListenerWithConfig creates new listener using provided config. There are @@ -298,15 +306,16 @@ func NewListenerWithConfig(cfg ListenerConfig) (*Listener, error) { } return &Listener{ - authServer: cfg.AuthServer, - handler: cfg.Handler, - listener: l, - ServerVersion: servenv.AppVersion.MySQLVersion(), - connectionID: 1, - connReadTimeout: cfg.ConnReadTimeout, - connWriteTimeout: cfg.ConnWriteTimeout, - connReadBufferSize: cfg.ConnReadBufferSize, - connBufferPooling: cfg.ConnBufferPooling, + authServer: cfg.AuthServer, + handler: cfg.Handler, + listener: l, + ServerVersion: servenv.AppVersion.MySQLVersion(), + connectionID: 1, + connReadTimeout: cfg.ConnReadTimeout, + connWriteTimeout: cfg.ConnWriteTimeout, + connReadBufferSize: cfg.ConnReadBufferSize, + connBufferPooling: cfg.ConnBufferPooling, + connKeepAlivePeriod: cfg.ConnKeepAlivePeriod, }, nil } @@ -473,12 +482,12 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32, acceptTime time.Ti } if negotiatedAuthMethod == nil { - c.writeErrorPacket(CRServerHandshakeErr, SSUnknownSQLState, "No authentication methods available for authentication.") + c.writeErrorPacket(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "No authentication methods available for authentication.") return } if !l.AllowClearTextWithoutTLS.Load() && !c.TLSEnabled() && !negotiatedAuthMethod.AllowClearTextWithoutTLS() { - 
c.writeErrorPacket(CRServerHandshakeErr, SSUnknownSQLState, "Cannot use clear text authentication over non-SSL connections.") + c.writeErrorPacket(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "Cannot use clear text authentication over non-SSL connections.") return } @@ -592,7 +601,8 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32, acceptTime time.Ti for { l.handler.SetAuthServer(l.authServer) kontinue := c.handleNextCommand(l.handler) - if !kontinue { + // before going for next command check if the connection should be closed or not. + if !kontinue || c.IsMarkedForClose() { return } } diff --git a/go/mysql/server_flaky_test.go b/go/mysql/server_flaky_test.go index 364afdf54e3..1cb50528333 100644 --- a/go/mysql/server_flaky_test.go +++ b/go/mysql/server_flaky_test.go @@ -32,6 +32,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" @@ -248,7 +251,7 @@ func (th *testHandler) ComRegisterReplica(c *Conn, replicaHost string, replicaPo func (th *testHandler) ComBinlogDump(c *Conn, logFile string, binlogPos uint32) error { return nil } -func (th *testHandler) ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet GTIDSet) error { +func (th *testHandler) ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error { return nil } @@ -279,7 +282,7 @@ func TestConnectionFromListener(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:") require.NoError(t, err, "net.Listener failed") - l, err := NewFromListener(listener, authServer, th, 0, 0, false) + l, err := NewFromListener(listener, authServer, th, 0, 0, false, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -308,7 +311,7 @@ func TestConnectionWithoutSourceHost(t *testing.T) { UserData: "userData1", }} defer 
authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -341,7 +344,7 @@ func TestConnectionWithSourceHost(t *testing.T) { } defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -374,7 +377,7 @@ func TestConnectionUseMysqlNativePasswordWithSourceHost(t *testing.T) { } defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -412,7 +415,7 @@ func TestConnectionUnixSocket(t *testing.T) { os.Remove(unixSocket.Name()) - l, err := NewListener("unix", unixSocket.Name(), authServer, th, 0, 0, false, false) + l, err := NewListener("unix", unixSocket.Name(), authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -438,7 +441,7 @@ func TestClientFoundRows(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -487,7 +490,7 @@ func TestConnCounts(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go 
l.Accept() @@ -519,12 +522,12 @@ func TestConnCounts(t *testing.T) { // Test after closing connections. time.Sleep lets it work, but seems flakey. c.Close() - //time.Sleep(10 * time.Millisecond) - //checkCountsForUser(t, user, 1) + // time.Sleep(10 * time.Millisecond) + // checkCountsForUser(t, user, 1) c2.Close() - //time.Sleep(10 * time.Millisecond) - //checkCountsForUser(t, user, 0) + // time.Sleep(10 * time.Millisecond) + // checkCountsForUser(t, user, 0) } func checkCountsForUser(t *testing.T, user string, expected int64) { @@ -544,7 +547,7 @@ func TestServer(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) l.SlowConnectWarnThreshold.Store(time.Nanosecond.Nanoseconds()) defer l.Close() @@ -581,7 +584,7 @@ func TestServer(t *testing.T) { // If there's an error after streaming has started, // we should get a 2013 - th.SetErr(NewSQLError(ERUnknownComError, SSNetError, "forced error after send")) + th.SetErr(sqlerror.NewSQLError(sqlerror.ERUnknownComError, sqlerror.SSNetError, "forced error after send")) output, err = runMysqlWithErr(t, params, "error after send") require.Error(t, err) assert.Contains(t, output, "ERROR 2013 (HY000)", "Unexpected output for 'panic'") @@ -644,7 +647,7 @@ func TestServerStats(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) l.SlowConnectWarnThreshold.Store(time.Nanosecond.Nanoseconds()) defer l.Close() @@ -667,7 +670,7 @@ func TestServerStats(t *testing.T) { connRefuse.Reset() // Run an 'error' command. 
- th.SetErr(NewSQLError(ERUnknownComError, SSNetError, "forced query error")) + th.SetErr(sqlerror.NewSQLError(sqlerror.ERUnknownComError, sqlerror.SSNetError, "forced query error")) output, ok := runMysql(t, params, "error") require.False(t, ok, "mysql should have failed: %v", output) @@ -718,7 +721,7 @@ func TestClearTextServer(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() go l.Accept() @@ -791,7 +794,7 @@ func TestDialogServer(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) l.AllowClearTextWithoutTLS.Store(true) defer l.Close() @@ -834,7 +837,7 @@ func TestTLSServer(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() @@ -886,7 +889,7 @@ func TestTLSServer(t *testing.T) { // Run a 'select rows' command with results. conn, err := Connect(context.Background(), params) - //output, ok := runMysql(t, params, "select rows") + // output, ok := runMysql(t, params, "select rows") require.NoError(t, err) results, err := conn.ExecuteFetch("select rows", 1000, true) require.NoError(t, err) @@ -932,7 +935,7 @@ func TestTLSRequired(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. 
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() @@ -1021,7 +1024,7 @@ func TestCachingSha2PasswordAuthWithTLS(t *testing.T) { defer authServer.close() // Create the listener, so we can get its host. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -1115,7 +1118,7 @@ func TestCachingSha2PasswordAuthWithMoreData(t *testing.T) { defer authServer.close() // Create the listener, so we can get its host. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -1184,7 +1187,7 @@ func TestCachingSha2PasswordAuthWithoutTLS(t *testing.T) { defer authServer.close() // Create the listener. 
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -1226,7 +1229,7 @@ func TestErrorCodes(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() go l.Accept() @@ -1249,7 +1252,7 @@ func TestErrorCodes(t *testing.T) { // internal vitess errors tests := []struct { err error - code ErrorCode + code sqlerror.ErrorCode sqlState string text string }{ @@ -1257,48 +1260,48 @@ func TestErrorCodes(t *testing.T) { err: vterrors.Errorf( vtrpcpb.Code_INVALID_ARGUMENT, "invalid argument"), - code: ERUnknownError, - sqlState: SSUnknownSQLState, + code: sqlerror.ERUnknownError, + sqlState: sqlerror.SSUnknownSQLState, text: "invalid argument", }, { err: vterrors.Errorf( vtrpcpb.Code_INVALID_ARGUMENT, - "(errno %v) (sqlstate %v) invalid argument with errno", ERDupEntry, SSConstraintViolation), - code: ERDupEntry, - sqlState: SSConstraintViolation, + "(errno %v) (sqlstate %v) invalid argument with errno", sqlerror.ERDupEntry, sqlerror.SSConstraintViolation), + code: sqlerror.ERDupEntry, + sqlState: sqlerror.SSConstraintViolation, text: "invalid argument with errno", }, { err: vterrors.Errorf( vtrpcpb.Code_DEADLINE_EXCEEDED, "connection deadline exceeded"), - code: ERQueryInterrupted, - sqlState: SSQueryInterrupted, + code: sqlerror.ERQueryInterrupted, + sqlState: sqlerror.SSQueryInterrupted, text: "deadline exceeded", }, { err: vterrors.Errorf( vtrpcpb.Code_RESOURCE_EXHAUSTED, "query pool timeout"), - code: ERTooManyUserConnections, - sqlState: SSClientError, + code: sqlerror.ERTooManyUserConnections, + sqlState: sqlerror.SSClientError, text: 
"resource exhausted", }, { err: vterrors.Wrap(vterrors.Errorf(vtrpcpb.Code_ABORTED, "Row count exceeded 10000"), "wrapped"), - code: ERQueryInterrupted, - sqlState: SSQueryInterrupted, + code: sqlerror.ERQueryInterrupted, + sqlState: sqlerror.SSQueryInterrupted, text: "aborted", }, } for _, test := range tests { t.Run(test.err.Error(), func(t *testing.T) { - th.SetErr(NewSQLErrorFromError(test.err)) + th.SetErr(sqlerror.NewSQLErrorFromError(test.err)) rs, err := client.ExecuteFetch("error", 100, false) require.Error(t, err, "mysql should have failed but returned: %v", rs) - serr, ok := err.(*SQLError) + serr, ok := err.(*sqlerror.SQLError) require.True(t, ok, "mysql should have returned a SQLError") assert.Equal(t, test.code, serr.Number(), "error in %s: want code %v got %v", test.text, test.code, serr.Number()) @@ -1404,7 +1407,7 @@ func TestListenerShutdown(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() go l.Accept() @@ -1435,11 +1438,11 @@ func TestListenerShutdown(t *testing.T) { err = conn.Ping() require.EqualError(t, err, "Server shutdown in progress (errno 1053) (sqlstate 08S01)") - sqlErr, ok := err.(*SQLError) + sqlErr, ok := err.(*sqlerror.SQLError) require.True(t, ok, "Wrong error type: %T", err) - require.Equal(t, ERServerShutdown, sqlErr.Number()) - require.Equal(t, SSNetError, sqlErr.SQLState()) + require.Equal(t, sqlerror.ERServerShutdown, sqlErr.Number()) + require.Equal(t, sqlerror.SSNetError, sqlErr.SQLState()) require.Equal(t, "Server shutdown in progress", sqlErr.Message) } @@ -1477,7 +1480,7 @@ func TestServerFlush(t *testing.T) { th := &testHandler{} - l, err := NewListener("tcp", "127.0.0.1:", NewAuthServerNone(), th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", NewAuthServerNone(), th, 0, 0, false, 
false, 0) require.NoError(t, err) defer l.Close() go l.Accept() @@ -1534,4 +1537,31 @@ func (th *testHandler) ValidUseDB(c *Conn, db string, authServer AuthServer) err } func (th *testHandler) SetAuthServer(authServer AuthServer) { + +} +func TestTcpKeepAlive(t *testing.T) { + th := &testHandler{} + l, err := NewListener("tcp", "127.0.0.1:", NewAuthServerNone(), th, 0, 0, false, false, 0) + require.NoError(t, err) + defer l.Close() + go l.Accept() + + host, port := getHostPort(t, l.Addr()) + params := &ConnParams{ + Host: host, + Port: port, + } + + // on connect, the tcp method should be called. + c, err := Connect(context.Background(), params) + require.NoError(t, err) + defer c.Close() + require.True(t, th.lastConn.keepAliveOn, "tcp property method not called") + + // close the connection + th.lastConn.Close() + + // now calling this method should fail. + err = setTcpConnProperties(th.lastConn.conn.(*net.TCPConn), 0) + require.ErrorContains(t, err, "unable to enable keepalive on tcp connection") } diff --git a/go/mysql/sqlerror/constants.go b/go/mysql/sqlerror/constants.go new file mode 100644 index 00000000000..ffa4d0d93c7 --- /dev/null +++ b/go/mysql/sqlerror/constants.go @@ -0,0 +1,503 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sqlerror + +import ( + "strconv" + "strings" +) + +type ErrorCode uint16 + +func (e ErrorCode) ToString() string { + return strconv.FormatUint(uint64(e), 10) +} + +// Error codes for server-side errors. +// Originally found in include/mysql/mysqld_error.h and +// https://dev.mysql.com/doc/mysql-errors/en/server-error-reference.html +// The below are in sorted order by value, grouped by vterror code they should be bucketed into. +// See above reference for more information on each code. +const ( + // Vitess specific errors, (100-999) + ERNotReplica = ErrorCode(100) + + // unknown + ERUnknownError = ErrorCode(1105) + + // internal + ERInternalError = ErrorCode(1815) + + // unimplemented + ERNotSupportedYet = ErrorCode(1235) + ERUnsupportedPS = ErrorCode(1295) + + // resource exhausted + ERDiskFull = ErrorCode(1021) + EROutOfMemory = ErrorCode(1037) + EROutOfSortMemory = ErrorCode(1038) + ERConCount = ErrorCode(1040) + EROutOfResources = ErrorCode(1041) + ERRecordFileFull = ErrorCode(1114) + ERHostIsBlocked = ErrorCode(1129) + ERCantCreateThread = ErrorCode(1135) + ERTooManyDelayedThreads = ErrorCode(1151) + ERNetPacketTooLarge = ErrorCode(1153) + ERTooManyUserConnections = ErrorCode(1203) + ERLockTableFull = ErrorCode(1206) + ERUserLimitReached = ErrorCode(1226) + + // deadline exceeded + ERLockWaitTimeout = ErrorCode(1205) + + // unavailable + ERServerShutdown = ErrorCode(1053) + + // not found + ERDbDropExists = ErrorCode(1008) + ERCantFindFile = ErrorCode(1017) + ERFormNotFound = ErrorCode(1029) + ERKeyNotFound = ErrorCode(1032) + ERBadFieldError = ErrorCode(1054) + ERNoSuchThread = ErrorCode(1094) + ERUnknownTable = ErrorCode(1109) + ERCantFindUDF = ErrorCode(1122) + ERNonExistingGrant = ErrorCode(1141) + ERNoSuchTable = ErrorCode(1146) + ERNonExistingTableGrant = ErrorCode(1147) + ERKeyDoesNotExist = ErrorCode(1176) + + // permissions + ERDBAccessDenied = ErrorCode(1044) + ERAccessDeniedError = ErrorCode(1045) + ERKillDenied = ErrorCode(1095) + 
ERNoPermissionToCreateUsers = ErrorCode(1211) + ERSpecifiedAccessDenied = ErrorCode(1227) + + // failed precondition + ERNoDb = ErrorCode(1046) + ERNoSuchIndex = ErrorCode(1082) + ERCantDropFieldOrKey = ErrorCode(1091) + ERTableNotLockedForWrite = ErrorCode(1099) + ERTableNotLocked = ErrorCode(1100) + ERTooBigSelect = ErrorCode(1104) + ERNotAllowedCommand = ErrorCode(1148) + ERTooLongString = ErrorCode(1162) + ERDelayedInsertTableLocked = ErrorCode(1165) + ERDupUnique = ErrorCode(1169) + ERRequiresPrimaryKey = ErrorCode(1173) + ERCantDoThisDuringAnTransaction = ErrorCode(1179) + ERReadOnlyTransaction = ErrorCode(1207) + ERCannotAddForeign = ErrorCode(1215) + ERNoReferencedRow = ErrorCode(1216) + ERRowIsReferenced = ErrorCode(1217) + ERCantUpdateWithReadLock = ErrorCode(1223) + ERNoDefault = ErrorCode(1230) + ERMasterFatalReadingBinlog = ErrorCode(1236) + EROperandColumns = ErrorCode(1241) + ERSubqueryNo1Row = ErrorCode(1242) + ERUnknownStmtHandler = ErrorCode(1243) + ERWarnDataOutOfRange = ErrorCode(1264) + ERNonUpdateableTable = ErrorCode(1288) + ERFeatureDisabled = ErrorCode(1289) + EROptionPreventsStatement = ErrorCode(1290) + ERDuplicatedValueInType = ErrorCode(1291) + ERSPDoesNotExist = ErrorCode(1305) + ERNoDefaultForField = ErrorCode(1364) + ErSPNotVarArg = ErrorCode(1414) + ERRowIsReferenced2 = ErrorCode(1451) + ErNoReferencedRow2 = ErrorCode(1452) + ERDupIndex = ErrorCode(1831) + ERInnodbReadOnly = ErrorCode(1874) + + // already exists + ERDbCreateExists = ErrorCode(1007) + ERTableExists = ErrorCode(1050) + ERDupEntry = ErrorCode(1062) + ERFileExists = ErrorCode(1086) + ERUDFExists = ErrorCode(1125) + + // aborted + ERGotSignal = ErrorCode(1078) + ERForcingClose = ErrorCode(1080) + ERAbortingConnection = ErrorCode(1152) + ERLockDeadlock = ErrorCode(1213) + + // invalid arg + ERUnknownComError = ErrorCode(1047) + ERBadNullError = ErrorCode(1048) + ERBadDb = ErrorCode(1049) + ERBadTable = ErrorCode(1051) + ERNonUniq = ErrorCode(1052) + ERWrongFieldWithGroup 
= ErrorCode(1055) + ERWrongGroupField = ErrorCode(1056) + ERWrongSumSelect = ErrorCode(1057) + ERWrongValueCount = ErrorCode(1058) + ERTooLongIdent = ErrorCode(1059) + ERDupFieldName = ErrorCode(1060) + ERDupKeyName = ErrorCode(1061) + ERWrongFieldSpec = ErrorCode(1063) + ERParseError = ErrorCode(1064) + EREmptyQuery = ErrorCode(1065) + ERNonUniqTable = ErrorCode(1066) + ERInvalidDefault = ErrorCode(1067) + ERMultiplePriKey = ErrorCode(1068) + ERTooManyKeys = ErrorCode(1069) + ERTooManyKeyParts = ErrorCode(1070) + ERTooLongKey = ErrorCode(1071) + ERKeyColumnDoesNotExist = ErrorCode(1072) + ERBlobUsedAsKey = ErrorCode(1073) + ERTooBigFieldLength = ErrorCode(1074) + ERWrongAutoKey = ErrorCode(1075) + ERWrongFieldTerminators = ErrorCode(1083) + ERBlobsAndNoTerminated = ErrorCode(1084) + ERTextFileNotReadable = ErrorCode(1085) + ERWrongSubKey = ErrorCode(1089) + ERCantRemoveAllFields = ErrorCode(1090) + ERUpdateTableUsed = ErrorCode(1093) + ERNoTablesUsed = ErrorCode(1096) + ERTooBigSet = ErrorCode(1097) + ERBlobCantHaveDefault = ErrorCode(1101) + ERWrongDbName = ErrorCode(1102) + ERWrongTableName = ErrorCode(1103) + ERUnknownProcedure = ErrorCode(1106) + ERWrongParamCountToProcedure = ErrorCode(1107) + ERWrongParametersToProcedure = ErrorCode(1108) + ERFieldSpecifiedTwice = ErrorCode(1110) + ERInvalidGroupFuncUse = ErrorCode(1111) + ERTableMustHaveColumns = ErrorCode(1113) + ERUnknownCharacterSet = ErrorCode(1115) + ERTooManyTables = ErrorCode(1116) + ERTooManyFields = ErrorCode(1117) + ERTooBigRowSize = ErrorCode(1118) + ERWrongOuterJoin = ErrorCode(1120) + ERNullColumnInIndex = ErrorCode(1121) + ERFunctionNotDefined = ErrorCode(1128) + ERWrongValueCountOnRow = ErrorCode(1136) + ERInvalidUseOfNull = ErrorCode(1138) + ERRegexpError = ErrorCode(1139) + ERMixOfGroupFuncAndFields = ErrorCode(1140) + ERIllegalGrantForTable = ErrorCode(1144) + ERSyntaxError = ErrorCode(1149) + ERWrongColumnName = ErrorCode(1166) + ERWrongKeyColumn = ErrorCode(1167) + ERBlobKeyWithoutLength 
= ErrorCode(1170) + ERPrimaryCantHaveNull = ErrorCode(1171) + ERTooManyRows = ErrorCode(1172) + ERLockOrActiveTransaction = ErrorCode(1192) + ERUnknownSystemVariable = ErrorCode(1193) + ERSetConstantsOnly = ErrorCode(1204) + ERWrongArguments = ErrorCode(1210) + ERWrongUsage = ErrorCode(1221) + ERWrongNumberOfColumnsInSelect = ErrorCode(1222) + ERDupArgument = ErrorCode(1225) + ERLocalVariable = ErrorCode(1228) + ERGlobalVariable = ErrorCode(1229) + ERWrongValueForVar = ErrorCode(1231) + ERWrongTypeForVar = ErrorCode(1232) + ERVarCantBeRead = ErrorCode(1233) + ERCantUseOptionHere = ErrorCode(1234) + ERIncorrectGlobalLocalVar = ErrorCode(1238) + ERWrongFKDef = ErrorCode(1239) + ERKeyRefDoNotMatchTableRef = ErrorCode(1240) + ERCyclicReference = ErrorCode(1245) + ERIllegalReference = ErrorCode(1247) + ERDerivedMustHaveAlias = ErrorCode(1248) + ERTableNameNotAllowedHere = ErrorCode(1250) + ERCollationCharsetMismatch = ErrorCode(1253) + ERWarnDataTruncated = ErrorCode(1265) + ERCantAggregate2Collations = ErrorCode(1267) + ERCantAggregate3Collations = ErrorCode(1270) + ERCantAggregateNCollations = ErrorCode(1271) + ERVariableIsNotStruct = ErrorCode(1272) + ERUnknownCollation = ErrorCode(1273) + ERWrongNameForIndex = ErrorCode(1280) + ERWrongNameForCatalog = ErrorCode(1281) + ERBadFTColumn = ErrorCode(1283) + ERTruncatedWrongValue = ErrorCode(1292) + ERTooMuchAutoTimestampCols = ErrorCode(1293) + ERInvalidOnUpdate = ErrorCode(1294) + ERUnknownTimeZone = ErrorCode(1298) + ERInvalidCharacterString = ErrorCode(1300) + ERQueryInterrupted = ErrorCode(1317) + ERTruncatedWrongValueForField = ErrorCode(1366) + ERIllegalValueForType = ErrorCode(1367) + ERDataTooLong = ErrorCode(1406) + ErrWrongValueForType = ErrorCode(1411) + ERNoSuchUser = ErrorCode(1449) + ERForbidSchemaChange = ErrorCode(1450) + ERWrongValue = ErrorCode(1525) + ERDataOutOfRange = ErrorCode(1690) + ERInvalidJSONText = ErrorCode(3140) + ERInvalidJSONTextInParams = ErrorCode(3141) + ERInvalidJSONBinaryData = 
ErrorCode(3142) + ERInvalidJSONCharset = ErrorCode(3144) + ERInvalidCastToJSON = ErrorCode(3147) + ERJSONValueTooBig = ErrorCode(3150) + ERJSONDocumentTooDeep = ErrorCode(3157) + + ERRegexpStringNotTerminated = ErrorCode(3684) + ERRegexpBufferOverflow = ErrorCode(3684) + ERRegexpIllegalArgument = ErrorCode(3685) + ERRegexpIndexOutOfBounds = ErrorCode(3686) + ERRegexpInternal = ErrorCode(3687) + ERRegexpRuleSyntax = ErrorCode(3688) + ERRegexpBadEscapeSequence = ErrorCode(3689) + ERRegexpUnimplemented = ErrorCode(3690) + ERRegexpMismatchParen = ErrorCode(3691) + ERRegexpBadInterval = ErrorCode(3692) + ERRRegexpMaxLtMin = ErrorCode(3693) + ERRegexpInvalidBackRef = ErrorCode(3694) + ERRegexpLookBehindLimit = ErrorCode(3695) + ERRegexpMissingCloseBracket = ErrorCode(3696) + ERRegexpInvalidRange = ErrorCode(3697) + ERRegexpStackOverflow = ErrorCode(3698) + ERRegexpTimeOut = ErrorCode(3699) + ERRegexpPatternTooBig = ErrorCode(3700) + ERRegexpInvalidCaptureGroup = ErrorCode(3887) + ERRegexpInvalidFlag = ErrorCode(3900) + + ERCharacterSetMismatch = ErrorCode(3995) + + ERWrongParametersToNativeFct = ErrorCode(1583) + + // max execution time exceeded + ERQueryTimeout = ErrorCode(3024) + + ErrCantCreateGeometryObject = ErrorCode(1416) + ErrGISDataWrongEndianess = ErrorCode(3055) + ErrNotImplementedForCartesianSRS = ErrorCode(3704) + ErrNotImplementedForProjectedSRS = ErrorCode(3705) + ErrNonPositiveRadius = ErrorCode(3706) + + // server not available + ERServerIsntAvailable = ErrorCode(3168) +) + +// Sql states for errors. +// Originally found in include/mysql/sql_state.h +const ( + // SSUnknownSqlstate is ER_SIGNAL_EXCEPTION in + // include/mysql/sql_state.h, but: + // const char *unknown_sqlstate= "HY000" + // in client.c. So using that one. 
+ SSUnknownSQLState = "HY000" + + // SSNetError is network related error + SSNetError = "08S01" + + // SSWrongNumberOfColumns is related to columns error + SSWrongNumberOfColumns = "21000" + + // SSWrongValueCountOnRow is related to columns count mismatch error + SSWrongValueCountOnRow = "21S01" + + // SSDataTooLong is ER_DATA_TOO_LONG + SSDataTooLong = "22001" + + // SSDataOutOfRange is ER_DATA_OUT_OF_RANGE + SSDataOutOfRange = "22003" + + // SSConstraintViolation is constraint violation + SSConstraintViolation = "23000" + + // SSCantDoThisDuringAnTransaction is + // ER_CANT_DO_THIS_DURING_AN_TRANSACTION + SSCantDoThisDuringAnTransaction = "25000" + + // SSAccessDeniedError is ER_ACCESS_DENIED_ERROR + SSAccessDeniedError = "28000" + + // SSNoDB is ER_NO_DB_ERROR + SSNoDB = "3D000" + + // SSLockDeadlock is ER_LOCK_DEADLOCK + SSLockDeadlock = "40001" + + // SSClientError is the state on client errors + SSClientError = "42000" + + // SSDupFieldName is ER_DUP_FIELD_NAME + SSDupFieldName = "42S21" + + // SSBadFieldError is ER_BAD_FIELD_ERROR + SSBadFieldError = "42S22" + + // SSUnknownTable is ER_UNKNOWN_TABLE + SSUnknownTable = "42S02" + + // SSQueryInterrupted is ER_QUERY_INTERRUPTED; + SSQueryInterrupted = "70100" +) + +// IsConnErr returns true if the error is a connection error. +func IsConnErr(err error) bool { + if IsTooManyConnectionsErr(err) { + return false + } + if sqlErr, ok := err.(*SQLError); ok { + num := sqlErr.Number() + return (num >= CRUnknownError && num <= CRNamedPipeStateError) || num == ERQueryInterrupted + } + return false +} + +// IsConnLostDuringQuery returns true if the error is a CRServerLost error. +// Happens most commonly when a query is killed MySQL server-side. 
+func IsConnLostDuringQuery(err error) bool { + if sqlErr, ok := err.(*SQLError); ok { + num := sqlErr.Number() + return (num == CRServerLost) + } + return false +} + +func IsConnErrByCross(err error) bool { + if sqlErr, ok := err.(*SQLError); ok { + return sqlErr.Number() == CRServerLost && sqlErr.SQLState() == SSUnknownSQLState && sqlErr.Message == "EOF" + } + return false +} + +// IsEphemeralError returns true if the error is ephemeral and the caller should +// retry if possible. Note: non-SQL errors are always treated as ephemeral. +func IsEphemeralError(err error) bool { + if sqlErr, ok := err.(*SQLError); ok { + en := sqlErr.Number() + switch en { + case + CRConnectionError, + CRConnHostError, + CRMalformedPacket, + CRNamedPipeStateError, + CRServerHandshakeErr, + CRServerGone, + CRServerLost, + CRSSLConnectionError, + CRUnknownError, + CRUnknownHost, + ERCantCreateThread, + ERDiskFull, + ERForcingClose, + ERGotSignal, + ERHostIsBlocked, + ERLockTableFull, + ERInnodbReadOnly, + ERInternalError, + ERLockDeadlock, + ERLockWaitTimeout, + ERQueryTimeout, + EROutOfMemory, + EROutOfResources, + EROutOfSortMemory, + ERQueryInterrupted, + ERServerIsntAvailable, + ERServerShutdown, + ERTooManyUserConnections, + ERUnknownError, + ERUserLimitReached: + return true + default: + return false + } + } + // If it's not an sqlError then we assume it's ephemeral + return true +} + +// IsTooManyConnectionsErr returns true if the error is due to too many connections. 
+func IsTooManyConnectionsErr(err error) bool { + if sqlErr, ok := err.(*SQLError); ok { + if sqlErr.Number() == CRServerHandshakeErr && strings.Contains(sqlErr.Message, "Too many connections") { + return true + } + } + return false +} + +// IsSchemaApplyError returns true when given error is a MySQL error applying schema change +func IsSchemaApplyError(err error) bool { + merr, isSQLErr := err.(*SQLError) + if !isSQLErr { + return false + } + switch merr.Num { + case + ERDupKeyName, + ERCantDropFieldOrKey, + ERTableExists, + ERDupFieldName: + return true + } + return false +} + +// Error codes for client-side errors. +// Originally found in include/mysql/errmsg.h and +// https://dev.mysql.com/doc/mysql-errors/en/client-error-reference.html +const ( + // CRUnknownError is CR_UNKNOWN_ERROR + CRUnknownError = ErrorCode(2000) + + // CRConnectionError is CR_CONNECTION_ERROR + // This is returned if a connection via a Unix socket fails. + CRConnectionError = ErrorCode(2002) + + // CRConnHostError is CR_CONN_HOST_ERROR + // This is returned if a connection via a TCP socket fails. + CRConnHostError = ErrorCode(2003) + + // CRUnknownHost is CR_UNKNOWN_HOST + // This is returned if the host name cannot be resolved. + CRUnknownHost = ErrorCode(2005) + + // CRServerGone is CR_SERVER_GONE_ERROR. + // This is returned if the client tries to send a command but it fails. + CRServerGone = ErrorCode(2006) + + // CRVersionError is CR_VERSION_ERROR + // This is returned if the server versions don't match what we support. + CRVersionError = ErrorCode(2007) + + // CRServerHandshakeErr is CR_SERVER_HANDSHAKE_ERR + CRServerHandshakeErr = ErrorCode(2012) + + // CRServerLost is CR_SERVER_LOST. + // Used when: + // - the client cannot write an initial auth packet. + // - the client cannot read an initial auth packet. + // - the client cannot read a response from the server. + // This happens when a running query is killed. 
+ CRServerLost = ErrorCode(2013) + + // CRCommandsOutOfSync is CR_COMMANDS_OUT_OF_SYNC + // Sent when the streaming calls are not done in the right order. + CRCommandsOutOfSync = ErrorCode(2014) + + // CRNamedPipeStateError is CR_NAMEDPIPESETSTATE_ERROR. + // This is the highest possible number for a connection error. + CRNamedPipeStateError = ErrorCode(2018) + + // CRCantReadCharset is CR_CANT_READ_CHARSET + CRCantReadCharset = ErrorCode(2019) + + // CRSSLConnectionError is CR_SSL_CONNECTION_ERROR + CRSSLConnectionError = ErrorCode(2026) + + // CRMalformedPacket is CR_MALFORMED_PACKET + CRMalformedPacket = ErrorCode(2027) +) diff --git a/go/mysql/sql_error.go b/go/mysql/sqlerror/sql_error.go similarity index 80% rename from go/mysql/sql_error.go rename to go/mysql/sqlerror/sql_error.go index 1670e899dd0..9b1f65c82e3 100644 --- a/go/mysql/sql_error.go +++ b/go/mysql/sqlerror/sql_error.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package mysql +package sqlerror import ( "bytes" @@ -212,12 +212,37 @@ var stateToMysqlCode = map[vterrors.State]mysqlCode{ vterrors.ServerNotAvailable: {num: ERServerIsntAvailable, state: SSNetError}, vterrors.CantDoThisInTransaction: {num: ERCantDoThisDuringAnTransaction, state: SSCantDoThisDuringAnTransaction}, vterrors.RequiresPrimaryKey: {num: ERRequiresPrimaryKey, state: SSClientError}, + vterrors.RowIsReferenced2: {num: ERRowIsReferenced2, state: SSConstraintViolation}, + vterrors.NoReferencedRow2: {num: ErNoReferencedRow2, state: SSConstraintViolation}, vterrors.NoSuchSession: {num: ERUnknownComError, state: SSNetError}, vterrors.OperandColumns: {num: EROperandColumns, state: SSWrongNumberOfColumns}, vterrors.WrongValueCountOnRow: {num: ERWrongValueCountOnRow, state: SSWrongValueCountOnRow}, vterrors.WrongArguments: {num: ERWrongArguments, state: SSUnknownSQLState}, vterrors.UnknownStmtHandler: {num: ERUnknownStmtHandler, state: SSUnknownSQLState}, vterrors.UnknownTimeZone: {num: ERUnknownTimeZone, state: SSUnknownSQLState}, + vterrors.RegexpStringNotTerminated: {num: ERRegexpStringNotTerminated, state: SSUnknownSQLState}, + vterrors.RegexpBufferOverflow: {num: ERRegexpBufferOverflow, state: SSUnknownSQLState}, + vterrors.RegexpIllegalArgument: {num: ERRegexpIllegalArgument, state: SSUnknownSQLState}, + vterrors.RegexpIndexOutOfBounds: {num: ERRegexpIndexOutOfBounds, state: SSUnknownSQLState}, + vterrors.RegexpInternal: {num: ERRegexpInternal, state: SSUnknownSQLState}, + vterrors.RegexpRuleSyntax: {num: ERRegexpRuleSyntax, state: SSUnknownSQLState}, + vterrors.RegexpBadEscapeSequence: {num: ERRegexpBadEscapeSequence, state: SSUnknownSQLState}, + vterrors.RegexpUnimplemented: {num: ERRegexpUnimplemented, state: SSUnknownSQLState}, + vterrors.RegexpMismatchParen: {num: ERRegexpMismatchParen, state: SSUnknownSQLState}, + vterrors.RegexpBadInterval: {num: ERRegexpBadInterval, state: SSUnknownSQLState}, + vterrors.RegexpMaxLtMin: {num: ERRRegexpMaxLtMin, 
state: SSUnknownSQLState}, + vterrors.RegexpInvalidBackRef: {num: ERRegexpInvalidBackRef, state: SSUnknownSQLState}, + vterrors.RegexpLookBehindLimit: {num: ERRegexpLookBehindLimit, state: SSUnknownSQLState}, + vterrors.RegexpMissingCloseBracket: {num: ERRegexpMissingCloseBracket, state: SSUnknownSQLState}, + vterrors.RegexpInvalidRange: {num: ERRegexpInvalidRange, state: SSUnknownSQLState}, + vterrors.RegexpStackOverflow: {num: ERRegexpStackOverflow, state: SSUnknownSQLState}, + vterrors.RegexpTimeOut: {num: ERRegexpTimeOut, state: SSUnknownSQLState}, + vterrors.RegexpPatternTooBig: {num: ERRegexpPatternTooBig, state: SSUnknownSQLState}, + vterrors.RegexpInvalidFlag: {num: ERRegexpInvalidFlag, state: SSUnknownSQLState}, + vterrors.RegexpInvalidCaptureGroup: {num: ERRegexpInvalidCaptureGroup, state: SSUnknownSQLState}, + vterrors.CharacterSetMismatch: {num: ERCharacterSetMismatch, state: SSUnknownSQLState}, + vterrors.WrongParametersToNativeFct: {num: ERWrongParametersToNativeFct, state: SSUnknownSQLState}, + vterrors.KillDeniedError: {num: ERKillDenied, state: SSUnknownSQLState}, } func getStateToMySQLState(state vterrors.State) mysqlCode { @@ -272,10 +297,3 @@ func demuxResourceExhaustedErrors(msg string) ErrorCode { return ERTooManyUserConnections } } - -func IsConnErrByCross(err error) bool { - if sqlErr, ok := err.(*SQLError); ok { - return sqlErr.Number() == CRServerLost && sqlErr.SQLState() == SSUnknownSQLState && sqlErr.Message == "EOF" - } - return false -} diff --git a/go/mysql/sql_error_test.go b/go/mysql/sqlerror/sql_error_test.go similarity index 99% rename from go/mysql/sql_error_test.go rename to go/mysql/sqlerror/sql_error_test.go index d0a248d3599..3c7f3114b68 100644 --- a/go/mysql/sql_error_test.go +++ b/go/mysql/sqlerror/sql_error_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package mysql +package sqlerror import ( "fmt" diff --git a/go/mysql/streaming_query.go b/go/mysql/streaming_query.go index 31e55c3a9a4..05ba62c4cd0 100644 --- a/go/mysql/streaming_query.go +++ b/go/mysql/streaming_query.go @@ -17,6 +17,7 @@ limitations under the License. package mysql import ( + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" @@ -30,7 +31,7 @@ import ( func (c *Conn) ExecuteStreamFetch(query string) (err error) { defer func() { if err != nil { - if sqlerr, ok := err.(*SQLError); ok { + if sqlerr, ok := err.(*sqlerror.SQLError); ok { sqlerr.Query = query } } @@ -38,7 +39,7 @@ func (c *Conn) ExecuteStreamFetch(query string) (err error) { // Sanity check. if c.fields != nil { - return NewSQLError(CRCommandsOutOfSync, SSUnknownSQLState, "streaming query already in progress") + return sqlerror.NewSQLError(sqlerror.CRCommandsOutOfSync, sqlerror.SSUnknownSQLState, "streaming query already in progress") } // Send the query as a COM_QUERY packet. @@ -75,7 +76,7 @@ func (c *Conn) ExecuteStreamFetch(query string) (err error) { // EOF is only present here if it's not deprecated. data, err := c.readEphemeralPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } defer c.recycleReadPacket() if c.isEOFPacket(data) { @@ -85,7 +86,7 @@ func (c *Conn) ExecuteStreamFetch(query string) (err error) { } else if isErrorPacket(data) { return ParseErrorPacket(data) } else { - return NewSQLError(CRCommandsOutOfSync, SSUnknownSQLState, "unexpected packet after fields: %v", data) + return sqlerror.NewSQLError(sqlerror.CRCommandsOutOfSync, sqlerror.SSUnknownSQLState, "unexpected packet after fields: %v", data) } } @@ -96,7 +97,7 @@ func (c *Conn) ExecuteStreamFetch(query string) (err error) { // Fields returns the fields for an ongoing streaming query. 
func (c *Conn) Fields() ([]*querypb.Field, error) { if c.fields == nil { - return nil, NewSQLError(CRCommandsOutOfSync, SSUnknownSQLState, "no streaming query in progress") + return nil, sqlerror.NewSQLError(sqlerror.CRCommandsOutOfSync, sqlerror.SSUnknownSQLState, "no streaming query in progress") } if len(c.fields) == 0 { // The query returned an empty field list. @@ -110,7 +111,7 @@ func (c *Conn) Fields() ([]*querypb.Field, error) { func (c *Conn) FetchNext(in []sqltypes.Value) ([]sqltypes.Value, error) { if c.fields == nil { // We are already done, and the result was closed. - return nil, NewSQLError(CRCommandsOutOfSync, SSUnknownSQLState, "no streaming query in progress") + return nil, sqlerror.NewSQLError(sqlerror.CRCommandsOutOfSync, sqlerror.SSUnknownSQLState, "no streaming query in progress") } if len(c.fields) == 0 { @@ -153,7 +154,7 @@ func (c *Conn) CloseResult() { func (c *Conn) ExecuteStreamLoadData(lines chan string, query string) (result *sqltypes.Result, err error) { defer func() { if err != nil { - if sqlerr, ok := err.(*SQLError); ok { + if sqlerr, ok := err.(*sqlerror.SQLError); ok { sqlerr.Query = query } } diff --git a/go/mysql/vault/auth_server_vault.go b/go/mysql/vault/auth_server_vault.go index 84ace52203e..f9e76647518 100644 --- a/go/mysql/vault/auth_server_vault.go +++ b/go/mysql/vault/auth_server_vault.go @@ -30,6 +30,8 @@ import ( vaultapi "github.com/aquarapid/vaultlib" "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" @@ -186,14 +188,14 @@ func (a *AuthServerVault) UserEntryWithHash(conn *mysql.Conn, salt []byte, user a.mu.Unlock() if !ok { - return &mysql.StaticUserData{}, mysql.NewSQLError(mysql.ERAccessDeniedError, mysql.SSAccessDeniedError, "Access denied for user '%v'", user) + return &mysql.StaticUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) 
} for _, entry := range userEntries { if entry.MysqlNativePassword != "" { hash, err := mysql.DecodeMysqlNativePasswordHex(entry.MysqlNativePassword) if err != nil { - return &mysql.StaticUserData{Username: entry.UserData, Groups: entry.Groups}, mysql.NewSQLError(mysql.ERAccessDeniedError, mysql.SSAccessDeniedError, "Access denied for user '%v'", user) + return &mysql.StaticUserData{Username: entry.UserData, Groups: entry.Groups}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } isPass := mysql.VerifyHashedMysqlNativePassword(authResponse, salt, hash) if mysql.MatchSourceHost(remoteAddr, entry.SourceHost) && isPass { @@ -207,7 +209,7 @@ func (a *AuthServerVault) UserEntryWithHash(conn *mysql.Conn, salt []byte, user } } } - return &mysql.StaticUserData{}, mysql.NewSQLError(mysql.ERAccessDeniedError, mysql.SSAccessDeniedError, "Access denied for user '%v'", user) + return &mysql.StaticUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } func (a *AuthServerVault) setTTLTicker(ttl time.Duration) { diff --git a/go/netutil/netutil.go b/go/netutil/netutil.go index 54e53e85226..fbac6e88424 100644 --- a/go/netutil/netutil.go +++ b/go/netutil/netutil.go @@ -29,10 +29,6 @@ import ( "time" ) -func init() { - rand.Seed(time.Now().UnixNano()) -} - // byPriorityWeight sorts records by ascending priority and weight. type byPriorityWeight []*net.SRV @@ -48,7 +44,7 @@ func (addrs byPriorityWeight) Less(i, j int) bool { // shuffleByWeight shuffles SRV records by weight using the algorithm // described in RFC 2782. // NOTE(msolo) This is disabled when the weights are zero. 
-func (addrs byPriorityWeight) shuffleByWeight() { +func (addrs byPriorityWeight) shuffleByWeight(rand *rand.Rand) { sum := 0 for _, addr := range addrs { sum += int(addr.Weight) @@ -72,21 +68,21 @@ func (addrs byPriorityWeight) shuffleByWeight() { } } -func (addrs byPriorityWeight) sortRfc2782() { +func (addrs byPriorityWeight) sortRfc2782(rand *rand.Rand) { sort.Sort(addrs) i := 0 for j := 1; j < len(addrs); j++ { if addrs[i].Priority != addrs[j].Priority { - addrs[i:j].shuffleByWeight() + addrs[i:j].shuffleByWeight(rand) i = j } } - addrs[i:].shuffleByWeight() + addrs[i:].shuffleByWeight(rand) } // SortRfc2782 reorders SRV records as specified in RFC 2782. func SortRfc2782(srvs []*net.SRV) { - byPriorityWeight(srvs).sortRfc2782() + byPriorityWeight(srvs).sortRfc2782(rand.New(rand.NewSource(time.Now().UTC().UnixNano()))) } // SplitHostPort is an alternative to net.SplitHostPort that also parses the diff --git a/go/netutil/netutil_test.go b/go/netutil/netutil_test.go index 574bda5f26b..b8cfc563acb 100644 --- a/go/netutil/netutil_test.go +++ b/go/netutil/netutil_test.go @@ -24,7 +24,7 @@ import ( "testing" ) -func checkDistribution(t *testing.T, data []*net.SRV, margin float64) { +func checkDistribution(t *testing.T, rand *rand.Rand, data []*net.SRV, margin float64) { sum := 0 for _, srv := range data { sum += int(srv.Weight) @@ -36,7 +36,7 @@ func checkDistribution(t *testing.T, data []*net.SRV, margin float64) { for j := 0; j < count; j++ { d := make([]*net.SRV, len(data)) copy(d, data) - byPriorityWeight(d).shuffleByWeight() + byPriorityWeight(d).shuffleByWeight(rand) key := d[0].Target results[key] = results[key] + 1 } @@ -54,12 +54,11 @@ func checkDistribution(t *testing.T, data []*net.SRV, margin float64) { } func testUniformity(t *testing.T, size int, margin float64) { - rand.Seed(1) data := make([]*net.SRV, size) for i := 0; i < size; i++ { data[i] = &net.SRV{Target: fmt.Sprintf("%c", 'a'+i), Weight: 1} } - checkDistribution(t, data, margin) + 
checkDistribution(t, rand.New(rand.NewSource(1)), data, margin) } func TestUniformity(t *testing.T) { @@ -70,13 +69,12 @@ func TestUniformity(t *testing.T) { } func testWeighting(t *testing.T, margin float64) { - rand.Seed(1) data := []*net.SRV{ {Target: "a", Weight: 60}, {Target: "b", Weight: 30}, {Target: "c", Weight: 10}, } - checkDistribution(t, data, margin) + checkDistribution(t, rand.New(rand.NewSource(1)), data, margin) } func TestWeighting(t *testing.T) { diff --git a/go/slices2/slices.go b/go/slice/slice.go similarity index 60% rename from go/slices2/slices.go rename to go/slice/slice.go index 69e9cdc62d4..0a8efd46194 100644 --- a/go/slices2/slices.go +++ b/go/slice/slice.go @@ -14,9 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package slices2 contains generic Slice helpers; +// Package slice contains generic Slice helpers; // Some of this code is sourced from https://github.com/luraim/fun (Apache v2) -package slices2 +package slice // All returns true if all elements return true for given predicate func All[T any](s []T, fn func(T) bool) bool { @@ -38,6 +38,7 @@ func Any[T any](s []T, fn func(T) bool) bool { return false } +// Map applies a function to each element of a slice and returns a new slice func Map[From, To any](in []From, f func(From) To) []To { if in == nil { return nil @@ -48,3 +49,32 @@ func Map[From, To any](in []From, f func(From) To) []To { } return result } + +// MapWithError applies a function to each element of a slice and returns a new slice, or an error +func MapWithError[From, To any](in []From, f func(From) (To, error)) (result []To, err error) { + if in == nil { + return nil, nil + } + result = make([]To, len(in)) + for i, col := range in { + result[i], err = f(col) + if err != nil { + return nil, err + } + } + return +} + +// Filter returns a new slice containing only the elements for which the predicate returns true +func Filter[T any](in []T, f func(T) bool) 
[]T { + if in == nil { + return nil + } + result := make([]T, 0, len(in)) + for _, col := range in { + if f(col) { + result = append(result, col) + } + } + return result +} diff --git a/go/sqltypes/bind_variables.go b/go/sqltypes/bind_variables.go index 6a79919cf0c..041730ec517 100644 --- a/go/sqltypes/bind_variables.go +++ b/go/sqltypes/bind_variables.go @@ -40,6 +40,13 @@ var ( NullBindVariable = &querypb.BindVariable{Type: querypb.Type_NULL_TYPE} ) +func TupleToProto(v []Value) *querypb.Value { + return &querypb.Value{ + Type: querypb.Type_TUPLE, + Value: encodeTuple(v), + } +} + // ValueToProto converts Value to a *querypb.Value. func ValueToProto(v Value) *querypb.Value { return &querypb.Value{Type: v.typ, Value: v.val} diff --git a/go/sqltypes/bind_variables_test.go b/go/sqltypes/bind_variables_test.go index 40925d228a1..77b3381f751 100644 --- a/go/sqltypes/bind_variables_test.go +++ b/go/sqltypes/bind_variables_test.go @@ -18,28 +18,54 @@ package sqltypes import ( "fmt" - "reflect" "strings" "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" querypb "vitess.io/vitess/go/vt/proto/query" ) +// TestProtoConversions checks converting to and fro between querypb.Value and sqltypes.Value. 
func TestProtoConversions(t *testing.T) { - v := TestValue(Int64, "1") - got := ValueToProto(v) - want := &querypb.Value{Type: Int64, Value: []byte("1")} - if !proto.Equal(got, want) { - t.Errorf("ValueToProto: %v, want %v", got, want) + tcases := []struct { + name string + val Value + protoVal *querypb.Value + }{ + { + name: "integer value", + val: TestValue(Int64, "1"), + protoVal: &querypb.Value{Type: Int64, Value: []byte("1")}, + }, { + name: "tuple value", + val: TestTuple(TestValue(VarChar, "1"), TestValue(Int64, "3")), + }, { + name: "tuple of tuple as a value", + val: TestTuple( + TestTuple( + TestValue(VarChar, "1"), + TestValue(Int64, "3"), + ), + TestValue(Int64, "5"), + ), + }, } - gotback := ProtoToValue(got) - if !reflect.DeepEqual(gotback, v) { - t.Errorf("ProtoToValue: %v, want %v", gotback, v) + + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + got := ValueToProto(tcase.val) + // If we have an expected protoVal, check that serialization matches. + // For nested tuples, we do not attempt to generate a protoVal, as it is binary data. + // We simply check that the roundtrip is correct. 
+ if tcase.protoVal != nil { + require.True(t, proto.Equal(got, tcase.protoVal), "ValueToProto: %v, want %v", got, tcase.protoVal) + } + gotback := ProtoToValue(got) + require.EqualValues(t, tcase.val, gotback) + }) } } @@ -329,7 +355,7 @@ func TestValidateBindVarables(t *testing.T) { Value: []byte("a"), }, }, - err: `v: strconv.ParseInt: parsing "a": invalid syntax`, + err: `v: cannot parse int64 from "a"`, }, { in: map[string]*querypb.BindVariable{ "v": { @@ -340,7 +366,7 @@ func TestValidateBindVarables(t *testing.T) { }}, }, }, - err: `v: strconv.ParseInt: parsing "a": invalid syntax`, + err: `v: cannot parse int64 from "a"`, }} for _, tcase := range tcases { err := ValidateBindVariables(tcase.in) @@ -500,31 +526,31 @@ func TestValidateBindVariable(t *testing.T) { Type: querypb.Type_INT64, Value: []byte(InvalidNeg), }, - err: "out of range", + err: `cannot parse int64 from "-9223372036854775809": overflow`, }, { in: &querypb.BindVariable{ Type: querypb.Type_INT64, Value: []byte(InvalidPos), }, - err: "out of range", + err: `cannot parse int64 from "18446744073709551616": overflow`, }, { in: &querypb.BindVariable{ Type: querypb.Type_UINT64, Value: []byte("-1"), }, - err: "invalid syntax", + err: `cannot parse uint64 from "-1"`, }, { in: &querypb.BindVariable{ Type: querypb.Type_UINT64, Value: []byte(InvalidPos), }, - err: "out of range", + err: `cannot parse uint64 from "18446744073709551616": overflow`, }, { in: &querypb.BindVariable{ Type: querypb.Type_FLOAT64, Value: []byte("a"), }, - err: "invalid syntax", + err: `unparsed tail left after parsing float64 from "a"`, }, { in: &querypb.BindVariable{ Type: querypb.Type_EXPRESSION, diff --git a/go/sqltypes/cast.go b/go/sqltypes/cast.go new file mode 100644 index 00000000000..e97e47ea17c --- /dev/null +++ b/go/sqltypes/cast.go @@ -0,0 +1,54 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +// Cast converts a Value to the target type. +func Cast(v Value, typ Type) (Value, error) { + if v.Type() == typ || v.IsNull() { + return v, nil + } + vBytes, err := v.ToBytes() + if err != nil { + return v, err + } + if IsSigned(typ) && v.IsSigned() { + return MakeTrusted(typ, vBytes), nil + } + if IsUnsigned(typ) && v.IsUnsigned() { + return MakeTrusted(typ, vBytes), nil + } + if (IsFloat(typ) || typ == Decimal) && (v.IsIntegral() || v.IsFloat() || v.Type() == Decimal) { + return MakeTrusted(typ, vBytes), nil + } + if IsQuoted(typ) && (v.IsIntegral() || v.IsFloat() || v.Type() == Decimal || v.IsQuoted()) { + return MakeTrusted(typ, vBytes), nil + } + + // Explicitly disallow Expression. + if v.Type() == Expression { + return NULL, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v cannot be cast to %v", v, typ) + } + + // If the above fast-paths were not possible, + // go through full validation. + return NewValue(typ, vBytes) +} diff --git a/go/sqltypes/cast_test.go b/go/sqltypes/cast_test.go new file mode 100644 index 00000000000..f2a7d24e88a --- /dev/null +++ b/go/sqltypes/cast_test.go @@ -0,0 +1,123 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "reflect" + "testing" + + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +func TestCast(t *testing.T) { + tcases := []struct { + typ Type + v Value + out Value + err error + }{{ + typ: VarChar, + v: NULL, + out: NULL, + }, { + typ: VarChar, + v: TestValue(VarChar, "exact types"), + out: TestValue(VarChar, "exact types"), + }, { + typ: Int64, + v: TestValue(Int32, "32"), + out: TestValue(Int64, "32"), + }, { + typ: Int24, + v: TestValue(Uint64, "64"), + out: TestValue(Int24, "64"), + }, { + typ: Int24, + v: TestValue(VarChar, "bad int"), + err: vterrors.New(vtrpcpb.Code_UNKNOWN, `cannot parse int64 from "bad int"`), + }, { + typ: Uint64, + v: TestValue(Uint32, "32"), + out: TestValue(Uint64, "32"), + }, { + typ: Uint24, + v: TestValue(Int64, "64"), + out: TestValue(Uint24, "64"), + }, { + typ: Uint24, + v: TestValue(Int64, "-1"), + err: vterrors.New(vtrpcpb.Code_UNKNOWN, `cannot parse uint64 from "-1"`), + }, { + typ: Float64, + v: TestValue(Int64, "64"), + out: TestValue(Float64, "64"), + }, { + typ: Float32, + v: TestValue(Float64, "64"), + out: TestValue(Float32, "64"), + }, { + typ: Float32, + v: TestValue(Decimal, "1.24"), + out: TestValue(Float32, "1.24"), + }, { + typ: Float64, + v: TestValue(VarChar, "1.25"), + out: TestValue(Float64, "1.25"), + }, { + typ: Float64, + v: TestValue(VarChar, "bad float"), + err: vterrors.New(vtrpcpb.Code_UNKNOWN, `unparsed tail left after parsing float64 from "bad float": "bad float"`), + }, { + typ: VarChar, + v: TestValue(Int64, "64"), + out: 
TestValue(VarChar, "64"), + }, { + typ: VarBinary, + v: TestValue(Float64, "64"), + out: TestValue(VarBinary, "64"), + }, { + typ: VarBinary, + v: TestValue(Decimal, "1.24"), + out: TestValue(VarBinary, "1.24"), + }, { + typ: VarBinary, + v: TestValue(VarChar, "1.25"), + out: TestValue(VarBinary, "1.25"), + }, { + typ: VarChar, + v: TestValue(VarBinary, "valid string"), + out: TestValue(VarChar, "valid string"), + }, { + typ: VarChar, + v: TestValue(Expression, "bad string"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "expression cannot be converted to bytes"), + }} + for _, tcase := range tcases { + got, err := Cast(tcase.v, tcase.typ) + if !vterrors.Equals(err, tcase.err) { + t.Errorf("Cast(%v) error: %v, want %v", tcase.v, vterrors.Print(err), vterrors.Print(tcase.err)) + } + if tcase.err != nil { + continue + } + + if !reflect.DeepEqual(got, tcase.out) { + t.Errorf("Cast(%v): %v, want %v", tcase.v, got, tcase.out) + } + } +} diff --git a/go/sqltypes/marshal.go b/go/sqltypes/marshal.go new file mode 100644 index 00000000000..bbf43106110 --- /dev/null +++ b/go/sqltypes/marshal.go @@ -0,0 +1,484 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sqltypes + +import ( + "fmt" + "reflect" + "strings" + "time" + + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/vt/vterrors" + + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/proto/vttime" +) + +// ResultMarshaller knows how to marshal itself into a Result. +type ResultMarshaller interface { + MarshalResult() (*Result, error) +} + +// ValueMarshaller knows how to marshal itself into the bytes for a column of +// a particular type. +type ValueMarshaller interface { + MarshalSQL(typ querypb.Type) ([]byte, error) +} + +// ReplaceFields remaps the fields and/or row columns of a given result. This +// is useful when you need to embed a struct to modify its marshalling behavior, +// then cleanup or otherwise transfer the redundant fields. +// For example: +/* +| uuid | tablet | retries | migration_uuid | $$tablet | +| abc | --- | 1 | abc | zone1-101 | + +=> becomes + +| migration_uuid | tablet | retries | +| abc | zone1-101 | 1 | +*/ +func ReplaceFields(result *Result, remap map[string]string) *Result { + var ( + // orig maps fieldname => original col (field and row) + orig = make(map[string]int, len(result.Fields)) + // fieldIdx maps final col (field) => fieldname + fieldIdx = make([]string, len(result.Fields)) + // rowIdx maps final col (row) => fieldname + rowIdx = make([]string, len(result.Fields)) + + // inverseRemap is the inverse of the remapping, so we know also if a + // field is the target of a rename + inverseRemap = make(map[string]string, len(remap)) + ) + + for i, field := range result.Fields { + orig[field.Name] = i + + if n, ok := remap[field.Name]; ok { + inverseRemap[n] = field.Name + } + } + + for i, field := range result.Fields { + if _, ok := inverseRemap[field.Name]; ok { + continue + } + + if newName, ok := remap[field.Name]; ok { + rowIdx[i] = newName + rowIdx[orig[newName]] = field.Name + + if strings.HasPrefix(field.Name, "$$") { + // Replace rows 
only; field stays unchanged. + fieldIdx[i] = field.Name + fieldIdx[orig[newName]] = newName + } else { + fieldIdx[i] = newName + fieldIdx[orig[newName]] = field.Name + } + } else { + fieldIdx[i] = field.Name + rowIdx[i] = field.Name + } + } + + var fields []*querypb.Field + for _, name := range fieldIdx { + fields = append(fields, result.Fields[orig[name]]) + } + + fields = fields[:len(result.Fields)-len(remap)] + + var rows []Row + for _, origRow := range result.Rows { + var row []Value + for _, name := range rowIdx { + row = append(row, origRow[orig[name]]) + } + + rows = append(rows, row[:len(fields)]) + } + + return &Result{ + Fields: fields, + Rows: rows, + } +} + +// MarshalResult marshals the object into a Result object. It is semi-complete. +func MarshalResult(v any) (*Result, error) { + if m, ok := v.(ResultMarshaller); ok { + return m.MarshalResult() + } + + val := reflect.ValueOf(v) + if val.Type().Kind() != reflect.Slice { + vals := reflect.Append( + reflect.MakeSlice(reflect.SliceOf(val.Type()), 0, 1), + val, + ) + return MarshalResult(vals.Interface()) + } + + // Value of the slice element. + // TODO: handle other cases; We're assuming it's a pointer to a struct + elem := val.Type().Elem() + elemType := elem.Elem() + + var ( + exportedStructFields []reflect.StructField + fields []*querypb.Field + rows []Row + ) + + for _, field := range reflect.VisibleFields(elemType) { + if !field.IsExported() { + continue + } + + // Anonymous fields are redundant. For example, consider the following: + // + // type T1 struct { Foo string } + // type T2 struct { *T1; Bar string } + // + // If we did not skip Anonymous fields, marshalling T2 would result in + // the following "fields": + // | t1 | foo | bar | + // + // Skipping Anonymous fields results in the correct set: + // | foo | bar | + // + // From the VisibleFields documentation: + // > The returned fields include fields inside anonymous struct members + // > and unexported fields. 
They follow the same order found in the + // > struct, with anonymous fields followed immediately by their + // > promoted fields. + if field.Anonymous { + continue + } + + exportedStructFields = append(exportedStructFields, field) + sqlField, err := structToQueryField(field) + if err != nil { + return nil, err + } + fields = append(fields, sqlField) + } + + for i := 0; i < val.Len(); i++ { + // TODO: handle case where val is a slice of non-pointer objects. + v := val.Index(i).Elem() + row, err := marshalRow(v, fields, exportedStructFields) + if err != nil { + return nil, err + } + + rows = append(rows, row) + } + + return &Result{ + Fields: fields, + Rows: rows, + }, nil +} + +func marshalRow(val reflect.Value, sqlFields []*querypb.Field, structFields []reflect.StructField) (Row, error) { + var row Row + for i, structField := range structFields { + var ( + sqlField = sqlFields[i] + + sqlVal Value + err error + ) + if f := val.FieldByName(structField.Name); f.IsValid() { + sqlVal, err = structToQueryValue(f.Interface(), structField, sqlField.Type) + if err != nil { + return nil, err + } + } else { + sqlVal = NULL + } + + row = append(row, sqlVal) + } + + return row, nil +} + +func structToQueryField(field reflect.StructField) (*querypb.Field, error) { + name := field.Name + parts := strings.SplitN(field.Tag.Get("sqltypes"), ",", 3) + for len(parts) < 3 { + parts = append(parts, "") + } + + if parts[0] != "" { + name = parts[0] + } + + typ, err := fieldType(field) + if err != nil { + return nil, err + } + + return &querypb.Field{ + Name: snakeCase(name), + Type: typ, + }, nil +} + +func fieldType(field reflect.StructField) (querypb.Type, error) { + var err error + typeName := field.Type.String() + switch field.Type.Kind() { + case reflect.Pointer: + ptr := field.Type.Elem() + switch ptr.Kind() { + case reflect.Struct: + switch ptr.PkgPath() { + case "vitess.io/vitess/go/vt/proto/vttime": + switch ptr.Name() { + case "Time": + typeName = "timestamp" + case 
"Duration": + typeName = "varchar" + default: + // Impossible unless we add a new type to vttime.proto and + // forget to update this function. + err = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unknown vttime proto message %s", ptr.Name()) + } + case "time": + switch ptr.Name() { + case "Time": + typeName = "timestamp" + case "Duration": + typeName = "varchar" + default: + err = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unknown time type %s", ptr.Name()) + } + } + default: + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported pointer type %v", ptr.Kind()) + } + case reflect.Struct: + switch field.Type.PkgPath() { + case "vitess.io/vitess/go/vt/proto/vttime": + switch field.Type.Name() { + case "Time": + typeName = "timestamp" + case "Duration": + typeName = "varchar" + default: + // Impossible unless we add a new type to vttime.proto and + // forget to update this function. + err = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unknown vttime proto message %s", field.Type.Name()) + } + case "time": + switch field.Type.Name() { + case "Time": + typeName = "timestamp" + case "Duration": + typeName = "varchar" + default: + err = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unknown time type %s", field.Type.Name()) + } + } + case reflect.Int: + typeName = "int64" + case reflect.Uint: + typeName = "uint64" + case reflect.String: + typeName = "varchar" + case reflect.Slice: + elem := field.Type.Elem() + switch elem.Kind() { + case reflect.Uint8: + typeName = "varbinary" + default: + err = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unsupported field type %v", field.Type.Kind()) + } + } + + if err != nil { + return 0, err + } + + return querypb.Type(querypb.Type_value[strings.ToUpper(typeName)]), nil +} + +func structToQueryValue(value any, field reflect.StructField, typ querypb.Type) (Value, error) { + if v, ok := value.(ValueMarshaller); ok { + col, err := v.MarshalSQL(typ) + if err != nil { + return Value{}, err + } + + return MakeTrusted(typ, col), nil + } + + switch typ { 
+ case querypb.Type_UINT8: + if v, ok := value.(bool); ok { + return NewBoolean(v), nil + } else if v, ok := value.(uint8); ok { + return NewUint8(v), nil + } else { + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not uint8 or bool", value, value) + } + case querypb.Type_UINT16: + if v, ok := value.(uint16); ok { + return NewUint16(v), nil + } else { + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not uint16", value, value) + } + case querypb.Type_UINT32: + if v, ok := value.(uint32); ok { + return NewUint32(v), nil + } else { + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not uint32", value, value) + } + case querypb.Type_UINT64: + switch v := value.(type) { + case uint64: + return NewUint64(v), nil + case uint: + return NewUint64(uint64(v)), nil + default: + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not uint64", value, value) + } + case querypb.Type_INT8: + if v, ok := value.(int8); ok { + return NewInt8(v), nil + } else { + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not int8", value, value) + } + case querypb.Type_INT16: + if v, ok := value.(int16); ok { + return NewInt16(v), nil + } else { + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not int16", value, value) + } + case querypb.Type_INT32: + if v, ok := value.(int32); ok { + return NewInt32(v), nil + } else { + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not int32", value, value) + } + case querypb.Type_INT64: + switch v := value.(type) { + case int64: + return NewInt64(v), nil + case int: + return NewInt64(int64(v)), nil + default: + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not int64", value, value) + } + case querypb.Type_FLOAT32: + if v, ok := value.(float32); ok { + return NewFloat32(v), nil + } else { + return Value{}, 
vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not float32", value, value) + } + case querypb.Type_FLOAT64: + if v, ok := value.(float64); ok { + return NewFloat64(v), nil + } else { + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not float64", value, value) + } + case querypb.Type_VARCHAR, querypb.Type_VARBINARY: + var s string + if v, ok := value.(fmt.Stringer); ok { + s = v.String() + } else if v, ok := value.(string); ok { + s = v + } else { + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not string-like", value, value) + } + + if typ == querypb.Type_VARBINARY { + return NewVarBinary(s), nil + } + + return NewVarChar(s), nil + case querypb.Type_TIMESTAMP: + var s string + switch v := value.(type) { // TODO: support overrides for other timestamp formats + case *time.Time: + if v == nil { + return NULL, nil + } + + s = v.Format(TimestampFormat) + case time.Time: + s = v.Format(TimestampFormat) + case *vttime.Time: + if v == nil { + return NULL, nil + } + + s = protoutil.TimeFromProto(v).Format(TimestampFormat) + case vttime.Time: + s = protoutil.TimeFromProto(&v).Format(TimestampFormat) + case string: + s = v + default: + _s, ok := value.(string) + if !ok { + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not time or string-like", value, value) + } + + s = _s + } + + return NewTimestamp(s), nil + case querypb.Type_NULL_TYPE: + return NewValue(Null, nil) + } + + return Value{}, vterrors.Errorf(0, "unsupported query field type %s", strings.ToLower(querypb.Type_name[int32(typ)])) +} + +func snakeCase(s string) string { + var ( + buf strings.Builder + start = true + lower = strings.ToLower(s) + ) + + /* + Foo => foo + FooBar => foo_bar + */ + for i, c := range s { + // `c` is an uppercase letter + if byte(c) != lower[i] { + if !start { + buf.WriteByte('_') + } + + start = false + } + + buf.WriteByte(lower[i]) + } + + return buf.String() +} diff --git 
a/go/sqltypes/marshal_test.go b/go/sqltypes/marshal_test.go new file mode 100644 index 00000000000..e8e62018456 --- /dev/null +++ b/go/sqltypes/marshal_test.go @@ -0,0 +1,115 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/topo/topoproto" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +type T1 struct { + Name string + Age int + Tablet *topodatapb.TabletAlias + AddedAt time.Time + Period time.Duration +} + +type T2 T1 + +func (t2 *T2) MarshalResult() (*Result, error) { + tmp := struct { + *T1 + Tablet_ string `sqltypes:"$$tablet"` + AddedTimestamp time.Time + PeriodSeconds int + }{ + T1: (*T1)(t2), + Tablet_: topoproto.TabletAliasString(t2.Tablet), + AddedTimestamp: t2.AddedAt, + PeriodSeconds: int(t2.Period.Seconds()), + } + + res, err := MarshalResult(&tmp) + if err != nil { + return nil, err + } + + return ReplaceFields(res, map[string]string{ + // Replace `period`/`added_at` field and column values. + "period": "period_seconds", + "added_at": "added_timestamp", + // Replace `tablet` column values only. 
+ "$$tablet": "tablet", + }), nil +} + +func TestMarshalResult(t *testing.T) { + t.Parallel() + + now := time.Now() + t1 := &T1{ + Name: "test", + Age: 10, + Tablet: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + AddedAt: now, + Period: time.Minute, + } + + r, err := MarshalResult((*T2)(t1)) + require.NoError(t, err) + + row := r.Named().Rows[0] + + assert.Equal(t, "test", row.AsString("name", "")) + assert.Equal(t, int64(10), row.AsInt64("age", 0)) + assert.Equal(t, "zone1-0000000100", row.AsString("tablet", "")) + assert.Equal(t, now.Format(TimestampFormat), row.AsString("added_timestamp", "")) + assert.Equal(t, int64(60), row.AsInt64("period_seconds", 0)) + + // fields we renamed/remapped are not present + assert.Empty(t, row.AsString("$$tablet", "")) + assert.Empty(t, row.AsString("added_at", "")) + assert.Empty(t, row.AsString("period", "")) +} + +func TestSnakeCase(t *testing.T) { + t.Parallel() + + tests := []struct { + in, out string + }{ + {"Foo", "foo"}, + {"FooBar", "foo_bar"}, + } + + for _, test := range tests { + t.Run(test.in, func(t *testing.T) { + assert.Equal(t, test.out, snakeCase(test.in)) + }) + } +} diff --git a/go/sqltypes/result.go b/go/sqltypes/result.go index 80952598ec9..7c04e1d89fa 100644 --- a/go/sqltypes/result.go +++ b/go/sqltypes/result.go @@ -99,7 +99,7 @@ func (result *Result) Copy() *Result { if result.Fields != nil { out.Fields = make([]*querypb.Field, len(result.Fields)) for i, f := range result.Fields { - out.Fields[i] = proto.Clone(f).(*querypb.Field) + out.Fields[i] = f.CloneVT() } } if result.Rows != nil { diff --git a/go/sqltypes/testing.go b/go/sqltypes/testing.go index 50daed076a0..9042acf6680 100644 --- a/go/sqltypes/testing.go +++ b/go/sqltypes/testing.go @@ -18,8 +18,14 @@ package sqltypes import ( "bytes" + crand "crypto/rand" + "encoding/base64" + "encoding/hex" "fmt" + "math/rand" + "strconv" "strings" + "time" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -72,6 +78,7 @@ func MakeTestResult(fields 
[]*querypb.Field, rows ...string) *Result { result.Rows[i] = make([]Value, len(fields)) for j, col := range split(row) { if col == "null" { + result.Rows[i][j] = NULL continue } result.Rows[i][j] = MakeTrusted(fields[j].Type, []byte(col)) @@ -136,6 +143,15 @@ func TestValue(typ querypb.Type, val string) Value { return MakeTrusted(typ, []byte(val)) } +// TestTuple builds a tuple Value from a list of Values. +// This function should only be used for testing. +func TestTuple(vals ...Value) Value { + return Value{ + typ: Tuple, + val: encodeTuple(vals), + } +} + // PrintResults prints []*Results into a string. // This function should only be used for testing. func PrintResults(results []*Result) string { @@ -153,3 +169,124 @@ func PrintResults(results []*Result) string { func split(str string) []string { return strings.Split(str, "|") } + +func TestRandomValues() (Value, Value) { + if rand.Int()%2 == 0 { + // create a single value, and turn it into two different types + v := rand.Int() + return randomNumericType(v), randomNumericType(v) + } + + // just produce two arbitrary random values and compare + return randomNumericType(rand.Int()), randomNumericType(rand.Int()) +} + +func randomNumericType(i int) Value { + r := rand.Intn(len(numericTypes)) + return numericTypes[r](i) +} + +var numericTypes = []func(int) Value{ + func(i int) Value { return NULL }, + func(i int) Value { return NewInt8(int8(i)) }, + func(i int) Value { return NewInt32(int32(i)) }, + func(i int) Value { return NewInt64(int64(i)) }, + func(i int) Value { return NewUint64(uint64(i)) }, + func(i int) Value { return NewUint32(uint32(i)) }, + func(i int) Value { return NewFloat64(float64(i)) }, + func(i int) Value { return NewDecimal(fmt.Sprintf("%d", i)) }, + func(i int) Value { return NewVarChar(fmt.Sprintf("%d", i)) }, + func(i int) Value { return NewVarChar(fmt.Sprintf(" %f aa", float64(i))) }, +} + +type RandomGenerator func() Value + +func randomBytes() []byte { + b := make([]byte, rand.Intn(128)) 
+ _, _ = crand.Read(b) + return b +} + +var RandomGenerators = map[Type]RandomGenerator{ + Null: func() Value { + return NULL + }, + Int8: func() Value { + return NewInt8(int8(rand.Intn(255))) + }, + Int32: func() Value { + return NewInt32(rand.Int31()) + }, + Int64: func() Value { + return NewInt64(rand.Int63()) + }, + Uint32: func() Value { + return NewUint32(rand.Uint32()) + }, + Uint64: func() Value { + return NewUint64(rand.Uint64()) + }, + Float64: func() Value { + return NewFloat64(rand.ExpFloat64()) + }, + Decimal: func() Value { + dec := fmt.Sprintf("%d.%d", rand.Intn(9999999999), rand.Intn(9999999999)) + if rand.Int()&0x1 == 1 { + dec = "-" + dec + } + return NewDecimal(dec) + }, + VarChar: func() Value { + return NewVarChar(base64.StdEncoding.EncodeToString(randomBytes())) + }, + VarBinary: func() Value { + return NewVarBinary(string(randomBytes())) + }, + Date: func() Value { + return NewDate(randTime().Format(time.DateOnly)) + }, + Datetime: func() Value { + return NewDatetime(randTime().Format(time.DateTime)) + }, + Timestamp: func() Value { + return NewTimestamp(randTime().Format(time.DateTime)) + }, + Time: func() Value { + return NewTime(randTime().Format(time.TimeOnly)) + }, + TypeJSON: func() Value { + var j string + switch rand.Intn(6) { + case 0: + j = "null" + case 1: + i := rand.Int63() + if rand.Int()&0x1 == 1 { + i = -i + } + j = strconv.FormatInt(i, 10) + case 2: + j = strconv.FormatFloat(rand.NormFloat64(), 'g', -1, 64) + case 3: + j = strconv.Quote(hex.EncodeToString(randomBytes())) + case 4: + j = "true" + case 5: + j = "false" + } + v, err := NewJSON(j) + if err != nil { + panic(err) + } + return v + }, +} + +func randTime() time.Time { + min := time.Date(1970, 1, 0, 0, 0, 0, 0, time.UTC).Unix() + max := time.Date(2070, 1, 0, 0, 0, 0, 0, time.UTC).Unix() + delta := max - min + + sec := rand.Int63n(delta) + min + return time.Unix(sec, 0) +} diff --git a/go/sqltypes/value.go b/go/sqltypes/value.go index d4a017798f0..331c494710e 100644 
--- a/go/sqltypes/value.go +++ b/go/sqltypes/value.go @@ -27,11 +27,14 @@ import ( "strconv" "strings" + "google.golang.org/protobuf/encoding/protowire" + "vitess.io/vitess/go/bytes2" "vitess.io/vitess/go/hack" - + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/fastparse" + "vitess.io/vitess/go/mysql/format" querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/proto/vtrpc" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -74,17 +77,22 @@ type ( func NewValue(typ querypb.Type, val []byte) (v Value, err error) { switch { case IsSigned(typ): - if _, err := strconv.ParseInt(string(val), 10, 64); err != nil { + if _, err := fastparse.ParseInt64(hack.String(val), 10); err != nil { return NULL, err } return MakeTrusted(typ, val), nil case IsUnsigned(typ): - if _, err := strconv.ParseUint(string(val), 10, 64); err != nil { + if _, err := fastparse.ParseUint64(hack.String(val), 10); err != nil { + return NULL, err + } + return MakeTrusted(typ, val), nil + case IsFloat(typ): + if _, err := fastparse.ParseFloat64(hack.String(val)); err != nil { return NULL, err } return MakeTrusted(typ, val), nil - case IsFloat(typ) || typ == Decimal: - if _, err := strconv.ParseFloat(string(val), 64); err != nil { + case IsDecimal(typ): + if _, err := decimal.NewFromMySQL(val); err != nil { return NULL, err } return MakeTrusted(typ, val), nil @@ -103,11 +111,9 @@ func NewValue(typ querypb.Type, val []byte) (v Value, err error) { // comments. Other packages can also use the function to create // VarBinary or VarChar values. func MakeTrusted(typ querypb.Type, val []byte) Value { - if typ == Null { return NULL } - return Value{typ: typ, val: val} } @@ -141,6 +147,11 @@ func NewInt32(v int32) Value { return MakeTrusted(Int32, strconv.AppendInt(nil, int64(v), 10)) } +// NewInt16 builds a Int16 Value. 
+func NewInt16(v int16) Value { + return MakeTrusted(Int16, strconv.AppendInt(nil, int64(v), 10)) +} + // NewUint64 builds an Uint64 Value. func NewUint64(v uint64) Value { return MakeTrusted(Uint64, strconv.AppendUint(nil, v, 10)) @@ -151,9 +162,29 @@ func NewUint32(v uint32) Value { return MakeTrusted(Uint32, strconv.AppendUint(nil, uint64(v), 10)) } +// NewUint16 builds a Uint16 Value. +func NewUint16(v uint16) Value { + return MakeTrusted(Uint16, strconv.AppendUint(nil, uint64(v), 10)) +} + +// NewUint8 builds a Uint8 Value. +func NewUint8(v uint8) Value { + return MakeTrusted(Uint8, strconv.AppendUint(nil, uint64(v), 10)) +} + +// NewBoolean builds a Uint8 Value from a boolean. +func NewBoolean(v bool) Value { + return MakeTrusted(Uint8, strconv.AppendBool(nil, v)) +} + // NewFloat64 builds an Float64 Value. func NewFloat64(v float64) Value { - return MakeTrusted(Float64, strconv.AppendFloat(nil, v, 'g', -1, 64)) + return MakeTrusted(Float64, format.FormatFloat(v)) +} + +// NewFloat32 builds a Float32 Value. +func NewFloat32(v float32) Value { + return MakeTrusted(Float32, format.FormatFloat(float64(v))) } // NewVarChar builds a VarChar Value. @@ -286,7 +317,12 @@ func (v Value) ToInt64() (int64, error) { return 0, ErrIncompatibleTypeCast } - return strconv.ParseInt(v.RawStr(), 10, 64) + return fastparse.ParseInt64(v.RawStr(), 10) +} + +// ToCastInt64 returns the best effort value as MySQL would return it as a int64. +func (v Value) ToCastInt64() (int64, error) { + return fastparse.ParseInt64(v.RawStr(), 10) } func (v Value) ToInt32() (int32, error) { @@ -313,7 +349,7 @@ func (v Value) ToFloat64() (float64, error) { return 0, ErrIncompatibleTypeCast } - return strconv.ParseFloat(v.RawStr(), 64) + return fastparse.ParseFloat64(v.RawStr()) } // ToUint16 returns the value as MySQL would return it as a uint16. 
@@ -332,7 +368,12 @@ func (v Value) ToUint64() (uint64, error) { return 0, ErrIncompatibleTypeCast } - return strconv.ParseUint(v.RawStr(), 10, 64) + return fastparse.ParseUint64(v.RawStr(), 10) +} + +// ToCastUint64 returns the best effort value as MySQL would return it as a uint64. +func (v Value) ToCastUint64() (uint64, error) { + return fastparse.ParseUint64(v.RawStr(), 10) +} + func (v Value) ToUint32() (uint32, error) { @@ -403,6 +444,17 @@ func (v Value) EncodeSQLStringBuilder(b *strings.Builder) { encodeBytesSQLStringBuilder(v.val, b) case v.typ == Bit: encodeBytesSQLBits(v.val, b) + case v.typ == Tuple: + b.WriteByte('(') + var i int + _ = v.ForEachValue(func(bv Value) { + if i > 0 { + b.WriteString(", ") + } + bv.EncodeSQLStringBuilder(b) + i++ + }) + b.WriteByte(')') + default: + b.Write(v.val) + } @@ -480,6 +532,16 @@ func (v Value) IsDateTime() bool { return v.typ == querypb.Type_DATETIME } +// IsTimestamp returns true if Value is timestamp. +func (v Value) IsTimestamp() bool { + return v.typ == querypb.Type_TIMESTAMP +} + +// IsDate returns true if Value is date. +func (v Value) IsDate() bool { + return v.typ == querypb.Type_DATE +} + // IsTime returns true if Value is time. 
func (v Value) IsTime() bool { return v.typ == querypb.Type_TIME @@ -550,7 +612,7 @@ func (v *Value) UnmarshalJSON(b []byte) error { // an INSERT was performed with x'A1' having been specified as a value func (v *Value) decodeHexVal() ([]byte, error) { if len(v.val) < 3 || (v.val[0] != 'x' && v.val[0] != 'X') || v.val[1] != '\'' || v.val[len(v.val)-1] != '\'' { - return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid hex value: %v", v.val) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid hex value: %v", v.val) } hexBytes := v.val[2 : len(v.val)-1] decodedHexBytes, err := hex.DecodeString(string(hexBytes)) @@ -565,7 +627,7 @@ func (v *Value) decodeHexVal() ([]byte, error) { // an INSERT was performed with 0xA1 having been specified as a value func (v *Value) decodeHexNum() ([]byte, error) { if len(v.val) < 3 || v.val[0] != '0' || v.val[1] != 'x' { - return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid hex number: %v", v.val) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid hex number: %v", v.val) } hexBytes := v.val[2:] decodedHexBytes, err := hex.DecodeString(string(hexBytes)) @@ -580,16 +642,61 @@ func (v *Value) decodeHexNum() ([]byte, error) { // an INSERT was performed with 0x5 having been specified as a value func (v *Value) decodeBitNum() ([]byte, error) { if len(v.val) < 3 || v.val[0] != '0' || v.val[1] != 'b' { - return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid bit number: %v", v.val) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid bit number: %v", v.val) } var i big.Int _, ok := i.SetString(string(v.val), 0) if !ok { - return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid bit number: %v", v.val) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid bit number: %v", v.val) } return i.Bytes(), nil } +var ErrBadTupleEncoding = errors.New("bad tuple encoding in sqltypes.Value") + +func encodeTuple(tuple []Value) []byte { + var 
total int + for _, v := range tuple { + total += len(v.val) + 3 + } + + buf := make([]byte, 0, total) + for _, v := range tuple { + buf = protowire.AppendVarint(buf, uint64(v.typ)) + buf = protowire.AppendVarint(buf, uint64(len(v.val))) + buf = append(buf, v.val...) + } + return buf +} + +func (v *Value) ForEachValue(each func(bv Value)) error { + if v.typ != Tuple { + panic("Value.ForEachValue on non-tuple") + } + + var sz, ty uint64 + var varlen int + buf := v.val + for len(buf) > 0 { + ty, varlen = protowire.ConsumeVarint(buf) + if varlen < 0 { + return ErrBadTupleEncoding + } + + buf = buf[varlen:] + sz, varlen = protowire.ConsumeVarint(buf) + if varlen < 0 { + return ErrBadTupleEncoding + } + + buf = buf[varlen:] + each(Value{val: buf[:sz], typ: Type(ty)}) + + buf = buf[sz:] + } + return nil +} + func encodeBytesSQL(val []byte, b BinWriter) { buf := &bytes2.Buffer{} encodeBytesSQLBytes2(val, buf) diff --git a/go/sqltypes/value_test.go b/go/sqltypes/value_test.go index 82aea752480..86c751f3d0d 100644 --- a/go/sqltypes/value_test.go +++ b/go/sqltypes/value_test.go @@ -165,23 +165,23 @@ func TestNewValue(t *testing.T) { }, { inType: Int64, inVal: InvalidNeg, - outErr: "out of range", + outErr: `cannot parse int64 from "-9223372036854775809": overflow`, }, { inType: Int64, inVal: InvalidPos, - outErr: "out of range", + outErr: `cannot parse int64 from "18446744073709551616": overflow`, }, { inType: Uint64, inVal: "-1", - outErr: "invalid syntax", + outErr: `cannot parse uint64 from "-1"`, }, { inType: Uint64, inVal: InvalidPos, - outErr: "out of range", + outErr: `cannot parse uint64 from "18446744073709551616": overflow`, }, { inType: Float64, inVal: "a", - outErr: "invalid syntax", + outErr: `unparsed tail left after parsing float64 from "a"`, }, { inType: Expression, inVal: "a", diff --git a/go/stats/counter_test.go b/go/stats/counter_test.go index e4153f5bc33..f290dc733d7 100644 --- a/go/stats/counter_test.go +++ b/go/stats/counter_test.go @@ -26,7 +26,7 @@ 
import ( func TestCounter(t *testing.T) { var gotname string var gotv *Counter - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*Counter) @@ -54,7 +54,7 @@ func TestCounter(t *testing.T) { func TestGaugeFunc(t *testing.T) { var gotname string var gotv *GaugeFunc - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*GaugeFunc) @@ -77,7 +77,7 @@ func TestGaugeFunc(t *testing.T) { func TestGaugeFloat64(t *testing.T) { var gotname string var gotv *GaugeFloat64 - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*GaugeFloat64) diff --git a/go/stats/counters.go b/go/stats/counters.go index f144c0ce3dd..e79da39c48b 100644 --- a/go/stats/counters.go +++ b/go/stats/counters.go @@ -70,9 +70,7 @@ func (c *counters) ZeroAll() { c.mu.Lock() defer c.mu.Unlock() - for k := range c.counts { - c.counts[k] = 0 - } + clear(c.counts) } // Counts returns a copy of the Counters' map. @@ -323,6 +321,29 @@ func (g *GaugesWithSingleLabel) Set(name string, value int64) { g.counters.set(name, value) } +// SyncGaugesWithSingleLabel is a GaugesWithSingleLabel that proactively pushes +// stats to push-based backends when Set is called. +type SyncGaugesWithSingleLabel struct { + GaugesWithSingleLabel + name string +} + +// NewSyncGaugesWithSingleLabel creates a new SyncGaugesWithSingleLabel. +func NewSyncGaugesWithSingleLabel(name, help, label string, tags ...string) *SyncGaugesWithSingleLabel { + return &SyncGaugesWithSingleLabel{ + GaugesWithSingleLabel: *NewGaugesWithSingleLabel(name, help, label, tags...), + name: name, + } +} + +// Set sets the value of a named gauge. +func (sg *SyncGaugesWithSingleLabel) Set(name string, value int64) { + sg.GaugesWithSingleLabel.Set(name, value) + if sg.name != "" { + _ = pushOne(sg.name, &sg.GaugesWithSingleLabel) + } +} + // GaugesWithMultiLabels is a CountersWithMultiLabels implementation where // the values can go up and down. 
type GaugesWithMultiLabels struct { @@ -347,6 +368,11 @@ func NewGaugesWithMultiLabels(name, help string, labels []string) *GaugesWithMul return t } +// GetLabelName returns a label name using the provided values. +func (mg *GaugesWithMultiLabels) GetLabelName(names ...string) string { + return safeJoinLabels(names, nil) +} + // Set sets the value of a named counter. // len(names) must be equal to len(Labels). func (mg *GaugesWithMultiLabels) Set(names []string, value int64) { @@ -356,6 +382,17 @@ func (mg *GaugesWithMultiLabels) Set(names []string, value int64) { mg.counters.set(safeJoinLabels(names, nil), value) } +// ResetKey resets a specific key. +// +// It is the equivalent of `Reset(names)` except that it expects the key to +// be obtained from the internal counters map. +// +// This is useful when you range over all internal counts and you want to reset +// specific keys. +func (mg *GaugesWithMultiLabels) ResetKey(key string) { + mg.counters.set(key, 0) +} + // GaugesFuncWithMultiLabels is a wrapper around CountersFuncWithMultiLabels // for values that go up/down for implementations (like Prometheus) that // need to differ between Counters and Gauges. 
diff --git a/go/stats/counters_test.go b/go/stats/counters_test.go index d3be6ccf02f..22d6e769d3d 100644 --- a/go/stats/counters_test.go +++ b/go/stats/counters_test.go @@ -29,7 +29,7 @@ import ( ) func TestCounters(t *testing.T) { - clear() + clearStats() c := NewCountersWithSingleLabel("counter1", "help", "label") c.Add("c1", 1) c.Add("c2", 1) @@ -49,7 +49,7 @@ func TestCounters(t *testing.T) { } func TestCountersTags(t *testing.T) { - clear() + clearStats() c := NewCountersWithSingleLabel("counterTag1", "help", "label") want := map[string]int64{} got := c.Counts() @@ -66,7 +66,7 @@ func TestCountersTags(t *testing.T) { } func TestMultiCounters(t *testing.T) { - clear() + clearStats() c := NewCountersWithMultiLabels("mapCounter1", "help", []string{"aaa", "bbb"}) c.Add([]string{"c1a", "c1b"}, 1) c.Add([]string{"c2a", "c2b"}, 1) @@ -95,7 +95,7 @@ func TestMultiCounters(t *testing.T) { } func TestMultiCountersDot(t *testing.T) { - clear() + clearStats() c := NewCountersWithMultiLabels("mapCounter2", "help", []string{"aaa", "bbb"}) c.Add([]string{"c1.a", "c1b"}, 1) c.Add([]string{"c2a", "c2.b"}, 1) @@ -121,7 +121,7 @@ func TestMultiCountersDot(t *testing.T) { func TestCountersHook(t *testing.T) { var gotname string var gotv *CountersWithSingleLabel - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*CountersWithSingleLabel) @@ -139,7 +139,7 @@ func TestCountersHook(t *testing.T) { var benchCounter = NewCountersWithSingleLabel("bench", "help", "label") func BenchmarkCounters(b *testing.B) { - clear() + clearStats() benchCounter.Add("c1", 1) b.ResetTimer() @@ -153,7 +153,7 @@ func BenchmarkCounters(b *testing.B) { var benchMultiCounter = NewCountersWithMultiLabels("benchMulti", "help", []string{"call", "keyspace", "dbtype"}) func BenchmarkMultiCounters(b *testing.B) { - clear() + clearStats() key := []string{"execute-key-ranges", "keyspacename", "replica"} benchMultiCounter.Add(key, 1) b.ResetTimer() @@ -169,7 +169,7 @@ func 
BenchmarkCountersTailLatency(b *testing.B) { // For this one, ignore the time reported by 'go test'. // The 99th Percentile log line is all that matters. // (Cmd: go test -bench=BenchmarkCountersTailLatency -benchtime=30s -cpu=10) - clear() + clearStats() benchCounter.Add("c1", 1) c := make(chan time.Duration, 100) done := make(chan struct{}) @@ -208,7 +208,7 @@ func BenchmarkCountersTailLatency(b *testing.B) { } func TestCountersFuncWithMultiLabels(t *testing.T) { - clear() + clearStats() f := NewCountersFuncWithMultiLabels("TestCountersFuncWithMultiLabels", "help", []string{"label1"}, func() map[string]int64 { return map[string]int64{ "c1": 1, @@ -226,7 +226,7 @@ func TestCountersFuncWithMultiLabels(t *testing.T) { func TestCountersFuncWithMultiLabels_Hook(t *testing.T) { var gotname string var gotv *CountersFuncWithMultiLabels - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*CountersFuncWithMultiLabels) @@ -244,13 +244,13 @@ func TestCountersFuncWithMultiLabels_Hook(t *testing.T) { } func TestCountersCombineDimension(t *testing.T) { - clear() + clearStats() // Empty labels shouldn't be combined. 
c0 := NewCountersWithSingleLabel("counter_combine_dim0", "help", "") c0.Add("c1", 1) assert.Equal(t, `{"c1": 1}`, c0.String()) - clear() + clearStats() combineDimensions = "a,c" c1 := NewCountersWithSingleLabel("counter_combine_dim1", "help", "label") diff --git a/go/stats/duration_test.go b/go/stats/duration_test.go index cabc79ae77a..b1aeb0cd1f5 100644 --- a/go/stats/duration_test.go +++ b/go/stats/duration_test.go @@ -25,7 +25,7 @@ import ( func TestCounterDuration(t *testing.T) { var gotname string var gotv *CounterDuration - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*CounterDuration) @@ -52,7 +52,7 @@ func TestCounterDuration(t *testing.T) { func TestCounterDurationFunc(t *testing.T) { var gotname string var gotv *CounterDurationFunc - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*CounterDurationFunc) @@ -75,7 +75,7 @@ func TestCounterDurationFunc(t *testing.T) { func TestGaugeDuration(t *testing.T) { var gotname string var gotv *GaugeDuration - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*GaugeDuration) @@ -103,7 +103,7 @@ func TestGaugeDuration(t *testing.T) { func TestGaugeDurationFunc(t *testing.T) { var gotname string var gotv *GaugeDurationFunc - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*GaugeDurationFunc) diff --git a/go/stats/export.go b/go/stats/export.go index e98ef0a969c..58be67e13f9 100644 --- a/go/stats/export.go +++ b/go/stats/export.go @@ -29,6 +29,7 @@ package stats import ( "bytes" + "context" "expvar" "fmt" "strconv" @@ -45,6 +46,7 @@ var ( emitStats bool statsEmitPeriod = 60 * time.Second statsBackend string + statsBackendInit = make(chan struct{}) combineDimensions string dropVariables string ) @@ -121,6 +123,22 @@ func Publish(name string, v expvar.Var) { publish(name, v) } +func pushAll() error { + backend, ok := pushBackends[statsBackend] + if !ok { 
+ return fmt.Errorf("no PushBackend registered with name %s", statsBackend) + } + return backend.PushAll() +} + +func pushOne(name string, v Variable) error { + backend, ok := pushBackends[statsBackend] + if !ok { + return fmt.Errorf("no PushBackend registered with name %s", statsBackend) + } + return backend.PushOne(name, v) +} + // StringMapFuncWithMultiLabels is a multidimensional string map publisher. // // Map keys are compound names made with joining multiple strings with '.', @@ -183,14 +201,28 @@ func publish(name string, v expvar.Var) { // to be pushed to it. It's used to support push-based metrics backends, as expvar // by default only supports pull-based ones. type PushBackend interface { - // PushAll pushes all stats from expvar to the backend + // PushAll pushes all stats from expvar to the backend. PushAll() error + // PushOne pushes a single stat from expvar to the backend. + PushOne(name string, v Variable) error } var pushBackends = make(map[string]PushBackend) var pushBackendsLock sync.Mutex var once sync.Once +func AwaitBackend(ctx context.Context) error { + if statsBackend == "" { + return nil + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-statsBackendInit: + return nil + } +} + // RegisterPushBackend allows modules to register PushBackend implementations. // Should be called on init(). 
func RegisterPushBackend(name string, backend PushBackend) { @@ -200,6 +232,9 @@ func RegisterPushBackend(name string, backend PushBackend) { log.Fatalf("PushBackend %s already exists; can't register the same name multiple times", name) } pushBackends[name] = backend + if name == statsBackend { + close(statsBackendInit) + } if emitStats { // Start a single goroutine to emit stats periodically once.Do(func() { @@ -214,13 +249,7 @@ func emitToBackend(emitPeriod *time.Duration) { ticker := time.NewTicker(*emitPeriod) defer ticker.Stop() for range ticker.C { - backend, ok := pushBackends[statsBackend] - if !ok { - log.Errorf("No PushBackend registered with name %s", statsBackend) - return - } - err := backend.PushAll() - if err != nil { + if err := pushAll(); err != nil { // TODO(aaijazi): This might cause log spam... log.Warningf("Pushing stats to backend %v failed: %v", statsBackend, err) } diff --git a/go/stats/export_test.go b/go/stats/export_test.go index faf4f4b5d80..e6160f77184 100644 --- a/go/stats/export_test.go +++ b/go/stats/export_test.go @@ -24,7 +24,7 @@ import ( "github.com/stretchr/testify/require" ) -func clear() { +func clearStats() { defaultVarGroup.vars = make(map[string]expvar.Var) defaultVarGroup.newVarHook = nil combineDimensions = "" @@ -34,7 +34,7 @@ func clear() { } func TestNoHook(t *testing.T) { - clear() + clearStats() v := NewCounter("plainint", "help") v.Add(1) if v.String() != "1" { @@ -45,7 +45,7 @@ func TestNoHook(t *testing.T) { func TestString(t *testing.T) { var gotname string var gotv *String - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*String) @@ -82,7 +82,7 @@ func (m *Mystr) String() string { func TestPublish(t *testing.T) { var gotname string var gotv expvar.Var - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*Mystr) @@ -110,7 +110,7 @@ func (f expvarFunc) String() string { func TestPublishFunc(t *testing.T) { var gotname string var 
gotv expvarFunc - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(expvarFunc) @@ -125,7 +125,7 @@ func TestPublishFunc(t *testing.T) { } func TestDropVariable(t *testing.T) { - clear() + clearStats() dropVariables = "dropTest" // This should not panic. @@ -161,7 +161,7 @@ func TestParseCommonTags(t *testing.T) { } func TestStringMapWithMultiLabels(t *testing.T) { - clear() + clearStats() c := NewStringMapFuncWithMultiLabels("stringMap1", "help", []string{"aaa", "bbb"}, "ccc", func() map[string]string { m := make(map[string]string) m["c1a.c1b"] = "1" diff --git a/go/stats/histogram_test.go b/go/stats/histogram_test.go index f78934e7ba6..1c7b05d8e9a 100644 --- a/go/stats/histogram_test.go +++ b/go/stats/histogram_test.go @@ -22,7 +22,7 @@ import ( ) func TestHistogram(t *testing.T) { - clear() + clearStats() h := NewHistogram("hist1", "help", []int64{1, 5}) for i := 0; i < 10; i++ { h.Add(int64(i)) @@ -54,7 +54,7 @@ func TestHistogram(t *testing.T) { } func TestGenericHistogram(t *testing.T) { - clear() + clearStats() h := NewGenericHistogram( "histgen", "help", @@ -72,7 +72,7 @@ func TestGenericHistogram(t *testing.T) { func TestHistogramHook(t *testing.T) { var gotname string var gotv *Histogram - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*Histogram) diff --git a/go/stats/multidimensional_test.go b/go/stats/multidimensional_test.go index 84805e00a2e..61dd8bb3b10 100644 --- a/go/stats/multidimensional_test.go +++ b/go/stats/multidimensional_test.go @@ -23,7 +23,7 @@ import ( ) func TestMultiTimingsCounterFor(t *testing.T) { - clear() + clearStats() mtm := NewMultiTimings("multitimings3", "help", []string{"dim1", "dim2"}) mtm.Add([]string{"tag1a", "tag1b"}, 500*time.Microsecond) diff --git a/go/stats/opentsdb/backend.go b/go/stats/opentsdb/backend.go new file mode 100644 index 00000000000..e5c766ba797 --- /dev/null +++ b/go/stats/opentsdb/backend.go @@ -0,0 +1,58 @@ +/* 
+Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package opentsdb + +import ( + "time" + + "vitess.io/vitess/go/stats" +) + +// backend implements stats.PushBackend +type backend struct { + // The prefix is the name of the binary (vtgate, vttablet, etc.) and will be + // prepended to all the stats reported. + prefix string + // Tags that should be included with every data point. If there's a tag name + // collision between the common tags and a single data point's tags, the data + // point tag will override the common tag. + commonTags map[string]string + // writer is used to send data points somewhere (file, http, ...). + writer writer +} + +// PushAll pushes all stats to OpenTSDB +func (b *backend) PushAll() error { + collector := b.collector() + collector.collectAll() + return b.writer.Write(collector.data) +} + +// PushOne pushes a single stat to OpenTSDB +func (b *backend) PushOne(name string, v stats.Variable) error { + collector := b.collector() + collector.collectOne(name, v) + return b.writer.Write(collector.data) +} + +func (b *backend) collector() *collector { + return &collector{ + commonTags: b.commonTags, + prefix: b.prefix, + timestamp: time.Now().Unix(), + } +} diff --git a/go/stats/opentsdb/by_metric.go b/go/stats/opentsdb/by_metric.go new file mode 100644 index 00000000000..5177109a18e --- /dev/null +++ b/go/stats/opentsdb/by_metric.go @@ -0,0 +1,54 @@ +/* +Copyright 2023 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package opentsdb + +// byMetric implements sort.Interface for []*DataPoint based on the metric key +// and then tag values (prioritized in tag name order). Having a consistent sort order +// is convenient when refreshing /debug/opentsdb or for encoding and comparing JSON directly +// in the tests. +type byMetric []*DataPoint + +func (m byMetric) Len() int { return len(m) } +func (m byMetric) Swap(i, j int) { m[i], m[j] = m[j], m[i] } +func (m byMetric) Less(i, j int) bool { + if m[i].Metric < m[j].Metric { + return true + } + + if m[i].Metric > m[j].Metric { + return false + } + + // Metric names are the same. We can use tag values to figure out the sort order. + // The deciding tag will be the lexicographically earliest tag name where tag values differ. + decidingTagName := "" + result := false + for tagName, iVal := range m[i].Tags { + jVal, ok := m[j].Tags[tagName] + if !ok { + // We'll arbitrarily declare that if i has any tag name that j doesn't then it sorts earlier. + // This shouldn't happen in practice, though, if metric code is correct... 
+ return true + } + + if iVal != jVal && (tagName < decidingTagName || decidingTagName == "") { + decidingTagName = tagName + result = iVal < jVal + } + } + return result +} diff --git a/go/stats/opentsdb/opentsdb.go b/go/stats/opentsdb/collector.go similarity index 54% rename from go/stats/opentsdb/opentsdb.go rename to go/stats/opentsdb/collector.go index 3e85052b5f4..9b870815067 100644 --- a/go/stats/opentsdb/opentsdb.go +++ b/go/stats/opentsdb/collector.go @@ -14,151 +14,47 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package opentsdb adds support for pushing stats to opentsdb. package opentsdb import ( "bytes" "encoding/json" "expvar" - "net/http" - "sort" "strings" - "time" "unicode" - "github.com/spf13/pflag" - "vitess.io/vitess/go/stats" - "vitess.io/vitess/go/vt/servenv" ) -var openTsdbURI string - -func registerFlags(fs *pflag.FlagSet) { - fs.StringVar(&openTsdbURI, "opentsdb_uri", openTsdbURI, "URI of opentsdb /api/put method") -} - -func init() { - servenv.OnParseFor("vtctld", registerFlags) - servenv.OnParseFor("vtgate", registerFlags) - servenv.OnParseFor("vttablet", registerFlags) -} - -// dataPoint represents a single OpenTSDB data point. -type dataPoint struct { - // Example: sys.cpu.nice - Metric string `json:"metric"` - // Seconds or milliseconds since unix epoch. - Timestamp float64 `json:"timestamp"` - Value float64 `json:"value"` - Tags map[string]string `json:"tags"` -} - -// sendDataPoints pushes a list of data points to openTSDB. -// All other code in this file is just to support getting this function called -// with all stats represented as data points. 
-func sendDataPoints(data []dataPoint) error { - json, err := json.Marshal(data) - if err != nil { - return err - } - - resp, err := http.Post(openTsdbURI, "application/json", bytes.NewReader(json)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// openTSDBBackend implements stats.PushBackend -type openTSDBBackend struct { - // The prefix is the name of the binary (vtgate, vttablet, etc.) and will be - // prepended to all the stats reported. - prefix string - // Tags that should be included with every data point. If there's a tag name - // collision between the common tags and a single data point's tags, the data - // point tag will override the common tag. +// collector tracks state for a single pass of stats reporting / data collection. +type collector struct { commonTags map[string]string -} - -// dataCollector tracks state for a single pass of stats reporting / data collection. -type dataCollector struct { - settings *openTSDBBackend + data []*DataPoint + prefix string timestamp int64 - dataPoints []dataPoint -} - -// Init attempts to create a singleton openTSDBBackend and register it as a PushBackend. -// If it fails to create one, this is a noop. The prefix argument is an optional string -// to prepend to the name of every data point reported. 
-func Init(prefix string) { - // Needs to happen in servenv.OnRun() instead of init because it requires flag parsing and logging - servenv.OnRun(func() { - InitWithoutServenv(prefix) - }) -} - -// InitWithoutServenv initializes the opentsdb without servenv -func InitWithoutServenv(prefix string) { - if openTsdbURI == "" { - return - } - - backend := &openTSDBBackend{ - prefix: prefix, - commonTags: stats.ParseCommonTags(stats.CommonTags), - } - - stats.RegisterPushBackend("opentsdb", backend) - - servenv.HTTPHandleFunc("/debug/opentsdb", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - dataPoints := (*backend).getDataPoints() - sort.Sort(byMetric(dataPoints)) - - if b, err := json.MarshalIndent(dataPoints, "", " "); err != nil { - w.Write([]byte(err.Error())) - } else { - w.Write(b) - } - }) } -// PushAll pushes all stats to OpenTSDB -func (backend *openTSDBBackend) PushAll() error { - return sendDataPoints(backend.getDataPoints()) -} - -// getDataPoints fetches all stats in an opentsdb-compatible format. -// This is separated from PushAll() so it can be reused for the /debug/opentsdb handler. -func (backend *openTSDBBackend) getDataPoints() []dataPoint { - dataCollector := &dataCollector{ - settings: backend, - timestamp: time.Now().Unix(), - } - +func (dc *collector) collectAll() { expvar.Do(func(kv expvar.KeyValue) { - dataCollector.addExpVar(kv) + dc.addExpVar(kv) }) - - return dataCollector.dataPoints } -// combineMetricName joins parts of a hierarchical name with a "." 
-func combineMetricName(parts ...string) string { - return strings.Join(parts, ".") +func (dc *collector) collectOne(name string, v expvar.Var) { + dc.addExpVar(expvar.KeyValue{ + Key: name, + Value: v, + }) } -func (dc *dataCollector) addInt(metric string, val int64, tags map[string]string) { +func (dc *collector) addInt(metric string, val int64, tags map[string]string) { dc.addFloat(metric, float64(val), tags) } -func (dc *dataCollector) addFloat(metric string, val float64, tags map[string]string) { +func (dc *collector) addFloat(metric string, val float64, tags map[string]string) { var fullMetric string - if len(dc.settings.prefix) > 0 { - fullMetric = combineMetricName(dc.settings.prefix, metric) + if len(dc.prefix) > 0 { + fullMetric = combineMetricName(dc.prefix, metric) } else { fullMetric = metric } @@ -182,20 +78,20 @@ func (dc *dataCollector) addFloat(metric string, val float64, tags map[string]st } fullTags := make(map[string]string) - for k, v := range dc.settings.commonTags { + for k, v := range dc.commonTags { fullTags[sanitize(k)] = sanitize(v) } for k, v := range tags { fullTags[sanitize(k)] = sanitize(v) } - dp := dataPoint{ + dp := &DataPoint{ Metric: sanitize(fullMetric), Value: val, Timestamp: float64(dc.timestamp), Tags: fullTags, } - dc.dataPoints = append(dc.dataPoints, dp) + dc.data = append(dc.data, dp) } // addExpVar adds all the data points associated with a particular expvar to the list of @@ -206,7 +102,7 @@ func (dc *dataCollector) addFloat(metric string, val float64, tags map[string]st // // Generic unrecognized expvars are serialized to json and their int/float values are exported. // Strings and lists in expvars are not exported. 
-func (dc *dataCollector) addExpVar(kv expvar.KeyValue) { +func (dc *collector) addExpVar(kv expvar.KeyValue) { k := kv.Key switch v := kv.Value.(type) { case stats.FloatFunc: @@ -268,24 +164,8 @@ func (dc *dataCollector) addExpVar(kv expvar.KeyValue) { } } -// makeLabel builds a tag list with a single label + value. -func makeLabel(labelName string, labelVal string) map[string]string { - return map[string]string{labelName: labelVal} -} - -// makeLabels takes the vitess stat representation of label values ("."-separated list) and breaks it -// apart into a map of label name -> label value. -func makeLabels(labelNames []string, labelValsCombined string) map[string]string { - tags := make(map[string]string) - labelVals := strings.Split(labelValsCombined, ".") - for i, v := range labelVals { - tags[labelNames[i]] = v - } - return tags -} - // addUnrecognizedExpvars recurses into a json object to pull out float64 variables to report. -func (dc *dataCollector) addUnrecognizedExpvars(prefix string, obj map[string]any) { +func (dc *collector) addUnrecognizedExpvars(prefix string, obj map[string]any) { for k, v := range obj { prefix := combineMetricName(prefix, k) switch v := v.(type) { @@ -298,7 +178,7 @@ func (dc *dataCollector) addUnrecognizedExpvars(prefix string, obj map[string]an } // addTimings converts a vitess Timings stat to something opentsdb can deal with. -func (dc *dataCollector) addTimings(labels []string, timings *stats.Timings, prefix string) { +func (dc *collector) addTimings(labels []string, timings *stats.Timings, prefix string) { histograms := timings.Histograms() for labelValsCombined, histogram := range histograms { // If you prefer millisecond timings over nanoseconds you can pass 1000000 here instead of 1. 
@@ -306,7 +186,7 @@ func (dc *dataCollector) addTimings(labels []string, timings *stats.Timings, pre } } -func (dc *dataCollector) addHistogram(histogram *stats.Histogram, divideBy int64, prefix string, tags map[string]string) { +func (dc *collector) addHistogram(histogram *stats.Histogram, divideBy int64, prefix string, tags map[string]string) { // TODO: OpenTSDB 2.3 doesn't have histogram support, although it's forthcoming. // For simplicity we report each bucket as a different metric. // @@ -335,39 +215,23 @@ func (dc *dataCollector) addHistogram(histogram *stats.Histogram, divideBy int64 ) } -// byMetric implements sort.Interface for []dataPoint based on the metric key -// and then tag values (prioritized in tag name order). Having a consistent sort order -// is convenient when refreshing /debug/opentsdb or for encoding and comparing JSON directly -// in the tests. -type byMetric []dataPoint - -func (m byMetric) Len() int { return len(m) } -func (m byMetric) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (m byMetric) Less(i, j int) bool { - if m[i].Metric < m[j].Metric { - return true - } - - if m[i].Metric > m[j].Metric { - return false - } +// combineMetricName joins parts of a hierarchical name with a "." +func combineMetricName(parts ...string) string { + return strings.Join(parts, ".") +} - // Metric names are the same. We can use tag values to figure out the sort order. - // The deciding tag will be the lexicographically earliest tag name where tag values differ. - decidingTagName := "" - result := false - for tagName, iVal := range m[i].Tags { - jVal, ok := m[j].Tags[tagName] - if !ok { - // We'll arbitrarily declare that if i has any tag name that j doesn't then it sorts earlier. - // This shouldn't happen in practice, though, if metric code is correct... - return true - } +// makeLabel builds a tag list with a single label + value. 
+func makeLabel(labelName string, labelVal string) map[string]string { + return map[string]string{labelName: labelVal} +} - if iVal != jVal && (tagName < decidingTagName || decidingTagName == "") { - decidingTagName = tagName - result = iVal < jVal - } +// makeLabels takes the vitess stat representation of label values ("."-separated list) and breaks it +// apart into a map of label name -> label value. +func makeLabels(labelNames []string, labelValsCombined string) map[string]string { + tags := make(map[string]string) + labelVals := strings.Split(labelValsCombined, ".") + for i, v := range labelVals { + tags[labelNames[i]] = v } - return result + return tags } diff --git a/go/stats/opentsdb/datapoint.go b/go/stats/opentsdb/datapoint.go new file mode 100644 index 00000000000..42e69b84d47 --- /dev/null +++ b/go/stats/opentsdb/datapoint.go @@ -0,0 +1,90 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package opentsdb + +import ( + "fmt" + "strconv" + "strings" +) + +// DataPoint represents a single OpenTSDB data point. +type DataPoint struct { + // Example: sys.cpu.nice + Metric string `json:"metric"` + // Seconds or milliseconds since unix epoch. 
+ Timestamp float64 `json:"timestamp"` + Value float64 `json:"value"` + Tags map[string]string `json:"tags"` +} + +func (dp *DataPoint) MarshalText() (string, error) { + var sb strings.Builder + + if _, err := sb.WriteString(fmt.Sprintf("%s %f %f", dp.Metric, dp.Timestamp, dp.Value)); err != nil { + return "", err + } + + for k, v := range dp.Tags { + if _, err := sb.WriteString(fmt.Sprintf(" %s=%s", k, v)); err != nil { + return "", err + } + } + + if _, err := sb.WriteString("\n"); err != nil { + return "", err + } + + return sb.String(), nil +} + +func unmarshalTextToData(dp *DataPoint, text []byte) error { + parts := strings.Split(string(text), " ") + + if len(parts) < 3 { + // Technically every OpenTSDB time series requires at least one tag, + // but some of the metrics we send have zero. + return fmt.Errorf("require format: [ ]") + } + + dp.Metric = parts[0] + + timestamp, err := strconv.ParseFloat(parts[1], 64) + if err != nil { + return err + } + dp.Timestamp = timestamp + + value, err := strconv.ParseFloat(parts[2], 64) + if err != nil { + return err + } + dp.Value = value + + for _, kv := range parts[3:] { + tagParts := strings.Split(kv, "=") + if len(tagParts) != 2 { + return fmt.Errorf("require tag format: ") + } + if dp.Tags == nil { + dp.Tags = make(map[string]string) + } + dp.Tags[tagParts[0]] = tagParts[1] + } + + return nil +} diff --git a/go/stats/opentsdb/datapoint_reader.go b/go/stats/opentsdb/datapoint_reader.go new file mode 100644 index 00000000000..441be9eb7a1 --- /dev/null +++ b/go/stats/opentsdb/datapoint_reader.go @@ -0,0 +1,53 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package opentsdb + +import ( + "bufio" + "io" +) + +var newLineDelimiter = byte('\n') + +// DataPointReader parses bytes from io.Reader into DataPoints. +type DataPointReader struct { + reader *bufio.Reader +} + +func NewDataPointReader(r io.Reader) *DataPointReader { + return &DataPointReader{ + reader: bufio.NewReader(r), + } +} + +// Read returns a DataPoint from the underlying io.Reader. +// +// Returns an error if no DataPoint could be parsed. +func (tr *DataPointReader) Read() (*DataPoint, error) { + bs, err := tr.reader.ReadBytes(newLineDelimiter) + if err != nil { + return nil, err + } + + dp := &DataPoint{} + + if err := unmarshalTextToData(dp, bs[:len(bs)-1]); err != nil { + return nil, err + } + + return dp, nil +} diff --git a/go/stats/opentsdb/doc.go b/go/stats/opentsdb/doc.go new file mode 100644 index 00000000000..88c22a58c70 --- /dev/null +++ b/go/stats/opentsdb/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package opentsdb adds support for pushing stats to opentsdb. 
+package opentsdb diff --git a/go/stats/opentsdb/file_writer.go b/go/stats/opentsdb/file_writer.go new file mode 100644 index 00000000000..7f2d2f2ccc7 --- /dev/null +++ b/go/stats/opentsdb/file_writer.go @@ -0,0 +1,52 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package opentsdb + +import ( + "io" + "os" +) + +type fileWriter struct { + writer io.WriteCloser +} + +func newFileWriter(path string) (writer, error) { + f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_SYNC|os.O_WRONLY, 0644) + if err != nil { + return nil, err + } + + return &fileWriter{ + writer: f, + }, nil +} + +func (fw *fileWriter) Write(data []*DataPoint) error { + for _, d := range data { + text, err := d.MarshalText() + if err != nil { + return err + } + + if _, err := fw.writer.Write([]byte(text)); err != nil { + return err + } + } + + return nil +} diff --git a/go/vt/status/status.go b/go/stats/opentsdb/flags.go similarity index 64% rename from go/vt/status/status.go rename to go/stats/opentsdb/flags.go index 61ea9b8c1bf..8ccd0279981 100644 --- a/go/vt/status/status.go +++ b/go/stats/opentsdb/flags.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,9 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// Package status defines a few useful functions for our binaries, -// mainly to link the status page with a vtctld instance. -package status +package opentsdb import ( "github.com/spf13/pflag" @@ -24,14 +22,17 @@ import ( "vitess.io/vitess/go/vt/servenv" ) -// TODO(deepthi): This entire file (and package) can be deleted after v17 +var ( + openTSDBURI string +) + func registerFlags(fs *pflag.FlagSet) { - fs.String("vtctld_addr", "", "address of a vtctld instance") - _ = fs.MarkDeprecated("vtctld_addr", "will be removed after v17") + fs.StringVar(&openTSDBURI, "opentsdb_uri", openTSDBURI, "URI of opentsdb /api/put method") } func init() { - servenv.OnParseFor("vtcombo", registerFlags) + servenv.OnParseFor("vtbackup", registerFlags) + servenv.OnParseFor("vtctld", registerFlags) servenv.OnParseFor("vtgate", registerFlags) servenv.OnParseFor("vttablet", registerFlags) } diff --git a/go/stats/opentsdb/http_writer.go b/go/stats/opentsdb/http_writer.go new file mode 100644 index 00000000000..7b7801d7f77 --- /dev/null +++ b/go/stats/opentsdb/http_writer.go @@ -0,0 +1,51 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package opentsdb + +import ( + "bytes" + "encoding/json" + "net/http" +) + +type httpWriter struct { + client *http.Client + uri string +} + +func newHTTPWriter(client *http.Client, uri string) *httpWriter { + return &httpWriter{ + client: client, + uri: uri, + } +} + +func (hw *httpWriter) Write(data []*DataPoint) error { + jsonb, err := json.Marshal(data) + if err != nil { + return err + } + + resp, err := hw.client.Post(hw.uri, "application/json", bytes.NewReader(jsonb)) + if err != nil { + return err + } + + resp.Body.Close() + + return nil +} diff --git a/go/stats/opentsdb/init.go b/go/stats/opentsdb/init.go new file mode 100644 index 00000000000..51186ad7650 --- /dev/null +++ b/go/stats/opentsdb/init.go @@ -0,0 +1,104 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package opentsdb + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "sort" + + "vitess.io/vitess/go/stats" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/servenv" +) + +var singletonBackend stats.PushBackend + +// Init attempts to create a singleton *opentsdb.backend and register it as a PushBackend. +// If it fails to create one, this is a noop. The prefix argument is an optional string +// to prepend to the name of every data point reported. 
+func Init(prefix string) { + // Needs to happen in servenv.OnRun() instead of init because it requires flag parsing and logging + servenv.OnRun(func() { + log.Info("Initializing opentsdb backend...") + backend, err := InitWithoutServenv(prefix) + if err != nil { + log.Infof("Failed to initialize singleton opentsdb backend: %v", err) + } else { + singletonBackend = backend + log.Info("Initialized opentsdb backend.") + } + }) +} + +// InitWithoutServenv initializes the opentsdb without servenv +func InitWithoutServenv(prefix string) (stats.PushBackend, error) { + b, err := newBackend(prefix) + + if err != nil { + return nil, err + } + + stats.RegisterPushBackend("opentsdb", b) + + servenv.HTTPHandleFunc("/debug/opentsdb", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + collector := b.collector() + collector.collectAll() + data := collector.data + sort.Sort(byMetric(data)) + + if b, err := json.MarshalIndent(data, "", " "); err != nil { + w.Write([]byte(err.Error())) + } else { + w.Write(b) + } + }) + + return b, nil +} + +func newBackend(prefix string) (*backend, error) { + if openTSDBURI == "" { + return nil, fmt.Errorf("cannot create opentsdb PushBackend with empty --opentsdb_uri") + } + + var w writer + + // Use the file API when the uri is in format file://... 
+ u, err := url.Parse(openTSDBURI) + if err != nil { + return nil, fmt.Errorf("failed to parse --opentsdb_uri %s: %v", openTSDBURI, err) + } else if u.Scheme == "file" { + fw, err := newFileWriter(u.Path) + if err != nil { + return nil, fmt.Errorf("failed to create file-based writer for --opentsdb_uri %s: %v", openTSDBURI, err) + } else { + w = fw + } + } else { + w = newHTTPWriter(&http.Client{}, openTSDBURI) + } + + return &backend{ + prefix: prefix, + commonTags: stats.ParseCommonTags(stats.CommonTags), + writer: w, + }, nil +} diff --git a/go/stats/opentsdb/opentsdb_test.go b/go/stats/opentsdb/opentsdb_test.go index 0e8ff240500..940ee845ada 100644 --- a/go/stats/opentsdb/opentsdb_test.go +++ b/go/stats/opentsdb/opentsdb_test.go @@ -352,15 +352,16 @@ func TestOpenTsdbTimings(t *testing.T) { } func checkOutput(t *testing.T, statName string, wantJSON string) { - backend := &openTSDBBackend{ + b := &backend{ prefix: "vtgate", commonTags: map[string]string{"host": "localhost"}, } timestamp := int64(1234) - dc := &dataCollector{ - settings: backend, - timestamp: timestamp, + dc := &collector{ + commonTags: b.commonTags, + prefix: b.prefix, + timestamp: timestamp, } found := false expvar.Do(func(kv expvar.KeyValue) { @@ -368,9 +369,9 @@ func checkOutput(t *testing.T, statName string, wantJSON string) { found = true dc.addExpVar(kv) - sort.Sort(byMetric(dc.dataPoints)) + sort.Sort(byMetric(dc.data)) - gotBytes, err := json.MarshalIndent(dc.dataPoints, "", " ") + gotBytes, err := json.MarshalIndent(dc.data, "", " ") if err != nil { t.Errorf("Failed to marshal json: %v", err) return diff --git a/go/vt/topo/k8stopo/boilerplate.go.txt b/go/stats/opentsdb/writer.go similarity index 83% rename from go/vt/topo/k8stopo/boilerplate.go.txt rename to go/stats/opentsdb/writer.go index 3f6ccc17d97..49d221cc782 100644 --- a/go/vt/topo/k8stopo/boilerplate.go.txt +++ b/go/stats/opentsdb/writer.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Vitess Authors. 
+Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,3 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +package opentsdb + +type writer interface { + Write([]*DataPoint) error +} diff --git a/go/stats/rates.go b/go/stats/rates.go index 7aa4f7d3ce7..48864585225 100644 --- a/go/stats/rates.go +++ b/go/stats/rates.go @@ -17,6 +17,7 @@ limitations under the License. package stats import ( + "context" "encoding/json" "math" "sync" @@ -65,6 +66,8 @@ type Rates struct { // totalRate is the rate of total counts per second seen in the latest // sampling interval e.g. 100 queries / 5 seconds sampling interval = 20 QPS. totalRate float64 + ctx context.Context + cancel context.CancelFunc } // NewRates reports rolling rate information for countTracker. samples specifies @@ -76,6 +79,7 @@ func NewRates(name string, countTracker CountTracker, samples int, interval time if interval < 1*time.Second && interval != -1*time.Second { panic("interval too small") } + ctx, cancel := context.WithCancel(context.Background()) rt := &Rates{ timeStamps: NewRingInt64(samples + 1), counts: make(map[string]*RingInt64), @@ -83,6 +87,8 @@ func NewRates(name string, countTracker CountTracker, samples int, interval time samples: samples + 1, interval: interval, timestampLastSampling: timeNow(), + ctx: ctx, + cancel: cancel, } if name != "" { publish(name, rt) @@ -93,10 +99,20 @@ func NewRates(name string, countTracker CountTracker, samples int, interval time return rt } +func (rt *Rates) Stop() { + rt.cancel() +} + func (rt *Rates) track() { + t := time.NewTicker(rt.interval) + defer t.Stop() for { - rt.snapshot() - <-time.After(rt.interval) + select { + case <-rt.ctx.Done(): + return + case <-t.C: + rt.snapshot() + } } } diff --git a/go/stats/rates_test.go b/go/stats/rates_test.go index e37cbbd8af8..a25a055020a 100644 
--- a/go/stats/rates_test.go +++ b/go/stats/rates_test.go @@ -41,9 +41,10 @@ func TestRates(t *testing.T) { return now } - clear() + clearStats() c := NewCountersWithSingleLabel("rcounter1", "rcounter help", "label") r := NewRates("rates1", c, 3, -1*time.Second) + defer r.Stop() r.snapshot() now = now.Add(epsilon) c.Add("tag1", 0) @@ -89,9 +90,10 @@ func TestRatesConsistency(t *testing.T) { // This tests the following invariant: in the time window // covered by rates, the sum of the rates reported must be // equal to the count reported by the counter. - clear() + clearStats() c := NewCountersWithSingleLabel("rcounter4", "rcounter4 help", "label") r := NewRates("rates4", c, 100, -1*time.Second) + defer r.Stop() r.snapshot() now = now.Add(epsilon) @@ -122,17 +124,18 @@ func TestRatesConsistency(t *testing.T) { } func TestRatesHook(t *testing.T) { - clear() + clearStats() c := NewCountersWithSingleLabel("rcounter2", "rcounter2 help", "label") var gotname string var gotv *Rates - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*Rates) }) v := NewRates("rates2", c, 2, 10*time.Second) + defer v.Stop() if gotname != "rates2" { t.Errorf("want rates2, got %s", gotname) } diff --git a/go/stats/statsd/statsd.go b/go/stats/statsd/statsd.go index 269b185ff7c..f791d7b742d 100644 --- a/go/stats/statsd/statsd.go +++ b/go/stats/statsd/statsd.go @@ -219,7 +219,7 @@ func (sb StatsBackend) addExpVar(kv expvar.KeyValue) { } } -// PushAll flush out the pending metrics +// PushAll flushes out the pending metrics func (sb StatsBackend) PushAll() error { expvar.Do(func(kv expvar.KeyValue) { sb.addExpVar(kv) @@ -229,3 +229,15 @@ func (sb StatsBackend) PushAll() error { } return nil } + +// PushOne pushes the single provided metric. 
+func (sb StatsBackend) PushOne(name string, v stats.Variable) error { + sb.addExpVar(expvar.KeyValue{ + Key: name, + Value: v, + }) + if err := sb.statsdClient.Flush(); err != nil { + return err + } + return nil +} diff --git a/go/stats/timings.go b/go/stats/timings.go index fe12ccd0604..d0fb82ebedf 100644 --- a/go/stats/timings.go +++ b/go/stats/timings.go @@ -61,10 +61,12 @@ func NewTimings(name, help, label string, categories ...string) *Timings { return t } -// Reset will clear histograms: used during testing +// Reset will clear histograms and counters: used during testing func (t *Timings) Reset() { t.mu.RLock() t.histograms = make(map[string]*Histogram) + t.totalCount.Store(0) + t.totalTime.Store(0) t.mu.RUnlock() } diff --git a/go/stats/timings_test.go b/go/stats/timings_test.go index 9657004a76f..a632f3fba6a 100644 --- a/go/stats/timings_test.go +++ b/go/stats/timings_test.go @@ -26,7 +26,7 @@ import ( ) func TestTimings(t *testing.T) { - clear() + clearStats() tm := NewTimings("timings1", "help", "category") tm.Add("tag1", 500*time.Microsecond) tm.Add("tag1", 1*time.Millisecond) @@ -38,7 +38,7 @@ func TestTimings(t *testing.T) { } func TestMultiTimings(t *testing.T) { - clear() + clearStats() mtm := NewMultiTimings("maptimings1", "help", []string{"dim1", "dim2"}) mtm.Add([]string{"tag1a", "tag1b"}, 500*time.Microsecond) mtm.Add([]string{"tag1a", "tag1b"}, 1*time.Millisecond) @@ -50,7 +50,7 @@ func TestMultiTimings(t *testing.T) { } func TestMultiTimingsDot(t *testing.T) { - clear() + clearStats() mtm := NewMultiTimings("maptimings2", "help", []string{"label"}) mtm.Add([]string{"value.dot"}, 500*time.Microsecond) safe := safeLabel("value.dot") @@ -64,7 +64,7 @@ func TestMultiTimingsDot(t *testing.T) { func TestTimingsHook(t *testing.T) { var gotname string var gotv *Timings - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*Timings) @@ -81,7 +81,7 @@ func TestTimingsHook(t *testing.T) { } func 
TestTimingsCombineDimension(t *testing.T) { - clear() + clearStats() combineDimensions = "a,c" t1 := NewTimings("timing_combine_dim1", "help", "label") diff --git a/go/streamlog/streamlog.go b/go/streamlog/streamlog.go index 572c4481777..26248fcd1b1 100644 --- a/go/streamlog/streamlog.go +++ b/go/streamlog/streamlog.go @@ -262,7 +262,7 @@ func GetFormatter[T any](logger *StreamLogger[T]) LogFormatter { // ShouldEmitLog returns whether the log with the given SQL query // should be emitted or filtered func ShouldEmitLog(sql string, rowsAffected, rowsReturned uint64) bool { - if queryLogRowThreshold > maxUint64(rowsAffected, rowsReturned) && queryLogFilterTag == "" { + if queryLogRowThreshold > max(rowsAffected, rowsReturned) && queryLogFilterTag == "" { return false } if queryLogFilterTag != "" { @@ -270,10 +270,3 @@ func ShouldEmitLog(sql string, rowsAffected, rowsReturned uint64) bool { } return true } - -func maxUint64(a, b uint64) uint64 { - if a < b { - return b - } - return a -} diff --git a/go/test/endtoend/backup/pitr/backup_mysqlctld_pitr_test.go b/go/test/endtoend/backup/pitr/backup_mysqlctld_pitr_test.go deleted file mode 100644 index 56e9f990eee..00000000000 --- a/go/test/endtoend/backup/pitr/backup_mysqlctld_pitr_test.go +++ /dev/null @@ -1,243 +0,0 @@ -/* -Copyright 2022 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mysqlctld - -import ( - "context" - "fmt" - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/mysql" - backup "vitess.io/vitess/go/test/endtoend/backup/vtctlbackup" - "vitess.io/vitess/go/test/endtoend/cluster" -) - -func waitForReplica(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - pMsgs := backup.ReadRowsFromPrimary(t) - for { - rMsgs := backup.ReadRowsFromReplica(t) - if len(pMsgs) == len(rMsgs) { - // success - return - } - select { - case <-ctx.Done(): - assert.FailNow(t, "timeout waiting for replica to catch up") - return - case <-time.After(time.Second): - // - } - } -} - -// TestIncrementalBackupMysqlctld - tests incremental backups using myslctld -func TestIncrementalBackup(t *testing.T) { - defer cluster.PanicHandler(t) - - tcases := []struct { - name string - setupType int - comprss *backup.CompressionDetails - }{ - { - "BuiltinBackup", backup.BuiltinBackup, nil, - }, - { - "XtraBackup", backup.XtraBackup, &backup.CompressionDetails{ - CompressorEngineName: "pgzip", - }, - }, - { - "Mysqlctld", backup.Mysqlctld, nil, - }, - } - for _, tcase := range tcases { - t.Run(tcase.name, func(t *testing.T) { - // setup cluster for the testing - code, err := backup.LaunchCluster(tcase.setupType, "xbstream", 0, tcase.comprss) - require.NoError(t, err, "setup failed with status code %d", code) - defer backup.TearDownCluster() - - backup.InitTestTable(t) - - rowsPerPosition := map[string]int{} - backupPositions := []string{} - - recordRowsPerPosition := func(t *testing.T) { - pos := backup.GetReplicaPosition(t) - msgs := backup.ReadRowsFromReplica(t) - if _, ok := rowsPerPosition[pos]; !ok { - backupPositions = append(backupPositions, pos) - rowsPerPosition[pos] = len(msgs) - } - } - - var fullBackupPos mysql.Position - t.Run("full backup", func(t *testing.T) { - backup.InsertRowOnPrimary(t, 
"before-full-backup") - waitForReplica(t) - - manifest, _ := backup.TestReplicaFullBackup(t) - fullBackupPos = manifest.Position - require.False(t, fullBackupPos.IsZero()) - // - msgs := backup.ReadRowsFromReplica(t) - pos := mysql.EncodePosition(fullBackupPos) - backupPositions = append(backupPositions, pos) - rowsPerPosition[pos] = len(msgs) - }) - - lastBackupPos := fullBackupPos - backup.InsertRowOnPrimary(t, "before-incremental-backups") - - tt := []struct { - name string - writeBeforeBackup bool - fromFullPosition bool - autoPosition bool - expectError string - }{ - { - name: "first incremental backup", - }, - { - name: "fail1", - expectError: "no binary logs to backup", - }, - { - name: "fail2", - expectError: "no binary logs to backup", - }, - { - name: "make writes, succeed", - writeBeforeBackup: true, - }, - { - name: "fail, no binary logs to backup", - expectError: "no binary logs to backup", - }, - { - name: "make writes again, succeed", - writeBeforeBackup: true, - }, - { - name: "auto position, succeed", - writeBeforeBackup: true, - autoPosition: true, - }, - { - name: "fail auto position, no binary logs to backup", - autoPosition: true, - expectError: "no binary logs to backup", - }, - { - name: "auto position, make writes again, succeed", - writeBeforeBackup: true, - autoPosition: true, - }, - { - name: "from full backup position", - fromFullPosition: true, - }, - } - var fromFullPositionBackups []string - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - if tc.writeBeforeBackup { - backup.InsertRowOnPrimary(t, "") - } - // we wait for >1 second because backups are written to a directory named after the current timestamp, - // in 1 second resolution. We want to avoid two backups that have the same pathname. Realistically this - // is only ever a problem in this end-to-end test, not in production. - // Also, we give the replica a chance to catch up. 
- time.Sleep(1100 * time.Millisecond) - waitForReplica(t) - recordRowsPerPosition(t) - // configure --incremental-from-pos to either: - // - auto - // - explicit last backup pos - // - back in history to the original full backup - var incrementalFromPos mysql.Position - if !tc.autoPosition { - incrementalFromPos = lastBackupPos - if tc.fromFullPosition { - incrementalFromPos = fullBackupPos - } - } - manifest, backupName := backup.TestReplicaIncrementalBackup(t, incrementalFromPos, tc.expectError) - if tc.expectError != "" { - return - } - defer func() { - lastBackupPos = manifest.Position - }() - if tc.fromFullPosition { - fromFullPositionBackups = append(fromFullPositionBackups, backupName) - } - require.False(t, manifest.FromPosition.IsZero()) - require.NotEqual(t, manifest.Position, manifest.FromPosition) - require.True(t, manifest.Position.GTIDSet.Union(manifest.PurgedPosition.GTIDSet).Contains(manifest.FromPosition.GTIDSet)) - - gtidPurgedPos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, backup.GetReplicaGtidPurged(t)) - require.NoError(t, err) - fromPositionIncludingPurged := manifest.FromPosition.GTIDSet.Union(gtidPurgedPos.GTIDSet) - - expectFromPosition := lastBackupPos.GTIDSet.Union(gtidPurgedPos.GTIDSet) - if !incrementalFromPos.IsZero() { - expectFromPosition = incrementalFromPos.GTIDSet.Union(gtidPurgedPos.GTIDSet) - } - require.Equalf(t, expectFromPosition, fromPositionIncludingPurged, "expected: %v, found: %v, gtid_purged: %v, manifest.Position: %v", expectFromPosition, fromPositionIncludingPurged, gtidPurgedPos, manifest.Position) - }) - } - - testRestores := func(t *testing.T) { - for _, r := range rand.Perm(len(backupPositions)) { - pos := backupPositions[r] - testName := fmt.Sprintf("%s, %d records", pos, rowsPerPosition[pos]) - t.Run(testName, func(t *testing.T) { - restoreToPos, err := mysql.DecodePosition(pos) - require.NoError(t, err) - backup.TestReplicaRestoreToPos(t, restoreToPos, "") - msgs := backup.ReadRowsFromReplica(t) - count, 
ok := rowsPerPosition[pos] - require.True(t, ok) - assert.Equalf(t, count, len(msgs), "messages: %v", msgs) - }) - } - } - t.Run("PITR", func(t *testing.T) { - testRestores(t) - }) - t.Run("remove full position backups", func(t *testing.T) { - // Delete the fromFullPosition backup(s), which leaves us with less restore options. Try again. - for _, backupName := range fromFullPositionBackups { - backup.RemoveBackup(t, backupName) - } - }) - t.Run("PITR-2", func(t *testing.T) { - testRestores(t) - }) - }) - } -} diff --git a/go/test/endtoend/backup/pitr/backup_pitr_test.go b/go/test/endtoend/backup/pitr/backup_pitr_test.go new file mode 100644 index 00000000000..fcf8e9490e8 --- /dev/null +++ b/go/test/endtoend/backup/pitr/backup_pitr_test.go @@ -0,0 +1,83 @@ +/* +Copyright 2022 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mysqlctld + +import ( + "testing" + + backup "vitess.io/vitess/go/test/endtoend/backup/vtctlbackup" +) + +// TestIncrementalBackupAndRestoreToPos - tests incremental backups and restores. +// The general outline of the test: +// - Generate some schema with data +// - Take a full backup +// - Proceed to take a series of inremental backups. In between, inject data (insert rows), and keep record +// of which data (number of rows) is present in each backup, and at which position. +// - Expect backups success/failure per scenario +// - Next up, we start testing restores. Randomly pick recorded positions and restore to those points in time. 
+// - In each restore, excpect to find the data (number of rows) recorded for said position +// - Some restores should fail because the position exceeds the last binlog +// - Do so for all recorded positions. +// - Then, a 2nd round where some backups are purged -- this tests to see that we're still able to find a restore path +// (of course we only delete backups that still leave us with valid restore paths). +// - Last, create a new tablet with --restore_from_backup --restore-to-pos and see that it bootstraps with restored data +// and that it ends up in DRAINED type +func TestIncrementalBackupAndRestoreToPos(t *testing.T) { + tcase := &backup.PITRTestCase{ + Name: "BuiltinBackup", + SetupType: backup.BuiltinBackup, + ComprssDetails: nil, + } + backup.ExecTestIncrementalBackupAndRestoreToPos(t, tcase) +} + +// TestIncrementalBackupAndRestoreToTimestamp - tests incremental backups and restores. +// The general outline of the test: +// - Generate some schema with data +// - Take a full backup +// - Proceed to take a series of inremental backups. In between, inject data (insert rows), and keep record +// of which data (number of rows) is present in each backup, and at which timestamp. +// - Expect backups success/failure per scenario +// - Next up, we start testing restores. Randomly pick recorded timestamps and restore to those points in time. +// - In each restore, excpect to find the data (number of rows) recorded for said timestamp +// - Some restores should fail because the timestamp exceeds the last binlog +// - Do so for all recorded tiemstamps. +// - Then, a 2nd round where some backups are purged -- this tests to see that we're still able to find a restore path +// (of course we only delete backups that still leave us with valid restore paths). 
+// - Last, create a new tablet with --restore_from_backup --restore-to-timestamp and see that it bootstraps with restored data +// and that it ends up in DRAINED type +func TestIncrementalBackupAndRestoreToTimestamp(t *testing.T) { + tcase := &backup.PITRTestCase{ + Name: "BuiltinBackup", + SetupType: backup.BuiltinBackup, + ComprssDetails: nil, + } + backup.ExecTestIncrementalBackupAndRestoreToTimestamp(t, tcase) +} + +// TestIncrementalBackupOnTwoTablets runs a series of interleaved backups on two different replicas: full and incremental. +// Specifically, it's designed to test how incremental backups are taken by interleaved replicas, so that they successfully build on +// one another. +func TestIncrementalBackupOnTwoTablets(t *testing.T) { + tcase := &backup.PITRTestCase{ + Name: "BuiltinBackup", + SetupType: backup.BuiltinBackup, + ComprssDetails: nil, + } + backup.ExecTestIncrementalBackupOnTwoTablets(t, tcase) +} diff --git a/go/test/endtoend/backup/pitr_xtrabackup/backup_pitr_xtrabackup_test.go b/go/test/endtoend/backup/pitr_xtrabackup/backup_pitr_xtrabackup_test.go new file mode 100644 index 00000000000..b69e950fe0b --- /dev/null +++ b/go/test/endtoend/backup/pitr_xtrabackup/backup_pitr_xtrabackup_test.go @@ -0,0 +1,61 @@ +/* +Copyright 2022 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mysqlctld + +import ( + "testing" + + backup "vitess.io/vitess/go/test/endtoend/backup/vtctlbackup" +) + +// TestIncrementalBackupAndRestoreToPos +func TestIncrementalBackupAndRestoreToPos(t *testing.T) { + tcase := &backup.PITRTestCase{ + Name: "XtraBackup", + SetupType: backup.XtraBackup, + ComprssDetails: &backup.CompressionDetails{ + CompressorEngineName: "pgzip", + }, + } + backup.ExecTestIncrementalBackupAndRestoreToPos(t, tcase) +} + +// TestIncrementalBackupAndRestoreToTimestamp - tests incremental backups and restores. +// The general outline of the test: +// - Generate some schema with data +// - Take a full backup +// - Proceed to take a series of inremental backups. In between, inject data (insert rows), and keep record +// of which data (number of rows) is present in each backup, and at which timestamp. +// - Expect backups success/failure per scenario +// - Next up, we start testing restores. Randomly pick recorded timestamps and restore to those points in time. +// - In each restore, excpect to find the data (number of rows) recorded for said timestamp +// - Some restores should fail because the timestamp exceeds the last binlog +// - Do so for all recorded tiemstamps. +// - Then, a 2nd round where some backups are purged -- this tests to see that we're still able to find a restore path +// (of course we only delete backups that still leave us with valid restore paths). 
+// +// All of the above is done for BuiltinBackup, XtraBackup, Mysqlctld (which is technically builtin) +func TestIncrementalBackupAndRestoreToTimestamp(t *testing.T) { + tcase := &backup.PITRTestCase{ + Name: "XtraBackup", + SetupType: backup.XtraBackup, + ComprssDetails: &backup.CompressionDetails{ + CompressorEngineName: "pgzip", + }, + } + backup.ExecTestIncrementalBackupAndRestoreToTimestamp(t, tcase) +} diff --git a/go/test/endtoend/backup/vtbackup/backup_only_test.go b/go/test/endtoend/backup/vtbackup/backup_only_test.go index 025c82daee1..e84346b846c 100644 --- a/go/test/endtoend/backup/vtbackup/backup_only_test.go +++ b/go/test/endtoend/backup/vtbackup/backup_only_test.go @@ -19,7 +19,9 @@ package vtbackup import ( "context" "encoding/json" + "errors" "fmt" + "io" "os" "path" "strings" @@ -30,6 +32,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/stats/opentsdb" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl" @@ -59,8 +62,9 @@ func TestTabletInitialBackup(t *testing.T) { waitForReplicationToCatchup([]cluster.Vttablet{*replica1, *replica2}) - vtBackup(t, true, false, false) + dataPointReader := vtBackup(t, true, false, false) verifyBackupCount(t, shardKsName, 1) + verifyBackupStats(t, dataPointReader, true /* initialBackup */) // Initialize the tablets initTablets(t, false, false) @@ -144,11 +148,13 @@ func firstBackupTest(t *testing.T, tabletType string) { // backup the replica log.Infof("taking backup %s", time.Now()) - vtBackup(t, false, true, true) + dataPointReader := vtBackup(t, false, true, true) log.Infof("done taking backup %s", time.Now()) // check that the backup shows up in the listing verifyBackupCount(t, shardKsName, len(backups)+1) + // check that backup stats are what we expect + verifyBackupStats(t, dataPointReader, false /* initialBackup */) // insert more data on the primary _, err = primary.VttabletProcess.QueryTablet("insert 
into vt_insert_test (msg) values ('test2')", keyspaceName, true) @@ -173,16 +179,24 @@ func firstBackupTest(t *testing.T, tabletType string) { verifyBackupCount(t, shardKsName, 0) } -func vtBackup(t *testing.T, initialBackup bool, restartBeforeBackup, disableRedoLog bool) { +func vtBackup(t *testing.T, initialBackup bool, restartBeforeBackup, disableRedoLog bool) *opentsdb.DataPointReader { mysqlSocket, err := os.CreateTemp("", "vtbackup_test_mysql.sock") require.Nil(t, err) defer os.Remove(mysqlSocket.Name()) + // Prepare opentsdb stats file path. + statsPath := path.Join(t.TempDir(), fmt.Sprintf("opentsdb.%s.txt", t.Name())) + // Take the back using vtbackup executable extraArgs := []string{ "--allow_first_backup", "--db-credentials-file", dbCredentialFile, "--mysql_socket", mysqlSocket.Name(), + + // Use opentsdb for stats. + "--stats_backend", "opentsdb", + // Write stats to file for reading afterwards. + "--opentsdb_uri", fmt.Sprintf("file://%s", statsPath), } if restartBeforeBackup { extraArgs = append(extraArgs, "--restart_before_backup") @@ -201,6 +215,10 @@ func vtBackup(t *testing.T, initialBackup bool, restartBeforeBackup, disableRedo log.Infof("starting backup tablet %s", time.Now()) err = localCluster.StartVtbackup(newInitDBFile, initialBackup, keyspaceName, shardName, cell, extraArgs...) require.Nil(t, err) + + f, err := os.OpenFile(statsPath, os.O_RDONLY, 0) + require.NoError(t, err) + return opentsdb.NewDataPointReader(f) } func verifyBackupCount(t *testing.T, shardKsName string, expected int) []string { @@ -413,3 +431,73 @@ func waitForReplicationToCatchup(tablets []cluster.Vttablet) bool { } } } + +func verifyBackupStats(t *testing.T, dataPointReader *opentsdb.DataPointReader, initialBackup bool) { + // During execution, the following phases will become active, in order. 
+ var expectActivePhases []string + if initialBackup { + expectActivePhases = []string{ + "initialbackup", + } + } else { + expectActivePhases = []string{ + "restorelastbackup", + "catchupreplication", + "takenewbackup", + } + } + + // Sequence of phase activity. + activePhases := make([]string, 0) + + // Last seen phase values. + phaseValues := make(map[string]int64) + + // Scan for phase activity until all we're out of stats to scan. + for dataPoint, err := dataPointReader.Read(); !errors.Is(err, io.EOF); dataPoint, err = dataPointReader.Read() { + // We're only interested in "vtbackup.phase" metrics in this test. + if dataPoint.Metric != "vtbackup.phase" { + continue + } + + phase := dataPoint.Tags["phase"] + value := int64(dataPoint.Value) + lastValue, ok := phaseValues[phase] + + // The value should always be 0 or 1. + require.True(t, int64(0) == value || int64(1) == value) + + // The first time the phase is reported, it should be 0. + if !ok { + require.Equal(t, int64(0), value) + } + + // Eventually the phase should go active. The next time it reports, + // it should go inactive. + if lastValue == 1 { + require.Equal(t, int64(0), value) + } + + // Record current value. + phaseValues[phase] = value + + // Add phase to sequence once it goes from active to inactive. + if lastValue == 1 && value == 0 { + activePhases = append(activePhases, phase) + } + + // Verify at most one phase is active. + activeCount := 0 + for _, value := range phaseValues { + if value == int64(0) { + continue + } + + activeCount++ + require.LessOrEqual(t, activeCount, 1) + } + } + + // Verify phase sequences. 
+ require.Equal(t, expectActivePhases, activePhases) +} diff --git a/go/test/endtoend/backup/vtctlbackup/backup_utils.go b/go/test/endtoend/backup/vtctlbackup/backup_utils.go index d93b9326d4f..2fbfadcd22e 100644 --- a/go/test/endtoend/backup/vtctlbackup/backup_utils.go +++ b/go/test/endtoend/backup/vtctlbackup/backup_utils.go @@ -33,8 +33,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/json2" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" @@ -60,6 +61,7 @@ var ( primary *cluster.Vttablet replica1 *cluster.Vttablet replica2 *cluster.Vttablet + replica3 *cluster.Vttablet localCluster *cluster.LocalProcessCluster newInitDBFile string useXtrabackup bool @@ -83,11 +85,13 @@ var ( } vtInsertTest = ` - create table vt_insert_test ( - id bigint auto_increment, - msg varchar(64), - primary key (id) - ) Engine=InnoDB` + create table vt_insert_test ( + id bigint auto_increment, + msg varchar(64), + primary key (id) + ) Engine=InnoDB + ` + SetupReplica3Tablet func(extraArgs []string) (*cluster.Vttablet, error) ) type CompressionDetails struct { @@ -155,7 +159,7 @@ func LaunchCluster(setupType int, streamMode string, stripes int, cDetails *Comp // if streamMode is xbstream, add some additional args to test other xtrabackup flags if streamMode == "xbstream" { - xtrabackupArgs = append(xtrabackupArgs, "--xtrabackup_prepare_flags", fmt.Sprintf("--use-memory=100M")) //nolint + xtrabackupArgs = append(xtrabackupArgs, "--xtrabackup_prepare_flags", "--use-memory=100M") } commonTabletArg = append(commonTabletArg, xtrabackupArgs...) @@ -164,11 +168,14 @@ func LaunchCluster(setupType int, streamMode string, stripes int, cDetails *Comp commonTabletArg = append(commonTabletArg, getCompressorArgs(cDetails)...) 
var mysqlProcs []*exec.Cmd - for i := 0; i < 3; i++ { - tabletType := "replica" - if i == 0 { - tabletType = "primary" - } + tabletTypes := map[int]string{ + 0: "primary", + 1: "replica", + 2: "rdonly", + 3: "spare", + } + + createTablet := func(tabletType string) error { tablet := localCluster.NewVttabletInstance(tabletType, 0, cell) tablet.VttabletProcess = localCluster.VtprocessInstanceFromVttablet(tablet, shard.Name, keyspaceName) tablet.VttabletProcess.DbPassword = dbPassword @@ -178,33 +185,40 @@ func LaunchCluster(setupType int, streamMode string, stripes int, cDetails *Comp if setupType == Mysqlctld { mysqlctldProcess, err := cluster.MysqlCtldProcessInstance(tablet.TabletUID, tablet.MySQLPort, localCluster.TmpDirectory) if err != nil { - return 1, err + return err } tablet.MysqlctldProcess = *mysqlctldProcess tablet.MysqlctldProcess.InitDBFile = newInitDBFile tablet.MysqlctldProcess.ExtraArgs = extraArgs tablet.MysqlctldProcess.Password = tablet.VttabletProcess.DbPassword if err := tablet.MysqlctldProcess.Start(); err != nil { - return 1, err + return err } shard.Vttablets = append(shard.Vttablets, tablet) - continue + return nil } mysqlctlProcess, err := cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, localCluster.TmpDirectory) if err != nil { - return 1, err + return err } tablet.MysqlctlProcess = *mysqlctlProcess tablet.MysqlctlProcess.InitDBFile = newInitDBFile tablet.MysqlctlProcess.ExtraArgs = extraArgs proc, err := tablet.MysqlctlProcess.StartProcess() if err != nil { - return 1, err + return err } mysqlProcs = append(mysqlProcs, proc) shard.Vttablets = append(shard.Vttablets, tablet) + return nil + } + for i := 0; i < 4; i++ { + tabletType := tabletTypes[i] + if err := createTablet(tabletType); err != nil { + return 1, err + } } for _, proc := range mysqlProcs { if err := proc.Wait(); err != nil { @@ -214,6 +228,7 @@ func LaunchCluster(setupType int, streamMode string, stripes int, cDetails *Comp primary = shard.Vttablets[0] 
replica1 = shard.Vttablets[1] replica2 = shard.Vttablets[2] + replica3 = shard.Vttablets[3] if err := localCluster.VtctlclientProcess.InitTablet(primary, cell, keyspaceName, hostname, shard.Name); err != nil { return 1, err @@ -221,18 +236,29 @@ func LaunchCluster(setupType int, streamMode string, stripes int, cDetails *Comp if err := localCluster.VtctlclientProcess.InitTablet(replica1, cell, keyspaceName, hostname, shard.Name); err != nil { return 1, err } + if err := localCluster.VtctlclientProcess.InitTablet(replica2, cell, keyspaceName, hostname, shard.Name); err != nil { + return 1, err + } vtctldClientProcess := cluster.VtctldClientProcessInstance("localhost", localCluster.VtctldProcess.GrpcPort, localCluster.TmpDirectory) _, err = vtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, "--durability-policy=semi_sync") if err != nil { return 1, err } - for _, tablet := range []cluster.Vttablet{*primary, *replica1} { + for _, tablet := range []*cluster.Vttablet{primary, replica1, replica2} { // we don't start replica3 yet if err := tablet.VttabletProcess.Setup(); err != nil { return 1, err } } + SetupReplica3Tablet = func(extraArgs []string) (*cluster.Vttablet, error) { + replica3.VttabletProcess.ExtraArgs = append(replica3.VttabletProcess.ExtraArgs, extraArgs...) 
+ if err := replica3.VttabletProcess.Setup(); err != nil { + return replica3, err + } + return replica3, nil + } + if err := localCluster.VtctlclientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID); err != nil { return 1, err } @@ -812,6 +838,7 @@ func terminatedRestore(t *testing.T) { } func checkTabletType(t *testing.T, alias string, tabletType topodata.TabletType) { + t.Helper() // for loop for 15 seconds to check if tablet type is correct for i := 0; i < 15; i++ { output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("GetTablet", alias) @@ -1062,49 +1089,38 @@ func terminateRestore(t *testing.T) { assert.Fail(t, "restore in progress file missing") } tmpProcess.Process.Signal(syscall.SIGTERM) - found = true //nolint - return + found = true + break } } assert.True(t, found, "Restore message not found") } -func vtctlBackupReplicaNoDestroyNoWrites(t *testing.T, tabletType string) (backups []string, destroy func(t *testing.T)) { +func vtctlBackupReplicaNoDestroyNoWrites(t *testing.T, replicaIndex int) (backups []string) { + replica := getReplica(t, replicaIndex) numBackups := len(waitForNumBackups(t, -1)) - restoreWaitForBackup(t, tabletType, nil, true) - verifyInitialReplication(t) - err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica.Alias) require.Nil(t, err) backups = waitForNumBackups(t, numBackups+1) require.NotEmpty(t, backups) - verifyTabletBackupStats(t, replica1.VttabletProcess.GetVars()) + verifyTabletBackupStats(t, replica.VttabletProcess.GetVars()) - err = replica2.VttabletProcess.WaitForTabletStatusesForTimeout([]string{"SERVING"}, 25*time.Second) - require.Nil(t, err) - - err = replica2.VttabletProcess.TearDown() - require.Nil(t, err) - - err = localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", replica2.Alias) - require.Nil(t, err) - - destroy = func(t *testing.T) { - 
verifyAfterRemovingBackupNoBackupShouldBePresent(t, backups) - } - return backups, destroy + return backups } -func GetReplicaPosition(t *testing.T) string { - pos, _ := cluster.GetPrimaryPosition(t, *replica1, hostname) +func GetReplicaPosition(t *testing.T, replicaIndex int) string { + replica := getReplica(t, replicaIndex) + pos, _ := cluster.GetPrimaryPosition(t, *replica, hostname) return pos } -func GetReplicaGtidPurged(t *testing.T) string { +func GetReplicaGtidPurged(t *testing.T, replicaIndex int) string { + replica := getReplica(t, replicaIndex) query := "select @@global.gtid_purged as gtid_purged" - rs, err := replica1.VttabletProcess.QueryTablet(query, keyspaceName, true) + rs, err := replica.VttabletProcess.QueryTablet(query, keyspaceName, true) require.NoError(t, err) row := rs.Named().Row() require.NotNil(t, row) @@ -1137,13 +1153,64 @@ func ReadRowsFromPrimary(t *testing.T) (msgs []string) { return ReadRowsFromTablet(t, primary) } -func ReadRowsFromReplica(t *testing.T) (msgs []string) { - return ReadRowsFromTablet(t, replica1) +func getReplica(t *testing.T, replicaIndex int) *cluster.Vttablet { + switch replicaIndex { + case 0: + return replica1 + case 1: + return replica2 + case 2: + return replica3 + default: + assert.Failf(t, "invalid replica index", "index=%d", replicaIndex) + return nil + } +} + +func ReadRowsFromReplica(t *testing.T, replicaIndex int) (msgs []string) { + return ReadRowsFromTablet(t, getReplica(t, replicaIndex)) +} + +// FlushBinaryLogsOnReplica issues `FLUSH BINARY LOGS` times +func FlushBinaryLogsOnReplica(t *testing.T, replicaIndex int, count int) { + replica := getReplica(t, replicaIndex) + query := "flush binary logs" + for i := 0; i < count; i++ { + _, err := replica.VttabletProcess.QueryTablet(query, keyspaceName, true) + require.NoError(t, err) + } +} + +// FlushAndPurgeBinaryLogsOnReplica intentionally loses all existing binary logs. It flushes into a new binary log +// and immediately purges all previous logs. 
+// This is used to lose information. +func FlushAndPurgeBinaryLogsOnReplica(t *testing.T, replicaIndex int) (lastBinlog string) { + FlushBinaryLogsOnReplica(t, replicaIndex, 1) + + replica := getReplica(t, replicaIndex) + { + query := "show binary logs" + rs, err := replica.VttabletProcess.QueryTablet(query, keyspaceName, true) + require.NoError(t, err) + require.NotEmpty(t, rs.Rows) + for _, row := range rs.Rows { + // binlog file name is first column + lastBinlog = row[0].ToString() + } + } + { + query, err := sqlparser.ParseAndBind("purge binary logs to %a", sqltypes.StringBindVariable(lastBinlog)) + require.NoError(t, err) + _, err = replica.VttabletProcess.QueryTablet(query, keyspaceName, true) + require.NoError(t, err) + } + return lastBinlog } func readManifestFile(t *testing.T, backupLocation string) (manifest *mysqlctl.BackupManifest) { // reading manifest - data, err := os.ReadFile(backupLocation + "/MANIFEST") + fullPath := backupLocation + "/MANIFEST" + data, err := os.ReadFile(fullPath) require.NoErrorf(t, err, "error while reading MANIFEST %v", err) // parsing manifest @@ -1153,11 +1220,11 @@ func readManifestFile(t *testing.T, backupLocation string) (manifest *mysqlctl.B return manifest } -func TestReplicaFullBackup(t *testing.T) (manifest *mysqlctl.BackupManifest, destroy func(t *testing.T)) { - backups, destroy := vtctlBackupReplicaNoDestroyNoWrites(t, "replica") +func TestReplicaFullBackup(t *testing.T, replicaIndex int) (manifest *mysqlctl.BackupManifest) { + backups := vtctlBackupReplicaNoDestroyNoWrites(t, replicaIndex) backupLocation := localCluster.CurrentVTDATAROOT + "/backups/" + shardKsName + "/" + backups[len(backups)-1] - return readManifestFile(t, backupLocation), destroy + return readManifestFile(t, backupLocation) } // waitForNumBackups waits for GetBackups to list exactly the given expected number. 
@@ -1190,13 +1257,13 @@ func waitForNumBackups(t *testing.T, expectNumBackups int) []string { } } -func TestReplicaIncrementalBackup(t *testing.T, incrementalFromPos mysql.Position, expectError string) (manifest *mysqlctl.BackupManifest, backupName string) { +func testReplicaIncrementalBackup(t *testing.T, replica *cluster.Vttablet, incrementalFromPos replication.Position, expectError string) (manifest *mysqlctl.BackupManifest, backupName string) { numBackups := len(waitForNumBackups(t, -1)) incrementalFromPosArg := "auto" if !incrementalFromPos.IsZero() { - incrementalFromPosArg = mysql.EncodePosition(incrementalFromPos) + incrementalFromPosArg = replication.EncodePosition(incrementalFromPos) } - output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("Backup", "--", "--incremental_from_pos", incrementalFromPosArg, replica1.Alias) + output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("Backup", "--incremental-from-pos", incrementalFromPosArg, replica.Alias) if expectError != "" { require.Errorf(t, err, "expected: %v", expectError) require.Contains(t, output, expectError) @@ -1206,16 +1273,50 @@ func TestReplicaIncrementalBackup(t *testing.T, incrementalFromPos mysql.Positio backups := waitForNumBackups(t, numBackups+1) require.NotEmptyf(t, backups, "output: %v", output) - verifyTabletBackupStats(t, replica1.VttabletProcess.GetVars()) + verifyTabletBackupStats(t, replica.VttabletProcess.GetVars()) backupName = backups[len(backups)-1] backupLocation := localCluster.CurrentVTDATAROOT + "/backups/" + shardKsName + "/" + backupName return readManifestFile(t, backupLocation), backupName } -func TestReplicaRestoreToPos(t *testing.T, restoreToPos mysql.Position, expectError string) { +func TestReplicaIncrementalBackup(t *testing.T, replicaIndex int, incrementalFromPos replication.Position, expectError string) (manifest *mysqlctl.BackupManifest, backupName string) { + replica := getReplica(t, replicaIndex) + return 
testReplicaIncrementalBackup(t, replica, incrementalFromPos, expectError) +} + +func TestReplicaFullRestore(t *testing.T, replicaIndex int, expectError string) { + replica := getReplica(t, replicaIndex) + + output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("RestoreFromBackup", replica.Alias) + if expectError != "" { + require.Errorf(t, err, "expected: %v", expectError) + require.Contains(t, output, expectError) + return + } + require.NoErrorf(t, err, "output: %v", output) + verifyTabletRestoreStats(t, replica.VttabletProcess.GetVars()) +} + +func TestReplicaRestoreToPos(t *testing.T, replicaIndex int, restoreToPos replication.Position, expectError string) { + replica := getReplica(t, replicaIndex) + require.False(t, restoreToPos.IsZero()) - restoreToPosArg := mysql.EncodePosition(restoreToPos) - output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("RestoreFromBackup", "--", "--restore_to_pos", restoreToPosArg, replica1.Alias) + restoreToPosArg := replication.EncodePosition(restoreToPos) + output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("RestoreFromBackup", "--", "--restore_to_pos", restoreToPosArg, replica.Alias) + if expectError != "" { + require.Errorf(t, err, "expected: %v", expectError) + require.Contains(t, output, expectError) + return + } + require.NoErrorf(t, err, "output: %v", output) + verifyTabletRestoreStats(t, replica.VttabletProcess.GetVars()) + checkTabletType(t, replica1.Alias, topodata.TabletType_DRAINED) +} + +func TestReplicaRestoreToTimestamp(t *testing.T, restoreToTimestamp time.Time, expectError string) { + require.False(t, restoreToTimestamp.IsZero()) + restoreToTimestampArg := mysqlctl.FormatRFC3339(restoreToTimestamp) + output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("RestoreFromBackup", "--restore-to-timestamp", restoreToTimestampArg, replica1.Alias) if expectError != "" { require.Errorf(t, err, "expected: %v", expectError) require.Contains(t, output, 
expectError) @@ -1223,6 +1324,7 @@ func TestReplicaRestoreToPos(t *testing.T, restoreToPos mysql.Position, expectEr } require.NoErrorf(t, err, "output: %v", output) verifyTabletRestoreStats(t, replica1.VttabletProcess.GetVars()) + checkTabletType(t, replica1.Alias, topodata.TabletType_DRAINED) } func verifyTabletBackupStats(t *testing.T, vars map[string]any) { @@ -1278,7 +1380,7 @@ func verifyRestorePositionAndTimeStats(t *testing.T, vars map[string]any) { require.Contains(t, vars, "RestorePosition") require.NotEqual(t, "", backupPosition) require.NotEqual(t, "", backupTime) - rp, err := mysql.DecodePosition(backupPosition) + rp, err := replication.DecodePosition(backupPosition) require.NoError(t, err) require.False(t, rp.IsZero()) } diff --git a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go new file mode 100644 index 00000000000..8b9014e7f8c --- /dev/null +++ b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go @@ -0,0 +1,694 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vtctlbackup + +import ( + "context" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/replication" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/mysqlctl" +) + +var ( + gracefulPostBackupDuration = 10 * time.Millisecond + backupTimeoutDuration = 3 * time.Minute +) + +const ( + postWriteSleepDuration = 2 * time.Second // Nice for debugging purposes: clearly distinguishes the timestamps of certain operations, and as results the names/timestamps of backups. +) + +const ( + operationFullBackup = iota + operationIncrementalBackup + operationRestore + operationFlushAndPurge +) + +type PITRTestCase struct { + Name string + SetupType int + ComprssDetails *CompressionDetails +} + +type testedBackupTimestampInfo struct { + rows int + postTimestamp time.Time +} + +func waitForReplica(t *testing.T, replicaIndex int) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + pMsgs := ReadRowsFromPrimary(t) + for { + rMsgs := ReadRowsFromReplica(t, replicaIndex) + if len(pMsgs) == len(rMsgs) { + // success + return + } + select { + case <-ctx.Done(): + assert.FailNow(t, "timeout waiting for replica to catch up") + return + case <-time.After(time.Second): + // + } + } +} + +// ExecTestIncrementalBackupAndRestoreToPos runs a series of backups: a full backup and multiple incremental backups. +// in between, it makes writes to the database, and takes notes: what data was available in what backup. +// It then restores each and every one of those backups, in random order, and expects to find the specific data associated with the backup. 
+func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) { + defer cluster.PanicHandler(t) + + t.Run(tcase.Name, func(t *testing.T) { + // setup cluster for the testing + code, err := LaunchCluster(tcase.SetupType, "xbstream", 0, tcase.ComprssDetails) + require.NoError(t, err, "setup failed with status code %d", code) + defer TearDownCluster() + + InitTestTable(t) + + rowsPerPosition := map[string]int{} + backupPositions := []string{} + + recordRowsPerPosition := func(t *testing.T) { + pos := GetReplicaPosition(t, 0) + msgs := ReadRowsFromReplica(t, 0) + if _, ok := rowsPerPosition[pos]; !ok { + backupPositions = append(backupPositions, pos) + rowsPerPosition[pos] = len(msgs) + } + } + + var fullBackupPos replication.Position + t.Run("full backup", func(t *testing.T) { + InsertRowOnPrimary(t, "before-full-backup") + waitForReplica(t, 0) + + manifest := TestReplicaFullBackup(t, 0) + fullBackupPos = manifest.Position + require.False(t, fullBackupPos.IsZero()) + // + msgs := ReadRowsFromReplica(t, 0) + pos := replication.EncodePosition(fullBackupPos) + backupPositions = append(backupPositions, pos) + rowsPerPosition[pos] = len(msgs) + }) + + lastBackupPos := fullBackupPos + InsertRowOnPrimary(t, "before-incremental-backups") + + tt := []struct { + name string + writeBeforeBackup bool + fromFullPosition bool + autoPosition bool + expectError string + }{ + { + name: "first incremental backup", + }, + { + name: "fail1", + expectError: "no binary logs to backup", + }, + { + name: "fail2", + expectError: "no binary logs to backup", + }, + { + name: "make writes, succeed", + writeBeforeBackup: true, + }, + { + name: "fail, no binary logs to backup", + expectError: "no binary logs to backup", + }, + { + name: "make writes again, succeed", + writeBeforeBackup: true, + }, + { + name: "auto position, succeed", + writeBeforeBackup: true, + autoPosition: true, + }, + { + name: "fail auto position, no binary logs to backup", + autoPosition: true, + 
expectError: "no binary logs to backup", + }, + { + name: "auto position, make writes again, succeed", + writeBeforeBackup: true, + autoPosition: true, + }, + { + name: "from full backup position", + fromFullPosition: true, + }, + } + var fromFullPositionBackups []string + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + if tc.writeBeforeBackup { + InsertRowOnPrimary(t, "") + } + // we wait for >1 second because backups are written to a directory named after the current timestamp, + // in 1 second resolution. We want to avoid two backups that have the same pathname. Realistically this + // is only ever a problem in this end-to-end test, not in production. + // Also, we give the replica a chance to catch up. + time.Sleep(postWriteSleepDuration) + // randomly flush binary logs 0, 1 or 2 times + FlushBinaryLogsOnReplica(t, 0, rand.Intn(3)) + waitForReplica(t, 0) + recordRowsPerPosition(t) + // configure --incremental-from-pos to either: + // - auto + // - explicit last backup pos + // - back in history to the original full backup + var incrementalFromPos replication.Position + if !tc.autoPosition { + incrementalFromPos = lastBackupPos + if tc.fromFullPosition { + incrementalFromPos = fullBackupPos + } + } + // always use same 1st replica + manifest, backupName := TestReplicaIncrementalBackup(t, 0, incrementalFromPos, tc.expectError) + if tc.expectError != "" { + return + } + defer func() { + lastBackupPos = manifest.Position + }() + if tc.fromFullPosition { + fromFullPositionBackups = append(fromFullPositionBackups, backupName) + } + require.False(t, manifest.FromPosition.IsZero()) + require.NotEqual(t, manifest.Position, manifest.FromPosition) + require.True(t, manifest.Position.GTIDSet.Union(manifest.PurgedPosition.GTIDSet).Contains(manifest.FromPosition.GTIDSet)) + + gtidPurgedPos, err := replication.ParsePosition(replication.Mysql56FlavorID, GetReplicaGtidPurged(t, 0)) + require.NoError(t, err) + fromPositionIncludingPurged := 
manifest.FromPosition.GTIDSet.Union(gtidPurgedPos.GTIDSet) + + expectFromPosition := lastBackupPos.GTIDSet + if !incrementalFromPos.IsZero() { + expectFromPosition = incrementalFromPos.GTIDSet.Union(gtidPurgedPos.GTIDSet) + } + require.Equalf(t, expectFromPosition, fromPositionIncludingPurged, "expected: %v, found: %v, gtid_purged: %v, manifest.Position: %v", expectFromPosition, fromPositionIncludingPurged, gtidPurgedPos, manifest.Position) + }) + } + + sampleTestedBackupPos := "" + testRestores := func(t *testing.T) { + for _, r := range rand.Perm(len(backupPositions)) { + pos := backupPositions[r] + testName := fmt.Sprintf("%s, %d records", pos, rowsPerPosition[pos]) + t.Run(testName, func(t *testing.T) { + restoreToPos, err := replication.DecodePosition(pos) + require.NoError(t, err) + TestReplicaRestoreToPos(t, 0, restoreToPos, "") + msgs := ReadRowsFromReplica(t, 0) + count, ok := rowsPerPosition[pos] + require.True(t, ok) + assert.Equalf(t, count, len(msgs), "messages: %v", msgs) + if sampleTestedBackupPos == "" { + sampleTestedBackupPos = pos + } + }) + } + } + t.Run("PITR", func(t *testing.T) { + testRestores(t) + }) + t.Run("remove full position backups", func(t *testing.T) { + // Delete the fromFullPosition backup(s), which leaves us with less restore options. Try again. + for _, backupName := range fromFullPositionBackups { + RemoveBackup(t, backupName) + } + }) + t.Run("PITR-2", func(t *testing.T) { + testRestores(t) + }) + // Test that we can create a new tablet with --restore_from_backup --restore-to-pos and that it bootstraps + // via PITR and ends up in DRAINED type. 
+ t.Run("init tablet PITR", func(t *testing.T) { + require.NotEmpty(t, sampleTestedBackupPos) + + var tablet *cluster.Vttablet + + t.Run(fmt.Sprintf("init from backup pos %s", sampleTestedBackupPos), func(t *testing.T) { + tablet, err = SetupReplica3Tablet([]string{"--restore-to-pos", sampleTestedBackupPos}) + assert.NoError(t, err) + }) + t.Run("wait for drained", func(t *testing.T) { + err = tablet.VttabletProcess.WaitForTabletTypesForTimeout([]string{"drained"}, backupTimeoutDuration) + assert.NoError(t, err) + }) + t.Run(fmt.Sprintf("validate %d rows", rowsPerPosition[sampleTestedBackupPos]), func(t *testing.T) { + require.NotZero(t, rowsPerPosition[sampleTestedBackupPos]) + msgs := ReadRowsFromReplica(t, 2) + assert.Equal(t, rowsPerPosition[sampleTestedBackupPos], len(msgs)) + }) + }) + }) +} + +// ExecTestIncrementalBackupAndRestoreToPos +func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTestCase) { + defer cluster.PanicHandler(t) + + var lastInsertedRowTimestamp time.Time + insertRowOnPrimary := func(t *testing.T, hint string) { + InsertRowOnPrimary(t, hint) + lastInsertedRowTimestamp = time.Now() + } + + t.Run(tcase.Name, func(t *testing.T) { + // setup cluster for the testing + code, err := LaunchCluster(tcase.SetupType, "xbstream", 0, &CompressionDetails{ + CompressorEngineName: "pgzip", + }) + require.NoError(t, err, "setup failed with status code %d", code) + defer TearDownCluster() + + InitTestTable(t) + + testedBackups := []testedBackupTimestampInfo{} + + var fullBackupPos replication.Position + t.Run("full backup", func(t *testing.T) { + insertRowOnPrimary(t, "before-full-backup") + waitForReplica(t, 0) + + manifest := TestReplicaFullBackup(t, 0) + fullBackupPos = manifest.Position + require.False(t, fullBackupPos.IsZero()) + // + rows := ReadRowsFromReplica(t, 0) + testedBackups = append(testedBackups, testedBackupTimestampInfo{len(rows), time.Now()}) + }) + + lastBackupPos := fullBackupPos + insertRowOnPrimary(t, 
"before-incremental-backups") + + tt := []struct { + name string + writeBeforeBackup bool + fromFullPosition bool + autoPosition bool + expectError string + }{ + { + name: "first incremental backup", + }, + { + name: "fail1", + expectError: "no binary logs to backup", + }, + { + name: "fail2", + expectError: "no binary logs to backup", + }, + { + name: "make writes, succeed", + writeBeforeBackup: true, + }, + { + name: "fail, no binary logs to backup", + expectError: "no binary logs to backup", + }, + { + name: "make writes again, succeed", + writeBeforeBackup: true, + }, + { + name: "auto position, succeed", + writeBeforeBackup: true, + autoPosition: true, + }, + { + name: "fail auto position, no binary logs to backup", + autoPosition: true, + expectError: "no binary logs to backup", + }, + { + name: "auto position, make writes again, succeed", + writeBeforeBackup: true, + autoPosition: true, + }, + { + name: "from full backup position", + fromFullPosition: true, + }, + } + var fromFullPositionBackups []string + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + if tc.writeBeforeBackup { + insertRowOnPrimary(t, "") + } + // we wait for >1 second because backups are written to a directory named after the current timestamp, + // in 1 second resolution. We want to avoid two backups that have the same pathname. Realistically this + // is only ever a problem in this end-to-end test, not in production. + // Also, we give the replica a chance to catch up. 
+ time.Sleep(postWriteSleepDuration) + waitForReplica(t, 0) + rowsBeforeBackup := ReadRowsFromReplica(t, 0) + // configure --incremental-from-pos to either: + // - auto + // - explicit last backup pos + // - back in history to the original full backup + var incrementalFromPos replication.Position + if !tc.autoPosition { + incrementalFromPos = lastBackupPos + if tc.fromFullPosition { + incrementalFromPos = fullBackupPos + } + } + manifest, backupName := TestReplicaIncrementalBackup(t, 0, incrementalFromPos, tc.expectError) + if tc.expectError != "" { + return + } + // We wish to mark the current post-backup timestamp. We will later on retore to this point in time. + // However, the restore is up to and _exclusive_ of the timestamp. So for test's sake, we sleep + // an extra few milliseconds just to ensure the timestamp we read is strictly after the backup time. + // This is basicaly to avoid weird flakiness in CI. + time.Sleep(gracefulPostBackupDuration) + testedBackups = append(testedBackups, testedBackupTimestampInfo{len(rowsBeforeBackup), time.Now()}) + defer func() { + lastBackupPos = manifest.Position + }() + if tc.fromFullPosition { + fromFullPositionBackups = append(fromFullPositionBackups, backupName) + } + require.False(t, manifest.FromPosition.IsZero()) + require.NotEqual(t, manifest.Position, manifest.FromPosition) + require.True(t, manifest.Position.GTIDSet.Union(manifest.PurgedPosition.GTIDSet).Contains(manifest.FromPosition.GTIDSet)) + { + incrDetails := manifest.IncrementalDetails + require.NotNil(t, incrDetails) + require.NotEmpty(t, incrDetails.FirstTimestamp) + require.NotEmpty(t, incrDetails.FirstTimestampBinlog) + require.NotEmpty(t, incrDetails.LastTimestamp) + require.NotEmpty(t, incrDetails.LastTimestampBinlog) + require.GreaterOrEqual(t, incrDetails.LastTimestamp, incrDetails.FirstTimestamp) + + if tc.fromFullPosition { + require.Greater(t, incrDetails.LastTimestampBinlog, incrDetails.FirstTimestampBinlog) + } else { + // No binlog rotation + 
require.Equal(t, incrDetails.LastTimestampBinlog, incrDetails.FirstTimestampBinlog) + } + } + + gtidPurgedPos, err := replication.ParsePosition(replication.Mysql56FlavorID, GetReplicaGtidPurged(t, 0)) + require.NoError(t, err) + fromPositionIncludingPurged := manifest.FromPosition.GTIDSet.Union(gtidPurgedPos.GTIDSet) + + expectFromPosition := lastBackupPos.GTIDSet.Union(gtidPurgedPos.GTIDSet) + if !incrementalFromPos.IsZero() { + expectFromPosition = incrementalFromPos.GTIDSet.Union(gtidPurgedPos.GTIDSet) + } + require.Equalf(t, expectFromPosition, fromPositionIncludingPurged, "expected: %v, found: %v, gtid_purged: %v, manifest.Position: %v", expectFromPosition, fromPositionIncludingPurged, gtidPurgedPos, manifest.Position) + }) + } + + sampleTestedBackupIndex := -1 + testRestores := func(t *testing.T) { + numFailedRestores := 0 + numSuccessfulRestores := 0 + for _, backupIndex := range rand.Perm(len(testedBackups)) { + testedBackup := testedBackups[backupIndex] + testName := fmt.Sprintf("backup num%v at %v, %v rows", backupIndex, mysqlctl.FormatRFC3339(testedBackup.postTimestamp), testedBackup.rows) + t.Run(testName, func(t *testing.T) { + expectError := "" + if testedBackup.postTimestamp.After(lastInsertedRowTimestamp) { + // The restore_to_timestamp value is beyond the last incremental + // There is no path to restore to this timestamp. + expectError = "no path found" + } + TestReplicaRestoreToTimestamp(t, testedBackup.postTimestamp, expectError) + if expectError == "" { + msgs := ReadRowsFromReplica(t, 0) + assert.Equalf(t, testedBackup.rows, len(msgs), "messages: %v", msgs) + numSuccessfulRestores++ + if sampleTestedBackupIndex < 0 { + sampleTestedBackupIndex = backupIndex + } + } else { + numFailedRestores++ + } + }) + } + // Integrity check for the test itself: ensure we have both successful and failed restores. 
+ require.NotZero(t, numFailedRestores) + require.NotZero(t, numSuccessfulRestores) + } + t.Run("PITR", func(t *testing.T) { + testRestores(t) + }) + t.Run("remove full position backups", func(t *testing.T) { + // Delete the fromFullPosition backup(s), which leaves us with less restore options. Try again. + for _, backupName := range fromFullPositionBackups { + RemoveBackup(t, backupName) + } + }) + t.Run("PITR-2", func(t *testing.T) { + testRestores(t) + }) + // Test that we can create a new tablet with --restore_from_backup --restore-to-timestamp and that it bootstraps + // via PITR and ends up in DRAINED type. + t.Run("init tablet PITR", func(t *testing.T) { + require.GreaterOrEqual(t, sampleTestedBackupIndex, 0) + sampleTestedBackup := testedBackups[sampleTestedBackupIndex] + restoreToTimestampArg := mysqlctl.FormatRFC3339(sampleTestedBackup.postTimestamp) + + var tablet *cluster.Vttablet + + t.Run(fmt.Sprintf("init from backup num %d", sampleTestedBackupIndex), func(t *testing.T) { + tablet, err = SetupReplica3Tablet([]string{"--restore-to-timestamp", restoreToTimestampArg}) + assert.NoError(t, err) + }) + t.Run("wait for drained", func(t *testing.T) { + err = tablet.VttabletProcess.WaitForTabletTypesForTimeout([]string{"drained"}, backupTimeoutDuration) + assert.NoError(t, err) + }) + t.Run(fmt.Sprintf("validate %d rows", sampleTestedBackup.rows), func(t *testing.T) { + require.NotZero(t, sampleTestedBackup.rows) + msgs := ReadRowsFromReplica(t, 2) + assert.Equal(t, sampleTestedBackup.rows, len(msgs)) + }) + }) + }) +} + +// ExecTestIncrementalBackupOnTwoTablets runs a series of interleaved backups on two different replicas: full and incremental. +// Specifically, it's designed to test how incremental backups are taken by interleaved replicas, so that they successfully build on +// one another. 
+func ExecTestIncrementalBackupOnTwoTablets(t *testing.T, tcase *PITRTestCase) { + defer cluster.PanicHandler(t) + + t.Run(tcase.Name, func(t *testing.T) { + // setup cluster for the testing + code, err := LaunchCluster(tcase.SetupType, "xbstream", 0, tcase.ComprssDetails) + require.NoError(t, err, "setup failed with status code %d", code) + defer TearDownCluster() + + InitTestTable(t) + + rowsPerPosition := map[string]int{} + + recordRowsPerPosition := func(t *testing.T, replicaIndex int) { + pos := GetReplicaPosition(t, replicaIndex) + msgs := ReadRowsFromReplica(t, replicaIndex) + if _, ok := rowsPerPosition[pos]; !ok { + rowsPerPosition[pos] = len(msgs) + } + } + + var lastBackupPos replication.Position + InsertRowOnPrimary(t, "before-incremental-backups") + waitForReplica(t, 0) + waitForReplica(t, 1) + + tt := []struct { + name string + operationType int + replicaIndex int + expectError string + }{ + // The following tests run sequentially and build on top of previous results + { + name: "full1", + operationType: operationFullBackup, + }, + { + name: "incremental1", + operationType: operationIncrementalBackup, + }, + { + name: "restore1", + operationType: operationRestore, + }, + { + // Shows you can take an incremental restore when full & incremental backups were only ever executed on a different replica + name: "incremental2", + operationType: operationIncrementalBackup, + replicaIndex: 1, + }, + { + name: "full2", + operationType: operationFullBackup, + replicaIndex: 1, + }, + { + // This incremental backup will use full2 as the base backup + name: "incremental2-after-full2", + operationType: operationIncrementalBackup, + replicaIndex: 1, + }, + { + name: "restore2", + operationType: operationRestore, + replicaIndex: 1, + }, + // Begin a series of interleaved incremental backups + { + name: "incremental-replica1", + operationType: operationIncrementalBackup, + }, + { + name: "incremental-replica2", + operationType: operationIncrementalBackup, + 
replicaIndex: 1, + }, + { + name: "incremental-replica1", + operationType: operationIncrementalBackup, + }, + { + name: "incremental-replica2", + operationType: operationIncrementalBackup, + replicaIndex: 1, + }, + // Done interleaved backups. + { + // Lose binary log data + name: "flush and purge 1", + operationType: operationFlushAndPurge, + replicaIndex: 0, + }, + { + // Fail to run incremental backup due to lost data + name: "incremental-replica1 failure", + operationType: operationIncrementalBackup, + expectError: "Required entries have been purged", + }, + { + // Lose binary log data + name: "flush and purge 2", + operationType: operationFlushAndPurge, + replicaIndex: 1, + }, + { + // Fail to run incremental backup due to lost data + name: "incremental-replica2 failure", + operationType: operationIncrementalBackup, + replicaIndex: 1, + expectError: "Required entries have been purged", + }, + { + // Since we've lost binlog data, incremental backups are no longer possible. The situation can be salvaged by running a full backup + name: "full1 after purge", + operationType: operationFullBackup, + }, + { + // Show that replica2 incremental backup is able to work based on the above full backup + name: "incremental-replica2 after purge and backup", + operationType: operationIncrementalBackup, + replicaIndex: 1, + }, + } + insertRowAndWait := func(t *testing.T, replicaIndex int, data string) { + t.Run("insert row and wait", func(t *testing.T) { + InsertRowOnPrimary(t, data) + time.Sleep(postWriteSleepDuration) + waitForReplica(t, replicaIndex) + recordRowsPerPosition(t, replicaIndex) + }) + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + insertRowAndWait(t, tc.replicaIndex, tc.name) + t.Run("running operation", func(t *testing.T) { + switch tc.operationType { + case operationFlushAndPurge: + FlushAndPurgeBinaryLogsOnReplica(t, tc.replicaIndex) + case operationFullBackup: + manifest := TestReplicaFullBackup(t, tc.replicaIndex) + fullBackupPos := 
manifest.Position + require.False(t, fullBackupPos.IsZero()) + // + msgs := ReadRowsFromReplica(t, tc.replicaIndex) + pos := replication.EncodePosition(fullBackupPos) + rowsPerPosition[pos] = len(msgs) + + lastBackupPos = fullBackupPos + case operationIncrementalBackup: + var incrementalFromPos replication.Position // keep zero, we will use "auto" + manifest, _ := TestReplicaIncrementalBackup(t, tc.replicaIndex, incrementalFromPos, tc.expectError) + if tc.expectError != "" { + return + } + defer func() { + lastBackupPos = manifest.Position + }() + require.False(t, manifest.FromPosition.IsZero()) + require.NotEqual(t, manifest.Position, manifest.FromPosition) + require.True(t, manifest.Position.GTIDSet.Union(manifest.PurgedPosition.GTIDSet).Contains(manifest.FromPosition.GTIDSet)) + + gtidPurgedPos, err := replication.ParsePosition(replication.Mysql56FlavorID, GetReplicaGtidPurged(t, tc.replicaIndex)) + require.NoError(t, err) + fromPositionIncludingPurged := manifest.FromPosition.GTIDSet.Union(gtidPurgedPos.GTIDSet) + + require.True(t, lastBackupPos.GTIDSet.Contains(fromPositionIncludingPurged), "expected: %v to contain %v", lastBackupPos.GTIDSet, fromPositionIncludingPurged) + case operationRestore: + TestReplicaFullRestore(t, tc.replicaIndex, "") + // should return into replication stream + insertRowAndWait(t, tc.replicaIndex, "post-restore-check") + default: + require.FailNowf(t, "unknown operation type", "operation: %d", tc.operationType) + } + }) + }) + } + }) +} diff --git a/go/test/endtoend/cluster/cluster_process.go b/go/test/endtoend/cluster/cluster_process.go index 91ab69cd846..be43bcea002 100644 --- a/go/test/endtoend/cluster/cluster_process.go +++ b/go/test/endtoend/cluster/cluster_process.go @@ -32,17 +32,18 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "syscall" "testing" "time" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" 
"vitess.io/vitess/go/test/endtoend/filelock" "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/vtgateconn" @@ -61,6 +62,7 @@ import ( const ( DefaultCell = "zone1" DefaultStartPort = 6700 + DefaultVttestEnv = "VTTEST=endtoend" ) var ( @@ -145,7 +147,6 @@ type Vttablet struct { MysqlctlProcess MysqlctlProcess MysqlctldProcess MysqlctldProcess VttabletProcess *VttabletProcess - VtgrProcess *VtgrProcess } // Keyspace : Cluster accepts keyspace to launch it @@ -189,18 +190,20 @@ func (shard *Shard) Replica() *Vttablet { return nil } -// CtrlCHandler handles the teardown for the ctrl-c. -func (cluster *LocalProcessCluster) CtrlCHandler() { +// SetupCtrlCHandler handles the teardown for the ctrl-c. +func (cluster *LocalProcessCluster) SetupCtrlCHandler() { cluster.Context, cluster.CancelFunc = context.WithCancel(context.Background()) - c := make(chan os.Signal, 2) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - select { - case <-c: - cluster.Teardown() - os.Exit(0) - case <-cluster.Done(): - } + go func() { + c := make(chan os.Signal, 2) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + select { + case <-c: + cluster.Teardown() + os.Exit(0) + case <-cluster.Done(): + } + }() } // StartTopo starts topology server @@ -330,17 +333,16 @@ func (cluster *LocalProcessCluster) startKeyspace(keyspace Keyspace, shardNames log.Infof("Starting keyspace: %v", keyspace.Name) if keyspace.SidecarDBName == "" { - keyspace.SidecarDBName = sidecardb.DefaultName + keyspace.SidecarDBName = sidecar.DefaultName } // Create the keyspace if it doesn't already exist. 
_ = cluster.VtctlProcess.CreateKeyspace(keyspace.Name, keyspace.SidecarDBName) - var mysqlctlProcessList []*exec.Cmd for _, shardName := range shardNames { shard := &Shard{ Name: shardName, } log.Infof("Starting shard: %v", shardName) - mysqlctlProcessList = []*exec.Cmd{} + var mysqlctlProcessList []*exec.Cmd for i := 0; i < totalTabletsRequired; i++ { // instantiate vttablet object with reserved ports tabletUID := cluster.GetAndReserveTabletUID() @@ -483,7 +485,7 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard log.Infof("Starting keyspace: %v", keyspace.Name) if keyspace.SidecarDBName == "" { - keyspace.SidecarDBName = sidecardb.DefaultName + keyspace.SidecarDBName = sidecar.DefaultName } // Create the keyspace if it doesn't already exist. _ = cluster.VtctlProcess.CreateKeyspace(keyspace.Name, keyspace.SidecarDBName) @@ -624,7 +626,7 @@ func (cluster *LocalProcessCluster) SetupCluster(keyspace *Keyspace, shards []Sh log.Infof("Starting keyspace: %v", keyspace.Name) if keyspace.SidecarDBName == "" { - keyspace.SidecarDBName = sidecardb.DefaultName + keyspace.SidecarDBName = sidecar.DefaultName } if !cluster.ReusingVTDATAROOT { @@ -716,7 +718,7 @@ func (cluster *LocalProcessCluster) NewVtgateInstance() *VtgateProcess { // NewBareCluster instantiates a new cluster and does not assume existence of any of the vitess processes func NewBareCluster(cell string, hostname string) *LocalProcessCluster { cluster := &LocalProcessCluster{Cell: cell, Hostname: hostname, mx: new(sync.Mutex), DefaultCharset: "utf8mb4"} - go cluster.CtrlCHandler() + cluster.SetupCtrlCHandler() cluster.OriginalVTDATAROOT = os.Getenv("VTDATAROOT") cluster.CurrentVTDATAROOT = path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("vtroot_%d", cluster.GetAndReservePort())) @@ -942,28 +944,28 @@ func (cluster *LocalProcessCluster) StreamTabletHealthUntil(ctx context.Context, return err } - conditionSuccess := false - timeoutExceeded := false + var conditionSuccess atomic.Bool 
+ var timeoutExceeded atomic.Bool go func() { time.Sleep(timeout) - timeoutExceeded = true + timeoutExceeded.Store(true) }() err = conn.StreamHealth(ctx, func(shr *querypb.StreamHealthResponse) error { if condition(shr) { - conditionSuccess = true + conditionSuccess.Store(true) } - if timeoutExceeded || conditionSuccess { + if timeoutExceeded.Load() || conditionSuccess.Load() { return io.EOF } return nil }) - if conditionSuccess { + if conditionSuccess.Load() { return nil } - if timeoutExceeded { + if timeoutExceeded.Load() { return errors.New("timeout exceed while waiting for the condition in StreamHealth") } return err @@ -1125,7 +1127,7 @@ func (cluster *LocalProcessCluster) GetAndReservePort() int { for { cluster.nextPortForProcess = cluster.nextPortForProcess + 1 log.Infof("Attempting to reserve port: %v", cluster.nextPortForProcess) - ln, err := net.Listen("tcp", fmt.Sprintf(":%v", cluster.nextPortForProcess)) + ln, err := net.Listen("tcp", net.JoinHostPort("127.0.0.1", strconv.Itoa(cluster.nextPortForProcess))) if err != nil { log.Errorf("Can't listen on port %v: %s, trying next port", cluster.nextPortForProcess, err) @@ -1244,19 +1246,6 @@ func (cluster *LocalProcessCluster) NewVTOrcProcess(config VTOrcConfiguration) * } } -// NewVtgrProcess creates a new VtgrProcess object -func (cluster *LocalProcessCluster) NewVtgrProcess(clusters []string, config string, grPort int) *VtgrProcess { - base := VtctlProcessInstance(cluster.TopoProcess.Port, cluster.Hostname) - base.Binary = "vtgr" - return &VtgrProcess{ - VtctlProcess: *base, - LogDir: cluster.TmpDirectory, - clusters: clusters, - config: config, - grPort: grPort, - } -} - // VtprocessInstanceFromVttablet creates a new vttablet object func (cluster *LocalProcessCluster) VtprocessInstanceFromVttablet(tablet *Vttablet, shardName string, ksName string) *VttabletProcess { return VttabletProcessInstance( @@ -1276,8 +1265,16 @@ func (cluster *LocalProcessCluster) VtprocessInstanceFromVttablet(tablet *Vttabl } // 
StartVttablet starts a new tablet -func (cluster *LocalProcessCluster) StartVttablet(tablet *Vttablet, servingStatus string, - supportBackup bool, cell string, keyspaceName string, hostname string, shardName string) error { +func (cluster *LocalProcessCluster) StartVttablet( + tablet *Vttablet, + explicitServingStatus bool, + servingStatus string, + supportBackup bool, + cell string, + keyspaceName string, + hostname string, + shardName string, +) error { tablet.VttabletProcess = VttabletProcessInstance( tablet.HTTPPort, tablet.GrpcPort, @@ -1295,6 +1292,7 @@ func (cluster *LocalProcessCluster) StartVttablet(tablet *Vttablet, servingStatu tablet.VttabletProcess.SupportsBackup = supportBackup tablet.VttabletProcess.ServingStatus = servingStatus + tablet.VttabletProcess.ExplicitServingStatus = explicitServingStatus return tablet.VttabletProcess.Setup() } diff --git a/go/test/endtoend/cluster/cluster_util.go b/go/test/endtoend/cluster/cluster_util.go index 16022b29c32..3b2af8e2699 100644 --- a/go/test/endtoend/cluster/cluster_util.go +++ b/go/test/endtoend/cluster/cluster_util.go @@ -129,7 +129,7 @@ func VerifyRowsInTablet(t *testing.T, vttablet *Vttablet, ksName string, expecte } // PanicHandler handles the panic in the testcase. -func PanicHandler(t *testing.T) { +func PanicHandler(t testing.TB) { err := recover() if t == nil { return @@ -225,26 +225,53 @@ func filterResultWhenRunsForCoverage(input string) string { return result } +func ValidateReplicationIsHealthy(t *testing.T, tablet *Vttablet) bool { + query := "show replica status" + rs, err := tablet.VttabletProcess.QueryTablet(query, "", true) + assert.NoError(t, err) + row := rs.Named().Row() + require.NotNil(t, row) + + ioRunning := row.AsString("Replica_IO_Running", "") + require.NotEmpty(t, ioRunning) + ioHealthy := assert.Equalf(t, "Yes", ioRunning, "Replication is broken. 
Replication status: %v", row) + sqlRunning := row.AsString("Replica_SQL_Running", "") + require.NotEmpty(t, sqlRunning) + sqlHealthy := assert.Equalf(t, "Yes", sqlRunning, "Replication is broken. Replication status: %v", row) + + return ioHealthy && sqlHealthy +} + // WaitForReplicationPos will wait for replication position to catch-up -func WaitForReplicationPos(t *testing.T, tabletA *Vttablet, tabletB *Vttablet, hostname string, timeout float64) { +func WaitForReplicationPos(t *testing.T, tabletA *Vttablet, tabletB *Vttablet, validateReplication bool, timeout time.Duration) { + hostname := "localhost" + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + replicationPosA, _ := GetPrimaryPosition(t, *tabletA, hostname) for { + if validateReplication { + if !ValidateReplicationIsHealthy(t, tabletB) { + assert.FailNowf(t, "Replication broken on tablet %v. Will not wait for position", tabletB.Alias) + } + if t.Failed() { + return + } + } replicationPosB, _ := GetPrimaryPosition(t, *tabletB, hostname) if positionAtLeast(t, tabletA, replicationPosB, replicationPosA) { - break + return } msg := fmt.Sprintf("%s's replication position to catch up to %s's;currently at: %s, waiting to catch up to: %s", tabletB.Alias, tabletA.Alias, replicationPosB, replicationPosA) - waitStep(t, msg, timeout, 0.01) - } -} - -func waitStep(t *testing.T, msg string, timeout float64, sleepTime float64) float64 { - timeout = timeout - sleepTime - if timeout < 0.0 { - t.Errorf("timeout waiting for condition '%s'", msg) + select { + case <-ctx.Done(): + assert.FailNowf(t, "Timeout waiting for condition '%s'", msg) + return + case <-ticker.C: + } } - time.Sleep(time.Duration(sleepTime) * time.Second) - return timeout } func positionAtLeast(t *testing.T, tablet *Vttablet, a string, b string) bool { diff --git a/go/test/endtoend/cluster/mysqlctl_process.go 
b/go/test/endtoend/cluster/mysqlctl_process.go index db09546c875..7b118890d81 100644 --- a/go/test/endtoend/cluster/mysqlctl_process.go +++ b/go/test/endtoend/cluster/mysqlctl_process.go @@ -47,6 +47,7 @@ type MysqlctlProcess struct { ExtraArgs []string InitMysql bool SecureTransport bool + MajorVersion int } // InitDb executes mysqlctl command to add cell info @@ -54,8 +55,13 @@ func (mysqlctl *MysqlctlProcess) InitDb() (err error) { args := []string{"--log_dir", mysqlctl.LogDirectory, "--tablet_uid", fmt.Sprintf("%d", mysqlctl.TabletUID), "--mysql_port", fmt.Sprintf("%d", mysqlctl.MySQLPort), - "init", "--", - "--init_db_sql_file", mysqlctl.InitDBFile} + "init", + } + if mysqlctl.MajorVersion < 18 { + args = append(args, "--") + } + + args = append(args, "--init_db_sql_file", mysqlctl.InitDBFile) if *isCoverage { args = append([]string{"--test.coverprofile=" + getCoveragePath("mysql-initdb.out"), "--test.v"}, args...) } @@ -143,16 +149,26 @@ ssl_key={{.ServerKey}} } if init { - tmpProcess.Args = append(tmpProcess.Args, "init", "--", - "--init_db_sql_file", mysqlctl.InitDBFile) + tmpProcess.Args = append(tmpProcess.Args, "init") + if mysqlctl.MajorVersion < 18 { + tmpProcess.Args = append(tmpProcess.Args, "--") + } + + tmpProcess.Args = append(tmpProcess.Args, "--init_db_sql_file", mysqlctl.InitDBFile) + } else { + tmpProcess.Args = append(tmpProcess.Args, "start") } + } else { + tmpProcess.Args = append(tmpProcess.Args, "start") } - tmpProcess.Args = append(tmpProcess.Args, "start") + tmpProcess.Env = append(tmpProcess.Env, os.Environ()...) + tmpProcess.Env = append(tmpProcess.Env, DefaultVttestEnv) log.Infof("Starting mysqlctl with command: %v", tmpProcess.Args) return tmpProcess, tmpProcess.Start() } -// Stop executes mysqlctl command to stop mysql instance and kills the mysql instance if it doesn't shutdown in 30 seconds. +// Stop executes mysqlctl command to stop mysql instance and kills the mysql instance +// if it doesn't shutdown in 30 seconds. 
func (mysqlctl *MysqlctlProcess) Stop() (err error) { log.Infof("Shutting down MySQL: %d", mysqlctl.TabletUID) defer log.Infof("MySQL shutdown complete: %d", mysqlctl.TabletUID) @@ -188,6 +204,21 @@ func (mysqlctl *MysqlctlProcess) Stop() (err error) { if err != nil { return err } + // We first need to try and kill any associated mysqld_safe process or + // else it will immediately restart the mysqld process when we kill it. + mspidb, err := exec.Command("sh", "-c", + fmt.Sprintf("ps auxww | grep -E 'mysqld_safe|mariadbd-safe' | grep vt_%010d | awk '{print $2}'", mysqlctl.TabletUID)).Output() + if err != nil { + return err + } + mysqldSafePID, err := strconv.Atoi(strings.TrimSpace(string(mspidb))) + // If we found a valid associated mysqld_safe process then let's kill + // it first. + if err == nil && mysqldSafePID > 0 { + if err = syscall.Kill(mysqldSafePID, syscall.SIGKILL); err != nil { + return err + } + } return syscall.Kill(pid, syscall.SIGKILL) } @@ -238,11 +269,17 @@ func MysqlCtlProcessInstanceOptionalInit(tabletUID int, mySQLPort int, tmpDirect if err != nil { return nil, err } + + version, err := GetMajorVersion("mysqlctl") + if err != nil { + log.Warningf("failed to get major mysqlctl version; backwards-compatibility for CLI changes may not work: %s", err) + } mysqlctl := &MysqlctlProcess{ Name: "mysqlctl", Binary: "mysqlctl", LogDirectory: tmpDirectory, InitDBFile: initFile, + MajorVersion: version, } mysqlctl.MySQLPort = mySQLPort mysqlctl.TabletUID = tabletUID diff --git a/go/test/endtoend/cluster/mysqlctld_process.go b/go/test/endtoend/cluster/mysqlctld_process.go index e710bf1ca3d..6f0a3513cb5 100644 --- a/go/test/endtoend/cluster/mysqlctld_process.go +++ b/go/test/endtoend/cluster/mysqlctld_process.go @@ -35,6 +35,7 @@ type MysqlctldProcess struct { Name string Binary string LogDirectory string + ErrorLog string Password string TabletUID int MySQLPort int @@ -43,17 +44,24 @@ type MysqlctldProcess struct { process *exec.Cmd exit chan error 
InitMysql bool + SocketFile string exitSignalReceived bool } // InitDb executes mysqlctld command to add cell info func (mysqlctld *MysqlctldProcess) InitDb() (err error) { - tmpProcess := exec.Command( - mysqlctld.Binary, + args := []string{ "--log_dir", mysqlctld.LogDirectory, "--tablet_uid", fmt.Sprintf("%d", mysqlctld.TabletUID), "--mysql_port", fmt.Sprintf("%d", mysqlctld.MySQLPort), "--init_db_sql_file", mysqlctld.InitDBFile, + } + if mysqlctld.SocketFile != "" { + args = append(args, "--socket_file", mysqlctld.SocketFile) + } + tmpProcess := exec.Command( + mysqlctld.Binary, + args..., ) return tmpProcess.Run() } @@ -64,11 +72,17 @@ func (mysqlctld *MysqlctldProcess) Start() error { return fmt.Errorf("process is already running") } _ = createDirectory(mysqlctld.LogDirectory, 0700) - tempProcess := exec.Command( - mysqlctld.Binary, + args := []string{ "--log_dir", mysqlctld.LogDirectory, "--tablet_uid", fmt.Sprintf("%d", mysqlctld.TabletUID), "--mysql_port", fmt.Sprintf("%d", mysqlctld.MySQLPort), + } + if mysqlctld.SocketFile != "" { + args = append(args, "--socket_file", mysqlctld.SocketFile) + } + tempProcess := exec.Command( + mysqlctld.Binary, + args..., ) tempProcess.Args = append(tempProcess.Args, mysqlctld.ExtraArgs...) @@ -82,8 +96,10 @@ func (mysqlctld *MysqlctldProcess) Start() error { tempProcess.Stderr = errFile tempProcess.Env = append(tempProcess.Env, os.Environ()...) 
+ tempProcess.Env = append(tempProcess.Env, DefaultVttestEnv) tempProcess.Stdout = os.Stdout tempProcess.Stderr = os.Stderr + mysqlctld.ErrorLog = errFile.Name() log.Infof("%v", strings.Join(tempProcess.Args, " ")) @@ -98,6 +114,12 @@ func (mysqlctld *MysqlctldProcess) Start() error { go func(mysqlctld *MysqlctldProcess) { err := mysqlctld.process.Wait() if !mysqlctld.exitSignalReceived { + errBytes, ferr := os.ReadFile(mysqlctld.ErrorLog) + if ferr == nil { + log.Errorf("mysqlctld error log contents:\n%s", string(errBytes)) + } else { + log.Errorf("Failed to read the mysqlctld error log file %q: %v", mysqlctld.ErrorLog, ferr) + } fmt.Printf("mysqlctld stopped unexpectedly, tabletUID %v, mysql port %v, PID %v\n", mysqlctld.TabletUID, mysqlctld.MySQLPort, mysqlctld.process.Process.Pid) } mysqlctld.process = nil diff --git a/go/test/endtoend/cluster/topo_process.go b/go/test/endtoend/cluster/topo_process.go index 7326aa57a52..45a2e6586fa 100644 --- a/go/test/endtoend/cluster/topo_process.go +++ b/go/test/endtoend/cluster/topo_process.go @@ -17,8 +17,10 @@ limitations under the License. package cluster import ( + "context" "encoding/json" "fmt" + "net" "net/http" "os" "os/exec" @@ -27,7 +29,10 @@ import ( "syscall" "time" + clientv3 "go.etcd.io/etcd/client/v3" + "vitess.io/vitess/go/vt/log" + vtopo "vitess.io/vitess/go/vt/topo" ) // TopoProcess is a generic handle for a running Topo service . @@ -37,6 +42,7 @@ type TopoProcess struct { Binary string DataDirectory string LogDirectory string + ErrorLog string ListenClientURL string AdvertiseClientURL string Port int @@ -44,6 +50,7 @@ type TopoProcess struct { VerifyURL string PeerURL string ZKPorts string + Client interface{} proc *exec.Cmd exit chan error @@ -57,10 +64,9 @@ func (topo *TopoProcess) Setup(topoFlavor string, cluster *LocalProcessCluster) case "consul": return topo.SetupConsul(cluster) default: - // We still rely on the etcd v2 API for things like mkdir. 
- // If this ENV var is not set then some tests may fail with etcd 3.4+ - // where the v2 API is disabled by default in both the client and server. - os.Setenv("ETCDCTL_API", "2") + // Override any inherited ETCDCTL_API env value to + // ensure that we use the v3 API and storage. + os.Setenv("ETCDCTL_API", "3") return topo.SetupEtcd() } } @@ -77,7 +83,6 @@ func (topo *TopoProcess) SetupEtcd() (err error) { "--initial-advertise-peer-urls", topo.PeerURL, "--listen-peer-urls", topo.PeerURL, "--initial-cluster", fmt.Sprintf("%s=%s", topo.Name, topo.PeerURL), - "--enable-v2=true", ) err = createDirectory(topo.DataDirectory, 0700) @@ -90,8 +95,10 @@ func (topo *TopoProcess) SetupEtcd() (err error) { } topo.proc.Stderr = errFile + topo.ErrorLog = errFile.Name() topo.proc.Env = append(topo.proc.Env, os.Environ()...) + topo.proc.Env = append(topo.proc.Env, DefaultVttestEnv) log.Infof("Starting etcd with command: %v", strings.Join(topo.proc.Args, " ")) @@ -109,10 +116,24 @@ func (topo *TopoProcess) SetupEtcd() (err error) { timeout := time.Now().Add(60 * time.Second) for time.Now().Before(timeout) { if topo.IsHealthy() { + cli, cerr := clientv3.New(clientv3.Config{ + Endpoints: []string{net.JoinHostPort(topo.Host, fmt.Sprintf("%d", topo.Port))}, + DialTimeout: 5 * time.Second, + }) + if cerr != nil { + return err + } + topo.Client = cli return } select { case err := <-topo.exit: + errBytes, ferr := os.ReadFile(topo.ErrorLog) + if ferr == nil { + log.Errorf("%s error log contents:\n%s", topo.Binary, string(errBytes)) + } else { + log.Errorf("Failed to read the %s error log file %q: %v", topo.Binary, topo.ErrorLog, ferr) + } return fmt.Errorf("process '%s' exited prematurely (err: %s)", topo.Binary, err) default: time.Sleep(300 * time.Millisecond) @@ -125,7 +146,6 @@ func (topo *TopoProcess) SetupEtcd() (err error) { // SetupZookeeper spawns a new zookeeper topo service and initializes it with the defaults. 
// The service is kept running in the background until TearDown() is called. func (topo *TopoProcess) SetupZookeeper(cluster *LocalProcessCluster) (err error) { - host, err := os.Hostname() if err != nil { return @@ -171,7 +191,6 @@ type PortsInfo struct { // SetupConsul spawns a new consul service and initializes it with the defaults. // The service is kept running in the background until TearDown() is called. func (topo *TopoProcess) SetupConsul(cluster *LocalProcessCluster) (err error) { - topo.VerifyURL = fmt.Sprintf("http://%s:%d/v1/kv/?keys", topo.Host, topo.Port) _ = os.MkdirAll(topo.LogDirectory, os.ModePerm) @@ -247,8 +266,16 @@ func (topo *TopoProcess) SetupConsul(cluster *LocalProcessCluster) (err error) { return fmt.Errorf("process '%s' timed out after 60s (err: %s)", topo.Binary, <-topo.exit) } -// TearDown shutdowns the running topo service +// TearDown shutdowns the running topo service. func (topo *TopoProcess) TearDown(Cell string, originalVtRoot string, currentRoot string, keepdata bool, topoFlavor string) error { + if topo.Client != nil { + switch cli := topo.Client.(type) { + case *clientv3.Client: + _ = cli.Close() + default: + log.Errorf("Unknown topo client type %T", cli) + } + } if topoFlavor == "zk2" { cmd := "shutdown" @@ -324,6 +351,9 @@ func (topo *TopoProcess) ManageTopoDir(command string, directory string) (err er url := topo.VerifyURL + directory payload := strings.NewReader(`{"dir":"true"}`) if command == "mkdir" { + if *topoFlavor == "etcd2" { // No need to create the empty prefix keys in v3 + return nil + } req, _ := http.NewRequest("PUT", url, payload) req.Header.Add("content-type", "application/json") resp, err := http.DefaultClient.Do(req) @@ -332,6 +362,22 @@ func (topo *TopoProcess) ManageTopoDir(command string, directory string) (err er } return err } else if command == "rmdir" { + if *topoFlavor == "etcd2" { + if topo.Client == nil { + return fmt.Errorf("etcd client is not initialized") + } + cli, ok := 
topo.Client.(*clientv3.Client) + if !ok { + return fmt.Errorf("etcd client is invalid") + } + ctx, cancel := context.WithTimeout(context.Background(), vtopo.RemoteOperationTimeout) + defer cancel() + _, err = cli.Delete(ctx, directory, clientv3.WithPrefix()) + if err != nil { + return err + } + return nil + } req, _ := http.NewRequest("DELETE", url+"?dir=true", payload) resp, err := http.DefaultClient.Do(req) if err == nil { @@ -366,7 +412,7 @@ func TopoProcessInstance(port int, peerPort int, hostname string, flavor string, topo.ListenClientURL = fmt.Sprintf("http://%s:%d", topo.Host, topo.Port) topo.DataDirectory = path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("%s_%d", "topo", port)) topo.LogDirectory = path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("%s_%d", "topo", port), "logs") - topo.VerifyURL = fmt.Sprintf("http://%s:%d/v2/keys", topo.Host, topo.Port) + topo.VerifyURL = fmt.Sprintf("http://%s:%d/health", topo.Host, topo.Port) topo.PeerURL = fmt.Sprintf("http://%s:%d", hostname, peerPort) return topo } diff --git a/go/test/endtoend/cluster/vtbackup_process.go b/go/test/endtoend/cluster/vtbackup_process.go index be75026bf0d..57350922a21 100644 --- a/go/test/endtoend/cluster/vtbackup_process.go +++ b/go/test/endtoend/cluster/vtbackup_process.go @@ -69,8 +69,7 @@ func (vtbackup *VtbackupProcess) Setup() (err error) { //Backup Arguments are not optional "--backup_storage_implementation", "file", - "--file_backup_storage_root", - path.Join(os.Getenv("VTDATAROOT"), "tmp", "backupstorage"), + "--file_backup_storage_root", path.Join(os.Getenv("VTDATAROOT"), "tmp", "backupstorage"), ) if vtbackup.initialBackup { @@ -84,6 +83,7 @@ func (vtbackup *VtbackupProcess) Setup() (err error) { vtbackup.proc.Stdout = os.Stdout vtbackup.proc.Env = append(vtbackup.proc.Env, os.Environ()...) 
+ vtbackup.proc.Env = append(vtbackup.proc.Env, DefaultVttestEnv) log.Infof("Running vtbackup with args: %v", strings.Join(vtbackup.proc.Args, " ")) err = vtbackup.proc.Run() diff --git a/go/test/endtoend/cluster/vtctlclient_process.go b/go/test/endtoend/cluster/vtctlclient_process.go index cf75ec1a5fa..0c5fb1bc8c2 100644 --- a/go/test/endtoend/cluster/vtctlclient_process.go +++ b/go/test/endtoend/cluster/vtctlclient_process.go @@ -44,6 +44,7 @@ type VtctlClientParams struct { MigrationContext string UUIDList string CallerID string + BatchSize int } // InitShardPrimary executes vtctlclient command to make specified tablet the primary for the shard. @@ -87,7 +88,9 @@ func (vtctlclient *VtctlClientProcess) ApplySchemaWithOutput(Keyspace string, SQ if params.UUIDList != "" { args = append(args, "--uuid_list", params.UUIDList) } - + if params.BatchSize > 0 { + args = append(args, "--batch_size", fmt.Sprintf("%d", params.BatchSize)) + } if params.CallerID != "" { args = append(args, "--caller_id", params.CallerID) } diff --git a/go/test/endtoend/cluster/vtctld_process.go b/go/test/endtoend/cluster/vtctld_process.go index 5e85f172ce1..d0b2e5ab93e 100644 --- a/go/test/endtoend/cluster/vtctld_process.go +++ b/go/test/endtoend/cluster/vtctld_process.go @@ -39,6 +39,7 @@ type VtctldProcess struct { BackupStorageImplementation string FileBackupStorageRoot string LogDir string + ErrorLog string Port int GrpcPort int VerifyURL string @@ -65,6 +66,14 @@ func (vtctld *VtctldProcess) Setup(cell string, extraArgs ...string) (err error) "--port", fmt.Sprintf("%d", vtctld.Port), "--grpc_port", fmt.Sprintf("%d", vtctld.GrpcPort), ) + + if v, err := GetMajorVersion("vtctld"); err != nil { + return err + } else if v >= 18 { + vtctld.proc.Args = append(vtctld.proc.Args, "--bind-address", "127.0.0.1") + vtctld.proc.Args = append(vtctld.proc.Args, "--grpc_bind_address", "127.0.0.1") + } + if *isCoverage { vtctld.proc.Args = append(vtctld.proc.Args, 
"--test.coverprofile="+getCoveragePath("vtctld.out")) } @@ -72,8 +81,10 @@ func (vtctld *VtctldProcess) Setup(cell string, extraArgs ...string) (err error) errFile, _ := os.Create(path.Join(vtctld.LogDir, "vtctld-stderr.txt")) vtctld.proc.Stderr = errFile + vtctld.ErrorLog = errFile.Name() vtctld.proc.Env = append(vtctld.proc.Env, os.Environ()...) + vtctld.proc.Env = append(vtctld.proc.Env, DefaultVttestEnv) log.Infof("Starting vtctld with command: %v", strings.Join(vtctld.proc.Args, " ")) @@ -95,6 +106,12 @@ func (vtctld *VtctldProcess) Setup(cell string, extraArgs ...string) (err error) } select { case err := <-vtctld.exit: + errBytes, ferr := os.ReadFile(vtctld.ErrorLog) + if ferr == nil { + log.Errorf("vtctld error log contents:\n%s", string(errBytes)) + } else { + log.Errorf("Failed to read the vtctld error log file %q: %v", vtctld.ErrorLog, ferr) + } return fmt.Errorf("process '%s' exited prematurely (err: %s)", vtctld.Name, err) default: time.Sleep(300 * time.Millisecond) diff --git a/go/test/endtoend/cluster/vtctldclient_process.go b/go/test/endtoend/cluster/vtctldclient_process.go index b3c632a5afe..52e0f985680 100644 --- a/go/test/endtoend/cluster/vtctldclient_process.go +++ b/go/test/endtoend/cluster/vtctldclient_process.go @@ -120,3 +120,13 @@ func (vtctldclient *VtctldClientProcess) CreateKeyspace(keyspaceName string, sid } return err } + +// OnlineDDLShowRecent responds with recent schema migration list +func (vtctldclient *VtctldClientProcess) OnlineDDLShowRecent(Keyspace string) (result string, err error) { + return vtctldclient.ExecuteCommandWithOutput( + "OnlineDDL", + "show", + Keyspace, + "recent", + ) +} diff --git a/go/test/endtoend/cluster/vtgate_process.go b/go/test/endtoend/cluster/vtgate_process.go index 69c34bb6091..5475459b81e 100644 --- a/go/test/endtoend/cluster/vtgate_process.go +++ b/go/test/endtoend/cluster/vtgate_process.go @@ -43,6 +43,7 @@ type VtgateProcess struct { Binary string CommonArg VtctlProcess LogDir string + ErrorLog 
string FileToLogQueries string Port int GrpcPort int @@ -65,7 +66,7 @@ type VtgateProcess struct { exit chan error } -const defaultVtGatePlannerVersion = planbuilder.Gen4CompareV3 +const defaultVtGatePlannerVersion = planbuilder.Gen4 // Setup starts Vtgate process with required arguements func (vtgate *VtgateProcess) Setup() (err error) { @@ -85,6 +86,12 @@ func (vtgate *VtgateProcess) Setup() (err error) { "--service_map", vtgate.ServiceMap, "--mysql_auth_server_impl", vtgate.MySQLAuthServerImpl, } + if v, err := GetMajorVersion("vtgate"); err != nil { + return err + } else if v >= 18 { + args = append(args, "--bind-address", "127.0.0.1") + args = append(args, "--grpc_bind_address", "127.0.0.1") + } // If no explicit mysql_server_version has been specified then we autodetect // the MySQL version that will be used for the test and base the vtgate's // mysql server version on that. @@ -127,6 +134,7 @@ func (vtgate *VtgateProcess) Setup() (err error) { vtgate.proc.Stderr = errFile vtgate.proc.Env = append(vtgate.proc.Env, os.Environ()...) + vtgate.proc.Env = append(vtgate.proc.Env, DefaultVttestEnv) log.Infof("Running vtgate with command: %v", strings.Join(vtgate.proc.Args, " ")) @@ -149,6 +157,12 @@ func (vtgate *VtgateProcess) Setup() (err error) { } select { case err := <-vtgate.exit: + errBytes, ferr := os.ReadFile(vtgate.ErrorLog) + if ferr == nil { + log.Errorf("vtgate error log contents:\n%s", string(errBytes)) + } else { + log.Errorf("Failed to read the vtgate error log file %q: %v", vtgate.ErrorLog, ferr) + } return fmt.Errorf("process '%s' exited prematurely (err: %s)", vtgate.Name, err) default: time.Sleep(300 * time.Millisecond) diff --git a/go/test/endtoend/cluster/vtgr_process.go b/go/test/endtoend/cluster/vtgr_process.go deleted file mode 100644 index 1960e469489..00000000000 --- a/go/test/endtoend/cluster/vtgr_process.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import ( - "fmt" - "os" - "os/exec" - "path" - "strings" - "syscall" - "time" - - "vitess.io/vitess/go/vt/log" -) - -// VtgrProcess represents the vtgr process -type VtgrProcess struct { - VtctlProcess - LogDir string - ExtraArgs []string - clusters []string - config string - grPort int - proc *exec.Cmd - exit chan error -} - -// Start starts vtgr process with required arguements -func (vtgr *VtgrProcess) Start(alias string) (err error) { - /* minimal command line arguments: - $ vtgr --topo_implementation etcd2 \ - --topo_global_server_address localhost:2379 \ - --topo_global_root /vitess/global \ - --clusters_to_watch ks/0 - */ - vtgr.proc = exec.Command( - vtgr.Binary, - "--topo_implementation", vtgr.TopoImplementation, - "--topo_global_server_address", vtgr.TopoGlobalAddress, - "--topo_global_root", vtgr.TopoGlobalRoot, - "--tablet_manager_protocol", "grpc", - "--scan_repair_timeout", "50s", - "--clusters_to_watch", strings.Join(vtgr.clusters, ","), - ) - if vtgr.config != "" { - vtgr.proc.Args = append(vtgr.proc.Args, fmt.Sprintf("--config=%s", vtgr.config)) - } - if vtgr.grPort != 0 { - vtgr.proc.Args = append(vtgr.proc.Args, fmt.Sprintf("--gr_port=%d", vtgr.grPort)) - } - vtgr.proc.Args = append(vtgr.proc.Args, vtgr.ExtraArgs...) - errFile, _ := os.Create(path.Join(vtgr.LogDir, fmt.Sprintf("vtgr-stderr-%v.txt", alias))) - vtgr.proc.Stderr = errFile - vtgr.proc.Env = append(vtgr.proc.Env, os.Environ()...) 
- log.Infof("Running vtgr with command: %v", strings.Join(vtgr.proc.Args, " ")) - err = vtgr.proc.Start() - if err != nil { - return - } - - vtgr.exit = make(chan error) - go func() { - if vtgr.proc != nil { - vtgr.exit <- vtgr.proc.Wait() - close(vtgr.exit) - } - }() - - return nil -} - -// TearDown shuts down the running vtgr service -func (vtgr *VtgrProcess) TearDown() error { - if vtgr.proc == nil || vtgr.exit == nil { - return nil - } - // Attempt graceful shutdown with SIGTERM first - _ = vtgr.proc.Process.Signal(syscall.SIGTERM) - - select { - case <-vtgr.exit: - vtgr.proc = nil - return nil - - case <-time.After(10 * time.Second): - vtgr.proc.Process.Kill() - err := <-vtgr.exit - vtgr.proc = nil - return err - } -} diff --git a/go/test/endtoend/cluster/vtorc_process.go b/go/test/endtoend/cluster/vtorc_process.go index 34c4f3295ab..f80690d8d60 100644 --- a/go/test/endtoend/cluster/vtorc_process.go +++ b/go/test/endtoend/cluster/vtorc_process.go @@ -117,8 +117,14 @@ func (orc *VTOrcProcess) Setup() (err error) { "--instance-poll-time", "1s", // Faster topo information refresh speeds up the tests. This doesn't add any significant load either "--topo-information-refresh-duration", "3s", - "--orc_web_dir", path.Join(os.Getenv("VTROOT"), "web", "vtorc"), ) + + if v, err := GetMajorVersion("vtorc"); err != nil { + return err + } else if v >= 18 { + orc.proc.Args = append(orc.proc.Args, "--bind-address", "127.0.0.1") + } + if *isCoverage { orc.proc.Args = append(orc.proc.Args, "--test.coverprofile="+getCoveragePath("orc.out")) } @@ -133,6 +139,7 @@ func (orc *VTOrcProcess) Setup() (err error) { orc.proc.Stderr = errFile orc.proc.Env = append(orc.proc.Env, os.Environ()...) 
+ orc.proc.Env = append(orc.proc.Env, DefaultVttestEnv) log.Infof("Running vtorc with command: %v", strings.Join(orc.proc.Args, " ")) diff --git a/go/test/endtoend/cluster/vttablet_process.go b/go/test/endtoend/cluster/vttablet_process.go index 11d5d21b3b6..4a0548dfa40 100644 --- a/go/test/endtoend/cluster/vttablet_process.go +++ b/go/test/endtoend/cluster/vttablet_process.go @@ -37,10 +37,10 @@ import ( "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" ) @@ -60,6 +60,7 @@ type VttabletProcess struct { Shard string CommonArg VtctlProcess LogDir string + ErrorLog string TabletHostname string Keyspace string TabletType string @@ -73,6 +74,7 @@ type VttabletProcess struct { QueryzURL string StatusDetailsURL string SupportsBackup bool + ExplicitServingStatus bool ServingStatus string DbPassword string DbPort int @@ -81,7 +83,7 @@ type VttabletProcess struct { Charset string ConsolidationsURL string - //Extra Args to be set before starting the vttablet process + // Extra Args to be set before starting the vttablet process ExtraArgs []string proc *exec.Cmd @@ -109,11 +111,16 @@ func (vttablet *VttabletProcess) Setup() (err error) { "--backup_storage_implementation", vttablet.BackupStorageImplementation, "--file_backup_storage_root", vttablet.FileBackupStorageRoot, "--service_map", vttablet.ServiceMap, - "--vtctld_addr", vttablet.VtctldAddress, - "--vtctld_addr", vttablet.VtctldAddress, "--vreplication_tablet_type", vttablet.VreplicationTabletType, "--db_charset", vttablet.Charset, ) + if v, err := GetMajorVersion("vttablet"); err != nil { + return err + } else if v >= 18 { + vttablet.proc.Args = append(vttablet.proc.Args, "--bind-address", "127.0.0.1") + vttablet.proc.Args = append(vttablet.proc.Args, "--grpc_bind_address", "127.0.0.1") + } + if *isCoverage { vttablet.proc.Args = 
append(vttablet.proc.Args, "--test.coverprofile="+getCoveragePath("vttablet.out")) } @@ -132,8 +139,10 @@ func (vttablet *VttabletProcess) Setup() (err error) { fname := path.Join(vttablet.LogDir, vttablet.TabletPath+"-vttablet-stderr.txt") errFile, _ := os.Create(fname) vttablet.proc.Stderr = errFile + vttablet.ErrorLog = errFile.Name() vttablet.proc.Env = append(vttablet.proc.Env, os.Environ()...) + vttablet.proc.Env = append(vttablet.proc.Env, DefaultVttestEnv) log.Infof("Running vttablet with command: %v", strings.Join(vttablet.proc.Args, " ")) @@ -151,7 +160,15 @@ func (vttablet *VttabletProcess) Setup() (err error) { }() if vttablet.ServingStatus != "" { - if err = vttablet.WaitForTabletStatus(vttablet.ServingStatus); err != nil { + // If the tablet has an explicit serving status we use the serving status + // otherwise we wait for any serving status to show up in the healthcheck. + var servingStatus []string + if vttablet.ExplicitServingStatus { + servingStatus = append(servingStatus, vttablet.ServingStatus) + } else { + servingStatus = append(servingStatus, "SERVING", "NOT_SERVING") + } + if err = vttablet.WaitForTabletStatuses(servingStatus); err != nil { errFileContent, _ := os.ReadFile(fname) if errFileContent != nil { log.Infof("vttablet error:\n%s\n", string(errFileContent)) @@ -299,6 +316,12 @@ func (vttablet *VttabletProcess) WaitForTabletStatusesForTimeout(expectedStatuse } select { case err := <-vttablet.exit: + errBytes, ferr := os.ReadFile(vttablet.ErrorLog) + if ferr == nil { + log.Errorf("vttablet error log contents:\n%s", string(errBytes)) + } else { + log.Errorf("Failed to read the vttablet error log file %q: %v", vttablet.ErrorLog, ferr) + } return fmt.Errorf("process '%s' exited prematurely (err: %s)", vttablet.Name, err) default: time.Sleep(300 * time.Millisecond) @@ -521,7 +544,7 @@ func (vttablet *VttabletProcess) ToggleProfiling() error { // WaitForVReplicationToCatchup waits for "workflow" to finish copying func (vttablet 
*VttabletProcess) WaitForVReplicationToCatchup(t testing.TB, workflow, database string, sidecarDBName string, duration time.Duration) { if sidecarDBName == "" { - sidecarDBName = sidecardb.DefaultName + sidecarDBName = sidecar.DefaultName } // Escape it if/as needed ics := sqlparser.NewIdentifierCS(sidecarDBName) diff --git a/go/test/endtoend/clustertest/etcd_test.go b/go/test/endtoend/clustertest/etcd_test.go index 1f5e548696f..5239d960c47 100644 --- a/go/test/endtoend/clustertest/etcd_test.go +++ b/go/test/endtoend/clustertest/etcd_test.go @@ -18,15 +18,46 @@ package clustertest import ( "fmt" + "net" "testing" + "time" + + "github.com/stretchr/testify/require" + clientv3 "go.etcd.io/etcd/client/v3" "vitess.io/vitess/go/test/endtoend/cluster" ) func TestEtcdServer(t *testing.T) { defer cluster.PanicHandler(t) - etcdURL := fmt.Sprintf("http://%s:%d/v2/keys", clusterInstance.Hostname, clusterInstance.TopoPort) - testURL(t, etcdURL, "generic etcd url") - testURL(t, etcdURL+"/vitess/global", "vitess global key") - testURL(t, etcdURL+"/vitess/zone1", "vitess zone1 key") + + // Confirm the basic etcd cluster health. + etcdHealthURL := fmt.Sprintf("http://%s:%d/health", clusterInstance.Hostname, clusterInstance.TopoPort) + testURL(t, etcdHealthURL, "generic etcd health url") + + // Confirm that we have a working topo server by looking for some + // expected keys. + etcdClientOptions := []clientv3.OpOption{ + clientv3.WithPrefix(), + clientv3.WithKeysOnly(), + clientv3.WithLimit(1), + } + cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{net.JoinHostPort(clusterInstance.TopoProcess.Host, fmt.Sprintf("%d", clusterInstance.TopoProcess.Port))}, + DialTimeout: 5 * time.Second, + }) + require.NoError(t, err) + defer cli.Close() + keyPrefixes := []string{ + // At a minimum, this prefix confirms that we have a functioning + // global topo server with a valid cell from the test env. 
+ fmt.Sprintf("/vitess/global/cells/%s", cell), + } + for _, keyPrefix := range keyPrefixes { + res, err := cli.Get(cli.Ctx(), keyPrefix, etcdClientOptions...) + require.NoError(t, err) + require.NotNil(t, res) + // Confirm that we have at least one key matching the prefix. + require.Greaterf(t, len(res.Kvs), 0, "no keys found matching prefix: %s", keyPrefix) + } } diff --git a/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go b/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go index 4fb53d8f6fb..725659a5ee1 100644 --- a/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go +++ b/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go @@ -25,10 +25,10 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/encryption" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/sidecardb" ) var ( @@ -131,7 +131,7 @@ func initializeCluster(t *testing.T) (int, error) { for _, keyspaceStr := range []string{keyspace} { KeyspacePtr := &cluster.Keyspace{Name: keyspaceStr} keyspace := *KeyspacePtr - if err := clusterInstance.VtctlProcess.CreateKeyspace(keyspace.Name, sidecardb.DefaultName); err != nil { + if err := clusterInstance.VtctlProcess.CreateKeyspace(keyspace.Name, sidecar.DefaultName); err != nil { return 1, err } shard := &cluster.Shard{ diff --git a/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go b/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go index ec4a553849b..b076006ec2c 100644 --- a/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go +++ b/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go @@ -65,10 +65,10 @@ import ( "github.com/pkg/errors" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/test/endtoend/encryption" 
"vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/vterrors" "github.com/stretchr/testify/assert" @@ -350,7 +350,7 @@ func clusterSetUp(t *testing.T) (int, error) { for _, keyspaceStr := range []string{keyspace} { KeyspacePtr := &cluster.Keyspace{Name: keyspaceStr} keyspace := *KeyspacePtr - if err := clusterInstance.VtctlProcess.CreateKeyspace(keyspace.Name, sidecardb.DefaultName); err != nil { + if err := clusterInstance.VtctlProcess.CreateKeyspace(keyspace.Name, sidecar.DefaultName); err != nil { return 1, err } shard := &cluster.Shard{ diff --git a/go/test/endtoend/keyspace/keyspace_test.go b/go/test/endtoend/keyspace/keyspace_test.go index c8ff519910f..338ad5c8cd2 100644 --- a/go/test/endtoend/keyspace/keyspace_test.go +++ b/go/test/endtoend/keyspace/keyspace_test.go @@ -248,7 +248,7 @@ func TestDeleteKeyspace(t *testing.T) { // Start over and this time use recursive DeleteKeyspace to do everything. _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateKeyspace", "test_delete_keyspace") _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace/0") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--", "--port=1234", "--keyspace=test_delete_keyspace", "--shard=0", "zone1-0000000100", "primary") + _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--", "--port=1234", "--bind-address=127.0.0.1", "--keyspace=test_delete_keyspace", "--shard=0", "zone1-0000000100", "primary") // Create the serving/replication entries and check that they exist, // so we can later check they're deleted. 
@@ -279,10 +279,10 @@ func TestDeleteKeyspace(t *testing.T) { _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateKeyspace", "test_delete_keyspace_removekscell") _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace_removekscell/0") _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace_removekscell/1") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--port=1234", "-keyspace=test_delete_keyspace_removekscell", "--shard=0", "zone1-0000000100", "primary") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--port=1234", "-keyspace=test_delete_keyspace_removekscell", "--shard=1", "zone1-0000000101", "primary") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--port=1234", "-keyspace=test_delete_keyspace_removekscell", "--shard=0", "zone2-0000000100", "replica") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--port=1234", "-keyspace=test_delete_keyspace_removekscell", "--shard=1", "zone2-0000000101", "replica") + _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--port=1234", "--bind-address=127.0.0.1", "-keyspace=test_delete_keyspace_removekscell", "--shard=0", "zone1-0000000100", "primary") + _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--port=1234", "--bind-address=127.0.0.1", "-keyspace=test_delete_keyspace_removekscell", "--shard=1", "zone1-0000000101", "primary") + _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--port=1234", "--bind-address=127.0.0.1", "-keyspace=test_delete_keyspace_removekscell", "--shard=0", "zone2-0000000100", "replica") + _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--port=1234", "--bind-address=127.0.0.1", "-keyspace=test_delete_keyspace_removekscell", "--shard=1", "zone2-0000000101", "replica") // Create the serving/replication entries and check that they exist, so we can 
later check they're deleted. _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", "test_delete_keyspace_removekscell") diff --git a/go/test/endtoend/messaging/message_test.go b/go/test/endtoend/messaging/message_test.go index 95ee0b3022f..3082f295055 100644 --- a/go/test/endtoend/messaging/message_test.go +++ b/go/test/endtoend/messaging/message_test.go @@ -30,15 +30,15 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vtgate/vtgateconn" + + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" cmp "vitess.io/vitess/go/test/utils" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/proto/query" - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/vtgateconn" ) var testMessage = "{\"message\": \"hello world\"}" @@ -358,8 +358,8 @@ func getTimeEpoch(qr *sqltypes.Result) (int64, int64) { if len(qr.Rows) != 1 { return 0, 0 } - t, _ := evalengine.ToInt64(qr.Rows[0][0]) - e, _ := evalengine.ToInt64(qr.Rows[0][1]) + t, _ := qr.Rows[0][0].ToCastInt64() + e, _ := qr.Rows[0][1].ToCastInt64() return t, e } @@ -510,9 +510,9 @@ func testMessaging(t *testing.T, name, ks string) { res, err := stream.MessageStream(ks, "", nil, name) require.Nil(t, err) require.Equal(t, 3, len(res.Fields)) - validateField(t, res.Fields[0], "id", query.Type_INT64) - validateField(t, res.Fields[1], "tenant_id", query.Type_INT64) - validateField(t, res.Fields[2], "message", query.Type_JSON) + validateField(t, res.Fields[0], "id", querypb.Type_INT64) + validateField(t, res.Fields[1], "tenant_id", querypb.Type_INT64) + validateField(t, res.Fields[2], "message", querypb.Type_JSON) // validate recieved msgs resMap := make(map[string]string) @@ -554,7 +554,7 
@@ func testMessaging(t *testing.T, name, ks string) { assert.Equal(t, uint64(1), qr.RowsAffected) } -func validateField(t *testing.T, field *query.Field, name string, _type query.Type) { +func validateField(t *testing.T, field *querypb.Field, name string, _type querypb.Type) { assert.Equal(t, name, field.Name) assert.Equal(t, _type, field.Type) } @@ -582,7 +582,7 @@ func VtgateGrpcConn(ctx context.Context, cluster *cluster.LocalProcessCluster) ( } // MessageStream strarts the stream for the corresponding connection. -func (stream *VTGateStream) MessageStream(ks, shard string, keyRange *topodata.KeyRange, name string) (*sqltypes.Result, error) { +func (stream *VTGateStream) MessageStream(ks, shard string, keyRange *topodatapb.KeyRange, name string) (*sqltypes.Result, error) { // start message stream which send received message to the respChan session := stream.Session("@primary", nil) resultStream, err := session.StreamExecute(stream.ctx, fmt.Sprintf("stream * from %s", name), nil) diff --git a/go/test/endtoend/messaging/r b/go/test/endtoend/messaging/r deleted file mode 100755 index 3fd3dd8c7d2..00000000000 --- a/go/test/endtoend/messaging/r +++ /dev/null @@ -1,7 +0,0 @@ -~/scratch/killprocs -rm -rf ~/vtdataroot/* -mkdir -p ~/vtdataroot -mkdir -p ~/vtdataroot/tmp -mkdir -p ~/vtdataroot/ext -mkdir -p ~/vtdataroot/ext/tmp -eatmydata go test -failfast -v --alsologtostderr -run TestReparenting diff --git a/go/test/endtoend/mysqlctl/mysqlctl_test.go b/go/test/endtoend/mysqlctl/mysqlctl_test.go index 121449866cf..3b28c5bcf30 100644 --- a/go/test/endtoend/mysqlctl/mysqlctl_test.go +++ b/go/test/endtoend/mysqlctl/mysqlctl_test.go @@ -25,8 +25,8 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/test/endtoend/cluster" - "vitess.io/vitess/go/vt/sidecardb" ) var ( @@ -53,7 +53,7 @@ func TestMain(m *testing.M) { return 1 } - if err := clusterInstance.VtctlProcess.CreateKeyspace(keyspaceName, sidecardb.DefaultName); 
err != nil { + if err := clusterInstance.VtctlProcess.CreateKeyspace(keyspaceName, sidecar.DefaultName); err != nil { return 1 } diff --git a/go/test/endtoend/mysqlctld/mysqlctld_test.go b/go/test/endtoend/mysqlctld/mysqlctld_test.go index b73efccdba8..908a870d6f0 100644 --- a/go/test/endtoend/mysqlctld/mysqlctld_test.go +++ b/go/test/endtoend/mysqlctld/mysqlctld_test.go @@ -17,15 +17,19 @@ limitations under the License. package mysqlctld import ( + "context" "flag" "fmt" "os" + "path" "testing" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/vt/mysqlctl/mysqlctlclient" + "vitess.io/vitess/go/test/endtoend/cluster" - "vitess.io/vitess/go/vt/sidecardb" ) var ( @@ -52,7 +56,7 @@ func TestMain(m *testing.M) { return 1 } - if err := clusterInstance.VtctlProcess.CreateKeyspace(keyspaceName, sidecardb.DefaultName); err != nil { + if err := clusterInstance.VtctlProcess.CreateKeyspace(keyspaceName, sidecar.DefaultName); err != nil { return 1 } @@ -101,6 +105,7 @@ func initCluster(shardNames []string, totalTabletsRequired int) error { if err != nil { return err } + mysqlctldProcess.SocketFile = path.Join(clusterInstance.TmpDirectory, fmt.Sprintf("mysqlctld_%d.sock", tablet.TabletUID)) tablet.MysqlctldProcess = *mysqlctldProcess err = tablet.MysqlctldProcess.Start() if err != nil { @@ -156,3 +161,11 @@ func TestAutoDetect(t *testing.T) { err = clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, shardName, cell, primaryTablet.TabletUID) require.Nil(t, err, "error should be nil") } + +func TestVersionString(t *testing.T) { + client, err := mysqlctlclient.New("unix", primaryTablet.MysqlctldProcess.SocketFile) + require.NoError(t, err) + version, err := client.VersionString(context.Background()) + require.NoError(t, err) + require.NotEmpty(t, version) +} diff --git a/go/test/endtoend/mysqlserver/mysql_server_test.go b/go/test/endtoend/mysqlserver/mysql_server_test.go index 6cc2d091d0e..caed342688d 100644 --- 
a/go/test/endtoend/mysqlserver/mysql_server_test.go +++ b/go/test/endtoend/mysqlserver/mysql_server_test.go @@ -32,6 +32,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" @@ -116,9 +118,9 @@ func TestTimeout(t *testing.T) { _, err = conn.ExecuteFetch("SELECT SLEEP(5);", 1, false) require.NotNilf(t, err, "quiry timeout error expected") - mysqlErr, ok := err.(*mysql.SQLError) + mysqlErr, ok := err.(*sqlerror.SQLError) require.Truef(t, ok, "invalid error type") - assert.Equal(t, mysql.ERQueryInterrupted, mysqlErr.Number(), err) + assert.Equal(t, sqlerror.ERQueryInterrupted, mysqlErr.Number(), err) } // TestInvalidField tries to fetch invalid column and verifies the error. @@ -132,9 +134,9 @@ func TestInvalidField(t *testing.T) { _, err = conn.ExecuteFetch("SELECT invalid_field from vt_insert_test;", 1, false) require.NotNil(t, err, "invalid field error expected") - mysqlErr, ok := err.(*mysql.SQLError) + mysqlErr, ok := err.(*sqlerror.SQLError) require.Truef(t, ok, "invalid error type") - assert.Equal(t, mysql.ERBadFieldError, mysqlErr.Number(), err) + assert.Equal(t, sqlerror.ERBadFieldError, mysqlErr.Number(), err) } // TestWarnings validates the behaviour of SHOW WARNINGS. 
diff --git a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go index dd0b6d84a53..bd2e34ff3ba 100644 --- a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go +++ b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go @@ -154,8 +154,15 @@ func TestMain(m *testing.M) { "--heartbeat_interval", "250ms", "--heartbeat_on_demand_duration", "5s", "--migration_check_interval", "5s", - "--queryserver-config-schema-change-signal-interval", "0.1", "--watch_replication_stream", + // The next flags are deprecated, and we incldue them to verify that they are nonetheless still allowed. + // The values are irrelevant. Just the fact that the flags are allowed in what's important. + // These should be included in v18, and removed in v19. + "--throttle_threshold", "1m", + "--throttle_metrics_query", "select 1 from dual", + "--throttle_metrics_threshold", "1.5", + "--throttle_check_as_check_self=false", + "--throttler-config-via-topo=true", } clusterInstance.VtGateExtraArgs = []string{ "--ddl_strategy", "online", @@ -764,11 +771,21 @@ func testRevert(t *testing.T) { defer wg.Done() runMultipleConnections(ctx, t) }() - uuid := testOnlineDDLStatementForTable(t, fmt.Sprintf(alterHintStatement, hint), "online", "vtgate", hint) - uuids = append(uuids, uuid) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - cancel() // will cause runMultipleConnections() to terminate - wg.Wait() + + func() { + // Ensures runMultipleConnections completes before the overall + // test does, even in the face of calls to t.FailNow() in the + // main goroutine, which still executes deferred functions + defer func() { + cancel() // will cause runMultipleConnections() to terminate + wg.Wait() + }() + + uuid := testOnlineDDLStatementForTable(t, fmt.Sprintf(alterHintStatement, hint), "online", "vtgate", hint) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, 
shards, uuid, schema.OnlineDDLStatusComplete) + }() + testSelectTableMetrics(t) }) } @@ -783,11 +800,20 @@ func testRevert(t *testing.T) { defer wg.Done() runMultipleConnections(ctx, t) }() - uuid := testRevertMigration(t, uuids[len(uuids)-1], ddlStrategy) - uuids = append(uuids, uuid) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - cancel() // will cause runMultipleConnections() to terminate - wg.Wait() + + func() { + // Ensures runMultipleConnections completes before the overall + // test does, even in the face of calls to t.FailNow() in the + // main goroutine, which still executes deferred functions + defer func() { + cancel() // will cause runMultipleConnections() to terminate + wg.Wait() + }() + + uuid := testRevertMigration(t, uuids[len(uuids)-1], ddlStrategy) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + }() checkMigratedTable(t, tableName, alterHints[0]) testSelectTableMetrics(t) }) @@ -802,11 +828,20 @@ func testRevert(t *testing.T) { defer wg.Done() runMultipleConnections(ctx, t) }() - uuid := testRevertMigration(t, uuids[len(uuids)-1], ddlStrategy) - uuids = append(uuids, uuid) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - cancel() // will cause runMultipleConnections() to terminate - wg.Wait() + + func() { + // Ensures runMultipleConnections completes before the overall + // test does, even in the face of calls to t.FailNow() in the + // main goroutine, which still executes deferred functions + defer func() { + cancel() // will cause runMultipleConnections() to terminate + wg.Wait() + }() + + uuid := testRevertMigration(t, uuids[len(uuids)-1], ddlStrategy) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + }() checkMigratedTable(t, tableName, alterHints[1]) testSelectTableMetrics(t) }) @@ -821,11 +856,20 @@ 
func testRevert(t *testing.T) { defer wg.Done() runMultipleConnections(ctx, t) }() - uuid := testRevertMigration(t, uuids[len(uuids)-1], ddlStrategy) - uuids = append(uuids, uuid) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - cancel() // will cause runMultipleConnections() to terminate - wg.Wait() + + func() { + // Ensures runMultipleConnections completes before the overall + // test does, even in the face of calls to t.FailNow() in the + // main goroutine, which still executes deferred functions + defer func() { + cancel() // will cause runMultipleConnections() to terminate + wg.Wait() + }() + + uuid := testRevertMigration(t, uuids[len(uuids)-1], ddlStrategy) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + }() checkMigratedTable(t, tableName, alterHints[0]) testSelectTableMetrics(t) }) @@ -839,6 +883,15 @@ func testRevert(t *testing.T) { defer wg.Done() runMultipleConnections(ctx, t) }() + + // Ensures runMultipleConnections completes before the overall + // test does, even in the face of calls to t.FailNow() in the + // main goroutine, which still executes deferred functions + defer func() { + cancel() // will cause runMultipleConnections() to terminate + wg.Wait() + }() + uuid := testRevertMigration(t, uuids[len(uuids)-1], ddlStrategy+" --postpone-completion") uuids = append(uuids, uuid) // Should be still running! 
@@ -849,8 +902,6 @@ func testRevert(t *testing.T) { status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, 60*time.Second, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - cancel() // will cause runMultipleConnections() to terminate - wg.Wait() } t.Run("postponed revert", func(t *testing.T) { testPostponedRevert(t, schema.OnlineDDLStatusRunning) diff --git a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go index 4030b3a9184..e471931a20c 100644 --- a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go +++ b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go @@ -35,6 +35,7 @@ import ( "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/onlineddl" @@ -274,6 +275,9 @@ func TestSchemaChange(t *testing.T) { t.Run("summary: validate sequential migration IDs", func(t *testing.T) { onlineddl.ValidateSequentialMigrationIDs(t, &vtParams, shards) }) + t.Run("summary: validate completed_timestamp", func(t *testing.T) { + onlineddl.ValidateCompletedTimestamp(t, &vtParams) + }) } func testScheduler(t *testing.T) { @@ -533,7 +537,7 @@ func testScheduler(t *testing.T) { testTableSequentialTimes(t, t1uuid, t2uuid) }) - t.Run("ALTER both tables, elligible for concurrenct", func(t *testing.T) { + t.Run("ALTER both tables, elligible for concurrent", func(t *testing.T) { // ALTER TABLE is allowed to run concurrently when no other ALTER is busy with copy state. 
Our tables are tiny so we expect to find both migrations running t1uuid = testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy+" --allow-concurrent --postpone-completion", "vtgate", "", "", true)) // skip wait t2uuid = testOnlineDDLStatement(t, createParams(trivialAlterT2Statement, ddlStrategy+" --allow-concurrent --postpone-completion", "vtgate", "", "", true)) // skip wait @@ -568,9 +572,11 @@ func testScheduler(t *testing.T) { }) testTableCompletionTimes(t, t2uuid, t1uuid) }) - t.Run("ALTER both tables, elligible for concurrenct, with throttling", func(t *testing.T) { + t.Run("ALTER both tables, elligible for concurrent, with throttling", func(t *testing.T) { onlineddl.ThrottleAllMigrations(t, &vtParams) defer onlineddl.UnthrottleAllMigrations(t, &vtParams) + onlineddl.CheckThrottledApps(t, &vtParams, throttlerapp.OnlineDDLName, true) + // ALTER TABLE is allowed to run concurrently when no other ALTER is busy with copy state. Our tables are tiny so we expect to find both migrations running t1uuid = testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy+" -allow-concurrent -postpone-completion", "vtgate", "", "", true)) // skip wait t2uuid = testOnlineDDLStatement(t, createParams(trivialAlterT2Statement, ddlStrategy+" -allow-concurrent -postpone-completion", "vtgate", "", "", true)) // skip wait @@ -587,6 +593,7 @@ func testScheduler(t *testing.T) { onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusRunning) onlineddl.CheckMigrationStatus(t, &vtParams, shards, t2uuid, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady) }) + t.Run("check ready to complete (before)", func(t *testing.T) { for _, uuid := range []string{t1uuid, t2uuid} { waitForReadyToComplete(t, uuid, false) @@ -627,6 +634,8 @@ func testScheduler(t *testing.T) { testTableCompletionTimes(t, t2uuid, t1uuid) }) + onlineddl.CheckThrottledApps(t, &vtParams, throttlerapp.OnlineDDLName, false) + t.Run("REVERT both tables 
concurrent, postponed", func(t *testing.T) { t1uuid = testRevertMigration(t, createRevertParams(t1uuid, ddlStrategy+" --allow-concurrent --postpone-completion", "vtgate", "", true)) t2uuid = testRevertMigration(t, createRevertParams(t2uuid, ddlStrategy+" --allow-concurrent --postpone-completion", "vtgate", "", true)) @@ -875,6 +884,60 @@ func testScheduler(t *testing.T) { }) }) + t.Run("Cleanup artifacts", func(t *testing.T) { + // Create a migration with a low --retain-artifacts value. + // We will cancel the migration and expect the artifact to be cleaned. + t.Run("start migration", func(t *testing.T) { + t1uuid = testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy+" --postpone-completion --retain-artifacts=1s", "vtctl", "", "", true)) // skip wait + onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusRunning) + }) + var artifacts []string + t.Run("validate artifact exists", func(t *testing.T) { + rs := onlineddl.ReadMigrations(t, &vtParams, t1uuid) + require.NotNil(t, rs) + row := rs.Named().Row() + require.NotNil(t, row) + + artifacts = textutil.SplitDelimitedList(row.AsString("artifacts", "")) + assert.NotEmpty(t, artifacts) + assert.Equal(t, 1, len(artifacts)) + checkTable(t, artifacts[0], true) + + retainArtifactsSeconds := row.AsInt64("retain_artifacts_seconds", 0) + assert.Equal(t, int64(1), retainArtifactsSeconds) // due to --retain-artifacts=1s + }) + t.Run("cancel migration", func(t *testing.T) { + onlineddl.CheckCancelMigration(t, &vtParams, shards, t1uuid, true) + status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled) + fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusCancelled) + }) + t.Run("wait for cleanup", func(t *testing.T) { + ctx, cancel := 
context.WithTimeout(context.Background(), normalWaitTime) + defer cancel() + + for { + rs := onlineddl.ReadMigrations(t, &vtParams, t1uuid) + require.NotNil(t, rs) + row := rs.Named().Row() + require.NotNil(t, row) + if !row["cleanup_timestamp"].IsNull() { + // This is what we've been waiting for + break + } + select { + case <-ctx.Done(): + assert.Fail(t, "timeout waiting for cleanup") + return + case <-time.After(time.Second): + } + } + }) + t.Run("validate artifact does not exist", func(t *testing.T) { + checkTable(t, artifacts[0], false) + }) + }) + // INSTANT DDL instantDDLCapable, err := capableOf(mysql.InstantAddLastColumnFlavorCapability) require.NoError(t, err) diff --git a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go index b012cd4f074..49e72eda290 100644 --- a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go +++ b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go @@ -19,7 +19,6 @@ package vrepl import ( "flag" "fmt" - "io" "os" "path" "strings" @@ -27,24 +26,24 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/onlineddl" "vitess.io/vitess/go/test/endtoend/throttler" "vitess.io/vitess/go/vt/schema" - throttlebase "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) var ( clusterInstance *cluster.LocalProcessCluster shards []cluster.Shard vtParams mysql.ConnParams - httpClient = throttlebase.SetupHTTPClient(time.Second) normalMigrationWait = 45 * time.Second extendedMigrationWait = 60 * time.Second @@ -150,6 +149,12 @@ var ( ` ) +const ( + customThreshold = 5 + 
throttlerEnabledTimeout = 60 * time.Second + noCustomQuery = "" +) + func TestMain(m *testing.M) { defer cluster.PanicHandler(nil) flag.Parse() @@ -192,7 +197,6 @@ func TestMain(m *testing.M) { if err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, false); err != nil { return 1, err } - vtgateInstance := clusterInstance.NewVtgateInstance() // Start vtgate if err := vtgateInstance.Setup(); err != nil { @@ -216,29 +220,6 @@ func TestMain(m *testing.M) { } -// direct per-tablet throttler API instruction -func throttleResponse(tablet *cluster.Vttablet, path string) (respBody string, err error) { - apiURL := fmt.Sprintf("http://%s:%d/%s", tablet.VttabletProcess.TabletHostname, tablet.HTTPPort, path) - resp, err := httpClient.Get(apiURL) - if err != nil { - return "", err - } - defer resp.Body.Close() - b, err := io.ReadAll(resp.Body) - respBody = string(b) - return respBody, err -} - -// direct per-tablet throttler API instruction -func throttleApp(tablet *cluster.Vttablet, throttlerApp throttlerapp.Name) (string, error) { - return throttleResponse(tablet, fmt.Sprintf("throttler/throttle-app?app=%s&duration=1h", throttlerApp)) -} - -// direct per-tablet throttler API instruction -func unthrottleApp(tablet *cluster.Vttablet, throttlerApp throttlerapp.Name) (string, error) { - return throttleResponse(tablet, fmt.Sprintf("throttler/unthrottle-app?app=%s", throttlerApp)) -} - func TestSchemaChange(t *testing.T) { defer cluster.PanicHandler(t) @@ -257,16 +238,34 @@ func TestSchemaChange(t *testing.T) { err := clusterInstance.WaitForTabletsToHealthyInVtgate() require.NoError(t, err) - _, err = throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, 0, "") - require.NoError(t, err) + t.Run("WaitForSrvKeyspace", func(t *testing.T) { + for _, ks := range clusterInstance.Keyspaces { + t.Run(ks.Name, func(t *testing.T) { + err := throttler.WaitForSrvKeyspace(clusterInstance, cell, ks.Name) + require.NoError(t, err) + }) + } + }) + t.Run("updating 
throttler config", func(t *testing.T) { + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, customThreshold, noCustomQuery, nil) + require.NoError(t, err) + }) - for _, ks := range clusterInstance.Keyspaces { - for _, shard := range ks.Shards { - for _, tablet := range shard.Vttablets { - throttler.WaitForThrottlerStatusEnabled(t, tablet, true, nil, extendedMigrationWait) - } + t.Run("checking throttler config", func(t *testing.T) { + for _, ks := range clusterInstance.Keyspaces { + t.Run(ks.Name, func(t *testing.T) { + for _, shard := range ks.Shards { + t.Run(shard.Name, func(t *testing.T) { + for _, tablet := range shard.Vttablets { + t.Run(tablet.Alias, func(t *testing.T) { + throttler.WaitForThrottlerStatusEnabled(t, tablet, true, &throttler.Config{Query: throttler.DefaultQuery, Threshold: customThreshold}, throttlerEnabledTimeout) + }) + } + }) + } + }) } - } + }) testWithInitialSchema(t) t.Run("alter non_online", func(t *testing.T) { @@ -412,18 +411,9 @@ func TestSchemaChange(t *testing.T) { var uuid string func() { - for _, shard := range shards { - // technically we only need to throttle on a REPLICA, because that's the - // vstreamer source; but it's OK to be on the safe side and throttle on all tablets. Doesn't - // change the essence of this test. 
- for _, tablet := range shard.Vttablets { - body, err := throttleApp(tablet, throttlerapp.VStreamerName) - defer unthrottleApp(tablet, throttlerapp.VStreamerName) - - assert.NoError(t, err) - assert.Contains(t, body, throttlerapp.VStreamerName) - } - } + _, err := throttler.ThrottleAppAndWaitUntilTabletsConfirm(t, clusterInstance, throttlerapp.VStreamerName) + defer throttler.UnthrottleAppAndWaitUntilTabletsConfirm(t, clusterInstance, throttlerapp.VStreamerName) + require.NoError(t, err) uuid = testOnlineDDLStatement(t, alterTableTrivialStatement, "vitess", providedUUID, providedMigrationContext, "vtgate", "test_val", "", true) _ = onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalMigrationWait, schema.OnlineDDLStatusRunning) @@ -520,24 +510,10 @@ func TestSchemaChange(t *testing.T) { t.Run(fmt.Sprintf("PlannedReparentShard via throttling %d/2", (currentPrimaryTabletIndex+1)), func(t *testing.T) { insertRows(t, 2) - for i := range shards { - var body string - var err error - switch i { - case 0: - // this is the shard where we run PRS - // Use per-tablet throttling API - body, err = throttleApp(currentPrimaryTablet, throttlerapp.OnlineDDLName) - defer unthrottleApp(currentPrimaryTablet, throttlerapp.OnlineDDLName) - case 1: - // no PRS on this shard - // Use per-tablet throttling API - body, err = throttleApp(shards[i].Vttablets[0], throttlerapp.OnlineDDLName) - defer unthrottleApp(shards[i].Vttablets[0], throttlerapp.OnlineDDLName) - } - assert.NoError(t, err) - assert.Contains(t, body, throttlerapp.OnlineDDLName) - } + _, err = throttler.ThrottleAppAndWaitUntilTabletsConfirm(t, clusterInstance, throttlerapp.OnlineDDLName) + assert.NoError(t, err) + defer throttler.UnthrottleAppAndWaitUntilTabletsConfirm(t, clusterInstance, throttlerapp.OnlineDDLName) + uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "vitess", providedUUID, providedMigrationContext, "vtgate", "test_val", "", true) t.Run("wait for migration to run", func(t 
*testing.T) { @@ -545,12 +521,12 @@ func TestSchemaChange(t *testing.T) { onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning) }) t.Run("wait for vreplication to run on shard -80", func(t *testing.T) { - vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, currentPrimaryTablet, uuid, normalMigrationWait, "Copying", "Running") - require.Contains(t, []string{"Copying", "Running"}, vreplStatus) + vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, currentPrimaryTablet, uuid, normalMigrationWait, binlogdatapb.VReplicationWorkflowState_Copying.String(), binlogdatapb.VReplicationWorkflowState_Running.String()) + require.Contains(t, []string{binlogdatapb.VReplicationWorkflowState_Copying.String(), binlogdatapb.VReplicationWorkflowState_Running.String()}, vreplStatus) }) t.Run("wait for vreplication to run on shard 80-", func(t *testing.T) { - vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, shards[1].Vttablets[0], uuid, normalMigrationWait, "Copying", "Running") - require.Contains(t, []string{"Copying", "Running"}, vreplStatus) + vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, shards[1].Vttablets[0], uuid, normalMigrationWait, binlogdatapb.VReplicationWorkflowState_Copying.String(), binlogdatapb.VReplicationWorkflowState_Running.String()) + require.Contains(t, []string{binlogdatapb.VReplicationWorkflowState_Copying.String(), binlogdatapb.VReplicationWorkflowState_Running.String()}, vreplStatus) }) t.Run("check status again", func(t *testing.T) { // again see that we're still 'running' @@ -585,22 +561,8 @@ func TestSchemaChange(t *testing.T) { onlineddl.PrintQueryResult(os.Stdout, rs) }) t.Run("unthrottle", func(t *testing.T) { - for i := range shards { - var body string - var err error - switch i { - case 0: - // this is the shard where we run PRS - // Use per-tablet throttling API - body, err = unthrottleApp(currentPrimaryTablet, throttlerapp.OnlineDDLName) - case 1: - // no 
PRS on this shard - // Use per-tablet throttling API - body, err = unthrottleApp(shards[i].Vttablets[0], throttlerapp.OnlineDDLName) - } - assert.NoError(t, err) - assert.Contains(t, body, throttlerapp.OnlineDDLName) - } + _, err = throttler.UnthrottleAppAndWaitUntilTabletsConfirm(t, clusterInstance, throttlerapp.OnlineDDLName) + assert.NoError(t, err) }) t.Run("expect completion", func(t *testing.T) { _ = onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, extendedMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) @@ -654,12 +616,12 @@ func TestSchemaChange(t *testing.T) { onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning) }) t.Run("wait for vreplication to run on shard -80", func(t *testing.T) { - vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, currentPrimaryTablet, uuid, normalMigrationWait, "Copying", "Running") - require.Contains(t, []string{"Copying", "Running"}, vreplStatus) + vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, currentPrimaryTablet, uuid, normalMigrationWait, binlogdatapb.VReplicationWorkflowState_Copying.String(), binlogdatapb.VReplicationWorkflowState_Running.String()) + require.Contains(t, []string{binlogdatapb.VReplicationWorkflowState_Copying.String(), binlogdatapb.VReplicationWorkflowState_Running.String()}, vreplStatus) }) t.Run("wait for vreplication to run on shard 80-", func(t *testing.T) { - vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, shards[1].Vttablets[0], uuid, normalMigrationWait, "Copying", "Running") - require.Contains(t, []string{"Copying", "Running"}, vreplStatus) + vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, shards[1].Vttablets[0], uuid, normalMigrationWait, binlogdatapb.VReplicationWorkflowState_Copying.String(), binlogdatapb.VReplicationWorkflowState_Running.String()) + require.Contains(t, []string{binlogdatapb.VReplicationWorkflowState_Copying.String(), 
binlogdatapb.VReplicationWorkflowState_Running.String()}, vreplStatus) }) t.Run("check status again", func(t *testing.T) { // again see that we're still 'running' @@ -818,36 +780,28 @@ func TestSchemaChange(t *testing.T) { // - two shards as opposed to one // - tablet throttling t.Run("Revert a migration completed on one shard and cancelled on another", func(t *testing.T) { - // shard 0 will run normally, shard 1 will be throttled - defer unthrottleApp(shards[1].Vttablets[0], throttlerapp.OnlineDDLName) - t.Run("throttle shard 1", func(t *testing.T) { - body, err := throttleApp(shards[1].Vttablets[0], throttlerapp.OnlineDDLName) - assert.NoError(t, err) - assert.Contains(t, body, throttlerapp.OnlineDDLName) - }) + // shard 0 will run normally, shard 1 will be postponed var uuid string - t.Run("run migrations, expect 1st to complete, 2nd to be running", func(t *testing.T) { - uuid = testOnlineDDLStatement(t, alterTableTrivialStatement, "vitess", providedUUID, providedMigrationContext, "vtgate", "test_val", "", true) + t.Run("run migrations, expect running on both shards", func(t *testing.T) { + uuid = testOnlineDDLStatement(t, alterTableTrivialStatement, "vitess --postpone-launch", providedUUID, providedMigrationContext, "vtgate", "test_val", "", true) + onlineddl.CheckLaunchMigration(t, &vtParams, shards[0:1], uuid, "-80", true) { status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards[:1], uuid, normalMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) onlineddl.CheckMigrationStatus(t, &vtParams, shards[:1], uuid, schema.OnlineDDLStatusComplete) } { - // shard 1 is throttled - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards[1:], uuid, normalMigrationWait, schema.OnlineDDLStatusRunning) + status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards[1:], uuid, normalMigrationWait, schema.OnlineDDLStatusQueued) fmt.Printf("# Migration status (for 
debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards[1:], uuid, schema.OnlineDDLStatusRunning) + onlineddl.CheckMigrationStatus(t, &vtParams, shards[1:], uuid, schema.OnlineDDLStatusQueued) } }) t.Run("check cancel migration", func(t *testing.T) { onlineddl.CheckCancelAllMigrations(t, &vtParams, 1) }) - t.Run("unthrottle shard 1", func(t *testing.T) { - body, err := unthrottleApp(shards[1].Vttablets[0], throttlerapp.OnlineDDLName) - assert.NoError(t, err) - assert.Contains(t, body, throttlerapp.OnlineDDLName) + t.Run("launch-all", func(t *testing.T) { + onlineddl.CheckLaunchAllMigrations(t, &vtParams, 0) }) var revertUUID string t.Run("issue revert migration", func(t *testing.T) { @@ -859,12 +813,12 @@ func TestSchemaChange(t *testing.T) { revertUUID = row.AsString("uuid", "") assert.NotEmpty(t, revertUUID) }) - t.Run("expect one revert successful, another failed", func(t *testing.T) { + t.Run("migrations were cancelled, revert should impossible", func(t *testing.T) { { // shard 0 migration was complete. Revert should be successful status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards[:1], revertUUID, normalMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards[:1], revertUUID, schema.OnlineDDLStatusComplete) + onlineddl.CheckMigrationStatus(t, &vtParams, shards[:1], revertUUID, schema.OnlineDDLStatusFailed) } { // shard 0 migration was cancelled. 
Revert should not be possible @@ -896,6 +850,9 @@ func TestSchemaChange(t *testing.T) { t.Run("summary: validate sequential migration IDs", func(t *testing.T) { onlineddl.ValidateSequentialMigrationIDs(t, &vtParams, shards) }) + t.Run("summary: validate completed_timestamp", func(t *testing.T) { + onlineddl.ValidateCompletedTimestamp(t, &vtParams) + }) } func insertRow(t *testing.T) { diff --git a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go index 1f1f3b9c5b7..7f560a24f9e 100644 --- a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go +++ b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go @@ -23,9 +23,9 @@ import ( "math/rand" "os" "path" + "runtime" "strings" "sync" - "sync/atomic" "testing" "time" @@ -136,12 +136,14 @@ var ( writeMetrics WriteMetrics ) +var ( + countIterations = 5 +) + const ( - maxTableRows = 4096 - maxConcurrency = 20 - singleConnectionSleepInterval = 2 * time.Millisecond - countIterations = 5 - migrationWaitTimeout = 60 * time.Second + maxTableRows = 4096 + workloadDuration = 5 * time.Second + migrationWaitTimeout = 60 * time.Second ) func resetOpOrder() { @@ -227,6 +229,8 @@ func TestMain(m *testing.M) { func TestSchemaChange(t *testing.T) { defer cluster.PanicHandler(t) + ctx := context.Background() + shards = clusterInstance.Keyspaces[0].Shards require.Equal(t, 1, len(shards)) @@ -251,16 +255,17 @@ func TestSchemaChange(t *testing.T) { // that our testing/metrics logic is sound in the first place. 
testName := fmt.Sprintf("workload without ALTER TABLE %d/%d", (i + 1), countIterations) t.Run(testName, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) initTable(t) + + ctx, cancel := context.WithTimeout(ctx, workloadDuration) + defer cancel() + var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() runMultipleConnections(ctx, t) }() - time.Sleep(5 * time.Second) - cancel() // will cause runMultipleConnections() to terminate wg.Wait() testSelectTableMetrics(t) }) @@ -285,7 +290,7 @@ func TestSchemaChange(t *testing.T) { // the vreplication/ALTER TABLE did not corrupt our data and we are happy. testName := fmt.Sprintf("ALTER TABLE with workload %d/%d", (i + 1), countIterations) t.Run(testName, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx := context.Background() t.Run("create schema", func(t *testing.T) { testWithInitialSchema(t) }) @@ -293,6 +298,9 @@ func TestSchemaChange(t *testing.T) { initTable(t) }) t.Run("migrate", func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + var wg sync.WaitGroup wg.Add(1) go func() { @@ -302,7 +310,7 @@ func TestSchemaChange(t *testing.T) { hint := fmt.Sprintf("hint-alter-with-workload-%d", i) uuid := testOnlineDDLStatement(t, fmt.Sprintf(alterHintStatement, hint), onlineDDLStrategy, "vtgate", hint) onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - cancel() // will cause runMultipleConnections() to terminate + cancel() // Now that the migration is complete, we can stop the workload. 
wg.Wait() }) t.Run("validate metrics", func(t *testing.T) { @@ -371,7 +379,11 @@ func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName stri query := fmt.Sprintf(`show tables like '%%%s%%';`, showTableName) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + rowcount := 0 + for { queryResult, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true) require.Nil(t, err) @@ -381,11 +393,15 @@ func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName stri } select { - case <-time.After(time.Second): + case <-ticker.C: + continue // Keep looping case <-ctx.Done(): - break + // Break below to the assertion } + + break } + assert.Equal(t, expectCount, rowcount) } @@ -480,7 +496,7 @@ func generateDelete(t *testing.T, conn *mysql.Conn) error { return err } -func runSingleConnection(ctx context.Context, t *testing.T, done *int64) { +func runSingleConnection(ctx context.Context, t *testing.T, sleepInterval time.Duration) { log.Infof("Running single connection") conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) @@ -491,11 +507,10 @@ func runSingleConnection(ctx context.Context, t *testing.T, done *int64) { _, err = conn.ExecuteFetch("set transaction isolation level read committed", 1000, true) require.Nil(t, err) + ticker := time.NewTicker(sleepInterval) + defer ticker.Stop() + for { - if atomic.LoadInt64(done) == 1 { - log.Infof("Terminating single connection") - return - } switch rand.Int31n(3) { case 0: err = generateInsert(t, conn) @@ -504,27 +519,39 @@ func runSingleConnection(ctx context.Context, t *testing.T, done *int64) { case 2: err = generateDelete(t, conn) } + select { + case <-ctx.Done(): + log.Infof("Terminating single connection") + return + case <-ticker.C: + } assert.Nil(t, err) - time.Sleep(singleConnectionSleepInterval) } } func runMultipleConnections(ctx context.Context, t *testing.T) { - 
log.Infof("Running multiple connections") - var done int64 + // The workload for a 16 vCPU machine is: + // - Concurrency of 16 + // - 2ms interval between queries for each connection + // As the number of vCPUs decreases, so do we decrease concurrency, and increase intervals. For example, on a 8 vCPU machine + // we run concurrency of 8 and interval of 4ms. On a 4 vCPU machine we run concurrency of 4 and interval of 8ms. + maxConcurrency := runtime.NumCPU() + sleepModifier := 16.0 / float64(maxConcurrency) + baseSleepInterval := 2 * time.Millisecond + singleConnectionSleepIntervalNanoseconds := float64(baseSleepInterval.Nanoseconds()) * sleepModifier + sleepInterval := time.Duration(int64(singleConnectionSleepIntervalNanoseconds)) + + log.Infof("Running multiple connections: maxConcurrency=%v, sleep interval=%v", maxConcurrency, sleepInterval) var wg sync.WaitGroup for i := 0; i < maxConcurrency; i++ { wg.Add(1) go func() { defer wg.Done() - runSingleConnection(ctx, t, &done) + runSingleConnection(ctx, t, sleepInterval) }() } - <-ctx.Done() - atomic.StoreInt64(&done, 1) - log.Infof("Running multiple connections: done") wg.Wait() - log.Infof("All connections cancelled") + log.Infof("Running multiple connections: done") } func initTable(t *testing.T) { diff --git a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go index e83426fe4a9..88cc1ca32f4 100644 --- a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go +++ b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go @@ -41,6 +41,7 @@ import ( "time" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/schema" @@ -706,9 +707,9 @@ func runSingleConnection(ctx context.Context, t *testing.T, autoIncInsert bool, // Table renamed to _before, due to 
-vreplication-test-suite flag err = nil } - if sqlErr, ok := err.(*mysql.SQLError); ok { + if sqlErr, ok := err.(*sqlerror.SQLError); ok { switch sqlErr.Number() { - case mysql.ERLockDeadlock: + case sqlerror.ERLockDeadlock: // That's fine. We create a lot of contention; some transactions will deadlock and // rollback. It happens, and we can ignore those and keep on going. err = nil diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/gbk-charset/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/gbk-charset/create.sql deleted file mode 100644 index 706f2a5db07..00000000000 --- a/go/test/endtoend/onlineddl/vrepl_suite/testdata/gbk-charset/create.sql +++ /dev/null @@ -1,26 +0,0 @@ -/*drop table if exists onlineddl_test; -create table onlineddl_test ( - id int(11) NOT NULL AUTO_INCREMENT, - name varchar(512) DEFAULT NULL, - v varchar(255) DEFAULT NULL COMMENT '添加普通列测试', - PRIMARY KEY (id) -) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=gbk; - -insert into onlineddl_test values (null, 'gbk-test-initial', '添加普通列测试-添加普通列测试'); -insert into onlineddl_test values (null, 'gbk-test-initial', '添加普通列测试-添加普通列测试'); - -drop event if exists onlineddl_test; -delimiter ;; -create event onlineddl_test - on schedule every 1 second - starts current_timestamp - ends current_timestamp + interval 60 second - on completion not preserve - enable - do -begin - insert into onlineddl_test (name) values ('gbk-test-default'); - insert into onlineddl_test values (null, 'gbk-test', '添加普通列测试-添加普通列测试'); - update onlineddl_test set v='添加普通列测试' where v='添加普通列测试-添加普通列测试' order by id desc limit 1; -end ;; -*/ \ No newline at end of file diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/after_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/after_columns new file mode 100644 index 00000000000..99f86097862 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/after_columns @@ -0,0 +1 @@ +id, c1j 
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/alter new file mode 100644 index 00000000000..f2e64ff0894 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/alter @@ -0,0 +1 @@ +change column c1 c1j json diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/before_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/before_columns new file mode 100644 index 00000000000..b791aa0d27a --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/before_columns @@ -0,0 +1 @@ +id, c1 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/create.sql new file mode 100644 index 00000000000..5280498e9fd --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-retype-json/create.sql @@ -0,0 +1,22 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + c1 int not null, + primary key (id) +) auto_increment=1; + +insert into onlineddl_test values (1, 11); +insert into onlineddl_test values (2, 13); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 17); +end ;; diff --git a/go/test/endtoend/onlineddl/vtctlutil.go b/go/test/endtoend/onlineddl/vtctlutil.go index 7bfd09a0585..19a6ff79604 100644 --- a/go/test/endtoend/onlineddl/vtctlutil.go +++ b/go/test/endtoend/onlineddl/vtctlutil.go @@ -18,15 +18,12 @@ package onlineddl import ( "testing" - "time" "vitess.io/vitess/go/test/endtoend/cluster" "github.com/stretchr/testify/assert" ) -var throttlerConfigTimeout = 90 * time.Second - // 
CheckCancelAllMigrations cancels all pending migrations. There is no validation for affected migrations. func CheckCancelAllMigrationsViaVtctl(t *testing.T, vtctlclient *cluster.VtctlClientProcess, keyspace string) { cancelQuery := "alter vitess_migration cancel all" diff --git a/go/test/endtoend/onlineddl/vtgate_util.go b/go/test/endtoend/onlineddl/vtgate_util.go index e59e5759a75..693523cec48 100644 --- a/go/test/endtoend/onlineddl/vtgate_util.go +++ b/go/test/endtoend/onlineddl/vtgate_util.go @@ -19,27 +19,35 @@ package onlineddl import ( "context" "fmt" - "io" "math" - "net/http" "os" "testing" "time" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" "vitess.io/vitess/go/test/endtoend/cluster" - "github.com/buger/jsonparser" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +const ( + ThrottledAppsTimeout = 60 * time.Second +) + +var ( + testsStartupTime time.Time +) + +func init() { + testsStartupTime = time.Now() +} + // VtgateExecQuery runs a query on VTGate using given query params func VtgateExecQuery(t *testing.T, vtParams *mysql.ConnParams, query string, expectError string) *sqltypes.Result { t.Helper() @@ -199,7 +207,7 @@ func CheckLaunchAllMigrations(t *testing.T, vtParams *mysql.ConnParams, expectCo } // CheckMigrationStatus verifies that the migration indicated by given UUID has the given expected status -func CheckMigrationStatus(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard, uuid string, expectStatuses ...schema.OnlineDDLStatus) { +func CheckMigrationStatus(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard, uuid string, expectStatuses ...schema.OnlineDDLStatus) bool { query, err := sqlparser.ParseAndBind("show vitess_migrations like %a", sqltypes.StringBindVariable(uuid), ) @@ -221,7 +229,7 @@ func 
CheckMigrationStatus(t *testing.T, vtParams *mysql.ConnParams, shards []clu } } } - assert.Equal(t, len(shards), count) + return assert.Equal(t, len(shards), count) } // WaitForMigrationStatus waits for a migration to reach either provided statuses (returns immediately), or eventually time out @@ -239,9 +247,13 @@ func WaitForMigrationStatus(t *testing.T, vtParams *mysql.ConnParams, shards []c for _, status := range expectStatuses { statusesMap[string(status)] = true } - startTime := time.Now() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + lastKnownStatus := "" - for time.Since(startTime) < timeout { + for { countMatchedShards := 0 r := VtgateExecQuery(t, vtParams, query, "") for _, row := range r.Named().Rows { @@ -258,9 +270,12 @@ func WaitForMigrationStatus(t *testing.T, vtParams *mysql.ConnParams, shards []c if countMatchedShards == len(shards) { return schema.OnlineDDLStatus(lastKnownStatus) } - time.Sleep(1 * time.Second) + select { + case <-ctx.Done(): + return schema.OnlineDDLStatus(lastKnownStatus) + case <-ticker.C: + } } - return schema.OnlineDDLStatus(lastKnownStatus) } // CheckMigrationArtifacts verifies given migration exists, and checks if it has artifacts @@ -313,16 +328,35 @@ func UnthrottleAllMigrations(t *testing.T, vtParams *mysql.ConnParams) { // CheckThrottledApps checks for existence or non-existence of an app in the throttled apps list func CheckThrottledApps(t *testing.T, vtParams *mysql.ConnParams, throttlerApp throttlerapp.Name, expectFind bool) { - query := "show vitess_throttled_apps" - r := VtgateExecQuery(t, vtParams, query, "") - found := false - for _, row := range r.Named().Rows { - if throttlerApp.Equals(row.AsString("app", "")) { - found = true + ctx, cancel := context.WithTimeout(context.Background(), ThrottledAppsTimeout) + defer cancel() + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for { + query := 
"show vitess_throttled_apps" + r := VtgateExecQuery(t, vtParams, query, "") + + appFound := false + for _, row := range r.Named().Rows { + if throttlerApp.Equals(row.AsString("app", "")) { + appFound = true + } + } + if appFound == expectFind { + // we're all good + return + } + + select { + case <-ctx.Done(): + assert.Failf(t, "CheckThrottledApps timed out waiting for %v to be in throttled status '%v'", throttlerApp.String(), expectFind) + return + case <-ticker.C: } } - assert.Equal(t, expectFind, found, "check app %v in throttled apps: %v", throttlerApp, found) } // WaitForThrottledTimestamp waits for a migration to have a non-empty last_throttled_timestamp @@ -350,49 +384,6 @@ func WaitForThrottledTimestamp(t *testing.T, vtParams *mysql.ConnParams, uuid st return } -// WaitForThrottlerStatusEnabled waits for a tablet to report its throttler status as enabled. -func WaitForThrottlerStatusEnabled(t *testing.T, tablet *cluster.Vttablet, timeout time.Duration) { - jsonPath := "IsEnabled" - url := fmt.Sprintf("http://localhost:%d/throttler/status", tablet.HTTPPort) - - ctx, cancel := context.WithTimeout(context.Background(), throttlerConfigTimeout) - defer cancel() - - ticker := time.NewTicker(time.Second) - defer ticker.Stop() - - for { - body := getHTTPBody(url) - val, err := jsonparser.GetBoolean([]byte(body), jsonPath) - require.NoError(t, err) - if val { - return - } - select { - case <-ctx.Done(): - t.Error("timeout waiting for tablet's throttler status to be enabled") - return - case <-ticker.C: - } - } -} - -func getHTTPBody(url string) string { - resp, err := http.Get(url) - if err != nil { - log.Infof("http Get returns %+v", err) - return "" - } - if resp.StatusCode != 200 { - log.Infof("http Get returns status %d", resp.StatusCode) - return "" - } - respByte, _ := io.ReadAll(resp.Body) - defer resp.Body.Close() - body := string(respByte) - return body -} - // ValidateSequentialMigrationIDs validates that schem_migrations.id column, which is an 
AUTO_INCREMENT, does // not have gaps func ValidateSequentialMigrationIDs(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard) { @@ -429,3 +420,31 @@ func ValidateSequentialMigrationIDs(t *testing.T, vtParams *mysql.ConnParams, sh assert.Equalf(t, count, shardMax[shard]-shardMin[shard]+1, "mismatch: shared=%v, count=%v, min=%v, max=%v", shard, count, shardMin[shard], shardMax[shard]) } } + +// ValidateCompletedTimestamp ensures that any migration in `cancelled`, `completed`, `failed` statuses +// has a non-nil and valid `completed_timestamp` value. +func ValidateCompletedTimestamp(t *testing.T, vtParams *mysql.ConnParams) { + require.False(t, testsStartupTime.IsZero()) + r := VtgateExecQuery(t, vtParams, "show vitess_migrations", "") + + completedTimestampNumValidations := 0 + for _, row := range r.Named().Rows { + migrationStatus := row.AsString("migration_status", "") + require.NotEmpty(t, migrationStatus) + switch migrationStatus { + case string(schema.OnlineDDLStatusComplete), + string(schema.OnlineDDLStatusFailed), + string(schema.OnlineDDLStatusCancelled): + { + assert.False(t, row["completed_timestamp"].IsNull()) + // Also make sure the timestamp is "real", and that it is recent. 
+ timestamp := row.AsString("completed_timestamp", "") + completedTime, err := time.Parse(sqltypes.TimestampFormat, timestamp) + assert.NoError(t, err) + assert.Greater(t, completedTime.Unix(), testsStartupTime.Unix()) + completedTimestampNumValidations++ + } + } + } + assert.NotZero(t, completedTimestampNumValidations) +} diff --git a/go/test/endtoend/preparestmt/stmt_methods_test.go b/go/test/endtoend/preparestmt/stmt_methods_test.go index 21369ea4d3a..24fb58bff81 100644 --- a/go/test/endtoend/preparestmt/stmt_methods_test.go +++ b/go/test/endtoend/preparestmt/stmt_methods_test.go @@ -436,3 +436,24 @@ func TestShowColumns(t *testing.T) { require.Len(t, cols, 6) require.False(t, rows.Next()) } + +func TestBinaryColumn(t *testing.T) { + defer cluster.PanicHandler(t) + dbo := Connect(t, "interpolateParams=false") + defer dbo.Close() + + _, err := dbo.Query(`SELECT DISTINCT + BINARY table_info.table_name AS table_name, + table_info.create_options AS create_options, + table_info.table_comment AS table_comment + FROM information_schema.tables AS table_info + JOIN information_schema.columns AS column_info + ON BINARY column_info.table_name = BINARY table_info.table_name + WHERE + table_info.table_schema = ? + AND column_info.table_schema = ? + -- Exclude views. 
+ AND table_info.table_type = 'BASE TABLE' + ORDER BY BINARY table_info.table_name`, keyspaceName, keyspaceName) + require.NoError(t, err) +} diff --git a/go/test/endtoend/recovery/pitr/shardedpitr_test.go b/go/test/endtoend/recovery/pitr/shardedpitr_test.go index 0b0858117ad..e7e9d5cad65 100644 --- a/go/test/endtoend/recovery/pitr/shardedpitr_test.go +++ b/go/test/endtoend/recovery/pitr/shardedpitr_test.go @@ -29,11 +29,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/sidecardb" ) var ( @@ -304,8 +304,8 @@ func performResharding(t *testing.T) { require.NoError(t, err) waitTimeout := 30 * time.Second - shard0Primary.VttabletProcess.WaitForVReplicationToCatchup(t, "ks.reshardWorkflow", dbName, sidecardb.DefaultName, waitTimeout) - shard1Primary.VttabletProcess.WaitForVReplicationToCatchup(t, "ks.reshardWorkflow", dbName, sidecardb.DefaultName, waitTimeout) + shard0Primary.VttabletProcess.WaitForVReplicationToCatchup(t, "ks.reshardWorkflow", dbName, sidecar.DefaultName, waitTimeout) + shard1Primary.VttabletProcess.WaitForVReplicationToCatchup(t, "ks.reshardWorkflow", dbName, sidecar.DefaultName, waitTimeout) waitForNoWorkflowLag(t, clusterInstance, "ks.reshardWorkflow") diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_test.go index 59734bce57a..f7afea1431b 100644 --- a/go/test/endtoend/reparent/plannedreparent/reparent_test.go +++ b/go/test/endtoend/reparent/plannedreparent/reparent_test.go @@ -20,15 +20,17 @@ import ( "context" "fmt" "strconv" + "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + "google.golang.org/protobuf/encoding/protojson" - 
"vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/reparent/utils" "vitess.io/vitess/go/vt/log" @@ -123,9 +125,16 @@ func TestReparentReplicaOffline(t *testing.T) { // Perform a graceful reparent operation. out, err := utils.PrsWithTimeout(t, clusterInstance, tablets[1], false, "", "31s") require.Error(t, err) - assert.True(t, utils.SetReplicationSourceFailed(tablets[3], out)) - utils.CheckPrimaryTablet(t, clusterInstance, tablets[1]) + // Assert that PRS failed + if clusterInstance.VtctlMajorVersion <= 17 { + assert.True(t, utils.SetReplicationSourceFailed(tablets[3], out)) + utils.CheckPrimaryTablet(t, clusterInstance, tablets[1]) + } else { + assert.Contains(t, out, "rpc error: code = DeadlineExceeded desc") + utils.CheckPrimaryTablet(t, clusterInstance, tablets[0]) + } + } func TestReparentAvoid(t *testing.T) { @@ -155,7 +164,11 @@ func TestReparentAvoid(t *testing.T) { utils.StopTablet(t, tablets[0], true) out, err := utils.PrsAvoid(t, clusterInstance, tablets[1]) require.Error(t, err) - assert.Contains(t, out, "cannot find a tablet to reparent to in the same cell as the current primary") + if clusterInstance.VtctlMajorVersion <= 17 { + assert.Contains(t, out, "cannot find a tablet to reparent to in the same cell as the current primary") + } else { + assert.Contains(t, out, "rpc error: code = DeadlineExceeded desc = latest balancer error") + } utils.ValidateTopology(t, clusterInstance, false) utils.CheckPrimaryTablet(t, clusterInstance, tablets[1]) } @@ -275,17 +288,24 @@ func TestReparentWithDownReplica(t *testing.T) { // Perform a graceful reparent operation. It will fail as one tablet is down. 
out, err := utils.Prs(t, clusterInstance, tablets[1]) require.Error(t, err) - assert.True(t, utils.SetReplicationSourceFailed(tablets[2], out)) - - // insert data into the new primary, check the connected replica work - insertVal := utils.ConfirmReplication(t, tablets[1], []*cluster.Vttablet{tablets[0], tablets[3]}) + var insertVal int + // Assert that PRS failed + if clusterInstance.VtctlMajorVersion <= 17 { + assert.True(t, utils.SetReplicationSourceFailed(tablets[2], out)) + // insert data into the new primary, check the connected replica work + insertVal = utils.ConfirmReplication(t, tablets[1], []*cluster.Vttablet{tablets[0], tablets[3]}) + } else { + assert.Contains(t, out, fmt.Sprintf("TabletManager.PrimaryStatus on %s error", tablets[2].Alias)) + // insert data into the old primary, check the connected replica works. The primary tablet shouldn't have changed. + insertVal = utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[3]}) + } // restart mysql on the old replica, should still be connecting to the old primary tablets[2].MysqlctlProcess.InitMysql = false err = tablets[2].MysqlctlProcess.Start() require.NoError(t, err) - // Use the same PlannedReparentShard command to fix up the tablet. + // Use the same PlannedReparentShard command to promote the new primary. 
_, err = utils.Prs(t, clusterInstance, tablets[1]) require.NoError(t, err) @@ -465,8 +485,8 @@ func TestFullStatus(t *testing.T) { assert.NotEmpty(t, replicaStatus.ServerUuid) assert.NotEmpty(t, replicaStatus.ServerId) assert.Contains(t, replicaStatus.ReplicationStatus.Position, "MySQL56/"+replicaStatus.ReplicationStatus.SourceUuid) - assert.EqualValues(t, mysql.ReplicationStateRunning, replicaStatus.ReplicationStatus.IoState) - assert.EqualValues(t, mysql.ReplicationStateRunning, replicaStatus.ReplicationStatus.SqlState) + assert.EqualValues(t, replication.ReplicationStateRunning, replicaStatus.ReplicationStatus.IoState) + assert.EqualValues(t, replication.ReplicationStateRunning, replicaStatus.ReplicationStatus.SqlState) assert.Equal(t, fileNameFromPosition(replicaStatus.ReplicationStatus.FilePosition), fileNameFromPosition(primaryStatus.PrimaryStatus.FilePosition)) assert.LessOrEqual(t, rowNumberFromPosition(replicaStatus.ReplicationStatus.FilePosition), rowNumberFromPosition(primaryStatus.PrimaryStatus.FilePosition)) assert.Equal(t, replicaStatus.ReplicationStatus.RelayLogSourceBinlogEquivalentPosition, primaryStatus.PrimaryStatus.FilePosition) @@ -538,7 +558,16 @@ func waitForFilePosition(t *testing.T, clusterInstance *cluster.LocalProcessClus // fileNameFromPosition gets the file name from the position func fileNameFromPosition(pos string) string { - return pos[0 : len(pos)-4] + s := strings.SplitN(pos, ":", 2) + if len(s) != 2 { + return "" + } + return s[0] +} + +func TestFileNameFromPosition(t *testing.T) { + assert.Equal(t, "", fileNameFromPosition("shouldfail")) + assert.Equal(t, "FilePos/vt-0000000101-bin.000001", fileNameFromPosition("FilePos/vt-0000000101-bin.000001:123456789")) } // rowNumberFromPosition gets the row number from the position diff --git a/go/test/endtoend/reparent/utils/utils.go b/go/test/endtoend/reparent/utils/utils.go index 7d809392c0d..fc9e88f6471 100644 --- a/go/test/endtoend/reparent/utils/utils.go +++ 
b/go/test/endtoend/reparent/utils/utils.go @@ -65,7 +65,7 @@ var ( replicationWaitTimeout = time.Duration(15 * time.Second) ) -//region cluster setup/teardown +// region cluster setup/teardown // SetupReparentCluster is used to setup the reparent cluster func SetupReparentCluster(t *testing.T, durability string) *cluster.LocalProcessCluster { @@ -139,19 +139,13 @@ func setupCluster(ctx context.Context, t *testing.T, shardName string, cells []s // In this case, the close method and initSchema method of the onlineDDL executor race. // If the initSchema acquires the lock, then it takes about 30 seconds for it to run during which time the // DemotePrimary rpc is stalled! - "--queryserver_enable_online_ddl=false", - // disabling active reparents on the tablet since we don't want the replication manager - // to fix replication if it is stopped. Some tests deliberately do that. Also, we don't want - // the replication manager to silently fix the replication in case ERS or PRS mess up. All the - // tests in this test suite should work irrespective of this flag. Each run of ERS, PRS should be - // setting up the replication correctly. 
- "--disable-replication-manager") + "--queryserver_enable_online_ddl=false") // Initialize Cluster err = clusterInstance.SetupCluster(keyspace, []cluster.Shard{*shard}) require.NoError(t, err, "Cannot launch cluster") - //Start MySql + // Start MySql var mysqlCtlProcessList []*exec.Cmd for _, shard := range clusterInstance.Keyspaces[0].Shards { for _, tablet := range shard.Vttablets { @@ -254,7 +248,7 @@ func StartNewVTTablet(t *testing.T, clusterInstance *cluster.LocalProcessCluster return tablet } -//endregion +// endregion // region database queries func getMysqlConnParam(tablet *cluster.Vttablet) mysql.ConnParams { @@ -282,7 +276,7 @@ func execute(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { return qr } -//endregion +// endregion // region ers, prs @@ -630,7 +624,7 @@ func CheckReparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProces streamHealthResponse := shrs[0] assert.Equal(t, streamHealthResponse.Target.TabletType, topodatapb.TabletType_PRIMARY) - assert.True(t, streamHealthResponse.TabletExternallyReparentedTimestamp >= baseTime) + assert.True(t, streamHealthResponse.PrimaryTermStartTimestamp >= baseTime) } // WaitForReplicationPosition waits for tablet B to catch up to the replication position of tablet A. 
diff --git a/go/test/endtoend/sharded/sharded_keyspace_test.go b/go/test/endtoend/sharded/sharded_keyspace_test.go index 1560d531a28..857dc455206 100644 --- a/go/test/endtoend/sharded/sharded_keyspace_test.go +++ b/go/test/endtoend/sharded/sharded_keyspace_test.go @@ -26,9 +26,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/sidecardb" ) var ( @@ -84,7 +84,7 @@ func TestMain(m *testing.M) { if err := clusterInstance.StartTopo(); err != nil { return 1, err } - if err := clusterInstance.VtctlProcess.CreateKeyspace(keyspaceName, sidecardb.DefaultName); err != nil { + if err := clusterInstance.VtctlProcess.CreateKeyspace(keyspaceName, sidecar.DefaultName); err != nil { return 1, err } diff --git a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go index 843c6800622..8a3dd4f9b73 100644 --- a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go +++ b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go @@ -251,7 +251,6 @@ func (bt *BufferingTest) createCluster() (*cluster.LocalProcessCluster, int) { if err := clusterInstance.StartVtgate(); err != nil { return nil, 1 } - rand.Seed(time.Now().UnixNano()) return clusterInstance, 0 } diff --git a/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go b/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go index ace652fc1d2..d3828eb8166 100644 --- a/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go +++ b/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go @@ -51,7 +51,7 @@ func failoverExternalReparenting(t *testing.T, clusterInstance *cluster.LocalPro primary.VttabletProcess.QueryTablet(demoteQuery, keyspaceUnshardedName, true) // Wait for replica to catch up to primary. 
- cluster.WaitForReplicationPos(t, primary, replica, "localhost", 60.0) + cluster.WaitForReplicationPos(t, primary, replica, false, time.Minute) duration := time.Since(start) minUnavailabilityInS := 1.0 diff --git a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go index fb6a64efef3..aa09a99e0fe 100644 --- a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go +++ b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go @@ -71,7 +71,7 @@ func TestTopoCustomRule(t *testing.T) { require.Nil(t, err, "error should be Nil") // Start Vttablet - err = clusterInstance.StartVttablet(rTablet, "SERVING", false, cell, keyspaceName, hostname, shardName) + err = clusterInstance.StartVttablet(rTablet, false, "SERVING", false, cell, keyspaceName, hostname, shardName) require.Nil(t, err, "error should be Nil") err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate") diff --git a/go/test/endtoend/tabletmanager/primary/tablet_test.go b/go/test/endtoend/tabletmanager/primary/tablet_test.go index 3db692694b5..f6255b1f71a 100644 --- a/go/test/endtoend/tabletmanager/primary/tablet_test.go +++ b/go/test/endtoend/tabletmanager/primary/tablet_test.go @@ -155,11 +155,11 @@ func TestRepeatedInitShardPrimary(t *testing.T) { checkTabletType(t, replicaTablet.Alias, "REPLICA") } -func TestPrimaryRestartSetsTERTimestamp(t *testing.T) { +func TestPrimaryRestartSetsPTSTimestamp(t *testing.T) { defer cluster.PanicHandler(t) - // Test that TER timestamp is set when we restart the PRIMARY vttablet. - // TER = TabletExternallyReparented. - // See StreamHealthResponse.tablet_externally_reparented_timestamp for details. + // Test that PTS timestamp is set when we restart the PRIMARY vttablet. + // PTS = PrimaryTermStart. + // See StreamHealthResponse.primary_term_start_timestamp for details. 
// Make replica as primary err := clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, replicaTablet.TabletUID) @@ -168,7 +168,7 @@ func TestPrimaryRestartSetsTERTimestamp(t *testing.T) { err = replicaTablet.VttabletProcess.WaitForTabletStatus("SERVING") require.NoError(t, err) - // Capture the current TER. + // Capture the current PTS. shrs, err := clusterInstance.StreamTabletHealth(context.Background(), &replicaTablet, 1) require.NoError(t, err) @@ -178,9 +178,9 @@ func TestPrimaryRestartSetsTERTimestamp(t *testing.T) { got := fmt.Sprintf("%d", actualType) want := fmt.Sprintf("%d", tabletType) assert.Equal(t, want, got) - assert.NotNil(t, streamHealthRes1.GetTabletExternallyReparentedTimestamp()) - assert.True(t, streamHealthRes1.GetTabletExternallyReparentedTimestamp() > 0, - "TER on PRIMARY must be set after InitShardPrimary") + assert.NotNil(t, streamHealthRes1.GetPrimaryTermStartTimestamp()) + assert.True(t, streamHealthRes1.GetPrimaryTermStartTimestamp() > 0, + "PTS on PRIMARY must be set after InitShardPrimary") // Restart the PRIMARY vttablet and test again @@ -189,10 +189,10 @@ func TestPrimaryRestartSetsTERTimestamp(t *testing.T) { require.NoError(t, err) // Start Vttablet - err = clusterInstance.StartVttablet(&replicaTablet, "SERVING", false, cell, keyspaceName, hostname, shardName) + err = clusterInstance.StartVttablet(&replicaTablet, false, "SERVING", false, cell, keyspaceName, hostname, shardName) require.NoError(t, err) - // Make sure that the TER did not change + // Make sure that the PTS did not change shrs, err = clusterInstance.StreamTabletHealth(context.Background(), &replicaTablet, 1) require.NoError(t, err) @@ -204,12 +204,12 @@ func TestPrimaryRestartSetsTERTimestamp(t *testing.T) { want = fmt.Sprintf("%d", tabletType) assert.Equal(t, want, got) - assert.NotNil(t, streamHealthRes2.GetTabletExternallyReparentedTimestamp()) - assert.True(t, streamHealthRes2.GetTabletExternallyReparentedTimestamp() == 
streamHealthRes1.GetTabletExternallyReparentedTimestamp(), + assert.NotNil(t, streamHealthRes2.GetPrimaryTermStartTimestamp()) + assert.True(t, streamHealthRes2.GetPrimaryTermStartTimestamp() == streamHealthRes1.GetPrimaryTermStartTimestamp(), fmt.Sprintf("When the PRIMARY vttablet was restarted, "+ - "the TER timestamp must be set by reading the old value from the tablet record. Old: %d, New: %d", - streamHealthRes1.GetTabletExternallyReparentedTimestamp(), - streamHealthRes2.GetTabletExternallyReparentedTimestamp())) + "the PTS timestamp must be set by reading the old value from the tablet record. Old: %d, New: %d", + streamHealthRes1.GetPrimaryTermStartTimestamp(), + streamHealthRes2.GetPrimaryTermStartTimestamp())) // Reset primary err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primaryTablet.TabletUID) diff --git a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go index 1b43ecf2d90..c6f7253c791 100644 --- a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go +++ b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go @@ -18,6 +18,7 @@ package tablegc import ( "context" "flag" + "fmt" "os" "testing" "time" @@ -414,3 +415,19 @@ func TestPurgeView(t *testing.T) { validateTableExists(t, "t1") validateAnyState(t, 1024, schema.EvacTableGCState, schema.DropTableGCState, schema.TableDroppedGCState) } + +func TestDropView(t *testing.T) { + viewName, err := schema.GenerateGCTableName(schema.DropTableGCState, time.Now().Add(tableTransitionExpiration)) // shortly in the future + require.NoError(t, err) + createStatement := fmt.Sprintf("create or replace view %s as select 1", viewName) + + _, err = primaryTablet.VttabletProcess.QueryTablet(createStatement, keyspaceName, true) + require.NoError(t, err) + + // view should be there, because the timestamp hint is still in the near future. 
+ validateTableExists(t, viewName) + + time.Sleep(tableTransitionExpiration / 2) + // But by now, after the above sleep, the view's timestamp hint is in the past, and we expect TableGC to have dropped the view. + validateTableDoesNotExist(t, viewName) +} diff --git a/go/test/endtoend/tabletmanager/tablet_health_test.go b/go/test/endtoend/tabletmanager/tablet_health_test.go index 9ee6d8a2c63..19b730671c9 100644 --- a/go/test/endtoend/tabletmanager/tablet_health_test.go +++ b/go/test/endtoend/tabletmanager/tablet_health_test.go @@ -21,13 +21,14 @@ import ( "encoding/json" "fmt" "net/http" + "slices" "sync" + "sync/atomic" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "k8s.io/utils/strings/slices" "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" @@ -73,7 +74,7 @@ func TestTabletReshuffle(t *testing.T) { // SupportsBackup=False prevents vttablet from trying to restore // Start vttablet process - err = clusterInstance.StartVttablet(rTablet, "SERVING", false, cell, keyspaceName, hostname, shardName) + err = clusterInstance.StartVttablet(rTablet, false, "SERVING", false, cell, keyspaceName, hostname, shardName) require.NoError(t, err) sql := "select value from t1" @@ -106,7 +107,7 @@ func TestHealthCheck(t *testing.T) { defer replicaConn.Close() // start vttablet process, should be in SERVING state as we already have a primary - err = clusterInstance.StartVttablet(rTablet, "SERVING", false, cell, keyspaceName, hostname, shardName) + err = clusterInstance.StartVttablet(rTablet, true, "SERVING", false, cell, keyspaceName, hostname, shardName) require.NoError(t, err) conn, err := mysql.Connect(ctx, &primaryTabletParams) @@ -227,7 +228,7 @@ func TestHealthCheckSchemaChangeSignal(t *testing.T) { clusterInstance.VtTabletExtraArgs = oldArgs }() // start vttablet process, should be in SERVING state as we already have a primary. 
- err = clusterInstance.StartVttablet(tempTablet, "SERVING", false, cell, keyspaceName, hostname, shardName) + err = clusterInstance.StartVttablet(tempTablet, false, "SERVING", false, cell, keyspaceName, hostname, shardName) require.NoError(t, err) defer func() { @@ -250,17 +251,19 @@ func TestHealthCheckSchemaChangeSignal(t *testing.T) { func verifyHealthStreamSchemaChangeSignals(t *testing.T, vtgateConn *mysql.Conn, primaryTablet *cluster.Vttablet, viewsEnabled bool) { var streamErr error - wg := sync.WaitGroup{} + var wg sync.WaitGroup + var ranOnce atomic.Bool + var finished atomic.Bool + wg.Add(1) - ranOnce := false - finished := false ch := make(chan *querypb.StreamHealthResponse) + go func() { defer wg.Done() streamErr = clusterInstance.StreamTabletHealthUntil(context.Background(), primaryTablet, 30*time.Second, func(shr *querypb.StreamHealthResponse) bool { - ranOnce = true + ranOnce.Store(true) // If we are finished, then close the channel and end the stream. - if finished { + if finished.Load() { close(ch) return true } @@ -272,13 +275,14 @@ func verifyHealthStreamSchemaChangeSignals(t *testing.T, vtgateConn *mysql.Conn, // The test becomes flaky if we run the DDL immediately after starting the above go routine because the client for the Stream // sometimes isn't registered by the time DDL runs, and it misses the update we get. To prevent this situation, we wait for one Stream packet // to have returned. Once we know we received a Stream packet, then we know that we are registered for the health stream and can execute the DDL. 
- for i := 0; i < 30; i++ { - if ranOnce { - break - } + for i := 0; i < 30 && !ranOnce.Load(); i++ { time.Sleep(1 * time.Second) } + if !ranOnce.Load() { + t.Fatalf("HealthCheck did not ran?") + } + verifyTableDDLSchemaChangeSignal(t, vtgateConn, ch, "CREATE TABLE `area` (`id` int NOT NULL, `country` varchar(30), PRIMARY KEY (`id`))", "area") verifyTableDDLSchemaChangeSignal(t, vtgateConn, ch, "CREATE TABLE `area2` (`id` int NOT NULL, PRIMARY KEY (`id`))", "area2") verifyViewDDLSchemaChangeSignal(t, vtgateConn, ch, "CREATE VIEW v2 as select * from t1", viewsEnabled) @@ -288,7 +292,7 @@ func verifyHealthStreamSchemaChangeSignals(t *testing.T, vtgateConn *mysql.Conn, verifyViewDDLSchemaChangeSignal(t, vtgateConn, ch, "DROP VIEW v2", viewsEnabled) verifyTableDDLSchemaChangeSignal(t, vtgateConn, ch, "DROP TABLE `area`", "area") - finished = true + finished.Store(true) wg.Wait() require.NoError(t, streamErr) } @@ -381,7 +385,7 @@ func TestHealthCheckDrainedStateDoesNotShutdownQueryService(t *testing.T) { // - the second tablet will be set to 'drained' and we expect that // - the query service won't be shutdown - //Wait if tablet is not in service state + // Wait if tablet is not in service state defer cluster.PanicHandler(t) clusterInstance.DisableVTOrcRecoveries(t) defer clusterInstance.EnableVTOrcRecoveries(t) diff --git a/go/test/endtoend/tabletmanager/tablet_security_policy_test.go b/go/test/endtoend/tabletmanager/tablet_security_policy_test.go index 2ad907ec7b8..b3b11405abb 100644 --- a/go/test/endtoend/tabletmanager/tablet_security_policy_test.go +++ b/go/test/endtoend/tabletmanager/tablet_security_policy_test.go @@ -39,7 +39,7 @@ func TestFallbackSecurityPolicy(t *testing.T) { // Requesting an unregistered security_policy should fallback to deny-all. 
clusterInstance.VtTabletExtraArgs = []string{"--security_policy", "bogus"} - err = clusterInstance.StartVttablet(mTablet, "SERVING", false, cell, keyspaceName, hostname, shardName) + err = clusterInstance.StartVttablet(mTablet, false, "SERVING", false, cell, keyspaceName, hostname, shardName) require.NoError(t, err) // It should deny ADMIN role. @@ -94,7 +94,7 @@ func TestDenyAllSecurityPolicy(t *testing.T) { // Requesting a deny-all security_policy. clusterInstance.VtTabletExtraArgs = []string{"--security_policy", "deny-all"} - err = clusterInstance.StartVttablet(mTablet, "SERVING", false, cell, keyspaceName, hostname, shardName) + err = clusterInstance.StartVttablet(mTablet, false, "SERVING", false, cell, keyspaceName, hostname, shardName) require.NoError(t, err) // It should deny ADMIN role. @@ -126,7 +126,7 @@ func TestReadOnlySecurityPolicy(t *testing.T) { // Requesting a read-only security_policy. clusterInstance.VtTabletExtraArgs = []string{"--security_policy", "read-only"} - err = clusterInstance.StartVttablet(mTablet, "SERVING", false, cell, keyspaceName, hostname, shardName) + err = clusterInstance.StartVttablet(mTablet, false, "SERVING", false, cell, keyspaceName, hostname, shardName) require.NoError(t, err) // It should deny ADMIN role. diff --git a/go/test/endtoend/tabletmanager/tablet_test.go b/go/test/endtoend/tabletmanager/tablet_test.go index 97715d39a58..4fe5a70d125 100644 --- a/go/test/endtoend/tabletmanager/tablet_test.go +++ b/go/test/endtoend/tabletmanager/tablet_test.go @@ -43,7 +43,7 @@ func TestEnsureDB(t *testing.T) { log.Info(fmt.Sprintf("Started vttablet %v", tablet)) // Start vttablet process as replica. It won't be able to serve because there's no db. - err = clusterInstance.StartVttablet(tablet, "NOT_SERVING", false, cell, "dbtest", hostname, "0") + err = clusterInstance.StartVttablet(tablet, false, "NOT_SERVING", false, cell, "dbtest", hostname, "0") require.NoError(t, err) // Make it the primary. 
@@ -78,7 +78,7 @@ func TestResetReplicationParameters(t *testing.T) { log.Info(fmt.Sprintf("Started vttablet %v", tablet)) // Start vttablet process as replica. It won't be able to serve because there's no db. - err = clusterInstance.StartVttablet(tablet, "NOT_SERVING", false, cell, "dbtest", hostname, "0") + err = clusterInstance.StartVttablet(tablet, false, "NOT_SERVING", false, cell, "dbtest", hostname, "0") require.NoError(t, err) // Set a replication source on the tablet and start replication diff --git a/go/test/endtoend/tabletmanager/throttler/throttler_test.go b/go/test/endtoend/tabletmanager/throttler/throttler_test.go deleted file mode 100644 index 5ca4bc32a87..00000000000 --- a/go/test/endtoend/tabletmanager/throttler/throttler_test.go +++ /dev/null @@ -1,319 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package throttler - -import ( - "context" - "flag" - "fmt" - "io" - "net/http" - "os" - "testing" - "time" - - "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" - - "vitess.io/vitess/go/test/endtoend/cluster" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var ( - clusterInstance *cluster.LocalProcessCluster - primaryTablet *cluster.Vttablet - replicaTablet *cluster.Vttablet - hostname = "localhost" - keyspaceName = "ks" - cell = "zone1" - sqlSchema = ` - create table t1( - id bigint, - value varchar(16), - primary key(id) - ) Engine=InnoDB; -` - - vSchema = ` - { - "sharded": true, - "vindexes": { - "hash": { - "type": "hash" - } - }, - "tables": { - "t1": { - "column_vindexes": [ - { - "column": "id", - "name": "hash" - } - ] - } - } - }` - - httpClient = base.SetupHTTPClient(time.Second) - throttledAppsAPIPath = "throttler/throttled-apps" - checkAPIPath = "throttler/check" - checkSelfAPIPath = "throttler/check-self" -) - -const ( - throttlerThreshold = 1 * time.Second // standard, tight threshold - onDemandHeartbeatDuration = 5 * time.Second - applyConfigWait = 15 * time.Second // time after which we're sure the throttler has refreshed config and tablets -) - -func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) - flag.Parse() - - exitCode := func() int { - clusterInstance = cluster.NewCluster(cell, hostname) - defer clusterInstance.Teardown() - - // Start topo server - err := clusterInstance.StartTopo() - if err != nil { - return 1 - } - - // Set extra tablet args for lock timeout - clusterInstance.VtTabletExtraArgs = []string{ - "--throttler-config-via-topo=false", - "--lock_tables_timeout", "5s", - "--watch_replication_stream", - "--enable_replication_reporter", - "--enable-lag-throttler", - "--throttle_threshold", throttlerThreshold.String(), - "--heartbeat_interval", "250ms", - "--heartbeat_on_demand_duration", onDemandHeartbeatDuration.String(), - "--disable_active_reparents", - } - - // 
Start keyspace - keyspace := &cluster.Keyspace{ - Name: keyspaceName, - SchemaSQL: sqlSchema, - VSchema: vSchema, - } - - if err = clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false); err != nil { - return 1 - } - - // Collect table paths and ports - tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets - for _, tablet := range tablets { - if tablet.Type == "primary" { - primaryTablet = tablet - } else if tablet.Type != "rdonly" { - replicaTablet = tablet - } - } - - return m.Run() - }() - os.Exit(exitCode) -} - -func throttledApps(tablet *cluster.Vttablet) (resp *http.Response, respBody string, err error) { - resp, err = httpClient.Get(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, throttledAppsAPIPath)) - if err != nil { - return resp, respBody, err - } - b, err := io.ReadAll(resp.Body) - if err != nil { - return resp, respBody, err - } - respBody = string(b) - return resp, respBody, err -} - -func throttleCheck(tablet *cluster.Vttablet, skipRequestHeartbeats bool) (*http.Response, error) { - return httpClient.Get(fmt.Sprintf("http://localhost:%d/%s?s=%t", tablet.HTTPPort, checkAPIPath, skipRequestHeartbeats)) -} - -func throttleCheckSelf(tablet *cluster.Vttablet) (*http.Response, error) { - return httpClient.Head(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, checkSelfAPIPath)) -} - -func warmUpHeartbeat(t *testing.T) (respStatus int) { - // because we run with -heartbeat_on_demand_duration=5s, the heartbeat is "cold" right now. - // Let's warm it up. 
- resp, err := throttleCheck(primaryTablet, false) - require.NoError(t, err) - defer resp.Body.Close() - time.Sleep(time.Second) - return resp.StatusCode -} - -// waitForThrottleCheckStatus waits for the tablet to return the provided HTTP code in a throttle check -func waitForThrottleCheckStatus(t *testing.T, tablet *cluster.Vttablet, wantCode int) { - _ = warmUpHeartbeat(t) - ctx, cancel := context.WithTimeout(context.Background(), onDemandHeartbeatDuration+applyConfigWait) - defer cancel() - - for { - resp, err := throttleCheck(tablet, true) - require.NoError(t, err) - - if wantCode == resp.StatusCode { - // Wait for any cached check values to be cleared and the new - // status value to be in effect everywhere before returning. - resp.Body.Close() - return - } - select { - case <-ctx.Done(): - b, err := io.ReadAll(resp.Body) - require.NoError(t, err) - resp.Body.Close() - - assert.Equal(t, wantCode, resp.StatusCode, "body: %v", string(b)) - return - default: - resp.Body.Close() - time.Sleep(time.Second) - } - } -} - -func TestThrottlerAfterMetricsCollected(t *testing.T) { - defer cluster.PanicHandler(t) - - // We run with on-demand heartbeats. Immediately as the tablet manager opens, it sends a one-time - // request for heartbeats, which means the throttler is able to collect initial "good" data. - // After a few seconds, the heartbeat lease terminates. We wait for that. 
- // {"StatusCode":429,"Value":4.864921,"Threshold":1,"Message":"Threshold exceeded"} - t.Run("expect push back once initial heartbeat lease terminates", func(t *testing.T) { - time.Sleep(onDemandHeartbeatDuration) - waitForThrottleCheckStatus(t, primaryTablet, http.StatusTooManyRequests) - }) - t.Run("requesting heartbeats", func(t *testing.T) { - respStatus := warmUpHeartbeat(t) - assert.NotEqual(t, http.StatusOK, respStatus) - }) - t.Run("expect OK once heartbeats lease renewed", func(t *testing.T) { - time.Sleep(1 * time.Second) - resp, err := throttleCheck(primaryTablet, false) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) - t.Run("expect OK once heartbeats lease renewed, still", func(t *testing.T) { - time.Sleep(1 * time.Second) - resp, err := throttleCheck(primaryTablet, false) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) - t.Run("validate throttled-apps", func(t *testing.T) { - resp, body, err := throttledApps(primaryTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - assert.Contains(t, body, "always-throttled-app") - }) - t.Run("validate check-self", func(t *testing.T) { - resp, err := throttleCheckSelf(primaryTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) - t.Run("validate check-self, again", func(t *testing.T) { - resp, err := throttleCheckSelf(replicaTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) -} - -func TestLag(t *testing.T) { - defer cluster.PanicHandler(t) - // Stop VTOrc because we want to stop replication to increase lag. - // We don't want VTOrc to fix this. 
- clusterInstance.DisableVTOrcRecoveries(t) - defer clusterInstance.EnableVTOrcRecoveries(t) - - t.Run("stopping replication", func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", replicaTablet.Alias) - assert.NoError(t, err) - }) - t.Run("accumulating lag, expecting throttler push back", func(t *testing.T) { - time.Sleep(2 * throttlerThreshold) - - resp, err := throttleCheck(primaryTablet, false) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode) - }) - t.Run("primary self-check should still be fine", func(t *testing.T) { - resp, err := throttleCheckSelf(primaryTablet) - require.NoError(t, err) - defer resp.Body.Close() - // self (on primary) is unaffected by replication lag - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) - t.Run("replica self-check should show error", func(t *testing.T) { - resp, err := throttleCheckSelf(replicaTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode) - }) - t.Run("starting replication", func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", replicaTablet.Alias) - assert.NoError(t, err) - }) - t.Run("expecting replication to catch up and throttler check to return OK", func(t *testing.T) { - waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) - }) - t.Run("primary self-check should be fine", func(t *testing.T) { - resp, err := throttleCheckSelf(primaryTablet) - require.NoError(t, err) - defer resp.Body.Close() - // self (on primary) is unaffected by replication lag - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) - t.Run("replica self-check should be fine", func(t *testing.T) { - resp, err := throttleCheckSelf(replicaTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) -} - -func TestNoReplicas(t *testing.T) { - defer 
cluster.PanicHandler(t) - t.Run("changing replica to RDONLY", func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "RDONLY") - assert.NoError(t, err) - - // This makes no REPLICA servers available. We expect something like: - // {"StatusCode":200,"Value":0,"Threshold":1,"Message":""} - waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) - }) - t.Run("restoring to REPLICA", func(t *testing.T) { - - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "REPLICA") - assert.NoError(t, err) - - waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) - }) -} diff --git a/go/test/endtoend/tabletmanager/throttler_custom_config/throttler_test.go b/go/test/endtoend/tabletmanager/throttler_custom_config/throttler_test.go deleted file mode 100644 index e173384eb62..00000000000 --- a/go/test/endtoend/tabletmanager/throttler_custom_config/throttler_test.go +++ /dev/null @@ -1,264 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package throttler - -import ( - "context" - "flag" - "fmt" - "net/http" - "os" - "sync" - "testing" - "time" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" - - "vitess.io/vitess/go/test/endtoend/cluster" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var ( - clusterInstance *cluster.LocalProcessCluster - primaryTablet *cluster.Vttablet - replicaTablet *cluster.Vttablet - hostname = "localhost" - keyspaceName = "ks" - cell = "zone1" - sqlSchema = ` - create table t1( - id bigint, - value varchar(16), - primary key(id) - ) Engine=InnoDB; -` - - vSchema = ` - { - "sharded": true, - "vindexes": { - "hash": { - "type": "hash" - } - }, - "tables": { - "t1": { - "column_vindexes": [ - { - "column": "id", - "name": "hash" - } - ] - } - } - }` - - httpClient = base.SetupHTTPClient(time.Second) - checkAPIPath = "throttler/check" - checkSelfAPIPath = "throttler/check-self" - vtParams mysql.ConnParams -) - -const ( - testThreshold = 5 - applyConfigWait = 15 * time.Second // time after which we're sure the throttler has refreshed config and tablets - statusWaitTimeout = 30 * time.Second -) - -func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) - flag.Parse() - - exitCode := func() int { - clusterInstance = cluster.NewCluster(cell, hostname) - defer clusterInstance.Teardown() - - // Start topo server - err := clusterInstance.StartTopo() - if err != nil { - return 1 - } - - // Set extra tablet args for lock timeout - clusterInstance.VtTabletExtraArgs = []string{ - "--throttler-config-via-topo=false", - "--lock_tables_timeout", "5s", - "--watch_replication_stream", - "--enable_replication_reporter", - "--enable-lag-throttler", - "--throttle_metrics_query", "show global status like 'threads_running'", - "--throttle_metrics_threshold", fmt.Sprintf("%d", testThreshold), - "--throttle_check_as_check_self", - "--heartbeat_interval", "250ms", - } - - 
// Start keyspace - keyspace := &cluster.Keyspace{ - Name: keyspaceName, - SchemaSQL: sqlSchema, - VSchema: vSchema, - } - - if err = clusterInstance.StartUnshardedKeyspace(*keyspace, 0, false); err != nil { - return 1 - } - - // Collect table paths and ports - tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets - for _, tablet := range tablets { - if tablet.Type == "primary" { - primaryTablet = tablet - } else if tablet.Type != "rdonly" { - replicaTablet = tablet - } - } - - vtgateInstance := clusterInstance.NewVtgateInstance() - // Start vtgate - if err := vtgateInstance.Setup(); err != nil { - return 1 - } - // ensure it is torn down during cluster TearDown - clusterInstance.VtgateProcess = *vtgateInstance - vtParams = mysql.ConnParams{ - Host: clusterInstance.Hostname, - Port: clusterInstance.VtgateMySQLPort, - } - - return m.Run() - }() - os.Exit(exitCode) -} - -func throttleCheck(tablet *cluster.Vttablet) (*http.Response, error) { - resp, err := httpClient.Get(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, checkAPIPath)) - return resp, err -} - -func waitForThrottlerStatus(tablet *cluster.Vttablet, status int) error { - ctx, cancel := context.WithTimeout(context.Background(), statusWaitTimeout) - defer cancel() - tkr := time.NewTicker(100 * time.Millisecond) - defer tkr.Stop() - - for { - resp, _ := throttleCheck(tablet) - seenStatus := resp.StatusCode - resp.Body.Close() - if seenStatus == status { - return nil - } - select { - case <-ctx.Done(): - return fmt.Errorf("timed out waiting for expected throttler status %d after %v; last seen value: %d", - status, statusWaitTimeout, seenStatus) - case <-tkr.C: - } - } -} - -func throttleCheckSelf(tablet *cluster.Vttablet) (*http.Response, error) { - return httpClient.Head(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, checkSelfAPIPath)) -} - -func TestThrottlerThresholdOK(t *testing.T) { - defer cluster.PanicHandler(t) - - t.Run("immediately", func(t *testing.T) { - // The tablet throttler 
can still be initializing so we wait for - // the status to be OK. - err := waitForThrottlerStatus(primaryTablet, http.StatusOK) - require.NoError(t, err) - }) - t.Run("after long wait", func(t *testing.T) { - time.Sleep(applyConfigWait) - resp, err := throttleCheck(primaryTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) -} - -func TestThreadsRunning(t *testing.T) { - defer cluster.PanicHandler(t) - - sleepDuration := 10 * time.Second - var wg sync.WaitGroup - for i := 0; i < testThreshold; i++ { - // generate different Sleep() calls, all at minimum sleepDuration - wg.Add(1) - go func(i int) { - defer wg.Done() - vtgateExec(t, fmt.Sprintf("select sleep(%d)", int(sleepDuration.Seconds())+i), "") - }(i) - } - t.Run("exceeds threshold", func(t *testing.T) { - time.Sleep(sleepDuration / 2) - // by this time we will have testThreshold+1 threads_running, and we should hit the threshold - // {"StatusCode":429,"Value":2,"Threshold":2,"Message":"Threshold exceeded"} - { - resp, err := throttleCheck(primaryTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode) - } - { - resp, err := throttleCheckSelf(primaryTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode) - } - }) - t.Run("wait for queries to terminate", func(t *testing.T) { - wg.Wait() - }) - t.Run("restored below threshold", func(t *testing.T) { - { - resp, err := throttleCheck(primaryTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - } - { - resp, err := throttleCheckSelf(primaryTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - } - }) -} - -func vtgateExec(t *testing.T, query string, expectError string) *sqltypes.Result { - t.Helper() - - ctx := context.Background() - conn, err := 
mysql.Connect(ctx, &vtParams) - require.NoError(t, err) - defer conn.Close() - - qr, err := conn.ExecuteFetch(query, 1000, true) - if expectError == "" { - require.NoError(t, err) - } else { - require.Error(t, err, "error should not be nil") - assert.Contains(t, err.Error(), expectError, "Unexpected error") - } - return qr -} diff --git a/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go index 654870fae97..7c0f05bdcc2 100644 --- a/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go +++ b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go @@ -27,11 +27,12 @@ import ( "time" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/throttler" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -45,6 +46,7 @@ const ( onDemandHeartbeatDuration = 5 * time.Second throttlerEnabledTimeout = 60 * time.Second useDefaultQuery = "" + testAppName = "test" ) var ( @@ -170,12 +172,12 @@ func throttledApps(tablet *cluster.Vttablet) (resp *http.Response, respBody stri } func throttleCheck(tablet *cluster.Vttablet, skipRequestHeartbeats bool) (*http.Response, error) { - resp, err := httpClient.Get(fmt.Sprintf("http://localhost:%d/%s?app=test&s=%t", tablet.HTTPPort, checkAPIPath, skipRequestHeartbeats)) + resp, err := httpClient.Get(fmt.Sprintf("http://localhost:%d/%s?app=%s&s=%t", tablet.HTTPPort, checkAPIPath, testAppName, skipRequestHeartbeats)) return resp, err } func throttleCheckSelf(tablet *cluster.Vttablet) (*http.Response, error) { - return httpClient.Get(fmt.Sprintf("http://localhost:%d/%s?app=test", tablet.HTTPPort, checkSelfAPIPath)) + return 
httpClient.Get(fmt.Sprintf("http://localhost:%d/%s?app=%s", tablet.HTTPPort, checkSelfAPIPath, testAppName)) } func warmUpHeartbeat(t *testing.T) (respStatus int) { @@ -245,7 +247,7 @@ func TestInitialThrottler(t *testing.T) { waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) }) t.Run("enabling throttler with very low threshold", func(t *testing.T) { - _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, unreasonablyLowThreshold.Seconds(), useDefaultQuery) + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, unreasonablyLowThreshold.Seconds(), useDefaultQuery, nil) assert.NoError(t, err) // Wait for the throttler to be enabled everywhere with the new config. @@ -257,7 +259,7 @@ func TestInitialThrottler(t *testing.T) { waitForThrottleCheckStatus(t, primaryTablet, http.StatusTooManyRequests) }) t.Run("disabling throttler", func(t *testing.T) { - _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, false, true, unreasonablyLowThreshold.Seconds(), useDefaultQuery) + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, false, true, unreasonablyLowThreshold.Seconds(), useDefaultQuery, nil) assert.NoError(t, err) // Wait for the throttler to be disabled everywhere. @@ -271,7 +273,7 @@ func TestInitialThrottler(t *testing.T) { t.Run("enabling throttler, again", func(t *testing.T) { // Enable the throttler again with the default query which also moves us back // to the default threshold. - _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, 0, useDefaultQuery) + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, 0, useDefaultQuery, nil) assert.NoError(t, err) // Wait for the throttler to be enabled everywhere again with the default config. 
@@ -283,7 +285,7 @@ func TestInitialThrottler(t *testing.T) { waitForThrottleCheckStatus(t, primaryTablet, http.StatusTooManyRequests) }) t.Run("setting high threshold", func(t *testing.T) { - _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, false, false, extremelyHighThreshold.Seconds(), useDefaultQuery) + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, false, false, extremelyHighThreshold.Seconds(), useDefaultQuery, nil) assert.NoError(t, err) // Wait for the throttler to be enabled everywhere with new config. @@ -295,7 +297,7 @@ func TestInitialThrottler(t *testing.T) { waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) }) t.Run("setting low threshold", func(t *testing.T) { - _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, false, false, throttler.DefaultThreshold.Seconds(), useDefaultQuery) + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, false, false, throttler.DefaultThreshold.Seconds(), useDefaultQuery, nil) assert.NoError(t, err) // Wait for the throttler to be enabled everywhere with new config. 
@@ -392,6 +394,26 @@ func TestLag(t *testing.T) { defer resp.Body.Close() assert.Equalf(t, http.StatusTooManyRequests, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) }) + t.Run("exempting test app", func(t *testing.T) { + appRule := &topodatapb.ThrottledAppRule{ + Name: testAppName, + ExpiresAt: protoutil.TimeToProto(time.Now().Add(time.Hour)), + Exempt: true, + } + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, false, false, throttler.DefaultThreshold.Seconds(), useDefaultQuery, appRule) + assert.NoError(t, err) + waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) + }) + t.Run("unexempting test app", func(t *testing.T) { + appRule := &topodatapb.ThrottledAppRule{ + Name: testAppName, + ExpiresAt: protoutil.TimeToProto(time.Now()), + } + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, false, false, throttler.DefaultThreshold.Seconds(), useDefaultQuery, appRule) + assert.NoError(t, err) + waitForThrottleCheckStatus(t, primaryTablet, http.StatusTooManyRequests) + }) + t.Run("starting replication", func(t *testing.T) { err := clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", replicaTablet.Alias) assert.NoError(t, err) @@ -436,7 +458,7 @@ func TestCustomQuery(t *testing.T) { defer cluster.PanicHandler(t) t.Run("enabling throttler with custom query and threshold", func(t *testing.T) { - _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, customThreshold, customQuery) + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, customThreshold, customQuery, nil) assert.NoError(t, err) // Wait for the throttler to be enabled everywhere with new custom config. @@ -504,7 +526,7 @@ func TestRestoreDefaultQuery(t *testing.T) { // Validate going back from custom-query to default-query (replication lag) still works. 
t.Run("enabling throttler with default query and threshold", func(t *testing.T) { - _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, throttler.DefaultThreshold.Seconds(), useDefaultQuery) + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, throttler.DefaultThreshold.Seconds(), useDefaultQuery, nil) assert.NoError(t, err) // Wait for the throttler to be up and running everywhere again with the default config. diff --git a/go/test/endtoend/throttler/util.go b/go/test/endtoend/throttler/util.go index c6a7b2f69a5..40cfdb53118 100644 --- a/go/test/endtoend/throttler/util.go +++ b/go/test/endtoend/throttler/util.go @@ -18,6 +18,7 @@ package throttler import ( "context" + "encoding/json" "fmt" "io" "net/http" @@ -25,12 +26,17 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tidwall/gjson" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" ) type Config struct { @@ -40,7 +46,7 @@ type Config struct { const ( DefaultQuery = "select unix_timestamp(now(6))-max(ts/1000000000) as replication_lag from _vt.heartbeat" - DefaultThreshold = 1 * time.Second + DefaultThreshold = 5 * time.Second ConfigTimeout = 60 * time.Second ) @@ -53,7 +59,7 @@ var DefaultConfig = &Config{ // This retries the command until it succeeds or times out as the // SrvKeyspace record may not yet exist for a newly created // Keyspace that is still initializing before it becomes serving. 
-func UpdateThrottlerTopoConfigRaw(vtctldProcess *cluster.VtctldClientProcess, keyspaceName string, enable bool, disable bool, threshold float64, metricsQuery string) (result string, err error) { +func UpdateThrottlerTopoConfigRaw(vtctldProcess *cluster.VtctldClientProcess, keyspaceName string, enable bool, disable bool, threshold float64, metricsQuery string, appRule *topodatapb.ThrottledAppRule) (result string, err error) { args := []string{} args = append(args, "UpdateThrottlerConfig") if enable { @@ -71,6 +77,14 @@ func UpdateThrottlerTopoConfigRaw(vtctldProcess *cluster.VtctldClientProcess, ke } else { args = append(args, "--check-as-check-shard") } + if appRule != nil { + args = append(args, "--throttle-app", appRule.Name) + args = append(args, "--throttle-app-duration", time.Until(protoutil.TimeFromProto(appRule.ExpiresAt).UTC()).String()) + args = append(args, "--throttle-app-ratio", fmt.Sprintf("%f", appRule.Ratio)) + if appRule.Exempt { + args = append(args, "--throttle-app-exempt") + } + } args = append(args, keyspaceName) ctx, cancel := context.WithTimeout(context.Background(), ConfigTimeout) @@ -96,14 +110,94 @@ func UpdateThrottlerTopoConfigRaw(vtctldProcess *cluster.VtctldClientProcess, ke // This retries the command until it succeeds or times out as the // SrvKeyspace record may not yet exist for a newly created // Keyspace that is still initializing before it becomes serving. 
-func UpdateThrottlerTopoConfig(clusterInstance *cluster.LocalProcessCluster, enable bool, disable bool, threshold float64, metricsQuery string) (string, error) { +func UpdateThrottlerTopoConfig(clusterInstance *cluster.LocalProcessCluster, enable bool, disable bool, threshold float64, metricsQuery string, appRule *topodatapb.ThrottledAppRule) (string, error) { + rec := concurrency.AllErrorRecorder{} + var ( + err error + res strings.Builder + ) + for _, ks := range clusterInstance.Keyspaces { + ires, err := UpdateThrottlerTopoConfigRaw(&clusterInstance.VtctldClientProcess, ks.Name, enable, disable, threshold, metricsQuery, appRule) + if err != nil { + rec.RecordError(err) + } + res.WriteString(ires) + } + if rec.HasErrors() { + err = rec.Error() + } + return res.String(), err +} + +// WaitForSrvKeyspace waits until the given srvkeyspace entry is found in the given cell +func WaitForSrvKeyspace(clusterInstance *cluster.LocalProcessCluster, cell, keyspace string) error { + args := []string{"GetSrvKeyspaceNames", cell} + + ctx, cancel := context.WithTimeout(context.Background(), ConfigTimeout) + defer cancel() + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for { + result, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput(args...) + if err != nil { + return err + } + if strings.Contains(result, `"`+keyspace+`"`) { + return nil + } + select { + case <-ctx.Done(): + return fmt.Errorf("timed out waiting for GetSrvKeyspaceNames to contain '%v'", keyspace) + case <-ticker.C: + } + } +} + +// throttleAppRaw runs vtctlclient UpdateThrottlerConfig with --throttle-app flags +// This retries the command until it succeeds or times out as the +// SrvKeyspace record may not yet exist for a newly created +// Keyspace that is still initializing before it becomes serving. 
+func throttleAppRaw(vtctldProcess *cluster.VtctldClientProcess, keyspaceName string, throttlerApp throttlerapp.Name, throttle bool) (result string, err error) { + args := []string{} + args = append(args, "UpdateThrottlerConfig") + if throttle { + args = append(args, "--throttle-app", throttlerApp.String()) + args = append(args, "--throttle-app-duration", "1h") + } else { + args = append(args, "--unthrottle-app", throttlerApp.String()) + } + args = append(args, keyspaceName) + + ctx, cancel := context.WithTimeout(context.Background(), ConfigTimeout) + defer cancel() + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for { + result, err = vtctldProcess.ExecuteCommandWithOutput(args...) + if err == nil { + return result, nil + } + select { + case <-ctx.Done(): + return "", fmt.Errorf("timed out waiting for UpdateThrottlerConfig to succeed after %v; last seen value: %+v, error: %v", ConfigTimeout, result, err) + case <-ticker.C: + } + } +} + +// throttleApp throttles or unthrottles an app +func throttleApp(clusterInstance *cluster.LocalProcessCluster, throttlerApp throttlerapp.Name, throttle bool) (string, error) { rec := concurrency.AllErrorRecorder{} var ( err error res strings.Builder ) for _, ks := range clusterInstance.Keyspaces { - ires, err := UpdateThrottlerTopoConfigRaw(&clusterInstance.VtctldClientProcess, ks.Name, enable, disable, threshold, metricsQuery) + ires, err := throttleAppRaw(&clusterInstance.VtctldClientProcess, ks.Name, throttlerApp, throttle) if err != nil { rec.RecordError(err) } @@ -115,6 +209,46 @@ func UpdateThrottlerTopoConfig(clusterInstance *cluster.LocalProcessCluster, ena return res.String(), err } +// ThrottleApp throttles given app name for the next hour +func ThrottleApp(clusterInstance *cluster.LocalProcessCluster, throttlerApp throttlerapp.Name) (string, error) { + return throttleApp(clusterInstance, throttlerApp, true) +} + +// ThrottleApp unthrottles given app name +func UnthrottleApp(clusterInstance 
*cluster.LocalProcessCluster, throttlerApp throttlerapp.Name) (string, error) { + return throttleApp(clusterInstance, throttlerApp, false) +} + +func WaitUntilTabletsConfirmThrottledApp(t *testing.T, clusterInstance *cluster.LocalProcessCluster, throttlerApp throttlerapp.Name, expectThrottled bool) { + for _, ks := range clusterInstance.Keyspaces { + for _, shard := range ks.Shards { + for _, tablet := range shard.Vttablets { + WaitForThrottledApp(t, tablet, throttlerApp, expectThrottled, ConfigTimeout) + } + } + } +} + +// ThrottleAppAndWaitUntilTabletsConfirm +func ThrottleAppAndWaitUntilTabletsConfirm(t *testing.T, clusterInstance *cluster.LocalProcessCluster, throttlerApp throttlerapp.Name) (string, error) { + res, err := throttleApp(clusterInstance, throttlerApp, true) + if err != nil { + return res, err + } + WaitUntilTabletsConfirmThrottledApp(t, clusterInstance, throttlerApp, true) + return res, nil +} + +// UnthrottleAppAndWaitUntilTabletsConfirm +func UnthrottleAppAndWaitUntilTabletsConfirm(t *testing.T, clusterInstance *cluster.LocalProcessCluster, throttlerApp throttlerapp.Name) (string, error) { + res, err := throttleApp(clusterInstance, throttlerApp, false) + if err != nil { + return res, err + } + WaitUntilTabletsConfirmThrottledApp(t, clusterInstance, throttlerApp, false) + return res, nil +} + // WaitForThrottlerStatusEnabled waits for a tablet to report its throttler status as // enabled/disabled and have the provided config (if any) until the specified timeout. func WaitForThrottlerStatusEnabled(t *testing.T, tablet *cluster.Vttablet, enabled bool, config *Config, timeout time.Duration) { @@ -161,11 +295,57 @@ func WaitForThrottlerStatusEnabled(t *testing.T, tablet *cluster.Vttablet, enabl } } +// WaitForThrottlerStatusEnabled waits for a tablet to report its throttler status as +// enabled/disabled and have the provided config (if any) until the specified timeout. 
+func WaitForThrottledApp(t *testing.T, tablet *cluster.Vttablet, throttlerApp throttlerapp.Name, expectThrottled bool, timeout time.Duration) { + throttledAppsURL := fmt.Sprintf("http://localhost:%d/throttler/throttled-apps", tablet.HTTPPort) + tabletURL := fmt.Sprintf("http://localhost:%d/debug/status_details", tablet.HTTPPort) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for { + throttledAppsBody := getHTTPBody(throttledAppsURL) + var throttledApps []base.AppThrottle + err := json.Unmarshal([]byte(throttledAppsBody), &throttledApps) + assert.NoError(t, err) + require.NotEmpty(t, throttledApps) // "always-throttled-app" is always there. + appFoundThrottled := false + for _, throttledApp := range throttledApps { + if throttledApp.AppName == throttlerApp.String() && throttledApp.ExpireAt.After(time.Now()) { + appFoundThrottled = true + break + } + } + if appFoundThrottled == expectThrottled { + return + } + // If the tablet is Not Serving due to e.g. being involved in a + // Reshard where its QueryService is explicitly disabled, then + // we should not fail the test as the throttler will not be Open. 
+ tabletBody := getHTTPBody(tabletURL) + class := strings.ToLower(gjson.Get(tabletBody, "0.Class").String()) + value := strings.ToLower(gjson.Get(tabletBody, "0.Value").String()) + if class == "unhappy" && strings.Contains(value, "not serving") { + log.Infof("tablet %s is Not Serving, so ignoring throttler status as the throttler will not be Opened", tablet.Alias) + return + } + select { + case <-ctx.Done(): + t.Errorf("timed out waiting for the %s tablet's throttled apps with the correct config (expecting %s to be %v) after %v; last seen value: %s", + tablet.Alias, throttlerApp.String(), expectThrottled, timeout, throttledAppsBody) + return + case <-ticker.C: + } + } +} + // EnableLagThrottlerAndWaitForStatus is a utility function to enable the throttler at the beginning of an endtoend test. // The throttler is configued to use the standard replication lag metric. The function waits until the throttler is confirmed // to be running on all tablets. func EnableLagThrottlerAndWaitForStatus(t *testing.T, clusterInstance *cluster.LocalProcessCluster, lag time.Duration) { - _, err := UpdateThrottlerTopoConfig(clusterInstance, true, false, lag.Seconds(), "") + _, err := UpdateThrottlerTopoConfig(clusterInstance, true, false, lag.Seconds(), "", nil) require.NoError(t, err) for _, ks := range clusterInstance.Keyspaces { diff --git a/go/test/endtoend/utils/cmp.go b/go/test/endtoend/utils/cmp.go index 34f5417f8b5..38726d6c3aa 100644 --- a/go/test/endtoend/utils/cmp.go +++ b/go/test/endtoend/utils/cmp.go @@ -89,7 +89,7 @@ func (mcmp *MySQLCompare) AssertMatchesAny(query string, expected ...string) { func (mcmp *MySQLCompare) AssertMatchesAnyNoCompare(query string, expected ...string) { mcmp.t.Helper() - mQr, vQr := mcmp.execNoCompare(query) + mQr, vQr := mcmp.ExecNoCompare(query) got := fmt.Sprintf("%v", mQr.Rows) valid := false for _, e := range expected { @@ -171,7 +171,7 @@ func (mcmp *MySQLCompare) AssertFoundRowsValue(query, workload string, count int // 
AssertMatchesNoCompare compares the record of mysql and vitess separately and not with each other. func (mcmp *MySQLCompare) AssertMatchesNoCompare(query, mExp string, vExp string) { mcmp.t.Helper() - mQr, vQr := mcmp.execNoCompare(query) + mQr, vQr := mcmp.ExecNoCompare(query) got := fmt.Sprintf("%v", mQr.Rows) diff := cmp.Diff(mExp, got) if diff != "" { @@ -200,7 +200,8 @@ func (mcmp *MySQLCompare) Exec(query string) *sqltypes.Result { return vtQr } -func (mcmp *MySQLCompare) execNoCompare(query string) (*sqltypes.Result, *sqltypes.Result) { +// ExecNoCompare executes the query on vitess and mysql but does not compare the result with each other. +func (mcmp *MySQLCompare) ExecNoCompare(query string) (*sqltypes.Result, *sqltypes.Result) { mcmp.t.Helper() vtQr, err := mcmp.VtConn.ExecuteFetch(query, 1000, true) require.NoError(mcmp.t, err, "[Vitess Error] for query: "+query) @@ -232,6 +233,8 @@ func (mcmp *MySQLCompare) ExecWithColumnCompare(query string) *sqltypes.Result { // - MySQL and Vitess did not find an error, but their results are matching // // The result set and error produced by Vitess are returned to the caller. +// If the Vitess and MySQL error are both nil, but the results do not match, +// the mismatched results are instead returned as an error, as well as the Vitess result set func (mcmp *MySQLCompare) ExecAllowAndCompareError(query string) (*sqltypes.Result, error) { mcmp.t.Helper() vtQr, vtErr := mcmp.VtConn.ExecuteFetch(query, 1000, true) @@ -241,7 +244,7 @@ func (mcmp *MySQLCompare) ExecAllowAndCompareError(query string) (*sqltypes.Resu // Since we allow errors, we don't want to compare results if one of the client failed. // Vitess and MySQL should always be agreeing whether the query returns an error or not. 
if vtErr == nil && mysqlErr == nil { - compareVitessAndMySQLResults(mcmp.t, query, mcmp.VtConn, vtQr, mysqlQr, false) + vtErr = compareVitessAndMySQLResults(mcmp.t, query, mcmp.VtConn, vtQr, mysqlQr, false) } return vtQr, vtErr } @@ -253,3 +256,33 @@ func (mcmp *MySQLCompare) ExecAndIgnore(query string) (*sqltypes.Result, error) _, _ = mcmp.MySQLConn.ExecuteFetch(query, 1000, true) return mcmp.VtConn.ExecuteFetch(query, 1000, true) } + +func (mcmp *MySQLCompare) Run(query string, f func(mcmp *MySQLCompare)) { + mcmp.t.Run(query, func(t *testing.T) { + inner := &MySQLCompare{ + t: t, + MySQLConn: mcmp.MySQLConn, + VtConn: mcmp.VtConn, + } + f(inner) + }) +} + +// ExecAllowError executes the query against both Vitess and MySQL. +// If there is no error, it compares the result +// Return any Vitess execution error without comparing the results. +func (mcmp *MySQLCompare) ExecAllowError(query string) (*sqltypes.Result, error) { + mcmp.t.Helper() + vtQr, vtErr := mcmp.VtConn.ExecuteFetch(query, 1000, true) + if vtErr != nil { + return nil, vtErr + } + mysqlQr, mysqlErr := mcmp.MySQLConn.ExecuteFetch(query, 1000, true) + + // Since we allow errors, we don't want to compare results if one of the client failed. + // Vitess and MySQL should always be agreeing whether the query returns an error or not. 
+ if mysqlErr == nil { + vtErr = compareVitessAndMySQLResults(mcmp.t, query, mcmp.VtConn, vtQr, mysqlQr, false) + } + return vtQr, vtErr +} diff --git a/go/test/endtoend/utils/mysql.go b/go/test/endtoend/utils/mysql.go index 6249d639a4d..de8ce40f992 100644 --- a/go/test/endtoend/utils/mysql.go +++ b/go/test/endtoend/utils/mysql.go @@ -18,6 +18,7 @@ package utils import ( "context" + "errors" "fmt" "os" "path" @@ -154,24 +155,27 @@ func prepareMySQLWithSchema(params mysql.ConnParams, sql string) error { return nil } -func compareVitessAndMySQLResults(t *testing.T, query string, vtConn *mysql.Conn, vtQr, mysqlQr *sqltypes.Result, compareColumns bool) { +func compareVitessAndMySQLResults(t *testing.T, query string, vtConn *mysql.Conn, vtQr, mysqlQr *sqltypes.Result, compareColumns bool) error { if vtQr == nil && mysqlQr == nil { - return + return nil } if vtQr == nil { t.Error("Vitess result is 'nil' while MySQL's is not.") - return + return errors.New("Vitess result is 'nil' while MySQL's is not.\n") } if mysqlQr == nil { t.Error("MySQL result is 'nil' while Vitess' is not.") - return + return errors.New("MySQL result is 'nil' while Vitess' is not.\n") } + + var errStr string if compareColumns { vtColCount := len(vtQr.Fields) myColCount := len(mysqlQr.Fields) if vtColCount > 0 && myColCount > 0 { if vtColCount != myColCount { t.Errorf("column count does not match: %d vs %d", vtColCount, myColCount) + errStr += fmt.Sprintf("column count does not match: %d vs %d\n", vtColCount, myColCount) } var vtCols []string @@ -180,38 +184,44 @@ func compareVitessAndMySQLResults(t *testing.T, query string, vtConn *mysql.Conn vtCols = append(vtCols, vtField.Name) myCols = append(myCols, mysqlQr.Fields[i].Name) } - assert.Equal(t, myCols, vtCols, "column names do not match - the expected values are what mysql produced") + if !assert.Equal(t, myCols, vtCols, "column names do not match - the expected values are what mysql produced") { + errStr += "column names do not match - the 
expected values are what mysql produced\n" + errStr += fmt.Sprintf("Not equal: \nexpected: %v\nactual: %v\n", myCols, vtCols) + } } } stmt, err := sqlparser.Parse(query) if err != nil { t.Error(err) - return + return err } orderBy := false if selStmt, isSelStmt := stmt.(sqlparser.SelectStatement); isSelStmt { orderBy = selStmt.GetOrderBy() != nil } - if orderBy && sqltypes.ResultsEqual([]sqltypes.Result{*vtQr}, []sqltypes.Result{*mysqlQr}) { - return - } else if sqltypes.ResultsEqualUnordered([]sqltypes.Result{*vtQr}, []sqltypes.Result{*mysqlQr}) { - return + if (orderBy && sqltypes.ResultsEqual([]sqltypes.Result{*vtQr}, []sqltypes.Result{*mysqlQr})) || sqltypes.ResultsEqualUnordered([]sqltypes.Result{*vtQr}, []sqltypes.Result{*mysqlQr}) { + return nil } - errStr := "Query (" + query + ") results mismatched.\nVitess Results:\n" + errStr += "Query (" + query + ") results mismatched.\nVitess Results:\n" for _, row := range vtQr.Rows { errStr += fmt.Sprintf("%s\n", row) } + errStr += fmt.Sprintf("Vitess RowsAffected: %v\n", vtQr.RowsAffected) errStr += "MySQL Results:\n" for _, row := range mysqlQr.Rows { errStr += fmt.Sprintf("%s\n", row) } + errStr += fmt.Sprintf("MySQL RowsAffected: %v\n", mysqlQr.RowsAffected) if vtConn != nil { - qr := Exec(t, vtConn, fmt.Sprintf("vexplain plan %s", query)) - errStr += fmt.Sprintf("query plan: \n%s\n", qr.Rows[0][0].ToString()) + qr, _ := ExecAllowError(t, vtConn, fmt.Sprintf("vexplain plan %s", query)) + if qr != nil && len(qr.Rows) > 0 { + errStr += fmt.Sprintf("query plan: \n%s\n", qr.Rows[0][0].ToString()) + } } t.Error(errStr) + return errors.New(errStr) } func compareVitessAndMySQLErrors(t *testing.T, vtErr, mysqlErr error) { diff --git a/go/test/endtoend/utils/mysqlvsvitess/main_test.go b/go/test/endtoend/utils/mysqlvsvitess/main_test.go index f59132c3858..8f162fae41d 100644 --- a/go/test/endtoend/utils/mysqlvsvitess/main_test.go +++ b/go/test/endtoend/utils/mysqlvsvitess/main_test.go @@ -83,7 +83,7 @@ func TestMain(m 
*testing.M) { VSchema: vschema, } clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal"} - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-schema-change-signal-interval", "0.1"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal"} err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false) if err != nil { return 1 diff --git a/go/test/endtoend/utils/utils.go b/go/test/endtoend/utils/utils.go index 2e44292504d..594ca35b633 100644 --- a/go/test/endtoend/utils/utils.go +++ b/go/test/endtoend/utils/utils.go @@ -17,7 +17,10 @@ limitations under the License. package utils import ( + "context" "fmt" + "os" + "path" "strings" "testing" "time" @@ -169,11 +172,17 @@ func ExecCompareMySQL(t *testing.T, vtConn, mysqlConn *mysql.Conn, query string) // ExecAllowError executes the given query without failing the test if it produces // an error. The error is returned to the client, along with the result set. -func ExecAllowError(t *testing.T, conn *mysql.Conn, query string) (*sqltypes.Result, error) { +func ExecAllowError(t testing.TB, conn *mysql.Conn, query string) (*sqltypes.Result, error) { t.Helper() return conn.ExecuteFetch(query, 1000, true) } +// ExecWithRowCount is similar to ExecAllowError with max row count provided. +func ExecWithRowCount(t testing.TB, conn *mysql.Conn, query string, rowCount int) (*sqltypes.Result, error) { + t.Helper() + return conn.ExecuteFetch(query, rowCount, true) +} + // SkipIfBinaryIsBelowVersion skips the given test if the binary's major version is below majorVersion. 
func SkipIfBinaryIsBelowVersion(t *testing.T, majorVersion int, binary string) { version, err := cluster.GetMajorVersion(binary) @@ -221,7 +230,7 @@ func AssertMatchesWithTimeout(t *testing.T, conn *mysql.Conn, query, expected st // WaitForAuthoritative waits for a table to become authoritative func WaitForAuthoritative(t *testing.T, ks, tbl string, readVSchema func() (*interface{}, error)) error { - timeout := time.After(10 * time.Second) + timeout := time.After(60 * time.Second) for { select { case <-timeout: @@ -246,7 +255,7 @@ func WaitForAuthoritative(t *testing.T, ks, tbl string, readVSchema func() (*int // WaitForColumn waits for a table's column to be present func WaitForColumn(t *testing.T, vtgateProcess cluster.VtgateProcess, ks, tbl, col string) error { - timeout := time.After(10 * time.Second) + timeout := time.After(60 * time.Second) for { select { case <-timeout: @@ -334,3 +343,59 @@ func TimeoutAction(t *testing.T, timeout time.Duration, errMsg string, action fu } } } + +// RunSQLs is used to run a list of SQL statements on the given tablet +func RunSQLs(t *testing.T, sqls []string, tablet *cluster.Vttablet, db string) error { + // Get Connection + tabletParams := getMysqlConnParam(tablet, db) + var timeoutDuration = time.Duration(5 * len(sqls)) + ctx, cancel := context.WithTimeout(context.Background(), timeoutDuration*time.Second) + defer cancel() + conn, err := mysql.Connect(ctx, &tabletParams) + require.Nil(t, err) + defer conn.Close() + + // Run SQLs + for _, sql := range sqls { + if _, err := execute(t, conn, sql); err != nil { + return err + } + } + return nil +} + +// RunSQL is used to run a SQL statement on the given tablet +func RunSQL(t *testing.T, sql string, tablet *cluster.Vttablet, db string) (*sqltypes.Result, error) { + // Get Connection + tabletParams := getMysqlConnParam(tablet, db) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + conn, err := mysql.Connect(ctx, &tabletParams) + 
require.Nil(t, err) + defer conn.Close() + + // RunSQL + return execute(t, conn, sql) +} + +// GetMySQLConn gets a MySQL connection for the given tablet +func GetMySQLConn(tablet *cluster.Vttablet, db string) (*mysql.Conn, error) { + tabletParams := getMysqlConnParam(tablet, db) + return mysql.Connect(context.Background(), &tabletParams) +} + +func execute(t *testing.T, conn *mysql.Conn, query string) (*sqltypes.Result, error) { + t.Helper() + return conn.ExecuteFetch(query, 1000, true) +} + +func getMysqlConnParam(tablet *cluster.Vttablet, db string) mysql.ConnParams { + connParams := mysql.ConnParams{ + Uname: "vt_dba", + UnixSocket: path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d/mysql.sock", tablet.TabletUID)), + } + if db != "" { + connParams.DbName = db + } + return connParams +} diff --git a/go/test/endtoend/vreplication/cluster_test.go b/go/test/endtoend/vreplication/cluster_test.go index 12539b778de..af93ac40726 100644 --- a/go/test/endtoend/vreplication/cluster_test.go +++ b/go/test/endtoend/vreplication/cluster_test.go @@ -30,6 +30,8 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" @@ -52,8 +54,9 @@ var ( sidecarDBIdentifier = sqlparser.String(sqlparser.NewIdentifierCS(sidecarDBName)) mainClusterConfig *ClusterConfig externalClusterConfig *ClusterConfig - extraVTGateArgs = []string{"--tablet_refresh_interval", "10ms"} - extraVtctldArgs = []string{"--remote_operation_timeout", "600s", "--topo_etcd_lease_ttl", "120"} + extraVTGateArgs = []string{"--tablet_refresh_interval", "10ms", "--enable_buffer", "--buffer_window", loadTestBufferingWindowDurationStr, + "--buffer_size", "100000", "--buffer_min_time_between_failovers", "0s", "--buffer_max_failover_duration", loadTestBufferingWindowDurationStr} + extraVtctldArgs = []string{"--remote_operation_timeout", "600s", "--topo_etcd_lease_ttl", "120"} // This variable 
can be used within specific tests to alter vttablet behavior extraVTTabletArgs = []string{} @@ -86,6 +89,7 @@ type ClusterConfig struct { // VitessCluster represents all components within the test cluster type VitessCluster struct { + t *testing.T ClusterConfig *ClusterConfig Name string Cells map[string]*Cell @@ -244,19 +248,32 @@ func downloadDBTypeVersion(dbType string, majorVersion string, path string) erro if _, err := os.Stat(file); err == nil { return nil } - resp, err := client.Get(url) - if err != nil { - return fmt.Errorf("error downloading contents of %s to %s. Error: %v", url, file, err) + downloadFile := func() error { + resp, err := client.Get(url) + if err != nil { + return fmt.Errorf("error downloading contents of %s to %s. Error: %v", url, file, err) + } + defer resp.Body.Close() + out, err := os.Create(file) + if err != nil { + return fmt.Errorf("error creating file %s to save the contents of %s. Error: %v", file, url, err) + } + defer out.Close() + _, err = io.Copy(out, resp.Body) + if err != nil { + return fmt.Errorf("error saving contents of %s to %s. Error: %v", url, file, err) + } + return nil } - defer resp.Body.Close() - out, err := os.Create(file) - if err != nil { - return fmt.Errorf("error creating file %s to save the contents of %s. Error: %v", file, url, err) + retries := 5 + var dlerr error + for i := 0; i < retries; i++ { + if dlerr = downloadFile(); dlerr == nil { + break + } } - defer out.Close() - _, err = io.Copy(out, resp.Body) - if err != nil { - return fmt.Errorf("error saving contents of %s to %s. 
Error: %v", url, file, err) + if dlerr != nil { + return dlerr } untarCmd := exec.Command("/bin/sh", "-c", fmt.Sprintf("tar xvf %s -C %s --strip-components=1", file, path)) @@ -304,7 +321,6 @@ func init() { if os.Getenv("VREPLICATION_E2E_DEBUG") != "" { debugMode = true } - rand.Seed(time.Now().UTC().UnixNano()) originalVtdataroot = os.Getenv("VTDATAROOT") var mainVtDataRoot string if debugMode { @@ -318,8 +334,11 @@ func init() { // NewVitessCluster starts a basic cluster with vtgate, vtctld and the topo func NewVitessCluster(t *testing.T, name string, cellNames []string, clusterConfig *ClusterConfig) *VitessCluster { - vc := &VitessCluster{Name: name, Cells: make(map[string]*Cell), ClusterConfig: clusterConfig} + vc := &VitessCluster{t: t, Name: name, Cells: make(map[string]*Cell), ClusterConfig: clusterConfig} require.NotNil(t, vc) + + vc.CleanupDataroot(t, true) + topo := cluster.TopoProcessInstance(vc.ClusterConfig.topoPort, vc.ClusterConfig.topoPort+1, vc.ClusterConfig.hostname, "etcd2", "global") require.NotNil(t, topo) @@ -355,6 +374,26 @@ func NewVitessCluster(t *testing.T, name string, cellNames []string, clusterConf return vc } +// CleanupDataroot deletes the vtdataroot directory. Since we run multiple tests sequentially in a single CI test shard, +// we can run out of disk space due to all the leftover artifacts from previous tests. +func (vc *VitessCluster) CleanupDataroot(t *testing.T, recreate bool) { + // This is always set to "true" on GitHub Actions runners: + // https://docs.github.com/en/actions/learn-github-actions/variables#default-environment-variables + ci, ok := os.LookupEnv("CI") + if !ok || strings.ToLower(ci) != "true" { + // Leave the directory in place to support local debugging. 
+ return + } + dir := vc.ClusterConfig.vtdataroot + log.Infof("Deleting vtdataroot %s", dir) + err := os.RemoveAll(dir) + require.NoError(t, err) + if recreate { + err = os.Mkdir(dir, 0700) + require.NoError(t, err) + } +} + // AddKeyspace creates a keyspace with specified shard keys and number of replica/read-only tablets. // You can pass optional key value pairs (opts) if you want conditional behavior. func (vc *VitessCluster) AddKeyspace(t *testing.T, cells []*Cell, ksName string, shards string, vschema string, schema string, numReplicas int, numRdonly int, tabletIDBase int, opts map[string]string) (*Keyspace, error) { @@ -369,7 +408,7 @@ func (vc *VitessCluster) AddKeyspace(t *testing.T, cells []*Cell, ksName string, } log.Infof("Applying throttler config for keyspace %s", keyspace.Name) - res, err := throttler.UpdateThrottlerTopoConfigRaw(vc.VtctldClient, keyspace.Name, true, false, throttlerConfig.Threshold, throttlerConfig.Query) + res, err := throttler.UpdateThrottlerTopoConfigRaw(vc.VtctldClient, keyspace.Name, true, false, throttlerConfig.Threshold, throttlerConfig.Query, nil) require.NoError(t, err, res) cellsToWatch := "" @@ -533,7 +572,43 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa for ind, proc := range dbProcesses { log.Infof("Waiting for mysql process for tablet %s", tablets[ind].Name) if err := proc.Wait(); err != nil { - t.Fatalf("%v :: Unable to start mysql server for %v", err, tablets[ind].Vttablet) + // Retry starting the database process before giving up. + t.Logf("%v :: Unable to start mysql server for %v. Will cleanup files and processes, then retry...", err, tablets[ind].Vttablet) + tablets[ind].DbServer.CleanupFiles(tablets[ind].Vttablet.TabletUID) + // Kill any process we own that's listening on the port we + // want to use as that is the most common problem. 
+ tablets[ind].DbServer.Stop() + if _, err = exec.Command("fuser", "-n", "tcp", "-k", fmt.Sprintf("%d", tablets[ind].DbServer.MySQLPort)).Output(); err != nil { + log.Errorf("Failed to kill process listening on port %d: %v", tablets[ind].DbServer.MySQLPort, err) + } + // Sleep for the kernel's TCP TIME_WAIT timeout to avoid the + // port already in use error, which is the common cause for + // the process not starting. It's a long wait, but it's worth + // avoiding the test/workflow failure that otherwise occurs. + time.Sleep(60 * time.Second) + dbcmd, err := tablets[ind].DbServer.StartProcess() + require.NoError(t, err) + if err = dbcmd.Wait(); err != nil { + // Get logs to help understand why it failed... + vtdataroot := os.Getenv("VTDATAROOT") + mysqlctlLog := path.Join(vtdataroot, "/tmp/mysqlctl.INFO") + logBytes, ferr := os.ReadFile(mysqlctlLog) + if ferr == nil { + log.Errorf("mysqlctl log contents:\n%s", string(logBytes)) + } else { + log.Errorf("Failed to read the mysqlctl log file %q: %v", mysqlctlLog, ferr) + } + mysqldLog := path.Join(vtdataroot, fmt.Sprintf("/vt_%010d/error.log", tablets[ind].Vttablet.TabletUID)) + logBytes, ferr = os.ReadFile(mysqldLog) + if ferr == nil { + log.Errorf("mysqld error log contents:\n%s", string(logBytes)) + } else { + log.Errorf("Failed to read the mysqld error log file %q: %v", mysqldLog, ferr) + } + output, _ := dbcmd.CombinedOutput() + t.Fatalf("%v :: Unable to start mysql server for %v; Output: %s", err, + tablets[ind].Vttablet, string(output)) + } } } for ind, tablet := range tablets { @@ -613,7 +688,7 @@ func (vc *VitessCluster) AddCell(t testing.TB, name string) (*Cell, error) { return cell, nil } -func (vc *VitessCluster) teardown(t testing.TB) { +func (vc *VitessCluster) teardown() { for _, cell := range vc.Cells { for _, vtgate := range cell.Vtgates { if err := vtgate.TearDown(); err != nil { @@ -640,7 +715,7 @@ func (vc *VitessCluster) teardown(t testing.TB) { go func(tablet2 *Tablet) { defer wg.Done() if 
tablet2.DbServer != nil && tablet2.DbServer.TabletUID > 0 { - if _, err := tablet2.DbServer.StopProcess(); err != nil { + if err := tablet2.DbServer.Stop(); err != nil { log.Infof("Error stopping mysql process: %s", err.Error()) } } @@ -676,13 +751,13 @@ func (vc *VitessCluster) teardown(t testing.TB) { } // TearDown brings down a cluster, deleting processes, removing topo keys -func (vc *VitessCluster) TearDown(t testing.TB) { +func (vc *VitessCluster) TearDown(t *testing.T) { if debugMode { return } done := make(chan bool) go func() { - vc.teardown(t) + vc.teardown() done <- true }() select { @@ -693,6 +768,7 @@ func (vc *VitessCluster) TearDown(t testing.TB) { } // some processes seem to hang around for a bit time.Sleep(5 * time.Second) + vc.CleanupDataroot(t, false) } func (vc *VitessCluster) getVttabletsInKeyspace(t *testing.T, cell *Cell, ksName string, tabletType string) map[string]*cluster.VttabletProcess { @@ -730,6 +806,10 @@ func (vc *VitessCluster) getPrimaryTablet(t *testing.T, ksName, shardName string return nil } +func (vc *VitessCluster) GetVTGateConn(t *testing.T) *mysql.Conn { + return getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) +} + func (vc *VitessCluster) startQuery(t *testing.T, query string) (func(t *testing.T), func(t *testing.T)) { conn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) _, err := conn.ExecuteFetch("begin", 1000, false) diff --git a/go/test/endtoend/vreplication/config_test.go b/go/test/endtoend/vreplication/config_test.go index 213ad0bcc75..0e430548a13 100644 --- a/go/test/endtoend/vreplication/config_test.go +++ b/go/test/endtoend/vreplication/config_test.go @@ -55,10 +55,11 @@ create table _vt_PURGE_4f9194b43b2011eb8a0104ed332e05c2_20221210194431(id int, v create table db_order_test (c_uuid varchar(64) not null default '', created_at datetime not null, dstuff varchar(128), dtstuff text, dbstuff blob, cstuff char(32), primary key (c_uuid,created_at), key 
(dstuff)) CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; create table vdiff_order (order_id varchar(50) collate utf8mb4_unicode_ci not null, primary key (order_id), key (order_id)) charset=utf8mb4 COLLATE=utf8mb4_unicode_ci; create table datze (id int, dt1 datetime not null default current_timestamp, dt2 datetime not null, ts1 timestamp default current_timestamp, primary key (id), key (dt1)); -create table json_tbl (id int, j1 json, j2 json, primary key(id)); +create table json_tbl (id int, j1 json, j2 json, j3 json not null, primary key(id)); create table geom_tbl (id int, g geometry, p point, ls linestring, pg polygon, mp multipoint, mls multilinestring, mpg multipolygon, gc geometrycollection, primary key(id)); create table ` + "`blüb_tbl`" + ` (id int, val1 varchar(20), ` + "`blöb1`" + ` blob, val2 varbinary(20), ` + "`bl@b2`" + ` longblob, txt1 text, blb3 tinyblob, txt2 longtext, blb4 mediumblob, primary key(id)); create table reftable (id int, val1 varchar(20), primary key(id), key(val1)); +create table loadtest (id int, name varchar(256), primary key(id), key(name)); ` // These should always be ignored in vreplication internalSchema = ` @@ -73,31 +74,33 @@ create table reftable (id int, val1 varchar(20), primary key(id), key(val1)); initialProductVSchema = ` { "tables": { - "product": {}, - "merchant": {}, - "orders": {}, - "customer": {}, - "customer_seq": { - "type": "sequence" - }, - "customer2": {}, - "customer_seq2": { - "type": "sequence" - }, - "order_seq": { - "type": "sequence" - }, - "Lead": {}, - "Lead-1": {}, - "db_order_test": {}, - "vdiff_order": {}, - "datze": {}, - "reftable": { - "type": "reference" - } + "product": {}, + "merchant": {}, + "orders": {}, + "loadtest": {}, + "customer": {}, + "customer_seq": { + "type": "sequence" + }, + "customer2": {}, + "customer_seq2": { + "type": "sequence" + }, + "order_seq": { + "type": "sequence" + }, + "Lead": {}, + "Lead-1": {}, + "db_order_test": {}, + "vdiff_order": {}, + "datze": {}, + "reftable": { 
+ "type": "reference" + } } } ` + customerSchema = "" customerVSchema = ` { @@ -117,6 +120,14 @@ create table reftable (id int, val1 varchar(20), primary key(id), key(val1)); } }, "tables": { + "loadtest": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, "customer": { "column_vindexes": [ { @@ -283,7 +294,15 @@ create table reftable (id int, val1 varchar(20), primary key(id), key(val1)); } }, "tables": { - "customer": { + "loadtest": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, + "customer": { "column_vindexes": [ { "column": "cid", @@ -362,11 +381,11 @@ create table reftable (id int, val1 varchar(20), primary key(id), key(val1)); materializeProductSpec = ` { "workflow": "cproduct", - "sourceKeyspace": "product", - "targetKeyspace": "customer", - "tableSettings": [{ - "targetTable": "cproduct", - "sourceExpression": "select * from product", + "source_keyspace": "product", + "target_keyspace": "customer", + "table_settings": [{ + "target_table": "cproduct", + "source_expression": "select * from product", "create_ddl": "create table cproduct(pid bigint, description varchar(128), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key(pid)) CHARSET=utf8mb4" }] } @@ -417,11 +436,11 @@ create table reftable (id int, val1 varchar(20), primary key(id), key(val1)); materializeMerchantOrdersSpec = ` { "workflow": "morders", - "sourceKeyspace": "customer", - "targetKeyspace": "merchant-type", - "tableSettings": [{ - "targetTable": "morders", - "sourceExpression": "select oid, cid, mname, pid, price, qty, total from orders", + "source_keyspace": "customer", + "target_keyspace": "merchant-type", + "table_settings": [{ + "target_table": "morders", + "source_expression": "select oid, cid, mname, pid, price, qty, total from orders", "create_ddl": "create table morders(oid int, cid int, mname varchar(128), pid int, price int, qty int, total 
int, total2 int as (10 * total), primary key(oid)) CHARSET=utf8" }] } @@ -430,11 +449,11 @@ create table reftable (id int, val1 varchar(20), primary key(id), key(val1)); materializeMerchantSalesSpec = ` { "workflow": "msales", - "sourceKeyspace": "customer", - "targetKeyspace": "merchant-type", - "tableSettings": [{ - "targetTable": "msales", - "sourceExpression": "select mname as merchant_name, count(*) as kount, sum(price) as amount from orders group by merchant_name", + "source_keyspace": "customer", + "target_keyspace": "merchant-type", + "table_settings": [{ + "target_table": "msales", + "source_expression": "select mname as merchant_name, count(*) as kount, sum(price) as amount from orders group by merchant_name", "create_ddl": "create table msales(merchant_name varchar(128), kount int, amount int, primary key(merchant_name)) CHARSET=utf8" }] } @@ -457,11 +476,11 @@ create table reftable (id int, val1 varchar(20), primary key(id), key(val1)); materializeSalesSpec = ` { "workflow": "sales", - "sourceKeyspace": "customer", - "targetKeyspace": "product", - "tableSettings": [{ - "targetTable": "sales", - "sourceExpression": "select pid, count(*) as kount, sum(price) as amount from orders group by pid", + "source_keyspace": "customer", + "target_keyspace": "product", + "table_settings": [{ + "target_Table": "sales", + "source_expression": "select pid, count(*) as kount, sum(price) as amount from orders group by pid", "create_ddl": "create table sales(pid int, kount int, amount int, primary key(pid)) CHARSET=utf8" }] } @@ -469,11 +488,11 @@ create table reftable (id int, val1 varchar(20), primary key(id), key(val1)); materializeRollupSpec = ` { "workflow": "rollup", - "sourceKeyspace": "product", - "targetKeyspace": "product", - "tableSettings": [{ - "targetTable": "rollup", - "sourceExpression": "select 'total' as rollupname, count(*) as kount from product group by rollupname", + "source_keyspace": "product", + "target_keyspace": "product", + "table_settings": [{ 
+ "target_table": "rollup", + "source_expression": "select 'total' as rollupname, count(*) as kount from product group by rollupname", "create_ddl": "create table rollup(rollupname varchar(100), kount int, primary key (rollupname)) CHARSET=utf8mb4" }] } diff --git a/go/test/endtoend/vreplication/fk_config_test.go b/go/test/endtoend/vreplication/fk_config_test.go new file mode 100644 index 00000000000..5b02aeb62bb --- /dev/null +++ b/go/test/endtoend/vreplication/fk_config_test.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vreplication + +var ( + initialFKSchema = ` +create table parent(id int, name varchar(128), primary key(id)) engine=innodb; +create table child(id int, parent_id int, name varchar(128), primary key(id), foreign key(parent_id) references parent(id) on delete cascade) engine=innodb; +` + initialFKData = ` +insert into parent values(1, 'parent1'), (2, 'parent2'); +insert into child values(1, 1, 'child11'), (2, 1, 'child21'), (3, 2, 'child32');` + + initialFKSourceVSchema = ` +{ + "tables": { + "parent": {}, + "child": {} + } +} +` + + initialFKTargetVSchema = ` +{ + "sharded": true, + "vindexes": { + "reverse_bits": { + "type": "reverse_bits" + } + }, + "tables": { + "parent": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, + "child": { + "column_vindexes": [ + { + "column": "parent_id", + "name": "reverse_bits" + } + ] + } + } +} +` +) diff --git a/go/test/endtoend/vreplication/fk_test.go b/go/test/endtoend/vreplication/fk_test.go new file mode 100644 index 00000000000..31886864f11 --- /dev/null +++ b/go/test/endtoend/vreplication/fk_test.go @@ -0,0 +1,274 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vreplication + +import ( + "context" + "fmt" + "math/rand" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/log" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +// TestFKWorkflow runs a MoveTables workflow with atomic copy for a db with foreign key constraints. +// It inserts initial data, then simulates load. We insert both child rows with foreign keys and those without, +// i.e. with foreign_key_checks=0. +func TestFKWorkflow(t *testing.T) { + // ensure that there are multiple copy phase cycles per table + extraVTTabletArgs = []string{"--vstream_packet_size=256"} + defer func() { extraVTTabletArgs = nil }() + + cellName := "zone" + cells := []string{cellName} + vc = NewVitessCluster(t, "TestFKWorkflow", cells, mainClusterConfig) + + require.NotNil(t, vc) + allCellNames = cellName + defaultCellName := cellName + defaultCell = vc.Cells[defaultCellName] + sourceKeyspace := "fksource" + shardName := "0" + + defer vc.TearDown(t) + + cell := vc.Cells[cellName] + vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, initialFKSourceVSchema, initialFKSchema, 0, 0, 100, sourceKsOpts) + + vtgate = cell.Vtgates[0] + require.NotNil(t, vtgate) + err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKeyspace, shardName) + require.NoError(t, err) + vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKeyspace, shardName), 1, 30*time.Second) + + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + verifyClusterHealth(t, vc) + + var ls *fkLoadSimulator + + insertInitialFKData(t) + withLoad := true // Set it to false to skip load simulation, while debugging + var cancel context.CancelFunc + var ctx context.Context + if withLoad { + ctx, cancel = context.WithCancel(context.Background()) + ls = 
newFKLoadSimulator(t, ctx) + defer func() { + select { + case <-ctx.Done(): + default: + cancel() + } + }() + go ls.simulateLoad() + } + targetKeyspace := "fktarget" + targetTabletId := 200 + vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialFKTargetVSchema, initialFKSchema, 0, 0, targetTabletId, sourceKsOpts) + vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKeyspace, shardName), 1, 30*time.Second) + + workflowName := "fk" + ksWorkflow := fmt.Sprintf("%s.%s", targetKeyspace, workflowName) + + mt := newMoveTables(vc, &moveTables{ + workflowName: workflowName, + targetKeyspace: targetKeyspace, + sourceKeyspace: sourceKeyspace, + atomicCopy: true, + }, moveTablesFlavorRandom) + mt.Create() + + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + targetKs := vc.Cells[cellName].Keyspaces[targetKeyspace] + targetTab := targetKs.Shards["0"].Tablets[fmt.Sprintf("%s-%d", cellName, targetTabletId)].Vttablet + require.NotNil(t, targetTab) + catchup(t, targetTab, workflowName, "MoveTables") + vdiff(t, targetKeyspace, workflowName, cellName, true, false, nil) + ls.waitForAdditionalRows(200) + vdiff(t, targetKeyspace, workflowName, cellName, true, false, nil) + if withLoad { + cancel() + <-ch + } + mt.SwitchReadsAndWrites() + + log.Infof("Switch traffic done") + + if withLoad { + ctx, cancel = context.WithCancel(context.Background()) + ls = newFKLoadSimulator(t, ctx) + defer cancel() + go ls.simulateLoad() + } + ls.waitForAdditionalRows(200) + if withLoad { + cancel() + <-ch + } +} + +func insertInitialFKData(t *testing.T) { + t.Run("insertInitialFKData", func(t *testing.T) { + sourceKeyspace := "fksource" + shard := "0" + db := fmt.Sprintf("%s:%s", sourceKeyspace, shard) + log.Infof("Inserting initial FK data") + execMultipleQueries(t, vtgateConn, db, initialFKData) + log.Infof("Done inserting initial FK data") + waitForRowCount(t, vtgateConn, db, "parent", 2) + waitForRowCount(t, vtgateConn, 
db, "child", 3) + }) +} + +var currentParentId int64 +var currentChildId int64 + +func init() { + currentParentId = 100 + currentChildId = 100 +} + +var ch = make(chan bool) + +type fkLoadSimulator struct { + t *testing.T + ctx context.Context +} + +func newFKLoadSimulator(t *testing.T, ctx context.Context) *fkLoadSimulator { + return &fkLoadSimulator{ + t: t, + ctx: ctx, + } +} + +func (ls *fkLoadSimulator) simulateLoad() { + t := ls.t + var err error + for i := 0; ; i++ { + if i%1000 == 0 { + log.Infof("Load simulation iteration %d", i) + } + select { + case <-ls.ctx.Done(): + ch <- true + return + default: + } + // Decide operation based on random number + op := rand.Intn(100) + switch { + case op < 50: // 50% chance to insert + ls.insert() + case op < 80: // 30% chance to update + ls.update() + default: // 20% chance to delete + ls.delete() + } + require.NoError(t, err) + time.Sleep(1 * time.Millisecond) + } +} + +func (ls *fkLoadSimulator) getNumRowsParent(vtgateConn *mysql.Conn) int { + t := ls.t + qr := execVtgateQuery(t, vtgateConn, "fksource", "SELECT COUNT(*) FROM parent") + require.NotNil(t, qr) + numRows, err := strconv.Atoi(qr.Rows[0][0].ToString()) + require.NoError(t, err) + return numRows +} + +func (ls *fkLoadSimulator) waitForAdditionalRows(count int) { + t := ls.t + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + numRowsStart := ls.getNumRowsParent(vtgateConn) + shortCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + for { + switch { + case shortCtx.Err() != nil: + t.Fatalf("Timed out waiting for additional rows") + default: + numRows := ls.getNumRowsParent(vtgateConn) + if numRows >= numRowsStart+count { + return + } + time.Sleep(10 * time.Millisecond) + } + } +} + +func (ls *fkLoadSimulator) insert() { + t := ls.t + currentParentId++ + insertQuery := fmt.Sprintf("INSERT INTO parent (id) VALUES (%d)", currentParentId) + qr := 
ls.exec(insertQuery) + require.NotNil(t, qr) + // insert one or more children, some with valid foreign keys, some without. + for i := 0; i < rand.Intn(4)+1; i++ { + currentChildId++ + if i == 3 { + insertQuery = fmt.Sprintf("INSERT /*+ SET_VAR(foreign_key_checks=0) */ INTO child (id, parent_id) VALUES (%d, %d)", currentChildId, currentParentId+1000000) + ls.exec(insertQuery) + } else { + insertQuery = fmt.Sprintf("INSERT INTO child (id, parent_id) VALUES (%d, %d)", currentChildId, currentParentId) + ls.exec(insertQuery) + } + } +} + +func (ls *fkLoadSimulator) getRandomId() int64 { + t := ls.t + selectQuery := "SELECT id FROM parent ORDER BY RAND() LIMIT 1" + qr := ls.exec(selectQuery) + require.NotNil(t, qr) + if len(qr.Rows) == 0 { + return 0 + } + id, err := qr.Rows[0][0].ToInt64() + require.NoError(t, err) + return id +} + +func (ls *fkLoadSimulator) update() { + updateQuery := fmt.Sprintf("UPDATE parent SET name = 'parent%d' WHERE id = %d", rand.Intn(1000)+1, ls.getRandomId()) + ls.exec(updateQuery) +} + +func (ls *fkLoadSimulator) delete() { + deleteQuery := fmt.Sprintf("DELETE FROM parent WHERE id = %d", ls.getRandomId()) + ls.exec(deleteQuery) +} + +func (ls *fkLoadSimulator) exec(query string) *sqltypes.Result { + t := ls.t + qr := execVtgateQuery(t, vtgateConn, "fksource", query) + require.NotNil(t, qr) + return qr +} diff --git a/go/test/endtoend/vreplication/helper_test.go b/go/test/endtoend/vreplication/helper_test.go index 49f35d49b06..839ffdfa306 100644 --- a/go/test/endtoend/vreplication/helper_test.go +++ b/go/test/endtoend/vreplication/helper_test.go @@ -27,18 +27,18 @@ import ( "os/exec" "regexp" "sort" - "strconv" "strings" + "sync/atomic" "testing" "time" - "github.com/PuerkitoBio/goquery" "github.com/buger/jsonparser" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tidwall/gjson" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqlescape" 
"vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" @@ -46,16 +46,14 @@ import ( "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) const ( defaultTick = 1 * time.Second - defaultTimeout = 30 * time.Second + defaultTimeout = 60 * time.Second workflowStateTimeout = 90 * time.Second - workflowStateCopying = "Copying" // nolint - workflowStateRunning = "Running" // nolint - workflowStateStopped = "Stopped" // nolint - workflowStateError = "Error" // nolint ) func execMultipleQueries(t *testing.T, conn *mysql.Conn, database string, lines string) { @@ -160,10 +158,13 @@ func waitForNoWorkflowLag(t *testing.T, vc *VitessCluster, keyspace, worfklow st timer := time.NewTimer(defaultTimeout) defer timer.Stop() for { - output, err := vc.VtctlClient.ExecuteCommandWithOutput("Workflow", "--", ksWorkflow, "show") - require.NoError(t, err) - lag, err = jsonparser.GetInt([]byte(output), "MaxVReplicationTransactionLag") + // We don't need log records for this so pass --include-logs=false. + output, err := vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", keyspace, "show", "--workflow", worfklow, "--include-logs=false") require.NoError(t, err) + // Confirm that we got no log records back. 
+ require.NotEmpty(t, len(gjson.Get(output, "workflows.0.shard_streams.*.streams.0").String()), "workflow %q had no streams listed in the output: %s", ksWorkflow, output) + require.Equal(t, 0, len(gjson.Get(output, "workflows.0.shard_streams.*.streams.0.logs").Array()), "workflow %q returned log records when we expected none", ksWorkflow) + lag = gjson.Get(output, "workflows.0.max_v_replication_lag").Int() if lag == 0 { return } @@ -232,12 +233,57 @@ func waitForRowCountInTablet(t *testing.T, vttablet *cluster.VttabletProcess, da } } -func validateThatQueryExecutesOnTablet(t *testing.T, conn *mysql.Conn, tablet *cluster.VttabletProcess, ksName string, query string, matchQuery string) bool { - count := getQueryCount(tablet.QueryzURL, matchQuery) +// waitForSequenceValue queries the provided sequence name in the +// provided database using the provided vtgate connection until +// we get a next value from it. This allows us to move forward +// with queries that rely on the sequence working as expected. +// The read next value is also returned so that the caller can +// use it if they want. +// Note: you specify the number of values that you want to reserve +// and you get back the max value reserved. 
+func waitForSequenceValue(t *testing.T, conn *mysql.Conn, database, sequence string, numVals int) int64 { + query := fmt.Sprintf("select next %d values from %s.%s", numVals, database, sequence) + timer := time.NewTimer(defaultTimeout) + defer timer.Stop() + for { + qr, err := conn.ExecuteFetch(query, 1, false) + if err == nil && qr != nil && len(qr.Rows) == 1 { // We got a value back + val, err := qr.Rows[0][0].ToInt64() + require.NoError(t, err, "invalid sequence value: %v", qr.Rows[0][0]) + return val + } + select { + case <-timer.C: + require.FailNow(t, fmt.Sprintf("sequence %q did not provide a next value before the timeout of %s; last seen result: %+v, error: %v", + sequence, defaultTimeout, qr, err)) + default: + time.Sleep(defaultTick) + } + } +} + +func executeOnTablet(t *testing.T, conn *mysql.Conn, tablet *cluster.VttabletProcess, ksName string, query string, matchQuery string) (int, []byte, int, []byte) { + queryStatsURL := fmt.Sprintf("http://%s:%d/debug/query_stats", tablet.TabletHostname, tablet.Port) + + count0, body0 := getQueryCount(t, queryStatsURL, matchQuery) + qr := execVtgateQuery(t, conn, ksName, query) require.NotNil(t, qr) - newCount := getQueryCount(tablet.QueryzURL, matchQuery) - return newCount == count+1 + + count1, body1 := getQueryCount(t, queryStatsURL, matchQuery) + return count0, body0, count1, body1 +} + +func assertQueryExecutesOnTablet(t *testing.T, conn *mysql.Conn, tablet *cluster.VttabletProcess, ksName string, query string, matchQuery string) { + t.Helper() + count0, body0, count1, body1 := executeOnTablet(t, conn, tablet, ksName, query, matchQuery) + assert.Equalf(t, count0+1, count1, "query %q did not execute in target;\ntried to match %q\nbefore:\n%s\n\nafter:\n%s\n\n", query, matchQuery, body0, body1) +} + +func assertQueryDoesNotExecutesOnTablet(t *testing.T, conn *mysql.Conn, tablet *cluster.VttabletProcess, ksName string, query string, matchQuery string) { + t.Helper() + count0, body0, count1, body1 := 
executeOnTablet(t, conn, tablet, ksName, query, matchQuery) + assert.Equalf(t, count0, count1, "query %q executed in target;\ntried to match %q\nbefore:\n%s\n\nafter:\n%s\n\n", query, matchQuery, body0, body1) } // waitForWorkflowState waits for all of the given workflow's @@ -352,77 +398,36 @@ func confirmTablesHaveSecondaryKeys(t *testing.T, tablets []*cluster.VttabletPro } } -func getHTTPBody(url string) string { +func getHTTPBody(t *testing.T, url string) []byte { resp, err := http.Get(url) - if err != nil { - log.Infof("http Get returns %+v", err) - return "" - } - if resp.StatusCode != 200 { - log.Infof("http Get returns status %d", resp.StatusCode) - return "" - } - respByte, _ := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, 200, resp.StatusCode) + defer resp.Body.Close() - body := string(respByte) + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) return body } -func getQueryCount(url string, query string) int { - var headings, row []string - var rows [][]string - body := getHTTPBody(url) - doc, err := goquery.NewDocumentFromReader(strings.NewReader(body)) - if err != nil { - log.Infof("goquery parsing returns %+v\n", err) - return 0 +func getQueryCount(t *testing.T, url string, query string) (int, []byte) { + body := getHTTPBody(t, url) + + var queryStats []struct { + Query string + QueryCount uint64 } - var queryIndex, countIndex, count int - queryIndex = -1 - countIndex = -1 + err := json.Unmarshal(body, &queryStats) + require.NoError(t, err) - doc.Find("table").Each(func(index int, tablehtml *goquery.Selection) { - tablehtml.Find("tr").Each(func(indextr int, rowhtml *goquery.Selection) { - rowhtml.Find("th").Each(func(indexth int, tableheading *goquery.Selection) { - heading := tableheading.Text() - if heading == "Query" { - queryIndex = indexth - } - if heading == "Count" { - countIndex = indexth - } - headings = append(headings, heading) - }) - rowhtml.Find("td").Each(func(indexth int, tablecell *goquery.Selection) 
{ - row = append(row, tablecell.Text()) - }) - rows = append(rows, row) - row = nil - }) - }) - if queryIndex == -1 || countIndex == -1 { - log.Infof("Queryz response is incorrect") - return 0 - } - for _, row := range rows { - if len(row) != len(headings) { - continue - } - filterChars := []string{"_", "`"} - //Queries seem to include non-printable characters at times and hence equality fails unless these are removed - re := regexp.MustCompile("[[:^ascii:]]") - foundQuery := re.ReplaceAllLiteralString(row[queryIndex], "") - cleanQuery := re.ReplaceAllLiteralString(query, "") - for _, filterChar := range filterChars { - foundQuery = strings.ReplaceAll(foundQuery, filterChar, "") - cleanQuery = strings.ReplaceAll(cleanQuery, filterChar, "") - } - if foundQuery == cleanQuery || strings.Contains(foundQuery, cleanQuery) { - count, _ = strconv.Atoi(row[countIndex]) + for _, q := range queryStats { + if strings.Contains(q.Query, query) { + return int(q.QueryCount), body } } - return count + + return 0, body } func validateDryRunResults(t *testing.T, output string, want []string) { @@ -478,7 +483,17 @@ func checkIfTableExists(t *testing.T, vc *VitessCluster, tabletAlias string, tab return found, nil } -func checkIfDenyListExists(t *testing.T, vc *VitessCluster, ksShard string, table string) (bool, error) { +func validateTableInDenyList(t *testing.T, vc *VitessCluster, ksShard string, table string, mustExist bool) { + found, err := isTableInDenyList(t, vc, ksShard, table) + require.NoError(t, err) + if mustExist { + require.True(t, found, "Table %s not found in deny list", table) + } else { + require.False(t, found, "Table %s found in deny list", table) + } +} + +func isTableInDenyList(t *testing.T, vc *VitessCluster, ksShard string, table string) (bool, error) { var output string var err error found := false @@ -531,23 +546,12 @@ func getDebugVar(t *testing.T, port int, varPath []string) (string, error) { var err error url := fmt.Sprintf("http://localhost:%d/debug/vars", 
port) log.Infof("url: %s, varPath: %s", url, strings.Join(varPath, ":")) - body := getHTTPBody(url) - val, _, _, err = jsonparser.Get([]byte(body), varPath...) + body := getHTTPBody(t, url) + val, _, _, err = jsonparser.Get(body, varPath...) require.NoError(t, err) return string(val), nil } -func getDebugVars(t *testing.T, port int) map[string]any { - out := map[string]any{} - response, err := http.Get(fmt.Sprintf("http://localhost:%d/debug/vars", port)) - if err != nil { - return out - } - defer response.Body.Close() - _ = json.NewDecoder(response.Body).Decode(&out) - return out -} - func confirmWorkflowHasCopiedNoData(t *testing.T, targetKS, workflow string) { timer := time.NewTimer(defaultTimeout) defer timer.Stop() @@ -563,7 +567,7 @@ func confirmWorkflowHasCopiedNoData(t *testing.T, targetKS, workflow string) { state := attributeValue.Get("State").String() pos := attributeValue.Get("Pos").String() // If we've actually copied anything then we'll have a position in the stream - if (state == workflowStateRunning || state == workflowStateCopying) && pos != "" { + if (state == binlogdatapb.VReplicationWorkflowState_Running.String() || state == binlogdatapb.VReplicationWorkflowState_Copying.String()) && pos != "" { require.FailNowf(t, "Unexpected data copied in workflow", "The MoveTables workflow %q copied data in less than %s when it should have been waiting. 
Show output: %s", ksWorkflow, defaultTimeout, output) @@ -698,3 +702,121 @@ func isBinlogRowImageNoBlob(t *testing.T, tablet *cluster.VttabletProcess) bool mode := strings.ToLower(rs.Rows[0][0].ToString()) return mode == "noblob" } + +const ( + loadTestBufferingWindowDurationStr = "30s" + loadTestPostBufferingInsertWindow = 60 * time.Second // should be greater than loadTestBufferingWindowDurationStr + loadTestWaitForCancel = 30 * time.Second + loadTestWaitBetweenQueries = 2 * time.Millisecond +) + +type loadGenerator struct { + t *testing.T + vc *VitessCluster + ctx context.Context + cancel context.CancelFunc +} + +func newLoadGenerator(t *testing.T, vc *VitessCluster) *loadGenerator { + return &loadGenerator{ + t: t, + vc: vc, + } +} + +func (lg *loadGenerator) stop() { + time.Sleep(loadTestPostBufferingInsertWindow) // wait for buffering to stop and additional records to be inserted by startLoad after traffic is switched + log.Infof("Canceling load") + lg.cancel() + time.Sleep(loadTestWaitForCancel) // wait for cancel to take effect + log.Flush() + +} + +func (lg *loadGenerator) start() { + t := lg.t + lg.ctx, lg.cancel = context.WithCancel(context.Background()) + + var id int64 + log.Infof("startLoad: starting") + queryTemplate := "insert into loadtest(id, name) values (%d, 'name-%d')" + var totalQueries, successfulQueries int64 + var deniedErrors, ambiguousErrors, reshardedErrors, tableNotFoundErrors, otherErrors int64 + defer func() { + + log.Infof("startLoad: totalQueries: %d, successfulQueries: %d, deniedErrors: %d, ambiguousErrors: %d, reshardedErrors: %d, tableNotFoundErrors: %d, otherErrors: %d", + totalQueries, successfulQueries, deniedErrors, ambiguousErrors, reshardedErrors, tableNotFoundErrors, otherErrors) + }() + logOnce := true + for { + select { + case <-lg.ctx.Done(): + log.Infof("startLoad: context cancelled") + log.Infof("startLoad: deniedErrors: %d, ambiguousErrors: %d, reshardedErrors: %d, tableNotFoundErrors: %d, otherErrors: %d", + 
deniedErrors, ambiguousErrors, reshardedErrors, tableNotFoundErrors, otherErrors) + require.Equal(t, int64(0), deniedErrors) + require.Equal(t, int64(0), otherErrors) + require.Equal(t, totalQueries, successfulQueries) + return + default: + go func() { + conn := vc.GetVTGateConn(t) + defer conn.Close() + atomic.AddInt64(&id, 1) + query := fmt.Sprintf(queryTemplate, id, id) + _, err := conn.ExecuteFetch(query, 1, false) + atomic.AddInt64(&totalQueries, 1) + if err != nil { + sqlErr := err.(*sqlerror.SQLError) + if strings.Contains(strings.ToLower(err.Error()), "denied tables") { + log.Infof("startLoad: denied tables error executing query: %d:%v", sqlErr.Number(), err) + atomic.AddInt64(&deniedErrors, 1) + } else if strings.Contains(strings.ToLower(err.Error()), "ambiguous") { + // this can happen when a second keyspace is setup with the same tables, but there are no routing rules + // set yet by MoveTables. So we ignore these errors. + atomic.AddInt64(&ambiguousErrors, 1) + } else if strings.Contains(strings.ToLower(err.Error()), "current keyspace is being resharded") { + atomic.AddInt64(&reshardedErrors, 1) + } else if strings.Contains(strings.ToLower(err.Error()), "not found") { + atomic.AddInt64(&tableNotFoundErrors, 1) + } else { + if logOnce { + log.Infof("startLoad: error executing query: %d:%v", sqlErr.Number(), err) + logOnce = false + } + atomic.AddInt64(&otherErrors, 1) + } + time.Sleep(loadTestWaitBetweenQueries) + } else { + atomic.AddInt64(&successfulQueries, 1) + } + }() + time.Sleep(loadTestWaitBetweenQueries) + } + } +} + +func (lg *loadGenerator) waitForCount(want int64) { + t := lg.t + conn := vc.GetVTGateConn(t) + defer conn.Close() + timer := time.NewTimer(defaultTimeout) + defer timer.Stop() + for { + qr, err := conn.ExecuteFetch("select count(*) from loadtest", 1, false) + require.NoError(t, err) + require.NotNil(t, qr) + got, _ := qr.Rows[0][0].ToInt64() + + if int64(got) >= want { + return + } + select { + case <-timer.C: + require.FailNow(t, 
fmt.Sprintf("table %q did not reach the expected number of rows (%d) before the timeout of %s; last seen count: %v", + "loadtest", want, defaultTimeout, got)) + default: + time.Sleep(defaultTick) + } + } +} diff --git a/go/test/endtoend/vreplication/initial_data_test.go b/go/test/endtoend/vreplication/initial_data_test.go index 828c7136373..bf93a040942 100644 --- a/go/test/endtoend/vreplication/initial_data_test.go +++ b/go/test/endtoend/vreplication/initial_data_test.go @@ -48,12 +48,16 @@ const NumJSONRows = 100 func insertJSONValues(t *testing.T) { // insert null value combinations - execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id) values(1)") - execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j1) values(2, \"{}\")") - execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j2) values(3, \"{}\")") + execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j3) values(1, \"{}\")") + execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j1, j3) values(2, \"{}\", \"{}\")") + execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j2, j3) values(3, \"{}\", \"{}\")") + execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j1, j2, j3) values(4, NULL, 'null', '\"null\"')") + execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j3) values(5, JSON_QUOTE('null'))") + execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j3) values(6, '{}')") - id := 4 - q := "insert into json_tbl(id, j1, j2) values(%d, '%s', '%s')" + id := 8 // 6 inserted above and one after copy phase is done + + q := "insert into json_tbl(id, j1, j2, j3) values(%d, '%s', '%s', '{}')" numJsonValues := len(jsonValues) for id <= NumJSONRows { id++ @@ -67,14 +71,25 @@ func insertJSONValues(t *testing.T) { // insertMoreCustomers creates additional customers. // Note: this will only work when the customer sequence is in place. 
func insertMoreCustomers(t *testing.T, numCustomers int) { - sql := "insert into customer (name) values " - i := 0 - for i < numCustomers { - i++ - sql += fmt.Sprintf("('customer%d')", i) + // Let's first be sure that the sequence is working. + // We reserve all of the sequence values we need for + // the number of customer records we are going to + // create. The value we get back is the max value + // that we reserved. + maxID := waitForSequenceValue(t, vtgateConn, "product", "customer_seq", numCustomers) + // So we need to calculate the first value we reserved + // from the max. + cid := maxID - int64(numCustomers) + + // Now let's insert the records using the sequence + // values we reserved. + sql := "insert into customer (cid, name) values " + for i := 1; i <= numCustomers; i++ { + sql += fmt.Sprintf("(%d, 'customer%d')", cid, i) if i != numCustomers { sql += "," } + cid++ } execVtgateQuery(t, vtgateConn, "customer", sql) } diff --git a/go/test/endtoend/vreplication/materialize_test.go b/go/test/endtoend/vreplication/materialize_test.go index a13ec1d0da6..63205a56c0a 100644 --- a/go/test/endtoend/vreplication/materialize_test.go +++ b/go/test/endtoend/vreplication/materialize_test.go @@ -62,7 +62,7 @@ const smMaterializeSpec = `{"workflow": "wf1", "source_keyspace": "ks1", "target const initDataQuery = `insert into ks1.tx(id, typ, val) values (1, 1, 'abc'), (2, 1, 'def'), (3, 2, 'def'), (4, 2, 'abc'), (5, 3, 'def'), (6, 3, 'abc')` // testShardedMaterialize tests a materialize workflow for a sharded cluster (single shard) using comparison filters -func testShardedMaterialize(t *testing.T) { +func testShardedMaterialize(t *testing.T, useVtctldClient bool) { defaultCellName := "zone1" allCells := []string{"zone1"} allCellNames = "zone1" @@ -92,7 +92,7 @@ func testShardedMaterialize(t *testing.T) { verifyClusterHealth(t, vc) _, err = vtgateConn.ExecuteFetch(initDataQuery, 0, false) require.NoError(t, err) - materialize(t, smMaterializeSpec) + materialize(t, 
smMaterializeSpec, useVtctldClient) tab := vc.getPrimaryTablet(t, ks2, "0") catchup(t, tab, "wf1", "Materialize") @@ -181,7 +181,7 @@ DETERMINISTIC RETURN id * length(val); ` -func testMaterialize(t *testing.T) { +func testMaterialize(t *testing.T, useVtctldClient bool) { defaultCellName := "zone1" allCells := []string{"zone1"} allCellNames = "zone1" @@ -217,7 +217,7 @@ func testMaterialize(t *testing.T) { _, err = ks2Primary.QueryTablet(customFunc, targetKs, true) require.NoError(t, err) - materialize(t, smMaterializeSpec2) + materialize(t, smMaterializeSpec2, useVtctldClient) catchup(t, ks2Primary, "wf1", "Materialize") // validate data after the copy phase @@ -234,12 +234,23 @@ func testMaterialize(t *testing.T) { waitForQueryResult(t, vtgateConn, targetKs, "select id, val, ts, day, month, x from mat2", want) } -// TestMaterialize runs all the individual materialize tests defined above +// TestMaterialize runs all the individual materialize tests defined above. func TestMaterialize(t *testing.T) { t.Run("Materialize", func(t *testing.T) { - testMaterialize(t) + testMaterialize(t, false) }) t.Run("ShardedMaterialize", func(t *testing.T) { - testShardedMaterialize(t) + testShardedMaterialize(t, false) + }) +} + +// TestMaterializeVtctldClient runs all the individual materialize tests +// defined above using vtctldclient instead of vtctlclient. 
+func TestMaterializeVtctldClient(t *testing.T) { + t.Run("Materialize", func(t *testing.T) { + testMaterialize(t, true) + }) + t.Run("ShardedMaterialize", func(t *testing.T) { + testShardedMaterialize(t, true) }) } diff --git a/go/test/endtoend/vreplication/migrate_test.go b/go/test/endtoend/vreplication/migrate_test.go index 0c83658cee8..75ab6a3151b 100644 --- a/go/test/endtoend/vreplication/migrate_test.go +++ b/go/test/endtoend/vreplication/migrate_test.go @@ -20,10 +20,14 @@ import ( "fmt" "testing" + "github.com/tidwall/gjson" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) func insertInitialDataIntoExternalCluster(t *testing.T, conn *mysql.Conn) { @@ -37,29 +41,30 @@ func insertInitialDataIntoExternalCluster(t *testing.T, conn *mysql.Conn) { }) } -// TestMigrate runs an e2e test for importing from an external cluster using the Mount and Migrate commands. +// TestVtctlMigrate runs an e2e test for importing from an external cluster using the vtctl Mount and Migrate commands. // We have an anti-pattern in Vitess: vt executables look for an environment variable VTDATAROOT for certain cluster parameters // like the log directory when they are created. Until this test we just needed a single cluster for e2e tests. // However now we need to create an external Vitess cluster. For this we need a different VTDATAROOT and // hence the VTDATAROOT env variable gets overwritten. 
// Each time we need to create vt processes in the "other" cluster we need to set the appropriate VTDATAROOT -func TestMigrate(t *testing.T) { +func TestVtctlMigrate(t *testing.T) { defaultCellName := "zone1" cells := []string{"zone1"} allCellNames = "zone1" vc = NewVitessCluster(t, "TestMigrate", cells, mainClusterConfig) - require.NotNil(t, vc) + require.NotNil(t, vc, "failed to create VitessCluster") defaultReplicas = 0 defaultRdonly = 0 defer vc.TearDown(t) defaultCell = vc.Cells[defaultCellName] - vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) - err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err) + _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) + require.NoError(t, err, "failed to create product keyspace") + err = cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") + require.NoError(t, err, "product shard did not become healthy") vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) + require.NotNil(t, vtgate, "failed to get vtgate") vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() @@ -109,7 +114,7 @@ func TestMigrate(t *testing.T) { "--source=ext1.rating", "create", ksWorkflow); err != nil { t.Fatalf("Migrate command failed with %+v : %s\n", err, output) } - waitForWorkflowState(t, vc, ksWorkflow, workflowStateRunning) + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 1) waitForRowCount(t, vtgateConn, "product:0", "rating", 2) waitForRowCount(t, vtgateConn, "product:0", "review", 3) @@ -117,7 +122,7 @@ func TestMigrate(t *testing.T) { execVtgateQuery(t, extVtgateConn, "rating", "insert into rating(gid, pid, rating) values(3, 
1, 3);") waitForRowCount(t, vtgateConn, "product:0", "rating", 3) waitForRowCount(t, vtgateConn, "product:0", "review", 4) - vdiff1(t, ksWorkflow, "extcell1") + vdiffSideBySide(t, ksWorkflow, "extcell1") if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Migrate", "complete", ksWorkflow); err != nil { t.Fatalf("Migrate command failed with %+v : %s\n", err, output) @@ -162,3 +167,150 @@ func TestMigrate(t *testing.T) { require.Errorf(t, err, "there is no vitess cluster named ext1") }) } + +// TestVtctldMigrate runs an e2e test for importing from an external cluster using the vtctld Mount and Migrate commands. +// We have an anti-pattern in Vitess: vt executables look for an environment variable VTDATAROOT for certain cluster parameters +// like the log directory when they are created. Until this test we just needed a single cluster for e2e tests. +// However now we need to create an external Vitess cluster. For this we need a different VTDATAROOT and +// hence the VTDATAROOT env variable gets overwritten. 
+// Each time we need to create vt processes in the "other" cluster we need to set the appropriate VTDATAROOT +func TestVtctldMigrate(t *testing.T) { + defaultCellName := "zone1" + cells := []string{"zone1"} + allCellNames = "zone1" + vc = NewVitessCluster(t, "TestMigrateVtctld", cells, mainClusterConfig) + + require.NotNil(t, vc, "failed to create VitessCluster") + defaultReplicas = 0 + defaultRdonly = 0 + defer vc.TearDown(t) + + defaultCell = vc.Cells[defaultCellName] + _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", + initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) + require.NoError(t, err, "failed to create product keyspace") + err = cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") + require.NoError(t, err, "product shard did not become healthy") + vtgate = defaultCell.Vtgates[0] + require.NotNil(t, vtgate, "failed to get vtgate") + + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + verifyClusterHealth(t, vc) + insertInitialData(t) + + // create external cluster + extCell := "extcell1" + extCells := []string{extCell} + extVc := NewVitessCluster(t, t.Name(), extCells, externalClusterConfig) + require.NotNil(t, extVc) + defer extVc.TearDown(t) + + extCell2 := extVc.Cells[extCell] + extVc.AddKeyspace(t, []*Cell{extCell2}, "rating", "0", + initialExternalVSchema, initialExternalSchema, 0, 0, 1000, nil) + extVtgate := extCell2.Vtgates[0] + require.NotNil(t, extVtgate) + + err = cluster.WaitForHealthyShard(extVc.VtctldClient, "rating", "0") + require.NoError(t, err) + verifyClusterHealth(t, extVc) + extVtgateConn := getConnection(t, extVc.ClusterConfig.hostname, extVc.ClusterConfig.vtgateMySQLPort) + insertInitialDataIntoExternalCluster(t, extVtgateConn) + + var output, expected string + + t.Run("mount external cluster", func(t *testing.T) { + output, err := vc.VtctldClient.ExecuteCommandWithOutput("Mount", "register", 
"--name=ext1", "--topo-type=etcd2", + fmt.Sprintf("--topo-server=localhost:%d", extVc.ClusterConfig.topoPort), "--topo-root=/vitess/global") + require.NoError(t, err, "Mount Register command failed with %s", output) + + output, err = vc.VtctldClient.ExecuteCommandWithOutput("Mount", "list") + require.NoError(t, err, "Mount List command failed with %s", output) + + names := gjson.Get(output, "names") + require.Equal(t, 1, len(names.Array())) + require.Equal(t, "ext1", names.Array()[0].String()) + output, err = vc.VtctldClient.ExecuteCommandWithOutput("Mount", "show", "--name=ext1") + require.NoError(t, err, "Mount command failed with %s\n", output) + + require.Equal(t, "etcd2", gjson.Get(output, "topo_type").String()) + require.Equal(t, "localhost:12379", gjson.Get(output, "topo_server").String()) + require.Equal(t, "/vitess/global", gjson.Get(output, "topo_root").String()) + }) + + ksWorkflow := "product.e1" + + t.Run("migrate from external cluster", func(t *testing.T) { + if output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", + "--target-keyspace", "product", "--workflow", "e1", + "create", "--source-keyspace", "rating", "--mount-name", "ext1", "--all-tables", "--cells=extcell1", "--tablet-types=primary,replica"); err != nil { + t.Fatalf("Migrate command failed with %+v : %s\n", err, output) + } + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 1) + waitForRowCount(t, vtgateConn, "product:0", "rating", 2) + waitForRowCount(t, vtgateConn, "product:0", "review", 3) + execVtgateQuery(t, extVtgateConn, "rating", "insert into review(rid, pid, review) values(4, 1, 'review4');") + execVtgateQuery(t, extVtgateConn, "rating", "insert into rating(gid, pid, rating) values(3, 1, 3);") + waitForRowCount(t, vtgateConn, "product:0", "rating", 3) + waitForRowCount(t, vtgateConn, "product:0", "review", 4) + vdiffSideBySide(t, ksWorkflow, "extcell1") + 
+ output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", + "--target-keyspace", "product", "--workflow", "e1", "show") + require.NoError(t, err, "Migrate command failed with %s", output) + + wf := gjson.Get(output, "workflows").Array()[0] + require.Equal(t, "e1", wf.Get("name").String()) + require.Equal(t, "Migrate", wf.Get("workflow_type").String()) + + output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", + "--target-keyspace", "product", "--workflow", "e1", "status", "--format=json") + require.NoError(t, err, "Migrate command failed with %s", output) + + require.Equal(t, "Running", gjson.Get(output, "shard_streams.product/0.streams.0.status").String()) + + output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", + "--target-keyspace", "product", "--workflow", "e1", "complete") + require.NoError(t, err, "Migrate command failed with %s", output) + + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 0) + }) + t.Run("cancel migrate workflow", func(t *testing.T) { + execVtgateQuery(t, vtgateConn, "product", "drop table review,rating") + output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", + "--target-keyspace", "product", "--workflow", "e1", "Create", "--source-keyspace", "rating", + "--mount-name", "ext1", "--all-tables", "--auto-start=false", "--cells=extcell1") + require.NoError(t, err, "Migrate command failed with %s", output) + + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 1) + waitForRowCount(t, vtgateConn, "product:0", "rating", 0) + waitForRowCount(t, vtgateConn, "product:0", "review", 0) + output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", + "--target-keyspace", "product", "--workflow", "e1", "cancel") + require.NoError(t, err, "Migrate command failed with %s", output) + + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 0) + var found bool + found, err = checkIfTableExists(t, vc, "zone1-100", "review") + require.NoError(t, err) + 
require.False(t, found) + found, err = checkIfTableExists(t, vc, "zone1-100", "rating") + require.NoError(t, err) + require.False(t, found) + }) + + t.Run("unmount external cluster", func(t *testing.T) { + output, err = vc.VtctldClient.ExecuteCommandWithOutput("Mount", "unregister", "--name=ext1") + require.NoError(t, err, "Mount command failed with %s\n", output) + + output, err = vc.VtctldClient.ExecuteCommandWithOutput("Mount", "list") + require.NoError(t, err, "Mount command failed with %+v : %s\n", output) + expected = "{}\n" + require.Equal(t, expected, output) + + output, err = vc.VtctldClient.ExecuteCommandWithOutput("Mount", "show", "--name=ext1") + require.Errorf(t, err, "there is no vitess cluster named ext1") + }) +} diff --git a/go/test/endtoend/vreplication/movetables_buffering_test.go b/go/test/endtoend/vreplication/movetables_buffering_test.go new file mode 100644 index 00000000000..4e4b7cada97 --- /dev/null +++ b/go/test/endtoend/vreplication/movetables_buffering_test.go @@ -0,0 +1,45 @@ +package vreplication + +import ( + "testing" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/wrangler" +) + +func TestMoveTablesBuffering(t *testing.T) { + defaultRdonly = 1 + vc = setupMinimalCluster(t) + defer vtgateConn.Close() + defer vc.TearDown(t) + + currentWorkflowType = wrangler.MoveTablesWorkflow + setupMinimalCustomerKeyspace(t) + tables := "loadtest" + err := tstWorkflowExec(t, defaultCellName, workflowName, sourceKs, targetKs, + tables, workflowActionCreate, "", "", "", false) + require.NoError(t, err) + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + + lg := newLoadGenerator(t, vc) + go func() { + lg.start() + }() + lg.waitForCount(1000) + + catchup(t, targetTab1, workflowName, "MoveTables") + catchup(t, targetTab2, workflowName, "MoveTables") + vdiffSideBySide(t, ksWorkflow, "") + 
waitForLowLag(t, "customer", workflowName) + tstWorkflowSwitchReads(t, "", "") + tstWorkflowSwitchWrites(t) + log.Infof("SwitchWrites done") + lg.stop() + + log.Infof("TestMoveTablesBuffering: done") + log.Flush() +} diff --git a/go/test/endtoend/vreplication/partial_movetables_seq_test.go b/go/test/endtoend/vreplication/partial_movetables_seq_test.go new file mode 100644 index 00000000000..6a1ed92cb9c --- /dev/null +++ b/go/test/endtoend/vreplication/partial_movetables_seq_test.go @@ -0,0 +1,583 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/wrangler" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +/* + This file introduces a new helper framework for vreplication tests. The current one uses a lot of globals + and make assumptions which make adding new types of tests difficult. + + As part of a separate cleanup we will build on this framework to replace the existing one. 
+*/ + +type keyspace struct { + name string + vschema string + schema string + baseID int64 + shards []string +} + +type workflowOptions struct { + tables []string + sourceShards []string + targetShards []string +} + +type workflow struct { + name string + fromKeyspace string + toKeyspace string + typ string + tc *vrepTestCase + options *workflowOptions +} + +type vrepTestCase struct { + testName string + t *testing.T + defaultCellName string + vtgateConn *mysql.Conn + keyspaces map[string]*keyspace + workflows map[string]*workflow + + vc *VitessCluster + vtgate *cluster.VtgateProcess +} + +func initPartialMoveTablesComplexTestCase(t *testing.T, name string) *vrepTestCase { + const ( + seqVSchema = `{ + "sharded": false, + "tables": { + "customer_seq": { + "type": "sequence" + } + } + }` + seqSchema = `create table customer_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence';` + commerceSchema = `create table customer(cid int, name varchar(128), ts timestamp(3) not null default current_timestamp(3), primary key(cid));` + commerceVSchema = ` + { + "tables": { + "customer": {} + } + } +` + customerSchema = "" + customerVSchema = ` + { + "sharded": true, + "vindexes": { + "reverse_bits": { + "type": "reverse_bits" + } + }, + "tables": { + "customer": { + "column_vindexes": [ + { + "column": "cid", + "name": "reverse_bits" + } + ], + "auto_increment": { + "column": "cid", + "sequence": "customer_seq" + } + } + } + } + ` + ) + tc := &vrepTestCase{ + t: t, + testName: name, + keyspaces: make(map[string]*keyspace), + defaultCellName: "zone1", + workflows: make(map[string]*workflow), + } + tc.keyspaces["commerce"] = &keyspace{ + name: "commerce", + vschema: commerceVSchema, + schema: commerceSchema, + baseID: 100, + shards: []string{"0"}, + } + tc.keyspaces["customer"] = &keyspace{ + name: "customer", + vschema: customerVSchema, + schema: customerSchema, + baseID: 200, + shards: []string{"-80", "80-"}, + } + tc.keyspaces["customer2"] = 
&keyspace{ + name: "customer2", + vschema: customerVSchema, + schema: "", + baseID: 1200, + shards: []string{"-80", "80-"}, + } + tc.keyspaces["seqSrc"] = &keyspace{ + name: "seqSrc", + vschema: seqVSchema, + schema: seqSchema, + baseID: 400, + shards: []string{"0"}, + } + tc.keyspaces["seqTgt"] = &keyspace{ + name: "seqTgt", + vschema: "", + schema: "", + baseID: 500, + shards: []string{"0"}, + } + tc.setupCluster() + tc.initData() + return tc +} + +func (tc *vrepTestCase) teardown() { + tc.vtgateConn.Close() + vc.TearDown(tc.t) +} + +func (tc *vrepTestCase) setupCluster() { + cells := []string{"zone1"} + + tc.vc = NewVitessCluster(tc.t, tc.testName, cells, mainClusterConfig) + vc = tc.vc // for backward compatibility since vc is used globally in this package + require.NotNil(tc.t, tc.vc) + tc.setupKeyspaces([]string{"commerce", "seqSrc"}) + tc.vtgateConn = getConnection(tc.t, tc.vc.ClusterConfig.hostname, tc.vc.ClusterConfig.vtgateMySQLPort) + vtgateConn = tc.vtgateConn // for backward compatibility since vtgateConn is used globally in this package +} + +func (tc *vrepTestCase) initData() { + _, err := tc.vtgateConn.ExecuteFetch("insert into customer_seq(id, next_id, cache) values(0, 1000, 1000)", 1000, false) + require.NoError(tc.t, err) + _, err = tc.vtgateConn.ExecuteFetch("insert into customer(cid, name) values(1, 'customer1'), (2, 'customer2'),(3, 'customer3')", 1000, false) + require.NoError(tc.t, err) +} + +func (tc *vrepTestCase) setupKeyspaces(keyspaces []string) { + for _, keyspace := range keyspaces { + ks, ok := tc.keyspaces[keyspace] + require.Equal(tc.t, true, ok, "keyspace %s not found", keyspace) + tc.setupKeyspace(ks) + } +} + +func (tc *vrepTestCase) setupKeyspace(ks *keyspace) { + t := tc.t + if _, err := tc.vc.AddKeyspace(t, []*Cell{tc.vc.Cells["zone1"]}, ks.name, strings.Join(ks.shards, ","), + ks.vschema, ks.schema, 0, 0, int(ks.baseID), nil); err != nil { + t.Fatal(err) + } + if tc.vtgate == nil { + defaultCellName := "zone1" + defaultCell 
:= tc.vc.Cells[defaultCellName] + require.NotNil(tc.t, defaultCell) + tc.vtgate = defaultCell.Vtgates[0] + + } + for _, shard := range ks.shards { + require.NoError(t, cluster.WaitForHealthyShard(tc.vc.VtctldClient, ks.name, shard)) + require.NoError(t, tc.vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", ks.name, shard), 1, 30*time.Second)) + } +} + +func (tc *vrepTestCase) newWorkflow(typ, workflowName, fromKeyspace, toKeyspace string, options *workflowOptions) *workflow { + wf := &workflow{ + name: workflowName, + fromKeyspace: fromKeyspace, + toKeyspace: toKeyspace, + typ: typ, + tc: tc, + options: options, + } + return wf +} + +func (wf *workflow) create() { + var err error + t := wf.tc.t + typ := strings.ToLower(wf.typ) + cell := wf.tc.defaultCellName + switch typ { + case "movetables": + currentWorkflowType = wrangler.MoveTablesWorkflow + sourceShards := strings.Join(wf.options.sourceShards, ",") + err = tstWorkflowExec(t, cell, wf.name, wf.fromKeyspace, wf.toKeyspace, + strings.Join(wf.options.tables, ","), workflowActionCreate, "", sourceShards, "", false) + case "reshard": + currentWorkflowType = wrangler.ReshardWorkflow + sourceShards := strings.Join(wf.options.sourceShards, ",") + targetShards := strings.Join(wf.options.targetShards, ",") + if targetShards == "" { + targetShards = sourceShards + } + err = tstWorkflowExec(t, cell, wf.name, wf.fromKeyspace, wf.toKeyspace, + strings.Join(wf.options.tables, ","), workflowActionCreate, "", sourceShards, targetShards, false) + default: + panic(fmt.Sprintf("unknown workflow type: %s", wf.typ)) + } + require.NoError(t, err) + waitForWorkflowState(t, wf.tc.vc, fmt.Sprintf("%s.%s", wf.toKeyspace, wf.name), binlogdatapb.VReplicationWorkflowState_Running.String()) + ks2 := wf.tc.vc.Cells[cell].Keyspaces[wf.toKeyspace] + var i int64 + for _, shardName := range wf.tc.keyspaces[wf.toKeyspace].shards { + tab := ks2.Shards[shardName].Tablets[fmt.Sprintf("%s-%d", cell, 
wf.tc.keyspaces[wf.toKeyspace].baseID+i)].Vttablet + catchup(t, tab, wf.name, wf.typ) + i += 100 + } + doVtctldclientVDiff(t, wf.toKeyspace, wf.name, cell, nil) + +} + +func (wf *workflow) switchTraffic() { + require.NoError(wf.tc.t, tstWorkflowExec(wf.tc.t, wf.tc.defaultCellName, wf.name, wf.fromKeyspace, wf.toKeyspace, "", workflowActionSwitchTraffic, "", "", "", false)) +} + +func (wf *workflow) reverseTraffic() { + require.NoError(wf.tc.t, tstWorkflowExec(wf.tc.t, wf.tc.defaultCellName, wf.name, wf.fromKeyspace, wf.toKeyspace, "", workflowActionReverseTraffic, "", "", "", false)) +} + +func (wf *workflow) complete() { + require.NoError(wf.tc.t, tstWorkflowExec(wf.tc.t, wf.tc.defaultCellName, wf.name, wf.fromKeyspace, wf.toKeyspace, "", workflowActionComplete, "", "", "", false)) +} + +// TestPartialMoveTablesWithSequences enhances TestPartialMoveTables by adding an unsharded keyspace which has a +// sequence. This tests that the sequence is migrated correctly and that we can reverse traffic back to the source +func TestPartialMoveTablesWithSequences(t *testing.T) { + + origExtraVTGateArgs := extraVTGateArgs + + extraVTGateArgs = append(extraVTGateArgs, []string{ + "--enable-partial-keyspace-migration", + "--schema_change_signal=false", + }...) 
+ defer func() { + extraVTGateArgs = origExtraVTGateArgs + }() + + tc := initPartialMoveTablesComplexTestCase(t, "TestPartialMoveTablesComplex") + defer tc.teardown() + var err error + + t.Run("Move customer table from unsharded product keyspace to sharded customer keyspace.", func(t *testing.T) { + tc.setupKeyspaces([]string{"customer"}) + wf := tc.newWorkflow("MoveTables", "customer", "commerce", "customer", &workflowOptions{ + tables: []string{"customer"}, + }) + wf.create() + wf.switchTraffic() + wf.complete() + }) + + var wfSeq *workflow + t.Run("Start MoveTables for Sequence", func(t *testing.T) { + tc.setupKeyspace(tc.keyspaces["seqTgt"]) + wfSeq = tc.newWorkflow("MoveTables", "seq", "seqSrc", "seqTgt", &workflowOptions{ + tables: []string{"customer_seq"}, + }) + wfSeq.create() + }) + + var emptyGlobalRoutingRules, emptyShardRoutingRules, preCutoverShardRoutingRules, halfCutoverShardRoutingRules, postCutoverShardRoutingRules string + t.Run("Define and setup RoutingRules", func(t *testing.T) { + emptyGlobalRoutingRules = "{}\n" + + // These should be listed in shard order + emptyShardRoutingRules = `{"rules":[]}` + preCutoverShardRoutingRules = `{"rules":[{"from_keyspace":"customer2","to_keyspace":"customer","shard":"-80"},{"from_keyspace":"customer2","to_keyspace":"customer","shard":"80-"}]}` + halfCutoverShardRoutingRules = `{"rules":[{"from_keyspace":"customer2","to_keyspace":"customer","shard":"-80"},{"from_keyspace":"customer","to_keyspace":"customer2","shard":"80-"}]}` + postCutoverShardRoutingRules = `{"rules":[{"from_keyspace":"customer","to_keyspace":"customer2","shard":"-80"},{"from_keyspace":"customer","to_keyspace":"customer2","shard":"80-"}]}` + + // Remove any manually applied shard routing rules as these + // should be set by SwitchTraffic. 
+ applyShardRoutingRules(t, emptyShardRoutingRules) + require.Equal(t, emptyShardRoutingRules, getShardRoutingRules(t)) + }) + + wfName := "partial80Dash" + sourceKs := "customer" + targetKs := "customer2" + shard := "80-" + var wf80Dash, wfDash80 *workflow + currentCustomerCount = getCustomerCount(t, "before customer2.80-") + t.Run("Start MoveTables on customer2.80-", func(t *testing.T) { + // Now setup the customer2 keyspace so we can do a partial move tables for one of the two shards: 80-. + defaultRdonly = 0 + tc.setupKeyspaces([]string{"customer2"}) + wf80Dash = tc.newWorkflow("MoveTables", wfName, "customer", "customer2", &workflowOptions{ + sourceShards: []string{"80-"}, + tables: []string{"customer"}, + }) + wf80Dash.create() + + currentCustomerCount = getCustomerCount(t, "after customer2.80-") + waitForRowCount(t, vtgateConn, "customer2:80-", "customer", 2) // customer2: 80- + waitForRowCount(t, vtgateConn, "customer", "customer", 3) // customer: all shards + waitForRowCount(t, vtgateConn, "customer2", "customer", 3) // customer2: all shards + }) + + currentCustomerCount = getCustomerCount(t, "after customer2.80-/2") + log.Flush() + + // This query uses an ID that should always get routed to shard 80- + shard80MinusRoutedQuery := "select name from customer where cid = 1 and noexistcol = 'foo'" + // This query uses an ID that should always get routed to shard -80 + shardMinus80RoutedQuery := "select name from customer where cid = 2 and noexistcol = 'foo'" + + // Reset any existing vtgate connection state. + vtgateConn.Close() + vtgateConn = getConnection(t, tc.vc.ClusterConfig.hostname, tc.vc.ClusterConfig.vtgateMySQLPort) + t.Run("Confirm routing rules", func(t *testing.T) { + + // Global routing rules should be in place with everything going to the source keyspace (customer). + confirmGlobalRoutingToSource(t) + + // Shard routing rules should now also be in place with everything + // going to the source keyspace (customer). 
+ require.Equal(t, preCutoverShardRoutingRules, getShardRoutingRules(t)) + + // Confirm shard targeting works before we switch any traffic. + // Everything should be routed to the source keyspace (customer). + + log.Infof("Testing reverse route (target->source) for shard being switched") + _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) + require.NoError(t, err) + _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + require.Error(t, err) + require.Contains(t, err.Error(), "target: customer.80-.primary", "Query was routed to the target before any SwitchTraffic") + + log.Infof("Testing reverse route (target->source) for shard NOT being switched") + _, err = vtgateConn.ExecuteFetch("use `customer2:-80`", 0, false) + require.NoError(t, err) + _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + require.Error(t, err) + require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before any SwitchTraffic") + + _, err = vtgateConn.ExecuteFetch("use `customer`", 0, false) // switch vtgate default db back to customer + require.NoError(t, err) + currentCustomerCount = getCustomerCount(t, "") + + // Switch all traffic for the shard + wf80Dash.switchTraffic() + expectedSwitchOutput := fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\nStart State: Reads Not Switched. Writes Not Switched\nCurrent State: Reads partially switched, for shards: %s. Writes partially switched, for shards: %s\n\n", + targetKs, wfName, shard, shard) + require.Equal(t, expectedSwitchOutput, lastOutput) + currentCustomerCount = getCustomerCount(t, "") + + // Confirm global routing rules -- everything should still be routed + // to the source side, customer, globally. + confirmGlobalRoutingToSource(t) + + // Confirm shard routing rules -- all traffic for the 80- shard should be + // routed into the customer2 keyspace, overriding the global routing rules. 
+ require.Equal(t, halfCutoverShardRoutingRules, getShardRoutingRules(t)) + + // Confirm global routing rules: -80 should still be routed to customer + // while 80- should be routed to customer2. + require.Equal(t, halfCutoverShardRoutingRules, getShardRoutingRules(t)) + }) + vtgateConn.Close() + vtgateConn = getConnection(t, tc.vc.ClusterConfig.hostname, tc.vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + + t.Run("Validate shard and tablet type routing", func(t *testing.T) { + + // No shard targeting + _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + require.Error(t, err) + require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") + _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + require.Error(t, err) + require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before partial SwitchTraffic") + + // Shard targeting + _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) + require.NoError(t, err) + _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + require.Error(t, err) + require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") + _, err = vtgateConn.ExecuteFetch("use `customer:80-`", 0, false) + require.NoError(t, err) + _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + require.Error(t, err) + require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") + + _, err = vtgateConn.ExecuteFetch("use `customer`", 0, false) // switch vtgate default db back to customer + require.NoError(t, err) + }) + currentCustomerCount = getCustomerCount(t, "") + + // Now move the other shard: -80 + t.Run("Move shard -80 and validate routing rules", func(t *testing.T) { + // Now move the other shard: -80 + wfName = "partialDash80" + 
shard = "-80" + wfDash80 = tc.newWorkflow("MoveTables", wfName, "customer", "customer2", &workflowOptions{ + sourceShards: []string{"-80"}, + tables: []string{"customer"}, + }) + wfDash80.create() + wfDash80.switchTraffic() + + expectedSwitchOutput := fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\nStart State: Reads partially switched, for shards: 80-. Writes partially switched, for shards: 80-\nCurrent State: All Reads Switched. All Writes Switched\n\n", + targetKs, wfName) + require.Equal(t, expectedSwitchOutput, lastOutput) + + // Confirm global routing rules: everything should still be routed + // to the source side, customer, globally. + confirmGlobalRoutingToSource(t) + + // Confirm shard routing rules: all shards should be routed to the + // target side (customer2). + require.Equal(t, postCutoverShardRoutingRules, getShardRoutingRules(t)) + }) + + var output string + + _, err = vtgateConn.ExecuteFetch("use `customer`", 0, false) // switch vtgate default db back to customer + require.NoError(t, err) + currentCustomerCount = getCustomerCount(t, "") + t.Run("Switch sequence traffic forward and reverse and validate workflows still exist and sequence routing works", func(t *testing.T) { + wfSeq.switchTraffic() + log.Infof("SwitchTraffic was successful for workflow seqTgt.seq, with output %s", lastOutput) + + insertCustomers(t) + + wfSeq.reverseTraffic() + log.Infof("ReverseTraffic was successful for workflow seqTgt.seq, with output %s", lastOutput) + + insertCustomers(t) + + wfSeq.switchTraffic() + log.Infof("SwitchTraffic was successful for workflow seqTgt.seq, with output %s", lastOutput) + + insertCustomers(t) + + output, err = tc.vc.VtctlClient.ExecuteCommandWithOutput("Workflow", "seqTgt.seq", "show") + require.NoError(t, err) + + output, err = tc.vc.VtctlClient.ExecuteCommandWithOutput("Workflow", "seqSrc.seq_reverse", "show") + require.NoError(t, err) + + wfSeq.complete() + }) + + t.Run("Cancel reverse workflows and validate", func(t 
*testing.T) { + // Cancel both reverse workflows (as we've done the cutover), which should + // clean up both the global routing rules and the shard routing rules. + for _, wf := range []string{"partialDash80", "partial80Dash"} { + // We switched traffic, so it's the reverse workflow we want to cancel. + reverseWf := wf + "_reverse" + reverseKs := sourceKs // customer + err = tstWorkflowExec(t, "", reverseWf, "", reverseKs, "", workflowActionCancel, "", "", "", false) + require.NoError(t, err) + + output, err := tc.vc.VtctlClient.ExecuteCommandWithOutput("Workflow", fmt.Sprintf("%s.%s", reverseKs, reverseWf), "show") + require.Error(t, err) + require.Contains(t, output, "no streams found") + + // Delete the original workflow + originalKsWf := fmt.Sprintf("%s.%s", targetKs, wf) + _, err = tc.vc.VtctlClient.ExecuteCommandWithOutput("Workflow", originalKsWf, "delete") + require.NoError(t, err) + output, err = tc.vc.VtctlClient.ExecuteCommandWithOutput("Workflow", originalKsWf, "show") + require.Error(t, err) + require.Contains(t, output, "no streams found") + } + + // Confirm that the global routing rules are now gone. + output, err = tc.vc.VtctlClient.ExecuteCommandWithOutput("GetRoutingRules") + require.NoError(t, err) + require.Equal(t, emptyGlobalRoutingRules, output) + + // Confirm that the shard routing rules are now gone. 
+ require.Equal(t, emptyShardRoutingRules, getShardRoutingRules(t)) + }) +} + +var customerCount int64 +var currentCustomerCount int64 +var newCustomerCount = int64(201) +var lastCustomerId int64 + +func getCustomerCount(t *testing.T, msg string) int64 { + qr := execVtgateQuery(t, vtgateConn, "", "select count(*) from customer") + require.NotNil(t, qr) + count, err := qr.Rows[0][0].ToInt64() + require.NoError(t, err) + return count +} + +func confirmLastCustomerIdHasIncreased(t *testing.T) { + qr := execVtgateQuery(t, vtgateConn, "", "select cid from customer order by cid desc limit 1") + require.NotNil(t, qr) + currentCustomerId, err := qr.Rows[0][0].ToInt64() + require.NoError(t, err) + require.Greater(t, currentCustomerId, lastCustomerId) + lastCustomerId = currentCustomerId +} + +func insertCustomers(t *testing.T) { + for i := int64(1); i < newCustomerCount+1; i++ { + execVtgateQuery(t, vtgateConn, "customer@primary", fmt.Sprintf("insert into customer(name) values ('name-%d')", currentCustomerCount+i)) + } + customerCount = getCustomerCount(t, "") + require.Equal(t, currentCustomerCount+newCustomerCount, customerCount) + currentCustomerCount = customerCount + + confirmLastCustomerIdHasIncreased(t) +} + +func confirmGlobalRoutingToSource(t *testing.T) { + output, err := vc.VtctlClient.ExecuteCommandWithOutput("GetRoutingRules") + require.NoError(t, err) + result := gjson.Get(output, "rules") + result.ForEach(func(attributeKey, attributeValue gjson.Result) bool { + // 0 is the keyspace and 1 is optional tablename[@tablettype] + fromKsTbl := strings.Split(attributeValue.Get("fromTable").String(), ".") + // 0 is the keyspace and 1 is the tablename + toKsTbl := strings.Split(attributeValue.Get("toTables.0").String(), ".") + // All tables in the customer and customer2 keyspaces should be + // routed to the customer keyspace. 
+ if fromKsTbl[0] == "customer" || fromKsTbl[0] == "customer2" { + require.Equal(t, "customer", toKsTbl[0]) + } + return true + }) +} diff --git a/go/test/endtoend/vreplication/partial_movetables_test.go b/go/test/endtoend/vreplication/partial_movetables_test.go index c130000e53a..5583232fbdc 100644 --- a/go/test/endtoend/vreplication/partial_movetables_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_test.go @@ -21,6 +21,8 @@ import ( "strings" "testing" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + "github.com/stretchr/testify/require" "github.com/tidwall/gjson" @@ -28,9 +30,55 @@ import ( "vitess.io/vitess/go/vt/wrangler" ) -// TestPartialMoveTables tests partial move tables by moving each +// testCancel() starts and cancels a partial MoveTables for one of the shards which will be actually moved later on. +// Before canceling, we first switch traffic to the target keyspace and then reverse it back to the source keyspace. +// This tests that artifacts are being properly cleaned up when a MoveTables is canceled. 
+ // Ref: https://github.com/vitessio/vitess/issues/13998 + table := "customer2" + shard := "80-" + // start the partial movetables for 80- + mt := newMoveTables(vc, &moveTables{ + workflowName: workflowName, + targetKeyspace: targetKeyspace, + sourceKeyspace: sourceKeyspace, + tables: table, + sourceShards: shard, + }, moveTablesFlavorRandom) + mt.Create() + + checkDenyList := func(keyspace string, expected bool) { + validateTableInDenyList(t, vc, fmt.Sprintf("%s:%s", keyspace, shard), table, expected) + } + + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + + checkDenyList(targetKeyspace, false) + checkDenyList(sourceKeyspace, false) + + mt.SwitchReadsAndWrites() + checkDenyList(targetKeyspace, false) + checkDenyList(sourceKeyspace, true) + + mt.ReverseReadsAndWrites() + checkDenyList(targetKeyspace, true) + checkDenyList(sourceKeyspace, false) + + mt.Cancel() + checkDenyList(targetKeyspace, false) + checkDenyList(sourceKeyspace, false) + +} + +// TestPartialMoveTablesBasic tests partial move tables by moving each // customer shard -- -80,80- -- once a a time to customer2. -func TestPartialMoveTables(t *testing.T) { +func TestPartialMoveTablesBasic(t *testing.T) { origDefaultRdonly := defaultRdonly defer func() { defaultRdonly = origDefaultRdonly @@ -51,14 +99,14 @@ func TestPartialMoveTables(t *testing.T) { defer func() { extraVTGateArgs = origExtraVTGateArgs }() - vc = setupCluster(t) + vc = setupMinimalCluster(t) defer vtgateConn.Close() defer vc.TearDown(t) - setupCustomerKeyspace(t) + setupMinimalCustomerKeyspace(t) // Move customer table from unsharded product keyspace to // sharded customer keyspace. 
- createMoveTablesWorkflow(t, "customer") + createMoveTablesWorkflow(t, "customer,loadtest,customer2") tstWorkflowSwitchReadsAndWrites(t) tstWorkflowComplete(t) @@ -75,10 +123,15 @@ func TestPartialMoveTables(t *testing.T) { applyShardRoutingRules(t, emptyShardRoutingRules) require.Equal(t, emptyShardRoutingRules, getShardRoutingRules(t)) + runWithLoad := true + // Now setup the customer2 keyspace so we can do a partial // move tables for one of the two shards: 80-. defaultRdonly = 0 setupCustomer2Keyspace(t) + + testCancel(t) + currentWorkflowType = wrangler.MoveTablesWorkflow wfName := "partial80Dash" sourceKs := "customer" @@ -88,11 +141,20 @@ func TestPartialMoveTables(t *testing.T) { // start the partial movetables for 80- err := tstWorkflowExec(t, defaultCellName, wfName, sourceKs, targetKs, - "customer", workflowActionCreate, "", shard, "") + "customer,loadtest", workflowActionCreate, "", shard, "", false) require.NoError(t, err) + var lg *loadGenerator + if runWithLoad { // start load after routing rules are set, otherwise we end up with ambiguous tables + lg = newLoadGenerator(t, vc) + go func() { + lg.start() + }() + lg.waitForCount(1000) + } + targetTab1 = vc.getPrimaryTablet(t, targetKs, shard) catchup(t, targetTab1, wfName, "Partial MoveTables Customer to Customer2") - vdiff1(t, ksWf, "") + vdiffSideBySide(t, ksWf, "") waitForRowCount(t, vtgateConn, "customer", "customer", 3) // customer: all shards waitForRowCount(t, vtgateConn, "customer2", "customer", 3) // customer2: all shards @@ -152,7 +214,7 @@ func TestPartialMoveTables(t *testing.T) { require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before any SwitchTraffic") // Switch all traffic for the shard - require.NoError(t, tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionSwitchTraffic, "", "", "")) + require.NoError(t, tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionSwitchTraffic, "", "", "", false)) expectedSwitchOutput := 
fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\nStart State: Reads Not Switched. Writes Not Switched\nCurrent State: Reads partially switched, for shards: %s. Writes partially switched, for shards: %s\n\n", targetKs, wfName, shard, shard) require.Equal(t, expectedSwitchOutput, lastOutput) @@ -210,7 +272,7 @@ func TestPartialMoveTables(t *testing.T) { // We cannot Complete a partial move tables at the moment because // it will find that all traffic has (obviously) not been switched. - err = tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionComplete, "", "", "") + err = tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionComplete, "", "", "", false) require.Error(t, err) // Confirm global routing rules: -80 should still be be routed to customer @@ -221,16 +283,16 @@ func TestPartialMoveTables(t *testing.T) { wfName = "partialDash80" shard = "-80" ksWf = fmt.Sprintf("%s.%s", targetKs, wfName) - // Start the partial movetables for -80, 80- has already been switched err = tstWorkflowExec(t, defaultCellName, wfName, sourceKs, targetKs, - "customer", workflowActionCreate, "", shard, "") + "customer,loadtest", workflowActionCreate, "", shard, "", false) require.NoError(t, err) targetTab2 := vc.getPrimaryTablet(t, targetKs, shard) catchup(t, targetTab2, wfName, "Partial MoveTables Customer to Customer2: -80") - vdiff1(t, ksWf, "") + vdiffSideBySide(t, ksWf, "") + // Switch all traffic for the shard - require.NoError(t, tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionSwitchTraffic, "", "", "")) + require.NoError(t, tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionSwitchTraffic, "", "", "", false)) expectedSwitchOutput = fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\nStart State: Reads partially switched, for shards: 80-. Writes partially switched, for shards: 80-\nCurrent State: All Reads Switched. 
All Writes Switched\n\n", targetKs, wfName) require.Equal(t, expectedSwitchOutput, lastOutput) @@ -243,13 +305,15 @@ func TestPartialMoveTables(t *testing.T) { // target side (customer2). require.Equal(t, postCutoverShardRoutingRules, getShardRoutingRules(t)) + lg.stop() + // Cancel both reverse workflows (as we've done the cutover), which should // clean up both the global routing rules and the shard routing rules. for _, wf := range []string{"partialDash80", "partial80Dash"} { // We switched traffic, so it's the reverse workflow we want to cancel. reverseWf := wf + "_reverse" reverseKs := sourceKs // customer - err = tstWorkflowExec(t, "", reverseWf, "", reverseKs, "", workflowActionCancel, "", "", "") + err = tstWorkflowExec(t, "", reverseWf, "", reverseKs, "", workflowActionCancel, "", "", "", false) require.NoError(t, err) output, err := vc.VtctlClient.ExecuteCommandWithOutput("Workflow", fmt.Sprintf("%s.%s", reverseKs, reverseWf), "show") @@ -272,4 +336,5 @@ func TestPartialMoveTables(t *testing.T) { // Confirm that the shard routing rules are now gone. 
require.Equal(t, emptyShardRoutingRules, getShardRoutingRules(t)) + } diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index e12dbfa1cb1..338310fdf14 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -30,6 +30,8 @@ import ( "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/wrangler" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) const ( @@ -61,13 +63,13 @@ var ( func createReshardWorkflow(t *testing.T, sourceShards, targetShards string) error { err := tstWorkflowExec(t, defaultCellName, workflowName, targetKs, targetKs, - "", workflowActionCreate, "", sourceShards, targetShards) + "", workflowActionCreate, "", sourceShards, targetShards, false) require.NoError(t, err) - waitForWorkflowState(t, vc, ksWorkflow, workflowStateRunning) + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, targetKs, "") catchup(t, targetTab1, workflowName, "Reshard") catchup(t, targetTab2, workflowName, "Reshard") - vdiff1(t, ksWorkflow, "") + vdiffSideBySide(t, ksWorkflow, "") return nil } @@ -76,20 +78,20 @@ func createMoveTablesWorkflow(t *testing.T, tables string) { tables = tablesToMove } err := tstWorkflowExec(t, defaultCellName, workflowName, sourceKs, targetKs, - tables, workflowActionCreate, "", "", "") + tables, workflowActionCreate, "", "", "", false) require.NoError(t, err) - waitForWorkflowState(t, vc, ksWorkflow, workflowStateRunning) + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, targetKs, tables) catchup(t, targetTab1, workflowName, "MoveTables") catchup(t, targetTab2, workflowName, 
"MoveTables") - vdiff1(t, ksWorkflow, "") + vdiffSideBySide(t, ksWorkflow, "") } func tstWorkflowAction(t *testing.T, action, tabletTypes, cells string) error { - return tstWorkflowExec(t, cells, workflowName, sourceKs, targetKs, tablesToMove, action, tabletTypes, "", "") + return tstWorkflowExec(t, cells, workflowName, sourceKs, targetKs, tablesToMove, action, tabletTypes, "", "", false) } -func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, action, tabletTypes, sourceShards, targetShards string) error { +func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, action, tabletTypes, sourceShards, targetShards string, atomicCopy bool) error { var args []string if currentWorkflowType == wrangler.MoveTablesWorkflow { args = append(args, "MoveTables") @@ -102,11 +104,18 @@ func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, if BypassLagCheck { args = append(args, "--max_replication_lag_allowed=2542087h") } - + if atomicCopy { + args = append(args, "--atomic-copy") + } switch action { case workflowActionCreate: if currentWorkflowType == wrangler.MoveTablesWorkflow { - args = append(args, "--source", sourceKs, "--tables", tables) + args = append(args, "--source", sourceKs) + if tables != "" { + args = append(args, "--tables", tables) + } else { + args = append(args, "--all") + } if sourceShards != "" { args = append(args, "--source_shards", sourceShards) } @@ -116,7 +125,10 @@ func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, // Test new experimental --defer-secondary-keys flag switch currentWorkflowType { case wrangler.MoveTablesWorkflow, wrangler.MigrateWorkflow, wrangler.ReshardWorkflow: - args = append(args, "--defer-secondary-keys") + if !atomicCopy { + args = append(args, "--defer-secondary-keys") + } + args = append(args, "--initialize-target-sequences") // Only used for MoveTables } } if cells != "" { @@ -206,7 +218,7 @@ func validateReadsRoute(t 
*testing.T, tabletTypes string, tablet *cluster.Vttabl for _, tt := range []string{"replica", "rdonly"} { destination := fmt.Sprintf("%s:%s@%s", tablet.Keyspace, tablet.Shard, tt) if strings.Contains(tabletTypes, tt) { - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, tablet, destination, readQuery, readQuery)) + assertQueryExecutesOnTablet(t, vtgateConn, tablet, destination, readQuery, readQuery) } } } @@ -221,17 +233,17 @@ func validateReadsRouteToTarget(t *testing.T, tabletTypes string) { func validateWritesRouteToSource(t *testing.T) { insertQuery := "insert into customer(name, cid) values('tempCustomer2', 200)" - matchInsertQuery := "insert into customer(name, cid) values" - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, sourceTab, "customer", insertQuery, matchInsertQuery)) + matchInsertQuery := "insert into customer(`name`, cid) values" + assertQueryExecutesOnTablet(t, vtgateConn, sourceTab, "customer", insertQuery, matchInsertQuery) execVtgateQuery(t, vtgateConn, "customer", "delete from customer where cid > 100") } func validateWritesRouteToTarget(t *testing.T) { insertQuery := "insert into customer(name, cid) values('tempCustomer3', 101)" - matchInsertQuery := "insert into customer(name, cid) values" - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, targetTab2, "customer", insertQuery, matchInsertQuery)) + matchInsertQuery := "insert into customer(`name`, cid) values" + assertQueryExecutesOnTablet(t, vtgateConn, targetTab2, "customer", insertQuery, matchInsertQuery) insertQuery = "insert into customer(name, cid) values('tempCustomer3', 102)" - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, targetTab1, "customer", insertQuery, matchInsertQuery)) + assertQueryExecutesOnTablet(t, vtgateConn, targetTab1, "customer", insertQuery, matchInsertQuery) execVtgateQuery(t, vtgateConn, "customer", "delete from customer where cid > 100") } @@ -242,7 +254,7 @@ func revert(t *testing.T, workflowType 
string) { validateReadsRouteToSource(t, "replica") // cancel the workflow to cleanup - _, err := vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "Cancel", ksWorkflow) + _, err := vc.VtctldClient.ExecuteCommandWithOutput(workflowType, "--target-keyspace", targetKs, "--workflow", workflowName, "cancel") require.NoError(t, err, fmt.Sprintf("%s Cancel error: %v", workflowType, err)) } @@ -305,17 +317,17 @@ func testVSchemaForSequenceAfterMoveTables(t *testing.T) { // use MoveTables to move customer2 from product to customer using currentWorkflowType = wrangler.MoveTablesWorkflow err := tstWorkflowExec(t, defaultCellName, "wf2", sourceKs, targetKs, - "customer2", workflowActionCreate, "", "", "") + "customer2", workflowActionCreate, "", "", "", false) require.NoError(t, err) - waitForWorkflowState(t, vc, "customer.wf2", workflowStateRunning) + waitForWorkflowState(t, vc, "customer.wf2", binlogdatapb.VReplicationWorkflowState_Running.String()) waitForLowLag(t, "customer", "wf2") err = tstWorkflowExec(t, defaultCellName, "wf2", sourceKs, targetKs, - "", workflowActionSwitchTraffic, "", "", "") + "", workflowActionSwitchTraffic, "", "", "", false) require.NoError(t, err) err = tstWorkflowExec(t, defaultCellName, "wf2", sourceKs, targetKs, - "", workflowActionComplete, "", "", "") + "", workflowActionComplete, "", "", "", false) require.NoError(t, err) // sanity check @@ -340,16 +352,16 @@ func testVSchemaForSequenceAfterMoveTables(t *testing.T) { // use MoveTables to move customer2 back to product. 
Note that now the table has an associated sequence err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, - "customer2", workflowActionCreate, "", "", "") + "customer2", workflowActionCreate, "", "", "", false) require.NoError(t, err) - waitForWorkflowState(t, vc, "product.wf3", workflowStateRunning) + waitForWorkflowState(t, vc, "product.wf3", binlogdatapb.VReplicationWorkflowState_Running.String()) waitForLowLag(t, "product", "wf3") err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, - "", workflowActionSwitchTraffic, "", "", "") + "", workflowActionSwitchTraffic, "", "", "", false) require.NoError(t, err) err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, - "", workflowActionComplete, "", "", "") + "", workflowActionComplete, "", "", "", false) require.NoError(t, err) // sanity check @@ -387,10 +399,10 @@ func testReplicatingWithPKEnumCols(t *testing.T) { insertQuery := "insert into customer(cid, name, typ, sport, meta) values(2, 'Paül','soho','cricket',convert(x'7b7d' using utf8mb4))" execVtgateQuery(t, vtgateConn, sourceKs, deleteQuery) waitForNoWorkflowLag(t, vc, targetKs, workflowName) - vdiff1(t, ksWorkflow, "") + vdiffSideBySide(t, ksWorkflow, "") execVtgateQuery(t, vtgateConn, sourceKs, insertQuery) waitForNoWorkflowLag(t, vc, targetKs, workflowName) - vdiff1(t, ksWorkflow, "") + vdiffSideBySide(t, ksWorkflow, "") } func testReshardV2Workflow(t *testing.T) { @@ -427,7 +439,7 @@ func testMoveTablesV2Workflow(t *testing.T) { setupCustomerKeyspace(t) // The purge table should get skipped/ignored // If it's not then we'll get an error as the table doesn't exist in the vschema - createMoveTablesWorkflow(t, "customer,vdiff_order,reftable,_vt_PURGE_4f9194b43b2011eb8a0104ed332e05c2_20221210194431") + createMoveTablesWorkflow(t, "customer,loadtest,vdiff_order,reftable,_vt_PURGE_4f9194b43b2011eb8a0104ed332e05c2_20221210194431") if !strings.Contains(lastOutput, "Workflow started successfully") { t.Fail() } @@ -637,19 
+649,56 @@ func setupCustomer2Keyspace(t *testing.T) { c2shards := []string{"-80", "80-"} c2keyspace := "customer2" if _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"]}, c2keyspace, strings.Join(c2shards, ","), - customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 1200, nil); err != nil { + customerVSchema, customerSchema, 0, 0, 1200, nil); err != nil { t.Fatal(err) } for _, c2shard := range c2shards { err := cluster.WaitForHealthyShard(vc.VtctldClient, c2keyspace, c2shard) require.NoError(t, err) - if defaultReplicas > 0 { - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", c2keyspace, c2shard), defaultReplicas, 30*time.Second)) - } - if defaultRdonly > 0 { - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", c2keyspace, c2shard), defaultRdonly, 30*time.Second)) - } + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", c2keyspace, c2shard), 1, 30*time.Second)) + } +} + +func setupMinimalCluster(t *testing.T) *VitessCluster { + cells := []string{"zone1"} + + vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", cells, mainClusterConfig) + require.NotNil(t, vc) + defaultCellName := "zone1" + allCellNames = defaultCellName + defaultCell = vc.Cells[defaultCellName] + + zone1 := vc.Cells["zone1"] + + vc.AddKeyspace(t, []*Cell{zone1}, "product", "0", initialProductVSchema, initialProductSchema, 0, 0, 100, nil) + + vtgate = zone1.Vtgates[0] + require.NotNil(t, vtgate) + err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") + require.NoError(t, err) + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1, 30*time.Second)) + + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + verifyClusterHealth(t, vc) + insertInitialData(t) + + sourceTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet + + return vc +} + 
+func setupMinimalCustomerKeyspace(t *testing.T) { + if _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"]}, "customer", "-80,80-", + customerVSchema, customerSchema, 0, 0, 200, nil); err != nil { + t.Fatal(err) } + require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "-80")) + require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "80-")) + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "-80"), 1, 30*time.Second)) + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "80-"), 1, 30*time.Second)) + custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] + targetTab1 = custKs.Shards["-80"].Tablets["zone1-200"].Vttablet + targetTab2 = custKs.Shards["80-"].Tablets["zone1-300"].Vttablet } func TestSwitchReadsWritesInAnyOrder(t *testing.T) { @@ -684,7 +733,7 @@ func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias moveTablesAction(t, "Create", sourceCellOrAlias, workflow, sourceKs, targetKs, tables) catchup(t, targetTab1, workflow, workflowType) catchup(t, targetTab2, workflow, workflowType) - vdiff1(t, ksWorkflow, "") + vdiffSideBySide(t, ksWorkflow, "") } var switchReadsFollowedBySwitchWrites = func() { diff --git a/go/test/endtoend/vreplication/time_zone_test.go b/go/test/endtoend/vreplication/time_zone_test.go index 2bf63bd6e61..2d0d1eeaf0b 100644 --- a/go/test/endtoend/vreplication/time_zone_test.go +++ b/go/test/endtoend/vreplication/time_zone_test.go @@ -119,7 +119,7 @@ func TestMoveTablesTZ(t *testing.T) { _, err = vtgateConn.ExecuteFetch("insert into datze(id, dt2) values (12, '2022-04-01 5:06:07')", 1, false) // dst require.NoError(t, err) - vdiff1(t, ksWorkflow, "") + vdiffSideBySide(t, ksWorkflow, "") // update to test date conversions in replication (vplayer) mode (update statements) _, err = vtgateConn.ExecuteFetch("update datze set dt2 = '2022-04-01 5:06:07' where id = 11", 1, false) // 
dst @@ -127,7 +127,7 @@ func TestMoveTablesTZ(t *testing.T) { _, err = vtgateConn.ExecuteFetch("update datze set dt2 = '2022-01-01 10:20:30' where id = 12", 1, false) // standard time require.NoError(t, err) - vdiff1(t, ksWorkflow, "") + vdiffSideBySide(t, ksWorkflow, "") query := "select * from datze" qrSourceUSPacific, err := productTab.QueryTablet(query, sourceKs, true) @@ -154,7 +154,7 @@ func TestMoveTablesTZ(t *testing.T) { require.NotEqual(t, row.AsString("ts1", ""), qrTargetUTC.Named().Rows[i].AsString("ts1", "")) dtLayout := "2006-01-02 15:04:05" - // now compare times b/w source and target (actual). VDiff has already compared, but we want to validate that vdiff1 is right too! + // now compare times b/w source and target (actual). VDiff has already compared, but we want to validate that vdiffSideBySide is right too! dt2a, err := time.Parse(dtLayout, qrTargetUTC.Named().Rows[i].AsString("dt2", "")) require.NoError(t, err) targetUTCTUnix := dt2a.Unix() @@ -206,5 +206,5 @@ func TestMoveTablesTZ(t *testing.T) { // inserts to test date conversions in reverse replication execVtgateQuery(t, vtgateConn, "customer", "insert into datze(id, dt2) values (13, '2022-01-01 18:20:30')") execVtgateQuery(t, vtgateConn, "customer", "insert into datze(id, dt2) values (14, '2022-04-01 12:06:07')") - vdiff1(t, ksReverseWorkflow, "") + vdiffSideBySide(t, ksReverseWorkflow, "") } diff --git a/go/test/endtoend/vreplication/unsharded_init_data.sql b/go/test/endtoend/vreplication/unsharded_init_data.sql index a29a4d1b405..8af0cab6608 100644 --- a/go/test/endtoend/vreplication/unsharded_init_data.sql +++ b/go/test/endtoend/vreplication/unsharded_init_data.sql @@ -1,6 +1,7 @@ insert into customer(cid, name, typ, sport, meta) values(1, 'Jøhn "❤️" Rizzolo',1,'football,baseball','{}'); insert into customer(cid, name, typ, sport, meta) values(2, 'Paül','soho','cricket',convert(x'7b7d' using utf8mb4)); -insert into customer(cid, name, typ, sport, blb) values(3, 
'ringo','enterprise','','blob data'); +-- We use a high cid value here to test the target sequence initialization. +insert into customer(cid, name, typ, sport, blb) values(999999, 'ringo','enterprise','','blob data'); insert into merchant(mname, category) values('Monoprice', 'eléctronics'); insert into merchant(mname, category) values('newegg', 'elec†ronics'); insert into product(pid, description) values(1, 'keyböard ⌨️'); diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go index d3bdff5e1af..72b09e8fede 100644 --- a/go/test/endtoend/vreplication/vdiff2_test.go +++ b/go/test/endtoend/vreplication/vdiff2_test.go @@ -22,8 +22,6 @@ import ( "testing" "time" - "vitess.io/vitess/go/vt/topo/topoproto" - "github.com/stretchr/testify/require" "github.com/tidwall/gjson" @@ -67,9 +65,9 @@ var testCases = []*testCase{ tabletBaseID: 200, tables: "customer,Lead,Lead-1", autoRetryError: true, - retryInsert: `insert into customer(cid, name, typ) values(91234, 'Testy McTester', 'soho')`, + retryInsert: `insert into customer(cid, name, typ) values(1991234, 'Testy McTester', 'soho')`, resume: true, - resumeInsert: `insert into customer(cid, name, typ) values(92234, 'Testy McTester (redux)', 'enterprise')`, + resumeInsert: `insert into customer(cid, name, typ) values(1992234, 'Testy McTester (redux)', 'enterprise')`, testCLIErrors: true, // test for errors in the simplest workflow testCLICreateWait: true, // test wait on create feature against simplest workflow }, @@ -83,9 +81,9 @@ var testCases = []*testCase{ targetShards: "-40,40-a0,a0-", tabletBaseID: 400, autoRetryError: true, - retryInsert: `insert into customer(cid, name, typ) values(93234, 'Testy McTester Jr', 'enterprise'), (94234, 'Testy McTester II', 'enterprise')`, + retryInsert: `insert into customer(cid, name, typ) values(1993234, 'Testy McTester Jr', 'enterprise'), (1993235, 'Testy McTester II', 'enterprise')`, resume: true, - resumeInsert: `insert into customer(cid, 
name, typ) values(95234, 'Testy McTester III', 'enterprise')`, + resumeInsert: `insert into customer(cid, name, typ) values(1994234, 'Testy McTester III', 'enterprise')`, stop: true, }, { @@ -98,16 +96,15 @@ var testCases = []*testCase{ targetShards: "0", tabletBaseID: 700, autoRetryError: true, - retryInsert: `insert into customer(cid, name, typ) values(96234, 'Testy McTester IV', 'enterprise')`, + retryInsert: `insert into customer(cid, name, typ) values(1995234, 'Testy McTester IV', 'enterprise')`, resume: true, - resumeInsert: `insert into customer(cid, name, typ) values(97234, 'Testy McTester V', 'enterprise'), (98234, 'Testy McTester VI', 'enterprise')`, + resumeInsert: `insert into customer(cid, name, typ) values(1996234, 'Testy McTester V', 'enterprise'), (1996235, 'Testy McTester VI', 'enterprise')`, stop: true, }, } func TestVDiff2(t *testing.T) { - allCellNames = "zone1" - defaultCellName := "zone1" + allCellNames = "zone5,zone1,zone2,zone3,zone4" sourceKs := "product" sourceShards := []string{"0"} targetKs := "customer" @@ -115,14 +112,19 @@ func TestVDiff2(t *testing.T) { // This forces us to use multiple vstream packets even with small test tables extraVTTabletArgs = []string{"--vstream_packet_size=1"} - vc = NewVitessCluster(t, "TestVDiff2", []string{allCellNames}, mainClusterConfig) + vc = NewVitessCluster(t, "TestVDiff2", strings.Split(allCellNames, ","), mainClusterConfig) require.NotNil(t, vc) - defaultCell = vc.Cells[defaultCellName] - cells := []*Cell{defaultCell} + zone1 := vc.Cells["zone1"] + zone2 := vc.Cells["zone2"] + zone3 := vc.Cells["zone3"] + defaultCell = zone1 defer vc.TearDown(t) - vc.AddKeyspace(t, cells, sourceKs, strings.Join(sourceShards, ","), initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) + // The primary tablet is only added in the first cell. + // We ONLY add primary tablets in this test. 
+ _, err := vc.AddKeyspace(t, []*Cell{zone2, zone1, zone3}, sourceKs, strings.Join(sourceShards, ","), initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) + require.NoError(t, err) vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) @@ -142,7 +144,9 @@ func TestVDiff2(t *testing.T) { generateMoreCustomers(t, sourceKs, 100) - _, err := vc.AddKeyspace(t, cells, targetKs, strings.Join(targetShards, ","), customerVSchema, customerSchema, 0, 0, 200, targetKsOpts) + // The primary tablet is only added in the first cell. + // We ONLY add primary tablets in this test. + tks, err := vc.AddKeyspace(t, []*Cell{zone3, zone1, zone2}, targetKs, strings.Join(targetShards, ","), customerVSchema, customerSchema, 0, 0, 200, targetKsOpts) require.NoError(t, err) for _, shard := range targetShards { require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard)) @@ -150,15 +154,15 @@ func TestVDiff2(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - testWorkflow(t, vc, tc, cells) + // Primary tablets for any new shards are added in the first cell. 
+ testWorkflow(t, vc, tc, tks, []*Cell{zone3, zone2, zone1}) }) } } -func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, cells []*Cell) { +func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, cells []*Cell) { arrTargetShards := strings.Split(tc.targetShards, ",") if tc.typ == "Reshard" { - tks := vc.Cells[cells[0].Name].Keyspaces[tc.targetKs] require.NoError(t, vc.AddShards(t, cells, tks, tc.targetShards, 0, 0, tc.tabletBaseID, targetKsOpts)) for _, shard := range arrTargetShards { require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, tc.targetKs, shard)) @@ -171,6 +175,7 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, cells []*Cell) if tc.typ == "Reshard" { args = append(args, "--source_shards", tc.sourceShards, "--target_shards", tc.targetShards) } + args = append(args, "--cells", allCellNames) args = append(args, "--tables", tc.tables) args = append(args, "Create") args = append(args, ksWorkflow) @@ -182,14 +187,14 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, cells []*Cell) catchup(t, tab, tc.workflow, tc.typ) } - vdiff(t, tc.targetKs, tc.workflow, cells[0].Name, true, true, nil) + vdiff(t, tc.targetKs, tc.workflow, allCellNames, true, true, nil) if tc.autoRetryError { - testAutoRetryError(t, tc, cells[0].Name) + testAutoRetryError(t, tc, allCellNames) } if tc.resume { - testResume(t, tc, cells[0].Name) + testResume(t, tc, allCellNames) } // These are done here so that we have a valid workflow to test the commands against @@ -207,8 +212,8 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, cells []*Cell) // create another VDiff record to confirm it gets deleted when the workflow is completed ts := time.Now() - uuid, _ := performVDiff2Action(t, ksWorkflow, allCellNames, "create", "", false) - waitForVDiff2ToComplete(t, ksWorkflow, allCellNames, uuid, ts) + uuid, _ := performVDiff2Action(t, false, ksWorkflow, allCellNames, "create", "", false) + 
waitForVDiff2ToComplete(t, false, ksWorkflow, allCellNames, uuid, ts) err = vc.VtctlClient.ExecuteCommand(tc.typ, "--", "SwitchTraffic", ksWorkflow) require.NoError(t, err) @@ -221,16 +226,18 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, cells []*Cell) func testCLIErrors(t *testing.T, ksWorkflow, cells string) { t.Run("Client error handling", func(t *testing.T) { - _, output := performVDiff2Action(t, ksWorkflow, cells, "badcmd", "", true) - require.Contains(t, output, "usage:") - _, output = performVDiff2Action(t, ksWorkflow, cells, "create", "invalid_uuid", true) - require.Contains(t, output, "please provide a valid UUID") - _, output = performVDiff2Action(t, ksWorkflow, cells, "resume", "invalid_uuid", true) - require.Contains(t, output, "can only resume a specific vdiff, please provide a valid UUID") - _, output = performVDiff2Action(t, ksWorkflow, cells, "delete", "invalid_uuid", true) - require.Contains(t, output, "can only delete a specific vdiff, please provide a valid UUID") - uuid, _ := performVDiff2Action(t, ksWorkflow, cells, "show", "last", false) - _, output = performVDiff2Action(t, ksWorkflow, cells, "create", uuid, true) + _, output := performVDiff2Action(t, false, ksWorkflow, cells, "badcmd", "", true) + require.Contains(t, output, "Usage:") + _, output = performVDiff2Action(t, false, ksWorkflow, cells, "create", "invalid_uuid", true) + require.Contains(t, output, "invalid UUID provided") + _, output = performVDiff2Action(t, false, ksWorkflow, cells, "resume", "invalid_uuid", true) + require.Contains(t, output, "invalid UUID provided") + _, output = performVDiff2Action(t, false, ksWorkflow, cells, "delete", "invalid_uuid", true) + require.Contains(t, output, "invalid argument provided") + _, output = performVDiff2Action(t, false, ksWorkflow, cells, "show", "invalid_uuid", true) + require.Contains(t, output, "invalid argument provided") + uuid, _ := performVDiff2Action(t, false, ksWorkflow, cells, "show", "last", false) + _, 
output = performVDiff2Action(t, false, ksWorkflow, cells, "create", uuid, true) require.Contains(t, output, "already exists") }) } @@ -248,35 +255,35 @@ func testDelete(t *testing.T, ksWorkflow, cells string) { } return int64(len(seen)) } - _, output := performVDiff2Action(t, ksWorkflow, cells, "show", "all", false) + _, output := performVDiff2Action(t, false, ksWorkflow, cells, "show", "all", false) initialVDiffCount := uuidCount(gjson.Get(output, "#.UUID").Array()) for ; initialVDiffCount < 3; initialVDiffCount++ { - _, _ = performVDiff2Action(t, ksWorkflow, cells, "create", "", false) + _, _ = performVDiff2Action(t, false, ksWorkflow, cells, "create", "", false) } // Now let's confirm that we have at least 3 unique VDiffs. - _, output = performVDiff2Action(t, ksWorkflow, cells, "show", "all", false) + _, output = performVDiff2Action(t, false, ksWorkflow, cells, "show", "all", false) require.GreaterOrEqual(t, uuidCount(gjson.Get(output, "#.UUID").Array()), int64(3)) // And that our initial count is what we expect. require.Equal(t, initialVDiffCount, uuidCount(gjson.Get(output, "#.UUID").Array())) // Test show last with verbose too as a side effect. - uuid, output := performVDiff2Action(t, ksWorkflow, cells, "show", "last", false, "--verbose") + uuid, output := performVDiff2Action(t, false, ksWorkflow, cells, "show", "last", false, "--verbose") // The TableSummary is only present with --verbose. require.Contains(t, output, `"TableSummary":`) // Now let's delete one of the VDiffs. - _, output = performVDiff2Action(t, ksWorkflow, cells, "delete", uuid, false) + _, output = performVDiff2Action(t, false, ksWorkflow, cells, "delete", uuid, false) require.Equal(t, "completed", gjson.Get(output, "Status").String()) // And confirm that our unique VDiff count has only decreased by one. 
- _, output = performVDiff2Action(t, ksWorkflow, cells, "show", "all", false) + _, output = performVDiff2Action(t, false, ksWorkflow, cells, "show", "all", false) require.Equal(t, initialVDiffCount-1, uuidCount(gjson.Get(output, "#.UUID").Array())) // Now let's delete all of them. - _, output = performVDiff2Action(t, ksWorkflow, cells, "delete", "all", false) + _, output = performVDiff2Action(t, false, ksWorkflow, cells, "delete", "all", false) require.Equal(t, "completed", gjson.Get(output, "Status").String()) // And finally confirm that we have no more VDiffs. - _, output = performVDiff2Action(t, ksWorkflow, cells, "show", "all", false) + _, output = performVDiff2Action(t, false, ksWorkflow, cells, "show", "all", false) require.Equal(t, int64(0), gjson.Get(output, "#").Int()) }) } @@ -298,7 +305,7 @@ func testResume(t *testing.T, tc *testCase, cells string) { ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) // confirm the last VDiff is in the expected completed state - uuid, output := performVDiff2Action(t, ksWorkflow, cells, "show", "last", false) + uuid, output := performVDiff2Action(t, false, ksWorkflow, cells, "show", "last", false) jsonOutput := getVDiffInfo(output) require.Equal(t, "completed", jsonOutput.State) // save the number of rows compared in previous runs @@ -314,8 +321,8 @@ func testResume(t *testing.T, tc *testCase, cells string) { // confirm that the VDiff was resumed, able to complete, and we compared the // expected number of rows in total (original run and resume) - uuid, _ = performVDiff2Action(t, ksWorkflow, cells, "resume", uuid, false) - info := waitForVDiff2ToComplete(t, ksWorkflow, cells, uuid, ogTime) + _, _ = performVDiff2Action(t, false, ksWorkflow, cells, "resume", uuid, false) + info := waitForVDiff2ToComplete(t, false, ksWorkflow, cells, uuid, ogTime) require.False(t, info.HasMismatch) require.Equal(t, expectedRows, info.RowsCompared) }) @@ -324,10 +331,10 @@ func testResume(t *testing.T, tc *testCase, cells string) { 
func testStop(t *testing.T, ksWorkflow, cells string) { t.Run("Stop", func(t *testing.T) { // create a new VDiff and immediately stop it - uuid, _ := performVDiff2Action(t, ksWorkflow, cells, "create", "", false) - _, _ = performVDiff2Action(t, ksWorkflow, cells, "stop", uuid, false) + uuid, _ := performVDiff2Action(t, false, ksWorkflow, cells, "create", "", false) + _, _ = performVDiff2Action(t, false, ksWorkflow, cells, "stop", uuid, false) // confirm the VDiff is in the expected stopped state - _, output := performVDiff2Action(t, ksWorkflow, cells, "show", uuid, false) + _, output := performVDiff2Action(t, false, ksWorkflow, cells, "show", uuid, false) jsonOutput := getVDiffInfo(output) require.Equal(t, "stopped", jsonOutput.State) // confirm that the context cancelled error was also cleared @@ -340,7 +347,7 @@ func testAutoRetryError(t *testing.T, tc *testCase, cells string) { ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) // confirm the last VDiff is in the expected completed state - uuid, output := performVDiff2Action(t, ksWorkflow, cells, "show", "last", false) + uuid, output := performVDiff2Action(t, false, ksWorkflow, cells, "show", "last", false) jsonOutput := getVDiffInfo(output) require.Equal(t, "completed", jsonOutput.State) // save the number of rows compared in the first run @@ -359,7 +366,7 @@ func testAutoRetryError(t *testing.T, tc *testCase, cells string) { // update the VDiff to simulate an ephemeral error having occurred for _, shard := range strings.Split(tc.targetShards, ",") { tab := vc.getPrimaryTablet(t, tc.targetKs, shard) - res, err := tab.QueryTabletWithDB(sqlparser.BuildParsedQuery(sqlSimulateError, sidecarDBIdentifier, sidecarDBIdentifier, encodeString(uuid)).Query, topoproto.VtDbPrefix+tc.targetKs) + res, err := tab.QueryTabletWithDB(sqlparser.BuildParsedQuery(sqlSimulateError, sidecarDBIdentifier, sidecarDBIdentifier, encodeString(uuid)).Query, "vt_"+tc.targetKs) require.NoError(t, err) // should have updated the vdiff 
record and at least one vdiff_table record require.GreaterOrEqual(t, int(res.RowsAffected), 2) @@ -367,7 +374,7 @@ func testAutoRetryError(t *testing.T, tc *testCase, cells string) { // confirm that the VDiff was retried, able to complete, and we compared the expected // number of rows in total (original run and retry) - info := waitForVDiff2ToComplete(t, ksWorkflow, cells, uuid, ogTime) + info := waitForVDiff2ToComplete(t, false, ksWorkflow, cells, uuid, ogTime) require.False(t, info.HasMismatch) require.Equal(t, expectedRows, info.RowsCompared) }) @@ -377,7 +384,7 @@ func testCLICreateWait(t *testing.T, ksWorkflow string, cells string) { t.Run("vtctl create and wait", func(t *testing.T) { chCompleted := make(chan bool) go func() { - _, output := performVDiff2Action(t, ksWorkflow, cells, "create", "", false, "--wait", "--wait-update-interval=1s") + _, output := performVDiff2Action(t, false, ksWorkflow, cells, "create", "", false, "--wait", "--wait-update-interval=1s") completed := false // We don't try to parse the JSON output as it may contain a series of outputs // that together do not form a valid JSON document. We can change this in the diff --git a/go/test/endtoend/vreplication/vdiff_helper_test.go b/go/test/endtoend/vreplication/vdiff_helper_test.go index 15098325ce8..38ae9273a42 100644 --- a/go/test/endtoend/vreplication/vdiff_helper_test.go +++ b/go/test/endtoend/vreplication/vdiff_helper_test.go @@ -17,23 +17,17 @@ limitations under the License. 
package vreplication import ( - "encoding/json" "fmt" "strings" "testing" "time" - "vitess.io/vitess/go/vt/topo/topoproto" - "github.com/stretchr/testify/require" "github.com/tidwall/gjson" - "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" vdiff2 "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" - "vitess.io/vitess/go/vt/wrangler" ) const ( @@ -44,50 +38,52 @@ var ( runVDiffsSideBySide = true ) -func vdiff(t *testing.T, keyspace, workflow, cells string, v1, v2 bool, wantV2Result *expectedVDiff2Result) { - ksWorkflow := fmt.Sprintf("%s.%s", keyspace, workflow) - if v1 { - doVDiff1(t, ksWorkflow, cells) +func vdiff(t *testing.T, keyspace, workflow, cells string, vtctlclient, vtctldclient bool, wantV2Result *expectedVDiff2Result) { + if vtctlclient { + doVtctlclientVDiff(t, keyspace, workflow, cells, wantV2Result) } - if v2 { - doVdiff2(t, keyspace, workflow, cells, wantV2Result) + if vtctldclient { + doVtctldclientVDiff(t, keyspace, workflow, cells, wantV2Result) } } -func vdiff1(t *testing.T, ksWorkflow, cells string) { - if !runVDiffsSideBySide { - doVDiff1(t, ksWorkflow, cells) - return - } +// vdiffSideBySide will run the VDiff command using both vtctlclient +// and vtctldclient. 
+func vdiffSideBySide(t *testing.T, ksWorkflow, cells string) { arr := strings.Split(ksWorkflow, ".") keyspace := arr[0] workflowName := arr[1] + if !runVDiffsSideBySide { + doVtctlclientVDiff(t, keyspace, workflowName, cells, nil) + return + } vdiff(t, keyspace, workflowName, cells, true, true, nil) } -func doVDiff1(t *testing.T, ksWorkflow, cells string) { - t.Run(fmt.Sprintf("vdiff1 %s", ksWorkflow), func(t *testing.T) { - output, err := vc.VtctlClient.ExecuteCommandWithOutput("VDiff", "--", "--v1", "--tablet_types=primary", "--source_cell="+cells, "--format", "json", ksWorkflow) - log.Infof("vdiff1 err: %+v, output: %+v", err, output) - require.NoError(t, err) - require.NotNil(t, output) - diffReports := make(map[string]*wrangler.DiffReport) - t.Logf("vdiff1 output: %s", output) - err = json.Unmarshal([]byte(output), &diffReports) - require.NoError(t, err) - if len(diffReports) < 1 { - t.Fatal("VDiff did not return a valid json response " + output + "\n") +func doVtctlclientVDiff(t *testing.T, keyspace, workflow, cells string, want *expectedVDiff2Result) { + ksWorkflow := fmt.Sprintf("%s.%s", keyspace, workflow) + t.Run(fmt.Sprintf("vtctlclient vdiff %s", ksWorkflow), func(t *testing.T) { + // update-table-stats is needed in order to test progress reports. 
+ uuid, _ := performVDiff2Action(t, true, ksWorkflow, cells, "create", "", false, "--auto-retry", "--update-table-stats") + info := waitForVDiff2ToComplete(t, true, ksWorkflow, cells, uuid, time.Time{}) + require.Equal(t, workflow, info.Workflow) + require.Equal(t, keyspace, info.Keyspace) + if want != nil { + require.Equal(t, want.state, info.State) + require.Equal(t, strings.Join(want.shards, ","), info.Shards) + require.Equal(t, want.hasMismatch, info.HasMismatch) + } else { + require.Equal(t, "completed", info.State, "vdiff results: %+v", info) + require.False(t, info.HasMismatch, "vdiff results: %+v", info) } - require.True(t, len(diffReports) > 0) - for key, diffReport := range diffReports { - if diffReport.ProcessedRows != diffReport.MatchingRows { - require.Failf(t, "vdiff1 failed", "Table %d : %#v\n", key, diffReport) - } + if strings.Contains(t.Name(), "AcrossDBVersions") { + log.Errorf("VDiff resume cannot be guaranteed between major MySQL versions due to implied collation differences, skipping resume test...") + return } }) } -func waitForVDiff2ToComplete(t *testing.T, ksWorkflow, cells, uuid string, completedAtMin time.Time) *vdiffInfo { +func waitForVDiff2ToComplete(t *testing.T, useVtctlclient bool, ksWorkflow, cells, uuid string, completedAtMin time.Time) *vdiffInfo { var info *vdiffInfo first := true previousProgress := vdiff2.ProgressReport{} @@ -95,7 +91,7 @@ func waitForVDiff2ToComplete(t *testing.T, ksWorkflow, cells, uuid string, compl go func() { for { time.Sleep(1 * time.Second) - _, jsonStr := performVDiff2Action(t, ksWorkflow, cells, "show", uuid, false) + _, jsonStr := performVDiff2Action(t, useVtctlclient, ksWorkflow, cells, "show", uuid, false) info = getVDiffInfo(jsonStr) if info.State == "completed" { if !completedAtMin.IsZero() { @@ -113,12 +109,20 @@ func waitForVDiff2ToComplete(t *testing.T, ksWorkflow, cells, uuid string, compl // The timestamp format allows us to compare them lexicographically. 
// We don't test that the ETA always increases as it can decrease based on how // quickly we're doing work. - if info.Progress.ETA != "" { - // If we're operating at the second boundary then the ETA can be up - // to 1 second in the past due to using second based precision. - loc, _ := time.LoadLocation("UTC") - require.GreaterOrEqual(t, info.Progress.ETA, time.Now().Add(-time.Second).In(loc).Format(vdiff2.TimestampFormat)) - } + + // Commenting out this check for now as it is quite flaky in Github CI: we sometimes get a difference of + // more than 1s between the ETA and the current time, empirically seen 2s when it has failed, + // but presumably it can be higher. Keeping the code here for now in case we want to re-enable it. + + /* + if info.Progress.ETA != "" { + // If we're operating at the second boundary then the ETA can be up + // to 1 second in the past due to using second based precision. + loc, _ := time.LoadLocation("UTC") + require.GreaterOrEqual(t, info.Progress.ETA, time.Now().Add(-time.Second).In(loc).Format(vdiff2.TimestampFormat)) + } + */ + if !first { require.GreaterOrEqual(t, info.Progress.Percentage, previousProgress.Percentage) } @@ -143,12 +147,12 @@ type expectedVDiff2Result struct { hasMismatch bool } -func doVdiff2(t *testing.T, keyspace, workflow, cells string, want *expectedVDiff2Result) { +func doVtctldclientVDiff(t *testing.T, keyspace, workflow, cells string, want *expectedVDiff2Result) { ksWorkflow := fmt.Sprintf("%s.%s", keyspace, workflow) - t.Run(fmt.Sprintf("vdiff2 %s", ksWorkflow), func(t *testing.T) { + t.Run(fmt.Sprintf("vtctldclient vdiff %s", ksWorkflow), func(t *testing.T) { // update-table-stats is needed in order to test progress reports. 
- uuid, _ := performVDiff2Action(t, ksWorkflow, cells, "create", "", false, "--auto-retry", "--update-table-stats") - info := waitForVDiff2ToComplete(t, ksWorkflow, cells, uuid, time.Time{}) + uuid, _ := performVDiff2Action(t, false, ksWorkflow, cells, "create", "", false, "--auto-retry", "--update-table-stats") + info := waitForVDiff2ToComplete(t, false, ksWorkflow, cells, uuid, time.Time{}) require.Equal(t, workflow, info.Workflow) require.Equal(t, keyspace, info.Keyspace) @@ -167,23 +171,58 @@ func doVdiff2(t *testing.T, keyspace, workflow, cells string, want *expectedVDif }) } -func performVDiff2Action(t *testing.T, ksWorkflow, cells, action, actionArg string, expectError bool, extraFlags ...string) (uuid string, output string) { +func performVDiff2Action(t *testing.T, useVtctlclient bool, ksWorkflow, cells, action, actionArg string, expectError bool, extraFlags ...string) (uuid string, output string) { var err error - args := []string{"VDiff", "--", "--tablet_types=primary", "--source_cell=" + cells, "--format=json"} - if len(extraFlags) > 0 { - args = append(args, extraFlags...) - } - args = append(args, ksWorkflow, action, actionArg) - output, err = vc.VtctlClient.ExecuteCommandWithOutput(args...) - log.Infof("vdiff2 output: %+v (err: %+v)", output, err) - if !expectError { - require.Nil(t, err) - uuid = gjson.Get(output, "UUID").String() - if action != "delete" && !(action == "show" && actionArg == "all") { // a UUID is not required + targetKeyspace, workflowName, ok := strings.Cut(ksWorkflow, ".") + require.True(t, ok, "invalid keyspace.workflow value: %s", ksWorkflow) + + if useVtctlclient { + // This will always result in us using a PRIMARY tablet, which is all + // we start in many e2e tests, but it avoids the tablet picker logic + // where when you ONLY specify the PRIMARY type it then picks the + // shard's primary and ignores any cell settings. 
+ args := []string{"VDiff", "--", "--tablet_types=in_order:primary,replica", "--source_cell=" + cells, "--format=json"} + if len(extraFlags) > 0 { + args = append(args, extraFlags...) + } + args = append(args, ksWorkflow, action, actionArg) + output, err = vc.VtctlClient.ExecuteCommandWithOutput(args...) + log.Infof("vdiff output: %+v (err: %+v)", output, err) + if !expectError { + require.Nil(t, err) + uuid = gjson.Get(output, "UUID").String() + if action != "delete" && !(action == "show" && actionArg == "all") { // A UUID is not required + require.NoError(t, err) + require.NotEmpty(t, uuid) + } + } + } else { + args := []string{"VDiff", "--target-keyspace", targetKeyspace, "--workflow", workflowName, "--format=json", action} + if strings.ToLower(action) == string(vdiff2.CreateAction) { + // This will always result in us using a PRIMARY tablet, which is all + // we start in many e2e tests, but it avoids the tablet picker logic + // where when you ONLY specify the PRIMARY type it then picks the + // shard's primary and ignores any cell settings. + args = append(args, "--tablet-types=primary,replica", "--tablet-types-in-preference-order", "--source-cells="+cells) + } + if len(extraFlags) > 0 { + args = append(args, extraFlags...) + } + if actionArg != "" { + args = append(args, actionArg) + } + output, err = vc.VtctldClient.ExecuteCommandWithOutput(args...) + log.Infof("vdiff output: %+v (err: %+v)", output, err) + if !expectError { require.NoError(t, err) - require.NotEmpty(t, uuid) + ouuid := gjson.Get(output, "UUID").String() + if action == "create" || (action == "show" && actionArg != "all") { // A UUID is returned + require.NotEmpty(t, ouuid) + uuid = ouuid + } } } + return uuid, output } @@ -218,30 +257,6 @@ func encodeString(in string) string { return buf.String() } -// updateTableStats runs ANALYZE TABLE on each table involved in the workflow. -// You should execute this if you leverage table information from e.g. -// information_schema.tables in your test. 
-func updateTableStats(t *testing.T, tablet *cluster.VttabletProcess, tables string) { - dbName := topoproto.VtDbPrefix + tablet.Keyspace - tableList := strings.Split(strings.TrimSpace(tables), ",") - if len(tableList) == 0 { - // we need to get all of the tables in the keyspace - res, err := tablet.QueryTabletWithDB("show tables", dbName) - require.NoError(t, err) - for _, row := range res.Rows { - tableList = append(tableList, row[0].String()) - } - } - for _, table := range tableList { - table = strings.TrimSpace(table) - if table != "" { - res, err := tablet.QueryTabletWithDB(fmt.Sprintf(sqlAnalyzeTable, sqlescape.EscapeID(table)), dbName) - require.NoError(t, err) - require.Equal(t, 1, len(res.Rows)) - } - } -} - // generateMoreCustomers creates additional test data for better tests // when needed. func generateMoreCustomers(t *testing.T, keyspace string, numCustomers int64) { diff --git a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go new file mode 100644 index 00000000000..0f6a9f668d0 --- /dev/null +++ b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go @@ -0,0 +1,135 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vreplication + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/tidwall/gjson" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/log" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +func TestMultipleConcurrentVDiffs(t *testing.T) { + cellName := "zone" + cells := []string{cellName} + vc = NewVitessCluster(t, t.Name(), cells, mainClusterConfig) + + require.NotNil(t, vc) + allCellNames = cellName + defaultCellName := cellName + defaultCell = vc.Cells[defaultCellName] + sourceKeyspace := "product" + shardName := "0" + + defer vc.TearDown(t) + + cell := vc.Cells[cellName] + vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) + + vtgate = cell.Vtgates[0] + require.NotNil(t, vtgate) + err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKeyspace, shardName) + require.NoError(t, err) + vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKeyspace, shardName), 1, 30*time.Second) + + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + verifyClusterHealth(t, vc) + + insertInitialData(t) + targetTabletId := 200 + targetKeyspace := "customer" + vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialProductVSchema, initialProductSchema, 0, 0, targetTabletId, sourceKsOpts) + vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKeyspace, shardName), 1, 30*time.Second) + + index := 1000 + var loadCtx context.Context + var loadCancel context.CancelFunc + loadCtx, loadCancel = context.WithCancel(context.Background()) + load := func(tableName string) { + query := "insert into %s(cid, name) values(%d, 'customer-%d')" + for { + select { + case <-loadCtx.Done(): + log.Infof("load cancelled") + return + default: + index += 1 + vtgateConn := getConnection(t, 
vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + q := fmt.Sprintf(query, tableName, index, index) + vtgateConn.ExecuteFetch(q, 1000, false) + vtgateConn.Close() + } + time.Sleep(10 * time.Millisecond) + } + } + targetKs := vc.Cells[cellName].Keyspaces[targetKeyspace] + targetTab := targetKs.Shards["0"].Tablets[fmt.Sprintf("%s-%d", cellName, targetTabletId)].Vttablet + require.NotNil(t, targetTab) + + time.Sleep(15 * time.Second) // wait for some rows to be inserted. + + createWorkflow := func(workflowName, tables string) { + mt := newMoveTables(vc, &moveTables{ + workflowName: workflowName, + targetKeyspace: targetKeyspace, + sourceKeyspace: sourceKeyspace, + tables: tables, + }, moveTablesFlavorVtctld) + mt.Create() + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + catchup(t, targetTab, workflowName, "MoveTables") + } + + createWorkflow("wf1", "customer") + createWorkflow("wf2", "customer2") + + go load("customer") + go load("customer2") + + var wg sync.WaitGroup + wg.Add(2) + + doVdiff := func(workflowName, table string) { + defer wg.Done() + vdiff(t, targetKeyspace, workflowName, cellName, true, false, nil) + } + go doVdiff("wf1", "customer") + go doVdiff("wf2", "customer2") + wg.Wait() + loadCancel() + + // confirm that show all shows the correct workflow and only that workflow. 
+ output, err := vc.VtctldClient.ExecuteCommandWithOutput("VDiff", "--format", "json", "--workflow", "wf1", "--target-keyspace", "customer", "show", "all") + require.NoError(t, err) + log.Infof("VDiff output: %s", output) + count := gjson.Get(output, "..#").Int() + wf := gjson.Get(output, "0.Workflow").String() + ksName := gjson.Get(output, "0.Keyspace").String() + require.Equal(t, int64(1), count) + require.Equal(t, "wf1", wf) + require.Equal(t, "customer", ksName) +} diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index 3bdb3eb1abb..88e3a49afc3 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -21,6 +21,7 @@ import ( "fmt" "io" "net/http" + "runtime" "strings" "sync" "testing" @@ -170,7 +171,7 @@ func TestVReplicationDDLHandling(t *testing.T) { _, err = vtgateConn.ExecuteFetch(addColDDL, 1, false) require.NoError(t, err, "error executing %q: %v", addColDDL, err) // Confirm workflow is still running fine - waitForWorkflowState(t, vc, ksWorkflow, "Running") + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) // Confirm new col does not exist on target waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(0)]]") // Confirm new col does exist on source @@ -200,7 +201,7 @@ func TestVReplicationDDLHandling(t *testing.T) { _, err = vtgateConn.ExecuteFetch(addColDDL, 1, false) require.NoError(t, err, "error executing %q: %v", addColDDL, err) // Confirm that the worfklow stopped because of the DDL - waitForWorkflowState(t, vc, ksWorkflow, "Stopped", fmt.Sprintf("Message==Stopped at DDL %s", addColDDL)) + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String(), fmt.Sprintf("Message==Stopped at DDL %s", addColDDL)) // Confirm that the target does not have new col waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, 
"[[INT64(0)]]") moveTablesAction(t, "Cancel", defaultCellName, workflow, sourceKs, targetKs, table) @@ -215,7 +216,7 @@ func TestVReplicationDDLHandling(t *testing.T) { _, err = vtgateConn.ExecuteFetch(dropColDDL, 1, false) require.NoError(t, err, "error executing %q: %v", dropColDDL, err) // Confirm workflow is still running fine - waitForWorkflowState(t, vc, ksWorkflow, "Running") + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) // Confirm new col was dropped on target waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(0)]]") moveTablesAction(t, "Cancel", defaultCellName, workflow, sourceKs, targetKs, table) @@ -269,9 +270,9 @@ func TestVreplicationCopyThrottling(t *testing.T) { // We need to force primary tablet types as the history list has been increased on the source primary // We use a small timeout and ignore errors as we don't expect the MoveTables to start here // because of the InnoDB History List length. 
- moveTablesActionWithTabletTypes(t, "Create", defaultCell.Name, workflow, sourceKs, targetKs, table, "primary", 5*time.Second, true) + moveTablesActionWithTabletTypes(t, "Create", defaultCell.Name, workflow, sourceKs, targetKs, table, "primary", true) // Wait for the copy phase to start - waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKs, workflow), workflowStateCopying) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKs, workflow), binlogdatapb.VReplicationWorkflowState_Copying.String()) // The initial copy phase should be blocking on the history list confirmWorkflowHasCopiedNoData(t, targetKs, workflow) releaseInnoDBRowHistory(t, trxConn) @@ -327,7 +328,8 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string defer vtgateConn.Close() verifyClusterHealth(t, vc) insertInitialData(t) - materializeRollup(t) + materializeRollup(t, true) + shardCustomer(t, true, []*Cell{defaultCell}, defaultCellName, false) // the Lead and Lead-1 tables tested a specific case with binary sharding keys. Drop it now so that we don't @@ -341,11 +343,11 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string return } - materializeProduct(t) + materializeProduct(t, true) - materializeMerchantOrders(t) - materializeSales(t) - materializeMerchantSales(t) + materializeMerchantOrders(t, true) + materializeSales(t, true) + materializeMerchantSales(t, true) reshardMerchant2to3SplitMerge(t) reshardMerchant3to1Merge(t) @@ -366,6 +368,44 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string verifyCopyStateIsOptimized(t, tablet) } }) + + t.Run("Test LookupVindex", func(t *testing.T) { + // LookupVindex does not support noblob images. 
+ if strings.ToLower(binlogRowImage) == "noblob" { + return + } + _, err = vtgateConn.ExecuteFetch("use customer", 1, false) + require.NoError(t, err, "error using customer keyspace: %v", err) + res, err := vtgateConn.ExecuteFetch("select count(*) from customer where name is not null", 1, false) + require.NoError(t, err, "error getting current row count in customer: %v", err) + require.Equal(t, 1, len(res.Rows), "expected 1 row in count(*) query, got %d", len(res.Rows)) + rows, _ := res.Rows[0][0].ToInt32() + // Insert a couple of rows with a NULL name to confirm that they + // are ignored. + insert := "insert into customer (cid, name, typ, sport, meta) values (100, NULL, 'soho', 'football','{}'), (101, NULL, 'enterprise','baseball','{}')" + _, err = vtgateConn.ExecuteFetch(insert, -1, false) + require.NoError(t, err, "error executing %q: %v", insert, err) + + vindexName := "customer_name_keyspace_id" + err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace=product", "create", "--keyspace=customer", + "--type=consistent_lookup", "--table-owner=customer", "--table-owner-columns=name,cid", "--ignore-nulls", "--tablet-types=PRIMARY") + require.NoError(t, err, "error executing LookupVindex create: %v", err) + waitForWorkflowState(t, vc, fmt.Sprintf("product.%s", vindexName), binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForRowCount(t, vtgateConn, "product", vindexName, int(rows)) + customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", "customer") + require.NoError(t, err, "error executing GetVSchema: %v", err) + vdx := gjson.Get(customerVSchema, fmt.Sprintf("vindexes.%s", vindexName)) + require.NotNil(t, vdx, "lookup vindex %s not found", vindexName) + require.Equal(t, "true", vdx.Get("params.write_only").String(), "expected write_only parameter to be true") + + err = vc.VtctldClient.ExecuteCommand("LookupVindex", "--name", vindexName, "--table-keyspace=product", "externalize", 
"--keyspace=customer") + require.NoError(t, err, "error executing LookupVindex externalize: %v", err) + customerVSchema, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", "customer") + require.NoError(t, err, "error executing GetVSchema: %v", err) + vdx = gjson.Get(customerVSchema, fmt.Sprintf("vindexes.%s", vindexName)) + require.NotNil(t, vdx, "lookup vindex %s not found", vindexName) + require.NotEqual(t, "true", vdx.Get("params.write_only").String(), "did not expect write_only parameter to be true") + }) } func testV2WorkflowsAcrossDBVersions(t *testing.T) { @@ -411,7 +451,7 @@ func TestMultiCellVreplicationWorkflow(t *testing.T) { verifyClusterHealth(t, vc) insertInitialData(t) shardCustomer(t, true, []*Cell{cell1, cell2}, cell2.Name, true) - checkIfDenyListExists(t, vc, "product:0", "customer") + isTableInDenyList(t, vc, "product:0", "customer") // we tag along this test so as not to create the overhead of creating another cluster testVStreamCellFlag(t) } @@ -705,7 +745,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl defaultCell := cells[0] custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] - tables := "customer,Lead,Lead-1,db_order_test,geom_tbl,json_tbl,blüb_tbl,vdiff_order,reftable" + tables := "customer,loadtest,Lead,Lead-1,db_order_test,geom_tbl,json_tbl,blüb_tbl,vdiff_order,reftable" moveTablesAction(t, "Create", sourceCellOrAlias, workflow, sourceKs, targetKs, tables) customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet @@ -725,6 +765,8 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl dec80Replicated := false execVtgateQuery(t, vtgateConn, sourceKs, "update customer set dec80 = 0") execVtgateQuery(t, vtgateConn, sourceKs, "update customer set blb = \"new blob data\" where cid=3") + execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j1 = null, j2 = 'null', j3 = '\"null\"'") + execVtgateQuery(t, vtgateConn, sourceKs, "insert into 
json_tbl(id, j1, j2, j3) values (7, null, 'null', '\"null\"')") waitForNoWorkflowLag(t, vc, targetKs, workflow) for _, shard := range []string{"-80", "80-"} { shardTarget := fmt.Sprintf("%s:%s", targetKs, shard) @@ -761,26 +803,29 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl }) query := "select cid from customer" - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "product", query, query)) + assertQueryExecutesOnTablet(t, vtgateConn, productTab, "product", query, query) insertQuery1 := "insert into customer(cid, name) values(1001, 'tempCustomer1')" matchInsertQuery1 := "insert into customer(cid, `name`) values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */)" - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1)) - - // confirm that the backticking of table names in the routing rules works - tbls := []string{"Lead", "Lead-1"} - for _, tbl := range tbls { - output, err := osExec(t, "mysql", []string{"-u", "vtdba", "-P", fmt.Sprintf("%d", vc.ClusterConfig.vtgateMySQLPort), - "--host=127.0.0.1", "--default-character-set=utf8mb4", "-e", fmt.Sprintf("select * from `%s`", tbl)}) - if err != nil { - require.FailNow(t, output) + assertQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1) + + // FIXME for some reason, these inserts fails on mac, need to investigate, some + // vreplication bug because of case insensitiveness of table names on mac? + if runtime.GOOS == "linux" { + // Confirm that the backticking of table names in the routing rules works. 
+ tbls := []string{"Lead", "Lead-1"} + for _, tbl := range tbls { + output, err := osExec(t, "mysql", []string{"-u", "vtdba", "-P", fmt.Sprintf("%d", vc.ClusterConfig.vtgateMySQLPort), + "--host=127.0.0.1", "--default-character-set=utf8mb4", "-e", fmt.Sprintf("select * from `%s`", tbl)}) + if err != nil { + require.FailNow(t, output) + } + execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("update `%s` set name='xyz'", tbl)) } - execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("update `%s` set name='xyz'", tbl)) } - - vdiff1(t, ksWorkflow, "") + vdiffSideBySide(t, ksWorkflow, "") switchReadsDryRun(t, workflowType, allCellNames, ksWorkflow, dryRunResultsReadCustomerShard) switchReads(t, workflowType, allCellNames, ksWorkflow, false) - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", query, query)) + assertQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", query, query) var commit func(t *testing.T) if withOpenTx { @@ -788,15 +833,22 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl } switchWritesDryRun(t, workflowType, ksWorkflow, dryRunResultsSwitchWritesCustomerShard) switchWrites(t, workflowType, ksWorkflow, false) + checkThatVDiffFails(t, targetKs, workflow) + // The original unsharded customer data included an insert with the + // vindex column (cid) of 999999, so the backing sequence table should + // now have a next_id of 1000000 after SwitchTraffic. 
+ res := execVtgateQuery(t, vtgateConn, sourceKs, "select next_id from customer_seq where id = 0") + require.Equal(t, "1000000", res.Rows[0][0].ToString()) + if withOpenTx && commit != nil { commit(t) } catchup(t, productTab, workflow, "MoveTables") - vdiff1(t, "product.p2c_reverse", "") + vdiffSideBySide(t, "product.p2c_reverse", "") if withOpenTx { execVtgateQuery(t, vtgateConn, "", deleteOpenTxQuery) } @@ -804,14 +856,14 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl ksShards := []string{"product/0", "customer/-80", "customer/80-"} printShardPositions(vc, ksShards) insertQuery2 := "insert into customer(name, cid) values('tempCustomer2', 100)" - matchInsertQuery2 := "insert into customer(`name`, cid) values (:vtg1 /* VARCHAR */, :_cid0)" - require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", insertQuery2, matchInsertQuery2)) + matchInsertQuery2 := "insert into customer(`name`, cid) values (:vtg1 /* VARCHAR */, :_cid_0)" + assertQueryDoesNotExecutesOnTablet(t, vtgateConn, productTab, "customer", insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) values('tempCustomer3', 101)" // ID 101, hence due to reverse_bits in shard 80- - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery2, matchInsertQuery2)) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) values('tempCustomer4', 102)" // ID 102, hence due to reverse_bits in shard -80 - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery2, matchInsertQuery2)) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery2, matchInsertQuery2) execVtgateQuery(t, vtgateConn, "customer", "update customer set meta = convert(x'7b7d' using utf8mb4) where cid = 1") if testReverse { @@ -826,12 +878,12 @@ func 
shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl require.Contains(t, output, "'customer.bmd5'") insertQuery1 = "insert into customer(cid, name) values(1002, 'tempCustomer5')" - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1)) + assertQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1) // both inserts go into 80-, this tests the edge-case where a stream (-80) has no relevant new events after the previous switch insertQuery1 = "insert into customer(cid, name) values(1003, 'tempCustomer6')" - require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery1, matchInsertQuery1)) + assertQueryDoesNotExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery1, matchInsertQuery1) insertQuery1 = "insert into customer(cid, name) values(1004, 'tempCustomer7')" - require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery1, matchInsertQuery1)) + assertQueryDoesNotExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery1, matchInsertQuery1) waitForNoWorkflowLag(t, vc, targetKs, workflow) @@ -840,13 +892,13 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl switchWrites(t, workflowType, ksWorkflow, false) var exists bool - exists, err = checkIfDenyListExists(t, vc, "product:0", "customer") + exists, err = isTableInDenyList(t, vc, "product:0", "customer") require.NoError(t, err, "Error getting denylist for customer:0") require.True(t, exists) moveTablesAction(t, "Complete", allCellNames, workflow, sourceKs, targetKs, tables) - exists, err = checkIfDenyListExists(t, vc, "product:0", "customer") + exists, err = isTableInDenyList(t, vc, "product:0", "customer") require.NoError(t, err, "Error getting denylist for customer:0") require.False(t, exists) @@ -866,11 +918,11 @@ func shardCustomer(t *testing.T, 
testReverse bool, cells []*Cell, sourceCellOrAl require.True(t, found) insertQuery2 = "insert into customer(name, cid) values('tempCustomer8', 103)" // ID 103, hence due to reverse_bits in shard 80- - require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", insertQuery2, matchInsertQuery2)) + assertQueryDoesNotExecutesOnTablet(t, vtgateConn, productTab, "customer", insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) values('tempCustomer10', 104)" // ID 105, hence due to reverse_bits in shard -80 - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery2, matchInsertQuery2)) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) values('tempCustomer9', 105)" // ID 104, hence due to reverse_bits in shard 80- - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery2, matchInsertQuery2)) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery2, matchInsertQuery2) execVtgateQuery(t, vtgateConn, "customer", "delete from customer where name like 'tempCustomer%'") waitForRowCountInTablet(t, customerTab1, "customer", "customer", 1) @@ -1013,11 +1065,8 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou autoIncrementStep, autoIncrementStep) tablet.QueryTablet(autoIncrementSetQuery, "", false) } - workflowType := "Reshard" - if err := vc.VtctlClient.ExecuteCommand(workflowType, "--", "--source_shards="+sourceShards, "--target_shards="+targetShards, - "--cells="+sourceCellOrAlias, "--tablet_types=replica,primary", "Create", ksWorkflow); err != nil { - t.Fatalf("Reshard Create command failed with %+v\n", err) - } + reshardAction(t, "Create", workflow, ksName, sourceShards, targetShards, sourceCellOrAlias, "replica,primary") + targetShards = "," + targetShards + "," for 
_, tab := range tablets { if strings.Contains(targetShards, ","+tab.Shard+",") { @@ -1028,19 +1077,17 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou continue } } - vdiff1(t, ksWorkflow, "") + restartWorkflow(t, ksWorkflow) + vdiffSideBySide(t, ksWorkflow, "") if dryRunResultSwitchReads != nil { - switchReadsDryRun(t, workflowType, allCellNames, ksWorkflow, dryRunResultSwitchReads) + reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "rdonly,replica", "--dry-run") } - switchReads(t, workflowType, allCellNames, ksWorkflow, false) + reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "rdonly,replica") if dryRunResultSwitchWrites != nil { - switchWritesDryRun(t, workflowType, ksWorkflow, dryRunResultSwitchWrites) - } - switchWrites(t, workflowType, ksWorkflow, false) - if err := vc.VtctlClient.ExecuteCommand(workflowType, "--", "--source_shards="+sourceShards, "--target_shards="+targetShards, - "--cells="+sourceCellOrAlias, "--tablet_types=replica,primary", "Complete", ksWorkflow); err != nil { - t.Fatalf("Reshard Complete command failed with %+v\n", err) + reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "primary", "--dry-run") } + reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "primary") + reshardAction(t, "Complete", workflow, ksName, "", "", "", "") for tabletName, count := range counts { if tablets[tabletName] == nil { continue @@ -1067,7 +1114,7 @@ func shardOrders(t *testing.T) { workflowType := "MoveTables" catchup(t, customerTab1, workflow, workflowType) catchup(t, customerTab2, workflow, workflowType) - vdiff1(t, ksWorkflow, "") + vdiffSideBySide(t, ksWorkflow, "") switchReads(t, workflowType, allCellNames, ksWorkflow, false) switchWrites(t, workflowType, ksWorkflow, false) moveTablesAction(t, "Complete", cell, workflow, sourceKs, targetKs, tables) @@ -1079,7 +1126,7 @@ func shardOrders(t *testing.T) { func 
checkThatVDiffFails(t *testing.T, keyspace, workflow string) { ksWorkflow := fmt.Sprintf("%s.%s", keyspace, workflow) - t.Run("check that vdiff1 won't run", func(t2 *testing.T) { + t.Run("check that vdiffSideBySide won't run", func(t2 *testing.T) { output, err := vc.VtctlClient.ExecuteCommandWithOutput("VDiff", "--", "--v1", ksWorkflow) require.Error(t, err) require.Contains(t, output, "invalid VDiff run") @@ -1115,7 +1162,7 @@ func shardMerchant(t *testing.T) { catchup(t, merchantTab1, workflow, workflowType) catchup(t, merchantTab2, workflow, workflowType) - vdiff1(t, fmt.Sprintf("%s.%s", merchantKeyspace, workflow), "") + vdiffSideBySide(t, fmt.Sprintf("%s.%s", merchantKeyspace, workflow), "") switchReads(t, workflowType, allCellNames, ksWorkflow, false) switchWrites(t, workflowType, ksWorkflow, false) printRoutingRules(t, vc, "After merchant movetables") @@ -1134,20 +1181,43 @@ func shardMerchant(t *testing.T) { }) } -func materialize(t *testing.T, spec string) { - t.Run("materialize", func(t *testing.T) { - err := vc.VtctlClient.ExecuteCommand("Materialize", spec) - require.NoError(t, err, "Materialize") - }) +func materialize(t *testing.T, spec string, useVtctldClient bool) { + if useVtctldClient { + t.Run("vtctldclient materialize", func(t *testing.T) { + // Split out the parameters from the JSON spec for + // use in the vtctldclient command flags. + // This allows us to test both clients with the same + // input. 
+ sj := gjson.Parse(spec) + workflow := sj.Get("workflow").String() + require.NotEmpty(t, workflow, "workflow not found in spec: %s", spec) + sourceKeyspace := sj.Get("source_keyspace").String() + require.NotEmpty(t, sourceKeyspace, "source_keyspace not found in spec: %s", spec) + targetKeyspace := sj.Get("target_keyspace").String() + require.NotEmpty(t, targetKeyspace, "target_keyspace not found in spec: %s", spec) + tableSettings := sj.Get("table_settings").String() + require.NotEmpty(t, tableSettings, "table_settings not found in spec: %s", spec) + stopAfterCopy := sj.Get("stop-after-copy").Bool() // Optional + err := vc.VtctldClient.ExecuteCommand("materialize", "--workflow", workflow, "--target-keyspace", targetKeyspace, + "create", "--source-keyspace", sourceKeyspace, "--table-settings", tableSettings, + fmt.Sprintf("--stop-after-copy=%t", stopAfterCopy)) + require.NoError(t, err, "Materialize") + }) + } else { + t.Run("materialize", func(t *testing.T) { + err := vc.VtctlClient.ExecuteCommand("Materialize", spec) + require.NoError(t, err, "Materialize") + }) + } } -func materializeProduct(t *testing.T) { +func materializeProduct(t *testing.T, useVtctldClient bool) { t.Run("materializeProduct", func(t *testing.T) { // materializing from "product" keyspace to "customer" keyspace workflow := "cproduct" keyspace := "customer" applyVSchema(t, materializeProductVSchema, keyspace) - materialize(t, materializeProductSpec) + materialize(t, materializeProductSpec, useVtctldClient) customerTablets := vc.getVttabletsInKeyspace(t, defaultCell, keyspace, "primary") for _, tab := range customerTablets { catchup(t, tab, workflow, "Materialize") @@ -1222,13 +1292,13 @@ func materializeProduct(t *testing.T) { }) } -func materializeRollup(t *testing.T) { +func materializeRollup(t *testing.T, useVtctldClient bool) { t.Run("materializeRollup", func(t *testing.T) { keyspace := "product" workflow := "rollup" applyVSchema(t, materializeSalesVSchema, keyspace) productTab := 
vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet - materialize(t, materializeRollupSpec) + materialize(t, materializeRollupSpec, useVtctldClient) catchup(t, productTab, workflow, "Materialize") waitForRowCount(t, vtgateConn, "product", "rollup", 1) waitForQueryResult(t, vtgateConn, "product:0", "select rollupname, kount from rollup", @@ -1236,11 +1306,11 @@ func materializeRollup(t *testing.T) { }) } -func materializeSales(t *testing.T) { +func materializeSales(t *testing.T, useVtctldClient bool) { t.Run("materializeSales", func(t *testing.T) { keyspace := "product" applyVSchema(t, materializeSalesVSchema, keyspace) - materialize(t, materializeSalesSpec) + materialize(t, materializeSalesSpec, useVtctldClient) productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet catchup(t, productTab, "sales", "Materialize") waitForRowCount(t, vtgateConn, "product", "sales", 2) @@ -1249,10 +1319,10 @@ func materializeSales(t *testing.T) { }) } -func materializeMerchantSales(t *testing.T) { +func materializeMerchantSales(t *testing.T, useVtctldClient bool) { t.Run("materializeMerchantSales", func(t *testing.T) { workflow := "msales" - materialize(t, materializeMerchantSalesSpec) + materialize(t, materializeMerchantSalesSpec, useVtctldClient) merchantTablets := vc.getVttabletsInKeyspace(t, defaultCell, merchantKeyspace, "primary") for _, tab := range merchantTablets { catchup(t, tab, workflow, "Materialize") @@ -1263,12 +1333,12 @@ func materializeMerchantSales(t *testing.T) { }) } -func materializeMerchantOrders(t *testing.T) { +func materializeMerchantOrders(t *testing.T, useVtctldClient bool) { t.Run("materializeMerchantOrders", func(t *testing.T) { workflow := "morders" keyspace := merchantKeyspace applyVSchema(t, merchantOrdersVSchema, keyspace) - materialize(t, materializeMerchantOrdersSpec) + materialize(t, materializeMerchantOrdersSpec, useVtctldClient) merchantTablets := 
vc.getVttabletsInKeyspace(t, defaultCell, merchantKeyspace, "primary") for _, tab := range merchantTablets { catchup(t, tab, workflow, "Materialize") @@ -1283,7 +1353,7 @@ func checkVtgateHealth(t *testing.T, cell *Cell) { for _, vtgate := range cell.Vtgates { vtgateHealthURL := strings.Replace(vtgate.VerifyURL, "vars", "health", -1) if !checkHealth(t, vtgateHealthURL) { - assert.Failf(t, "Vtgate not healthy: ", vtgateHealthURL) + assert.Fail(t, "Vtgate not healthy: ", vtgateHealthURL) } } } @@ -1291,7 +1361,7 @@ func checkVtgateHealth(t *testing.T, cell *Cell) { func checkTabletHealth(t *testing.T, tablet *Tablet) { vttabletHealthURL := strings.Replace(tablet.Vttablet.VerifyURL, "debug/vars", "healthz", -1) if !checkHealth(t, vttabletHealthURL) { - assert.Failf(t, "Vttablet not healthy: ", vttabletHealthURL) + assert.Fail(t, "Vttablet not healthy: ", vttabletHealthURL) } } @@ -1354,27 +1424,62 @@ func catchup(t *testing.T, vttablet *cluster.VttabletProcess, workflow, info str func moveTablesAction(t *testing.T, action, cell, workflow, sourceKs, targetKs, tables string, extraFlags ...string) { var err error - if len(extraFlags) > 0 { - err = vc.VtctlClient.ExecuteCommand("MoveTables", "--", "--source="+sourceKs, "--tables="+tables, - "--cells="+cell, "--tablet_types=primary,replica,rdonly", strings.Join(extraFlags, " "), - action, fmt.Sprintf("%s.%s", targetKs, workflow)) - } else { - err = vc.VtctlClient.ExecuteCommand("MoveTables", "--", "--source="+sourceKs, "--tables="+tables, "--cells="+cell, - "--tablet_types=primary,replica,rdonly", action, fmt.Sprintf("%s.%s", targetKs, workflow)) + args := []string{"MoveTables", "--workflow=" + workflow, "--target-keyspace=" + targetKs, action} + switch strings.ToLower(action) { + case strings.ToLower(workflowActionCreate): + extraFlags = append(extraFlags, "--source-keyspace="+sourceKs, "--tables="+tables, "--cells="+cell, "--tablet-types=primary,replica,rdonly") + case strings.ToLower(workflowActionSwitchTraffic): + 
extraFlags = append(extraFlags, "--initialize-target-sequences") + } + args = append(args, extraFlags...) + output, err := vc.VtctldClient.ExecuteCommandWithOutput(args...) + if output != "" { + fmt.Printf("Output of vtctldclient MoveTables %s for %s workflow:\n++++++\n%s\n--------\n", + action, workflow, output) } if err != nil { t.Fatalf("MoveTables %s command failed with %+v\n", action, err) } } -func moveTablesActionWithTabletTypes(t *testing.T, action, cell, workflow, sourceKs, targetKs, tables string, tabletTypes string, timeout time.Duration, ignoreErrors bool) { - if err := vc.VtctlClient.ExecuteCommand("MoveTables", "--", "--source="+sourceKs, "--tables="+tables, "--cells="+cell, - "--tablet_types="+tabletTypes, "--timeout="+timeout.String(), action, fmt.Sprintf("%s.%s", targetKs, workflow)); err != nil { +func moveTablesActionWithTabletTypes(t *testing.T, action, cell, workflow, sourceKs, targetKs, tables string, tabletTypes string, ignoreErrors bool) { + if err := vc.VtctldClient.ExecuteCommand("MoveTables", "--workflow="+workflow, "--target-keyspace="+targetKs, action, + "--source-keyspace="+sourceKs, "--tables="+tables, "--cells="+cell, "--tablet-types="+tabletTypes); err != nil { if !ignoreErrors { t.Fatalf("MoveTables %s command failed with %+v\n", action, err) } } } +// reshardAction is a helper function to run the reshard command and +// action using vtctldclient. 
+func reshardAction(t *testing.T, action, workflow, keyspaceName, sourceShards, targetShards, cell, tabletTypes string, extraFlags ...string) { + var err error + args := []string{"Reshard", "--workflow=" + workflow, "--target-keyspace=" + keyspaceName, action} + + switch strings.ToLower(action) { + case strings.ToLower(workflowActionCreate): + if tabletTypes == "" { + tabletTypes = "replica,rdonly,primary" + } + args = append(args, "--source-shards="+sourceShards, "--target-shards="+targetShards) + } + if cell != "" { + args = append(args, "--cells="+cell) + } + if tabletTypes != "" { + args = append(args, "--tablet-types="+tabletTypes) + } + args = append(args, extraFlags...) + output, err := vc.VtctldClient.ExecuteCommandWithOutput(args...) + if output != "" { + log.Infof("Output of vtctldclient Reshard %s for %s workflow:\n++++++\n%s\n--------\n", + action, workflow, output) + } + if err != nil { + t.Fatalf("Reshard %s command failed with %+v\n", action, err) + } +} + func applyVSchema(t *testing.T, vschema, keyspace string) { err := vc.VtctlClient.ExecuteCommand("ApplyVSchema", "--", "--vschema", vschema, keyspace) require.NoError(t, err) @@ -1386,6 +1491,7 @@ func switchReadsDryRun(t *testing.T, workflowType, cells, ksWorkflow string, dry require.FailNowf(t, "Invalid workflow type for SwitchTraffic, must be MoveTables or Reshard", "workflow type specified: %s", workflowType) } + ensureCanSwitch(t, workflowType, cells, ksWorkflow) output, err := vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "--cells="+cells, "--tablet_types=rdonly,replica", "--dry_run", "SwitchTraffic", ksWorkflow) require.NoError(t, err, fmt.Sprintf("Switching Reads DryRun Error: %s: %s", err, output)) @@ -1394,9 +1500,26 @@ func switchReadsDryRun(t *testing.T, workflowType, cells, ksWorkflow string, dry } } +func ensureCanSwitch(t *testing.T, workflowType, cells, ksWorkflow string) { + timer := time.NewTimer(defaultTimeout) + defer timer.Stop() + for { + _, err := 
vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "--cells="+cells, "--dry_run", "SwitchTraffic", ksWorkflow) + if err == nil { + return + } + select { + case <-timer.C: + t.Fatalf("Did not become ready to switch traffic for %s before the timeout of %s", ksWorkflow, defaultTimeout) + default: + time.Sleep(defaultTick) + } + } +} + func switchReads(t *testing.T, workflowType, cells, ksWorkflow string, reverse bool) { - if workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_MoveTables)] && - workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_Reshard)] { + if workflowType != binlogdatapb.VReplicationWorkflowType_MoveTables.String() && + workflowType != binlogdatapb.VReplicationWorkflowType_Reshard.String() { require.FailNowf(t, "Invalid workflow type for SwitchTraffic, must be MoveTables or Reshard", "workflow type specified: %s", workflowType) } @@ -1406,6 +1529,7 @@ func switchReads(t *testing.T, workflowType, cells, ksWorkflow string, reverse b if reverse { command = "ReverseTraffic" } + ensureCanSwitch(t, workflowType, cells, ksWorkflow) output, err = vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "--cells="+cells, "--tablet_types=rdonly", command, ksWorkflow) require.NoError(t, err, fmt.Sprintf("%s Error: %s: %s", command, err, output)) @@ -1414,6 +1538,35 @@ func switchReads(t *testing.T, workflowType, cells, ksWorkflow string, reverse b require.NoError(t, err, fmt.Sprintf("%s Error: %s: %s", command, err, output)) } +func switchWrites(t *testing.T, workflowType, ksWorkflow string, reverse bool) { + if workflowType != binlogdatapb.VReplicationWorkflowType_MoveTables.String() && + workflowType != binlogdatapb.VReplicationWorkflowType_Reshard.String() { + require.FailNowf(t, "Invalid workflow type for SwitchTraffic, must be MoveTables or Reshard", + "workflow type specified: %s", workflowType) + } + command := "SwitchTraffic" + if reverse { 
+ command = "ReverseTraffic" + } + const SwitchWritesTimeout = "91s" // max: 3 tablet picker 30s waits + 1 + ensureCanSwitch(t, workflowType, "", ksWorkflow) + // Use vtctldclient for MoveTables SwitchTraffic ~ 50% of the time. + if workflowType == binlogdatapb.VReplicationWorkflowType_MoveTables.String() && time.Now().Second()%2 == 0 { + parts := strings.Split(ksWorkflow, ".") + require.Equal(t, 2, len(parts)) + moveTablesAction(t, command, defaultCellName, parts[1], sourceKs, parts[0], "", "--timeout="+SwitchWritesTimeout, "--tablet-types=primary") + return + } + output, err := vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "--tablet_types=primary", + "--timeout="+SwitchWritesTimeout, "--initialize-target-sequences", command, ksWorkflow) + if output != "" { + fmt.Printf("Output of switching writes with vtctlclient for %s:\n++++++\n%s\n--------\n", ksWorkflow, output) + } + // printSwitchWritesExtraDebug is useful when debugging failures in Switch writes due to corner cases/races + _ = printSwitchWritesExtraDebug + require.NoError(t, err, fmt.Sprintf("Switch writes Error: %s: %s", err, output)) +} + func switchWritesDryRun(t *testing.T, workflowType, ksWorkflow string, dryRunResults []string) { if workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_MoveTables)] && workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_Reshard)] { @@ -1426,6 +1579,19 @@ func switchWritesDryRun(t *testing.T, workflowType, ksWorkflow string, dryRunRes validateDryRunResults(t, output, dryRunResults) } +// restartWorkflow confirms that a workflow can be successfully +// stopped and started. 
+func restartWorkflow(t *testing.T, ksWorkflow string) { + keyspace, workflow, found := strings.Cut(ksWorkflow, ".") + require.True(t, found, "unexpected ksWorkflow value: %s", ksWorkflow) + err := vc.VtctldClient.ExecuteCommand("workflow", "--keyspace", keyspace, "stop", "--workflow", workflow) + require.NoError(t, err, "failed to stop workflow: %v", err) + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String()) + err = vc.VtctldClient.ExecuteCommand("workflow", "--keyspace", keyspace, "start", "--workflow", workflow) + require.NoError(t, err, "failed to start workflow: %v", err) + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) +} + func printSwitchWritesExtraDebug(t *testing.T, ksWorkflow, msg string) { // Temporary code: print lots of info for debugging occasional flaky failures in customer reshard in CI for multicell test debug := true @@ -1456,27 +1622,6 @@ func printSwitchWritesExtraDebug(t *testing.T, ksWorkflow, msg string) { } } -func switchWrites(t *testing.T, workflowType, ksWorkflow string, reverse bool) { - if workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_MoveTables)] && - workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_Reshard)] { - require.FailNowf(t, "Invalid workflow type for SwitchTraffic, must be MoveTables or Reshard", - "workflow type specified: %s", workflowType) - } - command := "SwitchTraffic" - if reverse { - command = "ReverseTraffic" - } - const SwitchWritesTimeout = "91s" // max: 3 tablet picker 30s waits + 1 - output, err := vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "--tablet_types=primary", - "--timeout="+SwitchWritesTimeout, command, ksWorkflow) - if output != "" { - fmt.Printf("Output of switching writes for %s:\n++++++\n%s\n--------\n", ksWorkflow, output) - } - // printSwitchWritesExtraDebug is useful when 
debugging failures in Switch writes due to corner cases/races - _ = printSwitchWritesExtraDebug - require.NoError(t, err, fmt.Sprintf("Switch writes Error: %s: %s", err, output)) -} - // generateInnoDBRowHistory generates at least maxSourceTrxHistory rollback segment entries. // This allows us to confirm two behaviors: // 1. MoveTables blocks on starting its first copy phase until we rollback @@ -1513,6 +1658,7 @@ func generateInnoDBRowHistory(t *testing.T, sourceKS string, neededTrxHistory in // expected length. func waitForInnoDBHistoryLength(t *testing.T, tablet *cluster.VttabletProcess, expectedLength int64) { timer := time.NewTimer(defaultTimeout) + defer timer.Stop() historyLen := int64(0) for { res, err := tablet.QueryTablet(historyLenQuery, tablet.Keyspace, false) diff --git a/go/test/endtoend/vreplication/vreplication_test_env.go b/go/test/endtoend/vreplication/vreplication_test_env.go index f10aa47ac38..ede63fd2e47 100644 --- a/go/test/endtoend/vreplication/vreplication_test_env.go +++ b/go/test/endtoend/vreplication/vreplication_test_env.go @@ -19,14 +19,14 @@ package vreplication var dryRunResultsSwitchWritesCustomerShard = []string{ "Lock keyspace product", "Lock keyspace customer", - "Stop writes on keyspace product, tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,reftable,vdiff_order]:", + "Stop writes on keyspace product, tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order]:", "/ Keyspace product, Shard 0 at Position", "Wait for VReplication on stopped streams to catchup for up to 30s", "Create reverse replication workflow p2c_reverse", "Create journal entries on source databases", - "Enable writes on keyspace customer tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,reftable,vdiff_order]", + "Enable writes on keyspace customer tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order]", "Switch routing from keyspace 
product to keyspace customer", - "Routing rules for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,reftable,vdiff_order] will be updated", + "Routing rules for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order] will be updated", "Switch writes completed, freeze and delete vreplication streams on:", " tablet 200 ", " tablet 300 ", @@ -41,8 +41,8 @@ var dryRunResultsSwitchWritesCustomerShard = []string{ var dryRunResultsReadCustomerShard = []string{ "Lock keyspace product", - "Switch reads for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,reftable,vdiff_order] to keyspace customer for tablet types [RDONLY,REPLICA]", - "Routing rules for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,reftable,vdiff_order] will be updated", + "Switch reads for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order] to keyspace customer for tablet types [RDONLY,REPLICA]", + "Routing rules for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order] will be updated", "Unlock keyspace product", } diff --git a/go/test/endtoend/vreplication/vstream_test.go b/go/test/endtoend/vreplication/vstream_test.go index 63c6655cf5c..5c5e6a80130 100644 --- a/go/test/endtoend/vreplication/vstream_test.go +++ b/go/test/endtoend/vreplication/vstream_test.go @@ -33,7 +33,6 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" _ "vitess.io/vitess/go/vt/vtctl/grpcvtctlclient" - "vitess.io/vitess/go/vt/vtgate/evalengine" _ "vitess.io/vitess/go/vt/vtgate/grpcvtgateconn" "vitess.io/vitess/go/vt/vtgate/vtgateconn" @@ -171,7 +170,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { qr := execVtgateQuery(t, vtgateConn, "product", "select count(*) from customer") require.NotNil(t, qr) // total number of row events found by the VStream API 
should match the rows inserted - insertedRows, err := evalengine.ToInt64(qr.Rows[0][0]) + insertedRows, err := qr.Rows[0][0].ToCastInt64() require.NoError(t, err) require.Equal(t, insertedRows, numRowEvents) } @@ -521,7 +520,7 @@ func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEven // We believe that checking the number of row events for the unsharded keyspace, which should always be greater than 0 before and after resharding, // is sufficient to confirm that the resharding of one keyspace does not affect another keyspace, while keeping the test straightforward. customerResult := execVtgateQuery(t, vtgateConn, "sharded", "select count(*) from customer") - insertedCustomerRows, err := evalengine.ToInt64(customerResult.Rows[0][0]) + insertedCustomerRows, err := customerResult.Rows[0][0].ToCastInt64() require.NoError(t, err) require.Equal(t, insertedCustomerRows, ne.numLessThan80Events+ne.numGreaterThan80Events+ne.numLessThan40Events+ne.numGreaterThan40Events) return ne diff --git a/go/test/endtoend/vreplication/wrappers_test.go b/go/test/endtoend/vreplication/wrappers_test.go new file mode 100644 index 00000000000..6bd0bbb19d8 --- /dev/null +++ b/go/test/endtoend/vreplication/wrappers_test.go @@ -0,0 +1,206 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vreplication + +import ( + "math/rand" + "strconv" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/log" +) + +type moveTablesFlavor int + +const ( + moveTablesFlavorRandom moveTablesFlavor = iota + moveTablesFlavorVtctl + moveTablesFlavorVtctld +) + +var moveTablesFlavors = []moveTablesFlavor{ + moveTablesFlavorVtctl, + moveTablesFlavorVtctld, +} + +type moveTables struct { + vc *VitessCluster + workflowName string + targetKeyspace string + sourceKeyspace string + tables string + atomicCopy bool + sourceShards string +} + +type iMoveTables interface { + Create() + Show() + SwitchReads() + SwitchWrites() + SwitchReadsAndWrites() + ReverseReadsAndWrites() + Cancel() + Complete() + Flavor() string +} + +func newMoveTables(vc *VitessCluster, mt *moveTables, flavor moveTablesFlavor) iMoveTables { + mt.vc = vc + var mt2 iMoveTables + if flavor == moveTablesFlavorRandom { + flavor = moveTablesFlavors[rand.Intn(len(moveTablesFlavors))] + } + switch flavor { + case moveTablesFlavorVtctl: + mt2 = newVtctlMoveTables(mt) + case moveTablesFlavorVtctld: + mt2 = newVtctldMoveTables(mt) + default: + panic("unreachable") + } + log.Infof("Using moveTables flavor: %s", mt2.Flavor()) + return mt2 +} + +type VtctlMoveTables struct { + *moveTables +} + +func (vmt *VtctlMoveTables) Flavor() string { + return "vtctl" +} + +func newVtctlMoveTables(mt *moveTables) *VtctlMoveTables { + return &VtctlMoveTables{mt} +} + +func (vmt *VtctlMoveTables) Create() { + log.Infof("vmt is %+v", vmt.vc, vmt.tables) + err := tstWorkflowExec(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, + vmt.tables, workflowActionCreate, "", vmt.sourceShards, "", vmt.atomicCopy) + require.NoError(vmt.vc.t, err) +} + +func (vmt *VtctlMoveTables) SwitchReadsAndWrites() { + err := tstWorkflowExec(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, + vmt.tables, workflowActionSwitchTraffic, "", "", "", vmt.atomicCopy) + 
require.NoError(vmt.vc.t, err) +} + +func (vmt *VtctlMoveTables) ReverseReadsAndWrites() { + err := tstWorkflowExec(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, + vmt.tables, workflowActionReverseTraffic, "", "", "", vmt.atomicCopy) + require.NoError(vmt.vc.t, err) +} + +func (vmt *VtctlMoveTables) Show() { + //TODO implement me + panic("implement me") +} + +func (vmt *VtctlMoveTables) SwitchReads() { + //TODO implement me + panic("implement me") +} + +func (vmt *VtctlMoveTables) SwitchWrites() { + //TODO implement me + panic("implement me") +} + +func (vmt *VtctlMoveTables) Cancel() { + err := tstWorkflowExec(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, + vmt.tables, workflowActionCancel, "", "", "", vmt.atomicCopy) + require.NoError(vmt.vc.t, err) +} + +func (vmt *VtctlMoveTables) Complete() { + //TODO implement me + panic("implement me") +} + +var _ iMoveTables = (*VtctldMoveTables)(nil) + +type VtctldMoveTables struct { + *moveTables +} + +func newVtctldMoveTables(mt *moveTables) *VtctldMoveTables { + return &VtctldMoveTables{mt} +} + +func (v VtctldMoveTables) Flavor() string { + return "vtctld" +} + +func (v VtctldMoveTables) exec(args ...string) { + args2 := []string{"MoveTables", "--workflow=" + v.workflowName, "--target-keyspace=" + v.targetKeyspace} + args2 = append(args2, args...) + if err := vc.VtctldClient.ExecuteCommand(args2...); err != nil { + v.vc.t.Fatalf("failed to create MoveTables workflow: %v", err) + } +} + +func (v VtctldMoveTables) Create() { + args := []string{"Create", "--source-keyspace=" + v.sourceKeyspace} + if v.tables != "" { + args = append(args, "--tables="+v.tables) + } else { + args = append(args, "--all-tables") + } + if v.atomicCopy { + args = append(args, "--atomic-copy="+strconv.FormatBool(v.atomicCopy)) + } + if v.sourceShards != "" { + args = append(args, "--source-shards="+v.sourceShards) + } + v.exec(args...) 
+} + +func (v VtctldMoveTables) SwitchReadsAndWrites() { + v.exec("SwitchTraffic") +} + +func (v VtctldMoveTables) ReverseReadsAndWrites() { + v.exec("ReverseTraffic") +} + +func (v VtctldMoveTables) Show() { + //TODO implement me + panic("implement me") +} + +func (v VtctldMoveTables) SwitchReads() { + //TODO implement me + panic("implement me") +} + +func (v VtctldMoveTables) SwitchWrites() { + //TODO implement me + panic("implement me") +} + +func (v VtctldMoveTables) Cancel() { + v.exec("Cancel") +} + +func (v VtctldMoveTables) Complete() { + //TODO implement me + panic("implement me") +} diff --git a/go/test/endtoend/vtgate/errors_as_warnings/main_test.go b/go/test/endtoend/vtgate/errors_as_warnings/main_test.go index 548df6c8b2e..66e587e9075 100644 --- a/go/test/endtoend/vtgate/errors_as_warnings/main_test.go +++ b/go/test/endtoend/vtgate/errors_as_warnings/main_test.go @@ -24,6 +24,7 @@ import ( "strings" "testing" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/test/endtoend/utils" "github.com/stretchr/testify/require" @@ -140,15 +141,15 @@ func TestScatterErrsAsWarns(t *testing.T) { utils.Exec(t, mode.conn, fmt.Sprintf("set workload = %s", mode.m)) utils.AssertMatches(t, mode.conn, query1, `[[INT64(4)]]`) - assertContainsOneOf(t, mode.conn, showQ, "no valid tablet", "no healthy tablet", "mysql.socket: connect: no such file or directory") + assertContainsOneOf(t, mode.conn, showQ, "operation not allowed in state SHUTTING_DOWN", "no valid tablet", "no healthy tablet", "mysql.socket: connect: no such file or directory") utils.AssertMatches(t, mode.conn, query2, `[[INT64(4)]]`) - assertContainsOneOf(t, mode.conn, showQ, "no valid tablet", "no healthy tablet", "mysql.socket: connect: no such file or directory") + assertContainsOneOf(t, mode.conn, showQ, "operation not allowed in state SHUTTING_DOWN", "no valid tablet", "no healthy tablet", "mysql.socket: connect: no such file or directory") // invalid_field should throw error and not warning _, err 
= mode.conn.ExecuteFetch("SELECT /*vt+ PLANNER=Gen4 SCATTER_ERRORS_AS_WARNINGS */ invalid_field from t1;", 1, false) require.Error(t, err) - serr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) - require.Equal(t, mysql.ERBadFieldError, serr.Number(), serr.Error()) + serr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) + require.Equal(t, sqlerror.ERBadFieldError, serr.Number(), serr.Error()) }) } } diff --git a/go/test/endtoend/vtgate/foreignkey/fk_fuzz_test.go b/go/test/endtoend/vtgate/foreignkey/fk_fuzz_test.go new file mode 100644 index 00000000000..134b9cfa180 --- /dev/null +++ b/go/test/endtoend/vtgate/foreignkey/fk_fuzz_test.go @@ -0,0 +1,734 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package foreignkey + +import ( + "database/sql" + "fmt" + "math/rand" + "sync" + "sync/atomic" + "testing" + "time" + + _ "github.com/go-sql-driver/mysql" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/vt/log" +) + +type QueryFormat string + +const ( + SQLQueries QueryFormat = "SQLQueries" + PreparedStatmentQueries QueryFormat = "PreparedStatmentQueries" + PreparedStatementPacket QueryFormat = "PreparedStatementPacket" +) + +// fuzzer runs threads that runs queries against the databases. +// It has parameters that define the way the queries are constructed. 
+type fuzzer struct { + maxValForId int + maxValForCol int + insertShare int + deleteShare int + updateShare int + concurrency int + queryFormat QueryFormat + + // shouldStop is an internal state variable, that tells the fuzzer + // whether it should stop or not. + shouldStop atomic.Bool + // wg is an internal state variable, that used to know whether the fuzzer threads are running or not. + wg sync.WaitGroup + // firstFailureInfo stores the information about the database state after the first failure occurs. + firstFailureInfo *debugInfo +} + +// debugInfo stores the debugging information we can collect after a failure happens. +type debugInfo struct { + // This can be a list of queries for prepared statements. + queryToFail []string + vitessState []*sqltypes.Result + mysqlState []*sqltypes.Result +} + +// newFuzzer creates a new fuzzer struct. +func newFuzzer(concurrency int, maxValForId int, maxValForCol int, insertShare int, deleteShare int, updateShare int, queryFormat QueryFormat) *fuzzer { + fz := &fuzzer{ + concurrency: concurrency, + maxValForId: maxValForId, + maxValForCol: maxValForCol, + insertShare: insertShare, + deleteShare: deleteShare, + updateShare: updateShare, + queryFormat: queryFormat, + wg: sync.WaitGroup{}, + } + // Initially the fuzzer thread is stopped. + fz.shouldStop.Store(true) + return fz +} + +// generateQuery generates a query from the parameters for the fuzzer. +// The returned set is a list of strings, because for prepared statements, we have to run +// set queries first and then the final query eventually. 
+func (fz *fuzzer) generateQuery() []string { + val := rand.Intn(fz.insertShare + fz.updateShare + fz.deleteShare) + if val < fz.insertShare { + switch fz.queryFormat { + case SQLQueries: + return []string{fz.generateInsertDMLQuery()} + case PreparedStatmentQueries: + return fz.getPreparedInsertQueries() + default: + panic("Unknown query type") + } + } + if val < fz.insertShare+fz.updateShare { + switch fz.queryFormat { + case SQLQueries: + return []string{fz.generateUpdateDMLQuery()} + case PreparedStatmentQueries: + return fz.getPreparedUpdateQueries() + default: + panic("Unknown query type") + } + } + switch fz.queryFormat { + case SQLQueries: + return []string{fz.generateDeleteDMLQuery()} + case PreparedStatmentQueries: + return fz.getPreparedDeleteQueries() + default: + panic("Unknown query type") + } +} + +// generateInsertDMLQuery generates an INSERT query from the parameters for the fuzzer. +func (fz *fuzzer) generateInsertDMLQuery() string { + tableId := rand.Intn(len(fkTables)) + idValue := 1 + rand.Intn(fz.maxValForId) + tableName := fkTables[tableId] + if tableName == "fk_t20" { + colValue := rand.Intn(1 + fz.maxValForCol) + col2Value := rand.Intn(1 + fz.maxValForCol) + return fmt.Sprintf("insert into %v (id, col, col2) values (%v, %v, %v)", tableName, idValue, convertColValueToString(colValue), convertColValueToString(col2Value)) + } else if isMultiColFkTable(tableName) { + colaValue := rand.Intn(1 + fz.maxValForCol) + colbValue := rand.Intn(1 + fz.maxValForCol) + return fmt.Sprintf("insert into %v (id, cola, colb) values (%v, %v, %v)", tableName, idValue, convertColValueToString(colaValue), convertColValueToString(colbValue)) + } else { + colValue := rand.Intn(1 + fz.maxValForCol) + return fmt.Sprintf("insert into %v (id, col) values (%v, %v)", tableName, idValue, convertColValueToString(colValue)) + } +} + +// convertColValueToString converts the given value to a string +func convertColValueToString(value int) string { + if value == 0 { + return 
"NULL" + } + return fmt.Sprintf("%d", value) +} + +// generateUpdateDMLQuery generates an UPDATE query from the parameters for the fuzzer. +func (fz *fuzzer) generateUpdateDMLQuery() string { + tableId := rand.Intn(len(fkTables)) + idValue := 1 + rand.Intn(fz.maxValForId) + tableName := fkTables[tableId] + if tableName == "fk_t20" { + colValue := rand.Intn(1 + fz.maxValForCol) + col2Value := rand.Intn(1 + fz.maxValForCol) + return fmt.Sprintf("update %v set col = %v, col2 = %v where id = %v", tableName, convertColValueToString(colValue), convertColValueToString(col2Value), idValue) + } else if isMultiColFkTable(tableName) { + colaValue := rand.Intn(1 + fz.maxValForCol) + colbValue := rand.Intn(1 + fz.maxValForCol) + return fmt.Sprintf("update %v set cola = %v, colb = %v where id = %v", tableName, convertColValueToString(colaValue), convertColValueToString(colbValue), idValue) + } else { + colValue := rand.Intn(1 + fz.maxValForCol) + return fmt.Sprintf("update %v set col = %v where id = %v", tableName, convertColValueToString(colValue), idValue) + } +} + +// generateDeleteDMLQuery generates a DELETE query from the parameters for the fuzzer. +func (fz *fuzzer) generateDeleteDMLQuery() string { + tableId := rand.Intn(len(fkTables)) + idValue := 1 + rand.Intn(fz.maxValForId) + query := fmt.Sprintf("delete from %v where id = %v", fkTables[tableId], idValue) + return query +} + +// start starts running the fuzzer. +func (fz *fuzzer) start(t *testing.T, sharded bool) { + // We mark the fuzzer thread to be running now. + fz.shouldStop.Store(false) + fz.wg.Add(fz.concurrency) + for i := 0; i < fz.concurrency; i++ { + fuzzerThreadId := i + go func() { + fz.runFuzzerThread(t, sharded, fuzzerThreadId) + }() + } +} + +// runFuzzerThread is used to run a thread of the fuzzer. +func (fz *fuzzer) runFuzzerThread(t *testing.T, sharded bool, fuzzerThreadId int) { + // Whenever we finish running this thread, we should mark the thread has stopped. 
+ defer func() { + fz.wg.Done() + }() + // Create a MySQL Compare that connects to both Vitess and MySQL and runs the queries against both. + mcmp, err := utils.NewMySQLCompare(t, vtParams, mysqlParams) + require.NoError(t, err) + var vitessDb, mysqlDb *sql.DB + if fz.queryFormat == PreparedStatementPacket { + // Open another connection to Vitess using the go-sql-driver so that we can send prepared statements as COM_STMT_PREPARE packets. + vitessDb, err = sql.Open("mysql", fmt.Sprintf("@tcp(%s:%v)/%s", vtParams.Host, vtParams.Port, vtParams.DbName)) + require.NoError(t, err) + defer vitessDb.Close() + // Open a similar connection to MySQL + mysqlDb, err = sql.Open("mysql", fmt.Sprintf("%v:%v@unix(%s)/%s", mysqlParams.Uname, mysqlParams.Pass, mysqlParams.UnixSocket, mysqlParams.DbName)) + require.NoError(t, err) + defer mysqlDb.Close() + } + // Set the correct keyspace to use from VtGates. + if sharded { + _ = utils.Exec(t, mcmp.VtConn, "use `ks`") + if vitessDb != nil { + _, _ = vitessDb.Exec("use `ks`") + } + } else { + _ = utils.Exec(t, mcmp.VtConn, "use `uks`") + if vitessDb != nil { + _, _ = vitessDb.Exec("use `uks`") + } + } + for { + // If fuzzer thread is marked to be stopped, then we should exit this go routine. + if fz.shouldStop.Load() == true { + return + } + switch fz.queryFormat { + case SQLQueries, PreparedStatmentQueries: + if fz.generateAndExecuteStatementQuery(t, mcmp) { + return + } + case PreparedStatementPacket: + if fz.generateAndExecutePreparedPacketQuery(t, mysqlDb, vitessDb, mcmp) { + return + } + default: + panic("Unknown query format") + } + + } +} + +// generateAndExecuteStatementQuery generates a query and runs it on Vitess (and possibly MySQL). +// In this function we send the queries to Vitess always using COM_QUERY packets. +// We handle 2 query formats in this function: +// 1. SQLQueries: DML queries are run as a single SQL query. +// 2. 
PreparedStatmentQueries: We execute a prepared statement as a SQL query, SET user defined variables and then Execute the DML.
func (fz *fuzzer) generateAndExecuteStatementQuery(t *testing.T, mcmp utils.MySQLCompare) (exit bool) {
	// Get a query and execute it.
	queries := fz.generateQuery()
	// We get a set of queries only when we are using prepared statements, which require running `SET` queries before running the actual DML query.
	for _, query := range queries {
		// When the concurrency is 1, then we run the query both on MySQL and Vitess.
		if fz.concurrency == 1 {
			_, _ = mcmp.ExecAllowAndCompareError(query)
			// If t is marked failed, we have encountered our first failure.
			// Let's collect the required information and finish execution.
			if t.Failed() {
				fz.firstFailureInfo = &debugInfo{
					queryToFail: queries,
					mysqlState:  collectFkTablesState(mcmp.MySQLConn),
					vitessState: collectFkTablesState(mcmp.VtConn),
				}
				return true
			}
		} else {
			// When we are running concurrent threads, then we run all the queries on Vitess.
			// Errors are intentionally ignored: concurrent FK violations are expected.
			_, _ = utils.ExecAllowError(t, mcmp.VtConn, query)
		}
	}
	return false
}

// generateAndExecutePreparedPacketQuery generates a query and runs it on Vitess (and possibly MySQL).
// This function handles the query format PreparedStatementPacket. Here we send the prepared statement as a COM_STMT_PREPARE packet.
// Following which we execute it. To this end, we use the go-sql-driver.
// It returns true when the caller should stop (first failure captured).
func (fz *fuzzer) generateAndExecutePreparedPacketQuery(t *testing.T, mysqlDB *sql.DB, vitessDb *sql.DB, mcmp utils.MySQLCompare) bool {
	query, params := fz.generateParameterizedQuery()
	// When the concurrency is 1, then we run the query both on MySQL and Vitess.
	if fz.concurrency == 1 {
		fz.execAndCompareMySQlAndVitess(t, mysqlDB, vitessDb, query, params)
		// If t is marked failed, we have encountered our first failure.
		// Let's collect the required information and finish execution.
		if t.Failed() {
			fz.firstFailureInfo = &debugInfo{
				queryToFail: []string{query},
				mysqlState:  collectFkTablesState(mcmp.MySQLConn),
				vitessState: collectFkTablesState(mcmp.VtConn),
			}
			return true
		}
	} else {
		// When we are running concurrent threads, then we run all the queries on Vitess.
		_, _ = vitessDb.Exec(query, params...)
	}
	return false
}

// execAndCompareMySQlAndVitess executes the given query with the parameters on MySQL and Vitess and compares their results.
func (fz *fuzzer) execAndCompareMySQlAndVitess(t *testing.T, mysqlDB *sql.DB, vitessDb *sql.DB, query string, params []any) {
	mysqlRes, mysqlErr := mysqlDB.Exec(query, params...)
	vtRes, vtErr := vitessDb.Exec(query, params...)
	compareVitessAndMySQLErrors(t, vtErr, mysqlErr)
	compareVitessAndMySQLResults(t, vtRes, mysqlRes)
}

// stop stops the fuzzer and waits for it to finish execution.
func (fz *fuzzer) stop() {
	// Mark the thread to be stopped.
	fz.shouldStop.Store(true)
	// Wait for the fuzzer thread to stop.
	fz.wg.Wait()
}

// getPreparedDeleteQueries gets the list of queries to run for executing a DELETE using prepared statements.
func (fz *fuzzer) getPreparedDeleteQueries() []string {
	tableId := rand.Intn(len(fkTables))
	idValue := 1 + rand.Intn(fz.maxValForId)
	return []string{
		fmt.Sprintf("prepare stmt_del from 'delete from %v where id = ?'", fkTables[tableId]),
		fmt.Sprintf("SET @id = %v", idValue),
		"execute stmt_del using @id",
	}
}

// getPreparedInsertQueries gets the list of queries to run for executing an INSERT using prepared statements. 
+func (fz *fuzzer) getPreparedInsertQueries() []string { + tableId := rand.Intn(len(fkTables)) + idValue := 1 + rand.Intn(fz.maxValForId) + tableName := fkTables[tableId] + if tableName == "fk_t20" { + colValue := rand.Intn(1 + fz.maxValForCol) + col2Value := rand.Intn(1 + fz.maxValForCol) + return []string{ + "prepare stmt_insert from 'insert into fk_t20 (id, col, col2) values (?, ?, ?)'", + fmt.Sprintf("SET @id = %v", idValue), + fmt.Sprintf("SET @col = %v", convertColValueToString(colValue)), + fmt.Sprintf("SET @col2 = %v", convertColValueToString(col2Value)), + "execute stmt_insert using @id, @col, @col2", + } + } else if isMultiColFkTable(tableName) { + colaValue := rand.Intn(1 + fz.maxValForCol) + colbValue := rand.Intn(1 + fz.maxValForCol) + return []string{ + fmt.Sprintf("prepare stmt_insert from 'insert into %v (id, cola, colb) values (?, ?, ?)'", tableName), + fmt.Sprintf("SET @id = %v", idValue), + fmt.Sprintf("SET @cola = %v", convertColValueToString(colaValue)), + fmt.Sprintf("SET @colb = %v", convertColValueToString(colbValue)), + "execute stmt_insert using @id, @cola, @colb", + } + } else { + colValue := rand.Intn(1 + fz.maxValForCol) + return []string{ + fmt.Sprintf("prepare stmt_insert from 'insert into %v (id, col) values (?, ?)'", tableName), + fmt.Sprintf("SET @id = %v", idValue), + fmt.Sprintf("SET @col = %v", convertColValueToString(colValue)), + "execute stmt_insert using @id, @col", + } + } +} + +// getPreparedUpdateQueries gets the list of queries to run for executing an UPDATE using prepared statements. +func (fz *fuzzer) getPreparedUpdateQueries() []string { + tableId := rand.Intn(len(fkTables)) + idValue := 1 + rand.Intn(fz.maxValForId) + tableName := fkTables[tableId] + if tableName == "fk_t20" { + colValue := rand.Intn(1 + fz.maxValForCol) + col2Value := rand.Intn(1 + fz.maxValForCol) + return []string{ + "prepare stmt_update from 'update fk_t20 set col = ?, col2 = ? 
where id = ?'", + fmt.Sprintf("SET @id = %v", idValue), + fmt.Sprintf("SET @col = %v", convertColValueToString(colValue)), + fmt.Sprintf("SET @col2 = %v", convertColValueToString(col2Value)), + "execute stmt_update using @col, @col2, @id", + } + } else if isMultiColFkTable(tableName) { + colaValue := rand.Intn(1 + fz.maxValForCol) + colbValue := rand.Intn(1 + fz.maxValForCol) + return []string{ + fmt.Sprintf("prepare stmt_update from 'update %v set cola = ?, colb = ? where id = ?'", tableName), + fmt.Sprintf("SET @id = %v", idValue), + fmt.Sprintf("SET @cola = %v", convertColValueToString(colaValue)), + fmt.Sprintf("SET @colb = %v", convertColValueToString(colbValue)), + "execute stmt_update using @cola, @colb, @id", + } + } else { + colValue := rand.Intn(1 + fz.maxValForCol) + return []string{ + fmt.Sprintf("prepare stmt_update from 'update %v set col = ? where id = ?'", tableName), + fmt.Sprintf("SET @id = %v", idValue), + fmt.Sprintf("SET @col = %v", convertColValueToString(colValue)), + "execute stmt_update using @col, @id", + } + } +} + +// generateParameterizedQuery generates a parameterized query for the query format PreparedStatementPacket. +func (fz *fuzzer) generateParameterizedQuery() (query string, params []any) { + val := rand.Intn(fz.insertShare + fz.updateShare + fz.deleteShare) + if val < fz.insertShare { + return fz.generateParameterizedInsertQuery() + } + if val < fz.insertShare+fz.updateShare { + return fz.generateParameterizedUpdateQuery() + } + return fz.generateParameterizedDeleteQuery() +} + +// generateParameterizedInsertQuery generates a parameterized INSERT query for the query format PreparedStatementPacket. 
+func (fz *fuzzer) generateParameterizedInsertQuery() (query string, params []any) { + tableId := rand.Intn(len(fkTables)) + idValue := 1 + rand.Intn(fz.maxValForId) + tableName := fkTables[tableId] + if tableName == "fk_t20" { + colValue := rand.Intn(1 + fz.maxValForCol) + col2Value := rand.Intn(1 + fz.maxValForCol) + return fmt.Sprintf("insert into %v (id, col, col2) values (?, ?, ?)", tableName), []any{idValue, convertColValueToString(colValue), convertColValueToString(col2Value)} + } else if isMultiColFkTable(tableName) { + colaValue := rand.Intn(1 + fz.maxValForCol) + colbValue := rand.Intn(1 + fz.maxValForCol) + return fmt.Sprintf("insert into %v (id, cola, colb) values (?, ?, ?)", tableName), []any{idValue, convertColValueToString(colaValue), convertColValueToString(colbValue)} + } else { + colValue := rand.Intn(1 + fz.maxValForCol) + return fmt.Sprintf("insert into %v (id, col) values (?, ?)", tableName), []any{idValue, convertColValueToString(colValue)} + } +} + +// generateParameterizedUpdateQuery generates a parameterized UPDATE query for the query format PreparedStatementPacket. +func (fz *fuzzer) generateParameterizedUpdateQuery() (query string, params []any) { + tableId := rand.Intn(len(fkTables)) + idValue := 1 + rand.Intn(fz.maxValForId) + tableName := fkTables[tableId] + if tableName == "fk_t20" { + colValue := rand.Intn(1 + fz.maxValForCol) + col2Value := rand.Intn(1 + fz.maxValForCol) + return fmt.Sprintf("update %v set col = ?, col2 = ? where id = ?", tableName), []any{convertColValueToString(colValue), convertColValueToString(col2Value), idValue} + } else if isMultiColFkTable(tableName) { + colaValue := rand.Intn(1 + fz.maxValForCol) + colbValue := rand.Intn(1 + fz.maxValForCol) + return fmt.Sprintf("update %v set cola = ?, colb = ? where id = ?", tableName), []any{convertColValueToString(colaValue), convertColValueToString(colbValue), idValue} + } else { + colValue := rand.Intn(1 + fz.maxValForCol) + return fmt.Sprintf("update %v set col = ? 
where id = ?", tableName), []any{convertColValueToString(colValue), idValue} + } +} + +// generateParameterizedDeleteQuery generates a parameterized DELETE query for the query format PreparedStatementPacket. +func (fz *fuzzer) generateParameterizedDeleteQuery() (query string, params []any) { + tableId := rand.Intn(len(fkTables)) + idValue := 1 + rand.Intn(fz.maxValForId) + return fmt.Sprintf("delete from %v where id = ?", fkTables[tableId]), []any{idValue} +} + +// TestFkFuzzTest is a fuzzer test that works by querying the database concurrently. +// We have a pre-written set of query templates that we will use, but the data in the queries will +// be randomly generated. The intent is that we hammer the database as a real-world application would +// and check the correctness of data with MySQL. +// We are using the same schema for this test as we do for TestFkScenarios. +/* + * fk_t1 + * │ + * │ On Delete Restrict + * │ On Update Restrict + * ▼ + * ┌────────────────fk_t2────────────────┐ + * │ │ + * │On Delete Set Null │ On Delete Set Null + * │On Update Set Null │ On Update Set Null + * ▼ ▼ + * fk_t7 fk_t3───────────────────┐ + * │ │ + * │ │ On Delete Set Null + * On Delete Set Null │ │ On Update Set Null + * On Update Set Null │ │ + * ▼ ▼ + * fk_t4 fk_t6 + * │ + * │ + * On Delete Restrict │ + * On Update Restrict │ + * │ + * ▼ + * fk_t5 + */ +/* + * fk_t10 + * │ + * On Delete Cascade │ + * On Update Cascade │ + * │ + * ▼ + * fk_t11──────────────────┐ + * │ │ + * │ │ On Delete Restrict + * On Delete Cascade │ │ On Update Restrict + * On Update Cascade │ │ + * │ │ + * ▼ ▼ + * fk_t12 fk_t13 + */ +/* + * fk_t15 + * │ + * │ + * On Delete Cascade │ + * On Update Cascade │ + * │ + * ▼ + * fk_t16 + * │ + * On Delete Set Null │ + * On Update Set Null │ + * │ + * ▼ + * fk_t17──────────────────┐ + * │ │ + * │ │ On Delete Set Null + * On Delete Cascade │ │ On Update Set Null + * On Update Cascade │ │ + * │ │ + * ▼ ▼ + * fk_t18 fk_t19 + */ +/* + Self referenced foreign key 
from col2 to col in fk_t20 +*/ +func TestFkFuzzTest(t *testing.T) { + // Wait for schema-tracking to be complete. + waitForSchemaTrackingForFkTables(t) + // Remove all the foreign key constraints for all the replicas. + // We can then verify that the replica, and the primary have the same data, to ensure + // that none of the queries ever lead to cascades/updates on MySQL level. + for _, ks := range []string{shardedKs, unshardedKs} { + replicas := getReplicaTablets(ks) + for _, replica := range replicas { + removeAllForeignKeyConstraints(t, replica, ks) + } + } + + testcases := []struct { + name string + concurrency int + timeForTesting time.Duration + maxValForId int + maxValForCol int + insertShare int + deleteShare int + updateShare int + }{ + { + name: "Single Thread - Only Inserts", + concurrency: 1, + timeForTesting: 5 * time.Second, + maxValForCol: 5, + maxValForId: 10, + insertShare: 100, + deleteShare: 0, + updateShare: 0, + }, + { + name: "Single Thread - Balanced Inserts and Deletes", + concurrency: 1, + timeForTesting: 5 * time.Second, + maxValForCol: 5, + maxValForId: 10, + insertShare: 50, + deleteShare: 50, + updateShare: 0, + }, + { + name: "Single Thread - Balanced Inserts and Updates", + concurrency: 1, + timeForTesting: 5 * time.Second, + maxValForCol: 5, + maxValForId: 10, + insertShare: 50, + deleteShare: 0, + updateShare: 50, + }, + { + name: "Single Thread - Balanced Inserts, Updates and Deletes", + concurrency: 1, + timeForTesting: 5 * time.Second, + maxValForCol: 5, + maxValForId: 10, + insertShare: 50, + deleteShare: 50, + updateShare: 50, + }, + { + name: "Multi Thread - Balanced Inserts, Updates and Deletes", + concurrency: 30, + timeForTesting: 5 * time.Second, + maxValForCol: 5, + maxValForId: 30, + insertShare: 50, + deleteShare: 50, + updateShare: 50, + }, + } + + for _, tt := range testcases { + for _, testSharded := range []bool{false, true} { + for _, queryFormat := range []QueryFormat{SQLQueries, PreparedStatmentQueries, 
PreparedStatementPacket} { + t.Run(getTestName(tt.name, testSharded)+fmt.Sprintf(" QueryFormat - %v", queryFormat), func(t *testing.T) { + mcmp, closer := start(t) + defer closer() + // Set the correct keyspace to use from VtGates. + if testSharded { + t.Skip("Skip test since we don't have sharded foreign key support yet") + _ = utils.Exec(t, mcmp.VtConn, "use `ks`") + } else { + _ = utils.Exec(t, mcmp.VtConn, "use `uks`") + } + // Ensure that the Vitess database is originally empty + ensureDatabaseState(t, mcmp.VtConn, true) + ensureDatabaseState(t, mcmp.MySQLConn, true) + + // Create the fuzzer. + fz := newFuzzer(tt.concurrency, tt.maxValForId, tt.maxValForCol, tt.insertShare, tt.deleteShare, tt.updateShare, queryFormat) + + // Start the fuzzer. + fz.start(t, testSharded) + + // Wait for the timeForTesting so that the threads continue to run. + time.Sleep(tt.timeForTesting) + + fz.stop() + + // We encountered an error while running the fuzzer. Let's print out the information! + if fz.firstFailureInfo != nil { + log.Errorf("Failing query - %v", fz.firstFailureInfo.queryToFail) + for idx, table := range fkTables { + log.Errorf("MySQL data for %v -\n%v", table, fz.firstFailureInfo.mysqlState[idx].Rows) + log.Errorf("Vitess data for %v -\n%v", table, fz.firstFailureInfo.vitessState[idx].Rows) + } + } + + // ensure Vitess database has some data. This ensures not all the commands failed. + ensureDatabaseState(t, mcmp.VtConn, false) + // Verify the consistency of the data. + verifyDataIsCorrect(t, mcmp, tt.concurrency) + }) + } + } + } +} + +// ensureDatabaseState ensures that the database is either empty or not. +func ensureDatabaseState(t *testing.T, vtconn *mysql.Conn, empty bool) { + results := collectFkTablesState(vtconn) + isEmpty := true + for _, res := range results { + if len(res.Rows) > 0 { + isEmpty = false + } + } + require.Equal(t, isEmpty, empty) +} + +// verifyDataIsCorrect verifies that the data in MySQL database matches the data in the Vitess database. 
// verifyDataIsCorrect checks database consistency after a fuzzer run.
// With concurrency 1 it compares Vitess against MySQL row-by-row; with higher
// concurrency it instead checks FK integrity inside Vitess and then verifies
// that primary and replica tablets agree.
func verifyDataIsCorrect(t *testing.T, mcmp utils.MySQLCompare, concurrency int) {
	// For single concurrent thread, we run all the queries on both MySQL and Vitess, so we can verify correctness
	// by just checking if the data in MySQL and Vitess match.
	if concurrency == 1 {
		for _, table := range fkTables {
			query := fmt.Sprintf("SELECT * FROM %v ORDER BY id", table)
			mcmp.Exec(query)
		}
	} else {
		// For higher concurrency, we don't have MySQL data to verify everything is fine,
		// so we'll have to do something different.
		// We run LEFT JOIN queries on all the parent and child tables linked by foreign keys
		// to make sure that nothing is broken in the database.
		// Any row returned would be a child referencing a missing parent.
		for _, reference := range fkReferences {
			query := fmt.Sprintf("select %v.id from %v left join %v on (%v.col = %v.col) where %v.col is null and %v.col is not null", reference.childTable, reference.childTable, reference.parentTable, reference.parentTable, reference.childTable, reference.parentTable, reference.childTable)
			if isMultiColFkTable(reference.childTable) {
				query = fmt.Sprintf("select %v.id from %v left join %v on (%v.cola = %v.cola and %v.colb = %v.colb) where %v.cola is null and %v.cola is not null and %v.colb is not null", reference.childTable, reference.childTable, reference.parentTable, reference.parentTable, reference.childTable, reference.parentTable, reference.childTable, reference.parentTable, reference.childTable, reference.childTable)
			}
			res, err := mcmp.VtConn.ExecuteFetch(query, 1000, false)
			require.NoError(t, err)
			require.Zerof(t, len(res.Rows), "Query %v gave non-empty results", query)
		}
	}
	// We also verify that the results in Primary and Replica table match as is.
	for _, keyspace := range clusterInstance.Keyspaces {
		for _, shard := range keyspace.Shards {
			var primaryTab, replicaTab *cluster.Vttablet
			for _, vttablet := range shard.Vttablets {
				if vttablet.Type == "primary" {
					primaryTab = vttablet
				} else {
					replicaTab = vttablet
				}
			}
			require.NotNil(t, primaryTab)
			require.NotNil(t, replicaTab)
			checkReplicationHealthy(t, replicaTab)
			// Wait (up to 60s) for the replica to catch up before comparing data.
			cluster.WaitForReplicationPos(t, primaryTab, replicaTab, true, 60.0)
			primaryConn, err := utils.GetMySQLConn(primaryTab, fmt.Sprintf("vt_%v", keyspace.Name))
			require.NoError(t, err)
			replicaConn, err := utils.GetMySQLConn(replicaTab, fmt.Sprintf("vt_%v", keyspace.Name))
			require.NoError(t, err)
			primaryRes := collectFkTablesState(primaryConn)
			replicaRes := collectFkTablesState(replicaConn)
			verifyDataMatches(t, primaryRes, replicaRes)
		}
	}
}

// verifyDataMatches verifies that the two list of results are the same.
func verifyDataMatches(t *testing.T, resOne []*sqltypes.Result, resTwo []*sqltypes.Result) {
	require.EqualValues(t, len(resTwo), len(resOne), "Res 1 - %v, Res 2 - %v", resOne, resTwo)
	for idx, resultOne := range resOne {
		resultTwo := resTwo[idx]
		require.True(t, resultOne.Equal(resultTwo), "Data for %v doesn't match\nRows 1\n%v\nRows 2\n%v", fkTables[idx], resultOne.Rows, resultTwo.Rows)
	}
}

// collectFkTablesState collects the data stored in the foreign key tables for the given connection. 
+func collectFkTablesState(conn *mysql.Conn) []*sqltypes.Result { + var tablesData []*sqltypes.Result + for _, table := range fkTables { + query := fmt.Sprintf("SELECT * FROM %v ORDER BY id", table) + res, _ := conn.ExecuteFetch(query, 10000, true) + tablesData = append(tablesData, res) + } + return tablesData +} diff --git a/go/test/endtoend/vtgate/foreignkey/fk_test.go b/go/test/endtoend/vtgate/foreignkey/fk_test.go new file mode 100644 index 00000000000..c3be526e584 --- /dev/null +++ b/go/test/endtoend/vtgate/foreignkey/fk_test.go @@ -0,0 +1,776 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package foreignkey + +import ( + "context" + "io" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vtgate/vtgateconn" +) + +// TestInsertWithFK tests that insertions work as expected when foreign key management is enabled in Vitess. +func TestInsertWithFK(t *testing.T) { + mcmp, closer := start(t) + conn := mcmp.VtConn + defer closer() + + // insert some data. + utils.Exec(t, conn, `insert into t1(id, col) values (100, 123),(10, 12),(1, 13),(1000, 1234)`) + + // Verify that inserting data into a table that has shard scoped foreign keys works. 
+ utils.Exec(t, conn, `insert into t2(id, col) values (100, 125), (1, 132)`) + + // Verify that insertion fails if the data doesn't follow the fk constraint. + _, err := utils.ExecAllowError(t, conn, `insert into t2(id, col) values (1310, 125)`) + assert.ErrorContains(t, err, "Cannot add or update a child row: a foreign key constraint fails") + + // Verify that insertion fails if the table has cross-shard foreign keys (even if the data follows the constraints). + _, err = utils.ExecAllowError(t, conn, `insert into t3(id, col) values (100, 100)`) + assert.ErrorContains(t, err, "VT12002: unsupported: cross-shard foreign keys") + + // insert some data in a table with multicol vindex. + utils.Exec(t, conn, `insert into multicol_tbl1(cola, colb, colc, msg) values (100, 'a', 'b', 'msg'), (101, 'c', 'd', 'msg2')`) + + // Verify that inserting data into a table that has shard scoped multi-column foreign keys works. + utils.Exec(t, conn, `insert into multicol_tbl2(cola, colb, colc, msg) values (100, 'a', 'b', 'msg3')`) + + // Verify that insertion fails if the data doesn't follow the fk constraint. + _, err = utils.ExecAllowError(t, conn, `insert into multicol_tbl2(cola, colb, colc, msg) values (103, 'c', 'd', 'msg2')`) + assert.ErrorContains(t, err, "Cannot add or update a child row: a foreign key constraint fails") +} + +// TestDeleteWithFK tests that deletions work as expected when foreign key management is enabled in Vitess. +func TestDeleteWithFK(t *testing.T) { + mcmp, closer := start(t) + conn := mcmp.VtConn + defer closer() + + // insert some data. 
+ utils.Exec(t, conn, `insert into t1(id, col) values (100, 123),(10, 12),(1, 13),(1000, 1234)`) + utils.Exec(t, conn, `insert into t2(id, col) values (100, 125), (1, 132)`) + utils.Exec(t, conn, `insert into t4(id, col) values (1, 321)`) + utils.Exec(t, conn, `insert into multicol_tbl1(cola, colb, colc, msg) values (100, 'a', 'b', 'msg'), (101, 'c', 'd', 'msg2')`) + utils.Exec(t, conn, `insert into multicol_tbl2(cola, colb, colc, msg) values (100, 'a', 'b', 'msg3')`) + + // child foreign key is shard scoped. Query will fail at mysql due to On Delete Restrict. + _, err := utils.ExecAllowError(t, conn, `delete from t2 where col = 132`) + assert.ErrorContains(t, err, "Cannot delete or update a parent row: a foreign key constraint fails") + + // child row does not exist so query will succeed. + qr := utils.Exec(t, conn, `delete from t2 where col = 125`) + assert.EqualValues(t, 1, qr.RowsAffected) + + // table's child foreign key has cross shard fk, so query will fail at vtgate. + _, err = utils.ExecAllowError(t, conn, `delete from t1 where id = 42`) + assert.ErrorContains(t, err, "VT12002: unsupported: cross-shard foreign keys (errno 1235) (sqlstate 42000)") + + // child foreign key is cascade, so this should work as expected. + qr = utils.Exec(t, conn, `delete from multicol_tbl1 where cola = 100`) + assert.EqualValues(t, 1, qr.RowsAffected) + // we also verify that the rows in the child table were deleted. + qr = utils.Exec(t, conn, `select * from multicol_tbl2 where cola = 100`) + assert.Zero(t, qr.Rows) + + // Unsharded keyspace tests + utils.Exec(t, conn, `use uks`) + // insert some data. + utils.Exec(t, conn, `insert into u_t1(id, col1) values (100, 123), (10, 12), (1, 13), (1000, 1234)`) + utils.Exec(t, conn, `insert into u_t2(id, col2) values (342, 123), (19, 1234)`) + + // Delete from u_t1 which has a foreign key constraint to t2 with SET NULL type. 
+ qr = utils.Exec(t, conn, `delete from u_t1 where id = 100`) + assert.EqualValues(t, 1, qr.RowsAffected) + // Verify the result in u_t2 as well + utils.AssertMatches(t, conn, `select * from u_t2`, `[[INT64(342) NULL] [INT64(19) INT64(1234)]]`) +} + +// TestUpdateWithFK tests that update work as expected when foreign key management is enabled in Vitess. +func TestUpdateWithFK(t *testing.T) { + mcmp, closer := start(t) + conn := mcmp.VtConn + defer closer() + + // insert some data. + utils.Exec(t, conn, `insert into t1(id, col) values (100, 123),(10, 12),(1, 13),(1000, 1234)`) + utils.Exec(t, conn, `insert into t2(id, col, mycol) values (100, 125, 'foo'), (1, 132, 'bar')`) + utils.Exec(t, conn, `insert into t4(id, col, t2_col, t2_mycol) values (1, 321, 132, 'bar')`) + utils.Exec(t, conn, `insert into t5(pk, sk, col1) values (1, 1, 1),(2, 1, 1),(3, 1, 10),(4, 1, 20),(5, 1, 30),(6, 1, 40)`) + utils.Exec(t, conn, `insert into t6(pk, sk, col1) values (10, 1, 1), (20, 1, 20)`) + + // parent foreign key is shard scoped and value is not updated. Query will succeed. + _ = utils.Exec(t, conn, `update t4 set t2_mycol = 'bar' where id = 1`) + + // parent foreign key is shard scoped and value does not exists in parent table. Query will fail at mysql due to On Update Restrict. + _, err := utils.ExecAllowError(t, conn, `update t4 set t2_mycol = 'foo' where id = 1`) + assert.ErrorContains(t, err, "Cannot add or update a child row: a foreign key constraint fails") + + // updating column which does not have foreign key constraint, so query will succeed. + qr := utils.Exec(t, conn, `update t4 set col = 20 where id = 1`) + assert.EqualValues(t, 1, qr.RowsAffected) + + // updating column which does not have foreign key constraint, so query will succeed. + _ = utils.Exec(t, conn, `update t2 set mycol = 'baz' where id = 100`) + assert.EqualValues(t, 1, qr.RowsAffected) + + // child table have restrict in shard scoped and value exists in parent table. 
+ _ = utils.Exec(t, conn, `update t6 set col1 = 40 where pk = 20`) + assert.EqualValues(t, 1, qr.RowsAffected) + + // Unsharded keyspace tests + utils.Exec(t, conn, `use uks`) + // insert some data. + utils.Exec(t, conn, `insert into u_t1(id, col1) values (100, 123), (10, 12), (1, 13), (1000, 1234)`) + utils.Exec(t, conn, `insert into u_t2(id, col2) values (342, 123), (19, 1234)`) + utils.Exec(t, conn, `insert into u_t3(id, col3) values (32, 123), (1, 12)`) + + // Cascade update with a new value + _ = utils.Exec(t, conn, `update u_t1 set col1 = 2 where id = 100`) + // Verify the result in u_t2 and u_t3 as well. + utils.AssertMatches(t, conn, `select * from u_t2 order by id`, `[[INT64(19) INT64(1234)] [INT64(342) NULL]]`) + utils.AssertMatches(t, conn, `select * from u_t3 order by id`, `[[INT64(1) INT64(12)] [INT64(32) INT64(2)]]`) + + // Update u_t1 which has a foreign key constraint to u_t2 with SET NULL type, and to u_t3 with CASCADE type. + qr = utils.Exec(t, conn, `update u_t1 set col1 = 13 where id = 100`) + assert.EqualValues(t, 1, qr.RowsAffected) + // Verify the result in u_t2 and u_t3 as well. + utils.AssertMatches(t, conn, `select * from u_t2 order by id`, `[[INT64(19) INT64(1234)] [INT64(342) NULL]]`) + utils.AssertMatches(t, conn, `select * from u_t3 order by id`, `[[INT64(1) INT64(12)] [INT64(32) INT64(13)]]`) + + // Update u_t1 which has a foreign key constraint to u_t2 with SET NULL type, and to u_t3 with CASCADE type. + // This update however doesn't change the table. + qr = utils.Exec(t, conn, `update u_t1 set col1 = 1234 where id = 1000`) + assert.EqualValues(t, 0, qr.RowsAffected) + // Verify the result in u_t2 and u_t3 as well. 
+ utils.AssertMatches(t, conn, `select * from u_t2 order by id`, `[[INT64(19) INT64(1234)] [INT64(342) NULL]]`) + utils.AssertMatches(t, conn, `select * from u_t3 order by id`, `[[INT64(1) INT64(12)] [INT64(32) INT64(13)]]`) +} + +// TestVstreamForFKBinLog tests that dml queries with fks are written with child row first approach in the binary logs. +func TestVstreamForFKBinLog(t *testing.T) { + vtgateConn, err := cluster.DialVTGate(context.Background(), t.Name(), vtgateGrpcAddress, "fk_user", "") + require.NoError(t, err) + defer vtgateConn.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := make(chan *binlogdatapb.VEvent) + runVStream(t, ctx, ch, vtgateConn) + + mcmp, closer := start(t) + conn := mcmp.VtConn + defer closer() + defer cancel() + + utils.Exec(t, conn, `use uks`) + + // insert some data. + utils.Exec(t, conn, `insert into u_t1(id, col1) values (1,2), (11,4), (111,6)`) + utils.Exec(t, conn, `insert into u_t2(id, col2) values (2,2), (22,4)`) + utils.Exec(t, conn, `insert into u_t3(id, col3) values (33,4), (333,6)`) + // drain 3 row events. 
+ _ = drainEvents(t, ch, 3) + + tcases := []struct { + query string + events int + rowEvents []string + }{{ + query: `update u_t1 set col1 = 3 where id = 11`, + events: 3, + rowEvents: []string{ + `table_name:"uks.u_t3" row_changes:{before:{lengths:2 lengths:1 values:"334"} after:{lengths:2 lengths:1 values:"333"}} keyspace:"uks" shard:"0" flags:3`, + `table_name:"uks.u_t2" row_changes:{before:{lengths:2 lengths:1 values:"224"} after:{lengths:2 lengths:-1 values:"22"}} keyspace:"uks" shard:"0" flags:1`, + `table_name:"uks.u_t1" row_changes:{before:{lengths:2 lengths:1 values:"114"} after:{lengths:2 lengths:1 values:"113"}} keyspace:"uks" shard:"0" flags:1`, + }, + }, { + query: `update u_t1 set col1 = 5 where id = 11`, + events: 2, + rowEvents: []string{ + `table_name:"uks.u_t3" row_changes:{before:{lengths:2 lengths:1 values:"333"} after:{lengths:2 lengths:1 values:"335"}} keyspace:"uks" shard:"0" flags:3`, + `table_name:"uks.u_t1" row_changes:{before:{lengths:2 lengths:1 values:"113"} after:{lengths:2 lengths:1 values:"115"}} keyspace:"uks" shard:"0" flags:1`, + }, + }, { + query: `delete from u_t1 where col1 = 6`, + events: 2, + rowEvents: []string{ + `table_name:"uks.u_t3" row_changes:{before:{lengths:3 lengths:1 values:"3336"}} keyspace:"uks" shard:"0" flags:1`, + `table_name:"uks.u_t1" row_changes:{before:{lengths:3 lengths:1 values:"1116"}} keyspace:"uks" shard:"0" flags:1`, + }, + }, { + query: `update u_t1 set col1 = null where id = 11`, + events: 2, + rowEvents: []string{ + `table_name:"uks.u_t3" row_changes:{before:{lengths:2 lengths:1 values:"335"} after:{lengths:2 lengths:-1 values:"33"}} keyspace:"uks" shard:"0" flags:3`, + `table_name:"uks.u_t1" row_changes:{before:{lengths:2 lengths:1 values:"115"} after:{lengths:2 lengths:-1 values:"11"}} keyspace:"uks" shard:"0" flags:1`, + }, + }, { + query: `delete from u_t1 where id = 11`, + events: 1, + rowEvents: []string{ + `table_name:"uks.u_t1" row_changes:{before:{lengths:2 lengths:-1 values:"11"}} 
keyspace:"uks" shard:"0" flags:1`, + }, + }} + for _, tcase := range tcases { + t.Run(tcase.query, func(t *testing.T) { + utils.Exec(t, conn, tcase.query) + // drain row events. + rowEvents := drainEvents(t, ch, tcase.events) + assert.ElementsMatch(t, tcase.rowEvents, rowEvents) + }) + } +} + +func runVStream(t *testing.T, ctx context.Context, ch chan *binlogdatapb.VEvent, vtgateConn *vtgateconn.VTGateConn) { + vgtid := &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{ + {Keyspace: unshardedKs, Shard: "0", Gtid: "current"}, + }} + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/u.*", + }}, + } + vReader, err := vtgateConn.VStream(ctx, topodatapb.TabletType_REPLICA, vgtid, filter, nil) + require.NoError(t, err) + + go func() { + for { + evs, err := vReader.Recv() + if err == io.EOF || ctx.Err() != nil { + return + } + require.NoError(t, err) + + for _, ev := range evs { + if ev.Type == binlogdatapb.VEventType_ROW { + ch <- ev + } + } + } + }() +} + +func drainEvents(t *testing.T, ch chan *binlogdatapb.VEvent, count int) []string { + var rowEvents []string + for i := 0; i < count; i++ { + select { + case re := <-ch: + rowEvents = append(rowEvents, re.RowEvent.String()) + case <-time.After(10 * time.Second): + t.Fatalf("timeout waiting for event number: %d", i+1) + } + } + return rowEvents +} + +// TestFkScenarios tests the various foreign key scenarios with different constraints +// and makes sure that Vitess works with them as expected. All the tables are present in both sharded and unsharded keyspace +// and all the foreign key constraints are cross-shard ones for the sharded keyspace. 
+// The test has 4 independent Schemas that are used for testing - +/* + * fk_t1 + * │ + * │ On Delete Restrict + * │ On Update Restrict + * ▼ + * ┌────────────────fk_t2────────────────┐ + * │ │ + * │On Delete Set Null │ On Delete Set Null + * │On Update Set Null │ On Update Set Null + * ▼ ▼ + * fk_t7 fk_t3───────────────────┐ + * │ │ + * │ │ On Delete Set Null + * On Delete Set Null │ │ On Update Set Null + * On Update Set Null │ │ + * ▼ ▼ + * fk_t4 fk_t6 + * │ + * │ + * On Delete Restrict │ + * On Update Restrict │ + * │ + * ▼ + * fk_t5 + */ +/* + * fk_t10 + * │ + * On Delete Cascade │ + * On Update Cascade │ + * │ + * ▼ + * fk_t11──────────────────┐ + * │ │ + * │ │ On Delete Restrict + * On Delete Cascade │ │ On Update Restrict + * On Update Cascade │ │ + * │ │ + * ▼ ▼ + * fk_t12 fk_t13 + */ +/* + * fk_t15 + * │ + * │ + * On Delete Cascade │ + * On Update Cascade │ + * │ + * ▼ + * fk_t16 + * │ + * On Delete Set Null │ + * On Update Set Null │ + * │ + * ▼ + * fk_t17──────────────────┐ + * │ │ + * │ │ On Delete Set Null + * On Delete Cascade │ │ On Update Set Null + * On Update Cascade │ │ + * │ │ + * ▼ ▼ + * fk_t18 fk_t19 + */ +/* + Self referenced foreign key from col2 to col in fk_t20 +*/ +func TestFkScenarios(t *testing.T) { + // Wait for schema-tracking to be complete. 
+ waitForSchemaTrackingForFkTables(t) + + testcases := []struct { + name string + dataQueries []string + dmlQuery string + assertionQueries []string + }{ + { + name: "Insert failure due to parent key not existing", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 5)", + }, + dmlQuery: "insert into t2(id, col) values (1, 7)", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + }, + }, { + name: "Insert success", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7)", + }, + dmlQuery: "insert into fk_t2(id, col) values (1, 7)", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + }, + }, { + name: "Update failure with restrict foreign keys", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7)", + "insert into fk_t2(id, col) values (1, 7)", + }, + dmlQuery: "update fk_t1 set col = 5 where id = 1", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + }, + }, { + name: "Update success with restrict foreign keys", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7), (2, 9)", + "insert into fk_t2(id, col) values (1, 7)", + }, + dmlQuery: "update fk_t1 set col = 5 where id = 2", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + }, + }, { + name: "Delete failure with restrict foreign keys", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7)", + "insert into fk_t2(id, col) values (1, 7)", + }, + dmlQuery: "delete from fk_t1 where id = 1", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + }, + }, { + name: "Delete success with restrict foreign keys", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7), (2, 9)", + "insert into fk_t2(id, col) values (1, 7)", + }, + dmlQuery: "delete from fk_t1 where id = 2", + 
assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + }, + }, { + name: "Update success with set null foreign key", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7), (2, 9)", + "insert into fk_t2(id, col) values (1, 7), (2, 9)", + "insert into fk_t3(id, col) values (1, 7), (2, 9)", + "insert into fk_t6(id, col) values (1, 7)", + }, + dmlQuery: "update fk_t3 set col = 9 where id = 1", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + "select * from fk_t3 order by id", + "select * from fk_t6 order by id", + }, + }, { + name: "Update failure with set null foreign key with child having a restrict foreign key", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7), (2, 9)", + "insert into fk_t2(id, col) values (1, 7), (2, 9)", + "insert into fk_t3(id, col) values (1, 7), (2, 9)", + "insert into fk_t4(id, col) values (1, 7)", + "insert into fk_t5(id, col) values (1, 7)", + }, + dmlQuery: "update fk_t3 set col = 9 where id = 1", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + "select * from fk_t3 order by id", + "select * from fk_t4 order by id", + "select * from fk_t5 order by id", + }, + }, { + name: "Update success with cascaded set nulls", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7), (2, 9)", + "insert into fk_t2(id, col) values (1, 7), (2, 9)", + "insert into fk_t3(id, col) values (1, 7), (2, 9)", + "insert into fk_t4(id, col) values (1, 7), (2, 9)", + "insert into fk_t6(id, col) values (1, 7), (2, 9)", + }, + dmlQuery: "update fk_t2 set col = 9 where id = 1", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + "select * from fk_t3 order by id", + "select * from fk_t4 order by id", + "select * from fk_t6 order by id", + }, + }, { + name: "Delete success with set null foreign key", + 
dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7), (2, 9)", + "insert into fk_t2(id, col) values (1, 7), (2, 9)", + "insert into fk_t3(id, col) values (1, 7), (2, 9)", + "insert into fk_t6(id, col) values (1, 7)", + }, + dmlQuery: "delete from fk_t3 where id = 1", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + "select * from fk_t3 order by id", + "select * from fk_t6 order by id", + }, + }, { + name: "Delete failure with set null foreign key with child having a restrict foreign key", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7), (2, 9)", + "insert into fk_t2(id, col) values (1, 7), (2, 9)", + "insert into fk_t3(id, col) values (1, 7), (2, 9)", + "insert into fk_t4(id, col) values (1, 7)", + "insert into fk_t5(id, col) values (1, 7)", + }, + dmlQuery: "delete from fk_t3 where id = 1", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + "select * from fk_t3 order by id", + "select * from fk_t4 order by id", + "select * from fk_t5 order by id", + }, + }, { + name: "Delete success with cascaded set nulls", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7), (2, 9)", + "insert into fk_t2(id, col) values (1, 7), (2, 9)", + "insert into fk_t3(id, col) values (1, 7), (2, 9)", + "insert into fk_t4(id, col) values (1, 7), (2, 9)", + "insert into fk_t6(id, col) values (1, 7), (2, 9)", + }, + dmlQuery: "delete from fk_t2 where id = 1", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + "select * from fk_t3 order by id", + "select * from fk_t4 order by id", + "select * from fk_t6 order by id", + }, + }, { + name: "Update success with cascade foreign key", + dataQueries: []string{ + "insert into fk_t10(id, col) values (1, 7), (2, 9)", + "insert into fk_t11(id, col) values (1, 7)", + }, + dmlQuery: "update fk_t10 set col = 5 where id = 1", + 
assertionQueries: []string{ + "select * from fk_t10 order by id", + "select * from fk_t11 order by id", + }, + }, { + name: "Update failure with cascade foreign key with child having a restrict foreign key", + dataQueries: []string{ + "insert into fk_t10(id, col) values (1, 7), (2, 9)", + "insert into fk_t11(id, col) values (1, 7)", + "insert into fk_t13(id, col) values (1, 7)", + }, + dmlQuery: "update fk_t10 set col = 5 where id = 1", + assertionQueries: []string{ + "select * from fk_t10 order by id", + "select * from fk_t11 order by id", + "select * from fk_t13 order by id", + }, + }, { + name: "Update success with cascaded cascade foreign keys", + dataQueries: []string{ + "insert into fk_t10(id, col) values (1, 7), (2, 9)", + "insert into fk_t11(id, col) values (1, 7)", + "insert into fk_t12(id, col) values (1, 7)", + }, + dmlQuery: "update fk_t10 set col = 5 where id = 1", + assertionQueries: []string{ + "select * from fk_t10 order by id", + "select * from fk_t11 order by id", + "select * from fk_t12 order by id", + }, + }, { + name: "Delete success with cascade foreign key", + dataQueries: []string{ + "insert into fk_t10(id, col) values (1, 7), (2, 9)", + "insert into fk_t11(id, col) values (1, 7)", + }, + dmlQuery: "delete from fk_t10 where id = 1", + assertionQueries: []string{ + "select * from fk_t10 order by id", + "select * from fk_t11 order by id", + }, + }, { + name: "Delete failure with cascade foreign key with child having a restrict foreign key", + dataQueries: []string{ + "insert into fk_t10(id, col) values (1, 7), (2, 9)", + "insert into fk_t11(id, col) values (1, 7)", + "insert into fk_t13(id, col) values (1, 7)", + }, + dmlQuery: "delete from fk_t10 where id = 1", + assertionQueries: []string{ + "select * from fk_t10 order by id", + "select * from fk_t11 order by id", + "select * from fk_t13 order by id", + }, + }, { + name: "Delete success with cascaded cascade foreign keys", + dataQueries: []string{ + "insert into fk_t10(id, col) values (1, 
7), (2, 9)", + "insert into fk_t11(id, col) values (1, 7)", + "insert into fk_t12(id, col) values (1, 7)", + }, + dmlQuery: "delete from fk_t10 where id = 1", + assertionQueries: []string{ + "select * from fk_t10 order by id", + "select * from fk_t11 order by id", + "select * from fk_t12 order by id", + }, + }, { + name: "Delete success with set null to an update cascade foreign key", + dataQueries: []string{ + "insert into fk_t15(id, col) values (1, 7), (2, 9)", + "insert into fk_t16(id, col) values (1, 7), (2, 9)", + "insert into fk_t17(id, col) values (1, 7)", + "insert into fk_t18(id, col) values (1, 7)", + }, + dmlQuery: "delete from fk_t16 where id = 1", + assertionQueries: []string{ + "select * from fk_t15 order by id", + "select * from fk_t16 order by id", + "select * from fk_t17 order by id", + "select * from fk_t18 order by id", + }, + }, { + name: "Delete success with cascade to delete with set null to an update set null foreign key", + dataQueries: []string{ + "insert into fk_t15(id, col) values (1, 7), (2, 9)", + "insert into fk_t16(id, col) values (1, 7), (2, 9)", + "insert into fk_t17(id, col) values (1, 7)", + "insert into fk_t19(id, col) values (1, 7)", + }, + dmlQuery: "delete from fk_t15 where id = 1", + assertionQueries: []string{ + "select * from fk_t15 order by id", + "select * from fk_t16 order by id", + "select * from fk_t17 order by id", + "select * from fk_t19 order by id", + }, + }, { + name: "Update success with cascade to an update set null to an update cascade foreign key", + dataQueries: []string{ + "insert into fk_t15(id, col) values (1, 7), (2, 9)", + "insert into fk_t16(id, col) values (1, 7), (2, 9)", + "insert into fk_t17(id, col) values (1, 7)", + "insert into fk_t18(id, col) values (1, 7)", + }, + dmlQuery: "update fk_t15 set col = 3 where id = 1", + assertionQueries: []string{ + "select * from fk_t15 order by id", + "select * from fk_t16 order by id", + "select * from fk_t17 order by id", + "select * from fk_t18 order by id", 
+ }, + }, { + name: "Insert success for self-referenced foreign key", + dataQueries: []string{ + "insert into fk_t20(id, col, col2) values (1, 7, NULL)", + }, + dmlQuery: "insert into fk_t20(id, col, col2) values (2, 9, 7), (3, 10, 9)", + assertionQueries: []string{ + "select * from fk_t20 order by id", + }, + }, { + name: "Insert failure for self-referenced foreign key", + dataQueries: []string{ + "insert into fk_t20(id, col, col2) values (5, 7, NULL)", + }, + dmlQuery: "insert into fk_t20(id, col, col2) values (6, 9, 6)", + assertionQueries: []string{ + "select * from fk_t20 order by id", + }, + }, + } + + for _, tt := range testcases { + for _, testSharded := range []bool{false, true} { + t.Run(getTestName(tt.name, testSharded), func(t *testing.T) { + mcmp, closer := start(t) + defer closer() + // Set the correct keyspace to use from VtGates. + if testSharded { + t.Skip("Skip test since we don't have sharded foreign key support yet") + _ = utils.Exec(t, mcmp.VtConn, "use `ks`") + } else { + _ = utils.Exec(t, mcmp.VtConn, "use `uks`") + } + + // Insert all the data required for running the test. + for _, query := range tt.dataQueries { + mcmp.Exec(query) + } + + // Run the DML query that needs to be tested and verify output with MySQL. + _, _ = mcmp.ExecAllowAndCompareError(tt.dmlQuery) + + // Run the assertion queries and verify we get the expected outputs. + for _, query := range tt.assertionQueries { + mcmp.Exec(query) + } + }) + } + } + + for _, testSharded := range []bool{false, true} { + t.Run(getTestName("Transactions with intermediate failure", testSharded), func(t *testing.T) { + mcmp, closer := start(t) + defer closer() + // Set the correct keyspace to use from VtGates. 
+ if testSharded { + t.Skip("Skip test since we don't have sharded foreign key support yet") + _ = utils.Exec(t, mcmp.VtConn, "use `ks`") + } else { + _ = utils.Exec(t, mcmp.VtConn, "use `uks`") + } + + // Insert some rows + mcmp.Exec("INSERT INTO fk_t10(id, col) VALUES (1, 7), (2, 9), (3, 5)") + mcmp.Exec("INSERT INTO fk_t11(id, col) VALUES (1, 7), (2, 9), (3, 5)") + mcmp.Exec("INSERT INTO fk_t12(id, col) VALUES (1, 7), (2, 9), (3, 5)") + + // Start a transaction + mcmp.Exec("BEGIN") + + // Insert another row. + mcmp.Exec("INSERT INTO fk_t13(id, col) VALUES (1, 7)") + + // Delete success for cascaded (2, 9) + mcmp.Exec("DELETE FROM fk_t10 WHERE id = 2") + + // Verify the results + mcmp.Exec("SELECT * FROM fk_t10 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t11 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t12 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t13 ORDER BY id") + + // Update that fails + _, err := mcmp.ExecAllowAndCompareError("UPDATE fk_t10 SET col = 15 WHERE id = 1") + require.Error(t, err) + + // Verify the results + // Since we are in a transaction, we still expect the transaction to be ongoing, with no change to the tables + // since the update should fail. 
+ mcmp.Exec("SELECT * FROM fk_t10 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t11 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t12 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t13 ORDER BY id") + + // Update that is a success + mcmp.Exec("UPDATE fk_t10 SET col = 15 where id = 3") + + // Verify the results + mcmp.Exec("SELECT * FROM fk_t10 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t11 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t12 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t13 ORDER BY id") + + // Insert a new row + mcmp.Exec("INSERT INTO fk_t13(id, col) VALUES (3, 15)") + + // Verify the results + mcmp.Exec("SELECT * FROM fk_t10 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t11 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t12 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t13 ORDER BY id") + + // Rollback the transaction. + mcmp.Exec("ROLLBACK") + + // Verify the results + mcmp.Exec("SELECT * FROM fk_t10 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t11 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t12 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t13 ORDER BY id") + }) + } +} diff --git a/go/test/endtoend/vtgate/foreignkey/main_test.go b/go/test/endtoend/vtgate/foreignkey/main_test.go new file mode 100644 index 00000000000..dae78ae93a1 --- /dev/null +++ b/go/test/endtoend/vtgate/foreignkey/main_test.go @@ -0,0 +1,188 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package foreignkey + +import ( + _ "embed" + "flag" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + mysqlParams mysql.ConnParams + vtgateGrpcAddress string + shardedKs = "ks" + unshardedKs = "uks" + Cell = "test" + //go:embed sharded_schema.sql + shardedSchemaSQL string + + //go:embed unsharded_schema.sql + unshardedSchemaSQL string + + //go:embed sharded_vschema.json + shardedVSchema string + + //go:embed unsharded_vschema.json + unshardedVSchema string + + fkTables = []string{"fk_t1", "fk_t2", "fk_t3", "fk_t4", "fk_t5", "fk_t6", "fk_t7", + "fk_t10", "fk_t11", "fk_t12", "fk_t13", "fk_t15", "fk_t16", "fk_t17", "fk_t18", "fk_t19", "fk_t20", + "fk_multicol_t1", "fk_multicol_t2", "fk_multicol_t3", "fk_multicol_t4", "fk_multicol_t5", "fk_multicol_t6", "fk_multicol_t7", + "fk_multicol_t10", "fk_multicol_t11", "fk_multicol_t12", "fk_multicol_t13", "fk_multicol_t15", "fk_multicol_t16", "fk_multicol_t17", "fk_multicol_t18", "fk_multicol_t19"} + fkReferences = []fkReference{ + {parentTable: "fk_t1", childTable: "fk_t2"}, + {parentTable: "fk_t2", childTable: "fk_t7"}, + {parentTable: "fk_t2", childTable: "fk_t3"}, + {parentTable: "fk_t3", childTable: "fk_t4"}, + {parentTable: "fk_t3", childTable: "fk_t6"}, + {parentTable: "fk_t4", childTable: "fk_t5"}, + {parentTable: "fk_t10", childTable: "fk_t11"}, + {parentTable: "fk_t11", childTable: "fk_t12"}, + {parentTable: "fk_t11", childTable: "fk_t13"}, + {parentTable: "fk_t15", childTable: "fk_t16"}, + {parentTable: "fk_t16", childTable: "fk_t17"}, + {parentTable: "fk_t17", childTable: "fk_t18"}, + {parentTable: "fk_t17", childTable: "fk_t19"}, + {parentTable: "fk_multicol_t1", childTable: "fk_multicol_t2"}, + {parentTable: "fk_multicol_t2", childTable: "fk_multicol_t7"}, + {parentTable: 
"fk_multicol_t2", childTable: "fk_multicol_t3"}, + {parentTable: "fk_multicol_t3", childTable: "fk_multicol_t4"}, + {parentTable: "fk_multicol_t3", childTable: "fk_multicol_t6"}, + {parentTable: "fk_multicol_t4", childTable: "fk_multicol_t5"}, + {parentTable: "fk_multicol_t10", childTable: "fk_multicol_t11"}, + {parentTable: "fk_multicol_t11", childTable: "fk_multicol_t12"}, + {parentTable: "fk_multicol_t11", childTable: "fk_multicol_t13"}, + {parentTable: "fk_multicol_t15", childTable: "fk_multicol_t16"}, + {parentTable: "fk_multicol_t16", childTable: "fk_multicol_t17"}, + {parentTable: "fk_multicol_t17", childTable: "fk_multicol_t18"}, + {parentTable: "fk_multicol_t17", childTable: "fk_multicol_t19"}, + } +) + +// fkReference stores a foreign key reference from one table to another. +type fkReference struct { + parentTable string + childTable string +} + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(Cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + return 1 + } + + // Start keyspace + sKs := &cluster.Keyspace{ + Name: shardedKs, + SchemaSQL: shardedSchemaSQL, + VSchema: shardedVSchema, + } + + err = clusterInstance.StartKeyspace(*sKs, []string{"-80", "80-"}, 1, false) + if err != nil { + return 1 + } + + uKs := &cluster.Keyspace{ + Name: unshardedKs, + SchemaSQL: unshardedSchemaSQL, + VSchema: unshardedVSchema, + } + err = clusterInstance.StartUnshardedKeyspace(*uKs, 1, false) + if err != nil { + return 1 + } + + err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildVSchemaGraph") + if err != nil { + return 1 + } + + // Start vtgate + err = clusterInstance.StartVtgate() + if err != nil { + return 1 + } + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + vtgateGrpcAddress = fmt.Sprintf("%s:%d", 
clusterInstance.Hostname, clusterInstance.VtgateGrpcPort) + + connParams, closer, err := utils.NewMySQL(clusterInstance, shardedKs, shardedSchemaSQL) + if err != nil { + fmt.Println(err) + return 1 + } + defer closer() + mysqlParams = connParams + return m.Run() + }() + os.Exit(exitCode) +} + +func start(t *testing.T) (utils.MySQLCompare, func()) { + mcmp, err := utils.NewMySQLCompare(t, vtParams, mysqlParams) + require.NoError(t, err) + + deleteAll := func() { + _ = utils.Exec(t, mcmp.VtConn, "use `ks/-80`") + tables := []string{"t4", "t3", "t2", "t1", "multicol_tbl2", "multicol_tbl1"} + tables = append(tables, fkTables...) + for _, table := range tables { + _, _ = mcmp.ExecAndIgnore("delete /*+ SET_VAR(foreign_key_checks=OFF) */ from " + table) + } + _ = utils.Exec(t, mcmp.VtConn, "use `ks/80-`") + for _, table := range tables { + _, _ = mcmp.ExecAndIgnore("delete /*+ SET_VAR(foreign_key_checks=OFF) */ from " + table) + } + _ = utils.Exec(t, mcmp.VtConn, "use `uks`") + tables = []string{"u_t1", "u_t2", "u_t3"} + tables = append(tables, fkTables...) 
+ for _, table := range tables { + _, _ = mcmp.ExecAndIgnore("delete /*+ SET_VAR(foreign_key_checks=OFF) */ from " + table) + } + _ = utils.Exec(t, mcmp.VtConn, "use `ks`") + } + + deleteAll() + + return mcmp, func() { + deleteAll() + mcmp.Close() + cluster.PanicHandler(t) + } +} diff --git a/go/test/endtoend/vtgate/foreignkey/sharded_schema.sql b/go/test/endtoend/vtgate/foreignkey/sharded_schema.sql new file mode 100644 index 00000000000..c1f511350f2 --- /dev/null +++ b/go/test/endtoend/vtgate/foreignkey/sharded_schema.sql @@ -0,0 +1,521 @@ +create table t1 +( + id bigint, + col bigint, + primary key (id) +) Engine = InnoDB; + +create table t2 +( + id bigint, + col bigint, + mycol varchar(50), + primary key (id), + index(id, mycol), + index(id, col), + foreign key (id) references t1 (id) on delete restrict +) Engine = InnoDB; + +create table t3 +( + id bigint, + col bigint, + primary key (id), + foreign key (col) references t1 (id) on delete restrict +) Engine = InnoDB; + +create table multicol_tbl1 +( + cola bigint, + colb varbinary(50), + colc varchar(50), + msg varchar(50), + primary key (cola, colb, colc) +) Engine = InnoDB; + +create table multicol_tbl2 +( + cola bigint, + colb varbinary(50), + colc varchar(50), + msg varchar(50), + primary key (cola, colb, colc), + foreign key (cola, colb, colc) references multicol_tbl1 (cola, colb, colc) on delete cascade +) Engine = InnoDB; + +create table t4 +( + id bigint, + col bigint, + t2_mycol varchar(50), + t2_col bigint, + primary key (id), + foreign key (id) references t2 (id) on delete restrict, + foreign key (id, t2_mycol) references t2 (id, mycol) on update restrict, + foreign key (id, t2_col) references t2 (id, col) on update cascade +) Engine = InnoDB; + +create table t5 +( + pk bigint, + sk bigint, + col1 varchar(50), + primary key (pk), + index(sk, col1) +) Engine = InnoDB; + +create table t6 +( + pk bigint, + sk bigint, + col1 varchar(50), + primary key (pk), + foreign key (sk, col1) references t5 (sk, 
col1) on delete restrict on update restrict +) Engine = InnoDB; + +/* + * fk_t1 + * │ + * │ On Delete Restrict + * │ On Update Restrict + * ▼ + * ┌────────────────fk_t2────────────────┐ + * │ │ + * │On Delete Set Null │ On Delete Set Null + * │On Update Set Null │ On Update Set Null + * ▼ ▼ + * fk_t7 fk_t3───────────────────┐ + * │ │ + * │ │ On Delete Set Null + * On Delete Set Null │ │ On Update Set Null + * On Update Set Null │ │ + * ▼ ▼ + * fk_t4 fk_t6 + * │ + * │ + * On Delete Restrict │ + * On Update Restrict │ + * │ + * ▼ + * fk_t5 + */ + +create table fk_t1 +( + id bigint, + col varchar(10), + primary key (id), + index(col) +) Engine = InnoDB; + +create table fk_t2 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t1(col) on delete restrict on update restrict +) Engine = InnoDB; + +create table fk_t3 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t2(col) on delete set null on update set null +) Engine = InnoDB; + +create table fk_t4 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t3(col) on delete set null on update set null +) Engine = InnoDB; + +create table fk_t5 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t4(col) on delete restrict on update restrict +) Engine = InnoDB; + +create table fk_t6 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t3(col) on delete set null on update set null +) Engine = InnoDB; + +create table fk_t7 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t2(col) on delete set null on update set null +) Engine = InnoDB; + +/* + * fk_t10 + * │ + * On Delete Cascade │ + * On Update Cascade │ + * │ + * ▼ + * fk_t11──────────────────┐ + * │ │ + * │ │ On Delete Restrict + * On Delete Cascade │ │ On Update Restrict + 
* On Update Cascade │ │ + * │ │ + * ▼ ▼ + * fk_t12 fk_t13 + */ + +create table fk_t10 +( + id bigint, + col varchar(10), + primary key (id), + index(col) +) Engine = InnoDB; + +create table fk_t11 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t10(col) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_t12 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t11(col) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_t13 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t11(col) on delete restrict on update restrict +) Engine = InnoDB; + +/* + * fk_t15 + * │ + * │ + * On Delete Cascade │ + * On Update Cascade │ + * │ + * ▼ + * fk_t16 + * │ + * On Delete Set Null │ + * On Update Set Null │ + * │ + * ▼ + * fk_t17──────────────────┐ + * │ │ + * │ │ On Delete Set Null + * On Delete Cascade │ │ On Update Set Null + * On Update Cascade │ │ + * │ │ + * ▼ ▼ + * fk_t18 fk_t19 + */ + +create table fk_t15 +( + id bigint, + col varchar(10), + primary key (id), + index(col) +) Engine = InnoDB; + +create table fk_t16 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t15(col) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_t17 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t16(col) on delete set null on update set null +) Engine = InnoDB; + +create table fk_t18 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t17(col) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_t19 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t17(col) on delete set null on update set null +) Engine = InnoDB; + +/* + Self referenced 
foreign key from col2 to col in fk_t20 +*/ + +create table fk_t20 +( + id bigint, + col varchar(10), + col2 varchar(10), + primary key (id), + index(col), + foreign key (col2) references fk_t20(col) on delete restrict on update restrict +) Engine = InnoDB; + +/* + * fk_multicol_t1 + * │ + * │ On Delete Restrict + * │ On Update Restrict + * ▼ + * ┌────────fk_multicol_t2───────────────┐ + * │ │ + * │On Delete Set Null │ On Delete Set Null + * │On Update Set Null │ On Update Set Null + * ▼ ▼ + * fk_multicol_t7 fk_multicol_t3───────────────────┐ + * │ │ + * │ │ On Delete Set Null + * On Delete Set Null │ │ On Update Set Null + * On Update Set Null │ │ + * ▼ ▼ + * fk_multicol_t4 fk_multicol_t6 + * │ + * │ + * On Delete Restrict │ + * On Update Restrict │ + * │ + * ▼ + * fk_multicol_t5 + */ +create table fk_multicol_t1 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb) +) Engine = InnoDB; + +create table fk_multicol_t2 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t1(cola, colb) on delete restrict on update restrict +) Engine = InnoDB; + +create table fk_multicol_t3 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t2(cola, colb) on delete set null on update set null +) Engine = InnoDB; + +create table fk_multicol_t4 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t3(cola, colb) on delete set null on update set null +) Engine = InnoDB; + +create table fk_multicol_t5 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t4(cola, colb) on delete restrict on update restrict +) Engine = InnoDB; + +create table fk_multicol_t6 +( + id bigint, + colb 
varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t3(cola, colb) on delete set null on update set null +) Engine = InnoDB; + +create table fk_multicol_t7 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t2(cola, colb) on delete set null on update set null +) Engine = InnoDB; + +/* + * fk_multicol_t10 + * │ + * On Delete Cascade │ + * On Update Cascade │ + * │ + * ▼ + * fk_multicol_t11──────────────────┐ + * │ │ + * │ │ On Delete Restrict + * On Delete Cascade │ │ On Update Restrict + * On Update Cascade │ │ + * │ │ + * ▼ ▼ + * fk_multicol_t12 fk_multicol_t13 + */ + +create table fk_multicol_t10 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb) +) Engine = InnoDB; + +create table fk_multicol_t11 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t10(cola, colb) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_multicol_t12 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t11(cola, colb) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_multicol_t13 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t11(cola, colb) on delete restrict on update restrict +) Engine = InnoDB; + +/* + * fk_multicol_t15 + * │ + * │ + * On Delete Cascade │ + * On Update Cascade │ + * │ + * ▼ + * fk_multicol_t16 + * │ + * On Delete Set Null │ + * On Update Set Null │ + * │ + * ▼ + * fk_multicol_t17──────────────────┐ + * │ │ + * │ │ On Delete Set Null + * On Delete Cascade │ │ On Update Set Null + * On Update Cascade │ │ + * │ │ + * ▼ ▼ + * 
fk_multicol_t18 fk_multicol_t19 + */ + +create table fk_multicol_t15 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb) +) Engine = InnoDB; + +create table fk_multicol_t16 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t15(cola, colb) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_multicol_t17 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t16(cola, colb) on delete set null on update set null +) Engine = InnoDB; + +create table fk_multicol_t18 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t17(cola, colb) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_multicol_t19 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t17(cola, colb) on delete set null on update set null +) Engine = InnoDB; diff --git a/go/test/endtoend/vtgate/foreignkey/sharded_vschema.json b/go/test/endtoend/vtgate/foreignkey/sharded_vschema.json new file mode 100644 index 00000000000..688228f8772 --- /dev/null +++ b/go/test/endtoend/vtgate/foreignkey/sharded_vschema.json @@ -0,0 +1,355 @@ +{ + "sharded": true, + "foreignKeyMode": "managed", + "vindexes": { + "xxhash": { + "type": "xxhash" + }, + "multicol_vdx": { + "type": "multicol", + "params": { + "column_count": "3", + "column_bytes": "1,3,4", + "column_vindex": "hash,binary,unicode_loose_xxhash" + } + } + }, + "tables": { + "t1": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "t2": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "t3": { + "column_vindexes": [ + { + "column": "id", + "name": 
"xxhash" + } + ] + }, + "t4": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "t5": { + "column_vindexes": [ + { + "column": "sk", + "name": "xxhash" + } + ] + }, + "t6": { + "column_vindexes": [ + { + "column": "sk", + "name": "xxhash" + } + ] + }, + "multicol_tbl1": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb", + "colc" + ], + "name": "multicol_vdx" + } + ] + }, + "multicol_tbl2": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb", + "colc" + ], + "name": "multicol_vdx" + } + ] + }, + "fk_t1": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t2": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t3": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t4": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t5": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t6": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t7": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t10": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t11": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t12": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t13": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t15": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t16": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t17": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t18": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t19": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + 
}, + "fk_t20": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_multicol_t1": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_multicol_t2": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_multicol_t3": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_multicol_t4": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_multicol_t5": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_multicol_t6": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_multicol_t7": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_multicol_t10": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_multicol_t11": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_multicol_t12": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_multicol_t13": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_multicol_t15": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_multicol_t16": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_multicol_t17": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_multicol_t18": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_multicol_t19": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + } + } +} \ No newline at end of file diff --git a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go new file mode 100644 index 00000000000..dd6428372eb --- /dev/null +++ b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go @@ -0,0 
+1,1165 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fkstress + +import ( + "context" + "flag" + "fmt" + "math/rand" + "os" + "path" + "slices" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/onlineddl" + "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/textutil" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/sqlparser" +) + +// This endtoend test is designd to validate VTGate's FOREIGN KEY implementation for unsharded/single-sharded/shard-scope, meaning +// we expect foreign key constraints to be limited to a shard (related rows can never be on diffrent shards). +// +// This test validates NO ACTION, CASCADE and SET NULL reference actions. +// VTGate's support for foreign keys includes: +// - Analyzing the foreign key constraints in a keyspace. +// - Rejecting INSERT statements for child table when there's no matching row on a parent table. +// - Handling DELETE and UPDATE statements on a parent table according to the reference action on all children. +// Specifically, this means for example that VTGate will handle a ON DELETE CASCADE in Vitess plane. 
It will first delete rows +// from the child (recursive operation) before deleting the row on the parent. As result, the underlying MySQL server will have +// nothing to cascade. +// +// The design of this test is as follows: +// - Create a cluster with PRIMARY and REPLICA tablets +// - Given this structure of tables with foreign key constraints: +// stress_parent +// +- stress_child +// +- stress_grandchild +// +- stress_child2 +// - Create these tables. Then, on the MySQL replica, remove the foreign key constraints. +// - Static test: +// - Randomly populate all tables via highly-contentive INSERT/UPDATE/DELETE statements +// - Validate collected metrics match actual table data +// - Validate foreign key constraints integrity +// - Workload test: +// - Initially populate tables as above +// - Run a high contention workload where multiple connections issue random INSERT/UPDATE/DELETE on all related tables +// - Validate collected metrics match actual table data +// - Validate foreign key constraints integrity on MySQL primary +// - Validate foreign key constraints integrity on MySQL replica +// - Compare data on primary & replica +// +// We of course know that foreign key integrity is maintained on the MySQL primary. However, the replica does not have the matching +// constraints. Since cascaded (SET NULL, CASCADE) writes are handled internally by InnoDB and not written to the binary log, +// any cascaded writes on the primary are lost, and the replica is unaware of those writes. Without VTGate intervention, we expect +// the replica to quickly diverge from the primary, and in fact in all likelyhood replication will break very quickly. +// However, if VTGate implements the cascading rules correctly, the primary MySQL server will never have any actual cascades, and +// so cascaded writes are all accounted for in the binary logs, which means we can expect the replica to be compliant with the +// primary. 
+ +type WriteMetrics struct { + mu sync.Mutex + insertsAttempts, insertsFailures, insertsNoops, inserts int64 + updatesAttempts, updatesFailures, updatesNoops, updates int64 + deletesAttempts, deletesFailures, deletesNoops, deletes int64 + + insertsFKErrors, updatesFKErrors, deletesFKErrors int64 + sampleInsertFKError, sampleUpdateFKError, sampleDeleteFKError error +} + +func (w *WriteMetrics) Clear() { + w.mu.Lock() + defer w.mu.Unlock() + + w.inserts = 0 + w.updates = 0 + w.deletes = 0 + + w.insertsAttempts = 0 + w.insertsFailures = 0 + w.insertsNoops = 0 + + w.updatesAttempts = 0 + w.updatesFailures = 0 + w.updatesNoops = 0 + + w.deletesAttempts = 0 + w.deletesFailures = 0 + w.deletesNoops = 0 + + w.insertsFKErrors = 0 + w.updatesFKErrors = 0 + w.deletesFKErrors = 0 +} + +func (w *WriteMetrics) String() string { + return fmt.Sprintf(`WriteMetrics: inserts-deletes=%d, updates-deletes=%d, +insertsAttempts=%d, insertsFailures=%d, insertsNoops=%d, inserts=%d, +updatesAttempts=%d, updatesFailures=%d, updatesNoops=%d, updates=%d, +deletesAttempts=%d, deletesFailures=%d, deletesNoops=%d, deletes=%d, +`, + w.inserts-w.deletes, w.updates-w.deletes, + w.insertsAttempts, w.insertsFailures, w.insertsNoops, w.inserts, + w.updatesAttempts, w.updatesFailures, w.updatesNoops, w.updates, + w.deletesAttempts, w.deletesFailures, w.deletesNoops, w.deletes, + ) +} + +var ( + clusterInstance *cluster.LocalProcessCluster + shards []cluster.Shard + primary *cluster.Vttablet + replica *cluster.Vttablet + vtParams mysql.ConnParams + + onlineDDLStrategy = "vitess --unsafe-allow-foreign-keys --cut-over-threshold=15s" + hostname = "localhost" + keyspaceName = "ks" + cell = "zone1" + schemaChangeDirectory = "" + parentTableName = "stress_parent" + childTableName = "stress_child" + child2TableName = "stress_child2" + grandchildTableName = "stress_grandchild" + tableNames = []string{parentTableName, childTableName, child2TableName, grandchildTableName} + reverseTableNames []string + + seedOnce 
sync.Once + + referenceActionMap = map[sqlparser.ReferenceAction]string{ + sqlparser.NoAction: "NO ACTION", + sqlparser.Cascade: "CASCADE", + sqlparser.SetNull: "SET NULL", + } + referenceActions = []sqlparser.ReferenceAction{sqlparser.NoAction, sqlparser.SetNull, sqlparser.Cascade} + createStatements = []string{ + ` + CREATE TABLE stress_parent ( + id bigint not null, + parent_id bigint, + rand_val varchar(32) null default '', + hint_col varchar(64) not null default '', + created_timestamp timestamp not null default current_timestamp, + updates int unsigned not null default 0, + PRIMARY KEY (id), + key parent_id_idx(parent_id), + key created_idx(created_timestamp), + key updates_idx(updates) + ) ENGINE=InnoDB + `, + ` + CREATE TABLE stress_child ( + id bigint not null, + parent_id bigint, + rand_val varchar(32) null default '', + hint_col varchar(64) not null default '', + created_timestamp timestamp not null default current_timestamp, + updates int unsigned not null default 0, + PRIMARY KEY (id), + key parent_id_idx(parent_id), + key created_idx(created_timestamp), + key updates_idx(updates), + CONSTRAINT child_parent_fk FOREIGN KEY (parent_id) REFERENCES stress_parent (id) ON DELETE %s ON UPDATE %s + ) ENGINE=InnoDB + `, + ` + CREATE TABLE stress_child2 ( + id bigint not null, + parent_id bigint, + rand_val varchar(32) null default '', + hint_col varchar(64) not null default '', + created_timestamp timestamp not null default current_timestamp, + updates int unsigned not null default 0, + PRIMARY KEY (id), + key parent_id_idx(parent_id), + key created_idx(created_timestamp), + key updates_idx(updates), + CONSTRAINT child2_parent_fk FOREIGN KEY (parent_id) REFERENCES stress_parent (id) ON DELETE %s ON UPDATE %s + ) ENGINE=InnoDB + `, + ` + CREATE TABLE stress_grandchild ( + id bigint not null, + parent_id bigint, + rand_val varchar(32) null default '', + hint_col varchar(64) not null default '', + created_timestamp timestamp not null default current_timestamp, + 
updates int unsigned not null default 0, + PRIMARY KEY (id), + key parent_id_idx(parent_id), + key created_idx(created_timestamp), + key updates_idx(updates), + CONSTRAINT grandchild_child_fk FOREIGN KEY (parent_id) REFERENCES stress_child (id) ON DELETE %s ON UPDATE %s + ) ENGINE=InnoDB + `, + } + dropConstraintsStatements = []string{ + `ALTER TABLE stress_child DROP CONSTRAINT child_parent_fk`, + `ALTER TABLE stress_child2 DROP CONSTRAINT child2_parent_fk`, + `ALTER TABLE stress_grandchild DROP CONSTRAINT grandchild_child_fk`, + } + alterHintStatement = ` + ALTER TABLE %s modify hint_col varchar(64) not null default '%s' + ` + insertRowStatement = ` + INSERT IGNORE INTO %s (id, parent_id, rand_val) VALUES (%d, %d, left(md5(rand()), 8)) + ` + updateRowStatement = ` + UPDATE %s SET rand_val=left(md5(rand()), 8), updates=updates+1 WHERE id=%d + ` + updateRowIdStatement = ` + UPDATE %s SET id=%v, rand_val=left(md5(rand()), 8), updates=updates+1 WHERE id=%d + ` + deleteRowStatement = ` + DELETE FROM %s WHERE id=%d AND updates=1 + ` + // We use CAST(SUM(updates) AS SIGNED) because SUM() returns a DECIMAL datatype, and we want to read a SIGNED INTEGER type + selectCountRowsStatement = ` + SELECT COUNT(*) AS num_rows, CAST(SUM(updates) AS SIGNED) AS sum_updates FROM %s + ` + selectMatchingRowsChild = ` + select stress_child.id from stress_child join stress_parent on (stress_parent.id = stress_child.parent_id) + ` + selectMatchingRowsChild2 = ` + select stress_child2.id from stress_child2 join stress_parent on (stress_parent.id = stress_child2.parent_id) + ` + selectMatchingRowsGrandchild = ` + select stress_grandchild.id from stress_grandchild join stress_child on (stress_child.id = stress_grandchild.parent_id) + ` + selectOrphanedRowsChild = ` + select stress_child.id from stress_child left join stress_parent on (stress_parent.id = stress_child.parent_id) where stress_parent.id is null + ` + selectOrphanedRowsChild2 = ` + select stress_child2.id from stress_child2 left 
join stress_parent on (stress_parent.id = stress_child2.parent_id) where stress_parent.id is null + ` + selectOrphanedRowsGrandchild = ` + select stress_grandchild.id from stress_grandchild left join stress_child on (stress_child.id = stress_grandchild.parent_id) where stress_child.id is null + ` + deleteAllStatement = ` + DELETE FROM %s + ` + writeMetrics = map[string]*WriteMetrics{} +) + +const ( + maxTableRows = 4096 + workloadDuration = 5 * time.Second + migrationWaitTimeout = 60 * time.Second +) + +// The following variables are fit for a local, strong developer box. +// The test overrides these into more relaxed values if running on GITHUB_ACTIONS, +// seeing that GitHub CI is much weaker. +var ( + maxConcurrency = 10 + singleConnectionSleepInterval = 10 * time.Millisecond + countIterations = 3 +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitcode, err := func() (int, error) { + clusterInstance = cluster.NewCluster(cell, hostname) + schemaChangeDirectory = path.Join("/tmp", fmt.Sprintf("schema_change_dir_%d", clusterInstance.GetAndReserveTabletUID())) + defer os.RemoveAll(schemaChangeDirectory) + defer clusterInstance.Teardown() + + if _, err := os.Stat(schemaChangeDirectory); os.IsNotExist(err) { + _ = os.Mkdir(schemaChangeDirectory, 0700) + } + + clusterInstance.VtctldExtraArgs = []string{ + "--schema_change_dir", schemaChangeDirectory, + "--schema_change_controller", "local", + "--schema_change_check_interval", "1s", + } + + clusterInstance.VtTabletExtraArgs = []string{ + "--heartbeat_enable", + "--heartbeat_interval", "250ms", + "--heartbeat_on_demand_duration", "5s", + "--migration_check_interval", "5s", + "--watch_replication_stream", + "--vreplication_tablet_type", "primary", + } + clusterInstance.VtGateExtraArgs = []string{} + + if err := clusterInstance.StartTopo(); err != nil { + return 1, err + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + VSchema: `{ + "sharded": false, 
+ "foreignKeyMode": "managed" + }`, + } + + // We will use a replica to confirm that vtgate's cascading works correctly. + if err := clusterInstance.StartKeyspace(*keyspace, []string{"1"}, 1, false); err != nil { + return 1, err + } + + vtgateInstance := clusterInstance.NewVtgateInstance() + // Start vtgate + if err := vtgateInstance.Setup(); err != nil { + return 1, err + } + // ensure it is torn down during cluster TearDown + clusterInstance.VtgateProcess = *vtgateInstance + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + + return m.Run(), nil + }() + if err != nil { + fmt.Printf("%v\n", err) + os.Exit(1) + } else { + os.Exit(exitcode) + } + +} + +func queryTablet(t *testing.T, tablet *cluster.Vttablet, query string, expectError string) *sqltypes.Result { + rs, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true) + if expectError == "" { + assert.NoError(t, err) + } else { + assert.ErrorContains(t, err, expectError) + } + return rs +} + +func tabletTestName(t *testing.T, tablet *cluster.Vttablet) string { + switch tablet { + case primary: + return "primary" + case replica: + return "replica" + default: + assert.FailNowf(t, "unknown tablet", "%v, type=%v", tablet.Alias, tablet.Type) + } + return "" +} + +func getTabletPosition(t *testing.T, tablet *cluster.Vttablet) replication.Position { + rs := queryTablet(t, tablet, "select @@gtid_executed as gtid_executed", "") + row := rs.Named().Row() + require.NotNil(t, row) + gtidExecuted := row.AsString("gtid_executed", "") + require.NotEmpty(t, gtidExecuted) + pos, err := replication.DecodePositionDefaultFlavor(gtidExecuted, replication.Mysql56FlavorID) + assert.NoError(t, err) + return pos +} + +func waitForReplicaCatchup(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + primaryPos := getTabletPosition(t, primary) + for { + replicaPos := getTabletPosition(t, replica) + if 
replicaPos.GTIDSet.Contains(primaryPos.GTIDSet) { + // success + return + } + if !cluster.ValidateReplicationIsHealthy(t, replica) { + assert.FailNow(t, "replication is broken; not waiting for catchup") + return + } + select { + case <-ctx.Done(): + assert.FailNow(t, "timeout waiting for replica to catch up") + return + case <-time.After(time.Second): + // + } + } +} + +func validateMetrics(t *testing.T, tcase *testCase) { + for _, workloadTable := range []string{parentTableName, childTableName, child2TableName, grandchildTableName} { + t.Run(workloadTable, func(t *testing.T) { + t.Run("fk errors", func(t *testing.T) { + testSelectTableFKErrors(t, workloadTable, tcase) + }) + var primaryRows, replicaRows int64 + t.Run(tabletTestName(t, primary), func(t *testing.T) { + primaryRows = testSelectTableMetrics(t, primary, workloadTable, tcase) + }) + t.Run(tabletTestName(t, replica), func(t *testing.T) { + replicaRows = testSelectTableMetrics(t, replica, workloadTable, tcase) + }) + t.Run("compare primary and replica", func(t *testing.T) { + assert.Equal(t, primaryRows, replicaRows) + }) + }) + } +} + +func TestInitialSetup(t *testing.T) { + shards = clusterInstance.Keyspaces[0].Shards + require.Equal(t, 1, len(shards)) + require.Equal(t, 2, len(shards[0].Vttablets)) + primary = shards[0].Vttablets[0] + require.NotNil(t, primary) + replica = shards[0].Vttablets[1] + require.NotNil(t, replica) + require.NotEqual(t, primary.Alias, replica.Alias) + + tableNames = []string{parentTableName, childTableName, child2TableName, grandchildTableName} + reverseTableNames = slices.Clone(tableNames) + slices.Reverse(reverseTableNames) + require.ElementsMatch(t, tableNames, reverseTableNames) + + for _, tableName := range tableNames { + writeMetrics[tableName] = &WriteMetrics{} + } + + if val, present := os.LookupEnv("GITHUB_ACTIONS"); present && val != "" { + // This is the place to fine tune the stress parameters if GitHub actions are too slow + maxConcurrency = maxConcurrency / 2 + 
singleConnectionSleepInterval = singleConnectionSleepInterval * 2 + } + t.Logf("==== test setup: maxConcurrency=%v, singleConnectionSleepInterval=%v", maxConcurrency, singleConnectionSleepInterval) +} + +type testCase struct { + onDeleteAction sqlparser.ReferenceAction + onUpdateAction sqlparser.ReferenceAction + workload bool + onlineDDLTable string +} + +// ExecuteFKTest runs a single test case, which can be: +// - With/out workload +// - Either one of ON DELETE actions +// - Either one of ON UPDATE actions +// - Potentially running an Online DDL on an indicated table (this will not work in Vanilla MySQL, see https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/) +func ExecuteFKTest(t *testing.T, tcase *testCase) { + t.Logf("==== test setup: maxConcurrency=%v, singleConnectionSleepInterval=%v", maxConcurrency, singleConnectionSleepInterval) + workloadName := "static data" + if tcase.workload { + workloadName = "workload" + } + testName := fmt.Sprintf("%s/del=%s/upd=%s", workloadName, referenceActionMap[tcase.onDeleteAction], referenceActionMap[tcase.onUpdateAction]) + if tcase.onlineDDLTable != "" { + testName = fmt.Sprintf("%s/ddl=%s", testName, tcase.onlineDDLTable) + } + t.Run(testName, func(t *testing.T) { + ctx := context.Background() + + t.Run("create schema", func(t *testing.T) { + createInitialSchema(t, tcase) + }) + t.Run("init tables", func(t *testing.T) { + populateTables(t) + }) + if tcase.workload { + t.Run("workload", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, workloadDuration) + defer cancel() + + var wg sync.WaitGroup + for _, workloadTable := range []string{parentTableName, childTableName, child2TableName, grandchildTableName} { + wg.Add(1) + go func(tbl string) { + defer wg.Done() + runMultipleConnections(ctx, t, tbl) + }(workloadTable) + } + + if tcase.onlineDDLTable != "" { + t.Run("migrating", func(t *testing.T) { + // This cannot work with Vanilla MySQL. 
We put the code for testing, but we're not actually going to use it + // for now. The test cases all have empty tcase.onlineDDLTable + hint := "hint-alter" + uuid := testOnlineDDLStatement(t, fmt.Sprintf(alterHintStatement, tcase.onlineDDLTable, hint), onlineDDLStrategy, "vtgate", hint) + ok := onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + require.True(t, ok) // or else don't attempt to cleanup artifacts + t.Run("cleanup artifacts", func(t *testing.T) { + rs := onlineddl.ReadMigrations(t, &vtParams, uuid) + require.NotNil(t, rs) + row := rs.Named().Row() + require.NotNil(t, row) + + artifacts := textutil.SplitDelimitedList(row.AsString("artifacts", "")) + for _, artifact := range artifacts { + t.Run(artifact, func(t *testing.T) { + err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, "drop table if exists "+artifact) + require.NoError(t, err) + }) + } + }) + }) + } + wg.Wait() + }) + } + t.Run("wait for replica", func(t *testing.T) { + waitForReplicaCatchup(t) + }) + t.Run("validate metrics", func(t *testing.T) { + validateMetrics(t, tcase) + }) + t.Run("validate replication health", func(t *testing.T) { + cluster.ValidateReplicationIsHealthy(t, replica) + }) + t.Run("validate fk", func(t *testing.T) { + testFKIntegrity(t, primary, tcase) + testFKIntegrity(t, replica, tcase) + }) + }) +} + +func TestStressFK(t *testing.T) { + defer cluster.PanicHandler(t) + + t.Run("validate replication health", func(t *testing.T) { + cluster.ValidateReplicationIsHealthy(t, replica) + }) + + runOnlineDDL := false + if val, present := os.LookupEnv("FK_STRESS_ONLINE_DDL"); present && val != "" { + runOnlineDDL = true + } + // Without workload ; with workload + for _, workload := range []bool{false, true} { + // For any type of ON DELETE action + for _, actionDelete := range referenceActions { + // For any type of ON UPDATE action + for _, actionUpdate := range referenceActions { + tcase := &testCase{ + workload: workload, 
+ onDeleteAction: actionDelete, + onUpdateAction: actionUpdate, + } + ExecuteFKTest(t, tcase) + } + } + } + + if runOnlineDDL { + // Foreign keys introduce some overhead. We reduce concurrency so that GitHub CI can accommodate. + maxConcurrency = maxConcurrency * 4 / 5 + singleConnectionSleepInterval = singleConnectionSleepInterval * 2 + + // Running Online DDL on all test tables. We don't use all of the combinations + // presented above; we will run with workload, and suffice with same ON DELETE - ON UPDATE actions. + for _, action := range referenceActions { + for _, table := range tableNames { + tcase := &testCase{ + workload: true, + onDeleteAction: action, + onUpdateAction: action, + onlineDDLTable: table, + } + ExecuteFKTest(t, tcase) + } + } + } +} + +// createInitialSchema creates the tables from scratch, and drops the foreign key constraints on the replica. +func createInitialSchema(t *testing.T, tcase *testCase) { + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.Nil(t, err) + defer conn.Close() + + t.Run("dropping tables", func(t *testing.T) { + for _, tableName := range reverseTableNames { + err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, "drop table if exists "+tableName) + require.NoError(t, err) + } + }) + t.Run("creating tables", func(t *testing.T) { + // Create the stress tables + var b strings.Builder + for i, sql := range createStatements { + if i == 0 { + // parent table, no foreign keys + b.WriteString(sql) + } else { + b.WriteString(fmt.Sprintf(sql, referenceActionMap[tcase.onDeleteAction], referenceActionMap[tcase.onUpdateAction])) + } + b.WriteString(";") + } + err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, b.String()) + require.NoError(t, err) + }) + t.Run("wait for replica", func(t *testing.T) { + waitForReplicaCatchup(t) + }) + t.Run("validating tables: vttablet", func(t *testing.T) { + // Check if table is created. Checked on tablets. 
+ checkTable(t, parentTableName, "hint_col") + checkTable(t, childTableName, "hint_col") + checkTable(t, child2TableName, "hint_col") + checkTable(t, grandchildTableName, "hint_col") + }) + t.Run("validating tables: vtgate", func(t *testing.T) { + // Wait for tables to appear on VTGate + waitForTable(t, parentTableName, conn) + waitForTable(t, childTableName, conn) + waitForTable(t, child2TableName, conn) + waitForTable(t, grandchildTableName, conn) + }) + t.Run("waiting for vschema definition to apply", func(t *testing.T) { + for _, tableName := range []string{parentTableName, childTableName, child2TableName, grandchildTableName} { + err := utils.WaitForColumn(t, clusterInstance.VtgateProcess, keyspaceName, tableName, "id") + require.NoError(t, err) + } + }) + + t.Run("dropping foreign keys on replica", func(t *testing.T) { + for _, statement := range dropConstraintsStatements { + _ = queryTablet(t, replica, "set global super_read_only=0", "") + _ = queryTablet(t, replica, statement, "") + _ = queryTablet(t, replica, "set global super_read_only=1", "") + } + }) + t.Run("validate definitions", func(t *testing.T) { + for _, tableName := range []string{childTableName, child2TableName, grandchildTableName} { + t.Run(tableName, func(t *testing.T) { + t.Run(tabletTestName(t, primary), func(t *testing.T) { + stmt := getCreateTableStatement(t, primary, tableName) + assert.Contains(t, stmt, "CONSTRAINT") + }) + t.Run(tabletTestName(t, replica), func(t *testing.T) { + stmt := getCreateTableStatement(t, replica, tableName) + assert.NotContains(t, stmt, "CONSTRAINT") + }) + }) + } + }) +} + +// testOnlineDDLStatement runs an online DDL, ALTER statement +func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, executeStrategy string, expectHint string) (uuid string) { + if executeStrategy == "vtgate" { + row := onlineddl.VtgateExecDDL(t, &vtParams, ddlStrategy, alterStatement, "").Named().Row() + if row != nil { + uuid = row.AsString("uuid", "") + } 
+ } else { + var err error + uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.VtctlClientParams{DDLStrategy: ddlStrategy}) + assert.NoError(t, err) + } + uuid = strings.TrimSpace(uuid) + fmt.Println("# Generated UUID (for debug purposes):") + fmt.Printf("<%s>\n", uuid) + + strategySetting, err := schema.ParseDDLStrategy(ddlStrategy) + assert.NoError(t, err) + + if !strategySetting.Strategy.IsDirect() { + t.Logf("===== waiting for migration %v to conclude", uuid) + status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, migrationWaitTimeout, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) + fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) + } + + if expectHint != "" { + stmt, err := sqlparser.Parse(alterStatement) + require.NoError(t, err) + ddlStmt, ok := stmt.(sqlparser.DDLStatement) + require.True(t, ok) + tableName := ddlStmt.GetTable().Name.String() + checkTable(t, tableName, expectHint) + } + + if !strategySetting.Strategy.IsDirect() { + // let's see what FK tables have been renamed to + rs := onlineddl.ReadMigrations(t, &vtParams, uuid) + require.NotNil(t, rs) + row := rs.Named().Row() + require.NotNil(t, row) + + artifacts := textutil.SplitDelimitedList(row.AsString("artifacts", "")) + for _, artifact := range artifacts { + checkTable(t, artifact, "") + } + } + + return uuid +} + +// waitForTable waits until table is seen in VTGate +func waitForTable(t *testing.T, tableName string, conn *mysql.Conn) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + query := fmt.Sprintf("select count(*) from %s", tableName) + for { + if _, err := conn.ExecuteFetch(query, 1, false); err == nil { + return // good + } + select { + case <-ticker.C: + case <-ctx.Done(): + t.Fail() + return + } + } +} + +// checkTable checks that the given table exists on all tablets 
+func checkTable(t *testing.T, showTableName string, expectHint string) { + for _, tablet := range shards[0].Vttablets { + checkTablesCount(t, tablet, showTableName, 1) + if expectHint != "" { + createStatement := getCreateTableStatement(t, tablet, showTableName) + assert.Contains(t, createStatement, expectHint) + } + } +} + +// checkTablesCount checks the number of tables in the given tablet +func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName string, expectCount int) { + query := fmt.Sprintf(`show tables like '%s';`, showTableName) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + rowcount := 0 + for { + queryResult, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true) + require.Nil(t, err) + rowcount = len(queryResult.Rows) + if rowcount > 0 { + break + } + + select { + case <-time.After(time.Second): + case <-ctx.Done(): + break + } + } + assert.Equal(t, expectCount, rowcount) +} + +// getCreateTableStatement returns the CREATE TABLE statement for a given table +func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName string) (statement string) { + queryResult := queryTablet(t, tablet, fmt.Sprintf("show create table %s", tableName), "") + + require.Equal(t, len(queryResult.Rows), 1) + row := queryResult.Rows[0] + assert.Equal(t, len(row), 2) // table name, create statement + statement = row[1].ToString() + return statement +} + +func isFKError(err error) bool { + if err == nil { + return false + } + sqlErr, ok := err.(*sqlerror.SQLError) + if !ok { + return false + } + + // Let's try and account for all known errors: + switch sqlErr.Number() { + case sqlerror.ERDupEntry: // happens since we hammer the tables randomly + return false + case sqlerror.ERTooManyUserConnections: // can happen in Online DDL cut-over + return false + case sqlerror.ERUnknownError: // happens when query buffering times out + return false + case sqlerror.ERQueryInterrupted: // 
cancelled due to context expiration + return false + case sqlerror.ERLockDeadlock: + return false // bummer, but deadlocks can happen, it's a legit error. + case sqlerror.ERNoReferencedRow, + sqlerror.ERRowIsReferenced, + sqlerror.ERRowIsReferenced2, + sqlerror.ErNoReferencedRow2: + return true + case sqlerror.ERNotSupportedYet: + return true + } + // Unknown error + fmt.Printf("Unexpected error detected in isFKError: %v\n", err) + // Treat it as if it's a FK error + return true +} + +func generateInsert(t *testing.T, tableName string, conn *mysql.Conn) error { + id := rand.Int31n(int32(maxTableRows)) + parentId := rand.Int31n(int32(maxTableRows)) + query := fmt.Sprintf(insertRowStatement, tableName, id, parentId) + qr, err := conn.ExecuteFetch(query, 1000, true) + + func() { + writeMetrics[tableName].mu.Lock() + defer writeMetrics[tableName].mu.Unlock() + + writeMetrics[tableName].insertsAttempts++ + if err != nil { + writeMetrics[tableName].insertsFailures++ + if isFKError(err) { + writeMetrics[tableName].insertsFKErrors++ + writeMetrics[tableName].sampleInsertFKError = err + } + return + } + assert.Less(t, qr.RowsAffected, uint64(2)) + if qr.RowsAffected == 0 { + writeMetrics[tableName].insertsNoops++ + return + } + writeMetrics[tableName].inserts++ + }() + return err +} + +func generateUpdate(t *testing.T, tableName string, conn *mysql.Conn) error { + // Most of the UPDATEs we run are "normal" updates, but the minority will actually change the + // `id` column itself, which is the FOREIGN KEY parent column for some of the tables. 
+ id := rand.Int31n(int32(maxTableRows)) + query := fmt.Sprintf(updateRowStatement, tableName, id) + if tableName == parentTableName || tableName == childTableName { + if rand.Intn(4) == 0 { + updatedId := rand.Int31n(int32(maxTableRows)) + query = fmt.Sprintf(updateRowIdStatement, tableName, updatedId, id) + } + } + qr, err := conn.ExecuteFetch(query, 1000, true) + + func() { + writeMetrics[tableName].mu.Lock() + defer writeMetrics[tableName].mu.Unlock() + + writeMetrics[tableName].updatesAttempts++ + if err != nil { + writeMetrics[tableName].updatesFailures++ + if isFKError(err) { + writeMetrics[tableName].updatesFKErrors++ + writeMetrics[tableName].sampleUpdateFKError = err + } + return + } + assert.Less(t, qr.RowsAffected, uint64(2)) + if qr.RowsAffected == 0 { + writeMetrics[tableName].updatesNoops++ + return + } + writeMetrics[tableName].updates++ + }() + return err +} + +func generateDelete(t *testing.T, tableName string, conn *mysql.Conn) error { + id := rand.Int31n(int32(maxTableRows)) + query := fmt.Sprintf(deleteRowStatement, tableName, id) + qr, err := conn.ExecuteFetch(query, 1000, true) + + func() { + writeMetrics[tableName].mu.Lock() + defer writeMetrics[tableName].mu.Unlock() + + writeMetrics[tableName].deletesAttempts++ + if err != nil { + writeMetrics[tableName].deletesFailures++ + if isFKError(err) { + writeMetrics[tableName].deletesFKErrors++ + writeMetrics[tableName].sampleDeleteFKError = err + } + return + } + assert.Less(t, qr.RowsAffected, uint64(2)) + if qr.RowsAffected == 0 { + writeMetrics[tableName].deletesNoops++ + return + } + writeMetrics[tableName].deletes++ + }() + return err +} + +func runSingleConnection(ctx context.Context, t *testing.T, tableName string) { + log.Infof("Running single connection on %s", tableName) + conn, err := mysql.Connect(ctx, &vtParams) + require.Nil(t, err) + defer conn.Close() + + _, err = conn.ExecuteFetch("set autocommit=1", 1000, true) + require.Nil(t, err) + _, err = conn.ExecuteFetch("set transaction 
isolation level read committed", 1000, true) + require.Nil(t, err) + + for { + switch rand.Int31n(3) { + case 0: + _ = generateInsert(t, tableName, conn) + case 1: + _ = generateUpdate(t, tableName, conn) + case 2: + _ = generateDelete(t, tableName, conn) + } + select { + case <-ctx.Done(): + log.Infof("Terminating single connection") + return + case <-time.After(singleConnectionSleepInterval): + } + } +} + +func runMultipleConnections(ctx context.Context, t *testing.T, tableName string) { + log.Infof("Running multiple connections") + var wg sync.WaitGroup + for i := 0; i < maxConcurrency; i++ { + wg.Add(1) + go func() { + defer wg.Done() + runSingleConnection(ctx, t, tableName) + }() + } + wg.Wait() + log.Infof("Running multiple connections: done") +} + +func wrapWithNoFKChecks(sql string) string { + return fmt.Sprintf("set foreign_key_checks=0; %s; set foreign_key_checks=1;", sql) +} + +// populateTables randomly populates all test tables. This is done sequentially. +func populateTables(t *testing.T) { + log.Infof("initTable begin") + defer log.Infof("initTable complete") + + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.Nil(t, err) + defer conn.Close() + + t.Logf("===== clearing tables") + for _, tableName := range reverseTableNames { + writeMetrics[tableName].Clear() + deleteQuery := fmt.Sprintf(deleteAllStatement, tableName) + _, err = conn.ExecuteFetch(deleteQuery, 1000, true) + require.Nil(t, err) + } + // In an ideal world we would randomly re-seed the tables in each and every instance of the test. + // In reality, that takes a lot of time, and while the seeding is important, it's not the heart of + // the test. To that effect, the seeding works as follows: + // - First ever time, we randomly seed the tables (running thousands of queries). We then create *_seed + // tables and clone the data in those seed tables. + // - 2nd test and forward: we just copy over the rows from the *_seed tables. 
+ tablesSeeded := false + seedOnce.Do(func() { + for _, tableName := range tableNames { + t.Run(tableName, func(t *testing.T) { + t.Run("populating", func(t *testing.T) { + // populate parent, then child, child2, then grandchild + for i := 0; i < maxTableRows/2; i++ { + generateInsert(t, tableName, conn) + } + for i := 0; i < maxTableRows/4; i++ { + generateUpdate(t, tableName, conn) + } + for i := 0; i < maxTableRows/4; i++ { + generateDelete(t, tableName, conn) + } + }) + t.Run("creating seed", func(t *testing.T) { + // We create the seed table in the likeness of stress_parent, because that's the only table + // that doesn't have FK constraints. + { + createSeedQuery := fmt.Sprintf("create table %s_seed like %s", tableName, parentTableName) + _, err := conn.ExecuteFetch(createSeedQuery, 1000, true) + require.NoError(t, err) + } + { + seedQuery := fmt.Sprintf("insert into %s_seed select * from %s", tableName, tableName) + _, err := conn.ExecuteFetch(seedQuery, 1000, true) + require.NoError(t, err) + } + { + validationQuery := fmt.Sprintf("select count(*) as c from %s_seed", tableName) + rs, err := conn.ExecuteFetch(validationQuery, 1000, true) + require.NoError(t, err) + row := rs.Named().Row() + require.NotNil(t, row) + require.NotZero(t, row.AsInt64("c", 0)) + } + }) + }) + } + tablesSeeded = true + }) + if !tablesSeeded { + t.Run("reseeding", func(t *testing.T) { + for _, tableName := range tableNames { + seedQuery := fmt.Sprintf("insert into %s select * from %s_seed", tableName, tableName) + _, err := conn.ExecuteFetch(seedQuery, 1000, true) + require.NoError(t, err) + } + }) + } + + t.Run("validating table rows", func(t *testing.T) { + for _, tableName := range tableNames { + validationQuery := fmt.Sprintf(selectCountRowsStatement, tableName) + rs, err := conn.ExecuteFetch(validationQuery, 1000, true) + require.NoError(t, err) + row := rs.Named().Row() + require.NotNil(t, row) + numRows := row.AsInt64("num_rows", 0) + sumUpdates := row.AsInt64("sum_updates", 
0) + require.NotZero(t, numRows) + if !tablesSeeded { + // We cloned the data from *_seed tables. This means we didn't populate writeMetrics. Now, + // this function only takes care of the base seed. We will later on run a stress workload on + // these tables, at the end of which we will examine the writeMetrics. We thus have to have those + // metrics consistent with the cloned data. It's a bit ugly, but we inject fake writeMetrics. + writeMetrics[tableName].deletes = 1 + writeMetrics[tableName].inserts = numRows + writeMetrics[tableName].deletes + writeMetrics[tableName].updates = sumUpdates + writeMetrics[tableName].deletes + } + } + }) +} + +// testSelectTableMetrics cross references the known metrics (number of successful insert/delete/updates) on each table, with the +// actual number of rows and with the row values on those tables. +// With CASCADE/SET NULL rules we can't do the comparison, because child tables are implicitly affected by the cascading rules, +// and the values do not match what reported to us when we UPDATE/DELETE on the parent tables. +func testSelectTableMetrics( + t *testing.T, + tablet *cluster.Vttablet, + tableName string, + tcase *testCase, +) int64 { + switch tcase.onDeleteAction { + case sqlparser.Cascade, sqlparser.SetNull: + if tableName != parentTableName { + // We can't validate those tables because they will have been affected by cascading rules. + return 0 + } + } + // metrics are unaffected by value of onUpdateAction. 
+ + writeMetrics[tableName].mu.Lock() + defer writeMetrics[tableName].mu.Unlock() + + log.Infof("%s %s", tableName, writeMetrics[tableName].String()) + + rs := queryTablet(t, tablet, fmt.Sprintf(selectCountRowsStatement, tableName), "") + + row := rs.Named().Row() + require.NotNil(t, row) + log.Infof("testSelectTableMetrics, row: %v", row) + numRows := row.AsInt64("num_rows", 0) + sumUpdates := row.AsInt64("sum_updates", 0) + assert.NotZero(t, numRows) + assert.NotZero(t, sumUpdates) + assert.NotZero(t, writeMetrics[tableName].inserts) + assert.NotZero(t, writeMetrics[tableName].deletes) + assert.NotZero(t, writeMetrics[tableName].updates) + assert.Equal(t, writeMetrics[tableName].inserts-writeMetrics[tableName].deletes, numRows) + assert.Equal(t, writeMetrics[tableName].updates-writeMetrics[tableName].deletes, sumUpdates) // because we DELETE WHERE updates=1 + + return numRows +} + +// testSelectTableFKErrors +func testSelectTableFKErrors( + t *testing.T, + tableName string, + tcase *testCase, +) { + writeMetrics[tableName].mu.Lock() + defer writeMetrics[tableName].mu.Unlock() + + if tcase.onDeleteAction == sqlparser.Cascade { + assert.Zerof(t, writeMetrics[tableName].deletesFKErrors, "unexpected foreign key errors for DELETEs in ON DELETE CASCADE. Sample error: %v", writeMetrics[tableName].sampleDeleteFKError) + } + if tcase.onUpdateAction == sqlparser.Cascade { + assert.Zerof(t, writeMetrics[tableName].updatesFKErrors, "unexpected foreign key errors for UPDATEs in ON UPDATE CASCADE. Sample error: %v", writeMetrics[tableName].sampleUpdateFKError) + } +} + +// testFKIntegrity validates that foreign key consitency is maintained on the given tablet. We cross reference all +// parent-child relationships. +// There are two test types: +// 1. Do a JOIN on parent-child associated rows, expect non-empty +// 2. Check that there are no orphaned child rows. 
Notes: +// - This applies to NO ACTION and CASCADE, but not to SET NULL, because SET NULL by design creates orphaned rows. +// - On the primary database, this test trivially passes because of course MySQL maintains this integrity. But remember +// that we remove the foreign key constraints on the replica. Also remember that cascaded writes are not written to +// the binary log. And so, if VTGate does not do a proper job, then a parent and child will drift apart in CASCADE writes. +func testFKIntegrity( + t *testing.T, + tablet *cluster.Vttablet, + tcase *testCase, +) { + testName := tabletTestName(t, tablet) + t.Run(testName, func(t *testing.T) { + t.Run("matching parent-child rows", func(t *testing.T) { + rs := queryTablet(t, tablet, selectMatchingRowsChild, "") + assert.NotZero(t, len(rs.Rows)) + }) + t.Run("matching parent-child2 rows", func(t *testing.T) { + rs := queryTablet(t, tablet, selectMatchingRowsChild2, "") + assert.NotZero(t, len(rs.Rows)) + }) + t.Run("matching child-grandchild rows", func(t *testing.T) { + rs := queryTablet(t, tablet, selectMatchingRowsGrandchild, "") + assert.NotZero(t, len(rs.Rows)) + }) + if tcase.onDeleteAction != sqlparser.SetNull && tcase.onUpdateAction != sqlparser.SetNull { + // Because with SET NULL there _are_ orphaned rows + t.Run("parent-child orphaned rows", func(t *testing.T) { + rs := queryTablet(t, tablet, selectOrphanedRowsChild, "") + assert.Zero(t, len(rs.Rows)) + }) + t.Run("parent-child2 orphaned rows", func(t *testing.T) { + rs := queryTablet(t, tablet, selectOrphanedRowsChild2, "") + assert.Zero(t, len(rs.Rows)) + }) + t.Run("child-grandchild orphaned rows", func(t *testing.T) { + rs := queryTablet(t, tablet, selectOrphanedRowsGrandchild, "") + assert.Zero(t, len(rs.Rows)) + }) + } + }) +} diff --git a/go/test/endtoend/vtgate/foreignkey/unsharded_schema.sql b/go/test/endtoend/vtgate/foreignkey/unsharded_schema.sql new file mode 100644 index 00000000000..3b4496d47fb --- /dev/null +++ 
b/go/test/endtoend/vtgate/foreignkey/unsharded_schema.sql @@ -0,0 +1,472 @@ +create table u_t1 +( + id bigint, + col1 bigint, + index(col1), + primary key (id) +) Engine = InnoDB; + +create table u_t2 +( + id bigint, + col2 bigint, + primary key (id), + foreign key (col2) references u_t1 (col1) on delete set null on update set null +) Engine = InnoDB; + +create table u_t3 +( + id bigint, + col3 bigint, + primary key (id), + foreign key (col3) references u_t1 (col1) on delete cascade on update cascade +) Engine = InnoDB; + + +/* + * fk_t1 + * │ + * │ On Delete Restrict + * │ On Update Restrict + * ▼ + * ┌────────────────fk_t2────────────────┐ + * │ │ + * │On Delete Set Null │ On Delete Set Null + * │On Update Set Null │ On Update Set Null + * ▼ ▼ + * fk_t7 fk_t3───────────────────┐ + * │ │ + * │ │ On Delete Set Null + * On Delete Set Null │ │ On Update Set Null + * On Update Set Null │ │ + * ▼ ▼ + * fk_t4 fk_t6 + * │ + * │ + * On Delete Restrict │ + * On Update Restrict │ + * │ + * ▼ + * fk_t5 + */ + +create table fk_t1 +( + id bigint, + col varchar(10), + primary key (id), + index(col) +) Engine = InnoDB; + +create table fk_t2 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t1(col) on delete restrict on update restrict +) Engine = InnoDB; + +create table fk_t3 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t2(col) on delete set null on update set null +) Engine = InnoDB; + +create table fk_t4 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t3(col) on delete set null on update set null +) Engine = InnoDB; + +create table fk_t5 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t4(col) on delete restrict on update restrict +) Engine = InnoDB; + +create table fk_t6 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) 
references fk_t3(col) on delete set null on update set null +) Engine = InnoDB; + +create table fk_t7 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t2(col) on delete set null on update set null +) Engine = InnoDB; + +/* + * fk_t10 + * │ + * On Delete Cascade │ + * On Update Cascade │ + * │ + * ▼ + * fk_t11──────────────────┐ + * │ │ + * │ │ On Delete Restrict + * On Delete Cascade │ │ On Update Restrict + * On Update Cascade │ │ + * │ │ + * ▼ ▼ + * fk_t12 fk_t13 + */ + +create table fk_t10 +( + id bigint, + col varchar(10), + primary key (id), + index(col) +) Engine = InnoDB; + +create table fk_t11 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t10(col) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_t12 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t11(col) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_t13 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t11(col) on delete restrict on update restrict +) Engine = InnoDB; + +/* + * fk_t15 + * │ + * │ + * On Delete Cascade │ + * On Update Cascade │ + * │ + * ▼ + * fk_t16 + * │ + * On Delete Set Null │ + * On Update Set Null │ + * │ + * ▼ + * fk_t17──────────────────┐ + * │ │ + * │ │ On Delete Set Null + * On Delete Cascade │ │ On Update Set Null + * On Update Cascade │ │ + * │ │ + * ▼ ▼ + * fk_t18 fk_t19 + */ + +create table fk_t15 +( + id bigint, + col varchar(10), + primary key (id), + index(col) +) Engine = InnoDB; + +create table fk_t16 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t15(col) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_t17 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t16(col) on 
delete set null on update set null +) Engine = InnoDB; + +create table fk_t18 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t17(col) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_t19 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t17(col) on delete set null on update set null +) Engine = InnoDB; + +/* + Self referenced foreign key from col2 to col in fk_t20 +*/ + +create table fk_t20 +( + id bigint, + col varchar(10), + col2 varchar(10), + primary key (id), + index(col), + foreign key (col2) references fk_t20(col) on delete restrict on update restrict +) Engine = InnoDB; + + +/* + * fk_multicol_t1 + * │ + * │ On Delete Restrict + * │ On Update Restrict + * ▼ + * ┌────────fk_multicol_t2───────────────┐ + * │ │ + * │On Delete Set Null │ On Delete Set Null + * │On Update Set Null │ On Update Set Null + * ▼ ▼ + * fk_multicol_t7 fk_multicol_t3───────────────────┐ + * │ │ + * │ │ On Delete Set Null + * On Delete Set Null │ │ On Update Set Null + * On Update Set Null │ │ + * ▼ ▼ + * fk_multicol_t4 fk_multicol_t6 + * │ + * │ + * On Delete Restrict │ + * On Update Restrict │ + * │ + * ▼ + * fk_multicol_t5 + */ +create table fk_multicol_t1 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb) +) Engine = InnoDB; + +create table fk_multicol_t2 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t1(cola, colb) on delete restrict on update restrict +) Engine = InnoDB; + +create table fk_multicol_t3 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t2(cola, colb) on delete set null on update set null +) Engine = InnoDB; + +create table fk_multicol_t4 +( + id bigint, + colb varchar(10), + cola varchar(10), + 
primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t3(cola, colb) on delete set null on update set null +) Engine = InnoDB; + +create table fk_multicol_t5 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t4(cola, colb) on delete restrict on update restrict +) Engine = InnoDB; + +create table fk_multicol_t6 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t3(cola, colb) on delete set null on update set null +) Engine = InnoDB; + +create table fk_multicol_t7 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t2(cola, colb) on delete set null on update set null +) Engine = InnoDB; + +/* + * fk_multicol_t10 + * │ + * On Delete Cascade │ + * On Update Cascade │ + * │ + * ▼ + * fk_multicol_t11──────────────────┐ + * │ │ + * │ │ On Delete Restrict + * On Delete Cascade │ │ On Update Restrict + * On Update Cascade │ │ + * │ │ + * ▼ ▼ + * fk_multicol_t12 fk_multicol_t13 + */ + +create table fk_multicol_t10 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb) +) Engine = InnoDB; + +create table fk_multicol_t11 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t10(cola, colb) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_multicol_t12 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t11(cola, colb) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_multicol_t13 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign 
key (cola, colb) references fk_multicol_t11(cola, colb) on delete restrict on update restrict +) Engine = InnoDB; + +/* + * fk_multicol_t15 + * │ + * │ + * On Delete Cascade │ + * On Update Cascade │ + * │ + * ▼ + * fk_multicol_t16 + * │ + * On Delete Set Null │ + * On Update Set Null │ + * │ + * ▼ + * fk_multicol_t17──────────────────┐ + * │ │ + * │ │ On Delete Set Null + * On Delete Cascade │ │ On Update Set Null + * On Update Cascade │ │ + * │ │ + * ▼ ▼ + * fk_multicol_t18 fk_multicol_t19 + */ + +create table fk_multicol_t15 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb) +) Engine = InnoDB; + +create table fk_multicol_t16 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t15(cola, colb) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_multicol_t17 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t16(cola, colb) on delete set null on update set null +) Engine = InnoDB; + +create table fk_multicol_t18 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t17(cola, colb) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_multicol_t19 +( + id bigint, + colb varchar(10), + cola varchar(10), + primary key (id), + index(cola, colb), + foreign key (cola, colb) references fk_multicol_t17(cola, colb) on delete set null on update set null +) Engine = InnoDB; diff --git a/go/test/endtoend/vtgate/foreignkey/unsharded_vschema.json b/go/test/endtoend/vtgate/foreignkey/unsharded_vschema.json new file mode 100644 index 00000000000..31f02ca41c4 --- /dev/null +++ b/go/test/endtoend/vtgate/foreignkey/unsharded_vschema.json @@ -0,0 +1,41 @@ +{ + "sharded": false, + "foreignKeyMode": "managed", + 
"tables": { + "u_t1": {}, + "u_t2": {}, + "fk_t1": {}, + "fk_t2": {}, + "fk_t3": {}, + "fk_t4": {}, + "fk_t5": {}, + "fk_t6": {}, + "fk_t7": {}, + "fk_t10": {}, + "fk_t11": {}, + "fk_t12": {}, + "fk_t13": {}, + "fk_t15": {}, + "fk_t16": {}, + "fk_t17": {}, + "fk_t18": {}, + "fk_t19": {}, + "fk_t20": {}, + "fk_multicol_t1": {}, + "fk_multicol_t2": {}, + "fk_multicol_t3": {}, + "fk_multicol_t4": {}, + "fk_multicol_t5": {}, + "fk_multicol_t6": {}, + "fk_multicol_t7": {}, + "fk_multicol_t10": {}, + "fk_multicol_t11": {}, + "fk_multicol_t12": {}, + "fk_multicol_t13": {}, + "fk_multicol_t15": {}, + "fk_multicol_t16": {}, + "fk_multicol_t17": {}, + "fk_multicol_t18": {}, + "fk_multicol_t19": {} + } +} \ No newline at end of file diff --git a/go/test/endtoend/vtgate/foreignkey/utils_test.go b/go/test/endtoend/vtgate/foreignkey/utils_test.go new file mode 100644 index 00000000000..5e0b4a8a3cc --- /dev/null +++ b/go/test/endtoend/vtgate/foreignkey/utils_test.go @@ -0,0 +1,144 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package foreignkey + +import ( + "database/sql" + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" +) + +// getTestName prepends whether the test is for a sharded keyspace or not to the test name. 
+func getTestName(testName string, testSharded bool) string { + if testSharded { + return "Sharded - " + testName + } + return "Unsharded - " + testName +} + +// isMultiColFkTable tells if the table is a multicol table or not. +func isMultiColFkTable(tableName string) bool { + return strings.Contains(tableName, "multicol") +} + +// waitForSchemaTrackingForFkTables waits for schema tracking to have run and seen the tables used +// for foreign key tests. +func waitForSchemaTrackingForFkTables(t *testing.T) { + err := utils.WaitForColumn(t, clusterInstance.VtgateProcess, shardedKs, "fk_t1", "col") + require.NoError(t, err) + err = utils.WaitForColumn(t, clusterInstance.VtgateProcess, shardedKs, "fk_t18", "col") + require.NoError(t, err) + err = utils.WaitForColumn(t, clusterInstance.VtgateProcess, shardedKs, "fk_t11", "col") + require.NoError(t, err) + err = utils.WaitForColumn(t, clusterInstance.VtgateProcess, unshardedKs, "fk_t1", "col") + require.NoError(t, err) + err = utils.WaitForColumn(t, clusterInstance.VtgateProcess, unshardedKs, "fk_t18", "col") + require.NoError(t, err) + err = utils.WaitForColumn(t, clusterInstance.VtgateProcess, unshardedKs, "fk_t11", "col") + require.NoError(t, err) +} + +// getReplicaTablets gets all the replica tablets. +func getReplicaTablets(keyspace string) []*cluster.Vttablet { + var replicaTablets []*cluster.Vttablet + for _, ks := range clusterInstance.Keyspaces { + if ks.Name != keyspace { + continue + } + for _, shard := range ks.Shards { + for _, vttablet := range shard.Vttablets { + if vttablet.Type != "primary" { + replicaTablets = append(replicaTablets, vttablet) + } + } + } + } + return replicaTablets +} + +// removeAllForeignKeyConstraints removes all the foreign key constraints from the given tablet. 
+func removeAllForeignKeyConstraints(t *testing.T, vttablet *cluster.Vttablet, keyspace string) { + getAllFksQuery := `SELECT RefCons.table_name, RefCons.constraint_name FROM information_schema.referential_constraints RefCons;` + res, err := utils.RunSQL(t, getAllFksQuery, vttablet, "") + require.NoError(t, err) + var queries []string + queries = append(queries, "set global super_read_only=0") + for _, row := range res.Rows { + tableName := row[0].ToString() + constraintName := row[1].ToString() + removeFkQuery := fmt.Sprintf("ALTER TABLE %v DROP CONSTRAINT %v", tableName, constraintName) + queries = append(queries, removeFkQuery) + } + queries = append(queries, "set global super_read_only=1") + err = utils.RunSQLs(t, queries, vttablet, fmt.Sprintf("vt_%v", keyspace)) + require.NoError(t, err) +} + +// checkReplicationHealthy verifies that the replication on the given vttablet is working as expected. +func checkReplicationHealthy(t *testing.T, vttablet *cluster.Vttablet) { + rs, err := utils.RunSQL(t, "show replica status", vttablet, "") + require.NoError(t, err) + var ioThreadRunning, sqlThreadRunning string + for idx, value := range rs.Rows[0] { + fieldName := rs.Fields[idx].Name + if fieldName == "Replica_IO_Running" { + ioThreadRunning = value.ToString() + } + if fieldName == "Replica_SQL_Running" { + sqlThreadRunning = value.ToString() + } + } + require.Equal(t, "Yes", sqlThreadRunning, "SQL Thread isn't happy on %v, Replica status - %v", vttablet.Alias, rs.Rows) + require.Equal(t, "Yes", ioThreadRunning, "IO Thread isn't happy on %v, Replica status - %v", vttablet.Alias, rs.Rows) +} + +// compareVitessAndMySQLResults compares Vitess and MySQL results and reports if they don't report the same number of rows affected. 
+func compareVitessAndMySQLResults(t *testing.T, vtRes sql.Result, mysqlRes sql.Result) { + if vtRes == nil && mysqlRes == nil { + return + } + if vtRes == nil { + t.Error("Vitess result is 'nil' while MySQL's is not.") + return + } + if mysqlRes == nil { + t.Error("MySQL result is 'nil' while Vitess' is not.") + return + } + vtRa, err := vtRes.RowsAffected() + require.NoError(t, err) + mysqlRa, err := mysqlRes.RowsAffected() + require.NoError(t, err) + if mysqlRa != vtRa { + t.Errorf("Vitess and MySQL don't agree on the rows affected. Vitess rows affected - %v, MySQL rows affected - %v", vtRa, mysqlRa) + } +} + +// compareVitessAndMySQLErrors compares Vitess and MySQL errors and reports if one errors and the other doesn't. +func compareVitessAndMySQLErrors(t *testing.T, vtErr, mysqlErr error) { + if vtErr != nil && mysqlErr != nil || vtErr == nil && mysqlErr == nil { + return + } + out := fmt.Sprintf("Vitess and MySQL are not erroring the same way.\nVitess error: %v\nMySQL error: %v", vtErr, mysqlErr) + t.Error(out) +} diff --git a/go/test/endtoend/vtgate/gen4/gen4_test.go b/go/test/endtoend/vtgate/gen4/gen4_test.go index 77b7daea05f..8764328495c 100644 --- a/go/test/endtoend/vtgate/gen4/gen4_test.go +++ b/go/test/endtoend/vtgate/gen4/gen4_test.go @@ -60,25 +60,6 @@ func TestCorrelatedExistsSubquery(t *testing.T) { utils.AssertMatches(t, mcmp.VtConn, `select id from t1 where id in (select id from t2) order by id`, `[[INT64(1)] [INT64(100)]]`) - utils.AssertMatches(t, mcmp.VtConn, ` -select id -from t1 -where exists( - select t2.id, count(*) - from t2 - where t1.col = t2.tcol2 - having count(*) > 0 -)`, - `[[INT64(100)]]`) - utils.AssertMatches(t, mcmp.VtConn, ` -select id -from t1 -where exists( - select t2.id, count(*) - from t2 - where t1.col = t2.tcol1 -) order by id`, - `[[INT64(1)] [INT64(4)] [INT64(100)]]`) utils.AssertMatchesNoOrder(t, mcmp.VtConn, ` select id from t1 @@ -132,34 +113,28 @@ func TestDistinctAggregationFunc(t *testing.T) { defer closer() // 
insert some data. - utils.Exec(t, mcmp.VtConn, `insert into t2(id, tcol1, tcol2) values (1, 'A', 'A'),(2, 'B', 'C'),(3, 'A', 'C'),(4, 'C', 'A'),(5, 'A', 'A'),(6, 'B', 'C'),(7, 'B', 'A'),(8, 'C', 'A')`) + mcmp.Exec(`insert into t2(id, tcol1, tcol2) values (1, 'A', 'A'),(2, 'B', 'C'),(3, 'A', 'C'),(4, 'C', 'A'),(5, 'A', 'A'),(6, 'B', 'C'),(7, 'B', 'A'),(8, 'C', 'A')`) // count on primary vindex - utils.AssertMatches(t, mcmp.VtConn, `select tcol1, count(distinct id) from t2 group by tcol1`, - `[[VARCHAR("A") INT64(3)] [VARCHAR("B") INT64(3)] [VARCHAR("C") INT64(2)]]`) + mcmp.Exec(`select tcol1, count(distinct id) from t2 group by tcol1`) // count on any column - utils.AssertMatches(t, mcmp.VtConn, `select tcol1, count(distinct tcol2) from t2 group by tcol1`, - `[[VARCHAR("A") INT64(2)] [VARCHAR("B") INT64(2)] [VARCHAR("C") INT64(1)]]`) + mcmp.Exec(`select tcol1, count(distinct tcol2) from t2 group by tcol1`) // sum of columns - utils.AssertMatches(t, mcmp.VtConn, `select sum(id), sum(tcol1) from t2`, - `[[DECIMAL(36) FLOAT64(0)]]`) + mcmp.Exec(`select sum(id), sum(tcol1) from t2`) // sum on primary vindex - utils.AssertMatches(t, mcmp.VtConn, `select tcol1, sum(distinct id) from t2 group by tcol1`, - `[[VARCHAR("A") DECIMAL(9)] [VARCHAR("B") DECIMAL(15)] [VARCHAR("C") DECIMAL(12)]]`) + mcmp.Exec(`select tcol1, sum(distinct id) from t2 group by tcol1`) // sum on any column - utils.AssertMatches(t, mcmp.VtConn, `select tcol1, sum(distinct tcol2) from t2 group by tcol1`, - `[[VARCHAR("A") DECIMAL(0)] [VARCHAR("B") DECIMAL(0)] [VARCHAR("C") DECIMAL(0)]]`) + mcmp.Exec(`select tcol1, sum(distinct tcol2) from t2 group by tcol1`) // insert more data to get values on sum - utils.Exec(t, mcmp.VtConn, `insert into t2(id, tcol1, tcol2) values (9, 'AA', null),(10, 'AA', '4'),(11, 'AA', '4'),(12, null, '5'),(13, null, '6'),(14, 'BB', '10'),(15, 'BB', '20'),(16, 'BB', 'X')`) + mcmp.Exec(`insert into t2(id, tcol1, tcol2) values (9, 'AA', null),(10, 'AA', '4'),(11, 'AA', '4'),(12, 
null, '5'),(13, null, '6'),(14, 'BB', '10'),(15, 'BB', '20'),(16, 'BB', 'X')`) // multi distinct - utils.AssertMatches(t, mcmp.VtConn, `select tcol1, count(distinct tcol2), sum(distinct tcol2) from t2 group by tcol1`, - `[[NULL INT64(2) DECIMAL(11)] [VARCHAR("A") INT64(2) DECIMAL(0)] [VARCHAR("AA") INT64(1) DECIMAL(4)] [VARCHAR("B") INT64(2) DECIMAL(0)] [VARCHAR("BB") INT64(3) DECIMAL(30)] [VARCHAR("C") INT64(1) DECIMAL(0)]]`) + mcmp.Exec(`select tcol1, count(distinct tcol2), sum(distinct tcol2) from t2 group by tcol1`) } func TestDistinct(t *testing.T) { @@ -170,7 +145,7 @@ func TestDistinct(t *testing.T) { utils.Exec(t, mcmp.VtConn, `insert into t2(id, tcol1, tcol2) values (1, 'A', 'A'),(2, 'B', 'C'),(3, 'A', 'C'),(4, 'C', 'A'),(5, 'A', 'A'),(6, 'B', 'C'),(7, 'B', 'A'),(8, 'C', 'A')`) // multi distinct - utils.AssertMatches(t, mcmp.VtConn, `select distinct tcol1, tcol2 from t2`, + utils.AssertMatchesNoOrder(t, mcmp.VtConn, `select distinct tcol1, tcol2 from t2`, `[[VARCHAR("A") VARCHAR("A")] [VARCHAR("A") VARCHAR("C")] [VARCHAR("B") VARCHAR("A")] [VARCHAR("B") VARCHAR("C")] [VARCHAR("C") VARCHAR("A")]]`) } @@ -184,17 +159,9 @@ func TestSubQueries(t *testing.T) { utils.AssertMatches(t, mcmp.VtConn, `select t2.tcol1, t2.tcol2 from t2 where t2.id IN (select id from t3) order by t2.id`, `[[VARCHAR("A") VARCHAR("A")] [VARCHAR("B") VARCHAR("C")] [VARCHAR("A") VARCHAR("C")] [VARCHAR("C") VARCHAR("A")] [VARCHAR("A") VARCHAR("A")] [VARCHAR("B") VARCHAR("C")] [VARCHAR("B") VARCHAR("A")] [VARCHAR("C") VARCHAR("B")]]`) utils.AssertMatches(t, mcmp.VtConn, `select t2.tcol1, t2.tcol2 from t2 where t2.id IN (select t3.id from t3 join t2 on t2.id = t3.id) order by t2.id`, `[[VARCHAR("A") VARCHAR("A")] [VARCHAR("B") VARCHAR("C")] [VARCHAR("A") VARCHAR("C")] [VARCHAR("C") VARCHAR("A")] [VARCHAR("A") VARCHAR("A")] [VARCHAR("B") VARCHAR("C")] [VARCHAR("B") VARCHAR("A")] [VARCHAR("C") VARCHAR("B")]]`) - utils.AssertMatches(t, mcmp.VtConn, `select u_a.a from u_a left join t2 on t2.id 
IN (select id from t2)`, `[]`) // inserting some data in u_a utils.Exec(t, mcmp.VtConn, `insert into u_a(id, a) values (1, 1)`) - // execute same query again. - qr := utils.Exec(t, mcmp.VtConn, `select u_a.a from u_a left join t2 on t2.id IN (select id from t2)`) - assert.EqualValues(t, 8, len(qr.Rows)) - for index, row := range qr.Rows { - assert.EqualValues(t, `[INT64(1)]`, fmt.Sprintf("%v", row), "does not match for row: %d", index+1) - } - // fail as projection subquery is not scalar _, err := utils.ExecAllowError(t, mcmp.VtConn, `select (select id from t2) from t2 order by id`) assert.EqualError(t, err, "subquery returned more than one row (errno 1105) (sqlstate HY000) during query: select (select id from t2) from t2 order by id") @@ -202,6 +169,24 @@ func TestSubQueries(t *testing.T) { utils.AssertMatches(t, mcmp.VtConn, `select (select id from t2 order by id limit 1) from t2 order by id limit 2`, `[[INT64(1)] [INT64(1)]]`) } +func TestSubQueriesOnOuterJoinOnCondition(t *testing.T) { + t.Skip("not supported") + mcmp, closer := start(t) + defer closer() + + utils.Exec(t, mcmp.VtConn, `insert into t2(id, tcol1, tcol2) values (1, 'A', 'A'),(2, 'B', 'C'),(3, 'A', 'C'),(4, 'C', 'A'),(5, 'A', 'A'),(6, 'B', 'C'),(7, 'B', 'A'),(8, 'C', 'B')`) + utils.Exec(t, mcmp.VtConn, `insert into t3(id, tcol1, tcol2) values (1, 'A', 'A'),(2, 'B', 'C'),(3, 'A', 'C'),(4, 'C', 'A'),(5, 'A', 'A'),(6, 'B', 'C'),(7, 'B', 'A'),(8, 'C', 'B')`) + + utils.AssertMatches(t, mcmp.VtConn, `select u_a.a from u_a left join t2 on t2.id IN (select id from t2)`, `[]`) + // inserting some data in u_a + utils.Exec(t, mcmp.VtConn, `insert into u_a(id, a) values (1, 1)`) + qr := utils.Exec(t, mcmp.VtConn, `select u_a.a from u_a left join t2 on t2.id IN (select id from t2)`) + assert.EqualValues(t, 8, len(qr.Rows)) + for index, row := range qr.Rows { + assert.EqualValues(t, `[INT64(1)]`, fmt.Sprintf("%v", row), "does not match for row: %d", index+1) + } +} + func TestPlannerWarning(t *testing.T) { mcmp, 
closer := start(t) defer closer() diff --git a/go/test/endtoend/vtgate/gen4/main_test.go b/go/test/endtoend/vtgate/gen4/main_test.go index cc50cbba40a..378b2d2969e 100644 --- a/go/test/endtoend/vtgate/gen4/main_test.go +++ b/go/test/endtoend/vtgate/gen4/main_test.go @@ -85,7 +85,7 @@ func TestMain(m *testing.M) { } clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal"} - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-schema-change-signal-interval", "0.1"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal"} err = clusterInstance.StartKeyspace(*sKs, shardedKsShards, 0, false) if err != nil { return 1 diff --git a/go/test/endtoend/vtgate/gen4/system_schema_test.go b/go/test/endtoend/vtgate/gen4/system_schema_test.go index 1138b4e0d5c..f79e71b93da 100644 --- a/go/test/endtoend/vtgate/gen4/system_schema_test.go +++ b/go/test/endtoend/vtgate/gen4/system_schema_test.go @@ -126,7 +126,8 @@ func TestFKConstraintUsingInformationSchema(t *testing.T) { query := "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk on fk.constraint_schema = rc.constraint_schema and fk.constraint_name = rc.constraint_name where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = 't7_fk' and rc.constraint_schema = database() and rc.table_name = 't7_fk'" utils.AssertMatchesAny(t, conn, query, `[[VARCHAR("t7_xxhash") VARCHAR("uid") VARCHAR("t7_uid") VARCHAR("t7_fk_ibfk_1") VARCHAR("CASCADE") VARCHAR("SET NULL")]]`, - `[[VARBINARY("t7_xxhash") VARCHAR("uid") VARCHAR("t7_uid") VARCHAR("t7_fk_ibfk_1") BINARY("CASCADE") BINARY("SET NULL")]]`) + `[[VARBINARY("t7_xxhash") VARCHAR("uid") VARCHAR("t7_uid") 
VARCHAR("t7_fk_ibfk_1") BINARY("CASCADE") BINARY("SET NULL")]]`, + `[[VARCHAR("t7_xxhash") VARCHAR("uid") VARCHAR("t7_uid") VARCHAR("t7_fk_ibfk_1") BINARY("CASCADE") BINARY("SET NULL")]]`) } func TestConnectWithSystemSchema(t *testing.T) { diff --git a/go/test/endtoend/vtgate/grpc_api/main_test.go b/go/test/endtoend/vtgate/grpc_api/main_test.go index a51c6d9e6f2..3c8605f79a0 100644 --- a/go/test/endtoend/vtgate/grpc_api/main_test.go +++ b/go/test/endtoend/vtgate/grpc_api/main_test.go @@ -111,6 +111,7 @@ func TestMain(m *testing.M) { "--grpc_auth_static_password_file", grpcServerAuthStaticPath, "--grpc_use_effective_callerid", "--grpc-use-static-authentication-callerid", + "--grpc-send-session-in-streaming", } // Configure vttablet to use table ACL diff --git a/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go index ab844a8ffd1..4971d03060b 100644 --- a/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go +++ b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go @@ -23,10 +23,8 @@ package keyspacewatches import ( "database/sql" "fmt" - "math/rand" "os" "testing" - "time" _ "github.com/go-sql-driver/mysql" "github.com/stretchr/testify/require" @@ -115,7 +113,6 @@ func createCluster(extraVTGateArgs []string) (*cluster.LocalProcessCluster, int) Host: clusterInstance.Hostname, Port: clusterInstance.VtgateMySQLPort, } - rand.Seed(time.Now().UnixNano()) return clusterInstance, 0 } diff --git a/go/test/endtoend/vtgate/lookup_test.go b/go/test/endtoend/vtgate/lookup_test.go index a95201ca87f..b4b53295d8d 100644 --- a/go/test/endtoend/vtgate/lookup_test.go +++ b/go/test/endtoend/vtgate/lookup_test.go @@ -24,6 +24,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -126,8 +128,8 @@ func TestConsistentLookup(t *testing.T) { 
_, err = conn.ExecuteFetch("insert into t1(id1, id2) values(1, 4)", 1000, false) utils.Exec(t, conn, "rollback") require.Error(t, err) - mysqlErr := err.(*mysql.SQLError) - assert.Equal(t, mysql.ERDupEntry, mysqlErr.Num) + mysqlErr := err.(*sqlerror.SQLError) + assert.Equal(t, sqlerror.ERDupEntry, mysqlErr.Num) assert.Equal(t, "23000", mysqlErr.State) assert.Contains(t, mysqlErr.Message, "reverted partial DML execution") diff --git a/go/test/endtoend/vtgate/main_test.go b/go/test/endtoend/vtgate/main_test.go index 1d2bc59b50a..12abcf4dd01 100644 --- a/go/test/endtoend/vtgate/main_test.go +++ b/go/test/endtoend/vtgate/main_test.go @@ -73,7 +73,7 @@ func TestMain(m *testing.M) { VSchema: VSchema, } clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal"} - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-schema-change-signal-interval", "0.1", "--queryserver-config-max-result-size", "100", "--queryserver-config-terse-errors"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-max-result-size", "100", "--queryserver-config-terse-errors"} err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false) if err != nil { return 1 diff --git a/go/test/endtoend/vtgate/misc_test.go b/go/test/endtoend/vtgate/misc_test.go index 19442d55e61..7ebfe7aeef1 100644 --- a/go/test/endtoend/vtgate/misc_test.go +++ b/go/test/endtoend/vtgate/misc_test.go @@ -20,13 +20,21 @@ import ( "fmt" "testing" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/test/endtoend/utils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func TestInsertNeg(t *testing.T) { + conn, closer := start(t) + defer closer() + + utils.Exec(t, conn, "insert ignore into t10(id, sharding_key, col1, col2, col3) values(10, 20, 'a', 1, 2), (20, -20, 'b', 3, 4), (30, -40, 'c', 6, 7), (40, 60, 'd', 4, 10)") + 
utils.Exec(t, conn, "insert ignore into t10(id, sharding_key, col1, col2, col3) values(1, 2, 'a', 1, 2), (2, -2, 'b', -3, 4), (3, -4, 'c', 6, -7), (4, 6, 'd', 4, -10)") +} + func TestSelectNull(t *testing.T) { conn, closer := start(t) defer closer() @@ -428,7 +436,7 @@ ts12 TIMESTAMP DEFAULT LOCALTIME() )`) utils.Exec(t, conn, "drop table function_default") - utils.Exec(t, conn, `create table function_default (ts TIMESTAMP DEFAULT now())`) + utils.Exec(t, conn, `create table function_default (ts TIMESTAMP DEFAULT (UTC_TIMESTAMP))`) utils.Exec(t, conn, "drop table function_default") utils.Exec(t, conn, `create table function_default (x varchar(25) DEFAULT "check")`) @@ -694,8 +702,8 @@ func TestDescribeVindex(t *testing.T) { _, err := conn.ExecuteFetch("describe hash", 1000, false) require.Error(t, err) - mysqlErr := err.(*mysql.SQLError) - assert.Equal(t, mysql.ERNoSuchTable, mysqlErr.Num) + mysqlErr := err.(*sqlerror.SQLError) + assert.Equal(t, sqlerror.ERNoSuchTable, mysqlErr.Num) assert.Equal(t, "42S02", mysqlErr.State) assert.Contains(t, mysqlErr.Message, "NotFound desc") } diff --git a/go/test/endtoend/vtgate/mysql80/misc_test.go b/go/test/endtoend/vtgate/mysql80/misc_test.go index 6a642178432..b29eb13ecdc 100644 --- a/go/test/endtoend/vtgate/mysql80/misc_test.go +++ b/go/test/endtoend/vtgate/mysql80/misc_test.go @@ -65,8 +65,8 @@ ts12 TIMESTAMP DEFAULT LOCALTIME() )`) utils.Exec(t, conn, "drop table function_default") - // this query works because utc_timestamp will get parenthesised before reaching MySQL. However, this syntax is not supported in MySQL 8.0 - utils.Exec(t, conn, `create table function_default (ts TIMESTAMP DEFAULT UTC_TIMESTAMP)`) + // this query works only as an expression. 
+ utils.Exec(t, conn, `create table function_default (ts TIMESTAMP DEFAULT (UTC_TIMESTAMP))`) utils.Exec(t, conn, "drop table function_default") utils.Exec(t, conn, `create table function_default (x varchar(25) DEFAULT "check")`) diff --git a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go index cc4eb100114..6f15aa71d5b 100644 --- a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go +++ b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go @@ -18,10 +18,14 @@ package aggregation import ( "fmt" + "slices" + "sort" + "strings" "testing" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -68,6 +72,7 @@ func testAggregateTypes(t *testing.T) { mcmp.AssertMatches("select val1 as a, count(*) from aggr_test group by a", `[[VARCHAR("a") INT64(2)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(2)] [VARCHAR("d") INT64(1)] [VARCHAR("e") INT64(2)]]`) mcmp.AssertMatches("select val1 as a, count(*) from aggr_test group by a order by a", `[[VARCHAR("a") INT64(2)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(2)] [VARCHAR("d") INT64(1)] [VARCHAR("e") INT64(2)]]`) mcmp.AssertMatches("select val1 as a, count(*) from aggr_test group by a order by 2, a", `[[VARCHAR("b") INT64(1)] [VARCHAR("d") INT64(1)] [VARCHAR("a") INT64(2)] [VARCHAR("c") INT64(2)] [VARCHAR("e") INT64(2)]]`) + mcmp.AssertMatches("select sum(val1) from aggr_test", `[[FLOAT64(0)]]`) } func TestGroupBy(t *testing.T) { @@ -76,12 +81,12 @@ func TestGroupBy(t *testing.T) { mcmp.Exec("insert into t3(id5, id6, id7) values(1,1,2), (2,2,4), (3,2,4), (4,1,2), (5,1,2), (6,3,6)") // test ordering and group by int column mcmp.AssertMatches("select id6, id7, count(*) k from t3 group by id6, id7 order by k", `[[INT64(3) INT64(6) INT64(1)] [INT64(2) INT64(4) INT64(2)] [INT64(1) INT64(2) INT64(3)]]`) - 
mcmp.AssertMatches("select /*vt+ PLANNER=Gen4 */ id6+id7, count(*) k from t3 group by id6+id7 order by k", `[[INT64(9) INT64(1)] [INT64(6) INT64(2)] [INT64(3) INT64(3)]]`) + mcmp.AssertMatches("select id6+id7, count(*) k from t3 group by id6+id7 order by k", `[[INT64(9) INT64(1)] [INT64(6) INT64(2)] [INT64(3) INT64(3)]]`) // Test the same queries in streaming mode utils.Exec(t, mcmp.VtConn, "set workload = olap") mcmp.AssertMatches("select id6, id7, count(*) k from t3 group by id6, id7 order by k", `[[INT64(3) INT64(6) INT64(1)] [INT64(2) INT64(4) INT64(2)] [INT64(1) INT64(2) INT64(3)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=Gen4 */ id6+id7, count(*) k from t3 group by id6+id7 order by k", `[[INT64(9) INT64(1)] [INT64(6) INT64(2)] [INT64(3) INT64(3)]]`) + mcmp.AssertMatches("select id6+id7, count(*) k from t3 group by id6+id7 order by k", `[[INT64(9) INT64(1)] [INT64(6) INT64(2)] [INT64(3) INT64(3)]]`) } func TestEqualFilterOnScatter(t *testing.T) { @@ -95,18 +100,18 @@ func TestEqualFilterOnScatter(t *testing.T) { t.Run(workload, func(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having 1 = 1", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a = 5", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having 5 = a", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a = a", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a = 3+2", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having 1+4 = 3+2", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a = 1", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test 
having a = \"1\"", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a = \"5\"", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a = 5.00", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a, val1 from aggr_test group by val1 having a = 1.00", `[[INT64(1) VARCHAR("a")] [INT64(1) VARCHAR("b")] [INT64(1) VARCHAR("c")] [INT64(1) VARCHAR("d")] [INT64(1) VARCHAR("e")]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ 1 from aggr_test having count(*) = 5", `[[INT64(1)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having 1 = 1", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a = 5", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having 5 = a", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a = a", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a = 3+2", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having 1+4 = 3+2", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a = 1", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a = \"1\"", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a = \"5\"", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a = 5.00", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a, val1 from aggr_test group by val1 having a = 1.00", `[[INT64(1) VARCHAR("a")] [INT64(1) VARCHAR("b")] [INT64(1) VARCHAR("c")] [INT64(1) VARCHAR("d")] [INT64(1) VARCHAR("e")]]`) + mcmp.AssertMatches("select 1 from aggr_test having count(*) = 5", `[[INT64(1)]]`) }) } } @@ -118,7 +123,7 @@ func TestAggrOnJoin(t *testing.T) { mcmp.Exec("insert into t3(id5, id6, id7) values(1,1,1), (2,2,4), (3,2,4), (4,1,2), (5,1,1), (6,3,6)") mcmp.Exec("insert into aggr_test(id, 
val1, val2) values(1,'a',1), (2,'a',1), (3,'b',1), (4,'c',3), (5,'c',4)") - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) from aggr_test a join t3 t on a.val2 = t.id7", + mcmp.AssertMatches("select count(*) from aggr_test a join t3 t on a.val2 = t.id7", "[[INT64(8)]]") /* mysql> select count(*) from aggr_test a join t3 t on a.val2 = t.id7; @@ -129,7 +134,7 @@ func TestAggrOnJoin(t *testing.T) { +----------+ 1 row in set (0.00 sec) */ - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ a.val1, count(*) from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1", + mcmp.AssertMatches("select a.val1, count(*) from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1", `[[VARCHAR("a") INT64(4)] [VARCHAR("b") INT64(2)] [VARCHAR("c") INT64(2)]]`) /* mysql> select a.val1, count(*) from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1; @@ -143,7 +148,7 @@ func TestAggrOnJoin(t *testing.T) { 3 rows in set (0.00 sec) */ - mcmp.AssertMatches(`select /*vt+ PLANNER=gen4 */ max(a1.val2), max(a2.val2), count(*) from aggr_test a1 join aggr_test a2 on a1.val2 = a2.id join t3 t on a2.val2 = t.id7`, + mcmp.AssertMatches(`select max(a1.val2), max(a2.val2), count(*) from aggr_test a1 join aggr_test a2 on a1.val2 = a2.id join t3 t on a2.val2 = t.id7`, "[[INT64(3) INT64(1) INT64(8)]]") /* mysql> select max(a1.val2), max(a2.val2), count(*) from aggr_test a1 join aggr_test a2 on a1.val2 = a2.id join t3 t on a2.val2 = t.id7; @@ -155,17 +160,17 @@ func TestAggrOnJoin(t *testing.T) { 1 row in set (0.00 sec) */ - mcmp.AssertMatches(`select /*vt+ PLANNER=gen4 */ a1.val1, count(distinct a1.val2) from aggr_test a1 join aggr_test a2 on a1.val2 = a2.id join t3 t on a2.val2 = t.id7 group by a1.val1`, + mcmp.AssertMatches(`select a1.val1, count(distinct a1.val2) from aggr_test a1 join aggr_test a2 on a1.val2 = a2.id join t3 t on a2.val2 = t.id7 group by a1.val1`, `[[VARCHAR("a") INT64(1)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(1)]]`) // having on aggregation on top of join 
- mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ a.val1, count(*) from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1 having count(*) = 4", + mcmp.AssertMatches("select a.val1, count(*) from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1 having count(*) = 4", `[[VARCHAR("a") INT64(4)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ a.val1, count(*) as leCount from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1 having leCount = 4", + mcmp.AssertMatches("select a.val1, count(*) as leCount from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1 having leCount = 4", `[[VARCHAR("a") INT64(4)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ a.val1 from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1 having count(*) = 4", + mcmp.AssertMatches("select a.val1 from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1 having count(*) = 4", `[[VARCHAR("a")]]`) } @@ -180,15 +185,15 @@ func TestNotEqualFilterOnScatter(t *testing.T) { t.Run(workload, func(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a != 5", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having 5 != a", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a != a", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a != 3+2", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a != 1", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a != \"1\"", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a != \"5\"", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a != 5.00", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 
*/ 1 from aggr_test having count(*) != 5", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a != 5", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having 5 != a", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a != a", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a != 3+2", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a != 1", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a != \"1\"", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a != \"5\"", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a != 5.00", `[]`) + mcmp.AssertMatches("select 1 from aggr_test having count(*) != 5", `[]`) }) } } @@ -203,15 +208,15 @@ func TestLessFilterOnScatter(t *testing.T) { for _, workload := range workloads { t.Run(workload, func(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a < 10", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having 1 < a", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a < a", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a < 3+2", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a < 1", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a < \"10\"", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a < \"5\"", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a < 6.00", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ 1 from aggr_test having count(*) < 5", `[]`) + 
mcmp.AssertMatches("select count(*) as a from aggr_test having a < 10", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having 1 < a", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a < a", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a < 3+2", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a < 1", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a < \"10\"", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a < \"5\"", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a < 6.00", `[[INT64(5)]]`) + mcmp.AssertMatches("select 1 from aggr_test having count(*) < 5", `[]`) }) } } @@ -227,15 +232,15 @@ func TestLessEqualFilterOnScatter(t *testing.T) { t.Run(workload, func(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a <= 10", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having 1 <= a", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a <= a", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a <= 3+2", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a <= 1", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a <= \"10\"", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a <= \"5\"", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a <= 5.00", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ 1 from aggr_test having count(*) <= 5", `[[INT64(1)]]`) + mcmp.AssertMatches("select count(*) as a 
from aggr_test having a <= 10", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having 1 <= a", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a <= a", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a <= 3+2", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a <= 1", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a <= \"10\"", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a <= \"5\"", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a <= 5.00", `[[INT64(5)]]`) + mcmp.AssertMatches("select 1 from aggr_test having count(*) <= 5", `[[INT64(1)]]`) }) } } @@ -251,15 +256,15 @@ func TestGreaterFilterOnScatter(t *testing.T) { t.Run(workload, func(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a > 1", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having 1 > a", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a > a", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a > 3+1", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a > 10", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a > \"1\"", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a > \"5\"", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a > 4.00", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ 1 from aggr_test having count(*) > 5", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a > 1", 
`[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having 1 > a", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a > a", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a > 3+1", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a > 10", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a > \"1\"", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a > \"5\"", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a > 4.00", `[[INT64(5)]]`) + mcmp.AssertMatches("select 1 from aggr_test having count(*) > 5", `[]`) }) } } @@ -275,15 +280,15 @@ func TestGreaterEqualFilterOnScatter(t *testing.T) { t.Run(workload, func(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a >= 1", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having 1 >= a", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a >= a", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a >= 3+2", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a >= 10", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a >= \"1\"", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a >= \"5\"", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a >= 5.00", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ 1 from aggr_test having count(*) >= 5", `[[INT64(1)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a >= 1", `[[INT64(5)]]`) + mcmp.AssertMatches("select 
count(*) as a from aggr_test having 1 >= a", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a >= a", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a >= 3+2", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a >= 10", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a >= \"1\"", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a >= \"5\"", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a >= 5.00", `[[INT64(5)]]`) + mcmp.AssertMatches("select 1 from aggr_test having count(*) >= 5", `[[INT64(1)]]`) }) } } @@ -297,7 +302,7 @@ func TestGroupByOnlyFullGroupByOff(t *testing.T) { mcmp.Exec("set @@sql_mode = ' '") // We do not use AssertMatches here because the results for the second column are random - _, err := mcmp.ExecAndIgnore("select /*vt+ PLANNER=gen4 */ id2, id3 from t9 group by id2") + _, err := mcmp.ExecAndIgnore("select id2, id3 from t9 group by id2") require.NoError(t, err) } @@ -309,22 +314,22 @@ func TestAggOnTopOfLimit(t *testing.T) { for _, workload := range []string{"oltp", "olap"} { t.Run(workload, func(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(*) from (select id, val1 from aggr_test where val2 < 4 limit 2) as x", "[[INT64(2)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(val1) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(*) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(2)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(val1) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(1)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(val2) from (select id, val2 from 
aggr_test where val2 is null limit 2) as x", "[[INT64(0)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ val1, count(*) from (select id, val1 from aggr_test where val2 < 4 order by val1 limit 2) as x group by val1", `[[NULL INT64(1)] [VARCHAR("a") INT64(1)]]`) - mcmp.AssertMatchesNoOrder(" select /*vt+ PLANNER=gen4 */ val1, count(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", `[[NULL INT64(1)] [VARCHAR("a") INT64(2)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(2)]]`) + mcmp.AssertMatches(" select count(*) from (select id, val1 from aggr_test where val2 < 4 limit 2) as x", "[[INT64(2)]]") + mcmp.AssertMatches(" select count(val1) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2)]]") + mcmp.AssertMatches(" select count(*) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(2)]]") + mcmp.AssertMatches(" select count(val1) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(1)]]") + mcmp.AssertMatches(" select count(val2) from (select id, val2 from aggr_test where val2 is null limit 2) as x", "[[INT64(0)]]") + mcmp.AssertMatches(" select val1, count(*) from (select id, val1 from aggr_test where val2 < 4 order by val1 limit 2) as x group by val1", `[[NULL INT64(1)] [VARCHAR("a") INT64(1)]]`) + mcmp.AssertMatchesNoOrder(" select val1, count(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", `[[NULL INT64(1)] [VARCHAR("a") INT64(2)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(2)]]`) // mysql returns FLOAT64(0), vitess returns DECIMAL(0) - mcmp.AssertMatchesNoCompare(" select /*vt+ PLANNER=gen4 */ count(*), sum(val1) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) FLOAT64(0)]]", "[[INT64(2) DECIMAL(0)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(val1), sum(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as 
x", "[[INT64(2) DECIMAL(7)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(*), sum(id) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(2) DECIMAL(14)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(val1), sum(id) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(1) DECIMAL(14)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(val2), sum(val2) from (select id, val2 from aggr_test where val2 is null limit 2) as x", "[[INT64(0) NULL]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ val1, count(*), sum(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 limit 2) as x group by val1", `[[NULL INT64(1) DECIMAL(7)] [VARCHAR("a") INT64(1) DECIMAL(2)]]`) - mcmp.AssertMatchesNoOrder(" select /*vt+ PLANNER=gen4 */ val1, count(val2), sum(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", `[[NULL INT64(1) DECIMAL(2)] [VARCHAR("a") INT64(2) DECIMAL(7)] [VARCHAR("b") INT64(1) DECIMAL(1)] [VARCHAR("c") INT64(2) DECIMAL(7)]]`) + mcmp.AssertMatchesNoCompare(" select count(*), sum(val1) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) FLOAT64(0)]]", "[[INT64(2) FLOAT64(0)]]") + mcmp.AssertMatches(" select count(val1), sum(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) DECIMAL(7)]]") + mcmp.AssertMatches(" select count(*), sum(id) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(2) DECIMAL(14)]]") + mcmp.AssertMatches(" select count(val1), sum(id) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(1) DECIMAL(14)]]") + mcmp.AssertMatches(" select count(val2), sum(val2) from (select id, val2 from aggr_test where val2 is null limit 2) as x", "[[INT64(0) NULL]]") + mcmp.AssertMatches(" select val1, count(*), sum(id) from (select id, val1 from aggr_test where val2 < 4 order by 
val1 limit 2) as x group by val1", `[[NULL INT64(1) DECIMAL(7)] [VARCHAR("a") INT64(1) DECIMAL(2)]]`) + mcmp.AssertMatchesNoOrder(" select val1, count(val2), sum(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", `[[NULL INT64(1) DECIMAL(2)] [VARCHAR("a") INT64(2) DECIMAL(7)] [VARCHAR("b") INT64(1) DECIMAL(1)] [VARCHAR("c") INT64(2) DECIMAL(7)]]`) }) } } @@ -336,10 +341,10 @@ func TestEmptyTableAggr(t *testing.T) { for _, workload := range []string{"oltp", "olap"} { t.Run(workload, func(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = %s", workload)) - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ t1.`name`, count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ t1.`name`, count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") + mcmp.AssertMatches(" select count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") + mcmp.AssertMatches(" select count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") + mcmp.AssertMatches(" select t1.`name`, count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") + mcmp.AssertMatches(" select t1.`name`, count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") }) } @@ -348,10 +353,10 @@ func TestEmptyTableAggr(t *testing.T) { for _, workload := range []string{"oltp", "olap"} { t.Run(workload, func(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = %s", workload)) - 
mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ t1.`name`, count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ t1.`name`, count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") + mcmp.AssertMatches(" select count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") + mcmp.AssertMatches(" select count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") + mcmp.AssertMatches(" select t1.`name`, count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") + mcmp.AssertMatches(" select t1.`name`, count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") }) } @@ -363,20 +368,20 @@ func TestOrderByCount(t *testing.T) { mcmp.Exec("insert into t9(id1, id2, id3) values(1, '1', '1'), (2, '2', '2'), (3, '2', '2'), (4, '3', '3'), (5, '3', '3'), (6, '3', '3')") - mcmp.AssertMatches("SELECT /*vt+ PLANNER=gen4 */ t9.id2 FROM t9 GROUP BY t9.id2 ORDER BY COUNT(t9.id2) DESC", `[[VARCHAR("3")] [VARCHAR("2")] [VARCHAR("1")]]`) + mcmp.AssertMatches("SELECT t9.id2 FROM t9 GROUP BY t9.id2 ORDER BY COUNT(t9.id2) DESC", `[[VARCHAR("3")] [VARCHAR("2")] [VARCHAR("1")]]`) } -func TestAggregateRandom(t *testing.T) { +func TestAggregateAnyValue(t *testing.T) { mcmp, closer := start(t) defer closer() mcmp.Exec("insert into t1(t1_id, name, value, shardKey) values (1, 'name 1', 'value 1', 1), (2, 'name 2', 'value 2', 2)") mcmp.Exec("insert into t2(id, shardKey) values (1, 10), (2, 20)") - 
mcmp.AssertMatches("SELECT /*vt+ PLANNER=gen4 */ t1.shardKey, t1.name, count(t2.id) FROM t1 JOIN t2 ON t1.value != t2.shardKey GROUP BY t1.t1_id", `[[INT64(1) VARCHAR("name 1") INT64(2)] [INT64(2) VARCHAR("name 2") INT64(2)]]`) + mcmp.AssertMatches("SELECT t1.shardKey, t1.name, count(t2.id) FROM t1 JOIN t2 ON t1.value != t2.shardKey GROUP BY t1.t1_id", `[[INT64(1) VARCHAR("name 1") INT64(2)] [INT64(2) VARCHAR("name 2") INT64(2)]]`) mcmp.Exec("set sql_mode=''") - mcmp.AssertMatches("select /*vt+ PLANNER=Gen4 */ tbl0.comm, count(*) from emp as tbl0, emp as tbl1 where tbl0.empno = tbl1.deptno", `[[NULL INT64(0)]]`) + mcmp.AssertMatches("select tbl0.comm, count(*) from emp as tbl0, emp as tbl1 where tbl0.empno = tbl1.deptno", `[[NULL INT64(0)]]`) } // TestAggregateLeftJoin tests that aggregates work with left joins and does not ignore the count when column value does not match the right side table. @@ -420,7 +425,7 @@ func TestScalarAggregate(t *testing.T) { defer closer() mcmp.Exec("insert into aggr_test(id, val1, val2) values(1,'a',1), (2,'A',1), (3,'b',1), (4,'c',3), (5,'c',4)") - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(distinct val1) from aggr_test", `[[INT64(3)]]`) + mcmp.AssertMatches("select count(distinct val1) from aggr_test", `[[INT64(3)]]`) } func TestAggregationRandomOnAnAggregatedValue(t *testing.T) { @@ -429,7 +434,7 @@ func TestAggregationRandomOnAnAggregatedValue(t *testing.T) { mcmp.Exec("insert into t10(k, a, b) values (0, 100, 10), (10, 200, 20);") - mcmp.AssertMatchesNoOrder("select /*vt+ PLANNER=gen4 */ A.a, A.b, (A.a / A.b) as d from (select sum(a) as a, sum(b) as b from t10 where a = 100) A;", + mcmp.AssertMatchesNoOrder("select A.a, A.b, (A.a / A.b) as d from (select sum(a) as a, sum(b) as b from t10 where a = 100) A;", `[[DECIMAL(100) DECIMAL(10) DECIMAL(10.0000)]]`) } @@ -441,16 +446,16 @@ func TestBuggyQueries(t *testing.T) { mcmp.Exec("insert into t10(k, a, b) values (0, 100, 10), (10, 200, 20), (20, null, null)") - 
mcmp.AssertMatches("select /*vt+ PLANNER=Gen4 */ sum(t1.a) from t10 as t1, t10 as t2", + mcmp.AssertMatches("select sum(t1.a) from t10 as t1, t10 as t2", `[[DECIMAL(900)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */t1.a, sum(t1.a), count(*), t1.a, sum(t1.a), count(*) from t10 as t1, t10 as t2 group by t1.a", + mcmp.AssertMatches("select t1.a, sum(t1.a), count(*), t1.a, sum(t1.a), count(*) from t10 as t1, t10 as t2 group by t1.a", "[[NULL NULL INT64(3) NULL NULL INT64(3)] "+ "[INT32(100) DECIMAL(300) INT64(3) INT32(100) DECIMAL(300) INT64(3)] "+ "[INT32(200) DECIMAL(600) INT64(3) INT32(200) DECIMAL(600) INT64(3)]]") - mcmp.Exec("select /*vt+ PLANNER=gen4 */sum(tbl1.a), min(tbl0.b) from t10 as tbl0, t10 as tbl1 left join t10 as tbl2 on tbl1.a = tbl2.a and tbl1.b = tbl2.k") - mcmp.Exec("select /*vt+ PLANNER=gen4 */count(*) from t10 left join t10 as t11 on t10.a = t11.b where t11.a") + mcmp.Exec("select sum(tbl1.a), min(tbl0.b) from t10 as tbl0, t10 as tbl1 left join t10 as tbl2 on tbl1.a = tbl2.a and tbl1.b = tbl2.k") + mcmp.Exec("select count(*) from t10 left join t10 as t11 on t10.a = t11.b where t11.a") } func TestMinMaxAcrossJoins(t *testing.T) { @@ -460,6 +465,105 @@ func TestMinMaxAcrossJoins(t *testing.T) { mcmp.Exec("insert into t2(id, shardKey) values (1, 10), (2, 20)") mcmp.AssertMatchesNoOrder( - `SELECT /*vt+ PLANNER=gen4 */ t1.name, max(t1.shardKey), t2.shardKey, min(t2.id) FROM t1 JOIN t2 ON t1.t1_id != t2.shardKey GROUP BY t1.name, t2.shardKey`, + `SELECT t1.name, max(t1.shardKey), t2.shardKey, min(t2.id) FROM t1 JOIN t2 ON t1.t1_id != t2.shardKey GROUP BY t1.name, t2.shardKey`, `[[VARCHAR("name 2") INT64(2) INT64(10) INT64(1)] [VARCHAR("name 1") INT64(1) INT64(10) INT64(1)] [VARCHAR("name 2") INT64(2) INT64(20) INT64(2)] [VARCHAR("name 1") INT64(1) INT64(20) INT64(2)]]`) } + +func TestComplexAggregation(t *testing.T) { + mcmp, closer := start(t) + defer closer() + mcmp.Exec("insert into t1(t1_id, `name`, `value`, shardkey) 
values(1,'a1','foo',100), (2,'b1','foo',200), (3,'c1','foo',300), (4,'a1','foo',100), (5,'d1','toto',200), (6,'c1','tata',893), (7,'a1','titi',2380), (8,'b1','tete',12833), (9,'e1','yoyo',783493)") + + mcmp.Exec("set @@sql_mode = ' '") + mcmp.Exec(`SELECT 1+COUNT(t1_id) FROM t1`) + mcmp.Exec(`SELECT COUNT(t1_id)+1 FROM t1`) + mcmp.Exec(`SELECT COUNT(t1_id)+MAX(shardkey) FROM t1`) + mcmp.Exec(`SELECT shardkey, MIN(t1_id)+MAX(t1_id) FROM t1 GROUP BY shardkey`) + mcmp.Exec(`SELECT shardkey + MIN(t1_id)+MAX(t1_id) FROM t1 GROUP BY shardkey`) + mcmp.Exec(`SELECT name+COUNT(t1_id)+1 FROM t1 GROUP BY name`) + mcmp.Exec(`SELECT COUNT(*)+shardkey+MIN(t1_id)+1+MAX(t1_id)*SUM(t1_id)+1+name FROM t1 GROUP BY shardkey, name`) +} + +// TestGroupConcatAggregation tests the group_concat function with vitess doing the aggregation. +func TestGroupConcatAggregation(t *testing.T) { + mcmp, closer := start(t) + defer closer() + mcmp.Exec("insert into t1(t1_id, `name`, `value`, shardkey) values(1,'a1',null,100), (2,'b1','foo',20), (3,'c1','foo',10), (4,'a1','foo',100), (5,'d1','toto',200), (6,'c1',null,893), (10,'a1','titi',2380), (20,'b1','tete',12833), (9,'e1','yoyo',783493)") + mcmp.Exec("insert into t2(id, shardKey) values (1, 10), (2, 20)") + + mQr, vtQr := mcmp.ExecNoCompare(`SELECT group_concat(name) FROM t1`) + compareRow(t, mQr, vtQr, nil, []int{0}) + mQr, vtQr = mcmp.ExecNoCompare(`SELECT group_concat(value) FROM t1 join t2 on t1.shardKey = t2.shardKey `) + compareRow(t, mQr, vtQr, nil, []int{0}) + mQr, vtQr = mcmp.ExecNoCompare(`SELECT group_concat(value) FROM t1 join t2 on t1.t1_id = t2.shardKey `) + compareRow(t, mQr, vtQr, nil, []int{0}) + mQr, vtQr = mcmp.ExecNoCompare(`SELECT group_concat(value) FROM t1 join t2 on t1.shardKey = t2.id `) + compareRow(t, mQr, vtQr, nil, []int{0}) + mQr, vtQr = mcmp.ExecNoCompare(`SELECT group_concat(value), t1.name FROM t1, t2 group by t1.name`) + compareRow(t, mQr, vtQr, []int{1}, []int{0}) +} + +func compareRow(t *testing.T, mRes 
*sqltypes.Result, vtRes *sqltypes.Result, grpCols []int, fCols []int) { + require.Equal(t, len(mRes.Rows), len(vtRes.Rows), "mysql and vitess result count does not match") + for _, row := range vtRes.Rows { + var grpKey string + for _, col := range grpCols { + grpKey += row[col].String() + } + var foundKey bool + for _, mRow := range mRes.Rows { + var mKey string + for _, col := range grpCols { + mKey += mRow[col].String() + } + if grpKey != mKey { + continue + } + foundKey = true + for _, col := range fCols { + vtFValSplit := strings.Split(row[col].ToString(), ",") + sort.Strings(vtFValSplit) + mFValSplit := strings.Split(mRow[col].ToString(), ",") + sort.Strings(mFValSplit) + require.True(t, slices.Equal(vtFValSplit, mFValSplit), "mysql and vitess result are not same: vitess:%v, mysql:%v", vtRes.Rows, mRes.Rows) + } + } + require.True(t, foundKey, "mysql and vitess result does not same row: vitess:%v, mysql:%v", vtRes.Rows, mRes.Rows) + } +} + +func TestDistinctAggregation(t *testing.T) { + mcmp, closer := start(t) + defer closer() + mcmp.Exec("insert into t1(t1_id, `name`, `value`, shardkey) values(1,'a1','foo',100), (2,'b1','foo',200), (3,'c1','foo',300), (4,'a1','foo',100), (5,'d1','toto',200), (6,'c1','tata',893), (7,'a1','titi',2380), (8,'b1','tete',12833), (9,'e1','yoyo',783493)") + + tcases := []struct { + query string + expectedErr string + }{{ + query: `SELECT COUNT(DISTINCT value), SUM(DISTINCT shardkey) FROM t1`, + expectedErr: "VT12001: unsupported: only one DISTINCT aggregation is allowed in a SELECT: sum(distinct shardkey) (errno 1235) (sqlstate 42000)", + }, { + query: `SELECT a.t1_id, SUM(DISTINCT b.shardkey) FROM t1 a, t1 b group by a.t1_id`, + }, { + query: `SELECT a.value, SUM(DISTINCT b.shardkey) FROM t1 a, t1 b group by a.value`, + }, { + query: `SELECT count(distinct a.value), SUM(DISTINCT b.t1_id) FROM t1 a, t1 b`, + expectedErr: "VT12001: unsupported: only one DISTINCT aggregation is allowed in a SELECT: sum(distinct b.t1_id) (errno 1235) 
(sqlstate 42000)", + }, { + query: `SELECT a.value, SUM(DISTINCT b.t1_id), min(DISTINCT a.t1_id) FROM t1 a, t1 b group by a.value`, + }, { + query: `SELECT distinct count(*) from t1, (select distinct count(*) from t1) as t2`, + }} + + for _, tc := range tcases { + mcmp.Run(tc.query, func(mcmp *utils.MySQLCompare) { + _, err := mcmp.ExecAllowError(tc.query) + if tc.expectedErr == "" { + require.NoError(t, err) + return + } + require.ErrorContains(t, err, tc.expectedErr) + }) + } +} diff --git a/go/test/endtoend/vtgate/queries/aggregation/fuzz_test.go b/go/test/endtoend/vtgate/queries/aggregation/fuzz_test.go deleted file mode 100644 index 25bec1a39b4..00000000000 --- a/go/test/endtoend/vtgate/queries/aggregation/fuzz_test.go +++ /dev/null @@ -1,212 +0,0 @@ -/* -Copyright 2023 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package aggregation - -import ( - "fmt" - "math/rand" - "strings" - "testing" - "time" - - "golang.org/x/exp/maps" - - "vitess.io/vitess/go/vt/log" -) - -type ( - column struct { - name string - typ string - } - tableT struct { - name string - columns []column - } -) - -func TestFuzzAggregations(t *testing.T) { - // This test randomizes values and queries, and checks that mysql returns the same values that Vitess does - mcmp, closer := start(t) - defer closer() - - noOfRows := rand.Intn(20) - var values []string - for i := 0; i < noOfRows; i++ { - values = append(values, fmt.Sprintf("(%d, 'name%d', 'value%d', %d)", i, i, i, i)) - } - t1Insert := fmt.Sprintf("insert into t1 (t1_id, name, value, shardKey) values %s;", strings.Join(values, ",")) - values = nil - noOfRows = rand.Intn(20) - for i := 0; i < noOfRows; i++ { - values = append(values, fmt.Sprintf("(%d, %d)", i, i)) - } - t2Insert := fmt.Sprintf("insert into t2 (id, shardKey) values %s;", strings.Join(values, ",")) - - mcmp.Exec(t1Insert) - mcmp.Exec(t2Insert) - - t.Cleanup(func() { - if t.Failed() { - fmt.Println(t1Insert) - fmt.Println(t2Insert) - } - }) - - schema := map[string]tableT{ - "t1": {name: "t1", columns: []column{ - {name: "t1_id", typ: "bigint"}, - {name: "name", typ: "varchar"}, - {name: "value", typ: "varchar"}, - {name: "shardKey", typ: "bigint"}, - }}, - "t2": {name: "t2", columns: []column{ - {name: "id", typ: "bigint"}, - {name: "shardKey", typ: "bigint"}, - }}, - } - - endBy := time.Now().Add(1 * time.Second) - schemaTables := maps.Values(schema) - - var queryCount int - for time.Now().Before(endBy) || t.Failed() { - tables := createTables(schemaTables) - query := randomQuery(tables, 3, 3) - mcmp.Exec(query) - if t.Failed() { - fmt.Println(query) - } - queryCount++ - } - log.Info("Queries successfully executed: %d", queryCount) -} - -func randomQuery(tables []tableT, maxAggrs, maxGroupBy int) string { - randomCol := func(tblIdx int) (string, string) { - tbl := tables[tblIdx] - 
col := randomEl(tbl.columns) - return fmt.Sprintf("tbl%d.%s", tblIdx, col.name), col.typ - } - predicates := createPredicates(tables, randomCol) - aggregates := createAggregations(tables, maxAggrs, randomCol) - grouping := createGroupBy(tables, maxGroupBy, randomCol) - sel := "select /*vt+ PLANNER=Gen4 */ " + strings.Join(aggregates, ", ") + " from " - - var tbls []string - for i, s := range tables { - tbls = append(tbls, fmt.Sprintf("%s as tbl%d", s.name, i)) - } - sel += strings.Join(tbls, ", ") - - if len(predicates) > 0 { - sel += " where " - sel += strings.Join(predicates, " and ") - } - if len(grouping) > 0 { - sel += " group by " - sel += strings.Join(grouping, ", ") - } - // we do it this way so we don't have to do only `only_full_group_by` queries - var noOfOrderBy int - if len(grouping) > 0 { - // panic on rand function call if value is 0 - noOfOrderBy = rand.Intn(len(grouping)) - } - if noOfOrderBy > 0 { - noOfOrderBy = 0 // TODO turning on ORDER BY here causes lots of failures to happen - } - if noOfOrderBy > 0 { - var orderBy []string - for noOfOrderBy > 0 { - noOfOrderBy-- - if rand.Intn(2) == 0 || len(grouping) == 0 { - orderBy = append(orderBy, randomEl(aggregates)) - } else { - orderBy = append(orderBy, randomEl(grouping)) - } - } - sel += " order by " - sel += strings.Join(orderBy, ", ") - } - return sel -} - -func createGroupBy(tables []tableT, maxGB int, randomCol func(tblIdx int) (string, string)) (grouping []string) { - noOfGBs := rand.Intn(maxGB) - for i := 0; i < noOfGBs; i++ { - tblIdx := rand.Intn(len(tables)) - col, _ := randomCol(tblIdx) - grouping = append(grouping, col) - } - return -} - -func createAggregations(tables []tableT, maxAggrs int, randomCol func(tblIdx int) (string, string)) (aggregates []string) { - aggregations := []func(string) string{ - func(_ string) string { return "count(*)" }, - func(e string) string { return fmt.Sprintf("count(%s)", e) }, - //func(e string) string { return fmt.Sprintf("sum(%s)", e) }, - //func(e 
string) string { return fmt.Sprintf("avg(%s)", e) }, - //func(e string) string { return fmt.Sprintf("min(%s)", e) }, - //func(e string) string { return fmt.Sprintf("max(%s)", e) }, - } - - noOfAggrs := rand.Intn(maxAggrs) + 1 - for i := 0; i < noOfAggrs; i++ { - tblIdx := rand.Intn(len(tables)) - e, _ := randomCol(tblIdx) - aggregates = append(aggregates, randomEl(aggregations)(e)) - } - return aggregates -} - -func createTables(schemaTables []tableT) []tableT { - noOfTables := rand.Intn(2) + 1 - var tables []tableT - - for i := 0; i < noOfTables; i++ { - tables = append(tables, randomEl(schemaTables)) - } - return tables -} - -func createPredicates(tables []tableT, randomCol func(tblIdx int) (string, string)) (predicates []string) { - for idx1 := range tables { - for idx2 := range tables { - if idx1 == idx2 { - continue - } - noOfPredicates := rand.Intn(2) - - for noOfPredicates > 0 { - col1, t1 := randomCol(idx1) - col2, t2 := randomCol(idx2) - if t1 != t2 { - continue - } - predicates = append(predicates, fmt.Sprintf("%s = %s", col1, col2)) - noOfPredicates-- - } - } - } - return predicates -} - -func randomEl[K any](in []K) K { - return in[rand.Intn(len(in))] -} diff --git a/go/test/endtoend/vtgate/queries/aggregation/main_test.go b/go/test/endtoend/vtgate/queries/aggregation/main_test.go index a859002f44a..02013a9b0e2 100644 --- a/go/test/endtoend/vtgate/queries/aggregation/main_test.go +++ b/go/test/endtoend/vtgate/queries/aggregation/main_test.go @@ -64,7 +64,7 @@ func TestMain(m *testing.M) { VSchema: vschema, } clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal"} - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-schema-change-signal-interval", "0.1"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal"} err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false) if err != nil { return 1 diff --git 
a/go/test/endtoend/vtgate/queries/benchmark/benchmark_test.go b/go/test/endtoend/vtgate/queries/benchmark/benchmark_test.go new file mode 100644 index 00000000000..3fd7edd14de --- /dev/null +++ b/go/test/endtoend/vtgate/queries/benchmark/benchmark_test.go @@ -0,0 +1,80 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dml + +import ( + "fmt" + "math/rand" + "strconv" + "strings" + "testing" + + "vitess.io/vitess/go/test/endtoend/utils" +) + +type testQuery struct { + tableName string + cols []string + intTyp []bool +} + +func (tq *testQuery) getInsertQuery(rows int) string { + var allRows []string + for i := 0; i < rows; i++ { + var row []string + for _, isInt := range tq.intTyp { + if isInt { + row = append(row, strconv.Itoa(i)) + continue + } + row = append(row, "'"+getRandomString(50)+"'") + } + allRows = append(allRows, "("+strings.Join(row, ",")+")") + } + return fmt.Sprintf("insert into %s(%s) values %s", tq.tableName, strings.Join(tq.cols, ","), strings.Join(allRows, ",")) +} + +func getRandomString(size int) string { + var str strings.Builder + + for i := 0; i < size; i++ { + str.WriteByte(byte(rand.Intn(27) + 97)) + } + return str.String() +} + +func BenchmarkShardedTblNoLookup(b *testing.B) { + conn, closer := start(b) + defer closer() + + cols := []string{"id", "c1", "c2", "c3", "c4", "c5", "c6", "c7", "c8", "c9", "c10", "c11", "c12"} + intType := make([]bool, len(cols)) + intType[0] = true + tq := &testQuery{ + tableName: 
"tbl_no_lkp_vdx", + cols: cols, + intTyp: intType, + } + for _, rows := range []int{1, 10, 100, 500, 1000, 5000, 10000} { + insStmt := tq.getInsertQuery(rows) + b.Run(fmt.Sprintf("16-shards-%d-rows", rows), func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = utils.Exec(b, conn, insStmt) + } + }) + } +} diff --git a/go/test/endtoend/vtgate/queries/benchmark/main_test.go b/go/test/endtoend/vtgate/queries/benchmark/main_test.go new file mode 100644 index 00000000000..6978d0b9428 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/benchmark/main_test.go @@ -0,0 +1,124 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dml + +import ( + "context" + _ "embed" + "flag" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + mysqlParams mysql.ConnParams + sKs = "sks" + uKs = "uks" + cell = "test" + + //go:embed sharded_schema.sql + sSchemaSQL string + + //go:embed vschema.json + sVSchema string +) + +var ( + shards4 = []string{ + "-40", "40-80", "80-c0", "c0-", + } + + shards8 = []string{ + "-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-", + } + + shards16 = []string{ + "-10", "10-20", "20-30", "30-40", "40-50", "50-60", "60-70", "70-80", "80-90", "90-a0", "a0-b0", "b0-c0", "c0-d0", "d0-e0", "e0-f0", "f0-", + } + + shards32 = []string{ + "-05", "05-10", "10-15", "15-20", "20-25", "25-30", "30-35", "35-40", "40-45", "45-50", "50-55", "55-60", "60-65", "65-70", "70-75", "75-80", + "80-85", "85-90", "90-95", "95-a0", "a0-a5", "a5-b0", "b0-b5", "b5-c0", "c0-c5", "c5-d0", "d0-d5", "d5-e0", "e0-e5", "e5-f0", "f0-f5", "f5-", + } +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + return 1 + } + + // Start sharded keyspace + sKeyspace := &cluster.Keyspace{ + Name: sKs, + SchemaSQL: sSchemaSQL, + VSchema: sVSchema, + } + + err = clusterInstance.StartKeyspace(*sKeyspace, shards4, 0, false) + if err != nil { + return 1 + } + + // Start vtgate + err = clusterInstance.StartVtgate() + if err != nil { + return 1 + } + + vtParams = clusterInstance.GetVTParams(sKs) + + return m.Run() + }() + os.Exit(exitCode) +} + +func start(b *testing.B) (*mysql.Conn, func()) { + conn, err := mysql.Connect(context.Background(), 
&vtParams) + require.NoError(b, err) + + deleteAll := func() { + tables := []string{"tbl_no_lkp_vdx"} + for _, table := range tables { + _, _ = utils.ExecAllowError(b, conn, "delete from "+table) + } + } + + deleteAll() + + return conn, func() { + deleteAll() + conn.Close() + cluster.PanicHandler(b) + } +} diff --git a/go/test/endtoend/vtgate/queries/benchmark/sharded_schema.sql b/go/test/endtoend/vtgate/queries/benchmark/sharded_schema.sql new file mode 100644 index 00000000000..850b6ffc15a --- /dev/null +++ b/go/test/endtoend/vtgate/queries/benchmark/sharded_schema.sql @@ -0,0 +1,16 @@ +create table tbl_no_lkp_vdx +( + id bigint, + c1 varchar(50), + c2 varchar(50), + c3 varchar(50), + c4 varchar(50), + c5 varchar(50), + c6 varchar(50), + c7 varchar(50), + c8 varchar(50), + c9 varchar(50), + c10 varchar(50), + c11 varchar(50), + c12 varchar(50) +) Engine = InnoDB; \ No newline at end of file diff --git a/go/test/endtoend/vtgate/schematracker/unauthorized/vschema.json b/go/test/endtoend/vtgate/queries/benchmark/vschema.json similarity index 79% rename from go/test/endtoend/vtgate/schematracker/unauthorized/vschema.json rename to go/test/endtoend/vtgate/queries/benchmark/vschema.json index 002c6f00386..4970e8b7437 100644 --- a/go/test/endtoend/vtgate/schematracker/unauthorized/vschema.json +++ b/go/test/endtoend/vtgate/queries/benchmark/vschema.json @@ -6,10 +6,10 @@ } }, "tables": { - "t2": { + "tbl_no_lkp_vdx": { "column_vindexes": [ { - "column": "id3", + "column": "id", "name": "xxhash" } ] diff --git a/go/test/endtoend/vtgate/queries/derived/derived_test.go b/go/test/endtoend/vtgate/queries/derived/derived_test.go index 27061c909a7..e29408b8c5d 100644 --- a/go/test/endtoend/vtgate/queries/derived/derived_test.go +++ b/go/test/endtoend/vtgate/queries/derived/derived_test.go @@ -56,7 +56,6 @@ func TestDerivedTableWithOrderByLimit(t *testing.T) { } func TestDerivedAggregationOnRHS(t *testing.T) { - t.Skip("skipped for now, issue: 
https://github.com/vitessio/vitess/issues/11703") mcmp, closer := start(t) defer closer() @@ -85,7 +84,8 @@ func TestDerivedTableWithHaving(t *testing.T) { mcmp.Exec("insert into user(id, name) values(1,'toto'), (2,'tata'), (3,'titi'), (4,'tete'), (5,'foo')") mcmp.Exec("set sql_mode = ''") - mcmp.AssertMatchesAnyNoCompare("select /*vt+ PLANNER=Gen4 */ * from (select id from user having count(*) >= 1) s", "[[INT64(1)]]", "[[INT64(4)]]") + // For the given query, we can get any id back, because we aren't grouping by it. + mcmp.AssertMatchesAnyNoCompare("select /*vt+ PLANNER=Gen4 */ * from (select id from user having count(*) >= 1) s", "[[INT64(1)]]", "[[INT64(2)]]", "[[INT64(3)]]", "[[INT64(4)]]", "[[INT64(5)]]") } func TestDerivedTableColumns(t *testing.T) { diff --git a/go/test/endtoend/vtgate/queries/foundrows/main_test.go b/go/test/endtoend/vtgate/queries/foundrows/main_test.go index e0d9d737efa..8f992863008 100644 --- a/go/test/endtoend/vtgate/queries/foundrows/main_test.go +++ b/go/test/endtoend/vtgate/queries/foundrows/main_test.go @@ -66,7 +66,7 @@ func TestMain(m *testing.M) { VSchema: vschema, } clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal"} - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-schema-change-signal-interval", "0.1"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal"} err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false) if err != nil { return 1 diff --git a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go index 4230d763804..d1d50039d99 100644 --- a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go +++ b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go @@ -99,8 +99,8 @@ func TestInformationSchemaQueryGetsRoutedToTheRightTableAndKeyspace(t 
*testing.T utils.Exec(t, mcmp.VtConn, "insert into t1(id1, id2) values (1, 1), (2, 2), (3,3), (4,4)") - _ = utils.Exec(t, mcmp.VtConn, "SELECT /*vt+ PLANNER=gen4 */ * FROM t1000") // test that the routed table is available to us - result := utils.Exec(t, mcmp.VtConn, "SELECT /*vt+ PLANNER=gen4 */ * FROM information_schema.tables WHERE table_schema = database() and table_name='t1000'") + _ = utils.Exec(t, mcmp.VtConn, "SELECT * FROM t1000") // test that the routed table is available to us + result := utils.Exec(t, mcmp.VtConn, "SELECT * FROM information_schema.tables WHERE table_schema = database() and table_name='t1000'") assert.NotEmpty(t, result.Rows) } @@ -111,7 +111,8 @@ func TestFKConstraintUsingInformationSchema(t *testing.T) { query := "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk on fk.constraint_schema = rc.constraint_schema and fk.constraint_name = rc.constraint_name where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = 't7_fk' and rc.constraint_schema = database() and rc.table_name = 't7_fk'" mcmp.AssertMatchesAny(query, `[[VARBINARY("t7_xxhash") VARCHAR("uid") VARCHAR("t7_uid") VARCHAR("t7_fk_ibfk_1") BINARY("CASCADE") BINARY("SET NULL")]]`, - `[[VARCHAR("t7_xxhash") VARCHAR("uid") VARCHAR("t7_uid") VARCHAR("t7_fk_ibfk_1") VARCHAR("CASCADE") VARCHAR("SET NULL")]]`) + `[[VARCHAR("t7_xxhash") VARCHAR("uid") VARCHAR("t7_uid") VARCHAR("t7_fk_ibfk_1") VARCHAR("CASCADE") VARCHAR("SET NULL")]]`, + `[[VARCHAR("t7_xxhash") VARCHAR("uid") VARCHAR("t7_uid") VARCHAR("t7_fk_ibfk_1") BINARY("CASCADE") BINARY("SET NULL")]]`) } func TestConnectWithSystemSchema(t *testing.T) { diff --git a/go/test/endtoend/vtgate/queries/informationschema/main_test.go 
b/go/test/endtoend/vtgate/queries/informationschema/main_test.go index c15c546dfc9..06c5b188d18 100644 --- a/go/test/endtoend/vtgate/queries/informationschema/main_test.go +++ b/go/test/endtoend/vtgate/queries/informationschema/main_test.go @@ -72,7 +72,7 @@ func TestMain(m *testing.M) { VSchema: vschema, } clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal"} - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-schema-change-signal-interval", "0.1"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal"} err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false) if err != nil { return 1 @@ -88,7 +88,6 @@ func TestMain(m *testing.M) { return 1 } - clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--enable_system_settings=true") // Start vtgate err = clusterInstance.StartVtgate() if err != nil { diff --git a/go/test/endtoend/vtgate/queries/kill/kill_test.go b/go/test/endtoend/vtgate/queries/kill/kill_test.go new file mode 100644 index 00000000000..ad57722dd97 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/kill/kill_test.go @@ -0,0 +1,246 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kill + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/utils" +) + +// TestKillConnection kills its own connection and checks the error message received. +func TestKillOwnConnection(t *testing.T) { + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + _, err = utils.ExecAllowError(t, conn, fmt.Sprintf("kill %d", conn.ConnectionID)) + require.NoError(t, err) + + // the connection should be closed. + _, err = utils.ExecAllowError(t, conn, "select 1") + require.ErrorContains(t, err, "EOF (errno 2013) (sqlstate HY000)") +} + +// TestKillDifferentConnection kills different connection and check relevant error messages. +func TestKillDifferentConnection(t *testing.T) { + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + killConn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer killConn.Close() + + // connection does not exist + _, err = utils.ExecAllowError(t, killConn, "kill 12345") + require.ErrorContains(t, err, "Unknown thread id: 12345 (errno 1094) (sqlstate HY000)") + + // connection exist + _, err = utils.ExecAllowError(t, killConn, fmt.Sprintf("kill %d", conn.ConnectionID)) + require.NoError(t, err) + + // executing on closed connection + _, err = utils.ExecAllowError(t, conn, "select 1") + require.ErrorContains(t, err, "EOF (errno 2013) (sqlstate HY000)") +} + +// TestKillOwnQuery kills the kill statement itself +func TestKillOwnQuery(t *testing.T) { + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + _, err = utils.ExecAllowError(t, conn, fmt.Sprintf("kill query %d", conn.ConnectionID)) + // TODO: does not really change anything, but expect to receive Queery Interrupted error + // "(errno 1317) (sqlstate 
70100)" + require.NoError(t, err) +} + +// TestKillDifferentConnectionQuery kills query on different connection and check relevant error messages. +func TestKillDifferentConnectionQuery(t *testing.T) { + setupData(t, false) + defer dropData(t) + + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + killConn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer killConn.Close() + + // killing query on non-existent connection + _, err = utils.ExecAllowError(t, killConn, "kill query 12345") + require.ErrorContains(t, err, "Unknown thread id: 12345 (errno 1094) (sqlstate HY000)") + + done := make(chan error) + go func() { + // 20 seconds sleep. Should be stopped by kill statement. + _, err := utils.ExecAllowError(t, conn, "select sleep(20) from test") + done <- err + }() + + for { + select { + case execErr := <-done: + require.ErrorContains(t, execErr, "context canceled (errno 1317) (sqlstate 70100)") + return + case <-time.After(100 * time.Millisecond): + _, err = utils.ExecAllowError(t, killConn, fmt.Sprintf("kill query %d", conn.ConnectionID)) + require.NoError(t, err) + case <-time.After(5 * time.Second): + t.Fatal("test did not complete in 5 seconds.") + } + } +} + +// TestKillOnHungQuery test that any hung query should return. +func TestKillOnHungQuery(t *testing.T) { + + execFunc := func(conn *mysql.Conn) error { + utils.Exec(t, conn, "begin") + _, err := utils.ExecAllowError(t, conn, "insert into test(id, msg, extra) values (1, 'a', 'e')") + require.Error(t, err) + return err + } + + t.Run("connection close", func(t *testing.T) { + testHungQuery(t, execFunc, func(hungConn *mysql.Conn, _ *mysql.Conn) { + // closing the hung query connection. 
+ hungConn.Close() + }, "(errno 2013) (sqlstate HY000)") + }) + + t.Run("connection kill", func(t *testing.T) { + testHungQuery(t, execFunc, func(hungConn *mysql.Conn, killConn *mysql.Conn) { + // kill the hung connection + utils.ExecAllowError(t, killConn, fmt.Sprintf("kill %d", hungConn.ConnectionID)) + }, "context canceled") + }) + + t.Run("query kill", func(t *testing.T) { + testHungQuery(t, execFunc, func(hungConn *mysql.Conn, killConn *mysql.Conn) { + // kill the hung query + utils.ExecAllowError(t, killConn, fmt.Sprintf("kill query %d", hungConn.ConnectionID)) + }, "context canceled") + }) +} + +func testHungQuery(t *testing.T, execFunc func(*mysql.Conn) error, killFunc func(*mysql.Conn, *mysql.Conn), errMsgs ...string) { + killConn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer killConn.Close() + + utils.Exec(t, killConn, "begin") + utils.Exec(t, killConn, "insert into test(id, msg, extra) values (1, 'a', 'e')") + + hungConn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer hungConn.Close() + + runQueryInGoRoutineAndCheckError(t, hungConn, killConn, execFunc, killFunc, errMsgs) +} + +// TestKillStmtOnHugeData tests different kill scenario on huge data. 
+func TestKillStmtOnHugeData(t *testing.T) { + setupData(t, true) + defer dropData(t) + + execFunc := func(conn *mysql.Conn) error { + _, err := utils.ExecWithRowCount(t, conn, "select * from test", 640000) + require.Error(t, err) + return err + } + + t.Run("oltp - kill conn", func(t *testing.T) { + testHugeData(t, "oltp", execFunc, func(conn *mysql.Conn, killConn *mysql.Conn) { + utils.ExecAllowError(t, killConn, fmt.Sprintf("kill query %d", conn.ConnectionID)) + }, "context canceled (errno 1317) (sqlstate 70100)") + }) + + t.Run("oltp - kill query", func(t *testing.T) { + testHugeData(t, "oltp", execFunc, func(conn *mysql.Conn, killConn *mysql.Conn) { + utils.ExecAllowError(t, killConn, fmt.Sprintf("kill query %d", conn.ConnectionID)) + }, "(errno 1317) (sqlstate 70100)") + }) + + t.Run("olap - kill conn", func(t *testing.T) { + testHugeData(t, "olap", execFunc, func(conn *mysql.Conn, killConn *mysql.Conn) { + utils.ExecAllowError(t, killConn, fmt.Sprintf("kill query %d", conn.ConnectionID)) + }, "context canceled (errno 1317) (sqlstate 70100)", "EOF (errno 2013) (sqlstate HY000)") + }) + + t.Run("olap - kill query", func(t *testing.T) { + testHugeData(t, "olap", execFunc, func(conn *mysql.Conn, killConn *mysql.Conn) { + utils.ExecAllowError(t, killConn, fmt.Sprintf("kill query %d", conn.ConnectionID)) + }, "context canceled (errno 1317) (sqlstate 70100)", "EOF (errno 2013) (sqlstate HY000)") + }) +} + +func testHugeData(t *testing.T, workload string, execFunc func(*mysql.Conn) error, killFunc func(*mysql.Conn, *mysql.Conn), errMsgs ...string) { + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + utils.Exec(t, conn, fmt.Sprintf("set workload = %s", workload)) + + killConn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer killConn.Close() + utils.Exec(t, killConn, fmt.Sprintf("set workload = %s", workload)) + + runQueryInGoRoutineAndCheckError(t, conn, killConn, 
execFunc, killFunc, errMsgs) +} + +func runQueryInGoRoutineAndCheckError(t *testing.T, conn *mysql.Conn, killConn *mysql.Conn, execFunc func(*mysql.Conn) error, killFunc func(*mysql.Conn, *mysql.Conn), errMsgs []string) { + done := make(chan bool) + go func() { + err := execFunc(conn) + // if exec has failed, marking channel done to fail fast. + if t.Failed() { + done <- true + } + // going through all the expected error messages and if it matches any then test passes. + for _, errMsg := range errMsgs { + if strings.Contains(err.Error(), errMsg) { + done <- true + return + } + } + require.Failf(t, "error message does not match", "%v does not contain any of %v", err.Error(), errMsgs) + done <- true + }() + + totalTime := time.After(5 * time.Second) + for { + select { + case <-done: + return + case <-time.After(20 * time.Millisecond): + killFunc(conn, killConn) + case <-totalTime: + t.Fatal("test did not complete in 5 seconds.") + } + } +} diff --git a/go/test/endtoend/vtgate/queries/kill/main_test.go b/go/test/endtoend/vtgate/queries/kill/main_test.go new file mode 100644 index 00000000000..836603c91ee --- /dev/null +++ b/go/test/endtoend/vtgate/queries/kill/main_test.go @@ -0,0 +1,148 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kill + +import ( + "context" + _ "embed" + "flag" + "fmt" + "math/rand" + "os" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/vt/vtgate/planbuilder" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + cell = "zone1" + hostname = "localhost" + ks = "ks" + + //go:embed schema.sql + schema string + + //go:embed vschema.json + vschema string +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(cell, hostname) + defer clusterInstance.Teardown() + + // Start topo server + if err := clusterInstance.StartTopo(); err != nil { + return 1 + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: ks, + SchemaSQL: schema, + VSchema: vschema, + } + var maxGrpcSize int64 = 256 * 1024 * 1024 + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, + "--queryserver-config-max-result-size", "10000000", + "--grpc_max_message_size", strconv.FormatInt(maxGrpcSize, 10)) + if err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false); err != nil { + return 1 + } + + // Start vtgate + clusterInstance.VtGatePlannerVersion = planbuilder.Gen4 + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--grpc_max_message_size", strconv.FormatInt(maxGrpcSize, 10), + "--max_memory_rows", "999999", + "--allow-kill-statement") + if err := clusterInstance.StartVtgate(); err != nil { + return 1 + } + + vtParams = clusterInstance.GetVTParams(ks) + + return m.Run() + }() + os.Exit(exitCode) +} + +func setupData(t *testing.T, huge bool) { + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + initialRow := 9999 + multiplier := 32 + if !huge { + 
initialRow = 4 + multiplier = 0 + } + r1 := getRandomString(10) + r2 := getRandomString(20) + r3 := getRandomString(30) + r4 := getRandomString(40) + + for i := 0; i < initialRow; i += 4 { + utils.Exec(t, conn, fmt.Sprintf("insert into test(id, msg, extra) values (%d, '%s', '%s'),(%d, '%s', '%s'),(%d, '%s', '%s'),(%d, '%s', '%s')", + i, r1, r2, + i+1, r2, r3, + i+2, r3, r4, + i+3, r4, r1)) + } + if !huge { + utils.AssertMatches(t, conn, `select count(*), min(id), max(id) from test`, `[[INT64(4) INT64(0) INT64(3)]]`) + return + } + + utils.AssertMatches(t, conn, `select count(*), min(id), max(id) from test`, `[[INT64(10000) INT64(0) INT64(9999)]]`) + for i := 1; i < multiplier; i = i << 1 { + utils.Exec(t, conn, fmt.Sprintf("insert into test(id, msg, extra) select id+%d, msg, extra from test", (initialRow+1)*i)) + } + utils.AssertMatches(t, conn, `select count(*), min(id), max(id) from test`, `[[INT64(320000) INT64(0) INT64(319999)]]`) +} + +func dropData(t *testing.T) { + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + utils.Exec(t, conn, "drop table if exists test") + utils.Exec(t, conn, schema) +} + +func getRandomString(size int) string { + var str strings.Builder + + for i := 0; i < size; i++ { + str.WriteByte(byte((rand.Int() % 26) + 97)) + } + + return str.String() +} diff --git a/go/test/endtoend/vtgate/queries/kill/schema.sql b/go/test/endtoend/vtgate/queries/kill/schema.sql new file mode 100644 index 00000000000..21a059f69ac --- /dev/null +++ b/go/test/endtoend/vtgate/queries/kill/schema.sql @@ -0,0 +1,16 @@ +create table test +( + id bigint not null, + msg varchar(50) not null, + extra varchar(100), + primary key (id), + index(msg) +) ENGINE=InnoDB; + +create table test_idx +( + msg varchar(50) not null, + id bigint not null, + keyspace_id varbinary(50), + primary key (msg, id) +) ENGINE=InnoDB; \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/kill/vschema.json 
b/go/test/endtoend/vtgate/queries/kill/vschema.json new file mode 100644 index 00000000000..3173d8c7819 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/kill/vschema.json @@ -0,0 +1,42 @@ +{ + "sharded": true, + "vindexes": { + "unicode_loose_xxhash" : { + "type": "unicode_loose_xxhash" + }, + "xxhash" : { + "type": "xxhash" + }, + "test_vdx": { + "type": "consistent_lookup", + "params": { + "table": "test_idx", + "from": "msg,id", + "to": "keyspace_id" + }, + "owner": "test" + } + }, + "tables": { + "test": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + }, + { + "columns": ["msg", "id"], + "name": "test_vdx" + } + ] + }, + "test_idx": { + "column_vindexes": [ + { + "column": "msg", + "name": "unicode_loose_xxhash" + } + ] + } + } +} diff --git a/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go b/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go index 9660c25e0cd..c385941502a 100644 --- a/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go +++ b/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go @@ -69,7 +69,6 @@ func TestMain(m *testing.M) { VSchema: shardedVSchema, } - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal-interval", "0.1"} err = clusterInstance.StartKeyspace(*sKs, shardedKsShards, 0, false) if err != nil { return 1 diff --git a/go/test/endtoend/vtgate/queries/misc/main_test.go b/go/test/endtoend/vtgate/queries/misc/main_test.go index d71dc55ef46..a3858284884 100644 --- a/go/test/endtoend/vtgate/queries/misc/main_test.go +++ b/go/test/endtoend/vtgate/queries/misc/main_test.go @@ -62,9 +62,7 @@ func TestMain(m *testing.M) { } clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, - "--queryserver-config-max-result-size", "1000000", - "--queryserver-config-query-timeout", "200", - "--queryserver-config-query-pool-timeout", "200") + "--queryserver-config-max-result-size", "1000000") // Start Unsharded keyspace ukeyspace := 
&cluster.Keyspace{ Name: uks, @@ -86,8 +84,6 @@ func TestMain(m *testing.M) { return 1 } - clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, - "--query-timeout", "100") // Start vtgate err = clusterInstance.StartVtgate() if err != nil { diff --git a/go/test/endtoend/vtgate/queries/misc/misc_test.go b/go/test/endtoend/vtgate/queries/misc/misc_test.go index 5a85448ce01..77764de7c14 100644 --- a/go/test/endtoend/vtgate/queries/misc/misc_test.go +++ b/go/test/endtoend/vtgate/queries/misc/misc_test.go @@ -24,7 +24,6 @@ import ( "testing" _ "github.com/go-sql-driver/mysql" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -37,7 +36,7 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { require.NoError(t, err) deleteAll := func() { - tables := []string{"t1"} + tables := []string{"t1", "uks.unsharded"} for _, table := range tables { _, _ = mcmp.ExecAndIgnore("delete from " + table) } @@ -213,10 +212,12 @@ func TestHighNumberOfParams(t *testing.T) { // connect to the vitess cluster db, err := sql.Open("mysql", fmt.Sprintf("@tcp(%s:%v)/%s", vtParams.Host, vtParams.Port, vtParams.DbName)) require.NoError(t, err) + defer db.Close() // run the query - r, err := db.Query(fmt.Sprintf("SELECT /*vt+ QUERY_TIMEOUT_MS=10000 */ id1 FROM t1 WHERE id1 in (%s) ORDER BY id1 ASC", strings.Join(params, ", ")), vals...) + r, err := db.Query(fmt.Sprintf("SELECT id1 FROM t1 WHERE id1 in (%s) ORDER BY id1 ASC", strings.Join(params, ", ")), vals...) require.NoError(t, err) + defer r.Close() // check the results we got, we should get 5 rows with each: 0, 1, 2, 3, 4 // count is the row number we are currently visiting, also correspond to the @@ -288,8 +289,8 @@ func TestPrepareStatements(t *testing.T) { assert.ErrorContains(t, err, "VT09011: Unknown prepared statement handler (prep_art) given to DEALLOCATE PREPARE") } +// TestBuggyOuterJoin validates inconsistencies around outer joins, adding these tests to stop regressions. 
func TestBuggyOuterJoin(t *testing.T) { - // We found a couple of inconsistencies around outer joins, adding these tests to stop regressions mcmp, closer := start(t) defer closer() @@ -297,3 +298,26 @@ func TestBuggyOuterJoin(t *testing.T) { mcmp.Exec("select t1.id1, t2.id1 from t1 left join t1 as t2 on t2.id1 = t2.id2") } + +func TestLeftJoinUsingUnsharded(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + utils.Exec(t, mcmp.VtConn, "insert into uks.unsharded(id1) values (1),(2),(3),(4),(5)") + utils.Exec(t, mcmp.VtConn, "select * from uks.unsharded as A left join uks.unsharded as B using(id1)") +} + +// TestAnalyze executes different analyze statement and validates that they run successfully. +func TestAnalyze(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + for _, workload := range []string{"olap", "oltp"} { + t.Run(workload, func(t *testing.T) { + utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = %s", workload)) + utils.Exec(t, mcmp.VtConn, "analyze table t1") + utils.Exec(t, mcmp.VtConn, "analyze table uks.unsharded") + utils.Exec(t, mcmp.VtConn, "analyze table mysql.user") + }) + } +} diff --git a/go/test/endtoend/vtgate/queries/normalize/normalize_test.go b/go/test/endtoend/vtgate/queries/normalize/normalize_test.go index 4fa4313e76c..52e30accf03 100644 --- a/go/test/endtoend/vtgate/queries/normalize/normalize_test.go +++ b/go/test/endtoend/vtgate/queries/normalize/normalize_test.go @@ -25,11 +25,11 @@ import ( "testing" "time" - "vitess.io/vitess/go/test/endtoend/utils" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/mysql" ) @@ -46,38 +46,24 @@ func TestNormalizeAllFields(t *testing.T) { assert.Equal(t, 1, len(qr.Rows), "wrong number of table rows, expected 1 but had %d. Results: %v", len(qr.Rows), qr.Rows) // Now need to figure out the best way to check the normalized query in the planner cache... 
- results, err := getPlanCache(fmt.Sprintf("%s:%d", vtParams.Host, clusterInstance.VtgateProcess.Port)) - require.Nil(t, err) - found := false - for _, record := range results { - key := record["Key"].(string) - if key == normalizedInsertQuery { - found = true - break - } - } - assert.Truef(t, found, "correctly normalized record not found in planner cache %v", results) + results := getPlanCache(t, fmt.Sprintf("%s:%d", vtParams.Host, clusterInstance.VtgateProcess.Port)) + assert.Contains(t, results, normalizedInsertQuery) } -func getPlanCache(vtgateHostPort string) ([]map[string]any, error) { - var results []map[string]any +func getPlanCache(t *testing.T, vtgateHostPort string) map[string]any { + var results map[string]any client := http.Client{ Timeout: 10 * time.Second, } resp, err := client.Get(fmt.Sprintf("http://%s/debug/query_plans", vtgateHostPort)) - if err != nil { - return results, err - } + require.NoError(t, err) defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) - if err != nil { - return results, err - } + require.NoError(t, err) err = json.Unmarshal(body, &results) - if err != nil { - return results, err - } + require.NoErrorf(t, err, "failed to unmarshal results. 
contents:\n%s\n\n", body) - return results, nil + return results } diff --git a/go/test/endtoend/vtgate/queries/orderby/main_test.go b/go/test/endtoend/vtgate/queries/orderby/main_test.go index c073e615c1f..9f18377ee3f 100644 --- a/go/test/endtoend/vtgate/queries/orderby/main_test.go +++ b/go/test/endtoend/vtgate/queries/orderby/main_test.go @@ -64,7 +64,7 @@ func TestMain(m *testing.M) { VSchema: vschema, } clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal"} - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-schema-change-signal-interval", "0.1"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal"} err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false) if err != nil { return 1 diff --git a/go/test/endtoend/vtgate/queries/random/main_test.go b/go/test/endtoend/vtgate/queries/random/main_test.go new file mode 100644 index 00000000000..e3256f60796 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/random/main_test.go @@ -0,0 +1,94 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package random + +import ( + _ "embed" + "flag" + "fmt" + "os" + "testing" + + "vitess.io/vitess/go/test/endtoend/utils" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + mysqlParams mysql.ConnParams + keyspaceName = "ks_random" + cell = "test_random" + + //go:embed schema.sql + schemaSQL string + + //go:embed vschema.json + vschema string +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + return 1 + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: schemaSQL, + VSchema: vschema, + } + clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal"} + err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false) + if err != nil { + return 1 + } + + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--enable_system_settings=true") + // Start vtgate + err = clusterInstance.StartVtgate() + if err != nil { + return 1 + } + + vtParams = clusterInstance.GetVTParams(keyspaceName) + + // create mysql instance and connection parameters + conn, closer, err := utils.NewMySQL(clusterInstance, keyspaceName, schemaSQL) + if err != nil { + fmt.Println(err) + return 1 + } + defer closer() + mysqlParams = conn + + return m.Run() + }() + os.Exit(exitCode) +} diff --git a/go/test/endtoend/vtgate/queries/random/query_gen.go b/go/test/endtoend/vtgate/queries/random/query_gen.go new file mode 100644 index 00000000000..3f8fccb05bb --- /dev/null +++ b/go/test/endtoend/vtgate/queries/random/query_gen.go @@ -0,0 +1,639 @@ +/* +Copyright 2023 The Vitess 
Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package random + +import ( + "fmt" + "math/rand" + "slices" + + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" +) + +// this file contains the structs and functions to generate random queries + +// to test only a particular type of query, delete the corresponding testFailingQueries clause +// there should be a comment indicating the type of query being disabled +// if true then known failing query types are still generated by randomQuery() +const testFailingQueries = false + +type ( + // selectGenerator generates select statements + selectGenerator struct { + r *rand.Rand + genConfig sqlparser.ExprGeneratorConfig + maxTables int + maxAggrs int + maxGBs int + schemaTables []tableT + sel *sqlparser.Select + } + + // queryGenerator generates queries, which can either be unions or select statements + queryGenerator struct { + stmt sqlparser.SelectStatement + selGen *selectGenerator + } + + column struct { + name string + // TODO: perhaps remove tableName and always pass columns through a tableT + tableName string + typ string + } + + tableT struct { + // the tableT struct can be used to represent the schema of a table or a derived table + // in the former case tableExpr will be a sqlparser.TableName, in the latter a sqlparser.DerivedTable + // in order to create a query with a derived table, its AST form is retrieved from tableExpr + // once the derived table is aliased, alias is 
updated + tableExpr sqlparser.SimpleTableExpr + alias string + cols []column + } +) + +var _ sqlparser.ExprGenerator = (*tableT)(nil) +var _ sqlparser.ExprGenerator = (*column)(nil) +var _ sqlparser.QueryGenerator = (*selectGenerator)(nil) +var _ sqlparser.QueryGenerator = (*queryGenerator)(nil) + +func newQueryGenerator(r *rand.Rand, genConfig sqlparser.ExprGeneratorConfig, maxTables, maxAggrs, maxGBs int, schemaTables []tableT) *queryGenerator { + return &queryGenerator{ + selGen: newSelectGenerator(r, genConfig, maxTables, maxAggrs, maxGBs, schemaTables), + } +} + +func newSelectGenerator(r *rand.Rand, genConfig sqlparser.ExprGeneratorConfig, maxTables, maxAggrs, maxGBs int, schemaTables []tableT) *selectGenerator { + if maxTables <= 0 { + log.Fatalf("maxTables must be at least 1, currently %d\n", maxTables) + } + + return &selectGenerator{ + r: r, + genConfig: genConfig, + maxTables: maxTables, + maxAggrs: maxAggrs, + maxGBs: maxGBs, + schemaTables: schemaTables, + sel: &sqlparser.Select{}, + } +} + +// getASTExpr returns the AST representation of a column +func (c *column) getASTExpr() sqlparser.Expr { + return sqlparser.NewColNameWithQualifier(c.name, sqlparser.NewTableName(c.tableName)) +} + +// getName returns the alias if it is nonempty +// if the alias is nonempty and tableExpr is of type sqlparser.TableName, +// then getName returns Name from tableExpr +// otherwise getName returns an empty string +func (t *tableT) getName() string { + if t.alias != "" { + return t.alias + } else if tName, ok := t.tableExpr.(sqlparser.TableName); ok { + return sqlparser.String(tName.Name) + } + + return "" +} + +// setAlias sets the alias for t, as well as setting the tableName for all columns in cols +func (t *tableT) setAlias(newName string) { + t.alias = newName + for i := range t.cols { + t.cols[i].tableName = newName + } +} + +// addColumns adds columns to t, and automatically assigns each column.tableName +// this makes it unnatural to modify tableName +func (t 
*tableT) addColumns(col ...column) { + for i := range col { + col[i].tableName = t.getName() + t.cols = append(t.cols, col[i]) + } +} + +func (t *tableT) clone() *tableT { + return &tableT{ + tableExpr: sqlparser.CloneSimpleTableExpr(t.tableExpr), + alias: t.alias, + cols: slices.Clone(t.cols), + } +} + +func (c *column) Generate(_ *rand.Rand, genConfig sqlparser.ExprGeneratorConfig) sqlparser.Expr { + if c.typ == genConfig.Type || genConfig.Type == "" { + return c.getASTExpr() + } + + return nil +} + +func (t *tableT) Generate(r *rand.Rand, genConfig sqlparser.ExprGeneratorConfig) sqlparser.Expr { + colsCopy := slices.Clone(t.cols) + + for len(colsCopy) > 0 { + idx := r.Intn(len(colsCopy)) + randCol := colsCopy[idx] + if randCol.typ == genConfig.Type || genConfig.Type == "" { + return randCol.getASTExpr() + } + + // delete randCol from colsCopy + colsCopy[idx] = colsCopy[len(colsCopy)-1] + colsCopy = colsCopy[:len(colsCopy)-1] + } + + return nil +} + +// Generate generates a subquery based on sg +// TODO: currently unused; generate random expressions with union +func (sg *selectGenerator) Generate(r *rand.Rand, genConfig sqlparser.ExprGeneratorConfig) sqlparser.Expr { + var schemaTablesCopy []tableT + for _, tbl := range sg.schemaTables { + schemaTablesCopy = append(schemaTablesCopy, *tbl.clone()) + } + + newSG := newQueryGenerator(r, genConfig, sg.maxTables, sg.maxAggrs, sg.maxGBs, schemaTablesCopy) + newSG.randomQuery() + + return &sqlparser.Subquery{Select: newSG.selGen.sel} +} + +// Generate generates a subquery based on qg +func (qg *queryGenerator) Generate(r *rand.Rand, genConfig sqlparser.ExprGeneratorConfig) sqlparser.Expr { + var schemaTablesCopy []tableT + for _, tbl := range qg.selGen.schemaTables { + schemaTablesCopy = append(schemaTablesCopy, *tbl.clone()) + } + + newQG := newQueryGenerator(r, genConfig, qg.selGen.maxTables, qg.selGen.maxAggrs, qg.selGen.maxGBs, schemaTablesCopy) + newQG.randomQuery() + + return &sqlparser.Subquery{Select: 
newQG.stmt} +} + +func (sg *selectGenerator) IsQueryGenerator() {} +func (qg *queryGenerator) IsQueryGenerator() {} + +func (qg *queryGenerator) randomQuery() { + if qg.selGen.r.Intn(10) < 1 && testFailingQueries { + qg.createUnion() + } else { + qg.selGen.randomSelect() + qg.stmt = qg.selGen.sel + } +} + +// createUnion creates a simple UNION or UNION ALL; no LIMIT or ORDER BY +func (qg *queryGenerator) createUnion() { + union := &sqlparser.Union{} + + if qg.selGen.r.Intn(2) < 1 { + union.Distinct = true + } + + // specify between 1-4 columns + qg.selGen.genConfig.NumCols = qg.selGen.r.Intn(4) + 1 + + qg.randomQuery() + union.Left = qg.stmt + qg.randomQuery() + union.Right = qg.stmt + + qg.stmt = union +} + +func (sg *selectGenerator) randomSelect() { + // make sure the random expressions can generally not contain aggregates; change appropriately + sg.genConfig = sg.genConfig.CannotAggregateConfig() + + sg.sel = &sqlparser.Select{} + sg.sel.SetComments(sqlparser.Comments{"/*vt+ PLANNER=Gen4 */"}) + + // select distinct (fails with group by bigint) + isDistinct := sg.r.Intn(2) < 1 + if isDistinct { + sg.sel.MakeDistinct() + } + + // create both tables and join at the same time since both occupy the from clause + tables, isJoin := sg.createTablesAndJoin() + + // canAggregate determines if the query will have + // aggregate columns, group by, and having + canAggregate := sg.r.Intn(4) < 3 + + var ( + grouping, aggregates []column + newTable tableT + ) + // TODO: distinct makes vitess think there is grouping on aggregation columns + if canAggregate { + if testFailingQueries || !isDistinct { + // group by + if !sg.genConfig.SingleRow { + grouping = sg.createGroupBy(tables) + } + } + + // having + isHaving := sg.r.Intn(2) < 1 + // TODO: having creates a lot of results mismatched + if isHaving && testFailingQueries { + sg.createHavingPredicates(grouping) + } + + // alias the grouping columns + grouping = sg.aliasGroupingColumns(grouping) + + // aggregation columns + 
aggregates = sg.createAggregations(tables) + + // add the grouping and aggregation to newTable + newTable.addColumns(grouping...) + newTable.addColumns(aggregates...) + } + + // where + sg.createWherePredicates(tables) + + // add random expression to select + // TODO: random expressions cause a lot of failures + isRandomExpr := sg.r.Intn(2) < 1 && testFailingQueries + + // TODO: selecting a random expression potentially with columns creates + // TODO: only_full_group_by related errors in Vitess + var exprGenerators []sqlparser.ExprGenerator + if canAggregate && testFailingQueries { + exprGenerators = slice.Map(tables, func(t tableT) sqlparser.ExprGenerator { return &t }) + // add scalar subqueries + if sg.r.Intn(10) < 1 { + exprGenerators = append(exprGenerators, sg) + } + } + + // make sure we have at least one select expression + for isRandomExpr || len(sg.sel.SelectExprs) == 0 { + // TODO: if the random expression is an int literal, + // TODO: and if the query is (potentially) an aggregate query, + // TODO: then we must group by the random expression, + // TODO: but we cannot do this for int literals, + // TODO: so we loop until we get a non-int-literal random expression + // TODO: this is necessary because grouping by the alias (crandom0) currently fails on vitess + randomExpr := sg.getRandomExpr(exprGenerators...) 
+ literal, ok := randomExpr.(*sqlparser.Literal) + isIntLiteral := ok && literal.Type == sqlparser.IntVal + if isIntLiteral && canAggregate { + continue + } + + // TODO: select distinct [literal] fails + sg.sel.Distinct = false + + // alias randomly + col := sg.randomlyAlias(randomExpr, "crandom0") + newTable.addColumns(col) + + // make sure to add the random expression to group by for only_full_group_by + if canAggregate { + sg.sel.AddGroupBy(randomExpr) + } + + break + } + + // can add both aggregate and grouping columns to order by + // TODO: order fails with distinct and outer joins + isOrdered := sg.r.Intn(2) < 1 && (!isDistinct || testFailingQueries) && (!isJoin || testFailingQueries) + if isOrdered || (!canAggregate && sg.genConfig.SingleRow) /* TODO: might be redundant */ { + sg.createOrderBy() + } + + // only add a limit if there is an ordering + // TODO: limit fails a lot + isLimit := sg.r.Intn(2) < 1 && len(sg.sel.OrderBy) > 0 && testFailingQueries + if isLimit || (!canAggregate && sg.genConfig.SingleRow) /* TODO: might be redundant */ { + sg.createLimit() + } + + // this makes sure the query generated has the correct number of columns (sg.selGen.genConfig.numCols) + newTable = sg.matchNumCols(tables, newTable, canAggregate) + + // add new table to schemaTables + newTable.tableExpr = sqlparser.NewDerivedTable(false, sg.sel) + sg.schemaTables = append(sg.schemaTables, newTable) + + // derived tables (partially unsupported) + if sg.r.Intn(10) < 1 { + sg.randomSelect() + } +} + +func (sg *selectGenerator) createTablesAndJoin() ([]tableT, bool) { + var tables []tableT + // add at least one of original emp/dept tables + tables = append(tables, sg.schemaTables[sg.r.Intn(2)]) + + tables[0].setAlias("tbl0") + sg.sel.From = append(sg.sel.From, newAliasedTable(tables[0], "tbl0")) + + numTables := sg.r.Intn(sg.maxTables) + for i := 0; i < numTables; i++ { + tables = append(tables, randomEl(sg.r, sg.schemaTables)) + alias := fmt.Sprintf("tbl%d", i+1) + sg.sel.From = 
append(sg.sel.From, newAliasedTable(tables[i+1], alias)) + tables[i+1].setAlias(alias) + } + + // TODO: outer joins produce results mismatched + isJoin := sg.r.Intn(2) < 1 && testFailingQueries + if isJoin { + // TODO: do nested joins + newTable := randomEl(sg.r, sg.schemaTables) + alias := fmt.Sprintf("tbl%d", numTables+1) + newTable.setAlias(alias) + tables = append(tables, newTable) + + sg.createJoin(tables) + } + + return tables, isJoin +} + +// creates a left join (without the condition) between the last table in sel and newTable +// tables should have one more table than sel +func (sg *selectGenerator) createJoin(tables []tableT) { + n := len(sg.sel.From) + if len(tables) != n+1 { + log.Fatalf("sel has %d tables and tables has %d tables", len(sg.sel.From), n) + } + + joinPredicate := sqlparser.AndExpressions(sg.createJoinPredicates(tables)...) + joinCondition := sqlparser.NewJoinCondition(joinPredicate, nil) + newTable := newAliasedTable(tables[n], fmt.Sprintf("tbl%d", n)) + sg.sel.From[n-1] = sqlparser.NewJoinTableExpr(sg.sel.From[n-1], getRandomJoinType(sg.r), newTable, joinCondition) +} + +// returns 1-3 random expressions based on the last two elements of tables +// tables should have at least two elements +func (sg *selectGenerator) createJoinPredicates(tables []tableT) sqlparser.Exprs { + if len(tables) < 2 { + log.Fatalf("tables has %d elements, needs at least 2", len(tables)) + } + + exprGenerators := []sqlparser.ExprGenerator{&tables[len(tables)-2], &tables[len(tables)-1]} + // add scalar subqueries + // TODO: subqueries fail + if sg.r.Intn(10) < 1 && testFailingQueries { + exprGenerators = append(exprGenerators, sg) + } + + return sg.createRandomExprs(1, 3, exprGenerators...) 
+} + +// returns the grouping columns as []column +func (sg *selectGenerator) createGroupBy(tables []tableT) (grouping []column) { + if sg.maxGBs <= 0 { + return + } + numGBs := sg.r.Intn(sg.maxGBs + 1) + for i := 0; i < numGBs; i++ { + tblIdx := sg.r.Intn(len(tables)) + col := randomEl(sg.r, tables[tblIdx].cols) + // TODO: grouping by a date column sometimes errors + if col.typ == "date" && !testFailingQueries { + continue + } + sg.sel.GroupBy = append(sg.sel.GroupBy, col.getASTExpr()) + + // add to select + if sg.r.Intn(2) < 1 { + sg.sel.SelectExprs = append(sg.sel.SelectExprs, newAliasedColumn(col, "")) + grouping = append(grouping, col) + } + } + + return +} + +// aliasGroupingColumns randomly aliases the grouping columns in the SelectExprs +func (sg *selectGenerator) aliasGroupingColumns(grouping []column) []column { + if len(grouping) != len(sg.sel.SelectExprs) { + log.Fatalf("grouping (length: %d) and sg.sel.SelectExprs (length: %d) should have the same length at this point", len(grouping), len(sg.sel.SelectExprs)) + } + + for i := range grouping { + if sg.r.Intn(2) < 1 { + if aliasedExpr, ok := sg.sel.SelectExprs[i].(*sqlparser.AliasedExpr); ok { + alias := fmt.Sprintf("cgroup%d", i) + aliasedExpr.SetAlias(alias) + grouping[i].name = alias + } + } + } + + return grouping +} + +// returns the aggregation columns as three types: sqlparser.SelectExprs, []column +func (sg *selectGenerator) createAggregations(tables []tableT) (aggregates []column) { + exprGenerators := slice.Map(tables, func(t tableT) sqlparser.ExprGenerator { return &t }) + // add scalar subqueries + // TODO: subqueries fail + if sg.r.Intn(10) < 1 && testFailingQueries { + exprGenerators = append(exprGenerators, sg) + } + + sg.genConfig = sg.genConfig.IsAggregateConfig() + aggrExprs := sg.createRandomExprs(0, sg.maxAggrs, exprGenerators...) 
+ sg.genConfig = sg.genConfig.CannotAggregateConfig() + + for i, expr := range aggrExprs { + col := sg.randomlyAlias(expr, fmt.Sprintf("caggr%d", i)) + aggregates = append(aggregates, col) + } + + return +} + +// orders on all grouping expressions and on random SelectExprs +func (sg *selectGenerator) createOrderBy() { + // always order on grouping expressions + for _, expr := range sg.sel.GroupBy { + sg.sel.OrderBy = append(sg.sel.OrderBy, sqlparser.NewOrder(expr, getRandomOrderDirection(sg.r))) + } + + // randomly order on SelectExprs + for _, selExpr := range sg.sel.SelectExprs { + if aliasedExpr, ok := selExpr.(*sqlparser.AliasedExpr); ok && sg.r.Intn(2) < 1 { + literal, ok := aliasedExpr.Expr.(*sqlparser.Literal) + isIntLiteral := ok && literal.Type == sqlparser.IntVal + if isIntLiteral { + continue + } + sg.sel.OrderBy = append(sg.sel.OrderBy, sqlparser.NewOrder(aliasedExpr.Expr, getRandomOrderDirection(sg.r))) + } + } +} + +// returns 0-2 random expressions based on tables +func (sg *selectGenerator) createWherePredicates(tables []tableT) { + exprGenerators := slice.Map(tables, func(t tableT) sqlparser.ExprGenerator { return &t }) + // add scalar subqueries + // TODO: subqueries fail + if sg.r.Intn(10) < 1 && testFailingQueries { + exprGenerators = append(exprGenerators, sg) + } + + predicates := sg.createRandomExprs(0, 2, exprGenerators...) + sg.sel.AddWhere(sqlparser.AndExpressions(predicates...)) +} + +// creates predicates for the having clause comparing a column to a random expression +func (sg *selectGenerator) createHavingPredicates(grouping []column) { + exprGenerators := slice.Map(grouping, func(c column) sqlparser.ExprGenerator { return &c }) + // add scalar subqueries + // TODO: subqueries fail + if sg.r.Intn(10) < 1 && testFailingQueries { + exprGenerators = append(exprGenerators, sg) + } + + sg.genConfig = sg.genConfig.CanAggregateConfig() + predicates := sg.createRandomExprs(0, 2, exprGenerators...) 
+ sg.genConfig = sg.genConfig.CannotAggregateConfig() + + sg.sel.AddHaving(sqlparser.AndExpressions(predicates...)) +} + +// returns between minExprs and maxExprs random expressions using generators +func (sg *selectGenerator) createRandomExprs(minExprs, maxExprs int, generators ...sqlparser.ExprGenerator) (predicates sqlparser.Exprs) { + if minExprs > maxExprs { + log.Fatalf("minExprs is greater than maxExprs; minExprs: %d, maxExprs: %d\n", minExprs, maxExprs) + } else if maxExprs <= 0 { + return + } + numPredicates := sg.r.Intn(maxExprs-minExprs+1) + minExprs + for i := 0; i < numPredicates; i++ { + predicates = append(predicates, sg.getRandomExpr(generators...)) + } + + return +} + +// getRandomExpr returns a random expression +func (sg *selectGenerator) getRandomExpr(generators ...sqlparser.ExprGenerator) sqlparser.Expr { + var g *sqlparser.Generator + if generators == nil { + g = sqlparser.NewGenerator(sg.r, 2) + } else { + g = sqlparser.NewGenerator(sg.r, 2, generators...) + } + + return g.Expression(sg.genConfig.SingleRowConfig().SetNumCols(1)) +} + +// creates sel.Limit +func (sg *selectGenerator) createLimit() { + if sg.genConfig.SingleRow { + sg.sel.Limit = sqlparser.NewLimitWithoutOffset(1) + return + } + + limitNum := sg.r.Intn(10) + if sg.r.Intn(2) < 1 { + offset := sg.r.Intn(10) + sg.sel.Limit = sqlparser.NewLimit(offset, limitNum) + } else { + sg.sel.Limit = sqlparser.NewLimitWithoutOffset(limitNum) + } +} + +// randomlyAlias randomly aliases expr with alias alias, adds it to sel.SelectExprs, and returns the column created +func (sg *selectGenerator) randomlyAlias(expr sqlparser.Expr, alias string) column { + var col column + if sg.r.Intn(2) < 1 { + alias = "" + col.name = sqlparser.String(expr) + } else { + col.name = alias + } + sg.sel.SelectExprs = append(sg.sel.SelectExprs, sqlparser.NewAliasedExpr(expr, alias)) + + return col +} + +// matchNumCols makes sure sg.sel.SelectExprs and newTable both have the same number of cols: sg.genConfig.NumCols 
+func (sg *selectGenerator) matchNumCols(tables []tableT, newTable tableT, canAggregate bool) tableT { + // remove SelectExprs and newTable.cols randomly until there are sg.genConfig.NumCols amount + for len(sg.sel.SelectExprs) > sg.genConfig.NumCols && sg.genConfig.NumCols > 0 { + // select a random index and remove it from SelectExprs and newTable + idx := sg.r.Intn(len(sg.sel.SelectExprs)) + + sg.sel.SelectExprs[idx] = sg.sel.SelectExprs[len(sg.sel.SelectExprs)-1] + sg.sel.SelectExprs = sg.sel.SelectExprs[:len(sg.sel.SelectExprs)-1] + + newTable.cols[idx] = newTable.cols[len(newTable.cols)-1] + newTable.cols = newTable.cols[:len(newTable.cols)-1] + } + + // alternatively, add random expressions until there are sg.genConfig.NumCols amount + if sg.genConfig.NumCols > len(sg.sel.SelectExprs) { + diff := sg.genConfig.NumCols - len(sg.sel.SelectExprs) + exprs := sg.createRandomExprs(diff, diff, + slice.Map(tables, func(t tableT) sqlparser.ExprGenerator { return &t })...) + + for i, expr := range exprs { + col := sg.randomlyAlias(expr, fmt.Sprintf("crandom%d", i+1)) + newTable.addColumns(col) + + if canAggregate { + sg.sel.AddGroupBy(expr) + } + } + } + + return newTable +} + +func getRandomOrderDirection(r *rand.Rand) sqlparser.OrderDirection { + // asc, desc + return randomEl(r, []sqlparser.OrderDirection{0, 1}) +} + +func getRandomJoinType(r *rand.Rand) sqlparser.JoinType { + // normal, straight, left, right, natural, natural left, natural right + return randomEl(r, []sqlparser.JoinType{0, 1, 2, 3, 4, 5, 6}) +} + +func randomEl[K any](r *rand.Rand, in []K) K { + return in[r.Intn(len(in))] +} + +func newAliasedTable(tbl tableT, alias string) *sqlparser.AliasedTableExpr { + return sqlparser.NewAliasedTableExpr(tbl.tableExpr, alias) +} + +func newAliasedColumn(col column, alias string) *sqlparser.AliasedExpr { + return sqlparser.NewAliasedExpr(col.getASTExpr(), alias) +} diff --git a/go/test/endtoend/vtgate/queries/random/query_gen_test.go 
b/go/test/endtoend/vtgate/queries/random/query_gen_test.go new file mode 100644 index 00000000000..fe8aa6f6492 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/random/query_gen_test.go @@ -0,0 +1,62 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package random + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" +) + +// TestSeed makes sure that the seed is deterministic +func TestSeed(t *testing.T) { + // specify the schema (that is defined in schema.sql) + schemaTables := []tableT{ + {tableExpr: sqlparser.NewTableName("emp")}, + {tableExpr: sqlparser.NewTableName("dept")}, + } + schemaTables[0].addColumns([]column{ + {name: "empno", typ: "bigint"}, + {name: "ename", typ: "varchar"}, + {name: "job", typ: "varchar"}, + {name: "mgr", typ: "bigint"}, + {name: "hiredate", typ: "date"}, + {name: "sal", typ: "bigint"}, + {name: "comm", typ: "bigint"}, + {name: "deptno", typ: "bigint"}, + }...) + schemaTables[1].addColumns([]column{ + {name: "deptno", typ: "bigint"}, + {name: "dname", typ: "varchar"}, + {name: "loc", typ: "varchar"}, + }...) 
+ + seed := int64(1689757943775102000) + genConfig := sqlparser.NewExprGeneratorConfig(sqlparser.CannotAggregate, "", 0, false) + qg := newQueryGenerator(rand.New(rand.NewSource(seed)), genConfig, 2, 2, 2, schemaTables) + qg.randomQuery() + query1 := sqlparser.String(qg.stmt) + qg = newQueryGenerator(rand.New(rand.NewSource(seed)), genConfig, 2, 2, 2, schemaTables) + qg.randomQuery() + query2 := sqlparser.String(qg.stmt) + fmt.Println(query1) + require.Equal(t, query1, query2) +} diff --git a/go/test/endtoend/vtgate/queries/random/random_expr_test.go b/go/test/endtoend/vtgate/queries/random/random_expr_test.go new file mode 100644 index 00000000000..450169a8d9f --- /dev/null +++ b/go/test/endtoend/vtgate/queries/random/random_expr_test.go @@ -0,0 +1,59 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package random + +import ( + "math/rand" + "testing" + "time" + + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/vt/sqlparser" +) + +// This test tests that generating random expressions with a schema does not panic +func TestRandomExprWithTables(t *testing.T) { + // specify the schema (that is defined in schema.sql) + schemaTables := []tableT{ + {tableExpr: sqlparser.NewTableName("emp")}, + {tableExpr: sqlparser.NewTableName("dept")}, + } + schemaTables[0].addColumns([]column{ + {name: "empno", typ: "bigint"}, + {name: "ename", typ: "varchar"}, + {name: "job", typ: "varchar"}, + {name: "mgr", typ: "bigint"}, + {name: "hiredate", typ: "date"}, + {name: "sal", typ: "bigint"}, + {name: "comm", typ: "bigint"}, + {name: "deptno", typ: "bigint"}, + }...) + schemaTables[1].addColumns([]column{ + {name: "deptno", typ: "bigint"}, + {name: "dname", typ: "varchar"}, + {name: "loc", typ: "varchar"}, + }...) + + for i := 0; i < 100; i++ { + + seed := time.Now().UnixNano() + r := rand.New(rand.NewSource(seed)) + genConfig := sqlparser.NewExprGeneratorConfig(sqlparser.CanAggregate, "", 0, false) + g := sqlparser.NewGenerator(r, 3, slice.Map(schemaTables, func(t tableT) sqlparser.ExprGenerator { return &t })...) + g.Expression(genConfig) + } +} diff --git a/go/test/endtoend/vtgate/queries/random/random_test.go b/go/test/endtoend/vtgate/queries/random/random_test.go new file mode 100644 index 00000000000..7b0ab93c165 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/random/random_test.go @@ -0,0 +1,371 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package random + +import ( + "fmt" + "math/rand" + "strings" + "testing" + "time" + + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/vt/sqlparser" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" +) + +// this test uses the AST defined in the sqlparser package to randomly generate queries + +// if true then execution will always stop on a "must fix" error: a results mismatched or EOF +const stopOnMustFixError = false + +func start(t *testing.T) (utils.MySQLCompare, func()) { + mcmp, err := utils.NewMySQLCompare(t, vtParams, mysqlParams) + require.NoError(t, err) + + deleteAll := func() { + _, _ = utils.ExecAllowError(t, mcmp.VtConn, "set workload = oltp") + + tables := []string{"emp", "dept"} + for _, table := range tables { + _, _ = mcmp.ExecAndIgnore("delete from " + table) + } + } + + deleteAll() + + // disable only_full_group_by + // mcmp.Exec("set sql_mode=''") + + // insert data + mcmp.Exec("INSERT INTO emp(empno, ename, job, mgr, hiredate, sal, comm, deptno) VALUES (7369,'SMITH','CLERK',7902,'1980-12-17',800,NULL,20), (7499,'ALLEN','SALESMAN',7698,'1981-02-20',1600,300,30), (7521,'WARD','SALESMAN',7698,'1981-02-22',1250,500,30), (7566,'JONES','MANAGER',7839,'1981-04-02',2975,NULL,20), (7654,'MARTIN','SALESMAN',7698,'1981-09-28',1250,1400,30), (7698,'BLAKE','MANAGER',7839,'1981-05-01',2850,NULL,30), (7782,'CLARK','MANAGER',7839,'1981-06-09',2450,NULL,10), (7788,'SCOTT','ANALYST',7566,'1982-12-09',3000,NULL,20), 
(7839,'KING','PRESIDENT',NULL,'1981-11-17',5000,NULL,10), (7844,'TURNER','SALESMAN',7698,'1981-09-08',1500,0,30), (7876,'ADAMS','CLERK',7788,'1983-01-12',1100,NULL,20), (7900,'JAMES','CLERK',7698,'1981-12-03',950,NULL,30), (7902,'FORD','ANALYST',7566,'1981-12-03',3000,NULL,20), (7934,'MILLER','CLERK',7782,'1982-01-23',1300,NULL,10)") + mcmp.Exec("INSERT INTO dept(deptno, dname, loc) VALUES ('10','ACCOUNTING','NEW YORK'), ('20','RESEARCH','DALLAS'), ('30','SALES','CHICAGO'), ('40','OPERATIONS','BOSTON')") + + return mcmp, func() { + deleteAll() + mcmp.Close() + cluster.PanicHandler(t) + } +} + +func helperTest(t *testing.T, query string) { + t.Helper() + t.Run(query, func(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + result, err := mcmp.ExecAllowAndCompareError(query) + fmt.Println(result) + fmt.Println(err) + }) +} + +func TestMustFix(t *testing.T) { + t.Skip("Skip CI") + + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "emp", clusterInstance.VtgateProcess.ReadVSchema)) + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "dept", clusterInstance.VtgateProcess.ReadVSchema)) + + // results mismatched + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct case count(*) when 0 then -0 end from emp as tbl0, emp as tbl1 where 0") + + // results mismatched (maybe derived tables) + helperTest(t, "select /*vt+ PLANNER=Gen4 */ 0 as crandom0 from dept as tbl0, (select /*vt+ PLANNER=Gen4 */ distinct count(*) from emp as tbl1 where 0) as tbl1") + + // results mismatched + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct case count(distinct true) when 'b' then 't' end from emp as tbl1 where 's'") + + // results mismatched + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct sum(distinct tbl1.deptno) from dept as tbl0, emp as tbl1") + + // mismatched number of columns + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(*) + 0 from emp as tbl0 order by count(*) desc") + + // results mismatched (mismatched types) + helperTest(t, 
"select /*vt+ PLANNER=Gen4 */ count(0 >> 0), sum(distinct tbl2.empno) from emp as tbl0 left join emp as tbl2 on -32") + + // results mismatched (decimals off by a little; evalengine problem) + helperTest(t, "select /*vt+ PLANNER=Gen4 */ sum(case false when true then tbl1.deptno else -154 / 132 end) as caggr1 from emp as tbl0, dept as tbl1") + + // EOF + helperTest(t, "select /*vt+ PLANNER=Gen4 */ tbl1.dname as cgroup0, tbl1.dname as cgroup1, tbl1.deptno as crandom0 from dept as tbl0, dept as tbl1 group by tbl1.dname, tbl1.deptno order by tbl1.deptno desc") + + // results mismatched + // limit >= 9 works + helperTest(t, "select /*vt+ PLANNER=Gen4 */ tbl0.ename as cgroup1 from emp as tbl0 group by tbl0.job, tbl0.ename having sum(tbl0.mgr) order by tbl0.job desc, tbl0.ename asc limit 8") + + // results mismatched + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct count(*) as caggr1 from emp as tbl1 group by tbl1.sal having max(0) != true") + + // results mismatched + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct 0 as caggr0 from dept as tbl0, dept as tbl1 group by tbl1.deptno having max(0) <= 0") + + // results mismatched + helperTest(t, "select /*vt+ PLANNER=Gen4 */ min(0) as caggr0 from dept as tbl0, emp as tbl1 where case when false then tbl0.dname end group by tbl1.comm") + + // results mismatched + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(*) as caggr0, 0 as crandom0 from dept as tbl0, emp as tbl1 where 0") + + // results mismatched + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(*) as caggr0, 0 as crandom0 from dept as tbl0, emp as tbl1 where 'o'") + + // similar to previous two + // results mismatched + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct 'o' as crandom0 from dept as tbl0, emp as tbl1 where 0 having count(*) = count(*)") + + // results mismatched (group by + right join) + // left instead of right works + // swapping tables and predicates and changing to left fails + helperTest(t, "select /*vt+ PLANNER=Gen4 */ 0 from dept 
as tbl0 right join emp as tbl1 on tbl0.deptno = tbl1.empno and tbl0.deptno = tbl1.deptno group by tbl0.deptno") + + // results mismatched (count + right join) + // left instead of right works + // swapping tables and predicates and changing to left fails + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(tbl1.comm) from emp as tbl1 right join emp as tbl2 on tbl1.mgr = tbl2.sal") + + // Passes with different errors + // vitess error: EOF + // mysql error: Operand should contain 1 column(s) + helperTest(t, "select /*vt+ PLANNER=Gen4 */ 8 < -31 xor (-29, sum((tbl0.deptno, 'wren', 'ostrich')), max(distinct (tbl0.dname, -15, -8))) in ((sum(distinct (tbl0.dname, 'bengal', -10)), 'ant', true)) as caggr0 from dept as tbl0 where tbl0.deptno * (77 - 61)") + + // EOF + helperTest(t, "select /*vt+ PLANNER=Gen4 */ tbl1.deptno as cgroup0, tbl1.loc as cgroup1, count(distinct tbl1.loc) as caggr1, tbl1.loc as crandom0 from dept as tbl0, dept as tbl1 group by tbl1.deptno, tbl1.loc") + + // EOF + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(*) from dept as tbl0, (select count(*) from emp as tbl0, emp as tbl1 limit 18) as tbl1") +} + +func TestKnownFailures(t *testing.T) { + t.Skip("Skip CI") + + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "emp", clusterInstance.VtgateProcess.ReadVSchema)) + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "dept", clusterInstance.VtgateProcess.ReadVSchema)) + + // logs more stuff + //clusterInstance.EnableGeneralLog() + + // column 'tbl1.`not exists (select 1 from dual)`' not found + helperTest(t, "select /*vt+ PLANNER=Gen4 */ tbl1.`not exists (select 1 from dual)`, count(*) from dept as tbl0, (select /*vt+ PLANNER=Gen4 */ not exists (select 1 from dual) from dept as tbl0 where tbl0.dname) as tbl1 group by tbl0.deptno, tbl1.`not exists (select 1 from dual)`") + + // VT13001: [BUG] failed to find the corresponding column + helperTest(t, "select /*vt+ PLANNER=Gen4 */ tbl1.dname as cgroup0, tbl1.dname as cgroup1 
from dept as tbl0, dept as tbl1 group by tbl1.dname, tbl1.deptno order by tbl1.deptno desc") + + // vitess error: + // mysql error: Operand should contain 1 column(s) + helperTest(t, "select (count('sheepdog') ^ (-71 % sum(emp.mgr) ^ count('koi')) and count(*), 'fly') from emp, dept") + + // rhs of an In operation should be a tuple + helperTest(t, "select /*vt+ PLANNER=Gen4 */ (case when true then min(distinct tbl1.job) else 'bee' end, 'molly') not in (('dane', 0)) as caggr1 from emp as tbl0, emp as tbl1") + + // VT13001: [BUG] in scatter query: complex ORDER BY expression: :vtg1 /* VARCHAR */ + helperTest(t, "select /*vt+ PLANNER=Gen4 */ tbl1.job as cgroup0, sum(distinct 'mudfish'), tbl1.job as crandom0 from emp as tbl0, emp as tbl1 group by tbl1.job order by tbl1.job asc limit 8, 1") + + // VT13001: [BUG] column should not be pushed to projection while doing a column lookup + helperTest(t, "select /*vt+ PLANNER=Gen4 */ -26 in (tbl2.mgr, -8, tbl0.deptno) as crandom0 from dept as tbl0, emp as tbl1 left join emp as tbl2 on tbl2.ename") + + // unsupported: min/max on types that are not comparable is not supported + helperTest(t, "select /*vt+ PLANNER=Gen4 */ max(case true when false then 'gnu' when true then 'meerkat' end) as caggr0 from dept as tbl0") + + // vttablet: rpc error: code = InvalidArgument desc = BIGINT UNSIGNED value is out of range in '(-(273) + (-(15) & 124))' + helperTest(t, "select /*vt+ PLANNER=Gen4 */ -273 + (-15 & 124) as crandom0 from emp as tbl0, emp as tbl1 where tbl1.sal >= tbl1.mgr") + + // vitess error: cannot compare strings, collation is unknown or unsupported (collation ID: 0) + helperTest(t, "select /*vt+ PLANNER=Gen4 */ max(tbl1.dname) as caggr1 from dept as tbl0, dept as tbl1 group by tbl1.dname order by tbl1.dname asc") + + // vitess error: + // mysql error: Incorrect DATE value: 'tuna' + helperTest(t, "select /*vt+ PLANNER=Gen4 */ min(tbl0.empno) as caggr0 from emp as tbl0 where case 'gator' when false then 314 else 'weevil' end > 
tbl0.job having min(tbl0.hiredate) <=> 'tuna'") + + // vitess error: + // mysql error: Unknown column 'tbl0.deptno' in 'having clause' + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(*) as caggr0 from dept as tbl0 having tbl0.deptno") + + // coercion should not try to coerce this value: DATE("1980-12-17") + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct tbl1.hiredate as cgroup0, count(tbl1.mgr) as caggr0 from emp as tbl1 group by tbl1.hiredate, tbl1.ename") + + // only_full_group_by enabled + // vitess error: In aggregated query without GROUP BY, expression #1 of SELECT list contains nonaggregated column 'ks_random.tbl0.EMPNO'; this is incompatible with sql_mode=only_full_group_by + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct tbl0.empno as cgroup0, count(distinct 56) as caggr0, min('flounder' = 'penguin') as caggr1 from emp as tbl0, (select /*vt+ PLANNER=Gen4 */ 'manatee' as crandom0 from dept as tbl0 where -26 limit 2) as tbl2 where 'anteater' like 'catfish' is null and -11 group by tbl0.empno order by tbl0.empno asc, count(distinct 56) asc, min('flounder' = 'penguin') desc") + + // only_full_group_by enabled + // vitess error: + // mysql error: In aggregated query without GROUP BY, expression #1 of SELECT list contains nonaggregated column 'ks_random.tbl0.ENAME'; this is incompatible with sql_mode=only_full_group_by + helperTest(t, "select /*vt+ PLANNER=Gen4 */ tbl0.ename, min(tbl0.comm) from emp as tbl0 left join emp as tbl1 on tbl0.empno = tbl1.comm and tbl0.empno = tbl1.empno") + + // only_full_group_by enabled + // vitess error: + // mysql error: Expression #1 of ORDER BY clause is not in SELECT list, references column 'ks_random.tbl2.DNAME' which is not in SELECT list; this is incompatible with DISTINCT + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct count(*) as caggr0 from dept as tbl2 group by tbl2.dname order by tbl2.dname asc") + + // vttablet: rpc error: code = NotFound desc = Unknown column 'cgroup0' in 'field list' (errno 
1054) (sqlstate 42S22) (CallerID: userData1) + helperTest(t, "select /*vt+ PLANNER=Gen4 */ tbl1.ename as cgroup0, max(tbl0.comm) as caggr0 from emp as tbl0, emp as tbl1 group by cgroup0") + + // vttablet: rpc error: code = NotFound desc = Unknown column '347' in 'group statement' + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct 347 as crandom0 from emp as tbl0") + + // vttablet: rpc error: code = InvalidArgument desc = Can't group on 'count(*)' (errno 1056) (sqlstate 42000) (CallerID: userData1) + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct count(*) from dept as tbl0 group by tbl0.deptno") + + // unsupported + // VT12001: unsupported: only one DISTINCT aggregation is allowed in a SELECT: sum(distinct 1) as caggr1 + helperTest(t, "select /*vt+ PLANNER=Gen4 */ sum(distinct tbl0.comm) as caggr0, sum(distinct 1) as caggr1 from emp as tbl0 having 'redfish' < 'blowfish'") + + // unsupported + // VT12001: unsupported: aggregation on top of aggregation not supported + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(*) from dept as tbl1 join (select count(*) from emp as tbl0, dept as tbl1 group by tbl1.loc) as tbl2") + + // unsupported + // VT12001: unsupported: in scatter query: complex aggregate expression + helperTest(t, "select /*vt+ PLANNER=Gen4 */ (select count(*) from emp as tbl0) from emp as tbl0") + + // unsupported + // VT12001: unsupported: using aggregation on top of a *planbuilder.filter plan + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(tbl1.dname) as caggr1 from dept as tbl0 left join dept as tbl1 on tbl1.dname > tbl1.loc where tbl1.loc <=> tbl1.dname group by tbl1.dname order by tbl1.dname asc") + + // unsupported + // VT12001: unsupported: aggregation on top of aggregation not supported + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(*) from (select count(*) from dept as tbl0) as tbl0") + + // unsupported + // VT12001: unsupported: aggregation on top of aggregation not supported + helperTest(t, "select /*vt+ PLANNER=Gen4 */ 
count(*), count(*) from (select count(*) from dept as tbl0) as tbl0, dept as tbl1") + + // unsupported + // VT12001: unsupported: in scatter query: aggregation function 'avg(tbl0.deptno)' + helperTest(t, "select /*vt+ PLANNER=Gen4 */ avg(tbl0.deptno) from dept as tbl0") + + // unsupported + // VT12001: unsupported: LEFT JOIN with derived tables + helperTest(t, "select /*vt+ PLANNER=Gen4 */ -1 as crandom0 from emp as tbl2 left join (select count(*) from dept as tbl1) as tbl3 on 6 != tbl2.deptno") + + // unsupported + // VT12001: unsupported: subqueries in GROUP BY + helperTest(t, "select /*vt+ PLANNER=Gen4 */ exists (select 1) as crandom0 from dept as tbl0 group by exists (select 1)") +} + +func TestRandom(t *testing.T) { + t.Skip("Skip CI; random expressions generate too many failures to properly limit") + + mcmp, closer := start(t) + defer closer() + + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "emp", clusterInstance.VtgateProcess.ReadVSchema)) + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "dept", clusterInstance.VtgateProcess.ReadVSchema)) + + // specify the schema (that is defined in schema.sql) + schemaTables := []tableT{ + {tableExpr: sqlparser.NewTableName("emp")}, + {tableExpr: sqlparser.NewTableName("dept")}, + } + schemaTables[0].addColumns([]column{ + {name: "empno", typ: "bigint"}, + {name: "ename", typ: "varchar"}, + {name: "job", typ: "varchar"}, + {name: "mgr", typ: "bigint"}, + {name: "hiredate", typ: "date"}, + {name: "sal", typ: "bigint"}, + {name: "comm", typ: "bigint"}, + {name: "deptno", typ: "bigint"}, + }...) + schemaTables[1].addColumns([]column{ + {name: "deptno", typ: "bigint"}, + {name: "dname", typ: "varchar"}, + {name: "loc", typ: "varchar"}, + }...) 
+ + endBy := time.Now().Add(1 * time.Second) + + var queryCount, queryFailCount int + // continue testing after an error if and only if testFailingQueries is true + for time.Now().Before(endBy) && (!t.Failed() || !testFailingQueries) { + seed := time.Now().UnixNano() + genConfig := sqlparser.NewExprGeneratorConfig(sqlparser.CannotAggregate, "", 0, false) + qg := newQueryGenerator(rand.New(rand.NewSource(seed)), genConfig, 2, 2, 2, schemaTables) + qg.randomQuery() + query := sqlparser.String(qg.stmt) + _, vtErr := mcmp.ExecAllowAndCompareError(query) + + // this assumes all queries are valid mysql queries + if vtErr != nil { + fmt.Printf("seed: %d\n", seed) + fmt.Println(query) + fmt.Println(vtErr) + + if stopOnMustFixError { + // results mismatched + if strings.Contains(vtErr.Error(), "results mismatched") { + simplified := simplifyResultsMismatchedQuery(t, query) + fmt.Printf("final simplified query: %s\n", simplified) + break + } + // EOF + if sqlError, ok := vtErr.(*sqlerror.SQLError); ok && strings.Contains(sqlError.Message, "EOF") { + break + } + } + + // restart the mysql and vitess connections in case something bad happened + closer() + mcmp, closer = start(t) + + fmt.Printf("\n\n\n") + queryFailCount++ + } + queryCount++ + } + fmt.Printf("Queries successfully executed: %d\n", queryCount) + fmt.Printf("Queries failed: %d\n", queryFailCount) +} + +// these queries were previously failing and have now been fixed +func TestBuggyQueries(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "emp", clusterInstance.VtgateProcess.ReadVSchema)) + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "dept", clusterInstance.VtgateProcess.ReadVSchema)) + + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ sum(tbl1.sal) as caggr1 from emp as tbl0, emp as tbl1 group by tbl1.ename order by tbl1.ename asc") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ count(*), count(*), count(*) from dept as tbl0, emp as 
tbl1 where tbl0.deptno = tbl1.deptno group by tbl1.empno order by tbl1.empno") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ count(tbl0.deptno) from dept as tbl0, emp as tbl1 group by tbl1.job order by tbl1.job limit 3") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ count(*), count(*) from emp as tbl0 group by tbl0.empno order by tbl0.empno") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ distinct count(*), tbl0.loc from dept as tbl0 group by tbl0.loc") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ distinct count(*) from dept as tbl0 group by tbl0.loc") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ sum(tbl1.comm) from emp as tbl0, emp as tbl1") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ tbl1.mgr, tbl1.mgr, count(*) from emp as tbl1 group by tbl1.mgr") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ tbl1.mgr, tbl1.mgr, count(*) from emp as tbl0, emp as tbl1 group by tbl1.mgr") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ count(*), count(*), count(tbl0.comm) from emp as tbl0, emp as tbl1 join dept as tbl2") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ count(*), count(*) from (select count(*) from dept as tbl0 group by tbl0.deptno) as tbl0, dept as tbl1") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ count(*) from (select count(*) from dept as tbl0 group by tbl0.deptno) as tbl0") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ min(tbl0.loc) from dept as tbl0") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ tbl1.empno, max(tbl1.job) from dept as tbl0, emp as tbl1 group by tbl1.empno") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ tbl1.ename, max(tbl0.comm) from emp as tbl0, emp as tbl1 group by tbl1.ename") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ tbl0.dname, tbl0.dname, min(tbl0.deptno) from dept as tbl0, dept as tbl1 group by tbl0.dname, tbl0.dname") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ tbl0.dname, min(tbl1.deptno) from dept as tbl0, dept as tbl1 group by tbl0.dname, tbl1.dname") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ max(tbl0.hiredate) from emp as tbl0") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ 
min(tbl0.deptno) as caggr0, count(*) as caggr1 from dept as tbl0 left join dept as tbl1 on tbl1.loc = tbl1.dname") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ count(tbl1.loc) as caggr0 from dept as tbl1 left join dept as tbl2 on tbl1.loc = tbl2.loc where (tbl2.deptno)") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ sum(tbl1.ename), min(tbl0.empno) from emp as tbl0, emp as tbl1 left join dept as tbl2 on tbl1.job = tbl2.loc and tbl1.comm = tbl2.deptno where ('trout') and tbl0.deptno = tbl1.comm") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ distinct max(tbl0.deptno), count(tbl0.job) from emp as tbl0, dept as tbl1 left join dept as tbl2 on tbl1.dname = tbl2.loc and tbl1.dname = tbl2.loc where (tbl2.loc) and tbl0.deptno = tbl1.deptno") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ count(*), count(*) from (select count(*) from dept as tbl0 group by tbl0.deptno) as tbl0") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ distinct max(tbl0.dname) as caggr0, 'cattle' as crandom0 from dept as tbl0, emp as tbl1 where tbl0.deptno != tbl1.sal group by tbl1.comm") + +} diff --git a/go/test/endtoend/vtgate/queries/random/schema.sql b/go/test/endtoend/vtgate/queries/random/schema.sql new file mode 100644 index 00000000000..7ef4721a381 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/random/schema.sql @@ -0,0 +1,20 @@ +CREATE TABLE emp ( + EMPNO bigint NOT NULL, + ENAME VARCHAR(10), + JOB VARCHAR(9), + MGR bigint, + HIREDATE DATE, + SAL bigint, + COMM bigint, + DEPTNO bigint, + PRIMARY KEY (EMPNO) +) Engine = InnoDB + COLLATE = utf8mb4_general_ci; + +CREATE TABLE dept ( + DEPTNO bigint, + DNAME VARCHAR(14), + LOC VARCHAR(13), + PRIMARY KEY (DEPTNO) +) Engine = InnoDB + COLLATE = utf8mb4_general_ci; \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/random/simplifier_test.go b/go/test/endtoend/vtgate/queries/random/simplifier_test.go new file mode 100644 index 00000000000..478ee355d34 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/random/simplifier_test.go @@ -0,0 +1,116 
@@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package random + +import ( + "fmt" + "strings" + "testing" + + "vitess.io/vitess/go/test/vschemawrapper" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/planbuilder" + "vitess.io/vitess/go/vt/vtgate/simplifier" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +func TestSimplifyResultsMismatchedQuery(t *testing.T) { + t.Skip("Skip CI") + + var queries []string + queries = append(queries, "select /*vt+ PLANNER=Gen4 */ (68 - -16) / case false when -45 then 3 when 28 then -43 else -62 end as crandom0 from dept as tbl0, (select /*vt+ PLANNER=Gen4 */ distinct not not false and count(*) from emp as tbl0, emp as tbl1 where tbl1.ename) as tbl1 limit 1", + "select /*vt+ PLANNER=Gen4 */ distinct case true when 'burro' then 'trout' else 'elf' end < case count(distinct true) when 'bobcat' then 'turkey' else 'penguin' end from dept as tbl0, emp as tbl1 where 'spider'", + "select /*vt+ PLANNER=Gen4 */ distinct sum(distinct tbl1.deptno) from dept as tbl0, emp as tbl1 where tbl0.deptno and tbl1.comm in (12, tbl0.deptno, case false when 67 then -17 when -78 then -35 end, -76 >> -68)", + "select /*vt+ PLANNER=Gen4 */ count(*) + 1 from emp as tbl0 order by count(*) desc", + "select /*vt+ PLANNER=Gen4 */ count(2 >> tbl2.mgr), sum(distinct tbl2.empno <=> 15) from emp as tbl0 left join emp as tbl2 on -32", + 
"select /*vt+ PLANNER=Gen4 */ sum(case false when true then tbl1.deptno else -154 / 132 end) as caggr1 from emp as tbl0, dept as tbl1", + "select /*vt+ PLANNER=Gen4 */ tbl1.dname as cgroup0, tbl1.dname as cgroup1 from dept as tbl0, dept as tbl1 group by tbl1.dname, tbl1.deptno order by tbl1.deptno desc", + "select /*vt+ PLANNER=Gen4 */ tbl0.ename as cgroup1 from emp as tbl0 group by tbl0.job, tbl0.ename having sum(tbl0.mgr) = sum(tbl0.mgr) order by tbl0.job desc, tbl0.ename asc limit 8", + "select /*vt+ PLANNER=Gen4 */ distinct count(*) as caggr1 from dept as tbl0, emp as tbl1 group by tbl1.sal having max(tbl1.comm) != true", + "select /*vt+ PLANNER=Gen4 */ distinct sum(tbl1.loc) as caggr0 from dept as tbl0, dept as tbl1 group by tbl1.deptno having max(tbl1.dname) <= 1", + "select /*vt+ PLANNER=Gen4 */ min(tbl0.deptno) as caggr0 from dept as tbl0, emp as tbl1 where case when false then tbl0.dname end group by tbl1.comm", + "select /*vt+ PLANNER=Gen4 */ count(*) as caggr0, 1 as crandom0 from dept as tbl0, emp as tbl1 where 1 = 0", + "select /*vt+ PLANNER=Gen4 */ count(*) as caggr0, 1 as crandom0 from dept as tbl0, emp as tbl1 where 'octopus'", + "select /*vt+ PLANNER=Gen4 */ distinct 'octopus' as crandom0 from dept as tbl0, emp as tbl1 where tbl0.deptno = tbl1.empno having count(*) = count(*)", + "select /*vt+ PLANNER=Gen4 */ max(tbl0.deptno) from dept as tbl0 right join emp as tbl1 on tbl0.deptno = tbl1.empno and tbl0.deptno = tbl1.deptno group by tbl0.deptno", + "select /*vt+ PLANNER=Gen4 */ count(tbl1.comm) from emp as tbl1 right join emp as tbl2 on tbl1.mgr = tbl2.sal") + + for _, query := range queries { + var simplified string + t.Run("simplification "+query, func(t *testing.T) { + simplified = simplifyResultsMismatchedQuery(t, query) + }) + + t.Run("simplified "+query, func(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + mcmp.ExecAllowAndCompareError(simplified) + }) + + fmt.Printf("final simplified query: %s\n", simplified) + } +} + +// given 
a query that errors with results mismatched, simplifyResultsMismatchedQuery returns a simpler version with the same error +func simplifyResultsMismatchedQuery(t *testing.T, query string) string { + t.Helper() + mcmp, closer := start(t) + defer closer() + + _, err := mcmp.ExecAllowAndCompareError(query) + if err == nil { + t.Fatalf("query (%s) does not error", query) + } else if !strings.Contains(err.Error(), "mismatched") { + t.Fatalf("query (%s) does not error with results mismatched\nError: %v", query, err) + } + + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "emp", clusterInstance.VtgateProcess.ReadVSchema)) + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "dept", clusterInstance.VtgateProcess.ReadVSchema)) + + formal, err := vindexes.LoadFormal("svschema.json") + require.NoError(t, err) + vSchema := vindexes.BuildVSchema(formal) + vSchemaWrapper := &vschemawrapper.VSchemaWrapper{ + V: vSchema, + Version: planbuilder.Gen4, + } + + stmt, err := sqlparser.Parse(query) + require.NoError(t, err) + + simplified := simplifier.SimplifyStatement( + stmt.(sqlparser.SelectStatement), + vSchemaWrapper.CurrentDb(), + vSchemaWrapper, + func(statement sqlparser.SelectStatement) bool { + q := sqlparser.String(statement) + _, newErr := mcmp.ExecAllowAndCompareError(q) + if newErr == nil { + return false + } else { + return strings.Contains(newErr.Error(), "mismatched") + } + }, + ) + + return sqlparser.String(simplified) +} diff --git a/go/test/endtoend/vtgate/queries/random/svschema.json b/go/test/endtoend/vtgate/queries/random/svschema.json new file mode 100644 index 00000000000..ccbbc6ed3a6 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/random/svschema.json @@ -0,0 +1,6 @@ +{ + "keyspaces": { + "ks_random": { + } + } +} \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/random/vschema.json b/go/test/endtoend/vtgate/queries/random/vschema.json new file mode 100644 index 00000000000..21e31d5618c --- /dev/null +++ 
b/go/test/endtoend/vtgate/queries/random/vschema.json @@ -0,0 +1,26 @@ +{ + "sharded": true, + "vindexes": { + "hash": { + "type": "hash" + } + }, + "tables": { + "emp": { + "column_vindexes": [ + { + "column": "deptno", + "name": "hash" + } + ] + }, + "dept": { + "column_vindexes": [ + { + "column": "deptno", + "name": "hash" + } + ] + } + } +} \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/subquery/main_test.go b/go/test/endtoend/vtgate/queries/subquery/main_test.go index 2053518178d..9eaf3b4caa0 100644 --- a/go/test/endtoend/vtgate/queries/subquery/main_test.go +++ b/go/test/endtoend/vtgate/queries/subquery/main_test.go @@ -64,7 +64,7 @@ func TestMain(m *testing.M) { VSchema: VSchema, } clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal"} - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-schema-change-signal-interval", "0.1"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal"} err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false) if err != nil { return 1 diff --git a/go/test/endtoend/vtgate/queries/timeout/main_test.go b/go/test/endtoend/vtgate/queries/timeout/main_test.go new file mode 100644 index 00000000000..d71dc55ef46 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/timeout/main_test.go @@ -0,0 +1,110 @@ +/* +Copyright 2022 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package misc + +import ( + _ "embed" + "flag" + "fmt" + "os" + "testing" + + "vitess.io/vitess/go/test/endtoend/utils" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + mysqlParams mysql.ConnParams + keyspaceName = "ks_misc" + uks = "uks" + cell = "test_misc" + + //go:embed uschema.sql + uschemaSQL string + + //go:embed schema.sql + schemaSQL string + + //go:embed vschema.json + vschema string +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + return 1 + } + + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, + "--queryserver-config-max-result-size", "1000000", + "--queryserver-config-query-timeout", "200", + "--queryserver-config-query-pool-timeout", "200") + // Start Unsharded keyspace + ukeyspace := &cluster.Keyspace{ + Name: uks, + SchemaSQL: uschemaSQL, + } + err = clusterInstance.StartUnshardedKeyspace(*ukeyspace, 0, false) + if err != nil { + return 1 + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: schemaSQL, + VSchema: vschema, + } + err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false) + if err != nil { + return 1 + } + + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--query-timeout", "100") + // Start vtgate + err = clusterInstance.StartVtgate() + if err != nil { + return 1 + } + + vtParams = clusterInstance.GetVTParams(keyspaceName) + + // create mysql instance and connection parameters + conn, closer, err := utils.NewMySQL(clusterInstance, keyspaceName, schemaSQL) + if err != nil { + fmt.Println(err) + return 1 + } + defer closer() + mysqlParams = conn + return m.Run() 
+ }() + os.Exit(exitCode) +} diff --git a/go/test/endtoend/vtgate/queries/timeout/schema.sql b/go/test/endtoend/vtgate/queries/timeout/schema.sql new file mode 100644 index 00000000000..ceac0c07e6d --- /dev/null +++ b/go/test/endtoend/vtgate/queries/timeout/schema.sql @@ -0,0 +1,5 @@ +create table if not exists t1( + id1 bigint, + id2 bigint, + primary key(id1) +) Engine=InnoDB; \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/timeout/timeout_test.go b/go/test/endtoend/vtgate/queries/timeout/timeout_test.go new file mode 100644 index 00000000000..9c81a6c5822 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/timeout/timeout_test.go @@ -0,0 +1,100 @@ +/* +Copyright 2022 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package misc + +import ( + "testing" + + _ "github.com/go-sql-driver/mysql" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" +) + +func start(t *testing.T) (utils.MySQLCompare, func()) { + mcmp, err := utils.NewMySQLCompare(t, vtParams, mysqlParams) + require.NoError(t, err) + + deleteAll := func() { + tables := []string{"t1", "uks.unsharded"} + for _, table := range tables { + _, _ = mcmp.ExecAndIgnore("delete from " + table) + } + } + + deleteAll() + + return mcmp, func() { + deleteAll() + mcmp.Close() + cluster.PanicHandler(t) + } +} + +func TestQueryTimeoutWithDual(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + _, err := utils.ExecAllowError(t, mcmp.VtConn, "select sleep(0.04) from dual") + assert.NoError(t, err) + _, err = utils.ExecAllowError(t, mcmp.VtConn, "select sleep(0.24) from dual") + assert.Error(t, err) + _, err = utils.ExecAllowError(t, mcmp.VtConn, "set @@session.query_timeout=20") + require.NoError(t, err) + _, err = utils.ExecAllowError(t, mcmp.VtConn, "select sleep(0.04) from dual") + assert.Error(t, err) + _, err = utils.ExecAllowError(t, mcmp.VtConn, "select sleep(0.01) from dual") + assert.NoError(t, err) + _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=500 */ sleep(0.24) from dual") + assert.NoError(t, err) + _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=10 */ sleep(0.04) from dual") + assert.Error(t, err) + _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=15 */ sleep(0.001) from dual") + assert.NoError(t, err) +} + +func TestQueryTimeoutWithTables(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + // unsharded + utils.Exec(t, mcmp.VtConn, "insert /*vt+ QUERY_TIMEOUT_MS=1000 */ into uks.unsharded(id1) values (1),(2),(3),(4),(5)") + for i := 0; i < 12; i++ { + utils.Exec(t, mcmp.VtConn, "insert 
/*vt+ QUERY_TIMEOUT_MS=2000 */ into uks.unsharded(id1) select id1+5 from uks.unsharded") + } + + utils.Exec(t, mcmp.VtConn, "select count(*) from uks.unsharded where id1 > 31") + utils.Exec(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=100 */ count(*) from uks.unsharded where id1 > 31") + + // the query usually takes more than 5ms to return. So this should fail. + _, err := utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=1 */ count(*) from uks.unsharded where id1 > 31") + require.Error(t, err) + assert.Contains(t, err.Error(), "context deadline exceeded") + assert.Contains(t, err.Error(), "(errno 1317) (sqlstate 70100)") + + // sharded + utils.Exec(t, mcmp.VtConn, "insert /*vt+ QUERY_TIMEOUT_MS=1000 */ into ks_misc.t1(id1, id2) values (1,2),(2,4),(3,6),(4,8),(5,10)") + + // sleep take in seconds, so 0.1 is 100ms + utils.Exec(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=500 */ sleep(0.1) from t1 where id1 = 1") + _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=20 */ sleep(0.1) from t1 where id1 = 1") + require.Error(t, err) + assert.Contains(t, err.Error(), "context deadline exceeded") + assert.Contains(t, err.Error(), "(errno 1317) (sqlstate 70100)") +} diff --git a/go/test/endtoend/vtgate/queries/timeout/uschema.sql b/go/test/endtoend/vtgate/queries/timeout/uschema.sql new file mode 100644 index 00000000000..6ba158b134e --- /dev/null +++ b/go/test/endtoend/vtgate/queries/timeout/uschema.sql @@ -0,0 +1,5 @@ +create table unsharded( + id1 bigint, + id2 bigint, + key(id1) +) Engine=InnoDB; \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/timeout/vschema.json b/go/test/endtoend/vtgate/queries/timeout/vschema.json new file mode 100644 index 00000000000..60aa2bc9c07 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/timeout/vschema.json @@ -0,0 +1,18 @@ +{ + "sharded": true, + "vindexes": { + "hash": { + "type": "hash" + } + }, + "tables": { + "t1": { + "column_vindexes": [ + { + "column": 
"id1", + "name": "hash" + } + ] + } + } +} \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/union/main_test.go b/go/test/endtoend/vtgate/queries/union/main_test.go index ea3577f3af8..06ec07a6c2f 100644 --- a/go/test/endtoend/vtgate/queries/union/main_test.go +++ b/go/test/endtoend/vtgate/queries/union/main_test.go @@ -63,7 +63,7 @@ func TestMain(m *testing.M) { VSchema: vschema, } clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal"} - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-schema-change-signal-interval", "0.1"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal"} err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false) if err != nil { return 1 diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go index 577155f16cb..11325a0f2f8 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go @@ -179,9 +179,17 @@ func TestServingChangeStreaming(t *testing.T) { rdonlyTablet.Type = "replica" // this should fail as there is no rdonly present + // This can also close the streaming connection if it goes to 80- shard first and sends the fields from that. + // Current, stream logic is to close the server connection if partial stream result is sent and an error is received later. _, err = utils.ExecAllowError(t, conn, "select * from test") require.Error(t, err) + // check if connection is still available + _, err = utils.ExecAllowError(t, conn, "select 1") + if err != nil { + t.Skip("connection is closed, cannot continue with the test") + } + // changing replica tablet to rdonly to make rdonly available for serving. 
replicaTablet := clusterInstance.Keyspaces[0].Shards[0].Replica() err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "rdonly") diff --git a/go/test/endtoend/vtgate/reservedconn/sysvar_test.go b/go/test/endtoend/vtgate/reservedconn/sysvar_test.go index ddc5bc81c2f..46f83983f68 100644 --- a/go/test/endtoend/vtgate/reservedconn/sysvar_test.go +++ b/go/test/endtoend/vtgate/reservedconn/sysvar_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/test/endtoend/utils" "github.com/stretchr/testify/assert" @@ -344,10 +345,10 @@ func TestSysvarSocket(t *testing.T) { _, err = utils.ExecAllowError(t, conn, "set socket = '/any/path'") require.Error(t, err) - sqlErr, ok := err.(*mysql.SQLError) + sqlErr, ok := err.(*sqlerror.SQLError) require.True(t, ok, "not a mysql error: %T", err) - assert.Equal(t, mysql.ERIncorrectGlobalLocalVar, sqlErr.Number()) - assert.Equal(t, mysql.SSUnknownSQLState, sqlErr.SQLState()) + assert.Equal(t, sqlerror.ERIncorrectGlobalLocalVar, sqlErr.Number()) + assert.Equal(t, sqlerror.SSUnknownSQLState, sqlErr.SQLState()) assert.Equal(t, "VT03010: variable 'socket' is a read only variable (errno 1238) (sqlstate HY000) during query: set socket = '/any/path'", sqlErr.Error()) } diff --git a/go/test/endtoend/vtgate/schema.sql b/go/test/endtoend/vtgate/schema.sql index 536bec397ec..a883a26519f 100644 --- a/go/test/endtoend/vtgate/schema.sql +++ b/go/test/endtoend/vtgate/schema.sql @@ -143,6 +143,9 @@ create table t10 ( id bigint, sharding_key bigint, + col1 varchar(50), + col2 int, + col3 int, primary key (id) ) Engine = InnoDB; diff --git a/go/test/endtoend/vtgate/schema/schema_test.go b/go/test/endtoend/vtgate/schema/schema_test.go index 82cc07b4125..9a4c749f85a 100644 --- a/go/test/endtoend/vtgate/schema/schema_test.go +++ b/go/test/endtoend/vtgate/schema/schema_test.go @@ -107,6 +107,7 @@ func TestSchemaChange(t *testing.T) { testWithAlterDatabase(t) 
testWithDropCreateSchema(t) testDropNonExistentTables(t) + testApplySchemaBatch(t) testCreateInvalidView(t) testCopySchemaShards(t, clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.TabletPath, 2) testCopySchemaShards(t, fmt.Sprintf("%s/0", keyspaceName), 3) @@ -126,7 +127,6 @@ func testWithInitialSchema(t *testing.T) { // Check if 4 tables are created checkTables(t, totalTableCount) - checkTables(t, totalTableCount) // Also match the vschema for those tablets matchSchema(t, clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.TabletPath, clusterInstance.Keyspaces[0].Shards[1].Vttablets[0].VttabletProcess.TabletPath) @@ -144,7 +144,7 @@ func testWithAlterSchema(t *testing.T) { func testWithAlterDatabase(t *testing.T) { sql := "create database alter_database_test; alter database alter_database_test default character set = utf8mb4; drop database alter_database_test" err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, sql) - assert.Nil(t, err) + assert.NoError(t, err) } // testWithDropCreateSchema , we should be able to drop and create same schema @@ -158,7 +158,7 @@ func testWithAlterDatabase(t *testing.T) { func testWithDropCreateSchema(t *testing.T) { dropCreateTable := fmt.Sprintf("DROP TABLE vt_select_test_%02d ;", 2) + fmt.Sprintf(createTable, fmt.Sprintf("vt_select_test_%02d", 2)) err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, dropCreateTable) - require.Nil(t, err) + require.NoError(t, err) checkTables(t, totalTableCount) } @@ -225,6 +225,33 @@ func testCreateInvalidView(t *testing.T) { } } +func testApplySchemaBatch(t *testing.T) { + { + sqls := "create table batch1(id int primary key);create table batch2(id int primary key);create table batch3(id int primary key);create table batch4(id int primary key);create table batch5(id int primary key);" + _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--sql", sqls, "--batch_size", "2", keyspaceName) + 
require.NoError(t, err) + checkTables(t, totalTableCount+5) + } + { + sqls := "drop table batch1; drop table batch2; drop table batch3; drop table batch4; drop table batch5" + _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--sql", sqls, keyspaceName) + require.NoError(t, err) + checkTables(t, totalTableCount) + } + { + sqls := "create table batch1(id int primary key);create table batch2(id int primary key);create table batch3(id int primary key);create table batch4(id int primary key);create table batch5(id int primary key);" + _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--ddl_strategy", "direct --allow-zero-in-date", "--sql", sqls, "--batch_size", "2", keyspaceName) + require.NoError(t, err) + checkTables(t, totalTableCount+5) + } + { + sqls := "drop table batch1; drop table batch2; drop table batch3; drop table batch4; drop table batch5" + _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--sql", sqls, keyspaceName) + require.NoError(t, err) + checkTables(t, totalTableCount) + } +} + // checkTables checks the number of tables in the first two shards. 
func checkTables(t *testing.T, count int) { checkTablesCount(t, clusterInstance.Keyspaces[0].Shards[0].Vttablets[0], count) diff --git a/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go index adbba68c460..b89b0916e37 100644 --- a/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go +++ b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go @@ -42,7 +42,6 @@ var ( hostname = "localhost" keyspaceName = "ks" cell = "zone1" - signalInterval = 1 sqlSchema = ` create table vt_user ( id bigint, @@ -78,8 +77,7 @@ func TestMain(m *testing.M) { } // List of users authorized to execute vschema ddl operations - clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal"} - + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--schema_change_signal") // Start keyspace keyspace := &cluster.Keyspace{ Name: keyspaceName, @@ -91,10 +89,7 @@ func TestMain(m *testing.M) { // restart the tablet so that the schema.Engine gets a chance to start with existing schema tablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() - tablet.VttabletProcess.ExtraArgs = []string{ - "--queryserver-config-schema-change-signal", - fmt.Sprintf("--queryserver-config-schema-change-signal-interval=%d", signalInterval), - } + tablet.VttabletProcess.ExtraArgs = append(tablet.VttabletProcess.ExtraArgs, "--queryserver-config-schema-change-signal") if err := tablet.RestartOnlyTablet(); err != nil { return 1 } diff --git a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go index 5bcd319b6d5..1c9f4b0b6e2 100644 --- a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go @@ -26,15 +26,12 @@ import ( "time" "github.com/stretchr/testify/assert" - - 
"vitess.io/vitess/go/test/endtoend/utils" - "vitess.io/vitess/go/vt/sidecardb" - "vitess.io/vitess/go/vt/vtgate/planbuilder" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" ) var ( @@ -70,7 +67,7 @@ func TestMain(m *testing.M) { // For upgrade/downgrade tests. if vtgateVer < 17 || vttabletVer < 17 { // Then only the default sidecarDBName is supported. - sidecarDBName = sidecardb.DefaultName + sidecarDBName = sidecar.DefaultName } // Start topo server @@ -86,15 +83,10 @@ func TestMain(m *testing.M) { VSchema: VSchema, SidecarDBName: sidecarDBName, } - clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal", - "--vschema_ddl_authorized_users", "%", - "--schema_change_signal_user", "userData1"} - clusterInstance.VtGatePlannerVersion = planbuilder.Gen4 - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", - "--queryserver-config-schema-change-signal-interval", "0.1", - "--queryserver-config-strict-table-acl", - "--queryserver-config-acl-exempt-acl", "userData1", - "--table-acl-config", "dummy.json"} + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--schema_change_signal", + "--vschema_ddl_authorized_users", "%") + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-config-schema-change-signal") if vtgateVer >= 16 && vttabletVer >= 16 { clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--enable-views") diff --git a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go index 8ad110b2572..3ff0b61b482 100644 --- a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go @@ -24,8 +24,7 @@ import ( "testing" "time" 
- "vitess.io/vitess/go/vt/sidecardb" - + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/test/endtoend/utils" "github.com/stretchr/testify/require" @@ -142,11 +141,11 @@ func TestMain(m *testing.M) { // For upgrade/downgrade tests. if vtgateVer < 17 || vttabletVer < 17 { // Then only the default sidecarDBName is supported. - sidecarDBName = sidecardb.DefaultName + sidecarDBName = sidecar.DefaultName } - clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal", "--schema_change_signal_user", "userData1"} - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-schema-change-signal-interval", "5", "--queryserver-config-strict-table-acl", "--queryserver-config-acl-exempt-acl", "userData1", "--table-acl-config", "dummy.json"} + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--schema_change_signal") + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-config-schema-change-signal") // Start topo server err = clusterInstance.StartTopo() diff --git a/go/test/endtoend/vtgate/schematracker/unauthorized/schema.sql b/go/test/endtoend/vtgate/schematracker/unauthorized/schema.sql deleted file mode 100644 index 48771a04267..00000000000 --- a/go/test/endtoend/vtgate/schematracker/unauthorized/schema.sql +++ /dev/null @@ -1,5 +0,0 @@ -create table t2( - id3 bigint, - id4 bigint, - primary key(id3) -) Engine=InnoDB; diff --git a/go/test/endtoend/vtgate/schematracker/unauthorized/unauthorized_test.go b/go/test/endtoend/vtgate/schematracker/unauthorized/unauthorized_test.go deleted file mode 100644 index c8fa2de2f20..00000000000 --- a/go/test/endtoend/vtgate/schematracker/unauthorized/unauthorized_test.go +++ /dev/null @@ -1,136 +0,0 @@ -/* -Copyright 2022 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unauthorized - -import ( - "context" - _ "embed" - "flag" - "fmt" - "os" - "path" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" -) - -var ( - clusterInstance *cluster.LocalProcessCluster - vtParams mysql.ConnParams - KeyspaceName = "ks" - Cell = "test" - - //go:embed schema.sql - SchemaSQL string - - //go:embed vschema.json - VSchema string -) - -func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) - flag.Parse() - - exitCode := func() int { - clusterInstance = cluster.NewCluster(Cell, "localhost") - defer clusterInstance.Teardown() - - // Start topo server - err := clusterInstance.StartTopo() - if err != nil { - return 1 - } - - // Start keyspace - keyspace := &cluster.Keyspace{ - Name: KeyspaceName, - SchemaSQL: SchemaSQL, - VSchema: VSchema, - } - clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--schema_change_signal") - clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, - "--queryserver-config-schema-change-signal", - "--queryserver-config-schema-change-signal-interval", "0.1", - "--queryserver-config-strict-table-acl", - "--queryserver-config-acl-exempt-acl", "userData1", - "--table-acl-config", "dummy.json") - err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false) - if err != nil { - return 1 - } - - // Start vtgate - err = clusterInstance.StartVtgate() - if err != nil { - return 1 - } - - err = clusterInstance.WaitForVTGateAndVTTablets(5 * time.Minute) - 
if err != nil { - fmt.Println(err) - return 1 - } - - vtParams = mysql.ConnParams{ - Host: clusterInstance.Hostname, - Port: clusterInstance.VtgateMySQLPort, - } - return m.Run() - }() - os.Exit(exitCode) -} - -func TestSchemaTrackingError(t *testing.T) { - vtgateVersion, err := cluster.GetMajorVersion("vtgate") - require.NoError(t, err) - if vtgateVersion > 17 { - t.Skip("schema tracking error is only logged in vtgate version < 18") - } - - ctx := context.Background() - conn, err := mysql.Connect(ctx, &vtParams) - require.NoError(t, err) - defer conn.Close() - - logDir := clusterInstance.VtgateProcess.LogDir - - timeout := time.After(5 * time.Minute) - var present bool - for { - select { - case <-timeout: - t.Error("timeout waiting for schema tracking error") - case <-time.After(1 * time.Second): - // check info logs, continue if the file could not be read correctly. - all, err := os.ReadFile(path.Join(logDir, "vtgate.WARNING")) - if err != nil { - continue - } - if strings.Contains(string(all), "Table ACL might be enabled, --schema_change_signal_user needs to be passed to VTGate for schema tracking to work. Check 'schema tracking' docs on vitess.io") { - present = true - } - } - if present { - break - } - } -} diff --git a/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go b/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go index 720ab124c12..1a37dfb5cf7 100644 --- a/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go @@ -24,11 +24,11 @@ import ( "testing" "time" - "vitess.io/vitess/go/test/endtoend/utils" - "vitess.io/vitess/go/vt/sidecardb" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" ) @@ -68,7 +68,7 @@ func TestMain(m *testing.M) { // For upgrade/downgrade tests. 
if vtgateVer < 17 || vttabletVer < 17 { // Then only the default sidecarDBName is supported. - sidecarDBName = sidecardb.DefaultName + sidecarDBName = sidecar.DefaultName } // Start topo server @@ -83,7 +83,7 @@ func TestMain(m *testing.M) { SchemaSQL: sqlSchema, SidecarDBName: sidecarDBName, } - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-schema-change-signal-interval", "0.1"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal"} err = clusterInstance.StartUnshardedKeyspace(*keyspace, 0, false) if err != nil { return 1 diff --git a/go/test/endtoend/vtgate/sequence/seq_test.go b/go/test/endtoend/vtgate/sequence/seq_test.go index 918a463ca33..dd7542becc5 100644 --- a/go/test/endtoend/vtgate/sequence/seq_test.go +++ b/go/test/endtoend/vtgate/sequence/seq_test.go @@ -24,6 +24,7 @@ import ( "strings" "testing" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/test/endtoend/utils" "github.com/stretchr/testify/assert" @@ -289,8 +290,8 @@ func TestDotTableSeq(t *testing.T) { _, err = conn.ExecuteFetch("insert into `dotted.tablename` (c1,c2) values (10,10)", 1000, true) require.Error(t, err) - mysqlErr := err.(*mysql.SQLError) - assert.Equal(t, mysql.ERDupEntry, mysqlErr.Num) + mysqlErr := err.(*sqlerror.SQLError) + assert.Equal(t, sqlerror.ERDupEntry, mysqlErr.Num) assert.Equal(t, "23000", mysqlErr.State) assert.Contains(t, mysqlErr.Message, "Duplicate entry") } diff --git a/go/test/endtoend/vtgate/transaction/single/schema.sql b/go/test/endtoend/vtgate/transaction/single/schema.sql index 98bb23f2715..500f58200b9 100644 --- a/go/test/endtoend/vtgate/transaction/single/schema.sql +++ b/go/test/endtoend/vtgate/transaction/single/schema.sql @@ -17,22 +17,22 @@ CREATE TABLE `t1` ( `id` bigint(20) NOT NULL, `txn_id` varchar(50) DEFAULT NULL, PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci; +) ENGINE=InnoDB DEFAULT 
CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; CREATE TABLE `t1_id_vdx` ( `id` bigint(20) NOT NULL, `keyspace_id` varbinary(50) NOT NULL, PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci; +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; CREATE TABLE `t2` ( `id` bigint(20) NOT NULL, `txn_id` varchar(50) DEFAULT NULL, PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci; +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; CREATE TABLE `t2_id_vdx` ( `id` bigint(20) NOT NULL, `keyspace_id` varbinary(50) NOT NULL, PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci; \ No newline at end of file +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; diff --git a/go/test/endtoend/vtgate/unsharded/main_test.go b/go/test/endtoend/vtgate/unsharded/main_test.go index 77325d8e309..377d439835b 100644 --- a/go/test/endtoend/vtgate/unsharded/main_test.go +++ b/go/test/endtoend/vtgate/unsharded/main_test.go @@ -131,7 +131,6 @@ BEGIN insert into allDefaults () values (); select * from allDefaults; delete from allDefaults; - set autocommit = 0; END; CREATE PROCEDURE in_parameter(IN val int) diff --git a/go/test/endtoend/vtgr/my.cnf b/go/test/endtoend/vtgr/my.cnf deleted file mode 100644 index 14185182e5a..00000000000 --- a/go/test/endtoend/vtgr/my.cnf +++ /dev/null @@ -1,41 +0,0 @@ -[mysqld] -innodb_log_file_size=4GB -innodb_flush_neighbors=0 -innodb_log_buffer_size=67108864 -innodb_buffer_pool_size=96GB -innodb_buffer_pool_instances=16 -innodb_io_capacity=100 - -log_error_verbosity=3 - -# binlog appliers -slave_parallel_type=LOGICAL_CLOCK -slave_preserve_commit_order=1 -binlog_transaction_dependency_tracking=WRITESET_SESSION -slave_parallel_workers=32 -sync_relay_log=0 -relay_log_recovery=1 - -plugin-load-add='mysql_clone.so' -plugin-load-add='group_replication.so' - -gtid_mode=ON -enforce_gtid_consistency=ON -log_slave_updates=ON 
-binlog_format=ROW - -# Group replication -loose_group_replication_start_on_boot=OFF -loose_group_replication_bootstrap_group=OFF -# use auto-rejoin instead of expel timeout so that we can remove the group member -# loose_group_replication_member_expel_timeout=0 -loose_group_replication_autorejoin_tries=3 -loose_group_replication_exit_state_action=OFFLINE_MODE -loose_group_replication_communication_debug_options='GCS_DEBUG_BASIC,XCOM_DEBUG_BASIC' -loose_group-replication-recovery-retry-count=3 -loose-group_replication_ssl_mode = REQUIRED -loose-group_replication_recovery_use_ssl = 1 -loose-group_replication_ip_whitelist = "0.0.0.0/0" - -# Set multi-primary mode -loose-group_replication_single_primary_mode = ON \ No newline at end of file diff --git a/go/test/endtoend/vtgr/test_config.json b/go/test/endtoend/vtgr/test_config.json deleted file mode 100644 index 03cf0e49701..00000000000 --- a/go/test/endtoend/vtgr/test_config.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "MySQLTopologyUser": "orc_client_user", - "MySQLTopologyPassword": "orc_client_user_password", - "MySQLReplicaUser": "vt_repl", - "MySQLReplicaPassword": "", - "InstancePollSeconds": 1, - "MySQLConnectTimeoutSeconds": 50, - "MySQLTopologyReadTimeoutSeconds": 50 -} diff --git a/go/test/endtoend/vtgr/vtgr_test.go b/go/test/endtoend/vtgr/vtgr_test.go deleted file mode 100644 index 64bc5ba655e..00000000000 --- a/go/test/endtoend/vtgr/vtgr_test.go +++ /dev/null @@ -1,366 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vtgr - -import ( - "fmt" - "os" - "os/exec" - "path" - "strconv" - "strings" - "testing" - "time" - - "vitess.io/vitess/go/sqltypes" - - "github.com/stretchr/testify/require" - "gotest.tools/assert" - - "vitess.io/vitess/go/json2" - "vitess.io/vitess/go/test/endtoend/cluster" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -// To run this test locally on MacOS, set hostname to localhost first: -// $ sudo scutil --set HostName localhost - -func createCluster(t *testing.T, numReplicas int) *cluster.LocalProcessCluster { - keyspaceName := "ks" - shardName := "0" - keyspace := &cluster.Keyspace{Name: keyspaceName} - shard0 := &cluster.Shard{Name: shardName} - hostname := "localhost" - cell1 := "zone1" - tablets := []*cluster.Vttablet{} - clusterInstance := cluster.NewCluster(cell1, hostname) - - os.Setenv("EXTRA_MY_CNF", path.Join(os.Getenv("PWD"), "my.cnf")) - - // Start topo server - err := clusterInstance.StartTopo() - require.NoError(t, err) - - uidBase := 100 - for i := 0; i < numReplicas; i++ { - tablet := clusterInstance.NewVttabletInstance("replica", uidBase+i, cell1) - tablets = append(tablets, tablet) - } - - // Initialize Cluster - shard0.Vttablets = tablets - err = clusterInstance.SetupCluster(keyspace, []cluster.Shard{*shard0}) - require.NoError(t, err) - - // Start MySql - var mysqlCtlProcessList []*exec.Cmd - for _, tablet := range shard0.Vttablets { - proc, err := tablet.MysqlctlProcess.StartProcess() - require.NoError(t, err) - mysqlCtlProcessList = append(mysqlCtlProcessList, proc) - } - - // Wait for mysql processes to start - for _, proc := range mysqlCtlProcessList { - err := proc.Wait() - require.NoError(t, err) - } - for _, tablet := range shard0.Vttablets { - // Reset status, don't wait for the tablet status. 
We will check it later - tablet.VttabletProcess.ServingStatus = "" - tablet.VttabletProcess.DbFlavor = "MysqlGR" - // If we enable backup the GR setup is a bit wacky - tablet.VttabletProcess.SupportsBackup = false - // Start the tablet - err := tablet.VttabletProcess.Setup() - require.NoError(t, err) - } - - // Start vtgr - we deploy vtgr on the tablet node in the test - baseGrPort := 33061 - for i, tablet := range shard0.Vttablets { - tablet.VtgrProcess = clusterInstance.NewVtgrProcess( - []string{fmt.Sprintf("%s/%s", keyspaceName, shardName)}, - path.Join(os.Getenv("PWD"), "test_config.json"), - baseGrPort+i, - ) - } - - for _, tablet := range shard0.Vttablets { - err := tablet.VttabletProcess.WaitForTabletTypes([]string{"NOT_SERVING"}) - require.NoError(t, err) - } - return clusterInstance -} - -func killTablets(t *testing.T, shard *cluster.Shard) { - for _, tablet := range shard.Vttablets { - if tablet.VtgrProcess != nil { - err := tablet.VtgrProcess.TearDown() - require.NoError(t, err) - } - err := tablet.VttabletProcess.TearDown() - require.NoError(t, err) - } -} - -func TestBasicSetup(t *testing.T) { - defer cluster.PanicHandler(t) - clusterInstance := createCluster(t, 2) - keyspace := &clusterInstance.Keyspaces[0] - shard0 := &keyspace.Shards[0] - defer func() { - clusterInstance.Teardown() - killTablets(t, shard0) - }() - for _, tablet := range shard0.Vttablets { - // Until there is a primary, all tablets are replica and should all be NOT_SERVING status - tab := getTablet(t, clusterInstance, tablet.Alias) - assert.Equal(t, tab.Type.String(), "REPLICA") - assert.Equal(t, tablet.VttabletProcess.GetTabletStatus(), "NOT_SERVING") - } - _, err := getPrimaryTablet(t, clusterInstance, keyspace.Name, shard0.Name) - assert.ErrorContains(t, err, "timeout looking for primary tablet") - - tablet1 := shard0.Vttablets[0] - query := `select count(*) - from performance_schema.replication_group_members - where MEMBER_STATE='ONLINE'` - var count int - err = getSQLResult(t, 
tablet1, query, func(values []sqltypes.Value) bool { - cnt, err := values[0].ToInt64() - if err != nil { - return false - } - count = int(cnt) - return true - }) - require.NoError(t, err) - require.NoError(t, err) - // without vtgr, tablet process will not create a mysql group - // and all the nodes are replicas type in NOT_SERVING state - assert.Equal(t, 0, int(count)) -} - -func TestVTGRSetup(t *testing.T) { - defer cluster.PanicHandler(t) - clusterInstance := createCluster(t, 2) - keyspace := &clusterInstance.Keyspaces[0] - shard0 := &keyspace.Shards[0] - defer func() { - clusterInstance.Teardown() - killTablets(t, shard0) - }() - for _, tablet := range shard0.Vttablets { - // Until there is a primary, all tablets are replica and should all be NOT_SERVING status - tab := getTablet(t, clusterInstance, tablet.Alias) - assert.Equal(t, tab.Type.String(), "REPLICA") - assert.Equal(t, tablet.VttabletProcess.GetTabletStatus(), "NOT_SERVING") - } - - // start VTGR processes - for _, tablet := range shard0.Vttablets { - err := tablet.VtgrProcess.Start(tablet.Alias) - require.NoError(t, err) - } - - // VTGR will pick one tablet as the primary - primaryAlias, err := getPrimaryTablet(t, clusterInstance, keyspace.Name, shard0.Name) - require.NoError(t, err) - require.NotEqual(t, nil, primaryAlias) - - tablet1 := shard0.Vttablets[0] - query := `select count(*) - from performance_schema.replication_group_members - where MEMBER_STATE='ONLINE'` - err = getSQLResult(t, tablet1, query, func(values []sqltypes.Value) bool { - cnt, err := values[0].ToInt64() - if err != nil { - return false - } - // VTGR should bootstrap the group and put the replica into the group - return cnt == 2 - }) - require.NoError(t, err) -} - -func TestVTGRWrongPrimaryTablet(t *testing.T) { - defer cluster.PanicHandler(t) - clusterInstance := createCluster(t, 2) - keyspace := &clusterInstance.Keyspaces[0] - shard0 := &keyspace.Shards[0] - defer func() { - clusterInstance.Teardown() - killTablets(t, shard0) - 
}() - for _, tablet := range shard0.Vttablets { - // Until there is a primary, all tablets are replica and should all be NOT_SERVING status - tab := getTablet(t, clusterInstance, tablet.Alias) - assert.Equal(t, tab.Type.String(), "REPLICA") - assert.Equal(t, tablet.VttabletProcess.GetTabletStatus(), "NOT_SERVING") - } - // start VTGR processes - for _, tablet := range shard0.Vttablets { - err := tablet.VtgrProcess.Start(tablet.Alias) - require.NoError(t, err) - } - // VTGR will pick one tablet as the primary - primaryAlias, err := getPrimaryTablet(t, clusterInstance, keyspace.Name, shard0.Name) - require.NoError(t, err) - require.NotEqual(t, nil, primaryAlias) - tablet := shard0.Vttablets[0] - query := `select member_id - from performance_schema.replication_group_members - where member_role='SECONDARY' and member_state='ONLINE'` - var member string - err = getSQLResult(t, tablet, query, func(values []sqltypes.Value) bool { - member = values[0].ToString() - return true - }) - require.NoError(t, err) - query = fmt.Sprintf(`select group_replication_set_as_primary('%s')`, member) - _, err = tablet.VttabletProcess.QueryTabletWithDB(query, "") - require.NoError(t, err) - - // Verify the mysql primary changed, and also the primary tablet changed as well - query = fmt.Sprintf(`select member_role from performance_schema.replication_group_members where member_id='%s'`, member) - err = getSQLResult(t, tablet, query, func(values []sqltypes.Value) bool { - return values[0].ToString() == "PRIMARY" - }) - require.NoError(t, err) - err = verifyPrimaryChange(t, clusterInstance, keyspace.Name, shard0.Name, primaryAlias) - require.NoError(t, err) -} - -func TestVTGRFailover(t *testing.T) { - defer cluster.PanicHandler(t) - clusterInstance := createCluster(t, 3) - keyspace := &clusterInstance.Keyspaces[0] - shard0 := &keyspace.Shards[0] - defer func() { - clusterInstance.Teardown() - killTablets(t, shard0) - }() - for _, tablet := range shard0.Vttablets { - // Until there is a 
primary, all tablets are replica and should all be NOT_SERVING status - tab := getTablet(t, clusterInstance, tablet.Alias) - assert.Equal(t, tab.Type.String(), "REPLICA") - assert.Equal(t, tablet.VttabletProcess.GetTabletStatus(), "NOT_SERVING") - } - // start VTGR processes - for _, tablet := range shard0.Vttablets { - err := tablet.VtgrProcess.Start(tablet.Alias) - require.NoError(t, err) - } - primaryAlias, err := getPrimaryTablet(t, clusterInstance, keyspace.Name, shard0.Name) - require.NoError(t, err) - // VTGR has init the cluster - require.NotEqual(t, "", primaryAlias) - primaryTablet := findTabletByAlias(shard0.Vttablets, primaryAlias) - require.NotNil(t, primaryTablet) - // Wait until there are two nodes in the group - query := `select count(*) from - performance_schema.replication_group_members - where MEMBER_STATE='ONLINE'` - err = getSQLResult(t, primaryTablet, query, func(values []sqltypes.Value) bool { - return values[0].ToString() == "3" - }) - require.NoError(t, err) - - // Now kill the primary - // VTGR should move mysql primary to a different node and change failover primary tablet - err = primaryTablet.VttabletProcess.TearDown() - require.NoError(t, err) - err = verifyPrimaryChange(t, clusterInstance, keyspace.Name, shard0.Name, primaryAlias) - require.NoError(t, err) - // now the primary has changed - primaryAlias, err = getPrimaryTablet(t, clusterInstance, keyspace.Name, shard0.Name) - require.NoError(t, err) - // verify on the _new_ primary node, we are running the mysql primary as well - primaryTablet = findTabletByAlias(shard0.Vttablets, primaryAlias) - require.NotNil(t, primaryTablet) - query = `SELECT count(*) FROM - performance_schema.replication_group_members - WHERE MEMBER_STATE='ONLINE' AND MEMBER_ROLE='PRIMARY' AND MEMBER_PORT=@@port` - err = getSQLResult(t, primaryTablet, query, func(values []sqltypes.Value) bool { - return values[0].ToString() == "1" - }) - require.NoError(t, err) -} - -func getTablet(t *testing.T, cluster 
*cluster.LocalProcessCluster, alias string) *topodatapb.Tablet { - result, err := cluster.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", alias) - require.NoError(t, err) - var tabletInfo topodatapb.Tablet - err = json2.Unmarshal([]byte(result), &tabletInfo) - require.NoError(t, err) - return &tabletInfo -} - -func findTabletByAlias(tablets []*cluster.Vttablet, alias *topodatapb.TabletAlias) *cluster.Vttablet { - for _, tablet := range tablets { - if tablet.Cell == alias.Cell && strings.HasSuffix(tablet.Alias, strconv.Itoa(int(alias.Uid))) { - return tablet - } - } - return nil -} - -func verifyPrimaryChange(t *testing.T, cluster *cluster.LocalProcessCluster, ks, shard string, old *topodatapb.TabletAlias) error { - timeToWait := time.Now().Add(180 * time.Second) - for time.Now().Before(timeToWait) { - time.Sleep(1 * time.Second) - result, err := cluster.VtctlclientProcess.ExecuteCommandWithOutput("GetShard", fmt.Sprintf("%s/%s", ks, shard)) - require.NoError(t, err) - var shardInfo topodatapb.Shard - err = json2.Unmarshal([]byte(result), &shardInfo) - require.NoError(t, err) - if shardInfo.PrimaryAlias.String() != old.String() { - return nil - } - } - return fmt.Errorf("fail to verify primary change") -} - -func getPrimaryTablet(t *testing.T, cluster *cluster.LocalProcessCluster, ks, shard string) (*topodatapb.TabletAlias, error) { - timeToWait := time.Now().Add(180 * time.Second) - for time.Now().Before(timeToWait) { - time.Sleep(1 * time.Second) - result, err := cluster.VtctlclientProcess.ExecuteCommandWithOutput("GetShard", fmt.Sprintf("%s/%s", ks, shard)) - require.NoError(t, err) - var shardInfo topodatapb.Shard - err = json2.Unmarshal([]byte(result), &shardInfo) - require.NoError(t, err) - if shardInfo.PrimaryAlias != nil { - return shardInfo.PrimaryAlias, nil - } - } - return nil, fmt.Errorf("timeout looking for primary tablet") -} - -func getSQLResult(t *testing.T, tablet *cluster.Vttablet, query string, check func([]sqltypes.Value) bool) error { - 
timeToWait := time.Now().Add(180 * time.Second) - for time.Now().Before(timeToWait) { - time.Sleep(1 * time.Second) - qr, err := tablet.VttabletProcess.QueryTabletWithDB(query, "") - require.NoError(t, err) - if len(qr.Rows) == 1 && check(qr.Rows[0]) { - return nil - } - } - return fmt.Errorf("timeout waiting for sql result") -} diff --git a/go/test/endtoend/vtorc/api/api_test.go b/go/test/endtoend/vtorc/api/api_test.go index 43cc8ea4ccb..83b594ea8bd 100644 --- a/go/test/endtoend/vtorc/api/api_test.go +++ b/go/test/endtoend/vtorc/api/api_test.go @@ -17,7 +17,10 @@ limitations under the License. package api import ( + "encoding/json" "fmt" + "math" + "reflect" "testing" "time" @@ -90,7 +93,7 @@ func TestAPIEndpoints(t *testing.T) { // Before we disable recoveries, let us wait until VTOrc has fixed all the issues (if any). _, _ = utils.MakeAPICallRetry(t, vtorc, "/api/replication-analysis", func(_ int, response string) bool { - return response != "[]" + return response != "null" }) t.Run("Disable Recoveries API", func(t *testing.T) { @@ -110,29 +113,29 @@ func TestAPIEndpoints(t *testing.T) { // Wait until VTOrc picks up on this issue and verify // that we see a not null result on the api/replication-analysis page status, resp := utils.MakeAPICallRetry(t, vtorc, "/api/replication-analysis", func(_ int, response string) bool { - return response == "[]" + return response == "null" }) assert.Equal(t, 200, status, resp) - assert.Contains(t, resp, fmt.Sprintf(`"Port": %d`, replica.MySQLPort)) + assert.Contains(t, resp, fmt.Sprintf(`"AnalyzedInstanceAlias": "%s"`, replica.Alias)) assert.Contains(t, resp, `"Analysis": "ReplicationStopped"`) // Verify that filtering also works in the API as intended status, resp, err = utils.MakeAPICall(t, vtorc, "/api/replication-analysis?keyspace=ks&shard=0") require.NoError(t, err) assert.Equal(t, 200, status, resp) - assert.Contains(t, resp, fmt.Sprintf(`"Port": %d`, replica.MySQLPort)) + assert.Contains(t, resp, 
fmt.Sprintf(`"AnalyzedInstanceAlias": "%s"`, replica.Alias)) // Verify that filtering by keyspace also works in the API as intended status, resp, err = utils.MakeAPICall(t, vtorc, "/api/replication-analysis?keyspace=ks") require.NoError(t, err) assert.Equal(t, 200, status, resp) - assert.Contains(t, resp, fmt.Sprintf(`"Port": %d`, replica.MySQLPort)) + assert.Contains(t, resp, fmt.Sprintf(`"AnalyzedInstanceAlias": "%s"`, replica.Alias)) // Check that filtering using keyspace and shard works status, resp, err = utils.MakeAPICall(t, vtorc, "/api/replication-analysis?keyspace=ks&shard=80-") require.NoError(t, err) assert.Equal(t, 200, status, resp) - assert.Equal(t, "[]", resp) + assert.Equal(t, "null", resp) // Check that filtering using just the shard fails status, resp, err = utils.MakeAPICall(t, vtorc, "/api/replication-analysis?shard=0") @@ -197,5 +200,60 @@ func TestAPIEndpoints(t *testing.T) { require.NoError(t, err) assert.Equal(t, 400, status, resp) assert.Equal(t, "Filtering by shard without keyspace isn't supported\n", resp) + + // Also verify that we see the tablet in the errant GTIDs API call + status, resp, err = utils.MakeAPICall(t, vtorc, "/api/errant-gtids") + require.NoError(t, err) + assert.Equal(t, 200, status, resp) + assert.Contains(t, resp, fmt.Sprintf(`"InstanceAlias": "%v"`, replica.Alias)) + + // Check that filtering using keyspace and shard works + status, resp, err = utils.MakeAPICall(t, vtorc, "/api/errant-gtids?keyspace=ks&shard=0") + require.NoError(t, err) + assert.Equal(t, 200, status, resp) + assert.Contains(t, resp, fmt.Sprintf(`"InstanceAlias": "%v"`, replica.Alias)) + + // Check that filtering using keyspace works + status, resp, err = utils.MakeAPICall(t, vtorc, "/api/errant-gtids?keyspace=ks") + require.NoError(t, err) + assert.Equal(t, 200, status, resp) + assert.Contains(t, resp, fmt.Sprintf(`"InstanceAlias": "%v"`, replica.Alias)) + + // Check that filtering using keyspace and shard works + status, resp, err = 
utils.MakeAPICall(t, vtorc, "/api/errant-gtids?keyspace=ks&shard=80-") + require.NoError(t, err) + assert.Equal(t, 200, status, resp) + assert.Equal(t, "null", resp) + + // Check that filtering using just the shard fails + status, resp, err = utils.MakeAPICall(t, vtorc, "/api/errant-gtids?shard=0") + require.NoError(t, err) + assert.Equal(t, 400, status, resp) + assert.Equal(t, "Filtering by shard without keyspace isn't supported\n", resp) + + // Also verify that the metric for errant GTIDs is reporting the correct count. + waitForErrantGTIDCount(t, vtorc, 1) }) } + +func waitForErrantGTIDCount(t *testing.T, vtorc *cluster.VTOrcProcess, errantGTIDCountWanted int) { + timeout := time.After(15 * time.Second) + for { + select { + case <-timeout: + t.Fatalf("Timed out waiting for errant gtid count in the metrics to be %v", errantGTIDCountWanted) + return + default: + _, resp, err := utils.MakeAPICall(t, vtorc, "/debug/vars") + require.NoError(t, err) + resultMap := make(map[string]any) + err = json.Unmarshal([]byte(resp), &resultMap) + require.NoError(t, err) + errantGTIDTabletsCount := reflect.ValueOf(resultMap["ErrantGtidTabletCount"]) + if int(math.Round(errantGTIDTabletsCount.Float())) == errantGTIDCountWanted { + return + } + time.Sleep(100 * time.Millisecond) + } + } +} diff --git a/go/test/endtoend/vtorc/general/vtorc_test.go b/go/test/endtoend/vtorc/general/vtorc_test.go index aea199bba7f..adce77d38b4 100644 --- a/go/test/endtoend/vtorc/general/vtorc_test.go +++ b/go/test/endtoend/vtorc/general/vtorc_test.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/vtorc/utils" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vtorc/inst" "vitess.io/vitess/go/vt/vtorc/logic" ) @@ -76,6 +77,7 @@ func TestSingleKeyspace(t *testing.T) { utils.CheckPrimaryTablet(t, clusterInfo, shard0.Vttablets[0], true) utils.CheckReplication(t, clusterInfo, shard0.Vttablets[0], shard0.Vttablets[1:], 10*time.Second) 
utils.WaitForSuccessfulRecoveryCount(t, clusterInfo.ClusterInstance.VTOrcProcesses[0], logic.ElectNewPrimaryRecoveryName, 1) + utils.WaitForSuccessfulPRSCount(t, clusterInfo.ClusterInstance.VTOrcProcesses[0], keyspace.Name, shard0.Name, 1) } // Cases to test: @@ -94,6 +96,7 @@ func TestKeyspaceShard(t *testing.T) { utils.CheckPrimaryTablet(t, clusterInfo, shard0.Vttablets[0], true) utils.CheckReplication(t, clusterInfo, shard0.Vttablets[0], shard0.Vttablets[1:], 10*time.Second) utils.WaitForSuccessfulRecoveryCount(t, clusterInfo.ClusterInstance.VTOrcProcesses[0], logic.ElectNewPrimaryRecoveryName, 1) + utils.WaitForSuccessfulPRSCount(t, clusterInfo.ClusterInstance.VTOrcProcesses[0], keyspace.Name, shard0.Name, 1) } // Cases to test: @@ -102,10 +105,11 @@ func TestKeyspaceShard(t *testing.T) { // 3. stop replication, let vtorc repair // 4. setup replication from non-primary, let vtorc repair // 5. make instance A replicates from B and B from A, wait for repair +// 6. disable recoveries and make sure the detected problems are set correctly. 
func TestVTOrcRepairs(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) defer cluster.PanicHandler(t) - utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 0, nil, cluster.VTOrcConfiguration{ + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 0, []string{"--change-tablets-with-errant-gtid-to-drained"}, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1, "") keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] @@ -116,6 +120,7 @@ func TestVTOrcRepairs(t *testing.T) { assert.NotNil(t, curPrimary, "should have elected a primary") vtOrcProcess := clusterInfo.ClusterInstance.VTOrcProcesses[0] utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.ElectNewPrimaryRecoveryName, 1) + utils.WaitForSuccessfulPRSCount(t, vtOrcProcess, keyspace.Name, shard0.Name, 1) var replica, otherReplica *cluster.Vttablet for _, tablet := range shard0.Vttablets { @@ -215,6 +220,48 @@ func TestVTOrcRepairs(t *testing.T) { // check that the writes still succeed utils.VerifyWritesSucceed(t, clusterInfo, curPrimary, []*cluster.Vttablet{replica, otherReplica}, 10*time.Second) }) + + t.Run("Errant GTID Detected", func(t *testing.T) { + // insert an errant GTID in the replica + _, err := utils.RunSQL(t, "insert into vt_insert_test(id, msg) values (10173, 'test 178342')", replica, "vt_ks") + require.NoError(t, err) + // When VTOrc detects errant GTIDs, it should change the tablet to a drained type. + utils.WaitForTabletType(t, replica, "drained") + }) + + t.Run("Sets DetectedProblems metric correctly", func(t *testing.T) { + // Since we're using a boolean metric here, disable recoveries for now. + status, _, err := utils.MakeAPICall(t, vtOrcProcess, "/api/disable-global-recoveries") + require.NoError(t, err) + require.Equal(t, 200, status) + + // Make the current primary database read-only. + _, err = utils.RunSQL(t, "set global read_only=ON", curPrimary, "") + require.NoError(t, err) + + // Wait for problems to be set. 
+ utils.WaitForDetectedProblems(t, vtOrcProcess, + string(inst.PrimaryIsReadOnly), + curPrimary.Alias, + keyspace.Name, + shard0.Name, + 1, + ) + + // Enable recoveries. + status, _, err = utils.MakeAPICall(t, vtOrcProcess, "/api/enable-global-recoveries") + require.NoError(t, err) + assert.Equal(t, 200, status) + + // wait for detected problem to be cleared. + utils.WaitForDetectedProblems(t, vtOrcProcess, + string(inst.PrimaryIsReadOnly), + curPrimary.Alias, + keyspace.Name, + shard0.Name, + 0, + ) + }) } func TestRepairAfterTER(t *testing.T) { @@ -335,6 +382,7 @@ func TestVTOrcWithPrs(t *testing.T) { assert.NotNil(t, curPrimary, "should have elected a primary") vtOrcProcess := clusterInfo.ClusterInstance.VTOrcProcesses[0] utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.ElectNewPrimaryRecoveryName, 1) + utils.WaitForSuccessfulPRSCount(t, vtOrcProcess, keyspace.Name, shard0.Name, 1) // find any replica tablet other than the current primary var replica *cluster.Vttablet @@ -362,7 +410,9 @@ func TestVTOrcWithPrs(t *testing.T) { utils.CheckPrimaryTablet(t, clusterInfo, replica, true) // Verify that VTOrc didn't run any other recovery utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.ElectNewPrimaryRecoveryName, 1) + utils.WaitForSuccessfulPRSCount(t, vtOrcProcess, keyspace.Name, shard0.Name, 1) utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.RecoverDeadPrimaryRecoveryName, 0) + utils.WaitForSuccessfulERSCount(t, vtOrcProcess, keyspace.Name, shard0.Name, 0) utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.FixPrimaryRecoveryName, 0) utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.FixReplicaRecoveryName, 0) utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.RecoverPrimaryHasPrimaryRecoveryName, 0) @@ -430,7 +480,7 @@ func TestDurabilityPolicySetLater(t *testing.T) { time.Sleep(30 * time.Second) // Now set the correct durability policy - out, err := 
newCluster.VtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspace.Name, "--durability-policy=semi_sync") + out, err := newCluster.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspace.Name, "--durability-policy=semi_sync") require.NoError(t, err, out) // VTOrc should promote a new primary after seeing the durability policy change diff --git a/go/test/endtoend/vtorc/primaryfailure/main_test.go b/go/test/endtoend/vtorc/primaryfailure/main_test.go index 7d9c57b6b22..a3e50bd0cc9 100644 --- a/go/test/endtoend/vtorc/primaryfailure/main_test.go +++ b/go/test/endtoend/vtorc/primaryfailure/main_test.go @@ -32,7 +32,7 @@ func TestMain(m *testing.M) { var cellInfos []*utils.CellInfo cellInfos = append(cellInfos, &utils.CellInfo{ CellName: utils.Cell1, - NumReplicas: 12, + NumReplicas: 13, NumRdonly: 3, UIDBase: 100, }) diff --git a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go index 4b5f0dda046..90c70526ba5 100644 --- a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go +++ b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go @@ -53,6 +53,7 @@ func TestDownPrimary(t *testing.T) { assert.NotNil(t, curPrimary, "should have elected a primary") vtOrcProcess := clusterInfo.ClusterInstance.VTOrcProcesses[0] utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.ElectNewPrimaryRecoveryName, 1) + utils.WaitForSuccessfulPRSCount(t, vtOrcProcess, keyspace.Name, shard0.Name, 1) // find the replica and rdonly tablets var replica, rdonly *cluster.Vttablet @@ -99,6 +100,128 @@ func TestDownPrimary(t *testing.T) { // also check that the replication is working correctly after failover utils.VerifyWritesSucceed(t, clusterInfo, replica, []*cluster.Vttablet{crossCellReplica}, 10*time.Second) utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.RecoverDeadPrimaryRecoveryName, 1) + 
utils.WaitForSuccessfulERSCount(t, vtOrcProcess, keyspace.Name, shard0.Name, 1) +} + +// bring down primary before VTOrc has started, let vtorc repair. +func TestDownPrimaryBeforeVTOrc(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) + defer cluster.PanicHandler(t) + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{}, 0, "none") + keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] + shard0 := &keyspace.Shards[0] + curPrimary := shard0.Vttablets[0] + + // Promote the first tablet as the primary + err := clusterInfo.ClusterInstance.VtctlclientProcess.InitializeShard(keyspace.Name, shard0.Name, clusterInfo.ClusterInstance.Cell, curPrimary.TabletUID) + require.NoError(t, err) + + // find the replica and rdonly tablets + var replica, rdonly *cluster.Vttablet + for _, tablet := range shard0.Vttablets { + // we know we have only two replcia tablets, so the one not the primary must be the other replica + if tablet.Alias != curPrimary.Alias && tablet.Type == "replica" { + replica = tablet + } + if tablet.Type == "rdonly" { + rdonly = tablet + } + } + assert.NotNil(t, replica, "could not find replica tablet") + assert.NotNil(t, rdonly, "could not find rdonly tablet") + + // check that the replication is setup correctly before we failover + utils.CheckReplication(t, clusterInfo, curPrimary, []*cluster.Vttablet{rdonly, replica}, 10*time.Second) + + // Make the current primary vttablet unavailable. 
+ _ = curPrimary.VttabletProcess.TearDown() + err = curPrimary.MysqlctlProcess.Stop() + require.NoError(t, err) + + // Start a VTOrc instance + utils.StartVTOrcs(t, clusterInfo, []string{"--remote_operation_timeout=10s"}, cluster.VTOrcConfiguration{ + PreventCrossDataCenterPrimaryFailover: true, + }, 1) + + vtOrcProcess := clusterInfo.ClusterInstance.VTOrcProcesses[0] + + defer func() { + // we remove the tablet from our global list + utils.PermanentlyRemoveVttablet(clusterInfo, curPrimary) + }() + + // check that the replica gets promoted + utils.CheckPrimaryTablet(t, clusterInfo, replica, true) + + // also check that the replication is working correctly after failover + utils.VerifyWritesSucceed(t, clusterInfo, replica, []*cluster.Vttablet{rdonly}, 10*time.Second) + utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.RecoverDeadPrimaryRecoveryName, 1) + utils.WaitForSuccessfulERSCount(t, vtOrcProcess, keyspace.Name, shard0.Name, 1) +} + +// delete the primary record and let vtorc repair. 
+func TestDeletedPrimaryTablet(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) + defer cluster.PanicHandler(t) + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s"}, cluster.VTOrcConfiguration{}, 1, "none") + keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] + shard0 := &keyspace.Shards[0] + // find primary from topo + curPrimary := utils.ShardPrimaryTablet(t, clusterInfo, keyspace, shard0) + assert.NotNil(t, curPrimary, "should have elected a primary") + vtOrcProcess := clusterInfo.ClusterInstance.VTOrcProcesses[0] + utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.ElectNewPrimaryRecoveryName, 1) + utils.WaitForSuccessfulPRSCount(t, vtOrcProcess, keyspace.Name, shard0.Name, 1) + + // find the replica and rdonly tablets + var replica, rdonly *cluster.Vttablet + for _, tablet := range shard0.Vttablets { + // we know we have only two replcia tablets, so the one not the primary must be the other replica + if tablet.Alias != curPrimary.Alias && tablet.Type == "replica" { + replica = tablet + } + if tablet.Type == "rdonly" { + rdonly = tablet + } + } + assert.NotNil(t, replica, "could not find replica tablet") + assert.NotNil(t, rdonly, "could not find rdonly tablet") + + // check that the replication is setup correctly before we failover + utils.CheckReplication(t, clusterInfo, curPrimary, []*cluster.Vttablet{replica, rdonly}, 10*time.Second) + + // Disable VTOrc recoveries + vtOrcProcess.DisableGlobalRecoveries(t) + // use vtctlclient to stop replication on the replica + _, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("StopReplication", replica.Alias) + require.NoError(t, err) + // insert a write that is not available on the replica. + utils.VerifyWritesSucceed(t, clusterInfo, curPrimary, []*cluster.Vttablet{rdonly}, 10*time.Second) + + // Make the current primary vttablet unavailable and delete its tablet record. 
+ _ = curPrimary.VttabletProcess.TearDown() + err = curPrimary.MysqlctlProcess.Stop() + require.NoError(t, err) + // use vtctlclient to start replication on the replica back + _, err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("StartReplication", replica.Alias) + require.NoError(t, err) + err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", "--allow-primary", curPrimary.Alias) + require.NoError(t, err) + // Enable VTOrc recoveries now + vtOrcProcess.EnableGlobalRecoveries(t) + + defer func() { + // we remove the tablet from our global list + utils.PermanentlyRemoveVttablet(clusterInfo, curPrimary) + }() + + // check that the replica gets promoted. Also verify that it has all the writes. + utils.CheckPrimaryTablet(t, clusterInfo, replica, true) + utils.CheckTabletUptoDate(t, clusterInfo, replica) + + // also check that the replication is working correctly after failover + utils.VerifyWritesSucceed(t, clusterInfo, replica, []*cluster.Vttablet{rdonly}, 10*time.Second) + utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.RecoverPrimaryTabletDeletedRecoveryName, 1) } // TestDeadPrimaryRecoversImmediately test Vtorc ability to recover immediately if primary is dead. 
@@ -120,6 +243,7 @@ func TestDeadPrimaryRecoversImmediately(t *testing.T) { assert.NotNil(t, curPrimary, "should have elected a primary") vtOrcProcess := clusterInfo.ClusterInstance.VTOrcProcesses[0] utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.ElectNewPrimaryRecoveryName, 1) + utils.WaitForSuccessfulPRSCount(t, vtOrcProcess, keyspace.Name, shard0.Name, 1) // find the replica and rdonly tablets var replica, rdonly *cluster.Vttablet @@ -156,13 +280,14 @@ func TestDeadPrimaryRecoversImmediately(t *testing.T) { // also check that the replication is working correctly after failover utils.VerifyWritesSucceed(t, clusterInfo, replica, []*cluster.Vttablet{crossCellReplica}, 10*time.Second) utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.RecoverDeadPrimaryRecoveryName, 1) + utils.WaitForSuccessfulERSCount(t, vtOrcProcess, keyspace.Name, shard0.Name, 1) // Parse log file and find out how much time it took for DeadPrimary to recover. logFile := path.Join(vtOrcProcess.LogDir, vtOrcProcess.LogFileName) // log prefix printed at the end of analysis where we conclude we have DeadPrimary - t1 := extractTimeFromLog(t, logFile, "Proceeding with DeadPrimary recovery validation after acquiring shard lock") + t1 := extractTimeFromLog(t, logFile, "Proceeding with DeadPrimary recovery") // log prefix printed at the end of recovery - t2 := extractTimeFromLog(t, logFile, "auditType:recover-dead-primary") + t2 := extractTimeFromLog(t, logFile, "auditType:RecoverDeadPrimary") curr := time.Now().Format("2006-01-02") timeLayout := "2006-01-02 15:04:05.000000" timeStr1 := fmt.Sprintf("%s %s", curr, t1) diff --git a/go/test/endtoend/vtorc/readtopologyinstance/main_test.go b/go/test/endtoend/vtorc/readtopologyinstance/main_test.go index ec8206ab603..e3b55d64c6b 100644 --- a/go/test/endtoend/vtorc/readtopologyinstance/main_test.go +++ b/go/test/endtoend/vtorc/readtopologyinstance/main_test.go @@ -72,12 +72,11 @@ func TestReadTopologyInstanceBufferable(t *testing.T) { } } 
- primaryInstance, err := inst.ReadTopologyInstanceBufferable(&inst.InstanceKey{ - Hostname: utils.Hostname, - Port: primary.MySQLPort, - }, nil) + primaryInstance, err := inst.ReadTopologyInstanceBufferable(primary.Alias, nil) require.NoError(t, err) require.NotNil(t, primaryInstance) + assert.Equal(t, utils.Hostname, primaryInstance.Hostname) + assert.Equal(t, primary.MySQLPort, primaryInstance.Port) assert.Contains(t, primaryInstance.InstanceAlias, "zone1") assert.NotEqual(t, 0, primaryInstance.ServerID) assert.Greater(t, len(primaryInstance.ServerUUID), 10) @@ -121,12 +120,11 @@ func TestReadTopologyInstanceBufferable(t *testing.T) { err = logic.EnableRecovery() require.NoError(t, err) - replicaInstance, err := inst.ReadTopologyInstanceBufferable(&inst.InstanceKey{ - Hostname: utils.Hostname, - Port: replica.MySQLPort, - }, nil) + replicaInstance, err := inst.ReadTopologyInstanceBufferable(replica.Alias, nil) require.NoError(t, err) require.NotNil(t, replicaInstance) + assert.Equal(t, utils.Hostname, replicaInstance.Hostname) + assert.Equal(t, replica.MySQLPort, replicaInstance.Port) assert.Contains(t, replicaInstance.InstanceAlias, "zone1") assert.NotEqual(t, 0, replicaInstance.ServerID) assert.Greater(t, len(replicaInstance.ServerUUID), 10) @@ -138,6 +136,8 @@ func TestReadTopologyInstanceBufferable(t *testing.T) { assert.Equal(t, "ROW", replicaInstance.BinlogFormat) assert.Equal(t, "ON", replicaInstance.GTIDMode) assert.Equal(t, "FULL", replicaInstance.BinlogRowImage) + assert.Equal(t, utils.Hostname, replicaInstance.SourceHost) + assert.Equal(t, primary.MySQLPort, replicaInstance.SourcePort) assert.Contains(t, replicaInstance.SelfBinlogCoordinates.LogFile, fmt.Sprintf("vt-0000000%d-bin", replica.TabletUID)) assert.Greater(t, replicaInstance.SelfBinlogCoordinates.LogPos, uint32(0)) assert.False(t, replicaInstance.SemiSyncPrimaryEnabled) diff --git a/go/test/endtoend/vtorc/utils/utils.go b/go/test/endtoend/vtorc/utils/utils.go index 8f9bcd41d8a..0a8a5c6fb2e 
100644 --- a/go/test/endtoend/vtorc/utils/utils.go +++ b/go/test/endtoend/vtorc/utils/utils.go @@ -20,9 +20,11 @@ import ( "context" "encoding/json" "fmt" + "math" "os" "os/exec" "path" + "reflect" "strconv" "strings" "testing" @@ -43,7 +45,6 @@ import ( // Register topo implementations. _ "vitess.io/vitess/go/vt/topo/consultopo" _ "vitess.io/vitess/go/vt/topo/etcd2topo" - _ "vitess.io/vitess/go/vt/topo/k8stopo" _ "vitess.io/vitess/go/vt/topo/zk2topo" ) @@ -68,11 +69,10 @@ type CellInfo struct { // VTOrcClusterInfo stores the information for a cluster. This is supposed to be used only for VTOrc tests. type VTOrcClusterInfo struct { - ClusterInstance *cluster.LocalProcessCluster - Ts *topo.Server - CellInfos []*CellInfo - VtctldClientProcess *cluster.VtctldClientProcess - lastUsedValue int + ClusterInstance *cluster.LocalProcessCluster + Ts *topo.Server + CellInfos []*CellInfo + lastUsedValue int } // CreateClusterAndStartTopo starts the cluster and topology service @@ -101,17 +101,13 @@ func CreateClusterAndStartTopo(cellInfos []*CellInfo) (*VTOrcClusterInfo, error) return nil, err } - // store the vtctldclient process - vtctldClientProcess := cluster.VtctldClientProcessInstance("localhost", clusterInstance.VtctldProcess.GrpcPort, clusterInstance.TmpDirectory) - // create topo server connection ts, err := topo.OpenServer(*clusterInstance.TopoFlavorString(), clusterInstance.VtctlProcess.TopoGlobalAddress, clusterInstance.VtctlProcess.TopoGlobalRoot) return &VTOrcClusterInfo{ - ClusterInstance: clusterInstance, - Ts: ts, - CellInfos: cellInfos, - lastUsedValue: 100, - VtctldClientProcess: vtctldClientProcess, + ClusterInstance: clusterInstance, + Ts: ts, + CellInfos: cellInfos, + lastUsedValue: 100, }, err } @@ -209,10 +205,8 @@ func shutdownVttablets(clusterInfo *VTOrcClusterInfo) error { } // Remove the tablet record for this tablet } - err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", vttablet.Alias) - if err != nil { - return err 
- } + // Ignoring error here because some tests delete tablets themselves. + _ = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", vttablet.Alias) } clusterInfo.ClusterInstance.Keyspaces[0].Shards[0].Vttablets = nil return nil @@ -308,8 +302,15 @@ func SetupVttabletsAndVTOrcs(t *testing.T, clusterInfo *VTOrcClusterInfo, numRep if durability == "" { durability = "none" } - out, err := clusterInfo.VtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, fmt.Sprintf("--durability-policy=%s", durability)) + out, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, fmt.Sprintf("--durability-policy=%s", durability)) require.NoError(t, err, out) + // VTOrc now uses shard record too, so we need to clear that as well for correct testing. + _, err = clusterInfo.Ts.UpdateShardFields(context.Background(), keyspaceName, shardName, func(info *topo.ShardInfo) error { + info.PrimaryTermStartTime = nil + info.PrimaryAlias = nil + return nil + }) + require.NoError(t, err) // start vtorc StartVTOrcs(t, clusterInfo, orcExtraArgs, config, vtorcCount) @@ -436,8 +437,8 @@ func CheckReplication(t *testing.T, clusterInfo *VTOrcClusterInfo, primary *clus time.Sleep(100 * time.Millisecond) break } - confirmReplication(t, primary, replicas, time.Until(endTime), clusterInfo.lastUsedValue) clusterInfo.lastUsedValue++ + confirmReplication(t, primary, replicas, time.Until(endTime), clusterInfo.lastUsedValue) validateTopology(t, clusterInfo, true, time.Until(endTime)) return } @@ -448,8 +449,8 @@ func CheckReplication(t *testing.T, clusterInfo *VTOrcClusterInfo, primary *clus // Call this function only after CheckReplication has been executed once, since that function creates the table that this function uses. 
func VerifyWritesSucceed(t *testing.T, clusterInfo *VTOrcClusterInfo, primary *cluster.Vttablet, replicas []*cluster.Vttablet, timeToWait time.Duration) { t.Helper() - confirmReplication(t, primary, replicas, timeToWait, clusterInfo.lastUsedValue) clusterInfo.lastUsedValue++ + confirmReplication(t, primary, replicas, timeToWait, clusterInfo.lastUsedValue) } func confirmReplication(t *testing.T, primary *cluster.Vttablet, replicas []*cluster.Vttablet, timeToWait time.Duration, valueToInsert int) { @@ -484,6 +485,12 @@ func confirmReplication(t *testing.T, primary *cluster.Vttablet, replicas []*clu } } +// CheckTabletUptoDate verifies that the tablet has all the writes so far +func CheckTabletUptoDate(t *testing.T, clusterInfo *VTOrcClusterInfo, tablet *cluster.Vttablet) { + err := checkInsertedValues(t, tablet, clusterInfo.lastUsedValue) + require.NoError(t, err) +} + func checkInsertedValues(t *testing.T, tablet *cluster.Vttablet, index int) error { selectSQL := fmt.Sprintf("select msg from ks.vt_insert_test where id=%d", index) qr, err := RunSQL(t, selectSQL, tablet, "") @@ -830,20 +837,17 @@ func SetupNewClusterSemiSync(t *testing.T) *VTOrcClusterInfo { require.NoError(t, err) } - vtctldClientProcess := cluster.VtctldClientProcessInstance("localhost", clusterInstance.VtctldProcess.GrpcPort, clusterInstance.TmpDirectory) - - out, err := vtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, "--durability-policy=semi_sync") + out, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, "--durability-policy=semi_sync") require.NoError(t, err, out) // create topo server connection ts, err := topo.OpenServer(*clusterInstance.TopoFlavorString(), clusterInstance.VtctlProcess.TopoGlobalAddress, clusterInstance.VtctlProcess.TopoGlobalRoot) require.NoError(t, err) clusterInfo := &VTOrcClusterInfo{ - ClusterInstance: clusterInstance, - Ts: ts, - CellInfos: nil, - lastUsedValue: 100, 
- VtctldClientProcess: vtctldClientProcess, + ClusterInstance: clusterInstance, + Ts: ts, + CellInfos: nil, + lastUsedValue: 100, } return clusterInfo } @@ -955,7 +959,7 @@ func WaitForSuccessfulRecoveryCount(t *testing.T, vtorcInstance *cluster.VTOrcPr for time.Since(startTime) < timeout { vars := vtorcInstance.GetVars() successfulRecoveriesMap := vars["SuccessfulRecoveries"].(map[string]interface{}) - successCount := successfulRecoveriesMap[recoveryName] + successCount := getIntFromValue(successfulRecoveriesMap[recoveryName]) if successCount == countExpected { return } @@ -963,10 +967,107 @@ func WaitForSuccessfulRecoveryCount(t *testing.T, vtorcInstance *cluster.VTOrcPr } vars := vtorcInstance.GetVars() successfulRecoveriesMap := vars["SuccessfulRecoveries"].(map[string]interface{}) - successCount := successfulRecoveriesMap[recoveryName] + successCount := getIntFromValue(successfulRecoveriesMap[recoveryName]) + assert.EqualValues(t, countExpected, successCount) +} + +// WaitForSuccessfulPRSCount waits until the given keyspace-shard's count of successful prs runs matches the count expected. +func WaitForSuccessfulPRSCount(t *testing.T, vtorcInstance *cluster.VTOrcProcess, keyspace, shard string, countExpected int) { + t.Helper() + timeout := 15 * time.Second + startTime := time.Now() + mapKey := fmt.Sprintf("%v.%v.success", keyspace, shard) + for time.Since(startTime) < timeout { + vars := vtorcInstance.GetVars() + prsCountsMap := vars["planned_reparent_counts"].(map[string]interface{}) + successCount := getIntFromValue(prsCountsMap[mapKey]) + if successCount == countExpected { + return + } + time.Sleep(time.Second) + } + vars := vtorcInstance.GetVars() + prsCountsMap := vars["planned_reparent_counts"].(map[string]interface{}) + successCount := getIntFromValue(prsCountsMap[mapKey]) + assert.EqualValues(t, countExpected, successCount) +} + +// WaitForSuccessfulERSCount waits until the given keyspace-shard's count of successful ers runs matches the count expected. 
+func WaitForSuccessfulERSCount(t *testing.T, vtorcInstance *cluster.VTOrcProcess, keyspace, shard string, countExpected int) { + t.Helper() + timeout := 15 * time.Second + startTime := time.Now() + mapKey := fmt.Sprintf("%v.%v.success", keyspace, shard) + for time.Since(startTime) < timeout { + vars := vtorcInstance.GetVars() + ersCountsMap := vars["emergency_reparent_counts"].(map[string]interface{}) + successCount := getIntFromValue(ersCountsMap[mapKey]) + if successCount == countExpected { + return + } + time.Sleep(time.Second) + } + vars := vtorcInstance.GetVars() + ersCountsMap := vars["emergency_reparent_counts"].(map[string]interface{}) + successCount := getIntFromValue(ersCountsMap[mapKey]) assert.EqualValues(t, countExpected, successCount) } +// getIntFromValue is a helper function to get an integer from the given value. +// If it is convertible to a float, then we round the number to the nearest integer. +// If the value is not numeric at all, we return 0. +func getIntFromValue(val any) int { + value := reflect.ValueOf(val) + if value.CanFloat() { + return int(math.Round(value.Float())) + } + if value.CanInt() { + return int(value.Int()) + } + return 0 +} + +// WaitForDetectedProblems waits until the given analysis code, alias, keyspace and shard count matches the count expected. 
+func WaitForDetectedProblems(t *testing.T, vtorcInstance *cluster.VTOrcProcess, code, alias, ks, shard string, expect int) { + t.Helper() + key := strings.Join([]string{code, alias, ks, shard}, ".") + timeout := 15 * time.Second + startTime := time.Now() + + for time.Since(startTime) < timeout { + vars := vtorcInstance.GetVars() + problems := vars["DetectedProblems"].(map[string]interface{}) + actual := getIntFromValue(problems[key]) + if actual == expect { + return + } + time.Sleep(time.Second) + } + + vars := vtorcInstance.GetVars() + problems := vars["DetectedProblems"].(map[string]interface{}) + actual, ok := problems[key] + actual = getIntFromValue(actual) + + assert.True(t, ok, + "The metric DetectedProblems[%s] should exist but does not (all problems: %+v)", + key, problems, + ) + + assert.EqualValues(t, expect, actual, + "The metric DetectedProblems[%s] should be %v but is %v (all problems: %+v)", + key, expect, actual, + problems, + ) +} + +// WaitForTabletType waits for the tablet to reach a certain type. +func WaitForTabletType(t *testing.T, tablet *cluster.Vttablet, expectedTabletType string) { + t.Helper() + err := tablet.VttabletProcess.WaitForTabletTypes([]string{expectedTabletType}) + require.NoError(t, err) +} + // WaitForInstancePollSecondsExceededCount waits for 30 seconds and then queries api/aggregated-discovery-metrics. // It expects to find minimum occurrence or exact count of `keyName` provided. 
func WaitForInstancePollSecondsExceededCount(t *testing.T, vtorcInstance *cluster.VTOrcProcess, keyName string, minCountExpected float64, enforceEquality bool) { diff --git a/go/test/fuzzing/tablet_manager_fuzzer.go b/go/test/fuzzing/tablet_manager_fuzzer.go index 316cf75fb82..4c61afa64bc 100644 --- a/go/test/fuzzing/tablet_manager_fuzzer.go +++ b/go/test/fuzzing/tablet_manager_fuzzer.go @@ -41,6 +41,7 @@ func FuzzTabletManagerExecuteFetchAsDba(data []byte) int { ctx := context.Background() cp := mysql.ConnParams{} db := fakesqldb.New(t) + defer db.Close() db.AddQueryPattern(".*", &sqltypes.Result{}) daemon := mysqlctl.NewFakeMysqlDaemon(db) diff --git a/go/test/fuzzing/vtctl_fuzzer.go b/go/test/fuzzing/vtctl_fuzzer.go index aed11774cc8..82fdaa572de 100644 --- a/go/test/fuzzing/vtctl_fuzzer.go +++ b/go/test/fuzzing/vtctl_fuzzer.go @@ -189,6 +189,6 @@ func Fuzz(data []byte) int { } func createTopo(ctx context.Context) (*topo.Server, error) { - ts := memorytopo.NewServer("zone1", "zone2", "zone3") + ts := memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") return ts, nil } diff --git a/go/test/utils/noleak.go b/go/test/utils/noleak.go new file mode 100644 index 00000000000..31d454ec789 --- /dev/null +++ b/go/test/utils/noleak.go @@ -0,0 +1,96 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package utils + +import ( + "context" + "testing" + "time" + + "go.uber.org/goleak" +) + +// LeakCheckContext returns a Context that will be automatically cancelled at the end +// of this test. If the test has finished successfully, it will be checked for goroutine +// leaks after context cancellation. +func LeakCheckContext(t testing.TB) context.Context { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(func() { + cancel() + EnsureNoLeaks(t) + }) + return ctx +} + +// LeakCheckContextTimeout behaves like LeakCheckContext but the returned Context will +// be cancelled after `timeout`, or after the test finishes, whichever happens first. +func LeakCheckContextTimeout(t testing.TB, timeout time.Duration) context.Context { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + t.Cleanup(func() { + cancel() + EnsureNoLeaks(t) + }) + return ctx +} + +// EnsureNoLeaks checks for goroutine and socket leaks and fails the test if any are found. +func EnsureNoLeaks(t testing.TB) { + if t.Failed() { + return + } + if err := ensureNoLeaks(); err != nil { + t.Fatal(err) + } +} + +// GetLeaks checks for goroutine and socket leaks and returns an error if any are found. +// One use case is in TestMain()s to ensure that all tests are cleaned up. 
+func GetLeaks() error { + return ensureNoLeaks() +} + +func ensureNoLeaks() error { + if err := ensureNoGoroutines(); err != nil { + return err + } + return nil +} + +func ensureNoGoroutines() error { + var ignored = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/golang/glog.(*fileSink).flushDaemon"), + goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"), + goleak.IgnoreTopFunction("vitess.io/vitess/go/vt/dbconfigs.init.0.func1"), + goleak.IgnoreTopFunction("vitess.io/vitess/go/vt/vtgate.resetAggregators"), + goleak.IgnoreTopFunction("vitess.io/vitess/go/vt/vtgate.processQueryInfo"), + goleak.IgnoreTopFunction("github.com/patrickmn/go-cache.(*janitor).Run"), + goleak.IgnoreTopFunction("vitess.io/vitess/go/vt/logutil.(*ThrottledLogger).log.func1"), + goleak.IgnoreTopFunction("vitess.io/vitess/go/vt/vttablet/tabletserver/throttle.initThrottleTicker.func1.1"), + goleak.IgnoreTopFunction("vitess.io/vitess/go/vt/vttablet/tabletserver/throttle.NewBackgroundClient.initThrottleTicker.func1.1"), + goleak.IgnoreTopFunction("testing.tRunner.func1"), + } + + var err error + for i := 0; i < 5; i++ { + err = goleak.Find(ignored...) + if err == nil { + return nil + } + time.Sleep(100 * time.Millisecond) + } + return err +} diff --git a/go/test/vschemawrapper/vschema_wrapper.go b/go/test/vschemawrapper/vschema_wrapper.go new file mode 100644 index 00000000000..1656fafa41b --- /dev/null +++ b/go/test/vschemawrapper/vschema_wrapper.go @@ -0,0 +1,325 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vschemawrapper + +import ( + "context" + "fmt" + "strings" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/key" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +var _ plancontext.VSchema = (*VSchemaWrapper)(nil) + +type VSchemaWrapper struct { + V *vindexes.VSchema + Keyspace *vindexes.Keyspace + TabletType_ topodatapb.TabletType + Dest key.Destination + SysVarEnabled bool + Version plancontext.PlannerVersion + EnableViews bool + TestBuilder func(query string, vschema plancontext.VSchema, keyspace string) (*engine.Plan, error) +} + +func (vw *VSchemaWrapper) GetPrepareData(stmtName string) *vtgatepb.PrepareData { + switch stmtName { + case "prep_one_param": + return &vtgatepb.PrepareData{ + PrepareStatement: "select 1 from user where id = :v1", + ParamsCount: 1, + } + case "prep_in_param": + return &vtgatepb.PrepareData{ + PrepareStatement: "select 1 from user where id in (:v1, :v2)", + ParamsCount: 2, + } + case "prep_no_param": + return &vtgatepb.PrepareData{ + PrepareStatement: "select 1 from user", + ParamsCount: 0, + } + case "prep_delete": + return &vtgatepb.PrepareData{ + PrepareStatement: "delete from tbl5 where id = :v1", + ParamsCount: 1, + } + } + return nil +} + +func (vw *VSchemaWrapper) PlanPrepareStatement(ctx context.Context, query string) (*engine.Plan, sqlparser.Statement, error) { + plan, err := vw.TestBuilder(query, vw, 
vw.CurrentDb()) + if err != nil { + return nil, nil, err + } + stmt, _, err := sqlparser.Parse2(query) + if err != nil { + return nil, nil, err + } + return plan, stmt, nil +} + +func (vw *VSchemaWrapper) ClearPrepareData(string) {} + +func (vw *VSchemaWrapper) StorePrepareData(string, *vtgatepb.PrepareData) {} + +func (vw *VSchemaWrapper) GetUDV(name string) *querypb.BindVariable { + if strings.EqualFold(name, "prep_stmt") { + return sqltypes.StringBindVariable("select * from user where id in (?, ?, ?)") + } + return nil +} + +func (vw *VSchemaWrapper) IsShardRoutingEnabled() bool { + return false +} + +func (vw *VSchemaWrapper) GetVSchema() *vindexes.VSchema { + return vw.V +} + +func (vw *VSchemaWrapper) GetSrvVschema() *vschemapb.SrvVSchema { + return &vschemapb.SrvVSchema{ + Keyspaces: map[string]*vschemapb.Keyspace{ + "user": { + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{}, + Tables: map[string]*vschemapb.Table{ + "user": {}, + }, + }, + }, + } +} + +func (vw *VSchemaWrapper) ConnCollation() collations.ID { + return collations.CollationUtf8mb3ID +} + +func (vw *VSchemaWrapper) PlannerWarning(_ string) { +} + +func (vw *VSchemaWrapper) ForeignKeyMode(keyspace string) (vschemapb.Keyspace_ForeignKeyMode, error) { + defaultFkMode := vschemapb.Keyspace_unmanaged + if vw.V.Keyspaces[keyspace] != nil && vw.V.Keyspaces[keyspace].ForeignKeyMode != vschemapb.Keyspace_unspecified { + return vw.V.Keyspaces[keyspace].ForeignKeyMode, nil + } + return defaultFkMode, nil +} + +func (vw *VSchemaWrapper) AllKeyspace() ([]*vindexes.Keyspace, error) { + if vw.Keyspace == nil { + return nil, vterrors.VT13001("keyspace not available") + } + return []*vindexes.Keyspace{vw.Keyspace}, nil +} + +// FindKeyspace implements the VSchema interface +func (vw *VSchemaWrapper) FindKeyspace(keyspace string) (*vindexes.Keyspace, error) { + if vw.Keyspace == nil { + return nil, vterrors.VT13001("keyspace not available") + } + if vw.Keyspace.Name == keyspace { + return vw.Keyspace, 
nil + } + return nil, nil +} + +func (vw *VSchemaWrapper) Planner() plancontext.PlannerVersion { + return vw.Version +} + +// SetPlannerVersion implements the ContextVSchema interface +func (vw *VSchemaWrapper) SetPlannerVersion(v plancontext.PlannerVersion) { + vw.Version = v +} + +func (vw *VSchemaWrapper) GetSemTable() *semantics.SemTable { + return nil +} + +func (vw *VSchemaWrapper) KeyspaceExists(keyspace string) bool { + if vw.Keyspace != nil { + return vw.Keyspace.Name == keyspace + } + return false +} + +func (vw *VSchemaWrapper) SysVarSetEnabled() bool { + return vw.SysVarEnabled +} + +func (vw *VSchemaWrapper) TargetDestination(qualifier string) (key.Destination, *vindexes.Keyspace, topodatapb.TabletType, error) { + var keyspaceName string + if vw.Keyspace != nil { + keyspaceName = vw.Keyspace.Name + } + if vw.Dest == nil && qualifier != "" { + keyspaceName = qualifier + } + if keyspaceName == "" { + return nil, nil, 0, vterrors.VT03007() + } + keyspace := vw.V.Keyspaces[keyspaceName] + if keyspace == nil { + return nil, nil, 0, vterrors.VT05003(keyspaceName) + } + return vw.Dest, keyspace.Keyspace, vw.TabletType_, nil + +} + +func (vw *VSchemaWrapper) TabletType() topodatapb.TabletType { + return vw.TabletType_ +} + +func (vw *VSchemaWrapper) Destination() key.Destination { + return vw.Dest +} + +func (vw *VSchemaWrapper) FindTable(tab sqlparser.TableName) (*vindexes.Table, string, topodatapb.TabletType, key.Destination, error) { + destKeyspace, destTabletType, destTarget, err := topoproto.ParseDestination(tab.Qualifier.String(), topodatapb.TabletType_PRIMARY) + if err != nil { + return nil, destKeyspace, destTabletType, destTarget, err + } + table, err := vw.V.FindTable(destKeyspace, tab.Name.String()) + if err != nil { + return nil, destKeyspace, destTabletType, destTarget, err + } + return table, destKeyspace, destTabletType, destTarget, nil +} + +func (vw *VSchemaWrapper) FindView(tab sqlparser.TableName) sqlparser.SelectStatement { + destKeyspace, 
_, _, err := topoproto.ParseDestination(tab.Qualifier.String(), topodatapb.TabletType_PRIMARY) + if err != nil { + return nil + } + return vw.V.FindView(destKeyspace, tab.Name.String()) +} + +func (vw *VSchemaWrapper) FindTableOrVindex(tab sqlparser.TableName) (*vindexes.Table, vindexes.Vindex, string, topodatapb.TabletType, key.Destination, error) { + if tab.Qualifier.IsEmpty() && tab.Name.String() == "dual" { + ksName := vw.getActualKeyspace() + var ks *vindexes.Keyspace + if ksName == "" { + ks = vw.getfirstKeyspace() + ksName = ks.Name + } else { + ks = vw.V.Keyspaces[ksName].Keyspace + } + tbl := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("dual"), + Keyspace: ks, + Type: vindexes.TypeReference, + } + return tbl, nil, ksName, topodatapb.TabletType_PRIMARY, nil, nil + } + destKeyspace, destTabletType, destTarget, err := topoproto.ParseDestination(tab.Qualifier.String(), topodatapb.TabletType_PRIMARY) + if err != nil { + return nil, nil, destKeyspace, destTabletType, destTarget, err + } + if destKeyspace == "" { + destKeyspace = vw.getActualKeyspace() + } + table, vindex, err := vw.V.FindTableOrVindex(destKeyspace, tab.Name.String(), topodatapb.TabletType_PRIMARY) + if err != nil { + return nil, nil, destKeyspace, destTabletType, destTarget, err + } + return table, vindex, destKeyspace, destTabletType, destTarget, nil +} + +func (vw *VSchemaWrapper) getfirstKeyspace() (ks *vindexes.Keyspace) { + var f string + for name, schema := range vw.V.Keyspaces { + if f == "" || f > name { + f = name + ks = schema.Keyspace + } + } + return +} + +func (vw *VSchemaWrapper) getActualKeyspace() string { + if vw.Keyspace == nil { + return "" + } + if !sqlparser.SystemSchema(vw.Keyspace.Name) { + return vw.Keyspace.Name + } + ks, err := vw.AnyKeyspace() + if err != nil { + return "" + } + return ks.Name +} + +func (vw *VSchemaWrapper) DefaultKeyspace() (*vindexes.Keyspace, error) { + return vw.V.Keyspaces["main"].Keyspace, nil +} + +func (vw *VSchemaWrapper) AnyKeyspace() 
(*vindexes.Keyspace, error) { + return vw.DefaultKeyspace() +} + +func (vw *VSchemaWrapper) FirstSortedKeyspace() (*vindexes.Keyspace, error) { + return vw.V.Keyspaces["main"].Keyspace, nil +} + +func (vw *VSchemaWrapper) TargetString() string { + return "targetString" +} + +func (vw *VSchemaWrapper) WarnUnshardedOnly(_ string, _ ...any) { + +} + +func (vw *VSchemaWrapper) ErrorIfShardedF(keyspace *vindexes.Keyspace, _, errFmt string, params ...any) error { + if keyspace.Sharded { + return fmt.Errorf(errFmt, params...) + } + return nil +} + +func (vw *VSchemaWrapper) CurrentDb() string { + ksName := "" + if vw.Keyspace != nil { + ksName = vw.Keyspace.Name + } + return ksName +} + +func (vw *VSchemaWrapper) FindRoutedShard(keyspace, shard string) (string, error) { + return "", nil +} + +func (vw *VSchemaWrapper) IsViewsEnabled() bool { + return vw.EnableViews +} diff --git a/go/textutil/strings.go b/go/textutil/strings.go index bd5dd6ff7f4..7bdd22df610 100644 --- a/go/textutil/strings.go +++ b/go/textutil/strings.go @@ -22,7 +22,9 @@ import ( "strings" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/proto/binlogdata" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) var ( @@ -91,7 +93,7 @@ func ValueIsSimulatedNull(val any) bool { return cval == SimulatedNullString case []string: return len(cval) == 1 && cval[0] == sqltypes.NULL.String() - case binlogdata.OnDDLAction: + case binlogdatapb.OnDDLAction: return int32(cval) == int32(SimulatedNullInt) case int: return cval == SimulatedNullInt @@ -99,6 +101,10 @@ func ValueIsSimulatedNull(val any) bool { return int32(cval) == int32(SimulatedNullInt) case int64: return int64(cval) == int64(SimulatedNullInt) + case []topodatapb.TabletType: + return len(cval) == 1 && cval[0] == topodatapb.TabletType(SimulatedNullInt) + case binlogdatapb.VReplicationWorkflowState: + return int32(cval) == int32(SimulatedNullInt) default: return false } diff --git 
a/go/timer/timer.go b/go/timer/timer.go index 5a28820274f..5407190ef55 100644 --- a/go/timer/timer.go +++ b/go/timer/timer.go @@ -59,7 +59,7 @@ type Timer struct { // state management mu sync.Mutex - running bool + running atomic.Bool // msg is used for out-of-band messages msg chan typeAction @@ -78,10 +78,10 @@ func NewTimer(interval time.Duration) *Timer { func (tm *Timer) Start(keephouse func()) { tm.mu.Lock() defer tm.mu.Unlock() - if tm.running { + isRunning := tm.running.Swap(true) + if isRunning { return } - tm.running = true go tm.run(keephouse) } @@ -118,7 +118,7 @@ func (tm *Timer) SetInterval(ns time.Duration) { tm.interval.Store(ns.Nanoseconds()) tm.mu.Lock() defer tm.mu.Unlock() - if tm.running { + if tm.running.Load() { tm.msg <- timerReset } } @@ -128,7 +128,7 @@ func (tm *Timer) SetInterval(ns time.Duration) { func (tm *Timer) Trigger() { tm.mu.Lock() defer tm.mu.Unlock() - if tm.running { + if tm.running.Load() { tm.msg <- timerTrigger } } @@ -146,9 +146,9 @@ func (tm *Timer) TriggerAfter(duration time.Duration) { func (tm *Timer) Stop() { tm.mu.Lock() defer tm.mu.Unlock() - if tm.running { + isRunning := tm.running.Swap(false) + if isRunning { tm.msg <- timerStop - tm.running = false } } @@ -158,7 +158,5 @@ func (tm *Timer) Interval() time.Duration { } func (tm *Timer) Running() bool { - tm.mu.Lock() - defer tm.mu.Unlock() - return tm.running + return tm.running.Load() } diff --git a/go/tools/astfmtgen/main.go b/go/tools/astfmtgen/main.go index ea968715ac1..38a14d77e7a 100644 --- a/go/tools/astfmtgen/main.go +++ b/go/tools/astfmtgen/main.go @@ -25,10 +25,10 @@ import ( "log" "os" "path" + "slices" "strconv" "strings" - "golang.org/x/exp/slices" "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/packages" diff --git a/go/tools/asthelpergen/asthelpergen.go b/go/tools/asthelpergen/asthelpergen.go index c0e870c21c5..d7d6187fa02 100644 --- a/go/tools/asthelpergen/asthelpergen.go +++ b/go/tools/asthelpergen/asthelpergen.go @@ -26,6 +26,8 @@ 
import ( "strings" "github.com/dave/jennifer/jen" + "golang.org/x/text/cases" + "golang.org/x/text/language" "golang.org/x/tools/go/packages" "vitess.io/vitess/go/tools/codegen" @@ -76,6 +78,9 @@ type ( } ) +// exprInterfacePath is the path of the sqlparser.Expr interface. +const exprInterfacePath = "vitess.io/vitess/go/vt/sqlparser.Expr" + func (gen *astHelperGen) iface() *types.Interface { return gen._iface } @@ -198,22 +203,15 @@ func GenerateASTHelpers(options *Options) (map[string]*jen.File, error) { scopes[pkg.PkgPath] = pkg.Types.Scope() } - pos := strings.LastIndexByte(options.RootInterface, '.') - if pos < 0 { - return nil, fmt.Errorf("unexpected input type: %s", options.RootInterface) - } - - pkgname := options.RootInterface[:pos] - typename := options.RootInterface[pos+1:] - - scope := scopes[pkgname] - if scope == nil { - return nil, fmt.Errorf("no scope found for type '%s'", options.RootInterface) + tt, err := findTypeObject(options.RootInterface, scopes) + if err != nil { + return nil, err } - tt := scope.Lookup(typename) - if tt == nil { - return nil, fmt.Errorf("no type called '%s' found in '%s'", typename, pkgname) + exprType, _ := findTypeObject(exprInterfacePath, scopes) + var exprInterface *types.Interface + if exprType != nil { + exprInterface = exprType.Type().(*types.Named).Underlying().(*types.Interface) } nt := tt.Type().(*types.Named) @@ -222,7 +220,7 @@ func GenerateASTHelpers(options *Options) (map[string]*jen.File, error) { newEqualsGen(pName, &options.Equals), newCloneGen(pName, &options.Clone), newVisitGen(pName), - newRewriterGen(pName, types.TypeString(nt, noQualifier)), + newRewriterGen(pName, types.TypeString(nt, noQualifier), exprInterface), newCOWGen(pName, nt), ) @@ -234,6 +232,28 @@ func GenerateASTHelpers(options *Options) (map[string]*jen.File, error) { return it, nil } +// findTypeObject finds the types.Object for the given interface from the given scopes. 
+func findTypeObject(interfaceToFind string, scopes map[string]*types.Scope) (types.Object, error) { + pos := strings.LastIndexByte(interfaceToFind, '.') + if pos < 0 { + return nil, fmt.Errorf("unexpected input type: %s", interfaceToFind) + } + + pkgname := interfaceToFind[:pos] + typename := interfaceToFind[pos+1:] + + scope := scopes[pkgname] + if scope == nil { + return nil, fmt.Errorf("no scope found for type '%s'", interfaceToFind) + } + + tt := scope.Lookup(typename) + if tt == nil { + return nil, fmt.Errorf("no type called '%s' found in '%s'", typename, pkgname) + } + return tt, nil +} + var _ generatorSPI = (*astHelperGen)(nil) func (gen *astHelperGen) scope() *types.Scope { @@ -304,7 +324,7 @@ func printableTypeName(t types.Type) string { case *types.Named: return t.Obj().Name() case *types.Basic: - return strings.Title(t.Name()) // nolint + return cases.Title(language.AmericanEnglish).String(t.Name()) case *types.Interface: return t.String() default: diff --git a/go/tools/asthelpergen/clone_gen.go b/go/tools/asthelpergen/clone_gen.go index 79251140845..10387a5dc25 100644 --- a/go/tools/asthelpergen/clone_gen.go +++ b/go/tools/asthelpergen/clone_gen.go @@ -20,10 +20,10 @@ import ( "fmt" "go/types" "log" + "slices" "strings" "github.com/dave/jennifer/jen" - "golang.org/x/exp/slices" ) type CloneOptions struct { diff --git a/go/tools/asthelpergen/copy_on_rewrite_gen.go b/go/tools/asthelpergen/copy_on_rewrite_gen.go index 09d00c26308..1daa8d18981 100644 --- a/go/tools/asthelpergen/copy_on_rewrite_gen.go +++ b/go/tools/asthelpergen/copy_on_rewrite_gen.go @@ -132,22 +132,6 @@ func (c *cowGen) basicMethod(t types.Type, basic *types.Basic, spi generatorSPI) return nil } -func (c *cowGen) copySliceElement(t types.Type, elType types.Type, spi generatorSPI) jen.Code { - if !isNamed(t) && isBasic(elType) { - // copy(res, n) - return jen.Id("copy").Call(jen.Id("res"), jen.Id("n")) - } - - // for i := range n { - // res[i] = CloneAST(x) - // } - spi.addType(elType) - 
- return jen.For(jen.List(jen.Id("i"), jen.Id("x"))).Op(":=").Range().Id("n").Block( - jen.Id("res").Index(jen.Id("i")).Op("=").Add(c.readValueOfType(elType, jen.Id("x"), spi)), - ) -} - func ifNotNil(id string, stmts ...jen.Code) *jen.Statement { return jen.If(jen.Id(id).Op("!=").Nil()).Block(stmts...) } diff --git a/go/tools/asthelpergen/rewrite_gen.go b/go/tools/asthelpergen/rewrite_gen.go index 4804ef8d874..cc8b18a78e9 100644 --- a/go/tools/asthelpergen/rewrite_gen.go +++ b/go/tools/asthelpergen/rewrite_gen.go @@ -30,18 +30,21 @@ const ( type rewriteGen struct { ifaceName string file *jen.File + // exprInterface is used to store the sqlparser.Expr interface + exprInterface *types.Interface } var _ generator = (*rewriteGen)(nil) -func newRewriterGen(pkgname string, ifaceName string) *rewriteGen { +func newRewriterGen(pkgname string, ifaceName string, exprInterface *types.Interface) *rewriteGen { file := jen.NewFile(pkgname) file.HeaderComment(licenseFileHeader) file.HeaderComment("Code generated by ASTHelperGen. DO NOT EDIT.") return &rewriteGen{ - ifaceName: ifaceName, - file: file, + ifaceName: ifaceName, + file: file, + exprInterface: exprInterface, } } @@ -105,7 +108,7 @@ func (r *rewriteGen) structMethod(t types.Type, strct *types.Struct, spi generat } fields := r.rewriteAllStructFields(t, strct, spi, true) - stmts := []jen.Code{executePre()} + stmts := []jen.Code{r.executePre(t)} stmts = append(stmts, fields...) stmts = append(stmts, executePost(len(fields) > 0)) stmts = append(stmts, returnTrue()) @@ -130,7 +133,7 @@ func (r *rewriteGen) ptrToStructMethod(t types.Type, strct *types.Struct, spi ge return nil } */ - stmts = append(stmts, executePre()) + stmts = append(stmts, r.executePre(t)) fields := r.rewriteAllStructFields(t, strct, spi, false) stmts = append(stmts, fields...) 
stmts = append(stmts, executePost(len(fields) > 0)) @@ -225,9 +228,19 @@ func setupCursor() []jen.Code { jen.Id("a.cur.node = node"), } } -func executePre() jen.Code { +func (r *rewriteGen) executePre(t types.Type) jen.Code { curStmts := setupCursor() - curStmts = append(curStmts, jen.If(jen.Id("!a.pre(&a.cur)")).Block(returnTrue())) + if r.exprInterface != nil && types.Implements(t, r.exprInterface) { + curStmts = append(curStmts, jen.Id("kontinue").Op(":=").Id("!a.pre(&a.cur)"), + jen.If(jen.Id("a.cur.revisit").Block( + jen.Id("a.cur.revisit").Op("=").False(), + jen.Return(jen.Id("a.rewriteExpr(parent, a.cur.node.(Expr), replacer)")), + )), + jen.If(jen.Id("kontinue").Block(jen.Return(jen.True()))), + ) + } else { + curStmts = append(curStmts, jen.If(jen.Id("!a.pre(&a.cur)")).Block(returnTrue())) + } return jen.If(jen.Id("a.pre!= nil").Block(curStmts...)) } @@ -251,7 +264,7 @@ func (r *rewriteGen) basicMethod(t types.Type, _ *types.Basic, spi generatorSPI) return nil } - stmts := []jen.Code{executePre(), executePost(false), returnTrue()} + stmts := []jen.Code{r.executePre(t), executePost(false), returnTrue()} r.rewriteFunc(t, stmts) return nil } diff --git a/go/tools/go-upgrade/go-upgrade.go b/go/tools/go-upgrade/go-upgrade.go index 20fa9aade95..b3ba7ca628d 100644 --- a/go/tools/go-upgrade/go-upgrade.go +++ b/go/tools/go-upgrade/go-upgrade.go @@ -17,6 +17,7 @@ limitations under the License. package main import ( + "bufio" "fmt" "io" "log" @@ -36,6 +37,36 @@ import ( const ( goDevAPI = "https://go.dev/dl/?mode=json" + + // regexpFindBootstrapVersion greps the current bootstrap version from the Makefile. The bootstrap + // version is composed of either one or two numbers, for instance: 18.1 or 18. + // The expected format of the input is BOOTSTRAP_VERSION=18 or BOOTSTRAP_VERSION=18.1 + regexpFindBootstrapVersion = "(?i).*BOOTSTRAP_VERSION[[:space:]]*=[[:space:]]*([0-9.]+).*" + + // regexpFindGolangVersion greps all numbers separated by a . 
after the goversion_min function call + // This is used to understand what is the current version of Golang using either two or three numbers + // The major, minor and optional patch number of the Golang version + regexpFindGolangVersion = "(?i).*goversion_min[[:space:]]*([0-9.]+).*" + + // regexpReplaceGoModGoVersion replaces the top-level golang version instruction in the go.mod file + // Example going from go1.20 to go1.20: `go 1.20` -> `go 1.21` + regexpReplaceGoModGoVersion = `go[[:space:]]([0-9.]+)\.([0-9.]+)` + + // The regular expressions below match the entire bootstrap_version declaration in Dockerfiles and Makefile + // A bootstrap version declaration is usually: 'ARG bootstrap_version = 18' in Dockerfile, and + // 'BOOTSTRAP_VERSION=18' in the Makefile. Note that the value 18 can also be a float. + regexpReplaceDockerfileBootstrapVersion = "ARG[[:space:]]*bootstrap_version[[:space:]]*=[[:space:]]*[0-9.]+" + regexpReplaceMakefileBootstrapVersion = "BOOTSTRAP_VERSION[[:space:]]*=[[:space:]]*[0-9.]+" + + // The regular expression below matches the bootstrap_version we are using in the test.go file. + // In test.go, there is a flag named 'bootstrap-version' that has a default value. 
We are looking + // to match the entire flag name + the default value (being the current bootstrap version) + // Example input: "flag.String("bootstrap-version", "20", "the version identifier to use for the docker images")" + regexpReplaceTestGoBootstrapVersion = `\"bootstrap-version\",[[:space:]]*\"([0-9.]+)\"` + + // regexpReplaceGolangVersionInWorkflow matches the golang version increment in the string `go-version: 1.20.5` + // which is used to replace the golang version we use inside our workflows + regexpReplaceGolangVersionInWorkflow = `go-version:[[:space:]]*([0-9.]+).*` ) type ( @@ -186,7 +217,7 @@ func updateWorkflowFilesOnly(goTo string) error { for _, fileToChange := range filesToChange { err = replaceInFile( - []*regexp.Regexp{regexp.MustCompile(`go-version:[[:space:]]*([0-9.]+).*`)}, + []*regexp.Regexp{regexp.MustCompile(regexpReplaceGolangVersionInWorkflow)}, []string{"go-version: " + newV.String()}, fileToChange, ) @@ -249,7 +280,7 @@ func currentGolangVersion() (*version.Version, error) { } content := string(contentRaw) - versre := regexp.MustCompile("(?i).*goversion_min[[:space:]]*([0-9.]+).*") + versre := regexp.MustCompile(regexpFindGolangVersion) versionStr := versre.FindStringSubmatch(content) if len(versionStr) != 2 { return nil, fmt.Errorf("malformatted error, got: %v", versionStr) @@ -264,7 +295,7 @@ func currentBootstrapVersion() (bootstrapVersion, error) { } content := string(contentRaw) - versre := regexp.MustCompile("(?i).*BOOTSTRAP_VERSION[[:space:]]*=[[:space:]]*([0-9.]+).*") + versre := regexp.MustCompile(regexpFindBootstrapVersion) versionStr := versre.FindStringSubmatch(content) if len(versionStr) != 2 { return bootstrapVersion{}, fmt.Errorf("malformatted error, got: %v", versionStr) @@ -372,6 +403,7 @@ func replaceGoVersionInCodebase(old, new *version.Version, workflowUpdate bool) } for _, fileToChange := range filesToChange { + // The regular expression below simply replace the old version string by the new golang version err = 
replaceInFile( []*regexp.Regexp{regexp.MustCompile(fmt.Sprintf(`(%s)`, old.String()))}, []string{new.String()}, @@ -384,7 +416,7 @@ func replaceGoVersionInCodebase(old, new *version.Version, workflowUpdate bool) if !isSameMajorMinorVersion(old, new) { err = replaceInFile( - []*regexp.Regexp{regexp.MustCompile(`go[[:space:]]*([0-9.]+)`)}, + []*regexp.Regexp{regexp.MustCompile(regexpReplaceGoModGoVersion)}, []string{fmt.Sprintf("go %d.%d", new.Segments()[0], new.Segments()[1])}, "./go.mod", ) @@ -414,8 +446,8 @@ func updateBootstrapVersionInCodebase(old, new string, newGoVersion *version.Ver for _, file := range files { err = replaceInFile( []*regexp.Regexp{ - regexp.MustCompile(`ARG[[:space:]]*bootstrap_version[[:space:]]*=[[:space:]]*[0-9.]+`), // Dockerfile - regexp.MustCompile(`BOOTSTRAP_VERSION[[:space:]]*=[[:space:]]*[0-9.]+`), // Makefile + regexp.MustCompile(regexpReplaceDockerfileBootstrapVersion), // Dockerfile + regexp.MustCompile(regexpReplaceMakefileBootstrapVersion), // Makefile }, []string{ fmt.Sprintf("ARG bootstrap_version=%s", new), // Dockerfile @@ -429,7 +461,7 @@ func updateBootstrapVersionInCodebase(old, new string, newGoVersion *version.Ver } err = replaceInFile( - []*regexp.Regexp{regexp.MustCompile(`\"bootstrap-version\",[[:space:]]*\"([0-9.]+)\"`)}, + []*regexp.Regexp{regexp.MustCompile(regexpReplaceTestGoBootstrapVersion)}, []string{fmt.Sprintf("\"bootstrap-version\", \"%s\"", new)}, "./test.go", ) @@ -510,17 +542,23 @@ func replaceInFile(oldexps []*regexp.Regexp, new []string, fileToChange string) } defer f.Close() - content, err := io.ReadAll(f) - if err != nil { - return err - } - contentStr := string(content) - - for i, oldex := range oldexps { - contentStr = oldex.ReplaceAllString(contentStr, new[i]) + var res []string + reader := bufio.NewReader(f) + for { + line, err := reader.ReadString('\n') + if err != nil { + if err == io.EOF { + break + } + panic(err) + } + for i, oldexp := range oldexps { + line = oldexp.ReplaceAllString(line, 
new[i]) + } + res = append(res, line) } - _, err = f.WriteAt([]byte(contentStr), 0) + _, err = f.WriteAt([]byte(strings.Join(res, "")), 0) if err != nil { return err } diff --git a/go/tools/go-upgrade/go-upgrade_test.go b/go/tools/go-upgrade/go-upgrade_test.go new file mode 100644 index 00000000000..378672d544f --- /dev/null +++ b/go/tools/go-upgrade/go-upgrade_test.go @@ -0,0 +1,105 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "regexp" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestRegularExpressions(t *testing.T) { + lists := []struct { + regexp string + input string + checkF func(t *testing.T, regexp *regexp.Regexp, input string) + }{ + { + regexp: regexpFindBootstrapVersion, + input: "BOOTSTRAP_VERSION=18.1", + checkF: func(t *testing.T, regexp *regexp.Regexp, input string) { + submatch := regexp.FindStringSubmatch(input) + require.Len(t, submatch, 2, "Should have two submatches in the regular expression") + require.Equal(t, "18.1", submatch[1]) + }, + }, + { + regexp: regexpFindGolangVersion, + input: `goversion_min 1.20.5 || echo "Go version reported`, + checkF: func(t *testing.T, regexp *regexp.Regexp, input string) { + submatch := regexp.FindStringSubmatch(input) + require.Len(t, submatch, 2, "Should have two submatches in the regular expression") + require.Equal(t, "1.20.5", submatch[1]) + }, + }, + { + regexp: regexpReplaceGoModGoVersion, + input: "go 1.20", + checkF: func(t 
*testing.T, regexp *regexp.Regexp, input string) { + res := regexp.ReplaceAllString(input, "go 1.21") + require.Equal(t, "go 1.21", res) + }, + }, + { + regexp: regexpReplaceGoModGoVersion, + input: "go 1 20", + checkF: func(t *testing.T, regexp *regexp.Regexp, input string) { + res := regexp.ReplaceAllString(input, "go 1.21") + require.Equal(t, "go 1 20", res) + }, + }, + { + regexp: regexpReplaceDockerfileBootstrapVersion, + input: "ARG bootstrap_version=18.1", + checkF: func(t *testing.T, regexp *regexp.Regexp, input string) { + res := regexp.ReplaceAllString(input, "ARG bootstrap_version=18.2") + require.Equal(t, "ARG bootstrap_version=18.2", res) + }, + }, + { + regexp: regexpReplaceMakefileBootstrapVersion, + input: "BOOTSTRAP_VERSION=18.1", + checkF: func(t *testing.T, regexp *regexp.Regexp, input string) { + res := regexp.ReplaceAllString(input, "BOOTSTRAP_VERSION=18.2") + require.Equal(t, "BOOTSTRAP_VERSION=18.2", res) + }, + }, + { + regexp: regexpReplaceTestGoBootstrapVersion, + input: `flag.String("bootstrap-version", "18.1", "the version identifier to use for the docker images")`, + checkF: func(t *testing.T, regexp *regexp.Regexp, input string) { + res := regexp.ReplaceAllString(input, "\"bootstrap-version\", \"18.2\"") + require.Equal(t, `flag.String("bootstrap-version", "18.2", "the version identifier to use for the docker images")`, res) + }, + }, + { + regexp: regexpReplaceGolangVersionInWorkflow, + input: "go-version: 1.20.5", + checkF: func(t *testing.T, regexp *regexp.Regexp, input string) { + res := regexp.ReplaceAllString(input, "go-version: 1.20.6") + require.Equal(t, `go-version: 1.20.6`, res) + }, + }, + } + + for _, list := range lists { + t.Run(list.regexp+" "+list.input, func(t *testing.T) { + list.checkF(t, regexp.MustCompile(list.regexp), list.input) + }) + } +} diff --git a/go/trace/trace_test.go b/go/trace/trace_test.go index 08027a35c85..c98a47167a8 100644 --- a/go/trace/trace_test.go +++ b/go/trace/trace_test.go @@ -20,7 +20,6 @@ 
import ( "context" "fmt" "io" - "strings" "testing" "github.com/spf13/viper" @@ -104,15 +103,6 @@ func (f *fakeTracer) Close() error { panic("implement me") } -func (f *fakeTracer) assertNoSpanWith(t *testing.T, substr string) { - t.Helper() - for _, logLine := range f.log { - if strings.Contains(logLine, substr) { - t.Fatalf("expected to not find [%v] but found it in [%v]", substr, logLine) - } - } -} - type mockSpan struct { tracer *fakeTracer } diff --git a/go/viperutil/config.go b/go/viperutil/config.go index 4b1120675f6..49e3f960875 100644 --- a/go/viperutil/config.go +++ b/go/viperutil/config.go @@ -166,6 +166,7 @@ func LoadConfig() (context.CancelFunc, error) { msg := "Failed to read in config %s: %s" switch configFileNotFoundHandling.Get() { case WarnOnConfigFileNotFound: + msg += ". This is optional, and can be ignored if you are not using config files. For a detailed explanation, see https://github.com/vitessio/vitess/blob/main/doc/viper/viper.md#config-files." log.WARN(msg, registry.Static.ConfigFileUsed(), nferr.Error()) fallthrough // after warning, ignore the error case IgnoreConfigFileNotFound: diff --git a/go/viperutil/debug/handler.go b/go/viperutil/debug/handler.go index 07442dd13ab..b5730a2e41e 100644 --- a/go/viperutil/debug/handler.go +++ b/go/viperutil/debug/handler.go @@ -26,7 +26,7 @@ import ( "github.com/spf13/viper" "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/slices2" + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/viperutil/internal/registry" ) @@ -53,7 +53,7 @@ func HandlerFunc(w http.ResponseWriter, r *http.Request) { switch { case format == "": v.DebugTo(w) - case slices2.Any(viper.SupportedExts, func(ext string) bool { return ext == format }): + case slice.Any(viper.SupportedExts, func(ext string) bool { return ext == format }): // Got a supported format; write the config to a tempfile in that format, // then copy it to the response. 
// @@ -77,6 +77,6 @@ func HandlerFunc(w http.ResponseWriter, r *http.Request) { return } default: - http.Error(w, fmt.Sprintf("unsupported config format %s", format), http.StatusBadRequest) + http.Error(w, "unsupported config format", http.StatusBadRequest) } } diff --git a/go/viperutil/internal/sync/sync.go b/go/viperutil/internal/sync/sync.go index 11cb028c286..6608569d86c 100644 --- a/go/viperutil/internal/sync/sync.go +++ b/go/viperutil/internal/sync/sync.go @@ -23,6 +23,7 @@ import ( "time" "github.com/fsnotify/fsnotify" + "github.com/spf13/afero" "github.com/spf13/pflag" "github.com/spf13/viper" @@ -48,7 +49,17 @@ type Viper struct { subscribers []chan<- struct{} watchingConfig bool + fs afero.Fs + setCh chan struct{} + + // for testing purposes only + onConfigWrite func() +} + +func (v *Viper) SetFs(fs afero.Fs) { + v.fs = fs + v.disk.SetFs(fs) } // New returns a new synced Viper. @@ -57,6 +68,7 @@ func New() *Viper { disk: viper.New(), live: viper.New(), keys: map[string]*sync.RWMutex{}, + fs: afero.NewOsFs(), // default Fs used by viper, but we need this set so loadFromDisk doesn't accidentally nil-out the live fs setCh: make(chan struct{}, 1), } } @@ -217,6 +229,10 @@ func (v *Viper) persistChanges(ctx context.Context, minWaitInterval time.Duratio // WriteConfig writes the live viper config back to disk. func (v *Viper) WriteConfig() error { + if v.onConfigWrite != nil { + defer v.onConfigWrite() + } + for _, m := range v.keys { m.Lock() // This won't fire until after the config has been written. @@ -263,6 +279,7 @@ func (v *Viper) loadFromDisk() { // Reset v.live so explicit Set calls don't win over what's just changed on // disk. v.live = viper.New() + v.live.SetFs(v.fs) // Fun fact! MergeConfigMap actually only ever returns nil. 
Maybe in an // older version of viper it used to actually handle errors, but now it @@ -272,10 +289,29 @@ func (v *Viper) loadFromDisk() { // begin implementation of registry.Bindable for sync.Viper -func (v *Viper) BindEnv(vars ...string) error { return v.disk.BindEnv(vars...) } -func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error { return v.disk.BindPFlag(key, flag) } -func (v *Viper) RegisterAlias(alias string, key string) { v.disk.RegisterAlias(alias, key) } -func (v *Viper) SetDefault(key string, value any) { v.disk.SetDefault(key, value) } +func (v *Viper) BindEnv(vars ...string) error { + if err := v.disk.BindEnv(vars...); err != nil { + return err + } + return v.live.BindEnv(vars...) +} + +func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error { + if err := v.disk.BindPFlag(key, flag); err != nil { + return err + } + return v.live.BindPFlag(key, flag) +} + +func (v *Viper) RegisterAlias(alias string, key string) { + v.disk.RegisterAlias(alias, key) + v.live.RegisterAlias(alias, key) +} + +func (v *Viper) SetDefault(key string, value any) { + v.disk.SetDefault(key, value) + v.live.SetDefault(key, value) +} // end implementation of registry.Bindable for sync.Viper diff --git a/go/viperutil/internal/sync/sync_darwin_test.go b/go/viperutil/internal/sync/sync_darwin_test.go new file mode 100644 index 00000000000..3c27ed97616 --- /dev/null +++ b/go/viperutil/internal/sync/sync_darwin_test.go @@ -0,0 +1,34 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sync_test + +import "os" + +// atomicWrite overwrites a file in such a way as to produce exactly one +// filesystem event of the type CREATE or WRITE (which are tracked by viper) +// without producing any REMOVE events. +// +// At time of writing, this produces the following on darwin: +// CHMOD => WRITE => CHMOD. +func atomicWrite(path string, data []byte) error { + stat, err := os.Stat(path) + if err != nil { + return err + } + + return os.WriteFile(path, data, stat.Mode()) +} diff --git a/go/viperutil/internal/sync/sync_internal_test.go b/go/viperutil/internal/sync/sync_internal_test.go new file mode 100644 index 00000000000..cc8a163fa18 --- /dev/null +++ b/go/viperutil/internal/sync/sync_internal_test.go @@ -0,0 +1,136 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sync + +import ( + "context" + "encoding/json" + "math/rand" + "testing" + "time" + + "github.com/spf13/afero" + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPersistConfig(t *testing.T) { + type config struct { + Foo int `json:"foo"` + } + + loadConfig := func(t *testing.T, fs afero.Fs) config { + t.Helper() + + data, err := afero.ReadFile(fs, "config.json") + require.NoError(t, err) + + var cfg config + require.NoError(t, json.Unmarshal(data, &cfg)) + + return cfg + } + + setup := func(t *testing.T, v *Viper, minWaitInterval time.Duration) (afero.Fs, <-chan struct{}) { + t.Helper() + + fs := afero.NewMemMapFs() + cfg := config{ + Foo: jitter(1, 100), + } + + data, err := json.Marshal(&cfg) + require.NoError(t, err) + + err = afero.WriteFile(fs, "config.json", data, 0644) + require.NoError(t, err) + + static := viper.New() + static.SetFs(fs) + static.SetConfigFile("config.json") + + require.NoError(t, static.ReadInConfig()) + require.Equal(t, cfg.Foo, static.GetInt("foo")) + + ch := make(chan struct{}, 1) + v.onConfigWrite = func() { ch <- struct{}{} } + v.SetFs(fs) + + cancel, err := v.Watch(context.Background(), static, minWaitInterval) + require.NoError(t, err) + + t.Cleanup(cancel) + return fs, ch + } + + t.Run("basic", func(t *testing.T) { + v := New() + + minPersistWaitInterval := 10 * time.Second + get := AdaptGetter("foo", func(v *viper.Viper) func(key string) int { return v.GetInt }, v) + fs, ch := setup(t, v, minPersistWaitInterval) + + old := get("foo") + loadConfig(t, fs) + v.Set("foo", old+1) + // This should happen immediately in-memory and on-disk. + assert.Equal(t, old+1, get("foo")) + <-ch + assert.Equal(t, old+1, loadConfig(t, fs).Foo) + + v.Set("foo", old+2) + // This should _also_ happen immediately in-memory, but not on-disk. + // It will take up to 2 * minPersistWaitInterval to reach the disk. 
+ assert.Equal(t, old+2, get("foo")) + assert.Equal(t, old+1, loadConfig(t, fs).Foo) + + select { + case <-ch: + case <-time.After(3 * minPersistWaitInterval): + assert.Fail(t, "config was not persisted quickly enough", "config took longer than %s to persist (minPersistWaitInterval = %s)", 3*minPersistWaitInterval, minPersistWaitInterval) + } + + assert.Equal(t, old+2, loadConfig(t, fs).Foo) + }) + + t.Run("no persist interval", func(t *testing.T) { + v := New() + + var minPersistWaitInterval time.Duration + get := AdaptGetter("foo", func(v *viper.Viper) func(key string) int { return v.GetInt }, v) + fs, ch := setup(t, v, minPersistWaitInterval) + + old := get("foo") + loadConfig(t, fs) + v.Set("foo", old+1) + // This should happen immediately in-memory and on-disk. + assert.Equal(t, old+1, get("foo")) + <-ch + assert.Equal(t, old+1, loadConfig(t, fs).Foo) + + v.Set("foo", old+2) + // This should _also_ happen immediately in-memory, and on-disk. + assert.Equal(t, old+2, get("foo")) + <-ch + assert.Equal(t, old+2, loadConfig(t, fs).Foo) + }) +} + +func jitter(min, max int) int { + return min + rand.Intn(max-min+1) +} diff --git a/go/viperutil/internal/sync/sync_linux_test.go b/go/viperutil/internal/sync/sync_linux_test.go new file mode 100644 index 00000000000..83ccfad66cc --- /dev/null +++ b/go/viperutil/internal/sync/sync_linux_test.go @@ -0,0 +1,39 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sync_test + +import "os" + +// atomicWrite overwrites a file in such a way as to produce exactly one +// filesystem event of the type CREATE or WRITE (which are tracked by viper) +// without producing any REMOVE events. +// +// At time of writing, this produces the following on x86_64 linux: +// CREATE. +func atomicWrite(path string, data []byte) error { + stat, err := os.Stat(path) + if err != nil { + return err + } + + tmp := path + ".tmp" + if err := os.WriteFile(tmp, data, stat.Mode()); err != nil { + return err + } + + return os.Rename(tmp, path) +} diff --git a/go/viperutil/internal/sync/sync_test.go b/go/viperutil/internal/sync/sync_test.go index df494c19bae..6b8efa1b105 100644 --- a/go/viperutil/internal/sync/sync_test.go +++ b/go/viperutil/internal/sync/sync_test.go @@ -19,17 +19,14 @@ package sync_test import ( "context" "encoding/json" - "fmt" "math/rand" "os" - "strings" "sync" "testing" "time" "github.com/fsnotify/fsnotify" "github.com/spf13/viper" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/viperutil" @@ -37,140 +34,40 @@ import ( "vitess.io/vitess/go/viperutil/internal/value" ) -func TestPersistConfig(t *testing.T) { - t.Skip("temporarily skipping this to unblock PRs since it's flaky") - type config struct { - Foo int `json:"foo"` - } - - loadConfig := func(t *testing.T, f *os.File) config { - t.Helper() - - data, err := os.ReadFile(f.Name()) - require.NoError(t, err) - - var cfg config - require.NoError(t, json.Unmarshal(data, &cfg)) - - return cfg - } - - setup := func(t *testing.T, v *vipersync.Viper, minWaitInterval time.Duration) (*os.File, chan struct{}) { - tmp, err := os.CreateTemp(t.TempDir(), fmt.Sprintf("%s_*.json", strings.ReplaceAll(t.Name(), "/", "_"))) - require.NoError(t, err) - - t.Cleanup(func() { os.Remove(tmp.Name()) }) - - cfg := config{ - Foo: jitter(1, 100), - } - - data, err := json.Marshal(&cfg) - require.NoError(t, err) - - _, err = tmp.Write(data) - 
require.NoError(t, err) - - static := viper.New() - static.SetConfigFile(tmp.Name()) - require.NoError(t, static.ReadInConfig()) - - ch := make(chan struct{}, 1) - v.Notify(ch) - - cancel, err := v.Watch(context.Background(), static, minWaitInterval) - require.NoError(t, err) - t.Cleanup(cancel) - - return tmp, ch - } - - t.Run("basic", func(t *testing.T) { - v := vipersync.New() - - minPersistWaitInterval := 10 * time.Second - get := vipersync.AdaptGetter("foo", viperutil.GetFuncForType[int](), v) - f, ch := setup(t, v, minPersistWaitInterval) - - old := get("foo") - loadConfig(t, f) - v.Set("foo", old+1) - // This should happen immediately in-memory and on-disk. - assert.Equal(t, old+1, get("foo")) - <-ch - assert.Equal(t, old+1, loadConfig(t, f).Foo) - - v.Set("foo", old+2) - // This should _also_ happen immediately in-memory, but not on-disk. - // It will take up to 2 * minPersistWaitInterval to reach the disk. - assert.Equal(t, old+2, get("foo")) - assert.Equal(t, old+1, loadConfig(t, f).Foo) - - select { - case <-ch: - case <-time.After(2 * minPersistWaitInterval): - assert.Fail(t, "config was not persisted quickly enough", "config took longer than %s to persist (minPersistWaitInterval = %s)", 2*minPersistWaitInterval, minPersistWaitInterval) - } - - assert.Equal(t, old+2, loadConfig(t, f).Foo) - }) - - t.Run("no persist interval", func(t *testing.T) { - v := vipersync.New() - - var minPersistWaitInterval time.Duration - get := vipersync.AdaptGetter("foo", viperutil.GetFuncForType[int](), v) - f, ch := setup(t, v, minPersistWaitInterval) - - old := get("foo") - loadConfig(t, f) - v.Set("foo", old+1) - // This should happen immediately in-memory and on-disk. - assert.Equal(t, old+1, get("foo")) - <-ch - assert.Equal(t, old+1, loadConfig(t, f).Foo) - - v.Set("foo", old+2) - // This should _also_ happen immediately in-memory, and on-disk. 
- assert.Equal(t, old+2, get("foo")) - <-ch - assert.Equal(t, old+2, loadConfig(t, f).Foo) - }) -} - func TestWatchConfig(t *testing.T) { type config struct { A, B int } - tmp, err := os.CreateTemp(".", "TestWatchConfig_*.json") - require.NoError(t, err) - t.Cleanup(func() { os.Remove(tmp.Name()) }) - - stat, err := os.Stat(tmp.Name()) - require.NoError(t, err) - - writeConfig := func(a, b int) error { + writeConfig := func(tmp *os.File, a, b int) error { data, err := json.Marshal(&config{A: a, B: b}) if err != nil { return err } - return os.WriteFile(tmp.Name(), data, stat.Mode()) + // In order to guarantee viper's watcher detects exactly one config + // change, we perform a write specific to the platform we're executing + // on. + // + // Consequently, this test only supports linux and macos for now. + return atomicWrite(tmp.Name(), data) } - writeRandomConfig := func() error { + writeRandomConfig := func(tmp *os.File) error { a, b := rand.Intn(100), rand.Intn(100) - return writeConfig(a, b) + return writeConfig(tmp, a, b) } - require.NoError(t, writeRandomConfig()) + tmp, err := os.CreateTemp(t.TempDir(), "TestWatchConfig_*.json") + require.NoError(t, err) + + require.NoError(t, writeRandomConfig(tmp)) v := viper.New() v.SetConfigFile(tmp.Name()) require.NoError(t, v.ReadInConfig()) wCh, rCh := make(chan struct{}), make(chan struct{}) - v.OnConfigChange(func(in fsnotify.Event) { + v.OnConfigChange(func(event fsnotify.Event) { select { case <-rCh: return @@ -186,7 +83,7 @@ func TestWatchConfig(t *testing.T) { // Make sure that basic, unsynchronized WatchConfig is set up before // beginning the actual test. 
a, b := v.GetInt("a"), v.GetInt("b") - require.NoError(t, writeConfig(a+1, b+1)) + require.NoError(t, writeConfig(tmp, a+1, b+1)) <-wCh // wait for the update to finish require.Equal(t, a+1, v.GetInt("a")) @@ -196,7 +93,10 @@ func TestWatchConfig(t *testing.T) { sv := vipersync.New() A := viperutil.Configure("a", viperutil.Options[int]{Dynamic: true}) - B := viperutil.Configure("b", viperutil.Options[int]{Dynamic: true}) + B := viperutil.Configure("b", viperutil.Options[int]{FlagName: "b", Dynamic: true, Default: 5}) + + // Check that default values are actually used + require.Equal(t, B.Get(), B.Default()) A.(*value.Dynamic[int]).Base.BoundGetFunc = vipersync.AdaptGetter("a", func(v *viper.Viper) func(key string) int { return v.GetInt @@ -246,7 +146,7 @@ func TestWatchConfig(t *testing.T) { } for i := 0; i < 100; i++ { - require.NoError(t, writeRandomConfig()) + require.NoError(t, writeRandomConfig(tmp)) time.Sleep(writeJitter()) } diff --git a/go/vt/binlog/binlog_connection.go b/go/vt/binlog/binlog_connection.go index 1cdb2d6cacc..f7c7acd8e9c 100644 --- a/go/vt/binlog/binlog_connection.go +++ b/go/vt/binlog/binlog_connection.go @@ -17,15 +17,16 @@ limitations under the License. package binlog import ( + "context" crand "crypto/rand" "fmt" "math" "math/big" "sync" - "context" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/pools" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" @@ -99,12 +100,12 @@ func connectForReplication(cp dbconfigs.Connector) (*mysql.Conn, error) { // StartBinlogDumpFromCurrent requests a replication binlog dump from // the current position. 
-func (bc *BinlogConnection) StartBinlogDumpFromCurrent(ctx context.Context) (mysql.Position, <-chan mysql.BinlogEvent, <-chan error, error) { +func (bc *BinlogConnection) StartBinlogDumpFromCurrent(ctx context.Context) (replication.Position, <-chan mysql.BinlogEvent, <-chan error, error) { ctx, bc.cancel = context.WithCancel(ctx) position, err := bc.Conn.PrimaryPosition() if err != nil { - return mysql.Position{}, nil, nil, fmt.Errorf("failed to get primary position: %v", err) + return replication.Position{}, nil, nil, fmt.Errorf("failed to get primary position: %v", err) } c, e, err := bc.StartBinlogDumpFromPosition(ctx, "", position) @@ -120,7 +121,7 @@ func (bc *BinlogConnection) StartBinlogDumpFromCurrent(ctx context.Context) (mys // by canceling the context. // // Note the context is valid and used until eventChan is closed. -func (bc *BinlogConnection) StartBinlogDumpFromPosition(ctx context.Context, binlogFilename string, startPos mysql.Position) (<-chan mysql.BinlogEvent, <-chan error, error) { +func (bc *BinlogConnection) StartBinlogDumpFromPosition(ctx context.Context, binlogFilename string, startPos replication.Position) (<-chan mysql.BinlogEvent, <-chan error, error) { ctx, bc.cancel = context.WithCancel(ctx) log.Infof("sending binlog dump command: startPos=%v, serverID=%v", startPos, bc.serverID) @@ -156,7 +157,7 @@ func (bc *BinlogConnection) streamEvents(ctx context.Context) (chan mysql.Binlog case errChan <- err: case <-ctx.Done(): } - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Number() == mysql.CRServerLost { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.CRServerLost { // CRServerLost = Lost connection to MySQL server during query // This is not necessarily an error. It could just be that we closed // the connection from outside. 
diff --git a/go/vt/binlog/binlog_streamer.go b/go/vt/binlog/binlog_streamer.go index d9e275778f6..d8b424b2f35 100644 --- a/go/vt/binlog/binlog_streamer.go +++ b/go/vt/binlog/binlog_streamer.go @@ -25,6 +25,8 @@ import ( "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/binlog" "vitess.io/vitess/go/sqltypes" @@ -142,7 +144,7 @@ type Streamer struct { extractPK bool clientCharset *binlogdatapb.Charset - startPos mysql.Position + startPos replication.Position timestamp int64 sendTransaction sendTransactionFunc usePreviousGTIDs bool @@ -158,7 +160,7 @@ type Streamer struct { // startPos is the position to start streaming at. Incompatible with timestamp. // timestamp is the timestamp to start streaming at. Incompatible with startPos. // sendTransaction is called each time a transaction is committed or rolled back. -func NewStreamer(cp dbconfigs.Connector, se *schema.Engine, clientCharset *binlogdatapb.Charset, startPos mysql.Position, timestamp int64, sendTransaction sendTransactionFunc) *Streamer { +func NewStreamer(cp dbconfigs.Connector, se *schema.Engine, clientCharset *binlogdatapb.Charset, startPos replication.Position, timestamp int64, sendTransaction sendTransactionFunc) *Streamer { return &Streamer{ cp: cp, se: se, @@ -246,10 +248,10 @@ func (bls *Streamer) Stream(ctx context.Context) (err error) { // If the sendTransaction func returns io.EOF, parseEvents returns ErrClientEOF. // If the events channel is closed, parseEvents returns ErrServerEOF. // If the context is done, returns ctx.Err(). 
-func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.BinlogEvent, errs <-chan error) (mysql.Position, error) { +func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.BinlogEvent, errs <-chan error) (replication.Position, error) { var statements []FullBinlogStatement var format mysql.BinlogFormat - var gtid mysql.GTID + var gtid replication.GTID var pos = bls.startPos var autocommit = true var err error @@ -274,7 +276,7 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog if int64(timestamp) >= bls.timestamp { eventToken := &querypb.EventToken{ Timestamp: int64(timestamp), - Position: mysql.EncodePosition(pos), + Position: replication.EncodePosition(pos), } if err = bls.sendTransaction(eventToken, statements); err != nil { if err == io.EOF { @@ -348,7 +350,7 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog return pos, fmt.Errorf("can't get GTID from binlog event: %v, event data: %#v", err, ev) } oldpos := pos - pos = mysql.AppendGTID(pos, gtid) + pos = replication.AppendGTID(pos, gtid) // If the event is received outside of a transaction, it must // be sent. Otherwise, it will get lost and the targets will go out // of sync. 
@@ -363,7 +365,7 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog if err != nil { return pos, fmt.Errorf("can't get GTID from binlog event: %v, event data: %#v", err, ev) } - pos = mysql.AppendGTID(pos, gtid) + pos = replication.AppendGTID(pos, gtid) if hasBegin { begin() } diff --git a/go/vt/binlog/binlog_streamer_rbr_test.go b/go/vt/binlog/binlog_streamer_rbr_test.go index 89027b2bdfb..1fe98dedd6d 100644 --- a/go/vt/binlog/binlog_streamer_rbr_test.go +++ b/go/vt/binlog/binlog_streamer_rbr_test.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/binlog" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" @@ -169,7 +170,7 @@ func TestStreamerParseRBREvents(t *testing.T) { mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), mysql.NewTableMapEvent(f, s, tableID, tm), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: topoproto.VtDbPrefix + "test_keyspace", SQL: "BEGIN"}), @@ -241,9 +242,9 @@ func TestStreamerParseRBREvents(t *testing.T) { }, eventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -267,7 +268,7 @@ func TestStreamerParseRBREvents(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, se, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, se, nil, replication.Position{}, 0, sendTransaction) go 
sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ -418,7 +419,7 @@ func TestStreamerParseRBRNameEscapes(t *testing.T) { mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), mysql.NewTableMapEvent(f, s, tableID, tm), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: topoproto.VtDbPrefix + "test_keyspace", SQL: "BEGIN"}), @@ -490,9 +491,9 @@ func TestStreamerParseRBRNameEscapes(t *testing.T) { }, eventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -516,7 +517,7 @@ func TestStreamerParseRBRNameEscapes(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, se, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, se, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) diff --git a/go/vt/binlog/binlog_streamer_test.go b/go/vt/binlog/binlog_streamer_test.go index c0ca0bd08cd..d3a7d7c7a67 100644 --- a/go/vt/binlog/binlog_streamer_test.go +++ b/go/vt/binlog/binlog_streamer_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package binlog import ( + "context" "fmt" "io" "strings" @@ -28,7 +29,8 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" - "context" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/mysql" @@ -87,7 +89,7 @@ func TestStreamerParseEventsXID(t *testing.T) { input := []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: topoproto.VtDbPrefix + "test_keyspace", SQL: "BEGIN"}), @@ -108,9 +110,9 @@ func TestStreamerParseEventsXID(t *testing.T) { }, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -128,7 +130,7 @@ func TestStreamerParseEventsXID(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ -149,7 +151,7 @@ func TestStreamerParseEventsCommit(t *testing.T) { input := []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: topoproto.VtDbPrefix + 
"test_keyspace", SQL: "BEGIN"}), @@ -172,9 +174,9 @@ func TestStreamerParseEventsCommit(t *testing.T) { }, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -191,7 +193,7 @@ func TestStreamerParseEventsCommit(t *testing.T) { dbcfgs := dbconfigs.New(mcp) var got binlogStatements - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ -218,7 +220,7 @@ func TestStreamerStop(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) // Start parseEvents(), but don't send it anything, so it just waits. 
ctx, cancel := context.WithCancel(context.Background()) @@ -271,7 +273,7 @@ func TestStreamerParseEventsClientEOF(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ -296,7 +298,7 @@ func TestStreamerParseEventsServerEOF(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) _, err := bls.parseEvents(context.Background(), events, errs) if err != want { t.Errorf("wrong error, got %#v, want %#v", err, want) @@ -310,7 +312,7 @@ func TestStreamerParseEventsServerEOF(t *testing.T) { func TestStreamerParseEventsGTIDPurged(t *testing.T) { events := make(chan mysql.BinlogEvent) errs := make(chan error) - expectedStreamErr := mysql.NewSQLError(mysql.ERMasterFatalReadingBinlog, mysql.SSUnknownSQLState, + expectedStreamErr := sqlerror.NewSQLError(sqlerror.ERMasterFatalReadingBinlog, sqlerror.SSUnknownSQLState, "Cannot replicate because the master purged required binary logs.") sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error { @@ -332,13 +334,13 @@ func TestStreamerParseEventsGTIDPurged(t *testing.T) { } }() - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) _, err := bls.parseEvents(context.Background(), events, errs) require.Error(t, err) - sqlErr, ok := err.(*mysql.SQLError) + sqlErr, ok := err.(*sqlerror.SQLError) require.True(t, ok, "expected SQLError, got %T", err) - require.True(t, sqlErr.Num == mysql.ERMasterFatalReadingBinlog, "expected ERMasterFatalReadingBinlog (%d), got %d", - mysql.ERMasterFatalReadingBinlog, 
sqlErr.Num) + require.True(t, sqlErr.Num == sqlerror.ERMasterFatalReadingBinlog, "expected ERMasterFatalReadingBinlog (%d), got %d", + sqlerror.ERMasterFatalReadingBinlog, sqlErr.Num) } func TestStreamerParseEventsSendErrorXID(t *testing.T) { @@ -371,7 +373,7 @@ func TestStreamerParseEventsSendErrorXID(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) @@ -417,7 +419,7 @@ func TestStreamerParseEventsSendErrorCommit(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ -458,7 +460,7 @@ func TestStreamerParseEventsInvalid(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ -501,7 +503,7 @@ func TestStreamerParseEventsInvalidFormat(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ -544,7 +546,7 @@ func TestStreamerParseEventsNoFormat(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ 
-585,7 +587,7 @@ func TestStreamerParseEventsInvalidQuery(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ -606,7 +608,7 @@ func TestStreamerParseEventsRollback(t *testing.T) { input := []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: topoproto.VtDbPrefix + "test_keyspace", SQL: "BEGIN"}), @@ -636,9 +638,9 @@ func TestStreamerParseEventsRollback(t *testing.T) { Statements: nil, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -654,9 +656,9 @@ func TestStreamerParseEventsRollback(t *testing.T) { }, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -673,7 +675,7 @@ func TestStreamerParseEventsRollback(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if 
_, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF { @@ -693,7 +695,7 @@ func TestStreamerParseEventsDMLWithoutBegin(t *testing.T) { input := []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: topoproto.VtDbPrefix + "test_keyspace", SQL: "insert into vt_a(eid, id) values (1, 1) /* _stream vt_a (eid id ) (1 1 ); */"}), @@ -711,9 +713,9 @@ func TestStreamerParseEventsDMLWithoutBegin(t *testing.T) { }, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -726,9 +728,9 @@ func TestStreamerParseEventsDMLWithoutBegin(t *testing.T) { Statements: nil, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -746,7 +748,7 @@ func TestStreamerParseEventsDMLWithoutBegin(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF { @@ -766,7 +768,7 @@ func TestStreamerParseEventsBeginWithoutCommit(t *testing.T) { input 
:= []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: topoproto.VtDbPrefix + "test_keyspace", SQL: "insert into vt_a(eid, id) values (1, 1) /* _stream vt_a (eid id ) (1 1 ); */"}), @@ -787,9 +789,9 @@ func TestStreamerParseEventsBeginWithoutCommit(t *testing.T) { }, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -802,9 +804,9 @@ func TestStreamerParseEventsBeginWithoutCommit(t *testing.T) { Statements: nil, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -822,7 +824,7 @@ func TestStreamerParseEventsBeginWithoutCommit(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF { @@ -842,7 +844,7 @@ func TestStreamerParseEventsSetInsertID(t *testing.T) { input := []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 
0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: topoproto.VtDbPrefix + "test_keyspace", SQL: "BEGIN"}), @@ -865,9 +867,9 @@ func TestStreamerParseEventsSetInsertID(t *testing.T) { }, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -884,7 +886,7 @@ func TestStreamerParseEventsSetInsertID(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF { @@ -926,7 +928,7 @@ func TestStreamerParseEventsInvalidIntVar(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ -947,7 +949,7 @@ func TestStreamerParseEventsOtherDB(t *testing.T) { input := []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: topoproto.VtDbPrefix + "test_keyspace", SQL: "BEGIN"}), @@ -971,9 +973,9 @@ func TestStreamerParseEventsOtherDB(t *testing.T) { }, EventToken: 
&querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -990,7 +992,7 @@ func TestStreamerParseEventsOtherDB(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF { @@ -1010,7 +1012,7 @@ func TestStreamerParseEventsOtherDBBegin(t *testing.T) { input := []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: "other", SQL: "BEGIN"}), // Check that this doesn't get filtered out. 
@@ -1034,9 +1036,9 @@ func TestStreamerParseEventsOtherDBBegin(t *testing.T) { }, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -1053,7 +1055,7 @@ func TestStreamerParseEventsOtherDBBegin(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF { @@ -1095,7 +1097,7 @@ func TestStreamerParseEventsBeginAgain(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) before := binlogStreamerErrors.Counts()["ParseEvents"] go sendTestEvents(events, input) @@ -1119,7 +1121,7 @@ func TestStreamerParseEventsMariadbBeginGTID(t *testing.T) { input := []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 4, "filename.0001"), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 10}, true /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 10}, true /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Charset: &binlogdatapb.Charset{Client: 33, Conn: 33, Server: 33}, SQL: "insert into vt_insert_test(msg) values ('test 0') /* _stream vt_insert_test (id ) (null ); */", @@ -1146,9 +1148,9 @@ func TestStreamerParseEventsMariadbBeginGTID(t *testing.T) { }, EventToken: &querypb.EventToken{ Timestamp: 1409892744, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: 
mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 10, @@ -1165,7 +1167,7 @@ func TestStreamerParseEventsMariadbBeginGTID(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF { @@ -1188,7 +1190,7 @@ func TestStreamerParseEventsMariadbStandaloneGTID(t *testing.T) { input := []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 4, "filename.0001"), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 9}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 9}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Charset: &binlogdatapb.Charset{Client: 8, Conn: 8, Server: 33}, SQL: "create table if not exists vt_insert_test (\nid bigint auto_increment,\nmsg varchar(64),\nprimary key (id)\n) Engine=InnoDB", @@ -1206,9 +1208,9 @@ func TestStreamerParseEventsMariadbStandaloneGTID(t *testing.T) { }, EventToken: &querypb.EventToken{ Timestamp: 1409892744, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 9, @@ -1225,7 +1227,7 @@ func TestStreamerParseEventsMariadbStandaloneGTID(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go 
sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF { diff --git a/go/vt/binlog/binlogplayer/binlog_player.go b/go/vt/binlog/binlogplayer/binlog_player.go index c1db2b67c75..6d689bc5436 100644 --- a/go/vt/binlog/binlogplayer/binlog_player.go +++ b/go/vt/binlog/binlogplayer/binlog_player.go @@ -35,6 +35,9 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/history" @@ -42,10 +45,11 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/log" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/throttler" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) var ( @@ -58,17 +62,6 @@ var ( BlplQuery = "Query" // BlplTransaction is the key for the stats map. BlplTransaction = "Transaction" - - // VReplicationInit is for the Init state. - VReplicationInit = "Init" - // VReplicationCopying is for the Copying state. - VReplicationCopying = "Copying" - // BlpRunning is for the Running state. - BlpRunning = "Running" - // BlpStopped is for the Stopped state. - BlpStopped = "Stopped" - // BlpError is for the Error state. - BlpError = "Error" ) // Stats is the internal stats of a player. It is a different @@ -81,7 +74,7 @@ type Stats struct { // Last saved status lastPositionMutex sync.Mutex - lastPosition mysql.Position + lastPosition replication.Position heartbeatMutex sync.Mutex heartbeat int64 @@ -124,14 +117,14 @@ func (bps *Stats) Heartbeat() int64 { } // SetLastPosition sets the last replication position. 
-func (bps *Stats) SetLastPosition(pos mysql.Position) { +func (bps *Stats) SetLastPosition(pos replication.Position) { bps.lastPositionMutex.Lock() defer bps.lastPositionMutex.Unlock() bps.lastPosition = pos } // LastPosition gets the last replication position. -func (bps *Stats) LastPosition() mysql.Position { +func (bps *Stats) LastPosition() replication.Position { bps.lastPositionMutex.Lock() defer bps.lastPositionMutex.Unlock() return bps.lastPosition @@ -149,6 +142,11 @@ func (bps *Stats) MessageHistory() []string { return strs } +func (bps *Stats) Stop() { + bps.Rates.Stop() + bps.VReplicationLagRates.Stop() +} + // NewStats creates a new Stats structure. func NewStats() *Stats { bps := &Stats{} @@ -185,8 +183,8 @@ type BinlogPlayer struct { // common to all uid int32 - position mysql.Position - stopPosition mysql.Position + position replication.Position + stopPosition replication.Position blplStats *Stats defaultCharset *binlogdatapb.Charset currentCharset *binlogdatapb.Charset @@ -231,12 +229,12 @@ func NewBinlogPlayerTables(dbClient DBClient, tablet *topodatapb.Tablet, tables // If an error is encountered, it updates the vreplication state to "Error". // If a stop position was specified, and reached, the state is updated to "Stopped". 
func (blp *BinlogPlayer) ApplyBinlogEvents(ctx context.Context) error { - if err := blp.setVReplicationState(BlpRunning, ""); err != nil { + if err := blp.setVReplicationState(binlogdatapb.VReplicationWorkflowState_Running, ""); err != nil { log.Errorf("Error writing Running state: %v", err) } if err := blp.applyEvents(ctx); err != nil { - if err := blp.setVReplicationState(BlpError, err.Error()); err != nil { + if err := blp.setVReplicationState(binlogdatapb.VReplicationWorkflowState_Error, err.Error()); err != nil { log.Errorf("Error writing stop state: %v", err) } return err @@ -291,14 +289,14 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { case blp.position.Equal(blp.stopPosition): msg := fmt.Sprintf("not starting BinlogPlayer, we're already at the desired position %v", blp.stopPosition) log.Info(msg) - if err := blp.setVReplicationState(BlpStopped, msg); err != nil { + if err := blp.setVReplicationState(binlogdatapb.VReplicationWorkflowState_Stopped, msg); err != nil { log.Errorf("Error writing stop state: %v", err) } return nil case blp.position.AtLeast(blp.stopPosition): msg := fmt.Sprintf("starting point %v greater than stopping point %v", blp.position, blp.stopPosition) log.Error(msg) - if err := blp.setVReplicationState(BlpStopped, msg); err != nil { + if err := blp.setVReplicationState(binlogdatapb.VReplicationWorkflowState_Stopped, msg); err != nil { log.Errorf("Error writing stop state: %v", err) } // Don't return an error. Otherwise, it will keep retrying. 
@@ -347,9 +345,9 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { var stream BinlogTransactionStream if len(blp.tables) > 0 { - stream, err = blplClient.StreamTables(ctx, mysql.EncodePosition(blp.position), blp.tables, blp.defaultCharset) + stream, err = blplClient.StreamTables(ctx, replication.EncodePosition(blp.position), blp.tables, blp.defaultCharset) } else { - stream, err = blplClient.StreamKeyRange(ctx, mysql.EncodePosition(blp.position), blp.keyRange, blp.defaultCharset) + stream, err = blplClient.StreamKeyRange(ctx, replication.EncodePosition(blp.position), blp.keyRange, blp.defaultCharset) } if err != nil { err := fmt.Errorf("error sending streaming query to binlog server: %v", err) @@ -398,7 +396,7 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { if blp.position.AtLeast(blp.stopPosition) { msg := "Reached stopping position, done playing logs" log.Info(msg) - if err := blp.setVReplicationState(BlpStopped, msg); err != nil { + if err := blp.setVReplicationState(binlogdatapb.VReplicationWorkflowState_Stopped, msg); err != nil { log.Errorf("Error writing stop state: %v", err) } return nil @@ -444,7 +442,7 @@ func (blp *BinlogPlayer) processTransaction(tx *binlogdatapb.BinlogTransaction) if _, err = blp.exec(string(stmt.Sql)); err == nil { continue } - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Number() == mysql.ERLockDeadlock { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERLockDeadlock { // Deadlock: ask for retry log.Infof("Deadlock: %v", err) if err = blp.dbClient.Rollback(); err != nil { @@ -513,15 +511,15 @@ func (blp *BinlogPlayer) writeRecoveryPosition(tx *binlogdatapb.BinlogTransactio return nil } -func (blp *BinlogPlayer) setVReplicationState(state, message string) error { +func (blp *BinlogPlayer) setVReplicationState(state binlogdatapb.VReplicationWorkflowState, message string) error { if message != "" { blp.blplStats.History.Add(&StatsHistoryRecord{ Time: 
time.Now(), Message: message, }) } - blp.blplStats.State.Store(state) - query := fmt.Sprintf("update _vt.vreplication set state='%v', message=%v where id=%v", state, encodeString(MessageTruncate(message)), blp.uid) + blp.blplStats.State.Store(state.String()) + query := fmt.Sprintf("update _vt.vreplication set state='%v', message=%v where id=%v", state.String(), encodeString(MessageTruncate(message)), blp.uid) if _, err := blp.dbClient.ExecuteFetch(query, 1); err != nil { return fmt.Errorf("could not set state: %v: %v", query, err) } @@ -530,11 +528,11 @@ func (blp *BinlogPlayer) setVReplicationState(state, message string) error { // VRSettings contains the settings of a vreplication table. type VRSettings struct { - StartPos mysql.Position - StopPos mysql.Position + StartPos replication.Position + StopPos replication.Position MaxTPS int64 MaxReplicationLag int64 - State string + State binlogdatapb.VReplicationWorkflowState WorkflowType binlogdatapb.VReplicationWorkflowType WorkflowSubType binlogdatapb.VReplicationWorkflowSubType WorkflowName string @@ -557,7 +555,7 @@ func ReadVRSettings(dbClient DBClient, uid int32) (VRSettings, error) { maxTPS, err := vrRow.ToInt64("max_tps") if err != nil { - return VRSettings{}, fmt.Errorf("failed to parse max_tps column2: %v", err) + return VRSettings{}, fmt.Errorf("failed to parse max_tps column: %v", err) } maxReplicationLag, err := vrRow.ToInt64("max_replication_lag") if err != nil { @@ -567,7 +565,7 @@ func ReadVRSettings(dbClient DBClient, uid int32) (VRSettings, error) { if err != nil { return VRSettings{}, fmt.Errorf("failed to parse pos column: %v", err) } - stopPos, err := mysql.DecodePosition(vrRow.AsString("stop_pos", "")) + stopPos, err := replication.DecodePosition(vrRow.AsString("stop_pos", "")) if err != nil { return VRSettings{}, fmt.Errorf("failed to parse stop_pos column: %v", err) } @@ -588,7 +586,7 @@ func ReadVRSettings(dbClient DBClient, uid int32) (VRSettings, error) { StopPos: stopPos, MaxTPS: maxTPS, 
MaxReplicationLag: maxReplicationLag, - State: vrRow.AsString("state", ""), + State: binlogdatapb.VReplicationWorkflowState(binlogdatapb.VReplicationWorkflowState_value[vrRow.AsString("state", "")]), WorkflowType: binlogdatapb.VReplicationWorkflowType(workflowType), WorkflowName: vrRow.AsString("workflow", ""), WorkflowSubType: binlogdatapb.VReplicationWorkflowSubType(workflowSubType), @@ -604,23 +602,23 @@ func CreateVReplication(workflow string, source *binlogdatapb.BinlogSource, posi "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys) "+ "values (%v, %v, %v, %v, %v, %v, 0, '%v', %v, %d, %d, %v)", encodeString(workflow), encodeString(source.String()), encodeString(position), maxTPS, maxReplicationLag, - timeUpdated, BlpRunning, encodeString(dbName), workflowType, workflowSubType, deferSecondaryKeys) + timeUpdated, binlogdatapb.VReplicationWorkflowState_Running.String(), encodeString(dbName), workflowType, workflowSubType, deferSecondaryKeys) } // CreateVReplicationState returns a statement to create a stopped vreplication. 
-func CreateVReplicationState(workflow string, source *binlogdatapb.BinlogSource, position, state string, dbName string, +func CreateVReplicationState(workflow string, source *binlogdatapb.BinlogSource, position string, state binlogdatapb.VReplicationWorkflowState, dbName string, workflowType binlogdatapb.VReplicationWorkflowType, workflowSubType binlogdatapb.VReplicationWorkflowSubType) string { return fmt.Sprintf("insert into _vt.vreplication "+ "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type) "+ "values (%v, %v, %v, %v, %v, %v, 0, '%v', %v, %d, %d)", encodeString(workflow), encodeString(source.String()), encodeString(position), throttler.MaxRateModuleDisabled, - throttler.ReplicationLagModuleDisabled, time.Now().Unix(), state, encodeString(dbName), + throttler.ReplicationLagModuleDisabled, time.Now().Unix(), state.String(), encodeString(dbName), workflowType, workflowSubType) } // GenerateUpdatePos returns a statement to record the latest processed gtid in the _vt.vreplication table. -func GenerateUpdatePos(uid int32, pos mysql.Position, timeUpdated int64, txTimestamp int64, rowsCopied int64, compress bool) string { - strGTID := encodeString(mysql.EncodePosition(pos)) +func GenerateUpdatePos(uid int32, pos replication.Position, timeUpdated int64, txTimestamp int64, rowsCopied int64, compress bool) string { + strGTID := encodeString(replication.EncodePosition(pos)) if compress { strGTID = fmt.Sprintf("compress(%s)", strGTID) } @@ -658,21 +656,21 @@ func GenerateUpdateTimeThrottled(uid int32, timeThrottledUnix int64, componentTh func StartVReplication(uid int32) string { return fmt.Sprintf( "update _vt.vreplication set state='%v', stop_pos=NULL where id=%v", - BlpRunning, uid) + binlogdatapb.VReplicationWorkflowState_Running.String(), uid) } // StartVReplicationUntil returns a statement to start the replication with a stop position. 
func StartVReplicationUntil(uid int32, pos string) string { return fmt.Sprintf( "update _vt.vreplication set state='%v', stop_pos=%v where id=%v", - BlpRunning, encodeString(pos), uid) + binlogdatapb.VReplicationWorkflowState_Running.String(), encodeString(pos), uid) } // StopVReplication returns a statement to stop the replication. func StopVReplication(uid int32, message string) string { return fmt.Sprintf( "update _vt.vreplication set state='%v', message=%v where id=%v", - BlpStopped, encodeString(MessageTruncate(message)), uid) + binlogdatapb.VReplicationWorkflowState_Stopped.String(), encodeString(MessageTruncate(message)), uid) } // DeleteVReplication returns a statement to delete the replication. @@ -741,12 +739,12 @@ func MysqlUncompress(input string) []byte { } // DecodePosition attempts to uncompress the passed value first and if it fails tries to decode it as a valid GTID -func DecodePosition(gtid string) (mysql.Position, error) { +func DecodePosition(gtid string) (replication.Position, error) { b := MysqlUncompress(gtid) if b != nil { gtid = string(b) } - return mysql.DecodePosition(gtid) + return replication.DecodePosition(gtid) } // StatsHistoryRecord is used to store a Message with timestamp diff --git a/go/vt/binlog/binlogplayer/binlog_player_test.go b/go/vt/binlog/binlogplayer/binlog_player_test.go index 20f75430644..148c4fb386b 100644 --- a/go/vt/binlog/binlogplayer/binlog_player_test.go +++ b/go/vt/binlog/binlogplayer/binlog_player_test.go @@ -17,15 +17,15 @@ limitations under the License. 
package binlogplayer import ( + "context" "errors" "testing" "time" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" querypb "vitess.io/vitess/go/vt/proto/query" - "context" - - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/throttler" @@ -54,11 +54,11 @@ var ( sqltypes.NULL, // stop_pos sqltypes.NewInt64(9223372036854775807), // max_tps sqltypes.NewInt64(9223372036854775807), // max_replication_lag - sqltypes.NewVarBinary("Running"), // state - sqltypes.NewInt64(1), // workflow_type - sqltypes.NewVarChar("wf"), // workflow - sqltypes.NewInt64(0), // workflow_sub_type - sqltypes.NewInt64(0), // defer_secondary_keys + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), // state + sqltypes.NewInt64(1), // workflow_type + sqltypes.NewVarChar("wf"), // workflow + sqltypes.NewInt64(0), // workflow_sub_type + sqltypes.NewInt64(0), // defer_secondary_keys }, }, } @@ -86,7 +86,9 @@ func TestNewBinlogPlayerKeyRange(t *testing.T) { } wantKeyRange := &topodatapb.KeyRange{End: []byte{0x80}} - blp := NewBinlogPlayerKeyRange(dbClient, wantTablet, wantKeyRange, 1, NewStats()) + stats := NewStats() + defer stats.Stop() + blp := NewBinlogPlayerKeyRange(dbClient, wantTablet, wantKeyRange, 1, stats) errfunc := applyEvents(blp) dbClient.Wait() @@ -117,7 +119,9 @@ func TestNewBinlogPlayerTables(t *testing.T) { } wantTables := []string{"a", "b"} - blp := NewBinlogPlayerTables(dbClient, wantTablet, wantTables, 1, NewStats()) + stats := NewStats() + defer stats.Stop() + blp := NewBinlogPlayerTables(dbClient, wantTablet, wantTables, 1, stats) errfunc := applyEvents(blp) dbClient.Wait() @@ -138,7 +142,9 @@ func TestApplyEventsFail(t *testing.T) { _ = newFakeBinlogClient() - blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, NewStats()) + stats := NewStats() + defer stats.Stop() + blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, stats) errfunc := applyEvents(blp) 
dbClient.Wait() @@ -171,15 +177,15 @@ func TestStopPosEqual(t *testing.T) { InsertID: 0, Rows: [][]sqltypes.Value{ { - sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos - sqltypes.NewVarBinary("MariaDB/0-1-1083"), // stop_pos - sqltypes.NewInt64(9223372036854775807), // max_tps - sqltypes.NewInt64(9223372036854775807), // max_replication_lag - sqltypes.NewVarBinary("Running"), // state - sqltypes.NewInt64(1), // workflow_type - sqltypes.NewVarChar("wf"), // workflow - sqltypes.NewInt64(1), // workflow_sub_type - sqltypes.NewInt64(1), // defer_secondary_keys + sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos + sqltypes.NewVarBinary("MariaDB/0-1-1083"), // stop_pos + sqltypes.NewInt64(9223372036854775807), // max_tps + sqltypes.NewInt64(9223372036854775807), // max_replication_lag + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), // state + sqltypes.NewInt64(1), // workflow_type + sqltypes.NewVarChar("wf"), // workflow + sqltypes.NewInt64(1), // workflow_sub_type + sqltypes.NewInt64(1), // defer_secondary_keys }, }, } @@ -188,7 +194,9 @@ func TestStopPosEqual(t *testing.T) { _ = newFakeBinlogClient() - blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, NewStats()) + stats := NewStats() + defer stats.Stop() + blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, stats) errfunc := applyEvents(blp) dbClient.Wait() @@ -208,15 +216,15 @@ func TestStopPosLess(t *testing.T) { InsertID: 0, Rows: [][]sqltypes.Value{ { - sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos - sqltypes.NewVarBinary("MariaDB/0-1-1082"), // stop_pos - sqltypes.NewInt64(9223372036854775807), // max_tps - sqltypes.NewInt64(9223372036854775807), // max_replication_lag - sqltypes.NewVarBinary("Running"), // state - sqltypes.NewInt64(1), // workflow_type - sqltypes.NewVarChar("wf"), // workflow - sqltypes.NewInt64(1), // workflow_sub_type - sqltypes.NewInt64(1), // defer_secondary_keys + sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos + 
sqltypes.NewVarBinary("MariaDB/0-1-1082"), // stop_pos + sqltypes.NewInt64(9223372036854775807), // max_tps + sqltypes.NewInt64(9223372036854775807), // max_replication_lag + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), // state + sqltypes.NewInt64(1), // workflow_type + sqltypes.NewVarChar("wf"), // workflow + sqltypes.NewInt64(1), // workflow_sub_type + sqltypes.NewInt64(1), // defer_secondary_keys }, }, } @@ -225,7 +233,9 @@ func TestStopPosLess(t *testing.T) { _ = newFakeBinlogClient() - blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, NewStats()) + stats := NewStats() + defer stats.Stop() + blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, stats) errfunc := applyEvents(blp) dbClient.Wait() @@ -245,15 +255,15 @@ func TestStopPosGreater(t *testing.T) { InsertID: 0, Rows: [][]sqltypes.Value{ { - sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos - sqltypes.NewVarBinary("MariaDB/0-1-1085"), // stop_pos - sqltypes.NewInt64(9223372036854775807), // max_tps - sqltypes.NewInt64(9223372036854775807), // max_replication_lag - sqltypes.NewVarBinary("Running"), // state - sqltypes.NewInt64(1), // workflow_type - sqltypes.NewVarChar("wf"), // workflow - sqltypes.NewInt64(1), // workflow_sub_type - sqltypes.NewInt64(1), // defer_secondary_keys + sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos + sqltypes.NewVarBinary("MariaDB/0-1-1085"), // stop_pos + sqltypes.NewInt64(9223372036854775807), // max_tps + sqltypes.NewInt64(9223372036854775807), // max_replication_lag + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), // state + sqltypes.NewInt64(1), // workflow_type + sqltypes.NewVarChar("wf"), // workflow + sqltypes.NewInt64(1), // workflow_sub_type + sqltypes.NewInt64(1), // defer_secondary_keys }, }, } @@ -266,7 +276,9 @@ func TestStopPosGreater(t *testing.T) { _ = newFakeBinlogClient() - blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, NewStats()) + stats := NewStats() 
+ defer stats.Stop() + blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, stats) errfunc := applyEvents(blp) dbClient.Wait() @@ -286,15 +298,15 @@ func TestContextCancel(t *testing.T) { InsertID: 0, Rows: [][]sqltypes.Value{ { - sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos - sqltypes.NewVarBinary("MariaDB/0-1-1085"), // stop_pos - sqltypes.NewInt64(9223372036854775807), // max_tps - sqltypes.NewInt64(9223372036854775807), // max_replication_lag - sqltypes.NewVarBinary("Running"), // state - sqltypes.NewInt64(1), // workflow_type - sqltypes.NewVarChar("wf"), // workflow - sqltypes.NewInt64(1), // workflow_sub_type - sqltypes.NewInt64(1), // defer_secondary_keys + sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos + sqltypes.NewVarBinary("MariaDB/0-1-1085"), // stop_pos + sqltypes.NewInt64(9223372036854775807), // max_tps + sqltypes.NewInt64(9223372036854775807), // max_replication_lag + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), // state + sqltypes.NewInt64(1), // workflow_type + sqltypes.NewVarChar("wf"), // workflow + sqltypes.NewInt64(1), // workflow_sub_type + sqltypes.NewInt64(1), // defer_secondary_keys }, }, } @@ -307,7 +319,9 @@ func TestContextCancel(t *testing.T) { _ = newFakeBinlogClient() - blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, NewStats()) + stats := NewStats() + defer stats.Stop() + blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, stats) errfunc := applyEvents(blp) dbClient.Wait() @@ -326,7 +340,7 @@ func TestRetryOnDeadlock(t *testing.T) { dbClient := NewMockDBClient(t) dbClient.ExpectRequest("update _vt.vreplication set state='Running', message='' where id=1", testDMLResponse, nil) dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type, defer_secondary_keys from _vt.vreplication where id=1", testSettingsResponse, nil) - deadlocked := &mysql.SQLError{Num: 1213, Message: "deadlocked"} + 
deadlocked := &sqlerror.SQLError{Num: 1213, Message: "deadlocked"} dbClient.ExpectRequest("begin", nil, nil) dbClient.ExpectRequest("insert into t values(1)", nil, deadlocked) dbClient.ExpectRequest("rollback", nil, nil) @@ -335,7 +349,9 @@ func TestRetryOnDeadlock(t *testing.T) { dbClient.ExpectRequestRE("update _vt.vreplication set pos='MariaDB/0-1-1235', time_updated=.*", testDMLResponse, nil) dbClient.ExpectRequest("commit", nil, nil) - blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, NewStats()) + stats := NewStats() + defer stats.Stop() + blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, stats) blp.deadlockRetry = 10 * time.Millisecond errfunc := applyEvents(blp) @@ -400,24 +416,24 @@ func TestCreateVReplicationTables(t *testing.T) { } func TestUpdateVReplicationPos(t *testing.T) { - gtid := mysql.MustParseGTID("MariaDB", "0-1-8283") + gtid := replication.MustParseGTID("MariaDB", "0-1-8283") want := "update _vt.vreplication " + "set pos='MariaDB/0-1-8283', time_updated=88822, rows_copied=0, message='' " + "where id=78522" - got := GenerateUpdatePos(78522, mysql.Position{GTIDSet: gtid.GTIDSet()}, 88822, 0, 0, false) + got := GenerateUpdatePos(78522, replication.Position{GTIDSet: gtid.GTIDSet()}, 88822, 0, 0, false) if got != want { t.Errorf("updateVReplicationPos() = %#v, want %#v", got, want) } } func TestUpdateVReplicationTimestamp(t *testing.T) { - gtid := mysql.MustParseGTID("MariaDB", "0-2-582") + gtid := replication.MustParseGTID("MariaDB", "0-2-582") want := "update _vt.vreplication " + "set pos='MariaDB/0-2-582', time_updated=88822, transaction_timestamp=481828, rows_copied=0, message='' " + "where id=78522" - got := GenerateUpdatePos(78522, mysql.Position{GTIDSet: gtid.GTIDSet()}, 88822, 481828, 0, false) + got := GenerateUpdatePos(78522, replication.Position{GTIDSet: gtid.GTIDSet()}, 88822, 481828, 0, false) if got != want { t.Errorf("updateVReplicationPos() = %#v, want %#v", got, want) } diff --git 
a/go/vt/binlog/binlogplayer/dbclient.go b/go/vt/binlog/binlogplayer/dbclient.go index 251da23e929..f9cd03691a5 100644 --- a/go/vt/binlog/binlogplayer/dbclient.go +++ b/go/vt/binlog/binlogplayer/dbclient.go @@ -20,11 +20,12 @@ import ( "context" "fmt" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" ) @@ -55,7 +56,7 @@ type dbClientImplWithSidecarDBReplacement struct { // NewDBClient creates a DBClient instance func NewDBClient(params dbconfigs.Connector) DBClient { - if sidecardb.GetName() != sidecardb.DefaultName { + if sidecar.GetName() != sidecar.DefaultName { return &dbClientImplWithSidecarDBReplacement{ dbClientImpl{dbConfig: params}, } @@ -66,7 +67,7 @@ func NewDBClient(params dbconfigs.Connector) DBClient { } func (dc *dbClientImpl) handleError(err error) { - if mysql.IsConnErr(err) { + if sqlerror.IsConnErr(err) { dc.Close() } } @@ -141,7 +142,7 @@ func (dc *dbClientImpl) ExecuteFetch(query string, maxrows int) (*sqltypes.Resul func (dcr *dbClientImplWithSidecarDBReplacement) ExecuteFetch(query string, maxrows int) (*sqltypes.Result, error) { // Replace any provided sidecar database qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(query, sidecardb.DefaultName, sidecardb.GetName()) + uq, err := sqlparser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } diff --git a/go/vt/binlog/binlogplayer/mock_dbclient.go b/go/vt/binlog/binlogplayer/mock_dbclient.go index 50df683976d..d64c4d40146 100644 --- a/go/vt/binlog/binlogplayer/mock_dbclient.go +++ b/go/vt/binlog/binlogplayer/mock_dbclient.go @@ -17,8 +17,10 @@ limitations under the License. 
package binlogplayer import ( + "fmt" "regexp" "strings" + "sync" "testing" "time" @@ -34,9 +36,11 @@ type MockDBClient struct { t *testing.T UName string expect []*mockExpect + expectMu sync.Mutex currentResult int done chan struct{} invariants map[string]*sqltypes.Result + Tag string } type mockExpect struct { @@ -56,6 +60,28 @@ func NewMockDBClient(t *testing.T) *MockDBClient { "CREATE TABLE IF NOT EXISTS _vt.vreplication_log": {}, "select id, type, state, message from _vt.vreplication_log": {}, "insert into _vt.vreplication_log": {}, + // The following statements don't have a deterministic order as they are + // executed in the normal program flow, but ALSO done in a defer as a protective + // measure as they are resetting the values back to the original one. This also + // means that the values they set are based on the session defaults, which can + // change. So we make these invariants for unit test stability. + "select @@foreign_key_checks": sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "@@foreign_key_checks", + "int64", + ), + "1", + ), + "set @@session.foreign_key_checks": {}, + "set foreign_key_checks": {}, + "select @@session.sql_mode": sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "sql_mode", "varchar", + ), + "ONLY_FULL_GROUP_BY,NO_AUTO_VALUE_ON_ZERO,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION", + ), + "set @@session.sql_mode": {}, + "set sql_mode": {}, }, } } @@ -77,6 +103,8 @@ func (dc *MockDBClient) ExpectRequest(query string, result *sqltypes.Result, err dc.done = make(chan struct{}) default: } + dc.expectMu.Lock() + defer dc.expectMu.Unlock() dc.expect = append(dc.expect, &mockExpect{ query: query, result: result, @@ -93,6 +121,8 @@ func (dc *MockDBClient) ExpectRequestRE(queryRE string, result *sqltypes.Result, dc.done = make(chan struct{}) default: } + dc.expectMu.Lock() + defer dc.expectMu.Unlock() dc.expect = append(dc.expect, &mockExpect{ query: queryRE, re: 
regexp.MustCompile(queryRE), @@ -149,25 +179,43 @@ func (dc *MockDBClient) Close() { // ExecuteFetch is part of the DBClient interface func (dc *MockDBClient) ExecuteFetch(query string, maxrows int) (qr *sqltypes.Result, err error) { dc.t.Helper() - dc.t.Logf("DBClient query: %v", query) + msg := "DBClient query: %v" + if dc.Tag != "" { + msg = fmt.Sprintf("[%s] %s", dc.Tag, msg) + } + dc.t.Logf(msg, query) for q, result := range dc.invariants { - if strings.Contains(query, q) { + if strings.Contains(strings.ToLower(query), strings.ToLower(q)) { return result, nil } } + dc.expectMu.Lock() + defer dc.expectMu.Unlock() if dc.currentResult >= len(dc.expect) { - dc.t.Fatalf("DBClientMock: query: %s, no more requests are expected", query) + msg := "DBClientMock: query: %s, no more requests are expected" + if dc.Tag != "" { + msg = fmt.Sprintf("[%s] %s", dc.Tag, msg) + } + dc.t.Fatalf(msg, query) } result := dc.expect[dc.currentResult] if result.re == nil { if query != result.query { - dc.t.Fatalf("DBClientMock: query: %s, want %s", query, result.query) + msg := "DBClientMock: query: %s, want %s" + if dc.Tag != "" { + msg = fmt.Sprintf("[%s] %s", dc.Tag, msg) + } + dc.t.Fatalf(msg, query, result.query) } } else { if !result.re.MatchString(query) { - dc.t.Fatalf("DBClientMock: query: %s, must match %s", query, result.query) + msg := "DBClientMock: query: %s, must match %s" + if dc.Tag != "" { + msg = fmt.Sprintf("[%s] %s", dc.Tag, msg) + } + dc.t.Fatalf(msg, query, result.query) } } dc.currentResult++ diff --git a/go/vt/binlog/event_streamer.go b/go/vt/binlog/event_streamer.go index a8cce64a0c9..a872b089bff 100644 --- a/go/vt/binlog/event_streamer.go +++ b/go/vt/binlog/event_streamer.go @@ -17,14 +17,13 @@ limitations under the License. 
package binlog import ( + "context" "encoding/base64" "fmt" "strconv" "strings" - "context" - - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" @@ -52,7 +51,7 @@ type EventStreamer struct { } // NewEventStreamer returns a new EventStreamer on top of a Streamer -func NewEventStreamer(cp dbconfigs.Connector, se *schema.Engine, startPos mysql.Position, timestamp int64, sendEvent sendEventFunc) *EventStreamer { +func NewEventStreamer(cp dbconfigs.Connector, se *schema.Engine, startPos replication.Position, timestamp int64, sendEvent sendEventFunc) *EventStreamer { evs := &EventStreamer{ sendEvent: sendEvent, } diff --git a/go/vt/binlog/eventtoken/compare.go b/go/vt/binlog/eventtoken/compare.go index 2fe908527d2..e1c9501a8dc 100644 --- a/go/vt/binlog/eventtoken/compare.go +++ b/go/vt/binlog/eventtoken/compare.go @@ -19,7 +19,7 @@ limitations under the License. package eventtoken import ( - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -45,11 +45,11 @@ func Fresher(ev1, ev2 *querypb.EventToken) int { } // We can parse them. - pos1, err := mysql.DecodePosition(ev1.Position) + pos1, err := replication.DecodePosition(ev1.Position) if err != nil { return -1 } - pos2, err := mysql.DecodePosition(ev2.Position) + pos2, err := replication.DecodePosition(ev2.Position) if err != nil { return -1 } diff --git a/go/vt/binlog/updatestreamctl.go b/go/vt/binlog/updatestreamctl.go index 7ece45cda9c..78d61c0860c 100644 --- a/go/vt/binlog/updatestreamctl.go +++ b/go/vt/binlog/updatestreamctl.go @@ -17,13 +17,12 @@ limitations under the License. 
package binlog import ( + "context" "fmt" "sync" "sync/atomic" - "context" - - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/tb" "vitess.io/vitess/go/vt/dbconfigs" @@ -250,7 +249,7 @@ func (updateStream *UpdateStreamImpl) IsEnabled() bool { // StreamKeyRange is part of the UpdateStream interface func (updateStream *UpdateStreamImpl) StreamKeyRange(ctx context.Context, position string, keyRange *topodatapb.KeyRange, charset *binlogdatapb.Charset, callback func(trans *binlogdatapb.BinlogTransaction) error) (err error) { - pos, err := mysql.DecodePosition(position) + pos, err := replication.DecodePosition(position) if err != nil { return err } @@ -290,7 +289,7 @@ func (updateStream *UpdateStreamImpl) StreamKeyRange(ctx context.Context, positi // StreamTables is part of the UpdateStream interface func (updateStream *UpdateStreamImpl) StreamTables(ctx context.Context, position string, tables []string, charset *binlogdatapb.Charset, callback func(trans *binlogdatapb.BinlogTransaction) error) (err error) { - pos, err := mysql.DecodePosition(position) + pos, err := replication.DecodePosition(position) if err != nil { return err } diff --git a/go/vt/dbconfigs/credentials.go b/go/vt/dbconfigs/credentials.go index 5a5dbc1c1a1..4e0e5518869 100644 --- a/go/vt/dbconfigs/credentials.go +++ b/go/vt/dbconfigs/credentials.go @@ -61,7 +61,6 @@ var ( "mysqlctld", "vtbackup", "vtcombo", - "vtgr", "vttablet", } ) diff --git a/go/vt/dbconfigs/dbconfigs.go b/go/vt/dbconfigs/dbconfigs.go index 78167d2c971..c1c3d2434fc 100644 --- a/go/vt/dbconfigs/dbconfigs.go +++ b/go/vt/dbconfigs/dbconfigs.go @@ -418,6 +418,6 @@ func NewTestDBConfigs(genParams, appDebugParams mysql.ConnParams, dbname string) replParams: genParams, externalReplParams: genParams, DBName: dbname, - Charset: "utf8mb4_general_ci", + Charset: "", } } diff --git a/go/vt/dbconnpool/connection.go b/go/vt/dbconnpool/connection.go index bdf74b8a429..8e9a0f4a5c0 
100644 --- a/go/vt/dbconnpool/connection.go +++ b/go/vt/dbconnpool/connection.go @@ -21,6 +21,7 @@ import ( "fmt" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" ) @@ -112,7 +113,7 @@ func (dbc *DBConnection) ExecuteStreamFetch(query string, callback func(*sqltype } func (dbc *DBConnection) handleError(err error) { - if mysql.IsConnErr(err) { + if sqlerror.IsConnErr(err) { dbc.Close() } } diff --git a/go/vt/discovery/fake_healthcheck.go b/go/vt/discovery/fake_healthcheck.go index acff538b78d..cb959902c19 100644 --- a/go/vt/discovery/fake_healthcheck.go +++ b/go/vt/discovery/fake_healthcheck.go @@ -365,12 +365,24 @@ func (fhc *FakeHealthCheck) GetAllTablets() map[string]*topodatapb.Tablet { return res } +// BroadcastAll broadcasts all the tablets' healthchecks +func (fhc *FakeHealthCheck) BroadcastAll() { + if fhc.ch == nil { + return + } + fhc.mu.Lock() + defer fhc.mu.Unlock() + for _, item := range fhc.items { + fhc.ch <- simpleCopy(item.ts) + } +} + func simpleCopy(th *TabletHealth) *TabletHealth { return &TabletHealth{ Conn: th.Conn, - Tablet: proto.Clone(th.Tablet).(*topodatapb.Tablet), - Target: proto.Clone(th.Target).(*querypb.Target), - Stats: proto.Clone(th.Stats).(*querypb.RealtimeStats), + Tablet: th.Tablet.CloneVT(), + Target: th.Target.CloneVT(), + Stats: th.Stats.CloneVT(), LastError: th.LastError, PrimaryTermStartTime: th.PrimaryTermStartTime, Serving: th.Serving, diff --git a/go/vt/discovery/healthcheck_test.go b/go/vt/discovery/healthcheck_test.go index 07350a0a64a..5fadc57eb2e 100644 --- a/go/vt/discovery/healthcheck_test.go +++ b/go/vt/discovery/healthcheck_test.go @@ -64,10 +64,12 @@ func init() { } func TestHealthCheck(t *testing.T) { + ctx := utils.LeakCheckContext(t) // reset error counters hcErrorCounters.ResetAll() - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := 
createTestHc(ctx, ts) // close healthcheck defer hc.Close() tablet := createTestTablet(0, "cell", "a") @@ -97,8 +99,8 @@ func TestHealthCheck(t *testing.T) { Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.5}, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.5}, } input <- shr result = <-resultChan @@ -130,11 +132,11 @@ func TestHealthCheck(t *testing.T) { // TabletType changed, should get both old and new event shr = &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, - Serving: true, - TabletExternallyReparentedTimestamp: 10, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, + Serving: true, + PrimaryTermStartTimestamp: 10, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, } want = &TabletHealth{ Tablet: tablet, @@ -159,11 +161,11 @@ func TestHealthCheck(t *testing.T) { // Serving & RealtimeStats changed shr = &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: false, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.3}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: false, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.3}, } want = &TabletHealth{ Tablet: tablet, @@ -179,11 +181,11 @@ func 
TestHealthCheck(t *testing.T) { // HealthError shr = &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{HealthError: "some error", ReplicationLagSeconds: 1, CpuUsage: 0.3}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{HealthError: "some error", ReplicationLagSeconds: 1, CpuUsage: 0.3}, } want = &TabletHealth{ Tablet: tablet, @@ -206,8 +208,11 @@ func TestHealthCheck(t *testing.T) { } func TestHealthCheckStreamError(t *testing.T) { - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) defer hc.Close() tablet := createTestTablet(0, "cell", "a") @@ -229,11 +234,11 @@ func TestHealthCheckStreamError(t *testing.T) { // one tablet after receiving a StreamHealthResponse shr := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, } want = &TabletHealth{ Tablet: tablet, @@ -267,8 +272,11 @@ func TestHealthCheckStreamError(t *testing.T) { // TestHealthCheckErrorOnPrimary is the same as TestHealthCheckStreamError except for tablet type func TestHealthCheckErrorOnPrimary(t 
*testing.T) { - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) defer hc.Close() tablet := createTestTablet(0, "cell", "a") @@ -290,11 +298,11 @@ func TestHealthCheckErrorOnPrimary(t *testing.T) { // one tablet after receiving a StreamHealthResponse shr := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, - Serving: true, - TabletExternallyReparentedTimestamp: 10, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, + Serving: true, + PrimaryTermStartTimestamp: 10, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, } want = &TabletHealth{ Tablet: tablet, @@ -327,8 +335,11 @@ func TestHealthCheckErrorOnPrimary(t *testing.T) { } func TestHealthCheckErrorOnPrimaryAfterExternalReparent(t *testing.T) { - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) defer hc.Close() resultChan := hc.Subscribe() @@ -348,20 +359,20 @@ func TestHealthCheckErrorOnPrimaryAfterExternalReparent(t *testing.T) { <-resultChan shr2 := &querypb.StreamHealthResponse{ - TabletAlias: tablet2.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, + TabletAlias: tablet2.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: 
&querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, } input2 <- shr2 <-resultChan shr1 := &querypb.StreamHealthResponse{ - TabletAlias: tablet1.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, - Serving: true, - TabletExternallyReparentedTimestamp: 10, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.2}, + TabletAlias: tablet1.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, + Serving: true, + PrimaryTermStartTimestamp: 10, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.2}, } input1 <- shr1 <-resultChan @@ -377,11 +388,11 @@ func TestHealthCheckErrorOnPrimaryAfterExternalReparent(t *testing.T) { mustMatch(t, health, a, "unexpected result") shr2 = &querypb.StreamHealthResponse{ - TabletAlias: tablet2.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, - Serving: true, - TabletExternallyReparentedTimestamp: 20, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.2}, + TabletAlias: tablet2.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, + Serving: true, + PrimaryTermStartTimestamp: 20, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.2}, } input2 <- shr2 <-resultChan @@ -405,8 +416,11 @@ func TestHealthCheckErrorOnPrimaryAfterExternalReparent(t *testing.T) { } func TestHealthCheckVerifiesTabletAlias(t *testing.T) { - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) defer hc.Close() tablet := createTestTablet(0, "cell", "a") @@ -427,11 +441,11 @@ func TestHealthCheckVerifiesTabletAlias(t *testing.T) { mustMatch(t, want, result, "Wrong TabletHealth data") input <- 
&querypb.StreamHealthResponse{ - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, - TabletAlias: &topodatapb.TabletAlias{Uid: 20, Cell: "cellb"}, - Serving: true, - TabletExternallyReparentedTimestamp: 10, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, + TabletAlias: &topodatapb.TabletAlias{Uid: 20, Cell: "cellb"}, + Serving: true, + PrimaryTermStartTimestamp: 10, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, } ticker := time.NewTicker(1 * time.Second) @@ -448,8 +462,12 @@ func TestHealthCheckVerifiesTabletAlias(t *testing.T) { // TestHealthCheckCloseWaitsForGoRoutines tests that Close() waits for all Go // routines to finish and the listener won't be called anymore. func TestHealthCheckCloseWaitsForGoRoutines(t *testing.T) { - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) + defer hc.Close() tablet := createTestTablet(0, "cell", "a") input := make(chan *querypb.StreamHealthResponse, 1) createFakeConn(tablet, input) @@ -469,11 +487,11 @@ func TestHealthCheckCloseWaitsForGoRoutines(t *testing.T) { // one tablet after receiving a StreamHealthResponse shr := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, } want = 
&TabletHealth{ Tablet: tablet, @@ -488,7 +506,7 @@ func TestHealthCheckCloseWaitsForGoRoutines(t *testing.T) { mustMatch(t, want, result, "Wrong TabletHealth data") // Change input to distinguish between stats sent before and after Close(). - shr.TabletExternallyReparentedTimestamp = 11 + shr.PrimaryTermStartTimestamp = 11 // Close the healthcheck. Tablet connections are closed asynchronously and // Close() will block until all Go routines (one per connection) are done. assert.Nil(t, hc.Close(), "Close returned error") @@ -508,10 +526,13 @@ func TestHealthCheckCloseWaitsForGoRoutines(t *testing.T) { } func TestHealthCheckTimeout(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // reset counters hcErrorCounters.ResetAll() - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) hc.healthCheckTimeout = 500 * time.Millisecond defer hc.Close() tablet := createTestTablet(0, "cell", "a") @@ -531,11 +552,11 @@ func TestHealthCheckTimeout(t *testing.T) { // one tablet after receiving a StreamHealthResponse shr := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, } want = &TabletHealth{ Tablet: tablet, @@ -580,8 +601,11 @@ func TestHealthCheckTimeout(t *testing.T) { } func TestWaitForAllServingTablets(t *testing.T) { - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc 
:= createTestHc(ctx, ts) defer hc.Close() tablet := createTestTablet(0, "cell", "a") tablet.Type = topodatapb.TabletType_REPLICA @@ -601,18 +625,19 @@ func TestWaitForAllServingTablets(t *testing.T) { // there will be a first result, get and discard it <-resultChan // empty - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, 1*time.Second) defer cancel() err := hc.WaitForAllServingTablets(ctx, targets) assert.NotNil(t, err, "error should not be nil") shr := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, } input <- shr @@ -672,8 +697,11 @@ func TestWaitForAllServingTablets(t *testing.T) { // TestRemoveTablet tests the behavior when a tablet goes away. 
func TestRemoveTablet(t *testing.T) { - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) defer hc.Close() tablet := createTestTablet(0, "cell", "a") tablet.Type = topodatapb.TabletType_REPLICA @@ -687,11 +715,11 @@ func TestRemoveTablet(t *testing.T) { <-resultChan shrReplica := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, } want := []*TabletHealth{{ Tablet: tablet, @@ -736,11 +764,11 @@ func TestRemoveTablet(t *testing.T) { // Change the tablet type to RDONLY. tablet.Type = topodatapb.TabletType_RDONLY shrRdonly := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_RDONLY}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 2, CpuUsage: 0.4}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_RDONLY}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 2, CpuUsage: 0.4}, } // Now Replace it, which does a Remove and Add. The tablet should be removed @@ -779,8 +807,11 @@ func TestRemoveTablet(t *testing.T) { // TestGetHealthyTablets tests the functionality of GetHealthyTabletStats. 
func TestGetHealthyTablets(t *testing.T) { - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) defer hc.Close() tablet := createTestTablet(0, "cell", "a") tablet.Type = topodatapb.TabletType_REPLICA @@ -797,11 +828,11 @@ func TestGetHealthyTablets(t *testing.T) { assert.Empty(t, a, "wrong result, expected empty list") shr := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, } want := []*TabletHealth{{ Tablet: tablet, @@ -818,11 +849,11 @@ func TestGetHealthyTablets(t *testing.T) { // update health with a change that won't change health array shr = &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 2, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 2, CpuUsage: 0.2}, } input <- shr // wait for result before checking @@ -833,11 +864,11 @@ func TestGetHealthyTablets(t *testing.T) { // update stats with a change that will change health array shr = &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - 
Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 35, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 35, CpuUsage: 0.2}, } want = []*TabletHealth{{ Tablet: tablet, @@ -863,11 +894,11 @@ func TestGetHealthyTablets(t *testing.T) { <-resultChan shr2 := &querypb.StreamHealthResponse{ - TabletAlias: tablet2.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, + TabletAlias: tablet2.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, } want2 := []*TabletHealth{{ Tablet: tablet, @@ -893,11 +924,11 @@ func TestGetHealthyTablets(t *testing.T) { mustMatch(t, want2, a, "unexpected result") shr2 = &querypb.StreamHealthResponse{ - TabletAlias: tablet2.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: false, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, + TabletAlias: tablet2.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: false, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, } input2 <- shr2 // wait for result @@ -911,7 +942,7 @@ func TestGetHealthyTablets(t 
*testing.T) { Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, Serving: true, - TabletExternallyReparentedTimestamp: 10, + PrimaryTermStartTimestamp: 10, RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.2}, } @@ -935,11 +966,11 @@ func TestGetHealthyTablets(t *testing.T) { // reparent: old replica goes into primary shr = &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, - Serving: true, - TabletExternallyReparentedTimestamp: 20, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, + Serving: true, + PrimaryTermStartTimestamp: 20, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.2}, } input <- shr <-resultChan @@ -965,8 +996,11 @@ func TestGetHealthyTablets(t *testing.T) { } func TestPrimaryInOtherCell(t *testing.T) { - ts := memorytopo.NewServer("cell1", "cell2") - hc := NewHealthCheck(context.Background(), 1*time.Millisecond, time.Hour, ts, "cell1", "cell1, cell2") + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell1", "cell2") + defer ts.Close() + hc := NewHealthCheck(ctx, 1*time.Millisecond, time.Hour, ts, "cell1", "cell1, cell2") defer hc.Close() // add a tablet as primary in different cell @@ -989,11 +1023,11 @@ func TestPrimaryInOtherCell(t *testing.T) { } shr := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, - Serving: true, - TabletExternallyReparentedTimestamp: 20, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: 
topodatapb.TabletType_PRIMARY}, + Serving: true, + PrimaryTermStartTimestamp: 20, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.2}, } want := &TabletHealth{ Tablet: tablet, @@ -1022,8 +1056,11 @@ func TestPrimaryInOtherCell(t *testing.T) { } func TestReplicaInOtherCell(t *testing.T) { - ts := memorytopo.NewServer("cell1", "cell2") - hc := NewHealthCheck(context.Background(), 1*time.Millisecond, time.Hour, ts, "cell1", "cell1, cell2") + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell1", "cell2") + defer ts.Close() + hc := NewHealthCheck(ctx, 1*time.Millisecond, time.Hour, ts, "cell1", "cell1, cell2") defer hc.Close() // add a tablet as replica @@ -1045,11 +1082,11 @@ func TestReplicaInOtherCell(t *testing.T) { } shr := &querypb.StreamHealthResponse{ - TabletAlias: local.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, + TabletAlias: local.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, } want := &TabletHealth{ Tablet: local, @@ -1091,11 +1128,11 @@ func TestReplicaInOtherCell(t *testing.T) { } shr2 := &querypb.StreamHealthResponse{ - TabletAlias: remote.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, + TabletAlias: remote.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 
10, CpuUsage: 0.2}, } want2 := &TabletHealth{ Tablet: remote, @@ -1124,8 +1161,11 @@ func TestReplicaInOtherCell(t *testing.T) { } func TestCellAliases(t *testing.T) { - ts := memorytopo.NewServer("cell1", "cell2") - hc := NewHealthCheck(context.Background(), 1*time.Millisecond, time.Hour, ts, "cell1", "cell1, cell2") + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell1", "cell2") + defer ts.Close() + hc := NewHealthCheck(ctx, 1*time.Millisecond, time.Hour, ts, "cell1", "cell1, cell2") defer hc.Close() cellsAlias := &topodatapb.CellsAlias{ @@ -1154,11 +1194,11 @@ func TestCellAliases(t *testing.T) { } shr := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, } want := []*TabletHealth{{ Tablet: tablet, @@ -1184,8 +1224,11 @@ func TestCellAliases(t *testing.T) { } func TestHealthCheckChecksGrpcPort(t *testing.T) { - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) defer hc.Close() tablet := createTestTablet(0, "cell", "a") @@ -1204,6 +1247,7 @@ func TestHealthCheckChecksGrpcPort(t *testing.T) { } func TestTemplate(t *testing.T) { + defer utils.EnsureNoLeaks(t) TabletURLTemplateString = "http://{{.GetTabletHostPort}}" ParseTabletURLTemplateFromFlag() @@ -1231,6 +1275,7 @@ func TestTemplate(t *testing.T) { } func TestDebugURLFormatting(t *testing.T) { + defer utils.EnsureNoLeaks(t) TabletURLTemplateString = 
"https://{{.GetHostNameLevel 0}}.bastion.{{.Tablet.Alias.Cell}}.corp" ParseTabletURLTemplateFromFlag() @@ -1270,8 +1315,8 @@ func tabletDialer(tablet *topodatapb.Tablet, _ grpcclient.FailFast) (queryservic return nil, fmt.Errorf("tablet %v not found", key) } -func createTestHc(ts *topo.Server) *HealthCheckImpl { - return NewHealthCheck(context.Background(), 1*time.Millisecond, time.Hour, ts, "cell", "") +func createTestHc(ctx context.Context, ts *topo.Server) *HealthCheckImpl { + return NewHealthCheck(ctx, 1*time.Millisecond, time.Hour, ts, "cell", "") } type fakeConn struct { diff --git a/go/vt/discovery/keyspace_events.go b/go/vt/discovery/keyspace_events.go index aeaa0ff91de..163f240de8c 100644 --- a/go/vt/discovery/keyspace_events.go +++ b/go/vt/discovery/keyspace_events.go @@ -23,13 +23,17 @@ import ( "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/topotools" + + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" ) // KeyspaceEventWatcher is an auxiliary watcher that watches all availability incidents @@ -62,11 +66,14 @@ type KeyspaceEvent struct { // Shards is a list of all the shards in the keyspace, including their state after the event is resolved Shards []ShardEvent + + // MoveTablesState records the current state of an ongoing MoveTables workflow + MoveTablesState MoveTablesState } type ShardEvent struct { Tablet *topodatapb.TabletAlias - Target *query.Target + Target *querypb.Target Serving bool } @@ -86,6 +93,16 @@ func NewKeyspaceEventWatcher(ctx context.Context, topoServer srvtopo.Server, hc return kew } +type MoveTablesStatus int + +const ( + 
MoveTablesUnknown MoveTablesStatus = iota + // MoveTablesSwitching is set when the write traffic is the middle of being switched from the source to the target + MoveTablesSwitching + // MoveTablesSwitched is set when write traffic has been completely switched to the target + MoveTablesSwitched +) + // keyspaceState is the internal state for all the keyspaces that the KEW is // currently watching type keyspaceState struct { @@ -99,6 +116,8 @@ type keyspaceState struct { lastError error lastKeyspace *topodatapb.SrvKeyspace shards map[string]*shardState + + moveTablesState *MoveTablesState } // Format prints the internal state for this keyspace for debug purposes @@ -125,17 +144,27 @@ func (kss *keyspaceState) beingResharded(currentShard string) bool { kss.mu.Lock() defer kss.mu.Unlock() - // if the keyspace is gone, or if it has no known availability events, the keyspace - // cannot be in the middle of a resharding operation - if kss.deleted || kss.consistent { + // If the keyspace is gone, has no known availability events, or is in the middle of a + // MoveTables then the keyspace cannot be in the middle of a resharding operation. + if kss.deleted || kss.consistent || (kss.moveTablesState != nil && kss.moveTablesState.Typ != MoveTablesType(MoveTablesNone)) { return false } - // for all the known shards, try to find a primary shard besides the one we're trying to access - // and which is currently healthy. if there are other healthy primaries in the keyspace, it means - // we're in the middle of a resharding operation + // If there are unequal and overlapping shards in the keyspace and any of them are + // currently serving then we assume that we are in the middle of a Reshard. 
+ _, ckr, err := topo.ValidateShardName(currentShard) + if err != nil || ckr == nil { // Assume not and avoid potential panic + return false + } for shard, sstate := range kss.shards { - if shard != currentShard && sstate.serving { + if !sstate.serving || shard == currentShard { + continue + } + _, skr, err := topo.ValidateShardName(shard) + if err != nil || skr == nil { // Assume not and avoid potential panic + return false + } + if key.KeyRangeIntersect(ckr, skr) { return true } } @@ -144,7 +173,7 @@ func (kss *keyspaceState) beingResharded(currentShard string) bool { } type shardState struct { - target *query.Target + target *querypb.Target serving bool externallyReparented int64 currentPrimary *topodatapb.TabletAlias @@ -192,7 +221,7 @@ func (kew *KeyspaceEventWatcher) run(ctx context.Context) { if result == nil { return } - kew.processHealthCheck(result) + kew.processHealthCheck(ctx, result) } } }() @@ -205,7 +234,7 @@ func (kew *KeyspaceEventWatcher) run(ctx context.Context) { return } for _, ks := range keyspaces { - kew.getKeyspaceStatus(ks) + kew.getKeyspaceStatus(ctx, ks) } }() } @@ -218,6 +247,10 @@ func (kss *keyspaceState) ensureConsistentLocked() { return } + if kss.moveTablesState != nil && kss.moveTablesState.Typ != MoveTablesNone && kss.moveTablesState.State != MoveTablesSwitched { + return + } + // get the topology metadata for our primary from `lastKeyspace`; this value is refreshed // from our topology watcher whenever a change is detected, so it should always be up to date primary := topoproto.SrvKeyspaceGetPartition(kss.lastKeyspace, topodatapb.TabletType_PRIMARY) @@ -252,16 +285,25 @@ func (kss *keyspaceState) ensureConsistentLocked() { } } + // clone the current moveTablesState, if any, to handle race conditions where it can get updated while we're broadcasting + var moveTablesState MoveTablesState + if kss.moveTablesState != nil { + moveTablesState = *kss.moveTablesState + } + + ksevent := &KeyspaceEvent{ + Cell: kss.kew.localCell, + 
Keyspace: kss.keyspace, + Shards: make([]ShardEvent, 0, len(kss.shards)), + MoveTablesState: moveTablesState, + } + // we haven't found any inconsistencies between the HealthCheck stream and the topology // watcher. this means the ongoing availability event has been resolved, so we can broadcast // a resolution event to all listeners kss.consistent = true - ksevent := &KeyspaceEvent{ - Cell: kss.kew.localCell, - Keyspace: kss.keyspace, - Shards: make([]ShardEvent, 0, len(kss.shards)), - } + kss.moveTablesState = nil for shard, sstate := range kss.shards { ksevent.Shards = append(ksevent.Shards, ShardEvent{ @@ -329,6 +371,97 @@ func (kss *keyspaceState) onHealthCheck(th *TabletHealth) { kss.ensureConsistentLocked() } +type MoveTablesType int + +const ( + MoveTablesNone MoveTablesType = iota + MoveTablesRegular + MoveTablesShardByShard +) + +type MoveTablesState struct { + Typ MoveTablesType + State MoveTablesStatus +} + +func (kss *keyspaceState) getMoveTablesStatus(vs *vschemapb.SrvVSchema) (*MoveTablesState, error) { + mtState := &MoveTablesState{ + Typ: MoveTablesNone, + State: MoveTablesUnknown, + } + + // if there are no routing rules defined, then movetables is not in progress, exit early + if (vs.RoutingRules != nil && len(vs.RoutingRules.Rules) == 0) && + (vs.ShardRoutingRules != nil && len(vs.ShardRoutingRules.Rules) == 0) { + return mtState, nil + } + + shortCtx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) + defer cancel() + ts, _ := kss.kew.ts.GetTopoServer() + + // collect all current shard information from the topo + var shardInfos []*topo.ShardInfo + for _, sstate := range kss.shards { + si, err := ts.GetShard(shortCtx, kss.keyspace, sstate.target.Shard) + if err != nil { + return nil, err + } + shardInfos = append(shardInfos, si) + } + + // check if any shard has denied tables and if so, record one of these to check where it currently points to + // using the (shard) routing rules + var shardsWithDeniedTables 
[]string + var oneDeniedTable string + for _, si := range shardInfos { + for _, tc := range si.TabletControls { + if len(tc.DeniedTables) > 0 { + oneDeniedTable = tc.DeniedTables[0] + shardsWithDeniedTables = append(shardsWithDeniedTables, si.ShardName()) + } + } + } + if len(shardsWithDeniedTables) == 0 { + return mtState, nil + } + + // check if a shard by shard migration is in progress and if so detect if it has been switched + isPartialTables := vs.ShardRoutingRules != nil && len(vs.ShardRoutingRules.Rules) > 0 + + if isPartialTables { + srr := topotools.GetShardRoutingRulesMap(vs.ShardRoutingRules) + mtState.Typ = MoveTablesShardByShard + mtState.State = MoveTablesSwitched + for _, shard := range shardsWithDeniedTables { + ruleKey := topotools.GetShardRoutingRuleKey(kss.keyspace, shard) + if _, ok := srr[ruleKey]; ok { + // still pointing to the source shard + mtState.State = MoveTablesSwitching + break + } + } + log.Infof("getMoveTablesStatus: keyspace %s declaring partial move tables %v", kss.keyspace, mtState) + return mtState, nil + } + + // it wasn't a shard by shard migration, but since we have denied tables it must be a regular MoveTables + mtState.Typ = MoveTablesRegular + mtState.State = MoveTablesSwitching + rr := topotools.GetRoutingRulesMap(vs.RoutingRules) + if rr != nil { + r, ok := rr[oneDeniedTable] + // if a rule exists for the table and points to the target keyspace, writes have been switched + if ok && len(r) > 0 && r[0] != fmt.Sprintf("%s.%s", kss.keyspace, oneDeniedTable) { + mtState.State = MoveTablesSwitched + log.Infof("onSrvKeyspace:: keyspace %s writes have been switched for table %s, rule %v", kss.keyspace, oneDeniedTable, r[0]) + } + } + log.Infof("getMoveTablesStatus: keyspace %s declaring regular move tables %v", kss.keyspace, mtState) + + return mtState, nil +} + // onSrvKeyspace is the callback that updates this keyspace with fresh topology data from our topology server. 
// this callback is called from a Watcher in the topo server whenever a change to the topology for this keyspace // occurs. this watcher is dedicated to this keyspace, and will only yield topology metadata changes for as @@ -379,24 +512,54 @@ func (kss *keyspaceState) onSrvKeyspace(newKeyspace *topodatapb.SrvKeyspace, new return true } +// isServing returns whether a keyspace has at least one serving shard or not. +func (kss *keyspaceState) isServing() bool { + kss.mu.Lock() + defer kss.mu.Unlock() + for _, state := range kss.shards { + if state.serving { + return true + } + } + return false +} + +// onSrvVSchema is called from a Watcher in the topo server whenever the SrvVSchema is updated by Vitess. +// For the purposes here, we are interested in updates to the RoutingRules or ShardRoutingRules. +// In addition, the traffic switcher updates SrvVSchema when the DeniedTables attributes in a Shard record is +// modified. +func (kss *keyspaceState) onSrvVSchema(vs *vschemapb.SrvVSchema, err error) bool { + kss.mu.Lock() + defer kss.mu.Unlock() + kss.moveTablesState, _ = kss.getMoveTablesStatus(vs) + if kss.moveTablesState != nil && kss.moveTablesState.Typ != MoveTablesNone { + // mark the keyspace as inconsistent. ensureConsistentLocked() checks if the workflow is switched, + // and if so, it will send an event to the buffering subscribers to indicate that buffering can be stopped. + kss.consistent = false + kss.ensureConsistentLocked() + } + return true +} + // newKeyspaceState allocates the internal state required to keep track of availability incidents // in this keyspace, and starts up a SrvKeyspace watcher on our topology server which will update // our keyspaceState with any topology changes in real time. 
-func newKeyspaceState(kew *KeyspaceEventWatcher, cell, keyspace string) *keyspaceState { +func newKeyspaceState(ctx context.Context, kew *KeyspaceEventWatcher, cell, keyspace string) *keyspaceState { log.Infof("created dedicated watcher for keyspace %s/%s", cell, keyspace) kss := &keyspaceState{ kew: kew, keyspace: keyspace, shards: make(map[string]*shardState), } - kew.ts.WatchSrvKeyspace(context.Background(), cell, keyspace, kss.onSrvKeyspace) + kew.ts.WatchSrvKeyspace(ctx, cell, keyspace, kss.onSrvKeyspace) + kew.ts.WatchSrvVSchema(ctx, cell, kss.onSrvVSchema) return kss } // processHealthCheck is the callback that is called by the global HealthCheck stream that was initiated // by this KeyspaceEventWatcher. it redirects the TabletHealth event to the corresponding keyspaceState -func (kew *KeyspaceEventWatcher) processHealthCheck(th *TabletHealth) { - kss := kew.getKeyspaceStatus(th.Target.Keyspace) +func (kew *KeyspaceEventWatcher) processHealthCheck(ctx context.Context, th *TabletHealth) { + kss := kew.getKeyspaceStatus(ctx, th.Target.Keyspace) if kss == nil { return } @@ -406,13 +569,12 @@ func (kew *KeyspaceEventWatcher) processHealthCheck(th *TabletHealth) { // getKeyspaceStatus returns the keyspaceState object for the corresponding keyspace, allocating it // if we've never seen the keyspace before. 
-func (kew *KeyspaceEventWatcher) getKeyspaceStatus(keyspace string) *keyspaceState { +func (kew *KeyspaceEventWatcher) getKeyspaceStatus(ctx context.Context, keyspace string) *keyspaceState { kew.mu.Lock() defer kew.mu.Unlock() - kss := kew.keyspaces[keyspace] if kss == nil { - kss = newKeyspaceState(kew, kew.localCell, keyspace) + kss = newKeyspaceState(ctx, kew, kew.localCell, keyspace) kew.keyspaces[keyspace] = kss } if kss.deleted { @@ -434,11 +596,11 @@ func (kew *KeyspaceEventWatcher) getKeyspaceStatus(keyspace string) *keyspaceSta // This is not a fully accurate heuristic, but it's good enough that we'd want to buffer the // request for the given target under the assumption that the reason why it cannot be completed // right now is transitory. -func (kew *KeyspaceEventWatcher) TargetIsBeingResharded(target *query.Target) bool { +func (kew *KeyspaceEventWatcher) TargetIsBeingResharded(ctx context.Context, target *querypb.Target) bool { if target.TabletType != topodatapb.TabletType_PRIMARY { return false } - ks := kew.getKeyspaceStatus(target.Keyspace) + ks := kew.getKeyspaceStatus(ctx, target.Keyspace) if ks == nil { return false } @@ -455,11 +617,11 @@ func (kew *KeyspaceEventWatcher) TargetIsBeingResharded(target *query.Target) bo // to determine that there was a serving primary which now became non serving. This is only possible in a DemotePrimary // RPC which are only called from ERS and PRS. So buffering will stop when these operations succeed. // We return the tablet alias of the primary if it is serving. 
-func (kew *KeyspaceEventWatcher) PrimaryIsNotServing(target *query.Target) (*topodatapb.TabletAlias, bool) { +func (kew *KeyspaceEventWatcher) PrimaryIsNotServing(ctx context.Context, target *querypb.Target) (*topodatapb.TabletAlias, bool) { if target.TabletType != topodatapb.TabletType_PRIMARY { return nil, false } - ks := kew.getKeyspaceStatus(target.Keyspace) + ks := kew.getKeyspaceStatus(ctx, target.Keyspace) if ks == nil { return nil, false } @@ -471,3 +633,17 @@ func (kew *KeyspaceEventWatcher) PrimaryIsNotServing(target *query.Target) (*top } return nil, false } + +// GetServingKeyspaces gets the serving keyspaces from the keyspace event watcher. +func (kew *KeyspaceEventWatcher) GetServingKeyspaces() []string { + kew.mu.Lock() + defer kew.mu.Unlock() + + var servingKeyspaces []string + for ksName, state := range kew.keyspaces { + if state.isServing() { + servingKeyspaces = append(servingKeyspaces, ksName) + } + } + return servingKeyspaces +} diff --git a/go/vt/discovery/keyspace_events_test.go b/go/vt/discovery/keyspace_events_test.go index 456d8566e87..43af4bf49de 100644 --- a/go/vt/discovery/keyspace_events_test.go +++ b/go/vt/discovery/keyspace_events_test.go @@ -24,21 +24,26 @@ import ( "github.com/stretchr/testify/require" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/faketopo" + + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" ) func TestSrvKeyspaceWithNilNewKeyspace(t *testing.T) { + ctx := utils.LeakCheckContext(t) cell := "cell" keyspace := "testks" factory := faketopo.NewFakeTopoFactory() factory.AddCell(cell) - ts := faketopo.NewFakeTopoServer(factory) + ts := faketopo.NewFakeTopoServer(ctx, factory) ts2 := &fakeTopoServer{} - hc := NewHealthCheck(context.Background(), 1*time.Millisecond, 
time.Hour, ts, cell, "") - kew := NewKeyspaceEventWatcher(context.Background(), ts2, hc, cell) + hc := NewHealthCheck(ctx, 1*time.Millisecond, time.Hour, ts, cell, "") + defer hc.Close() + kew := NewKeyspaceEventWatcher(ctx, ts2, hc, cell) kss := &keyspaceState{ kew: kew, keyspace: keyspace, @@ -55,6 +60,223 @@ func TestSrvKeyspaceWithNilNewKeyspace(t *testing.T) { require.True(t, kss.onSrvKeyspace(nil, nil)) } +// TestKeyspaceEventTypes confirms that the keyspace event watcher determines +// that the unavailability event is caused by the correct scenario. We should +// consider it to be caused by a resharding operation when the following +// conditions are present: +// 1. The keyspace is inconsistent (in the middle of an availability event) +// 2. The target tablet is a primary +// 3. The keyspace has overlapping shards +// 4. The overlapping shard's tablet is serving +// And we should consider the cause to be a primary not serving when the +// following conditions exist: +// 1. The keyspace is inconsistent (in the middle of an availability event) +// 2. The target tablet is a primary +// 3. The target tablet is not serving +// 4. The shard's externallyReparented time is not 0 +// 5. The shard's currentPrimary state is not nil +// We should never consider both as a possible cause given the same +// keyspace state. 
+func TestKeyspaceEventTypes(t *testing.T) { + utils.EnsureNoLeaks(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cell := "cell" + keyspace := "testks" + factory := faketopo.NewFakeTopoFactory() + factory.AddCell(cell) + ts := faketopo.NewFakeTopoServer(ctx, factory) + ts2 := &fakeTopoServer{} + hc := NewHealthCheck(ctx, 1*time.Millisecond, time.Hour, ts, cell, "") + defer hc.Close() + kew := NewKeyspaceEventWatcher(ctx, ts2, hc, cell) + + type testCase struct { + name string + kss *keyspaceState + shardToCheck string + expectResharding bool + expectPrimaryNotServing bool + } + + testCases := []testCase{ + { + name: "one to two resharding in progress", + kss: &keyspaceState{ + kew: kew, + keyspace: keyspace, + shards: map[string]*shardState{ + "-": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "-", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: false, + }, + "-80": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "-80", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: true, + }, + "80-": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "80-", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: false, + }, + }, + consistent: false, + }, + shardToCheck: "-", + expectResharding: true, + expectPrimaryNotServing: false, + }, + { + name: "two to four resharding in progress", + kss: &keyspaceState{ + kew: kew, + keyspace: keyspace, + shards: map[string]*shardState{ + "-80": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "-80", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: false, + }, + "80-": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "80-", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: true, + }, + "-40": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "-40", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: true, + }, + "40-80": { + target: &querypb.Target{ + Keyspace: keyspace, + 
Shard: "40-80", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: true, + }, + "80-c0": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "80-c0", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: false, + }, + "c0-": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "c0-", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: false, + }, + }, + consistent: false, + }, + shardToCheck: "-80", + expectResharding: true, + expectPrimaryNotServing: false, + }, + { + name: "unsharded primary not serving", + kss: &keyspaceState{ + kew: kew, + keyspace: keyspace, + shards: map[string]*shardState{ + "-": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "-", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: false, + externallyReparented: time.Now().UnixNano(), + currentPrimary: &topodatapb.TabletAlias{ + Cell: cell, + Uid: 100, + }, + }, + }, + consistent: false, + }, + shardToCheck: "-", + expectResharding: false, + expectPrimaryNotServing: true, + }, + { + name: "sharded primary not serving", + kss: &keyspaceState{ + kew: kew, + keyspace: keyspace, + shards: map[string]*shardState{ + "-80": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "-80", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: false, + externallyReparented: time.Now().UnixNano(), + currentPrimary: &topodatapb.TabletAlias{ + Cell: cell, + Uid: 100, + }, + }, + "80-": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "80-", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: true, + }, + }, + consistent: false, + }, + shardToCheck: "-80", + expectResharding: false, + expectPrimaryNotServing: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + kew.mu.Lock() + kew.keyspaces[keyspace] = tc.kss + kew.mu.Unlock() + + require.NotNil(t, tc.kss.shards[tc.shardToCheck], "the specified shardToCheck of %q does not exist in the shardState", tc.shardToCheck) + + resharding 
:= kew.TargetIsBeingResharded(ctx, tc.kss.shards[tc.shardToCheck].target) + require.Equal(t, resharding, tc.expectResharding, "TargetIsBeingResharded should return %t", tc.expectResharding) + + _, primaryDown := kew.PrimaryIsNotServing(ctx, tc.kss.shards[tc.shardToCheck].target) + require.Equal(t, primaryDown, tc.expectPrimaryNotServing, "PrimaryIsNotServing should return %t", tc.expectPrimaryNotServing) + }) + } +} + type fakeTopoServer struct { } diff --git a/go/vt/discovery/replicationlag.go b/go/vt/discovery/replicationlag.go index 8ae168ddff9..e7afa5ca844 100644 --- a/go/vt/discovery/replicationlag.go +++ b/go/vt/discovery/replicationlag.go @@ -278,13 +278,6 @@ func (a tabletLagSnapshotList) Len() int { return len(a) } func (a tabletLagSnapshotList) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a tabletLagSnapshotList) Less(i, j int) bool { return a[i].replag < a[j].replag } -func min(a, b int) int { - if a > b { - return b - } - return a -} - // mean calculates the mean value over the given list, // while excluding the item with the specified index. 
func mean(tabletHealthList []*TabletHealth, idxExclude int) (uint64, error) { diff --git a/go/vt/discovery/replicationlag_test.go b/go/vt/discovery/replicationlag_test.go index 9c047469fb2..5cef05a3f4b 100644 --- a/go/vt/discovery/replicationlag_test.go +++ b/go/vt/discovery/replicationlag_test.go @@ -45,6 +45,7 @@ func testSetMinNumTablets(newMin int) { } func TestFilterByReplicationLagUnhealthy(t *testing.T) { + defer utils.EnsureNoLeaks(t) // 1 healthy serving tablet, 1 not healthy ts1 := &TabletHealth{ Tablet: topo.NewTablet(1, "cell", "host1"), @@ -62,6 +63,7 @@ func TestFilterByReplicationLagUnhealthy(t *testing.T) { } func TestFilterByReplicationLag(t *testing.T) { + defer utils.EnsureNoLeaks(t) // Use simplified logic testSetLegacyReplicationLagAlgorithm(false) @@ -138,6 +140,7 @@ func TestFilterByReplicationLag(t *testing.T) { } func TestFilterByReplicationLagThreeTabletMin(t *testing.T) { + defer utils.EnsureNoLeaks(t) // Use at least 3 tablets if possible testSetMinNumTablets(3) // lags of (1s, 1s, 10m, 11m) - returns at least32 items where the slightly delayed ones that are returned are the 10m and 11m ones. @@ -194,6 +197,7 @@ func TestFilterByReplicationLagThreeTabletMin(t *testing.T) { } func TestFilterStatsByReplicationLagOneTabletMin(t *testing.T) { + defer utils.EnsureNoLeaks(t) // Use at least 1 tablets if possible testSetMinNumTablets(1) // lags of (1s, 100m) - return only healthy tablet if that is all that is available. 
diff --git a/go/vt/discovery/tablet_health_check.go b/go/vt/discovery/tablet_health_check.go index 95821db88a2..24496155e74 100644 --- a/go/vt/discovery/tablet_health_check.go +++ b/go/vt/discovery/tablet_health_check.go @@ -189,7 +189,7 @@ func (thc *tabletHealthCheck) processResponse(hc *HealthCheckImpl, shr *query.St prevTarget.TabletType != topodata.TabletType_PRIMARY && prevTarget.TabletType == shr.Target.TabletType && thc.isTrivialReplagChange(shr.RealtimeStats) thc.lastResponseTimestamp = time.Now() thc.Target = shr.Target - thc.PrimaryTermStartTime = shr.TabletExternallyReparentedTimestamp + thc.PrimaryTermStartTime = shr.PrimaryTermStartTimestamp thc.Stats = shr.RealtimeStats thc.LastError = healthErr reason := "healthCheck update" diff --git a/go/vt/discovery/tablet_picker.go b/go/vt/discovery/tablet_picker.go index 20d4126831a..d1c77f7ca1a 100644 --- a/go/vt/discovery/tablet_picker.go +++ b/go/vt/discovery/tablet_picker.go @@ -17,7 +17,9 @@ limitations under the License. package discovery import ( + "context" "fmt" + "io" "math/rand" "sort" "strings" @@ -25,20 +27,16 @@ import ( "time" "vitess.io/vitess/go/stats" - + "vitess.io/vitess/go/vt/grpcclient" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" - - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletconn" - "vitess.io/vitess/go/vt/log" - - "context" - + querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vterrors" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) type TabletPickerCellPreference int @@ -58,13 +56,13 @@ const ( TabletPickerTabletOrder_Any TabletPickerTabletOrder = iota // Provided tablet types are expected to be prioritized in the given order. 
TabletPickerTabletOrder_InOrder + InOrderHint = "in_order:" ) var ( tabletPickerRetryDelay = 30 * time.Second muTabletPickerRetryDelay sync.Mutex globalTPStats *tabletPickerStats - inOrderHint = "in_order:" tabletPickerCellPreferenceMap = map[string]TabletPickerCellPreference{ "preferlocalwithalias": TabletPickerCellPreference_PreferLocalWithAlias, @@ -92,8 +90,9 @@ func SetTabletPickerRetryDelay(delay time.Duration) { } type TabletPickerOptions struct { - CellPreference string - TabletOrder string + CellPreference string + TabletOrder string + IncludeNonServingTablets bool } func parseTabletPickerCellPreferenceString(str string) (TabletPickerCellPreference, error) { @@ -137,6 +136,7 @@ type TabletPicker struct { inOrder bool cellPref TabletPickerCellPreference localCellInfo localCellInfo + options TabletPickerOptions } // NewTabletPicker returns a TabletPicker. @@ -148,6 +148,9 @@ func NewTabletPicker( options TabletPickerOptions, ) (*TabletPicker, error) { // Keep inOrder parsing here for backward compatability until TabletPickerTabletOrder is fully adopted. + if tabletTypesStr == "" { + tabletTypesStr = "replica,rdonly,primary" + } tabletTypes, inOrder, err := ParseTabletTypesAndOrder(tabletTypesStr) if err != nil { return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "failed to parse list of tablet types: %v", tabletTypesStr) @@ -227,6 +230,7 @@ func NewTabletPicker( tabletTypes: tabletTypes, inOrder: inOrder, cellPref: cellPref, + options: options, }, nil } @@ -280,13 +284,46 @@ func (tp *TabletPicker) orderByTabletType(candidates []*topo.TabletInfo) []*topo return candidates } -// PickForStreaming picks an available tablet. 
+func (tp *TabletPicker) sortCandidates(ctx context.Context, candidates []*topo.TabletInfo) []*topo.TabletInfo { + if tp.cellPref == TabletPickerCellPreference_PreferLocalWithAlias { + sameCellCandidates, sameAliasCandidates, allOtherCandidates := tp.prioritizeTablets(candidates) + + if tp.inOrder { + sameCellCandidates = tp.orderByTabletType(sameCellCandidates) + sameAliasCandidates = tp.orderByTabletType(sameAliasCandidates) + allOtherCandidates = tp.orderByTabletType(allOtherCandidates) + } else { + // Randomize candidates + rand.Shuffle(len(sameCellCandidates), func(i, j int) { + sameCellCandidates[i], sameCellCandidates[j] = sameCellCandidates[j], sameCellCandidates[i] + }) + rand.Shuffle(len(sameAliasCandidates), func(i, j int) { + sameAliasCandidates[i], sameAliasCandidates[j] = sameAliasCandidates[j], sameAliasCandidates[i] + }) + rand.Shuffle(len(allOtherCandidates), func(i, j int) { + allOtherCandidates[i], allOtherCandidates[j] = allOtherCandidates[j], allOtherCandidates[i] + }) + } + + candidates = append(sameCellCandidates, sameAliasCandidates...) + candidates = append(candidates, allOtherCandidates...) + } else if tp.inOrder { + candidates = tp.orderByTabletType(candidates) + } else { + // Randomize candidates. + rand.Shuffle(len(candidates), func(i, j int) { + candidates[i], candidates[j] = candidates[j], candidates[i] + }) + } + return candidates +} + +// PickForStreaming picks a tablet that is healthy and serving. // Selection is based on CellPreference. // See prioritizeTablets for prioritization logic. func (tp *TabletPicker) PickForStreaming(ctx context.Context) (*topodatapb.Tablet, error) { - rand.Seed(time.Now().UnixNano()) - // keep trying at intervals (tabletPickerRetryDelay) until a tablet is found - // or the context is canceled + // Keep trying at intervals (tabletPickerRetryDelay) until a healthy + // serving tablet is found or the context is cancelled. 
for { select { case <-ctx.Done(): @@ -294,40 +331,11 @@ func (tp *TabletPicker) PickForStreaming(ctx context.Context) (*topodatapb.Table default: } candidates := tp.GetMatchingTablets(ctx) - if tp.cellPref == TabletPickerCellPreference_PreferLocalWithAlias { - sameCellCandidates, sameAliasCandidates, allOtherCandidates := tp.prioritizeTablets(candidates) - - if tp.inOrder { - sameCellCandidates = tp.orderByTabletType(sameCellCandidates) - sameAliasCandidates = tp.orderByTabletType(sameAliasCandidates) - allOtherCandidates = tp.orderByTabletType(allOtherCandidates) - } else { - // Randomize candidates - rand.Shuffle(len(sameCellCandidates), func(i, j int) { - sameCellCandidates[i], sameCellCandidates[j] = sameCellCandidates[j], sameCellCandidates[i] - }) - rand.Shuffle(len(sameAliasCandidates), func(i, j int) { - sameAliasCandidates[i], sameAliasCandidates[j] = sameAliasCandidates[j], sameAliasCandidates[i] - }) - rand.Shuffle(len(allOtherCandidates), func(i, j int) { - allOtherCandidates[i], allOtherCandidates[j] = allOtherCandidates[j], allOtherCandidates[i] - }) - } - - candidates = append(sameCellCandidates, sameAliasCandidates...) - candidates = append(candidates, allOtherCandidates...) - } else if tp.inOrder { - candidates = tp.orderByTabletType(candidates) - } else { - // Randomize candidates - rand.Shuffle(len(candidates), func(i, j int) { - candidates[i], candidates[j] = candidates[j], candidates[i] - }) - } + candidates = tp.sortCandidates(ctx, candidates) if len(candidates) == 0 { - // if no candidates were found, sleep and try again + // If no viable candidates were found, sleep and try again. 
tp.incNoTabletFoundStat() - log.Infof("No tablet found for streaming, shard %s.%s, cells %v, tabletTypes %v, sleeping for %.3f seconds", + log.Infof("No healthy serving tablet found for streaming, shard %s.%s, cells %v, tabletTypes %v, sleeping for %.3f seconds.", tp.keyspace, tp.shard, tp.cells, tp.tabletTypes, float64(GetTabletPickerRetryDelay().Milliseconds())/1000.0) timer := time.NewTimer(GetTabletPickerRetryDelay()) select { @@ -338,58 +346,49 @@ func (tp *TabletPicker) PickForStreaming(ctx context.Context) (*topodatapb.Table } continue } - for _, ti := range candidates { - // try to connect to tablet - if conn, err := tabletconn.GetDialer()(ti.Tablet, true); err == nil { - // OK to use ctx here because it is not actually used by the underlying Close implementation - _ = conn.Close(ctx) - log.Infof("tablet picker found tablet %s", ti.Tablet.String()) - return ti.Tablet, nil - } - // err found - log.Warningf("unable to connect to tablet for alias %v", ti.Alias) - } - // Got here? Means we iterated all tablets and did not find a healthy one - tp.incNoTabletFoundStat() + log.Infof("Tablet picker found a healthy tablet for streaming: %s", candidates[0].Tablet.String()) + return candidates[0].Tablet, nil } } -// GetMatchingTablets returns a list of TabletInfo for tablets -// that match the cells, keyspace, shard and tabletTypes for this TabletPicker +// GetMatchingTablets returns a list of TabletInfo for healthy +// serving tablets that match the cells, keyspace, shard and +// tabletTypes for this TabletPicker. func (tp *TabletPicker) GetMatchingTablets(ctx context.Context) []*topo.TabletInfo { - // Special handling for PRIMARY tablet type - // Since there is only one primary, we ignore cell and find the primary + // Special handling for PRIMARY tablet type: since there is only + // one primary per shard, we ignore cell and find the primary. 
aliases := make([]*topodatapb.TabletAlias, 0) if len(tp.tabletTypes) == 1 && tp.tabletTypes[0] == topodatapb.TabletType_PRIMARY { shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancel() si, err := tp.ts.GetShard(shortCtx, tp.keyspace, tp.shard) if err != nil { - log.Errorf("error getting shard %s/%s: %s", tp.keyspace, tp.shard, err.Error()) + log.Errorf("Error getting shard %s/%s: %v", tp.keyspace, tp.shard, err) return nil } aliases = append(aliases, si.PrimaryAlias) } else { actualCells := make([]string, 0) for _, cell := range tp.cells { - // check if cell is actually an alias - // non-blocking read so that this is fast + // Check if cell is actually an alias; using a + // non-blocking read so that this is fast. shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancel() _, err := tp.ts.GetCellInfo(shortCtx, cell, false) if err != nil { - // not a valid cell, check whether it is a cell alias + // Not a valid cell, check whether it is a cell alias... shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancel() alias, err := tp.ts.GetCellsAlias(shortCtx, cell, false) - // if we get an error, either cellAlias doesn't exist or it isn't a cell alias at all. Ignore and continue + // If we get an error, either cellAlias doesn't exist or + // it isn't a cell alias at all; ignore and continue. if err == nil { actualCells = append(actualCells, alias.Cells...) } else { log.Infof("Unable to resolve cell %s, ignoring", cell) } } else { - // valid cell, add it to our list + // Valid cell, add it to our list. actualCells = append(actualCells, cell) } } @@ -397,12 +396,11 @@ func (tp *TabletPicker) GetMatchingTablets(ctx context.Context) []*topo.TabletIn for _, cell := range actualCells { shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancel() - // match cell, keyspace and shard + // Match cell, keyspace, and shard. 
sri, err := tp.ts.GetShardReplication(shortCtx, cell, tp.keyspace, tp.shard) if err != nil { continue } - for _, node := range sri.Nodes { aliases = append(aliases, node.TabletAlias) } @@ -412,33 +410,50 @@ func (tp *TabletPicker) GetMatchingTablets(ctx context.Context) []*topo.TabletIn if len(aliases) == 0 { return nil } + shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancel() tabletMap, err := tp.ts.GetTabletMap(shortCtx, aliases) if err != nil { - log.Warningf("error fetching tablets from topo: %v", err) - // If we get a partial result we can still use it, otherwise return + log.Warningf("Error fetching tablets from topo: %v", err) + // If we get a partial result we can still use it, otherwise return. if len(tabletMap) == 0 { return nil } } + tablets := make([]*topo.TabletInfo, 0, len(aliases)) for _, tabletAlias := range aliases { tabletInfo, ok := tabletMap[topoproto.TabletAliasString(tabletAlias)] if !ok { - // Either tablet disappeared on us, or we got a partial result (GetTabletMap ignores - // topo.ErrNoNode). Just log a warning - log.Warningf("failed to load tablet %v", tabletAlias) + // Either tablet disappeared on us, or we got a partial result + // (GetTabletMap ignores topo.ErrNoNode); just log a warning. + log.Warningf("Tablet picker failed to load tablet %v", tabletAlias) } else if topoproto.IsTypeInList(tabletInfo.Type, tp.tabletTypes) { - tablets = append(tablets, tabletInfo) + // Try to connect to the tablet and confirm that it's usable. + if conn, err := tabletconn.GetDialer()(tabletInfo.Tablet, grpcclient.FailFast(true)); err == nil { + // Ensure that the tablet is healthy and serving. 
+ shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) + defer cancel() + if err := conn.StreamHealth(shortCtx, func(shr *querypb.StreamHealthResponse) error { + if shr != nil && + (shr.Serving || tp.options.IncludeNonServingTablets) && + shr.RealtimeStats != nil && + shr.RealtimeStats.HealthError == "" { + return io.EOF // End the stream + } + return vterrors.New(vtrpcpb.Code_INTERNAL, "tablet is not healthy and serving") + }); err == nil || err == io.EOF { + tablets = append(tablets, tabletInfo) + } + _ = conn.Close(ctx) + } } } return tablets } func init() { - // TODO(sougou): consolidate this call to be once per process. - rand.Seed(time.Now().UnixNano()) globalTPStats = newTabletPickerStats() } @@ -457,7 +472,7 @@ func (tp *TabletPicker) incNoTabletFoundStat() { globalTPStats.mu.Lock() defer globalTPStats.mu.Unlock() cells := strings.Join(tp.cells, "_") - tabletTypes := strings.Join(topoproto.MakeStringTypeList(tp.tabletTypes), "_") + tabletTypes := strings.ReplaceAll(topoproto.MakeStringTypeCSV(tp.tabletTypes), ",", "_") labels := []string{cells, tp.keyspace, tp.shard, tabletTypes} globalTPStats.noTabletFoundError.Add(labels, 1) } diff --git a/go/vt/discovery/tablet_picker_test.go b/go/vt/discovery/tablet_picker_test.go index 88368c02a60..dfae39cd98c 100644 --- a/go/vt/discovery/tablet_picker_test.go +++ b/go/vt/discovery/tablet_picker_test.go @@ -22,28 +22,40 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" + + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +const ( + contextTimeout = 5 * time.Second + numTestIterations = 50 ) func TestPickPrimary(t *testing.T) { - te := newPickerTestEnv(t, []string{"cell", "otherCell"}) - want := 
addTablet(te, 100, topodatapb.TabletType_PRIMARY, "cell", true, true) - defer deleteTablet(t, te, want) + defer utils.EnsureNoLeaks(t) ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) defer cancel() + + te := newPickerTestEnv(t, ctx, []string{"cell", "otherCell"}) + want := addTablet(ctx, te, 100, topodatapb.TabletType_PRIMARY, "cell", true, true) + defer deleteTablet(t, te, want) + ctx, cancel = context.WithTimeout(ctx, 200*time.Millisecond) + defer cancel() _, err := te.topoServ.UpdateShardFields(ctx, te.keyspace, te.shard, func(si *topo.ShardInfo) error { si.PrimaryAlias = want.Alias return nil }) require.NoError(t, err) - tp, err := NewTabletPicker(context.Background(), te.topoServ, []string{"otherCell"}, "cell", te.keyspace, te.shard, "primary", TabletPickerOptions{}) + tp, err := NewTabletPicker(ctx, te.topoServ, []string{"otherCell"}, "cell", te.keyspace, te.shard, "primary", TabletPickerOptions{}) require.NoError(t, err) - ctx2, cancel2 := context.WithTimeout(context.Background(), 200*time.Millisecond) + ctx2, cancel2 := context.WithTimeout(ctx, 200*time.Millisecond) defer cancel2() tablet, err := tp.PickForStreaming(ctx2) require.NoError(t, err) @@ -51,6 +63,7 @@ func TestPickPrimary(t *testing.T) { } func TestPickLocalPreferences(t *testing.T) { + defer utils.EnsureNoLeaks(t) type tablet struct { id uint32 typ topodatapb.TabletType @@ -262,20 +275,21 @@ func TestPickLocalPreferences(t *testing.T) { }, } - ctx := context.Background() for _, tcase := range tcases { t.Run(tcase.name, func(t *testing.T) { - te := newPickerTestEnv(t, tcase.envCells) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + te := newPickerTestEnv(t, ctx, tcase.envCells) var testTablets []*topodatapb.Tablet for _, tab := range tcase.tablets { - testTablets = append(testTablets, addTablet(te, int(tab.id), tab.typ, tab.cell, true, true)) + testTablets = append(testTablets, addTablet(ctx, te, int(tab.id), tab.typ, tab.cell, true, 
true)) } defer func() { for _, tab := range testTablets { deleteTablet(t, te, tab) } }() - tp, err := NewTabletPicker(context.Background(), te.topoServ, tcase.inCells, tcase.localCell, te.keyspace, te.shard, tcase.inTabletTypes, tcase.options) + tp, err := NewTabletPicker(ctx, te.topoServ, tcase.inCells, tcase.localCell, te.keyspace, te.shard, tcase.inTabletTypes, tcase.options) require.NoError(t, err) require.Equal(t, tp.localCellInfo.localCell, tcase.localCell) require.ElementsMatch(t, tp.cells, tcase.tpCells) @@ -296,26 +310,26 @@ func TestPickLocalPreferences(t *testing.T) { } func TestPickCellPreferenceLocalCell(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // test env puts all cells into an alias called "cella" - te := newPickerTestEnv(t, []string{"cell", "otherCell"}) - want1 := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) + te := newPickerTestEnv(t, ctx, []string{"cell", "otherCell"}) + want1 := addTablet(ctx, te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) defer deleteTablet(t, te, want1) // Local cell preference is default - tp, err := NewTabletPicker(context.Background(), te.topoServ, []string{"cella"}, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{}) + tp, err := NewTabletPicker(ctx, te.topoServ, []string{"cella"}, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{}) require.NoError(t, err) - ctx1, cancel1 := context.WithTimeout(context.Background(), 200*time.Millisecond) - defer cancel1() - tablet, err := tp.PickForStreaming(ctx1) + tablet, err := tp.PickForStreaming(ctx) require.NoError(t, err) assert.True(t, proto.Equal(want1, tablet), "Pick: %v, want %v", tablet, want1) // create a tablet in the other cell - want2 := addTablet(te, 101, topodatapb.TabletType_REPLICA, "otherCell", true, true) + want2 := addTablet(ctx, te, 101, topodatapb.TabletType_REPLICA, "otherCell", true, true) defer deleteTablet(t, te, want2) - ctx2, cancel2 := context.WithTimeout(context.Background(), 
200*time.Millisecond) + ctx2, cancel2 := context.WithTimeout(ctx, 200*time.Millisecond) defer cancel2() // In 20 attempts, only tablet in "cell" will be picked because we give local cell priority by default @@ -335,49 +349,81 @@ func TestPickCellPreferenceLocalCell(t *testing.T) { } func TestPickCellPreferenceLocalAlias(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // test env puts all cells into an alias called "cella" - te := newPickerTestEnv(t, []string{"cell", "otherCell"}) - tp, err := NewTabletPicker(context.Background(), te.topoServ, []string{"cella"}, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{}) + te := newPickerTestEnv(t, ctx, []string{"cell", "otherCell"}) + tp, err := NewTabletPicker(ctx, te.topoServ, []string{"cella"}, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{}) require.NoError(t, err) // create a tablet in the other cell, it should be picked - want := addTablet(te, 101, topodatapb.TabletType_REPLICA, "otherCell", true, true) + want := addTablet(ctx, te, 101, topodatapb.TabletType_REPLICA, "otherCell", true, true) defer deleteTablet(t, te, want) - ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) - defer cancel() tablet, err := tp.PickForStreaming(ctx) require.NoError(t, err) assert.True(t, proto.Equal(want, tablet), "Pick: %v, want %v", tablet, want) } +// TestPickUsingCellAsAlias confirms that when the tablet picker is +// given a cell name that is an alias, it will choose a tablet that +// exists within a cell that is part of the alias. +func TestPickUsingCellAsAlias(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + // The test env puts all cells into an alias called "cella". + // We're also going to specify an optional extraCell that is NOT + // added to the alias. + te := newPickerTestEnv(t, ctx, []string{"cell1", "cell2", "cell3"}, "xtracell") + // Specify the alias as the cell. 
+ tp, err := NewTabletPicker(ctx, te.topoServ, []string{"cella"}, "cell1", te.keyspace, te.shard, "replica", TabletPickerOptions{}) + require.NoError(t, err) + + // Create a tablet in one of the main cells, it should be + // picked as it is part of the cella alias. This tablet is + // NOT part of the talbet picker's local cell (cell1) so it + // will not be given local preference. + want := addTablet(ctx, te, 101, topodatapb.TabletType_REPLICA, "cell2", true, true) + defer deleteTablet(t, te, want) + // Create a tablet in an extra cell which is thus NOT part of + // the cella alias so it should NOT be picked. + noWant := addTablet(ctx, te, 102, topodatapb.TabletType_REPLICA, "xtracell", true, true) + defer deleteTablet(t, te, noWant) + // Try it many times to be sure we don't ever pick the wrong one. + for i := 0; i < 100; i++ { + tablet, err := tp.PickForStreaming(ctx) + require.NoError(t, err) + assert.True(t, proto.Equal(want, tablet), "Pick: %v, want %v", tablet, want) + } +} + func TestPickUsingCellAliasOnlySpecified(t *testing.T) { + ctx := utils.LeakCheckContextTimeout(t, 200*time.Millisecond) + // test env puts all cells into an alias called "cella" - te := newPickerTestEnv(t, []string{"cell", "otherCell"}) - want1 := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) + te := newPickerTestEnv(t, ctx, []string{"cell", "otherCell"}) + want1 := addTablet(ctx, te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) defer deleteTablet(t, te, want1) - tp, err := NewTabletPicker(context.Background(), te.topoServ, []string{"cella"}, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{CellPreference: "OnlySpecified"}) + tp, err := NewTabletPicker(ctx, te.topoServ, []string{"cella"}, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{CellPreference: "OnlySpecified"}) require.NoError(t, err) - ctx1, cancel1 := context.WithTimeout(context.Background(), 200*time.Millisecond) - defer cancel1() - tablet, err := 
tp.PickForStreaming(ctx1) + tablet, err := tp.PickForStreaming(ctx) require.NoError(t, err) assert.True(t, proto.Equal(want1, tablet), "Pick: %v, want %v", tablet, want1) // create a tablet in the other cell, it should be picked deleteTablet(t, te, want1) - want2 := addTablet(te, 101, topodatapb.TabletType_REPLICA, "otherCell", true, true) + want2 := addTablet(ctx, te, 101, topodatapb.TabletType_REPLICA, "otherCell", true, true) defer deleteTablet(t, te, want2) - ctx2, cancel2 := context.WithTimeout(context.Background(), 200*time.Millisecond) + ctx2, cancel2 := context.WithTimeout(ctx, 200*time.Millisecond) defer cancel2() tablet, err = tp.PickForStreaming(ctx2) require.NoError(t, err) assert.True(t, proto.Equal(want2, tablet), "Pick: %v, want %v", tablet, want2) // addTablet again and test that both are picked at least once - want1 = addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) - ctx3, cancel3 := context.WithTimeout(context.Background(), 200*time.Millisecond) + want1 = addTablet(ctx, te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) + ctx3, cancel3 := context.WithTimeout(ctx, 200*time.Millisecond) defer cancel3() // In 20 attempts each of the tablets should get picked at least once. 
@@ -398,8 +444,10 @@ func TestPickUsingCellAliasOnlySpecified(t *testing.T) { } func TestTabletAppearsDuringSleep(t *testing.T) { - te := newPickerTestEnv(t, []string{"cell"}) - tp, err := NewTabletPicker(context.Background(), te.topoServ, te.cells, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{}) + ctx := utils.LeakCheckContextTimeout(t, 200*time.Millisecond) + + te := newPickerTestEnv(t, ctx, []string{"cell"}) + tp, err := NewTabletPicker(ctx, te.topoServ, te.cells, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{}) require.NoError(t, err) delay := GetTabletPickerRetryDelay() @@ -411,14 +459,14 @@ func TestTabletAppearsDuringSleep(t *testing.T) { result := make(chan *topodatapb.Tablet) // start picker first, then add tablet go func() { - ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond) + ctx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) defer cancel() tablet, err := tp.PickForStreaming(ctx) assert.NoError(t, err) result <- tablet }() - want := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) + want := addTablet(ctx, te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) defer deleteTablet(t, te, want) got := <-result require.NotNil(t, got, "Tablet should not be nil") @@ -426,11 +474,13 @@ func TestTabletAppearsDuringSleep(t *testing.T) { } func TestPickErrorLocalPreferenceDefault(t *testing.T) { - te := newPickerTestEnv(t, []string{"cell"}) - _, err := NewTabletPicker(context.Background(), te.topoServ, te.cells, "cell", te.keyspace, te.shard, "badtype", TabletPickerOptions{}) + ctx := utils.LeakCheckContext(t) + + te := newPickerTestEnv(t, ctx, []string{"cell"}) + _, err := NewTabletPicker(ctx, te.topoServ, te.cells, "cell", te.keyspace, te.shard, "badtype", TabletPickerOptions{}) assert.EqualError(t, err, "failed to parse list of tablet types: badtype") - tp, err := NewTabletPicker(context.Background(), te.topoServ, te.cells, "cell", te.keyspace, te.shard, "replica", 
TabletPickerOptions{}) + tp, err := NewTabletPicker(ctx, te.topoServ, te.cells, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{}) require.NoError(t, err) delay := GetTabletPickerRetryDelay() defer func() { @@ -438,25 +488,27 @@ func TestPickErrorLocalPreferenceDefault(t *testing.T) { }() SetTabletPickerRetryDelay(11 * time.Millisecond) - ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond) - defer cancel() + timeoutCtx, timeoutCancel := context.WithTimeout(ctx, 20*time.Millisecond) + defer timeoutCancel() // no tablets - _, err = tp.PickForStreaming(ctx) + _, err = tp.PickForStreaming(timeoutCtx) require.EqualError(t, err, "context has expired") // no tablets of the correct type - defer deleteTablet(t, te, addTablet(te, 200, topodatapb.TabletType_RDONLY, "cell", true, true)) - ctx, cancel = context.WithTimeout(context.Background(), 20*time.Millisecond) - defer cancel() - _, err = tp.PickForStreaming(ctx) + defer deleteTablet(t, te, addTablet(ctx, te, 200, topodatapb.TabletType_RDONLY, "cell", true, true)) + timeoutCtx, timeoutCancel = context.WithTimeout(ctx, 20*time.Millisecond) + defer timeoutCancel() + _, err = tp.PickForStreaming(timeoutCtx) require.EqualError(t, err, "context has expired") // if local preference is selected, tp cells include's the local cell's alias require.Greater(t, globalTPStats.noTabletFoundError.Counts()["cell_cella.ks.0.replica"], int64(0)) } func TestPickErrorOnlySpecified(t *testing.T) { - te := newPickerTestEnv(t, []string{"cell"}) + ctx := utils.LeakCheckContext(t) - tp, err := NewTabletPicker(context.Background(), te.topoServ, te.cells, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{CellPreference: "OnlySpecified"}) + te := newPickerTestEnv(t, ctx, []string{"cell"}) + + tp, err := NewTabletPicker(ctx, te.topoServ, te.cells, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{CellPreference: "OnlySpecified"}) require.NoError(t, err) delay := GetTabletPickerRetryDelay() 
defer func() { @@ -464,21 +516,129 @@ func TestPickErrorOnlySpecified(t *testing.T) { }() SetTabletPickerRetryDelay(11 * time.Millisecond) - ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond) - defer cancel() + timeoutCtx, timeoutCancel := context.WithTimeout(ctx, 20*time.Millisecond) + defer timeoutCancel() // no tablets - _, err = tp.PickForStreaming(ctx) + _, err = tp.PickForStreaming(timeoutCtx) require.EqualError(t, err, "context has expired") // no tablets of the correct type - defer deleteTablet(t, te, addTablet(te, 200, topodatapb.TabletType_RDONLY, "cell", true, true)) - ctx, cancel = context.WithTimeout(context.Background(), 20*time.Millisecond) - defer cancel() - _, err = tp.PickForStreaming(ctx) + defer deleteTablet(t, te, addTablet(ctx, te, 200, topodatapb.TabletType_RDONLY, "cell", true, true)) + timeoutCtx, timeoutCancel = context.WithTimeout(ctx, 20*time.Millisecond) + defer timeoutCancel() + _, err = tp.PickForStreaming(timeoutCtx) require.EqualError(t, err, "context has expired") require.Greater(t, globalTPStats.noTabletFoundError.Counts()["cell.ks.0.replica"], int64(0)) } +// TestPickFallbackType tests that when providing a list of tablet types to +// pick from, with the list in preference order, that when the primary/first +// type has no available healthy serving tablets that we select a healthy +// serving tablet from the secondary/second type. +func TestPickFallbackType(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + cells := []string{"cell1", "cell2"} + localCell := cells[0] + tabletTypes := "replica,primary" + options := TabletPickerOptions{ + TabletOrder: "InOrder", + } + te := newPickerTestEnv(t, ctx, cells) + + // This one should be selected even though it's the secondary type + // as it is healthy and serving. 
+ primaryTablet := addTablet(ctx, te, 100, topodatapb.TabletType_PRIMARY, localCell, true, true) + defer deleteTablet(t, te, primaryTablet) + + // Replica tablet should not be selected as it is unhealthy. + replicaTablet := addTablet(ctx, te, 200, topodatapb.TabletType_REPLICA, localCell, false, false) + defer deleteTablet(t, te, replicaTablet) + + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, 200*time.Millisecond) + defer cancel() + _, err := te.topoServ.UpdateShardFields(ctx, te.keyspace, te.shard, func(si *topo.ShardInfo) error { + si.PrimaryAlias = primaryTablet.Alias + return nil + }) + require.NoError(t, err) + + tp, err := NewTabletPicker(ctx, te.topoServ, cells, localCell, te.keyspace, te.shard, tabletTypes, options) + require.NoError(t, err) + ctx2, cancel2 := context.WithTimeout(ctx, 1*time.Second) + defer cancel2() + tablet, err := tp.PickForStreaming(ctx2) + require.NoError(t, err) + assert.True(t, proto.Equal(primaryTablet, tablet), "Pick: %v, want %v", tablet, primaryTablet) +} + +// TestPickNonServingTablets validates that non serving tablets are included when the +// IncludeNonServingTablets option is set. Unhealthy tablets should not be picked, irrespective of this option. +func TestPickNonServingTablets(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + cells := []string{"cell1", "cell2"} + localCell := cells[0] + tabletTypes := "replica,primary" + options := TabletPickerOptions{} + te := newPickerTestEnv(t, ctx, cells) + + // Tablet should be selected as it is healthy and serving. + primaryTablet := addTablet(ctx, te, 100, topodatapb.TabletType_PRIMARY, localCell, true, true) + defer deleteTablet(t, te, primaryTablet) + + // Tablet should not be selected as it is unhealthy. + replicaTablet := addTablet(ctx, te, 200, topodatapb.TabletType_REPLICA, localCell, false, false) + defer deleteTablet(t, te, replicaTablet) + + // Tablet should be selected because the IncludeNonServingTablets option is set and it is healthy. 
+ replicaTablet2 := addTablet(ctx, te, 300, topodatapb.TabletType_REPLICA, localCell, false, true) + defer deleteTablet(t, te, replicaTablet2) + + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, contextTimeout) + defer cancel() + _, err := te.topoServ.UpdateShardFields(ctx, te.keyspace, te.shard, func(si *topo.ShardInfo) error { + si.PrimaryAlias = primaryTablet.Alias + return nil + }) + require.NoError(t, err) + + tp, err := NewTabletPicker(ctx, te.topoServ, cells, localCell, te.keyspace, te.shard, tabletTypes, options) + require.NoError(t, err) + ctx2, cancel2 := context.WithTimeout(ctx, contextTimeout) + defer cancel2() + tablet, err := tp.PickForStreaming(ctx2) + require.NoError(t, err) + // IncludeNonServingTablets is false: only the healthy serving tablet should be picked. + assert.True(t, proto.Equal(primaryTablet, tablet), "Pick: %v, want %v", tablet, primaryTablet) + + options.IncludeNonServingTablets = true + tp, err = NewTabletPicker(ctx, te.topoServ, cells, localCell, te.keyspace, te.shard, tabletTypes, options) + require.NoError(t, err) + ctx3, cancel3 := context.WithTimeout(ctx, contextTimeout) + defer cancel3() + var picked1, picked2, picked3 bool + // IncludeNonServingTablets is true: both the healthy tablets should be picked even though one is not serving. 
+ for i := 0; i < numTestIterations; i++ { + tablet, err := tp.PickForStreaming(ctx3) + require.NoError(t, err) + if proto.Equal(tablet, primaryTablet) { + picked1 = true + } + if proto.Equal(tablet, replicaTablet) { + picked2 = true + } + if proto.Equal(tablet, replicaTablet2) { + picked3 = true + } + } + assert.True(t, picked1) + assert.False(t, picked2) + assert.True(t, picked3) +} + type pickerTestEnv struct { t *testing.T keyspace string @@ -488,17 +648,20 @@ type pickerTestEnv struct { topoServ *topo.Server } -func newPickerTestEnv(t *testing.T, cells []string) *pickerTestEnv { - ctx := context.Background() - +// newPickerTestEnv creates a test environment for TabletPicker tests. +// It creates a cell alias called 'cella' which contains all of the +// provided cells. However, if any optional extraCells are provided, those +// are NOT added to the cell alias. +func newPickerTestEnv(t *testing.T, ctx context.Context, cells []string, extraCells ...string) *pickerTestEnv { + allCells := append(cells, extraCells...) te := &pickerTestEnv{ t: t, keyspace: "ks", shard: "0", cells: cells, - topoServ: memorytopo.NewServer(cells...), + topoServ: memorytopo.NewServer(ctx, allCells...), } - // create cell alias + // Create cell alias containing the cells (but NOT the extraCells). 
err := te.topoServ.CreateCellsAlias(ctx, "cella", &topodatapb.CellsAlias{ Cells: cells, }) @@ -510,7 +673,7 @@ func newPickerTestEnv(t *testing.T, cells []string) *pickerTestEnv { return te } -func addTablet(te *pickerTestEnv, id int, tabletType topodatapb.TabletType, cell string, serving, healthy bool) *topodatapb.Tablet { +func addTablet(ctx context.Context, te *pickerTestEnv, id int, tabletType topodatapb.TabletType, cell string, serving, healthy bool) *topodatapb.Tablet { tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: cell, @@ -524,21 +687,24 @@ func addTablet(te *pickerTestEnv, id int, tabletType topodatapb.TabletType, cell "test": int32(id), }, } - err := te.topoServ.CreateTablet(context.Background(), tablet) + err := te.topoServ.CreateTablet(ctx, tablet) require.NoError(te.t, err) + shr := &querypb.StreamHealthResponse{ + Serving: serving, + Target: &querypb.Target{ + Keyspace: te.keyspace, + Shard: te.shard, + TabletType: tabletType, + }, + RealtimeStats: &querypb.RealtimeStats{HealthError: "tablet is unhealthy"}, + } if healthy { - _ = createFixedHealthConn(tablet, &querypb.StreamHealthResponse{ - Serving: serving, - Target: &querypb.Target{ - Keyspace: te.keyspace, - Shard: te.shard, - TabletType: tabletType, - }, - RealtimeStats: &querypb.RealtimeStats{HealthError: ""}, - }) + shr.RealtimeStats.HealthError = "" } + _ = createFixedHealthConn(tablet, shr) + return tablet } diff --git a/go/vt/discovery/topology_watcher.go b/go/vt/discovery/topology_watcher.go index 99d61e8c545..d1bd2d3acf8 100644 --- a/go/vt/discovery/topology_watcher.go +++ b/go/vt/discovery/topology_watcher.go @@ -18,6 +18,7 @@ package discovery import ( "bytes" + "context" "fmt" "hash/crc32" "sort" @@ -29,8 +30,6 @@ import ( "vitess.io/vitess/go/vt/key" - "context" - "vitess.io/vitess/go/stats" "vitess.io/vitess/go/trace" diff --git a/go/vt/discovery/topology_watcher_test.go b/go/vt/discovery/topology_watcher_test.go index dff8ba720c7..3ac567acef8 100644 --- 
a/go/vt/discovery/topology_watcher_test.go +++ b/go/vt/discovery/topology_watcher_test.go @@ -26,6 +26,8 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/logutil" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" @@ -56,8 +58,12 @@ func checkChecksum(t *testing.T, tw *TopologyWatcher, want uint32) { } func TestStartAndCloseTopoWatcher(t *testing.T) { - ts := memorytopo.NewServer("aa") + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "aa") + defer ts.Close() fhc := NewFakeHealthCheck(nil) + defer fhc.Close() topologyWatcherOperations.ZeroAll() tw := NewCellTabletsWatcher(context.Background(), ts, fhc, nil, "aa", 100*time.Microsecond, true, 5) @@ -110,8 +116,12 @@ func TestCellTabletsWatcherNoRefreshKnown(t *testing.T) { } func checkWatcher(t *testing.T, refreshKnownTablets bool) { - ts := memorytopo.NewServer("aa") + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "aa") + defer ts.Close() fhc := NewFakeHealthCheck(nil) + defer fhc.Close() logger := logutil.NewMemoryLogger() topologyWatcherOperations.ZeroAll() counts := topologyWatcherOperations.Counts() @@ -197,7 +207,7 @@ func checkWatcher(t *testing.T, refreshKnownTablets bool) { // if refreshKnownTablets is disabled, this case is *not* // detected and the tablet remains in the topo using the // old key - origTablet := proto.Clone(tablet).(*topodatapb.Tablet) + origTablet := tablet.CloneVT() origKey := TabletToMapKey(tablet) tablet.PortMap["vt"] = 456 if _, err := ts.UpdateTabletFields(context.Background(), tablet.Alias, func(t *topodatapb.Tablet) error { @@ -236,9 +246,8 @@ func checkWatcher(t *testing.T, refreshKnownTablets bool) { // tablet2 happens to land on the host:port that tablet 1 used to be on. // This can only be tested when we refresh known tablets. 
if refreshKnownTablets { - origTablet := proto.Clone(tablet).(*topodatapb.Tablet) - origTablet2 := proto.Clone(tablet2).(*topodatapb.Tablet) - + origTablet := tablet.CloneVT() + origTablet2 := tablet2.CloneVT() if _, err := ts.UpdateTabletFields(context.Background(), tablet2.Alias, func(t *topodatapb.Tablet) error { t.Hostname = tablet.Hostname t.PortMap = tablet.PortMap @@ -429,9 +438,12 @@ var ( ) func TestFilterByKeyspace(t *testing.T) { + ctx := utils.LeakCheckContext(t) + hc := NewFakeHealthCheck(nil) f := NewFilterByKeyspace(testKeyspacesToWatch) - ts := memorytopo.NewServer(testCell) + ts := memorytopo.NewServer(ctx, testCell) + defer ts.Close() tw := NewCellTabletsWatcher(context.Background(), ts, hc, f, testCell, 10*time.Minute, true, 5) for _, test := range testFilterByKeyspace { @@ -509,8 +521,12 @@ func TestFilterByKeyspace(t *testing.T) { // - does not continuosly call GetTablets for tablets that do not satisfy the filter // - does not add or remove these filtered out tablets from the its healtcheck func TestFilterByKeypsaceSkipsIgnoredTablets(t *testing.T) { - ts := memorytopo.NewServer("aa") + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "aa") + defer ts.Close() fhc := NewFakeHealthCheck(nil) + defer fhc.Close() topologyWatcherOperations.ZeroAll() counts := topologyWatcherOperations.Counts() f := NewFilterByKeyspace(testKeyspacesToWatch) @@ -590,7 +606,7 @@ func TestFilterByKeypsaceSkipsIgnoredTablets(t *testing.T) { allTablets = fhc.GetAllTablets() assert.Len(t, allTablets, 1) origKey := TabletToMapKey(tablet) - tabletWithNewPort := proto.Clone(tablet).(*topodatapb.Tablet) + tabletWithNewPort := tablet.CloneVT() tabletWithNewPort.PortMap["vt"] = 456 keyWithNewPort := TabletToMapKey(tabletWithNewPort) assert.Contains(t, allTablets, origKey) diff --git a/go/vt/discovery/utils.go b/go/vt/discovery/utils.go index 02f3b7132af..3a601830d35 100644 --- a/go/vt/discovery/utils.go +++ b/go/vt/discovery/utils.go @@ -50,9 +50,9 @@ func 
RemoveUnhealthyTablets(tabletStatsList []TabletHealth) []TabletHealth { func ParseTabletTypesAndOrder(tabletTypesStr string) ([]topodatapb.TabletType, bool, error) { inOrder := false - if strings.HasPrefix(tabletTypesStr, inOrderHint) { + if strings.HasPrefix(tabletTypesStr, InOrderHint) { inOrder = true - tabletTypesStr = tabletTypesStr[len(inOrderHint):] + tabletTypesStr = tabletTypesStr[len(InOrderHint):] } tabletTypes, err := topoproto.ParseTabletTypes(tabletTypesStr) diff --git a/go/vt/external/golib/sqlutils/sqlutils.go b/go/vt/external/golib/sqlutils/sqlutils.go index 88ab443b56b..eb1cb8c8941 100644 --- a/go/vt/external/golib/sqlutils/sqlutils.go +++ b/go/vt/external/golib/sqlutils/sqlutils.go @@ -77,35 +77,10 @@ func (this *RowData) MarshalJSON() ([]byte, error) { return json.Marshal(cells) } -func (this *RowData) Args() []any { - result := make([]any, len(*this)) - for i := range *this { - result[i] = (*(*this)[i].NullString()) - } - return result -} - -// ResultData is an ordered row set of RowData -type ResultData []RowData -type NamedResultData struct { - Columns []string - Data ResultData -} - -var EmptyResultData = ResultData{} - func (this *RowMap) GetString(key string) string { return (*this)[key].String } -// GetStringD returns a string from the map, or a default value if the key does not exist -func (this *RowMap) GetStringD(key string, def string) string { - if cell, ok := (*this)[key]; ok { - return cell.String - } - return def -} - func (this *RowMap) GetInt64(key string) int64 { res, _ := strconv.ParseInt(this.GetString(key), 10, 64) return res @@ -130,40 +105,16 @@ func (this *RowMap) GetInt(key string) int { return res } -func (this *RowMap) GetIntD(key string, def int) int { - res, err := strconv.Atoi(this.GetString(key)) - if err != nil { - return def - } - return res -} - func (this *RowMap) GetUint(key string) uint { res, _ := strconv.ParseUint(this.GetString(key), 10, 0) return uint(res) } -func (this *RowMap) GetUintD(key string, def 
uint) uint { - res, err := strconv.ParseUint(this.GetString(key), 10, 0) - if err != nil { - return def - } - return uint(res) -} - func (this *RowMap) GetUint64(key string) uint64 { res, _ := strconv.ParseUint(this.GetString(key), 10, 64) return res } -func (this *RowMap) GetUint64D(key string, def uint64) uint64 { - res, err := strconv.ParseUint(this.GetString(key), 10, 64) - if err != nil { - return def - } - return res -} - func (this *RowMap) GetUint32(key string) uint32 { res, _ := strconv.ParseUint(this.GetString(key), 10, 32) return uint32(res) @@ -181,7 +132,7 @@ func (this *RowMap) GetTime(key string) time.Time { } // knownDBs is a DB cache by uri -var knownDBs map[string]*sql.DB = make(map[string]*sql.DB) +var knownDBs = make(map[string]*sql.DB) var knownDBsMutex = &sync.Mutex{} // GetGenericDB returns a DB instance based on uri. @@ -203,12 +154,6 @@ func GetGenericDB(driverName, dataSourceName string) (*sql.DB, bool, error) { return knownDBs[dataSourceName], exists, nil } -// GetDB returns a MySQL DB instance based on uri. -// bool result indicates whether the DB was returned from cache; err -func GetDB(mysql_uri string) (*sql.DB, bool, error) { - return GetGenericDB("mysql", mysql_uri) -} - // GetSQLiteDB returns a SQLite DB instance based on DB file name. 
// bool result indicates whether the DB was returned from cache; err func GetSQLiteDB(dbFile string) (*sql.DB, bool, error) { diff --git a/go/vt/grpcclient/client.go b/go/vt/grpcclient/client.go index d3865c88c84..b2ef0d4fb28 100644 --- a/go/vt/grpcclient/client.go +++ b/go/vt/grpcclient/client.go @@ -56,7 +56,6 @@ var ( "vtctld", "vtgate", "vtgateclienttest", - "vtgr", "vtorc", "vttablet", "vttestserver", diff --git a/go/vt/grpcclient/client_flaky_test.go b/go/vt/grpcclient/client_flaky_test.go index c6baad962de..edc6d9be98c 100644 --- a/go/vt/grpcclient/client_flaky_test.go +++ b/go/vt/grpcclient/client_flaky_test.go @@ -43,7 +43,7 @@ func TestDialErrors(t *testing.T) { t.Fatal(err) } vtg := vtgateservicepb.NewVitessClient(gconn) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) _, err = vtg.Execute(ctx, &vtgatepb.ExecuteRequest{}) cancel() gconn.Close() diff --git a/go/vt/logutil/logger.go b/go/vt/logutil/logger.go index 8ebf88e085d..524ca4db4d7 100644 --- a/go/vt/logutil/logger.go +++ b/go/vt/logutil/logger.go @@ -25,6 +25,7 @@ import ( "sync" "time" + "vitess.io/vitess/go/protoutil" logutilpb "vitess.io/vitess/go/vt/proto/logutil" ) @@ -73,7 +74,7 @@ func EventToBuffer(event *logutilpb.Event, buf *bytes.Buffer) { return } - t := ProtoToTime(event.Time) + t := protoutil.TimeFromProto(event.Time).UTC() _, month, day := t.Date() hour, minute, second := t.Clock() twoDigits(buf, int(month)) @@ -137,7 +138,7 @@ func NewCallbackLogger(f func(*logutilpb.Event)) *CallbackLogger { func (cl *CallbackLogger) InfoDepth(depth int, s string) { file, line := fileAndLine(2 + depth) cl.f(&logutilpb.Event{ - Time: TimeToProto(time.Now()), + Time: protoutil.TimeToProto(time.Now()), Level: logutilpb.Level_INFO, File: file, Line: line, @@ -149,7 +150,7 @@ func (cl *CallbackLogger) InfoDepth(depth int, s string) { func (cl *CallbackLogger) WarningDepth(depth int, s string) { 
file, line := fileAndLine(2 + depth) cl.f(&logutilpb.Event{ - Time: TimeToProto(time.Now()), + Time: protoutil.TimeToProto(time.Now()), Level: logutilpb.Level_WARNING, File: file, Line: line, @@ -161,7 +162,7 @@ func (cl *CallbackLogger) WarningDepth(depth int, s string) { func (cl *CallbackLogger) ErrorDepth(depth int, s string) { file, line := fileAndLine(2 + depth) cl.f(&logutilpb.Event{ - Time: TimeToProto(time.Now()), + Time: protoutil.TimeToProto(time.Now()), Level: logutilpb.Level_ERROR, File: file, Line: line, @@ -198,7 +199,7 @@ func (cl *CallbackLogger) Error(err error) { func (cl *CallbackLogger) Printf(format string, v ...any) { file, line := fileAndLine(2) cl.f(&logutilpb.Event{ - Time: TimeToProto(time.Now()), + Time: protoutil.TimeToProto(time.Now()), Level: logutilpb.Level_CONSOLE, File: file, Line: line, diff --git a/go/vt/logutil/logger_test.go b/go/vt/logutil/logger_test.go index c34f8cf8ec3..0eb4edb2b93 100644 --- a/go/vt/logutil/logger_test.go +++ b/go/vt/logutil/logger_test.go @@ -20,6 +20,7 @@ import ( "testing" "time" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/race" logutilpb "vitess.io/vitess/go/vt/proto/logutil" ) @@ -31,7 +32,7 @@ func TestLogEvent(t *testing.T) { }{ { event: &logutilpb.Event{ - Time: TimeToProto(time.Date(2014, time.November, 10, 23, 30, 12, 123456000, time.UTC)), + Time: protoutil.TimeToProto(time.Date(2014, time.November, 10, 23, 30, 12, 123456000, time.UTC)), Level: logutilpb.Level_INFO, File: "file.go", Line: 123, @@ -41,7 +42,7 @@ func TestLogEvent(t *testing.T) { }, { event: &logutilpb.Event{ - Time: TimeToProto(time.Date(2014, time.January, 20, 23, 30, 12, 0, time.UTC)), + Time: protoutil.TimeToProto(time.Date(2014, time.January, 20, 23, 30, 12, 0, time.UTC)), Level: logutilpb.Level_WARNING, File: "file2.go", Line: 567, @@ -51,7 +52,7 @@ func TestLogEvent(t *testing.T) { }, { event: &logutilpb.Event{ - Time: TimeToProto(time.Date(2014, time.January, 20, 23, 30, 12, 0, time.UTC)), + Time: 
protoutil.TimeToProto(time.Date(2014, time.January, 20, 23, 30, 12, 0, time.UTC)), Level: logutilpb.Level_ERROR, File: "file2.go", Line: 567, @@ -61,7 +62,7 @@ func TestLogEvent(t *testing.T) { }, { event: &logutilpb.Event{ - Time: TimeToProto(time.Date(2014, time.January, 20, 23, 30, 12, 0, time.UTC)), + Time: protoutil.TimeToProto(time.Date(2014, time.January, 20, 23, 30, 12, 0, time.UTC)), Level: logutilpb.Level_CONSOLE, File: "file2.go", Line: 567, diff --git a/go/vt/logutil/proto3.go b/go/vt/logutil/proto3.go index 2bde4656dbd..b62ed8810da 100644 --- a/go/vt/logutil/proto3.go +++ b/go/vt/logutil/proto3.go @@ -17,37 +17,11 @@ limitations under the License. package logutil import ( - "time" - logutilpb "vitess.io/vitess/go/vt/proto/logutil" - vttimepb "vitess.io/vitess/go/vt/proto/vttime" ) // This file contains a few functions to help with proto3. -// ProtoToTime converts a vttimepb.Time to a time.Time. -// proto3 will eventually support timestamps, at which point we'll retire -// this. -// -// A nil pointer is like the empty timestamp. -func ProtoToTime(ts *vttimepb.Time) time.Time { - if ts == nil { - // treat nil like the empty Timestamp - return time.Time{} - } - return time.Unix(ts.Seconds, int64(ts.Nanoseconds)).UTC() -} - -// TimeToProto converts the time.Time to a vttimepb.Time. -func TimeToProto(t time.Time) *vttimepb.Time { - seconds := t.Unix() - nanos := int64(t.Sub(time.Unix(seconds, 0))) - return &vttimepb.Time{ - Seconds: seconds, - Nanoseconds: int32(nanos), - } -} - // EventStream is an interface used by RPC clients when the streaming // RPC returns a stream of log events. type EventStream interface { diff --git a/go/vt/logutil/proto3_test.go b/go/vt/logutil/proto3_test.go deleted file mode 100644 index 58a78dea2ef..00000000000 --- a/go/vt/logutil/proto3_test.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logutil - -import ( - "math" - "testing" - "time" - - "google.golang.org/protobuf/proto" - - "vitess.io/vitess/go/vt/proto/vttime" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -func utcDate(year, month, day int) time.Time { - return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC) -} - -var tests = []struct { - pt *vttime.Time - t time.Time -}{ - // The timestamp representing the Unix epoch date. - {pt: &vttime.Time{Seconds: 0, Nanoseconds: 0}, - t: utcDate(1970, 1, 1)}, - - // The smallest representable timestamp with non-negative nanos. - {pt: &vttime.Time{Seconds: math.MinInt64, Nanoseconds: 0}, - t: time.Unix(math.MinInt64, 0).UTC()}, - - // The earliest valid timestamp. - {pt: &vttime.Time{Seconds: minValidSeconds, Nanoseconds: 0}, - t: utcDate(1, 1, 1)}, - - // The largest representable timestamp with nanos in range. - {pt: &vttime.Time{Seconds: math.MaxInt64, Nanoseconds: 1e9 - 1}, - t: time.Unix(math.MaxInt64, 1e9-1).UTC()}, - - // The largest valid timestamp. 
- {pt: &vttime.Time{Seconds: maxValidSeconds - 1, Nanoseconds: 1e9 - 1}, - t: time.Date(9999, 12, 31, 23, 59, 59, 1e9-1, time.UTC)}, - - // The smallest invalid timestamp that is larger than the valid range. - {pt: &vttime.Time{Seconds: maxValidSeconds, Nanoseconds: 0}, - t: time.Unix(maxValidSeconds, 0).UTC()}, - - // A date before the epoch. - {pt: &vttime.Time{Seconds: -281836800, Nanoseconds: 0}, - t: utcDate(1961, 1, 26)}, - - // A date after the epoch. - {pt: &vttime.Time{Seconds: 1296000000, Nanoseconds: 0}, - t: utcDate(2011, 1, 26)}, - - // A date after the epoch, in the middle of the day. - {pt: &vttime.Time{Seconds: 1296012345, Nanoseconds: 940483}, - t: time.Date(2011, 1, 26, 3, 25, 45, 940483, time.UTC)}, -} - -func TestProtoToTime(t *testing.T) { - for i, s := range tests { - got := ProtoToTime(s.pt) - if got != s.t { - t.Errorf("ProtoToTime[%v](%v) = %v, want %v", i, s.pt, got, s.t) - } - } -} - -func TestTimeToProto(t *testing.T) { - for i, s := range tests { - got := TimeToProto(s.t) - if !proto.Equal(got, s.pt) { - t.Errorf("TimeToProto[%v](%v) = %v, want %v", i, s.t, got, s.pt) - } - } -} diff --git a/go/vt/logutil/throttled.go b/go/vt/logutil/throttled.go index 917798626bb..4ee11912e71 100644 --- a/go/vt/logutil/throttled.go +++ b/go/vt/logutil/throttled.go @@ -69,7 +69,7 @@ func (tl *ThrottledLogger) log(logF logFunc, format string, v ...any) { // to log and reset skippedCount if tl.skippedCount == 0 { go func(d time.Duration) { - time.Sleep(d) + <-time.After(d) tl.mu.Lock() defer tl.mu.Unlock() // Because of the go func(), we lose the stack trace, diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go index b6e63401f16..9a19175164a 100644 --- a/go/vt/mysqlctl/backup.go +++ b/go/vt/mysqlctl/backup.go @@ -37,7 +37,6 @@ import ( "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" - stats "vitess.io/vitess/go/vt/mysqlctl/backupstats" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -57,6 +56,11 @@ const ( 
RestoreState = "restore_in_progress" // BackupTimestampFormat is the format in which we save BackupTime and FinishedTime BackupTimestampFormat = "2006-01-02.150405" + + // closeTimeout is the timeout for closing backup files after writing. + // The value is a bit arbitrary. How long does it make sense to wait for a Close()? With a cloud-based implementation, + // network might be an issue. _Seconds_ are probably too short. The whereabouts of a minute us a reasonable value. + closeTimeout = 1 * time.Minute ) const ( @@ -93,6 +97,18 @@ func init() { } } +func FormatRFC3339(t time.Time) string { + return t.Format(time.RFC3339) +} + +func ParseRFC3339(timestamp string) (time.Time, error) { + return time.Parse(time.RFC3339, timestamp) +} + +func ParseBinlogTimestamp(timestamp string) (time.Time, error) { + return time.Parse("060102 15:04:05", timestamp) +} + func registerBackupFlags(fs *pflag.FlagSet) { fs.BoolVar(&backupStorageCompress, "backup_storage_compress", backupStorageCompress, "if set, the backup files will be compressed.") fs.IntVar(&backupCompressBlockSize, "backup_storage_block_size", backupCompressBlockSize, "if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000).") @@ -105,7 +121,7 @@ func registerBackupFlags(fs *pflag.FlagSet) { // - remember if we were replicating, restore the exact same state func Backup(ctx context.Context, params BackupParams) error { if params.Stats == nil { - params.Stats = stats.NoStats() + params.Stats = backupstats.NoStats() } startTs := time.Now() @@ -120,8 +136,8 @@ func Backup(ctx context.Context, params BackupParams) error { // Scope bsStats to selected storage engine. 
bsStats := params.Stats.Scope( - stats.Component(stats.BackupStorage), - stats.Implementation( + backupstats.Component(backupstats.BackupStorage), + backupstats.Implementation( titleCase(backupstorage.BackupStorageImplementation), ), ) @@ -139,8 +155,8 @@ func Backup(ctx context.Context, params BackupParams) error { // Scope stats to selected backup engine. beParams := params.Copy() beParams.Stats = params.Stats.Scope( - stats.Component(stats.BackupEngine), - stats.Implementation(titleCase(backupEngineImplementation)), + backupstats.Component(backupstats.BackupEngine), + backupstats.Implementation(titleCase(backupEngineImplementation)), ) var be BackupEngine if isIncrementalBackup(beParams) { @@ -175,8 +191,8 @@ func Backup(ctx context.Context, params BackupParams) error { } // The backup worked, so just return the finish error, if any. - stats.DeprecatedBackupDurationS.Set(int64(time.Since(startTs).Seconds())) - params.Stats.Scope(stats.Operation("Backup")).TimedIncrement(time.Since(startTs)) + backupstats.DeprecatedBackupDurationS.Set(int64(time.Since(startTs).Seconds())) + params.Stats.Scope(backupstats.Operation("Backup")).TimedIncrement(time.Since(startTs)) return finishErr } @@ -342,7 +358,7 @@ func ensureRestoredGTIDPurgedMatchesManifest(ctx context.Context, manifest *Back // and returns ErrNoBackup. Any other error is returned. func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error) { if params.Stats == nil { - params.Stats = stats.NoStats() + params.Stats = backupstats.NoStats() } startTs := time.Now() @@ -356,8 +372,8 @@ func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error) // Scope bsStats to selected storage engine. 
bsStats := params.Stats.Scope( - stats.Component(backupstats.BackupStorage), - stats.Implementation( + backupstats.Component(backupstats.BackupStorage), + backupstats.Implementation( titleCase(backupstorage.BackupStorageImplementation), ), ) @@ -411,8 +427,8 @@ func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error) // Scope stats to selected backup engine. reParams := params.Copy() reParams.Stats = params.Stats.Scope( - stats.Component(backupstats.BackupEngine), - stats.Implementation(titleCase(backupEngineImplementation)), + backupstats.Component(backupstats.BackupEngine), + backupstats.Implementation(titleCase(backupEngineImplementation)), ) manifest, err := re.ExecuteRestore(ctx, reParams, bh) if err != nil { @@ -470,8 +486,8 @@ func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error) return nil, err } - stats.DeprecatedRestoreDurationS.Set(int64(time.Since(startTs).Seconds())) - params.Stats.Scope(stats.Operation("Restore")).TimedIncrement(time.Since(startTs)) + backupstats.DeprecatedRestoreDurationS.Set(int64(time.Since(startTs).Seconds())) + params.Stats.Scope(backupstats.Operation("Restore")).TimedIncrement(time.Since(startTs)) params.Logger.Infof("Restore: complete") return manifest, nil } diff --git a/go/vt/mysqlctl/backup_blackbox_test.go b/go/vt/mysqlctl/backup_blackbox_test.go new file mode 100644 index 00000000000..8de6a8679fa --- /dev/null +++ b/go/vt/mysqlctl/backup_blackbox_test.go @@ -0,0 +1,601 @@ +/* +Copyright 2022 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package mysqlctl_test is the blackbox tests for package mysqlctl. +package mysqlctl_test + +import ( + "context" + "fmt" + "os" + "path" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/utils" + + "vitess.io/vitess/go/mysql/replication" + + "vitess.io/vitess/go/sqltypes" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/mysqlctl/backupstats" + "vitess.io/vitess/go/vt/mysqlctl/filebackupstorage" + "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vttime" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" +) + +func setBuiltinBackupMysqldDeadline(t time.Duration) time.Duration { + old := mysqlctl.BuiltinBackupMysqldTimeout + mysqlctl.BuiltinBackupMysqldTimeout = t + + return old +} + +func createBackupDir(root string, dirs ...string) error { + for _, dir := range dirs { + if err := os.MkdirAll(path.Join(root, dir), 0755); err != nil { + return err + } + } + + return nil +} + +func createBackupFiles(root string, fileCount int, ext string) error { + for i := 0; i < fileCount; i++ { + f, err := os.Create(path.Join(root, fmt.Sprintf("%d.%s", i, ext))) + if err != nil { + return err + } + if _, err := f.Write([]byte("hello, world!")); err != nil { + return err + } + defer f.Close() + } + + return nil +} + +func TestExecuteBackup(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + // Set up local backup directory + backupRoot := "testdata/builtinbackup_test" + filebackupstorage.FileBackupStorageRoot = backupRoot + require.NoError(t, createBackupDir(backupRoot, "innodb", "log", "datadir")) + dataDir := path.Join(backupRoot, "datadir") + // Add some files under data directory to force backup to actually 
backup files. + require.NoError(t, createBackupDir(dataDir, "test1")) + require.NoError(t, createBackupDir(dataDir, "test2")) + require.NoError(t, createBackupFiles(path.Join(dataDir, "test1"), 2, "ibd")) + require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) + defer os.RemoveAll(backupRoot) + + needIt, err := needInnoDBRedoLogSubdir() + require.NoError(t, err) + if needIt { + fpath := path.Join("log", mysql.DynamicRedoLogSubdir) + if err := createBackupDir(backupRoot, fpath); err != nil { + require.Failf(t, err.Error(), "failed to create directory: %s", fpath) + } + } + + // Set up topo + keyspace, shard := "mykeyspace", "-80" + ts := memorytopo.NewServer(ctx, "cell1") + defer ts.Close() + + require.NoError(t, ts.CreateKeyspace(ctx, keyspace, &topodata.Keyspace{})) + require.NoError(t, ts.CreateShard(ctx, keyspace, shard)) + + tablet := topo.NewTablet(100, "cell1", "mykeyspace-00-80-0100") + tablet.Keyspace = keyspace + tablet.Shard = shard + + require.NoError(t, ts.CreateTablet(ctx, tablet)) + + _, err = ts.UpdateShardFields(ctx, keyspace, shard, func(si *topo.ShardInfo) error { + si.PrimaryAlias = &topodata.TabletAlias{Uid: 100, Cell: "cell1"} + + now := time.Now() + si.PrimaryTermStartTime = &vttime.Time{Seconds: int64(now.Second()), Nanoseconds: int32(now.Nanosecond())} + + return nil + }) + require.NoError(t, err) + + be := &mysqlctl.BuiltinBackupEngine{} + + // Configure a tight deadline to force a timeout + oldDeadline := setBuiltinBackupMysqldDeadline(time.Second) + defer setBuiltinBackupMysqldDeadline(oldDeadline) + + bh := filebackupstorage.NewBackupHandle(nil, "", "", false) + + // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: + // "STOP SLAVE", "START SLAVE", in that order. 
+ fakedb := fakesqldb.New(t) + defer fakedb.Close() + mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} + // mysqld.ShutdownTime = time.Minute + + fakeStats := backupstats.NewFakeStats() + + ok, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + Logger: logutil.NewConsoleLogger(), + Mysqld: mysqld, + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + }, + Concurrency: 2, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + Stats: fakeStats, + }, bh) + + require.NoError(t, err) + assert.True(t, ok) + + var destinationCloseStats int + var destinationOpenStats int + var destinationWriteStats int + var sourceCloseStats int + var sourceOpenStats int + var sourceReadStats int + + for _, sr := range fakeStats.ScopeReturns { + switch sr.ScopeV[backupstats.ScopeOperation] { + case "Destination:Close": + destinationCloseStats++ + require.Len(t, sr.TimedIncrementCalls, 1) + case "Destination:Open": + destinationOpenStats++ + require.Len(t, sr.TimedIncrementCalls, 1) + case "Destination:Write": + destinationWriteStats++ + require.GreaterOrEqual(t, len(sr.TimedIncrementBytesCalls), 1) + case "Source:Close": + sourceCloseStats++ + require.Len(t, sr.TimedIncrementCalls, 1) + case "Source:Open": + sourceOpenStats++ + require.Len(t, sr.TimedIncrementCalls, 1) + case "Source:Read": + sourceReadStats++ + require.GreaterOrEqual(t, len(sr.TimedIncrementBytesCalls), 1) + } + } + + require.Equal(t, 4, destinationCloseStats) + require.Equal(t, 4, destinationOpenStats) + require.Equal(t, 4, destinationWriteStats) + require.Equal(t, 4, sourceCloseStats) + require.Equal(t, 4, sourceOpenStats) + require.Equal(t, 4, sourceReadStats) + + mysqld.ExpectedExecuteSuperQueryCurrent = 0 // resest the index of what queries we've run + 
mysqld.ShutdownTime = time.Minute // reminder that shutdownDeadline is 1s + + ok, err = be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + Logger: logutil.NewConsoleLogger(), + Mysqld: mysqld, + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + }, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + }, bh) + + assert.Error(t, err) + assert.False(t, ok) +} + +func TestExecuteBackupWithSafeUpgrade(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + // Set up local backup directory + backupRoot := "testdata/builtinbackup_test" + filebackupstorage.FileBackupStorageRoot = backupRoot + require.NoError(t, createBackupDir(backupRoot, "innodb", "log", "datadir")) + dataDir := path.Join(backupRoot, "datadir") + // Add some files under data directory to force backup to actually backup files. + require.NoError(t, createBackupDir(dataDir, "test1")) + require.NoError(t, createBackupDir(dataDir, "test2")) + require.NoError(t, createBackupFiles(path.Join(dataDir, "test1"), 2, "ibd")) + require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) + defer os.RemoveAll(backupRoot) + + needIt, err := needInnoDBRedoLogSubdir() + require.NoError(t, err) + if needIt { + fpath := path.Join("log", mysql.DynamicRedoLogSubdir) + if err := createBackupDir(backupRoot, fpath); err != nil { + require.Failf(t, err.Error(), "failed to create directory: %s", fpath) + } + } + + // Set up topo + keyspace, shard := "mykeyspace", "-80" + ts := memorytopo.NewServer(ctx, "cell1") + defer ts.Close() + + require.NoError(t, ts.CreateKeyspace(ctx, keyspace, &topodata.Keyspace{})) + require.NoError(t, ts.CreateShard(ctx, keyspace, shard)) + + tablet := topo.NewTablet(100, "cell1", "mykeyspace-00-80-0100") + tablet.Keyspace = keyspace + tablet.Shard = shard + + require.NoError(t, ts.CreateTablet(ctx, tablet)) + + _, err = 
ts.UpdateShardFields(ctx, keyspace, shard, func(si *topo.ShardInfo) error { + si.PrimaryAlias = &topodata.TabletAlias{Uid: 100, Cell: "cell1"} + + now := time.Now() + si.PrimaryTermStartTime = &vttime.Time{Seconds: int64(now.Second()), Nanoseconds: int32(now.Nanosecond())} + + return nil + }) + require.NoError(t, err) + + be := &mysqlctl.BuiltinBackupEngine{} + + // Configure a tight deadline to force a timeout + oldDeadline := setBuiltinBackupMysqldDeadline(time.Second) + defer setBuiltinBackupMysqldDeadline(oldDeadline) + + bh := filebackupstorage.NewBackupHandle(nil, "", "", false) + + // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: + // "STOP SLAVE", "START SLAVE", in that order. + // It also needs to be allowed to receive the query to disable the innodb_fast_shutdown flag. + fakedb := fakesqldb.New(t) + defer fakedb.Close() + mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} + mysqld.FetchSuperQueryMap = map[string]*sqltypes.Result{ + "SET GLOBAL innodb_fast_shutdown=0": {}, + } + + ok, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + Logger: logutil.NewConsoleLogger(), + Mysqld: mysqld, + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + }, + Concurrency: 2, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + Stats: backupstats.NewFakeStats(), + UpgradeSafe: true, + }, bh) + + require.NoError(t, err) + assert.True(t, ok) +} + +// TestExecuteBackupWithCanceledContext tests the ability of the backup function to gracefully handle cases where errors +// occur due to various reasons, such as context time cancel. The process should not panic in these situations. 
+func TestExecuteBackupWithCanceledContext(t *testing.T) {
+	ctx := utils.LeakCheckContext(t)
+
+	// Set up local backup directory. A unique, per-run directory name keeps
+	// concurrent/repeated runs from colliding under testdata/.
+	id := fmt.Sprintf("%d", time.Now().UnixNano())
+	backupRoot := fmt.Sprintf("testdata/builtinbackup_test_%s", id)
+	filebackupstorage.FileBackupStorageRoot = backupRoot
+	require.NoError(t, createBackupDir(backupRoot, "innodb", "log", "datadir"))
+	dataDir := path.Join(backupRoot, "datadir")
+	// Add some files under data directory to force backup to execute semaphore acquire inside
+	// backupFiles() method (https://github.com/vitessio/vitess/blob/main/go/vt/mysqlctl/builtinbackupengine.go#L483).
+	require.NoError(t, createBackupDir(dataDir, "test1"))
+	require.NoError(t, createBackupDir(dataDir, "test2"))
+	require.NoError(t, createBackupFiles(path.Join(dataDir, "test1"), 2, "ibd"))
+	require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd"))
+	defer os.RemoveAll(backupRoot)
+
+	// MySQL >= 8.0.30 keeps redo logs in a "#innodb_redo" subdirectory;
+	// mirror that layout so the backup engine finds what it expects.
+	needIt, err := needInnoDBRedoLogSubdir()
+	require.NoError(t, err)
+	if needIt {
+		fpath := path.Join("log", mysql.DynamicRedoLogSubdir)
+		if err := createBackupDir(backupRoot, fpath); err != nil {
+			require.Failf(t, err.Error(), "failed to create directory: %s", fpath)
+		}
+	}
+
+	// Set up topo: a keyspace/shard with a single tablet marked as primary.
+	keyspace, shard := "mykeyspace", "-80"
+	ts := memorytopo.NewServer(ctx, "cell1")
+	defer ts.Close()
+
+	require.NoError(t, ts.CreateKeyspace(ctx, keyspace, &topodata.Keyspace{}))
+	require.NoError(t, ts.CreateShard(ctx, keyspace, shard))
+
+	tablet := topo.NewTablet(100, "cell1", "mykeyspace-00-80-0100")
+	tablet.Keyspace = keyspace
+	tablet.Shard = shard
+
+	require.NoError(t, ts.CreateTablet(ctx, tablet))
+
+	_, err = ts.UpdateShardFields(ctx, keyspace, shard, func(si *topo.ShardInfo) error {
+		si.PrimaryAlias = &topodata.TabletAlias{Uid: 100, Cell: "cell1"}
+
+		now := time.Now()
+		si.PrimaryTermStartTime = &vttime.Time{Seconds: int64(now.Second()), Nanoseconds: int32(now.Nanosecond())}
+
+		return nil
+	})
+	require.NoError(t, err)
+
+	be := &mysqlctl.BuiltinBackupEngine{}
+	bh := filebackupstorage.NewBackupHandle(nil, "", "", false)
+	// Spin up a fake daemon to be used in backups. It needs to be allowed to receive:
+	// "STOP SLAVE", "START SLAVE", in that order.
+	fakedb := fakesqldb.New(t)
+	defer fakedb.Close()
+	mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb)
+	defer mysqld.Close()
+	mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"}
+
+	// Cancel the context deliberately before the backup starts, so every file
+	// copy inside ExecuteBackup should fail with "context canceled".
+	cancelledCtx, cancelCtx := context.WithCancel(context.Background())
+	cancelCtx()
+
+	ok, err := be.ExecuteBackup(cancelledCtx, mysqlctl.BackupParams{
+		Logger: logutil.NewConsoleLogger(),
+		Mysqld: mysqld,
+		Cnf: &mysqlctl.Mycnf{
+			InnodbDataHomeDir:     path.Join(backupRoot, "innodb"),
+			InnodbLogGroupHomeDir: path.Join(backupRoot, "log"),
+			DataDir:               path.Join(backupRoot, "datadir"),
+		},
+		Stats:        backupstats.NewFakeStats(),
+		Concurrency:  2,
+		HookExtraEnv: map[string]string{},
+		TopoServer:   ts,
+		Keyspace:     keyspace,
+		Shard:        shard,
+	}, bh)
+
+	require.Error(t, err)
+	// all four files will fail
+	require.ErrorContains(t, err, "context canceled;context canceled;context canceled;context canceled")
+	assert.False(t, ok)
+}
+
+// TestExecuteRestoreWithTimedOutContext tests the ability of the restore function to gracefully handle cases where errors
+// occur due to various reasons, such as context timed-out. The process should not panic in these situations.
+func TestExecuteRestoreWithTimedOutContext(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + // Set up local backup directory + id := fmt.Sprintf("%d", time.Now().UnixNano()) + backupRoot := fmt.Sprintf("testdata/builtinbackup_test_%s", id) + filebackupstorage.FileBackupStorageRoot = backupRoot + require.NoError(t, createBackupDir(backupRoot, "innodb", "log", "datadir")) + dataDir := path.Join(backupRoot, "datadir") + // Add some files under data directory to force backup to execute semaphore acquire inside + // backupFiles() method (https://github.com/vitessio/vitess/blob/main/go/vt/mysqlctl/builtinbackupengine.go#L483). + require.NoError(t, createBackupDir(dataDir, "test1")) + require.NoError(t, createBackupDir(dataDir, "test2")) + require.NoError(t, createBackupFiles(path.Join(dataDir, "test1"), 2, "ibd")) + require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) + defer os.RemoveAll(backupRoot) + + needIt, err := needInnoDBRedoLogSubdir() + require.NoError(t, err) + if needIt { + fpath := path.Join("log", mysql.DynamicRedoLogSubdir) + if err := createBackupDir(backupRoot, fpath); err != nil { + require.Failf(t, err.Error(), "failed to create directory: %s", fpath) + } + } + + // Set up topo + keyspace, shard := "mykeyspace", "-80" + ts := memorytopo.NewServer(ctx, "cell1") + defer ts.Close() + + require.NoError(t, ts.CreateKeyspace(ctx, keyspace, &topodata.Keyspace{})) + require.NoError(t, ts.CreateShard(ctx, keyspace, shard)) + + tablet := topo.NewTablet(100, "cell1", "mykeyspace-00-80-0100") + tablet.Keyspace = keyspace + tablet.Shard = shard + + require.NoError(t, ts.CreateTablet(ctx, tablet)) + + _, err = ts.UpdateShardFields(ctx, keyspace, shard, func(si *topo.ShardInfo) error { + si.PrimaryAlias = &topodata.TabletAlias{Uid: 100, Cell: "cell1"} + + now := time.Now() + si.PrimaryTermStartTime = &vttime.Time{Seconds: int64(now.Second()), Nanoseconds: int32(now.Nanosecond())} + + return nil + }) + require.NoError(t, err) + + be := 
&mysqlctl.BuiltinBackupEngine{} + bh := filebackupstorage.NewBackupHandle(nil, "", "", false) + // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: + // "STOP SLAVE", "START SLAVE", in that order. + fakedb := fakesqldb.New(t) + defer fakedb.Close() + mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} + + ok, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + Logger: logutil.NewConsoleLogger(), + Mysqld: mysqld, + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + }, + Stats: backupstats.NewFakeStats(), + Concurrency: 2, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + }, bh) + + require.NoError(t, err) + assert.True(t, ok) + + // Now try to restore the above backup. + bh = filebackupstorage.NewBackupHandle(nil, "", "", true) + fakedb = fakesqldb.New(t) + defer fakedb.Close() + mysqld = mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} + + fakeStats := backupstats.NewFakeStats() + + restoreParams := mysqlctl.RestoreParams{ + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + BinLogPath: path.Join(backupRoot, "binlog"), + RelayLogPath: path.Join(backupRoot, "relaylog"), + RelayLogIndexPath: path.Join(backupRoot, "relaylogindex"), + RelayLogInfoPath: path.Join(backupRoot, "relayloginfo"), + }, + Logger: logutil.NewConsoleLogger(), + Mysqld: mysqld, + Concurrency: 2, + HookExtraEnv: map[string]string{}, + DeleteBeforeRestore: false, + DbName: "test", + Keyspace: "test", + Shard: "-", + StartTime: time.Now(), + RestoreToPos: replication.Position{}, + 
RestoreToTimestamp: time.Time{}, + DryRun: false, + Stats: fakeStats, + } + + // Successful restore. + bm, err := be.ExecuteRestore(ctx, restoreParams, bh) + assert.NoError(t, err) + assert.NotNil(t, bm) + + var destinationCloseStats int + var destinationOpenStats int + var destinationWriteStats int + var sourceCloseStats int + var sourceOpenStats int + var sourceReadStats int + + for _, sr := range fakeStats.ScopeReturns { + switch sr.ScopeV[backupstats.ScopeOperation] { + case "Destination:Close": + destinationCloseStats++ + require.Len(t, sr.TimedIncrementCalls, 1) + case "Destination:Open": + destinationOpenStats++ + require.Len(t, sr.TimedIncrementCalls, 1) + case "Destination:Write": + destinationWriteStats++ + require.GreaterOrEqual(t, len(sr.TimedIncrementBytesCalls), 1) + case "Source:Close": + sourceCloseStats++ + require.Len(t, sr.TimedIncrementCalls, 1) + case "Source:Open": + sourceOpenStats++ + require.Len(t, sr.TimedIncrementCalls, 1) + case "Source:Read": + sourceReadStats++ + require.GreaterOrEqual(t, len(sr.TimedIncrementBytesCalls), 1) + } + } + + require.Equal(t, 4, destinationCloseStats) + require.Equal(t, 4, destinationOpenStats) + require.Equal(t, 4, destinationWriteStats) + require.Equal(t, 4, sourceCloseStats) + require.Equal(t, 4, sourceOpenStats) + require.Equal(t, 4, sourceReadStats) + + // Restore using timed-out context + fakedb = fakesqldb.New(t) + defer fakedb.Close() + mysqld = mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} + restoreParams.Mysqld = mysqld + timedOutCtx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + // Let the context time out. + time.Sleep(1 * time.Second) + bm, err = be.ExecuteRestore(timedOutCtx, restoreParams, bh) + // ExecuteRestore should fail. 
+ assert.Error(t, err) + assert.Nil(t, bm) + // error message can contain any combination of "context deadline exceeded" or "context canceled" + if !strings.Contains(err.Error(), "context canceled") && !strings.Contains(err.Error(), "context deadline exceeded") { + assert.Fail(t, "Test should fail with either `context canceled` or `context deadline exceeded`") + } +} + +// needInnoDBRedoLogSubdir indicates whether we need to create a redo log subdirectory. +// Starting with MySQL 8.0.30, the InnoDB redo logs are stored in a subdirectory of the +// (/. by default) called "#innodb_redo". See: +// +// https://dev.mysql.com/doc/refman/8.0/en/innodb-redo-log.html#innodb-modifying-redo-log-capacity +func needInnoDBRedoLogSubdir() (needIt bool, err error) { + mysqldVersionStr, err := mysqlctl.GetVersionString() + if err != nil { + return needIt, err + } + _, sv, err := mysqlctl.ParseVersionString(mysqldVersionStr) + if err != nil { + return needIt, err + } + versionStr := fmt.Sprintf("%d.%d.%d", sv.Major, sv.Minor, sv.Patch) + _, capableOf, _ := mysql.GetFlavor(versionStr, nil) + if capableOf == nil { + return needIt, fmt.Errorf("cannot determine database flavor details for version %s", versionStr) + } + return capableOf(mysql.DynamicRedoLogCapacityFlavorCapability) +} diff --git a/go/vt/mysqlctl/backup_test.go b/go/vt/mysqlctl/backup_test.go index d26ca873243..5b97f709c2f 100644 --- a/go/vt/mysqlctl/backup_test.go +++ b/go/vt/mysqlctl/backup_test.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io" "os" "path" @@ -30,6 +31,10 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/utils" + + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/vt/logutil" @@ -40,8 +45,7 @@ import ( // TestBackupExecutesBackupWithScopedParams tests that Backup passes // a Scope()-ed stats to backupengine ExecuteBackup. 
func TestBackupExecutesBackupWithScopedParams(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() + env := createFakeBackupRestoreEnv(t) require.Nil(t, Backup(env.ctx, env.backupParams), env.logger.Events) @@ -50,7 +54,7 @@ func TestBackupExecutesBackupWithScopedParams(t *testing.T) { var executeBackupStats *backupstats.FakeStats for _, sr := range env.stats.ScopeReturns { if sr == executeBackupParams.Stats { - executeBackupStats = sr.(*backupstats.FakeStats) + executeBackupStats = sr } } require.Contains(t, executeBackupStats.ScopeV, backupstats.ScopeComponent) @@ -62,9 +66,7 @@ func TestBackupExecutesBackupWithScopedParams(t *testing.T) { // TestBackupNoStats tests that if BackupParams.Stats is nil, then Backup will // pass non-nil Stats to sub-components. func TestBackupNoStats(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() - + env := createFakeBackupRestoreEnv(t) env.setStats(nil) require.Nil(t, Backup(env.ctx, env.backupParams), env.logger.Events) @@ -77,8 +79,7 @@ func TestBackupNoStats(t *testing.T) { // TestBackupParameterizesBackupStorageWithScopedStats tests that Backup passes // a Scope()-ed stats to BackupStorage.WithParams. func TestBackupParameterizesBackupStorageWithScopedStats(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() + env := createFakeBackupRestoreEnv(t) require.Nil(t, Backup(env.ctx, env.backupParams), env.logger.Events) @@ -86,7 +87,7 @@ func TestBackupParameterizesBackupStorageWithScopedStats(t *testing.T) { var storageStats *backupstats.FakeStats for _, sr := range env.stats.ScopeReturns { if sr == env.backupStorage.WithParamsCalls[0].Stats { - storageStats = sr.(*backupstats.FakeStats) + storageStats = sr } } require.Contains(t, storageStats.ScopeV, backupstats.ScopeComponent) @@ -97,8 +98,7 @@ func TestBackupParameterizesBackupStorageWithScopedStats(t *testing.T) { // TestBackupEmitsStats tests that Backup emits stats. 
func TestBackupEmitsStats(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() + env := createFakeBackupRestoreEnv(t) // Force ExecuteBackup to take time so we can test stats emission. env.backupEngine.ExecuteBackupDuration = 1001 * time.Millisecond @@ -114,8 +114,7 @@ func TestBackupEmitsStats(t *testing.T) { // backupstorage.Params to backupstorage, but only if it responds to // backupstorage.WithParams. func TestBackupTriesToParameterizeBackupStorage(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() + env := createFakeBackupRestoreEnv(t) require.Nil(t, Backup(env.ctx, env.backupParams), env.logger.Events) @@ -319,8 +318,7 @@ func TestFindFilesToBackupWithRedoLog(t *testing.T) { // TestRestoreEmitsStats tests that Restore emits stats. func TestRestoreEmitsStats(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() + env := createFakeBackupRestoreEnv(t) // Force ExecuteRestore to take time so we can test stats emission. env.backupEngine.ExecuteRestoreDuration = 1001 * time.Millisecond @@ -336,8 +334,7 @@ func TestRestoreEmitsStats(t *testing.T) { // TestRestoreExecutesRestoreWithScopedParams tests that Restore passes // a Scope()-ed stats to backupengine ExecuteRestore. 
func TestRestoreExecutesRestoreWithScopedParams(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() + env := createFakeBackupRestoreEnv(t) _, err := Restore(env.ctx, env.restoreParams) require.Nil(t, err, env.logger.Events) @@ -347,7 +344,7 @@ func TestRestoreExecutesRestoreWithScopedParams(t *testing.T) { var executeRestoreStats *backupstats.FakeStats for _, sr := range env.stats.ScopeReturns { if sr == executeRestoreParams.Stats { - executeRestoreStats = sr.(*backupstats.FakeStats) + executeRestoreStats = sr } } require.Contains(t, executeRestoreStats.ScopeV, backupstats.ScopeComponent) @@ -359,9 +356,7 @@ func TestRestoreExecutesRestoreWithScopedParams(t *testing.T) { // TestRestoreNoStats tests that if RestoreParams.Stats is nil, then Restore will // pass non-nil Stats to sub-components. func TestRestoreNoStats(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() - + env := createFakeBackupRestoreEnv(t) env.setStats(nil) _, err := Restore(env.ctx, env.restoreParams) @@ -375,8 +370,7 @@ func TestRestoreNoStats(t *testing.T) { // TestRestoreParameterizesBackupStorageWithScopedStats tests that Restore passes // a Scope()-ed stats to BackupStorage.WithParams. 
func TestRestoreParameterizesBackupStorageWithScopedStats(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() + env := createFakeBackupRestoreEnv(t) _, err := Restore(env.ctx, env.restoreParams) require.Nil(t, err, env.logger.Events) @@ -385,7 +379,7 @@ func TestRestoreParameterizesBackupStorageWithScopedStats(t *testing.T) { var storageStats *backupstats.FakeStats for _, sr := range env.stats.ScopeReturns { if sr == env.backupStorage.WithParamsCalls[0].Stats { - storageStats = sr.(*backupstats.FakeStats) + storageStats = sr } } require.Contains(t, storageStats.ScopeV, backupstats.ScopeComponent) @@ -398,8 +392,7 @@ func TestRestoreParameterizesBackupStorageWithScopedStats(t *testing.T) { // backupstorage.Params to backupstorage, but only if it responds to // backupstorage.WithParams. func TestRestoreTriesToParameterizeBackupStorage(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() + env := createFakeBackupRestoreEnv(t) _, err := Restore(env.ctx, env.restoreParams) require.Nil(t, err, env.logger.Events) @@ -419,6 +412,133 @@ func TestRestoreTriesToParameterizeBackupStorage(t *testing.T) { require.NotNil(t, scopedStats) } +// TestRestoreManifestMySQLVersionValidation tests that Restore tries to validate +// the MySQL version and safe upgrade attribute. 
+func TestRestoreManifestMySQLVersionValidation(t *testing.T) { + testCases := []struct { + fromVersion, toVersion string + upgradeSafe bool + wantErr bool + }{ + { + fromVersion: "mysqld Ver 5.6.42", + toVersion: "mysqld Ver 5.7.40", + upgradeSafe: false, + wantErr: true, + }, + { + fromVersion: "mysqld Ver 5.6.42", + toVersion: "mysqld Ver 5.7.40", + upgradeSafe: true, + wantErr: false, + }, + { + fromVersion: "mysqld Ver 5.7.42", + toVersion: "mysqld Ver 8.0.32", + upgradeSafe: true, + wantErr: false, + }, + { + fromVersion: "mysqld Ver 5.7.42", + toVersion: "mysqld Ver 8.0.32", + upgradeSafe: false, + wantErr: true, + }, + { + fromVersion: "mysqld Ver 5.7.42", + toVersion: "mysqld Ver 8.0.32", + upgradeSafe: true, + wantErr: false, + }, + { + fromVersion: "mysqld Ver 8.0.32", + toVersion: "mysqld Ver 8.0.32", + upgradeSafe: false, + wantErr: false, + }, + { + fromVersion: "mysqld Ver 8.0.32", + toVersion: "mysqld Ver 8.0.32", + upgradeSafe: true, + wantErr: false, + }, + { + fromVersion: "mysqld Ver 8.0.32", + toVersion: "mysqld Ver 8.0.31", + upgradeSafe: false, + wantErr: true, + }, + { + fromVersion: "mysqld Ver 8.0.32", + toVersion: "mysqld Ver 8.0.31", + upgradeSafe: true, + wantErr: true, + }, + { + fromVersion: "mysqld Ver 8.0.32", + toVersion: "mysqld Ver 8.0.33", + upgradeSafe: false, + wantErr: true, + }, + { + fromVersion: "mysqld Ver 8.0.32", + toVersion: "mysqld Ver 8.0.33", + upgradeSafe: true, + wantErr: false, + }, + { + fromVersion: "", + toVersion: "mysqld Ver 8.0.33", + upgradeSafe: false, + wantErr: false, + }, + { + fromVersion: "", + toVersion: "mysqld Ver 8.0.33", + upgradeSafe: true, + wantErr: false, + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s->%s upgradeSafe=%t", tc.fromVersion, tc.toVersion, tc.upgradeSafe), func(t *testing.T) { + env := createFakeBackupRestoreEnv(t) + env.mysqld.Version = tc.toVersion + + manifest := BackupManifest{ + BackupTime: time.Now().Add(-1 * time.Hour).Format(time.RFC3339), + 
BackupMethod: "fake", + Keyspace: "test", + Shard: "-", + MySQLVersion: tc.fromVersion, + UpgradeSafe: tc.upgradeSafe, + } + + manifestBytes, err := json.Marshal(manifest) + require.Nil(t, err) + + env.backupEngine.ExecuteRestoreReturn = FakeBackupEngineExecuteRestoreReturn{&manifest, nil} + env.backupStorage.ListBackupsReturn = FakeBackupStorageListBackupsReturn{ + BackupHandles: []backupstorage.BackupHandle{ + &FakeBackupHandle{ + ReadFileReturnF: func(context.Context, string) (io.ReadCloser, error) { + return io.NopCloser(bytes.NewBuffer(manifestBytes)), nil + }, + }, + }, + } + + _, err = Restore(env.ctx, env.restoreParams) + if tc.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } + +} + type forTest []FileEntry func (f forTest) Len() int { return len(f) } @@ -436,7 +556,7 @@ type fakeBackupRestoreEnv struct { stats *backupstats.FakeStats } -func createFakeBackupRestoreEnv(t *testing.T) (*fakeBackupRestoreEnv, func()) { +func createFakeBackupRestoreEnv(t *testing.T) *fakeBackupRestoreEnv { ctx := context.Background() logger := logutil.NewMemoryLogger() @@ -444,7 +564,6 @@ func createFakeBackupRestoreEnv(t *testing.T) (*fakeBackupRestoreEnv, func()) { sqldb.SetNeverFail(true) mysqld := NewFakeMysqlDaemon(sqldb) require.Nil(t, mysqld.Shutdown(ctx, nil, false)) - defer mysqld.Close() dirName, err := os.MkdirTemp("", "vt_backup_test") require.Nil(t, err) @@ -480,16 +599,17 @@ func createFakeBackupRestoreEnv(t *testing.T) (*fakeBackupRestoreEnv, func()) { Keyspace: "test", Shard: "-", StartTime: time.Now(), - RestoreToPos: mysql.Position{}, + RestoreToPos: replication.Position{}, DryRun: false, Stats: stats, } manifest := BackupManifest{ - BackupTime: time.Now().Add(-1 * time.Hour).Format(time.RFC3339), + BackupTime: FormatRFC3339(time.Now().Add(-1 * time.Hour)), BackupMethod: "fake", Keyspace: "test", Shard: "-", + MySQLVersion: "8.0.32", } manifestBytes, err := json.Marshal(manifest) @@ -519,7 +639,12 @@ func 
createFakeBackupRestoreEnv(t *testing.T) (*fakeBackupRestoreEnv, func()) { previousBackupStorageImplementation := backupstorage.BackupStorageImplementation backupstorage.BackupStorageImplementation = "fake" - closer := func() { + // all restore integration tests must be leak checked + t.Cleanup(func() { + utils.EnsureNoLeaks(t) + }) + + t.Cleanup(func() { backupstats.DeprecatedBackupDurationS.Reset() backupstats.DeprecatedRestoreDurationS.Reset() @@ -528,7 +653,9 @@ func createFakeBackupRestoreEnv(t *testing.T) (*fakeBackupRestoreEnv, func()) { delete(backupstorage.BackupStorageMap, "fake") backupstorage.BackupStorageImplementation = previousBackupStorageImplementation - } + mysqld.Close() + sqldb.Close() + }) return &fakeBackupRestoreEnv{ backupEngine: &testBackupEngine, @@ -539,7 +666,7 @@ func createFakeBackupRestoreEnv(t *testing.T) (*fakeBackupRestoreEnv, func()) { mysqld: mysqld, restoreParams: restoreParams, stats: stats, - }, closer + } } func (fbe *fakeBackupRestoreEnv) setStats(stats *backupstats.FakeStats) { diff --git a/go/vt/mysqlctl/backupengine.go b/go/vt/mysqlctl/backupengine.go index 1401195548a..5a79edbdde0 100644 --- a/go/vt/mysqlctl/backupengine.go +++ b/go/vt/mysqlctl/backupengine.go @@ -29,6 +29,7 @@ import ( "github.com/spf13/pflag" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl/backupstats" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" @@ -74,22 +75,25 @@ type BackupParams struct { IncrementalFromPos string // Stats let's backup engines report detailed backup timings. 
Stats backupstats.Stats + // UpgradeSafe indicates whether the backup is safe for upgrade and created with innodb_fast_shutdown=0 + UpgradeSafe bool } -func (b BackupParams) Copy() BackupParams { +func (b *BackupParams) Copy() BackupParams { return BackupParams{ - b.Cnf, - b.Mysqld, - b.Logger, - b.Concurrency, - b.HookExtraEnv, - b.TopoServer, - b.Keyspace, - b.Shard, - b.TabletAlias, - b.BackupTime, - b.IncrementalFromPos, - b.Stats, + Cnf: b.Cnf, + Mysqld: b.Mysqld, + Logger: b.Logger, + Concurrency: b.Concurrency, + HookExtraEnv: b.HookExtraEnv, + TopoServer: b.TopoServer, + Keyspace: b.Keyspace, + Shard: b.Shard, + TabletAlias: b.TabletAlias, + BackupTime: b.BackupTime, + IncrementalFromPos: b.IncrementalFromPos, + Stats: b.Stats, + UpgradeSafe: b.UpgradeSafe, } } @@ -117,33 +121,44 @@ type RestoreParams struct { StartTime time.Time // RestoreToPos hints that a point in time recovery is requested, to recover up to the specific given pos. // When empty, the restore is a normal from full backup - RestoreToPos mysql.Position + RestoreToPos replication.Position + // RestoreToTimestamp hints that a point in time recovery is requested, to recover up to, and excluding, the + // given timestamp. + // RestoreToTimestamp and RestoreToPos are mutually exclusive. + RestoreToTimestamp time.Time // When DryRun is set, no restore actually takes place; but some of its steps are validated. DryRun bool // Stats let's restore engines report detailed restore timings. 
Stats backupstats.Stats } -func (p RestoreParams) Copy() RestoreParams { +func (p *RestoreParams) Copy() RestoreParams { return RestoreParams{ - p.Cnf, - p.Mysqld, - p.Logger, - p.Concurrency, - p.HookExtraEnv, - p.DeleteBeforeRestore, - p.DbName, - p.Keyspace, - p.Shard, - p.StartTime, - p.RestoreToPos, - p.DryRun, - p.Stats, + Cnf: p.Cnf, + Mysqld: p.Mysqld, + Logger: p.Logger, + Concurrency: p.Concurrency, + HookExtraEnv: p.HookExtraEnv, + DeleteBeforeRestore: p.DeleteBeforeRestore, + DbName: p.DbName, + Keyspace: p.Keyspace, + Shard: p.Shard, + StartTime: p.StartTime, + RestoreToPos: p.RestoreToPos, + RestoreToTimestamp: p.RestoreToTimestamp, + DryRun: p.DryRun, + Stats: p.Stats, } } func (p *RestoreParams) IsIncrementalRecovery() bool { - return !p.RestoreToPos.IsZero() + if !p.RestoreToPos.IsZero() { + return true + } + if !p.RestoreToTimestamp.IsZero() { + return true + } + return false } // RestoreEngine is the interface to restore a backup with a given engine. @@ -235,6 +250,14 @@ func getBackupManifestInto(ctx context.Context, backup backupstorage.BackupHandl return nil } +// IncrementalBackupDetails lists some incremental backup specific information +type IncrementalBackupDetails struct { + FirstTimestamp string + FirstTimestampBinlog string + LastTimestamp string + LastTimestampBinlog string +} + // BackupManifest defines the common fields in the MANIFEST file. // All backup engines must include at least these fields. They are free to add // their own custom fields by embedding this struct anonymously into their own @@ -247,14 +270,17 @@ type BackupManifest struct { BackupMethod string // Position is the replication position at which the backup was taken. - Position mysql.Position + Position replication.Position // PurgedPosition stands for purged GTIDs, information that is necessary for PITR recovery. 
This is specific to MySQL56 - PurgedPosition mysql.Position + PurgedPosition replication.Position // FromPosition is only applicable to incremental backups, and stands for the position from // which incremental changes are backed up. - FromPosition mysql.Position + FromPosition replication.Position + + // FromBackup indicates the backup name on which this incremental backup is based, assuming this is an incremental backup with "auto" pos + FromBackup string // Incremental indicates whether this is an incremental backup Incremental bool @@ -274,6 +300,15 @@ type BackupManifest struct { Keyspace string Shard string + + // MySQLVersion is the version of MySQL when the backup was taken. + MySQLVersion string + + // UpgradeSafe indicates whether the backup is safe to use for an upgrade to a newer MySQL version + UpgradeSafe bool + + // IncrementalDetails is nil for non-incremental backups + IncrementalDetails *IncrementalBackupDetails } func (m *BackupManifest) HashKey() string { @@ -369,9 +404,16 @@ func (p *RestorePath) String() string { // FindLatestSuccessfulBackup returns the handle and manifest for the last good backup, // which can be either full or incremental -func FindLatestSuccessfulBackup(ctx context.Context, logger logutil.Logger, bhs []backupstorage.BackupHandle) (backupstorage.BackupHandle, *BackupManifest, error) { +func FindLatestSuccessfulBackup(ctx context.Context, logger logutil.Logger, bhs []backupstorage.BackupHandle, excludeBackupName string) (backupstorage.BackupHandle, *BackupManifest, error) { for index := len(bhs) - 1; index >= 0; index-- { bh := bhs[index] + if bh.Name() == excludeBackupName { + // skip this bh. Use case: in an incremental backup, as we look for previous successful backups, + // the new incremental backup handle is partial: the directory exists, it will show in ListBackups, but + // the MANIFEST file does not exist yet. So we avoid the errors/warnings associated with reading this partial backup, + // and just skip it. 
+ continue + } // Check that the backup MANIFEST exists and can be successfully decoded. bm, err := GetBackupManifest(ctx, bh) if err != nil { @@ -383,9 +425,32 @@ func FindLatestSuccessfulBackup(ctx context.Context, logger logutil.Logger, bhs return nil, nil, ErrNoCompleteBackup } +// FindLatestSuccessfulBackupPosition returns the position of the last known successful backup +func FindLatestSuccessfulBackupPosition(ctx context.Context, params BackupParams, excludeBackupName string) (backupName string, pos replication.Position, err error) { + bs, err := backupstorage.GetBackupStorage() + if err != nil { + return "", pos, err + } + defer bs.Close() + + // Backups are stored in a directory structure that starts with + // / + backupDir := GetBackupDir(params.Keyspace, params.Shard) + bhs, err := bs.ListBackups(ctx, backupDir) + if err != nil { + return "", pos, vterrors.Wrap(err, "ListBackups failed") + } + bh, manifest, err := FindLatestSuccessfulBackup(ctx, params.Logger, bhs, excludeBackupName) + if err != nil { + return "", pos, vterrors.Wrap(err, "FindLatestSuccessfulBackup failed") + } + pos = manifest.Position + return bh.Name(), pos, nil +} + // FindBackupToRestore returns a path, a sequence of backup handles, to be restored. // The returned handles stand for valid backups with complete manifests. 
-func FindBackupToRestore(ctx context.Context, params RestoreParams, bhs []backupstorage.BackupHandle) (*RestorePath, error) { +func FindBackupToRestore(ctx context.Context, params RestoreParams, bhs []backupstorage.BackupHandle) (restorePath *RestorePath, err error) { // if a StartTime is provided in params, then find a backup that was taken at or before that time checkBackupTime := !params.StartTime.IsZero() backupDir := GetBackupDir(params.Keyspace, params.Shard) @@ -393,83 +458,132 @@ func FindBackupToRestore(ctx context.Context, params RestoreParams, bhs []backup manifests := make([]*BackupManifest, len(bhs)) manifestHandleMap := NewManifestHandleMap() - fullBackupIndex := func() int { - for index := len(bhs) - 1; index >= 0; index-- { - bh := bhs[index] - // Check that the backup MANIFEST exists and can be successfully decoded. - bm, err := GetBackupManifest(ctx, bh) - if err != nil { - params.Logger.Warningf("Possibly incomplete backup %v in directory %v on BackupStorage: can't read MANIFEST: %v)", bh.Name(), backupDir, err) - continue - } - // the manifest is valid - manifests[index] = bm // manifests's order is insignificant, it will be sorted later on - manifestHandleMap.Map(bm, bh) - if bm.Incremental { - // We're looking for a full backup - continue - } + mysqlVersion, err := params.Mysqld.GetVersionString(ctx) + if err != nil { + return nil, err + } - var backupTime time.Time - if checkBackupTime { - backupTime, err = time.Parse(time.RFC3339, bm.BackupTime) - if err != nil { - params.Logger.Warningf("Restore: skipping backup %v/%v with invalid time %v: %v", backupDir, bh.Name(), bm.BackupTime, err) + // Let's first populate the manifests + for i, bh := range bhs { + // Check that the backup MANIFEST exists and can be successfully decoded. 
+ bm, err := GetBackupManifest(ctx, bh) + if err != nil { + params.Logger.Warningf("Possibly incomplete backup %v in directory %v on BackupStorage: can't read MANIFEST: %v)", bh.Name(), backupDir, err) + continue + } + // the manifest is valid + manifests[i] = bm // manifests's order is insignificant, it will be sorted later on + manifestHandleMap.Map(bm, bh) + } + restorePath = &RestorePath{ + manifestHandleMap: manifestHandleMap, + } + if !params.IsIncrementalRecovery() { + // incremental recovery has its own logic for searching the best full backup. Here we only deal with full backup recovery. + fullBackupIndex := func() int { + for index := len(manifests) - 1; index >= 0; index-- { + bm := manifests[index] + if bm == nil { continue } - } - - switch { - case checkBackupTime: - // restore to specific time - if backupTime.Equal(params.StartTime) || backupTime.Before(params.StartTime) { - params.Logger.Infof("Restore: found backup %v %v to restore using the specified timestamp of '%v'", bh.Directory(), bh.Name(), params.StartTime.Format(BackupTimestampFormat)) - return index + if bm.Incremental { + // We're looking for a full backup + continue + } + bh := manifestHandleMap.Handle(bm) + + // check if the backup can be used with this MySQL version. 
+ if bm.MySQLVersion != "" { + if err := validateMySQLVersionUpgradeCompatible(mysqlVersion, bm.MySQLVersion, bm.UpgradeSafe); err != nil { + params.Logger.Warningf("Skipping backup %v/%v with incompatible MySQL version %v (upgrade safe: %v): %v", backupDir, bh.Name(), bm.MySQLVersion, bm.UpgradeSafe, err) + continue + } } - case !params.RestoreToPos.IsZero(): - // restore to specific pos - if params.RestoreToPos.GTIDSet.Contains(bm.Position.GTIDSet) { - // this is the most recent backup which is <= desired position + + switch { + case checkBackupTime: + backupTime, err := ParseRFC3339(bm.BackupTime) + if err != nil { + params.Logger.Warningf("Restore: skipping backup %v/%v with invalid time %v: %v", backupDir, bh.Name(), bm.BackupTime, err) + continue + } + // restore to specific time + if backupTime.Equal(params.StartTime) || backupTime.Before(params.StartTime) { + params.Logger.Infof("Restore: found backup %v %v to restore using the specified timestamp of '%v'", bh.Directory(), bh.Name(), params.StartTime.Format(BackupTimestampFormat)) + return index + } + default: + // restore latest full backup + params.Logger.Infof("Restore: found latest backup %v %v to restore", bh.Directory(), bh.Name()) return index } - default: - // restore latest full backup - params.Logger.Infof("Restore: found latest backup %v %v to restore", bh.Directory(), bh.Name()) - return index } + return -1 + }() + if fullBackupIndex < 0 { + if checkBackupTime { + params.Logger.Errorf("No valid backup found before time %v", params.StartTime.Format(BackupTimestampFormat)) + } + // There is at least one attempted backup, but none could be read. + // This implies there is data we ought to have, so it's not safe to start + // up empty. 
+ return nil, ErrNoCompleteBackup } - return -1 - }() - if fullBackupIndex < 0 { - if checkBackupTime { - params.Logger.Errorf("No valid backup found before time %v", params.StartTime.Format(BackupTimestampFormat)) - } - // There is at least one attempted backup, but none could be read. - // This implies there is data we ought to have, so it's not safe to start - // up empty. - return nil, ErrNoCompleteBackup - } - // Anything taken before the full backup that we picked, is not of interest: - manifests = manifests[fullBackupIndex:] - restorePath := &RestorePath{ - manifestHandleMap: manifestHandleMap, - } - if params.RestoreToPos.IsZero() { // restoring from a single full backup: - restorePath.Add(manifests[0]) + restorePath.Add(manifests[fullBackupIndex]) return restorePath, nil } - // restore to a position (using incremental backups): + // restore to a position/timestamp (using incremental backups): // we calculate a possible restore path based on the manifests. The resulting manifests are // a sorted subsequence, with the full backup first, and zero or more incremental backups to follow. - manifests, err := FindPITRPath(params.RestoreToPos.GTIDSet, manifests) + switch { + case !params.RestoreToPos.IsZero(): + manifests, err = FindPITRPath(params.RestoreToPos.GTIDSet, manifests) + case !params.RestoreToTimestamp.IsZero(): + manifests, err = FindPITRToTimePath(params.RestoreToTimestamp, manifests) + } + restorePath.manifests = manifests if err != nil { return nil, err } - restorePath.manifests = manifests return restorePath, nil } +func validateMySQLVersionUpgradeCompatible(to string, from string, upgradeSafe bool) error { + // It's always safe to use the same version. 
+ if to == from { + return nil + } + + flavorTo, parsedTo, err := ParseVersionString(to) + if err != nil { + return err + } + + flavorFrom, parsedFrom, err := ParseVersionString(from) + if err != nil { + return err + } + + if flavorTo != flavorFrom { + return fmt.Errorf("cannot use backup between different flavors: %q vs. %q", from, to) + } + + if parsedTo == parsedFrom { + return nil + } + + if !parsedTo.atLeast(parsedFrom) { + return fmt.Errorf("running MySQL version %q is older than backup MySQL version %q", to, from) + } + + if upgradeSafe { + return nil + } + + return fmt.Errorf("running MySQL version %q is newer than backup MySQL version %q which is not safe to upgrade", to, from) +} + func prepareToRestore(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger) error { // shutdown mysqld if it is running logger.Infof("Restore: shutdown mysqld") diff --git a/go/vt/mysqlctl/backupstats/fake_stats.go b/go/vt/mysqlctl/backupstats/fake_stats.go index e8e84431eb9..29728d86db5 100644 --- a/go/vt/mysqlctl/backupstats/fake_stats.go +++ b/go/vt/mysqlctl/backupstats/fake_stats.go @@ -13,7 +13,7 @@ type FakeStats struct { Duration time.Duration } ScopeCalls [][]Scope - ScopeReturns []Stats + ScopeReturns []*FakeStats mutex sync.Mutex } diff --git a/go/vt/mysqlctl/backupstats/stats.go b/go/vt/mysqlctl/backupstats/stats.go index 1d4b643a0e9..6f64dec864f 100644 --- a/go/vt/mysqlctl/backupstats/stats.go +++ b/go/vt/mysqlctl/backupstats/stats.go @@ -21,7 +21,6 @@ import ( "time" "vitess.io/vitess/go/stats" - vtstats "vitess.io/vitess/go/stats" ) // Stats is a reporting interface meant to be shared among backup and restore @@ -52,9 +51,9 @@ type Stats interface { type noStats struct{} type scopedStats struct { - bytes *vtstats.CountersWithMultiLabels - count *vtstats.CountersWithMultiLabels - durationNs *vtstats.CountersWithMultiLabels + bytes *stats.CountersWithMultiLabels + count *stats.CountersWithMultiLabels + durationNs *stats.CountersWithMultiLabels 
labelValues []string } diff --git a/go/vt/mysqlctl/binlogs_gtid.go b/go/vt/mysqlctl/binlogs_gtid.go index 70f734b7cae..3ea48663578 100644 --- a/go/vt/mysqlctl/binlogs_gtid.go +++ b/go/vt/mysqlctl/binlogs_gtid.go @@ -21,8 +21,9 @@ import ( "fmt" "sort" "strings" + "time" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -54,8 +55,8 @@ func (p *BackupManifestPath) String() string { // possible, or is empty. func ChooseBinlogsForIncrementalBackup( ctx context.Context, - backupFromGTIDSet mysql.GTIDSet, - purgedGTIDSet mysql.GTIDSet, + backupFromGTIDSet replication.GTIDSet, + purgedGTIDSet replication.GTIDSet, binaryLogs []string, pgtids func(ctx context.Context, binlog string) (gtids string, err error), ) ( @@ -64,13 +65,13 @@ func ChooseBinlogsForIncrementalBackup( incrementalBackupToGTID string, err error, ) { - var prevGTIDsUnion mysql.GTIDSet + var prevGTIDsUnion replication.GTIDSet for i, binlog := range binaryLogs { previousGtids, err := pgtids(ctx, binlog) if err != nil { return nil, "", "", vterrors.Wrapf(err, "cannot get previous gtids for binlog %v", binlog) } - previousGTIDsPos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, previousGtids) + previousGTIDsPos, err := replication.ParsePosition(replication.Mysql56FlavorID, previousGtids) if err != nil { return nil, "", "", vterrors.Wrapf(err, "cannot decode binlog %s position in incremental backup: %v", binlog, previousGTIDsPos) } @@ -93,13 +94,21 @@ func ChooseBinlogsForIncrementalBackup( // know this when we look into the _next_ binlog file's Previous-GTIDs. continue } + // Got here? This means backupFromGTIDSet does not fully contain the current binlog's Previous-GTIDs. + // In other words, Previous-GTIDs have entries on top of backupFromGTIDSet. Which suggests that these + // entries were added by the previous binary log. if i == 0 { + // Ummm... there _is no_ previous binary log. 
return nil, "", "", vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "Required entries have been purged. Oldest binary log %v expects entries not found in backup pos. Expected pos=%v", binlog, previousGTIDsPos) } - if !prevGTIDsUnion.Union(purgedGTIDSet).Contains(backupFromGTIDSet) { + // The other thing to validate, is that we can't allow a situation where the backup-GTIDs have entries not covered + // by our binary log's Previous-GTIDs (padded with purged GTIDs). Because that means we can't possibly restore to + // such position. + prevGTIDsUnionPurged := prevGTIDsUnion.Union(purgedGTIDSet) + if !prevGTIDsUnionPurged.Contains(backupFromGTIDSet) { return nil, "", "", vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, - "Mismatching GTID entries. Requested backup pos has entries not found in the binary logs, and binary logs have entries not found in the requested backup pos. Neither fully contains the other. Requested pos=%v, binlog pos=%v", - backupFromGTIDSet, previousGTIDsPos.GTIDSet) + "Mismatching GTID entries. Requested backup pos has entries not found in the binary logs, and binary logs have entries not found in the requested backup pos. Neither fully contains the other.\n- Requested pos=%v\n- binlog pos=%v\n- purgedGTIDSet=%v\n- union=%v\n- union purged=%v", + backupFromGTIDSet, previousGTIDsPos.GTIDSet, purgedGTIDSet, prevGTIDsUnion, prevGTIDsUnionPurged) } // We begin with the previous binary log, and we ignore the last binary log, because it's still open and being written to. binaryLogsToBackup = binaryLogs[i-1 : len(binaryLogs)-1] @@ -130,7 +139,7 @@ func ChooseBinlogsForIncrementalBackup( // IsValidIncrementalBakcup determines whether the given manifest can be used to extend a backup // based on baseGTIDSet. The manifest must be able to pick up from baseGTIDSet, and must extend it by at least // one entry. 
-func IsValidIncrementalBakcup(baseGTIDSet mysql.GTIDSet, purgedGTIDSet mysql.GTIDSet, manifest *BackupManifest) bool { +func IsValidIncrementalBakcup(baseGTIDSet replication.GTIDSet, purgedGTIDSet replication.GTIDSet, manifest *BackupManifest) bool { if manifest == nil { return false } @@ -159,7 +168,7 @@ func IsValidIncrementalBakcup(baseGTIDSet mysql.GTIDSet, purgedGTIDSet mysql.GTI // - zero or more incremental backups // The path ends with restoreToGTIDSet or goes beyond it. No shorter path will do the same. // The function returns an error when a path cannot be found. -func FindPITRPath(restoreToGTIDSet mysql.GTIDSet, manifests [](*BackupManifest)) (shortestPath [](*BackupManifest), err error) { +func FindPITRPath(restoreToGTIDSet replication.GTIDSet, manifests [](*BackupManifest)) (shortestPath [](*BackupManifest), err error) { sortedManifests := make([](*BackupManifest), 0, len(manifests)) for _, m := range manifests { if m != nil { @@ -199,8 +208,8 @@ func FindPITRPath(restoreToGTIDSet mysql.GTIDSet, manifests [](*BackupManifest)) var validRestorePaths []BackupManifestPath // recursive function that searches for all possible paths: - var findPaths func(baseGTIDSet mysql.GTIDSet, pathManifests []*BackupManifest, remainingManifests []*BackupManifest) - findPaths = func(baseGTIDSet mysql.GTIDSet, pathManifests []*BackupManifest, remainingManifests []*BackupManifest) { + var findPaths func(baseGTIDSet replication.GTIDSet, pathManifests []*BackupManifest, remainingManifests []*BackupManifest) + findPaths = func(baseGTIDSet replication.GTIDSet, pathManifests []*BackupManifest, remainingManifests []*BackupManifest) { // The algorithm was first designed to find all possible paths. But then we recognized that it will be // doing excessive work. At this time we choose to end the search once we find the first valid path, even if // it's not the most optimal. 
The next "if" statement is the addition to the algorithm, where we suffice with @@ -244,3 +253,148 @@ func FindPITRPath(restoreToGTIDSet mysql.GTIDSet, manifests [](*BackupManifest)) } return shortestPath, nil } + +// FindPITRToTimePath evaluates the shortest path to recover a restoreToGTIDSet. The past is composed of: +// - a full backup, followed by: +// - zero or more incremental backups +// The path ends with restoreToGTIDSet or goes beyond it. No shorter path will do the same. +// The function returns an error when a path cannot be found. +func FindPITRToTimePath(restoreToTime time.Time, manifests [](*BackupManifest)) (shortestPath [](*BackupManifest), err error) { + restoreToTimeStr := FormatRFC3339(restoreToTime) + sortedManifests := make([](*BackupManifest), 0, len(manifests)) + for _, m := range manifests { + if m != nil { + sortedManifests = append(sortedManifests, m) + } + } + sort.SliceStable(sortedManifests, func(i, j int) bool { + return sortedManifests[j].Position.GTIDSet.Union(sortedManifests[i].PurgedPosition.GTIDSet).Contains(sortedManifests[i].Position.GTIDSet) + }) + mostRelevantFullBackupIndex := -1 // an invalid value + for i, manifest := range sortedManifests { + if manifest.Incremental { + continue + } + startTime, err := ParseRFC3339(manifest.BackupTime) + if err != nil { + return nil, vterrors.Wrapf(err, "parsing manifest BackupTime %s", manifest.BackupTime) + } + finishedTime, err := ParseRFC3339(manifest.FinishedTime) + if err != nil { + return nil, vterrors.Wrapf(err, "parsing manifest FinishedTime %s", manifest.FinishedTime) + } + var compareWithTime time.Time + switch manifest.BackupMethod { + case xtrabackupEngineName: + // Xtrabackup backups are true to the time they complete (the snapshot is taken at the very end). + // Therefore the finish time best represents the backup time. + compareWithTime = finishedTime + case builtinBackupEngineName: + // Builtin takes down the MySQL server. 
Hence the _start time_ represents the backup time best + compareWithTime = startTime + default: + compareWithTime = startTime + } + if restoreToTime.Before(compareWithTime) { + // We want a full backup whose time is _before_ restore-to-time, and we will top it with + // incremental restore via binlogs. + continue + } + mostRelevantFullBackupIndex = i + } + + if mostRelevantFullBackupIndex < 0 { + // No full backup prior to desired restore point... + return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no full backup found before timestmap %v", restoreToTimeStr) + } + // All that interests us starts with mostRelevantFullBackupIndex: that's where the full backup is, + // and any relevant incremental backups follow that point (because manifests are sorted by backup pos, ascending) + sortedManifests = sortedManifests[mostRelevantFullBackupIndex:] + // Of all relevant backups, we take the most recent one. + fullBackup := sortedManifests[0] + purgedGTIDSet := fullBackup.PurgedPosition.GTIDSet + + timeIsInRange := func(t, from, to time.Time) bool { + // integrity: + if to.Before(from) { + return false // bad input + } + if t.Before(from) { + return false + } + if t.After(to) { + return false + } + return true + } + + var validRestorePaths []BackupManifestPath + // recursive function that searches for all possible paths: + var findPaths func(baseGTIDSet replication.GTIDSet, pathManifests []*BackupManifest, remainingManifests []*BackupManifest) error + findPaths = func(baseGTIDSet replication.GTIDSet, pathManifests []*BackupManifest, remainingManifests []*BackupManifest) error { + // The algorithm was first designed to find all possible paths. But then we recognized that it will be + // doing excessive work. At this time we choose to end the search once we find the first valid path, even if + // it's not the most optimal. The next "if" statement is the addition to the algorithm, where we suffice with + // a single result. 
+ if len(validRestorePaths) > 0 { + return nil + } + // remove the above if you wish to explore all paths. + lastManifest := pathManifests[len(pathManifests)-1] + if lastManifest.Incremental { + lastManifestIncrementalDetails := lastManifest.IncrementalDetails + + firstTimestamp, err := ParseRFC3339(lastManifestIncrementalDetails.FirstTimestamp) + if err != nil { + return err + } + if restoreToTime.Before(firstTimestamp) { + // the restore-to-time falls between previous manifest's timestamp (whether previous manifest is a + // full backup or incremental backup is not important), and this manifest's first-timestamp. + // This means the previous manifest is the end of a valid restore path. We couldn't know it back then. + validRestorePaths = append(validRestorePaths, pathManifests[0:len(pathManifests)-1]) + return nil + } + lastTimestamp, err := ParseRFC3339(lastManifestIncrementalDetails.LastTimestamp) + if err != nil { + return err + } + if timeIsInRange(restoreToTime, firstTimestamp, lastTimestamp) { + // successful end of path. Update list of successful paths + validRestorePaths = append(validRestorePaths, pathManifests) + return nil + } + } + if len(remainingManifests) == 0 { + // end of the road. No possibilities from here. 
+ return nil + } + // if the next manifest is eligible to be part of the path, try it out + if IsValidIncrementalBakcup(baseGTIDSet, purgedGTIDSet, remainingManifests[0]) { + nextGTIDSet := baseGTIDSet.Union(remainingManifests[0].Position.GTIDSet) + findPaths(nextGTIDSet, append(pathManifests, remainingManifests[0]), remainingManifests[1:]) + } + // also, try without the next manifest + findPaths(baseGTIDSet, pathManifests, remainingManifests[1:]) + return nil + } + // find all paths, entry point + if err := findPaths(fullBackup.Position.GTIDSet, sortedManifests[0:1], sortedManifests[1:]); err != nil { + return nil, err + } + if len(validRestorePaths) == 0 { + return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no path found that leads to timestamp %v", restoreToTimeStr) + } + // Now find a shortest path + for i := range validRestorePaths { + path := validRestorePaths[i] + if shortestPath == nil { + shortestPath = path + continue + } + if len(path) < len(shortestPath) { + shortestPath = path + } + } + return shortestPath, nil +} diff --git a/go/vt/mysqlctl/binlogs_gtid_test.go b/go/vt/mysqlctl/binlogs_gtid_test.go index c39047d10ea..655208e908e 100644 --- a/go/vt/mysqlctl/binlogs_gtid_test.go +++ b/go/vt/mysqlctl/binlogs_gtid_test.go @@ -24,11 +24,12 @@ import ( "context" "fmt" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" ) func TestChooseBinlogsForIncrementalBackup(t *testing.T) { @@ -111,6 +112,46 @@ func TestChooseBinlogsForIncrementalBackup(t *testing.T) { backupPos: "16b1039f-0000-0000-0000-000000000000:1-63", expectError: "Mismatching GTID entries", }, + { + name: "empty previous GTIDs in first binlog with gap, with good backup pos", + previousGTIDs: map[string]string{ + "vt-bin.000001": "", + "vt-bin.000002": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-60", + "vt-bin.000003": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-60", + 
"vt-bin.000004": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-78", + "vt-bin.000005": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-243", + "vt-bin.000006": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-331", + }, + backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-78", + expectBinlogs: []string{"vt-bin.000004", "vt-bin.000005"}, + }, + { + name: "empty previous GTIDs in first binlog with gap, and without gtid_purged", + previousGTIDs: map[string]string{ + "vt-bin.000001": "", + "vt-bin.000002": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-60", + "vt-bin.000003": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-60", + "vt-bin.000004": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-78", + "vt-bin.000005": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-243", + "vt-bin.000006": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-331", + }, + backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-78", + expectError: "Mismatching GTID entries", + }, + { + name: "empty previous GTIDs in first binlog but with proper gtid_purged", + previousGTIDs: map[string]string{ + "vt-bin.000001": "", + "vt-bin.000002": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-60", + "vt-bin.000003": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-60", + "vt-bin.000004": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-78", + "vt-bin.000005": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-243", + "vt-bin.000006": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-331", + }, + backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-78", + gtidPurged: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-40", + expectBinlogs: []string{"vt-bin.000004", "vt-bin.000005"}, + }, { name: "empty previous GTIDs in first binlog covering backup pos", previousGTIDs: map[string]string{ @@ -230,9 +271,9 @@ func TestChooseBinlogsForIncrementalBackup(t *testing.T) { } for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { - backupPos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, tc.backupPos) + backupPos, err := replication.ParsePosition(replication.Mysql56FlavorID, tc.backupPos) require.NoError(t, err) - 
gtidPurged, err := mysql.ParsePosition(mysql.Mysql56FlavorID, tc.gtidPurged) + gtidPurged, err := replication.ParsePosition(replication.Mysql56FlavorID, tc.gtidPurged) require.NoError(t, err) binlogsToBackup, fromGTID, toGTID, err := ChooseBinlogsForIncrementalBackup( context.Background(), @@ -267,8 +308,8 @@ func TestChooseBinlogsForIncrementalBackup(t *testing.T) { func TestIsValidIncrementalBakcup(t *testing.T) { incrementalManifest := func(backupPos string, backupFromPos string) *BackupManifest { return &BackupManifest{ - Position: mysql.MustParsePosition(mysql.Mysql56FlavorID, fmt.Sprintf("16b1039f-22b6-11ed-b765-0a43f95f28a3:%s", backupPos)), - FromPosition: mysql.MustParsePosition(mysql.Mysql56FlavorID, fmt.Sprintf("16b1039f-22b6-11ed-b765-0a43f95f28a3:%s", backupFromPos)), + Position: replication.MustParsePosition(replication.Mysql56FlavorID, fmt.Sprintf("16b1039f-22b6-11ed-b765-0a43f95f28a3:%s", backupPos)), + FromPosition: replication.MustParsePosition(replication.Mysql56FlavorID, fmt.Sprintf("16b1039f-22b6-11ed-b765-0a43f95f28a3:%s", backupFromPos)), Incremental: true, } } @@ -344,9 +385,9 @@ func TestIsValidIncrementalBakcup(t *testing.T) { } for i, tc := range tt { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - basePos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, tc.baseGTID) + basePos, err := replication.ParsePosition(replication.Mysql56FlavorID, tc.baseGTID) require.NoError(t, err) - purgedPos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, tc.purgedGTID) + purgedPos, err := replication.ParsePosition(replication.Mysql56FlavorID, tc.purgedGTID) require.NoError(t, err) isValid := IsValidIncrementalBakcup(basePos.GTIDSet, purgedPos.GTIDSet, incrementalManifest(tc.backupPos, tc.backupFromPos)) assert.Equal(t, tc.expectIsValid, isValid) @@ -355,8 +396,8 @@ func TestIsValidIncrementalBakcup(t *testing.T) { } func TestFindPITRPath(t *testing.T) { - generatePosition := func(posRange string) mysql.Position { - return 
mysql.MustParsePosition(mysql.Mysql56FlavorID, fmt.Sprintf("16b1039f-22b6-11ed-b765-0a43f95f28a3:%s", posRange)) + generatePosition := func(posRange string) replication.Position { + return replication.MustParsePosition(replication.Mysql56FlavorID, fmt.Sprintf("16b1039f-22b6-11ed-b765-0a43f95f28a3:%s", posRange)) } fullManifest := func(backupPos string) *BackupManifest { return &BackupManifest{ @@ -546,17 +587,17 @@ func TestFindPITRPath(t *testing.T) { for i := range fullBackups { var err error fullBackup := fullBackups[i] - fullBackup.PurgedPosition, err = mysql.ParsePosition(mysql.Mysql56FlavorID, tc.purgedGTID) + fullBackup.PurgedPosition, err = replication.ParsePosition(replication.Mysql56FlavorID, tc.purgedGTID) require.NoError(t, err) defer func() { - fullBackup.PurgedPosition = mysql.Position{} + fullBackup.PurgedPosition = replication.Position{} }() } var manifests []*BackupManifest manifests = append(manifests, fullBackups...) manifests = append(manifests, tc.incrementalBackups...) 
- restorePos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, tc.restoreGTID) + restorePos, err := replication.ParsePosition(replication.Mysql56FlavorID, tc.restoreGTID) require.NoErrorf(t, err, "%v", err) path, err := FindPITRPath(restorePos.GTIDSet, manifests) if tc.expectError != "" { @@ -582,3 +623,297 @@ func TestFindPITRPath(t *testing.T) { }) } } + +func TestFindPITRToTimePath(t *testing.T) { + generatePosition := func(posRange string) replication.Position { + return replication.MustParsePosition(replication.Mysql56FlavorID, fmt.Sprintf("16b1039f-22b6-11ed-b765-0a43f95f28a3:%s", posRange)) + } + fullManifest := func(backupPos string, timeStr string) *BackupManifest { + _, err := ParseRFC3339(timeStr) + require.NoError(t, err) + return &BackupManifest{ + BackupMethod: builtinBackupEngineName, + Position: generatePosition(backupPos), + BackupTime: timeStr, + FinishedTime: timeStr, + } + } + incrementalManifest := func(backupPos string, backupFromPos string, firstTimestampStr string, lastTimestampStr string) *BackupManifest { + firstTimestamp, err := ParseRFC3339(firstTimestampStr) + require.NoError(t, err) + lastTimestamp, err := ParseRFC3339(lastTimestampStr) + require.NoError(t, err) + + return &BackupManifest{ + Position: generatePosition(backupPos), + FromPosition: generatePosition(backupFromPos), + Incremental: true, + IncrementalDetails: &IncrementalBackupDetails{ + FirstTimestamp: FormatRFC3339(firstTimestamp), + LastTimestamp: FormatRFC3339(lastTimestamp), + }, + } + } + + fullManifests := map[string]*BackupManifest{ + "1-50": fullManifest("1-50", "2020-02-02T02:20:20.000000Z"), + "1-5": fullManifest("1-5", "2020-02-02T02:01:20.000000Z"), + "1-80": fullManifest("1-80", "2020-02-02T03:31:00.000000Z"), + "1-70": fullManifest("1-70", "2020-02-02T03:10:01.000000Z"), + "1-70b": fullManifest("1-70", "2020-02-02T03:10:11.000000Z"), + } + fullBackups := []*BackupManifest{ + fullManifests["1-50"], + fullManifests["1-5"], + fullManifests["1-80"], + 
fullManifests["1-70"], + fullManifests["1-70b"], + } + incrementalManifests := map[string]*BackupManifest{ + "1-34:1-5": incrementalManifest("1-34", "1-5", "2020-02-02T02:01:44.000000Z", "2020-02-02T02:17:00.000000Z"), + "1-38:1-34": incrementalManifest("1-38", "1-34", "2020-02-02T02:17:05.000000Z", "2020-02-02T02:18:00.000000Z"), + "1-52:1-35": incrementalManifest("1-52", "1-35", "2020-02-02T02:17:59.000000Z", "2020-02-02T02:22:00.000000Z"), + "1-60:1-50": incrementalManifest("1-60", "1-50", "2020-02-02T02:20:21.000000Z", "2020-02-02T02:47:20.000000Z"), + "1-70:1-60": incrementalManifest("1-70", "1-60", "2020-02-02T02:47:20.000000Z", "2020-02-02T03:10:00.700000Z"), + "1-82:1-70": incrementalManifest("1-82", "1-70", "2020-02-02T03:10:11.000000Z", "2020-02-02T03:39:09.000000Z"), + "1-92:1-79": incrementalManifest("1-92", "1-79", "2020-02-02T03:37:07.000000Z", "2020-02-02T04:04:04.000000Z"), + "1-95:1-89": incrementalManifest("1-95", "1-89", "2020-02-02T03:59:05.000000Z", "2020-02-02T04:15:00.000000Z"), + } + incrementalBackups := []*BackupManifest{ + incrementalManifests["1-34:1-5"], + incrementalManifests["1-38:1-34"], + incrementalManifests["1-52:1-35"], + incrementalManifests["1-60:1-50"], + incrementalManifests["1-70:1-60"], + incrementalManifests["1-82:1-70"], + incrementalManifests["1-92:1-79"], + incrementalManifests["1-95:1-89"], + } + incrementalBackupName := func(manifest *BackupManifest) string { + for k, v := range incrementalManifests { + if v == manifest { + return k + } + } + return "unknown" + } + tt := []struct { + name string + restoreToTimestamp string + purgedGTID string + incrementalBackups []*BackupManifest + expectFullManifest *BackupManifest + expectIncrementalManifests []*BackupManifest + expectError string + }{ + { + name: "full is enough", + restoreToTimestamp: "2020-02-02T02:01:20.000000Z", + expectFullManifest: fullManifests["1-5"], + expectIncrementalManifests: []*BackupManifest{}, + }, + { + name: "full is still enough", + 
restoreToTimestamp: "2020-02-02T02:01:41.000000Z", + expectFullManifest: fullManifests["1-5"], + expectIncrementalManifests: []*BackupManifest{}, + }, + { + name: "full is just not enough", + restoreToTimestamp: "2020-02-02T02:01:44.000000Z", + expectFullManifest: fullManifests["1-5"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-34:1-5"], + }, + }, + { + name: "just one", + restoreToTimestamp: "2020-02-02T02:20:21.000000Z", + expectFullManifest: fullManifests["1-50"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-52:1-35"], + }, + }, + { + name: "two", + restoreToTimestamp: "2020-02-02T02:23:23.000000Z", + expectFullManifest: fullManifests["1-50"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-52:1-35"], + incrementalManifests["1-60:1-50"], + }, + }, + { + name: "three", + restoreToTimestamp: "2020-02-02T02:55:55.000000Z", + expectFullManifest: fullManifests["1-50"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-52:1-35"], + incrementalManifests["1-60:1-50"], + incrementalManifests["1-70:1-60"], + }, + }, + { + name: "still three", + restoreToTimestamp: "2020-02-02T03:10:00.600000Z", + expectFullManifest: fullManifests["1-50"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-52:1-35"], + incrementalManifests["1-60:1-50"], + incrementalManifests["1-70:1-60"], + }, + }, + { + name: "and still three", + restoreToTimestamp: "2020-02-02T03:10:00.700000Z", + expectFullManifest: fullManifests["1-50"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-52:1-35"], + incrementalManifests["1-60:1-50"], + incrementalManifests["1-70:1-60"], + }, + }, + { + name: "and still three, exceeding binlog last timestamp", + restoreToTimestamp: "2020-02-02T03:10:00.800000Z", + expectFullManifest: fullManifests["1-50"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-52:1-35"], + 
incrementalManifests["1-60:1-50"], + incrementalManifests["1-70:1-60"], + }, + }, + { + name: "next backup 1-70", + restoreToTimestamp: "2020-02-02T03:10:01.000000Z", + expectFullManifest: fullManifests["1-70"], + expectIncrementalManifests: []*BackupManifest{}, + }, + { + name: "next backup 1-70 with one binlog", + restoreToTimestamp: "2020-02-02T03:10:13.000000Z", + expectFullManifest: fullManifests["1-70"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-82:1-70"], + }, + }, + { + name: "next backup 1-70b, included first binlog", + restoreToTimestamp: "2020-02-02T03:10:11.000000Z", + expectFullManifest: fullManifests["1-70b"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-82:1-70"], + }, + }, + { + name: "next backup 1-70b, still included first binlog", + restoreToTimestamp: "2020-02-02T03:20:11.000000Z", + expectFullManifest: fullManifests["1-70b"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-82:1-70"], + }, + }, + { + name: "1-80 and two binlogs", + restoreToTimestamp: "2020-02-02T04:00:00.000000Z", + expectFullManifest: fullManifests["1-80"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-82:1-70"], + incrementalManifests["1-92:1-79"], + }, + }, + { + name: "1-80 and all remaining binlogs", + restoreToTimestamp: "2020-02-02T04:10:00.000000Z", + expectFullManifest: fullManifests["1-80"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-82:1-70"], + incrementalManifests["1-92:1-79"], + incrementalManifests["1-95:1-89"], + }, + }, + { + name: "no incremental backup reaches this timestamp", + restoreToTimestamp: "2020-02-02T07:07:07.000000Z", + expectError: "no path found", + }, + { + name: "sooner than any full backup", + restoreToTimestamp: "2020-02-02T01:59:59.000000Z", + expectError: "no full backup", + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + if tc.incrementalBackups == nil { + 
tc.incrementalBackups = incrementalBackups + } + for i := range fullBackups { + var err error + fullBackup := fullBackups[i] + fullBackup.PurgedPosition, err = replication.ParsePosition(replication.Mysql56FlavorID, tc.purgedGTID) + require.NoError(t, err) + defer func() { + fullBackup.PurgedPosition = replication.Position{} + }() + } + var manifests []*BackupManifest + manifests = append(manifests, fullBackups...) + manifests = append(manifests, tc.incrementalBackups...) + + restoreToTime, err := ParseRFC3339(tc.restoreToTimestamp) + require.NoError(t, err) + require.False(t, restoreToTime.IsZero()) + + path, err := FindPITRToTimePath(restoreToTime, manifests) + if tc.expectError != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.expectError) + return + } + require.NoError(t, err) + require.NotEmpty(t, path) + // the path always consists of one full backup and zero or more incremental backups + fullBackup := path[0] + require.False(t, fullBackup.Incremental) + for _, manifest := range path[1:] { + require.True(t, manifest.Incremental) + } + assert.Equal(t, tc.expectFullManifest.Position.GTIDSet, fullBackup.Position.GTIDSet) + if tc.expectIncrementalManifests == nil { + tc.expectIncrementalManifests = []*BackupManifest{} + } + expected := BackupManifestPath(tc.expectIncrementalManifests) + got := BackupManifestPath(path[1:]) + gotNames := []string{} + for _, manifest := range got { + gotNames = append(gotNames, incrementalBackupName(manifest)) + } + assert.Equal(t, expected, got, "got names: %v", gotNames) + }) + } + t.Run("iterate all valid timestamps", func(t *testing.T) { + var manifests []*BackupManifest + manifests = append(manifests, fullBackups...) + manifests = append(manifests, incrementalBackups...) 
+ + firstTimestamp, err := ParseRFC3339(fullManifests["1-5"].BackupTime) + require.NoError(t, err) + lastTimestamp, err := ParseRFC3339(incrementalManifests["1-95:1-89"].IncrementalDetails.LastTimestamp) + require.NoError(t, err) + + for restoreToTime := firstTimestamp; !restoreToTime.After(lastTimestamp); restoreToTime = restoreToTime.Add(10 * time.Second) { + testName := fmt.Sprintf("restore to %v", restoreToTime) + t.Run(testName, func(t *testing.T) { + path, err := FindPITRToTimePath(restoreToTime, manifests) + require.NoError(t, err) + require.NotEmpty(t, path) + fullBackup := path[0] + require.False(t, fullBackup.Incremental) + for _, manifest := range path[1:] { + require.True(t, manifest.Incremental) + } + }) + } + }) +} diff --git a/go/vt/mysqlctl/builtinbackupengine.go b/go/vt/mysqlctl/builtinbackupengine.go index cf8c6a1a564..e46932bcd51 100644 --- a/go/vt/mysqlctl/builtinbackupengine.go +++ b/go/vt/mysqlctl/builtinbackupengine.go @@ -21,6 +21,7 @@ import ( "context" "encoding/hex" "encoding/json" + "errors" "fmt" "hash" "hash/crc32" @@ -37,18 +38,22 @@ import ( "vitess.io/vitess/go/ioutil" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" stats "vitess.io/vitess/go/vt/mysqlctl/backupstats" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" - tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" - "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tmclient" + + mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + "vitess.io/vitess/go/vt/proto/vtrpc" ) const ( @@ -209,6 +214,22 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP return 
be.executeFullBackup(ctx, params, bh) } +// getIncrementalFromPosGTIDSet turns the given string into a valid Mysql56GTIDSet +func getIncrementalFromPosGTIDSet(incrementalFromPos string) (replication.Mysql56GTIDSet, error) { + pos, err := replication.DecodePositionDefaultFlavor(incrementalFromPos, replication.Mysql56FlavorID) + if err != nil { + return nil, vterrors.Wrapf(err, "cannot decode position in incremental backup: %v", incrementalFromPos) + } + if !pos.MatchesFlavor(replication.Mysql56FlavorID) { + return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "incremental backup only supports MySQL GTID positions. Got: %v", incrementalFromPos) + } + ifPosGTIDSet, ok := pos.GTIDSet.(replication.Mysql56GTIDSet) + if !ok { + return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cannot get MySQL GTID value: %v", pos) + } + return ifPosGTIDSet, nil +} + // executeIncrementalBackup runs an incremental backup, based on given 'incremental_from_pos', which can be: // - A valid position // - "auto", indicating the incremental backup should begin with last successful backup end position. 
@@ -219,62 +240,38 @@ func (be *BuiltinBackupEngine) executeIncrementalBackup(ctx context.Context, par if err != nil { return false, vterrors.Wrap(err, "can't get server uuid") } - // @@gtid_purged - getPurgedGTIDSet := func() (mysql.Position, mysql.Mysql56GTIDSet, error) { - gtidPurged, err := params.Mysqld.GetGTIDPurged(ctx) - if err != nil { - return gtidPurged, nil, vterrors.Wrap(err, "can't get @@gtid_purged") - } - purgedGTIDSet, ok := gtidPurged.GTIDSet.(mysql.Mysql56GTIDSet) - if !ok { - return gtidPurged, nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cannot get MySQL GTID purged value: %v", gtidPurged) - } - return gtidPurged, purgedGTIDSet, nil - } - gtidPurged, purgedGTIDSet, err := getPurgedGTIDSet() + mysqlVersion, err := params.Mysqld.GetVersionString(ctx) if err != nil { - return false, err + return false, vterrors.Wrap(err, "can't get MySQL version") } + var fromBackupName string if params.IncrementalFromPos == autoIncrementalFromPos { params.Logger.Infof("auto evaluating incremental_from_pos") - bs, err := backupstorage.GetBackupStorage() + backupName, pos, err := FindLatestSuccessfulBackupPosition(ctx, params, bh.Name()) if err != nil { return false, err } - defer bs.Close() - - // Backups are stored in a directory structure that starts with - // / - backupDir := GetBackupDir(params.Keyspace, params.Shard) - bhs, err := bs.ListBackups(ctx, backupDir) - if err != nil { - return false, vterrors.Wrap(err, "ListBackups failed") - } - _, manifest, err := FindLatestSuccessfulBackup(ctx, params.Logger, bhs) - if err != nil { - return false, vterrors.Wrap(err, "FindLatestSuccessfulBackup failed") - } - params.IncrementalFromPos = mysql.EncodePosition(manifest.Position) + fromBackupName = backupName + params.IncrementalFromPos = replication.EncodePosition(pos) params.Logger.Infof("auto evaluated incremental_from_pos: %s", params.IncrementalFromPos) } - // params.IncrementalFromPos is a string. 
We want to turn that into a MySQL GTID - getIncrementalFromPosGTIDSet := func() (mysql.Mysql56GTIDSet, error) { - pos, err := mysql.DecodePosition(params.IncrementalFromPos) + // @@gtid_purged + getPurgedGTIDSet := func() (replication.Position, replication.Mysql56GTIDSet, error) { + gtidPurged, err := params.Mysqld.GetGTIDPurged(ctx) if err != nil { - return nil, vterrors.Wrapf(err, "cannot decode position in incremental backup: %v", params.IncrementalFromPos) - } - if !pos.MatchesFlavor(mysql.Mysql56FlavorID) { - return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "incremental backup only supports MySQL GTID positions. Got: %v", params.IncrementalFromPos) + return gtidPurged, nil, vterrors.Wrap(err, "can't get @@gtid_purged") } - ifPosGTIDSet, ok := pos.GTIDSet.(mysql.Mysql56GTIDSet) + purgedGTIDSet, ok := gtidPurged.GTIDSet.(replication.Mysql56GTIDSet) if !ok { - return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cannot get MySQL GTID value: %v", pos) + return gtidPurged, nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cannot get MySQL GTID purged value: %v", gtidPurged) } - return ifPosGTIDSet, nil + return gtidPurged, purgedGTIDSet, nil } - backupFromGTIDSet, err := getIncrementalFromPosGTIDSet() + + // params.IncrementalFromPos is a string. We want to turn that into a MySQL GTID + backupFromGTIDSet, err := getIncrementalFromPosGTIDSet(params.IncrementalFromPos) if err != nil { return false, err } @@ -294,6 +291,13 @@ func (be *BuiltinBackupEngine) executeIncrementalBackup(ctx context.Context, par if err != nil { return false, vterrors.Wrapf(err, "cannot get binary logs in incremental backup") } + // gtid_purged is important information. The restore flow uses this info to to complement binary logs' Previous-GTIDs. + // It is important to only get gtid_purged _after_ we've rotated into the new binary log, because the `FLUSH BINARY LOGS` + // command may also purge old logs, hence affecting the value of gtid_purged. 
+ gtidPurged, purgedGTIDSet, err := getPurgedGTIDSet() + if err != nil { + return false, err + } previousGTIDs := map[string]string{} getBinlogPreviousGTIDs := func(ctx context.Context, binlog string) (gtids string, err error) { gtids, ok := previousGTIDs[binlog] @@ -312,14 +316,45 @@ func (be *BuiltinBackupEngine) executeIncrementalBackup(ctx context.Context, par if err != nil { return false, vterrors.Wrapf(err, "cannot get binary logs to backup in incremental backup") } - incrementalBackupFromPosition, err := mysql.ParsePosition(mysql.Mysql56FlavorID, incrementalBackupFromGTID) + incrementalBackupFromPosition, err := replication.ParsePosition(replication.Mysql56FlavorID, incrementalBackupFromGTID) if err != nil { return false, vterrors.Wrapf(err, "cannot parse position %v", incrementalBackupFromGTID) } - incrementalBackupToPosition, err := mysql.ParsePosition(mysql.Mysql56FlavorID, incrementalBackupToGTID) + incrementalBackupToPosition, err := replication.ParsePosition(replication.Mysql56FlavorID, incrementalBackupToGTID) if err != nil { return false, vterrors.Wrapf(err, "cannot parse position %v", incrementalBackupToGTID) } + // The backup position is the GTISset of the last binary log (taken from Previous-GTIDs of the one-next binary log), and we + // also include gtid_purged ; this complies with the "standard" way MySQL "thinks" about GTIDs: there's gtid_executed, which includes + // everything that's ever been applied, and a subset of that is gtid_purged, which are the event no longer available in binary logs. + // When we consider Vitess incremental backups, what's important for us is "what's the GTIDSet that's true when this backup was taken, + // and which will be true when we restore this backup". The answer to this is the GTIDSet that includes the purged GTIDs. + // It's also nice for incremental backups that are taken on _other_ tablets, so that they don't need to understand what exactly was purged + // on _this_ tablet. 
They don't care, all they want to know is "what GTIDSet can we get from this". + incrementalBackupToPosition.GTIDSet = incrementalBackupToPosition.GTIDSet.Union(gtidPurged.GTIDSet) + req := &mysqlctlpb.ReadBinlogFilesTimestampsRequest{} + for _, binlogFile := range binaryLogsToBackup { + fe := FileEntry{Base: backupBinlogDir, Name: binlogFile} + fullPath, err := fe.fullPath(params.Cnf) + if err != nil { + return false, err + } + req.BinlogFileNames = append(req.BinlogFileNames, fullPath) + } + resp, err := params.Mysqld.ReadBinlogFilesTimestamps(ctx, req) + if err != nil { + return false, vterrors.Wrapf(err, "reading timestamps from binlog files %v", binaryLogsToBackup) + } + if resp.FirstTimestampBinlog == "" || resp.LastTimestampBinlog == "" { + return false, vterrors.Errorf(vtrpc.Code_ABORTED, "empty binlog name in response. Request=%v, Response=%v", req, resp) + } + log.Infof("ReadBinlogFilesTimestampsResponse: %+v", resp) + incrDetails := &IncrementalBackupDetails{ + FirstTimestamp: FormatRFC3339(protoutil.TimeFromProto(resp.FirstTimestamp).UTC()), + FirstTimestampBinlog: filepath.Base(resp.FirstTimestampBinlog), + LastTimestamp: FormatRFC3339(protoutil.TimeFromProto(resp.LastTimestamp).UTC()), + LastTimestampBinlog: filepath.Base(resp.LastTimestampBinlog), + } // It's worthwhile we explain the difference between params.IncrementalFromPos and incrementalBackupFromPosition. // params.IncrementalFromPos is supplied by the user. They want an incremental backup that covers that position. // However, we implement incremental backups by copying complete binlog files. That position could potentially @@ -329,7 +364,7 @@ func (be *BuiltinBackupEngine) executeIncrementalBackup(ctx context.Context, par // incrementalBackupFromGTID is the "previous GTIDs" of the first binlog file we back up. // It is a fact that incrementalBackupFromGTID is earlier or equal to params.IncrementalFromPos. 
// In the backup manifest file, we document incrementalBackupFromGTID, not the user's requested position. - if err := be.backupFiles(ctx, params, bh, incrementalBackupToPosition, gtidPurged, incrementalBackupFromPosition, binaryLogsToBackup, serverUUID); err != nil { + if err := be.backupFiles(ctx, params, bh, incrementalBackupToPosition, gtidPurged, incrementalBackupFromPosition, fromBackupName, binaryLogsToBackup, serverUUID, mysqlVersion, incrDetails); err != nil { return false, err } return true, nil @@ -348,7 +383,7 @@ func (be *BuiltinBackupEngine) executeFullBackup(ctx context.Context, params Bac sourceIsPrimary := false superReadOnly := true //nolint readOnly := true //nolint - var replicationPosition mysql.Position + var replicationPosition replication.Position semiSyncSource, semiSyncReplica := params.Mysqld.SemiSyncEnabled() // See if we need to restart replication after backup. @@ -419,6 +454,18 @@ func (be *BuiltinBackupEngine) executeFullBackup(ctx context.Context, params Bac return false, vterrors.Wrap(err, "can't get server uuid") } + mysqlVersion, err := params.Mysqld.GetVersionString(ctx) + if err != nil { + return false, vterrors.Wrap(err, "can't get MySQL version") + } + + // check if we need to set innodb_fast_shutdown=0 for a backup safe for upgrades + if params.UpgradeSafe { + if _, err := params.Mysqld.FetchSuperQuery(ctx, "SET GLOBAL innodb_fast_shutdown=0"); err != nil { + return false, vterrors.Wrapf(err, "failed to disable fast shutdown") + } + } + // shutdown mysqld shutdownCtx, cancel := context.WithTimeout(ctx, BuiltinBackupMysqldTimeout) err = params.Mysqld.Shutdown(shutdownCtx, params.Cnf, true) @@ -428,7 +475,7 @@ func (be *BuiltinBackupEngine) executeFullBackup(ctx context.Context, params Bac } // Backup everything, capture the error. 
- backupErr := be.backupFiles(ctx, params, bh, replicationPosition, gtidPurgedPosition, mysql.Position{}, nil, serverUUID) + backupErr := be.backupFiles(ctx, params, bh, replicationPosition, gtidPurgedPosition, replication.Position{}, "", nil, serverUUID, mysqlVersion, nil) usable := backupErr == nil // Try to restart mysqld, use background context in case we timed out the original context @@ -508,11 +555,14 @@ func (be *BuiltinBackupEngine) backupFiles( ctx context.Context, params BackupParams, bh backupstorage.BackupHandle, - replicationPosition mysql.Position, - purgedPosition mysql.Position, - fromPosition mysql.Position, + backupPosition replication.Position, + purgedPosition replication.Position, + fromPosition replication.Position, + fromBackupName string, binlogFiles []string, serverUUID string, + mysqlVersion string, + incrDetails *IncrementalBackupDetails, ) (finalErr error) { // Get the files to backup. // We don't care about totalSize because we add each file separately. @@ -587,7 +637,8 @@ func (be *BuiltinBackupEngine) backupFiles( return vterrors.Wrapf(err, "cannot add %v to backup", backupManifestFileName) } defer func() { - if closeErr := wc.Close(); finalErr == nil { + closeErr := wc.Close() + if finalErr == nil { finalErr = closeErr } }() @@ -596,17 +647,21 @@ func (be *BuiltinBackupEngine) backupFiles( bm := &builtinBackupManifest{ // Common base fields BackupManifest: BackupManifest{ - BackupMethod: builtinBackupEngineName, - Position: replicationPosition, - PurgedPosition: purgedPosition, - FromPosition: fromPosition, - Incremental: !fromPosition.IsZero(), - ServerUUID: serverUUID, - TabletAlias: params.TabletAlias, - Keyspace: params.Keyspace, - Shard: params.Shard, - BackupTime: params.BackupTime.UTC().Format(time.RFC3339), - FinishedTime: time.Now().UTC().Format(time.RFC3339), + BackupMethod: builtinBackupEngineName, + Position: backupPosition, + PurgedPosition: purgedPosition, + FromPosition: fromPosition, + FromBackup: fromBackupName, + 
Incremental: !fromPosition.IsZero(), + ServerUUID: serverUUID, + TabletAlias: params.TabletAlias, + Keyspace: params.Keyspace, + Shard: params.Shard, + BackupTime: params.BackupTime.UTC().Format(time.RFC3339), + FinishedTime: time.Now().UTC().Format(time.RFC3339), + MySQLVersion: mysqlVersion, + UpgradeSafe: params.UpgradeSafe, + IncrementalDetails: incrDetails, }, // Builtin-specific fields @@ -711,6 +766,8 @@ func (bp *backupPipe) ReportProgress(period time.Duration, logger logutil.Logger // backupFile backs up an individual file. func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle, fe *FileEntry, name string) (finalErr error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() // Open the source file for reading. openSourceAt := time.Now() source, err := fe.open(params.Cnf, true) @@ -748,12 +805,9 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara defer func(name, fileName string) { closeDestAt := time.Now() if rerr := dest.Close(); rerr != nil { - if finalErr != nil { - // We already have an error, just log this one. 
- params.Logger.Errorf2(rerr, "failed to close file %v,%v", name, fe.Name) - } else { - finalErr = rerr - } + rerr = vterrors.Wrapf(rerr, "failed to close file %v,%v", name, fe.Name) + params.Logger.Error(rerr) + finalErr = errors.Join(finalErr, rerr) } params.Stats.Scope(stats.Operation("Destination:Close")).TimedIncrement(time.Since(closeDestAt)) }(name, fe.Name) @@ -763,43 +817,57 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara bw := newBackupWriter(fe.Name, builtinBackupStorageWriteBufferSize, fi.Size(), timedDest) - var reader io.Reader = br - var writer io.Writer = bw + // We create the following inner function because: + // - we must `defer` the compressor's Close() function + // - but it must take place before we close the pipe reader&writer + createAndCopy := func() (createAndCopyErr error) { + var reader io.Reader = br + var writer io.Writer = bw + + // Create the gzip compression pipe, if necessary. + if backupStorageCompress { + var compressor io.WriteCloser + if ExternalCompressorCmd != "" { + compressor, err = newExternalCompressor(ctx, ExternalCompressorCmd, writer, params.Logger) + } else { + compressor, err = newBuiltinCompressor(CompressionEngineName, writer, params.Logger) + } + if err != nil { + return vterrors.Wrap(err, "can't create compressor") + } - // Create the gzip compression pipe, if necessary. 
- var compressor io.WriteCloser - if backupStorageCompress { - if ExternalCompressorCmd != "" { - compressor, err = newExternalCompressor(ctx, ExternalCompressorCmd, writer, params.Logger) - } else { - compressor, err = newBuiltinCompressor(CompressionEngineName, writer, params.Logger) - } - if err != nil { - return vterrors.Wrap(err, "can't create compressor") - } + compressStats := params.Stats.Scope(stats.Operation("Compressor:Write")) + writer = ioutil.NewMeteredWriter(compressor, compressStats.TimedIncrementBytes) - compressStats := params.Stats.Scope(stats.Operation("Compressor:Write")) - writer = ioutil.NewMeteredWriter(compressor, compressStats.TimedIncrementBytes) - } + closer := ioutil.NewTimeoutCloser(ctx, compressor, closeTimeout) + defer func() { + // Close gzip to flush it, after that all data is sent to writer. + closeCompressorAt := time.Now() + params.Logger.Infof("closing compressor") + if cerr := closer.Close(); err != nil { + cerr = vterrors.Wrapf(cerr, "failed to close compressor %v", name) + params.Logger.Error(cerr) + createAndCopyErr = errors.Join(createAndCopyErr, cerr) + } + params.Stats.Scope(stats.Operation("Compressor:Close")).TimedIncrement(time.Since(closeCompressorAt)) + }() + } - if builtinBackupFileReadBufferSize > 0 { - reader = bufio.NewReaderSize(br, int(builtinBackupFileReadBufferSize)) - } + if builtinBackupFileReadBufferSize > 0 { + reader = bufio.NewReaderSize(br, int(builtinBackupFileReadBufferSize)) + } - // Copy from the source file to writer (optional gzip, - // optional pipe, tee, output file and hasher). - _, err = io.Copy(writer, reader) - if err != nil { - return vterrors.Wrap(err, "cannot copy data") + // Copy from the source file to writer (optional gzip, + // optional pipe, tee, output file and hasher). + _, err = io.Copy(writer, reader) + if err != nil { + return vterrors.Wrap(err, "cannot copy data") + } + return nil } - // Close gzip to flush it, after that all data is sent to writer. 
- if compressor != nil { - closeCompressorAt := time.Now() - if err = compressor.Close(); err != nil { - return vterrors.Wrap(err, "cannot close compressor") - } - params.Stats.Scope(stats.Operation("Compressor:Close")).TimedIncrement(time.Since(closeCompressorAt)) + if err := createAndCopy(); err != nil { + return err } // Close the backupPipe to finish writing on destination. @@ -849,7 +917,14 @@ func (be *BuiltinBackupEngine) executeRestoreIncrementalBackup(ctx context.Conte if err != nil { return vterrors.Wrap(err, "failed to restore file") } - if err := mysqld.ApplyBinlogFile(ctx, binlogFile, params.RestoreToPos); err != nil { + req := &mysqlctlpb.ApplyBinlogFileRequest{ + BinlogFileName: binlogFile, + BinlogRestoreDatetime: protoutil.TimeToProto(params.RestoreToTimestamp), + } + if params.RestoreToPos.GTIDSet != nil { + req.BinlogRestorePosition = params.RestoreToPos.GTIDSet.String() + } + if err := mysqld.ApplyBinlogFile(ctx, req); err != nil { return vterrors.Wrapf(err, "failed to apply binlog file %v", binlogFile) } defer os.Remove(binlogFile) @@ -961,6 +1036,8 @@ func (be *BuiltinBackupEngine) restoreFiles(ctx context.Context, params RestoreP // restoreFile restores an individual file. func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle, fe *FileEntry, bm builtinBackupManifest, name string) (finalErr error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() // Open the source file for reading. openSourceAt := time.Now() source, err := bh.ReadFile(ctx, name) @@ -993,12 +1070,7 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestorePa defer func() { closeDestAt := time.Now() if cerr := dest.Close(); cerr != nil { - if finalErr != nil { - // We already have an error, just log this one. 
- log.Errorf("failed to close file %v: %v", name, cerr) - } else { - finalErr = vterrors.Wrap(cerr, "failed to close destination file") - } + finalErr = errors.Join(finalErr, vterrors.Wrap(cerr, "failed to close destination file")) } params.Stats.Scope(stats.Operation("Destination:Close")).TimedIncrement(time.Since(closeDestAt)) }() @@ -1037,27 +1109,25 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestorePa if err != nil { return vterrors.Wrap(err, "can't create decompressor") } + closer := ioutil.NewTimeoutCloser(ctx, decompressor, closeTimeout) decompressStats := params.Stats.Scope(stats.Operation("Decompressor:Read")) reader = ioutil.NewMeteredReader(decompressor, decompressStats.TimedIncrementBytes) defer func() { closeDecompressorAt := time.Now() - if cerr := decompressor.Close(); cerr != nil { - params.Logger.Errorf("failed to close decompressor: %v", cerr) - if finalErr != nil { - // We already have an error, just log this one. - log.Errorf("failed to close decompressor %v: %v", name, cerr) - } else { - finalErr = vterrors.Wrap(cerr, "failed to close decompressor") - } + params.Logger.Infof("closing decompressor") + if cerr := closer.Close(); err != nil { + cerr = vterrors.Wrapf(cerr, "failed to close decompressor %v", name) + params.Logger.Error(cerr) + finalErr = errors.Join(finalErr, cerr) } params.Stats.Scope(stats.Operation("Decompressor:Close")).TimedIncrement(time.Since(closeDecompressorAt)) }() } // Copy the data. Will also write to the hasher. 
- if _, err = io.Copy(bufferedDest, reader); err != nil { + if _, err := io.Copy(bufferedDest, reader); err != nil { return vterrors.Wrap(err, "failed to copy file contents") } @@ -1089,25 +1159,25 @@ func (be *BuiltinBackupEngine) ShouldDrainForBackup(req *tabletmanagerdatapb.Bac return true } -func getPrimaryPosition(ctx context.Context, tmc tmclient.TabletManagerClient, ts *topo.Server, keyspace, shard string) (mysql.Position, error) { +func getPrimaryPosition(ctx context.Context, tmc tmclient.TabletManagerClient, ts *topo.Server, keyspace, shard string) (replication.Position, error) { si, err := ts.GetShard(ctx, keyspace, shard) if err != nil { - return mysql.Position{}, vterrors.Wrap(err, "can't read shard") + return replication.Position{}, vterrors.Wrap(err, "can't read shard") } if topoproto.TabletAliasIsZero(si.PrimaryAlias) { - return mysql.Position{}, fmt.Errorf("shard %v/%v has no primary", keyspace, shard) + return replication.Position{}, fmt.Errorf("shard %v/%v has no primary", keyspace, shard) } ti, err := ts.GetTablet(ctx, si.PrimaryAlias) if err != nil { - return mysql.Position{}, fmt.Errorf("can't get primary tablet record %v: %v", topoproto.TabletAliasString(si.PrimaryAlias), err) + return replication.Position{}, fmt.Errorf("can't get primary tablet record %v: %v", topoproto.TabletAliasString(si.PrimaryAlias), err) } posStr, err := tmc.PrimaryPosition(ctx, ti.Tablet) if err != nil { - return mysql.Position{}, fmt.Errorf("can't get primary replication position: %v", err) + return replication.Position{}, fmt.Errorf("can't get primary replication position: %v", err) } - pos, err := mysql.DecodePosition(posStr) + pos, err := replication.DecodePosition(posStr) if err != nil { - return mysql.Position{}, fmt.Errorf("can't decode primary replication position %q: %v", posStr, err) + return replication.Position{}, fmt.Errorf("can't decode primary replication position %q: %v", posStr, err) } return pos, nil } diff --git 
a/go/vt/mysqlctl/builtinbackupengine2_test.go b/go/vt/mysqlctl/builtinbackupengine2_test.go deleted file mode 100644 index ace019f93e2..00000000000 --- a/go/vt/mysqlctl/builtinbackupengine2_test.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2023 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package mysqlctl_test is the blackbox tests for package mysqlctl. -package mysqlctl - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" -) - -func TestShouldDrainForBackupBuiltIn(t *testing.T) { - be := &BuiltinBackupEngine{} - - assert.True(t, be.ShouldDrainForBackup(&tabletmanagerdatapb.BackupRequest{})) - assert.False(t, be.ShouldDrainForBackup(&tabletmanagerdatapb.BackupRequest{IncrementalFromPos: "auto"})) - assert.False(t, be.ShouldDrainForBackup(&tabletmanagerdatapb.BackupRequest{IncrementalFromPos: "99ca8ed4-399c-11ee-861b-0a43f95f28a3:1-197"})) - assert.False(t, be.ShouldDrainForBackup(&tabletmanagerdatapb.BackupRequest{IncrementalFromPos: "MySQL56/99ca8ed4-399c-11ee-861b-0a43f95f28a3:1-197"})) -} diff --git a/go/vt/mysqlctl/builtinbackupengine_test.go b/go/vt/mysqlctl/builtinbackupengine_test.go index 1d30956d527..39e4aa7ae1c 100644 --- a/go/vt/mysqlctl/builtinbackupengine_test.go +++ b/go/vt/mysqlctl/builtinbackupengine_test.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Vitess Authors. +Copyright 2023 The Vitess Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,475 +15,66 @@ limitations under the License. */ // Package mysqlctl_test is the blackbox tests for package mysqlctl. -package mysqlctl_test +package mysqlctl import ( - "context" - "fmt" - "os" - "path" - "strings" "testing" - "time" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/mysql/fakesqldb" - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/mysqlctl" - "vitess.io/vitess/go/vt/mysqlctl/backupstats" - "vitess.io/vitess/go/vt/mysqlctl/filebackupstorage" - "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/proto/vttime" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/memorytopo" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" ) -func setBuiltinBackupMysqldDeadline(t time.Duration) time.Duration { - old := mysqlctl.BuiltinBackupMysqldTimeout - mysqlctl.BuiltinBackupMysqldTimeout = t - - return old -} - -func createBackupDir(root string, dirs ...string) error { - for _, dir := range dirs { - if err := os.MkdirAll(path.Join(root, dir), 0755); err != nil { - return err - } - } - - return nil -} - -func createBackupFiles(root string, fileCount int, ext string) error { - for i := 0; i < fileCount; i++ { - f, err := os.Create(path.Join(root, fmt.Sprintf("%d.%s", i, ext))) - if err != nil { - return err - } - if _, err := f.Write([]byte("hello, world!")); err != nil { - return err - } - defer f.Close() - } - - return nil -} - -func TestExecuteBackup(t *testing.T) { - // Set up local backup directory - backupRoot := "testdata/builtinbackup_test" - filebackupstorage.FileBackupStorageRoot = backupRoot - require.NoError(t, createBackupDir(backupRoot, "innodb", "log", "datadir")) - dataDir := path.Join(backupRoot, "datadir") - // Add some files under data directory to force backup to actually backup 
files. - require.NoError(t, createBackupDir(dataDir, "test1")) - require.NoError(t, createBackupDir(dataDir, "test2")) - require.NoError(t, createBackupFiles(path.Join(dataDir, "test1"), 2, "ibd")) - require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) - defer os.RemoveAll(backupRoot) - - ctx := context.Background() - - needIt, err := needInnoDBRedoLogSubdir() - require.NoError(t, err) - if needIt { - fpath := path.Join("log", mysql.DynamicRedoLogSubdir) - if err := createBackupDir(backupRoot, fpath); err != nil { - require.Failf(t, err.Error(), "failed to create directory: %s", fpath) - } - } - - // Set up topo - keyspace, shard := "mykeyspace", "-80" - ts := memorytopo.NewServer("cell1") - defer ts.Close() - - require.NoError(t, ts.CreateKeyspace(ctx, keyspace, &topodata.Keyspace{})) - require.NoError(t, ts.CreateShard(ctx, keyspace, shard)) - - tablet := topo.NewTablet(100, "cell1", "mykeyspace-00-80-0100") - tablet.Keyspace = keyspace - tablet.Shard = shard - - require.NoError(t, ts.CreateTablet(ctx, tablet)) - - _, err = ts.UpdateShardFields(ctx, keyspace, shard, func(si *topo.ShardInfo) error { - si.PrimaryAlias = &topodata.TabletAlias{Uid: 100, Cell: "cell1"} - - now := time.Now() - si.PrimaryTermStartTime = &vttime.Time{Seconds: int64(now.Second()), Nanoseconds: int32(now.Nanosecond())} - - return nil - }) - require.NoError(t, err) - - be := &mysqlctl.BuiltinBackupEngine{} - - // Configure a tight deadline to force a timeout - oldDeadline := setBuiltinBackupMysqldDeadline(time.Second) - defer setBuiltinBackupMysqldDeadline(oldDeadline) - - bh := filebackupstorage.NewBackupHandle(nil, "", "", false) - - // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: - // "STOP SLAVE", "START SLAVE", in that order. 
- mysqld := mysqlctl.NewFakeMysqlDaemon(fakesqldb.New(t)) - mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} - // mysqld.ShutdownTime = time.Minute - - fakeStats := backupstats.NewFakeStats() - - ok, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ - Logger: logutil.NewConsoleLogger(), - Mysqld: mysqld, - Cnf: &mysqlctl.Mycnf{ - InnodbDataHomeDir: path.Join(backupRoot, "innodb"), - InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), - DataDir: path.Join(backupRoot, "datadir"), +func TestGetIncrementalFromPosGTIDSet(t *testing.T) { + tcases := []struct { + incrementalFromPos string + gtidSet string + expctError bool + }{ + { + "MySQL56/16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615", + "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615", + false, }, - Concurrency: 2, - HookExtraEnv: map[string]string{}, - TopoServer: ts, - Keyspace: keyspace, - Shard: shard, - Stats: fakeStats, - }, bh) - - require.NoError(t, err) - assert.True(t, ok) - - var destinationCloseStats int - var destinationOpenStats int - var destinationWriteStats int - var sourceCloseStats int - var sourceOpenStats int - var sourceReadStats int - - for _, sr := range fakeStats.ScopeReturns { - sfs := sr.(*backupstats.FakeStats) - switch sfs.ScopeV[backupstats.ScopeOperation] { - case "Destination:Close": - destinationCloseStats++ - require.Len(t, sfs.TimedIncrementCalls, 1) - case "Destination:Open": - destinationOpenStats++ - require.Len(t, sfs.TimedIncrementCalls, 1) - case "Destination:Write": - destinationWriteStats++ - require.GreaterOrEqual(t, len(sfs.TimedIncrementBytesCalls), 1) - case "Source:Close": - sourceCloseStats++ - require.Len(t, sfs.TimedIncrementCalls, 1) - case "Source:Open": - sourceOpenStats++ - require.Len(t, sfs.TimedIncrementCalls, 1) - case "Source:Read": - sourceReadStats++ - require.GreaterOrEqual(t, len(sfs.TimedIncrementBytesCalls), 1) - } - } - - require.Equal(t, 4, destinationCloseStats) - require.Equal(t, 4, destinationOpenStats) - require.Equal(t, 
4, destinationWriteStats) - require.Equal(t, 4, sourceCloseStats) - require.Equal(t, 4, sourceOpenStats) - require.Equal(t, 4, sourceReadStats) - - mysqld.ExpectedExecuteSuperQueryCurrent = 0 // resest the index of what queries we've run - mysqld.ShutdownTime = time.Minute // reminder that shutdownDeadline is 1s - - ok, err = be.ExecuteBackup(ctx, mysqlctl.BackupParams{ - Logger: logutil.NewConsoleLogger(), - Mysqld: mysqld, - Cnf: &mysqlctl.Mycnf{ - InnodbDataHomeDir: path.Join(backupRoot, "innodb"), - InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), - DataDir: path.Join(backupRoot, "datadir"), + { + "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615", + "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615", + false, }, - HookExtraEnv: map[string]string{}, - TopoServer: ts, - Keyspace: keyspace, - Shard: shard, - }, bh) - - assert.Error(t, err) - assert.False(t, ok) -} - -// TestExecuteBackupWithCanceledContext tests the ability of the backup function to gracefully handle cases where errors -// occur due to various reasons, such as context time cancel. The process should not panic in these situations. -func TestExecuteBackupWithCanceledContext(t *testing.T) { - // Set up local backup directory - id := fmt.Sprintf("%d", time.Now().UnixNano()) - backupRoot := fmt.Sprintf("testdata/builtinbackup_test_%s", id) - filebackupstorage.FileBackupStorageRoot = backupRoot - require.NoError(t, createBackupDir(backupRoot, "innodb", "log", "datadir")) - dataDir := path.Join(backupRoot, "datadir") - // Add some files under data directory to force backup to execute semaphore acquire inside - // backupFiles() method (https://github.com/vitessio/vitess/blob/main/go/vt/mysqlctl/builtinbackupengine.go#L483). 
- require.NoError(t, createBackupDir(dataDir, "test1")) - require.NoError(t, createBackupDir(dataDir, "test2")) - require.NoError(t, createBackupFiles(path.Join(dataDir, "test1"), 2, "ibd")) - require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) - defer os.RemoveAll(backupRoot) - - // Cancel the context deliberately - ctx, cancel := context.WithCancel(context.Background()) - cancel() - needIt, err := needInnoDBRedoLogSubdir() - require.NoError(t, err) - if needIt { - fpath := path.Join("log", mysql.DynamicRedoLogSubdir) - if err := createBackupDir(backupRoot, fpath); err != nil { - require.Failf(t, err.Error(), "failed to create directory: %s", fpath) - } - } - - // Set up topo - keyspace, shard := "mykeyspace", "-80" - ts := memorytopo.NewServer("cell1") - defer ts.Close() - - require.NoError(t, ts.CreateKeyspace(ctx, keyspace, &topodata.Keyspace{})) - require.NoError(t, ts.CreateShard(ctx, keyspace, shard)) - - tablet := topo.NewTablet(100, "cell1", "mykeyspace-00-80-0100") - tablet.Keyspace = keyspace - tablet.Shard = shard - - require.NoError(t, ts.CreateTablet(ctx, tablet)) - - _, err = ts.UpdateShardFields(ctx, keyspace, shard, func(si *topo.ShardInfo) error { - si.PrimaryAlias = &topodata.TabletAlias{Uid: 100, Cell: "cell1"} - - now := time.Now() - si.PrimaryTermStartTime = &vttime.Time{Seconds: int64(now.Second()), Nanoseconds: int32(now.Nanosecond())} - - return nil - }) - require.NoError(t, err) - - be := &mysqlctl.BuiltinBackupEngine{} - bh := filebackupstorage.NewBackupHandle(nil, "", "", false) - // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: - // "STOP SLAVE", "START SLAVE", in that order. 
- mysqld := mysqlctl.NewFakeMysqlDaemon(fakesqldb.New(t)) - mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} - - ok, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ - Logger: logutil.NewConsoleLogger(), - Mysqld: mysqld, - Cnf: &mysqlctl.Mycnf{ - InnodbDataHomeDir: path.Join(backupRoot, "innodb"), - InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), - DataDir: path.Join(backupRoot, "datadir"), + { + "MySQL56/16b1039f-22b6-11ed-b765-0a43f95f28a3", + "", + true, }, - Stats: backupstats.NewFakeStats(), - Concurrency: 2, - HookExtraEnv: map[string]string{}, - TopoServer: ts, - Keyspace: keyspace, - Shard: shard, - }, bh) - - require.Error(t, err) - // all four files will fail - require.ErrorContains(t, err, "context canceled;context canceled;context canceled;context canceled") - assert.False(t, ok) -} - -// TestExecuteRestoreWithCanceledContext tests the ability of the restore function to gracefully handle cases where errors -// occur due to various reasons, such as context timed-out. The process should not panic in these situations. -func TestExecuteRestoreWithTimedOutContext(t *testing.T) { - // Set up local backup directory - id := fmt.Sprintf("%d", time.Now().UnixNano()) - backupRoot := fmt.Sprintf("testdata/builtinbackup_test_%s", id) - filebackupstorage.FileBackupStorageRoot = backupRoot - require.NoError(t, createBackupDir(backupRoot, "innodb", "log", "datadir")) - dataDir := path.Join(backupRoot, "datadir") - // Add some files under data directory to force backup to execute semaphore acquire inside - // backupFiles() method (https://github.com/vitessio/vitess/blob/main/go/vt/mysqlctl/builtinbackupengine.go#L483). 
- require.NoError(t, createBackupDir(dataDir, "test1")) - require.NoError(t, createBackupDir(dataDir, "test2")) - require.NoError(t, createBackupFiles(path.Join(dataDir, "test1"), 2, "ibd")) - require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) - defer os.RemoveAll(backupRoot) - - ctx := context.Background() - needIt, err := needInnoDBRedoLogSubdir() - require.NoError(t, err) - if needIt { - fpath := path.Join("log", mysql.DynamicRedoLogSubdir) - if err := createBackupDir(backupRoot, fpath); err != nil { - require.Failf(t, err.Error(), "failed to create directory: %s", fpath) - } - } - - // Set up topo - keyspace, shard := "mykeyspace", "-80" - ts := memorytopo.NewServer("cell1") - defer ts.Close() - - require.NoError(t, ts.CreateKeyspace(ctx, keyspace, &topodata.Keyspace{})) - require.NoError(t, ts.CreateShard(ctx, keyspace, shard)) - - tablet := topo.NewTablet(100, "cell1", "mykeyspace-00-80-0100") - tablet.Keyspace = keyspace - tablet.Shard = shard - - require.NoError(t, ts.CreateTablet(ctx, tablet)) - - _, err = ts.UpdateShardFields(ctx, keyspace, shard, func(si *topo.ShardInfo) error { - si.PrimaryAlias = &topodata.TabletAlias{Uid: 100, Cell: "cell1"} - - now := time.Now() - si.PrimaryTermStartTime = &vttime.Time{Seconds: int64(now.Second()), Nanoseconds: int32(now.Nanosecond())} - - return nil - }) - require.NoError(t, err) - - be := &mysqlctl.BuiltinBackupEngine{} - bh := filebackupstorage.NewBackupHandle(nil, "", "", false) - // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: - // "STOP SLAVE", "START SLAVE", in that order. 
- mysqld := mysqlctl.NewFakeMysqlDaemon(fakesqldb.New(t)) - mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} - - ok, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ - Logger: logutil.NewConsoleLogger(), - Mysqld: mysqld, - Cnf: &mysqlctl.Mycnf{ - InnodbDataHomeDir: path.Join(backupRoot, "innodb"), - InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), - DataDir: path.Join(backupRoot, "datadir"), + { + "MySQL56/invalid", + "", + true, }, - Stats: backupstats.NewFakeStats(), - Concurrency: 2, - HookExtraEnv: map[string]string{}, - TopoServer: ts, - Keyspace: keyspace, - Shard: shard, - }, bh) - - require.NoError(t, err) - assert.True(t, ok) - - // Now try to restore the above backup. - bh = filebackupstorage.NewBackupHandle(nil, "", "", true) - mysqld = mysqlctl.NewFakeMysqlDaemon(fakesqldb.New(t)) - mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} - - fakeStats := backupstats.NewFakeStats() - - restoreParams := mysqlctl.RestoreParams{ - Cnf: &mysqlctl.Mycnf{ - InnodbDataHomeDir: path.Join(backupRoot, "innodb"), - InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), - DataDir: path.Join(backupRoot, "datadir"), - BinLogPath: path.Join(backupRoot, "binlog"), - RelayLogPath: path.Join(backupRoot, "relaylog"), - RelayLogIndexPath: path.Join(backupRoot, "relaylogindex"), - RelayLogInfoPath: path.Join(backupRoot, "relayloginfo"), + { + "16b1039f-22b6-11ed-b765-0a43f95f28a3", + "", + true, }, - Logger: logutil.NewConsoleLogger(), - Mysqld: mysqld, - Concurrency: 2, - HookExtraEnv: map[string]string{}, - DeleteBeforeRestore: false, - DbName: "test", - Keyspace: "test", - Shard: "-", - StartTime: time.Now(), - RestoreToPos: mysql.Position{}, - DryRun: false, - Stats: fakeStats, } - - // Successful restore. 
- bm, err := be.ExecuteRestore(ctx, restoreParams, bh) - assert.NoError(t, err) - assert.NotNil(t, bm) - - var destinationCloseStats int - var destinationOpenStats int - var destinationWriteStats int - var sourceCloseStats int - var sourceOpenStats int - var sourceReadStats int - - for _, sr := range fakeStats.ScopeReturns { - sfs := sr.(*backupstats.FakeStats) - switch sfs.ScopeV[backupstats.ScopeOperation] { - case "Destination:Close": - destinationCloseStats++ - require.Len(t, sfs.TimedIncrementCalls, 1) - case "Destination:Open": - destinationOpenStats++ - require.Len(t, sfs.TimedIncrementCalls, 1) - case "Destination:Write": - destinationWriteStats++ - require.GreaterOrEqual(t, len(sfs.TimedIncrementBytesCalls), 1) - case "Source:Close": - sourceCloseStats++ - require.Len(t, sfs.TimedIncrementCalls, 1) - case "Source:Open": - sourceOpenStats++ - require.Len(t, sfs.TimedIncrementCalls, 1) - case "Source:Read": - sourceReadStats++ - require.GreaterOrEqual(t, len(sfs.TimedIncrementBytesCalls), 1) - } - } - - require.Equal(t, 4, destinationCloseStats) - require.Equal(t, 4, destinationOpenStats) - require.Equal(t, 4, destinationWriteStats) - require.Equal(t, 4, sourceCloseStats) - require.Equal(t, 4, sourceOpenStats) - require.Equal(t, 4, sourceReadStats) - - // Restore using timed-out context - mysqld = mysqlctl.NewFakeMysqlDaemon(fakesqldb.New(t)) - mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} - restoreParams.Mysqld = mysqld - timedOutCtx, cancel := context.WithTimeout(ctx, 1*time.Second) - defer cancel() - // Let the context time out. - time.Sleep(1 * time.Second) - bm, err = be.ExecuteRestore(timedOutCtx, restoreParams, bh) - // ExecuteRestore should fail. 
- assert.Error(t, err) - assert.Nil(t, bm) - // error message can contain any combination of "context deadline exceeded" or "context canceled" - if !strings.Contains(err.Error(), "context canceled") && !strings.Contains(err.Error(), "context deadline exceeded") { - assert.Fail(t, "Test should fail with either `context canceled` or `context deadline exceeded`") + for _, tcase := range tcases { + t.Run(tcase.incrementalFromPos, func(t *testing.T) { + gtidSet, err := getIncrementalFromPosGTIDSet(tcase.incrementalFromPos) + if tcase.expctError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tcase.gtidSet, gtidSet.String()) + } + }) } } -// needInnoDBRedoLogSubdir indicates whether we need to create a redo log subdirectory. -// Starting with MySQL 8.0.30, the InnoDB redo logs are stored in a subdirectory of the -// (/. by default) called "#innodb_redo". See: -// -// https://dev.mysql.com/doc/refman/8.0/en/innodb-redo-log.html#innodb-modifying-redo-log-capacity -func needInnoDBRedoLogSubdir() (needIt bool, err error) { - mysqldVersionStr, err := mysqlctl.GetVersionString() - if err != nil { - return needIt, err - } - _, sv, err := mysqlctl.ParseVersionString(mysqldVersionStr) - if err != nil { - return needIt, err - } - versionStr := fmt.Sprintf("%d.%d.%d", sv.Major, sv.Minor, sv.Patch) - _, capableOf, _ := mysql.GetFlavor(versionStr, nil) - if capableOf == nil { - return needIt, fmt.Errorf("cannot determine database flavor details for version %s", versionStr) - } - return capableOf(mysql.DynamicRedoLogCapacityFlavorCapability) +func TestShouldDrainForBackupBuiltIn(t *testing.T) { + be := &BuiltinBackupEngine{} + + assert.True(t, be.ShouldDrainForBackup(&tabletmanagerdatapb.BackupRequest{})) + assert.False(t, be.ShouldDrainForBackup(&tabletmanagerdatapb.BackupRequest{IncrementalFromPos: "auto"})) + assert.False(t, be.ShouldDrainForBackup(&tabletmanagerdatapb.BackupRequest{IncrementalFromPos: "99ca8ed4-399c-11ee-861b-0a43f95f28a3:1-197"})) + 
assert.False(t, be.ShouldDrainForBackup(&tabletmanagerdatapb.BackupRequest{IncrementalFromPos: "MySQL56/99ca8ed4-399c-11ee-861b-0a43f95f28a3:1-197"})) } diff --git a/go/vt/mysqlctl/compression_benchmark_test.go b/go/vt/mysqlctl/compression_benchmark_test.go index 73cd684c719..de52519fa57 100644 --- a/go/vt/mysqlctl/compression_benchmark_test.go +++ b/go/vt/mysqlctl/compression_benchmark_test.go @@ -19,6 +19,8 @@ import ( "github.com/klauspost/compress/zstd" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/vt/logutil" ) @@ -372,6 +374,7 @@ func (tw *timedWriter) Write(p []byte) (nbytes int, err error) { } func TestMain(m *testing.M) { + hack.DisableProtoBufRandomness() code := m.Run() u, _ := dataURL() diff --git a/go/vt/mysqlctl/fakemysqldaemon.go b/go/vt/mysqlctl/fakemysqldaemon.go index 10877a4d6d1..39ecca84156 100644 --- a/go/vt/mysqlctl/fakemysqldaemon.go +++ b/go/vt/mysqlctl/fakemysqldaemon.go @@ -26,12 +26,13 @@ import ( "sync/atomic" "time" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/mysqlctl/tmutils" + mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" ) @@ -72,10 +73,10 @@ type FakeMysqlDaemon struct { // CurrentPrimaryPosition is returned by PrimaryPosition // and ReplicationStatus - CurrentPrimaryPosition mysql.Position + CurrentPrimaryPosition replication.Position // CurrentSourceFilePosition is used to determine the executed file based positioning of the replication source. 
- CurrentSourceFilePosition mysql.Position + CurrentSourceFilePosition replication.Position // ReplicationStatusError is used by ReplicationStatus ReplicationStatusError error @@ -106,10 +107,10 @@ type FakeMysqlDaemon struct { // SetReplicationPositionPos is matched against the input of SetReplicationPosition. // If it doesn't match, SetReplicationPosition will return an error. - SetReplicationPositionPos mysql.Position + SetReplicationPositionPos replication.Position // StartReplicationUntilAfterPos is matched against the input - StartReplicationUntilAfterPos mysql.Position + StartReplicationUntilAfterPos replication.Position // SetReplicationSourceInputs are matched against the input of SetReplicationSource // (as "%v:%v"). If all of them don't match, SetReplicationSource will return an error. @@ -118,12 +119,15 @@ type FakeMysqlDaemon struct { // SetReplicationSourceError is used by SetReplicationSource SetReplicationSourceError error + // StopReplicationError error is used by StopReplication + StopReplicationError error + // WaitPrimaryPositions is checked by WaitSourcePos, if the value is found // in it, then the function returns nil, else the function returns an error - WaitPrimaryPositions []mysql.Position + WaitPrimaryPositions []replication.Position // PromoteResult is returned by Promote - PromoteResult mysql.Position + PromoteResult replication.Position // PromoteError is used by Promote PromoteError error @@ -167,6 +171,9 @@ type FakeMysqlDaemon struct { // TimeoutHook is a func that can be called at the beginning of any method to fake a timeout. // all a test needs to do is make it { return context.DeadlineExceeded } TimeoutHook func() error + + // Version is the version that will be returned by GetVersionString. 
+ Version string } // NewFakeMysqlDaemon returns a FakeMysqlDaemon where mysqld appears @@ -177,6 +184,7 @@ func NewFakeMysqlDaemon(db *fakesqldb.DB) *FakeMysqlDaemon { db: db, Running: true, IOThreadRunning: true, + Version: "8.0.32", } if db != nil { result.appPool = dbconnpool.NewConnectionPool("AppConnPool", 5, time.Minute, 0, 0) @@ -227,10 +235,15 @@ func (fmd *FakeMysqlDaemon) RunMysqlUpgrade(ctx context.Context) error { } // ApplyBinlogFile is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) ApplyBinlogFile(ctx context.Context, binlogFile string, restorePos mysql.Position) error { +func (fmd *FakeMysqlDaemon) ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.ApplyBinlogFileRequest) error { return nil } +// ReadBinlogFilesTimestamps is part of the MysqlDaemon interface +func (fmd *FakeMysqlDaemon) ReadBinlogFilesTimestamps(ctx context.Context, req *mysqlctlpb.ReadBinlogFilesTimestampsRequest) (*mysqlctlpb.ReadBinlogFilesTimestampsResponse, error) { + return nil, nil +} + // ReinitConfig is part of the MysqlDaemon interface func (fmd *FakeMysqlDaemon) ReinitConfig(ctx context.Context, cnf *Mycnf) error { return nil @@ -265,47 +278,47 @@ func (fmd *FakeMysqlDaemon) GetServerUUID(ctx context.Context) (string, error) { } // CurrentPrimaryPositionLocked is thread-safe -func (fmd *FakeMysqlDaemon) CurrentPrimaryPositionLocked(pos mysql.Position) { +func (fmd *FakeMysqlDaemon) CurrentPrimaryPositionLocked(pos replication.Position) { fmd.mu.Lock() defer fmd.mu.Unlock() fmd.CurrentPrimaryPosition = pos } // ReplicationStatus is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) ReplicationStatus() (mysql.ReplicationStatus, error) { +func (fmd *FakeMysqlDaemon) ReplicationStatus() (replication.ReplicationStatus, error) { if fmd.ReplicationStatusError != nil { - return mysql.ReplicationStatus{}, fmd.ReplicationStatusError + return replication.ReplicationStatus{}, fmd.ReplicationStatusError } fmd.mu.Lock() defer fmd.mu.Unlock() - return 
mysql.ReplicationStatus{ + return replication.ReplicationStatus{ Position: fmd.CurrentPrimaryPosition, FilePosition: fmd.CurrentSourceFilePosition, RelayLogSourceBinlogEquivalentPosition: fmd.CurrentSourceFilePosition, ReplicationLagSeconds: fmd.ReplicationLagSeconds, // implemented as AND to avoid changing all tests that were // previously using Replicating = false - IOState: mysql.ReplicationStatusToState(fmt.Sprintf("%v", fmd.Replicating && fmd.IOThreadRunning)), - SQLState: mysql.ReplicationStatusToState(fmt.Sprintf("%v", fmd.Replicating)), + IOState: replication.ReplicationStatusToState(fmt.Sprintf("%v", fmd.Replicating && fmd.IOThreadRunning)), + SQLState: replication.ReplicationStatusToState(fmt.Sprintf("%v", fmd.Replicating)), SourceHost: fmd.CurrentSourceHost, SourcePort: fmd.CurrentSourcePort, }, nil } // PrimaryStatus is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) PrimaryStatus(ctx context.Context) (mysql.PrimaryStatus, error) { +func (fmd *FakeMysqlDaemon) PrimaryStatus(ctx context.Context) (replication.PrimaryStatus, error) { if fmd.PrimaryStatusError != nil { - return mysql.PrimaryStatus{}, fmd.PrimaryStatusError + return replication.PrimaryStatus{}, fmd.PrimaryStatusError } - return mysql.PrimaryStatus{ + return replication.PrimaryStatus{ Position: fmd.CurrentPrimaryPosition, FilePosition: fmd.CurrentSourceFilePosition, }, nil } // GetGTIDPurged is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) GetGTIDPurged(ctx context.Context) (mysql.Position, error) { - return mysql.Position{}, nil +func (fmd *FakeMysqlDaemon) GetGTIDPurged(ctx context.Context) (replication.Position, error) { + return replication.Position{}, nil } // ResetReplication is part of the MysqlDaemon interface. 
@@ -358,7 +371,7 @@ func (fmd *FakeMysqlDaemon) GetPreviousGTIDs(ctx context.Context, binlog string) } // PrimaryPosition is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) PrimaryPosition() (mysql.Position, error) { +func (fmd *FakeMysqlDaemon) PrimaryPosition() (replication.Position, error) { return fmd.CurrentPrimaryPosition, nil } @@ -405,7 +418,7 @@ func (fmd *FakeMysqlDaemon) RestartReplication(hookExtraEnv map[string]string) e } // StartReplicationUntilAfter is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) StartReplicationUntilAfter(ctx context.Context, pos mysql.Position) error { +func (fmd *FakeMysqlDaemon) StartReplicationUntilAfter(ctx context.Context, pos replication.Position) error { if !reflect.DeepEqual(fmd.StartReplicationUntilAfterPos, pos) { return fmt.Errorf("wrong pos for StartReplicationUntilAfter: expected %v got %v", fmd.SetReplicationPositionPos, pos) } @@ -417,6 +430,9 @@ func (fmd *FakeMysqlDaemon) StartReplicationUntilAfter(ctx context.Context, pos // StopReplication is part of the MysqlDaemon interface. func (fmd *FakeMysqlDaemon) StopReplication(hookExtraEnv map[string]string) error { + if fmd.StopReplicationError != nil { + return fmd.StopReplicationError + } return fmd.ExecuteSuperQueryList(context.Background(), []string{ "STOP SLAVE", }) @@ -430,7 +446,7 @@ func (fmd *FakeMysqlDaemon) StopIOThread(ctx context.Context) error { } // SetReplicationPosition is part of the MysqlDaemon interface. 
-func (fmd *FakeMysqlDaemon) SetReplicationPosition(ctx context.Context, pos mysql.Position) error { +func (fmd *FakeMysqlDaemon) SetReplicationPosition(ctx context.Context, pos replication.Position) error { if !reflect.DeepEqual(fmd.SetReplicationPositionPos, pos) { return fmt.Errorf("wrong pos for SetReplicationPosition: expected %v got %v", fmd.SetReplicationPositionPos, pos) } @@ -462,6 +478,8 @@ func (fmd *FakeMysqlDaemon) SetReplicationSource(ctx context.Context, host strin if startReplicationAfter { cmds = append(cmds, "START SLAVE") } + fmd.CurrentSourceHost = host + fmd.CurrentSourcePort = port return fmd.ExecuteSuperQueryList(ctx, cmds) } @@ -471,7 +489,7 @@ func (fmd *FakeMysqlDaemon) WaitForReparentJournal(ctx context.Context, timeCrea } // WaitSourcePos is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) WaitSourcePos(_ context.Context, pos mysql.Position) error { +func (fmd *FakeMysqlDaemon) WaitSourcePos(_ context.Context, pos replication.Position) error { if fmd.TimeoutHook != nil { return fmd.TimeoutHook() } @@ -484,12 +502,12 @@ func (fmd *FakeMysqlDaemon) WaitSourcePos(_ context.Context, pos mysql.Position) } // Promote is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) Promote(hookExtraEnv map[string]string) (mysql.Position, error) { +func (fmd *FakeMysqlDaemon) Promote(hookExtraEnv map[string]string) (replication.Position, error) { if fmd.PromoteLag > 0 { time.Sleep(fmd.PromoteLag) } if fmd.PromoteError != nil { - return mysql.Position{}, fmd.PromoteError + return replication.Position{}, fmd.PromoteError } return fmd.PromoteResult, nil } @@ -678,11 +696,11 @@ func (fmd *FakeMysqlDaemon) SemiSyncReplicationStatus() (bool, error) { } // GetVersionString is part of the MysqlDaemon interface. 
-func (fmd *FakeMysqlDaemon) GetVersionString(ctx context.Context) string { - return "" +func (fmd *FakeMysqlDaemon) GetVersionString(ctx context.Context) (string, error) { + return fmd.Version, nil } // GetVersionComment is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) GetVersionComment(ctx context.Context) string { - return "" +func (fmd *FakeMysqlDaemon) GetVersionComment(ctx context.Context) (string, error) { + return "", nil } diff --git a/go/vt/mysqlctl/grpcmysqlctlclient/client.go b/go/vt/mysqlctl/grpcmysqlctlclient/client.go index 9c3d3598f70..150402a8c44 100644 --- a/go/vt/mysqlctl/grpcmysqlctlclient/client.go +++ b/go/vt/mysqlctl/grpcmysqlctlclient/client.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/status" "vitess.io/vitess/go/vt/grpcclient" @@ -41,9 +42,14 @@ type client struct { func factory(network, addr string) (mysqlctlclient.MysqlctlClient, error) { // create the RPC client - cc, err := grpcclient.Dial(addr, grpcclient.FailFast(false), grpc.WithInsecure(), grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { //nolint:staticcheck - return net.DialTimeout(network, addr, timeout) - })) + cc, err := grpcclient.Dial( + addr, + grpcclient.FailFast(false), + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(func(ctx context.Context, addr string, + ) (net.Conn, error) { + return new(net.Dialer).DialContext(ctx, network, addr) + })) if err != nil { return nil, err } @@ -84,17 +90,22 @@ func (c *client) RunMysqlUpgrade(ctx context.Context) error { } // ApplyBinlogFile is part of the MysqlctlClient interface. 
-func (c *client) ApplyBinlogFile(ctx context.Context, binlogFileName, binlogRestorePosition string) error { - req := &mysqlctlpb.ApplyBinlogFileRequest{ - BinlogFileName: binlogFileName, - BinlogRestorePosition: binlogRestorePosition, - } +func (c *client) ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.ApplyBinlogFileRequest) error { return c.withRetry(ctx, func() error { _, err := c.c.ApplyBinlogFile(ctx, req) return err }) } +// ReadBinlogFilesTimestamps is part of the MysqlctlClient interface. +func (c *client) ReadBinlogFilesTimestamps(ctx context.Context, req *mysqlctlpb.ReadBinlogFilesTimestampsRequest) (resp *mysqlctlpb.ReadBinlogFilesTimestampsResponse, err error) { + err = c.withRetry(ctx, func() error { + resp, err = c.c.ReadBinlogFilesTimestamps(ctx, req) + return err + }) + return resp, err +} + // ReinitConfig is part of the MysqlctlClient interface. func (c *client) ReinitConfig(ctx context.Context) error { return c.withRetry(ctx, func() error { @@ -111,6 +122,20 @@ func (c *client) RefreshConfig(ctx context.Context) error { }) } +// VersionString is part of the MysqlctlClient interface. +func (c *client) VersionString(ctx context.Context) (string, error) { + var version string + err := c.withRetry(ctx, func() error { + r, err := c.c.VersionString(ctx, &mysqlctlpb.VersionStringRequest{}) + if err != nil { + return err + } + version = r.Version + return nil + }) + return version, err +} + // Close is part of the MysqlctlClient interface. 
func (c *client) Close() { c.cc.Close() diff --git a/go/vt/mysqlctl/grpcmysqlctlserver/server.go b/go/vt/mysqlctl/grpcmysqlctlserver/server.go index 898d06f1a96..84953020534 100644 --- a/go/vt/mysqlctl/grpcmysqlctlserver/server.go +++ b/go/vt/mysqlctl/grpcmysqlctlserver/server.go @@ -25,7 +25,6 @@ import ( "google.golang.org/grpc" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/mysqlctl" mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" ) @@ -54,11 +53,7 @@ func (s *server) RunMysqlUpgrade(ctx context.Context, _ *mysqlctlpb.RunMysqlUpgr // RunMysqlUpgrade implements the server side of the MysqlctlClient interface. func (s *server) ApplyBinlogFile(ctx context.Context, request *mysqlctlpb.ApplyBinlogFileRequest) (*mysqlctlpb.ApplyBinlogFileResponse, error) { - pos, err := mysql.DecodePosition(request.BinlogRestorePosition) - if err != nil { - return nil, err - } - return &mysqlctlpb.ApplyBinlogFileResponse{}, s.mysqld.ApplyBinlogFile(ctx, request.BinlogFileName, pos) + return &mysqlctlpb.ApplyBinlogFileResponse{}, s.mysqld.ApplyBinlogFile(ctx, request) } // ReinitConfig implements the server side of the MysqlctlClient interface. @@ -71,6 +66,15 @@ func (s *server) RefreshConfig(ctx context.Context, request *mysqlctlpb.RefreshC return &mysqlctlpb.RefreshConfigResponse{}, s.mysqld.RefreshConfig(ctx, s.cnf) } +// VersionString registers the Server for RPCs. +func (s *server) VersionString(ctx context.Context, request *mysqlctlpb.VersionStringRequest) (*mysqlctlpb.VersionStringResponse, error) { + version, err := s.mysqld.GetVersionString(ctx) + if err != nil { + return nil, err + } + return &mysqlctlpb.VersionStringResponse{Version: version}, nil +} + // StartServer registers the Server for RPCs. 
func StartServer(s *grpc.Server, cnf *mysqlctl.Mycnf, mysqld *mysqlctl.Mysqld) { mysqlctlpb.RegisterMysqlCtlServer(s, &server{cnf: cnf, mysqld: mysqld}) diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index ee7e366d724..c0f97d438e6 100644 --- a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -19,11 +19,12 @@ package mysqlctl import ( "context" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/mysqlctl/tmutils" + mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" ) @@ -34,7 +35,8 @@ type MysqlDaemon interface { Start(ctx context.Context, cnf *Mycnf, mysqldArgs ...string) error Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bool) error RunMysqlUpgrade(ctx context.Context) error - ApplyBinlogFile(ctx context.Context, binlogFile string, restorePos mysql.Position) error + ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.ApplyBinlogFileRequest) error + ReadBinlogFilesTimestamps(ctx context.Context, req *mysqlctlpb.ReadBinlogFilesTimestampsRequest) (*mysqlctlpb.ReadBinlogFilesTimestampsResponse, error) ReinitConfig(ctx context.Context, cnf *Mycnf) error Wait(ctx context.Context, cnf *Mycnf) error @@ -50,12 +52,12 @@ type MysqlDaemon interface { // replication related methods StartReplication(hookExtraEnv map[string]string) error RestartReplication(hookExtraEnv map[string]string) error - StartReplicationUntilAfter(ctx context.Context, pos mysql.Position) error + StartReplicationUntilAfter(ctx context.Context, pos replication.Position) error StopReplication(hookExtraEnv map[string]string) error StopIOThread(ctx context.Context) error - ReplicationStatus() (mysql.ReplicationStatus, error) - PrimaryStatus(ctx context.Context) (mysql.PrimaryStatus, error) - GetGTIDPurged(ctx 
context.Context) (mysql.Position, error) + ReplicationStatus() (replication.ReplicationStatus, error) + PrimaryStatus(ctx context.Context) (replication.PrimaryStatus, error) + GetGTIDPurged(ctx context.Context) (replication.Position, error) SetSemiSyncEnabled(source, replica bool) error SemiSyncEnabled() (source, replica bool) SemiSyncExtensionLoaded() (bool, error) @@ -72,20 +74,20 @@ type MysqlDaemon interface { // reparenting related methods ResetReplication(ctx context.Context) error - PrimaryPosition() (mysql.Position, error) + PrimaryPosition() (replication.Position, error) IsReadOnly() (bool, error) IsSuperReadOnly() (bool, error) SetReadOnly(on bool) error SetSuperReadOnly(on bool) (ResetSuperReadOnlyFunc, error) - SetReplicationPosition(ctx context.Context, pos mysql.Position) error + SetReplicationPosition(ctx context.Context, pos replication.Position) error SetReplicationSource(ctx context.Context, host string, port int32, stopReplicationBefore bool, startReplicationAfter bool) error WaitForReparentJournal(ctx context.Context, timeCreatedNS int64) error - WaitSourcePos(context.Context, mysql.Position) error + WaitSourcePos(context.Context, replication.Position) error // Promote makes the current server the primary. It will not change // the read_only state of the server. 
- Promote(map[string]string) (mysql.Position, error) + Promote(map[string]string) (replication.Position, error) // Schema related methods GetSchema(ctx context.Context, dbName string, request *tabletmanagerdatapb.GetSchemaRequest) (*tabletmanagerdatapb.SchemaDefinition, error) @@ -103,10 +105,10 @@ type MysqlDaemon interface { GetAllPrivsConnection(ctx context.Context) (*dbconnpool.DBConnection, error) // GetVersionString returns the database version as a string - GetVersionString(ctx context.Context) string + GetVersionString(ctx context.Context) (string, error) // GetVersionComment returns the version comment - GetVersionComment(ctx context.Context) string + GetVersionComment(ctx context.Context) (string, error) // ExecuteSuperQueryList executes a list of queries, no result ExecuteSuperQueryList(ctx context.Context, queryList []string) error diff --git a/go/vt/mysqlctl/mysqlctlclient/interface.go b/go/vt/mysqlctl/mysqlctlclient/interface.go index 1800d8d98e5..4ab03a9df5b 100644 --- a/go/vt/mysqlctl/mysqlctlclient/interface.go +++ b/go/vt/mysqlctl/mysqlctlclient/interface.go @@ -25,6 +25,7 @@ import ( "github.com/spf13/pflag" "vitess.io/vitess/go/vt/log" + mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" "vitess.io/vitess/go/vt/servenv" ) @@ -50,7 +51,10 @@ type MysqlctlClient interface { RunMysqlUpgrade(ctx context.Context) error // ApplyBinlogFile calls Mysqld.ApplyBinlogFile remotely. - ApplyBinlogFile(ctx context.Context, binlogFileName, binlogRestorePosition string) error + ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.ApplyBinlogFileRequest) error + + // ReadBinlogFilesTimestamps calls Mysqld.ReadBinlogFilesTimestamps remotely. + ReadBinlogFilesTimestamps(ctx context.Context, req *mysqlctlpb.ReadBinlogFilesTimestampsRequest) (*mysqlctlpb.ReadBinlogFilesTimestampsResponse, error) // ReinitConfig calls Mysqld.ReinitConfig remotely. 
ReinitConfig(ctx context.Context) error @@ -58,6 +62,9 @@ type MysqlctlClient interface { // RefreshConfig calls Mysqld.RefreshConfig remotely. RefreshConfig(ctx context.Context) error + // VersionString calls Mysqld.VersionString remotely. + VersionString(ctx context.Context) (string, error) + // Close will terminate the connection. This object won't be used anymore. Close() } diff --git a/go/vt/mysqlctl/mysqld.go b/go/vt/mysqlctl/mysqld.go index c93c4574022..b8597735b9b 100644 --- a/go/vt/mysqlctl/mysqld.go +++ b/go/vt/mysqlctl/mysqld.go @@ -42,8 +42,12 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/config" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/hook" @@ -53,6 +57,8 @@ import ( "vitess.io/vitess/go/vt/vterrors" vtenv "vitess.io/vitess/go/vt/env" + mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" + "vitess.io/vitess/go/vt/proto/vtrpc" ) var ( @@ -81,6 +87,9 @@ var ( replicationConnectRetry = 10 * time.Second versionRegex = regexp.MustCompile(`Ver ([0-9]+)\.([0-9]+)\.([0-9]+)`) + + binlogEntryCommittedTimestampRegex = regexp.MustCompile("original_committed_timestamp=([0-9]+)") + binlogEntryTimestampGTIDRegexp = regexp.MustCompile(`^#(.+) server id.*\bGTID\b`) ) // How many bytes from MySQL error log to sample for error messages @@ -568,7 +577,6 @@ func (mysqld *Mysqld) Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bo // If input is not nil, pipe it to the command's stdin. func execCmd(name string, args, env []string, dir string, input io.Reader) (cmd *exec.Cmd, output string, err error) { cmdPath, _ := exec.LookPath(name) - log.Infof("execCmd: %v %v %v", name, cmdPath, args) cmd = exec.Command(cmdPath, args...) 
cmd.Env = env @@ -579,10 +587,9 @@ func execCmd(name string, args, env []string, dir string, input io.Reader) (cmd out, err := cmd.CombinedOutput() output = string(out) if err != nil { - log.Infof("execCmd: %v failed: %v", name, err) - err = fmt.Errorf("%v: %v, output: %v", name, err, output) + log.Errorf("execCmd: %v failed: %v", name, err) + err = fmt.Errorf("%v: %w, output: %v", name, err, output) } - log.Infof("execCmd: %v output: %v", name, output) return cmd, output, err } @@ -651,7 +658,7 @@ func (mysqld *Mysqld) Init(ctx context.Context, cnf *Mycnf, initDBSQLFile string return err } if initDBSQLFile == "" { // default to built-in - if err := mysqld.executeMysqlScript(params, strings.NewReader(config.DefaultInitDB)); err != nil { + if err := mysqld.executeMysqlScript(ctx, params, config.DefaultInitDB); err != nil { return fmt.Errorf("failed to initialize mysqld: %v", err) } return nil @@ -663,7 +670,11 @@ func (mysqld *Mysqld) Init(ctx context.Context, cnf *Mycnf, initDBSQLFile string return fmt.Errorf("can't open init_db_sql_file (%v): %v", initDBSQLFile, err) } defer sqlFile.Close() - if err := mysqld.executeMysqlScript(params, sqlFile); err != nil { + script, err := io.ReadAll(sqlFile) + if err != nil { + return fmt.Errorf("can't read init_db_sql_file (%v): %v", initDBSQLFile, err) + } + if err := mysqld.executeMysqlScript(ctx, params, string(script)); err != nil { return fmt.Errorf("can't run init_db_sql_file (%v): %v", initDBSQLFile, err) } return nil @@ -1006,34 +1017,25 @@ func deleteTopDir(dir string) (removalErr error) { return } -// executeMysqlScript executes a .sql script from an io.Reader with the mysql -// command line tool. It uses the connParams as is, not adding credentials. -func (mysqld *Mysqld) executeMysqlScript(connParams *mysql.ConnParams, sql io.Reader) error { - dir, err := vtenv.VtMysqlRoot() +// executeMysqlScript executes the contents of an SQL script as a string. +// It uses the connParams as is, not adding credentials. 
+func (mysqld *Mysqld) executeMysqlScript(ctx context.Context, connParams *mysql.ConnParams, sql string) error { + connector := dbconfigs.New(connParams) + conn, err := connector.Connect(ctx) if err != nil { return err } - name, err := binaryPath(dir, "mysql") - if err != nil { - return err - } - cnf, err := mysqld.defaultsExtraFile(connParams) - if err != nil { - return err - } - defer os.Remove(cnf) - args := []string{ - "--defaults-extra-file=" + cnf, - "--batch", - "--default-character-set=utf8mb4", - } - env, err := buildLdPaths() + defer conn.Close() + + _, more, err := conn.ExecuteFetchMulti(sql, -1, false) if err != nil { return err } - _, _, err = execCmd(name, args, env, dir, sql) - if err != nil { - return err + for more { + _, more, _, err = conn.ReadQueryResult(0, false) + if err != nil { + return err + } } return nil } @@ -1133,38 +1135,35 @@ func buildLdPaths() ([]string, error) { } // GetVersionString is part of the MysqlExecutor interface. -func (mysqld *Mysqld) GetVersionString(ctx context.Context) string { - qr, err := mysqld.FetchSuperQuery(ctx, "select @@global.version") - if err != nil { - log.Errorf("Error fetching MySQL version: %v", err) - return "" - } - if len(qr.Rows) != 1 { - log.Errorf("Unexpected number of rows: %v", qr.Rows) - return "" +func (mysqld *Mysqld) GetVersionString(ctx context.Context) (string, error) { + // Execute as remote action on mysqlctld to ensure we get the actual running MySQL version. + if socketFile != "" { + client, err := mysqlctlclient.New("unix", socketFile) + if err != nil { + return "", fmt.Errorf("can't dial mysqlctld: %v", err) + } + defer client.Close() + return client.VersionString(ctx) } - res := qr.Named().Row() - version, _ := res.ToString("@@global.version") - return version + return GetVersionString() } // GetVersionComment gets the version comment. 
-func (mysqld *Mysqld) GetVersionComment(ctx context.Context) string { +func (mysqld *Mysqld) GetVersionComment(ctx context.Context) (string, error) { qr, err := mysqld.FetchSuperQuery(ctx, "select @@global.version_comment") if err != nil { - return "" + return "", err } if len(qr.Rows) != 1 { - return "" + return "", fmt.Errorf("unexpected result length: %v", len(qr.Rows)) } res := qr.Named().Row() - versionComment, _ := res.ToString("@@global.version_comment") - return versionComment + return res.ToString("@@global.version_comment") } // ApplyBinlogFile extracts a binary log file and applies it to MySQL. It is the equivalent of: // $ mysqlbinlog --include-gtids binlog.file | mysql -func (mysqld *Mysqld) ApplyBinlogFile(ctx context.Context, binlogFile string, restorePos mysql.Position) error { +func (mysqld *Mysqld) ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.ApplyBinlogFileRequest) error { if socketFile != "" { log.Infof("executing Mysqld.ApplyBinlogFile() remotely via mysqlctld server: %v", socketFile) client, err := mysqlctlclient.New("unix", socketFile) @@ -1172,7 +1171,7 @@ func (mysqld *Mysqld) ApplyBinlogFile(ctx context.Context, binlogFile string, re return fmt.Errorf("can't dial mysqlctld: %v", err) } defer client.Close() - return client.ApplyBinlogFile(ctx, binlogFile, mysql.EncodePosition(restorePos)) + return client.ApplyBinlogFile(ctx, req) } var pipe io.ReadCloser var mysqlbinlogCmd *exec.Cmd @@ -1192,14 +1191,20 @@ func (mysqld *Mysqld) ApplyBinlogFile(ctx context.Context, binlogFile string, re return err } args := []string{} - if gtids := restorePos.GTIDSet.String(); gtids != "" { + if gtids := req.BinlogRestorePosition; gtids != "" { args = append(args, "--include-gtids", gtids, ) } + if restoreToTimestamp := protoutil.TimeFromProto(req.BinlogRestoreDatetime).UTC(); !restoreToTimestamp.IsZero() { + args = append(args, + "--stop-datetime", + restoreToTimestamp.Format(sqltypes.TimestampFormat), + ) + } - args = append(args, binlogFile) + 
args = append(args, req.BinlogFileName) mysqlbinlogCmd = exec.Command(name, args...) mysqlbinlogCmd.Dir = dir @@ -1241,7 +1246,7 @@ func (mysqld *Mysqld) ApplyBinlogFile(ctx context.Context, binlogFile string, re log.Infof("ApplyBinlogFile: disabling super_read_only") resetFunc, err := mysqld.SetSuperReadOnly(false) if err != nil { - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Number() == mysql.ERUnknownSystemVariable { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERUnknownSystemVariable { log.Warningf("ApplyBinlogFile: server does not know about super_read_only, continuing anyway...") } else { log.Errorf("ApplyBinlogFile: unexpected error while trying to set super_read_only: %v", err) @@ -1288,6 +1293,174 @@ func (mysqld *Mysqld) ApplyBinlogFile(ctx context.Context, binlogFile string, re return nil } +// parseBinlogEntryTimestamp attempts to extract a timestamp from a binlog entry. +func parseBinlogEntryTimestamp(logEntry string) (t time.Time, err error) { + if len(logEntry) == 0 { + return t, nil + } + if logEntry[0] != '#' { + return t, nil + } + if submatch := binlogEntryCommittedTimestampRegex.FindStringSubmatch(logEntry); submatch != nil { + // MySQL 8.0 + binlogEntryCommittedTimestamp := submatch[1] + unixMicros, err := strconv.ParseInt(binlogEntryCommittedTimestamp, 10, 64) + if err != nil { + return t, err + } + return time.UnixMicro(unixMicros), nil + } + if submatch := binlogEntryTimestampGTIDRegexp.FindStringSubmatch(logEntry); submatch != nil { + // MySQL 5.7 + t, err = ParseBinlogTimestamp(submatch[1]) + if err != nil { + return t, err + } + return t, nil + } + return t, nil +} + +// scanBinlogTimestamp invokes a `mysqlbinlog` binary to look for a timestamp in the given binary. The function +// looks for the first and last timestamps. 
+func (mysqld *Mysqld) scanBinlogTimestamp( + mysqlbinlogDir string, + mysqlbinlogEnv []string, + mysqlbinlogName string, + binlogFile string, + stopAtFirst bool, // unused at this moment, to be used as an optimization hint +) ( + firstMatchedTime time.Time, + lastMatchedTime time.Time, + err error, +) { + args := []string{binlogFile} + mysqlbinlogCmd := exec.Command(mysqlbinlogName, args...) + mysqlbinlogCmd.Dir = mysqlbinlogDir + mysqlbinlogCmd.Env = mysqlbinlogEnv + log.Infof("ApplyBinlogFile: running mysqlbinlog command: %#v", mysqlbinlogCmd) + pipe, err := mysqlbinlogCmd.StdoutPipe() // to be piped into mysql + if err != nil { + return firstMatchedTime, lastMatchedTime, err + } + scan := func() error { + // Read line by line and process it + scanner := bufio.NewScanner(pipe) + for scanner.Scan() { + logEntry := scanner.Text() + + t, err := parseBinlogEntryTimestamp(logEntry) + if err != nil { + return err + } + if t.IsZero() { + continue + } + if firstMatchedTime.IsZero() { + firstMatchedTime = t + } + lastMatchedTime = t + } + return nil + } + if err := mysqlbinlogCmd.Start(); err != nil { // Start() is nonblocking + return firstMatchedTime, lastMatchedTime, err + } + defer mysqlbinlogCmd.Process.Kill() + if err := scan(); err != nil { // We must first exhaust reading the command's output, before calling cmd.Wait() + return firstMatchedTime, lastMatchedTime, vterrors.Wrapf(err, "scanning mysqlbinlog output in ReadBinlogFilesTimestamps") + } + if err := mysqlbinlogCmd.Wait(); err != nil { + return firstMatchedTime, lastMatchedTime, vterrors.Wrapf(err, "waiting on mysqlbinlog command in ReadBinlogFilesTimestamps") + } + return firstMatchedTime, lastMatchedTime, nil +} + +// ReadBinlogFilesTimestamps reads all given binlog files via `mysqlbinlog` command and returns the first and last found transaction timestamps +func (mysqld *Mysqld) ReadBinlogFilesTimestamps(ctx context.Context, req *mysqlctlpb.ReadBinlogFilesTimestampsRequest) 
(*mysqlctlpb.ReadBinlogFilesTimestampsResponse, error) { + if len(req.BinlogFileNames) == 0 { + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "empty binlog list in ReadBinlogFilesTimestampsRequest") + } + if socketFile != "" { + log.Infof("executing Mysqld.ReadBinlogFilesTimestamps() remotely via mysqlctld server: %v", socketFile) + client, err := mysqlctlclient.New("unix", socketFile) + if err != nil { + return nil, fmt.Errorf("can't dial mysqlctld: %v", err) + } + defer client.Close() + return client.ReadBinlogFilesTimestamps(ctx, req) + } + dir, err := vtenv.VtMysqlRoot() + if err != nil { + return nil, err + } + env, err := buildLdPaths() + if err != nil { + return nil, err + } + mysqlbinlogName, err := binaryPath(dir, "mysqlbinlog") + if err != nil { + return nil, err + } + + lastMatchedTimeMap := map[string]time.Time{} // a simple cache to avoid rescanning same files. Key=binlog file name + + resp := &mysqlctlpb.ReadBinlogFilesTimestampsResponse{} + // Find first timestamp + err = func() error { + for _, binlogFile := range req.BinlogFileNames { + firstMatchedTime, lastMatchedTime, err := mysqld.scanBinlogTimestamp(dir, env, mysqlbinlogName, binlogFile, true) + if err != nil { + return vterrors.Wrapf(err, "while scanning for first binlog timestamp in %v", binlogFile) + } + if !lastMatchedTime.IsZero() { + // cache result + lastMatchedTimeMap[binlogFile] = lastMatchedTime + } + if firstMatchedTime.IsZero() { + // Timestamp not found in this file. + continue + } + resp.FirstTimestamp = protoutil.TimeToProto(firstMatchedTime) + resp.FirstTimestampBinlog = binlogFile + return nil // early break + } + return nil + }() + if err != nil { + return resp, err + } + // Find last timestamp + err = func() error { + for i := len(req.BinlogFileNames) - 1; i >= 0; i-- { + binlogFile := req.BinlogFileNames[i] + + // See if we have a cached value for this file. 
This is certainly the situation if there's a single binary log file in req.BinlogFileNames, + // which means the first file and last file are the same, and so we have already parsed the file while searching for the first timestamp. + lastMatchedTime, ok := lastMatchedTimeMap[binlogFile] + if !ok { + var err error + _, lastMatchedTime, err = mysqld.scanBinlogTimestamp(dir, env, mysqlbinlogName, binlogFile, false) + if err != nil { + return vterrors.Wrapf(err, "while scanning for last binlog timestamp in %v", binlogFile) + } + } + if lastMatchedTime.IsZero() { + // Timestamp not found in this file. + continue + } + resp.LastTimestamp = protoutil.TimeToProto(lastMatchedTime) + resp.LastTimestampBinlog = binlogFile + return nil // early break + } + return nil + }() + if err != nil { + return resp, err + } + return resp, nil +} + // noSocketFile panics if socketFile is set. This is to prevent // incorrect use of settings not supported when we're running // remote through mysqlctl. @@ -1295,7 +1468,7 @@ func noSocketFile() { if socketFile != "" { // We log an error for now until we fix the issue with ApplySchema surfacing in MoveTables. 
// See https://github.com/vitessio/vitess/issues/13203 and https://github.com/vitessio/vitess/pull/13178 - //panic("Running remotely through mysqlctl, socketFile must not be set") + // panic("Running remotely through mysqlctl, socketFile must not be set") log.Warning("Running remotely through mysqlctl and thus socketFile should not be set") } } diff --git a/go/vt/mysqlctl/mysqld_test.go b/go/vt/mysqlctl/mysqld_test.go index 35f038493eb..435090008f2 100644 --- a/go/vt/mysqlctl/mysqld_test.go +++ b/go/vt/mysqlctl/mysqld_test.go @@ -18,6 +18,10 @@ package mysqlctl import ( "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type testcase struct { @@ -105,3 +109,66 @@ func TestParseVersionString(t *testing.T) { } } + +func TestRegexps(t *testing.T) { + { + submatch := binlogEntryTimestampGTIDRegexp.FindStringSubmatch(`#230608 13:14:31 server id 484362839 end_log_pos 259 CRC32 0xc07510d0 GTID last_committed=0 sequence_number=1 rbr_only=yes`) + require.NotEmpty(t, submatch) + assert.Equal(t, "230608 13:14:31", submatch[1]) + _, err := ParseBinlogTimestamp(submatch[1]) + assert.NoError(t, err) + } + { + submatch := binlogEntryTimestampGTIDRegexp.FindStringSubmatch(`#230608 13:14:31 server id 484362839 end_log_pos 322 CRC32 0x651af842 Query thread_id=62 exec_time=0 error_code=0`) + assert.Empty(t, submatch) + } + + { + submatch := binlogEntryCommittedTimestampRegex.FindStringSubmatch(`#230605 16:06:34 server id 22233 end_log_pos 1037 CRC32 0xa4707c5b GTID last_committed=4 sequence_number=5 rbr_only=no original_committed_timestamp=1685970394031366 immediate_commit_timestamp=1685970394032458 transaction_length=186`) + require.NotEmpty(t, submatch) + assert.Equal(t, "1685970394031366", submatch[1]) + } + { + submatch := binlogEntryCommittedTimestampRegex.FindStringSubmatch(`#230608 13:14:31 server id 484362839 end_log_pos 322 CRC32 0x651af842 Query thread_id=62 exec_time=0 error_code=0`) + assert.Empty(t, submatch) + } + +} + 
+func TestParseBinlogEntryTimestamp(t *testing.T) { + tcases := []struct { + name string + entry string + tm time.Time + }{ + { + name: "empty", + entry: "", + }, + { + name: "irrelevant", + entry: "/*!80001 SET @@session.original_commit_timestamp=1685970394031366*//*!*/;", + }, + { + name: "irrelevant 2", + entry: "#230605 16:06:34 server id 22233 end_log_pos 1139 CRC32 0x9fa6f3c8 Query thread_id=21 exec_time=0 error_code=0", + }, + { + name: "mysql80", + entry: "#230605 16:06:34 server id 22233 end_log_pos 1037 CRC32 0xa4707c5b GTID last_committed=4 sequence_number=5 rbr_only=no original_committed_timestamp=1685970394031366 immediate_commit_timestamp=1685970394032458 transaction_length=186", + tm: time.UnixMicro(1685970394031366), + }, + { + name: "mysql57", + entry: "#230608 13:14:31 server id 484362839 end_log_pos 259 CRC32 0xc07510d0 GTID last_committed=0 sequence_number=1 rbr_only=yes", + tm: time.Date(2023, time.June, 8, 13, 14, 31, 0, time.UTC), + }, + } + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + tm, err := parseBinlogEntryTimestamp(tcase.entry) + assert.NoError(t, err) + assert.Equal(t, tcase.tm, tm) + }) + } +} diff --git a/go/vt/mysqlctl/query.go b/go/vt/mysqlctl/query.go index 1063d1a20b7..ceed3f58e03 100644 --- a/go/vt/mysqlctl/query.go +++ b/go/vt/mysqlctl/query.go @@ -22,7 +22,7 @@ import ( "strings" "time" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/log" @@ -38,7 +38,7 @@ func getPoolReconnect(ctx context.Context, pool *dbconnpool.ConnectionPool) (*db // Run a test query to see if this connection is still good. if _, err := conn.ExecuteFetch("SELECT 1", 1, false); err != nil { // If we get a connection error, try to reconnect. 
- if sqlErr, ok := err.(*mysql.SQLError); ok && (sqlErr.Number() == mysql.CRServerGone || sqlErr.Number() == mysql.CRServerLost) { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && (sqlErr.Number() == sqlerror.CRServerGone || sqlErr.Number() == sqlerror.CRServerLost) { if err := conn.Reconnect(ctx); err != nil { conn.Recycle() return nil, err diff --git a/go/vt/mysqlctl/reparent.go b/go/vt/mysqlctl/reparent.go index a258b530f9f..b76e342d0cd 100644 --- a/go/vt/mysqlctl/reparent.go +++ b/go/vt/mysqlctl/reparent.go @@ -21,15 +21,14 @@ This file contains the reparenting methods for mysqlctl. */ import ( + "context" "time" - "vitess.io/vitess/go/vt/sidecardb" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/log" - - "context" ) // GenerateInitialBinlogEntry is used to create a binlog entry when @@ -37,20 +36,20 @@ import ( // can set it as the starting position for replicas to start MySQL // Replication from. func GenerateInitialBinlogEntry() string { - return sidecardb.GetCreateQuery() + return sidecar.GetCreateQuery() } // PopulateReparentJournal returns the SQL command to use to populate // the reparent_journal table, as well as the time_created_ns // value used. 
-func PopulateReparentJournal(timeCreatedNS int64, actionName, primaryAlias string, pos mysql.Position) string { - posStr := mysql.EncodePosition(pos) - if len(posStr) > mysql.MaximumPositionSize { - posStr = posStr[:mysql.MaximumPositionSize] +func PopulateReparentJournal(timeCreatedNS int64, actionName, primaryAlias string, pos replication.Position) string { + posStr := replication.EncodePosition(pos) + if len(posStr) > replication.MaximumPositionSize { + posStr = posStr[:replication.MaximumPositionSize] } return sqlparser.BuildParsedQuery("INSERT INTO %s.reparent_journal "+ "(time_created_ns, action_name, primary_alias, replication_position) "+ - "VALUES (%d, '%s', '%s', '%s')", sidecardb.GetIdentifier(), + "VALUES (%d, '%s', '%s', '%s')", sidecar.GetIdentifier(), timeCreatedNS, actionName, primaryAlias, posStr).Query } @@ -58,7 +57,7 @@ func PopulateReparentJournal(timeCreatedNS int64, actionName, primaryAlias strin // for a reparent_journal row. func queryReparentJournal(timeCreatedNS int64) string { return sqlparser.BuildParsedQuery("SELECT action_name, primary_alias, replication_position FROM %s.reparent_journal WHERE time_created_ns=%d", - sidecardb.GetIdentifier(), timeCreatedNS).Query + sidecar.GetIdentifier(), timeCreatedNS).Query } // WaitForReparentJournal will wait until the context is done for @@ -86,11 +85,11 @@ func (mysqld *Mysqld) WaitForReparentJournal(ctx context.Context, timeCreatedNS } // Promote will promote this server to be the new primary. 
-func (mysqld *Mysqld) Promote(hookExtraEnv map[string]string) (mysql.Position, error) { +func (mysqld *Mysqld) Promote(hookExtraEnv map[string]string) (replication.Position, error) { ctx := context.TODO() conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { - return mysql.Position{}, err + return replication.Position{}, err } defer conn.Recycle() @@ -107,7 +106,7 @@ func (mysqld *Mysqld) Promote(hookExtraEnv map[string]string) (mysql.Position, e } if err := mysqld.executeSuperQueryListConn(ctx, conn, cmds); err != nil { - return mysql.Position{}, err + return replication.Position{}, err } return conn.PrimaryPosition() } diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index e180cd1e9c9..2b92f5d961d 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -29,9 +29,7 @@ import ( "strings" "time" - "vitess.io/vitess/go/vt/vtgate/evalengine" - - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/vt/hook" "vitess.io/vitess/go/vt/log" @@ -87,7 +85,7 @@ func (mysqld *Mysqld) StartReplication(hookExtraEnv map[string]string) error { } // StartReplicationUntilAfter starts replication until replication has come to `targetPos`, then it stops replication -func (mysqld *Mysqld) StartReplicationUntilAfter(ctx context.Context, targetPos mysql.Position) error { +func (mysqld *Mysqld) StartReplicationUntilAfter(ctx context.Context, targetPos replication.Position) error { conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { return err @@ -100,7 +98,7 @@ func (mysqld *Mysqld) StartReplicationUntilAfter(ctx context.Context, targetPos } // StartSQLThreadUntilAfter starts replication's SQL thread(s) until replication has come to `targetPos`, then it stops it -func (mysqld *Mysqld) StartSQLThreadUntilAfter(ctx context.Context, targetPos mysql.Position) error { +func (mysqld *Mysqld) StartSQLThreadUntilAfter(ctx context.Context, targetPos 
replication.Position) error { conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { return err @@ -183,7 +181,7 @@ func (mysqld *Mysqld) GetMysqlPort() (int32, error) { if len(qr.Rows) != 1 { return 0, errors.New("no port variable in mysql") } - utemp, err := evalengine.ToUint64(qr.Rows[0][1]) + utemp, err := qr.Rows[0][1].ToCastUint64() if err != nil { return 0, err } @@ -199,7 +197,7 @@ func (mysqld *Mysqld) GetServerID(ctx context.Context) (uint32, error) { if len(qr.Rows) != 1 { return 0, errors.New("no server_id in mysql") } - utemp, err := evalengine.ToUint64(qr.Rows[0][0]) + utemp, err := qr.Rows[0][0].ToCastUint64() if err != nil { return 0, err } @@ -258,8 +256,7 @@ func (mysqld *Mysqld) SetReadOnly(on bool) error { case true: newState = "ReadOnly" } - log.Infof("SetReadOnly setting connection setting of %s:%d to : %s", - mysqld.dbcfgs.Host, mysqld.dbcfgs.Port, newState) + log.Infof("SetReadOnly setting to : %s", newState) query := "SET GLOBAL read_only = " if on { @@ -319,7 +316,7 @@ func (mysqld *Mysqld) SetSuperReadOnly(on bool) (ResetSuperReadOnlyFunc, error) } // WaitSourcePos lets replicas wait to given replication position -func (mysqld *Mysqld) WaitSourcePos(ctx context.Context, targetPos mysql.Position) error { +func (mysqld *Mysqld) WaitSourcePos(ctx context.Context, targetPos replication.Position) error { // Get a connection. conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { @@ -331,7 +328,7 @@ func (mysqld *Mysqld) WaitSourcePos(ctx context.Context, targetPos mysql.Positio // unless that flavor is also filePos. waitCommandName := "WaitUntilPositionCommand" var query string - if targetPos.MatchesFlavor(mysql.FilePosFlavorID) { + if targetPos.MatchesFlavor(replication.FilePosFlavorID) { // If we are the primary, WaitUntilFilePositionCommand will fail. // But position is most likely reached. So, check the position // first. 
@@ -387,10 +384,10 @@ func (mysqld *Mysqld) WaitSourcePos(ctx context.Context, targetPos mysql.Positio } // ReplicationStatus returns the server replication status -func (mysqld *Mysqld) ReplicationStatus() (mysql.ReplicationStatus, error) { +func (mysqld *Mysqld) ReplicationStatus() (replication.ReplicationStatus, error) { conn, err := getPoolReconnect(context.TODO(), mysqld.dbaPool) if err != nil { - return mysql.ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } defer conn.Recycle() @@ -398,10 +395,10 @@ func (mysqld *Mysqld) ReplicationStatus() (mysql.ReplicationStatus, error) { } // PrimaryStatus returns the primary replication statuses -func (mysqld *Mysqld) PrimaryStatus(ctx context.Context) (mysql.PrimaryStatus, error) { +func (mysqld *Mysqld) PrimaryStatus(ctx context.Context) (replication.PrimaryStatus, error) { conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { - return mysql.PrimaryStatus{}, err + return replication.PrimaryStatus{}, err } defer conn.Recycle() @@ -409,10 +406,10 @@ func (mysqld *Mysqld) PrimaryStatus(ctx context.Context) (mysql.PrimaryStatus, e } // GetGTIDPurged returns the gtid purged statuses -func (mysqld *Mysqld) GetGTIDPurged(ctx context.Context) (mysql.Position, error) { +func (mysqld *Mysqld) GetGTIDPurged(ctx context.Context) (replication.Position, error) { conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { - return mysql.Position{}, err + return replication.Position{}, err } defer conn.Recycle() @@ -420,10 +417,10 @@ func (mysqld *Mysqld) GetGTIDPurged(ctx context.Context) (mysql.Position, error) } // PrimaryPosition returns the primary replication position. 
-func (mysqld *Mysqld) PrimaryPosition() (mysql.Position, error) { +func (mysqld *Mysqld) PrimaryPosition() (replication.Position, error) { conn, err := getPoolReconnect(context.TODO(), mysqld.dbaPool) if err != nil { - return mysql.Position{}, err + return replication.Position{}, err } defer conn.Recycle() @@ -432,7 +429,7 @@ func (mysqld *Mysqld) PrimaryPosition() (mysql.Position, error) { // SetReplicationPosition sets the replication position at which the replica will resume // when its replication is started. -func (mysqld *Mysqld) SetReplicationPosition(ctx context.Context, pos mysql.Position) error { +func (mysqld *Mysqld) SetReplicationPosition(ctx context.Context, pos replication.Position) error { conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { return err diff --git a/go/vt/mysqlctl/s3backupstorage/s3.go b/go/vt/mysqlctl/s3backupstorage/s3.go index 4d10cd7f080..ef3bfc37b31 100644 --- a/go/vt/mysqlctl/s3backupstorage/s3.go +++ b/go/vt/mysqlctl/s3backupstorage/s3.go @@ -36,6 +36,7 @@ import ( "sort" "strings" "sync" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/client" @@ -48,6 +49,7 @@ import ( "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" + stats "vitess.io/vitess/go/vt/mysqlctl/backupstats" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" "vitess.io/vitess/go/vt/servenv" ) @@ -170,7 +172,8 @@ func (bh *S3BackupHandle) AddFile(ctx context.Context, filename string, filesize u.PartSize = partSizeBytes }) object := objName(bh.dir, bh.name, filename) - + sendStats := bh.bs.params.Stats.Scope(stats.Operation("AWS:Request:Send")) + // Using UploadWithContext breaks uploading to Minio and Ceph https://github.com/vitessio/vitess/issues/14188 _, err := uploader.Upload(&s3manager.UploadInput{ Bucket: &bucket, Key: object, @@ -179,7 +182,11 @@ func (bh *S3BackupHandle) AddFile(ctx context.Context, filename string, filesize SSECustomerAlgorithm: bh.bs.s3SSE.customerAlg, SSECustomerKey: 
bh.bs.s3SSE.customerKey, SSECustomerKeyMD5: bh.bs.s3SSE.customerMd5, - }) + }, s3manager.WithUploaderRequestOptions(func(r *request.Request) { + r.Handlers.CompleteAttempt.PushBack(func(r *request.Request) { + sendStats.TimedIncrement(time.Since(r.AttemptTime)) + }) + })) if err != nil { reader.CloseWithError(err) bh.RecordError(err) @@ -212,12 +219,17 @@ func (bh *S3BackupHandle) ReadFile(ctx context.Context, filename string) (io.Rea return nil, fmt.Errorf("ReadFile cannot be called on read-write backup") } object := objName(bh.dir, bh.name, filename) - out, err := bh.client.GetObject(&s3.GetObjectInput{ + sendStats := bh.bs.params.Stats.Scope(stats.Operation("AWS:Request:Send")) + out, err := bh.client.GetObjectWithContext(ctx, &s3.GetObjectInput{ Bucket: &bucket, Key: object, SSECustomerAlgorithm: bh.bs.s3SSE.customerAlg, SSECustomerKey: bh.bs.s3SSE.customerKey, SSECustomerKeyMD5: bh.bs.s3SSE.customerMd5, + }, func(r *request.Request) { + r.Handlers.CompleteAttempt.PushBack(func(r *request.Request) { + sendStats.TimedIncrement(time.Since(r.AttemptTime)) + }) }) if err != nil { return nil, err @@ -272,6 +284,7 @@ type S3BackupStorage struct { _client *s3.S3 mu sync.Mutex s3SSE S3ServerSideEncryption + params backupstorage.Params } // ListBackups is part of the backupstorage.BackupStorage interface. @@ -411,8 +424,7 @@ func (bs *S3BackupStorage) Close() error { } func (bs *S3BackupStorage) WithParams(params backupstorage.Params) backupstorage.BackupStorage { - // TODO(maxeng): return a new S3BackupStorage that uses params. 
- return bs + return &S3BackupStorage{params: params} } var _ backupstorage.BackupStorage = (*S3BackupStorage)(nil) @@ -485,7 +497,7 @@ func objName(parts ...string) *string { } func init() { - backupstorage.BackupStorageMap["s3"] = &S3BackupStorage{} + backupstorage.BackupStorageMap["s3"] = &S3BackupStorage{params: backupstorage.NoParams()} logNameMap = logNameToLogLevel{ "LogOff": aws.LogOff, diff --git a/go/vt/mysqlctl/s3backupstorage/s3_test.go b/go/vt/mysqlctl/s3backupstorage/s3_test.go index 5303d88e5e5..a10432b78c2 100644 --- a/go/vt/mysqlctl/s3backupstorage/s3_test.go +++ b/go/vt/mysqlctl/s3backupstorage/s3_test.go @@ -5,31 +5,135 @@ import ( "crypto/rand" "encoding/base64" "errors" + "fmt" "net/http" + "net/url" "os" "testing" + "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/logutil" + stats "vitess.io/vitess/go/vt/mysqlctl/backupstats" + "vitess.io/vitess/go/vt/mysqlctl/backupstorage" ) -type s3ErrorClient struct{ s3iface.S3API } +type s3FakeClient struct { + s3iface.S3API + err error + delay time.Duration +} -func (s3errclient *s3ErrorClient) PutObjectRequest(in *s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput) { +func (sfc *s3FakeClient) PutObjectRequest(in *s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput) { + u, _ := url.Parse("http://localhost:1234") req := request.Request{ - HTTPRequest: &http.Request{}, // without this we segfault \_(ツ)_/¯ (see https://github.com/aws/aws-sdk-go/blob/v1.28.8/aws/request/request_context.go#L13) - Error: errors.New("some error"), // this forces req.Send() (which is called by the uploader) to always return non-nil error + HTTPRequest: &http.Request{ // without this we segfault \_(ツ)_/¯ (see 
https://github.com/aws/aws-sdk-go/blob/v1.28.8/aws/request/request_context.go#L13) + Header: make(http.Header), + URL: u, + }, + Retryer: client.DefaultRetryer{}, } + req.Handlers.Send.PushBack(func(r *request.Request) { + r.Error = sfc.err + if sfc.delay > 0 { + time.Sleep(sfc.delay) + } + }) + return &req, &s3.PutObjectOutput{} } func TestAddFileError(t *testing.T) { - bh := &S3BackupHandle{client: &s3ErrorClient{}, bs: &S3BackupStorage{}, readOnly: false} + bh := &S3BackupHandle{ + client: &s3FakeClient{err: errors.New("some error")}, + bs: &S3BackupStorage{ + params: backupstorage.NoParams(), + }, + readOnly: false, + } + + wc, err := bh.AddFile(aws.BackgroundContext(), "somefile", 100000) + require.NoErrorf(t, err, "AddFile() expected no error, got %s", err) + assert.NotNil(t, wc, "AddFile() expected non-nil WriteCloser") + + n, err := wc.Write([]byte("here are some bytes")) + require.NoErrorf(t, err, "TestAddFile() could not write to uploader, got %d bytes written, err %s", n, err) + + err = wc.Close() + require.NoErrorf(t, err, "TestAddFile() could not close writer, got %s", err) + + bh.waitGroup.Wait() // wait for the goroutine to finish, at which point it should have recorded an error + + require.True(t, bh.HasErrors(), "AddFile() expected bh to record async error but did not") +} + +func TestAddFileStats(t *testing.T) { + fakeStats := stats.NewFakeStats() + + delay := 10 * time.Millisecond + + bh := &S3BackupHandle{ + client: &s3FakeClient{delay: delay}, + bs: &S3BackupStorage{ + params: backupstorage.Params{ + Logger: logutil.NewMemoryLogger(), + Stats: fakeStats, + }, + }, + readOnly: false, + } + + for i := 0; i < 4; i++ { + wc, err := bh.AddFile(aws.BackgroundContext(), fmt.Sprintf("somefile-%d", i), 100000) + require.NoErrorf(t, err, "AddFile() expected no error, got %s", err) + assert.NotNil(t, wc, "AddFile() expected non-nil WriteCloser") + + n, err := wc.Write([]byte("here are some bytes")) + require.NoErrorf(t, err, "TestAddFile() could not write 
to uploader, got %d bytes written, err %s", n, err) + + err = wc.Close() + require.NoErrorf(t, err, "TestAddFile() could not close writer, got %s", err) + } + + bh.waitGroup.Wait() // wait for the goroutine to finish, at which point it should have recorded an error + + require.Equal(t, bh.HasErrors(), false, "AddFile() expected bh not to record async errors but did") + + require.Len(t, fakeStats.ScopeCalls, 4) + scopedStats := fakeStats.ScopeReturns[0] + require.Len(t, scopedStats.ScopeV, 1) + require.Equal(t, scopedStats.ScopeV[stats.ScopeOperation], "AWS:Request:Send") + require.Len(t, scopedStats.TimedIncrementCalls, 1) + require.GreaterOrEqual(t, scopedStats.TimedIncrementCalls[0], delay) + require.Len(t, scopedStats.TimedIncrementBytesCalls, 0) +} + +func TestAddFileErrorStats(t *testing.T) { + fakeStats := stats.NewFakeStats() + + delay := 10 * time.Millisecond + + bh := &S3BackupHandle{ + client: &s3FakeClient{ + delay: delay, + err: errors.New("some error"), + }, + bs: &S3BackupStorage{ + params: backupstorage.Params{ + Logger: logutil.NewMemoryLogger(), + Stats: fakeStats, + }, + }, + readOnly: false, + } wc, err := bh.AddFile(aws.BackgroundContext(), "somefile", 100000) require.NoErrorf(t, err, "AddFile() expected no error, got %s", err) @@ -43,7 +147,15 @@ func TestAddFileError(t *testing.T) { bh.waitGroup.Wait() // wait for the goroutine to finish, at which point it should have recorded an error - require.Equal(t, bh.HasErrors(), true, "AddFile() expected bh to record async error but did not") + require.True(t, bh.HasErrors(), "AddFile() expected bh not to record async errors but did") + + require.Len(t, fakeStats.ScopeCalls, 1) + scopedStats := fakeStats.ScopeReturns[0] + require.Len(t, scopedStats.ScopeV, 1) + require.Equal(t, scopedStats.ScopeV[stats.ScopeOperation], "AWS:Request:Send") + require.Len(t, scopedStats.TimedIncrementCalls, 1) + require.GreaterOrEqual(t, scopedStats.TimedIncrementCalls[0], delay) + require.Len(t, 
scopedStats.TimedIncrementBytesCalls, 0) } func TestNoSSE(t *testing.T) { diff --git a/go/vt/mysqlctl/schema.go b/go/vt/mysqlctl/schema.go index b29ed9c49f2..397668145ef 100644 --- a/go/vt/mysqlctl/schema.go +++ b/go/vt/mysqlctl/schema.go @@ -22,21 +22,28 @@ import ( "regexp" "sort" "strings" - "sync" - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/concurrency" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/evalengine" + "golang.org/x/sync/errgroup" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqlescape" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/tmutils" - querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +const ( + // In a local environment and without latency, we have seen that an unbounded concurrency still translates to less than + // 20 concurrent MySQL connections. Which is why placing a limit of 20 concurrent goroutines (each mapped to a MySQL connection) + // is unlikely to affect optimal environments. + // In high latency environments, unbounded concurrency can translate to a very high number of concurrent MySQL connections. This + // is an undesirable behavior. We prefer to push back on GetSchema and make it run over longer time, instead. + getSchemaConcurrency = 20 ) var autoIncr = regexp.MustCompile(` AUTO_INCREMENT=\d+`) @@ -49,15 +56,13 @@ func (e EmptyColumnsErr) Error() string { return fmt.Sprintf("unable to get columns for table %s.%s using query %s", e.dbName, e.tableName, e.query) } -// executeSchemaCommands executes some SQL commands, using the mysql -// command line tool. It uses the dba connection parameters, with credentials. 
-func (mysqld *Mysqld) executeSchemaCommands(sql string) error { +// executeSchemaCommands executes some SQL commands. It uses the dba connection parameters, with credentials. +func (mysqld *Mysqld) executeSchemaCommands(ctx context.Context, sql string) error { params, err := mysqld.dbcfgs.DbaConnector().MysqlParams() if err != nil { return err } - - return mysqld.executeMysqlScript(params, strings.NewReader(sql)) + return mysqld.executeMysqlScript(ctx, params, sql) } func encodeEntityName(name string) string { @@ -104,60 +109,59 @@ func (mysqld *Mysqld) GetSchema(ctx context.Context, dbName string, request *tab ctx, cancel := context.WithCancel(ctx) defer cancel() - var wg sync.WaitGroup allErrors := &concurrency.AllErrorRecorder{} + eg, ctx := errgroup.WithContext(ctx) + eg.SetLimit(getSchemaConcurrency) + // Get per-table schema concurrently. tableNames := make([]string, 0, len(tds)) for _, td := range tds { tableNames = append(tableNames, td.Name) + td := td - wg.Add(1) - go func(td *tabletmanagerdatapb.TableDefinition) { - defer wg.Done() - + eg.Go(func() error { fields, columns, schema, err := mysqld.collectSchema(ctx, dbName, td.Name, td.Type, request.TableSchemaOnly) if err != nil { // There's a possible race condition: it could happen that a table was dropped in between reading // the list of tables (collectBasicTableData(), earlier) and the point above where we investigate // the table. // This is fine. 
We identify the situation and keep the table without any fields/columns/key information - sqlErr, isSQLErr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) - if isSQLErr && sqlErr != nil && sqlErr.Number() == mysql.ERNoSuchTable { - return + sqlErr, isSQLErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) + if isSQLErr && sqlErr != nil && sqlErr.Number() == sqlerror.ERNoSuchTable { + return nil } allErrors.RecordError(err) cancel() - return + return err } td.Fields = fields td.Columns = columns td.Schema = schema - }(td) + return nil + }) } + colMap := map[string][]string{} // Get primary columns concurrently. // The below runs a single query on `INFORMATION_SCHEMA` and does not interact with the actual tables. // It is therefore safe to run even if some tables are dropped in the interim. - colMap := map[string][]string{} - if len(tableNames) > 0 { - wg.Add(1) - go func() { - defer wg.Done() - + if len(tableNames) > 0 && !request.TableSchemaOnly { + eg.Go(func() error { var err error colMap, err = mysqld.getPrimaryKeyColumns(ctx, dbName, tableNames...) 
if err != nil { allErrors.RecordError(err) cancel() - return + return err } - }() + return nil + }) } - wg.Wait() + eg.Wait() if err := allErrors.AggrError(vterrors.Aggregate); err != nil { return nil, err } @@ -202,7 +206,7 @@ func (mysqld *Mysqld) collectBasicTableData(ctx context.Context, dbName string, var dataLength uint64 if !row[2].IsNull() { // dataLength is NULL for views, then we use 0 - dataLength, err = evalengine.ToUint64(row[2]) + dataLength, err = row[2].ToCastUint64() if err != nil { return nil, err } @@ -211,7 +215,7 @@ func (mysqld *Mysqld) collectBasicTableData(ctx context.Context, dbName string, // get row count var rowCount uint64 if !row[3].IsNull() { - rowCount, err = evalengine.ToUint64(row[3]) + rowCount, err = row[3].ToCastUint64() if err != nil { return nil, err } @@ -439,7 +443,7 @@ func (mysqld *Mysqld) PreflightSchemaChange(ctx context.Context, dbName string, initialCopySQL += s + ";\n" } } - if err = mysqld.executeSchemaCommands(initialCopySQL); err != nil { + if err = mysqld.executeSchemaCommands(ctx, initialCopySQL); err != nil { return nil, err } @@ -455,7 +459,7 @@ func (mysqld *Mysqld) PreflightSchemaChange(ctx context.Context, dbName string, sql := "SET sql_log_bin = 0;\n" sql += "USE _vt_preflight;\n" sql += change - if err = mysqld.executeSchemaCommands(sql); err != nil { + if err = mysqld.executeSchemaCommands(ctx, sql); err != nil { return nil, err } @@ -471,7 +475,7 @@ func (mysqld *Mysqld) PreflightSchemaChange(ctx context.Context, dbName string, // and clean up the extra database dropSQL := "SET sql_log_bin = 0;\n" dropSQL += "DROP DATABASE _vt_preflight;\n" - if err = mysqld.executeSchemaCommands(dropSQL); err != nil { + if err = mysqld.executeSchemaCommands(ctx, dropSQL); err != nil { return nil, err } @@ -531,7 +535,7 @@ func (mysqld *Mysqld) ApplySchemaChange(ctx context.Context, dbName string, chan // execute the schema change using an external mysql process // (to benefit from the extra commands in mysql cli) - if 
err = mysqld.executeSchemaCommands(sql); err != nil { + if err = mysqld.executeSchemaCommands(ctx, sql); err != nil { return nil, err } diff --git a/go/vt/mysqlctl/tmutils/schema.go b/go/vt/mysqlctl/tmutils/schema.go index 694b02abf5e..aae529f89b0 100644 --- a/go/vt/mysqlctl/tmutils/schema.go +++ b/go/vt/mysqlctl/tmutils/schema.go @@ -177,7 +177,7 @@ func (f *TableFilter) Includes(tableName string, tableType string) bool { // (tables), no denied tables (excludeTables) and optionally // views (includeViews). func FilterTables(sd *tabletmanagerdatapb.SchemaDefinition, tables, excludeTables []string, includeViews bool) (*tabletmanagerdatapb.SchemaDefinition, error) { - copy := proto.Clone(sd).(*tabletmanagerdatapb.SchemaDefinition) + copy := sd.CloneVT() copy.TableDefinitions = make([]*tabletmanagerdatapb.TableDefinition, 0, len(sd.TableDefinitions)) f, err := NewTableFilter(tables, excludeTables, includeViews) diff --git a/go/vt/mysqlctl/tmutils/schema_test.go b/go/vt/mysqlctl/tmutils/schema_test.go index 2b9ff3472b2..0f3d9572107 100644 --- a/go/vt/mysqlctl/tmutils/schema_test.go +++ b/go/vt/mysqlctl/tmutils/schema_test.go @@ -234,7 +234,7 @@ func TestSchemaDiff(t *testing.T) { }) testDiff(t, sd4, sd5, "sd4", "sd5", []string{ - fmt.Sprintf("schemas differ on table type for table table2:\nsd4: VIEW\n differs from:\nsd5: BASE TABLE"), //nolint + "schemas differ on table type for table table2:\nsd4: VIEW\n differs from:\nsd5: BASE TABLE", }) sd1.DatabaseSchema = "CREATE DATABASE {{.DatabaseName}}" diff --git a/go/vt/mysqlctl/xtrabackupengine.go b/go/vt/mysqlctl/xtrabackupengine.go index 6b3d04c77ca..87c0a657022 100644 --- a/go/vt/mysqlctl/xtrabackupengine.go +++ b/go/vt/mysqlctl/xtrabackupengine.go @@ -32,7 +32,8 @@ import ( "github.com/spf13/pflag" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/ioutil" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" tabletmanagerdatapb 
"vitess.io/vitess/go/vt/proto/tabletmanagerdata" @@ -71,9 +72,6 @@ const ( xtrabackupBinaryName = "xtrabackup" xtrabackupEngineName = "xtrabackup" xbstream = "xbstream" - - // closeTimeout is the timeout for closing backup files after writing. - closeTimeout = 10 * time.Minute ) // xtraBackupManifest represents a backup. @@ -180,7 +178,6 @@ func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupPara // executeFullBackup returns a boolean that indicates if the backup is usable, // and an overall error. func (be *XtrabackupEngine) executeFullBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (complete bool, finalErr error) { - if params.IncrementalFromPos != "" { return false, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "incremental backups not supported in xtrabackup engine.") } @@ -212,6 +209,11 @@ func (be *XtrabackupEngine) executeFullBackup(ctx context.Context, params Backup return false, vterrors.Wrap(err, "can't get server uuid") } + mysqlVersion, err := params.Mysqld.GetVersionString(ctx) + if err != nil { + return false, vterrors.Wrap(err, "can't get MySQL version") + } + flavor := pos.GTIDSet.Flavor() params.Logger.Infof("Detected MySQL flavor: %v", flavor) @@ -249,8 +251,12 @@ func (be *XtrabackupEngine) executeFullBackup(ctx context.Context, params Backup TabletAlias: params.TabletAlias, Keyspace: params.Keyspace, Shard: params.Shard, - BackupTime: params.BackupTime.UTC().Format(time.RFC3339), - FinishedTime: time.Now().UTC().Format(time.RFC3339), + BackupTime: FormatRFC3339(params.BackupTime.UTC()), + FinishedTime: FormatRFC3339(time.Now().UTC()), + MySQLVersion: mysqlVersion, + // xtrabackup backups are always created such that they + // are safe to use for upgrades later on. 
+ UpgradeSafe: true, }, // XtraBackup-specific fields @@ -284,8 +290,7 @@ func (be *XtrabackupEngine) backupFiles( backupFileName string, numStripes int, flavor string, -) (replicationPosition mysql.Position, finalErr error) { - +) (replicationPosition replication.Position, finalErr error) { backupProgram := path.Join(xtrabackupEnginePath, xtrabackupBinaryName) flagsToExec := []string{"--defaults-file=" + params.Cnf.Path, "--backup", @@ -356,7 +361,7 @@ func (be *XtrabackupEngine) backupFiles( destWriters := []io.Writer{} destBuffers := []*bufio.Writer{} - destCompressors := []io.WriteCloser{} + destCompressors := []io.Closer{} for _, file := range destFiles { buffer := bufio.NewWriterSize(file, writerBufferSize) destBuffers = append(destBuffers, buffer) @@ -376,7 +381,7 @@ func (be *XtrabackupEngine) backupFiles( } writer = compressor - destCompressors = append(destCompressors, compressor) + destCompressors = append(destCompressors, ioutil.NewTimeoutCloser(ctx, compressor, closeTimeout)) } destWriters = append(destWriters, writer) @@ -624,7 +629,7 @@ func (be *XtrabackupEngine) extractFiles(ctx context.Context, logger logutil.Log }() srcReaders := []io.Reader{} - srcDecompressors := []io.ReadCloser{} + srcDecompressors := []io.Closer{} for _, file := range srcFiles { reader := io.Reader(file) @@ -657,7 +662,7 @@ func (be *XtrabackupEngine) extractFiles(ctx context.Context, logger logutil.Log if err != nil { return vterrors.Wrap(err, "can't create decompressor") } - srcDecompressors = append(srcDecompressors, decompressor) + srcDecompressors = append(srcDecompressors, ioutil.NewTimeoutCloser(ctx, decompressor, closeTimeout)) reader = decompressor } @@ -746,10 +751,10 @@ func (be *XtrabackupEngine) extractFiles(ctx context.Context, logger logutil.Log var xtrabackupReplicationPositionRegexp = regexp.MustCompile(`GTID of the last change '([^']*)'`) -func findReplicationPosition(input, flavor string, logger logutil.Logger) (mysql.Position, error) { +func 
findReplicationPosition(input, flavor string, logger logutil.Logger) (replication.Position, error) { match := xtrabackupReplicationPositionRegexp.FindStringSubmatch(input) if match == nil || len(match) != 2 { - return mysql.Position{}, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "couldn't find replication position in xtrabackup stderr output") + return replication.Position{}, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "couldn't find replication position in xtrabackup stderr output") } position := match[1] // Remove all spaces, tabs, and newlines. @@ -758,13 +763,13 @@ func findReplicationPosition(input, flavor string, logger logutil.Logger) (mysql position = strings.Replace(position, "\n", "", -1) logger.Infof("Found position: %v", position) if position == "" { - return mysql.Position{}, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "empty replication position from xtrabackup") + return replication.Position{}, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "empty replication position from xtrabackup") } // flavor is required to parse a string into a mysql.Position - replicationPosition, err := mysql.ParsePosition(flavor, position) + replicationPosition, err := replication.ParsePosition(flavor, position) if err != nil { - return mysql.Position{}, vterrors.Wrapf(err, "can't parse replication position from xtrabackup: %v", position) + return replication.Position{}, vterrors.Wrapf(err, "can't parse replication position from xtrabackup: %v", position) } return replicationPosition, nil } diff --git a/go/vt/proto/binlogdata/binlogdata.pb.go b/go/vt/proto/binlogdata/binlogdata.pb.go index 6136d7b1ae8..3da747d3832 100644 --- a/go/vt/proto/binlogdata/binlogdata.pb.go +++ b/go/vt/proto/binlogdata/binlogdata.pb.go @@ -19,8 +19,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: binlogdata.proto package binlogdata @@ -158,8 +158,9 @@ func (VReplicationWorkflowType) EnumDescriptor() ([]byte, []int) { type VReplicationWorkflowSubType int32 const ( - VReplicationWorkflowSubType_None VReplicationWorkflowSubType = 0 - VReplicationWorkflowSubType_Partial VReplicationWorkflowSubType = 1 + VReplicationWorkflowSubType_None VReplicationWorkflowSubType = 0 + VReplicationWorkflowSubType_Partial VReplicationWorkflowSubType = 1 + VReplicationWorkflowSubType_AtomicCopy VReplicationWorkflowSubType = 2 ) // Enum value maps for VReplicationWorkflowSubType. @@ -167,10 +168,12 @@ var ( VReplicationWorkflowSubType_name = map[int32]string{ 0: "None", 1: "Partial", + 2: "AtomicCopy", } VReplicationWorkflowSubType_value = map[string]int32{ - "None": 0, - "Partial": 1, + "None": 0, + "Partial": 1, + "AtomicCopy": 2, } ) @@ -201,6 +204,68 @@ func (VReplicationWorkflowSubType) EnumDescriptor() ([]byte, []int) { return file_binlogdata_proto_rawDescGZIP(), []int{2} } +// VReplicationWorklfowState defines the valid states that a workflow can be in. +type VReplicationWorkflowState int32 + +const ( + VReplicationWorkflowState_Unknown VReplicationWorkflowState = 0 + VReplicationWorkflowState_Init VReplicationWorkflowState = 1 + VReplicationWorkflowState_Stopped VReplicationWorkflowState = 2 + VReplicationWorkflowState_Copying VReplicationWorkflowState = 3 + VReplicationWorkflowState_Running VReplicationWorkflowState = 4 + VReplicationWorkflowState_Error VReplicationWorkflowState = 5 + VReplicationWorkflowState_Lagging VReplicationWorkflowState = 6 +) + +// Enum value maps for VReplicationWorkflowState. 
+var ( + VReplicationWorkflowState_name = map[int32]string{ + 0: "Unknown", + 1: "Init", + 2: "Stopped", + 3: "Copying", + 4: "Running", + 5: "Error", + 6: "Lagging", + } + VReplicationWorkflowState_value = map[string]int32{ + "Unknown": 0, + "Init": 1, + "Stopped": 2, + "Copying": 3, + "Running": 4, + "Error": 5, + "Lagging": 6, + } +) + +func (x VReplicationWorkflowState) Enum() *VReplicationWorkflowState { + p := new(VReplicationWorkflowState) + *p = x + return p +} + +func (x VReplicationWorkflowState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (VReplicationWorkflowState) Descriptor() protoreflect.EnumDescriptor { + return file_binlogdata_proto_enumTypes[3].Descriptor() +} + +func (VReplicationWorkflowState) Type() protoreflect.EnumType { + return &file_binlogdata_proto_enumTypes[3] +} + +func (x VReplicationWorkflowState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use VReplicationWorkflowState.Descriptor instead. +func (VReplicationWorkflowState) EnumDescriptor() ([]byte, []int) { + return file_binlogdata_proto_rawDescGZIP(), []int{3} +} + // VEventType enumerates the event types. Many of these types // will not be encountered in RBR mode. type VEventType int32 @@ -301,11 +366,11 @@ func (x VEventType) String() string { } func (VEventType) Descriptor() protoreflect.EnumDescriptor { - return file_binlogdata_proto_enumTypes[3].Descriptor() + return file_binlogdata_proto_enumTypes[4].Descriptor() } func (VEventType) Type() protoreflect.EnumType { - return &file_binlogdata_proto_enumTypes[3] + return &file_binlogdata_proto_enumTypes[4] } func (x VEventType) Number() protoreflect.EnumNumber { @@ -314,7 +379,7 @@ func (x VEventType) Number() protoreflect.EnumNumber { // Deprecated: Use VEventType.Descriptor instead. 
func (VEventType) EnumDescriptor() ([]byte, []int) { - return file_binlogdata_proto_rawDescGZIP(), []int{3} + return file_binlogdata_proto_rawDescGZIP(), []int{4} } // MigrationType specifies the type of migration for the Journal. @@ -348,11 +413,11 @@ func (x MigrationType) String() string { } func (MigrationType) Descriptor() protoreflect.EnumDescriptor { - return file_binlogdata_proto_enumTypes[4].Descriptor() + return file_binlogdata_proto_enumTypes[5].Descriptor() } func (MigrationType) Type() protoreflect.EnumType { - return &file_binlogdata_proto_enumTypes[4] + return &file_binlogdata_proto_enumTypes[5] } func (x MigrationType) Number() protoreflect.EnumNumber { @@ -361,7 +426,7 @@ func (x MigrationType) Number() protoreflect.EnumNumber { // Deprecated: Use MigrationType.Descriptor instead. func (MigrationType) EnumDescriptor() ([]byte, []int) { - return file_binlogdata_proto_rawDescGZIP(), []int{4} + return file_binlogdata_proto_rawDescGZIP(), []int{5} } type BinlogTransaction_Statement_Category int32 @@ -419,11 +484,11 @@ func (x BinlogTransaction_Statement_Category) String() string { } func (BinlogTransaction_Statement_Category) Descriptor() protoreflect.EnumDescriptor { - return file_binlogdata_proto_enumTypes[5].Descriptor() + return file_binlogdata_proto_enumTypes[6].Descriptor() } func (BinlogTransaction_Statement_Category) Type() protoreflect.EnumType { - return &file_binlogdata_proto_enumTypes[5] + return &file_binlogdata_proto_enumTypes[6] } func (x BinlogTransaction_Statement_Category) Number() protoreflect.EnumNumber { @@ -465,11 +530,11 @@ func (x Filter_FieldEventMode) String() string { } func (Filter_FieldEventMode) Descriptor() protoreflect.EnumDescriptor { - return file_binlogdata_proto_enumTypes[6].Descriptor() + return file_binlogdata_proto_enumTypes[7].Descriptor() } func (Filter_FieldEventMode) Type() protoreflect.EnumType { - return &file_binlogdata_proto_enumTypes[6] + return &file_binlogdata_proto_enumTypes[7] } func (x 
Filter_FieldEventMode) Number() protoreflect.EnumNumber { @@ -1346,6 +1411,7 @@ type RowEvent struct { RowChanges []*RowChange `protobuf:"bytes,2,rep,name=row_changes,json=rowChanges,proto3" json:"row_changes,omitempty"` Keyspace string `protobuf:"bytes,3,opt,name=keyspace,proto3" json:"keyspace,omitempty"` Shard string `protobuf:"bytes,4,opt,name=shard,proto3" json:"shard,omitempty"` + Flags uint32 `protobuf:"varint,5,opt,name=flags,proto3" json:"flags,omitempty"` // https://dev.mysql.com/doc/dev/mysql-server/latest/classbinary__log_1_1Rows__event.html } func (x *RowEvent) Reset() { @@ -1408,6 +1474,13 @@ func (x *RowEvent) GetShard() string { return "" } +func (x *RowEvent) GetFlags() uint32 { + if x != nil { + return x.Flags + } + return 0 +} + // FieldEvent represents the field info for a table. type FieldEvent struct { state protoimpl.MessageState @@ -2372,6 +2445,158 @@ func (x *VStreamRowsResponse) GetHeartbeat() bool { return false } +// VStreamTablesRequest is the payload for VStreamTables +type VStreamTablesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *query.VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *query.Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` +} + +func (x *VStreamTablesRequest) Reset() { + *x = VStreamTablesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_binlogdata_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VStreamTablesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VStreamTablesRequest) ProtoMessage() {} + +func (x *VStreamTablesRequest) ProtoReflect() 
protoreflect.Message { + mi := &file_binlogdata_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VStreamTablesRequest.ProtoReflect.Descriptor instead. +func (*VStreamTablesRequest) Descriptor() ([]byte, []int) { + return file_binlogdata_proto_rawDescGZIP(), []int{24} +} + +func (x *VStreamTablesRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if x != nil { + return x.EffectiveCallerId + } + return nil +} + +func (x *VStreamTablesRequest) GetImmediateCallerId() *query.VTGateCallerID { + if x != nil { + return x.ImmediateCallerId + } + return nil +} + +func (x *VStreamTablesRequest) GetTarget() *query.Target { + if x != nil { + return x.Target + } + return nil +} + +// VStreamTablesResponse is the response from VStreamTables +type VStreamTablesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"` + Fields []*query.Field `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"` + Pkfields []*query.Field `protobuf:"bytes,3,rep,name=pkfields,proto3" json:"pkfields,omitempty"` + Gtid string `protobuf:"bytes,4,opt,name=gtid,proto3" json:"gtid,omitempty"` + Rows []*query.Row `protobuf:"bytes,5,rep,name=rows,proto3" json:"rows,omitempty"` + Lastpk *query.Row `protobuf:"bytes,6,opt,name=lastpk,proto3" json:"lastpk,omitempty"` +} + +func (x *VStreamTablesResponse) Reset() { + *x = VStreamTablesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_binlogdata_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VStreamTablesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func 
(*VStreamTablesResponse) ProtoMessage() {} + +func (x *VStreamTablesResponse) ProtoReflect() protoreflect.Message { + mi := &file_binlogdata_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VStreamTablesResponse.ProtoReflect.Descriptor instead. +func (*VStreamTablesResponse) Descriptor() ([]byte, []int) { + return file_binlogdata_proto_rawDescGZIP(), []int{25} +} + +func (x *VStreamTablesResponse) GetTableName() string { + if x != nil { + return x.TableName + } + return "" +} + +func (x *VStreamTablesResponse) GetFields() []*query.Field { + if x != nil { + return x.Fields + } + return nil +} + +func (x *VStreamTablesResponse) GetPkfields() []*query.Field { + if x != nil { + return x.Pkfields + } + return nil +} + +func (x *VStreamTablesResponse) GetGtid() string { + if x != nil { + return x.Gtid + } + return "" +} + +func (x *VStreamTablesResponse) GetRows() []*query.Row { + if x != nil { + return x.Rows + } + return nil +} + +func (x *VStreamTablesResponse) GetLastpk() *query.Row { + if x != nil { + return x.Lastpk + } + return nil +} + type LastPKEvent struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2384,7 +2609,7 @@ type LastPKEvent struct { func (x *LastPKEvent) Reset() { *x = LastPKEvent{} if protoimpl.UnsafeEnabled { - mi := &file_binlogdata_proto_msgTypes[24] + mi := &file_binlogdata_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2397,7 +2622,7 @@ func (x *LastPKEvent) String() string { func (*LastPKEvent) ProtoMessage() {} func (x *LastPKEvent) ProtoReflect() protoreflect.Message { - mi := &file_binlogdata_proto_msgTypes[24] + mi := &file_binlogdata_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { @@ -2410,7 +2635,7 @@ func (x *LastPKEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use LastPKEvent.ProtoReflect.Descriptor instead. func (*LastPKEvent) Descriptor() ([]byte, []int) { - return file_binlogdata_proto_rawDescGZIP(), []int{24} + return file_binlogdata_proto_rawDescGZIP(), []int{26} } func (x *LastPKEvent) GetTableLastPK() *TableLastPK { @@ -2439,7 +2664,7 @@ type TableLastPK struct { func (x *TableLastPK) Reset() { *x = TableLastPK{} if protoimpl.UnsafeEnabled { - mi := &file_binlogdata_proto_msgTypes[25] + mi := &file_binlogdata_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2452,7 +2677,7 @@ func (x *TableLastPK) String() string { func (*TableLastPK) ProtoMessage() {} func (x *TableLastPK) ProtoReflect() protoreflect.Message { - mi := &file_binlogdata_proto_msgTypes[25] + mi := &file_binlogdata_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2465,7 +2690,7 @@ func (x *TableLastPK) ProtoReflect() protoreflect.Message { // Deprecated: Use TableLastPK.ProtoReflect.Descriptor instead. 
func (*TableLastPK) Descriptor() ([]byte, []int) { - return file_binlogdata_proto_rawDescGZIP(), []int{25} + return file_binlogdata_proto_rawDescGZIP(), []int{27} } func (x *TableLastPK) GetTableName() string { @@ -2499,7 +2724,7 @@ type VStreamResultsRequest struct { func (x *VStreamResultsRequest) Reset() { *x = VStreamResultsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_binlogdata_proto_msgTypes[26] + mi := &file_binlogdata_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2512,7 +2737,7 @@ func (x *VStreamResultsRequest) String() string { func (*VStreamResultsRequest) ProtoMessage() {} func (x *VStreamResultsRequest) ProtoReflect() protoreflect.Message { - mi := &file_binlogdata_proto_msgTypes[26] + mi := &file_binlogdata_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2525,7 +2750,7 @@ func (x *VStreamResultsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VStreamResultsRequest.ProtoReflect.Descriptor instead. 
func (*VStreamResultsRequest) Descriptor() ([]byte, []int) { - return file_binlogdata_proto_rawDescGZIP(), []int{26} + return file_binlogdata_proto_rawDescGZIP(), []int{28} } func (x *VStreamResultsRequest) GetEffectiveCallerId() *vtrpc.CallerID { @@ -2571,7 +2796,7 @@ type VStreamResultsResponse struct { func (x *VStreamResultsResponse) Reset() { *x = VStreamResultsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_binlogdata_proto_msgTypes[27] + mi := &file_binlogdata_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2584,7 +2809,7 @@ func (x *VStreamResultsResponse) String() string { func (*VStreamResultsResponse) ProtoMessage() {} func (x *VStreamResultsResponse) ProtoReflect() protoreflect.Message { - mi := &file_binlogdata_proto_msgTypes[27] + mi := &file_binlogdata_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2597,7 +2822,7 @@ func (x *VStreamResultsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VStreamResultsResponse.ProtoReflect.Descriptor instead. 
func (*VStreamResultsResponse) Descriptor() ([]byte, []int) { - return file_binlogdata_proto_rawDescGZIP(), []int{27} + return file_binlogdata_proto_rawDescGZIP(), []int{29} } func (x *VStreamResultsResponse) GetFields() []*query.Field { @@ -2637,7 +2862,7 @@ type BinlogTransaction_Statement struct { func (x *BinlogTransaction_Statement) Reset() { *x = BinlogTransaction_Statement{} if protoimpl.UnsafeEnabled { - mi := &file_binlogdata_proto_msgTypes[28] + mi := &file_binlogdata_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2650,7 +2875,7 @@ func (x *BinlogTransaction_Statement) String() string { func (*BinlogTransaction_Statement) ProtoMessage() {} func (x *BinlogTransaction_Statement) ProtoReflect() protoreflect.Message { - mi := &file_binlogdata_proto_msgTypes[28] + mi := &file_binlogdata_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2699,7 +2924,7 @@ type RowChange_Bitmap struct { func (x *RowChange_Bitmap) Reset() { *x = RowChange_Bitmap{} if protoimpl.UnsafeEnabled { - mi := &file_binlogdata_proto_msgTypes[32] + mi := &file_binlogdata_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2712,7 +2937,7 @@ func (x *RowChange_Bitmap) String() string { func (*RowChange_Bitmap) ProtoMessage() {} func (x *RowChange_Bitmap) ProtoReflect() protoreflect.Message { - mi := &file_binlogdata_proto_msgTypes[32] + mi := &file_binlogdata_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2925,7 +3150,7 @@ var file_binlogdata_proto_rawDesc = []byte{ 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x1a, 0x32, 0x0a, 0x06, 0x42, 0x69, 0x74, 0x6d, 0x61, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 
0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, - 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0x93, + 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0xa9, 0x01, 0x0a, 0x08, 0x52, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x72, 0x6f, @@ -2935,166 +3160,155 @@ var file_binlogdata_proto_rawDesc = []byte{ 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x0a, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x0a, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x1a, + 0x0a, 0x08, 
0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x22, 0x88, 0x01, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x12, 0x1a, + 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x67, 0x74, 0x69, 0x64, 0x12, 0x35, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x70, 0x5f, + 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, + 0x4b, 0x52, 0x08, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x4b, 0x73, 0x22, 0x3f, 0x0a, 0x05, 0x56, + 0x47, 0x74, 0x69, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x67, 0x74, + 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, + 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, + 0x52, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x73, 0x22, 0x41, 0x0a, 0x0d, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, + 0xbc, 0x02, 0x0a, 0x07, 0x4a, 0x6f, 0x75, 0x72, 
0x6e, 0x61, 0x6c, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x40, 0x0a, 0x0e, 0x6d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0d, + 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x0b, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x52, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x47, + 0x74, 0x69, 0x64, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, + 0x61, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, + 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x77, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0x8b, + 0x04, 0x0a, 0x06, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x04, 0x74, 
0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x09, 0x72, 0x6f, 0x77, 0x5f, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x08, + 0x72, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x0b, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x11, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x47, + 0x74, 0x69, 0x64, 0x52, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x12, 0x2d, 0x0a, 0x07, 0x6a, 0x6f, + 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x62, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, + 0x52, 0x07, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6d, 0x6c, + 0x18, 0x09, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x03, 0x64, 0x6d, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0b, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3c, + 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, + 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1c, + 0x0a, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x18, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x22, 0x68, 0x0a, 0x0c, + 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x1e, 0x0a, 0x0b, 0x70, 0x5f, 0x6b, 0x5f, 0x63, 0x6f, + 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x03, 0x52, 0x09, 0x70, 0x4b, 0x43, + 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x22, 0x41, 0x0a, 0x0d, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, + 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x30, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 
0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0xc7, 0x02, 0x0a, 0x0e, 0x56, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, + 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, + 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, + 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, + 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, + 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6c, 0x61, 0x73, + 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, + 
0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, + 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, + 0x50, 0x4b, 0x73, 0x22, 0x3d, 0x0a, 0x0f, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x22, 0x85, 0x02, 0x0a, 0x12, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, + 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, + 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, + 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, + 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, + 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2a, + 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 
0x70, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x22, 0xf9, 0x01, 0x0a, 0x13, 0x56, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x88, 0x01, 0x0a, 0x09, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, - 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x35, - 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x08, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x50, 0x4b, 0x73, 0x22, 0x3f, 0x0a, 0x05, 0x56, 0x47, 0x74, 0x69, 0x64, 0x12, 0x36, - 0x0a, 0x0b, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 
0x61, 0x74, 0x61, - 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x52, 0x0a, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x47, 0x74, 0x69, 0x64, 0x73, 0x22, 0x41, 0x0a, 0x0d, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0xbc, 0x02, 0x0a, 0x07, 0x4a, 0x6f, - 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x40, 0x0a, 0x0e, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, - 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0d, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, - 0x25, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x0b, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, - 0x67, 0x74, 0x69, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, - 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, - 0x69, 0x64, 0x52, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x73, 0x12, 0x3d, - 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x19, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, - 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0x8b, 0x04, 0x0a, 0x06, 0x56, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x16, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x12, 0x0a, - 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, - 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, - 0x31, 0x0a, 0x09, 0x72, 0x6f, 0x77, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x52, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x72, 0x6f, 0x77, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x0b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, - 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 
0x6e, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x76, - 0x67, 0x74, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x62, 0x69, 0x6e, - 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x47, 0x74, 0x69, 0x64, 0x52, 0x05, 0x76, - 0x67, 0x74, 0x69, 0x64, 0x12, 0x2d, 0x0a, 0x07, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x07, 0x6a, 0x6f, 0x75, 0x72, - 0x6e, 0x61, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6d, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x64, 0x6d, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63, 0x75, 0x72, - 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, - 0x5f, 0x70, 0x5f, 0x6b, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x61, - 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x50, - 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x6f, - 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x74, 0x68, 0x72, - 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x22, 0x68, 0x0a, 0x0c, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, - 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 
0x69, + 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x28, 0x0a, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, - 0x12, 0x1e, 0x0a, 0x0b, 0x70, 0x5f, 0x6b, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x03, 0x52, 0x09, 0x70, 0x4b, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, - 0x22, 0x41, 0x0a, 0x0d, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x12, 0x30, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x18, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, - 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x06, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x22, 0xc7, 0x02, 0x0a, 0x0e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, - 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, - 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, - 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, - 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, - 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, - 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, - 0x2e, 0x71, 0x75, 0x65, 0x72, 
0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x2a, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, - 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x73, - 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, - 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x73, 0x22, 0x3d, 0x0a, - 0x0f, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x2a, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x85, 0x02, 0x0a, - 0x12, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, - 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, - 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, - 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, - 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 
0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, - 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, - 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, - 0x70, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, - 0x73, 0x74, 0x70, 0x6b, 0x22, 0xf9, 0x01, 0x0a, 0x13, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x06, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x73, 0x12, 0x28, 0x0a, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x52, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, - 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, - 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, - 0x12, 0x22, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 
0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x06, 0x6c, 0x61, - 0x73, 0x74, 0x70, 0x6b, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, - 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, - 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, - 0x22, 0x69, 0x0a, 0x0b, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, - 0x3c, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, - 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, - 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1c, 0x0a, - 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x58, 0x0a, 0x0b, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73, - 0x74, 0x70, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c, - 0x61, 0x73, 0x74, 0x70, 0x6b, 0x22, 0xdc, 0x01, 0x0a, 0x15, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 
0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, + 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, + 0x6f, 0x77, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, + 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x74, + 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x72, + 0x74, 0x62, 0x65, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x68, 0x65, 0x61, + 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x22, 0xc5, 0x01, 0x0a, 0x14, 0x56, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, @@ -3105,54 +3319,101 @@ var file_binlogdata_proto_rawDesc = []byte{ 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, - 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x14, - 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x22, 0x72, 0x0a, 0x16, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, - 0x65, 0x73, 0x75, 0x6c, 
0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, - 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, - 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x2a, 0x3e, 0x0a, 0x0b, 0x4f, 0x6e, 0x44, 0x44, - 0x4c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x47, 0x4e, 0x4f, 0x52, - 0x45, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x54, 0x4f, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, - 0x04, 0x45, 0x58, 0x45, 0x43, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x45, 0x43, 0x5f, - 0x49, 0x47, 0x4e, 0x4f, 0x52, 0x45, 0x10, 0x03, 0x2a, 0x7b, 0x0a, 0x18, 0x56, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, - 0x69, 0x7a, 0x65, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, - 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, - 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x65, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, - 0x44, 0x44, 0x4c, 0x10, 0x05, 0x2a, 0x34, 0x0a, 0x1b, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x6f, 
0x6e, 0x65, 0x10, 0x00, 0x12, 0x0b, - 0x0a, 0x07, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x10, 0x01, 0x2a, 0x8d, 0x02, 0x0a, 0x0a, - 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x47, 0x54, 0x49, 0x44, 0x10, - 0x01, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, - 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, - 0x42, 0x41, 0x43, 0x4b, 0x10, 0x04, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x44, 0x4c, 0x10, 0x05, 0x12, - 0x0a, 0x0a, 0x06, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x52, - 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x10, 0x07, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x44, 0x41, - 0x54, 0x45, 0x10, 0x08, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x09, - 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x0a, 0x12, 0x09, 0x0a, 0x05, 0x4f, 0x54, 0x48, - 0x45, 0x52, 0x10, 0x0b, 0x12, 0x07, 0x0a, 0x03, 0x52, 0x4f, 0x57, 0x10, 0x0c, 0x12, 0x09, 0x0a, - 0x05, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x45, 0x41, 0x52, - 0x54, 0x42, 0x45, 0x41, 0x54, 0x10, 0x0e, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x47, 0x54, 0x49, 0x44, - 0x10, 0x0f, 0x12, 0x0b, 0x0a, 0x07, 0x4a, 0x4f, 0x55, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x10, 0x12, - 0x0b, 0x0a, 0x07, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x11, 0x12, 0x0a, 0x0a, 0x06, - 0x4c, 0x41, 0x53, 0x54, 0x50, 0x4b, 0x10, 0x12, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x41, 0x56, 0x45, - 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x13, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x4f, 0x50, 0x59, 0x5f, - 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x14, 0x2a, 0x27, 0x0a, 0x0d, 0x4d, - 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, - 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x48, 0x41, 0x52, - 
0x44, 0x53, 0x10, 0x01, 0x42, 0x29, 0x5a, 0x27, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, - 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0xde, + 0x01, 0x0a, 0x15, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x28, 0x0a, + 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x08, 0x70, + 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, + 0x6f, 0x77, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x6c, + 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x22, + 0x69, 0x0a, 0x0b, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3c, + 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 
0x6b, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, + 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1c, 0x0a, 0x09, + 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x58, 0x0a, 0x0b, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, + 0x70, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, + 0x73, 0x74, 0x70, 0x6b, 0x22, 0xdc, 0x01, 0x0a, 0x15, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, + 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, + 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, + 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, + 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, + 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, + 0x6c, 0x6c, 0x65, 0x72, 0x49, 
0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x14, 0x0a, + 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x22, 0x72, 0x0a, 0x16, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, + 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, + 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x2a, 0x3e, 0x0a, 0x0b, 0x4f, 0x6e, 0x44, 0x44, 0x4c, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x47, 0x4e, 0x4f, 0x52, 0x45, + 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x54, 0x4f, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, + 0x45, 0x58, 0x45, 0x43, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x45, 0x43, 0x5f, 0x49, + 0x47, 0x4e, 0x4f, 0x52, 0x45, 0x10, 0x03, 0x2a, 0x7b, 0x0a, 0x18, 0x56, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, + 0x7a, 0x65, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x10, 0x02, 
0x12, 0x0b, 0x0a, 0x07, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x44, + 0x44, 0x4c, 0x10, 0x05, 0x2a, 0x44, 0x0a, 0x1b, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x6f, 0x6e, 0x65, 0x10, 0x00, 0x12, 0x0b, 0x0a, + 0x07, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x74, + 0x6f, 0x6d, 0x69, 0x63, 0x43, 0x6f, 0x70, 0x79, 0x10, 0x02, 0x2a, 0x71, 0x0a, 0x19, 0x56, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, + 0x77, 0x6e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x6e, 0x69, 0x74, 0x10, 0x01, 0x12, 0x0b, + 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x43, + 0x6f, 0x70, 0x79, 0x69, 0x6e, 0x67, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x6e, + 0x69, 0x6e, 0x67, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x10, 0x05, + 0x12, 0x0b, 0x0a, 0x07, 0x4c, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x10, 0x06, 0x2a, 0x8d, 0x02, + 0x0a, 0x0a, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x47, 0x54, 0x49, + 0x44, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0a, + 0x0a, 0x06, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, + 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x04, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x44, 0x4c, 0x10, + 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x10, 0x06, 0x12, 0x0b, 0x0a, + 0x07, 
0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x10, 0x07, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, + 0x44, 0x41, 0x54, 0x45, 0x10, 0x08, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, + 0x10, 0x09, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x0a, 0x12, 0x09, 0x0a, 0x05, 0x4f, + 0x54, 0x48, 0x45, 0x52, 0x10, 0x0b, 0x12, 0x07, 0x0a, 0x03, 0x52, 0x4f, 0x57, 0x10, 0x0c, 0x12, + 0x09, 0x0a, 0x05, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x45, + 0x41, 0x52, 0x54, 0x42, 0x45, 0x41, 0x54, 0x10, 0x0e, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x47, 0x54, + 0x49, 0x44, 0x10, 0x0f, 0x12, 0x0b, 0x0a, 0x07, 0x4a, 0x4f, 0x55, 0x52, 0x4e, 0x41, 0x4c, 0x10, + 0x10, 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x11, 0x12, 0x0a, + 0x0a, 0x06, 0x4c, 0x41, 0x53, 0x54, 0x50, 0x4b, 0x10, 0x12, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x41, + 0x56, 0x45, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x13, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x4f, 0x50, + 0x59, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x14, 0x2a, 0x27, 0x0a, + 0x0d, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, + 0x0a, 0x06, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x48, + 0x41, 0x52, 0x44, 0x53, 0x10, 0x01, 0x42, 0x29, 0x5a, 0x27, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, + 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, + 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3167,123 +3428,133 @@ func file_binlogdata_proto_rawDescGZIP() []byte { return file_binlogdata_proto_rawDescData } -var file_binlogdata_proto_enumTypes = make([]protoimpl.EnumInfo, 7) -var file_binlogdata_proto_msgTypes = make([]protoimpl.MessageInfo, 33) +var file_binlogdata_proto_enumTypes = make([]protoimpl.EnumInfo, 8) +var file_binlogdata_proto_msgTypes = 
make([]protoimpl.MessageInfo, 35) var file_binlogdata_proto_goTypes = []interface{}{ (OnDDLAction)(0), // 0: binlogdata.OnDDLAction (VReplicationWorkflowType)(0), // 1: binlogdata.VReplicationWorkflowType (VReplicationWorkflowSubType)(0), // 2: binlogdata.VReplicationWorkflowSubType - (VEventType)(0), // 3: binlogdata.VEventType - (MigrationType)(0), // 4: binlogdata.MigrationType - (BinlogTransaction_Statement_Category)(0), // 5: binlogdata.BinlogTransaction.Statement.Category - (Filter_FieldEventMode)(0), // 6: binlogdata.Filter.FieldEventMode - (*Charset)(nil), // 7: binlogdata.Charset - (*BinlogTransaction)(nil), // 8: binlogdata.BinlogTransaction - (*StreamKeyRangeRequest)(nil), // 9: binlogdata.StreamKeyRangeRequest - (*StreamKeyRangeResponse)(nil), // 10: binlogdata.StreamKeyRangeResponse - (*StreamTablesRequest)(nil), // 11: binlogdata.StreamTablesRequest - (*StreamTablesResponse)(nil), // 12: binlogdata.StreamTablesResponse - (*CharsetConversion)(nil), // 13: binlogdata.CharsetConversion - (*Rule)(nil), // 14: binlogdata.Rule - (*Filter)(nil), // 15: binlogdata.Filter - (*BinlogSource)(nil), // 16: binlogdata.BinlogSource - (*RowChange)(nil), // 17: binlogdata.RowChange - (*RowEvent)(nil), // 18: binlogdata.RowEvent - (*FieldEvent)(nil), // 19: binlogdata.FieldEvent - (*ShardGtid)(nil), // 20: binlogdata.ShardGtid - (*VGtid)(nil), // 21: binlogdata.VGtid - (*KeyspaceShard)(nil), // 22: binlogdata.KeyspaceShard - (*Journal)(nil), // 23: binlogdata.Journal - (*VEvent)(nil), // 24: binlogdata.VEvent - (*MinimalTable)(nil), // 25: binlogdata.MinimalTable - (*MinimalSchema)(nil), // 26: binlogdata.MinimalSchema - (*VStreamRequest)(nil), // 27: binlogdata.VStreamRequest - (*VStreamResponse)(nil), // 28: binlogdata.VStreamResponse - (*VStreamRowsRequest)(nil), // 29: binlogdata.VStreamRowsRequest - (*VStreamRowsResponse)(nil), // 30: binlogdata.VStreamRowsResponse - (*LastPKEvent)(nil), // 31: binlogdata.LastPKEvent - (*TableLastPK)(nil), // 32: 
binlogdata.TableLastPK - (*VStreamResultsRequest)(nil), // 33: binlogdata.VStreamResultsRequest - (*VStreamResultsResponse)(nil), // 34: binlogdata.VStreamResultsResponse - (*BinlogTransaction_Statement)(nil), // 35: binlogdata.BinlogTransaction.Statement - nil, // 36: binlogdata.Rule.ConvertEnumToTextEntry - nil, // 37: binlogdata.Rule.ConvertCharsetEntry - nil, // 38: binlogdata.Rule.ConvertIntToEnumEntry - (*RowChange_Bitmap)(nil), // 39: binlogdata.RowChange.Bitmap - (*query.EventToken)(nil), // 40: query.EventToken - (*topodata.KeyRange)(nil), // 41: topodata.KeyRange - (topodata.TabletType)(0), // 42: topodata.TabletType - (*query.Row)(nil), // 43: query.Row - (*query.Field)(nil), // 44: query.Field - (*vtrpc.CallerID)(nil), // 45: vtrpc.CallerID - (*query.VTGateCallerID)(nil), // 46: query.VTGateCallerID - (*query.Target)(nil), // 47: query.Target - (*query.QueryResult)(nil), // 48: query.QueryResult + (VReplicationWorkflowState)(0), // 3: binlogdata.VReplicationWorkflowState + (VEventType)(0), // 4: binlogdata.VEventType + (MigrationType)(0), // 5: binlogdata.MigrationType + (BinlogTransaction_Statement_Category)(0), // 6: binlogdata.BinlogTransaction.Statement.Category + (Filter_FieldEventMode)(0), // 7: binlogdata.Filter.FieldEventMode + (*Charset)(nil), // 8: binlogdata.Charset + (*BinlogTransaction)(nil), // 9: binlogdata.BinlogTransaction + (*StreamKeyRangeRequest)(nil), // 10: binlogdata.StreamKeyRangeRequest + (*StreamKeyRangeResponse)(nil), // 11: binlogdata.StreamKeyRangeResponse + (*StreamTablesRequest)(nil), // 12: binlogdata.StreamTablesRequest + (*StreamTablesResponse)(nil), // 13: binlogdata.StreamTablesResponse + (*CharsetConversion)(nil), // 14: binlogdata.CharsetConversion + (*Rule)(nil), // 15: binlogdata.Rule + (*Filter)(nil), // 16: binlogdata.Filter + (*BinlogSource)(nil), // 17: binlogdata.BinlogSource + (*RowChange)(nil), // 18: binlogdata.RowChange + (*RowEvent)(nil), // 19: binlogdata.RowEvent + (*FieldEvent)(nil), // 20: 
binlogdata.FieldEvent + (*ShardGtid)(nil), // 21: binlogdata.ShardGtid + (*VGtid)(nil), // 22: binlogdata.VGtid + (*KeyspaceShard)(nil), // 23: binlogdata.KeyspaceShard + (*Journal)(nil), // 24: binlogdata.Journal + (*VEvent)(nil), // 25: binlogdata.VEvent + (*MinimalTable)(nil), // 26: binlogdata.MinimalTable + (*MinimalSchema)(nil), // 27: binlogdata.MinimalSchema + (*VStreamRequest)(nil), // 28: binlogdata.VStreamRequest + (*VStreamResponse)(nil), // 29: binlogdata.VStreamResponse + (*VStreamRowsRequest)(nil), // 30: binlogdata.VStreamRowsRequest + (*VStreamRowsResponse)(nil), // 31: binlogdata.VStreamRowsResponse + (*VStreamTablesRequest)(nil), // 32: binlogdata.VStreamTablesRequest + (*VStreamTablesResponse)(nil), // 33: binlogdata.VStreamTablesResponse + (*LastPKEvent)(nil), // 34: binlogdata.LastPKEvent + (*TableLastPK)(nil), // 35: binlogdata.TableLastPK + (*VStreamResultsRequest)(nil), // 36: binlogdata.VStreamResultsRequest + (*VStreamResultsResponse)(nil), // 37: binlogdata.VStreamResultsResponse + (*BinlogTransaction_Statement)(nil), // 38: binlogdata.BinlogTransaction.Statement + nil, // 39: binlogdata.Rule.ConvertEnumToTextEntry + nil, // 40: binlogdata.Rule.ConvertCharsetEntry + nil, // 41: binlogdata.Rule.ConvertIntToEnumEntry + (*RowChange_Bitmap)(nil), // 42: binlogdata.RowChange.Bitmap + (*query.EventToken)(nil), // 43: query.EventToken + (*topodata.KeyRange)(nil), // 44: topodata.KeyRange + (topodata.TabletType)(0), // 45: topodata.TabletType + (*query.Row)(nil), // 46: query.Row + (*query.Field)(nil), // 47: query.Field + (*vtrpc.CallerID)(nil), // 48: vtrpc.CallerID + (*query.VTGateCallerID)(nil), // 49: query.VTGateCallerID + (*query.Target)(nil), // 50: query.Target + (*query.QueryResult)(nil), // 51: query.QueryResult } var file_binlogdata_proto_depIdxs = []int32{ - 35, // 0: binlogdata.BinlogTransaction.statements:type_name -> binlogdata.BinlogTransaction.Statement - 40, // 1: binlogdata.BinlogTransaction.event_token:type_name -> 
query.EventToken - 41, // 2: binlogdata.StreamKeyRangeRequest.key_range:type_name -> topodata.KeyRange - 7, // 3: binlogdata.StreamKeyRangeRequest.charset:type_name -> binlogdata.Charset - 8, // 4: binlogdata.StreamKeyRangeResponse.binlog_transaction:type_name -> binlogdata.BinlogTransaction - 7, // 5: binlogdata.StreamTablesRequest.charset:type_name -> binlogdata.Charset - 8, // 6: binlogdata.StreamTablesResponse.binlog_transaction:type_name -> binlogdata.BinlogTransaction - 36, // 7: binlogdata.Rule.convert_enum_to_text:type_name -> binlogdata.Rule.ConvertEnumToTextEntry - 37, // 8: binlogdata.Rule.convert_charset:type_name -> binlogdata.Rule.ConvertCharsetEntry - 38, // 9: binlogdata.Rule.convert_int_to_enum:type_name -> binlogdata.Rule.ConvertIntToEnumEntry - 14, // 10: binlogdata.Filter.rules:type_name -> binlogdata.Rule - 6, // 11: binlogdata.Filter.field_event_mode:type_name -> binlogdata.Filter.FieldEventMode - 42, // 12: binlogdata.BinlogSource.tablet_type:type_name -> topodata.TabletType - 41, // 13: binlogdata.BinlogSource.key_range:type_name -> topodata.KeyRange - 15, // 14: binlogdata.BinlogSource.filter:type_name -> binlogdata.Filter + 38, // 0: binlogdata.BinlogTransaction.statements:type_name -> binlogdata.BinlogTransaction.Statement + 43, // 1: binlogdata.BinlogTransaction.event_token:type_name -> query.EventToken + 44, // 2: binlogdata.StreamKeyRangeRequest.key_range:type_name -> topodata.KeyRange + 8, // 3: binlogdata.StreamKeyRangeRequest.charset:type_name -> binlogdata.Charset + 9, // 4: binlogdata.StreamKeyRangeResponse.binlog_transaction:type_name -> binlogdata.BinlogTransaction + 8, // 5: binlogdata.StreamTablesRequest.charset:type_name -> binlogdata.Charset + 9, // 6: binlogdata.StreamTablesResponse.binlog_transaction:type_name -> binlogdata.BinlogTransaction + 39, // 7: binlogdata.Rule.convert_enum_to_text:type_name -> binlogdata.Rule.ConvertEnumToTextEntry + 40, // 8: binlogdata.Rule.convert_charset:type_name -> 
binlogdata.Rule.ConvertCharsetEntry + 41, // 9: binlogdata.Rule.convert_int_to_enum:type_name -> binlogdata.Rule.ConvertIntToEnumEntry + 15, // 10: binlogdata.Filter.rules:type_name -> binlogdata.Rule + 7, // 11: binlogdata.Filter.field_event_mode:type_name -> binlogdata.Filter.FieldEventMode + 45, // 12: binlogdata.BinlogSource.tablet_type:type_name -> topodata.TabletType + 44, // 13: binlogdata.BinlogSource.key_range:type_name -> topodata.KeyRange + 16, // 14: binlogdata.BinlogSource.filter:type_name -> binlogdata.Filter 0, // 15: binlogdata.BinlogSource.on_ddl:type_name -> binlogdata.OnDDLAction - 43, // 16: binlogdata.RowChange.before:type_name -> query.Row - 43, // 17: binlogdata.RowChange.after:type_name -> query.Row - 39, // 18: binlogdata.RowChange.data_columns:type_name -> binlogdata.RowChange.Bitmap - 17, // 19: binlogdata.RowEvent.row_changes:type_name -> binlogdata.RowChange - 44, // 20: binlogdata.FieldEvent.fields:type_name -> query.Field - 32, // 21: binlogdata.ShardGtid.table_p_ks:type_name -> binlogdata.TableLastPK - 20, // 22: binlogdata.VGtid.shard_gtids:type_name -> binlogdata.ShardGtid - 4, // 23: binlogdata.Journal.migration_type:type_name -> binlogdata.MigrationType - 20, // 24: binlogdata.Journal.shard_gtids:type_name -> binlogdata.ShardGtid - 22, // 25: binlogdata.Journal.participants:type_name -> binlogdata.KeyspaceShard - 3, // 26: binlogdata.VEvent.type:type_name -> binlogdata.VEventType - 18, // 27: binlogdata.VEvent.row_event:type_name -> binlogdata.RowEvent - 19, // 28: binlogdata.VEvent.field_event:type_name -> binlogdata.FieldEvent - 21, // 29: binlogdata.VEvent.vgtid:type_name -> binlogdata.VGtid - 23, // 30: binlogdata.VEvent.journal:type_name -> binlogdata.Journal - 31, // 31: binlogdata.VEvent.last_p_k_event:type_name -> binlogdata.LastPKEvent - 44, // 32: binlogdata.MinimalTable.fields:type_name -> query.Field - 25, // 33: binlogdata.MinimalSchema.tables:type_name -> binlogdata.MinimalTable - 45, // 34: 
binlogdata.VStreamRequest.effective_caller_id:type_name -> vtrpc.CallerID - 46, // 35: binlogdata.VStreamRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 47, // 36: binlogdata.VStreamRequest.target:type_name -> query.Target - 15, // 37: binlogdata.VStreamRequest.filter:type_name -> binlogdata.Filter - 32, // 38: binlogdata.VStreamRequest.table_last_p_ks:type_name -> binlogdata.TableLastPK - 24, // 39: binlogdata.VStreamResponse.events:type_name -> binlogdata.VEvent - 45, // 40: binlogdata.VStreamRowsRequest.effective_caller_id:type_name -> vtrpc.CallerID - 46, // 41: binlogdata.VStreamRowsRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 47, // 42: binlogdata.VStreamRowsRequest.target:type_name -> query.Target - 48, // 43: binlogdata.VStreamRowsRequest.lastpk:type_name -> query.QueryResult - 44, // 44: binlogdata.VStreamRowsResponse.fields:type_name -> query.Field - 44, // 45: binlogdata.VStreamRowsResponse.pkfields:type_name -> query.Field - 43, // 46: binlogdata.VStreamRowsResponse.rows:type_name -> query.Row - 43, // 47: binlogdata.VStreamRowsResponse.lastpk:type_name -> query.Row - 32, // 48: binlogdata.LastPKEvent.table_last_p_k:type_name -> binlogdata.TableLastPK - 48, // 49: binlogdata.TableLastPK.lastpk:type_name -> query.QueryResult - 45, // 50: binlogdata.VStreamResultsRequest.effective_caller_id:type_name -> vtrpc.CallerID - 46, // 51: binlogdata.VStreamResultsRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 47, // 52: binlogdata.VStreamResultsRequest.target:type_name -> query.Target - 44, // 53: binlogdata.VStreamResultsResponse.fields:type_name -> query.Field - 43, // 54: binlogdata.VStreamResultsResponse.rows:type_name -> query.Row - 5, // 55: binlogdata.BinlogTransaction.Statement.category:type_name -> binlogdata.BinlogTransaction.Statement.Category - 7, // 56: binlogdata.BinlogTransaction.Statement.charset:type_name -> binlogdata.Charset - 13, // 57: binlogdata.Rule.ConvertCharsetEntry.value:type_name -> 
binlogdata.CharsetConversion - 58, // [58:58] is the sub-list for method output_type - 58, // [58:58] is the sub-list for method input_type - 58, // [58:58] is the sub-list for extension type_name - 58, // [58:58] is the sub-list for extension extendee - 0, // [0:58] is the sub-list for field type_name + 46, // 16: binlogdata.RowChange.before:type_name -> query.Row + 46, // 17: binlogdata.RowChange.after:type_name -> query.Row + 42, // 18: binlogdata.RowChange.data_columns:type_name -> binlogdata.RowChange.Bitmap + 18, // 19: binlogdata.RowEvent.row_changes:type_name -> binlogdata.RowChange + 47, // 20: binlogdata.FieldEvent.fields:type_name -> query.Field + 35, // 21: binlogdata.ShardGtid.table_p_ks:type_name -> binlogdata.TableLastPK + 21, // 22: binlogdata.VGtid.shard_gtids:type_name -> binlogdata.ShardGtid + 5, // 23: binlogdata.Journal.migration_type:type_name -> binlogdata.MigrationType + 21, // 24: binlogdata.Journal.shard_gtids:type_name -> binlogdata.ShardGtid + 23, // 25: binlogdata.Journal.participants:type_name -> binlogdata.KeyspaceShard + 4, // 26: binlogdata.VEvent.type:type_name -> binlogdata.VEventType + 19, // 27: binlogdata.VEvent.row_event:type_name -> binlogdata.RowEvent + 20, // 28: binlogdata.VEvent.field_event:type_name -> binlogdata.FieldEvent + 22, // 29: binlogdata.VEvent.vgtid:type_name -> binlogdata.VGtid + 24, // 30: binlogdata.VEvent.journal:type_name -> binlogdata.Journal + 34, // 31: binlogdata.VEvent.last_p_k_event:type_name -> binlogdata.LastPKEvent + 47, // 32: binlogdata.MinimalTable.fields:type_name -> query.Field + 26, // 33: binlogdata.MinimalSchema.tables:type_name -> binlogdata.MinimalTable + 48, // 34: binlogdata.VStreamRequest.effective_caller_id:type_name -> vtrpc.CallerID + 49, // 35: binlogdata.VStreamRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 50, // 36: binlogdata.VStreamRequest.target:type_name -> query.Target + 16, // 37: binlogdata.VStreamRequest.filter:type_name -> binlogdata.Filter + 35, // 
38: binlogdata.VStreamRequest.table_last_p_ks:type_name -> binlogdata.TableLastPK + 25, // 39: binlogdata.VStreamResponse.events:type_name -> binlogdata.VEvent + 48, // 40: binlogdata.VStreamRowsRequest.effective_caller_id:type_name -> vtrpc.CallerID + 49, // 41: binlogdata.VStreamRowsRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 50, // 42: binlogdata.VStreamRowsRequest.target:type_name -> query.Target + 51, // 43: binlogdata.VStreamRowsRequest.lastpk:type_name -> query.QueryResult + 47, // 44: binlogdata.VStreamRowsResponse.fields:type_name -> query.Field + 47, // 45: binlogdata.VStreamRowsResponse.pkfields:type_name -> query.Field + 46, // 46: binlogdata.VStreamRowsResponse.rows:type_name -> query.Row + 46, // 47: binlogdata.VStreamRowsResponse.lastpk:type_name -> query.Row + 48, // 48: binlogdata.VStreamTablesRequest.effective_caller_id:type_name -> vtrpc.CallerID + 49, // 49: binlogdata.VStreamTablesRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 50, // 50: binlogdata.VStreamTablesRequest.target:type_name -> query.Target + 47, // 51: binlogdata.VStreamTablesResponse.fields:type_name -> query.Field + 47, // 52: binlogdata.VStreamTablesResponse.pkfields:type_name -> query.Field + 46, // 53: binlogdata.VStreamTablesResponse.rows:type_name -> query.Row + 46, // 54: binlogdata.VStreamTablesResponse.lastpk:type_name -> query.Row + 35, // 55: binlogdata.LastPKEvent.table_last_p_k:type_name -> binlogdata.TableLastPK + 51, // 56: binlogdata.TableLastPK.lastpk:type_name -> query.QueryResult + 48, // 57: binlogdata.VStreamResultsRequest.effective_caller_id:type_name -> vtrpc.CallerID + 49, // 58: binlogdata.VStreamResultsRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 50, // 59: binlogdata.VStreamResultsRequest.target:type_name -> query.Target + 47, // 60: binlogdata.VStreamResultsResponse.fields:type_name -> query.Field + 46, // 61: binlogdata.VStreamResultsResponse.rows:type_name -> query.Row + 6, // 62: 
binlogdata.BinlogTransaction.Statement.category:type_name -> binlogdata.BinlogTransaction.Statement.Category + 8, // 63: binlogdata.BinlogTransaction.Statement.charset:type_name -> binlogdata.Charset + 14, // 64: binlogdata.Rule.ConvertCharsetEntry.value:type_name -> binlogdata.CharsetConversion + 65, // [65:65] is the sub-list for method output_type + 65, // [65:65] is the sub-list for method input_type + 65, // [65:65] is the sub-list for extension type_name + 65, // [65:65] is the sub-list for extension extendee + 0, // [0:65] is the sub-list for field type_name } func init() { file_binlogdata_proto_init() } @@ -3581,7 +3852,7 @@ func file_binlogdata_proto_init() { } } file_binlogdata_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LastPKEvent); i { + switch v := v.(*VStreamTablesRequest); i { case 0: return &v.state case 1: @@ -3593,7 +3864,7 @@ func file_binlogdata_proto_init() { } } file_binlogdata_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TableLastPK); i { + switch v := v.(*VStreamTablesResponse); i { case 0: return &v.state case 1: @@ -3605,7 +3876,7 @@ func file_binlogdata_proto_init() { } } file_binlogdata_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VStreamResultsRequest); i { + switch v := v.(*LastPKEvent); i { case 0: return &v.state case 1: @@ -3617,7 +3888,7 @@ func file_binlogdata_proto_init() { } } file_binlogdata_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VStreamResultsResponse); i { + switch v := v.(*TableLastPK); i { case 0: return &v.state case 1: @@ -3629,6 +3900,30 @@ func file_binlogdata_proto_init() { } } file_binlogdata_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VStreamResultsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_binlogdata_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VStreamResultsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_binlogdata_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BinlogTransaction_Statement); i { case 0: return &v.state @@ -3640,7 +3935,7 @@ func file_binlogdata_proto_init() { return nil } } - file_binlogdata_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_binlogdata_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RowChange_Bitmap); i { case 0: return &v.state @@ -3658,8 +3953,8 @@ func file_binlogdata_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_binlogdata_proto_rawDesc, - NumEnums: 7, - NumMessages: 33, + NumEnums: 8, + NumMessages: 35, NumExtensions: 0, NumServices: 0, }, diff --git a/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go b/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go index 926ea5434c8..379583b0354 100644 --- a/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go +++ b/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
-// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: binlogdata.proto package binlogdata import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -22,6 +23,840 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *Charset) CloneVT() *Charset { + if m == nil { + return (*Charset)(nil) + } + r := &Charset{ + Client: m.Client, + Conn: m.Conn, + Server: m.Server, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Charset) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BinlogTransaction_Statement) CloneVT() *BinlogTransaction_Statement { + if m == nil { + return (*BinlogTransaction_Statement)(nil) + } + r := &BinlogTransaction_Statement{ + Category: m.Category, + Charset: m.Charset.CloneVT(), + } + if rhs := m.Sql; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Sql = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BinlogTransaction_Statement) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BinlogTransaction) CloneVT() *BinlogTransaction { + if m == nil { + return (*BinlogTransaction)(nil) + } + r := &BinlogTransaction{ + EventToken: m.EventToken.CloneVT(), + } + if rhs := m.Statements; rhs != nil { + tmpContainer := make([]*BinlogTransaction_Statement, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Statements = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BinlogTransaction) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m 
*StreamKeyRangeRequest) CloneVT() *StreamKeyRangeRequest { + if m == nil { + return (*StreamKeyRangeRequest)(nil) + } + r := &StreamKeyRangeRequest{ + Position: m.Position, + KeyRange: m.KeyRange.CloneVT(), + Charset: m.Charset.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamKeyRangeRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamKeyRangeResponse) CloneVT() *StreamKeyRangeResponse { + if m == nil { + return (*StreamKeyRangeResponse)(nil) + } + r := &StreamKeyRangeResponse{ + BinlogTransaction: m.BinlogTransaction.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamKeyRangeResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamTablesRequest) CloneVT() *StreamTablesRequest { + if m == nil { + return (*StreamTablesRequest)(nil) + } + r := &StreamTablesRequest{ + Position: m.Position, + Charset: m.Charset.CloneVT(), + } + if rhs := m.Tables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamTablesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamTablesResponse) CloneVT() *StreamTablesResponse { + if m == nil { + return (*StreamTablesResponse)(nil) + } + r := &StreamTablesResponse{ + BinlogTransaction: m.BinlogTransaction.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamTablesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m 
*CharsetConversion) CloneVT() *CharsetConversion { + if m == nil { + return (*CharsetConversion)(nil) + } + r := &CharsetConversion{ + FromCharset: m.FromCharset, + ToCharset: m.ToCharset, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CharsetConversion) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Rule) CloneVT() *Rule { + if m == nil { + return (*Rule)(nil) + } + r := &Rule{ + Match: m.Match, + Filter: m.Filter, + SourceUniqueKeyColumns: m.SourceUniqueKeyColumns, + TargetUniqueKeyColumns: m.TargetUniqueKeyColumns, + SourceUniqueKeyTargetColumns: m.SourceUniqueKeyTargetColumns, + } + if rhs := m.ConvertEnumToText; rhs != nil { + tmpContainer := make(map[string]string, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.ConvertEnumToText = tmpContainer + } + if rhs := m.ConvertCharset; rhs != nil { + tmpContainer := make(map[string]*CharsetConversion, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ConvertCharset = tmpContainer + } + if rhs := m.ConvertIntToEnum; rhs != nil { + tmpContainer := make(map[string]bool, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.ConvertIntToEnum = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Rule) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Filter) CloneVT() *Filter { + if m == nil { + return (*Filter)(nil) + } + r := &Filter{ + FieldEventMode: m.FieldEventMode, + WorkflowType: m.WorkflowType, + WorkflowName: m.WorkflowName, + } + if rhs := m.Rules; rhs != nil { + tmpContainer := make([]*Rule, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Rules = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + 
copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Filter) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BinlogSource) CloneVT() *BinlogSource { + if m == nil { + return (*BinlogSource)(nil) + } + r := &BinlogSource{ + Keyspace: m.Keyspace, + Shard: m.Shard, + TabletType: m.TabletType, + KeyRange: m.KeyRange.CloneVT(), + Filter: m.Filter.CloneVT(), + OnDdl: m.OnDdl, + ExternalMysql: m.ExternalMysql, + StopAfterCopy: m.StopAfterCopy, + ExternalCluster: m.ExternalCluster, + SourceTimeZone: m.SourceTimeZone, + TargetTimeZone: m.TargetTimeZone, + } + if rhs := m.Tables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BinlogSource) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RowChange_Bitmap) CloneVT() *RowChange_Bitmap { + if m == nil { + return (*RowChange_Bitmap)(nil) + } + r := &RowChange_Bitmap{ + Count: m.Count, + } + if rhs := m.Cols; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Cols = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RowChange_Bitmap) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RowChange) CloneVT() *RowChange { + if m == nil { + return (*RowChange)(nil) + } + r := &RowChange{ + Before: m.Before.CloneVT(), + After: m.After.CloneVT(), + DataColumns: m.DataColumns.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RowChange) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RowEvent) CloneVT() *RowEvent { + if m == nil { + return (*RowEvent)(nil) + } + r 
:= &RowEvent{ + TableName: m.TableName, + Keyspace: m.Keyspace, + Shard: m.Shard, + Flags: m.Flags, + } + if rhs := m.RowChanges; rhs != nil { + tmpContainer := make([]*RowChange, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.RowChanges = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RowEvent) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *FieldEvent) CloneVT() *FieldEvent { + if m == nil { + return (*FieldEvent)(nil) + } + r := &FieldEvent{ + TableName: m.TableName, + Keyspace: m.Keyspace, + Shard: m.Shard, + } + if rhs := m.Fields; rhs != nil { + tmpContainer := make([]*query.Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Fields = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *FieldEvent) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardGtid) CloneVT() *ShardGtid { + if m == nil { + return (*ShardGtid)(nil) + } + r := &ShardGtid{ + Keyspace: m.Keyspace, + Shard: m.Shard, + Gtid: m.Gtid, + } + if rhs := m.TablePKs; rhs != nil { + tmpContainer := make([]*TableLastPK, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TablePKs = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShardGtid) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VGtid) CloneVT() *VGtid { + if m == nil { + return (*VGtid)(nil) + } + r := &VGtid{} + if rhs := m.ShardGtids; rhs != nil { + tmpContainer := make([]*ShardGtid, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ShardGtids = tmpContainer + } + if len(m.unknownFields) > 0 { + 
r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VGtid) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *KeyspaceShard) CloneVT() *KeyspaceShard { + if m == nil { + return (*KeyspaceShard)(nil) + } + r := &KeyspaceShard{ + Keyspace: m.Keyspace, + Shard: m.Shard, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *KeyspaceShard) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Journal) CloneVT() *Journal { + if m == nil { + return (*Journal)(nil) + } + r := &Journal{ + Id: m.Id, + MigrationType: m.MigrationType, + LocalPosition: m.LocalPosition, + } + if rhs := m.Tables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tables = tmpContainer + } + if rhs := m.ShardGtids; rhs != nil { + tmpContainer := make([]*ShardGtid, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ShardGtids = tmpContainer + } + if rhs := m.Participants; rhs != nil { + tmpContainer := make([]*KeyspaceShard, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Participants = tmpContainer + } + if rhs := m.SourceWorkflows; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.SourceWorkflows = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Journal) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VEvent) CloneVT() *VEvent { + if m == nil { + return (*VEvent)(nil) + } + r := &VEvent{ + Type: m.Type, + Timestamp: m.Timestamp, + Gtid: m.Gtid, + Statement: m.Statement, + RowEvent: m.RowEvent.CloneVT(), + FieldEvent: m.FieldEvent.CloneVT(), + Vgtid: m.Vgtid.CloneVT(), + Journal: m.Journal.CloneVT(), + Dml: m.Dml, + 
CurrentTime: m.CurrentTime, + LastPKEvent: m.LastPKEvent.CloneVT(), + Keyspace: m.Keyspace, + Shard: m.Shard, + Throttled: m.Throttled, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VEvent) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MinimalTable) CloneVT() *MinimalTable { + if m == nil { + return (*MinimalTable)(nil) + } + r := &MinimalTable{ + Name: m.Name, + } + if rhs := m.Fields; rhs != nil { + tmpContainer := make([]*query.Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Fields = tmpContainer + } + if rhs := m.PKColumns; rhs != nil { + tmpContainer := make([]int64, len(rhs)) + copy(tmpContainer, rhs) + r.PKColumns = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *MinimalTable) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MinimalSchema) CloneVT() *MinimalSchema { + if m == nil { + return (*MinimalSchema)(nil) + } + r := &MinimalSchema{} + if rhs := m.Tables; rhs != nil { + tmpContainer := make([]*MinimalTable, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Tables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *MinimalSchema) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamRequest) CloneVT() *VStreamRequest { + if m == nil { + return (*VStreamRequest)(nil) + } + r := &VStreamRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Position: m.Position, + Filter: m.Filter.CloneVT(), + } + if rhs := m.TableLastPKs; rhs != nil { + tmpContainer := make([]*TableLastPK, 
len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TableLastPKs = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamResponse) CloneVT() *VStreamResponse { + if m == nil { + return (*VStreamResponse)(nil) + } + r := &VStreamResponse{} + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*VEvent, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamRowsRequest) CloneVT() *VStreamRowsRequest { + if m == nil { + return (*VStreamRowsRequest)(nil) + } + r := &VStreamRowsRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query, + Lastpk: m.Lastpk.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamRowsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamRowsResponse) CloneVT() *VStreamRowsResponse { + if m == nil { + return (*VStreamRowsResponse)(nil) + } + r := &VStreamRowsResponse{ + Gtid: m.Gtid, + Lastpk: m.Lastpk.CloneVT(), + Throttled: m.Throttled, + Heartbeat: m.Heartbeat, + } + if rhs := m.Fields; rhs != nil { + tmpContainer := make([]*query.Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Fields = tmpContainer + } + if rhs := m.Pkfields; rhs != nil { + tmpContainer := make([]*query.Field, len(rhs)) + for k, v := range rhs { + 
tmpContainer[k] = v.CloneVT() + } + r.Pkfields = tmpContainer + } + if rhs := m.Rows; rhs != nil { + tmpContainer := make([]*query.Row, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Rows = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamRowsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamTablesRequest) CloneVT() *VStreamTablesRequest { + if m == nil { + return (*VStreamTablesRequest)(nil) + } + r := &VStreamTablesRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamTablesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamTablesResponse) CloneVT() *VStreamTablesResponse { + if m == nil { + return (*VStreamTablesResponse)(nil) + } + r := &VStreamTablesResponse{ + TableName: m.TableName, + Gtid: m.Gtid, + Lastpk: m.Lastpk.CloneVT(), + } + if rhs := m.Fields; rhs != nil { + tmpContainer := make([]*query.Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Fields = tmpContainer + } + if rhs := m.Pkfields; rhs != nil { + tmpContainer := make([]*query.Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Pkfields = tmpContainer + } + if rhs := m.Rows; rhs != nil { + tmpContainer := make([]*query.Row, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Rows = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamTablesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() 
+} + +func (m *LastPKEvent) CloneVT() *LastPKEvent { + if m == nil { + return (*LastPKEvent)(nil) + } + r := &LastPKEvent{ + TableLastPK: m.TableLastPK.CloneVT(), + Completed: m.Completed, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *LastPKEvent) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TableLastPK) CloneVT() *TableLastPK { + if m == nil { + return (*TableLastPK)(nil) + } + r := &TableLastPK{ + TableName: m.TableName, + Lastpk: m.Lastpk.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TableLastPK) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamResultsRequest) CloneVT() *VStreamResultsRequest { + if m == nil { + return (*VStreamResultsRequest)(nil) + } + r := &VStreamResultsRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamResultsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamResultsResponse) CloneVT() *VStreamResultsResponse { + if m == nil { + return (*VStreamResultsResponse)(nil) + } + r := &VStreamResultsResponse{ + Gtid: m.Gtid, + } + if rhs := m.Fields; rhs != nil { + tmpContainer := make([]*query.Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Fields = tmpContainer + } + if rhs := m.Rows; rhs != nil { + tmpContainer := make([]*query.Row, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Rows = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) 
+ copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamResultsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Charset) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -887,6 +1722,11 @@ func (m *RowEvent) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.Flags != 0 { + i = encodeVarint(dAtA, i, uint64(m.Flags)) + i-- + dAtA[i] = 0x28 + } if len(m.Shard) > 0 { i -= len(m.Shard) copy(dAtA[i:], m.Shard) @@ -1833,6 +2673,162 @@ func (m *VStreamRowsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *VStreamTablesRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VStreamTablesRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VStreamTablesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Target != nil { + size, err := m.Target.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + size, err := m.ImmediateCallerId.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + size, err := m.EffectiveCallerId.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa 
+ } + return len(dAtA) - i, nil +} + +func (m *VStreamTablesResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VStreamTablesResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VStreamTablesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Lastpk != nil { + size, err := m.Lastpk.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.Rows) > 0 { + for iNdEx := len(m.Rows) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rows[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Gtid) > 0 { + i -= len(m.Gtid) + copy(dAtA[i:], m.Gtid) + i = encodeVarint(dAtA, i, uint64(len(m.Gtid))) + i-- + dAtA[i] = 0x22 + } + if len(m.Pkfields) > 0 { + for iNdEx := len(m.Pkfields) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Pkfields[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Fields[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.TableName) > 0 { + i -= len(m.TableName) + copy(dAtA[i:], m.TableName) + i = encodeVarint(dAtA, i, 
uint64(len(m.TableName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *LastPKEvent) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -2089,11 +3085,23 @@ var vtprotoPool_VStreamRowsResponse = sync.Pool{ } func (m *VStreamRowsResponse) ResetVT() { + for _, mm := range m.Fields { + mm.Reset() + } + f0 := m.Fields[:0] + for _, mm := range m.Pkfields { + mm.Reset() + } + f1 := m.Pkfields[:0] for _, mm := range m.Rows { mm.ResetVT() } + f2 := m.Rows[:0] m.Lastpk.ReturnToVTPool() m.Reset() + m.Fields = f0 + m.Pkfields = f1 + m.Rows = f2 } func (m *VStreamRowsResponse) ReturnToVTPool() { if m != nil { @@ -2104,6 +3112,41 @@ func (m *VStreamRowsResponse) ReturnToVTPool() { func VStreamRowsResponseFromVTPool() *VStreamRowsResponse { return vtprotoPool_VStreamRowsResponse.Get().(*VStreamRowsResponse) } + +var vtprotoPool_VStreamTablesResponse = sync.Pool{ + New: func() interface{} { + return &VStreamTablesResponse{} + }, +} + +func (m *VStreamTablesResponse) ResetVT() { + for _, mm := range m.Fields { + mm.Reset() + } + f0 := m.Fields[:0] + for _, mm := range m.Pkfields { + mm.Reset() + } + f1 := m.Pkfields[:0] + for _, mm := range m.Rows { + mm.ResetVT() + } + f2 := m.Rows[:0] + m.Lastpk.ReturnToVTPool() + m.Reset() + m.Fields = f0 + m.Pkfields = f1 + m.Rows = f2 +} +func (m *VStreamTablesResponse) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_VStreamTablesResponse.Put(m) + } +} +func VStreamTablesResponseFromVTPool() *VStreamTablesResponse { + return vtprotoPool_VStreamTablesResponse.Get().(*VStreamTablesResponse) +} func (m *Charset) SizeVT() (n int) { if m == nil { return 0 @@ -2461,6 +3504,9 @@ func (m *RowEvent) SizeVT() (n int) { if l > 0 { n += 1 + l + sov(uint64(l)) } + if m.Flags != 0 { + n += 1 + sov(uint64(m.Flags)) + } n += len(m.unknownFields) return n } @@ -2736,27 +3782,99 @@ func (m *VStreamRequest) SizeVT() (n int) { n += 1 + l + sov(uint64(l)) } } - n += len(m.unknownFields) - return n -} - 
-func (m *VStreamResponse) SizeVT() (n int) { - if m == nil { - return 0 + n += len(m.unknownFields) + return n +} + +func (m *VStreamResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *VStreamRowsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Target != nil { + l = m.Target.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Query) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Lastpk != nil { + l = m.Lastpk.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VStreamRowsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Pkfields) > 0 { + for _, e := range m.Pkfields { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.Gtid) + if l > 0 { + n += 1 + l + sov(uint64(l)) } - var l int - _ = l - if len(m.Events) > 0 { - for _, e := range m.Events { + if len(m.Rows) > 0 { + for _, e := range m.Rows { l = e.SizeVT() n += 1 + l + sov(uint64(l)) } } + if m.Lastpk != nil { + l = m.Lastpk.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Throttled { + n += 2 + } + if m.Heartbeat { + n += 2 + } n += len(m.unknownFields) return n } -func (m *VStreamRowsRequest) SizeVT() (n int) { +func (m *VStreamTablesRequest) SizeVT() (n int) { if m == nil { return 0 } @@ -2774,24 +3892,20 @@ func (m *VStreamRowsRequest) SizeVT() (n int) { l = m.Target.SizeVT() n += 1 + l + sov(uint64(l)) } - l = len(m.Query) - if l > 0 { - n += 1 + l + 
sov(uint64(l)) - } - if m.Lastpk != nil { - l = m.Lastpk.SizeVT() - n += 1 + l + sov(uint64(l)) - } n += len(m.unknownFields) return n } -func (m *VStreamRowsResponse) SizeVT() (n int) { +func (m *VStreamTablesResponse) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l + l = len(m.TableName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } if len(m.Fields) > 0 { for _, e := range m.Fields { l = e.SizeVT() @@ -2818,12 +3932,6 @@ func (m *VStreamRowsResponse) SizeVT() (n int) { l = m.Lastpk.SizeVT() n += 1 + l + sov(uint64(l)) } - if m.Throttled { - n += 2 - } - if m.Heartbeat { - n += 2 - } n += len(m.unknownFields) return n } @@ -4837,12 +5945,230 @@ func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { if err := m.Filter.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } - iNdEx = postIndex - case 7: + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType) + } + m.OnDdl = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OnDdl |= OnDDLAction(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalMysql", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalMysql = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
StopAfterCopy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StopAfterCopy = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalCluster", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalCluster = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceTimeZone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceTimeZone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetTimeZone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetTimeZone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RowChange_Bitmap) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RowChange_Bitmap: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RowChange_Bitmap: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) } - m.OnDdl = 0 + m.Count = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4852,16 +6178,16 @@ func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.OnDdl |= OnDDLAction(b&0x7F) << shift + m.Count |= int64(b&0x7F) << shift if b < 0x80 { break } } - case 8: + case 2: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field ExternalMysql", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cols", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4871,49 +6197,82 @@ func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalMysql = string(dAtA[iNdEx:postIndex]) + m.Cols = append(m.Cols[:0], dAtA[iNdEx:postIndex]...) + if m.Cols == nil { + m.Cols = []byte{} + } iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StopAfterCopy", wireType) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength } - m.StopAfterCopy = bool(v != 0) - case 10: + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RowChange) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RowChange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RowChange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalCluster", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Before", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4923,29 +6282,33 @@ func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalCluster = string(dAtA[iNdEx:postIndex]) + if m.Before == nil { + m.Before = &query.Row{} + } + if err := m.Before.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 11: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceTimeZone", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field After", wireType) } - var stringLen uint64 + var msglen int for shift := 
uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4955,29 +6318,33 @@ func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.SourceTimeZone = string(dAtA[iNdEx:postIndex]) + if m.After == nil { + m.After = &query.Row{} + } + if err := m.After.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 12: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetTimeZone", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DataColumns", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4987,23 +6354,27 @@ func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.TargetTimeZone = string(dAtA[iNdEx:postIndex]) + if m.DataColumns == nil { + m.DataColumns = &RowChange_Bitmap{} + } + if err := m.DataColumns.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -5027,7 +6398,7 @@ func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RowChange_Bitmap) UnmarshalVT(dAtA []byte) error { +func (m *RowEvent) UnmarshalVT(dAtA []byte) error { l := len(dAtA) 
iNdEx := 0 for iNdEx < l { @@ -5050,17 +6421,17 @@ func (m *RowChange_Bitmap) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RowChange_Bitmap: wiretype end group for non-group") + return fmt.Errorf("proto: RowEvent: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RowChange_Bitmap: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RowEvent: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableName", wireType) } - m.Count = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5070,16 +6441,95 @@ func (m *RowChange_Bitmap) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cols", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RowChanges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.RowChanges = append(m.RowChanges, &RowChange{}) + if err := m.RowChanges[len(m.RowChanges)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5089,26 +6539,43 @@ func (m *RowChange_Bitmap) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Cols = append(m.Cols[:0], dAtA[iNdEx:postIndex]...) 
- if m.Cols == nil { - m.Cols = []byte{} - } + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + } + m.Flags = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Flags |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -5131,7 +6598,7 @@ func (m *RowChange_Bitmap) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RowChange) UnmarshalVT(dAtA []byte) error { +func (m *FieldEvent) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5154,17 +6621,17 @@ func (m *RowChange) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RowChange: wiretype end group for non-group") + return fmt.Errorf("proto: FieldEvent: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RowChange: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FieldEvent: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Before", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TableName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5174,31 +6641,27 @@ func (m *RowChange) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return 
ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Before == nil { - m.Before = &query.Row{} - } - if err := m.Before.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.TableName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field After", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5225,18 +6688,16 @@ func (m *RowChange) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.After == nil { - m.After = &query.Row{} - } - if err := m.After.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Fields = append(m.Fields, &query.Field{}) + if err := m.Fields[len(m.Fields)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataColumns", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5246,27 +6707,55 @@ func (m *RowChange) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.DataColumns == nil { - m.DataColumns = &RowChange_Bitmap{} + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } - if err := m.DataColumns.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - 
return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5290,7 +6779,7 @@ func (m *RowChange) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RowEvent) UnmarshalVT(dAtA []byte) error { +func (m *ShardGtid) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5313,15 +6802,15 @@ func (m *RowEvent) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RowEvent: wiretype end group for non-group") + return fmt.Errorf("proto: ShardGtid: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RowEvent: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShardGtid: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TableName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5349,13 +6838,13 @@ func (m *RowEvent) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TableName = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RowChanges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } 
- var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5365,29 +6854,27 @@ func (m *RowEvent) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.RowChanges = append(m.RowChanges, &RowChange{}) - if err := m.RowChanges[len(m.RowChanges)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Gtid", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5415,13 +6902,13 @@ func (m *RowEvent) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Gtid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TablePKs", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5431,23 +6918,25 @@ func (m *RowEvent) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { 
return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.TablePKs = append(m.TablePKs, &TableLastPK{}) + if err := m.TablePKs[len(m.TablePKs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -5471,7 +6960,7 @@ func (m *RowEvent) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *FieldEvent) UnmarshalVT(dAtA []byte) error { +func (m *VGtid) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5494,17 +6983,17 @@ func (m *FieldEvent) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FieldEvent: wiretype end group for non-group") + return fmt.Errorf("proto: VGtid: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FieldEvent: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VGtid: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TableName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShardGtids", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5514,59 +7003,78 @@ func (m *FieldEvent) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.TableName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + m.ShardGtids = append(m.ShardGtids, 
&ShardGtid{}) + if err := m.ShardGtids[len(m.ShardGtids)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - if msglen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyspaceShard) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Fields = append(m.Fields, &query.Field{}) - if err := m.Fields[len(m.Fields)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 3: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyspaceShard: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyspaceShard: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } @@ -5598,7 +7106,7 @@ func (m *FieldEvent) UnmarshalVT(dAtA []byte) error { } m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: 
+ case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } @@ -5652,7 +7160,7 @@ func (m *FieldEvent) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ShardGtid) UnmarshalVT(dAtA []byte) error { +func (m *Journal) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5675,15 +7183,53 @@ func (m *ShardGtid) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShardGtid: wiretype end group for non-group") + return fmt.Errorf("proto: Journal: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShardGtid: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Journal: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MigrationType", wireType) + } + m.MigrationType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MigrationType |= MigrationType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5711,11 +7257,11 @@ func (m *ShardGtid) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = 
string(dAtA[iNdEx:postIndex]) + m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalPosition", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5743,13 +7289,13 @@ func (m *ShardGtid) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.LocalPosition = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Gtid", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShardGtids", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5759,27 +7305,29 @@ func (m *ShardGtid) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Gtid = string(dAtA[iNdEx:postIndex]) + m.ShardGtids = append(m.ShardGtids, &ShardGtid{}) + if err := m.ShardGtids[len(m.ShardGtids)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TablePKs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Participants", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5806,67 +7354,16 @@ func (m *ShardGtid) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TablePKs = 
append(m.TablePKs, &TableLastPK{}) - if err := m.TablePKs[len(m.TablePKs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Participants = append(m.Participants, &KeyspaceShard{}) + if err := m.Participants[len(m.Participants)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VGtid) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VGtid: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VGtid: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardGtids", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceWorkflows", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5876,25 +7373,23 @@ func (m *VGtid) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - 
postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.ShardGtids = append(m.ShardGtids, &ShardGtid{}) - if err := m.ShardGtids[len(m.ShardGtids)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.SourceWorkflows = append(m.SourceWorkflows, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -5918,7 +7413,7 @@ func (m *VGtid) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *KeyspaceShard) UnmarshalVT(dAtA []byte) error { +func (m *VEvent) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5941,15 +7436,53 @@ func (m *KeyspaceShard) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KeyspaceShard: wiretype end group for non-group") + return fmt.Errorf("proto: VEvent: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KeyspaceShard: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VEvent: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= VEventType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Gtid", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5977,11 +7510,11 @@ func (m *KeyspaceShard) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Gtid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Statement", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6009,64 +7542,49 @@ func (m *KeyspaceShard) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.Statement = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RowEvent", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Journal) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if m.RowEvent == nil { + m.RowEvent = &RowEvent{} + } + if err := m.RowEvent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Journal: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Journal: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldEvent", wireType) } - m.Id = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6076,16 +7594,33 @@ func (m *Journal) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Id |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MigrationType", wireType) + if msglen < 0 { + return ErrInvalidLength } - m.MigrationType = 0 + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldEvent == nil { + m.FieldEvent = &FieldEvent{} + } + if err := m.FieldEvent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vgtid", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6095,16 +7630,33 @@ func (m *Journal) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MigrationType |= MigrationType(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 3: + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Vgtid == nil { + m.Vgtid = &VGtid{} + } + if err := m.Vgtid.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Journal", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6114,27 +7666,31 @@ func (m *Journal) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) + if m.Journal == nil { + m.Journal = &Journal{} + } + if err := m.Journal.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalPosition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Dml", wireType) } var stringLen uint64 for shift := uint(0); ; 
shift += 7 { @@ -6162,11 +7718,30 @@ func (m *Journal) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.LocalPosition = string(dAtA[iNdEx:postIndex]) + m.Dml = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentTime", wireType) + } + m.CurrentTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 21: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardGtids", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastPKEvent", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6193,16 +7768,18 @@ func (m *Journal) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ShardGtids = append(m.ShardGtids, &ShardGtid{}) - if err := m.ShardGtids[len(m.ShardGtids)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.LastPKEvent == nil { + m.LastPKEvent = &LastPKEvent{} + } + if err := m.LastPKEvent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 22: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Participants", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6212,29 +7789,27 @@ func (m *Journal) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + 
intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Participants = append(m.Participants, &KeyspaceShard{}) - if err := m.Participants[len(m.Participants)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: + case 23: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceWorkflows", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6262,8 +7837,28 @@ func (m *Journal) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SourceWorkflows = append(m.SourceWorkflows, string(dAtA[iNdEx:postIndex])) + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 24: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Throttled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Throttled = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -6286,7 +7881,7 @@ func (m *Journal) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VEvent) UnmarshalVT(dAtA []byte) error { +func (m *MinimalTable) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6301,93 +7896,23 @@ func (m *VEvent) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VEvent: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VEvent: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum 
{ - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= VEventType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - m.Timestamp = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Timestamp |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Gtid", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.Gtid = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MinimalTable: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MinimalTable: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Statement", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift 
+= 7 { @@ -6415,11 +7940,11 @@ func (m *VEvent) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Statement = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RowEvent", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6446,88 +7971,141 @@ func (m *VEvent) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RowEvent == nil { - m.RowEvent = &RowEvent{} - } - if err := m.RowEvent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Fields = append(m.Fields, &query.Field{}) + if err := m.Fields[len(m.Fields)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldEvent", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + case 3: + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.PKColumns = append(m.PKColumns, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break + if packedLen < 0 { + return ErrInvalidLength } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FieldEvent == nil { - m.FieldEvent = &FieldEvent{} - } - if err := m.FieldEvent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Vgtid", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PKColumns) == 0 { + m.PKColumns = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PKColumns = append(m.PKColumns, v) } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PKColumns", wireType) } - if msglen < 0 { - return ErrInvalidLength + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.Vgtid == nil { - m.Vgtid = &VGtid{} + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MinimalSchema) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if err := m.Vgtid.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 8: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MinimalSchema: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MinimalSchema: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Journal", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6554,67 +8132,65 @@ func (m *VEvent) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Journal == nil { - m.Journal = &Journal{} - } - if err := m.Journal.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Tables = append(m.Tables, &MinimalTable{}) + if err := m.Tables[len(m.Tables)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Dml", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - 
intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VStreamRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Dml = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 20: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentTime", wireType) - } - m.CurrentTime = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentTime |= int64(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - case 21: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VStreamRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VStreamRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastPKEvent", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6641,18 +8217,18 @@ func (m *VEvent) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.LastPKEvent == nil { - m.LastPKEvent = 
&LastPKEvent{} + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} } - if err := m.LastPKEvent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.EffectiveCallerId.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 22: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6662,29 +8238,33 @@ func (m *VEvent) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &query.VTGateCallerID{} + } + if err := m.ImmediateCallerId.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 23: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6694,98 +8274,31 @@ func (m *VEvent) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 
0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 24: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Throttled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if m.Target == nil { + m.Target = &query.Target{} } - m.Throttled = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { + if err := m.Target.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MinimalTable) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MinimalTable: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MinimalTable: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6813,11 
+8326,11 @@ func (m *MinimalTable) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Position = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6844,87 +8357,47 @@ func (m *MinimalTable) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Fields = append(m.Fields, &query.Field{}) - if err := m.Fields[len(m.Fields)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.Filter == nil { + m.Filter = &Filter{} + } + if err := m.Filter.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType == 0 { - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PKColumns = append(m.PKColumns, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLength + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableLastPKs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 
{ - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.PKColumns) == 0 { - m.PKColumns = make([]int64, 0, elementCount) - } - for iNdEx < postIndex { - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PKColumns = append(m.PKColumns, v) + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field PKColumns", wireType) } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableLastPKs = append(m.TableLastPKs, &TableLastPK{}) + if err := m.TableLastPKs[len(m.TableLastPKs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -6947,7 +8420,7 @@ func (m *MinimalTable) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *MinimalSchema) UnmarshalVT(dAtA []byte) error { +func (m *VStreamResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6970,15 +8443,15 @@ func (m *MinimalSchema) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MinimalSchema: wiretype end group for non-group") + return fmt.Errorf("proto: VStreamResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MinimalSchema: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VStreamResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + return fmt.Errorf("proto: wrong 
wireType = %d for field Events", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7005,8 +8478,8 @@ func (m *MinimalSchema) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tables = append(m.Tables, &MinimalTable{}) - if err := m.Tables[len(m.Tables)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Events = append(m.Events, &VEvent{}) + if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -7032,7 +8505,7 @@ func (m *MinimalSchema) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VStreamRequest) UnmarshalVT(dAtA []byte) error { +func (m *VStreamRowsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7055,10 +8528,10 @@ func (m *VStreamRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VStreamRequest: wiretype end group for non-group") + return fmt.Errorf("proto: VStreamRowsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VStreamRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VStreamRowsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -7171,7 +8644,7 @@ func (m *VStreamRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7199,11 +8672,11 @@ func (m *VStreamRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Position = string(dAtA[iNdEx:postIndex]) + m.Query = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Filter", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Lastpk", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7230,16 +8703,222 @@ func (m *VStreamRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Filter == nil { - m.Filter = &Filter{} + if m.Lastpk == nil { + m.Lastpk = &query.QueryResult{} } - if err := m.Filter.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Lastpk.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VStreamRowsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VStreamRowsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if len(m.Fields) == cap(m.Fields) { + m.Fields = append(m.Fields, &query.Field{}) + } else { + m.Fields = m.Fields[:len(m.Fields)+1] + if m.Fields[len(m.Fields)-1] == nil { + m.Fields[len(m.Fields)-1] = &query.Field{} + } + } + if err := m.Fields[len(m.Fields)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pkfields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if len(m.Pkfields) == cap(m.Pkfields) { + m.Pkfields = append(m.Pkfields, &query.Field{}) + } else { + m.Pkfields = m.Pkfields[:len(m.Pkfields)+1] + if m.Pkfields[len(m.Pkfields)-1] == nil { + m.Pkfields[len(m.Pkfields)-1] = &query.Field{} + } + } + if err := m.Pkfields[len(m.Pkfields)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gtid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gtid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rows", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if len(m.Rows) == cap(m.Rows) { + m.Rows = append(m.Rows, &query.Row{}) + } else { + m.Rows = m.Rows[:len(m.Rows)+1] + if m.Rows[len(m.Rows)-1] == nil { + m.Rows[len(m.Rows)-1] = &query.Row{} + } + } + if err := m.Rows[len(m.Rows)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TableLastPKs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Lastpk", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7266,67 +8945,18 @@ func (m *VStreamRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TableLastPKs = append(m.TableLastPKs, &TableLastPK{}) - if err := m.TableLastPKs[len(m.TableLastPKs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Lastpk == nil { + m.Lastpk = query.RowFromVTPool() } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { + if err := m.Lastpk.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, 
dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VStreamResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VStreamResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VStreamResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Throttled", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -7336,26 +8966,32 @@ func (m *VStreamResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF + m.Throttled = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Heartbeat", wireType) } - m.Events = append(m.Events, &VEvent{}) - if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex + m.Heartbeat = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -7378,7 +9014,7 @@ func (m *VStreamResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VStreamRowsRequest) UnmarshalVT(dAtA []byte) error { +func (m *VStreamTablesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7401,10 +9037,10 @@ func (m *VStreamRowsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VStreamRowsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: VStreamTablesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VStreamRowsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VStreamTablesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -7515,74 +9151,6 @@ func (m *VStreamRowsRequest) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Query = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lastpk", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Lastpk == nil { - m.Lastpk = &query.QueryResult{} - } - if err := m.Lastpk.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -7605,7 +9173,7 @@ func (m *VStreamRowsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error { +func (m *VStreamTablesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7628,13 +9196,45 @@ func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VStreamRowsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: VStreamTablesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VStreamRowsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VStreamTablesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.TableName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) } @@ -7675,7 +9275,7 @@ func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Pkfields", wireType) } @@ -7716,7 +9316,7 @@ func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Gtid", wireType) } @@ -7748,7 +9348,7 @@ func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error { } m.Gtid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Rows", wireType) } @@ -7789,7 +9389,7 @@ func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - case 5: + case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Lastpk", wireType) } @@ -7825,46 +9425,6 @@ func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Throttled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Throttled = bool(v != 0) - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Heartbeat", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Heartbeat = bool(v 
!= 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/go/vt/proto/binlogservice/binlogservice.pb.go b/go/vt/proto/binlogservice/binlogservice.pb.go index 33bb79ccc71..4eac50296c1 100644 --- a/go/vt/proto/binlogservice/binlogservice.pb.go +++ b/go/vt/proto/binlogservice/binlogservice.pb.go @@ -19,8 +19,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: binlogservice.proto package binlogservice diff --git a/go/vt/proto/binlogservice/binlogservice_grpc.pb.go b/go/vt/proto/binlogservice/binlogservice_grpc.pb.go index daf62c96083..25638898b9a 100644 --- a/go/vt/proto/binlogservice/binlogservice_grpc.pb.go +++ b/go/vt/proto/binlogservice/binlogservice_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 -// - protoc v3.6.1 +// - protoc v3.21.3 // source: binlogservice.proto package binlogservice diff --git a/go/vt/proto/logutil/logutil.pb.go b/go/vt/proto/logutil/logutil.pb.go index 5a985ac3223..b2675716168 100644 --- a/go/vt/proto/logutil/logutil.pb.go +++ b/go/vt/proto/logutil/logutil.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: logutil.proto package logutil diff --git a/go/vt/proto/logutil/logutil_vtproto.pb.go b/go/vt/proto/logutil/logutil_vtproto.pb.go index 234c26eea93..1d3ccb74271 100644 --- a/go/vt/proto/logutil/logutil_vtproto.pb.go +++ b/go/vt/proto/logutil/logutil_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
-// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: logutil.proto package logutil import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -19,6 +20,28 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *Event) CloneVT() *Event { + if m == nil { + return (*Event)(nil) + } + r := &Event{ + Time: m.Time.CloneVT(), + Level: m.Level, + File: m.File, + Line: m.Line, + Value: m.Value, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Event) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Event) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil diff --git a/go/vt/proto/mysqlctl/mysqlctl.pb.go b/go/vt/proto/mysqlctl/mysqlctl.pb.go index 00d6128183b..19f70887681 100644 --- a/go/vt/proto/mysqlctl/mysqlctl.pb.go +++ b/go/vt/proto/mysqlctl/mysqlctl.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: mysqlctl.proto package mysqlctl @@ -97,7 +97,7 @@ func (x BackupInfo_Status) Number() protoreflect.EnumNumber { // Deprecated: Use BackupInfo_Status.Descriptor instead. 
func (BackupInfo_Status) EnumDescriptor() ([]byte, []int) { - return file_mysqlctl_proto_rawDescGZIP(), []int{12, 0} + return file_mysqlctl_proto_rawDescGZIP(), []int{16, 0} } type StartRequest struct { @@ -351,8 +351,9 @@ type ApplyBinlogFileRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - BinlogFileName string `protobuf:"bytes,1,opt,name=binlog_file_name,json=binlogFileName,proto3" json:"binlog_file_name,omitempty"` - BinlogRestorePosition string `protobuf:"bytes,2,opt,name=binlog_restore_position,json=binlogRestorePosition,proto3" json:"binlog_restore_position,omitempty"` + BinlogFileName string `protobuf:"bytes,1,opt,name=binlog_file_name,json=binlogFileName,proto3" json:"binlog_file_name,omitempty"` + BinlogRestorePosition string `protobuf:"bytes,2,opt,name=binlog_restore_position,json=binlogRestorePosition,proto3" json:"binlog_restore_position,omitempty"` + BinlogRestoreDatetime *vttime.Time `protobuf:"bytes,3,opt,name=binlog_restore_datetime,json=binlogRestoreDatetime,proto3" json:"binlog_restore_datetime,omitempty"` } func (x *ApplyBinlogFileRequest) Reset() { @@ -401,6 +402,13 @@ func (x *ApplyBinlogFileRequest) GetBinlogRestorePosition() string { return "" } +func (x *ApplyBinlogFileRequest) GetBinlogRestoreDatetime() *vttime.Time { + if x != nil { + return x.BinlogRestoreDatetime + } + return nil +} + type ApplyBinlogFileResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -439,6 +447,128 @@ func (*ApplyBinlogFileResponse) Descriptor() ([]byte, []int) { return file_mysqlctl_proto_rawDescGZIP(), []int{7} } +type ReadBinlogFilesTimestampsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BinlogFileNames []string `protobuf:"bytes,1,rep,name=binlog_file_names,json=binlogFileNames,proto3" json:"binlog_file_names,omitempty"` +} + +func (x *ReadBinlogFilesTimestampsRequest) Reset() { + *x = ReadBinlogFilesTimestampsRequest{} 
+ if protoimpl.UnsafeEnabled { + mi := &file_mysqlctl_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadBinlogFilesTimestampsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadBinlogFilesTimestampsRequest) ProtoMessage() {} + +func (x *ReadBinlogFilesTimestampsRequest) ProtoReflect() protoreflect.Message { + mi := &file_mysqlctl_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadBinlogFilesTimestampsRequest.ProtoReflect.Descriptor instead. +func (*ReadBinlogFilesTimestampsRequest) Descriptor() ([]byte, []int) { + return file_mysqlctl_proto_rawDescGZIP(), []int{8} +} + +func (x *ReadBinlogFilesTimestampsRequest) GetBinlogFileNames() []string { + if x != nil { + return x.BinlogFileNames + } + return nil +} + +type ReadBinlogFilesTimestampsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // FirstTimestamp is the timestamp of the first found transaction searching in order of given binlog files + FirstTimestamp *vttime.Time `protobuf:"bytes,1,opt,name=first_timestamp,json=firstTimestamp,proto3" json:"first_timestamp,omitempty"` + // FirstTimestampBinlog is the name of the binary log in which the first timestamp is found + FirstTimestampBinlog string `protobuf:"bytes,2,opt,name=first_timestamp_binlog,json=firstTimestampBinlog,proto3" json:"first_timestamp_binlog,omitempty"` + // LastTimestamp is the timestamp of the last found transaction in given binlog files + LastTimestamp *vttime.Time `protobuf:"bytes,3,opt,name=last_timestamp,json=lastTimestamp,proto3" json:"last_timestamp,omitempty"` + // LastTimestampBinlog is the name of the binary log in which the last timestamp is found + 
LastTimestampBinlog string `protobuf:"bytes,4,opt,name=last_timestamp_binlog,json=lastTimestampBinlog,proto3" json:"last_timestamp_binlog,omitempty"` +} + +func (x *ReadBinlogFilesTimestampsResponse) Reset() { + *x = ReadBinlogFilesTimestampsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_mysqlctl_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadBinlogFilesTimestampsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadBinlogFilesTimestampsResponse) ProtoMessage() {} + +func (x *ReadBinlogFilesTimestampsResponse) ProtoReflect() protoreflect.Message { + mi := &file_mysqlctl_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadBinlogFilesTimestampsResponse.ProtoReflect.Descriptor instead. 
+func (*ReadBinlogFilesTimestampsResponse) Descriptor() ([]byte, []int) { + return file_mysqlctl_proto_rawDescGZIP(), []int{9} +} + +func (x *ReadBinlogFilesTimestampsResponse) GetFirstTimestamp() *vttime.Time { + if x != nil { + return x.FirstTimestamp + } + return nil +} + +func (x *ReadBinlogFilesTimestampsResponse) GetFirstTimestampBinlog() string { + if x != nil { + return x.FirstTimestampBinlog + } + return "" +} + +func (x *ReadBinlogFilesTimestampsResponse) GetLastTimestamp() *vttime.Time { + if x != nil { + return x.LastTimestamp + } + return nil +} + +func (x *ReadBinlogFilesTimestampsResponse) GetLastTimestampBinlog() string { + if x != nil { + return x.LastTimestampBinlog + } + return "" +} + type ReinitConfigRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -448,7 +578,7 @@ type ReinitConfigRequest struct { func (x *ReinitConfigRequest) Reset() { *x = ReinitConfigRequest{} if protoimpl.UnsafeEnabled { - mi := &file_mysqlctl_proto_msgTypes[8] + mi := &file_mysqlctl_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -461,7 +591,7 @@ func (x *ReinitConfigRequest) String() string { func (*ReinitConfigRequest) ProtoMessage() {} func (x *ReinitConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_mysqlctl_proto_msgTypes[8] + mi := &file_mysqlctl_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -474,7 +604,7 @@ func (x *ReinitConfigRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReinitConfigRequest.ProtoReflect.Descriptor instead. 
func (*ReinitConfigRequest) Descriptor() ([]byte, []int) { - return file_mysqlctl_proto_rawDescGZIP(), []int{8} + return file_mysqlctl_proto_rawDescGZIP(), []int{10} } type ReinitConfigResponse struct { @@ -486,7 +616,7 @@ type ReinitConfigResponse struct { func (x *ReinitConfigResponse) Reset() { *x = ReinitConfigResponse{} if protoimpl.UnsafeEnabled { - mi := &file_mysqlctl_proto_msgTypes[9] + mi := &file_mysqlctl_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -499,7 +629,7 @@ func (x *ReinitConfigResponse) String() string { func (*ReinitConfigResponse) ProtoMessage() {} func (x *ReinitConfigResponse) ProtoReflect() protoreflect.Message { - mi := &file_mysqlctl_proto_msgTypes[9] + mi := &file_mysqlctl_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -512,7 +642,7 @@ func (x *ReinitConfigResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReinitConfigResponse.ProtoReflect.Descriptor instead. 
func (*ReinitConfigResponse) Descriptor() ([]byte, []int) { - return file_mysqlctl_proto_rawDescGZIP(), []int{9} + return file_mysqlctl_proto_rawDescGZIP(), []int{11} } type RefreshConfigRequest struct { @@ -524,7 +654,7 @@ type RefreshConfigRequest struct { func (x *RefreshConfigRequest) Reset() { *x = RefreshConfigRequest{} if protoimpl.UnsafeEnabled { - mi := &file_mysqlctl_proto_msgTypes[10] + mi := &file_mysqlctl_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -537,7 +667,7 @@ func (x *RefreshConfigRequest) String() string { func (*RefreshConfigRequest) ProtoMessage() {} func (x *RefreshConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_mysqlctl_proto_msgTypes[10] + mi := &file_mysqlctl_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -550,7 +680,7 @@ func (x *RefreshConfigRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RefreshConfigRequest.ProtoReflect.Descriptor instead. 
func (*RefreshConfigRequest) Descriptor() ([]byte, []int) { - return file_mysqlctl_proto_rawDescGZIP(), []int{10} + return file_mysqlctl_proto_rawDescGZIP(), []int{12} } type RefreshConfigResponse struct { @@ -562,7 +692,7 @@ type RefreshConfigResponse struct { func (x *RefreshConfigResponse) Reset() { *x = RefreshConfigResponse{} if protoimpl.UnsafeEnabled { - mi := &file_mysqlctl_proto_msgTypes[11] + mi := &file_mysqlctl_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -575,7 +705,7 @@ func (x *RefreshConfigResponse) String() string { func (*RefreshConfigResponse) ProtoMessage() {} func (x *RefreshConfigResponse) ProtoReflect() protoreflect.Message { - mi := &file_mysqlctl_proto_msgTypes[11] + mi := &file_mysqlctl_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -588,7 +718,92 @@ func (x *RefreshConfigResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RefreshConfigResponse.ProtoReflect.Descriptor instead. 
func (*RefreshConfigResponse) Descriptor() ([]byte, []int) { - return file_mysqlctl_proto_rawDescGZIP(), []int{11} + return file_mysqlctl_proto_rawDescGZIP(), []int{13} +} + +type VersionStringRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VersionStringRequest) Reset() { + *x = VersionStringRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_mysqlctl_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VersionStringRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VersionStringRequest) ProtoMessage() {} + +func (x *VersionStringRequest) ProtoReflect() protoreflect.Message { + mi := &file_mysqlctl_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VersionStringRequest.ProtoReflect.Descriptor instead. 
+func (*VersionStringRequest) Descriptor() ([]byte, []int) { + return file_mysqlctl_proto_rawDescGZIP(), []int{14} +} + +type VersionStringResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *VersionStringResponse) Reset() { + *x = VersionStringResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_mysqlctl_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VersionStringResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VersionStringResponse) ProtoMessage() {} + +func (x *VersionStringResponse) ProtoReflect() protoreflect.Message { + mi := &file_mysqlctl_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VersionStringResponse.ProtoReflect.Descriptor instead. +func (*VersionStringResponse) Descriptor() ([]byte, []int) { + return file_mysqlctl_proto_rawDescGZIP(), []int{15} +} + +func (x *VersionStringResponse) GetVersion() string { + if x != nil { + return x.Version + } + return "" } // BackupInfo is the read-only attributes of a mysqlctl/backupstorage.BackupHandle. 
@@ -612,7 +827,7 @@ type BackupInfo struct { func (x *BackupInfo) Reset() { *x = BackupInfo{} if protoimpl.UnsafeEnabled { - mi := &file_mysqlctl_proto_msgTypes[12] + mi := &file_mysqlctl_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -625,7 +840,7 @@ func (x *BackupInfo) String() string { func (*BackupInfo) ProtoMessage() {} func (x *BackupInfo) ProtoReflect() protoreflect.Message { - mi := &file_mysqlctl_proto_msgTypes[12] + mi := &file_mysqlctl_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -638,7 +853,7 @@ func (x *BackupInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use BackupInfo.ProtoReflect.Descriptor instead. func (*BackupInfo) Descriptor() ([]byte, []int) { - return file_mysqlctl_proto_rawDescGZIP(), []int{12} + return file_mysqlctl_proto_rawDescGZIP(), []int{16} } func (x *BackupInfo) GetName() string { @@ -715,79 +930,121 @@ var file_mysqlctl_proto_rawDesc = []byte{ 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x19, 0x0a, 0x17, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, - 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7a, - 0x0a, 0x16, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x69, 0x6e, 0x6c, - 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x65, 0x73, - 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x70, 0x6f, 0x73, 0x69, 
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x15, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x19, 0x0a, 0x17, 0x41, 0x70, - 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x16, 0x0a, 0x14, - 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x17, 0x0a, 0x15, - 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xe6, 0x02, 0x0a, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 
0x69, - 0x61, 0x73, 0x12, 0x20, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x04, - 0x74, 0x69, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x33, 0x0a, 0x06, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x6d, - 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, - 0x66, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x22, 0x4b, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x43, 0x4f, - 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x4f, 0x4d, 0x50, - 0x4c, 0x45, 0x54, 0x45, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, - 0x44, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x04, 0x32, 0xe4, - 0x03, 0x0a, 0x08, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x43, 0x74, 0x6c, 0x12, 0x3a, 0x0a, 0x05, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x12, 0x16, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6d, - 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, 0x64, - 0x6f, 0x77, 0x6e, 0x12, 0x19, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, - 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, - 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, - 0x77, 0x6e, 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0f, - 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, - 0x20, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x75, 0x6e, 0x4d, 0x79, - 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x75, 0x6e, - 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, - 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x79, 0x73, 0x71, + 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc0, + 0x01, 0x0a, 0x16, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x17, 0x62, + 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x64, 0x61, + 0x74, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, + 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x15, 0x62, 0x69, 0x6e, 0x6c, + 0x6f, 0x67, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x61, 
0x74, 0x65, 0x74, 0x69, 0x6d, + 0x65, 0x22, 0x19, 0x0a, 0x17, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, + 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4e, 0x0a, 0x20, + 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x2a, 0x0a, 0x11, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0xf9, 0x01, 0x0a, + 0x21, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0e, 0x66, 0x69, 0x72, 0x73, 0x74, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x34, 0x0a, 0x16, 0x66, 0x69, 0x72, + 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x66, 0x69, 0x72, 0x73, 0x74, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x12, + 0x33, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x12, 0x32, 0x0a, 0x15, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 
0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x69, 0x6e, + 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x16, 0x0a, 0x14, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, + 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x17, 0x0a, 0x15, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x22, 0x31, 0x0a, 0x15, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x22, 0xe6, 0x02, 0x0a, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 
0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x12, 0x20, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, + 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x04, 0x74, 0x69, + 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x6d, 0x79, 0x73, + 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, + 0x4b, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x43, 0x4f, 0x4d, 0x50, + 0x4c, 0x45, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, + 0x54, 0x45, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, + 0x03, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x04, 0x32, 0xb0, 0x05, 0x0a, + 0x08, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x43, 0x74, 0x6c, 0x12, 0x3a, 0x0a, 0x05, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x12, 0x16, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6d, 0x79, 0x73, + 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, 
0x64, 0x6f, 0x77, + 0x6e, 0x12, 0x19, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x68, 0x75, + 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x6d, + 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0f, 0x52, 0x75, + 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x20, 0x2e, + 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, + 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x21, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x75, 0x6e, 0x4d, 0x79, + 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, + 0x74, 0x6c, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, - 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x79, - 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, - 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x4f, 0x0a, 0x0c, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x1d, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x69, 0x6e, - 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1e, 0x2e, 0x6d, 
0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x69, 0x6e, 0x69, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x1e, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, - 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, - 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x27, 0x5a, 0x25, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, - 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x76, + 0x0a, 0x19, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, + 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x12, 0x2a, 0x2e, 0x6d, 0x79, + 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, + 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, + 0x65, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, + 0x6c, 0x2e, 0x52, 
0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, + 0x2e, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x52, 0x65, 0x66, 0x72, 0x65, + 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, + 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, + 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1e, 0x2e, 0x6d, + 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, + 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, + 0x27, 0x5a, 0x25, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, + 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -803,46 +1060,57 @@ func file_mysqlctl_proto_rawDescGZIP() []byte { } var file_mysqlctl_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_mysqlctl_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_mysqlctl_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_mysqlctl_proto_goTypes = []interface{}{ - 
(BackupInfo_Status)(0), // 0: mysqlctl.BackupInfo.Status - (*StartRequest)(nil), // 1: mysqlctl.StartRequest - (*StartResponse)(nil), // 2: mysqlctl.StartResponse - (*ShutdownRequest)(nil), // 3: mysqlctl.ShutdownRequest - (*ShutdownResponse)(nil), // 4: mysqlctl.ShutdownResponse - (*RunMysqlUpgradeRequest)(nil), // 5: mysqlctl.RunMysqlUpgradeRequest - (*RunMysqlUpgradeResponse)(nil), // 6: mysqlctl.RunMysqlUpgradeResponse - (*ApplyBinlogFileRequest)(nil), // 7: mysqlctl.ApplyBinlogFileRequest - (*ApplyBinlogFileResponse)(nil), // 8: mysqlctl.ApplyBinlogFileResponse - (*ReinitConfigRequest)(nil), // 9: mysqlctl.ReinitConfigRequest - (*ReinitConfigResponse)(nil), // 10: mysqlctl.ReinitConfigResponse - (*RefreshConfigRequest)(nil), // 11: mysqlctl.RefreshConfigRequest - (*RefreshConfigResponse)(nil), // 12: mysqlctl.RefreshConfigResponse - (*BackupInfo)(nil), // 13: mysqlctl.BackupInfo - (*topodata.TabletAlias)(nil), // 14: topodata.TabletAlias - (*vttime.Time)(nil), // 15: vttime.Time + (BackupInfo_Status)(0), // 0: mysqlctl.BackupInfo.Status + (*StartRequest)(nil), // 1: mysqlctl.StartRequest + (*StartResponse)(nil), // 2: mysqlctl.StartResponse + (*ShutdownRequest)(nil), // 3: mysqlctl.ShutdownRequest + (*ShutdownResponse)(nil), // 4: mysqlctl.ShutdownResponse + (*RunMysqlUpgradeRequest)(nil), // 5: mysqlctl.RunMysqlUpgradeRequest + (*RunMysqlUpgradeResponse)(nil), // 6: mysqlctl.RunMysqlUpgradeResponse + (*ApplyBinlogFileRequest)(nil), // 7: mysqlctl.ApplyBinlogFileRequest + (*ApplyBinlogFileResponse)(nil), // 8: mysqlctl.ApplyBinlogFileResponse + (*ReadBinlogFilesTimestampsRequest)(nil), // 9: mysqlctl.ReadBinlogFilesTimestampsRequest + (*ReadBinlogFilesTimestampsResponse)(nil), // 10: mysqlctl.ReadBinlogFilesTimestampsResponse + (*ReinitConfigRequest)(nil), // 11: mysqlctl.ReinitConfigRequest + (*ReinitConfigResponse)(nil), // 12: mysqlctl.ReinitConfigResponse + (*RefreshConfigRequest)(nil), // 13: mysqlctl.RefreshConfigRequest + (*RefreshConfigResponse)(nil), 
// 14: mysqlctl.RefreshConfigResponse + (*VersionStringRequest)(nil), // 15: mysqlctl.VersionStringRequest + (*VersionStringResponse)(nil), // 16: mysqlctl.VersionStringResponse + (*BackupInfo)(nil), // 17: mysqlctl.BackupInfo + (*vttime.Time)(nil), // 18: vttime.Time + (*topodata.TabletAlias)(nil), // 19: topodata.TabletAlias } var file_mysqlctl_proto_depIdxs = []int32{ - 14, // 0: mysqlctl.BackupInfo.tablet_alias:type_name -> topodata.TabletAlias - 15, // 1: mysqlctl.BackupInfo.time:type_name -> vttime.Time - 0, // 2: mysqlctl.BackupInfo.status:type_name -> mysqlctl.BackupInfo.Status - 1, // 3: mysqlctl.MysqlCtl.Start:input_type -> mysqlctl.StartRequest - 3, // 4: mysqlctl.MysqlCtl.Shutdown:input_type -> mysqlctl.ShutdownRequest - 5, // 5: mysqlctl.MysqlCtl.RunMysqlUpgrade:input_type -> mysqlctl.RunMysqlUpgradeRequest - 7, // 6: mysqlctl.MysqlCtl.ApplyBinlogFile:input_type -> mysqlctl.ApplyBinlogFileRequest - 9, // 7: mysqlctl.MysqlCtl.ReinitConfig:input_type -> mysqlctl.ReinitConfigRequest - 11, // 8: mysqlctl.MysqlCtl.RefreshConfig:input_type -> mysqlctl.RefreshConfigRequest - 2, // 9: mysqlctl.MysqlCtl.Start:output_type -> mysqlctl.StartResponse - 4, // 10: mysqlctl.MysqlCtl.Shutdown:output_type -> mysqlctl.ShutdownResponse - 6, // 11: mysqlctl.MysqlCtl.RunMysqlUpgrade:output_type -> mysqlctl.RunMysqlUpgradeResponse - 8, // 12: mysqlctl.MysqlCtl.ApplyBinlogFile:output_type -> mysqlctl.ApplyBinlogFileResponse - 10, // 13: mysqlctl.MysqlCtl.ReinitConfig:output_type -> mysqlctl.ReinitConfigResponse - 12, // 14: mysqlctl.MysqlCtl.RefreshConfig:output_type -> mysqlctl.RefreshConfigResponse - 9, // [9:15] is the sub-list for method output_type - 3, // [3:9] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name + 18, // 0: mysqlctl.ApplyBinlogFileRequest.binlog_restore_datetime:type_name -> vttime.Time + 18, // 1: 
mysqlctl.ReadBinlogFilesTimestampsResponse.first_timestamp:type_name -> vttime.Time + 18, // 2: mysqlctl.ReadBinlogFilesTimestampsResponse.last_timestamp:type_name -> vttime.Time + 19, // 3: mysqlctl.BackupInfo.tablet_alias:type_name -> topodata.TabletAlias + 18, // 4: mysqlctl.BackupInfo.time:type_name -> vttime.Time + 0, // 5: mysqlctl.BackupInfo.status:type_name -> mysqlctl.BackupInfo.Status + 1, // 6: mysqlctl.MysqlCtl.Start:input_type -> mysqlctl.StartRequest + 3, // 7: mysqlctl.MysqlCtl.Shutdown:input_type -> mysqlctl.ShutdownRequest + 5, // 8: mysqlctl.MysqlCtl.RunMysqlUpgrade:input_type -> mysqlctl.RunMysqlUpgradeRequest + 7, // 9: mysqlctl.MysqlCtl.ApplyBinlogFile:input_type -> mysqlctl.ApplyBinlogFileRequest + 9, // 10: mysqlctl.MysqlCtl.ReadBinlogFilesTimestamps:input_type -> mysqlctl.ReadBinlogFilesTimestampsRequest + 11, // 11: mysqlctl.MysqlCtl.ReinitConfig:input_type -> mysqlctl.ReinitConfigRequest + 13, // 12: mysqlctl.MysqlCtl.RefreshConfig:input_type -> mysqlctl.RefreshConfigRequest + 15, // 13: mysqlctl.MysqlCtl.VersionString:input_type -> mysqlctl.VersionStringRequest + 2, // 14: mysqlctl.MysqlCtl.Start:output_type -> mysqlctl.StartResponse + 4, // 15: mysqlctl.MysqlCtl.Shutdown:output_type -> mysqlctl.ShutdownResponse + 6, // 16: mysqlctl.MysqlCtl.RunMysqlUpgrade:output_type -> mysqlctl.RunMysqlUpgradeResponse + 8, // 17: mysqlctl.MysqlCtl.ApplyBinlogFile:output_type -> mysqlctl.ApplyBinlogFileResponse + 10, // 18: mysqlctl.MysqlCtl.ReadBinlogFilesTimestamps:output_type -> mysqlctl.ReadBinlogFilesTimestampsResponse + 12, // 19: mysqlctl.MysqlCtl.ReinitConfig:output_type -> mysqlctl.ReinitConfigResponse + 14, // 20: mysqlctl.MysqlCtl.RefreshConfig:output_type -> mysqlctl.RefreshConfigResponse + 16, // 21: mysqlctl.MysqlCtl.VersionString:output_type -> mysqlctl.VersionStringResponse + 14, // [14:22] is the sub-list for method output_type + 6, // [6:14] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 
6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name } func init() { file_mysqlctl_proto_init() } @@ -948,7 +1216,7 @@ func file_mysqlctl_proto_init() { } } file_mysqlctl_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReinitConfigRequest); i { + switch v := v.(*ReadBinlogFilesTimestampsRequest); i { case 0: return &v.state case 1: @@ -960,7 +1228,7 @@ func file_mysqlctl_proto_init() { } } file_mysqlctl_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReinitConfigResponse); i { + switch v := v.(*ReadBinlogFilesTimestampsResponse); i { case 0: return &v.state case 1: @@ -972,7 +1240,7 @@ func file_mysqlctl_proto_init() { } } file_mysqlctl_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshConfigRequest); i { + switch v := v.(*ReinitConfigRequest); i { case 0: return &v.state case 1: @@ -984,7 +1252,7 @@ func file_mysqlctl_proto_init() { } } file_mysqlctl_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshConfigResponse); i { + switch v := v.(*ReinitConfigResponse); i { case 0: return &v.state case 1: @@ -996,6 +1264,54 @@ func file_mysqlctl_proto_init() { } } file_mysqlctl_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RefreshConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mysqlctl_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RefreshConfigResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mysqlctl_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VersionStringRequest); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mysqlctl_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VersionStringResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mysqlctl_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BackupInfo); i { case 0: return &v.state @@ -1014,7 +1330,7 @@ func file_mysqlctl_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_mysqlctl_proto_rawDesc, NumEnums: 1, - NumMessages: 13, + NumMessages: 17, NumExtensions: 0, NumServices: 1, }, diff --git a/go/vt/proto/mysqlctl/mysqlctl_grpc.pb.go b/go/vt/proto/mysqlctl/mysqlctl_grpc.pb.go index 2613bb0999f..6d0fd1a28e8 100644 --- a/go/vt/proto/mysqlctl/mysqlctl_grpc.pb.go +++ b/go/vt/proto/mysqlctl/mysqlctl_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: // - protoc-gen-go-grpc v1.2.0 -// - protoc v3.6.1 +// - protoc v3.21.3 // source: mysqlctl.proto package mysqlctl @@ -26,8 +26,10 @@ type MysqlCtlClient interface { Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error) RunMysqlUpgrade(ctx context.Context, in *RunMysqlUpgradeRequest, opts ...grpc.CallOption) (*RunMysqlUpgradeResponse, error) ApplyBinlogFile(ctx context.Context, in *ApplyBinlogFileRequest, opts ...grpc.CallOption) (*ApplyBinlogFileResponse, error) + ReadBinlogFilesTimestamps(ctx context.Context, in *ReadBinlogFilesTimestampsRequest, opts ...grpc.CallOption) (*ReadBinlogFilesTimestampsResponse, error) ReinitConfig(ctx context.Context, in *ReinitConfigRequest, opts ...grpc.CallOption) (*ReinitConfigResponse, error) RefreshConfig(ctx context.Context, in *RefreshConfigRequest, opts ...grpc.CallOption) (*RefreshConfigResponse, error) + VersionString(ctx context.Context, in *VersionStringRequest, opts ...grpc.CallOption) (*VersionStringResponse, error) } type mysqlCtlClient struct { @@ -74,6 +76,15 @@ func (c *mysqlCtlClient) ApplyBinlogFile(ctx context.Context, in *ApplyBinlogFil return out, nil } +func (c *mysqlCtlClient) ReadBinlogFilesTimestamps(ctx context.Context, in *ReadBinlogFilesTimestampsRequest, opts ...grpc.CallOption) (*ReadBinlogFilesTimestampsResponse, error) { + out := new(ReadBinlogFilesTimestampsResponse) + err := c.cc.Invoke(ctx, "/mysqlctl.MysqlCtl/ReadBinlogFilesTimestamps", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *mysqlCtlClient) ReinitConfig(ctx context.Context, in *ReinitConfigRequest, opts ...grpc.CallOption) (*ReinitConfigResponse, error) { out := new(ReinitConfigResponse) err := c.cc.Invoke(ctx, "/mysqlctl.MysqlCtl/ReinitConfig", in, out, opts...) 
@@ -92,6 +103,15 @@ func (c *mysqlCtlClient) RefreshConfig(ctx context.Context, in *RefreshConfigReq return out, nil } +func (c *mysqlCtlClient) VersionString(ctx context.Context, in *VersionStringRequest, opts ...grpc.CallOption) (*VersionStringResponse, error) { + out := new(VersionStringResponse) + err := c.cc.Invoke(ctx, "/mysqlctl.MysqlCtl/VersionString", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // MysqlCtlServer is the server API for MysqlCtl service. // All implementations must embed UnimplementedMysqlCtlServer // for forward compatibility @@ -100,8 +120,10 @@ type MysqlCtlServer interface { Shutdown(context.Context, *ShutdownRequest) (*ShutdownResponse, error) RunMysqlUpgrade(context.Context, *RunMysqlUpgradeRequest) (*RunMysqlUpgradeResponse, error) ApplyBinlogFile(context.Context, *ApplyBinlogFileRequest) (*ApplyBinlogFileResponse, error) + ReadBinlogFilesTimestamps(context.Context, *ReadBinlogFilesTimestampsRequest) (*ReadBinlogFilesTimestampsResponse, error) ReinitConfig(context.Context, *ReinitConfigRequest) (*ReinitConfigResponse, error) RefreshConfig(context.Context, *RefreshConfigRequest) (*RefreshConfigResponse, error) + VersionString(context.Context, *VersionStringRequest) (*VersionStringResponse, error) mustEmbedUnimplementedMysqlCtlServer() } @@ -121,12 +143,18 @@ func (UnimplementedMysqlCtlServer) RunMysqlUpgrade(context.Context, *RunMysqlUpg func (UnimplementedMysqlCtlServer) ApplyBinlogFile(context.Context, *ApplyBinlogFileRequest) (*ApplyBinlogFileResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ApplyBinlogFile not implemented") } +func (UnimplementedMysqlCtlServer) ReadBinlogFilesTimestamps(context.Context, *ReadBinlogFilesTimestampsRequest) (*ReadBinlogFilesTimestampsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadBinlogFilesTimestamps not implemented") +} func (UnimplementedMysqlCtlServer) ReinitConfig(context.Context, *ReinitConfigRequest) 
(*ReinitConfigResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ReinitConfig not implemented") } func (UnimplementedMysqlCtlServer) RefreshConfig(context.Context, *RefreshConfigRequest) (*RefreshConfigResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RefreshConfig not implemented") } +func (UnimplementedMysqlCtlServer) VersionString(context.Context, *VersionStringRequest) (*VersionStringResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VersionString not implemented") +} func (UnimplementedMysqlCtlServer) mustEmbedUnimplementedMysqlCtlServer() {} // UnsafeMysqlCtlServer may be embedded to opt out of forward compatibility for this service. @@ -212,6 +240,24 @@ func _MysqlCtl_ApplyBinlogFile_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _MysqlCtl_ReadBinlogFilesTimestamps_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadBinlogFilesTimestampsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MysqlCtlServer).ReadBinlogFilesTimestamps(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/mysqlctl.MysqlCtl/ReadBinlogFilesTimestamps", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MysqlCtlServer).ReadBinlogFilesTimestamps(ctx, req.(*ReadBinlogFilesTimestampsRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _MysqlCtl_ReinitConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ReinitConfigRequest) if err := dec(in); err != nil { @@ -248,6 +294,24 @@ func _MysqlCtl_RefreshConfig_Handler(srv interface{}, ctx context.Context, dec f return interceptor(ctx, in, info, handler) } +func 
_MysqlCtl_VersionString_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VersionStringRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MysqlCtlServer).VersionString(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/mysqlctl.MysqlCtl/VersionString", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MysqlCtlServer).VersionString(ctx, req.(*VersionStringRequest)) + } + return interceptor(ctx, in, info, handler) +} + // MysqlCtl_ServiceDesc is the grpc.ServiceDesc for MysqlCtl service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -271,6 +335,10 @@ var MysqlCtl_ServiceDesc = grpc.ServiceDesc{ MethodName: "ApplyBinlogFile", Handler: _MysqlCtl_ApplyBinlogFile_Handler, }, + { + MethodName: "ReadBinlogFilesTimestamps", + Handler: _MysqlCtl_ReadBinlogFilesTimestamps_Handler, + }, { MethodName: "ReinitConfig", Handler: _MysqlCtl_ReinitConfig_Handler, @@ -279,6 +347,10 @@ var MysqlCtl_ServiceDesc = grpc.ServiceDesc{ MethodName: "RefreshConfig", Handler: _MysqlCtl_RefreshConfig_Handler, }, + { + MethodName: "VersionString", + Handler: _MysqlCtl_VersionString_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "mysqlctl.proto", diff --git a/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go b/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go index deaf09c9e37..bb2ec78e03a 100644 --- a/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go +++ b/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
-// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: mysqlctl.proto package mysqlctl import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -20,6 +21,310 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *StartRequest) CloneVT() *StartRequest { + if m == nil { + return (*StartRequest)(nil) + } + r := &StartRequest{} + if rhs := m.MysqldArgs; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.MysqldArgs = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StartRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StartResponse) CloneVT() *StartResponse { + if m == nil { + return (*StartResponse)(nil) + } + r := &StartResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StartResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShutdownRequest) CloneVT() *ShutdownRequest { + if m == nil { + return (*ShutdownRequest)(nil) + } + r := &ShutdownRequest{ + WaitForMysqld: m.WaitForMysqld, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShutdownRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShutdownResponse) CloneVT() *ShutdownResponse { + if m == nil { + return (*ShutdownResponse)(nil) + } + r := &ShutdownResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShutdownResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func 
(m *RunMysqlUpgradeRequest) CloneVT() *RunMysqlUpgradeRequest { + if m == nil { + return (*RunMysqlUpgradeRequest)(nil) + } + r := &RunMysqlUpgradeRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RunMysqlUpgradeRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RunMysqlUpgradeResponse) CloneVT() *RunMysqlUpgradeResponse { + if m == nil { + return (*RunMysqlUpgradeResponse)(nil) + } + r := &RunMysqlUpgradeResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RunMysqlUpgradeResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ApplyBinlogFileRequest) CloneVT() *ApplyBinlogFileRequest { + if m == nil { + return (*ApplyBinlogFileRequest)(nil) + } + r := &ApplyBinlogFileRequest{ + BinlogFileName: m.BinlogFileName, + BinlogRestorePosition: m.BinlogRestorePosition, + BinlogRestoreDatetime: m.BinlogRestoreDatetime.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ApplyBinlogFileRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ApplyBinlogFileResponse) CloneVT() *ApplyBinlogFileResponse { + if m == nil { + return (*ApplyBinlogFileResponse)(nil) + } + r := &ApplyBinlogFileResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ApplyBinlogFileResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReadBinlogFilesTimestampsRequest) CloneVT() *ReadBinlogFilesTimestampsRequest { + if m == nil { + return (*ReadBinlogFilesTimestampsRequest)(nil) + } + r := &ReadBinlogFilesTimestampsRequest{} + if rhs := 
m.BinlogFileNames; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.BinlogFileNames = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReadBinlogFilesTimestampsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReadBinlogFilesTimestampsResponse) CloneVT() *ReadBinlogFilesTimestampsResponse { + if m == nil { + return (*ReadBinlogFilesTimestampsResponse)(nil) + } + r := &ReadBinlogFilesTimestampsResponse{ + FirstTimestamp: m.FirstTimestamp.CloneVT(), + FirstTimestampBinlog: m.FirstTimestampBinlog, + LastTimestamp: m.LastTimestamp.CloneVT(), + LastTimestampBinlog: m.LastTimestampBinlog, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReadBinlogFilesTimestampsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReinitConfigRequest) CloneVT() *ReinitConfigRequest { + if m == nil { + return (*ReinitConfigRequest)(nil) + } + r := &ReinitConfigRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReinitConfigRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReinitConfigResponse) CloneVT() *ReinitConfigResponse { + if m == nil { + return (*ReinitConfigResponse)(nil) + } + r := &ReinitConfigResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReinitConfigResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshConfigRequest) CloneVT() *RefreshConfigRequest { + if m == nil { + return (*RefreshConfigRequest)(nil) + } + r := &RefreshConfigRequest{} + if 
len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RefreshConfigRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshConfigResponse) CloneVT() *RefreshConfigResponse { + if m == nil { + return (*RefreshConfigResponse)(nil) + } + r := &RefreshConfigResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RefreshConfigResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VersionStringRequest) CloneVT() *VersionStringRequest { + if m == nil { + return (*VersionStringRequest)(nil) + } + r := &VersionStringRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VersionStringRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VersionStringResponse) CloneVT() *VersionStringResponse { + if m == nil { + return (*VersionStringResponse)(nil) + } + r := &VersionStringResponse{ + Version: m.Version, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VersionStringResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BackupInfo) CloneVT() *BackupInfo { + if m == nil { + return (*BackupInfo)(nil) + } + r := &BackupInfo{ + Name: m.Name, + Directory: m.Directory, + Keyspace: m.Keyspace, + Shard: m.Shard, + TabletAlias: m.TabletAlias.CloneVT(), + Time: m.Time.CloneVT(), + Engine: m.Engine, + Status: m.Status, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BackupInfo) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m 
*StartRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -267,6 +572,16 @@ func (m *ApplyBinlogFileRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.BinlogRestoreDatetime != nil { + size, err := m.BinlogRestoreDatetime.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } if len(m.BinlogRestorePosition) > 0 { i -= len(m.BinlogRestorePosition) copy(dAtA[i:], m.BinlogRestorePosition) @@ -317,6 +632,115 @@ func (m *ApplyBinlogFileResponse) MarshalToSizedBufferVT(dAtA []byte) (int, erro return len(dAtA) - i, nil } +func (m *ReadBinlogFilesTimestampsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadBinlogFilesTimestampsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReadBinlogFilesTimestampsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.BinlogFileNames) > 0 { + for iNdEx := len(m.BinlogFileNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.BinlogFileNames[iNdEx]) + copy(dAtA[i:], m.BinlogFileNames[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.BinlogFileNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ReadBinlogFilesTimestampsResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, 
err + } + return dAtA[:n], nil +} + +func (m *ReadBinlogFilesTimestampsResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReadBinlogFilesTimestampsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.LastTimestampBinlog) > 0 { + i -= len(m.LastTimestampBinlog) + copy(dAtA[i:], m.LastTimestampBinlog) + i = encodeVarint(dAtA, i, uint64(len(m.LastTimestampBinlog))) + i-- + dAtA[i] = 0x22 + } + if m.LastTimestamp != nil { + size, err := m.LastTimestamp.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.FirstTimestampBinlog) > 0 { + i -= len(m.FirstTimestampBinlog) + copy(dAtA[i:], m.FirstTimestampBinlog) + i = encodeVarint(dAtA, i, uint64(len(m.FirstTimestampBinlog))) + i-- + dAtA[i] = 0x12 + } + if m.FirstTimestamp != nil { + size, err := m.FirstTimestamp.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *ReinitConfigRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -449,7 +873,7 @@ func (m *RefreshConfigResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *BackupInfo) MarshalVT() (dAtA []byte, err error) { +func (m *VersionStringRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -462,12 +886,12 @@ func (m *BackupInfo) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *BackupInfo) MarshalToVT(dAtA []byte) (int, error) { +func (m *VersionStringRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return 
m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *BackupInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *VersionStringRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -479,31 +903,104 @@ func (m *BackupInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Status != 0 { - i = encodeVarint(dAtA, i, uint64(m.Status)) - i-- - dAtA[i] = 0x40 - } - if len(m.Engine) > 0 { - i -= len(m.Engine) - copy(dAtA[i:], m.Engine) - i = encodeVarint(dAtA, i, uint64(len(m.Engine))) - i-- - dAtA[i] = 0x3a + return len(dAtA) - i, nil +} + +func (m *VersionStringResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if m.Time != nil { - size, err := m.Time.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x32 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { + return dAtA[:n], nil +} + +func (m *VersionStringResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VersionStringResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarint(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BackupInfo) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BackupInfo) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *BackupInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Status != 0 { + i = encodeVarint(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x40 + } + if len(m.Engine) > 0 { + i -= len(m.Engine) + copy(dAtA[i:], m.Engine) + i = encodeVarint(dAtA, i, uint64(len(m.Engine))) + i-- + dAtA[i] = 0x3a + } + if m.Time != nil { + size, err := m.Time.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { return 0, err } i -= size @@ -636,6 +1133,10 @@ func (m *ApplyBinlogFileRequest) SizeVT() (n int) { if l > 0 { n += 1 + l + sov(uint64(l)) } + if m.BinlogRestoreDatetime != nil { + l = m.BinlogRestoreDatetime.SizeVT() + n += 1 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -650,6 +1151,48 @@ func (m *ApplyBinlogFileResponse) SizeVT() (n int) { return n } +func (m *ReadBinlogFilesTimestampsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.BinlogFileNames) > 0 { + for _, s := range m.BinlogFileNames { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ReadBinlogFilesTimestampsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FirstTimestamp != nil { + l = m.FirstTimestamp.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.FirstTimestampBinlog) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if 
m.LastTimestamp != nil { + l = m.LastTimestamp.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.LastTimestampBinlog) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + func (m *ReinitConfigRequest) SizeVT() (n int) { if m == nil { return 0 @@ -690,6 +1233,30 @@ func (m *RefreshConfigResponse) SizeVT() (n int) { return n } +func (m *VersionStringRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *VersionStringResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + func (m *BackupInfo) SizeVT() (n int) { if m == nil { return 0 @@ -1188,6 +1755,42 @@ func (m *ApplyBinlogFileRequest) UnmarshalVT(dAtA []byte) error { } m.BinlogRestorePosition = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinlogRestoreDatetime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BinlogRestoreDatetime == nil { + m.BinlogRestoreDatetime = &vttime.Time{} + } + if err := m.BinlogRestoreDatetime.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -1261,7 +1864,7 @@ func (m *ApplyBinlogFileResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReinitConfigRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReadBinlogFilesTimestampsRequest) 
UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1284,63 +1887,44 @@ func (m *ReinitConfigRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReinitConfigRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReadBinlogFilesTimestampsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReinitConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReadBinlogFilesTimestampsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinlogFileNames", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReinitConfigResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReinitConfigResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReinitConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + m.BinlogFileNames = append(m.BinlogFileNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -1363,7 +1947,7 @@ func (m *ReinitConfigResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RefreshConfigRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReadBinlogFilesTimestampsResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1386,26 +1970,315 @@ func (m *RefreshConfigRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RefreshConfigRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReadBinlogFilesTimestampsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RefreshConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReadBinlogFilesTimestampsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - 
if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FirstTimestamp", wireType) } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FirstTimestamp == nil { + m.FirstTimestamp = &vttime.Time{} + } + if err := m.FirstTimestamp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FirstTimestampBinlog", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FirstTimestampBinlog = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastTimestamp == nil { + m.LastTimestamp = &vttime.Time{} + } + if err := m.LastTimestamp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTimestampBinlog", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastTimestampBinlog = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReinitConfigRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReinitConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReinitConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReinitConfigResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReinitConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReinitConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RefreshConfigRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RefreshConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RefreshConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy } } @@ -1465,6 +2338,140 @@ func (m *RefreshConfigResponse) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *VersionStringRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VersionStringRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VersionStringRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VersionStringResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VersionStringResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VersionStringResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *BackupInfo) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/go/vt/proto/query/query.pb.go b/go/vt/proto/query/query.pb.go index e156fa431fc..49e3ad38cb7 100644 --- a/go/vt/proto/query/query.pb.go +++ b/go/vt/proto/query/query.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: query.proto package query @@ -5265,8 +5265,8 @@ type StreamHealthResponse struct { // if filtered replication is enabled on a primary for instance, // or if a replica should not be used because the keyspace is being resharded. Serving bool `protobuf:"varint,2,opt,name=serving,proto3" json:"serving,omitempty"` - // tablet_externally_reparented_timestamp can be interpreted as the - // last time we knew that this tablet was the PRIMARY of this shard + // primary_term_start_timestamp can be interpreted as the + // last time we knew that this tablet was promoted to a PRIMARY of this shard // (if StreamHealthResponse describes a group of tablets, between // two vtgates, only one primary will be present in the group, and // this is this primary's value). @@ -5296,8 +5296,8 @@ type StreamHealthResponse struct { // topology (see go/vt/vttablet/tabletmanager/init_tablet.go) // // OR - // d) 0 if the vttablet was never a PRIMARY. - TabletExternallyReparentedTimestamp int64 `protobuf:"varint,3,opt,name=tablet_externally_reparented_timestamp,json=tabletExternallyReparentedTimestamp,proto3" json:"tablet_externally_reparented_timestamp,omitempty"` + // d) 0 if the vttablet is not a PRIMARY. + PrimaryTermStartTimestamp int64 `protobuf:"varint,3,opt,name=primary_term_start_timestamp,json=primaryTermStartTimestamp,proto3" json:"primary_term_start_timestamp,omitempty"` // realtime_stats contains information about the tablet status. 
// It is only filled in if the information is about a tablet. RealtimeStats *RealtimeStats `protobuf:"bytes,4,opt,name=realtime_stats,json=realtimeStats,proto3" json:"realtime_stats,omitempty"` @@ -5354,9 +5354,9 @@ func (x *StreamHealthResponse) GetServing() bool { return false } -func (x *StreamHealthResponse) GetTabletExternallyReparentedTimestamp() int64 { +func (x *StreamHealthResponse) GetPrimaryTermStartTimestamp() int64 { if x != nil { - return x.TabletExternallyReparentedTimestamp + return x.PrimaryTermStartTimestamp } return 0 } @@ -6581,156 +6581,155 @@ var file_query_proto_rawDesc = []byte{ 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x5f, 0x6d, 0x61, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x18, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, - 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x4d, 0x61, 0x78, 0x22, 0xa9, 0x02, 0x0a, 0x14, + 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x4d, 0x61, 0x78, 0x22, 0x95, 0x02, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x53, 0x0a, 0x26, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, - 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x5f, 0x72, 0x65, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x23, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 
0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, - 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x3b, 0x0a, 0x0e, 0x72, 0x65, - 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x65, 0x61, 0x6c, 0x74, - 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, - 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x22, 0xae, 0x01, 0x0a, 0x13, 0x54, 0x72, 0x61, 0x6e, - 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, - 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, - 0x74, 0x69, 0x64, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x31, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, - 0x70, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x74, - 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x22, 0x91, 0x01, 
0x0a, 0x10, 0x47, 0x65, 0x74, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, - 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x12, 0x35, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0xb1, 0x01, 0x0a, - 0x11, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x58, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x69, - 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x69, - 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x42, 0x0a, 0x14, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 
0xdd, 0x02, 0x0a, 0x15, 0x4c, 0x6f, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, - 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, - 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, - 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, - 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, - 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, - 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 
0x6f, 0x6e, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, - 0x6e, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x65, 0x73, - 0x22, 0x44, 0x0a, 0x16, 0x4c, 0x6f, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2a, 0x92, 0x03, 0x0a, 0x09, 0x4d, 0x79, 0x53, 0x71, 0x6c, - 0x46, 0x6c, 0x61, 0x67, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x10, 0x00, 0x12, - 0x11, 0x0a, 0x0d, 0x4e, 0x4f, 0x54, 0x5f, 0x4e, 0x55, 0x4c, 0x4c, 0x5f, 0x46, 0x4c, 0x41, 0x47, - 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x50, 0x52, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46, 0x4c, - 0x41, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x49, 0x51, 0x55, 0x45, 0x5f, 0x4b, - 0x45, 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x4d, 0x55, 0x4c, - 0x54, 0x49, 0x50, 0x4c, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x08, - 0x12, 0x0d, 0x0a, 0x09, 0x42, 0x4c, 0x4f, 0x42, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x10, 0x12, - 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x53, 0x49, 0x47, 0x4e, 0x45, 0x44, 0x5f, 0x46, 0x4c, 0x41, 0x47, - 0x10, 0x20, 0x12, 0x11, 0x0a, 0x0d, 0x5a, 0x45, 0x52, 0x4f, 0x46, 0x49, 0x4c, 0x4c, 0x5f, 0x46, - 0x4c, 0x41, 0x47, 0x10, 0x40, 0x12, 0x10, 0x0a, 0x0b, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x5f, - 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x01, 0x12, 0x0e, 0x0a, 0x09, 0x45, 0x4e, 0x55, 0x4d, 0x5f, - 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x02, 0x12, 0x18, 0x0a, 0x13, 0x41, 0x55, 0x54, 0x4f, 0x5f, - 0x49, 0x4e, 0x43, 0x52, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, - 0x04, 0x12, 0x13, 0x0a, 0x0e, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 
0x50, 0x5f, 0x46, - 0x4c, 0x41, 0x47, 0x10, 0x80, 0x08, 0x12, 0x0d, 0x0a, 0x08, 0x53, 0x45, 0x54, 0x5f, 0x46, 0x4c, - 0x41, 0x47, 0x10, 0x80, 0x10, 0x12, 0x1a, 0x0a, 0x15, 0x4e, 0x4f, 0x5f, 0x44, 0x45, 0x46, 0x41, - 0x55, 0x4c, 0x54, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, - 0x20, 0x12, 0x17, 0x0a, 0x12, 0x4f, 0x4e, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x4e, - 0x4f, 0x57, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x40, 0x12, 0x0e, 0x0a, 0x08, 0x4e, 0x55, - 0x4d, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x80, 0x02, 0x12, 0x13, 0x0a, 0x0d, 0x50, 0x41, - 0x52, 0x54, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x80, 0x01, 0x12, - 0x10, 0x0a, 0x0a, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x80, - 0x02, 0x12, 0x11, 0x0a, 0x0b, 0x55, 0x4e, 0x49, 0x51, 0x55, 0x45, 0x5f, 0x46, 0x4c, 0x41, 0x47, - 0x10, 0x80, 0x80, 0x04, 0x12, 0x11, 0x0a, 0x0b, 0x42, 0x49, 0x4e, 0x43, 0x4d, 0x50, 0x5f, 0x46, - 0x4c, 0x41, 0x47, 0x10, 0x80, 0x80, 0x08, 0x1a, 0x02, 0x10, 0x01, 0x2a, 0x6b, 0x0a, 0x04, 0x46, - 0x6c, 0x61, 0x67, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, - 0x0a, 0x49, 0x53, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x52, 0x41, 0x4c, 0x10, 0x80, 0x02, 0x12, 0x0f, - 0x0a, 0x0a, 0x49, 0x53, 0x55, 0x4e, 0x53, 0x49, 0x47, 0x4e, 0x45, 0x44, 0x10, 0x80, 0x04, 0x12, - 0x0c, 0x0a, 0x07, 0x49, 0x53, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x80, 0x08, 0x12, 0x0d, 0x0a, - 0x08, 0x49, 0x53, 0x51, 0x55, 0x4f, 0x54, 0x45, 0x44, 0x10, 0x80, 0x10, 0x12, 0x0b, 0x0a, 0x06, - 0x49, 0x53, 0x54, 0x45, 0x58, 0x54, 0x10, 0x80, 0x20, 0x12, 0x0d, 0x0a, 0x08, 0x49, 0x53, 0x42, - 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x80, 0x40, 0x2a, 0xc0, 0x03, 0x0a, 0x04, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x55, 0x4c, 0x4c, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00, - 0x12, 0x09, 0x0a, 0x04, 0x49, 0x4e, 0x54, 0x38, 0x10, 0x81, 0x02, 0x12, 0x0a, 0x0a, 0x05, 0x55, - 0x49, 0x4e, 0x54, 
0x38, 0x10, 0x82, 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x31, 0x36, - 0x10, 0x83, 0x02, 0x12, 0x0b, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x31, 0x36, 0x10, 0x84, 0x06, - 0x12, 0x0a, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x32, 0x34, 0x10, 0x85, 0x02, 0x12, 0x0b, 0x0a, 0x06, - 0x55, 0x49, 0x4e, 0x54, 0x32, 0x34, 0x10, 0x86, 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, 0x4e, 0x54, - 0x33, 0x32, 0x10, 0x87, 0x02, 0x12, 0x0b, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, - 0x88, 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x89, 0x02, 0x12, 0x0b, - 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x8a, 0x06, 0x12, 0x0c, 0x0a, 0x07, 0x46, - 0x4c, 0x4f, 0x41, 0x54, 0x33, 0x32, 0x10, 0x8b, 0x08, 0x12, 0x0c, 0x0a, 0x07, 0x46, 0x4c, 0x4f, - 0x41, 0x54, 0x36, 0x34, 0x10, 0x8c, 0x08, 0x12, 0x0e, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, - 0x54, 0x41, 0x4d, 0x50, 0x10, 0x8d, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x44, 0x41, 0x54, 0x45, 0x10, - 0x8e, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x8f, 0x10, 0x12, 0x0d, 0x0a, - 0x08, 0x44, 0x41, 0x54, 0x45, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x90, 0x10, 0x12, 0x09, 0x0a, 0x04, - 0x59, 0x45, 0x41, 0x52, 0x10, 0x91, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x49, 0x4d, - 0x41, 0x4c, 0x10, 0x12, 0x12, 0x09, 0x0a, 0x04, 0x54, 0x45, 0x58, 0x54, 0x10, 0x93, 0x30, 0x12, - 0x09, 0x0a, 0x04, 0x42, 0x4c, 0x4f, 0x42, 0x10, 0x94, 0x50, 0x12, 0x0c, 0x0a, 0x07, 0x56, 0x41, - 0x52, 0x43, 0x48, 0x41, 0x52, 0x10, 0x95, 0x30, 0x12, 0x0e, 0x0a, 0x09, 0x56, 0x41, 0x52, 0x42, - 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x96, 0x50, 0x12, 0x09, 0x0a, 0x04, 0x43, 0x48, 0x41, 0x52, - 0x10, 0x97, 0x30, 0x12, 0x0b, 0x0a, 0x06, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x98, 0x50, - 0x12, 0x08, 0x0a, 0x03, 0x42, 0x49, 0x54, 0x10, 0x99, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x45, 0x4e, - 0x55, 0x4d, 0x10, 0x9a, 0x10, 0x12, 0x08, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x9b, 0x10, 0x12, - 0x09, 0x0a, 0x05, 0x54, 0x55, 0x50, 0x4c, 0x45, 0x10, 
0x1c, 0x12, 0x0d, 0x0a, 0x08, 0x47, 0x45, - 0x4f, 0x4d, 0x45, 0x54, 0x52, 0x59, 0x10, 0x9d, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x4a, 0x53, 0x4f, - 0x4e, 0x10, 0x9e, 0x10, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x52, 0x45, 0x53, 0x53, 0x49, - 0x4f, 0x4e, 0x10, 0x1f, 0x12, 0x0b, 0x0a, 0x06, 0x48, 0x45, 0x58, 0x4e, 0x55, 0x4d, 0x10, 0xa0, - 0x20, 0x12, 0x0b, 0x0a, 0x06, 0x48, 0x45, 0x58, 0x56, 0x41, 0x4c, 0x10, 0xa1, 0x20, 0x12, 0x0b, - 0x0a, 0x06, 0x42, 0x49, 0x54, 0x4e, 0x55, 0x4d, 0x10, 0xa2, 0x20, 0x2a, 0x46, 0x0a, 0x10, 0x54, - 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, - 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, - 0x50, 0x52, 0x45, 0x50, 0x41, 0x52, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4f, 0x4d, - 0x4d, 0x49, 0x54, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, - 0x4b, 0x10, 0x03, 0x2a, 0x31, 0x0a, 0x0f, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x49, 0x45, 0x57, 0x53, 0x10, - 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x07, 0x0a, - 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x02, 0x42, 0x35, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, 0x69, 0x74, - 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x22, 0x76, 0x69, 0x74, 0x65, 0x73, - 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, - 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x3f, 0x0a, 0x1c, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, + 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x19, 0x70, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x54, 0x65, 0x72, 0x6d, 0x53, 0x74, 
0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x3b, 0x0a, 0x0e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, + 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x73, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, + 0x69, 0x61, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4a, 0x04, 0x08, + 0x06, 0x10, 0x07, 0x22, 0xae, 0x01, 0x0a, 0x13, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, + 0x74, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x12, + 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, + 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x31, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, + 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, + 0x61, 0x6e, 0x74, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, + 
0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x12, 0x35, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0xb1, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, + 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, + 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x42, 0x0a, 0x14, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xdd, 0x02, 0x0a, + 0x15, 0x4c, 0x6f, 0x61, 0x64, 0x44, 
0x61, 0x74, 0x61, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, + 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, + 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, + 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, + 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, + 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, + 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, + 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, + 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x25, + 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 
0x6e, 0x65, 0x73, 0x18, + 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x22, 0x44, 0x0a, 0x16, + 0x4c, 0x6f, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x2a, 0x92, 0x03, 0x0a, 0x09, 0x4d, 0x79, 0x53, 0x71, 0x6c, 0x46, 0x6c, 0x61, 0x67, + 0x12, 0x09, 0x0a, 0x05, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4e, + 0x4f, 0x54, 0x5f, 0x4e, 0x55, 0x4c, 0x4c, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x01, 0x12, 0x10, + 0x0a, 0x0c, 0x50, 0x52, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x02, + 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x49, 0x51, 0x55, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46, + 0x4c, 0x41, 0x47, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x50, 0x4c, + 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x08, 0x12, 0x0d, 0x0a, 0x09, + 0x42, 0x4c, 0x4f, 0x42, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x10, 0x12, 0x11, 0x0a, 0x0d, 0x55, + 0x4e, 0x53, 0x49, 0x47, 0x4e, 0x45, 0x44, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x20, 0x12, 0x11, + 0x0a, 0x0d, 0x5a, 0x45, 0x52, 0x4f, 0x46, 0x49, 0x4c, 0x4c, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, + 0x40, 0x12, 0x10, 0x0a, 0x0b, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, + 0x10, 0x80, 0x01, 0x12, 0x0e, 0x0a, 0x09, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x46, 0x4c, 0x41, 0x47, + 0x10, 0x80, 0x02, 0x12, 0x18, 0x0a, 0x13, 0x41, 0x55, 0x54, 0x4f, 0x5f, 0x49, 0x4e, 0x43, 0x52, + 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x04, 0x12, 0x13, 0x0a, + 0x0e, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, + 0x80, 0x08, 
0x12, 0x0d, 0x0a, 0x08, 0x53, 0x45, 0x54, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, + 0x10, 0x12, 0x1a, 0x0a, 0x15, 0x4e, 0x4f, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, + 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x20, 0x12, 0x17, 0x0a, + 0x12, 0x4f, 0x4e, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x57, 0x5f, 0x46, + 0x4c, 0x41, 0x47, 0x10, 0x80, 0x40, 0x12, 0x0e, 0x0a, 0x08, 0x4e, 0x55, 0x4d, 0x5f, 0x46, 0x4c, + 0x41, 0x47, 0x10, 0x80, 0x80, 0x02, 0x12, 0x13, 0x0a, 0x0d, 0x50, 0x41, 0x52, 0x54, 0x5f, 0x4b, + 0x45, 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x80, 0x01, 0x12, 0x10, 0x0a, 0x0a, 0x47, + 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x80, 0x02, 0x12, 0x11, 0x0a, + 0x0b, 0x55, 0x4e, 0x49, 0x51, 0x55, 0x45, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x80, 0x04, + 0x12, 0x11, 0x0a, 0x0b, 0x42, 0x49, 0x4e, 0x43, 0x4d, 0x50, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, + 0x80, 0x80, 0x08, 0x1a, 0x02, 0x10, 0x01, 0x2a, 0x6b, 0x0a, 0x04, 0x46, 0x6c, 0x61, 0x67, 0x12, + 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0a, 0x49, 0x53, 0x49, + 0x4e, 0x54, 0x45, 0x47, 0x52, 0x41, 0x4c, 0x10, 0x80, 0x02, 0x12, 0x0f, 0x0a, 0x0a, 0x49, 0x53, + 0x55, 0x4e, 0x53, 0x49, 0x47, 0x4e, 0x45, 0x44, 0x10, 0x80, 0x04, 0x12, 0x0c, 0x0a, 0x07, 0x49, + 0x53, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x80, 0x08, 0x12, 0x0d, 0x0a, 0x08, 0x49, 0x53, 0x51, + 0x55, 0x4f, 0x54, 0x45, 0x44, 0x10, 0x80, 0x10, 0x12, 0x0b, 0x0a, 0x06, 0x49, 0x53, 0x54, 0x45, + 0x58, 0x54, 0x10, 0x80, 0x20, 0x12, 0x0d, 0x0a, 0x08, 0x49, 0x53, 0x42, 0x49, 0x4e, 0x41, 0x52, + 0x59, 0x10, 0x80, 0x40, 0x2a, 0xc0, 0x03, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, + 0x09, 0x4e, 0x55, 0x4c, 0x4c, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x04, + 0x49, 0x4e, 0x54, 0x38, 0x10, 0x81, 0x02, 0x12, 0x0a, 0x0a, 0x05, 0x55, 0x49, 0x4e, 0x54, 0x38, + 0x10, 0x82, 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, 
0x4e, 0x54, 0x31, 0x36, 0x10, 0x83, 0x02, 0x12, + 0x0b, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x31, 0x36, 0x10, 0x84, 0x06, 0x12, 0x0a, 0x0a, 0x05, + 0x49, 0x4e, 0x54, 0x32, 0x34, 0x10, 0x85, 0x02, 0x12, 0x0b, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, + 0x32, 0x34, 0x10, 0x86, 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x87, + 0x02, 0x12, 0x0b, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x88, 0x06, 0x12, 0x0a, + 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x89, 0x02, 0x12, 0x0b, 0x0a, 0x06, 0x55, 0x49, + 0x4e, 0x54, 0x36, 0x34, 0x10, 0x8a, 0x06, 0x12, 0x0c, 0x0a, 0x07, 0x46, 0x4c, 0x4f, 0x41, 0x54, + 0x33, 0x32, 0x10, 0x8b, 0x08, 0x12, 0x0c, 0x0a, 0x07, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x36, 0x34, + 0x10, 0x8c, 0x08, 0x12, 0x0e, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, + 0x10, 0x8d, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x44, 0x41, 0x54, 0x45, 0x10, 0x8e, 0x10, 0x12, 0x09, + 0x0a, 0x04, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x8f, 0x10, 0x12, 0x0d, 0x0a, 0x08, 0x44, 0x41, 0x54, + 0x45, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x90, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x59, 0x45, 0x41, 0x52, + 0x10, 0x91, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x49, 0x4d, 0x41, 0x4c, 0x10, 0x12, + 0x12, 0x09, 0x0a, 0x04, 0x54, 0x45, 0x58, 0x54, 0x10, 0x93, 0x30, 0x12, 0x09, 0x0a, 0x04, 0x42, + 0x4c, 0x4f, 0x42, 0x10, 0x94, 0x50, 0x12, 0x0c, 0x0a, 0x07, 0x56, 0x41, 0x52, 0x43, 0x48, 0x41, + 0x52, 0x10, 0x95, 0x30, 0x12, 0x0e, 0x0a, 0x09, 0x56, 0x41, 0x52, 0x42, 0x49, 0x4e, 0x41, 0x52, + 0x59, 0x10, 0x96, 0x50, 0x12, 0x09, 0x0a, 0x04, 0x43, 0x48, 0x41, 0x52, 0x10, 0x97, 0x30, 0x12, + 0x0b, 0x0a, 0x06, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x98, 0x50, 0x12, 0x08, 0x0a, 0x03, + 0x42, 0x49, 0x54, 0x10, 0x99, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x9a, + 0x10, 0x12, 0x08, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x9b, 0x10, 0x12, 0x09, 0x0a, 0x05, 0x54, + 0x55, 0x50, 0x4c, 0x45, 0x10, 0x1c, 0x12, 0x0d, 0x0a, 0x08, 0x47, 0x45, 0x4f, 0x4d, 
0x45, 0x54, + 0x52, 0x59, 0x10, 0x9d, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x9e, 0x10, + 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x52, 0x45, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x1f, + 0x12, 0x0b, 0x0a, 0x06, 0x48, 0x45, 0x58, 0x4e, 0x55, 0x4d, 0x10, 0xa0, 0x20, 0x12, 0x0b, 0x0a, + 0x06, 0x48, 0x45, 0x58, 0x56, 0x41, 0x4c, 0x10, 0xa1, 0x20, 0x12, 0x0b, 0x0a, 0x06, 0x42, 0x49, + 0x54, 0x4e, 0x55, 0x4d, 0x10, 0xa2, 0x20, 0x2a, 0x46, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x45, 0x50, + 0x41, 0x52, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, + 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x03, 0x2a, + 0x31, 0x0a, 0x0f, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x49, 0x45, 0x57, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, + 0x06, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, + 0x10, 0x02, 0x42, 0x35, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x22, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, + 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( diff --git a/go/vt/proto/query/query_vtproto.pb.go b/go/vt/proto/query/query_vtproto.pb.go index 4ce5c212f43..94dac1155a8 100644 --- a/go/vt/proto/query/query_vtproto.pb.go +++ b/go/vt/proto/query/query_vtproto.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
-// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: query.proto package query @@ -7,6 +7,7 @@ package query import ( binary "encoding/binary" fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" math "math" @@ -23,6 +24,1527 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *Target) CloneVT() *Target { + if m == nil { + return (*Target)(nil) + } + r := &Target{ + Keyspace: m.Keyspace, + Shard: m.Shard, + TabletType: m.TabletType, + Cell: m.Cell, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Target) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VTGateCallerID) CloneVT() *VTGateCallerID { + if m == nil { + return (*VTGateCallerID)(nil) + } + r := &VTGateCallerID{ + Username: m.Username, + } + if rhs := m.Groups; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Groups = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VTGateCallerID) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *EventToken) CloneVT() *EventToken { + if m == nil { + return (*EventToken)(nil) + } + r := &EventToken{ + Timestamp: m.Timestamp, + Shard: m.Shard, + Position: m.Position, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *EventToken) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Value) CloneVT() *Value { + if m == nil { + return (*Value)(nil) + } + r := &Value{ + Type: m.Type, + } + if rhs := m.Value; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Value = tmpBytes + } + if 
len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Value) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BindVariable) CloneVT() *BindVariable { + if m == nil { + return (*BindVariable)(nil) + } + r := &BindVariable{ + Type: m.Type, + } + if rhs := m.Value; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Value = tmpBytes + } + if rhs := m.Values; rhs != nil { + tmpContainer := make([]*Value, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Values = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BindVariable) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BoundQuery) CloneVT() *BoundQuery { + if m == nil { + return (*BoundQuery)(nil) + } + r := &BoundQuery{ + Sql: m.Sql, + } + if rhs := m.BindVariables; rhs != nil { + tmpContainer := make(map[string]*BindVariable, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.BindVariables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BoundQuery) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteOptions) CloneVT() *ExecuteOptions { + if m == nil { + return (*ExecuteOptions)(nil) + } + r := &ExecuteOptions{ + IncludedFields: m.IncludedFields, + ClientFoundRows: m.ClientFoundRows, + Workload: m.Workload, + SqlSelectLimit: m.SqlSelectLimit, + TransactionIsolation: m.TransactionIsolation, + SkipQueryPlanCache: m.SkipQueryPlanCache, + PlannerVersion: m.PlannerVersion, + HasCreatedTempTables: m.HasCreatedTempTables, + Consolidator: m.Consolidator, + WorkloadName: m.WorkloadName, + Priority: m.Priority, + UagInfo: m.UagInfo, + } + if 
rhs := m.TransactionAccessMode; rhs != nil { + tmpContainer := make([]ExecuteOptions_TransactionAccessMode, len(rhs)) + copy(tmpContainer, rhs) + r.TransactionAccessMode = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExecuteOptions) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Field) CloneVT() *Field { + if m == nil { + return (*Field)(nil) + } + r := &Field{ + Name: m.Name, + Type: m.Type, + Table: m.Table, + OrgTable: m.OrgTable, + Database: m.Database, + OrgName: m.OrgName, + ColumnLength: m.ColumnLength, + Charset: m.Charset, + Decimals: m.Decimals, + Flags: m.Flags, + ColumnType: m.ColumnType, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Field) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Row) CloneVT() *Row { + if m == nil { + return (*Row)(nil) + } + r := &Row{} + if rhs := m.Lengths; rhs != nil { + tmpContainer := make([]int64, len(rhs)) + copy(tmpContainer, rhs) + r.Lengths = tmpContainer + } + if rhs := m.Values; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Values = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Row) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *QueryResult) CloneVT() *QueryResult { + if m == nil { + return (*QueryResult)(nil) + } + r := &QueryResult{ + RowsAffected: m.RowsAffected, + InsertId: m.InsertId, + Info: m.Info, + SessionStateChanges: m.SessionStateChanges, + } + if rhs := m.Fields; rhs != nil { + tmpContainer := make([]*Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Fields = tmpContainer + } + if rhs := m.Rows; rhs != nil { + 
tmpContainer := make([]*Row, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Rows = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *QueryResult) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *QueryWarning) CloneVT() *QueryWarning { + if m == nil { + return (*QueryWarning)(nil) + } + r := &QueryWarning{ + Code: m.Code, + Message: m.Message, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *QueryWarning) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamEvent_Statement) CloneVT() *StreamEvent_Statement { + if m == nil { + return (*StreamEvent_Statement)(nil) + } + r := &StreamEvent_Statement{ + Category: m.Category, + TableName: m.TableName, + } + if rhs := m.PrimaryKeyFields; rhs != nil { + tmpContainer := make([]*Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.PrimaryKeyFields = tmpContainer + } + if rhs := m.PrimaryKeyValues; rhs != nil { + tmpContainer := make([]*Row, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.PrimaryKeyValues = tmpContainer + } + if rhs := m.Sql; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Sql = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamEvent_Statement) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamEvent) CloneVT() *StreamEvent { + if m == nil { + return (*StreamEvent)(nil) + } + r := &StreamEvent{ + EventToken: m.EventToken.CloneVT(), + } + if rhs := m.Statements; rhs != nil { + tmpContainer := make([]*StreamEvent_Statement, len(rhs)) + for k, v := range rhs { + tmpContainer[k] 
= v.CloneVT() + } + r.Statements = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamEvent) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteRequest) CloneVT() *ExecuteRequest { + if m == nil { + return (*ExecuteRequest)(nil) + } + r := &ExecuteRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query.CloneVT(), + TransactionId: m.TransactionId, + Options: m.Options.CloneVT(), + ReservedId: m.ReservedId, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteResponse) CloneVT() *ExecuteResponse { + if m == nil { + return (*ExecuteResponse)(nil) + } + r := &ExecuteResponse{ + Result: m.Result.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ResultWithError) CloneVT() *ResultWithError { + if m == nil { + return (*ResultWithError)(nil) + } + r := &ResultWithError{ + Error: m.Error.CloneVT(), + Result: m.Result.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ResultWithError) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamExecuteRequest) CloneVT() *StreamExecuteRequest { + if m == nil { + return (*StreamExecuteRequest)(nil) + } + r := &StreamExecuteRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + 
Target: m.Target.CloneVT(), + Query: m.Query.CloneVT(), + Options: m.Options.CloneVT(), + TransactionId: m.TransactionId, + ReservedId: m.ReservedId, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamExecuteResponse) CloneVT() *StreamExecuteResponse { + if m == nil { + return (*StreamExecuteResponse)(nil) + } + r := &StreamExecuteResponse{ + Result: m.Result.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BeginRequest) CloneVT() *BeginRequest { + if m == nil { + return (*BeginRequest)(nil) + } + r := &BeginRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Options: m.Options.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BeginRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BeginResponse) CloneVT() *BeginResponse { + if m == nil { + return (*BeginResponse)(nil) + } + r := &BeginResponse{ + TransactionId: m.TransactionId, + TabletAlias: m.TabletAlias.CloneVT(), + SessionStateChanges: m.SessionStateChanges, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BeginResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CommitRequest) CloneVT() *CommitRequest { + if m == nil { + return (*CommitRequest)(nil) + } + r := &CommitRequest{ + EffectiveCallerId: 
m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + TransactionId: m.TransactionId, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CommitRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CommitResponse) CloneVT() *CommitResponse { + if m == nil { + return (*CommitResponse)(nil) + } + r := &CommitResponse{ + ReservedId: m.ReservedId, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CommitResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RollbackRequest) CloneVT() *RollbackRequest { + if m == nil { + return (*RollbackRequest)(nil) + } + r := &RollbackRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + TransactionId: m.TransactionId, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RollbackRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RollbackResponse) CloneVT() *RollbackResponse { + if m == nil { + return (*RollbackResponse)(nil) + } + r := &RollbackResponse{ + ReservedId: m.ReservedId, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RollbackResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PrepareRequest) CloneVT() *PrepareRequest { + if m == nil { + return (*PrepareRequest)(nil) + } + r := &PrepareRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + 
TransactionId: m.TransactionId, + Dtid: m.Dtid, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PrepareRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PrepareResponse) CloneVT() *PrepareResponse { + if m == nil { + return (*PrepareResponse)(nil) + } + r := &PrepareResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PrepareResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CommitPreparedRequest) CloneVT() *CommitPreparedRequest { + if m == nil { + return (*CommitPreparedRequest)(nil) + } + r := &CommitPreparedRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Dtid: m.Dtid, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CommitPreparedRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CommitPreparedResponse) CloneVT() *CommitPreparedResponse { + if m == nil { + return (*CommitPreparedResponse)(nil) + } + r := &CommitPreparedResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CommitPreparedResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RollbackPreparedRequest) CloneVT() *RollbackPreparedRequest { + if m == nil { + return (*RollbackPreparedRequest)(nil) + } + r := &RollbackPreparedRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + TransactionId: m.TransactionId, + Dtid: m.Dtid, + } + if len(m.unknownFields) > 0 { + 
r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RollbackPreparedRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RollbackPreparedResponse) CloneVT() *RollbackPreparedResponse { + if m == nil { + return (*RollbackPreparedResponse)(nil) + } + r := &RollbackPreparedResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RollbackPreparedResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CreateTransactionRequest) CloneVT() *CreateTransactionRequest { + if m == nil { + return (*CreateTransactionRequest)(nil) + } + r := &CreateTransactionRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Dtid: m.Dtid, + } + if rhs := m.Participants; rhs != nil { + tmpContainer := make([]*Target, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Participants = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CreateTransactionRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CreateTransactionResponse) CloneVT() *CreateTransactionResponse { + if m == nil { + return (*CreateTransactionResponse)(nil) + } + r := &CreateTransactionResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CreateTransactionResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StartCommitRequest) CloneVT() *StartCommitRequest { + if m == nil { + return (*StartCommitRequest)(nil) + } + r := &StartCommitRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + 
ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + TransactionId: m.TransactionId, + Dtid: m.Dtid, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StartCommitRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StartCommitResponse) CloneVT() *StartCommitResponse { + if m == nil { + return (*StartCommitResponse)(nil) + } + r := &StartCommitResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StartCommitResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetRollbackRequest) CloneVT() *SetRollbackRequest { + if m == nil { + return (*SetRollbackRequest)(nil) + } + r := &SetRollbackRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + TransactionId: m.TransactionId, + Dtid: m.Dtid, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SetRollbackRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetRollbackResponse) CloneVT() *SetRollbackResponse { + if m == nil { + return (*SetRollbackResponse)(nil) + } + r := &SetRollbackResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SetRollbackResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ConcludeTransactionRequest) CloneVT() *ConcludeTransactionRequest { + if m == nil { + return (*ConcludeTransactionRequest)(nil) + } + r := &ConcludeTransactionRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: 
m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Dtid: m.Dtid, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ConcludeTransactionRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ConcludeTransactionResponse) CloneVT() *ConcludeTransactionResponse { + if m == nil { + return (*ConcludeTransactionResponse)(nil) + } + r := &ConcludeTransactionResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ConcludeTransactionResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReadTransactionRequest) CloneVT() *ReadTransactionRequest { + if m == nil { + return (*ReadTransactionRequest)(nil) + } + r := &ReadTransactionRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Dtid: m.Dtid, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReadTransactionRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReadTransactionResponse) CloneVT() *ReadTransactionResponse { + if m == nil { + return (*ReadTransactionResponse)(nil) + } + r := &ReadTransactionResponse{ + Metadata: m.Metadata.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReadTransactionResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BeginExecuteRequest) CloneVT() *BeginExecuteRequest { + if m == nil { + return (*BeginExecuteRequest)(nil) + } + r := &BeginExecuteRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: 
m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query.CloneVT(), + Options: m.Options.CloneVT(), + ReservedId: m.ReservedId, + } + if rhs := m.PreQueries; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.PreQueries = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BeginExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BeginExecuteResponse) CloneVT() *BeginExecuteResponse { + if m == nil { + return (*BeginExecuteResponse)(nil) + } + r := &BeginExecuteResponse{ + Error: m.Error.CloneVT(), + Result: m.Result.CloneVT(), + TransactionId: m.TransactionId, + TabletAlias: m.TabletAlias.CloneVT(), + SessionStateChanges: m.SessionStateChanges, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BeginExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BeginStreamExecuteRequest) CloneVT() *BeginStreamExecuteRequest { + if m == nil { + return (*BeginStreamExecuteRequest)(nil) + } + r := &BeginStreamExecuteRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query.CloneVT(), + Options: m.Options.CloneVT(), + ReservedId: m.ReservedId, + } + if rhs := m.PreQueries; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.PreQueries = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BeginStreamExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BeginStreamExecuteResponse) CloneVT() *BeginStreamExecuteResponse { + if m == nil { + return 
(*BeginStreamExecuteResponse)(nil) + } + r := &BeginStreamExecuteResponse{ + Error: m.Error.CloneVT(), + Result: m.Result.CloneVT(), + TransactionId: m.TransactionId, + TabletAlias: m.TabletAlias.CloneVT(), + SessionStateChanges: m.SessionStateChanges, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BeginStreamExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MessageStreamRequest) CloneVT() *MessageStreamRequest { + if m == nil { + return (*MessageStreamRequest)(nil) + } + r := &MessageStreamRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Name: m.Name, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *MessageStreamRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MessageStreamResponse) CloneVT() *MessageStreamResponse { + if m == nil { + return (*MessageStreamResponse)(nil) + } + r := &MessageStreamResponse{ + Result: m.Result.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *MessageStreamResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MessageAckRequest) CloneVT() *MessageAckRequest { + if m == nil { + return (*MessageAckRequest)(nil) + } + r := &MessageAckRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Name: m.Name, + } + if rhs := m.Ids; rhs != nil { + tmpContainer := make([]*Value, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Ids = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = 
make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *MessageAckRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MessageAckResponse) CloneVT() *MessageAckResponse { + if m == nil { + return (*MessageAckResponse)(nil) + } + r := &MessageAckResponse{ + Result: m.Result.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *MessageAckResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReserveExecuteRequest) CloneVT() *ReserveExecuteRequest { + if m == nil { + return (*ReserveExecuteRequest)(nil) + } + r := &ReserveExecuteRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query.CloneVT(), + TransactionId: m.TransactionId, + Options: m.Options.CloneVT(), + } + if rhs := m.PreQueries; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.PreQueries = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReserveExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReserveExecuteResponse) CloneVT() *ReserveExecuteResponse { + if m == nil { + return (*ReserveExecuteResponse)(nil) + } + r := &ReserveExecuteResponse{ + Error: m.Error.CloneVT(), + Result: m.Result.CloneVT(), + ReservedId: m.ReservedId, + TabletAlias: m.TabletAlias.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReserveExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReserveStreamExecuteRequest) CloneVT() *ReserveStreamExecuteRequest { + if m == nil 
{ + return (*ReserveStreamExecuteRequest)(nil) + } + r := &ReserveStreamExecuteRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query.CloneVT(), + Options: m.Options.CloneVT(), + TransactionId: m.TransactionId, + } + if rhs := m.PreQueries; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.PreQueries = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReserveStreamExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReserveStreamExecuteResponse) CloneVT() *ReserveStreamExecuteResponse { + if m == nil { + return (*ReserveStreamExecuteResponse)(nil) + } + r := &ReserveStreamExecuteResponse{ + Error: m.Error.CloneVT(), + Result: m.Result.CloneVT(), + ReservedId: m.ReservedId, + TabletAlias: m.TabletAlias.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReserveStreamExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReserveBeginExecuteRequest) CloneVT() *ReserveBeginExecuteRequest { + if m == nil { + return (*ReserveBeginExecuteRequest)(nil) + } + r := &ReserveBeginExecuteRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query.CloneVT(), + Options: m.Options.CloneVT(), + } + if rhs := m.PreQueries; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.PreQueries = tmpContainer + } + if rhs := m.PostBeginQueries; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.PostBeginQueries = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = 
make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReserveBeginExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReserveBeginExecuteResponse) CloneVT() *ReserveBeginExecuteResponse { + if m == nil { + return (*ReserveBeginExecuteResponse)(nil) + } + r := &ReserveBeginExecuteResponse{ + Error: m.Error.CloneVT(), + Result: m.Result.CloneVT(), + TransactionId: m.TransactionId, + ReservedId: m.ReservedId, + TabletAlias: m.TabletAlias.CloneVT(), + SessionStateChanges: m.SessionStateChanges, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReserveBeginExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReserveBeginStreamExecuteRequest) CloneVT() *ReserveBeginStreamExecuteRequest { + if m == nil { + return (*ReserveBeginStreamExecuteRequest)(nil) + } + r := &ReserveBeginStreamExecuteRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query.CloneVT(), + Options: m.Options.CloneVT(), + } + if rhs := m.PreQueries; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.PreQueries = tmpContainer + } + if rhs := m.PostBeginQueries; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.PostBeginQueries = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReserveBeginStreamExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReserveBeginStreamExecuteResponse) CloneVT() *ReserveBeginStreamExecuteResponse { + if m == nil { + return (*ReserveBeginStreamExecuteResponse)(nil) + } + r := &ReserveBeginStreamExecuteResponse{ + Error: 
m.Error.CloneVT(), + Result: m.Result.CloneVT(), + TransactionId: m.TransactionId, + ReservedId: m.ReservedId, + TabletAlias: m.TabletAlias.CloneVT(), + SessionStateChanges: m.SessionStateChanges, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReserveBeginStreamExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReleaseRequest) CloneVT() *ReleaseRequest { + if m == nil { + return (*ReleaseRequest)(nil) + } + r := &ReleaseRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + TransactionId: m.TransactionId, + ReservedId: m.ReservedId, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReleaseRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReleaseResponse) CloneVT() *ReleaseResponse { + if m == nil { + return (*ReleaseResponse)(nil) + } + r := &ReleaseResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReleaseResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamHealthRequest) CloneVT() *StreamHealthRequest { + if m == nil { + return (*StreamHealthRequest)(nil) + } + r := &StreamHealthRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamHealthRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RealtimeStats) CloneVT() *RealtimeStats { + if m == nil { + return (*RealtimeStats)(nil) + } + r := &RealtimeStats{ + HealthError: m.HealthError, + ReplicationLagSeconds: m.ReplicationLagSeconds, + 
BinlogPlayersCount: m.BinlogPlayersCount, + FilteredReplicationLagSeconds: m.FilteredReplicationLagSeconds, + CpuUsage: m.CpuUsage, + Qps: m.Qps, + } + if rhs := m.TableSchemaChanged; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.TableSchemaChanged = tmpContainer + } + if rhs := m.ViewSchemaChanged; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ViewSchemaChanged = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RealtimeStats) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *AggregateStats) CloneVT() *AggregateStats { + if m == nil { + return (*AggregateStats)(nil) + } + r := &AggregateStats{ + HealthyTabletCount: m.HealthyTabletCount, + UnhealthyTabletCount: m.UnhealthyTabletCount, + ReplicationLagSecondsMin: m.ReplicationLagSecondsMin, + ReplicationLagSecondsMax: m.ReplicationLagSecondsMax, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *AggregateStats) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamHealthResponse) CloneVT() *StreamHealthResponse { + if m == nil { + return (*StreamHealthResponse)(nil) + } + r := &StreamHealthResponse{ + Target: m.Target.CloneVT(), + Serving: m.Serving, + PrimaryTermStartTimestamp: m.PrimaryTermStartTimestamp, + RealtimeStats: m.RealtimeStats.CloneVT(), + TabletAlias: m.TabletAlias.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamHealthResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TransactionMetadata) CloneVT() *TransactionMetadata { + if m == nil { + return (*TransactionMetadata)(nil) + } + r := 
&TransactionMetadata{ + Dtid: m.Dtid, + State: m.State, + TimeCreated: m.TimeCreated, + } + if rhs := m.Participants; rhs != nil { + tmpContainer := make([]*Target, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Participants = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TransactionMetadata) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemaRequest) CloneVT() *GetSchemaRequest { + if m == nil { + return (*GetSchemaRequest)(nil) + } + r := &GetSchemaRequest{ + Target: m.Target.CloneVT(), + TableType: m.TableType, + } + if rhs := m.TableNames; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.TableNames = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemaResponse) CloneVT() *GetSchemaResponse { + if m == nil { + return (*GetSchemaResponse)(nil) + } + r := &GetSchemaResponse{} + if rhs := m.TableDefinition; rhs != nil { + tmpContainer := make(map[string]string, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.TableDefinition = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *LoadDataStreamRequest) CloneVT() *LoadDataStreamRequest { + if m == nil { + return (*LoadDataStreamRequest)(nil) + } + r := &LoadDataStreamRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query.CloneVT(), + TransactionId: 
m.TransactionId, + Options: m.Options.CloneVT(), + } + if rhs := m.Lines; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Lines = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *LoadDataStreamRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *LoadDataStreamResponse) CloneVT() *LoadDataStreamResponse { + if m == nil { + return (*LoadDataStreamResponse)(nil) + } + r := &LoadDataStreamResponse{ + Result: m.Result.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *LoadDataStreamResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Target) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -4036,8 +5558,8 @@ func (m *StreamHealthResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) i-- dAtA[i] = 0x22 } - if m.TabletExternallyReparentedTimestamp != 0 { - i = encodeVarint(dAtA, i, uint64(m.TabletExternallyReparentedTimestamp)) + if m.PrimaryTermStartTimestamp != 0 { + i = encodeVarint(dAtA, i, uint64(m.PrimaryTermStartTimestamp)) i-- dAtA[i] = 0x18 } @@ -5947,8 +7469,8 @@ func (m *StreamHealthResponse) SizeVT() (n int) { if m.Serving { n += 2 } - if m.TabletExternallyReparentedTimestamp != 0 { - n += 1 + sov(uint64(m.TabletExternallyReparentedTimestamp)) + if m.PrimaryTermStartTimestamp != 0 { + n += 1 + sov(uint64(m.PrimaryTermStartTimestamp)) } if m.RealtimeStats != nil { l = m.RealtimeStats.SizeVT() @@ -16523,9 +18045,9 @@ func (m *StreamHealthResponse) UnmarshalVT(dAtA []byte) error { m.Serving = bool(v != 0) case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletExternallyReparentedTimestamp", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field 
PrimaryTermStartTimestamp", wireType) } - m.TabletExternallyReparentedTimestamp = 0 + m.PrimaryTermStartTimestamp = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -16535,7 +18057,7 @@ func (m *StreamHealthResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TabletExternallyReparentedTimestamp |= int64(b&0x7F) << shift + m.PrimaryTermStartTimestamp |= int64(b&0x7F) << shift if b < 0x80 { break } diff --git a/go/vt/proto/queryservice/queryservice.pb.go b/go/vt/proto/queryservice/queryservice.pb.go index 154cfef0820..1a6283434ed 100644 --- a/go/vt/proto/queryservice/queryservice.pb.go +++ b/go/vt/proto/queryservice/queryservice.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: queryservice.proto package queryservice @@ -45,7 +45,7 @@ var file_queryservice_proto_rawDesc = []byte{ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x71, 0x75, 0x65, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x0b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x10, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x32, 0xa5, 0x11, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x07, 0x45, + 0x6f, 0x32, 0xff, 0x11, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x07, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x12, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, @@ -168,25 +168,31 @@ var file_queryservice_proto_rawDesc = []byte{ 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 
0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x5b, 0x0a, 0x0e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x21, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x62, 0x69, 0x6e, - 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x30, 0x01, 0x12, 0x42, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, - 0x17, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x51, 0x0a, 0x0e, 0x4c, 0x6f, 0x61, 0x64, 0x44, 0x61, - 0x74, 0x61, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x4c, - 0x6f, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x42, 0x2b, 0x5a, 0x29, 0x76, 0x69, 0x74, - 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, - 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 
0x58, 0x0a, 0x0d, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x20, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x5b, 0x0a, 0x0e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x12, 0x21, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x09, + 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x17, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, + 0x12, 0x51, 0x0a, 0x0e, 0x4c, 0x6f, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x12, 0x1c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x44, + 0x61, 0x74, 0x61, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x44, 0x61, 0x74, + 0x61, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x22, + 0x00, 0x28, 0x01, 0x42, 0x2b, 0x5a, 0x29, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, + 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_queryservice_proto_goTypes = []interface{}{ @@ -215,37 +221,39 @@ var file_queryservice_proto_goTypes = []interface{}{ (*query.StreamHealthRequest)(nil), // 22: query.StreamHealthRequest (*binlogdata.VStreamRequest)(nil), // 23: binlogdata.VStreamRequest (*binlogdata.VStreamRowsRequest)(nil), // 24: binlogdata.VStreamRowsRequest - (*binlogdata.VStreamResultsRequest)(nil), // 25: binlogdata.VStreamResultsRequest - (*query.GetSchemaRequest)(nil), // 26: query.GetSchemaRequest - (*query.LoadDataStreamRequest)(nil), // 27: query.LoadDataStreamRequest - (*query.ExecuteResponse)(nil), // 28: query.ExecuteResponse - (*query.StreamExecuteResponse)(nil), // 29: query.StreamExecuteResponse - (*query.BeginResponse)(nil), // 30: query.BeginResponse - (*query.CommitResponse)(nil), // 31: query.CommitResponse - (*query.RollbackResponse)(nil), // 32: query.RollbackResponse - (*query.PrepareResponse)(nil), // 33: query.PrepareResponse - (*query.CommitPreparedResponse)(nil), // 34: query.CommitPreparedResponse - (*query.RollbackPreparedResponse)(nil), // 35: query.RollbackPreparedResponse - (*query.CreateTransactionResponse)(nil), // 36: query.CreateTransactionResponse - (*query.StartCommitResponse)(nil), // 37: query.StartCommitResponse - (*query.SetRollbackResponse)(nil), // 38: query.SetRollbackResponse - (*query.ConcludeTransactionResponse)(nil), // 39: query.ConcludeTransactionResponse - (*query.ReadTransactionResponse)(nil), // 40: query.ReadTransactionResponse - (*query.BeginExecuteResponse)(nil), // 41: query.BeginExecuteResponse - (*query.BeginStreamExecuteResponse)(nil), // 42: 
query.BeginStreamExecuteResponse - (*query.MessageStreamResponse)(nil), // 43: query.MessageStreamResponse - (*query.MessageAckResponse)(nil), // 44: query.MessageAckResponse - (*query.ReserveExecuteResponse)(nil), // 45: query.ReserveExecuteResponse - (*query.ReserveBeginExecuteResponse)(nil), // 46: query.ReserveBeginExecuteResponse - (*query.ReserveStreamExecuteResponse)(nil), // 47: query.ReserveStreamExecuteResponse - (*query.ReserveBeginStreamExecuteResponse)(nil), // 48: query.ReserveBeginStreamExecuteResponse - (*query.ReleaseResponse)(nil), // 49: query.ReleaseResponse - (*query.StreamHealthResponse)(nil), // 50: query.StreamHealthResponse - (*binlogdata.VStreamResponse)(nil), // 51: binlogdata.VStreamResponse - (*binlogdata.VStreamRowsResponse)(nil), // 52: binlogdata.VStreamRowsResponse - (*binlogdata.VStreamResultsResponse)(nil), // 53: binlogdata.VStreamResultsResponse - (*query.GetSchemaResponse)(nil), // 54: query.GetSchemaResponse - (*query.LoadDataStreamResponse)(nil), // 55: query.LoadDataStreamResponse + (*binlogdata.VStreamTablesRequest)(nil), // 25: binlogdata.VStreamTablesRequest + (*binlogdata.VStreamResultsRequest)(nil), // 26: binlogdata.VStreamResultsRequest + (*query.GetSchemaRequest)(nil), // 27: query.GetSchemaRequest + (*query.LoadDataStreamRequest)(nil), // 28: query.LoadDataStreamRequest + (*query.ExecuteResponse)(nil), // 29: query.ExecuteResponse + (*query.StreamExecuteResponse)(nil), // 30: query.StreamExecuteResponse + (*query.BeginResponse)(nil), // 31: query.BeginResponse + (*query.CommitResponse)(nil), // 32: query.CommitResponse + (*query.RollbackResponse)(nil), // 33: query.RollbackResponse + (*query.PrepareResponse)(nil), // 34: query.PrepareResponse + (*query.CommitPreparedResponse)(nil), // 35: query.CommitPreparedResponse + (*query.RollbackPreparedResponse)(nil), // 36: query.RollbackPreparedResponse + (*query.CreateTransactionResponse)(nil), // 37: query.CreateTransactionResponse + (*query.StartCommitResponse)(nil), // 
38: query.StartCommitResponse + (*query.SetRollbackResponse)(nil), // 39: query.SetRollbackResponse + (*query.ConcludeTransactionResponse)(nil), // 40: query.ConcludeTransactionResponse + (*query.ReadTransactionResponse)(nil), // 41: query.ReadTransactionResponse + (*query.BeginExecuteResponse)(nil), // 42: query.BeginExecuteResponse + (*query.BeginStreamExecuteResponse)(nil), // 43: query.BeginStreamExecuteResponse + (*query.MessageStreamResponse)(nil), // 44: query.MessageStreamResponse + (*query.MessageAckResponse)(nil), // 45: query.MessageAckResponse + (*query.ReserveExecuteResponse)(nil), // 46: query.ReserveExecuteResponse + (*query.ReserveBeginExecuteResponse)(nil), // 47: query.ReserveBeginExecuteResponse + (*query.ReserveStreamExecuteResponse)(nil), // 48: query.ReserveStreamExecuteResponse + (*query.ReserveBeginStreamExecuteResponse)(nil), // 49: query.ReserveBeginStreamExecuteResponse + (*query.ReleaseResponse)(nil), // 50: query.ReleaseResponse + (*query.StreamHealthResponse)(nil), // 51: query.StreamHealthResponse + (*binlogdata.VStreamResponse)(nil), // 52: binlogdata.VStreamResponse + (*binlogdata.VStreamRowsResponse)(nil), // 53: binlogdata.VStreamRowsResponse + (*binlogdata.VStreamTablesResponse)(nil), // 54: binlogdata.VStreamTablesResponse + (*binlogdata.VStreamResultsResponse)(nil), // 55: binlogdata.VStreamResultsResponse + (*query.GetSchemaResponse)(nil), // 56: query.GetSchemaResponse + (*query.LoadDataStreamResponse)(nil), // 57: query.LoadDataStreamResponse } var file_queryservice_proto_depIdxs = []int32{ 0, // 0: queryservice.Query.Execute:input_type -> query.ExecuteRequest @@ -273,39 +281,41 @@ var file_queryservice_proto_depIdxs = []int32{ 22, // 22: queryservice.Query.StreamHealth:input_type -> query.StreamHealthRequest 23, // 23: queryservice.Query.VStream:input_type -> binlogdata.VStreamRequest 24, // 24: queryservice.Query.VStreamRows:input_type -> binlogdata.VStreamRowsRequest - 25, // 25: 
queryservice.Query.VStreamResults:input_type -> binlogdata.VStreamResultsRequest - 26, // 26: queryservice.Query.GetSchema:input_type -> query.GetSchemaRequest - 27, // 27: queryservice.Query.LoadDataStream:input_type -> query.LoadDataStreamRequest - 28, // 28: queryservice.Query.Execute:output_type -> query.ExecuteResponse - 29, // 29: queryservice.Query.StreamExecute:output_type -> query.StreamExecuteResponse - 30, // 30: queryservice.Query.Begin:output_type -> query.BeginResponse - 31, // 31: queryservice.Query.Commit:output_type -> query.CommitResponse - 32, // 32: queryservice.Query.Rollback:output_type -> query.RollbackResponse - 33, // 33: queryservice.Query.Prepare:output_type -> query.PrepareResponse - 34, // 34: queryservice.Query.CommitPrepared:output_type -> query.CommitPreparedResponse - 35, // 35: queryservice.Query.RollbackPrepared:output_type -> query.RollbackPreparedResponse - 36, // 36: queryservice.Query.CreateTransaction:output_type -> query.CreateTransactionResponse - 37, // 37: queryservice.Query.StartCommit:output_type -> query.StartCommitResponse - 38, // 38: queryservice.Query.SetRollback:output_type -> query.SetRollbackResponse - 39, // 39: queryservice.Query.ConcludeTransaction:output_type -> query.ConcludeTransactionResponse - 40, // 40: queryservice.Query.ReadTransaction:output_type -> query.ReadTransactionResponse - 41, // 41: queryservice.Query.BeginExecute:output_type -> query.BeginExecuteResponse - 42, // 42: queryservice.Query.BeginStreamExecute:output_type -> query.BeginStreamExecuteResponse - 43, // 43: queryservice.Query.MessageStream:output_type -> query.MessageStreamResponse - 44, // 44: queryservice.Query.MessageAck:output_type -> query.MessageAckResponse - 45, // 45: queryservice.Query.ReserveExecute:output_type -> query.ReserveExecuteResponse - 46, // 46: queryservice.Query.ReserveBeginExecute:output_type -> query.ReserveBeginExecuteResponse - 47, // 47: queryservice.Query.ReserveStreamExecute:output_type -> 
query.ReserveStreamExecuteResponse - 48, // 48: queryservice.Query.ReserveBeginStreamExecute:output_type -> query.ReserveBeginStreamExecuteResponse - 49, // 49: queryservice.Query.Release:output_type -> query.ReleaseResponse - 50, // 50: queryservice.Query.StreamHealth:output_type -> query.StreamHealthResponse - 51, // 51: queryservice.Query.VStream:output_type -> binlogdata.VStreamResponse - 52, // 52: queryservice.Query.VStreamRows:output_type -> binlogdata.VStreamRowsResponse - 53, // 53: queryservice.Query.VStreamResults:output_type -> binlogdata.VStreamResultsResponse - 54, // 54: queryservice.Query.GetSchema:output_type -> query.GetSchemaResponse - 55, // 55: queryservice.Query.LoadDataStream:output_type -> query.LoadDataStreamResponse - 28, // [28:56] is the sub-list for method output_type - 0, // [0:28] is the sub-list for method input_type + 25, // 25: queryservice.Query.VStreamTables:input_type -> binlogdata.VStreamTablesRequest + 26, // 26: queryservice.Query.VStreamResults:input_type -> binlogdata.VStreamResultsRequest + 27, // 27: queryservice.Query.GetSchema:input_type -> query.GetSchemaRequest + 28, // 28: queryservice.Query.LoadDataStream:input_type -> query.LoadDataStreamRequest + 29, // 29: queryservice.Query.Execute:output_type -> query.ExecuteResponse + 30, // 30: queryservice.Query.StreamExecute:output_type -> query.StreamExecuteResponse + 31, // 31: queryservice.Query.Begin:output_type -> query.BeginResponse + 32, // 32: queryservice.Query.Commit:output_type -> query.CommitResponse + 33, // 33: queryservice.Query.Rollback:output_type -> query.RollbackResponse + 34, // 34: queryservice.Query.Prepare:output_type -> query.PrepareResponse + 35, // 35: queryservice.Query.CommitPrepared:output_type -> query.CommitPreparedResponse + 36, // 36: queryservice.Query.RollbackPrepared:output_type -> query.RollbackPreparedResponse + 37, // 37: queryservice.Query.CreateTransaction:output_type -> query.CreateTransactionResponse + 38, // 38: 
queryservice.Query.StartCommit:output_type -> query.StartCommitResponse + 39, // 39: queryservice.Query.SetRollback:output_type -> query.SetRollbackResponse + 40, // 40: queryservice.Query.ConcludeTransaction:output_type -> query.ConcludeTransactionResponse + 41, // 41: queryservice.Query.ReadTransaction:output_type -> query.ReadTransactionResponse + 42, // 42: queryservice.Query.BeginExecute:output_type -> query.BeginExecuteResponse + 43, // 43: queryservice.Query.BeginStreamExecute:output_type -> query.BeginStreamExecuteResponse + 44, // 44: queryservice.Query.MessageStream:output_type -> query.MessageStreamResponse + 45, // 45: queryservice.Query.MessageAck:output_type -> query.MessageAckResponse + 46, // 46: queryservice.Query.ReserveExecute:output_type -> query.ReserveExecuteResponse + 47, // 47: queryservice.Query.ReserveBeginExecute:output_type -> query.ReserveBeginExecuteResponse + 48, // 48: queryservice.Query.ReserveStreamExecute:output_type -> query.ReserveStreamExecuteResponse + 49, // 49: queryservice.Query.ReserveBeginStreamExecute:output_type -> query.ReserveBeginStreamExecuteResponse + 50, // 50: queryservice.Query.Release:output_type -> query.ReleaseResponse + 51, // 51: queryservice.Query.StreamHealth:output_type -> query.StreamHealthResponse + 52, // 52: queryservice.Query.VStream:output_type -> binlogdata.VStreamResponse + 53, // 53: queryservice.Query.VStreamRows:output_type -> binlogdata.VStreamRowsResponse + 54, // 54: queryservice.Query.VStreamTables:output_type -> binlogdata.VStreamTablesResponse + 55, // 55: queryservice.Query.VStreamResults:output_type -> binlogdata.VStreamResultsResponse + 56, // 56: queryservice.Query.GetSchema:output_type -> query.GetSchemaResponse + 57, // 57: queryservice.Query.LoadDataStream:output_type -> query.LoadDataStreamResponse + 29, // [29:58] is the sub-list for method output_type + 0, // [0:29] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the 
sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name diff --git a/go/vt/proto/queryservice/queryservice_grpc.pb.go b/go/vt/proto/queryservice/queryservice_grpc.pb.go index 35e7f09450c..03395cf43e3 100644 --- a/go/vt/proto/queryservice/queryservice_grpc.pb.go +++ b/go/vt/proto/queryservice/queryservice_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 -// - protoc v3.6.1 +// - protoc v3.21.3 // source: queryservice.proto package queryservice @@ -79,6 +79,8 @@ type QueryClient interface { VStream(ctx context.Context, in *binlogdata.VStreamRequest, opts ...grpc.CallOption) (Query_VStreamClient, error) // VStreamRows streams rows from the specified starting point. VStreamRows(ctx context.Context, in *binlogdata.VStreamRowsRequest, opts ...grpc.CallOption) (Query_VStreamRowsClient, error) + // VStreamTables streams rows from the specified starting point. + VStreamTables(ctx context.Context, in *binlogdata.VStreamTablesRequest, opts ...grpc.CallOption) (Query_VStreamTablesClient, error) // VStreamResults streams results along with the gtid of the snapshot. VStreamResults(ctx context.Context, in *binlogdata.VStreamResultsRequest, opts ...grpc.CallOption) (Query_VStreamResultsClient, error) // GetSchema returns the schema information. @@ -503,8 +505,40 @@ func (x *queryVStreamRowsClient) Recv() (*binlogdata.VStreamRowsResponse, error) return m, nil } +func (c *queryClient) VStreamTables(ctx context.Context, in *binlogdata.VStreamTablesRequest, opts ...grpc.CallOption) (Query_VStreamTablesClient, error) { + stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[8], "/queryservice.Query/VStreamTables", opts...) 
+ if err != nil { + return nil, err + } + x := &queryVStreamTablesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Query_VStreamTablesClient interface { + Recv() (*binlogdata.VStreamTablesResponse, error) + grpc.ClientStream +} + +type queryVStreamTablesClient struct { + grpc.ClientStream +} + +func (x *queryVStreamTablesClient) Recv() (*binlogdata.VStreamTablesResponse, error) { + m := new(binlogdata.VStreamTablesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func (c *queryClient) VStreamResults(ctx context.Context, in *binlogdata.VStreamResultsRequest, opts ...grpc.CallOption) (Query_VStreamResultsClient, error) { - stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[8], "/queryservice.Query/VStreamResults", opts...) + stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[9], "/queryservice.Query/VStreamResults", opts...) if err != nil { return nil, err } @@ -536,7 +570,7 @@ func (x *queryVStreamResultsClient) Recv() (*binlogdata.VStreamResultsResponse, } func (c *queryClient) GetSchema(ctx context.Context, in *query.GetSchemaRequest, opts ...grpc.CallOption) (Query_GetSchemaClient, error) { - stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[9], "/queryservice.Query/GetSchema", opts...) + stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[10], "/queryservice.Query/GetSchema", opts...) if err != nil { return nil, err } @@ -568,7 +602,7 @@ func (x *queryGetSchemaClient) Recv() (*query.GetSchemaResponse, error) { } func (c *queryClient) LoadDataStream(ctx context.Context, opts ...grpc.CallOption) (Query_LoadDataStreamClient, error) { - stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[10], "/queryservice.Query/LoadDataStream", opts...) 
+ stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[11], "/queryservice.Query/LoadDataStream", opts...) if err != nil { return nil, err } @@ -660,6 +694,8 @@ type QueryServer interface { VStream(*binlogdata.VStreamRequest, Query_VStreamServer) error // VStreamRows streams rows from the specified starting point. VStreamRows(*binlogdata.VStreamRowsRequest, Query_VStreamRowsServer) error + // VStreamTables streams rows from the specified starting point. + VStreamTables(*binlogdata.VStreamTablesRequest, Query_VStreamTablesServer) error // VStreamResults streams results along with the gtid of the snapshot. VStreamResults(*binlogdata.VStreamResultsRequest, Query_VStreamResultsServer) error // GetSchema returns the schema information. @@ -747,6 +783,9 @@ func (UnimplementedQueryServer) VStream(*binlogdata.VStreamRequest, Query_VStrea func (UnimplementedQueryServer) VStreamRows(*binlogdata.VStreamRowsRequest, Query_VStreamRowsServer) error { return status.Errorf(codes.Unimplemented, "method VStreamRows not implemented") } +func (UnimplementedQueryServer) VStreamTables(*binlogdata.VStreamTablesRequest, Query_VStreamTablesServer) error { + return status.Errorf(codes.Unimplemented, "method VStreamTables not implemented") +} func (UnimplementedQueryServer) VStreamResults(*binlogdata.VStreamResultsRequest, Query_VStreamResultsServer) error { return status.Errorf(codes.Unimplemented, "method VStreamResults not implemented") } @@ -1243,6 +1282,27 @@ func (x *queryVStreamRowsServer) Send(m *binlogdata.VStreamRowsResponse) error { return x.ServerStream.SendMsg(m) } +func _Query_VStreamTables_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(binlogdata.VStreamTablesRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(QueryServer).VStreamTables(m, &queryVStreamTablesServer{stream}) +} + +type Query_VStreamTablesServer interface { + Send(*binlogdata.VStreamTablesResponse) error + grpc.ServerStream +} + +type 
queryVStreamTablesServer struct { + grpc.ServerStream +} + +func (x *queryVStreamTablesServer) Send(m *binlogdata.VStreamTablesResponse) error { + return x.ServerStream.SendMsg(m) +} + func _Query_VStreamResults_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(binlogdata.VStreamResultsRequest) if err := stream.RecvMsg(m); err != nil { @@ -1428,6 +1488,11 @@ var Query_ServiceDesc = grpc.ServiceDesc{ Handler: _Query_VStreamRows_Handler, ServerStreams: true, }, + { + StreamName: "VStreamTables", + Handler: _Query_VStreamTables_Handler, + ServerStreams: true, + }, { StreamName: "VStreamResults", Handler: _Query_VStreamResults_Handler, diff --git a/go/vt/proto/replicationdata/replicationdata.pb.go b/go/vt/proto/replicationdata/replicationdata.pb.go index 4720210514c..ec90d6943ac 100644 --- a/go/vt/proto/replicationdata/replicationdata.pb.go +++ b/go/vt/proto/replicationdata/replicationdata.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: replicationdata.proto package replicationdata diff --git a/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go b/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go index 64e5a8d8b94..f92a42b05e4 100644 --- a/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go +++ b/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
-// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: replicationdata.proto package replicationdata import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -18,6 +19,121 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *Status) CloneVT() *Status { + if m == nil { + return (*Status)(nil) + } + r := &Status{ + Position: m.Position, + ReplicationLagSeconds: m.ReplicationLagSeconds, + SourceHost: m.SourceHost, + SourcePort: m.SourcePort, + ConnectRetry: m.ConnectRetry, + RelayLogPosition: m.RelayLogPosition, + FilePosition: m.FilePosition, + RelayLogSourceBinlogEquivalentPosition: m.RelayLogSourceBinlogEquivalentPosition, + SourceServerId: m.SourceServerId, + SourceUuid: m.SourceUuid, + IoState: m.IoState, + LastIoError: m.LastIoError, + SqlState: m.SqlState, + LastSqlError: m.LastSqlError, + RelayLogFilePosition: m.RelayLogFilePosition, + SourceUser: m.SourceUser, + SqlDelay: m.SqlDelay, + AutoPosition: m.AutoPosition, + UsingGtid: m.UsingGtid, + HasReplicationFilters: m.HasReplicationFilters, + SslAllowed: m.SslAllowed, + ReplicationLagUnknown: m.ReplicationLagUnknown, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Status) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StopReplicationStatus) CloneVT() *StopReplicationStatus { + if m == nil { + return (*StopReplicationStatus)(nil) + } + r := &StopReplicationStatus{ + Before: m.Before.CloneVT(), + After: m.After.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StopReplicationStatus) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PrimaryStatus) CloneVT() *PrimaryStatus { + if m == nil 
{ + return (*PrimaryStatus)(nil) + } + r := &PrimaryStatus{ + Position: m.Position, + FilePosition: m.FilePosition, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PrimaryStatus) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *FullStatus) CloneVT() *FullStatus { + if m == nil { + return (*FullStatus)(nil) + } + r := &FullStatus{ + ServerId: m.ServerId, + ServerUuid: m.ServerUuid, + ReplicationStatus: m.ReplicationStatus.CloneVT(), + PrimaryStatus: m.PrimaryStatus.CloneVT(), + GtidPurged: m.GtidPurged, + Version: m.Version, + VersionComment: m.VersionComment, + ReadOnly: m.ReadOnly, + GtidMode: m.GtidMode, + BinlogFormat: m.BinlogFormat, + BinlogRowImage: m.BinlogRowImage, + LogBinEnabled: m.LogBinEnabled, + LogReplicaUpdates: m.LogReplicaUpdates, + SemiSyncPrimaryEnabled: m.SemiSyncPrimaryEnabled, + SemiSyncReplicaEnabled: m.SemiSyncReplicaEnabled, + SemiSyncPrimaryStatus: m.SemiSyncPrimaryStatus, + SemiSyncReplicaStatus: m.SemiSyncReplicaStatus, + SemiSyncPrimaryClients: m.SemiSyncPrimaryClients, + SemiSyncPrimaryTimeout: m.SemiSyncPrimaryTimeout, + SemiSyncWaitForReplicaCount: m.SemiSyncWaitForReplicaCount, + SuperReadOnly: m.SuperReadOnly, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *FullStatus) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Status) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil diff --git a/go/vt/proto/tableacl/tableacl.pb.go b/go/vt/proto/tableacl/tableacl.pb.go index 491e06526d0..3b26ace8157 100644 --- a/go/vt/proto/tableacl/tableacl.pb.go +++ b/go/vt/proto/tableacl/tableacl.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: tableacl.proto package tableacl diff --git a/go/vt/proto/tableacl/tableacl_vtproto.pb.go b/go/vt/proto/tableacl/tableacl_vtproto.pb.go index 462bf151230..8c9c9a97856 100644 --- a/go/vt/proto/tableacl/tableacl_vtproto.pb.go +++ b/go/vt/proto/tableacl/tableacl_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: tableacl.proto package tableacl import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -18,6 +19,67 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *TableGroupSpec) CloneVT() *TableGroupSpec { + if m == nil { + return (*TableGroupSpec)(nil) + } + r := &TableGroupSpec{ + Name: m.Name, + } + if rhs := m.TableNamesOrPrefixes; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.TableNamesOrPrefixes = tmpContainer + } + if rhs := m.Readers; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Readers = tmpContainer + } + if rhs := m.Writers; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Writers = tmpContainer + } + if rhs := m.Admins; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Admins = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TableGroupSpec) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Config) CloneVT() *Config { + if m == nil { + return (*Config)(nil) + } + r := &Config{} + if rhs := m.TableGroups; rhs != nil { + tmpContainer := make([]*TableGroupSpec, len(rhs)) + for k, v := range rhs { + 
tmpContainer[k] = v.CloneVT() + } + r.TableGroups = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Config) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *TableGroupSpec) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go index 388d06725f3..c9039a3cfd9 100644 --- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go +++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: tabletmanagerdata.proto package tabletmanagerdata @@ -45,6 +45,57 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// This structure allows us to manage tablet selection preferences +// which are eventually passed to a TabletPicker. +type TabletSelectionPreference int32 + +const ( + TabletSelectionPreference_ANY TabletSelectionPreference = 0 + TabletSelectionPreference_INORDER TabletSelectionPreference = 1 + TabletSelectionPreference_UNKNOWN TabletSelectionPreference = 3 // Don't change any existing value +) + +// Enum value maps for TabletSelectionPreference. 
+var ( + TabletSelectionPreference_name = map[int32]string{ + 0: "ANY", + 1: "INORDER", + 3: "UNKNOWN", + } + TabletSelectionPreference_value = map[string]int32{ + "ANY": 0, + "INORDER": 1, + "UNKNOWN": 3, + } +) + +func (x TabletSelectionPreference) Enum() *TabletSelectionPreference { + p := new(TabletSelectionPreference) + *p = x + return p +} + +func (x TabletSelectionPreference) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TabletSelectionPreference) Descriptor() protoreflect.EnumDescriptor { + return file_tabletmanagerdata_proto_enumTypes[0].Descriptor() +} + +func (TabletSelectionPreference) Type() protoreflect.EnumType { + return &file_tabletmanagerdata_proto_enumTypes[0] +} + +func (x TabletSelectionPreference) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TabletSelectionPreference.Descriptor instead. +func (TabletSelectionPreference) EnumDescriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{0} +} + type TableDefinition struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1576,6 +1627,8 @@ type ApplySchemaRequest struct { BeforeSchema *SchemaDefinition `protobuf:"bytes,4,opt,name=before_schema,json=beforeSchema,proto3" json:"before_schema,omitempty"` AfterSchema *SchemaDefinition `protobuf:"bytes,5,opt,name=after_schema,json=afterSchema,proto3" json:"after_schema,omitempty"` SqlMode string `protobuf:"bytes,6,opt,name=sql_mode,json=sqlMode,proto3" json:"sql_mode,omitempty"` + // BatchSize indicates how many queries to apply together + BatchSize int64 `protobuf:"varint,7,opt,name=batch_size,json=batchSize,proto3" json:"batch_size,omitempty"` } func (x *ApplySchemaRequest) Reset() { @@ -1652,6 +1705,13 @@ func (x *ApplySchemaRequest) GetSqlMode() string { return "" } +func (x *ApplySchemaRequest) GetBatchSize() int64 { + if x != nil { + return x.BatchSize + } + return 0 +} + type 
ApplySchemaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -4491,6 +4551,9 @@ type BackupRequest struct { // IncrementalFromPos indicates a position of a previous backup. When this value is non-empty // then the backup becomes incremental and applies as of given position. IncrementalFromPos string `protobuf:"bytes,3,opt,name=incremental_from_pos,json=incrementalFromPos,proto3" json:"incremental_from_pos,omitempty"` + // UpgradeSafe indicates if the backup should be taken with innodb_fast_shutdown=0 + // so that it's a backup that can be used for an upgrade. + UpgradeSafe bool `protobuf:"varint,4,opt,name=upgrade_safe,json=upgradeSafe,proto3" json:"upgrade_safe,omitempty"` } func (x *BackupRequest) Reset() { @@ -4546,6 +4609,13 @@ func (x *BackupRequest) GetIncrementalFromPos() string { return "" } +func (x *BackupRequest) GetUpgradeSafe() bool { + if x != nil { + return x.UpgradeSafe + } + return false +} + type BackupResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -4605,6 +4675,9 @@ type RestoreFromBackupRequest struct { RestoreToPos string `protobuf:"bytes,2,opt,name=restore_to_pos,json=restoreToPos,proto3" json:"restore_to_pos,omitempty"` // Dry run does not actually performs the restore, but validates the steps and availability of backups DryRun bool `protobuf:"varint,3,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` + // RestoreToTimestamp, if given, requested an inremental restore up to (and excluding) the given timestamp. + // RestoreToTimestamp and RestoreToPos are mutually exclusive. 
+ RestoreToTimestamp *vttime.Time `protobuf:"bytes,4,opt,name=restore_to_timestamp,json=restoreToTimestamp,proto3" json:"restore_to_timestamp,omitempty"` } func (x *RestoreFromBackupRequest) Reset() { @@ -4660,6 +4733,13 @@ func (x *RestoreFromBackupRequest) GetDryRun() bool { return false } +func (x *RestoreFromBackupRequest) GetRestoreToTimestamp() *vttime.Time { + if x != nil { + return x.RestoreToTimestamp + } + return nil +} + type RestoreFromBackupResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -4707,21 +4787,31 @@ func (x *RestoreFromBackupResponse) GetEvent() *logutil.Event { return nil } -type VDiffRequest struct { +type CreateVReplicationWorkflowRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Workflow string `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` - Action string `protobuf:"bytes,3,opt,name=action,proto3" json:"action,omitempty"` - ActionArg string `protobuf:"bytes,4,opt,name=action_arg,json=actionArg,proto3" json:"action_arg,omitempty"` - VdiffUuid string `protobuf:"bytes,5,opt,name=vdiff_uuid,json=vdiffUuid,proto3" json:"vdiff_uuid,omitempty"` - Options *VDiffOptions `protobuf:"bytes,6,opt,name=options,proto3" json:"options,omitempty"` -} - -func (x *VDiffRequest) Reset() { - *x = VDiffRequest{} + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + BinlogSource []*binlogdata.BinlogSource `protobuf:"bytes,2,rep,name=binlog_source,json=binlogSource,proto3" json:"binlog_source,omitempty"` + // Optional parameters. + Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` + // TabletTypes is the list of tablet types to use when selecting source tablets. 
+ TabletTypes []topodata.TabletType `protobuf:"varint,4,rep,packed,name=tablet_types,json=tabletTypes,proto3,enum=topodata.TabletType" json:"tablet_types,omitempty"` + TabletSelectionPreference TabletSelectionPreference `protobuf:"varint,5,opt,name=tablet_selection_preference,json=tabletSelectionPreference,proto3,enum=tabletmanagerdata.TabletSelectionPreference" json:"tablet_selection_preference,omitempty"` + WorkflowType binlogdata.VReplicationWorkflowType `protobuf:"varint,6,opt,name=workflow_type,json=workflowType,proto3,enum=binlogdata.VReplicationWorkflowType" json:"workflow_type,omitempty"` + WorkflowSubType binlogdata.VReplicationWorkflowSubType `protobuf:"varint,7,opt,name=workflow_sub_type,json=workflowSubType,proto3,enum=binlogdata.VReplicationWorkflowSubType" json:"workflow_sub_type,omitempty"` + // DeferSecondaryKeys specifies if secondary keys should be created in one shot after table + // copy finishes. + DeferSecondaryKeys bool `protobuf:"varint,8,opt,name=defer_secondary_keys,json=deferSecondaryKeys,proto3" json:"defer_secondary_keys,omitempty"` + // AutoStart specifies if the workflow should be started when created. + AutoStart bool `protobuf:"varint,9,opt,name=auto_start,json=autoStart,proto3" json:"auto_start,omitempty"` + // Should the workflow stop after the copy phase. 
+ StopAfterCopy bool `protobuf:"varint,10,opt,name=stop_after_copy,json=stopAfterCopy,proto3" json:"stop_after_copy,omitempty"` +} + +func (x *CreateVReplicationWorkflowRequest) Reset() { + *x = CreateVReplicationWorkflowRequest{} if protoimpl.UnsafeEnabled { mi := &file_tabletmanagerdata_proto_msgTypes[96] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4729,13 +4819,13 @@ func (x *VDiffRequest) Reset() { } } -func (x *VDiffRequest) String() string { +func (x *CreateVReplicationWorkflowRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*VDiffRequest) ProtoMessage() {} +func (*CreateVReplicationWorkflowRequest) ProtoMessage() {} -func (x *VDiffRequest) ProtoReflect() protoreflect.Message { +func (x *CreateVReplicationWorkflowRequest) ProtoReflect() protoreflect.Message { mi := &file_tabletmanagerdata_proto_msgTypes[96] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4747,65 +4837,91 @@ func (x *VDiffRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use VDiffRequest.ProtoReflect.Descriptor instead. -func (*VDiffRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use CreateVReplicationWorkflowRequest.ProtoReflect.Descriptor instead. 
+func (*CreateVReplicationWorkflowRequest) Descriptor() ([]byte, []int) { return file_tabletmanagerdata_proto_rawDescGZIP(), []int{96} } -func (x *VDiffRequest) GetKeyspace() string { +func (x *CreateVReplicationWorkflowRequest) GetWorkflow() string { if x != nil { - return x.Keyspace + return x.Workflow } return "" } -func (x *VDiffRequest) GetWorkflow() string { +func (x *CreateVReplicationWorkflowRequest) GetBinlogSource() []*binlogdata.BinlogSource { if x != nil { - return x.Workflow + return x.BinlogSource } - return "" + return nil } -func (x *VDiffRequest) GetAction() string { +func (x *CreateVReplicationWorkflowRequest) GetCells() []string { if x != nil { - return x.Action + return x.Cells } - return "" + return nil } -func (x *VDiffRequest) GetActionArg() string { +func (x *CreateVReplicationWorkflowRequest) GetTabletTypes() []topodata.TabletType { if x != nil { - return x.ActionArg + return x.TabletTypes } - return "" + return nil } -func (x *VDiffRequest) GetVdiffUuid() string { +func (x *CreateVReplicationWorkflowRequest) GetTabletSelectionPreference() TabletSelectionPreference { if x != nil { - return x.VdiffUuid + return x.TabletSelectionPreference } - return "" + return TabletSelectionPreference_ANY } -func (x *VDiffRequest) GetOptions() *VDiffOptions { +func (x *CreateVReplicationWorkflowRequest) GetWorkflowType() binlogdata.VReplicationWorkflowType { if x != nil { - return x.Options + return x.WorkflowType } - return nil + return binlogdata.VReplicationWorkflowType(0) } -type VDiffResponse struct { +func (x *CreateVReplicationWorkflowRequest) GetWorkflowSubType() binlogdata.VReplicationWorkflowSubType { + if x != nil { + return x.WorkflowSubType + } + return binlogdata.VReplicationWorkflowSubType(0) +} + +func (x *CreateVReplicationWorkflowRequest) GetDeferSecondaryKeys() bool { + if x != nil { + return x.DeferSecondaryKeys + } + return false +} + +func (x *CreateVReplicationWorkflowRequest) GetAutoStart() bool { + if x != nil { + return 
x.AutoStart + } + return false +} + +func (x *CreateVReplicationWorkflowRequest) GetStopAfterCopy() bool { + if x != nil { + return x.StopAfterCopy + } + return false +} + +type CreateVReplicationWorkflowResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Output *query.QueryResult `protobuf:"bytes,2,opt,name=output,proto3" json:"output,omitempty"` - VdiffUuid string `protobuf:"bytes,3,opt,name=vdiff_uuid,json=vdiffUuid,proto3" json:"vdiff_uuid,omitempty"` + Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` } -func (x *VDiffResponse) Reset() { - *x = VDiffResponse{} +func (x *CreateVReplicationWorkflowResponse) Reset() { + *x = CreateVReplicationWorkflowResponse{} if protoimpl.UnsafeEnabled { mi := &file_tabletmanagerdata_proto_msgTypes[97] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4813,13 +4929,13 @@ func (x *VDiffResponse) Reset() { } } -func (x *VDiffResponse) String() string { +func (x *CreateVReplicationWorkflowResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*VDiffResponse) ProtoMessage() {} +func (*CreateVReplicationWorkflowResponse) ProtoMessage() {} -func (x *VDiffResponse) ProtoReflect() protoreflect.Message { +func (x *CreateVReplicationWorkflowResponse) ProtoReflect() protoreflect.Message { mi := &file_tabletmanagerdata_proto_msgTypes[97] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4831,45 +4947,28 @@ func (x *VDiffResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use VDiffResponse.ProtoReflect.Descriptor instead. -func (*VDiffResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use CreateVReplicationWorkflowResponse.ProtoReflect.Descriptor instead. 
+func (*CreateVReplicationWorkflowResponse) Descriptor() ([]byte, []int) { return file_tabletmanagerdata_proto_rawDescGZIP(), []int{97} } -func (x *VDiffResponse) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *VDiffResponse) GetOutput() *query.QueryResult { +func (x *CreateVReplicationWorkflowResponse) GetResult() *query.QueryResult { if x != nil { - return x.Output + return x.Result } return nil } -func (x *VDiffResponse) GetVdiffUuid() string { - if x != nil { - return x.VdiffUuid - } - return "" -} - -// options that influence the tablet selected by the picker for streaming data from -type VDiffPickerOptions struct { +type DeleteVReplicationWorkflowRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletTypes string `protobuf:"bytes,1,opt,name=tablet_types,json=tabletTypes,proto3" json:"tablet_types,omitempty"` - SourceCell string `protobuf:"bytes,2,opt,name=source_cell,json=sourceCell,proto3" json:"source_cell,omitempty"` - TargetCell string `protobuf:"bytes,3,opt,name=target_cell,json=targetCell,proto3" json:"target_cell,omitempty"` + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` } -func (x *VDiffPickerOptions) Reset() { - *x = VDiffPickerOptions{} +func (x *DeleteVReplicationWorkflowRequest) Reset() { + *x = DeleteVReplicationWorkflowRequest{} if protoimpl.UnsafeEnabled { mi := &file_tabletmanagerdata_proto_msgTypes[98] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4877,13 +4976,13 @@ func (x *VDiffPickerOptions) Reset() { } } -func (x *VDiffPickerOptions) String() string { +func (x *DeleteVReplicationWorkflowRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*VDiffPickerOptions) ProtoMessage() {} +func (*DeleteVReplicationWorkflowRequest) ProtoMessage() {} -func (x *VDiffPickerOptions) ProtoReflect() protoreflect.Message { +func (x *DeleteVReplicationWorkflowRequest) ProtoReflect() 
protoreflect.Message { mi := &file_tabletmanagerdata_proto_msgTypes[98] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4895,45 +4994,28 @@ func (x *VDiffPickerOptions) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use VDiffPickerOptions.ProtoReflect.Descriptor instead. -func (*VDiffPickerOptions) Descriptor() ([]byte, []int) { +// Deprecated: Use DeleteVReplicationWorkflowRequest.ProtoReflect.Descriptor instead. +func (*DeleteVReplicationWorkflowRequest) Descriptor() ([]byte, []int) { return file_tabletmanagerdata_proto_rawDescGZIP(), []int{98} } -func (x *VDiffPickerOptions) GetTabletTypes() string { - if x != nil { - return x.TabletTypes - } - return "" -} - -func (x *VDiffPickerOptions) GetSourceCell() string { - if x != nil { - return x.SourceCell - } - return "" -} - -func (x *VDiffPickerOptions) GetTargetCell() string { +func (x *DeleteVReplicationWorkflowRequest) GetWorkflow() string { if x != nil { - return x.TargetCell + return x.Workflow } return "" } -// options that only influence how vdiff differences are reported -type VDiffReportOptions struct { +type DeleteVReplicationWorkflowResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - OnlyPks bool `protobuf:"varint,1,opt,name=only_pks,json=onlyPks,proto3" json:"only_pks,omitempty"` - DebugQuery bool `protobuf:"varint,2,opt,name=debug_query,json=debugQuery,proto3" json:"debug_query,omitempty"` - Format string `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"` + Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` } -func (x *VDiffReportOptions) Reset() { - *x = VDiffReportOptions{} +func (x *DeleteVReplicationWorkflowResponse) Reset() { + *x = DeleteVReplicationWorkflowResponse{} if protoimpl.UnsafeEnabled { mi := &file_tabletmanagerdata_proto_msgTypes[99] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4941,13 +5023,13 @@ func (x *VDiffReportOptions) Reset() { } } -func (x *VDiffReportOptions) String() string { +func (x *DeleteVReplicationWorkflowResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*VDiffReportOptions) ProtoMessage() {} +func (*DeleteVReplicationWorkflowResponse) ProtoMessage() {} -func (x *VDiffReportOptions) ProtoReflect() protoreflect.Message { +func (x *DeleteVReplicationWorkflowResponse) ProtoReflect() protoreflect.Message { mi := &file_tabletmanagerdata_proto_msgTypes[99] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4959,64 +5041,99 @@ func (x *VDiffReportOptions) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use VDiffReportOptions.ProtoReflect.Descriptor instead. -func (*VDiffReportOptions) Descriptor() ([]byte, []int) { +// Deprecated: Use DeleteVReplicationWorkflowResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteVReplicationWorkflowResponse) Descriptor() ([]byte, []int) { return file_tabletmanagerdata_proto_rawDescGZIP(), []int{99} } -func (x *VDiffReportOptions) GetOnlyPks() bool { +func (x *DeleteVReplicationWorkflowResponse) GetResult() *query.QueryResult { if x != nil { - return x.OnlyPks + return x.Result } - return false + return nil } -func (x *VDiffReportOptions) GetDebugQuery() bool { - if x != nil { - return x.DebugQuery +type ReadVReplicationWorkflowRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` +} + +func (x *ReadVReplicationWorkflowRequest) Reset() { + *x = ReadVReplicationWorkflowRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[100] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return false } -func (x *VDiffReportOptions) GetFormat() string { +func (x *ReadVReplicationWorkflowRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadVReplicationWorkflowRequest) ProtoMessage() {} + +func (x *ReadVReplicationWorkflowRequest) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[100] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadVReplicationWorkflowRequest.ProtoReflect.Descriptor instead. 
+func (*ReadVReplicationWorkflowRequest) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{100} +} + +func (x *ReadVReplicationWorkflowRequest) GetWorkflow() string { if x != nil { - return x.Format + return x.Workflow } return "" } -type VDiffCoreOptions struct { +type ReadVReplicationWorkflowResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Tables string `protobuf:"bytes,1,opt,name=tables,proto3" json:"tables,omitempty"` - AutoRetry bool `protobuf:"varint,2,opt,name=auto_retry,json=autoRetry,proto3" json:"auto_retry,omitempty"` - MaxRows int64 `protobuf:"varint,3,opt,name=max_rows,json=maxRows,proto3" json:"max_rows,omitempty"` - Checksum bool `protobuf:"varint,4,opt,name=checksum,proto3" json:"checksum,omitempty"` - SamplePct int64 `protobuf:"varint,5,opt,name=sample_pct,json=samplePct,proto3" json:"sample_pct,omitempty"` - TimeoutSeconds int64 `protobuf:"varint,6,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` - MaxExtraRowsToCompare int64 `protobuf:"varint,7,opt,name=max_extra_rows_to_compare,json=maxExtraRowsToCompare,proto3" json:"max_extra_rows_to_compare,omitempty"` - UpdateTableStats bool `protobuf:"varint,8,opt,name=update_table_stats,json=updateTableStats,proto3" json:"update_table_stats,omitempty"` + Workflow string `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` + Cells string `protobuf:"bytes,3,opt,name=cells,proto3" json:"cells,omitempty"` + TabletTypes []topodata.TabletType `protobuf:"varint,4,rep,packed,name=tablet_types,json=tabletTypes,proto3,enum=topodata.TabletType" json:"tablet_types,omitempty"` + TabletSelectionPreference TabletSelectionPreference `protobuf:"varint,5,opt,name=tablet_selection_preference,json=tabletSelectionPreference,proto3,enum=tabletmanagerdata.TabletSelectionPreference" json:"tablet_selection_preference,omitempty"` + DbName string 
`protobuf:"bytes,6,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"` + Tags string `protobuf:"bytes,7,opt,name=tags,proto3" json:"tags,omitempty"` + WorkflowType binlogdata.VReplicationWorkflowType `protobuf:"varint,8,opt,name=workflow_type,json=workflowType,proto3,enum=binlogdata.VReplicationWorkflowType" json:"workflow_type,omitempty"` + WorkflowSubType binlogdata.VReplicationWorkflowSubType `protobuf:"varint,9,opt,name=workflow_sub_type,json=workflowSubType,proto3,enum=binlogdata.VReplicationWorkflowSubType" json:"workflow_sub_type,omitempty"` + DeferSecondaryKeys bool `protobuf:"varint,10,opt,name=defer_secondary_keys,json=deferSecondaryKeys,proto3" json:"defer_secondary_keys,omitempty"` + Streams []*ReadVReplicationWorkflowResponse_Stream `protobuf:"bytes,11,rep,name=streams,proto3" json:"streams,omitempty"` } -func (x *VDiffCoreOptions) Reset() { - *x = VDiffCoreOptions{} +func (x *ReadVReplicationWorkflowResponse) Reset() { + *x = ReadVReplicationWorkflowResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[100] + mi := &file_tabletmanagerdata_proto_msgTypes[101] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *VDiffCoreOptions) String() string { +func (x *ReadVReplicationWorkflowResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*VDiffCoreOptions) ProtoMessage() {} +func (*ReadVReplicationWorkflowResponse) ProtoMessage() {} -func (x *VDiffCoreOptions) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[100] +func (x *ReadVReplicationWorkflowResponse) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[101] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5027,94 +5144,828 @@ func (x *VDiffCoreOptions) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use 
VDiffCoreOptions.ProtoReflect.Descriptor instead. -func (*VDiffCoreOptions) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{100} +// Deprecated: Use ReadVReplicationWorkflowResponse.ProtoReflect.Descriptor instead. +func (*ReadVReplicationWorkflowResponse) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{101} } -func (x *VDiffCoreOptions) GetTables() string { +func (x *ReadVReplicationWorkflowResponse) GetWorkflow() string { if x != nil { - return x.Tables + return x.Workflow } return "" } -func (x *VDiffCoreOptions) GetAutoRetry() bool { +func (x *ReadVReplicationWorkflowResponse) GetCells() string { if x != nil { - return x.AutoRetry + return x.Cells } - return false + return "" } -func (x *VDiffCoreOptions) GetMaxRows() int64 { +func (x *ReadVReplicationWorkflowResponse) GetTabletTypes() []topodata.TabletType { if x != nil { - return x.MaxRows + return x.TabletTypes } - return 0 + return nil } -func (x *VDiffCoreOptions) GetChecksum() bool { +func (x *ReadVReplicationWorkflowResponse) GetTabletSelectionPreference() TabletSelectionPreference { if x != nil { - return x.Checksum + return x.TabletSelectionPreference } - return false + return TabletSelectionPreference_ANY } -func (x *VDiffCoreOptions) GetSamplePct() int64 { +func (x *ReadVReplicationWorkflowResponse) GetDbName() string { if x != nil { - return x.SamplePct + return x.DbName } - return 0 + return "" } -func (x *VDiffCoreOptions) GetTimeoutSeconds() int64 { +func (x *ReadVReplicationWorkflowResponse) GetTags() string { if x != nil { - return x.TimeoutSeconds + return x.Tags } - return 0 + return "" } -func (x *VDiffCoreOptions) GetMaxExtraRowsToCompare() int64 { +func (x *ReadVReplicationWorkflowResponse) GetWorkflowType() binlogdata.VReplicationWorkflowType { if x != nil { - return x.MaxExtraRowsToCompare + return x.WorkflowType } - return 0 + return binlogdata.VReplicationWorkflowType(0) +} + +func (x 
*ReadVReplicationWorkflowResponse) GetWorkflowSubType() binlogdata.VReplicationWorkflowSubType { + if x != nil { + return x.WorkflowSubType + } + return binlogdata.VReplicationWorkflowSubType(0) +} + +func (x *ReadVReplicationWorkflowResponse) GetDeferSecondaryKeys() bool { + if x != nil { + return x.DeferSecondaryKeys + } + return false +} + +func (x *ReadVReplicationWorkflowResponse) GetStreams() []*ReadVReplicationWorkflowResponse_Stream { + if x != nil { + return x.Streams + } + return nil +} + +type VDiffRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Workflow string `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` + Action string `protobuf:"bytes,3,opt,name=action,proto3" json:"action,omitempty"` + ActionArg string `protobuf:"bytes,4,opt,name=action_arg,json=actionArg,proto3" json:"action_arg,omitempty"` + VdiffUuid string `protobuf:"bytes,5,opt,name=vdiff_uuid,json=vdiffUuid,proto3" json:"vdiff_uuid,omitempty"` + Options *VDiffOptions `protobuf:"bytes,6,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *VDiffRequest) Reset() { + *x = VDiffRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[102] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffRequest) ProtoMessage() {} + +func (x *VDiffRequest) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[102] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffRequest.ProtoReflect.Descriptor instead. 
+func (*VDiffRequest) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{102} +} + +func (x *VDiffRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *VDiffRequest) GetWorkflow() string { + if x != nil { + return x.Workflow + } + return "" +} + +func (x *VDiffRequest) GetAction() string { + if x != nil { + return x.Action + } + return "" +} + +func (x *VDiffRequest) GetActionArg() string { + if x != nil { + return x.ActionArg + } + return "" +} + +func (x *VDiffRequest) GetVdiffUuid() string { + if x != nil { + return x.VdiffUuid + } + return "" +} + +func (x *VDiffRequest) GetOptions() *VDiffOptions { + if x != nil { + return x.Options + } + return nil +} + +type VDiffResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Output *query.QueryResult `protobuf:"bytes,2,opt,name=output,proto3" json:"output,omitempty"` + VdiffUuid string `protobuf:"bytes,3,opt,name=vdiff_uuid,json=vdiffUuid,proto3" json:"vdiff_uuid,omitempty"` +} + +func (x *VDiffResponse) Reset() { + *x = VDiffResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[103] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffResponse) ProtoMessage() {} + +func (x *VDiffResponse) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[103] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffResponse.ProtoReflect.Descriptor instead. 
+func (*VDiffResponse) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{103} +} + +func (x *VDiffResponse) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *VDiffResponse) GetOutput() *query.QueryResult { + if x != nil { + return x.Output + } + return nil +} + +func (x *VDiffResponse) GetVdiffUuid() string { + if x != nil { + return x.VdiffUuid + } + return "" +} + +// options that influence the tablet selected by the picker for streaming data from +type VDiffPickerOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TabletTypes string `protobuf:"bytes,1,opt,name=tablet_types,json=tabletTypes,proto3" json:"tablet_types,omitempty"` + SourceCell string `protobuf:"bytes,2,opt,name=source_cell,json=sourceCell,proto3" json:"source_cell,omitempty"` + TargetCell string `protobuf:"bytes,3,opt,name=target_cell,json=targetCell,proto3" json:"target_cell,omitempty"` +} + +func (x *VDiffPickerOptions) Reset() { + *x = VDiffPickerOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[104] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffPickerOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffPickerOptions) ProtoMessage() {} + +func (x *VDiffPickerOptions) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[104] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffPickerOptions.ProtoReflect.Descriptor instead. 
+func (*VDiffPickerOptions) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{104} +} + +func (x *VDiffPickerOptions) GetTabletTypes() string { + if x != nil { + return x.TabletTypes + } + return "" +} + +func (x *VDiffPickerOptions) GetSourceCell() string { + if x != nil { + return x.SourceCell + } + return "" +} + +func (x *VDiffPickerOptions) GetTargetCell() string { + if x != nil { + return x.TargetCell + } + return "" +} + +// options that only influence how vdiff differences are reported +type VDiffReportOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OnlyPks bool `protobuf:"varint,1,opt,name=only_pks,json=onlyPks,proto3" json:"only_pks,omitempty"` + DebugQuery bool `protobuf:"varint,2,opt,name=debug_query,json=debugQuery,proto3" json:"debug_query,omitempty"` + Format string `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"` +} + +func (x *VDiffReportOptions) Reset() { + *x = VDiffReportOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[105] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffReportOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffReportOptions) ProtoMessage() {} + +func (x *VDiffReportOptions) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[105] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffReportOptions.ProtoReflect.Descriptor instead. 
+func (*VDiffReportOptions) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{105} +} + +func (x *VDiffReportOptions) GetOnlyPks() bool { + if x != nil { + return x.OnlyPks + } + return false +} + +func (x *VDiffReportOptions) GetDebugQuery() bool { + if x != nil { + return x.DebugQuery + } + return false +} + +func (x *VDiffReportOptions) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +type VDiffCoreOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tables string `protobuf:"bytes,1,opt,name=tables,proto3" json:"tables,omitempty"` + AutoRetry bool `protobuf:"varint,2,opt,name=auto_retry,json=autoRetry,proto3" json:"auto_retry,omitempty"` + MaxRows int64 `protobuf:"varint,3,opt,name=max_rows,json=maxRows,proto3" json:"max_rows,omitempty"` + Checksum bool `protobuf:"varint,4,opt,name=checksum,proto3" json:"checksum,omitempty"` + SamplePct int64 `protobuf:"varint,5,opt,name=sample_pct,json=samplePct,proto3" json:"sample_pct,omitempty"` + TimeoutSeconds int64 `protobuf:"varint,6,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` + MaxExtraRowsToCompare int64 `protobuf:"varint,7,opt,name=max_extra_rows_to_compare,json=maxExtraRowsToCompare,proto3" json:"max_extra_rows_to_compare,omitempty"` + UpdateTableStats bool `protobuf:"varint,8,opt,name=update_table_stats,json=updateTableStats,proto3" json:"update_table_stats,omitempty"` +} + +func (x *VDiffCoreOptions) Reset() { + *x = VDiffCoreOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[106] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffCoreOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffCoreOptions) ProtoMessage() {} + +func (x *VDiffCoreOptions) ProtoReflect() protoreflect.Message { + mi := 
&file_tabletmanagerdata_proto_msgTypes[106] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffCoreOptions.ProtoReflect.Descriptor instead. +func (*VDiffCoreOptions) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{106} +} + +func (x *VDiffCoreOptions) GetTables() string { + if x != nil { + return x.Tables + } + return "" +} + +func (x *VDiffCoreOptions) GetAutoRetry() bool { + if x != nil { + return x.AutoRetry + } + return false +} + +func (x *VDiffCoreOptions) GetMaxRows() int64 { + if x != nil { + return x.MaxRows + } + return 0 +} + +func (x *VDiffCoreOptions) GetChecksum() bool { + if x != nil { + return x.Checksum + } + return false +} + +func (x *VDiffCoreOptions) GetSamplePct() int64 { + if x != nil { + return x.SamplePct + } + return 0 +} + +func (x *VDiffCoreOptions) GetTimeoutSeconds() int64 { + if x != nil { + return x.TimeoutSeconds + } + return 0 +} + +func (x *VDiffCoreOptions) GetMaxExtraRowsToCompare() int64 { + if x != nil { + return x.MaxExtraRowsToCompare + } + return 0 } func (x *VDiffCoreOptions) GetUpdateTableStats() bool { if x != nil { - return x.UpdateTableStats + return x.UpdateTableStats + } + return false +} + +type VDiffOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PickerOptions *VDiffPickerOptions `protobuf:"bytes,1,opt,name=picker_options,json=pickerOptions,proto3" json:"picker_options,omitempty"` + CoreOptions *VDiffCoreOptions `protobuf:"bytes,2,opt,name=core_options,json=coreOptions,proto3" json:"core_options,omitempty"` + ReportOptions *VDiffReportOptions `protobuf:"bytes,3,opt,name=report_options,json=reportOptions,proto3" json:"report_options,omitempty"` +} + +func (x *VDiffOptions) Reset() { + *x = VDiffOptions{} + if 
protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[107] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffOptions) ProtoMessage() {} + +func (x *VDiffOptions) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[107] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffOptions.ProtoReflect.Descriptor instead. +func (*VDiffOptions) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{107} +} + +func (x *VDiffOptions) GetPickerOptions() *VDiffPickerOptions { + if x != nil { + return x.PickerOptions + } + return nil +} + +func (x *VDiffOptions) GetCoreOptions() *VDiffCoreOptions { + if x != nil { + return x.CoreOptions + } + return nil +} + +func (x *VDiffOptions) GetReportOptions() *VDiffReportOptions { + if x != nil { + return x.ReportOptions + } + return nil +} + +type UpdateVReplicationWorkflowRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` + TabletTypes []topodata.TabletType `protobuf:"varint,3,rep,packed,name=tablet_types,json=tabletTypes,proto3,enum=topodata.TabletType" json:"tablet_types,omitempty"` + TabletSelectionPreference TabletSelectionPreference `protobuf:"varint,4,opt,name=tablet_selection_preference,json=tabletSelectionPreference,proto3,enum=tabletmanagerdata.TabletSelectionPreference" json:"tablet_selection_preference,omitempty"` + OnDdl binlogdata.OnDDLAction 
`protobuf:"varint,5,opt,name=on_ddl,json=onDdl,proto3,enum=binlogdata.OnDDLAction" json:"on_ddl,omitempty"` + State binlogdata.VReplicationWorkflowState `protobuf:"varint,6,opt,name=state,proto3,enum=binlogdata.VReplicationWorkflowState" json:"state,omitempty"` +} + +func (x *UpdateVReplicationWorkflowRequest) Reset() { + *x = UpdateVReplicationWorkflowRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[108] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateVReplicationWorkflowRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateVReplicationWorkflowRequest) ProtoMessage() {} + +func (x *UpdateVReplicationWorkflowRequest) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[108] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateVReplicationWorkflowRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateVReplicationWorkflowRequest) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{108} +} + +func (x *UpdateVReplicationWorkflowRequest) GetWorkflow() string { + if x != nil { + return x.Workflow + } + return "" +} + +func (x *UpdateVReplicationWorkflowRequest) GetCells() []string { + if x != nil { + return x.Cells + } + return nil +} + +func (x *UpdateVReplicationWorkflowRequest) GetTabletTypes() []topodata.TabletType { + if x != nil { + return x.TabletTypes + } + return nil +} + +func (x *UpdateVReplicationWorkflowRequest) GetTabletSelectionPreference() TabletSelectionPreference { + if x != nil { + return x.TabletSelectionPreference + } + return TabletSelectionPreference_ANY +} + +func (x *UpdateVReplicationWorkflowRequest) GetOnDdl() binlogdata.OnDDLAction { + if x != nil { + return x.OnDdl + } + return binlogdata.OnDDLAction(0) +} + +func (x *UpdateVReplicationWorkflowRequest) GetState() binlogdata.VReplicationWorkflowState { + if x != nil { + return x.State + } + return binlogdata.VReplicationWorkflowState(0) +} + +type UpdateVReplicationWorkflowResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` +} + +func (x *UpdateVReplicationWorkflowResponse) Reset() { + *x = UpdateVReplicationWorkflowResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[109] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateVReplicationWorkflowResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateVReplicationWorkflowResponse) ProtoMessage() {} + +func (x *UpdateVReplicationWorkflowResponse) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[109] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateVReplicationWorkflowResponse.ProtoReflect.Descriptor instead. +func (*UpdateVReplicationWorkflowResponse) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{109} +} + +func (x *UpdateVReplicationWorkflowResponse) GetResult() *query.QueryResult { + if x != nil { + return x.Result + } + return nil +} + +type ResetSequencesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tables []string `protobuf:"bytes,1,rep,name=tables,proto3" json:"tables,omitempty"` +} + +func (x *ResetSequencesRequest) Reset() { + *x = ResetSequencesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[110] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResetSequencesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResetSequencesRequest) ProtoMessage() {} + +func (x *ResetSequencesRequest) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[110] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResetSequencesRequest.ProtoReflect.Descriptor instead. 
+func (*ResetSequencesRequest) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{110} +} + +func (x *ResetSequencesRequest) GetTables() []string { + if x != nil { + return x.Tables + } + return nil +} + +type ResetSequencesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ResetSequencesResponse) Reset() { + *x = ResetSequencesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[111] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResetSequencesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResetSequencesResponse) ProtoMessage() {} + +func (x *ResetSequencesResponse) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[111] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResetSequencesResponse.ProtoReflect.Descriptor instead. 
+func (*ResetSequencesResponse) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{111} +} + +type CheckThrottlerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AppName string `protobuf:"bytes,1,opt,name=app_name,json=appName,proto3" json:"app_name,omitempty"` +} + +func (x *CheckThrottlerRequest) Reset() { + *x = CheckThrottlerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[112] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckThrottlerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckThrottlerRequest) ProtoMessage() {} + +func (x *CheckThrottlerRequest) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[112] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckThrottlerRequest.ProtoReflect.Descriptor instead. 
+func (*CheckThrottlerRequest) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{112} +} + +func (x *CheckThrottlerRequest) GetAppName() string { + if x != nil { + return x.AppName } - return false + return "" } -type VDiffOptions struct { +type CheckThrottlerResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - PickerOptions *VDiffPickerOptions `protobuf:"bytes,1,opt,name=picker_options,json=pickerOptions,proto3" json:"picker_options,omitempty"` - CoreOptions *VDiffCoreOptions `protobuf:"bytes,2,opt,name=core_options,json=coreOptions,proto3" json:"core_options,omitempty"` - ReportOptions *VDiffReportOptions `protobuf:"bytes,3,opt,name=report_options,json=reportOptions,proto3" json:"report_options,omitempty"` -} - -func (x *VDiffOptions) Reset() { - *x = VDiffOptions{} + // StatusCode is HTTP compliant response code (e.g. 200 for OK) + StatusCode int32 `protobuf:"varint,1,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + // Value is the metric value collected by the tablet + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + // Threshold is the throttling threshold the table was comparing the value with + Threshold float64 `protobuf:"fixed64,3,opt,name=threshold,proto3" json:"threshold,omitempty"` + // Error indicates an error retrieving the value + Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` + // Message + Message string `protobuf:"bytes,5,opt,name=message,proto3" json:"message,omitempty"` + // RecentlyChecked indicates that the tablet has been hit with a user-facing check, which can then imply + // that heartbeats lease should be renwed. 
+ RecentlyChecked bool `protobuf:"varint,6,opt,name=recently_checked,json=recentlyChecked,proto3" json:"recently_checked,omitempty"` +} + +func (x *CheckThrottlerResponse) Reset() { + *x = CheckThrottlerResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[101] + mi := &file_tabletmanagerdata_proto_msgTypes[113] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *VDiffOptions) String() string { +func (x *CheckThrottlerResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*VDiffOptions) ProtoMessage() {} +func (*CheckThrottlerResponse) ProtoMessage() {} -func (x *VDiffOptions) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[101] +func (x *CheckThrottlerResponse) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[113] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5125,60 +5976,91 @@ func (x *VDiffOptions) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use VDiffOptions.ProtoReflect.Descriptor instead. -func (*VDiffOptions) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{101} +// Deprecated: Use CheckThrottlerResponse.ProtoReflect.Descriptor instead. 
+func (*CheckThrottlerResponse) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{113} } -func (x *VDiffOptions) GetPickerOptions() *VDiffPickerOptions { +func (x *CheckThrottlerResponse) GetStatusCode() int32 { if x != nil { - return x.PickerOptions + return x.StatusCode } - return nil + return 0 } -func (x *VDiffOptions) GetCoreOptions() *VDiffCoreOptions { +func (x *CheckThrottlerResponse) GetValue() float64 { if x != nil { - return x.CoreOptions + return x.Value } - return nil + return 0 } -func (x *VDiffOptions) GetReportOptions() *VDiffReportOptions { +func (x *CheckThrottlerResponse) GetThreshold() float64 { if x != nil { - return x.ReportOptions + return x.Threshold } - return nil + return 0 +} + +func (x *CheckThrottlerResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +func (x *CheckThrottlerResponse) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *CheckThrottlerResponse) GetRecentlyChecked() bool { + if x != nil { + return x.RecentlyChecked + } + return false } -type UpdateVRWorkflowRequest struct { +type ReadVReplicationWorkflowResponse_Stream struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` - Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` - TabletTypes []string `protobuf:"bytes,3,rep,name=tablet_types,json=tabletTypes,proto3" json:"tablet_types,omitempty"` - OnDdl binlogdata.OnDDLAction `protobuf:"varint,4,opt,name=on_ddl,json=onDdl,proto3,enum=binlogdata.OnDDLAction" json:"on_ddl,omitempty"` -} - -func (x *UpdateVRWorkflowRequest) Reset() { - *x = UpdateVRWorkflowRequest{} + Id int32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Bls *binlogdata.BinlogSource `protobuf:"bytes,2,opt,name=bls,proto3" json:"bls,omitempty"` + Pos string 
`protobuf:"bytes,3,opt,name=pos,proto3" json:"pos,omitempty"` + StopPos string `protobuf:"bytes,4,opt,name=stop_pos,json=stopPos,proto3" json:"stop_pos,omitempty"` + MaxTps int64 `protobuf:"varint,5,opt,name=max_tps,json=maxTps,proto3" json:"max_tps,omitempty"` + MaxReplicationLag int64 `protobuf:"varint,6,opt,name=max_replication_lag,json=maxReplicationLag,proto3" json:"max_replication_lag,omitempty"` + TimeUpdated *vttime.Time `protobuf:"bytes,7,opt,name=time_updated,json=timeUpdated,proto3" json:"time_updated,omitempty"` + TransactionTimestamp *vttime.Time `protobuf:"bytes,8,opt,name=transaction_timestamp,json=transactionTimestamp,proto3" json:"transaction_timestamp,omitempty"` + State binlogdata.VReplicationWorkflowState `protobuf:"varint,9,opt,name=state,proto3,enum=binlogdata.VReplicationWorkflowState" json:"state,omitempty"` + Message string `protobuf:"bytes,10,opt,name=message,proto3" json:"message,omitempty"` + RowsCopied int64 `protobuf:"varint,11,opt,name=rows_copied,json=rowsCopied,proto3" json:"rows_copied,omitempty"` + TimeHeartbeat *vttime.Time `protobuf:"bytes,12,opt,name=time_heartbeat,json=timeHeartbeat,proto3" json:"time_heartbeat,omitempty"` + TimeThrottled *vttime.Time `protobuf:"bytes,13,opt,name=time_throttled,json=timeThrottled,proto3" json:"time_throttled,omitempty"` + ComponentThrottled string `protobuf:"bytes,14,opt,name=component_throttled,json=componentThrottled,proto3" json:"component_throttled,omitempty"` +} + +func (x *ReadVReplicationWorkflowResponse_Stream) Reset() { + *x = ReadVReplicationWorkflowResponse_Stream{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[102] + mi := &file_tabletmanagerdata_proto_msgTypes[117] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *UpdateVRWorkflowRequest) String() string { +func (x *ReadVReplicationWorkflowResponse_Stream) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateVRWorkflowRequest) 
ProtoMessage() {} +func (*ReadVReplicationWorkflowResponse_Stream) ProtoMessage() {} -func (x *UpdateVRWorkflowRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[102] +func (x *ReadVReplicationWorkflowResponse_Stream) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[117] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5189,86 +6071,109 @@ func (x *UpdateVRWorkflowRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateVRWorkflowRequest.ProtoReflect.Descriptor instead. -func (*UpdateVRWorkflowRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{102} +// Deprecated: Use ReadVReplicationWorkflowResponse_Stream.ProtoReflect.Descriptor instead. +func (*ReadVReplicationWorkflowResponse_Stream) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{101, 0} } -func (x *UpdateVRWorkflowRequest) GetWorkflow() string { +func (x *ReadVReplicationWorkflowResponse_Stream) GetId() int32 { if x != nil { - return x.Workflow + return x.Id } - return "" + return 0 } -func (x *UpdateVRWorkflowRequest) GetCells() []string { +func (x *ReadVReplicationWorkflowResponse_Stream) GetBls() *binlogdata.BinlogSource { if x != nil { - return x.Cells + return x.Bls } return nil } -func (x *UpdateVRWorkflowRequest) GetTabletTypes() []string { +func (x *ReadVReplicationWorkflowResponse_Stream) GetPos() string { if x != nil { - return x.TabletTypes + return x.Pos } - return nil + return "" } -func (x *UpdateVRWorkflowRequest) GetOnDdl() binlogdata.OnDDLAction { +func (x *ReadVReplicationWorkflowResponse_Stream) GetStopPos() string { if x != nil { - return x.OnDdl + return x.StopPos } - return binlogdata.OnDDLAction(0) + return "" } -type UpdateVRWorkflowResponse struct { - state protoimpl.MessageState - sizeCache 
protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *ReadVReplicationWorkflowResponse_Stream) GetMaxTps() int64 { + if x != nil { + return x.MaxTps + } + return 0 +} - Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` +func (x *ReadVReplicationWorkflowResponse_Stream) GetMaxReplicationLag() int64 { + if x != nil { + return x.MaxReplicationLag + } + return 0 } -func (x *UpdateVRWorkflowResponse) Reset() { - *x = UpdateVRWorkflowResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[103] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *ReadVReplicationWorkflowResponse_Stream) GetTimeUpdated() *vttime.Time { + if x != nil { + return x.TimeUpdated } + return nil } -func (x *UpdateVRWorkflowResponse) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *ReadVReplicationWorkflowResponse_Stream) GetTransactionTimestamp() *vttime.Time { + if x != nil { + return x.TransactionTimestamp + } + return nil } -func (*UpdateVRWorkflowResponse) ProtoMessage() {} +func (x *ReadVReplicationWorkflowResponse_Stream) GetState() binlogdata.VReplicationWorkflowState { + if x != nil { + return x.State + } + return binlogdata.VReplicationWorkflowState(0) +} -func (x *UpdateVRWorkflowResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[103] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *ReadVReplicationWorkflowResponse_Stream) GetMessage() string { + if x != nil { + return x.Message } - return mi.MessageOf(x) + return "" } -// Deprecated: Use UpdateVRWorkflowResponse.ProtoReflect.Descriptor instead. 
-func (*UpdateVRWorkflowResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{103} +func (x *ReadVReplicationWorkflowResponse_Stream) GetRowsCopied() int64 { + if x != nil { + return x.RowsCopied + } + return 0 } -func (x *UpdateVRWorkflowResponse) GetResult() *query.QueryResult { +func (x *ReadVReplicationWorkflowResponse_Stream) GetTimeHeartbeat() *vttime.Time { if x != nil { - return x.Result + return x.TimeHeartbeat + } + return nil +} + +func (x *ReadVReplicationWorkflowResponse_Stream) GetTimeThrottled() *vttime.Time { + if x != nil { + return x.TimeThrottled } return nil } +func (x *ReadVReplicationWorkflowResponse_Stream) GetComponentThrottled() string { + if x != nil { + return x.ComponentThrottled + } + return "" +} + var File_tabletmanagerdata_proto protoreflect.FileDescriptor var file_tabletmanagerdata_proto_rawDesc = []byte{ @@ -5445,7 +6350,7 @@ var file_tabletmanagerdata_proto_rawDesc = []byte{ 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x96, 0x02, 0x0a, 0x12, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x12, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, @@ -5462,369 +6367,544 @@ var file_tabletmanagerdata_proto_rawDesc = []byte{ 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x61, 0x66, 0x74, 0x65, 0x72, 0x53, 
0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x71, 0x6c, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x71, 0x6c, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0xa7, - 0x01, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0d, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, - 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x0c, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x12, 0x46, 0x0a, 0x0c, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x61, 0x66, 0x74, - 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x13, 0x0a, 0x11, 0x4c, 0x6f, 0x63, 0x6b, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x14, 0x0a, - 0x12, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x16, 0x0a, 0x14, 0x55, 0x6e, - 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x8d, 0x01, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, - 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, - 0x52, 0x6f, 0x77, 0x73, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, - 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, - 0x49, 0x64, 0x22, 0x42, 0x0a, 0x14, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xb2, 0x01, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x27, 0x0a, - 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x42, - 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x23, 0x0a, 0x0d, 
0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, - 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x47, 0x0a, 0x19, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x22, 0x8e, 0x01, 0x0a, 0x1d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, - 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x07, - 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, - 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, - 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x4c, 0x0a, 0x1e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, - 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x71, 0x6c, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x09, 0x62, 0x61, 0x74, 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xa7, 0x01, + 
0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0d, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0c, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, + 0x46, 0x0a, 0x0c, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x61, 0x66, 0x74, 0x65, + 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x13, 0x0a, 0x11, 0x4c, 0x6f, 0x63, 0x6b, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x14, 0x0a, 0x12, + 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x16, 0x0a, 0x14, 0x55, 0x6e, 0x6c, + 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x8d, 0x01, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, + 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 
0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, + 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, + 0x6f, 0x77, 0x73, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, + 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, + 0x64, 0x22, 0x42, 0x0a, 0x14, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xb2, 0x01, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x27, 0x0a, 0x0f, + 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, + 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x47, 
0x0a, 0x19, 0x45, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x22, 0x4b, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, - 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, - 0x22, 0x47, 0x0a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, - 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, - 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x1a, 0x0a, 0x18, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4c, 0x0a, 0x19, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 
0x22, 0x16, 0x0a, 0x14, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4f, 0x0a, 0x15, 0x50, - 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x18, 0x0a, 0x16, - 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x35, 0x0a, 0x17, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x34, 0x0a, - 0x16, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0x19, 0x0a, 0x17, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, - 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x5e, 0x0a, 0x1d, 0x53, 0x74, 
0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x75, 0x6c, 0x74, 0x22, 0x8e, 0x01, 0x0a, 0x1d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, + 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x64, + 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x62, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x22, 0x4c, 0x0a, 0x1e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, + 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x22, 0x4b, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, + 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 
0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x22, + 0x47, 0x0a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, + 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x1a, 0x0a, 0x18, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x22, 0x4c, 0x0a, 0x19, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4f, 0x0a, 0x15, 0x50, 0x72, + 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x50, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 
0x22, 0x35, 0x0a, 0x17, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, + 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x34, 0x0a, 0x16, + 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x19, 0x0a, 0x17, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x0a, + 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x5e, 0x0a, 0x1d, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x21, 0x0a, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x77, 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x22, 0x3c, 0x0a, 0x1e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 
0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x77, 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x22, 0x3c, 0x0a, 0x1e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0x35, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, - 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, - 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x62, 0x0a, 0x21, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x41, 0x66, 0x74, - 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x77, 0x61, 0x69, - 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x24, 0x0a, 0x22, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, - 0x6c, 
0x41, 0x66, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, - 0x0a, 0x12, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x22, 0x2b, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x61, - 0x64, 0x64, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, - 0x73, 0x22, 0x19, 0x0a, 0x17, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1a, 0x0a, 0x18, - 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x0a, 0x17, 0x56, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x46, 0x0a, 0x18, 0x56, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x22, 0x4b, 0x0a, 0x1d, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 
0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, - 0x0a, 0x1e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, - 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x30, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, - 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, - 0x6e, 0x63, 0x22, 0x31, 0x0a, 0x13, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xd8, 0x01, 0x0a, 0x1e, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, - 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, - 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4e, 0x73, - 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x3a, 0x0a, 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x61, 0x6c, 0x69, - 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x0c, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x31, 0x0a, - 0x14, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 
0x70, 0x6f, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x21, 0x0a, 0x1f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0xba, 0x01, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x72, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, - 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x64, 0x4e, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, - 0x22, 0x15, 0x0a, 0x13, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x64, 0x0a, 0x15, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, - 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0e, 0x70, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4a, - 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x36, 0x0a, 0x18, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x1b, 0x0a, - 0x19, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, 0x0a, 0x21, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, - 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x24, 0x0a, 0x22, 0x52, 0x65, - 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x13, 0x0a, 0x11, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x49, 0x0a, 0x12, 
0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, - 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x22, 0xed, 0x01, 0x0a, 0x1b, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x2d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, - 0x26, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x4e, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x66, 0x6f, 0x72, 0x63, 0x65, - 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, - 0x22, 0x1e, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 
0x69, - 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x4b, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, - 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0x1d, 0x0a, - 0x1b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7e, 0x0a, 0x22, - 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, - 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x58, 0x0a, 0x15, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x24, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x13, 0x73, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0x6b, 0x0a, 0x23, - 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, - 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x64, 0x61, 0x74, 0x61, 
0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x33, 0x0a, 0x15, 0x50, 0x72, 0x6f, - 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x34, - 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x22, 0x35, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, + 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, + 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x62, 0x0a, 0x21, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x41, 0x66, 0x74, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x77, 0x61, 0x69, 0x74, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x24, 0x0a, 0x22, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, + 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x22, 0x14, 0x0a, + 0x12, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0x2b, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, + 0x64, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, + 0x22, 0x19, 0x0a, 0x17, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1a, 0x0a, 0x18, 0x52, + 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x0a, 0x17, 0x56, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x46, 0x0a, 0x18, 0x56, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x22, 0x4b, 0x0a, 0x1d, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, + 0x1e, 
0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x69, + 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x30, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, + 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, + 0x63, 0x22, 0x31, 0x0a, 0x13, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x88, 0x01, 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, - 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63, 0x6f, 0x6e, - 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, - 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x30, 0x0a, - 0x14, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x72, 0x6f, - 0x6d, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, - 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x73, 0x22, - 0x36, 0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xd8, 0x01, 0x0a, 0x1e, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 
0x26, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4e, 0x73, 0x12, + 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x3a, 0x0a, 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x61, 0x6c, 0x69, 0x61, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0c, + 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x31, 0x0a, 0x14, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x21, 0x0a, 0x1f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0xba, 0x01, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 
0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x4e, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, + 0x15, 0x0a, 0x13, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x64, + 0x0a, 0x15, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0e, 0x70, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4a, 0x04, + 0x08, 0x01, 0x10, 0x02, 0x22, 0x36, 0x0a, 0x18, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x1b, 0x0a, 0x19, + 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 
0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, 0x0a, 0x21, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x24, 0x0a, 0x22, 0x52, 0x65, 0x73, + 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x13, 0x0a, 0x11, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x22, 0x49, 0x0a, 0x12, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, + 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, + 0xed, 0x01, 0x0a, 0x1b, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x2d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x26, + 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 
0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x64, 0x4e, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, + 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, + 0x1e, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x4b, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0x1d, 0x0a, 0x1b, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7e, 0x0a, 0x22, 0x53, + 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, + 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x58, 0x0a, 0x15, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x24, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x13, 0x73, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0x6b, 0x0a, 0x23, 0x53, + 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, + 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x33, 0x0a, 0x15, 0x50, 0x72, 0x6f, 0x6d, + 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x34, 0x0a, + 0x16, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0xab, 0x01, 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, + 0x65, 0x6e, 0x63, 0x79, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x14, + 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x72, 0x6f, 0x6d, + 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x73, 0x61, 0x66, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x61, 0x66, + 0x65, 0x22, 0x36, 0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0xc8, 0x01, 0x0a, 0x18, 0x52, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, + 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x64, + 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 
0x52, 0x06, 0x64, 0x72, + 0x79, 0x52, 0x75, 0x6e, 0x12, 0x3e, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, + 0x74, 0x6f, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x22, 0x41, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, + 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x88, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, - 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, - 0x69, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, - 0x6f, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, - 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, - 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, - 0x75, 0x6e, 0x22, 0x41, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, - 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, - 0x2e, 0x6c, 0x6f, 
0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0xd7, 0x01, 0x0a, 0x0c, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x16, - 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x61, 0x72, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x41, 0x72, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x75, - 0x75, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x64, 0x69, 0x66, 0x66, - 0x55, 0x75, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, - 0x6a, 0x0a, 0x0d, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x2a, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, - 0x76, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x75, 0x75, 0x69, 
0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x76, 0x64, 0x69, 0x66, 0x66, 0x55, 0x75, 0x69, 0x64, 0x22, 0x79, 0x0a, 0x12, 0x56, - 0x44, 0x69, 0x66, 0x66, 0x50, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, - 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, - 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x22, 0x68, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, - 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x19, 0x0a, 0x08, - 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x70, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, - 0x6f, 0x6e, 0x6c, 0x79, 0x50, 0x6b, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, - 0x62, 0x75, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, - 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x22, 0xb0, 0x02, 0x0a, 0x10, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x72, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x1d, 0x0a, - 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x52, 0x65, 0x74, 0x72, 0x79, 0x12, 0x19, 0x0a, 
0x08, - 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, - 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x70, 0x63, - 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x50, - 0x63, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, - 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x74, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x38, 0x0a, 0x19, 0x6d, - 0x61, 0x78, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x74, 0x6f, - 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, - 0x6d, 0x61, 0x78, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x6f, 0x77, 0x73, 0x54, 0x6f, 0x43, 0x6f, - 0x6d, 0x70, 0x61, 0x72, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x10, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x73, 0x22, 0xf2, 0x01, 0x0a, 0x0c, 0x56, 0x44, 0x69, 0x66, 0x66, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x70, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x5f, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, + 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0xd4, 0x04, 0x0a, 0x21, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x77, 0x6f, 0x72, 0x6b, 
0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x3d, 0x0a, 0x0d, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0c, 0x62, 0x69, 0x6e, 0x6c, + 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, + 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x62, + 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 
0x6c, 0x6f, 0x77, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x53, 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x75, 0x62, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x62, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, + 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, + 0x6f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, + 0x66, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x22, 0x50, + 0x0a, 0x22, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x22, 0x3f, 0x0a, 0x21, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x22, 0x50, 0x0a, 0x22, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x22, 0x3d, 0x0a, 0x1f, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x22, 0x94, 0x09, 0x0a, 0x20, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, + 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 
0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, + 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x49, 0x0a, + 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0f, 0x77, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 
0x12, 0x30, 0x0a, + 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, + 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, + 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x12, + 0x54, 0x0a, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x3a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x07, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x73, 0x1a, 0xc1, 0x04, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x2a, 0x0a, 0x03, 0x62, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, + 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x03, 0x62, 0x6c, 0x73, 0x12, 0x10, 0x0a, 0x03, + 0x70, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x6f, 0x73, 0x12, 0x19, + 0x0a, 0x08, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x74, 0x6f, 0x70, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x61, 0x78, + 0x5f, 0x74, 0x70, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x54, + 0x70, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x11, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, + 0x61, 0x67, 0x12, 
0x2f, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x41, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x52, 0x14, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x3b, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, + 0x0b, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x12, 0x33, + 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, + 0x65, 0x61, 0x74, 0x12, 0x33, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x68, 0x72, 0x6f, + 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 
0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x54, + 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, + 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x22, 0xd7, 0x01, 0x0a, 0x0c, 0x56, 0x44, + 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x72, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x64, 0x69, + 0x66, 0x66, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, + 0x64, 0x69, 0x66, 0x66, 0x55, 0x75, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, + 0x69, 0x66, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x22, 0x6a, 0x0a, 0x0d, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 
0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x64, 0x69, 0x66, 0x66, 0x55, 0x75, 0x69, 0x64, 0x22, + 0x79, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x50, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x22, 0x68, 0x0a, 0x12, 0x56, 0x44, + 0x69, 0x66, 0x66, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x70, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x6f, 0x6e, 0x6c, 0x79, 0x50, 0x6b, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x64, + 0x65, 0x62, 0x75, 0x67, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0a, 0x64, 0x65, 0x62, 0x75, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x22, 0xb0, 0x02, 0x0a, 0x10, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, + 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x52, 0x65, 0x74, 0x72, 0x79, + 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x5f, 0x70, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x50, 0x63, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, + 0x38, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x6f, 0x77, + 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x6f, 0x77, 0x73, + 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0xf2, 0x01, 0x0a, 0x0c, 0x56, 0x44, 0x69, 0x66, + 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x70, 0x69, 0x63, 0x6b, + 0x65, 0x72, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 
0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x50, 0x69, 0x63, 0x6b, 0x65, 0x72, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0d, 0x70, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x50, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x0d, 0x70, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, - 0x66, 0x66, 0x43, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x63, - 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x72, 0x65, - 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x70, 0x6f, 0x72, - 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x9e, 0x01, 0x0a, 0x17, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x56, 0x52, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x12, 
0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x06, 0x6f, 0x6e, 0x5f, - 0x64, 0x64, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, - 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4f, 0x6e, 0x44, 0x44, 0x4c, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x05, 0x6f, 0x6e, 0x44, 0x64, 0x6c, 0x22, 0x46, 0x0a, 0x18, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x56, 0x52, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x42, 0x30, 0x5a, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, - 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, - 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x0b, 0x63, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, + 0x0a, 0x0e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, + 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x52, 0x0d, 0x72, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe9, 0x02, 0x0a, + 0x21, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x14, + 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, + 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, + 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x6f, + 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x62, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4f, 0x6e, 0x44, 0x44, 0x4c, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x6f, 0x6e, 0x44, 0x64, 0x6c, 0x12, 0x3b, 0x0a, 0x05, 0x73, + 0x74, 0x61, 
0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x50, 0x0a, 0x22, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, + 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x2f, 0x0a, 0x15, 0x52, 0x65, + 0x73, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x52, + 0x65, 0x73, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, 0x15, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, + 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, + 0x0a, 0x08, 0x61, 0x70, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xc8, 0x01, 0x0a, 0x16, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x14, 0x0a, 
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, + 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, + 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, + 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, + 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x65, 0x64, 0x2a, 0x3e, 0x0a, 0x19, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, + 0x4f, 0x52, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x03, 0x42, 0x30, 0x5a, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, + 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -5839,178 +6919,217 @@ func file_tabletmanagerdata_proto_rawDescGZIP() []byte { return file_tabletmanagerdata_proto_rawDescData } -var file_tabletmanagerdata_proto_msgTypes = make([]protoimpl.MessageInfo, 107) +var file_tabletmanagerdata_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_tabletmanagerdata_proto_msgTypes = make([]protoimpl.MessageInfo, 118) var 
file_tabletmanagerdata_proto_goTypes = []interface{}{ - (*TableDefinition)(nil), // 0: tabletmanagerdata.TableDefinition - (*SchemaDefinition)(nil), // 1: tabletmanagerdata.SchemaDefinition - (*SchemaChangeResult)(nil), // 2: tabletmanagerdata.SchemaChangeResult - (*UserPermission)(nil), // 3: tabletmanagerdata.UserPermission - (*DbPermission)(nil), // 4: tabletmanagerdata.DbPermission - (*Permissions)(nil), // 5: tabletmanagerdata.Permissions - (*PingRequest)(nil), // 6: tabletmanagerdata.PingRequest - (*PingResponse)(nil), // 7: tabletmanagerdata.PingResponse - (*SleepRequest)(nil), // 8: tabletmanagerdata.SleepRequest - (*SleepResponse)(nil), // 9: tabletmanagerdata.SleepResponse - (*ExecuteHookRequest)(nil), // 10: tabletmanagerdata.ExecuteHookRequest - (*ExecuteHookResponse)(nil), // 11: tabletmanagerdata.ExecuteHookResponse - (*GetSchemaRequest)(nil), // 12: tabletmanagerdata.GetSchemaRequest - (*GetSchemaResponse)(nil), // 13: tabletmanagerdata.GetSchemaResponse - (*GetPermissionsRequest)(nil), // 14: tabletmanagerdata.GetPermissionsRequest - (*GetPermissionsResponse)(nil), // 15: tabletmanagerdata.GetPermissionsResponse - (*SetReadOnlyRequest)(nil), // 16: tabletmanagerdata.SetReadOnlyRequest - (*SetReadOnlyResponse)(nil), // 17: tabletmanagerdata.SetReadOnlyResponse - (*SetReadWriteRequest)(nil), // 18: tabletmanagerdata.SetReadWriteRequest - (*SetReadWriteResponse)(nil), // 19: tabletmanagerdata.SetReadWriteResponse - (*ChangeTypeRequest)(nil), // 20: tabletmanagerdata.ChangeTypeRequest - (*ChangeTypeResponse)(nil), // 21: tabletmanagerdata.ChangeTypeResponse - (*RefreshStateRequest)(nil), // 22: tabletmanagerdata.RefreshStateRequest - (*RefreshStateResponse)(nil), // 23: tabletmanagerdata.RefreshStateResponse - (*RunHealthCheckRequest)(nil), // 24: tabletmanagerdata.RunHealthCheckRequest - (*RunHealthCheckResponse)(nil), // 25: tabletmanagerdata.RunHealthCheckResponse - (*ReloadSchemaRequest)(nil), // 26: tabletmanagerdata.ReloadSchemaRequest - 
(*ReloadSchemaResponse)(nil), // 27: tabletmanagerdata.ReloadSchemaResponse - (*PreflightSchemaRequest)(nil), // 28: tabletmanagerdata.PreflightSchemaRequest - (*PreflightSchemaResponse)(nil), // 29: tabletmanagerdata.PreflightSchemaResponse - (*ApplySchemaRequest)(nil), // 30: tabletmanagerdata.ApplySchemaRequest - (*ApplySchemaResponse)(nil), // 31: tabletmanagerdata.ApplySchemaResponse - (*LockTablesRequest)(nil), // 32: tabletmanagerdata.LockTablesRequest - (*LockTablesResponse)(nil), // 33: tabletmanagerdata.LockTablesResponse - (*UnlockTablesRequest)(nil), // 34: tabletmanagerdata.UnlockTablesRequest - (*UnlockTablesResponse)(nil), // 35: tabletmanagerdata.UnlockTablesResponse - (*ExecuteQueryRequest)(nil), // 36: tabletmanagerdata.ExecuteQueryRequest - (*ExecuteQueryResponse)(nil), // 37: tabletmanagerdata.ExecuteQueryResponse - (*ExecuteFetchAsDbaRequest)(nil), // 38: tabletmanagerdata.ExecuteFetchAsDbaRequest - (*ExecuteFetchAsDbaResponse)(nil), // 39: tabletmanagerdata.ExecuteFetchAsDbaResponse - (*ExecuteFetchAsAllPrivsRequest)(nil), // 40: tabletmanagerdata.ExecuteFetchAsAllPrivsRequest - (*ExecuteFetchAsAllPrivsResponse)(nil), // 41: tabletmanagerdata.ExecuteFetchAsAllPrivsResponse - (*ExecuteFetchAsAppRequest)(nil), // 42: tabletmanagerdata.ExecuteFetchAsAppRequest - (*ExecuteFetchAsAppResponse)(nil), // 43: tabletmanagerdata.ExecuteFetchAsAppResponse - (*ReplicationStatusRequest)(nil), // 44: tabletmanagerdata.ReplicationStatusRequest - (*ReplicationStatusResponse)(nil), // 45: tabletmanagerdata.ReplicationStatusResponse - (*PrimaryStatusRequest)(nil), // 46: tabletmanagerdata.PrimaryStatusRequest - (*PrimaryStatusResponse)(nil), // 47: tabletmanagerdata.PrimaryStatusResponse - (*PrimaryPositionRequest)(nil), // 48: tabletmanagerdata.PrimaryPositionRequest - (*PrimaryPositionResponse)(nil), // 49: tabletmanagerdata.PrimaryPositionResponse - (*WaitForPositionRequest)(nil), // 50: tabletmanagerdata.WaitForPositionRequest - 
(*WaitForPositionResponse)(nil), // 51: tabletmanagerdata.WaitForPositionResponse - (*StopReplicationRequest)(nil), // 52: tabletmanagerdata.StopReplicationRequest - (*StopReplicationResponse)(nil), // 53: tabletmanagerdata.StopReplicationResponse - (*StopReplicationMinimumRequest)(nil), // 54: tabletmanagerdata.StopReplicationMinimumRequest - (*StopReplicationMinimumResponse)(nil), // 55: tabletmanagerdata.StopReplicationMinimumResponse - (*StartReplicationRequest)(nil), // 56: tabletmanagerdata.StartReplicationRequest - (*StartReplicationResponse)(nil), // 57: tabletmanagerdata.StartReplicationResponse - (*StartReplicationUntilAfterRequest)(nil), // 58: tabletmanagerdata.StartReplicationUntilAfterRequest - (*StartReplicationUntilAfterResponse)(nil), // 59: tabletmanagerdata.StartReplicationUntilAfterResponse - (*GetReplicasRequest)(nil), // 60: tabletmanagerdata.GetReplicasRequest - (*GetReplicasResponse)(nil), // 61: tabletmanagerdata.GetReplicasResponse - (*ResetReplicationRequest)(nil), // 62: tabletmanagerdata.ResetReplicationRequest - (*ResetReplicationResponse)(nil), // 63: tabletmanagerdata.ResetReplicationResponse - (*VReplicationExecRequest)(nil), // 64: tabletmanagerdata.VReplicationExecRequest - (*VReplicationExecResponse)(nil), // 65: tabletmanagerdata.VReplicationExecResponse - (*VReplicationWaitForPosRequest)(nil), // 66: tabletmanagerdata.VReplicationWaitForPosRequest - (*VReplicationWaitForPosResponse)(nil), // 67: tabletmanagerdata.VReplicationWaitForPosResponse - (*InitPrimaryRequest)(nil), // 68: tabletmanagerdata.InitPrimaryRequest - (*InitPrimaryResponse)(nil), // 69: tabletmanagerdata.InitPrimaryResponse - (*PopulateReparentJournalRequest)(nil), // 70: tabletmanagerdata.PopulateReparentJournalRequest - (*PopulateReparentJournalResponse)(nil), // 71: tabletmanagerdata.PopulateReparentJournalResponse - (*InitReplicaRequest)(nil), // 72: tabletmanagerdata.InitReplicaRequest - (*InitReplicaResponse)(nil), // 73: 
tabletmanagerdata.InitReplicaResponse - (*DemotePrimaryRequest)(nil), // 74: tabletmanagerdata.DemotePrimaryRequest - (*DemotePrimaryResponse)(nil), // 75: tabletmanagerdata.DemotePrimaryResponse - (*UndoDemotePrimaryRequest)(nil), // 76: tabletmanagerdata.UndoDemotePrimaryRequest - (*UndoDemotePrimaryResponse)(nil), // 77: tabletmanagerdata.UndoDemotePrimaryResponse - (*ReplicaWasPromotedRequest)(nil), // 78: tabletmanagerdata.ReplicaWasPromotedRequest - (*ReplicaWasPromotedResponse)(nil), // 79: tabletmanagerdata.ReplicaWasPromotedResponse - (*ResetReplicationParametersRequest)(nil), // 80: tabletmanagerdata.ResetReplicationParametersRequest - (*ResetReplicationParametersResponse)(nil), // 81: tabletmanagerdata.ResetReplicationParametersResponse - (*FullStatusRequest)(nil), // 82: tabletmanagerdata.FullStatusRequest - (*FullStatusResponse)(nil), // 83: tabletmanagerdata.FullStatusResponse - (*SetReplicationSourceRequest)(nil), // 84: tabletmanagerdata.SetReplicationSourceRequest - (*SetReplicationSourceResponse)(nil), // 85: tabletmanagerdata.SetReplicationSourceResponse - (*ReplicaWasRestartedRequest)(nil), // 86: tabletmanagerdata.ReplicaWasRestartedRequest - (*ReplicaWasRestartedResponse)(nil), // 87: tabletmanagerdata.ReplicaWasRestartedResponse - (*StopReplicationAndGetStatusRequest)(nil), // 88: tabletmanagerdata.StopReplicationAndGetStatusRequest - (*StopReplicationAndGetStatusResponse)(nil), // 89: tabletmanagerdata.StopReplicationAndGetStatusResponse - (*PromoteReplicaRequest)(nil), // 90: tabletmanagerdata.PromoteReplicaRequest - (*PromoteReplicaResponse)(nil), // 91: tabletmanagerdata.PromoteReplicaResponse - (*BackupRequest)(nil), // 92: tabletmanagerdata.BackupRequest - (*BackupResponse)(nil), // 93: tabletmanagerdata.BackupResponse - (*RestoreFromBackupRequest)(nil), // 94: tabletmanagerdata.RestoreFromBackupRequest - (*RestoreFromBackupResponse)(nil), // 95: tabletmanagerdata.RestoreFromBackupResponse - (*VDiffRequest)(nil), // 96: 
tabletmanagerdata.VDiffRequest - (*VDiffResponse)(nil), // 97: tabletmanagerdata.VDiffResponse - (*VDiffPickerOptions)(nil), // 98: tabletmanagerdata.VDiffPickerOptions - (*VDiffReportOptions)(nil), // 99: tabletmanagerdata.VDiffReportOptions - (*VDiffCoreOptions)(nil), // 100: tabletmanagerdata.VDiffCoreOptions - (*VDiffOptions)(nil), // 101: tabletmanagerdata.VDiffOptions - (*UpdateVRWorkflowRequest)(nil), // 102: tabletmanagerdata.UpdateVRWorkflowRequest - (*UpdateVRWorkflowResponse)(nil), // 103: tabletmanagerdata.UpdateVRWorkflowResponse - nil, // 104: tabletmanagerdata.UserPermission.PrivilegesEntry - nil, // 105: tabletmanagerdata.DbPermission.PrivilegesEntry - nil, // 106: tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry - (*query.Field)(nil), // 107: query.Field - (topodata.TabletType)(0), // 108: topodata.TabletType - (*vtrpc.CallerID)(nil), // 109: vtrpc.CallerID - (*query.QueryResult)(nil), // 110: query.QueryResult - (*replicationdata.Status)(nil), // 111: replicationdata.Status - (*replicationdata.PrimaryStatus)(nil), // 112: replicationdata.PrimaryStatus - (*topodata.TabletAlias)(nil), // 113: topodata.TabletAlias - (*replicationdata.FullStatus)(nil), // 114: replicationdata.FullStatus - (replicationdata.StopReplicationMode)(0), // 115: replicationdata.StopReplicationMode - (*replicationdata.StopReplicationStatus)(nil), // 116: replicationdata.StopReplicationStatus - (*logutil.Event)(nil), // 117: logutil.Event - (*vttime.Time)(nil), // 118: vttime.Time - (binlogdata.OnDDLAction)(0), // 119: binlogdata.OnDDLAction + (TabletSelectionPreference)(0), // 0: tabletmanagerdata.TabletSelectionPreference + (*TableDefinition)(nil), // 1: tabletmanagerdata.TableDefinition + (*SchemaDefinition)(nil), // 2: tabletmanagerdata.SchemaDefinition + (*SchemaChangeResult)(nil), // 3: tabletmanagerdata.SchemaChangeResult + (*UserPermission)(nil), // 4: tabletmanagerdata.UserPermission + (*DbPermission)(nil), // 5: tabletmanagerdata.DbPermission + (*Permissions)(nil), 
// 6: tabletmanagerdata.Permissions + (*PingRequest)(nil), // 7: tabletmanagerdata.PingRequest + (*PingResponse)(nil), // 8: tabletmanagerdata.PingResponse + (*SleepRequest)(nil), // 9: tabletmanagerdata.SleepRequest + (*SleepResponse)(nil), // 10: tabletmanagerdata.SleepResponse + (*ExecuteHookRequest)(nil), // 11: tabletmanagerdata.ExecuteHookRequest + (*ExecuteHookResponse)(nil), // 12: tabletmanagerdata.ExecuteHookResponse + (*GetSchemaRequest)(nil), // 13: tabletmanagerdata.GetSchemaRequest + (*GetSchemaResponse)(nil), // 14: tabletmanagerdata.GetSchemaResponse + (*GetPermissionsRequest)(nil), // 15: tabletmanagerdata.GetPermissionsRequest + (*GetPermissionsResponse)(nil), // 16: tabletmanagerdata.GetPermissionsResponse + (*SetReadOnlyRequest)(nil), // 17: tabletmanagerdata.SetReadOnlyRequest + (*SetReadOnlyResponse)(nil), // 18: tabletmanagerdata.SetReadOnlyResponse + (*SetReadWriteRequest)(nil), // 19: tabletmanagerdata.SetReadWriteRequest + (*SetReadWriteResponse)(nil), // 20: tabletmanagerdata.SetReadWriteResponse + (*ChangeTypeRequest)(nil), // 21: tabletmanagerdata.ChangeTypeRequest + (*ChangeTypeResponse)(nil), // 22: tabletmanagerdata.ChangeTypeResponse + (*RefreshStateRequest)(nil), // 23: tabletmanagerdata.RefreshStateRequest + (*RefreshStateResponse)(nil), // 24: tabletmanagerdata.RefreshStateResponse + (*RunHealthCheckRequest)(nil), // 25: tabletmanagerdata.RunHealthCheckRequest + (*RunHealthCheckResponse)(nil), // 26: tabletmanagerdata.RunHealthCheckResponse + (*ReloadSchemaRequest)(nil), // 27: tabletmanagerdata.ReloadSchemaRequest + (*ReloadSchemaResponse)(nil), // 28: tabletmanagerdata.ReloadSchemaResponse + (*PreflightSchemaRequest)(nil), // 29: tabletmanagerdata.PreflightSchemaRequest + (*PreflightSchemaResponse)(nil), // 30: tabletmanagerdata.PreflightSchemaResponse + (*ApplySchemaRequest)(nil), // 31: tabletmanagerdata.ApplySchemaRequest + (*ApplySchemaResponse)(nil), // 32: tabletmanagerdata.ApplySchemaResponse + (*LockTablesRequest)(nil), 
// 33: tabletmanagerdata.LockTablesRequest + (*LockTablesResponse)(nil), // 34: tabletmanagerdata.LockTablesResponse + (*UnlockTablesRequest)(nil), // 35: tabletmanagerdata.UnlockTablesRequest + (*UnlockTablesResponse)(nil), // 36: tabletmanagerdata.UnlockTablesResponse + (*ExecuteQueryRequest)(nil), // 37: tabletmanagerdata.ExecuteQueryRequest + (*ExecuteQueryResponse)(nil), // 38: tabletmanagerdata.ExecuteQueryResponse + (*ExecuteFetchAsDbaRequest)(nil), // 39: tabletmanagerdata.ExecuteFetchAsDbaRequest + (*ExecuteFetchAsDbaResponse)(nil), // 40: tabletmanagerdata.ExecuteFetchAsDbaResponse + (*ExecuteFetchAsAllPrivsRequest)(nil), // 41: tabletmanagerdata.ExecuteFetchAsAllPrivsRequest + (*ExecuteFetchAsAllPrivsResponse)(nil), // 42: tabletmanagerdata.ExecuteFetchAsAllPrivsResponse + (*ExecuteFetchAsAppRequest)(nil), // 43: tabletmanagerdata.ExecuteFetchAsAppRequest + (*ExecuteFetchAsAppResponse)(nil), // 44: tabletmanagerdata.ExecuteFetchAsAppResponse + (*ReplicationStatusRequest)(nil), // 45: tabletmanagerdata.ReplicationStatusRequest + (*ReplicationStatusResponse)(nil), // 46: tabletmanagerdata.ReplicationStatusResponse + (*PrimaryStatusRequest)(nil), // 47: tabletmanagerdata.PrimaryStatusRequest + (*PrimaryStatusResponse)(nil), // 48: tabletmanagerdata.PrimaryStatusResponse + (*PrimaryPositionRequest)(nil), // 49: tabletmanagerdata.PrimaryPositionRequest + (*PrimaryPositionResponse)(nil), // 50: tabletmanagerdata.PrimaryPositionResponse + (*WaitForPositionRequest)(nil), // 51: tabletmanagerdata.WaitForPositionRequest + (*WaitForPositionResponse)(nil), // 52: tabletmanagerdata.WaitForPositionResponse + (*StopReplicationRequest)(nil), // 53: tabletmanagerdata.StopReplicationRequest + (*StopReplicationResponse)(nil), // 54: tabletmanagerdata.StopReplicationResponse + (*StopReplicationMinimumRequest)(nil), // 55: tabletmanagerdata.StopReplicationMinimumRequest + (*StopReplicationMinimumResponse)(nil), // 56: tabletmanagerdata.StopReplicationMinimumResponse + 
(*StartReplicationRequest)(nil), // 57: tabletmanagerdata.StartReplicationRequest + (*StartReplicationResponse)(nil), // 58: tabletmanagerdata.StartReplicationResponse + (*StartReplicationUntilAfterRequest)(nil), // 59: tabletmanagerdata.StartReplicationUntilAfterRequest + (*StartReplicationUntilAfterResponse)(nil), // 60: tabletmanagerdata.StartReplicationUntilAfterResponse + (*GetReplicasRequest)(nil), // 61: tabletmanagerdata.GetReplicasRequest + (*GetReplicasResponse)(nil), // 62: tabletmanagerdata.GetReplicasResponse + (*ResetReplicationRequest)(nil), // 63: tabletmanagerdata.ResetReplicationRequest + (*ResetReplicationResponse)(nil), // 64: tabletmanagerdata.ResetReplicationResponse + (*VReplicationExecRequest)(nil), // 65: tabletmanagerdata.VReplicationExecRequest + (*VReplicationExecResponse)(nil), // 66: tabletmanagerdata.VReplicationExecResponse + (*VReplicationWaitForPosRequest)(nil), // 67: tabletmanagerdata.VReplicationWaitForPosRequest + (*VReplicationWaitForPosResponse)(nil), // 68: tabletmanagerdata.VReplicationWaitForPosResponse + (*InitPrimaryRequest)(nil), // 69: tabletmanagerdata.InitPrimaryRequest + (*InitPrimaryResponse)(nil), // 70: tabletmanagerdata.InitPrimaryResponse + (*PopulateReparentJournalRequest)(nil), // 71: tabletmanagerdata.PopulateReparentJournalRequest + (*PopulateReparentJournalResponse)(nil), // 72: tabletmanagerdata.PopulateReparentJournalResponse + (*InitReplicaRequest)(nil), // 73: tabletmanagerdata.InitReplicaRequest + (*InitReplicaResponse)(nil), // 74: tabletmanagerdata.InitReplicaResponse + (*DemotePrimaryRequest)(nil), // 75: tabletmanagerdata.DemotePrimaryRequest + (*DemotePrimaryResponse)(nil), // 76: tabletmanagerdata.DemotePrimaryResponse + (*UndoDemotePrimaryRequest)(nil), // 77: tabletmanagerdata.UndoDemotePrimaryRequest + (*UndoDemotePrimaryResponse)(nil), // 78: tabletmanagerdata.UndoDemotePrimaryResponse + (*ReplicaWasPromotedRequest)(nil), // 79: tabletmanagerdata.ReplicaWasPromotedRequest + 
(*ReplicaWasPromotedResponse)(nil), // 80: tabletmanagerdata.ReplicaWasPromotedResponse + (*ResetReplicationParametersRequest)(nil), // 81: tabletmanagerdata.ResetReplicationParametersRequest + (*ResetReplicationParametersResponse)(nil), // 82: tabletmanagerdata.ResetReplicationParametersResponse + (*FullStatusRequest)(nil), // 83: tabletmanagerdata.FullStatusRequest + (*FullStatusResponse)(nil), // 84: tabletmanagerdata.FullStatusResponse + (*SetReplicationSourceRequest)(nil), // 85: tabletmanagerdata.SetReplicationSourceRequest + (*SetReplicationSourceResponse)(nil), // 86: tabletmanagerdata.SetReplicationSourceResponse + (*ReplicaWasRestartedRequest)(nil), // 87: tabletmanagerdata.ReplicaWasRestartedRequest + (*ReplicaWasRestartedResponse)(nil), // 88: tabletmanagerdata.ReplicaWasRestartedResponse + (*StopReplicationAndGetStatusRequest)(nil), // 89: tabletmanagerdata.StopReplicationAndGetStatusRequest + (*StopReplicationAndGetStatusResponse)(nil), // 90: tabletmanagerdata.StopReplicationAndGetStatusResponse + (*PromoteReplicaRequest)(nil), // 91: tabletmanagerdata.PromoteReplicaRequest + (*PromoteReplicaResponse)(nil), // 92: tabletmanagerdata.PromoteReplicaResponse + (*BackupRequest)(nil), // 93: tabletmanagerdata.BackupRequest + (*BackupResponse)(nil), // 94: tabletmanagerdata.BackupResponse + (*RestoreFromBackupRequest)(nil), // 95: tabletmanagerdata.RestoreFromBackupRequest + (*RestoreFromBackupResponse)(nil), // 96: tabletmanagerdata.RestoreFromBackupResponse + (*CreateVReplicationWorkflowRequest)(nil), // 97: tabletmanagerdata.CreateVReplicationWorkflowRequest + (*CreateVReplicationWorkflowResponse)(nil), // 98: tabletmanagerdata.CreateVReplicationWorkflowResponse + (*DeleteVReplicationWorkflowRequest)(nil), // 99: tabletmanagerdata.DeleteVReplicationWorkflowRequest + (*DeleteVReplicationWorkflowResponse)(nil), // 100: tabletmanagerdata.DeleteVReplicationWorkflowResponse + (*ReadVReplicationWorkflowRequest)(nil), // 101: 
tabletmanagerdata.ReadVReplicationWorkflowRequest + (*ReadVReplicationWorkflowResponse)(nil), // 102: tabletmanagerdata.ReadVReplicationWorkflowResponse + (*VDiffRequest)(nil), // 103: tabletmanagerdata.VDiffRequest + (*VDiffResponse)(nil), // 104: tabletmanagerdata.VDiffResponse + (*VDiffPickerOptions)(nil), // 105: tabletmanagerdata.VDiffPickerOptions + (*VDiffReportOptions)(nil), // 106: tabletmanagerdata.VDiffReportOptions + (*VDiffCoreOptions)(nil), // 107: tabletmanagerdata.VDiffCoreOptions + (*VDiffOptions)(nil), // 108: tabletmanagerdata.VDiffOptions + (*UpdateVReplicationWorkflowRequest)(nil), // 109: tabletmanagerdata.UpdateVReplicationWorkflowRequest + (*UpdateVReplicationWorkflowResponse)(nil), // 110: tabletmanagerdata.UpdateVReplicationWorkflowResponse + (*ResetSequencesRequest)(nil), // 111: tabletmanagerdata.ResetSequencesRequest + (*ResetSequencesResponse)(nil), // 112: tabletmanagerdata.ResetSequencesResponse + (*CheckThrottlerRequest)(nil), // 113: tabletmanagerdata.CheckThrottlerRequest + (*CheckThrottlerResponse)(nil), // 114: tabletmanagerdata.CheckThrottlerResponse + nil, // 115: tabletmanagerdata.UserPermission.PrivilegesEntry + nil, // 116: tabletmanagerdata.DbPermission.PrivilegesEntry + nil, // 117: tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry + (*ReadVReplicationWorkflowResponse_Stream)(nil), // 118: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + (*query.Field)(nil), // 119: query.Field + (topodata.TabletType)(0), // 120: topodata.TabletType + (*vtrpc.CallerID)(nil), // 121: vtrpc.CallerID + (*query.QueryResult)(nil), // 122: query.QueryResult + (*replicationdata.Status)(nil), // 123: replicationdata.Status + (*replicationdata.PrimaryStatus)(nil), // 124: replicationdata.PrimaryStatus + (*topodata.TabletAlias)(nil), // 125: topodata.TabletAlias + (*replicationdata.FullStatus)(nil), // 126: replicationdata.FullStatus + (replicationdata.StopReplicationMode)(0), // 127: replicationdata.StopReplicationMode + 
(*replicationdata.StopReplicationStatus)(nil), // 128: replicationdata.StopReplicationStatus + (*logutil.Event)(nil), // 129: logutil.Event + (*vttime.Time)(nil), // 130: vttime.Time + (*binlogdata.BinlogSource)(nil), // 131: binlogdata.BinlogSource + (binlogdata.VReplicationWorkflowType)(0), // 132: binlogdata.VReplicationWorkflowType + (binlogdata.VReplicationWorkflowSubType)(0), // 133: binlogdata.VReplicationWorkflowSubType + (binlogdata.OnDDLAction)(0), // 134: binlogdata.OnDDLAction + (binlogdata.VReplicationWorkflowState)(0), // 135: binlogdata.VReplicationWorkflowState } var file_tabletmanagerdata_proto_depIdxs = []int32{ - 107, // 0: tabletmanagerdata.TableDefinition.fields:type_name -> query.Field - 0, // 1: tabletmanagerdata.SchemaDefinition.table_definitions:type_name -> tabletmanagerdata.TableDefinition - 1, // 2: tabletmanagerdata.SchemaChangeResult.before_schema:type_name -> tabletmanagerdata.SchemaDefinition - 1, // 3: tabletmanagerdata.SchemaChangeResult.after_schema:type_name -> tabletmanagerdata.SchemaDefinition - 104, // 4: tabletmanagerdata.UserPermission.privileges:type_name -> tabletmanagerdata.UserPermission.PrivilegesEntry - 105, // 5: tabletmanagerdata.DbPermission.privileges:type_name -> tabletmanagerdata.DbPermission.PrivilegesEntry - 3, // 6: tabletmanagerdata.Permissions.user_permissions:type_name -> tabletmanagerdata.UserPermission - 4, // 7: tabletmanagerdata.Permissions.db_permissions:type_name -> tabletmanagerdata.DbPermission - 106, // 8: tabletmanagerdata.ExecuteHookRequest.extra_env:type_name -> tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry - 1, // 9: tabletmanagerdata.GetSchemaResponse.schema_definition:type_name -> tabletmanagerdata.SchemaDefinition - 5, // 10: tabletmanagerdata.GetPermissionsResponse.permissions:type_name -> tabletmanagerdata.Permissions - 108, // 11: tabletmanagerdata.ChangeTypeRequest.tablet_type:type_name -> topodata.TabletType - 2, // 12: 
tabletmanagerdata.PreflightSchemaResponse.change_results:type_name -> tabletmanagerdata.SchemaChangeResult - 1, // 13: tabletmanagerdata.ApplySchemaRequest.before_schema:type_name -> tabletmanagerdata.SchemaDefinition - 1, // 14: tabletmanagerdata.ApplySchemaRequest.after_schema:type_name -> tabletmanagerdata.SchemaDefinition - 1, // 15: tabletmanagerdata.ApplySchemaResponse.before_schema:type_name -> tabletmanagerdata.SchemaDefinition - 1, // 16: tabletmanagerdata.ApplySchemaResponse.after_schema:type_name -> tabletmanagerdata.SchemaDefinition - 109, // 17: tabletmanagerdata.ExecuteQueryRequest.caller_id:type_name -> vtrpc.CallerID - 110, // 18: tabletmanagerdata.ExecuteQueryResponse.result:type_name -> query.QueryResult - 110, // 19: tabletmanagerdata.ExecuteFetchAsDbaResponse.result:type_name -> query.QueryResult - 110, // 20: tabletmanagerdata.ExecuteFetchAsAllPrivsResponse.result:type_name -> query.QueryResult - 110, // 21: tabletmanagerdata.ExecuteFetchAsAppResponse.result:type_name -> query.QueryResult - 111, // 22: tabletmanagerdata.ReplicationStatusResponse.status:type_name -> replicationdata.Status - 112, // 23: tabletmanagerdata.PrimaryStatusResponse.status:type_name -> replicationdata.PrimaryStatus - 110, // 24: tabletmanagerdata.VReplicationExecResponse.result:type_name -> query.QueryResult - 113, // 25: tabletmanagerdata.PopulateReparentJournalRequest.primary_alias:type_name -> topodata.TabletAlias - 113, // 26: tabletmanagerdata.InitReplicaRequest.parent:type_name -> topodata.TabletAlias - 112, // 27: tabletmanagerdata.DemotePrimaryResponse.primary_status:type_name -> replicationdata.PrimaryStatus - 114, // 28: tabletmanagerdata.FullStatusResponse.status:type_name -> replicationdata.FullStatus - 113, // 29: tabletmanagerdata.SetReplicationSourceRequest.parent:type_name -> topodata.TabletAlias - 113, // 30: tabletmanagerdata.ReplicaWasRestartedRequest.parent:type_name -> topodata.TabletAlias - 115, // 31: 
tabletmanagerdata.StopReplicationAndGetStatusRequest.stop_replication_mode:type_name -> replicationdata.StopReplicationMode - 116, // 32: tabletmanagerdata.StopReplicationAndGetStatusResponse.status:type_name -> replicationdata.StopReplicationStatus - 117, // 33: tabletmanagerdata.BackupResponse.event:type_name -> logutil.Event - 118, // 34: tabletmanagerdata.RestoreFromBackupRequest.backup_time:type_name -> vttime.Time - 117, // 35: tabletmanagerdata.RestoreFromBackupResponse.event:type_name -> logutil.Event - 101, // 36: tabletmanagerdata.VDiffRequest.options:type_name -> tabletmanagerdata.VDiffOptions - 110, // 37: tabletmanagerdata.VDiffResponse.output:type_name -> query.QueryResult - 98, // 38: tabletmanagerdata.VDiffOptions.picker_options:type_name -> tabletmanagerdata.VDiffPickerOptions - 100, // 39: tabletmanagerdata.VDiffOptions.core_options:type_name -> tabletmanagerdata.VDiffCoreOptions - 99, // 40: tabletmanagerdata.VDiffOptions.report_options:type_name -> tabletmanagerdata.VDiffReportOptions - 119, // 41: tabletmanagerdata.UpdateVRWorkflowRequest.on_ddl:type_name -> binlogdata.OnDDLAction - 110, // 42: tabletmanagerdata.UpdateVRWorkflowResponse.result:type_name -> query.QueryResult - 43, // [43:43] is the sub-list for method output_type - 43, // [43:43] is the sub-list for method input_type - 43, // [43:43] is the sub-list for extension type_name - 43, // [43:43] is the sub-list for extension extendee - 0, // [0:43] is the sub-list for field type_name + 119, // 0: tabletmanagerdata.TableDefinition.fields:type_name -> query.Field + 1, // 1: tabletmanagerdata.SchemaDefinition.table_definitions:type_name -> tabletmanagerdata.TableDefinition + 2, // 2: tabletmanagerdata.SchemaChangeResult.before_schema:type_name -> tabletmanagerdata.SchemaDefinition + 2, // 3: tabletmanagerdata.SchemaChangeResult.after_schema:type_name -> tabletmanagerdata.SchemaDefinition + 115, // 4: tabletmanagerdata.UserPermission.privileges:type_name -> 
tabletmanagerdata.UserPermission.PrivilegesEntry + 116, // 5: tabletmanagerdata.DbPermission.privileges:type_name -> tabletmanagerdata.DbPermission.PrivilegesEntry + 4, // 6: tabletmanagerdata.Permissions.user_permissions:type_name -> tabletmanagerdata.UserPermission + 5, // 7: tabletmanagerdata.Permissions.db_permissions:type_name -> tabletmanagerdata.DbPermission + 117, // 8: tabletmanagerdata.ExecuteHookRequest.extra_env:type_name -> tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry + 2, // 9: tabletmanagerdata.GetSchemaResponse.schema_definition:type_name -> tabletmanagerdata.SchemaDefinition + 6, // 10: tabletmanagerdata.GetPermissionsResponse.permissions:type_name -> tabletmanagerdata.Permissions + 120, // 11: tabletmanagerdata.ChangeTypeRequest.tablet_type:type_name -> topodata.TabletType + 3, // 12: tabletmanagerdata.PreflightSchemaResponse.change_results:type_name -> tabletmanagerdata.SchemaChangeResult + 2, // 13: tabletmanagerdata.ApplySchemaRequest.before_schema:type_name -> tabletmanagerdata.SchemaDefinition + 2, // 14: tabletmanagerdata.ApplySchemaRequest.after_schema:type_name -> tabletmanagerdata.SchemaDefinition + 2, // 15: tabletmanagerdata.ApplySchemaResponse.before_schema:type_name -> tabletmanagerdata.SchemaDefinition + 2, // 16: tabletmanagerdata.ApplySchemaResponse.after_schema:type_name -> tabletmanagerdata.SchemaDefinition + 121, // 17: tabletmanagerdata.ExecuteQueryRequest.caller_id:type_name -> vtrpc.CallerID + 122, // 18: tabletmanagerdata.ExecuteQueryResponse.result:type_name -> query.QueryResult + 122, // 19: tabletmanagerdata.ExecuteFetchAsDbaResponse.result:type_name -> query.QueryResult + 122, // 20: tabletmanagerdata.ExecuteFetchAsAllPrivsResponse.result:type_name -> query.QueryResult + 122, // 21: tabletmanagerdata.ExecuteFetchAsAppResponse.result:type_name -> query.QueryResult + 123, // 22: tabletmanagerdata.ReplicationStatusResponse.status:type_name -> replicationdata.Status + 124, // 23: 
tabletmanagerdata.PrimaryStatusResponse.status:type_name -> replicationdata.PrimaryStatus + 122, // 24: tabletmanagerdata.VReplicationExecResponse.result:type_name -> query.QueryResult + 125, // 25: tabletmanagerdata.PopulateReparentJournalRequest.primary_alias:type_name -> topodata.TabletAlias + 125, // 26: tabletmanagerdata.InitReplicaRequest.parent:type_name -> topodata.TabletAlias + 124, // 27: tabletmanagerdata.DemotePrimaryResponse.primary_status:type_name -> replicationdata.PrimaryStatus + 126, // 28: tabletmanagerdata.FullStatusResponse.status:type_name -> replicationdata.FullStatus + 125, // 29: tabletmanagerdata.SetReplicationSourceRequest.parent:type_name -> topodata.TabletAlias + 125, // 30: tabletmanagerdata.ReplicaWasRestartedRequest.parent:type_name -> topodata.TabletAlias + 127, // 31: tabletmanagerdata.StopReplicationAndGetStatusRequest.stop_replication_mode:type_name -> replicationdata.StopReplicationMode + 128, // 32: tabletmanagerdata.StopReplicationAndGetStatusResponse.status:type_name -> replicationdata.StopReplicationStatus + 129, // 33: tabletmanagerdata.BackupResponse.event:type_name -> logutil.Event + 130, // 34: tabletmanagerdata.RestoreFromBackupRequest.backup_time:type_name -> vttime.Time + 130, // 35: tabletmanagerdata.RestoreFromBackupRequest.restore_to_timestamp:type_name -> vttime.Time + 129, // 36: tabletmanagerdata.RestoreFromBackupResponse.event:type_name -> logutil.Event + 131, // 37: tabletmanagerdata.CreateVReplicationWorkflowRequest.binlog_source:type_name -> binlogdata.BinlogSource + 120, // 38: tabletmanagerdata.CreateVReplicationWorkflowRequest.tablet_types:type_name -> topodata.TabletType + 0, // 39: tabletmanagerdata.CreateVReplicationWorkflowRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 132, // 40: tabletmanagerdata.CreateVReplicationWorkflowRequest.workflow_type:type_name -> binlogdata.VReplicationWorkflowType + 133, // 41: 
tabletmanagerdata.CreateVReplicationWorkflowRequest.workflow_sub_type:type_name -> binlogdata.VReplicationWorkflowSubType + 122, // 42: tabletmanagerdata.CreateVReplicationWorkflowResponse.result:type_name -> query.QueryResult + 122, // 43: tabletmanagerdata.DeleteVReplicationWorkflowResponse.result:type_name -> query.QueryResult + 120, // 44: tabletmanagerdata.ReadVReplicationWorkflowResponse.tablet_types:type_name -> topodata.TabletType + 0, // 45: tabletmanagerdata.ReadVReplicationWorkflowResponse.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 132, // 46: tabletmanagerdata.ReadVReplicationWorkflowResponse.workflow_type:type_name -> binlogdata.VReplicationWorkflowType + 133, // 47: tabletmanagerdata.ReadVReplicationWorkflowResponse.workflow_sub_type:type_name -> binlogdata.VReplicationWorkflowSubType + 118, // 48: tabletmanagerdata.ReadVReplicationWorkflowResponse.streams:type_name -> tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + 108, // 49: tabletmanagerdata.VDiffRequest.options:type_name -> tabletmanagerdata.VDiffOptions + 122, // 50: tabletmanagerdata.VDiffResponse.output:type_name -> query.QueryResult + 105, // 51: tabletmanagerdata.VDiffOptions.picker_options:type_name -> tabletmanagerdata.VDiffPickerOptions + 107, // 52: tabletmanagerdata.VDiffOptions.core_options:type_name -> tabletmanagerdata.VDiffCoreOptions + 106, // 53: tabletmanagerdata.VDiffOptions.report_options:type_name -> tabletmanagerdata.VDiffReportOptions + 120, // 54: tabletmanagerdata.UpdateVReplicationWorkflowRequest.tablet_types:type_name -> topodata.TabletType + 0, // 55: tabletmanagerdata.UpdateVReplicationWorkflowRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 134, // 56: tabletmanagerdata.UpdateVReplicationWorkflowRequest.on_ddl:type_name -> binlogdata.OnDDLAction + 135, // 57: tabletmanagerdata.UpdateVReplicationWorkflowRequest.state:type_name -> binlogdata.VReplicationWorkflowState + 
122, // 58: tabletmanagerdata.UpdateVReplicationWorkflowResponse.result:type_name -> query.QueryResult + 131, // 59: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.bls:type_name -> binlogdata.BinlogSource + 130, // 60: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.time_updated:type_name -> vttime.Time + 130, // 61: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.transaction_timestamp:type_name -> vttime.Time + 135, // 62: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.state:type_name -> binlogdata.VReplicationWorkflowState + 130, // 63: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.time_heartbeat:type_name -> vttime.Time + 130, // 64: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.time_throttled:type_name -> vttime.Time + 65, // [65:65] is the sub-list for method output_type + 65, // [65:65] is the sub-list for method input_type + 65, // [65:65] is the sub-list for extension type_name + 65, // [65:65] is the sub-list for extension extendee + 0, // [0:65] is the sub-list for field type_name } func init() { file_tabletmanagerdata_proto_init() } @@ -7172,7 +8291,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VDiffRequest); i { + switch v := v.(*CreateVReplicationWorkflowRequest); i { case 0: return &v.state case 1: @@ -7184,7 +8303,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VDiffResponse); i { + switch v := v.(*CreateVReplicationWorkflowResponse); i { case 0: return &v.state case 1: @@ -7196,7 +8315,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[98].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VDiffPickerOptions); i { + switch v := v.(*DeleteVReplicationWorkflowRequest); i { case 0: return &v.state 
case 1: @@ -7208,7 +8327,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VDiffReportOptions); i { + switch v := v.(*DeleteVReplicationWorkflowResponse); i { case 0: return &v.state case 1: @@ -7220,7 +8339,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VDiffCoreOptions); i { + switch v := v.(*ReadVReplicationWorkflowRequest); i { case 0: return &v.state case 1: @@ -7232,7 +8351,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VDiffOptions); i { + switch v := v.(*ReadVReplicationWorkflowResponse); i { case 0: return &v.state case 1: @@ -7244,7 +8363,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[102].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateVRWorkflowRequest); i { + switch v := v.(*VDiffRequest); i { case 0: return &v.state case 1: @@ -7256,7 +8375,139 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateVRWorkflowResponse); i { + switch v := v.(*VDiffResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VDiffPickerOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VDiffReportOptions); i { + case 0: + return &v.state + case 1: + 
return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VDiffCoreOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[107].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VDiffOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[108].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateVReplicationWorkflowRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[109].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateVReplicationWorkflowResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[110].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResetSequencesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[111].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResetSequencesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[112].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckThrottlerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_tabletmanagerdata_proto_msgTypes[113].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckThrottlerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[117].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadVReplicationWorkflowResponse_Stream); i { case 0: return &v.state case 1: @@ -7273,13 +8524,14 @@ func file_tabletmanagerdata_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_tabletmanagerdata_proto_rawDesc, - NumEnums: 0, - NumMessages: 107, + NumEnums: 1, + NumMessages: 118, NumExtensions: 0, NumServices: 0, }, GoTypes: file_tabletmanagerdata_proto_goTypes, DependencyIndexes: file_tabletmanagerdata_proto_depIdxs, + EnumInfos: file_tabletmanagerdata_proto_enumTypes, MessageInfos: file_tabletmanagerdata_proto_msgTypes, }.Build() File_tabletmanagerdata_proto = out.File diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go index 091bf9ff0d3..502a4c17ff9 100644 --- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go +++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go @@ -1,13 +1,16 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
-// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: tabletmanagerdata.proto package tabletmanagerdata import ( + binary "encoding/binary" fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" + math "math" bits "math/bits" binlogdata "vitess.io/vitess/go/vt/proto/binlogdata" logutil "vitess.io/vitess/go/vt/proto/logutil" @@ -25,2210 +28,2242 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -func (m *TableDefinition) MarshalVT() (dAtA []byte, err error) { +func (m *TableDefinition) CloneVT() *TableDefinition { if m == nil { - return nil, nil + return (*TableDefinition)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &TableDefinition{ + Name: m.Name, + Schema: m.Schema, + Type: m.Type, + DataLength: m.DataLength, + RowCount: m.RowCount, } - return dAtA[:n], nil + if rhs := m.Columns; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Columns = tmpContainer + } + if rhs := m.PrimaryKeyColumns; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.PrimaryKeyColumns = tmpContainer + } + if rhs := m.Fields; rhs != nil { + tmpContainer := make([]*query.Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Fields = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *TableDefinition) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *TableDefinition) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *TableDefinition) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SchemaDefinition) CloneVT() *SchemaDefinition { if m == nil { - return 0, nil + 
return (*SchemaDefinition)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SchemaDefinition{ + DatabaseSchema: m.DatabaseSchema, } - if len(m.Fields) > 0 { - for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Fields[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x42 + if rhs := m.TableDefinitions; rhs != nil { + tmpContainer := make([]*TableDefinition, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } + r.TableDefinitions = tmpContainer } - if m.RowCount != 0 { - i = encodeVarint(dAtA, i, uint64(m.RowCount)) - i-- - dAtA[i] = 0x38 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.DataLength != 0 { - i = encodeVarint(dAtA, i, uint64(m.DataLength)) - i-- - dAtA[i] = 0x30 + return r +} + +func (m *SchemaDefinition) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SchemaChangeResult) CloneVT() *SchemaChangeResult { + if m == nil { + return (*SchemaChangeResult)(nil) } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarint(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x2a + r := &SchemaChangeResult{ + BeforeSchema: m.BeforeSchema.CloneVT(), + AfterSchema: m.AfterSchema.CloneVT(), } - if len(m.PrimaryKeyColumns) > 0 { - for iNdEx := len(m.PrimaryKeyColumns) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PrimaryKeyColumns[iNdEx]) - copy(dAtA[i:], m.PrimaryKeyColumns[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.PrimaryKeyColumns[iNdEx]))) - i-- - dAtA[i] = 0x22 - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Columns) > 0 { - for iNdEx := len(m.Columns) - 1; iNdEx >= 0; iNdEx-- { - i -= 
len(m.Columns[iNdEx]) - copy(dAtA[i:], m.Columns[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Columns[iNdEx]))) - i-- - dAtA[i] = 0x1a - } + return r +} + +func (m *SchemaChangeResult) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UserPermission) CloneVT() *UserPermission { + if m == nil { + return (*UserPermission)(nil) } - if len(m.Schema) > 0 { - i -= len(m.Schema) - copy(dAtA[i:], m.Schema) - i = encodeVarint(dAtA, i, uint64(len(m.Schema))) - i-- - dAtA[i] = 0x12 + r := &UserPermission{ + Host: m.Host, + User: m.User, + PasswordChecksum: m.PasswordChecksum, } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if rhs := m.Privileges; rhs != nil { + tmpContainer := make(map[string]string, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.Privileges = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *SchemaDefinition) MarshalVT() (dAtA []byte, err error) { +func (m *UserPermission) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DbPermission) CloneVT() *DbPermission { if m == nil { - return nil, nil + return (*DbPermission)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &DbPermission{ + Host: m.Host, + Db: m.Db, + User: m.User, } - return dAtA[:n], nil + if rhs := m.Privileges; rhs != nil { + tmpContainer := make(map[string]string, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.Privileges = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *SchemaDefinition) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return 
m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *DbPermission) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *SchemaDefinition) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Permissions) CloneVT() *Permissions { if m == nil { - return 0, nil + return (*Permissions)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &Permissions{} + if rhs := m.UserPermissions; rhs != nil { + tmpContainer := make([]*UserPermission, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.UserPermissions = tmpContainer } - if len(m.TableDefinitions) > 0 { - for iNdEx := len(m.TableDefinitions) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.TableDefinitions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + if rhs := m.DbPermissions; rhs != nil { + tmpContainer := make([]*DbPermission, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } + r.DbPermissions = tmpContainer } - if len(m.DatabaseSchema) > 0 { - i -= len(m.DatabaseSchema) - copy(dAtA[i:], m.DatabaseSchema) - i = encodeVarint(dAtA, i, uint64(len(m.DatabaseSchema))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *SchemaChangeResult) MarshalVT() (dAtA []byte, err error) { +func (m *Permissions) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PingRequest) CloneVT() *PingRequest { if m == nil { - return nil, nil + return (*PingRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &PingRequest{ + Payload: m.Payload, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + 
r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *SchemaChangeResult) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *PingRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *SchemaChangeResult) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PingResponse) CloneVT() *PingResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*PingResponse)(nil) } - if m.AfterSchema != nil { - size, err := m.AfterSchema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + r := &PingResponse{ + Payload: m.Payload, } - if m.BeforeSchema != nil { - size, err := m.BeforeSchema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *UserPermission) MarshalVT() (dAtA []byte, err error) { +func (m *PingResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SleepRequest) CloneVT() *SleepRequest { if m == nil { - return nil, nil + return (*SleepRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SleepRequest{ + Duration: m.Duration, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *UserPermission) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return 
m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SleepRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *UserPermission) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SleepResponse) CloneVT() *SleepResponse { if m == nil { - return 0, nil + return (*SleepResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SleepResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Privileges) > 0 { - for k := range m.Privileges { - v := m.Privileges[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarint(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x22 - } + return r +} + +func (m *SleepResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteHookRequest) CloneVT() *ExecuteHookRequest { + if m == nil { + return (*ExecuteHookRequest)(nil) } - if m.PasswordChecksum != 0 { - i = encodeVarint(dAtA, i, uint64(m.PasswordChecksum)) - i-- - dAtA[i] = 0x18 + r := &ExecuteHookRequest{ + Name: m.Name, } - if len(m.User) > 0 { - i -= len(m.User) - copy(dAtA[i:], m.User) - i = encodeVarint(dAtA, i, uint64(len(m.User))) - i-- - dAtA[i] = 0x12 + if rhs := m.Parameters; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Parameters = tmpContainer } - if len(m.Host) > 0 { - i -= len(m.Host) - copy(dAtA[i:], m.Host) - i = encodeVarint(dAtA, i, uint64(len(m.Host))) - i-- - dAtA[i] = 0xa + if rhs := m.ExtraEnv; rhs != nil { + tmpContainer := make(map[string]string, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.ExtraEnv = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + 
r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DbPermission) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteHookRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteHookResponse) CloneVT() *ExecuteHookResponse { if m == nil { - return nil, nil + return (*ExecuteHookResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ExecuteHookResponse{ + ExitStatus: m.ExitStatus, + Stdout: m.Stdout, + Stderr: m.Stderr, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DbPermission) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ExecuteHookResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DbPermission) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSchemaRequest) CloneVT() *GetSchemaRequest { if m == nil { - return 0, nil + return (*GetSchemaRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetSchemaRequest{ + IncludeViews: m.IncludeViews, + TableSchemaOnly: m.TableSchemaOnly, } - if len(m.Privileges) > 0 { - for k := range m.Privileges { - v := m.Privileges[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarint(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x22 - } + if rhs := m.Tables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tables = tmpContainer } - if len(m.User) > 0 { - i -= len(m.User) - 
copy(dAtA[i:], m.User) - i = encodeVarint(dAtA, i, uint64(len(m.User))) - i-- - dAtA[i] = 0x1a + if rhs := m.ExcludeTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ExcludeTables = tmpContainer } - if len(m.Db) > 0 { - i -= len(m.Db) - copy(dAtA[i:], m.Db) - i = encodeVarint(dAtA, i, uint64(len(m.Db))) - i-- - dAtA[i] = 0x12 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Host) > 0 { - i -= len(m.Host) - copy(dAtA[i:], m.Host) - i = encodeVarint(dAtA, i, uint64(len(m.Host))) - i-- - dAtA[i] = 0xa + return r +} + +func (m *GetSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemaResponse) CloneVT() *GetSchemaResponse { + if m == nil { + return (*GetSchemaResponse)(nil) } - return len(dAtA) - i, nil + r := &GetSchemaResponse{ + SchemaDefinition: m.SchemaDefinition.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Permissions) MarshalVT() (dAtA []byte, err error) { +func (m *GetSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetPermissionsRequest) CloneVT() *GetPermissionsRequest { if m == nil { - return nil, nil + return (*GetPermissionsRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetPermissionsRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *Permissions) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetPermissionsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *Permissions) 
MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetPermissionsResponse) CloneVT() *GetPermissionsResponse { if m == nil { - return 0, nil + return (*GetPermissionsResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetPermissionsResponse{ + Permissions: m.Permissions.CloneVT(), } - if len(m.DbPermissions) > 0 { - for iNdEx := len(m.DbPermissions) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.DbPermissions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.UserPermissions) > 0 { - for iNdEx := len(m.UserPermissions) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.UserPermissions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } + return r +} + +func (m *GetPermissionsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetReadOnlyRequest) CloneVT() *SetReadOnlyRequest { + if m == nil { + return (*SetReadOnlyRequest)(nil) } - return len(dAtA) - i, nil + r := &SetReadOnlyRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *PingRequest) MarshalVT() (dAtA []byte, err error) { +func (m *SetReadOnlyRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetReadOnlyResponse) CloneVT() *SetReadOnlyResponse { if m == nil { - return nil, nil + return (*SetReadOnlyResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SetReadOnlyResponse{} + if 
len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *PingRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SetReadOnlyResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *PingRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetReadWriteRequest) CloneVT() *SetReadWriteRequest { if m == nil { - return 0, nil + return (*SetReadWriteRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SetReadWriteRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Payload) > 0 { - i -= len(m.Payload) - copy(dAtA[i:], m.Payload) - i = encodeVarint(dAtA, i, uint64(len(m.Payload))) - i-- - dAtA[i] = 0xa + return r +} + +func (m *SetReadWriteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetReadWriteResponse) CloneVT() *SetReadWriteResponse { + if m == nil { + return (*SetReadWriteResponse)(nil) } - return len(dAtA) - i, nil + r := &SetReadWriteResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *PingResponse) MarshalVT() (dAtA []byte, err error) { +func (m *SetReadWriteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ChangeTypeRequest) CloneVT() *ChangeTypeRequest { if m == nil { - return nil, nil + return (*ChangeTypeRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ChangeTypeRequest{ + TabletType: m.TabletType, + SemiSync: m.SemiSync, } - return dAtA[:n], nil + if 
len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *PingResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ChangeTypeRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *PingResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ChangeTypeResponse) CloneVT() *ChangeTypeResponse { if m == nil { - return 0, nil + return (*ChangeTypeResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ChangeTypeResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Payload) > 0 { - i -= len(m.Payload) - copy(dAtA[i:], m.Payload) - i = encodeVarint(dAtA, i, uint64(len(m.Payload))) - i-- - dAtA[i] = 0xa + return r +} + +func (m *ChangeTypeResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshStateRequest) CloneVT() *RefreshStateRequest { + if m == nil { + return (*RefreshStateRequest)(nil) } - return len(dAtA) - i, nil + r := &RefreshStateRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *SleepRequest) MarshalVT() (dAtA []byte, err error) { +func (m *RefreshStateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshStateResponse) CloneVT() *RefreshStateResponse { if m == nil { - return nil, nil + return (*RefreshStateResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RefreshStateResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + 
copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *SleepRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RefreshStateResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *SleepRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RunHealthCheckRequest) CloneVT() *RunHealthCheckRequest { if m == nil { - return 0, nil + return (*RunHealthCheckRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &RunHealthCheckRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.Duration != 0 { - i = encodeVarint(dAtA, i, uint64(m.Duration)) - i-- - dAtA[i] = 0x8 + return r +} + +func (m *RunHealthCheckRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RunHealthCheckResponse) CloneVT() *RunHealthCheckResponse { + if m == nil { + return (*RunHealthCheckResponse)(nil) } - return len(dAtA) - i, nil + r := &RunHealthCheckResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *SleepResponse) MarshalVT() (dAtA []byte, err error) { +func (m *RunHealthCheckResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemaRequest) CloneVT() *ReloadSchemaRequest { if m == nil { - return nil, nil + return (*ReloadSchemaRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ReloadSchemaRequest{ + WaitPosition: m.WaitPosition, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return 
r } -func (m *SleepResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ReloadSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *SleepResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReloadSchemaResponse) CloneVT() *ReloadSchemaResponse { if m == nil { - return 0, nil + return (*ReloadSchemaResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ReloadSchemaResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteHookRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ReloadSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PreflightSchemaRequest) CloneVT() *PreflightSchemaRequest { if m == nil { - return nil, nil + return (*PreflightSchemaRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &PreflightSchemaRequest{} + if rhs := m.Changes; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Changes = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteHookRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *PreflightSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteHookRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PreflightSchemaResponse) CloneVT() *PreflightSchemaResponse { if m == nil { - return 0, nil + return 
(*PreflightSchemaResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &PreflightSchemaResponse{} + if rhs := m.ChangeResults; rhs != nil { + tmpContainer := make([]*SchemaChangeResult, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ChangeResults = tmpContainer } - if len(m.ExtraEnv) > 0 { - for k := range m.ExtraEnv { - v := m.ExtraEnv[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarint(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Parameters) > 0 { - for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Parameters[iNdEx]) - copy(dAtA[i:], m.Parameters[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Parameters[iNdEx]))) - i-- - dAtA[i] = 0x12 - } + return r +} + +func (m *PreflightSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ApplySchemaRequest) CloneVT() *ApplySchemaRequest { + if m == nil { + return (*ApplySchemaRequest)(nil) } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + r := &ApplySchemaRequest{ + Sql: m.Sql, + Force: m.Force, + AllowReplication: m.AllowReplication, + BeforeSchema: m.BeforeSchema.CloneVT(), + AfterSchema: m.AfterSchema.CloneVT(), + SqlMode: m.SqlMode, + BatchSize: m.BatchSize, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteHookResponse) MarshalVT() (dAtA []byte, err error) { +func (m 
*ApplySchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ApplySchemaResponse) CloneVT() *ApplySchemaResponse { if m == nil { - return nil, nil + return (*ApplySchemaResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ApplySchemaResponse{ + BeforeSchema: m.BeforeSchema.CloneVT(), + AfterSchema: m.AfterSchema.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteHookResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ApplySchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteHookResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *LockTablesRequest) CloneVT() *LockTablesRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*LockTablesRequest)(nil) } - if len(m.Stderr) > 0 { - i -= len(m.Stderr) - copy(dAtA[i:], m.Stderr) - i = encodeVarint(dAtA, i, uint64(len(m.Stderr))) - i-- - dAtA[i] = 0x1a + r := &LockTablesRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Stdout) > 0 { - i -= len(m.Stdout) - copy(dAtA[i:], m.Stdout) - i = encodeVarint(dAtA, i, uint64(len(m.Stdout))) - i-- - dAtA[i] = 0x12 + return r +} + +func (m *LockTablesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *LockTablesResponse) CloneVT() *LockTablesResponse { + if m == nil { + return (*LockTablesResponse)(nil) } - if m.ExitStatus != 0 { - i = encodeVarint(dAtA, i, uint64(m.ExitStatus)) - i-- - dAtA[i] = 0x8 + r := 
&LockTablesResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetSchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *LockTablesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UnlockTablesRequest) CloneVT() *UnlockTablesRequest { if m == nil { - return nil, nil + return (*UnlockTablesRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &UnlockTablesRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *UnlockTablesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UnlockTablesResponse) CloneVT() *UnlockTablesResponse { if m == nil { - return 0, nil + return (*UnlockTablesResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &UnlockTablesResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.TableSchemaOnly { - i-- - if m.TableSchemaOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 + return r +} + +func (m *UnlockTablesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteQueryRequest) CloneVT() *ExecuteQueryRequest { + if m == nil { + return (*ExecuteQueryRequest)(nil) } - if len(m.ExcludeTables) > 0 { - for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { - i 
-= len(m.ExcludeTables[iNdEx]) - copy(dAtA[i:], m.ExcludeTables[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) - i-- - dAtA[i] = 0x1a - } + r := &ExecuteQueryRequest{ + DbName: m.DbName, + MaxRows: m.MaxRows, + CallerId: m.CallerId.CloneVT(), } - if m.IncludeViews { - i-- - if m.IncludeViews { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 + if rhs := m.Query; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Query = tmpBytes } - if len(m.Tables) > 0 { - for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Tables[iNdEx]) - copy(dAtA[i:], m.Tables[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteQueryRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteQueryResponse) CloneVT() *ExecuteQueryResponse { if m == nil { - return nil, nil + return (*ExecuteQueryResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ExecuteQueryResponse{ + Result: m.Result.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ExecuteQueryResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsDbaRequest) CloneVT() *ExecuteFetchAsDbaRequest { if m == nil { - return 0, nil + return 
(*ExecuteFetchAsDbaRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ExecuteFetchAsDbaRequest{ + DbName: m.DbName, + MaxRows: m.MaxRows, + DisableBinlogs: m.DisableBinlogs, + ReloadSchema: m.ReloadSchema, } - if m.SchemaDefinition != nil { - size, err := m.SchemaDefinition.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if rhs := m.Query; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Query = tmpBytes } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetPermissionsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsDbaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteFetchAsDbaResponse) CloneVT() *ExecuteFetchAsDbaResponse { if m == nil { - return nil, nil + return (*ExecuteFetchAsDbaResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ExecuteFetchAsDbaResponse{ + Result: m.Result.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetPermissionsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ExecuteFetchAsDbaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetPermissionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAllPrivsRequest) CloneVT() *ExecuteFetchAsAllPrivsRequest { if m == nil { - return 0, nil + return (*ExecuteFetchAsAllPrivsRequest)(nil) } - i := 
len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ExecuteFetchAsAllPrivsRequest{ + DbName: m.DbName, + MaxRows: m.MaxRows, + ReloadSchema: m.ReloadSchema, } - return len(dAtA) - i, nil + if rhs := m.Query; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Query = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetPermissionsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAllPrivsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteFetchAsAllPrivsResponse) CloneVT() *ExecuteFetchAsAllPrivsResponse { if m == nil { - return nil, nil + return (*ExecuteFetchAsAllPrivsResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ExecuteFetchAsAllPrivsResponse{ + Result: m.Result.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetPermissionsResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ExecuteFetchAsAllPrivsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetPermissionsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppRequest) CloneVT() *ExecuteFetchAsAppRequest { if m == nil { - return 0, nil + return (*ExecuteFetchAsAppRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ExecuteFetchAsAppRequest{ + MaxRows: m.MaxRows, } - if m.Permissions != nil { - size, err := 
m.Permissions.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if rhs := m.Query; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Query = tmpBytes } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *SetReadOnlyRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAppRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteFetchAsAppResponse) CloneVT() *ExecuteFetchAsAppResponse { if m == nil { - return nil, nil + return (*ExecuteFetchAsAppResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ExecuteFetchAsAppResponse{ + Result: m.Result.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *SetReadOnlyRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ExecuteFetchAsAppResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *SetReadOnlyRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReplicationStatusRequest) CloneVT() *ReplicationStatusRequest { if m == nil { - return 0, nil + return (*ReplicationStatusRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ReplicationStatusRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *SetReadOnlyResponse) MarshalVT() (dAtA []byte, 
err error) { +func (m *ReplicationStatusRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReplicationStatusResponse) CloneVT() *ReplicationStatusResponse { if m == nil { - return nil, nil + return (*ReplicationStatusResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ReplicationStatusResponse{ + Status: m.Status.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *SetReadOnlyResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ReplicationStatusResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *SetReadOnlyResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PrimaryStatusRequest) CloneVT() *PrimaryStatusRequest { if m == nil { - return 0, nil + return (*PrimaryStatusRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &PrimaryStatusRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *SetReadWriteRequest) MarshalVT() (dAtA []byte, err error) { +func (m *PrimaryStatusRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PrimaryStatusResponse) CloneVT() *PrimaryStatusResponse { if m == nil { - return nil, nil + return (*PrimaryStatusResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &PrimaryStatusResponse{ + Status: m.Status.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = 
make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *SetReadWriteRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *PrimaryStatusResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *SetReadWriteRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PrimaryPositionRequest) CloneVT() *PrimaryPositionRequest { if m == nil { - return 0, nil + return (*PrimaryPositionRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &PrimaryPositionRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *SetReadWriteResponse) MarshalVT() (dAtA []byte, err error) { +func (m *PrimaryPositionRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PrimaryPositionResponse) CloneVT() *PrimaryPositionResponse { if m == nil { - return nil, nil + return (*PrimaryPositionResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &PrimaryPositionResponse{ + Position: m.Position, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *SetReadWriteResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *PrimaryPositionResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *SetReadWriteResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *WaitForPositionRequest) CloneVT() *WaitForPositionRequest { if m == nil { - return 0, nil + return 
(*WaitForPositionRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &WaitForPositionRequest{ + Position: m.Position, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ChangeTypeRequest) MarshalVT() (dAtA []byte, err error) { +func (m *WaitForPositionRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WaitForPositionResponse) CloneVT() *WaitForPositionResponse { if m == nil { - return nil, nil + return (*WaitForPositionResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &WaitForPositionResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ChangeTypeRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *WaitForPositionResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ChangeTypeRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StopReplicationRequest) CloneVT() *StopReplicationRequest { if m == nil { - return 0, nil + return (*StopReplicationRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if m.SemiSync { - i-- - if m.SemiSync { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.TabletType != 0 { - i = encodeVarint(dAtA, i, uint64(m.TabletType)) - i-- - dAtA[i] = 0x8 + r := &StopReplicationRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, 
m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ChangeTypeResponse) MarshalVT() (dAtA []byte, err error) { +func (m *StopReplicationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StopReplicationResponse) CloneVT() *StopReplicationResponse { if m == nil { - return nil, nil + return (*StopReplicationResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &StopReplicationResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ChangeTypeResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *StopReplicationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ChangeTypeResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StopReplicationMinimumRequest) CloneVT() *StopReplicationMinimumRequest { if m == nil { - return 0, nil + return (*StopReplicationMinimumRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &StopReplicationMinimumRequest{ + Position: m.Position, + WaitTimeout: m.WaitTimeout, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *RefreshStateRequest) MarshalVT() (dAtA []byte, err error) { +func (m *StopReplicationMinimumRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StopReplicationMinimumResponse) CloneVT() *StopReplicationMinimumResponse { if m == nil { - return nil, nil + return (*StopReplicationMinimumResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &StopReplicationMinimumResponse{ + Position: m.Position, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *RefreshStateRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *StopReplicationMinimumResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *RefreshStateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StartReplicationRequest) CloneVT() *StartReplicationRequest { if m == nil { - return 0, nil + return (*StartReplicationRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &StartReplicationRequest{ + SemiSync: m.SemiSync, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *RefreshStateResponse) MarshalVT() (dAtA []byte, err error) { +func (m *StartReplicationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StartReplicationResponse) CloneVT() *StartReplicationResponse { if m == nil { - return nil, nil + return (*StartReplicationResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &StartReplicationResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *RefreshStateResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *StartReplicationResponse) CloneMessageVT() proto.Message 
{ + return m.CloneVT() } -func (m *RefreshStateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StartReplicationUntilAfterRequest) CloneVT() *StartReplicationUntilAfterRequest { if m == nil { - return 0, nil + return (*StartReplicationUntilAfterRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &StartReplicationUntilAfterRequest{ + Position: m.Position, + WaitTimeout: m.WaitTimeout, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *RunHealthCheckRequest) MarshalVT() (dAtA []byte, err error) { +func (m *StartReplicationUntilAfterRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StartReplicationUntilAfterResponse) CloneVT() *StartReplicationUntilAfterResponse { if m == nil { - return nil, nil + return (*StartReplicationUntilAfterResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &StartReplicationUntilAfterResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *RunHealthCheckRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *StartReplicationUntilAfterResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *RunHealthCheckRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetReplicasRequest) CloneVT() *GetReplicasRequest { if m == nil { - return 0, nil + return (*GetReplicasRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := 
&GetReplicasRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *RunHealthCheckResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetReplicasRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetReplicasResponse) CloneVT() *GetReplicasResponse { if m == nil { - return nil, nil + return (*GetReplicasResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetReplicasResponse{} + if rhs := m.Addrs; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Addrs = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *RunHealthCheckResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetReplicasResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *RunHealthCheckResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ResetReplicationRequest) CloneVT() *ResetReplicationRequest { if m == nil { - return 0, nil + return (*ResetReplicationRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ResetReplicationRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ReloadSchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ResetReplicationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ResetReplicationResponse) CloneVT() *ResetReplicationResponse { if m == 
nil { - return nil, nil + return (*ResetReplicationResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ResetReplicationResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ReloadSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ResetReplicationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ReloadSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *VReplicationExecRequest) CloneVT() *VReplicationExecRequest { if m == nil { - return 0, nil + return (*VReplicationExecRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &VReplicationExecRequest{ + Query: m.Query, } - if len(m.WaitPosition) > 0 { - i -= len(m.WaitPosition) - copy(dAtA[i:], m.WaitPosition) - i = encodeVarint(dAtA, i, uint64(len(m.WaitPosition))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ReloadSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *VReplicationExecRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VReplicationExecResponse) CloneVT() *VReplicationExecResponse { if m == nil { - return nil, nil + return (*VReplicationExecResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &VReplicationExecResponse{ + Result: m.Result.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, 
len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ReloadSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *VReplicationExecResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ReloadSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *VReplicationWaitForPosRequest) CloneVT() *VReplicationWaitForPosRequest { if m == nil { - return 0, nil + return (*VReplicationWaitForPosRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &VReplicationWaitForPosRequest{ + Id: m.Id, + Position: m.Position, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *PreflightSchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *VReplicationWaitForPosRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VReplicationWaitForPosResponse) CloneVT() *VReplicationWaitForPosResponse { if m == nil { - return nil, nil + return (*VReplicationWaitForPosResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &VReplicationWaitForPosResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *PreflightSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *VReplicationWaitForPosResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *PreflightSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *InitPrimaryRequest) CloneVT() 
*InitPrimaryRequest { if m == nil { - return 0, nil + return (*InitPrimaryRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &InitPrimaryRequest{ + SemiSync: m.SemiSync, } - if len(m.Changes) > 0 { - for iNdEx := len(m.Changes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Changes[iNdEx]) - copy(dAtA[i:], m.Changes[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Changes[iNdEx]))) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *PreflightSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *InitPrimaryRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *InitPrimaryResponse) CloneVT() *InitPrimaryResponse { if m == nil { - return nil, nil + return (*InitPrimaryResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &InitPrimaryResponse{ + Position: m.Position, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *PreflightSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *InitPrimaryResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *PreflightSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PopulateReparentJournalRequest) CloneVT() *PopulateReparentJournalRequest { if m == nil { - return 0, nil + return (*PopulateReparentJournalRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &PopulateReparentJournalRequest{ 
+ TimeCreatedNs: m.TimeCreatedNs, + ActionName: m.ActionName, + PrimaryAlias: m.PrimaryAlias.CloneVT(), + ReplicationPosition: m.ReplicationPosition, } - if len(m.ChangeResults) > 0 { - for iNdEx := len(m.ChangeResults) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.ChangeResults[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ApplySchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *PopulateReparentJournalRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PopulateReparentJournalResponse) CloneVT() *PopulateReparentJournalResponse { if m == nil { - return nil, nil + return (*PopulateReparentJournalResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &PopulateReparentJournalResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ApplySchemaRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *PopulateReparentJournalResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplySchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *InitReplicaRequest) CloneVT() *InitReplicaRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*InitReplicaRequest)(nil) } - if len(m.SqlMode) > 0 { - i -= len(m.SqlMode) - copy(dAtA[i:], m.SqlMode) - i = encodeVarint(dAtA, i, 
uint64(len(m.SqlMode))) - i-- - dAtA[i] = 0x32 + r := &InitReplicaRequest{ + Parent: m.Parent.CloneVT(), + ReplicationPosition: m.ReplicationPosition, + TimeCreatedNs: m.TimeCreatedNs, + SemiSync: m.SemiSync, } - if m.AfterSchema != nil { - size, err := m.AfterSchema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x2a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.BeforeSchema != nil { - size, err := m.BeforeSchema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 + return r +} + +func (m *InitReplicaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *InitReplicaResponse) CloneVT() *InitReplicaResponse { + if m == nil { + return (*InitReplicaResponse)(nil) } - if m.AllowReplication { - i-- - if m.AllowReplication { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + r := &InitReplicaResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.Force { - i-- - if m.Force { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 + return r +} + +func (m *InitReplicaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DemotePrimaryRequest) CloneVT() *DemotePrimaryRequest { + if m == nil { + return (*DemotePrimaryRequest)(nil) } - if len(m.Sql) > 0 { - i -= len(m.Sql) - copy(dAtA[i:], m.Sql) - i = encodeVarint(dAtA, i, uint64(len(m.Sql))) - i-- - dAtA[i] = 0xa + r := &DemotePrimaryRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ApplySchemaResponse) MarshalVT() (dAtA []byte, err error) { 
+func (m *DemotePrimaryRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DemotePrimaryResponse) CloneVT() *DemotePrimaryResponse { if m == nil { - return nil, nil + return (*DemotePrimaryResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &DemotePrimaryResponse{ + PrimaryStatus: m.PrimaryStatus.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ApplySchemaResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *DemotePrimaryResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplySchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UndoDemotePrimaryRequest) CloneVT() *UndoDemotePrimaryRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*UndoDemotePrimaryRequest)(nil) } - if m.AfterSchema != nil { - size, err := m.AfterSchema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + r := &UndoDemotePrimaryRequest{ + SemiSync: m.SemiSync, } - if m.BeforeSchema != nil { - size, err := m.BeforeSchema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *LockTablesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *UndoDemotePrimaryRequest) CloneMessageVT() proto.Message { + return 
m.CloneVT() +} + +func (m *UndoDemotePrimaryResponse) CloneVT() *UndoDemotePrimaryResponse { if m == nil { - return nil, nil + return (*UndoDemotePrimaryResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &UndoDemotePrimaryResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *LockTablesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *UndoDemotePrimaryResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *LockTablesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReplicaWasPromotedRequest) CloneVT() *ReplicaWasPromotedRequest { if m == nil { - return 0, nil + return (*ReplicaWasPromotedRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ReplicaWasPromotedRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *LockTablesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ReplicaWasPromotedRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReplicaWasPromotedResponse) CloneVT() *ReplicaWasPromotedResponse { if m == nil { - return nil, nil + return (*ReplicaWasPromotedResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ReplicaWasPromotedResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m 
*LockTablesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ReplicaWasPromotedResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *LockTablesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ResetReplicationParametersRequest) CloneVT() *ResetReplicationParametersRequest { if m == nil { - return 0, nil + return (*ResetReplicationParametersRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ResetReplicationParametersRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *UnlockTablesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ResetReplicationParametersRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ResetReplicationParametersResponse) CloneVT() *ResetReplicationParametersResponse { if m == nil { - return nil, nil + return (*ResetReplicationParametersResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ResetReplicationParametersResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *UnlockTablesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ResetReplicationParametersResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *UnlockTablesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *FullStatusRequest) CloneVT() *FullStatusRequest { if m == nil { - return 0, nil + return (*FullStatusRequest)(nil) } - i := len(dAtA) - 
_ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &FullStatusRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *UnlockTablesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *FullStatusRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *FullStatusResponse) CloneVT() *FullStatusResponse { if m == nil { - return nil, nil + return (*FullStatusResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &FullStatusResponse{ + Status: m.Status.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *UnlockTablesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *FullStatusResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *UnlockTablesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetReplicationSourceRequest) CloneVT() *SetReplicationSourceRequest { if m == nil { - return 0, nil + return (*SetReplicationSourceRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SetReplicationSourceRequest{ + Parent: m.Parent.CloneVT(), + TimeCreatedNs: m.TimeCreatedNs, + ForceStartReplication: m.ForceStartReplication, + WaitPosition: m.WaitPosition, + SemiSync: m.SemiSync, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteQueryRequest) MarshalVT() 
(dAtA []byte, err error) { +func (m *SetReplicationSourceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetReplicationSourceResponse) CloneVT() *SetReplicationSourceResponse { if m == nil { - return nil, nil + return (*SetReplicationSourceResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SetReplicationSourceResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ExecuteQueryRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SetReplicationSourceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteQueryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReplicaWasRestartedRequest) CloneVT() *ReplicaWasRestartedRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*ReplicaWasRestartedRequest)(nil) } - if m.CallerId != nil { - size, err := m.CallerId.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 + r := &ReplicaWasRestartedRequest{ + Parent: m.Parent.CloneVT(), } - if m.MaxRows != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxRows)) - i-- - dAtA[i] = 0x18 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.DbName) > 0 { - i -= len(m.DbName) - copy(dAtA[i:], m.DbName) - i = encodeVarint(dAtA, i, uint64(len(m.DbName))) - i-- - dAtA[i] = 0x12 + return r +} + +func (m *ReplicaWasRestartedRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m 
*ReplicaWasRestartedResponse) CloneVT() *ReplicaWasRestartedResponse { + if m == nil { + return (*ReplicaWasRestartedResponse)(nil) } - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarint(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0xa + r := &ReplicaWasRestartedResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteQueryResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ReplicaWasRestartedResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StopReplicationAndGetStatusRequest) CloneVT() *StopReplicationAndGetStatusRequest { if m == nil { - return nil, nil + return (*StopReplicationAndGetStatusRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &StopReplicationAndGetStatusRequest{ + StopReplicationMode: m.StopReplicationMode, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteQueryResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *StopReplicationAndGetStatusRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteQueryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StopReplicationAndGetStatusResponse) CloneVT() *StopReplicationAndGetStatusResponse { if m == nil { - return 0, nil + return (*StopReplicationAndGetStatusResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &StopReplicationAndGetStatusResponse{ + Status: m.Status.CloneVT(), } - if m.Result != nil { - size, err := 
m.Result.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteFetchAsDbaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *StopReplicationAndGetStatusResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PromoteReplicaRequest) CloneVT() *PromoteReplicaRequest { if m == nil { - return nil, nil + return (*PromoteReplicaRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &PromoteReplicaRequest{ + SemiSync: m.SemiSync, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsDbaRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *PromoteReplicaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsDbaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PromoteReplicaResponse) CloneVT() *PromoteReplicaResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*PromoteReplicaResponse)(nil) } - if m.ReloadSchema { - i-- - if m.ReloadSchema { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 + r := &PromoteReplicaResponse{ + Position: m.Position, } - if m.DisableBinlogs { - i-- - if m.DisableBinlogs { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + 
copy(r.unknownFields, m.unknownFields) } - if m.MaxRows != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxRows)) - i-- - dAtA[i] = 0x18 + return r +} + +func (m *PromoteReplicaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BackupRequest) CloneVT() *BackupRequest { + if m == nil { + return (*BackupRequest)(nil) } - if len(m.DbName) > 0 { - i -= len(m.DbName) - copy(dAtA[i:], m.DbName) - i = encodeVarint(dAtA, i, uint64(len(m.DbName))) - i-- - dAtA[i] = 0x12 + r := &BackupRequest{ + Concurrency: m.Concurrency, + AllowPrimary: m.AllowPrimary, + IncrementalFromPos: m.IncrementalFromPos, + UpgradeSafe: m.UpgradeSafe, } - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarint(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteFetchAsDbaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *BackupRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BackupResponse) CloneVT() *BackupResponse { if m == nil { - return nil, nil + return (*BackupResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &BackupResponse{ + Event: m.Event.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsDbaResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *BackupResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsDbaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RestoreFromBackupRequest) CloneVT() *RestoreFromBackupRequest { if m == 
nil { - return 0, nil + return (*RestoreFromBackupRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &RestoreFromBackupRequest{ + BackupTime: m.BackupTime.CloneVT(), + RestoreToPos: m.RestoreToPos, + DryRun: m.DryRun, + RestoreToTimestamp: m.RestoreToTimestamp.CloneVT(), } - if m.Result != nil { - size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteFetchAsAllPrivsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *RestoreFromBackupRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RestoreFromBackupResponse) CloneVT() *RestoreFromBackupResponse { if m == nil { - return nil, nil + return (*RestoreFromBackupResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RestoreFromBackupResponse{ + Event: m.Event.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsAllPrivsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RestoreFromBackupResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsAllPrivsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CreateVReplicationWorkflowRequest) CloneVT() *CreateVReplicationWorkflowRequest { if m == nil { - return 0, nil + return (*CreateVReplicationWorkflowRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - 
if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &CreateVReplicationWorkflowRequest{ + Workflow: m.Workflow, + TabletSelectionPreference: m.TabletSelectionPreference, + WorkflowType: m.WorkflowType, + WorkflowSubType: m.WorkflowSubType, + DeferSecondaryKeys: m.DeferSecondaryKeys, + AutoStart: m.AutoStart, + StopAfterCopy: m.StopAfterCopy, } - if m.ReloadSchema { - i-- - if m.ReloadSchema { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if rhs := m.BinlogSource; rhs != nil { + tmpContainer := make([]*binlogdata.BinlogSource, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x20 + r.BinlogSource = tmpContainer } - if m.MaxRows != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxRows)) - i-- - dAtA[i] = 0x18 + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer } - if len(m.DbName) > 0 { - i -= len(m.DbName) - copy(dAtA[i:], m.DbName) - i = encodeVarint(dAtA, i, uint64(len(m.DbName))) - i-- - dAtA[i] = 0x12 + if rhs := m.TabletTypes; rhs != nil { + tmpContainer := make([]topodata.TabletType, len(rhs)) + copy(tmpContainer, rhs) + r.TabletTypes = tmpContainer } - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarint(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteFetchAsAllPrivsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *CreateVReplicationWorkflowRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CreateVReplicationWorkflowResponse) CloneVT() *CreateVReplicationWorkflowResponse { if m == nil { - return nil, nil + return (*CreateVReplicationWorkflowResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) 
- if err != nil { - return nil, err + r := &CreateVReplicationWorkflowResponse{ + Result: m.Result.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsAllPrivsResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *CreateVReplicationWorkflowResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsAllPrivsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteVReplicationWorkflowRequest) CloneVT() *DeleteVReplicationWorkflowRequest { if m == nil { - return 0, nil + return (*DeleteVReplicationWorkflowRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &DeleteVReplicationWorkflowRequest{ + Workflow: m.Workflow, } - if m.Result != nil { - size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteFetchAsAppRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteVReplicationWorkflowRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteVReplicationWorkflowResponse) CloneVT() *DeleteVReplicationWorkflowResponse { if m == nil { - return nil, nil + return (*DeleteVReplicationWorkflowResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &DeleteVReplicationWorkflowResponse{ + Result: m.Result.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + 
r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsAppRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *DeleteVReplicationWorkflowResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsAppRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReadVReplicationWorkflowRequest) CloneVT() *ReadVReplicationWorkflowRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*ReadVReplicationWorkflowRequest)(nil) } - if m.MaxRows != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxRows)) - i-- - dAtA[i] = 0x10 + r := &ReadVReplicationWorkflowRequest{ + Workflow: m.Workflow, } - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarint(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteFetchAsAppResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ReadVReplicationWorkflowRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReadVReplicationWorkflowResponse_Stream) CloneVT() *ReadVReplicationWorkflowResponse_Stream { if m == nil { - return nil, nil + return (*ReadVReplicationWorkflowResponse_Stream)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ReadVReplicationWorkflowResponse_Stream{ + Id: m.Id, + Bls: m.Bls.CloneVT(), + Pos: m.Pos, + StopPos: m.StopPos, + MaxTps: m.MaxTps, + MaxReplicationLag: m.MaxReplicationLag, + TimeUpdated: m.TimeUpdated.CloneVT(), + TransactionTimestamp: 
m.TransactionTimestamp.CloneVT(), + State: m.State, + Message: m.Message, + RowsCopied: m.RowsCopied, + TimeHeartbeat: m.TimeHeartbeat.CloneVT(), + TimeThrottled: m.TimeThrottled.CloneVT(), + ComponentThrottled: m.ComponentThrottled, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsAppResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ReadVReplicationWorkflowResponse_Stream) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsAppResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReadVReplicationWorkflowResponse) CloneVT() *ReadVReplicationWorkflowResponse { if m == nil { - return 0, nil + return (*ReadVReplicationWorkflowResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ReadVReplicationWorkflowResponse{ + Workflow: m.Workflow, + Cells: m.Cells, + TabletSelectionPreference: m.TabletSelectionPreference, + DbName: m.DbName, + Tags: m.Tags, + WorkflowType: m.WorkflowType, + WorkflowSubType: m.WorkflowSubType, + DeferSecondaryKeys: m.DeferSecondaryKeys, } - if m.Result != nil { - size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if rhs := m.TabletTypes; rhs != nil { + tmpContainer := make([]topodata.TabletType, len(rhs)) + copy(tmpContainer, rhs) + r.TabletTypes = tmpContainer + } + if rhs := m.Streams; rhs != nil { + tmpContainer := make([]*ReadVReplicationWorkflowResponse_Stream, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r.Streams = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, 
len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ReplicationStatusRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ReadVReplicationWorkflowResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VDiffRequest) CloneVT() *VDiffRequest { if m == nil { - return nil, nil + return (*VDiffRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &VDiffRequest{ + Keyspace: m.Keyspace, + Workflow: m.Workflow, + Action: m.Action, + ActionArg: m.ActionArg, + VdiffUuid: m.VdiffUuid, + Options: m.Options.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ReplicationStatusRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *VDiffRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ReplicationStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *VDiffResponse) CloneVT() *VDiffResponse { if m == nil { - return 0, nil + return (*VDiffResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &VDiffResponse{ + Id: m.Id, + Output: m.Output.CloneVT(), + VdiffUuid: m.VdiffUuid, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ReplicationStatusResponse) MarshalVT() (dAtA []byte, err error) { +func (m *VDiffResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VDiffPickerOptions) CloneVT() *VDiffPickerOptions { if m == nil { - return nil, nil + return (*VDiffPickerOptions)(nil) } - size := m.SizeVT() - dAtA 
= make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &VDiffPickerOptions{ + TabletTypes: m.TabletTypes, + SourceCell: m.SourceCell, + TargetCell: m.TargetCell, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ReplicationStatusResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *VDiffPickerOptions) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ReplicationStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *VDiffReportOptions) CloneVT() *VDiffReportOptions { if m == nil { - return 0, nil + return (*VDiffReportOptions)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &VDiffReportOptions{ + OnlyPks: m.OnlyPks, + DebugQuery: m.DebugQuery, + Format: m.Format, } - if m.Status != nil { - size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *PrimaryStatusRequest) MarshalVT() (dAtA []byte, err error) { +func (m *VDiffReportOptions) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VDiffCoreOptions) CloneVT() *VDiffCoreOptions { if m == nil { - return nil, nil + return (*VDiffCoreOptions)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &VDiffCoreOptions{ + Tables: m.Tables, + AutoRetry: m.AutoRetry, + MaxRows: m.MaxRows, + Checksum: m.Checksum, + SamplePct: 
m.SamplePct, + TimeoutSeconds: m.TimeoutSeconds, + MaxExtraRowsToCompare: m.MaxExtraRowsToCompare, + UpdateTableStats: m.UpdateTableStats, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *PrimaryStatusRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *VDiffCoreOptions) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *PrimaryStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *VDiffOptions) CloneVT() *VDiffOptions { if m == nil { - return 0, nil + return (*VDiffOptions)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &VDiffOptions{ + PickerOptions: m.PickerOptions.CloneVT(), + CoreOptions: m.CoreOptions.CloneVT(), + ReportOptions: m.ReportOptions.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *PrimaryStatusResponse) MarshalVT() (dAtA []byte, err error) { +func (m *VDiffOptions) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UpdateVReplicationWorkflowRequest) CloneVT() *UpdateVReplicationWorkflowRequest { if m == nil { - return nil, nil + return (*UpdateVReplicationWorkflowRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &UpdateVReplicationWorkflowRequest{ + Workflow: m.Workflow, + TabletSelectionPreference: m.TabletSelectionPreference, + OnDdl: m.OnDdl, + State: m.State, } - return dAtA[:n], nil + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if rhs := 
m.TabletTypes; rhs != nil { + tmpContainer := make([]topodata.TabletType, len(rhs)) + copy(tmpContainer, rhs) + r.TabletTypes = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *PrimaryStatusResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *UpdateVReplicationWorkflowRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *PrimaryStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UpdateVReplicationWorkflowResponse) CloneVT() *UpdateVReplicationWorkflowResponse { if m == nil { - return 0, nil + return (*UpdateVReplicationWorkflowResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &UpdateVReplicationWorkflowResponse{ + Result: m.Result.CloneVT(), } - if m.Status != nil { - size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *PrimaryPositionRequest) MarshalVT() (dAtA []byte, err error) { +func (m *UpdateVReplicationWorkflowResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ResetSequencesRequest) CloneVT() *ResetSequencesRequest { + if m == nil { + return (*ResetSequencesRequest)(nil) + } + r := &ResetSequencesRequest{} + if rhs := m.Tables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m 
*ResetSequencesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ResetSequencesResponse) CloneVT() *ResetSequencesResponse { + if m == nil { + return (*ResetSequencesResponse)(nil) + } + r := &ResetSequencesResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ResetSequencesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CheckThrottlerRequest) CloneVT() *CheckThrottlerRequest { + if m == nil { + return (*CheckThrottlerRequest)(nil) + } + r := &CheckThrottlerRequest{ + AppName: m.AppName, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CheckThrottlerRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CheckThrottlerResponse) CloneVT() *CheckThrottlerResponse { + if m == nil { + return (*CheckThrottlerResponse)(nil) + } + r := &CheckThrottlerResponse{ + StatusCode: m.StatusCode, + Value: m.Value, + Threshold: m.Threshold, + Error: m.Error, + Message: m.Message, + RecentlyChecked: m.RecentlyChecked, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CheckThrottlerResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TableDefinition) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2241,12 +2276,12 @@ func (m *PrimaryPositionRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PrimaryPositionRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *TableDefinition) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PrimaryPositionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m 
*TableDefinition) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2258,10 +2293,71 @@ func (m *PrimaryPositionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Fields[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + } + if m.RowCount != 0 { + i = encodeVarint(dAtA, i, uint64(m.RowCount)) + i-- + dAtA[i] = 0x38 + } + if m.DataLength != 0 { + i = encodeVarint(dAtA, i, uint64(m.DataLength)) + i-- + dAtA[i] = 0x30 + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarint(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x2a + } + if len(m.PrimaryKeyColumns) > 0 { + for iNdEx := len(m.PrimaryKeyColumns) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PrimaryKeyColumns[iNdEx]) + copy(dAtA[i:], m.PrimaryKeyColumns[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.PrimaryKeyColumns[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Columns) > 0 { + for iNdEx := len(m.Columns) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Columns[iNdEx]) + copy(dAtA[i:], m.Columns[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Columns[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Schema) > 0 { + i -= len(m.Schema) + copy(dAtA[i:], m.Schema) + i = encodeVarint(dAtA, i, uint64(len(m.Schema))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *PrimaryPositionResponse) MarshalVT() (dAtA []byte, err error) { +func (m *SchemaDefinition) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2274,12 +2370,12 @@ func (m *PrimaryPositionResponse) MarshalVT() (dAtA []byte, err error) { return 
dAtA[:n], nil } -func (m *PrimaryPositionResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *SchemaDefinition) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PrimaryPositionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SchemaDefinition) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2291,17 +2387,29 @@ func (m *PrimaryPositionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) + if len(m.TableDefinitions) > 0 { + for iNdEx := len(m.TableDefinitions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TableDefinitions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.DatabaseSchema) > 0 { + i -= len(m.DatabaseSchema) + copy(dAtA[i:], m.DatabaseSchema) + i = encodeVarint(dAtA, i, uint64(len(m.DatabaseSchema))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *WaitForPositionRequest) MarshalVT() (dAtA []byte, err error) { +func (m *SchemaChangeResult) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2314,12 +2422,12 @@ func (m *WaitForPositionRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *WaitForPositionRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *SchemaChangeResult) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *WaitForPositionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SchemaChangeResult) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2331,17 +2439,30 @@ func (m *WaitForPositionRequest) 
MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) + if m.AfterSchema != nil { + size, err := m.AfterSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BeforeSchema != nil { + size, err := m.BeforeSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *WaitForPositionResponse) MarshalVT() (dAtA []byte, err error) { +func (m *UserPermission) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2354,12 +2475,12 @@ func (m *WaitForPositionResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *WaitForPositionResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *UserPermission) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *WaitForPositionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UserPermission) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2371,10 +2492,48 @@ func (m *WaitForPositionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Privileges) > 0 { + for k := range m.Privileges { + v := m.Privileges[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if m.PasswordChecksum != 0 { + i = encodeVarint(dAtA, i, uint64(m.PasswordChecksum)) + 
i-- + dAtA[i] = 0x18 + } + if len(m.User) > 0 { + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarint(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x12 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarint(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *StopReplicationRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DbPermission) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2387,12 +2546,12 @@ func (m *StopReplicationRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StopReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *DbPermission) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StopReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DbPermission) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2404,10 +2563,50 @@ func (m *StopReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Privileges) > 0 { + for k := range m.Privileges { + v := m.Privileges[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.User) > 0 { + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarint(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + } + if len(m.Db) > 0 { + i -= len(m.Db) + copy(dAtA[i:], m.Db) + i = encodeVarint(dAtA, i, uint64(len(m.Db))) + i-- + dAtA[i] = 0x12 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarint(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, 
nil } -func (m *StopReplicationResponse) MarshalVT() (dAtA []byte, err error) { +func (m *Permissions) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2420,12 +2619,12 @@ func (m *StopReplicationResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StopReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *Permissions) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StopReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Permissions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2437,10 +2636,34 @@ func (m *StopReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - return len(dAtA) - i, nil + if len(m.DbPermissions) > 0 { + for iNdEx := len(m.DbPermissions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DbPermissions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.UserPermissions) > 0 { + for iNdEx := len(m.UserPermissions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.UserPermissions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *StopReplicationMinimumRequest) MarshalVT() (dAtA []byte, err error) { +func (m *PingRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2453,12 +2676,12 @@ func (m *StopReplicationMinimumRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StopReplicationMinimumRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *PingRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return 
m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StopReplicationMinimumRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PingRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2470,22 +2693,17 @@ func (m *StopReplicationMinimumRequest) MarshalToSizedBufferVT(dAtA []byte) (int i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.WaitTimeout != 0 { - i = encodeVarint(dAtA, i, uint64(m.WaitTimeout)) - i-- - dAtA[i] = 0x10 - } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarint(dAtA, i, uint64(len(m.Payload))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *StopReplicationMinimumResponse) MarshalVT() (dAtA []byte, err error) { +func (m *PingResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2498,12 +2716,12 @@ func (m *StopReplicationMinimumResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StopReplicationMinimumResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *PingResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StopReplicationMinimumResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PingResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2515,17 +2733,17 @@ func (m *StopReplicationMinimumResponse) MarshalToSizedBufferVT(dAtA []byte) (in i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarint(dAtA, i, uint64(len(m.Payload))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } 
-func (m *StartReplicationRequest) MarshalVT() (dAtA []byte, err error) { +func (m *SleepRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2538,12 +2756,12 @@ func (m *StartReplicationRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StartReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *SleepRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StartReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SleepRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2555,20 +2773,15 @@ func (m *StartReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.SemiSync { - i-- - if m.SemiSync { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } + if m.Duration != 0 { + i = encodeVarint(dAtA, i, uint64(m.Duration)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *StartReplicationResponse) MarshalVT() (dAtA []byte, err error) { +func (m *SleepResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2581,12 +2794,12 @@ func (m *StartReplicationResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StartReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *SleepResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StartReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SleepResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2601,7 +2814,7 @@ func (m *StartReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, err return len(dAtA) - i, nil } -func (m *StartReplicationUntilAfterRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteHookRequest) 
MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2614,12 +2827,12 @@ func (m *StartReplicationUntilAfterRequest) MarshalVT() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *StartReplicationUntilAfterRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteHookRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StartReplicationUntilAfterRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteHookRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2631,22 +2844,45 @@ func (m *StartReplicationUntilAfterRequest) MarshalToSizedBufferVT(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.WaitTimeout != 0 { - i = encodeVarint(dAtA, i, uint64(m.WaitTimeout)) - i-- - dAtA[i] = 0x10 + if len(m.ExtraEnv) > 0 { + for k := range m.ExtraEnv { + v := m.ExtraEnv[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) + if len(m.Parameters) > 0 { + for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Parameters[iNdEx]) + copy(dAtA[i:], m.Parameters[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Parameters[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *StartReplicationUntilAfterResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteHookResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ 
-2659,12 +2895,12 @@ func (m *StartReplicationUntilAfterResponse) MarshalVT() (dAtA []byte, err error return dAtA[:n], nil } -func (m *StartReplicationUntilAfterResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteHookResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StartReplicationUntilAfterResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteHookResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2676,10 +2912,29 @@ func (m *StartReplicationUntilAfterResponse) MarshalToSizedBufferVT(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Stderr) > 0 { + i -= len(m.Stderr) + copy(dAtA[i:], m.Stderr) + i = encodeVarint(dAtA, i, uint64(len(m.Stderr))) + i-- + dAtA[i] = 0x1a + } + if len(m.Stdout) > 0 { + i -= len(m.Stdout) + copy(dAtA[i:], m.Stdout) + i = encodeVarint(dAtA, i, uint64(len(m.Stdout))) + i-- + dAtA[i] = 0x12 + } + if m.ExitStatus != 0 { + i = encodeVarint(dAtA, i, uint64(m.ExitStatus)) + i-- + dAtA[i] = 0x8 + } return len(dAtA) - i, nil } -func (m *GetReplicasRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetSchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2692,12 +2947,12 @@ func (m *GetReplicasRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetReplicasRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetReplicasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2709,10 +2964,48 @@ func (m *GetReplicasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if 
m.TableSchemaOnly { + i-- + if m.TableSchemaOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.ExcludeTables) > 0 { + for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExcludeTables[iNdEx]) + copy(dAtA[i:], m.ExcludeTables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.IncludeViews { + i-- + if m.IncludeViews { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tables[iNdEx]) + copy(dAtA[i:], m.Tables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *GetReplicasResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetSchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2725,12 +3018,12 @@ func (m *GetReplicasResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetReplicasResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetReplicasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2742,19 +3035,20 @@ func (m *GetReplicasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Addrs) > 0 { - for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Addrs[iNdEx]) - copy(dAtA[i:], m.Addrs[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Addrs[iNdEx]))) - i-- - dAtA[i] = 0xa + if m.SchemaDefinition != nil { + size, err := m.SchemaDefinition.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = 
encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ResetReplicationRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetPermissionsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2767,12 +3061,12 @@ func (m *ResetReplicationRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResetReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetPermissionsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ResetReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetPermissionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2787,7 +3081,7 @@ func (m *ResetReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func (m *ResetReplicationResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetPermissionsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2800,12 +3094,12 @@ func (m *ResetReplicationResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResetReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetPermissionsResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ResetReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetPermissionsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2817,10 +3111,20 @@ func (m *ResetReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.Permissions != nil { + size, err := m.Permissions.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + 
i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *VReplicationExecRequest) MarshalVT() (dAtA []byte, err error) { +func (m *SetReadOnlyRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2833,12 +3137,12 @@ func (m *VReplicationExecRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VReplicationExecRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *SetReadOnlyRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VReplicationExecRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetReadOnlyRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2850,17 +3154,10 @@ func (m *VReplicationExecRequest) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarint(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *VReplicationExecResponse) MarshalVT() (dAtA []byte, err error) { +func (m *SetReadOnlyResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2873,12 +3170,12 @@ func (m *VReplicationExecResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VReplicationExecResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *SetReadOnlyResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VReplicationExecResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetReadOnlyResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2890,20 +3187,10 @@ func (m *VReplicationExecResponse) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Result != nil { - size, 
err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *VReplicationWaitForPosRequest) MarshalVT() (dAtA []byte, err error) { +func (m *SetReadWriteRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2916,12 +3203,12 @@ func (m *VReplicationWaitForPosRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VReplicationWaitForPosRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *SetReadWriteRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VReplicationWaitForPosRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetReadWriteRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2933,22 +3220,10 @@ func (m *VReplicationWaitForPosRequest) MarshalToSizedBufferVT(dAtA []byte) (int i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) - i-- - dAtA[i] = 0x12 - } - if m.Id != 0 { - i = encodeVarint(dAtA, i, uint64(m.Id)) - i-- - dAtA[i] = 0x8 - } return len(dAtA) - i, nil } -func (m *VReplicationWaitForPosResponse) MarshalVT() (dAtA []byte, err error) { +func (m *SetReadWriteResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2961,12 +3236,12 @@ func (m *VReplicationWaitForPosResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VReplicationWaitForPosResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *SetReadWriteResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VReplicationWaitForPosResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m 
*SetReadWriteResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2981,7 +3256,7 @@ func (m *VReplicationWaitForPosResponse) MarshalToSizedBufferVT(dAtA []byte) (in return len(dAtA) - i, nil } -func (m *InitPrimaryRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ChangeTypeRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2994,12 +3269,12 @@ func (m *InitPrimaryRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *InitPrimaryRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ChangeTypeRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *InitPrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ChangeTypeRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3019,12 +3294,17 @@ func (m *InitPrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { dAtA[i] = 0 } i-- + dAtA[i] = 0x10 + } + if m.TabletType != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletType)) + i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *InitPrimaryResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ChangeTypeResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3037,12 +3317,12 @@ func (m *InitPrimaryResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *InitPrimaryResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ChangeTypeResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *InitPrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ChangeTypeResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3054,17 +3334,10 @@ func (m *InitPrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= 
len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *PopulateReparentJournalRequest) MarshalVT() (dAtA []byte, err error) { +func (m *RefreshStateRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3077,12 +3350,12 @@ func (m *PopulateReparentJournalRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PopulateReparentJournalRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *RefreshStateRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PopulateReparentJournalRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RefreshStateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3094,39 +3367,10 @@ func (m *PopulateReparentJournalRequest) MarshalToSizedBufferVT(dAtA []byte) (in i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.ReplicationPosition) > 0 { - i -= len(m.ReplicationPosition) - copy(dAtA[i:], m.ReplicationPosition) - i = encodeVarint(dAtA, i, uint64(len(m.ReplicationPosition))) - i-- - dAtA[i] = 0x22 - } - if m.PrimaryAlias != nil { - size, err := m.PrimaryAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a - } - if len(m.ActionName) > 0 { - i -= len(m.ActionName) - copy(dAtA[i:], m.ActionName) - i = encodeVarint(dAtA, i, uint64(len(m.ActionName))) - i-- - dAtA[i] = 0x12 - } - if m.TimeCreatedNs != 0 { - i = encodeVarint(dAtA, i, uint64(m.TimeCreatedNs)) - i-- - dAtA[i] = 0x8 - } return len(dAtA) - i, nil } -func (m *PopulateReparentJournalResponse) MarshalVT() (dAtA []byte, err error) { +func (m *RefreshStateResponse) MarshalVT() (dAtA []byte, err 
error) { if m == nil { return nil, nil } @@ -3139,12 +3383,12 @@ func (m *PopulateReparentJournalResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PopulateReparentJournalResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *RefreshStateResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PopulateReparentJournalResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RefreshStateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3159,7 +3403,7 @@ func (m *PopulateReparentJournalResponse) MarshalToSizedBufferVT(dAtA []byte) (i return len(dAtA) - i, nil } -func (m *InitReplicaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *RunHealthCheckRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3172,12 +3416,12 @@ func (m *InitReplicaRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *InitReplicaRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *RunHealthCheckRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *InitReplicaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RunHealthCheckRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3189,42 +3433,10 @@ func (m *InitReplicaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.SemiSync { - i-- - if m.SemiSync { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.TimeCreatedNs != 0 { - i = encodeVarint(dAtA, i, uint64(m.TimeCreatedNs)) - i-- - dAtA[i] = 0x18 - } - if len(m.ReplicationPosition) > 0 { - i -= len(m.ReplicationPosition) - copy(dAtA[i:], m.ReplicationPosition) - i = encodeVarint(dAtA, i, uint64(len(m.ReplicationPosition))) - i-- - dAtA[i] = 
0x12 - } - if m.Parent != nil { - size, err := m.Parent.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *InitReplicaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *RunHealthCheckResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3237,12 +3449,12 @@ func (m *InitReplicaResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *InitReplicaResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *RunHealthCheckResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *InitReplicaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RunHealthCheckResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3257,7 +3469,7 @@ func (m *InitReplicaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *DemotePrimaryRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ReloadSchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3270,12 +3482,12 @@ func (m *DemotePrimaryRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DemotePrimaryRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ReloadSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *DemotePrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReloadSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3287,10 +3499,17 @@ func (m *DemotePrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.WaitPosition) > 0 { + i -= len(m.WaitPosition) + copy(dAtA[i:], 
m.WaitPosition) + i = encodeVarint(dAtA, i, uint64(len(m.WaitPosition))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *DemotePrimaryResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ReloadSchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3303,12 +3522,12 @@ func (m *DemotePrimaryResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DemotePrimaryResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ReloadSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *DemotePrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReloadSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3320,20 +3539,10 @@ func (m *DemotePrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.PrimaryStatus != nil { - size, err := m.PrimaryStatus.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - } return len(dAtA) - i, nil } -func (m *UndoDemotePrimaryRequest) MarshalVT() (dAtA []byte, err error) { +func (m *PreflightSchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3346,12 +3555,12 @@ func (m *UndoDemotePrimaryRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UndoDemotePrimaryRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *PreflightSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UndoDemotePrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PreflightSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3363,20 +3572,19 @@ func (m 
*UndoDemotePrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.SemiSync { - i-- - if m.SemiSync { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.Changes) > 0 { + for iNdEx := len(m.Changes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Changes[iNdEx]) + copy(dAtA[i:], m.Changes[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Changes[iNdEx]))) + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *UndoDemotePrimaryResponse) MarshalVT() (dAtA []byte, err error) { +func (m *PreflightSchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3389,12 +3597,12 @@ func (m *UndoDemotePrimaryResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UndoDemotePrimaryResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *PreflightSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UndoDemotePrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PreflightSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3406,10 +3614,22 @@ func (m *UndoDemotePrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.ChangeResults) > 0 { + for iNdEx := len(m.ChangeResults) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ChangeResults[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *ReplicaWasPromotedRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ApplySchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3422,12 +3642,12 @@ func (m *ReplicaWasPromotedRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil 
} -func (m *ReplicaWasPromotedRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplySchemaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReplicaWasPromotedRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplySchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3439,43 +3659,69 @@ func (m *ReplicaWasPromotedRequest) MarshalToSizedBufferVT(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - return len(dAtA) - i, nil -} - -func (m *ReplicaWasPromotedResponse) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil + if m.BatchSize != 0 { + i = encodeVarint(dAtA, i, uint64(m.BatchSize)) + i-- + dAtA[i] = 0x38 } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + if len(m.SqlMode) > 0 { + i -= len(m.SqlMode) + copy(dAtA[i:], m.SqlMode) + i = encodeVarint(dAtA, i, uint64(len(m.SqlMode))) + i-- + dAtA[i] = 0x32 } - return dAtA[:n], nil -} - -func (m *ReplicaWasPromotedResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *ReplicaWasPromotedResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil + if m.AfterSchema != nil { + size, err := m.AfterSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + if m.BeforeSchema != nil { + size, err := m.BeforeSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.AllowReplication { + i-- + 
if m.AllowReplication { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Sql) > 0 { + i -= len(m.Sql) + copy(dAtA[i:], m.Sql) + i = encodeVarint(dAtA, i, uint64(len(m.Sql))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ResetReplicationParametersRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ApplySchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3488,12 +3734,12 @@ func (m *ResetReplicationParametersRequest) MarshalVT() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *ResetReplicationParametersRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplySchemaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ResetReplicationParametersRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplySchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3505,10 +3751,30 @@ func (m *ResetReplicationParametersRequest) MarshalToSizedBufferVT(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.AfterSchema != nil { + size, err := m.AfterSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BeforeSchema != nil { + size, err := m.BeforeSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *ResetReplicationParametersResponse) MarshalVT() (dAtA []byte, err error) { +func (m *LockTablesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3521,12 +3787,12 @@ func (m *ResetReplicationParametersResponse) MarshalVT() (dAtA []byte, 
err error return dAtA[:n], nil } -func (m *ResetReplicationParametersResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *LockTablesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ResetReplicationParametersResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *LockTablesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3541,7 +3807,7 @@ func (m *ResetReplicationParametersResponse) MarshalToSizedBufferVT(dAtA []byte) return len(dAtA) - i, nil } -func (m *FullStatusRequest) MarshalVT() (dAtA []byte, err error) { +func (m *LockTablesResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3554,12 +3820,12 @@ func (m *FullStatusRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *FullStatusRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *LockTablesResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *FullStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *LockTablesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3574,7 +3840,7 @@ func (m *FullStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *FullStatusResponse) MarshalVT() (dAtA []byte, err error) { +func (m *UnlockTablesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3587,12 +3853,12 @@ func (m *FullStatusResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *FullStatusResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *UnlockTablesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *FullStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m 
*UnlockTablesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3604,20 +3870,10 @@ func (m *FullStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Status != nil { - size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *SetReplicationSourceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *UnlockTablesResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3630,12 +3886,12 @@ func (m *SetReplicationSourceRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetReplicationSourceRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *UnlockTablesResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetReplicationSourceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UnlockTablesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3647,52 +3903,10 @@ func (m *SetReplicationSourceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.SemiSync { - i-- - if m.SemiSync { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if len(m.WaitPosition) > 0 { - i -= len(m.WaitPosition) - copy(dAtA[i:], m.WaitPosition) - i = encodeVarint(dAtA, i, uint64(len(m.WaitPosition))) - i-- - dAtA[i] = 0x22 - } - if m.ForceStartReplication { - i-- - if m.ForceStartReplication { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.TimeCreatedNs != 0 { - i = encodeVarint(dAtA, i, uint64(m.TimeCreatedNs)) - i-- - dAtA[i] = 0x10 - } - if m.Parent != nil { - size, err := m.Parent.MarshalToSizedBufferVT(dAtA[:i]) - if 
err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *SetReplicationSourceResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteQueryRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3705,12 +3919,12 @@ func (m *SetReplicationSourceResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetReplicationSourceResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteQueryRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetReplicationSourceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteQueryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3722,10 +3936,39 @@ func (m *SetReplicationSourceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.CallerId != nil { + size, err := m.CallerId.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.MaxRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxRows)) + i-- + dAtA[i] = 0x18 + } + if len(m.DbName) > 0 { + i -= len(m.DbName) + copy(dAtA[i:], m.DbName) + i = encodeVarint(dAtA, i, uint64(len(m.DbName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarint(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *ReplicaWasRestartedRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteQueryResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3738,12 +3981,12 @@ func (m *ReplicaWasRestartedRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaWasRestartedRequest) 
MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteQueryResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReplicaWasRestartedRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteQueryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3755,8 +3998,8 @@ func (m *ReplicaWasRestartedRequest) MarshalToSizedBufferVT(dAtA []byte) (int, e i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Parent != nil { - size, err := m.Parent.MarshalToSizedBufferVT(dAtA[:i]) + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -3768,7 +4011,7 @@ func (m *ReplicaWasRestartedRequest) MarshalToSizedBufferVT(dAtA []byte) (int, e return len(dAtA) - i, nil } -func (m *ReplicaWasRestartedResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsDbaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3781,12 +4024,12 @@ func (m *ReplicaWasRestartedResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaWasRestartedResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsDbaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReplicaWasRestartedResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsDbaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3798,10 +4041,49 @@ func (m *ReplicaWasRestartedResponse) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.ReloadSchema { + i-- + if m.ReloadSchema { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.DisableBinlogs { + i-- + if m.DisableBinlogs { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] 
= 0x20 + } + if m.MaxRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxRows)) + i-- + dAtA[i] = 0x18 + } + if len(m.DbName) > 0 { + i -= len(m.DbName) + copy(dAtA[i:], m.DbName) + i = encodeVarint(dAtA, i, uint64(len(m.DbName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarint(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *StopReplicationAndGetStatusRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsDbaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3814,12 +4096,12 @@ func (m *StopReplicationAndGetStatusRequest) MarshalVT() (dAtA []byte, err error return dAtA[:n], nil } -func (m *StopReplicationAndGetStatusRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsDbaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StopReplicationAndGetStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsDbaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3831,15 +4113,20 @@ func (m *StopReplicationAndGetStatusRequest) MarshalToSizedBufferVT(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.StopReplicationMode != 0 { - i = encodeVarint(dAtA, i, uint64(m.StopReplicationMode)) + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *StopReplicationAndGetStatusResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAllPrivsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3852,12 +4139,12 @@ func (m *StopReplicationAndGetStatusResponse) MarshalVT() (dAtA []byte, err erro 
return dAtA[:n], nil } -func (m *StopReplicationAndGetStatusResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAllPrivsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StopReplicationAndGetStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAllPrivsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3869,20 +4156,39 @@ func (m *StopReplicationAndGetStatusResponse) MarshalToSizedBufferVT(dAtA []byte i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Status != nil { - size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if m.ReloadSchema { + i-- + if m.ReloadSchema { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x20 + } + if m.MaxRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxRows)) + i-- + dAtA[i] = 0x18 + } + if len(m.DbName) > 0 { + i -= len(m.DbName) + copy(dAtA[i:], m.DbName) + i = encodeVarint(dAtA, i, uint64(len(m.DbName))) i-- dAtA[i] = 0x12 } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarint(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *PromoteReplicaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAllPrivsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3895,12 +4201,12 @@ func (m *PromoteReplicaRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PromoteReplicaRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAllPrivsResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PromoteReplicaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAllPrivsResponse) 
MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3912,20 +4218,20 @@ func (m *PromoteReplicaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.SemiSync { - i-- - if m.SemiSync { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *PromoteReplicaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAppRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3938,12 +4244,12 @@ func (m *PromoteReplicaResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PromoteReplicaResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PromoteReplicaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3955,17 +4261,22 @@ func (m *PromoteReplicaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) + if m.MaxRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxRows)) + i-- + dAtA[i] = 0x10 + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarint(dAtA, i, uint64(len(m.Query))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *BackupRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAppResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { 
return nil, nil } @@ -3978,12 +4289,12 @@ func (m *BackupRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *BackupRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *BackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3995,32 +4306,20 @@ func (m *BackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.IncrementalFromPos) > 0 { - i -= len(m.IncrementalFromPos) - copy(dAtA[i:], m.IncrementalFromPos) - i = encodeVarint(dAtA, i, uint64(len(m.IncrementalFromPos))) - i-- - dAtA[i] = 0x1a - } - if m.AllowPrimary { - i-- - if m.AllowPrimary { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x10 - } - if m.Concurrency != 0 { - i = encodeVarint(dAtA, i, uint64(m.Concurrency)) - i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *BackupResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ReplicationStatusRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4033,12 +4332,12 @@ func (m *BackupResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *BackupResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ReplicationStatusRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *BackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReplicationStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil 
} @@ -4050,20 +4349,10 @@ func (m *BackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Event != nil { - size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *RestoreFromBackupRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ReplicationStatusResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4076,12 +4365,12 @@ func (m *RestoreFromBackupRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RestoreFromBackupRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ReplicationStatusResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RestoreFromBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReplicationStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4093,25 +4382,8 @@ func (m *RestoreFromBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.DryRun { - i-- - if m.DryRun { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.RestoreToPos) > 0 { - i -= len(m.RestoreToPos) - copy(dAtA[i:], m.RestoreToPos) - i = encodeVarint(dAtA, i, uint64(len(m.RestoreToPos))) - i-- - dAtA[i] = 0x12 - } - if m.BackupTime != nil { - size, err := m.BackupTime.MarshalToSizedBufferVT(dAtA[:i]) + if m.Status != nil { + size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -4123,7 +4395,7 @@ func (m *RestoreFromBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err return len(dAtA) - i, nil } -func (m *RestoreFromBackupResponse) MarshalVT() (dAtA []byte, err error) { +func (m *PrimaryStatusRequest) 
MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4136,12 +4408,12 @@ func (m *RestoreFromBackupResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RestoreFromBackupResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *PrimaryStatusRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RestoreFromBackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PrimaryStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4153,20 +4425,10 @@ func (m *RestoreFromBackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Event != nil { - size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *VDiffRequest) MarshalVT() (dAtA []byte, err error) { +func (m *PrimaryStatusResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4179,12 +4441,12 @@ func (m *VDiffRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VDiffRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *PrimaryStatusResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VDiffRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PrimaryStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4196,55 +4458,20 @@ func (m *VDiffRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Options != nil { - size, err := m.Options.MarshalToSizedBufferVT(dAtA[:i]) + if m.Status != nil { + size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) if err != nil 
{ return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x32 - } - if len(m.VdiffUuid) > 0 { - i -= len(m.VdiffUuid) - copy(dAtA[i:], m.VdiffUuid) - i = encodeVarint(dAtA, i, uint64(len(m.VdiffUuid))) - i-- - dAtA[i] = 0x2a - } - if len(m.ActionArg) > 0 { - i -= len(m.ActionArg) - copy(dAtA[i:], m.ActionArg) - i = encodeVarint(dAtA, i, uint64(len(m.ActionArg))) - i-- - dAtA[i] = 0x22 - } - if len(m.Action) > 0 { - i -= len(m.Action) - copy(dAtA[i:], m.Action) - i = encodeVarint(dAtA, i, uint64(len(m.Action))) - i-- - dAtA[i] = 0x1a - } - if len(m.Workflow) > 0 { - i -= len(m.Workflow) - copy(dAtA[i:], m.Workflow) - i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) - i-- - dAtA[i] = 0x12 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *VDiffResponse) MarshalVT() (dAtA []byte, err error) { +func (m *PrimaryPositionRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4257,12 +4484,12 @@ func (m *VDiffResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VDiffResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *PrimaryPositionRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VDiffResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PrimaryPositionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4274,32 +4501,10 @@ func (m *VDiffResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.VdiffUuid) > 0 { - i -= len(m.VdiffUuid) - copy(dAtA[i:], m.VdiffUuid) - i = encodeVarint(dAtA, i, uint64(len(m.VdiffUuid))) - i-- - dAtA[i] = 0x1a - } - if m.Output != nil { - size, err := m.Output.MarshalToSizedBufferVT(dAtA[:i]) - if err != 
nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - } - if m.Id != 0 { - i = encodeVarint(dAtA, i, uint64(m.Id)) - i-- - dAtA[i] = 0x8 - } return len(dAtA) - i, nil } -func (m *VDiffPickerOptions) MarshalVT() (dAtA []byte, err error) { +func (m *PrimaryPositionResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4312,12 +4517,12 @@ func (m *VDiffPickerOptions) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VDiffPickerOptions) MarshalToVT(dAtA []byte) (int, error) { +func (m *PrimaryPositionResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VDiffPickerOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PrimaryPositionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4329,31 +4534,17 @@ func (m *VDiffPickerOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.TargetCell) > 0 { - i -= len(m.TargetCell) - copy(dAtA[i:], m.TargetCell) - i = encodeVarint(dAtA, i, uint64(len(m.TargetCell))) + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) i-- - dAtA[i] = 0x1a - } - if len(m.SourceCell) > 0 { - i -= len(m.SourceCell) - copy(dAtA[i:], m.SourceCell) - i = encodeVarint(dAtA, i, uint64(len(m.SourceCell))) - i-- - dAtA[i] = 0x12 - } - if len(m.TabletTypes) > 0 { - i -= len(m.TabletTypes) - copy(dAtA[i:], m.TabletTypes) - i = encodeVarint(dAtA, i, uint64(len(m.TabletTypes))) - i-- - dAtA[i] = 0xa + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *VDiffReportOptions) MarshalVT() (dAtA []byte, err error) { +func (m *WaitForPositionRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4366,12 +4557,12 @@ func (m *VDiffReportOptions) MarshalVT() 
(dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VDiffReportOptions) MarshalToVT(dAtA []byte) (int, error) { +func (m *WaitForPositionRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VDiffReportOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *WaitForPositionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4383,37 +4574,17 @@ func (m *VDiffReportOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Format) > 0 { - i -= len(m.Format) - copy(dAtA[i:], m.Format) - i = encodeVarint(dAtA, i, uint64(len(m.Format))) - i-- - dAtA[i] = 0x1a - } - if m.DebugQuery { - i-- - if m.DebugQuery { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.OnlyPks { - i-- - if m.OnlyPks { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *VDiffCoreOptions) MarshalVT() (dAtA []byte, err error) { +func (m *WaitForPositionResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4426,12 +4597,12 @@ func (m *VDiffCoreOptions) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VDiffCoreOptions) MarshalToVT(dAtA []byte) (int, error) { +func (m *WaitForPositionResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VDiffCoreOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *WaitForPositionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4443,67 +4614,76 @@ func (m *VDiffCoreOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) 
copy(dAtA[i:], m.unknownFields) } - if m.UpdateTableStats { - i-- - if m.UpdateTableStats { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 + return len(dAtA) - i, nil +} + +func (m *StopReplicationRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if m.MaxExtraRowsToCompare != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxExtraRowsToCompare)) - i-- - dAtA[i] = 0x38 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - if m.TimeoutSeconds != 0 { - i = encodeVarint(dAtA, i, uint64(m.TimeoutSeconds)) - i-- - dAtA[i] = 0x30 + return dAtA[:n], nil +} + +func (m *StopReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StopReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil } - if m.SamplePct != 0 { - i = encodeVarint(dAtA, i, uint64(m.SamplePct)) - i-- - dAtA[i] = 0x28 + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.Checksum { - i-- - if m.Checksum { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 + return len(dAtA) - i, nil +} + +func (m *StopReplicationResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if m.MaxRows != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxRows)) - i-- - dAtA[i] = 0x18 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - if m.AutoRetry { - i-- - if m.AutoRetry { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 + return dAtA[:n], nil +} + +func (m *StopReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StopReplicationResponse) 
MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil } - if len(m.Tables) > 0 { - i -= len(m.Tables) - copy(dAtA[i:], m.Tables) - i = encodeVarint(dAtA, i, uint64(len(m.Tables))) - i-- - dAtA[i] = 0xa + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } return len(dAtA) - i, nil } -func (m *VDiffOptions) MarshalVT() (dAtA []byte, err error) { +func (m *StopReplicationMinimumRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4516,12 +4696,12 @@ func (m *VDiffOptions) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VDiffOptions) MarshalToVT(dAtA []byte) (int, error) { +func (m *StopReplicationMinimumRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VDiffOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StopReplicationMinimumRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4533,40 +4713,22 @@ func (m *VDiffOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.ReportOptions != nil { - size, err := m.ReportOptions.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a - } - if m.CoreOptions != nil { - size, err := m.CoreOptions.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if m.WaitTimeout != 0 { + i = encodeVarint(dAtA, i, uint64(m.WaitTimeout)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x10 } - if m.PickerOptions != nil { - size, err := m.PickerOptions.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.Position) > 0 { + i -= len(m.Position) + 
copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *UpdateVRWorkflowRequest) MarshalVT() (dAtA []byte, err error) { +func (m *StopReplicationMinimumResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4579,12 +4741,12 @@ func (m *UpdateVRWorkflowRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UpdateVRWorkflowRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *StopReplicationMinimumResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UpdateVRWorkflowRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StopReplicationMinimumResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4596,40 +4758,17 @@ func (m *UpdateVRWorkflowRequest) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.OnDdl != 0 { - i = encodeVarint(dAtA, i, uint64(m.OnDdl)) - i-- - dAtA[i] = 0x20 - } - if len(m.TabletTypes) > 0 { - for iNdEx := len(m.TabletTypes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.TabletTypes[iNdEx]) - copy(dAtA[i:], m.TabletTypes[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.TabletTypes[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Workflow) > 0 { - i -= len(m.Workflow) - copy(dAtA[i:], m.Workflow) - i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *UpdateVRWorkflowResponse) MarshalVT() (dAtA []byte, err error) { 
+func (m *StartReplicationRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4642,12 +4781,12 @@ func (m *UpdateVRWorkflowResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UpdateVRWorkflowResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *StartReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UpdateVRWorkflowResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StartReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4659,1655 +4798,6308 @@ func (m *UpdateVRWorkflowResponse) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Result != nil { - size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if m.SemiSync { + i-- + if m.SemiSync { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func encodeVarint(dAtA []byte, offset int, v uint64) int { - offset -= sov(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *TableDefinition) SizeVT() (n int) { +func (m *StartReplicationResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Schema) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if len(m.Columns) > 0 { - for _, s := range m.Columns { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - if len(m.PrimaryKeyColumns) > 0 { - for _, s := range m.PrimaryKeyColumns { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - l = len(m.Type) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.DataLength != 0 { - n += 1 + 
sov(uint64(m.DataLength)) - } - if m.RowCount != 0 { - n += 1 + sov(uint64(m.RowCount)) + return nil, nil } - if len(m.Fields) > 0 { - for _, e := range m.Fields { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SchemaDefinition) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.DatabaseSchema) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if len(m.TableDefinitions) > 0 { - for _, e := range m.TableDefinitions { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n +func (m *StartReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SchemaChangeResult) SizeVT() (n int) { +func (m *StartReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.BeforeSchema != nil { - l = m.BeforeSchema.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.AfterSchema != nil { - l = m.AfterSchema.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *UserPermission) SizeVT() (n int) { +func (m *StartReplicationUntilAfterRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Host) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.User) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.PasswordChecksum != 0 { - n += 1 + sov(uint64(m.PasswordChecksum)) + return nil, nil } - if len(m.Privileges) > 0 { - for k, v := range m.Privileges { - _ = k - _ = v - mapEntrySize := 1 + len(k) + 
sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *DbPermission) SizeVT() (n int) { +func (m *StartReplicationUntilAfterRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StartReplicationUntilAfterRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Host) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Db) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.User) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.WaitTimeout != 0 { + i = encodeVarint(dAtA, i, uint64(m.WaitTimeout)) + i-- + dAtA[i] = 0x10 } - if len(m.Privileges) > 0 { - for k, v := range m.Privileges { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *Permissions) SizeVT() (n int) { +func (m *StartReplicationUntilAfterResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if len(m.UserPermissions) > 0 { - for _, e := range m.UserPermissions { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + return nil, nil } - if len(m.DbPermissions) > 0 { - for _, e := range m.DbPermissions { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *PingRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Payload) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n +func (m *StartReplicationUntilAfterResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PingResponse) SizeVT() (n int) { +func (m *StartReplicationUntilAfterResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Payload) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *SleepRequest) SizeVT() (n int) { +func (m *GetReplicasRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Duration != 0 { - n += 1 + sov(uint64(m.Duration)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SleepResponse) SizeVT() (n int) { +func (m *GetReplicasRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetReplicasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil } -func (m *ExecuteHookRequest) SizeVT() (n int) { +func (m 
*GetReplicasResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if len(m.Parameters) > 0 { - for _, s := range m.Parameters { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - if len(m.ExtraEnv) > 0 { - for k, v := range m.ExtraEnv { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ExecuteHookResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ExitStatus != 0 { - n += 1 + sov(uint64(m.ExitStatus)) - } - l = len(m.Stdout) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Stderr) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n +func (m *GetReplicasResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetSchemaRequest) SizeVT() (n int) { +func (m *GetReplicasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Tables) > 0 { - for _, s := range m.Tables { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - if m.IncludeViews { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.ExcludeTables) > 0 { - for _, s := range m.ExcludeTables { - l = len(s) - n += 1 + l + sov(uint64(l)) + if len(m.Addrs) > 0 { + for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Addrs[iNdEx]) + copy(dAtA[i:], m.Addrs[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Addrs[iNdEx]))) + i-- + dAtA[i] = 0xa } } - if 
m.TableSchemaOnly { - n += 2 - } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetSchemaResponse) SizeVT() (n int) { +func (m *ResetReplicationRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.SchemaDefinition != nil { - l = m.SchemaDefinition.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetPermissionsRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *ResetReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetPermissionsResponse) SizeVT() (n int) { +func (m *ResetReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Permissions != nil { - l = m.Permissions.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *SetReadOnlyRequest) SizeVT() (n int) { +func (m *ResetReplicationResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *SetReadOnlyResponse) SizeVT() (n int) { - if m == nil { - return 0 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SetReadWriteRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 
len(m.unknownFields) - return n +func (m *ResetReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetReadWriteResponse) SizeVT() (n int) { +func (m *ResetReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil } -func (m *ChangeTypeRequest) SizeVT() (n int) { +func (m *VReplicationExecRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if m.TabletType != 0 { - n += 1 + sov(uint64(m.TabletType)) + return nil, nil } - if m.SemiSync { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ChangeTypeResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *VReplicationExecRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RefreshStateRequest) SizeVT() (n int) { +func (m *VReplicationExecRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarint(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *RefreshStateResponse) SizeVT() (n int) { +func (m *VReplicationExecResponse) MarshalVT() (dAtA []byte, err error) { if m == nil 
{ - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *RunHealthCheckRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *VReplicationExecResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RunHealthCheckResponse) SizeVT() (n int) { +func (m *VReplicationExecResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ReloadSchemaRequest) SizeVT() (n int) { +func (m *VReplicationWaitForPosRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = len(m.WaitPosition) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ReloadSchemaResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *VReplicationWaitForPosRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PreflightSchemaRequest) SizeVT() (n int) { +func (m *VReplicationWaitForPosRequest) 
MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Changes) > 0 { - for _, s := range m.Changes { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0x12 + } + if m.Id != 0 { + i = encodeVarint(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } -func (m *PreflightSchemaResponse) SizeVT() (n int) { +func (m *VReplicationWaitForPosResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.ChangeResults) > 0 { - for _, e := range m.ChangeResults { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ApplySchemaRequest) SizeVT() (n int) { +func (m *VReplicationWaitForPosResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VReplicationWaitForPosResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Sql) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Force { - n += 2 - } - if m.AllowReplication { - n += 2 - } - if m.BeforeSchema != nil { - l = m.BeforeSchema.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.AfterSchema != nil { - l = m.AfterSchema.SizeVT() - n += 1 + l + sov(uint64(l)) + return len(dAtA) - i, nil +} + +func (m 
*InitPrimaryRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - l = len(m.SqlMode) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ApplySchemaResponse) SizeVT() (n int) { +func (m *InitPrimaryRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *InitPrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.BeforeSchema != nil { - l = m.BeforeSchema.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.AfterSchema != nil { - l = m.AfterSchema.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.SemiSync { + i-- + if m.SemiSync { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *LockTablesRequest) SizeVT() (n int) { +func (m *InitPrimaryResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *LockTablesResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *InitPrimaryResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UnlockTablesRequest) SizeVT() (n int) { +func (m *InitPrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 
0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *UnlockTablesResponse) SizeVT() (n int) { +func (m *PopulateReparentJournalRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ExecuteQueryRequest) SizeVT() (n int) { +func (m *PopulateReparentJournalRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PopulateReparentJournalRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Query) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.DbName) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.MaxRows != 0 { - n += 1 + sov(uint64(m.MaxRows)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.CallerId != nil { - l = m.CallerId.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.ReplicationPosition) > 0 { + i -= len(m.ReplicationPosition) + copy(dAtA[i:], m.ReplicationPosition) + i = encodeVarint(dAtA, i, uint64(len(m.ReplicationPosition))) + i-- + dAtA[i] = 0x22 } - n += len(m.unknownFields) - return n + if m.PrimaryAlias != nil { + size, err := m.PrimaryAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.ActionName) > 0 { 
+ i -= len(m.ActionName) + copy(dAtA[i:], m.ActionName) + i = encodeVarint(dAtA, i, uint64(len(m.ActionName))) + i-- + dAtA[i] = 0x12 + } + if m.TimeCreatedNs != 0 { + i = encodeVarint(dAtA, i, uint64(m.TimeCreatedNs)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } -func (m *ExecuteQueryResponse) SizeVT() (n int) { +func (m *PopulateReparentJournalResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Result != nil { - l = m.Result.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ExecuteFetchAsDbaRequest) SizeVT() (n int) { +func (m *PopulateReparentJournalResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PopulateReparentJournalResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Query) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.DbName) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.MaxRows != 0 { - n += 1 + sov(uint64(m.MaxRows)) - } - if m.DisableBinlogs { - n += 2 - } - if m.ReloadSchema { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ExecuteFetchAsDbaResponse) SizeVT() (n int) { +func (m *InitReplicaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Result != nil { - l = m.Result.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - 
return n + return dAtA[:n], nil } -func (m *ExecuteFetchAsAllPrivsRequest) SizeVT() (n int) { +func (m *InitReplicaRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *InitReplicaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Query) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.DbName) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.SemiSync { + i-- + if m.SemiSync { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 } - if m.MaxRows != 0 { - n += 1 + sov(uint64(m.MaxRows)) + if m.TimeCreatedNs != 0 { + i = encodeVarint(dAtA, i, uint64(m.TimeCreatedNs)) + i-- + dAtA[i] = 0x18 } - if m.ReloadSchema { - n += 2 + if len(m.ReplicationPosition) > 0 { + i -= len(m.ReplicationPosition) + copy(dAtA[i:], m.ReplicationPosition) + i = encodeVarint(dAtA, i, uint64(len(m.ReplicationPosition))) + i-- + dAtA[i] = 0x12 } - n += len(m.unknownFields) - return n + if m.Parent != nil { + size, err := m.Parent.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ExecuteFetchAsAllPrivsResponse) SizeVT() (n int) { +func (m *InitReplicaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Result != nil { - l = m.Result.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ExecuteFetchAsAppRequest) SizeVT() (n int) { +func (m *InitReplicaResponse) MarshalToVT(dAtA []byte) (int, error) { + size 
:= m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *InitReplicaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Query) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.MaxRows != 0 { - n += 1 + sov(uint64(m.MaxRows)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ExecuteFetchAsAppResponse) SizeVT() (n int) { +func (m *DemotePrimaryRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Result != nil { - l = m.Result.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ReplicationStatusRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *DemotePrimaryRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReplicationStatusResponse) SizeVT() (n int) { +func (m *DemotePrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Status != nil { - l = m.Status.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *PrimaryStatusRequest) SizeVT() (n int) { +func (m *DemotePrimaryResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PrimaryStatusResponse) SizeVT() (n int) { +func (m *DemotePrimaryResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DemotePrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Status != nil { - l = m.Status.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + if m.PrimaryStatus != nil { + size, err := m.PrimaryStatus.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil } -func (m *PrimaryPositionRequest) SizeVT() (n int) { +func (m *UndoDemotePrimaryRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PrimaryPositionResponse) SizeVT() (n int) { +func (m *UndoDemotePrimaryRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UndoDemotePrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Position) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + if m.SemiSync { + i-- + if m.SemiSync { + dAtA[i] = 1 + } else { + dAtA[i] = 
0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } -func (m *WaitForPositionRequest) SizeVT() (n int) { +func (m *UndoDemotePrimaryResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = len(m.Position) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *WaitForPositionResponse) SizeVT() (n int) { +func (m *UndoDemotePrimaryResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UndoDemotePrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil } -func (m *StopReplicationRequest) SizeVT() (n int) { +func (m *ReplicaWasPromotedRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *StopReplicationResponse) SizeVT() (n int) { +func (m *ReplicaWasPromotedRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReplicaWasPromotedRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil 
} -func (m *StopReplicationMinimumRequest) SizeVT() (n int) { +func (m *ReplicaWasPromotedResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Position) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.WaitTimeout != 0 { - n += 1 + sov(uint64(m.WaitTimeout)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *StopReplicationMinimumResponse) SizeVT() (n int) { +func (m *ReplicaWasPromotedResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReplicaWasPromotedResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Position) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *StartReplicationRequest) SizeVT() (n int) { +func (m *ResetReplicationParametersRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.SemiSync { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *StartReplicationResponse) SizeVT() (n int) { +func (m *ResetReplicationParametersRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ResetReplicationParametersRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += 
len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil } -func (m *StartReplicationUntilAfterRequest) SizeVT() (n int) { +func (m *ResetReplicationParametersResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Position) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.WaitTimeout != 0 { - n += 1 + sov(uint64(m.WaitTimeout)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *StartReplicationUntilAfterResponse) SizeVT() (n int) { +func (m *ResetReplicationParametersResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ResetReplicationParametersResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil } -func (m *GetReplicasRequest) SizeVT() (n int) { +func (m *FullStatusRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *GetReplicasResponse) SizeVT() (n int) { +func (m *FullStatusRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *FullStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = 
i var l int _ = l - if len(m.Addrs) > 0 { - for _, s := range m.Addrs { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ResetReplicationRequest) SizeVT() (n int) { +func (m *FullStatusResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ResetReplicationResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *FullStatusResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VReplicationExecRequest) SizeVT() (n int) { +func (m *FullStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Query) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + if m.Status != nil { + size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *VReplicationExecResponse) SizeVT() (n int) { +func (m *SetReplicationSourceRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Result != nil { - l = m.Result.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + 
return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *VReplicationWaitForPosRequest) SizeVT() (n int) { +func (m *SetReplicationSourceRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetReplicationSourceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Id != 0 { - n += 1 + sov(uint64(m.Id)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Position) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.SemiSync { + i-- + if m.SemiSync { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 } - n += len(m.unknownFields) - return n -} - -func (m *VReplicationWaitForPosResponse) SizeVT() (n int) { - if m == nil { - return 0 + if len(m.WaitPosition) > 0 { + i -= len(m.WaitPosition) + copy(dAtA[i:], m.WaitPosition) + i = encodeVarint(dAtA, i, uint64(len(m.WaitPosition))) + i-- + dAtA[i] = 0x22 } - var l int - _ = l - n += len(m.unknownFields) - return n + if m.ForceStartReplication { + i-- + if m.ForceStartReplication { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.TimeCreatedNs != 0 { + i = encodeVarint(dAtA, i, uint64(m.TimeCreatedNs)) + i-- + dAtA[i] = 0x10 + } + if m.Parent != nil { + size, err := m.Parent.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *InitPrimaryRequest) SizeVT() (n int) { +func (m *SetReplicationSourceResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.SemiSync { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += 
len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *InitPrimaryResponse) SizeVT() (n int) { +func (m *SetReplicationSourceResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetReplicationSourceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Position) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *PopulateReparentJournalRequest) SizeVT() (n int) { +func (m *ReplicaWasRestartedRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if m.TimeCreatedNs != 0 { - n += 1 + sov(uint64(m.TimeCreatedNs)) - } - l = len(m.ActionName) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.PrimaryAlias != nil { - l = m.PrimaryAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + return nil, nil } - l = len(m.ReplicationPosition) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *PopulateReparentJournalResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *ReplicaWasRestartedRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *InitReplicaRequest) SizeVT() (n int) { +func (m *ReplicaWasRestartedRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Parent != nil { - l = m.Parent.SizeVT() - n += 1 + l + sov(uint64(l)) - } - l = 
len(m.ReplicationPosition) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.TimeCreatedNs != 0 { - n += 1 + sov(uint64(m.TimeCreatedNs)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.SemiSync { - n += 2 + if m.Parent != nil { + size, err := m.Parent.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *InitReplicaResponse) SizeVT() (n int) { +func (m *ReplicaWasRestartedResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *DemotePrimaryRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *ReplicaWasRestartedResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *DemotePrimaryResponse) SizeVT() (n int) { +func (m *ReplicaWasRestartedResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.PrimaryStatus != nil { - l = m.PrimaryStatus.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *UndoDemotePrimaryRequest) SizeVT() (n int) { +func (m *StopReplicationAndGetStatusRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.SemiSync { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *UndoDemotePrimaryResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *StopReplicationAndGetStatusRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReplicaWasPromotedRequest) SizeVT() (n int) { +func (m *StopReplicationAndGetStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n -} - -func (m *ReplicaWasPromotedResponse) SizeVT() (n int) { - if m == nil { - return 0 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - var l int - _ = l - n += len(m.unknownFields) - return n + if m.StopReplicationMode != 0 { + i = encodeVarint(dAtA, i, uint64(m.StopReplicationMode)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } -func (m *ResetReplicationParametersRequest) SizeVT() (n int) { +func (m *StopReplicationAndGetStatusResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ResetReplicationParametersResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *StopReplicationAndGetStatusResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *FullStatusRequest) SizeVT() (n int) { +func (m *StopReplicationAndGetStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { 
- return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Status != nil { + size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil } -func (m *FullStatusResponse) SizeVT() (n int) { +func (m *PromoteReplicaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Status != nil { - l = m.Status.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SetReplicationSourceRequest) SizeVT() (n int) { +func (m *PromoteReplicaRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PromoteReplicaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Parent != nil { - l = m.Parent.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.TimeCreatedNs != 0 { - n += 1 + sov(uint64(m.TimeCreatedNs)) - } - if m.ForceStartReplication { - n += 2 - } - l = len(m.WaitPosition) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } if m.SemiSync { - n += 2 + i-- + if m.SemiSync { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *SetReplicationSourceResponse) SizeVT() (n int) { +func (m *PromoteReplicaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l 
int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ReplicaWasRestartedRequest) SizeVT() (n int) { +func (m *PromoteReplicaResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PromoteReplicaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Parent != nil { - l = m.Parent.SizeVT() - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *ReplicaWasRestartedResponse) SizeVT() (n int) { - if m == nil { - return 0 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - var l int - _ = l - n += len(m.unknownFields) - return n + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *StopReplicationAndGetStatusRequest) SizeVT() (n int) { +func (m *BackupRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.StopReplicationMode != 0 { - n += 1 + sov(uint64(m.StopReplicationMode)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *StopReplicationAndGetStatusResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Status != nil { - l = m.Status.SizeVT() - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n +func (m *BackupRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m 
*PromoteReplicaRequest) SizeVT() (n int) { +func (m *BackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.SemiSync { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + if m.UpgradeSafe { + i-- + if m.UpgradeSafe { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.IncrementalFromPos) > 0 { + i -= len(m.IncrementalFromPos) + copy(dAtA[i:], m.IncrementalFromPos) + i = encodeVarint(dAtA, i, uint64(len(m.IncrementalFromPos))) + i-- + dAtA[i] = 0x1a + } + if m.AllowPrimary { + i-- + if m.AllowPrimary { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Concurrency != 0 { + i = encodeVarint(dAtA, i, uint64(m.Concurrency)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } -func (m *PromoteReplicaResponse) SizeVT() (n int) { +func (m *BackupResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = len(m.Position) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *BackupRequest) SizeVT() (n int) { +func (m *BackupResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *BackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Concurrency != 0 { - n += 1 + sov(uint64(m.Concurrency)) - } - if m.AllowPrimary { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.IncrementalFromPos) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if 
m.Event != nil { + size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *BackupResponse) SizeVT() (n int) { +func (m *RestoreFromBackupRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Event != nil { - l = m.Event.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *RestoreFromBackupRequest) SizeVT() (n int) { +func (m *RestoreFromBackupRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RestoreFromBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.BackupTime != nil { - l = m.BackupTime.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.RestoreToPos) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.RestoreToTimestamp != nil { + size, err := m.RestoreToTimestamp.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 } if m.DryRun { - n += 2 + i-- + if m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 } - n += len(m.unknownFields) - return n + if len(m.RestoreToPos) > 0 { + i -= len(m.RestoreToPos) + copy(dAtA[i:], m.RestoreToPos) + i = encodeVarint(dAtA, i, uint64(len(m.RestoreToPos))) + i-- + dAtA[i] = 0x12 + } + if m.BackupTime != nil { + size, err := m.BackupTime.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *RestoreFromBackupResponse) SizeVT() (n int) { +func (m *RestoreFromBackupResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Event != nil { - l = m.Event.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *VDiffRequest) SizeVT() (n int) { +func (m *RestoreFromBackupResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RestoreFromBackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Workflow) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Action) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.ActionArg) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.Event != nil { + size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - l = len(m.VdiffUuid) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *CreateVReplicationWorkflowRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if m.Options != nil { - l = m.Options.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + 
return dAtA[:n], nil } -func (m *VDiffResponse) SizeVT() (n int) { +func (m *CreateVReplicationWorkflowRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CreateVReplicationWorkflowRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Id != 0 { - n += 1 + sov(uint64(m.Id)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.Output != nil { - l = m.Output.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.StopAfterCopy { + i-- + if m.StopAfterCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 } - l = len(m.VdiffUuid) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.AutoStart { + i-- + if m.AutoStart { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 } - n += len(m.unknownFields) - return n -} - -func (m *VDiffPickerOptions) SizeVT() (n int) { - if m == nil { - return 0 + if m.DeferSecondaryKeys { + i-- + if m.DeferSecondaryKeys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 } - var l int - _ = l - l = len(m.TabletTypes) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.WorkflowSubType != 0 { + i = encodeVarint(dAtA, i, uint64(m.WorkflowSubType)) + i-- + dAtA[i] = 0x38 } - l = len(m.SourceCell) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.WorkflowType != 0 { + i = encodeVarint(dAtA, i, uint64(m.WorkflowType)) + i-- + dAtA[i] = 0x30 } - l = len(m.TargetCell) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.TabletSelectionPreference != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletSelectionPreference)) + i-- + dAtA[i] = 0x28 } - n += len(m.unknownFields) - return n + if len(m.TabletTypes) > 0 { + var pksize2 int + for _, num := range m.TabletTypes { + pksize2 += sov(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.TabletTypes { + num := uint64(num1) + for num >= 1<<7 
{ + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = encodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x22 + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.BinlogSource) > 0 { + for iNdEx := len(m.BinlogSource) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.BinlogSource[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *VDiffReportOptions) SizeVT() (n int) { +func (m *CreateVReplicationWorkflowResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if m.OnlyPks { - n += 2 - } - if m.DebugQuery { - n += 2 + return nil, nil } - l = len(m.Format) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *VDiffCoreOptions) SizeVT() (n int) { +func (m *CreateVReplicationWorkflowResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CreateVReplicationWorkflowResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Tables) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.AutoRetry { - n += 2 - } - if m.MaxRows != 0 { - n += 1 + sov(uint64(m.MaxRows)) - } - if m.Checksum { - n += 2 - } - if m.SamplePct != 
0 { - n += 1 + sov(uint64(m.SamplePct)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.TimeoutSeconds != 0 { - n += 1 + sov(uint64(m.TimeoutSeconds)) + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - if m.MaxExtraRowsToCompare != 0 { - n += 1 + sov(uint64(m.MaxExtraRowsToCompare)) + return len(dAtA) - i, nil +} + +func (m *DeleteVReplicationWorkflowRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if m.UpdateTableStats { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *VDiffOptions) SizeVT() (n int) { +func (m *DeleteVReplicationWorkflowRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DeleteVReplicationWorkflowRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.PickerOptions != nil { - l = m.PickerOptions.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.CoreOptions != nil { - l = m.CoreOptions.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.ReportOptions != nil { - l = m.ReportOptions.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *UpdateVRWorkflowRequest) SizeVT() (n int) { +func (m *DeleteVReplicationWorkflowResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { 
- return 0 + return nil, nil } - var l int - _ = l - l = len(m.Workflow) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) + return dAtA[:n], nil +} + +func (m *DeleteVReplicationWorkflowResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DeleteVReplicationWorkflowResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReadVReplicationWorkflowRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadVReplicationWorkflowRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReadVReplicationWorkflowRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReadVReplicationWorkflowResponse_Stream) 
MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadVReplicationWorkflowResponse_Stream) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReadVReplicationWorkflowResponse_Stream) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ComponentThrottled) > 0 { + i -= len(m.ComponentThrottled) + copy(dAtA[i:], m.ComponentThrottled) + i = encodeVarint(dAtA, i, uint64(len(m.ComponentThrottled))) + i-- + dAtA[i] = 0x72 + } + if m.TimeThrottled != nil { + size, err := m.TimeThrottled.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.TimeHeartbeat != nil { + size, err := m.TimeHeartbeat.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if m.RowsCopied != 0 { + i = encodeVarint(dAtA, i, uint64(m.RowsCopied)) + i-- + dAtA[i] = 0x58 + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarint(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x52 + } + if m.State != 0 { + i = encodeVarint(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x48 + } + if m.TransactionTimestamp != nil { + size, err := m.TransactionTimestamp.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.TimeUpdated != nil { + size, err := m.TimeUpdated.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.MaxReplicationLag != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxReplicationLag)) + i-- + dAtA[i] = 0x30 + } + if m.MaxTps != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxTps)) + i-- + dAtA[i] = 0x28 + } + if len(m.StopPos) > 0 { + i -= len(m.StopPos) + copy(dAtA[i:], m.StopPos) + i = encodeVarint(dAtA, i, uint64(len(m.StopPos))) + i-- + dAtA[i] = 0x22 + } + if len(m.Pos) > 0 { + i -= len(m.Pos) + copy(dAtA[i:], m.Pos) + i = encodeVarint(dAtA, i, uint64(len(m.Pos))) + i-- + dAtA[i] = 0x1a + } + if m.Bls != nil { + size, err := m.Bls.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Id != 0 { + i = encodeVarint(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ReadVReplicationWorkflowResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadVReplicationWorkflowResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReadVReplicationWorkflowResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Streams) > 0 { + for iNdEx := len(m.Streams) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Streams[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a } } + if m.DeferSecondaryKeys { + i-- + if m.DeferSecondaryKeys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 
+ } + i-- + dAtA[i] = 0x50 + } + if m.WorkflowSubType != 0 { + i = encodeVarint(dAtA, i, uint64(m.WorkflowSubType)) + i-- + dAtA[i] = 0x48 + } + if m.WorkflowType != 0 { + i = encodeVarint(dAtA, i, uint64(m.WorkflowType)) + i-- + dAtA[i] = 0x40 + } + if len(m.Tags) > 0 { + i -= len(m.Tags) + copy(dAtA[i:], m.Tags) + i = encodeVarint(dAtA, i, uint64(len(m.Tags))) + i-- + dAtA[i] = 0x3a + } + if len(m.DbName) > 0 { + i -= len(m.DbName) + copy(dAtA[i:], m.DbName) + i = encodeVarint(dAtA, i, uint64(len(m.DbName))) + i-- + dAtA[i] = 0x32 + } + if m.TabletSelectionPreference != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletSelectionPreference)) + i-- + dAtA[i] = 0x28 + } if len(m.TabletTypes) > 0 { - for _, s := range m.TabletTypes { - l = len(s) - n += 1 + l + sov(uint64(l)) + var pksize2 int + for _, num := range m.TabletTypes { + pksize2 += sov(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.TabletTypes { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = encodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x22 + } + if len(m.Cells) > 0 { + i -= len(m.Cells) + copy(dAtA[i:], m.Cells) + i = encodeVarint(dAtA, i, uint64(len(m.Cells))) + i-- + dAtA[i] = 0x1a + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *VDiffRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, 
nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Options != nil { + size, err := m.Options.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.VdiffUuid) > 0 { + i -= len(m.VdiffUuid) + copy(dAtA[i:], m.VdiffUuid) + i = encodeVarint(dAtA, i, uint64(len(m.VdiffUuid))) + i-- + dAtA[i] = 0x2a + } + if len(m.ActionArg) > 0 { + i -= len(m.ActionArg) + copy(dAtA[i:], m.ActionArg) + i = encodeVarint(dAtA, i, uint64(len(m.ActionArg))) + i-- + dAtA[i] = 0x22 + } + if len(m.Action) > 0 { + i -= len(m.Action) + copy(dAtA[i:], m.Action) + i = encodeVarint(dAtA, i, uint64(len(m.Action))) + i-- + dAtA[i] = 0x1a + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VDiffResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.VdiffUuid) > 0 { + i -= len(m.VdiffUuid) + copy(dAtA[i:], m.VdiffUuid) + i = encodeVarint(dAtA, i, uint64(len(m.VdiffUuid))) + i-- + dAtA[i] = 0x1a + } 
+ if m.Output != nil { + size, err := m.Output.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Id != 0 { + i = encodeVarint(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *VDiffPickerOptions) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffPickerOptions) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffPickerOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TargetCell) > 0 { + i -= len(m.TargetCell) + copy(dAtA[i:], m.TargetCell) + i = encodeVarint(dAtA, i, uint64(len(m.TargetCell))) + i-- + dAtA[i] = 0x1a + } + if len(m.SourceCell) > 0 { + i -= len(m.SourceCell) + copy(dAtA[i:], m.SourceCell) + i = encodeVarint(dAtA, i, uint64(len(m.SourceCell))) + i-- + dAtA[i] = 0x12 + } + if len(m.TabletTypes) > 0 { + i -= len(m.TabletTypes) + copy(dAtA[i:], m.TabletTypes) + i = encodeVarint(dAtA, i, uint64(len(m.TabletTypes))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VDiffReportOptions) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffReportOptions) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffReportOptions) 
MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Format) > 0 { + i -= len(m.Format) + copy(dAtA[i:], m.Format) + i = encodeVarint(dAtA, i, uint64(len(m.Format))) + i-- + dAtA[i] = 0x1a + } + if m.DebugQuery { + i-- + if m.DebugQuery { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.OnlyPks { + i-- + if m.OnlyPks { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *VDiffCoreOptions) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffCoreOptions) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffCoreOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UpdateTableStats { + i-- + if m.UpdateTableStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.MaxExtraRowsToCompare != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxExtraRowsToCompare)) + i-- + dAtA[i] = 0x38 + } + if m.TimeoutSeconds != 0 { + i = encodeVarint(dAtA, i, uint64(m.TimeoutSeconds)) + i-- + dAtA[i] = 0x30 + } + if m.SamplePct != 0 { + i = encodeVarint(dAtA, i, uint64(m.SamplePct)) + i-- + dAtA[i] = 0x28 + } + if m.Checksum { + i-- + if m.Checksum { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.MaxRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxRows)) + i-- + dAtA[i] = 0x18 + } + if m.AutoRetry { + 
i-- + if m.AutoRetry { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Tables) > 0 { + i -= len(m.Tables) + copy(dAtA[i:], m.Tables) + i = encodeVarint(dAtA, i, uint64(len(m.Tables))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VDiffOptions) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffOptions) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ReportOptions != nil { + size, err := m.ReportOptions.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.CoreOptions != nil { + size, err := m.CoreOptions.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.PickerOptions != nil { + size, err := m.PickerOptions.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateVReplicationWorkflowRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateVReplicationWorkflowRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + 
return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UpdateVReplicationWorkflowRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.State != 0 { + i = encodeVarint(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x30 + } + if m.OnDdl != 0 { + i = encodeVarint(dAtA, i, uint64(m.OnDdl)) + i-- + dAtA[i] = 0x28 + } + if m.TabletSelectionPreference != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletSelectionPreference)) + i-- + dAtA[i] = 0x20 + } + if len(m.TabletTypes) > 0 { + var pksize2 int + for _, num := range m.TabletTypes { + pksize2 += sov(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.TabletTypes { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = encodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x1a + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateVReplicationWorkflowResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateVReplicationWorkflowResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UpdateVReplicationWorkflowResponse) MarshalToSizedBufferVT(dAtA []byte) (int, 
error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResetSequencesRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResetSequencesRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ResetSequencesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tables[iNdEx]) + copy(dAtA[i:], m.Tables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResetSequencesResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResetSequencesResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ResetSequencesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= 
len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *CheckThrottlerRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckThrottlerRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CheckThrottlerRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.AppName) > 0 { + i -= len(m.AppName) + copy(dAtA[i:], m.AppName) + i = encodeVarint(dAtA, i, uint64(len(m.AppName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CheckThrottlerResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckThrottlerResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CheckThrottlerResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RecentlyChecked { + i-- + if m.RecentlyChecked { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarint(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], 
m.Error) + i = encodeVarint(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x22 + } + if m.Threshold != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Threshold)))) + i-- + dAtA[i] = 0x19 + } + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if m.StatusCode != 0 { + i = encodeVarint(dAtA, i, uint64(m.StatusCode)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *TableDefinition) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Schema) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Columns) > 0 { + for _, s := range m.Columns { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.PrimaryKeyColumns) > 0 { + for _, s := range m.PrimaryKeyColumns { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.DataLength != 0 { + n += 1 + sov(uint64(m.DataLength)) + } + if m.RowCount != 0 { + n += 1 + sov(uint64(m.RowCount)) + } + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *SchemaDefinition) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DatabaseSchema) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.TableDefinitions) > 0 { + for _, e := range m.TableDefinitions { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *SchemaChangeResult) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if 
m.BeforeSchema != nil { + l = m.BeforeSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.AfterSchema != nil { + l = m.AfterSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UserPermission) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.User) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.PasswordChecksum != 0 { + n += 1 + sov(uint64(m.PasswordChecksum)) + } + if len(m.Privileges) > 0 { + for k, v := range m.Privileges { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *DbPermission) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Db) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.User) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Privileges) > 0 { + for k, v := range m.Privileges { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Permissions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.UserPermissions) > 0 { + for _, e := range m.UserPermissions { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.DbPermissions) > 0 { + for _, e := range m.DbPermissions { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *PingRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Payload) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PingResponse) SizeVT() (n int) { + if m 
== nil { + return 0 + } + var l int + _ = l + l = len(m.Payload) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SleepRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Duration != 0 { + n += 1 + sov(uint64(m.Duration)) + } + n += len(m.unknownFields) + return n +} + +func (m *SleepResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ExecuteHookRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Parameters) > 0 { + for _, s := range m.Parameters { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ExtraEnv) > 0 { + for k, v := range m.ExtraEnv { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteHookResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExitStatus != 0 { + n += 1 + sov(uint64(m.ExitStatus)) + } + l = len(m.Stdout) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Stderr) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Tables) > 0 { + for _, s := range m.Tables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.IncludeViews { + n += 2 + } + if len(m.ExcludeTables) > 0 { + for _, s := range m.ExcludeTables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.TableSchemaOnly { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *GetSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SchemaDefinition != nil { + l = m.SchemaDefinition.SizeVT() + n 
+= 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetPermissionsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetPermissionsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Permissions != nil { + l = m.Permissions.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetReadOnlyRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SetReadOnlyResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SetReadWriteRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SetReadWriteResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ChangeTypeRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletType != 0 { + n += 1 + sov(uint64(m.TabletType)) + } + if m.SemiSync { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ChangeTypeResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RefreshStateRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RefreshStateResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RunHealthCheckRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RunHealthCheckResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + 
return n +} + +func (m *ReloadSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.WaitPosition) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReloadSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PreflightSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Changes) > 0 { + for _, s := range m.Changes { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *PreflightSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ChangeResults) > 0 { + for _, e := range m.ChangeResults { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ApplySchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Sql) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Force { + n += 2 + } + if m.AllowReplication { + n += 2 + } + if m.BeforeSchema != nil { + l = m.BeforeSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.AfterSchema != nil { + l = m.AfterSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.SqlMode) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.BatchSize != 0 { + n += 1 + sov(uint64(m.BatchSize)) + } + n += len(m.unknownFields) + return n +} + +func (m *ApplySchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BeforeSchema != nil { + l = m.BeforeSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.AfterSchema != nil { + l = m.AfterSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LockTablesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m 
*LockTablesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *UnlockTablesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *UnlockTablesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ExecuteQueryRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Query) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.DbName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.MaxRows != 0 { + n += 1 + sov(uint64(m.MaxRows)) + } + if m.CallerId != nil { + l = m.CallerId.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteQueryResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteFetchAsDbaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Query) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.DbName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.MaxRows != 0 { + n += 1 + sov(uint64(m.MaxRows)) + } + if m.DisableBinlogs { + n += 2 + } + if m.ReloadSchema { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteFetchAsDbaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteFetchAsAllPrivsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Query) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.DbName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.MaxRows != 0 { + n += 1 + 
sov(uint64(m.MaxRows)) + } + if m.ReloadSchema { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteFetchAsAllPrivsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteFetchAsAppRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Query) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.MaxRows != 0 { + n += 1 + sov(uint64(m.MaxRows)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteFetchAsAppResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReplicationStatusRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ReplicationStatusResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PrimaryStatusRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PrimaryStatusResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PrimaryPositionRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PrimaryPositionResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m 
*WaitForPositionRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *WaitForPositionResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *StopReplicationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *StopReplicationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *StopReplicationMinimumRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.WaitTimeout != 0 { + n += 1 + sov(uint64(m.WaitTimeout)) + } + n += len(m.unknownFields) + return n +} + +func (m *StopReplicationMinimumResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StartReplicationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SemiSync { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *StartReplicationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *StartReplicationUntilAfterRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.WaitTimeout != 0 { + n += 1 + sov(uint64(m.WaitTimeout)) + } + n += len(m.unknownFields) + return n +} + +func (m *StartReplicationUntilAfterResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetReplicasRequest) SizeVT() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetReplicasResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Addrs) > 0 { + for _, s := range m.Addrs { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ResetReplicationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ResetReplicationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *VReplicationExecRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Query) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VReplicationExecResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VReplicationWaitForPosRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sov(uint64(m.Id)) + } + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VReplicationWaitForPosResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *InitPrimaryRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SemiSync { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *InitPrimaryResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PopulateReparentJournalRequest) SizeVT() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + if m.TimeCreatedNs != 0 { + n += 1 + sov(uint64(m.TimeCreatedNs)) + } + l = len(m.ActionName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.PrimaryAlias != nil { + l = m.PrimaryAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.ReplicationPosition) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PopulateReparentJournalResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *InitReplicaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Parent != nil { + l = m.Parent.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.ReplicationPosition) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TimeCreatedNs != 0 { + n += 1 + sov(uint64(m.TimeCreatedNs)) + } + if m.SemiSync { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *InitReplicaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *DemotePrimaryRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *DemotePrimaryResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PrimaryStatus != nil { + l = m.PrimaryStatus.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UndoDemotePrimaryRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SemiSync { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *UndoDemotePrimaryResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ReplicaWasPromotedRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m 
*ReplicaWasPromotedResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ResetReplicationParametersRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ResetReplicationParametersResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *FullStatusRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *FullStatusResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetReplicationSourceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Parent != nil { + l = m.Parent.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.TimeCreatedNs != 0 { + n += 1 + sov(uint64(m.TimeCreatedNs)) + } + if m.ForceStartReplication { + n += 2 + } + l = len(m.WaitPosition) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.SemiSync { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SetReplicationSourceResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ReplicaWasRestartedRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Parent != nil { + l = m.Parent.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReplicaWasRestartedResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *StopReplicationAndGetStatusRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StopReplicationMode != 0 { + n += 1 + 
sov(uint64(m.StopReplicationMode)) + } + n += len(m.unknownFields) + return n +} + +func (m *StopReplicationAndGetStatusResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PromoteReplicaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SemiSync { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *PromoteReplicaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *BackupRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Concurrency != 0 { + n += 1 + sov(uint64(m.Concurrency)) + } + if m.AllowPrimary { + n += 2 + } + l = len(m.IncrementalFromPos) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.UpgradeSafe { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *BackupResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Event != nil { + l = m.Event.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RestoreFromBackupRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BackupTime != nil { + l = m.BackupTime.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.RestoreToPos) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.DryRun { + n += 2 + } + if m.RestoreToTimestamp != nil { + l = m.RestoreToTimestamp.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RestoreFromBackupResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Event != nil { + l = m.Event.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m 
*CreateVReplicationWorkflowRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.BinlogSource) > 0 { + for _, e := range m.BinlogSource { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.TabletTypes) > 0 { + l = 0 + for _, e := range m.TabletTypes { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + if m.TabletSelectionPreference != 0 { + n += 1 + sov(uint64(m.TabletSelectionPreference)) + } + if m.WorkflowType != 0 { + n += 1 + sov(uint64(m.WorkflowType)) + } + if m.WorkflowSubType != 0 { + n += 1 + sov(uint64(m.WorkflowSubType)) + } + if m.DeferSecondaryKeys { + n += 2 + } + if m.AutoStart { + n += 2 + } + if m.StopAfterCopy { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *CreateVReplicationWorkflowResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteVReplicationWorkflowRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteVReplicationWorkflowResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReadVReplicationWorkflowRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReadVReplicationWorkflowResponse_Stream) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id 
!= 0 { + n += 1 + sov(uint64(m.Id)) + } + if m.Bls != nil { + l = m.Bls.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Pos) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.StopPos) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.MaxTps != 0 { + n += 1 + sov(uint64(m.MaxTps)) + } + if m.MaxReplicationLag != 0 { + n += 1 + sov(uint64(m.MaxReplicationLag)) + } + if m.TimeUpdated != nil { + l = m.TimeUpdated.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.TransactionTimestamp != nil { + l = m.TransactionTimestamp.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.State != 0 { + n += 1 + sov(uint64(m.State)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.RowsCopied != 0 { + n += 1 + sov(uint64(m.RowsCopied)) + } + if m.TimeHeartbeat != nil { + l = m.TimeHeartbeat.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.TimeThrottled != nil { + l = m.TimeThrottled.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.ComponentThrottled) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReadVReplicationWorkflowResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Cells) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.TabletTypes) > 0 { + l = 0 + for _, e := range m.TabletTypes { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + if m.TabletSelectionPreference != 0 { + n += 1 + sov(uint64(m.TabletSelectionPreference)) + } + l = len(m.DbName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Tags) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.WorkflowType != 0 { + n += 1 + sov(uint64(m.WorkflowType)) + } + if m.WorkflowSubType != 0 { + n += 1 + sov(uint64(m.WorkflowSubType)) + } + if m.DeferSecondaryKeys { + n += 2 + } + if len(m.Streams) > 0 { + for _, e := range m.Streams { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } 
+ n += len(m.unknownFields) + return n +} + +func (m *VDiffRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Action) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ActionArg) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.VdiffUuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Options != nil { + l = m.Options.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VDiffResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sov(uint64(m.Id)) + } + if m.Output != nil { + l = m.Output.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.VdiffUuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VDiffPickerOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TabletTypes) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.SourceCell) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TargetCell) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VDiffReportOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OnlyPks { + n += 2 + } + if m.DebugQuery { + n += 2 + } + l = len(m.Format) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VDiffCoreOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tables) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.AutoRetry { + n += 2 + } + if m.MaxRows != 0 { + n += 1 + sov(uint64(m.MaxRows)) + } + if m.Checksum { + n += 2 + } + if m.SamplePct != 0 { + n += 1 + sov(uint64(m.SamplePct)) + } + if m.TimeoutSeconds != 0 { + n += 1 + 
sov(uint64(m.TimeoutSeconds)) + } + if m.MaxExtraRowsToCompare != 0 { + n += 1 + sov(uint64(m.MaxExtraRowsToCompare)) + } + if m.UpdateTableStats { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *VDiffOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PickerOptions != nil { + l = m.PickerOptions.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.CoreOptions != nil { + l = m.CoreOptions.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.ReportOptions != nil { + l = m.ReportOptions.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateVReplicationWorkflowRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.TabletTypes) > 0 { + l = 0 + for _, e := range m.TabletTypes { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + if m.TabletSelectionPreference != 0 { + n += 1 + sov(uint64(m.TabletSelectionPreference)) + } + if m.OnDdl != 0 { + n += 1 + sov(uint64(m.OnDdl)) + } + if m.State != 0 { + n += 1 + sov(uint64(m.State)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateVReplicationWorkflowResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ResetSequencesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Tables) > 0 { + for _, s := range m.Tables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ResetSequencesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *CheckThrottlerRequest) SizeVT() (n int) { + if 
m == nil { + return 0 + } + var l int + _ = l + l = len(m.AppName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CheckThrottlerResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StatusCode != 0 { + n += 1 + sov(uint64(m.StatusCode)) + } + if m.Value != 0 { + n += 9 + } + if m.Threshold != 0 { + n += 9 + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.RecentlyChecked { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *TableDefinition) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TableDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TableDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } 
+ if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schema = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Columns", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Columns = append(m.Columns, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrimaryKeyColumns", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrimaryKeyColumns = append(m.PrimaryKeyColumns, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DataLength", wireType) + } + m.DataLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DataLength |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RowCount", wireType) + } + m.RowCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RowCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fields = append(m.Fields, &query.Field{}) + if err := m.Fields[len(m.Fields)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SchemaDefinition) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SchemaDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SchemaDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DatabaseSchema", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.DatabaseSchema = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableDefinitions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableDefinitions = append(m.TableDefinitions, &TableDefinition{}) + if err := m.TableDefinitions[len(m.TableDefinitions)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SchemaChangeResult) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SchemaChangeResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SchemaChangeResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeforeSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BeforeSchema == nil { + m.BeforeSchema = &SchemaDefinition{} + } + if err := m.BeforeSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AfterSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AfterSchema == nil { + m.AfterSchema = &SchemaDefinition{} + } + if err := m.AfterSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserPermission) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserPermission: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserPermission: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PasswordChecksum", wireType) + } + m.PasswordChecksum = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PasswordChecksum |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Privileges == nil { + m.Privileges = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Privileges[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = 
append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DbPermission) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DbPermission: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DbPermission: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Db", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Db = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Privileges == nil { + m.Privileges = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Privileges[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Permissions) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Permissions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Permissions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserPermissions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserPermissions = append(m.UserPermissions, &UserPermission{}) + if err := m.UserPermissions[len(m.UserPermissions)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DbPermissions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DbPermissions = append(m.DbPermissions, &DbPermission{}) + if err := m.DbPermissions[len(m.DbPermissions)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PingRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PingRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PingResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PingResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SleepRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SleepRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SleepRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy } } - if m.OnDdl != 0 { - n += 1 + sov(uint64(m.OnDdl)) - } - n += len(m.unknownFields) - return n -} -func (m *UpdateVRWorkflowResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Result != nil { - l = m.Result.SizeVT() - n += 1 + l + sov(uint64(l)) + if iNdEx > l { + return io.ErrUnexpectedEOF } - n += len(m.unknownFields) - return n + return nil } +func (m *SleepResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SleepResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SleepResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } -func sov(x uint64) (n int) { - return (bits.Len64(x|1) + 6) / 7 -} -func soz(x uint64) (n int) { - return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } -func (m *TableDefinition) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6330,10 +11122,10 @@ func (m *TableDefinition) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TableDefinition: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteHookRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TableDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteHookRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -6370,7 +11162,7 @@ func (m *TableDefinition) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6398,13 +11190,191 @@ func (m *TableDefinition) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Schema = string(dAtA[iNdEx:postIndex]) + m.Parameters = append(m.Parameters, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Columns", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExtraEnv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExtraEnv == nil { + m.ExtraEnv = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = 
postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ExtraEnv[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteHookResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteHookResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) } - var stringLen uint64 + m.ExitStatus = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6414,27 +11384,14 @@ func (m *TableDefinition) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.ExitStatus |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx 
+ intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Columns = append(m.Columns, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrimaryKeyColumns", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6462,11 +11419,11 @@ func (m *TableDefinition) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.PrimaryKeyColumns = append(m.PrimaryKeyColumns, string(dAtA[iNdEx:postIndex])) + m.Stdout = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6494,79 +11451,7 @@ func (m *TableDefinition) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DataLength", wireType) - } - m.DataLength = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DataLength |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RowCount", wireType) - } - m.RowCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RowCount |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field Fields", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Fields = append(m.Fields, &query.Field{}) - if err := m.Fields[len(m.Fields)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Stderr = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -6590,7 +11475,7 @@ func (m *TableDefinition) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SchemaDefinition) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6613,15 +11498,15 @@ func (m *SchemaDefinition) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SchemaDefinition: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SchemaDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DatabaseSchema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6649,13 +11534,33 @@ func (m *SchemaDefinition) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DatabaseSchema = string(dAtA[iNdEx:postIndex]) + m.Tables = append(m.Tables, 
string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeViews = bool(v != 0) + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TableDefinitions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6665,26 +11570,44 @@ func (m *SchemaDefinition) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.TableDefinitions = append(m.TableDefinitions, &TableDefinition{}) - if err := m.TableDefinitions[len(m.TableDefinitions)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TableSchemaOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TableSchemaOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -6707,7 +11630,7 @@ func (m 
*SchemaDefinition) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SchemaChangeResult) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6730,15 +11653,15 @@ func (m *SchemaChangeResult) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SchemaChangeResult: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SchemaChangeResult: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeforeSchema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SchemaDefinition", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6765,49 +11688,64 @@ func (m *SchemaChangeResult) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.BeforeSchema == nil { - m.BeforeSchema = &SchemaDefinition{} + if m.SchemaDefinition == nil { + m.SchemaDefinition = &SchemaDefinition{} } - if err := m.BeforeSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SchemaDefinition.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AfterSchema", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != 
nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.AfterSchema == nil { - m.AfterSchema = &SchemaDefinition{} + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetPermissionsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if err := m.AfterSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetPermissionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetPermissionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -6830,7 +11768,7 @@ func (m *SchemaChangeResult) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UserPermission) UnmarshalVT(dAtA []byte) error { +func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6853,17 +11791,17 @@ func (m *UserPermission) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UserPermission: wiretype end group for non-group") + return fmt.Errorf("proto: GetPermissionsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UserPermission: illegal tag %d (wire type 
%d)", fieldNum, wire) + return fmt.Errorf("proto: GetPermissionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Permissions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6873,202 +11811,130 @@ func (m *UserPermission) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + if m.Permissions == nil { + m.Permissions = &Permissions{} } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.Permissions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field PasswordChecksum", wireType) - } - m.PasswordChecksum = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PasswordChecksum |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetReadOnlyRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + if iNdEx >= l { + return io.ErrUnexpectedEOF } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - if msglen < 0 { - return ErrInvalidLength + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetReadOnlyRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetReadOnlyRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.Privileges == nil { - m.Privileges = make(map[string]string) - } - var mapkey string - var 
mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLength - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLength - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + m.unknownFields = 
append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetReadOnlyResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - m.Privileges[mapkey] = mapvalue - iNdEx = postIndex + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetReadOnlyResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetReadOnlyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -7091,7 +11957,7 @@ func (m *UserPermission) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DbPermission) UnmarshalVT(dAtA []byte) error { +func (m *SetReadWriteRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7114,235 +11980,63 @@ func (m *DbPermission) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DbPermission: wiretype end group for non-group") + return fmt.Errorf("proto: SetReadWriteRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DbPermission: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetReadWriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Db", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Db = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF 
- } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.Privileges == nil { - m.Privileges = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLength - } - postStringIndexmapvalue := iNdEx 
+ intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLength - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetReadWriteResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - m.Privileges[mapkey] = mapvalue - iNdEx = postIndex + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetReadWriteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetReadWriteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -7365,7 +12059,7 @@ func (m *DbPermission) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *Permissions) UnmarshalVT(dAtA []byte) error { +func (m *ChangeTypeRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7388,17 +12082,17 @@ func (m *Permissions) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Permissions: wiretype end group for non-group") + return 
fmt.Errorf("proto: ChangeTypeRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Permissions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ChangeTypeRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserPermissions", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) } - var msglen int + m.TabletType = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -7408,31 +12102,16 @@ func (m *Permissions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.TabletType |= topodata.TabletType(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UserPermissions = append(m.UserPermissions, &UserPermission{}) - if err := m.UserPermissions[len(m.UserPermissions)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DbPermissions", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -7442,26 +12121,63 @@ func (m *Permissions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength + m.SemiSync = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || 
(iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.DbPermissions = append(m.DbPermissions, &DbPermission{}) - if err := m.DbPermissions[len(m.DbPermissions)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChangeTypeResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - iNdEx = postIndex + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChangeTypeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChangeTypeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -7484,7 +12200,7 @@ func (m *Permissions) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PingRequest) UnmarshalVT(dAtA []byte) error { +func (m *RefreshStateRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7507,44 +12223,114 @@ func (m *PingRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PingRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RefreshStateRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PingRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RefreshStateRequest: illegal tag %d (wire type %d)", 
fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RefreshStateResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RefreshStateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RefreshStateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Payload = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunHealthCheckRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunHealthCheckRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunHealthCheckRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -7567,7 +12353,7 @@ func (m *PingRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PingResponse) UnmarshalVT(dAtA []byte) error { +func (m *RunHealthCheckResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7590,44 +12376,12 @@ func (m *PingResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PingResponse: wiretype end group for non-group") + return fmt.Errorf("proto: RunHealthCheckResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PingResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RunHealthCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Payload = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -7650,7 +12404,7 @@ func (m *PingResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SleepRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReloadSchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7673,17 +12427,17 @@ func (m *SleepRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SleepRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReloadSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SleepRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReloadSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitPosition", wireType) } - m.Duration = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -7693,11 +12447,24 @@ func (m *SleepRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Duration |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WaitPosition = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -7720,7 +12487,7 @@ func (m *SleepRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SleepResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReloadSchemaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7743,10 +12510,10 @@ func (m *SleepResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SleepResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReloadSchemaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SleepResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReloadSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -7771,7 +12538,7 @@ func (m *SleepResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { +func (m *PreflightSchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7794,47 +12561,15 @@ func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteHookRequest: wiretype end group for non-group") + return fmt.Errorf("proto: PreflightSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteHookRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PreflightSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { 
- return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7862,134 +12597,92 @@ func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Parameters = append(m.Parameters, string(dAtA[iNdEx:postIndex])) + m.Changes = append(m.Changes, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExtraEnv", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.ExtraEnv == nil { - m.ExtraEnv = make(map[string]string) + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreflightSchemaResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLength - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLength - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = 
string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PreflightSchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PreflightSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChangeResults", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.ExtraEnv[mapkey] = mapvalue + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChangeResults = append(m.ChangeResults, &SchemaChangeResult{}) + if err := m.ChangeResults[len(m.ChangeResults)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -8013,7 +12706,7 @@ func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { +func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8036,17 +12729,17 @@ func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 
3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteHookResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ApplySchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteHookResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplySchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) } - m.ExitStatus = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -8056,16 +12749,69 @@ func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ExitStatus |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sql = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowReplication", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { 
+ break + } + } + m.AllowReplication = bool(v != 0) + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BeforeSchema", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -8075,27 +12821,67 @@ func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Stdout = string(dAtA[iNdEx:postIndex]) + if m.BeforeSchema == nil { + m.BeforeSchema = &SchemaDefinition{} + } + if err := m.BeforeSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AfterSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AfterSchema == nil { + m.AfterSchema = &SchemaDefinition{} + } + if err := m.AfterSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SqlMode", wireType) } var stringLen 
uint64 for shift := uint(0); ; shift += 7 { @@ -8123,8 +12909,27 @@ func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Stderr = string(dAtA[iNdEx:postIndex]) + m.SqlMode = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BatchSize", wireType) + } + m.BatchSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BatchSize |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -8147,7 +12952,7 @@ func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8170,69 +12975,17 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ApplySchemaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplySchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - 
postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IncludeViews = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BeforeSchema", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -8242,29 +12995,33 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) + if m.BeforeSchema == nil { + m.BeforeSchema = &SchemaDefinition{} + } + if err := m.BeforeSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TableSchemaOnly", wireType) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AfterSchema", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -8274,12 
+13031,28 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.TableSchemaOnly = bool(v != 0) + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AfterSchema == nil { + m.AfterSchema = &SchemaDefinition{} + } + if err := m.AfterSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -8302,7 +13075,7 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *LockTablesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8325,48 +13098,12 @@ func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: LockTablesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LockTablesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaDefinition", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if m.SchemaDefinition == nil { - m.SchemaDefinition = &SchemaDefinition{} - } - if err := m.SchemaDefinition.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -8389,7 +13126,7 @@ func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetPermissionsRequest) UnmarshalVT(dAtA []byte) error { +func (m *LockTablesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8412,10 +13149,10 @@ func (m *GetPermissionsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetPermissionsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: LockTablesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetPermissionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LockTablesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -8440,7 +13177,7 @@ func (m *GetPermissionsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { +func (m *UnlockTablesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8463,48 +13200,12 @@ func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetPermissionsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: UnlockTablesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetPermissionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UnlockTablesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Permissions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Permissions == nil { - m.Permissions = &Permissions{} - } - if err := m.Permissions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -8527,7 +13228,7 @@ func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetReadOnlyRequest) UnmarshalVT(dAtA []byte) error { +func (m *UnlockTablesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8550,10 +13251,10 @@ func (m *SetReadOnlyRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetReadOnlyRequest: wiretype end group for non-group") + return fmt.Errorf("proto: UnlockTablesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetReadOnlyRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UnlockTablesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -8578,7 +13279,7 @@ func (m *SetReadOnlyRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetReadOnlyResponse) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteQueryRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8588,25 +13289,146 @@ func (m *SetReadOnlyResponse) UnmarshalVT(dAtA []byte) error { if shift >= 64 { 
return ErrIntOverflow } - if iNdEx >= l { + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteQueryRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteQueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Query == nil { + m.Query = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DbName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + } + m.MaxRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRows |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if m.CallerId == nil { + m.CallerId = &vtrpc.CallerID{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SetReadOnlyResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: SetReadOnlyResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + if err := m.CallerId.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -8629,7 +13451,7 @@ func (m *SetReadOnlyResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetReadWriteRequest) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteQueryResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8652,63 +13474,48 @@ func (m *SetReadWriteRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetReadWriteRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteQueryResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetReadWriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteQueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SetReadWriteResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if m.Result == nil { + m.Result = &query.QueryResult{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SetReadWriteResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SetReadWriteResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -8731,7 +13538,7 @@ func (m *SetReadWriteResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ChangeTypeRequest) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsDbaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8754,17 +13561,17 @@ func (m *ChangeTypeRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ChangeTypeRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteFetchAsDbaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ChangeTypeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteFetchAsDbaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } - m.TabletType = 0 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -8774,16 +13581,31 @@ func (m *ChangeTypeRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TabletType |= topodata.TabletType(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) + if m.Query == nil { + m.Query = []byte{} + } + iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -8793,63 +13615,83 @@ func (m *ChangeTypeRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.SemiSync = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ChangeTypeResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + m.DbName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.MaxRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRows |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DisableBinlogs", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ChangeTypeResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ChangeTypeResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DisableBinlogs = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReloadSchema", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReloadSchema = bool(v != 0) default: iNdEx = 
preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -8872,7 +13714,7 @@ func (m *ChangeTypeResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RefreshStateRequest) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsDbaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8895,12 +13737,48 @@ func (m *RefreshStateRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RefreshStateRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteFetchAsDbaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RefreshStateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteFetchAsDbaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -8923,7 +13801,7 @@ func (m *RefreshStateRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RefreshStateResponse) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsAllPrivsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8946,12 +13824,117 @@ func (m 
*RefreshStateResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RefreshStateResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteFetchAsAllPrivsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RefreshStateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteFetchAsAllPrivsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Query == nil { + m.Query = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DbName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + } + m.MaxRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRows |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReloadSchema", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReloadSchema = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -8974,7 +13957,7 @@ func (m *RefreshStateResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RunHealthCheckRequest) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsAllPrivsResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8997,12 +13980,48 @@ func (m *RunHealthCheckRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - 
return fmt.Errorf("proto: RunHealthCheckRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteFetchAsAllPrivsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunHealthCheckRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteFetchAsAllPrivsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -9025,7 +14044,7 @@ func (m *RunHealthCheckRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RunHealthCheckResponse) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9048,12 +14067,65 @@ func (m *RunHealthCheckResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunHealthCheckResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteFetchAsAppRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunHealthCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteFetchAsAppRequest: 
illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) + if m.Query == nil { + m.Query = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + } + m.MaxRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRows |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -9076,7 +14148,7 @@ func (m *RunHealthCheckResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReloadSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsAppResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9099,17 +14171,17 @@ func (m *ReloadSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReloadSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteFetchAsAppResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReloadSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteFetchAsAppResponse: illegal tag %d (wire type %d)", fieldNum, 
wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitPosition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -9119,23 +14191,27 @@ func (m *ReloadSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.WaitPosition = string(dAtA[iNdEx:postIndex]) + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -9159,7 +14235,7 @@ func (m *ReloadSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReloadSchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReplicationStatusRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9182,10 +14258,10 @@ func (m *ReloadSchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReloadSchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicationStatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReloadSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicationStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -9210,7 +14286,7 @@ func (m *ReloadSchemaResponse) UnmarshalVT(dAtA []byte) error { } 
return nil } -func (m *PreflightSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReplicationStatusResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9233,17 +14309,17 @@ func (m *PreflightSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PreflightSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicationStatusResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PreflightSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicationStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -9253,23 +14329,27 @@ func (m *PreflightSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Changes = append(m.Changes, string(dAtA[iNdEx:postIndex])) + if m.Status == nil { + m.Status = &replicationdata.Status{} + } + if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -9293,7 +14373,7 @@ func (m *PreflightSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PreflightSchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m 
*PrimaryStatusRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9316,46 +14396,12 @@ func (m *PreflightSchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PreflightSchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: PrimaryStatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PreflightSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PrimaryStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChangeResults", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChangeResults = append(m.ChangeResults, &SchemaChangeResult{}) - if err := m.ChangeResults[len(m.ChangeResults)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -9378,7 +14424,7 @@ func (m *PreflightSchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *PrimaryStatusResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9392,170 +14438,26 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := 
int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ApplySchemaRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ApplySchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Sql = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Force = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowReplication", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AllowReplication = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeforeSchema", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.BeforeSchema == nil { - m.BeforeSchema = &SchemaDefinition{} - } - if err := m.BeforeSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AfterSchema", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AfterSchema == nil { - m.AfterSchema = &SchemaDefinition{} - } - if err := m.AfterSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 6: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PrimaryStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PrimaryStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SqlMode", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -9565,23 +14467,27 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ 
- stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.SqlMode = string(dAtA[iNdEx:postIndex]) + if m.Status == nil { + m.Status = &replicationdata.PrimaryStatus{} + } + if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -9605,7 +14511,7 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *PrimaryPositionRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9628,53 +14534,68 @@ func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplySchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: PrimaryPositionRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplySchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PrimaryPositionRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeforeSchema", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if 
postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.BeforeSchema == nil { - m.BeforeSchema = &SchemaDefinition{} + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PrimaryPositionResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if err := m.BeforeSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 2: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PrimaryPositionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PrimaryPositionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AfterSchema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -9684,27 +14605,23 @@ func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - 
if m.AfterSchema == nil { - m.AfterSchema = &SchemaDefinition{} - } - if err := m.AfterSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Position = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -9728,7 +14645,7 @@ func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *LockTablesRequest) UnmarshalVT(dAtA []byte) error { +func (m *WaitForPositionRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9751,12 +14668,44 @@ func (m *LockTablesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LockTablesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: WaitForPositionRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LockTablesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WaitForPositionRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -9779,7 +14728,7 @@ func (m *LockTablesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *LockTablesResponse) UnmarshalVT(dAtA []byte) error { +func (m 
*WaitForPositionResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9802,10 +14751,10 @@ func (m *LockTablesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LockTablesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: WaitForPositionResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LockTablesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WaitForPositionResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -9830,7 +14779,7 @@ func (m *LockTablesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UnlockTablesRequest) UnmarshalVT(dAtA []byte) error { +func (m *StopReplicationRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9853,10 +14802,10 @@ func (m *UnlockTablesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UnlockTablesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: StopReplicationRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UnlockTablesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StopReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -9881,7 +14830,7 @@ func (m *UnlockTablesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UnlockTablesResponse) UnmarshalVT(dAtA []byte) error { +func (m *StopReplicationResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9904,10 +14853,10 @@ func (m *UnlockTablesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return 
fmt.Errorf("proto: UnlockTablesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: StopReplicationResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UnlockTablesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StopReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -9932,7 +14881,7 @@ func (m *UnlockTablesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteQueryRequest) UnmarshalVT(dAtA []byte) error { +func (m *StopReplicationMinimumRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9955,49 +14904,15 @@ func (m *ExecuteQueryRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteQueryRequest: wiretype end group for non-group") + return fmt.Errorf("proto: StopReplicationMinimumRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteQueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StopReplicationMinimumRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) 
- if m.Query == nil { - m.Query = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10025,32 +14940,13 @@ func (m *ExecuteQueryRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DbName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) - } - m.MaxRows = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxRows |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CallerId", wireType) + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitTimeout", wireType) } - var msglen int + m.WaitTimeout = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -10060,28 +14956,11 @@ func (m *ExecuteQueryRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.WaitTimeout |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CallerId == nil { - m.CallerId = &vtrpc.CallerID{} - } - if err := m.CallerId.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -10104,7 +14983,7 @@ func (m *ExecuteQueryRequest) UnmarshalVT(dAtA []byte) error { 
} return nil } -func (m *ExecuteQueryResponse) UnmarshalVT(dAtA []byte) error { +func (m *StopReplicationMinimumResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10127,17 +15006,17 @@ func (m *ExecuteQueryResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteQueryResponse: wiretype end group for non-group") + return fmt.Errorf("proto: StopReplicationMinimumResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteQueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StopReplicationMinimumResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -10147,27 +15026,23 @@ func (m *ExecuteQueryResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Result == nil { - m.Result = &query.QueryResult{} - } - if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Position = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -10191,7 +15066,7 @@ func (m *ExecuteQueryResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteFetchAsDbaRequest) UnmarshalVT(dAtA []byte) error { +func (m 
*StartReplicationRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10214,17 +15089,17 @@ func (m *ExecuteFetchAsDbaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsDbaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: StartReplicationRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsDbaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StartReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) } - var byteLen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -10234,29 +15109,117 @@ func (m *ExecuteFetchAsDbaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + m.SemiSync = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + byteLen - if postIndex < 0 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartReplicationResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartReplicationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) - if m.Query == nil { - m.Query = []byte{} + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartReplicationUntilAfterRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - iNdEx = postIndex - case 2: + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartReplicationUntilAfterRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartReplicationUntilAfterRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10284,13 +15247,13 @@ func (m *ExecuteFetchAsDbaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DbName = string(dAtA[iNdEx:postIndex]) + m.Position = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WaitTimeout", wireType) } - m.MaxRows = 0 + m.WaitTimeout = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -10300,51 +15263,113 @@ func (m *ExecuteFetchAsDbaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxRows |= uint64(b&0x7F) << shift + m.WaitTimeout |= int64(b&0x7F) << shift if b < 0x80 { break } } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DisableBinlogs", 
wireType) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartReplicationUntilAfterResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartReplicationUntilAfterResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartReplicationUntilAfterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength } - m.DisableBinlogs = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReloadSchema", wireType) + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + 
m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetReplicasRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - m.ReloadSchema = bool(v != 0) + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetReplicasRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetReplicasRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -10367,7 +15392,7 @@ func (m *ExecuteFetchAsDbaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteFetchAsDbaResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetReplicasResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10390,17 +15415,17 @@ func (m *ExecuteFetchAsDbaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsDbaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetReplicasResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsDbaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetReplicasResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) } - 
var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -10410,27 +15435,23 @@ func (m *ExecuteFetchAsDbaResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Result == nil { - m.Result = &query.QueryResult{} - } - if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Addrs = append(m.Addrs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -10454,7 +15475,7 @@ func (m *ExecuteFetchAsDbaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteFetchAsAllPrivsRequest) UnmarshalVT(dAtA []byte) error { +func (m *ResetReplicationRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10477,117 +15498,63 @@ func (m *ExecuteFetchAsAllPrivsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsAllPrivsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ResetReplicationRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsAllPrivsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResetReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + byteLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) - if m.Query == nil { - m.Query = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResetReplicationResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.DbName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) - } - m.MaxRows = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxRows |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReloadSchema", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.ReloadSchema = bool(v != 0) + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResetReplicationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResetReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -10610,7 +15577,7 @@ func (m *ExecuteFetchAsAllPrivsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteFetchAsAllPrivsResponse) UnmarshalVT(dAtA []byte) error { +func (m *VReplicationExecRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10633,17 
+15600,17 @@ func (m *ExecuteFetchAsAllPrivsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsAllPrivsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: VReplicationExecRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsAllPrivsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VReplicationExecRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -10653,27 +15620,23 @@ func (m *ExecuteFetchAsAllPrivsResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Result == nil { - m.Result = &query.QueryResult{} - } - if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Query = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -10697,7 +15660,7 @@ func (m *ExecuteFetchAsAllPrivsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { +func (m *VReplicationExecResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10720,17 +15683,17 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { 
fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsAppRequest: wiretype end group for non-group") + return fmt.Errorf("proto: VReplicationExecResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsAppRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VReplicationExecResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -10740,45 +15703,28 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) 
- if m.Query == nil { - m.Query = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + if m.Result == nil { + m.Result = &query.QueryResult{} } - m.MaxRows = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxRows |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -10801,7 +15747,7 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteFetchAsAppResponse) UnmarshalVT(dAtA []byte) error { +func (m *VReplicationWaitForPosRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10824,17 +15770,36 @@ func (m *ExecuteFetchAsAppResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsAppResponse: wiretype end group for non-group") + return fmt.Errorf("proto: VReplicationWaitForPosRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsAppResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VReplicationWaitForPosRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", 
wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -10844,27 +15809,23 @@ func (m *ExecuteFetchAsAppResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Result == nil { - m.Result = &query.QueryResult{} - } - if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Position = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -10888,7 +15849,7 @@ func (m *ExecuteFetchAsAppResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReplicationStatusRequest) UnmarshalVT(dAtA []byte) error { +func (m *VReplicationWaitForPosResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10911,10 +15872,10 @@ func (m *ReplicationStatusRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicationStatusRequest: wiretype end group for non-group") + return fmt.Errorf("proto: VReplicationWaitForPosResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicationStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VReplicationWaitForPosResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -10939,7 +15900,7 @@ func (m *ReplicationStatusRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReplicationStatusResponse) UnmarshalVT(dAtA []byte) 
error { +func (m *InitPrimaryRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10962,17 +15923,17 @@ func (m *ReplicationStatusResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicationStatusResponse: wiretype end group for non-group") + return fmt.Errorf("proto: InitPrimaryRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicationStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InitPrimaryRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -10982,28 +15943,12 @@ func (m *ReplicationStatusResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Status == nil { - m.Status = &replicationdata.Status{} - } - if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.SemiSync = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -11026,7 +15971,7 @@ func (m *ReplicationStatusResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PrimaryStatusRequest) UnmarshalVT(dAtA []byte) error { +func (m *InitPrimaryResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11049,12 +15994,44 @@ func (m *PrimaryStatusRequest) 
UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PrimaryStatusRequest: wiretype end group for non-group") + return fmt.Errorf("proto: InitPrimaryResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PrimaryStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InitPrimaryResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -11077,7 +16054,7 @@ func (m *PrimaryStatusRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PrimaryStatusResponse) UnmarshalVT(dAtA []byte) error { +func (m *PopulateReparentJournalRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11100,15 +16077,66 @@ func (m *PrimaryStatusResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PrimaryStatusResponse: wiretype end group for non-group") + return fmt.Errorf("proto: PopulateReparentJournalRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PrimaryStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + 
return fmt.Errorf("proto: PopulateReparentJournalRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeCreatedNs", wireType) + } + m.TimeCreatedNs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeCreatedNs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ActionName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ActionName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrimaryAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11135,13 +16163,45 @@ func (m *PrimaryStatusResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Status == nil { - m.Status = &replicationdata.PrimaryStatus{} + if m.PrimaryAlias == nil { + m.PrimaryAlias = &topodata.TabletAlias{} } - if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.PrimaryAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReplicationPosition", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReplicationPosition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -11164,7 +16224,7 @@ func (m *PrimaryStatusResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PrimaryPositionRequest) UnmarshalVT(dAtA []byte) error { +func (m *PopulateReparentJournalResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11187,10 +16247,10 @@ func (m *PrimaryPositionRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PrimaryPositionRequest: wiretype end group for non-group") + return fmt.Errorf("proto: PopulateReparentJournalResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PrimaryPositionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PopulateReparentJournalResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -11215,7 +16275,7 @@ func (m *PrimaryPositionRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PrimaryPositionResponse) UnmarshalVT(dAtA []byte) error { +func (m *InitReplicaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11238,17 +16298,17 @@ func (m *PrimaryPositionResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return 
fmt.Errorf("proto: PrimaryPositionResponse: wiretype end group for non-group") + return fmt.Errorf("proto: InitReplicaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PrimaryPositionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InitReplicaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -11258,78 +16318,31 @@ func (m *PrimaryPositionResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Position = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WaitForPositionRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.Parent == nil { + m.Parent = &topodata.TabletAlias{} } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.Parent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WaitForPositionRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WaitForPositionRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReplicationPosition", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -11357,8 +16370,47 @@ func (m *WaitForPositionRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Position = string(dAtA[iNdEx:postIndex]) + m.ReplicationPosition = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeCreatedNs", wireType) + } + m.TimeCreatedNs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeCreatedNs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) + } + var v int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SemiSync = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -11381,7 +16433,7 @@ func (m *WaitForPositionRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *WaitForPositionResponse) UnmarshalVT(dAtA []byte) error { +func (m *InitReplicaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11404,10 +16456,10 @@ func (m *WaitForPositionResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: WaitForPositionResponse: wiretype end group for non-group") + return fmt.Errorf("proto: InitReplicaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: WaitForPositionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InitReplicaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -11432,7 +16484,7 @@ func (m *WaitForPositionResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StopReplicationRequest) UnmarshalVT(dAtA []byte) error { +func (m *DemotePrimaryRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11455,10 +16507,10 @@ func (m *StopReplicationRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StopReplicationRequest: wiretype end group for non-group") + return fmt.Errorf("proto: DemotePrimaryRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StopReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DemotePrimaryRequest: illegal tag %d (wire type %d)", 
fieldNum, wire) } switch fieldNum { default: @@ -11483,7 +16535,7 @@ func (m *StopReplicationRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StopReplicationResponse) UnmarshalVT(dAtA []byte) error { +func (m *DemotePrimaryResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11506,12 +16558,48 @@ func (m *StopReplicationResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StopReplicationResponse: wiretype end group for non-group") + return fmt.Errorf("proto: DemotePrimaryResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StopReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DemotePrimaryResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrimaryStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PrimaryStatus == nil { + m.PrimaryStatus = &replicationdata.PrimaryStatus{} + } + if err := m.PrimaryStatus.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -11534,7 +16622,7 @@ func (m *StopReplicationResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StopReplicationMinimumRequest) UnmarshalVT(dAtA []byte) error { +func (m *UndoDemotePrimaryRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for 
iNdEx < l { @@ -11557,17 +16645,17 @@ func (m *StopReplicationMinimumRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StopReplicationMinimumRequest: wiretype end group for non-group") + return fmt.Errorf("proto: UndoDemotePrimaryRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StopReplicationMinimumRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UndoDemotePrimaryRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -11577,43 +16665,63 @@ func (m *StopReplicationMinimumRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + m.SemiSync = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Position = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitTimeout", wireType) + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UndoDemotePrimaryResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - m.WaitTimeout = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WaitTimeout |= int64(b&0x7F) << shift - if b < 0x80 { - break - } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UndoDemotePrimaryResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UndoDemotePrimaryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -11636,7 +16744,7 @@ func (m *StopReplicationMinimumRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StopReplicationMinimumResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReplicaWasPromotedRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11659,44 +16767,12 @@ func (m *StopReplicationMinimumResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StopReplicationMinimumResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaWasPromotedRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StopReplicationMinimumResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaWasPromotedRequest: illegal tag %d (wire type %d)", 
fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Position = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -11719,7 +16795,7 @@ func (m *StopReplicationMinimumResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StartReplicationRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReplicaWasPromotedResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11742,32 +16818,12 @@ func (m *StartReplicationRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StartReplicationRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaWasPromotedResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StartReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaWasPromotedResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } 
- } - m.SemiSync = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -11790,7 +16846,7 @@ func (m *StartReplicationRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StartReplicationResponse) UnmarshalVT(dAtA []byte) error { +func (m *ResetReplicationParametersRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11813,10 +16869,10 @@ func (m *StartReplicationResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StartReplicationResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ResetReplicationParametersRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StartReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResetReplicationParametersRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -11841,7 +16897,7 @@ func (m *StartReplicationResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StartReplicationUntilAfterRequest) UnmarshalVT(dAtA []byte) error { +func (m *ResetReplicationParametersResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11864,63 +16920,12 @@ func (m *StartReplicationUntilAfterRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StartReplicationUntilAfterRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ResetReplicationParametersResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StartReplicationUntilAfterRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResetReplicationParametersResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Position = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitTimeout", wireType) - } - m.WaitTimeout = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WaitTimeout |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -11943,7 +16948,7 @@ func (m *StartReplicationUntilAfterRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StartReplicationUntilAfterResponse) UnmarshalVT(dAtA []byte) error { +func (m *FullStatusRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11966,10 +16971,10 @@ func (m *StartReplicationUntilAfterResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StartReplicationUntilAfterResponse: wiretype end group for non-group") + return fmt.Errorf("proto: FullStatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StartReplicationUntilAfterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FullStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -11994,7 
+16999,7 @@ func (m *StartReplicationUntilAfterResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetReplicasRequest) UnmarshalVT(dAtA []byte) error { +func (m *FullStatusResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12017,12 +17022,48 @@ func (m *GetReplicasRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetReplicasRequest: wiretype end group for non-group") + return fmt.Errorf("proto: FullStatusResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetReplicasRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FullStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &replicationdata.FullStatus{} + } + if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -12045,7 +17086,7 @@ func (m *GetReplicasRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetReplicasResponse) UnmarshalVT(dAtA []byte) error { +func (m *SetReplicationSourceRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12064,19 +17105,94 @@ func (m *GetReplicasResponse) UnmarshalVT(dAtA []byte) error { if b < 0x80 { 
break } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetReplicasResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetReplicasResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetReplicationSourceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetReplicationSourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parent == nil { + m.Parent = &topodata.TabletAlias{} + } + if err := m.Parent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeCreatedNs", wireType) + } + m.TimeCreatedNs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeCreatedNs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForceStartReplication", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ForceStartReplication = bool(v != 0) + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WaitPosition", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -12104,59 +17220,28 @@ func (m *GetReplicasResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Addrs = append(m.Addrs, string(dAtA[iNdEx:postIndex])) + m.WaitPosition = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResetReplicationRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResetReplicationRequest: wiretype end group for non-group") - } - if fieldNum <= 
0 { - return fmt.Errorf("proto: ResetReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + m.SemiSync = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -12179,7 +17264,7 @@ func (m *ResetReplicationRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ResetReplicationResponse) UnmarshalVT(dAtA []byte) error { +func (m *SetReplicationSourceResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12202,10 +17287,10 @@ func (m *ResetReplicationResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResetReplicationResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SetReplicationSourceResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResetReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetReplicationSourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -12230,7 +17315,7 @@ func (m *ResetReplicationResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VReplicationExecRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReplicaWasRestartedRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12253,17 +17338,17 @@ func (m *VReplicationExecRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VReplicationExecRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaWasRestartedRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VReplicationExecRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaWasRestartedRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { 
case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -12273,23 +17358,27 @@ func (m *VReplicationExecRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Query = string(dAtA[iNdEx:postIndex]) + if m.Parent == nil { + m.Parent = &topodata.TabletAlias{} + } + if err := m.Parent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -12313,7 +17402,7 @@ func (m *VReplicationExecRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VReplicationExecResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReplicaWasRestartedResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12336,48 +17425,12 @@ func (m *VReplicationExecResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VReplicationExecResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaWasRestartedResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VReplicationExecResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaWasRestartedResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", 
wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Result == nil { - m.Result = &query.QueryResult{} - } - if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -12400,7 +17453,7 @@ func (m *VReplicationExecResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VReplicationWaitForPosRequest) UnmarshalVT(dAtA []byte) error { +func (m *StopReplicationAndGetStatusRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12423,36 +17476,17 @@ func (m *VReplicationWaitForPosRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VReplicationWaitForPosRequest: wiretype end group for non-group") + return fmt.Errorf("proto: StopReplicationAndGetStatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VReplicationWaitForPosRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StopReplicationAndGetStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - m.Id = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Id |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StopReplicationMode", wireType) } - var stringLen uint64 + m.StopReplicationMode = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -12462,24 +17496,11 @@ func (m *VReplicationWaitForPosRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.StopReplicationMode |= replicationdata.StopReplicationMode(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Position = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -12502,7 +17523,7 @@ func (m *VReplicationWaitForPosRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VReplicationWaitForPosResponse) UnmarshalVT(dAtA []byte) error { +func (m *StopReplicationAndGetStatusResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12525,12 +17546,48 @@ func (m *VReplicationWaitForPosResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VReplicationWaitForPosResponse: wiretype end group for non-group") + return fmt.Errorf("proto: StopReplicationAndGetStatusResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VReplicationWaitForPosResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StopReplicationAndGetStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &replicationdata.StopReplicationStatus{} + } + if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -12553,7 +17610,7 @@ func (m *VReplicationWaitForPosResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *InitPrimaryRequest) UnmarshalVT(dAtA []byte) error { +func (m *PromoteReplicaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12576,10 +17633,10 @@ func (m *InitPrimaryRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: InitPrimaryRequest: wiretype end group for non-group") + return fmt.Errorf("proto: PromoteReplicaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: InitPrimaryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PromoteReplicaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -12624,7 +17681,7 @@ func (m *InitPrimaryRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *InitPrimaryResponse) UnmarshalVT(dAtA []byte) error { +func (m *PromoteReplicaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12647,10 +17704,10 @@ func (m *InitPrimaryResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: InitPrimaryResponse: 
wiretype end group for non-group") + return fmt.Errorf("proto: PromoteReplicaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: InitPrimaryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PromoteReplicaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -12707,7 +17764,7 @@ func (m *InitPrimaryResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PopulateReparentJournalRequest) UnmarshalVT(dAtA []byte) error { +func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12730,17 +17787,17 @@ func (m *PopulateReparentJournalRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PopulateReparentJournalRequest: wiretype end group for non-group") + return fmt.Errorf("proto: BackupRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PopulateReparentJournalRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeCreatedNs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) } - m.TimeCreatedNs = 0 + m.Concurrency = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -12750,16 +17807,16 @@ func (m *PopulateReparentJournalRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TimeCreatedNs |= int64(b&0x7F) << shift + m.Concurrency |= int64(b&0x7F) << shift if b < 0x80 { break } } case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ActionName", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrimary", wireType) } 
- var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -12769,29 +17826,17 @@ func (m *PopulateReparentJournalRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ActionName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.AllowPrimary = bool(v != 0) case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrimaryAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IncrementalFromPos", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -12801,33 +17846,29 @@ func (m *PopulateReparentJournalRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PrimaryAlias == nil { - m.PrimaryAlias = &topodata.TabletAlias{} - } - if err := m.PrimaryAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.IncrementalFromPos = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReplicationPosition", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpgradeSafe", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if 
shift >= 64 { return ErrIntOverflow @@ -12837,24 +17878,12 @@ func (m *PopulateReparentJournalRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ReplicationPosition = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.UpgradeSafe = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -12877,7 +17906,7 @@ func (m *PopulateReparentJournalRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PopulateReparentJournalResponse) UnmarshalVT(dAtA []byte) error { +func (m *BackupResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12900,12 +17929,48 @@ func (m *PopulateReparentJournalResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PopulateReparentJournalResponse: wiretype end group for non-group") + return fmt.Errorf("proto: BackupResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PopulateReparentJournalResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Event == nil { + m.Event = &logutil.Event{} + } + if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -12928,7 +17993,7 @@ func (m *PopulateReparentJournalResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *InitReplicaRequest) UnmarshalVT(dAtA []byte) error { +func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12951,15 +18016,15 @@ func (m *InitReplicaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: InitReplicaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RestoreFromBackupRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: InitReplicaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RestoreFromBackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BackupTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12986,16 +18051,16 @@ func (m *InitReplicaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Parent == nil { - m.Parent = &topodata.TabletAlias{} + if m.BackupTime == nil { + m.BackupTime = &vttime.Time{} } - if err := m.Parent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.BackupTime.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReplicationPosition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for 
field RestoreToPos", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13023,13 +18088,13 @@ func (m *InitReplicaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ReplicationPosition = string(dAtA[iNdEx:postIndex]) + m.RestoreToPos = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeCreatedNs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) } - m.TimeCreatedNs = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13039,16 +18104,17 @@ func (m *InitReplicaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TimeCreatedNs |= int64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } + m.DryRun = bool(v != 0) case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RestoreToTimestamp", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13058,114 +18124,28 @@ func (m *InitReplicaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.SemiSync = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if msglen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InitReplicaResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InitReplicaResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InitReplicaResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DemotePrimaryRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.RestoreToTimestamp == nil { + m.RestoreToTimestamp = &vttime.Time{} } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.RestoreToTimestamp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DemotePrimaryRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DemotePrimaryRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -13188,7 +18168,7 @@ func (m *DemotePrimaryRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DemotePrimaryResponse) UnmarshalVT(dAtA []byte) error { +func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13211,15 +18191,15 @@ func (m *DemotePrimaryResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DemotePrimaryResponse: wiretype end group for non-group") + return fmt.Errorf("proto: RestoreFromBackupResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DemotePrimaryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RestoreFromBackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 2: + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field PrimaryStatus", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13246,10 +18226,10 @@ func (m *DemotePrimaryResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.PrimaryStatus == nil { - m.PrimaryStatus = &replicationdata.PrimaryStatus{} + if m.Event == nil { + m.Event = &logutil.Event{} } - if err := m.PrimaryStatus.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13275,7 +18255,7 @@ func (m *DemotePrimaryResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UndoDemotePrimaryRequest) UnmarshalVT(dAtA []byte) error { +func (m *CreateVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13298,17 +18278,17 @@ func (m *UndoDemotePrimaryRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UndoDemotePrimaryRequest: wiretype end group for non-group") + return fmt.Errorf("proto: CreateVReplicationWorkflowRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UndoDemotePrimaryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CreateVReplicationWorkflowRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13318,63 +18298,276 @@ func (m *UndoDemotePrimaryRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + 
stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.SemiSync = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UndoDemotePrimaryResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinlogSource", wireType) } - if iNdEx >= l { + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.BinlogSource = append(m.BinlogSource, &binlogdata.BinlogSource{}) + if err := m.BinlogSource[len(m.BinlogSource)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType == 0 { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.TabletTypes) == 0 { + m.TabletTypes = make([]topodata.TabletType, 0, elementCount) + } + for iNdEx < postIndex { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", 
wireType) + } + m.TabletSelectionPreference = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletSelectionPreference |= TabletSelectionPreference(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowType", wireType) + } + m.WorkflowType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WorkflowType |= binlogdata.VReplicationWorkflowType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowSubType", wireType) + } + m.WorkflowSubType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WorkflowSubType |= binlogdata.VReplicationWorkflowSubType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeferSecondaryKeys", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DeferSecondaryKeys = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoStart", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return 
fmt.Errorf("proto: UndoDemotePrimaryResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UndoDemotePrimaryResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + m.AutoStart = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StopAfterCopy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StopAfterCopy = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -13397,7 +18590,7 @@ func (m *UndoDemotePrimaryResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReplicaWasPromotedRequest) UnmarshalVT(dAtA []byte) error { +func (m *CreateVReplicationWorkflowResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13420,63 +18613,48 @@ func (m *ReplicaWasPromotedRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicaWasPromotedRequest: wiretype end group for non-group") + return fmt.Errorf("proto: CreateVReplicationWorkflowResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaWasPromotedRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CreateVReplicationWorkflowResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicaWasPromotedResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if m.Result == nil { + m.Result = &query.QueryResult{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaWasPromotedResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaWasPromotedResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -13499,7 +18677,7 @@ func (m *ReplicaWasPromotedResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ResetReplicationParametersRequest) UnmarshalVT(dAtA []byte) error { +func (m *DeleteVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13522,12 +18700,44 @@ func (m *ResetReplicationParametersRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
ResetReplicationParametersRequest: wiretype end group for non-group") + return fmt.Errorf("proto: DeleteVReplicationWorkflowRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResetReplicationParametersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeleteVReplicationWorkflowRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -13550,7 +18760,7 @@ func (m *ResetReplicationParametersRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ResetReplicationParametersResponse) UnmarshalVT(dAtA []byte) error { +func (m *DeleteVReplicationWorkflowResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13573,12 +18783,48 @@ func (m *ResetReplicationParametersResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResetReplicationParametersResponse: wiretype end group for non-group") + return fmt.Errorf("proto: DeleteVReplicationWorkflowResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResetReplicationParametersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return 
fmt.Errorf("proto: DeleteVReplicationWorkflowResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -13601,7 +18847,7 @@ func (m *ResetReplicationParametersResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *FullStatusRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReadVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13624,12 +18870,44 @@ func (m *FullStatusRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FullStatusRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReadVReplicationWorkflowRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FullStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReadVReplicationWorkflowRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -13652,7 +18930,7 @@ func (m *FullStatusRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *FullStatusResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReadVReplicationWorkflowResponse_Stream) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13675,17 +18953,72 @@ func (m *FullStatusResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FullStatusResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReadVReplicationWorkflowResponse_Stream: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FullStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReadVReplicationWorkflowResponse_Stream: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bls", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Bls == nil { + m.Bls = &binlogdata.BinlogSource{} + } + if err := m.Bls.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Pos", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13695,82 +19028,97 @@ func (m *FullStatusResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Status == nil { - m.Status = &replicationdata.FullStatus{} + m.Pos = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StopPos", wireType) } - if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLength } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SetReplicationSourceRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + m.StopPos = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTps", wireType) } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.MaxTps = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTps |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicationLag", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SetReplicationSourceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SetReplicationSourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.MaxReplicationLag = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxReplicationLag |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d 
for field Parent", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TimeUpdated", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13797,18 +19145,18 @@ func (m *SetReplicationSourceRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Parent == nil { - m.Parent = &topodata.TabletAlias{} + if m.TimeUpdated == nil { + m.TimeUpdated = &vttime.Time{} } - if err := m.Parent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TimeUpdated.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeCreatedNs", wireType) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionTimestamp", wireType) } - m.TimeCreatedNs = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13818,16 +19166,33 @@ func (m *SetReplicationSourceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TimeCreatedNs |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 3: + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TransactionTimestamp == nil { + m.TransactionTimestamp = &vttime.Time{} + } + if err := m.TransactionTimestamp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ForceStartReplication", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) } - var v int + m.State = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13837,15 +19202,14 @@ func (m *SetReplicationSourceRequest) UnmarshalVT(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.State |= binlogdata.VReplicationWorkflowState(b&0x7F) << shift if b < 0x80 { break } } - m.ForceStartReplication = bool(v != 0) - case 4: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitPosition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13873,13 +19237,13 @@ func (m *SetReplicationSourceRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.WaitPosition = string(dAtA[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 11: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RowsCopied", wireType) } - var v int + m.RowsCopied = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13889,117 +19253,14 @@ func (m *SetReplicationSourceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.RowsCopied |= int64(b&0x7F) << shift if b < 0x80 { break } } - m.SemiSync = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SetReplicationSourceResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SetReplicationSourceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SetReplicationSourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicaWasRestartedRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaWasRestartedRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaWasRestartedRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TimeHeartbeat", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14026,120 +19287,54 @@ func (m *ReplicaWasRestartedRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Parent == nil { - m.Parent = &topodata.TabletAlias{} + if m.TimeHeartbeat == nil { + m.TimeHeartbeat = &vttime.Time{} } - if err := m.Parent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TimeHeartbeat.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicaWasRestartedResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeThrottled", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaWasRestartedResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaWasRestartedResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + if msglen < 0 { + return ErrInvalidLength } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StopReplicationAndGetStatusRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.TimeThrottled == nil { + m.TimeThrottled = &vttime.Time{} } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.TimeThrottled.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StopReplicationAndGetStatusRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StopReplicationAndGetStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StopReplicationMode", wireType) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ComponentThrottled", wireType) } - m.StopReplicationMode = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14149,11 +19344,24 @@ func (m *StopReplicationAndGetStatusRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.StopReplicationMode |= replicationdata.StopReplicationMode(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ComponentThrottled = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := 
skip(dAtA[iNdEx:]) @@ -14176,7 +19384,7 @@ func (m *StopReplicationAndGetStatusRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StopReplicationAndGetStatusResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReadVReplicationWorkflowResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14199,17 +19407,17 @@ func (m *StopReplicationAndGetStatusResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StopReplicationAndGetStatusResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReadVReplicationWorkflowResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StopReplicationAndGetStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReadVReplicationWorkflowResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14219,84 +19427,130 @@ func (m *StopReplicationAndGetStatusResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Status == nil { - m.Status = &replicationdata.StopReplicationStatus{} - } - if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - 
default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PromoteReplicaRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Cells = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType == 0 { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.TabletTypes) == 0 { + m.TabletTypes = make([]topodata.TabletType, 0, elementCount) + } + for iNdEx < postIndex { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PromoteReplicaRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PromoteReplicaRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", wireType) } - var v int + m.TabletSelectionPreference = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14306,66 +19560,46 @@ func (m *PromoteReplicaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.TabletSelectionPreference |= TabletSelectionPreference(b&0x7F) << shift if b < 0x80 { break } } - m.SemiSync = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PromoteReplicaResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PromoteReplicaResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PromoteReplicaResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.DbName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14393,64 +19627,32 @@ func (m *PromoteReplicaResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Position = string(dAtA[iNdEx:postIndex]) + m.Tags = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - 
return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowType", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.WorkflowType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WorkflowType |= binlogdata.VReplicationWorkflowType(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BackupRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 9: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowSubType", wireType) } - m.Concurrency = 0 + m.WorkflowSubType = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14460,14 +19662,14 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Concurrency |= int64(b&0x7F) << shift + m.WorkflowSubType |= binlogdata.VReplicationWorkflowSubType(b&0x7F) << shift if b < 0x80 { break } } - case 2: + case 10: if 
wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrimary", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeferSecondaryKeys", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -14484,12 +19686,12 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { break } } - m.AllowPrimary = bool(v != 0) - case 3: + m.DeferSecondaryKeys = bool(v != 0) + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IncrementalFromPos", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14499,23 +19701,25 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.IncrementalFromPos = string(dAtA[iNdEx:postIndex]) + m.Streams = append(m.Streams, &ReadVReplicationWorkflowResponse_Stream{}) + if err := m.Streams[len(m.Streams)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -14539,7 +19743,7 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *BackupResponse) UnmarshalVT(dAtA []byte) error { +func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14562,17 +19766,17 @@ func (m *BackupResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BackupResponse: wiretype end group for non-group") + return fmt.Errorf("proto: 
VDiffRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14582,84 +19786,29 @@ func (m *BackupResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Event == nil { - m.Event = &logutil.Event{} - } - if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RestoreFromBackupRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RestoreFromBackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BackupTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14669,31 +19818,27 @@ func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.BackupTime == nil { - m.BackupTime = &vttime.Time{} - } - if err := m.BackupTime.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field RestoreToPos", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14721,13 +19866,13 @@ func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RestoreToPos = string(dAtA[iNdEx:postIndex]) + m.Action = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActionArg", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14737,66 +19882,59 @@ func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.DryRun = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + m.ActionArg = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VdiffUuid", wireType) } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RestoreFromBackupResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RestoreFromBackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VdiffUuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14823,10 +19961,10 @@ func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Event == nil { - m.Event = &logutil.Event{} + if m.Options == nil { + m.Options 
= &VDiffOptions{} } - if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Options.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14852,7 +19990,7 @@ func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { +func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14875,17 +20013,17 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VDiffRequest: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VDiffRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) } - var stringLen uint64 + m.Id = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14895,29 +20033,16 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Id |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) } - var 
stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14927,27 +20052,31 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Workflow = string(dAtA[iNdEx:postIndex]) + if m.Output == nil { + m.Output = &query.QueryResult{} + } + if err := m.Output.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VdiffUuid", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14975,11 +20104,62 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Action = string(dAtA[iNdEx:postIndex]) + m.VdiffUuid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VDiffPickerOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VDiffPickerOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ActionArg", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15007,11 +20187,11 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ActionArg = string(dAtA[iNdEx:postIndex]) + m.TabletTypes = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VdiffUuid", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceCell", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15039,13 +20219,13 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VdiffUuid = string(dAtA[iNdEx:postIndex]) + m.SourceCell = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetCell", wireType) } - var msglen int + 
var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15055,27 +20235,23 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Options == nil { - m.Options = &VDiffOptions{} - } - if err := m.Options.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.TargetCell = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -15099,7 +20275,7 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { +func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15122,17 +20298,17 @@ func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VDiffResponse: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffReportOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VDiffResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffReportOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OnlyPks", wireType) } - m.Id = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15142,16 +20318,17 @@ func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
m.Id |= int64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } + m.OnlyPks = bool(v != 0) case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DebugQuery", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15161,31 +20338,15 @@ func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Output == nil { - m.Output = &query.QueryResult{} - } - if err := m.Output.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.DebugQuery = bool(v != 0) case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VdiffUuid", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15213,7 +20374,7 @@ func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VdiffUuid = string(dAtA[iNdEx:postIndex]) + m.Format = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -15237,7 +20398,7 @@ func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { +func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15260,15 +20421,15 @@ func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VDiffPickerOptions: wiretype end group 
for non-group") + return fmt.Errorf("proto: VDiffCoreOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VDiffPickerOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffCoreOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15296,13 +20457,13 @@ func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TabletTypes = string(dAtA[iNdEx:postIndex]) + m.Tables = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceCell", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoRetry", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15312,29 +20473,56 @@ func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + m.AutoRetry = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + m.MaxRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRows |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - if postIndex > l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Checksum", wireType) } - m.SourceCell = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetCell", wireType) + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - var stringLen uint64 + m.Checksum = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SamplePct", wireType) + } + m.SamplePct = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15344,24 +20532,69 @@ func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.SamplePct |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + m.TimeoutSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeoutSeconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - if postIndex > l { - return io.ErrUnexpectedEOF + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxExtraRowsToCompare", wireType) } - m.TargetCell = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.MaxExtraRowsToCompare = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxExtraRowsToCompare |= int64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateTableStats", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UpdateTableStats = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -15384,7 +20617,7 @@ func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { +func (m *VDiffOptions) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15407,17 +20640,17 @@ func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VDiffReportOptions: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VDiffReportOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OnlyPks", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PickerOptions", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15427,17 +20660,33 @@ func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.OnlyPks = bool(v != 0) + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > 
l { + return io.ErrUnexpectedEOF + } + if m.PickerOptions == nil { + m.PickerOptions = &VDiffPickerOptions{} + } + if err := m.PickerOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DebugQuery", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CoreOptions", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15447,17 +20696,33 @@ func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.DebugQuery = bool(v != 0) + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CoreOptions == nil { + m.CoreOptions = &VDiffCoreOptions{} + } + if err := m.CoreOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReportOptions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15467,23 +20732,27 @@ func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Format = string(dAtA[iNdEx:postIndex]) + if m.ReportOptions == nil { + 
m.ReportOptions = &VDiffReportOptions{} + } + if err := m.ReportOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -15507,7 +20776,7 @@ func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { +func (m *UpdateVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15530,15 +20799,15 @@ func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VDiffCoreOptions: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateVReplicationWorkflowRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VDiffCoreOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UpdateVReplicationWorkflowRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15566,13 +20835,13 @@ func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tables = string(dAtA[iNdEx:postIndex]) + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AutoRetry", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15582,36 +20851,98 @@ func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + 
stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.AutoRetry = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.MaxRows = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType == 0 { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + m.TabletTypes = append(m.TabletTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - m.MaxRows |= int64(b&0x7F) << shift - if b < 0x80 { - break + var elementCount int + if elementCount != 0 && len(m.TabletTypes) == 0 { + m.TabletTypes = make([]topodata.TabletType, 0, elementCount) } + for iNdEx < postIndex { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) 
+ } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) } case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Checksum", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", wireType) } - var v int + m.TabletSelectionPreference = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15621,17 +20952,16 @@ func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.TabletSelectionPreference |= TabletSelectionPreference(b&0x7F) << shift if b < 0x80 { break } } - m.Checksum = bool(v != 0) case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SamplePct", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType) } - m.SamplePct = 0 + m.OnDdl = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15641,16 +20971,16 @@ func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.SamplePct |= int64(b&0x7F) << shift + m.OnDdl |= binlogdata.OnDDLAction(b&0x7F) << shift if b < 0x80 { break } } case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) } - m.TimeoutSeconds = 0 + m.State = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15660,16 +20990,67 @@ func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TimeoutSeconds |= int64(b&0x7F) << shift + m.State |= binlogdata.VReplicationWorkflowState(b&0x7F) << shift if b < 0x80 { break } } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxExtraRowsToCompare", wireType) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - 
m.MaxExtraRowsToCompare = 0 + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateVReplicationWorkflowResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateVReplicationWorkflowResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateVReplicationWorkflowResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15679,16 +21060,84 @@ func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxExtraRowsToCompare |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateTableStats", wireType) + if msglen < 0 { + return ErrInvalidLength } - var v int + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResetSequencesRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResetSequencesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResetSequencesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15698,12 +21147,24 @@ func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.UpdateTableStats = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -15726,7 +21187,7 @@ func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { } return nil } 
-func (m *VDiffOptions) UnmarshalVT(dAtA []byte) error { +func (m *ResetSequencesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15749,89 +21210,68 @@ func (m *VDiffOptions) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VDiffOptions: wiretype end group for non-group") + return fmt.Errorf("proto: ResetSequencesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VDiffOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResetSequencesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PickerOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.PickerOptions == nil { - m.PickerOptions = &VDiffPickerOptions{} - } - if err := m.PickerOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CoreOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { 
- return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckThrottlerRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - if m.CoreOptions == nil { - m.CoreOptions = &VDiffCoreOptions{} - } - if err := m.CoreOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 3: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CheckThrottlerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CheckThrottlerRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReportOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AppName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15841,27 +21281,23 @@ func (m *VDiffOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ReportOptions == nil { - m.ReportOptions = 
&VDiffReportOptions{} - } - if err := m.ReportOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.AppName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -15885,7 +21321,7 @@ func (m *VDiffOptions) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UpdateVRWorkflowRequest) UnmarshalVT(dAtA []byte) error { +func (m *CheckThrottlerResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15908,17 +21344,17 @@ func (m *UpdateVRWorkflowRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UpdateVRWorkflowRequest: wiretype end group for non-group") + return fmt.Errorf("proto: CheckThrottlerResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateVRWorkflowRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CheckThrottlerResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StatusCode", wireType) } - var stringLen uint64 + m.StatusCode = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15928,27 +21364,36 @@ func (m *UpdateVRWorkflowRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.StatusCode |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + v = 
uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Threshold", wireType) + } + var v uint64 + if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } - m.Workflow = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Threshold = float64(math.Float64frombits(v)) + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15976,11 +21421,11 @@ func (m *UpdateVRWorkflowRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16008,83 +21453,13 @@ func (m *UpdateVRWorkflowRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TabletTypes = append(m.TabletTypes, string(dAtA[iNdEx:postIndex])) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType) - } - m.OnDdl = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OnDdl |= binlogdata.OnDDLAction(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } 
- if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + return fmt.Errorf("proto: wrong wireType = %d for field RecentlyChecked", wireType) } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateVRWorkflowResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateVRWorkflowResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateVRWorkflowResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -16094,28 +21469,12 @@ func (m *UpdateVRWorkflowResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Result == nil { - m.Result = &query.QueryResult{} - } - if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.RecentlyChecked = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git 
a/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go b/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go index 23dab19acd5..608282049ba 100644 --- a/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go +++ b/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: tabletmanagerservice.proto package tabletmanagerservice @@ -45,7 +45,7 @@ var file_tabletmanagerservice_proto_rawDesc = []byte{ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x17, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xb5, 0x27, 0x0a, 0x0d, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xca, 0x2c, 0x0a, 0x0d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x49, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x1e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, @@ -124,247 +124,289 @@ var file_tabletmanagerservice_proto_rawDesc = []byte{ 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x0a, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x12, 0x24, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x6f, 0x63, 
0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x6f, 0x63, - 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x61, 0x0a, 0x0c, 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x12, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x6e, - 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x61, 0x0a, 0x0c, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x12, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x12, 0x2b, 0x2e, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, - 0x62, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, - 
0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7f, 0x0a, 0x16, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, - 0x69, 0x76, 0x73, 0x12, 0x30, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, - 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x67, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x65, 0x71, + 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, + 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, + 0x0a, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x24, 0x2e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x22, 0x00, 0x12, 0x61, 0x0a, 0x0c, 0x55, 0x6e, + 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x26, 0x2e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, + 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x61, 0x0a, + 0x0c, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x26, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x12, - 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, - 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, + 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x70, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, + 0x41, 0x73, 0x44, 0x62, 0x61, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 
0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, + 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x7f, 0x0a, 0x16, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, + 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x12, 0x30, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, - 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x64, - 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x27, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, - 0x61, 0x74, 0x61, 0x2e, 
0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, + 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, + 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x64, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x27, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 
0x61, 0x2e, 0x50, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, 0x0f, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x6a, 0x0a, 0x0f, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, - 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, 0x0f, - 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, - 0x74, 
0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7f, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, - 0x75, 0x6d, 0x12, 0x30, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x10, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x8b, 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, - 0x69, 0x6c, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, - 0x6c, 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, + 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, + 0x0f, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, 0x0f, 0x57, 0x61, 0x69, + 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x61, 0x69, 0x74, + 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x74, 0x61, 
0x62, 0x6c, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, + 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x7f, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x30, 0x2e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x52, 0x65, 0x70, 
0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x8b, 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x41, 0x66, 0x74, 0x65, 0x72, + 0x12, 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, + 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x5e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x25, + 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 
0x61, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x10, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x12, 0x2a, 0x2e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7f, 0x0a, 0x16, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x12, - 0x30, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x31, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x56, 0x52, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2a, 0x2e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, - 
0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x56, 0x52, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x05, 0x56, 0x44, 0x69, 0x66, 0x66, 0x12, 0x1f, + 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x8b, 0x01, 0x0a, 0x1a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x20, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, - 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x8b, 0x01, + 0x0a, 0x1a, 0x44, 0x65, 0x6c, 0x65, 0x74, 
0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x34, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x85, 0x01, 0x0a, 0x18, + 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x32, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x61, + 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x10, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x12, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x7f, 0x0a, 0x16, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x12, 0x30, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x69, + 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, + 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, + 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x8b, 0x01, 0x0a, 0x1a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x12, 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x4c, 
0x0a, 0x05, 0x56, 0x44, 0x69, 0x66, 0x66, 0x12, 0x1f, 0x2e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, + 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x6d, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x12, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, - 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x82, 0x01, 0x0a, 0x17, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x31, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, + 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x25, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x82, + 0x01, 0x0a, 0x17, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x31, 0x2e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, + 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, + 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x12, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, + 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 
0x65, 0x22, 0x00, 0x12, 0x64, 0x0a, 0x0d, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x12, 0x27, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x55, 0x6e, 0x64, + 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x32, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, + 0x74, 0x61, 0x2e, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 
0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x73, 0x0a, 0x12, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, + 0x64, 0x12, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, + 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2d, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, + 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x8b, 0x01, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, + 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, + 0x0a, 0x0a, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x24, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 
0x61, 0x74, 0x61, + 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x79, 0x0a, 0x14, 0x53, + 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x2e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x76, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x2d, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x64, 0x0a, 0x0d, 0x44, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x27, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 
0x74, 0x61, 0x2e, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, - 0x11, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x73, 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, - 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, - 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x8b, 0x01, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, - 0x65, 0x72, 0x73, 0x12, 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 
0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, - 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x0a, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x24, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, + 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x8e, + 0x01, 0x0a, 0x1b, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x35, + 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 
0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x79, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x2e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x76, 0x0a, 0x13, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, - 0x64, 0x12, 0x2d, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, - 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, - 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x8e, 0x01, 0x0a, 0x1b, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x35, 0x2e, 
0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, - 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, 0x64, - 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x67, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, - 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x06, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x20, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, + 0x67, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x12, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 
0x74, 0x65, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x12, 0x20, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x72, 0x0a, 0x11, 0x52, + 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, - 0x72, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 
0x1a, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, - 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x30, 0x01, 0x42, 0x33, 0x5a, 0x31, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, - 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x67, 0x0a, 0x0e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, + 0x72, 0x12, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, + 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x33, 0x5a, 0x31, 0x76, 0x69, 0x74, 0x65, + 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, + 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_tabletmanagerservice_proto_goTypes = []interface{}{ @@ -381,188 +423,208 @@ var file_tabletmanagerservice_proto_goTypes = []interface{}{ (*tabletmanagerdata.ReloadSchemaRequest)(nil), // 10: tabletmanagerdata.ReloadSchemaRequest (*tabletmanagerdata.PreflightSchemaRequest)(nil), // 11: 
tabletmanagerdata.PreflightSchemaRequest (*tabletmanagerdata.ApplySchemaRequest)(nil), // 12: tabletmanagerdata.ApplySchemaRequest - (*tabletmanagerdata.LockTablesRequest)(nil), // 13: tabletmanagerdata.LockTablesRequest - (*tabletmanagerdata.UnlockTablesRequest)(nil), // 14: tabletmanagerdata.UnlockTablesRequest - (*tabletmanagerdata.ExecuteQueryRequest)(nil), // 15: tabletmanagerdata.ExecuteQueryRequest - (*tabletmanagerdata.ExecuteFetchAsDbaRequest)(nil), // 16: tabletmanagerdata.ExecuteFetchAsDbaRequest - (*tabletmanagerdata.ExecuteFetchAsAllPrivsRequest)(nil), // 17: tabletmanagerdata.ExecuteFetchAsAllPrivsRequest - (*tabletmanagerdata.ExecuteFetchAsAppRequest)(nil), // 18: tabletmanagerdata.ExecuteFetchAsAppRequest - (*tabletmanagerdata.ReplicationStatusRequest)(nil), // 19: tabletmanagerdata.ReplicationStatusRequest - (*tabletmanagerdata.PrimaryStatusRequest)(nil), // 20: tabletmanagerdata.PrimaryStatusRequest - (*tabletmanagerdata.PrimaryPositionRequest)(nil), // 21: tabletmanagerdata.PrimaryPositionRequest - (*tabletmanagerdata.WaitForPositionRequest)(nil), // 22: tabletmanagerdata.WaitForPositionRequest - (*tabletmanagerdata.StopReplicationRequest)(nil), // 23: tabletmanagerdata.StopReplicationRequest - (*tabletmanagerdata.StopReplicationMinimumRequest)(nil), // 24: tabletmanagerdata.StopReplicationMinimumRequest - (*tabletmanagerdata.StartReplicationRequest)(nil), // 25: tabletmanagerdata.StartReplicationRequest - (*tabletmanagerdata.StartReplicationUntilAfterRequest)(nil), // 26: tabletmanagerdata.StartReplicationUntilAfterRequest - (*tabletmanagerdata.GetReplicasRequest)(nil), // 27: tabletmanagerdata.GetReplicasRequest - (*tabletmanagerdata.VReplicationExecRequest)(nil), // 28: tabletmanagerdata.VReplicationExecRequest - (*tabletmanagerdata.VReplicationWaitForPosRequest)(nil), // 29: tabletmanagerdata.VReplicationWaitForPosRequest - (*tabletmanagerdata.UpdateVRWorkflowRequest)(nil), // 30: tabletmanagerdata.UpdateVRWorkflowRequest - 
(*tabletmanagerdata.VDiffRequest)(nil), // 31: tabletmanagerdata.VDiffRequest - (*tabletmanagerdata.ResetReplicationRequest)(nil), // 32: tabletmanagerdata.ResetReplicationRequest - (*tabletmanagerdata.InitPrimaryRequest)(nil), // 33: tabletmanagerdata.InitPrimaryRequest - (*tabletmanagerdata.PopulateReparentJournalRequest)(nil), // 34: tabletmanagerdata.PopulateReparentJournalRequest - (*tabletmanagerdata.InitReplicaRequest)(nil), // 35: tabletmanagerdata.InitReplicaRequest - (*tabletmanagerdata.DemotePrimaryRequest)(nil), // 36: tabletmanagerdata.DemotePrimaryRequest - (*tabletmanagerdata.UndoDemotePrimaryRequest)(nil), // 37: tabletmanagerdata.UndoDemotePrimaryRequest - (*tabletmanagerdata.ReplicaWasPromotedRequest)(nil), // 38: tabletmanagerdata.ReplicaWasPromotedRequest - (*tabletmanagerdata.ResetReplicationParametersRequest)(nil), // 39: tabletmanagerdata.ResetReplicationParametersRequest - (*tabletmanagerdata.FullStatusRequest)(nil), // 40: tabletmanagerdata.FullStatusRequest - (*tabletmanagerdata.SetReplicationSourceRequest)(nil), // 41: tabletmanagerdata.SetReplicationSourceRequest - (*tabletmanagerdata.ReplicaWasRestartedRequest)(nil), // 42: tabletmanagerdata.ReplicaWasRestartedRequest - (*tabletmanagerdata.StopReplicationAndGetStatusRequest)(nil), // 43: tabletmanagerdata.StopReplicationAndGetStatusRequest - (*tabletmanagerdata.PromoteReplicaRequest)(nil), // 44: tabletmanagerdata.PromoteReplicaRequest - (*tabletmanagerdata.BackupRequest)(nil), // 45: tabletmanagerdata.BackupRequest - (*tabletmanagerdata.RestoreFromBackupRequest)(nil), // 46: tabletmanagerdata.RestoreFromBackupRequest - (*tabletmanagerdata.PingResponse)(nil), // 47: tabletmanagerdata.PingResponse - (*tabletmanagerdata.SleepResponse)(nil), // 48: tabletmanagerdata.SleepResponse - (*tabletmanagerdata.ExecuteHookResponse)(nil), // 49: tabletmanagerdata.ExecuteHookResponse - (*tabletmanagerdata.GetSchemaResponse)(nil), // 50: tabletmanagerdata.GetSchemaResponse - 
(*tabletmanagerdata.GetPermissionsResponse)(nil), // 51: tabletmanagerdata.GetPermissionsResponse - (*tabletmanagerdata.SetReadOnlyResponse)(nil), // 52: tabletmanagerdata.SetReadOnlyResponse - (*tabletmanagerdata.SetReadWriteResponse)(nil), // 53: tabletmanagerdata.SetReadWriteResponse - (*tabletmanagerdata.ChangeTypeResponse)(nil), // 54: tabletmanagerdata.ChangeTypeResponse - (*tabletmanagerdata.RefreshStateResponse)(nil), // 55: tabletmanagerdata.RefreshStateResponse - (*tabletmanagerdata.RunHealthCheckResponse)(nil), // 56: tabletmanagerdata.RunHealthCheckResponse - (*tabletmanagerdata.ReloadSchemaResponse)(nil), // 57: tabletmanagerdata.ReloadSchemaResponse - (*tabletmanagerdata.PreflightSchemaResponse)(nil), // 58: tabletmanagerdata.PreflightSchemaResponse - (*tabletmanagerdata.ApplySchemaResponse)(nil), // 59: tabletmanagerdata.ApplySchemaResponse - (*tabletmanagerdata.LockTablesResponse)(nil), // 60: tabletmanagerdata.LockTablesResponse - (*tabletmanagerdata.UnlockTablesResponse)(nil), // 61: tabletmanagerdata.UnlockTablesResponse - (*tabletmanagerdata.ExecuteQueryResponse)(nil), // 62: tabletmanagerdata.ExecuteQueryResponse - (*tabletmanagerdata.ExecuteFetchAsDbaResponse)(nil), // 63: tabletmanagerdata.ExecuteFetchAsDbaResponse - (*tabletmanagerdata.ExecuteFetchAsAllPrivsResponse)(nil), // 64: tabletmanagerdata.ExecuteFetchAsAllPrivsResponse - (*tabletmanagerdata.ExecuteFetchAsAppResponse)(nil), // 65: tabletmanagerdata.ExecuteFetchAsAppResponse - (*tabletmanagerdata.ReplicationStatusResponse)(nil), // 66: tabletmanagerdata.ReplicationStatusResponse - (*tabletmanagerdata.PrimaryStatusResponse)(nil), // 67: tabletmanagerdata.PrimaryStatusResponse - (*tabletmanagerdata.PrimaryPositionResponse)(nil), // 68: tabletmanagerdata.PrimaryPositionResponse - (*tabletmanagerdata.WaitForPositionResponse)(nil), // 69: tabletmanagerdata.WaitForPositionResponse - (*tabletmanagerdata.StopReplicationResponse)(nil), // 70: tabletmanagerdata.StopReplicationResponse - 
(*tabletmanagerdata.StopReplicationMinimumResponse)(nil), // 71: tabletmanagerdata.StopReplicationMinimumResponse - (*tabletmanagerdata.StartReplicationResponse)(nil), // 72: tabletmanagerdata.StartReplicationResponse - (*tabletmanagerdata.StartReplicationUntilAfterResponse)(nil), // 73: tabletmanagerdata.StartReplicationUntilAfterResponse - (*tabletmanagerdata.GetReplicasResponse)(nil), // 74: tabletmanagerdata.GetReplicasResponse - (*tabletmanagerdata.VReplicationExecResponse)(nil), // 75: tabletmanagerdata.VReplicationExecResponse - (*tabletmanagerdata.VReplicationWaitForPosResponse)(nil), // 76: tabletmanagerdata.VReplicationWaitForPosResponse - (*tabletmanagerdata.UpdateVRWorkflowResponse)(nil), // 77: tabletmanagerdata.UpdateVRWorkflowResponse - (*tabletmanagerdata.VDiffResponse)(nil), // 78: tabletmanagerdata.VDiffResponse - (*tabletmanagerdata.ResetReplicationResponse)(nil), // 79: tabletmanagerdata.ResetReplicationResponse - (*tabletmanagerdata.InitPrimaryResponse)(nil), // 80: tabletmanagerdata.InitPrimaryResponse - (*tabletmanagerdata.PopulateReparentJournalResponse)(nil), // 81: tabletmanagerdata.PopulateReparentJournalResponse - (*tabletmanagerdata.InitReplicaResponse)(nil), // 82: tabletmanagerdata.InitReplicaResponse - (*tabletmanagerdata.DemotePrimaryResponse)(nil), // 83: tabletmanagerdata.DemotePrimaryResponse - (*tabletmanagerdata.UndoDemotePrimaryResponse)(nil), // 84: tabletmanagerdata.UndoDemotePrimaryResponse - (*tabletmanagerdata.ReplicaWasPromotedResponse)(nil), // 85: tabletmanagerdata.ReplicaWasPromotedResponse - (*tabletmanagerdata.ResetReplicationParametersResponse)(nil), // 86: tabletmanagerdata.ResetReplicationParametersResponse - (*tabletmanagerdata.FullStatusResponse)(nil), // 87: tabletmanagerdata.FullStatusResponse - (*tabletmanagerdata.SetReplicationSourceResponse)(nil), // 88: tabletmanagerdata.SetReplicationSourceResponse - (*tabletmanagerdata.ReplicaWasRestartedResponse)(nil), // 89: 
tabletmanagerdata.ReplicaWasRestartedResponse - (*tabletmanagerdata.StopReplicationAndGetStatusResponse)(nil), // 90: tabletmanagerdata.StopReplicationAndGetStatusResponse - (*tabletmanagerdata.PromoteReplicaResponse)(nil), // 91: tabletmanagerdata.PromoteReplicaResponse - (*tabletmanagerdata.BackupResponse)(nil), // 92: tabletmanagerdata.BackupResponse - (*tabletmanagerdata.RestoreFromBackupResponse)(nil), // 93: tabletmanagerdata.RestoreFromBackupResponse + (*tabletmanagerdata.ResetSequencesRequest)(nil), // 13: tabletmanagerdata.ResetSequencesRequest + (*tabletmanagerdata.LockTablesRequest)(nil), // 14: tabletmanagerdata.LockTablesRequest + (*tabletmanagerdata.UnlockTablesRequest)(nil), // 15: tabletmanagerdata.UnlockTablesRequest + (*tabletmanagerdata.ExecuteQueryRequest)(nil), // 16: tabletmanagerdata.ExecuteQueryRequest + (*tabletmanagerdata.ExecuteFetchAsDbaRequest)(nil), // 17: tabletmanagerdata.ExecuteFetchAsDbaRequest + (*tabletmanagerdata.ExecuteFetchAsAllPrivsRequest)(nil), // 18: tabletmanagerdata.ExecuteFetchAsAllPrivsRequest + (*tabletmanagerdata.ExecuteFetchAsAppRequest)(nil), // 19: tabletmanagerdata.ExecuteFetchAsAppRequest + (*tabletmanagerdata.ReplicationStatusRequest)(nil), // 20: tabletmanagerdata.ReplicationStatusRequest + (*tabletmanagerdata.PrimaryStatusRequest)(nil), // 21: tabletmanagerdata.PrimaryStatusRequest + (*tabletmanagerdata.PrimaryPositionRequest)(nil), // 22: tabletmanagerdata.PrimaryPositionRequest + (*tabletmanagerdata.WaitForPositionRequest)(nil), // 23: tabletmanagerdata.WaitForPositionRequest + (*tabletmanagerdata.StopReplicationRequest)(nil), // 24: tabletmanagerdata.StopReplicationRequest + (*tabletmanagerdata.StopReplicationMinimumRequest)(nil), // 25: tabletmanagerdata.StopReplicationMinimumRequest + (*tabletmanagerdata.StartReplicationRequest)(nil), // 26: tabletmanagerdata.StartReplicationRequest + (*tabletmanagerdata.StartReplicationUntilAfterRequest)(nil), // 27: tabletmanagerdata.StartReplicationUntilAfterRequest + 
(*tabletmanagerdata.GetReplicasRequest)(nil), // 28: tabletmanagerdata.GetReplicasRequest + (*tabletmanagerdata.CreateVReplicationWorkflowRequest)(nil), // 29: tabletmanagerdata.CreateVReplicationWorkflowRequest + (*tabletmanagerdata.DeleteVReplicationWorkflowRequest)(nil), // 30: tabletmanagerdata.DeleteVReplicationWorkflowRequest + (*tabletmanagerdata.ReadVReplicationWorkflowRequest)(nil), // 31: tabletmanagerdata.ReadVReplicationWorkflowRequest + (*tabletmanagerdata.VReplicationExecRequest)(nil), // 32: tabletmanagerdata.VReplicationExecRequest + (*tabletmanagerdata.VReplicationWaitForPosRequest)(nil), // 33: tabletmanagerdata.VReplicationWaitForPosRequest + (*tabletmanagerdata.UpdateVReplicationWorkflowRequest)(nil), // 34: tabletmanagerdata.UpdateVReplicationWorkflowRequest + (*tabletmanagerdata.VDiffRequest)(nil), // 35: tabletmanagerdata.VDiffRequest + (*tabletmanagerdata.ResetReplicationRequest)(nil), // 36: tabletmanagerdata.ResetReplicationRequest + (*tabletmanagerdata.InitPrimaryRequest)(nil), // 37: tabletmanagerdata.InitPrimaryRequest + (*tabletmanagerdata.PopulateReparentJournalRequest)(nil), // 38: tabletmanagerdata.PopulateReparentJournalRequest + (*tabletmanagerdata.InitReplicaRequest)(nil), // 39: tabletmanagerdata.InitReplicaRequest + (*tabletmanagerdata.DemotePrimaryRequest)(nil), // 40: tabletmanagerdata.DemotePrimaryRequest + (*tabletmanagerdata.UndoDemotePrimaryRequest)(nil), // 41: tabletmanagerdata.UndoDemotePrimaryRequest + (*tabletmanagerdata.ReplicaWasPromotedRequest)(nil), // 42: tabletmanagerdata.ReplicaWasPromotedRequest + (*tabletmanagerdata.ResetReplicationParametersRequest)(nil), // 43: tabletmanagerdata.ResetReplicationParametersRequest + (*tabletmanagerdata.FullStatusRequest)(nil), // 44: tabletmanagerdata.FullStatusRequest + (*tabletmanagerdata.SetReplicationSourceRequest)(nil), // 45: tabletmanagerdata.SetReplicationSourceRequest + (*tabletmanagerdata.ReplicaWasRestartedRequest)(nil), // 46: 
tabletmanagerdata.ReplicaWasRestartedRequest + (*tabletmanagerdata.StopReplicationAndGetStatusRequest)(nil), // 47: tabletmanagerdata.StopReplicationAndGetStatusRequest + (*tabletmanagerdata.PromoteReplicaRequest)(nil), // 48: tabletmanagerdata.PromoteReplicaRequest + (*tabletmanagerdata.BackupRequest)(nil), // 49: tabletmanagerdata.BackupRequest + (*tabletmanagerdata.RestoreFromBackupRequest)(nil), // 50: tabletmanagerdata.RestoreFromBackupRequest + (*tabletmanagerdata.CheckThrottlerRequest)(nil), // 51: tabletmanagerdata.CheckThrottlerRequest + (*tabletmanagerdata.PingResponse)(nil), // 52: tabletmanagerdata.PingResponse + (*tabletmanagerdata.SleepResponse)(nil), // 53: tabletmanagerdata.SleepResponse + (*tabletmanagerdata.ExecuteHookResponse)(nil), // 54: tabletmanagerdata.ExecuteHookResponse + (*tabletmanagerdata.GetSchemaResponse)(nil), // 55: tabletmanagerdata.GetSchemaResponse + (*tabletmanagerdata.GetPermissionsResponse)(nil), // 56: tabletmanagerdata.GetPermissionsResponse + (*tabletmanagerdata.SetReadOnlyResponse)(nil), // 57: tabletmanagerdata.SetReadOnlyResponse + (*tabletmanagerdata.SetReadWriteResponse)(nil), // 58: tabletmanagerdata.SetReadWriteResponse + (*tabletmanagerdata.ChangeTypeResponse)(nil), // 59: tabletmanagerdata.ChangeTypeResponse + (*tabletmanagerdata.RefreshStateResponse)(nil), // 60: tabletmanagerdata.RefreshStateResponse + (*tabletmanagerdata.RunHealthCheckResponse)(nil), // 61: tabletmanagerdata.RunHealthCheckResponse + (*tabletmanagerdata.ReloadSchemaResponse)(nil), // 62: tabletmanagerdata.ReloadSchemaResponse + (*tabletmanagerdata.PreflightSchemaResponse)(nil), // 63: tabletmanagerdata.PreflightSchemaResponse + (*tabletmanagerdata.ApplySchemaResponse)(nil), // 64: tabletmanagerdata.ApplySchemaResponse + (*tabletmanagerdata.ResetSequencesResponse)(nil), // 65: tabletmanagerdata.ResetSequencesResponse + (*tabletmanagerdata.LockTablesResponse)(nil), // 66: tabletmanagerdata.LockTablesResponse + 
(*tabletmanagerdata.UnlockTablesResponse)(nil), // 67: tabletmanagerdata.UnlockTablesResponse + (*tabletmanagerdata.ExecuteQueryResponse)(nil), // 68: tabletmanagerdata.ExecuteQueryResponse + (*tabletmanagerdata.ExecuteFetchAsDbaResponse)(nil), // 69: tabletmanagerdata.ExecuteFetchAsDbaResponse + (*tabletmanagerdata.ExecuteFetchAsAllPrivsResponse)(nil), // 70: tabletmanagerdata.ExecuteFetchAsAllPrivsResponse + (*tabletmanagerdata.ExecuteFetchAsAppResponse)(nil), // 71: tabletmanagerdata.ExecuteFetchAsAppResponse + (*tabletmanagerdata.ReplicationStatusResponse)(nil), // 72: tabletmanagerdata.ReplicationStatusResponse + (*tabletmanagerdata.PrimaryStatusResponse)(nil), // 73: tabletmanagerdata.PrimaryStatusResponse + (*tabletmanagerdata.PrimaryPositionResponse)(nil), // 74: tabletmanagerdata.PrimaryPositionResponse + (*tabletmanagerdata.WaitForPositionResponse)(nil), // 75: tabletmanagerdata.WaitForPositionResponse + (*tabletmanagerdata.StopReplicationResponse)(nil), // 76: tabletmanagerdata.StopReplicationResponse + (*tabletmanagerdata.StopReplicationMinimumResponse)(nil), // 77: tabletmanagerdata.StopReplicationMinimumResponse + (*tabletmanagerdata.StartReplicationResponse)(nil), // 78: tabletmanagerdata.StartReplicationResponse + (*tabletmanagerdata.StartReplicationUntilAfterResponse)(nil), // 79: tabletmanagerdata.StartReplicationUntilAfterResponse + (*tabletmanagerdata.GetReplicasResponse)(nil), // 80: tabletmanagerdata.GetReplicasResponse + (*tabletmanagerdata.CreateVReplicationWorkflowResponse)(nil), // 81: tabletmanagerdata.CreateVReplicationWorkflowResponse + (*tabletmanagerdata.DeleteVReplicationWorkflowResponse)(nil), // 82: tabletmanagerdata.DeleteVReplicationWorkflowResponse + (*tabletmanagerdata.ReadVReplicationWorkflowResponse)(nil), // 83: tabletmanagerdata.ReadVReplicationWorkflowResponse + (*tabletmanagerdata.VReplicationExecResponse)(nil), // 84: tabletmanagerdata.VReplicationExecResponse + (*tabletmanagerdata.VReplicationWaitForPosResponse)(nil), 
// 85: tabletmanagerdata.VReplicationWaitForPosResponse + (*tabletmanagerdata.UpdateVReplicationWorkflowResponse)(nil), // 86: tabletmanagerdata.UpdateVReplicationWorkflowResponse + (*tabletmanagerdata.VDiffResponse)(nil), // 87: tabletmanagerdata.VDiffResponse + (*tabletmanagerdata.ResetReplicationResponse)(nil), // 88: tabletmanagerdata.ResetReplicationResponse + (*tabletmanagerdata.InitPrimaryResponse)(nil), // 89: tabletmanagerdata.InitPrimaryResponse + (*tabletmanagerdata.PopulateReparentJournalResponse)(nil), // 90: tabletmanagerdata.PopulateReparentJournalResponse + (*tabletmanagerdata.InitReplicaResponse)(nil), // 91: tabletmanagerdata.InitReplicaResponse + (*tabletmanagerdata.DemotePrimaryResponse)(nil), // 92: tabletmanagerdata.DemotePrimaryResponse + (*tabletmanagerdata.UndoDemotePrimaryResponse)(nil), // 93: tabletmanagerdata.UndoDemotePrimaryResponse + (*tabletmanagerdata.ReplicaWasPromotedResponse)(nil), // 94: tabletmanagerdata.ReplicaWasPromotedResponse + (*tabletmanagerdata.ResetReplicationParametersResponse)(nil), // 95: tabletmanagerdata.ResetReplicationParametersResponse + (*tabletmanagerdata.FullStatusResponse)(nil), // 96: tabletmanagerdata.FullStatusResponse + (*tabletmanagerdata.SetReplicationSourceResponse)(nil), // 97: tabletmanagerdata.SetReplicationSourceResponse + (*tabletmanagerdata.ReplicaWasRestartedResponse)(nil), // 98: tabletmanagerdata.ReplicaWasRestartedResponse + (*tabletmanagerdata.StopReplicationAndGetStatusResponse)(nil), // 99: tabletmanagerdata.StopReplicationAndGetStatusResponse + (*tabletmanagerdata.PromoteReplicaResponse)(nil), // 100: tabletmanagerdata.PromoteReplicaResponse + (*tabletmanagerdata.BackupResponse)(nil), // 101: tabletmanagerdata.BackupResponse + (*tabletmanagerdata.RestoreFromBackupResponse)(nil), // 102: tabletmanagerdata.RestoreFromBackupResponse + (*tabletmanagerdata.CheckThrottlerResponse)(nil), // 103: tabletmanagerdata.CheckThrottlerResponse } var file_tabletmanagerservice_proto_depIdxs = []int32{ 
- 0, // 0: tabletmanagerservice.TabletManager.Ping:input_type -> tabletmanagerdata.PingRequest - 1, // 1: tabletmanagerservice.TabletManager.Sleep:input_type -> tabletmanagerdata.SleepRequest - 2, // 2: tabletmanagerservice.TabletManager.ExecuteHook:input_type -> tabletmanagerdata.ExecuteHookRequest - 3, // 3: tabletmanagerservice.TabletManager.GetSchema:input_type -> tabletmanagerdata.GetSchemaRequest - 4, // 4: tabletmanagerservice.TabletManager.GetPermissions:input_type -> tabletmanagerdata.GetPermissionsRequest - 5, // 5: tabletmanagerservice.TabletManager.SetReadOnly:input_type -> tabletmanagerdata.SetReadOnlyRequest - 6, // 6: tabletmanagerservice.TabletManager.SetReadWrite:input_type -> tabletmanagerdata.SetReadWriteRequest - 7, // 7: tabletmanagerservice.TabletManager.ChangeType:input_type -> tabletmanagerdata.ChangeTypeRequest - 8, // 8: tabletmanagerservice.TabletManager.RefreshState:input_type -> tabletmanagerdata.RefreshStateRequest - 9, // 9: tabletmanagerservice.TabletManager.RunHealthCheck:input_type -> tabletmanagerdata.RunHealthCheckRequest - 10, // 10: tabletmanagerservice.TabletManager.ReloadSchema:input_type -> tabletmanagerdata.ReloadSchemaRequest - 11, // 11: tabletmanagerservice.TabletManager.PreflightSchema:input_type -> tabletmanagerdata.PreflightSchemaRequest - 12, // 12: tabletmanagerservice.TabletManager.ApplySchema:input_type -> tabletmanagerdata.ApplySchemaRequest - 13, // 13: tabletmanagerservice.TabletManager.LockTables:input_type -> tabletmanagerdata.LockTablesRequest - 14, // 14: tabletmanagerservice.TabletManager.UnlockTables:input_type -> tabletmanagerdata.UnlockTablesRequest - 15, // 15: tabletmanagerservice.TabletManager.ExecuteQuery:input_type -> tabletmanagerdata.ExecuteQueryRequest - 16, // 16: tabletmanagerservice.TabletManager.ExecuteFetchAsDba:input_type -> tabletmanagerdata.ExecuteFetchAsDbaRequest - 17, // 17: tabletmanagerservice.TabletManager.ExecuteFetchAsAllPrivs:input_type -> 
tabletmanagerdata.ExecuteFetchAsAllPrivsRequest - 18, // 18: tabletmanagerservice.TabletManager.ExecuteFetchAsApp:input_type -> tabletmanagerdata.ExecuteFetchAsAppRequest - 19, // 19: tabletmanagerservice.TabletManager.ReplicationStatus:input_type -> tabletmanagerdata.ReplicationStatusRequest - 20, // 20: tabletmanagerservice.TabletManager.PrimaryStatus:input_type -> tabletmanagerdata.PrimaryStatusRequest - 21, // 21: tabletmanagerservice.TabletManager.PrimaryPosition:input_type -> tabletmanagerdata.PrimaryPositionRequest - 22, // 22: tabletmanagerservice.TabletManager.WaitForPosition:input_type -> tabletmanagerdata.WaitForPositionRequest - 23, // 23: tabletmanagerservice.TabletManager.StopReplication:input_type -> tabletmanagerdata.StopReplicationRequest - 24, // 24: tabletmanagerservice.TabletManager.StopReplicationMinimum:input_type -> tabletmanagerdata.StopReplicationMinimumRequest - 25, // 25: tabletmanagerservice.TabletManager.StartReplication:input_type -> tabletmanagerdata.StartReplicationRequest - 26, // 26: tabletmanagerservice.TabletManager.StartReplicationUntilAfter:input_type -> tabletmanagerdata.StartReplicationUntilAfterRequest - 27, // 27: tabletmanagerservice.TabletManager.GetReplicas:input_type -> tabletmanagerdata.GetReplicasRequest - 28, // 28: tabletmanagerservice.TabletManager.VReplicationExec:input_type -> tabletmanagerdata.VReplicationExecRequest - 29, // 29: tabletmanagerservice.TabletManager.VReplicationWaitForPos:input_type -> tabletmanagerdata.VReplicationWaitForPosRequest - 30, // 30: tabletmanagerservice.TabletManager.UpdateVRWorkflow:input_type -> tabletmanagerdata.UpdateVRWorkflowRequest - 31, // 31: tabletmanagerservice.TabletManager.VDiff:input_type -> tabletmanagerdata.VDiffRequest - 32, // 32: tabletmanagerservice.TabletManager.ResetReplication:input_type -> tabletmanagerdata.ResetReplicationRequest - 33, // 33: tabletmanagerservice.TabletManager.InitPrimary:input_type -> tabletmanagerdata.InitPrimaryRequest - 34, // 34: 
tabletmanagerservice.TabletManager.PopulateReparentJournal:input_type -> tabletmanagerdata.PopulateReparentJournalRequest - 35, // 35: tabletmanagerservice.TabletManager.InitReplica:input_type -> tabletmanagerdata.InitReplicaRequest - 36, // 36: tabletmanagerservice.TabletManager.DemotePrimary:input_type -> tabletmanagerdata.DemotePrimaryRequest - 37, // 37: tabletmanagerservice.TabletManager.UndoDemotePrimary:input_type -> tabletmanagerdata.UndoDemotePrimaryRequest - 38, // 38: tabletmanagerservice.TabletManager.ReplicaWasPromoted:input_type -> tabletmanagerdata.ReplicaWasPromotedRequest - 39, // 39: tabletmanagerservice.TabletManager.ResetReplicationParameters:input_type -> tabletmanagerdata.ResetReplicationParametersRequest - 40, // 40: tabletmanagerservice.TabletManager.FullStatus:input_type -> tabletmanagerdata.FullStatusRequest - 41, // 41: tabletmanagerservice.TabletManager.SetReplicationSource:input_type -> tabletmanagerdata.SetReplicationSourceRequest - 42, // 42: tabletmanagerservice.TabletManager.ReplicaWasRestarted:input_type -> tabletmanagerdata.ReplicaWasRestartedRequest - 43, // 43: tabletmanagerservice.TabletManager.StopReplicationAndGetStatus:input_type -> tabletmanagerdata.StopReplicationAndGetStatusRequest - 44, // 44: tabletmanagerservice.TabletManager.PromoteReplica:input_type -> tabletmanagerdata.PromoteReplicaRequest - 45, // 45: tabletmanagerservice.TabletManager.Backup:input_type -> tabletmanagerdata.BackupRequest - 46, // 46: tabletmanagerservice.TabletManager.RestoreFromBackup:input_type -> tabletmanagerdata.RestoreFromBackupRequest - 47, // 47: tabletmanagerservice.TabletManager.Ping:output_type -> tabletmanagerdata.PingResponse - 48, // 48: tabletmanagerservice.TabletManager.Sleep:output_type -> tabletmanagerdata.SleepResponse - 49, // 49: tabletmanagerservice.TabletManager.ExecuteHook:output_type -> tabletmanagerdata.ExecuteHookResponse - 50, // 50: tabletmanagerservice.TabletManager.GetSchema:output_type -> 
tabletmanagerdata.GetSchemaResponse - 51, // 51: tabletmanagerservice.TabletManager.GetPermissions:output_type -> tabletmanagerdata.GetPermissionsResponse - 52, // 52: tabletmanagerservice.TabletManager.SetReadOnly:output_type -> tabletmanagerdata.SetReadOnlyResponse - 53, // 53: tabletmanagerservice.TabletManager.SetReadWrite:output_type -> tabletmanagerdata.SetReadWriteResponse - 54, // 54: tabletmanagerservice.TabletManager.ChangeType:output_type -> tabletmanagerdata.ChangeTypeResponse - 55, // 55: tabletmanagerservice.TabletManager.RefreshState:output_type -> tabletmanagerdata.RefreshStateResponse - 56, // 56: tabletmanagerservice.TabletManager.RunHealthCheck:output_type -> tabletmanagerdata.RunHealthCheckResponse - 57, // 57: tabletmanagerservice.TabletManager.ReloadSchema:output_type -> tabletmanagerdata.ReloadSchemaResponse - 58, // 58: tabletmanagerservice.TabletManager.PreflightSchema:output_type -> tabletmanagerdata.PreflightSchemaResponse - 59, // 59: tabletmanagerservice.TabletManager.ApplySchema:output_type -> tabletmanagerdata.ApplySchemaResponse - 60, // 60: tabletmanagerservice.TabletManager.LockTables:output_type -> tabletmanagerdata.LockTablesResponse - 61, // 61: tabletmanagerservice.TabletManager.UnlockTables:output_type -> tabletmanagerdata.UnlockTablesResponse - 62, // 62: tabletmanagerservice.TabletManager.ExecuteQuery:output_type -> tabletmanagerdata.ExecuteQueryResponse - 63, // 63: tabletmanagerservice.TabletManager.ExecuteFetchAsDba:output_type -> tabletmanagerdata.ExecuteFetchAsDbaResponse - 64, // 64: tabletmanagerservice.TabletManager.ExecuteFetchAsAllPrivs:output_type -> tabletmanagerdata.ExecuteFetchAsAllPrivsResponse - 65, // 65: tabletmanagerservice.TabletManager.ExecuteFetchAsApp:output_type -> tabletmanagerdata.ExecuteFetchAsAppResponse - 66, // 66: tabletmanagerservice.TabletManager.ReplicationStatus:output_type -> tabletmanagerdata.ReplicationStatusResponse - 67, // 67: 
tabletmanagerservice.TabletManager.PrimaryStatus:output_type -> tabletmanagerdata.PrimaryStatusResponse - 68, // 68: tabletmanagerservice.TabletManager.PrimaryPosition:output_type -> tabletmanagerdata.PrimaryPositionResponse - 69, // 69: tabletmanagerservice.TabletManager.WaitForPosition:output_type -> tabletmanagerdata.WaitForPositionResponse - 70, // 70: tabletmanagerservice.TabletManager.StopReplication:output_type -> tabletmanagerdata.StopReplicationResponse - 71, // 71: tabletmanagerservice.TabletManager.StopReplicationMinimum:output_type -> tabletmanagerdata.StopReplicationMinimumResponse - 72, // 72: tabletmanagerservice.TabletManager.StartReplication:output_type -> tabletmanagerdata.StartReplicationResponse - 73, // 73: tabletmanagerservice.TabletManager.StartReplicationUntilAfter:output_type -> tabletmanagerdata.StartReplicationUntilAfterResponse - 74, // 74: tabletmanagerservice.TabletManager.GetReplicas:output_type -> tabletmanagerdata.GetReplicasResponse - 75, // 75: tabletmanagerservice.TabletManager.VReplicationExec:output_type -> tabletmanagerdata.VReplicationExecResponse - 76, // 76: tabletmanagerservice.TabletManager.VReplicationWaitForPos:output_type -> tabletmanagerdata.VReplicationWaitForPosResponse - 77, // 77: tabletmanagerservice.TabletManager.UpdateVRWorkflow:output_type -> tabletmanagerdata.UpdateVRWorkflowResponse - 78, // 78: tabletmanagerservice.TabletManager.VDiff:output_type -> tabletmanagerdata.VDiffResponse - 79, // 79: tabletmanagerservice.TabletManager.ResetReplication:output_type -> tabletmanagerdata.ResetReplicationResponse - 80, // 80: tabletmanagerservice.TabletManager.InitPrimary:output_type -> tabletmanagerdata.InitPrimaryResponse - 81, // 81: tabletmanagerservice.TabletManager.PopulateReparentJournal:output_type -> tabletmanagerdata.PopulateReparentJournalResponse - 82, // 82: tabletmanagerservice.TabletManager.InitReplica:output_type -> tabletmanagerdata.InitReplicaResponse - 83, // 83: 
tabletmanagerservice.TabletManager.DemotePrimary:output_type -> tabletmanagerdata.DemotePrimaryResponse - 84, // 84: tabletmanagerservice.TabletManager.UndoDemotePrimary:output_type -> tabletmanagerdata.UndoDemotePrimaryResponse - 85, // 85: tabletmanagerservice.TabletManager.ReplicaWasPromoted:output_type -> tabletmanagerdata.ReplicaWasPromotedResponse - 86, // 86: tabletmanagerservice.TabletManager.ResetReplicationParameters:output_type -> tabletmanagerdata.ResetReplicationParametersResponse - 87, // 87: tabletmanagerservice.TabletManager.FullStatus:output_type -> tabletmanagerdata.FullStatusResponse - 88, // 88: tabletmanagerservice.TabletManager.SetReplicationSource:output_type -> tabletmanagerdata.SetReplicationSourceResponse - 89, // 89: tabletmanagerservice.TabletManager.ReplicaWasRestarted:output_type -> tabletmanagerdata.ReplicaWasRestartedResponse - 90, // 90: tabletmanagerservice.TabletManager.StopReplicationAndGetStatus:output_type -> tabletmanagerdata.StopReplicationAndGetStatusResponse - 91, // 91: tabletmanagerservice.TabletManager.PromoteReplica:output_type -> tabletmanagerdata.PromoteReplicaResponse - 92, // 92: tabletmanagerservice.TabletManager.Backup:output_type -> tabletmanagerdata.BackupResponse - 93, // 93: tabletmanagerservice.TabletManager.RestoreFromBackup:output_type -> tabletmanagerdata.RestoreFromBackupResponse - 47, // [47:94] is the sub-list for method output_type - 0, // [0:47] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 0, // 0: tabletmanagerservice.TabletManager.Ping:input_type -> tabletmanagerdata.PingRequest + 1, // 1: tabletmanagerservice.TabletManager.Sleep:input_type -> tabletmanagerdata.SleepRequest + 2, // 2: tabletmanagerservice.TabletManager.ExecuteHook:input_type -> tabletmanagerdata.ExecuteHookRequest + 3, // 3: tabletmanagerservice.TabletManager.GetSchema:input_type -> 
tabletmanagerdata.GetSchemaRequest + 4, // 4: tabletmanagerservice.TabletManager.GetPermissions:input_type -> tabletmanagerdata.GetPermissionsRequest + 5, // 5: tabletmanagerservice.TabletManager.SetReadOnly:input_type -> tabletmanagerdata.SetReadOnlyRequest + 6, // 6: tabletmanagerservice.TabletManager.SetReadWrite:input_type -> tabletmanagerdata.SetReadWriteRequest + 7, // 7: tabletmanagerservice.TabletManager.ChangeType:input_type -> tabletmanagerdata.ChangeTypeRequest + 8, // 8: tabletmanagerservice.TabletManager.RefreshState:input_type -> tabletmanagerdata.RefreshStateRequest + 9, // 9: tabletmanagerservice.TabletManager.RunHealthCheck:input_type -> tabletmanagerdata.RunHealthCheckRequest + 10, // 10: tabletmanagerservice.TabletManager.ReloadSchema:input_type -> tabletmanagerdata.ReloadSchemaRequest + 11, // 11: tabletmanagerservice.TabletManager.PreflightSchema:input_type -> tabletmanagerdata.PreflightSchemaRequest + 12, // 12: tabletmanagerservice.TabletManager.ApplySchema:input_type -> tabletmanagerdata.ApplySchemaRequest + 13, // 13: tabletmanagerservice.TabletManager.ResetSequences:input_type -> tabletmanagerdata.ResetSequencesRequest + 14, // 14: tabletmanagerservice.TabletManager.LockTables:input_type -> tabletmanagerdata.LockTablesRequest + 15, // 15: tabletmanagerservice.TabletManager.UnlockTables:input_type -> tabletmanagerdata.UnlockTablesRequest + 16, // 16: tabletmanagerservice.TabletManager.ExecuteQuery:input_type -> tabletmanagerdata.ExecuteQueryRequest + 17, // 17: tabletmanagerservice.TabletManager.ExecuteFetchAsDba:input_type -> tabletmanagerdata.ExecuteFetchAsDbaRequest + 18, // 18: tabletmanagerservice.TabletManager.ExecuteFetchAsAllPrivs:input_type -> tabletmanagerdata.ExecuteFetchAsAllPrivsRequest + 19, // 19: tabletmanagerservice.TabletManager.ExecuteFetchAsApp:input_type -> tabletmanagerdata.ExecuteFetchAsAppRequest + 20, // 20: tabletmanagerservice.TabletManager.ReplicationStatus:input_type -> tabletmanagerdata.ReplicationStatusRequest 
+ 21, // 21: tabletmanagerservice.TabletManager.PrimaryStatus:input_type -> tabletmanagerdata.PrimaryStatusRequest + 22, // 22: tabletmanagerservice.TabletManager.PrimaryPosition:input_type -> tabletmanagerdata.PrimaryPositionRequest + 23, // 23: tabletmanagerservice.TabletManager.WaitForPosition:input_type -> tabletmanagerdata.WaitForPositionRequest + 24, // 24: tabletmanagerservice.TabletManager.StopReplication:input_type -> tabletmanagerdata.StopReplicationRequest + 25, // 25: tabletmanagerservice.TabletManager.StopReplicationMinimum:input_type -> tabletmanagerdata.StopReplicationMinimumRequest + 26, // 26: tabletmanagerservice.TabletManager.StartReplication:input_type -> tabletmanagerdata.StartReplicationRequest + 27, // 27: tabletmanagerservice.TabletManager.StartReplicationUntilAfter:input_type -> tabletmanagerdata.StartReplicationUntilAfterRequest + 28, // 28: tabletmanagerservice.TabletManager.GetReplicas:input_type -> tabletmanagerdata.GetReplicasRequest + 29, // 29: tabletmanagerservice.TabletManager.CreateVReplicationWorkflow:input_type -> tabletmanagerdata.CreateVReplicationWorkflowRequest + 30, // 30: tabletmanagerservice.TabletManager.DeleteVReplicationWorkflow:input_type -> tabletmanagerdata.DeleteVReplicationWorkflowRequest + 31, // 31: tabletmanagerservice.TabletManager.ReadVReplicationWorkflow:input_type -> tabletmanagerdata.ReadVReplicationWorkflowRequest + 32, // 32: tabletmanagerservice.TabletManager.VReplicationExec:input_type -> tabletmanagerdata.VReplicationExecRequest + 33, // 33: tabletmanagerservice.TabletManager.VReplicationWaitForPos:input_type -> tabletmanagerdata.VReplicationWaitForPosRequest + 34, // 34: tabletmanagerservice.TabletManager.UpdateVReplicationWorkflow:input_type -> tabletmanagerdata.UpdateVReplicationWorkflowRequest + 35, // 35: tabletmanagerservice.TabletManager.VDiff:input_type -> tabletmanagerdata.VDiffRequest + 36, // 36: tabletmanagerservice.TabletManager.ResetReplication:input_type -> 
tabletmanagerdata.ResetReplicationRequest + 37, // 37: tabletmanagerservice.TabletManager.InitPrimary:input_type -> tabletmanagerdata.InitPrimaryRequest + 38, // 38: tabletmanagerservice.TabletManager.PopulateReparentJournal:input_type -> tabletmanagerdata.PopulateReparentJournalRequest + 39, // 39: tabletmanagerservice.TabletManager.InitReplica:input_type -> tabletmanagerdata.InitReplicaRequest + 40, // 40: tabletmanagerservice.TabletManager.DemotePrimary:input_type -> tabletmanagerdata.DemotePrimaryRequest + 41, // 41: tabletmanagerservice.TabletManager.UndoDemotePrimary:input_type -> tabletmanagerdata.UndoDemotePrimaryRequest + 42, // 42: tabletmanagerservice.TabletManager.ReplicaWasPromoted:input_type -> tabletmanagerdata.ReplicaWasPromotedRequest + 43, // 43: tabletmanagerservice.TabletManager.ResetReplicationParameters:input_type -> tabletmanagerdata.ResetReplicationParametersRequest + 44, // 44: tabletmanagerservice.TabletManager.FullStatus:input_type -> tabletmanagerdata.FullStatusRequest + 45, // 45: tabletmanagerservice.TabletManager.SetReplicationSource:input_type -> tabletmanagerdata.SetReplicationSourceRequest + 46, // 46: tabletmanagerservice.TabletManager.ReplicaWasRestarted:input_type -> tabletmanagerdata.ReplicaWasRestartedRequest + 47, // 47: tabletmanagerservice.TabletManager.StopReplicationAndGetStatus:input_type -> tabletmanagerdata.StopReplicationAndGetStatusRequest + 48, // 48: tabletmanagerservice.TabletManager.PromoteReplica:input_type -> tabletmanagerdata.PromoteReplicaRequest + 49, // 49: tabletmanagerservice.TabletManager.Backup:input_type -> tabletmanagerdata.BackupRequest + 50, // 50: tabletmanagerservice.TabletManager.RestoreFromBackup:input_type -> tabletmanagerdata.RestoreFromBackupRequest + 51, // 51: tabletmanagerservice.TabletManager.CheckThrottler:input_type -> tabletmanagerdata.CheckThrottlerRequest + 52, // 52: tabletmanagerservice.TabletManager.Ping:output_type -> tabletmanagerdata.PingResponse + 53, // 53: 
tabletmanagerservice.TabletManager.Sleep:output_type -> tabletmanagerdata.SleepResponse + 54, // 54: tabletmanagerservice.TabletManager.ExecuteHook:output_type -> tabletmanagerdata.ExecuteHookResponse + 55, // 55: tabletmanagerservice.TabletManager.GetSchema:output_type -> tabletmanagerdata.GetSchemaResponse + 56, // 56: tabletmanagerservice.TabletManager.GetPermissions:output_type -> tabletmanagerdata.GetPermissionsResponse + 57, // 57: tabletmanagerservice.TabletManager.SetReadOnly:output_type -> tabletmanagerdata.SetReadOnlyResponse + 58, // 58: tabletmanagerservice.TabletManager.SetReadWrite:output_type -> tabletmanagerdata.SetReadWriteResponse + 59, // 59: tabletmanagerservice.TabletManager.ChangeType:output_type -> tabletmanagerdata.ChangeTypeResponse + 60, // 60: tabletmanagerservice.TabletManager.RefreshState:output_type -> tabletmanagerdata.RefreshStateResponse + 61, // 61: tabletmanagerservice.TabletManager.RunHealthCheck:output_type -> tabletmanagerdata.RunHealthCheckResponse + 62, // 62: tabletmanagerservice.TabletManager.ReloadSchema:output_type -> tabletmanagerdata.ReloadSchemaResponse + 63, // 63: tabletmanagerservice.TabletManager.PreflightSchema:output_type -> tabletmanagerdata.PreflightSchemaResponse + 64, // 64: tabletmanagerservice.TabletManager.ApplySchema:output_type -> tabletmanagerdata.ApplySchemaResponse + 65, // 65: tabletmanagerservice.TabletManager.ResetSequences:output_type -> tabletmanagerdata.ResetSequencesResponse + 66, // 66: tabletmanagerservice.TabletManager.LockTables:output_type -> tabletmanagerdata.LockTablesResponse + 67, // 67: tabletmanagerservice.TabletManager.UnlockTables:output_type -> tabletmanagerdata.UnlockTablesResponse + 68, // 68: tabletmanagerservice.TabletManager.ExecuteQuery:output_type -> tabletmanagerdata.ExecuteQueryResponse + 69, // 69: tabletmanagerservice.TabletManager.ExecuteFetchAsDba:output_type -> tabletmanagerdata.ExecuteFetchAsDbaResponse + 70, // 70: 
tabletmanagerservice.TabletManager.ExecuteFetchAsAllPrivs:output_type -> tabletmanagerdata.ExecuteFetchAsAllPrivsResponse + 71, // 71: tabletmanagerservice.TabletManager.ExecuteFetchAsApp:output_type -> tabletmanagerdata.ExecuteFetchAsAppResponse + 72, // 72: tabletmanagerservice.TabletManager.ReplicationStatus:output_type -> tabletmanagerdata.ReplicationStatusResponse + 73, // 73: tabletmanagerservice.TabletManager.PrimaryStatus:output_type -> tabletmanagerdata.PrimaryStatusResponse + 74, // 74: tabletmanagerservice.TabletManager.PrimaryPosition:output_type -> tabletmanagerdata.PrimaryPositionResponse + 75, // 75: tabletmanagerservice.TabletManager.WaitForPosition:output_type -> tabletmanagerdata.WaitForPositionResponse + 76, // 76: tabletmanagerservice.TabletManager.StopReplication:output_type -> tabletmanagerdata.StopReplicationResponse + 77, // 77: tabletmanagerservice.TabletManager.StopReplicationMinimum:output_type -> tabletmanagerdata.StopReplicationMinimumResponse + 78, // 78: tabletmanagerservice.TabletManager.StartReplication:output_type -> tabletmanagerdata.StartReplicationResponse + 79, // 79: tabletmanagerservice.TabletManager.StartReplicationUntilAfter:output_type -> tabletmanagerdata.StartReplicationUntilAfterResponse + 80, // 80: tabletmanagerservice.TabletManager.GetReplicas:output_type -> tabletmanagerdata.GetReplicasResponse + 81, // 81: tabletmanagerservice.TabletManager.CreateVReplicationWorkflow:output_type -> tabletmanagerdata.CreateVReplicationWorkflowResponse + 82, // 82: tabletmanagerservice.TabletManager.DeleteVReplicationWorkflow:output_type -> tabletmanagerdata.DeleteVReplicationWorkflowResponse + 83, // 83: tabletmanagerservice.TabletManager.ReadVReplicationWorkflow:output_type -> tabletmanagerdata.ReadVReplicationWorkflowResponse + 84, // 84: tabletmanagerservice.TabletManager.VReplicationExec:output_type -> tabletmanagerdata.VReplicationExecResponse + 85, // 85: tabletmanagerservice.TabletManager.VReplicationWaitForPos:output_type -> 
tabletmanagerdata.VReplicationWaitForPosResponse + 86, // 86: tabletmanagerservice.TabletManager.UpdateVReplicationWorkflow:output_type -> tabletmanagerdata.UpdateVReplicationWorkflowResponse + 87, // 87: tabletmanagerservice.TabletManager.VDiff:output_type -> tabletmanagerdata.VDiffResponse + 88, // 88: tabletmanagerservice.TabletManager.ResetReplication:output_type -> tabletmanagerdata.ResetReplicationResponse + 89, // 89: tabletmanagerservice.TabletManager.InitPrimary:output_type -> tabletmanagerdata.InitPrimaryResponse + 90, // 90: tabletmanagerservice.TabletManager.PopulateReparentJournal:output_type -> tabletmanagerdata.PopulateReparentJournalResponse + 91, // 91: tabletmanagerservice.TabletManager.InitReplica:output_type -> tabletmanagerdata.InitReplicaResponse + 92, // 92: tabletmanagerservice.TabletManager.DemotePrimary:output_type -> tabletmanagerdata.DemotePrimaryResponse + 93, // 93: tabletmanagerservice.TabletManager.UndoDemotePrimary:output_type -> tabletmanagerdata.UndoDemotePrimaryResponse + 94, // 94: tabletmanagerservice.TabletManager.ReplicaWasPromoted:output_type -> tabletmanagerdata.ReplicaWasPromotedResponse + 95, // 95: tabletmanagerservice.TabletManager.ResetReplicationParameters:output_type -> tabletmanagerdata.ResetReplicationParametersResponse + 96, // 96: tabletmanagerservice.TabletManager.FullStatus:output_type -> tabletmanagerdata.FullStatusResponse + 97, // 97: tabletmanagerservice.TabletManager.SetReplicationSource:output_type -> tabletmanagerdata.SetReplicationSourceResponse + 98, // 98: tabletmanagerservice.TabletManager.ReplicaWasRestarted:output_type -> tabletmanagerdata.ReplicaWasRestartedResponse + 99, // 99: tabletmanagerservice.TabletManager.StopReplicationAndGetStatus:output_type -> tabletmanagerdata.StopReplicationAndGetStatusResponse + 100, // 100: tabletmanagerservice.TabletManager.PromoteReplica:output_type -> tabletmanagerdata.PromoteReplicaResponse + 101, // 101: tabletmanagerservice.TabletManager.Backup:output_type -> 
tabletmanagerdata.BackupResponse + 102, // 102: tabletmanagerservice.TabletManager.RestoreFromBackup:output_type -> tabletmanagerdata.RestoreFromBackupResponse + 103, // 103: tabletmanagerservice.TabletManager.CheckThrottler:output_type -> tabletmanagerdata.CheckThrottlerResponse + 52, // [52:104] is the sub-list for method output_type + 0, // [0:52] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } func init() { file_tabletmanagerservice_proto_init() } diff --git a/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go b/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go index 40ad0a4459a..f0665947007 100644 --- a/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go +++ b/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 -// - protoc v3.6.1 +// - protoc v3.21.3 // source: tabletmanagerservice.proto package tabletmanagerservice @@ -42,6 +42,7 @@ type TabletManagerClient interface { ReloadSchema(ctx context.Context, in *tabletmanagerdata.ReloadSchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReloadSchemaResponse, error) PreflightSchema(ctx context.Context, in *tabletmanagerdata.PreflightSchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PreflightSchemaResponse, error) ApplySchema(ctx context.Context, in *tabletmanagerdata.ApplySchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ApplySchemaResponse, error) + ResetSequences(ctx context.Context, in *tabletmanagerdata.ResetSequencesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ResetSequencesResponse, error) LockTables(ctx context.Context, in *tabletmanagerdata.LockTablesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.LockTablesResponse, error) UnlockTables(ctx 
context.Context, in *tabletmanagerdata.UnlockTablesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UnlockTablesResponse, error) ExecuteQuery(ctx context.Context, in *tabletmanagerdata.ExecuteQueryRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteQueryResponse, error) @@ -69,9 +70,12 @@ type TabletManagerClient interface { // GetReplicas asks for the list of mysql replicas GetReplicas(ctx context.Context, in *tabletmanagerdata.GetReplicasRequest, opts ...grpc.CallOption) (*tabletmanagerdata.GetReplicasResponse, error) // VReplication API + CreateVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.CreateVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.CreateVReplicationWorkflowResponse, error) + DeleteVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.DeleteVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.DeleteVReplicationWorkflowResponse, error) + ReadVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.ReadVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReadVReplicationWorkflowResponse, error) VReplicationExec(ctx context.Context, in *tabletmanagerdata.VReplicationExecRequest, opts ...grpc.CallOption) (*tabletmanagerdata.VReplicationExecResponse, error) VReplicationWaitForPos(ctx context.Context, in *tabletmanagerdata.VReplicationWaitForPosRequest, opts ...grpc.CallOption) (*tabletmanagerdata.VReplicationWaitForPosResponse, error) - UpdateVRWorkflow(ctx context.Context, in *tabletmanagerdata.UpdateVRWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UpdateVRWorkflowResponse, error) + UpdateVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.UpdateVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UpdateVReplicationWorkflowResponse, error) // VDiff API VDiff(ctx context.Context, in *tabletmanagerdata.VDiffRequest, opts ...grpc.CallOption) (*tabletmanagerdata.VDiffResponse, error) // 
ResetReplication makes the target not replicating @@ -105,6 +109,8 @@ type TabletManagerClient interface { Backup(ctx context.Context, in *tabletmanagerdata.BackupRequest, opts ...grpc.CallOption) (TabletManager_BackupClient, error) // RestoreFromBackup deletes all local data and restores it from the latest backup. RestoreFromBackup(ctx context.Context, in *tabletmanagerdata.RestoreFromBackupRequest, opts ...grpc.CallOption) (TabletManager_RestoreFromBackupClient, error) + // CheckThrottler issues a 'check' on a tablet's throttler + CheckThrottler(ctx context.Context, in *tabletmanagerdata.CheckThrottlerRequest, opts ...grpc.CallOption) (*tabletmanagerdata.CheckThrottlerResponse, error) } type tabletManagerClient struct { @@ -232,6 +238,15 @@ func (c *tabletManagerClient) ApplySchema(ctx context.Context, in *tabletmanager return out, nil } +func (c *tabletManagerClient) ResetSequences(ctx context.Context, in *tabletmanagerdata.ResetSequencesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ResetSequencesResponse, error) { + out := new(tabletmanagerdata.ResetSequencesResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ResetSequences", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *tabletManagerClient) LockTables(ctx context.Context, in *tabletmanagerdata.LockTablesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.LockTablesResponse, error) { out := new(tabletmanagerdata.LockTablesResponse) err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/LockTables", in, out, opts...) 
@@ -367,6 +382,33 @@ func (c *tabletManagerClient) GetReplicas(ctx context.Context, in *tabletmanager return out, nil } +func (c *tabletManagerClient) CreateVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.CreateVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.CreateVReplicationWorkflowResponse, error) { + out := new(tabletmanagerdata.CreateVReplicationWorkflowResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/CreateVReplicationWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) DeleteVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.DeleteVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.DeleteVReplicationWorkflowResponse, error) { + out := new(tabletmanagerdata.DeleteVReplicationWorkflowResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/DeleteVReplicationWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) ReadVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.ReadVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReadVReplicationWorkflowResponse, error) { + out := new(tabletmanagerdata.ReadVReplicationWorkflowResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ReadVReplicationWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *tabletManagerClient) VReplicationExec(ctx context.Context, in *tabletmanagerdata.VReplicationExecRequest, opts ...grpc.CallOption) (*tabletmanagerdata.VReplicationExecResponse, error) { out := new(tabletmanagerdata.VReplicationExecResponse) err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/VReplicationExec", in, out, opts...) 
@@ -385,9 +427,9 @@ func (c *tabletManagerClient) VReplicationWaitForPos(ctx context.Context, in *ta return out, nil } -func (c *tabletManagerClient) UpdateVRWorkflow(ctx context.Context, in *tabletmanagerdata.UpdateVRWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UpdateVRWorkflowResponse, error) { - out := new(tabletmanagerdata.UpdateVRWorkflowResponse) - err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/UpdateVRWorkflow", in, out, opts...) +func (c *tabletManagerClient) UpdateVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.UpdateVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UpdateVReplicationWorkflowResponse, error) { + out := new(tabletmanagerdata.UpdateVReplicationWorkflowResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/UpdateVReplicationWorkflow", in, out, opts...) if err != nil { return nil, err } @@ -584,6 +626,15 @@ func (x *tabletManagerRestoreFromBackupClient) Recv() (*tabletmanagerdata.Restor return m, nil } +func (c *tabletManagerClient) CheckThrottler(ctx context.Context, in *tabletmanagerdata.CheckThrottlerRequest, opts ...grpc.CallOption) (*tabletmanagerdata.CheckThrottlerResponse, error) { + out := new(tabletmanagerdata.CheckThrottlerResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/CheckThrottler", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // TabletManagerServer is the server API for TabletManager service. 
// All implementations must embed UnimplementedTabletManagerServer // for forward compatibility @@ -607,6 +658,7 @@ type TabletManagerServer interface { ReloadSchema(context.Context, *tabletmanagerdata.ReloadSchemaRequest) (*tabletmanagerdata.ReloadSchemaResponse, error) PreflightSchema(context.Context, *tabletmanagerdata.PreflightSchemaRequest) (*tabletmanagerdata.PreflightSchemaResponse, error) ApplySchema(context.Context, *tabletmanagerdata.ApplySchemaRequest) (*tabletmanagerdata.ApplySchemaResponse, error) + ResetSequences(context.Context, *tabletmanagerdata.ResetSequencesRequest) (*tabletmanagerdata.ResetSequencesResponse, error) LockTables(context.Context, *tabletmanagerdata.LockTablesRequest) (*tabletmanagerdata.LockTablesResponse, error) UnlockTables(context.Context, *tabletmanagerdata.UnlockTablesRequest) (*tabletmanagerdata.UnlockTablesResponse, error) ExecuteQuery(context.Context, *tabletmanagerdata.ExecuteQueryRequest) (*tabletmanagerdata.ExecuteQueryResponse, error) @@ -634,9 +686,12 @@ type TabletManagerServer interface { // GetReplicas asks for the list of mysql replicas GetReplicas(context.Context, *tabletmanagerdata.GetReplicasRequest) (*tabletmanagerdata.GetReplicasResponse, error) // VReplication API + CreateVReplicationWorkflow(context.Context, *tabletmanagerdata.CreateVReplicationWorkflowRequest) (*tabletmanagerdata.CreateVReplicationWorkflowResponse, error) + DeleteVReplicationWorkflow(context.Context, *tabletmanagerdata.DeleteVReplicationWorkflowRequest) (*tabletmanagerdata.DeleteVReplicationWorkflowResponse, error) + ReadVReplicationWorkflow(context.Context, *tabletmanagerdata.ReadVReplicationWorkflowRequest) (*tabletmanagerdata.ReadVReplicationWorkflowResponse, error) VReplicationExec(context.Context, *tabletmanagerdata.VReplicationExecRequest) (*tabletmanagerdata.VReplicationExecResponse, error) VReplicationWaitForPos(context.Context, *tabletmanagerdata.VReplicationWaitForPosRequest) (*tabletmanagerdata.VReplicationWaitForPosResponse, 
error) - UpdateVRWorkflow(context.Context, *tabletmanagerdata.UpdateVRWorkflowRequest) (*tabletmanagerdata.UpdateVRWorkflowResponse, error) + UpdateVReplicationWorkflow(context.Context, *tabletmanagerdata.UpdateVReplicationWorkflowRequest) (*tabletmanagerdata.UpdateVReplicationWorkflowResponse, error) // VDiff API VDiff(context.Context, *tabletmanagerdata.VDiffRequest) (*tabletmanagerdata.VDiffResponse, error) // ResetReplication makes the target not replicating @@ -670,6 +725,8 @@ type TabletManagerServer interface { Backup(*tabletmanagerdata.BackupRequest, TabletManager_BackupServer) error // RestoreFromBackup deletes all local data and restores it from the latest backup. RestoreFromBackup(*tabletmanagerdata.RestoreFromBackupRequest, TabletManager_RestoreFromBackupServer) error + // CheckThrottler issues a 'check' on a tablet's throttler + CheckThrottler(context.Context, *tabletmanagerdata.CheckThrottlerRequest) (*tabletmanagerdata.CheckThrottlerResponse, error) mustEmbedUnimplementedTabletManagerServer() } @@ -716,6 +773,9 @@ func (UnimplementedTabletManagerServer) PreflightSchema(context.Context, *tablet func (UnimplementedTabletManagerServer) ApplySchema(context.Context, *tabletmanagerdata.ApplySchemaRequest) (*tabletmanagerdata.ApplySchemaResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ApplySchema not implemented") } +func (UnimplementedTabletManagerServer) ResetSequences(context.Context, *tabletmanagerdata.ResetSequencesRequest) (*tabletmanagerdata.ResetSequencesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ResetSequences not implemented") +} func (UnimplementedTabletManagerServer) LockTables(context.Context, *tabletmanagerdata.LockTablesRequest) (*tabletmanagerdata.LockTablesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method LockTables not implemented") } @@ -761,14 +821,23 @@ func (UnimplementedTabletManagerServer) StartReplicationUntilAfter(context.Conte func 
(UnimplementedTabletManagerServer) GetReplicas(context.Context, *tabletmanagerdata.GetReplicasRequest) (*tabletmanagerdata.GetReplicasResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetReplicas not implemented") } +func (UnimplementedTabletManagerServer) CreateVReplicationWorkflow(context.Context, *tabletmanagerdata.CreateVReplicationWorkflowRequest) (*tabletmanagerdata.CreateVReplicationWorkflowResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateVReplicationWorkflow not implemented") +} +func (UnimplementedTabletManagerServer) DeleteVReplicationWorkflow(context.Context, *tabletmanagerdata.DeleteVReplicationWorkflowRequest) (*tabletmanagerdata.DeleteVReplicationWorkflowResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteVReplicationWorkflow not implemented") +} +func (UnimplementedTabletManagerServer) ReadVReplicationWorkflow(context.Context, *tabletmanagerdata.ReadVReplicationWorkflowRequest) (*tabletmanagerdata.ReadVReplicationWorkflowResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadVReplicationWorkflow not implemented") +} func (UnimplementedTabletManagerServer) VReplicationExec(context.Context, *tabletmanagerdata.VReplicationExecRequest) (*tabletmanagerdata.VReplicationExecResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VReplicationExec not implemented") } func (UnimplementedTabletManagerServer) VReplicationWaitForPos(context.Context, *tabletmanagerdata.VReplicationWaitForPosRequest) (*tabletmanagerdata.VReplicationWaitForPosResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VReplicationWaitForPos not implemented") } -func (UnimplementedTabletManagerServer) UpdateVRWorkflow(context.Context, *tabletmanagerdata.UpdateVRWorkflowRequest) (*tabletmanagerdata.UpdateVRWorkflowResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateVRWorkflow not implemented") +func 
(UnimplementedTabletManagerServer) UpdateVReplicationWorkflow(context.Context, *tabletmanagerdata.UpdateVReplicationWorkflowRequest) (*tabletmanagerdata.UpdateVReplicationWorkflowResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateVReplicationWorkflow not implemented") } func (UnimplementedTabletManagerServer) VDiff(context.Context, *tabletmanagerdata.VDiffRequest) (*tabletmanagerdata.VDiffResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VDiff not implemented") @@ -818,6 +887,9 @@ func (UnimplementedTabletManagerServer) Backup(*tabletmanagerdata.BackupRequest, func (UnimplementedTabletManagerServer) RestoreFromBackup(*tabletmanagerdata.RestoreFromBackupRequest, TabletManager_RestoreFromBackupServer) error { return status.Errorf(codes.Unimplemented, "method RestoreFromBackup not implemented") } +func (UnimplementedTabletManagerServer) CheckThrottler(context.Context, *tabletmanagerdata.CheckThrottlerRequest) (*tabletmanagerdata.CheckThrottlerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CheckThrottler not implemented") +} func (UnimplementedTabletManagerServer) mustEmbedUnimplementedTabletManagerServer() {} // UnsafeTabletManagerServer may be embedded to opt out of forward compatibility for this service. 
@@ -1065,6 +1137,24 @@ func _TabletManager_ApplySchema_Handler(srv interface{}, ctx context.Context, de return interceptor(ctx, in, info, handler) } +func _TabletManager_ResetSequences_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.ResetSequencesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).ResetSequences(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/ResetSequences", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).ResetSequences(ctx, req.(*tabletmanagerdata.ResetSequencesRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _TabletManager_LockTables_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(tabletmanagerdata.LockTablesRequest) if err := dec(in); err != nil { @@ -1335,6 +1425,60 @@ func _TabletManager_GetReplicas_Handler(srv interface{}, ctx context.Context, de return interceptor(ctx, in, info, handler) } +func _TabletManager_CreateVReplicationWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.CreateVReplicationWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).CreateVReplicationWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/CreateVReplicationWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).CreateVReplicationWorkflow(ctx, 
req.(*tabletmanagerdata.CreateVReplicationWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_DeleteVReplicationWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.DeleteVReplicationWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).DeleteVReplicationWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/DeleteVReplicationWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).DeleteVReplicationWorkflow(ctx, req.(*tabletmanagerdata.DeleteVReplicationWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_ReadVReplicationWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.ReadVReplicationWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).ReadVReplicationWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/ReadVReplicationWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).ReadVReplicationWorkflow(ctx, req.(*tabletmanagerdata.ReadVReplicationWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _TabletManager_VReplicationExec_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(tabletmanagerdata.VReplicationExecRequest) if err := dec(in); err != nil { @@ -1371,20 +1515,20 @@ func 
_TabletManager_VReplicationWaitForPos_Handler(srv interface{}, ctx context. return interceptor(ctx, in, info, handler) } -func _TabletManager_UpdateVRWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(tabletmanagerdata.UpdateVRWorkflowRequest) +func _TabletManager_UpdateVReplicationWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.UpdateVReplicationWorkflowRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(TabletManagerServer).UpdateVRWorkflow(ctx, in) + return srv.(TabletManagerServer).UpdateVReplicationWorkflow(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/tabletmanagerservice.TabletManager/UpdateVRWorkflow", + FullMethod: "/tabletmanagerservice.TabletManager/UpdateVReplicationWorkflow", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TabletManagerServer).UpdateVRWorkflow(ctx, req.(*tabletmanagerdata.UpdateVRWorkflowRequest)) + return srv.(TabletManagerServer).UpdateVReplicationWorkflow(ctx, req.(*tabletmanagerdata.UpdateVReplicationWorkflowRequest)) } return interceptor(ctx, in, info, handler) } @@ -1683,6 +1827,24 @@ func (x *tabletManagerRestoreFromBackupServer) Send(m *tabletmanagerdata.Restore return x.ServerStream.SendMsg(m) } +func _TabletManager_CheckThrottler_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.CheckThrottlerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).CheckThrottler(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/CheckThrottler", + } 
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).CheckThrottler(ctx, req.(*tabletmanagerdata.CheckThrottlerRequest)) + } + return interceptor(ctx, in, info, handler) +} + // TabletManager_ServiceDesc is the grpc.ServiceDesc for TabletManager service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -1742,6 +1904,10 @@ var TabletManager_ServiceDesc = grpc.ServiceDesc{ MethodName: "ApplySchema", Handler: _TabletManager_ApplySchema_Handler, }, + { + MethodName: "ResetSequences", + Handler: _TabletManager_ResetSequences_Handler, + }, { MethodName: "LockTables", Handler: _TabletManager_LockTables_Handler, @@ -1802,6 +1968,18 @@ var TabletManager_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetReplicas", Handler: _TabletManager_GetReplicas_Handler, }, + { + MethodName: "CreateVReplicationWorkflow", + Handler: _TabletManager_CreateVReplicationWorkflow_Handler, + }, + { + MethodName: "DeleteVReplicationWorkflow", + Handler: _TabletManager_DeleteVReplicationWorkflow_Handler, + }, + { + MethodName: "ReadVReplicationWorkflow", + Handler: _TabletManager_ReadVReplicationWorkflow_Handler, + }, { MethodName: "VReplicationExec", Handler: _TabletManager_VReplicationExec_Handler, @@ -1811,8 +1989,8 @@ var TabletManager_ServiceDesc = grpc.ServiceDesc{ Handler: _TabletManager_VReplicationWaitForPos_Handler, }, { - MethodName: "UpdateVRWorkflow", - Handler: _TabletManager_UpdateVRWorkflow_Handler, + MethodName: "UpdateVReplicationWorkflow", + Handler: _TabletManager_UpdateVReplicationWorkflow_Handler, }, { MethodName: "VDiff", @@ -1870,6 +2048,10 @@ var TabletManager_ServiceDesc = grpc.ServiceDesc{ MethodName: "PromoteReplica", Handler: _TabletManager_PromoteReplica_Handler, }, + { + MethodName: "CheckThrottler", + Handler: _TabletManager_CheckThrottler_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git 
a/go/vt/proto/throttlerdata/throttlerdata.pb.go b/go/vt/proto/throttlerdata/throttlerdata.pb.go index ef2633abfce..fb12bc09ce8 100644 --- a/go/vt/proto/throttlerdata/throttlerdata.pb.go +++ b/go/vt/proto/throttlerdata/throttlerdata.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: throttlerdata.proto package throttlerdata diff --git a/go/vt/proto/throttlerdata/throttlerdata_vtproto.pb.go b/go/vt/proto/throttlerdata/throttlerdata_vtproto.pb.go index 7a061d1fc38..e032b7db8e8 100644 --- a/go/vt/proto/throttlerdata/throttlerdata_vtproto.pb.go +++ b/go/vt/proto/throttlerdata/throttlerdata_vtproto.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: throttlerdata.proto package throttlerdata @@ -7,6 +7,7 @@ package throttlerdata import ( binary "encoding/binary" fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" math "math" @@ -20,6 +21,236 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *MaxRatesRequest) CloneVT() *MaxRatesRequest { + if m == nil { + return (*MaxRatesRequest)(nil) + } + r := &MaxRatesRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *MaxRatesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MaxRatesResponse) CloneVT() *MaxRatesResponse { + if m == nil { + return (*MaxRatesResponse)(nil) + } + r := &MaxRatesResponse{} + if rhs := m.Rates; rhs != nil { + tmpContainer := make(map[string]int64, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.Rates = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) 
+ copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *MaxRatesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetMaxRateRequest) CloneVT() *SetMaxRateRequest { + if m == nil { + return (*SetMaxRateRequest)(nil) + } + r := &SetMaxRateRequest{ + Rate: m.Rate, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SetMaxRateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetMaxRateResponse) CloneVT() *SetMaxRateResponse { + if m == nil { + return (*SetMaxRateResponse)(nil) + } + r := &SetMaxRateResponse{} + if rhs := m.Names; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Names = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SetMaxRateResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Configuration) CloneVT() *Configuration { + if m == nil { + return (*Configuration)(nil) + } + r := &Configuration{ + TargetReplicationLagSec: m.TargetReplicationLagSec, + MaxReplicationLagSec: m.MaxReplicationLagSec, + InitialRate: m.InitialRate, + MaxIncrease: m.MaxIncrease, + EmergencyDecrease: m.EmergencyDecrease, + MinDurationBetweenIncreasesSec: m.MinDurationBetweenIncreasesSec, + MaxDurationBetweenIncreasesSec: m.MaxDurationBetweenIncreasesSec, + MinDurationBetweenDecreasesSec: m.MinDurationBetweenDecreasesSec, + SpreadBacklogAcrossSec: m.SpreadBacklogAcrossSec, + IgnoreNSlowestReplicas: m.IgnoreNSlowestReplicas, + IgnoreNSlowestRdonlys: m.IgnoreNSlowestRdonlys, + AgeBadRateAfterSec: m.AgeBadRateAfterSec, + BadRateIncrease: m.BadRateIncrease, + MaxRateApproachThreshold: m.MaxRateApproachThreshold, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + 
copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Configuration) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetConfigurationRequest) CloneVT() *GetConfigurationRequest { + if m == nil { + return (*GetConfigurationRequest)(nil) + } + r := &GetConfigurationRequest{ + ThrottlerName: m.ThrottlerName, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetConfigurationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetConfigurationResponse) CloneVT() *GetConfigurationResponse { + if m == nil { + return (*GetConfigurationResponse)(nil) + } + r := &GetConfigurationResponse{} + if rhs := m.Configurations; rhs != nil { + tmpContainer := make(map[string]*Configuration, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Configurations = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetConfigurationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UpdateConfigurationRequest) CloneVT() *UpdateConfigurationRequest { + if m == nil { + return (*UpdateConfigurationRequest)(nil) + } + r := &UpdateConfigurationRequest{ + ThrottlerName: m.ThrottlerName, + Configuration: m.Configuration.CloneVT(), + CopyZeroValues: m.CopyZeroValues, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *UpdateConfigurationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UpdateConfigurationResponse) CloneVT() *UpdateConfigurationResponse { + if m == nil { + return (*UpdateConfigurationResponse)(nil) + } + r := &UpdateConfigurationResponse{} + if rhs := m.Names; rhs != nil { + tmpContainer := make([]string, 
len(rhs)) + copy(tmpContainer, rhs) + r.Names = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *UpdateConfigurationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ResetConfigurationRequest) CloneVT() *ResetConfigurationRequest { + if m == nil { + return (*ResetConfigurationRequest)(nil) + } + r := &ResetConfigurationRequest{ + ThrottlerName: m.ThrottlerName, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ResetConfigurationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ResetConfigurationResponse) CloneVT() *ResetConfigurationResponse { + if m == nil { + return (*ResetConfigurationResponse)(nil) + } + r := &ResetConfigurationResponse{} + if rhs := m.Names; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Names = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ResetConfigurationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *MaxRatesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil diff --git a/go/vt/proto/throttlerservice/throttlerservice.pb.go b/go/vt/proto/throttlerservice/throttlerservice.pb.go index c907f6d42bc..9bca73e067c 100644 --- a/go/vt/proto/throttlerservice/throttlerservice.pb.go +++ b/go/vt/proto/throttlerservice/throttlerservice.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: throttlerservice.proto package throttlerservice diff --git a/go/vt/proto/throttlerservice/throttlerservice_grpc.pb.go b/go/vt/proto/throttlerservice/throttlerservice_grpc.pb.go index 86bfadf9530..1392d718191 100644 --- a/go/vt/proto/throttlerservice/throttlerservice_grpc.pb.go +++ b/go/vt/proto/throttlerservice/throttlerservice_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 -// - protoc v3.6.1 +// - protoc v3.21.3 // source: throttlerservice.proto package throttlerservice diff --git a/go/vt/proto/topodata/cached_size.go b/go/vt/proto/topodata/cached_size.go index 92da50b703e..d06ebd0d3f0 100644 --- a/go/vt/proto/topodata/cached_size.go +++ b/go/vt/proto/topodata/cached_size.go @@ -41,3 +41,21 @@ func (cached *KeyRange) CachedSize(alloc bool) int64 { } return size } +func (cached *ThrottledAppRule) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(80) + } + // field unknownFields []byte + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownFields))) + } + // field Name string + size += hack.RuntimeAllocSize(int64(len(cached.Name))) + // field ExpiresAt *vitess.io/vitess/go/vt/proto/vttime.Time + size += cached.ExpiresAt.CachedSize(true) + return size +} diff --git a/go/vt/proto/topodata/topodata.pb.go b/go/vt/proto/topodata/topodata.pb.go index a17f0c02218..43ecdbce963 100644 --- a/go/vt/proto/topodata/topodata.pb.go +++ b/go/vt/proto/topodata/topodata.pb.go @@ -20,8 +20,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: topodata.proto package topodata @@ -998,6 +998,83 @@ func (x *ShardTabletControl) GetQueryServiceDisabled() bool { return false } +// ThrottledAppRule defines an app-specific throttling rule, with expiration. +type ThrottledAppRule struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the app to be throttled, e.g. "vreplication" or "online-ddl" + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Ratio defines how much the app should be throttled, range [0.0...1.0]. 1.0 means fully throttled. 0.0 means not throttled at all. + // Negative values are reserved for a future implementation. + Ratio float64 `protobuf:"fixed64,2,opt,name=ratio,proto3" json:"ratio,omitempty"` + // ExpiresAt is the time at which the rule expires. + ExpiresAt *vttime.Time `protobuf:"bytes,3,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` + // Exempt indicates the app should never be throttled, even if the throttler is, in general, throttling other apps. 
+ Exempt bool `protobuf:"varint,4,opt,name=exempt,proto3" json:"exempt,omitempty"` +} + +func (x *ThrottledAppRule) Reset() { + *x = ThrottledAppRule{} + if protoimpl.UnsafeEnabled { + mi := &file_topodata_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ThrottledAppRule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ThrottledAppRule) ProtoMessage() {} + +func (x *ThrottledAppRule) ProtoReflect() protoreflect.Message { + mi := &file_topodata_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ThrottledAppRule.ProtoReflect.Descriptor instead. +func (*ThrottledAppRule) Descriptor() ([]byte, []int) { + return file_topodata_proto_rawDescGZIP(), []int{9} +} + +func (x *ThrottledAppRule) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ThrottledAppRule) GetRatio() float64 { + if x != nil { + return x.Ratio + } + return 0 +} + +func (x *ThrottledAppRule) GetExpiresAt() *vttime.Time { + if x != nil { + return x.ExpiresAt + } + return nil +} + +func (x *ThrottledAppRule) GetExempt() bool { + if x != nil { + return x.Exempt + } + return false +} + type ThrottlerConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1016,12 +1093,14 @@ type ThrottlerConfig struct { // CheckAsCheckSelf indicates whether a throttler /check request // should behave like a /check-self. 
CheckAsCheckSelf bool `protobuf:"varint,4,opt,name=check_as_check_self,json=checkAsCheckSelf,proto3" json:"check_as_check_self,omitempty"` + // ThrottledApps is a map of rules for app-specific throttling + ThrottledApps map[string]*ThrottledAppRule `protobuf:"bytes,5,rep,name=throttled_apps,json=throttledApps,proto3" json:"throttled_apps,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *ThrottlerConfig) Reset() { *x = ThrottlerConfig{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[9] + mi := &file_topodata_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1034,7 +1113,7 @@ func (x *ThrottlerConfig) String() string { func (*ThrottlerConfig) ProtoMessage() {} func (x *ThrottlerConfig) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[9] + mi := &file_topodata_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1047,7 +1126,7 @@ func (x *ThrottlerConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ThrottlerConfig.ProtoReflect.Descriptor instead. func (*ThrottlerConfig) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{9} + return file_topodata_proto_rawDescGZIP(), []int{10} } func (x *ThrottlerConfig) GetEnabled() bool { @@ -1078,6 +1157,13 @@ func (x *ThrottlerConfig) GetCheckAsCheckSelf() bool { return false } +func (x *ThrottlerConfig) GetThrottledApps() map[string]*ThrottledAppRule { + if x != nil { + return x.ThrottledApps + } + return nil +} + // SrvKeyspace is a rollup node for the keyspace itself. 
type SrvKeyspace struct { state protoimpl.MessageState @@ -1097,7 +1183,7 @@ type SrvKeyspace struct { func (x *SrvKeyspace) Reset() { *x = SrvKeyspace{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[10] + mi := &file_topodata_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1110,7 +1196,7 @@ func (x *SrvKeyspace) String() string { func (*SrvKeyspace) ProtoMessage() {} func (x *SrvKeyspace) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[10] + mi := &file_topodata_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1123,7 +1209,7 @@ func (x *SrvKeyspace) ProtoReflect() protoreflect.Message { // Deprecated: Use SrvKeyspace.ProtoReflect.Descriptor instead. func (*SrvKeyspace) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{10} + return file_topodata_proto_rawDescGZIP(), []int{11} } func (x *SrvKeyspace) GetPartitions() []*SrvKeyspace_KeyspacePartition { @@ -1168,7 +1254,7 @@ type CellInfo struct { func (x *CellInfo) Reset() { *x = CellInfo{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[11] + mi := &file_topodata_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1181,7 +1267,7 @@ func (x *CellInfo) String() string { func (*CellInfo) ProtoMessage() {} func (x *CellInfo) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[11] + mi := &file_topodata_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1194,7 +1280,7 @@ func (x *CellInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use CellInfo.ProtoReflect.Descriptor instead. 
func (*CellInfo) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{11} + return file_topodata_proto_rawDescGZIP(), []int{12} } func (x *CellInfo) GetServerAddress() string { @@ -1224,7 +1310,7 @@ type CellsAlias struct { func (x *CellsAlias) Reset() { *x = CellsAlias{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[12] + mi := &file_topodata_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1237,7 +1323,7 @@ func (x *CellsAlias) String() string { func (*CellsAlias) ProtoMessage() {} func (x *CellsAlias) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[12] + mi := &file_topodata_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1250,7 +1336,7 @@ func (x *CellsAlias) ProtoReflect() protoreflect.Message { // Deprecated: Use CellsAlias.ProtoReflect.Descriptor instead. 
func (*CellsAlias) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{12} + return file_topodata_proto_rawDescGZIP(), []int{13} } func (x *CellsAlias) GetCells() []string { @@ -1273,7 +1359,7 @@ type TopoConfig struct { func (x *TopoConfig) Reset() { *x = TopoConfig{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[13] + mi := &file_topodata_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1286,7 +1372,7 @@ func (x *TopoConfig) String() string { func (*TopoConfig) ProtoMessage() {} func (x *TopoConfig) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[13] + mi := &file_topodata_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1299,7 +1385,7 @@ func (x *TopoConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use TopoConfig.ProtoReflect.Descriptor instead. 
func (*TopoConfig) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{13} + return file_topodata_proto_rawDescGZIP(), []int{14} } func (x *TopoConfig) GetTopoType() string { @@ -1334,7 +1420,7 @@ type ExternalVitessCluster struct { func (x *ExternalVitessCluster) Reset() { *x = ExternalVitessCluster{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[14] + mi := &file_topodata_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1347,7 +1433,7 @@ func (x *ExternalVitessCluster) String() string { func (*ExternalVitessCluster) ProtoMessage() {} func (x *ExternalVitessCluster) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[14] + mi := &file_topodata_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1360,7 +1446,7 @@ func (x *ExternalVitessCluster) ProtoReflect() protoreflect.Message { // Deprecated: Use ExternalVitessCluster.ProtoReflect.Descriptor instead. 
func (*ExternalVitessCluster) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{14} + return file_topodata_proto_rawDescGZIP(), []int{15} } func (x *ExternalVitessCluster) GetTopoConfig() *TopoConfig { @@ -1382,7 +1468,7 @@ type ExternalClusters struct { func (x *ExternalClusters) Reset() { *x = ExternalClusters{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[15] + mi := &file_topodata_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1395,7 +1481,7 @@ func (x *ExternalClusters) String() string { func (*ExternalClusters) ProtoMessage() {} func (x *ExternalClusters) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[15] + mi := &file_topodata_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1408,7 +1494,7 @@ func (x *ExternalClusters) ProtoReflect() protoreflect.Message { // Deprecated: Use ExternalClusters.ProtoReflect.Descriptor instead. 
func (*ExternalClusters) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{15} + return file_topodata_proto_rawDescGZIP(), []int{16} } func (x *ExternalClusters) GetVitessCluster() []*ExternalVitessCluster { @@ -1441,7 +1527,7 @@ type Shard_SourceShard struct { func (x *Shard_SourceShard) Reset() { *x = Shard_SourceShard{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[18] + mi := &file_topodata_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1454,7 +1540,7 @@ func (x *Shard_SourceShard) String() string { func (*Shard_SourceShard) ProtoMessage() {} func (x *Shard_SourceShard) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[18] + mi := &file_topodata_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1523,7 +1609,7 @@ type Shard_TabletControl struct { func (x *Shard_TabletControl) Reset() { *x = Shard_TabletControl{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[19] + mi := &file_topodata_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1536,7 +1622,7 @@ func (x *Shard_TabletControl) String() string { func (*Shard_TabletControl) ProtoMessage() {} func (x *Shard_TabletControl) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[19] + mi := &file_topodata_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1598,7 +1684,7 @@ type Keyspace_ServedFrom struct { func (x *Keyspace_ServedFrom) Reset() { *x = Keyspace_ServedFrom{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[20] + mi := &file_topodata_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1611,7 +1697,7 @@ func 
(x *Keyspace_ServedFrom) String() string { func (*Keyspace_ServedFrom) ProtoMessage() {} func (x *Keyspace_ServedFrom) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[20] + mi := &file_topodata_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1660,7 +1746,7 @@ type ShardReplication_Node struct { func (x *ShardReplication_Node) Reset() { *x = ShardReplication_Node{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[21] + mi := &file_topodata_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1673,7 +1759,7 @@ func (x *ShardReplication_Node) String() string { func (*ShardReplication_Node) ProtoMessage() {} func (x *ShardReplication_Node) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[21] + mi := &file_topodata_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1712,7 +1798,7 @@ type SrvKeyspace_KeyspacePartition struct { func (x *SrvKeyspace_KeyspacePartition) Reset() { *x = SrvKeyspace_KeyspacePartition{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[22] + mi := &file_topodata_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1725,7 +1811,7 @@ func (x *SrvKeyspace_KeyspacePartition) String() string { func (*SrvKeyspace_KeyspacePartition) ProtoMessage() {} func (x *SrvKeyspace_KeyspacePartition) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[22] + mi := &file_topodata_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1738,7 +1824,7 @@ func (x *SrvKeyspace_KeyspacePartition) ProtoReflect() protoreflect.Message { // Deprecated: Use 
SrvKeyspace_KeyspacePartition.ProtoReflect.Descriptor instead. func (*SrvKeyspace_KeyspacePartition) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{10, 0} + return file_topodata_proto_rawDescGZIP(), []int{11, 0} } func (x *SrvKeyspace_KeyspacePartition) GetServedType() TabletType { @@ -1778,7 +1864,7 @@ type SrvKeyspace_ServedFrom struct { func (x *SrvKeyspace_ServedFrom) Reset() { *x = SrvKeyspace_ServedFrom{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[23] + mi := &file_topodata_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1791,7 +1877,7 @@ func (x *SrvKeyspace_ServedFrom) String() string { func (*SrvKeyspace_ServedFrom) ProtoMessage() {} func (x *SrvKeyspace_ServedFrom) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[23] + mi := &file_topodata_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1804,7 +1890,7 @@ func (x *SrvKeyspace_ServedFrom) ProtoReflect() protoreflect.Message { // Deprecated: Use SrvKeyspace_ServedFrom.ProtoReflect.Descriptor instead. 
func (*SrvKeyspace_ServedFrom) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{10, 1} + return file_topodata_proto_rawDescGZIP(), []int{11, 1} } func (x *SrvKeyspace_ServedFrom) GetTabletType() TabletType { @@ -1989,92 +2075,112 @@ var file_topodata_proto_rawDesc = []byte{ 0x34, 0x0a, 0x16, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x71, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x69, 0x73, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x9b, 0x01, 0x0a, 0x0f, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, - 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, - 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x12, 0x2d, 0x0a, 0x13, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x61, 0x73, - 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x65, 0x6c, 0x66, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, - 0x65, 0x6c, 0x66, 0x22, 0xb6, 0x04, 0x0a, 0x0b, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 
0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x0b, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72, 0x76, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, - 0x72, 0x6f, 0x6d, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, - 0x44, 0x0a, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xe1, 0x01, 0x0a, 0x11, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x0b, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x65, - 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x50, 0x0a, 0x15, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 
0x74, 0x72, 0x6f, 0x6c, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x1a, 0x5f, 0x0a, 0x0a, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, - 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, - 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x4b, 0x0a, 0x08, - 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, - 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, - 0x6f, 0x6f, 0x74, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x22, 0x0a, 0x0a, 0x43, 0x65, 0x6c, - 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x55, 0x0a, - 0x0a, 0x54, 0x6f, 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x74, - 0x6f, 0x70, 0x6f, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x74, 
0x6f, 0x70, 0x6f, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x72, 0x6f, 0x6f, 0x74, 0x22, 0x4e, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x56, 0x69, 0x74, 0x65, 0x73, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x35, 0x0a, - 0x0b, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x6f, - 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x74, 0x6f, 0x70, 0x6f, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x22, 0x5a, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x46, 0x0a, 0x0e, 0x76, 0x69, 0x74, 0x65, - 0x73, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x56, 0x69, 0x74, 0x65, 0x73, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x52, 0x0d, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x2a, 0x28, 0x0a, 0x0c, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, - 0x53, 0x4e, 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x10, 0x01, 0x2a, 0x9d, 0x01, 0x0a, 0x0a, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x49, 0x4d, 0x41, 0x52, - 0x59, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, 0x53, 0x54, 0x45, 0x52, 0x10, 0x01, 0x12, - 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x50, 0x4c, 
0x49, 0x43, 0x41, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, - 0x52, 0x44, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x41, 0x54, 0x43, - 0x48, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x41, 0x52, 0x45, 0x10, 0x04, 0x12, 0x10, - 0x0a, 0x0c, 0x45, 0x58, 0x50, 0x45, 0x52, 0x49, 0x4d, 0x45, 0x4e, 0x54, 0x41, 0x4c, 0x10, 0x05, - 0x12, 0x0a, 0x0a, 0x06, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, - 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x10, 0x07, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x52, 0x41, - 0x49, 0x4e, 0x45, 0x44, 0x10, 0x08, 0x1a, 0x02, 0x10, 0x01, 0x42, 0x38, 0x0a, 0x0f, 0x69, 0x6f, - 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x25, 0x76, - 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, - 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x81, 0x01, 0x0a, 0x10, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, + 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x12, 0x2b, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, + 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, + 0x74, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x06, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x22, 0xce, 0x02, 0x0a, 0x0f, 0x54, 0x68, + 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, + 0x07, 
0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, + 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, + 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2d, 0x0a, 0x13, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x5f, 0x61, 0x73, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x65, 0x6c, 0x66, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x73, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x53, 0x65, 0x6c, 0x66, 0x12, 0x53, 0x0a, 0x0e, 0x74, 0x68, 0x72, 0x6f, 0x74, + 0x74, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x70, 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, + 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, + 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x74, + 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x73, 0x1a, 0x5c, 0x0a, 0x12, + 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x30, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb6, 0x04, 0x0a, 0x0b, 0x53, + 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 
0x61, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x70, 0x61, + 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x50, 0x61, + 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, + 0x6f, 0x6d, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x44, 0x0a, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, + 0x6c, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, + 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x74, 0x68, 0x72, + 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xe1, 0x01, 0x0a, + 0x11, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 
0x61, 0x2e, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x50, + 0x0a, 0x15, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x13, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, + 0x1a, 0x5f, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x35, + 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, + 0x05, 0x10, 0x06, 0x22, 0x4b, 0x0a, 0x08, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, + 0x22, 0x22, 0x0a, 0x0a, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, + 0x0a, 0x05, 0x63, 
0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, + 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x55, 0x0a, 0x0a, 0x54, 0x6f, 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x70, 0x6f, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x22, 0x4e, 0x0a, 0x15, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x56, 0x69, 0x74, 0x65, 0x73, 0x73, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0a, 0x74, 0x6f, 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x5a, 0x0a, 0x10, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, + 0x46, 0x0a, 0x0e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x56, 0x69, 0x74, 0x65, 0x73, + 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x0d, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2a, 0x28, 0x0a, 0x0c, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x4f, 0x52, 0x4d, 0x41, + 0x4c, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x4e, 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x10, + 0x01, 0x2a, 0x9d, 0x01, 0x0a, 0x0a, 0x54, 0x61, 0x62, 
0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, + 0x07, 0x50, 0x52, 0x49, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, + 0x53, 0x54, 0x45, 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, + 0x41, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x44, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, + 0x09, 0x0a, 0x05, 0x42, 0x41, 0x54, 0x43, 0x48, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, + 0x41, 0x52, 0x45, 0x10, 0x04, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x58, 0x50, 0x45, 0x52, 0x49, 0x4d, + 0x45, 0x4e, 0x54, 0x41, 0x4c, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x42, 0x41, 0x43, 0x4b, 0x55, + 0x50, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x10, 0x07, + 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x08, 0x1a, 0x02, 0x10, + 0x01, 0x42, 0x38, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x25, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, + 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -2090,7 +2196,7 @@ func file_topodata_proto_rawDescGZIP() []byte { } var file_topodata_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_topodata_proto_msgTypes = make([]protoimpl.MessageInfo, 24) +var file_topodata_proto_msgTypes = make([]protoimpl.MessageInfo, 26) var file_topodata_proto_goTypes = []interface{}{ (KeyspaceType)(0), // 0: topodata.KeyspaceType (TabletType)(0), // 1: topodata.TabletType @@ -2104,62 +2210,67 @@ var file_topodata_proto_goTypes = []interface{}{ (*ShardReplicationError)(nil), // 9: topodata.ShardReplicationError (*ShardReference)(nil), // 10: topodata.ShardReference (*ShardTabletControl)(nil), // 11: 
topodata.ShardTabletControl - (*ThrottlerConfig)(nil), // 12: topodata.ThrottlerConfig - (*SrvKeyspace)(nil), // 13: topodata.SrvKeyspace - (*CellInfo)(nil), // 14: topodata.CellInfo - (*CellsAlias)(nil), // 15: topodata.CellsAlias - (*TopoConfig)(nil), // 16: topodata.TopoConfig - (*ExternalVitessCluster)(nil), // 17: topodata.ExternalVitessCluster - (*ExternalClusters)(nil), // 18: topodata.ExternalClusters - nil, // 19: topodata.Tablet.PortMapEntry - nil, // 20: topodata.Tablet.TagsEntry - (*Shard_SourceShard)(nil), // 21: topodata.Shard.SourceShard - (*Shard_TabletControl)(nil), // 22: topodata.Shard.TabletControl - (*Keyspace_ServedFrom)(nil), // 23: topodata.Keyspace.ServedFrom - (*ShardReplication_Node)(nil), // 24: topodata.ShardReplication.Node - (*SrvKeyspace_KeyspacePartition)(nil), // 25: topodata.SrvKeyspace.KeyspacePartition - (*SrvKeyspace_ServedFrom)(nil), // 26: topodata.SrvKeyspace.ServedFrom - (*vttime.Time)(nil), // 27: vttime.Time + (*ThrottledAppRule)(nil), // 12: topodata.ThrottledAppRule + (*ThrottlerConfig)(nil), // 13: topodata.ThrottlerConfig + (*SrvKeyspace)(nil), // 14: topodata.SrvKeyspace + (*CellInfo)(nil), // 15: topodata.CellInfo + (*CellsAlias)(nil), // 16: topodata.CellsAlias + (*TopoConfig)(nil), // 17: topodata.TopoConfig + (*ExternalVitessCluster)(nil), // 18: topodata.ExternalVitessCluster + (*ExternalClusters)(nil), // 19: topodata.ExternalClusters + nil, // 20: topodata.Tablet.PortMapEntry + nil, // 21: topodata.Tablet.TagsEntry + (*Shard_SourceShard)(nil), // 22: topodata.Shard.SourceShard + (*Shard_TabletControl)(nil), // 23: topodata.Shard.TabletControl + (*Keyspace_ServedFrom)(nil), // 24: topodata.Keyspace.ServedFrom + (*ShardReplication_Node)(nil), // 25: topodata.ShardReplication.Node + nil, // 26: topodata.ThrottlerConfig.ThrottledAppsEntry + (*SrvKeyspace_KeyspacePartition)(nil), // 27: topodata.SrvKeyspace.KeyspacePartition + (*SrvKeyspace_ServedFrom)(nil), // 28: topodata.SrvKeyspace.ServedFrom + 
(*vttime.Time)(nil), // 29: vttime.Time } var file_topodata_proto_depIdxs = []int32{ 4, // 0: topodata.Tablet.alias:type_name -> topodata.TabletAlias - 19, // 1: topodata.Tablet.port_map:type_name -> topodata.Tablet.PortMapEntry + 20, // 1: topodata.Tablet.port_map:type_name -> topodata.Tablet.PortMapEntry 3, // 2: topodata.Tablet.key_range:type_name -> topodata.KeyRange 1, // 3: topodata.Tablet.type:type_name -> topodata.TabletType - 20, // 4: topodata.Tablet.tags:type_name -> topodata.Tablet.TagsEntry - 27, // 5: topodata.Tablet.primary_term_start_time:type_name -> vttime.Time + 21, // 4: topodata.Tablet.tags:type_name -> topodata.Tablet.TagsEntry + 29, // 5: topodata.Tablet.primary_term_start_time:type_name -> vttime.Time 4, // 6: topodata.Shard.primary_alias:type_name -> topodata.TabletAlias - 27, // 7: topodata.Shard.primary_term_start_time:type_name -> vttime.Time + 29, // 7: topodata.Shard.primary_term_start_time:type_name -> vttime.Time 3, // 8: topodata.Shard.key_range:type_name -> topodata.KeyRange - 21, // 9: topodata.Shard.source_shards:type_name -> topodata.Shard.SourceShard - 22, // 10: topodata.Shard.tablet_controls:type_name -> topodata.Shard.TabletControl - 23, // 11: topodata.Keyspace.served_froms:type_name -> topodata.Keyspace.ServedFrom + 22, // 9: topodata.Shard.source_shards:type_name -> topodata.Shard.SourceShard + 23, // 10: topodata.Shard.tablet_controls:type_name -> topodata.Shard.TabletControl + 24, // 11: topodata.Keyspace.served_froms:type_name -> topodata.Keyspace.ServedFrom 0, // 12: topodata.Keyspace.keyspace_type:type_name -> topodata.KeyspaceType - 27, // 13: topodata.Keyspace.snapshot_time:type_name -> vttime.Time - 12, // 14: topodata.Keyspace.throttler_config:type_name -> topodata.ThrottlerConfig - 24, // 15: topodata.ShardReplication.nodes:type_name -> topodata.ShardReplication.Node + 29, // 13: topodata.Keyspace.snapshot_time:type_name -> vttime.Time + 13, // 14: topodata.Keyspace.throttler_config:type_name -> 
topodata.ThrottlerConfig + 25, // 15: topodata.ShardReplication.nodes:type_name -> topodata.ShardReplication.Node 2, // 16: topodata.ShardReplicationError.type:type_name -> topodata.ShardReplicationError.Type 4, // 17: topodata.ShardReplicationError.tablet_alias:type_name -> topodata.TabletAlias 3, // 18: topodata.ShardReference.key_range:type_name -> topodata.KeyRange 3, // 19: topodata.ShardTabletControl.key_range:type_name -> topodata.KeyRange - 25, // 20: topodata.SrvKeyspace.partitions:type_name -> topodata.SrvKeyspace.KeyspacePartition - 26, // 21: topodata.SrvKeyspace.served_from:type_name -> topodata.SrvKeyspace.ServedFrom - 12, // 22: topodata.SrvKeyspace.throttler_config:type_name -> topodata.ThrottlerConfig - 16, // 23: topodata.ExternalVitessCluster.topo_config:type_name -> topodata.TopoConfig - 17, // 24: topodata.ExternalClusters.vitess_cluster:type_name -> topodata.ExternalVitessCluster - 3, // 25: topodata.Shard.SourceShard.key_range:type_name -> topodata.KeyRange - 1, // 26: topodata.Shard.TabletControl.tablet_type:type_name -> topodata.TabletType - 1, // 27: topodata.Keyspace.ServedFrom.tablet_type:type_name -> topodata.TabletType - 4, // 28: topodata.ShardReplication.Node.tablet_alias:type_name -> topodata.TabletAlias - 1, // 29: topodata.SrvKeyspace.KeyspacePartition.served_type:type_name -> topodata.TabletType - 10, // 30: topodata.SrvKeyspace.KeyspacePartition.shard_references:type_name -> topodata.ShardReference - 11, // 31: topodata.SrvKeyspace.KeyspacePartition.shard_tablet_controls:type_name -> topodata.ShardTabletControl - 1, // 32: topodata.SrvKeyspace.ServedFrom.tablet_type:type_name -> topodata.TabletType - 33, // [33:33] is the sub-list for method output_type - 33, // [33:33] is the sub-list for method input_type - 33, // [33:33] is the sub-list for extension type_name - 33, // [33:33] is the sub-list for extension extendee - 0, // [0:33] is the sub-list for field type_name + 29, // 20: topodata.ThrottledAppRule.expires_at:type_name 
-> vttime.Time + 26, // 21: topodata.ThrottlerConfig.throttled_apps:type_name -> topodata.ThrottlerConfig.ThrottledAppsEntry + 27, // 22: topodata.SrvKeyspace.partitions:type_name -> topodata.SrvKeyspace.KeyspacePartition + 28, // 23: topodata.SrvKeyspace.served_from:type_name -> topodata.SrvKeyspace.ServedFrom + 13, // 24: topodata.SrvKeyspace.throttler_config:type_name -> topodata.ThrottlerConfig + 17, // 25: topodata.ExternalVitessCluster.topo_config:type_name -> topodata.TopoConfig + 18, // 26: topodata.ExternalClusters.vitess_cluster:type_name -> topodata.ExternalVitessCluster + 3, // 27: topodata.Shard.SourceShard.key_range:type_name -> topodata.KeyRange + 1, // 28: topodata.Shard.TabletControl.tablet_type:type_name -> topodata.TabletType + 1, // 29: topodata.Keyspace.ServedFrom.tablet_type:type_name -> topodata.TabletType + 4, // 30: topodata.ShardReplication.Node.tablet_alias:type_name -> topodata.TabletAlias + 12, // 31: topodata.ThrottlerConfig.ThrottledAppsEntry.value:type_name -> topodata.ThrottledAppRule + 1, // 32: topodata.SrvKeyspace.KeyspacePartition.served_type:type_name -> topodata.TabletType + 10, // 33: topodata.SrvKeyspace.KeyspacePartition.shard_references:type_name -> topodata.ShardReference + 11, // 34: topodata.SrvKeyspace.KeyspacePartition.shard_tablet_controls:type_name -> topodata.ShardTabletControl + 1, // 35: topodata.SrvKeyspace.ServedFrom.tablet_type:type_name -> topodata.TabletType + 36, // [36:36] is the sub-list for method output_type + 36, // [36:36] is the sub-list for method input_type + 36, // [36:36] is the sub-list for extension type_name + 36, // [36:36] is the sub-list for extension extendee + 0, // [0:36] is the sub-list for field type_name } func init() { file_topodata_proto_init() } @@ -2277,7 +2388,7 @@ func file_topodata_proto_init() { } } file_topodata_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ThrottlerConfig); i { + switch v := v.(*ThrottledAppRule); i { case 0: return 
&v.state case 1: @@ -2289,7 +2400,7 @@ func file_topodata_proto_init() { } } file_topodata_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SrvKeyspace); i { + switch v := v.(*ThrottlerConfig); i { case 0: return &v.state case 1: @@ -2301,7 +2412,7 @@ func file_topodata_proto_init() { } } file_topodata_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CellInfo); i { + switch v := v.(*SrvKeyspace); i { case 0: return &v.state case 1: @@ -2313,7 +2424,7 @@ func file_topodata_proto_init() { } } file_topodata_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CellsAlias); i { + switch v := v.(*CellInfo); i { case 0: return &v.state case 1: @@ -2325,7 +2436,7 @@ func file_topodata_proto_init() { } } file_topodata_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TopoConfig); i { + switch v := v.(*CellsAlias); i { case 0: return &v.state case 1: @@ -2337,7 +2448,7 @@ func file_topodata_proto_init() { } } file_topodata_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExternalVitessCluster); i { + switch v := v.(*TopoConfig); i { case 0: return &v.state case 1: @@ -2349,6 +2460,18 @@ func file_topodata_proto_init() { } } file_topodata_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExternalVitessCluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_topodata_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExternalClusters); i { case 0: return &v.state @@ -2360,7 +2483,7 @@ func file_topodata_proto_init() { return nil } } - file_topodata_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_topodata_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := 
v.(*Shard_SourceShard); i { case 0: return &v.state @@ -2372,7 +2495,7 @@ func file_topodata_proto_init() { return nil } } - file_topodata_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_topodata_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Shard_TabletControl); i { case 0: return &v.state @@ -2384,7 +2507,7 @@ func file_topodata_proto_init() { return nil } } - file_topodata_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_topodata_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Keyspace_ServedFrom); i { case 0: return &v.state @@ -2396,7 +2519,7 @@ func file_topodata_proto_init() { return nil } } - file_topodata_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_topodata_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ShardReplication_Node); i { case 0: return &v.state @@ -2408,7 +2531,7 @@ func file_topodata_proto_init() { return nil } } - file_topodata_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_topodata_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SrvKeyspace_KeyspacePartition); i { case 0: return &v.state @@ -2420,7 +2543,7 @@ func file_topodata_proto_init() { return nil } } - file_topodata_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_topodata_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SrvKeyspace_ServedFrom); i { case 0: return &v.state @@ -2439,7 +2562,7 @@ func file_topodata_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_topodata_proto_rawDesc, NumEnums: 3, - NumMessages: 24, + NumMessages: 26, NumExtensions: 0, NumServices: 0, }, diff --git a/go/vt/proto/topodata/topodata_vtproto.pb.go b/go/vt/proto/topodata/topodata_vtproto.pb.go index 46634f74c9d..5e675bb4ea0 100644 
--- a/go/vt/proto/topodata/topodata_vtproto.pb.go +++ b/go/vt/proto/topodata/topodata_vtproto.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: topodata.proto package topodata @@ -7,6 +7,7 @@ package topodata import ( binary "encoding/binary" fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" math "math" @@ -21,6 +22,569 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *KeyRange) CloneVT() *KeyRange { + if m == nil { + return (*KeyRange)(nil) + } + r := &KeyRange{} + if rhs := m.Start; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Start = tmpBytes + } + if rhs := m.End; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.End = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *KeyRange) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TabletAlias) CloneVT() *TabletAlias { + if m == nil { + return (*TabletAlias)(nil) + } + r := &TabletAlias{ + Cell: m.Cell, + Uid: m.Uid, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TabletAlias) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Tablet) CloneVT() *Tablet { + if m == nil { + return (*Tablet)(nil) + } + r := &Tablet{ + Alias: m.Alias.CloneVT(), + Hostname: m.Hostname, + Keyspace: m.Keyspace, + Shard: m.Shard, + KeyRange: m.KeyRange.CloneVT(), + Type: m.Type, + DbNameOverride: m.DbNameOverride, + MysqlHostname: m.MysqlHostname, + MysqlPort: m.MysqlPort, + PrimaryTermStartTime: m.PrimaryTermStartTime.CloneVT(), + DefaultConnCollation: m.DefaultConnCollation, + } + if rhs := 
m.PortMap; rhs != nil { + tmpContainer := make(map[string]int32, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.PortMap = tmpContainer + } + if rhs := m.Tags; rhs != nil { + tmpContainer := make(map[string]string, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.Tags = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Tablet) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Shard_SourceShard) CloneVT() *Shard_SourceShard { + if m == nil { + return (*Shard_SourceShard)(nil) + } + r := &Shard_SourceShard{ + Uid: m.Uid, + Keyspace: m.Keyspace, + Shard: m.Shard, + KeyRange: m.KeyRange.CloneVT(), + } + if rhs := m.Tables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Shard_SourceShard) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Shard_TabletControl) CloneVT() *Shard_TabletControl { + if m == nil { + return (*Shard_TabletControl)(nil) + } + r := &Shard_TabletControl{ + TabletType: m.TabletType, + Frozen: m.Frozen, + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if rhs := m.DeniedTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.DeniedTables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Shard_TabletControl) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Shard) CloneVT() *Shard { + if m == nil { + return (*Shard)(nil) + } + r := &Shard{ + PrimaryAlias: 
m.PrimaryAlias.CloneVT(), + PrimaryTermStartTime: m.PrimaryTermStartTime.CloneVT(), + KeyRange: m.KeyRange.CloneVT(), + IsPrimaryServing: m.IsPrimaryServing, + } + if rhs := m.SourceShards; rhs != nil { + tmpContainer := make([]*Shard_SourceShard, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.SourceShards = tmpContainer + } + if rhs := m.TabletControls; rhs != nil { + tmpContainer := make([]*Shard_TabletControl, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TabletControls = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Shard) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Keyspace_ServedFrom) CloneVT() *Keyspace_ServedFrom { + if m == nil { + return (*Keyspace_ServedFrom)(nil) + } + r := &Keyspace_ServedFrom{ + TabletType: m.TabletType, + Keyspace: m.Keyspace, + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Keyspace_ServedFrom) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Keyspace) CloneVT() *Keyspace { + if m == nil { + return (*Keyspace)(nil) + } + r := &Keyspace{ + KeyspaceType: m.KeyspaceType, + BaseKeyspace: m.BaseKeyspace, + SnapshotTime: m.SnapshotTime.CloneVT(), + DurabilityPolicy: m.DurabilityPolicy, + ThrottlerConfig: m.ThrottlerConfig.CloneVT(), + SidecarDbName: m.SidecarDbName, + } + if rhs := m.ServedFroms; rhs != nil { + tmpContainer := make([]*Keyspace_ServedFrom, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ServedFroms = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, 
m.unknownFields) + } + return r +} + +func (m *Keyspace) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardReplication_Node) CloneVT() *ShardReplication_Node { + if m == nil { + return (*ShardReplication_Node)(nil) + } + r := &ShardReplication_Node{ + TabletAlias: m.TabletAlias.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShardReplication_Node) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardReplication) CloneVT() *ShardReplication { + if m == nil { + return (*ShardReplication)(nil) + } + r := &ShardReplication{} + if rhs := m.Nodes; rhs != nil { + tmpContainer := make([]*ShardReplication_Node, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Nodes = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShardReplication) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardReplicationError) CloneVT() *ShardReplicationError { + if m == nil { + return (*ShardReplicationError)(nil) + } + r := &ShardReplicationError{ + Type: m.Type, + TabletAlias: m.TabletAlias.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShardReplicationError) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardReference) CloneVT() *ShardReference { + if m == nil { + return (*ShardReference)(nil) + } + r := &ShardReference{ + Name: m.Name, + KeyRange: m.KeyRange.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShardReference) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m 
*ShardTabletControl) CloneVT() *ShardTabletControl { + if m == nil { + return (*ShardTabletControl)(nil) + } + r := &ShardTabletControl{ + Name: m.Name, + KeyRange: m.KeyRange.CloneVT(), + QueryServiceDisabled: m.QueryServiceDisabled, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShardTabletControl) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ThrottledAppRule) CloneVT() *ThrottledAppRule { + if m == nil { + return (*ThrottledAppRule)(nil) + } + r := &ThrottledAppRule{ + Name: m.Name, + Ratio: m.Ratio, + ExpiresAt: m.ExpiresAt.CloneVT(), + Exempt: m.Exempt, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ThrottledAppRule) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ThrottlerConfig) CloneVT() *ThrottlerConfig { + if m == nil { + return (*ThrottlerConfig)(nil) + } + r := &ThrottlerConfig{ + Enabled: m.Enabled, + Threshold: m.Threshold, + CustomQuery: m.CustomQuery, + CheckAsCheckSelf: m.CheckAsCheckSelf, + } + if rhs := m.ThrottledApps; rhs != nil { + tmpContainer := make(map[string]*ThrottledAppRule, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ThrottledApps = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ThrottlerConfig) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SrvKeyspace_KeyspacePartition) CloneVT() *SrvKeyspace_KeyspacePartition { + if m == nil { + return (*SrvKeyspace_KeyspacePartition)(nil) + } + r := &SrvKeyspace_KeyspacePartition{ + ServedType: m.ServedType, + } + if rhs := m.ShardReferences; rhs != nil { + tmpContainer := make([]*ShardReference, len(rhs)) + for k, v := range rhs { + tmpContainer[k] 
= v.CloneVT() + } + r.ShardReferences = tmpContainer + } + if rhs := m.ShardTabletControls; rhs != nil { + tmpContainer := make([]*ShardTabletControl, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ShardTabletControls = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SrvKeyspace_KeyspacePartition) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SrvKeyspace_ServedFrom) CloneVT() *SrvKeyspace_ServedFrom { + if m == nil { + return (*SrvKeyspace_ServedFrom)(nil) + } + r := &SrvKeyspace_ServedFrom{ + TabletType: m.TabletType, + Keyspace: m.Keyspace, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SrvKeyspace_ServedFrom) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SrvKeyspace) CloneVT() *SrvKeyspace { + if m == nil { + return (*SrvKeyspace)(nil) + } + r := &SrvKeyspace{ + ThrottlerConfig: m.ThrottlerConfig.CloneVT(), + } + if rhs := m.Partitions; rhs != nil { + tmpContainer := make([]*SrvKeyspace_KeyspacePartition, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Partitions = tmpContainer + } + if rhs := m.ServedFrom; rhs != nil { + tmpContainer := make([]*SrvKeyspace_ServedFrom, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ServedFrom = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SrvKeyspace) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CellInfo) CloneVT() *CellInfo { + if m == nil { + return (*CellInfo)(nil) + } + r := &CellInfo{ + ServerAddress: m.ServerAddress, + Root: m.Root, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, 
len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CellInfo) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CellsAlias) CloneVT() *CellsAlias { + if m == nil { + return (*CellsAlias)(nil) + } + r := &CellsAlias{} + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CellsAlias) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TopoConfig) CloneVT() *TopoConfig { + if m == nil { + return (*TopoConfig)(nil) + } + r := &TopoConfig{ + TopoType: m.TopoType, + Server: m.Server, + Root: m.Root, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TopoConfig) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExternalVitessCluster) CloneVT() *ExternalVitessCluster { + if m == nil { + return (*ExternalVitessCluster)(nil) + } + r := &ExternalVitessCluster{ + TopoConfig: m.TopoConfig.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExternalVitessCluster) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExternalClusters) CloneVT() *ExternalClusters { + if m == nil { + return (*ExternalClusters)(nil) + } + r := &ExternalClusters{} + if rhs := m.VitessCluster; rhs != nil { + tmpContainer := make([]*ExternalVitessCluster, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.VitessCluster = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExternalClusters) 
CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *KeyRange) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -889,6 +1453,72 @@ func (m *ShardTabletControl) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ThrottledAppRule) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ThrottledAppRule) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ThrottledAppRule) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Exempt { + i-- + if m.Exempt { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.ExpiresAt != nil { + size, err := m.ExpiresAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Ratio != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Ratio)))) + i-- + dAtA[i] = 0x11 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *ThrottlerConfig) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -919,6 +1549,28 @@ func (m *ThrottlerConfig) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.ThrottledApps) > 0 { + for k := range m.ThrottledApps { + v := m.ThrottledApps[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } if m.CheckAsCheckSelf { i-- if m.CheckAsCheckSelf { @@ -1717,6 +2369,30 @@ func (m *ShardTabletControl) SizeVT() (n int) { return n } +func (m *ThrottledAppRule) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Ratio != 0 { + n += 9 + } + if m.ExpiresAt != nil { + l = m.ExpiresAt.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Exempt { + n += 2 + } + n += len(m.unknownFields) + return n +} + func (m *ThrottlerConfig) SizeVT() (n int) { if m == nil { return 0 @@ -1736,6 +2412,19 @@ func (m *ThrottlerConfig) SizeVT() (n int) { if m.CheckAsCheckSelf { n += 2 } + if len(m.ThrottledApps) > 0 { + for k, v := range m.ThrottledApps { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } n += len(m.unknownFields) return n } @@ -4282,6 +4971,156 @@ func (m *ShardTabletControl) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *ThrottledAppRule) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ThrottledAppRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ThrottledAppRule: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Ratio", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Ratio = float64(math.Float64frombits(v)) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpiresAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExpiresAt == nil { + m.ExpiresAt = &vttime.Time{} + } + if err := m.ExpiresAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exempt", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Exempt = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ThrottlerConfig) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -4394,6 +5233,135 @@ func (m *ThrottlerConfig) UnmarshalVT(dAtA []byte) error { } } m.CheckAsCheckSelf = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ThrottledApps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ThrottledApps == nil { + m.ThrottledApps = make(map[string]*ThrottledAppRule) + } + var mapkey string + var mapvalue *ThrottledAppRule + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ThrottledAppRule{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ThrottledApps[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/go/vt/proto/vschema/vschema.pb.go b/go/vt/proto/vschema/vschema.pb.go index c1054896262..980b4a3f4c4 100644 --- a/go/vt/proto/vschema/vschema.pb.go +++ b/go/vt/proto/vschema/vschema.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: vschema.proto package vschema @@ -38,6 +38,58 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type Keyspace_ForeignKeyMode int32 + +const ( + Keyspace_unspecified Keyspace_ForeignKeyMode = 0 + Keyspace_disallow Keyspace_ForeignKeyMode = 1 + Keyspace_unmanaged Keyspace_ForeignKeyMode = 2 + Keyspace_managed Keyspace_ForeignKeyMode = 3 +) + +// Enum value maps for Keyspace_ForeignKeyMode. +var ( + Keyspace_ForeignKeyMode_name = map[int32]string{ + 0: "unspecified", + 1: "disallow", + 2: "unmanaged", + 3: "managed", + } + Keyspace_ForeignKeyMode_value = map[string]int32{ + "unspecified": 0, + "disallow": 1, + "unmanaged": 2, + "managed": 3, + } +) + +func (x Keyspace_ForeignKeyMode) Enum() *Keyspace_ForeignKeyMode { + p := new(Keyspace_ForeignKeyMode) + *p = x + return p +} + +func (x Keyspace_ForeignKeyMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Keyspace_ForeignKeyMode) Descriptor() protoreflect.EnumDescriptor { + return file_vschema_proto_enumTypes[0].Descriptor() +} + +func (Keyspace_ForeignKeyMode) Type() protoreflect.EnumType { + return &file_vschema_proto_enumTypes[0] +} + +func (x Keyspace_ForeignKeyMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Keyspace_ForeignKeyMode.Descriptor instead. +func (Keyspace_ForeignKeyMode) EnumDescriptor() ([]byte, []int) { + return file_vschema_proto_rawDescGZIP(), []int{2, 0} +} + // RoutingRules specify the high level routing rules for the VSchema. 
type RoutingRules struct { state protoimpl.MessageState @@ -156,10 +208,12 @@ type Keyspace struct { Vindexes map[string]*Vindex `protobuf:"bytes,2,rep,name=vindexes,proto3" json:"vindexes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Tables map[string]*Table `protobuf:"bytes,3,rep,name=tables,proto3" json:"tables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // If require_explicit_routing is true, vindexes and tables are not added to global routing - RequireExplicitRouting bool `protobuf:"varint,4,opt,name=require_explicit_routing,json=requireExplicitRouting,proto3" json:"require_explicit_routing,omitempty"` - CrossTablet bool `protobuf:"varint,85,opt,name=cross_tablet,json=crossTablet,proto3" json:"cross_tablet,omitempty"` - AttachEnable bool `protobuf:"varint,86,opt,name=attach_enable,json=attachEnable,proto3" json:"attach_enable,omitempty"` - AttachTo string `protobuf:"bytes,87,opt,name=attach_to,json=attachTo,proto3" json:"attach_to,omitempty"` + RequireExplicitRouting bool `protobuf:"varint,4,opt,name=require_explicit_routing,json=requireExplicitRouting,proto3" json:"require_explicit_routing,omitempty"` + // foreign_key_mode dictates how Vitess should handle foreign keys for this keyspace. 
+ ForeignKeyMode Keyspace_ForeignKeyMode `protobuf:"varint,5,opt,name=foreign_key_mode,json=foreignKeyMode,proto3,enum=vschema.Keyspace_ForeignKeyMode" json:"foreign_key_mode,omitempty"` + CrossTablet bool `protobuf:"varint,85,opt,name=cross_tablet,json=crossTablet,proto3" json:"cross_tablet,omitempty"` + AttachEnable bool `protobuf:"varint,86,opt,name=attach_enable,json=attachEnable,proto3" json:"attach_enable,omitempty"` + AttachTo string `protobuf:"bytes,87,opt,name=attach_to,json=attachTo,proto3" json:"attach_to,omitempty"` } func (x *Keyspace) Reset() { @@ -222,6 +276,13 @@ func (x *Keyspace) GetRequireExplicitRouting() bool { return false } +func (x *Keyspace) GetForeignKeyMode() Keyspace_ForeignKeyMode { + if x != nil { + return x.ForeignKeyMode + } + return Keyspace_unspecified +} + func (x *Keyspace) GetCrossTablet() bool { if x != nil { return x.CrossTablet @@ -803,7 +864,7 @@ var file_vschema_proto_rawDesc = []byte{ 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x66, 0x72, 0x6f, 0x6d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0xd0, 0x03, 0x0a, + 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0xe9, 0x04, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x65, 0x64, 0x12, 0x3b, 0x0a, 0x08, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, @@ -817,99 +878,108 @@ var file_vschema_proto_rawDesc = []byte{ 0x72, 0x65, 0x5f, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x45, 0x78, 0x70, 
0x6c, 0x69, 0x63, 0x69, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, - 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x18, 0x55, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, 0x5f, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x56, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x74, 0x74, - 0x61, 0x63, 0x68, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x74, 0x74, - 0x61, 0x63, 0x68, 0x5f, 0x74, 0x6f, 0x18, 0x57, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x74, - 0x74, 0x61, 0x63, 0x68, 0x54, 0x6f, 0x1a, 0x4c, 0x0a, 0x0d, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x2e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x49, 0x0a, 0x0b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, + 0x67, 0x12, 0x4a, 0x0a, 0x10, 0x66, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x76, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x46, + 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x4b, 0x65, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0e, 0x66, + 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x4b, 0x65, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, + 0x0c, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x55, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, 0x5f, 
0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x18, 0x56, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, 0x45, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, 0x5f, + 0x74, 0x6f, 0x18, 0x57, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, + 0x54, 0x6f, 0x1a, 0x4c, 0x0a, 0x0d, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0xa2, 0x01, 0x0a, 0x06, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x33, - 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0b, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb1, 0x02, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, - 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, - 0x70, 
0x65, 0x12, 0x3e, 0x0a, 0x0f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x76, 0x69, 0x6e, - 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x56, 0x69, 0x6e, 0x64, - 0x65, 0x78, 0x52, 0x0e, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, - 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x69, 0x6e, 0x63, 0x72, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x73, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x6f, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x12, 0x29, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x43, 0x6f, 0x6c, - 0x75, 0x6d, 0x6e, 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x16, 0x0a, 0x06, - 0x70, 0x69, 0x6e, 0x6e, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x69, - 0x6e, 0x6e, 0x65, 0x64, 0x12, 0x3a, 0x0a, 0x19, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x6c, - 0x69, 0x73, 0x74, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4c, - 0x69, 0x73, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x54, 0x0a, 0x0c, 0x43, 0x6f, 0x6c, 0x75, - 0x6d, 0x6e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 
0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x56, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x1a, 0x49, 0x0a, 0x0b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0e, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x0e, 0x46, + 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x4b, 0x65, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, + 0x0b, 0x75, 0x6e, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x64, 0x10, 0x00, 0x12, 0x0c, + 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, + 0x75, 0x6e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x10, 0x03, 0x22, 0xa2, 0x01, 0x0a, 0x06, 0x56, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x14, 0x0a, 0x05, + 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0b, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 
0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb1, 0x02, + 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, 0x0a, 0x0f, 0x63, + 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x43, + 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x0e, 0x63, 0x6f, 0x6c, + 0x75, 0x6d, 0x6e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x0e, 0x61, + 0x75, 0x74, 0x6f, 0x5f, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x41, 0x75, + 0x74, 0x6f, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0d, 0x61, 0x75, 0x74, + 0x6f, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x63, 0x6f, + 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x52, 0x07, 0x63, 0x6f, + 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x69, 0x6e, 0x6e, 0x65, 0x64, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x69, 0x6e, 0x6e, 0x65, 0x64, 0x12, 0x3a, 0x0a, + 0x19, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x61, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x17, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x22, 0x54, 
0x0a, 0x0c, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x56, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, + 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x22, 0x43, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x6f, 0x49, + 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x22, 0x43, - 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x6f, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, - 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, - 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, - 0x6e, 0x63, 0x65, 0x22, 0x3d, 0x0a, 0x06, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x1f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x0b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x22, 0xa7, 0x02, 0x0a, 0x0a, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x12, 0x40, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 
0x61, 0x63, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, - 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, - 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, - 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, - 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, - 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, - 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x11, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x1a, 0x4f, 0x0a, 0x0e, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, - 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x44, 0x0a, 0x11, + 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x3d, 0x0a, 0x06, + 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xa7, 0x02, 0x0a, 0x0a, + 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x40, 0x0a, 0x09, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0d, + 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, + 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, - 0x73, 0x12, 0x2f, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, - 0x65, 0x73, 0x22, 0x6e, 0x0a, 0x10, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, - 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x66, - 0x72, 0x6f, 0x6d, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, - 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x42, 0x26, 0x5a, 0x24, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, - 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x73, 0x52, 0x11, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x1a, 0x4f, 0x0a, 0x0e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x44, 0x0a, 0x11, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, + 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x05, 0x72, 0x75, + 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x76, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, + 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x6e, 0x0a, 0x10, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6b, 0x65, 0x79, 
0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x66, 0x72, 0x6f, 0x6d, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x6f, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x42, 0x26, 0x5a, 0x24, 0x76, + 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, + 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -924,46 +994,49 @@ func file_vschema_proto_rawDescGZIP() []byte { return file_vschema_proto_rawDescData } +var file_vschema_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_vschema_proto_msgTypes = make([]protoimpl.MessageInfo, 15) var file_vschema_proto_goTypes = []interface{}{ - (*RoutingRules)(nil), // 0: vschema.RoutingRules - (*RoutingRule)(nil), // 1: vschema.RoutingRule - (*Keyspace)(nil), // 2: vschema.Keyspace - (*Vindex)(nil), // 3: vschema.Vindex - (*Table)(nil), // 4: vschema.Table - (*ColumnVindex)(nil), // 5: vschema.ColumnVindex - (*AutoIncrement)(nil), // 6: vschema.AutoIncrement - (*Column)(nil), // 7: vschema.Column - (*SrvVSchema)(nil), // 8: vschema.SrvVSchema - (*ShardRoutingRules)(nil), // 9: vschema.ShardRoutingRules - (*ShardRoutingRule)(nil), // 10: vschema.ShardRoutingRule - nil, // 11: vschema.Keyspace.VindexesEntry - nil, // 12: vschema.Keyspace.TablesEntry - nil, // 13: vschema.Vindex.ParamsEntry - nil, // 14: vschema.SrvVSchema.KeyspacesEntry - (query.Type)(0), // 15: query.Type + (Keyspace_ForeignKeyMode)(0), // 0: vschema.Keyspace.ForeignKeyMode + (*RoutingRules)(nil), // 1: vschema.RoutingRules + (*RoutingRule)(nil), // 2: vschema.RoutingRule + 
(*Keyspace)(nil), // 3: vschema.Keyspace + (*Vindex)(nil), // 4: vschema.Vindex + (*Table)(nil), // 5: vschema.Table + (*ColumnVindex)(nil), // 6: vschema.ColumnVindex + (*AutoIncrement)(nil), // 7: vschema.AutoIncrement + (*Column)(nil), // 8: vschema.Column + (*SrvVSchema)(nil), // 9: vschema.SrvVSchema + (*ShardRoutingRules)(nil), // 10: vschema.ShardRoutingRules + (*ShardRoutingRule)(nil), // 11: vschema.ShardRoutingRule + nil, // 12: vschema.Keyspace.VindexesEntry + nil, // 13: vschema.Keyspace.TablesEntry + nil, // 14: vschema.Vindex.ParamsEntry + nil, // 15: vschema.SrvVSchema.KeyspacesEntry + (query.Type)(0), // 16: query.Type } var file_vschema_proto_depIdxs = []int32{ - 1, // 0: vschema.RoutingRules.rules:type_name -> vschema.RoutingRule - 11, // 1: vschema.Keyspace.vindexes:type_name -> vschema.Keyspace.VindexesEntry - 12, // 2: vschema.Keyspace.tables:type_name -> vschema.Keyspace.TablesEntry - 13, // 3: vschema.Vindex.params:type_name -> vschema.Vindex.ParamsEntry - 5, // 4: vschema.Table.column_vindexes:type_name -> vschema.ColumnVindex - 6, // 5: vschema.Table.auto_increment:type_name -> vschema.AutoIncrement - 7, // 6: vschema.Table.columns:type_name -> vschema.Column - 15, // 7: vschema.Column.type:type_name -> query.Type - 14, // 8: vschema.SrvVSchema.keyspaces:type_name -> vschema.SrvVSchema.KeyspacesEntry - 0, // 9: vschema.SrvVSchema.routing_rules:type_name -> vschema.RoutingRules - 9, // 10: vschema.SrvVSchema.shard_routing_rules:type_name -> vschema.ShardRoutingRules - 10, // 11: vschema.ShardRoutingRules.rules:type_name -> vschema.ShardRoutingRule - 3, // 12: vschema.Keyspace.VindexesEntry.value:type_name -> vschema.Vindex - 4, // 13: vschema.Keyspace.TablesEntry.value:type_name -> vschema.Table - 2, // 14: vschema.SrvVSchema.KeyspacesEntry.value:type_name -> vschema.Keyspace - 15, // [15:15] is the sub-list for method output_type - 15, // [15:15] is the sub-list for method input_type - 15, // [15:15] is the sub-list for extension type_name 
- 15, // [15:15] is the sub-list for extension extendee - 0, // [0:15] is the sub-list for field type_name + 2, // 0: vschema.RoutingRules.rules:type_name -> vschema.RoutingRule + 12, // 1: vschema.Keyspace.vindexes:type_name -> vschema.Keyspace.VindexesEntry + 13, // 2: vschema.Keyspace.tables:type_name -> vschema.Keyspace.TablesEntry + 0, // 3: vschema.Keyspace.foreign_key_mode:type_name -> vschema.Keyspace.ForeignKeyMode + 14, // 4: vschema.Vindex.params:type_name -> vschema.Vindex.ParamsEntry + 6, // 5: vschema.Table.column_vindexes:type_name -> vschema.ColumnVindex + 7, // 6: vschema.Table.auto_increment:type_name -> vschema.AutoIncrement + 8, // 7: vschema.Table.columns:type_name -> vschema.Column + 16, // 8: vschema.Column.type:type_name -> query.Type + 15, // 9: vschema.SrvVSchema.keyspaces:type_name -> vschema.SrvVSchema.KeyspacesEntry + 1, // 10: vschema.SrvVSchema.routing_rules:type_name -> vschema.RoutingRules + 10, // 11: vschema.SrvVSchema.shard_routing_rules:type_name -> vschema.ShardRoutingRules + 11, // 12: vschema.ShardRoutingRules.rules:type_name -> vschema.ShardRoutingRule + 4, // 13: vschema.Keyspace.VindexesEntry.value:type_name -> vschema.Vindex + 5, // 14: vschema.Keyspace.TablesEntry.value:type_name -> vschema.Table + 3, // 15: vschema.SrvVSchema.KeyspacesEntry.value:type_name -> vschema.Keyspace + 16, // [16:16] is the sub-list for method output_type + 16, // [16:16] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name } func init() { file_vschema_proto_init() } @@ -1110,13 +1183,14 @@ func file_vschema_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_vschema_proto_rawDesc, - NumEnums: 0, + NumEnums: 1, NumMessages: 15, NumExtensions: 0, NumServices: 0, }, GoTypes: file_vschema_proto_goTypes, DependencyIndexes: 
file_vschema_proto_depIdxs, + EnumInfos: file_vschema_proto_enumTypes, MessageInfos: file_vschema_proto_msgTypes, }.Build() File_vschema_proto = out.File diff --git a/go/vt/proto/vschema/vschema_vtproto.pb.go b/go/vt/proto/vschema/vschema_vtproto.pb.go index c53132625d3..76f26c15333 100644 --- a/go/vt/proto/vschema/vschema_vtproto.pb.go +++ b/go/vt/proto/vschema/vschema_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: vschema.proto package vschema import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -19,6 +20,282 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *RoutingRules) CloneVT() *RoutingRules { + if m == nil { + return (*RoutingRules)(nil) + } + r := &RoutingRules{} + if rhs := m.Rules; rhs != nil { + tmpContainer := make([]*RoutingRule, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Rules = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RoutingRules) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RoutingRule) CloneVT() *RoutingRule { + if m == nil { + return (*RoutingRule)(nil) + } + r := &RoutingRule{ + FromTable: m.FromTable, + } + if rhs := m.ToTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ToTables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RoutingRule) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Keyspace) CloneVT() *Keyspace { + if m == nil { + return (*Keyspace)(nil) + } + r := &Keyspace{ + Sharded: m.Sharded, + 
RequireExplicitRouting: m.RequireExplicitRouting, + ForeignKeyMode: m.ForeignKeyMode, + CrossTablet: m.CrossTablet, + AttachEnable: m.AttachEnable, + AttachTo: m.AttachTo, + } + if rhs := m.Vindexes; rhs != nil { + tmpContainer := make(map[string]*Vindex, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Vindexes = tmpContainer + } + if rhs := m.Tables; rhs != nil { + tmpContainer := make(map[string]*Table, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Tables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Keyspace) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Vindex) CloneVT() *Vindex { + if m == nil { + return (*Vindex)(nil) + } + r := &Vindex{ + Type: m.Type, + Owner: m.Owner, + } + if rhs := m.Params; rhs != nil { + tmpContainer := make(map[string]string, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.Params = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Vindex) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Table) CloneVT() *Table { + if m == nil { + return (*Table)(nil) + } + r := &Table{ + Type: m.Type, + AutoIncrement: m.AutoIncrement.CloneVT(), + Pinned: m.Pinned, + ColumnListAuthoritative: m.ColumnListAuthoritative, + Source: m.Source, + } + if rhs := m.ColumnVindexes; rhs != nil { + tmpContainer := make([]*ColumnVindex, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ColumnVindexes = tmpContainer + } + if rhs := m.Columns; rhs != nil { + tmpContainer := make([]*Column, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Columns = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, 
len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Table) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ColumnVindex) CloneVT() *ColumnVindex { + if m == nil { + return (*ColumnVindex)(nil) + } + r := &ColumnVindex{ + Column: m.Column, + Name: m.Name, + } + if rhs := m.Columns; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Columns = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ColumnVindex) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *AutoIncrement) CloneVT() *AutoIncrement { + if m == nil { + return (*AutoIncrement)(nil) + } + r := &AutoIncrement{ + Column: m.Column, + Sequence: m.Sequence, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *AutoIncrement) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Column) CloneVT() *Column { + if m == nil { + return (*Column)(nil) + } + r := &Column{ + Name: m.Name, + Type: m.Type, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Column) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SrvVSchema) CloneVT() *SrvVSchema { + if m == nil { + return (*SrvVSchema)(nil) + } + r := &SrvVSchema{ + RoutingRules: m.RoutingRules.CloneVT(), + ShardRoutingRules: m.ShardRoutingRules.CloneVT(), + } + if rhs := m.Keyspaces; rhs != nil { + tmpContainer := make(map[string]*Keyspace, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Keyspaces = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} 
+ +func (m *SrvVSchema) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardRoutingRules) CloneVT() *ShardRoutingRules { + if m == nil { + return (*ShardRoutingRules)(nil) + } + r := &ShardRoutingRules{} + if rhs := m.Rules; rhs != nil { + tmpContainer := make([]*ShardRoutingRule, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Rules = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShardRoutingRules) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardRoutingRule) CloneVT() *ShardRoutingRule { + if m == nil { + return (*ShardRoutingRule)(nil) + } + r := &ShardRoutingRule{ + FromKeyspace: m.FromKeyspace, + ToKeyspace: m.ToKeyspace, + Shard: m.Shard, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShardRoutingRule) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *RoutingRules) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -176,6 +453,11 @@ func (m *Keyspace) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i-- dAtA[i] = 0xa8 } + if m.ForeignKeyMode != 0 { + i = encodeVarint(dAtA, i, uint64(m.ForeignKeyMode)) + i-- + dAtA[i] = 0x28 + } if m.RequireExplicitRouting { i-- if m.RequireExplicitRouting { @@ -814,6 +1096,9 @@ func (m *Keyspace) SizeVT() (n int) { if m.RequireExplicitRouting { n += 2 } + if m.ForeignKeyMode != 0 { + n += 1 + sov(uint64(m.ForeignKeyMode)) + } if m.CrossTablet { n += 3 } @@ -1556,6 +1841,25 @@ func (m *Keyspace) UnmarshalVT(dAtA []byte) error { } } m.RequireExplicitRouting = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForeignKeyMode", wireType) + } + m.ForeignKeyMode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ForeignKeyMode |= Keyspace_ForeignKeyMode(b&0x7F) << shift + if b < 0x80 { + break + } + } case 85: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field CrossTablet", wireType) diff --git a/go/vt/proto/vtadmin/vtadmin.pb.go b/go/vt/proto/vtadmin/vtadmin.pb.go index 553f69b5df9..3e41edd5f7e 100644 --- a/go/vt/proto/vtadmin/vtadmin.pb.go +++ b/go/vt/proto/vtadmin/vtadmin.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: vtadmin.proto package vtadmin diff --git a/go/vt/proto/vtadmin/vtadmin_grpc.pb.go b/go/vt/proto/vtadmin/vtadmin_grpc.pb.go index dd46efd816c..e0e2ce2f44f 100644 --- a/go/vt/proto/vtadmin/vtadmin_grpc.pb.go +++ b/go/vt/proto/vtadmin/vtadmin_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 -// - protoc v3.6.1 +// - protoc v3.21.3 // source: vtadmin.proto package vtadmin diff --git a/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go b/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go index 10685461aed..0e4b4c6e84b 100644 --- a/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go +++ b/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
-// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: vtadmin.proto package vtadmin import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -24,6 +25,2344 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *Cluster) CloneVT() *Cluster { + if m == nil { + return (*Cluster)(nil) + } + r := &Cluster{ + Id: m.Id, + Name: m.Name, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Cluster) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ClusterBackup) CloneVT() *ClusterBackup { + if m == nil { + return (*ClusterBackup)(nil) + } + r := &ClusterBackup{ + Cluster: m.Cluster.CloneVT(), + Backup: m.Backup.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ClusterBackup) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ClusterCellsAliases) CloneVT() *ClusterCellsAliases { + if m == nil { + return (*ClusterCellsAliases)(nil) + } + r := &ClusterCellsAliases{ + Cluster: m.Cluster.CloneVT(), + } + if rhs := m.Aliases; rhs != nil { + tmpContainer := make(map[string]*topodata.CellsAlias, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Aliases = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ClusterCellsAliases) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ClusterCellInfo) CloneVT() *ClusterCellInfo { + if m == nil { + return (*ClusterCellInfo)(nil) + } + r := &ClusterCellInfo{ + Cluster: m.Cluster.CloneVT(), + Name: m.Name, + CellInfo: m.CellInfo.CloneVT(), + } + if 
len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ClusterCellInfo) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ClusterShardReplicationPosition) CloneVT() *ClusterShardReplicationPosition { + if m == nil { + return (*ClusterShardReplicationPosition)(nil) + } + r := &ClusterShardReplicationPosition{ + Cluster: m.Cluster.CloneVT(), + Keyspace: m.Keyspace, + Shard: m.Shard, + PositionInfo: m.PositionInfo.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ClusterShardReplicationPosition) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ClusterWorkflows) CloneVT() *ClusterWorkflows { + if m == nil { + return (*ClusterWorkflows)(nil) + } + r := &ClusterWorkflows{} + if rhs := m.Workflows; rhs != nil { + tmpContainer := make([]*Workflow, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Workflows = tmpContainer + } + if rhs := m.Warnings; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Warnings = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ClusterWorkflows) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Keyspace) CloneVT() *Keyspace { + if m == nil { + return (*Keyspace)(nil) + } + r := &Keyspace{ + Cluster: m.Cluster.CloneVT(), + Keyspace: m.Keyspace.CloneVT(), + } + if rhs := m.Shards; rhs != nil { + tmpContainer := make(map[string]*vtctldata.Shard, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Shards = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} 
+ +func (m *Keyspace) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Schema_ShardTableSize) CloneVT() *Schema_ShardTableSize { + if m == nil { + return (*Schema_ShardTableSize)(nil) + } + r := &Schema_ShardTableSize{ + RowCount: m.RowCount, + DataLength: m.DataLength, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Schema_ShardTableSize) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Schema_TableSize) CloneVT() *Schema_TableSize { + if m == nil { + return (*Schema_TableSize)(nil) + } + r := &Schema_TableSize{ + RowCount: m.RowCount, + DataLength: m.DataLength, + } + if rhs := m.ByShard; rhs != nil { + tmpContainer := make(map[string]*Schema_ShardTableSize, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ByShard = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Schema_TableSize) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Schema) CloneVT() *Schema { + if m == nil { + return (*Schema)(nil) + } + r := &Schema{ + Cluster: m.Cluster.CloneVT(), + Keyspace: m.Keyspace, + } + if rhs := m.TableDefinitions; rhs != nil { + tmpContainer := make([]*tabletmanagerdata.TableDefinition, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TableDefinitions = tmpContainer + } + if rhs := m.TableSizes; rhs != nil { + tmpContainer := make(map[string]*Schema_TableSize, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TableSizes = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Schema) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Shard) CloneVT() *Shard { + 
if m == nil { + return (*Shard)(nil) + } + r := &Shard{ + Cluster: m.Cluster.CloneVT(), + Shard: m.Shard.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Shard) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SrvVSchema) CloneVT() *SrvVSchema { + if m == nil { + return (*SrvVSchema)(nil) + } + r := &SrvVSchema{ + Cell: m.Cell, + Cluster: m.Cluster.CloneVT(), + SrvVSchema: m.SrvVSchema.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SrvVSchema) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Tablet) CloneVT() *Tablet { + if m == nil { + return (*Tablet)(nil) + } + r := &Tablet{ + Cluster: m.Cluster.CloneVT(), + Tablet: m.Tablet.CloneVT(), + State: m.State, + FQDN: m.FQDN, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Tablet) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VSchema) CloneVT() *VSchema { + if m == nil { + return (*VSchema)(nil) + } + r := &VSchema{ + Cluster: m.Cluster.CloneVT(), + Name: m.Name, + VSchema: m.VSchema.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VSchema) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Vtctld) CloneVT() *Vtctld { + if m == nil { + return (*Vtctld)(nil) + } + r := &Vtctld{ + Hostname: m.Hostname, + Cluster: m.Cluster.CloneVT(), + FQDN: m.FQDN, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Vtctld) CloneMessageVT() proto.Message { + return 
m.CloneVT() +} + +func (m *VTGate) CloneVT() *VTGate { + if m == nil { + return (*VTGate)(nil) + } + r := &VTGate{ + Hostname: m.Hostname, + Pool: m.Pool, + Cell: m.Cell, + Cluster: m.Cluster.CloneVT(), + FQDN: m.FQDN, + } + if rhs := m.Keyspaces; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Keyspaces = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VTGate) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Workflow) CloneVT() *Workflow { + if m == nil { + return (*Workflow)(nil) + } + r := &Workflow{ + Cluster: m.Cluster.CloneVT(), + Keyspace: m.Keyspace, + Workflow: m.Workflow.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Workflow) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CreateKeyspaceRequest) CloneVT() *CreateKeyspaceRequest { + if m == nil { + return (*CreateKeyspaceRequest)(nil) + } + r := &CreateKeyspaceRequest{ + ClusterId: m.ClusterId, + Options: m.Options.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CreateKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CreateKeyspaceResponse) CloneVT() *CreateKeyspaceResponse { + if m == nil { + return (*CreateKeyspaceResponse)(nil) + } + r := &CreateKeyspaceResponse{ + Keyspace: m.Keyspace.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CreateKeyspaceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CreateShardRequest) CloneVT() *CreateShardRequest { + if m == nil { + 
return (*CreateShardRequest)(nil) + } + r := &CreateShardRequest{ + ClusterId: m.ClusterId, + Options: m.Options.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CreateShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteKeyspaceRequest) CloneVT() *DeleteKeyspaceRequest { + if m == nil { + return (*DeleteKeyspaceRequest)(nil) + } + r := &DeleteKeyspaceRequest{ + ClusterId: m.ClusterId, + Options: m.Options.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *DeleteKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteShardsRequest) CloneVT() *DeleteShardsRequest { + if m == nil { + return (*DeleteShardsRequest)(nil) + } + r := &DeleteShardsRequest{ + ClusterId: m.ClusterId, + Options: m.Options.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *DeleteShardsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteTabletRequest) CloneVT() *DeleteTabletRequest { + if m == nil { + return (*DeleteTabletRequest)(nil) + } + r := &DeleteTabletRequest{ + Alias: m.Alias.CloneVT(), + AllowPrimary: m.AllowPrimary, + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *DeleteTabletRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteTabletResponse) CloneVT() *DeleteTabletResponse { + if m == nil { + return (*DeleteTabletResponse)(nil) + } + r := 
&DeleteTabletResponse{ + Status: m.Status, + Cluster: m.Cluster.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *DeleteTabletResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *EmergencyFailoverShardRequest) CloneVT() *EmergencyFailoverShardRequest { + if m == nil { + return (*EmergencyFailoverShardRequest)(nil) + } + r := &EmergencyFailoverShardRequest{ + ClusterId: m.ClusterId, + Options: m.Options.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *EmergencyFailoverShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *EmergencyFailoverShardResponse) CloneVT() *EmergencyFailoverShardResponse { + if m == nil { + return (*EmergencyFailoverShardResponse)(nil) + } + r := &EmergencyFailoverShardResponse{ + Cluster: m.Cluster.CloneVT(), + Keyspace: m.Keyspace, + Shard: m.Shard, + PromotedPrimary: m.PromotedPrimary.CloneVT(), + } + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *EmergencyFailoverShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *FindSchemaRequest) CloneVT() *FindSchemaRequest { + if m == nil { + return (*FindSchemaRequest)(nil) + } + r := &FindSchemaRequest{ + Table: m.Table, + TableSizeOptions: m.TableSizeOptions.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, 
len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *FindSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetBackupsRequest) CloneVT() *GetBackupsRequest { + if m == nil { + return (*GetBackupsRequest)(nil) + } + r := &GetBackupsRequest{ + RequestOptions: m.RequestOptions.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if rhs := m.Keyspaces; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Keyspaces = tmpContainer + } + if rhs := m.KeyspaceShards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.KeyspaceShards = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetBackupsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetBackupsResponse) CloneVT() *GetBackupsResponse { + if m == nil { + return (*GetBackupsResponse)(nil) + } + r := &GetBackupsResponse{} + if rhs := m.Backups; rhs != nil { + tmpContainer := make([]*ClusterBackup, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Backups = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetBackupsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCellInfosRequest) CloneVT() *GetCellInfosRequest { + if m == nil { + return (*GetCellInfosRequest)(nil) + } + r := &GetCellInfosRequest{ + NamesOnly: m.NamesOnly, + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + 
copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetCellInfosRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCellInfosResponse) CloneVT() *GetCellInfosResponse { + if m == nil { + return (*GetCellInfosResponse)(nil) + } + r := &GetCellInfosResponse{} + if rhs := m.CellInfos; rhs != nil { + tmpContainer := make([]*ClusterCellInfo, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.CellInfos = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetCellInfosResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCellsAliasesRequest) CloneVT() *GetCellsAliasesRequest { + if m == nil { + return (*GetCellsAliasesRequest)(nil) + } + r := &GetCellsAliasesRequest{} + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetCellsAliasesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCellsAliasesResponse) CloneVT() *GetCellsAliasesResponse { + if m == nil { + return (*GetCellsAliasesResponse)(nil) + } + r := &GetCellsAliasesResponse{} + if rhs := m.Aliases; rhs != nil { + tmpContainer := make([]*ClusterCellsAliases, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Aliases = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetCellsAliasesResponse) CloneMessageVT() proto.Message { + return 
m.CloneVT() +} + +func (m *GetClustersRequest) CloneVT() *GetClustersRequest { + if m == nil { + return (*GetClustersRequest)(nil) + } + r := &GetClustersRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetClustersRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetClustersResponse) CloneVT() *GetClustersResponse { + if m == nil { + return (*GetClustersResponse)(nil) + } + r := &GetClustersResponse{} + if rhs := m.Clusters; rhs != nil { + tmpContainer := make([]*Cluster, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Clusters = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetClustersResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetFullStatusRequest) CloneVT() *GetFullStatusRequest { + if m == nil { + return (*GetFullStatusRequest)(nil) + } + r := &GetFullStatusRequest{ + ClusterId: m.ClusterId, + Alias: m.Alias.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetFullStatusRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetGatesRequest) CloneVT() *GetGatesRequest { + if m == nil { + return (*GetGatesRequest)(nil) + } + r := &GetGatesRequest{} + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetGatesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetGatesResponse) CloneVT() *GetGatesResponse { + if m == nil 
{ + return (*GetGatesResponse)(nil) + } + r := &GetGatesResponse{} + if rhs := m.Gates; rhs != nil { + tmpContainer := make([]*VTGate, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Gates = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetGatesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetKeyspaceRequest) CloneVT() *GetKeyspaceRequest { + if m == nil { + return (*GetKeyspaceRequest)(nil) + } + r := &GetKeyspaceRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetKeyspacesRequest) CloneVT() *GetKeyspacesRequest { + if m == nil { + return (*GetKeyspacesRequest)(nil) + } + r := &GetKeyspacesRequest{} + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetKeyspacesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetKeyspacesResponse) CloneVT() *GetKeyspacesResponse { + if m == nil { + return (*GetKeyspacesResponse)(nil) + } + r := &GetKeyspacesResponse{} + if rhs := m.Keyspaces; rhs != nil { + tmpContainer := make([]*Keyspace, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Keyspaces = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetKeyspacesResponse) CloneMessageVT() proto.Message { + return 
m.CloneVT() +} + +func (m *GetSchemaRequest) CloneVT() *GetSchemaRequest { + if m == nil { + return (*GetSchemaRequest)(nil) + } + r := &GetSchemaRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + Table: m.Table, + TableSizeOptions: m.TableSizeOptions.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemasRequest) CloneVT() *GetSchemasRequest { + if m == nil { + return (*GetSchemasRequest)(nil) + } + r := &GetSchemasRequest{ + TableSizeOptions: m.TableSizeOptions.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSchemasRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemasResponse) CloneVT() *GetSchemasResponse { + if m == nil { + return (*GetSchemasResponse)(nil) + } + r := &GetSchemasResponse{} + if rhs := m.Schemas; rhs != nil { + tmpContainer := make([]*Schema, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Schemas = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSchemasResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetShardReplicationPositionsRequest) CloneVT() *GetShardReplicationPositionsRequest { + if m == nil { + return (*GetShardReplicationPositionsRequest)(nil) + } + r := &GetShardReplicationPositionsRequest{} + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } 
+ if rhs := m.Keyspaces; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Keyspaces = tmpContainer + } + if rhs := m.KeyspaceShards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.KeyspaceShards = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetShardReplicationPositionsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetShardReplicationPositionsResponse) CloneVT() *GetShardReplicationPositionsResponse { + if m == nil { + return (*GetShardReplicationPositionsResponse)(nil) + } + r := &GetShardReplicationPositionsResponse{} + if rhs := m.ReplicationPositions; rhs != nil { + tmpContainer := make([]*ClusterShardReplicationPosition, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ReplicationPositions = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetShardReplicationPositionsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvKeyspaceRequest) CloneVT() *GetSrvKeyspaceRequest { + if m == nil { + return (*GetSrvKeyspaceRequest)(nil) + } + r := &GetSrvKeyspaceRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSrvKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvKeyspacesRequest) CloneVT() *GetSrvKeyspacesRequest { + if m == nil { + return (*GetSrvKeyspacesRequest)(nil) + } + r := &GetSrvKeyspacesRequest{} + if rhs 
:= m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSrvKeyspacesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvKeyspacesResponse) CloneVT() *GetSrvKeyspacesResponse { + if m == nil { + return (*GetSrvKeyspacesResponse)(nil) + } + r := &GetSrvKeyspacesResponse{} + if rhs := m.SrvKeyspaces; rhs != nil { + tmpContainer := make(map[string]*vtctldata.GetSrvKeyspacesResponse, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.SrvKeyspaces = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSrvKeyspacesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvVSchemaRequest) CloneVT() *GetSrvVSchemaRequest { + if m == nil { + return (*GetSrvVSchemaRequest)(nil) + } + r := &GetSrvVSchemaRequest{ + ClusterId: m.ClusterId, + Cell: m.Cell, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSrvVSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvVSchemasRequest) CloneVT() *GetSrvVSchemasRequest { + if m == nil { + return (*GetSrvVSchemasRequest)(nil) + } + r := &GetSrvVSchemasRequest{} + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = 
tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSrvVSchemasRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvVSchemasResponse) CloneVT() *GetSrvVSchemasResponse { + if m == nil { + return (*GetSrvVSchemasResponse)(nil) + } + r := &GetSrvVSchemasResponse{} + if rhs := m.SrvVSchemas; rhs != nil { + tmpContainer := make([]*SrvVSchema, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.SrvVSchemas = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSrvVSchemasResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemaTableSizeOptions) CloneVT() *GetSchemaTableSizeOptions { + if m == nil { + return (*GetSchemaTableSizeOptions)(nil) + } + r := &GetSchemaTableSizeOptions{ + AggregateSizes: m.AggregateSizes, + IncludeNonServingShards: m.IncludeNonServingShards, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSchemaTableSizeOptions) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetTabletRequest) CloneVT() *GetTabletRequest { + if m == nil { + return (*GetTabletRequest)(nil) + } + r := &GetTabletRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetTabletRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetTabletsRequest) CloneVT() *GetTabletsRequest { + if m == nil { + return 
(*GetTabletsRequest)(nil) + } + r := &GetTabletsRequest{} + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetTabletsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetTabletsResponse) CloneVT() *GetTabletsResponse { + if m == nil { + return (*GetTabletsResponse)(nil) + } + r := &GetTabletsResponse{} + if rhs := m.Tablets; rhs != nil { + tmpContainer := make([]*Tablet, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Tablets = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetTabletsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetTopologyPathRequest) CloneVT() *GetTopologyPathRequest { + if m == nil { + return (*GetTopologyPathRequest)(nil) + } + r := &GetTopologyPathRequest{ + ClusterId: m.ClusterId, + Path: m.Path, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetTopologyPathRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetVSchemaRequest) CloneVT() *GetVSchemaRequest { + if m == nil { + return (*GetVSchemaRequest)(nil) + } + r := &GetVSchemaRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetVSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetVSchemasRequest) CloneVT() *GetVSchemasRequest { + if m == nil { + return (*GetVSchemasRequest)(nil) + } + r := 
&GetVSchemasRequest{} + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetVSchemasRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetVSchemasResponse) CloneVT() *GetVSchemasResponse { + if m == nil { + return (*GetVSchemasResponse)(nil) + } + r := &GetVSchemasResponse{} + if rhs := m.VSchemas; rhs != nil { + tmpContainer := make([]*VSchema, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.VSchemas = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetVSchemasResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetVtctldsRequest) CloneVT() *GetVtctldsRequest { + if m == nil { + return (*GetVtctldsRequest)(nil) + } + r := &GetVtctldsRequest{} + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetVtctldsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetVtctldsResponse) CloneVT() *GetVtctldsResponse { + if m == nil { + return (*GetVtctldsResponse)(nil) + } + r := &GetVtctldsResponse{} + if rhs := m.Vtctlds; rhs != nil { + tmpContainer := make([]*Vtctld, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Vtctlds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetVtctldsResponse) 
CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetWorkflowRequest) CloneVT() *GetWorkflowRequest { + if m == nil { + return (*GetWorkflowRequest)(nil) + } + r := &GetWorkflowRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + Name: m.Name, + ActiveOnly: m.ActiveOnly, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetWorkflowRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetWorkflowsRequest) CloneVT() *GetWorkflowsRequest { + if m == nil { + return (*GetWorkflowsRequest)(nil) + } + r := &GetWorkflowsRequest{ + ActiveOnly: m.ActiveOnly, + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if rhs := m.Keyspaces; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Keyspaces = tmpContainer + } + if rhs := m.IgnoreKeyspaces; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.IgnoreKeyspaces = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetWorkflowsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetWorkflowsResponse) CloneVT() *GetWorkflowsResponse { + if m == nil { + return (*GetWorkflowsResponse)(nil) + } + r := &GetWorkflowsResponse{} + if rhs := m.WorkflowsByCluster; rhs != nil { + tmpContainer := make(map[string]*ClusterWorkflows, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.WorkflowsByCluster = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetWorkflowsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} 
+ +func (m *PingTabletRequest) CloneVT() *PingTabletRequest { + if m == nil { + return (*PingTabletRequest)(nil) + } + r := &PingTabletRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PingTabletRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PingTabletResponse) CloneVT() *PingTabletResponse { + if m == nil { + return (*PingTabletResponse)(nil) + } + r := &PingTabletResponse{ + Status: m.Status, + Cluster: m.Cluster.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PingTabletResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PlannedFailoverShardRequest) CloneVT() *PlannedFailoverShardRequest { + if m == nil { + return (*PlannedFailoverShardRequest)(nil) + } + r := &PlannedFailoverShardRequest{ + ClusterId: m.ClusterId, + Options: m.Options.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PlannedFailoverShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PlannedFailoverShardResponse) CloneVT() *PlannedFailoverShardResponse { + if m == nil { + return (*PlannedFailoverShardResponse)(nil) + } + r := &PlannedFailoverShardResponse{ + Cluster: m.Cluster.CloneVT(), + Keyspace: m.Keyspace, + Shard: m.Shard, + PromotedPrimary: m.PromotedPrimary.CloneVT(), + } + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer + } + if len(m.unknownFields) 
> 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PlannedFailoverShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RebuildKeyspaceGraphRequest) CloneVT() *RebuildKeyspaceGraphRequest { + if m == nil { + return (*RebuildKeyspaceGraphRequest)(nil) + } + r := &RebuildKeyspaceGraphRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + AllowPartial: m.AllowPartial, + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RebuildKeyspaceGraphRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RebuildKeyspaceGraphResponse) CloneVT() *RebuildKeyspaceGraphResponse { + if m == nil { + return (*RebuildKeyspaceGraphResponse)(nil) + } + r := &RebuildKeyspaceGraphResponse{ + Status: m.Status, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RebuildKeyspaceGraphResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshStateRequest) CloneVT() *RefreshStateRequest { + if m == nil { + return (*RefreshStateRequest)(nil) + } + r := &RefreshStateRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RefreshStateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshStateResponse) CloneVT() *RefreshStateResponse { + if m == nil { + return 
(*RefreshStateResponse)(nil) + } + r := &RefreshStateResponse{ + Status: m.Status, + Cluster: m.Cluster.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RefreshStateResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemasRequest) CloneVT() *ReloadSchemasRequest { + if m == nil { + return (*ReloadSchemasRequest)(nil) + } + r := &ReloadSchemasRequest{ + Concurrency: m.Concurrency, + WaitPosition: m.WaitPosition, + IncludePrimary: m.IncludePrimary, + } + if rhs := m.Keyspaces; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Keyspaces = tmpContainer + } + if rhs := m.KeyspaceShards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.KeyspaceShards = tmpContainer + } + if rhs := m.Tablets; rhs != nil { + tmpContainer := make([]*topodata.TabletAlias, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Tablets = tmpContainer + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReloadSchemasRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemasResponse_KeyspaceResult) CloneVT() *ReloadSchemasResponse_KeyspaceResult { + if m == nil { + return (*ReloadSchemasResponse_KeyspaceResult)(nil) + } + r := &ReloadSchemasResponse_KeyspaceResult{ + Keyspace: m.Keyspace.CloneVT(), + } + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + 
copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReloadSchemasResponse_KeyspaceResult) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemasResponse_ShardResult) CloneVT() *ReloadSchemasResponse_ShardResult { + if m == nil { + return (*ReloadSchemasResponse_ShardResult)(nil) + } + r := &ReloadSchemasResponse_ShardResult{ + Shard: m.Shard.CloneVT(), + } + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReloadSchemasResponse_ShardResult) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemasResponse_TabletResult) CloneVT() *ReloadSchemasResponse_TabletResult { + if m == nil { + return (*ReloadSchemasResponse_TabletResult)(nil) + } + r := &ReloadSchemasResponse_TabletResult{ + Tablet: m.Tablet.CloneVT(), + Result: m.Result, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReloadSchemasResponse_TabletResult) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemasResponse) CloneVT() *ReloadSchemasResponse { + if m == nil { + return (*ReloadSchemasResponse)(nil) + } + r := &ReloadSchemasResponse{} + if rhs := m.KeyspaceResults; rhs != nil { + tmpContainer := make([]*ReloadSchemasResponse_KeyspaceResult, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.KeyspaceResults = tmpContainer + } + if rhs := m.ShardResults; rhs != nil { + tmpContainer := make([]*ReloadSchemasResponse_ShardResult, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ShardResults = tmpContainer + } + if rhs := m.TabletResults; rhs != nil { + tmpContainer := 
make([]*ReloadSchemasResponse_TabletResult, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TabletResults = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReloadSchemasResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemaShardRequest) CloneVT() *ReloadSchemaShardRequest { + if m == nil { + return (*ReloadSchemaShardRequest)(nil) + } + r := &ReloadSchemaShardRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + Shard: m.Shard, + WaitPosition: m.WaitPosition, + IncludePrimary: m.IncludePrimary, + Concurrency: m.Concurrency, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReloadSchemaShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemaShardResponse) CloneVT() *ReloadSchemaShardResponse { + if m == nil { + return (*ReloadSchemaShardResponse)(nil) + } + r := &ReloadSchemaShardResponse{} + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReloadSchemaShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshTabletReplicationSourceRequest) CloneVT() *RefreshTabletReplicationSourceRequest { + if m == nil { + return (*RefreshTabletReplicationSourceRequest)(nil) + } + r := &RefreshTabletReplicationSourceRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 
{ + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RefreshTabletReplicationSourceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshTabletReplicationSourceResponse) CloneVT() *RefreshTabletReplicationSourceResponse { + if m == nil { + return (*RefreshTabletReplicationSourceResponse)(nil) + } + r := &RefreshTabletReplicationSourceResponse{ + Keyspace: m.Keyspace, + Shard: m.Shard, + Primary: m.Primary.CloneVT(), + Cluster: m.Cluster.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RefreshTabletReplicationSourceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RemoveKeyspaceCellRequest) CloneVT() *RemoveKeyspaceCellRequest { + if m == nil { + return (*RemoveKeyspaceCellRequest)(nil) + } + r := &RemoveKeyspaceCellRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + Cell: m.Cell, + Force: m.Force, + Recursive: m.Recursive, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RemoveKeyspaceCellRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RemoveKeyspaceCellResponse) CloneVT() *RemoveKeyspaceCellResponse { + if m == nil { + return (*RemoveKeyspaceCellResponse)(nil) + } + r := &RemoveKeyspaceCellResponse{ + Status: m.Status, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RemoveKeyspaceCellResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RunHealthCheckRequest) CloneVT() *RunHealthCheckRequest { + if m == nil { + return (*RunHealthCheckRequest)(nil) + } + r := &RunHealthCheckRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := 
m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RunHealthCheckRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RunHealthCheckResponse) CloneVT() *RunHealthCheckResponse { + if m == nil { + return (*RunHealthCheckResponse)(nil) + } + r := &RunHealthCheckResponse{ + Status: m.Status, + Cluster: m.Cluster.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RunHealthCheckResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetReadOnlyRequest) CloneVT() *SetReadOnlyRequest { + if m == nil { + return (*SetReadOnlyRequest)(nil) + } + r := &SetReadOnlyRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SetReadOnlyRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetReadOnlyResponse) CloneVT() *SetReadOnlyResponse { + if m == nil { + return (*SetReadOnlyResponse)(nil) + } + r := &SetReadOnlyResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SetReadOnlyResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetReadWriteRequest) CloneVT() *SetReadWriteRequest { + if m == nil { + return (*SetReadWriteRequest)(nil) + } + r := &SetReadWriteRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + 
tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SetReadWriteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetReadWriteResponse) CloneVT() *SetReadWriteResponse { + if m == nil { + return (*SetReadWriteResponse)(nil) + } + r := &SetReadWriteResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SetReadWriteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StartReplicationRequest) CloneVT() *StartReplicationRequest { + if m == nil { + return (*StartReplicationRequest)(nil) + } + r := &StartReplicationRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StartReplicationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StartReplicationResponse) CloneVT() *StartReplicationResponse { + if m == nil { + return (*StartReplicationResponse)(nil) + } + r := &StartReplicationResponse{ + Status: m.Status, + Cluster: m.Cluster.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StartReplicationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StopReplicationRequest) CloneVT() *StopReplicationRequest { + if m == nil { + return (*StopReplicationRequest)(nil) + } + r := &StopReplicationRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := 
m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StopReplicationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StopReplicationResponse) CloneVT() *StopReplicationResponse { + if m == nil { + return (*StopReplicationResponse)(nil) + } + r := &StopReplicationResponse{ + Status: m.Status, + Cluster: m.Cluster.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StopReplicationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TabletExternallyPromotedRequest) CloneVT() *TabletExternallyPromotedRequest { + if m == nil { + return (*TabletExternallyPromotedRequest)(nil) + } + r := &TabletExternallyPromotedRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TabletExternallyPromotedRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TabletExternallyPromotedResponse) CloneVT() *TabletExternallyPromotedResponse { + if m == nil { + return (*TabletExternallyPromotedResponse)(nil) + } + r := &TabletExternallyPromotedResponse{ + Cluster: m.Cluster.CloneVT(), + Keyspace: m.Keyspace, + Shard: m.Shard, + NewPrimary: m.NewPrimary.CloneVT(), + OldPrimary: m.OldPrimary.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m 
*TabletExternallyPromotedResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TabletExternallyReparentedRequest) CloneVT() *TabletExternallyReparentedRequest { + if m == nil { + return (*TabletExternallyReparentedRequest)(nil) + } + r := &TabletExternallyReparentedRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TabletExternallyReparentedRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateRequest) CloneVT() *ValidateRequest { + if m == nil { + return (*ValidateRequest)(nil) + } + r := &ValidateRequest{ + ClusterId: m.ClusterId, + PingTablets: m.PingTablets, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ValidateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateKeyspaceRequest) CloneVT() *ValidateKeyspaceRequest { + if m == nil { + return (*ValidateKeyspaceRequest)(nil) + } + r := &ValidateKeyspaceRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + PingTablets: m.PingTablets, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ValidateKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateSchemaKeyspaceRequest) CloneVT() *ValidateSchemaKeyspaceRequest { + if m == nil { + return (*ValidateSchemaKeyspaceRequest)(nil) + } + r := &ValidateSchemaKeyspaceRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + 
copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ValidateSchemaKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateShardRequest) CloneVT() *ValidateShardRequest { + if m == nil { + return (*ValidateShardRequest)(nil) + } + r := &ValidateShardRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + Shard: m.Shard, + PingTablets: m.PingTablets, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ValidateShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateVersionKeyspaceRequest) CloneVT() *ValidateVersionKeyspaceRequest { + if m == nil { + return (*ValidateVersionKeyspaceRequest)(nil) + } + r := &ValidateVersionKeyspaceRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ValidateVersionKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateVersionShardRequest) CloneVT() *ValidateVersionShardRequest { + if m == nil { + return (*ValidateVersionShardRequest)(nil) + } + r := &ValidateVersionShardRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + Shard: m.Shard, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ValidateVersionShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VTExplainRequest) CloneVT() *VTExplainRequest { + if m == nil { + return (*VTExplainRequest)(nil) + } + r := &VTExplainRequest{ + Cluster: m.Cluster, + Keyspace: m.Keyspace, + Sql: m.Sql, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} 
+ +func (m *VTExplainRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VTExplainResponse) CloneVT() *VTExplainResponse { + if m == nil { + return (*VTExplainResponse)(nil) + } + r := &VTExplainResponse{ + Response: m.Response, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VTExplainResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Cluster) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil diff --git a/go/vt/proto/vtctldata/vtctldata.pb.go b/go/vt/proto/vtctldata/vtctldata.pb.go index f1510aca1d3..2e7e5065f94 100644 --- a/go/vt/proto/vtctldata/vtctldata.pb.go +++ b/go/vt/proto/vtctldata/vtctldata.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: vtctldata.proto package vtctldata @@ -101,6 +101,185 @@ func (MaterializationIntent) EnumDescriptor() ([]byte, []int) { return file_vtctldata_proto_rawDescGZIP(), []int{0} } +type QueryOrdering int32 + +const ( + QueryOrdering_NONE QueryOrdering = 0 + QueryOrdering_ASCENDING QueryOrdering = 1 + QueryOrdering_DESCENDING QueryOrdering = 2 +) + +// Enum value maps for QueryOrdering. 
+var ( + QueryOrdering_name = map[int32]string{ + 0: "NONE", + 1: "ASCENDING", + 2: "DESCENDING", + } + QueryOrdering_value = map[string]int32{ + "NONE": 0, + "ASCENDING": 1, + "DESCENDING": 2, + } +) + +func (x QueryOrdering) Enum() *QueryOrdering { + p := new(QueryOrdering) + *p = x + return p +} + +func (x QueryOrdering) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (QueryOrdering) Descriptor() protoreflect.EnumDescriptor { + return file_vtctldata_proto_enumTypes[1].Descriptor() +} + +func (QueryOrdering) Type() protoreflect.EnumType { + return &file_vtctldata_proto_enumTypes[1] +} + +func (x QueryOrdering) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use QueryOrdering.Descriptor instead. +func (QueryOrdering) EnumDescriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{1} +} + +type SchemaMigration_Strategy int32 + +const ( + // SchemaMigration_VITESS uses vreplication to run the schema migration. It is + // the default strategy for OnlineDDL requests. + // + // SchemaMigration_VITESS was also formerly called "ONLINE". + SchemaMigration_VITESS SchemaMigration_Strategy = 0 + SchemaMigration_ONLINE SchemaMigration_Strategy = 0 + SchemaMigration_GHOST SchemaMigration_Strategy = 1 + SchemaMigration_PTOSC SchemaMigration_Strategy = 2 + // SchemaMigration_DIRECT runs the migration directly against MySQL (e.g. `ALTER TABLE ...`), + // meaning it is not actually an "online" DDL migration. + SchemaMigration_DIRECT SchemaMigration_Strategy = 3 + // SchemaMigration_MYSQL is a managed migration (queued and executed by the + // scheduler) but runs through a MySQL `ALTER TABLE`. + SchemaMigration_MYSQL SchemaMigration_Strategy = 4 +) + +// Enum value maps for SchemaMigration_Strategy. 
+var ( + SchemaMigration_Strategy_name = map[int32]string{ + 0: "VITESS", + // Duplicate value: 0: "ONLINE", + 1: "GHOST", + 2: "PTOSC", + 3: "DIRECT", + 4: "MYSQL", + } + SchemaMigration_Strategy_value = map[string]int32{ + "VITESS": 0, + "ONLINE": 0, + "GHOST": 1, + "PTOSC": 2, + "DIRECT": 3, + "MYSQL": 4, + } +) + +func (x SchemaMigration_Strategy) Enum() *SchemaMigration_Strategy { + p := new(SchemaMigration_Strategy) + *p = x + return p +} + +func (x SchemaMigration_Strategy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SchemaMigration_Strategy) Descriptor() protoreflect.EnumDescriptor { + return file_vtctldata_proto_enumTypes[2].Descriptor() +} + +func (SchemaMigration_Strategy) Type() protoreflect.EnumType { + return &file_vtctldata_proto_enumTypes[2] +} + +func (x SchemaMigration_Strategy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SchemaMigration_Strategy.Descriptor instead. +func (SchemaMigration_Strategy) EnumDescriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{5, 0} +} + +type SchemaMigration_Status int32 + +const ( + SchemaMigration_UNKNOWN SchemaMigration_Status = 0 + SchemaMigration_REQUESTED SchemaMigration_Status = 1 + SchemaMigration_CANCELLED SchemaMigration_Status = 2 + SchemaMigration_QUEUED SchemaMigration_Status = 3 + SchemaMigration_READY SchemaMigration_Status = 4 + SchemaMigration_RUNNING SchemaMigration_Status = 5 + SchemaMigration_COMPLETE SchemaMigration_Status = 6 + SchemaMigration_FAILED SchemaMigration_Status = 7 +) + +// Enum value maps for SchemaMigration_Status. 
+var ( + SchemaMigration_Status_name = map[int32]string{ + 0: "UNKNOWN", + 1: "REQUESTED", + 2: "CANCELLED", + 3: "QUEUED", + 4: "READY", + 5: "RUNNING", + 6: "COMPLETE", + 7: "FAILED", + } + SchemaMigration_Status_value = map[string]int32{ + "UNKNOWN": 0, + "REQUESTED": 1, + "CANCELLED": 2, + "QUEUED": 3, + "READY": 4, + "RUNNING": 5, + "COMPLETE": 6, + "FAILED": 7, + } +) + +func (x SchemaMigration_Status) Enum() *SchemaMigration_Status { + p := new(SchemaMigration_Status) + *p = x + return p +} + +func (x SchemaMigration_Status) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SchemaMigration_Status) Descriptor() protoreflect.EnumDescriptor { + return file_vtctldata_proto_enumTypes[3].Descriptor() +} + +func (SchemaMigration_Status) Type() protoreflect.EnumType { + return &file_vtctldata_proto_enumTypes[3] +} + +func (x SchemaMigration_Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SchemaMigration_Status.Descriptor instead. +func (SchemaMigration_Status) EnumDescriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{5, 1} +} + // ExecuteVtctlCommandRequest is the payload for ExecuteVtctlCommand. // timeouts are in nanoseconds. type ExecuteVtctlCommandRequest struct { @@ -304,7 +483,9 @@ type MaterializeSettings struct { // OnDdl specifies the action to be taken when a DDL is encountered. OnDdl string `protobuf:"bytes,13,opt,name=on_ddl,json=onDdl,proto3" json:"on_ddl,omitempty"` // DeferSecondaryKeys specifies if secondary keys should be created in one shot after table copy finishes. 
- DeferSecondaryKeys bool `protobuf:"varint,14,opt,name=defer_secondary_keys,json=deferSecondaryKeys,proto3" json:"defer_secondary_keys,omitempty"` + DeferSecondaryKeys bool `protobuf:"varint,14,opt,name=defer_secondary_keys,json=deferSecondaryKeys,proto3" json:"defer_secondary_keys,omitempty"` + TabletSelectionPreference tabletmanagerdata.TabletSelectionPreference `protobuf:"varint,15,opt,name=tablet_selection_preference,json=tabletSelectionPreference,proto3,enum=tabletmanagerdata.TabletSelectionPreference" json:"tablet_selection_preference,omitempty"` + AtomicCopy bool `protobuf:"varint,16,opt,name=atomic_copy,json=atomicCopy,proto3" json:"atomic_copy,omitempty"` } func (x *MaterializeSettings) Reset() { @@ -437,6 +618,20 @@ func (x *MaterializeSettings) GetDeferSecondaryKeys() bool { return false } +func (x *MaterializeSettings) GetTabletSelectionPreference() tabletmanagerdata.TabletSelectionPreference { + if x != nil { + return x.TabletSelectionPreference + } + return tabletmanagerdata.TabletSelectionPreference(0) +} + +func (x *MaterializeSettings) GetAtomicCopy() bool { + if x != nil { + return x.AtomicCopy + } + return false +} + type Keyspace struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -492,18 +687,69 @@ func (x *Keyspace) GetKeyspace() *topodata.Keyspace { return nil } -type Shard struct { +// SchemaMigration represents a row in the schema_migrations sidecar table. 
+type SchemaMigration struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Shard *topodata.Shard `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` -} - -func (x *Shard) Reset() { - *x = Shard{} + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` + Schema string `protobuf:"bytes,4,opt,name=schema,proto3" json:"schema,omitempty"` + Table string `protobuf:"bytes,5,opt,name=table,proto3" json:"table,omitempty"` + MigrationStatement string `protobuf:"bytes,6,opt,name=migration_statement,json=migrationStatement,proto3" json:"migration_statement,omitempty"` + Strategy SchemaMigration_Strategy `protobuf:"varint,7,opt,name=strategy,proto3,enum=vtctldata.SchemaMigration_Strategy" json:"strategy,omitempty"` + Options string `protobuf:"bytes,8,opt,name=options,proto3" json:"options,omitempty"` + AddedAt *vttime.Time `protobuf:"bytes,9,opt,name=added_at,json=addedAt,proto3" json:"added_at,omitempty"` + RequestedAt *vttime.Time `protobuf:"bytes,10,opt,name=requested_at,json=requestedAt,proto3" json:"requested_at,omitempty"` + ReadyAt *vttime.Time `protobuf:"bytes,11,opt,name=ready_at,json=readyAt,proto3" json:"ready_at,omitempty"` + StartedAt *vttime.Time `protobuf:"bytes,12,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + LivenessTimestamp *vttime.Time `protobuf:"bytes,13,opt,name=liveness_timestamp,json=livenessTimestamp,proto3" json:"liveness_timestamp,omitempty"` + CompletedAt *vttime.Time `protobuf:"bytes,14,opt,name=completed_at,json=completedAt,proto3" json:"completed_at,omitempty"` + CleanedUpAt *vttime.Time 
`protobuf:"bytes,15,opt,name=cleaned_up_at,json=cleanedUpAt,proto3" json:"cleaned_up_at,omitempty"` + Status SchemaMigration_Status `protobuf:"varint,16,opt,name=status,proto3,enum=vtctldata.SchemaMigration_Status" json:"status,omitempty"` + LogPath string `protobuf:"bytes,17,opt,name=log_path,json=logPath,proto3" json:"log_path,omitempty"` + Artifacts string `protobuf:"bytes,18,opt,name=artifacts,proto3" json:"artifacts,omitempty"` + Retries uint64 `protobuf:"varint,19,opt,name=retries,proto3" json:"retries,omitempty"` + Tablet *topodata.TabletAlias `protobuf:"bytes,20,opt,name=tablet,proto3" json:"tablet,omitempty"` + TabletFailure bool `protobuf:"varint,21,opt,name=tablet_failure,json=tabletFailure,proto3" json:"tablet_failure,omitempty"` + Progress float32 `protobuf:"fixed32,22,opt,name=progress,proto3" json:"progress,omitempty"` + MigrationContext string `protobuf:"bytes,23,opt,name=migration_context,json=migrationContext,proto3" json:"migration_context,omitempty"` + DdlAction string `protobuf:"bytes,24,opt,name=ddl_action,json=ddlAction,proto3" json:"ddl_action,omitempty"` + Message string `protobuf:"bytes,25,opt,name=message,proto3" json:"message,omitempty"` + EtaSeconds int64 `protobuf:"varint,26,opt,name=eta_seconds,json=etaSeconds,proto3" json:"eta_seconds,omitempty"` + RowsCopied uint64 `protobuf:"varint,27,opt,name=rows_copied,json=rowsCopied,proto3" json:"rows_copied,omitempty"` + TableRows int64 `protobuf:"varint,28,opt,name=table_rows,json=tableRows,proto3" json:"table_rows,omitempty"` + AddedUniqueKeys uint32 `protobuf:"varint,29,opt,name=added_unique_keys,json=addedUniqueKeys,proto3" json:"added_unique_keys,omitempty"` + RemovedUniqueKeys uint32 `protobuf:"varint,30,opt,name=removed_unique_keys,json=removedUniqueKeys,proto3" json:"removed_unique_keys,omitempty"` + LogFile string `protobuf:"bytes,31,opt,name=log_file,json=logFile,proto3" json:"log_file,omitempty"` + ArtifactRetention *vttime.Duration 
`protobuf:"bytes,32,opt,name=artifact_retention,json=artifactRetention,proto3" json:"artifact_retention,omitempty"` + PostponeCompletion bool `protobuf:"varint,33,opt,name=postpone_completion,json=postponeCompletion,proto3" json:"postpone_completion,omitempty"` + RemovedUniqueKeyNames string `protobuf:"bytes,34,opt,name=removed_unique_key_names,json=removedUniqueKeyNames,proto3" json:"removed_unique_key_names,omitempty"` + DroppedNoDefaultColumnNames string `protobuf:"bytes,35,opt,name=dropped_no_default_column_names,json=droppedNoDefaultColumnNames,proto3" json:"dropped_no_default_column_names,omitempty"` + ExpandedColumnNames string `protobuf:"bytes,36,opt,name=expanded_column_names,json=expandedColumnNames,proto3" json:"expanded_column_names,omitempty"` + RevertibleNotes string `protobuf:"bytes,37,opt,name=revertible_notes,json=revertibleNotes,proto3" json:"revertible_notes,omitempty"` + AllowConcurrent bool `protobuf:"varint,38,opt,name=allow_concurrent,json=allowConcurrent,proto3" json:"allow_concurrent,omitempty"` + RevertedUuid string `protobuf:"bytes,39,opt,name=reverted_uuid,json=revertedUuid,proto3" json:"reverted_uuid,omitempty"` + IsView bool `protobuf:"varint,40,opt,name=is_view,json=isView,proto3" json:"is_view,omitempty"` + ReadyToComplete bool `protobuf:"varint,41,opt,name=ready_to_complete,json=readyToComplete,proto3" json:"ready_to_complete,omitempty"` + VitessLivenessIndicator int64 `protobuf:"varint,42,opt,name=vitess_liveness_indicator,json=vitessLivenessIndicator,proto3" json:"vitess_liveness_indicator,omitempty"` + UserThrottleRatio float32 `protobuf:"fixed32,43,opt,name=user_throttle_ratio,json=userThrottleRatio,proto3" json:"user_throttle_ratio,omitempty"` + SpecialPlan string `protobuf:"bytes,44,opt,name=special_plan,json=specialPlan,proto3" json:"special_plan,omitempty"` + LastThrottledAt *vttime.Time `protobuf:"bytes,45,opt,name=last_throttled_at,json=lastThrottledAt,proto3" json:"last_throttled_at,omitempty"` + ComponentThrottled string 
`protobuf:"bytes,46,opt,name=component_throttled,json=componentThrottled,proto3" json:"component_throttled,omitempty"` + CancelledAt *vttime.Time `protobuf:"bytes,47,opt,name=cancelled_at,json=cancelledAt,proto3" json:"cancelled_at,omitempty"` + PostponeLaunch bool `protobuf:"varint,48,opt,name=postpone_launch,json=postponeLaunch,proto3" json:"postpone_launch,omitempty"` + Stage string `protobuf:"bytes,49,opt,name=stage,proto3" json:"stage,omitempty"` // enum? + CutoverAttempts uint32 `protobuf:"varint,50,opt,name=cutover_attempts,json=cutoverAttempts,proto3" json:"cutover_attempts,omitempty"` + IsImmediateOperation bool `protobuf:"varint,51,opt,name=is_immediate_operation,json=isImmediateOperation,proto3" json:"is_immediate_operation,omitempty"` + ReviewedAt *vttime.Time `protobuf:"bytes,52,opt,name=reviewed_at,json=reviewedAt,proto3" json:"reviewed_at,omitempty"` + ReadyToCompleteAt *vttime.Time `protobuf:"bytes,53,opt,name=ready_to_complete_at,json=readyToCompleteAt,proto3" json:"ready_to_complete_at,omitempty"` +} + +func (x *SchemaMigration) Reset() { + *x = SchemaMigration{} if protoimpl.UnsafeEnabled { mi := &file_vtctldata_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -511,13 +757,13 @@ func (x *Shard) Reset() { } } -func (x *Shard) String() string { +func (x *SchemaMigration) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Shard) ProtoMessage() {} +func (*SchemaMigration) ProtoMessage() {} -func (x *Shard) ProtoReflect() protoreflect.Message { +func (x *SchemaMigration) ProtoReflect() protoreflect.Message { mi := &file_vtctldata_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -529,406 +775,409 @@ func (x *Shard) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Shard.ProtoReflect.Descriptor instead. 
-func (*Shard) Descriptor() ([]byte, []int) { +// Deprecated: Use SchemaMigration.ProtoReflect.Descriptor instead. +func (*SchemaMigration) Descriptor() ([]byte, []int) { return file_vtctldata_proto_rawDescGZIP(), []int{5} } -func (x *Shard) GetKeyspace() string { +func (x *SchemaMigration) GetUuid() string { if x != nil { - return x.Keyspace + return x.Uuid } return "" } -func (x *Shard) GetName() string { +func (x *SchemaMigration) GetKeyspace() string { if x != nil { - return x.Name + return x.Keyspace } return "" } -func (x *Shard) GetShard() *topodata.Shard { +func (x *SchemaMigration) GetShard() string { if x != nil { return x.Shard } - return nil + return "" } -// TODO: comment the hell out of this. -type Workflow struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *SchemaMigration) GetSchema() string { + if x != nil { + return x.Schema + } + return "" +} - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Source *Workflow_ReplicationLocation `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` - Target *Workflow_ReplicationLocation `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` - MaxVReplicationLag int64 `protobuf:"varint,4,opt,name=max_v_replication_lag,json=maxVReplicationLag,proto3" json:"max_v_replication_lag,omitempty"` - ShardStreams map[string]*Workflow_ShardStream `protobuf:"bytes,5,rep,name=shard_streams,json=shardStreams,proto3" json:"shard_streams,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - WorkflowType string `protobuf:"bytes,6,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` - WorkflowSubType string `protobuf:"bytes,7,opt,name=workflow_sub_type,json=workflowSubType,proto3" json:"workflow_sub_type,omitempty"` +func (x *SchemaMigration) GetTable() string { + if x != nil { + return x.Table + } + return "" } -func (x *Workflow) 
Reset() { - *x = Workflow{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *SchemaMigration) GetMigrationStatement() string { + if x != nil { + return x.MigrationStatement } + return "" } -func (x *Workflow) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *SchemaMigration) GetStrategy() SchemaMigration_Strategy { + if x != nil { + return x.Strategy + } + return SchemaMigration_VITESS } -func (*Workflow) ProtoMessage() {} +func (x *SchemaMigration) GetOptions() string { + if x != nil { + return x.Options + } + return "" +} -func (x *Workflow) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *SchemaMigration) GetAddedAt() *vttime.Time { + if x != nil { + return x.AddedAt } - return mi.MessageOf(x) + return nil } -// Deprecated: Use Workflow.ProtoReflect.Descriptor instead. 
-func (*Workflow) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{6} +func (x *SchemaMigration) GetRequestedAt() *vttime.Time { + if x != nil { + return x.RequestedAt + } + return nil } -func (x *Workflow) GetName() string { +func (x *SchemaMigration) GetReadyAt() *vttime.Time { if x != nil { - return x.Name + return x.ReadyAt } - return "" + return nil } -func (x *Workflow) GetSource() *Workflow_ReplicationLocation { +func (x *SchemaMigration) GetStartedAt() *vttime.Time { if x != nil { - return x.Source + return x.StartedAt } return nil } -func (x *Workflow) GetTarget() *Workflow_ReplicationLocation { +func (x *SchemaMigration) GetLivenessTimestamp() *vttime.Time { if x != nil { - return x.Target + return x.LivenessTimestamp } return nil } -func (x *Workflow) GetMaxVReplicationLag() int64 { +func (x *SchemaMigration) GetCompletedAt() *vttime.Time { if x != nil { - return x.MaxVReplicationLag + return x.CompletedAt } - return 0 + return nil } -func (x *Workflow) GetShardStreams() map[string]*Workflow_ShardStream { +func (x *SchemaMigration) GetCleanedUpAt() *vttime.Time { if x != nil { - return x.ShardStreams + return x.CleanedUpAt } return nil } -func (x *Workflow) GetWorkflowType() string { +func (x *SchemaMigration) GetStatus() SchemaMigration_Status { if x != nil { - return x.WorkflowType + return x.Status } - return "" + return SchemaMigration_UNKNOWN } -func (x *Workflow) GetWorkflowSubType() string { +func (x *SchemaMigration) GetLogPath() string { if x != nil { - return x.WorkflowSubType + return x.LogPath } return "" } -type AddCellInfoRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - CellInfo *topodata.CellInfo `protobuf:"bytes,2,opt,name=cell_info,json=cellInfo,proto3" json:"cell_info,omitempty"` +func (x *SchemaMigration) GetArtifacts() string { + if x != nil { + return 
x.Artifacts + } + return "" } -func (x *AddCellInfoRequest) Reset() { - *x = AddCellInfoRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *SchemaMigration) GetRetries() uint64 { + if x != nil { + return x.Retries } + return 0 } -func (x *AddCellInfoRequest) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *SchemaMigration) GetTablet() *topodata.TabletAlias { + if x != nil { + return x.Tablet + } + return nil } -func (*AddCellInfoRequest) ProtoMessage() {} +func (x *SchemaMigration) GetTabletFailure() bool { + if x != nil { + return x.TabletFailure + } + return false +} -func (x *AddCellInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *SchemaMigration) GetProgress() float32 { + if x != nil { + return x.Progress } - return mi.MessageOf(x) + return 0 } -// Deprecated: Use AddCellInfoRequest.ProtoReflect.Descriptor instead. 
-func (*AddCellInfoRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{7} +func (x *SchemaMigration) GetMigrationContext() string { + if x != nil { + return x.MigrationContext + } + return "" } -func (x *AddCellInfoRequest) GetName() string { +func (x *SchemaMigration) GetDdlAction() string { if x != nil { - return x.Name + return x.DdlAction } return "" } -func (x *AddCellInfoRequest) GetCellInfo() *topodata.CellInfo { +func (x *SchemaMigration) GetMessage() string { if x != nil { - return x.CellInfo + return x.Message } - return nil + return "" } -type AddCellInfoResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *SchemaMigration) GetEtaSeconds() int64 { + if x != nil { + return x.EtaSeconds + } + return 0 } -func (x *AddCellInfoResponse) Reset() { - *x = AddCellInfoResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *SchemaMigration) GetRowsCopied() uint64 { + if x != nil { + return x.RowsCopied } + return 0 } -func (x *AddCellInfoResponse) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *SchemaMigration) GetTableRows() int64 { + if x != nil { + return x.TableRows + } + return 0 } -func (*AddCellInfoResponse) ProtoMessage() {} +func (x *SchemaMigration) GetAddedUniqueKeys() uint32 { + if x != nil { + return x.AddedUniqueKeys + } + return 0 +} -func (x *AddCellInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *SchemaMigration) GetRemovedUniqueKeys() uint32 { + if x != nil { + return x.RemovedUniqueKeys } - return mi.MessageOf(x) + return 0 } -// Deprecated: Use 
AddCellInfoResponse.ProtoReflect.Descriptor instead. -func (*AddCellInfoResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{8} +func (x *SchemaMigration) GetLogFile() string { + if x != nil { + return x.LogFile + } + return "" } -type AddCellsAliasRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` +func (x *SchemaMigration) GetArtifactRetention() *vttime.Duration { + if x != nil { + return x.ArtifactRetention + } + return nil } -func (x *AddCellsAliasRequest) Reset() { - *x = AddCellsAliasRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *SchemaMigration) GetPostponeCompletion() bool { + if x != nil { + return x.PostponeCompletion } + return false } -func (x *AddCellsAliasRequest) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *SchemaMigration) GetRemovedUniqueKeyNames() string { + if x != nil { + return x.RemovedUniqueKeyNames + } + return "" } -func (*AddCellsAliasRequest) ProtoMessage() {} - -func (x *AddCellsAliasRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *SchemaMigration) GetDroppedNoDefaultColumnNames() string { + if x != nil { + return x.DroppedNoDefaultColumnNames } - return mi.MessageOf(x) + return "" } -// Deprecated: Use AddCellsAliasRequest.ProtoReflect.Descriptor instead. 
-func (*AddCellsAliasRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{9} +func (x *SchemaMigration) GetExpandedColumnNames() string { + if x != nil { + return x.ExpandedColumnNames + } + return "" } -func (x *AddCellsAliasRequest) GetName() string { +func (x *SchemaMigration) GetRevertibleNotes() string { if x != nil { - return x.Name + return x.RevertibleNotes } return "" } -func (x *AddCellsAliasRequest) GetCells() []string { +func (x *SchemaMigration) GetAllowConcurrent() bool { if x != nil { - return x.Cells + return x.AllowConcurrent } - return nil + return false } -type AddCellsAliasResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *SchemaMigration) GetRevertedUuid() string { + if x != nil { + return x.RevertedUuid + } + return "" } -func (x *AddCellsAliasResponse) Reset() { - *x = AddCellsAliasResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *SchemaMigration) GetIsView() bool { + if x != nil { + return x.IsView } + return false } -func (x *AddCellsAliasResponse) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *SchemaMigration) GetReadyToComplete() bool { + if x != nil { + return x.ReadyToComplete + } + return false } -func (*AddCellsAliasResponse) ProtoMessage() {} - -func (x *AddCellsAliasResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *SchemaMigration) GetVitessLivenessIndicator() int64 { + if x != nil { + return x.VitessLivenessIndicator } - return mi.MessageOf(x) + return 0 } -// Deprecated: Use AddCellsAliasResponse.ProtoReflect.Descriptor instead. 
-func (*AddCellsAliasResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{10} +func (x *SchemaMigration) GetUserThrottleRatio() float32 { + if x != nil { + return x.UserThrottleRatio + } + return 0 } -type ApplyRoutingRulesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RoutingRules *vschema.RoutingRules `protobuf:"bytes,1,opt,name=routing_rules,json=routingRules,proto3" json:"routing_rules,omitempty"` - // SkipRebuild, if set, will cause ApplyRoutingRules to skip rebuilding the - // SrvVSchema objects in each cell in RebuildCells. - SkipRebuild bool `protobuf:"varint,2,opt,name=skip_rebuild,json=skipRebuild,proto3" json:"skip_rebuild,omitempty"` - // RebuildCells limits the SrvVSchema rebuild to the specified cells. If not - // provided the SrvVSchema will be rebuilt in every cell in the topology. - // - // Ignored if SkipRebuild is set. - RebuildCells []string `protobuf:"bytes,3,rep,name=rebuild_cells,json=rebuildCells,proto3" json:"rebuild_cells,omitempty"` +func (x *SchemaMigration) GetSpecialPlan() string { + if x != nil { + return x.SpecialPlan + } + return "" } -func (x *ApplyRoutingRulesRequest) Reset() { - *x = ApplyRoutingRulesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *SchemaMigration) GetLastThrottledAt() *vttime.Time { + if x != nil { + return x.LastThrottledAt } + return nil } -func (x *ApplyRoutingRulesRequest) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *SchemaMigration) GetComponentThrottled() string { + if x != nil { + return x.ComponentThrottled + } + return "" } -func (*ApplyRoutingRulesRequest) ProtoMessage() {} +func (x *SchemaMigration) GetCancelledAt() *vttime.Time { + if x != nil { + return x.CancelledAt + } + return nil +} -func (x *ApplyRoutingRulesRequest) ProtoReflect() 
protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *SchemaMigration) GetPostponeLaunch() bool { + if x != nil { + return x.PostponeLaunch } - return mi.MessageOf(x) + return false } -// Deprecated: Use ApplyRoutingRulesRequest.ProtoReflect.Descriptor instead. -func (*ApplyRoutingRulesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{11} +func (x *SchemaMigration) GetStage() string { + if x != nil { + return x.Stage + } + return "" } -func (x *ApplyRoutingRulesRequest) GetRoutingRules() *vschema.RoutingRules { +func (x *SchemaMigration) GetCutoverAttempts() uint32 { if x != nil { - return x.RoutingRules + return x.CutoverAttempts } - return nil + return 0 } -func (x *ApplyRoutingRulesRequest) GetSkipRebuild() bool { +func (x *SchemaMigration) GetIsImmediateOperation() bool { if x != nil { - return x.SkipRebuild + return x.IsImmediateOperation } return false } -func (x *ApplyRoutingRulesRequest) GetRebuildCells() []string { +func (x *SchemaMigration) GetReviewedAt() *vttime.Time { if x != nil { - return x.RebuildCells + return x.ReviewedAt } return nil } -type ApplyRoutingRulesResponse struct { +func (x *SchemaMigration) GetReadyToCompleteAt() *vttime.Time { + if x != nil { + return x.ReadyToCompleteAt + } + return nil +} + +type Shard struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Shard *topodata.Shard `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` } -func (x *ApplyRoutingRulesResponse) Reset() { - *x = ApplyRoutingRulesResponse{} +func (x *Shard) Reset() { + *x = Shard{} if 
protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[12] + mi := &file_vtctldata_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ApplyRoutingRulesResponse) String() string { +func (x *Shard) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ApplyRoutingRulesResponse) ProtoMessage() {} +func (*Shard) ProtoMessage() {} -func (x *ApplyRoutingRulesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[12] +func (x *Shard) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -939,44 +1188,72 @@ func (x *ApplyRoutingRulesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ApplyRoutingRulesResponse.ProtoReflect.Descriptor instead. -func (*ApplyRoutingRulesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{12} +// Deprecated: Use Shard.ProtoReflect.Descriptor instead. +func (*Shard) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{6} } -type ApplyShardRoutingRulesRequest struct { +func (x *Shard) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *Shard) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Shard) GetShard() *topodata.Shard { + if x != nil { + return x.Shard + } + return nil +} + +// TODO: comment the hell out of this. 
+type Workflow struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ShardRoutingRules *vschema.ShardRoutingRules `protobuf:"bytes,1,opt,name=shard_routing_rules,json=shardRoutingRules,proto3" json:"shard_routing_rules,omitempty"` - // SkipRebuild, if set, will cause ApplyShardRoutingRules to skip rebuilding the - // SrvVSchema objects in each cell in RebuildCells. - SkipRebuild bool `protobuf:"varint,2,opt,name=skip_rebuild,json=skipRebuild,proto3" json:"skip_rebuild,omitempty"` - // RebuildCells limits the SrvVSchema rebuild to the specified cells. If not - // provided the SrvVSchema will be rebuilt in every cell in the topology. - // - // Ignored if SkipRebuild is set. - RebuildCells []string `protobuf:"bytes,3,rep,name=rebuild_cells,json=rebuildCells,proto3" json:"rebuild_cells,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Source *Workflow_ReplicationLocation `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` + Target *Workflow_ReplicationLocation `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + // This represents how long it's been since we processed any event in the + // stream. 
+ MaxVReplicationLag int64 `protobuf:"varint,4,opt,name=max_v_replication_lag,json=maxVReplicationLag,proto3" json:"max_v_replication_lag,omitempty"` + ShardStreams map[string]*Workflow_ShardStream `protobuf:"bytes,5,rep,name=shard_streams,json=shardStreams,proto3" json:"shard_streams,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + WorkflowType string `protobuf:"bytes,6,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` + WorkflowSubType string `protobuf:"bytes,7,opt,name=workflow_sub_type,json=workflowSubType,proto3" json:"workflow_sub_type,omitempty"` + // This represents the lag across all shards, between the current time and + // the timestamp of the last transaction OR heartbeat timestamp (if there + // have been no writes to replicate from the source). + MaxVReplicationTransactionLag int64 `protobuf:"varint,8,opt,name=max_v_replication_transaction_lag,json=maxVReplicationTransactionLag,proto3" json:"max_v_replication_transaction_lag,omitempty"` + // This specifies whether to defer the creation of secondary keys. 
+ DeferSecondaryKeys bool `protobuf:"varint,9,opt,name=defer_secondary_keys,json=deferSecondaryKeys,proto3" json:"defer_secondary_keys,omitempty"` } -func (x *ApplyShardRoutingRulesRequest) Reset() { - *x = ApplyShardRoutingRulesRequest{} +func (x *Workflow) Reset() { + *x = Workflow{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[13] + mi := &file_vtctldata_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ApplyShardRoutingRulesRequest) String() string { +func (x *Workflow) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ApplyShardRoutingRulesRequest) ProtoMessage() {} +func (*Workflow) ProtoMessage() {} -func (x *ApplyShardRoutingRulesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[13] +func (x *Workflow) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -987,115 +1264,100 @@ func (x *ApplyShardRoutingRulesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ApplyShardRoutingRulesRequest.ProtoReflect.Descriptor instead. -func (*ApplyShardRoutingRulesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{13} +// Deprecated: Use Workflow.ProtoReflect.Descriptor instead. 
+func (*Workflow) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{7} } -func (x *ApplyShardRoutingRulesRequest) GetShardRoutingRules() *vschema.ShardRoutingRules { +func (x *Workflow) GetName() string { if x != nil { - return x.ShardRoutingRules + return x.Name } - return nil + return "" } -func (x *ApplyShardRoutingRulesRequest) GetSkipRebuild() bool { +func (x *Workflow) GetSource() *Workflow_ReplicationLocation { if x != nil { - return x.SkipRebuild + return x.Source } - return false + return nil } -func (x *ApplyShardRoutingRulesRequest) GetRebuildCells() []string { +func (x *Workflow) GetTarget() *Workflow_ReplicationLocation { if x != nil { - return x.RebuildCells + return x.Target } return nil } -type ApplyShardRoutingRulesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *Workflow) GetMaxVReplicationLag() int64 { + if x != nil { + return x.MaxVReplicationLag + } + return 0 } -func (x *ApplyShardRoutingRulesResponse) Reset() { - *x = ApplyShardRoutingRulesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *Workflow) GetShardStreams() map[string]*Workflow_ShardStream { + if x != nil { + return x.ShardStreams } + return nil } -func (x *ApplyShardRoutingRulesResponse) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *Workflow) GetWorkflowType() string { + if x != nil { + return x.WorkflowType + } + return "" } -func (*ApplyShardRoutingRulesResponse) ProtoMessage() {} +func (x *Workflow) GetWorkflowSubType() string { + if x != nil { + return x.WorkflowSubType + } + return "" +} -func (x *ApplyShardRoutingRulesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if 
ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *Workflow) GetMaxVReplicationTransactionLag() int64 { + if x != nil { + return x.MaxVReplicationTransactionLag } - return mi.MessageOf(x) + return 0 } -// Deprecated: Use ApplyShardRoutingRulesResponse.ProtoReflect.Descriptor instead. -func (*ApplyShardRoutingRulesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{14} +func (x *Workflow) GetDeferSecondaryKeys() bool { + if x != nil { + return x.DeferSecondaryKeys + } + return false } -type ApplySchemaRequest struct { +type AddCellInfoRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Allow large schema changes which incur a longer unavailability of the database. - AllowLongUnavailability bool `protobuf:"varint,2,opt,name=allow_long_unavailability,json=allowLongUnavailability,proto3" json:"allow_long_unavailability,omitempty"` - // SQL commands to run. - Sql []string `protobuf:"bytes,3,rep,name=sql,proto3" json:"sql,omitempty"` - // Online DDL strategy, compatible with @@ddl_strategy session variable (examples: 'gh-ost', 'pt-osc', 'gh-ost --max-load=Threads_running=100'") - DdlStrategy string `protobuf:"bytes,4,opt,name=ddl_strategy,json=ddlStrategy,proto3" json:"ddl_strategy,omitempty"` - // Optional: explicit UUIDs for migration. - // If given, must match number of DDL changes - UuidList []string `protobuf:"bytes,5,rep,name=uuid_list,json=uuidList,proto3" json:"uuid_list,omitempty"` - // For Online DDL, optionally supply a custom unique string used as context for the migration(s) in this command. 
- // By default a unique context is auto-generated by Vitess - MigrationContext string `protobuf:"bytes,6,opt,name=migration_context,json=migrationContext,proto3" json:"migration_context,omitempty"` - // WaitReplicasTimeout is the duration of time to wait for replicas to catch - // up in reparenting. - WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,7,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` - // Skip pre-apply schema checks, and directly forward schema change query to shards - SkipPreflight bool `protobuf:"varint,8,opt,name=skip_preflight,json=skipPreflight,proto3" json:"skip_preflight,omitempty"` - // caller_id identifies the caller. This is the effective caller ID, - // set by the application to further identify the caller. - CallerId *vtrpc.CallerID `protobuf:"bytes,9,opt,name=caller_id,json=callerId,proto3" json:"caller_id,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + CellInfo *topodata.CellInfo `protobuf:"bytes,2,opt,name=cell_info,json=cellInfo,proto3" json:"cell_info,omitempty"` } -func (x *ApplySchemaRequest) Reset() { - *x = ApplySchemaRequest{} +func (x *AddCellInfoRequest) Reset() { + *x = AddCellInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[15] + mi := &file_vtctldata_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ApplySchemaRequest) String() string { +func (x *AddCellInfoRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ApplySchemaRequest) ProtoMessage() {} +func (*AddCellInfoRequest) ProtoMessage() {} -func (x *ApplySchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[15] +func (x *AddCellInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { @@ -1106,99 +1368,89 @@ func (x *ApplySchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ApplySchemaRequest.ProtoReflect.Descriptor instead. -func (*ApplySchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{15} +// Deprecated: Use AddCellInfoRequest.ProtoReflect.Descriptor instead. +func (*AddCellInfoRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{8} } -func (x *ApplySchemaRequest) GetKeyspace() string { +func (x *AddCellInfoRequest) GetName() string { if x != nil { - return x.Keyspace + return x.Name } return "" } -func (x *ApplySchemaRequest) GetAllowLongUnavailability() bool { - if x != nil { - return x.AllowLongUnavailability - } - return false -} - -func (x *ApplySchemaRequest) GetSql() []string { +func (x *AddCellInfoRequest) GetCellInfo() *topodata.CellInfo { if x != nil { - return x.Sql + return x.CellInfo } return nil } -func (x *ApplySchemaRequest) GetDdlStrategy() string { - if x != nil { - return x.DdlStrategy - } - return "" +type AddCellInfoResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (x *ApplySchemaRequest) GetUuidList() []string { - if x != nil { - return x.UuidList +func (x *AddCellInfoResponse) Reset() { + *x = AddCellInfoResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (x *ApplySchemaRequest) GetMigrationContext() string { - if x != nil { - return x.MigrationContext - } - return "" +func (x *AddCellInfoResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (x *ApplySchemaRequest) GetWaitReplicasTimeout() *vttime.Duration { - if x != nil { - return x.WaitReplicasTimeout - } - return nil -} +func (*AddCellInfoResponse) ProtoMessage() {} -func (x 
*ApplySchemaRequest) GetSkipPreflight() bool { - if x != nil { - return x.SkipPreflight +func (x *AddCellInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return false + return mi.MessageOf(x) } -func (x *ApplySchemaRequest) GetCallerId() *vtrpc.CallerID { - if x != nil { - return x.CallerId - } - return nil +// Deprecated: Use AddCellInfoResponse.ProtoReflect.Descriptor instead. +func (*AddCellInfoResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{9} } -type ApplySchemaResponse struct { +type AddCellsAliasRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - UuidList []string `protobuf:"bytes,1,rep,name=uuid_list,json=uuidList,proto3" json:"uuid_list,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` } -func (x *ApplySchemaResponse) Reset() { - *x = ApplySchemaResponse{} +func (x *AddCellsAliasRequest) Reset() { + *x = AddCellsAliasRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[16] + mi := &file_vtctldata_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ApplySchemaResponse) String() string { +func (x *AddCellsAliasRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ApplySchemaResponse) ProtoMessage() {} +func (*AddCellsAliasRequest) ProtoMessage() {} -func (x *ApplySchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[16] +func (x *AddCellsAliasRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != 
nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1209,48 +1461,48 @@ func (x *ApplySchemaResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ApplySchemaResponse.ProtoReflect.Descriptor instead. -func (*ApplySchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{16} +// Deprecated: Use AddCellsAliasRequest.ProtoReflect.Descriptor instead. +func (*AddCellsAliasRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{10} } -func (x *ApplySchemaResponse) GetUuidList() []string { +func (x *AddCellsAliasRequest) GetName() string { if x != nil { - return x.UuidList + return x.Name + } + return "" +} + +func (x *AddCellsAliasRequest) GetCells() []string { + if x != nil { + return x.Cells } return nil } -type ApplyVSchemaRequest struct { +type AddCellsAliasResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - SkipRebuild bool `protobuf:"varint,2,opt,name=skip_rebuild,json=skipRebuild,proto3" json:"skip_rebuild,omitempty"` - DryRun bool `protobuf:"varint,3,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` - Cells []string `protobuf:"bytes,4,rep,name=cells,proto3" json:"cells,omitempty"` - VSchema *vschema.Keyspace `protobuf:"bytes,5,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"` - Sql string `protobuf:"bytes,6,opt,name=sql,proto3" json:"sql,omitempty"` } -func (x *ApplyVSchemaRequest) Reset() { - *x = ApplyVSchemaRequest{} +func (x *AddCellsAliasResponse) Reset() { + *x = AddCellsAliasResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[17] + mi := &file_vtctldata_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ApplyVSchemaRequest) 
String() string { +func (x *AddCellsAliasResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ApplyVSchemaRequest) ProtoMessage() {} +func (*AddCellsAliasResponse) ProtoMessage() {} -func (x *ApplyVSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[17] +func (x *AddCellsAliasResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1261,78 +1513,103 @@ func (x *ApplyVSchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ApplyVSchemaRequest.ProtoReflect.Descriptor instead. -func (*ApplyVSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{17} +// Deprecated: Use AddCellsAliasResponse.ProtoReflect.Descriptor instead. +func (*AddCellsAliasResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{11} } -func (x *ApplyVSchemaRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" +type ApplyRoutingRulesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RoutingRules *vschema.RoutingRules `protobuf:"bytes,1,opt,name=routing_rules,json=routingRules,proto3" json:"routing_rules,omitempty"` + // SkipRebuild, if set, will cause ApplyRoutingRules to skip rebuilding the + // SrvVSchema objects in each cell in RebuildCells. + SkipRebuild bool `protobuf:"varint,2,opt,name=skip_rebuild,json=skipRebuild,proto3" json:"skip_rebuild,omitempty"` + // RebuildCells limits the SrvVSchema rebuild to the specified cells. If not + // provided the SrvVSchema will be rebuilt in every cell in the topology. + // + // Ignored if SkipRebuild is set. 
+ RebuildCells []string `protobuf:"bytes,3,rep,name=rebuild_cells,json=rebuildCells,proto3" json:"rebuild_cells,omitempty"` } -func (x *ApplyVSchemaRequest) GetSkipRebuild() bool { - if x != nil { - return x.SkipRebuild - } - return false +func (x *ApplyRoutingRulesRequest) Reset() { + *x = ApplyRoutingRulesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (x *ApplyVSchemaRequest) GetDryRun() bool { - if x != nil { - return x.DryRun +func (x *ApplyRoutingRulesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyRoutingRulesRequest) ProtoMessage() {} + +func (x *ApplyRoutingRulesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return false + return mi.MessageOf(x) } -func (x *ApplyVSchemaRequest) GetCells() []string { +// Deprecated: Use ApplyRoutingRulesRequest.ProtoReflect.Descriptor instead. 
+func (*ApplyRoutingRulesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{12} +} + +func (x *ApplyRoutingRulesRequest) GetRoutingRules() *vschema.RoutingRules { if x != nil { - return x.Cells + return x.RoutingRules } return nil } -func (x *ApplyVSchemaRequest) GetVSchema() *vschema.Keyspace { +func (x *ApplyRoutingRulesRequest) GetSkipRebuild() bool { if x != nil { - return x.VSchema + return x.SkipRebuild } - return nil + return false } -func (x *ApplyVSchemaRequest) GetSql() string { +func (x *ApplyRoutingRulesRequest) GetRebuildCells() []string { if x != nil { - return x.Sql + return x.RebuildCells } - return "" + return nil } -type ApplyVSchemaResponse struct { +type ApplyRoutingRulesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - VSchema *vschema.Keyspace `protobuf:"bytes,1,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"` } -func (x *ApplyVSchemaResponse) Reset() { - *x = ApplyVSchemaResponse{} +func (x *ApplyRoutingRulesResponse) Reset() { + *x = ApplyRoutingRulesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[18] + mi := &file_vtctldata_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ApplyVSchemaResponse) String() string { +func (x *ApplyRoutingRulesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ApplyVSchemaResponse) ProtoMessage() {} +func (*ApplyRoutingRulesResponse) ProtoMessage() {} -func (x *ApplyVSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[18] +func (x *ApplyRoutingRulesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1343,54 +1620,44 @@ func (x *ApplyVSchemaResponse) ProtoReflect() 
protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ApplyVSchemaResponse.ProtoReflect.Descriptor instead. -func (*ApplyVSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{18} -} - -func (x *ApplyVSchemaResponse) GetVSchema() *vschema.Keyspace { - if x != nil { - return x.VSchema - } - return nil +// Deprecated: Use ApplyRoutingRulesResponse.ProtoReflect.Descriptor instead. +func (*ApplyRoutingRulesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{13} } -type BackupRequest struct { +type ApplyShardRoutingRulesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - // AllowPrimary allows the backup to proceed if TabletAlias is a PRIMARY. + ShardRoutingRules *vschema.ShardRoutingRules `protobuf:"bytes,1,opt,name=shard_routing_rules,json=shardRoutingRules,proto3" json:"shard_routing_rules,omitempty"` + // SkipRebuild, if set, will cause ApplyShardRoutingRules to skip rebuilding the + // SrvVSchema objects in each cell in RebuildCells. + SkipRebuild bool `protobuf:"varint,2,opt,name=skip_rebuild,json=skipRebuild,proto3" json:"skip_rebuild,omitempty"` + // RebuildCells limits the SrvVSchema rebuild to the specified cells. If not + // provided the SrvVSchema will be rebuilt in every cell in the topology. // - // WARNING: If using the builtin backup engine, this will shutdown mysqld on - // the primary for the duration of the backup, and no writes will be possible. - AllowPrimary bool `protobuf:"varint,2,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` - // Concurrency specifies the number of compression/checksum jobs to run - // simultaneously. 
- Concurrency uint64 `protobuf:"varint,3,opt,name=concurrency,proto3" json:"concurrency,omitempty"` - // IncrementalFromPos indicates a position of a previous backup. When this value is non-empty - // then the backup becomes incremental and applies as of given position. - IncrementalFromPos string `protobuf:"bytes,4,opt,name=incremental_from_pos,json=incrementalFromPos,proto3" json:"incremental_from_pos,omitempty"` + // Ignored if SkipRebuild is set. + RebuildCells []string `protobuf:"bytes,3,rep,name=rebuild_cells,json=rebuildCells,proto3" json:"rebuild_cells,omitempty"` } -func (x *BackupRequest) Reset() { - *x = BackupRequest{} +func (x *ApplyShardRoutingRulesRequest) Reset() { + *x = ApplyShardRoutingRulesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[19] + mi := &file_vtctldata_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *BackupRequest) String() string { +func (x *ApplyShardRoutingRulesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*BackupRequest) ProtoMessage() {} +func (*ApplyShardRoutingRulesRequest) ProtoMessage() {} -func (x *BackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[19] +func (x *ApplyShardRoutingRulesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1401,68 +1668,55 @@ func (x *BackupRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BackupRequest.ProtoReflect.Descriptor instead. -func (*BackupRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{19} +// Deprecated: Use ApplyShardRoutingRulesRequest.ProtoReflect.Descriptor instead. 
+func (*ApplyShardRoutingRulesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{14} } -func (x *BackupRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *ApplyShardRoutingRulesRequest) GetShardRoutingRules() *vschema.ShardRoutingRules { if x != nil { - return x.TabletAlias + return x.ShardRoutingRules } return nil } -func (x *BackupRequest) GetAllowPrimary() bool { +func (x *ApplyShardRoutingRulesRequest) GetSkipRebuild() bool { if x != nil { - return x.AllowPrimary + return x.SkipRebuild } return false } -func (x *BackupRequest) GetConcurrency() uint64 { - if x != nil { - return x.Concurrency - } - return 0 -} - -func (x *BackupRequest) GetIncrementalFromPos() string { +func (x *ApplyShardRoutingRulesRequest) GetRebuildCells() []string { if x != nil { - return x.IncrementalFromPos + return x.RebuildCells } - return "" + return nil } -type BackupResponse struct { +type ApplyShardRoutingRulesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - // TabletAlias is the alias being used for the backup. 
- TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` - Event *logutil.Event `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"` } -func (x *BackupResponse) Reset() { - *x = BackupResponse{} +func (x *ApplyShardRoutingRulesResponse) Reset() { + *x = ApplyShardRoutingRulesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[20] + mi := &file_vtctldata_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *BackupResponse) String() string { +func (x *ApplyShardRoutingRulesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*BackupResponse) ProtoMessage() {} +func (*ApplyShardRoutingRulesResponse) ProtoMessage() {} -func (x *BackupResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[20] +func (x *ApplyShardRoutingRulesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1473,71 +1727,54 @@ func (x *BackupResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BackupResponse.ProtoReflect.Descriptor instead. 
-func (*BackupResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{20} -} - -func (x *BackupResponse) GetTabletAlias() *topodata.TabletAlias { - if x != nil { - return x.TabletAlias - } - return nil -} - -func (x *BackupResponse) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *BackupResponse) GetShard() string { - if x != nil { - return x.Shard - } - return "" -} - -func (x *BackupResponse) GetEvent() *logutil.Event { - if x != nil { - return x.Event - } - return nil +// Deprecated: Use ApplyShardRoutingRulesResponse.ProtoReflect.Descriptor instead. +func (*ApplyShardRoutingRulesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{15} } -type BackupShardRequest struct { +type ApplySchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - // AllowPrimary allows the backup to occur on a PRIMARY tablet. See - // BackupRequest.AllowPrimary for warnings and caveats. - AllowPrimary bool `protobuf:"varint,3,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` - // Concurrency specifies the number of compression/checksum jobs to run - // simultaneously. - Concurrency uint64 `protobuf:"varint,4,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + // SQL commands to run. + Sql []string `protobuf:"bytes,3,rep,name=sql,proto3" json:"sql,omitempty"` + // Online DDL strategy, compatible with @@ddl_strategy session variable (examples: 'gh-ost', 'pt-osc', 'gh-ost --max-load=Threads_running=100'") + DdlStrategy string `protobuf:"bytes,4,opt,name=ddl_strategy,json=ddlStrategy,proto3" json:"ddl_strategy,omitempty"` + // Optional: explicit UUIDs for migration. 
+ // If given, must match number of DDL changes + UuidList []string `protobuf:"bytes,5,rep,name=uuid_list,json=uuidList,proto3" json:"uuid_list,omitempty"` + // For Online DDL, optionally supply a custom unique string used as context for the migration(s) in this command. + // By default a unique context is auto-generated by Vitess + MigrationContext string `protobuf:"bytes,6,opt,name=migration_context,json=migrationContext,proto3" json:"migration_context,omitempty"` + // WaitReplicasTimeout is the duration of time to wait for replicas to catch + // up in reparenting. + WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,7,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` + // caller_id identifies the caller. This is the effective caller ID, + // set by the application to further identify the caller. + CallerId *vtrpc.CallerID `protobuf:"bytes,9,opt,name=caller_id,json=callerId,proto3" json:"caller_id,omitempty"` + // BatchSize indicates how many queries to apply together + BatchSize int64 `protobuf:"varint,10,opt,name=batch_size,json=batchSize,proto3" json:"batch_size,omitempty"` } -func (x *BackupShardRequest) Reset() { - *x = BackupShardRequest{} +func (x *ApplySchemaRequest) Reset() { + *x = ApplySchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[21] + mi := &file_vtctldata_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *BackupShardRequest) String() string { +func (x *ApplySchemaRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*BackupShardRequest) ProtoMessage() {} +func (*ApplySchemaRequest) ProtoMessage() {} -func (x *BackupShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[21] +func (x *ApplySchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1548,129 +1785,93 @@ func (x *BackupShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BackupShardRequest.ProtoReflect.Descriptor instead. -func (*BackupShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{21} +// Deprecated: Use ApplySchemaRequest.ProtoReflect.Descriptor instead. +func (*ApplySchemaRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{16} } -func (x *BackupShardRequest) GetKeyspace() string { +func (x *ApplySchemaRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *BackupShardRequest) GetShard() string { +func (x *ApplySchemaRequest) GetSql() []string { if x != nil { - return x.Shard + return x.Sql } - return "" + return nil } -func (x *BackupShardRequest) GetAllowPrimary() bool { +func (x *ApplySchemaRequest) GetDdlStrategy() string { if x != nil { - return x.AllowPrimary + return x.DdlStrategy } - return false + return "" } -func (x *BackupShardRequest) GetConcurrency() uint64 { +func (x *ApplySchemaRequest) GetUuidList() []string { if x != nil { - return x.Concurrency - } - return 0 -} - -type ChangeTabletTypeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - DbType topodata.TabletType `protobuf:"varint,2,opt,name=db_type,json=dbType,proto3,enum=topodata.TabletType" json:"db_type,omitempty"` - DryRun bool `protobuf:"varint,3,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` -} - -func (x *ChangeTabletTypeRequest) Reset() { - *x = ChangeTabletTypeRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - 
ms.StoreMessageInfo(mi) + return x.UuidList } + return nil } -func (x *ChangeTabletTypeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ChangeTabletTypeRequest) ProtoMessage() {} - -func (x *ChangeTabletTypeRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *ApplySchemaRequest) GetMigrationContext() string { + if x != nil { + return x.MigrationContext } - return mi.MessageOf(x) -} - -// Deprecated: Use ChangeTabletTypeRequest.ProtoReflect.Descriptor instead. -func (*ChangeTabletTypeRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{22} + return "" } -func (x *ChangeTabletTypeRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *ApplySchemaRequest) GetWaitReplicasTimeout() *vttime.Duration { if x != nil { - return x.TabletAlias + return x.WaitReplicasTimeout } return nil } -func (x *ChangeTabletTypeRequest) GetDbType() topodata.TabletType { +func (x *ApplySchemaRequest) GetCallerId() *vtrpc.CallerID { if x != nil { - return x.DbType + return x.CallerId } - return topodata.TabletType(0) + return nil } -func (x *ChangeTabletTypeRequest) GetDryRun() bool { +func (x *ApplySchemaRequest) GetBatchSize() int64 { if x != nil { - return x.DryRun + return x.BatchSize } - return false + return 0 } -type ChangeTabletTypeResponse struct { +type ApplySchemaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - BeforeTablet *topodata.Tablet `protobuf:"bytes,1,opt,name=before_tablet,json=beforeTablet,proto3" json:"before_tablet,omitempty"` - AfterTablet *topodata.Tablet `protobuf:"bytes,2,opt,name=after_tablet,json=afterTablet,proto3" json:"after_tablet,omitempty"` - WasDryRun bool 
`protobuf:"varint,3,opt,name=was_dry_run,json=wasDryRun,proto3" json:"was_dry_run,omitempty"` + UuidList []string `protobuf:"bytes,1,rep,name=uuid_list,json=uuidList,proto3" json:"uuid_list,omitempty"` + RowsAffectedByShard map[string]uint64 `protobuf:"bytes,2,rep,name=rows_affected_by_shard,json=rowsAffectedByShard,proto3" json:"rows_affected_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } -func (x *ChangeTabletTypeResponse) Reset() { - *x = ChangeTabletTypeResponse{} +func (x *ApplySchemaResponse) Reset() { + *x = ApplySchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[23] + mi := &file_vtctldata_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ChangeTabletTypeResponse) String() string { +func (x *ApplySchemaResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ChangeTabletTypeResponse) ProtoMessage() {} +func (*ApplySchemaResponse) ProtoMessage() {} -func (x *ChangeTabletTypeResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[23] +func (x *ApplySchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1681,79 +1882,55 @@ func (x *ChangeTabletTypeResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ChangeTabletTypeResponse.ProtoReflect.Descriptor instead. -func (*ChangeTabletTypeResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{23} +// Deprecated: Use ApplySchemaResponse.ProtoReflect.Descriptor instead. 
+func (*ApplySchemaResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{17} } -func (x *ChangeTabletTypeResponse) GetBeforeTablet() *topodata.Tablet { +func (x *ApplySchemaResponse) GetUuidList() []string { if x != nil { - return x.BeforeTablet + return x.UuidList } return nil } -func (x *ChangeTabletTypeResponse) GetAfterTablet() *topodata.Tablet { +func (x *ApplySchemaResponse) GetRowsAffectedByShard() map[string]uint64 { if x != nil { - return x.AfterTablet + return x.RowsAffectedByShard } return nil } -func (x *ChangeTabletTypeResponse) GetWasDryRun() bool { - if x != nil { - return x.WasDryRun - } - return false -} - -type CreateKeyspaceRequest struct { +type ApplyVSchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Name is the name of the keyspace. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Force proceeds with the request even if the keyspace already exists. - Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` - // AllowEmptyVSchema allows a keyspace to be created with no vschema. - AllowEmptyVSchema bool `protobuf:"varint,3,opt,name=allow_empty_v_schema,json=allowEmptyVSchema,proto3" json:"allow_empty_v_schema,omitempty"` - // ServedFroms specifies a set of db_type:keyspace pairs used to serve - // traffic for the keyspace. - ServedFroms []*topodata.Keyspace_ServedFrom `protobuf:"bytes,6,rep,name=served_froms,json=servedFroms,proto3" json:"served_froms,omitempty"` - // Type is the type of the keyspace to create. - Type topodata.KeyspaceType `protobuf:"varint,7,opt,name=type,proto3,enum=topodata.KeyspaceType" json:"type,omitempty"` - // BaseKeyspace specifies the base keyspace for SNAPSHOT keyspaces. It is - // required to create a SNAPSHOT keyspace. 
- BaseKeyspace string `protobuf:"bytes,8,opt,name=base_keyspace,json=baseKeyspace,proto3" json:"base_keyspace,omitempty"` - // SnapshotTime specifies the snapshot time for this keyspace. It is required - // to create a SNAPSHOT keyspace. - SnapshotTime *vttime.Time `protobuf:"bytes,9,opt,name=snapshot_time,json=snapshotTime,proto3" json:"snapshot_time,omitempty"` - // DurabilityPolicy is the durability policy to be - // used for this keyspace. - DurabilityPolicy string `protobuf:"bytes,10,opt,name=durability_policy,json=durabilityPolicy,proto3" json:"durability_policy,omitempty"` - // SidecarDBName is the name of the sidecar database that - // each vttablet in the keyspace will use. - SidecarDbName string `protobuf:"bytes,11,opt,name=sidecar_db_name,json=sidecarDbName,proto3" json:"sidecar_db_name,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + SkipRebuild bool `protobuf:"varint,2,opt,name=skip_rebuild,json=skipRebuild,proto3" json:"skip_rebuild,omitempty"` + DryRun bool `protobuf:"varint,3,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` + Cells []string `protobuf:"bytes,4,rep,name=cells,proto3" json:"cells,omitempty"` + VSchema *vschema.Keyspace `protobuf:"bytes,5,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"` + Sql string `protobuf:"bytes,6,opt,name=sql,proto3" json:"sql,omitempty"` } -func (x *CreateKeyspaceRequest) Reset() { - *x = CreateKeyspaceRequest{} +func (x *ApplyVSchemaRequest) Reset() { + *x = ApplyVSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[24] + mi := &file_vtctldata_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *CreateKeyspaceRequest) String() string { +func (x *ApplyVSchemaRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CreateKeyspaceRequest) ProtoMessage() {} +func (*ApplyVSchemaRequest) ProtoMessage() {} -func (x 
*CreateKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[24] +func (x *ApplyVSchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1764,100 +1941,78 @@ func (x *CreateKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*CreateKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{24} +// Deprecated: Use ApplyVSchemaRequest.ProtoReflect.Descriptor instead. +func (*ApplyVSchemaRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{18} } -func (x *CreateKeyspaceRequest) GetName() string { +func (x *ApplyVSchemaRequest) GetKeyspace() string { if x != nil { - return x.Name + return x.Keyspace } return "" } -func (x *CreateKeyspaceRequest) GetForce() bool { +func (x *ApplyVSchemaRequest) GetSkipRebuild() bool { if x != nil { - return x.Force + return x.SkipRebuild } return false } -func (x *CreateKeyspaceRequest) GetAllowEmptyVSchema() bool { +func (x *ApplyVSchemaRequest) GetDryRun() bool { if x != nil { - return x.AllowEmptyVSchema + return x.DryRun } return false } -func (x *CreateKeyspaceRequest) GetServedFroms() []*topodata.Keyspace_ServedFrom { +func (x *ApplyVSchemaRequest) GetCells() []string { if x != nil { - return x.ServedFroms + return x.Cells } return nil } -func (x *CreateKeyspaceRequest) GetType() topodata.KeyspaceType { - if x != nil { - return x.Type - } - return topodata.KeyspaceType(0) -} - -func (x *CreateKeyspaceRequest) GetBaseKeyspace() string { - if x != nil { - return x.BaseKeyspace - } - return "" -} - -func (x *CreateKeyspaceRequest) GetSnapshotTime() *vttime.Time { +func (x *ApplyVSchemaRequest) GetVSchema() *vschema.Keyspace { if x != nil { 
- return x.SnapshotTime + return x.VSchema } return nil } -func (x *CreateKeyspaceRequest) GetDurabilityPolicy() string { - if x != nil { - return x.DurabilityPolicy - } - return "" -} - -func (x *CreateKeyspaceRequest) GetSidecarDbName() string { +func (x *ApplyVSchemaRequest) GetSql() string { if x != nil { - return x.SidecarDbName + return x.Sql } return "" } -type CreateKeyspaceResponse struct { +type ApplyVSchemaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspace is the newly-created keyspace. - Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + VSchema *vschema.Keyspace `protobuf:"bytes,1,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"` } -func (x *CreateKeyspaceResponse) Reset() { - *x = CreateKeyspaceResponse{} +func (x *ApplyVSchemaResponse) Reset() { + *x = ApplyVSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[25] + mi := &file_vtctldata_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *CreateKeyspaceResponse) String() string { +func (x *ApplyVSchemaResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CreateKeyspaceResponse) ProtoMessage() {} +func (*ApplyVSchemaResponse) ProtoMessage() {} -func (x *CreateKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[25] +func (x *ApplyVSchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1868,52 +2023,57 @@ func (x *CreateKeyspaceResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateKeyspaceResponse.ProtoReflect.Descriptor instead. 
-func (*CreateKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{25} +// Deprecated: Use ApplyVSchemaResponse.ProtoReflect.Descriptor instead. +func (*ApplyVSchemaResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{19} } -func (x *CreateKeyspaceResponse) GetKeyspace() *Keyspace { +func (x *ApplyVSchemaResponse) GetVSchema() *vschema.Keyspace { if x != nil { - return x.Keyspace + return x.VSchema } return nil } -type CreateShardRequest struct { +type BackupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspace is the name of the keyspace to create the shard in. - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // ShardName is the name of the shard to create. E.g. "-" or "-80". - ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` - // Force treats an attempt to create a shard that already exists as a - // non-error. - Force bool `protobuf:"varint,3,opt,name=force,proto3" json:"force,omitempty"` - // IncludeParent creates the parent keyspace as an empty BASE keyspace, if it - // doesn't already exist. - IncludeParent bool `protobuf:"varint,4,opt,name=include_parent,json=includeParent,proto3" json:"include_parent,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + // AllowPrimary allows the backup to proceed if TabletAlias is a PRIMARY. + // + // WARNING: If using the builtin backup engine, this will shutdown mysqld on + // the primary for the duration of the backup, and no writes will be possible. + AllowPrimary bool `protobuf:"varint,2,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` + // Concurrency specifies the number of compression/checksum jobs to run + // simultaneously. 
+ Concurrency uint64 `protobuf:"varint,3,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + // IncrementalFromPos indicates a position of a previous backup. When this value is non-empty + // then the backup becomes incremental and applies as of given position. + IncrementalFromPos string `protobuf:"bytes,4,opt,name=incremental_from_pos,json=incrementalFromPos,proto3" json:"incremental_from_pos,omitempty"` + // UpgradeSafe indicates if the backup should be taken with innodb_fast_shutdown=0 + // so that it's a backup that can be used for an upgrade. + UpgradeSafe bool `protobuf:"varint,5,opt,name=upgrade_safe,json=upgradeSafe,proto3" json:"upgrade_safe,omitempty"` } -func (x *CreateShardRequest) Reset() { - *x = CreateShardRequest{} +func (x *BackupRequest) Reset() { + *x = BackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[26] + mi := &file_vtctldata_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *CreateShardRequest) String() string { +func (x *BackupRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CreateShardRequest) ProtoMessage() {} +func (*BackupRequest) ProtoMessage() {} -func (x *CreateShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[26] +func (x *BackupRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1924,71 +2084,75 @@ func (x *CreateShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateShardRequest.ProtoReflect.Descriptor instead. -func (*CreateShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{26} +// Deprecated: Use BackupRequest.ProtoReflect.Descriptor instead. 
+func (*BackupRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{20} } -func (x *CreateShardRequest) GetKeyspace() string { +func (x *BackupRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Keyspace + return x.TabletAlias } - return "" + return nil } -func (x *CreateShardRequest) GetShardName() string { +func (x *BackupRequest) GetAllowPrimary() bool { if x != nil { - return x.ShardName + return x.AllowPrimary } - return "" + return false } -func (x *CreateShardRequest) GetForce() bool { +func (x *BackupRequest) GetConcurrency() uint64 { if x != nil { - return x.Force + return x.Concurrency } - return false + return 0 } -func (x *CreateShardRequest) GetIncludeParent() bool { +func (x *BackupRequest) GetIncrementalFromPos() string { if x != nil { - return x.IncludeParent + return x.IncrementalFromPos + } + return "" +} + +func (x *BackupRequest) GetUpgradeSafe() bool { + if x != nil { + return x.UpgradeSafe } return false } -type CreateShardResponse struct { +type BackupResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspace is the created keyspace. It is set only if IncludeParent was - // specified in the request and the parent keyspace needed to be created. - Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Shard is the newly-created shard object. - Shard *Shard `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - // ShardAlreadyExists is set if Force was specified in the request and the - // shard already existed. - ShardAlreadyExists bool `protobuf:"varint,3,opt,name=shard_already_exists,json=shardAlreadyExists,proto3" json:"shard_already_exists,omitempty"` + // TabletAlias is the alias being used for the backup. 
+ TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` + Event *logutil.Event `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"` } -func (x *CreateShardResponse) Reset() { - *x = CreateShardResponse{} +func (x *BackupResponse) Reset() { + *x = BackupResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[27] + mi := &file_vtctldata_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *CreateShardResponse) String() string { +func (x *BackupResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CreateShardResponse) ProtoMessage() {} +func (*BackupResponse) ProtoMessage() {} -func (x *CreateShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[27] +func (x *BackupResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1999,58 +2163,77 @@ func (x *CreateShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateShardResponse.ProtoReflect.Descriptor instead. -func (*CreateShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{27} +// Deprecated: Use BackupResponse.ProtoReflect.Descriptor instead. 
+func (*BackupResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{21} } -func (x *CreateShardResponse) GetKeyspace() *Keyspace { +func (x *BackupResponse) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Keyspace + return x.TabletAlias } return nil } -func (x *CreateShardResponse) GetShard() *Shard { +func (x *BackupResponse) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *BackupResponse) GetShard() string { if x != nil { return x.Shard } - return nil + return "" } -func (x *CreateShardResponse) GetShardAlreadyExists() bool { +func (x *BackupResponse) GetEvent() *logutil.Event { if x != nil { - return x.ShardAlreadyExists + return x.Event } - return false + return nil } -type DeleteCellInfoRequest struct { +type BackupShardRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // AllowPrimary allows the backup to occur on a PRIMARY tablet. See + // BackupRequest.AllowPrimary for warnings and caveats. + AllowPrimary bool `protobuf:"varint,3,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` + // Concurrency specifies the number of compression/checksum jobs to run + // simultaneously. + Concurrency uint64 `protobuf:"varint,4,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + // UpgradeSafe indicates if the backup should be taken with innodb_fast_shutdown=0 + // so that it's a backup that can be used for an upgrade. 
+ UpgradeSafe bool `protobuf:"varint,5,opt,name=upgrade_safe,json=upgradeSafe,proto3" json:"upgrade_safe,omitempty"` + // IncrementalFromPos indicates a position of a previous backup. When this value is non-empty + // then the backup becomes incremental and applies as of given position. + IncrementalFromPos string `protobuf:"bytes,6,opt,name=incremental_from_pos,json=incrementalFromPos,proto3" json:"incremental_from_pos,omitempty"` } -func (x *DeleteCellInfoRequest) Reset() { - *x = DeleteCellInfoRequest{} +func (x *BackupShardRequest) Reset() { + *x = BackupShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[28] + mi := &file_vtctldata_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteCellInfoRequest) String() string { +func (x *BackupShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteCellInfoRequest) ProtoMessage() {} +func (*BackupShardRequest) ProtoMessage() {} -func (x *DeleteCellInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[28] +func (x *BackupShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2061,88 +2244,79 @@ func (x *DeleteCellInfoRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteCellInfoRequest.ProtoReflect.Descriptor instead. -func (*DeleteCellInfoRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{28} +// Deprecated: Use BackupShardRequest.ProtoReflect.Descriptor instead. 
+func (*BackupShardRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{22} } -func (x *DeleteCellInfoRequest) GetName() string { +func (x *BackupShardRequest) GetKeyspace() string { if x != nil { - return x.Name + return x.Keyspace } return "" } -func (x *DeleteCellInfoRequest) GetForce() bool { +func (x *BackupShardRequest) GetShard() string { if x != nil { - return x.Force + return x.Shard } - return false -} - -type DeleteCellInfoResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields + return "" } -func (x *DeleteCellInfoResponse) Reset() { - *x = DeleteCellInfoResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *BackupShardRequest) GetAllowPrimary() bool { + if x != nil { + return x.AllowPrimary } + return false } -func (x *DeleteCellInfoResponse) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *BackupShardRequest) GetConcurrency() uint64 { + if x != nil { + return x.Concurrency + } + return 0 } -func (*DeleteCellInfoResponse) ProtoMessage() {} - -func (x *DeleteCellInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *BackupShardRequest) GetUpgradeSafe() bool { + if x != nil { + return x.UpgradeSafe } - return mi.MessageOf(x) + return false } -// Deprecated: Use DeleteCellInfoResponse.ProtoReflect.Descriptor instead. 
-func (*DeleteCellInfoResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{29} +func (x *BackupShardRequest) GetIncrementalFromPos() string { + if x != nil { + return x.IncrementalFromPos + } + return "" } -type DeleteCellsAliasRequest struct { +type CancelSchemaMigrationRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` } -func (x *DeleteCellsAliasRequest) Reset() { - *x = DeleteCellsAliasRequest{} +func (x *CancelSchemaMigrationRequest) Reset() { + *x = CancelSchemaMigrationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[30] + mi := &file_vtctldata_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteCellsAliasRequest) String() string { +func (x *CancelSchemaMigrationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteCellsAliasRequest) ProtoMessage() {} +func (*CancelSchemaMigrationRequest) ProtoMessage() {} -func (x *DeleteCellsAliasRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[30] +func (x *CancelSchemaMigrationRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2153,41 +2327,50 @@ func (x *DeleteCellsAliasRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteCellsAliasRequest.ProtoReflect.Descriptor instead. 
-func (*DeleteCellsAliasRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{30} +// Deprecated: Use CancelSchemaMigrationRequest.ProtoReflect.Descriptor instead. +func (*CancelSchemaMigrationRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{23} } -func (x *DeleteCellsAliasRequest) GetName() string { +func (x *CancelSchemaMigrationRequest) GetKeyspace() string { if x != nil { - return x.Name + return x.Keyspace } return "" } -type DeleteCellsAliasResponse struct { +func (x *CancelSchemaMigrationRequest) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +type CancelSchemaMigrationResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + RowsAffectedByShard map[string]uint64 `protobuf:"bytes,1,rep,name=rows_affected_by_shard,json=rowsAffectedByShard,proto3" json:"rows_affected_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } -func (x *DeleteCellsAliasResponse) Reset() { - *x = DeleteCellsAliasResponse{} +func (x *CancelSchemaMigrationResponse) Reset() { + *x = CancelSchemaMigrationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[31] + mi := &file_vtctldata_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteCellsAliasResponse) String() string { +func (x *CancelSchemaMigrationResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteCellsAliasResponse) ProtoMessage() {} +func (*CancelSchemaMigrationResponse) ProtoMessage() {} -func (x *DeleteCellsAliasResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[31] +func (x *CancelSchemaMigrationResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2198,44 +2381,45 @@ func (x *DeleteCellsAliasResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteCellsAliasResponse.ProtoReflect.Descriptor instead. -func (*DeleteCellsAliasResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{31} +// Deprecated: Use CancelSchemaMigrationResponse.ProtoReflect.Descriptor instead. +func (*CancelSchemaMigrationResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{24} } -type DeleteKeyspaceRequest struct { +func (x *CancelSchemaMigrationResponse) GetRowsAffectedByShard() map[string]uint64 { + if x != nil { + return x.RowsAffectedByShard + } + return nil +} + +type ChangeTabletTypeRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspace is the name of the keyspace to delete. - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Recursive causes all shards in the keyspace to be recursively deleted - // before deleting the keyspace. It is an error to call DeleteKeyspace on a - // non-empty keyspace without also specifying Recursive. - Recursive bool `protobuf:"varint,2,opt,name=recursive,proto3" json:"recursive,omitempty"` - // Force allows a keyspace to be deleted even if the keyspace lock cannot be - // obtained. This should only be used to force-clean a keyspace. 
- Force bool `protobuf:"varint,3,opt,name=force,proto3" json:"force,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + DbType topodata.TabletType `protobuf:"varint,2,opt,name=db_type,json=dbType,proto3,enum=topodata.TabletType" json:"db_type,omitempty"` + DryRun bool `protobuf:"varint,3,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` } -func (x *DeleteKeyspaceRequest) Reset() { - *x = DeleteKeyspaceRequest{} +func (x *ChangeTabletTypeRequest) Reset() { + *x = ChangeTabletTypeRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[32] + mi := &file_vtctldata_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteKeyspaceRequest) String() string { +func (x *ChangeTabletTypeRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteKeyspaceRequest) ProtoMessage() {} +func (*ChangeTabletTypeRequest) ProtoMessage() {} -func (x *DeleteKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[32] +func (x *ChangeTabletTypeRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2246,55 +2430,59 @@ func (x *DeleteKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*DeleteKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{32} +// Deprecated: Use ChangeTabletTypeRequest.ProtoReflect.Descriptor instead. 
+func (*ChangeTabletTypeRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{25} } -func (x *DeleteKeyspaceRequest) GetKeyspace() string { +func (x *ChangeTabletTypeRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Keyspace + return x.TabletAlias } - return "" + return nil } -func (x *DeleteKeyspaceRequest) GetRecursive() bool { +func (x *ChangeTabletTypeRequest) GetDbType() topodata.TabletType { if x != nil { - return x.Recursive + return x.DbType } - return false + return topodata.TabletType(0) } -func (x *DeleteKeyspaceRequest) GetForce() bool { +func (x *ChangeTabletTypeRequest) GetDryRun() bool { if x != nil { - return x.Force + return x.DryRun } return false } -type DeleteKeyspaceResponse struct { +type ChangeTabletTypeResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + BeforeTablet *topodata.Tablet `protobuf:"bytes,1,opt,name=before_tablet,json=beforeTablet,proto3" json:"before_tablet,omitempty"` + AfterTablet *topodata.Tablet `protobuf:"bytes,2,opt,name=after_tablet,json=afterTablet,proto3" json:"after_tablet,omitempty"` + WasDryRun bool `protobuf:"varint,3,opt,name=was_dry_run,json=wasDryRun,proto3" json:"was_dry_run,omitempty"` } -func (x *DeleteKeyspaceResponse) Reset() { - *x = DeleteKeyspaceResponse{} +func (x *ChangeTabletTypeResponse) Reset() { + *x = ChangeTabletTypeResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[33] + mi := &file_vtctldata_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteKeyspaceResponse) String() string { +func (x *ChangeTabletTypeResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteKeyspaceResponse) ProtoMessage() {} +func (*ChangeTabletTypeResponse) ProtoMessage() {} -func (x *DeleteKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := 
&file_vtctldata_proto_msgTypes[33] +func (x *ChangeTabletTypeResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2305,48 +2493,58 @@ func (x *DeleteKeyspaceResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteKeyspaceResponse.ProtoReflect.Descriptor instead. -func (*DeleteKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{33} +// Deprecated: Use ChangeTabletTypeResponse.ProtoReflect.Descriptor instead. +func (*ChangeTabletTypeResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{26} } -type DeleteShardsRequest struct { +func (x *ChangeTabletTypeResponse) GetBeforeTablet() *topodata.Tablet { + if x != nil { + return x.BeforeTablet + } + return nil +} + +func (x *ChangeTabletTypeResponse) GetAfterTablet() *topodata.Tablet { + if x != nil { + return x.AfterTablet + } + return nil +} + +func (x *ChangeTabletTypeResponse) GetWasDryRun() bool { + if x != nil { + return x.WasDryRun + } + return false +} + +type CleanupSchemaMigrationRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Shards is the list of shards to delete. The nested topodatapb.Shard field - // is not required for DeleteShard, but the Keyspace and Shard fields are. - Shards []*Shard `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty"` - // Recursive also deletes all tablets belonging to the shard(s). It is an - // error to call DeleteShard on a non-empty shard without also specificying - // Recursive. - Recursive bool `protobuf:"varint,2,opt,name=recursive,proto3" json:"recursive,omitempty"` - // EvenIfServing allows a shard to be deleted even if it is serving, which is - // normally an error. Use with caution. 
- EvenIfServing bool `protobuf:"varint,4,opt,name=even_if_serving,json=evenIfServing,proto3" json:"even_if_serving,omitempty"` - // Force allows a shard to be deleted even if the shard lock cannot be - // obtained. This should only be used to force-clean a shard. - Force bool `protobuf:"varint,5,opt,name=force,proto3" json:"force,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` } -func (x *DeleteShardsRequest) Reset() { - *x = DeleteShardsRequest{} +func (x *CleanupSchemaMigrationRequest) Reset() { + *x = CleanupSchemaMigrationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[34] + mi := &file_vtctldata_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteShardsRequest) String() string { +func (x *CleanupSchemaMigrationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteShardsRequest) ProtoMessage() {} +func (*CleanupSchemaMigrationRequest) ProtoMessage() {} -func (x *DeleteShardsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[34] +func (x *CleanupSchemaMigrationRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2357,62 +2555,50 @@ func (x *DeleteShardsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteShardsRequest.ProtoReflect.Descriptor instead. 
-func (*DeleteShardsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{34} -} - -func (x *DeleteShardsRequest) GetShards() []*Shard { - if x != nil { - return x.Shards - } - return nil -} - -func (x *DeleteShardsRequest) GetRecursive() bool { - if x != nil { - return x.Recursive - } - return false +// Deprecated: Use CleanupSchemaMigrationRequest.ProtoReflect.Descriptor instead. +func (*CleanupSchemaMigrationRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{27} } -func (x *DeleteShardsRequest) GetEvenIfServing() bool { +func (x *CleanupSchemaMigrationRequest) GetKeyspace() string { if x != nil { - return x.EvenIfServing + return x.Keyspace } - return false + return "" } -func (x *DeleteShardsRequest) GetForce() bool { +func (x *CleanupSchemaMigrationRequest) GetUuid() string { if x != nil { - return x.Force + return x.Uuid } - return false + return "" } -type DeleteShardsResponse struct { +type CleanupSchemaMigrationResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + RowsAffectedByShard map[string]uint64 `protobuf:"bytes,1,rep,name=rows_affected_by_shard,json=rowsAffectedByShard,proto3" json:"rows_affected_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } -func (x *DeleteShardsResponse) Reset() { - *x = DeleteShardsResponse{} +func (x *CleanupSchemaMigrationResponse) Reset() { + *x = CleanupSchemaMigrationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[35] + mi := &file_vtctldata_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteShardsResponse) String() string { +func (x *CleanupSchemaMigrationResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteShardsResponse) ProtoMessage() {} +func (*CleanupSchemaMigrationResponse) ProtoMessage() 
{} -func (x *DeleteShardsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[35] +func (x *CleanupSchemaMigrationResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2423,36 +2609,44 @@ func (x *DeleteShardsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteShardsResponse.ProtoReflect.Descriptor instead. -func (*DeleteShardsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{35} +// Deprecated: Use CleanupSchemaMigrationResponse.ProtoReflect.Descriptor instead. +func (*CleanupSchemaMigrationResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{28} } -type DeleteSrvVSchemaRequest struct { +func (x *CleanupSchemaMigrationResponse) GetRowsAffectedByShard() map[string]uint64 { + if x != nil { + return x.RowsAffectedByShard + } + return nil +} + +type CompleteSchemaMigrationRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Cell string `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` } -func (x *DeleteSrvVSchemaRequest) Reset() { - *x = DeleteSrvVSchemaRequest{} +func (x *CompleteSchemaMigrationRequest) Reset() { + *x = CompleteSchemaMigrationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[36] + mi := &file_vtctldata_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteSrvVSchemaRequest) String() string { +func (x *CompleteSchemaMigrationRequest) String() string { return protoimpl.X.MessageStringOf(x) } 
-func (*DeleteSrvVSchemaRequest) ProtoMessage() {} +func (*CompleteSchemaMigrationRequest) ProtoMessage() {} -func (x *DeleteSrvVSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[36] +func (x *CompleteSchemaMigrationRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2463,41 +2657,50 @@ func (x *DeleteSrvVSchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteSrvVSchemaRequest.ProtoReflect.Descriptor instead. -func (*DeleteSrvVSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{36} +// Deprecated: Use CompleteSchemaMigrationRequest.ProtoReflect.Descriptor instead. +func (*CompleteSchemaMigrationRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{29} } -func (x *DeleteSrvVSchemaRequest) GetCell() string { +func (x *CompleteSchemaMigrationRequest) GetKeyspace() string { if x != nil { - return x.Cell + return x.Keyspace } return "" } -type DeleteSrvVSchemaResponse struct { +func (x *CompleteSchemaMigrationRequest) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +type CompleteSchemaMigrationResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + RowsAffectedByShard map[string]uint64 `protobuf:"bytes,1,rep,name=rows_affected_by_shard,json=rowsAffectedByShard,proto3" json:"rows_affected_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } -func (x *DeleteSrvVSchemaResponse) Reset() { - *x = DeleteSrvVSchemaResponse{} +func (x *CompleteSchemaMigrationResponse) Reset() { + *x = CompleteSchemaMigrationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[37] + mi := 
&file_vtctldata_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteSrvVSchemaResponse) String() string { +func (x *CompleteSchemaMigrationResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteSrvVSchemaResponse) ProtoMessage() {} +func (*CompleteSchemaMigrationResponse) ProtoMessage() {} -func (x *DeleteSrvVSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[37] +func (x *CompleteSchemaMigrationResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2508,40 +2711,65 @@ func (x *DeleteSrvVSchemaResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteSrvVSchemaResponse.ProtoReflect.Descriptor instead. -func (*DeleteSrvVSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{37} +// Deprecated: Use CompleteSchemaMigrationResponse.ProtoReflect.Descriptor instead. +func (*CompleteSchemaMigrationResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{30} } -type DeleteTabletsRequest struct { +func (x *CompleteSchemaMigrationResponse) GetRowsAffectedByShard() map[string]uint64 { + if x != nil { + return x.RowsAffectedByShard + } + return nil +} + +type CreateKeyspaceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // TabletAliases is the list of tablets to delete. - TabletAliases []*topodata.TabletAlias `protobuf:"bytes,1,rep,name=tablet_aliases,json=tabletAliases,proto3" json:"tablet_aliases,omitempty"` - // AllowPrimary allows for the primary tablet of a shard to be deleted. - // Use with caution. 
- AllowPrimary bool `protobuf:"varint,2,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` + // Name is the name of the keyspace. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Force proceeds with the request even if the keyspace already exists. + Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` + // AllowEmptyVSchema allows a keyspace to be created with no vschema. + AllowEmptyVSchema bool `protobuf:"varint,3,opt,name=allow_empty_v_schema,json=allowEmptyVSchema,proto3" json:"allow_empty_v_schema,omitempty"` + // ServedFroms specifies a set of db_type:keyspace pairs used to serve + // traffic for the keyspace. + ServedFroms []*topodata.Keyspace_ServedFrom `protobuf:"bytes,6,rep,name=served_froms,json=servedFroms,proto3" json:"served_froms,omitempty"` + // Type is the type of the keyspace to create. + Type topodata.KeyspaceType `protobuf:"varint,7,opt,name=type,proto3,enum=topodata.KeyspaceType" json:"type,omitempty"` + // BaseKeyspace specifies the base keyspace for SNAPSHOT keyspaces. It is + // required to create a SNAPSHOT keyspace. + BaseKeyspace string `protobuf:"bytes,8,opt,name=base_keyspace,json=baseKeyspace,proto3" json:"base_keyspace,omitempty"` + // SnapshotTime specifies the snapshot time for this keyspace. It is required + // to create a SNAPSHOT keyspace. + SnapshotTime *vttime.Time `protobuf:"bytes,9,opt,name=snapshot_time,json=snapshotTime,proto3" json:"snapshot_time,omitempty"` + // DurabilityPolicy is the durability policy to be + // used for this keyspace. + DurabilityPolicy string `protobuf:"bytes,10,opt,name=durability_policy,json=durabilityPolicy,proto3" json:"durability_policy,omitempty"` + // SidecarDBName is the name of the sidecar database that + // each vttablet in the keyspace will use. 
+ SidecarDbName string `protobuf:"bytes,11,opt,name=sidecar_db_name,json=sidecarDbName,proto3" json:"sidecar_db_name,omitempty"` } -func (x *DeleteTabletsRequest) Reset() { - *x = DeleteTabletsRequest{} +func (x *CreateKeyspaceRequest) Reset() { + *x = CreateKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[38] + mi := &file_vtctldata_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteTabletsRequest) String() string { +func (x *CreateKeyspaceRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteTabletsRequest) ProtoMessage() {} +func (*CreateKeyspaceRequest) ProtoMessage() {} -func (x *DeleteTabletsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[38] +func (x *CreateKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2552,48 +2780,100 @@ func (x *DeleteTabletsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteTabletsRequest.ProtoReflect.Descriptor instead. -func (*DeleteTabletsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{38} +// Deprecated: Use CreateKeyspaceRequest.ProtoReflect.Descriptor instead. 
+func (*CreateKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{31} } -func (x *DeleteTabletsRequest) GetTabletAliases() []*topodata.TabletAlias { +func (x *CreateKeyspaceRequest) GetName() string { if x != nil { - return x.TabletAliases + return x.Name } - return nil + return "" } -func (x *DeleteTabletsRequest) GetAllowPrimary() bool { +func (x *CreateKeyspaceRequest) GetForce() bool { if x != nil { - return x.AllowPrimary + return x.Force } return false } -type DeleteTabletsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *CreateKeyspaceRequest) GetAllowEmptyVSchema() bool { + if x != nil { + return x.AllowEmptyVSchema + } + return false } -func (x *DeleteTabletsResponse) Reset() { - *x = DeleteTabletsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[39] +func (x *CreateKeyspaceRequest) GetServedFroms() []*topodata.Keyspace_ServedFrom { + if x != nil { + return x.ServedFroms + } + return nil +} + +func (x *CreateKeyspaceRequest) GetType() topodata.KeyspaceType { + if x != nil { + return x.Type + } + return topodata.KeyspaceType(0) +} + +func (x *CreateKeyspaceRequest) GetBaseKeyspace() string { + if x != nil { + return x.BaseKeyspace + } + return "" +} + +func (x *CreateKeyspaceRequest) GetSnapshotTime() *vttime.Time { + if x != nil { + return x.SnapshotTime + } + return nil +} + +func (x *CreateKeyspaceRequest) GetDurabilityPolicy() string { + if x != nil { + return x.DurabilityPolicy + } + return "" +} + +func (x *CreateKeyspaceRequest) GetSidecarDbName() string { + if x != nil { + return x.SidecarDbName + } + return "" +} + +type CreateKeyspaceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Keyspace is the newly-created keyspace. 
+ Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` +} + +func (x *CreateKeyspaceResponse) Reset() { + *x = CreateKeyspaceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteTabletsResponse) String() string { +func (x *CreateKeyspaceResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteTabletsResponse) ProtoMessage() {} +func (*CreateKeyspaceResponse) ProtoMessage() {} -func (x *DeleteTabletsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[39] +func (x *CreateKeyspaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2604,53 +2884,52 @@ func (x *DeleteTabletsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteTabletsResponse.ProtoReflect.Descriptor instead. -func (*DeleteTabletsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{39} +// Deprecated: Use CreateKeyspaceResponse.ProtoReflect.Descriptor instead. +func (*CreateKeyspaceResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{32} } -type EmergencyReparentShardRequest struct { +func (x *CreateKeyspaceResponse) GetKeyspace() *Keyspace { + if x != nil { + return x.Keyspace + } + return nil +} + +type CreateShardRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspace is the name of the keyspace to perform the Emergency Reparent in. + // Keyspace is the name of the keyspace to create the shard in. 
Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Shard is the name of the shard to perform the Emergency Reparent in. - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - // Optional alias of a tablet that should become the new shard primary. If not - // not specified, the vtctld will select the most up-to-date canditate to - // promote. - NewPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=new_primary,json=newPrimary,proto3" json:"new_primary,omitempty"` - // List of replica aliases to ignore during the Emergency Reparent. The vtctld - // will not attempt to stop replication on these tablets, nor attempt to - // demote any that may think they are the shard primary. - IgnoreReplicas []*topodata.TabletAlias `protobuf:"bytes,4,rep,name=ignore_replicas,json=ignoreReplicas,proto3" json:"ignore_replicas,omitempty"` - // WaitReplicasTimeout is the duration of time to wait for replicas to catch - // up in reparenting. - WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,5,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` - // PreventCrossCellPromotion is used to only promote the new primary from the same cell - // as the failed primary. - PreventCrossCellPromotion bool `protobuf:"varint,6,opt,name=prevent_cross_cell_promotion,json=preventCrossCellPromotion,proto3" json:"prevent_cross_cell_promotion,omitempty"` + // ShardName is the name of the shard to create. E.g. "-" or "-80". + ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` + // Force treats an attempt to create a shard that already exists as a + // non-error. + Force bool `protobuf:"varint,3,opt,name=force,proto3" json:"force,omitempty"` + // IncludeParent creates the parent keyspace as an empty BASE keyspace, if it + // doesn't already exist. 
+ IncludeParent bool `protobuf:"varint,4,opt,name=include_parent,json=includeParent,proto3" json:"include_parent,omitempty"` } -func (x *EmergencyReparentShardRequest) Reset() { - *x = EmergencyReparentShardRequest{} +func (x *CreateShardRequest) Reset() { + *x = CreateShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[40] + mi := &file_vtctldata_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *EmergencyReparentShardRequest) String() string { +func (x *CreateShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*EmergencyReparentShardRequest) ProtoMessage() {} +func (*CreateShardRequest) ProtoMessage() {} -func (x *EmergencyReparentShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[40] +func (x *CreateShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2661,87 +2940,71 @@ func (x *EmergencyReparentShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use EmergencyReparentShardRequest.ProtoReflect.Descriptor instead. -func (*EmergencyReparentShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{40} +// Deprecated: Use CreateShardRequest.ProtoReflect.Descriptor instead. 
+func (*CreateShardRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{33} } -func (x *EmergencyReparentShardRequest) GetKeyspace() string { +func (x *CreateShardRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *EmergencyReparentShardRequest) GetShard() string { +func (x *CreateShardRequest) GetShardName() string { if x != nil { - return x.Shard + return x.ShardName } return "" } -func (x *EmergencyReparentShardRequest) GetNewPrimary() *topodata.TabletAlias { - if x != nil { - return x.NewPrimary - } - return nil -} - -func (x *EmergencyReparentShardRequest) GetIgnoreReplicas() []*topodata.TabletAlias { - if x != nil { - return x.IgnoreReplicas - } - return nil -} - -func (x *EmergencyReparentShardRequest) GetWaitReplicasTimeout() *vttime.Duration { +func (x *CreateShardRequest) GetForce() bool { if x != nil { - return x.WaitReplicasTimeout + return x.Force } - return nil + return false } -func (x *EmergencyReparentShardRequest) GetPreventCrossCellPromotion() bool { +func (x *CreateShardRequest) GetIncludeParent() bool { if x != nil { - return x.PreventCrossCellPromotion + return x.IncludeParent } return false } -type EmergencyReparentShardResponse struct { +type CreateShardResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspace is the name of the keyspace the Emergency Reparent took place in. - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Shard is the name of the shard the Emergency Reparent took place in. - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - // PromotedPrimary is the alias of the tablet that was promoted to shard - // primary. If NewPrimary was set in the request, then this will be the same - // alias. Otherwise, it will be the alias of the tablet found to be most - // up-to-date. 
- PromotedPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=promoted_primary,json=promotedPrimary,proto3" json:"promoted_primary,omitempty"` - Events []*logutil.Event `protobuf:"bytes,4,rep,name=events,proto3" json:"events,omitempty"` + // Keyspace is the created keyspace. It is set only if IncludeParent was + // specified in the request and the parent keyspace needed to be created. + Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard is the newly-created shard object. + Shard *Shard `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // ShardAlreadyExists is set if Force was specified in the request and the + // shard already existed. + ShardAlreadyExists bool `protobuf:"varint,3,opt,name=shard_already_exists,json=shardAlreadyExists,proto3" json:"shard_already_exists,omitempty"` } -func (x *EmergencyReparentShardResponse) Reset() { - *x = EmergencyReparentShardResponse{} +func (x *CreateShardResponse) Reset() { + *x = CreateShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[41] + mi := &file_vtctldata_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *EmergencyReparentShardResponse) String() string { +func (x *CreateShardResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*EmergencyReparentShardResponse) ProtoMessage() {} +func (*CreateShardResponse) ProtoMessage() {} -func (x *EmergencyReparentShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[41] +func (x *CreateShardResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2752,74 +3015,58 @@ func (x *EmergencyReparentShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use 
EmergencyReparentShardResponse.ProtoReflect.Descriptor instead. -func (*EmergencyReparentShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{41} +// Deprecated: Use CreateShardResponse.ProtoReflect.Descriptor instead. +func (*CreateShardResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{34} } -func (x *EmergencyReparentShardResponse) GetKeyspace() string { +func (x *CreateShardResponse) GetKeyspace() *Keyspace { if x != nil { return x.Keyspace } - return "" + return nil } -func (x *EmergencyReparentShardResponse) GetShard() string { +func (x *CreateShardResponse) GetShard() *Shard { if x != nil { return x.Shard } - return "" -} - -func (x *EmergencyReparentShardResponse) GetPromotedPrimary() *topodata.TabletAlias { - if x != nil { - return x.PromotedPrimary - } return nil } -func (x *EmergencyReparentShardResponse) GetEvents() []*logutil.Event { +func (x *CreateShardResponse) GetShardAlreadyExists() bool { if x != nil { - return x.Events + return x.ShardAlreadyExists } - return nil + return false } -type ExecuteFetchAsAppRequest struct { +type DeleteCellInfoRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - Query string `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"` - // MaxRows is an optional parameter to limit the number of rows read into the - // QueryResult. Note that this does not apply a LIMIT to the query, just how - // many rows are read from the MySQL server on the tablet side. - // - // This field is optional. Specifying a non-positive value will use whatever - // default is configured in the VtctldService. 
- MaxRows int64 `protobuf:"varint,3,opt,name=max_rows,json=maxRows,proto3" json:"max_rows,omitempty"` - // UsePool causes the query to be run with a pooled connection to the tablet. - UsePool bool `protobuf:"varint,4,opt,name=use_pool,json=usePool,proto3" json:"use_pool,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` } -func (x *ExecuteFetchAsAppRequest) Reset() { - *x = ExecuteFetchAsAppRequest{} +func (x *DeleteCellInfoRequest) Reset() { + *x = DeleteCellInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[42] + mi := &file_vtctldata_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ExecuteFetchAsAppRequest) String() string { +func (x *DeleteCellInfoRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ExecuteFetchAsAppRequest) ProtoMessage() {} +func (*DeleteCellInfoRequest) ProtoMessage() {} -func (x *ExecuteFetchAsAppRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[42] +func (x *DeleteCellInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2830,64 +3077,48 @@ func (x *ExecuteFetchAsAppRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExecuteFetchAsAppRequest.ProtoReflect.Descriptor instead. -func (*ExecuteFetchAsAppRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{42} +// Deprecated: Use DeleteCellInfoRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteCellInfoRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{35} } -func (x *ExecuteFetchAsAppRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *DeleteCellInfoRequest) GetName() string { if x != nil { - return x.TabletAlias + return x.Name } - return nil + return "" } -func (x *ExecuteFetchAsAppRequest) GetQuery() string { - if x != nil { - return x.Query - } - return "" -} - -func (x *ExecuteFetchAsAppRequest) GetMaxRows() int64 { - if x != nil { - return x.MaxRows - } - return 0 -} - -func (x *ExecuteFetchAsAppRequest) GetUsePool() bool { +func (x *DeleteCellInfoRequest) GetForce() bool { if x != nil { - return x.UsePool + return x.Force } return false } -type ExecuteFetchAsAppResponse struct { +type DeleteCellInfoResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` } -func (x *ExecuteFetchAsAppResponse) Reset() { - *x = ExecuteFetchAsAppResponse{} +func (x *DeleteCellInfoResponse) Reset() { + *x = DeleteCellInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[43] + mi := &file_vtctldata_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ExecuteFetchAsAppResponse) String() string { +func (x *DeleteCellInfoResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ExecuteFetchAsAppResponse) ProtoMessage() {} +func (*DeleteCellInfoResponse) ProtoMessage() {} -func (x *ExecuteFetchAsAppResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[43] +func (x *DeleteCellInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2898,57 +3129,36 @@ func 
(x *ExecuteFetchAsAppResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExecuteFetchAsAppResponse.ProtoReflect.Descriptor instead. -func (*ExecuteFetchAsAppResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{43} -} - -func (x *ExecuteFetchAsAppResponse) GetResult() *query.QueryResult { - if x != nil { - return x.Result - } - return nil +// Deprecated: Use DeleteCellInfoResponse.ProtoReflect.Descriptor instead. +func (*DeleteCellInfoResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{36} } -type ExecuteFetchAsDBARequest struct { +type DeleteCellsAliasRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - Query string `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"` - // MaxRows is an optional parameter to limit the number of rows read into the - // QueryResult. Note that this does not apply a LIMIT to the query, just how - // many rows are read from the MySQL server on the tablet side. - // - // This field is optional. Specifying a non-positive value will use whatever - // default is configured in the VtctldService. - MaxRows int64 `protobuf:"varint,3,opt,name=max_rows,json=maxRows,proto3" json:"max_rows,omitempty"` - // DisableBinlogs instructs the tablet not to use binary logging when - // executing the query. - DisableBinlogs bool `protobuf:"varint,4,opt,name=disable_binlogs,json=disableBinlogs,proto3" json:"disable_binlogs,omitempty"` - // ReloadSchema instructs the tablet to reload its schema after executing the - // query. 
- ReloadSchema bool `protobuf:"varint,5,opt,name=reload_schema,json=reloadSchema,proto3" json:"reload_schema,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -func (x *ExecuteFetchAsDBARequest) Reset() { - *x = ExecuteFetchAsDBARequest{} +func (x *DeleteCellsAliasRequest) Reset() { + *x = DeleteCellsAliasRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[44] + mi := &file_vtctldata_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ExecuteFetchAsDBARequest) String() string { +func (x *DeleteCellsAliasRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ExecuteFetchAsDBARequest) ProtoMessage() {} +func (*DeleteCellsAliasRequest) ProtoMessage() {} -func (x *ExecuteFetchAsDBARequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[44] +func (x *DeleteCellsAliasRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2959,71 +3169,41 @@ func (x *ExecuteFetchAsDBARequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExecuteFetchAsDBARequest.ProtoReflect.Descriptor instead. -func (*ExecuteFetchAsDBARequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{44} -} - -func (x *ExecuteFetchAsDBARequest) GetTabletAlias() *topodata.TabletAlias { - if x != nil { - return x.TabletAlias - } - return nil +// Deprecated: Use DeleteCellsAliasRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteCellsAliasRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{37} } -func (x *ExecuteFetchAsDBARequest) GetQuery() string { +func (x *DeleteCellsAliasRequest) GetName() string { if x != nil { - return x.Query + return x.Name } return "" } -func (x *ExecuteFetchAsDBARequest) GetMaxRows() int64 { - if x != nil { - return x.MaxRows - } - return 0 -} - -func (x *ExecuteFetchAsDBARequest) GetDisableBinlogs() bool { - if x != nil { - return x.DisableBinlogs - } - return false -} - -func (x *ExecuteFetchAsDBARequest) GetReloadSchema() bool { - if x != nil { - return x.ReloadSchema - } - return false -} - -type ExecuteFetchAsDBAResponse struct { +type DeleteCellsAliasResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` } -func (x *ExecuteFetchAsDBAResponse) Reset() { - *x = ExecuteFetchAsDBAResponse{} +func (x *DeleteCellsAliasResponse) Reset() { + *x = DeleteCellsAliasResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[45] + mi := &file_vtctldata_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ExecuteFetchAsDBAResponse) String() string { +func (x *DeleteCellsAliasResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ExecuteFetchAsDBAResponse) ProtoMessage() {} +func (*DeleteCellsAliasResponse) ProtoMessage() {} -func (x *ExecuteFetchAsDBAResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[45] +func (x *DeleteCellsAliasResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3034,44 +3214,44 @@ func (x *ExecuteFetchAsDBAResponse) ProtoReflect() 
protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExecuteFetchAsDBAResponse.ProtoReflect.Descriptor instead. -func (*ExecuteFetchAsDBAResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{45} -} - -func (x *ExecuteFetchAsDBAResponse) GetResult() *query.QueryResult { - if x != nil { - return x.Result - } - return nil +// Deprecated: Use DeleteCellsAliasResponse.ProtoReflect.Descriptor instead. +func (*DeleteCellsAliasResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{38} } -type ExecuteHookRequest struct { +type DeleteKeyspaceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - TabletHookRequest *tabletmanagerdata.ExecuteHookRequest `protobuf:"bytes,2,opt,name=tablet_hook_request,json=tabletHookRequest,proto3" json:"tablet_hook_request,omitempty"` + // Keyspace is the name of the keyspace to delete. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Recursive causes all shards in the keyspace to be recursively deleted + // before deleting the keyspace. It is an error to call DeleteKeyspace on a + // non-empty keyspace without also specifying Recursive. + Recursive bool `protobuf:"varint,2,opt,name=recursive,proto3" json:"recursive,omitempty"` + // Force allows a keyspace to be deleted even if the keyspace lock cannot be + // obtained. This should only be used to force-clean a keyspace. 
+ Force bool `protobuf:"varint,3,opt,name=force,proto3" json:"force,omitempty"` } -func (x *ExecuteHookRequest) Reset() { - *x = ExecuteHookRequest{} +func (x *DeleteKeyspaceRequest) Reset() { + *x = DeleteKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[46] + mi := &file_vtctldata_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ExecuteHookRequest) String() string { +func (x *DeleteKeyspaceRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ExecuteHookRequest) ProtoMessage() {} +func (*DeleteKeyspaceRequest) ProtoMessage() {} -func (x *ExecuteHookRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[46] +func (x *DeleteKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3082,50 +3262,55 @@ func (x *ExecuteHookRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExecuteHookRequest.ProtoReflect.Descriptor instead. -func (*ExecuteHookRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{46} +// Deprecated: Use DeleteKeyspaceRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{39} } -func (x *ExecuteHookRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *DeleteKeyspaceRequest) GetKeyspace() string { if x != nil { - return x.TabletAlias + return x.Keyspace } - return nil + return "" } -func (x *ExecuteHookRequest) GetTabletHookRequest() *tabletmanagerdata.ExecuteHookRequest { +func (x *DeleteKeyspaceRequest) GetRecursive() bool { if x != nil { - return x.TabletHookRequest + return x.Recursive } - return nil + return false } -type ExecuteHookResponse struct { +func (x *DeleteKeyspaceRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + +type DeleteKeyspaceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - HookResult *tabletmanagerdata.ExecuteHookResponse `protobuf:"bytes,1,opt,name=hook_result,json=hookResult,proto3" json:"hook_result,omitempty"` } -func (x *ExecuteHookResponse) Reset() { - *x = ExecuteHookResponse{} +func (x *DeleteKeyspaceResponse) Reset() { + *x = DeleteKeyspaceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[47] + mi := &file_vtctldata_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ExecuteHookResponse) String() string { +func (x *DeleteKeyspaceResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ExecuteHookResponse) ProtoMessage() {} +func (*DeleteKeyspaceResponse) ProtoMessage() {} -func (x *ExecuteHookResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[47] +func (x *DeleteKeyspaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3136,43 +3321,48 @@ func (x *ExecuteHookResponse) 
ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExecuteHookResponse.ProtoReflect.Descriptor instead. -func (*ExecuteHookResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{47} -} - -func (x *ExecuteHookResponse) GetHookResult() *tabletmanagerdata.ExecuteHookResponse { - if x != nil { - return x.HookResult - } - return nil +// Deprecated: Use DeleteKeyspaceResponse.ProtoReflect.Descriptor instead. +func (*DeleteKeyspaceResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{40} } -type FindAllShardsInKeyspaceRequest struct { +type DeleteShardsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shards is the list of shards to delete. The nested topodatapb.Shard field + // is not required for DeleteShard, but the Keyspace and Shard fields are. + Shards []*Shard `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty"` + // Recursive also deletes all tablets belonging to the shard(s). It is an + // error to call DeleteShard on a non-empty shard without also specificying + // Recursive. + Recursive bool `protobuf:"varint,2,opt,name=recursive,proto3" json:"recursive,omitempty"` + // EvenIfServing allows a shard to be deleted even if it is serving, which is + // normally an error. Use with caution. + EvenIfServing bool `protobuf:"varint,4,opt,name=even_if_serving,json=evenIfServing,proto3" json:"even_if_serving,omitempty"` + // Force allows a shard to be deleted even if the shard lock cannot be + // obtained. This should only be used to force-clean a shard. 
+ Force bool `protobuf:"varint,5,opt,name=force,proto3" json:"force,omitempty"` } -func (x *FindAllShardsInKeyspaceRequest) Reset() { - *x = FindAllShardsInKeyspaceRequest{} +func (x *DeleteShardsRequest) Reset() { + *x = DeleteShardsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[48] + mi := &file_vtctldata_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *FindAllShardsInKeyspaceRequest) String() string { +func (x *DeleteShardsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*FindAllShardsInKeyspaceRequest) ProtoMessage() {} +func (*DeleteShardsRequest) ProtoMessage() {} -func (x *FindAllShardsInKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[48] +func (x *DeleteShardsRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3183,105 +3373,62 @@ func (x *FindAllShardsInKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use FindAllShardsInKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*FindAllShardsInKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{48} +// Deprecated: Use DeleteShardsRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteShardsRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{41} } -func (x *FindAllShardsInKeyspaceRequest) GetKeyspace() string { +func (x *DeleteShardsRequest) GetShards() []*Shard { if x != nil { - return x.Keyspace + return x.Shards } - return "" -} - -type FindAllShardsInKeyspaceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Shards map[string]*Shard `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + return nil } -func (x *FindAllShardsInKeyspaceResponse) Reset() { - *x = FindAllShardsInKeyspaceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *DeleteShardsRequest) GetRecursive() bool { + if x != nil { + return x.Recursive } + return false } -func (x *FindAllShardsInKeyspaceResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FindAllShardsInKeyspaceResponse) ProtoMessage() {} - -func (x *FindAllShardsInKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[49] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *DeleteShardsRequest) GetEvenIfServing() bool { + if x != nil { + return x.EvenIfServing } - return mi.MessageOf(x) -} - -// Deprecated: Use FindAllShardsInKeyspaceResponse.ProtoReflect.Descriptor instead. 
-func (*FindAllShardsInKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{49} + return false } -func (x *FindAllShardsInKeyspaceResponse) GetShards() map[string]*Shard { +func (x *DeleteShardsRequest) GetForce() bool { if x != nil { - return x.Shards + return x.Force } - return nil + return false } -type GetBackupsRequest struct { +type DeleteShardsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - // Limit, if nonzero, will return only the most N recent backups. - Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` - // Detailed indicates whether to use the backupengine, if supported, to - // populate additional fields, such as Engine and Status, on BackupInfo - // objects in the response. If not set, or if the backupengine does not - // support populating these fields, Engine will always be empty, and Status - // will always be UNKNOWN. - Detailed bool `protobuf:"varint,4,opt,name=detailed,proto3" json:"detailed,omitempty"` - // DetailedLimit, if nonzero, will only populate additional fields (see Detailed) - // on the N most recent backups. The Limit field still dictates the total - // number of backup info objects returned, so, in reality, min(Limit, DetailedLimit) - // backup infos will have additional fields set, and any remaining backups - // will not. 
- DetailedLimit uint32 `protobuf:"varint,5,opt,name=detailed_limit,json=detailedLimit,proto3" json:"detailed_limit,omitempty"` } -func (x *GetBackupsRequest) Reset() { - *x = GetBackupsRequest{} +func (x *DeleteShardsResponse) Reset() { + *x = DeleteShardsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[50] + mi := &file_vtctldata_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetBackupsRequest) String() string { +func (x *DeleteShardsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetBackupsRequest) ProtoMessage() {} +func (*DeleteShardsResponse) ProtoMessage() {} -func (x *GetBackupsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[50] +func (x *DeleteShardsResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3292,71 +3439,36 @@ func (x *GetBackupsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetBackupsRequest.ProtoReflect.Descriptor instead. -func (*GetBackupsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{50} -} - -func (x *GetBackupsRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *GetBackupsRequest) GetShard() string { - if x != nil { - return x.Shard - } - return "" -} - -func (x *GetBackupsRequest) GetLimit() uint32 { - if x != nil { - return x.Limit - } - return 0 -} - -func (x *GetBackupsRequest) GetDetailed() bool { - if x != nil { - return x.Detailed - } - return false -} - -func (x *GetBackupsRequest) GetDetailedLimit() uint32 { - if x != nil { - return x.DetailedLimit - } - return 0 +// Deprecated: Use DeleteShardsResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteShardsResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{42} } -type GetBackupsResponse struct { +type DeleteSrvVSchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Backups []*mysqlctl.BackupInfo `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` + Cell string `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"` } -func (x *GetBackupsResponse) Reset() { - *x = GetBackupsResponse{} +func (x *DeleteSrvVSchemaRequest) Reset() { + *x = DeleteSrvVSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[51] + mi := &file_vtctldata_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetBackupsResponse) String() string { +func (x *DeleteSrvVSchemaRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetBackupsResponse) ProtoMessage() {} +func (*DeleteSrvVSchemaRequest) ProtoMessage() {} -func (x *GetBackupsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[51] +func (x *DeleteSrvVSchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3367,43 +3479,41 @@ func (x *GetBackupsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetBackupsResponse.ProtoReflect.Descriptor instead. -func (*GetBackupsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{51} +// Deprecated: Use DeleteSrvVSchemaRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteSrvVSchemaRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{43} } -func (x *GetBackupsResponse) GetBackups() []*mysqlctl.BackupInfo { +func (x *DeleteSrvVSchemaRequest) GetCell() string { if x != nil { - return x.Backups + return x.Cell } - return nil + return "" } -type GetCellInfoRequest struct { +type DeleteSrvVSchemaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Cell string `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"` } -func (x *GetCellInfoRequest) Reset() { - *x = GetCellInfoRequest{} +func (x *DeleteSrvVSchemaResponse) Reset() { + *x = DeleteSrvVSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[52] + mi := &file_vtctldata_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetCellInfoRequest) String() string { +func (x *DeleteSrvVSchemaResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetCellInfoRequest) ProtoMessage() {} +func (*DeleteSrvVSchemaResponse) ProtoMessage() {} -func (x *GetCellInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[52] +func (x *DeleteSrvVSchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3414,43 +3524,40 @@ func (x *GetCellInfoRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetCellInfoRequest.ProtoReflect.Descriptor instead. -func (*GetCellInfoRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{52} -} - -func (x *GetCellInfoRequest) GetCell() string { - if x != nil { - return x.Cell - } - return "" +// Deprecated: Use DeleteSrvVSchemaResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteSrvVSchemaResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{44} } -type GetCellInfoResponse struct { +type DeleteTabletsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - CellInfo *topodata.CellInfo `protobuf:"bytes,1,opt,name=cell_info,json=cellInfo,proto3" json:"cell_info,omitempty"` + // TabletAliases is the list of tablets to delete. + TabletAliases []*topodata.TabletAlias `protobuf:"bytes,1,rep,name=tablet_aliases,json=tabletAliases,proto3" json:"tablet_aliases,omitempty"` + // AllowPrimary allows for the primary tablet of a shard to be deleted. + // Use with caution. + AllowPrimary bool `protobuf:"varint,2,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` } -func (x *GetCellInfoResponse) Reset() { - *x = GetCellInfoResponse{} +func (x *DeleteTabletsRequest) Reset() { + *x = DeleteTabletsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[53] + mi := &file_vtctldata_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetCellInfoResponse) String() string { +func (x *DeleteTabletsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetCellInfoResponse) ProtoMessage() {} +func (*DeleteTabletsRequest) ProtoMessage() {} -func (x *GetCellInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[53] +func (x *DeleteTabletsRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3461,41 +3568,48 @@ func (x *GetCellInfoResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetCellInfoResponse.ProtoReflect.Descriptor instead. 
-func (*GetCellInfoResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{53} +// Deprecated: Use DeleteTabletsRequest.ProtoReflect.Descriptor instead. +func (*DeleteTabletsRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{45} } -func (x *GetCellInfoResponse) GetCellInfo() *topodata.CellInfo { +func (x *DeleteTabletsRequest) GetTabletAliases() []*topodata.TabletAlias { if x != nil { - return x.CellInfo + return x.TabletAliases } return nil } -type GetCellInfoNamesRequest struct { +func (x *DeleteTabletsRequest) GetAllowPrimary() bool { + if x != nil { + return x.AllowPrimary + } + return false +} + +type DeleteTabletsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } -func (x *GetCellInfoNamesRequest) Reset() { - *x = GetCellInfoNamesRequest{} +func (x *DeleteTabletsResponse) Reset() { + *x = DeleteTabletsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[54] + mi := &file_vtctldata_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetCellInfoNamesRequest) String() string { +func (x *DeleteTabletsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetCellInfoNamesRequest) ProtoMessage() {} +func (*DeleteTabletsResponse) ProtoMessage() {} -func (x *GetCellInfoNamesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[54] +func (x *DeleteTabletsResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3506,36 +3620,56 @@ func (x *GetCellInfoNamesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetCellInfoNamesRequest.ProtoReflect.Descriptor instead. 
-func (*GetCellInfoNamesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{54} +// Deprecated: Use DeleteTabletsResponse.ProtoReflect.Descriptor instead. +func (*DeleteTabletsResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{46} } -type GetCellInfoNamesResponse struct { +type EmergencyReparentShardRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` + // Keyspace is the name of the keyspace to perform the Emergency Reparent in. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard is the name of the shard to perform the Emergency Reparent in. + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // Optional alias of a tablet that should become the new shard primary. If not + // not specified, the vtctld will select the most up-to-date canditate to + // promote. + NewPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=new_primary,json=newPrimary,proto3" json:"new_primary,omitempty"` + // List of replica aliases to ignore during the Emergency Reparent. The vtctld + // will not attempt to stop replication on these tablets, nor attempt to + // demote any that may think they are the shard primary. + IgnoreReplicas []*topodata.TabletAlias `protobuf:"bytes,4,rep,name=ignore_replicas,json=ignoreReplicas,proto3" json:"ignore_replicas,omitempty"` + // WaitReplicasTimeout is the duration of time to wait for replicas to catch + // up in reparenting. + WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,5,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` + // PreventCrossCellPromotion is used to only promote the new primary from the same cell + // as the failed primary. 
+ PreventCrossCellPromotion bool `protobuf:"varint,6,opt,name=prevent_cross_cell_promotion,json=preventCrossCellPromotion,proto3" json:"prevent_cross_cell_promotion,omitempty"` + // WaitForAllTablets makes ERS wait for a response from all the tablets before proceeding. + // Useful when all the tablets are up and reachable. + WaitForAllTablets bool `protobuf:"varint,7,opt,name=wait_for_all_tablets,json=waitForAllTablets,proto3" json:"wait_for_all_tablets,omitempty"` } -func (x *GetCellInfoNamesResponse) Reset() { - *x = GetCellInfoNamesResponse{} +func (x *EmergencyReparentShardRequest) Reset() { + *x = EmergencyReparentShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[55] + mi := &file_vtctldata_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetCellInfoNamesResponse) String() string { +func (x *EmergencyReparentShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetCellInfoNamesResponse) ProtoMessage() {} +func (*EmergencyReparentShardRequest) ProtoMessage() {} -func (x *GetCellInfoNamesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[55] +func (x *EmergencyReparentShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3546,81 +3680,94 @@ func (x *GetCellInfoNamesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetCellInfoNamesResponse.ProtoReflect.Descriptor instead. -func (*GetCellInfoNamesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{55} +// Deprecated: Use EmergencyReparentShardRequest.ProtoReflect.Descriptor instead. 
+func (*EmergencyReparentShardRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{47} } -func (x *GetCellInfoNamesResponse) GetNames() []string { +func (x *EmergencyReparentShardRequest) GetKeyspace() string { if x != nil { - return x.Names + return x.Keyspace } - return nil + return "" } -type GetCellsAliasesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *EmergencyReparentShardRequest) GetShard() string { + if x != nil { + return x.Shard + } + return "" } -func (x *GetCellsAliasesRequest) Reset() { - *x = GetCellsAliasesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[56] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *EmergencyReparentShardRequest) GetNewPrimary() *topodata.TabletAlias { + if x != nil { + return x.NewPrimary } + return nil } -func (x *GetCellsAliasesRequest) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *EmergencyReparentShardRequest) GetIgnoreReplicas() []*topodata.TabletAlias { + if x != nil { + return x.IgnoreReplicas + } + return nil } -func (*GetCellsAliasesRequest) ProtoMessage() {} +func (x *EmergencyReparentShardRequest) GetWaitReplicasTimeout() *vttime.Duration { + if x != nil { + return x.WaitReplicasTimeout + } + return nil +} -func (x *GetCellsAliasesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[56] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *EmergencyReparentShardRequest) GetPreventCrossCellPromotion() bool { + if x != nil { + return x.PreventCrossCellPromotion } - return mi.MessageOf(x) + return false } -// Deprecated: Use GetCellsAliasesRequest.ProtoReflect.Descriptor instead. 
-func (*GetCellsAliasesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{56} +func (x *EmergencyReparentShardRequest) GetWaitForAllTablets() bool { + if x != nil { + return x.WaitForAllTablets + } + return false } -type GetCellsAliasesResponse struct { +type EmergencyReparentShardResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Aliases map[string]*topodata.CellsAlias `protobuf:"bytes,1,rep,name=aliases,proto3" json:"aliases,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Keyspace is the name of the keyspace the Emergency Reparent took place in. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard is the name of the shard the Emergency Reparent took place in. + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // PromotedPrimary is the alias of the tablet that was promoted to shard + // primary. If NewPrimary was set in the request, then this will be the same + // alias. Otherwise, it will be the alias of the tablet found to be most + // up-to-date. 
+ PromotedPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=promoted_primary,json=promotedPrimary,proto3" json:"promoted_primary,omitempty"` + Events []*logutil.Event `protobuf:"bytes,4,rep,name=events,proto3" json:"events,omitempty"` } -func (x *GetCellsAliasesResponse) Reset() { - *x = GetCellsAliasesResponse{} +func (x *EmergencyReparentShardResponse) Reset() { + *x = EmergencyReparentShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[57] + mi := &file_vtctldata_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetCellsAliasesResponse) String() string { +func (x *EmergencyReparentShardResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetCellsAliasesResponse) ProtoMessage() {} +func (*EmergencyReparentShardResponse) ProtoMessage() {} -func (x *GetCellsAliasesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[57] +func (x *EmergencyReparentShardResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3631,43 +3778,74 @@ func (x *GetCellsAliasesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetCellsAliasesResponse.ProtoReflect.Descriptor instead. -func (*GetCellsAliasesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{57} +// Deprecated: Use EmergencyReparentShardResponse.ProtoReflect.Descriptor instead. 
+func (*EmergencyReparentShardResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{48} } -func (x *GetCellsAliasesResponse) GetAliases() map[string]*topodata.CellsAlias { +func (x *EmergencyReparentShardResponse) GetKeyspace() string { if x != nil { - return x.Aliases + return x.Keyspace + } + return "" +} + +func (x *EmergencyReparentShardResponse) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *EmergencyReparentShardResponse) GetPromotedPrimary() *topodata.TabletAlias { + if x != nil { + return x.PromotedPrimary } return nil } -type GetFullStatusRequest struct { +func (x *EmergencyReparentShardResponse) GetEvents() []*logutil.Event { + if x != nil { + return x.Events + } + return nil +} + +type ExecuteFetchAsAppRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Query string `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"` + // MaxRows is an optional parameter to limit the number of rows read into the + // QueryResult. Note that this does not apply a LIMIT to the query, just how + // many rows are read from the MySQL server on the tablet side. + // + // This field is optional. Specifying a non-positive value will use whatever + // default is configured in the VtctldService. + MaxRows int64 `protobuf:"varint,3,opt,name=max_rows,json=maxRows,proto3" json:"max_rows,omitempty"` + // UsePool causes the query to be run with a pooled connection to the tablet. 
+ UsePool bool `protobuf:"varint,4,opt,name=use_pool,json=usePool,proto3" json:"use_pool,omitempty"` } -func (x *GetFullStatusRequest) Reset() { - *x = GetFullStatusRequest{} +func (x *ExecuteFetchAsAppRequest) Reset() { + *x = ExecuteFetchAsAppRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[58] + mi := &file_vtctldata_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetFullStatusRequest) String() string { +func (x *ExecuteFetchAsAppRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetFullStatusRequest) ProtoMessage() {} +func (*ExecuteFetchAsAppRequest) ProtoMessage() {} -func (x *GetFullStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[58] +func (x *ExecuteFetchAsAppRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3678,43 +3856,64 @@ func (x *GetFullStatusRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetFullStatusRequest.ProtoReflect.Descriptor instead. -func (*GetFullStatusRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{58} +// Deprecated: Use ExecuteFetchAsAppRequest.ProtoReflect.Descriptor instead. 
+func (*ExecuteFetchAsAppRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{49} } -func (x *GetFullStatusRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *ExecuteFetchAsAppRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { return x.TabletAlias } return nil } -type GetFullStatusResponse struct { +func (x *ExecuteFetchAsAppRequest) GetQuery() string { + if x != nil { + return x.Query + } + return "" +} + +func (x *ExecuteFetchAsAppRequest) GetMaxRows() int64 { + if x != nil { + return x.MaxRows + } + return 0 +} + +func (x *ExecuteFetchAsAppRequest) GetUsePool() bool { + if x != nil { + return x.UsePool + } + return false +} + +type ExecuteFetchAsAppResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Status *replicationdata.FullStatus `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` } -func (x *GetFullStatusResponse) Reset() { - *x = GetFullStatusResponse{} +func (x *ExecuteFetchAsAppResponse) Reset() { + *x = ExecuteFetchAsAppResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[59] + mi := &file_vtctldata_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetFullStatusResponse) String() string { +func (x *ExecuteFetchAsAppResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetFullStatusResponse) ProtoMessage() {} +func (*ExecuteFetchAsAppResponse) ProtoMessage() {} -func (x *GetFullStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[59] +func (x *ExecuteFetchAsAppResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { 
@@ -3725,41 +3924,57 @@ func (x *GetFullStatusResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetFullStatusResponse.ProtoReflect.Descriptor instead. -func (*GetFullStatusResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{59} +// Deprecated: Use ExecuteFetchAsAppResponse.ProtoReflect.Descriptor instead. +func (*ExecuteFetchAsAppResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{50} } -func (x *GetFullStatusResponse) GetStatus() *replicationdata.FullStatus { +func (x *ExecuteFetchAsAppResponse) GetResult() *query.QueryResult { if x != nil { - return x.Status + return x.Result } return nil } -type GetKeyspacesRequest struct { +type ExecuteFetchAsDBARequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields -} -func (x *GetKeyspacesRequest) Reset() { - *x = GetKeyspacesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[60] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Query string `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"` + // MaxRows is an optional parameter to limit the number of rows read into the + // QueryResult. Note that this does not apply a LIMIT to the query, just how + // many rows are read from the MySQL server on the tablet side. + // + // This field is optional. Specifying a non-positive value will use whatever + // default is configured in the VtctldService. + MaxRows int64 `protobuf:"varint,3,opt,name=max_rows,json=maxRows,proto3" json:"max_rows,omitempty"` + // DisableBinlogs instructs the tablet not to use binary logging when + // executing the query. 
+ DisableBinlogs bool `protobuf:"varint,4,opt,name=disable_binlogs,json=disableBinlogs,proto3" json:"disable_binlogs,omitempty"` + // ReloadSchema instructs the tablet to reload its schema after executing the + // query. + ReloadSchema bool `protobuf:"varint,5,opt,name=reload_schema,json=reloadSchema,proto3" json:"reload_schema,omitempty"` } -func (x *GetKeyspacesRequest) String() string { +func (x *ExecuteFetchAsDBARequest) Reset() { + *x = ExecuteFetchAsDBARequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExecuteFetchAsDBARequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetKeyspacesRequest) ProtoMessage() {} +func (*ExecuteFetchAsDBARequest) ProtoMessage() {} -func (x *GetKeyspacesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[60] +func (x *ExecuteFetchAsDBARequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3770,36 +3985,71 @@ func (x *GetKeyspacesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetKeyspacesRequest.ProtoReflect.Descriptor instead. -func (*GetKeyspacesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{60} +// Deprecated: Use ExecuteFetchAsDBARequest.ProtoReflect.Descriptor instead. 
+func (*ExecuteFetchAsDBARequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{51} } -type GetKeyspacesResponse struct { +func (x *ExecuteFetchAsDBARequest) GetTabletAlias() *topodata.TabletAlias { + if x != nil { + return x.TabletAlias + } + return nil +} + +func (x *ExecuteFetchAsDBARequest) GetQuery() string { + if x != nil { + return x.Query + } + return "" +} + +func (x *ExecuteFetchAsDBARequest) GetMaxRows() int64 { + if x != nil { + return x.MaxRows + } + return 0 +} + +func (x *ExecuteFetchAsDBARequest) GetDisableBinlogs() bool { + if x != nil { + return x.DisableBinlogs + } + return false +} + +func (x *ExecuteFetchAsDBARequest) GetReloadSchema() bool { + if x != nil { + return x.ReloadSchema + } + return false +} + +type ExecuteFetchAsDBAResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspaces []*Keyspace `protobuf:"bytes,1,rep,name=keyspaces,proto3" json:"keyspaces,omitempty"` + Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` } -func (x *GetKeyspacesResponse) Reset() { - *x = GetKeyspacesResponse{} +func (x *ExecuteFetchAsDBAResponse) Reset() { + *x = ExecuteFetchAsDBAResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[61] + mi := &file_vtctldata_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetKeyspacesResponse) String() string { +func (x *ExecuteFetchAsDBAResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetKeyspacesResponse) ProtoMessage() {} +func (*ExecuteFetchAsDBAResponse) ProtoMessage() {} -func (x *GetKeyspacesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[61] +func (x *ExecuteFetchAsDBAResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3810,43 +4060,44 @@ func (x *GetKeyspacesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetKeyspacesResponse.ProtoReflect.Descriptor instead. -func (*GetKeyspacesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{61} +// Deprecated: Use ExecuteFetchAsDBAResponse.ProtoReflect.Descriptor instead. +func (*ExecuteFetchAsDBAResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{52} } -func (x *GetKeyspacesResponse) GetKeyspaces() []*Keyspace { +func (x *ExecuteFetchAsDBAResponse) GetResult() *query.QueryResult { if x != nil { - return x.Keyspaces + return x.Result } return nil } -type GetKeyspaceRequest struct { +type ExecuteHookRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + TabletHookRequest *tabletmanagerdata.ExecuteHookRequest `protobuf:"bytes,2,opt,name=tablet_hook_request,json=tabletHookRequest,proto3" json:"tablet_hook_request,omitempty"` } -func (x *GetKeyspaceRequest) Reset() { - *x = GetKeyspaceRequest{} +func (x *ExecuteHookRequest) Reset() { + *x = ExecuteHookRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[62] + mi := &file_vtctldata_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetKeyspaceRequest) String() string { +func (x *ExecuteHookRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetKeyspaceRequest) ProtoMessage() {} +func (*ExecuteHookRequest) ProtoMessage() {} -func (x *GetKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := 
&file_vtctldata_proto_msgTypes[62] +func (x *ExecuteHookRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3857,43 +4108,50 @@ func (x *GetKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*GetKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{62} +// Deprecated: Use ExecuteHookRequest.ProtoReflect.Descriptor instead. +func (*ExecuteHookRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{53} } -func (x *GetKeyspaceRequest) GetKeyspace() string { +func (x *ExecuteHookRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Keyspace + return x.TabletAlias } - return "" + return nil } -type GetKeyspaceResponse struct { +func (x *ExecuteHookRequest) GetTabletHookRequest() *tabletmanagerdata.ExecuteHookRequest { + if x != nil { + return x.TabletHookRequest + } + return nil +} + +type ExecuteHookResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + HookResult *tabletmanagerdata.ExecuteHookResponse `protobuf:"bytes,1,opt,name=hook_result,json=hookResult,proto3" json:"hook_result,omitempty"` } -func (x *GetKeyspaceResponse) Reset() { - *x = GetKeyspaceResponse{} +func (x *ExecuteHookResponse) Reset() { + *x = ExecuteHookResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[63] + mi := &file_vtctldata_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetKeyspaceResponse) String() string { +func (x *ExecuteHookResponse) String() string { return 
protoimpl.X.MessageStringOf(x) } -func (*GetKeyspaceResponse) ProtoMessage() {} +func (*ExecuteHookResponse) ProtoMessage() {} -func (x *GetKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[63] +func (x *ExecuteHookResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3904,43 +4162,43 @@ func (x *GetKeyspaceResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetKeyspaceResponse.ProtoReflect.Descriptor instead. -func (*GetKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{63} +// Deprecated: Use ExecuteHookResponse.ProtoReflect.Descriptor instead. +func (*ExecuteHookResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{54} } -func (x *GetKeyspaceResponse) GetKeyspace() *Keyspace { +func (x *ExecuteHookResponse) GetHookResult() *tabletmanagerdata.ExecuteHookResponse { if x != nil { - return x.Keyspace + return x.HookResult } return nil } -type GetPermissionsRequest struct { +type FindAllShardsInKeyspaceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *GetPermissionsRequest) Reset() { - *x = GetPermissionsRequest{} +func (x *FindAllShardsInKeyspaceRequest) Reset() { + *x = FindAllShardsInKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[64] + mi := &file_vtctldata_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetPermissionsRequest) 
String() string { +func (x *FindAllShardsInKeyspaceRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetPermissionsRequest) ProtoMessage() {} +func (*FindAllShardsInKeyspaceRequest) ProtoMessage() {} -func (x *GetPermissionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[64] +func (x *FindAllShardsInKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3951,43 +4209,43 @@ func (x *GetPermissionsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetPermissionsRequest.ProtoReflect.Descriptor instead. -func (*GetPermissionsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{64} +// Deprecated: Use FindAllShardsInKeyspaceRequest.ProtoReflect.Descriptor instead. +func (*FindAllShardsInKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{55} } -func (x *GetPermissionsRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *FindAllShardsInKeyspaceRequest) GetKeyspace() string { if x != nil { - return x.TabletAlias + return x.Keyspace } - return nil + return "" } -type GetPermissionsResponse struct { +type FindAllShardsInKeyspaceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Permissions *tabletmanagerdata.Permissions `protobuf:"bytes,1,opt,name=permissions,proto3" json:"permissions,omitempty"` + Shards map[string]*Shard `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *GetPermissionsResponse) Reset() { - *x = GetPermissionsResponse{} +func (x *FindAllShardsInKeyspaceResponse) Reset() { + *x = FindAllShardsInKeyspaceResponse{} 
if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[65] + mi := &file_vtctldata_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetPermissionsResponse) String() string { +func (x *FindAllShardsInKeyspaceResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetPermissionsResponse) ProtoMessage() {} +func (*FindAllShardsInKeyspaceResponse) ProtoMessage() {} -func (x *GetPermissionsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[65] +func (x *FindAllShardsInKeyspaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3998,41 +4256,58 @@ func (x *GetPermissionsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetPermissionsResponse.ProtoReflect.Descriptor instead. -func (*GetPermissionsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{65} +// Deprecated: Use FindAllShardsInKeyspaceResponse.ProtoReflect.Descriptor instead. +func (*FindAllShardsInKeyspaceResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{56} } -func (x *GetPermissionsResponse) GetPermissions() *tabletmanagerdata.Permissions { +func (x *FindAllShardsInKeyspaceResponse) GetShards() map[string]*Shard { if x != nil { - return x.Permissions + return x.Shards } return nil } -type GetRoutingRulesRequest struct { +type GetBackupsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // Limit, if nonzero, will return only the most N recent backups. 
+ Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` + // Detailed indicates whether to use the backupengine, if supported, to + // populate additional fields, such as Engine and Status, on BackupInfo + // objects in the response. If not set, or if the backupengine does not + // support populating these fields, Engine will always be empty, and Status + // will always be UNKNOWN. + Detailed bool `protobuf:"varint,4,opt,name=detailed,proto3" json:"detailed,omitempty"` + // DetailedLimit, if nonzero, will only populate additional fields (see Detailed) + // on the N most recent backups. The Limit field still dictates the total + // number of backup info objects returned, so, in reality, min(Limit, DetailedLimit) + // backup infos will have additional fields set, and any remaining backups + // will not. + DetailedLimit uint32 `protobuf:"varint,5,opt,name=detailed_limit,json=detailedLimit,proto3" json:"detailed_limit,omitempty"` } -func (x *GetRoutingRulesRequest) Reset() { - *x = GetRoutingRulesRequest{} +func (x *GetBackupsRequest) Reset() { + *x = GetBackupsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[66] + mi := &file_vtctldata_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetRoutingRulesRequest) String() string { +func (x *GetBackupsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetRoutingRulesRequest) ProtoMessage() {} +func (*GetBackupsRequest) ProtoMessage() {} -func (x *GetRoutingRulesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[66] +func (x *GetBackupsRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4043,36 +4318,71 @@ func (x *GetRoutingRulesRequest) ProtoReflect() protoreflect.Message { return 
mi.MessageOf(x) } -// Deprecated: Use GetRoutingRulesRequest.ProtoReflect.Descriptor instead. -func (*GetRoutingRulesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{66} +// Deprecated: Use GetBackupsRequest.ProtoReflect.Descriptor instead. +func (*GetBackupsRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{57} } -type GetRoutingRulesResponse struct { +func (x *GetBackupsRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *GetBackupsRequest) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *GetBackupsRequest) GetLimit() uint32 { + if x != nil { + return x.Limit + } + return 0 +} + +func (x *GetBackupsRequest) GetDetailed() bool { + if x != nil { + return x.Detailed + } + return false +} + +func (x *GetBackupsRequest) GetDetailedLimit() uint32 { + if x != nil { + return x.DetailedLimit + } + return 0 +} + +type GetBackupsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - RoutingRules *vschema.RoutingRules `protobuf:"bytes,1,opt,name=routing_rules,json=routingRules,proto3" json:"routing_rules,omitempty"` + Backups []*mysqlctl.BackupInfo `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` } -func (x *GetRoutingRulesResponse) Reset() { - *x = GetRoutingRulesResponse{} +func (x *GetBackupsResponse) Reset() { + *x = GetBackupsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[67] + mi := &file_vtctldata_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetRoutingRulesResponse) String() string { +func (x *GetBackupsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetRoutingRulesResponse) ProtoMessage() {} +func (*GetBackupsResponse) ProtoMessage() {} -func (x *GetRoutingRulesResponse) ProtoReflect() 
protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[67] +func (x *GetBackupsResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4083,61 +4393,43 @@ func (x *GetRoutingRulesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetRoutingRulesResponse.ProtoReflect.Descriptor instead. -func (*GetRoutingRulesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{67} +// Deprecated: Use GetBackupsResponse.ProtoReflect.Descriptor instead. +func (*GetBackupsResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{58} } -func (x *GetRoutingRulesResponse) GetRoutingRules() *vschema.RoutingRules { +func (x *GetBackupsResponse) GetBackups() []*mysqlctl.BackupInfo { if x != nil { - return x.RoutingRules + return x.Backups } return nil } -type GetSchemaRequest struct { +type GetCellInfoRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - // Tables is a list of tables for which we should gather information. Each is - // either an exact match, or a regular expression of the form /regexp/. - Tables []string `protobuf:"bytes,2,rep,name=tables,proto3" json:"tables,omitempty"` - // ExcludeTables is a list of tables to exclude from the result. Each is - // either an exact match, or a regular expression of the form /regexp/. - ExcludeTables []string `protobuf:"bytes,3,rep,name=exclude_tables,json=excludeTables,proto3" json:"exclude_tables,omitempty"` - // IncludeViews specifies whether to include views in the result. 
- IncludeViews bool `protobuf:"varint,4,opt,name=include_views,json=includeViews,proto3" json:"include_views,omitempty"` - // TableNamesOnly specifies whether to limit the results to just table names, - // rather than full schema information for each table. - TableNamesOnly bool `protobuf:"varint,5,opt,name=table_names_only,json=tableNamesOnly,proto3" json:"table_names_only,omitempty"` - // TableSizesOnly specifies whether to limit the results to just table sizes, - // rather than full schema information for each table. It is ignored if - // TableNamesOnly is set to true. - TableSizesOnly bool `protobuf:"varint,6,opt,name=table_sizes_only,json=tableSizesOnly,proto3" json:"table_sizes_only,omitempty"` - // TableSchemaOnly specifies whether to limit the results to just table/view - // schema definition (CREATE TABLE/VIEW statements) and skip column/field information - TableSchemaOnly bool `protobuf:"varint,7,opt,name=table_schema_only,json=tableSchemaOnly,proto3" json:"table_schema_only,omitempty"` + Cell string `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"` } -func (x *GetSchemaRequest) Reset() { - *x = GetSchemaRequest{} +func (x *GetCellInfoRequest) Reset() { + *x = GetCellInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[68] + mi := &file_vtctldata_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSchemaRequest) String() string { +func (x *GetCellInfoRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSchemaRequest) ProtoMessage() {} +func (*GetCellInfoRequest) ProtoMessage() {} -func (x *GetSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[68] +func (x *GetCellInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ 
-4148,85 +4440,43 @@ func (x *GetSchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSchemaRequest.ProtoReflect.Descriptor instead. -func (*GetSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{68} -} - -func (x *GetSchemaRequest) GetTabletAlias() *topodata.TabletAlias { - if x != nil { - return x.TabletAlias - } - return nil -} - -func (x *GetSchemaRequest) GetTables() []string { - if x != nil { - return x.Tables - } - return nil -} - -func (x *GetSchemaRequest) GetExcludeTables() []string { - if x != nil { - return x.ExcludeTables - } - return nil -} - -func (x *GetSchemaRequest) GetIncludeViews() bool { - if x != nil { - return x.IncludeViews - } - return false -} - -func (x *GetSchemaRequest) GetTableNamesOnly() bool { - if x != nil { - return x.TableNamesOnly - } - return false -} - -func (x *GetSchemaRequest) GetTableSizesOnly() bool { - if x != nil { - return x.TableSizesOnly - } - return false +// Deprecated: Use GetCellInfoRequest.ProtoReflect.Descriptor instead. 
+func (*GetCellInfoRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{59} } -func (x *GetSchemaRequest) GetTableSchemaOnly() bool { +func (x *GetCellInfoRequest) GetCell() string { if x != nil { - return x.TableSchemaOnly + return x.Cell } - return false + return "" } -type GetSchemaResponse struct { +type GetCellInfoResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Schema *tabletmanagerdata.SchemaDefinition `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"` + CellInfo *topodata.CellInfo `protobuf:"bytes,1,opt,name=cell_info,json=cellInfo,proto3" json:"cell_info,omitempty"` } -func (x *GetSchemaResponse) Reset() { - *x = GetSchemaResponse{} +func (x *GetCellInfoResponse) Reset() { + *x = GetCellInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[69] + mi := &file_vtctldata_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSchemaResponse) String() string { +func (x *GetCellInfoResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSchemaResponse) ProtoMessage() {} +func (*GetCellInfoResponse) ProtoMessage() {} -func (x *GetSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[69] +func (x *GetCellInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4237,44 +4487,41 @@ func (x *GetSchemaResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSchemaResponse.ProtoReflect.Descriptor instead. -func (*GetSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{69} +// Deprecated: Use GetCellInfoResponse.ProtoReflect.Descriptor instead. 
+func (*GetCellInfoResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{60} } -func (x *GetSchemaResponse) GetSchema() *tabletmanagerdata.SchemaDefinition { +func (x *GetCellInfoResponse) GetCellInfo() *topodata.CellInfo { if x != nil { - return x.Schema + return x.CellInfo } return nil } -type GetShardRequest struct { +type GetCellInfoNamesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` } -func (x *GetShardRequest) Reset() { - *x = GetShardRequest{} +func (x *GetCellInfoNamesRequest) Reset() { + *x = GetCellInfoNamesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[70] + mi := &file_vtctldata_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetShardRequest) String() string { +func (x *GetCellInfoNamesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetShardRequest) ProtoMessage() {} +func (*GetCellInfoNamesRequest) ProtoMessage() {} -func (x *GetShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[70] +func (x *GetCellInfoNamesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[61] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4285,50 +4532,36 @@ func (x *GetShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetShardRequest.ProtoReflect.Descriptor instead. 
-func (*GetShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{70} -} - -func (x *GetShardRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *GetShardRequest) GetShardName() string { - if x != nil { - return x.ShardName - } - return "" +// Deprecated: Use GetCellInfoNamesRequest.ProtoReflect.Descriptor instead. +func (*GetCellInfoNamesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{61} } -type GetShardResponse struct { +type GetCellInfoNamesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Shard *Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` + Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` } -func (x *GetShardResponse) Reset() { - *x = GetShardResponse{} +func (x *GetCellInfoNamesResponse) Reset() { + *x = GetCellInfoNamesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[71] + mi := &file_vtctldata_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetShardResponse) String() string { +func (x *GetCellInfoNamesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetShardResponse) ProtoMessage() {} +func (*GetCellInfoNamesResponse) ProtoMessage() {} -func (x *GetShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[71] +func (x *GetCellInfoNamesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4339,41 +4572,41 @@ func (x *GetShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetShardResponse.ProtoReflect.Descriptor instead. 
-func (*GetShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{71} +// Deprecated: Use GetCellInfoNamesResponse.ProtoReflect.Descriptor instead. +func (*GetCellInfoNamesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{62} } -func (x *GetShardResponse) GetShard() *Shard { +func (x *GetCellInfoNamesResponse) GetNames() []string { if x != nil { - return x.Shard + return x.Names } return nil } -type GetShardRoutingRulesRequest struct { +type GetCellsAliasesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } -func (x *GetShardRoutingRulesRequest) Reset() { - *x = GetShardRoutingRulesRequest{} +func (x *GetCellsAliasesRequest) Reset() { + *x = GetCellsAliasesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[72] + mi := &file_vtctldata_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetShardRoutingRulesRequest) String() string { +func (x *GetCellsAliasesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetShardRoutingRulesRequest) ProtoMessage() {} +func (*GetCellsAliasesRequest) ProtoMessage() {} -func (x *GetShardRoutingRulesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[72] +func (x *GetCellsAliasesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4384,36 +4617,36 @@ func (x *GetShardRoutingRulesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetShardRoutingRulesRequest.ProtoReflect.Descriptor instead. 
-func (*GetShardRoutingRulesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{72} +// Deprecated: Use GetCellsAliasesRequest.ProtoReflect.Descriptor instead. +func (*GetCellsAliasesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{63} } -type GetShardRoutingRulesResponse struct { +type GetCellsAliasesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ShardRoutingRules *vschema.ShardRoutingRules `protobuf:"bytes,1,opt,name=shard_routing_rules,json=shardRoutingRules,proto3" json:"shard_routing_rules,omitempty"` + Aliases map[string]*topodata.CellsAlias `protobuf:"bytes,1,rep,name=aliases,proto3" json:"aliases,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *GetShardRoutingRulesResponse) Reset() { - *x = GetShardRoutingRulesResponse{} +func (x *GetCellsAliasesResponse) Reset() { + *x = GetCellsAliasesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[73] + mi := &file_vtctldata_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetShardRoutingRulesResponse) String() string { +func (x *GetCellsAliasesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetShardRoutingRulesResponse) ProtoMessage() {} +func (*GetCellsAliasesResponse) ProtoMessage() {} -func (x *GetShardRoutingRulesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[73] +func (x *GetCellsAliasesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4424,43 +4657,43 @@ func (x *GetShardRoutingRulesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use 
GetShardRoutingRulesResponse.ProtoReflect.Descriptor instead. -func (*GetShardRoutingRulesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{73} +// Deprecated: Use GetCellsAliasesResponse.ProtoReflect.Descriptor instead. +func (*GetCellsAliasesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{64} } -func (x *GetShardRoutingRulesResponse) GetShardRoutingRules() *vschema.ShardRoutingRules { +func (x *GetCellsAliasesResponse) GetAliases() map[string]*topodata.CellsAlias { if x != nil { - return x.ShardRoutingRules + return x.Aliases } return nil } -type GetSrvKeyspaceNamesRequest struct { +type GetFullStatusRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Cells []string `protobuf:"bytes,1,rep,name=cells,proto3" json:"cells,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` } -func (x *GetSrvKeyspaceNamesRequest) Reset() { - *x = GetSrvKeyspaceNamesRequest{} +func (x *GetFullStatusRequest) Reset() { + *x = GetFullStatusRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[74] + mi := &file_vtctldata_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSrvKeyspaceNamesRequest) String() string { +func (x *GetFullStatusRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvKeyspaceNamesRequest) ProtoMessage() {} +func (*GetFullStatusRequest) ProtoMessage() {} -func (x *GetSrvKeyspaceNamesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[74] +func (x *GetFullStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ 
-4471,44 +4704,43 @@ func (x *GetSrvKeyspaceNamesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvKeyspaceNamesRequest.ProtoReflect.Descriptor instead. -func (*GetSrvKeyspaceNamesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{74} +// Deprecated: Use GetFullStatusRequest.ProtoReflect.Descriptor instead. +func (*GetFullStatusRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{65} } -func (x *GetSrvKeyspaceNamesRequest) GetCells() []string { +func (x *GetFullStatusRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Cells + return x.TabletAlias } return nil } -type GetSrvKeyspaceNamesResponse struct { +type GetFullStatusResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Names is a mapping of cell name to a list of SrvKeyspace names. - Names map[string]*GetSrvKeyspaceNamesResponse_NameList `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Status *replicationdata.FullStatus `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` } -func (x *GetSrvKeyspaceNamesResponse) Reset() { - *x = GetSrvKeyspaceNamesResponse{} +func (x *GetFullStatusResponse) Reset() { + *x = GetFullStatusResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[75] + mi := &file_vtctldata_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSrvKeyspaceNamesResponse) String() string { +func (x *GetFullStatusResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvKeyspaceNamesResponse) ProtoMessage() {} +func (*GetFullStatusResponse) ProtoMessage() {} -func (x *GetSrvKeyspaceNamesResponse) ProtoReflect() protoreflect.Message { - mi := 
&file_vtctldata_proto_msgTypes[75] +func (x *GetFullStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[66] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4519,46 +4751,41 @@ func (x *GetSrvKeyspaceNamesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvKeyspaceNamesResponse.ProtoReflect.Descriptor instead. -func (*GetSrvKeyspaceNamesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{75} +// Deprecated: Use GetFullStatusResponse.ProtoReflect.Descriptor instead. +func (*GetFullStatusResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{66} } -func (x *GetSrvKeyspaceNamesResponse) GetNames() map[string]*GetSrvKeyspaceNamesResponse_NameList { +func (x *GetFullStatusResponse) GetStatus() *replicationdata.FullStatus { if x != nil { - return x.Names + return x.Status } return nil } -type GetSrvKeyspacesRequest struct { +type GetKeyspacesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Cells is a list of cells to lookup a SrvKeyspace for. Leaving this empty is - // equivalent to specifying all cells in the topo. 
- Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` } -func (x *GetSrvKeyspacesRequest) Reset() { - *x = GetSrvKeyspacesRequest{} +func (x *GetKeyspacesRequest) Reset() { + *x = GetKeyspacesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[76] + mi := &file_vtctldata_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSrvKeyspacesRequest) String() string { +func (x *GetKeyspacesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvKeyspacesRequest) ProtoMessage() {} +func (*GetKeyspacesRequest) ProtoMessage() {} -func (x *GetSrvKeyspacesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[76] +func (x *GetKeyspacesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[67] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4569,51 +4796,36 @@ func (x *GetSrvKeyspacesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvKeyspacesRequest.ProtoReflect.Descriptor instead. -func (*GetSrvKeyspacesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{76} -} - -func (x *GetSrvKeyspacesRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *GetSrvKeyspacesRequest) GetCells() []string { - if x != nil { - return x.Cells - } - return nil +// Deprecated: Use GetKeyspacesRequest.ProtoReflect.Descriptor instead. +func (*GetKeyspacesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{67} } -type GetSrvKeyspacesResponse struct { +type GetKeyspacesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // SrvKeyspaces is a mapping of cell name to SrvKeyspace. 
- SrvKeyspaces map[string]*topodata.SrvKeyspace `protobuf:"bytes,1,rep,name=srv_keyspaces,json=srvKeyspaces,proto3" json:"srv_keyspaces,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Keyspaces []*Keyspace `protobuf:"bytes,1,rep,name=keyspaces,proto3" json:"keyspaces,omitempty"` } -func (x *GetSrvKeyspacesResponse) Reset() { - *x = GetSrvKeyspacesResponse{} +func (x *GetKeyspacesResponse) Reset() { + *x = GetKeyspacesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[77] + mi := &file_vtctldata_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSrvKeyspacesResponse) String() string { +func (x *GetKeyspacesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvKeyspacesResponse) ProtoMessage() {} +func (*GetKeyspacesResponse) ProtoMessage() {} -func (x *GetSrvKeyspacesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[77] +func (x *GetKeyspacesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[68] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4624,57 +4836,43 @@ func (x *GetSrvKeyspacesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvKeyspacesResponse.ProtoReflect.Descriptor instead. -func (*GetSrvKeyspacesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{77} +// Deprecated: Use GetKeyspacesResponse.ProtoReflect.Descriptor instead. 
+func (*GetKeyspacesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{68} } -func (x *GetSrvKeyspacesResponse) GetSrvKeyspaces() map[string]*topodata.SrvKeyspace { +func (x *GetKeyspacesResponse) GetKeyspaces() []*Keyspace { if x != nil { - return x.SrvKeyspaces + return x.Keyspaces } return nil } -type UpdateThrottlerConfigRequest struct { +type GetKeyspaceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Enable instructs to enable the throttler - Enable bool `protobuf:"varint,2,opt,name=enable,proto3" json:"enable,omitempty"` - // Disable instructs to disable the throttler - Disable bool `protobuf:"varint,3,opt,name=disable,proto3" json:"disable,omitempty"` - // Threshold for throttler (with no custom query, ie using default query, only positive values are considered) - Threshold float64 `protobuf:"fixed64,4,opt,name=threshold,proto3" json:"threshold,omitempty"` - // CustomQuery replaces the default replication lag query - CustomQuery string `protobuf:"bytes,5,opt,name=custom_query,json=customQuery,proto3" json:"custom_query,omitempty"` - // CustomQuerySet indicates that the value of CustomQuery has changed - CustomQuerySet bool `protobuf:"varint,6,opt,name=custom_query_set,json=customQuerySet,proto3" json:"custom_query_set,omitempty"` - // CheckAsCheckSelf instructs the throttler to respond to /check requests by checking the tablet's own health - CheckAsCheckSelf bool `protobuf:"varint,7,opt,name=check_as_check_self,json=checkAsCheckSelf,proto3" json:"check_as_check_self,omitempty"` - // CheckAsCheckShard instructs the throttler to respond to /check requests by checking the shard's health (this is the default behavior) - CheckAsCheckShard bool `protobuf:"varint,8,opt,name=check_as_check_shard,json=checkAsCheckShard,proto3" json:"check_as_check_shard,omitempty"` } 
-func (x *UpdateThrottlerConfigRequest) Reset() { - *x = UpdateThrottlerConfigRequest{} +func (x *GetKeyspaceRequest) Reset() { + *x = GetKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[78] + mi := &file_vtctldata_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *UpdateThrottlerConfigRequest) String() string { +func (x *GetKeyspaceRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateThrottlerConfigRequest) ProtoMessage() {} +func (*GetKeyspaceRequest) ProtoMessage() {} -func (x *UpdateThrottlerConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[78] +func (x *GetKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[69] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4685,90 +4883,90 @@ func (x *UpdateThrottlerConfigRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateThrottlerConfigRequest.ProtoReflect.Descriptor instead. -func (*UpdateThrottlerConfigRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{78} +// Deprecated: Use GetKeyspaceRequest.ProtoReflect.Descriptor instead. 
+func (*GetKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{69} } -func (x *UpdateThrottlerConfigRequest) GetKeyspace() string { +func (x *GetKeyspaceRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *UpdateThrottlerConfigRequest) GetEnable() bool { - if x != nil { - return x.Enable - } - return false -} +type GetKeyspaceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (x *UpdateThrottlerConfigRequest) GetDisable() bool { - if x != nil { - return x.Disable - } - return false + Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *UpdateThrottlerConfigRequest) GetThreshold() float64 { - if x != nil { - return x.Threshold +func (x *GetKeyspaceResponse) Reset() { + *x = GetKeyspaceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func (x *UpdateThrottlerConfigRequest) GetCustomQuery() string { - if x != nil { - return x.CustomQuery - } - return "" +func (x *GetKeyspaceResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (x *UpdateThrottlerConfigRequest) GetCustomQuerySet() bool { - if x != nil { - return x.CustomQuerySet - } - return false -} +func (*GetKeyspaceResponse) ProtoMessage() {} -func (x *UpdateThrottlerConfigRequest) GetCheckAsCheckSelf() bool { - if x != nil { - return x.CheckAsCheckSelf +func (x *GetKeyspaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[70] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return false + return mi.MessageOf(x) } -func (x *UpdateThrottlerConfigRequest) GetCheckAsCheckShard() bool { +// 
Deprecated: Use GetKeyspaceResponse.ProtoReflect.Descriptor instead. +func (*GetKeyspaceResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{70} +} + +func (x *GetKeyspaceResponse) GetKeyspace() *Keyspace { if x != nil { - return x.CheckAsCheckShard + return x.Keyspace } - return false + return nil } -type UpdateThrottlerConfigResponse struct { +type GetPermissionsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` } -func (x *UpdateThrottlerConfigResponse) Reset() { - *x = UpdateThrottlerConfigResponse{} +func (x *GetPermissionsRequest) Reset() { + *x = GetPermissionsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[79] + mi := &file_vtctldata_proto_msgTypes[71] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *UpdateThrottlerConfigResponse) String() string { +func (x *GetPermissionsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateThrottlerConfigResponse) ProtoMessage() {} +func (*GetPermissionsRequest) ProtoMessage() {} -func (x *UpdateThrottlerConfigResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[79] +func (x *GetPermissionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[71] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4779,36 +4977,43 @@ func (x *UpdateThrottlerConfigResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateThrottlerConfigResponse.ProtoReflect.Descriptor instead. 
-func (*UpdateThrottlerConfigResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{79} +// Deprecated: Use GetPermissionsRequest.ProtoReflect.Descriptor instead. +func (*GetPermissionsRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{71} } -type GetSrvVSchemaRequest struct { +func (x *GetPermissionsRequest) GetTabletAlias() *topodata.TabletAlias { + if x != nil { + return x.TabletAlias + } + return nil +} + +type GetPermissionsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Cell string `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"` + Permissions *tabletmanagerdata.Permissions `protobuf:"bytes,1,opt,name=permissions,proto3" json:"permissions,omitempty"` } -func (x *GetSrvVSchemaRequest) Reset() { - *x = GetSrvVSchemaRequest{} +func (x *GetPermissionsResponse) Reset() { + *x = GetPermissionsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[80] + mi := &file_vtctldata_proto_msgTypes[72] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSrvVSchemaRequest) String() string { +func (x *GetPermissionsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvVSchemaRequest) ProtoMessage() {} +func (*GetPermissionsResponse) ProtoMessage() {} -func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[80] +func (x *GetPermissionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[72] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4819,43 +5024,41 @@ func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvVSchemaRequest.ProtoReflect.Descriptor instead. 
-func (*GetSrvVSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{80} +// Deprecated: Use GetPermissionsResponse.ProtoReflect.Descriptor instead. +func (*GetPermissionsResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{72} } -func (x *GetSrvVSchemaRequest) GetCell() string { +func (x *GetPermissionsResponse) GetPermissions() *tabletmanagerdata.Permissions { if x != nil { - return x.Cell + return x.Permissions } - return "" + return nil } -type GetSrvVSchemaResponse struct { +type GetRoutingRulesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - SrvVSchema *vschema.SrvVSchema `protobuf:"bytes,1,opt,name=srv_v_schema,json=srvVSchema,proto3" json:"srv_v_schema,omitempty"` } -func (x *GetSrvVSchemaResponse) Reset() { - *x = GetSrvVSchemaResponse{} +func (x *GetRoutingRulesRequest) Reset() { + *x = GetRoutingRulesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[81] + mi := &file_vtctldata_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSrvVSchemaResponse) String() string { +func (x *GetRoutingRulesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvVSchemaResponse) ProtoMessage() {} +func (*GetRoutingRulesRequest) ProtoMessage() {} -func (x *GetSrvVSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[81] +func (x *GetRoutingRulesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[73] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4866,43 +5069,36 @@ func (x *GetSrvVSchemaResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvVSchemaResponse.ProtoReflect.Descriptor instead. 
-func (*GetSrvVSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{81} -} - -func (x *GetSrvVSchemaResponse) GetSrvVSchema() *vschema.SrvVSchema { - if x != nil { - return x.SrvVSchema - } - return nil +// Deprecated: Use GetRoutingRulesRequest.ProtoReflect.Descriptor instead. +func (*GetRoutingRulesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{73} } -type GetSrvVSchemasRequest struct { +type GetRoutingRulesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` + RoutingRules *vschema.RoutingRules `protobuf:"bytes,1,opt,name=routing_rules,json=routingRules,proto3" json:"routing_rules,omitempty"` } -func (x *GetSrvVSchemasRequest) Reset() { - *x = GetSrvVSchemasRequest{} +func (x *GetRoutingRulesResponse) Reset() { + *x = GetRoutingRulesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[82] + mi := &file_vtctldata_proto_msgTypes[74] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSrvVSchemasRequest) String() string { +func (x *GetRoutingRulesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvVSchemasRequest) ProtoMessage() {} +func (*GetRoutingRulesResponse) ProtoMessage() {} -func (x *GetSrvVSchemasRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[82] +func (x *GetRoutingRulesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[74] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4913,44 +5109,61 @@ func (x *GetSrvVSchemasRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvVSchemasRequest.ProtoReflect.Descriptor instead. 
-func (*GetSrvVSchemasRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{82} +// Deprecated: Use GetRoutingRulesResponse.ProtoReflect.Descriptor instead. +func (*GetRoutingRulesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{74} } -func (x *GetSrvVSchemasRequest) GetCells() []string { +func (x *GetRoutingRulesResponse) GetRoutingRules() *vschema.RoutingRules { if x != nil { - return x.Cells + return x.RoutingRules } return nil } -type GetSrvVSchemasResponse struct { +type GetSchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // SrvVSchemas is a mapping of cell name to SrvVSchema - SrvVSchemas map[string]*vschema.SrvVSchema `protobuf:"bytes,1,rep,name=srv_v_schemas,json=srvVSchemas,proto3" json:"srv_v_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + // Tables is a list of tables for which we should gather information. Each is + // either an exact match, or a regular expression of the form /regexp/. + Tables []string `protobuf:"bytes,2,rep,name=tables,proto3" json:"tables,omitempty"` + // ExcludeTables is a list of tables to exclude from the result. Each is + // either an exact match, or a regular expression of the form /regexp/. + ExcludeTables []string `protobuf:"bytes,3,rep,name=exclude_tables,json=excludeTables,proto3" json:"exclude_tables,omitempty"` + // IncludeViews specifies whether to include views in the result. + IncludeViews bool `protobuf:"varint,4,opt,name=include_views,json=includeViews,proto3" json:"include_views,omitempty"` + // TableNamesOnly specifies whether to limit the results to just table names, + // rather than full schema information for each table. 
+ TableNamesOnly bool `protobuf:"varint,5,opt,name=table_names_only,json=tableNamesOnly,proto3" json:"table_names_only,omitempty"` + // TableSizesOnly specifies whether to limit the results to just table sizes, + // rather than full schema information for each table. It is ignored if + // TableNamesOnly is set to true. + TableSizesOnly bool `protobuf:"varint,6,opt,name=table_sizes_only,json=tableSizesOnly,proto3" json:"table_sizes_only,omitempty"` + // TableSchemaOnly specifies whether to limit the results to just table/view + // schema definition (CREATE TABLE/VIEW statements) and skip column/field information + TableSchemaOnly bool `protobuf:"varint,7,opt,name=table_schema_only,json=tableSchemaOnly,proto3" json:"table_schema_only,omitempty"` } -func (x *GetSrvVSchemasResponse) Reset() { - *x = GetSrvVSchemasResponse{} +func (x *GetSchemaRequest) Reset() { + *x = GetSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[83] + mi := &file_vtctldata_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSrvVSchemasResponse) String() string { +func (x *GetSchemaRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvVSchemasResponse) ProtoMessage() {} +func (*GetSchemaRequest) ProtoMessage() {} -func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[83] +func (x *GetSchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[75] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4961,43 +5174,85 @@ func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvVSchemasResponse.ProtoReflect.Descriptor instead. 
-func (*GetSrvVSchemasResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{83} +// Deprecated: Use GetSchemaRequest.ProtoReflect.Descriptor instead. +func (*GetSchemaRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{75} } -func (x *GetSrvVSchemasResponse) GetSrvVSchemas() map[string]*vschema.SrvVSchema { +func (x *GetSchemaRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.SrvVSchemas + return x.TabletAlias } return nil } -type GetTabletRequest struct { +func (x *GetSchemaRequest) GetTables() []string { + if x != nil { + return x.Tables + } + return nil +} + +func (x *GetSchemaRequest) GetExcludeTables() []string { + if x != nil { + return x.ExcludeTables + } + return nil +} + +func (x *GetSchemaRequest) GetIncludeViews() bool { + if x != nil { + return x.IncludeViews + } + return false +} + +func (x *GetSchemaRequest) GetTableNamesOnly() bool { + if x != nil { + return x.TableNamesOnly + } + return false +} + +func (x *GetSchemaRequest) GetTableSizesOnly() bool { + if x != nil { + return x.TableSizesOnly + } + return false +} + +func (x *GetSchemaRequest) GetTableSchemaOnly() bool { + if x != nil { + return x.TableSchemaOnly + } + return false +} + +type GetSchemaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Schema *tabletmanagerdata.SchemaDefinition `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"` } -func (x *GetTabletRequest) Reset() { - *x = GetTabletRequest{} +func (x *GetSchemaResponse) Reset() { + *x = GetSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[84] + mi := &file_vtctldata_proto_msgTypes[76] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x 
*GetTabletRequest) String() string { +func (x *GetSchemaResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetTabletRequest) ProtoMessage() {} +func (*GetSchemaResponse) ProtoMessage() {} -func (x *GetTabletRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[84] +func (x *GetSchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[76] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5008,43 +5263,68 @@ func (x *GetTabletRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetTabletRequest.ProtoReflect.Descriptor instead. -func (*GetTabletRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{84} +// Deprecated: Use GetSchemaResponse.ProtoReflect.Descriptor instead. +func (*GetSchemaResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{76} } -func (x *GetTabletRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *GetSchemaResponse) GetSchema() *tabletmanagerdata.SchemaDefinition { if x != nil { - return x.TabletAlias + return x.Schema } return nil } -type GetTabletResponse struct { +// GetSchemaMigrationsRequest controls the behavior of the GetSchemaMigrations +// rpc. +// +// Keyspace is a required field, while all other fields are optional. +// +// If UUID is set, other optional fields will be ignored, since there will be at +// most one migration with that UUID. Furthermore, if no migration with that +// UUID exists, an empty response, not an error, is returned. +// +// MigrationContext, Status, and Recent are mutually exclusive. 
+type GetSchemaMigrationsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Tablet *topodata.Tablet `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` -} - -func (x *GetTabletResponse) Reset() { - *x = GetTabletResponse{} + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Uuid, if set, will cause GetSchemaMigrations to return exactly 1 migration, + // namely the one with that UUID. If no migration exists, the response will + // be an empty slice, not an error. + // + // If this field is set, other fields (status filters, limit, skip, order) are + // ignored. + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` + MigrationContext string `protobuf:"bytes,3,opt,name=migration_context,json=migrationContext,proto3" json:"migration_context,omitempty"` + Status SchemaMigration_Status `protobuf:"varint,4,opt,name=status,proto3,enum=vtctldata.SchemaMigration_Status" json:"status,omitempty"` + // Recent, if set, returns migrations requested between now and the provided + // value. 
+ Recent *vttime.Duration `protobuf:"bytes,5,opt,name=recent,proto3" json:"recent,omitempty"` + Order QueryOrdering `protobuf:"varint,6,opt,name=order,proto3,enum=vtctldata.QueryOrdering" json:"order,omitempty"` + Limit uint64 `protobuf:"varint,7,opt,name=limit,proto3" json:"limit,omitempty"` + Skip uint64 `protobuf:"varint,8,opt,name=skip,proto3" json:"skip,omitempty"` +} + +func (x *GetSchemaMigrationsRequest) Reset() { + *x = GetSchemaMigrationsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[85] + mi := &file_vtctldata_proto_msgTypes[77] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetTabletResponse) String() string { +func (x *GetSchemaMigrationsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetTabletResponse) ProtoMessage() {} +func (*GetSchemaMigrationsRequest) ProtoMessage() {} -func (x *GetTabletResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[85] +func (x *GetSchemaMigrationsRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[77] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5055,146 +5335,92 @@ func (x *GetTabletResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetTabletResponse.ProtoReflect.Descriptor instead. -func (*GetTabletResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{85} +// Deprecated: Use GetSchemaMigrationsRequest.ProtoReflect.Descriptor instead. 
+func (*GetSchemaMigrationsRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{77} } -func (x *GetTabletResponse) GetTablet() *topodata.Tablet { +func (x *GetSchemaMigrationsRequest) GetKeyspace() string { if x != nil { - return x.Tablet - } - return nil -} - -type GetTabletsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Keyspace is the name of the keyspace to return tablets for. Omit to return - // tablets from all keyspaces. - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Shard is the name of the shard to return tablets for. This field is ignored - // if Keyspace is not set. - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - // Cells is an optional set of cells to return tablets for. - Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` - // Strict specifies how the server should treat failures from individual - // cells. - // - // When false (the default), GetTablets will return data from any cells that - // return successfully, but will fail the request if all cells fail. When - // true, any individual cell can fail the full request. - Strict bool `protobuf:"varint,4,opt,name=strict,proto3" json:"strict,omitempty"` - // TabletAliases is an optional list of tablet aliases to fetch Tablet objects - // for. If specified, Keyspace, Shard, and Cells are ignored, and tablets are - // looked up by their respective aliases' Cells directly. - TabletAliases []*topodata.TabletAlias `protobuf:"bytes,5,rep,name=tablet_aliases,json=tabletAliases,proto3" json:"tablet_aliases,omitempty"` - // tablet_type specifies the type of tablets to return. Omit to return all - // tablet types. 
- TabletType topodata.TabletType `protobuf:"varint,6,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` -} - -func (x *GetTabletsRequest) Reset() { - *x = GetTabletsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[86] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + return x.Keyspace } + return "" } -func (x *GetTabletsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetTabletsRequest) ProtoMessage() {} - -func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[86] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *GetSchemaMigrationsRequest) GetUuid() string { + if x != nil { + return x.Uuid } - return mi.MessageOf(x) -} - -// Deprecated: Use GetTabletsRequest.ProtoReflect.Descriptor instead. 
-func (*GetTabletsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{86} + return "" } -func (x *GetTabletsRequest) GetKeyspace() string { +func (x *GetSchemaMigrationsRequest) GetMigrationContext() string { if x != nil { - return x.Keyspace + return x.MigrationContext } return "" } -func (x *GetTabletsRequest) GetShard() string { +func (x *GetSchemaMigrationsRequest) GetStatus() SchemaMigration_Status { if x != nil { - return x.Shard + return x.Status } - return "" + return SchemaMigration_UNKNOWN } -func (x *GetTabletsRequest) GetCells() []string { +func (x *GetSchemaMigrationsRequest) GetRecent() *vttime.Duration { if x != nil { - return x.Cells + return x.Recent } return nil } -func (x *GetTabletsRequest) GetStrict() bool { +func (x *GetSchemaMigrationsRequest) GetOrder() QueryOrdering { if x != nil { - return x.Strict + return x.Order } - return false + return QueryOrdering_NONE } -func (x *GetTabletsRequest) GetTabletAliases() []*topodata.TabletAlias { +func (x *GetSchemaMigrationsRequest) GetLimit() uint64 { if x != nil { - return x.TabletAliases + return x.Limit } - return nil + return 0 } -func (x *GetTabletsRequest) GetTabletType() topodata.TabletType { +func (x *GetSchemaMigrationsRequest) GetSkip() uint64 { if x != nil { - return x.TabletType + return x.Skip } - return topodata.TabletType(0) + return 0 } -type GetTabletsResponse struct { +type GetSchemaMigrationsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Tablets []*topodata.Tablet `protobuf:"bytes,1,rep,name=tablets,proto3" json:"tablets,omitempty"` + Migrations []*SchemaMigration `protobuf:"bytes,1,rep,name=migrations,proto3" json:"migrations,omitempty"` } -func (x *GetTabletsResponse) Reset() { - *x = GetTabletsResponse{} +func (x *GetSchemaMigrationsResponse) Reset() { + *x = GetSchemaMigrationsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[87] + mi := 
&file_vtctldata_proto_msgTypes[78] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetTabletsResponse) String() string { +func (x *GetSchemaMigrationsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetTabletsResponse) ProtoMessage() {} +func (*GetSchemaMigrationsResponse) ProtoMessage() {} -func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[87] +func (x *GetSchemaMigrationsResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[78] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5205,43 +5431,44 @@ func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetTabletsResponse.ProtoReflect.Descriptor instead. -func (*GetTabletsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{87} +// Deprecated: Use GetSchemaMigrationsResponse.ProtoReflect.Descriptor instead. 
+func (*GetSchemaMigrationsResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{78} } -func (x *GetTabletsResponse) GetTablets() []*topodata.Tablet { +func (x *GetSchemaMigrationsResponse) GetMigrations() []*SchemaMigration { if x != nil { - return x.Tablets + return x.Migrations } return nil } -type GetTopologyPathRequest struct { +type GetShardRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` } -func (x *GetTopologyPathRequest) Reset() { - *x = GetTopologyPathRequest{} +func (x *GetShardRequest) Reset() { + *x = GetShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[88] + mi := &file_vtctldata_proto_msgTypes[79] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetTopologyPathRequest) String() string { +func (x *GetShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetTopologyPathRequest) ProtoMessage() {} +func (*GetShardRequest) ProtoMessage() {} -func (x *GetTopologyPathRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[88] +func (x *GetShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[79] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5252,43 +5479,50 @@ func (x *GetTopologyPathRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetTopologyPathRequest.ProtoReflect.Descriptor instead. 
-func (*GetTopologyPathRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{88} +// Deprecated: Use GetShardRequest.ProtoReflect.Descriptor instead. +func (*GetShardRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{79} } -func (x *GetTopologyPathRequest) GetPath() string { +func (x *GetShardRequest) GetKeyspace() string { if x != nil { - return x.Path + return x.Keyspace } return "" } -type GetTopologyPathResponse struct { +func (x *GetShardRequest) GetShardName() string { + if x != nil { + return x.ShardName + } + return "" +} + +type GetShardResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Cell *TopologyCell `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"` + Shard *Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` } -func (x *GetTopologyPathResponse) Reset() { - *x = GetTopologyPathResponse{} +func (x *GetShardResponse) Reset() { + *x = GetShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[89] + mi := &file_vtctldata_proto_msgTypes[80] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetTopologyPathResponse) String() string { +func (x *GetShardResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetTopologyPathResponse) ProtoMessage() {} +func (*GetShardResponse) ProtoMessage() {} -func (x *GetTopologyPathResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[89] +func (x *GetShardResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[80] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5299,48 +5533,41 @@ func (x *GetTopologyPathResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use 
GetTopologyPathResponse.ProtoReflect.Descriptor instead. -func (*GetTopologyPathResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{89} +// Deprecated: Use GetShardResponse.ProtoReflect.Descriptor instead. +func (*GetShardResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{80} } -func (x *GetTopologyPathResponse) GetCell() *TopologyCell { +func (x *GetShardResponse) GetShard() *Shard { if x != nil { - return x.Cell + return x.Shard } return nil } -type TopologyCell struct { +type GetShardRoutingRulesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` - // Data is the file contents of the cell located at path. - // It is only populated if the cell is a terminal node. - Data string `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` - Children []string `protobuf:"bytes,4,rep,name=children,proto3" json:"children,omitempty"` } -func (x *TopologyCell) Reset() { - *x = TopologyCell{} +func (x *GetShardRoutingRulesRequest) Reset() { + *x = GetShardRoutingRulesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[90] + mi := &file_vtctldata_proto_msgTypes[81] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *TopologyCell) String() string { +func (x *GetShardRoutingRulesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*TopologyCell) ProtoMessage() {} +func (*GetShardRoutingRulesRequest) ProtoMessage() {} -func (x *TopologyCell) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[90] +func (x *GetShardRoutingRulesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[81] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5351,64 +5578,83 @@ func (x *TopologyCell) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TopologyCell.ProtoReflect.Descriptor instead. -func (*TopologyCell) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{90} +// Deprecated: Use GetShardRoutingRulesRequest.ProtoReflect.Descriptor instead. +func (*GetShardRoutingRulesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{81} } -func (x *TopologyCell) GetName() string { - if x != nil { - return x.Name - } - return "" -} +type GetShardRoutingRulesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (x *TopologyCell) GetPath() string { - if x != nil { - return x.Path - } - return "" + ShardRoutingRules *vschema.ShardRoutingRules `protobuf:"bytes,1,opt,name=shard_routing_rules,json=shardRoutingRules,proto3" json:"shard_routing_rules,omitempty"` } -func (x *TopologyCell) GetData() string { - if x != nil { - return x.Data +func (x *GetShardRoutingRulesResponse) Reset() { + *x = GetShardRoutingRulesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[82] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -func (x *TopologyCell) GetChildren() []string { - if x != nil { - return x.Children - } - return nil +func (x *GetShardRoutingRulesResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -type GetVSchemaRequest struct { +func (*GetShardRoutingRulesResponse) ProtoMessage() {} + +func (x *GetShardRoutingRulesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[82] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + 
} + return mi.MessageOf(x) +} + +// Deprecated: Use GetShardRoutingRulesResponse.ProtoReflect.Descriptor instead. +func (*GetShardRoutingRulesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{82} +} + +func (x *GetShardRoutingRulesResponse) GetShardRoutingRules() *vschema.ShardRoutingRules { + if x != nil { + return x.ShardRoutingRules + } + return nil +} + +type GetSrvKeyspaceNamesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Cells []string `protobuf:"bytes,1,rep,name=cells,proto3" json:"cells,omitempty"` } -func (x *GetVSchemaRequest) Reset() { - *x = GetVSchemaRequest{} +func (x *GetSrvKeyspaceNamesRequest) Reset() { + *x = GetSrvKeyspaceNamesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[91] + mi := &file_vtctldata_proto_msgTypes[83] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetVSchemaRequest) String() string { +func (x *GetSrvKeyspaceNamesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetVSchemaRequest) ProtoMessage() {} +func (*GetSrvKeyspaceNamesRequest) ProtoMessage() {} -func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[91] +func (x *GetSrvKeyspaceNamesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[83] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5419,43 +5665,44 @@ func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetVSchemaRequest.ProtoReflect.Descriptor instead. 
-func (*GetVSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{91} +// Deprecated: Use GetSrvKeyspaceNamesRequest.ProtoReflect.Descriptor instead. +func (*GetSrvKeyspaceNamesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{83} } -func (x *GetVSchemaRequest) GetKeyspace() string { +func (x *GetSrvKeyspaceNamesRequest) GetCells() []string { if x != nil { - return x.Keyspace + return x.Cells } - return "" + return nil } -type GetVersionRequest struct { +type GetSrvKeyspaceNamesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + // Names is a mapping of cell name to a list of SrvKeyspace names. + Names map[string]*GetSrvKeyspaceNamesResponse_NameList `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *GetVersionRequest) Reset() { - *x = GetVersionRequest{} +func (x *GetSrvKeyspaceNamesResponse) Reset() { + *x = GetSrvKeyspaceNamesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[92] + mi := &file_vtctldata_proto_msgTypes[84] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetVersionRequest) String() string { +func (x *GetSrvKeyspaceNamesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetVersionRequest) ProtoMessage() {} +func (*GetSrvKeyspaceNamesResponse) ProtoMessage() {} -func (x *GetVersionRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[92] +func (x *GetSrvKeyspaceNamesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[84] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5466,43 +5713,46 @@ func (x *GetVersionRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetVersionRequest.ProtoReflect.Descriptor instead. -func (*GetVersionRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{92} +// Deprecated: Use GetSrvKeyspaceNamesResponse.ProtoReflect.Descriptor instead. +func (*GetSrvKeyspaceNamesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{84} } -func (x *GetVersionRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *GetSrvKeyspaceNamesResponse) GetNames() map[string]*GetSrvKeyspaceNamesResponse_NameList { if x != nil { - return x.TabletAlias + return x.Names } return nil } -type GetVersionResponse struct { +type GetSrvKeyspacesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Cells is a list of cells to lookup a SrvKeyspace for. Leaving this empty is + // equivalent to specifying all cells in the topo. 
+ Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` } -func (x *GetVersionResponse) Reset() { - *x = GetVersionResponse{} +func (x *GetSrvKeyspacesRequest) Reset() { + *x = GetSrvKeyspacesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[93] + mi := &file_vtctldata_proto_msgTypes[85] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetVersionResponse) String() string { +func (x *GetSrvKeyspacesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetVersionResponse) ProtoMessage() {} +func (*GetSrvKeyspacesRequest) ProtoMessage() {} -func (x *GetVersionResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[93] +func (x *GetSrvKeyspacesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[85] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5513,43 +5763,51 @@ func (x *GetVersionResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetVersionResponse.ProtoReflect.Descriptor instead. -func (*GetVersionResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{93} +// Deprecated: Use GetSrvKeyspacesRequest.ProtoReflect.Descriptor instead. 
+func (*GetSrvKeyspacesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{85} } -func (x *GetVersionResponse) GetVersion() string { +func (x *GetSrvKeyspacesRequest) GetKeyspace() string { if x != nil { - return x.Version + return x.Keyspace } return "" } -type GetVSchemaResponse struct { +func (x *GetSrvKeyspacesRequest) GetCells() []string { + if x != nil { + return x.Cells + } + return nil +} + +type GetSrvKeyspacesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - VSchema *vschema.Keyspace `protobuf:"bytes,1,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"` + // SrvKeyspaces is a mapping of cell name to SrvKeyspace. + SrvKeyspaces map[string]*topodata.SrvKeyspace `protobuf:"bytes,1,rep,name=srv_keyspaces,json=srvKeyspaces,proto3" json:"srv_keyspaces,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *GetVSchemaResponse) Reset() { - *x = GetVSchemaResponse{} +func (x *GetSrvKeyspacesResponse) Reset() { + *x = GetSrvKeyspacesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[94] + mi := &file_vtctldata_proto_msgTypes[86] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetVSchemaResponse) String() string { +func (x *GetSrvKeyspacesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetVSchemaResponse) ProtoMessage() {} +func (*GetSrvKeyspacesResponse) ProtoMessage() {} -func (x *GetVSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[94] +func (x *GetSrvKeyspacesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[86] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5560,45 +5818,59 @@ func (x *GetVSchemaResponse) ProtoReflect() 
protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetVSchemaResponse.ProtoReflect.Descriptor instead. -func (*GetVSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{94} +// Deprecated: Use GetSrvKeyspacesResponse.ProtoReflect.Descriptor instead. +func (*GetSrvKeyspacesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{86} } -func (x *GetVSchemaResponse) GetVSchema() *vschema.Keyspace { +func (x *GetSrvKeyspacesResponse) GetSrvKeyspaces() map[string]*topodata.SrvKeyspace { if x != nil { - return x.VSchema + return x.SrvKeyspaces } return nil } -type GetWorkflowsRequest struct { +type UpdateThrottlerConfigRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - ActiveOnly bool `protobuf:"varint,2,opt,name=active_only,json=activeOnly,proto3" json:"active_only,omitempty"` - NameOnly bool `protobuf:"varint,3,opt,name=name_only,json=nameOnly,proto3" json:"name_only,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Enable instructs to enable the throttler + Enable bool `protobuf:"varint,2,opt,name=enable,proto3" json:"enable,omitempty"` + // Disable instructs to disable the throttler + Disable bool `protobuf:"varint,3,opt,name=disable,proto3" json:"disable,omitempty"` + // Threshold for throttler (with no custom query, ie using default query, only positive values are considered) + Threshold float64 `protobuf:"fixed64,4,opt,name=threshold,proto3" json:"threshold,omitempty"` + // CustomQuery replaces the default replication lag query + CustomQuery string `protobuf:"bytes,5,opt,name=custom_query,json=customQuery,proto3" json:"custom_query,omitempty"` + // CustomQuerySet indicates that the value of CustomQuery has changed + CustomQuerySet bool 
`protobuf:"varint,6,opt,name=custom_query_set,json=customQuerySet,proto3" json:"custom_query_set,omitempty"` + // CheckAsCheckSelf instructs the throttler to respond to /check requests by checking the tablet's own health + CheckAsCheckSelf bool `protobuf:"varint,7,opt,name=check_as_check_self,json=checkAsCheckSelf,proto3" json:"check_as_check_self,omitempty"` + // CheckAsCheckShard instructs the throttler to respond to /check requests by checking the shard's health (this is the default behavior) + CheckAsCheckShard bool `protobuf:"varint,8,opt,name=check_as_check_shard,json=checkAsCheckShard,proto3" json:"check_as_check_shard,omitempty"` + // ThrottledApp indicates a single throttled app rule (ignored if name is empty) + ThrottledApp *topodata.ThrottledAppRule `protobuf:"bytes,9,opt,name=throttled_app,json=throttledApp,proto3" json:"throttled_app,omitempty"` } -func (x *GetWorkflowsRequest) Reset() { - *x = GetWorkflowsRequest{} +func (x *UpdateThrottlerConfigRequest) Reset() { + *x = UpdateThrottlerConfigRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[95] + mi := &file_vtctldata_proto_msgTypes[87] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetWorkflowsRequest) String() string { +func (x *UpdateThrottlerConfigRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetWorkflowsRequest) ProtoMessage() {} +func (*UpdateThrottlerConfigRequest) ProtoMessage() {} -func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[95] +func (x *UpdateThrottlerConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[87] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5609,108 +5881,97 @@ func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use 
GetWorkflowsRequest.ProtoReflect.Descriptor instead. -func (*GetWorkflowsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{95} +// Deprecated: Use UpdateThrottlerConfigRequest.ProtoReflect.Descriptor instead. +func (*UpdateThrottlerConfigRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{87} } -func (x *GetWorkflowsRequest) GetKeyspace() string { +func (x *UpdateThrottlerConfigRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *GetWorkflowsRequest) GetActiveOnly() bool { +func (x *UpdateThrottlerConfigRequest) GetEnable() bool { if x != nil { - return x.ActiveOnly + return x.Enable } return false } -func (x *GetWorkflowsRequest) GetNameOnly() bool { +func (x *UpdateThrottlerConfigRequest) GetDisable() bool { if x != nil { - return x.NameOnly + return x.Disable } return false } -type GetWorkflowsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Workflows []*Workflow `protobuf:"bytes,1,rep,name=workflows,proto3" json:"workflows,omitempty"` +func (x *UpdateThrottlerConfigRequest) GetThreshold() float64 { + if x != nil { + return x.Threshold + } + return 0 } -func (x *GetWorkflowsResponse) Reset() { - *x = GetWorkflowsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[96] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *UpdateThrottlerConfigRequest) GetCustomQuery() string { + if x != nil { + return x.CustomQuery } + return "" } -func (x *GetWorkflowsResponse) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *UpdateThrottlerConfigRequest) GetCustomQuerySet() bool { + if x != nil { + return x.CustomQuerySet + } + return false } -func (*GetWorkflowsResponse) ProtoMessage() {} - -func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[96] - 
if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *UpdateThrottlerConfigRequest) GetCheckAsCheckSelf() bool { + if x != nil { + return x.CheckAsCheckSelf } - return mi.MessageOf(x) + return false } -// Deprecated: Use GetWorkflowsResponse.ProtoReflect.Descriptor instead. -func (*GetWorkflowsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{96} +func (x *UpdateThrottlerConfigRequest) GetCheckAsCheckShard() bool { + if x != nil { + return x.CheckAsCheckShard + } + return false } -func (x *GetWorkflowsResponse) GetWorkflows() []*Workflow { +func (x *UpdateThrottlerConfigRequest) GetThrottledApp() *topodata.ThrottledAppRule { if x != nil { - return x.Workflows + return x.ThrottledApp } return nil } -type InitShardPrimaryRequest struct { +type UpdateThrottlerConfigResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - PrimaryElectTabletAlias *topodata.TabletAlias `protobuf:"bytes,3,opt,name=primary_elect_tablet_alias,json=primaryElectTabletAlias,proto3" json:"primary_elect_tablet_alias,omitempty"` - Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` - WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,5,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` } -func (x *InitShardPrimaryRequest) Reset() { - *x = InitShardPrimaryRequest{} +func (x *UpdateThrottlerConfigResponse) Reset() { + *x = UpdateThrottlerConfigResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[97] + mi := &file_vtctldata_proto_msgTypes[88] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *InitShardPrimaryRequest) String() string { +func (x *UpdateThrottlerConfigResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*InitShardPrimaryRequest) ProtoMessage() {} +func (*UpdateThrottlerConfigResponse) ProtoMessage() {} -func (x *InitShardPrimaryRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[97] +func (x *UpdateThrottlerConfigResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[88] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5721,71 +5982,36 @@ func (x *InitShardPrimaryRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use InitShardPrimaryRequest.ProtoReflect.Descriptor instead. -func (*InitShardPrimaryRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{97} -} - -func (x *InitShardPrimaryRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *InitShardPrimaryRequest) GetShard() string { - if x != nil { - return x.Shard - } - return "" -} - -func (x *InitShardPrimaryRequest) GetPrimaryElectTabletAlias() *topodata.TabletAlias { - if x != nil { - return x.PrimaryElectTabletAlias - } - return nil -} - -func (x *InitShardPrimaryRequest) GetForce() bool { - if x != nil { - return x.Force - } - return false -} - -func (x *InitShardPrimaryRequest) GetWaitReplicasTimeout() *vttime.Duration { - if x != nil { - return x.WaitReplicasTimeout - } - return nil +// Deprecated: Use UpdateThrottlerConfigResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateThrottlerConfigResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{88} } -type InitShardPrimaryResponse struct { +type GetSrvVSchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Events []*logutil.Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` + Cell string `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"` } -func (x *InitShardPrimaryResponse) Reset() { - *x = InitShardPrimaryResponse{} +func (x *GetSrvVSchemaRequest) Reset() { + *x = GetSrvVSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[98] + mi := &file_vtctldata_proto_msgTypes[89] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *InitShardPrimaryResponse) String() string { +func (x *GetSrvVSchemaRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*InitShardPrimaryResponse) ProtoMessage() {} +func (*GetSrvVSchemaRequest) ProtoMessage() {} -func (x *InitShardPrimaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[98] +func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[89] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5796,43 +6022,43 @@ func (x *InitShardPrimaryResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use InitShardPrimaryResponse.ProtoReflect.Descriptor instead. -func (*InitShardPrimaryResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{98} +// Deprecated: Use GetSrvVSchemaRequest.ProtoReflect.Descriptor instead. 
+func (*GetSrvVSchemaRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{89} } -func (x *InitShardPrimaryResponse) GetEvents() []*logutil.Event { +func (x *GetSrvVSchemaRequest) GetCell() string { if x != nil { - return x.Events + return x.Cell } - return nil + return "" } -type PingTabletRequest struct { +type GetSrvVSchemaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + SrvVSchema *vschema.SrvVSchema `protobuf:"bytes,1,opt,name=srv_v_schema,json=srvVSchema,proto3" json:"srv_v_schema,omitempty"` } -func (x *PingTabletRequest) Reset() { - *x = PingTabletRequest{} +func (x *GetSrvVSchemaResponse) Reset() { + *x = GetSrvVSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[99] + mi := &file_vtctldata_proto_msgTypes[90] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *PingTabletRequest) String() string { +func (x *GetSrvVSchemaResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PingTabletRequest) ProtoMessage() {} +func (*GetSrvVSchemaResponse) ProtoMessage() {} -func (x *PingTabletRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[99] +func (x *GetSrvVSchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[90] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5843,41 +6069,43 @@ func (x *PingTabletRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PingTabletRequest.ProtoReflect.Descriptor instead. 
-func (*PingTabletRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{99} +// Deprecated: Use GetSrvVSchemaResponse.ProtoReflect.Descriptor instead. +func (*GetSrvVSchemaResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{90} } -func (x *PingTabletRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *GetSrvVSchemaResponse) GetSrvVSchema() *vschema.SrvVSchema { if x != nil { - return x.TabletAlias + return x.SrvVSchema } return nil } -type PingTabletResponse struct { +type GetSrvVSchemasRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` } -func (x *PingTabletResponse) Reset() { - *x = PingTabletResponse{} +func (x *GetSrvVSchemasRequest) Reset() { + *x = GetSrvVSchemasRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[100] + mi := &file_vtctldata_proto_msgTypes[91] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *PingTabletResponse) String() string { +func (x *GetSrvVSchemasRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PingTabletResponse) ProtoMessage() {} +func (*GetSrvVSchemasRequest) ProtoMessage() {} -func (x *PingTabletResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[100] +func (x *GetSrvVSchemasRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[91] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5888,57 +6116,44 @@ func (x *PingTabletResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PingTabletResponse.ProtoReflect.Descriptor instead. 
-func (*PingTabletResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{100} +// Deprecated: Use GetSrvVSchemasRequest.ProtoReflect.Descriptor instead. +func (*GetSrvVSchemasRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{91} } -type PlannedReparentShardRequest struct { +func (x *GetSrvVSchemasRequest) GetCells() []string { + if x != nil { + return x.Cells + } + return nil +} + +type GetSrvVSchemasResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspace is the name of the keyspace to perform the Planned Reparent in. - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Shard is the name of the shard to perform teh Planned Reparent in. - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - // NewPrimary is the alias of the tablet to promote to shard primary. If not - // specified, the vtctld will select the most up-to-date candidate to promote. - // - // It is an error to set NewPrimary and AvoidPrimary to the same alias. - NewPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=new_primary,json=newPrimary,proto3" json:"new_primary,omitempty"` - // AvoidPrimary is the alias of the tablet to demote. In other words, - // specifying an AvoidPrimary alias tells the vtctld to promote any replica - // other than this one. A shard whose current primary is not this one is then - // a no-op. - // - // It is an error to set NewPrimary and AvoidPrimary to the same alias. - AvoidPrimary *topodata.TabletAlias `protobuf:"bytes,4,opt,name=avoid_primary,json=avoidPrimary,proto3" json:"avoid_primary,omitempty"` - // WaitReplicasTimeout is the duration of time to wait for replicas to catch - // up in replication both before and after the reparent. 
The timeout is not - // cumulative across both wait periods, meaning that the replicas have - // WaitReplicasTimeout time to catch up before the reparent, and an additional - // WaitReplicasTimeout time to catch up after the reparent. - WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,5,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` + // SrvVSchemas is a mapping of cell name to SrvVSchema + SrvVSchemas map[string]*vschema.SrvVSchema `protobuf:"bytes,1,rep,name=srv_v_schemas,json=srvVSchemas,proto3" json:"srv_v_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *PlannedReparentShardRequest) Reset() { - *x = PlannedReparentShardRequest{} +func (x *GetSrvVSchemasResponse) Reset() { + *x = GetSrvVSchemasResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[101] + mi := &file_vtctldata_proto_msgTypes[92] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *PlannedReparentShardRequest) String() string { +func (x *GetSrvVSchemasResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PlannedReparentShardRequest) ProtoMessage() {} +func (*GetSrvVSchemasResponse) ProtoMessage() {} -func (x *PlannedReparentShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[101] +func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[92] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5949,80 +6164,43 @@ func (x *PlannedReparentShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PlannedReparentShardRequest.ProtoReflect.Descriptor instead. 
-func (*PlannedReparentShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{101} -} - -func (x *PlannedReparentShardRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *PlannedReparentShardRequest) GetShard() string { - if x != nil { - return x.Shard - } - return "" -} - -func (x *PlannedReparentShardRequest) GetNewPrimary() *topodata.TabletAlias { - if x != nil { - return x.NewPrimary - } - return nil -} - -func (x *PlannedReparentShardRequest) GetAvoidPrimary() *topodata.TabletAlias { - if x != nil { - return x.AvoidPrimary - } - return nil +// Deprecated: Use GetSrvVSchemasResponse.ProtoReflect.Descriptor instead. +func (*GetSrvVSchemasResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{92} } -func (x *PlannedReparentShardRequest) GetWaitReplicasTimeout() *vttime.Duration { +func (x *GetSrvVSchemasResponse) GetSrvVSchemas() map[string]*vschema.SrvVSchema { if x != nil { - return x.WaitReplicasTimeout + return x.SrvVSchemas } return nil } -type PlannedReparentShardResponse struct { +type GetTabletRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspace is the name of the keyspace the Planned Reparent took place in. - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Shard is the name of the shard the Planned Reparent took place in. - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - // PromotedPrimary is the alias of the tablet that was promoted to shard - // primary. If NewPrimary was set in the request, then this will be the same - // alias. Otherwise, it will be the alias of the tablet found to be most - // up-to-date. 
- PromotedPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=promoted_primary,json=promotedPrimary,proto3" json:"promoted_primary,omitempty"` - Events []*logutil.Event `protobuf:"bytes,4,rep,name=events,proto3" json:"events,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` } -func (x *PlannedReparentShardResponse) Reset() { - *x = PlannedReparentShardResponse{} +func (x *GetTabletRequest) Reset() { + *x = GetTabletRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[102] + mi := &file_vtctldata_proto_msgTypes[93] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *PlannedReparentShardResponse) String() string { +func (x *GetTabletRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PlannedReparentShardResponse) ProtoMessage() {} +func (*GetTabletRequest) ProtoMessage() {} -func (x *PlannedReparentShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[102] +func (x *GetTabletRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[93] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6033,68 +6211,111 @@ func (x *PlannedReparentShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PlannedReparentShardResponse.ProtoReflect.Descriptor instead. -func (*PlannedReparentShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{102} +// Deprecated: Use GetTabletRequest.ProtoReflect.Descriptor instead. 
+func (*GetTabletRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{93} } -func (x *PlannedReparentShardResponse) GetKeyspace() string { +func (x *GetTabletRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Keyspace + return x.TabletAlias } - return "" + return nil } -func (x *PlannedReparentShardResponse) GetShard() string { - if x != nil { - return x.Shard +type GetTabletResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tablet *topodata.Tablet `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` +} + +func (x *GetTabletResponse) Reset() { + *x = GetTabletResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[94] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -func (x *PlannedReparentShardResponse) GetPromotedPrimary() *topodata.TabletAlias { - if x != nil { - return x.PromotedPrimary +func (x *GetTabletResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTabletResponse) ProtoMessage() {} + +func (x *GetTabletResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[94] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -func (x *PlannedReparentShardResponse) GetEvents() []*logutil.Event { +// Deprecated: Use GetTabletResponse.ProtoReflect.Descriptor instead. 
+func (*GetTabletResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{94} +} + +func (x *GetTabletResponse) GetTablet() *topodata.Tablet { if x != nil { - return x.Events + return x.Tablet } return nil } -type RebuildKeyspaceGraphRequest struct { +type GetTabletsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` - // AllowPartial, when set, allows a SNAPSHOT keyspace to serve with an - // incomplete set of shards. It is ignored for all other keyspace types. - AllowPartial bool `protobuf:"varint,3,opt,name=allow_partial,json=allowPartial,proto3" json:"allow_partial,omitempty"` + // Keyspace is the name of the keyspace to return tablets for. Omit to return + // tablets from all keyspaces. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard is the name of the shard to return tablets for. This field is ignored + // if Keyspace is not set. + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // Cells is an optional set of cells to return tablets for. + Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` + // Strict specifies how the server should treat failures from individual + // cells. + // + // When false (the default), GetTablets will return data from any cells that + // return successfully, but will fail the request if all cells fail. When + // true, any individual cell can fail the full request. + Strict bool `protobuf:"varint,4,opt,name=strict,proto3" json:"strict,omitempty"` + // TabletAliases is an optional list of tablet aliases to fetch Tablet objects + // for. 
If specified, Keyspace, Shard, and Cells are ignored, and tablets are + // looked up by their respective aliases' Cells directly. + TabletAliases []*topodata.TabletAlias `protobuf:"bytes,5,rep,name=tablet_aliases,json=tabletAliases,proto3" json:"tablet_aliases,omitempty"` + // tablet_type specifies the type of tablets to return. Omit to return all + // tablet types. + TabletType topodata.TabletType `protobuf:"varint,6,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` } -func (x *RebuildKeyspaceGraphRequest) Reset() { - *x = RebuildKeyspaceGraphRequest{} +func (x *GetTabletsRequest) Reset() { + *x = GetTabletsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[103] + mi := &file_vtctldata_proto_msgTypes[95] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RebuildKeyspaceGraphRequest) String() string { +func (x *GetTabletsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RebuildKeyspaceGraphRequest) ProtoMessage() {} +func (*GetTabletsRequest) ProtoMessage() {} -func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[103] +func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[95] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6105,55 +6326,78 @@ func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RebuildKeyspaceGraphRequest.ProtoReflect.Descriptor instead. -func (*RebuildKeyspaceGraphRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{103} +// Deprecated: Use GetTabletsRequest.ProtoReflect.Descriptor instead. 
+func (*GetTabletsRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{95} } -func (x *RebuildKeyspaceGraphRequest) GetKeyspace() string { +func (x *GetTabletsRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *RebuildKeyspaceGraphRequest) GetCells() []string { +func (x *GetTabletsRequest) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *GetTabletsRequest) GetCells() []string { if x != nil { return x.Cells } return nil } -func (x *RebuildKeyspaceGraphRequest) GetAllowPartial() bool { +func (x *GetTabletsRequest) GetStrict() bool { if x != nil { - return x.AllowPartial + return x.Strict } return false } -type RebuildKeyspaceGraphResponse struct { +func (x *GetTabletsRequest) GetTabletAliases() []*topodata.TabletAlias { + if x != nil { + return x.TabletAliases + } + return nil +} + +func (x *GetTabletsRequest) GetTabletType() topodata.TabletType { + if x != nil { + return x.TabletType + } + return topodata.TabletType(0) +} + +type GetTabletsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Tablets []*topodata.Tablet `protobuf:"bytes,1,rep,name=tablets,proto3" json:"tablets,omitempty"` } -func (x *RebuildKeyspaceGraphResponse) Reset() { - *x = RebuildKeyspaceGraphResponse{} +func (x *GetTabletsResponse) Reset() { + *x = GetTabletsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[104] + mi := &file_vtctldata_proto_msgTypes[96] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RebuildKeyspaceGraphResponse) String() string { +func (x *GetTabletsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RebuildKeyspaceGraphResponse) ProtoMessage() {} +func (*GetTabletsResponse) ProtoMessage() {} -func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message { - mi := 
&file_vtctldata_proto_msgTypes[104] +func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[96] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6164,38 +6408,43 @@ func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RebuildKeyspaceGraphResponse.ProtoReflect.Descriptor instead. -func (*RebuildKeyspaceGraphResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{104} +// Deprecated: Use GetTabletsResponse.ProtoReflect.Descriptor instead. +func (*GetTabletsResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{96} } -type RebuildVSchemaGraphRequest struct { +func (x *GetTabletsResponse) GetTablets() []*topodata.Tablet { + if x != nil { + return x.Tablets + } + return nil +} + +type GetTopologyPathRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Cells specifies the cells to rebuild the SrvVSchema objects for. If empty, - // RebuildVSchemaGraph rebuilds the SrvVSchema for every cell in the topo. 
- Cells []string `protobuf:"bytes,1,rep,name=cells,proto3" json:"cells,omitempty"` + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` } -func (x *RebuildVSchemaGraphRequest) Reset() { - *x = RebuildVSchemaGraphRequest{} +func (x *GetTopologyPathRequest) Reset() { + *x = GetTopologyPathRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[105] + mi := &file_vtctldata_proto_msgTypes[97] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RebuildVSchemaGraphRequest) String() string { +func (x *GetTopologyPathRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RebuildVSchemaGraphRequest) ProtoMessage() {} +func (*GetTopologyPathRequest) ProtoMessage() {} -func (x *RebuildVSchemaGraphRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[105] +func (x *GetTopologyPathRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[97] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6206,41 +6455,43 @@ func (x *RebuildVSchemaGraphRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RebuildVSchemaGraphRequest.ProtoReflect.Descriptor instead. -func (*RebuildVSchemaGraphRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{105} +// Deprecated: Use GetTopologyPathRequest.ProtoReflect.Descriptor instead. 
+func (*GetTopologyPathRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{97} } -func (x *RebuildVSchemaGraphRequest) GetCells() []string { +func (x *GetTopologyPathRequest) GetPath() string { if x != nil { - return x.Cells + return x.Path } - return nil + return "" } -type RebuildVSchemaGraphResponse struct { +type GetTopologyPathResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Cell *TopologyCell `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"` } -func (x *RebuildVSchemaGraphResponse) Reset() { - *x = RebuildVSchemaGraphResponse{} +func (x *GetTopologyPathResponse) Reset() { + *x = GetTopologyPathResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[106] + mi := &file_vtctldata_proto_msgTypes[98] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RebuildVSchemaGraphResponse) String() string { +func (x *GetTopologyPathResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RebuildVSchemaGraphResponse) ProtoMessage() {} +func (*GetTopologyPathResponse) ProtoMessage() {} -func (x *RebuildVSchemaGraphResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[106] +func (x *GetTopologyPathResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[98] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6251,36 +6502,48 @@ func (x *RebuildVSchemaGraphResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RebuildVSchemaGraphResponse.ProtoReflect.Descriptor instead. -func (*RebuildVSchemaGraphResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{106} +// Deprecated: Use GetTopologyPathResponse.ProtoReflect.Descriptor instead. 
+func (*GetTopologyPathResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{98} } -type RefreshStateRequest struct { +func (x *GetTopologyPathResponse) GetCell() *TopologyCell { + if x != nil { + return x.Cell + } + return nil +} + +type TopologyCell struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + // Data is the file contents of the cell located at path. + // It is only populated if the cell is a terminal node. + Data string `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + Children []string `protobuf:"bytes,4,rep,name=children,proto3" json:"children,omitempty"` } -func (x *RefreshStateRequest) Reset() { - *x = RefreshStateRequest{} +func (x *TopologyCell) Reset() { + *x = TopologyCell{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[107] + mi := &file_vtctldata_proto_msgTypes[99] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RefreshStateRequest) String() string { +func (x *TopologyCell) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RefreshStateRequest) ProtoMessage() {} +func (*TopologyCell) ProtoMessage() {} -func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[107] +func (x *TopologyCell) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[99] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6291,83 +6554,64 @@ func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: 
Use RefreshStateRequest.ProtoReflect.Descriptor instead. -func (*RefreshStateRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{107} -} +// Deprecated: Use TopologyCell.ProtoReflect.Descriptor instead. +func (*TopologyCell) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{99} +} -func (x *RefreshStateRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *TopologyCell) GetName() string { if x != nil { - return x.TabletAlias + return x.Name } - return nil -} - -type RefreshStateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields + return "" } -func (x *RefreshStateResponse) Reset() { - *x = RefreshStateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[108] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *TopologyCell) GetPath() string { + if x != nil { + return x.Path } + return "" } -func (x *RefreshStateResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RefreshStateResponse) ProtoMessage() {} - -func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[108] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *TopologyCell) GetData() string { + if x != nil { + return x.Data } - return mi.MessageOf(x) + return "" } -// Deprecated: Use RefreshStateResponse.ProtoReflect.Descriptor instead. 
-func (*RefreshStateResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{108} +func (x *TopologyCell) GetChildren() []string { + if x != nil { + return x.Children + } + return nil } -type RefreshStateByShardRequest struct { +type GetVSchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *RefreshStateByShardRequest) Reset() { - *x = RefreshStateByShardRequest{} +func (x *GetVSchemaRequest) Reset() { + *x = GetVSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[109] + mi := &file_vtctldata_proto_msgTypes[100] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RefreshStateByShardRequest) String() string { +func (x *GetVSchemaRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RefreshStateByShardRequest) ProtoMessage() {} +func (*GetVSchemaRequest) ProtoMessage() {} -func (x *RefreshStateByShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[109] +func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[100] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6378,59 +6622,43 @@ func (x *RefreshStateByShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RefreshStateByShardRequest.ProtoReflect.Descriptor instead. 
-func (*RefreshStateByShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{109} +// Deprecated: Use GetVSchemaRequest.ProtoReflect.Descriptor instead. +func (*GetVSchemaRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{100} } -func (x *RefreshStateByShardRequest) GetKeyspace() string { +func (x *GetVSchemaRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *RefreshStateByShardRequest) GetShard() string { - if x != nil { - return x.Shard - } - return "" -} - -func (x *RefreshStateByShardRequest) GetCells() []string { - if x != nil { - return x.Cells - } - return nil -} - -type RefreshStateByShardResponse struct { +type GetVersionRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - IsPartialRefresh bool `protobuf:"varint,1,opt,name=is_partial_refresh,json=isPartialRefresh,proto3" json:"is_partial_refresh,omitempty"` - // This explains why we had a partial refresh (if we did) - PartialRefreshDetails string `protobuf:"bytes,2,opt,name=partial_refresh_details,json=partialRefreshDetails,proto3" json:"partial_refresh_details,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` } -func (x *RefreshStateByShardResponse) Reset() { - *x = RefreshStateByShardResponse{} +func (x *GetVersionRequest) Reset() { + *x = GetVersionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[110] + mi := &file_vtctldata_proto_msgTypes[101] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RefreshStateByShardResponse) String() string { +func (x *GetVersionRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RefreshStateByShardResponse) ProtoMessage() {} +func (*GetVersionRequest) ProtoMessage() {} -func (x *RefreshStateByShardResponse) 
ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[110] +func (x *GetVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[101] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6441,50 +6669,43 @@ func (x *RefreshStateByShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RefreshStateByShardResponse.ProtoReflect.Descriptor instead. -func (*RefreshStateByShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{110} -} - -func (x *RefreshStateByShardResponse) GetIsPartialRefresh() bool { - if x != nil { - return x.IsPartialRefresh - } - return false +// Deprecated: Use GetVersionRequest.ProtoReflect.Descriptor instead. +func (*GetVersionRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{101} } -func (x *RefreshStateByShardResponse) GetPartialRefreshDetails() string { +func (x *GetVersionRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.PartialRefreshDetails + return x.TabletAlias } - return "" + return nil } -type ReloadSchemaRequest struct { +type GetVersionResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` } -func (x *ReloadSchemaRequest) Reset() { - *x = ReloadSchemaRequest{} +func (x *GetVersionResponse) Reset() { + *x = GetVersionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[111] + mi := &file_vtctldata_proto_msgTypes[102] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ReloadSchemaRequest) String() string { +func 
(x *GetVersionResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReloadSchemaRequest) ProtoMessage() {} +func (*GetVersionResponse) ProtoMessage() {} -func (x *ReloadSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[111] +func (x *GetVersionResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[102] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6495,41 +6716,43 @@ func (x *ReloadSchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReloadSchemaRequest.ProtoReflect.Descriptor instead. -func (*ReloadSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{111} +// Deprecated: Use GetVersionResponse.ProtoReflect.Descriptor instead. +func (*GetVersionResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{102} } -func (x *ReloadSchemaRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *GetVersionResponse) GetVersion() string { if x != nil { - return x.TabletAlias + return x.Version } - return nil + return "" } -type ReloadSchemaResponse struct { +type GetVSchemaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + VSchema *vschema.Keyspace `protobuf:"bytes,1,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"` } -func (x *ReloadSchemaResponse) Reset() { - *x = ReloadSchemaResponse{} +func (x *GetVSchemaResponse) Reset() { + *x = GetVSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[112] + mi := &file_vtctldata_proto_msgTypes[103] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ReloadSchemaResponse) String() string { +func (x *GetVSchemaResponse) String() string { return protoimpl.X.MessageStringOf(x) } 
-func (*ReloadSchemaResponse) ProtoMessage() {} +func (*GetVSchemaResponse) ProtoMessage() {} -func (x *ReloadSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[112] +func (x *GetVSchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[103] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6540,42 +6763,48 @@ func (x *ReloadSchemaResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReloadSchemaResponse.ProtoReflect.Descriptor instead. -func (*ReloadSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{112} +// Deprecated: Use GetVSchemaResponse.ProtoReflect.Descriptor instead. +func (*GetVSchemaResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{103} } -type ReloadSchemaKeyspaceRequest struct { +func (x *GetVSchemaResponse) GetVSchema() *vschema.Keyspace { + if x != nil { + return x.VSchema + } + return nil +} + +type GetWorkflowsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - WaitPosition string `protobuf:"bytes,2,opt,name=wait_position,json=waitPosition,proto3" json:"wait_position,omitempty"` - IncludePrimary bool `protobuf:"varint,3,opt,name=include_primary,json=includePrimary,proto3" json:"include_primary,omitempty"` - // Concurrency is the global concurrency across all shards in the keyspace - // (so, at most this many tablets will be reloaded across the keyspace at any - // given point). 
- Concurrency uint32 `protobuf:"varint,4,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + ActiveOnly bool `protobuf:"varint,2,opt,name=active_only,json=activeOnly,proto3" json:"active_only,omitempty"` + NameOnly bool `protobuf:"varint,3,opt,name=name_only,json=nameOnly,proto3" json:"name_only,omitempty"` + // If you only want a specific workflow then set this field. + Workflow string `protobuf:"bytes,4,opt,name=workflow,proto3" json:"workflow,omitempty"` + IncludeLogs bool `protobuf:"varint,5,opt,name=include_logs,json=includeLogs,proto3" json:"include_logs,omitempty"` } -func (x *ReloadSchemaKeyspaceRequest) Reset() { - *x = ReloadSchemaKeyspaceRequest{} +func (x *GetWorkflowsRequest) Reset() { + *x = GetWorkflowsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[113] + mi := &file_vtctldata_proto_msgTypes[104] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ReloadSchemaKeyspaceRequest) String() string { +func (x *GetWorkflowsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReloadSchemaKeyspaceRequest) ProtoMessage() {} +func (*GetWorkflowsRequest) ProtoMessage() {} -func (x *ReloadSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[113] +func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[104] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6586,64 +6815,71 @@ func (x *ReloadSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReloadSchemaKeyspaceRequest.ProtoReflect.Descriptor instead. 
-func (*ReloadSchemaKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{113} +// Deprecated: Use GetWorkflowsRequest.ProtoReflect.Descriptor instead. +func (*GetWorkflowsRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{104} } -func (x *ReloadSchemaKeyspaceRequest) GetKeyspace() string { +func (x *GetWorkflowsRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *ReloadSchemaKeyspaceRequest) GetWaitPosition() string { +func (x *GetWorkflowsRequest) GetActiveOnly() bool { if x != nil { - return x.WaitPosition + return x.ActiveOnly } - return "" + return false } -func (x *ReloadSchemaKeyspaceRequest) GetIncludePrimary() bool { +func (x *GetWorkflowsRequest) GetNameOnly() bool { if x != nil { - return x.IncludePrimary + return x.NameOnly } return false } -func (x *ReloadSchemaKeyspaceRequest) GetConcurrency() uint32 { +func (x *GetWorkflowsRequest) GetWorkflow() string { if x != nil { - return x.Concurrency + return x.Workflow } - return 0 + return "" } -type ReloadSchemaKeyspaceResponse struct { +func (x *GetWorkflowsRequest) GetIncludeLogs() bool { + if x != nil { + return x.IncludeLogs + } + return false +} + +type GetWorkflowsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Events []*logutil.Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` + Workflows []*Workflow `protobuf:"bytes,1,rep,name=workflows,proto3" json:"workflows,omitempty"` } -func (x *ReloadSchemaKeyspaceResponse) Reset() { - *x = ReloadSchemaKeyspaceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[114] +func (x *GetWorkflowsResponse) Reset() { + *x = GetWorkflowsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[105] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x 
*ReloadSchemaKeyspaceResponse) String() string { +func (x *GetWorkflowsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReloadSchemaKeyspaceResponse) ProtoMessage() {} +func (*GetWorkflowsResponse) ProtoMessage() {} -func (x *ReloadSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[114] +func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[105] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6654,48 +6890,47 @@ func (x *ReloadSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReloadSchemaKeyspaceResponse.ProtoReflect.Descriptor instead. -func (*ReloadSchemaKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{114} +// Deprecated: Use GetWorkflowsResponse.ProtoReflect.Descriptor instead. 
+func (*GetWorkflowsResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{105} } -func (x *ReloadSchemaKeyspaceResponse) GetEvents() []*logutil.Event { +func (x *GetWorkflowsResponse) GetWorkflows() []*Workflow { if x != nil { - return x.Events + return x.Workflows } return nil } -type ReloadSchemaShardRequest struct { +type InitShardPrimaryRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - WaitPosition string `protobuf:"bytes,3,opt,name=wait_position,json=waitPosition,proto3" json:"wait_position,omitempty"` - IncludePrimary bool `protobuf:"varint,4,opt,name=include_primary,json=includePrimary,proto3" json:"include_primary,omitempty"` - // Concurrency is the maximum number of tablets to reload at one time. - Concurrency uint32 `protobuf:"varint,5,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + PrimaryElectTabletAlias *topodata.TabletAlias `protobuf:"bytes,3,opt,name=primary_elect_tablet_alias,json=primaryElectTabletAlias,proto3" json:"primary_elect_tablet_alias,omitempty"` + Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` + WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,5,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` } -func (x *ReloadSchemaShardRequest) Reset() { - *x = ReloadSchemaShardRequest{} +func (x *InitShardPrimaryRequest) Reset() { + *x = InitShardPrimaryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[115] + mi := &file_vtctldata_proto_msgTypes[106] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ReloadSchemaShardRequest) String() string { +func (x *InitShardPrimaryRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReloadSchemaShardRequest) ProtoMessage() {} +func (*InitShardPrimaryRequest) ProtoMessage() {} -func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[115] +func (x *InitShardPrimaryRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[106] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6706,71 +6941,71 @@ func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReloadSchemaShardRequest.ProtoReflect.Descriptor instead. -func (*ReloadSchemaShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{115} +// Deprecated: Use InitShardPrimaryRequest.ProtoReflect.Descriptor instead. 
+func (*InitShardPrimaryRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{106} } -func (x *ReloadSchemaShardRequest) GetKeyspace() string { +func (x *InitShardPrimaryRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *ReloadSchemaShardRequest) GetShard() string { +func (x *InitShardPrimaryRequest) GetShard() string { if x != nil { return x.Shard } return "" } -func (x *ReloadSchemaShardRequest) GetWaitPosition() string { +func (x *InitShardPrimaryRequest) GetPrimaryElectTabletAlias() *topodata.TabletAlias { if x != nil { - return x.WaitPosition + return x.PrimaryElectTabletAlias } - return "" + return nil } -func (x *ReloadSchemaShardRequest) GetIncludePrimary() bool { +func (x *InitShardPrimaryRequest) GetForce() bool { if x != nil { - return x.IncludePrimary + return x.Force } return false } -func (x *ReloadSchemaShardRequest) GetConcurrency() uint32 { +func (x *InitShardPrimaryRequest) GetWaitReplicasTimeout() *vttime.Duration { if x != nil { - return x.Concurrency + return x.WaitReplicasTimeout } - return 0 + return nil } -type ReloadSchemaShardResponse struct { +type InitShardPrimaryResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Events []*logutil.Event `protobuf:"bytes,2,rep,name=events,proto3" json:"events,omitempty"` + Events []*logutil.Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` } -func (x *ReloadSchemaShardResponse) Reset() { - *x = ReloadSchemaShardResponse{} +func (x *InitShardPrimaryResponse) Reset() { + *x = InitShardPrimaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[116] + mi := &file_vtctldata_proto_msgTypes[107] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ReloadSchemaShardResponse) String() string { +func (x *InitShardPrimaryResponse) String() string { return protoimpl.X.MessageStringOf(x) } 
-func (*ReloadSchemaShardResponse) ProtoMessage() {} +func (*InitShardPrimaryResponse) ProtoMessage() {} -func (x *ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[116] +func (x *InitShardPrimaryResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[107] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6781,45 +7016,44 @@ func (x *ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReloadSchemaShardResponse.ProtoReflect.Descriptor instead. -func (*ReloadSchemaShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{116} +// Deprecated: Use InitShardPrimaryResponse.ProtoReflect.Descriptor instead. +func (*InitShardPrimaryResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{107} } -func (x *ReloadSchemaShardResponse) GetEvents() []*logutil.Event { +func (x *InitShardPrimaryResponse) GetEvents() []*logutil.Event { if x != nil { return x.Events } return nil } -type RemoveBackupRequest struct { +type LaunchSchemaMigrationRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` } -func (x *RemoveBackupRequest) Reset() { - *x = RemoveBackupRequest{} +func (x *LaunchSchemaMigrationRequest) Reset() { + *x = LaunchSchemaMigrationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[117] + mi := &file_vtctldata_proto_msgTypes[108] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RemoveBackupRequest) String() string { +func (x *LaunchSchemaMigrationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RemoveBackupRequest) ProtoMessage() {} +func (*LaunchSchemaMigrationRequest) ProtoMessage() {} -func (x *RemoveBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[117] +func (x *LaunchSchemaMigrationRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[108] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6830,55 +7064,50 @@ func (x *RemoveBackupRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RemoveBackupRequest.ProtoReflect.Descriptor instead. -func (*RemoveBackupRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{117} +// Deprecated: Use LaunchSchemaMigrationRequest.ProtoReflect.Descriptor instead. 
+func (*LaunchSchemaMigrationRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{108} } -func (x *RemoveBackupRequest) GetKeyspace() string { +func (x *LaunchSchemaMigrationRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *RemoveBackupRequest) GetShard() string { - if x != nil { - return x.Shard - } - return "" -} - -func (x *RemoveBackupRequest) GetName() string { +func (x *LaunchSchemaMigrationRequest) GetUuid() string { if x != nil { - return x.Name + return x.Uuid } return "" } -type RemoveBackupResponse struct { +type LaunchSchemaMigrationResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + RowsAffectedByShard map[string]uint64 `protobuf:"bytes,1,rep,name=rows_affected_by_shard,json=rowsAffectedByShard,proto3" json:"rows_affected_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } -func (x *RemoveBackupResponse) Reset() { - *x = RemoveBackupResponse{} +func (x *LaunchSchemaMigrationResponse) Reset() { + *x = LaunchSchemaMigrationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[118] + mi := &file_vtctldata_proto_msgTypes[109] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RemoveBackupResponse) String() string { +func (x *LaunchSchemaMigrationResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RemoveBackupResponse) ProtoMessage() {} +func (*LaunchSchemaMigrationResponse) ProtoMessage() {} -func (x *RemoveBackupResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[118] +func (x *LaunchSchemaMigrationResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[109] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6889,44 
+7118,49 @@ func (x *RemoveBackupResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RemoveBackupResponse.ProtoReflect.Descriptor instead. -func (*RemoveBackupResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{118} +// Deprecated: Use LaunchSchemaMigrationResponse.ProtoReflect.Descriptor instead. +func (*LaunchSchemaMigrationResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{109} } -type RemoveKeyspaceCellRequest struct { +func (x *LaunchSchemaMigrationResponse) GetRowsAffectedByShard() map[string]uint64 { + if x != nil { + return x.RowsAffectedByShard + } + return nil +} + +type LookupVindexCreateRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Cell string `protobuf:"bytes,2,opt,name=cell,proto3" json:"cell,omitempty"` - // Force proceeds even if the cell's topology server cannot be reached. This - // should only be set if a cell has been shut down entirely, and the global - // topology data just needs to be updated. - Force bool `protobuf:"varint,3,opt,name=force,proto3" json:"force,omitempty"` - // Recursive also deletes all tablets in that cell belonging to the specified - // keyspace. 
- Recursive bool `protobuf:"varint,4,opt,name=recursive,proto3" json:"recursive,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Workflow string `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` + Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` + Vindex *vschema.Keyspace `protobuf:"bytes,4,opt,name=vindex,proto3" json:"vindex,omitempty"` + ContinueAfterCopyWithOwner bool `protobuf:"varint,5,opt,name=continue_after_copy_with_owner,json=continueAfterCopyWithOwner,proto3" json:"continue_after_copy_with_owner,omitempty"` + TabletTypes []topodata.TabletType `protobuf:"varint,6,rep,packed,name=tablet_types,json=tabletTypes,proto3,enum=topodata.TabletType" json:"tablet_types,omitempty"` + TabletSelectionPreference tabletmanagerdata.TabletSelectionPreference `protobuf:"varint,7,opt,name=tablet_selection_preference,json=tabletSelectionPreference,proto3,enum=tabletmanagerdata.TabletSelectionPreference" json:"tablet_selection_preference,omitempty"` } -func (x *RemoveKeyspaceCellRequest) Reset() { - *x = RemoveKeyspaceCellRequest{} +func (x *LookupVindexCreateRequest) Reset() { + *x = LookupVindexCreateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[119] + mi := &file_vtctldata_proto_msgTypes[110] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RemoveKeyspaceCellRequest) String() string { +func (x *LookupVindexCreateRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RemoveKeyspaceCellRequest) ProtoMessage() {} +func (*LookupVindexCreateRequest) ProtoMessage() {} -func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[119] +func (x *LookupVindexCreateRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[110] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6937,62 +7171,83 @@ func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RemoveKeyspaceCellRequest.ProtoReflect.Descriptor instead. -func (*RemoveKeyspaceCellRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{119} +// Deprecated: Use LookupVindexCreateRequest.ProtoReflect.Descriptor instead. +func (*LookupVindexCreateRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{110} } -func (x *RemoveKeyspaceCellRequest) GetKeyspace() string { +func (x *LookupVindexCreateRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *RemoveKeyspaceCellRequest) GetCell() string { +func (x *LookupVindexCreateRequest) GetWorkflow() string { if x != nil { - return x.Cell + return x.Workflow } return "" } -func (x *RemoveKeyspaceCellRequest) GetForce() bool { +func (x *LookupVindexCreateRequest) GetCells() []string { if x != nil { - return x.Force + return x.Cells } - return false + return nil } -func (x *RemoveKeyspaceCellRequest) GetRecursive() bool { +func (x *LookupVindexCreateRequest) GetVindex() *vschema.Keyspace { if x != nil { - return x.Recursive + return x.Vindex + } + return nil +} + +func (x *LookupVindexCreateRequest) GetContinueAfterCopyWithOwner() bool { + if x != nil { + return x.ContinueAfterCopyWithOwner } return false } -type RemoveKeyspaceCellResponse struct { +func (x *LookupVindexCreateRequest) GetTabletTypes() []topodata.TabletType { + if x != nil { + return x.TabletTypes + } + return nil +} + +func (x *LookupVindexCreateRequest) GetTabletSelectionPreference() tabletmanagerdata.TabletSelectionPreference { + if x != nil { + return x.TabletSelectionPreference + } + return tabletmanagerdata.TabletSelectionPreference(0) +} + +type LookupVindexCreateResponse struct { state protoimpl.MessageState sizeCache 
protoimpl.SizeCache unknownFields protoimpl.UnknownFields } -func (x *RemoveKeyspaceCellResponse) Reset() { - *x = RemoveKeyspaceCellResponse{} +func (x *LookupVindexCreateResponse) Reset() { + *x = LookupVindexCreateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[120] + mi := &file_vtctldata_proto_msgTypes[111] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RemoveKeyspaceCellResponse) String() string { +func (x *LookupVindexCreateResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RemoveKeyspaceCellResponse) ProtoMessage() {} +func (*LookupVindexCreateResponse) ProtoMessage() {} -func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[120] +func (x *LookupVindexCreateResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[111] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7003,45 +7258,41 @@ func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RemoveKeyspaceCellResponse.ProtoReflect.Descriptor instead. -func (*RemoveKeyspaceCellResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{120} +// Deprecated: Use LookupVindexCreateResponse.ProtoReflect.Descriptor instead. 
+func (*LookupVindexCreateResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{111} } -type RemoveShardCellRequest struct { +type LookupVindexExternalizeRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` - Cell string `protobuf:"bytes,3,opt,name=cell,proto3" json:"cell,omitempty"` - // Force proceeds even if the cell's topology server cannot be reached. This - // should only be set if a cell has been shut down entirely, and the global - // topology data just needs to be updated. - Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` - // Recursive also deletes all tablets in that cell belonging to the specified - // keyspace and shard. - Recursive bool `protobuf:"varint,5,opt,name=recursive,proto3" json:"recursive,omitempty"` + // Where the lookup vindex lives. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // This is the name of the lookup vindex and the vreplication workflow. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Where the vreplication workflow lives. 
+ TableKeyspace string `protobuf:"bytes,3,opt,name=table_keyspace,json=tableKeyspace,proto3" json:"table_keyspace,omitempty"` } -func (x *RemoveShardCellRequest) Reset() { - *x = RemoveShardCellRequest{} +func (x *LookupVindexExternalizeRequest) Reset() { + *x = LookupVindexExternalizeRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[121] + mi := &file_vtctldata_proto_msgTypes[112] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RemoveShardCellRequest) String() string { +func (x *LookupVindexExternalizeRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RemoveShardCellRequest) ProtoMessage() {} +func (*LookupVindexExternalizeRequest) ProtoMessage() {} -func (x *RemoveShardCellRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[121] +func (x *LookupVindexExternalizeRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[112] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7052,69 +7303,58 @@ func (x *RemoveShardCellRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RemoveShardCellRequest.ProtoReflect.Descriptor instead. -func (*RemoveShardCellRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{121} +// Deprecated: Use LookupVindexExternalizeRequest.ProtoReflect.Descriptor instead. 
+func (*LookupVindexExternalizeRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{112} } -func (x *RemoveShardCellRequest) GetKeyspace() string { +func (x *LookupVindexExternalizeRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *RemoveShardCellRequest) GetShardName() string { +func (x *LookupVindexExternalizeRequest) GetName() string { if x != nil { - return x.ShardName + return x.Name } return "" } -func (x *RemoveShardCellRequest) GetCell() string { +func (x *LookupVindexExternalizeRequest) GetTableKeyspace() string { if x != nil { - return x.Cell + return x.TableKeyspace } return "" } -func (x *RemoveShardCellRequest) GetForce() bool { - if x != nil { - return x.Force - } - return false -} +type LookupVindexExternalizeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (x *RemoveShardCellRequest) GetRecursive() bool { - if x != nil { - return x.Recursive - } - return false + // Was the workflow also deleted. 
+ WorkflowDeleted bool `protobuf:"varint,1,opt,name=workflow_deleted,json=workflowDeleted,proto3" json:"workflow_deleted,omitempty"` } -type RemoveShardCellResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *RemoveShardCellResponse) Reset() { - *x = RemoveShardCellResponse{} +func (x *LookupVindexExternalizeResponse) Reset() { + *x = LookupVindexExternalizeResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[122] + mi := &file_vtctldata_proto_msgTypes[113] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RemoveShardCellResponse) String() string { +func (x *LookupVindexExternalizeResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RemoveShardCellResponse) ProtoMessage() {} +func (*LookupVindexExternalizeResponse) ProtoMessage() {} -func (x *RemoveShardCellResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[122] +func (x *LookupVindexExternalizeResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[113] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7125,38 +7365,43 @@ func (x *RemoveShardCellResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RemoveShardCellResponse.ProtoReflect.Descriptor instead. -func (*RemoveShardCellResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{122} +// Deprecated: Use LookupVindexExternalizeResponse.ProtoReflect.Descriptor instead. 
+func (*LookupVindexExternalizeResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{113} } -type ReparentTabletRequest struct { +func (x *LookupVindexExternalizeResponse) GetWorkflowDeleted() bool { + if x != nil { + return x.WorkflowDeleted + } + return false +} + +type MaterializeCreateRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Tablet is the alias of the tablet that should be reparented under the - // current shard primary. - Tablet *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` + Settings *MaterializeSettings `protobuf:"bytes,1,opt,name=settings,proto3" json:"settings,omitempty"` } -func (x *ReparentTabletRequest) Reset() { - *x = ReparentTabletRequest{} +func (x *MaterializeCreateRequest) Reset() { + *x = MaterializeCreateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[123] + mi := &file_vtctldata_proto_msgTypes[114] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ReparentTabletRequest) String() string { +func (x *MaterializeCreateRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReparentTabletRequest) ProtoMessage() {} +func (*MaterializeCreateRequest) ProtoMessage() {} -func (x *ReparentTabletRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[123] +func (x *MaterializeCreateRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[114] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7167,48 +7412,41 @@ func (x *ReparentTabletRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReparentTabletRequest.ProtoReflect.Descriptor instead. 
-func (*ReparentTabletRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{123} +// Deprecated: Use MaterializeCreateRequest.ProtoReflect.Descriptor instead. +func (*MaterializeCreateRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{114} } -func (x *ReparentTabletRequest) GetTablet() *topodata.TabletAlias { +func (x *MaterializeCreateRequest) GetSettings() *MaterializeSettings { if x != nil { - return x.Tablet + return x.Settings } return nil } -type ReparentTabletResponse struct { +type MaterializeCreateResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - // Keyspace is the name of the keyspace the tablet was reparented in. - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Shard is the name of the shard the tablet was reparented in. - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - // Primary is the alias of the tablet that the tablet was reparented under. 
- Primary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=primary,proto3" json:"primary,omitempty"` } -func (x *ReparentTabletResponse) Reset() { - *x = ReparentTabletResponse{} +func (x *MaterializeCreateResponse) Reset() { + *x = MaterializeCreateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[124] + mi := &file_vtctldata_proto_msgTypes[115] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ReparentTabletResponse) String() string { +func (x *MaterializeCreateResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReparentTabletResponse) ProtoMessage() {} +func (*MaterializeCreateResponse) ProtoMessage() {} -func (x *ReparentTabletResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[124] +func (x *MaterializeCreateResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[115] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7219,66 +7457,61 @@ func (x *ReparentTabletResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReparentTabletResponse.ProtoReflect.Descriptor instead. -func (*ReparentTabletResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{124} -} - -func (x *ReparentTabletResponse) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *ReparentTabletResponse) GetShard() string { - if x != nil { - return x.Shard - } - return "" -} - -func (x *ReparentTabletResponse) GetPrimary() *topodata.TabletAlias { - if x != nil { - return x.Primary - } - return nil +// Deprecated: Use MaterializeCreateResponse.ProtoReflect.Descriptor instead. 
+func (*MaterializeCreateResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{115} } -type RestoreFromBackupRequest struct { +type MigrateCreateRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - // BackupTime, if set, will use the backup taken most closely at or before - // this time. If nil, the latest backup will be restored on the tablet. - BackupTime *vttime.Time `protobuf:"bytes,2,opt,name=backup_time,json=backupTime,proto3" json:"backup_time,omitempty"` - // RestoreToPos indicates a position for a point-in-time recovery. The recovery - // is expected to utilize one full backup, followed by zero or more incremental backups, - // that reach the precise desired position - RestoreToPos string `protobuf:"bytes,3,opt,name=restore_to_pos,json=restoreToPos,proto3" json:"restore_to_pos,omitempty"` - // Dry run does not actually performs the restore, but validates the steps and availability of backups - DryRun bool `protobuf:"varint,4,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` + // The necessary info gets passed on to each primary tablet involved + // in the workflow via the CreateVReplicationWorkflow tabletmanager RPC. 
+ Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + SourceKeyspace string `protobuf:"bytes,2,opt,name=source_keyspace,json=sourceKeyspace,proto3" json:"source_keyspace,omitempty"` + TargetKeyspace string `protobuf:"bytes,3,opt,name=target_keyspace,json=targetKeyspace,proto3" json:"target_keyspace,omitempty"` + MountName string `protobuf:"bytes,4,opt,name=mount_name,json=mountName,proto3" json:"mount_name,omitempty"` + Cells []string `protobuf:"bytes,5,rep,name=cells,proto3" json:"cells,omitempty"` + TabletTypes []topodata.TabletType `protobuf:"varint,6,rep,packed,name=tablet_types,json=tabletTypes,proto3,enum=topodata.TabletType" json:"tablet_types,omitempty"` + TabletSelectionPreference tabletmanagerdata.TabletSelectionPreference `protobuf:"varint,7,opt,name=tablet_selection_preference,json=tabletSelectionPreference,proto3,enum=tabletmanagerdata.TabletSelectionPreference" json:"tablet_selection_preference,omitempty"` + AllTables bool `protobuf:"varint,8,opt,name=all_tables,json=allTables,proto3" json:"all_tables,omitempty"` + IncludeTables []string `protobuf:"bytes,9,rep,name=include_tables,json=includeTables,proto3" json:"include_tables,omitempty"` + ExcludeTables []string `protobuf:"bytes,10,rep,name=exclude_tables,json=excludeTables,proto3" json:"exclude_tables,omitempty"` + // SourceTimeZone is the time zone in which datetimes on the source were stored, provided as an option in MoveTables + SourceTimeZone string `protobuf:"bytes,11,opt,name=source_time_zone,json=sourceTimeZone,proto3" json:"source_time_zone,omitempty"` + // OnDdl specifies the action to be taken when a DDL is encountered. + OnDdl string `protobuf:"bytes,12,opt,name=on_ddl,json=onDdl,proto3" json:"on_ddl,omitempty"` + // StopAfterCopy specifies if vreplication should be stopped after copying. 
+ StopAfterCopy bool `protobuf:"varint,13,opt,name=stop_after_copy,json=stopAfterCopy,proto3" json:"stop_after_copy,omitempty"` + // DropForeignKeys specifies if foreign key constraints should be elided on the target. + DropForeignKeys bool `protobuf:"varint,14,opt,name=drop_foreign_keys,json=dropForeignKeys,proto3" json:"drop_foreign_keys,omitempty"` + // DeferSecondaryKeys specifies if secondary keys should be created in one shot after table copy finishes. + DeferSecondaryKeys bool `protobuf:"varint,15,opt,name=defer_secondary_keys,json=deferSecondaryKeys,proto3" json:"defer_secondary_keys,omitempty"` + // Start the workflow after creating it. + AutoStart bool `protobuf:"varint,16,opt,name=auto_start,json=autoStart,proto3" json:"auto_start,omitempty"` + // NoRoutingRules is set to true if routing rules should not be created on the target when the workflow is created. + NoRoutingRules bool `protobuf:"varint,17,opt,name=no_routing_rules,json=noRoutingRules,proto3" json:"no_routing_rules,omitempty"` } -func (x *RestoreFromBackupRequest) Reset() { - *x = RestoreFromBackupRequest{} +func (x *MigrateCreateRequest) Reset() { + *x = MigrateCreateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[125] + mi := &file_vtctldata_proto_msgTypes[116] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RestoreFromBackupRequest) String() string { +func (x *MigrateCreateRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RestoreFromBackupRequest) ProtoMessage() {} +func (*MigrateCreateRequest) ProtoMessage() {} -func (x *RestoreFromBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[125] +func (x *MigrateCreateRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[116] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7289,68 +7522,160 @@ 
func (x *RestoreFromBackupRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RestoreFromBackupRequest.ProtoReflect.Descriptor instead. -func (*RestoreFromBackupRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{125} +// Deprecated: Use MigrateCreateRequest.ProtoReflect.Descriptor instead. +func (*MigrateCreateRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{116} } -func (x *RestoreFromBackupRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *MigrateCreateRequest) GetWorkflow() string { if x != nil { - return x.TabletAlias + return x.Workflow + } + return "" +} + +func (x *MigrateCreateRequest) GetSourceKeyspace() string { + if x != nil { + return x.SourceKeyspace + } + return "" +} + +func (x *MigrateCreateRequest) GetTargetKeyspace() string { + if x != nil { + return x.TargetKeyspace + } + return "" +} + +func (x *MigrateCreateRequest) GetMountName() string { + if x != nil { + return x.MountName + } + return "" +} + +func (x *MigrateCreateRequest) GetCells() []string { + if x != nil { + return x.Cells } return nil } -func (x *RestoreFromBackupRequest) GetBackupTime() *vttime.Time { +func (x *MigrateCreateRequest) GetTabletTypes() []topodata.TabletType { if x != nil { - return x.BackupTime + return x.TabletTypes } return nil } -func (x *RestoreFromBackupRequest) GetRestoreToPos() string { +func (x *MigrateCreateRequest) GetTabletSelectionPreference() tabletmanagerdata.TabletSelectionPreference { if x != nil { - return x.RestoreToPos + return x.TabletSelectionPreference + } + return tabletmanagerdata.TabletSelectionPreference(0) +} + +func (x *MigrateCreateRequest) GetAllTables() bool { + if x != nil { + return x.AllTables + } + return false +} + +func (x *MigrateCreateRequest) GetIncludeTables() []string { + if x != nil { + return x.IncludeTables + } + return nil +} + +func (x *MigrateCreateRequest) GetExcludeTables() []string { + if x 
!= nil { + return x.ExcludeTables + } + return nil +} + +func (x *MigrateCreateRequest) GetSourceTimeZone() string { + if x != nil { + return x.SourceTimeZone } return "" } -func (x *RestoreFromBackupRequest) GetDryRun() bool { +func (x *MigrateCreateRequest) GetOnDdl() string { if x != nil { - return x.DryRun + return x.OnDdl + } + return "" +} + +func (x *MigrateCreateRequest) GetStopAfterCopy() bool { + if x != nil { + return x.StopAfterCopy } return false } -type RestoreFromBackupResponse struct { +func (x *MigrateCreateRequest) GetDropForeignKeys() bool { + if x != nil { + return x.DropForeignKeys + } + return false +} + +func (x *MigrateCreateRequest) GetDeferSecondaryKeys() bool { + if x != nil { + return x.DeferSecondaryKeys + } + return false +} + +func (x *MigrateCreateRequest) GetAutoStart() bool { + if x != nil { + return x.AutoStart + } + return false +} + +func (x *MigrateCreateRequest) GetNoRoutingRules() bool { + if x != nil { + return x.NoRoutingRules + } + return false +} + +type MigrateCompleteRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // TabletAlias is the alias of the tablet doing the restore. 
- TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` - Event *logutil.Event `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"` + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + TargetKeyspace string `protobuf:"bytes,3,opt,name=target_keyspace,json=targetKeyspace,proto3" json:"target_keyspace,omitempty"` + KeepData bool `protobuf:"varint,4,opt,name=keep_data,json=keepData,proto3" json:"keep_data,omitempty"` + KeepRoutingRules bool `protobuf:"varint,5,opt,name=keep_routing_rules,json=keepRoutingRules,proto3" json:"keep_routing_rules,omitempty"` + RenameTables bool `protobuf:"varint,6,opt,name=rename_tables,json=renameTables,proto3" json:"rename_tables,omitempty"` + DryRun bool `protobuf:"varint,7,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` } -func (x *RestoreFromBackupResponse) Reset() { - *x = RestoreFromBackupResponse{} +func (x *MigrateCompleteRequest) Reset() { + *x = MigrateCompleteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[126] + mi := &file_vtctldata_proto_msgTypes[117] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RestoreFromBackupResponse) String() string { +func (x *MigrateCompleteRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RestoreFromBackupResponse) ProtoMessage() {} +func (*MigrateCompleteRequest) ProtoMessage() {} -func (x *RestoreFromBackupResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[126] +func (x *MigrateCompleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[117] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7361,64 +7686,79 @@ func (x *RestoreFromBackupResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RestoreFromBackupResponse.ProtoReflect.Descriptor instead. -func (*RestoreFromBackupResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{126} +// Deprecated: Use MigrateCompleteRequest.ProtoReflect.Descriptor instead. +func (*MigrateCompleteRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{117} } -func (x *RestoreFromBackupResponse) GetTabletAlias() *topodata.TabletAlias { +func (x *MigrateCompleteRequest) GetWorkflow() string { if x != nil { - return x.TabletAlias + return x.Workflow } - return nil + return "" } -func (x *RestoreFromBackupResponse) GetKeyspace() string { +func (x *MigrateCompleteRequest) GetTargetKeyspace() string { if x != nil { - return x.Keyspace + return x.TargetKeyspace } return "" } -func (x *RestoreFromBackupResponse) GetShard() string { +func (x *MigrateCompleteRequest) GetKeepData() bool { if x != nil { - return x.Shard + return x.KeepData } - return "" + return false } -func (x *RestoreFromBackupResponse) GetEvent() *logutil.Event { +func (x *MigrateCompleteRequest) GetKeepRoutingRules() bool { if x != nil { - return x.Event + return x.KeepRoutingRules } - return nil + return false } -type RunHealthCheckRequest struct { +func (x *MigrateCompleteRequest) GetRenameTables() bool { + if x != nil { + return x.RenameTables + } + return false +} + +func (x *MigrateCompleteRequest) GetDryRun() bool { + if x != nil { + return x.DryRun + } + return false +} + +type MigrateCompleteResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Summary string 
`protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"` + DryRunResults []string `protobuf:"bytes,2,rep,name=dry_run_results,json=dryRunResults,proto3" json:"dry_run_results,omitempty"` } -func (x *RunHealthCheckRequest) Reset() { - *x = RunHealthCheckRequest{} +func (x *MigrateCompleteResponse) Reset() { + *x = MigrateCompleteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[127] + mi := &file_vtctldata_proto_msgTypes[118] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RunHealthCheckRequest) String() string { +func (x *MigrateCompleteResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RunHealthCheckRequest) ProtoMessage() {} +func (*MigrateCompleteResponse) ProtoMessage() {} -func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[127] +func (x *MigrateCompleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[118] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7429,41 +7769,53 @@ func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RunHealthCheckRequest.ProtoReflect.Descriptor instead. -func (*RunHealthCheckRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{127} +// Deprecated: Use MigrateCompleteResponse.ProtoReflect.Descriptor instead. 
+func (*MigrateCompleteResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{118} } -func (x *RunHealthCheckRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *MigrateCompleteResponse) GetSummary() string { if x != nil { - return x.TabletAlias + return x.Summary + } + return "" +} + +func (x *MigrateCompleteResponse) GetDryRunResults() []string { + if x != nil { + return x.DryRunResults } return nil } -type RunHealthCheckResponse struct { +type MountRegisterRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + TopoType string `protobuf:"bytes,1,opt,name=topo_type,json=topoType,proto3" json:"topo_type,omitempty"` + TopoServer string `protobuf:"bytes,2,opt,name=topo_server,json=topoServer,proto3" json:"topo_server,omitempty"` + TopoRoot string `protobuf:"bytes,3,opt,name=topo_root,json=topoRoot,proto3" json:"topo_root,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` } -func (x *RunHealthCheckResponse) Reset() { - *x = RunHealthCheckResponse{} +func (x *MountRegisterRequest) Reset() { + *x = MountRegisterRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[128] + mi := &file_vtctldata_proto_msgTypes[119] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RunHealthCheckResponse) String() string { +func (x *MountRegisterRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RunHealthCheckResponse) ProtoMessage() {} +func (*MountRegisterRequest) ProtoMessage() {} -func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[128] +func (x *MountRegisterRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[119] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7474,92 +7826,62 
@@ func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RunHealthCheckResponse.ProtoReflect.Descriptor instead. -func (*RunHealthCheckResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{128} -} - -type SetKeyspaceDurabilityPolicyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - DurabilityPolicy string `protobuf:"bytes,2,opt,name=durability_policy,json=durabilityPolicy,proto3" json:"durability_policy,omitempty"` +// Deprecated: Use MountRegisterRequest.ProtoReflect.Descriptor instead. +func (*MountRegisterRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{119} } -func (x *SetKeyspaceDurabilityPolicyRequest) Reset() { - *x = SetKeyspaceDurabilityPolicyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[129] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *MountRegisterRequest) GetTopoType() string { + if x != nil { + return x.TopoType } + return "" } -func (x *SetKeyspaceDurabilityPolicyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetKeyspaceDurabilityPolicyRequest) ProtoMessage() {} - -func (x *SetKeyspaceDurabilityPolicyRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[129] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *MountRegisterRequest) GetTopoServer() string { + if x != nil { + return x.TopoServer } - return mi.MessageOf(x) -} - -// Deprecated: Use SetKeyspaceDurabilityPolicyRequest.ProtoReflect.Descriptor instead. 
-func (*SetKeyspaceDurabilityPolicyRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{129} + return "" } -func (x *SetKeyspaceDurabilityPolicyRequest) GetKeyspace() string { +func (x *MountRegisterRequest) GetTopoRoot() string { if x != nil { - return x.Keyspace + return x.TopoRoot } return "" } -func (x *SetKeyspaceDurabilityPolicyRequest) GetDurabilityPolicy() string { +func (x *MountRegisterRequest) GetName() string { if x != nil { - return x.DurabilityPolicy + return x.Name } return "" } -type SetKeyspaceDurabilityPolicyResponse struct { +type MountRegisterResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - // Keyspace is the updated keyspace record. - Keyspace *topodata.Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *SetKeyspaceDurabilityPolicyResponse) Reset() { - *x = SetKeyspaceDurabilityPolicyResponse{} +func (x *MountRegisterResponse) Reset() { + *x = MountRegisterResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[130] + mi := &file_vtctldata_proto_msgTypes[120] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetKeyspaceDurabilityPolicyResponse) String() string { +func (x *MountRegisterResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetKeyspaceDurabilityPolicyResponse) ProtoMessage() {} +func (*MountRegisterResponse) ProtoMessage() {} -func (x *SetKeyspaceDurabilityPolicyResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[130] +func (x *MountRegisterResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[120] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7570,47 +7892,36 @@ func (x *SetKeyspaceDurabilityPolicyResponse) ProtoReflect() protoreflect.Messag 
return mi.MessageOf(x) } -// Deprecated: Use SetKeyspaceDurabilityPolicyResponse.ProtoReflect.Descriptor instead. -func (*SetKeyspaceDurabilityPolicyResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{130} -} - -func (x *SetKeyspaceDurabilityPolicyResponse) GetKeyspace() *topodata.Keyspace { - if x != nil { - return x.Keyspace - } - return nil +// Deprecated: Use MountRegisterResponse.ProtoReflect.Descriptor instead. +func (*MountRegisterResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{120} } -type SetKeyspaceServedFromRequest struct { +type MountUnregisterRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - TabletType topodata.TabletType `protobuf:"varint,2,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` - Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` - Remove bool `protobuf:"varint,4,opt,name=remove,proto3" json:"remove,omitempty"` - SourceKeyspace string `protobuf:"bytes,5,opt,name=source_keyspace,json=sourceKeyspace,proto3" json:"source_keyspace,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` } -func (x *SetKeyspaceServedFromRequest) Reset() { - *x = SetKeyspaceServedFromRequest{} +func (x *MountUnregisterRequest) Reset() { + *x = MountUnregisterRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[131] + mi := &file_vtctldata_proto_msgTypes[121] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetKeyspaceServedFromRequest) String() string { +func (x *MountUnregisterRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetKeyspaceServedFromRequest) ProtoMessage() {} +func (*MountUnregisterRequest) ProtoMessage() {} 
-func (x *SetKeyspaceServedFromRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[131] +func (x *MountUnregisterRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[121] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7621,72 +7932,81 @@ func (x *SetKeyspaceServedFromRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetKeyspaceServedFromRequest.ProtoReflect.Descriptor instead. -func (*SetKeyspaceServedFromRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{131} +// Deprecated: Use MountUnregisterRequest.ProtoReflect.Descriptor instead. +func (*MountUnregisterRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{121} } -func (x *SetKeyspaceServedFromRequest) GetKeyspace() string { +func (x *MountUnregisterRequest) GetName() string { if x != nil { - return x.Keyspace + return x.Name } return "" } -func (x *SetKeyspaceServedFromRequest) GetTabletType() topodata.TabletType { - if x != nil { - return x.TabletType - } - return topodata.TabletType(0) +type MountUnregisterResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (x *SetKeyspaceServedFromRequest) GetCells() []string { - if x != nil { - return x.Cells +func (x *MountUnregisterResponse) Reset() { + *x = MountUnregisterResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[122] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (x *SetKeyspaceServedFromRequest) GetRemove() bool { - if x != nil { - return x.Remove - } - return false +func (x *MountUnregisterResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (x *SetKeyspaceServedFromRequest) GetSourceKeyspace() string { - 
if x != nil { - return x.SourceKeyspace +func (*MountUnregisterResponse) ProtoMessage() {} + +func (x *MountUnregisterResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[122] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -type SetKeyspaceServedFromResponse struct { +// Deprecated: Use MountUnregisterResponse.ProtoReflect.Descriptor instead. +func (*MountUnregisterResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{122} +} + +type MountShowRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspace is the updated keyspace record. - Keyspace *topodata.Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` } -func (x *SetKeyspaceServedFromResponse) Reset() { - *x = SetKeyspaceServedFromResponse{} +func (x *MountShowRequest) Reset() { + *x = MountShowRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[132] + mi := &file_vtctldata_proto_msgTypes[123] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetKeyspaceServedFromResponse) String() string { +func (x *MountShowRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetKeyspaceServedFromResponse) ProtoMessage() {} +func (*MountShowRequest) ProtoMessage() {} -func (x *SetKeyspaceServedFromResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[132] +func (x *MountShowRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[123] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ 
-7697,44 +8017,46 @@ func (x *SetKeyspaceServedFromResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetKeyspaceServedFromResponse.ProtoReflect.Descriptor instead. -func (*SetKeyspaceServedFromResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{132} +// Deprecated: Use MountShowRequest.ProtoReflect.Descriptor instead. +func (*MountShowRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{123} } -func (x *SetKeyspaceServedFromResponse) GetKeyspace() *topodata.Keyspace { +func (x *MountShowRequest) GetName() string { if x != nil { - return x.Keyspace + return x.Name } - return nil + return "" } -type SetKeyspaceShardingInfoRequest struct { +type MountShowResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` + TopoType string `protobuf:"bytes,1,opt,name=topo_type,json=topoType,proto3" json:"topo_type,omitempty"` + TopoServer string `protobuf:"bytes,2,opt,name=topo_server,json=topoServer,proto3" json:"topo_server,omitempty"` + TopoRoot string `protobuf:"bytes,3,opt,name=topo_root,json=topoRoot,proto3" json:"topo_root,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` } -func (x *SetKeyspaceShardingInfoRequest) Reset() { - *x = SetKeyspaceShardingInfoRequest{} +func (x *MountShowResponse) Reset() { + *x = MountShowResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[133] + mi := &file_vtctldata_proto_msgTypes[124] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetKeyspaceShardingInfoRequest) String() string { +func (x *MountShowResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func 
(*SetKeyspaceShardingInfoRequest) ProtoMessage() {} +func (*MountShowResponse) ProtoMessage() {} -func (x *SetKeyspaceShardingInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[133] +func (x *MountShowResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[124] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7745,51 +8067,62 @@ func (x *SetKeyspaceShardingInfoRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetKeyspaceShardingInfoRequest.ProtoReflect.Descriptor instead. -func (*SetKeyspaceShardingInfoRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{133} +// Deprecated: Use MountShowResponse.ProtoReflect.Descriptor instead. +func (*MountShowResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{124} } -func (x *SetKeyspaceShardingInfoRequest) GetKeyspace() string { +func (x *MountShowResponse) GetTopoType() string { if x != nil { - return x.Keyspace + return x.TopoType } return "" } -func (x *SetKeyspaceShardingInfoRequest) GetForce() bool { +func (x *MountShowResponse) GetTopoServer() string { if x != nil { - return x.Force + return x.TopoServer } - return false + return "" } -type SetKeyspaceShardingInfoResponse struct { +func (x *MountShowResponse) GetTopoRoot() string { + if x != nil { + return x.TopoRoot + } + return "" +} + +func (x *MountShowResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type MountListRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - // Keyspace is the updated keyspace record. 
- Keyspace *topodata.Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *SetKeyspaceShardingInfoResponse) Reset() { - *x = SetKeyspaceShardingInfoResponse{} +func (x *MountListRequest) Reset() { + *x = MountListRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[134] + mi := &file_vtctldata_proto_msgTypes[125] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetKeyspaceShardingInfoResponse) String() string { +func (x *MountListRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetKeyspaceShardingInfoResponse) ProtoMessage() {} +func (*MountListRequest) ProtoMessage() {} -func (x *SetKeyspaceShardingInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[134] +func (x *MountListRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[125] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7800,45 +8133,36 @@ func (x *SetKeyspaceShardingInfoResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetKeyspaceShardingInfoResponse.ProtoReflect.Descriptor instead. -func (*SetKeyspaceShardingInfoResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{134} -} - -func (x *SetKeyspaceShardingInfoResponse) GetKeyspace() *topodata.Keyspace { - if x != nil { - return x.Keyspace - } - return nil +// Deprecated: Use MountListRequest.ProtoReflect.Descriptor instead. 
+func (*MountListRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{125} } -type SetShardIsPrimaryServingRequest struct { +type MountListResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - IsServing bool `protobuf:"varint,3,opt,name=is_serving,json=isServing,proto3" json:"is_serving,omitempty"` + Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` } -func (x *SetShardIsPrimaryServingRequest) Reset() { - *x = SetShardIsPrimaryServingRequest{} +func (x *MountListResponse) Reset() { + *x = MountListResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[135] + mi := &file_vtctldata_proto_msgTypes[126] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetShardIsPrimaryServingRequest) String() string { +func (x *MountListResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetShardIsPrimaryServingRequest) ProtoMessage() {} +func (*MountListResponse) ProtoMessage() {} -func (x *SetShardIsPrimaryServingRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[135] +func (x *MountListResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[126] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7849,58 +8173,72 @@ func (x *SetShardIsPrimaryServingRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetShardIsPrimaryServingRequest.ProtoReflect.Descriptor instead. 
-func (*SetShardIsPrimaryServingRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{135} +// Deprecated: Use MountListResponse.ProtoReflect.Descriptor instead. +func (*MountListResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{126} } -func (x *SetShardIsPrimaryServingRequest) GetKeyspace() string { +func (x *MountListResponse) GetNames() []string { if x != nil { - return x.Keyspace + return x.Names } - return "" + return nil } -func (x *SetShardIsPrimaryServingRequest) GetShard() string { - if x != nil { - return x.Shard - } - return "" -} - -func (x *SetShardIsPrimaryServingRequest) GetIsServing() bool { - if x != nil { - return x.IsServing - } - return false -} - -type SetShardIsPrimaryServingResponse struct { +type MoveTablesCreateRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Shard is the updated shard record. - Shard *topodata.Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` + // The necessary info gets passed on to each primary tablet involved + // in the workflow via the CreateVReplicationWorkflow tabletmanager RPC. 
+ Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + SourceKeyspace string `protobuf:"bytes,2,opt,name=source_keyspace,json=sourceKeyspace,proto3" json:"source_keyspace,omitempty"` + TargetKeyspace string `protobuf:"bytes,3,opt,name=target_keyspace,json=targetKeyspace,proto3" json:"target_keyspace,omitempty"` + Cells []string `protobuf:"bytes,4,rep,name=cells,proto3" json:"cells,omitempty"` + TabletTypes []topodata.TabletType `protobuf:"varint,5,rep,packed,name=tablet_types,json=tabletTypes,proto3,enum=topodata.TabletType" json:"tablet_types,omitempty"` + TabletSelectionPreference tabletmanagerdata.TabletSelectionPreference `protobuf:"varint,6,opt,name=tablet_selection_preference,json=tabletSelectionPreference,proto3,enum=tabletmanagerdata.TabletSelectionPreference" json:"tablet_selection_preference,omitempty"` + SourceShards []string `protobuf:"bytes,7,rep,name=source_shards,json=sourceShards,proto3" json:"source_shards,omitempty"` + AllTables bool `protobuf:"varint,8,opt,name=all_tables,json=allTables,proto3" json:"all_tables,omitempty"` + IncludeTables []string `protobuf:"bytes,9,rep,name=include_tables,json=includeTables,proto3" json:"include_tables,omitempty"` + ExcludeTables []string `protobuf:"bytes,10,rep,name=exclude_tables,json=excludeTables,proto3" json:"exclude_tables,omitempty"` + // The name of the external cluster mounted in topo server. + ExternalClusterName string `protobuf:"bytes,11,opt,name=external_cluster_name,json=externalClusterName,proto3" json:"external_cluster_name,omitempty"` + // SourceTimeZone is the time zone in which datetimes on the source were stored, provided as an option in MoveTables + SourceTimeZone string `protobuf:"bytes,12,opt,name=source_time_zone,json=sourceTimeZone,proto3" json:"source_time_zone,omitempty"` + // OnDdl specifies the action to be taken when a DDL is encountered. 
+ OnDdl string `protobuf:"bytes,13,opt,name=on_ddl,json=onDdl,proto3" json:"on_ddl,omitempty"` + // StopAfterCopy specifies if vreplication should be stopped after copying. + StopAfterCopy bool `protobuf:"varint,14,opt,name=stop_after_copy,json=stopAfterCopy,proto3" json:"stop_after_copy,omitempty"` + // DropForeignKeys specifies if foreign key constraints should be elided on the target. + DropForeignKeys bool `protobuf:"varint,15,opt,name=drop_foreign_keys,json=dropForeignKeys,proto3" json:"drop_foreign_keys,omitempty"` + // DeferSecondaryKeys specifies if secondary keys should be created in one shot after table copy finishes. + DeferSecondaryKeys bool `protobuf:"varint,16,opt,name=defer_secondary_keys,json=deferSecondaryKeys,proto3" json:"defer_secondary_keys,omitempty"` + // Start the workflow after creating it. + AutoStart bool `protobuf:"varint,17,opt,name=auto_start,json=autoStart,proto3" json:"auto_start,omitempty"` + // NoRoutingRules is set to true if routing rules should not be created on the target when the workflow is created. + NoRoutingRules bool `protobuf:"varint,18,opt,name=no_routing_rules,json=noRoutingRules,proto3" json:"no_routing_rules,omitempty"` + // Run a single copy phase for the entire database. 
+ AtomicCopy bool `protobuf:"varint,19,opt,name=atomic_copy,json=atomicCopy,proto3" json:"atomic_copy,omitempty"` } -func (x *SetShardIsPrimaryServingResponse) Reset() { - *x = SetShardIsPrimaryServingResponse{} +func (x *MoveTablesCreateRequest) Reset() { + *x = MoveTablesCreateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[136] + mi := &file_vtctldata_proto_msgTypes[127] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetShardIsPrimaryServingResponse) String() string { +func (x *MoveTablesCreateRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetShardIsPrimaryServingResponse) ProtoMessage() {} +func (*MoveTablesCreateRequest) ProtoMessage() {} -func (x *SetShardIsPrimaryServingResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[136] +func (x *MoveTablesCreateRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[127] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7911,153 +8249,170 @@ func (x *SetShardIsPrimaryServingResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetShardIsPrimaryServingResponse.ProtoReflect.Descriptor instead. -func (*SetShardIsPrimaryServingResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{136} +// Deprecated: Use MoveTablesCreateRequest.ProtoReflect.Descriptor instead. 
+func (*MoveTablesCreateRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{127} } -func (x *SetShardIsPrimaryServingResponse) GetShard() *topodata.Shard { +func (x *MoveTablesCreateRequest) GetWorkflow() string { if x != nil { - return x.Shard + return x.Workflow + } + return "" +} + +func (x *MoveTablesCreateRequest) GetSourceKeyspace() string { + if x != nil { + return x.SourceKeyspace + } + return "" +} + +func (x *MoveTablesCreateRequest) GetTargetKeyspace() string { + if x != nil { + return x.TargetKeyspace + } + return "" +} + +func (x *MoveTablesCreateRequest) GetCells() []string { + if x != nil { + return x.Cells } return nil } -type SetShardTabletControlRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *MoveTablesCreateRequest) GetTabletTypes() []topodata.TabletType { + if x != nil { + return x.TabletTypes + } + return nil +} - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - TabletType topodata.TabletType `protobuf:"varint,3,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` - Cells []string `protobuf:"bytes,4,rep,name=cells,proto3" json:"cells,omitempty"` - // DeniedTables updates the list of denied tables the shard will serve for - // the given tablet type. This is useful to fix tables that are being blocked - // after a MoveTables operation. - // - // NOTE: Setting this field will cause DisableQueryService to be ignored. - DeniedTables []string `protobuf:"bytes,5,rep,name=denied_tables,json=deniedTables,proto3" json:"denied_tables,omitempty"` - // DisableQueryService instructs whether to enable the query service on - // tablets of the given type in the shard. This is useful to fix Reshard - // operations gone awry. 
- // - // NOTE: this is ignored if DeniedTables is not empty. - DisableQueryService bool `protobuf:"varint,6,opt,name=disable_query_service,json=disableQueryService,proto3" json:"disable_query_service,omitempty"` - // Remove removes the ShardTabletControl record entirely. If set, this takes - // precedence over DeniedTables and DisableQueryService fields, and is useful - // to manually remove serving restrictions after a completed MoveTables - // operation. - Remove bool `protobuf:"varint,7,opt,name=remove,proto3" json:"remove,omitempty"` +func (x *MoveTablesCreateRequest) GetTabletSelectionPreference() tabletmanagerdata.TabletSelectionPreference { + if x != nil { + return x.TabletSelectionPreference + } + return tabletmanagerdata.TabletSelectionPreference(0) } -func (x *SetShardTabletControlRequest) Reset() { - *x = SetShardTabletControlRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[137] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *MoveTablesCreateRequest) GetSourceShards() []string { + if x != nil { + return x.SourceShards } + return nil } -func (x *SetShardTabletControlRequest) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *MoveTablesCreateRequest) GetAllTables() bool { + if x != nil { + return x.AllTables + } + return false } -func (*SetShardTabletControlRequest) ProtoMessage() {} +func (x *MoveTablesCreateRequest) GetIncludeTables() []string { + if x != nil { + return x.IncludeTables + } + return nil +} -func (x *SetShardTabletControlRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[137] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *MoveTablesCreateRequest) GetExcludeTables() []string { + if x != nil { + return x.ExcludeTables } - return mi.MessageOf(x) + return nil } -// Deprecated: 
Use SetShardTabletControlRequest.ProtoReflect.Descriptor instead. -func (*SetShardTabletControlRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{137} +func (x *MoveTablesCreateRequest) GetExternalClusterName() string { + if x != nil { + return x.ExternalClusterName + } + return "" } -func (x *SetShardTabletControlRequest) GetKeyspace() string { +func (x *MoveTablesCreateRequest) GetSourceTimeZone() string { if x != nil { - return x.Keyspace + return x.SourceTimeZone } return "" } -func (x *SetShardTabletControlRequest) GetShard() string { +func (x *MoveTablesCreateRequest) GetOnDdl() string { if x != nil { - return x.Shard + return x.OnDdl } return "" } -func (x *SetShardTabletControlRequest) GetTabletType() topodata.TabletType { +func (x *MoveTablesCreateRequest) GetStopAfterCopy() bool { if x != nil { - return x.TabletType + return x.StopAfterCopy } - return topodata.TabletType(0) + return false } -func (x *SetShardTabletControlRequest) GetCells() []string { +func (x *MoveTablesCreateRequest) GetDropForeignKeys() bool { if x != nil { - return x.Cells + return x.DropForeignKeys } - return nil + return false } -func (x *SetShardTabletControlRequest) GetDeniedTables() []string { +func (x *MoveTablesCreateRequest) GetDeferSecondaryKeys() bool { if x != nil { - return x.DeniedTables + return x.DeferSecondaryKeys } - return nil + return false } -func (x *SetShardTabletControlRequest) GetDisableQueryService() bool { +func (x *MoveTablesCreateRequest) GetAutoStart() bool { if x != nil { - return x.DisableQueryService + return x.AutoStart } return false } -func (x *SetShardTabletControlRequest) GetRemove() bool { +func (x *MoveTablesCreateRequest) GetNoRoutingRules() bool { if x != nil { - return x.Remove + return x.NoRoutingRules } return false } -type SetShardTabletControlResponse struct { +func (x *MoveTablesCreateRequest) GetAtomicCopy() bool { + if x != nil { + return x.AtomicCopy + } + return false +} + +type 
MoveTablesCreateResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Shard is the updated shard record. - Shard *topodata.Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` + Summary string `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"` + Details []*MoveTablesCreateResponse_TabletInfo `protobuf:"bytes,2,rep,name=details,proto3" json:"details,omitempty"` } -func (x *SetShardTabletControlResponse) Reset() { - *x = SetShardTabletControlResponse{} +func (x *MoveTablesCreateResponse) Reset() { + *x = MoveTablesCreateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[138] + mi := &file_vtctldata_proto_msgTypes[128] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetShardTabletControlResponse) String() string { +func (x *MoveTablesCreateResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetShardTabletControlResponse) ProtoMessage() {} +func (*MoveTablesCreateResponse) ProtoMessage() {} -func (x *SetShardTabletControlResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[138] +func (x *MoveTablesCreateResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[128] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8068,44 +8423,55 @@ func (x *SetShardTabletControlResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetShardTabletControlResponse.ProtoReflect.Descriptor instead. -func (*SetShardTabletControlResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{138} +// Deprecated: Use MoveTablesCreateResponse.ProtoReflect.Descriptor instead. 
+func (*MoveTablesCreateResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{128} } -func (x *SetShardTabletControlResponse) GetShard() *topodata.Shard { +func (x *MoveTablesCreateResponse) GetSummary() string { if x != nil { - return x.Shard + return x.Summary + } + return "" +} + +func (x *MoveTablesCreateResponse) GetDetails() []*MoveTablesCreateResponse_TabletInfo { + if x != nil { + return x.Details } return nil } -type SetWritableRequest struct { +type MoveTablesCompleteRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - Writable bool `protobuf:"varint,2,opt,name=writable,proto3" json:"writable,omitempty"` + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + TargetKeyspace string `protobuf:"bytes,3,opt,name=target_keyspace,json=targetKeyspace,proto3" json:"target_keyspace,omitempty"` + KeepData bool `protobuf:"varint,4,opt,name=keep_data,json=keepData,proto3" json:"keep_data,omitempty"` + KeepRoutingRules bool `protobuf:"varint,5,opt,name=keep_routing_rules,json=keepRoutingRules,proto3" json:"keep_routing_rules,omitempty"` + RenameTables bool `protobuf:"varint,6,opt,name=rename_tables,json=renameTables,proto3" json:"rename_tables,omitempty"` + DryRun bool `protobuf:"varint,7,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` } -func (x *SetWritableRequest) Reset() { - *x = SetWritableRequest{} +func (x *MoveTablesCompleteRequest) Reset() { + *x = MoveTablesCompleteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[139] + mi := &file_vtctldata_proto_msgTypes[129] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetWritableRequest) String() string { +func (x *MoveTablesCompleteRequest) String() string { return 
protoimpl.X.MessageStringOf(x) } -func (*SetWritableRequest) ProtoMessage() {} +func (*MoveTablesCompleteRequest) ProtoMessage() {} -func (x *SetWritableRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[139] +func (x *MoveTablesCompleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[129] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8116,90 +8482,79 @@ func (x *SetWritableRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetWritableRequest.ProtoReflect.Descriptor instead. -func (*SetWritableRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{139} +// Deprecated: Use MoveTablesCompleteRequest.ProtoReflect.Descriptor instead. +func (*MoveTablesCompleteRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{129} } -func (x *SetWritableRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *MoveTablesCompleteRequest) GetWorkflow() string { if x != nil { - return x.TabletAlias + return x.Workflow } - return nil + return "" } -func (x *SetWritableRequest) GetWritable() bool { +func (x *MoveTablesCompleteRequest) GetTargetKeyspace() string { if x != nil { - return x.Writable + return x.TargetKeyspace } - return false -} - -type SetWritableResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields + return "" } -func (x *SetWritableResponse) Reset() { - *x = SetWritableResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[140] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *MoveTablesCompleteRequest) GetKeepData() bool { + if x != nil { + return x.KeepData } + return false } -func (x *SetWritableResponse) String() string { - return protoimpl.X.MessageStringOf(x) 
+func (x *MoveTablesCompleteRequest) GetKeepRoutingRules() bool { + if x != nil { + return x.KeepRoutingRules + } + return false } -func (*SetWritableResponse) ProtoMessage() {} - -func (x *SetWritableResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[140] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *MoveTablesCompleteRequest) GetRenameTables() bool { + if x != nil { + return x.RenameTables } - return mi.MessageOf(x) + return false } -// Deprecated: Use SetWritableResponse.ProtoReflect.Descriptor instead. -func (*SetWritableResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{140} +func (x *MoveTablesCompleteRequest) GetDryRun() bool { + if x != nil { + return x.DryRun + } + return false } -type ShardReplicationAddRequest struct { +type MoveTablesCompleteResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - TabletAlias *topodata.TabletAlias `protobuf:"bytes,3,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Summary string `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"` + DryRunResults []string `protobuf:"bytes,2,rep,name=dry_run_results,json=dryRunResults,proto3" json:"dry_run_results,omitempty"` } -func (x *ShardReplicationAddRequest) Reset() { - *x = ShardReplicationAddRequest{} +func (x *MoveTablesCompleteResponse) Reset() { + *x = MoveTablesCompleteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[141] + mi := &file_vtctldata_proto_msgTypes[130] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x 
*ShardReplicationAddRequest) String() string { +func (x *MoveTablesCompleteResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ShardReplicationAddRequest) ProtoMessage() {} +func (*MoveTablesCompleteResponse) ProtoMessage() {} -func (x *ShardReplicationAddRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[141] +func (x *MoveTablesCompleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[130] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8210,55 +8565,50 @@ func (x *ShardReplicationAddRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ShardReplicationAddRequest.ProtoReflect.Descriptor instead. -func (*ShardReplicationAddRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{141} +// Deprecated: Use MoveTablesCompleteResponse.ProtoReflect.Descriptor instead. 
+func (*MoveTablesCompleteResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{130} } -func (x *ShardReplicationAddRequest) GetKeyspace() string { +func (x *MoveTablesCompleteResponse) GetSummary() string { if x != nil { - return x.Keyspace + return x.Summary } return "" } -func (x *ShardReplicationAddRequest) GetShard() string { - if x != nil { - return x.Shard - } - return "" -} - -func (x *ShardReplicationAddRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *MoveTablesCompleteResponse) GetDryRunResults() []string { if x != nil { - return x.TabletAlias + return x.DryRunResults } return nil } -type ShardReplicationAddResponse struct { +type PingTabletRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` } -func (x *ShardReplicationAddResponse) Reset() { - *x = ShardReplicationAddResponse{} +func (x *PingTabletRequest) Reset() { + *x = PingTabletRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[142] + mi := &file_vtctldata_proto_msgTypes[131] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ShardReplicationAddResponse) String() string { +func (x *PingTabletRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ShardReplicationAddResponse) ProtoMessage() {} +func (*PingTabletRequest) ProtoMessage() {} -func (x *ShardReplicationAddResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[142] +func (x *PingTabletRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[131] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8269,38 +8619,41 @@ func (x *ShardReplicationAddResponse) ProtoReflect() 
protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ShardReplicationAddResponse.ProtoReflect.Descriptor instead. -func (*ShardReplicationAddResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{142} +// Deprecated: Use PingTabletRequest.ProtoReflect.Descriptor instead. +func (*PingTabletRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{131} } -type ShardReplicationFixRequest struct { +func (x *PingTabletRequest) GetTabletAlias() *topodata.TabletAlias { + if x != nil { + return x.TabletAlias + } + return nil +} + +type PingTabletResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - Cell string `protobuf:"bytes,3,opt,name=cell,proto3" json:"cell,omitempty"` } -func (x *ShardReplicationFixRequest) Reset() { - *x = ShardReplicationFixRequest{} +func (x *PingTabletResponse) Reset() { + *x = PingTabletResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[143] + mi := &file_vtctldata_proto_msgTypes[132] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ShardReplicationFixRequest) String() string { +func (x *PingTabletResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ShardReplicationFixRequest) ProtoMessage() {} +func (*PingTabletResponse) ProtoMessage() {} -func (x *ShardReplicationFixRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[143] +func (x *PingTabletResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[132] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8311,60 +8664,57 @@ func (x 
*ShardReplicationFixRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ShardReplicationFixRequest.ProtoReflect.Descriptor instead. -func (*ShardReplicationFixRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{143} -} - -func (x *ShardReplicationFixRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *ShardReplicationFixRequest) GetShard() string { - if x != nil { - return x.Shard - } - return "" -} - -func (x *ShardReplicationFixRequest) GetCell() string { - if x != nil { - return x.Cell - } - return "" +// Deprecated: Use PingTabletResponse.ProtoReflect.Descriptor instead. +func (*PingTabletResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{132} } -type ShardReplicationFixResponse struct { +type PlannedReparentShardRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Error contains information about the error fixed by a - // ShardReplicationFix RPC. If there were no errors to fix (i.e. all nodes - // in the replication graph are valid), this field is nil. - Error *topodata.ShardReplicationError `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + // Keyspace is the name of the keyspace to perform the Planned Reparent in. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard is the name of the shard to perform teh Planned Reparent in. + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // NewPrimary is the alias of the tablet to promote to shard primary. If not + // specified, the vtctld will select the most up-to-date candidate to promote. + // + // It is an error to set NewPrimary and AvoidPrimary to the same alias. 
+ NewPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=new_primary,json=newPrimary,proto3" json:"new_primary,omitempty"` + // AvoidPrimary is the alias of the tablet to demote. In other words, + // specifying an AvoidPrimary alias tells the vtctld to promote any replica + // other than this one. A shard whose current primary is not this one is then + // a no-op. + // + // It is an error to set NewPrimary and AvoidPrimary to the same alias. + AvoidPrimary *topodata.TabletAlias `protobuf:"bytes,4,opt,name=avoid_primary,json=avoidPrimary,proto3" json:"avoid_primary,omitempty"` + // WaitReplicasTimeout is the duration of time to wait for replicas to catch + // up in replication both before and after the reparent. The timeout is not + // cumulative across both wait periods, meaning that the replicas have + // WaitReplicasTimeout time to catch up before the reparent, and an additional + // WaitReplicasTimeout time to catch up after the reparent. + WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,5,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` } -func (x *ShardReplicationFixResponse) Reset() { - *x = ShardReplicationFixResponse{} +func (x *PlannedReparentShardRequest) Reset() { + *x = PlannedReparentShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[144] + mi := &file_vtctldata_proto_msgTypes[133] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ShardReplicationFixResponse) String() string { +func (x *PlannedReparentShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ShardReplicationFixResponse) ProtoMessage() {} +func (*PlannedReparentShardRequest) ProtoMessage() {} -func (x *ShardReplicationFixResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[144] +func (x *PlannedReparentShardRequest) ProtoReflect() protoreflect.Message { + mi := 
&file_vtctldata_proto_msgTypes[133] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8375,44 +8725,80 @@ func (x *ShardReplicationFixResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ShardReplicationFixResponse.ProtoReflect.Descriptor instead. -func (*ShardReplicationFixResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{144} +// Deprecated: Use PlannedReparentShardRequest.ProtoReflect.Descriptor instead. +func (*PlannedReparentShardRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{133} } -func (x *ShardReplicationFixResponse) GetError() *topodata.ShardReplicationError { +func (x *PlannedReparentShardRequest) GetKeyspace() string { if x != nil { - return x.Error + return x.Keyspace + } + return "" +} + +func (x *PlannedReparentShardRequest) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *PlannedReparentShardRequest) GetNewPrimary() *topodata.TabletAlias { + if x != nil { + return x.NewPrimary } return nil } -type ShardReplicationPositionsRequest struct { +func (x *PlannedReparentShardRequest) GetAvoidPrimary() *topodata.TabletAlias { + if x != nil { + return x.AvoidPrimary + } + return nil +} + +func (x *PlannedReparentShardRequest) GetWaitReplicasTimeout() *vttime.Duration { + if x != nil { + return x.WaitReplicasTimeout + } + return nil +} + +type PlannedReparentShardResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // Keyspace is the name of the keyspace the Planned Reparent took place in. Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // Shard is the name of the shard the Planned Reparent took place in. 
+ Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // PromotedPrimary is the alias of the tablet that was promoted to shard + // primary. If NewPrimary was set in the request, then this will be the same + // alias. Otherwise, it will be the alias of the tablet found to be most + // up-to-date. + PromotedPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=promoted_primary,json=promotedPrimary,proto3" json:"promoted_primary,omitempty"` + Events []*logutil.Event `protobuf:"bytes,4,rep,name=events,proto3" json:"events,omitempty"` } -func (x *ShardReplicationPositionsRequest) Reset() { - *x = ShardReplicationPositionsRequest{} +func (x *PlannedReparentShardResponse) Reset() { + *x = PlannedReparentShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[145] + mi := &file_vtctldata_proto_msgTypes[134] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ShardReplicationPositionsRequest) String() string { +func (x *PlannedReparentShardResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ShardReplicationPositionsRequest) ProtoMessage() {} +func (*PlannedReparentShardResponse) ProtoMessage() {} -func (x *ShardReplicationPositionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[145] +func (x *PlannedReparentShardResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[134] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8423,55 +8809,68 @@ func (x *ShardReplicationPositionsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ShardReplicationPositionsRequest.ProtoReflect.Descriptor instead. 
-func (*ShardReplicationPositionsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{145} +// Deprecated: Use PlannedReparentShardResponse.ProtoReflect.Descriptor instead. +func (*PlannedReparentShardResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{134} } -func (x *ShardReplicationPositionsRequest) GetKeyspace() string { +func (x *PlannedReparentShardResponse) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *ShardReplicationPositionsRequest) GetShard() string { +func (x *PlannedReparentShardResponse) GetShard() string { if x != nil { return x.Shard } return "" } -type ShardReplicationPositionsResponse struct { +func (x *PlannedReparentShardResponse) GetPromotedPrimary() *topodata.TabletAlias { + if x != nil { + return x.PromotedPrimary + } + return nil +} + +func (x *PlannedReparentShardResponse) GetEvents() []*logutil.Event { + if x != nil { + return x.Events + } + return nil +} + +type RebuildKeyspaceGraphRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // ReplicationStatuses is a mapping of tablet alias string to replication - // status for that tablet. - ReplicationStatuses map[string]*replicationdata.Status `protobuf:"bytes,1,rep,name=replication_statuses,json=replicationStatuses,proto3" json:"replication_statuses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // TabletMap is the set of tablets whose replication statuses were queried, - // keyed by tablet alias. 
- TabletMap map[string]*topodata.Tablet `protobuf:"bytes,2,rep,name=tablet_map,json=tabletMap,proto3" json:"tablet_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` + // AllowPartial, when set, allows a SNAPSHOT keyspace to serve with an + // incomplete set of shards. It is ignored for all other keyspace types. + AllowPartial bool `protobuf:"varint,3,opt,name=allow_partial,json=allowPartial,proto3" json:"allow_partial,omitempty"` } -func (x *ShardReplicationPositionsResponse) Reset() { - *x = ShardReplicationPositionsResponse{} +func (x *RebuildKeyspaceGraphRequest) Reset() { + *x = RebuildKeyspaceGraphRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[146] + mi := &file_vtctldata_proto_msgTypes[135] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ShardReplicationPositionsResponse) String() string { +func (x *RebuildKeyspaceGraphRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ShardReplicationPositionsResponse) ProtoMessage() {} +func (*RebuildKeyspaceGraphRequest) ProtoMessage() {} -func (x *ShardReplicationPositionsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[146] +func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[135] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8482,52 +8881,55 @@ func (x *ShardReplicationPositionsResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use ShardReplicationPositionsResponse.ProtoReflect.Descriptor instead. 
-func (*ShardReplicationPositionsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{146} +// Deprecated: Use RebuildKeyspaceGraphRequest.ProtoReflect.Descriptor instead. +func (*RebuildKeyspaceGraphRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{135} } -func (x *ShardReplicationPositionsResponse) GetReplicationStatuses() map[string]*replicationdata.Status { +func (x *RebuildKeyspaceGraphRequest) GetKeyspace() string { if x != nil { - return x.ReplicationStatuses + return x.Keyspace } - return nil + return "" } -func (x *ShardReplicationPositionsResponse) GetTabletMap() map[string]*topodata.Tablet { +func (x *RebuildKeyspaceGraphRequest) GetCells() []string { if x != nil { - return x.TabletMap + return x.Cells } return nil } -type ShardReplicationRemoveRequest struct { +func (x *RebuildKeyspaceGraphRequest) GetAllowPartial() bool { + if x != nil { + return x.AllowPartial + } + return false +} + +type RebuildKeyspaceGraphResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - TabletAlias *topodata.TabletAlias `protobuf:"bytes,3,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` } -func (x *ShardReplicationRemoveRequest) Reset() { - *x = ShardReplicationRemoveRequest{} +func (x *RebuildKeyspaceGraphResponse) Reset() { + *x = RebuildKeyspaceGraphResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[147] + mi := &file_vtctldata_proto_msgTypes[136] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ShardReplicationRemoveRequest) String() string { +func (x *RebuildKeyspaceGraphResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func 
(*ShardReplicationRemoveRequest) ProtoMessage() {} +func (*RebuildKeyspaceGraphResponse) ProtoMessage() {} -func (x *ShardReplicationRemoveRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[147] +func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[136] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8538,55 +8940,83 @@ func (x *ShardReplicationRemoveRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ShardReplicationRemoveRequest.ProtoReflect.Descriptor instead. -func (*ShardReplicationRemoveRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{147} +// Deprecated: Use RebuildKeyspaceGraphResponse.ProtoReflect.Descriptor instead. +func (*RebuildKeyspaceGraphResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{136} } -func (x *ShardReplicationRemoveRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace +type RebuildVSchemaGraphRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Cells specifies the cells to rebuild the SrvVSchema objects for. If empty, + // RebuildVSchemaGraph rebuilds the SrvVSchema for every cell in the topo. 
+ Cells []string `protobuf:"bytes,1,rep,name=cells,proto3" json:"cells,omitempty"` +} + +func (x *RebuildVSchemaGraphRequest) Reset() { + *x = RebuildVSchemaGraphRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[137] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -func (x *ShardReplicationRemoveRequest) GetShard() string { - if x != nil { - return x.Shard +func (x *RebuildVSchemaGraphRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RebuildVSchemaGraphRequest) ProtoMessage() {} + +func (x *RebuildVSchemaGraphRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[137] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (x *ShardReplicationRemoveRequest) GetTabletAlias() *topodata.TabletAlias { +// Deprecated: Use RebuildVSchemaGraphRequest.ProtoReflect.Descriptor instead. 
+func (*RebuildVSchemaGraphRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{137} +} + +func (x *RebuildVSchemaGraphRequest) GetCells() []string { if x != nil { - return x.TabletAlias + return x.Cells } return nil } -type ShardReplicationRemoveResponse struct { +type RebuildVSchemaGraphResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } -func (x *ShardReplicationRemoveResponse) Reset() { - *x = ShardReplicationRemoveResponse{} +func (x *RebuildVSchemaGraphResponse) Reset() { + *x = RebuildVSchemaGraphResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[148] + mi := &file_vtctldata_proto_msgTypes[138] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ShardReplicationRemoveResponse) String() string { +func (x *RebuildVSchemaGraphResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ShardReplicationRemoveResponse) ProtoMessage() {} +func (*RebuildVSchemaGraphResponse) ProtoMessage() {} -func (x *ShardReplicationRemoveResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[148] +func (x *RebuildVSchemaGraphResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[138] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8597,37 +9027,36 @@ func (x *ShardReplicationRemoveResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ShardReplicationRemoveResponse.ProtoReflect.Descriptor instead. -func (*ShardReplicationRemoveResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{148} +// Deprecated: Use RebuildVSchemaGraphResponse.ProtoReflect.Descriptor instead. 
+func (*RebuildVSchemaGraphResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{138} } -type SleepTabletRequest struct { +type RefreshStateRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - Duration *vttime.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"` } -func (x *SleepTabletRequest) Reset() { - *x = SleepTabletRequest{} +func (x *RefreshStateRequest) Reset() { + *x = RefreshStateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[149] + mi := &file_vtctldata_proto_msgTypes[139] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SleepTabletRequest) String() string { +func (x *RefreshStateRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SleepTabletRequest) ProtoMessage() {} +func (*RefreshStateRequest) ProtoMessage() {} -func (x *SleepTabletRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[149] +func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[139] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8638,48 +9067,41 @@ func (x *SleepTabletRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SleepTabletRequest.ProtoReflect.Descriptor instead. -func (*SleepTabletRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{149} +// Deprecated: Use RefreshStateRequest.ProtoReflect.Descriptor instead. 
+func (*RefreshStateRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{139} } -func (x *SleepTabletRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *RefreshStateRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { return x.TabletAlias } return nil } -func (x *SleepTabletRequest) GetDuration() *vttime.Duration { - if x != nil { - return x.Duration - } - return nil -} - -type SleepTabletResponse struct { +type RefreshStateResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } -func (x *SleepTabletResponse) Reset() { - *x = SleepTabletResponse{} +func (x *RefreshStateResponse) Reset() { + *x = RefreshStateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[150] + mi := &file_vtctldata_proto_msgTypes[140] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SleepTabletResponse) String() string { +func (x *RefreshStateResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SleepTabletResponse) ProtoMessage() {} +func (*RefreshStateResponse) ProtoMessage() {} -func (x *SleepTabletResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[150] +func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[140] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8690,46 +9112,38 @@ func (x *SleepTabletResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SleepTabletResponse.ProtoReflect.Descriptor instead. -func (*SleepTabletResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{150} +// Deprecated: Use RefreshStateResponse.ProtoReflect.Descriptor instead. 
+func (*RefreshStateResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{140} } -type SourceShardAddRequest struct { +type RefreshStateByShardRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - Uid int32 `protobuf:"varint,3,opt,name=uid,proto3" json:"uid,omitempty"` - SourceKeyspace string `protobuf:"bytes,4,opt,name=source_keyspace,json=sourceKeyspace,proto3" json:"source_keyspace,omitempty"` - SourceShard string `protobuf:"bytes,5,opt,name=source_shard,json=sourceShard,proto3" json:"source_shard,omitempty"` - // KeyRange identifies the key range to use for the SourceShard. This field is - // optional. - KeyRange *topodata.KeyRange `protobuf:"bytes,6,opt,name=key_range,json=keyRange,proto3" json:"key_range,omitempty"` - // Tables is a list of tables replicate (for MoveTables). Each "table" can be - // either an exact match or a regular expression of the form "/regexp/". 
- Tables []string `protobuf:"bytes,7,rep,name=tables,proto3" json:"tables,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` } -func (x *SourceShardAddRequest) Reset() { - *x = SourceShardAddRequest{} +func (x *RefreshStateByShardRequest) Reset() { + *x = RefreshStateByShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[151] + mi := &file_vtctldata_proto_msgTypes[141] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SourceShardAddRequest) String() string { +func (x *RefreshStateByShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SourceShardAddRequest) ProtoMessage() {} +func (*RefreshStateByShardRequest) ProtoMessage() {} -func (x *SourceShardAddRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[151] +func (x *RefreshStateByShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[141] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8740,86 +9154,59 @@ func (x *SourceShardAddRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SourceShardAddRequest.ProtoReflect.Descriptor instead. -func (*SourceShardAddRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{151} +// Deprecated: Use RefreshStateByShardRequest.ProtoReflect.Descriptor instead. 
+func (*RefreshStateByShardRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{141} } -func (x *SourceShardAddRequest) GetKeyspace() string { +func (x *RefreshStateByShardRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *SourceShardAddRequest) GetShard() string { +func (x *RefreshStateByShardRequest) GetShard() string { if x != nil { return x.Shard } return "" } -func (x *SourceShardAddRequest) GetUid() int32 { - if x != nil { - return x.Uid - } - return 0 -} - -func (x *SourceShardAddRequest) GetSourceKeyspace() string { - if x != nil { - return x.SourceKeyspace - } - return "" -} - -func (x *SourceShardAddRequest) GetSourceShard() string { - if x != nil { - return x.SourceShard - } - return "" -} - -func (x *SourceShardAddRequest) GetKeyRange() *topodata.KeyRange { - if x != nil { - return x.KeyRange - } - return nil -} - -func (x *SourceShardAddRequest) GetTables() []string { +func (x *RefreshStateByShardRequest) GetCells() []string { if x != nil { - return x.Tables + return x.Cells } return nil } -type SourceShardAddResponse struct { +type RefreshStateByShardResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Shard is the updated shard record. 
- Shard *topodata.Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` + IsPartialRefresh bool `protobuf:"varint,1,opt,name=is_partial_refresh,json=isPartialRefresh,proto3" json:"is_partial_refresh,omitempty"` + // This explains why we had a partial refresh (if we did) + PartialRefreshDetails string `protobuf:"bytes,2,opt,name=partial_refresh_details,json=partialRefreshDetails,proto3" json:"partial_refresh_details,omitempty"` } -func (x *SourceShardAddResponse) Reset() { - *x = SourceShardAddResponse{} +func (x *RefreshStateByShardResponse) Reset() { + *x = RefreshStateByShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[152] + mi := &file_vtctldata_proto_msgTypes[142] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SourceShardAddResponse) String() string { +func (x *RefreshStateByShardResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SourceShardAddResponse) ProtoMessage() {} +func (*RefreshStateByShardResponse) ProtoMessage() {} -func (x *SourceShardAddResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[152] +func (x *RefreshStateByShardResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[142] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8830,45 +9217,50 @@ func (x *SourceShardAddResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SourceShardAddResponse.ProtoReflect.Descriptor instead. -func (*SourceShardAddResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{152} +// Deprecated: Use RefreshStateByShardResponse.ProtoReflect.Descriptor instead. 
+func (*RefreshStateByShardResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{142} } -func (x *SourceShardAddResponse) GetShard() *topodata.Shard { +func (x *RefreshStateByShardResponse) GetIsPartialRefresh() bool { if x != nil { - return x.Shard + return x.IsPartialRefresh } - return nil + return false } -type SourceShardDeleteRequest struct { +func (x *RefreshStateByShardResponse) GetPartialRefreshDetails() string { + if x != nil { + return x.PartialRefreshDetails + } + return "" +} + +type ReloadSchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - Uid int32 `protobuf:"varint,3,opt,name=uid,proto3" json:"uid,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` } -func (x *SourceShardDeleteRequest) Reset() { - *x = SourceShardDeleteRequest{} +func (x *ReloadSchemaRequest) Reset() { + *x = ReloadSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[153] + mi := &file_vtctldata_proto_msgTypes[143] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SourceShardDeleteRequest) String() string { +func (x *ReloadSchemaRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SourceShardDeleteRequest) ProtoMessage() {} +func (*ReloadSchemaRequest) ProtoMessage() {} -func (x *SourceShardDeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[153] +func (x *ReloadSchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[143] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { 
@@ -8879,58 +9271,41 @@ func (x *SourceShardDeleteRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SourceShardDeleteRequest.ProtoReflect.Descriptor instead. -func (*SourceShardDeleteRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{153} -} - -func (x *SourceShardDeleteRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *SourceShardDeleteRequest) GetShard() string { - if x != nil { - return x.Shard - } - return "" +// Deprecated: Use ReloadSchemaRequest.ProtoReflect.Descriptor instead. +func (*ReloadSchemaRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{143} } -func (x *SourceShardDeleteRequest) GetUid() int32 { +func (x *ReloadSchemaRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Uid + return x.TabletAlias } - return 0 + return nil } -type SourceShardDeleteResponse struct { +type ReloadSchemaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - // Shard is the updated shard record. 
- Shard *topodata.Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` } -func (x *SourceShardDeleteResponse) Reset() { - *x = SourceShardDeleteResponse{} +func (x *ReloadSchemaResponse) Reset() { + *x = ReloadSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[154] + mi := &file_vtctldata_proto_msgTypes[144] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SourceShardDeleteResponse) String() string { +func (x *ReloadSchemaResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SourceShardDeleteResponse) ProtoMessage() {} +func (*ReloadSchemaResponse) ProtoMessage() {} -func (x *SourceShardDeleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[154] +func (x *ReloadSchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[144] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8941,43 +9316,42 @@ func (x *SourceShardDeleteResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SourceShardDeleteResponse.ProtoReflect.Descriptor instead. -func (*SourceShardDeleteResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{154} -} - -func (x *SourceShardDeleteResponse) GetShard() *topodata.Shard { - if x != nil { - return x.Shard - } - return nil +// Deprecated: Use ReloadSchemaResponse.ProtoReflect.Descriptor instead. 
+func (*ReloadSchemaResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{144} } -type StartReplicationRequest struct { +type ReloadSchemaKeyspaceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + WaitPosition string `protobuf:"bytes,2,opt,name=wait_position,json=waitPosition,proto3" json:"wait_position,omitempty"` + IncludePrimary bool `protobuf:"varint,3,opt,name=include_primary,json=includePrimary,proto3" json:"include_primary,omitempty"` + // Concurrency is the global concurrency across all shards in the keyspace + // (so, at most this many tablets will be reloaded across the keyspace at any + // given point). + Concurrency uint32 `protobuf:"varint,4,opt,name=concurrency,proto3" json:"concurrency,omitempty"` } -func (x *StartReplicationRequest) Reset() { - *x = StartReplicationRequest{} +func (x *ReloadSchemaKeyspaceRequest) Reset() { + *x = ReloadSchemaKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[155] + mi := &file_vtctldata_proto_msgTypes[145] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *StartReplicationRequest) String() string { +func (x *ReloadSchemaKeyspaceRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*StartReplicationRequest) ProtoMessage() {} +func (*ReloadSchemaKeyspaceRequest) ProtoMessage() {} -func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[155] +func (x *ReloadSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[145] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8988,81 +9362,64 @@ func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StartReplicationRequest.ProtoReflect.Descriptor instead. -func (*StartReplicationRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{155} +// Deprecated: Use ReloadSchemaKeyspaceRequest.ProtoReflect.Descriptor instead. +func (*ReloadSchemaKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{145} } -func (x *StartReplicationRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *ReloadSchemaKeyspaceRequest) GetKeyspace() string { if x != nil { - return x.TabletAlias + return x.Keyspace } - return nil -} - -type StartReplicationResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields + return "" } -func (x *StartReplicationResponse) Reset() { - *x = StartReplicationResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[156] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *ReloadSchemaKeyspaceRequest) GetWaitPosition() string { + if x != nil { + return x.WaitPosition } + return "" } -func (x *StartReplicationResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartReplicationResponse) ProtoMessage() {} - -func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[156] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *ReloadSchemaKeyspaceRequest) GetIncludePrimary() bool { + if x != nil { + return x.IncludePrimary } - return mi.MessageOf(x) + return false } -// Deprecated: Use 
StartReplicationResponse.ProtoReflect.Descriptor instead. -func (*StartReplicationResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{156} +func (x *ReloadSchemaKeyspaceRequest) GetConcurrency() uint32 { + if x != nil { + return x.Concurrency + } + return 0 } -type StopReplicationRequest struct { +type ReloadSchemaKeyspaceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Events []*logutil.Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` } -func (x *StopReplicationRequest) Reset() { - *x = StopReplicationRequest{} +func (x *ReloadSchemaKeyspaceResponse) Reset() { + *x = ReloadSchemaKeyspaceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[157] + mi := &file_vtctldata_proto_msgTypes[146] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *StopReplicationRequest) String() string { +func (x *ReloadSchemaKeyspaceResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*StopReplicationRequest) ProtoMessage() {} +func (*ReloadSchemaKeyspaceResponse) ProtoMessage() {} -func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[157] +func (x *ReloadSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[146] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9073,41 +9430,48 @@ func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StopReplicationRequest.ProtoReflect.Descriptor instead. 
-func (*StopReplicationRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{157} +// Deprecated: Use ReloadSchemaKeyspaceResponse.ProtoReflect.Descriptor instead. +func (*ReloadSchemaKeyspaceResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{146} } -func (x *StopReplicationRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *ReloadSchemaKeyspaceResponse) GetEvents() []*logutil.Event { if x != nil { - return x.TabletAlias + return x.Events } return nil } -type StopReplicationResponse struct { +type ReloadSchemaShardRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + WaitPosition string `protobuf:"bytes,3,opt,name=wait_position,json=waitPosition,proto3" json:"wait_position,omitempty"` + IncludePrimary bool `protobuf:"varint,4,opt,name=include_primary,json=includePrimary,proto3" json:"include_primary,omitempty"` + // Concurrency is the maximum number of tablets to reload at one time. 
+ Concurrency uint32 `protobuf:"varint,5,opt,name=concurrency,proto3" json:"concurrency,omitempty"` } -func (x *StopReplicationResponse) Reset() { - *x = StopReplicationResponse{} +func (x *ReloadSchemaShardRequest) Reset() { + *x = ReloadSchemaShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[158] + mi := &file_vtctldata_proto_msgTypes[147] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *StopReplicationResponse) String() string { +func (x *ReloadSchemaShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*StopReplicationResponse) ProtoMessage() {} +func (*ReloadSchemaShardRequest) ProtoMessage() {} -func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[158] +func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[147] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9118,88 +9482,71 @@ func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StopReplicationResponse.ProtoReflect.Descriptor instead. -func (*StopReplicationResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{158} +// Deprecated: Use ReloadSchemaShardRequest.ProtoReflect.Descriptor instead. +func (*ReloadSchemaShardRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{147} } -type TabletExternallyReparentedRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Tablet is the alias of the tablet that was promoted externally and should - // be updated to the shard primary in the topo. 
- Tablet *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` +func (x *ReloadSchemaShardRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" } -func (x *TabletExternallyReparentedRequest) Reset() { - *x = TabletExternallyReparentedRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[159] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *ReloadSchemaShardRequest) GetShard() string { + if x != nil { + return x.Shard } + return "" } -func (x *TabletExternallyReparentedRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TabletExternallyReparentedRequest) ProtoMessage() {} - -func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[159] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *ReloadSchemaShardRequest) GetWaitPosition() string { + if x != nil { + return x.WaitPosition } - return mi.MessageOf(x) + return "" } -// Deprecated: Use TabletExternallyReparentedRequest.ProtoReflect.Descriptor instead. 
-func (*TabletExternallyReparentedRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{159} +func (x *ReloadSchemaShardRequest) GetIncludePrimary() bool { + if x != nil { + return x.IncludePrimary + } + return false } -func (x *TabletExternallyReparentedRequest) GetTablet() *topodata.TabletAlias { +func (x *ReloadSchemaShardRequest) GetConcurrency() uint32 { if x != nil { - return x.Tablet + return x.Concurrency } - return nil + return 0 } -type TabletExternallyReparentedResponse struct { +type ReloadSchemaShardResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - NewPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=new_primary,json=newPrimary,proto3" json:"new_primary,omitempty"` - OldPrimary *topodata.TabletAlias `protobuf:"bytes,4,opt,name=old_primary,json=oldPrimary,proto3" json:"old_primary,omitempty"` + Events []*logutil.Event `protobuf:"bytes,2,rep,name=events,proto3" json:"events,omitempty"` } -func (x *TabletExternallyReparentedResponse) Reset() { - *x = TabletExternallyReparentedResponse{} +func (x *ReloadSchemaShardResponse) Reset() { + *x = ReloadSchemaShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[160] + mi := &file_vtctldata_proto_msgTypes[148] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *TabletExternallyReparentedResponse) String() string { +func (x *ReloadSchemaShardResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*TabletExternallyReparentedResponse) ProtoMessage() {} +func (*ReloadSchemaShardResponse) ProtoMessage() {} -func (x *TabletExternallyReparentedResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[160] +func (x 
*ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[148] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9210,65 +9557,45 @@ func (x *TabletExternallyReparentedResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use TabletExternallyReparentedResponse.ProtoReflect.Descriptor instead. -func (*TabletExternallyReparentedResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{160} -} - -func (x *TabletExternallyReparentedResponse) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *TabletExternallyReparentedResponse) GetShard() string { - if x != nil { - return x.Shard - } - return "" -} - -func (x *TabletExternallyReparentedResponse) GetNewPrimary() *topodata.TabletAlias { - if x != nil { - return x.NewPrimary - } - return nil +// Deprecated: Use ReloadSchemaShardResponse.ProtoReflect.Descriptor instead. 
+func (*ReloadSchemaShardResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{148} } -func (x *TabletExternallyReparentedResponse) GetOldPrimary() *topodata.TabletAlias { +func (x *ReloadSchemaShardResponse) GetEvents() []*logutil.Event { if x != nil { - return x.OldPrimary + return x.Events } return nil } -type UpdateCellInfoRequest struct { +type RemoveBackupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - CellInfo *topodata.CellInfo `protobuf:"bytes,2,opt,name=cell_info,json=cellInfo,proto3" json:"cell_info,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` } -func (x *UpdateCellInfoRequest) Reset() { - *x = UpdateCellInfoRequest{} +func (x *RemoveBackupRequest) Reset() { + *x = RemoveBackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[161] + mi := &file_vtctldata_proto_msgTypes[149] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *UpdateCellInfoRequest) String() string { +func (x *RemoveBackupRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateCellInfoRequest) ProtoMessage() {} +func (*RemoveBackupRequest) ProtoMessage() {} -func (x *UpdateCellInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[161] +func (x *RemoveBackupRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[149] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9279,51 +9606,55 @@ func (x *UpdateCellInfoRequest) ProtoReflect() protoreflect.Message { 
return mi.MessageOf(x) } -// Deprecated: Use UpdateCellInfoRequest.ProtoReflect.Descriptor instead. -func (*UpdateCellInfoRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{161} +// Deprecated: Use RemoveBackupRequest.ProtoReflect.Descriptor instead. +func (*RemoveBackupRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{149} } -func (x *UpdateCellInfoRequest) GetName() string { +func (x *RemoveBackupRequest) GetKeyspace() string { if x != nil { - return x.Name + return x.Keyspace } return "" } -func (x *UpdateCellInfoRequest) GetCellInfo() *topodata.CellInfo { +func (x *RemoveBackupRequest) GetShard() string { if x != nil { - return x.CellInfo + return x.Shard } - return nil + return "" } -type UpdateCellInfoResponse struct { +func (x *RemoveBackupRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type RemoveBackupResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - CellInfo *topodata.CellInfo `protobuf:"bytes,2,opt,name=cell_info,json=cellInfo,proto3" json:"cell_info,omitempty"` } -func (x *UpdateCellInfoResponse) Reset() { - *x = UpdateCellInfoResponse{} +func (x *RemoveBackupResponse) Reset() { + *x = RemoveBackupResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[162] + mi := &file_vtctldata_proto_msgTypes[150] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *UpdateCellInfoResponse) String() string { +func (x *RemoveBackupResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateCellInfoResponse) ProtoMessage() {} +func (*RemoveBackupResponse) ProtoMessage() {} -func (x *UpdateCellInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[162] +func (x *RemoveBackupResponse) 
ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[150] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9334,51 +9665,44 @@ func (x *UpdateCellInfoResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateCellInfoResponse.ProtoReflect.Descriptor instead. -func (*UpdateCellInfoResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{162} -} - -func (x *UpdateCellInfoResponse) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *UpdateCellInfoResponse) GetCellInfo() *topodata.CellInfo { - if x != nil { - return x.CellInfo - } - return nil +// Deprecated: Use RemoveBackupResponse.ProtoReflect.Descriptor instead. +func (*RemoveBackupResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{150} } -type UpdateCellsAliasRequest struct { +type RemoveKeyspaceCellRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - CellsAlias *topodata.CellsAlias `protobuf:"bytes,2,opt,name=cells_alias,json=cellsAlias,proto3" json:"cells_alias,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Cell string `protobuf:"bytes,2,opt,name=cell,proto3" json:"cell,omitempty"` + // Force proceeds even if the cell's topology server cannot be reached. This + // should only be set if a cell has been shut down entirely, and the global + // topology data just needs to be updated. + Force bool `protobuf:"varint,3,opt,name=force,proto3" json:"force,omitempty"` + // Recursive also deletes all tablets in that cell belonging to the specified + // keyspace. 
+ Recursive bool `protobuf:"varint,4,opt,name=recursive,proto3" json:"recursive,omitempty"` } -func (x *UpdateCellsAliasRequest) Reset() { - *x = UpdateCellsAliasRequest{} +func (x *RemoveKeyspaceCellRequest) Reset() { + *x = RemoveKeyspaceCellRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[163] + mi := &file_vtctldata_proto_msgTypes[151] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *UpdateCellsAliasRequest) String() string { +func (x *RemoveKeyspaceCellRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateCellsAliasRequest) ProtoMessage() {} +func (*RemoveKeyspaceCellRequest) ProtoMessage() {} -func (x *UpdateCellsAliasRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[163] +func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[151] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9389,51 +9713,62 @@ func (x *UpdateCellsAliasRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateCellsAliasRequest.ProtoReflect.Descriptor instead. -func (*UpdateCellsAliasRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{163} +// Deprecated: Use RemoveKeyspaceCellRequest.ProtoReflect.Descriptor instead. 
+func (*RemoveKeyspaceCellRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{151} } -func (x *UpdateCellsAliasRequest) GetName() string { +func (x *RemoveKeyspaceCellRequest) GetKeyspace() string { if x != nil { - return x.Name + return x.Keyspace } return "" } -func (x *UpdateCellsAliasRequest) GetCellsAlias() *topodata.CellsAlias { +func (x *RemoveKeyspaceCellRequest) GetCell() string { if x != nil { - return x.CellsAlias + return x.Cell } - return nil + return "" } -type UpdateCellsAliasResponse struct { +func (x *RemoveKeyspaceCellRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + +func (x *RemoveKeyspaceCellRequest) GetRecursive() bool { + if x != nil { + return x.Recursive + } + return false +} + +type RemoveKeyspaceCellResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - CellsAlias *topodata.CellsAlias `protobuf:"bytes,2,opt,name=cells_alias,json=cellsAlias,proto3" json:"cells_alias,omitempty"` } -func (x *UpdateCellsAliasResponse) Reset() { - *x = UpdateCellsAliasResponse{} +func (x *RemoveKeyspaceCellResponse) Reset() { + *x = RemoveKeyspaceCellResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[164] + mi := &file_vtctldata_proto_msgTypes[152] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *UpdateCellsAliasResponse) String() string { +func (x *RemoveKeyspaceCellResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateCellsAliasResponse) ProtoMessage() {} +func (*RemoveKeyspaceCellResponse) ProtoMessage() {} -func (x *UpdateCellsAliasResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[164] +func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[152] if 
protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9444,50 +9779,45 @@ func (x *UpdateCellsAliasResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateCellsAliasResponse.ProtoReflect.Descriptor instead. -func (*UpdateCellsAliasResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{164} -} - -func (x *UpdateCellsAliasResponse) GetName() string { - if x != nil { - return x.Name - } - return "" +// Deprecated: Use RemoveKeyspaceCellResponse.ProtoReflect.Descriptor instead. +func (*RemoveKeyspaceCellResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{152} } -func (x *UpdateCellsAliasResponse) GetCellsAlias() *topodata.CellsAlias { - if x != nil { - return x.CellsAlias - } - return nil -} - -type ValidateRequest struct { +type RemoveShardCellRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - PingTablets bool `protobuf:"varint,1,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` + Cell string `protobuf:"bytes,3,opt,name=cell,proto3" json:"cell,omitempty"` + // Force proceeds even if the cell's topology server cannot be reached. This + // should only be set if a cell has been shut down entirely, and the global + // topology data just needs to be updated. + Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` + // Recursive also deletes all tablets in that cell belonging to the specified + // keyspace and shard. 
+ Recursive bool `protobuf:"varint,5,opt,name=recursive,proto3" json:"recursive,omitempty"` } -func (x *ValidateRequest) Reset() { - *x = ValidateRequest{} +func (x *RemoveShardCellRequest) Reset() { + *x = RemoveShardCellRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[165] + mi := &file_vtctldata_proto_msgTypes[153] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateRequest) String() string { +func (x *RemoveShardCellRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateRequest) ProtoMessage() {} +func (*RemoveShardCellRequest) ProtoMessage() {} -func (x *ValidateRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[165] +func (x *RemoveShardCellRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[153] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9498,44 +9828,69 @@ func (x *ValidateRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateRequest.ProtoReflect.Descriptor instead. -func (*ValidateRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{165} +// Deprecated: Use RemoveShardCellRequest.ProtoReflect.Descriptor instead. 
+func (*RemoveShardCellRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{153} } -func (x *ValidateRequest) GetPingTablets() bool { +func (x *RemoveShardCellRequest) GetKeyspace() string { if x != nil { - return x.PingTablets + return x.Keyspace + } + return "" +} + +func (x *RemoveShardCellRequest) GetShardName() string { + if x != nil { + return x.ShardName + } + return "" +} + +func (x *RemoveShardCellRequest) GetCell() string { + if x != nil { + return x.Cell + } + return "" +} + +func (x *RemoveShardCellRequest) GetForce() bool { + if x != nil { + return x.Force } return false } -type ValidateResponse struct { +func (x *RemoveShardCellRequest) GetRecursive() bool { + if x != nil { + return x.Recursive + } + return false +} + +type RemoveShardCellResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` - ResultsByKeyspace map[string]*ValidateKeyspaceResponse `protobuf:"bytes,2,rep,name=results_by_keyspace,json=resultsByKeyspace,proto3" json:"results_by_keyspace,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *ValidateResponse) Reset() { - *x = ValidateResponse{} +func (x *RemoveShardCellResponse) Reset() { + *x = RemoveShardCellResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[166] + mi := &file_vtctldata_proto_msgTypes[154] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateResponse) String() string { +func (x *RemoveShardCellResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateResponse) ProtoMessage() {} +func (*RemoveShardCellResponse) ProtoMessage() {} -func (x *ValidateResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[166] +func (x *RemoveShardCellResponse) 
ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[154] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9546,51 +9901,38 @@ func (x *ValidateResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateResponse.ProtoReflect.Descriptor instead. -func (*ValidateResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{166} -} - -func (x *ValidateResponse) GetResults() []string { - if x != nil { - return x.Results - } - return nil -} - -func (x *ValidateResponse) GetResultsByKeyspace() map[string]*ValidateKeyspaceResponse { - if x != nil { - return x.ResultsByKeyspace - } - return nil +// Deprecated: Use RemoveShardCellResponse.ProtoReflect.Descriptor instead. +func (*RemoveShardCellResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{154} } -type ValidateKeyspaceRequest struct { +type ReparentTabletRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - PingTablets bool `protobuf:"varint,2,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"` + // Tablet is the alias of the tablet that should be reparented under the + // current shard primary. 
+ Tablet *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` } -func (x *ValidateKeyspaceRequest) Reset() { - *x = ValidateKeyspaceRequest{} +func (x *ReparentTabletRequest) Reset() { + *x = ReparentTabletRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[167] + mi := &file_vtctldata_proto_msgTypes[155] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateKeyspaceRequest) String() string { +func (x *ReparentTabletRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateKeyspaceRequest) ProtoMessage() {} +func (*ReparentTabletRequest) ProtoMessage() {} -func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[167] +func (x *ReparentTabletRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[155] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9601,51 +9943,48 @@ func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*ValidateKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{167} -} - -func (x *ValidateKeyspaceRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" +// Deprecated: Use ReparentTabletRequest.ProtoReflect.Descriptor instead. 
+func (*ReparentTabletRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{155} } -func (x *ValidateKeyspaceRequest) GetPingTablets() bool { +func (x *ReparentTabletRequest) GetTablet() *topodata.TabletAlias { if x != nil { - return x.PingTablets + return x.Tablet } - return false + return nil } -type ValidateKeyspaceResponse struct { +type ReparentTabletResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` - ResultsByShard map[string]*ValidateShardResponse `protobuf:"bytes,2,rep,name=results_by_shard,json=resultsByShard,proto3" json:"results_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Keyspace is the name of the keyspace the tablet was reparented in. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard is the name of the shard the tablet was reparented in. + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // Primary is the alias of the tablet that the tablet was reparented under. 
+ Primary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=primary,proto3" json:"primary,omitempty"` } -func (x *ValidateKeyspaceResponse) Reset() { - *x = ValidateKeyspaceResponse{} +func (x *ReparentTabletResponse) Reset() { + *x = ReparentTabletResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[168] + mi := &file_vtctldata_proto_msgTypes[156] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateKeyspaceResponse) String() string { +func (x *ReparentTabletResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateKeyspaceResponse) ProtoMessage() {} +func (*ReparentTabletResponse) ProtoMessage() {} -func (x *ValidateKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[168] +func (x *ReparentTabletResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[156] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9656,54 +9995,74 @@ func (x *ValidateKeyspaceResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateKeyspaceResponse.ProtoReflect.Descriptor instead. -func (*ValidateKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{168} +// Deprecated: Use ReparentTabletResponse.ProtoReflect.Descriptor instead. 
+func (*ReparentTabletResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{156} } -func (x *ValidateKeyspaceResponse) GetResults() []string { +func (x *ReparentTabletResponse) GetKeyspace() string { if x != nil { - return x.Results + return x.Keyspace } - return nil + return "" } -func (x *ValidateKeyspaceResponse) GetResultsByShard() map[string]*ValidateShardResponse { +func (x *ReparentTabletResponse) GetShard() string { if x != nil { - return x.ResultsByShard + return x.Shard + } + return "" +} + +func (x *ReparentTabletResponse) GetPrimary() *topodata.TabletAlias { + if x != nil { + return x.Primary } return nil } -type ValidateSchemaKeyspaceRequest struct { +type ReshardCreateRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - ExcludeTables []string `protobuf:"bytes,2,rep,name=exclude_tables,json=excludeTables,proto3" json:"exclude_tables,omitempty"` - IncludeViews bool `protobuf:"varint,3,opt,name=include_views,json=includeViews,proto3" json:"include_views,omitempty"` - SkipNoPrimary bool `protobuf:"varint,4,opt,name=skip_no_primary,json=skipNoPrimary,proto3" json:"skip_no_primary,omitempty"` - IncludeVschema bool `protobuf:"varint,5,opt,name=include_vschema,json=includeVschema,proto3" json:"include_vschema,omitempty"` + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + SourceShards []string `protobuf:"bytes,3,rep,name=source_shards,json=sourceShards,proto3" json:"source_shards,omitempty"` + TargetShards []string `protobuf:"bytes,4,rep,name=target_shards,json=targetShards,proto3" json:"target_shards,omitempty"` + Cells []string `protobuf:"bytes,5,rep,name=cells,proto3" json:"cells,omitempty"` + TabletTypes []topodata.TabletType 
`protobuf:"varint,6,rep,packed,name=tablet_types,json=tabletTypes,proto3,enum=topodata.TabletType" json:"tablet_types,omitempty"` + TabletSelectionPreference tabletmanagerdata.TabletSelectionPreference `protobuf:"varint,7,opt,name=tablet_selection_preference,json=tabletSelectionPreference,proto3,enum=tabletmanagerdata.TabletSelectionPreference" json:"tablet_selection_preference,omitempty"` + // SkipSchemaCopy specifies if the schema should be copied from the source shard, set false if + // schema is already created on the target shard before Reshard is invoked. + SkipSchemaCopy bool `protobuf:"varint,8,opt,name=skip_schema_copy,json=skipSchemaCopy,proto3" json:"skip_schema_copy,omitempty"` + // OnDdl specifies the action to be taken when a DDL is encountered. + OnDdl string `protobuf:"bytes,9,opt,name=on_ddl,json=onDdl,proto3" json:"on_ddl,omitempty"` + // StopAfterCopy specifies if vreplication should be stopped after copying. + StopAfterCopy bool `protobuf:"varint,10,opt,name=stop_after_copy,json=stopAfterCopy,proto3" json:"stop_after_copy,omitempty"` + // DeferSecondaryKeys specifies if secondary keys should be created in one shot after table copy finishes. + DeferSecondaryKeys bool `protobuf:"varint,11,opt,name=defer_secondary_keys,json=deferSecondaryKeys,proto3" json:"defer_secondary_keys,omitempty"` + // Start the workflow after creating it. 
+ AutoStart bool `protobuf:"varint,12,opt,name=auto_start,json=autoStart,proto3" json:"auto_start,omitempty"` } -func (x *ValidateSchemaKeyspaceRequest) Reset() { - *x = ValidateSchemaKeyspaceRequest{} +func (x *ReshardCreateRequest) Reset() { + *x = ReshardCreateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[169] + mi := &file_vtctldata_proto_msgTypes[157] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateSchemaKeyspaceRequest) String() string { +func (x *ReshardCreateRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateSchemaKeyspaceRequest) ProtoMessage() {} +func (*ReshardCreateRequest) ProtoMessage() {} -func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[169] +func (x *ReshardCreateRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[157] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9714,128 +10073,132 @@ func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateSchemaKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*ValidateSchemaKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{169} +// Deprecated: Use ReshardCreateRequest.ProtoReflect.Descriptor instead. 
+func (*ReshardCreateRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{157} } -func (x *ValidateSchemaKeyspaceRequest) GetKeyspace() string { +func (x *ReshardCreateRequest) GetWorkflow() string { + if x != nil { + return x.Workflow + } + return "" +} + +func (x *ReshardCreateRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *ValidateSchemaKeyspaceRequest) GetExcludeTables() []string { +func (x *ReshardCreateRequest) GetSourceShards() []string { if x != nil { - return x.ExcludeTables + return x.SourceShards } return nil } -func (x *ValidateSchemaKeyspaceRequest) GetIncludeViews() bool { +func (x *ReshardCreateRequest) GetTargetShards() []string { if x != nil { - return x.IncludeViews + return x.TargetShards } - return false + return nil } -func (x *ValidateSchemaKeyspaceRequest) GetSkipNoPrimary() bool { +func (x *ReshardCreateRequest) GetCells() []string { if x != nil { - return x.SkipNoPrimary + return x.Cells } - return false + return nil } -func (x *ValidateSchemaKeyspaceRequest) GetIncludeVschema() bool { +func (x *ReshardCreateRequest) GetTabletTypes() []topodata.TabletType { if x != nil { - return x.IncludeVschema + return x.TabletTypes } - return false + return nil } -type ValidateSchemaKeyspaceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *ReshardCreateRequest) GetTabletSelectionPreference() tabletmanagerdata.TabletSelectionPreference { + if x != nil { + return x.TabletSelectionPreference + } + return tabletmanagerdata.TabletSelectionPreference(0) +} - Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` - ResultsByShard map[string]*ValidateShardResponse `protobuf:"bytes,2,rep,name=results_by_shard,json=resultsByShard,proto3" json:"results_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +func (x 
*ReshardCreateRequest) GetSkipSchemaCopy() bool { + if x != nil { + return x.SkipSchemaCopy + } + return false } -func (x *ValidateSchemaKeyspaceResponse) Reset() { - *x = ValidateSchemaKeyspaceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[170] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *ReshardCreateRequest) GetOnDdl() string { + if x != nil { + return x.OnDdl } + return "" } -func (x *ValidateSchemaKeyspaceResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValidateSchemaKeyspaceResponse) ProtoMessage() {} - -func (x *ValidateSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[170] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *ReshardCreateRequest) GetStopAfterCopy() bool { + if x != nil { + return x.StopAfterCopy } - return mi.MessageOf(x) -} - -// Deprecated: Use ValidateSchemaKeyspaceResponse.ProtoReflect.Descriptor instead. 
-func (*ValidateSchemaKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{170} + return false } -func (x *ValidateSchemaKeyspaceResponse) GetResults() []string { +func (x *ReshardCreateRequest) GetDeferSecondaryKeys() bool { if x != nil { - return x.Results + return x.DeferSecondaryKeys } - return nil + return false } -func (x *ValidateSchemaKeyspaceResponse) GetResultsByShard() map[string]*ValidateShardResponse { +func (x *ReshardCreateRequest) GetAutoStart() bool { if x != nil { - return x.ResultsByShard + return x.AutoStart } - return nil + return false } -type ValidateShardRequest struct { +type RestoreFromBackupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - PingTablets bool `protobuf:"varint,3,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + // BackupTime, if set, will use the backup taken most closely at or before + // this time. If nil, the latest backup will be restored on the tablet. + BackupTime *vttime.Time `protobuf:"bytes,2,opt,name=backup_time,json=backupTime,proto3" json:"backup_time,omitempty"` + // RestoreToPos indicates a position for a point-in-time recovery. 
The recovery + // is expected to utilize one full backup, followed by zero or more incremental backups, + // that reach the precise desired position + RestoreToPos string `protobuf:"bytes,3,opt,name=restore_to_pos,json=restoreToPos,proto3" json:"restore_to_pos,omitempty"` + // Dry run does not actually performs the restore, but validates the steps and availability of backups + DryRun bool `protobuf:"varint,4,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` + // RestoreToTimestamp, if given, requested an inremental restore up to (and excluding) the given timestamp. + // RestoreToTimestamp and RestoreToPos are mutually exclusive. + RestoreToTimestamp *vttime.Time `protobuf:"bytes,5,opt,name=restore_to_timestamp,json=restoreToTimestamp,proto3" json:"restore_to_timestamp,omitempty"` } -func (x *ValidateShardRequest) Reset() { - *x = ValidateShardRequest{} +func (x *RestoreFromBackupRequest) Reset() { + *x = RestoreFromBackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[171] + mi := &file_vtctldata_proto_msgTypes[158] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateShardRequest) String() string { +func (x *RestoreFromBackupRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateShardRequest) ProtoMessage() {} +func (*RestoreFromBackupRequest) ProtoMessage() {} -func (x *ValidateShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[171] +func (x *RestoreFromBackupRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[158] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9846,57 +10209,75 @@ func (x *ValidateShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateShardRequest.ProtoReflect.Descriptor instead. 
-func (*ValidateShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{171} +// Deprecated: Use RestoreFromBackupRequest.ProtoReflect.Descriptor instead. +func (*RestoreFromBackupRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{158} } -func (x *ValidateShardRequest) GetKeyspace() string { +func (x *RestoreFromBackupRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Keyspace + return x.TabletAlias } - return "" + return nil } -func (x *ValidateShardRequest) GetShard() string { +func (x *RestoreFromBackupRequest) GetBackupTime() *vttime.Time { if x != nil { - return x.Shard + return x.BackupTime + } + return nil +} + +func (x *RestoreFromBackupRequest) GetRestoreToPos() string { + if x != nil { + return x.RestoreToPos } return "" } -func (x *ValidateShardRequest) GetPingTablets() bool { +func (x *RestoreFromBackupRequest) GetDryRun() bool { if x != nil { - return x.PingTablets + return x.DryRun } return false } -type ValidateShardResponse struct { +func (x *RestoreFromBackupRequest) GetRestoreToTimestamp() *vttime.Time { + if x != nil { + return x.RestoreToTimestamp + } + return nil +} + +type RestoreFromBackupResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + // TabletAlias is the alias of the tablet doing the restore. 
+ TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` + Event *logutil.Event `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"` } -func (x *ValidateShardResponse) Reset() { - *x = ValidateShardResponse{} +func (x *RestoreFromBackupResponse) Reset() { + *x = RestoreFromBackupResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[172] + mi := &file_vtctldata_proto_msgTypes[159] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateShardResponse) String() string { +func (x *RestoreFromBackupResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateShardResponse) ProtoMessage() {} +func (*RestoreFromBackupResponse) ProtoMessage() {} -func (x *ValidateShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[172] +func (x *RestoreFromBackupResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[159] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9907,43 +10288,65 @@ func (x *ValidateShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateShardResponse.ProtoReflect.Descriptor instead. -func (*ValidateShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{172} +// Deprecated: Use RestoreFromBackupResponse.ProtoReflect.Descriptor instead. 
+func (*RestoreFromBackupResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{159} } -func (x *ValidateShardResponse) GetResults() []string { +func (x *RestoreFromBackupResponse) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Results + return x.TabletAlias } return nil } -type ValidateVersionKeyspaceRequest struct { +func (x *RestoreFromBackupResponse) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *RestoreFromBackupResponse) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *RestoreFromBackupResponse) GetEvent() *logutil.Event { + if x != nil { + return x.Event + } + return nil +} + +type RetrySchemaMigrationRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` } -func (x *ValidateVersionKeyspaceRequest) Reset() { - *x = ValidateVersionKeyspaceRequest{} +func (x *RetrySchemaMigrationRequest) Reset() { + *x = RetrySchemaMigrationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[173] + mi := &file_vtctldata_proto_msgTypes[160] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateVersionKeyspaceRequest) String() string { +func (x *RetrySchemaMigrationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateVersionKeyspaceRequest) ProtoMessage() {} +func (*RetrySchemaMigrationRequest) ProtoMessage() {} -func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[173] +func (x *RetrySchemaMigrationRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[160] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9954,44 +10357,50 @@ func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateVersionKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*ValidateVersionKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{173} +// Deprecated: Use RetrySchemaMigrationRequest.ProtoReflect.Descriptor instead. +func (*RetrySchemaMigrationRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{160} } -func (x *ValidateVersionKeyspaceRequest) GetKeyspace() string { +func (x *RetrySchemaMigrationRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -type ValidateVersionKeyspaceResponse struct { +func (x *RetrySchemaMigrationRequest) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +type RetrySchemaMigrationResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` - ResultsByShard map[string]*ValidateShardResponse `protobuf:"bytes,2,rep,name=results_by_shard,json=resultsByShard,proto3" json:"results_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + RowsAffectedByShard map[string]uint64 `protobuf:"bytes,1,rep,name=rows_affected_by_shard,json=rowsAffectedByShard,proto3" json:"rows_affected_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } -func (x *ValidateVersionKeyspaceResponse) Reset() { - *x = ValidateVersionKeyspaceResponse{} +func (x *RetrySchemaMigrationResponse) Reset() { + *x = RetrySchemaMigrationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[174] + mi := &file_vtctldata_proto_msgTypes[161] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateVersionKeyspaceResponse) String() string { +func (x *RetrySchemaMigrationResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateVersionKeyspaceResponse) ProtoMessage() {} +func (*RetrySchemaMigrationResponse) ProtoMessage() {} -func (x *ValidateVersionKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[174] +func (x *RetrySchemaMigrationResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[161] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10002,51 +10411,43 @@ func (x *ValidateVersionKeyspaceResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateVersionKeyspaceResponse.ProtoReflect.Descriptor instead. -func (*ValidateVersionKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{174} -} - -func (x *ValidateVersionKeyspaceResponse) GetResults() []string { - if x != nil { - return x.Results - } - return nil +// Deprecated: Use RetrySchemaMigrationResponse.ProtoReflect.Descriptor instead. 
+func (*RetrySchemaMigrationResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{161} } -func (x *ValidateVersionKeyspaceResponse) GetResultsByShard() map[string]*ValidateShardResponse { +func (x *RetrySchemaMigrationResponse) GetRowsAffectedByShard() map[string]uint64 { if x != nil { - return x.ResultsByShard + return x.RowsAffectedByShard } return nil } -type ValidateVersionShardRequest struct { +type RunHealthCheckRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` } -func (x *ValidateVersionShardRequest) Reset() { - *x = ValidateVersionShardRequest{} +func (x *RunHealthCheckRequest) Reset() { + *x = RunHealthCheckRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[175] + mi := &file_vtctldata_proto_msgTypes[162] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateVersionShardRequest) String() string { +func (x *RunHealthCheckRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateVersionShardRequest) ProtoMessage() {} +func (*RunHealthCheckRequest) ProtoMessage() {} -func (x *ValidateVersionShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[175] +func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[162] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10057,50 +10458,41 @@ func (x *ValidateVersionShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: 
Use ValidateVersionShardRequest.ProtoReflect.Descriptor instead. -func (*ValidateVersionShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{175} -} - -func (x *ValidateVersionShardRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" +// Deprecated: Use RunHealthCheckRequest.ProtoReflect.Descriptor instead. +func (*RunHealthCheckRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{162} } -func (x *ValidateVersionShardRequest) GetShard() string { +func (x *RunHealthCheckRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Shard + return x.TabletAlias } - return "" + return nil } -type ValidateVersionShardResponse struct { +type RunHealthCheckResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` } -func (x *ValidateVersionShardResponse) Reset() { - *x = ValidateVersionShardResponse{} +func (x *RunHealthCheckResponse) Reset() { + *x = RunHealthCheckResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[176] + mi := &file_vtctldata_proto_msgTypes[163] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateVersionShardResponse) String() string { +func (x *RunHealthCheckResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateVersionShardResponse) ProtoMessage() {} +func (*RunHealthCheckResponse) ProtoMessage() {} -func (x *ValidateVersionShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[176] +func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[163] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10111,46 
+10503,37 @@ func (x *ValidateVersionShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateVersionShardResponse.ProtoReflect.Descriptor instead. -func (*ValidateVersionShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{176} -} - -func (x *ValidateVersionShardResponse) GetResults() []string { - if x != nil { - return x.Results - } - return nil +// Deprecated: Use RunHealthCheckResponse.ProtoReflect.Descriptor instead. +func (*RunHealthCheckResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{163} } -type ValidateVSchemaRequest struct { +type SetKeyspaceDurabilityPolicyRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shards []string `protobuf:"bytes,2,rep,name=shards,proto3" json:"shards,omitempty"` - ExcludeTables []string `protobuf:"bytes,3,rep,name=exclude_tables,json=excludeTables,proto3" json:"exclude_tables,omitempty"` - IncludeViews bool `protobuf:"varint,4,opt,name=include_views,json=includeViews,proto3" json:"include_views,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + DurabilityPolicy string `protobuf:"bytes,2,opt,name=durability_policy,json=durabilityPolicy,proto3" json:"durability_policy,omitempty"` } -func (x *ValidateVSchemaRequest) Reset() { - *x = ValidateVSchemaRequest{} +func (x *SetKeyspaceDurabilityPolicyRequest) Reset() { + *x = SetKeyspaceDurabilityPolicyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[177] + mi := &file_vtctldata_proto_msgTypes[164] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateVSchemaRequest) String() string { +func (x *SetKeyspaceDurabilityPolicyRequest) String() string { return 
protoimpl.X.MessageStringOf(x) } -func (*ValidateVSchemaRequest) ProtoMessage() {} +func (*SetKeyspaceDurabilityPolicyRequest) ProtoMessage() {} -func (x *ValidateVSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[177] +func (x *SetKeyspaceDurabilityPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[164] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10161,65 +10544,51 @@ func (x *ValidateVSchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateVSchemaRequest.ProtoReflect.Descriptor instead. -func (*ValidateVSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{177} +// Deprecated: Use SetKeyspaceDurabilityPolicyRequest.ProtoReflect.Descriptor instead. +func (*SetKeyspaceDurabilityPolicyRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{164} } -func (x *ValidateVSchemaRequest) GetKeyspace() string { +func (x *SetKeyspaceDurabilityPolicyRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *ValidateVSchemaRequest) GetShards() []string { - if x != nil { - return x.Shards - } - return nil -} - -func (x *ValidateVSchemaRequest) GetExcludeTables() []string { - if x != nil { - return x.ExcludeTables - } - return nil -} - -func (x *ValidateVSchemaRequest) GetIncludeViews() bool { +func (x *SetKeyspaceDurabilityPolicyRequest) GetDurabilityPolicy() string { if x != nil { - return x.IncludeViews + return x.DurabilityPolicy } - return false + return "" } -type ValidateVSchemaResponse struct { +type SetKeyspaceDurabilityPolicyResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` - ResultsByShard 
map[string]*ValidateShardResponse `protobuf:"bytes,2,rep,name=results_by_shard,json=resultsByShard,proto3" json:"results_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Keyspace is the updated keyspace record. + Keyspace *topodata.Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *ValidateVSchemaResponse) Reset() { - *x = ValidateVSchemaResponse{} +func (x *SetKeyspaceDurabilityPolicyResponse) Reset() { + *x = SetKeyspaceDurabilityPolicyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[178] + mi := &file_vtctldata_proto_msgTypes[165] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateVSchemaResponse) String() string { +func (x *SetKeyspaceDurabilityPolicyResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateVSchemaResponse) ProtoMessage() {} +func (*SetKeyspaceDurabilityPolicyResponse) ProtoMessage() {} -func (x *ValidateVSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[178] +func (x *SetKeyspaceDurabilityPolicyResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[165] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10230,53 +10599,47 @@ func (x *ValidateVSchemaResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateVSchemaResponse.ProtoReflect.Descriptor instead. -func (*ValidateVSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{178} -} - -func (x *ValidateVSchemaResponse) GetResults() []string { - if x != nil { - return x.Results - } - return nil +// Deprecated: Use SetKeyspaceDurabilityPolicyResponse.ProtoReflect.Descriptor instead. 
+func (*SetKeyspaceDurabilityPolicyResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{165} } -func (x *ValidateVSchemaResponse) GetResultsByShard() map[string]*ValidateShardResponse { +func (x *SetKeyspaceDurabilityPolicyResponse) GetKeyspace() *topodata.Keyspace { if x != nil { - return x.ResultsByShard + return x.Keyspace } return nil } -type WorkflowUpdateRequest struct { +type SetKeyspaceServedFromRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // TabletRequest gets passed on to each primary tablet involved - // in the workflow via the UpdateVRWorkflow tabletmanager RPC. - TabletRequest *tabletmanagerdata.UpdateVRWorkflowRequest `protobuf:"bytes,2,opt,name=tablet_request,json=tabletRequest,proto3" json:"tablet_request,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + TabletType topodata.TabletType `protobuf:"varint,2,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` + Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` + Remove bool `protobuf:"varint,4,opt,name=remove,proto3" json:"remove,omitempty"` + SourceKeyspace string `protobuf:"bytes,5,opt,name=source_keyspace,json=sourceKeyspace,proto3" json:"source_keyspace,omitempty"` } -func (x *WorkflowUpdateRequest) Reset() { - *x = WorkflowUpdateRequest{} +func (x *SetKeyspaceServedFromRequest) Reset() { + *x = SetKeyspaceServedFromRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[179] + mi := &file_vtctldata_proto_msgTypes[166] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *WorkflowUpdateRequest) String() string { +func (x *SetKeyspaceServedFromRequest) String() string { return protoimpl.X.MessageStringOf(x) } 
-func (*WorkflowUpdateRequest) ProtoMessage() {} +func (*SetKeyspaceServedFromRequest) ProtoMessage() {} -func (x *WorkflowUpdateRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[179] +func (x *SetKeyspaceServedFromRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[166] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10287,51 +10650,72 @@ func (x *WorkflowUpdateRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use WorkflowUpdateRequest.ProtoReflect.Descriptor instead. -func (*WorkflowUpdateRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{179} +// Deprecated: Use SetKeyspaceServedFromRequest.ProtoReflect.Descriptor instead. +func (*SetKeyspaceServedFromRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{166} } -func (x *WorkflowUpdateRequest) GetKeyspace() string { +func (x *SetKeyspaceServedFromRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *WorkflowUpdateRequest) GetTabletRequest() *tabletmanagerdata.UpdateVRWorkflowRequest { +func (x *SetKeyspaceServedFromRequest) GetTabletType() topodata.TabletType { if x != nil { - return x.TabletRequest + return x.TabletType + } + return topodata.TabletType(0) +} + +func (x *SetKeyspaceServedFromRequest) GetCells() []string { + if x != nil { + return x.Cells } return nil } -type WorkflowUpdateResponse struct { +func (x *SetKeyspaceServedFromRequest) GetRemove() bool { + if x != nil { + return x.Remove + } + return false +} + +func (x *SetKeyspaceServedFromRequest) GetSourceKeyspace() string { + if x != nil { + return x.SourceKeyspace + } + return "" +} + +type SetKeyspaceServedFromResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Summary string 
`protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"` - Details []*WorkflowUpdateResponse_TabletInfo `protobuf:"bytes,2,rep,name=details,proto3" json:"details,omitempty"` + // Keyspace is the updated keyspace record. + Keyspace *topodata.Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *WorkflowUpdateResponse) Reset() { - *x = WorkflowUpdateResponse{} +func (x *SetKeyspaceServedFromResponse) Reset() { + *x = SetKeyspaceServedFromResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[180] + mi := &file_vtctldata_proto_msgTypes[167] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *WorkflowUpdateResponse) String() string { +func (x *SetKeyspaceServedFromResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*WorkflowUpdateResponse) ProtoMessage() {} +func (*SetKeyspaceServedFromResponse) ProtoMessage() {} -func (x *WorkflowUpdateResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[180] +func (x *SetKeyspaceServedFromResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[167] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10342,51 +10726,44 @@ func (x *WorkflowUpdateResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use WorkflowUpdateResponse.ProtoReflect.Descriptor instead. -func (*WorkflowUpdateResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{180} -} - -func (x *WorkflowUpdateResponse) GetSummary() string { - if x != nil { - return x.Summary - } - return "" +// Deprecated: Use SetKeyspaceServedFromResponse.ProtoReflect.Descriptor instead. 
+func (*SetKeyspaceServedFromResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{167} } -func (x *WorkflowUpdateResponse) GetDetails() []*WorkflowUpdateResponse_TabletInfo { +func (x *SetKeyspaceServedFromResponse) GetKeyspace() *topodata.Keyspace { if x != nil { - return x.Details + return x.Keyspace } return nil } -type Workflow_ReplicationLocation struct { +type SetKeyspaceShardingInfoRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shards []string `protobuf:"bytes,2,rep,name=shards,proto3" json:"shards,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` } -func (x *Workflow_ReplicationLocation) Reset() { - *x = Workflow_ReplicationLocation{} +func (x *SetKeyspaceShardingInfoRequest) Reset() { + *x = SetKeyspaceShardingInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[182] + mi := &file_vtctldata_proto_msgTypes[168] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *Workflow_ReplicationLocation) String() string { +func (x *SetKeyspaceShardingInfoRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Workflow_ReplicationLocation) ProtoMessage() {} +func (*SetKeyspaceShardingInfoRequest) ProtoMessage() {} -func (x *Workflow_ReplicationLocation) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[182] +func (x *SetKeyspaceShardingInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[168] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10397,52 +10774,51 @@ func (x *Workflow_ReplicationLocation) ProtoReflect() 
protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Workflow_ReplicationLocation.ProtoReflect.Descriptor instead. -func (*Workflow_ReplicationLocation) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{6, 1} +// Deprecated: Use SetKeyspaceShardingInfoRequest.ProtoReflect.Descriptor instead. +func (*SetKeyspaceShardingInfoRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{168} } -func (x *Workflow_ReplicationLocation) GetKeyspace() string { +func (x *SetKeyspaceShardingInfoRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *Workflow_ReplicationLocation) GetShards() []string { +func (x *SetKeyspaceShardingInfoRequest) GetForce() bool { if x != nil { - return x.Shards + return x.Force } - return nil + return false } -type Workflow_ShardStream struct { +type SetKeyspaceShardingInfoResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Streams []*Workflow_Stream `protobuf:"bytes,1,rep,name=streams,proto3" json:"streams,omitempty"` - TabletControls []*topodata.Shard_TabletControl `protobuf:"bytes,2,rep,name=tablet_controls,json=tabletControls,proto3" json:"tablet_controls,omitempty"` - IsPrimaryServing bool `protobuf:"varint,3,opt,name=is_primary_serving,json=isPrimaryServing,proto3" json:"is_primary_serving,omitempty"` + // Keyspace is the updated keyspace record. 
+ Keyspace *topodata.Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *Workflow_ShardStream) Reset() { - *x = Workflow_ShardStream{} +func (x *SetKeyspaceShardingInfoResponse) Reset() { + *x = SetKeyspaceShardingInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[183] + mi := &file_vtctldata_proto_msgTypes[169] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *Workflow_ShardStream) String() string { +func (x *SetKeyspaceShardingInfoResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Workflow_ShardStream) ProtoMessage() {} +func (*SetKeyspaceShardingInfoResponse) ProtoMessage() {} -func (x *Workflow_ShardStream) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[183] +func (x *SetKeyspaceShardingInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[169] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10453,79 +10829,45 @@ func (x *Workflow_ShardStream) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Workflow_ShardStream.ProtoReflect.Descriptor instead. -func (*Workflow_ShardStream) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{6, 2} +// Deprecated: Use SetKeyspaceShardingInfoResponse.ProtoReflect.Descriptor instead. 
+func (*SetKeyspaceShardingInfoResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{169} } -func (x *Workflow_ShardStream) GetStreams() []*Workflow_Stream { +func (x *SetKeyspaceShardingInfoResponse) GetKeyspace() *topodata.Keyspace { if x != nil { - return x.Streams + return x.Keyspace } return nil } -func (x *Workflow_ShardStream) GetTabletControls() []*topodata.Shard_TabletControl { - if x != nil { - return x.TabletControls - } - return nil -} - -func (x *Workflow_ShardStream) GetIsPrimaryServing() bool { - if x != nil { - return x.IsPrimaryServing - } - return false -} - -type Workflow_Stream struct { +type SetShardIsPrimaryServingRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - Tablet *topodata.TabletAlias `protobuf:"bytes,3,opt,name=tablet,proto3" json:"tablet,omitempty"` - BinlogSource *binlogdata.BinlogSource `protobuf:"bytes,4,opt,name=binlog_source,json=binlogSource,proto3" json:"binlog_source,omitempty"` - Position string `protobuf:"bytes,5,opt,name=position,proto3" json:"position,omitempty"` - StopPosition string `protobuf:"bytes,6,opt,name=stop_position,json=stopPosition,proto3" json:"stop_position,omitempty"` - State string `protobuf:"bytes,7,opt,name=state,proto3" json:"state,omitempty"` - DbName string `protobuf:"bytes,8,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"` - TransactionTimestamp *vttime.Time `protobuf:"bytes,9,opt,name=transaction_timestamp,json=transactionTimestamp,proto3" json:"transaction_timestamp,omitempty"` - TimeUpdated *vttime.Time `protobuf:"bytes,10,opt,name=time_updated,json=timeUpdated,proto3" json:"time_updated,omitempty"` - Message string `protobuf:"bytes,11,opt,name=message,proto3" json:"message,omitempty"` - CopyStates []*Workflow_Stream_CopyState 
`protobuf:"bytes,12,rep,name=copy_states,json=copyStates,proto3" json:"copy_states,omitempty"` - Logs []*Workflow_Stream_Log `protobuf:"bytes,13,rep,name=logs,proto3" json:"logs,omitempty"` - // LogFetchError is set if we fail to fetch some logs for this stream. We - // will never fail to fetch workflows because we cannot fetch the logs, but - // we will still forward log-fetch errors to the caller, should that be - // relevant to the context in which they are fetching workflows. - // - // Note that this field being set does not necessarily mean that Logs is nil; - // if there are N logs that exist for the stream, and we fail to fetch the - // ith log, we will still return logs in [0, i) + (i, N]. - LogFetchError string `protobuf:"bytes,14,opt,name=log_fetch_error,json=logFetchError,proto3" json:"log_fetch_error,omitempty"` - Tags []string `protobuf:"bytes,15,rep,name=tags,proto3" json:"tags,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + IsServing bool `protobuf:"varint,3,opt,name=is_serving,json=isServing,proto3" json:"is_serving,omitempty"` } -func (x *Workflow_Stream) Reset() { - *x = Workflow_Stream{} +func (x *SetShardIsPrimaryServingRequest) Reset() { + *x = SetShardIsPrimaryServingRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[184] + mi := &file_vtctldata_proto_msgTypes[170] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *Workflow_Stream) String() string { +func (x *SetShardIsPrimaryServingRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Workflow_Stream) ProtoMessage() {} +func (*SetShardIsPrimaryServingRequest) ProtoMessage() {} -func (x *Workflow_Stream) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[184] +func (x *SetShardIsPrimaryServingRequest) ProtoReflect() protoreflect.Message { + mi 
:= &file_vtctldata_proto_msgTypes[170] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10536,142 +10878,215 @@ func (x *Workflow_Stream) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Workflow_Stream.ProtoReflect.Descriptor instead. -func (*Workflow_Stream) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{6, 3} +// Deprecated: Use SetShardIsPrimaryServingRequest.ProtoReflect.Descriptor instead. +func (*SetShardIsPrimaryServingRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{170} } -func (x *Workflow_Stream) GetId() int64 { +func (x *SetShardIsPrimaryServingRequest) GetKeyspace() string { if x != nil { - return x.Id + return x.Keyspace } - return 0 + return "" } -func (x *Workflow_Stream) GetShard() string { +func (x *SetShardIsPrimaryServingRequest) GetShard() string { if x != nil { return x.Shard } return "" } -func (x *Workflow_Stream) GetTablet() *topodata.TabletAlias { +func (x *SetShardIsPrimaryServingRequest) GetIsServing() bool { if x != nil { - return x.Tablet + return x.IsServing } - return nil + return false } -func (x *Workflow_Stream) GetBinlogSource() *binlogdata.BinlogSource { - if x != nil { - return x.BinlogSource +type SetShardIsPrimaryServingResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Shard is the updated shard record. 
+ Shard *topodata.Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` +} + +func (x *SetShardIsPrimaryServingResponse) Reset() { + *x = SetShardIsPrimaryServingResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[171] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (x *Workflow_Stream) GetPosition() string { - if x != nil { - return x.Position +func (x *SetShardIsPrimaryServingResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetShardIsPrimaryServingResponse) ProtoMessage() {} + +func (x *SetShardIsPrimaryServingResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[171] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (x *Workflow_Stream) GetStopPosition() string { +// Deprecated: Use SetShardIsPrimaryServingResponse.ProtoReflect.Descriptor instead. 
+func (*SetShardIsPrimaryServingResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{171} +} + +func (x *SetShardIsPrimaryServingResponse) GetShard() *topodata.Shard { if x != nil { - return x.StopPosition + return x.Shard } - return "" + return nil } -func (x *Workflow_Stream) GetState() string { - if x != nil { - return x.State +type SetShardTabletControlRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + TabletType topodata.TabletType `protobuf:"varint,3,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` + Cells []string `protobuf:"bytes,4,rep,name=cells,proto3" json:"cells,omitempty"` + // DeniedTables updates the list of denied tables the shard will serve for + // the given tablet type. This is useful to fix tables that are being blocked + // after a MoveTables operation. + // + // NOTE: Setting this field will cause DisableQueryService to be ignored. + DeniedTables []string `protobuf:"bytes,5,rep,name=denied_tables,json=deniedTables,proto3" json:"denied_tables,omitempty"` + // DisableQueryService instructs whether to enable the query service on + // tablets of the given type in the shard. This is useful to fix Reshard + // operations gone awry. + // + // NOTE: this is ignored if DeniedTables is not empty. + DisableQueryService bool `protobuf:"varint,6,opt,name=disable_query_service,json=disableQueryService,proto3" json:"disable_query_service,omitempty"` + // Remove removes the ShardTabletControl record entirely. If set, this takes + // precedence over DeniedTables and DisableQueryService fields, and is useful + // to manually remove serving restrictions after a completed MoveTables + // operation. 
+ Remove bool `protobuf:"varint,7,opt,name=remove,proto3" json:"remove,omitempty"` +} + +func (x *SetShardTabletControlRequest) Reset() { + *x = SetShardTabletControlRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[172] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -func (x *Workflow_Stream) GetDbName() string { - if x != nil { - return x.DbName +func (x *SetShardTabletControlRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetShardTabletControlRequest) ProtoMessage() {} + +func (x *SetShardTabletControlRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[172] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (x *Workflow_Stream) GetTransactionTimestamp() *vttime.Time { +// Deprecated: Use SetShardTabletControlRequest.ProtoReflect.Descriptor instead. 
+func (*SetShardTabletControlRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{172} +} + +func (x *SetShardTabletControlRequest) GetKeyspace() string { if x != nil { - return x.TransactionTimestamp + return x.Keyspace } - return nil + return "" } -func (x *Workflow_Stream) GetTimeUpdated() *vttime.Time { +func (x *SetShardTabletControlRequest) GetShard() string { if x != nil { - return x.TimeUpdated + return x.Shard } - return nil + return "" } -func (x *Workflow_Stream) GetMessage() string { +func (x *SetShardTabletControlRequest) GetTabletType() topodata.TabletType { if x != nil { - return x.Message + return x.TabletType } - return "" + return topodata.TabletType(0) } -func (x *Workflow_Stream) GetCopyStates() []*Workflow_Stream_CopyState { +func (x *SetShardTabletControlRequest) GetCells() []string { if x != nil { - return x.CopyStates + return x.Cells } return nil } -func (x *Workflow_Stream) GetLogs() []*Workflow_Stream_Log { +func (x *SetShardTabletControlRequest) GetDeniedTables() []string { if x != nil { - return x.Logs + return x.DeniedTables } return nil } -func (x *Workflow_Stream) GetLogFetchError() string { +func (x *SetShardTabletControlRequest) GetDisableQueryService() bool { if x != nil { - return x.LogFetchError + return x.DisableQueryService } - return "" + return false } -func (x *Workflow_Stream) GetTags() []string { +func (x *SetShardTabletControlRequest) GetRemove() bool { if x != nil { - return x.Tags + return x.Remove } - return nil + return false } -type Workflow_Stream_CopyState struct { +type SetShardTabletControlResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"` - LastPk string `protobuf:"bytes,2,opt,name=last_pk,json=lastPk,proto3" json:"last_pk,omitempty"` + // Shard is the updated shard record. 
+ Shard *topodata.Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` } -func (x *Workflow_Stream_CopyState) Reset() { - *x = Workflow_Stream_CopyState{} +func (x *SetShardTabletControlResponse) Reset() { + *x = SetShardTabletControlResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[185] + mi := &file_vtctldata_proto_msgTypes[173] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *Workflow_Stream_CopyState) String() string { +func (x *SetShardTabletControlResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Workflow_Stream_CopyState) ProtoMessage() {} +func (*SetShardTabletControlResponse) ProtoMessage() {} -func (x *Workflow_Stream_CopyState) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[185] +func (x *SetShardTabletControlResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[173] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10682,57 +11097,44 @@ func (x *Workflow_Stream_CopyState) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Workflow_Stream_CopyState.ProtoReflect.Descriptor instead. -func (*Workflow_Stream_CopyState) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{6, 3, 0} +// Deprecated: Use SetShardTabletControlResponse.ProtoReflect.Descriptor instead. 
+func (*SetShardTabletControlResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{173} } -func (x *Workflow_Stream_CopyState) GetTable() string { +func (x *SetShardTabletControlResponse) GetShard() *topodata.Shard { if x != nil { - return x.Table + return x.Shard } - return "" + return nil } -func (x *Workflow_Stream_CopyState) GetLastPk() string { - if x != nil { - return x.LastPk - } - return "" -} - -type Workflow_Stream_Log struct { +type SetWritableRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - StreamId int64 `protobuf:"varint,2,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"` - Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` - State string `protobuf:"bytes,4,opt,name=state,proto3" json:"state,omitempty"` - CreatedAt *vttime.Time `protobuf:"bytes,5,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - UpdatedAt *vttime.Time `protobuf:"bytes,6,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` - Message string `protobuf:"bytes,7,opt,name=message,proto3" json:"message,omitempty"` - Count int64 `protobuf:"varint,8,opt,name=count,proto3" json:"count,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Writable bool `protobuf:"varint,2,opt,name=writable,proto3" json:"writable,omitempty"` } -func (x *Workflow_Stream_Log) Reset() { - *x = Workflow_Stream_Log{} +func (x *SetWritableRequest) Reset() { + *x = SetWritableRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[186] + mi := &file_vtctldata_proto_msgTypes[174] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *Workflow_Stream_Log) String() string { +func (x *SetWritableRequest) String() 
string { return protoimpl.X.MessageStringOf(x) } -func (*Workflow_Stream_Log) ProtoMessage() {} +func (*SetWritableRequest) ProtoMessage() {} -func (x *Workflow_Stream_Log) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[186] +func (x *SetWritableRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[174] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10743,92 +11145,149 @@ func (x *Workflow_Stream_Log) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Workflow_Stream_Log.ProtoReflect.Descriptor instead. -func (*Workflow_Stream_Log) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{6, 3, 1} +// Deprecated: Use SetWritableRequest.ProtoReflect.Descriptor instead. +func (*SetWritableRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{174} } -func (x *Workflow_Stream_Log) GetId() int64 { +func (x *SetWritableRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Id + return x.TabletAlias } - return 0 + return nil } -func (x *Workflow_Stream_Log) GetStreamId() int64 { +func (x *SetWritableRequest) GetWritable() bool { if x != nil { - return x.StreamId + return x.Writable } - return 0 + return false } -func (x *Workflow_Stream_Log) GetType() string { - if x != nil { - return x.Type +type SetWritableResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SetWritableResponse) Reset() { + *x = SetWritableResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[175] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -func (x *Workflow_Stream_Log) GetState() string { - if x != nil { - return x.State +func (x *SetWritableResponse) String() string { + return 
protoimpl.X.MessageStringOf(x) +} + +func (*SetWritableResponse) ProtoMessage() {} + +func (x *SetWritableResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[175] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (x *Workflow_Stream_Log) GetCreatedAt() *vttime.Time { - if x != nil { - return x.CreatedAt +// Deprecated: Use SetWritableResponse.ProtoReflect.Descriptor instead. +func (*SetWritableResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{175} +} + +type ShardReplicationAddRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,3,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` +} + +func (x *ShardReplicationAddRequest) Reset() { + *x = ShardReplicationAddRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[176] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (x *Workflow_Stream_Log) GetUpdatedAt() *vttime.Time { +func (x *ShardReplicationAddRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShardReplicationAddRequest) ProtoMessage() {} + +func (x *ShardReplicationAddRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[176] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
ShardReplicationAddRequest.ProtoReflect.Descriptor instead. +func (*ShardReplicationAddRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{176} +} + +func (x *ShardReplicationAddRequest) GetKeyspace() string { if x != nil { - return x.UpdatedAt + return x.Keyspace } - return nil + return "" } -func (x *Workflow_Stream_Log) GetMessage() string { +func (x *ShardReplicationAddRequest) GetShard() string { if x != nil { - return x.Message + return x.Shard } return "" } -func (x *Workflow_Stream_Log) GetCount() int64 { +func (x *ShardReplicationAddRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Count + return x.TabletAlias } - return 0 + return nil } -type GetSrvKeyspaceNamesResponse_NameList struct { +type ShardReplicationAddResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` } -func (x *GetSrvKeyspaceNamesResponse_NameList) Reset() { - *x = GetSrvKeyspaceNamesResponse_NameList{} +func (x *ShardReplicationAddResponse) Reset() { + *x = ShardReplicationAddResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[190] + mi := &file_vtctldata_proto_msgTypes[177] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSrvKeyspaceNamesResponse_NameList) String() string { +func (x *ShardReplicationAddResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvKeyspaceNamesResponse_NameList) ProtoMessage() {} +func (*ShardReplicationAddResponse) ProtoMessage() {} -func (x *GetSrvKeyspaceNamesResponse_NameList) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[190] +func (x *ShardReplicationAddResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[177] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10839,47 +11298,38 @@ func (x *GetSrvKeyspaceNamesResponse_NameList) ProtoReflect() protoreflect.Messa return mi.MessageOf(x) } -// Deprecated: Use GetSrvKeyspaceNamesResponse_NameList.ProtoReflect.Descriptor instead. -func (*GetSrvKeyspaceNamesResponse_NameList) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{75, 1} -} - -func (x *GetSrvKeyspaceNamesResponse_NameList) GetNames() []string { - if x != nil { - return x.Names - } - return nil +// Deprecated: Use ShardReplicationAddResponse.ProtoReflect.Descriptor instead. +func (*ShardReplicationAddResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{177} } -type WorkflowUpdateResponse_TabletInfo struct { +type ShardReplicationFixRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Tablet string `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` - // Changed is true if any of the provided values were different - // than what was already stored. The value is based on the query - // result's RowsAffected being 0 or not. 
- Changed bool `protobuf:"varint,2,opt,name=changed,proto3" json:"changed,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + Cell string `protobuf:"bytes,3,opt,name=cell,proto3" json:"cell,omitempty"` } -func (x *WorkflowUpdateResponse_TabletInfo) Reset() { - *x = WorkflowUpdateResponse_TabletInfo{} +func (x *ShardReplicationFixRequest) Reset() { + *x = ShardReplicationFixRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[200] + mi := &file_vtctldata_proto_msgTypes[178] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *WorkflowUpdateResponse_TabletInfo) String() string { +func (x *ShardReplicationFixRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*WorkflowUpdateResponse_TabletInfo) ProtoMessage() {} +func (*ShardReplicationFixRequest) ProtoMessage() {} -func (x *WorkflowUpdateResponse_TabletInfo) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[200] +func (x *ShardReplicationFixRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[178] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10890,70 +11340,4135 @@ func (x *WorkflowUpdateResponse_TabletInfo) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use WorkflowUpdateResponse_TabletInfo.ProtoReflect.Descriptor instead. -func (*WorkflowUpdateResponse_TabletInfo) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{180, 0} +// Deprecated: Use ShardReplicationFixRequest.ProtoReflect.Descriptor instead. 
+func (*ShardReplicationFixRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{178} } -func (x *WorkflowUpdateResponse_TabletInfo) GetTablet() string { +func (x *ShardReplicationFixRequest) GetKeyspace() string { if x != nil { - return x.Tablet + return x.Keyspace } return "" } -func (x *WorkflowUpdateResponse_TabletInfo) GetChanged() bool { +func (x *ShardReplicationFixRequest) GetShard() string { if x != nil { - return x.Changed + return x.Shard } - return false + return "" } -var File_vtctldata_proto protoreflect.FileDescriptor +func (x *ShardReplicationFixRequest) GetCell() string { + if x != nil { + return x.Cell + } + return "" +} -var file_vtctldata_proto_rawDesc = []byte{ - 0x0a, 0x0f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x09, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x10, 0x62, 0x69, - 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0d, - 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0e, 0x6d, - 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0b, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x17, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0d, 0x76, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0b, 0x76, 0x74, 0x72, 0x70, 0x63, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0c, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x57, 0x0a, 0x1a, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 
0x56, - 0x74, 0x63, 0x74, 0x6c, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x43, 0x0a, - 0x1b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x43, 0x6f, 0x6d, - 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x05, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, - 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x22, 0x89, 0x01, 0x0a, 0x18, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x74, 0x65, - 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, - 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x78, 0x70, - 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x1d, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x64, 0x6c, 0x22, 0xf4, - 0x04, 0x0a, 0x13, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x66, 0x74, - 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, - 0x74, 0x6f, 0x70, 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x4a, 0x0a, 0x0e, +type ShardReplicationFixResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Error contains information about the error fixed by a + // ShardReplicationFix RPC. If there were no errors to fix (i.e. all nodes + // in the replication graph are valid), this field is nil. 
+ Error *topodata.ShardReplicationError `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *ShardReplicationFixResponse) Reset() { + *x = ShardReplicationFixResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[179] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShardReplicationFixResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShardReplicationFixResponse) ProtoMessage() {} + +func (x *ShardReplicationFixResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[179] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShardReplicationFixResponse.ProtoReflect.Descriptor instead. +func (*ShardReplicationFixResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{179} +} + +func (x *ShardReplicationFixResponse) GetError() *topodata.ShardReplicationError { + if x != nil { + return x.Error + } + return nil +} + +type ShardReplicationPositionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` +} + +func (x *ShardReplicationPositionsRequest) Reset() { + *x = ShardReplicationPositionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[180] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShardReplicationPositionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShardReplicationPositionsRequest) ProtoMessage() {} + +func (x 
*ShardReplicationPositionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[180] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShardReplicationPositionsRequest.ProtoReflect.Descriptor instead. +func (*ShardReplicationPositionsRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{180} +} + +func (x *ShardReplicationPositionsRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *ShardReplicationPositionsRequest) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +type ShardReplicationPositionsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // ReplicationStatuses is a mapping of tablet alias string to replication + // status for that tablet. + ReplicationStatuses map[string]*replicationdata.Status `protobuf:"bytes,1,rep,name=replication_statuses,json=replicationStatuses,proto3" json:"replication_statuses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // TabletMap is the set of tablets whose replication statuses were queried, + // keyed by tablet alias. 
+ TabletMap map[string]*topodata.Tablet `protobuf:"bytes,2,rep,name=tablet_map,json=tabletMap,proto3" json:"tablet_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ShardReplicationPositionsResponse) Reset() { + *x = ShardReplicationPositionsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[181] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShardReplicationPositionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShardReplicationPositionsResponse) ProtoMessage() {} + +func (x *ShardReplicationPositionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[181] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShardReplicationPositionsResponse.ProtoReflect.Descriptor instead. 
+func (*ShardReplicationPositionsResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{181} +} + +func (x *ShardReplicationPositionsResponse) GetReplicationStatuses() map[string]*replicationdata.Status { + if x != nil { + return x.ReplicationStatuses + } + return nil +} + +func (x *ShardReplicationPositionsResponse) GetTabletMap() map[string]*topodata.Tablet { + if x != nil { + return x.TabletMap + } + return nil +} + +type ShardReplicationRemoveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,3,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` +} + +func (x *ShardReplicationRemoveRequest) Reset() { + *x = ShardReplicationRemoveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[182] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShardReplicationRemoveRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShardReplicationRemoveRequest) ProtoMessage() {} + +func (x *ShardReplicationRemoveRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[182] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShardReplicationRemoveRequest.ProtoReflect.Descriptor instead. 
+func (*ShardReplicationRemoveRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{182} +} + +func (x *ShardReplicationRemoveRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *ShardReplicationRemoveRequest) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *ShardReplicationRemoveRequest) GetTabletAlias() *topodata.TabletAlias { + if x != nil { + return x.TabletAlias + } + return nil +} + +type ShardReplicationRemoveResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ShardReplicationRemoveResponse) Reset() { + *x = ShardReplicationRemoveResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[183] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShardReplicationRemoveResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShardReplicationRemoveResponse) ProtoMessage() {} + +func (x *ShardReplicationRemoveResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[183] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShardReplicationRemoveResponse.ProtoReflect.Descriptor instead. 
+func (*ShardReplicationRemoveResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{183} +} + +type SleepTabletRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Duration *vttime.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"` +} + +func (x *SleepTabletRequest) Reset() { + *x = SleepTabletRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[184] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SleepTabletRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SleepTabletRequest) ProtoMessage() {} + +func (x *SleepTabletRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[184] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SleepTabletRequest.ProtoReflect.Descriptor instead. 
+func (*SleepTabletRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{184} +} + +func (x *SleepTabletRequest) GetTabletAlias() *topodata.TabletAlias { + if x != nil { + return x.TabletAlias + } + return nil +} + +func (x *SleepTabletRequest) GetDuration() *vttime.Duration { + if x != nil { + return x.Duration + } + return nil +} + +type SleepTabletResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SleepTabletResponse) Reset() { + *x = SleepTabletResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[185] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SleepTabletResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SleepTabletResponse) ProtoMessage() {} + +func (x *SleepTabletResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[185] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SleepTabletResponse.ProtoReflect.Descriptor instead. 
+func (*SleepTabletResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{185} +} + +type SourceShardAddRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + Uid int32 `protobuf:"varint,3,opt,name=uid,proto3" json:"uid,omitempty"` + SourceKeyspace string `protobuf:"bytes,4,opt,name=source_keyspace,json=sourceKeyspace,proto3" json:"source_keyspace,omitempty"` + SourceShard string `protobuf:"bytes,5,opt,name=source_shard,json=sourceShard,proto3" json:"source_shard,omitempty"` + // KeyRange identifies the key range to use for the SourceShard. This field is + // optional. + KeyRange *topodata.KeyRange `protobuf:"bytes,6,opt,name=key_range,json=keyRange,proto3" json:"key_range,omitempty"` + // Tables is a list of tables replicate (for MoveTables). Each "table" can be + // either an exact match or a regular expression of the form "/regexp/". 
+ Tables []string `protobuf:"bytes,7,rep,name=tables,proto3" json:"tables,omitempty"` +} + +func (x *SourceShardAddRequest) Reset() { + *x = SourceShardAddRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[186] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceShardAddRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceShardAddRequest) ProtoMessage() {} + +func (x *SourceShardAddRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[186] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceShardAddRequest.ProtoReflect.Descriptor instead. +func (*SourceShardAddRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{186} +} + +func (x *SourceShardAddRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *SourceShardAddRequest) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *SourceShardAddRequest) GetUid() int32 { + if x != nil { + return x.Uid + } + return 0 +} + +func (x *SourceShardAddRequest) GetSourceKeyspace() string { + if x != nil { + return x.SourceKeyspace + } + return "" +} + +func (x *SourceShardAddRequest) GetSourceShard() string { + if x != nil { + return x.SourceShard + } + return "" +} + +func (x *SourceShardAddRequest) GetKeyRange() *topodata.KeyRange { + if x != nil { + return x.KeyRange + } + return nil +} + +func (x *SourceShardAddRequest) GetTables() []string { + if x != nil { + return x.Tables + } + return nil +} + +type SourceShardAddResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Shard is the updated shard record. 
+ Shard *topodata.Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` +} + +func (x *SourceShardAddResponse) Reset() { + *x = SourceShardAddResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[187] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceShardAddResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceShardAddResponse) ProtoMessage() {} + +func (x *SourceShardAddResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[187] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceShardAddResponse.ProtoReflect.Descriptor instead. +func (*SourceShardAddResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{187} +} + +func (x *SourceShardAddResponse) GetShard() *topodata.Shard { + if x != nil { + return x.Shard + } + return nil +} + +type SourceShardDeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + Uid int32 `protobuf:"varint,3,opt,name=uid,proto3" json:"uid,omitempty"` +} + +func (x *SourceShardDeleteRequest) Reset() { + *x = SourceShardDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[188] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceShardDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceShardDeleteRequest) ProtoMessage() {} + +func (x *SourceShardDeleteRequest) ProtoReflect() protoreflect.Message 
{ + mi := &file_vtctldata_proto_msgTypes[188] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceShardDeleteRequest.ProtoReflect.Descriptor instead. +func (*SourceShardDeleteRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{188} +} + +func (x *SourceShardDeleteRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *SourceShardDeleteRequest) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *SourceShardDeleteRequest) GetUid() int32 { + if x != nil { + return x.Uid + } + return 0 +} + +type SourceShardDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Shard is the updated shard record. + Shard *topodata.Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` +} + +func (x *SourceShardDeleteResponse) Reset() { + *x = SourceShardDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[189] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceShardDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceShardDeleteResponse) ProtoMessage() {} + +func (x *SourceShardDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[189] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceShardDeleteResponse.ProtoReflect.Descriptor instead. 
+func (*SourceShardDeleteResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{189} +} + +func (x *SourceShardDeleteResponse) GetShard() *topodata.Shard { + if x != nil { + return x.Shard + } + return nil +} + +type StartReplicationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` +} + +func (x *StartReplicationRequest) Reset() { + *x = StartReplicationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[190] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartReplicationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartReplicationRequest) ProtoMessage() {} + +func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[190] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartReplicationRequest.ProtoReflect.Descriptor instead. 
+func (*StartReplicationRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{190} +} + +func (x *StartReplicationRequest) GetTabletAlias() *topodata.TabletAlias { + if x != nil { + return x.TabletAlias + } + return nil +} + +type StartReplicationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StartReplicationResponse) Reset() { + *x = StartReplicationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[191] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartReplicationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartReplicationResponse) ProtoMessage() {} + +func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[191] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartReplicationResponse.ProtoReflect.Descriptor instead. 
+func (*StartReplicationResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{191} +} + +type StopReplicationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` +} + +func (x *StopReplicationRequest) Reset() { + *x = StopReplicationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[192] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopReplicationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopReplicationRequest) ProtoMessage() {} + +func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[192] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopReplicationRequest.ProtoReflect.Descriptor instead. 
+func (*StopReplicationRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{192} +} + +func (x *StopReplicationRequest) GetTabletAlias() *topodata.TabletAlias { + if x != nil { + return x.TabletAlias + } + return nil +} + +type StopReplicationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StopReplicationResponse) Reset() { + *x = StopReplicationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[193] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopReplicationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopReplicationResponse) ProtoMessage() {} + +func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[193] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopReplicationResponse.ProtoReflect.Descriptor instead. +func (*StopReplicationResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{193} +} + +type TabletExternallyReparentedRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Tablet is the alias of the tablet that was promoted externally and should + // be updated to the shard primary in the topo. 
+ Tablet *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` +} + +func (x *TabletExternallyReparentedRequest) Reset() { + *x = TabletExternallyReparentedRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[194] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TabletExternallyReparentedRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TabletExternallyReparentedRequest) ProtoMessage() {} + +func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[194] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TabletExternallyReparentedRequest.ProtoReflect.Descriptor instead. +func (*TabletExternallyReparentedRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{194} +} + +func (x *TabletExternallyReparentedRequest) GetTablet() *topodata.TabletAlias { + if x != nil { + return x.Tablet + } + return nil +} + +type TabletExternallyReparentedResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + NewPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=new_primary,json=newPrimary,proto3" json:"new_primary,omitempty"` + OldPrimary *topodata.TabletAlias `protobuf:"bytes,4,opt,name=old_primary,json=oldPrimary,proto3" json:"old_primary,omitempty"` +} + +func (x *TabletExternallyReparentedResponse) Reset() { + *x = TabletExternallyReparentedResponse{} + if protoimpl.UnsafeEnabled { + mi := 
&file_vtctldata_proto_msgTypes[195] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TabletExternallyReparentedResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TabletExternallyReparentedResponse) ProtoMessage() {} + +func (x *TabletExternallyReparentedResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[195] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TabletExternallyReparentedResponse.ProtoReflect.Descriptor instead. +func (*TabletExternallyReparentedResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{195} +} + +func (x *TabletExternallyReparentedResponse) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *TabletExternallyReparentedResponse) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *TabletExternallyReparentedResponse) GetNewPrimary() *topodata.TabletAlias { + if x != nil { + return x.NewPrimary + } + return nil +} + +func (x *TabletExternallyReparentedResponse) GetOldPrimary() *topodata.TabletAlias { + if x != nil { + return x.OldPrimary + } + return nil +} + +type UpdateCellInfoRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + CellInfo *topodata.CellInfo `protobuf:"bytes,2,opt,name=cell_info,json=cellInfo,proto3" json:"cell_info,omitempty"` +} + +func (x *UpdateCellInfoRequest) Reset() { + *x = UpdateCellInfoRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[196] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x 
*UpdateCellInfoRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateCellInfoRequest) ProtoMessage() {} + +func (x *UpdateCellInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[196] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateCellInfoRequest.ProtoReflect.Descriptor instead. +func (*UpdateCellInfoRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{196} +} + +func (x *UpdateCellInfoRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UpdateCellInfoRequest) GetCellInfo() *topodata.CellInfo { + if x != nil { + return x.CellInfo + } + return nil +} + +type UpdateCellInfoResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + CellInfo *topodata.CellInfo `protobuf:"bytes,2,opt,name=cell_info,json=cellInfo,proto3" json:"cell_info,omitempty"` +} + +func (x *UpdateCellInfoResponse) Reset() { + *x = UpdateCellInfoResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[197] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateCellInfoResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateCellInfoResponse) ProtoMessage() {} + +func (x *UpdateCellInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[197] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
UpdateCellInfoResponse.ProtoReflect.Descriptor instead. +func (*UpdateCellInfoResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{197} +} + +func (x *UpdateCellInfoResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UpdateCellInfoResponse) GetCellInfo() *topodata.CellInfo { + if x != nil { + return x.CellInfo + } + return nil +} + +type UpdateCellsAliasRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + CellsAlias *topodata.CellsAlias `protobuf:"bytes,2,opt,name=cells_alias,json=cellsAlias,proto3" json:"cells_alias,omitempty"` +} + +func (x *UpdateCellsAliasRequest) Reset() { + *x = UpdateCellsAliasRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[198] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateCellsAliasRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateCellsAliasRequest) ProtoMessage() {} + +func (x *UpdateCellsAliasRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[198] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateCellsAliasRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateCellsAliasRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{198} +} + +func (x *UpdateCellsAliasRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UpdateCellsAliasRequest) GetCellsAlias() *topodata.CellsAlias { + if x != nil { + return x.CellsAlias + } + return nil +} + +type UpdateCellsAliasResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + CellsAlias *topodata.CellsAlias `protobuf:"bytes,2,opt,name=cells_alias,json=cellsAlias,proto3" json:"cells_alias,omitempty"` +} + +func (x *UpdateCellsAliasResponse) Reset() { + *x = UpdateCellsAliasResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[199] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateCellsAliasResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateCellsAliasResponse) ProtoMessage() {} + +func (x *UpdateCellsAliasResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[199] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateCellsAliasResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateCellsAliasResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{199} +} + +func (x *UpdateCellsAliasResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UpdateCellsAliasResponse) GetCellsAlias() *topodata.CellsAlias { + if x != nil { + return x.CellsAlias + } + return nil +} + +type ValidateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PingTablets bool `protobuf:"varint,1,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"` +} + +func (x *ValidateRequest) Reset() { + *x = ValidateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[200] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateRequest) ProtoMessage() {} + +func (x *ValidateRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[200] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateRequest.ProtoReflect.Descriptor instead. 
+func (*ValidateRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{200} +} + +func (x *ValidateRequest) GetPingTablets() bool { + if x != nil { + return x.PingTablets + } + return false +} + +type ValidateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + ResultsByKeyspace map[string]*ValidateKeyspaceResponse `protobuf:"bytes,2,rep,name=results_by_keyspace,json=resultsByKeyspace,proto3" json:"results_by_keyspace,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ValidateResponse) Reset() { + *x = ValidateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[201] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateResponse) ProtoMessage() {} + +func (x *ValidateResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[201] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateResponse.ProtoReflect.Descriptor instead. 
+func (*ValidateResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{201} +} + +func (x *ValidateResponse) GetResults() []string { + if x != nil { + return x.Results + } + return nil +} + +func (x *ValidateResponse) GetResultsByKeyspace() map[string]*ValidateKeyspaceResponse { + if x != nil { + return x.ResultsByKeyspace + } + return nil +} + +type ValidateKeyspaceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + PingTablets bool `protobuf:"varint,2,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"` +} + +func (x *ValidateKeyspaceRequest) Reset() { + *x = ValidateKeyspaceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[202] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateKeyspaceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateKeyspaceRequest) ProtoMessage() {} + +func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[202] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateKeyspaceRequest.ProtoReflect.Descriptor instead. 
+func (*ValidateKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{202} +} + +func (x *ValidateKeyspaceRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *ValidateKeyspaceRequest) GetPingTablets() bool { + if x != nil { + return x.PingTablets + } + return false +} + +type ValidateKeyspaceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + ResultsByShard map[string]*ValidateShardResponse `protobuf:"bytes,2,rep,name=results_by_shard,json=resultsByShard,proto3" json:"results_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ValidateKeyspaceResponse) Reset() { + *x = ValidateKeyspaceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[203] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateKeyspaceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateKeyspaceResponse) ProtoMessage() {} + +func (x *ValidateKeyspaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[203] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateKeyspaceResponse.ProtoReflect.Descriptor instead. 
+func (*ValidateKeyspaceResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{203} +} + +func (x *ValidateKeyspaceResponse) GetResults() []string { + if x != nil { + return x.Results + } + return nil +} + +func (x *ValidateKeyspaceResponse) GetResultsByShard() map[string]*ValidateShardResponse { + if x != nil { + return x.ResultsByShard + } + return nil +} + +type ValidateSchemaKeyspaceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + ExcludeTables []string `protobuf:"bytes,2,rep,name=exclude_tables,json=excludeTables,proto3" json:"exclude_tables,omitempty"` + IncludeViews bool `protobuf:"varint,3,opt,name=include_views,json=includeViews,proto3" json:"include_views,omitempty"` + SkipNoPrimary bool `protobuf:"varint,4,opt,name=skip_no_primary,json=skipNoPrimary,proto3" json:"skip_no_primary,omitempty"` + IncludeVschema bool `protobuf:"varint,5,opt,name=include_vschema,json=includeVschema,proto3" json:"include_vschema,omitempty"` +} + +func (x *ValidateSchemaKeyspaceRequest) Reset() { + *x = ValidateSchemaKeyspaceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[204] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateSchemaKeyspaceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateSchemaKeyspaceRequest) ProtoMessage() {} + +func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[204] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateSchemaKeyspaceRequest.ProtoReflect.Descriptor instead. 
+func (*ValidateSchemaKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{204} +} + +func (x *ValidateSchemaKeyspaceRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *ValidateSchemaKeyspaceRequest) GetExcludeTables() []string { + if x != nil { + return x.ExcludeTables + } + return nil +} + +func (x *ValidateSchemaKeyspaceRequest) GetIncludeViews() bool { + if x != nil { + return x.IncludeViews + } + return false +} + +func (x *ValidateSchemaKeyspaceRequest) GetSkipNoPrimary() bool { + if x != nil { + return x.SkipNoPrimary + } + return false +} + +func (x *ValidateSchemaKeyspaceRequest) GetIncludeVschema() bool { + if x != nil { + return x.IncludeVschema + } + return false +} + +type ValidateSchemaKeyspaceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + ResultsByShard map[string]*ValidateShardResponse `protobuf:"bytes,2,rep,name=results_by_shard,json=resultsByShard,proto3" json:"results_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ValidateSchemaKeyspaceResponse) Reset() { + *x = ValidateSchemaKeyspaceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[205] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateSchemaKeyspaceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateSchemaKeyspaceResponse) ProtoMessage() {} + +func (x *ValidateSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[205] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms 
+ } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateSchemaKeyspaceResponse.ProtoReflect.Descriptor instead. +func (*ValidateSchemaKeyspaceResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{205} +} + +func (x *ValidateSchemaKeyspaceResponse) GetResults() []string { + if x != nil { + return x.Results + } + return nil +} + +func (x *ValidateSchemaKeyspaceResponse) GetResultsByShard() map[string]*ValidateShardResponse { + if x != nil { + return x.ResultsByShard + } + return nil +} + +type ValidateShardRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + PingTablets bool `protobuf:"varint,3,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"` +} + +func (x *ValidateShardRequest) Reset() { + *x = ValidateShardRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[206] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateShardRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateShardRequest) ProtoMessage() {} + +func (x *ValidateShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[206] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateShardRequest.ProtoReflect.Descriptor instead. 
+func (*ValidateShardRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{206} +} + +func (x *ValidateShardRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *ValidateShardRequest) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *ValidateShardRequest) GetPingTablets() bool { + if x != nil { + return x.PingTablets + } + return false +} + +type ValidateShardResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *ValidateShardResponse) Reset() { + *x = ValidateShardResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[207] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateShardResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateShardResponse) ProtoMessage() {} + +func (x *ValidateShardResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[207] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateShardResponse.ProtoReflect.Descriptor instead. 
+func (*ValidateShardResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{207} +} + +func (x *ValidateShardResponse) GetResults() []string { + if x != nil { + return x.Results + } + return nil +} + +type ValidateVersionKeyspaceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` +} + +func (x *ValidateVersionKeyspaceRequest) Reset() { + *x = ValidateVersionKeyspaceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[208] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateVersionKeyspaceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateVersionKeyspaceRequest) ProtoMessage() {} + +func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[208] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateVersionKeyspaceRequest.ProtoReflect.Descriptor instead. 
+func (*ValidateVersionKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{208} +} + +func (x *ValidateVersionKeyspaceRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +type ValidateVersionKeyspaceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + ResultsByShard map[string]*ValidateShardResponse `protobuf:"bytes,2,rep,name=results_by_shard,json=resultsByShard,proto3" json:"results_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ValidateVersionKeyspaceResponse) Reset() { + *x = ValidateVersionKeyspaceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[209] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateVersionKeyspaceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateVersionKeyspaceResponse) ProtoMessage() {} + +func (x *ValidateVersionKeyspaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[209] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateVersionKeyspaceResponse.ProtoReflect.Descriptor instead. 
+func (*ValidateVersionKeyspaceResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{209} +} + +func (x *ValidateVersionKeyspaceResponse) GetResults() []string { + if x != nil { + return x.Results + } + return nil +} + +func (x *ValidateVersionKeyspaceResponse) GetResultsByShard() map[string]*ValidateShardResponse { + if x != nil { + return x.ResultsByShard + } + return nil +} + +type ValidateVersionShardRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` +} + +func (x *ValidateVersionShardRequest) Reset() { + *x = ValidateVersionShardRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[210] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateVersionShardRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateVersionShardRequest) ProtoMessage() {} + +func (x *ValidateVersionShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[210] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateVersionShardRequest.ProtoReflect.Descriptor instead. 
+func (*ValidateVersionShardRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{210} +} + +func (x *ValidateVersionShardRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *ValidateVersionShardRequest) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +type ValidateVersionShardResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *ValidateVersionShardResponse) Reset() { + *x = ValidateVersionShardResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[211] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateVersionShardResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateVersionShardResponse) ProtoMessage() {} + +func (x *ValidateVersionShardResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[211] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateVersionShardResponse.ProtoReflect.Descriptor instead. 
+func (*ValidateVersionShardResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{211} +} + +func (x *ValidateVersionShardResponse) GetResults() []string { + if x != nil { + return x.Results + } + return nil +} + +type ValidateVSchemaRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shards []string `protobuf:"bytes,2,rep,name=shards,proto3" json:"shards,omitempty"` + ExcludeTables []string `protobuf:"bytes,3,rep,name=exclude_tables,json=excludeTables,proto3" json:"exclude_tables,omitempty"` + IncludeViews bool `protobuf:"varint,4,opt,name=include_views,json=includeViews,proto3" json:"include_views,omitempty"` +} + +func (x *ValidateVSchemaRequest) Reset() { + *x = ValidateVSchemaRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[212] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateVSchemaRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateVSchemaRequest) ProtoMessage() {} + +func (x *ValidateVSchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[212] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateVSchemaRequest.ProtoReflect.Descriptor instead. 
+func (*ValidateVSchemaRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{212} +} + +func (x *ValidateVSchemaRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *ValidateVSchemaRequest) GetShards() []string { + if x != nil { + return x.Shards + } + return nil +} + +func (x *ValidateVSchemaRequest) GetExcludeTables() []string { + if x != nil { + return x.ExcludeTables + } + return nil +} + +func (x *ValidateVSchemaRequest) GetIncludeViews() bool { + if x != nil { + return x.IncludeViews + } + return false +} + +type ValidateVSchemaResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + ResultsByShard map[string]*ValidateShardResponse `protobuf:"bytes,2,rep,name=results_by_shard,json=resultsByShard,proto3" json:"results_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ValidateVSchemaResponse) Reset() { + *x = ValidateVSchemaResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[213] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateVSchemaResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateVSchemaResponse) ProtoMessage() {} + +func (x *ValidateVSchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[213] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateVSchemaResponse.ProtoReflect.Descriptor instead. 
+func (*ValidateVSchemaResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{213} +} + +func (x *ValidateVSchemaResponse) GetResults() []string { + if x != nil { + return x.Results + } + return nil +} + +func (x *ValidateVSchemaResponse) GetResultsByShard() map[string]*ValidateShardResponse { + if x != nil { + return x.ResultsByShard + } + return nil +} + +type VDiffCreateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + TargetKeyspace string `protobuf:"bytes,2,opt,name=target_keyspace,json=targetKeyspace,proto3" json:"target_keyspace,omitempty"` + Uuid string `protobuf:"bytes,3,opt,name=uuid,proto3" json:"uuid,omitempty"` + SourceCells []string `protobuf:"bytes,4,rep,name=source_cells,json=sourceCells,proto3" json:"source_cells,omitempty"` + TargetCells []string `protobuf:"bytes,5,rep,name=target_cells,json=targetCells,proto3" json:"target_cells,omitempty"` + TabletTypes []topodata.TabletType `protobuf:"varint,6,rep,packed,name=tablet_types,json=tabletTypes,proto3,enum=topodata.TabletType" json:"tablet_types,omitempty"` + TabletSelectionPreference tabletmanagerdata.TabletSelectionPreference `protobuf:"varint,7,opt,name=tablet_selection_preference,json=tabletSelectionPreference,proto3,enum=tabletmanagerdata.TabletSelectionPreference" json:"tablet_selection_preference,omitempty"` + Tables []string `protobuf:"bytes,8,rep,name=tables,proto3" json:"tables,omitempty"` + Limit int64 `protobuf:"varint,9,opt,name=limit,proto3" json:"limit,omitempty"` + FilteredReplicationWaitTime *vttime.Duration `protobuf:"bytes,10,opt,name=filtered_replication_wait_time,json=filteredReplicationWaitTime,proto3" json:"filtered_replication_wait_time,omitempty"` + DebugQuery bool `protobuf:"varint,11,opt,name=debug_query,json=debugQuery,proto3" json:"debug_query,omitempty"` + OnlyPKs bool 
`protobuf:"varint,12,opt,name=only_p_ks,json=onlyPKs,proto3" json:"only_p_ks,omitempty"` + UpdateTableStats bool `protobuf:"varint,13,opt,name=update_table_stats,json=updateTableStats,proto3" json:"update_table_stats,omitempty"` + MaxExtraRowsToCompare int64 `protobuf:"varint,14,opt,name=max_extra_rows_to_compare,json=maxExtraRowsToCompare,proto3" json:"max_extra_rows_to_compare,omitempty"` + Wait bool `protobuf:"varint,15,opt,name=wait,proto3" json:"wait,omitempty"` + WaitUpdateInterval *vttime.Duration `protobuf:"bytes,16,opt,name=wait_update_interval,json=waitUpdateInterval,proto3" json:"wait_update_interval,omitempty"` + AutoRetry bool `protobuf:"varint,17,opt,name=auto_retry,json=autoRetry,proto3" json:"auto_retry,omitempty"` + Verbose bool `protobuf:"varint,18,opt,name=verbose,proto3" json:"verbose,omitempty"` +} + +func (x *VDiffCreateRequest) Reset() { + *x = VDiffCreateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[214] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffCreateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffCreateRequest) ProtoMessage() {} + +func (x *VDiffCreateRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[214] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffCreateRequest.ProtoReflect.Descriptor instead. 
+func (*VDiffCreateRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{214} +} + +func (x *VDiffCreateRequest) GetWorkflow() string { + if x != nil { + return x.Workflow + } + return "" +} + +func (x *VDiffCreateRequest) GetTargetKeyspace() string { + if x != nil { + return x.TargetKeyspace + } + return "" +} + +func (x *VDiffCreateRequest) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +func (x *VDiffCreateRequest) GetSourceCells() []string { + if x != nil { + return x.SourceCells + } + return nil +} + +func (x *VDiffCreateRequest) GetTargetCells() []string { + if x != nil { + return x.TargetCells + } + return nil +} + +func (x *VDiffCreateRequest) GetTabletTypes() []topodata.TabletType { + if x != nil { + return x.TabletTypes + } + return nil +} + +func (x *VDiffCreateRequest) GetTabletSelectionPreference() tabletmanagerdata.TabletSelectionPreference { + if x != nil { + return x.TabletSelectionPreference + } + return tabletmanagerdata.TabletSelectionPreference(0) +} + +func (x *VDiffCreateRequest) GetTables() []string { + if x != nil { + return x.Tables + } + return nil +} + +func (x *VDiffCreateRequest) GetLimit() int64 { + if x != nil { + return x.Limit + } + return 0 +} + +func (x *VDiffCreateRequest) GetFilteredReplicationWaitTime() *vttime.Duration { + if x != nil { + return x.FilteredReplicationWaitTime + } + return nil +} + +func (x *VDiffCreateRequest) GetDebugQuery() bool { + if x != nil { + return x.DebugQuery + } + return false +} + +func (x *VDiffCreateRequest) GetOnlyPKs() bool { + if x != nil { + return x.OnlyPKs + } + return false +} + +func (x *VDiffCreateRequest) GetUpdateTableStats() bool { + if x != nil { + return x.UpdateTableStats + } + return false +} + +func (x *VDiffCreateRequest) GetMaxExtraRowsToCompare() int64 { + if x != nil { + return x.MaxExtraRowsToCompare + } + return 0 +} + +func (x *VDiffCreateRequest) GetWait() bool { + if x != nil { + return x.Wait + } + return 
false +} + +func (x *VDiffCreateRequest) GetWaitUpdateInterval() *vttime.Duration { + if x != nil { + return x.WaitUpdateInterval + } + return nil +} + +func (x *VDiffCreateRequest) GetAutoRetry() bool { + if x != nil { + return x.AutoRetry + } + return false +} + +func (x *VDiffCreateRequest) GetVerbose() bool { + if x != nil { + return x.Verbose + } + return false +} + +type VDiffCreateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Intentionally upper case to maintain compatibility with + // vtctlclient and other VDiff client command output. + UUID string `protobuf:"bytes,1,opt,name=UUID,proto3" json:"UUID,omitempty"` +} + +func (x *VDiffCreateResponse) Reset() { + *x = VDiffCreateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[215] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffCreateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffCreateResponse) ProtoMessage() {} + +func (x *VDiffCreateResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[215] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffCreateResponse.ProtoReflect.Descriptor instead. 
+func (*VDiffCreateResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{215} +} + +func (x *VDiffCreateResponse) GetUUID() string { + if x != nil { + return x.UUID + } + return "" +} + +type VDiffDeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + TargetKeyspace string `protobuf:"bytes,2,opt,name=target_keyspace,json=targetKeyspace,proto3" json:"target_keyspace,omitempty"` + // This will be 'all' or a UUID. + Arg string `protobuf:"bytes,3,opt,name=arg,proto3" json:"arg,omitempty"` +} + +func (x *VDiffDeleteRequest) Reset() { + *x = VDiffDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[216] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffDeleteRequest) ProtoMessage() {} + +func (x *VDiffDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[216] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffDeleteRequest.ProtoReflect.Descriptor instead. 
+func (*VDiffDeleteRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{216} +} + +func (x *VDiffDeleteRequest) GetWorkflow() string { + if x != nil { + return x.Workflow + } + return "" +} + +func (x *VDiffDeleteRequest) GetTargetKeyspace() string { + if x != nil { + return x.TargetKeyspace + } + return "" +} + +func (x *VDiffDeleteRequest) GetArg() string { + if x != nil { + return x.Arg + } + return "" +} + +type VDiffDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VDiffDeleteResponse) Reset() { + *x = VDiffDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[217] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffDeleteResponse) ProtoMessage() {} + +func (x *VDiffDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[217] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffDeleteResponse.ProtoReflect.Descriptor instead. 
+func (*VDiffDeleteResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{217} +} + +type VDiffResumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + TargetKeyspace string `protobuf:"bytes,2,opt,name=target_keyspace,json=targetKeyspace,proto3" json:"target_keyspace,omitempty"` + Uuid string `protobuf:"bytes,3,opt,name=uuid,proto3" json:"uuid,omitempty"` +} + +func (x *VDiffResumeRequest) Reset() { + *x = VDiffResumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[218] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffResumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffResumeRequest) ProtoMessage() {} + +func (x *VDiffResumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[218] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffResumeRequest.ProtoReflect.Descriptor instead. 
+func (*VDiffResumeRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{218} +} + +func (x *VDiffResumeRequest) GetWorkflow() string { + if x != nil { + return x.Workflow + } + return "" +} + +func (x *VDiffResumeRequest) GetTargetKeyspace() string { + if x != nil { + return x.TargetKeyspace + } + return "" +} + +func (x *VDiffResumeRequest) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +type VDiffResumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VDiffResumeResponse) Reset() { + *x = VDiffResumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[219] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffResumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffResumeResponse) ProtoMessage() {} + +func (x *VDiffResumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[219] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffResumeResponse.ProtoReflect.Descriptor instead. +func (*VDiffResumeResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{219} +} + +type VDiffShowRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + TargetKeyspace string `protobuf:"bytes,2,opt,name=target_keyspace,json=targetKeyspace,proto3" json:"target_keyspace,omitempty"` + // This will be 'all', 'last', or a UUID. 
+ Arg string `protobuf:"bytes,3,opt,name=arg,proto3" json:"arg,omitempty"` +} + +func (x *VDiffShowRequest) Reset() { + *x = VDiffShowRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[220] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffShowRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffShowRequest) ProtoMessage() {} + +func (x *VDiffShowRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[220] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffShowRequest.ProtoReflect.Descriptor instead. +func (*VDiffShowRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{220} +} + +func (x *VDiffShowRequest) GetWorkflow() string { + if x != nil { + return x.Workflow + } + return "" +} + +func (x *VDiffShowRequest) GetTargetKeyspace() string { + if x != nil { + return x.TargetKeyspace + } + return "" +} + +func (x *VDiffShowRequest) GetArg() string { + if x != nil { + return x.Arg + } + return "" +} + +type VDiffShowResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key is keyspace/shard. 
+ TabletResponses map[string]*tabletmanagerdata.VDiffResponse `protobuf:"bytes,1,rep,name=tablet_responses,json=tabletResponses,proto3" json:"tablet_responses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *VDiffShowResponse) Reset() { + *x = VDiffShowResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[221] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffShowResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffShowResponse) ProtoMessage() {} + +func (x *VDiffShowResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[221] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffShowResponse.ProtoReflect.Descriptor instead. 
+func (*VDiffShowResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{221} +} + +func (x *VDiffShowResponse) GetTabletResponses() map[string]*tabletmanagerdata.VDiffResponse { + if x != nil { + return x.TabletResponses + } + return nil +} + +type VDiffStopRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + TargetKeyspace string `protobuf:"bytes,2,opt,name=target_keyspace,json=targetKeyspace,proto3" json:"target_keyspace,omitempty"` + Uuid string `protobuf:"bytes,3,opt,name=uuid,proto3" json:"uuid,omitempty"` +} + +func (x *VDiffStopRequest) Reset() { + *x = VDiffStopRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[222] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffStopRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffStopRequest) ProtoMessage() {} + +func (x *VDiffStopRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[222] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffStopRequest.ProtoReflect.Descriptor instead. 
+func (*VDiffStopRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{222} +} + +func (x *VDiffStopRequest) GetWorkflow() string { + if x != nil { + return x.Workflow + } + return "" +} + +func (x *VDiffStopRequest) GetTargetKeyspace() string { + if x != nil { + return x.TargetKeyspace + } + return "" +} + +func (x *VDiffStopRequest) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +type VDiffStopResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VDiffStopResponse) Reset() { + *x = VDiffStopResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[223] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffStopResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffStopResponse) ProtoMessage() {} + +func (x *VDiffStopResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[223] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffStopResponse.ProtoReflect.Descriptor instead. 
+func (*VDiffStopResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{223} +} + +type WorkflowDeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Workflow string `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` + KeepData bool `protobuf:"varint,3,opt,name=keep_data,json=keepData,proto3" json:"keep_data,omitempty"` + KeepRoutingRules bool `protobuf:"varint,4,opt,name=keep_routing_rules,json=keepRoutingRules,proto3" json:"keep_routing_rules,omitempty"` +} + +func (x *WorkflowDeleteRequest) Reset() { + *x = WorkflowDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[224] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowDeleteRequest) ProtoMessage() {} + +func (x *WorkflowDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[224] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowDeleteRequest.ProtoReflect.Descriptor instead. 
+func (*WorkflowDeleteRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{224} +} + +func (x *WorkflowDeleteRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *WorkflowDeleteRequest) GetWorkflow() string { + if x != nil { + return x.Workflow + } + return "" +} + +func (x *WorkflowDeleteRequest) GetKeepData() bool { + if x != nil { + return x.KeepData + } + return false +} + +func (x *WorkflowDeleteRequest) GetKeepRoutingRules() bool { + if x != nil { + return x.KeepRoutingRules + } + return false +} + +type WorkflowDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Summary string `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"` + Details []*WorkflowDeleteResponse_TabletInfo `protobuf:"bytes,2,rep,name=details,proto3" json:"details,omitempty"` +} + +func (x *WorkflowDeleteResponse) Reset() { + *x = WorkflowDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[225] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowDeleteResponse) ProtoMessage() {} + +func (x *WorkflowDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[225] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowDeleteResponse.ProtoReflect.Descriptor instead. 
+func (*WorkflowDeleteResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{225} +} + +func (x *WorkflowDeleteResponse) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *WorkflowDeleteResponse) GetDetails() []*WorkflowDeleteResponse_TabletInfo { + if x != nil { + return x.Details + } + return nil +} + +type WorkflowStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Workflow string `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` +} + +func (x *WorkflowStatusRequest) Reset() { + *x = WorkflowStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[226] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowStatusRequest) ProtoMessage() {} + +func (x *WorkflowStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[226] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowStatusRequest.ProtoReflect.Descriptor instead. 
+func (*WorkflowStatusRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{226} +} + +func (x *WorkflowStatusRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *WorkflowStatusRequest) GetWorkflow() string { + if x != nil { + return x.Workflow + } + return "" +} + +type WorkflowStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key is keyspace/shard. + TableCopyState map[string]*WorkflowStatusResponse_TableCopyState `protobuf:"bytes,1,rep,name=table_copy_state,json=tableCopyState,proto3" json:"table_copy_state,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ShardStreams map[string]*WorkflowStatusResponse_ShardStreams `protobuf:"bytes,2,rep,name=shard_streams,json=shardStreams,proto3" json:"shard_streams,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TrafficState string `protobuf:"bytes,3,opt,name=traffic_state,json=trafficState,proto3" json:"traffic_state,omitempty"` +} + +func (x *WorkflowStatusResponse) Reset() { + *x = WorkflowStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[227] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowStatusResponse) ProtoMessage() {} + +func (x *WorkflowStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[227] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowStatusResponse.ProtoReflect.Descriptor instead. 
+func (*WorkflowStatusResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{227} +} + +func (x *WorkflowStatusResponse) GetTableCopyState() map[string]*WorkflowStatusResponse_TableCopyState { + if x != nil { + return x.TableCopyState + } + return nil +} + +func (x *WorkflowStatusResponse) GetShardStreams() map[string]*WorkflowStatusResponse_ShardStreams { + if x != nil { + return x.ShardStreams + } + return nil +} + +func (x *WorkflowStatusResponse) GetTrafficState() string { + if x != nil { + return x.TrafficState + } + return "" +} + +type WorkflowSwitchTrafficRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Workflow string `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` + Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` + TabletTypes []topodata.TabletType `protobuf:"varint,4,rep,packed,name=tablet_types,json=tabletTypes,proto3,enum=topodata.TabletType" json:"tablet_types,omitempty"` + MaxReplicationLagAllowed *vttime.Duration `protobuf:"bytes,5,opt,name=max_replication_lag_allowed,json=maxReplicationLagAllowed,proto3" json:"max_replication_lag_allowed,omitempty"` + EnableReverseReplication bool `protobuf:"varint,6,opt,name=enable_reverse_replication,json=enableReverseReplication,proto3" json:"enable_reverse_replication,omitempty"` + Direction int32 `protobuf:"varint,7,opt,name=direction,proto3" json:"direction,omitempty"` + Timeout *vttime.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"` + DryRun bool `protobuf:"varint,9,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` + InitializeTargetSequences bool `protobuf:"varint,10,opt,name=initialize_target_sequences,json=initializeTargetSequences,proto3" json:"initialize_target_sequences,omitempty"` +} + +func (x 
*WorkflowSwitchTrafficRequest) Reset() { + *x = WorkflowSwitchTrafficRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[228] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowSwitchTrafficRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowSwitchTrafficRequest) ProtoMessage() {} + +func (x *WorkflowSwitchTrafficRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[228] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowSwitchTrafficRequest.ProtoReflect.Descriptor instead. +func (*WorkflowSwitchTrafficRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{228} +} + +func (x *WorkflowSwitchTrafficRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *WorkflowSwitchTrafficRequest) GetWorkflow() string { + if x != nil { + return x.Workflow + } + return "" +} + +func (x *WorkflowSwitchTrafficRequest) GetCells() []string { + if x != nil { + return x.Cells + } + return nil +} + +func (x *WorkflowSwitchTrafficRequest) GetTabletTypes() []topodata.TabletType { + if x != nil { + return x.TabletTypes + } + return nil +} + +func (x *WorkflowSwitchTrafficRequest) GetMaxReplicationLagAllowed() *vttime.Duration { + if x != nil { + return x.MaxReplicationLagAllowed + } + return nil +} + +func (x *WorkflowSwitchTrafficRequest) GetEnableReverseReplication() bool { + if x != nil { + return x.EnableReverseReplication + } + return false +} + +func (x *WorkflowSwitchTrafficRequest) GetDirection() int32 { + if x != nil { + return x.Direction + } + return 0 +} + +func (x *WorkflowSwitchTrafficRequest) GetTimeout() *vttime.Duration { + if x != nil { + return x.Timeout + } + 
return nil +} + +func (x *WorkflowSwitchTrafficRequest) GetDryRun() bool { + if x != nil { + return x.DryRun + } + return false +} + +func (x *WorkflowSwitchTrafficRequest) GetInitializeTargetSequences() bool { + if x != nil { + return x.InitializeTargetSequences + } + return false +} + +type WorkflowSwitchTrafficResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Summary string `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"` + StartState string `protobuf:"bytes,2,opt,name=start_state,json=startState,proto3" json:"start_state,omitempty"` + CurrentState string `protobuf:"bytes,3,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"` + DryRunResults []string `protobuf:"bytes,4,rep,name=dry_run_results,json=dryRunResults,proto3" json:"dry_run_results,omitempty"` +} + +func (x *WorkflowSwitchTrafficResponse) Reset() { + *x = WorkflowSwitchTrafficResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[229] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowSwitchTrafficResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowSwitchTrafficResponse) ProtoMessage() {} + +func (x *WorkflowSwitchTrafficResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[229] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowSwitchTrafficResponse.ProtoReflect.Descriptor instead. 
+func (*WorkflowSwitchTrafficResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{229} +} + +func (x *WorkflowSwitchTrafficResponse) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *WorkflowSwitchTrafficResponse) GetStartState() string { + if x != nil { + return x.StartState + } + return "" +} + +func (x *WorkflowSwitchTrafficResponse) GetCurrentState() string { + if x != nil { + return x.CurrentState + } + return "" +} + +func (x *WorkflowSwitchTrafficResponse) GetDryRunResults() []string { + if x != nil { + return x.DryRunResults + } + return nil +} + +type WorkflowUpdateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // TabletRequest gets passed on to each primary tablet involved + // in the workflow via the UpdateVReplicationWorkflow tabletmanager RPC. + TabletRequest *tabletmanagerdata.UpdateVReplicationWorkflowRequest `protobuf:"bytes,2,opt,name=tablet_request,json=tabletRequest,proto3" json:"tablet_request,omitempty"` +} + +func (x *WorkflowUpdateRequest) Reset() { + *x = WorkflowUpdateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[230] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowUpdateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowUpdateRequest) ProtoMessage() {} + +func (x *WorkflowUpdateRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[230] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowUpdateRequest.ProtoReflect.Descriptor instead. 
+func (*WorkflowUpdateRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{230} +} + +func (x *WorkflowUpdateRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *WorkflowUpdateRequest) GetTabletRequest() *tabletmanagerdata.UpdateVReplicationWorkflowRequest { + if x != nil { + return x.TabletRequest + } + return nil +} + +type WorkflowUpdateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Summary string `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"` + Details []*WorkflowUpdateResponse_TabletInfo `protobuf:"bytes,2,rep,name=details,proto3" json:"details,omitempty"` +} + +func (x *WorkflowUpdateResponse) Reset() { + *x = WorkflowUpdateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[231] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowUpdateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowUpdateResponse) ProtoMessage() {} + +func (x *WorkflowUpdateResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[231] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowUpdateResponse.ProtoReflect.Descriptor instead. 
+func (*WorkflowUpdateResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{231} +} + +func (x *WorkflowUpdateResponse) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *WorkflowUpdateResponse) GetDetails() []*WorkflowUpdateResponse_TabletInfo { + if x != nil { + return x.Details + } + return nil +} + +type Workflow_ReplicationLocation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shards []string `protobuf:"bytes,2,rep,name=shards,proto3" json:"shards,omitempty"` +} + +func (x *Workflow_ReplicationLocation) Reset() { + *x = Workflow_ReplicationLocation{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[233] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Workflow_ReplicationLocation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Workflow_ReplicationLocation) ProtoMessage() {} + +func (x *Workflow_ReplicationLocation) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[233] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Workflow_ReplicationLocation.ProtoReflect.Descriptor instead. 
+func (*Workflow_ReplicationLocation) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{7, 1} +} + +func (x *Workflow_ReplicationLocation) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *Workflow_ReplicationLocation) GetShards() []string { + if x != nil { + return x.Shards + } + return nil +} + +type Workflow_ShardStream struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Streams []*Workflow_Stream `protobuf:"bytes,1,rep,name=streams,proto3" json:"streams,omitempty"` + TabletControls []*topodata.Shard_TabletControl `protobuf:"bytes,2,rep,name=tablet_controls,json=tabletControls,proto3" json:"tablet_controls,omitempty"` + IsPrimaryServing bool `protobuf:"varint,3,opt,name=is_primary_serving,json=isPrimaryServing,proto3" json:"is_primary_serving,omitempty"` +} + +func (x *Workflow_ShardStream) Reset() { + *x = Workflow_ShardStream{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[234] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Workflow_ShardStream) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Workflow_ShardStream) ProtoMessage() {} + +func (x *Workflow_ShardStream) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[234] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Workflow_ShardStream.ProtoReflect.Descriptor instead. 
+func (*Workflow_ShardStream) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{7, 2} +} + +func (x *Workflow_ShardStream) GetStreams() []*Workflow_Stream { + if x != nil { + return x.Streams + } + return nil +} + +func (x *Workflow_ShardStream) GetTabletControls() []*topodata.Shard_TabletControl { + if x != nil { + return x.TabletControls + } + return nil +} + +func (x *Workflow_ShardStream) GetIsPrimaryServing() bool { + if x != nil { + return x.IsPrimaryServing + } + return false +} + +type Workflow_Stream struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + Tablet *topodata.TabletAlias `protobuf:"bytes,3,opt,name=tablet,proto3" json:"tablet,omitempty"` + BinlogSource *binlogdata.BinlogSource `protobuf:"bytes,4,opt,name=binlog_source,json=binlogSource,proto3" json:"binlog_source,omitempty"` + Position string `protobuf:"bytes,5,opt,name=position,proto3" json:"position,omitempty"` + StopPosition string `protobuf:"bytes,6,opt,name=stop_position,json=stopPosition,proto3" json:"stop_position,omitempty"` + State string `protobuf:"bytes,7,opt,name=state,proto3" json:"state,omitempty"` + DbName string `protobuf:"bytes,8,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"` + TransactionTimestamp *vttime.Time `protobuf:"bytes,9,opt,name=transaction_timestamp,json=transactionTimestamp,proto3" json:"transaction_timestamp,omitempty"` + TimeUpdated *vttime.Time `protobuf:"bytes,10,opt,name=time_updated,json=timeUpdated,proto3" json:"time_updated,omitempty"` + Message string `protobuf:"bytes,11,opt,name=message,proto3" json:"message,omitempty"` + CopyStates []*Workflow_Stream_CopyState `protobuf:"bytes,12,rep,name=copy_states,json=copyStates,proto3" json:"copy_states,omitempty"` + Logs []*Workflow_Stream_Log 
`protobuf:"bytes,13,rep,name=logs,proto3" json:"logs,omitempty"` + // LogFetchError is set if we fail to fetch some logs for this stream. We + // will never fail to fetch workflows because we cannot fetch the logs, but + // we will still forward log-fetch errors to the caller, should that be + // relevant to the context in which they are fetching workflows. + // + // Note that this field being set does not necessarily mean that Logs is nil; + // if there are N logs that exist for the stream, and we fail to fetch the + // ith log, we will still return logs in [0, i) + (i, N]. + LogFetchError string `protobuf:"bytes,14,opt,name=log_fetch_error,json=logFetchError,proto3" json:"log_fetch_error,omitempty"` + Tags []string `protobuf:"bytes,15,rep,name=tags,proto3" json:"tags,omitempty"` + RowsCopied int64 `protobuf:"varint,16,opt,name=rows_copied,json=rowsCopied,proto3" json:"rows_copied,omitempty"` + ThrottlerStatus *Workflow_Stream_ThrottlerStatus `protobuf:"bytes,17,opt,name=throttler_status,json=throttlerStatus,proto3" json:"throttler_status,omitempty"` +} + +func (x *Workflow_Stream) Reset() { + *x = Workflow_Stream{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[235] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Workflow_Stream) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Workflow_Stream) ProtoMessage() {} + +func (x *Workflow_Stream) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[235] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Workflow_Stream.ProtoReflect.Descriptor instead. 
+func (*Workflow_Stream) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{7, 3} +} + +func (x *Workflow_Stream) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *Workflow_Stream) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *Workflow_Stream) GetTablet() *topodata.TabletAlias { + if x != nil { + return x.Tablet + } + return nil +} + +func (x *Workflow_Stream) GetBinlogSource() *binlogdata.BinlogSource { + if x != nil { + return x.BinlogSource + } + return nil +} + +func (x *Workflow_Stream) GetPosition() string { + if x != nil { + return x.Position + } + return "" +} + +func (x *Workflow_Stream) GetStopPosition() string { + if x != nil { + return x.StopPosition + } + return "" +} + +func (x *Workflow_Stream) GetState() string { + if x != nil { + return x.State + } + return "" +} + +func (x *Workflow_Stream) GetDbName() string { + if x != nil { + return x.DbName + } + return "" +} + +func (x *Workflow_Stream) GetTransactionTimestamp() *vttime.Time { + if x != nil { + return x.TransactionTimestamp + } + return nil +} + +func (x *Workflow_Stream) GetTimeUpdated() *vttime.Time { + if x != nil { + return x.TimeUpdated + } + return nil +} + +func (x *Workflow_Stream) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Workflow_Stream) GetCopyStates() []*Workflow_Stream_CopyState { + if x != nil { + return x.CopyStates + } + return nil +} + +func (x *Workflow_Stream) GetLogs() []*Workflow_Stream_Log { + if x != nil { + return x.Logs + } + return nil +} + +func (x *Workflow_Stream) GetLogFetchError() string { + if x != nil { + return x.LogFetchError + } + return "" +} + +func (x *Workflow_Stream) GetTags() []string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *Workflow_Stream) GetRowsCopied() int64 { + if x != nil { + return x.RowsCopied + } + return 0 +} + +func (x *Workflow_Stream) GetThrottlerStatus() 
*Workflow_Stream_ThrottlerStatus { + if x != nil { + return x.ThrottlerStatus + } + return nil +} + +type Workflow_Stream_CopyState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"` + LastPk string `protobuf:"bytes,2,opt,name=last_pk,json=lastPk,proto3" json:"last_pk,omitempty"` +} + +func (x *Workflow_Stream_CopyState) Reset() { + *x = Workflow_Stream_CopyState{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[236] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Workflow_Stream_CopyState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Workflow_Stream_CopyState) ProtoMessage() {} + +func (x *Workflow_Stream_CopyState) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[236] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Workflow_Stream_CopyState.ProtoReflect.Descriptor instead. 
+func (*Workflow_Stream_CopyState) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{7, 3, 0} +} + +func (x *Workflow_Stream_CopyState) GetTable() string { + if x != nil { + return x.Table + } + return "" +} + +func (x *Workflow_Stream_CopyState) GetLastPk() string { + if x != nil { + return x.LastPk + } + return "" +} + +type Workflow_Stream_Log struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + StreamId int64 `protobuf:"varint,2,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"` + Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` + State string `protobuf:"bytes,4,opt,name=state,proto3" json:"state,omitempty"` + CreatedAt *vttime.Time `protobuf:"bytes,5,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + UpdatedAt *vttime.Time `protobuf:"bytes,6,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` + Message string `protobuf:"bytes,7,opt,name=message,proto3" json:"message,omitempty"` + Count int64 `protobuf:"varint,8,opt,name=count,proto3" json:"count,omitempty"` +} + +func (x *Workflow_Stream_Log) Reset() { + *x = Workflow_Stream_Log{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[237] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Workflow_Stream_Log) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Workflow_Stream_Log) ProtoMessage() {} + +func (x *Workflow_Stream_Log) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[237] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
Workflow_Stream_Log.ProtoReflect.Descriptor instead. +func (*Workflow_Stream_Log) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{7, 3, 1} +} + +func (x *Workflow_Stream_Log) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *Workflow_Stream_Log) GetStreamId() int64 { + if x != nil { + return x.StreamId + } + return 0 +} + +func (x *Workflow_Stream_Log) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Workflow_Stream_Log) GetState() string { + if x != nil { + return x.State + } + return "" +} + +func (x *Workflow_Stream_Log) GetCreatedAt() *vttime.Time { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *Workflow_Stream_Log) GetUpdatedAt() *vttime.Time { + if x != nil { + return x.UpdatedAt + } + return nil +} + +func (x *Workflow_Stream_Log) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Workflow_Stream_Log) GetCount() int64 { + if x != nil { + return x.Count + } + return 0 +} + +type Workflow_Stream_ThrottlerStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ComponentThrottled string `protobuf:"bytes,1,opt,name=component_throttled,json=componentThrottled,proto3" json:"component_throttled,omitempty"` + TimeThrottled *vttime.Time `protobuf:"bytes,2,opt,name=time_throttled,json=timeThrottled,proto3" json:"time_throttled,omitempty"` +} + +func (x *Workflow_Stream_ThrottlerStatus) Reset() { + *x = Workflow_Stream_ThrottlerStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[238] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Workflow_Stream_ThrottlerStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Workflow_Stream_ThrottlerStatus) ProtoMessage() {} + +func (x *Workflow_Stream_ThrottlerStatus) ProtoReflect() protoreflect.Message { + mi := 
&file_vtctldata_proto_msgTypes[238] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Workflow_Stream_ThrottlerStatus.ProtoReflect.Descriptor instead. +func (*Workflow_Stream_ThrottlerStatus) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{7, 3, 2} +} + +func (x *Workflow_Stream_ThrottlerStatus) GetComponentThrottled() string { + if x != nil { + return x.ComponentThrottled + } + return "" +} + +func (x *Workflow_Stream_ThrottlerStatus) GetTimeThrottled() *vttime.Time { + if x != nil { + return x.TimeThrottled + } + return nil +} + +type GetSrvKeyspaceNamesResponse_NameList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` +} + +func (x *GetSrvKeyspaceNamesResponse_NameList) Reset() { + *x = GetSrvKeyspaceNamesResponse_NameList{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[246] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSrvKeyspaceNamesResponse_NameList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSrvKeyspaceNamesResponse_NameList) ProtoMessage() {} + +func (x *GetSrvKeyspaceNamesResponse_NameList) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[246] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSrvKeyspaceNamesResponse_NameList.ProtoReflect.Descriptor instead. 
+func (*GetSrvKeyspaceNamesResponse_NameList) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{84, 1} +} + +func (x *GetSrvKeyspaceNamesResponse_NameList) GetNames() []string { + if x != nil { + return x.Names + } + return nil +} + +type MoveTablesCreateResponse_TabletInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tablet *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` + // Created is set if the workflow was created on this tablet or not. + Created bool `protobuf:"varint,2,opt,name=created,proto3" json:"created,omitempty"` +} + +func (x *MoveTablesCreateResponse_TabletInfo) Reset() { + *x = MoveTablesCreateResponse_TabletInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[250] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MoveTablesCreateResponse_TabletInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MoveTablesCreateResponse_TabletInfo) ProtoMessage() {} + +func (x *MoveTablesCreateResponse_TabletInfo) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[250] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MoveTablesCreateResponse_TabletInfo.ProtoReflect.Descriptor instead. 
+func (*MoveTablesCreateResponse_TabletInfo) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{128, 0} +} + +func (x *MoveTablesCreateResponse_TabletInfo) GetTablet() *topodata.TabletAlias { + if x != nil { + return x.Tablet + } + return nil +} + +func (x *MoveTablesCreateResponse_TabletInfo) GetCreated() bool { + if x != nil { + return x.Created + } + return false +} + +type WorkflowDeleteResponse_TabletInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tablet *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` + // Delete is set if the workflow was deleted on this tablet. + Deleted bool `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"` +} + +func (x *WorkflowDeleteResponse_TabletInfo) Reset() { + *x = WorkflowDeleteResponse_TabletInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[260] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowDeleteResponse_TabletInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowDeleteResponse_TabletInfo) ProtoMessage() {} + +func (x *WorkflowDeleteResponse_TabletInfo) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[260] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowDeleteResponse_TabletInfo.ProtoReflect.Descriptor instead. 
+func (*WorkflowDeleteResponse_TabletInfo) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{225, 0} +} + +func (x *WorkflowDeleteResponse_TabletInfo) GetTablet() *topodata.TabletAlias { + if x != nil { + return x.Tablet + } + return nil +} + +func (x *WorkflowDeleteResponse_TabletInfo) GetDeleted() bool { + if x != nil { + return x.Deleted + } + return false +} + +type WorkflowStatusResponse_TableCopyState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RowsCopied int64 `protobuf:"varint,1,opt,name=rows_copied,json=rowsCopied,proto3" json:"rows_copied,omitempty"` + RowsTotal int64 `protobuf:"varint,2,opt,name=rows_total,json=rowsTotal,proto3" json:"rows_total,omitempty"` + RowsPercentage float32 `protobuf:"fixed32,3,opt,name=rows_percentage,json=rowsPercentage,proto3" json:"rows_percentage,omitempty"` + BytesCopied int64 `protobuf:"varint,4,opt,name=bytes_copied,json=bytesCopied,proto3" json:"bytes_copied,omitempty"` + BytesTotal int64 `protobuf:"varint,5,opt,name=bytes_total,json=bytesTotal,proto3" json:"bytes_total,omitempty"` + BytesPercentage float32 `protobuf:"fixed32,6,opt,name=bytes_percentage,json=bytesPercentage,proto3" json:"bytes_percentage,omitempty"` +} + +func (x *WorkflowStatusResponse_TableCopyState) Reset() { + *x = WorkflowStatusResponse_TableCopyState{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[261] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowStatusResponse_TableCopyState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowStatusResponse_TableCopyState) ProtoMessage() {} + +func (x *WorkflowStatusResponse_TableCopyState) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[261] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == 
nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowStatusResponse_TableCopyState.ProtoReflect.Descriptor instead. +func (*WorkflowStatusResponse_TableCopyState) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{227, 0} +} + +func (x *WorkflowStatusResponse_TableCopyState) GetRowsCopied() int64 { + if x != nil { + return x.RowsCopied + } + return 0 +} + +func (x *WorkflowStatusResponse_TableCopyState) GetRowsTotal() int64 { + if x != nil { + return x.RowsTotal + } + return 0 +} + +func (x *WorkflowStatusResponse_TableCopyState) GetRowsPercentage() float32 { + if x != nil { + return x.RowsPercentage + } + return 0 +} + +func (x *WorkflowStatusResponse_TableCopyState) GetBytesCopied() int64 { + if x != nil { + return x.BytesCopied + } + return 0 +} + +func (x *WorkflowStatusResponse_TableCopyState) GetBytesTotal() int64 { + if x != nil { + return x.BytesTotal + } + return 0 +} + +func (x *WorkflowStatusResponse_TableCopyState) GetBytesPercentage() float32 { + if x != nil { + return x.BytesPercentage + } + return 0 +} + +type WorkflowStatusResponse_ShardStreamState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Tablet *topodata.TabletAlias `protobuf:"bytes,2,opt,name=tablet,proto3" json:"tablet,omitempty"` + SourceShard string `protobuf:"bytes,3,opt,name=source_shard,json=sourceShard,proto3" json:"source_shard,omitempty"` + Position string `protobuf:"bytes,4,opt,name=position,proto3" json:"position,omitempty"` + Status string `protobuf:"bytes,5,opt,name=status,proto3" json:"status,omitempty"` + Info string `protobuf:"bytes,6,opt,name=info,proto3" json:"info,omitempty"` +} + +func (x *WorkflowStatusResponse_ShardStreamState) Reset() { + *x = WorkflowStatusResponse_ShardStreamState{} + if protoimpl.UnsafeEnabled { + mi := 
&file_vtctldata_proto_msgTypes[262] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowStatusResponse_ShardStreamState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowStatusResponse_ShardStreamState) ProtoMessage() {} + +func (x *WorkflowStatusResponse_ShardStreamState) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[262] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowStatusResponse_ShardStreamState.ProtoReflect.Descriptor instead. +func (*WorkflowStatusResponse_ShardStreamState) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{227, 1} +} + +func (x *WorkflowStatusResponse_ShardStreamState) GetId() int32 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *WorkflowStatusResponse_ShardStreamState) GetTablet() *topodata.TabletAlias { + if x != nil { + return x.Tablet + } + return nil +} + +func (x *WorkflowStatusResponse_ShardStreamState) GetSourceShard() string { + if x != nil { + return x.SourceShard + } + return "" +} + +func (x *WorkflowStatusResponse_ShardStreamState) GetPosition() string { + if x != nil { + return x.Position + } + return "" +} + +func (x *WorkflowStatusResponse_ShardStreamState) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *WorkflowStatusResponse_ShardStreamState) GetInfo() string { + if x != nil { + return x.Info + } + return "" +} + +type WorkflowStatusResponse_ShardStreams struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Streams []*WorkflowStatusResponse_ShardStreamState `protobuf:"bytes,2,rep,name=streams,proto3" json:"streams,omitempty"` +} + +func (x 
*WorkflowStatusResponse_ShardStreams) Reset() { + *x = WorkflowStatusResponse_ShardStreams{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[263] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowStatusResponse_ShardStreams) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowStatusResponse_ShardStreams) ProtoMessage() {} + +func (x *WorkflowStatusResponse_ShardStreams) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[263] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowStatusResponse_ShardStreams.ProtoReflect.Descriptor instead. +func (*WorkflowStatusResponse_ShardStreams) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{227, 2} +} + +func (x *WorkflowStatusResponse_ShardStreams) GetStreams() []*WorkflowStatusResponse_ShardStreamState { + if x != nil { + return x.Streams + } + return nil +} + +type WorkflowUpdateResponse_TabletInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tablet *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` + // Changed is true if any of the provided values were different + // than what was already stored on this tablet. 
+ Changed bool `protobuf:"varint,2,opt,name=changed,proto3" json:"changed,omitempty"` +} + +func (x *WorkflowUpdateResponse_TabletInfo) Reset() { + *x = WorkflowUpdateResponse_TabletInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[266] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowUpdateResponse_TabletInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowUpdateResponse_TabletInfo) ProtoMessage() {} + +func (x *WorkflowUpdateResponse_TabletInfo) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[266] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowUpdateResponse_TabletInfo.ProtoReflect.Descriptor instead. +func (*WorkflowUpdateResponse_TabletInfo) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{231, 0} +} + +func (x *WorkflowUpdateResponse_TabletInfo) GetTablet() *topodata.TabletAlias { + if x != nil { + return x.Tablet + } + return nil +} + +func (x *WorkflowUpdateResponse_TabletInfo) GetChanged() bool { + if x != nil { + return x.Changed + } + return false +} + +var File_vtctldata_proto protoreflect.FileDescriptor + +var file_vtctldata_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x09, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x10, 0x62, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0d, + 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0e, 0x6d, + 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0b, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x1a, 0x15, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0d, 0x76, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0b, 0x76, 0x74, 0x72, 0x70, 0x63, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0c, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x57, 0x0a, 0x1a, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x56, + 0x74, 0x63, 0x74, 0x6c, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x43, 0x0a, + 0x1b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x43, 0x6f, 0x6d, + 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x05, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, + 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x22, 0x89, 0x01, 0x0a, 0x18, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x74, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x61, 
0x62, + 0x6c, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x78, 0x70, + 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x64, 0x6c, 0x22, 0x83, + 0x06, 0x0a, 0x13, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x66, 0x74, + 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, + 0x74, 0x6f, 0x70, 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x4a, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, @@ -10982,889 +15497,1463 @@ var file_vtctldata_proto_rawDesc = []byte{ 0x44, 0x64, 0x6c, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 
0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, - 0x79, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x4e, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x5e, 0x0a, 0x05, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, - 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, - 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0xd2, 0x0c, 0x0a, 0x08, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x5f, - 0x76, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, - 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x6d, 0x61, 0x78, 0x56, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x12, 0x4a, 0x0a, 0x0d, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x11, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x60, 0x0a, 0x11, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x35, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x79, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x6c, 0x0a, 
0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x5f, 0x63, 0x6f, + 0x70, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x74, 0x6f, 0x6d, 0x69, 0x63, + 0x43, 0x6f, 0x70, 0x79, 0x22, 0x4e, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x22, 0x85, 0x13, 0x0a, 0x0f, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x16, + 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x06, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2f, 0x0a, 0x13, + 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x3f, 0x0a, + 0x08, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x72, 0x61, + 0x74, 0x65, 0x67, 0x79, 0x52, 0x08, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x18, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x08, 0x61, 0x64, 0x64, 0x65, + 0x64, 0x5f, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, + 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x07, 0x61, 0x64, 0x64, 0x65, 0x64, 0x41, + 0x74, 0x12, 0x2f, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x61, + 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, + 0x41, 0x74, 0x12, 0x27, 0x0a, 0x08, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x61, 0x74, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x52, 0x07, 0x72, 0x65, 0x61, 0x64, 0x79, 0x41, 0x74, 0x12, 0x2b, 0x0a, 0x0a, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0c, 0x2e, 0x76, 0x74, 
0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x09, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3b, 0x0a, 0x12, 0x6c, 0x69, 0x76, 0x65, + 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x52, 0x11, 0x6c, 0x69, 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2f, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6c, + 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x30, 0x0a, 0x0d, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x65, + 0x64, 0x5f, 0x75, 0x70, 0x5f, 0x61, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, + 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x63, 0x6c, 0x65, + 0x61, 0x6e, 0x65, 0x64, 0x55, 0x70, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1c, + 0x0a, 0x09, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, + 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x72, + 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x06, 
0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x6d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x17, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x10, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x64, 0x6c, 0x5f, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x64, 0x6c, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x19, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x65, 0x74, 0x61, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x1a, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0a, 0x65, 0x74, 0x61, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, + 0x1f, 0x0a, 0x0b, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, 0x1b, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x1c, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x12, + 0x2a, 0x0a, 0x11, 0x61, 0x64, 0x64, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, + 
0x6b, 0x65, 0x79, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x61, 0x64, 0x64, 0x65, + 0x64, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x72, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, + 0x79, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, + 0x64, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6c, + 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, + 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x12, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, + 0x63, 0x74, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x20, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, + 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x13, 0x70, 0x6f, 0x73, 0x74, 0x70, + 0x6f, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x21, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x70, 0x6f, 0x73, 0x74, 0x70, 0x6f, 0x6e, 0x65, 0x43, 0x6f, + 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x18, 0x72, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x72, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x64, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x12, 0x44, 0x0a, 0x1f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6e, 0x6f, 0x5f, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1b, 0x64, 0x72, 0x6f, 0x70, + 0x70, 0x65, 0x64, 0x4e, 0x6f, 0x44, 
0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x43, 0x6f, 0x6c, 0x75, + 0x6d, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x61, 0x6e, + 0x64, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x65, 0x64, + 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, + 0x65, 0x76, 0x65, 0x72, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x6f, 0x74, 0x65, 0x73, 0x18, + 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, 0x69, 0x62, 0x6c, + 0x65, 0x4e, 0x6f, 0x74, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, + 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x26, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x75, 0x75, + 0x69, 0x64, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, + 0x65, 0x64, 0x55, 0x75, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x76, 0x69, 0x65, + 0x77, 0x18, 0x28, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x69, 0x73, 0x56, 0x69, 0x65, 0x77, 0x12, + 0x2a, 0x0a, 0x11, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x70, + 0x6c, 0x65, 0x74, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x72, 0x65, 0x61, 0x64, + 0x79, 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x3a, 0x0a, 0x19, 0x76, + 0x69, 0x74, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x69, + 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x17, + 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x49, 0x6e, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x2e, 0x0a, 0x13, 0x75, 
0x73, 0x65, 0x72, 0x5f, + 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x2b, + 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x75, 0x73, 0x65, 0x72, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, + 0x6c, 0x65, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x61, 0x6c, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x61, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x38, 0x0a, 0x11, 0x6c, 0x61, + 0x73, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, + 0x2d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x52, 0x0f, 0x6c, 0x61, 0x73, 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, + 0x65, 0x64, 0x41, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x2e, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x54, 0x68, 0x72, 0x6f, + 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x2f, 0x0a, 0x0c, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, + 0x6c, 0x6c, 0x65, 0x64, 0x41, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6f, 0x73, 0x74, 0x70, 0x6f, + 0x6e, 0x65, 0x5f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x18, 0x30, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0e, 0x70, 0x6f, 0x73, 0x74, 0x70, 0x6f, 0x6e, 0x65, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x31, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x74, 0x6f, 0x76, 0x65, 0x72, + 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0f, 0x63, 
0x75, 0x74, 0x6f, 0x76, 0x65, 0x72, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, + 0x12, 0x34, 0x0a, 0x16, 0x69, 0x73, 0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, + 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x33, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x14, 0x69, 0x73, 0x49, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x0b, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x34, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x76, 0x69, 0x65, + 0x77, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3d, 0x0a, 0x14, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x74, + 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x61, 0x74, 0x18, 0x35, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x52, 0x11, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x41, 0x74, 0x22, 0x53, 0x0a, 0x08, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, + 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x49, 0x54, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, + 0x4f, 0x4e, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x48, 0x4f, 0x53, + 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x54, 0x4f, 0x53, 0x43, 0x10, 0x02, 0x12, 0x0a, + 0x0a, 0x06, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x59, + 0x53, 0x51, 0x4c, 0x10, 0x04, 0x1a, 0x02, 0x10, 0x01, 0x22, 0x71, 0x0a, 0x06, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, + 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0a, + 0x0a, 0x06, 0x51, 0x55, 0x45, 0x55, 0x45, 0x44, 
0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, + 0x41, 0x44, 0x59, 0x10, 0x04, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, + 0x10, 0x05, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x06, + 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x07, 0x22, 0x5e, 0x0a, 0x05, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0xbf, 0x0f, 0x0a, + 0x08, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, + 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x3f, + 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x49, 0x0a, 0x13, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 
0x6f, - 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x73, 0x1a, 0xb9, 0x01, 0x0a, 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x34, 0x0a, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, + 0x31, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, + 0x6d, 0x61, 0x78, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, + 0x61, 0x67, 0x12, 0x4a, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, + 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 
0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x48, 0x0a, 0x21, 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6c, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1d, 0x6d, 0x61, 0x78, 0x56, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, + 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, + 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, + 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x1a, 0x60, 0x0a, 0x11, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x35, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x49, 0x0a, + 0x13, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x1a, 0xb9, 0x01, 0x0a, 0x0b, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x34, 0x0a, 
0x07, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x46, + 0x0a, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73, 0x5f, 0x70, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x6e, 0x67, 0x1a, 0xe7, 0x08, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x12, 0x3d, 0x0a, 0x0d, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0c, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x53, 0x6f, 0x75, + 0x72, 
0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x70, 0x50, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x52, 0x14, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2f, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, + 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x45, 0x0a, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, + 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x52, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x46, 0x0a, 0x0f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f, 
0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x10, 0x69, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, - 0x67, 0x1a, 0xf6, 0x06, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x12, 0x3d, 0x0a, 0x0d, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, - 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x52, 0x0c, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, - 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x70, 0x50, 0x6f, 0x73, 0x69, 0x74, 
0x69, 0x6f, - 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x41, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x14, 0x74, - 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x12, 0x2f, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, - 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x45, - 0x0a, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, - 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x70, 0x79, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x0d, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x61, 0x6d, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x63, 0x6f, + 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, + 0x18, 0x0d, 0x20, 0x03, 
0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x2e, 0x4c, 0x6f, 0x67, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0f, + 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x65, 0x74, 0x63, 0x68, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x67, 0x46, 0x65, 0x74, 0x63, 0x68, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x0f, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x6f, 0x77, 0x73, + 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, + 0x6f, 0x77, 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x12, 0x55, 0x0a, 0x10, 0x74, 0x68, 0x72, + 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, - 0x4c, 0x6f, 0x67, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6c, 0x6f, 0x67, - 0x5f, 0x66, 0x65, 0x74, 0x63, 0x68, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0e, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x67, 0x46, 0x65, 0x74, 0x63, 0x68, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x04, 0x74, 0x61, 0x67, 0x73, 0x1a, 0x3a, 0x0a, 0x09, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x6c, 0x61, 0x73, 0x74, - 0x5f, 0x70, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x50, - 0x6b, 0x1a, 0xe6, 0x01, 0x0a, 0x03, 0x4c, 0x6f, 0x67, 0x12, 0x0e, 
0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x2b, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x2b, 0x0a, - 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x0f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x1a, 0x3a, 0x0a, 0x09, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x6b, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x6b, 0x1a, 0xe6, 0x01, 0x0a, + 0x03, 0x4c, 0x6f, 0x67, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, + 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, + 0x20, 
0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a, 0x0a, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x09, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x2b, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, + 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x77, 0x0a, 0x0f, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, + 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, + 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x33, 0x0a, 0x0e, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, - 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x59, 0x0a, 0x12, 0x41, 0x64, - 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 
0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, - 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, - 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x15, 0x0a, 0x13, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x40, 0x0a, 0x14, - 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, + 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x22, 0x59, + 0x0a, 0x12, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x17, - 0x0a, 0x15, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9e, 0x01, 0x0a, 0x18, 0x41, 0x70, 0x70, 0x6c, - 0x79, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, - 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, - 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, - 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 
0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x63, - 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x1b, 0x0a, 0x19, 0x41, 0x70, 0x70, 0x6c, - 0x79, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb3, 0x01, 0x0a, 0x1d, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, - 0x52, 0x11, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, - 0x6c, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, - 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, - 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x20, 0x0a, 0x1e, 0x41, - 0x70, 0x70, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, - 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x86, 0x03, - 0x0a, 0x12, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 
0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x3a, 0x0a, 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x75, - 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x17, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4c, 0x6f, 0x6e, 0x67, 0x55, 0x6e, - 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x73, 0x71, 0x6c, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x12, 0x21, - 0x0a, 0x0c, 0x64, 0x64, 0x6c, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x64, 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, - 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x75, 0x69, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x75, 0x75, 0x69, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2b, - 0x0a, 0x11, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x78, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6d, 0x69, 0x67, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x44, 0x0a, 0x15, 0x77, - 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, - 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, - 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x6c, 0x69, - 0x67, 0x68, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x50, - 0x72, 0x65, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x12, 0x2c, 0x0a, 
0x09, 0x63, 0x61, 0x6c, 0x6c, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, - 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, - 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x22, 0x32, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, - 0x09, 0x75, 0x75, 0x69, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x08, 0x75, 0x75, 0x69, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x22, 0xc3, 0x01, 0x0a, 0x13, 0x41, - 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, - 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, - 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, - 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, - 0x22, 0x44, 0x0a, 0x14, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, - 0x68, 
0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0xc2, 0x01, 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, - 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x63, - 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0e, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, - 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, - 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x22, 0x8d, 0x01, 0x0a, 0x12, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, - 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, - 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, - 0x22, 0x9b, 0x01, 0x0a, 0x17, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 
0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x64, - 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0xa6, - 0x01, 0x0a, 0x18, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0d, 0x62, - 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0c, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x12, 0x33, 0x0a, 0x0c, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0b, 0x61, 0x66, 0x74, 0x65, - 0x72, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1e, 0x0a, 0x0b, 0x77, 0x61, 0x73, 0x5f, 0x64, - 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x77, 0x61, - 0x73, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0x99, 0x03, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x2f, 0x0a, 0x14, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x45, 0x6d, 0x70, 
0x74, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x40, 0x0a, 0x0c, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, - 0x6d, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x73, 0x12, 0x2a, - 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x61, - 0x73, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x31, 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, - 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, - 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x26, 0x0a, 0x0f, 0x73, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x5f, 0x64, 0x62, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x69, 0x64, 0x65, 0x63, 0x61, - 0x72, 0x44, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, - 0x05, 0x10, 0x06, 0x22, 0x49, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8c, - 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, + 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x15, 0x0a, 0x13, 0x41, 0x64, 0x64, + 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x40, 0x0a, 0x14, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, + 0x6c, 0x73, 0x22, 0x17, 0x0a, 0x15, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9e, 0x01, 0x0a, 0x18, + 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 
0x52, + 0x75, 0x6c, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, + 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, + 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x1b, 0x0a, 0x19, + 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb3, 0x01, 0x0a, 0x1d, 0x41, 0x70, + 0x70, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, 0x0a, 0x13, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x52, 0x11, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x5f, + 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, + 0x6b, 0x69, 0x70, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0c, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x22, + 0x20, 0x0a, 0x1e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0xce, 0x02, 0x0a, 
0x12, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x64, 0x6c, 0x5f, 0x73, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x64, + 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x75, 0x69, + 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x75, 0x75, + 0x69, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, + 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, + 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, + 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x74, 0x63, 0x68, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x62, 0x61, 0x74, + 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 
0x03, 0x4a, 0x04, 0x08, 0x08, + 0x10, 0x09, 0x22, 0xe8, 0x01, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x75, + 0x69, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x75, + 0x75, 0x69, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x6c, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, + 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x46, 0x0a, 0x18, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc3, 0x01, + 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x14, 
0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, - 0x65, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, - 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0xa0, 0x01, - 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x30, - 0x0a, 0x14, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, - 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x41, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, - 0x22, 0x41, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, - 0x72, 0x63, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, - 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x0a, - 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 
0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x1a, 0x0a, 0x18, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x67, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, - 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, - 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, - 0x65, 0x22, 0x18, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x13, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c, 0x0a, - 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x65, - 0x76, 0x65, 0x6e, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x49, 0x66, 0x53, 0x65, 
0x72, 0x76, - 0x69, 0x6e, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x2d, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, - 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, - 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x79, 0x0a, 0x14, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, - 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x17, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0xd0, 0x02, 0x0a, 0x1d, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 
0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x3e, 0x0a, 0x0f, - 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0e, 0x69, 0x67, - 0x6e, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x44, 0x0a, 0x15, - 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, - 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x12, 0x3f, 0x0a, 0x1c, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x72, - 0x6f, 0x73, 0x73, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x65, 0x6c, 0x6c, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0xbc, 0x01, 0x0a, 0x1e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, - 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 
0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, - 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, - 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, - 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, - 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, - 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x75, 0x73, - 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x75, 0x73, - 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x22, 0x47, 0x0a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, - 
0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xd3, - 0x01, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, - 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x14, 0x0a, + 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, + 0x6c, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x73, 0x71, 0x6c, 0x22, 0x44, 0x0a, 0x14, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x76, + 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, + 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, - 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, - 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, - 0x65, 0x5f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x12, - 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x22, 0x47, 0x0a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, - 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xa5, 0x01, - 0x0a, 0x12, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 
0x0c, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, + 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x30, 0x0a, 0x14, + 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x72, 0x6f, 0x6d, + 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x73, 0x61, 0x66, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x61, 0x66, + 0x65, 0x22, 0xa2, 0x01, 0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x55, - 0x0a, 0x13, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x68, 0x6f, 0x6f, 0x6b, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x52, 0x11, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5e, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, - 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x0b, - 0x68, 0x6f, 0x6f, 0x6b, 
0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, - 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0a, 0x68, 0x6f, 0x6f, 0x6b, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x3c, 0x0a, 0x1e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x22, 0xbe, 0x01, 0x0a, 0x1f, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x1a, 0x4b, 0x0a, 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9e, 0x01, 0x0a, 0x11, 0x47, 
0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x25, - 0x0a, 0x0e, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, - 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x44, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x62, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, - 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x22, 0x28, 0x0a, 0x12, 0x47, - 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x46, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, - 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, - 
0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x19, 0x0a, - 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x30, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x43, - 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, - 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x22, 0xb6, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, - 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x49, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, - 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x1a, 0x50, 0x0a, 0x0c, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x50, 0x0a, - 0x14, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 
0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, - 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, - 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, - 0x4c, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x15, 0x0a, - 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x09, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, - 0x30, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, + 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 
0x01, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, + 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0xe2, 0x01, 0x0a, 0x12, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x5f, 0x73, 0x61, 0x66, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x70, + 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x61, 0x66, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x63, + 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, + 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x73, 0x22, 0x4e, 0x0a, 0x1c, 0x43, + 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, + 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0xdf, 0x01, 0x0a, 0x1d, + 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x76, 0x0a, + 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, + 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x46, 0x0a, 0x18, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9b, 0x01, + 0x0a, 0x17, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x64, 
0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x64, 0x62, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0xa6, 0x01, 0x0a, 0x18, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0d, 0x62, 0x65, 0x66, 0x6f, + 0x72, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x52, 0x0c, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, + 0x33, 0x0a, 0x0c, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0b, 0x61, 0x66, 0x74, 0x65, 0x72, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1e, 0x0a, 0x0b, 0x77, 0x61, 0x73, 0x5f, 0x64, 0x72, 0x79, 0x5f, + 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x77, 0x61, 0x73, 0x44, 0x72, + 0x79, 0x52, 0x75, 0x6e, 0x22, 0x4f, 0x0a, 0x1d, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x22, 0x46, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 
0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x51, 0x0a, 0x15, 0x47, 0x65, 0x74, - 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, - 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x5a, 0x0a, 0x16, - 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x70, 0x65, 0x72, - 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, - 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x22, 0x55, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, - 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, - 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, - 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, - 0x74, 0x69, 0x6e, 0x67, 0x52, 
0x75, 0x6c, 0x65, 0x73, 0x22, 0xb0, 0x02, 0x0a, 0x10, 0x47, 0x65, - 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, - 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, - 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, - 0x64, 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, - 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x28, 0x0a, 0x10, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, - 0x65, 0x73, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, - 0x73, 0x69, 0x7a, 0x65, 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x4f, 0x6e, 0x6c, 0x79, - 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x50, 0x0a, 0x11, - 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 
0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x3b, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, - 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x4c, - 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, - 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3a, 0x0a, 0x10, - 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x26, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x1d, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x6a, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, - 0x52, 
0x11, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, - 0x6c, 0x65, 0x73, 0x22, 0x32, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xf3, 0x01, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, - 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x1a, 0x69, 0x0a, 0x0a, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x45, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, - 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x20, 0x0a, 0x08, 0x4e, - 0x61, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x4a, 0x0a, - 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 
0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xcc, 0x01, 0x0a, 0x17, 0x47, 0x65, - 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0c, 0x73, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, - 0x1a, 0x56, 0x0a, 0x11, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb7, 0x02, 0x0a, 0x1c, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 
0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, - 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, - 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, - 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x63, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, - 0x65, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x61, 0x73, 0x5f, 0x63, - 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x65, 0x6c, 0x66, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x6c, - 0x66, 0x12, 0x2f, 0x0a, 0x14, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x61, 0x73, 0x5f, 0x63, 0x68, - 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x11, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x22, 0x1f, 0x0a, 0x1d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, - 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x2a, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, - 0x65, 0x6c, 0x6c, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, - 0x4e, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0c, 0x73, 0x72, 0x76, 0x5f, - 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, - 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, - 0x2d, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xc5, - 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x0d, 0x73, 0x72, 0x76, - 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, - 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x73, 0x1a, 0x53, 0x0a, 0x10, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 
0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x22, 0x3d, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x06, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x22, 0xe8, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, - 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 
0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x40, - 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, - 0x22, 0x2c, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, - 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x46, - 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, - 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x04, 0x63, 0x65, 0x6c, - 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x43, 0x65, 0x6c, 0x6c, - 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x66, 0x0a, 0x0c, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, - 0x67, 0x79, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, - 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, - 0x74, 0x61, 0x12, 0x1a, 0x0a, 
0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x22, 0x2f, - 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, - 0x4d, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, - 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x2e, - 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x42, - 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x22, 0x6f, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 
0x74, 0x69, 0x76, 0x65, 0x5f, - 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, - 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6f, - 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x4f, - 0x6e, 0x6c, 0x79, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0xfb, - 0x01, 0x0a, 0x17, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x52, 0x0a, 0x1a, - 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x17, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, - 0x45, 0x6c, 0x65, 0x63, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, + 0x65, 
0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0xe1, 0x01, 0x0a, 0x1e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, + 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, + 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x6f, + 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x1a, 0x46, 0x0a, 0x18, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x50, 0x0a, 0x1e, 0x43, 0x6f, 0x6d, + 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 
0x75, 0x75, 0x69, 0x64, 0x22, 0xe3, 0x01, 0x0a, 0x1f, + 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x78, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x43, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x70, + 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x6f, 0x77, 0x73, + 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, + 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x46, 0x0a, 0x18, 0x52, 0x6f, 0x77, + 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x99, 0x03, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, + 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x2f, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, + 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 
0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x40, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x0b, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x73, 0x12, 0x2a, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, + 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x31, 0x0a, 0x0d, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0c, + 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x11, + 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x69, 0x64, + 0x65, 0x63, 0x61, 0x72, 0x5f, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x73, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x44, 0x62, 0x4e, 0x61, 0x6d, + 0x65, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x49, 0x0a, + 0x16, 0x43, 0x72, 
0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8c, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, + 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, + 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0xa0, 0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x26, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 
0x30, 0x0a, 0x14, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x5f, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x73, 0x68, 0x61, 0x72, 0x64, 0x41, 0x6c, 0x72, + 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x41, 0x0a, 0x15, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x18, 0x0a, + 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x67, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, + 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, + 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x18, 0x0a, 0x16, 
0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, + 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, + 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, + 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, + 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, 0x5f, 0x69, 0x66, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, + 0x65, 0x76, 0x65, 0x6e, 0x49, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, + 0x72, 0x63, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x0a, 0x17, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x79, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, + 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0d, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x22, 0x17, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x81, 0x03, 0x0a, 0x1d, 0x45, + 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, + 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x3e, 0x0a, 0x0f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, + 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0e, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 
0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x42, 0x0a, 0x18, - 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, - 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, - 0x22, 0x4d, 0x0a, 0x11, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, - 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, - 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, - 0x14, 0x0a, 0x12, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x89, 0x02, 0x0a, 0x1b, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, - 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3f, 0x0a, 0x1c, + 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x63, 0x65, + 0x6c, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x19, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x6f, 0x73, 0x73, + 0x43, 0x65, 0x6c, 0x6c, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, + 0x14, 0x77, 0x61, 
0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x61, 0x6c, 0x6c, 0x5f, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x77, 0x61, 0x69, + 0x74, 0x46, 0x6f, 0x72, 0x41, 0x6c, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xbc, + 0x01, 0x0a, 0x1e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, + 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, + 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xa0, 0x01, + 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, + 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x71, 
0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, + 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, + 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x5f, 0x70, 0x6f, 0x6f, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x75, 0x73, 0x65, 0x50, 0x6f, 0x6f, 0x6c, + 0x22, 0x47, 0x0a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, + 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, + 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xd3, 0x01, 0x0a, 0x18, 0x45, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, + 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, + 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, 
0x65, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0c, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, + 0x47, 0x0a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, + 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xa5, 0x01, 0x0a, 0x12, 0x45, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x55, 0x0a, 0x13, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x5f, 0x68, 0x6f, 0x6f, 0x6b, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x11, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x22, 0x5e, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x0b, 0x68, 0x6f, 0x6f, 0x6b, 0x5f, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 
0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0a, 0x68, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x22, 0x3c, 0x0a, 0x1e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xbe, + 0x01, 0x0a, 0x1f, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, + 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x1a, 0x4b, 0x0a, 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x9e, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 
0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, - 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, - 0x3a, 0x0a, 0x0d, 0x61, 0x76, 0x6f, 0x69, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0c, 0x61, - 0x76, 0x6f, 0x69, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x44, 0x0a, 0x15, 0x77, - 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, - 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, - 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x22, 0xba, 0x01, 0x0a, 0x1c, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, - 0x5f, 0x70, 0x72, 
0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, - 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, - 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x74, - 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, - 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, - 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x61, 0x72, - 0x74, 0x69, 0x61, 0x6c, 0x22, 0x1e, 0x0a, 0x1c, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, 0x1a, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 
0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0d, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x22, 0x44, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, + 0x74, 0x6c, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x22, 0x28, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, + 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, + 0x22, 0x46, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, + 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x19, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, + 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0x30, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, + 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, + 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x22, + 0xb6, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x07, 0x61, + 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, + 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x61, + 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x1a, 0x50, 0x0a, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x50, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x46, + 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x4c, 0x0a, 0x15, 0x47, 0x65, + 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x64, 0x61, 0x74, 0x61, 
0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, 0x30, 0x0a, 0x12, 0x47, 0x65, + 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x46, 0x0a, 0x13, + 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x22, 0x51, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, + 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x5a, 0x0a, 0x16, 
0x47, 0x65, 0x74, 0x50, 0x65, + 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x55, 0x0a, + 0x17, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x22, 0xb0, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 
0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x69, + 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x4f, 0x6e, 0x6c, + 0x79, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x73, + 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x50, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x06, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0xb8, 0x02, 0x0a, 0x1a, 0x47, 0x65, + 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 
0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x6d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x10, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x28, 0x0a, 0x06, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x05, 0x6f, 0x72, + 0x64, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4f, 0x72, 0x64, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x52, 0x05, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x73, 0x6b, 0x69, 0x70, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, + 0x73, 0x6b, 0x69, 0x70, 0x22, 0x59, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0a, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 
0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0x4c, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3a, 0x0a, + 0x10, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x26, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x1d, 0x0a, 0x1b, 0x47, 0x65, 0x74, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x6a, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x52, 0x11, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x22, 0x32, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 
0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4f, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, - 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, - 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, - 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x64, 0x0a, 0x1a, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, - 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x66, 0x72, 0x65, - 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73, 0x5f, 0x70, 0x61, 0x72, - 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x50, 0x61, 0x72, 
0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, - 0x72, 0x65, 0x73, 0x68, 0x12, 0x36, 0x0a, 0x17, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, - 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, - 0x66, 0x72, 0x65, 0x73, 0x68, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x4f, 0x0a, 0x13, - 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, - 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x16, 0x0a, - 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa9, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, + 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xf3, 0x01, 0x0a, 0x1b, 0x47, 0x65, 0x74, + 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x1a, 0x69, 0x0a, 0x0a, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 
0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x45, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x4c, 0x69, 0x73, + 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x20, 0x0a, 0x08, + 0x4e, 0x61, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x4a, + 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xcc, 0x01, 0x0a, 0x17, 0x47, + 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x1a, 0x56, 0x0a, 0x11, 
0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf8, 0x02, 0x0a, 0x1c, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x07, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, + 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, + 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x53, 0x65, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x61, 0x73, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x65, 0x6c, 0x66, 0x18, 
0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, + 0x6c, 0x66, 0x12, 0x2f, 0x0a, 0x14, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x61, 0x73, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x11, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x12, 0x3f, 0x0a, 0x0d, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, + 0x5f, 0x61, 0x70, 0x70, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, + 0x70, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0c, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, + 0x64, 0x41, 0x70, 0x70, 0x22, 0x1f, 0x0a, 0x1d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, + 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2a, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, + 0x6c, 0x22, 0x4e, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0c, 0x73, 0x72, + 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x13, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x22, 0x2d, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, + 0x6c, 
0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, + 0x22, 0xc5, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x0d, 0x73, + 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, + 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x73, 0x1a, 0x53, 0x0a, 0x10, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x3d, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x06, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x06, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0xe8, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, + 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, + 0x6c, 0x6c, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x12, 0x3c, 0x0a, 0x0e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x22, 0x40, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x07, 0x74, 0x61, 
0x62, 0x6c, 0x65, + 0x74, 0x73, 0x22, 0x2c, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, + 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x22, 0x46, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, + 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x04, 0x63, + 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x43, 0x65, + 0x6c, 0x6c, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x66, 0x0a, 0x0c, 0x54, 0x6f, 0x70, 0x6f, + 0x6c, 0x6f, 0x67, 0x79, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, + 0x22, 0x2f, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, - 0x65, 0x5f, 0x70, 0x72, 
0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, - 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, - 0x79, 0x22, 0x46, 0x0a, 0x1c, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xbc, 0x01, 0x0a, 0x18, 0x52, 0x65, - 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, + 0x65, 0x22, 0x4d, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x22, 0x2e, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0x42, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 
0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x22, 0xae, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x61, + 0x6d, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6c, 0x6f, + 0x67, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x4c, 0x6f, 0x67, 0x73, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, + 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, + 0x22, 0xfb, 0x01, 0x0a, 0x17, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, + 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 
0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x52, + 0x0a, 0x1a, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x17, 0x70, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x45, 0x6c, 0x65, 0x63, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, + 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x42, + 0x0a, 0x18, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, + 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x22, 0x4e, 0x0a, 0x1c, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 
0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, + 0x69, 0x64, 0x22, 0xdf, 0x01, 0x0a, 0x1d, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x76, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, + 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x46, 0x0a, 0x18, + 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff, 0x02, 0x0a, 0x19, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, + 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, + 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, + 0x12, 0x29, 0x0a, 0x06, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x06, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x42, 0x0a, 0x1e, 0x63, + 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, + 0x70, 0x79, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x1a, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x41, 0x66, 0x74, + 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x57, 0x69, 0x74, 0x68, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, + 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, + 0x56, 0x69, 
0x6e, 0x64, 0x65, 0x78, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x77, 0x0a, 0x1e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, - 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, - 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, - 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, - 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, - 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x43, 0x0a, 0x19, 0x52, 0x65, 0x6c, 0x6f, - 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x5b, 0x0a, - 0x13, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, + 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 
0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x4c, 0x0a, + 0x1f, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x45, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x29, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x56, 0x0a, 0x18, 0x4d, + 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x08, 0x73, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, + 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x08, 0x73, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x22, 0x1b, 0x0a, 0x19, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, + 0x7a, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0xdd, 0x05, 0x0a, 0x14, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 
0x27, + 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75, + 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x6c, 0x6c, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x54, 0x61, 
0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x6f, 0x6e, + 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x6e, 0x44, 0x64, + 0x6c, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, + 0x63, 0x6f, 0x70, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x70, + 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x64, 0x72, 0x6f, + 0x70, 0x5f, 0x66, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x72, 0x6f, 0x70, 0x46, 0x6f, 0x72, 0x65, 0x69, 0x67, + 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, + 0x6f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x6e, 0x6f, 0x5f, 0x72, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0e, 0x6e, 0x6f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x22, 0xe6, 0x01, 0x0a, 0x16, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 
0x65, 0x43, 0x6f, 0x6d, 0x70, + 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x6b, 0x65, 0x65, 0x70, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2c, 0x0a, + 0x12, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, + 0x6c, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x52, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0x5b, 0x0a, 0x17, 0x4d, 0x69, 0x67, + 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, + 0x0a, 0x0f, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x85, 0x01, 0x0a, 0x14, 0x4d, 0x6f, 0x75, 0x6e, 0x74, + 0x52, 
0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x70, 0x6f, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, + 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x74, 0x6f, 0x70, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x1b, 0x0a, + 0x09, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x74, 0x6f, 0x70, 0x6f, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x17, + 0x0a, 0x15, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x0a, 0x16, 0x4d, 0x6f, 0x75, 0x6e, 0x74, + 0x55, 0x6e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x55, 0x6e, + 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x26, 0x0a, 0x10, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x82, 0x01, 0x0a, 0x11, 0x4d, 0x6f, 0x75, + 0x6e, 0x74, 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, + 0x0a, 0x09, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x74, 0x6f, 0x70, 0x6f, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, + 0x6f, 0x70, 0x6f, 0x5f, 0x73, 0x65, 0x72, 
0x76, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x74, 0x6f, 0x70, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x74, 0x6f, 0x70, 0x6f, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x12, 0x0a, + 0x10, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x29, 0x0a, 0x11, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0xbb, 0x06, 0x0a, + 0x17, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, + 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 
0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, 0x5f, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x6c, + 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, + 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x6c, + 0x75, 0x73, 0x74, 
0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x5a, + 0x6f, 0x6e, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x6f, 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x6e, 0x44, 0x64, 0x6c, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, + 0x6f, 0x70, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, + 0x70, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x66, 0x6f, 0x72, 0x65, 0x69, + 0x67, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, + 0x72, 0x6f, 0x70, 0x46, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x30, + 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, + 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, + 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, + 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x11, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, + 0x28, 0x0a, 0x10, 0x6e, 0x6f, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, + 0x6c, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x6e, 0x6f, 0x52, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x74, 0x6f, + 0x6d, 0x69, 0x63, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x61, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x43, 0x6f, 0x70, 0x79, 0x22, 0xd5, 0x01, 0x0a, 0x18, 0x4d, + 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 
0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x12, 0x48, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, + 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x55, 0x0a, 0x0a, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x22, 0xe9, 0x01, 0x0a, 0x19, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6b, 0x65, 0x65, 0x70, 0x44, 
0x61, + 0x74, 0x61, 0x12, 0x2c, 0x0a, 0x12, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, + 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0x5e, + 0x0a, 0x1a, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, + 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, + 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0d, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x4d, + 0x0a, 0x11, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, + 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x14, 0x0a, + 0x12, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x89, 0x02, 0x0a, 0x1b, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, + 0x65, 0x70, 0x61, 0x72, 0x65, 
0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x3a, 0x0a, + 0x0d, 0x61, 0x76, 0x6f, 0x69, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0c, 0x61, 0x76, 0x6f, + 0x69, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, + 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, + 0xba, 0x01, 0x0a, 0x1c, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x74, 0x0a, 0x1b, + 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, + 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, + 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x61, 0x72, 0x74, 0x69, + 0x61, 0x6c, 0x22, 0x1e, 0x0a, 0x1c, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x32, 0x0a, 0x1a, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, + 0x64, 
0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4f, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, + 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x64, + 0x0a, 0x1a, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, + 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, + 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, + 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x10, 0x69, 0x73, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, 0x65, + 0x73, 0x68, 0x12, 0x36, 0x0a, 0x17, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, + 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x64, 
0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x15, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x4f, 0x0a, 0x13, 0x52, 0x65, + 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x52, + 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0xa9, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, + 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, + 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, + 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, + 0x46, 0x0a, 0x1c, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 
0x6d, 0x61, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, + 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xbc, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x6c, 0x6f, + 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x7f, 0x0a, 0x19, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, - 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, - 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, - 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, - 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, - 0x69, 0x76, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 
0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, - 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, - 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, - 0x19, 0x0a, 0x17, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, - 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x46, 0x0a, 0x15, 0x52, 0x65, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x22, 0x7b, 0x0a, 0x16, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 
0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, - 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, - 0xc2, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, + 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, + 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x43, 0x0a, 0x19, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x5b, 0x0a, 0x13, 0x52, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, + 
0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x7f, 0x0a, 0x19, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, + 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, + 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, + 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x9b, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, + 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 
0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, + 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, + 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x19, 0x0a, + 0x17, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x46, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x22, 0x7b, 0x0a, 0x16, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x07, + 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x8f, 0x04, + 0x0a, 0x14, 0x52, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 
0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x23, + 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, + 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0e, 0x73, 
0x6b, 0x69, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x43, 0x6f, 0x70, 0x79, 0x12, + 0x15, 0x0a, 0x06, 0x6f, 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x6f, 0x6e, 0x44, 0x64, 0x6c, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, + 0x66, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x30, + 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, + 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, + 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, + 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x22, + 0x82, 0x02, 0x0a, 0x18, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, @@ -11876,7 +16965,11 @@ var file_vtctldata_proto_rawDesc = []byte{ 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, - 0x79, 0x52, 0x75, 0x6e, 0x22, 0xad, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x79, 0x52, 0x75, 0x6e, 0x12, 0x3e, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, + 0x74, 0x6f, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 
0x01, + 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x22, 0xad, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, @@ -11887,288 +16980,368 @@ var file_vtctldata_proto_rawDesc = []byte{ 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x22, 0x51, 0x0a, 0x15, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, - 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x52, 0x75, 0x6e, 0x48, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x6d, 0x0a, 0x22, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x76, 0x65, 0x6e, 0x74, 0x22, 0x4d, 0x0a, 0x1b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, + 0x75, 0x69, 0x64, 0x22, 0xdd, 0x01, 0x0a, 0x1c, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x6f, + 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x46, 0x0a, 0x18, 0x52, + 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x51, 0x0a, 0x15, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 
0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x6d, 0x0a, 0x22, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, + 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, + 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, + 0x55, 0x0a, 0x23, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, + 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xc8, 0x01, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x64, 
0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, - 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, - 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x22, 0x55, 0x0a, 0x23, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, - 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xc8, 0x01, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, - 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x61, 0x63, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, + 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, + 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x22, 0x4f, 0x0a, 0x1d, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 
0x63, 0x65, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x22, 0x5e, 0x0a, 0x1e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, + 0x10, 0x04, 0x22, 0x51, 0x0a, 0x1f, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x72, 0x0a, 0x1f, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, - 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x22, 0x4f, 0x0a, 0x1d, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x22, 0x5e, 0x0a, 0x1e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x69, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x22, 0x49, 0x0a, 0x20, 0x53, 0x65, 0x74, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x22, 0x8e, 0x02, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, - 0x03, 0x10, 0x04, 0x22, 0x51, 0x0a, 0x1f, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x72, 0x0a, 0x1f, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, + 0x70, 
0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, + 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x6e, + 0x69, 0x65, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x64, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x22, 0x46, 0x0a, 0x1d, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x6a, 0x0a, + 0x12, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, + 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, + 0x08, 0x77, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x77, 0x72, 0x69, 0x74, 0x61, 0x62, 
0x6c, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, + 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x88, 0x01, 0x0a, 0x1a, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, + 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x62, 0x0a, 0x1a, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, + 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, + 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x54, + 0x0a, 0x1b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 
0x12, 0x35, 0x0a, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x22, 0x54, 0x0a, 0x20, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0xaa, 0x03, 0x0a, 0x21, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x78, 0x0a, 0x14, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x5a, 0x0a, 0x0a, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, + 0x2e, 0x76, 0x74, 
0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x1a, 0x5f, 0x0a, 0x18, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4e, 0x0a, 0x0e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x8b, 0x01, 0x0a, 0x1d, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 
0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x69, - 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x09, 0x69, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x22, 0x49, 0x0a, 0x20, 0x53, 0x65, - 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, - 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x8e, 0x02, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x20, 0x0a, 0x1e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7c, 0x0a, 0x12, 0x53, 0x6c, 0x65, 0x65, 0x70, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, + 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x2c, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, + 
0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, + 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf0, 0x01, 0x0a, + 0x15, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, - 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, - 0x6e, 0x69, 0x65, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x64, 0x69, - 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, - 0x6c, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x22, 
0x46, 0x0a, 0x1d, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x6a, - 0x0a, 0x12, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, - 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, - 0x0a, 0x08, 0x77, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x08, 0x77, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, - 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x88, 0x01, 0x0a, 0x1a, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, - 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 
0x61, 0x73, 0x52, - 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x1d, 0x0a, 0x1b, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x62, 0x0a, 0x1a, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, - 0x69, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, - 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, - 0x54, 0x0a, 0x1b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, - 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x54, 0x0a, 0x20, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0xaa, 0x03, 0x0a, 0x21, - 0x53, 0x68, 0x61, 
0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x78, 0x0a, 0x14, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x45, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x5a, 0x0a, 0x0a, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x3b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x1a, 0x5f, 0x0a, 0x18, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 
0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4e, 0x0a, 0x0e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, - 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x8b, 0x01, 0x0a, 0x1d, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x20, 0x0a, 0x1e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7c, 0x0a, 0x12, 0x53, 0x6c, 0x65, 0x65, - 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, - 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 
0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x2c, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, - 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf0, 0x01, - 0x0a, 0x15, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, - 0x61, 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, - 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x18, 0x07, 0x20, 
0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x22, 0x3f, 0x0a, 0x16, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, - 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x22, 0x5e, 0x0a, 0x18, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, - 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x75, 0x69, - 0x64, 0x22, 0x42, 0x0a, 0x19, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, - 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x53, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 
0x1a, 0x0a, 0x18, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x74, - 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x0a, 0x21, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, - 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x22, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 
0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, - 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x36, 0x0a, 0x0b, 0x6f, 0x6c, - 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6f, 0x6c, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x22, 0x5c, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, - 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, - 0x22, 0x5d, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 
0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, + 0x3f, 0x0a, 0x16, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x22, 0x5e, 0x0a, 0x18, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x10, + 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x75, 0x69, 0x64, + 0x22, 0x42, 0x0a, 0x19, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x22, 0x53, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 
0x73, 0x74, 0x12, + 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x74, 0x6f, + 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x0a, 0x21, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x22, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x1a, 0x0a, 0x08, 
0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, + 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, + 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6f, 0x6c, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x22, 0x5c, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, - 0x64, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, - 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 
0x61, 0x74, 0x61, 0x2e, 0x43, - 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x65, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, - 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61, - 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x34, 0x0a, 0x0f, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x73, 0x22, 0xfb, 0x01, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x73, 0x12, 0x62, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x11, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, - 0x73, 
0x70, 0x61, 0x63, 0x65, 0x1a, 0x69, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, - 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0x58, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, - 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfc, 0x01, 0x0a, 0x18, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x73, 0x12, 0x61, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x76, 0x74, + 0x5d, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, + 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x64, + 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, + 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, + 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x22, 0x65, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, + 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61, 0x6c, + 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, + 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x34, 0x0a, 0x0f, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, + 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x73, 0x22, 0xfb, 0x01, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 
0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x12, 0x62, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x11, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x1a, 0x69, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, + 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x58, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, + 0x6e, 0x67, 0x54, 
0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfc, 0x01, 0x0a, 0x18, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x12, 0x61, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd8, 0x01, 0x0a, 0x1d, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 
0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, + 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, + 0x77, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x6e, 0x6f, 0x5f, 0x70, 0x72, + 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, + 0x70, 0x4e, 0x6f, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x22, 0x88, 0x02, 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x12, 0x67, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, + 
0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x6b, + 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, + 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x31, 0x0a, 0x15, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x3c, + 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8a, 0x02, 0x0a, + 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 
0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x68, 0x0a, 0x10, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, @@ -12178,131 +17351,303 @@ var file_vtctldata_proto_rawDesc = []byte{ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd8, 0x01, 0x0a, 0x1d, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, - 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, - 0x0a, 0x0d, 
0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, - 0x65, 0x77, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x6e, 0x6f, 0x5f, 0x70, - 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, - 0x69, 0x70, 0x4e, 0x6f, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x69, - 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x73, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x22, 0x88, 0x02, 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x73, 0x12, 0x67, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0x6b, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, - 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x31, 0x0a, 0x15, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, - 0x3c, 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8a, 0x02, - 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 
0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x68, 0x0a, 0x10, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, - 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4f, 0x0a, 0x1b, 0x56, 0x61, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4f, 0x0a, 0x1b, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x38, 0x0a, 0x1c, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 
0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x38, 0x0a, 0x1c, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, + 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x22, 0x98, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x22, + 0xfa, 0x01, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x98, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, - 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, - 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, - 0x22, 0xfa, 0x01, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x60, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x36, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 
0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x60, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x36, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x95, 0x06, 0x0a, + 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, + 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 
0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x12, + 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x43, 0x65, 0x6c, + 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x55, 0x0a, 0x1e, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x5f, + 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x1b, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x62, 0x75, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, + 0x1a, 0x0a, 0x09, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x6f, 0x6e, 0x6c, 0x79, 0x50, 0x4b, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x38, 0x0a, 0x19, 0x6d, 0x61, 0x78, + 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x63, + 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, + 0x78, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x6f, 0x77, 0x73, 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, + 0x61, 0x72, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x77, 0x61, 0x69, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x04, 0x77, 0x61, 0x69, 0x74, 0x12, 0x42, 0x0a, 0x14, 0x77, 0x61, 0x69, 0x74, 0x5f, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, + 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x77, 0x61, 0x69, 0x74, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x61, + 0x75, 0x74, 
0x6f, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x09, 0x61, 0x75, 0x74, 0x6f, 0x52, 0x65, 0x74, 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x62, 0x6f, 0x73, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x62, 0x6f, 0x73, 0x65, 0x22, 0x29, 0x0a, 0x13, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x55, + 0x55, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x55, 0x55, 0x49, 0x44, 0x22, + 0x6b, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x72, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x61, 0x72, 0x67, 0x22, 0x15, 0x0a, 0x13, + 0x56, 0x44, 0x69, 0x66, 0x66, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x6d, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x75, + 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, + 0x69, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x69, 0x0a, 0x10, 0x56, 0x44, 0x69, + 0x66, 0x66, 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x72, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x61, 0x72, 0x67, 0x22, 0xd7, 0x01, 0x0a, 0x11, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x68, + 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5c, 0x0a, 0x10, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x1a, 0x64, 0x0a, 0x14, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 
0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x6b, + 0x0a, 0x10, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, + 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0x13, 0x0a, 0x11, 0x56, + 0x44, 0x69, 0x66, 0x66, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x9a, 0x01, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6b, 0x65, 0x65, 0x70, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x2c, 0x0a, 0x12, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, + 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6b, 0x65, 0x65, + 0x70, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22, 0xd1, 0x01, + 0x0a, 0x16, 0x57, 0x6f, 
0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x12, 0x46, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x55, 0x0a, 0x0a, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, + 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x64, 0x22, 0x4f, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x22, 0xe6, 0x07, 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, + 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x70, 
0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x58, + 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x66, + 0x66, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0xe8, 0x01, + 0x0a, 0x0e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x6f, 0x77, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, + 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x72, 0x6f, 0x77, 0x73, 0x50, + 
0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x29, 0x0a, + 0x10, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, + 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x1a, 0xbc, 0x01, 0x0a, 0x10, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2d, 0x0a, + 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x1a, 0x5c, 0x0a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 
0x73, 0x12, 0x4c, 0x0a, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x07, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x73, 0x1a, 0x73, 0x0a, 0x13, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, + 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x46, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x6f, 0x0a, 0x11, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x86, 0x01, - 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 
0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x51, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xba, 0x01, 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x46, 0x0a, 0x07, 0x64, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, - 0x69, 0x6c, 0x73, 0x1a, 0x3e, 0x0a, 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x68, 0x61, - 0x6e, 0x67, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, - 0x67, 0x65, 0x64, 0x2a, 0x4a, 0x0a, 0x15, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x0a, 0x0a, 0x06, - 0x43, 0x55, 0x53, 
0x54, 0x4f, 0x4d, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, 0x4f, 0x56, 0x45, - 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x52, 0x45, 0x41, - 0x54, 0x45, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x49, 0x4e, 0x44, 0x45, 0x58, 0x10, 0x02, 0x42, - 0x28, 0x5a, 0x26, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, - 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x79, 0x12, 0x44, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7, 0x03, 0x0a, 0x1c, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x54, 0x72, + 0x61, 0x66, 0x66, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, + 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 
0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x12, 0x4f, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x41, 0x6c, 0x6c, + 0x6f, 0x77, 0x65, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, + 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x52, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x2a, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x17, 0x0a, 0x07, + 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, + 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x3e, 0x0a, 0x1b, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x1d, 0x57, 0x6f, 0x72, 0x6b, 
0x66, 0x6c, + 0x6f, 0x77, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x72, 0x79, 0x5f, 0x72, + 0x75, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0d, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, + 0x90, 0x01, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x5b, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x22, 0xd1, 0x01, 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, + 0x70, 0x64, 0x61, 0x74, 
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x46, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, + 0x55, 0x0a, 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2d, 0x0a, + 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, + 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x63, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x2a, 0x4a, 0x0a, 0x15, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, + 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, + 0x4f, 0x56, 0x45, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, + 0x52, 0x45, 0x41, 0x54, 0x45, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x49, 0x4e, 0x44, 0x45, 0x58, + 0x10, 0x02, 0x2a, 0x38, 0x0a, 0x0d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4f, 0x72, 0x64, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0d, 0x0a, + 0x09, 0x41, 0x53, 0x43, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, + 0x44, 0x45, 0x53, 0x43, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 
0x10, 0x02, 0x42, 0x28, 0x5a, 0x26, + 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, + 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -12317,396 +17662,528 @@ func file_vtctldata_proto_rawDescGZIP() []byte { return file_vtctldata_proto_rawDescData } -var file_vtctldata_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_vtctldata_proto_msgTypes = make([]protoimpl.MessageInfo, 201) +var file_vtctldata_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_vtctldata_proto_msgTypes = make([]protoimpl.MessageInfo, 267) var file_vtctldata_proto_goTypes = []interface{}{ (MaterializationIntent)(0), // 0: vtctldata.MaterializationIntent - (*ExecuteVtctlCommandRequest)(nil), // 1: vtctldata.ExecuteVtctlCommandRequest - (*ExecuteVtctlCommandResponse)(nil), // 2: vtctldata.ExecuteVtctlCommandResponse - (*TableMaterializeSettings)(nil), // 3: vtctldata.TableMaterializeSettings - (*MaterializeSettings)(nil), // 4: vtctldata.MaterializeSettings - (*Keyspace)(nil), // 5: vtctldata.Keyspace - (*Shard)(nil), // 6: vtctldata.Shard - (*Workflow)(nil), // 7: vtctldata.Workflow - (*AddCellInfoRequest)(nil), // 8: vtctldata.AddCellInfoRequest - (*AddCellInfoResponse)(nil), // 9: vtctldata.AddCellInfoResponse - (*AddCellsAliasRequest)(nil), // 10: vtctldata.AddCellsAliasRequest - (*AddCellsAliasResponse)(nil), // 11: vtctldata.AddCellsAliasResponse - (*ApplyRoutingRulesRequest)(nil), // 12: vtctldata.ApplyRoutingRulesRequest - (*ApplyRoutingRulesResponse)(nil), // 13: vtctldata.ApplyRoutingRulesResponse - (*ApplyShardRoutingRulesRequest)(nil), // 14: vtctldata.ApplyShardRoutingRulesRequest - (*ApplyShardRoutingRulesResponse)(nil), // 15: vtctldata.ApplyShardRoutingRulesResponse - (*ApplySchemaRequest)(nil), // 16: vtctldata.ApplySchemaRequest - (*ApplySchemaResponse)(nil), // 17: 
vtctldata.ApplySchemaResponse - (*ApplyVSchemaRequest)(nil), // 18: vtctldata.ApplyVSchemaRequest - (*ApplyVSchemaResponse)(nil), // 19: vtctldata.ApplyVSchemaResponse - (*BackupRequest)(nil), // 20: vtctldata.BackupRequest - (*BackupResponse)(nil), // 21: vtctldata.BackupResponse - (*BackupShardRequest)(nil), // 22: vtctldata.BackupShardRequest - (*ChangeTabletTypeRequest)(nil), // 23: vtctldata.ChangeTabletTypeRequest - (*ChangeTabletTypeResponse)(nil), // 24: vtctldata.ChangeTabletTypeResponse - (*CreateKeyspaceRequest)(nil), // 25: vtctldata.CreateKeyspaceRequest - (*CreateKeyspaceResponse)(nil), // 26: vtctldata.CreateKeyspaceResponse - (*CreateShardRequest)(nil), // 27: vtctldata.CreateShardRequest - (*CreateShardResponse)(nil), // 28: vtctldata.CreateShardResponse - (*DeleteCellInfoRequest)(nil), // 29: vtctldata.DeleteCellInfoRequest - (*DeleteCellInfoResponse)(nil), // 30: vtctldata.DeleteCellInfoResponse - (*DeleteCellsAliasRequest)(nil), // 31: vtctldata.DeleteCellsAliasRequest - (*DeleteCellsAliasResponse)(nil), // 32: vtctldata.DeleteCellsAliasResponse - (*DeleteKeyspaceRequest)(nil), // 33: vtctldata.DeleteKeyspaceRequest - (*DeleteKeyspaceResponse)(nil), // 34: vtctldata.DeleteKeyspaceResponse - (*DeleteShardsRequest)(nil), // 35: vtctldata.DeleteShardsRequest - (*DeleteShardsResponse)(nil), // 36: vtctldata.DeleteShardsResponse - (*DeleteSrvVSchemaRequest)(nil), // 37: vtctldata.DeleteSrvVSchemaRequest - (*DeleteSrvVSchemaResponse)(nil), // 38: vtctldata.DeleteSrvVSchemaResponse - (*DeleteTabletsRequest)(nil), // 39: vtctldata.DeleteTabletsRequest - (*DeleteTabletsResponse)(nil), // 40: vtctldata.DeleteTabletsResponse - (*EmergencyReparentShardRequest)(nil), // 41: vtctldata.EmergencyReparentShardRequest - (*EmergencyReparentShardResponse)(nil), // 42: vtctldata.EmergencyReparentShardResponse - (*ExecuteFetchAsAppRequest)(nil), // 43: vtctldata.ExecuteFetchAsAppRequest - (*ExecuteFetchAsAppResponse)(nil), // 44: vtctldata.ExecuteFetchAsAppResponse - 
(*ExecuteFetchAsDBARequest)(nil), // 45: vtctldata.ExecuteFetchAsDBARequest - (*ExecuteFetchAsDBAResponse)(nil), // 46: vtctldata.ExecuteFetchAsDBAResponse - (*ExecuteHookRequest)(nil), // 47: vtctldata.ExecuteHookRequest - (*ExecuteHookResponse)(nil), // 48: vtctldata.ExecuteHookResponse - (*FindAllShardsInKeyspaceRequest)(nil), // 49: vtctldata.FindAllShardsInKeyspaceRequest - (*FindAllShardsInKeyspaceResponse)(nil), // 50: vtctldata.FindAllShardsInKeyspaceResponse - (*GetBackupsRequest)(nil), // 51: vtctldata.GetBackupsRequest - (*GetBackupsResponse)(nil), // 52: vtctldata.GetBackupsResponse - (*GetCellInfoRequest)(nil), // 53: vtctldata.GetCellInfoRequest - (*GetCellInfoResponse)(nil), // 54: vtctldata.GetCellInfoResponse - (*GetCellInfoNamesRequest)(nil), // 55: vtctldata.GetCellInfoNamesRequest - (*GetCellInfoNamesResponse)(nil), // 56: vtctldata.GetCellInfoNamesResponse - (*GetCellsAliasesRequest)(nil), // 57: vtctldata.GetCellsAliasesRequest - (*GetCellsAliasesResponse)(nil), // 58: vtctldata.GetCellsAliasesResponse - (*GetFullStatusRequest)(nil), // 59: vtctldata.GetFullStatusRequest - (*GetFullStatusResponse)(nil), // 60: vtctldata.GetFullStatusResponse - (*GetKeyspacesRequest)(nil), // 61: vtctldata.GetKeyspacesRequest - (*GetKeyspacesResponse)(nil), // 62: vtctldata.GetKeyspacesResponse - (*GetKeyspaceRequest)(nil), // 63: vtctldata.GetKeyspaceRequest - (*GetKeyspaceResponse)(nil), // 64: vtctldata.GetKeyspaceResponse - (*GetPermissionsRequest)(nil), // 65: vtctldata.GetPermissionsRequest - (*GetPermissionsResponse)(nil), // 66: vtctldata.GetPermissionsResponse - (*GetRoutingRulesRequest)(nil), // 67: vtctldata.GetRoutingRulesRequest - (*GetRoutingRulesResponse)(nil), // 68: vtctldata.GetRoutingRulesResponse - (*GetSchemaRequest)(nil), // 69: vtctldata.GetSchemaRequest - (*GetSchemaResponse)(nil), // 70: vtctldata.GetSchemaResponse - (*GetShardRequest)(nil), // 71: vtctldata.GetShardRequest - (*GetShardResponse)(nil), // 72: vtctldata.GetShardResponse - 
(*GetShardRoutingRulesRequest)(nil), // 73: vtctldata.GetShardRoutingRulesRequest - (*GetShardRoutingRulesResponse)(nil), // 74: vtctldata.GetShardRoutingRulesResponse - (*GetSrvKeyspaceNamesRequest)(nil), // 75: vtctldata.GetSrvKeyspaceNamesRequest - (*GetSrvKeyspaceNamesResponse)(nil), // 76: vtctldata.GetSrvKeyspaceNamesResponse - (*GetSrvKeyspacesRequest)(nil), // 77: vtctldata.GetSrvKeyspacesRequest - (*GetSrvKeyspacesResponse)(nil), // 78: vtctldata.GetSrvKeyspacesResponse - (*UpdateThrottlerConfigRequest)(nil), // 79: vtctldata.UpdateThrottlerConfigRequest - (*UpdateThrottlerConfigResponse)(nil), // 80: vtctldata.UpdateThrottlerConfigResponse - (*GetSrvVSchemaRequest)(nil), // 81: vtctldata.GetSrvVSchemaRequest - (*GetSrvVSchemaResponse)(nil), // 82: vtctldata.GetSrvVSchemaResponse - (*GetSrvVSchemasRequest)(nil), // 83: vtctldata.GetSrvVSchemasRequest - (*GetSrvVSchemasResponse)(nil), // 84: vtctldata.GetSrvVSchemasResponse - (*GetTabletRequest)(nil), // 85: vtctldata.GetTabletRequest - (*GetTabletResponse)(nil), // 86: vtctldata.GetTabletResponse - (*GetTabletsRequest)(nil), // 87: vtctldata.GetTabletsRequest - (*GetTabletsResponse)(nil), // 88: vtctldata.GetTabletsResponse - (*GetTopologyPathRequest)(nil), // 89: vtctldata.GetTopologyPathRequest - (*GetTopologyPathResponse)(nil), // 90: vtctldata.GetTopologyPathResponse - (*TopologyCell)(nil), // 91: vtctldata.TopologyCell - (*GetVSchemaRequest)(nil), // 92: vtctldata.GetVSchemaRequest - (*GetVersionRequest)(nil), // 93: vtctldata.GetVersionRequest - (*GetVersionResponse)(nil), // 94: vtctldata.GetVersionResponse - (*GetVSchemaResponse)(nil), // 95: vtctldata.GetVSchemaResponse - (*GetWorkflowsRequest)(nil), // 96: vtctldata.GetWorkflowsRequest - (*GetWorkflowsResponse)(nil), // 97: vtctldata.GetWorkflowsResponse - (*InitShardPrimaryRequest)(nil), // 98: vtctldata.InitShardPrimaryRequest - (*InitShardPrimaryResponse)(nil), // 99: vtctldata.InitShardPrimaryResponse - (*PingTabletRequest)(nil), // 100: 
vtctldata.PingTabletRequest - (*PingTabletResponse)(nil), // 101: vtctldata.PingTabletResponse - (*PlannedReparentShardRequest)(nil), // 102: vtctldata.PlannedReparentShardRequest - (*PlannedReparentShardResponse)(nil), // 103: vtctldata.PlannedReparentShardResponse - (*RebuildKeyspaceGraphRequest)(nil), // 104: vtctldata.RebuildKeyspaceGraphRequest - (*RebuildKeyspaceGraphResponse)(nil), // 105: vtctldata.RebuildKeyspaceGraphResponse - (*RebuildVSchemaGraphRequest)(nil), // 106: vtctldata.RebuildVSchemaGraphRequest - (*RebuildVSchemaGraphResponse)(nil), // 107: vtctldata.RebuildVSchemaGraphResponse - (*RefreshStateRequest)(nil), // 108: vtctldata.RefreshStateRequest - (*RefreshStateResponse)(nil), // 109: vtctldata.RefreshStateResponse - (*RefreshStateByShardRequest)(nil), // 110: vtctldata.RefreshStateByShardRequest - (*RefreshStateByShardResponse)(nil), // 111: vtctldata.RefreshStateByShardResponse - (*ReloadSchemaRequest)(nil), // 112: vtctldata.ReloadSchemaRequest - (*ReloadSchemaResponse)(nil), // 113: vtctldata.ReloadSchemaResponse - (*ReloadSchemaKeyspaceRequest)(nil), // 114: vtctldata.ReloadSchemaKeyspaceRequest - (*ReloadSchemaKeyspaceResponse)(nil), // 115: vtctldata.ReloadSchemaKeyspaceResponse - (*ReloadSchemaShardRequest)(nil), // 116: vtctldata.ReloadSchemaShardRequest - (*ReloadSchemaShardResponse)(nil), // 117: vtctldata.ReloadSchemaShardResponse - (*RemoveBackupRequest)(nil), // 118: vtctldata.RemoveBackupRequest - (*RemoveBackupResponse)(nil), // 119: vtctldata.RemoveBackupResponse - (*RemoveKeyspaceCellRequest)(nil), // 120: vtctldata.RemoveKeyspaceCellRequest - (*RemoveKeyspaceCellResponse)(nil), // 121: vtctldata.RemoveKeyspaceCellResponse - (*RemoveShardCellRequest)(nil), // 122: vtctldata.RemoveShardCellRequest - (*RemoveShardCellResponse)(nil), // 123: vtctldata.RemoveShardCellResponse - (*ReparentTabletRequest)(nil), // 124: vtctldata.ReparentTabletRequest - (*ReparentTabletResponse)(nil), // 125: vtctldata.ReparentTabletResponse - 
(*RestoreFromBackupRequest)(nil), // 126: vtctldata.RestoreFromBackupRequest - (*RestoreFromBackupResponse)(nil), // 127: vtctldata.RestoreFromBackupResponse - (*RunHealthCheckRequest)(nil), // 128: vtctldata.RunHealthCheckRequest - (*RunHealthCheckResponse)(nil), // 129: vtctldata.RunHealthCheckResponse - (*SetKeyspaceDurabilityPolicyRequest)(nil), // 130: vtctldata.SetKeyspaceDurabilityPolicyRequest - (*SetKeyspaceDurabilityPolicyResponse)(nil), // 131: vtctldata.SetKeyspaceDurabilityPolicyResponse - (*SetKeyspaceServedFromRequest)(nil), // 132: vtctldata.SetKeyspaceServedFromRequest - (*SetKeyspaceServedFromResponse)(nil), // 133: vtctldata.SetKeyspaceServedFromResponse - (*SetKeyspaceShardingInfoRequest)(nil), // 134: vtctldata.SetKeyspaceShardingInfoRequest - (*SetKeyspaceShardingInfoResponse)(nil), // 135: vtctldata.SetKeyspaceShardingInfoResponse - (*SetShardIsPrimaryServingRequest)(nil), // 136: vtctldata.SetShardIsPrimaryServingRequest - (*SetShardIsPrimaryServingResponse)(nil), // 137: vtctldata.SetShardIsPrimaryServingResponse - (*SetShardTabletControlRequest)(nil), // 138: vtctldata.SetShardTabletControlRequest - (*SetShardTabletControlResponse)(nil), // 139: vtctldata.SetShardTabletControlResponse - (*SetWritableRequest)(nil), // 140: vtctldata.SetWritableRequest - (*SetWritableResponse)(nil), // 141: vtctldata.SetWritableResponse - (*ShardReplicationAddRequest)(nil), // 142: vtctldata.ShardReplicationAddRequest - (*ShardReplicationAddResponse)(nil), // 143: vtctldata.ShardReplicationAddResponse - (*ShardReplicationFixRequest)(nil), // 144: vtctldata.ShardReplicationFixRequest - (*ShardReplicationFixResponse)(nil), // 145: vtctldata.ShardReplicationFixResponse - (*ShardReplicationPositionsRequest)(nil), // 146: vtctldata.ShardReplicationPositionsRequest - (*ShardReplicationPositionsResponse)(nil), // 147: vtctldata.ShardReplicationPositionsResponse - (*ShardReplicationRemoveRequest)(nil), // 148: vtctldata.ShardReplicationRemoveRequest - 
(*ShardReplicationRemoveResponse)(nil), // 149: vtctldata.ShardReplicationRemoveResponse - (*SleepTabletRequest)(nil), // 150: vtctldata.SleepTabletRequest - (*SleepTabletResponse)(nil), // 151: vtctldata.SleepTabletResponse - (*SourceShardAddRequest)(nil), // 152: vtctldata.SourceShardAddRequest - (*SourceShardAddResponse)(nil), // 153: vtctldata.SourceShardAddResponse - (*SourceShardDeleteRequest)(nil), // 154: vtctldata.SourceShardDeleteRequest - (*SourceShardDeleteResponse)(nil), // 155: vtctldata.SourceShardDeleteResponse - (*StartReplicationRequest)(nil), // 156: vtctldata.StartReplicationRequest - (*StartReplicationResponse)(nil), // 157: vtctldata.StartReplicationResponse - (*StopReplicationRequest)(nil), // 158: vtctldata.StopReplicationRequest - (*StopReplicationResponse)(nil), // 159: vtctldata.StopReplicationResponse - (*TabletExternallyReparentedRequest)(nil), // 160: vtctldata.TabletExternallyReparentedRequest - (*TabletExternallyReparentedResponse)(nil), // 161: vtctldata.TabletExternallyReparentedResponse - (*UpdateCellInfoRequest)(nil), // 162: vtctldata.UpdateCellInfoRequest - (*UpdateCellInfoResponse)(nil), // 163: vtctldata.UpdateCellInfoResponse - (*UpdateCellsAliasRequest)(nil), // 164: vtctldata.UpdateCellsAliasRequest - (*UpdateCellsAliasResponse)(nil), // 165: vtctldata.UpdateCellsAliasResponse - (*ValidateRequest)(nil), // 166: vtctldata.ValidateRequest - (*ValidateResponse)(nil), // 167: vtctldata.ValidateResponse - (*ValidateKeyspaceRequest)(nil), // 168: vtctldata.ValidateKeyspaceRequest - (*ValidateKeyspaceResponse)(nil), // 169: vtctldata.ValidateKeyspaceResponse - (*ValidateSchemaKeyspaceRequest)(nil), // 170: vtctldata.ValidateSchemaKeyspaceRequest - (*ValidateSchemaKeyspaceResponse)(nil), // 171: vtctldata.ValidateSchemaKeyspaceResponse - (*ValidateShardRequest)(nil), // 172: vtctldata.ValidateShardRequest - (*ValidateShardResponse)(nil), // 173: vtctldata.ValidateShardResponse - (*ValidateVersionKeyspaceRequest)(nil), // 174: 
vtctldata.ValidateVersionKeyspaceRequest - (*ValidateVersionKeyspaceResponse)(nil), // 175: vtctldata.ValidateVersionKeyspaceResponse - (*ValidateVersionShardRequest)(nil), // 176: vtctldata.ValidateVersionShardRequest - (*ValidateVersionShardResponse)(nil), // 177: vtctldata.ValidateVersionShardResponse - (*ValidateVSchemaRequest)(nil), // 178: vtctldata.ValidateVSchemaRequest - (*ValidateVSchemaResponse)(nil), // 179: vtctldata.ValidateVSchemaResponse - (*WorkflowUpdateRequest)(nil), // 180: vtctldata.WorkflowUpdateRequest - (*WorkflowUpdateResponse)(nil), // 181: vtctldata.WorkflowUpdateResponse - nil, // 182: vtctldata.Workflow.ShardStreamsEntry - (*Workflow_ReplicationLocation)(nil), // 183: vtctldata.Workflow.ReplicationLocation - (*Workflow_ShardStream)(nil), // 184: vtctldata.Workflow.ShardStream - (*Workflow_Stream)(nil), // 185: vtctldata.Workflow.Stream - (*Workflow_Stream_CopyState)(nil), // 186: vtctldata.Workflow.Stream.CopyState - (*Workflow_Stream_Log)(nil), // 187: vtctldata.Workflow.Stream.Log - nil, // 188: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry - nil, // 189: vtctldata.GetCellsAliasesResponse.AliasesEntry - nil, // 190: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry - (*GetSrvKeyspaceNamesResponse_NameList)(nil), // 191: vtctldata.GetSrvKeyspaceNamesResponse.NameList - nil, // 192: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry - nil, // 193: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry - nil, // 194: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry - nil, // 195: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry - nil, // 196: vtctldata.ValidateResponse.ResultsByKeyspaceEntry - nil, // 197: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry - nil, // 198: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry - nil, // 199: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry - nil, // 200: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry - 
(*WorkflowUpdateResponse_TabletInfo)(nil), // 201: vtctldata.WorkflowUpdateResponse.TabletInfo - (*logutil.Event)(nil), // 202: logutil.Event - (*topodata.Keyspace)(nil), // 203: topodata.Keyspace - (*topodata.Shard)(nil), // 204: topodata.Shard - (*topodata.CellInfo)(nil), // 205: topodata.CellInfo - (*vschema.RoutingRules)(nil), // 206: vschema.RoutingRules - (*vschema.ShardRoutingRules)(nil), // 207: vschema.ShardRoutingRules - (*vttime.Duration)(nil), // 208: vttime.Duration - (*vtrpc.CallerID)(nil), // 209: vtrpc.CallerID - (*vschema.Keyspace)(nil), // 210: vschema.Keyspace - (*topodata.TabletAlias)(nil), // 211: topodata.TabletAlias - (topodata.TabletType)(0), // 212: topodata.TabletType - (*topodata.Tablet)(nil), // 213: topodata.Tablet - (*topodata.Keyspace_ServedFrom)(nil), // 214: topodata.Keyspace.ServedFrom - (topodata.KeyspaceType)(0), // 215: topodata.KeyspaceType - (*vttime.Time)(nil), // 216: vttime.Time - (*query.QueryResult)(nil), // 217: query.QueryResult - (*tabletmanagerdata.ExecuteHookRequest)(nil), // 218: tabletmanagerdata.ExecuteHookRequest - (*tabletmanagerdata.ExecuteHookResponse)(nil), // 219: tabletmanagerdata.ExecuteHookResponse - (*mysqlctl.BackupInfo)(nil), // 220: mysqlctl.BackupInfo - (*replicationdata.FullStatus)(nil), // 221: replicationdata.FullStatus - (*tabletmanagerdata.Permissions)(nil), // 222: tabletmanagerdata.Permissions - (*tabletmanagerdata.SchemaDefinition)(nil), // 223: tabletmanagerdata.SchemaDefinition - (*vschema.SrvVSchema)(nil), // 224: vschema.SrvVSchema - (*topodata.ShardReplicationError)(nil), // 225: topodata.ShardReplicationError - (*topodata.KeyRange)(nil), // 226: topodata.KeyRange - (*topodata.CellsAlias)(nil), // 227: topodata.CellsAlias - (*tabletmanagerdata.UpdateVRWorkflowRequest)(nil), // 228: tabletmanagerdata.UpdateVRWorkflowRequest - (*topodata.Shard_TabletControl)(nil), // 229: topodata.Shard.TabletControl - (*binlogdata.BinlogSource)(nil), // 230: binlogdata.BinlogSource - 
(*topodata.SrvKeyspace)(nil), // 231: topodata.SrvKeyspace - (*replicationdata.Status)(nil), // 232: replicationdata.Status + (QueryOrdering)(0), // 1: vtctldata.QueryOrdering + (SchemaMigration_Strategy)(0), // 2: vtctldata.SchemaMigration.Strategy + (SchemaMigration_Status)(0), // 3: vtctldata.SchemaMigration.Status + (*ExecuteVtctlCommandRequest)(nil), // 4: vtctldata.ExecuteVtctlCommandRequest + (*ExecuteVtctlCommandResponse)(nil), // 5: vtctldata.ExecuteVtctlCommandResponse + (*TableMaterializeSettings)(nil), // 6: vtctldata.TableMaterializeSettings + (*MaterializeSettings)(nil), // 7: vtctldata.MaterializeSettings + (*Keyspace)(nil), // 8: vtctldata.Keyspace + (*SchemaMigration)(nil), // 9: vtctldata.SchemaMigration + (*Shard)(nil), // 10: vtctldata.Shard + (*Workflow)(nil), // 11: vtctldata.Workflow + (*AddCellInfoRequest)(nil), // 12: vtctldata.AddCellInfoRequest + (*AddCellInfoResponse)(nil), // 13: vtctldata.AddCellInfoResponse + (*AddCellsAliasRequest)(nil), // 14: vtctldata.AddCellsAliasRequest + (*AddCellsAliasResponse)(nil), // 15: vtctldata.AddCellsAliasResponse + (*ApplyRoutingRulesRequest)(nil), // 16: vtctldata.ApplyRoutingRulesRequest + (*ApplyRoutingRulesResponse)(nil), // 17: vtctldata.ApplyRoutingRulesResponse + (*ApplyShardRoutingRulesRequest)(nil), // 18: vtctldata.ApplyShardRoutingRulesRequest + (*ApplyShardRoutingRulesResponse)(nil), // 19: vtctldata.ApplyShardRoutingRulesResponse + (*ApplySchemaRequest)(nil), // 20: vtctldata.ApplySchemaRequest + (*ApplySchemaResponse)(nil), // 21: vtctldata.ApplySchemaResponse + (*ApplyVSchemaRequest)(nil), // 22: vtctldata.ApplyVSchemaRequest + (*ApplyVSchemaResponse)(nil), // 23: vtctldata.ApplyVSchemaResponse + (*BackupRequest)(nil), // 24: vtctldata.BackupRequest + (*BackupResponse)(nil), // 25: vtctldata.BackupResponse + (*BackupShardRequest)(nil), // 26: vtctldata.BackupShardRequest + (*CancelSchemaMigrationRequest)(nil), // 27: vtctldata.CancelSchemaMigrationRequest + 
(*CancelSchemaMigrationResponse)(nil), // 28: vtctldata.CancelSchemaMigrationResponse + (*ChangeTabletTypeRequest)(nil), // 29: vtctldata.ChangeTabletTypeRequest + (*ChangeTabletTypeResponse)(nil), // 30: vtctldata.ChangeTabletTypeResponse + (*CleanupSchemaMigrationRequest)(nil), // 31: vtctldata.CleanupSchemaMigrationRequest + (*CleanupSchemaMigrationResponse)(nil), // 32: vtctldata.CleanupSchemaMigrationResponse + (*CompleteSchemaMigrationRequest)(nil), // 33: vtctldata.CompleteSchemaMigrationRequest + (*CompleteSchemaMigrationResponse)(nil), // 34: vtctldata.CompleteSchemaMigrationResponse + (*CreateKeyspaceRequest)(nil), // 35: vtctldata.CreateKeyspaceRequest + (*CreateKeyspaceResponse)(nil), // 36: vtctldata.CreateKeyspaceResponse + (*CreateShardRequest)(nil), // 37: vtctldata.CreateShardRequest + (*CreateShardResponse)(nil), // 38: vtctldata.CreateShardResponse + (*DeleteCellInfoRequest)(nil), // 39: vtctldata.DeleteCellInfoRequest + (*DeleteCellInfoResponse)(nil), // 40: vtctldata.DeleteCellInfoResponse + (*DeleteCellsAliasRequest)(nil), // 41: vtctldata.DeleteCellsAliasRequest + (*DeleteCellsAliasResponse)(nil), // 42: vtctldata.DeleteCellsAliasResponse + (*DeleteKeyspaceRequest)(nil), // 43: vtctldata.DeleteKeyspaceRequest + (*DeleteKeyspaceResponse)(nil), // 44: vtctldata.DeleteKeyspaceResponse + (*DeleteShardsRequest)(nil), // 45: vtctldata.DeleteShardsRequest + (*DeleteShardsResponse)(nil), // 46: vtctldata.DeleteShardsResponse + (*DeleteSrvVSchemaRequest)(nil), // 47: vtctldata.DeleteSrvVSchemaRequest + (*DeleteSrvVSchemaResponse)(nil), // 48: vtctldata.DeleteSrvVSchemaResponse + (*DeleteTabletsRequest)(nil), // 49: vtctldata.DeleteTabletsRequest + (*DeleteTabletsResponse)(nil), // 50: vtctldata.DeleteTabletsResponse + (*EmergencyReparentShardRequest)(nil), // 51: vtctldata.EmergencyReparentShardRequest + (*EmergencyReparentShardResponse)(nil), // 52: vtctldata.EmergencyReparentShardResponse + (*ExecuteFetchAsAppRequest)(nil), // 53: 
vtctldata.ExecuteFetchAsAppRequest + (*ExecuteFetchAsAppResponse)(nil), // 54: vtctldata.ExecuteFetchAsAppResponse + (*ExecuteFetchAsDBARequest)(nil), // 55: vtctldata.ExecuteFetchAsDBARequest + (*ExecuteFetchAsDBAResponse)(nil), // 56: vtctldata.ExecuteFetchAsDBAResponse + (*ExecuteHookRequest)(nil), // 57: vtctldata.ExecuteHookRequest + (*ExecuteHookResponse)(nil), // 58: vtctldata.ExecuteHookResponse + (*FindAllShardsInKeyspaceRequest)(nil), // 59: vtctldata.FindAllShardsInKeyspaceRequest + (*FindAllShardsInKeyspaceResponse)(nil), // 60: vtctldata.FindAllShardsInKeyspaceResponse + (*GetBackupsRequest)(nil), // 61: vtctldata.GetBackupsRequest + (*GetBackupsResponse)(nil), // 62: vtctldata.GetBackupsResponse + (*GetCellInfoRequest)(nil), // 63: vtctldata.GetCellInfoRequest + (*GetCellInfoResponse)(nil), // 64: vtctldata.GetCellInfoResponse + (*GetCellInfoNamesRequest)(nil), // 65: vtctldata.GetCellInfoNamesRequest + (*GetCellInfoNamesResponse)(nil), // 66: vtctldata.GetCellInfoNamesResponse + (*GetCellsAliasesRequest)(nil), // 67: vtctldata.GetCellsAliasesRequest + (*GetCellsAliasesResponse)(nil), // 68: vtctldata.GetCellsAliasesResponse + (*GetFullStatusRequest)(nil), // 69: vtctldata.GetFullStatusRequest + (*GetFullStatusResponse)(nil), // 70: vtctldata.GetFullStatusResponse + (*GetKeyspacesRequest)(nil), // 71: vtctldata.GetKeyspacesRequest + (*GetKeyspacesResponse)(nil), // 72: vtctldata.GetKeyspacesResponse + (*GetKeyspaceRequest)(nil), // 73: vtctldata.GetKeyspaceRequest + (*GetKeyspaceResponse)(nil), // 74: vtctldata.GetKeyspaceResponse + (*GetPermissionsRequest)(nil), // 75: vtctldata.GetPermissionsRequest + (*GetPermissionsResponse)(nil), // 76: vtctldata.GetPermissionsResponse + (*GetRoutingRulesRequest)(nil), // 77: vtctldata.GetRoutingRulesRequest + (*GetRoutingRulesResponse)(nil), // 78: vtctldata.GetRoutingRulesResponse + (*GetSchemaRequest)(nil), // 79: vtctldata.GetSchemaRequest + (*GetSchemaResponse)(nil), // 80: vtctldata.GetSchemaResponse + 
(*GetSchemaMigrationsRequest)(nil), // 81: vtctldata.GetSchemaMigrationsRequest + (*GetSchemaMigrationsResponse)(nil), // 82: vtctldata.GetSchemaMigrationsResponse + (*GetShardRequest)(nil), // 83: vtctldata.GetShardRequest + (*GetShardResponse)(nil), // 84: vtctldata.GetShardResponse + (*GetShardRoutingRulesRequest)(nil), // 85: vtctldata.GetShardRoutingRulesRequest + (*GetShardRoutingRulesResponse)(nil), // 86: vtctldata.GetShardRoutingRulesResponse + (*GetSrvKeyspaceNamesRequest)(nil), // 87: vtctldata.GetSrvKeyspaceNamesRequest + (*GetSrvKeyspaceNamesResponse)(nil), // 88: vtctldata.GetSrvKeyspaceNamesResponse + (*GetSrvKeyspacesRequest)(nil), // 89: vtctldata.GetSrvKeyspacesRequest + (*GetSrvKeyspacesResponse)(nil), // 90: vtctldata.GetSrvKeyspacesResponse + (*UpdateThrottlerConfigRequest)(nil), // 91: vtctldata.UpdateThrottlerConfigRequest + (*UpdateThrottlerConfigResponse)(nil), // 92: vtctldata.UpdateThrottlerConfigResponse + (*GetSrvVSchemaRequest)(nil), // 93: vtctldata.GetSrvVSchemaRequest + (*GetSrvVSchemaResponse)(nil), // 94: vtctldata.GetSrvVSchemaResponse + (*GetSrvVSchemasRequest)(nil), // 95: vtctldata.GetSrvVSchemasRequest + (*GetSrvVSchemasResponse)(nil), // 96: vtctldata.GetSrvVSchemasResponse + (*GetTabletRequest)(nil), // 97: vtctldata.GetTabletRequest + (*GetTabletResponse)(nil), // 98: vtctldata.GetTabletResponse + (*GetTabletsRequest)(nil), // 99: vtctldata.GetTabletsRequest + (*GetTabletsResponse)(nil), // 100: vtctldata.GetTabletsResponse + (*GetTopologyPathRequest)(nil), // 101: vtctldata.GetTopologyPathRequest + (*GetTopologyPathResponse)(nil), // 102: vtctldata.GetTopologyPathResponse + (*TopologyCell)(nil), // 103: vtctldata.TopologyCell + (*GetVSchemaRequest)(nil), // 104: vtctldata.GetVSchemaRequest + (*GetVersionRequest)(nil), // 105: vtctldata.GetVersionRequest + (*GetVersionResponse)(nil), // 106: vtctldata.GetVersionResponse + (*GetVSchemaResponse)(nil), // 107: vtctldata.GetVSchemaResponse + (*GetWorkflowsRequest)(nil), // 
108: vtctldata.GetWorkflowsRequest + (*GetWorkflowsResponse)(nil), // 109: vtctldata.GetWorkflowsResponse + (*InitShardPrimaryRequest)(nil), // 110: vtctldata.InitShardPrimaryRequest + (*InitShardPrimaryResponse)(nil), // 111: vtctldata.InitShardPrimaryResponse + (*LaunchSchemaMigrationRequest)(nil), // 112: vtctldata.LaunchSchemaMigrationRequest + (*LaunchSchemaMigrationResponse)(nil), // 113: vtctldata.LaunchSchemaMigrationResponse + (*LookupVindexCreateRequest)(nil), // 114: vtctldata.LookupVindexCreateRequest + (*LookupVindexCreateResponse)(nil), // 115: vtctldata.LookupVindexCreateResponse + (*LookupVindexExternalizeRequest)(nil), // 116: vtctldata.LookupVindexExternalizeRequest + (*LookupVindexExternalizeResponse)(nil), // 117: vtctldata.LookupVindexExternalizeResponse + (*MaterializeCreateRequest)(nil), // 118: vtctldata.MaterializeCreateRequest + (*MaterializeCreateResponse)(nil), // 119: vtctldata.MaterializeCreateResponse + (*MigrateCreateRequest)(nil), // 120: vtctldata.MigrateCreateRequest + (*MigrateCompleteRequest)(nil), // 121: vtctldata.MigrateCompleteRequest + (*MigrateCompleteResponse)(nil), // 122: vtctldata.MigrateCompleteResponse + (*MountRegisterRequest)(nil), // 123: vtctldata.MountRegisterRequest + (*MountRegisterResponse)(nil), // 124: vtctldata.MountRegisterResponse + (*MountUnregisterRequest)(nil), // 125: vtctldata.MountUnregisterRequest + (*MountUnregisterResponse)(nil), // 126: vtctldata.MountUnregisterResponse + (*MountShowRequest)(nil), // 127: vtctldata.MountShowRequest + (*MountShowResponse)(nil), // 128: vtctldata.MountShowResponse + (*MountListRequest)(nil), // 129: vtctldata.MountListRequest + (*MountListResponse)(nil), // 130: vtctldata.MountListResponse + (*MoveTablesCreateRequest)(nil), // 131: vtctldata.MoveTablesCreateRequest + (*MoveTablesCreateResponse)(nil), // 132: vtctldata.MoveTablesCreateResponse + (*MoveTablesCompleteRequest)(nil), // 133: vtctldata.MoveTablesCompleteRequest + (*MoveTablesCompleteResponse)(nil), // 
134: vtctldata.MoveTablesCompleteResponse + (*PingTabletRequest)(nil), // 135: vtctldata.PingTabletRequest + (*PingTabletResponse)(nil), // 136: vtctldata.PingTabletResponse + (*PlannedReparentShardRequest)(nil), // 137: vtctldata.PlannedReparentShardRequest + (*PlannedReparentShardResponse)(nil), // 138: vtctldata.PlannedReparentShardResponse + (*RebuildKeyspaceGraphRequest)(nil), // 139: vtctldata.RebuildKeyspaceGraphRequest + (*RebuildKeyspaceGraphResponse)(nil), // 140: vtctldata.RebuildKeyspaceGraphResponse + (*RebuildVSchemaGraphRequest)(nil), // 141: vtctldata.RebuildVSchemaGraphRequest + (*RebuildVSchemaGraphResponse)(nil), // 142: vtctldata.RebuildVSchemaGraphResponse + (*RefreshStateRequest)(nil), // 143: vtctldata.RefreshStateRequest + (*RefreshStateResponse)(nil), // 144: vtctldata.RefreshStateResponse + (*RefreshStateByShardRequest)(nil), // 145: vtctldata.RefreshStateByShardRequest + (*RefreshStateByShardResponse)(nil), // 146: vtctldata.RefreshStateByShardResponse + (*ReloadSchemaRequest)(nil), // 147: vtctldata.ReloadSchemaRequest + (*ReloadSchemaResponse)(nil), // 148: vtctldata.ReloadSchemaResponse + (*ReloadSchemaKeyspaceRequest)(nil), // 149: vtctldata.ReloadSchemaKeyspaceRequest + (*ReloadSchemaKeyspaceResponse)(nil), // 150: vtctldata.ReloadSchemaKeyspaceResponse + (*ReloadSchemaShardRequest)(nil), // 151: vtctldata.ReloadSchemaShardRequest + (*ReloadSchemaShardResponse)(nil), // 152: vtctldata.ReloadSchemaShardResponse + (*RemoveBackupRequest)(nil), // 153: vtctldata.RemoveBackupRequest + (*RemoveBackupResponse)(nil), // 154: vtctldata.RemoveBackupResponse + (*RemoveKeyspaceCellRequest)(nil), // 155: vtctldata.RemoveKeyspaceCellRequest + (*RemoveKeyspaceCellResponse)(nil), // 156: vtctldata.RemoveKeyspaceCellResponse + (*RemoveShardCellRequest)(nil), // 157: vtctldata.RemoveShardCellRequest + (*RemoveShardCellResponse)(nil), // 158: vtctldata.RemoveShardCellResponse + (*ReparentTabletRequest)(nil), // 159: vtctldata.ReparentTabletRequest + 
(*ReparentTabletResponse)(nil), // 160: vtctldata.ReparentTabletResponse + (*ReshardCreateRequest)(nil), // 161: vtctldata.ReshardCreateRequest + (*RestoreFromBackupRequest)(nil), // 162: vtctldata.RestoreFromBackupRequest + (*RestoreFromBackupResponse)(nil), // 163: vtctldata.RestoreFromBackupResponse + (*RetrySchemaMigrationRequest)(nil), // 164: vtctldata.RetrySchemaMigrationRequest + (*RetrySchemaMigrationResponse)(nil), // 165: vtctldata.RetrySchemaMigrationResponse + (*RunHealthCheckRequest)(nil), // 166: vtctldata.RunHealthCheckRequest + (*RunHealthCheckResponse)(nil), // 167: vtctldata.RunHealthCheckResponse + (*SetKeyspaceDurabilityPolicyRequest)(nil), // 168: vtctldata.SetKeyspaceDurabilityPolicyRequest + (*SetKeyspaceDurabilityPolicyResponse)(nil), // 169: vtctldata.SetKeyspaceDurabilityPolicyResponse + (*SetKeyspaceServedFromRequest)(nil), // 170: vtctldata.SetKeyspaceServedFromRequest + (*SetKeyspaceServedFromResponse)(nil), // 171: vtctldata.SetKeyspaceServedFromResponse + (*SetKeyspaceShardingInfoRequest)(nil), // 172: vtctldata.SetKeyspaceShardingInfoRequest + (*SetKeyspaceShardingInfoResponse)(nil), // 173: vtctldata.SetKeyspaceShardingInfoResponse + (*SetShardIsPrimaryServingRequest)(nil), // 174: vtctldata.SetShardIsPrimaryServingRequest + (*SetShardIsPrimaryServingResponse)(nil), // 175: vtctldata.SetShardIsPrimaryServingResponse + (*SetShardTabletControlRequest)(nil), // 176: vtctldata.SetShardTabletControlRequest + (*SetShardTabletControlResponse)(nil), // 177: vtctldata.SetShardTabletControlResponse + (*SetWritableRequest)(nil), // 178: vtctldata.SetWritableRequest + (*SetWritableResponse)(nil), // 179: vtctldata.SetWritableResponse + (*ShardReplicationAddRequest)(nil), // 180: vtctldata.ShardReplicationAddRequest + (*ShardReplicationAddResponse)(nil), // 181: vtctldata.ShardReplicationAddResponse + (*ShardReplicationFixRequest)(nil), // 182: vtctldata.ShardReplicationFixRequest + (*ShardReplicationFixResponse)(nil), // 183: 
vtctldata.ShardReplicationFixResponse + (*ShardReplicationPositionsRequest)(nil), // 184: vtctldata.ShardReplicationPositionsRequest + (*ShardReplicationPositionsResponse)(nil), // 185: vtctldata.ShardReplicationPositionsResponse + (*ShardReplicationRemoveRequest)(nil), // 186: vtctldata.ShardReplicationRemoveRequest + (*ShardReplicationRemoveResponse)(nil), // 187: vtctldata.ShardReplicationRemoveResponse + (*SleepTabletRequest)(nil), // 188: vtctldata.SleepTabletRequest + (*SleepTabletResponse)(nil), // 189: vtctldata.SleepTabletResponse + (*SourceShardAddRequest)(nil), // 190: vtctldata.SourceShardAddRequest + (*SourceShardAddResponse)(nil), // 191: vtctldata.SourceShardAddResponse + (*SourceShardDeleteRequest)(nil), // 192: vtctldata.SourceShardDeleteRequest + (*SourceShardDeleteResponse)(nil), // 193: vtctldata.SourceShardDeleteResponse + (*StartReplicationRequest)(nil), // 194: vtctldata.StartReplicationRequest + (*StartReplicationResponse)(nil), // 195: vtctldata.StartReplicationResponse + (*StopReplicationRequest)(nil), // 196: vtctldata.StopReplicationRequest + (*StopReplicationResponse)(nil), // 197: vtctldata.StopReplicationResponse + (*TabletExternallyReparentedRequest)(nil), // 198: vtctldata.TabletExternallyReparentedRequest + (*TabletExternallyReparentedResponse)(nil), // 199: vtctldata.TabletExternallyReparentedResponse + (*UpdateCellInfoRequest)(nil), // 200: vtctldata.UpdateCellInfoRequest + (*UpdateCellInfoResponse)(nil), // 201: vtctldata.UpdateCellInfoResponse + (*UpdateCellsAliasRequest)(nil), // 202: vtctldata.UpdateCellsAliasRequest + (*UpdateCellsAliasResponse)(nil), // 203: vtctldata.UpdateCellsAliasResponse + (*ValidateRequest)(nil), // 204: vtctldata.ValidateRequest + (*ValidateResponse)(nil), // 205: vtctldata.ValidateResponse + (*ValidateKeyspaceRequest)(nil), // 206: vtctldata.ValidateKeyspaceRequest + (*ValidateKeyspaceResponse)(nil), // 207: vtctldata.ValidateKeyspaceResponse + (*ValidateSchemaKeyspaceRequest)(nil), // 208: 
vtctldata.ValidateSchemaKeyspaceRequest + (*ValidateSchemaKeyspaceResponse)(nil), // 209: vtctldata.ValidateSchemaKeyspaceResponse + (*ValidateShardRequest)(nil), // 210: vtctldata.ValidateShardRequest + (*ValidateShardResponse)(nil), // 211: vtctldata.ValidateShardResponse + (*ValidateVersionKeyspaceRequest)(nil), // 212: vtctldata.ValidateVersionKeyspaceRequest + (*ValidateVersionKeyspaceResponse)(nil), // 213: vtctldata.ValidateVersionKeyspaceResponse + (*ValidateVersionShardRequest)(nil), // 214: vtctldata.ValidateVersionShardRequest + (*ValidateVersionShardResponse)(nil), // 215: vtctldata.ValidateVersionShardResponse + (*ValidateVSchemaRequest)(nil), // 216: vtctldata.ValidateVSchemaRequest + (*ValidateVSchemaResponse)(nil), // 217: vtctldata.ValidateVSchemaResponse + (*VDiffCreateRequest)(nil), // 218: vtctldata.VDiffCreateRequest + (*VDiffCreateResponse)(nil), // 219: vtctldata.VDiffCreateResponse + (*VDiffDeleteRequest)(nil), // 220: vtctldata.VDiffDeleteRequest + (*VDiffDeleteResponse)(nil), // 221: vtctldata.VDiffDeleteResponse + (*VDiffResumeRequest)(nil), // 222: vtctldata.VDiffResumeRequest + (*VDiffResumeResponse)(nil), // 223: vtctldata.VDiffResumeResponse + (*VDiffShowRequest)(nil), // 224: vtctldata.VDiffShowRequest + (*VDiffShowResponse)(nil), // 225: vtctldata.VDiffShowResponse + (*VDiffStopRequest)(nil), // 226: vtctldata.VDiffStopRequest + (*VDiffStopResponse)(nil), // 227: vtctldata.VDiffStopResponse + (*WorkflowDeleteRequest)(nil), // 228: vtctldata.WorkflowDeleteRequest + (*WorkflowDeleteResponse)(nil), // 229: vtctldata.WorkflowDeleteResponse + (*WorkflowStatusRequest)(nil), // 230: vtctldata.WorkflowStatusRequest + (*WorkflowStatusResponse)(nil), // 231: vtctldata.WorkflowStatusResponse + (*WorkflowSwitchTrafficRequest)(nil), // 232: vtctldata.WorkflowSwitchTrafficRequest + (*WorkflowSwitchTrafficResponse)(nil), // 233: vtctldata.WorkflowSwitchTrafficResponse + (*WorkflowUpdateRequest)(nil), // 234: vtctldata.WorkflowUpdateRequest + 
(*WorkflowUpdateResponse)(nil), // 235: vtctldata.WorkflowUpdateResponse + nil, // 236: vtctldata.Workflow.ShardStreamsEntry + (*Workflow_ReplicationLocation)(nil), // 237: vtctldata.Workflow.ReplicationLocation + (*Workflow_ShardStream)(nil), // 238: vtctldata.Workflow.ShardStream + (*Workflow_Stream)(nil), // 239: vtctldata.Workflow.Stream + (*Workflow_Stream_CopyState)(nil), // 240: vtctldata.Workflow.Stream.CopyState + (*Workflow_Stream_Log)(nil), // 241: vtctldata.Workflow.Stream.Log + (*Workflow_Stream_ThrottlerStatus)(nil), // 242: vtctldata.Workflow.Stream.ThrottlerStatus + nil, // 243: vtctldata.ApplySchemaResponse.RowsAffectedByShardEntry + nil, // 244: vtctldata.CancelSchemaMigrationResponse.RowsAffectedByShardEntry + nil, // 245: vtctldata.CleanupSchemaMigrationResponse.RowsAffectedByShardEntry + nil, // 246: vtctldata.CompleteSchemaMigrationResponse.RowsAffectedByShardEntry + nil, // 247: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry + nil, // 248: vtctldata.GetCellsAliasesResponse.AliasesEntry + nil, // 249: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry + (*GetSrvKeyspaceNamesResponse_NameList)(nil), // 250: vtctldata.GetSrvKeyspaceNamesResponse.NameList + nil, // 251: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry + nil, // 252: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry + nil, // 253: vtctldata.LaunchSchemaMigrationResponse.RowsAffectedByShardEntry + (*MoveTablesCreateResponse_TabletInfo)(nil), // 254: vtctldata.MoveTablesCreateResponse.TabletInfo + nil, // 255: vtctldata.RetrySchemaMigrationResponse.RowsAffectedByShardEntry + nil, // 256: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry + nil, // 257: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry + nil, // 258: vtctldata.ValidateResponse.ResultsByKeyspaceEntry + nil, // 259: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry + nil, // 260: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry + nil, // 261: 
vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry + nil, // 262: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry + nil, // 263: vtctldata.VDiffShowResponse.TabletResponsesEntry + (*WorkflowDeleteResponse_TabletInfo)(nil), // 264: vtctldata.WorkflowDeleteResponse.TabletInfo + (*WorkflowStatusResponse_TableCopyState)(nil), // 265: vtctldata.WorkflowStatusResponse.TableCopyState + (*WorkflowStatusResponse_ShardStreamState)(nil), // 266: vtctldata.WorkflowStatusResponse.ShardStreamState + (*WorkflowStatusResponse_ShardStreams)(nil), // 267: vtctldata.WorkflowStatusResponse.ShardStreams + nil, // 268: vtctldata.WorkflowStatusResponse.TableCopyStateEntry + nil, // 269: vtctldata.WorkflowStatusResponse.ShardStreamsEntry + (*WorkflowUpdateResponse_TabletInfo)(nil), // 270: vtctldata.WorkflowUpdateResponse.TabletInfo + (*logutil.Event)(nil), // 271: logutil.Event + (tabletmanagerdata.TabletSelectionPreference)(0), // 272: tabletmanagerdata.TabletSelectionPreference + (*topodata.Keyspace)(nil), // 273: topodata.Keyspace + (*vttime.Time)(nil), // 274: vttime.Time + (*topodata.TabletAlias)(nil), // 275: topodata.TabletAlias + (*vttime.Duration)(nil), // 276: vttime.Duration + (*topodata.Shard)(nil), // 277: topodata.Shard + (*topodata.CellInfo)(nil), // 278: topodata.CellInfo + (*vschema.RoutingRules)(nil), // 279: vschema.RoutingRules + (*vschema.ShardRoutingRules)(nil), // 280: vschema.ShardRoutingRules + (*vtrpc.CallerID)(nil), // 281: vtrpc.CallerID + (*vschema.Keyspace)(nil), // 282: vschema.Keyspace + (topodata.TabletType)(0), // 283: topodata.TabletType + (*topodata.Tablet)(nil), // 284: topodata.Tablet + (*topodata.Keyspace_ServedFrom)(nil), // 285: topodata.Keyspace.ServedFrom + (topodata.KeyspaceType)(0), // 286: topodata.KeyspaceType + (*query.QueryResult)(nil), // 287: query.QueryResult + (*tabletmanagerdata.ExecuteHookRequest)(nil), // 288: tabletmanagerdata.ExecuteHookRequest + (*tabletmanagerdata.ExecuteHookResponse)(nil), // 289: 
tabletmanagerdata.ExecuteHookResponse + (*mysqlctl.BackupInfo)(nil), // 290: mysqlctl.BackupInfo + (*replicationdata.FullStatus)(nil), // 291: replicationdata.FullStatus + (*tabletmanagerdata.Permissions)(nil), // 292: tabletmanagerdata.Permissions + (*tabletmanagerdata.SchemaDefinition)(nil), // 293: tabletmanagerdata.SchemaDefinition + (*topodata.ThrottledAppRule)(nil), // 294: topodata.ThrottledAppRule + (*vschema.SrvVSchema)(nil), // 295: vschema.SrvVSchema + (*topodata.ShardReplicationError)(nil), // 296: topodata.ShardReplicationError + (*topodata.KeyRange)(nil), // 297: topodata.KeyRange + (*topodata.CellsAlias)(nil), // 298: topodata.CellsAlias + (*tabletmanagerdata.UpdateVReplicationWorkflowRequest)(nil), // 299: tabletmanagerdata.UpdateVReplicationWorkflowRequest + (*topodata.Shard_TabletControl)(nil), // 300: topodata.Shard.TabletControl + (*binlogdata.BinlogSource)(nil), // 301: binlogdata.BinlogSource + (*topodata.SrvKeyspace)(nil), // 302: topodata.SrvKeyspace + (*replicationdata.Status)(nil), // 303: replicationdata.Status + (*tabletmanagerdata.VDiffResponse)(nil), // 304: tabletmanagerdata.VDiffResponse } var file_vtctldata_proto_depIdxs = []int32{ - 202, // 0: vtctldata.ExecuteVtctlCommandResponse.event:type_name -> logutil.Event - 3, // 1: vtctldata.MaterializeSettings.table_settings:type_name -> vtctldata.TableMaterializeSettings + 271, // 0: vtctldata.ExecuteVtctlCommandResponse.event:type_name -> logutil.Event + 6, // 1: vtctldata.MaterializeSettings.table_settings:type_name -> vtctldata.TableMaterializeSettings 0, // 2: vtctldata.MaterializeSettings.materialization_intent:type_name -> vtctldata.MaterializationIntent - 203, // 3: vtctldata.Keyspace.keyspace:type_name -> topodata.Keyspace - 204, // 4: vtctldata.Shard.shard:type_name -> topodata.Shard - 183, // 5: vtctldata.Workflow.source:type_name -> vtctldata.Workflow.ReplicationLocation - 183, // 6: vtctldata.Workflow.target:type_name -> vtctldata.Workflow.ReplicationLocation - 182, // 7: 
vtctldata.Workflow.shard_streams:type_name -> vtctldata.Workflow.ShardStreamsEntry - 205, // 8: vtctldata.AddCellInfoRequest.cell_info:type_name -> topodata.CellInfo - 206, // 9: vtctldata.ApplyRoutingRulesRequest.routing_rules:type_name -> vschema.RoutingRules - 207, // 10: vtctldata.ApplyShardRoutingRulesRequest.shard_routing_rules:type_name -> vschema.ShardRoutingRules - 208, // 11: vtctldata.ApplySchemaRequest.wait_replicas_timeout:type_name -> vttime.Duration - 209, // 12: vtctldata.ApplySchemaRequest.caller_id:type_name -> vtrpc.CallerID - 210, // 13: vtctldata.ApplyVSchemaRequest.v_schema:type_name -> vschema.Keyspace - 210, // 14: vtctldata.ApplyVSchemaResponse.v_schema:type_name -> vschema.Keyspace - 211, // 15: vtctldata.BackupRequest.tablet_alias:type_name -> topodata.TabletAlias - 211, // 16: vtctldata.BackupResponse.tablet_alias:type_name -> topodata.TabletAlias - 202, // 17: vtctldata.BackupResponse.event:type_name -> logutil.Event - 211, // 18: vtctldata.ChangeTabletTypeRequest.tablet_alias:type_name -> topodata.TabletAlias - 212, // 19: vtctldata.ChangeTabletTypeRequest.db_type:type_name -> topodata.TabletType - 213, // 20: vtctldata.ChangeTabletTypeResponse.before_tablet:type_name -> topodata.Tablet - 213, // 21: vtctldata.ChangeTabletTypeResponse.after_tablet:type_name -> topodata.Tablet - 214, // 22: vtctldata.CreateKeyspaceRequest.served_froms:type_name -> topodata.Keyspace.ServedFrom - 215, // 23: vtctldata.CreateKeyspaceRequest.type:type_name -> topodata.KeyspaceType - 216, // 24: vtctldata.CreateKeyspaceRequest.snapshot_time:type_name -> vttime.Time - 5, // 25: vtctldata.CreateKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace - 5, // 26: vtctldata.CreateShardResponse.keyspace:type_name -> vtctldata.Keyspace - 6, // 27: vtctldata.CreateShardResponse.shard:type_name -> vtctldata.Shard - 6, // 28: vtctldata.DeleteShardsRequest.shards:type_name -> vtctldata.Shard - 211, // 29: vtctldata.DeleteTabletsRequest.tablet_aliases:type_name -> 
topodata.TabletAlias - 211, // 30: vtctldata.EmergencyReparentShardRequest.new_primary:type_name -> topodata.TabletAlias - 211, // 31: vtctldata.EmergencyReparentShardRequest.ignore_replicas:type_name -> topodata.TabletAlias - 208, // 32: vtctldata.EmergencyReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration - 211, // 33: vtctldata.EmergencyReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias - 202, // 34: vtctldata.EmergencyReparentShardResponse.events:type_name -> logutil.Event - 211, // 35: vtctldata.ExecuteFetchAsAppRequest.tablet_alias:type_name -> topodata.TabletAlias - 217, // 36: vtctldata.ExecuteFetchAsAppResponse.result:type_name -> query.QueryResult - 211, // 37: vtctldata.ExecuteFetchAsDBARequest.tablet_alias:type_name -> topodata.TabletAlias - 217, // 38: vtctldata.ExecuteFetchAsDBAResponse.result:type_name -> query.QueryResult - 211, // 39: vtctldata.ExecuteHookRequest.tablet_alias:type_name -> topodata.TabletAlias - 218, // 40: vtctldata.ExecuteHookRequest.tablet_hook_request:type_name -> tabletmanagerdata.ExecuteHookRequest - 219, // 41: vtctldata.ExecuteHookResponse.hook_result:type_name -> tabletmanagerdata.ExecuteHookResponse - 188, // 42: vtctldata.FindAllShardsInKeyspaceResponse.shards:type_name -> vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry - 220, // 43: vtctldata.GetBackupsResponse.backups:type_name -> mysqlctl.BackupInfo - 205, // 44: vtctldata.GetCellInfoResponse.cell_info:type_name -> topodata.CellInfo - 189, // 45: vtctldata.GetCellsAliasesResponse.aliases:type_name -> vtctldata.GetCellsAliasesResponse.AliasesEntry - 211, // 46: vtctldata.GetFullStatusRequest.tablet_alias:type_name -> topodata.TabletAlias - 221, // 47: vtctldata.GetFullStatusResponse.status:type_name -> replicationdata.FullStatus - 5, // 48: vtctldata.GetKeyspacesResponse.keyspaces:type_name -> vtctldata.Keyspace - 5, // 49: vtctldata.GetKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace - 211, // 50: 
vtctldata.GetPermissionsRequest.tablet_alias:type_name -> topodata.TabletAlias - 222, // 51: vtctldata.GetPermissionsResponse.permissions:type_name -> tabletmanagerdata.Permissions - 206, // 52: vtctldata.GetRoutingRulesResponse.routing_rules:type_name -> vschema.RoutingRules - 211, // 53: vtctldata.GetSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias - 223, // 54: vtctldata.GetSchemaResponse.schema:type_name -> tabletmanagerdata.SchemaDefinition - 6, // 55: vtctldata.GetShardResponse.shard:type_name -> vtctldata.Shard - 207, // 56: vtctldata.GetShardRoutingRulesResponse.shard_routing_rules:type_name -> vschema.ShardRoutingRules - 190, // 57: vtctldata.GetSrvKeyspaceNamesResponse.names:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry - 192, // 58: vtctldata.GetSrvKeyspacesResponse.srv_keyspaces:type_name -> vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry - 224, // 59: vtctldata.GetSrvVSchemaResponse.srv_v_schema:type_name -> vschema.SrvVSchema - 193, // 60: vtctldata.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry - 211, // 61: vtctldata.GetTabletRequest.tablet_alias:type_name -> topodata.TabletAlias - 213, // 62: vtctldata.GetTabletResponse.tablet:type_name -> topodata.Tablet - 211, // 63: vtctldata.GetTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias - 212, // 64: vtctldata.GetTabletsRequest.tablet_type:type_name -> topodata.TabletType - 213, // 65: vtctldata.GetTabletsResponse.tablets:type_name -> topodata.Tablet - 91, // 66: vtctldata.GetTopologyPathResponse.cell:type_name -> vtctldata.TopologyCell - 211, // 67: vtctldata.GetVersionRequest.tablet_alias:type_name -> topodata.TabletAlias - 210, // 68: vtctldata.GetVSchemaResponse.v_schema:type_name -> vschema.Keyspace - 7, // 69: vtctldata.GetWorkflowsResponse.workflows:type_name -> vtctldata.Workflow - 211, // 70: vtctldata.InitShardPrimaryRequest.primary_elect_tablet_alias:type_name -> topodata.TabletAlias - 208, // 
71: vtctldata.InitShardPrimaryRequest.wait_replicas_timeout:type_name -> vttime.Duration - 202, // 72: vtctldata.InitShardPrimaryResponse.events:type_name -> logutil.Event - 211, // 73: vtctldata.PingTabletRequest.tablet_alias:type_name -> topodata.TabletAlias - 211, // 74: vtctldata.PlannedReparentShardRequest.new_primary:type_name -> topodata.TabletAlias - 211, // 75: vtctldata.PlannedReparentShardRequest.avoid_primary:type_name -> topodata.TabletAlias - 208, // 76: vtctldata.PlannedReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration - 211, // 77: vtctldata.PlannedReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias - 202, // 78: vtctldata.PlannedReparentShardResponse.events:type_name -> logutil.Event - 211, // 79: vtctldata.RefreshStateRequest.tablet_alias:type_name -> topodata.TabletAlias - 211, // 80: vtctldata.ReloadSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias - 202, // 81: vtctldata.ReloadSchemaKeyspaceResponse.events:type_name -> logutil.Event - 202, // 82: vtctldata.ReloadSchemaShardResponse.events:type_name -> logutil.Event - 211, // 83: vtctldata.ReparentTabletRequest.tablet:type_name -> topodata.TabletAlias - 211, // 84: vtctldata.ReparentTabletResponse.primary:type_name -> topodata.TabletAlias - 211, // 85: vtctldata.RestoreFromBackupRequest.tablet_alias:type_name -> topodata.TabletAlias - 216, // 86: vtctldata.RestoreFromBackupRequest.backup_time:type_name -> vttime.Time - 211, // 87: vtctldata.RestoreFromBackupResponse.tablet_alias:type_name -> topodata.TabletAlias - 202, // 88: vtctldata.RestoreFromBackupResponse.event:type_name -> logutil.Event - 211, // 89: vtctldata.RunHealthCheckRequest.tablet_alias:type_name -> topodata.TabletAlias - 203, // 90: vtctldata.SetKeyspaceDurabilityPolicyResponse.keyspace:type_name -> topodata.Keyspace - 212, // 91: vtctldata.SetKeyspaceServedFromRequest.tablet_type:type_name -> topodata.TabletType - 203, // 92: 
vtctldata.SetKeyspaceServedFromResponse.keyspace:type_name -> topodata.Keyspace - 203, // 93: vtctldata.SetKeyspaceShardingInfoResponse.keyspace:type_name -> topodata.Keyspace - 204, // 94: vtctldata.SetShardIsPrimaryServingResponse.shard:type_name -> topodata.Shard - 212, // 95: vtctldata.SetShardTabletControlRequest.tablet_type:type_name -> topodata.TabletType - 204, // 96: vtctldata.SetShardTabletControlResponse.shard:type_name -> topodata.Shard - 211, // 97: vtctldata.SetWritableRequest.tablet_alias:type_name -> topodata.TabletAlias - 211, // 98: vtctldata.ShardReplicationAddRequest.tablet_alias:type_name -> topodata.TabletAlias - 225, // 99: vtctldata.ShardReplicationFixResponse.error:type_name -> topodata.ShardReplicationError - 194, // 100: vtctldata.ShardReplicationPositionsResponse.replication_statuses:type_name -> vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry - 195, // 101: vtctldata.ShardReplicationPositionsResponse.tablet_map:type_name -> vtctldata.ShardReplicationPositionsResponse.TabletMapEntry - 211, // 102: vtctldata.ShardReplicationRemoveRequest.tablet_alias:type_name -> topodata.TabletAlias - 211, // 103: vtctldata.SleepTabletRequest.tablet_alias:type_name -> topodata.TabletAlias - 208, // 104: vtctldata.SleepTabletRequest.duration:type_name -> vttime.Duration - 226, // 105: vtctldata.SourceShardAddRequest.key_range:type_name -> topodata.KeyRange - 204, // 106: vtctldata.SourceShardAddResponse.shard:type_name -> topodata.Shard - 204, // 107: vtctldata.SourceShardDeleteResponse.shard:type_name -> topodata.Shard - 211, // 108: vtctldata.StartReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias - 211, // 109: vtctldata.StopReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias - 211, // 110: vtctldata.TabletExternallyReparentedRequest.tablet:type_name -> topodata.TabletAlias - 211, // 111: vtctldata.TabletExternallyReparentedResponse.new_primary:type_name -> topodata.TabletAlias - 211, // 112: 
vtctldata.TabletExternallyReparentedResponse.old_primary:type_name -> topodata.TabletAlias - 205, // 113: vtctldata.UpdateCellInfoRequest.cell_info:type_name -> topodata.CellInfo - 205, // 114: vtctldata.UpdateCellInfoResponse.cell_info:type_name -> topodata.CellInfo - 227, // 115: vtctldata.UpdateCellsAliasRequest.cells_alias:type_name -> topodata.CellsAlias - 227, // 116: vtctldata.UpdateCellsAliasResponse.cells_alias:type_name -> topodata.CellsAlias - 196, // 117: vtctldata.ValidateResponse.results_by_keyspace:type_name -> vtctldata.ValidateResponse.ResultsByKeyspaceEntry - 197, // 118: vtctldata.ValidateKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry - 198, // 119: vtctldata.ValidateSchemaKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry - 199, // 120: vtctldata.ValidateVersionKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry - 200, // 121: vtctldata.ValidateVSchemaResponse.results_by_shard:type_name -> vtctldata.ValidateVSchemaResponse.ResultsByShardEntry - 228, // 122: vtctldata.WorkflowUpdateRequest.tablet_request:type_name -> tabletmanagerdata.UpdateVRWorkflowRequest - 201, // 123: vtctldata.WorkflowUpdateResponse.details:type_name -> vtctldata.WorkflowUpdateResponse.TabletInfo - 184, // 124: vtctldata.Workflow.ShardStreamsEntry.value:type_name -> vtctldata.Workflow.ShardStream - 185, // 125: vtctldata.Workflow.ShardStream.streams:type_name -> vtctldata.Workflow.Stream - 229, // 126: vtctldata.Workflow.ShardStream.tablet_controls:type_name -> topodata.Shard.TabletControl - 211, // 127: vtctldata.Workflow.Stream.tablet:type_name -> topodata.TabletAlias - 230, // 128: vtctldata.Workflow.Stream.binlog_source:type_name -> binlogdata.BinlogSource - 216, // 129: vtctldata.Workflow.Stream.transaction_timestamp:type_name -> vttime.Time - 216, // 130: 
vtctldata.Workflow.Stream.time_updated:type_name -> vttime.Time - 186, // 131: vtctldata.Workflow.Stream.copy_states:type_name -> vtctldata.Workflow.Stream.CopyState - 187, // 132: vtctldata.Workflow.Stream.logs:type_name -> vtctldata.Workflow.Stream.Log - 216, // 133: vtctldata.Workflow.Stream.Log.created_at:type_name -> vttime.Time - 216, // 134: vtctldata.Workflow.Stream.Log.updated_at:type_name -> vttime.Time - 6, // 135: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry.value:type_name -> vtctldata.Shard - 227, // 136: vtctldata.GetCellsAliasesResponse.AliasesEntry.value:type_name -> topodata.CellsAlias - 191, // 137: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry.value:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NameList - 231, // 138: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry.value:type_name -> topodata.SrvKeyspace - 224, // 139: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry.value:type_name -> vschema.SrvVSchema - 232, // 140: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry.value:type_name -> replicationdata.Status - 213, // 141: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry.value:type_name -> topodata.Tablet - 169, // 142: vtctldata.ValidateResponse.ResultsByKeyspaceEntry.value:type_name -> vtctldata.ValidateKeyspaceResponse - 173, // 143: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse - 173, // 144: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse - 173, // 145: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse - 173, // 146: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse - 147, // [147:147] is the sub-list for method output_type - 147, // [147:147] is the sub-list for method input_type - 147, // [147:147] is the sub-list for extension type_name - 147, // 
[147:147] is the sub-list for extension extendee - 0, // [0:147] is the sub-list for field type_name + 272, // 3: vtctldata.MaterializeSettings.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 273, // 4: vtctldata.Keyspace.keyspace:type_name -> topodata.Keyspace + 2, // 5: vtctldata.SchemaMigration.strategy:type_name -> vtctldata.SchemaMigration.Strategy + 274, // 6: vtctldata.SchemaMigration.added_at:type_name -> vttime.Time + 274, // 7: vtctldata.SchemaMigration.requested_at:type_name -> vttime.Time + 274, // 8: vtctldata.SchemaMigration.ready_at:type_name -> vttime.Time + 274, // 9: vtctldata.SchemaMigration.started_at:type_name -> vttime.Time + 274, // 10: vtctldata.SchemaMigration.liveness_timestamp:type_name -> vttime.Time + 274, // 11: vtctldata.SchemaMigration.completed_at:type_name -> vttime.Time + 274, // 12: vtctldata.SchemaMigration.cleaned_up_at:type_name -> vttime.Time + 3, // 13: vtctldata.SchemaMigration.status:type_name -> vtctldata.SchemaMigration.Status + 275, // 14: vtctldata.SchemaMigration.tablet:type_name -> topodata.TabletAlias + 276, // 15: vtctldata.SchemaMigration.artifact_retention:type_name -> vttime.Duration + 274, // 16: vtctldata.SchemaMigration.last_throttled_at:type_name -> vttime.Time + 274, // 17: vtctldata.SchemaMigration.cancelled_at:type_name -> vttime.Time + 274, // 18: vtctldata.SchemaMigration.reviewed_at:type_name -> vttime.Time + 274, // 19: vtctldata.SchemaMigration.ready_to_complete_at:type_name -> vttime.Time + 277, // 20: vtctldata.Shard.shard:type_name -> topodata.Shard + 237, // 21: vtctldata.Workflow.source:type_name -> vtctldata.Workflow.ReplicationLocation + 237, // 22: vtctldata.Workflow.target:type_name -> vtctldata.Workflow.ReplicationLocation + 236, // 23: vtctldata.Workflow.shard_streams:type_name -> vtctldata.Workflow.ShardStreamsEntry + 278, // 24: vtctldata.AddCellInfoRequest.cell_info:type_name -> topodata.CellInfo + 279, // 25: 
vtctldata.ApplyRoutingRulesRequest.routing_rules:type_name -> vschema.RoutingRules + 280, // 26: vtctldata.ApplyShardRoutingRulesRequest.shard_routing_rules:type_name -> vschema.ShardRoutingRules + 276, // 27: vtctldata.ApplySchemaRequest.wait_replicas_timeout:type_name -> vttime.Duration + 281, // 28: vtctldata.ApplySchemaRequest.caller_id:type_name -> vtrpc.CallerID + 243, // 29: vtctldata.ApplySchemaResponse.rows_affected_by_shard:type_name -> vtctldata.ApplySchemaResponse.RowsAffectedByShardEntry + 282, // 30: vtctldata.ApplyVSchemaRequest.v_schema:type_name -> vschema.Keyspace + 282, // 31: vtctldata.ApplyVSchemaResponse.v_schema:type_name -> vschema.Keyspace + 275, // 32: vtctldata.BackupRequest.tablet_alias:type_name -> topodata.TabletAlias + 275, // 33: vtctldata.BackupResponse.tablet_alias:type_name -> topodata.TabletAlias + 271, // 34: vtctldata.BackupResponse.event:type_name -> logutil.Event + 244, // 35: vtctldata.CancelSchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.CancelSchemaMigrationResponse.RowsAffectedByShardEntry + 275, // 36: vtctldata.ChangeTabletTypeRequest.tablet_alias:type_name -> topodata.TabletAlias + 283, // 37: vtctldata.ChangeTabletTypeRequest.db_type:type_name -> topodata.TabletType + 284, // 38: vtctldata.ChangeTabletTypeResponse.before_tablet:type_name -> topodata.Tablet + 284, // 39: vtctldata.ChangeTabletTypeResponse.after_tablet:type_name -> topodata.Tablet + 245, // 40: vtctldata.CleanupSchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.CleanupSchemaMigrationResponse.RowsAffectedByShardEntry + 246, // 41: vtctldata.CompleteSchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.CompleteSchemaMigrationResponse.RowsAffectedByShardEntry + 285, // 42: vtctldata.CreateKeyspaceRequest.served_froms:type_name -> topodata.Keyspace.ServedFrom + 286, // 43: vtctldata.CreateKeyspaceRequest.type:type_name -> topodata.KeyspaceType + 274, // 44: 
vtctldata.CreateKeyspaceRequest.snapshot_time:type_name -> vttime.Time + 8, // 45: vtctldata.CreateKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace + 8, // 46: vtctldata.CreateShardResponse.keyspace:type_name -> vtctldata.Keyspace + 10, // 47: vtctldata.CreateShardResponse.shard:type_name -> vtctldata.Shard + 10, // 48: vtctldata.DeleteShardsRequest.shards:type_name -> vtctldata.Shard + 275, // 49: vtctldata.DeleteTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias + 275, // 50: vtctldata.EmergencyReparentShardRequest.new_primary:type_name -> topodata.TabletAlias + 275, // 51: vtctldata.EmergencyReparentShardRequest.ignore_replicas:type_name -> topodata.TabletAlias + 276, // 52: vtctldata.EmergencyReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration + 275, // 53: vtctldata.EmergencyReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias + 271, // 54: vtctldata.EmergencyReparentShardResponse.events:type_name -> logutil.Event + 275, // 55: vtctldata.ExecuteFetchAsAppRequest.tablet_alias:type_name -> topodata.TabletAlias + 287, // 56: vtctldata.ExecuteFetchAsAppResponse.result:type_name -> query.QueryResult + 275, // 57: vtctldata.ExecuteFetchAsDBARequest.tablet_alias:type_name -> topodata.TabletAlias + 287, // 58: vtctldata.ExecuteFetchAsDBAResponse.result:type_name -> query.QueryResult + 275, // 59: vtctldata.ExecuteHookRequest.tablet_alias:type_name -> topodata.TabletAlias + 288, // 60: vtctldata.ExecuteHookRequest.tablet_hook_request:type_name -> tabletmanagerdata.ExecuteHookRequest + 289, // 61: vtctldata.ExecuteHookResponse.hook_result:type_name -> tabletmanagerdata.ExecuteHookResponse + 247, // 62: vtctldata.FindAllShardsInKeyspaceResponse.shards:type_name -> vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry + 290, // 63: vtctldata.GetBackupsResponse.backups:type_name -> mysqlctl.BackupInfo + 278, // 64: vtctldata.GetCellInfoResponse.cell_info:type_name -> topodata.CellInfo + 248, // 65: 
vtctldata.GetCellsAliasesResponse.aliases:type_name -> vtctldata.GetCellsAliasesResponse.AliasesEntry + 275, // 66: vtctldata.GetFullStatusRequest.tablet_alias:type_name -> topodata.TabletAlias + 291, // 67: vtctldata.GetFullStatusResponse.status:type_name -> replicationdata.FullStatus + 8, // 68: vtctldata.GetKeyspacesResponse.keyspaces:type_name -> vtctldata.Keyspace + 8, // 69: vtctldata.GetKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace + 275, // 70: vtctldata.GetPermissionsRequest.tablet_alias:type_name -> topodata.TabletAlias + 292, // 71: vtctldata.GetPermissionsResponse.permissions:type_name -> tabletmanagerdata.Permissions + 279, // 72: vtctldata.GetRoutingRulesResponse.routing_rules:type_name -> vschema.RoutingRules + 275, // 73: vtctldata.GetSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias + 293, // 74: vtctldata.GetSchemaResponse.schema:type_name -> tabletmanagerdata.SchemaDefinition + 3, // 75: vtctldata.GetSchemaMigrationsRequest.status:type_name -> vtctldata.SchemaMigration.Status + 276, // 76: vtctldata.GetSchemaMigrationsRequest.recent:type_name -> vttime.Duration + 1, // 77: vtctldata.GetSchemaMigrationsRequest.order:type_name -> vtctldata.QueryOrdering + 9, // 78: vtctldata.GetSchemaMigrationsResponse.migrations:type_name -> vtctldata.SchemaMigration + 10, // 79: vtctldata.GetShardResponse.shard:type_name -> vtctldata.Shard + 280, // 80: vtctldata.GetShardRoutingRulesResponse.shard_routing_rules:type_name -> vschema.ShardRoutingRules + 249, // 81: vtctldata.GetSrvKeyspaceNamesResponse.names:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry + 251, // 82: vtctldata.GetSrvKeyspacesResponse.srv_keyspaces:type_name -> vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry + 294, // 83: vtctldata.UpdateThrottlerConfigRequest.throttled_app:type_name -> topodata.ThrottledAppRule + 295, // 84: vtctldata.GetSrvVSchemaResponse.srv_v_schema:type_name -> vschema.SrvVSchema + 252, // 85: 
vtctldata.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry + 275, // 86: vtctldata.GetTabletRequest.tablet_alias:type_name -> topodata.TabletAlias + 284, // 87: vtctldata.GetTabletResponse.tablet:type_name -> topodata.Tablet + 275, // 88: vtctldata.GetTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias + 283, // 89: vtctldata.GetTabletsRequest.tablet_type:type_name -> topodata.TabletType + 284, // 90: vtctldata.GetTabletsResponse.tablets:type_name -> topodata.Tablet + 103, // 91: vtctldata.GetTopologyPathResponse.cell:type_name -> vtctldata.TopologyCell + 275, // 92: vtctldata.GetVersionRequest.tablet_alias:type_name -> topodata.TabletAlias + 282, // 93: vtctldata.GetVSchemaResponse.v_schema:type_name -> vschema.Keyspace + 11, // 94: vtctldata.GetWorkflowsResponse.workflows:type_name -> vtctldata.Workflow + 275, // 95: vtctldata.InitShardPrimaryRequest.primary_elect_tablet_alias:type_name -> topodata.TabletAlias + 276, // 96: vtctldata.InitShardPrimaryRequest.wait_replicas_timeout:type_name -> vttime.Duration + 271, // 97: vtctldata.InitShardPrimaryResponse.events:type_name -> logutil.Event + 253, // 98: vtctldata.LaunchSchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.LaunchSchemaMigrationResponse.RowsAffectedByShardEntry + 282, // 99: vtctldata.LookupVindexCreateRequest.vindex:type_name -> vschema.Keyspace + 283, // 100: vtctldata.LookupVindexCreateRequest.tablet_types:type_name -> topodata.TabletType + 272, // 101: vtctldata.LookupVindexCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 7, // 102: vtctldata.MaterializeCreateRequest.settings:type_name -> vtctldata.MaterializeSettings + 283, // 103: vtctldata.MigrateCreateRequest.tablet_types:type_name -> topodata.TabletType + 272, // 104: vtctldata.MigrateCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 283, // 105: 
vtctldata.MoveTablesCreateRequest.tablet_types:type_name -> topodata.TabletType + 272, // 106: vtctldata.MoveTablesCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 254, // 107: vtctldata.MoveTablesCreateResponse.details:type_name -> vtctldata.MoveTablesCreateResponse.TabletInfo + 275, // 108: vtctldata.PingTabletRequest.tablet_alias:type_name -> topodata.TabletAlias + 275, // 109: vtctldata.PlannedReparentShardRequest.new_primary:type_name -> topodata.TabletAlias + 275, // 110: vtctldata.PlannedReparentShardRequest.avoid_primary:type_name -> topodata.TabletAlias + 276, // 111: vtctldata.PlannedReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration + 275, // 112: vtctldata.PlannedReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias + 271, // 113: vtctldata.PlannedReparentShardResponse.events:type_name -> logutil.Event + 275, // 114: vtctldata.RefreshStateRequest.tablet_alias:type_name -> topodata.TabletAlias + 275, // 115: vtctldata.ReloadSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias + 271, // 116: vtctldata.ReloadSchemaKeyspaceResponse.events:type_name -> logutil.Event + 271, // 117: vtctldata.ReloadSchemaShardResponse.events:type_name -> logutil.Event + 275, // 118: vtctldata.ReparentTabletRequest.tablet:type_name -> topodata.TabletAlias + 275, // 119: vtctldata.ReparentTabletResponse.primary:type_name -> topodata.TabletAlias + 283, // 120: vtctldata.ReshardCreateRequest.tablet_types:type_name -> topodata.TabletType + 272, // 121: vtctldata.ReshardCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 275, // 122: vtctldata.RestoreFromBackupRequest.tablet_alias:type_name -> topodata.TabletAlias + 274, // 123: vtctldata.RestoreFromBackupRequest.backup_time:type_name -> vttime.Time + 274, // 124: vtctldata.RestoreFromBackupRequest.restore_to_timestamp:type_name -> vttime.Time + 275, // 125: 
vtctldata.RestoreFromBackupResponse.tablet_alias:type_name -> topodata.TabletAlias + 271, // 126: vtctldata.RestoreFromBackupResponse.event:type_name -> logutil.Event + 255, // 127: vtctldata.RetrySchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.RetrySchemaMigrationResponse.RowsAffectedByShardEntry + 275, // 128: vtctldata.RunHealthCheckRequest.tablet_alias:type_name -> topodata.TabletAlias + 273, // 129: vtctldata.SetKeyspaceDurabilityPolicyResponse.keyspace:type_name -> topodata.Keyspace + 283, // 130: vtctldata.SetKeyspaceServedFromRequest.tablet_type:type_name -> topodata.TabletType + 273, // 131: vtctldata.SetKeyspaceServedFromResponse.keyspace:type_name -> topodata.Keyspace + 273, // 132: vtctldata.SetKeyspaceShardingInfoResponse.keyspace:type_name -> topodata.Keyspace + 277, // 133: vtctldata.SetShardIsPrimaryServingResponse.shard:type_name -> topodata.Shard + 283, // 134: vtctldata.SetShardTabletControlRequest.tablet_type:type_name -> topodata.TabletType + 277, // 135: vtctldata.SetShardTabletControlResponse.shard:type_name -> topodata.Shard + 275, // 136: vtctldata.SetWritableRequest.tablet_alias:type_name -> topodata.TabletAlias + 275, // 137: vtctldata.ShardReplicationAddRequest.tablet_alias:type_name -> topodata.TabletAlias + 296, // 138: vtctldata.ShardReplicationFixResponse.error:type_name -> topodata.ShardReplicationError + 256, // 139: vtctldata.ShardReplicationPositionsResponse.replication_statuses:type_name -> vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry + 257, // 140: vtctldata.ShardReplicationPositionsResponse.tablet_map:type_name -> vtctldata.ShardReplicationPositionsResponse.TabletMapEntry + 275, // 141: vtctldata.ShardReplicationRemoveRequest.tablet_alias:type_name -> topodata.TabletAlias + 275, // 142: vtctldata.SleepTabletRequest.tablet_alias:type_name -> topodata.TabletAlias + 276, // 143: vtctldata.SleepTabletRequest.duration:type_name -> vttime.Duration + 297, // 144: 
vtctldata.SourceShardAddRequest.key_range:type_name -> topodata.KeyRange + 277, // 145: vtctldata.SourceShardAddResponse.shard:type_name -> topodata.Shard + 277, // 146: vtctldata.SourceShardDeleteResponse.shard:type_name -> topodata.Shard + 275, // 147: vtctldata.StartReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias + 275, // 148: vtctldata.StopReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias + 275, // 149: vtctldata.TabletExternallyReparentedRequest.tablet:type_name -> topodata.TabletAlias + 275, // 150: vtctldata.TabletExternallyReparentedResponse.new_primary:type_name -> topodata.TabletAlias + 275, // 151: vtctldata.TabletExternallyReparentedResponse.old_primary:type_name -> topodata.TabletAlias + 278, // 152: vtctldata.UpdateCellInfoRequest.cell_info:type_name -> topodata.CellInfo + 278, // 153: vtctldata.UpdateCellInfoResponse.cell_info:type_name -> topodata.CellInfo + 298, // 154: vtctldata.UpdateCellsAliasRequest.cells_alias:type_name -> topodata.CellsAlias + 298, // 155: vtctldata.UpdateCellsAliasResponse.cells_alias:type_name -> topodata.CellsAlias + 258, // 156: vtctldata.ValidateResponse.results_by_keyspace:type_name -> vtctldata.ValidateResponse.ResultsByKeyspaceEntry + 259, // 157: vtctldata.ValidateKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry + 260, // 158: vtctldata.ValidateSchemaKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry + 261, // 159: vtctldata.ValidateVersionKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry + 262, // 160: vtctldata.ValidateVSchemaResponse.results_by_shard:type_name -> vtctldata.ValidateVSchemaResponse.ResultsByShardEntry + 283, // 161: vtctldata.VDiffCreateRequest.tablet_types:type_name -> topodata.TabletType + 272, // 162: vtctldata.VDiffCreateRequest.tablet_selection_preference:type_name -> 
tabletmanagerdata.TabletSelectionPreference + 276, // 163: vtctldata.VDiffCreateRequest.filtered_replication_wait_time:type_name -> vttime.Duration + 276, // 164: vtctldata.VDiffCreateRequest.wait_update_interval:type_name -> vttime.Duration + 263, // 165: vtctldata.VDiffShowResponse.tablet_responses:type_name -> vtctldata.VDiffShowResponse.TabletResponsesEntry + 264, // 166: vtctldata.WorkflowDeleteResponse.details:type_name -> vtctldata.WorkflowDeleteResponse.TabletInfo + 268, // 167: vtctldata.WorkflowStatusResponse.table_copy_state:type_name -> vtctldata.WorkflowStatusResponse.TableCopyStateEntry + 269, // 168: vtctldata.WorkflowStatusResponse.shard_streams:type_name -> vtctldata.WorkflowStatusResponse.ShardStreamsEntry + 283, // 169: vtctldata.WorkflowSwitchTrafficRequest.tablet_types:type_name -> topodata.TabletType + 276, // 170: vtctldata.WorkflowSwitchTrafficRequest.max_replication_lag_allowed:type_name -> vttime.Duration + 276, // 171: vtctldata.WorkflowSwitchTrafficRequest.timeout:type_name -> vttime.Duration + 299, // 172: vtctldata.WorkflowUpdateRequest.tablet_request:type_name -> tabletmanagerdata.UpdateVReplicationWorkflowRequest + 270, // 173: vtctldata.WorkflowUpdateResponse.details:type_name -> vtctldata.WorkflowUpdateResponse.TabletInfo + 238, // 174: vtctldata.Workflow.ShardStreamsEntry.value:type_name -> vtctldata.Workflow.ShardStream + 239, // 175: vtctldata.Workflow.ShardStream.streams:type_name -> vtctldata.Workflow.Stream + 300, // 176: vtctldata.Workflow.ShardStream.tablet_controls:type_name -> topodata.Shard.TabletControl + 275, // 177: vtctldata.Workflow.Stream.tablet:type_name -> topodata.TabletAlias + 301, // 178: vtctldata.Workflow.Stream.binlog_source:type_name -> binlogdata.BinlogSource + 274, // 179: vtctldata.Workflow.Stream.transaction_timestamp:type_name -> vttime.Time + 274, // 180: vtctldata.Workflow.Stream.time_updated:type_name -> vttime.Time + 240, // 181: vtctldata.Workflow.Stream.copy_states:type_name -> 
vtctldata.Workflow.Stream.CopyState + 241, // 182: vtctldata.Workflow.Stream.logs:type_name -> vtctldata.Workflow.Stream.Log + 242, // 183: vtctldata.Workflow.Stream.throttler_status:type_name -> vtctldata.Workflow.Stream.ThrottlerStatus + 274, // 184: vtctldata.Workflow.Stream.Log.created_at:type_name -> vttime.Time + 274, // 185: vtctldata.Workflow.Stream.Log.updated_at:type_name -> vttime.Time + 274, // 186: vtctldata.Workflow.Stream.ThrottlerStatus.time_throttled:type_name -> vttime.Time + 10, // 187: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry.value:type_name -> vtctldata.Shard + 298, // 188: vtctldata.GetCellsAliasesResponse.AliasesEntry.value:type_name -> topodata.CellsAlias + 250, // 189: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry.value:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NameList + 302, // 190: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry.value:type_name -> topodata.SrvKeyspace + 295, // 191: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry.value:type_name -> vschema.SrvVSchema + 275, // 192: vtctldata.MoveTablesCreateResponse.TabletInfo.tablet:type_name -> topodata.TabletAlias + 303, // 193: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry.value:type_name -> replicationdata.Status + 284, // 194: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry.value:type_name -> topodata.Tablet + 207, // 195: vtctldata.ValidateResponse.ResultsByKeyspaceEntry.value:type_name -> vtctldata.ValidateKeyspaceResponse + 211, // 196: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse + 211, // 197: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse + 211, // 198: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse + 211, // 199: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse + 304, 
// 200: vtctldata.VDiffShowResponse.TabletResponsesEntry.value:type_name -> tabletmanagerdata.VDiffResponse + 275, // 201: vtctldata.WorkflowDeleteResponse.TabletInfo.tablet:type_name -> topodata.TabletAlias + 275, // 202: vtctldata.WorkflowStatusResponse.ShardStreamState.tablet:type_name -> topodata.TabletAlias + 266, // 203: vtctldata.WorkflowStatusResponse.ShardStreams.streams:type_name -> vtctldata.WorkflowStatusResponse.ShardStreamState + 265, // 204: vtctldata.WorkflowStatusResponse.TableCopyStateEntry.value:type_name -> vtctldata.WorkflowStatusResponse.TableCopyState + 267, // 205: vtctldata.WorkflowStatusResponse.ShardStreamsEntry.value:type_name -> vtctldata.WorkflowStatusResponse.ShardStreams + 275, // 206: vtctldata.WorkflowUpdateResponse.TabletInfo.tablet:type_name -> topodata.TabletAlias + 207, // [207:207] is the sub-list for method output_type + 207, // [207:207] is the sub-list for method input_type + 207, // [207:207] is the sub-list for extension type_name + 207, // [207:207] is the sub-list for extension extendee + 0, // [0:207] is the sub-list for field type_name } func init() { file_vtctldata_proto_init() } @@ -12727,8 +18204,572 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteVtctlCommandResponse); i { + file_vtctldata_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteVtctlCommandResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TableMaterializeSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*MaterializeSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Keyspace); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SchemaMigration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Shard); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Workflow); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddCellInfoRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddCellInfoResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddCellsAliasRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[11].Exporter = func(v interface{}, 
i int) interface{} { + switch v := v.(*AddCellsAliasResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyRoutingRulesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyRoutingRulesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyShardRoutingRulesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyShardRoutingRulesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplySchemaRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplySchemaResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyVSchemaRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyVSchemaResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BackupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BackupResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BackupShardRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelSchemaMigrationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelSchemaMigrationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ChangeTabletTypeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*ChangeTabletTypeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CleanupSchemaMigrationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CleanupSchemaMigrationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CompleteSchemaMigrationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CompleteSchemaMigrationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateKeyspaceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateKeyspaceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateShardRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields 
+ default: + return nil + } + } + file_vtctldata_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateShardResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCellInfoRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCellInfoResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCellsAliasRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCellsAliasResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteKeyspaceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteKeyspaceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteShardsRequest); 
i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteShardsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteSrvVSchemaRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteSrvVSchemaResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteTabletsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteTabletsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EmergencyReparentShardRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EmergencyReparentShardResponse); i { case 0: return &v.state case 1: @@ -12739,8 +18780,8 @@ func file_vtctldata_proto_init() { return nil } } - 
file_vtctldata_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TableMaterializeSettings); i { + file_vtctldata_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteFetchAsAppRequest); i { case 0: return &v.state case 1: @@ -12751,8 +18792,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MaterializeSettings); i { + file_vtctldata_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteFetchAsAppResponse); i { case 0: return &v.state case 1: @@ -12763,8 +18804,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Keyspace); i { + file_vtctldata_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteFetchAsDBARequest); i { case 0: return &v.state case 1: @@ -12775,8 +18816,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Shard); i { + file_vtctldata_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteFetchAsDBAResponse); i { case 0: return &v.state case 1: @@ -12787,8 +18828,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Workflow); i { + file_vtctldata_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteHookRequest); i { case 0: return &v.state case 1: @@ -12799,8 +18840,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddCellInfoRequest); i { + 
file_vtctldata_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteHookResponse); i { case 0: return &v.state case 1: @@ -12811,8 +18852,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddCellInfoResponse); i { + file_vtctldata_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FindAllShardsInKeyspaceRequest); i { case 0: return &v.state case 1: @@ -12823,8 +18864,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddCellsAliasRequest); i { + file_vtctldata_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FindAllShardsInKeyspaceResponse); i { case 0: return &v.state case 1: @@ -12835,8 +18876,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddCellsAliasResponse); i { + file_vtctldata_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBackupsRequest); i { case 0: return &v.state case 1: @@ -12847,8 +18888,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyRoutingRulesRequest); i { + file_vtctldata_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBackupsResponse); i { case 0: return &v.state case 1: @@ -12859,8 +18900,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyRoutingRulesResponse); i { + file_vtctldata_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*GetCellInfoRequest); i { case 0: return &v.state case 1: @@ -12871,8 +18912,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyShardRoutingRulesRequest); i { + file_vtctldata_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCellInfoResponse); i { case 0: return &v.state case 1: @@ -12883,8 +18924,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyShardRoutingRulesResponse); i { + file_vtctldata_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCellInfoNamesRequest); i { case 0: return &v.state case 1: @@ -12895,8 +18936,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplySchemaRequest); i { + file_vtctldata_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCellInfoNamesResponse); i { case 0: return &v.state case 1: @@ -12907,8 +18948,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplySchemaResponse); i { + file_vtctldata_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCellsAliasesRequest); i { case 0: return &v.state case 1: @@ -12919,8 +18960,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyVSchemaRequest); i { + file_vtctldata_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCellsAliasesResponse); i { case 0: return &v.state case 1: @@ -12931,8 +18972,8 @@ func 
file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyVSchemaResponse); i { + file_vtctldata_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFullStatusRequest); i { case 0: return &v.state case 1: @@ -12943,8 +18984,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BackupRequest); i { + file_vtctldata_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFullStatusResponse); i { case 0: return &v.state case 1: @@ -12955,8 +18996,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BackupResponse); i { + file_vtctldata_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetKeyspacesRequest); i { case 0: return &v.state case 1: @@ -12967,8 +19008,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BackupShardRequest); i { + file_vtctldata_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetKeyspacesResponse); i { case 0: return &v.state case 1: @@ -12979,8 +19020,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChangeTabletTypeRequest); i { + file_vtctldata_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetKeyspaceRequest); i { case 0: return &v.state case 1: @@ -12991,8 +19032,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*ChangeTabletTypeResponse); i { + file_vtctldata_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetKeyspaceResponse); i { case 0: return &v.state case 1: @@ -13003,8 +19044,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateKeyspaceRequest); i { + file_vtctldata_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetPermissionsRequest); i { case 0: return &v.state case 1: @@ -13015,8 +19056,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateKeyspaceResponse); i { + file_vtctldata_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetPermissionsResponse); i { case 0: return &v.state case 1: @@ -13027,8 +19068,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateShardRequest); i { + file_vtctldata_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRoutingRulesRequest); i { case 0: return &v.state case 1: @@ -13039,8 +19080,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateShardResponse); i { + file_vtctldata_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRoutingRulesResponse); i { case 0: return &v.state case 1: @@ -13051,8 +19092,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteCellInfoRequest); i { + file_vtctldata_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + switch 
v := v.(*GetSchemaRequest); i { case 0: return &v.state case 1: @@ -13063,8 +19104,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteCellInfoResponse); i { + file_vtctldata_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSchemaResponse); i { case 0: return &v.state case 1: @@ -13075,8 +19116,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteCellsAliasRequest); i { + file_vtctldata_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSchemaMigrationsRequest); i { case 0: return &v.state case 1: @@ -13087,8 +19128,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteCellsAliasResponse); i { + file_vtctldata_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSchemaMigrationsResponse); i { case 0: return &v.state case 1: @@ -13099,8 +19140,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteKeyspaceRequest); i { + file_vtctldata_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetShardRequest); i { case 0: return &v.state case 1: @@ -13111,8 +19152,128 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteKeyspaceResponse); i { + file_vtctldata_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetShardResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetShardRoutingRulesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetShardRoutingRulesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSrvKeyspaceNamesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSrvKeyspaceNamesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSrvKeyspacesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSrvKeyspacesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateThrottlerConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[88].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*UpdateThrottlerConfigResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSrvVSchemaRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSrvVSchemaResponse); i { case 0: return &v.state case 1: @@ -13123,8 +19284,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteShardsRequest); i { + file_vtctldata_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSrvVSchemasRequest); i { case 0: return &v.state case 1: @@ -13135,8 +19296,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteShardsResponse); i { + file_vtctldata_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSrvVSchemasResponse); i { case 0: return &v.state case 1: @@ -13147,8 +19308,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteSrvVSchemaRequest); i { + file_vtctldata_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTabletRequest); i { case 0: return &v.state case 1: @@ -13159,8 +19320,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteSrvVSchemaResponse); i { + file_vtctldata_proto_msgTypes[94].Exporter = 
func(v interface{}, i int) interface{} { + switch v := v.(*GetTabletResponse); i { case 0: return &v.state case 1: @@ -13171,8 +19332,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteTabletsRequest); i { + file_vtctldata_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTabletsRequest); i { case 0: return &v.state case 1: @@ -13183,8 +19344,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteTabletsResponse); i { + file_vtctldata_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTabletsResponse); i { case 0: return &v.state case 1: @@ -13195,8 +19356,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EmergencyReparentShardRequest); i { + file_vtctldata_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTopologyPathRequest); i { case 0: return &v.state case 1: @@ -13207,8 +19368,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EmergencyReparentShardResponse); i { + file_vtctldata_proto_msgTypes[98].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTopologyPathResponse); i { case 0: return &v.state case 1: @@ -13219,8 +19380,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteFetchAsAppRequest); i { + file_vtctldata_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TopologyCell); i { case 0: return &v.state case 1: @@ 
-13231,8 +19392,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteFetchAsAppResponse); i { + file_vtctldata_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVSchemaRequest); i { case 0: return &v.state case 1: @@ -13243,8 +19404,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteFetchAsDBARequest); i { + file_vtctldata_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVersionRequest); i { case 0: return &v.state case 1: @@ -13255,8 +19416,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteFetchAsDBAResponse); i { + file_vtctldata_proto_msgTypes[102].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVersionResponse); i { case 0: return &v.state case 1: @@ -13267,8 +19428,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteHookRequest); i { + file_vtctldata_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVSchemaResponse); i { case 0: return &v.state case 1: @@ -13279,8 +19440,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteHookResponse); i { + file_vtctldata_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetWorkflowsRequest); i { case 0: return &v.state case 1: @@ -13291,8 +19452,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[48].Exporter = func(v 
interface{}, i int) interface{} { - switch v := v.(*FindAllShardsInKeyspaceRequest); i { + file_vtctldata_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetWorkflowsResponse); i { case 0: return &v.state case 1: @@ -13303,8 +19464,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FindAllShardsInKeyspaceResponse); i { + file_vtctldata_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitShardPrimaryRequest); i { case 0: return &v.state case 1: @@ -13315,8 +19476,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetBackupsRequest); i { + file_vtctldata_proto_msgTypes[107].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitShardPrimaryResponse); i { case 0: return &v.state case 1: @@ -13327,8 +19488,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetBackupsResponse); i { + file_vtctldata_proto_msgTypes[108].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LaunchSchemaMigrationRequest); i { case 0: return &v.state case 1: @@ -13339,8 +19500,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetCellInfoRequest); i { + file_vtctldata_proto_msgTypes[109].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LaunchSchemaMigrationResponse); i { case 0: return &v.state case 1: @@ -13351,8 +19512,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetCellInfoResponse); i { + 
file_vtctldata_proto_msgTypes[110].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVindexCreateRequest); i { case 0: return &v.state case 1: @@ -13363,8 +19524,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetCellInfoNamesRequest); i { + file_vtctldata_proto_msgTypes[111].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVindexCreateResponse); i { case 0: return &v.state case 1: @@ -13375,8 +19536,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetCellInfoNamesResponse); i { + file_vtctldata_proto_msgTypes[112].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVindexExternalizeRequest); i { case 0: return &v.state case 1: @@ -13387,8 +19548,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetCellsAliasesRequest); i { + file_vtctldata_proto_msgTypes[113].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LookupVindexExternalizeResponse); i { case 0: return &v.state case 1: @@ -13399,8 +19560,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetCellsAliasesResponse); i { + file_vtctldata_proto_msgTypes[114].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MaterializeCreateRequest); i { case 0: return &v.state case 1: @@ -13411,8 +19572,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetFullStatusRequest); i { + file_vtctldata_proto_msgTypes[115].Exporter = func(v interface{}, i int) interface{} { 
+ switch v := v.(*MaterializeCreateResponse); i { case 0: return &v.state case 1: @@ -13423,8 +19584,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetFullStatusResponse); i { + file_vtctldata_proto_msgTypes[116].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MigrateCreateRequest); i { case 0: return &v.state case 1: @@ -13435,8 +19596,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyspacesRequest); i { + file_vtctldata_proto_msgTypes[117].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MigrateCompleteRequest); i { case 0: return &v.state case 1: @@ -13447,8 +19608,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyspacesResponse); i { + file_vtctldata_proto_msgTypes[118].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MigrateCompleteResponse); i { case 0: return &v.state case 1: @@ -13459,8 +19620,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyspaceRequest); i { + file_vtctldata_proto_msgTypes[119].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MountRegisterRequest); i { case 0: return &v.state case 1: @@ -13471,8 +19632,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyspaceResponse); i { + file_vtctldata_proto_msgTypes[120].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MountRegisterResponse); i { case 0: return &v.state case 1: @@ -13483,8 +19644,8 @@ func 
file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetPermissionsRequest); i { + file_vtctldata_proto_msgTypes[121].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MountUnregisterRequest); i { case 0: return &v.state case 1: @@ -13495,8 +19656,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetPermissionsResponse); i { + file_vtctldata_proto_msgTypes[122].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MountUnregisterResponse); i { case 0: return &v.state case 1: @@ -13507,8 +19668,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetRoutingRulesRequest); i { + file_vtctldata_proto_msgTypes[123].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MountShowRequest); i { case 0: return &v.state case 1: @@ -13519,8 +19680,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetRoutingRulesResponse); i { + file_vtctldata_proto_msgTypes[124].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MountShowResponse); i { case 0: return &v.state case 1: @@ -13531,8 +19692,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSchemaRequest); i { + file_vtctldata_proto_msgTypes[125].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MountListRequest); i { case 0: return &v.state case 1: @@ -13543,8 +19704,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { - 
switch v := v.(*GetSchemaResponse); i { + file_vtctldata_proto_msgTypes[126].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MountListResponse); i { case 0: return &v.state case 1: @@ -13555,8 +19716,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardRequest); i { + file_vtctldata_proto_msgTypes[127].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveTablesCreateRequest); i { case 0: return &v.state case 1: @@ -13567,8 +19728,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardResponse); i { + file_vtctldata_proto_msgTypes[128].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveTablesCreateResponse); i { case 0: return &v.state case 1: @@ -13579,8 +19740,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardRoutingRulesRequest); i { + file_vtctldata_proto_msgTypes[129].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveTablesCompleteRequest); i { case 0: return &v.state case 1: @@ -13591,8 +19752,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardRoutingRulesResponse); i { + file_vtctldata_proto_msgTypes[130].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveTablesCompleteResponse); i { case 0: return &v.state case 1: @@ -13603,8 +19764,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvKeyspaceNamesRequest); i { + file_vtctldata_proto_msgTypes[131].Exporter = func(v interface{}, 
i int) interface{} { + switch v := v.(*PingTabletRequest); i { case 0: return &v.state case 1: @@ -13615,8 +19776,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvKeyspaceNamesResponse); i { + file_vtctldata_proto_msgTypes[132].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PingTabletResponse); i { case 0: return &v.state case 1: @@ -13627,8 +19788,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvKeyspacesRequest); i { + file_vtctldata_proto_msgTypes[133].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlannedReparentShardRequest); i { case 0: return &v.state case 1: @@ -13639,8 +19800,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvKeyspacesResponse); i { + file_vtctldata_proto_msgTypes[134].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlannedReparentShardResponse); i { case 0: return &v.state case 1: @@ -13651,8 +19812,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateThrottlerConfigRequest); i { + file_vtctldata_proto_msgTypes[135].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RebuildKeyspaceGraphRequest); i { case 0: return &v.state case 1: @@ -13663,8 +19824,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateThrottlerConfigResponse); i { + file_vtctldata_proto_msgTypes[136].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RebuildKeyspaceGraphResponse); i { case 0: return 
&v.state case 1: @@ -13675,8 +19836,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvVSchemaRequest); i { + file_vtctldata_proto_msgTypes[137].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RebuildVSchemaGraphRequest); i { case 0: return &v.state case 1: @@ -13687,8 +19848,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvVSchemaResponse); i { + file_vtctldata_proto_msgTypes[138].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RebuildVSchemaGraphResponse); i { case 0: return &v.state case 1: @@ -13699,8 +19860,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvVSchemasRequest); i { + file_vtctldata_proto_msgTypes[139].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RefreshStateRequest); i { case 0: return &v.state case 1: @@ -13711,8 +19872,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvVSchemasResponse); i { + file_vtctldata_proto_msgTypes[140].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RefreshStateResponse); i { case 0: return &v.state case 1: @@ -13723,8 +19884,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTabletRequest); i { + file_vtctldata_proto_msgTypes[141].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RefreshStateByShardRequest); i { case 0: return &v.state case 1: @@ -13735,8 +19896,8 @@ func file_vtctldata_proto_init() { return nil } } - 
file_vtctldata_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTabletResponse); i { + file_vtctldata_proto_msgTypes[142].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RefreshStateByShardResponse); i { case 0: return &v.state case 1: @@ -13747,8 +19908,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTabletsRequest); i { + file_vtctldata_proto_msgTypes[143].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReloadSchemaRequest); i { case 0: return &v.state case 1: @@ -13759,8 +19920,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTabletsResponse); i { + file_vtctldata_proto_msgTypes[144].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReloadSchemaResponse); i { case 0: return &v.state case 1: @@ -13771,8 +19932,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTopologyPathRequest); i { + file_vtctldata_proto_msgTypes[145].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReloadSchemaKeyspaceRequest); i { case 0: return &v.state case 1: @@ -13783,8 +19944,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTopologyPathResponse); i { + file_vtctldata_proto_msgTypes[146].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReloadSchemaKeyspaceResponse); i { case 0: return &v.state case 1: @@ -13795,8 +19956,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*TopologyCell); i { + file_vtctldata_proto_msgTypes[147].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReloadSchemaShardRequest); i { case 0: return &v.state case 1: @@ -13807,8 +19968,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetVSchemaRequest); i { + file_vtctldata_proto_msgTypes[148].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReloadSchemaShardResponse); i { case 0: return &v.state case 1: @@ -13819,8 +19980,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetVersionRequest); i { + file_vtctldata_proto_msgTypes[149].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveBackupRequest); i { case 0: return &v.state case 1: @@ -13831,8 +19992,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetVersionResponse); i { + file_vtctldata_proto_msgTypes[150].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveBackupResponse); i { case 0: return &v.state case 1: @@ -13843,8 +20004,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetVSchemaResponse); i { + file_vtctldata_proto_msgTypes[151].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveKeyspaceCellRequest); i { case 0: return &v.state case 1: @@ -13855,8 +20016,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowsRequest); i { + file_vtctldata_proto_msgTypes[152].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*RemoveKeyspaceCellResponse); i { case 0: return &v.state case 1: @@ -13867,8 +20028,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowsResponse); i { + file_vtctldata_proto_msgTypes[153].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveShardCellRequest); i { case 0: return &v.state case 1: @@ -13879,8 +20040,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InitShardPrimaryRequest); i { + file_vtctldata_proto_msgTypes[154].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveShardCellResponse); i { case 0: return &v.state case 1: @@ -13891,8 +20052,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[98].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InitShardPrimaryResponse); i { + file_vtctldata_proto_msgTypes[155].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReparentTabletRequest); i { case 0: return &v.state case 1: @@ -13903,8 +20064,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PingTabletRequest); i { + file_vtctldata_proto_msgTypes[156].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReparentTabletResponse); i { case 0: return &v.state case 1: @@ -13915,8 +20076,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PingTabletResponse); i { + file_vtctldata_proto_msgTypes[157].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReshardCreateRequest); i { case 0: return &v.state case 1: @@ -13927,8 +20088,8 @@ func 
file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PlannedReparentShardRequest); i { + file_vtctldata_proto_msgTypes[158].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RestoreFromBackupRequest); i { case 0: return &v.state case 1: @@ -13939,8 +20100,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[102].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PlannedReparentShardResponse); i { + file_vtctldata_proto_msgTypes[159].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RestoreFromBackupResponse); i { case 0: return &v.state case 1: @@ -13950,9 +20111,9 @@ func file_vtctldata_proto_init() { default: return nil } - } - file_vtctldata_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RebuildKeyspaceGraphRequest); i { + } + file_vtctldata_proto_msgTypes[160].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetrySchemaMigrationRequest); i { case 0: return &v.state case 1: @@ -13963,8 +20124,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RebuildKeyspaceGraphResponse); i { + file_vtctldata_proto_msgTypes[161].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetrySchemaMigrationResponse); i { case 0: return &v.state case 1: @@ -13975,8 +20136,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RebuildVSchemaGraphRequest); i { + file_vtctldata_proto_msgTypes[162].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunHealthCheckRequest); i { case 0: return &v.state case 1: @@ -13987,8 +20148,8 @@ func file_vtctldata_proto_init() { return nil } } - 
file_vtctldata_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RebuildVSchemaGraphResponse); i { + file_vtctldata_proto_msgTypes[163].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunHealthCheckResponse); i { case 0: return &v.state case 1: @@ -13999,8 +20160,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[107].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshStateRequest); i { + file_vtctldata_proto_msgTypes[164].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetKeyspaceDurabilityPolicyRequest); i { case 0: return &v.state case 1: @@ -14011,8 +20172,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[108].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshStateResponse); i { + file_vtctldata_proto_msgTypes[165].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetKeyspaceDurabilityPolicyResponse); i { case 0: return &v.state case 1: @@ -14023,8 +20184,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[109].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshStateByShardRequest); i { + file_vtctldata_proto_msgTypes[166].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetKeyspaceServedFromRequest); i { case 0: return &v.state case 1: @@ -14035,8 +20196,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[110].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshStateByShardResponse); i { + file_vtctldata_proto_msgTypes[167].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetKeyspaceServedFromResponse); i { case 0: return &v.state case 1: @@ -14047,8 +20208,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[111].Exporter = func(v 
interface{}, i int) interface{} { - switch v := v.(*ReloadSchemaRequest); i { + file_vtctldata_proto_msgTypes[168].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetKeyspaceShardingInfoRequest); i { case 0: return &v.state case 1: @@ -14059,8 +20220,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[112].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReloadSchemaResponse); i { + file_vtctldata_proto_msgTypes[169].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetKeyspaceShardingInfoResponse); i { case 0: return &v.state case 1: @@ -14071,8 +20232,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[113].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReloadSchemaKeyspaceRequest); i { + file_vtctldata_proto_msgTypes[170].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetShardIsPrimaryServingRequest); i { case 0: return &v.state case 1: @@ -14083,8 +20244,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[114].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReloadSchemaKeyspaceResponse); i { + file_vtctldata_proto_msgTypes[171].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetShardIsPrimaryServingResponse); i { case 0: return &v.state case 1: @@ -14095,8 +20256,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[115].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReloadSchemaShardRequest); i { + file_vtctldata_proto_msgTypes[172].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetShardTabletControlRequest); i { case 0: return &v.state case 1: @@ -14107,8 +20268,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[116].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*ReloadSchemaShardResponse); i { + file_vtctldata_proto_msgTypes[173].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetShardTabletControlResponse); i { case 0: return &v.state case 1: @@ -14119,8 +20280,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[117].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveBackupRequest); i { + file_vtctldata_proto_msgTypes[174].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetWritableRequest); i { case 0: return &v.state case 1: @@ -14131,8 +20292,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[118].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveBackupResponse); i { + file_vtctldata_proto_msgTypes[175].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetWritableResponse); i { case 0: return &v.state case 1: @@ -14143,8 +20304,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[119].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveKeyspaceCellRequest); i { + file_vtctldata_proto_msgTypes[176].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShardReplicationAddRequest); i { case 0: return &v.state case 1: @@ -14155,8 +20316,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[120].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveKeyspaceCellResponse); i { + file_vtctldata_proto_msgTypes[177].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShardReplicationAddResponse); i { case 0: return &v.state case 1: @@ -14167,8 +20328,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[121].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveShardCellRequest); i { + file_vtctldata_proto_msgTypes[178].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*ShardReplicationFixRequest); i { case 0: return &v.state case 1: @@ -14179,8 +20340,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[122].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveShardCellResponse); i { + file_vtctldata_proto_msgTypes[179].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShardReplicationFixResponse); i { case 0: return &v.state case 1: @@ -14191,8 +20352,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[123].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReparentTabletRequest); i { + file_vtctldata_proto_msgTypes[180].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShardReplicationPositionsRequest); i { case 0: return &v.state case 1: @@ -14203,8 +20364,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[124].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReparentTabletResponse); i { + file_vtctldata_proto_msgTypes[181].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShardReplicationPositionsResponse); i { case 0: return &v.state case 1: @@ -14215,8 +20376,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[125].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RestoreFromBackupRequest); i { + file_vtctldata_proto_msgTypes[182].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShardReplicationRemoveRequest); i { case 0: return &v.state case 1: @@ -14227,8 +20388,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[126].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RestoreFromBackupResponse); i { + file_vtctldata_proto_msgTypes[183].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*ShardReplicationRemoveResponse); i { case 0: return &v.state case 1: @@ -14239,8 +20400,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[127].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RunHealthCheckRequest); i { + file_vtctldata_proto_msgTypes[184].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SleepTabletRequest); i { case 0: return &v.state case 1: @@ -14251,8 +20412,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[128].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RunHealthCheckResponse); i { + file_vtctldata_proto_msgTypes[185].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SleepTabletResponse); i { case 0: return &v.state case 1: @@ -14263,8 +20424,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[129].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetKeyspaceDurabilityPolicyRequest); i { + file_vtctldata_proto_msgTypes[186].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceShardAddRequest); i { case 0: return &v.state case 1: @@ -14275,8 +20436,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[130].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetKeyspaceDurabilityPolicyResponse); i { + file_vtctldata_proto_msgTypes[187].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceShardAddResponse); i { case 0: return &v.state case 1: @@ -14287,8 +20448,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[131].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetKeyspaceServedFromRequest); i { + file_vtctldata_proto_msgTypes[188].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceShardDeleteRequest); i { case 0: return &v.state case 1: @@ -14299,8 
+20460,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[132].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetKeyspaceServedFromResponse); i { + file_vtctldata_proto_msgTypes[189].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceShardDeleteResponse); i { case 0: return &v.state case 1: @@ -14311,8 +20472,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[133].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetKeyspaceShardingInfoRequest); i { + file_vtctldata_proto_msgTypes[190].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartReplicationRequest); i { case 0: return &v.state case 1: @@ -14323,8 +20484,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[134].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetKeyspaceShardingInfoResponse); i { + file_vtctldata_proto_msgTypes[191].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartReplicationResponse); i { case 0: return &v.state case 1: @@ -14335,8 +20496,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[135].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetShardIsPrimaryServingRequest); i { + file_vtctldata_proto_msgTypes[192].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopReplicationRequest); i { case 0: return &v.state case 1: @@ -14347,8 +20508,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[136].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetShardIsPrimaryServingResponse); i { + file_vtctldata_proto_msgTypes[193].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopReplicationResponse); i { case 0: return &v.state case 1: @@ -14359,8 +20520,8 @@ func file_vtctldata_proto_init() { return nil } 
} - file_vtctldata_proto_msgTypes[137].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetShardTabletControlRequest); i { + file_vtctldata_proto_msgTypes[194].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TabletExternallyReparentedRequest); i { case 0: return &v.state case 1: @@ -14371,8 +20532,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[138].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetShardTabletControlResponse); i { + file_vtctldata_proto_msgTypes[195].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TabletExternallyReparentedResponse); i { case 0: return &v.state case 1: @@ -14383,8 +20544,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[139].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetWritableRequest); i { + file_vtctldata_proto_msgTypes[196].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateCellInfoRequest); i { case 0: return &v.state case 1: @@ -14395,8 +20556,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[140].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetWritableResponse); i { + file_vtctldata_proto_msgTypes[197].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateCellInfoResponse); i { case 0: return &v.state case 1: @@ -14407,8 +20568,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[141].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardReplicationAddRequest); i { + file_vtctldata_proto_msgTypes[198].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateCellsAliasRequest); i { case 0: return &v.state case 1: @@ -14419,8 +20580,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[142].Exporter = func(v interface{}, i int) 
interface{} { - switch v := v.(*ShardReplicationAddResponse); i { + file_vtctldata_proto_msgTypes[199].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateCellsAliasResponse); i { case 0: return &v.state case 1: @@ -14431,8 +20592,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[143].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardReplicationFixRequest); i { + file_vtctldata_proto_msgTypes[200].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateRequest); i { case 0: return &v.state case 1: @@ -14443,8 +20604,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[144].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardReplicationFixResponse); i { + file_vtctldata_proto_msgTypes[201].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResponse); i { case 0: return &v.state case 1: @@ -14455,8 +20616,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[145].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardReplicationPositionsRequest); i { + file_vtctldata_proto_msgTypes[202].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateKeyspaceRequest); i { case 0: return &v.state case 1: @@ -14467,8 +20628,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[146].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardReplicationPositionsResponse); i { + file_vtctldata_proto_msgTypes[203].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateKeyspaceResponse); i { case 0: return &v.state case 1: @@ -14479,8 +20640,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[147].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardReplicationRemoveRequest); i { + 
file_vtctldata_proto_msgTypes[204].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateSchemaKeyspaceRequest); i { case 0: return &v.state case 1: @@ -14491,8 +20652,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[148].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardReplicationRemoveResponse); i { + file_vtctldata_proto_msgTypes[205].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateSchemaKeyspaceResponse); i { case 0: return &v.state case 1: @@ -14503,8 +20664,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[149].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SleepTabletRequest); i { + file_vtctldata_proto_msgTypes[206].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateShardRequest); i { case 0: return &v.state case 1: @@ -14515,8 +20676,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[150].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SleepTabletResponse); i { + file_vtctldata_proto_msgTypes[207].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateShardResponse); i { case 0: return &v.state case 1: @@ -14527,8 +20688,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[151].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceShardAddRequest); i { + file_vtctldata_proto_msgTypes[208].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateVersionKeyspaceRequest); i { case 0: return &v.state case 1: @@ -14539,8 +20700,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[152].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceShardAddResponse); i { + file_vtctldata_proto_msgTypes[209].Exporter = func(v interface{}, i int) interface{} { + 
switch v := v.(*ValidateVersionKeyspaceResponse); i { case 0: return &v.state case 1: @@ -14551,8 +20712,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[153].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceShardDeleteRequest); i { + file_vtctldata_proto_msgTypes[210].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateVersionShardRequest); i { case 0: return &v.state case 1: @@ -14563,8 +20724,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[154].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceShardDeleteResponse); i { + file_vtctldata_proto_msgTypes[211].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateVersionShardResponse); i { case 0: return &v.state case 1: @@ -14575,8 +20736,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[155].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartReplicationRequest); i { + file_vtctldata_proto_msgTypes[212].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateVSchemaRequest); i { case 0: return &v.state case 1: @@ -14587,8 +20748,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[156].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartReplicationResponse); i { + file_vtctldata_proto_msgTypes[213].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateVSchemaResponse); i { case 0: return &v.state case 1: @@ -14599,8 +20760,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[157].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopReplicationRequest); i { + file_vtctldata_proto_msgTypes[214].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VDiffCreateRequest); i { case 0: return &v.state case 1: @@ 
-14611,8 +20772,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[158].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopReplicationResponse); i { + file_vtctldata_proto_msgTypes[215].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VDiffCreateResponse); i { case 0: return &v.state case 1: @@ -14623,8 +20784,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[159].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TabletExternallyReparentedRequest); i { + file_vtctldata_proto_msgTypes[216].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VDiffDeleteRequest); i { case 0: return &v.state case 1: @@ -14635,8 +20796,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[160].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TabletExternallyReparentedResponse); i { + file_vtctldata_proto_msgTypes[217].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VDiffDeleteResponse); i { case 0: return &v.state case 1: @@ -14647,8 +20808,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[161].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateCellInfoRequest); i { + file_vtctldata_proto_msgTypes[218].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VDiffResumeRequest); i { case 0: return &v.state case 1: @@ -14659,8 +20820,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[162].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateCellInfoResponse); i { + file_vtctldata_proto_msgTypes[219].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VDiffResumeResponse); i { case 0: return &v.state case 1: @@ -14671,8 +20832,8 @@ func file_vtctldata_proto_init() { return nil } } - 
file_vtctldata_proto_msgTypes[163].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateCellsAliasRequest); i { + file_vtctldata_proto_msgTypes[220].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VDiffShowRequest); i { case 0: return &v.state case 1: @@ -14683,8 +20844,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[164].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateCellsAliasResponse); i { + file_vtctldata_proto_msgTypes[221].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VDiffShowResponse); i { case 0: return &v.state case 1: @@ -14695,8 +20856,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[165].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateRequest); i { + file_vtctldata_proto_msgTypes[222].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VDiffStopRequest); i { case 0: return &v.state case 1: @@ -14707,8 +20868,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[166].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateResponse); i { + file_vtctldata_proto_msgTypes[223].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VDiffStopResponse); i { case 0: return &v.state case 1: @@ -14719,8 +20880,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[167].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateKeyspaceRequest); i { + file_vtctldata_proto_msgTypes[224].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowDeleteRequest); i { case 0: return &v.state case 1: @@ -14731,8 +20892,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[168].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateKeyspaceResponse); i { + 
file_vtctldata_proto_msgTypes[225].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowDeleteResponse); i { case 0: return &v.state case 1: @@ -14743,8 +20904,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[169].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateSchemaKeyspaceRequest); i { + file_vtctldata_proto_msgTypes[226].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowStatusRequest); i { case 0: return &v.state case 1: @@ -14755,8 +20916,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[170].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateSchemaKeyspaceResponse); i { + file_vtctldata_proto_msgTypes[227].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowStatusResponse); i { case 0: return &v.state case 1: @@ -14767,8 +20928,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[171].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateShardRequest); i { + file_vtctldata_proto_msgTypes[228].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowSwitchTrafficRequest); i { case 0: return &v.state case 1: @@ -14779,8 +20940,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[172].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateShardResponse); i { + file_vtctldata_proto_msgTypes[229].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowSwitchTrafficResponse); i { case 0: return &v.state case 1: @@ -14791,8 +20952,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[173].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateVersionKeyspaceRequest); i { + file_vtctldata_proto_msgTypes[230].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*WorkflowUpdateRequest); i { case 0: return &v.state case 1: @@ -14803,8 +20964,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[174].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateVersionKeyspaceResponse); i { + file_vtctldata_proto_msgTypes[231].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowUpdateResponse); i { case 0: return &v.state case 1: @@ -14815,8 +20976,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[175].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateVersionShardRequest); i { + file_vtctldata_proto_msgTypes[233].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Workflow_ReplicationLocation); i { case 0: return &v.state case 1: @@ -14827,8 +20988,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[176].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateVersionShardResponse); i { + file_vtctldata_proto_msgTypes[234].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Workflow_ShardStream); i { case 0: return &v.state case 1: @@ -14839,8 +21000,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[177].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateVSchemaRequest); i { + file_vtctldata_proto_msgTypes[235].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Workflow_Stream); i { case 0: return &v.state case 1: @@ -14851,8 +21012,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[178].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateVSchemaResponse); i { + file_vtctldata_proto_msgTypes[236].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Workflow_Stream_CopyState); i { case 0: return &v.state case 
1: @@ -14863,8 +21024,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[179].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WorkflowUpdateRequest); i { + file_vtctldata_proto_msgTypes[237].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Workflow_Stream_Log); i { case 0: return &v.state case 1: @@ -14875,8 +21036,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[180].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WorkflowUpdateResponse); i { + file_vtctldata_proto_msgTypes[238].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Workflow_Stream_ThrottlerStatus); i { case 0: return &v.state case 1: @@ -14887,8 +21048,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[182].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Workflow_ReplicationLocation); i { + file_vtctldata_proto_msgTypes[246].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSrvKeyspaceNamesResponse_NameList); i { case 0: return &v.state case 1: @@ -14899,8 +21060,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[183].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Workflow_ShardStream); i { + file_vtctldata_proto_msgTypes[250].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveTablesCreateResponse_TabletInfo); i { case 0: return &v.state case 1: @@ -14911,8 +21072,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[184].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Workflow_Stream); i { + file_vtctldata_proto_msgTypes[260].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowDeleteResponse_TabletInfo); i { case 0: return &v.state case 1: @@ -14923,8 +21084,8 @@ func file_vtctldata_proto_init() { return 
nil } } - file_vtctldata_proto_msgTypes[185].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Workflow_Stream_CopyState); i { + file_vtctldata_proto_msgTypes[261].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowStatusResponse_TableCopyState); i { case 0: return &v.state case 1: @@ -14935,8 +21096,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[186].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Workflow_Stream_Log); i { + file_vtctldata_proto_msgTypes[262].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowStatusResponse_ShardStreamState); i { case 0: return &v.state case 1: @@ -14947,8 +21108,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[190].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvKeyspaceNamesResponse_NameList); i { + file_vtctldata_proto_msgTypes[263].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowStatusResponse_ShardStreams); i { case 0: return &v.state case 1: @@ -14959,7 +21120,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[200].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[266].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WorkflowUpdateResponse_TabletInfo); i { case 0: return &v.state @@ -14977,8 +21138,8 @@ func file_vtctldata_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_vtctldata_proto_rawDesc, - NumEnums: 1, - NumMessages: 201, + NumEnums: 4, + NumMessages: 267, NumExtensions: 0, NumServices: 0, }, diff --git a/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go b/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go index 530bdb05a07..30f19a15b88 100644 --- a/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go +++ b/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go @@ 
-1,5 +1,5 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: vtctldata.proto package vtctldata @@ -7,6 +7,7 @@ package vtctldata import ( binary "encoding/binary" fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" math "math" @@ -30,5276 +31,5280 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -func (m *ExecuteVtctlCommandRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteVtctlCommandRequest) CloneVT() *ExecuteVtctlCommandRequest { if m == nil { - return nil, nil + return (*ExecuteVtctlCommandRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ExecuteVtctlCommandRequest{ + ActionTimeout: m.ActionTimeout, } - return dAtA[:n], nil + if rhs := m.Args; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Args = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteVtctlCommandRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ExecuteVtctlCommandRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteVtctlCommandRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteVtctlCommandResponse) CloneVT() *ExecuteVtctlCommandResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*ExecuteVtctlCommandResponse)(nil) } - if m.ActionTimeout != 0 { - i = encodeVarint(dAtA, i, uint64(m.ActionTimeout)) - i-- - dAtA[i] = 0x10 + r := &ExecuteVtctlCommandResponse{ + 
Event: m.Event.CloneVT(), } - if len(m.Args) > 0 { - for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Args[iNdEx]) - copy(dAtA[i:], m.Args[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Args[iNdEx]))) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteVtctlCommandResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteVtctlCommandResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TableMaterializeSettings) CloneVT() *TableMaterializeSettings { if m == nil { - return nil, nil + return (*TableMaterializeSettings)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &TableMaterializeSettings{ + TargetTable: m.TargetTable, + SourceExpression: m.SourceExpression, + CreateDdl: m.CreateDdl, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteVtctlCommandResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *TableMaterializeSettings) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteVtctlCommandResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *MaterializeSettings) CloneVT() *MaterializeSettings { if m == nil { - return 0, nil + return (*MaterializeSettings)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &MaterializeSettings{ + Workflow: m.Workflow, + SourceKeyspace: m.SourceKeyspace, + TargetKeyspace: m.TargetKeyspace, + StopAfterCopy: m.StopAfterCopy, + Cell: m.Cell, + TabletTypes: m.TabletTypes, + ExternalCluster: 
m.ExternalCluster, + MaterializationIntent: m.MaterializationIntent, + SourceTimeZone: m.SourceTimeZone, + TargetTimeZone: m.TargetTimeZone, + OnDdl: m.OnDdl, + DeferSecondaryKeys: m.DeferSecondaryKeys, + TabletSelectionPreference: m.TabletSelectionPreference, + AtomicCopy: m.AtomicCopy, } - if m.Event != nil { - size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if rhs := m.TableSettings; rhs != nil { + tmpContainer := make([]*TableMaterializeSettings, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r.TableSettings = tmpContainer } - return len(dAtA) - i, nil + if rhs := m.SourceShards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.SourceShards = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *TableMaterializeSettings) MarshalVT() (dAtA []byte, err error) { +func (m *MaterializeSettings) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Keyspace) CloneVT() *Keyspace { if m == nil { - return nil, nil + return (*Keyspace)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &Keyspace{ + Name: m.Name, + Keyspace: m.Keyspace.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *TableMaterializeSettings) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *Keyspace) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *TableMaterializeSettings) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SchemaMigration) CloneVT() *SchemaMigration { 
if m == nil { - return 0, nil + return (*SchemaMigration)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SchemaMigration{ + Uuid: m.Uuid, + Keyspace: m.Keyspace, + Shard: m.Shard, + Schema: m.Schema, + Table: m.Table, + MigrationStatement: m.MigrationStatement, + Strategy: m.Strategy, + Options: m.Options, + AddedAt: m.AddedAt.CloneVT(), + RequestedAt: m.RequestedAt.CloneVT(), + ReadyAt: m.ReadyAt.CloneVT(), + StartedAt: m.StartedAt.CloneVT(), + LivenessTimestamp: m.LivenessTimestamp.CloneVT(), + CompletedAt: m.CompletedAt.CloneVT(), + CleanedUpAt: m.CleanedUpAt.CloneVT(), + Status: m.Status, + LogPath: m.LogPath, + Artifacts: m.Artifacts, + Retries: m.Retries, + Tablet: m.Tablet.CloneVT(), + TabletFailure: m.TabletFailure, + Progress: m.Progress, + MigrationContext: m.MigrationContext, + DdlAction: m.DdlAction, + Message: m.Message, + EtaSeconds: m.EtaSeconds, + RowsCopied: m.RowsCopied, + TableRows: m.TableRows, + AddedUniqueKeys: m.AddedUniqueKeys, + RemovedUniqueKeys: m.RemovedUniqueKeys, + LogFile: m.LogFile, + ArtifactRetention: m.ArtifactRetention.CloneVT(), + PostponeCompletion: m.PostponeCompletion, + RemovedUniqueKeyNames: m.RemovedUniqueKeyNames, + DroppedNoDefaultColumnNames: m.DroppedNoDefaultColumnNames, + ExpandedColumnNames: m.ExpandedColumnNames, + RevertibleNotes: m.RevertibleNotes, + AllowConcurrent: m.AllowConcurrent, + RevertedUuid: m.RevertedUuid, + IsView: m.IsView, + ReadyToComplete: m.ReadyToComplete, + VitessLivenessIndicator: m.VitessLivenessIndicator, + UserThrottleRatio: m.UserThrottleRatio, + SpecialPlan: m.SpecialPlan, + LastThrottledAt: m.LastThrottledAt.CloneVT(), + ComponentThrottled: m.ComponentThrottled, + CancelledAt: m.CancelledAt.CloneVT(), + PostponeLaunch: m.PostponeLaunch, + Stage: m.Stage, + CutoverAttempts: m.CutoverAttempts, + IsImmediateOperation: m.IsImmediateOperation, + ReviewedAt: m.ReviewedAt.CloneVT(), + 
ReadyToCompleteAt: m.ReadyToCompleteAt.CloneVT(), } - if len(m.CreateDdl) > 0 { - i -= len(m.CreateDdl) - copy(dAtA[i:], m.CreateDdl) - i = encodeVarint(dAtA, i, uint64(len(m.CreateDdl))) - i-- - dAtA[i] = 0x1a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.SourceExpression) > 0 { - i -= len(m.SourceExpression) - copy(dAtA[i:], m.SourceExpression) - i = encodeVarint(dAtA, i, uint64(len(m.SourceExpression))) - i-- - dAtA[i] = 0x12 + return r +} + +func (m *SchemaMigration) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Shard) CloneVT() *Shard { + if m == nil { + return (*Shard)(nil) } - if len(m.TargetTable) > 0 { - i -= len(m.TargetTable) - copy(dAtA[i:], m.TargetTable) - i = encodeVarint(dAtA, i, uint64(len(m.TargetTable))) - i-- - dAtA[i] = 0xa + r := &Shard{ + Keyspace: m.Keyspace, + Name: m.Name, + Shard: m.Shard.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *MaterializeSettings) MarshalVT() (dAtA []byte, err error) { +func (m *Shard) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Workflow_ReplicationLocation) CloneVT() *Workflow_ReplicationLocation { if m == nil { - return nil, nil + return (*Workflow_ReplicationLocation)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &Workflow_ReplicationLocation{ + Keyspace: m.Keyspace, } - return dAtA[:n], nil + if rhs := m.Shards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Shards = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *MaterializeSettings) MarshalToVT(dAtA []byte) 
(int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *Workflow_ReplicationLocation) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *MaterializeSettings) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Workflow_ShardStream) CloneVT() *Workflow_ShardStream { if m == nil { - return 0, nil + return (*Workflow_ShardStream)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &Workflow_ShardStream{ + IsPrimaryServing: m.IsPrimaryServing, } - if m.DeferSecondaryKeys { - i-- - if m.DeferSecondaryKeys { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if rhs := m.Streams; rhs != nil { + tmpContainer := make([]*Workflow_Stream, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x70 - } - if len(m.OnDdl) > 0 { - i -= len(m.OnDdl) - copy(dAtA[i:], m.OnDdl) - i = encodeVarint(dAtA, i, uint64(len(m.OnDdl))) - i-- - dAtA[i] = 0x6a + r.Streams = tmpContainer } - if len(m.SourceShards) > 0 { - for iNdEx := len(m.SourceShards) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.SourceShards[iNdEx]) - copy(dAtA[i:], m.SourceShards[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.SourceShards[iNdEx]))) - i-- - dAtA[i] = 0x62 + if rhs := m.TabletControls; rhs != nil { + tmpContainer := make([]*topodata.Shard_TabletControl, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } + r.TabletControls = tmpContainer } - if len(m.TargetTimeZone) > 0 { - i -= len(m.TargetTimeZone) - copy(dAtA[i:], m.TargetTimeZone) - i = encodeVarint(dAtA, i, uint64(len(m.TargetTimeZone))) - i-- - dAtA[i] = 0x5a - } - if len(m.SourceTimeZone) > 0 { - i -= len(m.SourceTimeZone) - copy(dAtA[i:], m.SourceTimeZone) - i = encodeVarint(dAtA, i, uint64(len(m.SourceTimeZone))) - i-- - dAtA[i] = 0x52 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, 
m.unknownFields) } - if m.MaterializationIntent != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaterializationIntent)) - i-- - dAtA[i] = 0x48 + return r +} + +func (m *Workflow_ShardStream) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Workflow_Stream_CopyState) CloneVT() *Workflow_Stream_CopyState { + if m == nil { + return (*Workflow_Stream_CopyState)(nil) } - if len(m.ExternalCluster) > 0 { - i -= len(m.ExternalCluster) - copy(dAtA[i:], m.ExternalCluster) - i = encodeVarint(dAtA, i, uint64(len(m.ExternalCluster))) - i-- - dAtA[i] = 0x42 + r := &Workflow_Stream_CopyState{ + Table: m.Table, + LastPk: m.LastPk, } - if len(m.TabletTypes) > 0 { - i -= len(m.TabletTypes) - copy(dAtA[i:], m.TabletTypes) - i = encodeVarint(dAtA, i, uint64(len(m.TabletTypes))) - i-- - dAtA[i] = 0x3a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Cell) > 0 { - i -= len(m.Cell) - copy(dAtA[i:], m.Cell) - i = encodeVarint(dAtA, i, uint64(len(m.Cell))) - i-- - dAtA[i] = 0x32 + return r +} + +func (m *Workflow_Stream_CopyState) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Workflow_Stream_Log) CloneVT() *Workflow_Stream_Log { + if m == nil { + return (*Workflow_Stream_Log)(nil) } - if len(m.TableSettings) > 0 { - for iNdEx := len(m.TableSettings) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.TableSettings[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x2a - } + r := &Workflow_Stream_Log{ + Id: m.Id, + StreamId: m.StreamId, + Type: m.Type, + State: m.State, + CreatedAt: m.CreatedAt.CloneVT(), + UpdatedAt: m.UpdatedAt.CloneVT(), + Message: m.Message, + Count: m.Count, } - if m.StopAfterCopy { - i-- - if m.StopAfterCopy { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, 
len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.TargetKeyspace) > 0 { - i -= len(m.TargetKeyspace) - copy(dAtA[i:], m.TargetKeyspace) - i = encodeVarint(dAtA, i, uint64(len(m.TargetKeyspace))) - i-- - dAtA[i] = 0x1a + return r +} + +func (m *Workflow_Stream_Log) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Workflow_Stream_ThrottlerStatus) CloneVT() *Workflow_Stream_ThrottlerStatus { + if m == nil { + return (*Workflow_Stream_ThrottlerStatus)(nil) } - if len(m.SourceKeyspace) > 0 { - i -= len(m.SourceKeyspace) - copy(dAtA[i:], m.SourceKeyspace) - i = encodeVarint(dAtA, i, uint64(len(m.SourceKeyspace))) - i-- - dAtA[i] = 0x12 + r := &Workflow_Stream_ThrottlerStatus{ + ComponentThrottled: m.ComponentThrottled, + TimeThrottled: m.TimeThrottled.CloneVT(), } - if len(m.Workflow) > 0 { - i -= len(m.Workflow) - copy(dAtA[i:], m.Workflow) - i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *Keyspace) MarshalVT() (dAtA []byte, err error) { +func (m *Workflow_Stream_ThrottlerStatus) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Workflow_Stream) CloneVT() *Workflow_Stream { if m == nil { - return nil, nil + return (*Workflow_Stream)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &Workflow_Stream{ + Id: m.Id, + Shard: m.Shard, + Tablet: m.Tablet.CloneVT(), + BinlogSource: m.BinlogSource.CloneVT(), + Position: m.Position, + StopPosition: m.StopPosition, + State: m.State, + DbName: m.DbName, + TransactionTimestamp: m.TransactionTimestamp.CloneVT(), + TimeUpdated: m.TimeUpdated.CloneVT(), + Message: m.Message, + LogFetchError: m.LogFetchError, + RowsCopied: m.RowsCopied, + ThrottlerStatus: 
m.ThrottlerStatus.CloneVT(), } - return dAtA[:n], nil + if rhs := m.CopyStates; rhs != nil { + tmpContainer := make([]*Workflow_Stream_CopyState, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.CopyStates = tmpContainer + } + if rhs := m.Logs; rhs != nil { + tmpContainer := make([]*Workflow_Stream_Log, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Logs = tmpContainer + } + if rhs := m.Tags; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tags = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Keyspace) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *Workflow_Stream) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *Keyspace) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Workflow) CloneVT() *Workflow { if m == nil { - return 0, nil + return (*Workflow)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &Workflow{ + Name: m.Name, + Source: m.Source.CloneVT(), + Target: m.Target.CloneVT(), + MaxVReplicationLag: m.MaxVReplicationLag, + WorkflowType: m.WorkflowType, + WorkflowSubType: m.WorkflowSubType, + MaxVReplicationTransactionLag: m.MaxVReplicationTransactionLag, + DeferSecondaryKeys: m.DeferSecondaryKeys, } - if m.Keyspace != nil { - size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if rhs := m.ShardStreams; rhs != nil { + tmpContainer := make(map[string]*Workflow_ShardStream, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + r.ShardStreams = tmpContainer } - if len(m.Name) > 0 { - i -= len(m.Name) - 
copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *Shard) MarshalVT() (dAtA []byte, err error) { +func (m *Workflow) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *AddCellInfoRequest) CloneVT() *AddCellInfoRequest { if m == nil { - return nil, nil + return (*AddCellInfoRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &AddCellInfoRequest{ + Name: m.Name, + CellInfo: m.CellInfo.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Shard) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *AddCellInfoRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *Shard) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *AddCellInfoResponse) CloneVT() *AddCellInfoResponse { if m == nil { - return 0, nil + return (*AddCellInfoResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &AddCellInfoResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.Shard != nil { - size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a + return r +} + +func (m *AddCellInfoResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *AddCellsAliasRequest) CloneVT() *AddCellsAliasRequest { + if m == 
nil { + return (*AddCellsAliasRequest)(nil) } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 + r := &AddCellsAliasRequest{ + Name: m.Name, } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Workflow_ReplicationLocation) MarshalVT() (dAtA []byte, err error) { +func (m *AddCellsAliasRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *AddCellsAliasResponse) CloneVT() *AddCellsAliasResponse { if m == nil { - return nil, nil + return (*AddCellsAliasResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &AddCellsAliasResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *Workflow_ReplicationLocation) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *AddCellsAliasResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *Workflow_ReplicationLocation) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplyRoutingRulesRequest) CloneVT() *ApplyRoutingRulesRequest { if m == nil { - return 0, nil + return (*ApplyRoutingRulesRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ApplyRoutingRulesRequest{ + RoutingRules: 
m.RoutingRules.CloneVT(), + SkipRebuild: m.SkipRebuild, } - if len(m.Shards) > 0 { - for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Shards[iNdEx]) - copy(dAtA[i:], m.Shards[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Shards[iNdEx]))) - i-- - dAtA[i] = 0x12 - } + if rhs := m.RebuildCells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.RebuildCells = tmpContainer } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *Workflow_ShardStream) MarshalVT() (dAtA []byte, err error) { +func (m *ApplyRoutingRulesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ApplyRoutingRulesResponse) CloneVT() *ApplyRoutingRulesResponse { if m == nil { - return nil, nil + return (*ApplyRoutingRulesResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ApplyRoutingRulesResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *Workflow_ShardStream) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ApplyRoutingRulesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *Workflow_ShardStream) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplyShardRoutingRulesRequest) CloneVT() *ApplyShardRoutingRulesRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return 
(*ApplyShardRoutingRulesRequest)(nil) } - if m.IsPrimaryServing { - i-- - if m.IsPrimaryServing { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + r := &ApplyShardRoutingRulesRequest{ + ShardRoutingRules: m.ShardRoutingRules.CloneVT(), + SkipRebuild: m.SkipRebuild, } - if len(m.TabletControls) > 0 { - for iNdEx := len(m.TabletControls) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.TabletControls[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - } + if rhs := m.RebuildCells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.RebuildCells = tmpContainer } - if len(m.Streams) > 0 { - for iNdEx := len(m.Streams) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Streams[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *Workflow_Stream_CopyState) MarshalVT() (dAtA []byte, err error) { +func (m *ApplyShardRoutingRulesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ApplyShardRoutingRulesResponse) CloneVT() *ApplyShardRoutingRulesResponse { if m == nil { - return nil, nil + return (*ApplyShardRoutingRulesResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ApplyShardRoutingRulesResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *Workflow_Stream_CopyState) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m 
*ApplyShardRoutingRulesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *Workflow_Stream_CopyState) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplySchemaRequest) CloneVT() *ApplySchemaRequest { if m == nil { - return 0, nil + return (*ApplySchemaRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ApplySchemaRequest{ + Keyspace: m.Keyspace, + DdlStrategy: m.DdlStrategy, + MigrationContext: m.MigrationContext, + WaitReplicasTimeout: m.WaitReplicasTimeout.CloneVT(), + CallerId: m.CallerId.CloneVT(), + BatchSize: m.BatchSize, } - if len(m.LastPk) > 0 { - i -= len(m.LastPk) - copy(dAtA[i:], m.LastPk) - i = encodeVarint(dAtA, i, uint64(len(m.LastPk))) - i-- - dAtA[i] = 0x12 + if rhs := m.Sql; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Sql = tmpContainer } - if len(m.Table) > 0 { - i -= len(m.Table) - copy(dAtA[i:], m.Table) - i = encodeVarint(dAtA, i, uint64(len(m.Table))) - i-- - dAtA[i] = 0xa + if rhs := m.UuidList; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.UuidList = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Workflow_Stream_Log) MarshalVT() (dAtA []byte, err error) { +func (m *ApplySchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ApplySchemaResponse) CloneVT() *ApplySchemaResponse { if m == nil { - return nil, nil + return (*ApplySchemaResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ApplySchemaResponse{} + if rhs := m.UuidList; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.UuidList = 
tmpContainer } - return dAtA[:n], nil + if rhs := m.RowsAffectedByShard; rhs != nil { + tmpContainer := make(map[string]uint64, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.RowsAffectedByShard = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Workflow_Stream_Log) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ApplySchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *Workflow_Stream_Log) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplyVSchemaRequest) CloneVT() *ApplyVSchemaRequest { if m == nil { - return 0, nil + return (*ApplyVSchemaRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ApplyVSchemaRequest{ + Keyspace: m.Keyspace, + SkipRebuild: m.SkipRebuild, + DryRun: m.DryRun, + VSchema: m.VSchema.CloneVT(), + Sql: m.Sql, } - if m.Count != 0 { - i = encodeVarint(dAtA, i, uint64(m.Count)) - i-- - dAtA[i] = 0x40 + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer } - if len(m.Message) > 0 { - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarint(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x3a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.UpdatedAt != nil { - size, err := m.UpdatedAt.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x32 + return r +} + +func (m *ApplyVSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ApplyVSchemaResponse) CloneVT() *ApplyVSchemaResponse { + if m == nil { + 
return (*ApplyVSchemaResponse)(nil) } - if m.CreatedAt != nil { - size, err := m.CreatedAt.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x2a + r := &ApplyVSchemaResponse{ + VSchema: m.VSchema.CloneVT(), } - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarint(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x22 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarint(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x1a + return r +} + +func (m *ApplyVSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BackupRequest) CloneVT() *BackupRequest { + if m == nil { + return (*BackupRequest)(nil) } - if m.StreamId != 0 { - i = encodeVarint(dAtA, i, uint64(m.StreamId)) - i-- - dAtA[i] = 0x10 + r := &BackupRequest{ + TabletAlias: m.TabletAlias.CloneVT(), + AllowPrimary: m.AllowPrimary, + Concurrency: m.Concurrency, + IncrementalFromPos: m.IncrementalFromPos, + UpgradeSafe: m.UpgradeSafe, } - if m.Id != 0 { - i = encodeVarint(dAtA, i, uint64(m.Id)) - i-- - dAtA[i] = 0x8 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *Workflow_Stream) MarshalVT() (dAtA []byte, err error) { +func (m *BackupRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BackupResponse) CloneVT() *BackupResponse { if m == nil { - return nil, nil + return (*BackupResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &BackupResponse{ + TabletAlias: m.TabletAlias.CloneVT(), + Keyspace: m.Keyspace, + Shard: m.Shard, + Event: 
m.Event.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Workflow_Stream) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *BackupResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *Workflow_Stream) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *BackupShardRequest) CloneVT() *BackupShardRequest { if m == nil { - return 0, nil + return (*BackupShardRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &BackupShardRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + AllowPrimary: m.AllowPrimary, + Concurrency: m.Concurrency, + UpgradeSafe: m.UpgradeSafe, + IncrementalFromPos: m.IncrementalFromPos, } - if len(m.Tags) > 0 { - for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Tags[iNdEx]) - copy(dAtA[i:], m.Tags[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Tags[iNdEx]))) - i-- - dAtA[i] = 0x7a - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.LogFetchError) > 0 { - i -= len(m.LogFetchError) - copy(dAtA[i:], m.LogFetchError) - i = encodeVarint(dAtA, i, uint64(len(m.LogFetchError))) - i-- - dAtA[i] = 0x72 + return r +} + +func (m *BackupShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CancelSchemaMigrationRequest) CloneVT() *CancelSchemaMigrationRequest { + if m == nil { + return (*CancelSchemaMigrationRequest)(nil) } - if len(m.Logs) > 0 { - for iNdEx := len(m.Logs) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Logs[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x6a - } + r := 
&CancelSchemaMigrationRequest{ + Keyspace: m.Keyspace, + Uuid: m.Uuid, } - if len(m.CopyStates) > 0 { - for iNdEx := len(m.CopyStates) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.CopyStates[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x62 - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Message) > 0 { - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarint(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x5a + return r +} + +func (m *CancelSchemaMigrationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CancelSchemaMigrationResponse) CloneVT() *CancelSchemaMigrationResponse { + if m == nil { + return (*CancelSchemaMigrationResponse)(nil) } - if m.TimeUpdated != nil { - size, err := m.TimeUpdated.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + r := &CancelSchemaMigrationResponse{} + if rhs := m.RowsAffectedByShard; rhs != nil { + tmpContainer := make(map[string]uint64, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x52 + r.RowsAffectedByShard = tmpContainer } - if m.TransactionTimestamp != nil { - size, err := m.TransactionTimestamp.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x4a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.DbName) > 0 { - i -= len(m.DbName) - copy(dAtA[i:], m.DbName) - i = encodeVarint(dAtA, i, uint64(len(m.DbName))) - i-- - dAtA[i] = 0x42 + return r +} + +func (m *CancelSchemaMigrationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ChangeTabletTypeRequest) CloneVT() 
*ChangeTabletTypeRequest { + if m == nil { + return (*ChangeTabletTypeRequest)(nil) } - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarint(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x3a + r := &ChangeTabletTypeRequest{ + TabletAlias: m.TabletAlias.CloneVT(), + DbType: m.DbType, + DryRun: m.DryRun, } - if len(m.StopPosition) > 0 { - i -= len(m.StopPosition) - copy(dAtA[i:], m.StopPosition) - i = encodeVarint(dAtA, i, uint64(len(m.StopPosition))) - i-- - dAtA[i] = 0x32 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) - i-- - dAtA[i] = 0x2a + return r +} + +func (m *ChangeTabletTypeRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ChangeTabletTypeResponse) CloneVT() *ChangeTabletTypeResponse { + if m == nil { + return (*ChangeTabletTypeResponse)(nil) } - if m.BinlogSource != nil { - size, err := m.BinlogSource.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 + r := &ChangeTabletTypeResponse{ + BeforeTablet: m.BeforeTablet.CloneVT(), + AfterTablet: m.AfterTablet.CloneVT(), + WasDryRun: m.WasDryRun, } - if m.Tablet != nil { - size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 + return r +} + +func (m *ChangeTabletTypeResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m 
*CleanupSchemaMigrationRequest) CloneVT() *CleanupSchemaMigrationRequest { + if m == nil { + return (*CleanupSchemaMigrationRequest)(nil) } - if m.Id != 0 { - i = encodeVarint(dAtA, i, uint64(m.Id)) - i-- - dAtA[i] = 0x8 + r := &CleanupSchemaMigrationRequest{ + Keyspace: m.Keyspace, + Uuid: m.Uuid, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Workflow) MarshalVT() (dAtA []byte, err error) { +func (m *CleanupSchemaMigrationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CleanupSchemaMigrationResponse) CloneVT() *CleanupSchemaMigrationResponse { if m == nil { - return nil, nil + return (*CleanupSchemaMigrationResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &CleanupSchemaMigrationResponse{} + if rhs := m.RowsAffectedByShard; rhs != nil { + tmpContainer := make(map[string]uint64, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.RowsAffectedByShard = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Workflow) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *CleanupSchemaMigrationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *Workflow) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CompleteSchemaMigrationRequest) CloneVT() *CompleteSchemaMigrationRequest { if m == nil { - return 0, nil + return (*CompleteSchemaMigrationRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &CompleteSchemaMigrationRequest{ + Keyspace: 
m.Keyspace, + Uuid: m.Uuid, } - if len(m.WorkflowSubType) > 0 { - i -= len(m.WorkflowSubType) - copy(dAtA[i:], m.WorkflowSubType) - i = encodeVarint(dAtA, i, uint64(len(m.WorkflowSubType))) - i-- - dAtA[i] = 0x3a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.WorkflowType) > 0 { - i -= len(m.WorkflowType) - copy(dAtA[i:], m.WorkflowType) - i = encodeVarint(dAtA, i, uint64(len(m.WorkflowType))) - i-- - dAtA[i] = 0x32 + return r +} + +func (m *CompleteSchemaMigrationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CompleteSchemaMigrationResponse) CloneVT() *CompleteSchemaMigrationResponse { + if m == nil { + return (*CompleteSchemaMigrationResponse)(nil) } - if len(m.ShardStreams) > 0 { - for k := range m.ShardStreams { - v := m.ShardStreams[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2a + r := &CompleteSchemaMigrationResponse{} + if rhs := m.RowsAffectedByShard; rhs != nil { + tmpContainer := make(map[string]uint64, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v } + r.RowsAffectedByShard = tmpContainer } - if m.MaxVReplicationLag != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxVReplicationLag)) - i-- - dAtA[i] = 0x20 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.Target != nil { - size, err := m.Target.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a + return r +} + +func (m *CompleteSchemaMigrationResponse) CloneMessageVT() proto.Message { + return 
m.CloneVT() +} + +func (m *CreateKeyspaceRequest) CloneVT() *CreateKeyspaceRequest { + if m == nil { + return (*CreateKeyspaceRequest)(nil) } - if m.Source != nil { - size, err := m.Source.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + r := &CreateKeyspaceRequest{ + Name: m.Name, + Force: m.Force, + AllowEmptyVSchema: m.AllowEmptyVSchema, + Type: m.Type, + BaseKeyspace: m.BaseKeyspace, + SnapshotTime: m.SnapshotTime.CloneVT(), + DurabilityPolicy: m.DurabilityPolicy, + SidecarDbName: m.SidecarDbName, + } + if rhs := m.ServedFroms; rhs != nil { + tmpContainer := make([]*topodata.Keyspace_ServedFrom, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + r.ServedFroms = tmpContainer } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *AddCellInfoRequest) MarshalVT() (dAtA []byte, err error) { +func (m *CreateKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CreateKeyspaceResponse) CloneVT() *CreateKeyspaceResponse { if m == nil { - return nil, nil + return (*CreateKeyspaceResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &CreateKeyspaceResponse{ + Keyspace: m.Keyspace.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *AddCellInfoRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *CreateKeyspaceResponse) CloneMessageVT() proto.Message { + 
return m.CloneVT() } -func (m *AddCellInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CreateShardRequest) CloneVT() *CreateShardRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*CreateShardRequest)(nil) } - if m.CellInfo != nil { - size, err := m.CellInfo.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + r := &CreateShardRequest{ + Keyspace: m.Keyspace, + ShardName: m.ShardName, + Force: m.Force, + IncludeParent: m.IncludeParent, } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *AddCellInfoResponse) MarshalVT() (dAtA []byte, err error) { +func (m *CreateShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CreateShardResponse) CloneVT() *CreateShardResponse { if m == nil { - return nil, nil + return (*CreateShardResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &CreateShardResponse{ + Keyspace: m.Keyspace.CloneVT(), + Shard: m.Shard.CloneVT(), + ShardAlreadyExists: m.ShardAlreadyExists, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *AddCellInfoResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *CreateShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m 
*AddCellInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteCellInfoRequest) CloneVT() *DeleteCellInfoRequest { if m == nil { - return 0, nil + return (*DeleteCellInfoRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &DeleteCellInfoRequest{ + Name: m.Name, + Force: m.Force, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *AddCellsAliasRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteCellInfoRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteCellInfoResponse) CloneVT() *DeleteCellInfoResponse { if m == nil { - return nil, nil + return (*DeleteCellInfoResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &DeleteCellInfoResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *AddCellsAliasRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *DeleteCellInfoResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *AddCellsAliasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteCellsAliasRequest) CloneVT() *DeleteCellsAliasRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*DeleteCellsAliasRequest)(nil) } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, 
uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0x12 - } + r := &DeleteCellsAliasRequest{ + Name: m.Name, } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *AddCellsAliasResponse) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteCellsAliasRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteCellsAliasResponse) CloneVT() *DeleteCellsAliasResponse { if m == nil { - return nil, nil + return (*DeleteCellsAliasResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &DeleteCellsAliasResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *AddCellsAliasResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *DeleteCellsAliasResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *AddCellsAliasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteKeyspaceRequest) CloneVT() *DeleteKeyspaceRequest { if m == nil { - return 0, nil + return (*DeleteKeyspaceRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &DeleteKeyspaceRequest{ + Keyspace: m.Keyspace, + Recursive: m.Recursive, + Force: m.Force, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ApplyRoutingRulesRequest) MarshalVT() (dAtA []byte, 
err error) { +func (m *DeleteKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteKeyspaceResponse) CloneVT() *DeleteKeyspaceResponse { if m == nil { - return nil, nil + return (*DeleteKeyspaceResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &DeleteKeyspaceResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ApplyRoutingRulesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *DeleteKeyspaceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplyRoutingRulesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteShardsRequest) CloneVT() *DeleteShardsRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*DeleteShardsRequest)(nil) } - if len(m.RebuildCells) > 0 { - for iNdEx := len(m.RebuildCells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RebuildCells[iNdEx]) - copy(dAtA[i:], m.RebuildCells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.RebuildCells[iNdEx]))) - i-- - dAtA[i] = 0x1a - } + r := &DeleteShardsRequest{ + Recursive: m.Recursive, + EvenIfServing: m.EvenIfServing, + Force: m.Force, } - if m.SkipRebuild { - i-- - if m.SkipRebuild { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if rhs := m.Shards; rhs != nil { + tmpContainer := make([]*Shard, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x10 + r.Shards = tmpContainer } - if m.RoutingRules != nil { - size, err := m.RoutingRules.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, 
uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ApplyRoutingRulesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteShardsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteShardsResponse) CloneVT() *DeleteShardsResponse { if m == nil { - return nil, nil + return (*DeleteShardsResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &DeleteShardsResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ApplyRoutingRulesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *DeleteShardsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplyRoutingRulesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteSrvVSchemaRequest) CloneVT() *DeleteSrvVSchemaRequest { if m == nil { - return 0, nil + return (*DeleteSrvVSchemaRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &DeleteSrvVSchemaRequest{ + Cell: m.Cell, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ApplyShardRoutingRulesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteSrvVSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteSrvVSchemaResponse) CloneVT() *DeleteSrvVSchemaResponse { if m == nil { - return nil, nil + return (*DeleteSrvVSchemaResponse)(nil) } - 
size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &DeleteSrvVSchemaResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ApplyShardRoutingRulesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *DeleteSrvVSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplyShardRoutingRulesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteTabletsRequest) CloneVT() *DeleteTabletsRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*DeleteTabletsRequest)(nil) } - if len(m.RebuildCells) > 0 { - for iNdEx := len(m.RebuildCells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RebuildCells[iNdEx]) - copy(dAtA[i:], m.RebuildCells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.RebuildCells[iNdEx]))) - i-- - dAtA[i] = 0x1a - } + r := &DeleteTabletsRequest{ + AllowPrimary: m.AllowPrimary, } - if m.SkipRebuild { - i-- - if m.SkipRebuild { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if rhs := m.TabletAliases; rhs != nil { + tmpContainer := make([]*topodata.TabletAlias, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x10 + r.TabletAliases = tmpContainer } - if m.ShardRoutingRules != nil { - size, err := m.ShardRoutingRules.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ApplyShardRoutingRulesResponse) 
MarshalVT() (dAtA []byte, err error) { +func (m *DeleteTabletsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteTabletsResponse) CloneVT() *DeleteTabletsResponse { if m == nil { - return nil, nil + return (*DeleteTabletsResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &DeleteTabletsResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ApplyShardRoutingRulesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *DeleteTabletsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplyShardRoutingRulesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *EmergencyReparentShardRequest) CloneVT() *EmergencyReparentShardRequest { if m == nil { - return 0, nil + return (*EmergencyReparentShardRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &EmergencyReparentShardRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + NewPrimary: m.NewPrimary.CloneVT(), + WaitReplicasTimeout: m.WaitReplicasTimeout.CloneVT(), + PreventCrossCellPromotion: m.PreventCrossCellPromotion, + WaitForAllTablets: m.WaitForAllTablets, } - return len(dAtA) - i, nil + if rhs := m.IgnoreReplicas; rhs != nil { + tmpContainer := make([]*topodata.TabletAlias, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.IgnoreReplicas = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ApplySchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *EmergencyReparentShardRequest) 
CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *EmergencyReparentShardResponse) CloneVT() *EmergencyReparentShardResponse { if m == nil { - return nil, nil + return (*EmergencyReparentShardResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &EmergencyReparentShardResponse{ + Keyspace: m.Keyspace, + Shard: m.Shard, + PromotedPrimary: m.PromotedPrimary.CloneVT(), } - return dAtA[:n], nil + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ApplySchemaRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *EmergencyReparentShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplySchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppRequest) CloneVT() *ExecuteFetchAsAppRequest { if m == nil { - return 0, nil + return (*ExecuteFetchAsAppRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ExecuteFetchAsAppRequest{ + TabletAlias: m.TabletAlias.CloneVT(), + Query: m.Query, + MaxRows: m.MaxRows, + UsePool: m.UsePool, } - if m.CallerId != nil { - size, err := m.CallerId.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x4a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.SkipPreflight { - i-- - if m.SkipPreflight { - dAtA[i] = 1 - } else { - dAtA[i] = 0 
- } - i-- - dAtA[i] = 0x40 + return r +} + +func (m *ExecuteFetchAsAppRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteFetchAsAppResponse) CloneVT() *ExecuteFetchAsAppResponse { + if m == nil { + return (*ExecuteFetchAsAppResponse)(nil) } - if m.WaitReplicasTimeout != nil { - size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x3a + r := &ExecuteFetchAsAppResponse{ + Result: m.Result.CloneVT(), } - if len(m.MigrationContext) > 0 { - i -= len(m.MigrationContext) - copy(dAtA[i:], m.MigrationContext) - i = encodeVarint(dAtA, i, uint64(len(m.MigrationContext))) - i-- - dAtA[i] = 0x32 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.UuidList) > 0 { - for iNdEx := len(m.UuidList) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.UuidList[iNdEx]) - copy(dAtA[i:], m.UuidList[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.UuidList[iNdEx]))) - i-- - dAtA[i] = 0x2a - } + return r +} + +func (m *ExecuteFetchAsAppResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteFetchAsDBARequest) CloneVT() *ExecuteFetchAsDBARequest { + if m == nil { + return (*ExecuteFetchAsDBARequest)(nil) } - if len(m.DdlStrategy) > 0 { - i -= len(m.DdlStrategy) - copy(dAtA[i:], m.DdlStrategy) - i = encodeVarint(dAtA, i, uint64(len(m.DdlStrategy))) - i-- - dAtA[i] = 0x22 + r := &ExecuteFetchAsDBARequest{ + TabletAlias: m.TabletAlias.CloneVT(), + Query: m.Query, + MaxRows: m.MaxRows, + DisableBinlogs: m.DisableBinlogs, + ReloadSchema: m.ReloadSchema, } - if len(m.Sql) > 0 { - for iNdEx := len(m.Sql) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Sql[iNdEx]) - copy(dAtA[i:], m.Sql[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Sql[iNdEx]))) - i-- - dAtA[i] = 0x1a - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, 
len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.AllowLongUnavailability { - i-- - if m.AllowLongUnavailability { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 + return r +} + +func (m *ExecuteFetchAsDBARequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteFetchAsDBAResponse) CloneVT() *ExecuteFetchAsDBAResponse { + if m == nil { + return (*ExecuteFetchAsDBAResponse)(nil) } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + r := &ExecuteFetchAsDBAResponse{ + Result: m.Result.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ApplySchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsDBAResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteHookRequest) CloneVT() *ExecuteHookRequest { if m == nil { - return nil, nil + return (*ExecuteHookRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ExecuteHookRequest{ + TabletAlias: m.TabletAlias.CloneVT(), + TabletHookRequest: m.TabletHookRequest.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ApplySchemaResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ExecuteHookRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplySchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteHookResponse) CloneVT() *ExecuteHookResponse { if m == nil { - return 0, nil + return 
(*ExecuteHookResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ExecuteHookResponse{ + HookResult: m.HookResult.CloneVT(), } - if len(m.UuidList) > 0 { - for iNdEx := len(m.UuidList) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.UuidList[iNdEx]) - copy(dAtA[i:], m.UuidList[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.UuidList[iNdEx]))) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ApplyVSchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteHookResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *FindAllShardsInKeyspaceRequest) CloneVT() *FindAllShardsInKeyspaceRequest { if m == nil { - return nil, nil + return (*FindAllShardsInKeyspaceRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &FindAllShardsInKeyspaceRequest{ + Keyspace: m.Keyspace, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ApplyVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *FindAllShardsInKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplyVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *FindAllShardsInKeyspaceResponse) CloneVT() *FindAllShardsInKeyspaceResponse { if m == nil { - return 0, nil + return (*FindAllShardsInKeyspaceResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := 
&FindAllShardsInKeyspaceResponse{} + if rhs := m.Shards; rhs != nil { + tmpContainer := make(map[string]*Shard, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Shards = tmpContainer } - if len(m.Sql) > 0 { - i -= len(m.Sql) - copy(dAtA[i:], m.Sql) - i = encodeVarint(dAtA, i, uint64(len(m.Sql))) - i-- - dAtA[i] = 0x32 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.VSchema != nil { - size, err := m.VSchema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x2a + return r +} + +func (m *FindAllShardsInKeyspaceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetBackupsRequest) CloneVT() *GetBackupsRequest { + if m == nil { + return (*GetBackupsRequest)(nil) } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0x22 - } + r := &GetBackupsRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + Limit: m.Limit, + Detailed: m.Detailed, + DetailedLimit: m.DetailedLimit, } - if m.DryRun { - i-- - if m.DryRun { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.SkipRebuild { - i-- - if m.SkipRebuild { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + return r +} + +func (m *GetBackupsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetBackupsResponse) CloneVT() *GetBackupsResponse { + if m == nil { + return (*GetBackupsResponse)(nil) + } + r := &GetBackupsResponse{} + if rhs := m.Backups; rhs != nil { + tmpContainer := make([]*mysqlctl.BackupInfo, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } 
- i-- - dAtA[i] = 0x10 + r.Backups = tmpContainer } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ApplyVSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetBackupsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCellInfoRequest) CloneVT() *GetCellInfoRequest { if m == nil { - return nil, nil + return (*GetCellInfoRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetCellInfoRequest{ + Cell: m.Cell, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ApplyVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetCellInfoRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplyVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetCellInfoResponse) CloneVT() *GetCellInfoResponse { if m == nil { - return 0, nil + return (*GetCellInfoResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetCellInfoResponse{ + CellInfo: m.CellInfo.CloneVT(), } - if m.VSchema != nil { - size, err := m.VSchema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return 
len(dAtA) - i, nil + return r } -func (m *BackupRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetCellInfoResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCellInfoNamesRequest) CloneVT() *GetCellInfoNamesRequest { if m == nil { - return nil, nil + return (*GetCellInfoNamesRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetCellInfoNamesRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *BackupRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetCellInfoNamesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *BackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetCellInfoNamesResponse) CloneVT() *GetCellInfoNamesResponse { if m == nil { - return 0, nil + return (*GetCellInfoNamesResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.IncrementalFromPos) > 0 { - i -= len(m.IncrementalFromPos) - copy(dAtA[i:], m.IncrementalFromPos) - i = encodeVarint(dAtA, i, uint64(len(m.IncrementalFromPos))) - i-- - dAtA[i] = 0x22 + r := &GetCellInfoNamesResponse{} + if rhs := m.Names; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Names = tmpContainer } - if m.Concurrency != 0 { - i = encodeVarint(dAtA, i, uint64(m.Concurrency)) - i-- - dAtA[i] = 0x18 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.AllowPrimary { - i-- - if m.AllowPrimary { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 + return r +} + +func (m 
*GetCellInfoNamesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCellsAliasesRequest) CloneVT() *GetCellsAliasesRequest { + if m == nil { + return (*GetCellsAliasesRequest)(nil) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r := &GetCellsAliasesRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *BackupResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetCellsAliasesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCellsAliasesResponse) CloneVT() *GetCellsAliasesResponse { if m == nil { - return nil, nil + return (*GetCellsAliasesResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetCellsAliasesResponse{} + if rhs := m.Aliases; rhs != nil { + tmpContainer := make(map[string]*topodata.CellsAlias, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Aliases = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *BackupResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetCellsAliasesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *BackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetFullStatusRequest) CloneVT() *GetFullStatusRequest { if m == nil { - return 0, nil + return (*GetFullStatusRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - 
copy(dAtA[i:], m.unknownFields) + r := &GetFullStatusRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - if m.Event != nil { - size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x1a + return r +} + +func (m *GetFullStatusRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetFullStatusResponse) CloneVT() *GetFullStatusResponse { + if m == nil { + return (*GetFullStatusResponse)(nil) } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0x12 + r := &GetFullStatusResponse{ + Status: m.Status.CloneVT(), } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *BackupShardRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetFullStatusResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetKeyspacesRequest) CloneVT() *GetKeyspacesRequest { if m == nil { - return nil, nil + return (*GetKeyspacesRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetKeyspacesRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return 
dAtA[:n], nil + return r } -func (m *BackupShardRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetKeyspacesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *BackupShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetKeyspacesResponse) CloneVT() *GetKeyspacesResponse { if m == nil { - return 0, nil + return (*GetKeyspacesResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetKeyspacesResponse{} + if rhs := m.Keyspaces; rhs != nil { + tmpContainer := make([]*Keyspace, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Keyspaces = tmpContainer } - if m.Concurrency != 0 { - i = encodeVarint(dAtA, i, uint64(m.Concurrency)) - i-- - dAtA[i] = 0x20 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.AllowPrimary { - i-- - if m.AllowPrimary { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + return r +} + +func (m *GetKeyspacesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetKeyspaceRequest) CloneVT() *GetKeyspaceRequest { + if m == nil { + return (*GetKeyspaceRequest)(nil) } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 + r := &GetKeyspaceRequest{ + Keyspace: m.Keyspace, } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ChangeTabletTypeRequest) MarshalVT() (dAtA []byte, err error) { +func (m 
*GetKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetKeyspaceResponse) CloneVT() *GetKeyspaceResponse { if m == nil { - return nil, nil + return (*GetKeyspaceResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetKeyspaceResponse{ + Keyspace: m.Keyspace.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ChangeTabletTypeRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetKeyspaceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ChangeTabletTypeRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetPermissionsRequest) CloneVT() *GetPermissionsRequest { if m == nil { - return 0, nil + return (*GetPermissionsRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if m.DryRun { - i-- - if m.DryRun { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.DbType != 0 { - i = encodeVarint(dAtA, i, uint64(m.DbType)) - i-- - dAtA[i] = 0x10 + r := &GetPermissionsRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ChangeTabletTypeResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetPermissionsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func 
(m *GetPermissionsResponse) CloneVT() *GetPermissionsResponse { if m == nil { - return nil, nil + return (*GetPermissionsResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetPermissionsResponse{ + Permissions: m.Permissions.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ChangeTabletTypeResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetPermissionsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ChangeTabletTypeResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetRoutingRulesRequest) CloneVT() *GetRoutingRulesRequest { if m == nil { - return 0, nil + return (*GetRoutingRulesRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetRoutingRulesRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.WasDryRun { - i-- - if m.WasDryRun { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + return r +} + +func (m *GetRoutingRulesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetRoutingRulesResponse) CloneVT() *GetRoutingRulesResponse { + if m == nil { + return (*GetRoutingRulesResponse)(nil) } - if m.AfterTablet != nil { - size, err := m.AfterTablet.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + r := &GetRoutingRulesResponse{ + RoutingRules: m.RoutingRules.CloneVT(), } - if m.BeforeTablet != nil { - size, err := 
m.BeforeTablet.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *CreateKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetRoutingRulesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemaRequest) CloneVT() *GetSchemaRequest { if m == nil { - return nil, nil + return (*GetSchemaRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetSchemaRequest{ + TabletAlias: m.TabletAlias.CloneVT(), + IncludeViews: m.IncludeViews, + TableNamesOnly: m.TableNamesOnly, + TableSizesOnly: m.TableSizesOnly, + TableSchemaOnly: m.TableSchemaOnly, } - return dAtA[:n], nil + if rhs := m.Tables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tables = tmpContainer + } + if rhs := m.ExcludeTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ExcludeTables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *CreateKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *CreateKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSchemaResponse) CloneVT() *GetSchemaResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.SidecarDbName) > 0 { - i 
-= len(m.SidecarDbName) - copy(dAtA[i:], m.SidecarDbName) - i = encodeVarint(dAtA, i, uint64(len(m.SidecarDbName))) - i-- - dAtA[i] = 0x5a + return (*GetSchemaResponse)(nil) } - if len(m.DurabilityPolicy) > 0 { - i -= len(m.DurabilityPolicy) - copy(dAtA[i:], m.DurabilityPolicy) - i = encodeVarint(dAtA, i, uint64(len(m.DurabilityPolicy))) - i-- - dAtA[i] = 0x52 + r := &GetSchemaResponse{ + Schema: m.Schema.CloneVT(), } - if m.SnapshotTime != nil { - size, err := m.SnapshotTime.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x4a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.BaseKeyspace) > 0 { - i -= len(m.BaseKeyspace) - copy(dAtA[i:], m.BaseKeyspace) - i = encodeVarint(dAtA, i, uint64(len(m.BaseKeyspace))) - i-- - dAtA[i] = 0x42 + return r +} + +func (m *GetSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemaMigrationsRequest) CloneVT() *GetSchemaMigrationsRequest { + if m == nil { + return (*GetSchemaMigrationsRequest)(nil) } - if m.Type != 0 { - i = encodeVarint(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x38 + r := &GetSchemaMigrationsRequest{ + Keyspace: m.Keyspace, + Uuid: m.Uuid, + MigrationContext: m.MigrationContext, + Status: m.Status, + Recent: m.Recent.CloneVT(), + Order: m.Order, + Limit: m.Limit, + Skip: m.Skip, } - if len(m.ServedFroms) > 0 { - for iNdEx := len(m.ServedFroms) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.ServedFroms[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x32 - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.AllowEmptyVSchema { - i-- - if m.AllowEmptyVSchema { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - 
dAtA[i] = 0x18 + return r +} + +func (m *GetSchemaMigrationsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemaMigrationsResponse) CloneVT() *GetSchemaMigrationsResponse { + if m == nil { + return (*GetSchemaMigrationsResponse)(nil) } - if m.Force { - i-- - if m.Force { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + r := &GetSchemaMigrationsResponse{} + if rhs := m.Migrations; rhs != nil { + tmpContainer := make([]*SchemaMigration, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x10 + r.Migrations = tmpContainer } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *CreateKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetSchemaMigrationsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetShardRequest) CloneVT() *GetShardRequest { if m == nil { - return nil, nil + return (*GetShardRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetShardRequest{ + Keyspace: m.Keyspace, + ShardName: m.ShardName, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *CreateKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *CreateKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetShardResponse) CloneVT() *GetShardResponse { if m == nil { - return 0, nil + return 
(*GetShardResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetShardResponse{ + Shard: m.Shard.CloneVT(), } - if m.Keyspace != nil { - size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *CreateShardRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetShardRoutingRulesRequest) CloneVT() *GetShardRoutingRulesRequest { if m == nil { - return nil, nil + return (*GetShardRoutingRulesRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetShardRoutingRulesRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *CreateShardRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetShardRoutingRulesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *CreateShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetShardRoutingRulesResponse) CloneVT() *GetShardRoutingRulesResponse { if m == nil { - return 0, nil + return (*GetShardRoutingRulesResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetShardRoutingRulesResponse{ + ShardRoutingRules: m.ShardRoutingRules.CloneVT(), } - if m.IncludeParent { - i-- - if m.IncludeParent { - 
dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.Force { - i-- - if m.Force { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + return r +} + +func (m *GetShardRoutingRulesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvKeyspaceNamesRequest) CloneVT() *GetSrvKeyspaceNamesRequest { + if m == nil { + return (*GetSrvKeyspaceNamesRequest)(nil) } - if len(m.ShardName) > 0 { - i -= len(m.ShardName) - copy(dAtA[i:], m.ShardName) - i = encodeVarint(dAtA, i, uint64(len(m.ShardName))) - i-- - dAtA[i] = 0x12 + r := &GetSrvKeyspaceNamesRequest{} + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *CreateShardResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetSrvKeyspaceNamesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvKeyspaceNamesResponse_NameList) CloneVT() *GetSrvKeyspaceNamesResponse_NameList { if m == nil { - return nil, nil + return (*GetSrvKeyspaceNamesResponse_NameList)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetSrvKeyspaceNamesResponse_NameList{} + if rhs := m.Names; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Names = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + 
copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *CreateShardResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetSrvKeyspaceNamesResponse_NameList) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *CreateShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSrvKeyspaceNamesResponse) CloneVT() *GetSrvKeyspaceNamesResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if m.ShardAlreadyExists { - i-- - if m.ShardAlreadyExists { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + return (*GetSrvKeyspaceNamesResponse)(nil) } - if m.Shard != nil { - size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + r := &GetSrvKeyspaceNamesResponse{} + if rhs := m.Names; rhs != nil { + tmpContainer := make(map[string]*GetSrvKeyspaceNamesResponse_NameList, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + r.Names = tmpContainer } - if m.Keyspace != nil { - size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *DeleteCellInfoRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetSrvKeyspaceNamesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvKeyspacesRequest) CloneVT() *GetSrvKeyspacesRequest { if m == nil { - return nil, nil + return (*GetSrvKeyspacesRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetSrvKeyspacesRequest{ + Keyspace: m.Keyspace, } - return dAtA[:n], nil + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteCellInfoRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetSrvKeyspacesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteCellInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSrvKeyspacesResponse) CloneVT() *GetSrvKeyspacesResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*GetSrvKeyspacesResponse)(nil) } - if m.Force { - i-- - if m.Force { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + r := &GetSrvKeyspacesResponse{} + if rhs := m.SrvKeyspaces; rhs != nil { + tmpContainer := make(map[string]*topodata.SrvKeyspace, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x10 + r.SrvKeyspaces = tmpContainer } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *DeleteCellInfoResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetSrvKeyspacesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UpdateThrottlerConfigRequest) CloneVT() *UpdateThrottlerConfigRequest { if m == nil { - return nil, nil + return 
(*UpdateThrottlerConfigRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &UpdateThrottlerConfigRequest{ + Keyspace: m.Keyspace, + Enable: m.Enable, + Disable: m.Disable, + Threshold: m.Threshold, + CustomQuery: m.CustomQuery, + CustomQuerySet: m.CustomQuerySet, + CheckAsCheckSelf: m.CheckAsCheckSelf, + CheckAsCheckShard: m.CheckAsCheckShard, + ThrottledApp: m.ThrottledApp.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteCellInfoResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *UpdateThrottlerConfigRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteCellInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UpdateThrottlerConfigResponse) CloneVT() *UpdateThrottlerConfigResponse { if m == nil { - return 0, nil + return (*UpdateThrottlerConfigResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &UpdateThrottlerConfigResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *DeleteCellsAliasRequest) MarshalVT() (dAtA []byte, err error) { +func (m *UpdateThrottlerConfigResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvVSchemaRequest) CloneVT() *GetSrvVSchemaRequest { if m == nil { - return nil, nil + return (*GetSrvVSchemaRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetSrvVSchemaRequest{ + Cell: m.Cell, } - 
return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteCellsAliasRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetSrvVSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteCellsAliasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSrvVSchemaResponse) CloneVT() *GetSrvVSchemaResponse { if m == nil { - return 0, nil + return (*GetSrvVSchemaResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetSrvVSchemaResponse{ + SrvVSchema: m.SrvVSchema.CloneVT(), } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *DeleteCellsAliasResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetSrvVSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvVSchemasRequest) CloneVT() *GetSrvVSchemasRequest { if m == nil { - return nil, nil + return (*GetSrvVSchemasRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetSrvVSchemasRequest{} + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteCellsAliasResponse) MarshalToVT(dAtA []byte) (int, error) { - size := 
m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetSrvVSchemasRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteCellsAliasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSrvVSchemasResponse) CloneVT() *GetSrvVSchemasResponse { if m == nil { - return 0, nil + return (*GetSrvVSchemasResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetSrvVSchemasResponse{} + if rhs := m.SrvVSchemas; rhs != nil { + tmpContainer := make(map[string]*vschema.SrvVSchema, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.SrvVSchemas = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetSrvVSchemasResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetTabletRequest) CloneVT() *GetTabletRequest { if m == nil { - return nil, nil + return (*GetTabletRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetTabletRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetTabletRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetTabletResponse) CloneVT() *GetTabletResponse { if m == nil { - return 0, nil + return 
(*GetTabletResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetTabletResponse{ + Tablet: m.Tablet.CloneVT(), } - if m.Force { - i-- - if m.Force { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.Recursive { - i-- - if m.Recursive { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + return r +} + +func (m *GetTabletResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetTabletsRequest) CloneVT() *GetTabletsRequest { + if m == nil { + return (*GetTabletsRequest)(nil) + } + r := &GetTabletsRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + Strict: m.Strict, + TabletType: m.TabletType, + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if rhs := m.TabletAliases; rhs != nil { + tmpContainer := make([]*topodata.TabletAlias, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x10 + r.TabletAliases = tmpContainer } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *DeleteKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetTabletsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetTabletsResponse) CloneVT() *GetTabletsResponse { if m == nil { - return nil, nil + return (*GetTabletsResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := 
&GetTabletsResponse{} + if rhs := m.Tablets; rhs != nil { + tmpContainer := make([]*topodata.Tablet, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Tablets = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetTabletsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetTopologyPathRequest) CloneVT() *GetTopologyPathRequest { if m == nil { - return 0, nil + return (*GetTopologyPathRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetTopologyPathRequest{ + Path: m.Path, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteShardsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetTopologyPathRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetTopologyPathResponse) CloneVT() *GetTopologyPathResponse { if m == nil { - return nil, nil + return (*GetTopologyPathResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetTopologyPathResponse{ + Cell: m.Cell.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteShardsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) 
+func (m *GetTopologyPathResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteShardsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *TopologyCell) CloneVT() *TopologyCell { if m == nil { - return 0, nil + return (*TopologyCell)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &TopologyCell{ + Name: m.Name, + Path: m.Path, + Data: m.Data, } - if m.Force { - i-- - if m.Force { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 + if rhs := m.Children; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Children = tmpContainer } - if m.EvenIfServing { - i-- - if m.EvenIfServing { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.Recursive { - i-- - if m.Recursive { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 + return r +} + +func (m *TopologyCell) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetVSchemaRequest) CloneVT() *GetVSchemaRequest { + if m == nil { + return (*GetVSchemaRequest)(nil) } - if len(m.Shards) > 0 { - for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Shards[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } + r := &GetVSchemaRequest{ + Keyspace: m.Keyspace, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteShardsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetVSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetVersionRequest) CloneVT() 
*GetVersionRequest { if m == nil { - return nil, nil + return (*GetVersionRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetVersionRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteShardsResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetVersionRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteShardsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetVersionResponse) CloneVT() *GetVersionResponse { if m == nil { - return 0, nil + return (*GetVersionResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetVersionResponse{ + Version: m.Version, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteSrvVSchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetVersionResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetVSchemaResponse) CloneVT() *GetVSchemaResponse { if m == nil { - return nil, nil + return (*GetVSchemaResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetVSchemaResponse{ + VSchema: m.VSchema.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteSrvVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { - 
size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetVSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteSrvVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetWorkflowsRequest) CloneVT() *GetWorkflowsRequest { if m == nil { - return 0, nil + return (*GetWorkflowsRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetWorkflowsRequest{ + Keyspace: m.Keyspace, + ActiveOnly: m.ActiveOnly, + NameOnly: m.NameOnly, + Workflow: m.Workflow, + IncludeLogs: m.IncludeLogs, } - if len(m.Cell) > 0 { - i -= len(m.Cell) - copy(dAtA[i:], m.Cell) - i = encodeVarint(dAtA, i, uint64(len(m.Cell))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *DeleteSrvVSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetWorkflowsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetWorkflowsResponse) CloneVT() *GetWorkflowsResponse { if m == nil { - return nil, nil + return (*GetWorkflowsResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetWorkflowsResponse{} + if rhs := m.Workflows; rhs != nil { + tmpContainer := make([]*Workflow, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Workflows = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteSrvVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetWorkflowsResponse) CloneMessageVT() proto.Message 
{ + return m.CloneVT() } -func (m *DeleteSrvVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *InitShardPrimaryRequest) CloneVT() *InitShardPrimaryRequest { if m == nil { - return 0, nil + return (*InitShardPrimaryRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &InitShardPrimaryRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + PrimaryElectTabletAlias: m.PrimaryElectTabletAlias.CloneVT(), + Force: m.Force, + WaitReplicasTimeout: m.WaitReplicasTimeout.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteTabletsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *InitShardPrimaryRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *InitShardPrimaryResponse) CloneVT() *InitShardPrimaryResponse { if m == nil { - return nil, nil + return (*InitShardPrimaryResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &InitShardPrimaryResponse{} + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteTabletsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *InitShardPrimaryResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteTabletsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *LaunchSchemaMigrationRequest) CloneVT() 
*LaunchSchemaMigrationRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*LaunchSchemaMigrationRequest)(nil) } - if m.AllowPrimary { - i-- - if m.AllowPrimary { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 + r := &LaunchSchemaMigrationRequest{ + Keyspace: m.Keyspace, + Uuid: m.Uuid, } - if len(m.TabletAliases) > 0 { - for iNdEx := len(m.TabletAliases) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.TabletAliases[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *DeleteTabletsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *LaunchSchemaMigrationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *LaunchSchemaMigrationResponse) CloneVT() *LaunchSchemaMigrationResponse { if m == nil { - return nil, nil + return (*LaunchSchemaMigrationResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &LaunchSchemaMigrationResponse{} + if rhs := m.RowsAffectedByShard; rhs != nil { + tmpContainer := make(map[string]uint64, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.RowsAffectedByShard = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteTabletsResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *LaunchSchemaMigrationResponse) CloneMessageVT() proto.Message { + return 
m.CloneVT() } -func (m *DeleteTabletsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *LookupVindexCreateRequest) CloneVT() *LookupVindexCreateRequest { if m == nil { - return 0, nil + return (*LookupVindexCreateRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &LookupVindexCreateRequest{ + Keyspace: m.Keyspace, + Workflow: m.Workflow, + Vindex: m.Vindex.CloneVT(), + ContinueAfterCopyWithOwner: m.ContinueAfterCopyWithOwner, + TabletSelectionPreference: m.TabletSelectionPreference, } - return len(dAtA) - i, nil + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if rhs := m.TabletTypes; rhs != nil { + tmpContainer := make([]topodata.TabletType, len(rhs)) + copy(tmpContainer, rhs) + r.TabletTypes = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *EmergencyReparentShardRequest) MarshalVT() (dAtA []byte, err error) { +func (m *LookupVindexCreateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *LookupVindexCreateResponse) CloneVT() *LookupVindexCreateResponse { if m == nil { - return nil, nil + return (*LookupVindexCreateResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &LookupVindexCreateResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *EmergencyReparentShardRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *LookupVindexCreateResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func 
(m *EmergencyReparentShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *LookupVindexExternalizeRequest) CloneVT() *LookupVindexExternalizeRequest { if m == nil { - return 0, nil + return (*LookupVindexExternalizeRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &LookupVindexExternalizeRequest{ + Keyspace: m.Keyspace, + Name: m.Name, + TableKeyspace: m.TableKeyspace, } - if m.PreventCrossCellPromotion { - i-- - if m.PreventCrossCellPromotion { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.WaitReplicasTimeout != nil { - size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x2a + return r +} + +func (m *LookupVindexExternalizeRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *LookupVindexExternalizeResponse) CloneVT() *LookupVindexExternalizeResponse { + if m == nil { + return (*LookupVindexExternalizeResponse)(nil) } - if len(m.IgnoreReplicas) > 0 { - for iNdEx := len(m.IgnoreReplicas) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.IgnoreReplicas[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 - } + r := &LookupVindexExternalizeResponse{ + WorkflowDeleted: m.WorkflowDeleted, } - if m.NewPrimary != nil { - size, err := m.NewPrimary.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Shard) > 0 { - i 
-= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 + return r +} + +func (m *LookupVindexExternalizeResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MaterializeCreateRequest) CloneVT() *MaterializeCreateRequest { + if m == nil { + return (*MaterializeCreateRequest)(nil) } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + r := &MaterializeCreateRequest{ + Settings: m.Settings.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *EmergencyReparentShardResponse) MarshalVT() (dAtA []byte, err error) { +func (m *MaterializeCreateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MaterializeCreateResponse) CloneVT() *MaterializeCreateResponse { if m == nil { - return nil, nil + return (*MaterializeCreateResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &MaterializeCreateResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *EmergencyReparentShardResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *MaterializeCreateResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *EmergencyReparentShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *MigrateCreateRequest) CloneVT() *MigrateCreateRequest { if m == nil { - return 0, nil + return (*MigrateCreateRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= 
len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &MigrateCreateRequest{ + Workflow: m.Workflow, + SourceKeyspace: m.SourceKeyspace, + TargetKeyspace: m.TargetKeyspace, + MountName: m.MountName, + TabletSelectionPreference: m.TabletSelectionPreference, + AllTables: m.AllTables, + SourceTimeZone: m.SourceTimeZone, + OnDdl: m.OnDdl, + StopAfterCopy: m.StopAfterCopy, + DropForeignKeys: m.DropForeignKeys, + DeferSecondaryKeys: m.DeferSecondaryKeys, + AutoStart: m.AutoStart, + NoRoutingRules: m.NoRoutingRules, } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 - } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer } - if m.PromotedPrimary != nil { - size, err := m.PromotedPrimary.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a + if rhs := m.TabletTypes; rhs != nil { + tmpContainer := make([]topodata.TabletType, len(rhs)) + copy(tmpContainer, rhs) + r.TabletTypes = tmpContainer } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 + if rhs := m.IncludeTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.IncludeTables = tmpContainer } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if rhs := m.ExcludeTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ExcludeTables = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + 
copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsAppRequest) MarshalVT() (dAtA []byte, err error) { +func (m *MigrateCreateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MigrateCompleteRequest) CloneVT() *MigrateCompleteRequest { if m == nil { - return nil, nil + return (*MigrateCompleteRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &MigrateCompleteRequest{ + Workflow: m.Workflow, + TargetKeyspace: m.TargetKeyspace, + KeepData: m.KeepData, + KeepRoutingRules: m.KeepRoutingRules, + RenameTables: m.RenameTables, + DryRun: m.DryRun, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsAppRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *MigrateCompleteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsAppRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *MigrateCompleteResponse) CloneVT() *MigrateCompleteResponse { if m == nil { - return 0, nil + return (*MigrateCompleteResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &MigrateCompleteResponse{ + Summary: m.Summary, } - if m.UsePool { - i-- - if m.UsePool { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 + if rhs := m.DryRunResults; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.DryRunResults = tmpContainer } - if m.MaxRows != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxRows)) - i-- - dAtA[i] = 0x18 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, 
m.unknownFields) } - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarint(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0x12 + return r +} + +func (m *MigrateCompleteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MountRegisterRequest) CloneVT() *MountRegisterRequest { + if m == nil { + return (*MountRegisterRequest)(nil) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r := &MountRegisterRequest{ + TopoType: m.TopoType, + TopoServer: m.TopoServer, + TopoRoot: m.TopoRoot, + Name: m.Name, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsAppResponse) MarshalVT() (dAtA []byte, err error) { +func (m *MountRegisterRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MountRegisterResponse) CloneVT() *MountRegisterResponse { if m == nil { - return nil, nil + return (*MountRegisterResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &MountRegisterResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ExecuteFetchAsAppResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *MountRegisterResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsAppResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *MountUnregisterRequest) CloneVT() *MountUnregisterRequest { if m == nil { - return 0, nil + return (*MountUnregisterRequest)(nil) } 
- i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &MountUnregisterRequest{ + Name: m.Name, } - if m.Result != nil { - size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteFetchAsDBARequest) MarshalVT() (dAtA []byte, err error) { +func (m *MountUnregisterRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MountUnregisterResponse) CloneVT() *MountUnregisterResponse { if m == nil { - return nil, nil + return (*MountUnregisterResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &MountUnregisterResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ExecuteFetchAsDBARequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *MountUnregisterResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsDBARequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *MountShowRequest) CloneVT() *MountShowRequest { if m == nil { - return 0, nil + return (*MountShowRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &MountShowRequest{ + Name: m.Name, } - if m.ReloadSchema { - i-- - if m.ReloadSchema { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 + if len(m.unknownFields) > 0 { + r.unknownFields = 
make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.DisableBinlogs { - i-- - if m.DisableBinlogs { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.MaxRows != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxRows)) - i-- - dAtA[i] = 0x18 - } - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarint(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0x12 - } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return r } -func (m *ExecuteFetchAsDBAResponse) MarshalVT() (dAtA []byte, err error) { +func (m *MountShowRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MountShowResponse) CloneVT() *MountShowResponse { if m == nil { - return nil, nil + return (*MountShowResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &MountShowResponse{ + TopoType: m.TopoType, + TopoServer: m.TopoServer, + TopoRoot: m.TopoRoot, + Name: m.Name, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsDBAResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *MountShowResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsDBAResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *MountListRequest) CloneVT() *MountListRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*MountListRequest)(nil) } - if 
m.Result != nil { - size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r := &MountListRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteHookRequest) MarshalVT() (dAtA []byte, err error) { +func (m *MountListRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MountListResponse) CloneVT() *MountListResponse { if m == nil { - return nil, nil + return (*MountListResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &MountListResponse{} + if rhs := m.Names; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Names = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteHookRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *MountListResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteHookRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *MoveTablesCreateRequest) CloneVT() *MoveTablesCreateRequest { if m == nil { - return 0, nil + return (*MoveTablesCreateRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &MoveTablesCreateRequest{ + Workflow: m.Workflow, + SourceKeyspace: m.SourceKeyspace, + TargetKeyspace: m.TargetKeyspace, + TabletSelectionPreference: m.TabletSelectionPreference, + AllTables: m.AllTables, + ExternalClusterName: m.ExternalClusterName, + SourceTimeZone: 
m.SourceTimeZone, + OnDdl: m.OnDdl, + StopAfterCopy: m.StopAfterCopy, + DropForeignKeys: m.DropForeignKeys, + DeferSecondaryKeys: m.DeferSecondaryKeys, + AutoStart: m.AutoStart, + NoRoutingRules: m.NoRoutingRules, + AtomicCopy: m.AtomicCopy, } - if m.TabletHookRequest != nil { - size, err := m.TabletHookRequest.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if rhs := m.TabletTypes; rhs != nil { + tmpContainer := make([]topodata.TabletType, len(rhs)) + copy(tmpContainer, rhs) + r.TabletTypes = tmpContainer } - return len(dAtA) - i, nil + if rhs := m.SourceShards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.SourceShards = tmpContainer + } + if rhs := m.IncludeTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.IncludeTables = tmpContainer + } + if rhs := m.ExcludeTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ExcludeTables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteHookResponse) MarshalVT() (dAtA []byte, err error) { +func (m *MoveTablesCreateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MoveTablesCreateResponse_TabletInfo) CloneVT() *MoveTablesCreateResponse_TabletInfo { if m == nil { - return nil, nil + return (*MoveTablesCreateResponse_TabletInfo)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &MoveTablesCreateResponse_TabletInfo{ + Tablet: m.Tablet.CloneVT(), + Created: m.Created, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteHookResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *MoveTablesCreateResponse_TabletInfo) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteHookResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *MoveTablesCreateResponse) CloneVT() *MoveTablesCreateResponse { if m == nil { - return 0, nil + return (*MoveTablesCreateResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &MoveTablesCreateResponse{ + Summary: m.Summary, } - if m.HookResult != nil { - size, err := m.HookResult.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if rhs := m.Details; rhs != nil { + tmpContainer := make([]*MoveTablesCreateResponse_TabletInfo, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r.Details = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *FindAllShardsInKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *MoveTablesCreateResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MoveTablesCompleteRequest) CloneVT() *MoveTablesCompleteRequest { if m == nil { - return nil, nil + return (*MoveTablesCompleteRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &MoveTablesCompleteRequest{ + Workflow: m.Workflow, + TargetKeyspace: m.TargetKeyspace, + KeepData: m.KeepData, + KeepRoutingRules: m.KeepRoutingRules, + RenameTables: m.RenameTables, + DryRun: m.DryRun, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *FindAllShardsInKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *MoveTablesCompleteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *FindAllShardsInKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *MoveTablesCompleteResponse) CloneVT() *MoveTablesCompleteResponse { if m == nil { - return 0, nil + return (*MoveTablesCompleteResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &MoveTablesCompleteResponse{ + Summary: m.Summary, } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if rhs := m.DryRunResults; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.DryRunResults = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *FindAllShardsInKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { +func (m *MoveTablesCompleteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PingTabletRequest) CloneVT() *PingTabletRequest { if m == nil { - return nil, nil + return (*PingTabletRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &PingTabletRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *FindAllShardsInKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *PingTabletRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *FindAllShardsInKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PingTabletResponse) CloneVT() *PingTabletResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*PingTabletResponse)(nil) } - if len(m.Shards) > 0 { - for k := range m.Shards { - v := m.Shards[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } + r := &PingTabletResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetBackupsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *PingTabletResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PlannedReparentShardRequest) CloneVT() *PlannedReparentShardRequest { if m == nil { - return nil, nil + return (*PlannedReparentShardRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := 
&PlannedReparentShardRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + NewPrimary: m.NewPrimary.CloneVT(), + AvoidPrimary: m.AvoidPrimary.CloneVT(), + WaitReplicasTimeout: m.WaitReplicasTimeout.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetBackupsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *PlannedReparentShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetBackupsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PlannedReparentShardResponse) CloneVT() *PlannedReparentShardResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*PlannedReparentShardResponse)(nil) } - if m.DetailedLimit != 0 { - i = encodeVarint(dAtA, i, uint64(m.DetailedLimit)) - i-- - dAtA[i] = 0x28 + r := &PlannedReparentShardResponse{ + Keyspace: m.Keyspace, + Shard: m.Shard, + PromotedPrimary: m.PromotedPrimary.CloneVT(), } - if m.Detailed { - i-- - if m.Detailed { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x20 + r.Events = tmpContainer } - if m.Limit != 0 { - i = encodeVarint(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x18 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 + return r +} + +func (m *PlannedReparentShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m 
*RebuildKeyspaceGraphRequest) CloneVT() *RebuildKeyspaceGraphRequest { + if m == nil { + return (*RebuildKeyspaceGraphRequest)(nil) } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + r := &RebuildKeyspaceGraphRequest{ + Keyspace: m.Keyspace, + AllowPartial: m.AllowPartial, } - return len(dAtA) - i, nil + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetBackupsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *RebuildKeyspaceGraphRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RebuildKeyspaceGraphResponse) CloneVT() *RebuildKeyspaceGraphResponse { if m == nil { - return nil, nil + return (*RebuildKeyspaceGraphResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RebuildKeyspaceGraphResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetBackupsResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RebuildKeyspaceGraphResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetBackupsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RebuildVSchemaGraphRequest) CloneVT() *RebuildVSchemaGraphRequest { if m == nil { - return 0, nil + return (*RebuildVSchemaGraphRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &RebuildVSchemaGraphRequest{} 
+ if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer } - if len(m.Backups) > 0 { - for iNdEx := len(m.Backups) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Backups[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetCellInfoRequest) MarshalVT() (dAtA []byte, err error) { +func (m *RebuildVSchemaGraphRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RebuildVSchemaGraphResponse) CloneVT() *RebuildVSchemaGraphResponse { if m == nil { - return nil, nil + return (*RebuildVSchemaGraphResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RebuildVSchemaGraphResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetCellInfoRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RebuildVSchemaGraphResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetCellInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RefreshStateRequest) CloneVT() *RefreshStateRequest { if m == nil { - return 0, nil + return (*RefreshStateRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &RefreshStateRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - if len(m.Cell) > 0 { - i -= len(m.Cell) - copy(dAtA[i:], m.Cell) - i = encodeVarint(dAtA, i, 
uint64(len(m.Cell))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetCellInfoResponse) MarshalVT() (dAtA []byte, err error) { +func (m *RefreshStateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshStateResponse) CloneVT() *RefreshStateResponse { if m == nil { - return nil, nil + return (*RefreshStateResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RefreshStateResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetCellInfoResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RefreshStateResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetCellInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RefreshStateByShardRequest) CloneVT() *RefreshStateByShardRequest { if m == nil { - return 0, nil + return (*RefreshStateByShardRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &RefreshStateByShardRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, } - if m.CellInfo != nil { - size, err := m.CellInfo.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, 
m.unknownFields) + } + return r } -func (m *GetCellInfoNamesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *RefreshStateByShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshStateByShardResponse) CloneVT() *RefreshStateByShardResponse { if m == nil { - return nil, nil + return (*RefreshStateByShardResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RefreshStateByShardResponse{ + IsPartialRefresh: m.IsPartialRefresh, + PartialRefreshDetails: m.PartialRefreshDetails, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetCellInfoNamesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RefreshStateByShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetCellInfoNamesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReloadSchemaRequest) CloneVT() *ReloadSchemaRequest { if m == nil { - return 0, nil + return (*ReloadSchemaRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ReloadSchemaRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetCellInfoNamesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ReloadSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemaResponse) CloneVT() *ReloadSchemaResponse { if m == nil { - return nil, nil + return (*ReloadSchemaResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ReloadSchemaResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetCellInfoNamesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ReloadSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetCellInfoNamesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReloadSchemaKeyspaceRequest) CloneVT() *ReloadSchemaKeyspaceRequest { if m == nil { - return 0, nil + return (*ReloadSchemaKeyspaceRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ReloadSchemaKeyspaceRequest{ + Keyspace: m.Keyspace, + WaitPosition: m.WaitPosition, + IncludePrimary: m.IncludePrimary, + Concurrency: m.Concurrency, } - if len(m.Names) > 0 { - for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Names[iNdEx]) - copy(dAtA[i:], m.Names[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Names[iNdEx]))) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetCellsAliasesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ReloadSchemaKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemaKeyspaceResponse) CloneVT() *ReloadSchemaKeyspaceResponse { if m == nil { - return nil, nil + return (*ReloadSchemaKeyspaceResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ReloadSchemaKeyspaceResponse{} + if rhs := m.Events; rhs != nil { + tmpContainer := 
make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetCellsAliasesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ReloadSchemaKeyspaceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetCellsAliasesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReloadSchemaShardRequest) CloneVT() *ReloadSchemaShardRequest { if m == nil { - return 0, nil + return (*ReloadSchemaShardRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ReloadSchemaShardRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + WaitPosition: m.WaitPosition, + IncludePrimary: m.IncludePrimary, + Concurrency: m.Concurrency, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetCellsAliasesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ReloadSchemaShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemaShardResponse) CloneVT() *ReloadSchemaShardResponse { if m == nil { - return nil, nil + return (*ReloadSchemaShardResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ReloadSchemaShardResponse{} + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = 
make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetCellsAliasesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ReloadSchemaShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetCellsAliasesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RemoveBackupRequest) CloneVT() *RemoveBackupRequest { if m == nil { - return 0, nil + return (*RemoveBackupRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &RemoveBackupRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + Name: m.Name, } - if len(m.Aliases) > 0 { - for k := range m.Aliases { - v := m.Aliases[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetFullStatusRequest) MarshalVT() (dAtA []byte, err error) { +func (m *RemoveBackupRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RemoveBackupResponse) CloneVT() *RemoveBackupResponse { if m == nil { - return nil, nil + return (*RemoveBackupResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RemoveBackupResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } 
-func (m *GetFullStatusRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RemoveBackupResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetFullStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RemoveKeyspaceCellRequest) CloneVT() *RemoveKeyspaceCellRequest { if m == nil { - return 0, nil + return (*RemoveKeyspaceCellRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &RemoveKeyspaceCellRequest{ + Keyspace: m.Keyspace, + Cell: m.Cell, + Force: m.Force, + Recursive: m.Recursive, } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetFullStatusResponse) MarshalVT() (dAtA []byte, err error) { +func (m *RemoveKeyspaceCellRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RemoveKeyspaceCellResponse) CloneVT() *RemoveKeyspaceCellResponse { if m == nil { - return nil, nil + return (*RemoveKeyspaceCellResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RemoveKeyspaceCellResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetFullStatusResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RemoveKeyspaceCellResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func 
(m *GetFullStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RemoveShardCellRequest) CloneVT() *RemoveShardCellRequest { if m == nil { - return 0, nil + return (*RemoveShardCellRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &RemoveShardCellRequest{ + Keyspace: m.Keyspace, + ShardName: m.ShardName, + Cell: m.Cell, + Force: m.Force, + Recursive: m.Recursive, } - if m.Status != nil { - size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetKeyspacesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *RemoveShardCellRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RemoveShardCellResponse) CloneVT() *RemoveShardCellResponse { if m == nil { - return nil, nil + return (*RemoveShardCellResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RemoveShardCellResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetKeyspacesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RemoveShardCellResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetKeyspacesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReparentTabletRequest) CloneVT() *ReparentTabletRequest { if m == nil { - return 0, nil + return (*ReparentTabletRequest)(nil) } - i := len(dAtA) - _ = i - var l int 
- _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ReparentTabletRequest{ + Tablet: m.Tablet.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetKeyspacesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ReparentTabletRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReparentTabletResponse) CloneVT() *ReparentTabletResponse { if m == nil { - return nil, nil + return (*ReparentTabletResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ReparentTabletResponse{ + Keyspace: m.Keyspace, + Shard: m.Shard, + Primary: m.Primary.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetKeyspacesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ReparentTabletResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetKeyspacesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReshardCreateRequest) CloneVT() *ReshardCreateRequest { if m == nil { - return 0, nil + return (*ReshardCreateRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ReshardCreateRequest{ + Workflow: m.Workflow, + Keyspace: m.Keyspace, + TabletSelectionPreference: m.TabletSelectionPreference, + SkipSchemaCopy: m.SkipSchemaCopy, + OnDdl: m.OnDdl, + StopAfterCopy: m.StopAfterCopy, + DeferSecondaryKeys: m.DeferSecondaryKeys, + AutoStart: m.AutoStart, } - if len(m.Keyspaces) > 0 { - for iNdEx := len(m.Keyspaces) - 
1; iNdEx >= 0; iNdEx-- { - size, err := m.Keyspaces[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } + if rhs := m.SourceShards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.SourceShards = tmpContainer } - return len(dAtA) - i, nil + if rhs := m.TargetShards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.TargetShards = tmpContainer + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if rhs := m.TabletTypes; rhs != nil { + tmpContainer := make([]topodata.TabletType, len(rhs)) + copy(tmpContainer, rhs) + r.TabletTypes = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ReshardCreateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RestoreFromBackupRequest) CloneVT() *RestoreFromBackupRequest { if m == nil { - return nil, nil + return (*RestoreFromBackupRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RestoreFromBackupRequest{ + TabletAlias: m.TabletAlias.CloneVT(), + BackupTime: m.BackupTime.CloneVT(), + RestoreToPos: m.RestoreToPos, + DryRun: m.DryRun, + RestoreToTimestamp: m.RestoreToTimestamp.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RestoreFromBackupRequest) CloneMessageVT() proto.Message 
{ + return m.CloneVT() } -func (m *GetKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RestoreFromBackupResponse) CloneVT() *RestoreFromBackupResponse { if m == nil { - return 0, nil + return (*RestoreFromBackupResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &RestoreFromBackupResponse{ + TabletAlias: m.TabletAlias.CloneVT(), + Keyspace: m.Keyspace, + Shard: m.Shard, + Event: m.Event.CloneVT(), } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { +func (m *RestoreFromBackupResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RetrySchemaMigrationRequest) CloneVT() *RetrySchemaMigrationRequest { if m == nil { - return nil, nil + return (*RetrySchemaMigrationRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RetrySchemaMigrationRequest{ + Keyspace: m.Keyspace, + Uuid: m.Uuid, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RetrySchemaMigrationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RetrySchemaMigrationResponse) CloneVT() *RetrySchemaMigrationResponse { if m == nil { - return 0, 
nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*RetrySchemaMigrationResponse)(nil) } - if m.Keyspace != nil { - size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + r := &RetrySchemaMigrationResponse{} + if rhs := m.RowsAffectedByShard; rhs != nil { + tmpContainer := make(map[string]uint64, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r.RowsAffectedByShard = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetPermissionsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *RetrySchemaMigrationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RunHealthCheckRequest) CloneVT() *RunHealthCheckRequest { if m == nil { - return nil, nil + return (*RunHealthCheckRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RunHealthCheckRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetPermissionsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RunHealthCheckRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetPermissionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RunHealthCheckResponse) CloneVT() *RunHealthCheckResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) 
- copy(dAtA[i:], m.unknownFields) + return (*RunHealthCheckResponse)(nil) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r := &RunHealthCheckResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetPermissionsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *RunHealthCheckResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetKeyspaceDurabilityPolicyRequest) CloneVT() *SetKeyspaceDurabilityPolicyRequest { if m == nil { - return nil, nil + return (*SetKeyspaceDurabilityPolicyRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SetKeyspaceDurabilityPolicyRequest{ + Keyspace: m.Keyspace, + DurabilityPolicy: m.DurabilityPolicy, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetPermissionsResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SetKeyspaceDurabilityPolicyRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetPermissionsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetKeyspaceDurabilityPolicyResponse) CloneVT() *SetKeyspaceDurabilityPolicyResponse { if m == nil { - return 0, nil + return (*SetKeyspaceDurabilityPolicyResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SetKeyspaceDurabilityPolicyResponse{ + Keyspace: m.Keyspace.CloneVT(), } - if m.Permissions != nil { 
- size, err := m.Permissions.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetRoutingRulesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *SetKeyspaceDurabilityPolicyResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetKeyspaceServedFromRequest) CloneVT() *SetKeyspaceServedFromRequest { if m == nil { - return nil, nil + return (*SetKeyspaceServedFromRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SetKeyspaceServedFromRequest{ + Keyspace: m.Keyspace, + TabletType: m.TabletType, + Remove: m.Remove, + SourceKeyspace: m.SourceKeyspace, } - return dAtA[:n], nil + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetRoutingRulesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SetKeyspaceServedFromRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetRoutingRulesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetKeyspaceServedFromResponse) CloneVT() *SetKeyspaceServedFromResponse { if m == nil { - return 0, nil + return (*SetKeyspaceServedFromResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SetKeyspaceServedFromResponse{ + Keyspace: m.Keyspace.CloneVT(), } - return len(dAtA) - i, nil + 
if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetRoutingRulesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *SetKeyspaceServedFromResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetKeyspaceShardingInfoRequest) CloneVT() *SetKeyspaceShardingInfoRequest { if m == nil { - return nil, nil + return (*SetKeyspaceShardingInfoRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SetKeyspaceShardingInfoRequest{ + Keyspace: m.Keyspace, + Force: m.Force, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetRoutingRulesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SetKeyspaceShardingInfoRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetRoutingRulesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetKeyspaceShardingInfoResponse) CloneVT() *SetKeyspaceShardingInfoResponse { if m == nil { - return 0, nil + return (*SetKeyspaceShardingInfoResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SetKeyspaceShardingInfoResponse{ + Keyspace: m.Keyspace.CloneVT(), } - if m.RoutingRules != nil { - size, err := m.RoutingRules.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetSchemaRequest) MarshalVT() 
(dAtA []byte, err error) { +func (m *SetKeyspaceShardingInfoResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetShardIsPrimaryServingRequest) CloneVT() *SetShardIsPrimaryServingRequest { if m == nil { - return nil, nil + return (*SetShardIsPrimaryServingRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SetShardIsPrimaryServingRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + IsServing: m.IsServing, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SetShardIsPrimaryServingRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetShardIsPrimaryServingResponse) CloneVT() *SetShardIsPrimaryServingResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*SetShardIsPrimaryServingResponse)(nil) } - if m.TableSchemaOnly { - i-- - if m.TableSchemaOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 + r := &SetShardIsPrimaryServingResponse{ + Shard: m.Shard.CloneVT(), } - if m.TableSizesOnly { - i-- - if m.TableSizesOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.TableNamesOnly { - i-- - if m.TableNamesOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 + return r +} + +func (m *SetShardIsPrimaryServingResponse) CloneMessageVT() proto.Message { + 
return m.CloneVT() +} + +func (m *SetShardTabletControlRequest) CloneVT() *SetShardTabletControlRequest { + if m == nil { + return (*SetShardTabletControlRequest)(nil) } - if m.IncludeViews { - i-- - if m.IncludeViews { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 + r := &SetShardTabletControlRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + TabletType: m.TabletType, + DisableQueryService: m.DisableQueryService, + Remove: m.Remove, } - if len(m.ExcludeTables) > 0 { - for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ExcludeTables[iNdEx]) - copy(dAtA[i:], m.ExcludeTables[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) - i-- - dAtA[i] = 0x1a - } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer } - if len(m.Tables) > 0 { - for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Tables[iNdEx]) - copy(dAtA[i:], m.Tables[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) - i-- - dAtA[i] = 0x12 - } + if rhs := m.DeniedTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.DeniedTables = tmpContainer } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *SetShardTabletControlRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetShardTabletControlResponse) CloneVT() *SetShardTabletControlResponse { if m == nil { - return nil, nil + return (*SetShardTabletControlResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SetShardTabletControlResponse{ + Shard: m.Shard.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SetShardTabletControlResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetWritableRequest) CloneVT() *SetWritableRequest { if m == nil { - return 0, nil + return (*SetWritableRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SetWritableRequest{ + TabletAlias: m.TabletAlias.CloneVT(), + Writable: m.Writable, } - if m.Schema != nil { - size, err := m.Schema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetShardRequest) MarshalVT() (dAtA []byte, err error) { +func (m *SetWritableRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetWritableResponse) CloneVT() *SetWritableResponse { if m == nil { - return nil, nil + return (*SetWritableResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SetWritableResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetShardRequest) 
MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SetWritableResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ShardReplicationAddRequest) CloneVT() *ShardReplicationAddRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*ShardReplicationAddRequest)(nil) } - if len(m.ShardName) > 0 { - i -= len(m.ShardName) - copy(dAtA[i:], m.ShardName) - i = encodeVarint(dAtA, i, uint64(len(m.ShardName))) - i-- - dAtA[i] = 0x12 + r := &ShardReplicationAddRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + TabletAlias: m.TabletAlias.CloneVT(), } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetShardResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ShardReplicationAddRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardReplicationAddResponse) CloneVT() *ShardReplicationAddResponse { if m == nil { - return nil, nil + return (*ShardReplicationAddResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ShardReplicationAddResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetShardResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m 
*ShardReplicationAddResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ShardReplicationFixRequest) CloneVT() *ShardReplicationFixRequest { if m == nil { - return 0, nil + return (*ShardReplicationFixRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ShardReplicationFixRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + Cell: m.Cell, } - if m.Shard != nil { - size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetShardRoutingRulesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ShardReplicationFixRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardReplicationFixResponse) CloneVT() *ShardReplicationFixResponse { if m == nil { - return nil, nil + return (*ShardReplicationFixResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ShardReplicationFixResponse{ + Error: m.Error.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetShardRoutingRulesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ShardReplicationFixResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetShardRoutingRulesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ShardReplicationPositionsRequest) 
CloneVT() *ShardReplicationPositionsRequest { if m == nil { - return 0, nil + return (*ShardReplicationPositionsRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ShardReplicationPositionsRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetShardRoutingRulesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ShardReplicationPositionsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardReplicationPositionsResponse) CloneVT() *ShardReplicationPositionsResponse { if m == nil { - return nil, nil + return (*ShardReplicationPositionsResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ShardReplicationPositionsResponse{} + if rhs := m.ReplicationStatuses; rhs != nil { + tmpContainer := make(map[string]*replicationdata.Status, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ReplicationStatuses = tmpContainer } - return dAtA[:n], nil + if rhs := m.TabletMap; rhs != nil { + tmpContainer := make(map[string]*topodata.Tablet, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TabletMap = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetShardRoutingRulesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ShardReplicationPositionsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetShardRoutingRulesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m 
*ShardReplicationRemoveRequest) CloneVT() *ShardReplicationRemoveRequest { if m == nil { - return 0, nil + return (*ShardReplicationRemoveRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ShardReplicationRemoveRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + TabletAlias: m.TabletAlias.CloneVT(), } - if m.ShardRoutingRules != nil { - size, err := m.ShardRoutingRules.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetSrvKeyspaceNamesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ShardReplicationRemoveRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardReplicationRemoveResponse) CloneVT() *ShardReplicationRemoveResponse { if m == nil { - return nil, nil + return (*ShardReplicationRemoveResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ShardReplicationRemoveResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetSrvKeyspaceNamesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ShardReplicationRemoveResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSrvKeyspaceNamesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SleepTabletRequest) CloneVT() *SleepTabletRequest { if m == nil { - return 0, nil + return (*SleepTabletRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - 
if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SleepTabletRequest{ + TabletAlias: m.TabletAlias.CloneVT(), + Duration: m.Duration.CloneVT(), } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetSrvKeyspaceNamesResponse_NameList) MarshalVT() (dAtA []byte, err error) { +func (m *SleepTabletRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SleepTabletResponse) CloneVT() *SleepTabletResponse { if m == nil { - return nil, nil + return (*SleepTabletResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SleepTabletResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetSrvKeyspaceNamesResponse_NameList) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SleepTabletResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSrvKeyspaceNamesResponse_NameList) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SourceShardAddRequest) CloneVT() *SourceShardAddRequest { if m == nil { - return 0, nil + return (*SourceShardAddRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SourceShardAddRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + Uid: m.Uid, + SourceKeyspace: m.SourceKeyspace, + SourceShard: m.SourceShard, 
+ KeyRange: m.KeyRange.CloneVT(), } - if len(m.Names) > 0 { - for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Names[iNdEx]) - copy(dAtA[i:], m.Names[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Names[iNdEx]))) - i-- - dAtA[i] = 0xa - } + if rhs := m.Tables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tables = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSrvKeyspaceNamesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *SourceShardAddRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SourceShardAddResponse) CloneVT() *SourceShardAddResponse { if m == nil { - return nil, nil + return (*SourceShardAddResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SourceShardAddResponse{ + Shard: m.Shard.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSrvKeyspaceNamesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SourceShardAddResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSrvKeyspaceNamesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SourceShardDeleteRequest) CloneVT() *SourceShardDeleteRequest { if m == nil { - return 0, nil + return (*SourceShardDeleteRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SourceShardDeleteRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + Uid: m.Uid, } - if len(m.Names) > 0 { - for k := range m.Names { - 
v := m.Names[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetSrvKeyspacesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *SourceShardDeleteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SourceShardDeleteResponse) CloneVT() *SourceShardDeleteResponse { if m == nil { - return nil, nil + return (*SourceShardDeleteResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SourceShardDeleteResponse{ + Shard: m.Shard.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSrvKeyspacesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SourceShardDeleteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSrvKeyspacesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StartReplicationRequest) CloneVT() *StartReplicationRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*StartReplicationRequest)(nil) } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, 
uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0x12 - } + r := &StartReplicationRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetSrvKeyspacesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *StartReplicationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StartReplicationResponse) CloneVT() *StartReplicationResponse { if m == nil { - return nil, nil + return (*StartReplicationResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &StartReplicationResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetSrvKeyspacesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *StartReplicationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSrvKeyspacesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StopReplicationRequest) CloneVT() *StopReplicationRequest { if m == nil { - return 0, nil + return (*StopReplicationRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &StopReplicationRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - if len(m.SrvKeyspaces) > 0 { - for k := range m.SrvKeyspaces { - v := m.SrvKeyspaces[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *UpdateThrottlerConfigRequest) MarshalVT() (dAtA []byte, err error) { +func (m *StopReplicationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StopReplicationResponse) CloneVT() *StopReplicationResponse { if m == nil { - return nil, nil + return (*StopReplicationResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &StopReplicationResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *UpdateThrottlerConfigRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *StopReplicationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *UpdateThrottlerConfigRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *TabletExternallyReparentedRequest) CloneVT() *TabletExternallyReparentedRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if m.CheckAsCheckShard { - i-- - if m.CheckAsCheckShard { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - } - if m.CheckAsCheckSelf { - i-- - if m.CheckAsCheckSelf { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - } - if m.CustomQuerySet { - i-- - if m.CustomQuerySet { - dAtA[i] = 1 - } else { - dAtA[i] 
= 0 - } - i-- - dAtA[i] = 0x30 + return (*TabletExternallyReparentedRequest)(nil) } - if len(m.CustomQuery) > 0 { - i -= len(m.CustomQuery) - copy(dAtA[i:], m.CustomQuery) - i = encodeVarint(dAtA, i, uint64(len(m.CustomQuery))) - i-- - dAtA[i] = 0x2a + r := &TabletExternallyReparentedRequest{ + Tablet: m.Tablet.CloneVT(), } - if m.Threshold != 0 { - i -= 8 - binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Threshold)))) - i-- - dAtA[i] = 0x21 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.Disable { - i-- - if m.Disable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + return r +} + +func (m *TabletExternallyReparentedRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TabletExternallyReparentedResponse) CloneVT() *TabletExternallyReparentedResponse { + if m == nil { + return (*TabletExternallyReparentedResponse)(nil) } - if m.Enable { - i-- - if m.Enable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 + r := &TabletExternallyReparentedResponse{ + Keyspace: m.Keyspace, + Shard: m.Shard, + NewPrimary: m.NewPrimary.CloneVT(), + OldPrimary: m.OldPrimary.CloneVT(), } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *UpdateThrottlerConfigResponse) MarshalVT() (dAtA []byte, err error) { +func (m *TabletExternallyReparentedResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UpdateCellInfoRequest) CloneVT() *UpdateCellInfoRequest { if m == nil { - return nil, nil + return (*UpdateCellInfoRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &UpdateCellInfoRequest{ + Name: m.Name, + CellInfo: m.CellInfo.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *UpdateThrottlerConfigResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *UpdateCellInfoRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *UpdateThrottlerConfigResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UpdateCellInfoResponse) CloneVT() *UpdateCellInfoResponse { if m == nil { - return 0, nil + return (*UpdateCellInfoResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &UpdateCellInfoResponse{ + Name: m.Name, + CellInfo: m.CellInfo.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSrvVSchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *UpdateCellInfoResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UpdateCellsAliasRequest) CloneVT() *UpdateCellsAliasRequest { if m == nil { - return nil, nil + return (*UpdateCellsAliasRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &UpdateCellsAliasRequest{ + Name: m.Name, + CellsAlias: m.CellsAlias.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSrvVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return 
m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *UpdateCellsAliasRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSrvVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UpdateCellsAliasResponse) CloneVT() *UpdateCellsAliasResponse { if m == nil { - return 0, nil + return (*UpdateCellsAliasResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &UpdateCellsAliasResponse{ + Name: m.Name, + CellsAlias: m.CellsAlias.CloneVT(), } - if len(m.Cell) > 0 { - i -= len(m.Cell) - copy(dAtA[i:], m.Cell) - i = encodeVarint(dAtA, i, uint64(len(m.Cell))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetSrvVSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *UpdateCellsAliasResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateRequest) CloneVT() *ValidateRequest { if m == nil { - return nil, nil + return (*ValidateRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ValidateRequest{ + PingTablets: m.PingTablets, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSrvVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ValidateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSrvVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ValidateResponse) CloneVT() *ValidateResponse { if m == nil { - return 0, nil + return (*ValidateResponse)(nil) } - i := len(dAtA) - _ 
= i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ValidateResponse{} + if rhs := m.Results; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Results = tmpContainer } - if m.SrvVSchema != nil { - size, err := m.SrvVSchema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if rhs := m.ResultsByKeyspace; rhs != nil { + tmpContainer := make(map[string]*ValidateKeyspaceResponse, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r.ResultsByKeyspace = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSrvVSchemasRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ValidateResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateKeyspaceRequest) CloneVT() *ValidateKeyspaceRequest { if m == nil { - return nil, nil + return (*ValidateKeyspaceRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ValidateKeyspaceRequest{ + Keyspace: m.Keyspace, + PingTablets: m.PingTablets, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSrvVSchemasRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ValidateKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSrvVSchemasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ValidateKeyspaceResponse) CloneVT() *ValidateKeyspaceResponse { if m == nil { - return 0, nil + return 
(*ValidateKeyspaceResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ValidateKeyspaceResponse{} + if rhs := m.Results; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Results = tmpContainer } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0x12 + if rhs := m.ResultsByShard; rhs != nil { + tmpContainer := make(map[string]*ValidateShardResponse, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } + r.ResultsByShard = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSrvVSchemasResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ValidateKeyspaceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateSchemaKeyspaceRequest) CloneVT() *ValidateSchemaKeyspaceRequest { if m == nil { - return nil, nil + return (*ValidateSchemaKeyspaceRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ValidateSchemaKeyspaceRequest{ + Keyspace: m.Keyspace, + IncludeViews: m.IncludeViews, + SkipNoPrimary: m.SkipNoPrimary, + IncludeVschema: m.IncludeVschema, } - return dAtA[:n], nil + if rhs := m.ExcludeTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ExcludeTables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSrvVSchemasResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return 
m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ValidateSchemaKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSrvVSchemasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ValidateSchemaKeyspaceResponse) CloneVT() *ValidateSchemaKeyspaceResponse { if m == nil { - return 0, nil + return (*ValidateSchemaKeyspaceResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ValidateSchemaKeyspaceResponse{} + if rhs := m.Results; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Results = tmpContainer } - if len(m.SrvVSchemas) > 0 { - for k := range m.SrvVSchemas { - v := m.SrvVSchemas[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa + if rhs := m.ResultsByShard; rhs != nil { + tmpContainer := make(map[string]*ValidateShardResponse, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } + r.ResultsByShard = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetTabletRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ValidateSchemaKeyspaceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateShardRequest) CloneVT() *ValidateShardRequest { if m == nil { - return nil, nil + return (*ValidateShardRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ValidateShardRequest{ + Keyspace: 
m.Keyspace, + Shard: m.Shard, + PingTablets: m.PingTablets, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetTabletRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ValidateShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ValidateShardResponse) CloneVT() *ValidateShardResponse { if m == nil { - return 0, nil + return (*ValidateShardResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ValidateShardResponse{} + if rhs := m.Results; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Results = tmpContainer } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetTabletResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ValidateShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateVersionKeyspaceRequest) CloneVT() *ValidateVersionKeyspaceRequest { if m == nil { - return nil, nil + return (*ValidateVersionKeyspaceRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ValidateVersionKeyspaceRequest{ + Keyspace: m.Keyspace, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + 
copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetTabletResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ValidateVersionKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ValidateVersionKeyspaceResponse) CloneVT() *ValidateVersionKeyspaceResponse { if m == nil { - return 0, nil + return (*ValidateVersionKeyspaceResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ValidateVersionKeyspaceResponse{} + if rhs := m.Results; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Results = tmpContainer } - if m.Tablet != nil { - size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if rhs := m.ResultsByShard; rhs != nil { + tmpContainer := make(map[string]*ValidateShardResponse, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r.ResultsByShard = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetTabletsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ValidateVersionKeyspaceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateVersionShardRequest) CloneVT() *ValidateVersionShardRequest { if m == nil { - return nil, nil + return (*ValidateVersionShardRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ValidateVersionShardRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, } - return dAtA[:n], nil + if 
len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetTabletsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ValidateVersionShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetTabletsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ValidateVersionShardResponse) CloneVT() *ValidateVersionShardResponse { if m == nil { - return 0, nil + return (*ValidateVersionShardResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ValidateVersionShardResponse{} + if rhs := m.Results; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Results = tmpContainer } - if m.TabletType != 0 { - i = encodeVarint(dAtA, i, uint64(m.TabletType)) - i-- - dAtA[i] = 0x30 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.TabletAliases) > 0 { - for iNdEx := len(m.TabletAliases) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.TabletAliases[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x2a - } + return r +} + +func (m *ValidateVersionShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateVSchemaRequest) CloneVT() *ValidateVSchemaRequest { + if m == nil { + return (*ValidateVSchemaRequest)(nil) } - if m.Strict { - i-- - if m.Strict { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 + r := &ValidateVSchemaRequest{ + Keyspace: m.Keyspace, + IncludeViews: m.IncludeViews, } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], 
m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0x1a + if rhs := m.Shards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Shards = tmpContainer + } + if rhs := m.ExcludeTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ExcludeTables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ValidateVSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateVSchemaResponse) CloneVT() *ValidateVSchemaResponse { + if m == nil { + return (*ValidateVSchemaResponse)(nil) + } + r := &ValidateVSchemaResponse{} + if rhs := m.Results; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Results = tmpContainer + } + if rhs := m.ResultsByShard; rhs != nil { + tmpContainer := make(map[string]*ValidateShardResponse, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } + r.ResultsByShard = tmpContainer } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + return r +} + +func (m *ValidateVSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VDiffCreateRequest) CloneVT() *VDiffCreateRequest { + if m == nil { + return (*VDiffCreateRequest)(nil) } - return len(dAtA) - i, nil + r := &VDiffCreateRequest{ + Workflow: m.Workflow, + TargetKeyspace: m.TargetKeyspace, + Uuid: m.Uuid, + TabletSelectionPreference: m.TabletSelectionPreference, + Limit: m.Limit, + 
FilteredReplicationWaitTime: m.FilteredReplicationWaitTime.CloneVT(), + DebugQuery: m.DebugQuery, + OnlyPKs: m.OnlyPKs, + UpdateTableStats: m.UpdateTableStats, + MaxExtraRowsToCompare: m.MaxExtraRowsToCompare, + Wait: m.Wait, + WaitUpdateInterval: m.WaitUpdateInterval.CloneVT(), + AutoRetry: m.AutoRetry, + Verbose: m.Verbose, + } + if rhs := m.SourceCells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.SourceCells = tmpContainer + } + if rhs := m.TargetCells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.TargetCells = tmpContainer + } + if rhs := m.TabletTypes; rhs != nil { + tmpContainer := make([]topodata.TabletType, len(rhs)) + copy(tmpContainer, rhs) + r.TabletTypes = tmpContainer + } + if rhs := m.Tables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetTabletsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *VDiffCreateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VDiffCreateResponse) CloneVT() *VDiffCreateResponse { if m == nil { - return nil, nil + return (*VDiffCreateResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &VDiffCreateResponse{ + UUID: m.UUID, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetTabletsResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *VDiffCreateResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetTabletsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, 
error) { +func (m *VDiffDeleteRequest) CloneVT() *VDiffDeleteRequest { if m == nil { - return 0, nil + return (*VDiffDeleteRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &VDiffDeleteRequest{ + Workflow: m.Workflow, + TargetKeyspace: m.TargetKeyspace, + Arg: m.Arg, } - if len(m.Tablets) > 0 { - for iNdEx := len(m.Tablets) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Tablets[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VDiffDeleteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VDiffDeleteResponse) CloneVT() *VDiffDeleteResponse { + if m == nil { + return (*VDiffDeleteResponse)(nil) + } + r := &VDiffDeleteResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VDiffDeleteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VDiffResumeRequest) CloneVT() *VDiffResumeRequest { + if m == nil { + return (*VDiffResumeRequest)(nil) + } + r := &VDiffResumeRequest{ + Workflow: m.Workflow, + TargetKeyspace: m.TargetKeyspace, + Uuid: m.Uuid, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VDiffResumeRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VDiffResumeResponse) CloneVT() *VDiffResumeResponse { + if m == nil { + return (*VDiffResumeResponse)(nil) + } + r := &VDiffResumeResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, 
m.unknownFields) + } + return r +} + +func (m *VDiffResumeResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VDiffShowRequest) CloneVT() *VDiffShowRequest { + if m == nil { + return (*VDiffShowRequest)(nil) + } + r := &VDiffShowRequest{ + Workflow: m.Workflow, + TargetKeyspace: m.TargetKeyspace, + Arg: m.Arg, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VDiffShowRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VDiffShowResponse) CloneVT() *VDiffShowResponse { + if m == nil { + return (*VDiffShowResponse)(nil) + } + r := &VDiffShowResponse{} + if rhs := m.TabletResponses; rhs != nil { + tmpContainer := make(map[string]*tabletmanagerdata.VDiffResponse, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } + r.TabletResponses = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetTopologyPathRequest) MarshalVT() (dAtA []byte, err error) { +func (m *VDiffShowResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VDiffStopRequest) CloneVT() *VDiffStopRequest { if m == nil { - return nil, nil + return (*VDiffStopRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &VDiffStopRequest{ + Workflow: m.Workflow, + TargetKeyspace: m.TargetKeyspace, + Uuid: m.Uuid, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetTopologyPathRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *VDiffStopRequest) CloneMessageVT() 
proto.Message { + return m.CloneVT() } -func (m *GetTopologyPathRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *VDiffStopResponse) CloneVT() *VDiffStopResponse { if m == nil { - return 0, nil + return (*VDiffStopResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &VDiffStopResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarint(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0xa + return r +} + +func (m *VDiffStopResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowDeleteRequest) CloneVT() *WorkflowDeleteRequest { + if m == nil { + return (*WorkflowDeleteRequest)(nil) } - return len(dAtA) - i, nil + r := &WorkflowDeleteRequest{ + Keyspace: m.Keyspace, + Workflow: m.Workflow, + KeepData: m.KeepData, + KeepRoutingRules: m.KeepRoutingRules, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetTopologyPathResponse) MarshalVT() (dAtA []byte, err error) { +func (m *WorkflowDeleteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowDeleteResponse_TabletInfo) CloneVT() *WorkflowDeleteResponse_TabletInfo { if m == nil { - return nil, nil + return (*WorkflowDeleteResponse_TabletInfo)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &WorkflowDeleteResponse_TabletInfo{ + Tablet: m.Tablet.CloneVT(), + Deleted: m.Deleted, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m 
*GetTopologyPathResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *WorkflowDeleteResponse_TabletInfo) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetTopologyPathResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *WorkflowDeleteResponse) CloneVT() *WorkflowDeleteResponse { if m == nil { - return 0, nil + return (*WorkflowDeleteResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &WorkflowDeleteResponse{ + Summary: m.Summary, } - if m.Cell != nil { - size, err := m.Cell.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if rhs := m.Details; rhs != nil { + tmpContainer := make([]*WorkflowDeleteResponse_TabletInfo, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r.Details = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *TopologyCell) MarshalVT() (dAtA []byte, err error) { +func (m *WorkflowDeleteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowStatusRequest) CloneVT() *WorkflowStatusRequest { if m == nil { - return nil, nil + return (*WorkflowStatusRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &WorkflowStatusRequest{ + Keyspace: m.Keyspace, + Workflow: m.Workflow, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *TopologyCell) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return 
m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *WorkflowStatusRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *TopologyCell) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *WorkflowStatusResponse_TableCopyState) CloneVT() *WorkflowStatusResponse_TableCopyState { if m == nil { - return 0, nil + return (*WorkflowStatusResponse_TableCopyState)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &WorkflowStatusResponse_TableCopyState{ + RowsCopied: m.RowsCopied, + RowsTotal: m.RowsTotal, + RowsPercentage: m.RowsPercentage, + BytesCopied: m.BytesCopied, + BytesTotal: m.BytesTotal, + BytesPercentage: m.BytesPercentage, } - if len(m.Children) > 0 { - for iNdEx := len(m.Children) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Children[iNdEx]) - copy(dAtA[i:], m.Children[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Children[iNdEx]))) - i-- - dAtA[i] = 0x22 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *WorkflowStatusResponse_TableCopyState) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowStatusResponse_ShardStreamState) CloneVT() *WorkflowStatusResponse_ShardStreamState { + if m == nil { + return (*WorkflowStatusResponse_ShardStreamState)(nil) + } + r := &WorkflowStatusResponse_ShardStreamState{ + Id: m.Id, + Tablet: m.Tablet.CloneVT(), + SourceShard: m.SourceShard, + Position: m.Position, + Status: m.Status, + Info: m.Info, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *WorkflowStatusResponse_ShardStreamState) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowStatusResponse_ShardStreams) CloneVT() *WorkflowStatusResponse_ShardStreams { + if m == nil { + return 
(*WorkflowStatusResponse_ShardStreams)(nil) + } + r := &WorkflowStatusResponse_ShardStreams{} + if rhs := m.Streams; rhs != nil { + tmpContainer := make([]*WorkflowStatusResponse_ShardStreamState, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } + r.Streams = tmpContainer } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarint(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x1a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarint(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x12 + return r +} + +func (m *WorkflowStatusResponse_ShardStreams) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowStatusResponse) CloneVT() *WorkflowStatusResponse { + if m == nil { + return (*WorkflowStatusResponse)(nil) } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + r := &WorkflowStatusResponse{ + TrafficState: m.TrafficState, } - return len(dAtA) - i, nil + if rhs := m.TableCopyState; rhs != nil { + tmpContainer := make(map[string]*WorkflowStatusResponse_TableCopyState, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TableCopyState = tmpContainer + } + if rhs := m.ShardStreams; rhs != nil { + tmpContainer := make(map[string]*WorkflowStatusResponse_ShardStreams, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ShardStreams = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetVSchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *WorkflowStatusResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowSwitchTrafficRequest) CloneVT() 
*WorkflowSwitchTrafficRequest { if m == nil { - return nil, nil + return (*WorkflowSwitchTrafficRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &WorkflowSwitchTrafficRequest{ + Keyspace: m.Keyspace, + Workflow: m.Workflow, + MaxReplicationLagAllowed: m.MaxReplicationLagAllowed.CloneVT(), + EnableReverseReplication: m.EnableReverseReplication, + Direction: m.Direction, + Timeout: m.Timeout.CloneVT(), + DryRun: m.DryRun, + InitializeTargetSequences: m.InitializeTargetSequences, } - return dAtA[:n], nil + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if rhs := m.TabletTypes; rhs != nil { + tmpContainer := make([]topodata.TabletType, len(rhs)) + copy(tmpContainer, rhs) + r.TabletTypes = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *WorkflowSwitchTrafficRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *WorkflowSwitchTrafficResponse) CloneVT() *WorkflowSwitchTrafficResponse { if m == nil { - return 0, nil + return (*WorkflowSwitchTrafficResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &WorkflowSwitchTrafficResponse{ + Summary: m.Summary, + StartState: m.StartState, + CurrentState: m.CurrentState, } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if rhs := m.DryRunResults; rhs != nil { + tmpContainer := 
make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.DryRunResults = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetVersionRequest) MarshalVT() (dAtA []byte, err error) { +func (m *WorkflowSwitchTrafficResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowUpdateRequest) CloneVT() *WorkflowUpdateRequest { + if m == nil { + return (*WorkflowUpdateRequest)(nil) + } + r := &WorkflowUpdateRequest{ + Keyspace: m.Keyspace, + TabletRequest: m.TabletRequest.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *WorkflowUpdateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowUpdateResponse_TabletInfo) CloneVT() *WorkflowUpdateResponse_TabletInfo { + if m == nil { + return (*WorkflowUpdateResponse_TabletInfo)(nil) + } + r := &WorkflowUpdateResponse_TabletInfo{ + Tablet: m.Tablet.CloneVT(), + Changed: m.Changed, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *WorkflowUpdateResponse_TabletInfo) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowUpdateResponse) CloneVT() *WorkflowUpdateResponse { + if m == nil { + return (*WorkflowUpdateResponse)(nil) + } + r := &WorkflowUpdateResponse{ + Summary: m.Summary, + } + if rhs := m.Details; rhs != nil { + tmpContainer := make([]*WorkflowUpdateResponse_TabletInfo, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Details = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *WorkflowUpdateResponse) CloneMessageVT() 
proto.Message { + return m.CloneVT() +} + +func (m *ExecuteVtctlCommandRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5312,12 +5317,12 @@ func (m *GetVersionRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetVersionRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteVtctlCommandRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetVersionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteVtctlCommandRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5329,20 +5334,24 @@ func (m *GetVersionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if m.ActionTimeout != 0 { + i = encodeVarint(dAtA, i, uint64(m.ActionTimeout)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x10 + } + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0xa + } } return len(dAtA) - i, nil } -func (m *GetVersionResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteVtctlCommandResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5355,12 +5364,12 @@ func (m *GetVersionResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetVersionResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteVtctlCommandResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetVersionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m 
*ExecuteVtctlCommandResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5372,17 +5381,20 @@ func (m *GetVersionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarint(dAtA, i, uint64(len(m.Version))) + if m.Event != nil { + size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *GetVSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *TableMaterializeSettings) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5395,12 +5407,12 @@ func (m *GetVSchemaResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *TableMaterializeSettings) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *TableMaterializeSettings) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5412,20 +5424,31 @@ func (m *GetVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.VSchema != nil { - size, err := m.VSchema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.CreateDdl) > 0 { + i -= len(m.CreateDdl) + copy(dAtA[i:], m.CreateDdl) + i = encodeVarint(dAtA, i, uint64(len(m.CreateDdl))) + i-- + dAtA[i] = 0x1a + } + if len(m.SourceExpression) > 0 { + i -= len(m.SourceExpression) + copy(dAtA[i:], m.SourceExpression) + i = encodeVarint(dAtA, i, uint64(len(m.SourceExpression))) + i-- + 
dAtA[i] = 0x12 + } + if len(m.TargetTable) > 0 { + i -= len(m.TargetTable) + copy(dAtA[i:], m.TargetTable) + i = encodeVarint(dAtA, i, uint64(len(m.TargetTable))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *GetWorkflowsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *MaterializeSettings) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5438,12 +5461,12 @@ func (m *GetWorkflowsRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetWorkflowsRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *MaterializeSettings) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetWorkflowsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *MaterializeSettings) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5455,37 +5478,136 @@ func (m *GetWorkflowsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.NameOnly { + if m.AtomicCopy { i-- - if m.NameOnly { + if m.AtomicCopy { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x18 + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 } - if m.ActiveOnly { + if m.TabletSelectionPreference != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletSelectionPreference)) i-- - if m.ActiveOnly { + dAtA[i] = 0x78 + } + if m.DeferSecondaryKeys { + i-- + if m.DeferSecondaryKeys { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x10 + dAtA[i] = 0x70 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if len(m.OnDdl) > 0 { + i -= len(m.OnDdl) + copy(dAtA[i:], m.OnDdl) + i = encodeVarint(dAtA, i, uint64(len(m.OnDdl))) + i-- + dAtA[i] = 0x6a + } + if len(m.SourceShards) > 0 { + for iNdEx := len(m.SourceShards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SourceShards[iNdEx]) + copy(dAtA[i:], m.SourceShards[iNdEx]) + 
i = encodeVarint(dAtA, i, uint64(len(m.SourceShards[iNdEx]))) + i-- + dAtA[i] = 0x62 + } + } + if len(m.TargetTimeZone) > 0 { + i -= len(m.TargetTimeZone) + copy(dAtA[i:], m.TargetTimeZone) + i = encodeVarint(dAtA, i, uint64(len(m.TargetTimeZone))) + i-- + dAtA[i] = 0x5a + } + if len(m.SourceTimeZone) > 0 { + i -= len(m.SourceTimeZone) + copy(dAtA[i:], m.SourceTimeZone) + i = encodeVarint(dAtA, i, uint64(len(m.SourceTimeZone))) + i-- + dAtA[i] = 0x52 + } + if m.MaterializationIntent != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaterializationIntent)) + i-- + dAtA[i] = 0x48 + } + if len(m.ExternalCluster) > 0 { + i -= len(m.ExternalCluster) + copy(dAtA[i:], m.ExternalCluster) + i = encodeVarint(dAtA, i, uint64(len(m.ExternalCluster))) + i-- + dAtA[i] = 0x42 + } + if len(m.TabletTypes) > 0 { + i -= len(m.TabletTypes) + copy(dAtA[i:], m.TabletTypes) + i = encodeVarint(dAtA, i, uint64(len(m.TabletTypes))) + i-- + dAtA[i] = 0x3a + } + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarint(dAtA, i, uint64(len(m.Cell))) + i-- + dAtA[i] = 0x32 + } + if len(m.TableSettings) > 0 { + for iNdEx := len(m.TableSettings) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TableSettings[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.StopAfterCopy { + i-- + if m.StopAfterCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.TargetKeyspace) > 0 { + i -= len(m.TargetKeyspace) + copy(dAtA[i:], m.TargetKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.TargetKeyspace))) + i-- + dAtA[i] = 0x1a + } + if len(m.SourceKeyspace) > 0 { + i -= len(m.SourceKeyspace) + copy(dAtA[i:], m.SourceKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.SourceKeyspace))) + i-- + dAtA[i] = 0x12 + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) i-- 
dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *GetWorkflowsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *Keyspace) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5498,12 +5620,12 @@ func (m *GetWorkflowsResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetWorkflowsResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *Keyspace) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetWorkflowsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Keyspace) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5515,22 +5637,27 @@ func (m *GetWorkflowsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Workflows) > 0 { - for iNdEx := len(m.Workflows) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Workflows[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if m.Keyspace != nil { + size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *InitShardPrimaryRequest) MarshalVT() (dAtA []byte, err error) { +func (m *SchemaMigration) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5543,12 +5670,12 @@ func (m *InitShardPrimaryRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *InitShardPrimaryRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *SchemaMigration) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return 
m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *InitShardPrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SchemaMigration) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5560,54 +5687,495 @@ func (m *InitShardPrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.WaitReplicasTimeout != nil { - size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i]) + if m.ReadyToCompleteAt != nil { + size, err := m.ReadyToCompleteAt.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x2a + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xaa } - if m.Force { + if m.ReviewedAt != nil { + size, err := m.ReviewedAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- - if m.Force { + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xa2 + } + if m.IsImmediateOperation { + i-- + if m.IsImmediateOperation { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x20 + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x98 } - if m.PrimaryElectTabletAlias != nil { - size, err := m.PrimaryElectTabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if m.CutoverAttempts != 0 { + i = encodeVarint(dAtA, i, uint64(m.CutoverAttempts)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x90 + } + if len(m.Stage) > 0 { + i -= len(m.Stage) + copy(dAtA[i:], m.Stage) + i = encodeVarint(dAtA, i, uint64(len(m.Stage))) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x8a + } + if m.PostponeLaunch { + i-- + if m.PostponeLaunch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x80 + } + if m.CancelledAt != nil { + size, err := m.CancelledAt.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xfa + } + if 
len(m.ComponentThrottled) > 0 { + i -= len(m.ComponentThrottled) + copy(dAtA[i:], m.ComponentThrottled) + i = encodeVarint(dAtA, i, uint64(len(m.ComponentThrottled))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xf2 + } + if m.LastThrottledAt != nil { + size, err := m.LastThrottledAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xea + } + if len(m.SpecialPlan) > 0 { + i -= len(m.SpecialPlan) + copy(dAtA[i:], m.SpecialPlan) + i = encodeVarint(dAtA, i, uint64(len(m.SpecialPlan))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xe2 + } + if m.UserThrottleRatio != 0 { + i -= 4 + binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.UserThrottleRatio)))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xdd + } + if m.VitessLivenessIndicator != 0 { + i = encodeVarint(dAtA, i, uint64(m.VitessLivenessIndicator)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xd0 + } + if m.ReadyToComplete { + i-- + if m.ReadyToComplete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc8 + } + if m.IsView { + i-- + if m.IsView { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc0 + } + if len(m.RevertedUuid) > 0 { + i -= len(m.RevertedUuid) + copy(dAtA[i:], m.RevertedUuid) + i = encodeVarint(dAtA, i, uint64(len(m.RevertedUuid))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xba + } + if m.AllowConcurrent { + i-- + if m.AllowConcurrent { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb0 + } + if len(m.RevertibleNotes) > 0 { + i -= len(m.RevertibleNotes) + copy(dAtA[i:], m.RevertibleNotes) + i = encodeVarint(dAtA, i, uint64(len(m.RevertibleNotes))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xaa + } + if len(m.ExpandedColumnNames) > 0 { + i -= len(m.ExpandedColumnNames) + copy(dAtA[i:], m.ExpandedColumnNames) + i = encodeVarint(dAtA, i, uint64(len(m.ExpandedColumnNames))) + i-- 
+ dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if len(m.DroppedNoDefaultColumnNames) > 0 { + i -= len(m.DroppedNoDefaultColumnNames) + copy(dAtA[i:], m.DroppedNoDefaultColumnNames) + i = encodeVarint(dAtA, i, uint64(len(m.DroppedNoDefaultColumnNames))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if len(m.RemovedUniqueKeyNames) > 0 { + i -= len(m.RemovedUniqueKeyNames) + copy(dAtA[i:], m.RemovedUniqueKeyNames) + i = encodeVarint(dAtA, i, uint64(len(m.RemovedUniqueKeyNames))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + if m.PostponeCompletion { + i-- + if m.PostponeCompletion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x88 + } + if m.ArtifactRetention != nil { + size, err := m.ArtifactRetention.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + if len(m.LogFile) > 0 { + i -= len(m.LogFile) + copy(dAtA[i:], m.LogFile) + i = encodeVarint(dAtA, i, uint64(len(m.LogFile))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } + if m.RemovedUniqueKeys != 0 { + i = encodeVarint(dAtA, i, uint64(m.RemovedUniqueKeys)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 + } + if m.AddedUniqueKeys != 0 { + i = encodeVarint(dAtA, i, uint64(m.AddedUniqueKeys)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe8 + } + if m.TableRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.TableRows)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe0 + } + if m.RowsCopied != 0 { + i = encodeVarint(dAtA, i, uint64(m.RowsCopied)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + if m.EtaSeconds != 0 { + i = encodeVarint(dAtA, i, uint64(m.EtaSeconds)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarint(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if len(m.DdlAction) > 0 { + i -= len(m.DdlAction) + copy(dAtA[i:], 
m.DdlAction) + i = encodeVarint(dAtA, i, uint64(len(m.DdlAction))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.MigrationContext) > 0 { + i -= len(m.MigrationContext) + copy(dAtA[i:], m.MigrationContext) + i = encodeVarint(dAtA, i, uint64(len(m.MigrationContext))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if m.Progress != 0 { + i -= 4 + binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Progress)))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb5 + } + if m.TabletFailure { + i-- + if m.TabletFailure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if m.Tablet != nil { + size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.Retries != 0 { + i = encodeVarint(dAtA, i, uint64(m.Retries)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if len(m.Artifacts) > 0 { + i -= len(m.Artifacts) + copy(dAtA[i:], m.Artifacts) + i = encodeVarint(dAtA, i, uint64(len(m.Artifacts))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if len(m.LogPath) > 0 { + i -= len(m.LogPath) + copy(dAtA[i:], m.LogPath) + i = encodeVarint(dAtA, i, uint64(len(m.LogPath))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.Status != 0 { + i = encodeVarint(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.CleanedUpAt != nil { + size, err := m.CleanedUpAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if m.CompletedAt != nil { + size, err := m.CompletedAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if m.LivenessTimestamp != nil { + size, err := m.LivenessTimestamp.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.StartedAt != nil { + size, err := m.StartedAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if m.ReadyAt != nil { + size, err := m.ReadyAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if m.RequestedAt != nil { + size, err := m.RequestedAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.AddedAt != nil { + size, err := m.AddedAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if len(m.Options) > 0 { + i -= len(m.Options) + copy(dAtA[i:], m.Options) + i = encodeVarint(dAtA, i, uint64(len(m.Options))) + i-- + dAtA[i] = 0x42 + } + if m.Strategy != 0 { + i = encodeVarint(dAtA, i, uint64(m.Strategy)) + i-- + dAtA[i] = 0x38 + } + if len(m.MigrationStatement) > 0 { + i -= len(m.MigrationStatement) + copy(dAtA[i:], m.MigrationStatement) + i = encodeVarint(dAtA, i, uint64(len(m.MigrationStatement))) + i-- + dAtA[i] = 0x32 + } + if len(m.Table) > 0 { + i -= len(m.Table) + copy(dAtA[i:], m.Table) + i = encodeVarint(dAtA, i, uint64(len(m.Table))) + i-- + dAtA[i] = 0x2a + } + if len(m.Schema) > 0 { + i -= len(m.Schema) + copy(dAtA[i:], m.Schema) + i = encodeVarint(dAtA, i, uint64(len(m.Schema))) + i-- + dAtA[i] = 0x22 } if len(m.Shard) > 0 { i -= len(m.Shard) copy(dAtA[i:], m.Shard) i = encodeVarint(dAtA, i, uint64(len(m.Shard))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } if len(m.Keyspace) > 0 { i -= len(m.Keyspace) copy(dAtA[i:], m.Keyspace) i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) i-- + dAtA[i] = 0x12 + } + if len(m.Uuid) > 0 { + i -= 
len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarint(dAtA, i, uint64(len(m.Uuid))) + i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *InitShardPrimaryResponse) MarshalVT() (dAtA []byte, err error) { +func (m *Shard) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5620,12 +6188,12 @@ func (m *InitShardPrimaryResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *InitShardPrimaryResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *Shard) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *InitShardPrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Shard) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5637,22 +6205,34 @@ func (m *InitShardPrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if m.Shard != nil { + size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *PingTabletRequest) MarshalVT() (dAtA []byte, err error) { +func (m *Workflow_ReplicationLocation) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5665,12 +6245,12 @@ func (m *PingTabletRequest) MarshalVT() (dAtA 
[]byte, err error) { return dAtA[:n], nil } -func (m *PingTabletRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *Workflow_ReplicationLocation) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PingTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Workflow_ReplicationLocation) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5682,20 +6262,26 @@ func (m *PingTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Shards) > 0 { + for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Shards[iNdEx]) + copy(dAtA[i:], m.Shards[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Shards[iNdEx]))) + i-- + dAtA[i] = 0x12 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *PingTabletResponse) MarshalVT() (dAtA []byte, err error) { +func (m *Workflow_ShardStream) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5708,12 +6294,12 @@ func (m *PingTabletResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PingTabletResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *Workflow_ShardStream) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PingTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Workflow_ShardStream) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5725,10 +6311,44 @@ func (m *PingTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, 
error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.IsPrimaryServing { + i-- + if m.IsPrimaryServing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.TabletControls) > 0 { + for iNdEx := len(m.TabletControls) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TabletControls[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Streams) > 0 { + for iNdEx := len(m.Streams) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Streams[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *PlannedReparentShardRequest) MarshalVT() (dAtA []byte, err error) { +func (m *Workflow_Stream_CopyState) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5741,12 +6361,12 @@ func (m *PlannedReparentShardRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PlannedReparentShardRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *Workflow_Stream_CopyState) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PlannedReparentShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Workflow_Stream_CopyState) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5758,54 +6378,24 @@ func (m *PlannedReparentShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.WaitReplicasTimeout != nil { - size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.LastPk) > 0 { + i -= len(m.LastPk) + copy(dAtA[i:], m.LastPk) + i = encodeVarint(dAtA, i, 
uint64(len(m.LastPk))) i-- - dAtA[i] = 0x2a + dAtA[i] = 0x12 } - if m.AvoidPrimary != nil { - size, err := m.AvoidPrimary.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 - } - if m.NewPrimary != nil { - size, err := m.NewPrimary.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if len(m.Table) > 0 { + i -= len(m.Table) + copy(dAtA[i:], m.Table) + i = encodeVarint(dAtA, i, uint64(len(m.Table))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *PlannedReparentShardResponse) MarshalVT() (dAtA []byte, err error) { +func (m *Workflow_Stream_Log) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5818,12 +6408,12 @@ func (m *PlannedReparentShardResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PlannedReparentShardResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *Workflow_Stream_Log) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PlannedReparentShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Workflow_Stream_Log) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5835,46 +6425,66 @@ func (m *PlannedReparentShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - 
} - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 + if m.Count != 0 { + i = encodeVarint(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x40 + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarint(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x3a + } + if m.UpdatedAt != nil { + size, err := m.UpdatedAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 } - if m.PromotedPrimary != nil { - size, err := m.PromotedPrimary.MarshalToSizedBufferVT(dAtA[:i]) + if m.CreatedAt != nil { + size, err := m.CreatedAt.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- + dAtA[i] = 0x2a + } + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarint(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x22 + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarint(dAtA, i, uint64(len(m.Type))) + i-- dAtA[i] = 0x1a } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if m.StreamId != 0 { + i = encodeVarint(dAtA, i, uint64(m.StreamId)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x10 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if m.Id != 0 { + i = encodeVarint(dAtA, i, uint64(m.Id)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *RebuildKeyspaceGraphRequest) MarshalVT() (dAtA []byte, err error) { +func (m *Workflow_Stream_ThrottlerStatus) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5887,12 +6497,12 @@ func (m *RebuildKeyspaceGraphRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RebuildKeyspaceGraphRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m 
*Workflow_Stream_ThrottlerStatus) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RebuildKeyspaceGraphRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Workflow_Stream_ThrottlerStatus) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5904,36 +6514,27 @@ func (m *RebuildKeyspaceGraphRequest) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.AllowPartial { - i-- - if m.AllowPartial { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.TimeThrottled != nil { + size, err := m.TimeThrottled.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x18 - } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0x12 - } + dAtA[i] = 0x12 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if len(m.ComponentThrottled) > 0 { + i -= len(m.ComponentThrottled) + copy(dAtA[i:], m.ComponentThrottled) + i = encodeVarint(dAtA, i, uint64(len(m.ComponentThrottled))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RebuildKeyspaceGraphResponse) MarshalVT() (dAtA []byte, err error) { +func (m *Workflow_Stream) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5946,12 +6547,12 @@ func (m *RebuildKeyspaceGraphResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RebuildKeyspaceGraphResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *Workflow_Stream) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RebuildKeyspaceGraphResponse) MarshalToSizedBufferVT(dAtA 
[]byte) (int, error) { +func (m *Workflow_Stream) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5963,10 +6564,156 @@ func (m *RebuildKeyspaceGraphResponse) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.ThrottlerStatus != nil { + size, err := m.ThrottlerStatus.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.RowsCopied != 0 { + i = encodeVarint(dAtA, i, uint64(m.RowsCopied)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if len(m.Tags) > 0 { + for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tags[iNdEx]) + copy(dAtA[i:], m.Tags[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Tags[iNdEx]))) + i-- + dAtA[i] = 0x7a + } + } + if len(m.LogFetchError) > 0 { + i -= len(m.LogFetchError) + copy(dAtA[i:], m.LogFetchError) + i = encodeVarint(dAtA, i, uint64(len(m.LogFetchError))) + i-- + dAtA[i] = 0x72 + } + if len(m.Logs) > 0 { + for iNdEx := len(m.Logs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Logs[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + } + if len(m.CopyStates) > 0 { + for iNdEx := len(m.CopyStates) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CopyStates[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarint(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x5a + } + if m.TimeUpdated != nil { + size, err := m.TimeUpdated.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.TransactionTimestamp != 
nil { + size, err := m.TransactionTimestamp.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if len(m.DbName) > 0 { + i -= len(m.DbName) + copy(dAtA[i:], m.DbName) + i = encodeVarint(dAtA, i, uint64(len(m.DbName))) + i-- + dAtA[i] = 0x42 + } + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarint(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x3a + } + if len(m.StopPosition) > 0 { + i -= len(m.StopPosition) + copy(dAtA[i:], m.StopPosition) + i = encodeVarint(dAtA, i, uint64(len(m.StopPosition))) + i-- + dAtA[i] = 0x32 + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0x2a + } + if m.BinlogSource != nil { + size, err := m.BinlogSource.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.Tablet != nil { + size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if m.Id != 0 { + i = encodeVarint(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } return len(dAtA) - i, nil } -func (m *RebuildVSchemaGraphRequest) MarshalVT() (dAtA []byte, err error) { +func (m *Workflow) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5979,12 +6726,12 @@ func (m *RebuildVSchemaGraphRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RebuildVSchemaGraphRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *Workflow) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func 
(m *RebuildVSchemaGraphRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Workflow) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5996,52 +6743,93 @@ func (m *RebuildVSchemaGraphRequest) MarshalToSizedBufferVT(dAtA []byte) (int, e i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + if m.DeferSecondaryKeys { + i-- + if m.DeferSecondaryKeys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + if m.MaxVReplicationTransactionLag != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxVReplicationTransactionLag)) + i-- + dAtA[i] = 0x40 + } + if len(m.WorkflowSubType) > 0 { + i -= len(m.WorkflowSubType) + copy(dAtA[i:], m.WorkflowSubType) + i = encodeVarint(dAtA, i, uint64(len(m.WorkflowSubType))) + i-- + dAtA[i] = 0x3a + } + if len(m.WorkflowType) > 0 { + i -= len(m.WorkflowType) + copy(dAtA[i:], m.WorkflowType) + i = encodeVarint(dAtA, i, uint64(len(m.WorkflowType))) + i-- + dAtA[i] = 0x32 + } + if len(m.ShardStreams) > 0 { + for k := range m.ShardStreams { + v := m.ShardStreams[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) i-- dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a } } - return len(dAtA) - i, nil -} - -func (m *RebuildVSchemaGraphResponse) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil + if m.MaxVReplicationLag != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxVReplicationLag)) + i-- + dAtA[i] = 0x20 } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - 
return nil, err + if m.Target != nil { + size, err := m.Target.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a } - return dAtA[:n], nil -} - -func (m *RebuildVSchemaGraphResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *RebuildVSchemaGraphResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil + if m.Source != nil { + size, err := m.Source.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RefreshStateRequest) MarshalVT() (dAtA []byte, err error) { +func (m *AddCellInfoRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6054,12 +6842,12 @@ func (m *RefreshStateRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RefreshStateRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *AddCellInfoRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RefreshStateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *AddCellInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6071,20 +6859,27 @@ func (m *RefreshStateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if m.CellInfo != nil { + size, err := 
m.CellInfo.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RefreshStateResponse) MarshalVT() (dAtA []byte, err error) { +func (m *AddCellInfoResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6097,12 +6892,12 @@ func (m *RefreshStateResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RefreshStateResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *AddCellInfoResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RefreshStateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *AddCellInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6117,7 +6912,7 @@ func (m *RefreshStateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *RefreshStateByShardRequest) MarshalVT() (dAtA []byte, err error) { +func (m *AddCellsAliasRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6130,12 +6925,12 @@ func (m *RefreshStateByShardRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RefreshStateByShardRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *AddCellsAliasRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RefreshStateByShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *AddCellsAliasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6153,27 +6948,20 @@ func (m *RefreshStateByShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, e copy(dAtA[i:], m.Cells[iNdEx]) i 
= encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x12 } } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RefreshStateByShardResponse) MarshalVT() (dAtA []byte, err error) { +func (m *AddCellsAliasResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6186,12 +6974,12 @@ func (m *RefreshStateByShardResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RefreshStateByShardResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *AddCellsAliasResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RefreshStateByShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *AddCellsAliasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6203,27 +6991,10 @@ func (m *RefreshStateByShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.PartialRefreshDetails) > 0 { - i -= len(m.PartialRefreshDetails) - copy(dAtA[i:], m.PartialRefreshDetails) - i = encodeVarint(dAtA, i, uint64(len(m.PartialRefreshDetails))) - i-- - dAtA[i] = 0x12 - } - if m.IsPartialRefresh { - i-- - if m.IsPartialRefresh { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } return len(dAtA) - i, nil } -func (m *ReloadSchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ApplyRoutingRulesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6236,12 +7007,12 @@ 
func (m *ReloadSchemaRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReloadSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplyRoutingRulesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReloadSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplyRoutingRulesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6253,8 +7024,27 @@ func (m *ReloadSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if len(m.RebuildCells) > 0 { + for iNdEx := len(m.RebuildCells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RebuildCells[iNdEx]) + copy(dAtA[i:], m.RebuildCells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.RebuildCells[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.SkipRebuild { + i-- + if m.SkipRebuild { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.RoutingRules != nil { + size, err := m.RoutingRules.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -6266,7 +7056,7 @@ func (m *ReloadSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ReloadSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ApplyRoutingRulesResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6279,12 +7069,12 @@ func (m *ReloadSchemaResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReloadSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplyRoutingRulesResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReloadSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m 
*ApplyRoutingRulesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6299,7 +7089,7 @@ func (m *ReloadSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *ReloadSchemaKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ApplyShardRoutingRulesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6312,12 +7102,12 @@ func (m *ReloadSchemaKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReloadSchemaKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplyShardRoutingRulesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReloadSchemaKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplyShardRoutingRulesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6329,39 +7119,39 @@ func (m *ReloadSchemaKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Concurrency != 0 { - i = encodeVarint(dAtA, i, uint64(m.Concurrency)) - i-- - dAtA[i] = 0x20 + if len(m.RebuildCells) > 0 { + for iNdEx := len(m.RebuildCells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RebuildCells[iNdEx]) + copy(dAtA[i:], m.RebuildCells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.RebuildCells[iNdEx]))) + i-- + dAtA[i] = 0x1a + } } - if m.IncludePrimary { + if m.SkipRebuild { i-- - if m.IncludePrimary { + if m.SkipRebuild { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x18 - } - if len(m.WaitPosition) > 0 { - i -= len(m.WaitPosition) - copy(dAtA[i:], m.WaitPosition) - i = encodeVarint(dAtA, i, uint64(len(m.WaitPosition))) - i-- - dAtA[i] = 0x12 + dAtA[i] = 0x10 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if 
m.ShardRoutingRules != nil { + size, err := m.ShardRoutingRules.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ReloadSchemaKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ApplyShardRoutingRulesResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6374,12 +7164,12 @@ func (m *ReloadSchemaKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReloadSchemaKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplyShardRoutingRulesResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReloadSchemaKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplyShardRoutingRulesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6391,22 +7181,10 @@ func (m *ReloadSchemaKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } - } return len(dAtA) - i, nil } -func (m *ReloadSchemaShardRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ApplySchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6419,12 +7197,12 @@ func (m *ReloadSchemaShardRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReloadSchemaShardRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplySchemaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m 
*ReloadSchemaShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplySchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6436,34 +7214,62 @@ func (m *ReloadSchemaShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Concurrency != 0 { - i = encodeVarint(dAtA, i, uint64(m.Concurrency)) + if m.BatchSize != 0 { + i = encodeVarint(dAtA, i, uint64(m.BatchSize)) i-- - dAtA[i] = 0x28 + dAtA[i] = 0x50 } - if m.IncludePrimary { + if m.CallerId != nil { + size, err := m.CallerId.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- - if m.IncludePrimary { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x4a + } + if m.WaitReplicasTimeout != nil { + size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x20 + dAtA[i] = 0x3a } - if len(m.WaitPosition) > 0 { - i -= len(m.WaitPosition) - copy(dAtA[i:], m.WaitPosition) - i = encodeVarint(dAtA, i, uint64(len(m.WaitPosition))) + if len(m.MigrationContext) > 0 { + i -= len(m.MigrationContext) + copy(dAtA[i:], m.MigrationContext) + i = encodeVarint(dAtA, i, uint64(len(m.MigrationContext))) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x32 } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if len(m.UuidList) > 0 { + for iNdEx := len(m.UuidList) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.UuidList[iNdEx]) + copy(dAtA[i:], m.UuidList[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.UuidList[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.DdlStrategy) > 0 { + i -= len(m.DdlStrategy) + copy(dAtA[i:], m.DdlStrategy) + i = encodeVarint(dAtA, i, uint64(len(m.DdlStrategy))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x22 + } + if len(m.Sql) > 0 { + for 
iNdEx := len(m.Sql) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Sql[iNdEx]) + copy(dAtA[i:], m.Sql[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Sql[iNdEx]))) + i-- + dAtA[i] = 0x1a + } } if len(m.Keyspace) > 0 { i -= len(m.Keyspace) @@ -6475,7 +7281,7 @@ func (m *ReloadSchemaShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err return len(dAtA) - i, nil } -func (m *ReloadSchemaShardResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ApplySchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6488,12 +7294,12 @@ func (m *ReloadSchemaShardResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReloadSchemaShardResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplySchemaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReloadSchemaShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplySchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6505,22 +7311,36 @@ func (m *ReloadSchemaShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.RowsAffectedByShard) > 0 { + for k := range m.RowsAffectedByShard { + v := m.RowsAffectedByShard[k] + baseI := i + i = encodeVarint(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) i-- dAtA[i] = 0x12 } } + if len(m.UuidList) > 0 { + for iNdEx := len(m.UuidList) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.UuidList[iNdEx]) + copy(dAtA[i:], m.UuidList[iNdEx]) + i = 
encodeVarint(dAtA, i, uint64(len(m.UuidList[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *RemoveBackupRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ApplyVSchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6533,12 +7353,12 @@ func (m *RemoveBackupRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RemoveBackupRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplyVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RemoveBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplyVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6550,19 +7370,51 @@ func (m *RemoveBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) + if len(m.Sql) > 0 { + i -= len(m.Sql) + copy(dAtA[i:], m.Sql) + i = encodeVarint(dAtA, i, uint64(len(m.Sql))) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x32 } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if m.VSchema != nil { + size, err := m.VSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x2a + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if m.DryRun { + i-- + if m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.SkipRebuild { + i-- + if m.SkipRebuild { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + 
i-- + dAtA[i] = 0x10 } if len(m.Keyspace) > 0 { i -= len(m.Keyspace) @@ -6574,7 +7426,7 @@ func (m *RemoveBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RemoveBackupResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ApplyVSchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6587,12 +7439,12 @@ func (m *RemoveBackupResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RemoveBackupResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplyVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RemoveBackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplyVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6604,10 +7456,20 @@ func (m *RemoveBackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.VSchema != nil { + size, err := m.VSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *RemoveKeyspaceCellRequest) MarshalVT() (dAtA []byte, err error) { +func (m *BackupRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6620,12 +7482,12 @@ func (m *RemoveKeyspaceCellRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RemoveKeyspaceCellRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *BackupRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RemoveKeyspaceCellRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *BackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6637,44 
+7499,52 @@ func (m *RemoveKeyspaceCellRequest) MarshalToSizedBufferVT(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Recursive { + if m.UpgradeSafe { i-- - if m.Recursive { + if m.UpgradeSafe { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x20 + dAtA[i] = 0x28 } - if m.Force { + if len(m.IncrementalFromPos) > 0 { + i -= len(m.IncrementalFromPos) + copy(dAtA[i:], m.IncrementalFromPos) + i = encodeVarint(dAtA, i, uint64(len(m.IncrementalFromPos))) i-- - if m.Force { + dAtA[i] = 0x22 + } + if m.Concurrency != 0 { + i = encodeVarint(dAtA, i, uint64(m.Concurrency)) + i-- + dAtA[i] = 0x18 + } + if m.AllowPrimary { + i-- + if m.AllowPrimary { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x18 - } - if len(m.Cell) > 0 { - i -= len(m.Cell) - copy(dAtA[i:], m.Cell) - i = encodeVarint(dAtA, i, uint64(len(m.Cell))) - i-- - dAtA[i] = 0x12 + dAtA[i] = 0x10 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RemoveKeyspaceCellResponse) MarshalVT() (dAtA []byte, err error) { +func (m *BackupResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6687,12 +7557,12 @@ func (m *RemoveKeyspaceCellResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RemoveKeyspaceCellResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *BackupResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RemoveKeyspaceCellResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *BackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6704,10 +7574,44 @@ 
func (m *RemoveKeyspaceCellResponse) MarshalToSizedBufferVT(dAtA []byte) (int, e i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.Event != nil { + size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x1a + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x12 + } + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *RemoveShardCellRequest) MarshalVT() (dAtA []byte, err error) { +func (m *BackupShardRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6720,12 +7624,12 @@ func (m *RemoveShardCellRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RemoveShardCellRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *BackupShardRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RemoveShardCellRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *BackupShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6737,9 +7641,16 @@ func (m *RemoveShardCellRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Recursive { + if len(m.IncrementalFromPos) > 0 { + i -= len(m.IncrementalFromPos) + copy(dAtA[i:], m.IncrementalFromPos) + i = encodeVarint(dAtA, i, uint64(len(m.IncrementalFromPos))) i-- - if m.Recursive { + dAtA[i] = 0x32 + } + if m.UpgradeSafe { 
+ i-- + if m.UpgradeSafe { dAtA[i] = 1 } else { dAtA[i] = 0 @@ -6747,27 +7658,25 @@ func (m *RemoveShardCellRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error i-- dAtA[i] = 0x28 } - if m.Force { + if m.Concurrency != 0 { + i = encodeVarint(dAtA, i, uint64(m.Concurrency)) i-- - if m.Force { + dAtA[i] = 0x20 + } + if m.AllowPrimary { + i-- + if m.AllowPrimary { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x20 - } - if len(m.Cell) > 0 { - i -= len(m.Cell) - copy(dAtA[i:], m.Cell) - i = encodeVarint(dAtA, i, uint64(len(m.Cell))) - i-- - dAtA[i] = 0x1a + dAtA[i] = 0x18 } - if len(m.ShardName) > 0 { - i -= len(m.ShardName) - copy(dAtA[i:], m.ShardName) - i = encodeVarint(dAtA, i, uint64(len(m.ShardName))) + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) i-- dAtA[i] = 0x12 } @@ -6781,7 +7690,7 @@ func (m *RemoveShardCellRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error return len(dAtA) - i, nil } -func (m *RemoveShardCellResponse) MarshalVT() (dAtA []byte, err error) { +func (m *CancelSchemaMigrationRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6794,12 +7703,12 @@ func (m *RemoveShardCellResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RemoveShardCellResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *CancelSchemaMigrationRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RemoveShardCellResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CancelSchemaMigrationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6811,10 +7720,24 @@ func (m *RemoveShardCellResponse) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarint(dAtA, i, 
uint64(len(m.Uuid))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *ReparentTabletRequest) MarshalVT() (dAtA []byte, err error) { +func (m *CancelSchemaMigrationResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6827,12 +7750,12 @@ func (m *ReparentTabletRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReparentTabletRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *CancelSchemaMigrationResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReparentTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CancelSchemaMigrationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6844,20 +7767,27 @@ func (m *ReparentTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Tablet != nil { - size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if len(m.RowsAffectedByShard) > 0 { + for k := range m.RowsAffectedByShard { + v := m.RowsAffectedByShard[k] + baseI := i + i = encodeVarint(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ReparentTabletResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ChangeTabletTypeRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6870,12 +7800,12 @@ func (m *ReparentTabletResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } 
-func (m *ReparentTabletResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ChangeTabletTypeRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReparentTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ChangeTabletTypeRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6887,34 +7817,35 @@ func (m *ReparentTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Primary != nil { - size, err := m.Primary.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if m.DryRun { + i-- + if m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x18 } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if m.DbType != 0 { + i = encodeVarint(dAtA, i, uint64(m.DbType)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x10 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RestoreFromBackupRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ChangeTabletTypeResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6927,12 +7858,12 @@ func (m *RestoreFromBackupRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RestoreFromBackupRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ChangeTabletTypeResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m 
*RestoreFromBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ChangeTabletTypeResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6944,25 +7875,18 @@ func (m *RestoreFromBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.DryRun { + if m.WasDryRun { i-- - if m.DryRun { + if m.WasDryRun { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x20 - } - if len(m.RestoreToPos) > 0 { - i -= len(m.RestoreToPos) - copy(dAtA[i:], m.RestoreToPos) - i = encodeVarint(dAtA, i, uint64(len(m.RestoreToPos))) - i-- - dAtA[i] = 0x1a + dAtA[i] = 0x18 } - if m.BackupTime != nil { - size, err := m.BackupTime.MarshalToSizedBufferVT(dAtA[:i]) + if m.AfterTablet != nil { + size, err := m.AfterTablet.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -6971,8 +7895,8 @@ func (m *RestoreFromBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err i-- dAtA[i] = 0x12 } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if m.BeforeTablet != nil { + size, err := m.BeforeTablet.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -6984,7 +7908,7 @@ func (m *RestoreFromBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err return len(dAtA) - i, nil } -func (m *RestoreFromBackupResponse) MarshalVT() (dAtA []byte, err error) { +func (m *CleanupSchemaMigrationRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6997,12 +7921,12 @@ func (m *RestoreFromBackupResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RestoreFromBackupResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *CleanupSchemaMigrationRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RestoreFromBackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m 
*CleanupSchemaMigrationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7014,44 +7938,24 @@ func (m *RestoreFromBackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Event != nil { - size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarint(dAtA, i, uint64(len(m.Uuid))) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x12 } if len(m.Keyspace) > 0 { i -= len(m.Keyspace) copy(dAtA[i:], m.Keyspace) i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) i-- - dAtA[i] = 0x12 - } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RunHealthCheckRequest) MarshalVT() (dAtA []byte, err error) { +func (m *CleanupSchemaMigrationResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7064,12 +7968,12 @@ func (m *RunHealthCheckRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RunHealthCheckRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *CleanupSchemaMigrationResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RunHealthCheckRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CleanupSchemaMigrationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7081,20 +7985,27 @@ func (m *RunHealthCheckRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) 
copy(dAtA[i:], m.unknownFields) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if len(m.RowsAffectedByShard) > 0 { + for k := range m.RowsAffectedByShard { + v := m.RowsAffectedByShard[k] + baseI := i + i = encodeVarint(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RunHealthCheckResponse) MarshalVT() (dAtA []byte, err error) { +func (m *CompleteSchemaMigrationRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7107,12 +8018,12 @@ func (m *RunHealthCheckResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RunHealthCheckResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *CompleteSchemaMigrationRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RunHealthCheckResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CompleteSchemaMigrationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7124,10 +8035,24 @@ func (m *RunHealthCheckResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarint(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *SetKeyspaceDurabilityPolicyRequest) MarshalVT() (dAtA []byte, err error) { +func (m *CompleteSchemaMigrationResponse) MarshalVT() (dAtA 
[]byte, err error) { if m == nil { return nil, nil } @@ -7140,12 +8065,12 @@ func (m *SetKeyspaceDurabilityPolicyRequest) MarshalVT() (dAtA []byte, err error return dAtA[:n], nil } -func (m *SetKeyspaceDurabilityPolicyRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *CompleteSchemaMigrationResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetKeyspaceDurabilityPolicyRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CompleteSchemaMigrationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7157,24 +8082,27 @@ func (m *SetKeyspaceDurabilityPolicyRequest) MarshalToSizedBufferVT(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.DurabilityPolicy) > 0 { - i -= len(m.DurabilityPolicy) - copy(dAtA[i:], m.DurabilityPolicy) - i = encodeVarint(dAtA, i, uint64(len(m.DurabilityPolicy))) - i-- - dAtA[i] = 0x12 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.RowsAffectedByShard) > 0 { + for k := range m.RowsAffectedByShard { + v := m.RowsAffectedByShard[k] + baseI := i + i = encodeVarint(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } } return len(dAtA) - i, nil } -func (m *SetKeyspaceDurabilityPolicyResponse) MarshalVT() (dAtA []byte, err error) { +func (m *CreateKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7187,12 +8115,12 @@ func (m *SetKeyspaceDurabilityPolicyResponse) MarshalVT() (dAtA []byte, err erro return dAtA[:n], nil } -func (m *SetKeyspaceDurabilityPolicyResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *CreateKeyspaceRequest) MarshalToVT(dAtA []byte) 
(int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetKeyspaceDurabilityPolicyResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CreateKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7204,91 +8132,85 @@ func (m *SetKeyspaceDurabilityPolicyResponse) MarshalToSizedBufferVT(dAtA []byte i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Keyspace != nil { - size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) + if len(m.SidecarDbName) > 0 { + i -= len(m.SidecarDbName) + copy(dAtA[i:], m.SidecarDbName) + i = encodeVarint(dAtA, i, uint64(len(m.SidecarDbName))) + i-- + dAtA[i] = 0x5a + } + if len(m.DurabilityPolicy) > 0 { + i -= len(m.DurabilityPolicy) + copy(dAtA[i:], m.DurabilityPolicy) + i = encodeVarint(dAtA, i, uint64(len(m.DurabilityPolicy))) + i-- + dAtA[i] = 0x52 + } + if m.SnapshotTime != nil { + size, err := m.SnapshotTime.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SetKeyspaceServedFromRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SetKeyspaceServedFromRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *SetKeyspaceServedFromRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil + dAtA[i] = 0x4a } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + if len(m.BaseKeyspace) > 0 { + i -= len(m.BaseKeyspace) + copy(dAtA[i:], m.BaseKeyspace) + i = encodeVarint(dAtA, i, 
uint64(len(m.BaseKeyspace))) + i-- + dAtA[i] = 0x42 } - if len(m.SourceKeyspace) > 0 { - i -= len(m.SourceKeyspace) - copy(dAtA[i:], m.SourceKeyspace) - i = encodeVarint(dAtA, i, uint64(len(m.SourceKeyspace))) + if m.Type != 0 { + i = encodeVarint(dAtA, i, uint64(m.Type)) i-- - dAtA[i] = 0x2a + dAtA[i] = 0x38 } - if m.Remove { + if len(m.ServedFroms) > 0 { + for iNdEx := len(m.ServedFroms) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ServedFroms[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if m.AllowEmptyVSchema { i-- - if m.Remove { + if m.AllowEmptyVSchema { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x20 + dAtA[i] = 0x18 } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0x1a + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - } - if m.TabletType != 0 { - i = encodeVarint(dAtA, i, uint64(m.TabletType)) i-- dAtA[i] = 0x10 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *SetKeyspaceServedFromResponse) MarshalVT() (dAtA []byte, err error) { +func (m *CreateKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7301,12 +8223,12 @@ func (m *SetKeyspaceServedFromResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetKeyspaceServedFromResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *CreateKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m 
*SetKeyspaceServedFromResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CreateKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7331,7 +8253,7 @@ func (m *SetKeyspaceServedFromResponse) MarshalToSizedBufferVT(dAtA []byte) (int return len(dAtA) - i, nil } -func (m *SetKeyspaceShardingInfoRequest) MarshalVT() (dAtA []byte, err error) { +func (m *CreateShardRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7344,12 +8266,12 @@ func (m *SetKeyspaceShardingInfoRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetKeyspaceShardingInfoRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *CreateShardRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetKeyspaceShardingInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CreateShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7361,6 +8283,16 @@ func (m *SetKeyspaceShardingInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (in i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.IncludeParent { + i-- + if m.IncludeParent { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } if m.Force { i-- if m.Force { @@ -7369,7 +8301,14 @@ func (m *SetKeyspaceShardingInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (in dAtA[i] = 0 } i-- - dAtA[i] = 0x20 + dAtA[i] = 0x18 + } + if len(m.ShardName) > 0 { + i -= len(m.ShardName) + copy(dAtA[i:], m.ShardName) + i = encodeVarint(dAtA, i, uint64(len(m.ShardName))) + i-- + dAtA[i] = 0x12 } if len(m.Keyspace) > 0 { i -= len(m.Keyspace) @@ -7381,7 +8320,7 @@ func (m *SetKeyspaceShardingInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (in return len(dAtA) - i, nil } -func (m *SetKeyspaceShardingInfoResponse) MarshalVT() (dAtA []byte, err error) { +func (m *CreateShardResponse) MarshalVT() 
(dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7394,12 +8333,12 @@ func (m *SetKeyspaceShardingInfoResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetKeyspaceShardingInfoResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *CreateShardResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetKeyspaceShardingInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CreateShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7411,6 +8350,26 @@ func (m *SetKeyspaceShardingInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (i i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.ShardAlreadyExists { + i-- + if m.ShardAlreadyExists { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Shard != nil { + size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } if m.Keyspace != nil { size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -7424,7 +8383,7 @@ func (m *SetKeyspaceShardingInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (i return len(dAtA) - i, nil } -func (m *SetShardIsPrimaryServingRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteCellInfoRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7437,12 +8396,12 @@ func (m *SetShardIsPrimaryServingRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetShardIsPrimaryServingRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteCellInfoRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetShardIsPrimaryServingRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteCellInfoRequest) 
MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7454,34 +8413,27 @@ func (m *SetShardIsPrimaryServingRequest) MarshalToSizedBufferVT(dAtA []byte) (i i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.IsServing { + if m.Force { i-- - if m.IsServing { + if m.Force { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x18 - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 + dAtA[i] = 0x10 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *SetShardIsPrimaryServingResponse) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteCellInfoResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7494,12 +8446,12 @@ func (m *SetShardIsPrimaryServingResponse) MarshalVT() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *SetShardIsPrimaryServingResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteCellInfoResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetShardIsPrimaryServingResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteCellInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7511,20 +8463,10 @@ func (m *SetShardIsPrimaryServingResponse) MarshalToSizedBufferVT(dAtA []byte) ( i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Shard != nil { - size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m 
*SetShardTabletControlRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteCellsAliasRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7537,12 +8479,12 @@ func (m *SetShardTabletControlRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetShardTabletControlRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteCellsAliasRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetShardTabletControlRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteCellsAliasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7554,67 +8496,17 @@ func (m *SetShardTabletControlRequest) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Remove { - i-- - if m.Remove { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - } - if m.DisableQueryService { - i-- - if m.DisableQueryService { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - } - if len(m.DeniedTables) > 0 { - for iNdEx := len(m.DeniedTables) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DeniedTables[iNdEx]) - copy(dAtA[i:], m.DeniedTables[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.DeniedTables[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if m.TabletType != 0 { - i = encodeVarint(dAtA, i, uint64(m.TabletType)) - i-- - dAtA[i] = 0x18 - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, 
uint64(len(m.Keyspace))) + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *SetShardTabletControlResponse) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteCellsAliasResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7627,12 +8519,12 @@ func (m *SetShardTabletControlResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetShardTabletControlResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteCellsAliasResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetShardTabletControlResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteCellsAliasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7644,20 +8536,10 @@ func (m *SetShardTabletControlResponse) MarshalToSizedBufferVT(dAtA []byte) (int i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Shard != nil { - size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *SetWritableRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7670,12 +8552,12 @@ func (m *SetWritableRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetWritableRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetWritableRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { 
return 0, nil } @@ -7687,30 +8569,37 @@ func (m *SetWritableRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Writable { + if m.Force { i-- - if m.Writable { + if m.Force { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x10 + dAtA[i] = 0x18 } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if m.Recursive { + i-- + if m.Recursive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x10 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *SetWritableResponse) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7723,12 +8612,12 @@ func (m *SetWritableResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetWritableResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetWritableResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7743,7 +8632,7 @@ func (m *SetWritableResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ShardReplicationAddRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteShardsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7756,12 +8645,12 @@ func (m *ShardReplicationAddRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ShardReplicationAddRequest) MarshalToVT(dAtA []byte) (int, 
error) { +func (m *DeleteShardsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ShardReplicationAddRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteShardsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7773,34 +8662,52 @@ func (m *ShardReplicationAddRequest) MarshalToSizedBufferVT(dAtA []byte) (int, e i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x28 } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if m.EvenIfServing { i-- - dAtA[i] = 0x12 + if m.EvenIfServing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if m.Recursive { i-- - dAtA[i] = 0xa + if m.Recursive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Shards) > 0 { + for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Shards[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } } return len(dAtA) - i, nil } -func (m *ShardReplicationAddResponse) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteShardsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7813,12 +8720,12 @@ func (m *ShardReplicationAddResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ShardReplicationAddResponse) MarshalToVT(dAtA []byte) (int, error) { 
+func (m *DeleteShardsResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ShardReplicationAddResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteShardsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7833,7 +8740,7 @@ func (m *ShardReplicationAddResponse) MarshalToSizedBufferVT(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *ShardReplicationFixRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteSrvVSchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7846,12 +8753,12 @@ func (m *ShardReplicationFixRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ShardReplicationFixRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteSrvVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ShardReplicationFixRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteSrvVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7868,26 +8775,12 @@ func (m *ShardReplicationFixRequest) MarshalToSizedBufferVT(dAtA []byte) (int, e copy(dAtA[i:], m.Cell) i = encodeVarint(dAtA, i, uint64(len(m.Cell))) i-- - dAtA[i] = 0x1a - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ShardReplicationFixResponse) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteSrvVSchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7900,12 +8793,12 @@ func (m *ShardReplicationFixResponse) MarshalVT() (dAtA 
[]byte, err error) { return dAtA[:n], nil } -func (m *ShardReplicationFixResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteSrvVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ShardReplicationFixResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteSrvVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7917,20 +8810,10 @@ func (m *ShardReplicationFixResponse) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Error != nil { - size, err := m.Error.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *ShardReplicationPositionsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteTabletsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7943,12 +8826,12 @@ func (m *ShardReplicationPositionsRequest) MarshalVT() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *ShardReplicationPositionsRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteTabletsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ShardReplicationPositionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteTabletsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7960,24 +8843,32 @@ func (m *ShardReplicationPositionsRequest) MarshalToSizedBufferVT(dAtA []byte) ( i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if m.AllowPrimary { i-- - dAtA[i] = 0x12 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - 
copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if m.AllowPrimary { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } i-- - dAtA[i] = 0xa + dAtA[i] = 0x10 + } + if len(m.TabletAliases) > 0 { + for iNdEx := len(m.TabletAliases) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TabletAliases[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } } return len(dAtA) - i, nil } -func (m *ShardReplicationPositionsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteTabletsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7990,12 +8881,12 @@ func (m *ShardReplicationPositionsResponse) MarshalVT() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *ShardReplicationPositionsResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteTabletsResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ShardReplicationPositionsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteTabletsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8007,54 +8898,10 @@ func (m *ShardReplicationPositionsResponse) MarshalToSizedBufferVT(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.TabletMap) > 0 { - for k := range m.TabletMap { - v := m.TabletMap[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.ReplicationStatuses) > 0 { - for k := range m.ReplicationStatuses { - v := m.ReplicationStatuses[k] - baseI := i - size, err := 
v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } return len(dAtA) - i, nil } -func (m *ShardReplicationRemoveRequest) MarshalVT() (dAtA []byte, err error) { +func (m *EmergencyReparentShardRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8067,12 +8914,12 @@ func (m *ShardReplicationRemoveRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ShardReplicationRemoveRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *EmergencyReparentShardRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ShardReplicationRemoveRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *EmergencyReparentShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8084,8 +8931,50 @@ func (m *ShardReplicationRemoveRequest) MarshalToSizedBufferVT(dAtA []byte) (int i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if m.WaitForAllTablets { + i-- + if m.WaitForAllTablets { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.PreventCrossCellPromotion { + i-- + if m.PreventCrossCellPromotion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.WaitReplicasTimeout != nil { + size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if len(m.IgnoreReplicas) > 0 { + for iNdEx := len(m.IgnoreReplicas) - 1; iNdEx >= 0; iNdEx-- { + size, err := 
m.IgnoreReplicas[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if m.NewPrimary != nil { + size, err := m.NewPrimary.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -8111,7 +9000,7 @@ func (m *ShardReplicationRemoveRequest) MarshalToSizedBufferVT(dAtA []byte) (int return len(dAtA) - i, nil } -func (m *ShardReplicationRemoveResponse) MarshalVT() (dAtA []byte, err error) { +func (m *EmergencyReparentShardResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8124,12 +9013,12 @@ func (m *ShardReplicationRemoveResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ShardReplicationRemoveResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *EmergencyReparentShardResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ShardReplicationRemoveResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *EmergencyReparentShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8141,10 +9030,46 @@ func (m *ShardReplicationRemoveResponse) MarshalToSizedBufferVT(dAtA []byte) (in i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if m.PromotedPrimary != nil { + size, err := m.PromotedPrimary.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if 
len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *SleepTabletRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAppRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8157,12 +9082,12 @@ func (m *SleepTabletRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SleepTabletRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SleepTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8174,13 +9099,25 @@ func (m *SleepTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Duration != nil { - size, err := m.Duration.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if m.UsePool { + i-- + if m.UsePool { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x20 + } + if m.MaxRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxRows)) + i-- + dAtA[i] = 0x18 + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarint(dAtA, i, uint64(len(m.Query))) i-- dAtA[i] = 0x12 } @@ -8197,7 +9134,7 @@ func (m *SleepTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *SleepTabletResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAppResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8210,12 +9147,12 @@ func (m *SleepTabletResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m 
*SleepTabletResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SleepTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8227,10 +9164,20 @@ func (m *SleepTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *SourceShardAddRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsDBARequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8243,12 +9190,12 @@ func (m *SourceShardAddRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SourceShardAddRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsDBARequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SourceShardAddRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsDBARequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8260,62 +9207,52 @@ func (m *SourceShardAddRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Tables) > 0 { - for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Tables[iNdEx]) - copy(dAtA[i:], m.Tables[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) - i-- - dAtA[i] = 0x3a - } - } - if m.KeyRange != nil { - size, err := m.KeyRange.MarshalToSizedBufferVT(dAtA[:i]) - if err 
!= nil { - return 0, err + if m.ReloadSchema { + i-- + if m.ReloadSchema { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x32 + dAtA[i] = 0x28 } - if len(m.SourceShard) > 0 { - i -= len(m.SourceShard) - copy(dAtA[i:], m.SourceShard) - i = encodeVarint(dAtA, i, uint64(len(m.SourceShard))) + if m.DisableBinlogs { i-- - dAtA[i] = 0x2a - } - if len(m.SourceKeyspace) > 0 { - i -= len(m.SourceKeyspace) - copy(dAtA[i:], m.SourceKeyspace) - i = encodeVarint(dAtA, i, uint64(len(m.SourceKeyspace))) + if m.DisableBinlogs { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } i-- - dAtA[i] = 0x22 + dAtA[i] = 0x20 } - if m.Uid != 0 { - i = encodeVarint(dAtA, i, uint64(m.Uid)) + if m.MaxRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxRows)) i-- dAtA[i] = 0x18 } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarint(dAtA, i, uint64(len(m.Query))) i-- dAtA[i] = 0x12 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *SourceShardAddResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsDBAResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8328,12 +9265,12 @@ func (m *SourceShardAddResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SourceShardAddResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsDBAResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SourceShardAddResponse) 
MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsDBAResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8345,8 +9282,8 @@ func (m *SourceShardAddResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Shard != nil { - size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -8358,7 +9295,7 @@ func (m *SourceShardAddResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error return len(dAtA) - i, nil } -func (m *SourceShardDeleteRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteHookRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8371,12 +9308,12 @@ func (m *SourceShardDeleteRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SourceShardDeleteRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteHookRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SourceShardDeleteRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteHookRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8388,29 +9325,30 @@ func (m *SourceShardDeleteRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Uid != 0 { - i = encodeVarint(dAtA, i, uint64(m.Uid)) - i-- - dAtA[i] = 0x18 - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if m.TabletHookRequest != nil { + size, err := m.TabletHookRequest.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0x12 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - 
copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *SourceShardDeleteResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteHookResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8423,12 +9361,12 @@ func (m *SourceShardDeleteResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SourceShardDeleteResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteHookResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SourceShardDeleteResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteHookResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8440,8 +9378,8 @@ func (m *SourceShardDeleteResponse) MarshalToSizedBufferVT(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Shard != nil { - size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) + if m.HookResult != nil { + size, err := m.HookResult.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -8453,7 +9391,7 @@ func (m *SourceShardDeleteResponse) MarshalToSizedBufferVT(dAtA []byte) (int, er return len(dAtA) - i, nil } -func (m *StartReplicationRequest) MarshalVT() (dAtA []byte, err error) { +func (m *FindAllShardsInKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8466,12 +9404,12 @@ func (m *StartReplicationRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StartReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *FindAllShardsInKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() 
return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StartReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *FindAllShardsInKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8483,20 +9421,17 @@ func (m *StartReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *StartReplicationResponse) MarshalVT() (dAtA []byte, err error) { +func (m *FindAllShardsInKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8509,12 +9444,12 @@ func (m *StartReplicationResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StartReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *FindAllShardsInKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StartReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *FindAllShardsInKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8526,10 +9461,32 @@ func (m *StartReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Shards) > 0 { + for k := range m.Shards { + v := m.Shards[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = 
encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *StopReplicationRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetBackupsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8542,12 +9499,12 @@ func (m *StopReplicationRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StopReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetBackupsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StopReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetBackupsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8559,20 +9516,44 @@ func (m *StopReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if m.DetailedLimit != 0 { + i = encodeVarint(dAtA, i, uint64(m.DetailedLimit)) + i-- + dAtA[i] = 0x28 + } + if m.Detailed { + i-- + if m.Detailed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x20 + } + if m.Limit != 0 { + i = encodeVarint(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x18 + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *StopReplicationResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetBackupsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { 
return nil, nil } @@ -8585,12 +9566,12 @@ func (m *StopReplicationResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StopReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetBackupsResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StopReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetBackupsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8602,10 +9583,22 @@ func (m *StopReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Backups) > 0 { + for iNdEx := len(m.Backups) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Backups[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *TabletExternallyReparentedRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetCellInfoRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8618,12 +9611,12 @@ func (m *TabletExternallyReparentedRequest) MarshalVT() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *TabletExternallyReparentedRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetCellInfoRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *TabletExternallyReparentedRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetCellInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8635,20 +9628,17 @@ func (m *TabletExternallyReparentedRequest) MarshalToSizedBufferVT(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Tablet != nil { - size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) - if 
err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarint(dAtA, i, uint64(len(m.Cell))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *TabletExternallyReparentedResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetCellInfoResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8661,12 +9651,12 @@ func (m *TabletExternallyReparentedResponse) MarshalVT() (dAtA []byte, err error return dAtA[:n], nil } -func (m *TabletExternallyReparentedResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetCellInfoResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *TabletExternallyReparentedResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetCellInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8678,44 +9668,20 @@ func (m *TabletExternallyReparentedResponse) MarshalToSizedBufferVT(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.OldPrimary != nil { - size, err := m.OldPrimary.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 - } - if m.NewPrimary != nil { - size, err := m.NewPrimary.MarshalToSizedBufferVT(dAtA[:i]) + if m.CellInfo != nil { + size, err := m.CellInfo.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x1a - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m 
*UpdateCellInfoRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetCellInfoNamesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8728,12 +9694,12 @@ func (m *UpdateCellInfoRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UpdateCellInfoRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetCellInfoNamesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UpdateCellInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetCellInfoNamesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8745,27 +9711,10 @@ func (m *UpdateCellInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.CellInfo != nil { - size, err := m.CellInfo.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *UpdateCellInfoResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetCellInfoNamesResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8778,12 +9727,12 @@ func (m *UpdateCellInfoResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UpdateCellInfoResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetCellInfoNamesResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UpdateCellInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetCellInfoNamesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8795,27 +9744,19 @@ func (m 
*UpdateCellInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.CellInfo != nil { - size, err := m.CellInfo.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Names) > 0 { + for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Names[iNdEx]) + copy(dAtA[i:], m.Names[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Names[iNdEx]))) + i-- + dAtA[i] = 0xa } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *UpdateCellsAliasRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetCellsAliasesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8828,12 +9769,12 @@ func (m *UpdateCellsAliasRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UpdateCellsAliasRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetCellsAliasesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UpdateCellsAliasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetCellsAliasesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8845,29 +9786,12 @@ func (m *UpdateCellsAliasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.CellsAlias != nil { - size, err := m.CellsAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m 
*UpdateCellsAliasResponse) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil + return len(dAtA) - i, nil +} + +func (m *GetCellsAliasesResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } size := m.SizeVT() dAtA = make([]byte, size) @@ -8878,12 +9802,12 @@ func (m *UpdateCellsAliasResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UpdateCellsAliasResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetCellsAliasesResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UpdateCellsAliasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetCellsAliasesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8895,27 +9819,32 @@ func (m *UpdateCellsAliasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.CellsAlias != nil { - size, err := m.CellsAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Aliases) > 0 { + for k := range m.Aliases { + v := m.Aliases[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ValidateRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetFullStatusRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8928,12 +9857,12 @@ func (m 
*ValidateRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetFullStatusRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetFullStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8945,20 +9874,20 @@ func (m *ValidateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.PingTablets { - i-- - if m.PingTablets { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ValidateResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetFullStatusResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8971,12 +9900,12 @@ func (m *ValidateResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetFullStatusResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetFullStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8988,41 +9917,20 @@ func (m *ValidateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.ResultsByKeyspace) > 0 { - for k := range m.ResultsByKeyspace { - v := m.ResultsByKeyspace[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil 
{ - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Results[iNdEx]) - copy(dAtA[i:], m.Results[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) - i-- - dAtA[i] = 0xa + if m.Status != nil { + size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ValidateKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetKeyspacesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9035,12 +9943,12 @@ func (m *ValidateKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetKeyspacesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetKeyspacesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9052,27 +9960,10 @@ func (m *ValidateKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.PingTablets { - i-- - if m.PingTablets { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *ValidateKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { +func (m 
*GetKeyspacesResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9085,12 +9976,12 @@ func (m *ValidateKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetKeyspacesResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetKeyspacesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9102,41 +9993,22 @@ func (m *ValidateKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.ResultsByShard) > 0 { - for k := range m.ResultsByShard { - v := m.ResultsByShard[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if len(m.Keyspaces) > 0 { + for iNdEx := len(m.Keyspaces) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Keyspaces[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Results[iNdEx]) - copy(dAtA[i:], m.Results[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) - i-- dAtA[i] = 0xa } } return len(dAtA) - i, nil } -func (m *ValidateSchemaKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9149,12 +10021,12 @@ func (m *ValidateSchemaKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateSchemaKeyspaceRequest) 
MarshalToVT(dAtA []byte) (int, error) { +func (m *GetKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateSchemaKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9166,45 +10038,6 @@ func (m *ValidateSchemaKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.IncludeVschema { - i-- - if m.IncludeVschema { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.SkipNoPrimary { - i-- - if m.SkipNoPrimary { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.IncludeViews { - i-- - if m.IncludeViews { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.ExcludeTables) > 0 { - for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ExcludeTables[iNdEx]) - copy(dAtA[i:], m.ExcludeTables[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } if len(m.Keyspace) > 0 { i -= len(m.Keyspace) copy(dAtA[i:], m.Keyspace) @@ -9215,7 +10048,7 @@ func (m *ValidateSchemaKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int return len(dAtA) - i, nil } -func (m *ValidateSchemaKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9228,12 +10061,12 @@ func (m *ValidateSchemaKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateSchemaKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateSchemaKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, 
error) { +func (m *GetKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9245,41 +10078,20 @@ func (m *ValidateSchemaKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (in i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.ResultsByShard) > 0 { - for k := range m.ResultsByShard { - v := m.ResultsByShard[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Results[iNdEx]) - copy(dAtA[i:], m.Results[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) - i-- - dAtA[i] = 0xa + if m.Keyspace != nil { + size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ValidateShardRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetPermissionsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9292,12 +10104,12 @@ func (m *ValidateShardRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateShardRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetPermissionsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetPermissionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9309,34 +10121,20 @@ func (m *ValidateShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i 
-= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.PingTablets { - i-- - if m.PingTablets { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x18 - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ValidateShardResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetPermissionsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9349,12 +10147,12 @@ func (m *ValidateShardResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateShardResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetPermissionsResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetPermissionsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9366,19 +10164,20 @@ func (m *ValidateShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Results[iNdEx]) - copy(dAtA[i:], m.Results[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) - i-- - dAtA[i] = 0xa + if m.Permissions != nil { + size, err := m.Permissions.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } return 
len(dAtA) - i, nil } -func (m *ValidateVersionKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetRoutingRulesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9391,12 +10190,12 @@ func (m *ValidateVersionKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateVersionKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetRoutingRulesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateVersionKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetRoutingRulesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9408,17 +10207,10 @@ func (m *ValidateVersionKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (in i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *ValidateVersionKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetRoutingRulesResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9431,12 +10223,12 @@ func (m *ValidateVersionKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateVersionKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetRoutingRulesResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateVersionKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetRoutingRulesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9448,41 +10240,20 @@ func (m *ValidateVersionKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (i i -= len(m.unknownFields) 
copy(dAtA[i:], m.unknownFields) } - if len(m.ResultsByShard) > 0 { - for k := range m.ResultsByShard { - v := m.ResultsByShard[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Results[iNdEx]) - copy(dAtA[i:], m.Results[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) - i-- - dAtA[i] = 0xa + if m.RoutingRules != nil { + size, err := m.RoutingRules.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ValidateVersionShardRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetSchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9495,12 +10266,12 @@ func (m *ValidateVersionShardRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateVersionShardRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateVersionShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9512,24 +10283,78 @@ func (m *ValidateVersionShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if m.TableSchemaOnly { i-- - dAtA[i] = 0x12 + if 
m.TableSchemaOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if m.TableSizesOnly { + i-- + if m.TableSizesOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.TableNamesOnly { + i-- + if m.TableNamesOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.IncludeViews { + i-- + if m.IncludeViews { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.ExcludeTables) > 0 { + for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExcludeTables[iNdEx]) + copy(dAtA[i:], m.ExcludeTables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tables[iNdEx]) + copy(dAtA[i:], m.Tables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ValidateVersionShardResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetSchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9542,12 +10367,12 @@ func (m *ValidateVersionShardResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateVersionShardResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateVersionShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, 
error) { if m == nil { return 0, nil } @@ -9559,19 +10384,20 @@ func (m *ValidateVersionShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Results[iNdEx]) - copy(dAtA[i:], m.Results[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) - i-- - dAtA[i] = 0xa + if m.Schema != nil { + size, err := m.Schema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ValidateVSchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetSchemaMigrationsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9584,12 +10410,12 @@ func (m *ValidateVSchemaRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetSchemaMigrationsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSchemaMigrationsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9601,45 +10427,61 @@ func (m *ValidateVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.IncludeViews { + if m.Skip != 0 { + i = encodeVarint(dAtA, i, uint64(m.Skip)) i-- - if m.IncludeViews { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } + dAtA[i] = 0x40 + } + if m.Limit != 0 { + i = encodeVarint(dAtA, i, uint64(m.Limit)) i-- - dAtA[i] = 0x20 + dAtA[i] = 0x38 } - if len(m.ExcludeTables) > 0 { - for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ExcludeTables[iNdEx]) - copy(dAtA[i:], m.ExcludeTables[iNdEx]) - i 
= encodeVarint(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) - i-- - dAtA[i] = 0x1a - } + if m.Order != 0 { + i = encodeVarint(dAtA, i, uint64(m.Order)) + i-- + dAtA[i] = 0x30 } - if len(m.Shards) > 0 { - for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Shards[iNdEx]) - copy(dAtA[i:], m.Shards[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Shards[iNdEx]))) - i-- - dAtA[i] = 0x12 + if m.Recent != nil { + size, err := m.Recent.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if m.Status != 0 { + i = encodeVarint(dAtA, i, uint64(m.Status)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x20 } - return len(dAtA) - i, nil -} + if len(m.MigrationContext) > 0 { + i -= len(m.MigrationContext) + copy(dAtA[i:], m.MigrationContext) + i = encodeVarint(dAtA, i, uint64(len(m.MigrationContext))) + i-- + dAtA[i] = 0x1a + } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarint(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} -func (m *ValidateVSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetSchemaMigrationsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9652,12 +10494,12 @@ func (m *ValidateVSchemaResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetSchemaMigrationsResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { 
+func (m *GetSchemaMigrationsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9669,41 +10511,22 @@ func (m *ValidateVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.ResultsByShard) > 0 { - for k := range m.ResultsByShard { - v := m.ResultsByShard[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if len(m.Migrations) > 0 { + for iNdEx := len(m.Migrations) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Migrations[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Results[iNdEx]) - copy(dAtA[i:], m.Results[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) - i-- dAtA[i] = 0xa } } return len(dAtA) - i, nil } -func (m *WorkflowUpdateRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetShardRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9716,12 +10539,12 @@ func (m *WorkflowUpdateRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *WorkflowUpdateRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetShardRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *WorkflowUpdateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9733,13 +10556,10 @@ func (m *WorkflowUpdateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if 
m.TabletRequest != nil { - size, err := m.TabletRequest.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.ShardName) > 0 { + i -= len(m.ShardName) + copy(dAtA[i:], m.ShardName) + i = encodeVarint(dAtA, i, uint64(len(m.ShardName))) i-- dAtA[i] = 0x12 } @@ -9753,7 +10573,7 @@ func (m *WorkflowUpdateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *WorkflowUpdateResponse_TabletInfo) MarshalVT() (dAtA []byte, err error) { +func (m *GetShardResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9766,12 +10586,12 @@ func (m *WorkflowUpdateResponse_TabletInfo) MarshalVT() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *WorkflowUpdateResponse_TabletInfo) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetShardResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *WorkflowUpdateResponse_TabletInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9783,27 +10603,20 @@ func (m *WorkflowUpdateResponse_TabletInfo) MarshalToSizedBufferVT(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Changed { - i-- - if m.Changed { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.Shard != nil { + size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x10 - } - if len(m.Tablet) > 0 { - i -= len(m.Tablet) - copy(dAtA[i:], m.Tablet) - i = encodeVarint(dAtA, i, uint64(len(m.Tablet))) + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *WorkflowUpdateResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetShardRoutingRulesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } 
@@ -9816,12 +10629,12 @@ func (m *WorkflowUpdateResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *WorkflowUpdateResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetShardRoutingRulesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *WorkflowUpdateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetShardRoutingRulesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9833,3676 +10646,24077 @@ func (m *WorkflowUpdateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Details) > 0 { - for iNdEx := len(m.Details) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Details[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Summary) > 0 { - i -= len(m.Summary) - copy(dAtA[i:], m.Summary) - i = encodeVarint(dAtA, i, uint64(len(m.Summary))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func encodeVarint(dAtA []byte, offset int, v uint64) int { - offset -= sov(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *GetShardRoutingRulesResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - dAtA[offset] = uint8(v) - return base + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ExecuteVtctlCommandRequest) SizeVT() (n int) { + +func (m *GetShardRoutingRulesResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetShardRoutingRulesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, 
nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Args) > 0 { - for _, s := range m.Args { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.ActionTimeout != 0 { - n += 1 + sov(uint64(m.ActionTimeout)) + if m.ShardRoutingRules != nil { + size, err := m.ShardRoutingRules.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ExecuteVtctlCommandResponse) SizeVT() (n int) { +func (m *GetSrvKeyspaceNamesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Event != nil { - l = m.Event.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *TableMaterializeSettings) SizeVT() (n int) { +func (m *GetSrvKeyspaceNamesRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetSrvKeyspaceNamesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.TargetTable) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.SourceExpression) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - l = len(m.CreateDdl) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return len(dAtA) - i, 
nil +} + +func (m *GetSrvKeyspaceNamesResponse_NameList) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *MaterializeSettings) SizeVT() (n int) { +func (m *GetSrvKeyspaceNamesResponse_NameList) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetSrvKeyspaceNamesResponse_NameList) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Workflow) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.SourceKeyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.TargetKeyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.StopAfterCopy { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.TableSettings) > 0 { - for _, e := range m.TableSettings { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Names) > 0 { + for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Names[iNdEx]) + copy(dAtA[i:], m.Names[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Names[iNdEx]))) + i-- + dAtA[i] = 0xa } } - l = len(m.Cell) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.TabletTypes) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.ExternalCluster) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *GetSrvKeyspaceNamesResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if m.MaterializationIntent != 0 { - n += 1 + sov(uint64(m.MaterializationIntent)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - l = 
len(m.SourceTimeZone) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return dAtA[:n], nil +} + +func (m *GetSrvKeyspaceNamesResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetSrvKeyspaceNamesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil } - l = len(m.TargetTimeZone) - if l > 0 { - n += 1 + l + sov(uint64(l)) + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.SourceShards) > 0 { - for _, s := range m.SourceShards { - l = len(s) - n += 1 + l + sov(uint64(l)) + if len(m.Names) > 0 { + for k := range m.Names { + v := m.Names[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } } - l = len(m.OnDdl) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.DeferSecondaryKeys { - n += 2 - } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *Keyspace) SizeVT() (n int) { +func (m *GetSrvKeyspacesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.Keyspace != nil { - l = m.Keyspace.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *Shard) SizeVT() (n int) { +func (m *GetSrvKeyspacesRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m 
*GetSrvKeyspacesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x12 + } } - if m.Shard != nil { - l = m.Shard.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *Workflow_ReplicationLocation) SizeVT() (n int) { +func (m *GetSrvKeyspacesResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - if len(m.Shards) > 0 { - for _, s := range m.Shards { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *Workflow_ShardStream) SizeVT() (n int) { +func (m *GetSrvKeyspacesResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetSrvKeyspacesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Streams) > 0 { - for _, e := range m.Streams { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= 
len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.TabletControls) > 0 { - for _, e := range m.TabletControls { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.SrvKeyspaces) > 0 { + for k := range m.SrvKeyspaces { + v := m.SrvKeyspaces[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } } - if m.IsPrimaryServing { - n += 2 - } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *Workflow_Stream_CopyState) SizeVT() (n int) { +func (m *UpdateThrottlerConfigRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Table) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - l = len(m.LastPk) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *Workflow_Stream_Log) SizeVT() (n int) { +func (m *UpdateThrottlerConfigRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UpdateThrottlerConfigRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Id != 0 { - n += 1 + sov(uint64(m.Id)) - } - if m.StreamId != 0 { - n += 1 + sov(uint64(m.StreamId)) - } - l = len(m.Type) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.CreatedAt != nil { - l = m.CreatedAt.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.UpdatedAt != nil { - l = 
m.UpdatedAt.SizeVT() - n += 1 + l + sov(uint64(l)) - } - l = len(m.Message) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Count != 0 { - n += 1 + sov(uint64(m.Count)) - } - n += len(m.unknownFields) - return n -} - -func (m *Workflow_Stream) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Id != 0 { - n += 1 + sov(uint64(m.Id)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Tablet != nil { - l = m.Tablet.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.BinlogSource != nil { - l = m.BinlogSource.SizeVT() - n += 1 + l + sov(uint64(l)) - } - l = len(m.Position) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.StopPosition) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.ThrottledApp != nil { + size, err := m.ThrottledApp.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a } - l = len(m.State) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.CheckAsCheckShard { + i-- + if m.CheckAsCheckShard { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 } - l = len(m.DbName) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.CheckAsCheckSelf { + i-- + if m.CheckAsCheckSelf { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 } - if m.TransactionTimestamp != nil { - l = m.TransactionTimestamp.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.CustomQuerySet { + i-- + if m.CustomQuerySet { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 } - if m.TimeUpdated != nil { - l = m.TimeUpdated.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.CustomQuery) > 0 { + i -= len(m.CustomQuery) + copy(dAtA[i:], m.CustomQuery) + i = encodeVarint(dAtA, i, uint64(len(m.CustomQuery))) + i-- + dAtA[i] = 0x2a } - l = len(m.Message) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.Threshold != 0 { + 
i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Threshold)))) + i-- + dAtA[i] = 0x21 } - if len(m.CopyStates) > 0 { - for _, e := range m.CopyStates { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.Disable { + i-- + if m.Disable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x18 } - if len(m.Logs) > 0 { - for _, e := range m.Logs { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.Enable { + i-- + if m.Enable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x10 } - l = len(m.LogFetchError) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if len(m.Tags) > 0 { - for _, s := range m.Tags { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *Workflow) SizeVT() (n int) { +func (m *UpdateThrottlerConfigResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Source != nil { - l = m.Source.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.Target != nil { - l = m.Target.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.MaxVReplicationLag != 0 { - n += 1 + sov(uint64(m.MaxVReplicationLag)) - } - if len(m.ShardStreams) > 0 { - for k, v := range m.ShardStreams { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } - } - l = len(m.WorkflowType) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - l = len(m.WorkflowSubType) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += 
len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *AddCellInfoRequest) SizeVT() (n int) { +func (m *UpdateThrottlerConfigResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UpdateThrottlerConfigResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.CellInfo != nil { - l = m.CellInfo.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *AddCellInfoResponse) SizeVT() (n int) { +func (m *GetSrvVSchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *AddCellsAliasRequest) SizeVT() (n int) { +func (m *GetSrvVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetSrvVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarint(dAtA, i, uint64(len(m.Cell))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *AddCellsAliasResponse) 
SizeVT() (n int) { +func (m *GetSrvVSchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ApplyRoutingRulesRequest) SizeVT() (n int) { +func (m *GetSrvVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetSrvVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.RoutingRules != nil { - l = m.RoutingRules.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.SkipRebuild { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.RebuildCells) > 0 { - for _, s := range m.RebuildCells { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.SrvVSchema != nil { + size, err := m.SrvVSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ApplyRoutingRulesResponse) SizeVT() (n int) { +func (m *GetSrvVSchemasRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ApplyShardRoutingRulesRequest) SizeVT() (n int) { +func (m *GetSrvVSchemasRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetSrvVSchemasRequest) MarshalToSizedBufferVT(dAtA []byte) 
(int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.ShardRoutingRules != nil { - l = m.ShardRoutingRules.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.SkipRebuild { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.RebuildCells) > 0 { - for _, s := range m.RebuildCells { - l = len(s) - n += 1 + l + sov(uint64(l)) + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x12 } } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ApplyShardRoutingRulesResponse) SizeVT() (n int) { +func (m *GetSrvVSchemasResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ApplySchemaRequest) SizeVT() (n int) { +func (m *GetSrvVSchemasResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetSrvVSchemasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.AllowLongUnavailability { - n += 2 - } - if len(m.Sql) > 0 { - for _, s := range m.Sql { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - l = len(m.DdlStrategy) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.UuidList) > 0 { - for _, s := range m.UuidList { - l = len(s) - n += 1 + l + sov(uint64(l)) + if 
len(m.SrvVSchemas) > 0 { + for k := range m.SrvVSchemas { + v := m.SrvVSchemas[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } } - l = len(m.MigrationContext) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.WaitReplicasTimeout != nil { - l = m.WaitReplicasTimeout.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.SkipPreflight { - n += 2 + return len(dAtA) - i, nil +} + +func (m *GetTabletRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if m.CallerId != nil { - l = m.CallerId.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ApplySchemaResponse) SizeVT() (n int) { +func (m *GetTabletRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.UuidList) > 0 { - for _, s := range m.UuidList { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ApplyVSchemaRequest) SizeVT() (n int) { +func (m *GetTabletResponse) MarshalVT() (dAtA []byte, err 
error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - if m.SkipRebuild { - n += 2 + return dAtA[:n], nil +} + +func (m *GetTabletResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil } - if m.DryRun { - n += 2 + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.Tablet != nil { + size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - if m.VSchema != nil { - l = m.VSchema.SizeVT() - n += 1 + l + sov(uint64(l)) - } - l = len(m.Sql) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ApplyVSchemaResponse) SizeVT() (n int) { +func (m *GetTabletsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.VSchema != nil { - l = m.VSchema.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *BackupRequest) SizeVT() (n int) { +func (m *GetTabletsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetTabletsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == 
nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.AllowPrimary { - n += 2 + if m.TabletType != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletType)) + i-- + dAtA[i] = 0x30 } - if m.Concurrency != 0 { - n += 1 + sov(uint64(m.Concurrency)) + if len(m.TabletAliases) > 0 { + for iNdEx := len(m.TabletAliases) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TabletAliases[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } } - l = len(m.IncrementalFromPos) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.Strict { + i-- + if m.Strict { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 } - n += len(m.unknownFields) - return n -} - -func (m *BackupResponse) SizeVT() (n int) { - if m == nil { - return 0 + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x1a + } } - var l int - _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 } - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *GetTabletsResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if m.Event != nil { - l = m.Event.SizeVT() - n += 1 + l + 
sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *BackupShardRequest) SizeVT() (n int) { +func (m *GetTabletsResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetTabletsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Tablets) > 0 { + for iNdEx := len(m.Tablets) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Tablets[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } } - if m.AllowPrimary { - n += 2 + return len(dAtA) - i, nil +} + +func (m *GetTopologyPathRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if m.Concurrency != 0 { - n += 1 + sov(uint64(m.Concurrency)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ChangeTabletTypeRequest) SizeVT() (n int) { +func (m *GetTopologyPathRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetTopologyPathRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.DbType != 0 { - n += 1 + 
sov(uint64(m.DbType)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.DryRun { - n += 2 + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ChangeTabletTypeResponse) SizeVT() (n int) { +func (m *GetTopologyPathResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if m.BeforeTablet != nil { - l = m.BeforeTablet.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.AfterTablet != nil { - l = m.AfterTablet.SizeVT() - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.WasDryRun { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *CreateKeyspaceRequest) SizeVT() (n int) { +func (m *GetTopologyPathResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetTopologyPathResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Force { - n += 2 - } - if m.AllowEmptyVSchema { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.ServedFroms) > 0 { - for _, e := range m.ServedFroms { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.Cell != nil { + size, err := m.Cell.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - if m.Type != 0 { - n += 1 + sov(uint64(m.Type)) - } - l = len(m.BaseKeyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.SnapshotTime 
!= nil { - l = m.SnapshotTime.SizeVT() - n += 1 + l + sov(uint64(l)) - } - l = len(m.DurabilityPolicy) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.SidecarDbName) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *CreateKeyspaceResponse) SizeVT() (n int) { +func (m *TopologyCell) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Keyspace != nil { - l = m.Keyspace.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *CreateShardRequest) SizeVT() (n int) { +func (m *TopologyCell) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TopologyCell) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.ShardName) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Children) > 0 { + for iNdEx := len(m.Children) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Children[iNdEx]) + copy(dAtA[i:], m.Children[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Children[iNdEx]))) + i-- + dAtA[i] = 0x22 + } } - if m.Force { - n += 2 + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarint(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x1a } - if m.IncludeParent { - n += 2 + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 } - n += len(m.unknownFields) - return n + if len(m.Name) > 0 { + i -= len(m.Name) + 
copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *CreateShardResponse) SizeVT() (n int) { +func (m *GetVSchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if m.Keyspace != nil { - l = m.Keyspace.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.Shard != nil { - l = m.Shard.SizeVT() - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.ShardAlreadyExists { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *DeleteCellInfoRequest) SizeVT() (n int) { +func (m *GetVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.Force { - n += 2 + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *DeleteCellInfoResponse) SizeVT() (n int) { +func (m *GetVersionRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *DeleteCellsAliasRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { 
- n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n +func (m *GetVersionRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *DeleteCellsAliasResponse) SizeVT() (n int) { +func (m *GetVersionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *DeleteKeyspaceRequest) SizeVT() (n int) { +func (m *GetVersionResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Recursive { - n += 2 - } - if m.Force { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *DeleteKeyspaceResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *GetVersionResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *DeleteShardsRequest) SizeVT() (n int) { +func (m *GetVersionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Shards) > 0 { - for _, e := range m.Shards { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } - if m.Recursive { - n += 2 - } - if m.EvenIfServing { - n += 2 - } - if 
m.Force { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n -} - -func (m *DeleteShardsResponse) SizeVT() (n int) { - if m == nil { - return 0 + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarint(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa } - var l int - _ = l - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *DeleteSrvVSchemaRequest) SizeVT() (n int) { +func (m *GetVSchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = len(m.Cell) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *DeleteSrvVSchemaResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *GetVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *DeleteTabletsRequest) SizeVT() (n int) { +func (m *GetVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.TabletAliases) > 0 { - for _, e := range m.TabletAliases { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.AllowPrimary { - n += 2 + if m.VSchema != nil { + size, err := m.VSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *DeleteTabletsResponse) SizeVT() (n int) { +func 
(m *GetWorkflowsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *EmergencyReparentShardRequest) SizeVT() (n int) { +func (m *GetWorkflowsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetWorkflowsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.IncludeLogs { + i-- + if m.IncludeLogs { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 } - if m.NewPrimary != nil { - l = m.NewPrimary.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0x22 } - if len(m.IgnoreReplicas) > 0 { - for _, e := range m.IgnoreReplicas { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.NameOnly { + i-- + if m.NameOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x18 } - if m.WaitReplicasTimeout != nil { - l = m.WaitReplicasTimeout.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.ActiveOnly { + i-- + if m.ActiveOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 } - if m.PreventCrossCellPromotion { - n += 2 + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func 
(m *EmergencyReparentShardResponse) SizeVT() (n int) { +func (m *GetWorkflowsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.PromotedPrimary != nil { - l = m.PromotedPrimary.SizeVT() - n += 1 + l + sov(uint64(l)) + return nil, nil } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ExecuteFetchAsAppRequest) SizeVT() (n int) { +func (m *GetWorkflowsResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetWorkflowsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) - } - l = len(m.Query) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.MaxRows != 0 { - n += 1 + sov(uint64(m.MaxRows)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.UsePool { - n += 2 + if len(m.Workflows) > 0 { + for iNdEx := len(m.Workflows) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Workflows[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ExecuteFetchAsAppResponse) SizeVT() (n int) { +func (m *InitShardPrimaryRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Result != nil { - l = 
m.Result.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ExecuteFetchAsDBARequest) SizeVT() (n int) { +func (m *InitShardPrimaryRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *InitShardPrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Query) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.WaitReplicasTimeout != nil { + size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a } - if m.MaxRows != 0 { - n += 1 + sov(uint64(m.MaxRows)) + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 } - if m.DisableBinlogs { - n += 2 + if m.PrimaryElectTabletAlias != nil { + size, err := m.PrimaryElectTabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a } - if m.ReloadSchema { - n += 2 + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 } - n += len(m.unknownFields) - return n + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ExecuteFetchAsDBAResponse) SizeVT() (n int) { +func (m 
*InitShardPrimaryResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Result != nil { - l = m.Result.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ExecuteHookRequest) SizeVT() (n int) { +func (m *InitShardPrimaryResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *InitShardPrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.TabletHookRequest != nil { - l = m.TabletHookRequest.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ExecuteHookResponse) SizeVT() (n int) { +func (m *LaunchSchemaMigrationRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.HookResult != nil { - l = m.HookResult.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *FindAllShardsInKeyspaceRequest) SizeVT() (n int) { +func (m *LaunchSchemaMigrationRequest) MarshalToVT(dAtA []byte) 
(int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LaunchSchemaMigrationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarint(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *FindAllShardsInKeyspaceResponse) SizeVT() (n int) { +func (m *LaunchSchemaMigrationResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.Shards) > 0 { - for k, v := range m.Shards { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetBackupsRequest) SizeVT() (n int) { +func (m *LaunchSchemaMigrationResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LaunchSchemaMigrationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Limit != 0 { - n += 1 + 
sov(uint64(m.Limit)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.Detailed { - n += 2 - } - if m.DetailedLimit != 0 { - n += 1 + sov(uint64(m.DetailedLimit)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetBackupsResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Backups) > 0 { - for _, e := range m.Backups { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.RowsAffectedByShard) > 0 { + for k := range m.RowsAffectedByShard { + v := m.RowsAffectedByShard[k] + baseI := i + i = encodeVarint(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetCellInfoRequest) SizeVT() (n int) { +func (m *LookupVindexCreateRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = len(m.Cell) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetCellInfoResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.CellInfo != nil { - l = m.CellInfo.SizeVT() - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n +func (m *LookupVindexCreateRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetCellInfoNamesRequest) SizeVT() (n int) { +func (m *LookupVindexCreateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n -} - -func (m 
*GetCellInfoNamesResponse) SizeVT() (n int) { - if m == nil { - return 0 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - var l int - _ = l - if len(m.Names) > 0 { - for _, s := range m.Names { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.TabletSelectionPreference != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletSelectionPreference)) + i-- + dAtA[i] = 0x38 + } + if len(m.TabletTypes) > 0 { + var pksize2 int + for _, num := range m.TabletTypes { + pksize2 += sov(uint64(num)) } + i -= pksize2 + j1 := i + for _, num1 := range m.TabletTypes { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = encodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x32 } - n += len(m.unknownFields) - return n -} - -func (m *GetCellsAliasesRequest) SizeVT() (n int) { - if m == nil { - return 0 + if m.ContinueAfterCopyWithOwner { + i-- + if m.ContinueAfterCopyWithOwner { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *GetCellsAliasesResponse) SizeVT() (n int) { - if m == nil { - return 0 + if m.Vindex != nil { + size, err := m.Vindex.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 } - var l int - _ = l - if len(m.Aliases) > 0 { - for k, v := range m.Aliases { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - n += len(m.unknownFields) - return n -} - -func (m *GetFullStatusRequest) 
SizeVT() (n int) { - if m == nil { - return 0 + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0x12 } - var l int - _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetFullStatusResponse) SizeVT() (n int) { +func (m *LookupVindexCreateResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Status != nil { - l = m.Status.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetKeyspacesRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *LookupVindexCreateResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetKeyspacesResponse) SizeVT() (n int) { +func (m *LookupVindexCreateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Keyspaces) > 0 { - for _, e := range m.Keyspaces { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetKeyspaceRequest) SizeVT() (n int) { +func (m *LookupVindexExternalizeRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = 
len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetKeyspaceResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Keyspace != nil { - l = m.Keyspace.SizeVT() - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n +func (m *LookupVindexExternalizeRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetPermissionsRequest) SizeVT() (n int) { +func (m *LookupVindexExternalizeRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n -} - -func (m *GetPermissionsResponse) SizeVT() (n int) { - if m == nil { - return 0 + if len(m.TableKeyspace) > 0 { + i -= len(m.TableKeyspace) + copy(dAtA[i:], m.TableKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.TableKeyspace))) + i-- + dAtA[i] = 0x1a } - var l int - _ = l - if m.Permissions != nil { - l = m.Permissions.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 } - n += len(m.unknownFields) - return n -} - -func (m *GetRoutingRulesRequest) SizeVT() (n int) { - if m == nil { - return 0 + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - var l int - _ = l - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetRoutingRulesResponse) 
SizeVT() (n int) { +func (m *LookupVindexExternalizeResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.RoutingRules != nil { - l = m.RoutingRules.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetSchemaRequest) SizeVT() (n int) { +func (m *LookupVindexExternalizeResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LookupVindexExternalizeResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if len(m.Tables) > 0 { - for _, s := range m.Tables { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.ExcludeTables) > 0 { - for _, s := range m.ExcludeTables { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.WorkflowDeleted { + i-- + if m.WorkflowDeleted { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x8 } - if m.IncludeViews { - n += 2 - } - if m.TableNamesOnly { - n += 2 - } - if m.TableSizesOnly { - n += 2 - } - if m.TableSchemaOnly { - n += 2 - } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetSchemaResponse) SizeVT() (n int) { +func (m *MaterializeCreateRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Schema != nil { - l = m.Schema.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += 
len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetShardRequest) SizeVT() (n int) { +func (m *MaterializeCreateRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MaterializeCreateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.ShardName) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.Settings != nil { + size, err := m.Settings.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetShardResponse) SizeVT() (n int) { +func (m *MaterializeCreateResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Shard != nil { - l = m.Shard.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetShardRoutingRulesRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *MaterializeCreateResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetShardRoutingRulesResponse) SizeVT() (n int) { +func (m *MaterializeCreateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.ShardRoutingRules != nil { - l = m.ShardRoutingRules.SizeVT() - n += 1 + l + 
sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetSrvKeyspaceNamesRequest) SizeVT() (n int) { +func (m *MigrateCreateRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetSrvKeyspaceNamesResponse_NameList) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Names) > 0 { - for _, s := range m.Names { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n +func (m *MigrateCreateRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetSrvKeyspaceNamesResponse) SizeVT() (n int) { +func (m *MigrateCreateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Names) > 0 { - for k, v := range m.Names { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.NoRoutingRules { + i-- + if m.NoRoutingRules { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 } - n += len(m.unknownFields) - return n -} - -func (m *GetSrvKeyspacesRequest) SizeVT() (n int) { - if m == nil { - return 0 + if m.AutoStart { + i-- + if m.AutoStart { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } 
+ i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.DeferSecondaryKeys { + i-- + if m.DeferSecondaryKeys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 } - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.DropForeignKeys { + i-- + if m.DropForeignKeys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x70 } - n += len(m.unknownFields) - return n -} - -func (m *GetSrvKeyspacesResponse) SizeVT() (n int) { - if m == nil { - return 0 + if m.StopAfterCopy { + i-- + if m.StopAfterCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 } - var l int - _ = l - if len(m.SrvKeyspaces) > 0 { - for k, v := range m.SrvKeyspaces { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + if len(m.OnDdl) > 0 { + i -= len(m.OnDdl) + copy(dAtA[i:], m.OnDdl) + i = encodeVarint(dAtA, i, uint64(len(m.OnDdl))) + i-- + dAtA[i] = 0x62 + } + if len(m.SourceTimeZone) > 0 { + i -= len(m.SourceTimeZone) + copy(dAtA[i:], m.SourceTimeZone) + i = encodeVarint(dAtA, i, uint64(len(m.SourceTimeZone))) + i-- + dAtA[i] = 0x5a + } + if len(m.ExcludeTables) > 0 { + for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExcludeTables[iNdEx]) + copy(dAtA[i:], m.ExcludeTables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) + i-- + dAtA[i] = 0x52 } } - n += len(m.unknownFields) - return n -} - -func (m *UpdateThrottlerConfigRequest) SizeVT() (n int) { - if m == nil { - return 0 + if len(m.IncludeTables) > 0 { + for iNdEx := len(m.IncludeTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.IncludeTables[iNdEx]) + copy(dAtA[i:], m.IncludeTables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.IncludeTables[iNdEx]))) + i-- + dAtA[i] = 0x4a + } } - 
var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.AllTables { + i-- + if m.AllTables { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 } - if m.Enable { - n += 2 + if m.TabletSelectionPreference != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletSelectionPreference)) + i-- + dAtA[i] = 0x38 } - if m.Disable { - n += 2 + if len(m.TabletTypes) > 0 { + var pksize2 int + for _, num := range m.TabletTypes { + pksize2 += sov(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.TabletTypes { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = encodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x32 } - if m.Threshold != 0 { - n += 9 + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x2a + } } - l = len(m.CustomQuery) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.MountName) > 0 { + i -= len(m.MountName) + copy(dAtA[i:], m.MountName) + i = encodeVarint(dAtA, i, uint64(len(m.MountName))) + i-- + dAtA[i] = 0x22 } - if m.CustomQuerySet { - n += 2 + if len(m.TargetKeyspace) > 0 { + i -= len(m.TargetKeyspace) + copy(dAtA[i:], m.TargetKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.TargetKeyspace))) + i-- + dAtA[i] = 0x1a } - if m.CheckAsCheckSelf { - n += 2 + if len(m.SourceKeyspace) > 0 { + i -= len(m.SourceKeyspace) + copy(dAtA[i:], m.SourceKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.SourceKeyspace))) + i-- + dAtA[i] = 0x12 } - if m.CheckAsCheckShard { - n += 2 + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *UpdateThrottlerConfigResponse) 
SizeVT() (n int) { +func (m *MigrateCompleteRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *GetSrvVSchemaRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Cell) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n +func (m *MigrateCompleteRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetSrvVSchemaResponse) SizeVT() (n int) { +func (m *MigrateCompleteRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.SrvVSchema != nil { - l = m.SrvVSchema.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n -} - -func (m *GetSrvVSchemasRequest) SizeVT() (n int) { - if m == nil { - return 0 + if m.DryRun { + i-- + if m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 } - var l int - _ = l - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.RenameTables { + i-- + if m.RenameTables { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x30 } - n += len(m.unknownFields) - return n + if m.KeepRoutingRules { + i-- + if m.KeepRoutingRules { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.KeepData { + i-- + if m.KeepData { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.TargetKeyspace) > 0 { + i -= len(m.TargetKeyspace) + copy(dAtA[i:], m.TargetKeyspace) + i = encodeVarint(dAtA, i, 
uint64(len(m.TargetKeyspace))) + i-- + dAtA[i] = 0x1a + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *GetSrvVSchemasResponse) SizeVT() (n int) { +func (m *MigrateCompleteResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.SrvVSchemas) > 0 { - for k, v := range m.SrvVSchemas { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetTabletRequest) SizeVT() (n int) { +func (m *MigrateCompleteResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MigrateCompleteResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + if len(m.DryRunResults) > 0 { + for iNdEx := len(m.DryRunResults) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRunResults[iNdEx]) + copy(dAtA[i:], m.DryRunResults[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.DryRunResults[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Summary) > 0 { + i -= len(m.Summary) + copy(dAtA[i:], m.Summary) + i = encodeVarint(dAtA, i, uint64(len(m.Summary))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *GetTabletResponse) SizeVT() (n int) { +func (m 
*MountRegisterRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Tablet != nil { - l = m.Tablet.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetTabletsRequest) SizeVT() (n int) { +func (m *MountRegisterRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MountRegisterRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 } - if m.Strict { - n += 2 + if len(m.TopoRoot) > 0 { + i -= len(m.TopoRoot) + copy(dAtA[i:], m.TopoRoot) + i = encodeVarint(dAtA, i, uint64(len(m.TopoRoot))) + i-- + dAtA[i] = 0x1a } - if len(m.TabletAliases) > 0 { - for _, e := range m.TabletAliases { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + if len(m.TopoServer) > 0 { + i -= len(m.TopoServer) + copy(dAtA[i:], m.TopoServer) + i = encodeVarint(dAtA, i, uint64(len(m.TopoServer))) + i-- + dAtA[i] = 0x12 } - if m.TabletType != 0 { - n += 1 + sov(uint64(m.TabletType)) + if len(m.TopoType) > 0 { + i -= len(m.TopoType) + copy(dAtA[i:], m.TopoType) + i = encodeVarint(dAtA, i, uint64(len(m.TopoType))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m 
*GetTabletsResponse) SizeVT() (n int) { +func (m *MountRegisterResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.Tablets) > 0 { - for _, e := range m.Tablets { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetTopologyPathRequest) SizeVT() (n int) { +func (m *MountRegisterResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MountRegisterResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Path) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetTopologyPathResponse) SizeVT() (n int) { +func (m *MountUnregisterRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Cell != nil { - l = m.Cell.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *TopologyCell) SizeVT() (n int) { +func (m *MountUnregisterRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MountUnregisterRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Path) - if l > 0 { 
- n += 1 + l + sov(uint64(l)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.Children) > 0 { - for _, s := range m.Children { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetVSchemaRequest) SizeVT() (n int) { +func (m *MountUnregisterResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetVersionRequest) SizeVT() (n int) { +func (m *MountUnregisterResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MountUnregisterResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetVersionResponse) SizeVT() (n int) { +func (m *MountShowRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = len(m.Version) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + 
return dAtA[:n], nil } -func (m *GetVSchemaResponse) SizeVT() (n int) { +func (m *MountShowRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MountShowRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.VSchema != nil { - l = m.VSchema.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil } -func (m *GetWorkflowsRequest) SizeVT() (n int) { +func (m *MountShowResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.ActiveOnly { - n += 2 + return nil, nil } - if m.NameOnly { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetWorkflowsResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Workflows) > 0 { - for _, e := range m.Workflows { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n +func (m *MountShowResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *InitShardPrimaryRequest) SizeVT() (n int) { +func (m *MountShowResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= 
len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 } - if m.PrimaryElectTabletAlias != nil { - l = m.PrimaryElectTabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.TopoRoot) > 0 { + i -= len(m.TopoRoot) + copy(dAtA[i:], m.TopoRoot) + i = encodeVarint(dAtA, i, uint64(len(m.TopoRoot))) + i-- + dAtA[i] = 0x1a } - if m.Force { - n += 2 + if len(m.TopoServer) > 0 { + i -= len(m.TopoServer) + copy(dAtA[i:], m.TopoServer) + i = encodeVarint(dAtA, i, uint64(len(m.TopoServer))) + i-- + dAtA[i] = 0x12 } - if m.WaitReplicasTimeout != nil { - l = m.WaitReplicasTimeout.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.TopoType) > 0 { + i -= len(m.TopoType) + copy(dAtA[i:], m.TopoType) + i = encodeVarint(dAtA, i, uint64(len(m.TopoType))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *InitShardPrimaryResponse) SizeVT() (n int) { +func (m *MountListRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *PingTabletRequest) SizeVT() (n int) { +func (m *MountListRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MountListRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields 
!= nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *PingTabletResponse) SizeVT() (n int) { +func (m *MountListResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PlannedReparentShardRequest) SizeVT() (n int) { +func (m *MountListResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MountListResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.NewPrimary != nil { - l = m.NewPrimary.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Names) > 0 { + for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Names[iNdEx]) + copy(dAtA[i:], m.Names[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Names[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - if m.AvoidPrimary != nil { - l = m.AvoidPrimary.SizeVT() - n += 1 + l + sov(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *MoveTablesCreateRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if m.WaitReplicasTimeout != nil { - l = m.WaitReplicasTimeout.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m 
*PlannedReparentShardResponse) SizeVT() (n int) { +func (m *MoveTablesCreateRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MoveTablesCreateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.AtomicCopy { + i-- + if m.AtomicCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 } - if m.PromotedPrimary != nil { - l = m.PromotedPrimary.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.NoRoutingRules { + i-- + if m.NoRoutingRules { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.AutoStart { + i-- + if m.AutoStart { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 } - n += len(m.unknownFields) - return n -} - -func (m *RebuildKeyspaceGraphRequest) SizeVT() (n int) { - if m == nil { - return 0 + if m.DeferSecondaryKeys { + i-- + if m.DeferSecondaryKeys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.DropForeignKeys { + i-- + if m.DropForeignKeys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + } + if m.StopAfterCopy { + i-- + if m.StopAfterCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if len(m.OnDdl) > 0 { + i -= len(m.OnDdl) + copy(dAtA[i:], m.OnDdl) + i = encodeVarint(dAtA, i, uint64(len(m.OnDdl))) + i-- + dAtA[i] = 0x6a + } + if len(m.SourceTimeZone) > 0 { + i -= 
len(m.SourceTimeZone) + copy(dAtA[i:], m.SourceTimeZone) + i = encodeVarint(dAtA, i, uint64(len(m.SourceTimeZone))) + i-- + dAtA[i] = 0x62 + } + if len(m.ExternalClusterName) > 0 { + i -= len(m.ExternalClusterName) + copy(dAtA[i:], m.ExternalClusterName) + i = encodeVarint(dAtA, i, uint64(len(m.ExternalClusterName))) + i-- + dAtA[i] = 0x5a + } + if len(m.ExcludeTables) > 0 { + for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExcludeTables[iNdEx]) + copy(dAtA[i:], m.ExcludeTables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) + i-- + dAtA[i] = 0x52 + } + } + if len(m.IncludeTables) > 0 { + for iNdEx := len(m.IncludeTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.IncludeTables[iNdEx]) + copy(dAtA[i:], m.IncludeTables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.IncludeTables[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + if m.AllTables { + i-- + if m.AllTables { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if len(m.SourceShards) > 0 { + for iNdEx := len(m.SourceShards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SourceShards[iNdEx]) + copy(dAtA[i:], m.SourceShards[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.SourceShards[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if m.TabletSelectionPreference != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletSelectionPreference)) + i-- + dAtA[i] = 0x30 + } + if len(m.TabletTypes) > 0 { + var pksize2 int + for _, num := range m.TabletTypes { + pksize2 += sov(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.TabletTypes { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = encodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x2a } if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i 
= encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x22 } } - if m.AllowPartial { - n += 2 + if len(m.TargetKeyspace) > 0 { + i -= len(m.TargetKeyspace) + copy(dAtA[i:], m.TargetKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.TargetKeyspace))) + i-- + dAtA[i] = 0x1a } - n += len(m.unknownFields) - return n + if len(m.SourceKeyspace) > 0 { + i -= len(m.SourceKeyspace) + copy(dAtA[i:], m.SourceKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.SourceKeyspace))) + i-- + dAtA[i] = 0x12 + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *RebuildKeyspaceGraphResponse) SizeVT() (n int) { +func (m *MoveTablesCreateResponse_TabletInfo) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *RebuildVSchemaGraphRequest) SizeVT() (n int) { +func (m *MoveTablesCreateResponse_TabletInfo) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MoveTablesCreateResponse_TabletInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Created { + i-- + if m.Created { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x10 } - n += len(m.unknownFields) - return n -} - -func (m *RebuildVSchemaGraphResponse) SizeVT() (n int) { - if m == nil { - return 0 + if m.Tablet != nil { + size, err := 
m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - var l int - _ = l - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *RefreshStateRequest) SizeVT() (n int) { +func (m *MoveTablesCreateResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *RefreshStateResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *MoveTablesCreateResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RefreshStateByShardRequest) SizeVT() (n int) { +func (m *MoveTablesCreateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) + if len(m.Details) > 0 { + for iNdEx := len(m.Details) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Details[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 } } - n += len(m.unknownFields) - return n + if len(m.Summary) > 0 { + i -= len(m.Summary) + copy(dAtA[i:], m.Summary) + i = encodeVarint(dAtA, i, uint64(len(m.Summary))) + 
i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *RefreshStateByShardResponse) SizeVT() (n int) { +func (m *MoveTablesCompleteRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if m.IsPartialRefresh { - n += 2 + return nil, nil } - l = len(m.PartialRefreshDetails) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ReloadSchemaRequest) SizeVT() (n int) { +func (m *MoveTablesCompleteRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MoveTablesCompleteRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *ReloadSchemaResponse) SizeVT() (n int) { - if m == nil { - return 0 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *ReloadSchemaKeyspaceRequest) SizeVT() (n int) { - if m == nil { - return 0 + if m.DryRun { + i-- + if m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.RenameTables { + i-- + if m.RenameTables { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 } - l = len(m.WaitPosition) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.KeepRoutingRules { + i-- + if m.KeepRoutingRules { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 } - if m.IncludePrimary { - n += 2 + if m.KeepData { + i-- + if m.KeepData { + dAtA[i] = 1 + } 
else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 } - if m.Concurrency != 0 { - n += 1 + sov(uint64(m.Concurrency)) + if len(m.TargetKeyspace) > 0 { + i -= len(m.TargetKeyspace) + copy(dAtA[i:], m.TargetKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.TargetKeyspace))) + i-- + dAtA[i] = 0x1a } - n += len(m.unknownFields) - return n + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ReloadSchemaKeyspaceResponse) SizeVT() (n int) { +func (m *MoveTablesCompleteResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ReloadSchemaShardRequest) SizeVT() (n int) { +func (m *MoveTablesCompleteResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MoveTablesCompleteResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.WaitPosition) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.IncludePrimary { - n += 2 + if len(m.DryRunResults) > 0 { + for iNdEx := len(m.DryRunResults) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRunResults[iNdEx]) + copy(dAtA[i:], m.DryRunResults[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.DryRunResults[iNdEx]))) + i-- + dAtA[i] = 0x12 
+ } } - if m.Concurrency != 0 { - n += 1 + sov(uint64(m.Concurrency)) + if len(m.Summary) > 0 { + i -= len(m.Summary) + copy(dAtA[i:], m.Summary) + i = encodeVarint(dAtA, i, uint64(len(m.Summary))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ReloadSchemaShardResponse) SizeVT() (n int) { +func (m *PingTabletRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *RemoveBackupRequest) SizeVT() (n int) { +func (m *PingTabletRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PingTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *RemoveBackupResponse) SizeVT() (n int) { +func (m *PingTabletResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *RemoveKeyspaceCellRequest) SizeVT() (n int) { +func (m *PingTabletResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PingTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Cell) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Force { - n += 2 - } - if m.Recursive { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *RemoveKeyspaceCellResponse) SizeVT() (n int) { +func (m *PlannedReparentShardRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *RemoveShardCellRequest) SizeVT() (n int) { +func (m *PlannedReparentShardRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PlannedReparentShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.ShardName) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.WaitReplicasTimeout != nil { + size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a } - l = len(m.Cell) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.AvoidPrimary != nil { + size, err := m.AvoidPrimary.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 } - if m.Force { - n += 2 + if m.NewPrimary != nil { + size, err := m.NewPrimary.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a } - if m.Recursive { - n += 2 + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 } - n += len(m.unknownFields) - return n -} - -func (m *RemoveShardCellResponse) SizeVT() (n int) { - if m == nil { - return 0 + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - var l int - _ = l - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ReparentTabletRequest) SizeVT() (n int) { +func (m *PlannedReparentShardResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Tablet != nil { - l = m.Tablet.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ReparentTabletResponse) SizeVT() (n int) { +func (m *PlannedReparentShardResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PlannedReparentShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 
1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } } - if m.Primary != nil { - l = m.Primary.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.PromotedPrimary != nil { + size, err := m.PromotedPrimary.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a } - n += len(m.unknownFields) - return n + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *RestoreFromBackupRequest) SizeVT() (n int) { +func (m *RebuildKeyspaceGraphRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.BackupTime != nil { - l = m.BackupTime.SizeVT() - n += 1 + l + sov(uint64(l)) - } - l = len(m.RestoreToPos) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.DryRun { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *RestoreFromBackupResponse) SizeVT() (n int) { +func (m *RebuildKeyspaceGraphRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + 
+func (m *RebuildKeyspaceGraphRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.AllowPartial { + i-- + if m.AllowPartial { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x12 + } } - if m.Event != nil { - l = m.Event.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *RunHealthCheckRequest) SizeVT() (n int) { +func (m *RebuildKeyspaceGraphResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *RunHealthCheckResponse) SizeVT() (n int) { +func (m *RebuildKeyspaceGraphResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RebuildKeyspaceGraphResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += 
len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil } -func (m *SetKeyspaceDurabilityPolicyRequest) SizeVT() (n int) { +func (m *RebuildVSchemaGraphRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - l = len(m.DurabilityPolicy) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SetKeyspaceDurabilityPolicyResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Keyspace != nil { - l = m.Keyspace.SizeVT() - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n +func (m *RebuildVSchemaGraphRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetKeyspaceServedFromRequest) SizeVT() (n int) { +func (m *RebuildVSchemaGraphRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.TabletType != 0 { - n += 1 + sov(uint64(m.TabletType)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0xa } } - if m.Remove { - n += 2 - } - l = len(m.SourceKeyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n + return 
len(dAtA) - i, nil } -func (m *SetKeyspaceServedFromResponse) SizeVT() (n int) { +func (m *RebuildVSchemaGraphResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Keyspace != nil { - l = m.Keyspace.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SetKeyspaceShardingInfoRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Force { - n += 2 - } - n += len(m.unknownFields) - return n +func (m *RebuildVSchemaGraphResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetKeyspaceShardingInfoResponse) SizeVT() (n int) { +func (m *RebuildVSchemaGraphResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Keyspace != nil { - l = m.Keyspace.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *SetShardIsPrimaryServingRequest) SizeVT() (n int) { +func (m *RefreshStateRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.IsServing { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SetShardIsPrimaryServingResponse) SizeVT() (n 
int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Shard != nil { - l = m.Shard.SizeVT() - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n +func (m *RefreshStateRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetShardTabletControlRequest) SizeVT() (n int) { +func (m *RefreshStateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.TabletType != 0 { - n += 1 + sov(uint64(m.TabletType)) - } - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.DeniedTables) > 0 { - for _, s := range m.DeniedTables { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - if m.DisableQueryService { - n += 2 - } - if m.Remove { - n += 2 - } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *SetShardTabletControlResponse) SizeVT() (n int) { +func (m *RefreshStateResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Shard != nil { - l = m.Shard.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SetWritableRequest) SizeVT() (n int) { +func (m *RefreshStateResponse) MarshalToVT(dAtA []byte) (int, error) { + size := 
m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RefreshStateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.Writable { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *SetWritableResponse) SizeVT() (n int) { +func (m *RefreshStateByShardRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ShardReplicationAddRequest) SizeVT() (n int) { +func (m *RefreshStateByShardRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RefreshStateByShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x1a + } } - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 } - n += len(m.unknownFields) - 
return n + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ShardReplicationAddResponse) SizeVT() (n int) { +func (m *RefreshStateByShardResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ShardReplicationFixRequest) SizeVT() (n int) { +func (m *RefreshStateByShardResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RefreshStateByShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.PartialRefreshDetails) > 0 { + i -= len(m.PartialRefreshDetails) + copy(dAtA[i:], m.PartialRefreshDetails) + i = encodeVarint(dAtA, i, uint64(len(m.PartialRefreshDetails))) + i-- + dAtA[i] = 0x12 } - l = len(m.Cell) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.IsPartialRefresh { + i-- + if m.IsPartialRefresh { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ShardReplicationFixResponse) SizeVT() (n int) { +func (m *ReloadSchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Error != nil { - l = m.Error.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ShardReplicationPositionsRequest) SizeVT() (n int) { +func (m *ReloadSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReloadSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ShardReplicationPositionsResponse) SizeVT() (n int) { +func (m *ReloadSchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if len(m.ReplicationStatuses) > 0 { - for k, v := range m.ReplicationStatuses { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } + return nil, nil } - if len(m.TabletMap) > 0 { - for k, v := range m.TabletMap { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ShardReplicationRemoveRequest) 
SizeVT() (n int) { +func (m *ReloadSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReloadSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ShardReplicationRemoveResponse) SizeVT() (n int) { +func (m *ReloadSchemaKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *SleepTabletRequest) SizeVT() (n int) { +func (m *ReloadSchemaKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReloadSchemaKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.Duration != nil { - l = m.Duration.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.Concurrency != 0 { + i = encodeVarint(dAtA, i, uint64(m.Concurrency)) + i-- + dAtA[i] = 0x20 } - n += len(m.unknownFields) - return n + if m.IncludePrimary { + i-- + if m.IncludePrimary { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + 
i-- + dAtA[i] = 0x18 + } + if len(m.WaitPosition) > 0 { + i -= len(m.WaitPosition) + copy(dAtA[i:], m.WaitPosition) + i = encodeVarint(dAtA, i, uint64(len(m.WaitPosition))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *SleepTabletResponse) SizeVT() (n int) { +func (m *ReloadSchemaKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *SourceShardAddRequest) SizeVT() (n int) { +func (m *ReloadSchemaKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReloadSchemaKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Uid != 0 { - n += 1 + sov(uint64(m.Uid)) - } - l = len(m.SourceKeyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.SourceShard) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.KeyRange != nil { - l = m.KeyRange.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.Tables) > 0 { - for _, s := range m.Tables { - l = len(s) - n += 1 + l + sov(uint64(l)) + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, 
uint64(size)) + i-- + dAtA[i] = 0xa } } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *SourceShardAddResponse) SizeVT() (n int) { +func (m *ReloadSchemaShardRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Shard != nil { - l = m.Shard.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SourceShardDeleteRequest) SizeVT() (n int) { +func (m *ReloadSchemaShardRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReloadSchemaShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.Concurrency != 0 { + i = encodeVarint(dAtA, i, uint64(m.Concurrency)) + i-- + dAtA[i] = 0x28 } - if m.Uid != 0 { - n += 1 + sov(uint64(m.Uid)) + if m.IncludePrimary { + i-- + if m.IncludePrimary { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 } - n += len(m.unknownFields) - return n -} - -func (m *SourceShardDeleteResponse) SizeVT() (n int) { - if m == nil { - return 0 + if len(m.WaitPosition) > 0 { + i -= len(m.WaitPosition) + copy(dAtA[i:], m.WaitPosition) + i = encodeVarint(dAtA, i, uint64(len(m.WaitPosition))) + i-- + dAtA[i] = 0x1a } - var l int - _ = l - if m.Shard != nil { - l = m.Shard.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 } - n += 
len(m.unknownFields) - return n + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *StartReplicationRequest) SizeVT() (n int) { +func (m *ReloadSchemaShardResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *StartReplicationResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *ReloadSchemaShardResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StopReplicationRequest) SizeVT() (n int) { +func (m *ReloadSchemaShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n -} - -func (m *StopReplicationResponse) SizeVT() (n int) { - if m == nil { - return 0 + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } } - var l int - _ = l - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *TabletExternallyReparentedRequest) SizeVT() (n int) { +func (m *RemoveBackupRequest) MarshalVT() (dAtA 
[]byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Tablet != nil { - l = m.Tablet.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *TabletExternallyReparentedResponse) SizeVT() (n int) { +func (m *RemoveBackupRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a } - if m.NewPrimary != nil { - l = m.NewPrimary.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 } - if m.OldPrimary != nil { - l = m.OldPrimary.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *UpdateCellInfoRequest) SizeVT() (n int) { +func (m *RemoveBackupResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.CellInfo != nil { - l = m.CellInfo.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + 
dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *UpdateCellInfoResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.CellInfo != nil { - l = m.CellInfo.SizeVT() - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n +func (m *RemoveBackupResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UpdateCellsAliasRequest) SizeVT() (n int) { +func (m *RemoveBackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.CellsAlias != nil { - l = m.CellsAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *UpdateCellsAliasResponse) SizeVT() (n int) { +func (m *RemoveKeyspaceCellRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.CellsAlias != nil { - l = m.CellsAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ValidateRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PingTablets { - n += 2 - } - n += len(m.unknownFields) - return n +func (m *RemoveKeyspaceCellRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateResponse) SizeVT() (n int) { +func (m *RemoveKeyspaceCellRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Results) > 0 { - for _, s := range m.Results { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Recursive { + i-- + if m.Recursive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x20 } - if len(m.ResultsByKeyspace) > 0 { - for k, v := range m.ResultsByKeyspace { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x18 } - n += len(m.unknownFields) - return n + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarint(dAtA, i, uint64(len(m.Cell))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ValidateKeyspaceRequest) SizeVT() (n int) { +func (m *RemoveKeyspaceCellResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.PingTablets { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ValidateKeyspaceResponse) SizeVT() (n int) { +func (m *RemoveKeyspaceCellResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveKeyspaceCellResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Results) > 0 { - for _, s := range m.Results { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.ResultsByShard) > 0 { - for k, v := range m.ResultsByShard { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } + return len(dAtA) - i, nil +} + +func (m *RemoveShardCellRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ValidateSchemaKeyspaceRequest) SizeVT() (n int) { +func (m *RemoveShardCellRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveShardCellRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.ExcludeTables) > 0 { - for _, s := range m.ExcludeTables { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.Recursive { + i-- + if m.Recursive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x28 } - if m.IncludeViews { - n += 2 + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 } - if m.SkipNoPrimary { - n += 2 + if len(m.Cell) > 0 { + i -= len(m.Cell) + 
copy(dAtA[i:], m.Cell) + i = encodeVarint(dAtA, i, uint64(len(m.Cell))) + i-- + dAtA[i] = 0x1a } - if m.IncludeVschema { - n += 2 + if len(m.ShardName) > 0 { + i -= len(m.ShardName) + copy(dAtA[i:], m.ShardName) + i = encodeVarint(dAtA, i, uint64(len(m.ShardName))) + i-- + dAtA[i] = 0x12 } - n += len(m.unknownFields) - return n + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ValidateSchemaKeyspaceResponse) SizeVT() (n int) { +func (m *RemoveShardCellResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if len(m.Results) > 0 { - for _, s := range m.Results { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + return nil, nil } - if len(m.ResultsByShard) > 0 { - for k, v := range m.ResultsByShard { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ValidateShardRequest) SizeVT() (n int) { +func (m *RemoveShardCellResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveShardCellResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *ReparentTabletRequest) MarshalVT() (dAtA []byte, err error) { + if m 
== nil { + return nil, nil } - if m.PingTablets { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ValidateShardResponse) SizeVT() (n int) { +func (m *ReparentTabletRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReparentTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Results) > 0 { - for _, s := range m.Results { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Tablet != nil { + size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ValidateVersionKeyspaceRequest) SizeVT() (n int) { +func (m *ReparentTabletResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ValidateVersionKeyspaceResponse) SizeVT() (n int) { +func (m *ReparentTabletResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReparentTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Results) > 0 { - for _, s := range m.Results { - l = len(s) 
- n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.ResultsByShard) > 0 { - for k, v := range m.ResultsByShard { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + if m.Primary != nil { + size, err := m.Primary.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a } - n += len(m.unknownFields) - return n + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ValidateVersionShardRequest) SizeVT() (n int) { +func (m *ReshardCreateRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ValidateVersionShardResponse) SizeVT() (n int) { +func (m *ReshardCreateRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReshardCreateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Results) > 0 { - for _, s := range m.Results { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= 
len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AutoStart { + i-- + if m.AutoStart { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x60 } - n += len(m.unknownFields) - return n + if m.DeferSecondaryKeys { + i-- + if m.DeferSecondaryKeys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + if m.StopAfterCopy { + i-- + if m.StopAfterCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if len(m.OnDdl) > 0 { + i -= len(m.OnDdl) + copy(dAtA[i:], m.OnDdl) + i = encodeVarint(dAtA, i, uint64(len(m.OnDdl))) + i-- + dAtA[i] = 0x4a + } + if m.SkipSchemaCopy { + i-- + if m.SkipSchemaCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.TabletSelectionPreference != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletSelectionPreference)) + i-- + dAtA[i] = 0x38 + } + if len(m.TabletTypes) > 0 { + var pksize2 int + for _, num := range m.TabletTypes { + pksize2 += sov(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.TabletTypes { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = encodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x32 + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.TargetShards) > 0 { + for iNdEx := len(m.TargetShards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TargetShards[iNdEx]) + copy(dAtA[i:], m.TargetShards[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.TargetShards[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.SourceShards) > 0 { + for iNdEx := len(m.SourceShards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SourceShards[iNdEx]) + copy(dAtA[i:], m.SourceShards[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.SourceShards[iNdEx]))) + i-- + 
dAtA[i] = 0x1a + } + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x12 + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ValidateVSchemaRequest) SizeVT() (n int) { +func (m *RestoreFromBackupRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RestoreFromBackupRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RestoreFromBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.Shards) > 0 { - for _, s := range m.Shards { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.RestoreToTimestamp != nil { + size, err := m.RestoreToTimestamp.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a } - if len(m.ExcludeTables) > 0 { - for _, s := range m.ExcludeTables { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.DryRun { + i-- + if m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x20 } - if m.IncludeViews { - n += 2 + if len(m.RestoreToPos) > 0 { + i -= len(m.RestoreToPos) + copy(dAtA[i:], m.RestoreToPos) + i = encodeVarint(dAtA, i, uint64(len(m.RestoreToPos))) + i-- + dAtA[i] = 0x1a } - n += len(m.unknownFields) - return n + if m.BackupTime != nil { + size, err 
:= m.BackupTime.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ValidateVSchemaResponse) SizeVT() (n int) { +func (m *RestoreFromBackupResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *RestoreFromBackupResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RestoreFromBackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Results) > 0 { - for _, s := range m.Results { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Event != nil { + size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 } - if len(m.ResultsByShard) > 0 { - for k, v := range m.ResultsByShard { + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x1a + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x12 + } + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, 
uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetrySchemaMigrationRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetrySchemaMigrationRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RetrySchemaMigrationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarint(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RetrySchemaMigrationResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetrySchemaMigrationResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RetrySchemaMigrationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RowsAffectedByShard) > 0 { + for k := range m.RowsAffectedByShard { + v := m.RowsAffectedByShard[k] + baseI := i + i = encodeVarint(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 
+ i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RunHealthCheckRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunHealthCheckRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RunHealthCheckRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RunHealthCheckResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunHealthCheckResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RunHealthCheckResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *SetKeyspaceDurabilityPolicyRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + 
dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetKeyspaceDurabilityPolicyRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetKeyspaceDurabilityPolicyRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DurabilityPolicy) > 0 { + i -= len(m.DurabilityPolicy) + copy(dAtA[i:], m.DurabilityPolicy) + i = encodeVarint(dAtA, i, uint64(len(m.DurabilityPolicy))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SetKeyspaceDurabilityPolicyResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetKeyspaceDurabilityPolicyResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetKeyspaceDurabilityPolicyResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Keyspace != nil { + size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SetKeyspaceServedFromRequest) MarshalVT() (dAtA []byte, err error) { 
+ if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetKeyspaceServedFromRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetKeyspaceServedFromRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SourceKeyspace) > 0 { + i -= len(m.SourceKeyspace) + copy(dAtA[i:], m.SourceKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.SourceKeyspace))) + i-- + dAtA[i] = 0x2a + } + if m.Remove { + i-- + if m.Remove { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.TabletType != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletType)) + i-- + dAtA[i] = 0x10 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SetKeyspaceServedFromResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetKeyspaceServedFromResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetKeyspaceServedFromResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil 
+ } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Keyspace != nil { + size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SetKeyspaceShardingInfoRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetKeyspaceShardingInfoRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetKeyspaceShardingInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SetKeyspaceShardingInfoResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetKeyspaceShardingInfoResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetKeyspaceShardingInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + 
_ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Keyspace != nil { + size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SetShardIsPrimaryServingRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetShardIsPrimaryServingRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetShardIsPrimaryServingRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IsServing { + i-- + if m.IsServing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SetShardIsPrimaryServingResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetShardIsPrimaryServingResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetShardIsPrimaryServingResponse) 
MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Shard != nil { + size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SetShardTabletControlRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetShardTabletControlRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetShardTabletControlRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Remove { + i-- + if m.Remove { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.DisableQueryService { + i-- + if m.DisableQueryService { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.DeniedTables) > 0 { + for iNdEx := len(m.DeniedTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DeniedTables[iNdEx]) + copy(dAtA[i:], m.DeniedTables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.DeniedTables[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if m.TabletType != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletType)) + i-- + dAtA[i] = 0x18 + } + if 
len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SetShardTabletControlResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetShardTabletControlResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetShardTabletControlResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Shard != nil { + size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SetWritableRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetWritableRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetWritableRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Writable { + i-- + if m.Writable { + dAtA[i] = 
1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SetWritableResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetWritableResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetWritableResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *ShardReplicationAddRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShardReplicationAddRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ShardReplicationAddRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = 
encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ShardReplicationAddResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShardReplicationAddResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ShardReplicationAddResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *ShardReplicationFixRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShardReplicationFixRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ShardReplicationFixRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarint(dAtA, i, uint64(len(m.Cell))) + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if 
len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ShardReplicationFixResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShardReplicationFixResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ShardReplicationFixResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Error != nil { + size, err := m.Error.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ShardReplicationPositionsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShardReplicationPositionsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ShardReplicationPositionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if 
len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ShardReplicationPositionsResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShardReplicationPositionsResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ShardReplicationPositionsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TabletMap) > 0 { + for k := range m.TabletMap { + v := m.TabletMap[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.ReplicationStatuses) > 0 { + for k := range m.ReplicationStatuses { + v := m.ReplicationStatuses[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ShardReplicationRemoveRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, 
size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShardReplicationRemoveRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ShardReplicationRemoveRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ShardReplicationRemoveResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ShardReplicationRemoveResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ShardReplicationRemoveResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *SleepTabletRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, 
err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SleepTabletRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SleepTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Duration != nil { + size, err := m.Duration.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SleepTabletResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SleepTabletResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SleepTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *SourceShardAddRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceShardAddRequest) MarshalToVT(dAtA 
[]byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SourceShardAddRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tables[iNdEx]) + copy(dAtA[i:], m.Tables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if m.KeyRange != nil { + size, err := m.KeyRange.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.SourceShard) > 0 { + i -= len(m.SourceShard) + copy(dAtA[i:], m.SourceShard) + i = encodeVarint(dAtA, i, uint64(len(m.SourceShard))) + i-- + dAtA[i] = 0x2a + } + if len(m.SourceKeyspace) > 0 { + i -= len(m.SourceKeyspace) + copy(dAtA[i:], m.SourceKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.SourceKeyspace))) + i-- + dAtA[i] = 0x22 + } + if m.Uid != 0 { + i = encodeVarint(dAtA, i, uint64(m.Uid)) + i-- + dAtA[i] = 0x18 + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SourceShardAddResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceShardAddResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + 
+func (m *SourceShardAddResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Shard != nil { + size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SourceShardDeleteRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceShardDeleteRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SourceShardDeleteRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Uid != 0 { + i = encodeVarint(dAtA, i, uint64(m.Uid)) + i-- + dAtA[i] = 0x18 + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SourceShardDeleteResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceShardDeleteResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return 
m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SourceShardDeleteResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Shard != nil { + size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StartReplicationRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StartReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StartReplicationResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StartReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { 
+ return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *StopReplicationRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StopReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StopReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StopReplicationResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StopReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StopReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *TabletExternallyReparentedRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TabletExternallyReparentedRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TabletExternallyReparentedRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Tablet != nil { + size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TabletExternallyReparentedResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TabletExternallyReparentedResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TabletExternallyReparentedResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.OldPrimary != nil { + size, err := m.OldPrimary.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.NewPrimary != nil { + size, err := m.NewPrimary.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], 
m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateCellInfoRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateCellInfoRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UpdateCellInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.CellInfo != nil { + size, err := m.CellInfo.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateCellInfoResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateCellInfoResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UpdateCellInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + 
copy(dAtA[i:], m.unknownFields) + } + if m.CellInfo != nil { + size, err := m.CellInfo.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateCellsAliasRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateCellsAliasRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UpdateCellsAliasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.CellsAlias != nil { + size, err := m.CellsAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateCellsAliasResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateCellsAliasResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UpdateCellsAliasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) 
{ + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.CellsAlias != nil { + size, err := m.CellsAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidateRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PingTablets { + i-- + if m.PingTablets { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ValidateResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= 
len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResultsByKeyspace) > 0 { + for k := range m.ResultsByKeyspace { + v := m.ResultsByKeyspace[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Results[iNdEx]) + copy(dAtA[i:], m.Results[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValidateKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PingTablets { + i-- + if m.PingTablets { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidateKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + 
if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResultsByShard) > 0 { + for k := range m.ResultsByShard { + v := m.ResultsByShard[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Results[iNdEx]) + copy(dAtA[i:], m.Results[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValidateSchemaKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateSchemaKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateSchemaKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IncludeVschema { + i-- + if m.IncludeVschema { + dAtA[i] = 1 + } else { + dAtA[i] = 0 
+ } + i-- + dAtA[i] = 0x28 + } + if m.SkipNoPrimary { + i-- + if m.SkipNoPrimary { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.IncludeViews { + i-- + if m.IncludeViews { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.ExcludeTables) > 0 { + for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExcludeTables[iNdEx]) + copy(dAtA[i:], m.ExcludeTables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidateSchemaKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateSchemaKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateSchemaKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResultsByShard) > 0 { + for k := range m.ResultsByShard { + v := m.ResultsByShard[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + i -= 
len(m.Results[iNdEx]) + copy(dAtA[i:], m.Results[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValidateShardRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateShardRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PingTablets { + i-- + if m.PingTablets { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidateShardResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateShardResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } 
+ if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Results[iNdEx]) + copy(dAtA[i:], m.Results[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValidateVersionKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateVersionKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateVersionKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidateVersionKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateVersionKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateVersionKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResultsByShard) > 0 { + for k := range m.ResultsByShard { + v := m.ResultsByShard[k] + baseI := i + 
size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Results[iNdEx]) + copy(dAtA[i:], m.Results[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValidateVersionShardRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateVersionShardRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateVersionShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidateVersionShardResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateVersionShardResponse) MarshalToVT(dAtA []byte) 
(int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateVersionShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Results[iNdEx]) + copy(dAtA[i:], m.Results[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValidateVSchemaRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IncludeViews { + i-- + if m.IncludeViews { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.ExcludeTables) > 0 { + for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExcludeTables[iNdEx]) + copy(dAtA[i:], m.ExcludeTables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Shards) > 0 { + for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Shards[iNdEx]) + copy(dAtA[i:], m.Shards[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Shards[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], 
m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidateVSchemaResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResultsByShard) > 0 { + for k := range m.ResultsByShard { + v := m.ResultsByShard[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Results[iNdEx]) + copy(dAtA[i:], m.Results[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *VDiffCreateRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffCreateRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffCreateRequest) 
MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Verbose { + i-- + if m.Verbose { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + } + if m.AutoRetry { + i-- + if m.AutoRetry { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.WaitUpdateInterval != nil { + size, err := m.WaitUpdateInterval.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Wait { + i-- + if m.Wait { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + } + if m.MaxExtraRowsToCompare != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxExtraRowsToCompare)) + i-- + dAtA[i] = 0x70 + } + if m.UpdateTableStats { + i-- + if m.UpdateTableStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + } + if m.OnlyPKs { + i-- + if m.OnlyPKs { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + } + if m.DebugQuery { + i-- + if m.DebugQuery { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + if m.FilteredReplicationWaitTime != nil { + size, err := m.FilteredReplicationWaitTime.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.Limit != 0 { + i = encodeVarint(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x48 + } + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tables[iNdEx]) + copy(dAtA[i:], m.Tables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.TabletSelectionPreference != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletSelectionPreference)) + i-- 
+ dAtA[i] = 0x38 + } + if len(m.TabletTypes) > 0 { + var pksize2 int + for _, num := range m.TabletTypes { + pksize2 += sov(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.TabletTypes { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = encodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x32 + } + if len(m.TargetCells) > 0 { + for iNdEx := len(m.TargetCells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TargetCells[iNdEx]) + copy(dAtA[i:], m.TargetCells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.TargetCells[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.SourceCells) > 0 { + for iNdEx := len(m.SourceCells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SourceCells[iNdEx]) + copy(dAtA[i:], m.SourceCells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.SourceCells[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarint(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0x1a + } + if len(m.TargetKeyspace) > 0 { + i -= len(m.TargetKeyspace) + copy(dAtA[i:], m.TargetKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.TargetKeyspace))) + i-- + dAtA[i] = 0x12 + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VDiffCreateResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffCreateResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffCreateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.UUID) > 0 { + i -= len(m.UUID) + copy(dAtA[i:], m.UUID) + i = encodeVarint(dAtA, i, uint64(len(m.UUID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VDiffDeleteRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffDeleteRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffDeleteRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Arg) > 0 { + i -= len(m.Arg) + copy(dAtA[i:], m.Arg) + i = encodeVarint(dAtA, i, uint64(len(m.Arg))) + i-- + dAtA[i] = 0x1a + } + if len(m.TargetKeyspace) > 0 { + i -= len(m.TargetKeyspace) + copy(dAtA[i:], m.TargetKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.TargetKeyspace))) + i-- + dAtA[i] = 0x12 + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VDiffDeleteResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffDeleteResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffDeleteResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + 
if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *VDiffResumeRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffResumeRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffResumeRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarint(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0x1a + } + if len(m.TargetKeyspace) > 0 { + i -= len(m.TargetKeyspace) + copy(dAtA[i:], m.TargetKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.TargetKeyspace))) + i-- + dAtA[i] = 0x12 + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VDiffResumeResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffResumeResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffResumeResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *VDiffShowRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffShowRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffShowRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Arg) > 0 { + i -= len(m.Arg) + copy(dAtA[i:], m.Arg) + i = encodeVarint(dAtA, i, uint64(len(m.Arg))) + i-- + dAtA[i] = 0x1a + } + if len(m.TargetKeyspace) > 0 { + i -= len(m.TargetKeyspace) + copy(dAtA[i:], m.TargetKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.TargetKeyspace))) + i-- + dAtA[i] = 0x12 + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VDiffShowResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffShowResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffShowResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if 
len(m.TabletResponses) > 0 { + for k := range m.TabletResponses { + v := m.TabletResponses[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *VDiffStopRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffStopRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffStopRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarint(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0x1a + } + if len(m.TargetKeyspace) > 0 { + i -= len(m.TargetKeyspace) + copy(dAtA[i:], m.TargetKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.TargetKeyspace))) + i-- + dAtA[i] = 0x12 + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VDiffStopResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffStopResponse) MarshalToVT(dAtA []byte) (int, 
error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffStopResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *WorkflowDeleteRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowDeleteRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowDeleteRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.KeepRoutingRules { + i-- + if m.KeepRoutingRules { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.KeepData { + i-- + if m.KeepData { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowDeleteResponse_TabletInfo) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowDeleteResponse_TabletInfo) MarshalToVT(dAtA 
[]byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowDeleteResponse_TabletInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Deleted { + i-- + if m.Deleted { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Tablet != nil { + size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowDeleteResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowDeleteResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowDeleteResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Details) > 0 { + for iNdEx := len(m.Details) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Details[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Summary) > 0 { + i -= len(m.Summary) + copy(dAtA[i:], m.Summary) + i = encodeVarint(dAtA, i, uint64(len(m.Summary))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowStatusRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowStatusRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowStatusResponse_TableCopyState) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowStatusResponse_TableCopyState) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowStatusResponse_TableCopyState) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.BytesPercentage != 0 { + i -= 4 + binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.BytesPercentage)))) + i-- + dAtA[i] = 0x35 + } + if m.BytesTotal != 0 { + i = encodeVarint(dAtA, i, uint64(m.BytesTotal)) + i-- + dAtA[i] = 0x28 + } + if m.BytesCopied != 0 { + i = encodeVarint(dAtA, i, uint64(m.BytesCopied)) + i-- + dAtA[i] = 0x20 + } + if 
m.RowsPercentage != 0 { + i -= 4 + binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.RowsPercentage)))) + i-- + dAtA[i] = 0x1d + } + if m.RowsTotal != 0 { + i = encodeVarint(dAtA, i, uint64(m.RowsTotal)) + i-- + dAtA[i] = 0x10 + } + if m.RowsCopied != 0 { + i = encodeVarint(dAtA, i, uint64(m.RowsCopied)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *WorkflowStatusResponse_ShardStreamState) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowStatusResponse_ShardStreamState) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowStatusResponse_ShardStreamState) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Info) > 0 { + i -= len(m.Info) + copy(dAtA[i:], m.Info) + i = encodeVarint(dAtA, i, uint64(len(m.Info))) + i-- + dAtA[i] = 0x32 + } + if len(m.Status) > 0 { + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarint(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x2a + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0x22 + } + if len(m.SourceShard) > 0 { + i -= len(m.SourceShard) + copy(dAtA[i:], m.SourceShard) + i = encodeVarint(dAtA, i, uint64(len(m.SourceShard))) + i-- + dAtA[i] = 0x1a + } + if m.Tablet != nil { + size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Id != 0 { + i = encodeVarint(dAtA, i, 
uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *WorkflowStatusResponse_ShardStreams) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowStatusResponse_ShardStreams) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowStatusResponse_ShardStreams) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Streams) > 0 { + for iNdEx := len(m.Streams) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Streams[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *WorkflowStatusResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowStatusResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TrafficState) > 0 { + i -= len(m.TrafficState) + copy(dAtA[i:], m.TrafficState) + i = encodeVarint(dAtA, i, uint64(len(m.TrafficState))) + i-- + dAtA[i] = 0x1a + } + if len(m.ShardStreams) > 0 { + 
for k := range m.ShardStreams { + v := m.ShardStreams[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.TableCopyState) > 0 { + for k := range m.TableCopyState { + v := m.TableCopyState[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *WorkflowSwitchTrafficRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowSwitchTrafficRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowSwitchTrafficRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.InitializeTargetSequences { + i-- + if m.InitializeTargetSequences { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.DryRun { + i-- + if m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + if m.Timeout != nil { + size, err := m.Timeout.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.Direction != 0 { + i = encodeVarint(dAtA, i, uint64(m.Direction)) + i-- + dAtA[i] = 0x38 + } + if m.EnableReverseReplication { + i-- + if m.EnableReverseReplication { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.MaxReplicationLagAllowed != nil { + size, err := m.MaxReplicationLagAllowed.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if len(m.TabletTypes) > 0 { + var pksize2 int + for _, num := range m.TabletTypes { + pksize2 += sov(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.TabletTypes { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = encodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x22 + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowSwitchTrafficResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowSwitchTrafficResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowSwitchTrafficResponse) 
MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DryRunResults) > 0 { + for iNdEx := len(m.DryRunResults) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRunResults[iNdEx]) + copy(dAtA[i:], m.DryRunResults[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.DryRunResults[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.CurrentState) > 0 { + i -= len(m.CurrentState) + copy(dAtA[i:], m.CurrentState) + i = encodeVarint(dAtA, i, uint64(len(m.CurrentState))) + i-- + dAtA[i] = 0x1a + } + if len(m.StartState) > 0 { + i -= len(m.StartState) + copy(dAtA[i:], m.StartState) + i = encodeVarint(dAtA, i, uint64(len(m.StartState))) + i-- + dAtA[i] = 0x12 + } + if len(m.Summary) > 0 { + i -= len(m.Summary) + copy(dAtA[i:], m.Summary) + i = encodeVarint(dAtA, i, uint64(len(m.Summary))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowUpdateRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowUpdateRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowUpdateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TabletRequest != nil { + size, err := m.TabletRequest.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) 
+ i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowUpdateResponse_TabletInfo) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowUpdateResponse_TabletInfo) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowUpdateResponse_TabletInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Changed { + i-- + if m.Changed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Tablet != nil { + size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowUpdateResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowUpdateResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowUpdateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Details) > 0 { + for iNdEx := len(m.Details) - 1; iNdEx >= 0; iNdEx-- { + size, err := 
m.Details[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Summary) > 0 { + i -= len(m.Summary) + copy(dAtA[i:], m.Summary) + i = encodeVarint(dAtA, i, uint64(len(m.Summary))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ExecuteVtctlCommandRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.ActionTimeout != 0 { + n += 1 + sov(uint64(m.ActionTimeout)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteVtctlCommandResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Event != nil { + l = m.Event.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TableMaterializeSettings) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TargetTable) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.SourceExpression) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.CreateDdl) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *MaterializeSettings) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.SourceKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TargetKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.StopAfterCopy { + n += 2 + } + if len(m.TableSettings) > 0 { + for _, e := range m.TableSettings { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + l = 
len(m.Cell) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TabletTypes) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ExternalCluster) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.MaterializationIntent != 0 { + n += 1 + sov(uint64(m.MaterializationIntent)) + } + l = len(m.SourceTimeZone) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TargetTimeZone) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.SourceShards) > 0 { + for _, s := range m.SourceShards { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.OnDdl) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.DeferSecondaryKeys { + n += 2 + } + if m.TabletSelectionPreference != 0 { + n += 1 + sov(uint64(m.TabletSelectionPreference)) + } + if m.AtomicCopy { + n += 3 + } + n += len(m.unknownFields) + return n +} + +func (m *Keyspace) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Keyspace != nil { + l = m.Keyspace.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SchemaMigration) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Schema) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Table) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.MigrationStatement) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Strategy != 0 { + n += 1 + sov(uint64(m.Strategy)) + } + l = len(m.Options) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.AddedAt != nil { + l = m.AddedAt.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.RequestedAt != nil { + l = m.RequestedAt.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.ReadyAt != nil { + l = m.ReadyAt.SizeVT() + n += 1 
+ l + sov(uint64(l)) + } + if m.StartedAt != nil { + l = m.StartedAt.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.LivenessTimestamp != nil { + l = m.LivenessTimestamp.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.CompletedAt != nil { + l = m.CompletedAt.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.CleanedUpAt != nil { + l = m.CleanedUpAt.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Status != 0 { + n += 2 + sov(uint64(m.Status)) + } + l = len(m.LogPath) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.Artifacts) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + if m.Retries != 0 { + n += 2 + sov(uint64(m.Retries)) + } + if m.Tablet != nil { + l = m.Tablet.SizeVT() + n += 2 + l + sov(uint64(l)) + } + if m.TabletFailure { + n += 3 + } + if m.Progress != 0 { + n += 6 + } + l = len(m.MigrationContext) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.DdlAction) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.Message) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + if m.EtaSeconds != 0 { + n += 2 + sov(uint64(m.EtaSeconds)) + } + if m.RowsCopied != 0 { + n += 2 + sov(uint64(m.RowsCopied)) + } + if m.TableRows != 0 { + n += 2 + sov(uint64(m.TableRows)) + } + if m.AddedUniqueKeys != 0 { + n += 2 + sov(uint64(m.AddedUniqueKeys)) + } + if m.RemovedUniqueKeys != 0 { + n += 2 + sov(uint64(m.RemovedUniqueKeys)) + } + l = len(m.LogFile) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + if m.ArtifactRetention != nil { + l = m.ArtifactRetention.SizeVT() + n += 2 + l + sov(uint64(l)) + } + if m.PostponeCompletion { + n += 3 + } + l = len(m.RemovedUniqueKeyNames) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.DroppedNoDefaultColumnNames) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.ExpandedColumnNames) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.RevertibleNotes) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + if m.AllowConcurrent { + n += 3 + } + l = len(m.RevertedUuid) + if l > 0 { + n += 2 + 
l + sov(uint64(l)) + } + if m.IsView { + n += 3 + } + if m.ReadyToComplete { + n += 3 + } + if m.VitessLivenessIndicator != 0 { + n += 2 + sov(uint64(m.VitessLivenessIndicator)) + } + if m.UserThrottleRatio != 0 { + n += 6 + } + l = len(m.SpecialPlan) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + if m.LastThrottledAt != nil { + l = m.LastThrottledAt.SizeVT() + n += 2 + l + sov(uint64(l)) + } + l = len(m.ComponentThrottled) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + if m.CancelledAt != nil { + l = m.CancelledAt.SizeVT() + n += 2 + l + sov(uint64(l)) + } + if m.PostponeLaunch { + n += 3 + } + l = len(m.Stage) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + if m.CutoverAttempts != 0 { + n += 2 + sov(uint64(m.CutoverAttempts)) + } + if m.IsImmediateOperation { + n += 3 + } + if m.ReviewedAt != nil { + l = m.ReviewedAt.SizeVT() + n += 2 + l + sov(uint64(l)) + } + if m.ReadyToCompleteAt != nil { + l = m.ReadyToCompleteAt.SizeVT() + n += 2 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Shard) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Shard != nil { + l = m.Shard.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Workflow_ReplicationLocation) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Shards) > 0 { + for _, s := range m.Shards { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Workflow_ShardStream) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Streams) > 0 { + for _, e := range m.Streams { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.TabletControls) > 0 { + for _, e := range m.TabletControls { + l = 
e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.IsPrimaryServing { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Workflow_Stream_CopyState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Table) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.LastPk) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Workflow_Stream_Log) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sov(uint64(m.Id)) + } + if m.StreamId != 0 { + n += 1 + sov(uint64(m.StreamId)) + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.CreatedAt != nil { + l = m.CreatedAt.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.UpdatedAt != nil { + l = m.UpdatedAt.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Count != 0 { + n += 1 + sov(uint64(m.Count)) + } + n += len(m.unknownFields) + return n +} + +func (m *Workflow_Stream_ThrottlerStatus) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ComponentThrottled) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TimeThrottled != nil { + l = m.TimeThrottled.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Workflow_Stream) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sov(uint64(m.Id)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Tablet != nil { + l = m.Tablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.BinlogSource != nil { + l = m.BinlogSource.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.StopPosition) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.State) + if l > 0 { + n 
+= 1 + l + sov(uint64(l)) + } + l = len(m.DbName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TransactionTimestamp != nil { + l = m.TransactionTimestamp.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.TimeUpdated != nil { + l = m.TimeUpdated.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.CopyStates) > 0 { + for _, e := range m.CopyStates { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Logs) > 0 { + for _, e := range m.Logs { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.LogFetchError) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Tags) > 0 { + for _, s := range m.Tags { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.RowsCopied != 0 { + n += 2 + sov(uint64(m.RowsCopied)) + } + if m.ThrottlerStatus != nil { + l = m.ThrottlerStatus.SizeVT() + n += 2 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Workflow) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Source != nil { + l = m.Source.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Target != nil { + l = m.Target.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.MaxVReplicationLag != 0 { + n += 1 + sov(uint64(m.MaxVReplicationLag)) + } + if len(m.ShardStreams) > 0 { + for k, v := range m.ShardStreams { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + l = len(m.WorkflowType) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.WorkflowSubType) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.MaxVReplicationTransactionLag != 0 { + n += 1 + sov(uint64(m.MaxVReplicationTransactionLag)) + } + if m.DeferSecondaryKeys { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m 
*AddCellInfoRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.CellInfo != nil { + l = m.CellInfo.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *AddCellInfoResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *AddCellsAliasRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *AddCellsAliasResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ApplyRoutingRulesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RoutingRules != nil { + l = m.RoutingRules.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.SkipRebuild { + n += 2 + } + if len(m.RebuildCells) > 0 { + for _, s := range m.RebuildCells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ApplyRoutingRulesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ApplyShardRoutingRulesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ShardRoutingRules != nil { + l = m.ShardRoutingRules.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.SkipRebuild { + n += 2 + } + if len(m.RebuildCells) > 0 { + for _, s := range m.RebuildCells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ApplyShardRoutingRulesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return 
n +} + +func (m *ApplySchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Sql) > 0 { + for _, s := range m.Sql { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.DdlStrategy) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.UuidList) > 0 { + for _, s := range m.UuidList { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.MigrationContext) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.WaitReplicasTimeout != nil { + l = m.WaitReplicasTimeout.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.CallerId != nil { + l = m.CallerId.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.BatchSize != 0 { + n += 1 + sov(uint64(m.BatchSize)) + } + n += len(m.unknownFields) + return n +} + +func (m *ApplySchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.UuidList) > 0 { + for _, s := range m.UuidList { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.RowsAffectedByShard) > 0 { + for k, v := range m.RowsAffectedByShard { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + sov(uint64(v)) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ApplyVSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.SkipRebuild { + n += 2 + } + if m.DryRun { + n += 2 + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.VSchema != nil { + l = m.VSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Sql) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ApplyVSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VSchema != nil { + l = 
m.VSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *BackupRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.AllowPrimary { + n += 2 + } + if m.Concurrency != 0 { + n += 1 + sov(uint64(m.Concurrency)) + } + l = len(m.IncrementalFromPos) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.UpgradeSafe { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *BackupResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Event != nil { + l = m.Event.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *BackupShardRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.AllowPrimary { + n += 2 + } + if m.Concurrency != 0 { + n += 1 + sov(uint64(m.Concurrency)) + } + if m.UpgradeSafe { + n += 2 + } + l = len(m.IncrementalFromPos) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CancelSchemaMigrationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CancelSchemaMigrationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RowsAffectedByShard) > 0 { + for k, v := range m.RowsAffectedByShard { + _ = k + _ = v + mapEntrySize := 1 
+ len(k) + sov(uint64(len(k))) + 1 + sov(uint64(v)) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ChangeTabletTypeRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.DbType != 0 { + n += 1 + sov(uint64(m.DbType)) + } + if m.DryRun { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ChangeTabletTypeResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BeforeTablet != nil { + l = m.BeforeTablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.AfterTablet != nil { + l = m.AfterTablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.WasDryRun { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *CleanupSchemaMigrationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CleanupSchemaMigrationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RowsAffectedByShard) > 0 { + for k, v := range m.RowsAffectedByShard { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + sov(uint64(v)) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *CompleteSchemaMigrationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CompleteSchemaMigrationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RowsAffectedByShard) > 0 { + for k, v := range 
m.RowsAffectedByShard { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + sov(uint64(v)) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *CreateKeyspaceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Force { + n += 2 + } + if m.AllowEmptyVSchema { + n += 2 + } + if len(m.ServedFroms) > 0 { + for _, e := range m.ServedFroms { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.Type != 0 { + n += 1 + sov(uint64(m.Type)) + } + l = len(m.BaseKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.SnapshotTime != nil { + l = m.SnapshotTime.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.DurabilityPolicy) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.SidecarDbName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CreateKeyspaceResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Keyspace != nil { + l = m.Keyspace.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CreateShardRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ShardName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Force { + n += 2 + } + if m.IncludeParent { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *CreateShardResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Keyspace != nil { + l = m.Keyspace.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Shard != nil { + l = m.Shard.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.ShardAlreadyExists { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteCellInfoRequest) SizeVT() (n int) { + if m == nil { + return 0 + 
} + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Force { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteCellInfoResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *DeleteCellsAliasRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteCellsAliasResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *DeleteKeyspaceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Recursive { + n += 2 + } + if m.Force { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteKeyspaceResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *DeleteShardsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Shards) > 0 { + for _, e := range m.Shards { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.Recursive { + n += 2 + } + if m.EvenIfServing { + n += 2 + } + if m.Force { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteShardsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *DeleteSrvVSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cell) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteSrvVSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *DeleteTabletsRequest) SizeVT() 
(n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TabletAliases) > 0 { + for _, e := range m.TabletAliases { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.AllowPrimary { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteTabletsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *EmergencyReparentShardRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.NewPrimary != nil { + l = m.NewPrimary.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if len(m.IgnoreReplicas) > 0 { + for _, e := range m.IgnoreReplicas { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.WaitReplicasTimeout != nil { + l = m.WaitReplicasTimeout.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.PreventCrossCellPromotion { + n += 2 + } + if m.WaitForAllTablets { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *EmergencyReparentShardResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.PromotedPrimary != nil { + l = m.PromotedPrimary.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteFetchAsAppRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Query) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.MaxRows != 0 { + n += 1 + sov(uint64(m.MaxRows)) + } + if m.UsePool { + n += 2 + } + n += len(m.unknownFields) + 
return n +} + +func (m *ExecuteFetchAsAppResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteFetchAsDBARequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Query) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.MaxRows != 0 { + n += 1 + sov(uint64(m.MaxRows)) + } + if m.DisableBinlogs { + n += 2 + } + if m.ReloadSchema { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteFetchAsDBAResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteHookRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.TabletHookRequest != nil { + l = m.TabletHookRequest.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteHookResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HookResult != nil { + l = m.HookResult.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FindAllShardsInKeyspaceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FindAllShardsInKeyspaceResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Shards) > 0 { + for k, v := range m.Shards { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) 
+ sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetBackupsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Limit != 0 { + n += 1 + sov(uint64(m.Limit)) + } + if m.Detailed { + n += 2 + } + if m.DetailedLimit != 0 { + n += 1 + sov(uint64(m.DetailedLimit)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetBackupsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Backups) > 0 { + for _, e := range m.Backups { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetCellInfoRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cell) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetCellInfoResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CellInfo != nil { + l = m.CellInfo.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetCellInfoNamesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetCellInfoNamesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetCellsAliasesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetCellsAliasesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Aliases) > 0 { + for k, v := range m.Aliases { + _ = k + _ = v + l = 
0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetFullStatusRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetFullStatusResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetKeyspacesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetKeyspacesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Keyspaces) > 0 { + for _, e := range m.Keyspaces { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetKeyspaceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetKeyspaceResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Keyspace != nil { + l = m.Keyspace.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetPermissionsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetPermissionsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Permissions != nil { + l = m.Permissions.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += 
len(m.unknownFields) + return n +} + +func (m *GetRoutingRulesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetRoutingRulesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RoutingRules != nil { + l = m.RoutingRules.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if len(m.Tables) > 0 { + for _, s := range m.Tables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ExcludeTables) > 0 { + for _, s := range m.ExcludeTables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.IncludeViews { + n += 2 + } + if m.TableNamesOnly { + n += 2 + } + if m.TableSizesOnly { + n += 2 + } + if m.TableSchemaOnly { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *GetSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Schema != nil { + l = m.Schema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetSchemaMigrationsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.MigrationContext) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Status != 0 { + n += 1 + sov(uint64(m.Status)) + } + if m.Recent != nil { + l = m.Recent.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Order != 0 { + n += 1 + sov(uint64(m.Order)) + } + if m.Limit != 0 { + n += 1 + sov(uint64(m.Limit)) + } + if m.Skip != 0 { + n += 1 + sov(uint64(m.Skip)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetSchemaMigrationsResponse) SizeVT() (n int) { + if m == nil 
{ + return 0 + } + var l int + _ = l + if len(m.Migrations) > 0 { + for _, e := range m.Migrations { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetShardRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ShardName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetShardResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Shard != nil { + l = m.Shard.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetShardRoutingRulesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetShardRoutingRulesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ShardRoutingRules != nil { + l = m.ShardRoutingRules.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetSrvKeyspaceNamesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetSrvKeyspaceNamesResponse_NameList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetSrvKeyspaceNamesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Names) > 0 { + for k, v := range m.Names { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } 
+ n += len(m.unknownFields) + return n +} + +func (m *GetSrvKeyspacesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetSrvKeyspacesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.SrvKeyspaces) > 0 { + for k, v := range m.SrvKeyspaces { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateThrottlerConfigRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Enable { + n += 2 + } + if m.Disable { + n += 2 + } + if m.Threshold != 0 { + n += 9 + } + l = len(m.CustomQuery) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.CustomQuerySet { + n += 2 + } + if m.CheckAsCheckSelf { + n += 2 + } + if m.CheckAsCheckShard { + n += 2 + } + if m.ThrottledApp != nil { + l = m.ThrottledApp.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateThrottlerConfigResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetSrvVSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cell) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetSrvVSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SrvVSchema != nil { + l = m.SrvVSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m 
*GetSrvVSchemasRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetSrvVSchemasResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.SrvVSchemas) > 0 { + for k, v := range m.SrvVSchemas { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetTabletRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetTabletResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Tablet != nil { + l = m.Tablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetTabletsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.Strict { + n += 2 + } + if len(m.TabletAliases) > 0 { + for _, e := range m.TabletAliases { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.TabletType != 0 { + n += 1 + sov(uint64(m.TabletType)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetTabletsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Tablets) > 0 { + for _, e := range m.Tablets { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m 
*GetTopologyPathRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetTopologyPathResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cell != nil { + l = m.Cell.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TopologyCell) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Children) > 0 { + for _, s := range m.Children { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetVSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetVersionRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetVersionResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetVSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VSchema != nil { + l = m.VSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetWorkflowsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.ActiveOnly { + n += 2 + } + if m.NameOnly { + n += 2 + } + l = 
len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.IncludeLogs { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *GetWorkflowsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Workflows) > 0 { + for _, e := range m.Workflows { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *InitShardPrimaryRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.PrimaryElectTabletAlias != nil { + l = m.PrimaryElectTabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Force { + n += 2 + } + if m.WaitReplicasTimeout != nil { + l = m.WaitReplicasTimeout.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *InitShardPrimaryResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *LaunchSchemaMigrationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LaunchSchemaMigrationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RowsAffectedByShard) > 0 { + for k, v := range m.RowsAffectedByShard { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + sov(uint64(v)) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *LookupVindexCreateRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = 
len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.Vindex != nil { + l = m.Vindex.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.ContinueAfterCopyWithOwner { + n += 2 + } + if len(m.TabletTypes) > 0 { + l = 0 + for _, e := range m.TabletTypes { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + if m.TabletSelectionPreference != 0 { + n += 1 + sov(uint64(m.TabletSelectionPreference)) + } + n += len(m.unknownFields) + return n +} + +func (m *LookupVindexCreateResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *LookupVindexExternalizeRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TableKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LookupVindexExternalizeResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.WorkflowDeleted { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *MaterializeCreateRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Settings != nil { + l = m.Settings.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *MaterializeCreateResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *MigrateCreateRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.SourceKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = 
len(m.TargetKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.MountName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.TabletTypes) > 0 { + l = 0 + for _, e := range m.TabletTypes { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + if m.TabletSelectionPreference != 0 { + n += 1 + sov(uint64(m.TabletSelectionPreference)) + } + if m.AllTables { + n += 2 + } + if len(m.IncludeTables) > 0 { + for _, s := range m.IncludeTables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ExcludeTables) > 0 { + for _, s := range m.ExcludeTables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.SourceTimeZone) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.OnDdl) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.StopAfterCopy { + n += 2 + } + if m.DropForeignKeys { + n += 2 + } + if m.DeferSecondaryKeys { + n += 2 + } + if m.AutoStart { + n += 3 + } + if m.NoRoutingRules { + n += 3 + } + n += len(m.unknownFields) + return n +} + +func (m *MigrateCompleteRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TargetKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.KeepData { + n += 2 + } + if m.KeepRoutingRules { + n += 2 + } + if m.RenameTables { + n += 2 + } + if m.DryRun { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *MigrateCompleteResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Summary) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.DryRunResults) > 0 { + for _, s := range m.DryRunResults { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *MountRegisterRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = 
len(m.TopoType) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TopoServer) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TopoRoot) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *MountRegisterResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *MountUnregisterRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *MountUnregisterResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *MountShowRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *MountShowResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TopoType) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TopoServer) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TopoRoot) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *MountListRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *MountListResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *MoveTablesCreateRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + 
sov(uint64(l)) + } + l = len(m.SourceKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TargetKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.TabletTypes) > 0 { + l = 0 + for _, e := range m.TabletTypes { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + if m.TabletSelectionPreference != 0 { + n += 1 + sov(uint64(m.TabletSelectionPreference)) + } + if len(m.SourceShards) > 0 { + for _, s := range m.SourceShards { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.AllTables { + n += 2 + } + if len(m.IncludeTables) > 0 { + for _, s := range m.IncludeTables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ExcludeTables) > 0 { + for _, s := range m.ExcludeTables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.ExternalClusterName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.SourceTimeZone) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.OnDdl) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.StopAfterCopy { + n += 2 + } + if m.DropForeignKeys { + n += 2 + } + if m.DeferSecondaryKeys { + n += 3 + } + if m.AutoStart { + n += 3 + } + if m.NoRoutingRules { + n += 3 + } + if m.AtomicCopy { + n += 3 + } + n += len(m.unknownFields) + return n +} + +func (m *MoveTablesCreateResponse_TabletInfo) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Tablet != nil { + l = m.Tablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Created { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *MoveTablesCreateResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Summary) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Details) > 0 { + for _, e := range m.Details { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m 
*MoveTablesCompleteRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TargetKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.KeepData { + n += 2 + } + if m.KeepRoutingRules { + n += 2 + } + if m.RenameTables { + n += 2 + } + if m.DryRun { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *MoveTablesCompleteResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Summary) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.DryRunResults) > 0 { + for _, s := range m.DryRunResults { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *PingTabletRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PingTabletResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PlannedReparentShardRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.NewPrimary != nil { + l = m.NewPrimary.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.AvoidPrimary != nil { + l = m.AvoidPrimary.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.WaitReplicasTimeout != nil { + l = m.WaitReplicasTimeout.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PlannedReparentShardResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.PromotedPrimary != nil { + l = 
m.PromotedPrimary.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RebuildKeyspaceGraphRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.AllowPartial { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RebuildKeyspaceGraphResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RebuildVSchemaGraphRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RebuildVSchemaGraphResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RefreshStateRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RefreshStateResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RefreshStateByShardRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RefreshStateByShardResponse) SizeVT() (n int) { + if m == nil { + return 
0 + } + var l int + _ = l + if m.IsPartialRefresh { + n += 2 + } + l = len(m.PartialRefreshDetails) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReloadSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReloadSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ReloadSchemaKeyspaceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.WaitPosition) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.IncludePrimary { + n += 2 + } + if m.Concurrency != 0 { + n += 1 + sov(uint64(m.Concurrency)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReloadSchemaKeyspaceResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ReloadSchemaShardRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.WaitPosition) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.IncludePrimary { + n += 2 + } + if m.Concurrency != 0 { + n += 1 + sov(uint64(m.Concurrency)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReloadSchemaShardResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RemoveBackupRequest) 
SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RemoveBackupResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RemoveKeyspaceCellRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Cell) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Force { + n += 2 + } + if m.Recursive { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RemoveKeyspaceCellResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RemoveShardCellRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ShardName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Cell) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Force { + n += 2 + } + if m.Recursive { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RemoveShardCellResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ReparentTabletRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Tablet != nil { + l = m.Tablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReparentTabletResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Primary 
!= nil { + l = m.Primary.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReshardCreateRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.SourceShards) > 0 { + for _, s := range m.SourceShards { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.TargetShards) > 0 { + for _, s := range m.TargetShards { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.TabletTypes) > 0 { + l = 0 + for _, e := range m.TabletTypes { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + if m.TabletSelectionPreference != 0 { + n += 1 + sov(uint64(m.TabletSelectionPreference)) + } + if m.SkipSchemaCopy { + n += 2 + } + l = len(m.OnDdl) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.StopAfterCopy { + n += 2 + } + if m.DeferSecondaryKeys { + n += 2 + } + if m.AutoStart { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RestoreFromBackupRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.BackupTime != nil { + l = m.BackupTime.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.RestoreToPos) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.DryRun { + n += 2 + } + if m.RestoreToTimestamp != nil { + l = m.RestoreToTimestamp.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RestoreFromBackupResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = 
len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Event != nil { + l = m.Event.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RetrySchemaMigrationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RetrySchemaMigrationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RowsAffectedByShard) > 0 { + for k, v := range m.RowsAffectedByShard { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + sov(uint64(v)) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RunHealthCheckRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RunHealthCheckResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SetKeyspaceDurabilityPolicyRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.DurabilityPolicy) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetKeyspaceDurabilityPolicyResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Keyspace != nil { + l = m.Keyspace.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetKeyspaceServedFromRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TabletType != 0 { + n 
+= 1 + sov(uint64(m.TabletType)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.Remove { + n += 2 + } + l = len(m.SourceKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetKeyspaceServedFromResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Keyspace != nil { + l = m.Keyspace.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetKeyspaceShardingInfoRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Force { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SetKeyspaceShardingInfoResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Keyspace != nil { + l = m.Keyspace.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetShardIsPrimaryServingRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.IsServing { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SetShardIsPrimaryServingResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Shard != nil { + l = m.Shard.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetShardTabletControlRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TabletType != 0 { + n += 1 + sov(uint64(m.TabletType)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + 
} + if len(m.DeniedTables) > 0 { + for _, s := range m.DeniedTables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.DisableQueryService { + n += 2 + } + if m.Remove { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SetShardTabletControlResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Shard != nil { + l = m.Shard.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetWritableRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Writable { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SetWritableResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ShardReplicationAddRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ShardReplicationAddResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ShardReplicationFixRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Cell) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ShardReplicationFixResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + 
+func (m *ShardReplicationPositionsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ShardReplicationPositionsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ReplicationStatuses) > 0 { + for k, v := range m.ReplicationStatuses { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + if len(m.TabletMap) > 0 { + for k, v := range m.TabletMap { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ShardReplicationRemoveRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ShardReplicationRemoveResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SleepTabletRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Duration != nil { + l = m.Duration.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SleepTabletResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} 
+ +func (m *SourceShardAddRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Uid != 0 { + n += 1 + sov(uint64(m.Uid)) + } + l = len(m.SourceKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.SourceShard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.KeyRange != nil { + l = m.KeyRange.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if len(m.Tables) > 0 { + for _, s := range m.Tables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *SourceShardAddResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Shard != nil { + l = m.Shard.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SourceShardDeleteRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Uid != 0 { + n += 1 + sov(uint64(m.Uid)) + } + n += len(m.unknownFields) + return n +} + +func (m *SourceShardDeleteResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Shard != nil { + l = m.Shard.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StartReplicationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StartReplicationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *StopReplicationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = 
m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StopReplicationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *TabletExternallyReparentedRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Tablet != nil { + l = m.Tablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TabletExternallyReparentedResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.NewPrimary != nil { + l = m.NewPrimary.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.OldPrimary != nil { + l = m.OldPrimary.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateCellInfoRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.CellInfo != nil { + l = m.CellInfo.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateCellInfoResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.CellInfo != nil { + l = m.CellInfo.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateCellsAliasRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.CellsAlias != nil { + l = m.CellsAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateCellsAliasResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + 
n += 1 + l + sov(uint64(l)) + } + if m.CellsAlias != nil { + l = m.CellsAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PingTablets { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, s := range m.Results { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ResultsByKeyspace) > 0 { + for k, v := range m.ResultsByKeyspace { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateKeyspaceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.PingTablets { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateKeyspaceResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, s := range m.Results { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ResultsByShard) > 0 { + for k, v := range m.ResultsByShard { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateSchemaKeyspaceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.ExcludeTables) > 0 { + for _, s := range m.ExcludeTables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.IncludeViews { + n += 2 + } + if 
m.SkipNoPrimary { + n += 2 + } + if m.IncludeVschema { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateSchemaKeyspaceResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, s := range m.Results { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ResultsByShard) > 0 { + for k, v := range m.ResultsByShard { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateShardRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.PingTablets { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateShardResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, s := range m.Results { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateVersionKeyspaceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateVersionKeyspaceResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, s := range m.Results { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ResultsByShard) > 0 { + for k, v := range m.ResultsByShard { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m 
*ValidateVersionShardRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateVersionShardResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, s := range m.Results { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateVSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Shards) > 0 { + for _, s := range m.Shards { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ExcludeTables) > 0 { + for _, s := range m.ExcludeTables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.IncludeViews { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateVSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, s := range m.Results { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ResultsByShard) > 0 { + for k, v := range m.ResultsByShard { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *VDiffCreateRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TargetKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.SourceCells) > 0 { + for _, s := range m.SourceCells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if 
len(m.TargetCells) > 0 { + for _, s := range m.TargetCells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.TabletTypes) > 0 { + l = 0 + for _, e := range m.TabletTypes { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + if m.TabletSelectionPreference != 0 { + n += 1 + sov(uint64(m.TabletSelectionPreference)) + } + if len(m.Tables) > 0 { + for _, s := range m.Tables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.Limit != 0 { + n += 1 + sov(uint64(m.Limit)) + } + if m.FilteredReplicationWaitTime != nil { + l = m.FilteredReplicationWaitTime.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.DebugQuery { + n += 2 + } + if m.OnlyPKs { + n += 2 + } + if m.UpdateTableStats { + n += 2 + } + if m.MaxExtraRowsToCompare != 0 { + n += 1 + sov(uint64(m.MaxExtraRowsToCompare)) + } + if m.Wait { + n += 2 + } + if m.WaitUpdateInterval != nil { + l = m.WaitUpdateInterval.SizeVT() + n += 2 + l + sov(uint64(l)) + } + if m.AutoRetry { + n += 3 + } + if m.Verbose { + n += 3 + } + n += len(m.unknownFields) + return n +} + +func (m *VDiffCreateResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UUID) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VDiffDeleteRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TargetKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Arg) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VDiffDeleteResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *VDiffResumeRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TargetKeyspace) + if l > 0 { + n += 1 + l + 
sov(uint64(l)) + } + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VDiffResumeResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *VDiffShowRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TargetKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Arg) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VDiffShowResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TabletResponses) > 0 { + for k, v := range m.TabletResponses { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *VDiffStopRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TargetKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VDiffStopResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *WorkflowDeleteRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.KeepData { + n += 2 + } + if m.KeepRoutingRules { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowDeleteResponse_TabletInfo) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + 
_ = l + if m.Tablet != nil { + l = m.Tablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Deleted { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowDeleteResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Summary) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Details) > 0 { + for _, e := range m.Details { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowStatusRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowStatusResponse_TableCopyState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RowsCopied != 0 { + n += 1 + sov(uint64(m.RowsCopied)) + } + if m.RowsTotal != 0 { + n += 1 + sov(uint64(m.RowsTotal)) + } + if m.RowsPercentage != 0 { + n += 5 + } + if m.BytesCopied != 0 { + n += 1 + sov(uint64(m.BytesCopied)) + } + if m.BytesTotal != 0 { + n += 1 + sov(uint64(m.BytesTotal)) + } + if m.BytesPercentage != 0 { + n += 5 + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowStatusResponse_ShardStreamState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sov(uint64(m.Id)) + } + if m.Tablet != nil { + l = m.Tablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.SourceShard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Status) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Info) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowStatusResponse_ShardStreams) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Streams) > 0 { 
+ for _, e := range m.Streams { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowStatusResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TableCopyState) > 0 { + for k, v := range m.TableCopyState { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + if len(m.ShardStreams) > 0 { + for k, v := range m.ShardStreams { _ = k _ = v l = 0 if v != nil { l = v.SizeVT() } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + l = len(m.TrafficState) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowSwitchTrafficRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.TabletTypes) > 0 { + l = 0 + for _, e := range m.TabletTypes { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + if m.MaxReplicationLagAllowed != nil { + l = m.MaxReplicationLagAllowed.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.EnableReverseReplication { + n += 2 + } + if m.Direction != 0 { + n += 1 + sov(uint64(m.Direction)) + } + if m.Timeout != nil { + l = m.Timeout.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.DryRun { + n += 2 + } + if m.InitializeTargetSequences { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowSwitchTrafficResponse) SizeVT() (n int) { + 
if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Summary) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.StartState) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.CurrentState) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.DryRunResults) > 0 { + for _, s := range m.DryRunResults { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowUpdateRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TabletRequest != nil { + l = m.TabletRequest.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowUpdateResponse_TabletInfo) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Tablet != nil { + l = m.Tablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Changed { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowUpdateResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Summary) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Details) > 0 { + for _, e := range m.Details { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ExecuteVtctlCommandRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
ExecuteVtctlCommandRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteVtctlCommandRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActionTimeout", wireType) + } + m.ActionTimeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ActionTimeout |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteVtctlCommandResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteVtctlCommandResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteVtctlCommandResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Event == nil { + m.Event = &logutil.Event{} + } + if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TableMaterializeSettings) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TableMaterializeSettings: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TableMaterializeSettings: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetTable", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetTable = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 
{ + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateDdl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CreateDdl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MaterializeSettings: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MaterializeSettings: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceKeyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetKeyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StopAfterCopy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StopAfterCopy = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableSettings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableSettings = append(m.TableSettings, &TableMaterializeSettings{}) + if err := m.TableSettings[len(m.TableSettings)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cell = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TabletTypes = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalCluster", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalCluster = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: 
+ if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaterializationIntent", wireType) + } + m.MaterializationIntent = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaterializationIntent |= MaterializationIntent(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceTimeZone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceTimeZone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetTimeZone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetTimeZone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceShards", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceShards = append(m.SourceShards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OnDdl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeferSecondaryKeys", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DeferSecondaryKeys = bool(v != 0) + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", wireType) + } + m.TabletSelectionPreference = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletSelectionPreference |= tabletmanagerdata.TabletSelectionPreference(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AtomicCopy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AtomicCopy = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Keyspace) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Keyspace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Keyspace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Keyspace == nil { + m.Keyspace = &topodata.Keyspace{} + } + if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SchemaMigration) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SchemaMigration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SchemaMigration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > 
l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schema = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Table = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MigrationStatement", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MigrationStatement = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + m.Strategy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Strategy |= SchemaMigration_Strategy(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AddedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AddedAt == nil { + m.AddedAt = &vttime.Time{} + } + if err := m.AddedAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestedAt == nil { + m.RequestedAt = &vttime.Time{} + } + if err := m.RequestedAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReadyAt == nil { + m.ReadyAt = &vttime.Time{} + } + if err := m.ReadyAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartedAt == nil { + m.StartedAt = &vttime.Time{} + } + if err := m.StartedAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LivenessTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LivenessTimestamp == nil { + m.LivenessTimestamp = &vttime.Time{} + } + if err := m.LivenessTimestamp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + if m.CompletedAt == nil { + m.CompletedAt = &vttime.Time{} + } + if err := m.CompletedAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CleanedUpAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CleanedUpAt == nil { + m.CleanedUpAt = &vttime.Time{} + } + if err := m.CleanedUpAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= SchemaMigration_Status(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 18: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Artifacts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Artifacts = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Retries", wireType) + } + m.Retries = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Retries |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tablet == nil { + m.Tablet = &topodata.TabletAlias{} + } + if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletFailure", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletFailure = bool(v != 0) + case 22: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Progress", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Progress = float32(math.Float32frombits(v)) + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MigrationContext", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MigrationContext = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DdlAction", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DdlAction = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 26: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EtaSeconds", wireType) + } + m.EtaSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EtaSeconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 27: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RowsCopied", wireType) + } + m.RowsCopied = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RowsCopied |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 28: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TableRows", wireType) + } + m.TableRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TableRows |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 29: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AddedUniqueKeys", wireType) + } + m.AddedUniqueKeys = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AddedUniqueKeys |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 30: + if wireType != 0 { + return fmt.Errorf("proto: 
wrong wireType = %d for field RemovedUniqueKeys", wireType) + } + m.RemovedUniqueKeys = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RemovedUniqueKeys |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 31: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogFile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 32: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArtifactRetention", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ArtifactRetention == nil { + m.ArtifactRetention = &vttime.Duration{} + } + if err := m.ArtifactRetention.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 33: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PostponeCompletion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PostponeCompletion = bool(v != 0) + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RemovedUniqueKeyNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RemovedUniqueKeyNames = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 35: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedNoDefaultColumnNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DroppedNoDefaultColumnNames = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 36: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpandedColumnNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 
{ + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExpandedColumnNames = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 37: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RevertibleNotes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RevertibleNotes = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 38: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowConcurrent", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowConcurrent = bool(v != 0) + case 39: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RevertedUuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RevertedUuid = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 40: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsView", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsView = bool(v != 0) + case 41: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyToComplete", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadyToComplete = bool(v != 0) + case 42: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VitessLivenessIndicator", wireType) + } + m.VitessLivenessIndicator = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.VitessLivenessIndicator |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 43: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field UserThrottleRatio", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.UserThrottleRatio = float32(math.Float32frombits(v)) + case 44: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpecialPlan", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := 
iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpecialPlan = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 45: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastThrottledAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastThrottledAt == nil { + m.LastThrottledAt = &vttime.Time{} + } + if err := m.LastThrottledAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 46: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ComponentThrottled", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ComponentThrottled = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 47: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CancelledAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CancelledAt == nil { + m.CancelledAt = &vttime.Time{} + } + if err := m.CancelledAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 48: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PostponeLaunch", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PostponeLaunch = bool(v != 0) + case 49: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 50: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CutoverAttempts", wireType) + } + m.CutoverAttempts = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CutoverAttempts |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 51: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsImmediateOperation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsImmediateOperation = bool(v != 0) + case 52: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReviewedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReviewedAt == nil { + m.ReviewedAt = &vttime.Time{} + } + if err := m.ReviewedAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 53: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyToCompleteAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReadyToCompleteAt == nil { + m.ReadyToCompleteAt = &vttime.Time{} + } + if err := m.ReadyToCompleteAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Shard) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Shard: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Shard: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Shard == nil { + m.Shard = &topodata.Shard{} + } + if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow_ReplicationLocation) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Workflow_ReplicationLocation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Workflow_ReplicationLocation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow_ShardStream) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Workflow_ShardStream: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Workflow_ShardStream: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Streams = append(m.Streams, &Workflow_Stream{}) + if err := m.Streams[len(m.Streams)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletControls", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TabletControls = append(m.TabletControls, &topodata.Shard_TabletControl{}) + if err := m.TabletControls[len(m.TabletControls)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsPrimaryServing", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsPrimaryServing = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow_Stream_CopyState) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Workflow_Stream_CopyState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Workflow_Stream_CopyState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Table = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastPk", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastPk = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow_Stream_Log) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Workflow_Stream_Log: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Workflow_Stream_Log: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StreamId", wireType) + } + m.StreamId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StreamId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreatedAt == nil { + m.CreatedAt = &vttime.Time{} + } + if err := m.CreatedAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdatedAt == nil { + m.UpdatedAt = &vttime.Time{} + } + if err := m.UpdatedAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = 
append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow_Stream_ThrottlerStatus) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Workflow_Stream_ThrottlerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Workflow_Stream_ThrottlerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ComponentThrottled", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ComponentThrottled = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeThrottled", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx 
+ msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TimeThrottled == nil { + m.TimeThrottled = &vttime.Time{} + } + if err := m.TimeThrottled.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Workflow_Stream: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Workflow_Stream: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tablet == nil { + m.Tablet = &topodata.TabletAlias{} + } + if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinlogSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BinlogSource == nil { + m.BinlogSource = &binlogdata.BinlogSource{} + } + if err := m.BinlogSource.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StopPosition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StopPosition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) + } + var stringLen uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DbName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TransactionTimestamp == nil { + m.TransactionTimestamp = &vttime.Time{} + } + if err := m.TransactionTimestamp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUpdated", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TimeUpdated == nil { + m.TimeUpdated = &vttime.Time{} + } + if err := m.TimeUpdated.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CopyStates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CopyStates = append(m.CopyStates, &Workflow_Stream_CopyState{}) + if err := m.CopyStates[len(m.CopyStates)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Logs = append(m.Logs, 
&Workflow_Stream_Log{}) + if err := m.Logs[len(m.Logs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogFetchError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogFetchError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RowsCopied", wireType) + } + m.RowsCopied = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RowsCopied |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ThrottlerStatus", wireType) + } + 
var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ThrottlerStatus == nil { + m.ThrottlerStatus = &Workflow_Stream_ThrottlerStatus{} + } + if err := m.ThrottlerStatus.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Workflow: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Workflow: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Source == nil { + m.Source = &Workflow_ReplicationLocation{} + } + if err := m.Source.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Workflow_ReplicationLocation{} + } + if err := m.Target.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxVReplicationLag", wireType) + } + m.MaxVReplicationLag = 0 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxVReplicationLag |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardStreams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ShardStreams == nil { + m.ShardStreams = make(map[string]*Workflow_ShardStream) + } + var mapkey string + var mapvalue *Workflow_ShardStream + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Workflow_ShardStream{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ShardStreams[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WorkflowType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowSubType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + 
} + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WorkflowSubType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxVReplicationTransactionLag", wireType) + } + m.MaxVReplicationTransactionLag = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxVReplicationTransactionLag |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeferSecondaryKeys", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DeferSecondaryKeys = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddCellInfoRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddCellInfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddCellInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.CellInfo == nil { + m.CellInfo = &topodata.CellInfo{} + } + if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddCellInfoResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddCellInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddCellsAliasRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddCellsAliasRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddCellsAliasRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddCellsAliasResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddCellsAliasResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddCellsAliasResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplyRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyRoutingRulesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoutingRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RoutingRules == nil { + m.RoutingRules = &vschema.RoutingRules{} + } + if err := m.RoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipRebuild", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipRebuild = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field RebuildCells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RebuildCells = append(m.RebuildCells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplyRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyRoutingRulesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields 
= append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplyShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyShardRoutingRulesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyShardRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardRoutingRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ShardRoutingRules == nil { + m.ShardRoutingRules = &vschema.ShardRoutingRules{} + } + if err := m.ShardRoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipRebuild", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
m.SkipRebuild = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RebuildCells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RebuildCells = append(m.RebuildCells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplyShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyShardRoutingRulesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyShardRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplySchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplySchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sql = append(m.Sql, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DdlStrategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DdlStrategy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UuidList", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UuidList = append(m.UuidList, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MigrationContext", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MigrationContext = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WaitReplicasTimeout == nil { + m.WaitReplicasTimeout = &vttime.Duration{} + } + if err := m.WaitReplicasTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CallerId == nil { + m.CallerId = &vtrpc.CallerID{} + } + if err := m.CallerId.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BatchSize", wireType) + } + m.BatchSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BatchSize |= int64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplySchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplySchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UuidList", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UuidList = append(m.UuidList, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RowsAffectedByShard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RowsAffectedByShard == nil { + m.RowsAffectedByShard = make(map[string]uint64) + } + var mapkey string + var mapvalue uint64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + 
} + iNdEx += skippy + } + } + m.RowsAffectedByShard[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyVSchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipRebuild", wireType) + } + var v int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipRebuild = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DryRun = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VSchema == nil { + m.VSchema = &vschema.Keyspace{} + } + if err := m.VSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sql = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplyVSchemaResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyVSchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VSchema == nil { + m.VSchema = &vschema.Keyspace{} + } + if err := m.VSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BackupRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen 
+ if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrimary", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowPrimary = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) + } + m.Concurrency = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Concurrency |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IncrementalFromPos", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IncrementalFromPos = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpgradeSafe", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UpgradeSafe = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BackupResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BackupResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Event == nil { + m.Event = &logutil.Event{} + } + if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BackupShardRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BackupShardRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BackupShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrimary", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowPrimary = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) + } + m.Concurrency = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Concurrency |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpgradeSafe", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UpgradeSafe = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IncrementalFromPos", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IncrementalFromPos = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CancelSchemaMigrationRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CancelSchemaMigrationRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CancelSchemaMigrationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CancelSchemaMigrationResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CancelSchemaMigrationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CancelSchemaMigrationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RowsAffectedByShard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RowsAffectedByShard == nil { + m.RowsAffectedByShard = make(map[string]uint64) + } + var mapkey string + var mapvalue uint64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.RowsAffectedByShard[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChangeTabletTypeRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChangeTabletTypeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChangeTabletTypeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DbType", wireType) + } + m.DbType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DbType |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
DryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DryRun = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChangeTabletTypeResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChangeTabletTypeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChangeTabletTypeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeforeTablet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BeforeTablet == nil { + m.BeforeTablet = 
&topodata.Tablet{} + } + if err := m.BeforeTablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AfterTablet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AfterTablet == nil { + m.AfterTablet = &topodata.Tablet{} + } + if err := m.AfterTablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WasDryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.WasDryRun = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CleanupSchemaMigrationRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CleanupSchemaMigrationRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CleanupSchemaMigrationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { 
+ return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CleanupSchemaMigrationResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CleanupSchemaMigrationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CleanupSchemaMigrationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RowsAffectedByShard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RowsAffectedByShard == nil { + m.RowsAffectedByShard = make(map[string]uint64) + } + var mapkey string + var mapvalue uint64 + for iNdEx < postIndex { + 
entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.RowsAffectedByShard[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompleteSchemaMigrationRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompleteSchemaMigrationRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompleteSchemaMigrationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 
{ + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompleteSchemaMigrationResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompleteSchemaMigrationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompleteSchemaMigrationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RowsAffectedByShard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RowsAffectedByShard == nil { + m.RowsAffectedByShard = make(map[string]uint64) + } + var mapkey string + var mapvalue uint64 + for iNdEx < postIndex { + 
entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.RowsAffectedByShard[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateKeyspaceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowEmptyVSchema", wireType) + } + var v int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowEmptyVSchema = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServedFroms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServedFroms = append(m.ServedFroms, &topodata.Keyspace_ServedFrom{}) + if err := m.ServedFroms[len(m.ServedFroms)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= topodata.KeyspaceType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BaseKeyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BaseKeyspace = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SnapshotTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SnapshotTime == nil { + m.SnapshotTime = &vttime.Time{} + } + if err := m.SnapshotTime.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DurabilityPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DurabilityPolicy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SidecarDbName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SidecarDbName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateKeyspaceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Keyspace == nil { + m.Keyspace = &Keyspace{} + } + if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, 
err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateShardRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateShardRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShardName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeParent", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeParent = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateShardResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateShardResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Keyspace == nil { + m.Keyspace = &Keyspace{} + } + if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Shard == nil { + m.Shard = &Shard{} + } + if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardAlreadyExists", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ShardAlreadyExists = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteCellInfoRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteCellInfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteCellInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteCellInfoResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteCellInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteCellsAliasRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteCellsAliasRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteCellsAliasRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteCellsAliasResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteCellsAliasResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteCellsAliasResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteKeyspaceRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteKeyspaceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Recursive = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteKeyspaceResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteKeyspaceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteShardsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteShardsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteShardsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shards = append(m.Shards, &Shard{}) + if err := m.Shards[len(m.Shards)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Recursive = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EvenIfServing", wireType) + } + 
var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EvenIfServing = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteShardsResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteShardsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteShardsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = 
append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteSrvVSchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteSrvVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cell = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteSrvVSchemaResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteSrvVSchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteSrvVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteTabletsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteTabletsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteTabletsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAliases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TabletAliases = append(m.TabletAliases, &topodata.TabletAlias{}) + if err := m.TabletAliases[len(m.TabletAliases)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrimary", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowPrimary = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := 
skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteTabletsResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteTabletsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteTabletsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EmergencyReparentShardRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EmergencyReparentShardRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EmergencyReparentShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { 
+ return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewPrimary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NewPrimary == nil { + m.NewPrimary = &topodata.TabletAlias{} + } + if err := m.NewPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreReplicas", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IgnoreReplicas = append(m.IgnoreReplicas, &topodata.TabletAlias{}) + if err := m.IgnoreReplicas[len(m.IgnoreReplicas)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WaitReplicasTimeout == nil { + m.WaitReplicasTimeout = &vttime.Duration{} + } + if err := m.WaitReplicasTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreventCrossCellPromotion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PreventCrossCellPromotion = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitForAllTablets", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.WaitForAllTablets = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EmergencyReparentShardResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EmergencyReparentShardResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EmergencyReparentShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 
0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PromotedPrimary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PromotedPrimary == nil { + m.PromotedPrimary = &topodata.TabletAlias{} + } + if err := m.PromotedPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &logutil.Event{}) + if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteFetchAsAppRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteFetchAsAppRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + } + m.MaxRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRows |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UsePool", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UsePool = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteFetchAsAppResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteFetchAsAppResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteFetchAsAppResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteFetchAsDBARequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteFetchAsDBARequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteFetchAsDBARequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + } + m.MaxRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRows |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DisableBinlogs", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DisableBinlogs = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReloadSchema", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReloadSchema = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy } } - n += len(m.unknownFields) - return n -} - -func (m *WorkflowUpdateRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.TabletRequest != nil { - l = m.TabletRequest.SizeVT() - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *WorkflowUpdateResponse_TabletInfo) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Tablet) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Changed { - n += 2 - } - n += len(m.unknownFields) - return n -} -func (m *WorkflowUpdateResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Summary) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if len(m.Details) > 0 { - for _, e := range m.Details { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + if iNdEx > l { + return io.ErrUnexpectedEOF } - n += len(m.unknownFields) - return n -} - -func sov(x uint64) (n int) { - return (bits.Len64(x|1) + 6) / 7 -} -func soz(x uint64) (n int) { - return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return nil } -func (m *ExecuteVtctlCommandRequest) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsDBAResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13525,17 +34739,17 @@ func (m *ExecuteVtctlCommandRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteVtctlCommandRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteFetchAsDBAResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteVtctlCommandRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteFetchAsDBAResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13545,43 +34759,28 @@ func (m *ExecuteVtctlCommandRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActionTimeout", wireType) + if m.Result == nil { + m.Result = &query.QueryResult{} } - m.ActionTimeout = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ActionTimeout |= int64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -13604,7 +34803,7 @@ func (m *ExecuteVtctlCommandRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteVtctlCommandResponse) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13627,15 +34826,15 @@ func (m *ExecuteVtctlCommandResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteVtctlCommandResponse: wiretype end group for non-group") + return fmt.Errorf("proto: 
ExecuteHookRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteVtctlCommandResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteHookRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13662,10 +34861,46 @@ func (m *ExecuteVtctlCommandResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Event == nil { - m.Event = &logutil.Event{} + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} } - if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletHookRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletHookRequest == nil { + m.TabletHookRequest = &tabletmanagerdata.ExecuteHookRequest{} + } + if err := m.TabletHookRequest.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13691,7 +34926,7 @@ func (m *ExecuteVtctlCommandResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *TableMaterializeSettings) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { l 
:= len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13714,17 +34949,17 @@ func (m *TableMaterializeSettings) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TableMaterializeSettings: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteHookResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TableMaterializeSettings: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteHookResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetTable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HookResult", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13734,27 +34969,82 @@ func (m *TableMaterializeSettings) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.TargetTable = string(dAtA[iNdEx:postIndex]) + if m.HookResult == nil { + m.HookResult = &tabletmanagerdata.ExecuteHookResponse{} + } + if err := m.HookResult.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FindAllShardsInKeyspaceRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FindAllShardsInKeyspaceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FindAllShardsInKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceExpression", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13782,13 +35072,64 @@ func (m *TableMaterializeSettings) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SourceExpression = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FindAllShardsInKeyspaceResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FindAllShardsInKeyspaceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FindAllShardsInKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreateDdl", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13798,23 +35139,120 @@ func (m *TableMaterializeSettings) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.CreateDdl = string(dAtA[iNdEx:postIndex]) + if m.Shards == nil { + m.Shards = make(map[string]*Shard) + } + var mapkey string + var mapvalue *Shard + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire 
|= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Shard{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Shards[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -13838,7 +35276,7 @@ func (m *TableMaterializeSettings) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { +func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ 
-13861,15 +35299,15 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MaterializeSettings: wiretype end group for non-group") + return fmt.Errorf("proto: GetBackupsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MaterializeSettings: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetBackupsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13897,11 +35335,11 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Workflow = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceKeyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13929,13 +35367,13 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SourceKeyspace = string(dAtA[iNdEx:postIndex]) + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetKeyspace", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) } - var stringLen uint64 + m.Limit = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13945,27 +35383,14 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
stringLen |= uint64(b&0x7F) << shift + m.Limit |= uint32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TargetKeyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StopAfterCopy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Detailed", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -13982,10 +35407,80 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { break } } - m.StopAfterCopy = bool(v != 0) + m.Detailed = bool(v != 0) case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DetailedLimit", wireType) + } + m.DetailedLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DetailedLimit |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetBackupsResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetBackupsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetBackupsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TableSettings", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backups", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14012,12 +35507,63 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TableSettings = append(m.TableSettings, &TableMaterializeSettings{}) - if err := m.TableSettings[len(m.TableSettings)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Backups = append(m.Backups, &mysqlctl.BackupInfo{}) + if err := m.Backups[len(m.Backups)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellInfoRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellInfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) } @@ -14049,11 +35595,62 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { } m.Cell = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellInfoResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14063,78 +35660,133 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.TabletTypes = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalCluster", wireType) + if m.CellInfo == nil { + m.CellInfo = &topodata.CellInfo{} } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellInfoNamesRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellInfoNamesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellInfoNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.ExternalCluster = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaterializationIntent", wireType) + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellInfoNamesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - m.MaterializationIntent = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaterializationIntent |= MaterializationIntent(b&0x7F) << shift - if b < 0x80 { - break - } + if iNdEx >= l { + return io.ErrUnexpectedEOF } - case 10: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellInfoNamesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellInfoNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceTimeZone", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14162,45 +35814,115 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SourceTimeZone = string(dAtA[iNdEx:postIndex]) + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetTimeZone", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + 
default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellsAliasesRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellsAliasesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellsAliasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.TargetTimeZone = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellsAliasesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellsAliasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceShards", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14210,76 +35932,121 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.SourceShards = append(m.SourceShards, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType) + if m.Aliases == nil { + m.Aliases = make(map[string]*topodata.CellsAlias) } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF + var mapkey string + var mapvalue *topodata.CellsAlias + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &topodata.CellsAlias{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > 
postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OnDdl = string(dAtA[iNdEx:postIndex]) + m.Aliases[mapkey] = mapvalue iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeferSecondaryKeys", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DeferSecondaryKeys = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -14302,7 +36069,7 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *Keyspace) UnmarshalVT(dAtA []byte) error { +func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14325,47 +36092,15 @@ func (m *Keyspace) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Keyspace: wiretype end group for non-group") + return fmt.Errorf("proto: GetFullStatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Keyspace: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetFullStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - 
break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14392,10 +36127,10 @@ func (m *Keyspace) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Keyspace == nil { - m.Keyspace = &topodata.Keyspace{} + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} } - if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14421,7 +36156,7 @@ func (m *Keyspace) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *Shard) UnmarshalVT(dAtA []byte) error { +func (m *GetFullStatusResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14444,79 +36179,15 @@ func (m *Shard) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Shard: wiretype end group for non-group") + return fmt.Errorf("proto: GetFullStatusResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Shard: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetFullStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l 
{ - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14543,10 +36214,10 @@ func (m *Shard) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Shard == nil { - m.Shard = &topodata.Shard{} + if m.Status == nil { + m.Status = &replicationdata.FullStatus{} } - if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14572,7 +36243,7 @@ func (m *Shard) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *Workflow_ReplicationLocation) UnmarshalVT(dAtA []byte) error { +func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx 
< l { @@ -14595,76 +36266,12 @@ func (m *Workflow_ReplicationLocation) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Workflow_ReplicationLocation: wiretype end group for non-group") + return fmt.Errorf("proto: GetKeyspacesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Workflow_ReplicationLocation: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetKeyspacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := 
skip(dAtA[iNdEx:]) @@ -14687,7 +36294,7 @@ func (m *Workflow_ReplicationLocation) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *Workflow_ShardStream) UnmarshalVT(dAtA []byte) error { +func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14710,49 +36317,15 @@ func (m *Workflow_ShardStream) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Workflow_ShardStream: wiretype end group for non-group") + return fmt.Errorf("proto: GetKeyspacesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Workflow_ShardStream: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetKeyspacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Streams = append(m.Streams, &Workflow_Stream{}) - if err := m.Streams[len(m.Streams)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletControls", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14779,31 +36352,11 @@ func (m *Workflow_ShardStream) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TabletControls 
= append(m.TabletControls, &topodata.Shard_TabletControl{}) - if err := m.TabletControls[len(m.TabletControls)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Keyspaces = append(m.Keyspaces, &Keyspace{}) + if err := m.Keyspaces[len(m.Keyspaces)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IsPrimaryServing", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IsPrimaryServing = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -14826,7 +36379,7 @@ func (m *Workflow_ShardStream) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *Workflow_Stream_CopyState) UnmarshalVT(dAtA []byte) error { +func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14849,47 +36402,15 @@ func (m *Workflow_Stream_CopyState) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Workflow_Stream_CopyState: wiretype end group for non-group") + return fmt.Errorf("proto: GetKeyspaceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Workflow_Stream_CopyState: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 
0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Table = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastPk", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14917,7 +36438,7 @@ func (m *Workflow_Stream_CopyState) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.LastPk = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -14941,7 +36462,7 @@ func (m *Workflow_Stream_CopyState) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *Workflow_Stream_Log) UnmarshalVT(dAtA []byte) error { +func (m *GetKeyspaceResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14964,55 +36485,17 @@ func (m *Workflow_Stream_Log) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Workflow_Stream_Log: wiretype end group for non-group") + return fmt.Errorf("proto: GetKeyspaceResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Workflow_Stream_Log: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - m.Id = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Id |= int64(b&0x7F) << shift - if 
b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StreamId", wireType) - } - m.StreamId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StreamId |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15022,95 +36505,82 @@ func (m *Workflow_Stream_Log) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + if m.Keyspace == nil { + m.Keyspace = &Keyspace{} } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if 
(skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetPermissionsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - if m.CreatedAt == nil { - m.CreatedAt = &vttime.Time{} - } - if err := m.CreatedAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 6: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetPermissionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetPermissionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ 
-15137,64 +36607,13 @@ func (m *Workflow_Stream_Log) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.UpdatedAt == nil { - m.UpdatedAt = &vttime.Time{} + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} } - if err := m.UpdatedAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Count |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -15217,7 +36636,7 @@ func (m *Workflow_Stream_Log) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { +func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15240,36 +36659,17 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
Workflow_Stream: wiretype end group for non-group") + return fmt.Errorf("proto: GetPermissionsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Workflow_Stream: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetPermissionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - m.Id = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Id |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Permissions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15279,63 +36679,133 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) + if m.Permissions == nil { + m.Permissions = &tabletmanagerdata.Permissions{} } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if err 
:= m.Permissions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - if msglen < 0 { - return ErrInvalidLength + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.Tablet == nil { - m.Tablet = &topodata.TabletAlias{} + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetRoutingRulesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 4: + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetRoutingRulesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BinlogSource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RoutingRules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15362,50 +36832,69 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.BinlogSource == nil { - m.BinlogSource = &binlogdata.BinlogSource{} + if m.RoutingRules == nil { + m.RoutingRules = &vschema.RoutingRules{} } - if err := m.BinlogSource.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen 
:= int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Position = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StopPosition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15415,27 +36904,31 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.StopPosition = 
string(dAtA[iNdEx:postIndex]) + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 7: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15463,11 +36956,11 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.State = string(dAtA[iNdEx:postIndex]) + m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 8: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15495,13 +36988,13 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DbName = string(dAtA[iNdEx:postIndex]) + m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TransactionTimestamp", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15511,33 +37004,17 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TransactionTimestamp 
== nil { - m.TransactionTimestamp = &vttime.Time{} - } - if err := m.TransactionTimestamp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeUpdated", wireType) + m.IncludeViews = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TableNamesOnly", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15547,33 +37024,17 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TimeUpdated == nil { - m.TimeUpdated = &vttime.Time{} - } - if err := m.TimeUpdated.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + m.TableNamesOnly = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TableSizesOnly", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15583,29 +37044,17 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 2 
{ - return fmt.Errorf("proto: wrong wireType = %d for field CopyStates", wireType) + m.TableSizesOnly = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TableSchemaOnly", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15615,97 +37064,68 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CopyStates = append(m.CopyStates, &Workflow_Stream_CopyState{}) - if err := m.CopyStates[len(m.CopyStates)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.TableSchemaOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Logs = append(m.Logs, &Workflow_Stream_Log{}) - if err := m.Logs[len(m.Logs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LogFetchError", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.LogFetchError = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 15: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15715,23 +37135,27 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if 
postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex])) + if m.Schema == nil { + m.Schema = &tabletmanagerdata.SchemaDefinition{} + } + if err := m.Schema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -15755,7 +37179,7 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *Workflow) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemaMigrationsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15778,15 +37202,15 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Workflow: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemaMigrationsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Workflow: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemaMigrationsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15814,13 +37238,13 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15830,33 +37254,29 @@ func (m *Workflow) UnmarshalVT(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Source == nil { - m.Source = &Workflow_ReplicationLocation{} - } - if err := m.Source.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Uuid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MigrationContext", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15866,33 +37286,29 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Target == nil { - m.Target = &Workflow_ReplicationLocation{} - } - if err := m.Target.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.MigrationContext = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxVReplicationLag", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - m.MaxVReplicationLag = 0 + m.Status = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15902,14 +37318,14 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - m.MaxVReplicationLag |= int64(b&0x7F) << shift + m.Status |= SchemaMigration_Status(b&0x7F) << shift if b < 0x80 { break } } case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardStreams", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Recent", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15936,111 +37352,18 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ShardStreams == nil { - m.ShardStreams = make(map[string]*Workflow_ShardStream) + if m.Recent == nil { + m.Recent = &vttime.Duration{} } - var mapkey string - var mapvalue *Workflow_ShardStream - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - 
break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &Workflow_ShardStream{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.Recent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.ShardStreams[mapkey] = mapvalue iNdEx = postIndex case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkflowType", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) } - var stringLen uint64 + m.Order = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -16050,29 +37373,16 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Order |= QueryOrdering(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WorkflowType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkflowSubType", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) } - var stringLen uint64 + m.Limit = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -16082,24 +37392,30 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Limit |= uint64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Skip", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + m.Skip = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Skip |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - m.WorkflowSubType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -16122,7 +37438,7 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *AddCellInfoRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemaMigrationsResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16145,47 +37461,15 @@ func (m *AddCellInfoRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AddCellInfoRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemaMigrationsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AddCellInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemaMigrationsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Migrations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -16212,10 +37496,8 @@ func (m *AddCellInfoRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.CellInfo == nil { - m.CellInfo = &topodata.CellInfo{} - } - if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Migrations = append(m.Migrations, &SchemaMigration{}) + if err := m.Migrations[len(m.Migrations)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -16241,7 +37523,7 @@ func (m *AddCellInfoRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *AddCellInfoResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetShardRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16264,12 +37546,76 @@ func (m *AddCellInfoResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AddCellInfoResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetShardRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AddCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShardName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -16292,7 +37638,7 @@ func (m *AddCellInfoResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *AddCellsAliasRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetShardResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16315,17 +37661,17 @@ func (m *AddCellsAliasRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AddCellsAliasRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetShardResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AddCellsAliasRequest: illegal tag %d (wire type %d)", fieldNum, wire) + 
return fmt.Errorf("proto: GetShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -16335,55 +37681,27 @@ func (m *AddCellsAliasRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if m.Shard == nil { + m.Shard = &Shard{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -16407,7 +37725,7 @@ func (m *AddCellsAliasRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *AddCellsAliasResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) 
error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16430,10 +37748,10 @@ func (m *AddCellsAliasResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AddCellsAliasResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetShardRoutingRulesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AddCellsAliasResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetShardRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -16458,7 +37776,7 @@ func (m *AddCellsAliasResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplyRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16481,15 +37799,15 @@ func (m *ApplyRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplyRoutingRulesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetShardRoutingRulesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplyRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetShardRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoutingRules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShardRoutingRules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -16516,65 +37834,13 @@ func (m *ApplyRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RoutingRules == nil { - m.RoutingRules = 
&vschema.RoutingRules{} + if m.ShardRoutingRules == nil { + m.ShardRoutingRules = &vschema.ShardRoutingRules{} } - if err := m.RoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ShardRoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SkipRebuild", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SkipRebuild = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RebuildCells", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RebuildCells = append(m.RebuildCells, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -16597,7 +37863,7 @@ func (m *ApplyRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplyRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetSrvKeyspaceNamesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16616,16 +37882,48 @@ func (m *ApplyRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { if b < 0x80 { break } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ApplyRoutingRulesResponse: 
wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ApplyRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSrvKeyspaceNamesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSrvKeyspaceNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -16648,7 +37946,7 @@ func (m *ApplyRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplyShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSrvKeyspaceNamesResponse_NameList) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16671,71 +37969,15 @@ func (m *ApplyShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplyShardRoutingRulesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetSrvKeyspaceNamesResponse_NameList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: 
ApplyShardRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSrvKeyspaceNamesResponse_NameList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardRoutingRules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ShardRoutingRules == nil { - m.ShardRoutingRules = &vschema.ShardRoutingRules{} - } - if err := m.ShardRoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SkipRebuild", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SkipRebuild = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RebuildCells", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16763,7 +38005,7 @@ func (m *ApplyShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RebuildCells = append(m.RebuildCells, string(dAtA[iNdEx:postIndex])) + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -16787,7 +38029,7 @@ func (m *ApplyShardRoutingRulesRequest) 
UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplyShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetSrvKeyspaceNamesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16810,12 +38052,141 @@ func (m *ApplyShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplyShardRoutingRulesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetSrvKeyspaceNamesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplyShardRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSrvKeyspaceNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Names == nil { + m.Names = make(map[string]*GetSrvKeyspaceNamesResponse_NameList) + } + var mapkey string + var mapvalue *GetSrvKeyspaceNamesResponse_NameList + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &GetSrvKeyspaceNamesResponse_NameList{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Names[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -16838,7 +38209,7 @@ func (m *ApplyShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSrvKeyspacesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16861,10 +38232,10 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) 
wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplySchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetSrvKeyspacesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplySchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSrvKeyspacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -16900,28 +38271,8 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowLongUnavailability", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AllowLongUnavailability = bool(v != 0) - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16949,107 +38300,62 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Sql = append(m.Sql, string(dAtA[iNdEx:postIndex])) + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DdlStrategy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + default: + 
iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.DdlStrategy = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UuidList", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSrvKeyspacesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.UuidList = append(m.UuidList, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MigrationContext", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.MigrationContext = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSrvKeyspacesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSrvKeyspacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SrvKeyspaces", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17076,68 +38382,105 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.WaitReplicasTimeout == nil { - m.WaitReplicasTimeout = &vttime.Duration{} - } - if err := m.WaitReplicasTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SkipPreflight", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SkipPreflight = bool(v != 0) - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CallerId", wireType) + if m.SrvKeyspaces == nil { + m.SrvKeyspaces = make(map[string]*topodata.SrvKeyspace) } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue *topodata.SrvKeyspace + for iNdEx < postIndex { + 
entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &topodata.SrvKeyspace{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - if msglen < 0 { - return ErrInvalidLength - 
} - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CallerId == nil { - m.CallerId = &vtrpc.CallerID{} - } - if err := m.CallerId.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.SrvKeyspaces[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -17161,7 +38504,7 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *UpdateThrottlerConfigRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17184,15 +38527,15 @@ func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplySchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateThrottlerConfigRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplySchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UpdateThrottlerConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UuidList", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17220,62 +38563,62 @@ func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.UuidList = append(m.UuidList, string(dAtA[iNdEx:postIndex])) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Enable", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - 
return ErrInvalidLength + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + m.Enable = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disable", wireType) } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.Disable = bool(v != 0) + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Threshold", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ApplyVSchemaRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ApplyVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Threshold = float64(math.Float64frombits(v)) + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field CustomQuery", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17303,11 +38646,11 @@ func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.CustomQuery = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SkipRebuild", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CustomQuerySet", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -17324,10 +38667,10 @@ func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { break } } - m.SkipRebuild = bool(v != 0) - case 3: + m.CustomQuerySet = bool(v != 0) + case 7: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CheckAsCheckSelf", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -17344,12 +38687,12 @@ func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { break } } - m.DryRun = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + m.CheckAsCheckSelf = bool(v != 0) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckAsCheckShard", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -17359,27 +38702,15 @@ func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cells = 
append(m.Cells, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: + m.CheckAsCheckShard = bool(v != 0) + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VSchema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ThrottledApp", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17406,45 +38737,13 @@ func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.VSchema == nil { - m.VSchema = &vschema.Keyspace{} + if m.ThrottledApp == nil { + m.ThrottledApp = &topodata.ThrottledAppRule{} } - if err := m.VSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ThrottledApp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Sql = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -17467,7 +38766,7 @@ func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplyVSchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *UpdateThrottlerConfigResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17490,48 +38789,12 @@ func (m *ApplyVSchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return 
fmt.Errorf("proto: ApplyVSchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateThrottlerConfigResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplyVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UpdateThrottlerConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VSchema", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.VSchema == nil { - m.VSchema = &vschema.Keyspace{} - } - if err := m.VSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -17554,7 +38817,7 @@ func (m *ApplyVSchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17577,90 +38840,15 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BackupRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetSrvVSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSrvVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch 
fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrimary", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AllowPrimary = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) - } - m.Concurrency = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Concurrency |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IncrementalFromPos", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17688,7 +38876,7 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IncrementalFromPos = string(dAtA[iNdEx:postIndex]) + m.Cell = string(dAtA[iNdEx:postIndex]) iNdEx = 
postIndex default: iNdEx = preIndex @@ -17712,7 +38900,7 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *BackupResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetSrvVSchemaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17735,115 +38923,15 @@ func (m *BackupResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BackupResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetSrvVSchemaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSrvVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if 
intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SrvVSchema", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17870,10 +38958,10 @@ func (m *BackupResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Event == nil { - m.Event = &logutil.Event{} + if m.SrvVSchema == nil { + m.SrvVSchema = &vschema.SrvVSchema{} } - if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SrvVSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17899,7 +38987,7 @@ func (m *BackupResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *BackupShardRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSrvVSchemasRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17922,81 +39010,17 @@ func (m *BackupShardRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if 
wireType == 4 { - return fmt.Errorf("proto: BackupShardRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetSrvVSchemasRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BackupShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrimary", wireType) + return fmt.Errorf("proto: GetSrvVSchemasRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } 
- var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -18006,31 +39030,24 @@ func (m *BackupShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.AllowPrimary = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.Concurrency = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Concurrency |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -18053,7 +39070,7 @@ func (m *BackupShardRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ChangeTabletTypeRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSrvVSchemasResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18076,15 +39093,15 @@ func (m *ChangeTabletTypeRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ChangeTabletTypeRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetSrvVSchemasResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ChangeTabletTypeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSrvVSchemasResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SrvVSchemas", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18111,52 +39128,106 @@ func (m *ChangeTabletTypeRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DbType", wireType) - } - m.DbType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DbType |= topodata.TabletType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + if m.SrvVSchemas == nil { + m.SrvVSchemas = make(map[string]*vschema.SrvVSchema) } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue *vschema.SrvVSchema + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &vschema.SrvVSchema{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - m.DryRun = bool(v != 0) + m.SrvVSchemas[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -18179,7 +39250,7 @@ func (m *ChangeTabletTypeRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ChangeTabletTypeResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetTabletRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18202,15 +39273,15 @@ func (m *ChangeTabletTypeResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ChangeTabletTypeResponse: wiretype end group for non-group") + return fmt.Errorf("proto: 
GetTabletRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ChangeTabletTypeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeforeTablet", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18237,16 +39308,67 @@ func (m *ChangeTabletTypeResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.BeforeTablet == nil { - m.BeforeTablet = &topodata.Tablet{} + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} } - if err := m.BeforeTablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTabletResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTabletResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AfterTablet", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18273,33 +39395,13 @@ func (m *ChangeTabletTypeResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.AfterTablet == nil { - m.AfterTablet = &topodata.Tablet{} + if m.Tablet == nil { + m.Tablet = &topodata.Tablet{} } - if err := m.AfterTablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WasDryRun", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.WasDryRun = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -18322,7 +39424,7 @@ func (m *ChangeTabletTypeResponse) UnmarshalVT(dAtA []byte) error { } 
return nil } -func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18345,15 +39447,15 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CreateKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetTabletsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CreateKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetTabletsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18381,13 +39483,13 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -18397,37 +39499,29 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.Force = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowEmptyVSchema", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - var v 
int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - m.AllowEmptyVSchema = bool(v != 0) - case 6: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServedFroms", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -18437,31 +39531,29 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.ServedFroms = append(m.ServedFroms, &topodata.Keyspace_ServedFrom{}) - if err := m.ServedFroms[len(m.ServedFroms)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 7: + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Strict", wireType) } - m.Type = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -18471,16 +39563,17 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= topodata.KeyspaceType(b&0x7F) << shift + v |= int(b&0x7F) 
<< shift if b < 0x80 { break } } - case 8: + m.Strict = bool(v != 0) + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BaseKeyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAliases", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -18490,29 +39583,31 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.BaseKeyspace = string(dAtA[iNdEx:postIndex]) + m.TabletAliases = append(m.TabletAliases, &topodata.TabletAlias{}) + if err := m.TabletAliases[len(m.TabletAliases)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SnapshotTime", wireType) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) } - var msglen int + m.TabletType = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -18522,33 +39617,67 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.TabletType |= topodata.TabletType(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } - if m.SnapshotTime == nil { - m.SnapshotTime = &vttime.Time{} + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTabletsResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if err := m.SnapshotTime.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 10: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTabletsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTabletsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DurabilityPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tablets", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -18558,27 +39687,80 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.DurabilityPolicy = string(dAtA[iNdEx:postIndex]) + m.Tablets = append(m.Tablets, &topodata.Tablet{}) + if err := 
m.Tablets[len(m.Tablets)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 11: + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTopologyPathRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTopologyPathRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTopologyPathRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SidecarDbName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18606,7 +39788,7 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SidecarDbName = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -18630,7 +39812,7 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *CreateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetTopologyPathResponse) UnmarshalVT(dAtA []byte) error { l := 
len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18653,15 +39835,15 @@ func (m *CreateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CreateKeyspaceResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetTopologyPathResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CreateKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetTopologyPathResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18688,10 +39870,10 @@ func (m *CreateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Keyspace == nil { - m.Keyspace = &Keyspace{} + if m.Cell == nil { + m.Cell = &TopologyCell{} } - if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Cell.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -18717,7 +39899,7 @@ func (m *CreateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *CreateShardRequest) UnmarshalVT(dAtA []byte) error { +func (m *TopologyCell) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18740,15 +39922,15 @@ func (m *CreateShardRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CreateShardRequest: wiretype end group for non-group") + return fmt.Errorf("proto: TopologyCell: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CreateShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return 
fmt.Errorf("proto: TopologyCell: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18776,11 +39958,11 @@ func (m *CreateShardRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18808,13 +39990,13 @@ func (m *CreateShardRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ShardName = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -18824,17 +40006,29 @@ func (m *CreateShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.Force = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field 
IncludeParent", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Children", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -18844,12 +40038,24 @@ func (m *CreateShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.IncludeParent = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Children = append(m.Children, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -18872,7 +40078,7 @@ func (m *CreateShardRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *CreateShardResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18895,53 +40101,17 @@ func (m *CreateShardResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CreateShardResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetVSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CreateShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Keyspace == nil { - m.Keyspace = &Keyspace{} - } - if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) - } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -18951,48 +40121,24 @@ func (m *CreateShardResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Shard == nil { - m.Shard = &Shard{} - } - if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardAlreadyExists", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ShardAlreadyExists = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -19015,7 +40161,7 @@ func (m *CreateShardResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteCellInfoRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetVersionRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 
for iNdEx < l { @@ -19038,17 +40184,17 @@ func (m *DeleteCellInfoRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteCellInfoRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetVersionRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteCellInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetVersionRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -19058,95 +40204,28 @@ func (m *DeleteCellInfoRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} } - m.Force = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { + if err := 
m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteCellInfoResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteCellInfoResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -19169,7 +40248,7 @@ func (m *DeleteCellInfoResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteCellsAliasRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetVersionResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19192,15 +40271,15 @@ func (m *DeleteCellsAliasRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteCellsAliasRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetVersionResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteCellsAliasRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetVersionResponse: illegal tag %d (wire type %d)", fieldNum, wire) } 
switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19228,7 +40307,7 @@ func (m *DeleteCellsAliasRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -19252,7 +40331,7 @@ func (m *DeleteCellsAliasRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteCellsAliasResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetVSchemaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19275,12 +40354,48 @@ func (m *DeleteCellsAliasResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteCellsAliasResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetVSchemaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteCellsAliasResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VSchema == nil { + m.VSchema = &vschema.Keyspace{} + } + if 
err := m.VSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -19303,7 +40418,7 @@ func (m *DeleteCellsAliasResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetWorkflowsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19326,10 +40441,10 @@ func (m *DeleteKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetWorkflowsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetWorkflowsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -19366,7 +40481,7 @@ func (m *DeleteKeyspaceRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ActiveOnly", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -19383,10 +40498,10 @@ func (m *DeleteKeyspaceRequest) UnmarshalVT(dAtA []byte) error { break } } - m.Recursive = bool(v != 0) + m.ActiveOnly = bool(v != 0) case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NameOnly", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -19403,7 +40518,59 @@ func (m *DeleteKeyspaceRequest) UnmarshalVT(dAtA []byte) error { break } } - m.Force = bool(v != 0) + m.NameOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Workflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeLogs", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeLogs = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -19426,7 +40593,7 @@ func (m *DeleteKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteKeyspaceResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetWorkflowsResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19449,12 +40616,46 @@ func (m *DeleteKeyspaceResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteKeyspaceResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetWorkflowsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetWorkflowsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflows", wireType) + 
} + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflows = append(m.Workflows, &Workflow{}) + if err := m.Workflows[len(m.Workflows)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -19477,7 +40678,7 @@ func (m *DeleteKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteShardsRequest) UnmarshalVT(dAtA []byte) error { +func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19496,21 +40697,53 @@ func (m *DeleteShardsRequest) UnmarshalVT(dAtA []byte) error { if b < 0x80 { break } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteShardsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteShardsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InitShardPrimaryRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InitShardPrimaryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -19520,31 +40753,29 @@ func (m *DeleteShardsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Shards = append(m.Shards, &Shard{}) - if err := m.Shards[len(m.Shards)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrimaryElectTabletAlias", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -19554,15 +40785,31 @@ func (m *DeleteShardsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Recursive = bool(v != 0) + if msglen < 0 { + return ErrInvalidLength + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PrimaryElectTabletAlias == nil { + m.PrimaryElectTabletAlias = &topodata.TabletAlias{} + } + if err := m.PrimaryElectTabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EvenIfServing", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -19579,12 +40826,12 @@ func (m *DeleteShardsRequest) UnmarshalVT(dAtA []byte) error { break } } - m.EvenIfServing = bool(v != 0) + m.Force = bool(v != 0) case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -19594,12 +40841,28 @@ func (m *DeleteShardsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Force = bool(v != 0) + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WaitReplicasTimeout == nil { + m.WaitReplicasTimeout = &vttime.Duration{} + } + if err := m.WaitReplicasTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -19622,7 +40885,7 @@ func (m *DeleteShardsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteShardsResponse) UnmarshalVT(dAtA []byte) error { +func (m *InitShardPrimaryResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for 
iNdEx < l { @@ -19645,12 +40908,46 @@ func (m *DeleteShardsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteShardsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: InitShardPrimaryResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteShardsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InitShardPrimaryResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &logutil.Event{}) + if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -19673,7 +40970,7 @@ func (m *DeleteShardsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *LaunchSchemaMigrationRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19696,15 +40993,15 @@ func (m *DeleteSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteSrvVSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: LaunchSchemaMigrationRequest: wiretype end group for 
non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteSrvVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LaunchSchemaMigrationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19732,7 +41029,39 @@ func (m *DeleteSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cell = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -19756,7 +41085,7 @@ func (m *DeleteSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteSrvVSchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *LaunchSchemaMigrationResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19779,12 +41108,125 @@ func (m *DeleteSrvVSchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteSrvVSchemaResponse: wiretype end group for non-group") + return 
fmt.Errorf("proto: LaunchSchemaMigrationResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteSrvVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LaunchSchemaMigrationResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RowsAffectedByShard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RowsAffectedByShard == nil { + m.RowsAffectedByShard = make(map[string]uint64) + } + var mapkey string + var mapvalue uint64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = 
postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.RowsAffectedByShard[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -19807,7 +41249,7 @@ func (m *DeleteSrvVSchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteTabletsRequest) UnmarshalVT(dAtA []byte) error { +func (m *LookupVindexCreateRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19830,15 +41272,111 @@ func (m *DeleteTabletsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteTabletsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: LookupVindexCreateRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteTabletsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LookupVindexCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAliases", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } 
+ } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vindex", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19865,14 +41403,16 @@ func (m *DeleteTabletsRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TabletAliases = append(m.TabletAliases, 
&topodata.TabletAlias{}) - if err := m.TabletAliases[len(m.TabletAliases)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.Vindex == nil { + m.Vindex = &vschema.Keyspace{} + } + if err := m.Vindex.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrimary", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContinueAfterCopyWithOwner", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -19889,7 +41429,95 @@ func (m *DeleteTabletsRequest) UnmarshalVT(dAtA []byte) error { break } } - m.AllowPrimary = bool(v != 0) + m.ContinueAfterCopyWithOwner = bool(v != 0) + case 6: + if wireType == 0 { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.TabletTypes) == 0 { + m.TabletTypes = make([]topodata.TabletType, 0, elementCount) + } + for iNdEx < postIndex { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
m.TabletTypes = append(m.TabletTypes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", wireType) + } + m.TabletSelectionPreference = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletSelectionPreference |= tabletmanagerdata.TabletSelectionPreference(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -19912,7 +41540,7 @@ func (m *DeleteTabletsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteTabletsResponse) UnmarshalVT(dAtA []byte) error { +func (m *LookupVindexCreateResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19935,10 +41563,10 @@ func (m *DeleteTabletsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteTabletsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: LookupVindexCreateResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteTabletsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LookupVindexCreateResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -19963,7 +41591,7 @@ func (m *DeleteTabletsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *EmergencyReparentShardRequest) UnmarshalVT(dAtA []byte) error { +func (m *LookupVindexExternalizeRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19986,10 +41614,10 @@ func (m *EmergencyReparentShardRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if 
wireType == 4 { - return fmt.Errorf("proto: EmergencyReparentShardRequest: wiretype end group for non-group") + return fmt.Errorf("proto: LookupVindexExternalizeRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EmergencyReparentShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LookupVindexExternalizeRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -20026,7 +41654,7 @@ func (m *EmergencyReparentShardRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20054,13 +41682,13 @@ func (m *EmergencyReparentShardRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NewPrimary", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TableKeyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20070,33 +41698,80 @@ func (m *EmergencyReparentShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.NewPrimary == nil { - m.NewPrimary = &topodata.TabletAlias{} - } - if err := m.NewPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err 
!= nil { + m.TableKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IgnoreReplicas", wireType) + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LookupVindexExternalizeResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LookupVindexExternalizeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LookupVindexExternalizeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowDeleted", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20106,29 +41781,66 @@ func (m *EmergencyReparentShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength + m.WorkflowDeleted = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex 
< 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.IgnoreReplicas = append(m.IgnoreReplicas, &topodata.TabletAlias{}) - if err := m.IgnoreReplicas[len(m.IgnoreReplicas)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MaterializeCreateRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - iNdEx = postIndex - case 5: + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MaterializeCreateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MaterializeCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Settings", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20155,33 +41867,64 @@ func (m *EmergencyReparentShardRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.WaitReplicasTimeout == nil { - m.WaitReplicasTimeout = &vttime.Duration{} + if m.Settings == nil { + m.Settings = &MaterializeSettings{} } - if err := m.WaitReplicasTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Settings.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: - if wireType 
!= 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PreventCrossCellPromotion", wireType) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength } - m.PreventCrossCellPromotion = bool(v != 0) + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MaterializeCreateResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MaterializeCreateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MaterializeCreateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -20204,7 +41947,7 @@ func (m *EmergencyReparentShardRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *EmergencyReparentShardResponse) UnmarshalVT(dAtA []byte) error { +func (m *MigrateCreateRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20227,15 +41970,15 @@ func (m *EmergencyReparentShardResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 
0x7) if wireType == 4 { - return fmt.Errorf("proto: EmergencyReparentShardResponse: wiretype end group for non-group") + return fmt.Errorf("proto: MigrateCreateRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EmergencyReparentShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MigrateCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20263,11 +42006,11 @@ func (m *EmergencyReparentShardResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceKeyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20295,13 +42038,13 @@ func (m *EmergencyReparentShardResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.SourceKeyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PromotedPrimary", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetKeyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20311,33 +42054,29 @@ func (m *EmergencyReparentShardResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - 
if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PromotedPrimary == nil { - m.PromotedPrimary = &topodata.TabletAlias{} - } - if err := m.PromotedPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.TargetKeyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MountName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20347,82 +42086,29 @@ func (m *EmergencyReparentShardResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, &logutil.Event{}) - if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.MountName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsAppRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsAppRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20432,33 +42118,98 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + case 6: + if wireType == 0 { + var v 
topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.TabletTypes) == 0 { + m.TabletTypes = make([]topodata.TabletType, 0, elementCount) + } + for iNdEx < postIndex { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) } - var stringLen uint64 + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", wireType) + } + m.TabletSelectionPreference = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20468,29 +42219,16 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.TabletSelectionPreference |= tabletmanagerdata.TabletSelectionPreference(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := 
iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Query = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: + case 8: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllTables", wireType) } - m.MaxRows = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20500,16 +42238,17 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxRows |= int64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UsePool", wireType) + m.AllTables = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeTables", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20519,68 +42258,29 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.UsePool = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecuteFetchAsAppResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsAppResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsAppResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.IncludeTables = append(m.IncludeTables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20590,84 +42290,29 @@ func (m *ExecuteFetchAsAppResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Result == nil { - m.Result = &query.QueryResult{} - } - if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ExcludeTables = append(m.ExcludeTables, 
string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecuteFetchAsDBARequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsDBARequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsDBARequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceTimeZone", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20677,31 +42322,27 @@ func (m *ExecuteFetchAsDBARequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - 
m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.SourceTimeZone = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20729,13 +42370,33 @@ func (m *ExecuteFetchAsDBARequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Query = string(dAtA[iNdEx:postIndex]) + m.OnDdl = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StopAfterCopy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StopAfterCopy = bool(v != 0) + case 14: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DropForeignKeys", wireType) } - m.MaxRows = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20745,14 +42406,15 @@ func (m *ExecuteFetchAsDBARequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxRows |= int64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - case 4: + m.DropForeignKeys = bool(v != 0) + case 15: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DisableBinlogs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeferSecondaryKeys", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -20769,10 +42431,10 @@ func (m *ExecuteFetchAsDBARequest) UnmarshalVT(dAtA []byte) 
error { break } } - m.DisableBinlogs = bool(v != 0) - case 5: + m.DeferSecondaryKeys = bool(v != 0) + case 16: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReloadSchema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AutoStart", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -20789,63 +42451,12 @@ func (m *ExecuteFetchAsDBARequest) UnmarshalVT(dAtA []byte) error { break } } - m.ReloadSchema = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecuteFetchAsDBAResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsDBAResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsDBAResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + m.AutoStart = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoRoutingRules", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20855,28 +42466,12 @@ func (m 
*ExecuteFetchAsDBAResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Result == nil { - m.Result = &query.QueryResult{} - } - if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.NoRoutingRules = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -20899,7 +42494,7 @@ func (m *ExecuteFetchAsDBAResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { +func (m *MigrateCompleteRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20922,17 +42517,17 @@ func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteHookRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MigrateCompleteRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteHookRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MigrateCompleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20942,33 +42537,29 @@ func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 
0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletHookRequest", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetKeyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20978,84 +42569,49 @@ func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletHookRequest == nil { - m.TabletHookRequest = &tabletmanagerdata.ExecuteHookRequest{} - } - if err := m.TabletHookRequest.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.TargetKeyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepData", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecuteHookResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteHookResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HookResult", wireType) + m.KeepData = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepRoutingRules", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -21065,28 +42621,52 @@ func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength + m.KeepRoutingRules = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RenameTables", wireType) } - if postIndex > l { - 
return io.ErrUnexpectedEOF + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if m.HookResult == nil { - m.HookResult = &tabletmanagerdata.ExecuteHookResponse{} + m.RenameTables = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) } - if err := m.HookResult.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex + m.DryRun = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -21109,7 +42689,7 @@ func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *FindAllShardsInKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *MigrateCompleteResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21132,15 +42712,15 @@ func (m *FindAllShardsInKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FindAllShardsInKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MigrateCompleteResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FindAllShardsInKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MigrateCompleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) 
} var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21168,64 +42748,13 @@ func (m *FindAllShardsInKeyspaceRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Summary = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FindAllShardsInKeyspaceResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FindAllShardsInKeyspaceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FindAllShardsInKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DryRunResults", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -21235,120 +42764,23 @@ func (m *FindAllShardsInKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if 
msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Shards == nil { - m.Shards = make(map[string]*Shard) - } - var mapkey string - var mapvalue *Shard - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &Shard{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = 
entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Shards[mapkey] = mapvalue + m.DryRunResults = append(m.DryRunResults, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -21372,7 +42804,7 @@ func (m *FindAllShardsInKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { +func (m *MountRegisterRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21395,15 +42827,15 @@ func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetBackupsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MountRegisterRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetBackupsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MountRegisterRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TopoType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21431,11 +42863,11 @@ func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.TopoType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TopoServer", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21463,13 
+42895,13 @@ func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.TopoServer = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopoRoot", wireType) } - m.Limit = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -21479,16 +42911,29 @@ func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Limit |= uint32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopoRoot = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Detailed", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -21498,31 +42943,75 @@ func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.Detailed = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DetailedLimit", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.DetailedLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
m.DetailedLimit |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MountRegisterResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MountRegisterResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MountRegisterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -21545,7 +43034,7 @@ func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetBackupsResponse) UnmarshalVT(dAtA []byte) error { +func (m *MountUnregisterRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21568,17 +43057,17 @@ func (m *GetBackupsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetBackupsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: 
MountUnregisterRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetBackupsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MountUnregisterRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backups", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -21588,26 +43077,75 @@ func (m *GetBackupsResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Backups = append(m.Backups, &mysqlctl.BackupInfo{}) - if err := m.Backups[len(m.Backups)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MountUnregisterResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MountUnregisterResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MountUnregisterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -21630,7 +43168,7 @@ func (m *GetBackupsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellInfoRequest) UnmarshalVT(dAtA []byte) error { +func (m *MountShowRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21653,15 +43191,15 @@ func (m *GetCellInfoRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellInfoRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MountShowRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MountShowRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21689,7 +43227,7 @@ func (m *GetCellInfoRequest) 
UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cell = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -21713,7 +43251,7 @@ func (m *GetCellInfoRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellInfoResponse) UnmarshalVT(dAtA []byte) error { +func (m *MountShowResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21736,17 +43274,17 @@ func (m *GetCellInfoResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellInfoResponse: wiretype end group for non-group") + return fmt.Errorf("proto: MountShowResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MountShowResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TopoType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -21756,27 +43294,119 @@ func (m *GetCellInfoResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.CellInfo == nil { - m.CellInfo = &topodata.CellInfo{} + m.TopoType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field TopoServer", wireType) } - if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopoServer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopoRoot", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopoRoot = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Name = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -21800,7 +43430,7 @@ func (m *GetCellInfoResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellInfoNamesRequest) UnmarshalVT(dAtA []byte) error { +func (m *MountListRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21823,10 +43453,10 @@ func (m *GetCellInfoNamesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellInfoNamesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MountListRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellInfoNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MountListRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -21851,7 +43481,7 @@ func (m *GetCellInfoNamesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellInfoNamesResponse) UnmarshalVT(dAtA []byte) error { +func (m *MountListResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21874,10 +43504,10 @@ func (m *GetCellInfoNamesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellInfoNamesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: MountListResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellInfoNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MountListResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -21934,7 +43564,7 @@ func (m *GetCellInfoNamesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellsAliasesRequest) UnmarshalVT(dAtA []byte) error { +func (m 
*MoveTablesCreateRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21957,68 +43587,317 @@ func (m *GetCellsAliasesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellsAliasesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MoveTablesCreateRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellsAliasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MoveTablesCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { return io.ErrUnexpectedEOF } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceKeyspace", wireType) } - if iNdEx >= l { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.SourceKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetKeyspace", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetCellsAliasesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellsAliasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType == 0 { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.TabletTypes) == 0 { + m.TabletTypes = make([]topodata.TabletType, 0, elementCount) + } + for iNdEx < postIndex { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", wireType) + } + m.TabletSelectionPreference = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletSelectionPreference |= tabletmanagerdata.TabletSelectionPreference(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceShards", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceShards = append(m.SourceShards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllTables", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllTables = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeTables", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IncludeTables = append(m.IncludeTables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -22028,177 +43907,61 @@ func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Aliases == nil { - m.Aliases = make(map[string]*topodata.CellsAlias) + m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalClusterName", wireType) } - var mapkey string - var mapvalue *topodata.CellsAlias - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &topodata.CellsAlias{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break } } - m.Aliases[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + 
intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetFullStatusRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetFullStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.ExternalClusterName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceTimeZone", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -22208,84 +43971,29 @@ func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == 
nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.SourceTimeZone = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetFullStatusResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetFullStatusResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetFullStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -22295,79 +44003,144 @@ func (m *GetFullStatusResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + 
intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Status == nil { - m.Status = &replicationdata.FullStatus{} + m.OnDdl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StopAfterCopy", wireType) } - if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + m.StopAfterCopy = bool(v != 0) + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DropForeignKeys", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + m.DropForeignKeys = bool(v != 0) + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeferSecondaryKeys", wireType) } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.DeferSecondaryKeys = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoStart", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetKeyspacesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetKeyspacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + m.AutoStart = bool(v != 0) + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoRoutingRules", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NoRoutingRules = bool(v != 0) + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AtomicCopy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AtomicCopy = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -22390,7 +44163,7 @@ func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { +func (m *MoveTablesCreateResponse_TabletInfo) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22413,15 +44186,15 @@ func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetKeyspacesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: MoveTablesCreateResponse_TabletInfo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetKeyspacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MoveTablesCreateResponse_TabletInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22448,11 +44221,33 @@ func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspaces = append(m.Keyspaces, &Keyspace{}) - if err := m.Keyspaces[len(m.Keyspaces)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.Tablet == nil { + m.Tablet = &topodata.TabletAlias{} + } + if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) + } + var v int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Created = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -22475,7 +44270,7 @@ func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *MoveTablesCreateResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22498,15 +44293,15 @@ func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MoveTablesCreateResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MoveTablesCreateResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22529,67 +44324,16 @@ func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } postIndex := iNdEx + intStringLen if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetKeyspaceResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetKeyspaceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Summary = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22616,10 +44360,8 @@ func (m *GetKeyspaceResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Keyspace == nil { - m.Keyspace = &Keyspace{} - } - if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Details = append(m.Details, &MoveTablesCreateResponse_TabletInfo{}) + if err := m.Details[len(m.Details)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -22645,7 +44387,7 @@ func (m *GetKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetPermissionsRequest) UnmarshalVT(dAtA []byte) error { +func (m *MoveTablesCompleteRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22668,17 +44410,17 @@ func (m *GetPermissionsRequest) UnmarshalVT(dAtA 
[]byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetPermissionsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MoveTablesCompleteRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetPermissionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MoveTablesCompleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -22688,28 +44430,136 @@ func (m *GetPermissionsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetKeyspace", wireType) } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } + postIndex := iNdEx 
+ intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetKeyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepData", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.KeepData = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepRoutingRules", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.KeepRoutingRules = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RenameTables", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RenameTables = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DryRun = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -22732,7 +44582,7 @@ func (m *GetPermissionsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { +func (m *MoveTablesCompleteResponse) 
UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22755,17 +44605,17 @@ func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetPermissionsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: MoveTablesCompleteResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetPermissionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MoveTablesCompleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Permissions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -22775,27 +44625,55 @@ func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Permissions == nil { - m.Permissions = &tabletmanagerdata.Permissions{} + m.Summary = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRunResults", wireType) } - if err := m.Permissions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.DryRunResults = append(m.DryRunResults, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -22819,7 +44697,7 @@ func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { +func (m *PingTabletRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22842,12 +44720,48 @@ func (m *GetRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetRoutingRulesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: PingTabletRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PingTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx 
= preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -22870,7 +44784,7 @@ func (m *GetRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { +func (m *PingTabletResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22893,48 +44807,12 @@ func (m *GetRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetRoutingRulesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: PingTabletResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PingTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoutingRules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RoutingRules == nil { - m.RoutingRules = &vschema.RoutingRules{} - } - if err := m.RoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -22957,7 +44835,7 @@ func (m *GetRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22980,17 
+44858,17 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: PlannedReparentShardRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PlannedReparentShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -23000,31 +44878,27 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23052,13 +44926,13 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tables = append(m.Tables, 
string(dAtA[iNdEx:postIndex])) + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NewPrimary", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -23068,29 +44942,33 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) + if m.NewPrimary == nil { + m.NewPrimary = &topodata.TabletAlias{} + } + if err := m.NewPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AvoidPrimary", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -23100,17 +44978,33 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.IncludeViews = bool(v != 0) + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AvoidPrimary == nil { + m.AvoidPrimary = &topodata.TabletAlias{} + } + if err := 
m.AvoidPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TableNamesOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -23120,52 +45014,28 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.TableNamesOnly = bool(v != 0) - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TableSizesOnly", wireType) + if msglen < 0 { + return ErrInvalidLength } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength } - m.TableSizesOnly = bool(v != 0) - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TableSchemaOnly", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if m.WaitReplicasTimeout == nil { + m.WaitReplicasTimeout = &vttime.Duration{} } - m.TableSchemaOnly = bool(v != 0) + if err := m.WaitReplicasTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -23188,7 +45058,7 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m 
*GetSchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *PlannedReparentShardResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23211,15 +45081,79 @@ func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: PlannedReparentShardResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PlannedReparentShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 
0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PromotedPrimary", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23246,10 +45180,44 @@ func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Schema == nil { - m.Schema = &tabletmanagerdata.SchemaDefinition{} + if m.PromotedPrimary == nil { + m.PromotedPrimary = &topodata.TabletAlias{} } - if err := m.Schema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.PromotedPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &logutil.Event{}) + if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -23275,7 +45243,7 @@ func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetShardRequest) UnmarshalVT(dAtA []byte) error { +func (m *RebuildKeyspaceGraphRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23298,10 +45266,10 @@ func (m *GetShardRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetShardRequest: wiretype end 
group for non-group") + return fmt.Errorf("proto: RebuildKeyspaceGraphRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RebuildKeyspaceGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -23338,7 +45306,7 @@ func (m *GetShardRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23366,8 +45334,28 @@ func (m *GetShardRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ShardName = string(dAtA[iNdEx:postIndex]) + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPartial", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowPartial = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -23390,7 +45378,7 @@ func (m *GetShardRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetShardResponse) UnmarshalVT(dAtA []byte) error { +func (m *RebuildKeyspaceGraphResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23413,17 +45401,68 @@ func (m *GetShardResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetShardResponse: wiretype end group for non-group") + return fmt.Errorf("proto: RebuildKeyspaceGraphResponse: wiretype end 
group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RebuildKeyspaceGraphResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RebuildVSchemaGraphRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RebuildVSchemaGraphRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RebuildVSchemaGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -23433,27 +45472,23 @@ func (m *GetShardResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex 
:= iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Shard == nil { - m.Shard = &Shard{} - } - if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -23477,7 +45512,7 @@ func (m *GetShardResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { +func (m *RebuildVSchemaGraphResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23500,10 +45535,10 @@ func (m *GetShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetShardRoutingRulesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RebuildVSchemaGraphResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetShardRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RebuildVSchemaGraphResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -23528,7 +45563,7 @@ func (m *GetShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { +func (m *RefreshStateRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23551,15 +45586,15 @@ func (m *GetShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetShardRoutingRulesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: RefreshStateRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: 
GetShardRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RefreshStateRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardRoutingRules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23586,10 +45621,10 @@ func (m *GetShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ShardRoutingRules == nil { - m.ShardRoutingRules = &vschema.ShardRoutingRules{} + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} } - if err := m.ShardRoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -23615,7 +45650,7 @@ func (m *GetShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSrvKeyspaceNamesRequest) UnmarshalVT(dAtA []byte) error { +func (m *RefreshStateResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23638,44 +45673,12 @@ func (m *GetSrvKeyspaceNamesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSrvKeyspaceNamesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RefreshStateResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSrvKeyspaceNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RefreshStateResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -23698,7 +45701,7 @@ func (m *GetSrvKeyspaceNamesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSrvKeyspaceNamesResponse_NameList) UnmarshalVT(dAtA []byte) error { +func (m *RefreshStateByShardRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23721,15 +45724,15 @@ func (m *GetSrvKeyspaceNamesResponse_NameList) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSrvKeyspaceNamesResponse_NameList: wiretype end group for non-group") + return fmt.Errorf("proto: RefreshStateByShardRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSrvKeyspaceNamesResponse_NameList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RefreshStateByShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23757,64 +45760,45 @@ func (m *GetSrvKeyspaceNamesResponse_NameList) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + m.Keyspace = string(dAtA[iNdEx:postIndex]) 
iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetSrvKeyspaceNamesResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetSrvKeyspaceNamesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetSrvKeyspaceNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift 
+= 7 { if shift >= 64 { return ErrIntOverflow @@ -23824,120 +45808,23 @@ func (m *GetSrvKeyspaceNamesResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Names == nil { - m.Names = make(map[string]*GetSrvKeyspaceNamesResponse_NameList) - } - var mapkey string - var mapvalue *GetSrvKeyspaceNamesResponse_NameList - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - 
return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &GetSrvKeyspaceNamesResponse_NameList{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Names[mapkey] = mapvalue + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -23961,7 +45848,7 @@ func (m *GetSrvKeyspaceNamesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSrvKeyspacesRequest) UnmarshalVT(dAtA []byte) error { +func (m *RefreshStateByShardResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23984,17 +45871,17 @@ func (m *GetSrvKeyspacesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSrvKeyspacesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RefreshStateByShardResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSrvKeyspacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RefreshStateByShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsPartialRefresh", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -24004,27 +45891,15 @@ func (m 
*GetSrvKeyspacesRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.IsPartialRefresh = bool(v != 0) case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PartialRefreshDetails", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24052,7 +45927,7 @@ func (m *GetSrvKeyspacesRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + m.PartialRefreshDetails = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -24076,7 +45951,7 @@ func (m *GetSrvKeyspacesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSrvKeyspacesResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReloadSchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24099,15 +45974,15 @@ func (m *GetSrvKeyspacesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSrvKeyspacesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReloadSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSrvKeyspacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReloadSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field SrvKeyspaces", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24134,106 +46009,64 @@ func (m *GetSrvKeyspacesResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SrvKeyspaces == nil { - m.SrvKeyspaces = make(map[string]*topodata.SrvKeyspace) + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} } - var mapkey string - var mapvalue *topodata.SrvKeyspace - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF 
- } - mapvalue = &topodata.SrvKeyspace{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReloadSchemaResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - m.SrvKeyspaces[mapkey] = mapvalue - iNdEx = postIndex + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReloadSchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReloadSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -24256,7 +46089,7 @@ func (m *GetSrvKeyspacesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UpdateThrottlerConfigRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReloadSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { l 
:= len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24279,10 +46112,10 @@ func (m *UpdateThrottlerConfigRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UpdateThrottlerConfigRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReloadSchemaKeyspaceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateThrottlerConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReloadSchemaKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -24318,59 +46151,8 @@ func (m *UpdateThrottlerConfigRequest) UnmarshalVT(dAtA []byte) error { m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Enable = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Disable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Disable = bool(v != 0) - case 4: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Threshold", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Threshold = float64(math.Float64frombits(v)) - case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CustomQuery", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field WaitPosition", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24398,31 +46180,11 @@ func (m *UpdateThrottlerConfigRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.CustomQuery = string(dAtA[iNdEx:postIndex]) + m.WaitPosition = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CustomQuerySet", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.CustomQuerySet = bool(v != 0) - case 7: + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckAsCheckSelf", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IncludePrimary", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -24439,12 +46201,12 @@ func (m *UpdateThrottlerConfigRequest) UnmarshalVT(dAtA []byte) error { break } } - m.CheckAsCheckSelf = bool(v != 0) - case 8: + m.IncludePrimary = bool(v != 0) + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckAsCheckShard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) } - var v int + m.Concurrency = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -24454,12 +46216,11 @@ func (m *UpdateThrottlerConfigRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.Concurrency |= uint32(b&0x7F) << shift if b < 0x80 { break } } - m.CheckAsCheckShard = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -24482,7 +46243,7 @@ func (m *UpdateThrottlerConfigRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m 
*UpdateThrottlerConfigResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReloadSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24505,12 +46266,46 @@ func (m *UpdateThrottlerConfigResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UpdateThrottlerConfigResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReloadSchemaKeyspaceResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateThrottlerConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReloadSchemaKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &logutil.Event{}) + if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -24533,7 +46328,7 @@ func (m *UpdateThrottlerConfigResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReloadSchemaShardRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24556,15 +46351,15 @@ func (m *GetSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire 
& 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSrvVSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReloadSchemaShardRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSrvVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReloadSchemaShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24592,8 +46387,111 @@ func (m *GetSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cell = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitPosition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + 
if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WaitPosition = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludePrimary", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludePrimary = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) + } + m.Concurrency = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Concurrency |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -24616,7 +46514,7 @@ func (m *GetSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSrvVSchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReloadSchemaShardResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24639,15 +46537,15 @@ func (m *GetSrvVSchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSrvVSchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReloadSchemaShardResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSrvVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReloadSchemaShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: + case 2: if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field SrvVSchema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24674,10 +46572,8 @@ func (m *GetSrvVSchemaResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SrvVSchema == nil { - m.SrvVSchema = &vschema.SrvVSchema{} - } - if err := m.SrvVSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Events = append(m.Events, &logutil.Event{}) + if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -24703,7 +46599,7 @@ func (m *GetSrvVSchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSrvVSchemasRequest) UnmarshalVT(dAtA []byte) error { +func (m *RemoveBackupRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24726,15 +46622,47 @@ func (m *GetSrvVSchemasRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSrvVSchemasRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RemoveBackupRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSrvVSchemasRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RemoveBackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24762,7 +46690,39 @@ func (m *GetSrvVSchemasRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -24786,7 +46746,7 @@ func (m *GetSrvVSchemasRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSrvVSchemasResponse) UnmarshalVT(dAtA []byte) error { +func (m *RemoveBackupResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24809,141 +46769,12 @@ func (m *GetSrvVSchemasResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSrvVSchemasResponse: wiretype end group for non-group") + return fmt.Errorf("proto: RemoveBackupResponse: wiretype end group for non-group") } if fieldNum <= 
0 { - return fmt.Errorf("proto: GetSrvVSchemasResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RemoveBackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SrvVSchemas", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SrvVSchemas == nil { - m.SrvVSchemas = make(map[string]*vschema.SrvVSchema) - } - var mapkey string - var mapvalue *vschema.SrvVSchema - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - 
if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &vschema.SrvVSchema{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.SrvVSchemas[mapkey] = mapvalue - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -24966,7 +46797,7 @@ func (m *GetSrvVSchemasResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetTabletRequest) UnmarshalVT(dAtA []byte) error { +func (m *RemoveKeyspaceCellRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24989,17 +46820,17 @@ func (m *GetTabletRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetTabletRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RemoveKeyspaceCellRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RemoveKeyspaceCellRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int + var stringLen 
uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -25009,28 +46840,96 @@ func (m *GetTabletRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cell = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Recursive = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -25053,7 +46952,7 @@ func (m *GetTabletRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetTabletResponse) UnmarshalVT(dAtA []byte) error { +func (m *RemoveKeyspaceCellResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25076,48 +46975,12 @@ func (m *GetTabletResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetTabletResponse: wiretype end group for non-group") + return fmt.Errorf("proto: RemoveKeyspaceCellResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RemoveKeyspaceCellResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Tablet == nil { - m.Tablet = &topodata.Tablet{} - } - if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -25140,7 +47003,7 @@ func (m *GetTabletResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetTabletsRequest) 
UnmarshalVT(dAtA []byte) error { +func (m *RemoveShardCellRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25163,10 +47026,10 @@ func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetTabletsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RemoveShardCellRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetTabletsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RemoveShardCellRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -25203,7 +47066,7 @@ func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShardName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -25231,11 +47094,11 @@ func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.ShardName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -25263,11 +47126,11 @@ func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + m.Cell = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Strict", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) } var v int for shift 
:= uint(0); ; shift += 7 { @@ -25284,12 +47147,12 @@ func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error { break } } - m.Strict = bool(v != 0) + m.Force = bool(v != 0) case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAliases", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -25299,45 +47162,63 @@ func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength + m.Recursive = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.TabletAliases = append(m.TabletAliases, &topodata.TabletAlias{}) - if err := m.TabletAliases[len(m.TabletAliases)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveShardCellResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) + if iNdEx >= l { + return io.ErrUnexpectedEOF } - m.TabletType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TabletType |= topodata.TabletType(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveShardCellResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveShardCellResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -25360,7 +47241,7 @@ func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetTabletsResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReparentTabletRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25383,15 +47264,15 @@ func (m *GetTabletsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetTabletsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReparentTabletRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetTabletsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return 
fmt.Errorf("proto: ReparentTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tablets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25418,8 +47299,10 @@ func (m *GetTabletsResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tablets = append(m.Tablets, &topodata.Tablet{}) - if err := m.Tablets[len(m.Tablets)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.Tablet == nil { + m.Tablet = &topodata.TabletAlias{} + } + if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -25445,7 +47328,7 @@ func (m *GetTabletsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetTopologyPathRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReparentTabletResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25468,15 +47351,15 @@ func (m *GetTopologyPathRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetTopologyPathRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReparentTabletResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetTopologyPathRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReparentTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -25504,62 +47387,43 @@ func (m *GetTopologyPathRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { 
return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetTopologyPathResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetTopologyPathResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetTopologyPathResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + return fmt.Errorf("proto: wrong wireType = 
%d for field Primary", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25586,10 +47450,10 @@ func (m *GetTopologyPathResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Cell == nil { - m.Cell = &TopologyCell{} + if m.Primary == nil { + m.Primary = &topodata.TabletAlias{} } - if err := m.Cell.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Primary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -25615,7 +47479,7 @@ func (m *GetTopologyPathResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *TopologyCell) UnmarshalVT(dAtA []byte) error { +func (m *ReshardCreateRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25638,15 +47502,15 @@ func (m *TopologyCell) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TopologyCell: wiretype end group for non-group") + return fmt.Errorf("proto: ReshardCreateRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TopologyCell: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReshardCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -25674,11 +47538,11 @@ func (m *TopologyCell) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var 
stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -25706,11 +47570,11 @@ func (m *TopologyCell) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceShards", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -25738,11 +47602,11 @@ func (m *TopologyCell) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = string(dAtA[iNdEx:postIndex]) + m.SourceShards = append(m.SourceShards, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Children", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetShards", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -25770,62 +47634,151 @@ func (m *TopologyCell) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Children = append(m.Children, string(dAtA[iNdEx:postIndex])) + m.TargetShards = append(m.TargetShards, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { return io.ErrUnexpectedEOF } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType == 0 { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.TabletTypes) == 0 { + m.TabletTypes = make([]topodata.TabletType, 0, elementCount) + } + for iNdEx < postIndex { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.TabletSelectionPreference = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletSelectionPreference |= tabletmanagerdata.TabletSelectionPreference(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetVSchemaRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipSchemaCopy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipSchemaCopy = bool(v != 0) + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -25853,8 +47806,68 @@ func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.OnDdl = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StopAfterCopy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StopAfterCopy = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeferSecondaryKeys", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DeferSecondaryKeys = bool(v != 0) + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoStart", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AutoStart = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -25877,7 +47890,7 @@ func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetVersionRequest) UnmarshalVT(dAtA []byte) error { +func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25900,10 +47913,10 @@ func (m *GetVersionRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetVersionRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RestoreFromBackupRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetVersionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RestoreFromBackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -25942,60 +47955,45 @@ func (m *GetVersionRequest) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - 
skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BackupTime", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetVersionResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if m.BackupTime == nil { + m.BackupTime = &vttime.Time{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetVersionResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetVersionResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if err := m.BackupTime.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RestoreToPos", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26023,62 +48021,31 @@ 
func (m *GetVersionResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(dAtA[iNdEx:postIndex]) + m.RestoreToPos = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetVSchemaResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetVSchemaResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.DryRun = bool(v != 0) + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VSchema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RestoreToTimestamp", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26105,10 +48072,10 @@ func (m *GetVSchemaResponse) 
UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.VSchema == nil { - m.VSchema = &vschema.Keyspace{} + if m.RestoreToTimestamp == nil { + m.RestoreToTimestamp = &vttime.Time{} } - if err := m.VSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RestoreToTimestamp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -26134,7 +48101,7 @@ func (m *GetVSchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetWorkflowsRequest) UnmarshalVT(dAtA []byte) error { +func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26157,17 +48124,17 @@ func (m *GetWorkflowsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetWorkflowsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RestoreFromBackupResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetWorkflowsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RestoreFromBackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -26177,29 +48144,33 @@ func (m *GetWorkflowsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex 
> l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -26209,17 +48180,29 @@ func (m *GetWorkflowsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.ActiveOnly = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NameOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -26229,66 +48212,27 @@ func (m *GetWorkflowsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.NameOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = 
append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetWorkflowsResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetWorkflowsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetWorkflowsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workflows", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26315,8 +48259,10 @@ func (m *GetWorkflowsResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Workflows = append(m.Workflows, &Workflow{}) - if err := m.Workflows[len(m.Workflows)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.Event == nil { + m.Event = &logutil.Event{} + } + if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -26342,7 +48288,7 @@ func (m *GetWorkflowsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error { +func (m *RetrySchemaMigrationRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26365,10 +48311,10 @@ 
func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: InitShardPrimaryRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RetrySchemaMigrationRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: InitShardPrimaryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RetrySchemaMigrationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -26405,7 +48351,7 @@ func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26433,67 +48379,62 @@ func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.Uuid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrimaryElectTabletAlias", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.PrimaryElectTabletAlias == nil { - m.PrimaryElectTabletAlias = &topodata.TabletAlias{} - } - if err := 
m.PrimaryElectTabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RetrySchemaMigrationResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + if iNdEx >= l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.Force = bool(v != 0) - case 5: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RetrySchemaMigrationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RetrySchemaMigrationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RowsAffectedByShard", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26520,12 +48461,89 @@ func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.WaitReplicasTimeout == nil { - m.WaitReplicasTimeout = &vttime.Duration{} + if m.RowsAffectedByShard == nil { + m.RowsAffectedByShard = make(map[string]uint64) } - if err := 
m.WaitReplicasTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + var mapvalue uint64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.RowsAffectedByShard[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -26549,7 +48567,7 @@ func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *InitShardPrimaryResponse) UnmarshalVT(dAtA []byte) error { +func (m *RunHealthCheckRequest) UnmarshalVT(dAtA []byte) error { l := 
len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26572,15 +48590,15 @@ func (m *InitShardPrimaryResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: InitShardPrimaryResponse: wiretype end group for non-group") + return fmt.Errorf("proto: RunHealthCheckRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: InitShardPrimaryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RunHealthCheckRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26607,8 +48625,10 @@ func (m *InitShardPrimaryResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, &logutil.Event{}) - if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -26634,7 +48654,7 @@ func (m *InitShardPrimaryResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PingTabletRequest) UnmarshalVT(dAtA []byte) error { +func (m *RunHealthCheckResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26657,17 +48677,68 @@ func (m *PingTabletRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PingTabletRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RunHealthCheckResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PingTabletRequest: 
illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RunHealthCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetKeyspaceDurabilityPolicyRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -26677,27 +48748,55 @@ func (m *PingTabletRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if 
postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DurabilityPolicy", wireType) } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.DurabilityPolicy = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -26721,7 +48820,7 @@ func (m *PingTabletRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PingTabletResponse) UnmarshalVT(dAtA []byte) error { +func (m *SetKeyspaceDurabilityPolicyResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26744,12 +48843,48 @@ func (m *PingTabletResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PingTabletResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PingTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Keyspace == nil { + m.Keyspace = &topodata.Keyspace{} + } + if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -26772,7 +48907,7 @@ func (m *PingTabletResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { +func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26795,10 +48930,10 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PlannedReparentShardRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SetKeyspaceServedFromRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PlannedReparentShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetKeyspaceServedFromRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -26834,8 +48969,27 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) + } + m.TabletType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletType |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26863,13 +49017,13 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NewPrimary", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Remove", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -26879,33 +49033,17 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NewPrimary == nil { - m.NewPrimary = &topodata.TabletAlias{} - } - if err := m.NewPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: + m.Remove = bool(v != 0) + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AvoidPrimary", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceKeyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -26915,31 +49053,78 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { } b 
:= dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.AvoidPrimary == nil { - m.AvoidPrimary = &topodata.TabletAlias{} - } - if err := m.AvoidPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.SourceKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 5: + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetKeyspaceServedFromResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetKeyspaceServedFromResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetKeyspaceServedFromResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var msglen int for shift := uint(0); ; shift 
+= 7 { @@ -26966,10 +49151,10 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.WaitReplicasTimeout == nil { - m.WaitReplicasTimeout = &vttime.Duration{} + if m.Keyspace == nil { + m.Keyspace = &topodata.Keyspace{} } - if err := m.WaitReplicasTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -26995,7 +49180,7 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PlannedReparentShardResponse) UnmarshalVT(dAtA []byte) error { +func (m *SetKeyspaceShardingInfoRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27018,10 +49203,10 @@ func (m *PlannedReparentShardResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PlannedReparentShardResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SetKeyspaceShardingInfoRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PlannedReparentShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetKeyspaceShardingInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -27056,11 +49241,11 @@ func (m *PlannedReparentShardResponse) UnmarshalVT(dAtA []byte) error { } m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -27070,63 +49255,66 @@ func (m *PlannedReparentShardResponse) UnmarshalVT(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + m.Force = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PromotedPrimary", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetKeyspaceShardingInfoResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - if m.PromotedPrimary == nil { - m.PromotedPrimary = &topodata.TabletAlias{} - } - if err := m.PromotedPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 4: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetKeyspaceShardingInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetKeyspaceShardingInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -27153,8 +49341,10 @@ func (m *PlannedReparentShardResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, &logutil.Event{}) - if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.Keyspace == nil { + m.Keyspace = &topodata.Keyspace{} + } + if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -27180,7 +49370,7 @@ func (m *PlannedReparentShardResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RebuildKeyspaceGraphRequest) UnmarshalVT(dAtA []byte) error { +func (m *SetShardIsPrimaryServingRequest) UnmarshalVT(dAtA []byte) error { l := 
len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27203,10 +49393,10 @@ func (m *RebuildKeyspaceGraphRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RebuildKeyspaceGraphRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SetShardIsPrimaryServingRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RebuildKeyspaceGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetShardIsPrimaryServingRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -27243,7 +49433,7 @@ func (m *RebuildKeyspaceGraphRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -27271,11 +49461,11 @@ func (m *RebuildKeyspaceGraphRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPartial", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IsServing", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -27292,7 +49482,7 @@ func (m *RebuildKeyspaceGraphRequest) UnmarshalVT(dAtA []byte) error { break } } - m.AllowPartial = bool(v != 0) + m.IsServing = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -27315,7 +49505,7 @@ func (m *RebuildKeyspaceGraphRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RebuildKeyspaceGraphResponse) UnmarshalVT(dAtA []byte) error { +func (m *SetShardIsPrimaryServingResponse) UnmarshalVT(dAtA []byte) error 
{ l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27338,12 +49528,48 @@ func (m *RebuildKeyspaceGraphResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RebuildKeyspaceGraphResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SetShardIsPrimaryServingResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RebuildKeyspaceGraphResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetShardIsPrimaryServingResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Shard == nil { + m.Shard = &topodata.Shard{} + } + if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -27366,7 +49592,7 @@ func (m *RebuildKeyspaceGraphResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RebuildVSchemaGraphRequest) UnmarshalVT(dAtA []byte) error { +func (m *SetShardTabletControlRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27389,13 +49615,96 @@ func (m *RebuildVSchemaGraphRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RebuildVSchemaGraphRequest: wiretype end group for non-group") + return 
fmt.Errorf("proto: SetShardTabletControlRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RebuildVSchemaGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetShardTabletControlRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) + } + m.TabletType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletType |= 
topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } @@ -27427,57 +49736,78 @@ func (m *RebuildVSchemaGraphRequest) UnmarshalVT(dAtA []byte) error { } m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeniedTables", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { return io.ErrUnexpectedEOF } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RebuildVSchemaGraphResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + m.DeniedTables = append(m.DeniedTables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DisableQueryService", wireType) } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.DisableQueryService = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Remove", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RebuildVSchemaGraphResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RebuildVSchemaGraphResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Remove = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -27500,7 +49830,7 @@ func (m *RebuildVSchemaGraphResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RefreshStateRequest) UnmarshalVT(dAtA []byte) error { +func (m *SetShardTabletControlResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 
for iNdEx < l { @@ -27523,15 +49853,15 @@ func (m *RefreshStateRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RefreshStateRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SetShardTabletControlResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RefreshStateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetShardTabletControlResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -27558,10 +49888,10 @@ func (m *RefreshStateRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} + if m.Shard == nil { + m.Shard = &topodata.Shard{} } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -27587,58 +49917,7 @@ func (m *RefreshStateRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RefreshStateResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RefreshStateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
RefreshStateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RefreshStateByShardRequest) UnmarshalVT(dAtA []byte) error { +func (m *SetWritableRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27661,17 +49940,17 @@ func (m *RefreshStateByShardRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RefreshStateByShardRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SetWritableRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RefreshStateByShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetWritableRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -27681,61 +49960,33 @@ func (m *RefreshStateByShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if 
postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Writable", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -27745,24 +49996,12 @@ func (m *RefreshStateByShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex + m.Writable = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -27785,7 +50024,7 @@ func (m *RefreshStateByShardRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m 
*RefreshStateByShardResponse) UnmarshalVT(dAtA []byte) error { +func (m *SetWritableResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27808,64 +50047,12 @@ func (m *RefreshStateByShardResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RefreshStateByShardResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SetWritableResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RefreshStateByShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetWritableResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IsPartialRefresh", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IsPartialRefresh = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PartialRefreshDetails", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PartialRefreshDetails = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -27888,7 +50075,7 @@ func (m *RefreshStateByShardResponse) UnmarshalVT(dAtA []byte) 
error { } return nil } -func (m *ReloadSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *ShardReplicationAddRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27911,13 +50098,77 @@ func (m *ReloadSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReloadSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ShardReplicationAddRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReloadSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShardReplicationAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } @@ -27975,7 +50226,7 @@ func (m *ReloadSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReloadSchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *ShardReplicationAddResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27998,10 +50249,10 @@ func (m *ReloadSchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReloadSchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ShardReplicationAddResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReloadSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShardReplicationAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -28026,7 +50277,7 @@ func (m *ReloadSchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReloadSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *ShardReplicationFixRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28049,10 +50300,10 @@ func (m *ReloadSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReloadSchemaKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ShardReplicationFixRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReloadSchemaKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShardReplicationFixRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 
1: @@ -28089,7 +50340,7 @@ func (m *ReloadSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitPosition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -28117,13 +50368,13 @@ func (m *ReloadSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.WaitPosition = string(dAtA[iNdEx:postIndex]) + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludePrimary", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -28133,31 +50384,24 @@ func (m *ReloadSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.IncludePrimary = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.Concurrency = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Concurrency |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Cell = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -28180,7 +50424,7 @@ func (m *ReloadSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } 
return nil } -func (m *ReloadSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { +func (m *ShardReplicationFixResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28203,15 +50447,15 @@ func (m *ReloadSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReloadSchemaKeyspaceResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ShardReplicationFixResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReloadSchemaKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShardReplicationFixResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -28238,8 +50482,10 @@ func (m *ReloadSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, &logutil.Event{}) - if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.Error == nil { + m.Error = &topodata.ShardReplicationError{} + } + if err := m.Error.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -28265,7 +50511,7 @@ func (m *ReloadSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReloadSchemaShardRequest) UnmarshalVT(dAtA []byte) error { +func (m *ShardReplicationPositionsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28288,10 +50534,10 @@ func (m *ReloadSchemaShardRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
ReloadSchemaShardRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ShardReplicationPositionsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReloadSchemaShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShardReplicationPositionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -28358,77 +50604,6 @@ func (m *ReloadSchemaShardRequest) UnmarshalVT(dAtA []byte) error { } m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitPosition", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WaitPosition = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludePrimary", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IncludePrimary = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) - } - m.Concurrency = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Concurrency |= uint32(b&0x7F) << shift - if b < 0x80 
{ - break - } - } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -28451,7 +50626,7 @@ func (m *ReloadSchemaShardRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReloadSchemaShardResponse) UnmarshalVT(dAtA []byte) error { +func (m *ShardReplicationPositionsResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28474,15 +50649,144 @@ func (m *ReloadSchemaShardResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReloadSchemaShardResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ShardReplicationPositionsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReloadSchemaShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShardReplicationPositionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReplicationStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReplicationStatuses == nil { + m.ReplicationStatuses = make(map[string]*replicationdata.Status) + } + var mapkey string + var mapvalue *replicationdata.Status + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &replicationdata.Status{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ReplicationStatuses[mapkey] = mapvalue + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletMap", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -28509,10 +50813,105 @@ func (m *ReloadSchemaShardResponse) UnmarshalVT(dAtA 
[]byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, &logutil.Event{}) - if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.TabletMap == nil { + m.TabletMap = make(map[string]*topodata.Tablet) + } + var mapkey string + var mapvalue *topodata.Tablet + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &topodata.Tablet{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = 
entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.TabletMap[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -28536,7 +50935,7 @@ func (m *ReloadSchemaShardResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RemoveBackupRequest) UnmarshalVT(dAtA []byte) error { +func (m *ShardReplicationRemoveRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28559,10 +50958,10 @@ func (m *RemoveBackupRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RemoveBackupRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ShardReplicationRemoveRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveBackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShardReplicationRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -28631,9 +51030,9 @@ func (m *RemoveBackupRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -28643,23 +51042,27 @@ func (m *RemoveBackupRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if 
postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -28683,7 +51086,7 @@ func (m *RemoveBackupRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RemoveBackupResponse) UnmarshalVT(dAtA []byte) error { +func (m *ShardReplicationRemoveResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28706,10 +51109,10 @@ func (m *RemoveBackupResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RemoveBackupResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ShardReplicationRemoveResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveBackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShardReplicationRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -28734,7 +51137,7 @@ func (m *RemoveBackupResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RemoveKeyspaceCellRequest) UnmarshalVT(dAtA []byte) error { +func (m *SleepTabletRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28757,17 +51160,17 @@ func (m *RemoveKeyspaceCellRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RemoveKeyspaceCellRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SleepTabletRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveKeyspaceCellRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return 
fmt.Errorf("proto: SleepTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -28777,29 +51180,33 @@ func (m *RemoveKeyspaceCellRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -28809,64 +51216,28 @@ func (m *RemoveKeyspaceCellRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Cell = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Force = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) + if m.Duration == nil { + m.Duration = &vttime.Duration{} } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.Duration.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Recursive = bool(v != 0) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -28889,7 +51260,7 @@ func (m *RemoveKeyspaceCellRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RemoveKeyspaceCellResponse) UnmarshalVT(dAtA []byte) error { +func (m *SleepTabletResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28912,10 +51283,10 @@ func (m *RemoveKeyspaceCellResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RemoveKeyspaceCellResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SleepTabletResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveKeyspaceCellResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SleepTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -28940,7 +51311,7 @@ func (m *RemoveKeyspaceCellResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RemoveShardCellRequest) UnmarshalVT(dAtA []byte) error { 
+func (m *SourceShardAddRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28963,10 +51334,10 @@ func (m *RemoveShardCellRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RemoveShardCellRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SourceShardAddRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveShardCellRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SourceShardAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -29003,7 +51374,7 @@ func (m *RemoveShardCellRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29031,187 +51402,13 @@ func (m *RemoveShardCellRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ShardName = string(dAtA[iNdEx:postIndex]) + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cell = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Force = bool(v != 0) - case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Recursive = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RemoveShardCellResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RemoveShardCellResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveShardCellResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReparentTabletRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReparentTabletRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReparentTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) } - var msglen int + m.Uid = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -29221,82 +51418,14 @@ func (m *ReparentTabletRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Uid |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Tablet == nil { - m.Tablet = &topodata.TabletAlias{} - } - if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReparentTabletResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReparentTabletResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReparentTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceKeyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29324,11 +51453,11 @@ func (m *ReparentTabletResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.SourceKeyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceShard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29356,134 +51485,11 @@ func (m *ReparentTabletResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Primary", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Primary == nil { - m.Primary = &topodata.TabletAlias{} - } - if err := m.Primary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RestoreFromBackupRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RestoreFromBackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.SourceShard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BackupTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KeyRange", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29510,16 +51516,16 @@ func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.BackupTime == nil { - m.BackupTime = &vttime.Time{} + if m.KeyRange == nil { + m.KeyRange = &topodata.KeyRange{} } - if err := m.BackupTime.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.KeyRange.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RestoreToPos", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29547,28 +51553,8 @@ func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RestoreToPos = string(dAtA[iNdEx:postIndex]) + m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DryRun = bool(v != 0) default: 
iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -29591,7 +51577,7 @@ func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { +func (m *SourceShardAddResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29614,15 +51600,15 @@ func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RestoreFromBackupResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SourceShardAddResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RestoreFromBackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SourceShardAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29649,14 +51635,65 @@ func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} + if m.Shard == nil { + m.Shard = &topodata.Shard{} } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceShardDeleteRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceShardDeleteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceShardDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } @@ -29688,7 +51725,7 @@ func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { } m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } @@ -29720,9 +51757,79 @@ func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { } m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + m.Uid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Uid |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceShardDeleteResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceShardDeleteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceShardDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29749,10 +51856,10 @@ func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Event == nil { - m.Event = &logutil.Event{} + if m.Shard == nil { + m.Shard = &topodata.Shard{} } - if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29778,7 +51885,7 @@ func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RunHealthCheckRequest) UnmarshalVT(dAtA []byte) error { +func (m *StartReplicationRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29801,10 +51908,10 @@ func (m *RunHealthCheckRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunHealthCheckRequest: wiretype 
end group for non-group") + return fmt.Errorf("proto: StartReplicationRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunHealthCheckRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StartReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -29865,7 +51972,7 @@ func (m *RunHealthCheckRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RunHealthCheckResponse) UnmarshalVT(dAtA []byte) error { +func (m *StartReplicationResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29888,10 +51995,10 @@ func (m *RunHealthCheckResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunHealthCheckResponse: wiretype end group for non-group") + return fmt.Errorf("proto: StartReplicationResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunHealthCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StartReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -29916,7 +52023,7 @@ func (m *RunHealthCheckResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetKeyspaceDurabilityPolicyRequest) UnmarshalVT(dAtA []byte) error { +func (m *StopReplicationRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29939,17 +52046,17 @@ func (m *SetKeyspaceDurabilityPolicyRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyRequest: wiretype end group for non-group") + return fmt.Errorf("proto: StopReplicationRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyRequest: illegal tag %d 
(wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StopReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -29959,56 +52066,79 @@ func (m *SetKeyspaceDurabilityPolicyRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DurabilityPolicy", wireType) + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StopReplicationResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.DurabilityPolicy = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StopReplicationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StopReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -30031,7 +52161,7 @@ func (m *SetKeyspaceDurabilityPolicyRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetKeyspaceDurabilityPolicyResponse) UnmarshalVT(dAtA []byte) error { +func (m *TabletExternallyReparentedRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30054,15 +52184,15 @@ func (m *SetKeyspaceDurabilityPolicyResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyResponse: wiretype end group for non-group") + return fmt.Errorf("proto: TabletExternallyReparentedRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: TabletExternallyReparentedRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30089,10 +52219,10 @@ func (m *SetKeyspaceDurabilityPolicyResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Keyspace == nil { - m.Keyspace = &topodata.Keyspace{} + if m.Tablet == nil { + m.Tablet = &topodata.TabletAlias{} } - if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -30118,7 +52248,7 @@ func (m *SetKeyspaceDurabilityPolicyResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { +func (m *TabletExternallyReparentedResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30141,10 +52271,10 @@ func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetKeyspaceServedFromRequest: wiretype end group for non-group") + return fmt.Errorf("proto: TabletExternallyReparentedResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetKeyspaceServedFromRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: TabletExternallyReparentedResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -30180,27 +52310,8 @@ func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) - } - m.TabletType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] 
- iNdEx++ - m.TabletType |= topodata.TabletType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30228,13 +52339,13 @@ func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Remove", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewPrimary", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -30244,17 +52355,33 @@ func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Remove = bool(v != 0) - case 5: + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NewPrimary == nil { + m.NewPrimary = &topodata.TabletAlias{} + } + if err := m.NewPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceKeyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OldPrimary", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -30264,23 +52391,27 @@ func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.SourceKeyspace = string(dAtA[iNdEx:postIndex]) + if m.OldPrimary == nil { + m.OldPrimary = &topodata.TabletAlias{} + } + if err := m.OldPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -30304,7 +52435,7 @@ func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetKeyspaceServedFromResponse) UnmarshalVT(dAtA []byte) error { +func (m *UpdateCellInfoRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30327,15 +52458,47 @@ func (m *SetKeyspaceServedFromResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetKeyspaceServedFromResponse: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateCellInfoRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetKeyspaceServedFromResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UpdateCellInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30362,10 +52525,10 @@ func (m *SetKeyspaceServedFromResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Keyspace == nil { - m.Keyspace = &topodata.Keyspace{} + if m.CellInfo == nil { + m.CellInfo = &topodata.CellInfo{} } - if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -30391,7 +52554,7 @@ func (m *SetKeyspaceServedFromResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetKeyspaceShardingInfoRequest) UnmarshalVT(dAtA []byte) error { +func (m *UpdateCellInfoResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30414,15 +52577,15 @@ func (m *SetKeyspaceShardingInfoRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetKeyspaceShardingInfoRequest: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateCellInfoResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetKeyspaceShardingInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UpdateCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30450,13 
+52613,13 @@ func (m *SetKeyspaceShardingInfoRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -30466,12 +52629,28 @@ func (m *SetKeyspaceShardingInfoRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Force = bool(v != 0) + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CellInfo == nil { + m.CellInfo = &topodata.CellInfo{} + } + if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -30494,7 +52673,7 @@ func (m *SetKeyspaceShardingInfoRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetKeyspaceShardingInfoResponse) UnmarshalVT(dAtA []byte) error { +func (m *UpdateCellsAliasRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30517,15 +52696,47 @@ func (m *SetKeyspaceShardingInfoResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetKeyspaceShardingInfoResponse: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateCellsAliasRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetKeyspaceShardingInfoResponse: illegal tag %d (wire type %d)", 
fieldNum, wire) + return fmt.Errorf("proto: UpdateCellsAliasRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellsAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30552,10 +52763,10 @@ func (m *SetKeyspaceShardingInfoResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Keyspace == nil { - m.Keyspace = &topodata.Keyspace{} + if m.CellsAlias == nil { + m.CellsAlias = &topodata.CellsAlias{} } - if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.CellsAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -30581,7 +52792,7 @@ func (m *SetKeyspaceShardingInfoResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetShardIsPrimaryServingRequest) UnmarshalVT(dAtA []byte) error { +func (m *UpdateCellsAliasResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30604,15 +52815,15 @@ func (m *SetShardIsPrimaryServingRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
SetShardIsPrimaryServingRequest: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateCellsAliasResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetShardIsPrimaryServingRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UpdateCellsAliasResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30640,13 +52851,13 @@ func (m *SetShardIsPrimaryServingRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CellsAlias", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -30656,44 +52867,28 @@ func (m *SetShardIsPrimaryServingRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IsServing", wireType) + if m.CellsAlias == nil { + m.CellsAlias = &topodata.CellsAlias{} } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 
{ - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.CellsAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.IsServing = bool(v != 0) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -30716,7 +52911,7 @@ func (m *SetShardIsPrimaryServingRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetShardIsPrimaryServingResponse) UnmarshalVT(dAtA []byte) error { +func (m *ValidateRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30739,17 +52934,17 @@ func (m *SetShardIsPrimaryServingResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetShardIsPrimaryServingResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetShardIsPrimaryServingResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PingTablets", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -30759,28 +52954,12 @@ func (m *SetShardIsPrimaryServingResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Shard == nil { - m.Shard 
= &topodata.Shard{} - } - if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.PingTablets = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -30803,7 +52982,7 @@ func (m *SetShardIsPrimaryServingResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetShardTabletControlRequest) UnmarshalVT(dAtA []byte) error { +func (m *ValidateResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30826,15 +53005,15 @@ func (m *SetShardTabletControlRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetShardTabletControlRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetShardTabletControlRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30862,64 +53041,13 @@ func (m *SetShardTabletControlRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen 
:= int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) - } - m.TabletType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TabletType |= topodata.TabletType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResultsByKeyspace", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -30929,96 +53057,121 @@ func (m *SetShardTabletControlRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeniedTables", wireType) + if m.ResultsByKeyspace == nil { + m.ResultsByKeyspace = make(map[string]*ValidateKeyspaceResponse) } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue *ValidateKeyspaceResponse + for iNdEx < postIndex { + 
entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ValidateKeyspaceResponse{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - intStringLen := int(stringLen) - 
if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DeniedTables = append(m.DeniedTables, string(dAtA[iNdEx:postIndex])) + m.ResultsByKeyspace[mapkey] = mapvalue iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DisableQueryService", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DisableQueryService = bool(v != 0) - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Remove", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Remove = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -31041,7 +53194,7 @@ func (m *SetShardTabletControlRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetShardTabletControlResponse) UnmarshalVT(dAtA []byte) error { +func (m *ValidateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31064,17 +53217,17 @@ func (m *SetShardTabletControlResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetShardTabletControlResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateKeyspaceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetShardTabletControlResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateKeyspaceRequest: 
illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -31084,28 +53237,44 @@ func (m *SetShardTabletControlResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Shard == nil { - m.Shard = &topodata.Shard{} + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PingTablets", wireType) } - if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex + m.PingTablets = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -31128,7 +53297,7 @@ func (m *SetShardTabletControlResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetWritableRequest) UnmarshalVT(dAtA []byte) error { +func (m *ValidateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31151,17 +53320,17 @@ func (m *SetWritableRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
SetWritableRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateKeyspaceResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetWritableRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -31171,33 +53340,29 @@ func (m *SetWritableRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Writable", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResultsByShard", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -31207,63 +53372,121 @@ func (m *SetWritableRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Writable = bool(v != 0) - default: - iNdEx = preIndex - skippy, err 
:= skip(dAtA[iNdEx:]) - if err != nil { - return err + if msglen < 0 { + return ErrInvalidLength } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SetWritableResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.ResultsByShard == nil { + m.ResultsByShard = make(map[string]*ValidateShardResponse) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var mapkey string + var mapvalue *ValidateShardResponse + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if 
fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ValidateShardResponse{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SetWritableResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SetWritableResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + m.ResultsByShard[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -31286,7 +53509,7 @@ func (m *SetWritableResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ShardReplicationAddRequest) UnmarshalVT(dAtA []byte) error { +func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31309,10 +53532,10 @@ func (m *ShardReplicationAddRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShardReplicationAddRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateSchemaKeyspaceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return 
fmt.Errorf("proto: ShardReplicationAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateSchemaKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -31349,7 +53572,7 @@ func (m *ShardReplicationAddRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31377,13 +53600,13 @@ func (m *ShardReplicationAddRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -31393,28 +53616,52 @@ func (m *ShardReplicationAddRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength + m.IncludeViews = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipNoPrimary", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if m.TabletAlias == nil { - 
m.TabletAlias = &topodata.TabletAlias{} + m.SkipNoPrimary = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeVschema", wireType) } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex + m.IncludeVschema = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -31437,7 +53684,7 @@ func (m *ShardReplicationAddRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ShardReplicationAddResponse) UnmarshalVT(dAtA []byte) error { +func (m *ValidateSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31460,12 +53707,173 @@ func (m *ShardReplicationAddResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShardReplicationAddResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateSchemaKeyspaceResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShardReplicationAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateSchemaKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + 
} + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResultsByShard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResultsByShard == nil { + m.ResultsByShard = make(map[string]*ValidateShardResponse) + } + var mapkey string + var mapvalue *ValidateShardResponse + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ValidateShardResponse{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ResultsByShard[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -31488,7 +53896,7 @@ func (m *ShardReplicationAddResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ShardReplicationFixRequest) UnmarshalVT(dAtA []byte) error { +func (m *ValidateShardRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31511,10 +53919,10 @@ func (m *ShardReplicationFixRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShardReplicationFixRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateShardRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShardReplicationFixRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -31582,10 +53990,10 @@ func (m *ShardReplicationFixRequest) UnmarshalVT(dAtA []byte) error { m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex 
case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PingTablets", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -31595,24 +54003,12 @@ func (m *ShardReplicationFixRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cell = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.PingTablets = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -31635,7 +54031,7 @@ func (m *ShardReplicationFixRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ShardReplicationFixResponse) UnmarshalVT(dAtA []byte) error { +func (m *ValidateShardResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31658,17 +54054,17 @@ func (m *ShardReplicationFixResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShardReplicationFixResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateShardResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShardReplicationFixResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) } - var msglen int 
+ var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -31678,27 +54074,23 @@ func (m *ShardReplicationFixResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Error == nil { - m.Error = &topodata.ShardReplicationError{} - } - if err := m.Error.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -31722,7 +54114,7 @@ func (m *ShardReplicationFixResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ShardReplicationPositionsRequest) UnmarshalVT(dAtA []byte) error { +func (m *ValidateVersionKeyspaceRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31745,10 +54137,10 @@ func (m *ShardReplicationPositionsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShardReplicationPositionsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateVersionKeyspaceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShardReplicationPositionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateVersionKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -31783,38 +54175,6 @@ func (m *ShardReplicationPositionsRequest) UnmarshalVT(dAtA []byte) error { } m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -31837,7 +54197,7 @@ func (m *ShardReplicationPositionsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ShardReplicationPositionsResponse) UnmarshalVT(dAtA []byte) error { +func (m *ValidateVersionKeyspaceResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31860,17 +54220,17 @@ func (m *ShardReplicationPositionsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShardReplicationPositionsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateVersionKeyspaceResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShardReplicationPositionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateVersionKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReplicationStatuses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -31880,124 +54240,27 @@ func (m 
*ShardReplicationPositionsResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ReplicationStatuses == nil { - m.ReplicationStatuses = make(map[string]*replicationdata.Status) - } - var mapkey string - var mapvalue *replicationdata.Status - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if 
postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &replicationdata.Status{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.ReplicationStatuses[mapkey] = mapvalue + m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletMap", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResultsByShard", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -32024,11 +54287,11 @@ func (m *ShardReplicationPositionsResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletMap == nil { - m.TabletMap = make(map[string]*topodata.Tablet) + if m.ResultsByShard == nil { + m.ResultsByShard = make(map[string]*ValidateShardResponse) } var mapkey string - var mapvalue *topodata.Tablet + var mapvalue *ValidateShardResponse for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -32102,7 +54365,7 @@ func (m *ShardReplicationPositionsResponse) UnmarshalVT(dAtA []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &topodata.Tablet{} + mapvalue = &ValidateShardResponse{} if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -32122,7 +54385,7 @@ func (m *ShardReplicationPositionsResponse) UnmarshalVT(dAtA []byte) error { iNdEx += skippy } } - m.TabletMap[mapkey] = mapvalue + m.ResultsByShard[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -32146,7 +54409,7 @@ func (m *ShardReplicationPositionsResponse) UnmarshalVT(dAtA 
[]byte) error { } return nil } -func (m *ShardReplicationRemoveRequest) UnmarshalVT(dAtA []byte) error { +func (m *ValidateVersionShardRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32169,10 +54432,10 @@ func (m *ShardReplicationRemoveRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShardReplicationRemoveRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateVersionShardRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShardReplicationRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateVersionShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -32239,11 +54502,62 @@ func (m *ShardReplicationRemoveRequest) UnmarshalVT(dAtA []byte) error { } m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidateVersionShardResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidateVersionShardResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidateVersionShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -32253,27 +54567,23 @@ func (m *ShardReplicationRemoveRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -32297,7 +54607,7 @@ func (m *ShardReplicationRemoveRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m 
*ShardReplicationRemoveResponse) UnmarshalVT(dAtA []byte) error { +func (m *ValidateVSchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32320,12 +54630,128 @@ func (m *ShardReplicationRemoveResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShardReplicationRemoveResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateVSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShardReplicationRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeViews = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -32348,7 +54774,7 @@ func (m *ShardReplicationRemoveResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SleepTabletRequest) UnmarshalVT(dAtA []byte) error { +func (m *ValidateVSchemaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32371,17 +54797,17 @@ func (m *SleepTabletRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SleepTabletRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateVSchemaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SleepTabletRequest: illegal tag %d (wire type 
%d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -32391,31 +54817,27 @@ func (m *SleepTabletRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResultsByShard", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -32442,12 +54864,105 @@ func (m *SleepTabletRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Duration == nil { - m.Duration = &vttime.Duration{} + if m.ResultsByShard == nil { + m.ResultsByShard = make(map[string]*ValidateShardResponse) } - if err := m.Duration.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + var mapvalue *ValidateShardResponse + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ValidateShardResponse{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.ResultsByShard[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -32471,58 +54986,7 @@ func (m *SleepTabletRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SleepTabletResponse) UnmarshalVT(dAtA []byte) error { - l := 
len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SleepTabletResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SleepTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SourceShardAddRequest) UnmarshalVT(dAtA []byte) error { +func (m *VDiffCreateRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32545,15 +55009,15 @@ func (m *SourceShardAddRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SourceShardAddRequest: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffCreateRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SourceShardAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } var stringLen uint64 for shift := uint(0); ; 
shift += 7 { @@ -32581,11 +55045,11 @@ func (m *SourceShardAddRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetKeyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32613,13 +55077,13 @@ func (m *SourceShardAddRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.TargetKeyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) } - m.Uid = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -32629,14 +55093,27 @@ func (m *SourceShardAddRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Uid |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceKeyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceCells", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32664,11 +55141,11 @@ func (m *SourceShardAddRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SourceKeyspace = 
string(dAtA[iNdEx:postIndex]) + m.SourceCells = append(m.SourceCells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceShard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetCells", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32696,13 +55173,82 @@ func (m *SourceShardAddRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SourceShard = string(dAtA[iNdEx:postIndex]) + m.TargetCells = append(m.TargetCells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyRange", wireType) + if wireType == 0 { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.TabletTypes) == 0 { + m.TabletTypes = make([]topodata.TabletType, 0, elementCount) + } + for iNdEx < postIndex { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
m.TabletTypes = append(m.TabletTypes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) } - var msglen int + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", wireType) + } + m.TabletSelectionPreference = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -32712,29 +55258,12 @@ func (m *SourceShardAddRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.TabletSelectionPreference |= tabletmanagerdata.TabletSelectionPreference(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.KeyRange == nil { - m.KeyRange = &topodata.KeyRange{} - } - if err := m.KeyRange.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: + case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) } @@ -32766,60 +55295,28 @@ func (m *SourceShardAddRequest) UnmarshalVT(dAtA []byte) error { } m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SourceShardAddResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SourceShardAddResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SourceShardAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FilteredReplicationWaitTime", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -32846,69 +55343,78 @@ func (m *SourceShardAddResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Shard == nil { - m.Shard = &topodata.Shard{} + if m.FilteredReplicationWaitTime == nil { + m.FilteredReplicationWaitTime = &vttime.Duration{} } - if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FilteredReplicationWaitTime.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 
11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DebugQuery", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + m.DebugQuery = bool(v != 0) + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OnlyPKs", wireType) } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SourceShardDeleteRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.OnlyPKs = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateTableStats", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SourceShardDeleteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
SourceShardDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + m.UpdateTableStats = bool(v != 0) + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxExtraRowsToCompare", wireType) } - var stringLen uint64 + m.MaxExtraRowsToCompare = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -32918,29 +55424,36 @@ func (m *SourceShardDeleteRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.MaxExtraRowsToCompare |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + m.Wait = bool(v != 0) + case 16: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WaitUpdateInterval", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -32950,29 +55463,53 @@ func (m *SourceShardDeleteRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 
0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: + if m.WaitUpdateInterval == nil { + m.WaitUpdateInterval = &vttime.Duration{} + } + if err := m.WaitUpdateInterval.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoRetry", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AutoRetry = bool(v != 0) + case 18: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Verbose", wireType) } - m.Uid = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -32982,11 +55519,12 @@ func (m *SourceShardDeleteRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Uid |= int32(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } + m.Verbose = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -33009,7 +55547,7 @@ func (m *SourceShardDeleteRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SourceShardDeleteResponse) UnmarshalVT(dAtA []byte) error { +func (m *VDiffCreateResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33032,17 +55570,17 @@ func (m *SourceShardDeleteResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SourceShardDeleteResponse: wiretype end group for non-group") + return fmt.Errorf("proto: 
VDiffCreateResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SourceShardDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffCreateResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UUID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -33052,27 +55590,23 @@ func (m *SourceShardDeleteResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Shard == nil { - m.Shard = &topodata.Shard{} - } - if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.UUID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -33096,7 +55630,7 @@ func (m *SourceShardDeleteResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StartReplicationRequest) UnmarshalVT(dAtA []byte) error { +func (m *VDiffDeleteRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33119,17 +55653,17 @@ func (m *StartReplicationRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StartReplicationRequest: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffDeleteRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StartReplicationRequest: 
illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -33139,135 +55673,61 @@ func (m *StartReplicationRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StartReplicationResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetKeyspace", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StartReplicationResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StartReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StopReplicationRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StopReplicationRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StopReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.TargetKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Arg", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -33277,27 +55737,23 @@ func (m *StopReplicationRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Arg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = 
preIndex @@ -33321,58 +55777,7 @@ func (m *StopReplicationRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StopReplicationResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StopReplicationResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StopReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TabletExternallyReparentedRequest) UnmarshalVT(dAtA []byte) error { +func (m *VDiffDeleteResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33395,48 +55800,12 @@ func (m *TabletExternallyReparentedRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TabletExternallyReparentedRequest: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffDeleteResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TabletExternallyReparentedRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Tablet == nil { - m.Tablet = &topodata.TabletAlias{} - } - if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -33459,7 +55828,7 @@ func (m *TabletExternallyReparentedRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *TabletExternallyReparentedResponse) UnmarshalVT(dAtA []byte) error { +func (m *VDiffResumeRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33482,15 +55851,15 @@ func (m 
*TabletExternallyReparentedResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TabletExternallyReparentedResponse: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffResumeRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TabletExternallyReparentedResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffResumeRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -33518,11 +55887,11 @@ func (m *TabletExternallyReparentedResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetKeyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -33550,49 +55919,13 @@ func (m *TabletExternallyReparentedResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.TargetKeyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NewPrimary", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - 
} - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NewPrimary == nil { - m.NewPrimary = &topodata.TabletAlias{} - } - if err := m.NewPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OldPrimary", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -33602,27 +55935,23 @@ func (m *TabletExternallyReparentedResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.OldPrimary == nil { - m.OldPrimary = &topodata.TabletAlias{} - } - if err := m.OldPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Uuid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -33646,7 +55975,7 @@ func (m *TabletExternallyReparentedResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UpdateCellInfoRequest) UnmarshalVT(dAtA []byte) error { +func (m *VDiffResumeResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33669,80 +55998,12 @@ func (m *UpdateCellInfoRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UpdateCellInfoRequest: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffResumeResponse: wiretype end group for non-group") } if fieldNum 
<= 0 { - return fmt.Errorf("proto: UpdateCellInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffResumeResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CellInfo == nil { - m.CellInfo = &topodata.CellInfo{} - } - if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -33765,7 +56026,7 @@ func (m *UpdateCellInfoRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UpdateCellInfoResponse) UnmarshalVT(dAtA []byte) error { +func (m *VDiffShowRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33788,15 +56049,15 @@ func (m *UpdateCellInfoResponse) 
UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UpdateCellInfoResponse: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffShowRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffShowRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -33824,13 +56085,13 @@ func (m *UpdateCellInfoResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetKeyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -33840,27 +56101,55 @@ func (m *UpdateCellInfoResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.CellInfo == nil { - m.CellInfo = &topodata.CellInfo{} + m.TargetKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Arg", wireType) } - if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Arg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -33884,7 +56173,7 @@ func (m *UpdateCellInfoResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UpdateCellsAliasRequest) UnmarshalVT(dAtA []byte) error { +func (m *VDiffShowResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33898,56 +56187,24 @@ func (m *UpdateCellsAliasRequest) UnmarshalVT(dAtA []byte) error { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateCellsAliasRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateCellsAliasRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex 
< 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VDiffShowResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VDiffShowResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CellsAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletResponses", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -33974,12 +56231,105 @@ func (m *UpdateCellsAliasRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.CellsAlias == nil { - m.CellsAlias = &topodata.CellsAlias{} + if m.TabletResponses == nil { + m.TabletResponses = make(map[string]*tabletmanagerdata.VDiffResponse) } - if err := m.CellsAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + var mapvalue *tabletmanagerdata.VDiffResponse + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + 
postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &tabletmanagerdata.VDiffResponse{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.TabletResponses[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -34003,7 +56353,7 @@ func (m *UpdateCellsAliasRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UpdateCellsAliasResponse) UnmarshalVT(dAtA []byte) error { +func (m *VDiffStopRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34026,15 +56376,15 @@ func (m *UpdateCellsAliasResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UpdateCellsAliasResponse: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffStopRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateCellsAliasResponse: illegal tag %d (wire type %d)", fieldNum, 
wire) + return fmt.Errorf("proto: VDiffStopRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34062,13 +56412,13 @@ func (m *UpdateCellsAliasResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CellsAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetKeyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -34078,27 +56428,55 @@ func (m *UpdateCellsAliasResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.CellsAlias == nil { - m.CellsAlias = &topodata.CellsAlias{} + m.TargetKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) } - if err := m.CellsAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Uuid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -34122,7 +56500,7 @@ func (m *UpdateCellsAliasResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ValidateRequest) UnmarshalVT(dAtA []byte) error { +func (m *VDiffStopResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34145,32 +56523,12 @@ func (m *ValidateRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidateRequest: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffStopResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffStopResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PingTablets", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PingTablets = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -34193,7 +56551,7 @@ func (m *ValidateRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ValidateResponse) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowDeleteRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34216,15 +56574,15 @@ func (m *ValidateResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType 
== 4 { - return fmt.Errorf("proto: ValidateResponse: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowDeleteRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34252,13 +56610,13 @@ func (m *ValidateResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResultsByKeyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -34268,121 +56626,64 @@ func (m *ValidateResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResultsByKeyspace == nil { - m.ResultsByKeyspace = make(map[string]*ValidateKeyspaceResponse) + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepData", wireType) } - var mapkey string - var 
mapvalue *ValidateKeyspaceResponse - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &ValidateKeyspaceResponse{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.ResultsByKeyspace[mapkey] = mapvalue - iNdEx = postIndex + m.KeepData = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepRoutingRules", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.KeepRoutingRules = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -34405,7 +56706,7 @@ func (m *ValidateResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ValidateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowDeleteResponse_TabletInfo) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34428,17 +56729,17 @@ func (m *ValidateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidateKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowDeleteResponse_TabletInfo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowDeleteResponse_TabletInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -34448,27 +56749,31 @@ func (m *ValidateKeyspaceRequest) UnmarshalVT(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + if m.Tablet == nil { + m.Tablet = &topodata.TabletAlias{} + } + if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PingTablets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -34485,7 +56790,7 @@ func (m *ValidateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { break } } - m.PingTablets = bool(v != 0) + m.Deleted = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -34508,7 +56813,7 @@ func (m *ValidateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ValidateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowDeleteResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34531,172 +56836,77 @@ func (m *ValidateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidateKeyspaceResponse: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowDeleteResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field Results", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResultsByShard", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ResultsByShard == nil { - m.ResultsByShard = make(map[string]*ValidateShardResponse) - } - var mapkey string - var mapvalue *ValidateShardResponse - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 
0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &ValidateShardResponse{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Summary = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Details = append(m.Details, &WorkflowDeleteResponse_TabletInfo{}) + if err := m.Details[len(m.Details)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.ResultsByShard[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -34720,7 +56930,7 @@ func (m *ValidateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowStatusRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34743,10 +56953,10 @@ func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidateSchemaKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowStatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateSchemaKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -34783,7 +56993,7 @@ func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34811,13 +57021,64 @@ func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExcludeTables = 
append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowStatusResponse_TableCopyState) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowStatusResponse_TableCopyState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowStatusResponse_TableCopyState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RowsCopied", wireType) } - var v int + m.RowsCopied = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -34827,17 +57088,46 @@ func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.RowsCopied |= int64(b&0x7F) << shift if b < 0x80 { break } } - m.IncludeViews = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RowsTotal", wireType) + } + m.RowsTotal = 0 + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RowsTotal |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field RowsPercentage", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.RowsPercentage = float32(math.Float32frombits(v)) case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SkipNoPrimary", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BytesCopied", wireType) } - var v int + m.BytesCopied = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -34847,17 +57137,16 @@ func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.BytesCopied |= int64(b&0x7F) << shift if b < 0x80 { break } } - m.SkipNoPrimary = bool(v != 0) case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeVschema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BytesTotal", wireType) } - var v int + m.BytesTotal = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -34867,12 +57156,22 @@ func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.BytesTotal |= int64(b&0x7F) << shift if b < 0x80 { break } } - m.IncludeVschema = bool(v != 0) + case 6: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field BytesPercentage", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.BytesPercentage = float32(math.Float32frombits(v)) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) 
@@ -34895,7 +57194,7 @@ func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ValidateSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowStatusResponse_ShardStreamState) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34918,17 +57217,17 @@ func (m *ValidateSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidateSchemaKeyspaceResponse: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowStatusResponse_ShardStreamState: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateSchemaKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowStatusResponse_ShardStreamState: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) } - var stringLen uint64 + m.Id = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -34938,27 +57237,14 @@ func (m *ValidateSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Id |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResultsByShard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) } var 
msglen int for shift := uint(0); ; shift += 7 { @@ -34985,160 +57271,16 @@ func (m *ValidateSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResultsByShard == nil { - m.ResultsByShard = make(map[string]*ValidateShardResponse) - } - var mapkey string - var mapvalue *ValidateShardResponse - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &ValidateShardResponse{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx 
= entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if m.Tablet == nil { + m.Tablet = &topodata.TabletAlias{} } - m.ResultsByShard[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { + if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ValidateShardRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValidateShardRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceShard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35166,11 +57308,11 @@ func (m *ValidateShardRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + 
m.SourceShard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35198,13 +57340,13 @@ func (m *ValidateShardRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.Position = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PingTablets", wireType) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -35214,66 +57356,27 @@ func (m *ValidateShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.PingTablets = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ValidateShardResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValidateShardResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Status = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35301,7 +57404,7 @@ func (m *ValidateShardResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) + m.Info = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -35325,7 +57428,7 @@ func (m *ValidateShardResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ValidateVersionKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowStatusResponse_ShardStreams) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35348,17 +57451,17 @@ func (m *ValidateVersionKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
ValidateVersionKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowStatusResponse_ShardStreams: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateVersionKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowStatusResponse_ShardStreams: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -35368,23 +57471,25 @@ func (m *ValidateVersionKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Streams = append(m.Streams, &WorkflowStatusResponse_ShardStreamState{}) + if err := m.Streams[len(m.Streams)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -35408,7 +57513,7 @@ func (m *ValidateVersionKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ValidateVersionKeyspaceResponse) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowStatusResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35431,17 +57536,17 @@ func (m *ValidateVersionKeyspaceResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
ValidateVersionKeyspaceResponse: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowStatusResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateVersionKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TableCopyState", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -35451,27 +57556,124 @@ func (m *ValidateVersionKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TableCopyState == nil { + m.TableCopyState = make(map[string]*WorkflowStatusResponse_TableCopyState) + } + var mapkey string + var mapvalue *WorkflowStatusResponse_TableCopyState + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &WorkflowStatusResponse_TableCopyState{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.TableCopyState[mapkey] = mapvalue iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResultsByShard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShardStreams", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35498,11 +57700,11 @@ func (m *ValidateVersionKeyspaceResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResultsByShard == 
nil { - m.ResultsByShard = make(map[string]*ValidateShardResponse) + if m.ShardStreams == nil { + m.ShardStreams = make(map[string]*WorkflowStatusResponse_ShardStreams) } var mapkey string - var mapvalue *ValidateShardResponse + var mapvalue *WorkflowStatusResponse_ShardStreams for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -35576,7 +57778,7 @@ func (m *ValidateVersionKeyspaceResponse) UnmarshalVT(dAtA []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &ValidateShardResponse{} + mapvalue = &WorkflowStatusResponse_ShardStreams{} if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -35596,7 +57798,39 @@ func (m *ValidateVersionKeyspaceResponse) UnmarshalVT(dAtA []byte) error { iNdEx += skippy } } - m.ResultsByShard[mapkey] = mapvalue + m.ShardStreams[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrafficState", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TrafficState = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -35620,7 +57854,7 @@ func (m *ValidateVersionKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ValidateVersionShardRequest) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowSwitchTrafficRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35643,10 +57877,10 @@ func (m *ValidateVersionShardRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire 
>> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidateVersionShardRequest: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowSwitchTrafficRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateVersionShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowSwitchTrafficRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -35683,7 +57917,7 @@ func (m *ValidateVersionShardRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35711,62 +57945,11 @@ func (m *ValidateVersionShardRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ValidateVersionShardResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValidateVersionShardResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateVersionShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35794,64 +57977,82 @@ func (m *ValidateVersionShardResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ValidateVSchemaRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + case 4: + if wireType == 0 { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.TabletTypes) == 0 { + m.TabletTypes = make([]topodata.TabletType, 0, elementCount) + } + for iNdEx < postIndex { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: 
ValidateVSchemaRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicationLagAllowed", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -35861,29 +58062,33 @@ func (m *ValidateVSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + if m.MaxReplicationLagAllowed == nil { + m.MaxReplicationLagAllowed = &vttime.Duration{} + } + if err := m.MaxReplicationLagAllowed.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EnableReverseReplication", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -35893,29 +58098,36 @@ func (m *ValidateVSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLength + m.EnableReverseReplication = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Direction", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + m.Direction = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Direction |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -35925,27 +58137,31 @@ func (m *ValidateVSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) + if m.Timeout == nil { + m.Timeout = &vttime.Duration{} + } + if err := m.Timeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 9: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -35962,7 +58178,27 @@ func (m *ValidateVSchemaRequest) UnmarshalVT(dAtA []byte) error { break } } - m.IncludeViews = bool(v != 0) + m.DryRun = bool(v != 
0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitializeTargetSequences", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.InitializeTargetSequences = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -35985,7 +58221,7 @@ func (m *ValidateVSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ValidateVSchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowSwitchTrafficResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -36008,15 +58244,15 @@ func (m *ValidateVSchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidateVSchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowSwitchTrafficResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowSwitchTrafficResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -36044,13 +58280,13 @@ func (m *ValidateVSchemaResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) + m.Summary = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResultsByShard", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field StartState", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -36060,120 +58296,87 @@ func (m *ValidateVSchemaResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResultsByShard == nil { - m.ResultsByShard = make(map[string]*ValidateShardResponse) + m.StartState = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentState", wireType) } - var mapkey string - var mapvalue *ValidateShardResponse - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return 
io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &ValidateShardResponse{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break } } - m.ResultsByShard[mapkey] = mapvalue + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CurrentState = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRunResults", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DryRunResults = append(m.DryRunResults, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -36288,7 +58491,7 @@ func (m *WorkflowUpdateRequest) UnmarshalVT(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.TabletRequest == nil { - m.TabletRequest = &tabletmanagerdata.UpdateVRWorkflowRequest{} + m.TabletRequest = &tabletmanagerdata.UpdateVReplicationWorkflowRequest{} } if err := m.TabletRequest.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err @@ -36349,7 +58552,7 @@ func (m *WorkflowUpdateResponse_TabletInfo) UnmarshalVT(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -36359,23 +58562,27 @@ func (m *WorkflowUpdateResponse_TabletInfo) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Tablet = string(dAtA[iNdEx:postIndex]) + if m.Tablet == nil { + m.Tablet = &topodata.TabletAlias{} + } + if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 0 { diff --git a/go/vt/proto/vtctlservice/vtctlservice.pb.go b/go/vt/proto/vtctlservice/vtctlservice.pb.go index d806b7c5b05..41231828a3d 100644 --- a/go/vt/proto/vtctlservice/vtctlservice.pb.go +++ b/go/vt/proto/vtctlservice/vtctlservice.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: vtctlservice.proto package vtctlservice @@ -51,7 +51,7 @@ var file_vtctlservice_proto_rawDesc = []byte{ 0x61, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x32, 0xea, 0x3d, 0x0a, 0x06, 0x56, 0x74, 0x63, 0x74, 0x6c, + 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x32, 0x93, 0x50, 0x0a, 0x06, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x0b, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, @@ -94,462 +94,609 @@ var file_vtctlservice_proto_rawDesc = []byte{ 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x5d, 0x0a, - 0x10, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, - 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 
0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x20, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, - 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x6c, 0x0a, + 0x15, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x61, 0x6e, 0x63, + 
0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x43, 0x6c, + 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, + 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x17, 0x43, + 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x6f, + 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 
0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x57, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, + 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, + 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 
0x73, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, - 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, - 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x44, 0x65, 0x6c, - 0x65, 0x74, 
0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x22, 0x2e, + 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x57, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 
0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, - 0x0a, 0x16, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, - 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x60, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, - 0x73, 0x41, 0x70, 0x70, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 
0x61, - 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, - 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x28, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, + 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, + 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, + 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, + 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, + 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, - 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x60, 0x0a, 0x11, 
0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, - 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, - 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, - 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, - 0x6f, 0x6b, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x72, 0x0a, 0x17, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x29, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 
0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, - 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, - 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, - 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, - 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, - 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, - 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x65, 0x73, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x73, 0x70, + 0x6f, 
0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x17, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, + 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 
0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, + 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, + 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, + 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, - 0x0d, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, - 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x46, - 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, - 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 
0x47, 0x65, - 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, - 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, - 0x65, 0x73, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, - 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x54, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x1f, 0x2e, 
0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, + 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, + 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x47, 0x65, 0x74, + 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x20, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, + 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 
0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, + 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x47, - 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, + 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1b, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, - 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, - 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, - 0x75, 0x6c, 0x65, 0x73, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 
0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, - 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x45, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, 0x72, - 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, + 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 
0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, 0x65, + 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x21, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 
0x6f, 0x74, + 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x47, 0x65, + 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x20, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, - 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x73, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, - 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x73, 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x47, 0x65, 0x74, - 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x57, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x73, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, - 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 
0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, - 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, - 0x74, 0x68, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, - 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, - 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, - 
0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x49, 0x6e, 0x69, 0x74, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x22, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, - 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, - 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 
0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, - 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x69, 0x0a, 0x14, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x52, 0x65, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, - 0x68, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, - 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, - 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 
0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, - 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x25, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, - 0x0c, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1e, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, + 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, + 0x0a, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, + 0x63, 0x74, 
0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, 0x65, + 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x12, 0x21, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, + 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, + 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, + 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x69, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 
0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, + 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x63, 0x0a, 0x12, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 
0x61, + 0x74, 0x61, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x17, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x12, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x4d, 0x61, 0x74, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x23, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0d, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, + 0x43, 0x72, 0x65, 0x61, 0x74, 
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x4d, 0x6f, 0x75, 0x6e, + 0x74, 0x55, 0x6e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x12, 0x21, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x55, 0x6e, 0x72, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, + 0x55, 0x6e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x68, 0x6f, + 0x77, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, + 0x75, 0x6e, 0x74, 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, + 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, + 0x0a, 0x09, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1b, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x75, 
0x6e, 0x74, 0x4c, 0x69, 0x73, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x10, 0x4d, 0x6f, 0x76, 0x65, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x22, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x63, 0x0a, 0x12, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x24, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, + 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x50, 0x69, + 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x50, 0x6c, 0x61, 0x6e, 0x6e, + 0x65, 
0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e, + 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, + 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, + 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, + 0x13, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, + 0x72, 0x61, 0x70, 0x68, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, + 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1e, 
0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, + 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, + 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, + 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 
0x74, 0x61, 0x2e, + 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, + 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, - 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x52, - 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, - 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, - 0x0c, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x1e, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, - 0x76, 0x74, 0x63, 
0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x63, 0x0a, 0x12, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, + 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x63, 0x0a, 0x12, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 
0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x11, 0x52, 0x65, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, - 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x57, - 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 
- 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x75, 0x6e, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, - 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7e, 0x0a, 0x1b, 0x53, 0x65, 0x74, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, - 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, - 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x75, 0x0a, 0x18, 0x53, 0x65, 0x74, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x6e, 0x67, 0x12, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, + 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 
0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, + 0x0d, 0x52, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1f, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, + 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, + 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x69, 0x0a, 0x14, 0x52, 0x65, 0x74, 0x72, + 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 
0x68, 0x65, 0x6d, 0x61, + 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7e, 0x0a, 0x1b, + 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2d, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x75, 0x0a, 0x18, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, - 0x0a, 0x15, 
0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, - 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1d, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x41, 0x64, 0x64, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x12, 0x25, 0x2e, 0x76, 0x74, + 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 
0x12, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x27, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, + 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x57, + 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x66, 0x0a, 0x13, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 
0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, + 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x78, 0x0a, 0x19, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, - 0x69, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x78, 0x0a, 0x19, - 0x53, 0x68, 0x61, 0x72, 
0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x53, 0x6c, 0x65, 0x65, 0x70, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x12, 
0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x60, 0x0a, 0x11, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, + 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, + 
0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x12, 0x20, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7b, 0x0a, - 0x1a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, - 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, + 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 
0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x7b, 0x0a, 0x1a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x20, 0x2e, 0x76, + 0x12, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x57, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 
0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, - 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, - 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, - 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x45, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x22, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 
0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x72, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x29, 0x2e, 0x76, 0x74, 0x63, + 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 
0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, + 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, + 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, + 0x0a, 0x0d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 
0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, - 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 
0x61, 0x74, 0x61, - 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x4e, 0x0a, 0x0b, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1d, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, + 0x43, 0x72, 0x65, 
0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x4e, 0x0a, 0x0b, 0x56, 0x44, 0x69, 0x66, 0x66, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1d, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x4e, 0x0a, 0x0b, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x12, 0x1d, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, + 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x48, 0x0a, 0x09, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x68, 0x6f, 0x77, 0x12, 0x1b, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x68, + 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x68, 0x6f, 0x77, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x56, 0x44, 0x69, + 0x66, 0x66, 0x53, 0x74, 0x6f, 0x70, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 
0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x20, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x42, 0x2b, 0x5a, 0x29, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, - 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6c, 0x6f, 0x77, 0x53, 0x77, 0x69, 0x74, 
0x63, 0x68, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x77, 0x69, 0x74, + 0x63, 0x68, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x2b, 0x5a, 0x29, + 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, + 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var file_vtctlservice_proto_goTypes = []interface{}{ @@ -562,168 +709,215 @@ var file_vtctlservice_proto_goTypes = []interface{}{ (*vtctldata.ApplyVSchemaRequest)(nil), // 6: vtctldata.ApplyVSchemaRequest (*vtctldata.BackupRequest)(nil), // 7: vtctldata.BackupRequest (*vtctldata.BackupShardRequest)(nil), // 8: vtctldata.BackupShardRequest - (*vtctldata.ChangeTabletTypeRequest)(nil), // 9: vtctldata.ChangeTabletTypeRequest - (*vtctldata.CreateKeyspaceRequest)(nil), // 10: vtctldata.CreateKeyspaceRequest - (*vtctldata.CreateShardRequest)(nil), // 11: vtctldata.CreateShardRequest - (*vtctldata.DeleteCellInfoRequest)(nil), // 12: vtctldata.DeleteCellInfoRequest - (*vtctldata.DeleteCellsAliasRequest)(nil), // 13: vtctldata.DeleteCellsAliasRequest - 
(*vtctldata.DeleteKeyspaceRequest)(nil), // 14: vtctldata.DeleteKeyspaceRequest - (*vtctldata.DeleteShardsRequest)(nil), // 15: vtctldata.DeleteShardsRequest - (*vtctldata.DeleteSrvVSchemaRequest)(nil), // 16: vtctldata.DeleteSrvVSchemaRequest - (*vtctldata.DeleteTabletsRequest)(nil), // 17: vtctldata.DeleteTabletsRequest - (*vtctldata.EmergencyReparentShardRequest)(nil), // 18: vtctldata.EmergencyReparentShardRequest - (*vtctldata.ExecuteFetchAsAppRequest)(nil), // 19: vtctldata.ExecuteFetchAsAppRequest - (*vtctldata.ExecuteFetchAsDBARequest)(nil), // 20: vtctldata.ExecuteFetchAsDBARequest - (*vtctldata.ExecuteHookRequest)(nil), // 21: vtctldata.ExecuteHookRequest - (*vtctldata.FindAllShardsInKeyspaceRequest)(nil), // 22: vtctldata.FindAllShardsInKeyspaceRequest - (*vtctldata.GetBackupsRequest)(nil), // 23: vtctldata.GetBackupsRequest - (*vtctldata.GetCellInfoRequest)(nil), // 24: vtctldata.GetCellInfoRequest - (*vtctldata.GetCellInfoNamesRequest)(nil), // 25: vtctldata.GetCellInfoNamesRequest - (*vtctldata.GetCellsAliasesRequest)(nil), // 26: vtctldata.GetCellsAliasesRequest - (*vtctldata.GetFullStatusRequest)(nil), // 27: vtctldata.GetFullStatusRequest - (*vtctldata.GetKeyspaceRequest)(nil), // 28: vtctldata.GetKeyspaceRequest - (*vtctldata.GetKeyspacesRequest)(nil), // 29: vtctldata.GetKeyspacesRequest - (*vtctldata.GetPermissionsRequest)(nil), // 30: vtctldata.GetPermissionsRequest - (*vtctldata.GetRoutingRulesRequest)(nil), // 31: vtctldata.GetRoutingRulesRequest - (*vtctldata.GetSchemaRequest)(nil), // 32: vtctldata.GetSchemaRequest - (*vtctldata.GetShardRequest)(nil), // 33: vtctldata.GetShardRequest - (*vtctldata.GetShardRoutingRulesRequest)(nil), // 34: vtctldata.GetShardRoutingRulesRequest - (*vtctldata.GetSrvKeyspaceNamesRequest)(nil), // 35: vtctldata.GetSrvKeyspaceNamesRequest - (*vtctldata.GetSrvKeyspacesRequest)(nil), // 36: vtctldata.GetSrvKeyspacesRequest - (*vtctldata.UpdateThrottlerConfigRequest)(nil), // 37: 
vtctldata.UpdateThrottlerConfigRequest - (*vtctldata.GetSrvVSchemaRequest)(nil), // 38: vtctldata.GetSrvVSchemaRequest - (*vtctldata.GetSrvVSchemasRequest)(nil), // 39: vtctldata.GetSrvVSchemasRequest - (*vtctldata.GetTabletRequest)(nil), // 40: vtctldata.GetTabletRequest - (*vtctldata.GetTabletsRequest)(nil), // 41: vtctldata.GetTabletsRequest - (*vtctldata.GetTopologyPathRequest)(nil), // 42: vtctldata.GetTopologyPathRequest - (*vtctldata.GetVersionRequest)(nil), // 43: vtctldata.GetVersionRequest - (*vtctldata.GetVSchemaRequest)(nil), // 44: vtctldata.GetVSchemaRequest - (*vtctldata.GetWorkflowsRequest)(nil), // 45: vtctldata.GetWorkflowsRequest - (*vtctldata.InitShardPrimaryRequest)(nil), // 46: vtctldata.InitShardPrimaryRequest - (*vtctldata.PingTabletRequest)(nil), // 47: vtctldata.PingTabletRequest - (*vtctldata.PlannedReparentShardRequest)(nil), // 48: vtctldata.PlannedReparentShardRequest - (*vtctldata.RebuildKeyspaceGraphRequest)(nil), // 49: vtctldata.RebuildKeyspaceGraphRequest - (*vtctldata.RebuildVSchemaGraphRequest)(nil), // 50: vtctldata.RebuildVSchemaGraphRequest - (*vtctldata.RefreshStateRequest)(nil), // 51: vtctldata.RefreshStateRequest - (*vtctldata.RefreshStateByShardRequest)(nil), // 52: vtctldata.RefreshStateByShardRequest - (*vtctldata.ReloadSchemaRequest)(nil), // 53: vtctldata.ReloadSchemaRequest - (*vtctldata.ReloadSchemaKeyspaceRequest)(nil), // 54: vtctldata.ReloadSchemaKeyspaceRequest - (*vtctldata.ReloadSchemaShardRequest)(nil), // 55: vtctldata.ReloadSchemaShardRequest - (*vtctldata.RemoveBackupRequest)(nil), // 56: vtctldata.RemoveBackupRequest - (*vtctldata.RemoveKeyspaceCellRequest)(nil), // 57: vtctldata.RemoveKeyspaceCellRequest - (*vtctldata.RemoveShardCellRequest)(nil), // 58: vtctldata.RemoveShardCellRequest - (*vtctldata.ReparentTabletRequest)(nil), // 59: vtctldata.ReparentTabletRequest - (*vtctldata.RestoreFromBackupRequest)(nil), // 60: vtctldata.RestoreFromBackupRequest - (*vtctldata.RunHealthCheckRequest)(nil), // 61: 
vtctldata.RunHealthCheckRequest - (*vtctldata.SetKeyspaceDurabilityPolicyRequest)(nil), // 62: vtctldata.SetKeyspaceDurabilityPolicyRequest - (*vtctldata.SetShardIsPrimaryServingRequest)(nil), // 63: vtctldata.SetShardIsPrimaryServingRequest - (*vtctldata.SetShardTabletControlRequest)(nil), // 64: vtctldata.SetShardTabletControlRequest - (*vtctldata.SetWritableRequest)(nil), // 65: vtctldata.SetWritableRequest - (*vtctldata.ShardReplicationAddRequest)(nil), // 66: vtctldata.ShardReplicationAddRequest - (*vtctldata.ShardReplicationFixRequest)(nil), // 67: vtctldata.ShardReplicationFixRequest - (*vtctldata.ShardReplicationPositionsRequest)(nil), // 68: vtctldata.ShardReplicationPositionsRequest - (*vtctldata.ShardReplicationRemoveRequest)(nil), // 69: vtctldata.ShardReplicationRemoveRequest - (*vtctldata.SleepTabletRequest)(nil), // 70: vtctldata.SleepTabletRequest - (*vtctldata.SourceShardAddRequest)(nil), // 71: vtctldata.SourceShardAddRequest - (*vtctldata.SourceShardDeleteRequest)(nil), // 72: vtctldata.SourceShardDeleteRequest - (*vtctldata.StartReplicationRequest)(nil), // 73: vtctldata.StartReplicationRequest - (*vtctldata.StopReplicationRequest)(nil), // 74: vtctldata.StopReplicationRequest - (*vtctldata.TabletExternallyReparentedRequest)(nil), // 75: vtctldata.TabletExternallyReparentedRequest - (*vtctldata.UpdateCellInfoRequest)(nil), // 76: vtctldata.UpdateCellInfoRequest - (*vtctldata.UpdateCellsAliasRequest)(nil), // 77: vtctldata.UpdateCellsAliasRequest - (*vtctldata.ValidateRequest)(nil), // 78: vtctldata.ValidateRequest - (*vtctldata.ValidateKeyspaceRequest)(nil), // 79: vtctldata.ValidateKeyspaceRequest - (*vtctldata.ValidateSchemaKeyspaceRequest)(nil), // 80: vtctldata.ValidateSchemaKeyspaceRequest - (*vtctldata.ValidateShardRequest)(nil), // 81: vtctldata.ValidateShardRequest - (*vtctldata.ValidateVersionKeyspaceRequest)(nil), // 82: vtctldata.ValidateVersionKeyspaceRequest - (*vtctldata.ValidateVersionShardRequest)(nil), // 83: 
vtctldata.ValidateVersionShardRequest - (*vtctldata.ValidateVSchemaRequest)(nil), // 84: vtctldata.ValidateVSchemaRequest - (*vtctldata.WorkflowUpdateRequest)(nil), // 85: vtctldata.WorkflowUpdateRequest - (*vtctldata.ExecuteVtctlCommandResponse)(nil), // 86: vtctldata.ExecuteVtctlCommandResponse - (*vtctldata.AddCellInfoResponse)(nil), // 87: vtctldata.AddCellInfoResponse - (*vtctldata.AddCellsAliasResponse)(nil), // 88: vtctldata.AddCellsAliasResponse - (*vtctldata.ApplyRoutingRulesResponse)(nil), // 89: vtctldata.ApplyRoutingRulesResponse - (*vtctldata.ApplySchemaResponse)(nil), // 90: vtctldata.ApplySchemaResponse - (*vtctldata.ApplyShardRoutingRulesResponse)(nil), // 91: vtctldata.ApplyShardRoutingRulesResponse - (*vtctldata.ApplyVSchemaResponse)(nil), // 92: vtctldata.ApplyVSchemaResponse - (*vtctldata.BackupResponse)(nil), // 93: vtctldata.BackupResponse - (*vtctldata.ChangeTabletTypeResponse)(nil), // 94: vtctldata.ChangeTabletTypeResponse - (*vtctldata.CreateKeyspaceResponse)(nil), // 95: vtctldata.CreateKeyspaceResponse - (*vtctldata.CreateShardResponse)(nil), // 96: vtctldata.CreateShardResponse - (*vtctldata.DeleteCellInfoResponse)(nil), // 97: vtctldata.DeleteCellInfoResponse - (*vtctldata.DeleteCellsAliasResponse)(nil), // 98: vtctldata.DeleteCellsAliasResponse - (*vtctldata.DeleteKeyspaceResponse)(nil), // 99: vtctldata.DeleteKeyspaceResponse - (*vtctldata.DeleteShardsResponse)(nil), // 100: vtctldata.DeleteShardsResponse - (*vtctldata.DeleteSrvVSchemaResponse)(nil), // 101: vtctldata.DeleteSrvVSchemaResponse - (*vtctldata.DeleteTabletsResponse)(nil), // 102: vtctldata.DeleteTabletsResponse - (*vtctldata.EmergencyReparentShardResponse)(nil), // 103: vtctldata.EmergencyReparentShardResponse - (*vtctldata.ExecuteFetchAsAppResponse)(nil), // 104: vtctldata.ExecuteFetchAsAppResponse - (*vtctldata.ExecuteFetchAsDBAResponse)(nil), // 105: vtctldata.ExecuteFetchAsDBAResponse - (*vtctldata.ExecuteHookResponse)(nil), // 106: vtctldata.ExecuteHookResponse - 
(*vtctldata.FindAllShardsInKeyspaceResponse)(nil), // 107: vtctldata.FindAllShardsInKeyspaceResponse - (*vtctldata.GetBackupsResponse)(nil), // 108: vtctldata.GetBackupsResponse - (*vtctldata.GetCellInfoResponse)(nil), // 109: vtctldata.GetCellInfoResponse - (*vtctldata.GetCellInfoNamesResponse)(nil), // 110: vtctldata.GetCellInfoNamesResponse - (*vtctldata.GetCellsAliasesResponse)(nil), // 111: vtctldata.GetCellsAliasesResponse - (*vtctldata.GetFullStatusResponse)(nil), // 112: vtctldata.GetFullStatusResponse - (*vtctldata.GetKeyspaceResponse)(nil), // 113: vtctldata.GetKeyspaceResponse - (*vtctldata.GetKeyspacesResponse)(nil), // 114: vtctldata.GetKeyspacesResponse - (*vtctldata.GetPermissionsResponse)(nil), // 115: vtctldata.GetPermissionsResponse - (*vtctldata.GetRoutingRulesResponse)(nil), // 116: vtctldata.GetRoutingRulesResponse - (*vtctldata.GetSchemaResponse)(nil), // 117: vtctldata.GetSchemaResponse - (*vtctldata.GetShardResponse)(nil), // 118: vtctldata.GetShardResponse - (*vtctldata.GetShardRoutingRulesResponse)(nil), // 119: vtctldata.GetShardRoutingRulesResponse - (*vtctldata.GetSrvKeyspaceNamesResponse)(nil), // 120: vtctldata.GetSrvKeyspaceNamesResponse - (*vtctldata.GetSrvKeyspacesResponse)(nil), // 121: vtctldata.GetSrvKeyspacesResponse - (*vtctldata.UpdateThrottlerConfigResponse)(nil), // 122: vtctldata.UpdateThrottlerConfigResponse - (*vtctldata.GetSrvVSchemaResponse)(nil), // 123: vtctldata.GetSrvVSchemaResponse - (*vtctldata.GetSrvVSchemasResponse)(nil), // 124: vtctldata.GetSrvVSchemasResponse - (*vtctldata.GetTabletResponse)(nil), // 125: vtctldata.GetTabletResponse - (*vtctldata.GetTabletsResponse)(nil), // 126: vtctldata.GetTabletsResponse - (*vtctldata.GetTopologyPathResponse)(nil), // 127: vtctldata.GetTopologyPathResponse - (*vtctldata.GetVersionResponse)(nil), // 128: vtctldata.GetVersionResponse - (*vtctldata.GetVSchemaResponse)(nil), // 129: vtctldata.GetVSchemaResponse - (*vtctldata.GetWorkflowsResponse)(nil), // 130: 
vtctldata.GetWorkflowsResponse - (*vtctldata.InitShardPrimaryResponse)(nil), // 131: vtctldata.InitShardPrimaryResponse - (*vtctldata.PingTabletResponse)(nil), // 132: vtctldata.PingTabletResponse - (*vtctldata.PlannedReparentShardResponse)(nil), // 133: vtctldata.PlannedReparentShardResponse - (*vtctldata.RebuildKeyspaceGraphResponse)(nil), // 134: vtctldata.RebuildKeyspaceGraphResponse - (*vtctldata.RebuildVSchemaGraphResponse)(nil), // 135: vtctldata.RebuildVSchemaGraphResponse - (*vtctldata.RefreshStateResponse)(nil), // 136: vtctldata.RefreshStateResponse - (*vtctldata.RefreshStateByShardResponse)(nil), // 137: vtctldata.RefreshStateByShardResponse - (*vtctldata.ReloadSchemaResponse)(nil), // 138: vtctldata.ReloadSchemaResponse - (*vtctldata.ReloadSchemaKeyspaceResponse)(nil), // 139: vtctldata.ReloadSchemaKeyspaceResponse - (*vtctldata.ReloadSchemaShardResponse)(nil), // 140: vtctldata.ReloadSchemaShardResponse - (*vtctldata.RemoveBackupResponse)(nil), // 141: vtctldata.RemoveBackupResponse - (*vtctldata.RemoveKeyspaceCellResponse)(nil), // 142: vtctldata.RemoveKeyspaceCellResponse - (*vtctldata.RemoveShardCellResponse)(nil), // 143: vtctldata.RemoveShardCellResponse - (*vtctldata.ReparentTabletResponse)(nil), // 144: vtctldata.ReparentTabletResponse - (*vtctldata.RestoreFromBackupResponse)(nil), // 145: vtctldata.RestoreFromBackupResponse - (*vtctldata.RunHealthCheckResponse)(nil), // 146: vtctldata.RunHealthCheckResponse - (*vtctldata.SetKeyspaceDurabilityPolicyResponse)(nil), // 147: vtctldata.SetKeyspaceDurabilityPolicyResponse - (*vtctldata.SetShardIsPrimaryServingResponse)(nil), // 148: vtctldata.SetShardIsPrimaryServingResponse - (*vtctldata.SetShardTabletControlResponse)(nil), // 149: vtctldata.SetShardTabletControlResponse - (*vtctldata.SetWritableResponse)(nil), // 150: vtctldata.SetWritableResponse - (*vtctldata.ShardReplicationAddResponse)(nil), // 151: vtctldata.ShardReplicationAddResponse - (*vtctldata.ShardReplicationFixResponse)(nil), // 152: 
vtctldata.ShardReplicationFixResponse - (*vtctldata.ShardReplicationPositionsResponse)(nil), // 153: vtctldata.ShardReplicationPositionsResponse - (*vtctldata.ShardReplicationRemoveResponse)(nil), // 154: vtctldata.ShardReplicationRemoveResponse - (*vtctldata.SleepTabletResponse)(nil), // 155: vtctldata.SleepTabletResponse - (*vtctldata.SourceShardAddResponse)(nil), // 156: vtctldata.SourceShardAddResponse - (*vtctldata.SourceShardDeleteResponse)(nil), // 157: vtctldata.SourceShardDeleteResponse - (*vtctldata.StartReplicationResponse)(nil), // 158: vtctldata.StartReplicationResponse - (*vtctldata.StopReplicationResponse)(nil), // 159: vtctldata.StopReplicationResponse - (*vtctldata.TabletExternallyReparentedResponse)(nil), // 160: vtctldata.TabletExternallyReparentedResponse - (*vtctldata.UpdateCellInfoResponse)(nil), // 161: vtctldata.UpdateCellInfoResponse - (*vtctldata.UpdateCellsAliasResponse)(nil), // 162: vtctldata.UpdateCellsAliasResponse - (*vtctldata.ValidateResponse)(nil), // 163: vtctldata.ValidateResponse - (*vtctldata.ValidateKeyspaceResponse)(nil), // 164: vtctldata.ValidateKeyspaceResponse - (*vtctldata.ValidateSchemaKeyspaceResponse)(nil), // 165: vtctldata.ValidateSchemaKeyspaceResponse - (*vtctldata.ValidateShardResponse)(nil), // 166: vtctldata.ValidateShardResponse - (*vtctldata.ValidateVersionKeyspaceResponse)(nil), // 167: vtctldata.ValidateVersionKeyspaceResponse - (*vtctldata.ValidateVersionShardResponse)(nil), // 168: vtctldata.ValidateVersionShardResponse - (*vtctldata.ValidateVSchemaResponse)(nil), // 169: vtctldata.ValidateVSchemaResponse - (*vtctldata.WorkflowUpdateResponse)(nil), // 170: vtctldata.WorkflowUpdateResponse + (*vtctldata.CancelSchemaMigrationRequest)(nil), // 9: vtctldata.CancelSchemaMigrationRequest + (*vtctldata.ChangeTabletTypeRequest)(nil), // 10: vtctldata.ChangeTabletTypeRequest + (*vtctldata.CleanupSchemaMigrationRequest)(nil), // 11: vtctldata.CleanupSchemaMigrationRequest + 
(*vtctldata.CompleteSchemaMigrationRequest)(nil), // 12: vtctldata.CompleteSchemaMigrationRequest + (*vtctldata.CreateKeyspaceRequest)(nil), // 13: vtctldata.CreateKeyspaceRequest + (*vtctldata.CreateShardRequest)(nil), // 14: vtctldata.CreateShardRequest + (*vtctldata.DeleteCellInfoRequest)(nil), // 15: vtctldata.DeleteCellInfoRequest + (*vtctldata.DeleteCellsAliasRequest)(nil), // 16: vtctldata.DeleteCellsAliasRequest + (*vtctldata.DeleteKeyspaceRequest)(nil), // 17: vtctldata.DeleteKeyspaceRequest + (*vtctldata.DeleteShardsRequest)(nil), // 18: vtctldata.DeleteShardsRequest + (*vtctldata.DeleteSrvVSchemaRequest)(nil), // 19: vtctldata.DeleteSrvVSchemaRequest + (*vtctldata.DeleteTabletsRequest)(nil), // 20: vtctldata.DeleteTabletsRequest + (*vtctldata.EmergencyReparentShardRequest)(nil), // 21: vtctldata.EmergencyReparentShardRequest + (*vtctldata.ExecuteFetchAsAppRequest)(nil), // 22: vtctldata.ExecuteFetchAsAppRequest + (*vtctldata.ExecuteFetchAsDBARequest)(nil), // 23: vtctldata.ExecuteFetchAsDBARequest + (*vtctldata.ExecuteHookRequest)(nil), // 24: vtctldata.ExecuteHookRequest + (*vtctldata.FindAllShardsInKeyspaceRequest)(nil), // 25: vtctldata.FindAllShardsInKeyspaceRequest + (*vtctldata.GetBackupsRequest)(nil), // 26: vtctldata.GetBackupsRequest + (*vtctldata.GetCellInfoRequest)(nil), // 27: vtctldata.GetCellInfoRequest + (*vtctldata.GetCellInfoNamesRequest)(nil), // 28: vtctldata.GetCellInfoNamesRequest + (*vtctldata.GetCellsAliasesRequest)(nil), // 29: vtctldata.GetCellsAliasesRequest + (*vtctldata.GetFullStatusRequest)(nil), // 30: vtctldata.GetFullStatusRequest + (*vtctldata.GetKeyspaceRequest)(nil), // 31: vtctldata.GetKeyspaceRequest + (*vtctldata.GetKeyspacesRequest)(nil), // 32: vtctldata.GetKeyspacesRequest + (*vtctldata.GetPermissionsRequest)(nil), // 33: vtctldata.GetPermissionsRequest + (*vtctldata.GetRoutingRulesRequest)(nil), // 34: vtctldata.GetRoutingRulesRequest + (*vtctldata.GetSchemaRequest)(nil), // 35: vtctldata.GetSchemaRequest + 
(*vtctldata.GetSchemaMigrationsRequest)(nil), // 36: vtctldata.GetSchemaMigrationsRequest + (*vtctldata.GetShardRequest)(nil), // 37: vtctldata.GetShardRequest + (*vtctldata.GetShardRoutingRulesRequest)(nil), // 38: vtctldata.GetShardRoutingRulesRequest + (*vtctldata.GetSrvKeyspaceNamesRequest)(nil), // 39: vtctldata.GetSrvKeyspaceNamesRequest + (*vtctldata.GetSrvKeyspacesRequest)(nil), // 40: vtctldata.GetSrvKeyspacesRequest + (*vtctldata.UpdateThrottlerConfigRequest)(nil), // 41: vtctldata.UpdateThrottlerConfigRequest + (*vtctldata.GetSrvVSchemaRequest)(nil), // 42: vtctldata.GetSrvVSchemaRequest + (*vtctldata.GetSrvVSchemasRequest)(nil), // 43: vtctldata.GetSrvVSchemasRequest + (*vtctldata.GetTabletRequest)(nil), // 44: vtctldata.GetTabletRequest + (*vtctldata.GetTabletsRequest)(nil), // 45: vtctldata.GetTabletsRequest + (*vtctldata.GetTopologyPathRequest)(nil), // 46: vtctldata.GetTopologyPathRequest + (*vtctldata.GetVersionRequest)(nil), // 47: vtctldata.GetVersionRequest + (*vtctldata.GetVSchemaRequest)(nil), // 48: vtctldata.GetVSchemaRequest + (*vtctldata.GetWorkflowsRequest)(nil), // 49: vtctldata.GetWorkflowsRequest + (*vtctldata.InitShardPrimaryRequest)(nil), // 50: vtctldata.InitShardPrimaryRequest + (*vtctldata.LaunchSchemaMigrationRequest)(nil), // 51: vtctldata.LaunchSchemaMigrationRequest + (*vtctldata.LookupVindexCreateRequest)(nil), // 52: vtctldata.LookupVindexCreateRequest + (*vtctldata.LookupVindexExternalizeRequest)(nil), // 53: vtctldata.LookupVindexExternalizeRequest + (*vtctldata.MaterializeCreateRequest)(nil), // 54: vtctldata.MaterializeCreateRequest + (*vtctldata.MigrateCreateRequest)(nil), // 55: vtctldata.MigrateCreateRequest + (*vtctldata.MountRegisterRequest)(nil), // 56: vtctldata.MountRegisterRequest + (*vtctldata.MountUnregisterRequest)(nil), // 57: vtctldata.MountUnregisterRequest + (*vtctldata.MountShowRequest)(nil), // 58: vtctldata.MountShowRequest + (*vtctldata.MountListRequest)(nil), // 59: vtctldata.MountListRequest + 
(*vtctldata.MoveTablesCreateRequest)(nil), // 60: vtctldata.MoveTablesCreateRequest + (*vtctldata.MoveTablesCompleteRequest)(nil), // 61: vtctldata.MoveTablesCompleteRequest + (*vtctldata.PingTabletRequest)(nil), // 62: vtctldata.PingTabletRequest + (*vtctldata.PlannedReparentShardRequest)(nil), // 63: vtctldata.PlannedReparentShardRequest + (*vtctldata.RebuildKeyspaceGraphRequest)(nil), // 64: vtctldata.RebuildKeyspaceGraphRequest + (*vtctldata.RebuildVSchemaGraphRequest)(nil), // 65: vtctldata.RebuildVSchemaGraphRequest + (*vtctldata.RefreshStateRequest)(nil), // 66: vtctldata.RefreshStateRequest + (*vtctldata.RefreshStateByShardRequest)(nil), // 67: vtctldata.RefreshStateByShardRequest + (*vtctldata.ReloadSchemaRequest)(nil), // 68: vtctldata.ReloadSchemaRequest + (*vtctldata.ReloadSchemaKeyspaceRequest)(nil), // 69: vtctldata.ReloadSchemaKeyspaceRequest + (*vtctldata.ReloadSchemaShardRequest)(nil), // 70: vtctldata.ReloadSchemaShardRequest + (*vtctldata.RemoveBackupRequest)(nil), // 71: vtctldata.RemoveBackupRequest + (*vtctldata.RemoveKeyspaceCellRequest)(nil), // 72: vtctldata.RemoveKeyspaceCellRequest + (*vtctldata.RemoveShardCellRequest)(nil), // 73: vtctldata.RemoveShardCellRequest + (*vtctldata.ReparentTabletRequest)(nil), // 74: vtctldata.ReparentTabletRequest + (*vtctldata.ReshardCreateRequest)(nil), // 75: vtctldata.ReshardCreateRequest + (*vtctldata.RestoreFromBackupRequest)(nil), // 76: vtctldata.RestoreFromBackupRequest + (*vtctldata.RetrySchemaMigrationRequest)(nil), // 77: vtctldata.RetrySchemaMigrationRequest + (*vtctldata.RunHealthCheckRequest)(nil), // 78: vtctldata.RunHealthCheckRequest + (*vtctldata.SetKeyspaceDurabilityPolicyRequest)(nil), // 79: vtctldata.SetKeyspaceDurabilityPolicyRequest + (*vtctldata.SetShardIsPrimaryServingRequest)(nil), // 80: vtctldata.SetShardIsPrimaryServingRequest + (*vtctldata.SetShardTabletControlRequest)(nil), // 81: vtctldata.SetShardTabletControlRequest + (*vtctldata.SetWritableRequest)(nil), // 82: 
vtctldata.SetWritableRequest + (*vtctldata.ShardReplicationAddRequest)(nil), // 83: vtctldata.ShardReplicationAddRequest + (*vtctldata.ShardReplicationFixRequest)(nil), // 84: vtctldata.ShardReplicationFixRequest + (*vtctldata.ShardReplicationPositionsRequest)(nil), // 85: vtctldata.ShardReplicationPositionsRequest + (*vtctldata.ShardReplicationRemoveRequest)(nil), // 86: vtctldata.ShardReplicationRemoveRequest + (*vtctldata.SleepTabletRequest)(nil), // 87: vtctldata.SleepTabletRequest + (*vtctldata.SourceShardAddRequest)(nil), // 88: vtctldata.SourceShardAddRequest + (*vtctldata.SourceShardDeleteRequest)(nil), // 89: vtctldata.SourceShardDeleteRequest + (*vtctldata.StartReplicationRequest)(nil), // 90: vtctldata.StartReplicationRequest + (*vtctldata.StopReplicationRequest)(nil), // 91: vtctldata.StopReplicationRequest + (*vtctldata.TabletExternallyReparentedRequest)(nil), // 92: vtctldata.TabletExternallyReparentedRequest + (*vtctldata.UpdateCellInfoRequest)(nil), // 93: vtctldata.UpdateCellInfoRequest + (*vtctldata.UpdateCellsAliasRequest)(nil), // 94: vtctldata.UpdateCellsAliasRequest + (*vtctldata.ValidateRequest)(nil), // 95: vtctldata.ValidateRequest + (*vtctldata.ValidateKeyspaceRequest)(nil), // 96: vtctldata.ValidateKeyspaceRequest + (*vtctldata.ValidateSchemaKeyspaceRequest)(nil), // 97: vtctldata.ValidateSchemaKeyspaceRequest + (*vtctldata.ValidateShardRequest)(nil), // 98: vtctldata.ValidateShardRequest + (*vtctldata.ValidateVersionKeyspaceRequest)(nil), // 99: vtctldata.ValidateVersionKeyspaceRequest + (*vtctldata.ValidateVersionShardRequest)(nil), // 100: vtctldata.ValidateVersionShardRequest + (*vtctldata.ValidateVSchemaRequest)(nil), // 101: vtctldata.ValidateVSchemaRequest + (*vtctldata.VDiffCreateRequest)(nil), // 102: vtctldata.VDiffCreateRequest + (*vtctldata.VDiffDeleteRequest)(nil), // 103: vtctldata.VDiffDeleteRequest + (*vtctldata.VDiffResumeRequest)(nil), // 104: vtctldata.VDiffResumeRequest + (*vtctldata.VDiffShowRequest)(nil), // 105: 
vtctldata.VDiffShowRequest + (*vtctldata.VDiffStopRequest)(nil), // 106: vtctldata.VDiffStopRequest + (*vtctldata.WorkflowDeleteRequest)(nil), // 107: vtctldata.WorkflowDeleteRequest + (*vtctldata.WorkflowStatusRequest)(nil), // 108: vtctldata.WorkflowStatusRequest + (*vtctldata.WorkflowSwitchTrafficRequest)(nil), // 109: vtctldata.WorkflowSwitchTrafficRequest + (*vtctldata.WorkflowUpdateRequest)(nil), // 110: vtctldata.WorkflowUpdateRequest + (*vtctldata.ExecuteVtctlCommandResponse)(nil), // 111: vtctldata.ExecuteVtctlCommandResponse + (*vtctldata.AddCellInfoResponse)(nil), // 112: vtctldata.AddCellInfoResponse + (*vtctldata.AddCellsAliasResponse)(nil), // 113: vtctldata.AddCellsAliasResponse + (*vtctldata.ApplyRoutingRulesResponse)(nil), // 114: vtctldata.ApplyRoutingRulesResponse + (*vtctldata.ApplySchemaResponse)(nil), // 115: vtctldata.ApplySchemaResponse + (*vtctldata.ApplyShardRoutingRulesResponse)(nil), // 116: vtctldata.ApplyShardRoutingRulesResponse + (*vtctldata.ApplyVSchemaResponse)(nil), // 117: vtctldata.ApplyVSchemaResponse + (*vtctldata.BackupResponse)(nil), // 118: vtctldata.BackupResponse + (*vtctldata.CancelSchemaMigrationResponse)(nil), // 119: vtctldata.CancelSchemaMigrationResponse + (*vtctldata.ChangeTabletTypeResponse)(nil), // 120: vtctldata.ChangeTabletTypeResponse + (*vtctldata.CleanupSchemaMigrationResponse)(nil), // 121: vtctldata.CleanupSchemaMigrationResponse + (*vtctldata.CompleteSchemaMigrationResponse)(nil), // 122: vtctldata.CompleteSchemaMigrationResponse + (*vtctldata.CreateKeyspaceResponse)(nil), // 123: vtctldata.CreateKeyspaceResponse + (*vtctldata.CreateShardResponse)(nil), // 124: vtctldata.CreateShardResponse + (*vtctldata.DeleteCellInfoResponse)(nil), // 125: vtctldata.DeleteCellInfoResponse + (*vtctldata.DeleteCellsAliasResponse)(nil), // 126: vtctldata.DeleteCellsAliasResponse + (*vtctldata.DeleteKeyspaceResponse)(nil), // 127: vtctldata.DeleteKeyspaceResponse + (*vtctldata.DeleteShardsResponse)(nil), // 128: 
vtctldata.DeleteShardsResponse + (*vtctldata.DeleteSrvVSchemaResponse)(nil), // 129: vtctldata.DeleteSrvVSchemaResponse + (*vtctldata.DeleteTabletsResponse)(nil), // 130: vtctldata.DeleteTabletsResponse + (*vtctldata.EmergencyReparentShardResponse)(nil), // 131: vtctldata.EmergencyReparentShardResponse + (*vtctldata.ExecuteFetchAsAppResponse)(nil), // 132: vtctldata.ExecuteFetchAsAppResponse + (*vtctldata.ExecuteFetchAsDBAResponse)(nil), // 133: vtctldata.ExecuteFetchAsDBAResponse + (*vtctldata.ExecuteHookResponse)(nil), // 134: vtctldata.ExecuteHookResponse + (*vtctldata.FindAllShardsInKeyspaceResponse)(nil), // 135: vtctldata.FindAllShardsInKeyspaceResponse + (*vtctldata.GetBackupsResponse)(nil), // 136: vtctldata.GetBackupsResponse + (*vtctldata.GetCellInfoResponse)(nil), // 137: vtctldata.GetCellInfoResponse + (*vtctldata.GetCellInfoNamesResponse)(nil), // 138: vtctldata.GetCellInfoNamesResponse + (*vtctldata.GetCellsAliasesResponse)(nil), // 139: vtctldata.GetCellsAliasesResponse + (*vtctldata.GetFullStatusResponse)(nil), // 140: vtctldata.GetFullStatusResponse + (*vtctldata.GetKeyspaceResponse)(nil), // 141: vtctldata.GetKeyspaceResponse + (*vtctldata.GetKeyspacesResponse)(nil), // 142: vtctldata.GetKeyspacesResponse + (*vtctldata.GetPermissionsResponse)(nil), // 143: vtctldata.GetPermissionsResponse + (*vtctldata.GetRoutingRulesResponse)(nil), // 144: vtctldata.GetRoutingRulesResponse + (*vtctldata.GetSchemaResponse)(nil), // 145: vtctldata.GetSchemaResponse + (*vtctldata.GetSchemaMigrationsResponse)(nil), // 146: vtctldata.GetSchemaMigrationsResponse + (*vtctldata.GetShardResponse)(nil), // 147: vtctldata.GetShardResponse + (*vtctldata.GetShardRoutingRulesResponse)(nil), // 148: vtctldata.GetShardRoutingRulesResponse + (*vtctldata.GetSrvKeyspaceNamesResponse)(nil), // 149: vtctldata.GetSrvKeyspaceNamesResponse + (*vtctldata.GetSrvKeyspacesResponse)(nil), // 150: vtctldata.GetSrvKeyspacesResponse + (*vtctldata.UpdateThrottlerConfigResponse)(nil), // 151: 
vtctldata.UpdateThrottlerConfigResponse + (*vtctldata.GetSrvVSchemaResponse)(nil), // 152: vtctldata.GetSrvVSchemaResponse + (*vtctldata.GetSrvVSchemasResponse)(nil), // 153: vtctldata.GetSrvVSchemasResponse + (*vtctldata.GetTabletResponse)(nil), // 154: vtctldata.GetTabletResponse + (*vtctldata.GetTabletsResponse)(nil), // 155: vtctldata.GetTabletsResponse + (*vtctldata.GetTopologyPathResponse)(nil), // 156: vtctldata.GetTopologyPathResponse + (*vtctldata.GetVersionResponse)(nil), // 157: vtctldata.GetVersionResponse + (*vtctldata.GetVSchemaResponse)(nil), // 158: vtctldata.GetVSchemaResponse + (*vtctldata.GetWorkflowsResponse)(nil), // 159: vtctldata.GetWorkflowsResponse + (*vtctldata.InitShardPrimaryResponse)(nil), // 160: vtctldata.InitShardPrimaryResponse + (*vtctldata.LaunchSchemaMigrationResponse)(nil), // 161: vtctldata.LaunchSchemaMigrationResponse + (*vtctldata.LookupVindexCreateResponse)(nil), // 162: vtctldata.LookupVindexCreateResponse + (*vtctldata.LookupVindexExternalizeResponse)(nil), // 163: vtctldata.LookupVindexExternalizeResponse + (*vtctldata.MaterializeCreateResponse)(nil), // 164: vtctldata.MaterializeCreateResponse + (*vtctldata.WorkflowStatusResponse)(nil), // 165: vtctldata.WorkflowStatusResponse + (*vtctldata.MountRegisterResponse)(nil), // 166: vtctldata.MountRegisterResponse + (*vtctldata.MountUnregisterResponse)(nil), // 167: vtctldata.MountUnregisterResponse + (*vtctldata.MountShowResponse)(nil), // 168: vtctldata.MountShowResponse + (*vtctldata.MountListResponse)(nil), // 169: vtctldata.MountListResponse + (*vtctldata.MoveTablesCompleteResponse)(nil), // 170: vtctldata.MoveTablesCompleteResponse + (*vtctldata.PingTabletResponse)(nil), // 171: vtctldata.PingTabletResponse + (*vtctldata.PlannedReparentShardResponse)(nil), // 172: vtctldata.PlannedReparentShardResponse + (*vtctldata.RebuildKeyspaceGraphResponse)(nil), // 173: vtctldata.RebuildKeyspaceGraphResponse + (*vtctldata.RebuildVSchemaGraphResponse)(nil), // 174: 
vtctldata.RebuildVSchemaGraphResponse + (*vtctldata.RefreshStateResponse)(nil), // 175: vtctldata.RefreshStateResponse + (*vtctldata.RefreshStateByShardResponse)(nil), // 176: vtctldata.RefreshStateByShardResponse + (*vtctldata.ReloadSchemaResponse)(nil), // 177: vtctldata.ReloadSchemaResponse + (*vtctldata.ReloadSchemaKeyspaceResponse)(nil), // 178: vtctldata.ReloadSchemaKeyspaceResponse + (*vtctldata.ReloadSchemaShardResponse)(nil), // 179: vtctldata.ReloadSchemaShardResponse + (*vtctldata.RemoveBackupResponse)(nil), // 180: vtctldata.RemoveBackupResponse + (*vtctldata.RemoveKeyspaceCellResponse)(nil), // 181: vtctldata.RemoveKeyspaceCellResponse + (*vtctldata.RemoveShardCellResponse)(nil), // 182: vtctldata.RemoveShardCellResponse + (*vtctldata.ReparentTabletResponse)(nil), // 183: vtctldata.ReparentTabletResponse + (*vtctldata.RestoreFromBackupResponse)(nil), // 184: vtctldata.RestoreFromBackupResponse + (*vtctldata.RetrySchemaMigrationResponse)(nil), // 185: vtctldata.RetrySchemaMigrationResponse + (*vtctldata.RunHealthCheckResponse)(nil), // 186: vtctldata.RunHealthCheckResponse + (*vtctldata.SetKeyspaceDurabilityPolicyResponse)(nil), // 187: vtctldata.SetKeyspaceDurabilityPolicyResponse + (*vtctldata.SetShardIsPrimaryServingResponse)(nil), // 188: vtctldata.SetShardIsPrimaryServingResponse + (*vtctldata.SetShardTabletControlResponse)(nil), // 189: vtctldata.SetShardTabletControlResponse + (*vtctldata.SetWritableResponse)(nil), // 190: vtctldata.SetWritableResponse + (*vtctldata.ShardReplicationAddResponse)(nil), // 191: vtctldata.ShardReplicationAddResponse + (*vtctldata.ShardReplicationFixResponse)(nil), // 192: vtctldata.ShardReplicationFixResponse + (*vtctldata.ShardReplicationPositionsResponse)(nil), // 193: vtctldata.ShardReplicationPositionsResponse + (*vtctldata.ShardReplicationRemoveResponse)(nil), // 194: vtctldata.ShardReplicationRemoveResponse + (*vtctldata.SleepTabletResponse)(nil), // 195: vtctldata.SleepTabletResponse + 
(*vtctldata.SourceShardAddResponse)(nil), // 196: vtctldata.SourceShardAddResponse + (*vtctldata.SourceShardDeleteResponse)(nil), // 197: vtctldata.SourceShardDeleteResponse + (*vtctldata.StartReplicationResponse)(nil), // 198: vtctldata.StartReplicationResponse + (*vtctldata.StopReplicationResponse)(nil), // 199: vtctldata.StopReplicationResponse + (*vtctldata.TabletExternallyReparentedResponse)(nil), // 200: vtctldata.TabletExternallyReparentedResponse + (*vtctldata.UpdateCellInfoResponse)(nil), // 201: vtctldata.UpdateCellInfoResponse + (*vtctldata.UpdateCellsAliasResponse)(nil), // 202: vtctldata.UpdateCellsAliasResponse + (*vtctldata.ValidateResponse)(nil), // 203: vtctldata.ValidateResponse + (*vtctldata.ValidateKeyspaceResponse)(nil), // 204: vtctldata.ValidateKeyspaceResponse + (*vtctldata.ValidateSchemaKeyspaceResponse)(nil), // 205: vtctldata.ValidateSchemaKeyspaceResponse + (*vtctldata.ValidateShardResponse)(nil), // 206: vtctldata.ValidateShardResponse + (*vtctldata.ValidateVersionKeyspaceResponse)(nil), // 207: vtctldata.ValidateVersionKeyspaceResponse + (*vtctldata.ValidateVersionShardResponse)(nil), // 208: vtctldata.ValidateVersionShardResponse + (*vtctldata.ValidateVSchemaResponse)(nil), // 209: vtctldata.ValidateVSchemaResponse + (*vtctldata.VDiffCreateResponse)(nil), // 210: vtctldata.VDiffCreateResponse + (*vtctldata.VDiffDeleteResponse)(nil), // 211: vtctldata.VDiffDeleteResponse + (*vtctldata.VDiffResumeResponse)(nil), // 212: vtctldata.VDiffResumeResponse + (*vtctldata.VDiffShowResponse)(nil), // 213: vtctldata.VDiffShowResponse + (*vtctldata.VDiffStopResponse)(nil), // 214: vtctldata.VDiffStopResponse + (*vtctldata.WorkflowDeleteResponse)(nil), // 215: vtctldata.WorkflowDeleteResponse + (*vtctldata.WorkflowSwitchTrafficResponse)(nil), // 216: vtctldata.WorkflowSwitchTrafficResponse + (*vtctldata.WorkflowUpdateResponse)(nil), // 217: vtctldata.WorkflowUpdateResponse } var file_vtctlservice_proto_depIdxs = []int32{ 0, // 0: 
vtctlservice.Vtctl.ExecuteVtctlCommand:input_type -> vtctldata.ExecuteVtctlCommandRequest @@ -735,171 +929,221 @@ var file_vtctlservice_proto_depIdxs = []int32{ 6, // 6: vtctlservice.Vtctld.ApplyVSchema:input_type -> vtctldata.ApplyVSchemaRequest 7, // 7: vtctlservice.Vtctld.Backup:input_type -> vtctldata.BackupRequest 8, // 8: vtctlservice.Vtctld.BackupShard:input_type -> vtctldata.BackupShardRequest - 9, // 9: vtctlservice.Vtctld.ChangeTabletType:input_type -> vtctldata.ChangeTabletTypeRequest - 10, // 10: vtctlservice.Vtctld.CreateKeyspace:input_type -> vtctldata.CreateKeyspaceRequest - 11, // 11: vtctlservice.Vtctld.CreateShard:input_type -> vtctldata.CreateShardRequest - 12, // 12: vtctlservice.Vtctld.DeleteCellInfo:input_type -> vtctldata.DeleteCellInfoRequest - 13, // 13: vtctlservice.Vtctld.DeleteCellsAlias:input_type -> vtctldata.DeleteCellsAliasRequest - 14, // 14: vtctlservice.Vtctld.DeleteKeyspace:input_type -> vtctldata.DeleteKeyspaceRequest - 15, // 15: vtctlservice.Vtctld.DeleteShards:input_type -> vtctldata.DeleteShardsRequest - 16, // 16: vtctlservice.Vtctld.DeleteSrvVSchema:input_type -> vtctldata.DeleteSrvVSchemaRequest - 17, // 17: vtctlservice.Vtctld.DeleteTablets:input_type -> vtctldata.DeleteTabletsRequest - 18, // 18: vtctlservice.Vtctld.EmergencyReparentShard:input_type -> vtctldata.EmergencyReparentShardRequest - 19, // 19: vtctlservice.Vtctld.ExecuteFetchAsApp:input_type -> vtctldata.ExecuteFetchAsAppRequest - 20, // 20: vtctlservice.Vtctld.ExecuteFetchAsDBA:input_type -> vtctldata.ExecuteFetchAsDBARequest - 21, // 21: vtctlservice.Vtctld.ExecuteHook:input_type -> vtctldata.ExecuteHookRequest - 22, // 22: vtctlservice.Vtctld.FindAllShardsInKeyspace:input_type -> vtctldata.FindAllShardsInKeyspaceRequest - 23, // 23: vtctlservice.Vtctld.GetBackups:input_type -> vtctldata.GetBackupsRequest - 24, // 24: vtctlservice.Vtctld.GetCellInfo:input_type -> vtctldata.GetCellInfoRequest - 25, // 25: vtctlservice.Vtctld.GetCellInfoNames:input_type -> 
vtctldata.GetCellInfoNamesRequest - 26, // 26: vtctlservice.Vtctld.GetCellsAliases:input_type -> vtctldata.GetCellsAliasesRequest - 27, // 27: vtctlservice.Vtctld.GetFullStatus:input_type -> vtctldata.GetFullStatusRequest - 28, // 28: vtctlservice.Vtctld.GetKeyspace:input_type -> vtctldata.GetKeyspaceRequest - 29, // 29: vtctlservice.Vtctld.GetKeyspaces:input_type -> vtctldata.GetKeyspacesRequest - 30, // 30: vtctlservice.Vtctld.GetPermissions:input_type -> vtctldata.GetPermissionsRequest - 31, // 31: vtctlservice.Vtctld.GetRoutingRules:input_type -> vtctldata.GetRoutingRulesRequest - 32, // 32: vtctlservice.Vtctld.GetSchema:input_type -> vtctldata.GetSchemaRequest - 33, // 33: vtctlservice.Vtctld.GetShard:input_type -> vtctldata.GetShardRequest - 34, // 34: vtctlservice.Vtctld.GetShardRoutingRules:input_type -> vtctldata.GetShardRoutingRulesRequest - 35, // 35: vtctlservice.Vtctld.GetSrvKeyspaceNames:input_type -> vtctldata.GetSrvKeyspaceNamesRequest - 36, // 36: vtctlservice.Vtctld.GetSrvKeyspaces:input_type -> vtctldata.GetSrvKeyspacesRequest - 37, // 37: vtctlservice.Vtctld.UpdateThrottlerConfig:input_type -> vtctldata.UpdateThrottlerConfigRequest - 38, // 38: vtctlservice.Vtctld.GetSrvVSchema:input_type -> vtctldata.GetSrvVSchemaRequest - 39, // 39: vtctlservice.Vtctld.GetSrvVSchemas:input_type -> vtctldata.GetSrvVSchemasRequest - 40, // 40: vtctlservice.Vtctld.GetTablet:input_type -> vtctldata.GetTabletRequest - 41, // 41: vtctlservice.Vtctld.GetTablets:input_type -> vtctldata.GetTabletsRequest - 42, // 42: vtctlservice.Vtctld.GetTopologyPath:input_type -> vtctldata.GetTopologyPathRequest - 43, // 43: vtctlservice.Vtctld.GetVersion:input_type -> vtctldata.GetVersionRequest - 44, // 44: vtctlservice.Vtctld.GetVSchema:input_type -> vtctldata.GetVSchemaRequest - 45, // 45: vtctlservice.Vtctld.GetWorkflows:input_type -> vtctldata.GetWorkflowsRequest - 46, // 46: vtctlservice.Vtctld.InitShardPrimary:input_type -> vtctldata.InitShardPrimaryRequest - 47, // 47: 
vtctlservice.Vtctld.PingTablet:input_type -> vtctldata.PingTabletRequest - 48, // 48: vtctlservice.Vtctld.PlannedReparentShard:input_type -> vtctldata.PlannedReparentShardRequest - 49, // 49: vtctlservice.Vtctld.RebuildKeyspaceGraph:input_type -> vtctldata.RebuildKeyspaceGraphRequest - 50, // 50: vtctlservice.Vtctld.RebuildVSchemaGraph:input_type -> vtctldata.RebuildVSchemaGraphRequest - 51, // 51: vtctlservice.Vtctld.RefreshState:input_type -> vtctldata.RefreshStateRequest - 52, // 52: vtctlservice.Vtctld.RefreshStateByShard:input_type -> vtctldata.RefreshStateByShardRequest - 53, // 53: vtctlservice.Vtctld.ReloadSchema:input_type -> vtctldata.ReloadSchemaRequest - 54, // 54: vtctlservice.Vtctld.ReloadSchemaKeyspace:input_type -> vtctldata.ReloadSchemaKeyspaceRequest - 55, // 55: vtctlservice.Vtctld.ReloadSchemaShard:input_type -> vtctldata.ReloadSchemaShardRequest - 56, // 56: vtctlservice.Vtctld.RemoveBackup:input_type -> vtctldata.RemoveBackupRequest - 57, // 57: vtctlservice.Vtctld.RemoveKeyspaceCell:input_type -> vtctldata.RemoveKeyspaceCellRequest - 58, // 58: vtctlservice.Vtctld.RemoveShardCell:input_type -> vtctldata.RemoveShardCellRequest - 59, // 59: vtctlservice.Vtctld.ReparentTablet:input_type -> vtctldata.ReparentTabletRequest - 60, // 60: vtctlservice.Vtctld.RestoreFromBackup:input_type -> vtctldata.RestoreFromBackupRequest - 61, // 61: vtctlservice.Vtctld.RunHealthCheck:input_type -> vtctldata.RunHealthCheckRequest - 62, // 62: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:input_type -> vtctldata.SetKeyspaceDurabilityPolicyRequest - 63, // 63: vtctlservice.Vtctld.SetShardIsPrimaryServing:input_type -> vtctldata.SetShardIsPrimaryServingRequest - 64, // 64: vtctlservice.Vtctld.SetShardTabletControl:input_type -> vtctldata.SetShardTabletControlRequest - 65, // 65: vtctlservice.Vtctld.SetWritable:input_type -> vtctldata.SetWritableRequest - 66, // 66: vtctlservice.Vtctld.ShardReplicationAdd:input_type -> vtctldata.ShardReplicationAddRequest - 67, // 
67: vtctlservice.Vtctld.ShardReplicationFix:input_type -> vtctldata.ShardReplicationFixRequest - 68, // 68: vtctlservice.Vtctld.ShardReplicationPositions:input_type -> vtctldata.ShardReplicationPositionsRequest - 69, // 69: vtctlservice.Vtctld.ShardReplicationRemove:input_type -> vtctldata.ShardReplicationRemoveRequest - 70, // 70: vtctlservice.Vtctld.SleepTablet:input_type -> vtctldata.SleepTabletRequest - 71, // 71: vtctlservice.Vtctld.SourceShardAdd:input_type -> vtctldata.SourceShardAddRequest - 72, // 72: vtctlservice.Vtctld.SourceShardDelete:input_type -> vtctldata.SourceShardDeleteRequest - 73, // 73: vtctlservice.Vtctld.StartReplication:input_type -> vtctldata.StartReplicationRequest - 74, // 74: vtctlservice.Vtctld.StopReplication:input_type -> vtctldata.StopReplicationRequest - 75, // 75: vtctlservice.Vtctld.TabletExternallyReparented:input_type -> vtctldata.TabletExternallyReparentedRequest - 76, // 76: vtctlservice.Vtctld.UpdateCellInfo:input_type -> vtctldata.UpdateCellInfoRequest - 77, // 77: vtctlservice.Vtctld.UpdateCellsAlias:input_type -> vtctldata.UpdateCellsAliasRequest - 78, // 78: vtctlservice.Vtctld.Validate:input_type -> vtctldata.ValidateRequest - 79, // 79: vtctlservice.Vtctld.ValidateKeyspace:input_type -> vtctldata.ValidateKeyspaceRequest - 80, // 80: vtctlservice.Vtctld.ValidateSchemaKeyspace:input_type -> vtctldata.ValidateSchemaKeyspaceRequest - 81, // 81: vtctlservice.Vtctld.ValidateShard:input_type -> vtctldata.ValidateShardRequest - 82, // 82: vtctlservice.Vtctld.ValidateVersionKeyspace:input_type -> vtctldata.ValidateVersionKeyspaceRequest - 83, // 83: vtctlservice.Vtctld.ValidateVersionShard:input_type -> vtctldata.ValidateVersionShardRequest - 84, // 84: vtctlservice.Vtctld.ValidateVSchema:input_type -> vtctldata.ValidateVSchemaRequest - 85, // 85: vtctlservice.Vtctld.WorkflowUpdate:input_type -> vtctldata.WorkflowUpdateRequest - 86, // 86: vtctlservice.Vtctl.ExecuteVtctlCommand:output_type -> 
vtctldata.ExecuteVtctlCommandResponse - 87, // 87: vtctlservice.Vtctld.AddCellInfo:output_type -> vtctldata.AddCellInfoResponse - 88, // 88: vtctlservice.Vtctld.AddCellsAlias:output_type -> vtctldata.AddCellsAliasResponse - 89, // 89: vtctlservice.Vtctld.ApplyRoutingRules:output_type -> vtctldata.ApplyRoutingRulesResponse - 90, // 90: vtctlservice.Vtctld.ApplySchema:output_type -> vtctldata.ApplySchemaResponse - 91, // 91: vtctlservice.Vtctld.ApplyShardRoutingRules:output_type -> vtctldata.ApplyShardRoutingRulesResponse - 92, // 92: vtctlservice.Vtctld.ApplyVSchema:output_type -> vtctldata.ApplyVSchemaResponse - 93, // 93: vtctlservice.Vtctld.Backup:output_type -> vtctldata.BackupResponse - 93, // 94: vtctlservice.Vtctld.BackupShard:output_type -> vtctldata.BackupResponse - 94, // 95: vtctlservice.Vtctld.ChangeTabletType:output_type -> vtctldata.ChangeTabletTypeResponse - 95, // 96: vtctlservice.Vtctld.CreateKeyspace:output_type -> vtctldata.CreateKeyspaceResponse - 96, // 97: vtctlservice.Vtctld.CreateShard:output_type -> vtctldata.CreateShardResponse - 97, // 98: vtctlservice.Vtctld.DeleteCellInfo:output_type -> vtctldata.DeleteCellInfoResponse - 98, // 99: vtctlservice.Vtctld.DeleteCellsAlias:output_type -> vtctldata.DeleteCellsAliasResponse - 99, // 100: vtctlservice.Vtctld.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse - 100, // 101: vtctlservice.Vtctld.DeleteShards:output_type -> vtctldata.DeleteShardsResponse - 101, // 102: vtctlservice.Vtctld.DeleteSrvVSchema:output_type -> vtctldata.DeleteSrvVSchemaResponse - 102, // 103: vtctlservice.Vtctld.DeleteTablets:output_type -> vtctldata.DeleteTabletsResponse - 103, // 104: vtctlservice.Vtctld.EmergencyReparentShard:output_type -> vtctldata.EmergencyReparentShardResponse - 104, // 105: vtctlservice.Vtctld.ExecuteFetchAsApp:output_type -> vtctldata.ExecuteFetchAsAppResponse - 105, // 106: vtctlservice.Vtctld.ExecuteFetchAsDBA:output_type -> vtctldata.ExecuteFetchAsDBAResponse - 106, // 107: 
vtctlservice.Vtctld.ExecuteHook:output_type -> vtctldata.ExecuteHookResponse - 107, // 108: vtctlservice.Vtctld.FindAllShardsInKeyspace:output_type -> vtctldata.FindAllShardsInKeyspaceResponse - 108, // 109: vtctlservice.Vtctld.GetBackups:output_type -> vtctldata.GetBackupsResponse - 109, // 110: vtctlservice.Vtctld.GetCellInfo:output_type -> vtctldata.GetCellInfoResponse - 110, // 111: vtctlservice.Vtctld.GetCellInfoNames:output_type -> vtctldata.GetCellInfoNamesResponse - 111, // 112: vtctlservice.Vtctld.GetCellsAliases:output_type -> vtctldata.GetCellsAliasesResponse - 112, // 113: vtctlservice.Vtctld.GetFullStatus:output_type -> vtctldata.GetFullStatusResponse - 113, // 114: vtctlservice.Vtctld.GetKeyspace:output_type -> vtctldata.GetKeyspaceResponse - 114, // 115: vtctlservice.Vtctld.GetKeyspaces:output_type -> vtctldata.GetKeyspacesResponse - 115, // 116: vtctlservice.Vtctld.GetPermissions:output_type -> vtctldata.GetPermissionsResponse - 116, // 117: vtctlservice.Vtctld.GetRoutingRules:output_type -> vtctldata.GetRoutingRulesResponse - 117, // 118: vtctlservice.Vtctld.GetSchema:output_type -> vtctldata.GetSchemaResponse - 118, // 119: vtctlservice.Vtctld.GetShard:output_type -> vtctldata.GetShardResponse - 119, // 120: vtctlservice.Vtctld.GetShardRoutingRules:output_type -> vtctldata.GetShardRoutingRulesResponse - 120, // 121: vtctlservice.Vtctld.GetSrvKeyspaceNames:output_type -> vtctldata.GetSrvKeyspaceNamesResponse - 121, // 122: vtctlservice.Vtctld.GetSrvKeyspaces:output_type -> vtctldata.GetSrvKeyspacesResponse - 122, // 123: vtctlservice.Vtctld.UpdateThrottlerConfig:output_type -> vtctldata.UpdateThrottlerConfigResponse - 123, // 124: vtctlservice.Vtctld.GetSrvVSchema:output_type -> vtctldata.GetSrvVSchemaResponse - 124, // 125: vtctlservice.Vtctld.GetSrvVSchemas:output_type -> vtctldata.GetSrvVSchemasResponse - 125, // 126: vtctlservice.Vtctld.GetTablet:output_type -> vtctldata.GetTabletResponse - 126, // 127: 
vtctlservice.Vtctld.GetTablets:output_type -> vtctldata.GetTabletsResponse - 127, // 128: vtctlservice.Vtctld.GetTopologyPath:output_type -> vtctldata.GetTopologyPathResponse - 128, // 129: vtctlservice.Vtctld.GetVersion:output_type -> vtctldata.GetVersionResponse - 129, // 130: vtctlservice.Vtctld.GetVSchema:output_type -> vtctldata.GetVSchemaResponse - 130, // 131: vtctlservice.Vtctld.GetWorkflows:output_type -> vtctldata.GetWorkflowsResponse - 131, // 132: vtctlservice.Vtctld.InitShardPrimary:output_type -> vtctldata.InitShardPrimaryResponse - 132, // 133: vtctlservice.Vtctld.PingTablet:output_type -> vtctldata.PingTabletResponse - 133, // 134: vtctlservice.Vtctld.PlannedReparentShard:output_type -> vtctldata.PlannedReparentShardResponse - 134, // 135: vtctlservice.Vtctld.RebuildKeyspaceGraph:output_type -> vtctldata.RebuildKeyspaceGraphResponse - 135, // 136: vtctlservice.Vtctld.RebuildVSchemaGraph:output_type -> vtctldata.RebuildVSchemaGraphResponse - 136, // 137: vtctlservice.Vtctld.RefreshState:output_type -> vtctldata.RefreshStateResponse - 137, // 138: vtctlservice.Vtctld.RefreshStateByShard:output_type -> vtctldata.RefreshStateByShardResponse - 138, // 139: vtctlservice.Vtctld.ReloadSchema:output_type -> vtctldata.ReloadSchemaResponse - 139, // 140: vtctlservice.Vtctld.ReloadSchemaKeyspace:output_type -> vtctldata.ReloadSchemaKeyspaceResponse - 140, // 141: vtctlservice.Vtctld.ReloadSchemaShard:output_type -> vtctldata.ReloadSchemaShardResponse - 141, // 142: vtctlservice.Vtctld.RemoveBackup:output_type -> vtctldata.RemoveBackupResponse - 142, // 143: vtctlservice.Vtctld.RemoveKeyspaceCell:output_type -> vtctldata.RemoveKeyspaceCellResponse - 143, // 144: vtctlservice.Vtctld.RemoveShardCell:output_type -> vtctldata.RemoveShardCellResponse - 144, // 145: vtctlservice.Vtctld.ReparentTablet:output_type -> vtctldata.ReparentTabletResponse - 145, // 146: vtctlservice.Vtctld.RestoreFromBackup:output_type -> vtctldata.RestoreFromBackupResponse - 146, // 147: 
vtctlservice.Vtctld.RunHealthCheck:output_type -> vtctldata.RunHealthCheckResponse - 147, // 148: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:output_type -> vtctldata.SetKeyspaceDurabilityPolicyResponse - 148, // 149: vtctlservice.Vtctld.SetShardIsPrimaryServing:output_type -> vtctldata.SetShardIsPrimaryServingResponse - 149, // 150: vtctlservice.Vtctld.SetShardTabletControl:output_type -> vtctldata.SetShardTabletControlResponse - 150, // 151: vtctlservice.Vtctld.SetWritable:output_type -> vtctldata.SetWritableResponse - 151, // 152: vtctlservice.Vtctld.ShardReplicationAdd:output_type -> vtctldata.ShardReplicationAddResponse - 152, // 153: vtctlservice.Vtctld.ShardReplicationFix:output_type -> vtctldata.ShardReplicationFixResponse - 153, // 154: vtctlservice.Vtctld.ShardReplicationPositions:output_type -> vtctldata.ShardReplicationPositionsResponse - 154, // 155: vtctlservice.Vtctld.ShardReplicationRemove:output_type -> vtctldata.ShardReplicationRemoveResponse - 155, // 156: vtctlservice.Vtctld.SleepTablet:output_type -> vtctldata.SleepTabletResponse - 156, // 157: vtctlservice.Vtctld.SourceShardAdd:output_type -> vtctldata.SourceShardAddResponse - 157, // 158: vtctlservice.Vtctld.SourceShardDelete:output_type -> vtctldata.SourceShardDeleteResponse - 158, // 159: vtctlservice.Vtctld.StartReplication:output_type -> vtctldata.StartReplicationResponse - 159, // 160: vtctlservice.Vtctld.StopReplication:output_type -> vtctldata.StopReplicationResponse - 160, // 161: vtctlservice.Vtctld.TabletExternallyReparented:output_type -> vtctldata.TabletExternallyReparentedResponse - 161, // 162: vtctlservice.Vtctld.UpdateCellInfo:output_type -> vtctldata.UpdateCellInfoResponse - 162, // 163: vtctlservice.Vtctld.UpdateCellsAlias:output_type -> vtctldata.UpdateCellsAliasResponse - 163, // 164: vtctlservice.Vtctld.Validate:output_type -> vtctldata.ValidateResponse - 164, // 165: vtctlservice.Vtctld.ValidateKeyspace:output_type -> vtctldata.ValidateKeyspaceResponse - 165, // 166: 
vtctlservice.Vtctld.ValidateSchemaKeyspace:output_type -> vtctldata.ValidateSchemaKeyspaceResponse - 166, // 167: vtctlservice.Vtctld.ValidateShard:output_type -> vtctldata.ValidateShardResponse - 167, // 168: vtctlservice.Vtctld.ValidateVersionKeyspace:output_type -> vtctldata.ValidateVersionKeyspaceResponse - 168, // 169: vtctlservice.Vtctld.ValidateVersionShard:output_type -> vtctldata.ValidateVersionShardResponse - 169, // 170: vtctlservice.Vtctld.ValidateVSchema:output_type -> vtctldata.ValidateVSchemaResponse - 170, // 171: vtctlservice.Vtctld.WorkflowUpdate:output_type -> vtctldata.WorkflowUpdateResponse - 86, // [86:172] is the sub-list for method output_type - 0, // [0:86] is the sub-list for method input_type + 9, // 9: vtctlservice.Vtctld.CancelSchemaMigration:input_type -> vtctldata.CancelSchemaMigrationRequest + 10, // 10: vtctlservice.Vtctld.ChangeTabletType:input_type -> vtctldata.ChangeTabletTypeRequest + 11, // 11: vtctlservice.Vtctld.CleanupSchemaMigration:input_type -> vtctldata.CleanupSchemaMigrationRequest + 12, // 12: vtctlservice.Vtctld.CompleteSchemaMigration:input_type -> vtctldata.CompleteSchemaMigrationRequest + 13, // 13: vtctlservice.Vtctld.CreateKeyspace:input_type -> vtctldata.CreateKeyspaceRequest + 14, // 14: vtctlservice.Vtctld.CreateShard:input_type -> vtctldata.CreateShardRequest + 15, // 15: vtctlservice.Vtctld.DeleteCellInfo:input_type -> vtctldata.DeleteCellInfoRequest + 16, // 16: vtctlservice.Vtctld.DeleteCellsAlias:input_type -> vtctldata.DeleteCellsAliasRequest + 17, // 17: vtctlservice.Vtctld.DeleteKeyspace:input_type -> vtctldata.DeleteKeyspaceRequest + 18, // 18: vtctlservice.Vtctld.DeleteShards:input_type -> vtctldata.DeleteShardsRequest + 19, // 19: vtctlservice.Vtctld.DeleteSrvVSchema:input_type -> vtctldata.DeleteSrvVSchemaRequest + 20, // 20: vtctlservice.Vtctld.DeleteTablets:input_type -> vtctldata.DeleteTabletsRequest + 21, // 21: vtctlservice.Vtctld.EmergencyReparentShard:input_type -> 
vtctldata.EmergencyReparentShardRequest + 22, // 22: vtctlservice.Vtctld.ExecuteFetchAsApp:input_type -> vtctldata.ExecuteFetchAsAppRequest + 23, // 23: vtctlservice.Vtctld.ExecuteFetchAsDBA:input_type -> vtctldata.ExecuteFetchAsDBARequest + 24, // 24: vtctlservice.Vtctld.ExecuteHook:input_type -> vtctldata.ExecuteHookRequest + 25, // 25: vtctlservice.Vtctld.FindAllShardsInKeyspace:input_type -> vtctldata.FindAllShardsInKeyspaceRequest + 26, // 26: vtctlservice.Vtctld.GetBackups:input_type -> vtctldata.GetBackupsRequest + 27, // 27: vtctlservice.Vtctld.GetCellInfo:input_type -> vtctldata.GetCellInfoRequest + 28, // 28: vtctlservice.Vtctld.GetCellInfoNames:input_type -> vtctldata.GetCellInfoNamesRequest + 29, // 29: vtctlservice.Vtctld.GetCellsAliases:input_type -> vtctldata.GetCellsAliasesRequest + 30, // 30: vtctlservice.Vtctld.GetFullStatus:input_type -> vtctldata.GetFullStatusRequest + 31, // 31: vtctlservice.Vtctld.GetKeyspace:input_type -> vtctldata.GetKeyspaceRequest + 32, // 32: vtctlservice.Vtctld.GetKeyspaces:input_type -> vtctldata.GetKeyspacesRequest + 33, // 33: vtctlservice.Vtctld.GetPermissions:input_type -> vtctldata.GetPermissionsRequest + 34, // 34: vtctlservice.Vtctld.GetRoutingRules:input_type -> vtctldata.GetRoutingRulesRequest + 35, // 35: vtctlservice.Vtctld.GetSchema:input_type -> vtctldata.GetSchemaRequest + 36, // 36: vtctlservice.Vtctld.GetSchemaMigrations:input_type -> vtctldata.GetSchemaMigrationsRequest + 37, // 37: vtctlservice.Vtctld.GetShard:input_type -> vtctldata.GetShardRequest + 38, // 38: vtctlservice.Vtctld.GetShardRoutingRules:input_type -> vtctldata.GetShardRoutingRulesRequest + 39, // 39: vtctlservice.Vtctld.GetSrvKeyspaceNames:input_type -> vtctldata.GetSrvKeyspaceNamesRequest + 40, // 40: vtctlservice.Vtctld.GetSrvKeyspaces:input_type -> vtctldata.GetSrvKeyspacesRequest + 41, // 41: vtctlservice.Vtctld.UpdateThrottlerConfig:input_type -> vtctldata.UpdateThrottlerConfigRequest + 42, // 42: 
vtctlservice.Vtctld.GetSrvVSchema:input_type -> vtctldata.GetSrvVSchemaRequest + 43, // 43: vtctlservice.Vtctld.GetSrvVSchemas:input_type -> vtctldata.GetSrvVSchemasRequest + 44, // 44: vtctlservice.Vtctld.GetTablet:input_type -> vtctldata.GetTabletRequest + 45, // 45: vtctlservice.Vtctld.GetTablets:input_type -> vtctldata.GetTabletsRequest + 46, // 46: vtctlservice.Vtctld.GetTopologyPath:input_type -> vtctldata.GetTopologyPathRequest + 47, // 47: vtctlservice.Vtctld.GetVersion:input_type -> vtctldata.GetVersionRequest + 48, // 48: vtctlservice.Vtctld.GetVSchema:input_type -> vtctldata.GetVSchemaRequest + 49, // 49: vtctlservice.Vtctld.GetWorkflows:input_type -> vtctldata.GetWorkflowsRequest + 50, // 50: vtctlservice.Vtctld.InitShardPrimary:input_type -> vtctldata.InitShardPrimaryRequest + 51, // 51: vtctlservice.Vtctld.LaunchSchemaMigration:input_type -> vtctldata.LaunchSchemaMigrationRequest + 52, // 52: vtctlservice.Vtctld.LookupVindexCreate:input_type -> vtctldata.LookupVindexCreateRequest + 53, // 53: vtctlservice.Vtctld.LookupVindexExternalize:input_type -> vtctldata.LookupVindexExternalizeRequest + 54, // 54: vtctlservice.Vtctld.MaterializeCreate:input_type -> vtctldata.MaterializeCreateRequest + 55, // 55: vtctlservice.Vtctld.MigrateCreate:input_type -> vtctldata.MigrateCreateRequest + 56, // 56: vtctlservice.Vtctld.MountRegister:input_type -> vtctldata.MountRegisterRequest + 57, // 57: vtctlservice.Vtctld.MountUnregister:input_type -> vtctldata.MountUnregisterRequest + 58, // 58: vtctlservice.Vtctld.MountShow:input_type -> vtctldata.MountShowRequest + 59, // 59: vtctlservice.Vtctld.MountList:input_type -> vtctldata.MountListRequest + 60, // 60: vtctlservice.Vtctld.MoveTablesCreate:input_type -> vtctldata.MoveTablesCreateRequest + 61, // 61: vtctlservice.Vtctld.MoveTablesComplete:input_type -> vtctldata.MoveTablesCompleteRequest + 62, // 62: vtctlservice.Vtctld.PingTablet:input_type -> vtctldata.PingTabletRequest + 63, // 63: 
vtctlservice.Vtctld.PlannedReparentShard:input_type -> vtctldata.PlannedReparentShardRequest + 64, // 64: vtctlservice.Vtctld.RebuildKeyspaceGraph:input_type -> vtctldata.RebuildKeyspaceGraphRequest + 65, // 65: vtctlservice.Vtctld.RebuildVSchemaGraph:input_type -> vtctldata.RebuildVSchemaGraphRequest + 66, // 66: vtctlservice.Vtctld.RefreshState:input_type -> vtctldata.RefreshStateRequest + 67, // 67: vtctlservice.Vtctld.RefreshStateByShard:input_type -> vtctldata.RefreshStateByShardRequest + 68, // 68: vtctlservice.Vtctld.ReloadSchema:input_type -> vtctldata.ReloadSchemaRequest + 69, // 69: vtctlservice.Vtctld.ReloadSchemaKeyspace:input_type -> vtctldata.ReloadSchemaKeyspaceRequest + 70, // 70: vtctlservice.Vtctld.ReloadSchemaShard:input_type -> vtctldata.ReloadSchemaShardRequest + 71, // 71: vtctlservice.Vtctld.RemoveBackup:input_type -> vtctldata.RemoveBackupRequest + 72, // 72: vtctlservice.Vtctld.RemoveKeyspaceCell:input_type -> vtctldata.RemoveKeyspaceCellRequest + 73, // 73: vtctlservice.Vtctld.RemoveShardCell:input_type -> vtctldata.RemoveShardCellRequest + 74, // 74: vtctlservice.Vtctld.ReparentTablet:input_type -> vtctldata.ReparentTabletRequest + 75, // 75: vtctlservice.Vtctld.ReshardCreate:input_type -> vtctldata.ReshardCreateRequest + 76, // 76: vtctlservice.Vtctld.RestoreFromBackup:input_type -> vtctldata.RestoreFromBackupRequest + 77, // 77: vtctlservice.Vtctld.RetrySchemaMigration:input_type -> vtctldata.RetrySchemaMigrationRequest + 78, // 78: vtctlservice.Vtctld.RunHealthCheck:input_type -> vtctldata.RunHealthCheckRequest + 79, // 79: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:input_type -> vtctldata.SetKeyspaceDurabilityPolicyRequest + 80, // 80: vtctlservice.Vtctld.SetShardIsPrimaryServing:input_type -> vtctldata.SetShardIsPrimaryServingRequest + 81, // 81: vtctlservice.Vtctld.SetShardTabletControl:input_type -> vtctldata.SetShardTabletControlRequest + 82, // 82: vtctlservice.Vtctld.SetWritable:input_type -> vtctldata.SetWritableRequest + 
83, // 83: vtctlservice.Vtctld.ShardReplicationAdd:input_type -> vtctldata.ShardReplicationAddRequest + 84, // 84: vtctlservice.Vtctld.ShardReplicationFix:input_type -> vtctldata.ShardReplicationFixRequest + 85, // 85: vtctlservice.Vtctld.ShardReplicationPositions:input_type -> vtctldata.ShardReplicationPositionsRequest + 86, // 86: vtctlservice.Vtctld.ShardReplicationRemove:input_type -> vtctldata.ShardReplicationRemoveRequest + 87, // 87: vtctlservice.Vtctld.SleepTablet:input_type -> vtctldata.SleepTabletRequest + 88, // 88: vtctlservice.Vtctld.SourceShardAdd:input_type -> vtctldata.SourceShardAddRequest + 89, // 89: vtctlservice.Vtctld.SourceShardDelete:input_type -> vtctldata.SourceShardDeleteRequest + 90, // 90: vtctlservice.Vtctld.StartReplication:input_type -> vtctldata.StartReplicationRequest + 91, // 91: vtctlservice.Vtctld.StopReplication:input_type -> vtctldata.StopReplicationRequest + 92, // 92: vtctlservice.Vtctld.TabletExternallyReparented:input_type -> vtctldata.TabletExternallyReparentedRequest + 93, // 93: vtctlservice.Vtctld.UpdateCellInfo:input_type -> vtctldata.UpdateCellInfoRequest + 94, // 94: vtctlservice.Vtctld.UpdateCellsAlias:input_type -> vtctldata.UpdateCellsAliasRequest + 95, // 95: vtctlservice.Vtctld.Validate:input_type -> vtctldata.ValidateRequest + 96, // 96: vtctlservice.Vtctld.ValidateKeyspace:input_type -> vtctldata.ValidateKeyspaceRequest + 97, // 97: vtctlservice.Vtctld.ValidateSchemaKeyspace:input_type -> vtctldata.ValidateSchemaKeyspaceRequest + 98, // 98: vtctlservice.Vtctld.ValidateShard:input_type -> vtctldata.ValidateShardRequest + 99, // 99: vtctlservice.Vtctld.ValidateVersionKeyspace:input_type -> vtctldata.ValidateVersionKeyspaceRequest + 100, // 100: vtctlservice.Vtctld.ValidateVersionShard:input_type -> vtctldata.ValidateVersionShardRequest + 101, // 101: vtctlservice.Vtctld.ValidateVSchema:input_type -> vtctldata.ValidateVSchemaRequest + 102, // 102: vtctlservice.Vtctld.VDiffCreate:input_type -> 
vtctldata.VDiffCreateRequest + 103, // 103: vtctlservice.Vtctld.VDiffDelete:input_type -> vtctldata.VDiffDeleteRequest + 104, // 104: vtctlservice.Vtctld.VDiffResume:input_type -> vtctldata.VDiffResumeRequest + 105, // 105: vtctlservice.Vtctld.VDiffShow:input_type -> vtctldata.VDiffShowRequest + 106, // 106: vtctlservice.Vtctld.VDiffStop:input_type -> vtctldata.VDiffStopRequest + 107, // 107: vtctlservice.Vtctld.WorkflowDelete:input_type -> vtctldata.WorkflowDeleteRequest + 108, // 108: vtctlservice.Vtctld.WorkflowStatus:input_type -> vtctldata.WorkflowStatusRequest + 109, // 109: vtctlservice.Vtctld.WorkflowSwitchTraffic:input_type -> vtctldata.WorkflowSwitchTrafficRequest + 110, // 110: vtctlservice.Vtctld.WorkflowUpdate:input_type -> vtctldata.WorkflowUpdateRequest + 111, // 111: vtctlservice.Vtctl.ExecuteVtctlCommand:output_type -> vtctldata.ExecuteVtctlCommandResponse + 112, // 112: vtctlservice.Vtctld.AddCellInfo:output_type -> vtctldata.AddCellInfoResponse + 113, // 113: vtctlservice.Vtctld.AddCellsAlias:output_type -> vtctldata.AddCellsAliasResponse + 114, // 114: vtctlservice.Vtctld.ApplyRoutingRules:output_type -> vtctldata.ApplyRoutingRulesResponse + 115, // 115: vtctlservice.Vtctld.ApplySchema:output_type -> vtctldata.ApplySchemaResponse + 116, // 116: vtctlservice.Vtctld.ApplyShardRoutingRules:output_type -> vtctldata.ApplyShardRoutingRulesResponse + 117, // 117: vtctlservice.Vtctld.ApplyVSchema:output_type -> vtctldata.ApplyVSchemaResponse + 118, // 118: vtctlservice.Vtctld.Backup:output_type -> vtctldata.BackupResponse + 118, // 119: vtctlservice.Vtctld.BackupShard:output_type -> vtctldata.BackupResponse + 119, // 120: vtctlservice.Vtctld.CancelSchemaMigration:output_type -> vtctldata.CancelSchemaMigrationResponse + 120, // 121: vtctlservice.Vtctld.ChangeTabletType:output_type -> vtctldata.ChangeTabletTypeResponse + 121, // 122: vtctlservice.Vtctld.CleanupSchemaMigration:output_type -> vtctldata.CleanupSchemaMigrationResponse + 122, // 123: 
vtctlservice.Vtctld.CompleteSchemaMigration:output_type -> vtctldata.CompleteSchemaMigrationResponse + 123, // 124: vtctlservice.Vtctld.CreateKeyspace:output_type -> vtctldata.CreateKeyspaceResponse + 124, // 125: vtctlservice.Vtctld.CreateShard:output_type -> vtctldata.CreateShardResponse + 125, // 126: vtctlservice.Vtctld.DeleteCellInfo:output_type -> vtctldata.DeleteCellInfoResponse + 126, // 127: vtctlservice.Vtctld.DeleteCellsAlias:output_type -> vtctldata.DeleteCellsAliasResponse + 127, // 128: vtctlservice.Vtctld.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse + 128, // 129: vtctlservice.Vtctld.DeleteShards:output_type -> vtctldata.DeleteShardsResponse + 129, // 130: vtctlservice.Vtctld.DeleteSrvVSchema:output_type -> vtctldata.DeleteSrvVSchemaResponse + 130, // 131: vtctlservice.Vtctld.DeleteTablets:output_type -> vtctldata.DeleteTabletsResponse + 131, // 132: vtctlservice.Vtctld.EmergencyReparentShard:output_type -> vtctldata.EmergencyReparentShardResponse + 132, // 133: vtctlservice.Vtctld.ExecuteFetchAsApp:output_type -> vtctldata.ExecuteFetchAsAppResponse + 133, // 134: vtctlservice.Vtctld.ExecuteFetchAsDBA:output_type -> vtctldata.ExecuteFetchAsDBAResponse + 134, // 135: vtctlservice.Vtctld.ExecuteHook:output_type -> vtctldata.ExecuteHookResponse + 135, // 136: vtctlservice.Vtctld.FindAllShardsInKeyspace:output_type -> vtctldata.FindAllShardsInKeyspaceResponse + 136, // 137: vtctlservice.Vtctld.GetBackups:output_type -> vtctldata.GetBackupsResponse + 137, // 138: vtctlservice.Vtctld.GetCellInfo:output_type -> vtctldata.GetCellInfoResponse + 138, // 139: vtctlservice.Vtctld.GetCellInfoNames:output_type -> vtctldata.GetCellInfoNamesResponse + 139, // 140: vtctlservice.Vtctld.GetCellsAliases:output_type -> vtctldata.GetCellsAliasesResponse + 140, // 141: vtctlservice.Vtctld.GetFullStatus:output_type -> vtctldata.GetFullStatusResponse + 141, // 142: vtctlservice.Vtctld.GetKeyspace:output_type -> vtctldata.GetKeyspaceResponse + 142, // 143: 
vtctlservice.Vtctld.GetKeyspaces:output_type -> vtctldata.GetKeyspacesResponse + 143, // 144: vtctlservice.Vtctld.GetPermissions:output_type -> vtctldata.GetPermissionsResponse + 144, // 145: vtctlservice.Vtctld.GetRoutingRules:output_type -> vtctldata.GetRoutingRulesResponse + 145, // 146: vtctlservice.Vtctld.GetSchema:output_type -> vtctldata.GetSchemaResponse + 146, // 147: vtctlservice.Vtctld.GetSchemaMigrations:output_type -> vtctldata.GetSchemaMigrationsResponse + 147, // 148: vtctlservice.Vtctld.GetShard:output_type -> vtctldata.GetShardResponse + 148, // 149: vtctlservice.Vtctld.GetShardRoutingRules:output_type -> vtctldata.GetShardRoutingRulesResponse + 149, // 150: vtctlservice.Vtctld.GetSrvKeyspaceNames:output_type -> vtctldata.GetSrvKeyspaceNamesResponse + 150, // 151: vtctlservice.Vtctld.GetSrvKeyspaces:output_type -> vtctldata.GetSrvKeyspacesResponse + 151, // 152: vtctlservice.Vtctld.UpdateThrottlerConfig:output_type -> vtctldata.UpdateThrottlerConfigResponse + 152, // 153: vtctlservice.Vtctld.GetSrvVSchema:output_type -> vtctldata.GetSrvVSchemaResponse + 153, // 154: vtctlservice.Vtctld.GetSrvVSchemas:output_type -> vtctldata.GetSrvVSchemasResponse + 154, // 155: vtctlservice.Vtctld.GetTablet:output_type -> vtctldata.GetTabletResponse + 155, // 156: vtctlservice.Vtctld.GetTablets:output_type -> vtctldata.GetTabletsResponse + 156, // 157: vtctlservice.Vtctld.GetTopologyPath:output_type -> vtctldata.GetTopologyPathResponse + 157, // 158: vtctlservice.Vtctld.GetVersion:output_type -> vtctldata.GetVersionResponse + 158, // 159: vtctlservice.Vtctld.GetVSchema:output_type -> vtctldata.GetVSchemaResponse + 159, // 160: vtctlservice.Vtctld.GetWorkflows:output_type -> vtctldata.GetWorkflowsResponse + 160, // 161: vtctlservice.Vtctld.InitShardPrimary:output_type -> vtctldata.InitShardPrimaryResponse + 161, // 162: vtctlservice.Vtctld.LaunchSchemaMigration:output_type -> vtctldata.LaunchSchemaMigrationResponse + 162, // 163: 
vtctlservice.Vtctld.LookupVindexCreate:output_type -> vtctldata.LookupVindexCreateResponse + 163, // 164: vtctlservice.Vtctld.LookupVindexExternalize:output_type -> vtctldata.LookupVindexExternalizeResponse + 164, // 165: vtctlservice.Vtctld.MaterializeCreate:output_type -> vtctldata.MaterializeCreateResponse + 165, // 166: vtctlservice.Vtctld.MigrateCreate:output_type -> vtctldata.WorkflowStatusResponse + 166, // 167: vtctlservice.Vtctld.MountRegister:output_type -> vtctldata.MountRegisterResponse + 167, // 168: vtctlservice.Vtctld.MountUnregister:output_type -> vtctldata.MountUnregisterResponse + 168, // 169: vtctlservice.Vtctld.MountShow:output_type -> vtctldata.MountShowResponse + 169, // 170: vtctlservice.Vtctld.MountList:output_type -> vtctldata.MountListResponse + 165, // 171: vtctlservice.Vtctld.MoveTablesCreate:output_type -> vtctldata.WorkflowStatusResponse + 170, // 172: vtctlservice.Vtctld.MoveTablesComplete:output_type -> vtctldata.MoveTablesCompleteResponse + 171, // 173: vtctlservice.Vtctld.PingTablet:output_type -> vtctldata.PingTabletResponse + 172, // 174: vtctlservice.Vtctld.PlannedReparentShard:output_type -> vtctldata.PlannedReparentShardResponse + 173, // 175: vtctlservice.Vtctld.RebuildKeyspaceGraph:output_type -> vtctldata.RebuildKeyspaceGraphResponse + 174, // 176: vtctlservice.Vtctld.RebuildVSchemaGraph:output_type -> vtctldata.RebuildVSchemaGraphResponse + 175, // 177: vtctlservice.Vtctld.RefreshState:output_type -> vtctldata.RefreshStateResponse + 176, // 178: vtctlservice.Vtctld.RefreshStateByShard:output_type -> vtctldata.RefreshStateByShardResponse + 177, // 179: vtctlservice.Vtctld.ReloadSchema:output_type -> vtctldata.ReloadSchemaResponse + 178, // 180: vtctlservice.Vtctld.ReloadSchemaKeyspace:output_type -> vtctldata.ReloadSchemaKeyspaceResponse + 179, // 181: vtctlservice.Vtctld.ReloadSchemaShard:output_type -> vtctldata.ReloadSchemaShardResponse + 180, // 182: vtctlservice.Vtctld.RemoveBackup:output_type -> 
vtctldata.RemoveBackupResponse + 181, // 183: vtctlservice.Vtctld.RemoveKeyspaceCell:output_type -> vtctldata.RemoveKeyspaceCellResponse + 182, // 184: vtctlservice.Vtctld.RemoveShardCell:output_type -> vtctldata.RemoveShardCellResponse + 183, // 185: vtctlservice.Vtctld.ReparentTablet:output_type -> vtctldata.ReparentTabletResponse + 165, // 186: vtctlservice.Vtctld.ReshardCreate:output_type -> vtctldata.WorkflowStatusResponse + 184, // 187: vtctlservice.Vtctld.RestoreFromBackup:output_type -> vtctldata.RestoreFromBackupResponse + 185, // 188: vtctlservice.Vtctld.RetrySchemaMigration:output_type -> vtctldata.RetrySchemaMigrationResponse + 186, // 189: vtctlservice.Vtctld.RunHealthCheck:output_type -> vtctldata.RunHealthCheckResponse + 187, // 190: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:output_type -> vtctldata.SetKeyspaceDurabilityPolicyResponse + 188, // 191: vtctlservice.Vtctld.SetShardIsPrimaryServing:output_type -> vtctldata.SetShardIsPrimaryServingResponse + 189, // 192: vtctlservice.Vtctld.SetShardTabletControl:output_type -> vtctldata.SetShardTabletControlResponse + 190, // 193: vtctlservice.Vtctld.SetWritable:output_type -> vtctldata.SetWritableResponse + 191, // 194: vtctlservice.Vtctld.ShardReplicationAdd:output_type -> vtctldata.ShardReplicationAddResponse + 192, // 195: vtctlservice.Vtctld.ShardReplicationFix:output_type -> vtctldata.ShardReplicationFixResponse + 193, // 196: vtctlservice.Vtctld.ShardReplicationPositions:output_type -> vtctldata.ShardReplicationPositionsResponse + 194, // 197: vtctlservice.Vtctld.ShardReplicationRemove:output_type -> vtctldata.ShardReplicationRemoveResponse + 195, // 198: vtctlservice.Vtctld.SleepTablet:output_type -> vtctldata.SleepTabletResponse + 196, // 199: vtctlservice.Vtctld.SourceShardAdd:output_type -> vtctldata.SourceShardAddResponse + 197, // 200: vtctlservice.Vtctld.SourceShardDelete:output_type -> vtctldata.SourceShardDeleteResponse + 198, // 201: vtctlservice.Vtctld.StartReplication:output_type 
-> vtctldata.StartReplicationResponse + 199, // 202: vtctlservice.Vtctld.StopReplication:output_type -> vtctldata.StopReplicationResponse + 200, // 203: vtctlservice.Vtctld.TabletExternallyReparented:output_type -> vtctldata.TabletExternallyReparentedResponse + 201, // 204: vtctlservice.Vtctld.UpdateCellInfo:output_type -> vtctldata.UpdateCellInfoResponse + 202, // 205: vtctlservice.Vtctld.UpdateCellsAlias:output_type -> vtctldata.UpdateCellsAliasResponse + 203, // 206: vtctlservice.Vtctld.Validate:output_type -> vtctldata.ValidateResponse + 204, // 207: vtctlservice.Vtctld.ValidateKeyspace:output_type -> vtctldata.ValidateKeyspaceResponse + 205, // 208: vtctlservice.Vtctld.ValidateSchemaKeyspace:output_type -> vtctldata.ValidateSchemaKeyspaceResponse + 206, // 209: vtctlservice.Vtctld.ValidateShard:output_type -> vtctldata.ValidateShardResponse + 207, // 210: vtctlservice.Vtctld.ValidateVersionKeyspace:output_type -> vtctldata.ValidateVersionKeyspaceResponse + 208, // 211: vtctlservice.Vtctld.ValidateVersionShard:output_type -> vtctldata.ValidateVersionShardResponse + 209, // 212: vtctlservice.Vtctld.ValidateVSchema:output_type -> vtctldata.ValidateVSchemaResponse + 210, // 213: vtctlservice.Vtctld.VDiffCreate:output_type -> vtctldata.VDiffCreateResponse + 211, // 214: vtctlservice.Vtctld.VDiffDelete:output_type -> vtctldata.VDiffDeleteResponse + 212, // 215: vtctlservice.Vtctld.VDiffResume:output_type -> vtctldata.VDiffResumeResponse + 213, // 216: vtctlservice.Vtctld.VDiffShow:output_type -> vtctldata.VDiffShowResponse + 214, // 217: vtctlservice.Vtctld.VDiffStop:output_type -> vtctldata.VDiffStopResponse + 215, // 218: vtctlservice.Vtctld.WorkflowDelete:output_type -> vtctldata.WorkflowDeleteResponse + 165, // 219: vtctlservice.Vtctld.WorkflowStatus:output_type -> vtctldata.WorkflowStatusResponse + 216, // 220: vtctlservice.Vtctld.WorkflowSwitchTraffic:output_type -> vtctldata.WorkflowSwitchTrafficResponse + 217, // 221: 
vtctlservice.Vtctld.WorkflowUpdate:output_type -> vtctldata.WorkflowUpdateResponse + 111, // [111:222] is the sub-list for method output_type + 0, // [0:111] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name diff --git a/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go b/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go index 3d6ccea2871..f0a73530047 100644 --- a/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go +++ b/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 -// - protoc v3.6.1 +// - protoc v3.21.3 // source: vtctlservice.proto package vtctlservice @@ -159,12 +159,18 @@ type VtctldClient interface { Backup(ctx context.Context, in *vtctldata.BackupRequest, opts ...grpc.CallOption) (Vtctld_BackupClient, error) // BackupShard chooses a tablet in the shard and uses it to create a backup. BackupShard(ctx context.Context, in *vtctldata.BackupShardRequest, opts ...grpc.CallOption) (Vtctld_BackupShardClient, error) + // CancelSchemaMigration cancels one or all migrations, terminating any runnign ones as needed. + CancelSchemaMigration(ctx context.Context, in *vtctldata.CancelSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.CancelSchemaMigrationResponse, error) // ChangeTabletType changes the db type for the specified tablet, if possible. // This is used primarily to arrange replicas, and it will not convert a // primary. For that, use InitShardPrimary. // // NOTE: This command automatically updates the serving graph. ChangeTabletType(ctx context.Context, in *vtctldata.ChangeTabletTypeRequest, opts ...grpc.CallOption) (*vtctldata.ChangeTabletTypeResponse, error) + // CleanupSchemaMigration marks a schema migration as ready for artifact cleanup. 
+ CleanupSchemaMigration(ctx context.Context, in *vtctldata.CleanupSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.CleanupSchemaMigrationResponse, error) + // CompleteSchemaMigration completes one or all migrations executed with --postpone-completion. + CompleteSchemaMigration(ctx context.Context, in *vtctldata.CompleteSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.CompleteSchemaMigrationResponse, error) // CreateKeyspace creates the specified keyspace in the topology. For a // SNAPSHOT keyspace, the request must specify the name of a base keyspace, // as well as a snapshot time. @@ -225,6 +231,12 @@ type VtctldClient interface { // GetSchema returns the schema for a tablet, or just the schema for the // specified tables in that tablet. GetSchema(ctx context.Context, in *vtctldata.GetSchemaRequest, opts ...grpc.CallOption) (*vtctldata.GetSchemaResponse, error) + // GetSchemaMigrations returns one or more online schema migrations for the + // specified keyspace, analagous to `SHOW VITESS_MIGRATIONS`. + // + // Different fields in the request message result in different filtering + // behaviors. See the documentation on GetSchemaMigrationsRequest for details. + GetSchemaMigrations(ctx context.Context, in *vtctldata.GetSchemaMigrationsRequest, opts ...grpc.CallOption) (*vtctldata.GetSchemaMigrationsResponse, error) // GetShard returns information about a shard in the topology. GetShard(ctx context.Context, in *vtctldata.GetShardRequest, opts ...grpc.CallOption) (*vtctldata.GetShardResponse, error) // GetShardRoutingRules returns the VSchema shard routing rules. @@ -261,6 +273,30 @@ type VtctldClient interface { // PlannedReparentShard or EmergencyReparentShard should be used in those // cases instead. InitShardPrimary(ctx context.Context, in *vtctldata.InitShardPrimaryRequest, opts ...grpc.CallOption) (*vtctldata.InitShardPrimaryResponse, error) + // LaunchSchemaMigration launches one or all migrations executed with --postpone-launch. 
+ LaunchSchemaMigration(ctx context.Context, in *vtctldata.LaunchSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.LaunchSchemaMigrationResponse, error) + LookupVindexCreate(ctx context.Context, in *vtctldata.LookupVindexCreateRequest, opts ...grpc.CallOption) (*vtctldata.LookupVindexCreateResponse, error) + LookupVindexExternalize(ctx context.Context, in *vtctldata.LookupVindexExternalizeRequest, opts ...grpc.CallOption) (*vtctldata.LookupVindexExternalizeResponse, error) + // MaterializeCreate creates a workflow to materialize one or more tables + // from a source keyspace to a target keyspace using a provided expressions. + MaterializeCreate(ctx context.Context, in *vtctldata.MaterializeCreateRequest, opts ...grpc.CallOption) (*vtctldata.MaterializeCreateResponse, error) + // MigrateCreate creates a workflow which migrates one or more tables from an + // external cluster into Vitess. + MigrateCreate(ctx context.Context, in *vtctldata.MigrateCreateRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowStatusResponse, error) + // MountRegister registers a new external Vitess cluster. + MountRegister(ctx context.Context, in *vtctldata.MountRegisterRequest, opts ...grpc.CallOption) (*vtctldata.MountRegisterResponse, error) + // MountUnregister unregisters an external Vitess cluster. + MountUnregister(ctx context.Context, in *vtctldata.MountUnregisterRequest, opts ...grpc.CallOption) (*vtctldata.MountUnregisterResponse, error) + // MountShow returns information about an external Vitess cluster. + MountShow(ctx context.Context, in *vtctldata.MountShowRequest, opts ...grpc.CallOption) (*vtctldata.MountShowResponse, error) + // MountList lists all registered external Vitess clusters. + MountList(ctx context.Context, in *vtctldata.MountListRequest, opts ...grpc.CallOption) (*vtctldata.MountListResponse, error) + // MoveTablesCreate creates a workflow which moves one or more tables from a + // source keyspace to a target keyspace. 
+ MoveTablesCreate(ctx context.Context, in *vtctldata.MoveTablesCreateRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowStatusResponse, error) + // MoveTablesComplete completes the move and cleans up the workflow and + // its related artifacts. + MoveTablesComplete(ctx context.Context, in *vtctldata.MoveTablesCompleteRequest, opts ...grpc.CallOption) (*vtctldata.MoveTablesCompleteResponse, error) // PingTablet checks that the specified tablet is awake and responding to RPCs. // This command can be blocked by other in-flight operations. PingTablet(ctx context.Context, in *vtctldata.PingTabletRequest, opts ...grpc.CallOption) (*vtctldata.PingTabletResponse, error) @@ -308,8 +344,12 @@ type VtctldClient interface { // only works if the current replica position matches the last known reparent // action. ReparentTablet(ctx context.Context, in *vtctldata.ReparentTabletRequest, opts ...grpc.CallOption) (*vtctldata.ReparentTabletResponse, error) + // ReshardCreate creates a workflow to reshard a keyspace. + ReshardCreate(ctx context.Context, in *vtctldata.ReshardCreateRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowStatusResponse, error) // RestoreFromBackup stops mysqld for the given tablet and restores a backup. RestoreFromBackup(ctx context.Context, in *vtctldata.RestoreFromBackupRequest, opts ...grpc.CallOption) (Vtctld_RestoreFromBackupClient, error) + // RetrySchemaMigration marks a given schema migration for retry. + RetrySchemaMigration(ctx context.Context, in *vtctldata.RetrySchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.RetrySchemaMigrationResponse, error) // RunHealthCheck runs a healthcheck on the remote tablet. RunHealthCheck(ctx context.Context, in *vtctldata.RunHealthCheckRequest, opts ...grpc.CallOption) (*vtctldata.RunHealthCheckResponse, error) // SetKeyspaceDurabilityPolicy updates the DurabilityPolicy for a keyspace. 
@@ -398,6 +438,15 @@ type VtctldClient interface { ValidateVersionShard(ctx context.Context, in *vtctldata.ValidateVersionShardRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVersionShardResponse, error) // ValidateVSchema compares the schema of each primary tablet in "keyspace/shards..." to the vschema and errs if there are differences. ValidateVSchema(ctx context.Context, in *vtctldata.ValidateVSchemaRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVSchemaResponse, error) + VDiffCreate(ctx context.Context, in *vtctldata.VDiffCreateRequest, opts ...grpc.CallOption) (*vtctldata.VDiffCreateResponse, error) + VDiffDelete(ctx context.Context, in *vtctldata.VDiffDeleteRequest, opts ...grpc.CallOption) (*vtctldata.VDiffDeleteResponse, error) + VDiffResume(ctx context.Context, in *vtctldata.VDiffResumeRequest, opts ...grpc.CallOption) (*vtctldata.VDiffResumeResponse, error) + VDiffShow(ctx context.Context, in *vtctldata.VDiffShowRequest, opts ...grpc.CallOption) (*vtctldata.VDiffShowResponse, error) + VDiffStop(ctx context.Context, in *vtctldata.VDiffStopRequest, opts ...grpc.CallOption) (*vtctldata.VDiffStopResponse, error) + // WorkflowDelete deletes a vreplication workflow. + WorkflowDelete(ctx context.Context, in *vtctldata.WorkflowDeleteRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowDeleteResponse, error) + WorkflowStatus(ctx context.Context, in *vtctldata.WorkflowStatusRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowStatusResponse, error) + WorkflowSwitchTraffic(ctx context.Context, in *vtctldata.WorkflowSwitchTrafficRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowSwitchTrafficResponse, error) // WorkflowUpdate updates the configuration of a vreplication workflow // using the provided updated parameters. 
WorkflowUpdate(ctx context.Context, in *vtctldata.WorkflowUpdateRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowUpdateResponse, error) @@ -529,6 +578,15 @@ func (x *vtctldBackupShardClient) Recv() (*vtctldata.BackupResponse, error) { return m, nil } +func (c *vtctldClient) CancelSchemaMigration(ctx context.Context, in *vtctldata.CancelSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.CancelSchemaMigrationResponse, error) { + out := new(vtctldata.CancelSchemaMigrationResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/CancelSchemaMigration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) ChangeTabletType(ctx context.Context, in *vtctldata.ChangeTabletTypeRequest, opts ...grpc.CallOption) (*vtctldata.ChangeTabletTypeResponse, error) { out := new(vtctldata.ChangeTabletTypeResponse) err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/ChangeTabletType", in, out, opts...) @@ -538,6 +596,24 @@ func (c *vtctldClient) ChangeTabletType(ctx context.Context, in *vtctldata.Chang return out, nil } +func (c *vtctldClient) CleanupSchemaMigration(ctx context.Context, in *vtctldata.CleanupSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.CleanupSchemaMigrationResponse, error) { + out := new(vtctldata.CleanupSchemaMigrationResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/CleanupSchemaMigration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) CompleteSchemaMigration(ctx context.Context, in *vtctldata.CompleteSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.CompleteSchemaMigrationResponse, error) { + out := new(vtctldata.CompleteSchemaMigrationResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/CompleteSchemaMigration", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) CreateKeyspace(ctx context.Context, in *vtctldata.CreateKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.CreateKeyspaceResponse, error) { out := new(vtctldata.CreateKeyspaceResponse) err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/CreateKeyspace", in, out, opts...) @@ -745,6 +821,15 @@ func (c *vtctldClient) GetSchema(ctx context.Context, in *vtctldata.GetSchemaReq return out, nil } +func (c *vtctldClient) GetSchemaMigrations(ctx context.Context, in *vtctldata.GetSchemaMigrationsRequest, opts ...grpc.CallOption) (*vtctldata.GetSchemaMigrationsResponse, error) { + out := new(vtctldata.GetSchemaMigrationsResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetSchemaMigrations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) GetShard(ctx context.Context, in *vtctldata.GetShardRequest, opts ...grpc.CallOption) (*vtctldata.GetShardResponse, error) { out := new(vtctldata.GetShardResponse) err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetShard", in, out, opts...) @@ -871,6 +956,105 @@ func (c *vtctldClient) InitShardPrimary(ctx context.Context, in *vtctldata.InitS return out, nil } +func (c *vtctldClient) LaunchSchemaMigration(ctx context.Context, in *vtctldata.LaunchSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.LaunchSchemaMigrationResponse, error) { + out := new(vtctldata.LaunchSchemaMigrationResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/LaunchSchemaMigration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) LookupVindexCreate(ctx context.Context, in *vtctldata.LookupVindexCreateRequest, opts ...grpc.CallOption) (*vtctldata.LookupVindexCreateResponse, error) { + out := new(vtctldata.LookupVindexCreateResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/LookupVindexCreate", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) LookupVindexExternalize(ctx context.Context, in *vtctldata.LookupVindexExternalizeRequest, opts ...grpc.CallOption) (*vtctldata.LookupVindexExternalizeResponse, error) { + out := new(vtctldata.LookupVindexExternalizeResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/LookupVindexExternalize", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) MaterializeCreate(ctx context.Context, in *vtctldata.MaterializeCreateRequest, opts ...grpc.CallOption) (*vtctldata.MaterializeCreateResponse, error) { + out := new(vtctldata.MaterializeCreateResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/MaterializeCreate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) MigrateCreate(ctx context.Context, in *vtctldata.MigrateCreateRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowStatusResponse, error) { + out := new(vtctldata.WorkflowStatusResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/MigrateCreate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) MountRegister(ctx context.Context, in *vtctldata.MountRegisterRequest, opts ...grpc.CallOption) (*vtctldata.MountRegisterResponse, error) { + out := new(vtctldata.MountRegisterResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/MountRegister", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) MountUnregister(ctx context.Context, in *vtctldata.MountUnregisterRequest, opts ...grpc.CallOption) (*vtctldata.MountUnregisterResponse, error) { + out := new(vtctldata.MountUnregisterResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/MountUnregister", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) MountShow(ctx context.Context, in *vtctldata.MountShowRequest, opts ...grpc.CallOption) (*vtctldata.MountShowResponse, error) { + out := new(vtctldata.MountShowResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/MountShow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) MountList(ctx context.Context, in *vtctldata.MountListRequest, opts ...grpc.CallOption) (*vtctldata.MountListResponse, error) { + out := new(vtctldata.MountListResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/MountList", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) MoveTablesCreate(ctx context.Context, in *vtctldata.MoveTablesCreateRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowStatusResponse, error) { + out := new(vtctldata.WorkflowStatusResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/MoveTablesCreate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) MoveTablesComplete(ctx context.Context, in *vtctldata.MoveTablesCompleteRequest, opts ...grpc.CallOption) (*vtctldata.MoveTablesCompleteResponse, error) { + out := new(vtctldata.MoveTablesCompleteResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/MoveTablesComplete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) PingTablet(ctx context.Context, in *vtctldata.PingTabletRequest, opts ...grpc.CallOption) (*vtctldata.PingTabletResponse, error) { out := new(vtctldata.PingTabletResponse) err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/PingTablet", in, out, opts...) 
@@ -988,6 +1172,15 @@ func (c *vtctldClient) ReparentTablet(ctx context.Context, in *vtctldata.Reparen return out, nil } +func (c *vtctldClient) ReshardCreate(ctx context.Context, in *vtctldata.ReshardCreateRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowStatusResponse, error) { + out := new(vtctldata.WorkflowStatusResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/ReshardCreate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) RestoreFromBackup(ctx context.Context, in *vtctldata.RestoreFromBackupRequest, opts ...grpc.CallOption) (Vtctld_RestoreFromBackupClient, error) { stream, err := c.cc.NewStream(ctx, &Vtctld_ServiceDesc.Streams[2], "/vtctlservice.Vtctld/RestoreFromBackup", opts...) if err != nil { @@ -1020,6 +1213,15 @@ func (x *vtctldRestoreFromBackupClient) Recv() (*vtctldata.RestoreFromBackupResp return m, nil } +func (c *vtctldClient) RetrySchemaMigration(ctx context.Context, in *vtctldata.RetrySchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.RetrySchemaMigrationResponse, error) { + out := new(vtctldata.RetrySchemaMigrationResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/RetrySchemaMigration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) RunHealthCheck(ctx context.Context, in *vtctldata.RunHealthCheckRequest, opts ...grpc.CallOption) (*vtctldata.RunHealthCheckResponse, error) { out := new(vtctldata.RunHealthCheckResponse) err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/RunHealthCheck", in, out, opts...) @@ -1236,6 +1438,78 @@ func (c *vtctldClient) ValidateVSchema(ctx context.Context, in *vtctldata.Valida return out, nil } +func (c *vtctldClient) VDiffCreate(ctx context.Context, in *vtctldata.VDiffCreateRequest, opts ...grpc.CallOption) (*vtctldata.VDiffCreateResponse, error) { + out := new(vtctldata.VDiffCreateResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/VDiffCreate", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) VDiffDelete(ctx context.Context, in *vtctldata.VDiffDeleteRequest, opts ...grpc.CallOption) (*vtctldata.VDiffDeleteResponse, error) { + out := new(vtctldata.VDiffDeleteResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/VDiffDelete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) VDiffResume(ctx context.Context, in *vtctldata.VDiffResumeRequest, opts ...grpc.CallOption) (*vtctldata.VDiffResumeResponse, error) { + out := new(vtctldata.VDiffResumeResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/VDiffResume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) VDiffShow(ctx context.Context, in *vtctldata.VDiffShowRequest, opts ...grpc.CallOption) (*vtctldata.VDiffShowResponse, error) { + out := new(vtctldata.VDiffShowResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/VDiffShow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) VDiffStop(ctx context.Context, in *vtctldata.VDiffStopRequest, opts ...grpc.CallOption) (*vtctldata.VDiffStopResponse, error) { + out := new(vtctldata.VDiffStopResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/VDiffStop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) WorkflowDelete(ctx context.Context, in *vtctldata.WorkflowDeleteRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowDeleteResponse, error) { + out := new(vtctldata.WorkflowDeleteResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/WorkflowDelete", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) WorkflowStatus(ctx context.Context, in *vtctldata.WorkflowStatusRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowStatusResponse, error) { + out := new(vtctldata.WorkflowStatusResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/WorkflowStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) WorkflowSwitchTraffic(ctx context.Context, in *vtctldata.WorkflowSwitchTrafficRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowSwitchTrafficResponse, error) { + out := new(vtctldata.WorkflowSwitchTrafficResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/WorkflowSwitchTraffic", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) WorkflowUpdate(ctx context.Context, in *vtctldata.WorkflowUpdateRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowUpdateResponse, error) { out := new(vtctldata.WorkflowUpdateResponse) err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/WorkflowUpdate", in, out, opts...) @@ -1272,12 +1546,18 @@ type VtctldServer interface { Backup(*vtctldata.BackupRequest, Vtctld_BackupServer) error // BackupShard chooses a tablet in the shard and uses it to create a backup. BackupShard(*vtctldata.BackupShardRequest, Vtctld_BackupShardServer) error + // CancelSchemaMigration cancels one or all migrations, terminating any runnign ones as needed. + CancelSchemaMigration(context.Context, *vtctldata.CancelSchemaMigrationRequest) (*vtctldata.CancelSchemaMigrationResponse, error) // ChangeTabletType changes the db type for the specified tablet, if possible. // This is used primarily to arrange replicas, and it will not convert a // primary. For that, use InitShardPrimary. // // NOTE: This command automatically updates the serving graph. 
ChangeTabletType(context.Context, *vtctldata.ChangeTabletTypeRequest) (*vtctldata.ChangeTabletTypeResponse, error) + // CleanupSchemaMigration marks a schema migration as ready for artifact cleanup. + CleanupSchemaMigration(context.Context, *vtctldata.CleanupSchemaMigrationRequest) (*vtctldata.CleanupSchemaMigrationResponse, error) + // CompleteSchemaMigration completes one or all migrations executed with --postpone-completion. + CompleteSchemaMigration(context.Context, *vtctldata.CompleteSchemaMigrationRequest) (*vtctldata.CompleteSchemaMigrationResponse, error) // CreateKeyspace creates the specified keyspace in the topology. For a // SNAPSHOT keyspace, the request must specify the name of a base keyspace, // as well as a snapshot time. @@ -1338,6 +1618,12 @@ type VtctldServer interface { // GetSchema returns the schema for a tablet, or just the schema for the // specified tables in that tablet. GetSchema(context.Context, *vtctldata.GetSchemaRequest) (*vtctldata.GetSchemaResponse, error) + // GetSchemaMigrations returns one or more online schema migrations for the + // specified keyspace, analagous to `SHOW VITESS_MIGRATIONS`. + // + // Different fields in the request message result in different filtering + // behaviors. See the documentation on GetSchemaMigrationsRequest for details. + GetSchemaMigrations(context.Context, *vtctldata.GetSchemaMigrationsRequest) (*vtctldata.GetSchemaMigrationsResponse, error) // GetShard returns information about a shard in the topology. GetShard(context.Context, *vtctldata.GetShardRequest) (*vtctldata.GetShardResponse, error) // GetShardRoutingRules returns the VSchema shard routing rules. @@ -1374,6 +1660,30 @@ type VtctldServer interface { // PlannedReparentShard or EmergencyReparentShard should be used in those // cases instead. 
InitShardPrimary(context.Context, *vtctldata.InitShardPrimaryRequest) (*vtctldata.InitShardPrimaryResponse, error) + // LaunchSchemaMigration launches one or all migrations executed with --postpone-launch. + LaunchSchemaMigration(context.Context, *vtctldata.LaunchSchemaMigrationRequest) (*vtctldata.LaunchSchemaMigrationResponse, error) + LookupVindexCreate(context.Context, *vtctldata.LookupVindexCreateRequest) (*vtctldata.LookupVindexCreateResponse, error) + LookupVindexExternalize(context.Context, *vtctldata.LookupVindexExternalizeRequest) (*vtctldata.LookupVindexExternalizeResponse, error) + // MaterializeCreate creates a workflow to materialize one or more tables + // from a source keyspace to a target keyspace using a provided expressions. + MaterializeCreate(context.Context, *vtctldata.MaterializeCreateRequest) (*vtctldata.MaterializeCreateResponse, error) + // MigrateCreate creates a workflow which migrates one or more tables from an + // external cluster into Vitess. + MigrateCreate(context.Context, *vtctldata.MigrateCreateRequest) (*vtctldata.WorkflowStatusResponse, error) + // MountRegister registers a new external Vitess cluster. + MountRegister(context.Context, *vtctldata.MountRegisterRequest) (*vtctldata.MountRegisterResponse, error) + // MountUnregister unregisters an external Vitess cluster. + MountUnregister(context.Context, *vtctldata.MountUnregisterRequest) (*vtctldata.MountUnregisterResponse, error) + // MountShow returns information about an external Vitess cluster. + MountShow(context.Context, *vtctldata.MountShowRequest) (*vtctldata.MountShowResponse, error) + // MountList lists all registered external Vitess clusters. + MountList(context.Context, *vtctldata.MountListRequest) (*vtctldata.MountListResponse, error) + // MoveTablesCreate creates a workflow which moves one or more tables from a + // source keyspace to a target keyspace. 
+ MoveTablesCreate(context.Context, *vtctldata.MoveTablesCreateRequest) (*vtctldata.WorkflowStatusResponse, error) + // MoveTablesComplete completes the move and cleans up the workflow and + // its related artifacts. + MoveTablesComplete(context.Context, *vtctldata.MoveTablesCompleteRequest) (*vtctldata.MoveTablesCompleteResponse, error) // PingTablet checks that the specified tablet is awake and responding to RPCs. // This command can be blocked by other in-flight operations. PingTablet(context.Context, *vtctldata.PingTabletRequest) (*vtctldata.PingTabletResponse, error) @@ -1421,8 +1731,12 @@ type VtctldServer interface { // only works if the current replica position matches the last known reparent // action. ReparentTablet(context.Context, *vtctldata.ReparentTabletRequest) (*vtctldata.ReparentTabletResponse, error) + // ReshardCreate creates a workflow to reshard a keyspace. + ReshardCreate(context.Context, *vtctldata.ReshardCreateRequest) (*vtctldata.WorkflowStatusResponse, error) // RestoreFromBackup stops mysqld for the given tablet and restores a backup. RestoreFromBackup(*vtctldata.RestoreFromBackupRequest, Vtctld_RestoreFromBackupServer) error + // RetrySchemaMigration marks a given schema migration for retry. + RetrySchemaMigration(context.Context, *vtctldata.RetrySchemaMigrationRequest) (*vtctldata.RetrySchemaMigrationResponse, error) // RunHealthCheck runs a healthcheck on the remote tablet. RunHealthCheck(context.Context, *vtctldata.RunHealthCheckRequest) (*vtctldata.RunHealthCheckResponse, error) // SetKeyspaceDurabilityPolicy updates the DurabilityPolicy for a keyspace. @@ -1511,6 +1825,15 @@ type VtctldServer interface { ValidateVersionShard(context.Context, *vtctldata.ValidateVersionShardRequest) (*vtctldata.ValidateVersionShardResponse, error) // ValidateVSchema compares the schema of each primary tablet in "keyspace/shards..." to the vschema and errs if there are differences. 
ValidateVSchema(context.Context, *vtctldata.ValidateVSchemaRequest) (*vtctldata.ValidateVSchemaResponse, error) + VDiffCreate(context.Context, *vtctldata.VDiffCreateRequest) (*vtctldata.VDiffCreateResponse, error) + VDiffDelete(context.Context, *vtctldata.VDiffDeleteRequest) (*vtctldata.VDiffDeleteResponse, error) + VDiffResume(context.Context, *vtctldata.VDiffResumeRequest) (*vtctldata.VDiffResumeResponse, error) + VDiffShow(context.Context, *vtctldata.VDiffShowRequest) (*vtctldata.VDiffShowResponse, error) + VDiffStop(context.Context, *vtctldata.VDiffStopRequest) (*vtctldata.VDiffStopResponse, error) + // WorkflowDelete deletes a vreplication workflow. + WorkflowDelete(context.Context, *vtctldata.WorkflowDeleteRequest) (*vtctldata.WorkflowDeleteResponse, error) + WorkflowStatus(context.Context, *vtctldata.WorkflowStatusRequest) (*vtctldata.WorkflowStatusResponse, error) + WorkflowSwitchTraffic(context.Context, *vtctldata.WorkflowSwitchTrafficRequest) (*vtctldata.WorkflowSwitchTrafficResponse, error) // WorkflowUpdate updates the configuration of a vreplication workflow // using the provided updated parameters. 
WorkflowUpdate(context.Context, *vtctldata.WorkflowUpdateRequest) (*vtctldata.WorkflowUpdateResponse, error) @@ -1545,9 +1868,18 @@ func (UnimplementedVtctldServer) Backup(*vtctldata.BackupRequest, Vtctld_BackupS func (UnimplementedVtctldServer) BackupShard(*vtctldata.BackupShardRequest, Vtctld_BackupShardServer) error { return status.Errorf(codes.Unimplemented, "method BackupShard not implemented") } +func (UnimplementedVtctldServer) CancelSchemaMigration(context.Context, *vtctldata.CancelSchemaMigrationRequest) (*vtctldata.CancelSchemaMigrationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CancelSchemaMigration not implemented") +} func (UnimplementedVtctldServer) ChangeTabletType(context.Context, *vtctldata.ChangeTabletTypeRequest) (*vtctldata.ChangeTabletTypeResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ChangeTabletType not implemented") } +func (UnimplementedVtctldServer) CleanupSchemaMigration(context.Context, *vtctldata.CleanupSchemaMigrationRequest) (*vtctldata.CleanupSchemaMigrationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CleanupSchemaMigration not implemented") +} +func (UnimplementedVtctldServer) CompleteSchemaMigration(context.Context, *vtctldata.CompleteSchemaMigrationRequest) (*vtctldata.CompleteSchemaMigrationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CompleteSchemaMigration not implemented") +} func (UnimplementedVtctldServer) CreateKeyspace(context.Context, *vtctldata.CreateKeyspaceRequest) (*vtctldata.CreateKeyspaceResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateKeyspace not implemented") } @@ -1617,6 +1949,9 @@ func (UnimplementedVtctldServer) GetRoutingRules(context.Context, *vtctldata.Get func (UnimplementedVtctldServer) GetSchema(context.Context, *vtctldata.GetSchemaRequest) (*vtctldata.GetSchemaResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetSchema not 
implemented") } +func (UnimplementedVtctldServer) GetSchemaMigrations(context.Context, *vtctldata.GetSchemaMigrationsRequest) (*vtctldata.GetSchemaMigrationsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSchemaMigrations not implemented") +} func (UnimplementedVtctldServer) GetShard(context.Context, *vtctldata.GetShardRequest) (*vtctldata.GetShardResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetShard not implemented") } @@ -1659,6 +1994,39 @@ func (UnimplementedVtctldServer) GetWorkflows(context.Context, *vtctldata.GetWor func (UnimplementedVtctldServer) InitShardPrimary(context.Context, *vtctldata.InitShardPrimaryRequest) (*vtctldata.InitShardPrimaryResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method InitShardPrimary not implemented") } +func (UnimplementedVtctldServer) LaunchSchemaMigration(context.Context, *vtctldata.LaunchSchemaMigrationRequest) (*vtctldata.LaunchSchemaMigrationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LaunchSchemaMigration not implemented") +} +func (UnimplementedVtctldServer) LookupVindexCreate(context.Context, *vtctldata.LookupVindexCreateRequest) (*vtctldata.LookupVindexCreateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LookupVindexCreate not implemented") +} +func (UnimplementedVtctldServer) LookupVindexExternalize(context.Context, *vtctldata.LookupVindexExternalizeRequest) (*vtctldata.LookupVindexExternalizeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LookupVindexExternalize not implemented") +} +func (UnimplementedVtctldServer) MaterializeCreate(context.Context, *vtctldata.MaterializeCreateRequest) (*vtctldata.MaterializeCreateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MaterializeCreate not implemented") +} +func (UnimplementedVtctldServer) MigrateCreate(context.Context, *vtctldata.MigrateCreateRequest) 
(*vtctldata.WorkflowStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MigrateCreate not implemented") +} +func (UnimplementedVtctldServer) MountRegister(context.Context, *vtctldata.MountRegisterRequest) (*vtctldata.MountRegisterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MountRegister not implemented") +} +func (UnimplementedVtctldServer) MountUnregister(context.Context, *vtctldata.MountUnregisterRequest) (*vtctldata.MountUnregisterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MountUnregister not implemented") +} +func (UnimplementedVtctldServer) MountShow(context.Context, *vtctldata.MountShowRequest) (*vtctldata.MountShowResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MountShow not implemented") +} +func (UnimplementedVtctldServer) MountList(context.Context, *vtctldata.MountListRequest) (*vtctldata.MountListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MountList not implemented") +} +func (UnimplementedVtctldServer) MoveTablesCreate(context.Context, *vtctldata.MoveTablesCreateRequest) (*vtctldata.WorkflowStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MoveTablesCreate not implemented") +} +func (UnimplementedVtctldServer) MoveTablesComplete(context.Context, *vtctldata.MoveTablesCompleteRequest) (*vtctldata.MoveTablesCompleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MoveTablesComplete not implemented") +} func (UnimplementedVtctldServer) PingTablet(context.Context, *vtctldata.PingTabletRequest) (*vtctldata.PingTabletResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method PingTablet not implemented") } @@ -1698,9 +2066,15 @@ func (UnimplementedVtctldServer) RemoveShardCell(context.Context, *vtctldata.Rem func (UnimplementedVtctldServer) ReparentTablet(context.Context, *vtctldata.ReparentTabletRequest) 
(*vtctldata.ReparentTabletResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ReparentTablet not implemented") } +func (UnimplementedVtctldServer) ReshardCreate(context.Context, *vtctldata.ReshardCreateRequest) (*vtctldata.WorkflowStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReshardCreate not implemented") +} func (UnimplementedVtctldServer) RestoreFromBackup(*vtctldata.RestoreFromBackupRequest, Vtctld_RestoreFromBackupServer) error { return status.Errorf(codes.Unimplemented, "method RestoreFromBackup not implemented") } +func (UnimplementedVtctldServer) RetrySchemaMigration(context.Context, *vtctldata.RetrySchemaMigrationRequest) (*vtctldata.RetrySchemaMigrationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RetrySchemaMigration not implemented") +} func (UnimplementedVtctldServer) RunHealthCheck(context.Context, *vtctldata.RunHealthCheckRequest) (*vtctldata.RunHealthCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RunHealthCheck not implemented") } @@ -1773,6 +2147,30 @@ func (UnimplementedVtctldServer) ValidateVersionShard(context.Context, *vtctldat func (UnimplementedVtctldServer) ValidateVSchema(context.Context, *vtctldata.ValidateVSchemaRequest) (*vtctldata.ValidateVSchemaResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ValidateVSchema not implemented") } +func (UnimplementedVtctldServer) VDiffCreate(context.Context, *vtctldata.VDiffCreateRequest) (*vtctldata.VDiffCreateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VDiffCreate not implemented") +} +func (UnimplementedVtctldServer) VDiffDelete(context.Context, *vtctldata.VDiffDeleteRequest) (*vtctldata.VDiffDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VDiffDelete not implemented") +} +func (UnimplementedVtctldServer) VDiffResume(context.Context, *vtctldata.VDiffResumeRequest) 
(*vtctldata.VDiffResumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VDiffResume not implemented") +} +func (UnimplementedVtctldServer) VDiffShow(context.Context, *vtctldata.VDiffShowRequest) (*vtctldata.VDiffShowResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VDiffShow not implemented") +} +func (UnimplementedVtctldServer) VDiffStop(context.Context, *vtctldata.VDiffStopRequest) (*vtctldata.VDiffStopResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VDiffStop not implemented") +} +func (UnimplementedVtctldServer) WorkflowDelete(context.Context, *vtctldata.WorkflowDeleteRequest) (*vtctldata.WorkflowDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method WorkflowDelete not implemented") +} +func (UnimplementedVtctldServer) WorkflowStatus(context.Context, *vtctldata.WorkflowStatusRequest) (*vtctldata.WorkflowStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method WorkflowStatus not implemented") +} +func (UnimplementedVtctldServer) WorkflowSwitchTraffic(context.Context, *vtctldata.WorkflowSwitchTrafficRequest) (*vtctldata.WorkflowSwitchTrafficResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method WorkflowSwitchTraffic not implemented") +} func (UnimplementedVtctldServer) WorkflowUpdate(context.Context, *vtctldata.WorkflowUpdateRequest) (*vtctldata.WorkflowUpdateResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method WorkflowUpdate not implemented") } @@ -1939,6 +2337,24 @@ func (x *vtctldBackupShardServer) Send(m *vtctldata.BackupResponse) error { return x.ServerStream.SendMsg(m) } +func _Vtctld_CancelSchemaMigration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.CancelSchemaMigrationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(VtctldServer).CancelSchemaMigration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/CancelSchemaMigration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).CancelSchemaMigration(ctx, req.(*vtctldata.CancelSchemaMigrationRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Vtctld_ChangeTabletType_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(vtctldata.ChangeTabletTypeRequest) if err := dec(in); err != nil { @@ -1957,6 +2373,42 @@ func _Vtctld_ChangeTabletType_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _Vtctld_CleanupSchemaMigration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.CleanupSchemaMigrationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).CleanupSchemaMigration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/CleanupSchemaMigration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).CleanupSchemaMigration(ctx, req.(*vtctldata.CleanupSchemaMigrationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_CompleteSchemaMigration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.CompleteSchemaMigrationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).CompleteSchemaMigration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/vtctlservice.Vtctld/CompleteSchemaMigration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).CompleteSchemaMigration(ctx, req.(*vtctldata.CompleteSchemaMigrationRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Vtctld_CreateKeyspace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(vtctldata.CreateKeyspaceRequest) if err := dec(in); err != nil { @@ -2371,6 +2823,24 @@ func _Vtctld_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(in return interceptor(ctx, in, info, handler) } +func _Vtctld_GetSchemaMigrations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.GetSchemaMigrationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).GetSchemaMigrations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/GetSchemaMigrations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).GetSchemaMigrations(ctx, req.(*vtctldata.GetSchemaMigrationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Vtctld_GetShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(vtctldata.GetShardRequest) if err := dec(in); err != nil { @@ -2623,98 +3093,296 @@ func _Vtctld_InitShardPrimary_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } -func _Vtctld_PingTablet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(vtctldata.PingTabletRequest) +func 
_Vtctld_LaunchSchemaMigration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.LaunchSchemaMigrationRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VtctldServer).PingTablet(ctx, in) + return srv.(VtctldServer).LaunchSchemaMigration(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vtctlservice.Vtctld/PingTablet", + FullMethod: "/vtctlservice.Vtctld/LaunchSchemaMigration", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VtctldServer).PingTablet(ctx, req.(*vtctldata.PingTabletRequest)) + return srv.(VtctldServer).LaunchSchemaMigration(ctx, req.(*vtctldata.LaunchSchemaMigrationRequest)) } return interceptor(ctx, in, info, handler) } -func _Vtctld_PlannedReparentShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(vtctldata.PlannedReparentShardRequest) +func _Vtctld_LookupVindexCreate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.LookupVindexCreateRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VtctldServer).PlannedReparentShard(ctx, in) + return srv.(VtctldServer).LookupVindexCreate(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vtctlservice.Vtctld/PlannedReparentShard", + FullMethod: "/vtctlservice.Vtctld/LookupVindexCreate", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VtctldServer).PlannedReparentShard(ctx, req.(*vtctldata.PlannedReparentShardRequest)) + return srv.(VtctldServer).LookupVindexCreate(ctx, req.(*vtctldata.LookupVindexCreateRequest)) } return interceptor(ctx, in, info, handler) } -func 
_Vtctld_RebuildKeyspaceGraph_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(vtctldata.RebuildKeyspaceGraphRequest) +func _Vtctld_LookupVindexExternalize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.LookupVindexExternalizeRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VtctldServer).RebuildKeyspaceGraph(ctx, in) + return srv.(VtctldServer).LookupVindexExternalize(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vtctlservice.Vtctld/RebuildKeyspaceGraph", + FullMethod: "/vtctlservice.Vtctld/LookupVindexExternalize", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VtctldServer).RebuildKeyspaceGraph(ctx, req.(*vtctldata.RebuildKeyspaceGraphRequest)) + return srv.(VtctldServer).LookupVindexExternalize(ctx, req.(*vtctldata.LookupVindexExternalizeRequest)) } return interceptor(ctx, in, info, handler) } -func _Vtctld_RebuildVSchemaGraph_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(vtctldata.RebuildVSchemaGraphRequest) +func _Vtctld_MaterializeCreate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.MaterializeCreateRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VtctldServer).RebuildVSchemaGraph(ctx, in) + return srv.(VtctldServer).MaterializeCreate(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vtctlservice.Vtctld/RebuildVSchemaGraph", + FullMethod: "/vtctlservice.Vtctld/MaterializeCreate", } handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { - return srv.(VtctldServer).RebuildVSchemaGraph(ctx, req.(*vtctldata.RebuildVSchemaGraphRequest)) + return srv.(VtctldServer).MaterializeCreate(ctx, req.(*vtctldata.MaterializeCreateRequest)) } return interceptor(ctx, in, info, handler) } -func _Vtctld_RefreshState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(vtctldata.RefreshStateRequest) +func _Vtctld_MigrateCreate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.MigrateCreateRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(VtctldServer).RefreshState(ctx, in) + return srv.(VtctldServer).MigrateCreate(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/vtctlservice.Vtctld/RefreshState", + FullMethod: "/vtctlservice.Vtctld/MigrateCreate", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VtctldServer).RefreshState(ctx, req.(*vtctldata.RefreshStateRequest)) + return srv.(VtctldServer).MigrateCreate(ctx, req.(*vtctldata.MigrateCreateRequest)) } return interceptor(ctx, in, info, handler) } -func _Vtctld_RefreshStateByShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(vtctldata.RefreshStateByShardRequest) +func _Vtctld_MountRegister_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.MountRegisterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).MountRegister(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/MountRegister", + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).MountRegister(ctx, req.(*vtctldata.MountRegisterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_MountUnregister_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.MountUnregisterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).MountUnregister(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/MountUnregister", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).MountUnregister(ctx, req.(*vtctldata.MountUnregisterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_MountShow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.MountShowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).MountShow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/MountShow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).MountShow(ctx, req.(*vtctldata.MountShowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_MountList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.MountListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).MountList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/MountList", + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).MountList(ctx, req.(*vtctldata.MountListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_MoveTablesCreate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.MoveTablesCreateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).MoveTablesCreate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/MoveTablesCreate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).MoveTablesCreate(ctx, req.(*vtctldata.MoveTablesCreateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_MoveTablesComplete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.MoveTablesCompleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).MoveTablesComplete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/MoveTablesComplete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).MoveTablesComplete(ctx, req.(*vtctldata.MoveTablesCompleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_PingTablet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.PingTabletRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).PingTablet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/vtctlservice.Vtctld/PingTablet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).PingTablet(ctx, req.(*vtctldata.PingTabletRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_PlannedReparentShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.PlannedReparentShardRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).PlannedReparentShard(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/PlannedReparentShard", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).PlannedReparentShard(ctx, req.(*vtctldata.PlannedReparentShardRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_RebuildKeyspaceGraph_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.RebuildKeyspaceGraphRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).RebuildKeyspaceGraph(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/RebuildKeyspaceGraph", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).RebuildKeyspaceGraph(ctx, req.(*vtctldata.RebuildKeyspaceGraphRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_RebuildVSchemaGraph_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.RebuildVSchemaGraphRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(VtctldServer).RebuildVSchemaGraph(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/RebuildVSchemaGraph", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).RebuildVSchemaGraph(ctx, req.(*vtctldata.RebuildVSchemaGraphRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_RefreshState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.RefreshStateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).RefreshState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/RefreshState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).RefreshState(ctx, req.(*vtctldata.RefreshStateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_RefreshStateByShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.RefreshStateByShardRequest) if err := dec(in); err != nil { return nil, err } @@ -2857,6 +3525,24 @@ func _Vtctld_ReparentTablet_Handler(srv interface{}, ctx context.Context, dec fu return interceptor(ctx, in, info, handler) } +func _Vtctld_ReshardCreate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.ReshardCreateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).ReshardCreate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/ReshardCreate", + } + handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { + return srv.(VtctldServer).ReshardCreate(ctx, req.(*vtctldata.ReshardCreateRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Vtctld_RestoreFromBackup_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(vtctldata.RestoreFromBackupRequest) if err := stream.RecvMsg(m); err != nil { @@ -2878,6 +3564,24 @@ func (x *vtctldRestoreFromBackupServer) Send(m *vtctldata.RestoreFromBackupRespo return x.ServerStream.SendMsg(m) } +func _Vtctld_RetrySchemaMigration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.RetrySchemaMigrationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).RetrySchemaMigration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/RetrySchemaMigration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).RetrySchemaMigration(ctx, req.(*vtctldata.RetrySchemaMigrationRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Vtctld_RunHealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(vtctldata.RunHealthCheckRequest) if err := dec(in); err != nil { @@ -3310,6 +4014,150 @@ func _Vtctld_ValidateVSchema_Handler(srv interface{}, ctx context.Context, dec f return interceptor(ctx, in, info, handler) } +func _Vtctld_VDiffCreate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.VDiffCreateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).VDiffCreate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/vtctlservice.Vtctld/VDiffCreate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).VDiffCreate(ctx, req.(*vtctldata.VDiffCreateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_VDiffDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.VDiffDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).VDiffDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/VDiffDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).VDiffDelete(ctx, req.(*vtctldata.VDiffDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_VDiffResume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.VDiffResumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).VDiffResume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/VDiffResume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).VDiffResume(ctx, req.(*vtctldata.VDiffResumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_VDiffShow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.VDiffShowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).VDiffShow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/vtctlservice.Vtctld/VDiffShow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).VDiffShow(ctx, req.(*vtctldata.VDiffShowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_VDiffStop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.VDiffStopRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).VDiffStop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/VDiffStop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).VDiffStop(ctx, req.(*vtctldata.VDiffStopRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_WorkflowDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.WorkflowDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).WorkflowDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/WorkflowDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).WorkflowDelete(ctx, req.(*vtctldata.WorkflowDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_WorkflowStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.WorkflowStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).WorkflowStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/vtctlservice.Vtctld/WorkflowStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).WorkflowStatus(ctx, req.(*vtctldata.WorkflowStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_WorkflowSwitchTraffic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.WorkflowSwitchTrafficRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).WorkflowSwitchTraffic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/WorkflowSwitchTraffic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).WorkflowSwitchTraffic(ctx, req.(*vtctldata.WorkflowSwitchTrafficRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Vtctld_WorkflowUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(vtctldata.WorkflowUpdateRequest) if err := dec(in); err != nil { @@ -3359,10 +4207,22 @@ var Vtctld_ServiceDesc = grpc.ServiceDesc{ MethodName: "ApplyVSchema", Handler: _Vtctld_ApplyVSchema_Handler, }, + { + MethodName: "CancelSchemaMigration", + Handler: _Vtctld_CancelSchemaMigration_Handler, + }, { MethodName: "ChangeTabletType", Handler: _Vtctld_ChangeTabletType_Handler, }, + { + MethodName: "CleanupSchemaMigration", + Handler: _Vtctld_CleanupSchemaMigration_Handler, + }, + { + MethodName: "CompleteSchemaMigration", + Handler: _Vtctld_CompleteSchemaMigration_Handler, + }, { MethodName: "CreateKeyspace", Handler: _Vtctld_CreateKeyspace_Handler, @@ -3455,6 +4315,10 @@ var Vtctld_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetSchema", Handler: _Vtctld_GetSchema_Handler, }, + { + MethodName: "GetSchemaMigrations", + Handler: 
_Vtctld_GetSchemaMigrations_Handler, + }, { MethodName: "GetShard", Handler: _Vtctld_GetShard_Handler, @@ -3511,6 +4375,50 @@ var Vtctld_ServiceDesc = grpc.ServiceDesc{ MethodName: "InitShardPrimary", Handler: _Vtctld_InitShardPrimary_Handler, }, + { + MethodName: "LaunchSchemaMigration", + Handler: _Vtctld_LaunchSchemaMigration_Handler, + }, + { + MethodName: "LookupVindexCreate", + Handler: _Vtctld_LookupVindexCreate_Handler, + }, + { + MethodName: "LookupVindexExternalize", + Handler: _Vtctld_LookupVindexExternalize_Handler, + }, + { + MethodName: "MaterializeCreate", + Handler: _Vtctld_MaterializeCreate_Handler, + }, + { + MethodName: "MigrateCreate", + Handler: _Vtctld_MigrateCreate_Handler, + }, + { + MethodName: "MountRegister", + Handler: _Vtctld_MountRegister_Handler, + }, + { + MethodName: "MountUnregister", + Handler: _Vtctld_MountUnregister_Handler, + }, + { + MethodName: "MountShow", + Handler: _Vtctld_MountShow_Handler, + }, + { + MethodName: "MountList", + Handler: _Vtctld_MountList_Handler, + }, + { + MethodName: "MoveTablesCreate", + Handler: _Vtctld_MoveTablesCreate_Handler, + }, + { + MethodName: "MoveTablesComplete", + Handler: _Vtctld_MoveTablesComplete_Handler, + }, { MethodName: "PingTablet", Handler: _Vtctld_PingTablet_Handler, @@ -3563,6 +4471,14 @@ var Vtctld_ServiceDesc = grpc.ServiceDesc{ MethodName: "ReparentTablet", Handler: _Vtctld_ReparentTablet_Handler, }, + { + MethodName: "ReshardCreate", + Handler: _Vtctld_ReshardCreate_Handler, + }, + { + MethodName: "RetrySchemaMigration", + Handler: _Vtctld_RetrySchemaMigration_Handler, + }, { MethodName: "RunHealthCheck", Handler: _Vtctld_RunHealthCheck_Handler, @@ -3659,6 +4575,38 @@ var Vtctld_ServiceDesc = grpc.ServiceDesc{ MethodName: "ValidateVSchema", Handler: _Vtctld_ValidateVSchema_Handler, }, + { + MethodName: "VDiffCreate", + Handler: _Vtctld_VDiffCreate_Handler, + }, + { + MethodName: "VDiffDelete", + Handler: _Vtctld_VDiffDelete_Handler, + }, + { + MethodName: "VDiffResume", + 
Handler: _Vtctld_VDiffResume_Handler, + }, + { + MethodName: "VDiffShow", + Handler: _Vtctld_VDiffShow_Handler, + }, + { + MethodName: "VDiffStop", + Handler: _Vtctld_VDiffStop_Handler, + }, + { + MethodName: "WorkflowDelete", + Handler: _Vtctld_WorkflowDelete_Handler, + }, + { + MethodName: "WorkflowStatus", + Handler: _Vtctld_WorkflowStatus_Handler, + }, + { + MethodName: "WorkflowSwitchTraffic", + Handler: _Vtctld_WorkflowSwitchTraffic_Handler, + }, { MethodName: "WorkflowUpdate", Handler: _Vtctld_WorkflowUpdate_Handler, diff --git a/go/vt/proto/vtgate/vtgate.pb.go b/go/vt/proto/vtgate/vtgate.pb.go index c2668820f9f..aee90d134a4 100644 --- a/go/vt/proto/vtgate/vtgate.pb.go +++ b/go/vt/proto/vtgate/vtgate.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: vtgate.proto package vtgate @@ -225,6 +225,8 @@ type Session struct { // query_timeout is the maximum amount of time a query is permitted to run QueryTimeout int64 `protobuf:"varint,25,opt,name=query_timeout,json=queryTimeout,proto3" json:"query_timeout,omitempty"` PrepareStatement map[string]*PrepareData `protobuf:"bytes,26,rep,name=prepare_statement,json=prepareStatement,proto3" json:"prepare_statement,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // MigrationContext + MigrationContext string `protobuf:"bytes,27,opt,name=migration_context,json=migrationContext,proto3" json:"migration_context,omitempty"` } func (x *Session) Reset() { @@ -434,6 +436,13 @@ func (x *Session) GetPrepareStatement() map[string]*PrepareData { return nil } +func (x *Session) GetMigrationContext() string { + if x != nil { + return x.MigrationContext + } + return "" +} + // PrepareData keeps the prepared statement and other information related for execution of it. 
type PrepareData struct { state protoimpl.MessageState @@ -1615,7 +1624,7 @@ var file_vtgate_proto_rawDesc = []byte{ 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0b, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0xce, 0x0e, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, + 0x74, 0x6f, 0x22, 0xfb, 0x0e, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, 0x6e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x0e, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x73, @@ -1698,200 +1707,203 @@ var file_vtgate_proto_rawDesc = []byte{ 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0xd8, 0x01, 0x0a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, - 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, - 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x15, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, - 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, - 0x1f, 0x0a, 0x0b, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x6e, 0x6c, 0x79, - 0x1a, 0x5c, 0x0a, 0x19, 0x55, 0x73, 0x65, 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x56, - 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, - 0x62, 0x6c, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, - 0x0a, 0x14, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x1a, 0x3f, 0x0a, 0x11, 0x41, 0x64, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x79, 0x4c, 0x6f, - 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 
0x38, 0x01, 0x1a, 0x58, 0x0a, 0x15, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, - 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x44, 0x61, - 0x74, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, - 0x03, 0x10, 0x04, 0x22, 0x5d, 0x0a, 0x0b, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x44, 0x61, - 0x74, 0x61, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x70, - 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, - 0x21, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x22, 0xac, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72, - 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x61, 0x66, - 0x74, 0x65, 0x72, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x61, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x47, 0x74, 0x69, 0x64, 0x12, 0x37, 0x0a, 0x18, 0x72, 0x65, 0x61, 0x64, - 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x15, 0x72, 0x65, 0x61, 0x64, - 0x41, 0x66, 0x74, 0x65, 0x72, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x73, 0x65, 
0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x72, 0x61, - 0x63, 0x6b, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, - 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x47, 0x74, 0x69, 0x64, - 0x73, 0x22, 0xaa, 0x01, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, - 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, - 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, - 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, - 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, - 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x22, 0x8f, - 0x01, 0x0a, 0x0f, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, - 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 
0x74, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x22, 0xb3, 0x01, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x42, 0x61, 0x74, 0x63, - 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, - 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, - 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, - 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x2b, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, - 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x22, 0x9a, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x10, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x1a, 0xd8, 0x01, 0x0a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 
0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, + 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, + 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x6e, 0x6c, 0x79, 0x1a, 0x5c, 0x0a, + 0x19, 0x55, 0x73, 0x65, 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x56, 0x61, 0x72, 0x69, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x53, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0x3f, 0x0a, 0x11, 0x41, 0x64, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x79, 0x4c, 0x6f, 0x63, 0x6b, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x1a, 0x58, 0x0a, 0x15, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x67, + 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, + 0x22, 0x5d, 0x0a, 0x0b, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x2b, 0x0a, 0x11, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x70, 0x72, 0x65, 0x70, + 0x61, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0c, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, + 0xac, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, + 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 
0x28, + 0x09, 0x52, 0x12, 0x72, 0x65, 0x61, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x47, 0x74, 0x69, 0x64, 0x12, 0x37, 0x0a, 0x18, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x61, 0x66, + 0x74, 0x65, 0x72, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x15, 0x72, 0x65, 0x61, 0x64, 0x41, 0x66, 0x74, + 0x65, 0x72, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2e, + 0x0a, 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, + 0x67, 0x74, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x47, 0x74, 0x69, 0x64, 0x73, 0x22, 0xaa, + 0x01, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, + 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, + 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, + 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x22, 0x8f, 0x01, 0x0a, 0x0f, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 
0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x30, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x57, 0x69, 0x74, 0x68, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x73, 0x22, 0xaa, 0x01, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, - 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, - 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, + 0x6e, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xb3, 0x01, + 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 
0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, + 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, + 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4a, 0x04, - 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, - 0x22, 0x6e, 0x0a, 0x15, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, - 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x22, 0x5d, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, - 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, - 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, - 0x74, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, - 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, - 0x63, 0x74, 0x69, 
0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xec, 0x01, - 0x0a, 0x0c, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x23, - 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x73, 0x6b, 0x65, 0x77, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x53, - 0x6b, 0x65, 0x77, 0x12, 0x2d, 0x0a, 0x12, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, - 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x11, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, - 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x6f, 0x6e, 0x5f, 0x72, 0x65, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f, - 0x70, 0x4f, 0x6e, 0x52, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, - 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, - 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x65, 0x6c, 0x6c, 0x50, - 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x5f, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0xf6, 0x01, 0x0a, - 0x0e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, - 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x35, 0x0a, - 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 
0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x56, 0x47, 0x74, 0x69, 0x64, 0x52, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x12, 0x2a, 0x0a, - 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x05, 0x66, 0x6c, 0x61, - 0x67, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, - 0x65, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x05, - 0x66, 0x6c, 0x61, 0x67, 0x73, 0x22, 0x3d, 0x0a, 0x0f, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, - 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x73, 0x22, 0x92, 0x01, 0x0a, 0x0e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, - 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, - 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 
0x2e, - 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2b, + 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x89, 0x01, 0x0a, 0x0f, 0x50, 0x72, - 0x65, 0x70, 0x61, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, - 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, - 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x73, 0x22, 0x6e, 0x0a, 0x13, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, + 0x72, 0x79, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, + 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, + 0x07, 0x10, 0x08, 0x22, 0x9a, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x42, + 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, + 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, + 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x57, 0x69, + 0x74, 0x68, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x22, 0xaa, 0x01, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, + 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, + 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, + 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, + 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4a, 0x04, 0x08, 0x03, 0x10, + 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x6e, 0x0a, + 0x15, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 
0x65, 0x73, 0x75, 0x6c, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5d, 0x0a, + 0x19, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, + 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, + 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x1c, 0x0a, 0x1a, + 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xec, 0x01, 0x0a, 0x0c, 0x56, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6d, + 0x69, 0x6e, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x73, 0x6b, 0x65, 0x77, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0c, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x53, 0x6b, 0x65, 0x77, + 0x12, 0x2d, 0x0a, 0x12, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x68, 0x65, + 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, + 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x68, 0x61, + 0x72, 
0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x4f, 0x6e, + 0x52, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x27, 0x0a, + 0x0f, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x65, 0x6c, 0x6c, 0x50, 0x72, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x5f, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0xf6, 0x01, 0x0a, 0x0e, 0x56, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, - 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, - 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x3d, 0x0a, 0x14, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, - 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x2a, 0x44, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 
0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, 0x47, - 0x4c, 0x45, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x10, 0x02, 0x12, - 0x09, 0x0a, 0x05, 0x54, 0x57, 0x4f, 0x50, 0x43, 0x10, 0x03, 0x2a, 0x3c, 0x0a, 0x0b, 0x43, 0x6f, - 0x6d, 0x6d, 0x69, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x4f, 0x52, - 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x50, 0x52, 0x45, 0x10, 0x01, 0x12, 0x08, - 0x0a, 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x55, 0x54, 0x4f, - 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x03, 0x42, 0x36, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, - 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x23, 0x76, 0x69, 0x74, - 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, - 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x11, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x47, + 0x74, 0x69, 0x64, 0x52, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x14, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x56, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x05, 0x66, 0x6c, 0x61, + 0x67, 0x73, 0x22, 0x3d, 0x0a, 0x0f, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x22, 0x92, 0x01, 0x0a, 0x0e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, + 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, + 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, + 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x89, 0x01, 0x0a, 0x0f, 0x50, 0x72, 0x65, 0x70, 0x61, + 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, + 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 
0x53, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x06, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x73, 0x22, 0x6e, 0x0a, 0x13, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, + 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, + 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, + 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, + 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x3d, 0x0a, 0x14, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, + 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x2a, 0x44, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, + 0x01, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, + 0x54, 0x57, 0x4f, 0x50, 0x43, 0x10, 0x03, 0x2a, 0x3c, 0x0a, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x69, + 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, + 0x10, 0x00, 
0x12, 0x07, 0x0a, 0x03, 0x50, 0x52, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x50, + 0x4f, 0x53, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x55, 0x54, 0x4f, 0x43, 0x4f, 0x4d, + 0x4d, 0x49, 0x54, 0x10, 0x03, 0x42, 0x36, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, 0x69, 0x74, 0x65, + 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x23, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, + 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/go/vt/proto/vtgate/vtgate_vtproto.pb.go b/go/vt/proto/vtgate/vtgate_vtproto.pb.go index efc50b1600f..bec24472760 100644 --- a/go/vt/proto/vtgate/vtgate_vtproto.pb.go +++ b/go/vt/proto/vtgate/vtgate_vtproto.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: vtgate.proto package vtgate @@ -7,6 +7,7 @@ package vtgate import ( binary "encoding/binary" fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" math "math" @@ -24,6 +25,479 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *Session_ShardSession) CloneVT() *Session_ShardSession { + if m == nil { + return (*Session_ShardSession)(nil) + } + r := &Session_ShardSession{ + Target: m.Target.CloneVT(), + TransactionId: m.TransactionId, + TabletAlias: m.TabletAlias.CloneVT(), + ReservedId: m.ReservedId, + VindexOnly: m.VindexOnly, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Session_ShardSession) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Session) CloneVT() *Session { + if m == nil { + return (*Session)(nil) + } + r := &Session{ + InTransaction: 
m.InTransaction, + Autocommit: m.Autocommit, + TargetString: m.TargetString, + Options: m.Options.CloneVT(), + TransactionMode: m.TransactionMode, + LastInsertId: m.LastInsertId, + FoundRows: m.FoundRows, + RowCount: m.RowCount, + InReservedConn: m.InReservedConn, + LockSession: m.LockSession.CloneVT(), + LastLockHeartbeat: m.LastLockHeartbeat, + ReadAfterWrite: m.ReadAfterWrite.CloneVT(), + DDLStrategy: m.DDLStrategy, + SessionUUID: m.SessionUUID, + EnableSystemSettings: m.EnableSystemSettings, + QueryTimeout: m.QueryTimeout, + MigrationContext: m.MigrationContext, + } + if rhs := m.ShardSessions; rhs != nil { + tmpContainer := make([]*Session_ShardSession, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ShardSessions = tmpContainer + } + if rhs := m.Warnings; rhs != nil { + tmpContainer := make([]*query.QueryWarning, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Warnings = tmpContainer + } + if rhs := m.PreSessions; rhs != nil { + tmpContainer := make([]*Session_ShardSession, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.PreSessions = tmpContainer + } + if rhs := m.PostSessions; rhs != nil { + tmpContainer := make([]*Session_ShardSession, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.PostSessions = tmpContainer + } + if rhs := m.UserDefinedVariables; rhs != nil { + tmpContainer := make(map[string]*query.BindVariable, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.UserDefinedVariables = tmpContainer + } + if rhs := m.SystemVariables; rhs != nil { + tmpContainer := make(map[string]string, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.SystemVariables = tmpContainer + } + if rhs := m.Savepoints; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Savepoints = tmpContainer + } + if rhs := m.AdvisoryLock; rhs != nil { + tmpContainer := make(map[string]int64, len(rhs)) + 
for k, v := range rhs { + tmpContainer[k] = v + } + r.AdvisoryLock = tmpContainer + } + if rhs := m.PrepareStatement; rhs != nil { + tmpContainer := make(map[string]*PrepareData, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.PrepareStatement = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Session) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PrepareData) CloneVT() *PrepareData { + if m == nil { + return (*PrepareData)(nil) + } + r := &PrepareData{ + PrepareStatement: m.PrepareStatement, + ParamsCount: m.ParamsCount, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PrepareData) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReadAfterWrite) CloneVT() *ReadAfterWrite { + if m == nil { + return (*ReadAfterWrite)(nil) + } + r := &ReadAfterWrite{ + ReadAfterWriteGtid: m.ReadAfterWriteGtid, + ReadAfterWriteTimeout: m.ReadAfterWriteTimeout, + SessionTrackGtids: m.SessionTrackGtids, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReadAfterWrite) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteRequest) CloneVT() *ExecuteRequest { + if m == nil { + return (*ExecuteRequest)(nil) + } + r := &ExecuteRequest{ + CallerId: m.CallerId.CloneVT(), + Session: m.Session.CloneVT(), + Query: m.Query.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteResponse) CloneVT() *ExecuteResponse { + if m == nil { + return 
(*ExecuteResponse)(nil) + } + r := &ExecuteResponse{ + Error: m.Error.CloneVT(), + Session: m.Session.CloneVT(), + Result: m.Result.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteBatchRequest) CloneVT() *ExecuteBatchRequest { + if m == nil { + return (*ExecuteBatchRequest)(nil) + } + r := &ExecuteBatchRequest{ + CallerId: m.CallerId.CloneVT(), + Session: m.Session.CloneVT(), + } + if rhs := m.Queries; rhs != nil { + tmpContainer := make([]*query.BoundQuery, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Queries = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExecuteBatchRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteBatchResponse) CloneVT() *ExecuteBatchResponse { + if m == nil { + return (*ExecuteBatchResponse)(nil) + } + r := &ExecuteBatchResponse{ + Error: m.Error.CloneVT(), + Session: m.Session.CloneVT(), + } + if rhs := m.Results; rhs != nil { + tmpContainer := make([]*query.ResultWithError, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Results = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExecuteBatchResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamExecuteRequest) CloneVT() *StreamExecuteRequest { + if m == nil { + return (*StreamExecuteRequest)(nil) + } + r := &StreamExecuteRequest{ + CallerId: m.CallerId.CloneVT(), + Query: m.Query.CloneVT(), + Session: m.Session.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, 
len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamExecuteResponse) CloneVT() *StreamExecuteResponse { + if m == nil { + return (*StreamExecuteResponse)(nil) + } + r := &StreamExecuteResponse{ + Result: m.Result.CloneVT(), + Session: m.Session.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ResolveTransactionRequest) CloneVT() *ResolveTransactionRequest { + if m == nil { + return (*ResolveTransactionRequest)(nil) + } + r := &ResolveTransactionRequest{ + CallerId: m.CallerId.CloneVT(), + Dtid: m.Dtid, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ResolveTransactionRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ResolveTransactionResponse) CloneVT() *ResolveTransactionResponse { + if m == nil { + return (*ResolveTransactionResponse)(nil) + } + r := &ResolveTransactionResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ResolveTransactionResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamFlags) CloneVT() *VStreamFlags { + if m == nil { + return (*VStreamFlags)(nil) + } + r := &VStreamFlags{ + MinimizeSkew: m.MinimizeSkew, + HeartbeatInterval: m.HeartbeatInterval, + StopOnReshard: m.StopOnReshard, + Cells: m.Cells, + CellPreference: m.CellPreference, + TabletOrder: m.TabletOrder, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + 
+func (m *VStreamFlags) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamRequest) CloneVT() *VStreamRequest { + if m == nil { + return (*VStreamRequest)(nil) + } + r := &VStreamRequest{ + CallerId: m.CallerId.CloneVT(), + TabletType: m.TabletType, + Vgtid: m.Vgtid.CloneVT(), + Filter: m.Filter.CloneVT(), + Flags: m.Flags.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamResponse) CloneVT() *VStreamResponse { + if m == nil { + return (*VStreamResponse)(nil) + } + r := &VStreamResponse{} + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*binlogdata.VEvent, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PrepareRequest) CloneVT() *PrepareRequest { + if m == nil { + return (*PrepareRequest)(nil) + } + r := &PrepareRequest{ + CallerId: m.CallerId.CloneVT(), + Session: m.Session.CloneVT(), + Query: m.Query.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PrepareRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PrepareResponse) CloneVT() *PrepareResponse { + if m == nil { + return (*PrepareResponse)(nil) + } + r := &PrepareResponse{ + Error: m.Error.CloneVT(), + Session: m.Session.CloneVT(), + } + if rhs := m.Fields; rhs != nil { + tmpContainer := make([]*query.Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Fields = tmpContainer + } + if 
len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PrepareResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CloseSessionRequest) CloneVT() *CloseSessionRequest { + if m == nil { + return (*CloseSessionRequest)(nil) + } + r := &CloseSessionRequest{ + CallerId: m.CallerId.CloneVT(), + Session: m.Session.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CloseSessionRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CloseSessionResponse) CloneVT() *CloseSessionResponse { + if m == nil { + return (*CloseSessionResponse)(nil) + } + r := &CloseSessionResponse{ + Error: m.Error.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CloseSessionResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Session_ShardSession) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -127,6 +601,15 @@ func (m *Session) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.MigrationContext) > 0 { + i -= len(m.MigrationContext) + copy(dAtA[i:], m.MigrationContext) + i = encodeVarint(dAtA, i, uint64(len(m.MigrationContext))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } if len(m.PrepareStatement) > 0 { for k := range m.PrepareStatement { v := m.PrepareStatement[k] @@ -1562,6 +2045,10 @@ func (m *Session) SizeVT() (n int) { n += mapEntrySize + 2 + sov(uint64(mapEntrySize)) } } + l = len(m.MigrationContext) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -3197,6 +3684,38 @@ func (m *Session) UnmarshalVT(dAtA []byte) error { } 
m.PrepareStatement[mapkey] = mapvalue iNdEx = postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MigrationContext", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MigrationContext = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/go/vt/proto/vtgateservice/vtgateservice.pb.go b/go/vt/proto/vtgateservice/vtgateservice.pb.go index 457a2c90333..2008d486dc9 100644 --- a/go/vt/proto/vtgateservice/vtgateservice.pb.go +++ b/go/vt/proto/vtgateservice/vtgateservice.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: vtgateservice.proto package vtgateservice diff --git a/go/vt/proto/vtgateservice/vtgateservice_grpc.pb.go b/go/vt/proto/vtgateservice/vtgateservice_grpc.pb.go index 33c8ba81ca6..44dd83d7f0b 100644 --- a/go/vt/proto/vtgateservice/vtgateservice_grpc.pb.go +++ b/go/vt/proto/vtgateservice/vtgateservice_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 -// - protoc v3.6.1 +// - protoc v3.21.3 // source: vtgateservice.proto package vtgateservice diff --git a/go/vt/proto/vtrpc/vtrpc.pb.go b/go/vt/proto/vtrpc/vtrpc.pb.go index 3ab7aade829..0c82dc34bf5 100644 --- a/go/vt/proto/vtrpc/vtrpc.pb.go +++ b/go/vt/proto/vtrpc/vtrpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: vtrpc.proto package vtrpc diff --git a/go/vt/proto/vtrpc/vtrpc_vtproto.pb.go b/go/vt/proto/vtrpc/vtrpc_vtproto.pb.go index 008fe7aa100..36fb8ba8627 100644 --- a/go/vt/proto/vtrpc/vtrpc_vtproto.pb.go +++ b/go/vt/proto/vtrpc/vtrpc_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: vtrpc.proto package vtrpc import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -18,6 +19,50 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *CallerID) CloneVT() *CallerID { + if m == nil { + return (*CallerID)(nil) + } + r := &CallerID{ + Principal: m.Principal, + Component: m.Component, + Subcomponent: m.Subcomponent, + } + if rhs := m.Groups; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Groups = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CallerID) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RPCError) CloneVT() *RPCError { + if m == nil { + return (*RPCError)(nil) + } + r := &RPCError{ + Message: m.Message, + Code: m.Code, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RPCError) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *CallerID) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil diff --git a/go/vt/proto/vttest/vttest.pb.go b/go/vt/proto/vttest/vttest.pb.go index f79d96f3f64..4b4f269d38c 100644 --- a/go/vt/proto/vttest/vttest.pb.go +++ b/go/vt/proto/vttest/vttest.pb.go @@ 
-41,8 +41,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: vttest.proto package vttest diff --git a/go/vt/proto/vttest/vttest_vtproto.pb.go b/go/vt/proto/vttest/vttest_vtproto.pb.go index a7474446a26..f1dee298011 100644 --- a/go/vt/proto/vttest/vttest_vtproto.pb.go +++ b/go/vt/proto/vttest/vttest_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: vttest.proto package vttest import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -19,6 +20,83 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *Shard) CloneVT() *Shard { + if m == nil { + return (*Shard)(nil) + } + r := &Shard{ + Name: m.Name, + DbNameOverride: m.DbNameOverride, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Shard) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Keyspace) CloneVT() *Keyspace { + if m == nil { + return (*Keyspace)(nil) + } + r := &Keyspace{ + Name: m.Name, + ServedFrom: m.ServedFrom, + ReplicaCount: m.ReplicaCount, + RdonlyCount: m.RdonlyCount, + } + if rhs := m.Shards; rhs != nil { + tmpContainer := make([]*Shard, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Shards = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Keyspace) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VTTestTopology) CloneVT() *VTTestTopology { + if m == nil { + return (*VTTestTopology)(nil) + } + r := &VTTestTopology{ + RoutingRules: 
m.RoutingRules.CloneVT(), + } + if rhs := m.Keyspaces; rhs != nil { + tmpContainer := make([]*Keyspace, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Keyspaces = tmpContainer + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VTTestTopology) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Shard) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil diff --git a/go/vt/vtgr/plugin_etcd2topo.go b/go/vt/proto/vttime/cached_size.go similarity index 61% rename from go/vt/vtgr/plugin_etcd2topo.go rename to go/vt/proto/vttime/cached_size.go index 0f9c385f69b..e34da16852c 100644 --- a/go/vt/vtgr/plugin_etcd2topo.go +++ b/go/vt/proto/vttime/cached_size.go @@ -13,11 +13,23 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by Sizegen. DO NOT EDIT. -package vtgr +package vttime -// This plugin imports etcd2topo to register the etcd2 implementation of TopoServer. +import hack "vitess.io/vitess/go/hack" -import ( - _ "vitess.io/vitess/go/vt/topo/etcd2topo" -) +func (cached *Time) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(64) + } + // field unknownFields []byte + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownFields))) + } + return size +} diff --git a/go/vt/proto/vttime/vttime.pb.go b/go/vt/proto/vttime/vttime.pb.go index c9ed276b710..5cdf3f616ce 100644 --- a/go/vt/proto/vttime/vttime.pb.go +++ b/go/vt/proto/vttime/vttime.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.6.1 +// protoc-gen-go v1.30.0 +// protoc v3.21.3 // source: vttime.proto package vttime diff --git a/go/vt/proto/vttime/vttime_vtproto.pb.go b/go/vt/proto/vttime/vttime_vtproto.pb.go index d1e1ce8a4cc..aa53a902df5 100644 --- a/go/vt/proto/vttime/vttime_vtproto.pb.go +++ b/go/vt/proto/vttime/vttime_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: vttime.proto package vttime import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -18,6 +19,44 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *Time) CloneVT() *Time { + if m == nil { + return (*Time)(nil) + } + r := &Time{ + Seconds: m.Seconds, + Nanoseconds: m.Nanoseconds, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Time) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Duration) CloneVT() *Duration { + if m == nil { + return (*Duration)(nil) + } + r := &Duration{ + Seconds: m.Seconds, + Nanos: m.Nanos, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Duration) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Time) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil diff --git a/go/vt/schema/ddl_strategy.go b/go/vt/schema/ddl_strategy.go index 0a1ab8b5888..bc33c8cb3cf 100644 --- a/go/vt/schema/ddl_strategy.go +++ b/go/vt/schema/ddl_strategy.go @@ -20,6 +20,7 @@ import ( "fmt" "regexp" "strconv" + "strings" "time" "github.com/google/shlex" @@ -28,6 +29,7 @@ import ( var ( strategyParserRegexp = regexp.MustCompile(`^([\S]+)\s+(.*)$`) 
cutOverThresholdFlagRegexp = regexp.MustCompile(fmt.Sprintf(`^[-]{1,2}%s=(.*?)$`, cutOverThresholdFlag)) + retainArtifactsFlagRegexp = regexp.MustCompile(fmt.Sprintf(`^[-]{1,2}%s=(.*?)$`, retainArtifactsFlag)) ) const ( @@ -43,8 +45,10 @@ const ( preferInstantDDL = "prefer-instant-ddl" fastRangeRotationFlag = "fast-range-rotation" cutOverThresholdFlag = "cut-over-threshold" + retainArtifactsFlag = "retain-artifacts" vreplicationTestSuite = "vreplication-test-suite" allowForeignKeysFlag = "unsafe-allow-foreign-keys" + analyzeTableFlag = "analyze-table" ) // DDLStrategy suggests how an ALTER TABLE should run (e.g. "direct", "online", "gh-ost" or "pt-osc") @@ -109,6 +113,17 @@ func ParseDDLStrategy(strategyVariable string) (*DDLStrategySetting, error) { if _, err := setting.CutOverThreshold(); err != nil { return nil, err } + if _, err := setting.RetainArtifactsDuration(); err != nil { + return nil, err + } + + switch setting.Strategy { + case DDLStrategyVitess, DDLStrategyOnline, DDLStrategyMySQL, DDLStrategyDirect: + if opts := setting.RuntimeOptions(); len(opts) > 0 { + return nil, fmt.Errorf("invalid flags for %v strategy: %s", setting.Strategy, strings.Join(opts, " ")) + } + } + return setting, nil } @@ -193,7 +208,16 @@ func isCutOverThresholdFlag(opt string) (string, bool) { return submatch[1], true } -// CutOverThreshold returns a list of shards specified in '--shards=...', or an empty slice if unspecified +// isRetainArtifactsFlag returns true when given option denotes a `--retain-artifacts=[...]` flag +func isRetainArtifactsFlag(opt string) (string, bool) { + submatch := retainArtifactsFlagRegexp.FindStringSubmatch(opt) + if len(submatch) == 0 { + return "", false + } + return submatch[1], true +} + +// CutOverThreshold returns a the duration threshold indicated by --cut-over-threshold func (setting *DDLStrategySetting) CutOverThreshold() (d time.Duration, err error) { // We do some ugly manual parsing of --cut-over-threshold value opts, _ := 
shlex.Split(setting.Options) @@ -211,6 +235,24 @@ func (setting *DDLStrategySetting) CutOverThreshold() (d time.Duration, err erro return d, err } +// RetainArtifactsDuration returns a the duration indicated by --retain-artifacts +func (setting *DDLStrategySetting) RetainArtifactsDuration() (d time.Duration, err error) { + // We do some ugly manual parsing of --retain-artifacts + opts, _ := shlex.Split(setting.Options) + for _, opt := range opts { + if val, isRetainArtifacts := isRetainArtifactsFlag(opt); isRetainArtifacts { + // value is possibly quoted + if s, err := strconv.Unquote(val); err == nil { + val = s + } + if val != "" { + d, err = time.ParseDuration(val) + } + } + } + return d, err +} + // IsVreplicationTestSuite checks if strategy options include --vreplicatoin-test-suite func (setting *DDLStrategySetting) IsVreplicationTestSuite() bool { return setting.hasFlag(vreplicationTestSuite) @@ -221,6 +263,11 @@ func (setting *DDLStrategySetting) IsAllowForeignKeysFlag() bool { return setting.hasFlag(allowForeignKeysFlag) } +// IsAnalyzeTableFlag checks if strategy options include --analyze-table +func (setting *DDLStrategySetting) IsAnalyzeTableFlag() bool { + return setting.hasFlag(analyzeTableFlag) +} + // RuntimeOptions returns the options used as runtime flags for given strategy, removing any internal hint options func (setting *DDLStrategySetting) RuntimeOptions() []string { opts, _ := shlex.Split(setting.Options) @@ -229,6 +276,9 @@ func (setting *DDLStrategySetting) RuntimeOptions() []string { if _, ok := isCutOverThresholdFlag(opt); ok { continue } + if _, ok := isRetainArtifactsFlag(opt); ok { + continue + } switch { case isFlag(opt, declarativeFlag): case isFlag(opt, skipTopoFlag): @@ -243,6 +293,7 @@ func (setting *DDLStrategySetting) RuntimeOptions() []string { case isFlag(opt, fastRangeRotationFlag): case isFlag(opt, vreplicationTestSuite): case isFlag(opt, allowForeignKeysFlag): + case isFlag(opt, analyzeTableFlag): default: validOpts = 
append(validOpts, opt) } diff --git a/go/vt/schema/ddl_strategy_test.go b/go/vt/schema/ddl_strategy_test.go index 4ec1a1535ea..ba7d029b8b7 100644 --- a/go/vt/schema/ddl_strategy_test.go +++ b/go/vt/schema/ddl_strategy_test.go @@ -41,19 +41,23 @@ func TestIsDirect(t *testing.T) { func TestIsCutOverThresholdFlag(t *testing.T) { tt := []struct { - s string - expect bool - val string - d time.Duration + s string + expect bool + expectError string + val string + d time.Duration }{ { - s: "something", + s: "something", + expectError: "invalid flags", }, { - s: "-cut-over-threshold", + s: "-cut-over-threshold", + expectError: "invalid flags", }, { - s: "--cut-over-threshold", + s: "--cut-over-threshold", + expectError: "invalid flags", }, { s: "--cut-over-threshold=", @@ -87,9 +91,14 @@ func TestIsCutOverThresholdFlag(t *testing.T) { for _, ts := range tt { t.Run(ts.s, func(t *testing.T) { setting, err := ParseDDLStrategy("online " + ts.s) + if ts.expectError != "" { + assert.ErrorContains(t, err, ts.expectError) + return + } + assert.NoError(t, err) - val, isCutOver := isCutOverThresholdFlag((ts.s)) + val, isCutOver := isCutOverThresholdFlag(ts.s) assert.Equal(t, ts.expect, isCutOver) assert.Equal(t, ts.val, val) @@ -102,6 +111,77 @@ func TestIsCutOverThresholdFlag(t *testing.T) { } } +func TestIsExpireArtifactsFlag(t *testing.T) { + tt := []struct { + s string + expect bool + expectError string + val string + d time.Duration + }{ + { + s: "something", + expectError: "invalid flags", + }, + { + s: "-retain-artifacts", + expectError: "invalid flags", + }, + { + s: "--retain-artifacts", + expectError: "invalid flags", + }, + { + s: "--retain-artifacts=", + expect: true, + }, + { + s: "--retain-artifacts=0", + expect: true, + val: "0", + d: 0, + }, + { + s: "-retain-artifacts=0", + expect: true, + val: "0", + d: 0, + }, + { + s: "--retain-artifacts=1m", + expect: true, + val: "1m", + d: time.Minute, + }, + { + s: `--retain-artifacts="1m"`, + expect: true, + val: `"1m"`, + 
d: time.Minute, + }, + } + for _, ts := range tt { + t.Run(ts.s, func(t *testing.T) { + setting, err := ParseDDLStrategy("online " + ts.s) + if ts.expectError != "" { + assert.ErrorContains(t, err, ts.expectError) + return + } + assert.NoError(t, err) + + val, isRetainArtifacts := isRetainArtifactsFlag(ts.s) + assert.Equal(t, ts.expect, isRetainArtifacts) + assert.Equal(t, ts.val, val) + + if ts.expect { + d, err := setting.RetainArtifactsDuration() + assert.NoError(t, err) + assert.Equal(t, ts.d, d) + } + }) + } +} + func TestParseDDLStrategy(t *testing.T) { tt := []struct { strategyVariable string @@ -116,9 +196,11 @@ func TestParseDDLStrategy(t *testing.T) { fastOverRevertible bool fastRangeRotation bool allowForeignKeys bool + analyzeTable bool cutOverThreshold time.Duration + expireArtifacts time.Duration runtimeOptions string - err error + expectError string }{ { strategyVariable: "direct", @@ -238,10 +320,43 @@ func TestParseDDLStrategy(t *testing.T) { runtimeOptions: "", cutOverThreshold: 5 * time.Minute, }, + { + strategyVariable: "vitess --retain-artifacts=4m", + strategy: DDLStrategyVitess, + options: "--retain-artifacts=4m", + runtimeOptions: "", + expireArtifacts: 4 * time.Minute, + }, + { + strategyVariable: "vitess --analyze-table", + strategy: DDLStrategyVitess, + options: "--analyze-table", + runtimeOptions: "", + analyzeTable: true, + }, + + { + strategyVariable: "vitess --alow-concrrnt", // intentional typo + strategy: DDLStrategyVitess, + options: "", + runtimeOptions: "", + expectError: "invalid flags", + }, + { + strategyVariable: "vitess --declarative --max-load=Threads_running=100", + strategy: DDLStrategyVitess, + options: "--declarative --max-load=Threads_running=100", + runtimeOptions: "--max-load=Threads_running=100", + expectError: "invalid flags", + }, } for _, ts := range tt { t.Run(ts.strategyVariable, func(t *testing.T) { setting, err := ParseDDLStrategy(ts.strategyVariable) + if ts.expectError != "" { + assert.ErrorContains(t, err, 
ts.expectError) + return + } assert.NoError(t, err) assert.Equal(t, ts.strategy, setting.Strategy) assert.Equal(t, ts.options, setting.Options) @@ -253,6 +368,7 @@ func TestParseDDLStrategy(t *testing.T) { assert.Equal(t, ts.fastOverRevertible, setting.IsPreferInstantDDL()) assert.Equal(t, ts.fastRangeRotation, setting.IsFastRangeRotationFlag()) assert.Equal(t, ts.allowForeignKeys, setting.IsAllowForeignKeysFlag()) + assert.Equal(t, ts.analyzeTable, setting.IsAnalyzeTableFlag()) cutOverThreshold, err := setting.CutOverThreshold() assert.NoError(t, err) assert.Equal(t, ts.cutOverThreshold, cutOverThreshold) @@ -273,4 +389,8 @@ func TestParseDDLStrategy(t *testing.T) { _, err := ParseDDLStrategy("online --cut-over-threshold=3") assert.Error(t, err) } + { + _, err := ParseDDLStrategy("online --retain-artifacts=3") + assert.Error(t, err) + } } diff --git a/go/vt/schema/online_ddl.go b/go/vt/schema/online_ddl.go index 71f07add0c2..a06866e996a 100644 --- a/go/vt/schema/online_ddl.go +++ b/go/vt/schema/online_ddl.go @@ -34,6 +34,7 @@ var ( onlineDdlUUIDRegexp = regexp.MustCompile(`^[0-f]{8}_[0-f]{4}_[0-f]{4}_[0-f]{4}_[0-f]{12}$`) onlineDDLGeneratedTableNameRegexp = regexp.MustCompile(`^_[0-f]{8}_[0-f]{4}_[0-f]{4}_[0-f]{4}_[0-f]{12}_([0-9]{14})_(gho|ghc|del|new|vrepl)$`) ptOSCGeneratedTableNameRegexp = regexp.MustCompile(`^_.*_old$`) + migrationContextValidatorRegexp = regexp.MustCompile(`^[\w:-]*$`) ) var ( @@ -52,6 +53,14 @@ const ( RevertActionStr = "revert" ) +// ValidateMigrationContext validates that the given migration context only uses valid characters +func ValidateMigrationContext(migrationContext string) error { + if migrationContextValidatorRegexp.MatchString(migrationContext) { + return nil + } + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid characters in migration_context %v. 
Use alphanumeric, dash, underscore and colon only", migrationContext) +} + // when validateWalk returns true, then the child nodes are also visited func validateWalk(node sqlparser.SQLNode, allowForeignKeys bool) (kontinue bool, err error) { switch node.(type) { diff --git a/go/vt/schema/online_ddl_test.go b/go/vt/schema/online_ddl_test.go index c559c1e75f1..dbcad5454dc 100644 --- a/go/vt/schema/online_ddl_test.go +++ b/go/vt/schema/online_ddl_test.go @@ -322,14 +322,6 @@ func TestNewOnlineDDLs(t *testing.T) { } func TestNewOnlineDDLsForeignKeys(t *testing.T) { - type expect struct { - sqls []string - notDDL bool - parseError bool - isError bool - expectErrorText string - isView bool - } queries := []string{ "alter table corder add FOREIGN KEY my_fk(customer_id) references customer(customer_id)", "create table t1 (id int primary key, i int, foreign key (i) references parent(id))", @@ -400,3 +392,30 @@ func TestOnlineDDLFromCommentedStatement(t *testing.T) { }) } } + +func TestValidateMigrationContext(t *testing.T) { + tcases := []struct { + m string + expectError bool + }{ + {"", false}, + {"abc", false}, + {"abc-def", false}, + {"abc-DEF", false}, + {"abc-def-123", false}, + {"under_score:abc-DEF-123", false}, + {"~", true}, + {",", true}, + {"abc^def", true}, + } + for _, tcase := range tcases { + t.Run(tcase.m, func(t *testing.T) { + err := ValidateMigrationContext(tcase.m) + if tcase.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/go/vt/schemadiff/diff_test.go b/go/vt/schemadiff/diff_test.go index 57d626db493..2f8d913f042 100644 --- a/go/vt/schemadiff/diff_test.go +++ b/go/vt/schemadiff/diff_test.go @@ -777,6 +777,22 @@ func TestDiffSchemas(t *testing.T) { "CREATE TABLE `t4` (\n\t`id` int,\n\tPRIMARY KEY (`id`)\n)", }, }, + { + // Making sure schemadiff distinguishes between VIEWs with different casing + name: "case insensitive views", + from: "create view v1 as select * from t; create table t(id int primary 
key); create view V1 as select * from t", + to: "", + diffs: []string{ + "drop view v1", + "drop view V1", + "drop table t", + }, + cdiffs: []string{ + "DROP VIEW `v1`", + "DROP VIEW `V1`", + "DROP TABLE `t`", + }, + }, } for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { diff --git a/go/vt/schemadiff/schema_diff_test.go b/go/vt/schemadiff/schema_diff_test.go index 670e84c6f1a..f6477c1885f 100644 --- a/go/vt/schemadiff/schema_diff_test.go +++ b/go/vt/schemadiff/schema_diff_test.go @@ -649,6 +649,20 @@ func TestSchemaDiff(t *testing.T) { sequential: true, conflictingDiffs: 2, }, + { + name: "two identical foreign keys in table, drop one", + fromQueries: []string{ + "create table parent (id int primary key)", + "create table t1 (id int primary key, i int, key i_idex (i), constraint f1 foreign key (i) references parent(id), constraint f2 foreign key (i) references parent(id))", + }, + toQueries: []string{ + "create table parent (id int primary key)", + "create table t1 (id int primary key, i int, key i_idex (i), constraint f1 foreign key (i) references parent(id))", + }, + expectDiffs: 1, + expectDeps: 0, + entityOrder: []string{"t1"}, + }, } hints := &DiffHints{RangeRotationStrategy: RangeRotationDistinctStatements} for _, tc := range tt { diff --git a/go/vt/schemadiff/schema_test.go b/go/vt/schemadiff/schema_test.go index 1fe6b0f2d86..79bf44117e2 100644 --- a/go/vt/schemadiff/schema_test.go +++ b/go/vt/schemadiff/schema_test.go @@ -17,9 +17,12 @@ limitations under the License. 
package schemadiff import ( + "fmt" + "math/rand" "sort" "strings" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -628,7 +631,7 @@ func TestViewReferences(t *testing.T) { "create table t2(id int primary key, n int, info int)", "create view v1 as select id, c as ch from t1 where id > 0", "create view v2 as select n as num, info from t2", - "create view v3 as select num, v1.id, ch from v1 join v2 using (id) where info > 5", + "create view v3 as select num, v1.id, ch from v1 join v2 on v1.id = v2.num where info > 5", }, }, { @@ -711,3 +714,112 @@ }) } } + +// TestMassiveSchema loads thousands of tables into one schema, and thousands of tables, some of which are different, into another schema. +// It compares the two schemas. +// The objective of this test is to verify that execution time is _reasonable_. Since this will run in GitHub CI, which is very slow, we allow +// for 1 minute total for all operations. 
+func TestMassiveSchema(t *testing.T) { + tableBase := ` + CREATE TABLE IF NOT EXISTS placeholder + ( + id int NOT NULL AUTO_INCREMENT, + workflow varbinary(1000) DEFAULT NULL, + source mediumblob NOT NULL, + pos varbinary(10000) NOT NULL, + stop_pos varbinary(10000) DEFAULT NULL, + max_tps bigint NOT NULL, + max_replication_lag bigint NOT NULL, + cell varbinary(1000) DEFAULT NULL, + tablet_types varbinary(100) DEFAULT NULL, + time_updated bigint NOT NULL, + transaction_timestamp bigint NOT NULL, + state varbinary(100) NOT NULL, + message varbinary(1000) DEFAULT NULL, + db_name varbinary(255) NOT NULL, + rows_copied bigint NOT NULL DEFAULT '0', + tags varbinary(1024) NOT NULL DEFAULT '', + time_heartbeat bigint NOT NULL DEFAULT '0', + workflow_type int NOT NULL DEFAULT '0', + time_throttled bigint NOT NULL DEFAULT '0', + component_throttled varchar(255) NOT NULL DEFAULT '', + workflow_sub_type int NOT NULL DEFAULT '0', + defer_secondary_keys tinyint(1) NOT NULL DEFAULT '0', + PRIMARY KEY (id), + KEY workflow_idx (workflow(64)), + KEY time_heartbeat_idx (time_heartbeat) + ) ENGINE = InnoDB + ` + // Remove a couple columns into a modified table + modifiedTable := tableBase + for _, s := range []string{ + "workflow varbinary(1000) DEFAULT NULL,\n", + "KEY workflow_idx (workflow(64)),\n", + } { + require.Contains(t, tableBase, s) + modifiedTable = strings.Replace(modifiedTable, s, "", -1) + } + require.NotEqual(t, tableBase, modifiedTable) + + var schema0 *Schema + var schema1 *Schema + var err error + numTables := 8192 + modifyTables := 500 + countModifiedTables := 0 + tableNames := map[string]bool{} + + startTime := time.Now() + + // Load thousands of tables into each schema + t.Run(fmt.Sprintf("load %d tables into schemas", numTables), func(t *testing.T) { + modifiedTableIndexes := map[int]bool{} + for i, index := range rand.Perm(numTables) { + if i >= modifyTables { + break + } + modifiedTableIndexes[index] = true + } + queries0 := make([]string, 0, numTables) // 
to be loaded into schema0 + queries1 := make([]string, 0, numTables) // to be loaded into schema1 + for i := 0; i < numTables; i++ { + tableName := fmt.Sprintf("tbl_%05d", i) + query := strings.Replace(tableBase, "placeholder", tableName, -1) + queries0 = append(queries0, query) + if modifiedTableIndexes[i] { + // Some tables in schema1 are changed + query = strings.Replace(modifiedTable, "placeholder", tableName, -1) + countModifiedTables++ + } + queries1 = append(queries1, query) + tableNames[tableName] = true + } + schema0, err = NewSchemaFromQueries(queries0) + require.NoError(t, err) + schema1, err = NewSchemaFromQueries(queries1) + require.NoError(t, err) + + require.Equal(t, countModifiedTables, modifyTables) + }) + t.Run(fmt.Sprintf("validate loaded %d tables", numTables), func(t *testing.T) { + for _, schema := range []*Schema{schema0, schema1} { + entities := schema.Entities() + assert.Equal(t, numTables, len(entities)) // all tables are there + for _, e := range entities { + _, ok := tableNames[e.Name()] + assert.True(t, ok) + } + } + }) + + t.Run("evaluating diff", func(t *testing.T) { + schemaDiff, err := schema0.SchemaDiff(schema1, &DiffHints{}) + require.NoError(t, err) + diffs := schemaDiff.UnorderedDiffs() + require.NotEmpty(t, diffs) + require.Equal(t, len(diffs), countModifiedTables) + }) + + elapsed := time.Since(startTime) + assert.Less(t, elapsed, time.Minute) +} diff --git a/go/vt/schemadiff/table.go b/go/vt/schemadiff/table.go index 56ee960c267..b24184fe487 100644 --- a/go/vt/schemadiff/table.go +++ b/go/vt/schemadiff/table.go @@ -25,6 +25,8 @@ import ( golcs "github.com/yudai/golcs" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/sqlparser" ) @@ -392,7 +394,7 @@ const mysqlCollationVersion = "8.0.0" var collationEnv = collations.NewEnvironment(mysqlCollationVersion) func defaultCharset() string { - collation := collations.ID(collationEnv.DefaultConnectionCharset()).Get() + 
collation := colldata.Lookup(collations.ID(collationEnv.DefaultConnectionCharset())) if collation == nil { return "" } @@ -401,10 +403,10 @@ func defaultCharset() string { func defaultCharsetCollation(charset string) string { collation := collationEnv.DefaultCollationForCharset(charset) - if collation == nil { + if collation == collations.Unknown { return "" } - return collation.Name() + return collationEnv.LookupName(collation) } func (c *CreateTableEntity) normalizeColumnOptions() { @@ -457,6 +459,7 @@ func (c *CreateTableEntity) normalizeColumnOptions() { // See also https://dev.mysql.com/doc/refman/8.0/en/data-type-defaults.html if _, ok := col.Type.Options.Default.(*sqlparser.NullVal); ok { col.Type.Options.Default = nil + col.Type.Options.DefaultLiteral = false } } @@ -507,6 +510,7 @@ func (c *CreateTableEntity) normalizeColumnOptions() { Type: sqlparser.StrVal, Val: defaultVal, } + col.Type.Options.DefaultLiteral = true } else { col.Type.Options.Default = nil } @@ -1281,11 +1285,14 @@ func (c *CreateTableEntity) diffConstraints(alterTable *sqlparser.AlterTable, } t1ConstraintsMap := map[string]*sqlparser.ConstraintDefinition{} t2ConstraintsMap := map[string]*sqlparser.ConstraintDefinition{} + t2ConstraintsCountMap := map[string]int{} for _, constraint := range t1Constraints { t1ConstraintsMap[normalizeConstraintName(t1Name, constraint)] = constraint } for _, constraint := range t2Constraints { - t2ConstraintsMap[normalizeConstraintName(t2Name, constraint)] = constraint + constraintName := normalizeConstraintName(t2Name, constraint) + t2ConstraintsMap[constraintName] = constraint + t2ConstraintsCountMap[constraintName]++ } dropConstraintStatement := func(constraint *sqlparser.ConstraintDefinition) *sqlparser.DropKey { @@ -1298,12 +1305,22 @@ func (c *CreateTableEntity) diffConstraints(alterTable *sqlparser.AlterTable, // evaluate dropped constraints // for _, t1Constraint := range t1Constraints { - if _, ok := t2ConstraintsMap[normalizeConstraintName(t1Name, 
t1Constraint)]; !ok { + // Due to how we normalize the constraint string (e.g. in ConstraintNamesIgnoreAll we + // completely discard the constraint name), it's possible to have multiple constraints under + // the same string. Effectively, this means the schema design has duplicate/redundant constraints, + // which of course is poor design -- but still valid. + // To deal with dropping constraints, we need to not only account for the _existence_ of a constraint, + // but also to _how many times_ it appears. + constraintName := normalizeConstraintName(t1Name, t1Constraint) + if t2ConstraintsCountMap[constraintName] == 0 { // constraint exists in t1 but not in t2, hence it is dropped dropConstraint := dropConstraintStatement(t1Constraint) alterTable.AlterOptions = append(alterTable.AlterOptions, dropConstraint) + } else { + t2ConstraintsCountMap[constraintName]-- } } + // t2ConstraintsCountMap should not be used henceforth. for _, t2Constraint := range t2Constraints { normalizedT2ConstraintName := normalizeConstraintName(t2Name, t2Constraint) @@ -2046,8 +2063,10 @@ func (c *CreateTableEntity) apply(diff *AlterTableEntityDiff) error { found = true if opt.DropDefault { col.Type.Options.Default = nil + col.Type.Options.DefaultLiteral = false } else if opt.DefaultVal != nil { col.Type.Options.Default = opt.DefaultVal + col.Type.Options.DefaultLiteral = opt.DefaultLiteral } col.Type.Options.Invisible = opt.Invisible break diff --git a/go/vt/schemadiff/table_test.go b/go/vt/schemadiff/table_test.go index 633fdc9a5d6..4d41d9584c0 100644 --- a/go/vt/schemadiff/table_test.go +++ b/go/vt/schemadiff/table_test.go @@ -607,6 +607,14 @@ func TestCreateTableDiff(t *testing.T) { cdiff: "ALTER TABLE `t1` DROP CHECK `check3`", constraint: ConstraintNamesIgnoreAll, }, + { + name: "check constraints, remove duplicate", + from: "create table t1 (id int primary key, i int, constraint `chk_123abc` CHECK ((`i` > 2)), constraint `check3` CHECK ((`i` > 2)), constraint `chk_789def` CHECK ((`i` 
< 5)))", + to: "create table t2 (id int primary key, i int, constraint `chk_123abc` CHECK ((`i` > 2)), constraint `chk_789def` CHECK ((`i` < 5)))", + diff: "alter table t1 drop check check3", + cdiff: "ALTER TABLE `t1` DROP CHECK `check3`", + constraint: ConstraintNamesIgnoreAll, + }, { name: "check constraints, remove, ignore vitess, no match", from: "create table t1 (id int primary key, i int, constraint `chk_123abc` CHECK ((`i` > 2)), constraint `check3` CHECK ((`i` != 3)), constraint `chk_789def` CHECK ((`i` < 5)))", @@ -658,6 +666,37 @@ func TestCreateTableDiff(t *testing.T) { from: "create table t1 (id int primary key, i int, constraint f foreign key (i) references parent(id) on delete cascade)", to: "create table t2 (id int primary key, i int, constraint f foreign key (i) references parent(id) on delete cascade)", }, + { + name: "two identical foreign keys, dropping one", + from: "create table t1 (id int primary key, i int, key i_idex (i), constraint f1 foreign key (i) references parent(id), constraint f2 foreign key (i) references parent(id))", + to: "create table t2 (id int primary key, i int, key i_idex (i), constraint f1 foreign key (i) references parent(id))", + diff: "alter table t1 drop foreign key f2", + cdiff: "ALTER TABLE `t1` DROP FOREIGN KEY `f2`", + }, + { + name: "two identical foreign keys, dropping one, ignore vitess names", + from: "create table t1 (id int primary key, i int, key i_idex (i), constraint f1 foreign key (i) references parent(id), constraint f2 foreign key (i) references parent(id))", + to: "create table t2 (id int primary key, i int, key i_idex (i), constraint f1 foreign key (i) references parent(id))", + diff: "alter table t1 drop foreign key f2", + cdiff: "ALTER TABLE `t1` DROP FOREIGN KEY `f2`", + constraint: ConstraintNamesIgnoreVitess, + }, + { + name: "two identical foreign keys, dropping one, ignore all names", + from: "create table t1 (id int primary key, i int, key i_idex (i), constraint f1 foreign key (i) references 
parent(id), constraint f2 foreign key (i) references parent(id))", + to: "create table t2 (id int primary key, i int, key i_idex (i), constraint f1 foreign key (i) references parent(id))", + diff: "alter table t1 drop foreign key f2", + cdiff: "ALTER TABLE `t1` DROP FOREIGN KEY `f2`", + constraint: ConstraintNamesIgnoreAll, + }, + { + name: "add two identical foreign key constraints, ignore all names", + from: "create table t1 (id int primary key, i int, key i_idex (i))", + to: "create table t2 (id int primary key, i int, key i_idex (i), constraint f1 foreign key (i) references parent(id), constraint f2 foreign key (i) references parent(id))", + diff: "alter table t1 add constraint f1 foreign key (i) references parent (id), add constraint f2 foreign key (i) references parent (id)", + cdiff: "ALTER TABLE `t1` ADD CONSTRAINT `f1` FOREIGN KEY (`i`) REFERENCES `parent` (`id`), ADD CONSTRAINT `f2` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)", + constraint: ConstraintNamesIgnoreAll, + }, { name: "implicit foreign key indexes", from: "create table t1 (id int primary key, i int, key f(i), constraint f foreign key (i) references parent(id) on delete cascade)", diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index 3c2f9206f1e..e568b286cd7 100644 --- a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -23,6 +23,8 @@ import ( "strings" "testing" + "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl/tmutils" "vitess.io/vitess/go/vt/topo" @@ -92,7 +94,7 @@ func TestSchemaManagerExecutorOpenFail(t *testing.T) { controller := newFakeController( []string{"create table test_table (pk int);"}, false, false, false) controller.SetKeyspace("unknown_keyspace") - executor := NewTabletExecutor("TestSchemaManagerExecutorOpenFail", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout) + executor := 
NewTabletExecutor("TestSchemaManagerExecutorOpenFail", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) ctx := context.Background() _, err := Run(ctx, controller, executor) @@ -101,22 +103,62 @@ func TestSchemaManagerExecutorOpenFail(t *testing.T) { } } -func TestSchemaManagerExecutorExecuteFail(t *testing.T) { - controller := newFakeController( - []string{"create table test_table (pk int);"}, false, false, false) - executor := NewTabletExecutor("TestSchemaManagerExecutorExecuteFail", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout) - ctx := context.Background() +func TestSchemaManagerRun(t *testing.T) { + for _, batchSize := range []int{0, 1, 10} { + t.Run(fmt.Sprintf("batch-size=%d", batchSize), func(t *testing.T) { + sql := "create table test_table (pk int)" + controller := newFakeController( + []string{sql}, false, false, false) + fakeTmc := newFakeTabletManagerClient() + fakeTmc.AddSchemaChange(sql, &tabletmanagerdatapb.SchemaChangeResult{ + BeforeSchema: &tabletmanagerdatapb.SchemaDefinition{}, + AfterSchema: &tabletmanagerdatapb.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE `{{.DatabaseName}}` /*!40100 DEFAULT CHARACTER SET utf8 */", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "test_table", + Schema: sql, + Type: tmutils.TableBaseTable, + }, + }, + }, + }) - _, err := Run(ctx, controller, executor) - if err == nil || !strings.Contains(err.Error(), "unknown database: test_keyspace") { - t.Fatalf("run schema change should fail due to executor.Execute fail, but got: %v", err) + fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) + executor := NewTabletExecutor("TestSchemaManagerRun", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + + ctx := context.Background() + resp, err := Run(ctx, controller, executor) + + if len(resp.UUIDs) > 0 { + t.Fatalf("response 
should contain an empty list of UUIDs, found %v", len(resp.UUIDs)) + } + + if err != nil { + t.Fatalf("schema change should success but get error: %v", err) + } + if !controller.onReadSuccessTriggered { + t.Fatalf("OnReadSuccess should be called") + } + if controller.onReadFailTriggered { + t.Fatalf("OnReadFail should not be called") + } + if !controller.onValidationSuccessTriggered { + t.Fatalf("OnValidateSuccess should be called") + } + if controller.onValidationFailTriggered { + t.Fatalf("OnValidationFail should not be called") + } + if !controller.onExecutorCompleteTriggered { + t.Fatalf("OnExecutorComplete should be called") + } + }) } } -func TestSchemaManagerRun(t *testing.T) { +func TestSchemaManagerExecutorFail(t *testing.T) { sql := "create table test_table (pk int)" - controller := newFakeController( - []string{sql}, false, false, false) + controller := newFakeController([]string{sql}, false, false, false) fakeTmc := newFakeTabletManagerClient() fakeTmc.AddSchemaChange(sql, &tabletmanagerdatapb.SchemaChangeResult{ BeforeSchema: &tabletmanagerdatapb.SchemaDefinition{}, @@ -133,66 +175,67 @@ func TestSchemaManagerRun(t *testing.T) { }) fakeTmc.AddSchemaDefinition(topoproto.VtDbPrefix+"test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) - executor := NewTabletExecutor("TestSchemaManagerRun", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout) + fakeTmc.EnableExecuteFetchAsDbaError = true + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) ctx := context.Background() resp, err := Run(ctx, controller, executor) - if len(resp.UUIDs) > 0 { t.Fatalf("response should contain an empty list of UUIDs, found %v", len(resp.UUIDs)) } - if err != nil { - t.Fatalf("schema change should success but get error: %v", err) - } - if !controller.onReadSuccessTriggered { - t.Fatalf("OnReadSuccess should be called") - } - if controller.onReadFailTriggered { 
- t.Fatalf("OnReadFail should not be called") - } - if !controller.onValidationSuccessTriggered { - t.Fatalf("OnValidateSuccess should be called") - } - if controller.onValidationFailTriggered { - t.Fatalf("OnValidationFail should not be called") - } - if !controller.onExecutorCompleteTriggered { - t.Fatalf("OnExecutorComplete should be called") + if err == nil || !strings.Contains(err.Error(), "schema change failed") { + t.Fatalf("schema change should fail, but got err: %v", err) } } -func TestSchemaManagerExecutorFail(t *testing.T) { +func TestSchemaManagerExecutorBatchVsStrategyFail(t *testing.T) { sql := "create table test_table (pk int)" controller := newFakeController([]string{sql}, false, false, false) fakeTmc := newFakeTabletManagerClient() - fakeTmc.AddSchemaChange(sql, &tabletmanagerdatapb.SchemaChangeResult{ - BeforeSchema: &tabletmanagerdatapb.SchemaDefinition{}, - AfterSchema: &tabletmanagerdatapb.SchemaDefinition{ - DatabaseSchema: "CREATE DATABASE `{{.DatabaseName}}` /*!40100 DEFAULT CHARACTER SET utf8 */", - TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ - { - Name: "test_table", - Schema: sql, - Type: tmutils.TableBaseTable, - }, - }, - }, - }) fakeTmc.AddSchemaDefinition(topoproto.VtDbPrefix+"test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) fakeTmc.EnableExecuteFetchAsDbaError = true - executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout) + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10) + executor.SetDDLStrategy("online") ctx := context.Background() - resp, err := Run(ctx, controller, executor) - if len(resp.UUIDs) > 0 { - t.Fatalf("response should contain an empty list of UUIDs, found %v", len(resp.UUIDs)) - } + _, err := Run(ctx, controller, executor) - if err == nil || !strings.Contains(err.Error(), "schema change failed") { - t.Fatalf("schema 
change should fail, but got err: %v", err) - } + assert.ErrorContains(t, err, "--batch-size requires 'direct'") +} + +func TestSchemaManagerExecutorBatchVsQueriesFail(t *testing.T) { + sql := "alter table test_table force" + controller := newFakeController([]string{sql}, false, false, false) + fakeTmc := newFakeTabletManagerClient() + + fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) + fakeTmc.EnableExecuteFetchAsDbaError = true + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10) + executor.SetDDLStrategy("direct") + + ctx := context.Background() + _, err := Run(ctx, controller, executor) + + assert.ErrorContains(t, err, "--batch-size only allowed when all queries are CREATE") +} + +func TestSchemaManagerExecutorBatchVsUUIDsFail(t *testing.T) { + sql := "create table test_table (pk int)" + controller := newFakeController([]string{sql}, false, false, false) + fakeTmc := newFakeTabletManagerClient() + + fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) + fakeTmc.EnableExecuteFetchAsDbaError = true + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10) + executor.SetDDLStrategy("direct") + executor.SetUUIDList([]string{"4e5dcf80_354b_11eb_82cd_f875a4d24e90"}) + + ctx := context.Background() + _, err := Run(ctx, controller, executor) + + assert.ErrorContains(t, err, "--batch-size conflicts with --uuid-list") } func TestSchemaManagerRegisterControllerFactory(t *testing.T) { @@ -201,7 +244,6 @@ func TestSchemaManagerRegisterControllerFactory(t *testing.T) { "test_controller", func(params map[string]string) (Controller, error) { return newFakeController([]string{sql}, false, false, false), nil - }) _, err := GetControllerFactory("unknown") @@ -229,7 +271,7 @@ func TestSchemaManagerRegisterControllerFactory(t 
*testing.T) { } func newFakeExecutor(t *testing.T) *TabletExecutor { - return NewTabletExecutor("newFakeExecutor", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout) + return NewTabletExecutor("newFakeExecutor", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) } func newFakeTabletManagerClient() *fakeTabletManagerClient { @@ -289,8 +331,9 @@ func (client *fakeTabletManagerClient) ExecuteFetchAsDba(ctx context.Context, ta // - 3 shards named '1', '2', '3'. // - A primary tablet for each shard. func newFakeTopo(t *testing.T) *topo.Server { - ts := memorytopo.NewServer("test_cell") - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "test_cell") if err := ts.CreateKeyspace(ctx, "test_keyspace", &topodatapb.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace failed: %v", err) } diff --git a/go/vt/schemamanager/tablet_executor.go b/go/vt/schemamanager/tablet_executor.go index 3ca154b77b4..a56a95d5034 100644 --- a/go/vt/schemamanager/tablet_executor.go +++ b/go/vt/schemamanager/tablet_executor.go @@ -19,6 +19,7 @@ package schemamanager import ( "context" "fmt" + "strings" "sync" "time" @@ -36,48 +37,37 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vtrpc" ) // TabletExecutor applies schema changes to all tablets. 
type TabletExecutor struct { - migrationContext string - ts *topo.Server - tmc tmclient.TabletManagerClient - logger logutil.Logger - tablets []*topodatapb.Tablet - isClosed bool - allowBigSchemaChange bool - keyspace string - waitReplicasTimeout time.Duration - ddlStrategySetting *schema.DDLStrategySetting - uuids []string + migrationContext string + ts *topo.Server + tmc tmclient.TabletManagerClient + logger logutil.Logger + tablets []*topodatapb.Tablet + isClosed bool + keyspace string + waitReplicasTimeout time.Duration + ddlStrategySetting *schema.DDLStrategySetting + uuids []string + batchSize int64 } // NewTabletExecutor creates a new TabletExecutor instance -func NewTabletExecutor(migrationContext string, ts *topo.Server, tmc tmclient.TabletManagerClient, logger logutil.Logger, waitReplicasTimeout time.Duration) *TabletExecutor { +func NewTabletExecutor(migrationContext string, ts *topo.Server, tmc tmclient.TabletManagerClient, logger logutil.Logger, waitReplicasTimeout time.Duration, batchSize int64) *TabletExecutor { return &TabletExecutor{ - ts: ts, - tmc: tmc, - logger: logger, - isClosed: true, - allowBigSchemaChange: false, - waitReplicasTimeout: waitReplicasTimeout, - migrationContext: migrationContext, + ts: ts, + tmc: tmc, + logger: logger, + isClosed: true, + waitReplicasTimeout: waitReplicasTimeout, + migrationContext: migrationContext, + batchSize: batchSize, } } -// AllowBigSchemaChange changes TabletExecutor such that big schema changes -// will no longer be rejected. -func (exec *TabletExecutor) AllowBigSchemaChange() { - exec.allowBigSchemaChange = true -} - -// DisallowBigSchemaChange enables the check for big schema changes such that -// TabletExecutor will reject these. 
-func (exec *TabletExecutor) DisallowBigSchemaChange() { - exec.allowBigSchemaChange = false -} - // SetDDLStrategy applies ddl_strategy from command line flags func (exec *TabletExecutor) SetDDLStrategy(ddlStrategy string) error { ddlStrategySetting, err := schema.ParseDDLStrategy(ddlStrategy) @@ -147,58 +137,49 @@ func (exec *TabletExecutor) Validate(ctx context.Context, sqls []string) error { if exec.isClosed { return fmt.Errorf("executor is closed") } - - // We ignore DATABASE-level DDLs here because detectBigSchemaChanges doesn't - // look at them anyway. - parsedDDLs, _, _, _, err := exec.parseDDLs(sqls) - if err != nil { + if err := exec.parseDDLs(sqls); err != nil { return err } - bigSchemaChange, err := exec.detectBigSchemaChanges(ctx, parsedDDLs) - if bigSchemaChange && exec.allowBigSchemaChange { - exec.logger.Warningf("Processing big schema change. This may cause visible MySQL downtime.") - return nil - } - return err + return nil } -func (exec *TabletExecutor) parseDDLs(sqls []string) ([]sqlparser.DDLStatement, []sqlparser.DBDDLStatement, [](*sqlparser.RevertMigration), [](*sqlparser.AlterMigration), error) { - parsedDDLs := make([]sqlparser.DDLStatement, 0) - parsedDBDDLs := make([]sqlparser.DBDDLStatement, 0) - revertStatements := make([](*sqlparser.RevertMigration), 0) - alterMigrationStatements := make([](*sqlparser.AlterMigration), 0) +func (exec *TabletExecutor) parseDDLs(sqls []string) error { for _, sql := range sqls { stmt, err := sqlparser.Parse(sql) if err != nil { - return nil, nil, nil, nil, fmt.Errorf("failed to parse sql: %s, got error: %v", sql, err) + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "failed to parse sql: %s, got error: %v", sql, err) } - switch stmt := stmt.(type) { + switch stmt.(type) { case sqlparser.DDLStatement: - parsedDDLs = append(parsedDDLs, stmt) case sqlparser.DBDDLStatement: - parsedDBDDLs = append(parsedDBDDLs, stmt) case *sqlparser.RevertMigration: - revertStatements = append(revertStatements, stmt) 
case *sqlparser.AlterMigration: - alterMigrationStatements = append(alterMigrationStatements, stmt) default: if len(exec.tablets) != 1 { - return nil, nil, nil, nil, fmt.Errorf("non-ddl statements can only be executed for single shard keyspaces: %s", sql) + return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "non-ddl statements can only be executed for single shard keyspaces: %s", sql) } } } - return parsedDDLs, parsedDBDDLs, revertStatements, alterMigrationStatements, nil + return nil +} + +// isDirectStrategy returns 'true' when the ddl_strategy configuration implies 'direct' +func (exec *TabletExecutor) isDirectStrategy() (isDirect bool) { + if exec.ddlStrategySetting == nil { + return true + } + if exec.ddlStrategySetting.Strategy.IsDirect() { + return true + } + return false } // IsOnlineSchemaDDL returns true if we expect to run a online schema change DDL func (exec *TabletExecutor) isOnlineSchemaDDL(stmt sqlparser.Statement) (isOnline bool) { switch stmt := stmt.(type) { case sqlparser.DDLStatement: - if exec.ddlStrategySetting == nil { - return false - } - if exec.ddlStrategySetting.Strategy.IsDirect() { + if exec.isDirectStrategy() { return false } switch stmt.GetAction() { @@ -211,62 +192,18 @@ func (exec *TabletExecutor) isOnlineSchemaDDL(stmt sqlparser.Statement) (isOnlin return false } -// a schema change that satisfies any following condition is considered -// to be a big schema change and will be rejected. -// 1. Alter more than 100,000 rows. -// 2. Change a table with more than 2,000,000 rows (Drops are fine). -func (exec *TabletExecutor) detectBigSchemaChanges(ctx context.Context, parsedDDLs []sqlparser.DDLStatement) (bool, error) { - // We want to avoid any overhead if possible. If all DDLs are online schema changes, then we want to - // skip GetSchema altogether. 
- foundAnyNonOnlineDDL := false - for _, ddl := range parsedDDLs { - if !exec.isOnlineSchemaDDL(ddl) { - foundAnyNonOnlineDDL = true - } - } - if !foundAnyNonOnlineDDL { - return false, nil - } - // exec.tablets is guaranteed to have at least one element; - // Otherwise, Open should fail and executor should fail. - primaryTabletInfo := exec.tablets[0] - // get database schema, excluding views. - req := &tabletmanagerdatapb.GetSchemaRequest{Tables: []string{}, ExcludeTables: []string{}, TableSchemaOnly: true} - dbSchema, err := exec.tmc.GetSchema(ctx, primaryTabletInfo, req) - if err != nil { - return false, fmt.Errorf("unable to get database schema, error: %v", err) - } - tableWithCount := make(map[string]uint64, len(dbSchema.TableDefinitions)) - for _, tableSchema := range dbSchema.TableDefinitions { - tableWithCount[tableSchema.Name] = tableSchema.RowCount - } - for _, ddl := range parsedDDLs { - if exec.isOnlineSchemaDDL(ddl) { - // Since this is an online schema change, there is no need to worry about big changes - continue - } - switch ddl.GetAction() { - case sqlparser.DropDDLAction, sqlparser.CreateDDLAction, sqlparser.TruncateDDLAction, sqlparser.RenameDDLAction: - continue - } - tableName := ddl.GetTable().Name.String() - if rowCount, ok := tableWithCount[tableName]; ok { - if rowCount > 100000 && ddl.GetAction() == sqlparser.AlterDDLAction { - return true, fmt.Errorf( - "big schema change detected. Disable check with -allow_long_unavailability. ddl: %s alters a table with more than 100 thousand rows", sqlparser.String(ddl)) - } - if rowCount > 2000000 { - return true, fmt.Errorf( - "big schema change detected. Disable check with -allow_long_unavailability. ddl: %s changes a table with more than 2 million rows", sqlparser.String(ddl)) - } - } - } - return false, nil -} - // executeSQL executes a single SQL statement either as online DDL or synchronously on all tablets. 
// In online DDL case, the query may be exploded into multiple queries during func (exec *TabletExecutor) executeSQL(ctx context.Context, sql string, providedUUID string, execResult *ExecuteResult) (executedAsynchronously bool, err error) { + executeViaFetch := func() (bool, error) { + exec.executeOnAllTablets(ctx, execResult, sql, false) + return false, nil + } + if exec.batchSize > 1 { + // Batched writes only ever work with 'direct' strategy and applied directly to the mysql servers + return executeViaFetch() + } + // Analyze what type of query this is: stmt, err := sqlparser.Parse(sql) if err != nil { return false, err @@ -303,17 +240,61 @@ func (exec *TabletExecutor) executeSQL(ctx context.Context, sql string, provided exec.executeOnAllTablets(ctx, execResult, sql, true) return true, nil } - exec.executeOnAllTablets(ctx, execResult, sql, false) - return false, nil + // Got here? The statement needs to be executed directly. + return executeViaFetch() +} + +// batchSQLs combines SQLs into batches, delimited by ';' +func batchSQLs(sqls []string, batchSize int) (batchedSQLs []string) { + if batchSize <= 1 { + return sqls + } + for len(sqls) > 0 { + nextBatchSize := batchSize + if nextBatchSize > len(sqls) { + nextBatchSize = len(sqls) + } + nextBatch := sqls[0:nextBatchSize] + nextBatchSql := strings.Join(nextBatch, ";") + batchedSQLs = append(batchedSQLs, nextBatchSql) + sqls = sqls[nextBatchSize:] + } + return batchedSQLs +} + +// allSQLsAreCreateQueries returns 'true' when all given queries are CREATE TABLE|VIEW +// This function runs pretty fast even for thousands of tables (its overhead is insignificant compared with +// the time it would take to apply the changes). 
+func allSQLsAreCreateQueries(sqls []string) (bool, error) { + for _, sql := range sqls { + stmt, err := sqlparser.Parse(sql) + if err != nil { + return false, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "failed to parse sql: %s, got error: %v", sql, err) + } + switch stmt.(type) { + case *sqlparser.CreateTable, *sqlparser.CreateView: + default: + return false, nil + } + } + return true, nil } // Execute applies schema changes func (exec *TabletExecutor) Execute(ctx context.Context, sqls []string) *ExecuteResult { execResult := ExecuteResult{} + + // errorExecResult is a utility function that populates the execResult with the given error, and returns it. Used to quickly bail out of + // this function. + errorExecResult := func(err error) *ExecuteResult { + if err != nil { + execResult.ExecutorErr = err.Error() + } + return &execResult + } execResult.Sqls = sqls if exec.isClosed { - execResult.ExecutorErr = "executor is closed" - return &execResult + return errorExecResult(fmt.Errorf("executor is closed")) } startTime := time.Now() defer func() { execResult.TotalTimeSpent = time.Since(startTime) }() @@ -322,8 +303,7 @@ func (exec *TabletExecutor) Execute(ctx context.Context, sqls []string) *Execute // keyspace-wide operations like resharding migrations. 
ctx, unlock, lockErr := exec.ts.LockKeyspace(ctx, exec.keyspace, "ApplySchemaKeyspace") if lockErr != nil { - execResult.ExecutorErr = vterrors.Wrapf(lockErr, "lockErr in ApplySchemaKeyspace %v", exec.keyspace).Error() - return &execResult + return errorExecResult(vterrors.Wrapf(lockErr, "lockErr in ApplySchemaKeyspace %v", exec.keyspace)) } defer func() { // This is complicated because execResult.ExecutorErr @@ -336,8 +316,7 @@ func (exec *TabletExecutor) Execute(ctx context.Context, sqls []string) *Execute }() if exec.hasProvidedUUIDs() && len(exec.uuids) != len(sqls) { - execResult.ExecutorErr = fmt.Sprintf("provided %v UUIDs do not match number of DDLs %v", len(exec.uuids), len(sqls)) - return &execResult + return errorExecResult(fmt.Errorf("provided %v UUIDs do not match number of DDLs %v", len(exec.uuids), len(sqls))) } providedUUID := "" @@ -390,11 +369,28 @@ func (exec *TabletExecutor) Execute(ctx context.Context, sqls []string) *Execute wg.Wait() }() + if exec.batchSize > 1 { + // Before we proceed to batch, we need to validate there's no conflicts. + if !exec.isDirectStrategy() { + return errorExecResult(fmt.Errorf("--batch-size requires 'direct' ddl-strategy")) + } + if exec.hasProvidedUUIDs() { + return errorExecResult(fmt.Errorf("--batch-size conflicts with --uuid-list. 
Batching does not support UUIDs.")) + } + allSQLsAreCreate, err := allSQLsAreCreateQueries(sqls) + if err != nil { + return errorExecResult(err) + } + if !allSQLsAreCreate { + return errorExecResult(fmt.Errorf("--batch-size only allowed when all queries are CREATE TABLE|VIEW")) + } + + sqls = batchSQLs(sqls, int(exec.batchSize)) + } for index, sql := range sqls { // Attempt to renew lease: if err := rl.Do(func() error { return topo.CheckKeyspaceLockedAndRenew(ctx, exec.keyspace) }); err != nil { - execResult.ExecutorErr = vterrors.Wrapf(err, "CheckKeyspaceLocked in ApplySchemaKeyspace %v", exec.keyspace).Error() - return &execResult + return errorExecResult(vterrors.Wrapf(err, "CheckKeyspaceLocked in ApplySchemaKeyspace %v", exec.keyspace)) } execResult.CurSQLIndex = index if exec.hasProvidedUUIDs() { @@ -402,8 +398,7 @@ func (exec *TabletExecutor) Execute(ctx context.Context, sqls []string) *Execute } executedAsynchronously, err := exec.executeSQL(ctx, sql, providedUUID, &execResult) if err != nil { - execResult.ExecutorErr = err.Error() - return &execResult + return errorExecResult(err) } if !executedAsynchronously { syncOperationExecuted = true @@ -446,6 +441,33 @@ func (exec *TabletExecutor) executeOnAllTablets(ctx context.Context, execResult } } +// applyAllowZeroInDate takes a SQL string which may contain one or more statements, +// and, assuming those are DDLs, adds a /*vt+ allowZeroInDate=true */ directive to all of them, +// returning the result again as one long SQL. 
+func applyAllowZeroInDate(sql string) (string, error) { + // sql may be a batch of multiple statements + sqls, err := sqlparser.SplitStatementToPieces(sql) + if err != nil { + return sql, err + } + var modifiedSqls []string + for _, singleSQL := range sqls { + // --allow-zero-in-date Applies to DDLs + stmt, err := sqlparser.Parse(singleSQL) + if err != nil { + return sql, err + } + if ddlStmt, ok := stmt.(sqlparser.DDLStatement); ok { + // Add comments directive to allow zero in date + const directive = `/*vt+ allowZeroInDate=true */` + ddlStmt.SetComments(ddlStmt.GetParsedComments().Prepend(directive)) + singleSQL = sqlparser.String(ddlStmt) + } + modifiedSqls = append(modifiedSqls, singleSQL) + } + return strings.Join(modifiedSqls, ";"), err +} + func (exec *TabletExecutor) executeOneTablet( ctx context.Context, tablet *topodatapb.Tablet, @@ -464,22 +486,17 @@ func (exec *TabletExecutor) executeOneTablet( } else { if exec.ddlStrategySetting != nil && exec.ddlStrategySetting.IsAllowZeroInDateFlag() { // --allow-zero-in-date Applies to DDLs - stmt, err := sqlparser.Parse(string(sql)) + sql, err = applyAllowZeroInDate(sql) if err != nil { errChan <- ShardWithError{Shard: tablet.Shard, Err: err.Error()} return } - if ddlStmt, ok := stmt.(sqlparser.DDLStatement); ok { - // Add comments directive to allow zero in date - const directive = `/*vt+ allowZeroInDate=true */` - ddlStmt.SetComments(ddlStmt.GetParsedComments().Prepend(directive)) - sql = sqlparser.String(ddlStmt) - } } result, err = exec.tmc.ExecuteFetchAsDba(ctx, tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ Query: []byte(sql), MaxRows: 10, }) + } if err != nil { errChan <- ShardWithError{Shard: tablet.Shard, Err: err.Error()} diff --git a/go/vt/schemamanager/tablet_executor_test.go b/go/vt/schemamanager/tablet_executor_test.go index 61942ab6d63..a4ee7392054 100644 --- a/go/vt/schemamanager/tablet_executor_test.go +++ b/go/vt/schemamanager/tablet_executor_test.go @@ -18,6 +18,7 @@ package 
schemamanager import ( "context" + "fmt" "strings" "testing" "time" @@ -56,8 +57,9 @@ func TestTabletExecutorOpen(t *testing.T) { } func TestTabletExecutorOpenWithEmptyPrimaryAlias(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "test_cell") tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "test_cell", @@ -72,7 +74,7 @@ func TestTabletExecutorOpenWithEmptyPrimaryAlias(t *testing.T) { if err := ts.InitTablet(ctx, tablet, false /*allowPrimaryOverride*/, true /*createShardAndKeyspace*/, false /*allowUpdate*/); err != nil { t.Fatalf("InitTablet failed: %v", err) } - executor := NewTabletExecutor("TestTabletExecutorOpenWithEmptyPrimaryAlias", ts, newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout) + executor := NewTabletExecutor("TestTabletExecutorOpenWithEmptyPrimaryAlias", ts, newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) if err := executor.Open(ctx, "test_keyspace"); err == nil || !strings.Contains(err.Error(), "does not have a primary") { t.Fatalf("executor.Open() = '%v', want error", err) } @@ -105,7 +107,7 @@ func TestTabletExecutorValidate(t *testing.T) { }, }) - executor := NewTabletExecutor("TestTabletExecutorValidate", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout) + executor := NewTabletExecutor("TestTabletExecutorValidate", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) ctx := context.Background() sqls := []string{ @@ -136,8 +138,8 @@ func TestTabletExecutorValidate(t *testing.T) { // alter a table with more than 100,000 rows if err := executor.Validate(ctx, []string{ "ALTER TABLE test_table_03 ADD COLUMN new_id bigint(20)", - }); err == nil { - t.Fatalf("executor.Validate should fail, alter a table more than 100,000 rows") + }); err != nil { + 
t.Fatalf("executor.Validate should not fail, even for a table with more than 100,000 rows") } if err := executor.Validate(ctx, []string{ @@ -151,21 +153,6 @@ func TestTabletExecutorValidate(t *testing.T) { }); err != nil { t.Fatalf("executor.Validate should succeed, drop a table with more than 2,000,000 rows is allowed") } - - executor.AllowBigSchemaChange() - // alter a table with more than 100,000 rows - if err := executor.Validate(ctx, []string{ - "ALTER TABLE test_table_03 ADD COLUMN new_id bigint(20)", - }); err != nil { - t.Fatalf("executor.Validate should succeed, big schema change is disabled") - } - - executor.DisallowBigSchemaChange() - if err := executor.Validate(ctx, []string{ - "ALTER TABLE test_table_03 ADD COLUMN new_id bigint(20)", - }); err == nil { - t.Fatalf("executor.Validate should fail, alter a table more than 100,000 rows") - } } func TestTabletExecutorDML(t *testing.T) { @@ -194,7 +181,7 @@ func TestTabletExecutorDML(t *testing.T) { }, }) - executor := NewTabletExecutor("TestTabletExecutorDML", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout) + executor := NewTabletExecutor("TestTabletExecutorDML", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) ctx := context.Background() executor.Open(ctx, "unsharded_keyspace") @@ -303,3 +290,158 @@ func TestIsOnlineSchemaDDL(t *testing.T) { } } } + +func TestBatchSQLs(t *testing.T) { + sqls := []string{ + "create table t1(id int primary key)", + "create table t2(id int primary key)", + "create table t3(id int primary key)", + "create table t4(id int primary key)", + "create view v as select id from t", + } + tcases := []struct { + batchSize int + expectSQLs []string + }{ + { + batchSize: 0, + expectSQLs: sqls, + }, + { + batchSize: 1, + expectSQLs: sqls, + }, + { + batchSize: 2, + expectSQLs: []string{ + "create table t1(id int primary key);create table t2(id int primary key)", + "create table t3(id int primary key);create table t4(id int 
primary key)", + "create view v as select id from t", + }, + }, + { + batchSize: 3, + expectSQLs: []string{ + "create table t1(id int primary key);create table t2(id int primary key);create table t3(id int primary key)", + "create table t4(id int primary key);create view v as select id from t", + }, + }, + { + batchSize: 4, + expectSQLs: []string{ + "create table t1(id int primary key);create table t2(id int primary key);create table t3(id int primary key);create table t4(id int primary key)", + "create view v as select id from t", + }, + }, + { + batchSize: 5, + expectSQLs: []string{ + "create table t1(id int primary key);create table t2(id int primary key);create table t3(id int primary key);create table t4(id int primary key);create view v as select id from t", + }, + }, + { + batchSize: 6, + expectSQLs: []string{ + "create table t1(id int primary key);create table t2(id int primary key);create table t3(id int primary key);create table t4(id int primary key);create view v as select id from t", + }, + }, + } + for _, tcase := range tcases { + t.Run(fmt.Sprintf("%d", tcase.batchSize), func(t *testing.T) { + batchedSQLs := batchSQLs(sqls, tcase.batchSize) + assert.Equal(t, tcase.expectSQLs, batchedSQLs) + }) + } +} + +func TestAllSQLsAreCreateQueries(t *testing.T) { + tcases := []struct { + name string + sqls []string + expect bool + }{ + { + name: "empty", + expect: true, + }, + { + name: "single, yes", + sqls: []string{"create table t1 (id int primary key)"}, + expect: true, + }, + { + name: "single, no", + sqls: []string{"alter table t1 force"}, + expect: false, + }, + { + name: "multi, no", + sqls: []string{ + "create table t1 (id int primary key)", + "alter table t1 force", + }, + expect: false, + }, + { + name: "multi, no", + sqls: []string{ + "alter table t1 force", + "create table t1 (id int primary key)", + }, + expect: false, + }, + { + name: "multi, yes", + sqls: []string{ + "create table t1 (id int primary key)", + "create table t2 (id int primary 
key)", + "create table t3 (id int primary key)", + "create view v1 as select id from t1", + }, + expect: true, + }, + } + + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + result, err := allSQLsAreCreateQueries(tcase.sqls) + assert.NoError(t, err) + assert.Equal(t, tcase.expect, result) + }) + } +} + +func TestApplyAllowZeroInDate(t *testing.T) { + tcases := []struct { + sql string + expect string + }{ + { + "create table t1(id int primary key); ", + "create /*vt+ allowZeroInDate=true */ table t1 (\n\tid int primary key\n)", + }, + { + "create table t1(id int primary key)", + "create /*vt+ allowZeroInDate=true */ table t1 (\n\tid int primary key\n)", + }, + { + "create table t1(id int primary key);select 1 from dual", + "create /*vt+ allowZeroInDate=true */ table t1 (\n\tid int primary key\n);select 1 from dual", + }, + { + "create table t1(id int primary key); alter table t2 add column id2 int", + "create /*vt+ allowZeroInDate=true */ table t1 (\n\tid int primary key\n);alter /*vt+ allowZeroInDate=true */ table t2 add column id2 int", + }, + { + " ; ; ;;; create table t1(id int primary key); ;; alter table t2 add column id2 int ;;", + "create /*vt+ allowZeroInDate=true */ table t1 (\n\tid int primary key\n);alter /*vt+ allowZeroInDate=true */ table t2 add column id2 int", + }, + } + for _, tcase := range tcases { + t.Run(tcase.sql, func(t *testing.T) { + result, err := applyAllowZeroInDate(tcase.sql) + assert.NoError(t, err) + assert.Equal(t, tcase.expect, result) + }) + } +} diff --git a/go/vt/servenv/grpc_server.go b/go/vt/servenv/grpc_server.go index bd79aed8108..7a41cca389a 100644 --- a/go/vt/servenv/grpc_server.go +++ b/go/vt/servenv/grpc_server.go @@ -19,9 +19,9 @@ package servenv import ( "context" "crypto/tls" - "fmt" "math" "net" + "strconv" "time" grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" @@ -72,6 +72,9 @@ var ( // gRPCPort is the port to listen on for gRPC. If zero, don't listen. 
gRPCPort int + // gRPCBindAddress is the address to bind to for gRPC. If empty, bind to all addresses. + gRPCBindAddress string + // gRPCMaxConnectionAge is the maximum age of a client connection, before GoAway is sent. // This is useful for L4 loadbalancing to ensure rebalancing after scaling. gRPCMaxConnectionAge = time.Duration(math.MaxInt64) @@ -124,6 +127,7 @@ var ( func RegisterGRPCServerFlags() { OnParse(func(fs *pflag.FlagSet) { fs.IntVar(&gRPCPort, "grpc_port", gRPCPort, "Port to listen on for gRPC calls. If zero, do not listen.") + fs.StringVar(&gRPCBindAddress, "grpc_bind_address", gRPCBindAddress, "Bind address for gRPC calls. If empty, listen on all addresses.") fs.DurationVar(&gRPCMaxConnectionAge, "grpc_max_connection_age", gRPCMaxConnectionAge, "Maximum age of a client connection before GoAway is sent.") fs.DurationVar(&gRPCMaxConnectionAgeGrace, "grpc_max_connection_age_grace", gRPCMaxConnectionAgeGrace, "Additional grace period after grpc_max_connection_age, after which connections are forcibly closed.") fs.IntVar(&gRPCInitialConnWindowSize, "grpc_server_initial_conn_window_size", gRPCInitialConnWindowSize, "gRPC server initial connection window size") @@ -284,7 +288,7 @@ func serveGRPC() { // listen on the port log.Infof("Listening for gRPC calls on port %v", gRPCPort) - listener, err := net.Listen("tcp", fmt.Sprintf(":%d", gRPCPort)) + listener, err := net.Listen("tcp", net.JoinHostPort(gRPCBindAddress, strconv.Itoa(gRPCPort))) if err != nil { log.Exitf("Cannot listen on port %v for gRPC: %v", gRPCPort, err) } diff --git a/go/vt/servenv/run.go b/go/vt/servenv/run.go index 5b585184331..6f028786eaf 100644 --- a/go/vt/servenv/run.go +++ b/go/vt/servenv/run.go @@ -17,11 +17,11 @@ limitations under the License. package servenv import ( - "fmt" "net" "net/url" "os" "os/signal" + "strconv" "syscall" "time" @@ -37,14 +37,14 @@ var ( // Run starts listening for RPC and HTTP requests, // and blocks until it the process gets a signal. 
-func Run(port int) { +func Run(bindAddress string, port int) { populateListeningURL(int32(port)) createGRPCServer() onRunHooks.Fire() serveGRPC() serveSocketFile() - l, err := net.Listen("tcp", fmt.Sprintf(":%v", port)) + l, err := net.Listen("tcp", net.JoinHostPort(bindAddress, strconv.Itoa(port))) if err != nil { log.Exit(err) } @@ -74,11 +74,6 @@ func Run(port int) { log.Info("Shutting down gracefully") fireOnCloseHooks(onCloseTimeout) -} - -// Close runs any registered exit hooks in parallel. -func Close() { - onCloseHooks.Fire() ListeningURL = url.URL{} } diff --git a/go/vt/servenv/servenv.go b/go/vt/servenv/servenv.go index 662e4da5207..e7c28855997 100644 --- a/go/vt/servenv/servenv.go +++ b/go/vt/servenv/servenv.go @@ -29,6 +29,8 @@ limitations under the License. package servenv import ( + "flag" + "fmt" "net/url" "os" "os/signal" @@ -38,6 +40,7 @@ import ( "syscall" "time" + "github.com/spf13/cobra" "github.com/spf13/pflag" "vitess.io/vitess/go/event" @@ -57,7 +60,8 @@ import ( var ( // port is part of the flags used when calling RegisterDefaultFlags. - port int + port int + bindAddress string // mutex used to protect the Init function mu sync.Mutex @@ -263,6 +267,7 @@ func FireRunHooks() { func RegisterDefaultFlags() { OnParse(func(fs *pflag.FlagSet) { fs.IntVar(&port, "port", port, "port for the server") + fs.StringVar(&bindAddress, "bind-address", bindAddress, "Bind address for the server. If empty, the server will listen on all available unicast and anycast IP addresses of the local system.") }) } @@ -273,7 +278,7 @@ func Port() int { // RunDefault calls Run() with the parameters from the flags. func RunDefault() { - Run(port) + Run(bindAddress, port) } var ( @@ -351,6 +356,69 @@ func ParseFlags(cmd string) { logutil.PurgeLogs() } +// ParseFlagsForTests initializes flags but skips the version, filesystem +// args and go flag related work. +// Note: this should not be used outside of unit tests. 
+func ParseFlagsForTests(cmd string) { + fs := GetFlagSetFor(cmd) + pflag.CommandLine = fs + pflag.Parse() + viperutil.BindFlags(fs) + loadViper(cmd) +} + +// MoveFlagsToCobraCommand moves the servenv-registered flags to the flagset of +// the given cobra command, then copies over the glog flags that otherwise +// require manual transferring. +func MoveFlagsToCobraCommand(cmd *cobra.Command) { + moveFlags(cmd.Use, cmd.Flags()) +} + +// MovePersistentFlagsToCobraCommand functions exactly like MoveFlagsToCobraCommand, +// but moves the servenv-registered flags to the persistent flagset of +// the given cobra command, then copies over the glog flags that otherwise +// require manual transferring. +// +// Useful for transferring flags to a parent command whose subcommands should +// inherit the servenv-registered flags. +func MovePersistentFlagsToCobraCommand(cmd *cobra.Command) { + moveFlags(cmd.Use, cmd.PersistentFlags()) +} + +func moveFlags(name string, fs *pflag.FlagSet) { + fs.AddFlagSet(GetFlagSetFor(name)) + + // glog flags, no better way to do this + _flag.PreventGlogVFlagFromClobberingVersionFlagShorthand(fs) + fs.AddGoFlag(flag.Lookup("logtostderr")) + fs.AddGoFlag(flag.Lookup("log_backtrace_at")) + fs.AddGoFlag(flag.Lookup("alsologtostderr")) + fs.AddGoFlag(flag.Lookup("stderrthreshold")) + fs.AddGoFlag(flag.Lookup("log_dir")) + fs.AddGoFlag(flag.Lookup("vmodule")) + + pflag.CommandLine = fs +} + +// CobraPreRunE returns the common function that commands will need to load +// viper infrastructure. It matches the signature of cobra's (Pre|Post)RunE-type +// functions. 
+func CobraPreRunE(cmd *cobra.Command, args []string) error { + _flag.TrickGlog() + + watchCancel, err := viperutil.LoadConfig() + if err != nil { + return fmt.Errorf("%s: failed to read in config: %s", cmd.Name(), err) + } + + OnTerm(watchCancel) + HTTPHandleFunc("/debug/config", viperdebug.HandlerFunc) + + logutil.PurgeLogs() + + return nil +} + // GetFlagSetFor returns the flag set for a given command. // This has to exported for the Vitess-operator to use func GetFlagSetFor(cmd string) *pflag.FlagSet { @@ -425,7 +493,6 @@ func init() { "vtctld", "vtgate", "vtgateclienttest", - "vtgr", "vtorc", "vttablet", "vttestserver", @@ -439,7 +506,6 @@ func init() { "vtcombo", "vtctld", "vtgate", - "vtgr", "vttablet", "vtorc", } { @@ -461,7 +527,6 @@ func RegisterFlagsForTopoBinaries(registerFlags func(fs *pflag.FlagSet)) { "vtctl", "vtctld", "vtgate", - "vtgr", "vttablet", "vttestserver", "zk", @@ -471,3 +536,10 @@ func RegisterFlagsForTopoBinaries(registerFlags func(fs *pflag.FlagSet)) { OnParseFor(cmd, registerFlags) } } + +// TestingEndtoend is true when this Vitess binary is being ran as part of an endtoend test suite +var TestingEndtoend = false + +func init() { + TestingEndtoend = os.Getenv("VTTEST") == "endtoend" +} diff --git a/go/vt/servenv/version.go b/go/vt/servenv/version.go index b8dcdf3264f..13b66b778c1 100644 --- a/go/vt/servenv/version.go +++ b/go/vt/servenv/version.go @@ -19,4 +19,4 @@ package servenv // THIS FILE IS AUTO-GENERATED DURING NEW RELEASES BY ./tools/do_releases.sh // DO NOT EDIT -const versionName = "17.0.2" +const versionName = "18.0.0" diff --git a/go/vt/sidecardb/identifier_cache.go b/go/vt/sidecardb/identifier_cache.go index a010923081d..002d8750ba1 100644 --- a/go/vt/sidecardb/identifier_cache.go +++ b/go/vt/sidecardb/identifier_cache.go @@ -22,6 +22,7 @@ import ( "sync/atomic" "time" + "vitess.io/vitess/go/constants/sidecar" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" 
@@ -88,7 +89,7 @@ func (ic *IdentifierCache) Get(keyspace string) (string, error) { return "", err } if sdbname == "" { - sdbname = DefaultName + sdbname = sidecar.DefaultName } sdbid = sqlparser.String(sqlparser.NewIdentifierCS(sdbname)) diff --git a/go/vt/sidecardb/schema/vdiff/vdiff.sql b/go/vt/sidecardb/schema/vdiff/vdiff.sql index 5eae9270460..52392bde427 100644 --- a/go/vt/sidecardb/schema/vdiff/vdiff.sql +++ b/go/vt/sidecardb/schema/vdiff/vdiff.sql @@ -28,7 +28,7 @@ CREATE TABLE IF NOT EXISTS vdiff `started_at` timestamp NULL DEFAULT NULL, `liveness_timestamp` timestamp NULL DEFAULT NULL, `completed_at` timestamp NULL DEFAULT NULL, - `last_error` varbinary(512) DEFAULT NULL, + `last_error` varbinary(1024) DEFAULT NULL, PRIMARY KEY (`id`), UNIQUE KEY `uuid_idx` (`vdiff_uuid`), KEY `state` (`state`), diff --git a/go/vt/sidecardb/schema/vreplication/vreplication_log.sql b/go/vt/sidecardb/schema/vreplication/vreplication_log.sql index 175e6db2bce..19360fb0c04 100644 --- a/go/vt/sidecardb/schema/vreplication/vreplication_log.sql +++ b/go/vt/sidecardb/schema/vreplication/vreplication_log.sql @@ -24,5 +24,6 @@ CREATE TABLE IF NOT EXISTS vreplication_log `updated_at` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `message` text NOT NULL, `count` bigint NOT NULL DEFAULT '1', - PRIMARY KEY (`id`) + PRIMARY KEY (`id`), + KEY `vrepl_id_idx` (`vrepl_id`) ) ENGINE = InnoDB diff --git a/go/vt/sidecardb/sidecardb.go b/go/vt/sidecardb/sidecardb.go index e03dd76fe0b..0bb64611607 100644 --- a/go/vt/sidecardb/sidecardb.go +++ b/go/vt/sidecardb/sidecardb.go @@ -26,10 +26,10 @@ import ( "runtime" "strings" "sync" - "sync/atomic" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/history" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/mysql/fakesqldb" @@ -45,8 +45,6 @@ import ( ) const ( - DefaultName = "_vt" - createSidecarDBQuery = "create database if not exists %s" sidecarDBExistsQuery = "select 
'true' as 'dbexists' from information_schema.SCHEMATA where SCHEMA_NAME = %a" showCreateTableQuery = "show create table %s.%s" @@ -70,8 +68,6 @@ const ( ) var ( - // This should be accessed via GetName() - sidecarDBName atomic.Value sidecarTables []*sidecarTable // All tables needed in the sidecar database have @@ -100,7 +96,6 @@ type ddlError struct { } func init() { - sidecarDBName.Store(DefaultName) ddlCount = stats.NewCounter(StatsKeyQueryCount, "Number of queries executed") ddlErrorCount = stats.NewCounter(StatsKeyErrorCount, "Number of errors during sidecar schema upgrade") ddlErrorHistory = history.New(maxDDLErrorHistoryLength) @@ -134,7 +129,7 @@ func validateSchemaDefinition(name, schema string) (string, error) { if qualifier != "" { return "", vterrors.Errorf(vtrpcpb.Code_INTERNAL, "database qualifier of %s specified for the %s table when there should not be one", qualifier, name) } - createTable.Table.Qualifier = sqlparser.NewIdentifierCS(GetName()) + createTable.Table.Qualifier = sqlparser.NewIdentifierCS(sidecar.GetName()) if !strings.EqualFold(tableName, name) { return "", vterrors.Errorf(vtrpcpb.Code_INTERNAL, "table name of %s does not match the table name specified within the file: %s", name, tableName) } @@ -193,7 +188,7 @@ func printCallerDetails() { pc, _, line, ok := runtime.Caller(2) details := runtime.FuncForPC(pc) if ok && details != nil { - log.Infof("%s schema init called from %s:%d\n", GetName(), details.Name(), line) + log.Infof("%s schema init called from %s:%d\n", sidecar.GetName(), details.Name(), line) } } @@ -207,28 +202,6 @@ type schemaInit struct { // execute the specified query within the database. 
type Exec func(ctx context.Context, query string, maxRows int, useDB bool) (*sqltypes.Result, error) -func SetName(name string) { - sidecarDBName.Store(name) -} - -func GetName() string { - return sidecarDBName.Load().(string) -} - -// GetIdentifier returns the sidecar database name as an SQL -// identifier string, most importantly this means that it will -// be properly escaped if/as needed. -func GetIdentifier() string { - ident := sqlparser.NewIdentifierCS(GetName()) - return sqlparser.String(ident) -} - -// GetCreateQuery returns the CREATE DATABASE SQL statement -// used to create the sidecar database. -func GetCreateQuery() string { - return sqlparser.BuildParsedQuery(createSidecarDBQuery, GetIdentifier()).Query -} - // GetDDLCount metric returns the count of sidecardb DDLs that // have been run as part of this vttablet's init process. func getDDLCount() int64 { @@ -281,7 +254,7 @@ func Init(ctx context.Context, exec Exec) error { si.dbCreated = true } - if err := si.setCurrentDatabase(GetIdentifier()); err != nil { + if err := si.setCurrentDatabase(sidecar.GetIdentifier()); err != nil { return err } @@ -325,7 +298,7 @@ func (si *schemaInit) setPermissiveSQLMode() (func(), error) { } func (si *schemaInit) doesSidecarDBExist() (bool, error) { - query, err := sqlparser.ParseAndBind(sidecarDBExistsQuery, sqltypes.StringBindVariable(GetName())) + query, err := sqlparser.ParseAndBind(sidecarDBExistsQuery, sqltypes.StringBindVariable(sidecar.GetName())) if err != nil { return false, err } @@ -337,10 +310,10 @@ func (si *schemaInit) doesSidecarDBExist() (bool, error) { switch len(rs.Rows) { case 0: - log.Infof("doesSidecarDBExist: %s not found", GetName()) + log.Infof("doesSidecarDBExist: %s not found", sidecar.GetName()) return false, nil case 1: - log.Infof("doesSidecarDBExist: found %s", GetName()) + log.Infof("doesSidecarDBExist: found %s", sidecar.GetName()) return true, nil default: // This should never happen. 
@@ -349,12 +322,12 @@ func (si *schemaInit) doesSidecarDBExist() (bool, error) { } func (si *schemaInit) createSidecarDB() error { - _, err := si.exec(si.ctx, GetCreateQuery(), 1, false) + _, err := si.exec(si.ctx, sidecar.GetCreateQuery(), 1, false) if err != nil { log.Error(err) return err } - log.Infof("createSidecarDB: %s", GetName()) + log.Infof("createSidecarDB: %s", sidecar.GetName()) return nil } @@ -368,9 +341,13 @@ func (si *schemaInit) setCurrentDatabase(dbName string) error { func (si *schemaInit) getCurrentSchema(tableName string) (string, error) { var currentTableSchema string - rs, err := si.exec(si.ctx, sqlparser.BuildParsedQuery(showCreateTableQuery, GetIdentifier(), sqlparser.String(sqlparser.NewIdentifierCS(tableName))).Query, 1, false) + // We escape the tableName because it can be a keyword. + // Converting the tableName to a case-sensitive identifier and converting back to a string using the + // sqlparser package, ensures that the table name is escaped with backticks if required. 
+ escapedTableName := sqlparser.String(sqlparser.NewIdentifierCS(tableName)) + rs, err := si.exec(si.ctx, sqlparser.BuildParsedQuery(showCreateTableQuery, sidecar.GetIdentifier(), escapedTableName).Query, 1, false) if err != nil { - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Number() == mysql.ERNoSuchTable { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERNoSuchTable { // table does not exist in the sidecar database return "", nil } @@ -456,7 +433,7 @@ func (si *schemaInit) ensureSchema(table *sidecarTable) error { ddlCount.Add(1) return nil } - log.Infof("Table schema was already up to date for the %s table in the %s sidecar database", table.name, GetName()) + log.Infof("Table schema was already up to date for the %s table in the %s sidecar database", table.name, sidecar.GetName()) return nil } @@ -470,29 +447,13 @@ func recordDDLError(tableName string, err error) { } func (t *sidecarTable) String() string { - return fmt.Sprintf("%s.%s (%s)", GetIdentifier(), sqlparser.String(sqlparser.NewIdentifierCS(t.name)), t.module) + return fmt.Sprintf("%s.%s (%s)", sidecar.GetIdentifier(), sqlparser.String(sqlparser.NewIdentifierCS(t.name)), t.module) } // region unit-test-only // This section uses helpers used in tests, but also in // go/vt/vtexplain/vtexplain_vttablet.go. // Hence, it is here and not in the _test.go file. -const ( - createTableRegexp = "(?i)CREATE TABLE .* `?\\_vt\\`?..*" - alterTableRegexp = "(?i)ALTER TABLE `?\\_vt\\`?..*" -) - -var ( - sidecarDBInitQueries = []string{ - "use %s", - createSidecarDBQuery, - } - // Query patterns to handle in mocks. - sidecarDBInitQueryPatterns = []string{ - createTableRegexp, - alterTableRegexp, - } -) // AddSchemaInitQueries adds sidecar database schema related // queries to a mock db. 
@@ -500,13 +461,13 @@ var ( func AddSchemaInitQueries(db *fakesqldb.DB, populateTables bool) { once.Do(loadSchemaDefinitions) result := &sqltypes.Result{} - for _, q := range sidecarDBInitQueryPatterns { + for _, q := range sidecar.DBInitQueryPatterns { db.AddQueryPattern(q, result) } - for _, q := range sidecarDBInitQueries { - db.AddQuery(sqlparser.BuildParsedQuery(q, GetIdentifier()).Query, result) + for _, q := range sidecar.DBInitQueries { + db.AddQuery(sqlparser.BuildParsedQuery(q, sidecar.GetIdentifier()).Query, result) } - sdbe, _ := sqlparser.ParseAndBind(sidecarDBExistsQuery, sqltypes.StringBindVariable(GetName())) + sdbe, _ := sqlparser.ParseAndBind(sidecarDBExistsQuery, sqltypes.StringBindVariable(sidecar.GetName())) db.AddQuery(sdbe, result) for _, table := range sidecarTables { result = &sqltypes.Result{} @@ -517,7 +478,7 @@ func AddSchemaInitQueries(db *fakesqldb.DB, populateTables bool) { fmt.Sprintf("%s|%s", table.name, table.schema), ) } - db.AddQuery(sqlparser.BuildParsedQuery(showCreateTableQuery, GetIdentifier(), + db.AddQuery(sqlparser.BuildParsedQuery(showCreateTableQuery, sidecar.GetIdentifier(), sqlparser.String(sqlparser.NewIdentifierCS(table.name))).Query, result) } @@ -536,16 +497,16 @@ func AddSchemaInitQueries(db *fakesqldb.DB, populateTables bool) { // This is for unit tests only! 
func MatchesInitQuery(query string) bool { query = strings.ToLower(query) - for _, q := range sidecarDBInitQueries { - if strings.EqualFold(sqlparser.BuildParsedQuery(q, GetIdentifier()).Query, query) { + for _, q := range sidecar.DBInitQueries { + if strings.EqualFold(sqlparser.BuildParsedQuery(q, sidecar.GetIdentifier()).Query, query) { return true } } - sdbe, _ := sqlparser.ParseAndBind(sidecarDBExistsQuery, sqltypes.StringBindVariable(GetName())) + sdbe, _ := sqlparser.ParseAndBind(sidecarDBExistsQuery, sqltypes.StringBindVariable(sidecar.GetName())) if strings.EqualFold(sdbe, query) { return true } - for _, q := range sidecarDBInitQueryPatterns { + for _, q := range sidecar.DBInitQueryPatterns { q = strings.ToLower(q) if strings.Contains(query, q) { return true diff --git a/go/vt/sidecardb/sidecardb_test.go b/go/vt/sidecardb/sidecardb_test.go index 49b91ae5bdc..22147c960e9 100644 --- a/go/vt/sidecardb/sidecardb_test.go +++ b/go/vt/sidecardb/sidecardb_test.go @@ -24,6 +24,7 @@ import ( "strings" "testing" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/vt/sqlparser" "github.com/stretchr/testify/require" @@ -62,7 +63,7 @@ func TestInitErrors(t *testing.T) { exec := func(ctx context.Context, query string, maxRows int, useDB bool) (*sqltypes.Result, error) { if useDB { - if _, err := conn.ExecuteFetch(fmt.Sprintf("use %s", GetIdentifier()), maxRows, true); err != nil { + if _, err := conn.ExecuteFetch(fmt.Sprintf("use %s", sidecar.GetIdentifier()), maxRows, true); err != nil { return nil, err } } @@ -132,7 +133,7 @@ func TestMiscSidecarDB(t *testing.T) { require.NoError(t, err) exec := func(ctx context.Context, query string, maxRows int, useDB bool) (*sqltypes.Result, error) { if useDB { - if _, err := conn.ExecuteFetch(fmt.Sprintf("use %s", GetIdentifier()), maxRows, true); err != nil { + if _, err := conn.ExecuteFetch(fmt.Sprintf("use %s", sidecar.GetIdentifier()), maxRows, true); err != nil { return nil, err } } @@ -142,12 +143,12 @@ func 
TestMiscSidecarDB(t *testing.T) { result := sqltypes.MakeTestResult(sqltypes.MakeTestFields( "dbexists", "int64"), - GetName(), + sidecar.GetName(), ) - dbeq, err := sqlparser.ParseAndBind(sidecarDBExistsQuery, sqltypes.StringBindVariable(GetName())) + dbeq, err := sqlparser.ParseAndBind(sidecarDBExistsQuery, sqltypes.StringBindVariable(sidecar.GetName())) require.NoError(t, err) db.AddQuery(dbeq, result) - db.AddQuery(sqlparser.BuildParsedQuery(createSidecarDBQuery, GetIdentifier()).Query, &sqltypes.Result{}) + db.AddQuery(sidecar.GetCreateQuery(), &sqltypes.Result{}) AddSchemaInitQueries(db, false) // tests init on empty db @@ -173,7 +174,7 @@ func TestMiscSidecarDB(t *testing.T) { exec: exec, } - err = si.setCurrentDatabase(GetIdentifier()) + err = si.setCurrentDatabase(sidecar.GetIdentifier()) require.NoError(t, err) require.False(t, MatchesInitQuery("abc")) diff --git a/go/vt/sqlparser/analyzer.go b/go/vt/sqlparser/analyzer.go index 67066c87cd6..e4ac9326324 100644 --- a/go/vt/sqlparser/analyzer.go +++ b/go/vt/sqlparser/analyzer.go @@ -44,6 +44,7 @@ const ( StmtShow StmtUse StmtOther + StmtAnalyze StmtUnknown StmtComment StmtPriv @@ -69,6 +70,7 @@ const ( StmtTruncate StmtRename StmtPlan + StmtKill ) // ASTToStatementType returns a StatementType from an AST stmt @@ -94,8 +96,10 @@ func ASTToStatementType(stmt Statement) StatementType { return StmtShowMigrationLogs case *Use: return StmtUse - case *OtherRead, *OtherAdmin, *Load: + case *OtherAdmin, *Load: return StmtOther + case *Analyze: + return StmtAnalyze case Explain, *VExplainStmt: return StmtExplain case *Begin: @@ -130,6 +134,8 @@ func ASTToStatementType(stmt Statement) StatementType { return StmtExecute case *DeallocateStmt: return StmtDeallocate + case *Kill: + return StmtKill default: return StmtUnknown } @@ -251,14 +257,18 @@ func Preview(sql string) StatementType { return StmtUse case "describe", "desc", "explain": return StmtExplain - case "analyze", "repair", "optimize": + case "repair", 
"optimize": return StmtOther + case "analyze": + return StmtAnalyze case "grant", "revoke": return StmtPriv case "release": return StmtRelease case "rollback": return StmtSRollback + case "kill": + return StmtKill } return StmtUnknown } @@ -297,6 +307,8 @@ func (s StatementType) String() string { return "USE" case StmtOther: return "OTHER" + case StmtAnalyze: + return "ANALYZE" case StmtPriv: return "PRIV" case StmtExplain: @@ -323,6 +335,8 @@ func (s StatementType) String() string { return "EXECUTE" case StmtDeallocate: return "DEALLOCATE PREPARE" + case StmtKill: + return "KILL" default: return "UNKNOWN" } diff --git a/go/vt/sqlparser/analyzer_test.go b/go/vt/sqlparser/analyzer_test.go index 88355c6a8fc..9f6a451770e 100644 --- a/go/vt/sqlparser/analyzer_test.go +++ b/go/vt/sqlparser/analyzer_test.go @@ -62,7 +62,7 @@ func TestPreview(t *testing.T) { {"set", StmtSet}, {"show", StmtShow}, {"use", StmtUse}, - {"analyze", StmtOther}, + {"analyze", StmtAnalyze}, {"describe", StmtExplain}, {"desc", StmtExplain}, {"explain", StmtExplain}, diff --git a/go/vt/sqlparser/ast.go b/go/vt/sqlparser/ast.go index 8169550430a..ffc481d1f78 100644 --- a/go/vt/sqlparser/ast.go +++ b/go/vt/sqlparser/ast.go @@ -16,7 +16,10 @@ limitations under the License. package sqlparser -import "vitess.io/vitess/go/sqltypes" +import ( + "vitess.io/vitess/go/mysql/datetime" + "vitess.io/vitess/go/sqltypes" +) /* This is the Vitess AST. 
This file should only contain pure struct declarations, @@ -60,6 +63,7 @@ type ( GetOrderBy() OrderBy GetLimit() *Limit SetLimit(*Limit) + GetLock() Lock SetLock(lock Lock) SetInto(into *SelectInto) SetWith(with *With) @@ -67,6 +71,7 @@ type ( GetColumnCount() int GetColumns() SelectExprs Commented + IsDistinct() bool } // DDLStatement represents any DDL Statement @@ -134,10 +139,11 @@ type ( // AlterColumn is used to add or drop defaults & visibility to columns in alter table command AlterColumn struct { - Column *ColName - DropDefault bool - DefaultVal Expr - Invisible *bool + Column *ColName + DropDefault bool + DefaultVal Expr + DefaultLiteral bool + Invisible *bool } // With contains the lists of common table expression and specifies if it is recursive or not @@ -676,13 +682,11 @@ type ( Name IdentifierCI } - // IntervalTypes is an enum to get types of intervals - IntervalTypes int8 - - // OtherRead represents a DESCRIBE, or EXPLAIN statement. - // It should be used only as an indicator. It does not contain - // the full AST for the statement. - OtherRead struct{} + // Analyze represents the Analyze statement. + Analyze struct { + IsLocal bool + Table TableName + } // OtherAdmin represents a misc statement that relies on ADMIN privileges, // such as REPAIR, OPTIMIZE, or TRUNCATE statement. 
@@ -705,6 +709,14 @@ type ( FieldsInfo *FieldsClause LinesInfo *LinesClause } + // KillType is an enum for Kill.Type + KillType int8 + + // Kill represents a kill statement + Kill struct { + Type KillType + ProcesslistID uint64 + } ) func (*Union) iStatement() {} @@ -725,7 +737,7 @@ func (*Rollback) iStatement() {} func (*SRollback) iStatement() {} func (*Savepoint) iStatement() {} func (*Release) iStatement() {} -func (*OtherRead) iStatement() {} +func (*Analyze) iStatement() {} func (*OtherAdmin) iStatement() {} func (*CommentOnly) iStatement() {} func (*Select) iSelectStatement() {} @@ -757,6 +769,7 @@ func (*PrepareStmt) iStatement() {} func (*ExecuteStmt) iStatement() {} func (*DeallocateStmt) iStatement() {} func (*PurgeBinaryLogs) iStatement() {} +func (*Kill) iStatement() {} func (*CreateView) iDDLStatement() {} func (*AlterView) iDDLStatement() {} @@ -1837,14 +1850,15 @@ type ColumnTypeOptions struct { The complexity arises from the fact that we do not know whether the column will be nullable or not if nothing is specified. Therefore we do not know whether the column is nullable or not in case 3. */ - Null *bool - Autoincrement bool - Default Expr - OnUpdate Expr - As Expr - Comment *Literal - Storage ColumnStorage - Collate string + Null *bool + Autoincrement bool + Default Expr + DefaultLiteral bool + OnUpdate Expr + As Expr + Comment *Literal + Storage ColumnStorage + Collate string // Reference stores a foreign key constraint for the given column Reference *ReferenceDefinition @@ -2169,7 +2183,7 @@ type ( // More information available here: https://dev.mysql.com/doc/refman/8.0/en/window-functions-frames.html FramePoint struct { Type FramePointType - Unit IntervalTypes + Unit IntervalType Expr Expr } @@ -2210,16 +2224,6 @@ type ( FromFirstLastType int8 ) -// DateAddExprType is an enum to get types of DateAddExpr. -// This can be one of ADDDATE, DATE_ADD or a '+' operator -// with an interval left or right. 
-type DateAddExprType int8 - -// DateSubExprType is an enum to get types of DateAddExpr. -// This can be one of SUBDATE, DATE_SUB or a '-' operator -// with an interval right. -type DateSubExprType int8 - // *********** Expressions type ( // Expr represents an expression. @@ -2312,11 +2316,6 @@ type ( // ColName represents a column name. ColName struct { - // Metadata is not populated by the parser. - // It's a placeholder for analyzers to store - // additional data, typically info about which - // table or column this node references. - Metadata any Name IdentifierCI Qualifier TableName } @@ -2366,18 +2365,17 @@ type ( Expr Expr } - // TimestampFuncExpr represents the function and arguments for TIMESTAMP{ADD,DIFF} functions. - TimestampFuncExpr struct { - Name string + // TimestampDiffExpr represents the function and arguments for TIMESTAMPDIFF functions. + TimestampDiffExpr struct { Expr1 Expr Expr2 Expr - Unit string + Unit IntervalType } // ExtractFuncExpr represents the function and arguments for EXTRACT(YEAR FROM '2019-07-02') type functions. ExtractFuncExpr struct { - IntervalTypes IntervalTypes - Expr Expr + IntervalType IntervalType + Expr Expr } // CollateExpr represents dynamic collate operator. 
@@ -2499,21 +2497,6 @@ type ( Fsp int // fractional seconds precision, integer from 0 to 6 or an Argument } - // ExtractedSubquery is a subquery that has been extracted from the original AST - // This is a struct that the parser will never produce - it's written and read by the gen4 planner - // CAUTION: you should only change argName and hasValuesArg through the setter methods - ExtractedSubquery struct { - Original Expr // original expression that was replaced by this ExtractedSubquery - OpCode int // this should really be engine.PulloutOpCode, but we cannot depend on engine :( - Subquery *Subquery - OtherSide Expr // represents the side of the comparison, this field will be nil if Original is not a comparison - Merged bool // tells whether we need to rewrite this subquery to Original or not - - hasValuesArg string - argName string - alternative Expr // this is what will be used to Format this struct - } - // JSONPrettyExpr represents the function and argument for JSON_PRETTY() // https://dev.mysql.com/doc/refman/8.0/en/json-utility-functions.html#function_json-pretty JSONPrettyExpr struct { @@ -2875,10 +2858,15 @@ type ( AggrFunc interface { Expr - AggrName() string GetArg() Expr - IsDistinct() bool GetArgs() Exprs + // AggrName returns the lower case string representing this aggregation function + AggrName() string + } + + DistinctableAggr interface { + IsDistinct() bool + SetDistinct(bool) } Count struct { @@ -2985,6 +2973,13 @@ type ( Limit *Limit } + // AnyValue is an aggregation function in Vitess, even if the MySQL manual explicitly says it's not + // It's just simpler to treat it as one + // see https://dev.mysql.com/doc/refman/8.0/en/miscellaneous-functions.html#function_any-value + AnyValue struct { + Arg Expr + } + // RegexpInstrExpr represents REGEXP_INSTR() // For more information, see https://dev.mysql.com/doc/refman/8.0/en/regexp.html#function_regexp-instr RegexpInstrExpr struct { @@ -3025,24 +3020,14 @@ type ( MatchType Expr } - // DateAddExpr 
represents ADDDATE(), DATE_ADD() - // and additions with an interval on the left and right. - // For more information, see https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-add - DateAddExpr struct { - Type DateAddExprType - Date Expr - Unit IntervalTypes - Expr Expr - } + IntervalType = datetime.IntervalType - // DateSubExpr represents SUBDATE(), DATE_SUB() - // and subtractions with an interval on the right. - // For more information, see https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-sub - DateSubExpr struct { - Type DateSubExprType - Date Expr - Unit IntervalTypes - Expr Expr + // IntervalDateExpr represents ADDDATE(), DATE_ADD() + IntervalDateExpr struct { + Syntax IntervalExprSyntax + Date Expr + Interval Expr + Unit IntervalType } // ArgumentLessWindowExpr stands for the following window_functions: CUME_DIST, DENSE_RANK, PERCENT_RANK, RANK, ROW_NUMBER @@ -3173,7 +3158,7 @@ func (*UnaryExpr) iExpr() {} func (*IntroducerExpr) iExpr() {} func (*CollateExpr) iExpr() {} func (*FuncExpr) iExpr() {} -func (*TimestampFuncExpr) iExpr() {} +func (*TimestampDiffExpr) iExpr() {} func (*ExtractFuncExpr) iExpr() {} func (*WeightStringFuncExpr) iExpr() {} func (*CurTimeFuncExpr) iExpr() {} @@ -3189,7 +3174,6 @@ func (*CharExpr) iExpr() {} func (*ConvertUsingExpr) iExpr() {} func (*MatchExpr) iExpr() {} func (*Default) iExpr() {} -func (*ExtractedSubquery) iExpr() {} func (*TrimFuncExpr) iExpr() {} func (*JSONSchemaValidFuncExpr) iExpr() {} func (*JSONSchemaValidationReportFuncExpr) iExpr() {} @@ -3217,8 +3201,7 @@ func (*RegexpInstrExpr) iExpr() {} func (*RegexpLikeExpr) iExpr() {} func (*RegexpReplaceExpr) iExpr() {} func (*RegexpSubstrExpr) iExpr() {} -func (*DateAddExpr) iExpr() {} -func (*DateSubExpr) iExpr() {} +func (*IntervalDateExpr) iExpr() {} func (*ArgumentLessWindowExpr) iExpr() {} func (*FirstOrLastValueExpr) iExpr() {} func (*NtileExpr) iExpr() {} @@ -3237,6 +3220,7 @@ func (*Avg) iExpr() {} 
func (*CountStar) iExpr() {} func (*Count) iExpr() {} func (*GroupConcatExpr) iExpr() {} +func (*AnyValue) iExpr() {} func (*BitAnd) iExpr() {} func (*BitOr) iExpr() {} func (*BitXor) iExpr() {} @@ -3270,7 +3254,7 @@ func (*GeomFromGeoJSONExpr) iExpr() {} // iCallable marks all expressions that represent function calls func (*FuncExpr) iCallable() {} -func (*TimestampFuncExpr) iCallable() {} +func (*TimestampDiffExpr) iCallable() {} func (*ExtractFuncExpr) iCallable() {} func (*WeightStringFuncExpr) iCallable() {} func (*CurTimeFuncExpr) iCallable() {} @@ -3285,6 +3269,7 @@ func (*CharExpr) iCallable() {} func (*ConvertUsingExpr) iCallable() {} func (*MatchExpr) iCallable() {} func (*GroupConcatExpr) iCallable() {} +func (*AnyValue) iCallable() {} func (*JSONSchemaValidFuncExpr) iCallable() {} func (*JSONSchemaValidationReportFuncExpr) iCallable() {} func (*JSONPrettyExpr) iCallable() {} @@ -3310,8 +3295,7 @@ func (*RegexpInstrExpr) iCallable() {} func (*RegexpLikeExpr) iCallable() {} func (*RegexpReplaceExpr) iCallable() {} func (*RegexpSubstrExpr) iCallable() {} -func (*DateAddExpr) iCallable() {} -func (*DateSubExpr) iCallable() {} +func (*IntervalDateExpr) iCallable() {} func (*ArgumentLessWindowExpr) iCallable() {} func (*FirstOrLastValueExpr) iCallable() {} func (*NtileExpr) iCallable() {} @@ -3366,6 +3350,7 @@ func (stdS *StdSamp) GetArg() Expr { return stdS.Arg } func (varP *VarPop) GetArg() Expr { return varP.Arg } func (varS *VarSamp) GetArg() Expr { return varS.Arg } func (variance *Variance) GetArg() Expr { return variance.Arg } +func (av *AnyValue) GetArg() Expr { return av.Arg } func (sum *Sum) GetArgs() Exprs { return Exprs{sum.Arg} } func (min *Min) GetArgs() Exprs { return Exprs{min.Arg} } @@ -3384,42 +3369,40 @@ func (stdS *StdSamp) GetArgs() Exprs { return Exprs{stdS.Arg} } func (varP *VarPop) GetArgs() Exprs { return Exprs{varP.Arg} } func (varS *VarSamp) GetArgs() Exprs { return Exprs{varS.Arg} } func (variance *Variance) GetArgs() Exprs { 
return Exprs{variance.Arg} } +func (av *AnyValue) GetArgs() Exprs { return Exprs{av.Arg} } func (sum *Sum) IsDistinct() bool { return sum.Distinct } func (min *Min) IsDistinct() bool { return min.Distinct } func (max *Max) IsDistinct() bool { return max.Distinct } func (avg *Avg) IsDistinct() bool { return avg.Distinct } -func (cStar *CountStar) IsDistinct() bool { return false } func (count *Count) IsDistinct() bool { return count.Distinct } func (grpConcat *GroupConcatExpr) IsDistinct() bool { return grpConcat.Distinct } -func (bAnd *BitAnd) IsDistinct() bool { return false } -func (bOr *BitOr) IsDistinct() bool { return false } -func (bXor *BitXor) IsDistinct() bool { return false } -func (std *Std) IsDistinct() bool { return false } -func (stdD *StdDev) IsDistinct() bool { return false } -func (stdP *StdPop) IsDistinct() bool { return false } -func (stdS *StdSamp) IsDistinct() bool { return false } -func (varP *VarPop) IsDistinct() bool { return false } -func (varS *VarSamp) IsDistinct() bool { return false } -func (variance *Variance) IsDistinct() bool { return false } - -func (sum *Sum) AggrName() string { return "sum" } -func (min *Min) AggrName() string { return "min" } -func (max *Max) AggrName() string { return "max" } -func (avg *Avg) AggrName() string { return "avg" } -func (cStar *CountStar) AggrName() string { return "count" } -func (count *Count) AggrName() string { return "count" } -func (grpConcat *GroupConcatExpr) AggrName() string { return "group_concat" } -func (bAnd *BitAnd) AggrName() string { return "bit_and" } -func (bOr *BitOr) AggrName() string { return "bit_or" } -func (bXor *BitXor) AggrName() string { return "bit_xor" } -func (std *Std) AggrName() string { return "std" } -func (stdD *StdDev) AggrName() string { return "stddev" } -func (stdP *StdPop) AggrName() string { return "stddev_pop" } -func (stdS *StdSamp) AggrName() string { return "stddev_samp" } -func (varP *VarPop) AggrName() string { return "var_pop" } -func (varS *VarSamp) 
AggrName() string { return "var_samp" } -func (variance *Variance) AggrName() string { return "variance" } + +func (sum *Sum) SetDistinct(distinct bool) { sum.Distinct = distinct } +func (min *Min) SetDistinct(distinct bool) { min.Distinct = distinct } +func (max *Max) SetDistinct(distinct bool) { max.Distinct = distinct } +func (avg *Avg) SetDistinct(distinct bool) { avg.Distinct = distinct } +func (count *Count) SetDistinct(distinct bool) { count.Distinct = distinct } +func (grpConcat *GroupConcatExpr) SetDistinct(distinct bool) { grpConcat.Distinct = distinct } + +func (*Sum) AggrName() string { return "sum" } +func (*Min) AggrName() string { return "min" } +func (*Max) AggrName() string { return "max" } +func (*Avg) AggrName() string { return "avg" } +func (*CountStar) AggrName() string { return "count" } +func (*Count) AggrName() string { return "count" } +func (*GroupConcatExpr) AggrName() string { return "group_concat" } +func (*BitAnd) AggrName() string { return "bit_and" } +func (*BitOr) AggrName() string { return "bit_or" } +func (*BitXor) AggrName() string { return "bit_xor" } +func (*Std) AggrName() string { return "std" } +func (*StdDev) AggrName() string { return "stddev" } +func (*StdPop) AggrName() string { return "stddev_pop" } +func (*StdSamp) AggrName() string { return "stddev_samp" } +func (*VarPop) AggrName() string { return "var_pop" } +func (*VarSamp) AggrName() string { return "var_samp" } +func (*Variance) AggrName() string { return "variance" } +func (*AnyValue) AggrName() string { return "any_value" } // Exprs represents a list of value expressions. // It's not a valid expression because it's not parenthesized. 
diff --git a/go/vt/sqlparser/ast_clone.go b/go/vt/sqlparser/ast_clone.go index f7d287fdb55..11612fba60c 100644 --- a/go/vt/sqlparser/ast_clone.go +++ b/go/vt/sqlparser/ast_clone.go @@ -53,8 +53,12 @@ func CloneSQLNode(in SQLNode) SQLNode { return CloneRefOfAlterView(in) case *AlterVschema: return CloneRefOfAlterVschema(in) + case *Analyze: + return CloneRefOfAnalyze(in) case *AndExpr: return CloneRefOfAndExpr(in) + case *AnyValue: + return CloneRefOfAnyValue(in) case *Argument: return CloneRefOfArgument(in) case *ArgumentLessWindowExpr: @@ -129,10 +133,6 @@ func CloneSQLNode(in SQLNode) SQLNode { return CloneRefOfCreateView(in) case *CurTimeFuncExpr: return CloneRefOfCurTimeFuncExpr(in) - case *DateAddExpr: - return CloneRefOfDateAddExpr(in) - case *DateSubExpr: - return CloneRefOfDateSubExpr(in) case *DeallocateStmt: return CloneRefOfDeallocateStmt(in) case *Default: @@ -167,8 +167,6 @@ func CloneSQLNode(in SQLNode) SQLNode { return CloneRefOfExtractFuncExpr(in) case *ExtractValueExpr: return CloneRefOfExtractValueExpr(in) - case *ExtractedSubquery: - return CloneRefOfExtractedSubquery(in) case *FieldsClause: return CloneRefOfFieldsClause(in) case *FirstOrLastValueExpr: @@ -229,6 +227,8 @@ func CloneSQLNode(in SQLNode) SQLNode { return CloneRefOfInsert(in) case *InsertExpr: return CloneRefOfInsertExpr(in) + case *IntervalDateExpr: + return CloneRefOfIntervalDateExpr(in) case *IntervalFuncExpr: return CloneRefOfIntervalFuncExpr(in) case *IntroducerExpr: @@ -289,6 +289,8 @@ func CloneSQLNode(in SQLNode) SQLNode { return CloneRefOfJtOnResponse(in) case *KeyState: return CloneRefOfKeyState(in) + case *Kill: + return CloneRefOfKill(in) case *LagLeadExpr: return CloneRefOfLagLeadExpr(in) case *Limit: @@ -365,8 +367,6 @@ func CloneSQLNode(in SQLNode) SQLNode { return CloneRefOfOrderByOption(in) case *OtherAdmin: return CloneRefOfOtherAdmin(in) - case *OtherRead: - return CloneRefOfOtherRead(in) case *OverClause: return CloneRefOfOverClause(in) case *ParenTableExpr: @@ 
-499,8 +499,8 @@ func CloneSQLNode(in SQLNode) SQLNode { return CloneRefOfTableSpec(in) case *TablespaceOperation: return CloneRefOfTablespaceOperation(in) - case *TimestampFuncExpr: - return CloneRefOfTimestampFuncExpr(in) + case *TimestampDiffExpr: + return CloneRefOfTimestampDiffExpr(in) case *TrimFuncExpr: return CloneRefOfTrimFuncExpr(in) case *TruncateTable: @@ -726,6 +726,16 @@ func CloneRefOfAlterVschema(n *AlterVschema) *AlterVschema { return &out } +// CloneRefOfAnalyze creates a deep clone of the input. +func CloneRefOfAnalyze(n *Analyze) *Analyze { + if n == nil { + return nil + } + out := *n + out.Table = CloneTableName(n.Table) + return &out +} + // CloneRefOfAndExpr creates a deep clone of the input. func CloneRefOfAndExpr(n *AndExpr) *AndExpr { if n == nil { @@ -737,6 +747,16 @@ func CloneRefOfAndExpr(n *AndExpr) *AndExpr { return &out } +// CloneRefOfAnyValue creates a deep clone of the input. +func CloneRefOfAnyValue(n *AnyValue) *AnyValue { + if n == nil { + return nil + } + out := *n + out.Arg = CloneExpr(n.Arg) + return &out +} + // CloneRefOfArgument creates a deep clone of the input. func CloneRefOfArgument(n *Argument) *Argument { if n == nil { @@ -1124,28 +1144,6 @@ func CloneRefOfCurTimeFuncExpr(n *CurTimeFuncExpr) *CurTimeFuncExpr { return &out } -// CloneRefOfDateAddExpr creates a deep clone of the input. -func CloneRefOfDateAddExpr(n *DateAddExpr) *DateAddExpr { - if n == nil { - return nil - } - out := *n - out.Date = CloneExpr(n.Date) - out.Expr = CloneExpr(n.Expr) - return &out -} - -// CloneRefOfDateSubExpr creates a deep clone of the input. -func CloneRefOfDateSubExpr(n *DateSubExpr) *DateSubExpr { - if n == nil { - return nil - } - out := *n - out.Date = CloneExpr(n.Date) - out.Expr = CloneExpr(n.Expr) - return &out -} - // CloneRefOfDeallocateStmt creates a deep clone of the input. 
func CloneRefOfDeallocateStmt(n *DeallocateStmt) *DeallocateStmt { if n == nil { @@ -1331,19 +1329,6 @@ func CloneRefOfExtractValueExpr(n *ExtractValueExpr) *ExtractValueExpr { return &out } -// CloneRefOfExtractedSubquery creates a deep clone of the input. -func CloneRefOfExtractedSubquery(n *ExtractedSubquery) *ExtractedSubquery { - if n == nil { - return nil - } - out := *n - out.Original = CloneExpr(n.Original) - out.Subquery = CloneRefOfSubquery(n.Subquery) - out.OtherSide = CloneExpr(n.OtherSide) - out.alternative = CloneExpr(n.alternative) - return &out -} - // CloneRefOfFieldsClause creates a deep clone of the input. func CloneRefOfFieldsClause(n *FieldsClause) *FieldsClause { if n == nil { @@ -1674,6 +1659,17 @@ func CloneRefOfInsertExpr(n *InsertExpr) *InsertExpr { return &out } +// CloneRefOfIntervalDateExpr creates a deep clone of the input. +func CloneRefOfIntervalDateExpr(n *IntervalDateExpr) *IntervalDateExpr { + if n == nil { + return nil + } + out := *n + out.Date = CloneExpr(n.Date) + out.Interval = CloneExpr(n.Interval) + return &out +} + // CloneRefOfIntervalFuncExpr creates a deep clone of the input. func CloneRefOfIntervalFuncExpr(n *IntervalFuncExpr) *IntervalFuncExpr { if n == nil { @@ -2004,6 +2000,15 @@ func CloneRefOfKeyState(n *KeyState) *KeyState { return &out } +// CloneRefOfKill creates a deep clone of the input. +func CloneRefOfKill(n *Kill) *Kill { + if n == nil { + return nil + } + out := *n + return &out +} + // CloneRefOfLagLeadExpr creates a deep clone of the input. func CloneRefOfLagLeadExpr(n *LagLeadExpr) *LagLeadExpr { if n == nil { @@ -2385,15 +2390,6 @@ func CloneRefOfOtherAdmin(n *OtherAdmin) *OtherAdmin { return &out } -// CloneRefOfOtherRead creates a deep clone of the input. -func CloneRefOfOtherRead(n *OtherRead) *OtherRead { - if n == nil { - return nil - } - out := *n - return &out -} - // CloneRefOfOverClause creates a deep clone of the input. 
func CloneRefOfOverClause(n *OverClause) *OverClause { if n == nil { @@ -3112,8 +3108,8 @@ func CloneRefOfTablespaceOperation(n *TablespaceOperation) *TablespaceOperation return &out } -// CloneRefOfTimestampFuncExpr creates a deep clone of the input. -func CloneRefOfTimestampFuncExpr(n *TimestampFuncExpr) *TimestampFuncExpr { +// CloneRefOfTimestampDiffExpr creates a deep clone of the input. +func CloneRefOfTimestampDiffExpr(n *TimestampDiffExpr) *TimestampDiffExpr { if n == nil { return nil } @@ -3459,6 +3455,8 @@ func CloneAggrFunc(in AggrFunc) AggrFunc { return nil } switch in := in.(type) { + case *AnyValue: + return CloneRefOfAnyValue(in) case *Avg: return CloneRefOfAvg(in) case *BitAnd: @@ -3561,6 +3559,8 @@ func CloneCallable(in Callable) Callable { return nil } switch in := in.(type) { + case *AnyValue: + return CloneRefOfAnyValue(in) case *ArgumentLessWindowExpr: return CloneRefOfArgumentLessWindowExpr(in) case *Avg: @@ -3577,10 +3577,6 @@ func CloneCallable(in Callable) Callable { return CloneRefOfCountStar(in) case *CurTimeFuncExpr: return CloneRefOfCurTimeFuncExpr(in) - case *DateAddExpr: - return CloneRefOfDateAddExpr(in) - case *DateSubExpr: - return CloneRefOfDateSubExpr(in) case *ExtractFuncExpr: return CloneRefOfExtractFuncExpr(in) case *ExtractValueExpr: @@ -3615,6 +3611,8 @@ func CloneCallable(in Callable) Callable { return CloneRefOfGroupConcatExpr(in) case *InsertExpr: return CloneRefOfInsertExpr(in) + case *IntervalDateExpr: + return CloneRefOfIntervalDateExpr(in) case *IntervalFuncExpr: return CloneRefOfIntervalFuncExpr(in) case *JSONArrayExpr: @@ -3707,8 +3705,8 @@ func CloneCallable(in Callable) Callable { return CloneRefOfSubstrExpr(in) case *Sum: return CloneRefOfSum(in) - case *TimestampFuncExpr: - return CloneRefOfTimestampFuncExpr(in) + case *TimestampDiffExpr: + return CloneRefOfTimestampDiffExpr(in) case *TrimFuncExpr: return CloneRefOfTrimFuncExpr(in) case *UpdateXMLExpr: @@ -3827,6 +3825,8 @@ func CloneExpr(in Expr) Expr { switch 
in := in.(type) { case *AndExpr: return CloneRefOfAndExpr(in) + case *AnyValue: + return CloneRefOfAnyValue(in) case *Argument: return CloneRefOfArgument(in) case *ArgumentLessWindowExpr: @@ -3869,10 +3869,6 @@ func CloneExpr(in Expr) Expr { return CloneRefOfCountStar(in) case *CurTimeFuncExpr: return CloneRefOfCurTimeFuncExpr(in) - case *DateAddExpr: - return CloneRefOfDateAddExpr(in) - case *DateSubExpr: - return CloneRefOfDateSubExpr(in) case *Default: return CloneRefOfDefault(in) case *ExistsExpr: @@ -3881,8 +3877,6 @@ func CloneExpr(in Expr) Expr { return CloneRefOfExtractFuncExpr(in) case *ExtractValueExpr: return CloneRefOfExtractValueExpr(in) - case *ExtractedSubquery: - return CloneRefOfExtractedSubquery(in) case *FirstOrLastValueExpr: return CloneRefOfFirstOrLastValueExpr(in) case *FuncExpr: @@ -3913,6 +3907,8 @@ func CloneExpr(in Expr) Expr { return CloneRefOfGroupConcatExpr(in) case *InsertExpr: return CloneRefOfInsertExpr(in) + case *IntervalDateExpr: + return CloneRefOfIntervalDateExpr(in) case *IntervalFuncExpr: return CloneRefOfIntervalFuncExpr(in) case *IntroducerExpr: @@ -4033,8 +4029,8 @@ func CloneExpr(in Expr) Expr { return CloneRefOfSubstrExpr(in) case *Sum: return CloneRefOfSum(in) - case *TimestampFuncExpr: - return CloneRefOfTimestampFuncExpr(in) + case *TimestampDiffExpr: + return CloneRefOfTimestampDiffExpr(in) case *TrimFuncExpr: return CloneRefOfTrimFuncExpr(in) case *UnaryExpr: @@ -4165,6 +4161,8 @@ func CloneStatement(in Statement) Statement { return CloneRefOfAlterView(in) case *AlterVschema: return CloneRefOfAlterVschema(in) + case *Analyze: + return CloneRefOfAnalyze(in) case *Begin: return CloneRefOfBegin(in) case *CallProc: @@ -4199,6 +4197,8 @@ func CloneStatement(in Statement) Statement { return CloneRefOfFlush(in) case *Insert: return CloneRefOfInsert(in) + case *Kill: + return CloneRefOfKill(in) case *Load: return CloneRefOfLoad(in) case *LoadDataStmt: @@ -4207,8 +4207,6 @@ func CloneStatement(in Statement) Statement { return 
CloneRefOfLockTables(in) case *OtherAdmin: return CloneRefOfOtherAdmin(in) - case *OtherRead: - return CloneRefOfOtherRead(in) case *PrepareStmt: return CloneRefOfPrepareStmt(in) case *PurgeBinaryLogs: diff --git a/go/vt/sqlparser/ast_copy_on_rewrite.go b/go/vt/sqlparser/ast_copy_on_rewrite.go index fa5c256a885..aeaa342eced 100644 --- a/go/vt/sqlparser/ast_copy_on_rewrite.go +++ b/go/vt/sqlparser/ast_copy_on_rewrite.go @@ -52,8 +52,12 @@ func (c *cow) copyOnRewriteSQLNode(n SQLNode, parent SQLNode) (out SQLNode, chan return c.copyOnRewriteRefOfAlterView(n, parent) case *AlterVschema: return c.copyOnRewriteRefOfAlterVschema(n, parent) + case *Analyze: + return c.copyOnRewriteRefOfAnalyze(n, parent) case *AndExpr: return c.copyOnRewriteRefOfAndExpr(n, parent) + case *AnyValue: + return c.copyOnRewriteRefOfAnyValue(n, parent) case *Argument: return c.copyOnRewriteRefOfArgument(n, parent) case *ArgumentLessWindowExpr: @@ -128,10 +132,6 @@ func (c *cow) copyOnRewriteSQLNode(n SQLNode, parent SQLNode) (out SQLNode, chan return c.copyOnRewriteRefOfCreateView(n, parent) case *CurTimeFuncExpr: return c.copyOnRewriteRefOfCurTimeFuncExpr(n, parent) - case *DateAddExpr: - return c.copyOnRewriteRefOfDateAddExpr(n, parent) - case *DateSubExpr: - return c.copyOnRewriteRefOfDateSubExpr(n, parent) case *DeallocateStmt: return c.copyOnRewriteRefOfDeallocateStmt(n, parent) case *Default: @@ -166,8 +166,6 @@ func (c *cow) copyOnRewriteSQLNode(n SQLNode, parent SQLNode) (out SQLNode, chan return c.copyOnRewriteRefOfExtractFuncExpr(n, parent) case *ExtractValueExpr: return c.copyOnRewriteRefOfExtractValueExpr(n, parent) - case *ExtractedSubquery: - return c.copyOnRewriteRefOfExtractedSubquery(n, parent) case *FieldsClause: return c.copyOnRewriteRefOfFieldsClause(n, parent) case *FirstOrLastValueExpr: @@ -228,6 +226,8 @@ func (c *cow) copyOnRewriteSQLNode(n SQLNode, parent SQLNode) (out SQLNode, chan return c.copyOnRewriteRefOfInsert(n, parent) case *InsertExpr: return 
c.copyOnRewriteRefOfInsertExpr(n, parent) + case *IntervalDateExpr: + return c.copyOnRewriteRefOfIntervalDateExpr(n, parent) case *IntervalFuncExpr: return c.copyOnRewriteRefOfIntervalFuncExpr(n, parent) case *IntroducerExpr: @@ -288,6 +288,8 @@ func (c *cow) copyOnRewriteSQLNode(n SQLNode, parent SQLNode) (out SQLNode, chan return c.copyOnRewriteRefOfJtOnResponse(n, parent) case *KeyState: return c.copyOnRewriteRefOfKeyState(n, parent) + case *Kill: + return c.copyOnRewriteRefOfKill(n, parent) case *LagLeadExpr: return c.copyOnRewriteRefOfLagLeadExpr(n, parent) case *Limit: @@ -364,8 +366,6 @@ func (c *cow) copyOnRewriteSQLNode(n SQLNode, parent SQLNode) (out SQLNode, chan return c.copyOnRewriteRefOfOrderByOption(n, parent) case *OtherAdmin: return c.copyOnRewriteRefOfOtherAdmin(n, parent) - case *OtherRead: - return c.copyOnRewriteRefOfOtherRead(n, parent) case *OverClause: return c.copyOnRewriteRefOfOverClause(n, parent) case *ParenTableExpr: @@ -498,8 +498,8 @@ func (c *cow) copyOnRewriteSQLNode(n SQLNode, parent SQLNode) (out SQLNode, chan return c.copyOnRewriteRefOfTableSpec(n, parent) case *TablespaceOperation: return c.copyOnRewriteRefOfTablespaceOperation(n, parent) - case *TimestampFuncExpr: - return c.copyOnRewriteRefOfTimestampFuncExpr(n, parent) + case *TimestampDiffExpr: + return c.copyOnRewriteRefOfTimestampDiffExpr(n, parent) case *TrimFuncExpr: return c.copyOnRewriteRefOfTrimFuncExpr(n, parent) case *TruncateTable: @@ -923,6 +923,28 @@ func (c *cow) copyOnRewriteRefOfAlterVschema(n *AlterVschema, parent SQLNode) (o } return } +func (c *cow) copyOnRewriteRefOfAnalyze(n *Analyze, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _Table, changedTable := c.copyOnRewriteTableName(n.Table, n) + if changedTable { + res := *n + res.Table, _ = _Table.(TableName) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post 
!= nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} func (c *cow) copyOnRewriteRefOfAndExpr(n *AndExpr, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false @@ -947,6 +969,28 @@ func (c *cow) copyOnRewriteRefOfAndExpr(n *AndExpr, parent SQLNode) (out SQLNode } return } +func (c *cow) copyOnRewriteRefOfAnyValue(n *AnyValue, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n) + if changedArg { + res := *n + res.Arg, _ = _Arg.(Expr) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} func (c *cow) copyOnRewriteRefOfArgument(n *Argument, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false @@ -1756,54 +1800,6 @@ func (c *cow) copyOnRewriteRefOfCurTimeFuncExpr(n *CurTimeFuncExpr, parent SQLNo } return } -func (c *cow) copyOnRewriteRefOfDateAddExpr(n *DateAddExpr, parent SQLNode) (out SQLNode, changed bool) { - if n == nil || c.cursor.stop { - return n, false - } - out = n - if c.pre == nil || c.pre(n, parent) { - _Date, changedDate := c.copyOnRewriteExpr(n.Date, n) - _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n) - if changedDate || changedExpr { - res := *n - res.Date, _ = _Date.(Expr) - res.Expr, _ = _Expr.(Expr) - out = &res - if c.cloned != nil { - c.cloned(n, out) - } - changed = true - } - } - if c.post != nil { - out, changed = c.postVisit(out, parent, changed) - } - return -} -func (c *cow) copyOnRewriteRefOfDateSubExpr(n *DateSubExpr, parent SQLNode) (out SQLNode, changed bool) { - if n == nil || c.cursor.stop { - return n, false - } - out = n - if c.pre == nil || c.pre(n, parent) { - _Date, changedDate := c.copyOnRewriteExpr(n.Date, n) - _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, 
n) - if changedDate || changedExpr { - res := *n - res.Date, _ = _Date.(Expr) - res.Expr, _ = _Expr.(Expr) - out = &res - if c.cloned != nil { - c.cloned(n, out) - } - changed = true - } - } - if c.post != nil { - out, changed = c.postVisit(out, parent, changed) - } - return -} func (c *cow) copyOnRewriteRefOfDeallocateStmt(n *DeallocateStmt, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false @@ -2197,34 +2193,6 @@ func (c *cow) copyOnRewriteRefOfExtractValueExpr(n *ExtractValueExpr, parent SQL } return } -func (c *cow) copyOnRewriteRefOfExtractedSubquery(n *ExtractedSubquery, parent SQLNode) (out SQLNode, changed bool) { - if n == nil || c.cursor.stop { - return n, false - } - out = n - if c.pre == nil || c.pre(n, parent) { - _Original, changedOriginal := c.copyOnRewriteExpr(n.Original, n) - _Subquery, changedSubquery := c.copyOnRewriteRefOfSubquery(n.Subquery, n) - _OtherSide, changedOtherSide := c.copyOnRewriteExpr(n.OtherSide, n) - _alternative, changedalternative := c.copyOnRewriteExpr(n.alternative, n) - if changedOriginal || changedSubquery || changedOtherSide || changedalternative { - res := *n - res.Original, _ = _Original.(Expr) - res.Subquery, _ = _Subquery.(*Subquery) - res.OtherSide, _ = _OtherSide.(Expr) - res.alternative, _ = _alternative.(Expr) - out = &res - if c.cloned != nil { - c.cloned(n, out) - } - changed = true - } - } - if c.post != nil { - out, changed = c.postVisit(out, parent, changed) - } - return -} func (c *cow) copyOnRewriteRefOfFieldsClause(n *FieldsClause, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false @@ -2919,6 +2887,30 @@ func (c *cow) copyOnRewriteRefOfInsertExpr(n *InsertExpr, parent SQLNode) (out S } return } +func (c *cow) copyOnRewriteRefOfIntervalDateExpr(n *IntervalDateExpr, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _Date, 
changedDate := c.copyOnRewriteExpr(n.Date, n) + _Interval, changedInterval := c.copyOnRewriteExpr(n.Interval, n) + if changedDate || changedInterval { + res := *n + res.Date, _ = _Date.(Expr) + res.Interval, _ = _Interval.(Expr) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} func (c *cow) copyOnRewriteRefOfIntervalFuncExpr(n *IntervalFuncExpr, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false @@ -3673,6 +3665,18 @@ func (c *cow) copyOnRewriteRefOfKeyState(n *KeyState, parent SQLNode) (out SQLNo } return } +func (c *cow) copyOnRewriteRefOfKill(n *Kill, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} func (c *cow) copyOnRewriteRefOfLagLeadExpr(n *LagLeadExpr, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false @@ -4448,18 +4452,6 @@ func (c *cow) copyOnRewriteRefOfOtherAdmin(n *OtherAdmin, parent SQLNode) (out S } return } -func (c *cow) copyOnRewriteRefOfOtherRead(n *OtherRead, parent SQLNode) (out SQLNode, changed bool) { - if n == nil || c.cursor.stop { - return n, false - } - out = n - if c.pre == nil || c.pre(n, parent) { - } - if c.post != nil { - out, changed = c.postVisit(out, parent, changed) - } - return -} func (c *cow) copyOnRewriteRefOfOverClause(n *OverClause, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false @@ -5965,7 +5957,7 @@ func (c *cow) copyOnRewriteRefOfTablespaceOperation(n *TablespaceOperation, pare } return } -func (c *cow) copyOnRewriteRefOfTimestampFuncExpr(n *TimestampFuncExpr, parent SQLNode) (out SQLNode, changed bool) { +func (c *cow) copyOnRewriteRefOfTimestampDiffExpr(n *TimestampDiffExpr, 
parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false } @@ -6709,6 +6701,8 @@ func (c *cow) copyOnRewriteAggrFunc(n AggrFunc, parent SQLNode) (out SQLNode, ch return n, false } switch n := n.(type) { + case *AnyValue: + return c.copyOnRewriteRefOfAnyValue(n, parent) case *Avg: return c.copyOnRewriteRefOfAvg(n, parent) case *BitAnd: @@ -6807,6 +6801,8 @@ func (c *cow) copyOnRewriteCallable(n Callable, parent SQLNode) (out SQLNode, ch return n, false } switch n := n.(type) { + case *AnyValue: + return c.copyOnRewriteRefOfAnyValue(n, parent) case *ArgumentLessWindowExpr: return c.copyOnRewriteRefOfArgumentLessWindowExpr(n, parent) case *Avg: @@ -6823,10 +6819,6 @@ func (c *cow) copyOnRewriteCallable(n Callable, parent SQLNode) (out SQLNode, ch return c.copyOnRewriteRefOfCountStar(n, parent) case *CurTimeFuncExpr: return c.copyOnRewriteRefOfCurTimeFuncExpr(n, parent) - case *DateAddExpr: - return c.copyOnRewriteRefOfDateAddExpr(n, parent) - case *DateSubExpr: - return c.copyOnRewriteRefOfDateSubExpr(n, parent) case *ExtractFuncExpr: return c.copyOnRewriteRefOfExtractFuncExpr(n, parent) case *ExtractValueExpr: @@ -6861,6 +6853,8 @@ func (c *cow) copyOnRewriteCallable(n Callable, parent SQLNode) (out SQLNode, ch return c.copyOnRewriteRefOfGroupConcatExpr(n, parent) case *InsertExpr: return c.copyOnRewriteRefOfInsertExpr(n, parent) + case *IntervalDateExpr: + return c.copyOnRewriteRefOfIntervalDateExpr(n, parent) case *IntervalFuncExpr: return c.copyOnRewriteRefOfIntervalFuncExpr(n, parent) case *JSONArrayExpr: @@ -6953,8 +6947,8 @@ func (c *cow) copyOnRewriteCallable(n Callable, parent SQLNode) (out SQLNode, ch return c.copyOnRewriteRefOfSubstrExpr(n, parent) case *Sum: return c.copyOnRewriteRefOfSum(n, parent) - case *TimestampFuncExpr: - return c.copyOnRewriteRefOfTimestampFuncExpr(n, parent) + case *TimestampDiffExpr: + return c.copyOnRewriteRefOfTimestampDiffExpr(n, parent) case *TrimFuncExpr: return 
c.copyOnRewriteRefOfTrimFuncExpr(n, parent) case *UpdateXMLExpr: @@ -7061,6 +7055,8 @@ func (c *cow) copyOnRewriteExpr(n Expr, parent SQLNode) (out SQLNode, changed bo switch n := n.(type) { case *AndExpr: return c.copyOnRewriteRefOfAndExpr(n, parent) + case *AnyValue: + return c.copyOnRewriteRefOfAnyValue(n, parent) case *Argument: return c.copyOnRewriteRefOfArgument(n, parent) case *ArgumentLessWindowExpr: @@ -7103,10 +7099,6 @@ func (c *cow) copyOnRewriteExpr(n Expr, parent SQLNode) (out SQLNode, changed bo return c.copyOnRewriteRefOfCountStar(n, parent) case *CurTimeFuncExpr: return c.copyOnRewriteRefOfCurTimeFuncExpr(n, parent) - case *DateAddExpr: - return c.copyOnRewriteRefOfDateAddExpr(n, parent) - case *DateSubExpr: - return c.copyOnRewriteRefOfDateSubExpr(n, parent) case *Default: return c.copyOnRewriteRefOfDefault(n, parent) case *ExistsExpr: @@ -7115,8 +7107,6 @@ func (c *cow) copyOnRewriteExpr(n Expr, parent SQLNode) (out SQLNode, changed bo return c.copyOnRewriteRefOfExtractFuncExpr(n, parent) case *ExtractValueExpr: return c.copyOnRewriteRefOfExtractValueExpr(n, parent) - case *ExtractedSubquery: - return c.copyOnRewriteRefOfExtractedSubquery(n, parent) case *FirstOrLastValueExpr: return c.copyOnRewriteRefOfFirstOrLastValueExpr(n, parent) case *FuncExpr: @@ -7147,6 +7137,8 @@ func (c *cow) copyOnRewriteExpr(n Expr, parent SQLNode) (out SQLNode, changed bo return c.copyOnRewriteRefOfGroupConcatExpr(n, parent) case *InsertExpr: return c.copyOnRewriteRefOfInsertExpr(n, parent) + case *IntervalDateExpr: + return c.copyOnRewriteRefOfIntervalDateExpr(n, parent) case *IntervalFuncExpr: return c.copyOnRewriteRefOfIntervalFuncExpr(n, parent) case *IntroducerExpr: @@ -7267,8 +7259,8 @@ func (c *cow) copyOnRewriteExpr(n Expr, parent SQLNode) (out SQLNode, changed bo return c.copyOnRewriteRefOfSubstrExpr(n, parent) case *Sum: return c.copyOnRewriteRefOfSum(n, parent) - case *TimestampFuncExpr: - return c.copyOnRewriteRefOfTimestampFuncExpr(n, parent) + case 
*TimestampDiffExpr: + return c.copyOnRewriteRefOfTimestampDiffExpr(n, parent) case *TrimFuncExpr: return c.copyOnRewriteRefOfTrimFuncExpr(n, parent) case *UnaryExpr: @@ -7387,6 +7379,8 @@ func (c *cow) copyOnRewriteStatement(n Statement, parent SQLNode) (out SQLNode, return c.copyOnRewriteRefOfAlterView(n, parent) case *AlterVschema: return c.copyOnRewriteRefOfAlterVschema(n, parent) + case *Analyze: + return c.copyOnRewriteRefOfAnalyze(n, parent) case *Begin: return c.copyOnRewriteRefOfBegin(n, parent) case *CallProc: @@ -7421,6 +7415,8 @@ func (c *cow) copyOnRewriteStatement(n Statement, parent SQLNode) (out SQLNode, return c.copyOnRewriteRefOfFlush(n, parent) case *Insert: return c.copyOnRewriteRefOfInsert(n, parent) + case *Kill: + return c.copyOnRewriteRefOfKill(n, parent) case *Load: return c.copyOnRewriteRefOfLoad(n, parent) case *LoadDataStmt: @@ -7429,8 +7425,6 @@ func (c *cow) copyOnRewriteStatement(n Statement, parent SQLNode) (out SQLNode, return c.copyOnRewriteRefOfLockTables(n, parent) case *OtherAdmin: return c.copyOnRewriteRefOfOtherAdmin(n, parent) - case *OtherRead: - return c.copyOnRewriteRefOfOtherRead(n, parent) case *PrepareStmt: return c.copyOnRewriteRefOfPrepareStmt(n, parent) case *PurgeBinaryLogs: diff --git a/go/vt/sqlparser/ast_equals.go b/go/vt/sqlparser/ast_equals.go index 742f7327d45..332fde93ad1 100644 --- a/go/vt/sqlparser/ast_equals.go +++ b/go/vt/sqlparser/ast_equals.go @@ -116,12 +116,24 @@ func (cmp *Comparator) SQLNode(inA, inB SQLNode) bool { return false } return cmp.RefOfAlterVschema(a, b) + case *Analyze: + b, ok := inB.(*Analyze) + if !ok { + return false + } + return cmp.RefOfAnalyze(a, b) case *AndExpr: b, ok := inB.(*AndExpr) if !ok { return false } return cmp.RefOfAndExpr(a, b) + case *AnyValue: + b, ok := inB.(*AnyValue) + if !ok { + return false + } + return cmp.RefOfAnyValue(a, b) case *Argument: b, ok := inB.(*Argument) if !ok { @@ -344,18 +356,6 @@ func (cmp *Comparator) SQLNode(inA, inB SQLNode) bool { return 
false } return cmp.RefOfCurTimeFuncExpr(a, b) - case *DateAddExpr: - b, ok := inB.(*DateAddExpr) - if !ok { - return false - } - return cmp.RefOfDateAddExpr(a, b) - case *DateSubExpr: - b, ok := inB.(*DateSubExpr) - if !ok { - return false - } - return cmp.RefOfDateSubExpr(a, b) case *DeallocateStmt: b, ok := inB.(*DeallocateStmt) if !ok { @@ -458,12 +458,6 @@ func (cmp *Comparator) SQLNode(inA, inB SQLNode) bool { return false } return cmp.RefOfExtractValueExpr(a, b) - case *ExtractedSubquery: - b, ok := inB.(*ExtractedSubquery) - if !ok { - return false - } - return cmp.RefOfExtractedSubquery(a, b) case *FieldsClause: b, ok := inB.(*FieldsClause) if !ok { @@ -644,6 +638,12 @@ func (cmp *Comparator) SQLNode(inA, inB SQLNode) bool { return false } return cmp.RefOfInsertExpr(a, b) + case *IntervalDateExpr: + b, ok := inB.(*IntervalDateExpr) + if !ok { + return false + } + return cmp.RefOfIntervalDateExpr(a, b) case *IntervalFuncExpr: b, ok := inB.(*IntervalFuncExpr) if !ok { @@ -824,6 +824,12 @@ func (cmp *Comparator) SQLNode(inA, inB SQLNode) bool { return false } return cmp.RefOfKeyState(a, b) + case *Kill: + b, ok := inB.(*Kill) + if !ok { + return false + } + return cmp.RefOfKill(a, b) case *LagLeadExpr: b, ok := inB.(*LagLeadExpr) if !ok { @@ -1052,12 +1058,6 @@ func (cmp *Comparator) SQLNode(inA, inB SQLNode) bool { return false } return cmp.RefOfOtherAdmin(a, b) - case *OtherRead: - b, ok := inB.(*OtherRead) - if !ok { - return false - } - return cmp.RefOfOtherRead(a, b) case *OverClause: b, ok := inB.(*OverClause) if !ok { @@ -1454,12 +1454,12 @@ func (cmp *Comparator) SQLNode(inA, inB SQLNode) bool { return false } return cmp.RefOfTablespaceOperation(a, b) - case *TimestampFuncExpr: - b, ok := inB.(*TimestampFuncExpr) + case *TimestampDiffExpr: + b, ok := inB.(*TimestampDiffExpr) if !ok { return false } - return cmp.RefOfTimestampFuncExpr(a, b) + return cmp.RefOfTimestampDiffExpr(a, b) case *TrimFuncExpr: b, ok := inB.(*TrimFuncExpr) if !ok { @@ -1741,6 
+1741,7 @@ func (cmp *Comparator) RefOfAlterColumn(a, b *AlterColumn) bool { return false } return a.DropDefault == b.DropDefault && + a.DefaultLiteral == b.DefaultLiteral && cmp.RefOfColName(a.Column, b.Column) && cmp.Expr(a.DefaultVal, b.DefaultVal) && cmp.RefOfBool(a.Invisible, b.Invisible) @@ -1836,6 +1837,18 @@ func (cmp *Comparator) RefOfAlterVschema(a, b *AlterVschema) bool { cmp.RefOfAutoIncSpec(a.AutoIncSpec, b.AutoIncSpec) } +// RefOfAnalyze does deep equals between the two objects. +func (cmp *Comparator) RefOfAnalyze(a, b *Analyze) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.IsLocal == b.IsLocal && + cmp.TableName(a.Table, b.Table) +} + // RefOfAndExpr does deep equals between the two objects. func (cmp *Comparator) RefOfAndExpr(a, b *AndExpr) bool { if a == b { @@ -1848,6 +1861,17 @@ func (cmp *Comparator) RefOfAndExpr(a, b *AndExpr) bool { cmp.Expr(a.Right, b.Right) } +// RefOfAnyValue does deep equals between the two objects. +func (cmp *Comparator) RefOfAnyValue(a, b *AnyValue) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return cmp.Expr(a.Arg, b.Arg) +} + // RefOfArgument does deep equals between the two objects. func (cmp *Comparator) RefOfArgument(a, b *Argument) bool { if a == b { @@ -2310,34 +2334,6 @@ func (cmp *Comparator) RefOfCurTimeFuncExpr(a, b *CurTimeFuncExpr) bool { cmp.IdentifierCI(a.Name, b.Name) } -// RefOfDateAddExpr does deep equals between the two objects. -func (cmp *Comparator) RefOfDateAddExpr(a, b *DateAddExpr) bool { - if a == b { - return true - } - if a == nil || b == nil { - return false - } - return a.Type == b.Type && - cmp.Expr(a.Date, b.Date) && - a.Unit == b.Unit && - cmp.Expr(a.Expr, b.Expr) -} - -// RefOfDateSubExpr does deep equals between the two objects. 
-func (cmp *Comparator) RefOfDateSubExpr(a, b *DateSubExpr) bool { - if a == b { - return true - } - if a == nil || b == nil { - return false - } - return a.Type == b.Type && - cmp.Expr(a.Date, b.Date) && - a.Unit == b.Unit && - cmp.Expr(a.Expr, b.Expr) -} - // RefOfDeallocateStmt does deep equals between the two objects. func (cmp *Comparator) RefOfDeallocateStmt(a, b *DeallocateStmt) bool { if a == b { @@ -2537,7 +2533,7 @@ func (cmp *Comparator) RefOfExtractFuncExpr(a, b *ExtractFuncExpr) bool { if a == nil || b == nil { return false } - return a.IntervalTypes == b.IntervalTypes && + return a.IntervalType == b.IntervalType && cmp.Expr(a.Expr, b.Expr) } @@ -2553,24 +2549,6 @@ func (cmp *Comparator) RefOfExtractValueExpr(a, b *ExtractValueExpr) bool { cmp.Expr(a.XPathExpr, b.XPathExpr) } -// RefOfExtractedSubquery does deep equals between the two objects. -func (cmp *Comparator) RefOfExtractedSubquery(a, b *ExtractedSubquery) bool { - if a == b { - return true - } - if a == nil || b == nil { - return false - } - return a.OpCode == b.OpCode && - a.Merged == b.Merged && - a.hasValuesArg == b.hasValuesArg && - a.argName == b.argName && - cmp.Expr(a.Original, b.Original) && - cmp.RefOfSubquery(a.Subquery, b.Subquery) && - cmp.Expr(a.OtherSide, b.OtherSide) && - cmp.Expr(a.alternative, b.alternative) -} - // RefOfFieldsClause does deep equals between the two objects. func (cmp *Comparator) RefOfFieldsClause(a, b *FieldsClause) bool { if a == b { @@ -2961,6 +2939,20 @@ func (cmp *Comparator) RefOfInsertExpr(a, b *InsertExpr) bool { cmp.Expr(a.NewStr, b.NewStr) } +// RefOfIntervalDateExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfIntervalDateExpr(a, b *IntervalDateExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Syntax == b.Syntax && + cmp.Expr(a.Date, b.Date) && + cmp.Expr(a.Interval, b.Interval) && + a.Unit == b.Unit +} + // RefOfIntervalFuncExpr does deep equals between the two objects. 
func (cmp *Comparator) RefOfIntervalFuncExpr(a, b *IntervalFuncExpr) bool { if a == b { @@ -3329,6 +3321,18 @@ func (cmp *Comparator) RefOfKeyState(a, b *KeyState) bool { return a.Enable == b.Enable } +// RefOfKill does deep equals between the two objects. +func (cmp *Comparator) RefOfKill(a, b *Kill) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.ProcesslistID == b.ProcesslistID && + a.Type == b.Type +} + // RefOfLagLeadExpr does deep equals between the two objects. func (cmp *Comparator) RefOfLagLeadExpr(a, b *LagLeadExpr) bool { if a == b { @@ -3768,17 +3772,6 @@ func (cmp *Comparator) RefOfOtherAdmin(a, b *OtherAdmin) bool { return true } -// RefOfOtherRead does deep equals between the two objects. -func (cmp *Comparator) RefOfOtherRead(a, b *OtherRead) bool { - if a == b { - return true - } - if a == nil || b == nil { - return false - } - return true -} - // RefOfOverClause does deep equals between the two objects. func (cmp *Comparator) RefOfOverClause(a, b *OverClause) bool { if a == b { @@ -4606,18 +4599,17 @@ func (cmp *Comparator) RefOfTablespaceOperation(a, b *TablespaceOperation) bool return a.Import == b.Import } -// RefOfTimestampFuncExpr does deep equals between the two objects. -func (cmp *Comparator) RefOfTimestampFuncExpr(a, b *TimestampFuncExpr) bool { +// RefOfTimestampDiffExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfTimestampDiffExpr(a, b *TimestampDiffExpr) bool { if a == b { return true } if a == nil || b == nil { return false } - return a.Name == b.Name && - a.Unit == b.Unit && - cmp.Expr(a.Expr1, b.Expr1) && - cmp.Expr(a.Expr2, b.Expr2) + return cmp.Expr(a.Expr1, b.Expr1) && + cmp.Expr(a.Expr2, b.Expr2) && + a.Unit == b.Unit } // RefOfTrimFuncExpr does deep equals between the two objects. 
@@ -5001,6 +4993,12 @@ func (cmp *Comparator) AggrFunc(inA, inB AggrFunc) bool { return false } switch a := inA.(type) { + case *AnyValue: + b, ok := inB.(*AnyValue) + if !ok { + return false + } + return cmp.RefOfAnyValue(a, b) case *Avg: b, ok := inB.(*Avg) if !ok { @@ -5265,6 +5263,12 @@ func (cmp *Comparator) Callable(inA, inB Callable) bool { return false } switch a := inA.(type) { + case *AnyValue: + b, ok := inB.(*AnyValue) + if !ok { + return false + } + return cmp.RefOfAnyValue(a, b) case *ArgumentLessWindowExpr: b, ok := inB.(*ArgumentLessWindowExpr) if !ok { @@ -5313,18 +5317,6 @@ func (cmp *Comparator) Callable(inA, inB Callable) bool { return false } return cmp.RefOfCurTimeFuncExpr(a, b) - case *DateAddExpr: - b, ok := inB.(*DateAddExpr) - if !ok { - return false - } - return cmp.RefOfDateAddExpr(a, b) - case *DateSubExpr: - b, ok := inB.(*DateSubExpr) - if !ok { - return false - } - return cmp.RefOfDateSubExpr(a, b) case *ExtractFuncExpr: b, ok := inB.(*ExtractFuncExpr) if !ok { @@ -5427,6 +5419,12 @@ func (cmp *Comparator) Callable(inA, inB Callable) bool { return false } return cmp.RefOfInsertExpr(a, b) + case *IntervalDateExpr: + b, ok := inB.(*IntervalDateExpr) + if !ok { + return false + } + return cmp.RefOfIntervalDateExpr(a, b) case *IntervalFuncExpr: b, ok := inB.(*IntervalFuncExpr) if !ok { @@ -5703,12 +5701,12 @@ func (cmp *Comparator) Callable(inA, inB Callable) bool { return false } return cmp.RefOfSum(a, b) - case *TimestampFuncExpr: - b, ok := inB.(*TimestampFuncExpr) + case *TimestampDiffExpr: + b, ok := inB.(*TimestampDiffExpr) if !ok { return false } - return cmp.RefOfTimestampFuncExpr(a, b) + return cmp.RefOfTimestampDiffExpr(a, b) case *TrimFuncExpr: b, ok := inB.(*TrimFuncExpr) if !ok { @@ -5937,6 +5935,12 @@ func (cmp *Comparator) Expr(inA, inB Expr) bool { return false } return cmp.RefOfAndExpr(a, b) + case *AnyValue: + b, ok := inB.(*AnyValue) + if !ok { + return false + } + return cmp.RefOfAnyValue(a, b) case *Argument: b, ok 
:= inB.(*Argument) if !ok { @@ -6063,18 +6067,6 @@ func (cmp *Comparator) Expr(inA, inB Expr) bool { return false } return cmp.RefOfCurTimeFuncExpr(a, b) - case *DateAddExpr: - b, ok := inB.(*DateAddExpr) - if !ok { - return false - } - return cmp.RefOfDateAddExpr(a, b) - case *DateSubExpr: - b, ok := inB.(*DateSubExpr) - if !ok { - return false - } - return cmp.RefOfDateSubExpr(a, b) case *Default: b, ok := inB.(*Default) if !ok { @@ -6099,12 +6091,6 @@ func (cmp *Comparator) Expr(inA, inB Expr) bool { return false } return cmp.RefOfExtractValueExpr(a, b) - case *ExtractedSubquery: - b, ok := inB.(*ExtractedSubquery) - if !ok { - return false - } - return cmp.RefOfExtractedSubquery(a, b) case *FirstOrLastValueExpr: b, ok := inB.(*FirstOrLastValueExpr) if !ok { @@ -6195,6 +6181,12 @@ func (cmp *Comparator) Expr(inA, inB Expr) bool { return false } return cmp.RefOfInsertExpr(a, b) + case *IntervalDateExpr: + b, ok := inB.(*IntervalDateExpr) + if !ok { + return false + } + return cmp.RefOfIntervalDateExpr(a, b) case *IntervalFuncExpr: b, ok := inB.(*IntervalFuncExpr) if !ok { @@ -6555,12 +6547,12 @@ func (cmp *Comparator) Expr(inA, inB Expr) bool { return false } return cmp.RefOfSum(a, b) - case *TimestampFuncExpr: - b, ok := inB.(*TimestampFuncExpr) + case *TimestampDiffExpr: + b, ok := inB.(*TimestampDiffExpr) if !ok { return false } - return cmp.RefOfTimestampFuncExpr(a, b) + return cmp.RefOfTimestampDiffExpr(a, b) case *TrimFuncExpr: b, ok := inB.(*TrimFuncExpr) if !ok { @@ -6825,6 +6817,12 @@ func (cmp *Comparator) Statement(inA, inB Statement) bool { return false } return cmp.RefOfAlterVschema(a, b) + case *Analyze: + b, ok := inB.(*Analyze) + if !ok { + return false + } + return cmp.RefOfAnalyze(a, b) case *Begin: b, ok := inB.(*Begin) if !ok { @@ -6927,6 +6925,12 @@ func (cmp *Comparator) Statement(inA, inB Statement) bool { return false } return cmp.RefOfInsert(a, b) + case *Kill: + b, ok := inB.(*Kill) + if !ok { + return false + } + return cmp.RefOfKill(a, 
b) case *Load: b, ok := inB.(*Load) if !ok { @@ -6951,12 +6955,6 @@ func (cmp *Comparator) Statement(inA, inB Statement) bool { return false } return cmp.RefOfOtherAdmin(a, b) - case *OtherRead: - b, ok := inB.(*OtherRead) - if !ok { - return false - } - return cmp.RefOfOtherRead(a, b) case *PrepareStmt: b, ok := inB.(*PrepareStmt) if !ok { @@ -7232,6 +7230,7 @@ func (cmp *Comparator) RefOfColumnTypeOptions(a, b *ColumnTypeOptions) bool { return false } return a.Autoincrement == b.Autoincrement && + a.DefaultLiteral == b.DefaultLiteral && a.Collate == b.Collate && cmp.RefOfBool(a.Null, b.Null) && cmp.Expr(a.Default, b.Default) && diff --git a/go/vt/sqlparser/ast_format.go b/go/vt/sqlparser/ast_format.go index 8883f752351..1c02534e657 100644 --- a/go/vt/sqlparser/ast_format.go +++ b/go/vt/sqlparser/ast_format.go @@ -243,8 +243,12 @@ func (node *AlterVschema) Format(buf *TrackedBuffer) { buf.astPrintf(node, "alter vschema on %v drop vindex %v", node.Table, node.VindexSpec.Name) case AddSequenceDDLAction: buf.astPrintf(node, "alter vschema add sequence %v", node.Table) + case DropSequenceDDLAction: + buf.astPrintf(node, "alter vschema drop sequence %v", node.Table) case AddAutoIncDDLAction: buf.astPrintf(node, "alter vschema on %v add auto_increment %v", node.Table, node.AutoIncSpec) + case DropAutoIncDDLAction: + buf.astPrintf(node, "alter vschema on %v drop auto_increment %v", node.Table, node.AutoIncSpec) default: buf.astPrintf(node, "%s table %v", node.Action.ToString(), node.Table) } @@ -717,10 +721,10 @@ func (ct *ColumnType) Format(buf *TrackedBuffer) { } if ct.Options.Default != nil { buf.astPrintf(ct, " %s", keywordStrings[DEFAULT]) - if defaultRequiresParens(ct) { - buf.astPrintf(ct, " (%v)", ct.Options.Default) - } else { + if ct.Options.DefaultLiteral { buf.astPrintf(ct, " %v", ct.Options.Default) + } else { + buf.astPrintf(ct, " (%v)", ct.Options.Default) } } if ct.Options.OnUpdate != nil { @@ -1056,8 +1060,12 @@ func (node *CallProc) Format(buf 
*TrackedBuffer) { } // Format formats the node. -func (node *OtherRead) Format(buf *TrackedBuffer) { - buf.literal("otherread") +func (node *Analyze) Format(buf *TrackedBuffer) { + buf.literal("analyze ") + if node.IsLocal { + buf.literal("local ") + } + buf.astPrintf(node, "table %v", node.Table) } // Format formats the node. @@ -1392,13 +1400,13 @@ func (node *IntroducerExpr) Format(buf *TrackedBuffer) { } // Format formats the node. -func (node *TimestampFuncExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%#s(%#s, %v, %v)", node.Name, node.Unit, node.Expr1, node.Expr2) +func (node *TimestampDiffExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "timestampdiff(%#s, %v, %v)", node.Unit.ToString(), node.Expr1, node.Expr2) } // Format formats the node. func (node *ExtractFuncExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "extract(%#s from %v)", node.IntervalTypes.ToString(), node.Expr) + buf.astPrintf(node, "extract(%#s from %v)", node.IntervalType.ToString(), node.Expr) } // Format formats the node @@ -1459,40 +1467,24 @@ func (node *RegexpSubstrExpr) Format(buf *TrackedBuffer) { } // Format formats the node -func (node *DateAddExpr) Format(buf *TrackedBuffer) { - switch node.Type { - case AdddateType: - buf.astPrintf(node, "adddate(%v, ", node.Date) - if node.Unit == IntervalUnknown { - buf.astPrintf(node, "%v", node.Expr) - } else { - buf.astPrintf(node, "interval %v %#s", node.Expr, node.Unit.ToString()) - } - buf.WriteByte(')') - case DateAddType: - buf.astPrintf(node, "date_add(%v, interval %v %#s)", node.Date, node.Expr, node.Unit.ToString()) - case PlusIntervalLeftType: - buf.astPrintf(node, "interval %v %#s + %v", node.Expr, node.Unit.ToString(), node.Date) - case PlusIntervalRightType: - buf.astPrintf(node, "%v + interval %v %#s", node.Date, node.Expr, node.Unit.ToString()) - } -} - -// Format formats the node -func (node *DateSubExpr) Format(buf *TrackedBuffer) { - switch node.Type { - case SubdateType: - buf.astPrintf(node, 
"subdate(%v, ", node.Date) - if node.Unit == IntervalUnknown { - buf.astPrintf(node, "%v", node.Expr) - } else { - buf.astPrintf(node, "interval %v %#s", node.Expr, node.Unit.ToString()) +func (node *IntervalDateExpr) Format(buf *TrackedBuffer) { + switch node.Syntax { + case IntervalDateExprAdddate, IntervalDateExprSubdate: + if node.Unit == IntervalNone { + buf.astPrintf(node, "%s(%v, %v)", node.FnName(), node.Date, node.Interval) + return } - buf.WriteByte(')') - case DateSubType: - buf.astPrintf(node, "date_sub(%v, interval %v %#s)", node.Date, node.Expr, node.Unit.ToString()) - case MinusIntervalRightType: - buf.astPrintf(node, "%v - interval %v %#s", node.Date, node.Expr, node.Unit.ToString()) + fallthrough + case IntervalDateExprDateAdd, IntervalDateExprDateSub: + buf.astPrintf(node, "%s(%v, interval %v %#s)", node.FnName(), node.Date, node.Interval, node.Unit.ToString()) + case IntervalDateExprBinaryAdd: + buf.astPrintf(node, "%l + interval %r %#s", node.Date, node.Interval, node.Unit.ToString()) + case IntervalDateExprBinaryAddLeft: + buf.astPrintf(node, "interval %l %#s + %r", node.Interval, node.Unit.ToString(), node.Date) + case IntervalDateExprBinarySub: + buf.astPrintf(node, "%l - interval %r %#s", node.Date, node.Interval, node.Unit.ToString()) + case IntervalDateExprTimestampadd: + buf.astPrintf(node, "timestampadd(%#s, %v, %v)", node.Unit.ToString(), node.Interval, node.Date) } } @@ -1644,7 +1636,7 @@ func (node *FromFirstLastClause) Format(buf *TrackedBuffer) { // Format formats the node func (node *FramePoint) Format(buf *TrackedBuffer) { if node.Expr != nil { - if node.Unit != IntervalUnknown { + if node.Unit != IntervalNone { buf.astPrintf(node, " interval %v %#s", node.Expr, node.Unit.ToString()) } else { buf.astPrintf(node, " %v", node.Expr) @@ -2266,7 +2258,11 @@ func (node *AlterColumn) Format(buf *TrackedBuffer) { if node.DropDefault { buf.astPrintf(node, " drop default") } else if node.DefaultVal != nil { - buf.astPrintf(node, " set 
default %v", node.DefaultVal) + if node.DefaultLiteral { + buf.astPrintf(node, " set default %v", node.DefaultVal) + } else { + buf.astPrintf(node, " set default (%v)", node.DefaultVal) + } } if node.Invisible != nil { if *node.Invisible { @@ -2430,14 +2426,6 @@ func (node *RenameTable) Format(buf *TrackedBuffer) { } } -// Format formats the node. -// If an extracted subquery is still in the AST when we print it, -// it will be formatted as if the subquery has been extracted, and instead -// show up like argument comparisons -func (node *ExtractedSubquery) Format(buf *TrackedBuffer) { - node.alternative.Format(buf) -} - func (node *JSONTableExpr) Format(buf *TrackedBuffer) { buf.astPrintf(node, "json_table(%v, %v columns(\n", node.Expr, node.Filter) sz := len(node.Columns) @@ -2691,12 +2679,15 @@ func (node *Count) Format(buf *TrackedBuffer) { } func (node *CountStar) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.WriteString("*)") + buf.WriteString("count(*)") +} + +func (node *AnyValue) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "any_value(%v)", node.Arg) } func (node *Avg) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) + buf.WriteString("avg(") if node.Distinct { buf.literal(DistinctStr) } @@ -2704,7 +2695,7 @@ func (node *Avg) Format(buf *TrackedBuffer) { } func (node *Max) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) + buf.WriteString("max(") if node.Distinct { buf.literal(DistinctStr) } @@ -2712,7 +2703,7 @@ func (node *Max) Format(buf *TrackedBuffer) { } func (node *Min) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) + buf.WriteString("min(") if node.Distinct { buf.literal(DistinctStr) } @@ -2720,7 +2711,7 @@ func (node *Min) Format(buf *TrackedBuffer) { } func (node *Sum) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) + buf.WriteString("sum(") if node.Distinct { buf.literal(DistinctStr) } @@ -2728,53 
+2719,43 @@ func (node *Sum) Format(buf *TrackedBuffer) { } func (node *BitAnd) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "bit_and(%v)", node.Arg) } func (node *BitOr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "bit_or(%v)", node.Arg) } func (node *BitXor) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "bit_xor(%v)", node.Arg) } func (node *Std) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "std(%v)", node.Arg) } func (node *StdDev) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "stddev(%v)", node.Arg) } func (node *StdPop) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "stddev_pop(%v)", node.Arg) } func (node *StdSamp) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "stddev_samp(%v)", node.Arg) } func (node *VarPop) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "var_pop(%v)", node.Arg) } func (node *VarSamp) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "var_samp(%v)", node.Arg) } func (node *Variance) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "variance(%v)", node.Arg) } // Format formats the node. 
@@ -2965,3 +2946,8 @@ func (node *GeomFromGeoJSONExpr) Format(buf *TrackedBuffer) { } buf.WriteByte(')') } + +// Format formats the kill statement +func (node *Kill) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "kill %s %d", node.Type.ToString(), node.ProcesslistID) +} diff --git a/go/vt/sqlparser/ast_format_fast.go b/go/vt/sqlparser/ast_format_fast.go index 3286745e749..e75d42675c6 100644 --- a/go/vt/sqlparser/ast_format_fast.go +++ b/go/vt/sqlparser/ast_format_fast.go @@ -359,11 +359,19 @@ func (node *AlterVschema) formatFast(buf *TrackedBuffer) { case AddSequenceDDLAction: buf.WriteString("alter vschema add sequence ") node.Table.formatFast(buf) + case DropSequenceDDLAction: + buf.WriteString("alter vschema drop sequence ") + node.Table.formatFast(buf) case AddAutoIncDDLAction: buf.WriteString("alter vschema on ") node.Table.formatFast(buf) buf.WriteString(" add auto_increment ") node.AutoIncSpec.formatFast(buf) + case DropAutoIncDDLAction: + buf.WriteString("alter vschema on ") + node.Table.formatFast(buf) + buf.WriteString(" drop auto_increment ") + node.AutoIncSpec.formatFast(buf) default: buf.WriteString(node.Action.ToString()) buf.WriteString(" table ") @@ -951,13 +959,13 @@ func (ct *ColumnType) formatFast(buf *TrackedBuffer) { if ct.Options.Default != nil { buf.WriteByte(' ') buf.WriteString(keywordStrings[DEFAULT]) - if defaultRequiresParens(ct) { - buf.WriteString(" (") + if ct.Options.DefaultLiteral { + buf.WriteByte(' ') ct.Options.Default.formatFast(buf) - buf.WriteByte(')') } else { - buf.WriteByte(' ') + buf.WriteString(" (") ct.Options.Default.formatFast(buf) + buf.WriteByte(')') } } if ct.Options.OnUpdate != nil { @@ -1400,8 +1408,13 @@ func (node *CallProc) formatFast(buf *TrackedBuffer) { } // formatFast formats the node. 
-func (node *OtherRead) formatFast(buf *TrackedBuffer) { - buf.WriteString("otherread") +func (node *Analyze) formatFast(buf *TrackedBuffer) { + buf.WriteString("analyze ") + if node.IsLocal { + buf.WriteString("local ") + } + buf.WriteString("table ") + node.Table.formatFast(buf) } // formatFast formats the node. @@ -1820,10 +1833,9 @@ func (node *IntroducerExpr) formatFast(buf *TrackedBuffer) { } // formatFast formats the node. -func (node *TimestampFuncExpr) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.Name) - buf.WriteByte('(') - buf.WriteString(node.Unit) +func (node *TimestampDiffExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString("timestampdiff(") + buf.WriteString(node.Unit.ToString()) buf.WriteString(", ") buf.printExpr(node, node.Expr1, true) buf.WriteString(", ") @@ -1834,7 +1846,7 @@ func (node *TimestampFuncExpr) formatFast(buf *TrackedBuffer) { // formatFast formats the node. func (node *ExtractFuncExpr) formatFast(buf *TrackedBuffer) { buf.WriteString("extract(") - buf.WriteString(node.IntervalTypes.ToString()) + buf.WriteString(node.IntervalType.ToString()) buf.WriteString(" from ") buf.printExpr(node, node.Expr, true) buf.WriteByte(')') @@ -1923,75 +1935,55 @@ func (node *RegexpSubstrExpr) formatFast(buf *TrackedBuffer) { } // formatFast formats the node -func (node *DateAddExpr) formatFast(buf *TrackedBuffer) { - switch node.Type { - case AdddateType: - buf.WriteString("adddate(") - buf.printExpr(node, node.Date, true) - buf.WriteString(", ") - if node.Unit == IntervalUnknown { - buf.printExpr(node, node.Expr, true) - } else { - buf.WriteString("interval ") - buf.printExpr(node, node.Expr, true) - buf.WriteByte(' ') - buf.WriteString(node.Unit.ToString()) +func (node *IntervalDateExpr) formatFast(buf *TrackedBuffer) { + switch node.Syntax { + case IntervalDateExprAdddate, IntervalDateExprSubdate: + if node.Unit == IntervalNone { + buf.WriteString(node.FnName()) + buf.WriteByte('(') + buf.printExpr(node, node.Date, true) + 
buf.WriteString(", ") + buf.printExpr(node, node.Interval, true) + buf.WriteByte(')') + return } - buf.WriteByte(')') - case DateAddType: - buf.WriteString("date_add(") + fallthrough + case IntervalDateExprDateAdd, IntervalDateExprDateSub: + buf.WriteString(node.FnName()) + buf.WriteByte('(') buf.printExpr(node, node.Date, true) buf.WriteString(", interval ") - buf.printExpr(node, node.Expr, true) + buf.printExpr(node, node.Interval, true) buf.WriteByte(' ') buf.WriteString(node.Unit.ToString()) buf.WriteByte(')') - case PlusIntervalLeftType: - buf.WriteString("interval ") - buf.printExpr(node, node.Expr, true) - buf.WriteByte(' ') - buf.WriteString(node.Unit.ToString()) - buf.WriteString(" + ") - buf.printExpr(node, node.Date, true) - case PlusIntervalRightType: + case IntervalDateExprBinaryAdd: buf.printExpr(node, node.Date, true) buf.WriteString(" + interval ") - buf.printExpr(node, node.Expr, true) + buf.printExpr(node, node.Interval, false) buf.WriteByte(' ') buf.WriteString(node.Unit.ToString()) - } -} - -// formatFast formats the node -func (node *DateSubExpr) formatFast(buf *TrackedBuffer) { - switch node.Type { - case SubdateType: - buf.WriteString("subdate(") - buf.printExpr(node, node.Date, true) - buf.WriteString(", ") - if node.Unit == IntervalUnknown { - buf.printExpr(node, node.Expr, true) - } else { - buf.WriteString("interval ") - buf.printExpr(node, node.Expr, true) - buf.WriteByte(' ') - buf.WriteString(node.Unit.ToString()) - } - buf.WriteByte(')') - case DateSubType: - buf.WriteString("date_sub(") - buf.printExpr(node, node.Date, true) - buf.WriteString(", interval ") - buf.printExpr(node, node.Expr, true) + case IntervalDateExprBinaryAddLeft: + buf.WriteString("interval ") + buf.printExpr(node, node.Interval, true) buf.WriteByte(' ') buf.WriteString(node.Unit.ToString()) - buf.WriteByte(')') - case MinusIntervalRightType: + buf.WriteString(" + ") + buf.printExpr(node, node.Date, false) + case IntervalDateExprBinarySub: buf.printExpr(node, 
node.Date, true) buf.WriteString(" - interval ") - buf.printExpr(node, node.Expr, true) + buf.printExpr(node, node.Interval, false) buf.WriteByte(' ') buf.WriteString(node.Unit.ToString()) + case IntervalDateExprTimestampadd: + buf.WriteString("timestampadd(") + buf.WriteString(node.Unit.ToString()) + buf.WriteString(", ") + buf.printExpr(node, node.Interval, true) + buf.WriteString(", ") + buf.printExpr(node, node.Date, true) + buf.WriteByte(')') } } @@ -2189,7 +2181,7 @@ func (node *FromFirstLastClause) formatFast(buf *TrackedBuffer) { // formatFast formats the node func (node *FramePoint) formatFast(buf *TrackedBuffer) { if node.Expr != nil { - if node.Unit != IntervalUnknown { + if node.Unit != IntervalNone { buf.WriteString(" interval ") node.Expr.formatFast(buf) buf.WriteByte(' ') @@ -3007,8 +2999,14 @@ func (node *AlterColumn) formatFast(buf *TrackedBuffer) { if node.DropDefault { buf.WriteString(" drop default") } else if node.DefaultVal != nil { - buf.WriteString(" set default ") - node.DefaultVal.formatFast(buf) + if node.DefaultLiteral { + buf.WriteString(" set default ") + node.DefaultVal.formatFast(buf) + } else { + buf.WriteString(" set default (") + node.DefaultVal.formatFast(buf) + buf.WriteByte(')') + } } if node.Invisible != nil { if *node.Invisible { @@ -3202,14 +3200,6 @@ func (node *RenameTable) formatFast(buf *TrackedBuffer) { } } -// formatFast formats the node. 
-// If an extracted subquery is still in the AST when we print it, -// it will be formatted as if the subquery has been extracted, and instead -// show up like argument comparisons -func (node *ExtractedSubquery) formatFast(buf *TrackedBuffer) { - node.alternative.Format(buf) -} - func (node *JSONTableExpr) formatFast(buf *TrackedBuffer) { buf.WriteString("json_table(") node.Expr.formatFast(buf) @@ -3554,14 +3544,17 @@ func (node *Count) formatFast(buf *TrackedBuffer) { } func (node *CountStar) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') - buf.WriteString("*)") + buf.WriteString("count(*)") +} + +func (node *AnyValue) formatFast(buf *TrackedBuffer) { + buf.WriteString("any_value(") + buf.printExpr(node, node.Arg, true) + buf.WriteByte(')') } func (node *Avg) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("avg(") if node.Distinct { buf.WriteString(DistinctStr) } @@ -3570,8 +3563,7 @@ func (node *Avg) formatFast(buf *TrackedBuffer) { } func (node *Max) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("max(") if node.Distinct { buf.WriteString(DistinctStr) } @@ -3580,8 +3572,7 @@ func (node *Max) formatFast(buf *TrackedBuffer) { } func (node *Min) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("min(") if node.Distinct { buf.WriteString(DistinctStr) } @@ -3590,8 +3581,7 @@ func (node *Min) formatFast(buf *TrackedBuffer) { } func (node *Sum) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("sum(") if node.Distinct { buf.WriteString(DistinctStr) } @@ -3600,71 +3590,61 @@ func (node *Sum) formatFast(buf *TrackedBuffer) { } func (node *BitAnd) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("bit_and(") buf.printExpr(node, node.Arg, true) 
buf.WriteByte(')') } func (node *BitOr) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("bit_or(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') } func (node *BitXor) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("bit_xor(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') } func (node *Std) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("std(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') } func (node *StdDev) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("stddev(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') } func (node *StdPop) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("stddev_pop(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') } func (node *StdSamp) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("stddev_samp(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') } func (node *VarPop) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("var_pop(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') } func (node *VarSamp) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("var_samp(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') } func (node *Variance) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("variance(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') } @@ -3925,3 +3905,11 @@ func (node *GeomFromGeoJSONExpr) formatFast(buf *TrackedBuffer) { } buf.WriteByte(')') } + +// formatFast formats the kill statement +func (node *Kill) formatFast(buf *TrackedBuffer) { + 
buf.WriteString("kill ") + buf.WriteString(node.Type.ToString()) + buf.WriteByte(' ') + buf.WriteString(fmt.Sprintf("%d", node.ProcesslistID)) +} diff --git a/go/vt/sqlparser/ast_funcs.go b/go/vt/sqlparser/ast_funcs.go index e469a02432f..fe36386462b 100644 --- a/go/vt/sqlparser/ast_funcs.go +++ b/go/vt/sqlparser/ast_funcs.go @@ -37,7 +37,11 @@ import ( // If postVisit returns true, the underlying nodes // are also visited. If it returns an error, walking // is interrupted, and the error is returned. -func Walk(visit Visit, nodes ...SQLNode) error { +func Walk(visit Visit, first SQLNode, nodes ...SQLNode) error { + err := VisitSQLNode(first, visit) + if err != nil { + return err + } for _, node := range nodes { err := VisitSQLNode(node, visit) if err != nil { @@ -130,7 +134,7 @@ const ( type MatchAction int const ( - // DefaultAction indicates no action was explicitly specified. + // DefaultMatch indicates no action was explicitly specified. DefaultMatch MatchAction = iota Full Partial @@ -601,31 +605,6 @@ func (node *ColName) Equal(c *ColName) bool { return node.Name.Equal(c.Name) && node.Qualifier == c.Qualifier } -// Aggregates is a map of all aggregate functions. -var Aggregates = map[string]bool{ - "avg": true, - "bit_and": true, - "bit_or": true, - "bit_xor": true, - "count": true, - "group_concat": true, - "max": true, - "min": true, - "std": true, - "stddev_pop": true, - "stddev_samp": true, - "stddev": true, - "sum": true, - "var_pop": true, - "var_samp": true, - "variance": true, -} - -// IsAggregate returns true if the function is an aggregate. -func (node *FuncExpr) IsAggregate() bool { - return Aggregates[node.Name.Lowered()] -} - // NewIdentifierCI makes a new IdentifierCI. 
func NewIdentifierCI(str string) IdentifierCI { return IdentifierCI{ @@ -651,6 +630,134 @@ func NewColNameWithQualifier(identifier string, table TableName) *ColName { } } +// NewTableName makes a new TableName +func NewTableName(name string) TableName { + return TableName{ + Name: NewIdentifierCS(name), + } +} + +// NewTableNameWithQualifier makes a new TableName with a qualifier +func NewTableNameWithQualifier(name, qualifier string) TableName { + return TableName{ + Name: NewIdentifierCS(name), + Qualifier: NewIdentifierCS(qualifier), + } +} + +// NewSubquery makes a new Subquery +func NewSubquery(selectStatement SelectStatement) *Subquery { + return &Subquery{Select: selectStatement} +} + +// NewDerivedTable makes a new DerivedTable +func NewDerivedTable(lateral bool, selectStatement SelectStatement) *DerivedTable { + return &DerivedTable{ + Lateral: lateral, + Select: selectStatement, + } +} + +// NewAliasedTableExpr makes a new AliasedTableExpr with an alias +func NewAliasedTableExpr(simpleTableExpr SimpleTableExpr, alias string) *AliasedTableExpr { + return &AliasedTableExpr{ + Expr: simpleTableExpr, + As: NewIdentifierCS(alias), + } +} + +// NewJoinTableExpr makes a new JoinTableExpr +func NewJoinTableExpr(leftExpr TableExpr, join JoinType, rightExpr TableExpr, condition *JoinCondition) *JoinTableExpr { + return &JoinTableExpr{ + LeftExpr: leftExpr, + Join: join, + RightExpr: rightExpr, + Condition: condition, + } +} + +// NewJoinCondition makes a new JoinCondition +func NewJoinCondition(on Expr, using Columns) *JoinCondition { + return &JoinCondition{ + On: on, + Using: using, + } +} + +// NewAliasedExpr makes a new AliasedExpr +func NewAliasedExpr(expr Expr, alias string) *AliasedExpr { + return &AliasedExpr{ + Expr: expr, + As: NewIdentifierCI(alias), + } +} + +func (ae *AliasedExpr) SetAlias(alias string) { + ae.As = NewIdentifierCI(alias) +} + +// NewOrder makes a new Order +func NewOrder(expr Expr, direction OrderDirection) *Order { + return &Order{ + 
Expr: expr, + Direction: direction, + } +} + +// NewNotExpr makes a new NotExpr +func NewNotExpr(expr Expr) *NotExpr { + return &NotExpr{Expr: expr} +} + +// NewComparisonExpr makes a new ComparisonExpr +func NewComparisonExpr(operator ComparisonExprOperator, left, right, escape Expr) *ComparisonExpr { + return &ComparisonExpr{ + Operator: operator, + Left: left, + Right: right, + Escape: escape, + } +} + +// NewExistsExpr makes a new ExistsExpr +func NewExistsExpr(subquery *Subquery) *ExistsExpr { + return &ExistsExpr{Subquery: subquery} +} + +// NewCaseExpr makes a new CaseExpr +func NewCaseExpr(expr Expr, whens []*When, elseExpr Expr) *CaseExpr { + return &CaseExpr{ + Expr: expr, + Whens: whens, + Else: elseExpr, + } +} + +// NewLimit makes a new Limit +func NewLimit(offset, rowCount int) *Limit { + return &Limit{ + Offset: &Literal{ + Type: IntVal, + Val: fmt.Sprint(offset), + }, + Rowcount: &Literal{ + Type: IntVal, + Val: fmt.Sprint(rowCount), + }, + } +} + +// NewLimitWithoutOffset makes a new Limit without an offset +func NewLimitWithoutOffset(rowCount int) *Limit { + return &Limit{ + Offset: nil, + Rowcount: &Literal{ + Type: IntVal, + Val: fmt.Sprint(rowCount), + }, + } +} + // NewSelect is used to create a select statement func NewSelect(comments Comments, exprs SelectExprs, selectOptions []string, into *SelectInto, from TableExprs, where *Where, groupBy GroupBy, having *Where, windows NamedWindows) *Select { var cache *bool @@ -918,6 +1025,10 @@ func compliantName(in string) string { return buf.String() } +func (node *Select) AddSelectExprs(selectExprs SelectExprs) { + node.SelectExprs = append(node.SelectExprs, selectExprs...) 
+} + // AddOrder adds an order by element func (node *Select) AddOrder(order *Order) { node.OrderBy = append(node.OrderBy, order) @@ -943,6 +1054,11 @@ func (node *Select) GetLimit() *Limit { return node.Limit } +// GetLock returns the lock clause +func (node *Select) GetLock() Lock { + return node.Lock +} + // SetLock sets the lock clause func (node *Select) SetLock(lock Lock) { node.Lock = lock @@ -963,6 +1079,11 @@ func (node *Select) MakeDistinct() { node.Distinct = true } +// IsDistinct implements the SelectStatement interface +func (node *Select) IsDistinct() bool { + return node.Distinct +} + // GetColumnCount return SelectExprs count. func (node *Select) GetColumnCount() int { return len(node.SelectExprs) @@ -986,29 +1107,14 @@ func (node *Select) GetParsedComments() *ParsedComments { // AddWhere adds the boolean expression to the // WHERE clause as an AND condition. func (node *Select) AddWhere(expr Expr) { - if node.Where == nil { - node.Where = &Where{ - Type: WhereClause, - Expr: expr, - } - return - } - exprs := SplitAndExpression(nil, node.Where.Expr) - node.Where.Expr = AndExpressions(append(exprs, expr)...) + node.Where = addPredicate(node.Where, expr) } // AddHaving adds the boolean expression to the // HAVING clause as an AND condition. func (node *Select) AddHaving(expr Expr) { - if node.Having == nil { - node.Having = &Where{ - Type: HavingClause, - Expr: expr, - } - return - } - exprs := SplitAndExpression(nil, node.Having.Expr) - node.Having.Expr = AndExpressions(append(exprs, expr)...) + node.Having = addPredicate(node.Having, expr) + node.Having.Type = HavingClause } // AddGroupBy adds a grouping expression, unless it's already present @@ -1025,17 +1131,27 @@ func (node *Select) AddGroupBy(expr Expr) { // AddWhere adds the boolean expression to the // WHERE clause as an AND condition. 
func (node *Update) AddWhere(expr Expr) { - if node.Where == nil { - node.Where = &Where{ + node.Where = addPredicate(node.Where, expr) +} + +func addPredicate(where *Where, pred Expr) *Where { + if where == nil { + return &Where{ Type: WhereClause, - Expr: expr, + Expr: pred, } - return } - node.Where.Expr = &AndExpr{ - Left: node.Where.Expr, - Right: expr, + where.Expr = &AndExpr{ + Left: where.Expr, + Right: pred, } + return where +} + +// AddWhere adds the boolean expression to the +// WHERE clause as an AND condition. +func (node *Delete) AddWhere(expr Expr) { + node.Where = addPredicate(node.Where, expr) } // AddOrder adds an order by element @@ -1068,6 +1184,11 @@ func (node *Union) GetColumns() SelectExprs { return node.Left.GetColumns() } +// GetLock returns the lock clause +func (node *Union) GetLock() Lock { + return node.Lock +} + // SetLock sets the lock clause func (node *Union) SetLock(lock Lock) { node.Lock = lock @@ -1088,6 +1209,11 @@ func (node *Union) MakeDistinct() { node.Distinct = true } +// IsDistinct implements the SelectStatement interface +func (node *Union) IsDistinct() bool { + return node.Distinct +} + // GetColumnCount implements the SelectStatement interface func (node *Union) GetColumnCount() int { return node.Left.GetColumnCount() @@ -1145,8 +1271,12 @@ func (action DDLAction) ToString() string { return DropColVindexStr case AddSequenceDDLAction: return AddSequenceStr + case DropSequenceDDLAction: + return DropSequenceStr case AddAutoIncDDLAction: return AddAutoIncStr + case DropAutoIncDDLAction: + return DropAutoIncStr default: return "Unknown DDL Action" } @@ -1680,54 +1810,6 @@ func (ty VExplainType) ToString() string { } } -// ToString returns the type as a string -func (ty IntervalTypes) ToString() string { - switch ty { - case IntervalYear: - return YearStr - case IntervalQuarter: - return QuarterStr - case IntervalMonth: - return MonthStr - case IntervalWeek: - return WeekStr - case IntervalDay: - return DayStr - case 
IntervalHour: - return HourStr - case IntervalMinute: - return MinuteStr - case IntervalSecond: - return SecondStr - case IntervalMicrosecond: - return MicrosecondStr - case IntervalYearMonth: - return YearMonthStr - case IntervalDayHour: - return DayHourStr - case IntervalDayMinute: - return DayMinuteStr - case IntervalDaySecond: - return DaySecondStr - case IntervalHourMinute: - return HourMinuteStr - case IntervalHourSecond: - return HourSecondStr - case IntervalMinuteSecond: - return MinuteSecondStr - case IntervalDayMicrosecond: - return DayMicrosecondStr - case IntervalHourMicrosecond: - return HourMicrosecondStr - case IntervalMinuteMicrosecond: - return MinuteMicrosecondStr - case IntervalSecondMicrosecond: - return SecondMicrosecondStr - default: - return "Unknown IntervalType" - } -} - // ToString returns the type as a string func (sel SelectIntoType) ToString() string { switch sel { @@ -1927,17 +2009,6 @@ func (node *ColName) CompliantName() string { return node.Name.CompliantName() } -// isExprAliasForCurrentTimeStamp returns true if the Expr provided is an alias for CURRENT_TIMESTAMP -func isExprAliasForCurrentTimeStamp(expr Expr) bool { - switch node := expr.(type) { - case *FuncExpr: - return node.Name.EqualString("current_timestamp") || node.Name.EqualString("now") || node.Name.EqualString("localtimestamp") || node.Name.EqualString("localtime") - case *CurTimeFuncExpr: - return node.Name.EqualString("current_timestamp") || node.Name.EqualString("now") || node.Name.EqualString("localtimestamp") || node.Name.EqualString("localtime") - } - return false -} - // AtCount represents the '@' count in IdentifierCI type AtCount int @@ -2026,59 +2097,6 @@ func GetAllSelects(selStmt SelectStatement) []*Select { panic("[BUG]: unknown type for SelectStatement") } -// SetArgName sets argument name. -func (es *ExtractedSubquery) SetArgName(n string) { - es.argName = n - es.updateAlternative() -} - -// SetHasValuesArg sets has_values argument. 
-func (es *ExtractedSubquery) SetHasValuesArg(n string) { - es.hasValuesArg = n - es.updateAlternative() -} - -// GetArgName returns argument name. -func (es *ExtractedSubquery) GetArgName() string { - return es.argName -} - -// GetHasValuesArg returns has values argument. -func (es *ExtractedSubquery) GetHasValuesArg() string { - return es.hasValuesArg - -} - -func (es *ExtractedSubquery) updateAlternative() { - switch original := es.Original.(type) { - case *ExistsExpr: - es.alternative = NewArgument(es.hasValuesArg) - case *Subquery: - es.alternative = NewArgument(es.argName) - case *ComparisonExpr: - // other_side = :__sq - cmp := &ComparisonExpr{ - Left: es.OtherSide, - Right: NewArgument(es.argName), - Operator: original.Operator, - } - var expr Expr = cmp - switch original.Operator { - case InOp: - // :__sq_has_values = 1 and other_side in ::__sq - cmp.Right = NewListArg(es.argName) - hasValue := &ComparisonExpr{Left: NewArgument(es.hasValuesArg), Right: NewIntLiteral("1"), Operator: EqualOp} - expr = AndExpressions(hasValue, cmp) - case NotInOp: - // :__sq_has_values = 0 or other_side not in ::__sq - cmp.Right = NewListArg(es.argName) - hasValue := &ComparisonExpr{Left: NewArgument(es.hasValuesArg), Right: NewIntLiteral("0"), Operator: EqualOp} - expr = &OrExpr{hasValue, cmp} - } - es.alternative = expr - } -} - // ColumnName returns the alias if one was provided, otherwise prints the AST func (ae *AliasedExpr) ColumnName() string { if !ae.As.IsEmpty() { @@ -2102,41 +2120,6 @@ func (s SelectExprs) AllAggregation() bool { return true } -func isExprLiteral(expr Expr) bool { - switch expr := expr.(type) { - case *Literal: - return true - case BoolVal: - return true - case *UnaryExpr: - return isExprLiteral(expr.Expr) - default: - return false - } -} - -func defaultRequiresParens(ct *ColumnType) bool { - // in 5.7 null value should be without parenthesis, in 8.0 it is allowed either way. - // so it is safe to not keep parenthesis around null. 
- if _, isNullVal := ct.Options.Default.(*NullVal); isNullVal { - return false - } - - switch strings.ToUpper(ct.Type) { - case "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT", "TINYBLOB", "BLOB", "MEDIUMBLOB", - "LONGBLOB", "JSON", "GEOMETRY", "POINT", - "LINESTRING", "POLYGON", "MULTIPOINT", "MULTILINESTRING", - "MULTIPOLYGON", "GEOMETRYCOLLECTION": - return true - } - - if isExprLiteral(ct.Options.Default) || isExprAliasForCurrentTimeStamp(ct.Options.Default) { - return false - } - - return true -} - // RemoveKeyspaceFromColName removes the Qualifier.Qualifier on all ColNames in the expression tree func RemoveKeyspaceFromColName(expr Expr) { RemoveKeyspace(expr) @@ -2161,6 +2144,11 @@ func convertStringToInt(integer string) int { return val } +func convertStringToUInt64(integer string) uint64 { + val, _ := strconv.ParseUint(integer, 10, 64) + return val +} + // SplitAndExpression breaks up the Expr into AND-separated conditions // and appends them to filters. Outer parenthesis are removed. Precedence // should be taken into account if expressions are recombined. 
@@ -2373,3 +2361,138 @@ func getAliasedTableExprFromTableName(tblName TableName) *AliasedTableExpr { Expr: tblName, } } + +func (node *IntervalDateExpr) IsSubtraction() bool { + switch node.Syntax { + case IntervalDateExprDateAdd, IntervalDateExprAdddate, IntervalDateExprBinaryAdd, IntervalDateExprBinaryAddLeft, IntervalDateExprTimestampadd: + return false + case IntervalDateExprDateSub, IntervalDateExprSubdate, IntervalDateExprBinarySub: + return true + default: + panic("invalid IntervalDateExpr syntax") + } +} + +func (node *IntervalDateExpr) NormalizedUnit() IntervalType { + if node.Unit == IntervalNone { + if node.Syntax == IntervalDateExprAdddate || node.Syntax == IntervalDateExprSubdate { + return IntervalDay + } + panic("IntervalDateExpr.Unit is not set") + } + return node.Unit +} + +func (node *IntervalDateExpr) FnName() string { + switch node.Syntax { + case IntervalDateExprDateAdd: + return "date_add" + case IntervalDateExprDateSub: + return "date_sub" + case IntervalDateExprAdddate: + return "adddate" + case IntervalDateExprSubdate: + return "subdate" + case IntervalDateExprTimestampadd: + return "timestampadd" + case IntervalDateExprBinaryAdd, IntervalDateExprBinaryAddLeft: + return "" + case IntervalDateExprBinarySub: + return "" + default: + return "" + } +} + +func IsDistinct(f AggrFunc) bool { + da, ok := f.(DistinctableAggr) + if !ok { + return false + } + return da.IsDistinct() +} + +// ToString returns the type as a string +func (ty KillType) ToString() string { + switch ty { + case QueryType: + return QueryStr + default: + return ConnectionStr + } +} + +// Indexes returns true, if the list of columns contains all the elements in the other list. +// It also returns the indexes of the columns in the list. 
+func (cols Columns) Indexes(subSetCols Columns) (bool, []int) { + var indexes []int + for _, subSetCol := range subSetCols { + colFound := false + for idx, col := range cols { + if col.Equal(subSetCol) { + colFound = true + indexes = append(indexes, idx) + break + } + } + if !colFound { + return false, nil + } + } + return true, indexes +} + +// MakeColumns is used to make a list of columns from a list of strings. +// This function is meant to be used in testing code. +func MakeColumns(colNames ...string) Columns { + var cols Columns + for _, name := range colNames { + cols = append(cols, NewIdentifierCI(name)) + } + return cols +} + +func VisitAllSelects(in SelectStatement, f func(p *Select, idx int) error) error { + v := visitor{} + return v.visitAllSelects(in, f) +} + +type visitor struct { + idx int +} + +func (v *visitor) visitAllSelects(in SelectStatement, f func(p *Select, idx int) error) error { + switch sel := in.(type) { + case *Select: + err := f(sel, v.idx) + v.idx++ + return err + case *Union: + err := v.visitAllSelects(sel.Left, f) + if err != nil { + return err + } + return v.visitAllSelects(sel.Right, f) + } + panic("switch should be exhaustive") +} + +// IsRestrict returns true if the reference action is of restrict type. +func (ra ReferenceAction) IsRestrict() bool { + switch ra { + case Restrict, NoAction, DefaultAction: + return true + default: + return false + } +} + +// IsLiteral returns true if the expression is of a literal type. +func IsLiteral(expr Expr) bool { + switch expr.(type) { + case *Argument, *NullVal, BoolVal, *Literal: + return true + default: + return false + } +} diff --git a/go/vt/sqlparser/ast_funcs_test.go b/go/vt/sqlparser/ast_funcs_test.go index b6a79da45ab..7bec47df96f 100644 --- a/go/vt/sqlparser/ast_funcs_test.go +++ b/go/vt/sqlparser/ast_funcs_test.go @@ -134,3 +134,41 @@ func TestSQLTypeToQueryType(t *testing.T) { }) } } + +// TestColumns_Indexes verifies the functionality of Indexes method on Columns. 
+func TestColumns_Indexes(t *testing.T) { + tests := []struct { + name string + cols Columns + subSetCols Columns + indexesWanted []int + }{ + { + name: "Not a subset", + cols: MakeColumns("col1", "col2", "col3"), + subSetCols: MakeColumns("col2", "col4"), + }, { + name: "Subset with 1 value", + cols: MakeColumns("col1", "col2", "col3"), + subSetCols: MakeColumns("col2"), + indexesWanted: []int{1}, + }, { + name: "Subset with multiple values", + cols: MakeColumns("col1", "col2", "col3", "col4", "col5"), + subSetCols: MakeColumns("col3", "col5", "col1"), + indexesWanted: []int{2, 4, 0}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + isSubset, indexes := tt.cols.Indexes(tt.subSetCols) + if tt.indexesWanted == nil { + require.False(t, isSubset) + require.Nil(t, indexes) + return + } + require.True(t, isSubset) + require.EqualValues(t, tt.indexesWanted, indexes) + }) + } +} diff --git a/go/vt/sqlparser/ast_rewrite.go b/go/vt/sqlparser/ast_rewrite.go index 569375226ad..c5f7f66e71b 100644 --- a/go/vt/sqlparser/ast_rewrite.go +++ b/go/vt/sqlparser/ast_rewrite.go @@ -52,8 +52,12 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl return a.rewriteRefOfAlterView(parent, node, replacer) case *AlterVschema: return a.rewriteRefOfAlterVschema(parent, node, replacer) + case *Analyze: + return a.rewriteRefOfAnalyze(parent, node, replacer) case *AndExpr: return a.rewriteRefOfAndExpr(parent, node, replacer) + case *AnyValue: + return a.rewriteRefOfAnyValue(parent, node, replacer) case *Argument: return a.rewriteRefOfArgument(parent, node, replacer) case *ArgumentLessWindowExpr: @@ -128,10 +132,6 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl return a.rewriteRefOfCreateView(parent, node, replacer) case *CurTimeFuncExpr: return a.rewriteRefOfCurTimeFuncExpr(parent, node, replacer) - case *DateAddExpr: - return a.rewriteRefOfDateAddExpr(parent, node, replacer) - case *DateSubExpr: - 
return a.rewriteRefOfDateSubExpr(parent, node, replacer) case *DeallocateStmt: return a.rewriteRefOfDeallocateStmt(parent, node, replacer) case *Default: @@ -166,8 +166,6 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl return a.rewriteRefOfExtractFuncExpr(parent, node, replacer) case *ExtractValueExpr: return a.rewriteRefOfExtractValueExpr(parent, node, replacer) - case *ExtractedSubquery: - return a.rewriteRefOfExtractedSubquery(parent, node, replacer) case *FieldsClause: return a.rewriteRefOfFieldsClause(parent, node, replacer) case *FirstOrLastValueExpr: @@ -228,6 +226,8 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl return a.rewriteRefOfInsert(parent, node, replacer) case *InsertExpr: return a.rewriteRefOfInsertExpr(parent, node, replacer) + case *IntervalDateExpr: + return a.rewriteRefOfIntervalDateExpr(parent, node, replacer) case *IntervalFuncExpr: return a.rewriteRefOfIntervalFuncExpr(parent, node, replacer) case *IntroducerExpr: @@ -288,6 +288,8 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl return a.rewriteRefOfJtOnResponse(parent, node, replacer) case *KeyState: return a.rewriteRefOfKeyState(parent, node, replacer) + case *Kill: + return a.rewriteRefOfKill(parent, node, replacer) case *LagLeadExpr: return a.rewriteRefOfLagLeadExpr(parent, node, replacer) case *Limit: @@ -364,8 +366,6 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl return a.rewriteRefOfOrderByOption(parent, node, replacer) case *OtherAdmin: return a.rewriteRefOfOtherAdmin(parent, node, replacer) - case *OtherRead: - return a.rewriteRefOfOtherRead(parent, node, replacer) case *OverClause: return a.rewriteRefOfOverClause(parent, node, replacer) case *ParenTableExpr: @@ -498,8 +498,8 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl return a.rewriteRefOfTableSpec(parent, node, replacer) case *TablespaceOperation: 
return a.rewriteRefOfTablespaceOperation(parent, node, replacer) - case *TimestampFuncExpr: - return a.rewriteRefOfTimestampFuncExpr(parent, node, replacer) + case *TimestampDiffExpr: + return a.rewriteRefOfTimestampDiffExpr(parent, node, replacer) case *TrimFuncExpr: return a.rewriteRefOfTrimFuncExpr(parent, node, replacer) case *TruncateTable: @@ -1042,7 +1042,7 @@ func (a *application) rewriteRefOfAlterVschema(parent SQLNode, node *AlterVschem } return true } -func (a *application) rewriteRefOfAndExpr(parent SQLNode, node *AndExpr, replacer replacerFunc) bool { +func (a *application) rewriteRefOfAnalyze(parent SQLNode, node *Analyze, replacer replacerFunc) bool { if node == nil { return true } @@ -1054,6 +1054,38 @@ func (a *application) rewriteRefOfAndExpr(parent SQLNode, node *AndExpr, replace return true } } + if !a.rewriteTableName(node, node.Table, func(newNode, parent SQLNode) { + parent.(*Analyze).Table = newNode.(TableName) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfAndExpr(parent SQLNode, node *AndExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { + return true + } + } if !a.rewriteExpr(node, node.Left, func(newNode, parent SQLNode) { parent.(*AndExpr).Left = newNode.(Expr) }) { @@ -1074,6 +1106,38 @@ func (a *application) rewriteRefOfAndExpr(parent SQLNode, node *AndExpr, replace } return true } +func (a *application) rewriteRefOfAnyValue(parent SQLNode, node *AnyValue, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node 
= node + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { + return true + } + } + if !a.rewriteExpr(node, node.Arg, func(newNode, parent SQLNode) { + parent.(*AnyValue).Arg = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} func (a *application) rewriteRefOfArgument(parent SQLNode, node *Argument, replacer replacerFunc) bool { if node == nil { return true @@ -1082,7 +1146,12 @@ func (a *application) rewriteRefOfArgument(parent SQLNode, node *Argument, repla a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -1106,7 +1175,12 @@ func (a *application) rewriteRefOfArgumentLessWindowExpr(parent SQLNode, node *A a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -1133,7 +1207,12 @@ func (a *application) rewriteRefOfAssignmentExpr(parent SQLNode, node *Assignmen a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -1197,7 +1276,12 @@ func (a *application) rewriteRefOfAvg(parent SQLNode, node *Avg, replacer replac a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, 
a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -1248,7 +1332,12 @@ func (a *application) rewriteRefOfBetweenExpr(parent SQLNode, node *BetweenExpr, a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -1285,7 +1374,12 @@ func (a *application) rewriteRefOfBinaryExpr(parent SQLNode, node *BinaryExpr, r a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -1317,7 +1411,12 @@ func (a *application) rewriteRefOfBitAnd(parent SQLNode, node *BitAnd, replacer a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -1344,7 +1443,12 @@ func (a *application) rewriteRefOfBitOr(parent SQLNode, node *BitOr, replacer re a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -1371,7 +1475,12 @@ func (a *application) rewriteRefOfBitXor(parent SQLNode, node *BitXor, replacer a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -1430,7 +1539,12 @@ func (a *application) rewriteRefOfCaseExpr(parent SQLNode, node *CaseExpr, repla a.cur.replacer = 
replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -1471,7 +1585,12 @@ func (a *application) rewriteRefOfCastExpr(parent SQLNode, node *CastExpr, repla a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -1540,7 +1659,12 @@ func (a *application) rewriteRefOfCharExpr(parent SQLNode, node *CharExpr, repla a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -1594,7 +1718,12 @@ func (a *application) rewriteRefOfColName(parent SQLNode, node *ColName, replace a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -1626,7 +1755,12 @@ func (a *application) rewriteRefOfCollateExpr(parent SQLNode, node *CollateExpr, a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -1839,7 +1973,12 @@ func (a *application) rewriteRefOfComparisonExpr(parent SQLNode, node *Compariso a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, 
a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -1908,7 +2047,12 @@ func (a *application) rewriteRefOfConvertExpr(parent SQLNode, node *ConvertExpr, a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -1972,7 +2116,12 @@ func (a *application) rewriteRefOfConvertUsingExpr(parent SQLNode, node *Convert a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -1999,7 +2148,12 @@ func (a *application) rewriteRefOfCount(parent SQLNode, node *Count, replacer re a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -2026,7 +2180,12 @@ func (a *application) rewriteRefOfCountStar(parent SQLNode, node *CountStar, rep a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -2171,7 +2330,12 @@ func (a *application) rewriteRefOfCurTimeFuncExpr(parent SQLNode, node *CurTimeF a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -2190,70 +2354,6 @@ func (a *application) rewriteRefOfCurTimeFuncExpr(parent SQLNode, node *CurTimeF } return true } 
-func (a *application) rewriteRefOfDateAddExpr(parent SQLNode, node *DateAddExpr, replacer replacerFunc) bool { - if node == nil { - return true - } - if a.pre != nil { - a.cur.replacer = replacer - a.cur.parent = parent - a.cur.node = node - if !a.pre(&a.cur) { - return true - } - } - if !a.rewriteExpr(node, node.Date, func(newNode, parent SQLNode) { - parent.(*DateAddExpr).Date = newNode.(Expr) - }) { - return false - } - if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { - parent.(*DateAddExpr).Expr = newNode.(Expr) - }) { - return false - } - if a.post != nil { - a.cur.replacer = replacer - a.cur.parent = parent - a.cur.node = node - if !a.post(&a.cur) { - return false - } - } - return true -} -func (a *application) rewriteRefOfDateSubExpr(parent SQLNode, node *DateSubExpr, replacer replacerFunc) bool { - if node == nil { - return true - } - if a.pre != nil { - a.cur.replacer = replacer - a.cur.parent = parent - a.cur.node = node - if !a.pre(&a.cur) { - return true - } - } - if !a.rewriteExpr(node, node.Date, func(newNode, parent SQLNode) { - parent.(*DateSubExpr).Date = newNode.(Expr) - }) { - return false - } - if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { - parent.(*DateSubExpr).Expr = newNode.(Expr) - }) { - return false - } - if a.post != nil { - a.cur.replacer = replacer - a.cur.parent = parent - a.cur.node = node - if !a.post(&a.cur) { - return false - } - } - return true -} func (a *application) rewriteRefOfDeallocateStmt(parent SQLNode, node *DeallocateStmt, replacer replacerFunc) bool { if node == nil { return true @@ -2294,7 +2394,12 @@ func (a *application) rewriteRefOfDefault(parent SQLNode, node *Default, replace a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -2622,7 +2727,12 @@ func (a *application) 
rewriteRefOfExistsExpr(parent SQLNode, node *ExistsExpr, r a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -2745,7 +2855,12 @@ func (a *application) rewriteRefOfExtractFuncExpr(parent SQLNode, node *ExtractF a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -2772,7 +2887,12 @@ func (a *application) rewriteRefOfExtractValueExpr(parent SQLNode, node *Extract a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -2796,48 +2916,6 @@ func (a *application) rewriteRefOfExtractValueExpr(parent SQLNode, node *Extract } return true } -func (a *application) rewriteRefOfExtractedSubquery(parent SQLNode, node *ExtractedSubquery, replacer replacerFunc) bool { - if node == nil { - return true - } - if a.pre != nil { - a.cur.replacer = replacer - a.cur.parent = parent - a.cur.node = node - if !a.pre(&a.cur) { - return true - } - } - if !a.rewriteExpr(node, node.Original, func(newNode, parent SQLNode) { - parent.(*ExtractedSubquery).Original = newNode.(Expr) - }) { - return false - } - if !a.rewriteRefOfSubquery(node, node.Subquery, func(newNode, parent SQLNode) { - parent.(*ExtractedSubquery).Subquery = newNode.(*Subquery) - }) { - return false - } - if !a.rewriteExpr(node, node.OtherSide, func(newNode, parent SQLNode) { - parent.(*ExtractedSubquery).OtherSide = newNode.(Expr) - }) { - return false - } - if !a.rewriteExpr(node, node.alternative, func(newNode, 
parent SQLNode) { - parent.(*ExtractedSubquery).alternative = newNode.(Expr) - }) { - return false - } - if a.post != nil { - a.cur.replacer = replacer - a.cur.parent = parent - a.cur.node = node - if !a.post(&a.cur) { - return false - } - } - return true -} func (a *application) rewriteRefOfFieldsClause(parent SQLNode, node *FieldsClause, replacer replacerFunc) bool { if node == nil { return true @@ -2873,7 +2951,12 @@ func (a *application) rewriteRefOfFirstOrLastValueExpr(parent SQLNode, node *Fir a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -3081,7 +3164,12 @@ func (a *application) rewriteRefOfFuncExpr(parent SQLNode, node *FuncExpr, repla a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -3118,7 +3206,12 @@ func (a *application) rewriteRefOfGTIDFuncExpr(parent SQLNode, node *GTIDFuncExp a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -3160,7 +3253,12 @@ func (a *application) rewriteRefOfGeoHashFromLatLongExpr(parent SQLNode, node *G a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -3197,7 +3295,12 @@ func (a *application) rewriteRefOfGeoHashFromPointExpr(parent SQLNode, node *Geo a.cur.replacer = replacer a.cur.parent = 
parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -3229,7 +3332,12 @@ func (a *application) rewriteRefOfGeoJSONFromGeomExpr(parent SQLNode, node *GeoJ a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -3266,7 +3374,12 @@ func (a *application) rewriteRefOfGeomCollPropertyFuncExpr(parent SQLNode, node a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -3298,7 +3411,12 @@ func (a *application) rewriteRefOfGeomFormatExpr(parent SQLNode, node *GeomForma a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -3330,7 +3448,12 @@ func (a *application) rewriteRefOfGeomFromGeoHashExpr(parent SQLNode, node *Geom a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -3362,7 +3485,12 @@ func (a *application) rewriteRefOfGeomFromGeoJSONExpr(parent SQLNode, node *Geom a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if 
kontinue { return true } } @@ -3399,7 +3527,12 @@ func (a *application) rewriteRefOfGeomFromTextExpr(parent SQLNode, node *GeomFro a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -3436,7 +3569,12 @@ func (a *application) rewriteRefOfGeomFromWKBExpr(parent SQLNode, node *GeomFrom a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -3473,7 +3611,12 @@ func (a *application) rewriteRefOfGeomPropertyFuncExpr(parent SQLNode, node *Geo a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -3537,7 +3680,12 @@ func (a *application) rewriteRefOfGroupConcatExpr(parent SQLNode, node *GroupCon a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -3795,7 +3943,12 @@ func (a *application) rewriteRefOfInsertExpr(parent SQLNode, node *InsertExpr, r a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -3829,6 +3982,43 @@ func (a *application) rewriteRefOfInsertExpr(parent SQLNode, node *InsertExpr, r } return true } +func (a *application) 
rewriteRefOfIntervalDateExpr(parent SQLNode, node *IntervalDateExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { + return true + } + } + if !a.rewriteExpr(node, node.Date, func(newNode, parent SQLNode) { + parent.(*IntervalDateExpr).Date = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Interval, func(newNode, parent SQLNode) { + parent.(*IntervalDateExpr).Interval = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} func (a *application) rewriteRefOfIntervalFuncExpr(parent SQLNode, node *IntervalFuncExpr, replacer replacerFunc) bool { if node == nil { return true @@ -3837,7 +4027,12 @@ func (a *application) rewriteRefOfIntervalFuncExpr(parent SQLNode, node *Interva a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -3869,7 +4064,12 @@ func (a *application) rewriteRefOfIntroducerExpr(parent SQLNode, node *Introduce a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -3896,7 +4096,12 @@ func (a *application) rewriteRefOfIsExpr(parent SQLNode, node *IsExpr, replacer a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false 
+ return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -3923,7 +4128,12 @@ func (a *application) rewriteRefOfJSONArrayExpr(parent SQLNode, node *JSONArrayE a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -3950,7 +4160,12 @@ func (a *application) rewriteRefOfJSONAttributesExpr(parent SQLNode, node *JSONA a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -3982,7 +4197,12 @@ func (a *application) rewriteRefOfJSONContainsExpr(parent SQLNode, node *JSONCon a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4023,7 +4243,12 @@ func (a *application) rewriteRefOfJSONContainsPathExpr(parent SQLNode, node *JSO a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4064,7 +4289,12 @@ func (a *application) rewriteRefOfJSONExtractExpr(parent SQLNode, node *JSONExtr a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4100,7 +4330,12 @@ func (a *application) rewriteRefOfJSONKeysExpr(parent SQLNode, node 
*JSONKeysExp a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4132,7 +4367,12 @@ func (a *application) rewriteRefOfJSONObjectExpr(parent SQLNode, node *JSONObjec a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4195,7 +4435,12 @@ func (a *application) rewriteRefOfJSONOverlapsExpr(parent SQLNode, node *JSONOve a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4227,7 +4472,12 @@ func (a *application) rewriteRefOfJSONPrettyExpr(parent SQLNode, node *JSONPrett a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4254,7 +4504,12 @@ func (a *application) rewriteRefOfJSONQuoteExpr(parent SQLNode, node *JSONQuoteE a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4281,7 +4536,12 @@ func (a *application) rewriteRefOfJSONRemoveExpr(parent SQLNode, node *JSONRemov a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return 
a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4313,7 +4573,12 @@ func (a *application) rewriteRefOfJSONSchemaValidFuncExpr(parent SQLNode, node * a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4345,7 +4610,12 @@ func (a *application) rewriteRefOfJSONSchemaValidationReportFuncExpr(parent SQLN a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4377,7 +4647,12 @@ func (a *application) rewriteRefOfJSONSearchExpr(parent SQLNode, node *JSONSearc a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4428,7 +4703,12 @@ func (a *application) rewriteRefOfJSONStorageFreeExpr(parent SQLNode, node *JSON a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4455,7 +4735,12 @@ func (a *application) rewriteRefOfJSONStorageSizeExpr(parent SQLNode, node *JSON a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4528,7 +4813,12 @@ func (a *application) rewriteRefOfJSONUnquoteExpr(parent SQLNode, node 
*JSONUnqu a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4555,7 +4845,12 @@ func (a *application) rewriteRefOfJSONValueExpr(parent SQLNode, node *JSONValueE a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4602,7 +4897,12 @@ func (a *application) rewriteRefOfJSONValueMergeExpr(parent SQLNode, node *JSONV a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4634,7 +4934,12 @@ func (a *application) rewriteRefOfJSONValueModifierExpr(parent SQLNode, node *JS a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4806,7 +5111,7 @@ func (a *application) rewriteRefOfKeyState(parent SQLNode, node *KeyState, repla } return true } -func (a *application) rewriteRefOfLagLeadExpr(parent SQLNode, node *LagLeadExpr, replacer replacerFunc) bool { +func (a *application) rewriteRefOfKill(parent SQLNode, node *Kill, replacer replacerFunc) bool { if node == nil { return true } @@ -4818,6 +5123,35 @@ func (a *application) rewriteRefOfLagLeadExpr(parent SQLNode, node *LagLeadExpr, return true } } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + 
return true +} +func (a *application) rewriteRefOfLagLeadExpr(parent SQLNode, node *LagLeadExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { + return true + } + } if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { parent.(*LagLeadExpr).Expr = newNode.(Expr) }) { @@ -4893,7 +5227,12 @@ func (a *application) rewriteRefOfLineStringExpr(parent SQLNode, node *LineStrin a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4947,7 +5286,12 @@ func (a *application) rewriteRefOfLinestrPropertyFuncExpr(parent SQLNode, node * a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -4979,7 +5323,12 @@ func (a *application) rewriteRefOfLiteral(parent SQLNode, node *Literal, replace a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -5069,7 +5418,12 @@ func (a *application) rewriteRefOfLocateExpr(parent SQLNode, node *LocateExpr, r a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ 
-5154,7 +5508,12 @@ func (a *application) rewriteRefOfLockingFunc(parent SQLNode, node *LockingFunc, a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -5186,7 +5545,12 @@ func (a *application) rewriteRefOfMatchExpr(parent SQLNode, node *MatchExpr, rep a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -5222,7 +5586,12 @@ func (a *application) rewriteRefOfMax(parent SQLNode, node *Max, replacer replac a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -5249,7 +5618,12 @@ func (a *application) rewriteRefOfMemberOfExpr(parent SQLNode, node *MemberOfExp a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -5281,7 +5655,12 @@ func (a *application) rewriteRefOfMin(parent SQLNode, node *Min, replacer replac a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -5340,7 +5719,12 @@ func (a *application) rewriteRefOfMultiLinestringExpr(parent SQLNode, node *Mult a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { 
+ kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -5367,7 +5751,12 @@ func (a *application) rewriteRefOfMultiPointExpr(parent SQLNode, node *MultiPoin a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -5394,7 +5783,12 @@ func (a *application) rewriteRefOfMultiPolygonExpr(parent SQLNode, node *MultiPo a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -5421,7 +5815,12 @@ func (a *application) rewriteRefOfNTHValueExpr(parent SQLNode, node *NTHValueExp a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -5468,7 +5867,12 @@ func (a *application) rewriteRefOfNamedWindow(parent SQLNode, node *NamedWindow, a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -5559,7 +5963,12 @@ func (a *application) rewriteRefOfNotExpr(parent SQLNode, node *NotExpr, replace a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -5586,7 +5995,12 
@@ func (a *application) rewriteRefOfNtileExpr(parent SQLNode, node *NtileExpr, rep a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -5642,7 +6056,12 @@ func (a *application) rewriteRefOfNullVal(parent SQLNode, node *NullVal, replace a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -5666,7 +6085,12 @@ func (a *application) rewriteRefOfOffset(parent SQLNode, node *Offset, replacer a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -5757,7 +6181,12 @@ func (a *application) rewriteRefOfOrExpr(parent SQLNode, node *OrExpr, replacer a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -5896,30 +6325,6 @@ func (a *application) rewriteRefOfOtherAdmin(parent SQLNode, node *OtherAdmin, r } return true } -func (a *application) rewriteRefOfOtherRead(parent SQLNode, node *OtherRead, replacer replacerFunc) bool { - if node == nil { - return true - } - if a.pre != nil { - a.cur.replacer = replacer - a.cur.parent = parent - a.cur.node = node - if !a.pre(&a.cur) { - return true - } - } - if a.post != nil { - if a.pre == nil { - a.cur.replacer = replacer - a.cur.parent = parent - a.cur.node = node - } - if !a.post(&a.cur) { - return false - } - } - return 
true -} func (a *application) rewriteRefOfOverClause(parent SQLNode, node *OverClause, replacer replacerFunc) bool { if node == nil { return true @@ -6275,7 +6680,12 @@ func (a *application) rewriteRefOfPerformanceSchemaFuncExpr(parent SQLNode, node a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -6302,7 +6712,12 @@ func (a *application) rewriteRefOfPointExpr(parent SQLNode, node *PointExpr, rep a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -6334,7 +6749,12 @@ func (a *application) rewriteRefOfPointPropertyFuncExpr(parent SQLNode, node *Po a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -6366,7 +6786,12 @@ func (a *application) rewriteRefOfPolygonExpr(parent SQLNode, node *PolygonExpr, a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -6393,7 +6818,12 @@ func (a *application) rewriteRefOfPolygonPropertyFuncExpr(parent SQLNode, node * a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -6533,7 +6963,12 @@ func (a 
*application) rewriteRefOfRegexpInstrExpr(parent SQLNode, node *RegexpIn a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -6585,7 +7020,12 @@ func (a *application) rewriteRefOfRegexpLikeExpr(parent SQLNode, node *RegexpLik a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -6622,7 +7062,12 @@ func (a *application) rewriteRefOfRegexpReplaceExpr(parent SQLNode, node *Regexp a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -6674,7 +7119,12 @@ func (a *application) rewriteRefOfRegexpSubstrExpr(parent SQLNode, node *RegexpS a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -7479,7 +7929,12 @@ func (a *application) rewriteRefOfStd(parent SQLNode, node *Std, replacer replac a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -7506,7 +7961,12 @@ func (a *application) rewriteRefOfStdDev(parent SQLNode, node *StdDev, replacer a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) 
+ if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -7533,7 +7993,12 @@ func (a *application) rewriteRefOfStdPop(parent SQLNode, node *StdPop, replacer a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -7560,7 +8025,12 @@ func (a *application) rewriteRefOfStdSamp(parent SQLNode, node *StdSamp, replace a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -7767,7 +8237,12 @@ func (a *application) rewriteRefOfSubquery(parent SQLNode, node *Subquery, repla a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -7794,7 +8269,12 @@ func (a *application) rewriteRefOfSubstrExpr(parent SQLNode, node *SubstrExpr, r a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -7831,7 +8311,12 @@ func (a *application) rewriteRefOfSum(parent SQLNode, node *Sum, replacer replac a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -8066,7 +8551,7 @@ func (a *application) 
rewriteRefOfTablespaceOperation(parent SQLNode, node *Tabl } return true } -func (a *application) rewriteRefOfTimestampFuncExpr(parent SQLNode, node *TimestampFuncExpr, replacer replacerFunc) bool { +func (a *application) rewriteRefOfTimestampDiffExpr(parent SQLNode, node *TimestampDiffExpr, replacer replacerFunc) bool { if node == nil { return true } @@ -8074,17 +8559,22 @@ func (a *application) rewriteRefOfTimestampFuncExpr(parent SQLNode, node *Timest a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } if !a.rewriteExpr(node, node.Expr1, func(newNode, parent SQLNode) { - parent.(*TimestampFuncExpr).Expr1 = newNode.(Expr) + parent.(*TimestampDiffExpr).Expr1 = newNode.(Expr) }) { return false } if !a.rewriteExpr(node, node.Expr2, func(newNode, parent SQLNode) { - parent.(*TimestampFuncExpr).Expr2 = newNode.(Expr) + parent.(*TimestampDiffExpr).Expr2 = newNode.(Expr) }) { return false } @@ -8106,7 +8596,12 @@ func (a *application) rewriteRefOfTrimFuncExpr(parent SQLNode, node *TrimFuncExp a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -8165,7 +8660,12 @@ func (a *application) rewriteRefOfUnaryExpr(parent SQLNode, node *UnaryExpr, rep a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -8394,7 +8894,12 @@ func (a *application) rewriteRefOfUpdateXMLExpr(parent SQLNode, node *UpdateXMLE a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node 
- if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -8635,7 +9140,12 @@ func (a *application) rewriteRefOfValuesFuncExpr(parent SQLNode, node *ValuesFun a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -8662,7 +9172,12 @@ func (a *application) rewriteRefOfVarPop(parent SQLNode, node *VarPop, replacer a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -8689,7 +9204,12 @@ func (a *application) rewriteRefOfVarSamp(parent SQLNode, node *VarSamp, replace a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -8716,7 +9236,12 @@ func (a *application) rewriteRefOfVariable(parent SQLNode, node *Variable, repla a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -8743,7 +9268,12 @@ func (a *application) rewriteRefOfVariance(parent SQLNode, node *Variance, repla a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } 
} @@ -8835,7 +9365,12 @@ func (a *application) rewriteRefOfWeightStringFuncExpr(parent SQLNode, node *Wei a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -9068,7 +9603,12 @@ func (a *application) rewriteRefOfXorExpr(parent SQLNode, node *XorExpr, replace a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -9097,6 +9637,8 @@ func (a *application) rewriteAggrFunc(parent SQLNode, node AggrFunc, replacer re return true } switch node := node.(type) { + case *AnyValue: + return a.rewriteRefOfAnyValue(parent, node, replacer) case *Avg: return a.rewriteRefOfAvg(parent, node, replacer) case *BitAnd: @@ -9195,6 +9737,8 @@ func (a *application) rewriteCallable(parent SQLNode, node Callable, replacer re return true } switch node := node.(type) { + case *AnyValue: + return a.rewriteRefOfAnyValue(parent, node, replacer) case *ArgumentLessWindowExpr: return a.rewriteRefOfArgumentLessWindowExpr(parent, node, replacer) case *Avg: @@ -9211,10 +9755,6 @@ func (a *application) rewriteCallable(parent SQLNode, node Callable, replacer re return a.rewriteRefOfCountStar(parent, node, replacer) case *CurTimeFuncExpr: return a.rewriteRefOfCurTimeFuncExpr(parent, node, replacer) - case *DateAddExpr: - return a.rewriteRefOfDateAddExpr(parent, node, replacer) - case *DateSubExpr: - return a.rewriteRefOfDateSubExpr(parent, node, replacer) case *ExtractFuncExpr: return a.rewriteRefOfExtractFuncExpr(parent, node, replacer) case *ExtractValueExpr: @@ -9249,6 +9789,8 @@ func (a *application) rewriteCallable(parent SQLNode, node Callable, replacer re return 
a.rewriteRefOfGroupConcatExpr(parent, node, replacer) case *InsertExpr: return a.rewriteRefOfInsertExpr(parent, node, replacer) + case *IntervalDateExpr: + return a.rewriteRefOfIntervalDateExpr(parent, node, replacer) case *IntervalFuncExpr: return a.rewriteRefOfIntervalFuncExpr(parent, node, replacer) case *JSONArrayExpr: @@ -9341,8 +9883,8 @@ func (a *application) rewriteCallable(parent SQLNode, node Callable, replacer re return a.rewriteRefOfSubstrExpr(parent, node, replacer) case *Sum: return a.rewriteRefOfSum(parent, node, replacer) - case *TimestampFuncExpr: - return a.rewriteRefOfTimestampFuncExpr(parent, node, replacer) + case *TimestampDiffExpr: + return a.rewriteRefOfTimestampDiffExpr(parent, node, replacer) case *TrimFuncExpr: return a.rewriteRefOfTrimFuncExpr(parent, node, replacer) case *UpdateXMLExpr: @@ -9449,6 +9991,8 @@ func (a *application) rewriteExpr(parent SQLNode, node Expr, replacer replacerFu switch node := node.(type) { case *AndExpr: return a.rewriteRefOfAndExpr(parent, node, replacer) + case *AnyValue: + return a.rewriteRefOfAnyValue(parent, node, replacer) case *Argument: return a.rewriteRefOfArgument(parent, node, replacer) case *ArgumentLessWindowExpr: @@ -9491,10 +10035,6 @@ func (a *application) rewriteExpr(parent SQLNode, node Expr, replacer replacerFu return a.rewriteRefOfCountStar(parent, node, replacer) case *CurTimeFuncExpr: return a.rewriteRefOfCurTimeFuncExpr(parent, node, replacer) - case *DateAddExpr: - return a.rewriteRefOfDateAddExpr(parent, node, replacer) - case *DateSubExpr: - return a.rewriteRefOfDateSubExpr(parent, node, replacer) case *Default: return a.rewriteRefOfDefault(parent, node, replacer) case *ExistsExpr: @@ -9503,8 +10043,6 @@ func (a *application) rewriteExpr(parent SQLNode, node Expr, replacer replacerFu return a.rewriteRefOfExtractFuncExpr(parent, node, replacer) case *ExtractValueExpr: return a.rewriteRefOfExtractValueExpr(parent, node, replacer) - case *ExtractedSubquery: - return 
a.rewriteRefOfExtractedSubquery(parent, node, replacer) case *FirstOrLastValueExpr: return a.rewriteRefOfFirstOrLastValueExpr(parent, node, replacer) case *FuncExpr: @@ -9535,6 +10073,8 @@ func (a *application) rewriteExpr(parent SQLNode, node Expr, replacer replacerFu return a.rewriteRefOfGroupConcatExpr(parent, node, replacer) case *InsertExpr: return a.rewriteRefOfInsertExpr(parent, node, replacer) + case *IntervalDateExpr: + return a.rewriteRefOfIntervalDateExpr(parent, node, replacer) case *IntervalFuncExpr: return a.rewriteRefOfIntervalFuncExpr(parent, node, replacer) case *IntroducerExpr: @@ -9655,8 +10195,8 @@ func (a *application) rewriteExpr(parent SQLNode, node Expr, replacer replacerFu return a.rewriteRefOfSubstrExpr(parent, node, replacer) case *Sum: return a.rewriteRefOfSum(parent, node, replacer) - case *TimestampFuncExpr: - return a.rewriteRefOfTimestampFuncExpr(parent, node, replacer) + case *TimestampDiffExpr: + return a.rewriteRefOfTimestampDiffExpr(parent, node, replacer) case *TrimFuncExpr: return a.rewriteRefOfTrimFuncExpr(parent, node, replacer) case *UnaryExpr: @@ -9775,6 +10315,8 @@ func (a *application) rewriteStatement(parent SQLNode, node Statement, replacer return a.rewriteRefOfAlterView(parent, node, replacer) case *AlterVschema: return a.rewriteRefOfAlterVschema(parent, node, replacer) + case *Analyze: + return a.rewriteRefOfAnalyze(parent, node, replacer) case *Begin: return a.rewriteRefOfBegin(parent, node, replacer) case *CallProc: @@ -9809,6 +10351,8 @@ func (a *application) rewriteStatement(parent SQLNode, node Statement, replacer return a.rewriteRefOfFlush(parent, node, replacer) case *Insert: return a.rewriteRefOfInsert(parent, node, replacer) + case *Kill: + return a.rewriteRefOfKill(parent, node, replacer) case *Load: return a.rewriteRefOfLoad(parent, node, replacer) case *LoadDataStmt: @@ -9817,8 +10361,6 @@ func (a *application) rewriteStatement(parent SQLNode, node Statement, replacer return 
a.rewriteRefOfLockTables(parent, node, replacer) case *OtherAdmin: return a.rewriteRefOfOtherAdmin(parent, node, replacer) - case *OtherRead: - return a.rewriteRefOfOtherRead(parent, node, replacer) case *PrepareStmt: return a.rewriteRefOfPrepareStmt(parent, node, replacer) case *PurgeBinaryLogs: @@ -9912,7 +10454,12 @@ func (a *application) rewriteBoolVal(parent SQLNode, node BoolVal, replacer repl a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } @@ -9933,7 +10480,12 @@ func (a *application) rewriteListArg(parent SQLNode, node ListArg, replacer repl a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - if !a.pre(&a.cur) { + kontinue := !a.pre(&a.cur) + if a.cur.revisit { + a.cur.revisit = false + return a.rewriteExpr(parent, a.cur.node.(Expr), replacer) + } + if kontinue { return true } } diff --git a/go/vt/sqlparser/ast_rewriting.go b/go/vt/sqlparser/ast_rewriting.go index 00fa7c8b740..45711f8d535 100644 --- a/go/vt/sqlparser/ast_rewriting.go +++ b/go/vt/sqlparser/ast_rewriting.go @@ -26,12 +26,7 @@ import ( "vitess.io/vitess/go/vt/vterrors" ) -var ( - subQueryBaseArgName = []byte("__sq") - - // HasValueSubQueryBaseName is the prefix of each parameter representing an EXISTS subquery - HasValueSubQueryBaseName = []byte("__sq_has_values") -) +var HasValueSubQueryBaseName = []byte("__sq_has_values") // SQLSelectLimitUnset default value for sql_select_limit not set. const SQLSelectLimitUnset = -1 @@ -42,166 +37,10 @@ type RewriteASTResult struct { AST Statement // The rewritten AST } -// ReservedVars keeps track of the bind variable names that have already been used -// in a parsed query. 
-type ReservedVars struct { - prefix string - reserved BindVars - next []byte - counter int - fast, static bool - sqNext int64 -} - type VSchemaViews interface { FindView(name TableName) SelectStatement } -// ReserveAll tries to reserve all the given variable names. If they're all available, -// they are reserved and the function returns true. Otherwise, the function returns false. -func (r *ReservedVars) ReserveAll(names ...string) bool { - for _, name := range names { - if _, ok := r.reserved[name]; ok { - return false - } - } - for _, name := range names { - r.reserved[name] = struct{}{} - } - return true -} - -// ReserveColName reserves a variable name for the given column; if a variable -// with the same name already exists, it'll be suffixed with a numberic identifier -// to make it unique. -func (r *ReservedVars) ReserveColName(col *ColName) string { - reserveName := col.CompliantName() - if r.fast && strings.HasPrefix(reserveName, r.prefix) { - reserveName = "_" + reserveName - } - - return r.ReserveVariable(reserveName) -} - -func (r *ReservedVars) ReserveVariable(compliantName string) string { - joinVar := []byte(compliantName) - baseLen := len(joinVar) - i := int64(1) - - for { - if _, ok := r.reserved[string(joinVar)]; !ok { - bvar := string(joinVar) - r.reserved[bvar] = struct{}{} - return bvar - } - joinVar = strconv.AppendInt(joinVar[:baseLen], i, 10) - i++ - } -} - -// ReserveSubQuery returns the next argument name to replace subquery with pullout value. -func (r *ReservedVars) ReserveSubQuery() string { - for { - r.sqNext++ - joinVar := strconv.AppendInt(subQueryBaseArgName, r.sqNext, 10) - if _, ok := r.reserved[string(joinVar)]; !ok { - r.reserved[string(joinVar)] = struct{}{} - return string(joinVar) - } - } -} - -// ReserveSubQueryWithHasValues returns the next argument name to replace subquery with pullout value. 
-func (r *ReservedVars) ReserveSubQueryWithHasValues() (string, string) { - for { - r.sqNext++ - joinVar := strconv.AppendInt(subQueryBaseArgName, r.sqNext, 10) - hasValuesJoinVar := strconv.AppendInt(HasValueSubQueryBaseName, r.sqNext, 10) - _, joinVarOK := r.reserved[string(joinVar)] - _, hasValuesJoinVarOK := r.reserved[string(hasValuesJoinVar)] - if !joinVarOK && !hasValuesJoinVarOK { - r.reserved[string(joinVar)] = struct{}{} - r.reserved[string(hasValuesJoinVar)] = struct{}{} - return string(joinVar), string(hasValuesJoinVar) - } - } -} - -// ReserveHasValuesSubQuery returns the next argument name to replace subquery with has value. -func (r *ReservedVars) ReserveHasValuesSubQuery() string { - for { - r.sqNext++ - joinVar := strconv.AppendInt(HasValueSubQueryBaseName, r.sqNext, 10) - if _, ok := r.reserved[string(joinVar)]; !ok { - bvar := string(joinVar) - r.reserved[bvar] = struct{}{} - return bvar - } - } -} - -const staticBvar10 = "vtg0vtg1vtg2vtg3vtg4vtg5vtg6vtg7vtg8vtg9" -const staticBvar100 = "vtg10vtg11vtg12vtg13vtg14vtg15vtg16vtg17vtg18vtg19vtg20vtg21vtg22vtg23vtg24vtg25vtg26vtg27vtg28vtg29vtg30vtg31vtg32vtg33vtg34vtg35vtg36vtg37vtg38vtg39vtg40vtg41vtg42vtg43vtg44vtg45vtg46vtg47vtg48vtg49vtg50vtg51vtg52vtg53vtg54vtg55vtg56vtg57vtg58vtg59vtg60vtg61vtg62vtg63vtg64vtg65vtg66vtg67vtg68vtg69vtg70vtg71vtg72vtg73vtg74vtg75vtg76vtg77vtg78vtg79vtg80vtg81vtg82vtg83vtg84vtg85vtg86vtg87vtg88vtg89vtg90vtg91vtg92vtg93vtg94vtg95vtg96vtg97vtg98vtg99" - -func (r *ReservedVars) nextUnusedVar() string { - if r.fast { - r.counter++ - - if r.static { - switch { - case r.counter < 10: - ofs := r.counter * 4 - return staticBvar10[ofs : ofs+4] - case r.counter < 100: - ofs := (r.counter - 10) * 5 - return staticBvar100[ofs : ofs+5] - } - } - - r.next = strconv.AppendInt(r.next[:len(r.prefix)], int64(r.counter), 10) - return string(r.next) - } - - for { - r.counter++ - r.next = strconv.AppendInt(r.next[:len(r.prefix)], int64(r.counter), 10) - if _, ok := 
r.reserved[string(r.next)]; !ok { - bvar := string(r.next) - r.reserved[bvar] = struct{}{} - return bvar - } - } -} - -// NewReservedVars allocates a ReservedVar instance that will generate unique -// variable names starting with the given `prefix` and making sure that they -// don't conflict with the given set of `known` variables. -func NewReservedVars(prefix string, known BindVars) *ReservedVars { - rv := &ReservedVars{ - prefix: prefix, - counter: 0, - reserved: known, - fast: true, - next: []byte(prefix), - } - - if prefix != "" && prefix[0] == '_' { - panic("cannot reserve variables with a '_' prefix") - } - - for bvar := range known { - if strings.HasPrefix(bvar, prefix) { - rv.fast = false - break - } - } - - if prefix == "vtg" { - rv.static = true - } - return rv -} - // PrepareAST will normalize the query func PrepareAST( in Statement, @@ -364,6 +203,8 @@ func (er *astRewriter) rewriteUp(cursor *Cursor) bool { er.rewriteShowBasic(node) case *ExistsExpr: er.existsRewrite(cursor, node) + case DistinctableAggr: + er.rewriteDistinctableAggr(cursor, node) } return true } @@ -533,6 +374,7 @@ func (er *astRewriter) sysVarRewrite(cursor *Cursor, node *Variable) { sysvars.Charset.Name, sysvars.ClientFoundRows.Name, sysvars.DDLStrategy.Name, + sysvars.MigrationContext.Name, sysvars.Names.Name, sysvars.TransactionMode.Name, sysvars.ReadAfterWriteGTID.Name, @@ -659,11 +501,6 @@ func (er *astRewriter) existsRewrite(cursor *Cursor, node *ExistsExpr) { return } - if sel.Limit == nil { - sel.Limit = &Limit{} - } - sel.Limit.Rowcount = NewIntLiteral("1") - if sel.Having != nil { // If the query has HAVING, we can't take any shortcuts return @@ -683,6 +520,18 @@ func (er *astRewriter) existsRewrite(cursor *Cursor, node *ExistsExpr) { sel.GroupBy = nil } +// rewriteDistinctableAggr removed Distinct from Max and Min Aggregations as it does not impact the result. But, makes the plan simpler. 
+func (er *astRewriter) rewriteDistinctableAggr(cursor *Cursor, node DistinctableAggr) { + if !node.IsDistinct() { + return + } + switch aggr := node.(type) { + case *Max, *Min: + aggr.SetDistinct(false) + er.bindVars.NoteRewrite() + } +} + func bindVarExpression(name string) Expr { return NewArgument(name) } diff --git a/go/vt/sqlparser/ast_rewriting_test.go b/go/vt/sqlparser/ast_rewriting_test.go index 6fe59acbc85..2ed92201296 100644 --- a/go/vt/sqlparser/ast_rewriting_test.go +++ b/go/vt/sqlparser/ast_rewriting_test.go @@ -37,13 +37,12 @@ type testCaseSysVar struct { } type myTestCase struct { - in, expected string - liid, db, foundRows, rowCount, rawGTID, rawTimeout, sessTrackGTID bool - ddlStrategy, sessionUUID, sessionEnableSystemSettings bool - udv int - autocommit, clientFoundRows, skipQueryPlanCache, socket, queryTimeout bool - sqlSelectLimit, transactionMode, workload, version, versionComment bool - txIsolation bool + in, expected string + liid, db, foundRows, rowCount, rawGTID, rawTimeout, sessTrackGTID bool + ddlStrategy, migrationContext, sessionUUID, sessionEnableSystemSettings bool + udv int + autocommit, clientFoundRows, skipQueryPlanCache, socket, queryTimeout bool + sqlSelectLimit, transactionMode, workload, version, versionComment bool } func TestRewrites(in *testing.T) { @@ -172,9 +171,6 @@ func TestRewrites(in *testing.T) { }, { in: "select (select 42) from dual", expected: "select 42 as `(select 42 from dual)` from dual", - }, { - in: "select exists(select 1) from user", - expected: "select exists(select 1 limit 1) from user", }, { in: "select * from user where col = (select 42)", expected: "select * from user where col = 42", @@ -189,6 +185,10 @@ func TestRewrites(in *testing.T) { in: `select * from user where col = @@ddl_strategy`, expected: "select * from user where col = :__vtddl_strategy", ddlStrategy: true, + }, { + in: `select * from user where col = @@migration_context`, + expected: "select * from user where col = 
:__vtmigration_context", + migrationContext: true, }, { in: `select * from user where col = @@read_after_write_gtid OR col = @@read_after_write_timeout OR col = @@session_track_gtids`, expected: "select * from user where col = :__vtread_after_write_gtid or col = :__vtread_after_write_timeout or col = :__vtsession_track_gtids", @@ -270,25 +270,28 @@ func TestRewrites(in *testing.T) { expected: "select * from tbl where id regexp '%foobar'", }, { in: "SELECT * FROM tbl WHERE exists(select col1, col2 from other_table where foo > bar)", - expected: "SELECT * FROM tbl WHERE exists(select 1 from other_table where foo > bar limit 1)", + expected: "SELECT * FROM tbl WHERE exists(select 1 from other_table where foo > bar)", }, { in: "SELECT * FROM tbl WHERE exists(select col1, col2 from other_table where foo > bar limit 100 offset 34)", - expected: "SELECT * FROM tbl WHERE exists(select 1 from other_table where foo > bar limit 1 offset 34)", + expected: "SELECT * FROM tbl WHERE exists(select 1 from other_table where foo > bar limit 100 offset 34)", }, { in: "SELECT * FROM tbl WHERE exists(select col1, col2, count(*) from other_table where foo > bar group by col1, col2)", - expected: "SELECT * FROM tbl WHERE exists(select 1 from other_table where foo > bar limit 1)", + expected: "SELECT * FROM tbl WHERE exists(select 1 from other_table where foo > bar)", }, { in: "SELECT * FROM tbl WHERE exists(select col1, col2 from other_table where foo > bar group by col1, col2)", - expected: "SELECT * FROM tbl WHERE exists(select 1 from other_table where foo > bar limit 1)", + expected: "SELECT * FROM tbl WHERE exists(select 1 from other_table where foo > bar)", }, { in: "SELECT * FROM tbl WHERE exists(select count(*) from other_table where foo > bar)", expected: "SELECT * FROM tbl WHERE true", }, { in: "SELECT * FROM tbl WHERE exists(select col1, col2, count(*) from other_table where foo > bar group by col1, col2 having count(*) > 3)", - expected: "SELECT * FROM tbl WHERE exists(select 
col1, col2, count(*) from other_table where foo > bar group by col1, col2 having count(*) > 3 limit 1)", + expected: "SELECT * FROM tbl WHERE exists(select col1, col2, count(*) from other_table where foo > bar group by col1, col2 having count(*) > 3)", }, { in: "SELECT id, name, salary FROM user_details", expected: "SELECT id, name, salary FROM (select user.id, user.name, user_extra.salary from user join user_extra where user.id = user_extra.user_id) as user_details", + }, { + in: "select max(distinct c1), min(distinct c2), avg(distinct c3), sum(distinct c4), count(distinct c5), group_concat(distinct c6) from tbl", + expected: "select max(c1) as `max(distinct c1)`, min(c2) as `min(distinct c2)`, avg(distinct c3), sum(distinct c4), count(distinct c5), group_concat(distinct c6) from tbl", }, { in: "SHOW VARIABLES", expected: "SHOW VARIABLES", @@ -301,6 +304,7 @@ func TestRewrites(in *testing.T) { version: true, versionComment: true, ddlStrategy: true, + migrationContext: true, sessionUUID: true, sessionEnableSystemSettings: true, rawGTID: true, @@ -320,6 +324,7 @@ func TestRewrites(in *testing.T) { version: true, versionComment: true, ddlStrategy: true, + migrationContext: true, sessionUUID: true, sessionEnableSystemSettings: true, rawGTID: true, @@ -364,6 +369,7 @@ func TestRewrites(in *testing.T) { assert.Equal(tc.workload, result.NeedsSysVar(sysvars.Workload.Name), "should need :__vtworkload") assert.Equal(tc.queryTimeout, result.NeedsSysVar(sysvars.QueryTimeout.Name), "should need :__vtquery_timeout") assert.Equal(tc.ddlStrategy, result.NeedsSysVar(sysvars.DDLStrategy.Name), "should need ddlStrategy") + assert.Equal(tc.migrationContext, result.NeedsSysVar(sysvars.MigrationContext.Name), "should need migrationContext") assert.Equal(tc.sessionUUID, result.NeedsSysVar(sysvars.SessionUUID.Name), "should need sessionUUID") assert.Equal(tc.sessionEnableSystemSettings, result.NeedsSysVar(sysvars.SessionEnableSystemSettings.Name), "should need 
sessionEnableSystemSettings") assert.Equal(tc.rawGTID, result.NeedsSysVar(sysvars.ReadAfterWriteGTID.Name), "should need rawGTID") @@ -512,8 +518,8 @@ func TestRewritesWithDefaultKeyspace(in *testing.T) { in: "SELECT 1 from (select 2 from test) t", expected: "SELECT 1 from (select 2 from sys.test) t", }, { - in: "SELECT 1 from test where exists (select 2 from test)", - expected: "SELECT 1 from sys.test where exists (select 1 from sys.test limit 1)", + in: "SELECT 1 from test where exists(select 2 from test)", + expected: "SELECT 1 from sys.test where exists(select 1 from sys.test)", }, { in: "SELECT 1 from dual", expected: "SELECT 1 from dual", diff --git a/go/vt/sqlparser/ast_test.go b/go/vt/sqlparser/ast_test.go index 3f0fb850857..97b93a80379 100644 --- a/go/vt/sqlparser/ast_test.go +++ b/go/vt/sqlparser/ast_test.go @@ -49,46 +49,35 @@ func TestAppend(t *testing.T) { } func TestSelect(t *testing.T) { - tree, err := Parse("select * from t where a = 1") + e1, err := ParseExpr("a = 1") require.NoError(t, err) - expr := tree.(*Select).Where.Expr - - sel := &Select{} - sel.AddWhere(expr) - buf := NewTrackedBuffer(nil) - sel.Where.Format(buf) - assert.Equal(t, " where a = 1", buf.String()) - sel.AddWhere(expr) - buf = NewTrackedBuffer(nil) - sel.Where.Format(buf) - assert.Equal(t, " where a = 1", buf.String()) - - sel = &Select{} - sel.AddHaving(expr) - buf = NewTrackedBuffer(nil) - sel.Having.Format(buf) - assert.Equal(t, " having a = 1", buf.String()) - - sel.AddHaving(expr) - buf = NewTrackedBuffer(nil) - sel.Having.Format(buf) - assert.Equal(t, " having a = 1", buf.String()) - - tree, err = Parse("select * from t where a = 1 or b = 1") + e2, err := ParseExpr("b = 2") require.NoError(t, err) - expr = tree.(*Select).Where.Expr - sel = &Select{} - sel.AddWhere(expr) - buf = NewTrackedBuffer(nil) - sel.Where.Format(buf) - assert.Equal(t, " where a = 1 or b = 1", buf.String()) + t.Run("single predicate where", func(t *testing.T) { + sel := &Select{} + sel.AddWhere(e1) + 
assert.Equal(t, " where a = 1", String(sel.Where)) + }) - sel = &Select{} - sel.AddHaving(expr) - buf = NewTrackedBuffer(nil) - sel.Having.Format(buf) - assert.Equal(t, " having a = 1 or b = 1", buf.String()) + t.Run("single predicate having", func(t *testing.T) { + sel := &Select{} + sel.AddHaving(e1) + assert.Equal(t, " having a = 1", String(sel.Having)) + }) + t.Run("double predicate where", func(t *testing.T) { + sel := &Select{} + sel.AddWhere(e1) + sel.AddWhere(e2) + assert.Equal(t, " where a = 1 and b = 2", String(sel.Where)) + }) + + t.Run("double predicate having", func(t *testing.T) { + sel := &Select{} + sel.AddHaving(e1) + sel.AddHaving(e2) + assert.Equal(t, " having a = 1 and b = 2", String(sel.Having)) + }) } func TestUpdate(t *testing.T) { @@ -261,7 +250,7 @@ func TestSetAutocommitON(t *testing.T) { t.Errorf("SET statement value is not StrVal: %T", v) } - if "on" != v.Val { + if v.Val != "on" { t.Errorf("SET statement value want: on, got: %s", v.Val) } default: @@ -286,7 +275,7 @@ func TestSetAutocommitON(t *testing.T) { t.Errorf("SET statement value is not StrVal: %T", v) } - if "on" != v.Val { + if v.Val != "on" { t.Errorf("SET statement value want: on, got: %s", v.Val) } default: @@ -313,7 +302,7 @@ func TestSetAutocommitOFF(t *testing.T) { t.Errorf("SET statement value is not StrVal: %T", v) } - if "off" != v.Val { + if v.Val != "off" { t.Errorf("SET statement value want: on, got: %s", v.Val) } default: @@ -338,7 +327,7 @@ func TestSetAutocommitOFF(t *testing.T) { t.Errorf("SET statement value is not StrVal: %T", v) } - if "off" != v.Val { + if v.Val != "off" { t.Errorf("SET statement value want: on, got: %s", v.Val) } default: @@ -362,23 +351,6 @@ func TestWhere(t *testing.T) { } } -func TestIsAggregate(t *testing.T) { - f := FuncExpr{Name: NewIdentifierCI("avg")} - if !f.IsAggregate() { - t.Error("IsAggregate: false, want true") - } - - f = FuncExpr{Name: NewIdentifierCI("Avg")} - if !f.IsAggregate() { - t.Error("IsAggregate: false, want true") 
- } - - f = FuncExpr{Name: NewIdentifierCI("foo")} - if f.IsAggregate() { - t.Error("IsAggregate: true, want false") - } -} - func TestIsImpossible(t *testing.T) { f := ComparisonExpr{ Operator: NotEqualOp, @@ -520,26 +492,21 @@ func TestReplaceExpr(t *testing.T) { }} to := NewArgument("a") for _, tcase := range tcases { - tree, err := Parse(tcase.in) - if err != nil { - t.Fatal(err) - } - var from *Subquery - _ = Walk(func(node SQLNode) (kontinue bool, err error) { - if sq, ok := node.(*Subquery); ok { - from = sq - return false, nil - } - return true, nil - }, tree) - if from == nil { - t.Fatalf("from is nil for %s", tcase.in) - } - expr := ReplaceExpr(tree.(*Select).Where.Expr, from, to) - got := String(expr) - if tcase.out != got { - t.Errorf("ReplaceExpr(%s): %s, want %s", tcase.in, got, tcase.out) - } + t.Run(tcase.in, func(t *testing.T) { + tree, err := Parse(tcase.in) + require.NoError(t, err) + var from *Subquery + _ = Walk(func(node SQLNode) (kontinue bool, err error) { + if sq, ok := node.(*Subquery); ok { + from = sq + return false, nil + } + return true, nil + }, tree) + require.NotNilf(t, from, "from is nil for %s", tcase.in) + expr := ReplaceExpr(tree.(*Select).Where.Expr, from, to) + assert.Equal(t, tcase.out, String(expr)) + }) } } @@ -754,7 +721,22 @@ func TestSplitStatementToPieces(t *testing.T) { "`createtime` datetime NOT NULL DEFAULT NOW() COMMENT 'create time;'," + "`comment` varchar(100) NOT NULL DEFAULT '' COMMENT 'comment'," + "PRIMARY KEY (`id`))", - }} + }, { + input: "create table t1 (id int primary key); create table t2 (id int primary key);", + output: "create table t1 (id int primary key); create table t2 (id int primary key)", + }, { + input: ";;; create table t1 (id int primary key);;; ;create table t2 (id int primary key);", + output: " create table t1 (id int primary key);create table t2 (id int primary key)", + }, { + // The input doesn't have to be valid SQL statements! 
+ input: ";create table t1 ;create table t2 (id;", + output: "create table t1 ;create table t2 (id", + }, { + // Ignore quoted semicolon + input: ";create table t1 ';';;;create table t2 (id;", + output: "create table t1 ';';create table t2 (id", + }, + } for _, tcase := range testcases { t.Run(tcase.input, func(t *testing.T) { diff --git a/go/vt/sqlparser/ast_visit.go b/go/vt/sqlparser/ast_visit.go index 7a706a619a8..7e40eceba2f 100644 --- a/go/vt/sqlparser/ast_visit.go +++ b/go/vt/sqlparser/ast_visit.go @@ -52,8 +52,12 @@ func VisitSQLNode(in SQLNode, f Visit) error { return VisitRefOfAlterView(in, f) case *AlterVschema: return VisitRefOfAlterVschema(in, f) + case *Analyze: + return VisitRefOfAnalyze(in, f) case *AndExpr: return VisitRefOfAndExpr(in, f) + case *AnyValue: + return VisitRefOfAnyValue(in, f) case *Argument: return VisitRefOfArgument(in, f) case *ArgumentLessWindowExpr: @@ -128,10 +132,6 @@ func VisitSQLNode(in SQLNode, f Visit) error { return VisitRefOfCreateView(in, f) case *CurTimeFuncExpr: return VisitRefOfCurTimeFuncExpr(in, f) - case *DateAddExpr: - return VisitRefOfDateAddExpr(in, f) - case *DateSubExpr: - return VisitRefOfDateSubExpr(in, f) case *DeallocateStmt: return VisitRefOfDeallocateStmt(in, f) case *Default: @@ -166,8 +166,6 @@ func VisitSQLNode(in SQLNode, f Visit) error { return VisitRefOfExtractFuncExpr(in, f) case *ExtractValueExpr: return VisitRefOfExtractValueExpr(in, f) - case *ExtractedSubquery: - return VisitRefOfExtractedSubquery(in, f) case *FieldsClause: return VisitRefOfFieldsClause(in, f) case *FirstOrLastValueExpr: @@ -228,6 +226,8 @@ func VisitSQLNode(in SQLNode, f Visit) error { return VisitRefOfInsert(in, f) case *InsertExpr: return VisitRefOfInsertExpr(in, f) + case *IntervalDateExpr: + return VisitRefOfIntervalDateExpr(in, f) case *IntervalFuncExpr: return VisitRefOfIntervalFuncExpr(in, f) case *IntroducerExpr: @@ -288,6 +288,8 @@ func VisitSQLNode(in SQLNode, f Visit) error { return VisitRefOfJtOnResponse(in, f) 
case *KeyState: return VisitRefOfKeyState(in, f) + case *Kill: + return VisitRefOfKill(in, f) case *LagLeadExpr: return VisitRefOfLagLeadExpr(in, f) case *Limit: @@ -364,8 +366,6 @@ func VisitSQLNode(in SQLNode, f Visit) error { return VisitRefOfOrderByOption(in, f) case *OtherAdmin: return VisitRefOfOtherAdmin(in, f) - case *OtherRead: - return VisitRefOfOtherRead(in, f) case *OverClause: return VisitRefOfOverClause(in, f) case *ParenTableExpr: @@ -498,8 +498,8 @@ func VisitSQLNode(in SQLNode, f Visit) error { return VisitRefOfTableSpec(in, f) case *TablespaceOperation: return VisitRefOfTablespaceOperation(in, f) - case *TimestampFuncExpr: - return VisitRefOfTimestampFuncExpr(in, f) + case *TimestampDiffExpr: + return VisitRefOfTimestampDiffExpr(in, f) case *TrimFuncExpr: return VisitRefOfTrimFuncExpr(in, f) case *TruncateTable: @@ -790,6 +790,18 @@ func VisitRefOfAlterVschema(in *AlterVschema, f Visit) error { } return nil } +func VisitRefOfAnalyze(in *Analyze, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitTableName(in.Table, f); err != nil { + return err + } + return nil +} func VisitRefOfAndExpr(in *AndExpr, f Visit) error { if in == nil { return nil @@ -805,6 +817,18 @@ func VisitRefOfAndExpr(in *AndExpr, f Visit) error { } return nil } +func VisitRefOfAnyValue(in *AnyValue, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Arg, f); err != nil { + return err + } + return nil +} func VisitRefOfArgument(in *Argument, f Visit) error { if in == nil { return nil @@ -1313,36 +1337,6 @@ func VisitRefOfCurTimeFuncExpr(in *CurTimeFuncExpr, f Visit) error { } return nil } -func VisitRefOfDateAddExpr(in *DateAddExpr, f Visit) error { - if in == nil { - return nil - } - if cont, err := f(in); err != nil || !cont { - return err - } - if err := VisitExpr(in.Date, f); err != nil { - return err - } - if 
err := VisitExpr(in.Expr, f); err != nil { - return err - } - return nil -} -func VisitRefOfDateSubExpr(in *DateSubExpr, f Visit) error { - if in == nil { - return nil - } - if cont, err := f(in); err != nil || !cont { - return err - } - if err := VisitExpr(in.Date, f); err != nil { - return err - } - if err := VisitExpr(in.Expr, f); err != nil { - return err - } - return nil -} func VisitRefOfDeallocateStmt(in *DeallocateStmt, f Visit) error { if in == nil { return nil @@ -1590,27 +1584,6 @@ func VisitRefOfExtractValueExpr(in *ExtractValueExpr, f Visit) error { } return nil } -func VisitRefOfExtractedSubquery(in *ExtractedSubquery, f Visit) error { - if in == nil { - return nil - } - if cont, err := f(in); err != nil || !cont { - return err - } - if err := VisitExpr(in.Original, f); err != nil { - return err - } - if err := VisitRefOfSubquery(in.Subquery, f); err != nil { - return err - } - if err := VisitExpr(in.OtherSide, f); err != nil { - return err - } - if err := VisitExpr(in.alternative, f); err != nil { - return err - } - return nil -} func VisitRefOfFieldsClause(in *FieldsClause, f Visit) error { if in == nil { return nil @@ -2064,6 +2037,21 @@ func VisitRefOfInsertExpr(in *InsertExpr, f Visit) error { } return nil } +func VisitRefOfIntervalDateExpr(in *IntervalDateExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Date, f); err != nil { + return err + } + if err := VisitExpr(in.Interval, f); err != nil { + return err + } + return nil +} func VisitRefOfIntervalFuncExpr(in *IntervalFuncExpr, f Visit) error { if in == nil { return nil @@ -2519,6 +2507,15 @@ func VisitRefOfKeyState(in *KeyState, f Visit) error { } return nil } +func VisitRefOfKill(in *Kill, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} func VisitRefOfLagLeadExpr(in *LagLeadExpr, f Visit) error { if in == nil { 
return nil @@ -3001,15 +2998,6 @@ func VisitRefOfOtherAdmin(in *OtherAdmin, f Visit) error { } return nil } -func VisitRefOfOtherRead(in *OtherRead, f Visit) error { - if in == nil { - return nil - } - if cont, err := f(in); err != nil || !cont { - return err - } - return nil -} func VisitRefOfOverClause(in *OverClause, f Visit) error { if in == nil { return nil @@ -3974,7 +3962,7 @@ func VisitRefOfTablespaceOperation(in *TablespaceOperation, f Visit) error { } return nil } -func VisitRefOfTimestampFuncExpr(in *TimestampFuncExpr, f Visit) error { +func VisitRefOfTimestampDiffExpr(in *TimestampDiffExpr, f Visit) error { if in == nil { return nil } @@ -4444,6 +4432,8 @@ func VisitAggrFunc(in AggrFunc, f Visit) error { return nil } switch in := in.(type) { + case *AnyValue: + return VisitRefOfAnyValue(in, f) case *Avg: return VisitRefOfAvg(in, f) case *BitAnd: @@ -4542,6 +4532,8 @@ func VisitCallable(in Callable, f Visit) error { return nil } switch in := in.(type) { + case *AnyValue: + return VisitRefOfAnyValue(in, f) case *ArgumentLessWindowExpr: return VisitRefOfArgumentLessWindowExpr(in, f) case *Avg: @@ -4558,10 +4550,6 @@ func VisitCallable(in Callable, f Visit) error { return VisitRefOfCountStar(in, f) case *CurTimeFuncExpr: return VisitRefOfCurTimeFuncExpr(in, f) - case *DateAddExpr: - return VisitRefOfDateAddExpr(in, f) - case *DateSubExpr: - return VisitRefOfDateSubExpr(in, f) case *ExtractFuncExpr: return VisitRefOfExtractFuncExpr(in, f) case *ExtractValueExpr: @@ -4596,6 +4584,8 @@ func VisitCallable(in Callable, f Visit) error { return VisitRefOfGroupConcatExpr(in, f) case *InsertExpr: return VisitRefOfInsertExpr(in, f) + case *IntervalDateExpr: + return VisitRefOfIntervalDateExpr(in, f) case *IntervalFuncExpr: return VisitRefOfIntervalFuncExpr(in, f) case *JSONArrayExpr: @@ -4688,8 +4678,8 @@ func VisitCallable(in Callable, f Visit) error { return VisitRefOfSubstrExpr(in, f) case *Sum: return VisitRefOfSum(in, f) - case *TimestampFuncExpr: - return 
VisitRefOfTimestampFuncExpr(in, f) + case *TimestampDiffExpr: + return VisitRefOfTimestampDiffExpr(in, f) case *TrimFuncExpr: return VisitRefOfTrimFuncExpr(in, f) case *UpdateXMLExpr: @@ -4796,6 +4786,8 @@ func VisitExpr(in Expr, f Visit) error { switch in := in.(type) { case *AndExpr: return VisitRefOfAndExpr(in, f) + case *AnyValue: + return VisitRefOfAnyValue(in, f) case *Argument: return VisitRefOfArgument(in, f) case *ArgumentLessWindowExpr: @@ -4838,10 +4830,6 @@ func VisitExpr(in Expr, f Visit) error { return VisitRefOfCountStar(in, f) case *CurTimeFuncExpr: return VisitRefOfCurTimeFuncExpr(in, f) - case *DateAddExpr: - return VisitRefOfDateAddExpr(in, f) - case *DateSubExpr: - return VisitRefOfDateSubExpr(in, f) case *Default: return VisitRefOfDefault(in, f) case *ExistsExpr: @@ -4850,8 +4838,6 @@ func VisitExpr(in Expr, f Visit) error { return VisitRefOfExtractFuncExpr(in, f) case *ExtractValueExpr: return VisitRefOfExtractValueExpr(in, f) - case *ExtractedSubquery: - return VisitRefOfExtractedSubquery(in, f) case *FirstOrLastValueExpr: return VisitRefOfFirstOrLastValueExpr(in, f) case *FuncExpr: @@ -4882,6 +4868,8 @@ func VisitExpr(in Expr, f Visit) error { return VisitRefOfGroupConcatExpr(in, f) case *InsertExpr: return VisitRefOfInsertExpr(in, f) + case *IntervalDateExpr: + return VisitRefOfIntervalDateExpr(in, f) case *IntervalFuncExpr: return VisitRefOfIntervalFuncExpr(in, f) case *IntroducerExpr: @@ -5002,8 +4990,8 @@ func VisitExpr(in Expr, f Visit) error { return VisitRefOfSubstrExpr(in, f) case *Sum: return VisitRefOfSum(in, f) - case *TimestampFuncExpr: - return VisitRefOfTimestampFuncExpr(in, f) + case *TimestampDiffExpr: + return VisitRefOfTimestampDiffExpr(in, f) case *TrimFuncExpr: return VisitRefOfTrimFuncExpr(in, f) case *UnaryExpr: @@ -5122,6 +5110,8 @@ func VisitStatement(in Statement, f Visit) error { return VisitRefOfAlterView(in, f) case *AlterVschema: return VisitRefOfAlterVschema(in, f) + case *Analyze: + return VisitRefOfAnalyze(in, 
f) case *Begin: return VisitRefOfBegin(in, f) case *CallProc: @@ -5156,6 +5146,8 @@ func VisitStatement(in Statement, f Visit) error { return VisitRefOfFlush(in, f) case *Insert: return VisitRefOfInsert(in, f) + case *Kill: + return VisitRefOfKill(in, f) case *Load: return VisitRefOfLoad(in, f) case *LoadDataStmt: @@ -5164,8 +5156,6 @@ func VisitStatement(in Statement, f Visit) error { return VisitRefOfLockTables(in, f) case *OtherAdmin: return VisitRefOfOtherAdmin(in, f) - case *OtherRead: - return VisitRefOfOtherRead(in, f) case *PrepareStmt: return VisitRefOfPrepareStmt(in, f) case *PurgeBinaryLogs: diff --git a/go/vt/sqlparser/cached_size.go b/go/vt/sqlparser/cached_size.go index 7e13ccde9ab..465c6d0be06 100644 --- a/go/vt/sqlparser/cached_size.go +++ b/go/vt/sqlparser/cached_size.go @@ -301,6 +301,18 @@ func (cached *AlterVschema) CachedSize(alloc bool) int64 { size += cached.AutoIncSpec.CachedSize(true) return size } +func (cached *Analyze) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field Table vitess.io/vitess/go/vt/sqlparser.TableName + size += cached.Table.CachedSize(false) + return size +} func (cached *AndExpr) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -319,6 +331,20 @@ func (cached *AndExpr) CachedSize(alloc bool) int64 { } return size } +func (cached *AnyValue) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field Arg vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Arg.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} func (cached *Argument) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -635,7 +661,7 @@ func (cached *ColName) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(80) + size += int64(64) } // field Name vitess.io/vitess/go/vt/sqlparser.IdentifierCI size += 
cached.Name.CachedSize(false) @@ -1028,42 +1054,6 @@ func (cached *DatabaseOption) CachedSize(alloc bool) int64 { size += hack.RuntimeAllocSize(int64(len(cached.Value))) return size } -func (cached *DateAddExpr) CachedSize(alloc bool) int64 { - if cached == nil { - return int64(0) - } - size := int64(0) - if alloc { - size += int64(48) - } - // field Date vitess.io/vitess/go/vt/sqlparser.Expr - if cc, ok := cached.Date.(cachedObject); ok { - size += cc.CachedSize(true) - } - // field Expr vitess.io/vitess/go/vt/sqlparser.Expr - if cc, ok := cached.Expr.(cachedObject); ok { - size += cc.CachedSize(true) - } - return size -} -func (cached *DateSubExpr) CachedSize(alloc bool) int64 { - if cached == nil { - return int64(0) - } - size := int64(0) - if alloc { - size += int64(48) - } - // field Date vitess.io/vitess/go/vt/sqlparser.Expr - if cc, ok := cached.Date.(cachedObject); ok { - size += cc.CachedSize(true) - } - // field Expr vitess.io/vitess/go/vt/sqlparser.Expr - if cc, ok := cached.Expr.(cachedObject); ok { - size += cc.CachedSize(true) - } - return size -} func (cached *DeallocateStmt) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1337,34 +1327,6 @@ func (cached *ExtractValueExpr) CachedSize(alloc bool) int64 { } return size } -func (cached *ExtractedSubquery) CachedSize(alloc bool) int64 { - if cached == nil { - return int64(0) - } - size := int64(0) - if alloc { - size += int64(112) - } - // field Original vitess.io/vitess/go/vt/sqlparser.Expr - if cc, ok := cached.Original.(cachedObject); ok { - size += cc.CachedSize(true) - } - // field Subquery *vitess.io/vitess/go/vt/sqlparser.Subquery - size += cached.Subquery.CachedSize(true) - // field OtherSide vitess.io/vitess/go/vt/sqlparser.Expr - if cc, ok := cached.OtherSide.(cachedObject); ok { - size += cc.CachedSize(true) - } - // field hasValuesArg string - size += hack.RuntimeAllocSize(int64(len(cached.hasValuesArg))) - // field argName string - size += 
hack.RuntimeAllocSize(int64(len(cached.argName))) - // field alternative vitess.io/vitess/go/vt/sqlparser.Expr - if cc, ok := cached.alternative.(cachedObject); ok { - size += cc.CachedSize(true) - } - return size -} func (cached *FirstOrLastValueExpr) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1917,6 +1879,24 @@ func (cached *InsertExpr) CachedSize(alloc bool) int64 { } return size } +func (cached *IntervalDateExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field Date vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Date.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Interval vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Interval.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} func (cached *IntervalFuncExpr) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -2550,6 +2530,16 @@ func (cached *KeyState) CachedSize(alloc bool) int64 { } return size } +func (cached *Kill) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + return size +} func (cached *LagLeadExpr) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -4133,16 +4123,14 @@ func (cached *TablespaceOperation) CachedSize(alloc bool) int64 { } return size } -func (cached *TimestampFuncExpr) CachedSize(alloc bool) int64 { +func (cached *TimestampDiffExpr) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } size := int64(0) if alloc { - size += int64(64) + size += int64(48) } - // field Name string - size += hack.RuntimeAllocSize(int64(len(cached.Name))) // field Expr1 vitess.io/vitess/go/vt/sqlparser.Expr if cc, ok := cached.Expr1.(cachedObject); ok { size += cc.CachedSize(true) @@ -4151,8 +4139,6 @@ func (cached *TimestampFuncExpr) CachedSize(alloc bool) int64 { if cc, ok := 
cached.Expr2.(cachedObject); ok { size += cc.CachedSize(true) } - // field Unit string - size += hack.RuntimeAllocSize(int64(len(cached.Unit))) return size } func (cached *TrimFuncExpr) CachedSize(alloc bool) int64 { diff --git a/go/vt/sqlparser/comments.go b/go/vt/sqlparser/comments.go index 911498d8242..4ecf7b1b293 100644 --- a/go/vt/sqlparser/comments.go +++ b/go/vt/sqlparser/comments.go @@ -273,6 +273,13 @@ func (c *ParsedComments) Length() int { return len(c.comments) } +func (c *ParsedComments) GetComments() Comments { + if c != nil { + return c.comments + } + return nil +} + func (c *ParsedComments) Prepend(comment string) Comments { if c == nil { return Comments{comment} diff --git a/go/vt/sqlparser/constants.go b/go/vt/sqlparser/constants.go index aa99f18a502..1be3124aa24 100644 --- a/go/vt/sqlparser/constants.go +++ b/go/vt/sqlparser/constants.go @@ -16,6 +16,8 @@ limitations under the License. package sqlparser +import "vitess.io/vitess/go/mysql/datetime" + // String constants to be used in ast. const ( // Select.Distinct @@ -63,7 +65,9 @@ const ( AddColVindexStr = "on table add vindex" DropColVindexStr = "on table drop vindex" AddSequenceStr = "add sequence" + DropSequenceStr = "drop sequence" AddAutoIncStr = "add auto_increment" + DropAutoIncStr = "drop auto_increment" // ALTER TABLE ALGORITHM string. 
DefaultStr = "default" @@ -209,7 +213,7 @@ const ( Utf16Str = "_utf16" Utf16leStr = "_utf16le" Utf32Str = "_utf32" - Utf8Str = "_utf8" + Utf8mb3Str = "_utf8mb3" Utf8mb4Str = "_utf8mb4" NStringStr = "N" @@ -401,28 +405,6 @@ const ( DefaultTypeStr = "default" ExclusiveTypeStr = "exclusive" - // IntervalTypes strings - DayStr = "day" - WeekStr = "week" - MonthStr = "month" - YearStr = "year" - DayHourStr = "day_hour" - DayMicrosecondStr = "day_microsecond" - DayMinuteStr = "day_minute" - DaySecondStr = "day_second" - HourStr = "hour" - HourMicrosecondStr = "hour_microsecond" - HourMinuteStr = "hour_minute" - HourSecondStr = "hour_second" - MicrosecondStr = "microsecond" - MinuteStr = "minute" - MinuteMicrosecondStr = "minute_microsecond" - MinuteSecondStr = "minute_second" - QuarterStr = "quarter" - SecondStr = "second" - SecondMicrosecondStr = "second_microsecond" - YearMonthStr = "year_month" - // GeomeFromWktType strings GeometryFromTextStr = "st_geometryfromtext" GeometryCollectionFromTextStr = "st_geometrycollectionfromtext" @@ -483,6 +465,10 @@ const ( LatitudeFromHashStr = "st_latfromgeohash" LongitudeFromHashStr = "st_longfromgeohash" PointFromHashStr = "st_pointfromgeohash" + + // KillType strings + ConnectionStr = "connection" + QueryStr = "query" ) // Constants for Enum Type - Insert.Action @@ -505,7 +491,9 @@ const ( AddColVindexDDLAction DropColVindexDDLAction AddSequenceDDLAction + DropSequenceDDLAction AddAutoIncDDLAction + DropAutoIncDDLAction RevertDDLAction ) @@ -675,6 +663,38 @@ const ( NotRegexpOp ) +func Inverse(in ComparisonExprOperator) ComparisonExprOperator { + switch in { + case EqualOp: + return NotEqualOp + case LessThanOp: + return GreaterEqualOp + case GreaterThanOp: + return LessEqualOp + case LessEqualOp: + return GreaterThanOp + case GreaterEqualOp: + return LessThanOp + case NotEqualOp: + return EqualOp + case NullSafeEqualOp: + return NotEqualOp + case InOp: + return NotInOp + case NotInOp: + return InOp + case LikeOp: + return 
NotLikeOp + case NotLikeOp: + return LikeOp + case RegexpOp: + return NotRegexpOp + case NotRegexpOp: + return RegexpOp + } + panic("unreachable") +} + // Constant for Enum Type - IsExprOperator const ( IsNullOp IsExprOperator = iota @@ -912,46 +932,6 @@ const ( DefaultFormat ) -// Constants for Enum Type - DateAddExprType -const ( - AdddateType DateAddExprType = iota - DateAddType - PlusIntervalLeftType - PlusIntervalRightType -) - -// Constants for Enum Type - DateAddExprType -const ( - SubdateType DateSubExprType = iota - DateSubType - MinusIntervalRightType -) - -// IntervalTypes constants -const ( - IntervalUnknown IntervalTypes = iota - IntervalYear - IntervalQuarter - IntervalMonth - IntervalWeek - IntervalDay - IntervalHour - IntervalMinute - IntervalSecond - IntervalMicrosecond - IntervalYearMonth - IntervalDayHour - IntervalDayMinute - IntervalDaySecond - IntervalHourMinute - IntervalHourSecond - IntervalMinuteSecond - IntervalDayMicrosecond - IntervalHourMicrosecond - IntervalMinuteMicrosecond - IntervalSecondMicrosecond -) - // Transaction access mode const ( WithConsistentSnapshot TxAccessMode = iota @@ -1037,3 +1017,48 @@ const ( LongitudeFromHash PointFromHash ) + +// IntervalType constants +const ( + IntervalNone = datetime.IntervalNone + IntervalMicrosecond = datetime.IntervalMicrosecond + IntervalSecond = datetime.IntervalSecond + IntervalMinute = datetime.IntervalMinute + IntervalHour = datetime.IntervalHour + IntervalDay = datetime.IntervalDay + IntervalWeek = datetime.IntervalWeek + IntervalMonth = datetime.IntervalMonth + IntervalQuarter = datetime.IntervalQuarter + IntervalYear = datetime.IntervalYear + + IntervalSecondMicrosecond = datetime.IntervalSecondMicrosecond + IntervalMinuteMicrosecond = datetime.IntervalMinuteMicrosecond + IntervalMinuteSecond = datetime.IntervalMinuteSecond + IntervalHourMicrosecond = datetime.IntervalHourMicrosecond + IntervalHourSecond = datetime.IntervalHourSecond + IntervalHourMinute = 
datetime.IntervalHourMinute + IntervalDayMicrosecond = datetime.IntervalDayMicrosecond + IntervalDaySecond = datetime.IntervalDaySecond + IntervalDayMinute = datetime.IntervalDayMinute + IntervalDayHour = datetime.IntervalDayHour + IntervalYearMonth = datetime.IntervalYearMonth +) + +type IntervalExprSyntax int8 + +const ( + IntervalDateExprDateAdd IntervalExprSyntax = iota + IntervalDateExprDateSub + IntervalDateExprAdddate + IntervalDateExprSubdate + IntervalDateExprBinaryAdd + IntervalDateExprBinaryAddLeft + IntervalDateExprBinarySub + IntervalDateExprTimestampadd +) + +// Constant for Enum Type - KillType +const ( + ConnectionType KillType = iota + QueryType +) diff --git a/go/vt/sqlparser/keywords.go b/go/vt/sqlparser/keywords.go index 9169597f1e1..20adb161a16 100644 --- a/go/vt/sqlparser/keywords.go +++ b/go/vt/sqlparser/keywords.go @@ -126,6 +126,7 @@ var keywords = []keyword{ {"always", ALWAYS}, {"analyze", ANALYZE}, {"and", AND}, + {"any_value", ANY_VALUE}, {"array", ARRAY}, {"as", AS}, {"asc", ASC}, @@ -386,7 +387,7 @@ var keywords = []keyword{ {"keys", KEYS}, {"keyspaces", KEYSPACES}, {"key_block_size", KEY_BLOCK_SIZE}, - {"kill", UNUSED}, + {"kill", KILL}, {"lag", LAG}, {"language", LANGUAGE}, {"last", LAST}, @@ -588,6 +589,15 @@ var keywords = []keyword{ {"sql_calc_found_rows", SQL_CALC_FOUND_ROWS}, {"sql_no_cache", SQL_NO_CACHE}, {"sql_small_result", UNUSED}, + {"sql_tsi_day", SQL_TSI_DAY}, + {"sql_tsi_week", SQL_TSI_WEEK}, + {"sql_tsi_hour", SQL_TSI_HOUR}, + {"sql_tsi_minute", SQL_TSI_MINUTE}, + {"sql_tsi_month", SQL_TSI_MONTH}, + {"sql_tsi_quarter", SQL_TSI_QUARTER}, + {"sql_tsi_second", SQL_TSI_SECOND}, + {"sql_tsi_microsecond", SQL_TSI_MICROSECOND}, + {"sql_tsi_year", SQL_TSI_YEAR}, {"ssl", UNUSED}, {"start", START}, {"startpoint", ST_StartPoint}, diff --git a/go/vt/sqlparser/keywords_test.go b/go/vt/sqlparser/keywords_test.go index be7e5349318..0209ee20352 100644 --- a/go/vt/sqlparser/keywords_test.go +++ b/go/vt/sqlparser/keywords_test.go @@ 
-20,13 +20,11 @@ func TestKeywordTable(t *testing.T) { } var vitessReserved = map[string]bool{ - "ESCAPE": true, - "NEXT": true, - "OFF": true, - "SAVEPOINT": true, - "SQL_NO_CACHE": true, - "TIMESTAMPADD": true, - "TIMESTAMPDIFF": true, + "ESCAPE": true, + "NEXT": true, + "OFF": true, + "SAVEPOINT": true, + "SQL_NO_CACHE": true, } func TestCompatibility(t *testing.T) { diff --git a/go/vt/sqlparser/literal.go b/go/vt/sqlparser/literal.go new file mode 100644 index 00000000000..24613ff6e05 --- /dev/null +++ b/go/vt/sqlparser/literal.go @@ -0,0 +1,123 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sqlparser + +import ( + "errors" + "fmt" + "math" + "math/big" + + "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/mysql/datetime" + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/fastparse" + "vitess.io/vitess/go/mysql/hex" + "vitess.io/vitess/go/sqltypes" +) + +func LiteralToValue(lit *Literal) (sqltypes.Value, error) { + switch lit.Type { + case IntVal: + uval, err := fastparse.ParseUint64(lit.Val, 10) + if err != nil { + if errors.Is(err, fastparse.ErrOverflow) { + return sqltypes.NewDecimal(lit.Val), nil + } + return sqltypes.Value{}, err + } + if uval <= math.MaxInt64 { + return sqltypes.NewInt64(int64(uval)), nil + } + return sqltypes.NewUint64(uval), nil + case FloatVal: + fval, err := fastparse.ParseFloat64(lit.Val) + if err != nil { + return sqltypes.Value{}, err + } + return sqltypes.NewFloat64(fval), nil + case DecimalVal: + dec, err := decimal.NewFromMySQL(lit.Bytes()) + if err != nil { + return sqltypes.Value{}, err + } + return sqltypes.NewDecimal(hack.String(dec.FormatMySQL(0))), nil + case StrVal: + return sqltypes.NewVarChar(lit.Val), nil + case HexNum: + b := lit.Bytes() + if b[0] != '0' || b[1] != 'x' { + return sqltypes.Value{}, fmt.Errorf("invalid hex literal: %v", lit.Val) + } + if len(lit.Val)%2 == 0 { + return parseHexLiteral(b[2:]) + } + // If the hex literal doesn't have an even amount of hex digits, we need + // to pad it with a '0' in the left. Instead of allocating a new slice + // for padding pad in-place by replacing the 'x' in the original slice with + // a '0', and clean it up after parsing. 
+ b[1] = '0' + defer func() { + b[1] = 'x' + }() + return parseHexLiteral(b[1:]) + case HexVal: + return parseHexLiteral(lit.Bytes()) + case BitVal: + return parseBitLiteral(lit.Bytes()) + case DateVal: + d, ok := datetime.ParseDate(lit.Val) + if !ok { + return sqltypes.Value{}, fmt.Errorf("invalid time literal: %v", lit.Val) + } + buf := datetime.Date_YYYY_MM_DD.Format(datetime.DateTime{Date: d}, 0) + return sqltypes.NewDate(hack.String(buf)), nil + case TimeVal: + t, l, ok := datetime.ParseTime(lit.Val, -1) + if !ok { + return sqltypes.Value{}, fmt.Errorf("invalid time literal: %v", lit.Val) + } + buf := datetime.Time_hh_mm_ss.Format(datetime.DateTime{Time: t}, uint8(l)) + return sqltypes.NewTime(hack.String(buf)), nil + case TimestampVal: + dt, l, ok := datetime.ParseDateTime(lit.Val, -1) + if !ok { + return sqltypes.Value{}, fmt.Errorf("invalid time literal: %v", lit.Val) + } + buf := datetime.DateTime_YYYY_MM_DD_hh_mm_ss.Format(dt, uint8(l)) + return sqltypes.NewDatetime(hack.String(buf)), nil + default: + return sqltypes.Value{}, fmt.Errorf("unsupported literal type: %v", lit.Type) + } +} + +func parseHexLiteral(val []byte) (sqltypes.Value, error) { + raw := make([]byte, hex.DecodedLen(val)) + if err := hex.DecodeBytes(raw, val); err != nil { + return sqltypes.Value{}, err + } + return sqltypes.NewVarBinary(hack.String(raw)), nil +} + +func parseBitLiteral(val []byte) (sqltypes.Value, error) { + var i big.Int + _, ok := i.SetString(string(val), 2) + if !ok { + return sqltypes.Value{}, fmt.Errorf("invalid bit literal: %v", val) + } + return sqltypes.NewVarBinary(hack.String(i.Bytes())), nil +} diff --git a/go/vt/sqlparser/normalizer.go b/go/vt/sqlparser/normalizer.go index 299f58e016d..a9d70d5e190 100644 --- a/go/vt/sqlparser/normalizer.go +++ b/go/vt/sqlparser/normalizer.go @@ -80,7 +80,7 @@ func (nz *normalizer) walkStatementUp(cursor *Cursor) bool { func (nz *normalizer) walkStatementDown(node, parent SQLNode) bool { switch node := node.(type) { // no need 
to normalize the statement types - case *Set, *Show, *Begin, *Commit, *Rollback, *Savepoint, DDLStatement, *SRollback, *Release, *OtherAdmin, *OtherRead: + case *Set, *Show, *Begin, *Commit, *Rollback, *Savepoint, DDLStatement, *SRollback, *Release, *OtherAdmin, *Analyze: return false case *Select: _, isDerived := parent.(*DerivedTable) diff --git a/go/vt/sqlparser/normalizer_test.go b/go/vt/sqlparser/normalizer_test.go index 8e40dfe9f1a..2b0a4b52122 100644 --- a/go/vt/sqlparser/normalizer_test.go +++ b/go/vt/sqlparser/normalizer_test.go @@ -436,6 +436,34 @@ func TestNormalizeValidSQL(t *testing.T) { } } +func TestNormalizeOneCasae(t *testing.T) { + testOne := struct { + input, output string + }{ + input: "", + output: "", + } + if testOne.input == "" { + t.Skip("empty test case") + } + tree, err := Parse(testOne.input) + require.NoError(t, err, testOne.input) + // Skip the test for the queries that do not run the normalizer + if !CanNormalize(tree) { + return + } + bv := make(map[string]*querypb.BindVariable) + known := make(BindVars) + err = Normalize(tree, NewReservedVars("vtg", known), bv) + require.NoError(t, err) + normalizerOutput := String(tree) + if normalizerOutput == "otheradmin" || normalizerOutput == "otherread" { + return + } + _, err = Parse(normalizerOutput) + require.NoError(t, err, normalizerOutput) +} + func TestGetBindVars(t *testing.T) { stmt, err := Parse("select * from t where :v1 = :v2 and :v2 = :v3 and :v4 in ::v5") if err != nil { diff --git a/go/vt/sqlparser/parse_next_test.go b/go/vt/sqlparser/parse_next_test.go index 149dc0bb067..2e55fbb8a9a 100644 --- a/go/vt/sqlparser/parse_next_test.go +++ b/go/vt/sqlparser/parse_next_test.go @@ -36,8 +36,7 @@ func TestParseNextValid(t *testing.T) { } tokens := NewStringTokenizer(sql.String()) - for i, tcase := range validSQL { - input := tcase.input + ";" + for _, tcase := range validSQL { want := tcase.output if want == "" { want = tcase.input @@ -45,16 +44,12 @@ func TestParseNextValid(t 
*testing.T) { tree, err := ParseNext(tokens) require.NoError(t, err) - - if got := String(tree); got != want { - t.Fatalf("[%d] ParseNext(%q) = %q, want %q", i, input, got, want) - } + require.Equal(t, want, String(tree)) } // Read once more and it should be EOF. - if tree, err := ParseNext(tokens); err != io.EOF { - t.Errorf("ParseNext(tokens) = (%q, %v) want io.EOF", String(tree), err) - } + tree, err := ParseNext(tokens) + require.ErrorIsf(t, err, io.EOF, "ParseNext(tokens) = (%q, %v) want io.EOF", String(tree), err) } func TestIgnoreSpecialComments(t *testing.T) { diff --git a/go/vt/sqlparser/parse_test.go b/go/vt/sqlparser/parse_test.go index fb621b5bdf7..cb8e9bb3807 100644 --- a/go/vt/sqlparser/parse_test.go +++ b/go/vt/sqlparser/parse_test.go @@ -44,17 +44,25 @@ var ( partialDDL bool ignoreNormalizerTest bool }{{ + input: "select * from foo limit 5 + 5", + }, { input: "create table x(location GEOMETRYCOLLECTION DEFAULT (POINT(7.0, 3.0)))", output: "create table x (\n\tlocation GEOMETRYCOLLECTION default (point(7.0, 3.0))\n)", }, { input: "create table t (id int primary key, dt datetime DEFAULT (CURRENT_TIMESTAMP))", - output: "create table t (\n\tid int primary key,\n\tdt datetime default current_timestamp()\n)", + output: "create table t (\n\tid int primary key,\n\tdt datetime default (current_timestamp())\n)", }, { input: "create table t (id int primary key, dt datetime DEFAULT now())", output: "create table t (\n\tid int primary key,\n\tdt datetime default now()\n)", }, { input: "create table t (id int primary key, dt datetime DEFAULT (now()))", - output: "create table t (\n\tid int primary key,\n\tdt datetime default now()\n)", + output: "create table t (\n\tid int primary key,\n\tdt datetime default (now())\n)", + }, { + input: "create table t (id int primary key, dt datetime(6) DEFAULT (now()))", + output: "create table t (\n\tid int primary key,\n\tdt datetime(6) default (now())\n)", + }, { + input: "create table t (id int primary key, dt datetime 
DEFAULT (now() + 1))", + output: "create table t (\n\tid int primary key,\n\tdt datetime default (now() + 1)\n)", }, { input: "create table x (e enum('red','yellow') null collate 'utf8_bin')", output: "create table x (\n\te enum('red', 'yellow') collate 'utf8_bin' null\n)", @@ -93,52 +101,52 @@ var ( output: "select extract(microsecond from '2003-01-02 10:30:00.000123') from dual", }, { input: "CREATE TABLE t2 (b BLOB DEFAULT 'abc')", - output: "create table t2 (\n\tb BLOB default ('abc')\n)", + output: "create table t2 (\n\tb BLOB default 'abc'\n)", }, { input: "CREATE TABLE t2 (b blob DEFAULT 'abc')", - output: "create table t2 (\n\tb blob default ('abc')\n)", + output: "create table t2 (\n\tb blob default 'abc'\n)", }, { input: "CREATE TABLE t2 (b BLOB DEFAULT ('abc'))", output: "create table t2 (\n\tb BLOB default ('abc')\n)", }, { input: "CREATE TABLE t2 (b TINYBLOB DEFAULT 'abc')", - output: "create table t2 (\n\tb TINYBLOB default ('abc')\n)", + output: "create table t2 (\n\tb TINYBLOB default 'abc'\n)", }, { input: "CREATE TABLE t2 (b TINYBLOB DEFAULT ('abc'))", output: "create table t2 (\n\tb TINYBLOB default ('abc')\n)", }, { input: "CREATE TABLE t2 (b MEDIUMBLOB DEFAULT 'abc')", - output: "create table t2 (\n\tb MEDIUMBLOB default ('abc')\n)", + output: "create table t2 (\n\tb MEDIUMBLOB default 'abc'\n)", }, { input: "CREATE TABLE t2 (b MEDIUMBLOB DEFAULT ('abc'))", output: "create table t2 (\n\tb MEDIUMBLOB default ('abc')\n)", }, { input: "CREATE TABLE t2 (b LONGBLOB DEFAULT 'abc')", - output: "create table t2 (\n\tb LONGBLOB default ('abc')\n)", + output: "create table t2 (\n\tb LONGBLOB default 'abc'\n)", }, { input: "CREATE TABLE t2 (b LONGBLOB DEFAULT ('abc'))", output: "create table t2 (\n\tb LONGBLOB default ('abc')\n)", }, { input: "CREATE TABLE t2 (b TEXT DEFAULT 'abc')", - output: "create table t2 (\n\tb TEXT default ('abc')\n)", + output: "create table t2 (\n\tb TEXT default 'abc'\n)", }, { input: "CREATE TABLE t2 (b TEXT DEFAULT ('abc'))", 
output: "create table t2 (\n\tb TEXT default ('abc')\n)", }, { input: "CREATE TABLE t2 (b TINYTEXT DEFAULT 'abc')", - output: "create table t2 (\n\tb TINYTEXT default ('abc')\n)", + output: "create table t2 (\n\tb TINYTEXT default 'abc'\n)", }, { input: "CREATE TABLE t2 (b TINYTEXT DEFAULT ('abc'))", output: "create table t2 (\n\tb TINYTEXT default ('abc')\n)", }, { input: "CREATE TABLE t2 (b MEDIUMTEXT DEFAULT 'abc')", - output: "create table t2 (\n\tb MEDIUMTEXT default ('abc')\n)", + output: "create table t2 (\n\tb MEDIUMTEXT default 'abc'\n)", }, { input: "CREATE TABLE t2 (b MEDIUMTEXT DEFAULT ('abc'))", output: "create table t2 (\n\tb MEDIUMTEXT default ('abc')\n)", }, { input: "CREATE TABLE t2 (b LONGTEXT DEFAULT 'abc')", - output: "create table t2 (\n\tb LONGTEXT default ('abc')\n)", + output: "create table t2 (\n\tb LONGTEXT default 'abc'\n)", }, { input: "CREATE TABLE t2 (b LONGTEXT DEFAULT ('abc'))", output: "create table t2 (\n\tb LONGTEXT default ('abc')\n)", @@ -147,16 +155,16 @@ var ( output: "create table t2 (\n\tb JSON default null\n)", }, { input: "CREATE TABLE t2 (b JSON DEFAULT (null))", - output: "create table t2 (\n\tb JSON default null\n)", + output: "create table t2 (\n\tb JSON default (null)\n)", }, { input: "CREATE TABLE t2 (b JSON DEFAULT '{name:abc}')", - output: "create table t2 (\n\tb JSON default ('{name:abc}')\n)", + output: "create table t2 (\n\tb JSON default '{name:abc}'\n)", }, { input: "CREATE TABLE t2 (b JSON DEFAULT ('{name:abc}'))", output: "create table t2 (\n\tb JSON default ('{name:abc}')\n)", }, { input: "create table x(location POINT DEFAULT 7.0)", - output: "create table x (\n\tlocation POINT default (7.0)\n)", + output: "create table x (\n\tlocation POINT default 7.0\n)", }, { input: "create table x(location POINT DEFAULT (7.0))", output: "create table x (\n\tlocation POINT default (7.0)\n)", @@ -1160,10 +1168,10 @@ var ( input: "select /* interval keyword */ adddate('2008-01-02', interval 1 year) from t", }, { input: 
"select /* TIMESTAMPADD */ TIMESTAMPADD(MINUTE, 1, '2008-01-04') from t", - output: "select /* TIMESTAMPADD */ timestampadd(MINUTE, 1, '2008-01-04') from t", + output: "select /* TIMESTAMPADD */ timestampadd(minute, 1, '2008-01-04') from t", }, { input: "select /* TIMESTAMPDIFF */ TIMESTAMPDIFF(MINUTE, '2008-01-02', '2008-01-04') from t", - output: "select /* TIMESTAMPDIFF */ timestampdiff(MINUTE, '2008-01-02', '2008-01-04') from t", + output: "select /* TIMESTAMPDIFF */ timestampdiff(minute, '2008-01-02', '2008-01-04') from t", }, { input: "select DATE_ADD(MIN(FROM_UNIXTIME(1673444922)),interval -DAYOFWEEK(MIN(FROM_UNIXTIME(1673444922)))+1 DAY)", output: "select date_add(min(FROM_UNIXTIME(1673444922)), interval -DAYOFWEEK(min(FROM_UNIXTIME(1673444922))) + 1 day) from dual", @@ -2103,8 +2111,12 @@ var ( input: "drop index `PRIMARY` on a lock none", output: "alter table a drop primary key, lock none", }, { - input: "analyze table a", - output: "otherread", + input: "analyze table a", + }, { + input: "analyze NO_WRITE_TO_BINLOG table a", + output: "analyze local table a", + }, { + input: "analyze local table a", }, { input: "flush tables", }, { @@ -2599,7 +2611,8 @@ var ( }, { input: "select 1 from t where foo = _binary 'bar'", }, { - input: "select 1 from t where foo = _utf8 'bar' and bar = _latin1 'sjösjuk'", + input: "select 1 from t where foo = _utf8 'bar' and bar = _latin1 'sjösjuk'", + output: "select 1 from t where foo = _utf8mb3 'bar' and bar = _latin1 'sjösjuk'", }, { input: "select 1 from t where foo = _binary'bar'", output: "select 1 from t where foo = _binary 'bar'", @@ -2610,10 +2623,10 @@ var ( output: "select 1 from t where foo = _utf8mb4 'bar'", }, { input: "select 1 from t where foo = _utf8mb3 'bar'", - output: "select 1 from t where foo = _utf8 'bar'", + output: "select 1 from t where foo = _utf8mb3 'bar'", }, { - input: "select 1 from t where foo = _utf8mb3'bar'", - output: "select 1 from t where foo = _utf8 'bar'", + input: "select 1 from t where 
foo = _utf8'bar'", + output: "select 1 from t where foo = _utf8mb3 'bar'", }, { input: "select match(a) against ('foo') from t", }, { @@ -2641,6 +2654,8 @@ var ( }, { input: "select name, group_concat(distinct id, score order by id desc separator ':' limit 10, 2) from t group by name", output: "select `name`, group_concat(distinct id, score order by id desc separator ':' limit 10, 2) from t group by `name`", + }, { + input: "select foo, any_value(id) from tbl group by foo", }, { input: "select * from t partition (p0)", }, { @@ -3647,6 +3662,13 @@ var ( }, { input: `select * from t1 where col1 like 'ks\_' and col2 = 'ks\_' and col1 like 'ks_' and col2 = 'ks_'`, output: `select * from t1 where col1 like 'ks\_' and col2 = 'ks\_' and col1 like 'ks_' and col2 = 'ks_'`, + }, { + input: `kill connection 18446744073709551615`, + }, { + input: `kill query 18446744073709551615`, + }, { + input: `kill 18446744073709551615`, + output: `kill connection 18446744073709551615`, }} ) @@ -4027,13 +4049,13 @@ func TestIntroducers(t *testing.T) { output: "select _utf32 'x' from dual", }, { input: "select _utf8 'x'", - output: "select _utf8 'x' from dual", + output: "select _utf8mb3 'x' from dual", }, { input: "select _utf8mb4 'x'", output: "select _utf8mb4 'x' from dual", }, { input: "select _utf8mb3 'x'", - output: "select _utf8 'x' from dual", + output: "select _utf8mb3 'x' from dual", }} for _, tcase := range validSQL { t.Run(tcase.input, func(t *testing.T) { @@ -4991,7 +5013,7 @@ func TestCreateTable(t *testing.T) { output: `create table t ( time1 timestamp default now(), time2 timestamp default now(), - time3 timestamp default now(), + time3 timestamp default (now()), time4 timestamp default now() on update now(), time5 timestamp default now() on update now(), time6 timestamp(3) default now(3) on update now(3) @@ -6177,7 +6199,7 @@ func testFile(t *testing.T, filename, tempDir string) { if fail && tempDir != "" { gotFile := fmt.Sprintf("%s/%s", tempDir, filename) _ = 
os.WriteFile(gotFile, []byte(strings.TrimSpace(expected.String())+"\n"), 0644) - fmt.Println(fmt.Sprintf("Errors found in parse tests. If the output is correct, run `cp %s/* testdata/` to update test expectations", tempDir)) // nolint + fmt.Printf("Errors found in parse tests. If the output is correct, run `cp %s/* testdata/` to update test expectations\n", tempDir) } }) } diff --git a/go/vt/sqlparser/parsed_query.go b/go/vt/sqlparser/parsed_query.go index 1c9ad47010f..b6b03a1901a 100644 --- a/go/vt/sqlparser/parsed_query.go +++ b/go/vt/sqlparser/parsed_query.go @@ -129,15 +129,16 @@ func (pq *ParsedQuery) AppendFromRow(buf *bytes2.Buffer, fields []*querypb.Field case querypb.Type_TUPLE: return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected Type_TUPLE for value %d", i) case querypb.Type_JSON: - buf2 := sqltypes.NullBytes - if col.length >= 0 { - buf2 = row.Values[col.offset : col.offset+col.length] - } - vv, err := vjson.MarshalSQLValue(buf2) - if err != nil { - return err + if col.length < 0 { // An SQL NULL and not an actual JSON value + buf.WriteString(sqltypes.NullStr) + } else { // A JSON value (which may be a JSON null literal value) + buf2 := row.Values[col.offset : col.offset+col.length] + vv, err := vjson.MarshalSQLValue(buf2) + if err != nil { + return err + } + buf.WriteString(vv.RawStr()) } - buf.WriteString(vv.RawStr()) default: if col.length < 0 { // -1 means a null variable; serialize it directly diff --git a/go/vt/sqlparser/precedence.go b/go/vt/sqlparser/precedence.go index cadf0d38261..ec590b23f95 100644 --- a/go/vt/sqlparser/precedence.go +++ b/go/vt/sqlparser/precedence.go @@ -86,8 +86,6 @@ func precedenceFor(in Expr) Precendence { case BangOp: return P3 } - case *ExtractedSubquery: - return precedenceFor(node.alternative) } return Syntactic diff --git a/go/vt/sqlparser/precedence_test.go b/go/vt/sqlparser/precedence_test.go index 215c9480823..a6cbffee351 100644 --- a/go/vt/sqlparser/precedence_test.go +++ 
b/go/vt/sqlparser/precedence_test.go @@ -18,10 +18,10 @@ package sqlparser import ( "fmt" + "math/rand" "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -66,49 +66,6 @@ func TestAndOrPrecedence(t *testing.T) { } } -func TestNotInSubqueryPrecedence(t *testing.T) { - tree, err := Parse("select * from a where not id in (select 42)") - require.NoError(t, err) - not := tree.(*Select).Where.Expr.(*NotExpr) - cmp := not.Expr.(*ComparisonExpr) - subq := cmp.Right.(*Subquery) - - extracted := &ExtractedSubquery{ - Original: cmp, - OpCode: 1, - Subquery: subq, - OtherSide: cmp.Left, - } - extracted.SetArgName("arg1") - extracted.SetHasValuesArg("has_values1") - - not.Expr = extracted - output := readable(not) - assert.Equal(t, "not (:has_values1 = 1 and id in ::arg1)", output) -} - -func TestSubqueryPrecedence(t *testing.T) { - tree, err := Parse("select * from a where id in (select 42) and false") - require.NoError(t, err) - where := tree.(*Select).Where - andExpr := where.Expr.(*AndExpr) - cmp := andExpr.Left.(*ComparisonExpr) - subq := cmp.Right.(*Subquery) - - extracted := &ExtractedSubquery{ - Original: andExpr.Left, - OpCode: 1, - Subquery: subq, - OtherSide: cmp.Left, - } - extracted.SetArgName("arg1") - extracted.SetHasValuesArg("has_values1") - - andExpr.Left = extracted - output := readable(extracted) - assert.Equal(t, ":has_values1 = 1 and id in ::arg1", output) -} - func TestPlusStarPrecedence(t *testing.T) { validSQL := []struct { input string @@ -215,8 +172,9 @@ func TestRandom(t *testing.T) { // The purpose of this test is to find discrepancies between Format and parsing. If for example our precedence rules are not consistent between the two, this test should find it. // The idea is to generate random queries, and pass them through the parser and then the unparser, and one more time. The result of the first unparse should be the same as the second result. 
seed := time.Now().UnixNano() - fmt.Println(fmt.Sprintf("seed is %d", seed)) // nolint - g := newGenerator(seed, 5) + r := rand.New(rand.NewSource(seed)) + fmt.Printf("seed is %d\n", seed) + g := NewGenerator(r, 5) endBy := time.Now().Add(1 * time.Second) for { @@ -224,7 +182,7 @@ func TestRandom(t *testing.T) { break } // Given a random expression - randomExpr := g.expression() + randomExpr := g.Expression(ExprGeneratorConfig{}) inputQ := "select " + String(randomExpr) + " from t" // When it's parsed and unparsed diff --git a/go/vt/sqlparser/predicate_rewriting.go b/go/vt/sqlparser/predicate_rewriting.go index 0348f95f115..40e9a953f57 100644 --- a/go/vt/sqlparser/predicate_rewriting.go +++ b/go/vt/sqlparser/predicate_rewriting.go @@ -43,10 +43,6 @@ func RewritePredicate(ast SQLNode) SQLNode { exprChanged = true cursor.Replace(rewritten) } - - if col, isCol := cursor.node.(*ColName); isCol { - col.Metadata = nil - } return !exprChanged }) diff --git a/go/vt/sqlparser/random_expr.go b/go/vt/sqlparser/random_expr.go index e2725f37a37..6eed8145ed2 100644 --- a/go/vt/sqlparser/random_expr.go +++ b/go/vt/sqlparser/random_expr.go @@ -23,34 +23,124 @@ import ( // This file is used to generate random expressions to be used for testing -func newGenerator(seed int64, maxDepth int) *generator { - g := generator{ - seed: seed, - r: rand.New(rand.NewSource(seed)), - maxDepth: maxDepth, +// Constants for Enum Type - AggregateRule +const ( + CannotAggregate AggregateRule = iota + CanAggregate + IsAggregate +) + +type ( + ExprGenerator interface { + Generate(r *rand.Rand, config ExprGeneratorConfig) Expr } - return &g + + QueryGenerator interface { + IsQueryGenerator() + ExprGenerator + } + + AggregateRule int8 + + ExprGeneratorConfig struct { + // AggrRule determines if the random expression can, cannot, or must be an aggregation expression + AggrRule AggregateRule + Type string + // MaxCols = 0 indicates no limit + NumCols int + // SingleRow indicates that the query must have at 
most one row + SingleRow bool + } + + Generator struct { + r *rand.Rand + depth int + maxDepth int + isAggregate bool + exprGenerators []ExprGenerator + } +) + +func NewExprGeneratorConfig(aggrRule AggregateRule, typ string, numCols int, singleRow bool) ExprGeneratorConfig { + return ExprGeneratorConfig{ + AggrRule: aggrRule, + Type: typ, + NumCols: numCols, + SingleRow: singleRow, + } +} + +func (egc ExprGeneratorConfig) SingleRowConfig() ExprGeneratorConfig { + egc.SingleRow = true + return egc +} + +func (egc ExprGeneratorConfig) MultiRowConfig() ExprGeneratorConfig { + egc.SingleRow = false + return egc +} + +func (egc ExprGeneratorConfig) SetNumCols(numCols int) ExprGeneratorConfig { + egc.NumCols = numCols + return egc +} + +func (egc ExprGeneratorConfig) boolTypeConfig() ExprGeneratorConfig { + egc.Type = "tinyint" + return egc +} + +func (egc ExprGeneratorConfig) intTypeConfig() ExprGeneratorConfig { + egc.Type = "bigint" + return egc +} + +func (egc ExprGeneratorConfig) stringTypeConfig() ExprGeneratorConfig { + egc.Type = "varchar" + return egc +} + +func (egc ExprGeneratorConfig) anyTypeConfig() ExprGeneratorConfig { + egc.Type = "" + return egc +} + +func (egc ExprGeneratorConfig) CannotAggregateConfig() ExprGeneratorConfig { + egc.AggrRule = CannotAggregate + return egc +} + +func (egc ExprGeneratorConfig) CanAggregateConfig() ExprGeneratorConfig { + egc.AggrRule = CanAggregate + return egc } -type generator struct { - seed int64 - r *rand.Rand - depth int - maxDepth int +func (egc ExprGeneratorConfig) IsAggregateConfig() ExprGeneratorConfig { + egc.AggrRule = IsAggregate + return egc +} + +func NewGenerator(r *rand.Rand, maxDepth int, exprGenerators ...ExprGenerator) *Generator { + g := Generator{ + r: r, + maxDepth: maxDepth, + exprGenerators: exprGenerators, + } + return &g } // enter should be called whenever we are producing an intermediate node. 
it should be followed by a `defer g.exit()` -func (g *generator) enter() { +func (g *Generator) enter() { g.depth++ } // exit should be called when exiting an intermediate node -func (g *generator) exit() { +func (g *Generator) exit() { g.depth-- } // atMaxDepth returns true if we have reached the maximum allowed depth or the expression tree -func (g *generator) atMaxDepth() bool { +func (g *Generator) atMaxDepth() bool { return g.depth >= g.maxDepth } @@ -58,147 +148,299 @@ func (g *generator) atMaxDepth() bool { Creates a random expression. It builds an expression tree using the following constructs: - true/false - AND/OR/NOT - - string literalrs, numeric literals (-/+ 1000) + - string literals, numeric literals (-/+ 1000) + - columns of types bigint and varchar + - scalar and tuple subqueries - =, >, <, >=, <=, <=>, != - &, |, ^, +, -, *, /, div, %, <<, >> - IN, BETWEEN and CASE - IS NULL, IS NOT NULL, IS TRUE, IS NOT TRUE, IS FALSE, IS NOT FALSE + Returns the random expression (Expr) and its type (string) Note: It's important to update this method so that it produces all expressions that need precedence checking. 
It's currently missing function calls and string operators */ -func (g *generator) expression() Expr { - if g.randomBool() { - return g.booleanExpr() +func (g *Generator) Expression(genConfig ExprGeneratorConfig) Expr { + var options []exprF + // this will only be used for tuple expressions, everything else will need genConfig.NumCols = 1 + numCols := genConfig.NumCols + genConfig = genConfig.SetNumCols(1) + + switch genConfig.Type { + case "bigint": + options = append(options, func() Expr { return g.intExpr(genConfig) }) + case "varchar": + options = append(options, func() Expr { return g.stringExpr(genConfig) }) + case "tinyint": + options = append(options, func() Expr { return g.booleanExpr(genConfig) }) + case "": + options = append(options, []exprF{ + func() Expr { return g.intExpr(genConfig) }, + func() Expr { return g.stringExpr(genConfig) }, + func() Expr { return g.booleanExpr(genConfig) }, + }...) + } + + for i := range g.exprGenerators { + generator := g.exprGenerators[i] + if generator == nil { + continue + } + + // don't create expressions from the expression exprGenerators if we haven't created an aggregation yet + if _, ok := generator.(QueryGenerator); ok || genConfig.AggrRule != IsAggregate { + options = append(options, func() Expr { + expr := generator.Generate(g.r, genConfig) + if expr == nil { + return g.randomLiteral() + } + return expr + }) + } + } + + if genConfig.AggrRule != CannotAggregate { + options = append(options, func() Expr { + g.isAggregate = true + return g.randomAggregate(genConfig.CannotAggregateConfig()) + }) + } + + // if an arbitrary number of columns may be generated, randomly choose 1-3 columns + if numCols == 0 { + numCols = g.r.Intn(3) + 1 + } + + if numCols == 1 { + return g.makeAggregateIfNecessary(genConfig, g.randomOf(options)) + } + + // with 1/5 probability choose a tuple subquery + if g.randomBool(0.2) { + return g.subqueryExpr(genConfig.SetNumCols(numCols)) + } + + tuple := ValTuple{} + for i := 0; i < numCols; i++ 
{ + tuple = append(tuple, g.makeAggregateIfNecessary(genConfig, g.randomOf(options))) + } + + return tuple +} + +// makeAggregateIfNecessary is a failsafe to make sure an IsAggregate expression is in fact an aggregation +func (g *Generator) makeAggregateIfNecessary(genConfig ExprGeneratorConfig, expr Expr) Expr { + // if the generated expression must be an aggregate, and it is not, + // tack on an extra "and count(*)" to make it aggregate + if genConfig.AggrRule == IsAggregate && !g.isAggregate && g.depth == 0 { + expr = &AndExpr{ + Left: expr, + Right: &CountStar{}, + } + g.isAggregate = true } + + return expr +} + +func (g *Generator) randomAggregate(genConfig ExprGeneratorConfig) Expr { + isDistinct := g.r.Intn(10) < 1 + options := []exprF{ - func() Expr { return g.intExpr() }, - func() Expr { return g.stringExpr() }, - func() Expr { return g.booleanExpr() }, + func() Expr { return &CountStar{} }, + func() Expr { return &Count{Args: Exprs{g.Expression(genConfig.anyTypeConfig())}, Distinct: isDistinct} }, + func() Expr { return &Sum{Arg: g.Expression(genConfig), Distinct: isDistinct} }, + func() Expr { return &Min{Arg: g.Expression(genConfig), Distinct: isDistinct} }, + func() Expr { return &Max{Arg: g.Expression(genConfig), Distinct: isDistinct} }, } + g.isAggregate = true return g.randomOf(options) } -func (g *generator) booleanExpr() Expr { +func (g *Generator) booleanExpr(genConfig ExprGeneratorConfig) Expr { if g.atMaxDepth() { return g.booleanLiteral() } + genConfig = genConfig.boolTypeConfig() + options := []exprF{ - func() Expr { return g.andExpr() }, - func() Expr { return g.xorExpr() }, - func() Expr { return g.orExpr() }, - func() Expr { return g.comparison(g.intExpr) }, - func() Expr { return g.comparison(g.stringExpr) }, - //func() Expr { return g.comparison(g.booleanExpr) }, // this is not accepted by the parser - func() Expr { return g.inExpr() }, - func() Expr { return g.between() }, - func() Expr { return g.isExpr() }, - func() Expr { return 
g.notExpr() }, - func() Expr { return g.likeExpr() }, + func() Expr { return g.andExpr(genConfig) }, + func() Expr { return g.xorExpr(genConfig) }, + func() Expr { return g.orExpr(genConfig) }, + func() Expr { return g.comparison(genConfig.intTypeConfig()) }, + func() Expr { return g.comparison(genConfig.stringTypeConfig()) }, + //func() Expr { return g.comparison(genConfig) }, // this is not accepted by the parser + func() Expr { return g.inExpr(genConfig) }, + func() Expr { return g.existsExpr(genConfig) }, + func() Expr { return g.between(genConfig.intTypeConfig()) }, + func() Expr { return g.isExpr(genConfig) }, + func() Expr { return g.notExpr(genConfig) }, + func() Expr { return g.likeExpr(genConfig.stringTypeConfig()) }, } return g.randomOf(options) } -func (g *generator) intExpr() Expr { +func (g *Generator) intExpr(genConfig ExprGeneratorConfig) Expr { if g.atMaxDepth() { return g.intLiteral() } + genConfig = genConfig.intTypeConfig() + options := []exprF{ - func() Expr { return g.arithmetic() }, - func() Expr { return g.intLiteral() }, - func() Expr { return g.caseExpr(g.intExpr) }, + g.intLiteral, + func() Expr { return g.arithmetic(genConfig) }, + func() Expr { return g.caseExpr(genConfig) }, } return g.randomOf(options) } -func (g *generator) booleanLiteral() Expr { - return BoolVal(g.randomBool()) -} +func (g *Generator) stringExpr(genConfig ExprGeneratorConfig) Expr { + if g.atMaxDepth() { + return g.stringLiteral() + } -func (g *generator) randomBool() bool { - return g.r.Float32() < 0.5 -} + genConfig = genConfig.stringTypeConfig() -func (g *generator) intLiteral() Expr { - t := fmt.Sprintf("%d", g.r.Intn(1000)-g.r.Intn((1000))) + options := []exprF{ + g.stringLiteral, + func() Expr { return g.caseExpr(genConfig) }, + } - return NewIntLiteral(t) + return g.randomOf(options) } -var words = []string{"ox", "ant", "ape", "asp", "bat", "bee", "boa", "bug", "cat", "cod", "cow", "cub", "doe", "dog", "eel", "eft", "elf", "elk", "emu", "ewe", "fly", "fox", 
"gar", "gnu", "hen", "hog", "imp", "jay", "kid", "kit", "koi", "lab", "man", "owl", "pig", "pug", "pup", "ram", "rat", "ray", "yak", "bass", "bear", "bird", "boar", "buck", "bull", "calf", "chow", "clam", "colt", "crab", "crow", "dane", "deer", "dodo", "dory", "dove", "drum", "duck", "fawn", "fish", "flea", "foal", "fowl", "frog", "gnat", "goat", "grub", "gull", "hare", "hawk", "ibex", "joey", "kite", "kiwi", "lamb", "lark", "lion", "loon", "lynx", "mako", "mink", "mite", "mole", "moth", "mule", "mutt", "newt", "orca", "oryx", "pika", "pony", "puma", "seal", "shad", "slug", "sole", "stag", "stud", "swan", "tahr", "teal", "tick", "toad", "tuna", "wasp", "wolf", "worm", "wren", "yeti", "adder", "akita", "alien", "aphid", "bison", "boxer", "bream", "bunny", "burro", "camel", "chimp", "civet", "cobra", "coral", "corgi", "crane", "dingo", "drake", "eagle", "egret", "filly", "finch", "gator", "gecko", "ghost", "ghoul", "goose", "guppy", "heron", "hippo", "horse", "hound", "husky", "hyena", "koala", "krill", "leech", "lemur", "liger", "llama", "louse", "macaw", "midge", "molly", "moose", "moray", "mouse", "panda", "perch", "prawn", "quail", "racer", "raven", "rhino", "robin", "satyr", "shark", "sheep", "shrew", "skink", "skunk", "sloth", "snail", "snake", "snipe", "squid", "stork", "swift", "swine", "tapir", "tetra", "tiger", "troll", "trout", "viper", "wahoo", "whale", "zebra", "alpaca", "amoeba", "baboon", "badger", "beagle", "bedbug", "beetle", "bengal", "bobcat", "caiman", "cattle", "cicada", "collie", "condor", "cougar", "coyote", "dassie", "donkey", "dragon", "earwig", "falcon", "feline", "ferret", "gannet", "gibbon", "glider", "goblin", "gopher", "grouse", "guinea", "hermit", "hornet", "iguana", "impala", "insect", "jackal", "jaguar", "jennet", "kitten", "kodiak", "lizard", "locust", "maggot", "magpie", "mammal", "mantis", "marlin", "marmot", "marten", "martin", "mayfly", "minnow", "monkey", "mullet", "muskox", "ocelot", "oriole", "osprey", "oyster", "parrot", 
"pigeon", "piglet", "poodle", "possum", "python", "quagga", "rabbit", "raptor", "rodent", "roughy", "salmon", "sawfly", "serval", "shiner", "shrimp", "spider", "sponge", "tarpon", "thrush", "tomcat", "toucan", "turkey", "turtle", "urchin", "vervet", "walrus", "weasel", "weevil", "wombat", "anchovy", "anemone", "bluejay", "buffalo", "bulldog", "buzzard", "caribou", "catfish", "chamois", "cheetah", "chicken", "chigger", "cowbird", "crappie", "crawdad", "cricket", "dogfish", "dolphin", "firefly", "garfish", "gazelle", "gelding", "giraffe", "gobbler", "gorilla", "goshawk", "grackle", "griffon", "grizzly", "grouper", "haddock", "hagfish", "halibut", "hamster", "herring", "jackass", "javelin", "jawfish", "jaybird", "katydid", "ladybug", "lamprey", "lemming", "leopard", "lioness", "lobster", "macaque", "mallard", "mammoth", "manatee", "mastiff", "meerkat", "mollusk", "monarch", "mongrel", "monitor", "monster", "mudfish", "muskrat", "mustang", "narwhal", "oarfish", "octopus", "opossum", "ostrich", "panther", "peacock", "pegasus", "pelican", "penguin", "phoenix", "piranha", "polecat", "primate", "quetzal", "raccoon", "rattler", "redbird", "redfish", "reptile", "rooster", "sawfish", "sculpin", "seagull", "skylark", "snapper", "spaniel", "sparrow", "sunbeam", "sunbird", "sunfish", "tadpole", "termite", "terrier", "unicorn", "vulture", "wallaby", "walleye", "warthog", "whippet", "wildcat", "aardvark", "airedale", "albacore", "anteater", "antelope", "arachnid", "barnacle", "basilisk", "blowfish", "bluebird", "bluegill", "bonefish", "bullfrog", "cardinal", "chipmunk", "cockatoo", "crayfish", "dinosaur", "doberman", "duckling", "elephant", "escargot", "flamingo", "flounder", "foxhound", "glowworm", "goldfish", "grubworm", "hedgehog", "honeybee", "hookworm", "humpback", "kangaroo", "killdeer", "kingfish", "labrador", "lacewing", "ladybird", "lionfish", "longhorn", "mackerel", "malamute", "marmoset", "mastodon", "moccasin", "mongoose", "monkfish", "mosquito", "pangolin", 
"parakeet", "pheasant", "pipefish", "platypus", "polliwog", "porpoise", "reindeer", "ringtail", "sailfish", "scorpion", "seahorse", "seasnail", "sheepdog", "shepherd", "silkworm", "squirrel", "stallion", "starfish", "starling", "stingray", "stinkbug", "sturgeon", "terrapin", "titmouse", "tortoise", "treefrog", "werewolf", "woodcock"} +func (g *Generator) subqueryExpr(genConfig ExprGeneratorConfig) Expr { + if g.atMaxDepth() { + return g.makeAggregateIfNecessary(genConfig, g.randomTupleLiteral(genConfig)) + } -func (g *generator) stringLiteral() Expr { - return NewStrLiteral(g.randomOfS(words)) + var options []exprF + + for _, generator := range g.exprGenerators { + if qg, ok := generator.(QueryGenerator); ok { + options = append(options, func() Expr { + expr := qg.Generate(g.r, genConfig) + if expr == nil { + return g.randomTupleLiteral(genConfig) + } + return expr + }) + } + } + + if len(options) == 0 { + return g.Expression(genConfig) + } + + return g.randomOf(options) } -func (g *generator) stringExpr() Expr { - if g.atMaxDepth() { - return g.stringLiteral() +func (g *Generator) randomTupleLiteral(genConfig ExprGeneratorConfig) Expr { + if genConfig.NumCols == 0 { + genConfig.NumCols = g.r.Intn(3) + 1 + } + + tuple := ValTuple{} + for i := 0; i < genConfig.NumCols; i++ { + tuple = append(tuple, g.randomLiteral()) } + return tuple +} + +func (g *Generator) randomLiteral() Expr { options := []exprF{ - func() Expr { return g.stringLiteral() }, - func() Expr { return g.caseExpr(g.stringExpr) }, + g.intLiteral, + g.stringLiteral, + g.booleanLiteral, } return g.randomOf(options) } -func (g *generator) likeExpr() Expr { +func (g *Generator) booleanLiteral() Expr { + return BoolVal(g.randomBool(0.5)) +} + +// randomBool returns true with probability prob +func (g *Generator) randomBool(prob float32) bool { + if prob < 0 || prob > 1 { + prob = 0.5 + } + return g.r.Float32() < prob +} + +func (g *Generator) intLiteral() Expr { + t := fmt.Sprintf("%d", 
g.r.Intn(100)-g.r.Intn(100)) + + return NewIntLiteral(t) +} + +var words = []string{"ox", "ant", "ape", "asp", "bat", "bee", "boa", "bug", "cat", "cod", "cow", "cub", "doe", "dog", "eel", "eft", "elf", "elk", "emu", "ewe", "fly", "fox", "gar", "gnu", "hen", "hog", "imp", "jay", "kid", "kit", "koi", "lab", "man", "owl", "pig", "pug", "pup", "ram", "rat", "ray", "yak", "bass", "bear", "bird", "boar", "buck", "bull", "calf", "chow", "clam", "colt", "crab", "crow", "dane", "deer", "dodo", "dory", "dove", "drum", "duck", "fawn", "fish", "flea", "foal", "fowl", "frog", "gnat", "goat", "grub", "gull", "hare", "hawk", "ibex", "joey", "kite", "kiwi", "lamb", "lark", "lion", "loon", "lynx", "mako", "mink", "mite", "mole", "moth", "mule", "mutt", "newt", "orca", "oryx", "pika", "pony", "puma", "seal", "shad", "slug", "sole", "stag", "stud", "swan", "tahr", "teal", "tick", "toad", "tuna", "wasp", "wolf", "worm", "wren", "yeti", "adder", "akita", "alien", "aphid", "bison", "boxer", "bream", "bunny", "burro", "camel", "chimp", "civet", "cobra", "coral", "corgi", "crane", "dingo", "drake", "eagle", "egret", "filly", "finch", "gator", "gecko", "ghost", "ghoul", "goose", "guppy", "heron", "hippo", "horse", "hound", "husky", "hyena", "koala", "krill", "leech", "lemur", "liger", "llama", "louse", "macaw", "midge", "molly", "moose", "moray", "mouse", "panda", "perch", "prawn", "quail", "racer", "raven", "rhino", "robin", "satyr", "shark", "sheep", "shrew", "skink", "skunk", "sloth", "snail", "snake", "snipe", "squid", "stork", "swift", "swine", "tapir", "tetra", "tiger", "troll", "trout", "viper", "wahoo", "whale", "zebra", "alpaca", "amoeba", "baboon", "badger", "beagle", "bedbug", "beetle", "bengal", "bobcat", "caiman", "cattle", "cicada", "collie", "condor", "cougar", "coyote", "dassie", "donkey", "dragon", "earwig", "falcon", "feline", "ferret", "gannet", "gibbon", "glider", "goblin", "gopher", "grouse", "guinea", "hermit", "hornet", "iguana", "impala", "insect", "jackal", 
"jaguar", "jennet", "kitten", "kodiak", "lizard", "locust", "maggot", "magpie", "mammal", "mantis", "marlin", "marmot", "marten", "martin", "mayfly", "minnow", "monkey", "mullet", "muskox", "ocelot", "oriole", "osprey", "oyster", "parrot", "pigeon", "piglet", "poodle", "possum", "python", "quagga", "rabbit", "raptor", "rodent", "roughy", "salmon", "sawfly", "serval", "shiner", "shrimp", "spider", "sponge", "tarpon", "thrush", "tomcat", "toucan", "turkey", "turtle", "urchin", "vervet", "walrus", "weasel", "weevil", "wombat", "anchovy", "anemone", "bluejay", "buffalo", "bulldog", "buzzard", "caribou", "catfish", "chamois", "cheetah", "chicken", "chigger", "cowbird", "crappie", "crawdad", "cricket", "dogfish", "dolphin", "firefly", "garfish", "gazelle", "gelding", "giraffe", "gobbler", "gorilla", "goshawk", "grackle", "griffon", "grizzly", "grouper", "haddock", "hagfish", "halibut", "hamster", "herring", "jackass", "javelin", "jawfish", "jaybird", "katydid", "ladybug", "lamprey", "lemming", "leopard", "lioness", "lobster", "macaque", "mallard", "mammoth", "manatee", "mastiff", "meerkat", "mollusk", "monarch", "mongrel", "monitor", "monster", "mudfish", "muskrat", "mustang", "narwhal", "oarfish", "octopus", "opossum", "ostrich", "panther", "peacock", "pegasus", "pelican", "penguin", "phoenix", "piranha", "polecat", "primate", "quetzal", "raccoon", "rattler", "redbird", "redfish", "reptile", "rooster", "sawfish", "sculpin", "seagull", "skylark", "snapper", "spaniel", "sparrow", "sunbeam", "sunbird", "sunfish", "tadpole", "termite", "terrier", "unicorn", "vulture", "wallaby", "walleye", "warthog", "whippet", "wildcat", "aardvark", "airedale", "albacore", "anteater", "antelope", "arachnid", "barnacle", "basilisk", "blowfish", "bluebird", "bluegill", "bonefish", "bullfrog", "cardinal", "chipmunk", "cockatoo", "crayfish", "dinosaur", "doberman", "duckling", "elephant", "escargot", "flamingo", "flounder", "foxhound", "glowworm", "goldfish", "grubworm", "hedgehog", 
"honeybee", "hookworm", "humpback", "kangaroo", "killdeer", "kingfish", "labrador", "lacewing", "ladybird", "lionfish", "longhorn", "mackerel", "malamute", "marmoset", "mastodon", "moccasin", "mongoose", "monkfish", "mosquito", "pangolin", "parakeet", "pheasant", "pipefish", "platypus", "polliwog", "porpoise", "reindeer", "ringtail", "sailfish", "scorpion", "seahorse", "seasnail", "sheepdog", "shepherd", "silkworm", "squirrel", "stallion", "starfish", "starling", "stingray", "stinkbug", "sturgeon", "terrapin", "titmouse", "tortoise", "treefrog", "werewolf", "woodcock"} + +func (g *Generator) stringLiteral() Expr { + return NewStrLiteral(g.randomOfS(words)) +} + +func (g *Generator) likeExpr(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() return &ComparisonExpr{ Operator: LikeOp, - Left: g.stringExpr(), - Right: g.stringExpr(), + Left: g.Expression(genConfig), + Right: g.Expression(genConfig), } } var comparisonOps = []ComparisonExprOperator{EqualOp, LessThanOp, GreaterThanOp, LessEqualOp, GreaterEqualOp, NotEqualOp, NullSafeEqualOp} -func (g *generator) comparison(f func() Expr) Expr { +func (g *Generator) comparison(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() + // specifc 1-3 columns + numCols := g.r.Intn(3) + 1 + cmp := &ComparisonExpr{ Operator: comparisonOps[g.r.Intn(len(comparisonOps))], - Left: f(), - Right: f(), + Left: g.Expression(genConfig.SetNumCols(numCols)), + Right: g.Expression(genConfig.SetNumCols(numCols)), } return cmp } -func (g *generator) caseExpr(valueF func() Expr) Expr { +func (g *Generator) caseExpr(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() var exp Expr var elseExpr Expr - if g.randomBool() { - exp = valueF() + if g.randomBool(0.5) { + exp = g.Expression(genConfig.anyTypeConfig()) } - if g.randomBool() { - elseExpr = valueF() + if g.randomBool(0.5) { + elseExpr = g.Expression(genConfig) } - size := g.r.Intn(5) + 2 + size := g.r.Intn(2) + 1 var whens []*When for i := 0; i < size; i++ { 
var cond Expr if exp == nil { - cond = g.booleanExpr() + cond = g.Expression(genConfig.boolTypeConfig()) } else { - cond = g.expression() + cond = g.Expression(genConfig) } + val := g.Expression(genConfig) whens = append(whens, &When{ Cond: cond, - Val: g.expression(), + Val: val, }) } @@ -211,7 +453,7 @@ func (g *generator) caseExpr(valueF func() Expr) Expr { var arithmeticOps = []BinaryExprOperator{BitAndOp, BitOrOp, BitXorOp, PlusOp, MinusOp, MultOp, DivOp, IntDivOp, ModOp, ShiftRightOp, ShiftLeftOp} -func (g *generator) arithmetic() Expr { +func (g *Generator) arithmetic(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() @@ -219,82 +461,81 @@ func (g *generator) arithmetic() Expr { return &BinaryExpr{ Operator: op, - Left: g.intExpr(), - Right: g.intExpr(), + Left: g.Expression(genConfig), + Right: g.Expression(genConfig), } } type exprF func() Expr -func (g *generator) randomOf(options []exprF) Expr { +func (g *Generator) randomOf(options []exprF) Expr { return options[g.r.Intn(len(options))]() } -func (g *generator) randomOfS(options []string) string { +func (g *Generator) randomOfS(options []string) string { return options[g.r.Intn(len(options))] } -func (g *generator) andExpr() Expr { +func (g *Generator) andExpr(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() return &AndExpr{ - Left: g.booleanExpr(), - Right: g.booleanExpr(), + Left: g.Expression(genConfig), + Right: g.Expression(genConfig), } } -func (g *generator) orExpr() Expr { +func (g *Generator) orExpr(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() return &OrExpr{ - Left: g.booleanExpr(), - Right: g.booleanExpr(), + Left: g.Expression(genConfig), + Right: g.Expression(genConfig), } } -func (g *generator) xorExpr() Expr { +func (g *Generator) xorExpr(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() return &XorExpr{ - Left: g.booleanExpr(), - Right: g.booleanExpr(), + Left: g.Expression(genConfig), + Right: g.Expression(genConfig), } } -func (g 
*generator) notExpr() Expr { +func (g *Generator) notExpr(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() - return &NotExpr{g.booleanExpr()} + return &NotExpr{g.Expression(genConfig)} } -func (g *generator) inExpr() Expr { +func (g *Generator) inExpr(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() - expr := g.intExpr() - size := g.r.Intn(5) + 2 - tuples := ValTuple{} - for i := 0; i < size; i++ { - tuples = append(tuples, g.intExpr()) - } + size := g.r.Intn(3) + 2 + inExprGenConfig := NewExprGeneratorConfig(genConfig.AggrRule, "", size, true) + tuple1 := g.Expression(inExprGenConfig) + tuple2 := ValTuple{g.Expression(inExprGenConfig)} + op := InOp - if g.randomBool() { + if g.randomBool(0.5) { op = NotInOp } return &ComparisonExpr{ Operator: op, - Left: expr, - Right: tuples, + Left: tuple1, + Right: tuple2, } } -func (g *generator) between() Expr { +func (g *Generator) between(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() var IsBetween bool - if g.randomBool() { + if g.randomBool(0.5) { IsBetween = true } else { IsBetween = false @@ -302,13 +543,13 @@ func (g *generator) between() Expr { return &BetweenExpr{ IsBetween: IsBetween, - Left: g.intExpr(), - From: g.intExpr(), - To: g.intExpr(), + Left: g.Expression(genConfig), + From: g.Expression(genConfig), + To: g.Expression(genConfig), } } -func (g *generator) isExpr() Expr { +func (g *Generator) isExpr(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() @@ -316,6 +557,26 @@ func (g *generator) isExpr() Expr { return &IsExpr{ Right: ops[g.r.Intn(len(ops))], - Left: g.booleanExpr(), + Left: g.Expression(genConfig), } } + +func (g *Generator) existsExpr(genConfig ExprGeneratorConfig) Expr { + expr := g.subqueryExpr(genConfig.MultiRowConfig().SetNumCols(0)) + if subquery, ok := expr.(*Subquery); ok { + expr = NewExistsExpr(subquery) + } else { + // if g.subqueryExpr doesn't return a valid subquery, replace with + // select 1 + selectExprs := 
SelectExprs{NewAliasedExpr(NewIntLiteral("1"), "")} + from := TableExprs{NewAliasedTableExpr(NewTableName("dual"), "")} + expr = NewExistsExpr(NewSubquery(NewSelect(nil, selectExprs, nil, nil, from, nil, nil, nil, nil))) + } + + // not exists + if g.randomBool(0.5) { + expr = NewNotExpr(expr) + } + + return expr +} diff --git a/go/vt/sqlparser/reserved_vars.go b/go/vt/sqlparser/reserved_vars.go new file mode 100644 index 00000000000..62ed2fc62af --- /dev/null +++ b/go/vt/sqlparser/reserved_vars.go @@ -0,0 +1,180 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "strconv" + "strings" +) + +// ReservedVars keeps track of the bind variable names that have already been used +// in a parsed query. +type ReservedVars struct { + prefix string + reserved BindVars + next []byte + counter int + fast, static bool + sqNext int64 +} + +var subQueryBaseArgName = []byte("__sq") + +// ReserveAll tries to reserve all the given variable names. If they're all available, +// they are reserved and the function returns true. Otherwise, the function returns false. 
+func (r *ReservedVars) ReserveAll(names ...string) bool { + for _, name := range names { + if _, ok := r.reserved[name]; ok { + return false + } + } + for _, name := range names { + r.reserved[name] = struct{}{} + } + return true +} + +// ReserveColName reserves a variable name for the given column; if a variable +// with the same name already exists, it'll be suffixed with a numberic identifier +// to make it unique. +func (r *ReservedVars) ReserveColName(col *ColName) string { + reserveName := col.CompliantName() + if r.fast && strings.HasPrefix(reserveName, r.prefix) { + reserveName = "_" + reserveName + } + + return r.ReserveVariable(reserveName) +} + +func (r *ReservedVars) ReserveVariable(compliantName string) string { + joinVar := []byte(compliantName) + baseLen := len(joinVar) + i := int64(1) + + for { + if _, ok := r.reserved[string(joinVar)]; !ok { + bvar := string(joinVar) + r.reserved[bvar] = struct{}{} + return bvar + } + joinVar = strconv.AppendInt(joinVar[:baseLen], i, 10) + i++ + } +} + +// ReserveSubQuery returns the next argument name to replace subquery with pullout value. +func (r *ReservedVars) ReserveSubQuery() string { + for { + r.sqNext++ + joinVar := strconv.AppendInt(subQueryBaseArgName, r.sqNext, 10) + if _, ok := r.reserved[string(joinVar)]; !ok { + r.reserved[string(joinVar)] = struct{}{} + return string(joinVar) + } + } +} + +// ReserveSubQueryWithHasValues returns the next argument name to replace subquery with pullout value. 
+func (r *ReservedVars) ReserveSubQueryWithHasValues() (string, string) { + for { + r.sqNext++ + joinVar := strconv.AppendInt(subQueryBaseArgName, r.sqNext, 10) + hasValuesJoinVar := strconv.AppendInt(HasValueSubQueryBaseName, r.sqNext, 10) + _, joinVarOK := r.reserved[string(joinVar)] + _, hasValuesJoinVarOK := r.reserved[string(hasValuesJoinVar)] + if !joinVarOK && !hasValuesJoinVarOK { + r.reserved[string(joinVar)] = struct{}{} + r.reserved[string(hasValuesJoinVar)] = struct{}{} + return string(joinVar), string(hasValuesJoinVar) + } + } +} + +// ReserveHasValuesSubQuery returns the next argument name to replace subquery with has value. +func (r *ReservedVars) ReserveHasValuesSubQuery() string { + for { + r.sqNext++ + joinVar := strconv.AppendInt(HasValueSubQueryBaseName, r.sqNext, 10) + if _, ok := r.reserved[string(joinVar)]; !ok { + bvar := string(joinVar) + r.reserved[bvar] = struct{}{} + return bvar + } + } +} + +const staticBvar10 = "vtg0vtg1vtg2vtg3vtg4vtg5vtg6vtg7vtg8vtg9" +const staticBvar100 = "vtg10vtg11vtg12vtg13vtg14vtg15vtg16vtg17vtg18vtg19vtg20vtg21vtg22vtg23vtg24vtg25vtg26vtg27vtg28vtg29vtg30vtg31vtg32vtg33vtg34vtg35vtg36vtg37vtg38vtg39vtg40vtg41vtg42vtg43vtg44vtg45vtg46vtg47vtg48vtg49vtg50vtg51vtg52vtg53vtg54vtg55vtg56vtg57vtg58vtg59vtg60vtg61vtg62vtg63vtg64vtg65vtg66vtg67vtg68vtg69vtg70vtg71vtg72vtg73vtg74vtg75vtg76vtg77vtg78vtg79vtg80vtg81vtg82vtg83vtg84vtg85vtg86vtg87vtg88vtg89vtg90vtg91vtg92vtg93vtg94vtg95vtg96vtg97vtg98vtg99" + +func (r *ReservedVars) nextUnusedVar() string { + if r.fast { + r.counter++ + + if r.static { + switch { + case r.counter < 10: + ofs := r.counter * 4 + return staticBvar10[ofs : ofs+4] + case r.counter < 100: + ofs := (r.counter - 10) * 5 + return staticBvar100[ofs : ofs+5] + } + } + + r.next = strconv.AppendInt(r.next[:len(r.prefix)], int64(r.counter), 10) + return string(r.next) + } + + for { + r.counter++ + r.next = strconv.AppendInt(r.next[:len(r.prefix)], int64(r.counter), 10) + if _, ok := 
r.reserved[string(r.next)]; !ok { + bvar := string(r.next) + r.reserved[bvar] = struct{}{} + return bvar + } + } +} + +// NewReservedVars allocates a ReservedVar instance that will generate unique +// variable names starting with the given `prefix` and making sure that they +// don't conflict with the given set of `known` variables. +func NewReservedVars(prefix string, known BindVars) *ReservedVars { + rv := &ReservedVars{ + prefix: prefix, + counter: 0, + reserved: known, + fast: true, + next: []byte(prefix), + } + + if prefix != "" && prefix[0] == '_' { + panic("cannot reserve variables with a '_' prefix") + } + + for bvar := range known { + if strings.HasPrefix(bvar, prefix) { + rv.fast = false + break + } + } + + if prefix == "vtg" { + rv.static = true + } + return rv +} diff --git a/go/vt/sqlparser/rewriter_api.go b/go/vt/sqlparser/rewriter_api.go index 05d371bad13..cfcf75fa0f9 100644 --- a/go/vt/sqlparser/rewriter_api.go +++ b/go/vt/sqlparser/rewriter_api.go @@ -126,7 +126,7 @@ func (c *Cursor) ReplacerF() func(newNode SQLNode) { // and the new node visited. func (c *Cursor) ReplaceAndRevisit(newNode SQLNode) { switch newNode.(type) { - case SelectExprs: + case SelectExprs, Expr: default: // We need to add support to the generated code for when to look at the revisit flag. At the moment it is only // there for slices of SQLNode implementations diff --git a/go/vt/sqlparser/rewriter_test.go b/go/vt/sqlparser/rewriter_test.go index dadd2c501df..3044e04f8b0 100644 --- a/go/vt/sqlparser/rewriter_test.go +++ b/go/vt/sqlparser/rewriter_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package sqlparser import ( + "math/rand" "testing" "github.com/stretchr/testify/assert" @@ -25,8 +26,8 @@ import ( ) func BenchmarkVisitLargeExpression(b *testing.B) { - gen := newGenerator(1, 5) - exp := gen.expression() + gen := NewGenerator(rand.New(rand.NewSource(1)), 5) + exp := gen.Expression(ExprGeneratorConfig{}) depth := 0 for i := 0; i < b.N; i++ { diff --git a/go/vt/sqlparser/sql.go b/go/vt/sqlparser/sql.go index 146bf25cbab..ab45c0dd2b9 100644 --- a/go/vt/sqlparser/sql.go +++ b/go/vt/sqlparser/sql.go @@ -159,581 +159,592 @@ const STORED = 57468 const BOTH = 57469 const LEADING = 57470 const TRAILING = 57471 -const EMPTY_FROM_CLAUSE = 57472 -const LOWER_THAN_CHARSET = 57473 -const CHARSET = 57474 -const UNIQUE = 57475 -const KEY = 57476 -const EXPRESSION_PREC_SETTER = 57477 -const OR = 57478 -const XOR = 57479 -const AND = 57480 -const NOT = 57481 -const BETWEEN = 57482 -const CASE = 57483 -const WHEN = 57484 -const THEN = 57485 -const ELSE = 57486 -const END = 57487 -const LE = 57488 -const GE = 57489 -const NE = 57490 -const NULL_SAFE_EQUAL = 57491 -const IS = 57492 -const LIKE = 57493 -const REGEXP = 57494 -const RLIKE = 57495 -const IN = 57496 -const ASSIGNMENT_OPT = 57497 -const SHIFT_LEFT = 57498 -const SHIFT_RIGHT = 57499 -const DIV = 57500 -const MOD = 57501 -const UNARY = 57502 -const COLLATE = 57503 -const BINARY = 57504 -const UNDERSCORE_ARMSCII8 = 57505 -const UNDERSCORE_ASCII = 57506 -const UNDERSCORE_BIG5 = 57507 -const UNDERSCORE_BINARY = 57508 -const UNDERSCORE_CP1250 = 57509 -const UNDERSCORE_CP1251 = 57510 -const UNDERSCORE_CP1256 = 57511 -const UNDERSCORE_CP1257 = 57512 -const UNDERSCORE_CP850 = 57513 -const UNDERSCORE_CP852 = 57514 -const UNDERSCORE_CP866 = 57515 -const UNDERSCORE_CP932 = 57516 -const UNDERSCORE_DEC8 = 57517 -const UNDERSCORE_EUCJPMS = 57518 -const UNDERSCORE_EUCKR = 57519 -const UNDERSCORE_GB18030 = 57520 -const UNDERSCORE_GB2312 = 57521 -const UNDERSCORE_GBK = 57522 -const UNDERSCORE_GEOSTD8 = 57523 -const 
UNDERSCORE_GREEK = 57524 -const UNDERSCORE_HEBREW = 57525 -const UNDERSCORE_HP8 = 57526 -const UNDERSCORE_KEYBCS2 = 57527 -const UNDERSCORE_KOI8R = 57528 -const UNDERSCORE_KOI8U = 57529 -const UNDERSCORE_LATIN1 = 57530 -const UNDERSCORE_LATIN2 = 57531 -const UNDERSCORE_LATIN5 = 57532 -const UNDERSCORE_LATIN7 = 57533 -const UNDERSCORE_MACCE = 57534 -const UNDERSCORE_MACROMAN = 57535 -const UNDERSCORE_SJIS = 57536 -const UNDERSCORE_SWE7 = 57537 -const UNDERSCORE_TIS620 = 57538 -const UNDERSCORE_UCS2 = 57539 -const UNDERSCORE_UJIS = 57540 -const UNDERSCORE_UTF16 = 57541 -const UNDERSCORE_UTF16LE = 57542 -const UNDERSCORE_UTF32 = 57543 -const UNDERSCORE_UTF8 = 57544 -const UNDERSCORE_UTF8MB4 = 57545 -const UNDERSCORE_UTF8MB3 = 57546 -const INTERVAL = 57547 -const WINDOW_EXPR = 57548 -const JSON_EXTRACT_OP = 57549 -const JSON_UNQUOTE_EXTRACT_OP = 57550 -const CREATE = 57551 -const ALTER = 57552 -const DROP = 57553 -const RENAME = 57554 -const ANALYZE = 57555 -const ADD = 57556 -const FLUSH = 57557 -const CHANGE = 57558 -const MODIFY = 57559 -const DEALLOCATE = 57560 -const REVERT = 57561 -const QUERIES = 57562 -const SCHEMA = 57563 -const TABLE = 57564 -const INDEX = 57565 -const VIEW = 57566 -const TO = 57567 -const IGNORE = 57568 -const IF = 57569 -const PRIMARY = 57570 -const COLUMN = 57571 -const SPATIAL = 57572 -const FULLTEXT = 57573 -const KEY_BLOCK_SIZE = 57574 -const CHECK = 57575 -const INDEXES = 57576 -const ACTION = 57577 -const CASCADE = 57578 -const CONSTRAINT = 57579 -const FOREIGN = 57580 -const NO = 57581 -const REFERENCES = 57582 -const RESTRICT = 57583 -const SHOW = 57584 -const DESCRIBE = 57585 -const EXPLAIN = 57586 -const DATE = 57587 -const ESCAPE = 57588 -const REPAIR = 57589 -const OPTIMIZE = 57590 -const TRUNCATE = 57591 -const COALESCE = 57592 -const EXCHANGE = 57593 -const REBUILD = 57594 -const PARTITIONING = 57595 -const REMOVE = 57596 -const PREPARE = 57597 -const EXECUTE = 57598 -const MAXVALUE = 57599 -const PARTITION = 57600 -const 
REORGANIZE = 57601 -const LESS = 57602 -const THAN = 57603 -const PROCEDURE = 57604 -const TRIGGER = 57605 -const VINDEX = 57606 -const VINDEXES = 57607 -const DIRECTORY = 57608 -const NAME = 57609 -const UPGRADE = 57610 -const STATUS = 57611 -const VARIABLES = 57612 -const WARNINGS = 57613 -const CASCADED = 57614 -const DEFINER = 57615 -const OPTION = 57616 -const SQL = 57617 -const UNDEFINED = 57618 -const SEQUENCE = 57619 -const MERGE = 57620 -const TEMPORARY = 57621 -const TEMPTABLE = 57622 -const INVOKER = 57623 -const SECURITY = 57624 -const FIRST = 57625 -const AFTER = 57626 -const LAST = 57627 -const VITESS_MIGRATION = 57628 -const CANCEL = 57629 -const RETRY = 57630 -const LAUNCH = 57631 -const COMPLETE = 57632 -const CLEANUP = 57633 -const THROTTLE = 57634 -const UNTHROTTLE = 57635 -const EXPIRE = 57636 -const RATIO = 57637 -const VITESS_THROTTLER = 57638 -const BEGIN = 57639 -const START = 57640 -const TRANSACTION = 57641 -const COMMIT = 57642 -const ROLLBACK = 57643 -const SAVEPOINT = 57644 -const RELEASE = 57645 -const WORK = 57646 -const CONSISTENT = 57647 -const SNAPSHOT = 57648 -const BIT = 57649 -const TINYINT = 57650 -const SMALLINT = 57651 -const MEDIUMINT = 57652 -const INT = 57653 -const INTEGER = 57654 -const BIGINT = 57655 -const INTNUM = 57656 -const REAL = 57657 -const DOUBLE = 57658 -const FLOAT_TYPE = 57659 -const FLOAT4_TYPE = 57660 -const FLOAT8_TYPE = 57661 -const DECIMAL_TYPE = 57662 -const NUMERIC = 57663 -const TIME = 57664 -const TIMESTAMP = 57665 -const DATETIME = 57666 -const YEAR = 57667 -const CHAR = 57668 -const VARCHAR = 57669 -const BOOL = 57670 -const CHARACTER = 57671 -const VARBINARY = 57672 -const NCHAR = 57673 -const TEXT = 57674 -const TINYTEXT = 57675 -const MEDIUMTEXT = 57676 -const LONGTEXT = 57677 -const BLOB = 57678 -const TINYBLOB = 57679 -const MEDIUMBLOB = 57680 -const LONGBLOB = 57681 -const JSON = 57682 -const JSON_SCHEMA_VALID = 57683 -const JSON_SCHEMA_VALIDATION_REPORT = 57684 -const ENUM = 57685 -const 
GEOMETRY = 57686 -const POINT = 57687 -const LINESTRING = 57688 -const POLYGON = 57689 -const GEOMCOLLECTION = 57690 -const GEOMETRYCOLLECTION = 57691 -const MULTIPOINT = 57692 -const MULTILINESTRING = 57693 -const MULTIPOLYGON = 57694 -const ASCII = 57695 -const UNICODE = 57696 -const NULLX = 57697 -const AUTO_INCREMENT = 57698 -const APPROXNUM = 57699 -const SIGNED = 57700 -const UNSIGNED = 57701 -const ZEROFILL = 57702 -const PURGE = 57703 -const BEFORE = 57704 -const CODE = 57705 -const COLLATION = 57706 -const COLUMNS = 57707 -const DATABASES = 57708 -const ENGINES = 57709 -const EVENT = 57710 -const EXTENDED = 57711 -const FIELDS = 57712 -const FULL = 57713 -const FUNCTION = 57714 -const GTID_EXECUTED = 57715 -const KEYSPACES = 57716 -const OPEN = 57717 -const PLUGINS = 57718 -const PRIVILEGES = 57719 -const PROCESSLIST = 57720 -const SCHEMAS = 57721 -const TABLES = 57722 -const TRIGGERS = 57723 -const USER = 57724 -const VGTID_EXECUTED = 57725 -const VITESS_KEYSPACES = 57726 -const VITESS_METADATA = 57727 -const VITESS_MIGRATIONS = 57728 -const VITESS_REPLICATION_STATUS = 57729 -const VITESS_SHARDS = 57730 -const VITESS_TABLETS = 57731 -const VITESS_TARGET = 57732 -const VSCHEMA = 57733 -const VITESS_THROTTLED_APPS = 57734 -const NAMES = 57735 -const GLOBAL = 57736 -const SESSION = 57737 -const ISOLATION = 57738 -const LEVEL = 57739 -const READ = 57740 -const WRITE = 57741 -const ONLY = 57742 -const REPEATABLE = 57743 -const COMMITTED = 57744 -const UNCOMMITTED = 57745 -const SERIALIZABLE = 57746 -const ADDDATE = 57747 -const CURRENT_TIMESTAMP = 57748 -const DATABASE = 57749 -const CURRENT_DATE = 57750 -const CURDATE = 57751 -const DATE_ADD = 57752 -const DATE_SUB = 57753 -const NOW = 57754 -const SUBDATE = 57755 -const CURTIME = 57756 -const CURRENT_TIME = 57757 -const LOCALTIME = 57758 -const LOCALTIMESTAMP = 57759 -const CURRENT_USER = 57760 -const UTC_DATE = 57761 -const UTC_TIME = 57762 -const UTC_TIMESTAMP = 57763 -const SYSDATE = 57764 -const DAY = 
57765 -const DAY_HOUR = 57766 -const DAY_MICROSECOND = 57767 -const DAY_MINUTE = 57768 -const DAY_SECOND = 57769 -const HOUR = 57770 -const HOUR_MICROSECOND = 57771 -const HOUR_MINUTE = 57772 -const HOUR_SECOND = 57773 -const MICROSECOND = 57774 -const MINUTE = 57775 -const MINUTE_MICROSECOND = 57776 -const MINUTE_SECOND = 57777 -const MONTH = 57778 -const QUARTER = 57779 -const SECOND = 57780 -const SECOND_MICROSECOND = 57781 -const YEAR_MONTH = 57782 -const WEEK = 57783 -const REPLACE = 57784 -const CONVERT = 57785 -const CAST = 57786 -const SUBSTR = 57787 -const SUBSTRING = 57788 -const SEPARATOR = 57789 -const TIMESTAMPADD = 57790 -const TIMESTAMPDIFF = 57791 -const WEIGHT_STRING = 57792 -const LTRIM = 57793 -const RTRIM = 57794 -const TRIM = 57795 -const JSON_ARRAY = 57796 -const JSON_OBJECT = 57797 -const JSON_QUOTE = 57798 -const JSON_DEPTH = 57799 -const JSON_TYPE = 57800 -const JSON_LENGTH = 57801 -const JSON_VALID = 57802 -const JSON_ARRAY_APPEND = 57803 -const JSON_ARRAY_INSERT = 57804 -const JSON_INSERT = 57805 -const JSON_MERGE = 57806 -const JSON_MERGE_PATCH = 57807 -const JSON_MERGE_PRESERVE = 57808 -const JSON_REMOVE = 57809 -const JSON_REPLACE = 57810 -const JSON_SET = 57811 -const JSON_UNQUOTE = 57812 -const COUNT = 57813 -const AVG = 57814 -const MAX = 57815 -const MIN = 57816 -const SUM = 57817 -const GROUP_CONCAT = 57818 -const BIT_AND = 57819 -const BIT_OR = 57820 -const BIT_XOR = 57821 -const STD = 57822 -const STDDEV = 57823 -const STDDEV_POP = 57824 -const STDDEV_SAMP = 57825 -const VAR_POP = 57826 -const VAR_SAMP = 57827 -const VARIANCE = 57828 -const REGEXP_INSTR = 57829 -const REGEXP_LIKE = 57830 -const REGEXP_REPLACE = 57831 -const REGEXP_SUBSTR = 57832 -const ExtractValue = 57833 -const UpdateXML = 57834 -const GET_LOCK = 57835 -const RELEASE_LOCK = 57836 -const RELEASE_ALL_LOCKS = 57837 -const IS_FREE_LOCK = 57838 -const IS_USED_LOCK = 57839 -const LOCATE = 57840 -const POSITION = 57841 -const ST_GeometryCollectionFromText = 57842 
-const ST_GeometryFromText = 57843 -const ST_LineStringFromText = 57844 -const ST_MultiLineStringFromText = 57845 -const ST_MultiPointFromText = 57846 -const ST_MultiPolygonFromText = 57847 -const ST_PointFromText = 57848 -const ST_PolygonFromText = 57849 -const ST_GeometryCollectionFromWKB = 57850 -const ST_GeometryFromWKB = 57851 -const ST_LineStringFromWKB = 57852 -const ST_MultiLineStringFromWKB = 57853 -const ST_MultiPointFromWKB = 57854 -const ST_MultiPolygonFromWKB = 57855 -const ST_PointFromWKB = 57856 -const ST_PolygonFromWKB = 57857 -const ST_AsBinary = 57858 -const ST_AsText = 57859 -const ST_Dimension = 57860 -const ST_Envelope = 57861 -const ST_IsSimple = 57862 -const ST_IsEmpty = 57863 -const ST_GeometryType = 57864 -const ST_X = 57865 -const ST_Y = 57866 -const ST_Latitude = 57867 -const ST_Longitude = 57868 -const ST_EndPoint = 57869 -const ST_IsClosed = 57870 -const ST_Length = 57871 -const ST_NumPoints = 57872 -const ST_StartPoint = 57873 -const ST_PointN = 57874 -const ST_Area = 57875 -const ST_Centroid = 57876 -const ST_ExteriorRing = 57877 -const ST_InteriorRingN = 57878 -const ST_NumInteriorRings = 57879 -const ST_NumGeometries = 57880 -const ST_GeometryN = 57881 -const ST_LongFromGeoHash = 57882 -const ST_PointFromGeoHash = 57883 -const ST_LatFromGeoHash = 57884 -const ST_GeoHash = 57885 -const ST_AsGeoJSON = 57886 -const ST_GeomFromGeoJSON = 57887 -const MATCH = 57888 -const AGAINST = 57889 -const BOOLEAN = 57890 -const LANGUAGE = 57891 -const WITH = 57892 -const QUERY = 57893 -const EXPANSION = 57894 -const WITHOUT = 57895 -const VALIDATION = 57896 -const UNUSED = 57897 -const ARRAY = 57898 -const BYTE = 57899 -const CUME_DIST = 57900 -const DESCRIPTION = 57901 -const DENSE_RANK = 57902 -const EMPTY = 57903 -const EXCEPT = 57904 -const FIRST_VALUE = 57905 -const GROUPING = 57906 -const GROUPS = 57907 -const JSON_TABLE = 57908 -const LAG = 57909 -const LAST_VALUE = 57910 -const LATERAL = 57911 -const LEAD = 57912 -const INFILE = 57913 -const 
NTH_VALUE = 57914 -const NTILE = 57915 -const OF = 57916 -const OVER = 57917 -const PERCENT_RANK = 57918 -const RANK = 57919 -const RECURSIVE = 57920 -const ROW_NUMBER = 57921 -const SYSTEM = 57922 -const WINDOW = 57923 -const ACTIVE = 57924 -const ADMIN = 57925 -const AUTOEXTEND_SIZE = 57926 -const BUCKETS = 57927 -const CLONE = 57928 -const COLUMN_FORMAT = 57929 -const COMPONENT = 57930 -const DEFINITION = 57931 -const ENFORCED = 57932 -const ENGINE_ATTRIBUTE = 57933 -const EXCLUDE = 57934 -const FOLLOWING = 57935 -const GET_MASTER_PUBLIC_KEY = 57936 -const HISTOGRAM = 57937 -const HISTORY = 57938 -const INACTIVE = 57939 -const INVISIBLE = 57940 -const LOCKED = 57941 -const MASTER_COMPRESSION_ALGORITHMS = 57942 -const MASTER_PUBLIC_KEY_PATH = 57943 -const MASTER_TLS_CIPHERSUITES = 57944 -const MASTER_ZSTD_COMPRESSION_LEVEL = 57945 -const NESTED = 57946 -const NETWORK_NAMESPACE = 57947 -const NOWAIT = 57948 -const NULLS = 57949 -const OJ = 57950 -const OLD = 57951 -const OPTIONAL = 57952 -const ORDINALITY = 57953 -const ORGANIZATION = 57954 -const OTHERS = 57955 -const PARTIAL = 57956 -const PATH = 57957 -const PERSIST = 57958 -const PERSIST_ONLY = 57959 -const PRECEDING = 57960 -const PRIVILEGE_CHECKS_USER = 57961 -const PROCESS = 57962 -const RANDOM = 57963 -const REFERENCE = 57964 -const REQUIRE_ROW_FORMAT = 57965 -const RESOURCE = 57966 -const RESPECT = 57967 -const RESTART = 57968 -const RETAIN = 57969 -const REUSE = 57970 -const ROLE = 57971 -const SECONDARY = 57972 -const SECONDARY_ENGINE = 57973 -const SECONDARY_ENGINE_ATTRIBUTE = 57974 -const SECONDARY_LOAD = 57975 -const SECONDARY_UNLOAD = 57976 -const SIMPLE = 57977 -const SKIP = 57978 -const SRID = 57979 -const THREAD_PRIORITY = 57980 -const TIES = 57981 -const UNBOUNDED = 57982 -const VCPU = 57983 -const VISIBLE = 57984 -const RETURNING = 57985 -const FORMAT_BYTES = 57986 -const FORMAT_PICO_TIME = 57987 -const PS_CURRENT_THREAD_ID = 57988 -const PS_THREAD_ID = 57989 -const GTID_SUBSET = 57990 -const 
GTID_SUBTRACT = 57991 -const WAIT_FOR_EXECUTED_GTID_SET = 57992 -const WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS = 57993 -const FORMAT = 57994 -const TREE = 57995 -const VITESS = 57996 -const TRADITIONAL = 57997 -const VTEXPLAIN = 57998 -const VEXPLAIN = 57999 -const PLAN = 58000 -const LOCAL = 58001 -const LOW_PRIORITY = 58002 -const NO_WRITE_TO_BINLOG = 58003 -const LOGS = 58004 -const ERROR = 58005 -const GENERAL = 58006 -const HOSTS = 58007 -const OPTIMIZER_COSTS = 58008 -const USER_RESOURCES = 58009 -const SLOW = 58010 -const CHANNEL = 58011 -const RELAY = 58012 -const EXPORT = 58013 -const CURRENT = 58014 -const ROW = 58015 -const ROWS = 58016 -const AVG_ROW_LENGTH = 58017 -const CONNECTION = 58018 -const CHECKSUM = 58019 -const DELAY_KEY_WRITE = 58020 -const ENCRYPTION = 58021 -const ENGINE = 58022 -const INSERT_METHOD = 58023 -const MAX_ROWS = 58024 -const MIN_ROWS = 58025 -const PACK_KEYS = 58026 -const PASSWORD = 58027 -const FIXED = 58028 -const DYNAMIC = 58029 -const COMPRESSED = 58030 -const REDUNDANT = 58031 -const COMPACT = 58032 -const ROW_FORMAT = 58033 -const STATS_AUTO_RECALC = 58034 -const STATS_PERSISTENT = 58035 -const STATS_SAMPLE_PAGES = 58036 -const STORAGE = 58037 -const MEMORY = 58038 -const DISK = 58039 -const PARTITIONS = 58040 -const LINEAR = 58041 -const RANGE = 58042 -const LIST = 58043 -const SUBPARTITION = 58044 -const SUBPARTITIONS = 58045 -const HASH = 58046 +const KILL = 57472 +const EMPTY_FROM_CLAUSE = 57473 +const LOWER_THAN_CHARSET = 57474 +const CHARSET = 57475 +const UNIQUE = 57476 +const KEY = 57477 +const EXPRESSION_PREC_SETTER = 57478 +const OR = 57479 +const XOR = 57480 +const AND = 57481 +const NOT = 57482 +const BETWEEN = 57483 +const CASE = 57484 +const WHEN = 57485 +const THEN = 57486 +const ELSE = 57487 +const END = 57488 +const LE = 57489 +const GE = 57490 +const NE = 57491 +const NULL_SAFE_EQUAL = 57492 +const IS = 57493 +const LIKE = 57494 +const REGEXP = 57495 +const RLIKE = 57496 +const IN = 57497 +const 
ASSIGNMENT_OPT = 57498 +const SHIFT_LEFT = 57499 +const SHIFT_RIGHT = 57500 +const DIV = 57501 +const MOD = 57502 +const UNARY = 57503 +const COLLATE = 57504 +const BINARY = 57505 +const UNDERSCORE_ARMSCII8 = 57506 +const UNDERSCORE_ASCII = 57507 +const UNDERSCORE_BIG5 = 57508 +const UNDERSCORE_BINARY = 57509 +const UNDERSCORE_CP1250 = 57510 +const UNDERSCORE_CP1251 = 57511 +const UNDERSCORE_CP1256 = 57512 +const UNDERSCORE_CP1257 = 57513 +const UNDERSCORE_CP850 = 57514 +const UNDERSCORE_CP852 = 57515 +const UNDERSCORE_CP866 = 57516 +const UNDERSCORE_CP932 = 57517 +const UNDERSCORE_DEC8 = 57518 +const UNDERSCORE_EUCJPMS = 57519 +const UNDERSCORE_EUCKR = 57520 +const UNDERSCORE_GB18030 = 57521 +const UNDERSCORE_GB2312 = 57522 +const UNDERSCORE_GBK = 57523 +const UNDERSCORE_GEOSTD8 = 57524 +const UNDERSCORE_GREEK = 57525 +const UNDERSCORE_HEBREW = 57526 +const UNDERSCORE_HP8 = 57527 +const UNDERSCORE_KEYBCS2 = 57528 +const UNDERSCORE_KOI8R = 57529 +const UNDERSCORE_KOI8U = 57530 +const UNDERSCORE_LATIN1 = 57531 +const UNDERSCORE_LATIN2 = 57532 +const UNDERSCORE_LATIN5 = 57533 +const UNDERSCORE_LATIN7 = 57534 +const UNDERSCORE_MACCE = 57535 +const UNDERSCORE_MACROMAN = 57536 +const UNDERSCORE_SJIS = 57537 +const UNDERSCORE_SWE7 = 57538 +const UNDERSCORE_TIS620 = 57539 +const UNDERSCORE_UCS2 = 57540 +const UNDERSCORE_UJIS = 57541 +const UNDERSCORE_UTF16 = 57542 +const UNDERSCORE_UTF16LE = 57543 +const UNDERSCORE_UTF32 = 57544 +const UNDERSCORE_UTF8 = 57545 +const UNDERSCORE_UTF8MB4 = 57546 +const UNDERSCORE_UTF8MB3 = 57547 +const INTERVAL = 57548 +const WINDOW_EXPR = 57549 +const JSON_EXTRACT_OP = 57550 +const JSON_UNQUOTE_EXTRACT_OP = 57551 +const CREATE = 57552 +const ALTER = 57553 +const DROP = 57554 +const RENAME = 57555 +const ANALYZE = 57556 +const ADD = 57557 +const FLUSH = 57558 +const CHANGE = 57559 +const MODIFY = 57560 +const DEALLOCATE = 57561 +const REVERT = 57562 +const QUERIES = 57563 +const SCHEMA = 57564 +const TABLE = 57565 +const INDEX = 57566 +const 
VIEW = 57567 +const TO = 57568 +const IGNORE = 57569 +const IF = 57570 +const PRIMARY = 57571 +const COLUMN = 57572 +const SPATIAL = 57573 +const FULLTEXT = 57574 +const KEY_BLOCK_SIZE = 57575 +const CHECK = 57576 +const INDEXES = 57577 +const ACTION = 57578 +const CASCADE = 57579 +const CONSTRAINT = 57580 +const FOREIGN = 57581 +const NO = 57582 +const REFERENCES = 57583 +const RESTRICT = 57584 +const SHOW = 57585 +const DESCRIBE = 57586 +const EXPLAIN = 57587 +const DATE = 57588 +const ESCAPE = 57589 +const REPAIR = 57590 +const OPTIMIZE = 57591 +const TRUNCATE = 57592 +const COALESCE = 57593 +const EXCHANGE = 57594 +const REBUILD = 57595 +const PARTITIONING = 57596 +const REMOVE = 57597 +const PREPARE = 57598 +const EXECUTE = 57599 +const MAXVALUE = 57600 +const PARTITION = 57601 +const REORGANIZE = 57602 +const LESS = 57603 +const THAN = 57604 +const PROCEDURE = 57605 +const TRIGGER = 57606 +const VINDEX = 57607 +const VINDEXES = 57608 +const DIRECTORY = 57609 +const NAME = 57610 +const UPGRADE = 57611 +const STATUS = 57612 +const VARIABLES = 57613 +const WARNINGS = 57614 +const CASCADED = 57615 +const DEFINER = 57616 +const OPTION = 57617 +const SQL = 57618 +const UNDEFINED = 57619 +const SEQUENCE = 57620 +const MERGE = 57621 +const TEMPORARY = 57622 +const TEMPTABLE = 57623 +const INVOKER = 57624 +const SECURITY = 57625 +const FIRST = 57626 +const AFTER = 57627 +const LAST = 57628 +const VITESS_MIGRATION = 57629 +const CANCEL = 57630 +const RETRY = 57631 +const LAUNCH = 57632 +const COMPLETE = 57633 +const CLEANUP = 57634 +const THROTTLE = 57635 +const UNTHROTTLE = 57636 +const EXPIRE = 57637 +const RATIO = 57638 +const VITESS_THROTTLER = 57639 +const BEGIN = 57640 +const START = 57641 +const TRANSACTION = 57642 +const COMMIT = 57643 +const ROLLBACK = 57644 +const SAVEPOINT = 57645 +const RELEASE = 57646 +const WORK = 57647 +const CONSISTENT = 57648 +const SNAPSHOT = 57649 +const BIT = 57650 +const TINYINT = 57651 +const SMALLINT = 57652 +const MEDIUMINT = 
57653 +const INT = 57654 +const INTEGER = 57655 +const BIGINT = 57656 +const INTNUM = 57657 +const REAL = 57658 +const DOUBLE = 57659 +const FLOAT_TYPE = 57660 +const FLOAT4_TYPE = 57661 +const FLOAT8_TYPE = 57662 +const DECIMAL_TYPE = 57663 +const NUMERIC = 57664 +const TIME = 57665 +const TIMESTAMP = 57666 +const DATETIME = 57667 +const YEAR = 57668 +const CHAR = 57669 +const VARCHAR = 57670 +const BOOL = 57671 +const CHARACTER = 57672 +const VARBINARY = 57673 +const NCHAR = 57674 +const TEXT = 57675 +const TINYTEXT = 57676 +const MEDIUMTEXT = 57677 +const LONGTEXT = 57678 +const BLOB = 57679 +const TINYBLOB = 57680 +const MEDIUMBLOB = 57681 +const LONGBLOB = 57682 +const JSON = 57683 +const JSON_SCHEMA_VALID = 57684 +const JSON_SCHEMA_VALIDATION_REPORT = 57685 +const ENUM = 57686 +const GEOMETRY = 57687 +const POINT = 57688 +const LINESTRING = 57689 +const POLYGON = 57690 +const GEOMCOLLECTION = 57691 +const GEOMETRYCOLLECTION = 57692 +const MULTIPOINT = 57693 +const MULTILINESTRING = 57694 +const MULTIPOLYGON = 57695 +const ASCII = 57696 +const UNICODE = 57697 +const NULLX = 57698 +const AUTO_INCREMENT = 57699 +const APPROXNUM = 57700 +const SIGNED = 57701 +const UNSIGNED = 57702 +const ZEROFILL = 57703 +const PURGE = 57704 +const BEFORE = 57705 +const CODE = 57706 +const COLLATION = 57707 +const COLUMNS = 57708 +const DATABASES = 57709 +const ENGINES = 57710 +const EVENT = 57711 +const EXTENDED = 57712 +const FIELDS = 57713 +const FULL = 57714 +const FUNCTION = 57715 +const GTID_EXECUTED = 57716 +const KEYSPACES = 57717 +const OPEN = 57718 +const PLUGINS = 57719 +const PRIVILEGES = 57720 +const PROCESSLIST = 57721 +const SCHEMAS = 57722 +const TABLES = 57723 +const TRIGGERS = 57724 +const USER = 57725 +const VGTID_EXECUTED = 57726 +const VITESS_KEYSPACES = 57727 +const VITESS_METADATA = 57728 +const VITESS_MIGRATIONS = 57729 +const VITESS_REPLICATION_STATUS = 57730 +const VITESS_SHARDS = 57731 +const VITESS_TABLETS = 57732 +const VITESS_TARGET = 57733 +const 
VSCHEMA = 57734 +const VITESS_THROTTLED_APPS = 57735 +const NAMES = 57736 +const GLOBAL = 57737 +const SESSION = 57738 +const ISOLATION = 57739 +const LEVEL = 57740 +const READ = 57741 +const WRITE = 57742 +const ONLY = 57743 +const REPEATABLE = 57744 +const COMMITTED = 57745 +const UNCOMMITTED = 57746 +const SERIALIZABLE = 57747 +const ADDDATE = 57748 +const CURRENT_TIMESTAMP = 57749 +const DATABASE = 57750 +const CURRENT_DATE = 57751 +const CURDATE = 57752 +const DATE_ADD = 57753 +const DATE_SUB = 57754 +const NOW = 57755 +const SUBDATE = 57756 +const CURTIME = 57757 +const CURRENT_TIME = 57758 +const LOCALTIME = 57759 +const LOCALTIMESTAMP = 57760 +const CURRENT_USER = 57761 +const UTC_DATE = 57762 +const UTC_TIME = 57763 +const UTC_TIMESTAMP = 57764 +const SYSDATE = 57765 +const DAY = 57766 +const DAY_HOUR = 57767 +const DAY_MICROSECOND = 57768 +const DAY_MINUTE = 57769 +const DAY_SECOND = 57770 +const HOUR = 57771 +const HOUR_MICROSECOND = 57772 +const HOUR_MINUTE = 57773 +const HOUR_SECOND = 57774 +const MICROSECOND = 57775 +const MINUTE = 57776 +const MINUTE_MICROSECOND = 57777 +const MINUTE_SECOND = 57778 +const MONTH = 57779 +const QUARTER = 57780 +const SECOND = 57781 +const SECOND_MICROSECOND = 57782 +const YEAR_MONTH = 57783 +const WEEK = 57784 +const SQL_TSI_DAY = 57785 +const SQL_TSI_WEEK = 57786 +const SQL_TSI_HOUR = 57787 +const SQL_TSI_MINUTE = 57788 +const SQL_TSI_MONTH = 57789 +const SQL_TSI_QUARTER = 57790 +const SQL_TSI_SECOND = 57791 +const SQL_TSI_MICROSECOND = 57792 +const SQL_TSI_YEAR = 57793 +const REPLACE = 57794 +const CONVERT = 57795 +const CAST = 57796 +const SUBSTR = 57797 +const SUBSTRING = 57798 +const SEPARATOR = 57799 +const TIMESTAMPADD = 57800 +const TIMESTAMPDIFF = 57801 +const WEIGHT_STRING = 57802 +const LTRIM = 57803 +const RTRIM = 57804 +const TRIM = 57805 +const JSON_ARRAY = 57806 +const JSON_OBJECT = 57807 +const JSON_QUOTE = 57808 +const JSON_DEPTH = 57809 +const JSON_TYPE = 57810 +const JSON_LENGTH = 57811 +const 
JSON_VALID = 57812 +const JSON_ARRAY_APPEND = 57813 +const JSON_ARRAY_INSERT = 57814 +const JSON_INSERT = 57815 +const JSON_MERGE = 57816 +const JSON_MERGE_PATCH = 57817 +const JSON_MERGE_PRESERVE = 57818 +const JSON_REMOVE = 57819 +const JSON_REPLACE = 57820 +const JSON_SET = 57821 +const JSON_UNQUOTE = 57822 +const COUNT = 57823 +const AVG = 57824 +const MAX = 57825 +const MIN = 57826 +const SUM = 57827 +const GROUP_CONCAT = 57828 +const BIT_AND = 57829 +const BIT_OR = 57830 +const BIT_XOR = 57831 +const STD = 57832 +const STDDEV = 57833 +const STDDEV_POP = 57834 +const STDDEV_SAMP = 57835 +const VAR_POP = 57836 +const VAR_SAMP = 57837 +const VARIANCE = 57838 +const ANY_VALUE = 57839 +const REGEXP_INSTR = 57840 +const REGEXP_LIKE = 57841 +const REGEXP_REPLACE = 57842 +const REGEXP_SUBSTR = 57843 +const ExtractValue = 57844 +const UpdateXML = 57845 +const GET_LOCK = 57846 +const RELEASE_LOCK = 57847 +const RELEASE_ALL_LOCKS = 57848 +const IS_FREE_LOCK = 57849 +const IS_USED_LOCK = 57850 +const LOCATE = 57851 +const POSITION = 57852 +const ST_GeometryCollectionFromText = 57853 +const ST_GeometryFromText = 57854 +const ST_LineStringFromText = 57855 +const ST_MultiLineStringFromText = 57856 +const ST_MultiPointFromText = 57857 +const ST_MultiPolygonFromText = 57858 +const ST_PointFromText = 57859 +const ST_PolygonFromText = 57860 +const ST_GeometryCollectionFromWKB = 57861 +const ST_GeometryFromWKB = 57862 +const ST_LineStringFromWKB = 57863 +const ST_MultiLineStringFromWKB = 57864 +const ST_MultiPointFromWKB = 57865 +const ST_MultiPolygonFromWKB = 57866 +const ST_PointFromWKB = 57867 +const ST_PolygonFromWKB = 57868 +const ST_AsBinary = 57869 +const ST_AsText = 57870 +const ST_Dimension = 57871 +const ST_Envelope = 57872 +const ST_IsSimple = 57873 +const ST_IsEmpty = 57874 +const ST_GeometryType = 57875 +const ST_X = 57876 +const ST_Y = 57877 +const ST_Latitude = 57878 +const ST_Longitude = 57879 +const ST_EndPoint = 57880 +const ST_IsClosed = 57881 +const ST_Length 
= 57882 +const ST_NumPoints = 57883 +const ST_StartPoint = 57884 +const ST_PointN = 57885 +const ST_Area = 57886 +const ST_Centroid = 57887 +const ST_ExteriorRing = 57888 +const ST_InteriorRingN = 57889 +const ST_NumInteriorRings = 57890 +const ST_NumGeometries = 57891 +const ST_GeometryN = 57892 +const ST_LongFromGeoHash = 57893 +const ST_PointFromGeoHash = 57894 +const ST_LatFromGeoHash = 57895 +const ST_GeoHash = 57896 +const ST_AsGeoJSON = 57897 +const ST_GeomFromGeoJSON = 57898 +const MATCH = 57899 +const AGAINST = 57900 +const BOOLEAN = 57901 +const LANGUAGE = 57902 +const WITH = 57903 +const QUERY = 57904 +const EXPANSION = 57905 +const WITHOUT = 57906 +const VALIDATION = 57907 +const UNUSED = 57908 +const ARRAY = 57909 +const BYTE = 57910 +const CUME_DIST = 57911 +const DESCRIPTION = 57912 +const DENSE_RANK = 57913 +const EMPTY = 57914 +const EXCEPT = 57915 +const FIRST_VALUE = 57916 +const GROUPING = 57917 +const GROUPS = 57918 +const JSON_TABLE = 57919 +const LAG = 57920 +const LAST_VALUE = 57921 +const LATERAL = 57922 +const LEAD = 57923 +const INFILE = 57924 +const NTH_VALUE = 57925 +const NTILE = 57926 +const OF = 57927 +const OVER = 57928 +const PERCENT_RANK = 57929 +const RANK = 57930 +const RECURSIVE = 57931 +const ROW_NUMBER = 57932 +const SYSTEM = 57933 +const WINDOW = 57934 +const ACTIVE = 57935 +const ADMIN = 57936 +const AUTOEXTEND_SIZE = 57937 +const BUCKETS = 57938 +const CLONE = 57939 +const COLUMN_FORMAT = 57940 +const COMPONENT = 57941 +const DEFINITION = 57942 +const ENFORCED = 57943 +const ENGINE_ATTRIBUTE = 57944 +const EXCLUDE = 57945 +const FOLLOWING = 57946 +const GET_MASTER_PUBLIC_KEY = 57947 +const HISTOGRAM = 57948 +const HISTORY = 57949 +const INACTIVE = 57950 +const INVISIBLE = 57951 +const LOCKED = 57952 +const MASTER_COMPRESSION_ALGORITHMS = 57953 +const MASTER_PUBLIC_KEY_PATH = 57954 +const MASTER_TLS_CIPHERSUITES = 57955 +const MASTER_ZSTD_COMPRESSION_LEVEL = 57956 +const NESTED = 57957 +const NETWORK_NAMESPACE = 57958 
+const NOWAIT = 57959 +const NULLS = 57960 +const OJ = 57961 +const OLD = 57962 +const OPTIONAL = 57963 +const ORDINALITY = 57964 +const ORGANIZATION = 57965 +const OTHERS = 57966 +const PARTIAL = 57967 +const PATH = 57968 +const PERSIST = 57969 +const PERSIST_ONLY = 57970 +const PRECEDING = 57971 +const PRIVILEGE_CHECKS_USER = 57972 +const PROCESS = 57973 +const RANDOM = 57974 +const REFERENCE = 57975 +const REQUIRE_ROW_FORMAT = 57976 +const RESOURCE = 57977 +const RESPECT = 57978 +const RESTART = 57979 +const RETAIN = 57980 +const REUSE = 57981 +const ROLE = 57982 +const SECONDARY = 57983 +const SECONDARY_ENGINE = 57984 +const SECONDARY_ENGINE_ATTRIBUTE = 57985 +const SECONDARY_LOAD = 57986 +const SECONDARY_UNLOAD = 57987 +const SIMPLE = 57988 +const SKIP = 57989 +const SRID = 57990 +const THREAD_PRIORITY = 57991 +const TIES = 57992 +const UNBOUNDED = 57993 +const VCPU = 57994 +const VISIBLE = 57995 +const RETURNING = 57996 +const FORMAT_BYTES = 57997 +const FORMAT_PICO_TIME = 57998 +const PS_CURRENT_THREAD_ID = 57999 +const PS_THREAD_ID = 58000 +const GTID_SUBSET = 58001 +const GTID_SUBTRACT = 58002 +const WAIT_FOR_EXECUTED_GTID_SET = 58003 +const WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS = 58004 +const FORMAT = 58005 +const TREE = 58006 +const VITESS = 58007 +const TRADITIONAL = 58008 +const VTEXPLAIN = 58009 +const VEXPLAIN = 58010 +const PLAN = 58011 +const LOCAL = 58012 +const LOW_PRIORITY = 58013 +const NO_WRITE_TO_BINLOG = 58014 +const LOGS = 58015 +const ERROR = 58016 +const GENERAL = 58017 +const HOSTS = 58018 +const OPTIMIZER_COSTS = 58019 +const USER_RESOURCES = 58020 +const SLOW = 58021 +const CHANNEL = 58022 +const RELAY = 58023 +const EXPORT = 58024 +const CURRENT = 58025 +const ROW = 58026 +const ROWS = 58027 +const AVG_ROW_LENGTH = 58028 +const CONNECTION = 58029 +const CHECKSUM = 58030 +const DELAY_KEY_WRITE = 58031 +const ENCRYPTION = 58032 +const ENGINE = 58033 +const INSERT_METHOD = 58034 +const MAX_ROWS = 58035 +const MIN_ROWS = 58036 +const 
PACK_KEYS = 58037 +const PASSWORD = 58038 +const FIXED = 58039 +const DYNAMIC = 58040 +const COMPRESSED = 58041 +const REDUNDANT = 58042 +const COMPACT = 58043 +const ROW_FORMAT = 58044 +const STATS_AUTO_RECALC = 58045 +const STATS_PERSISTENT = 58046 +const STATS_SAMPLE_PAGES = 58047 +const STORAGE = 58048 +const MEMORY = 58049 +const DISK = 58050 +const PARTITIONS = 58051 +const LINEAR = 58052 +const RANGE = 58053 +const LIST = 58054 +const SUBPARTITION = 58055 +const SUBPARTITIONS = 58056 +const HASH = 58057 var yyToknames = [...]string{ "$end", @@ -868,6 +879,7 @@ var yyToknames = [...]string{ "BOTH", "LEADING", "TRAILING", + "KILL", "EMPTY_FROM_CLAUSE", "LOWER_THAN_CHARSET", "CHARSET", @@ -1194,6 +1206,15 @@ var yyToknames = [...]string{ "SECOND_MICROSECOND", "YEAR_MONTH", "WEEK", + "SQL_TSI_DAY", + "SQL_TSI_WEEK", + "SQL_TSI_HOUR", + "SQL_TSI_MINUTE", + "SQL_TSI_MONTH", + "SQL_TSI_QUARTER", + "SQL_TSI_SECOND", + "SQL_TSI_MICROSECOND", + "SQL_TSI_YEAR", "REPLACE", "CONVERT", "CAST", @@ -1239,6 +1260,7 @@ var yyToknames = [...]string{ "VAR_POP", "VAR_SAMP", "VARIANCE", + "ANY_VALUE", "REGEXP_INSTR", "REGEXP_LIKE", "REGEXP_REPLACE", @@ -1472,5687 +1494,5764 @@ var yyExca = [...]int{ 1, -1, -2, 0, -1, 2, - 13, 51, - 14, 51, - -2, 40, - -1, 53, - 1, 159, - 722, 159, - -2, 167, + 13, 52, + 14, 52, + -2, 41, -1, 54, - 135, 167, - 177, 167, - 346, 167, - -2, 523, - -1, 62, - 36, 772, - 240, 772, - 251, 772, - 286, 786, - 287, 786, - -2, 774, - -1, 67, - 242, 810, - -2, 808, - -1, 113, - 588, 1593, - -2, 2220, - -1, 122, - 239, 1566, - -2, 133, - -1, 124, 1, 160, - 722, 160, - -2, 167, - -1, 135, - 136, 408, - 245, 408, - -2, 512, - -1, 154, - 135, 167, - 177, 167, - 346, 167, - -2, 532, - -1, 722, - 163, 41, - -2, 45, - -1, 927, - 87, 1583, - -2, 1437, - -1, 928, - 87, 1584, - 222, 1588, - -2, 1438, - -1, 929, - 222, 1587, - -2, 42, - -1, 1008, - 60, 884, - -2, 899, - -1, 1098, - 250, 43, - 255, 43, - -2, 419, - -1, 1183, - 1, 580, - 722, 580, - -2, 167, - -1, 1486, - 
222, 1588, - -2, 1438, - -1, 1692, - 60, 885, - -2, 904, - -1, 1693, - 60, 886, - -2, 905, - -1, 1745, - 135, 167, - 177, 167, - 346, 167, - -2, 458, - -1, 1826, - 136, 408, - 245, 408, - -2, 512, - -1, 1835, - 250, 44, - 255, 44, + 733, 160, + -2, 168, + -1, 55, + 136, 168, + 178, 168, + 347, 168, + -2, 524, + -1, 63, + 36, 775, + 241, 775, + 252, 775, + 287, 789, + 288, 789, + -2, 777, + -1, 68, + 243, 813, + -2, 811, + -1, 115, + 599, 1615, + -2, 2256, + -1, 124, + 240, 1588, + -2, 134, + -1, 126, + 1, 161, + 733, 161, + -2, 168, + -1, 137, + 137, 409, + 246, 409, + -2, 513, + -1, 156, + 136, 168, + 178, 168, + 347, 168, + -2, 533, + -1, 735, + 164, 42, + -2, 46, + -1, 941, + 87, 1605, + -2, 1459, + -1, 942, + 87, 1606, + 223, 1610, + -2, 1460, + -1, 943, + 223, 1609, + -2, 43, + -1, 1025, + 60, 887, + -2, 902, + -1, 1115, + 251, 44, + 256, 44, -2, 420, - -1, 2253, - 222, 1592, - -2, 1586, - -1, 2254, - 222, 1588, - -2, 1584, - -1, 2355, - 135, 167, - 177, 167, - 346, 167, + -1, 1200, + 1, 581, + 733, 581, + -2, 168, + -1, 1502, + 223, 1610, + -2, 1460, + -1, 1710, + 60, 888, + -2, 907, + -1, 1711, + 60, 889, + -2, 908, + -1, 1763, + 136, 168, + 178, 168, + 347, 168, -2, 459, - -1, 2362, - 26, 188, - -2, 190, - -1, 2815, - 78, 98, - 88, 98, - -2, 963, - -1, 2884, - 697, 696, - -2, 670, - -1, 3091, - 50, 1534, - -2, 1528, - -1, 3912, - 697, 696, - -2, 684, - -1, 4014, - 90, 628, - 95, 628, - 105, 628, - 179, 628, - 180, 628, - 181, 628, - 182, 628, - 183, 628, - 184, 628, - 185, 628, - 186, 628, - 187, 628, - 188, 628, - 189, 628, - 190, 628, - 191, 628, - 192, 628, - 193, 628, - 194, 628, - 195, 628, - 196, 628, - 197, 628, - 198, 628, - 199, 628, - 200, 628, - 201, 628, - 202, 628, - 203, 628, - 204, 628, - 205, 628, - 206, 628, - 207, 628, - 208, 628, - 209, 628, - 210, 628, - 211, 628, - 212, 628, - 213, 628, - 214, 628, - 215, 628, - 216, 628, - 217, 628, - 218, 628, - 219, 628, - 220, 628, - -2, 1972, + -1, 1844, + 137, 409, + 246, 409, + -2, 513, + -1, 
1853, + 251, 45, + 256, 45, + -2, 421, + -1, 2291, + 223, 1614, + -2, 1608, + -1, 2292, + 223, 1610, + -2, 1606, + -1, 2393, + 136, 168, + 178, 168, + 347, 168, + -2, 460, + -1, 2400, + 26, 189, + -2, 191, + -1, 2855, + 78, 99, + 88, 99, + -2, 966, + -1, 2924, + 708, 699, + -2, 673, + -1, 3132, + 50, 1556, + -2, 1550, + -1, 3957, + 708, 699, + -2, 687, + -1, 4059, + 90, 631, + 95, 631, + 105, 631, + 180, 631, + 181, 631, + 182, 631, + 183, 631, + 184, 631, + 185, 631, + 186, 631, + 187, 631, + 188, 631, + 189, 631, + 190, 631, + 191, 631, + 192, 631, + 193, 631, + 194, 631, + 195, 631, + 196, 631, + 197, 631, + 198, 631, + 199, 631, + 200, 631, + 201, 631, + 202, 631, + 203, 631, + 204, 631, + 205, 631, + 206, 631, + 207, 631, + 208, 631, + 209, 631, + 210, 631, + 211, 631, + 212, 631, + 213, 631, + 214, 631, + 215, 631, + 216, 631, + 217, 631, + 218, 631, + 219, 631, + 220, 631, + 221, 631, + -2, 1998, } const yyPrivate = 57344 -const yyLast = 54898 +const yyLast = 55664 var yyAct = [...]int{ - 1700, 4096, 4109, 3893, 931, 3985, 4061, 2352, 938, 2078, - 4062, 1748, 3370, 4012, 1250, 1954, 3520, 3943, 3242, 3973, - 2282, 2302, 3144, 3151, 3111, 3873, 3798, 3193, 723, 3202, - 3207, 3204, 3203, 3201, 3206, 3205, 3871, 1024, 2009, 3507, - 896, 5, 2710, 42, 1001, 2748, 3602, 3107, 3044, 2426, - 1707, 726, 3104, 3221, 2787, 3861, 1248, 3108, 3105, 3222, - 3415, 3409, 2284, 3159, 2947, 891, 892, 3607, 753, 2306, - 3092, 3436, 893, 1130, 2322, 3224, 2326, 3102, 897, 2774, - 720, 3401, 2394, 2881, 2414, 2850, 2457, 2851, 2340, 2800, - 2389, 163, 1025, 1060, 1003, 2849, 41, 1196, 2327, 2929, - 3248, 2328, 2780, 1694, 2750, 2766, 2249, 2237, 43, 2204, - 2205, 2435, 2017, 1833, 149, 2413, 2396, 2842, 2474, 1851, - 2314, 2921, 1737, 2817, 1088, 1005, 1716, 1009, 2330, 736, - 99, 3569, 3570, 1674, 3568, 103, 2105, 104, 1498, 1425, - 1804, 1410, 1950, 1840, 1067, 1064, 1027, 1099, 1093, 2411, - 1932, 1068, 2037, 2385, 2101, 1096, 2062, 1736, 1094, 1095, - 1721, 1015, 2250, 724, 
1106, 721, 2386, 731, 1045, 1047, - 2155, 1482, 98, 106, 2307, 1458, 1012, 2008, 1239, 84, - 1961, 167, 1246, 1010, 2113, 127, 1799, 132, 133, 125, - 126, 1179, 1825, 83, 730, 1011, 1013, 1225, 105, 1037, - 1502, 3902, 92, 2874, 97, 4097, 1507, 2428, 2429, 2430, - 3928, 3508, 930, 3190, 2428, 2904, 2903, 2472, 2872, 3463, - 2066, 3500, 4039, 1426, 94, 1132, 94, 3212, 2937, 1018, - 2938, 1671, 1053, 1057, 895, 128, 3929, 656, 1149, 1150, - 1151, 1917, 1154, 1155, 1156, 1157, 1135, 134, 1160, 1161, - 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, - 1172, 1173, 1174, 1175, 1176, 1032, 1036, 94, 1061, 3923, - 3924, 3573, 1019, 2024, 2, 690, 690, 696, 1004, 3573, - 2023, 2022, 1002, 3210, 1054, 2021, 2040, 3212, 1426, 2020, - 2019, 1055, 1109, 2279, 2280, 1992, 1195, 714, 4033, 715, - 3209, 1110, 190, 1085, 2746, 2519, 128, 716, 1026, 3216, - 1084, 1136, 1139, 1140, 1083, 1082, 1041, 1086, 3088, 110, - 111, 112, 2894, 1143, 116, 1421, 129, 122, 151, 94, - 191, 1436, 3048, 650, 1077, 4119, 1701, 2461, 4060, 172, - 2776, 1442, 1152, 3210, 4065, 709, 710, 947, 948, 949, - 1072, 4087, 3375, 3374, 997, 998, 999, 1000, 3572, 2459, - 1008, 4102, 1053, 1057, 895, 3874, 3572, 4043, 4041, 3216, - 162, 3924, 2897, 190, 128, 2711, 150, 947, 948, 949, - 2029, 2460, 1730, 3267, 2877, 3794, 4101, 3793, 1134, 1042, - 1043, 690, 4042, 4040, 696, 169, 1436, 129, 170, 151, - 2405, 3513, 85, 1133, 3514, 87, 690, 4075, 3804, 4037, - 172, 3907, 3532, 1412, 3521, 3974, 85, 138, 139, 161, - 160, 189, 3213, 2399, 3982, 4022, 3986, 2454, 2528, 3803, - 3531, 2071, 4017, 3287, 2310, 1814, 3141, 3142, 690, 1432, - 2790, 162, 1424, 4020, 1182, 2826, 3140, 150, 2825, 2347, - 2348, 2827, 4026, 4027, 1738, 85, 1739, 1046, 2911, 2912, - 3161, 3162, 2001, 2002, 2747, 2791, 169, 4021, 2936, 170, - 85, 2525, 3990, 2346, 2920, 1243, 1439, 1215, 1440, 1441, - 94, 687, 3213, 1459, 995, 994, 691, 691, 1827, 1828, - 161, 160, 189, 2526, 94, 3894, 3628, 2838, 1203, 1220, - 1221, 3627, 3990, 1204, 
1432, 3264, 2875, 1460, 1461, 1462, - 1463, 1464, 1465, 1466, 1468, 1467, 1469, 1470, 1957, 3620, - 3275, 155, 136, 158, 143, 135, 1178, 156, 157, 672, - 2098, 1216, 1203, 94, 173, 2365, 2364, 1204, 1422, 2783, - 2784, 690, 670, 179, 144, 1202, 3412, 1201, 94, 3273, - 690, 1056, 1050, 1048, 1081, 2281, 1188, 1189, 147, 145, - 140, 141, 142, 146, 1209, 1411, 2517, 3244, 690, 3160, - 137, 2398, 2982, 1734, 2000, 704, 2004, 708, 702, 148, - 3249, 3163, 667, 2922, 2310, 1678, 2882, 4066, 1191, 2436, - 1242, 682, 155, 1829, 158, 3845, 1826, 3846, 156, 157, - 1081, 1177, 691, 1222, 1907, 173, 677, 3237, 4067, 2907, - 1153, 2480, 1079, 1223, 179, 3238, 680, 691, 2520, 2521, - 2523, 2522, 2475, 4099, 1933, 1217, 2308, 2309, 2090, 2079, - 2080, 2081, 2082, 2092, 2083, 2084, 2085, 2097, 2093, 2086, - 2087, 2094, 2095, 2096, 2088, 2089, 2091, 1236, 1908, 691, - 1909, 1184, 2495, 1241, 2496, 1958, 2497, 1224, 1210, 3245, - 1076, 1218, 1219, 1078, 2924, 1181, 2481, 164, 2487, 2483, - 2485, 2486, 2484, 2488, 2489, 3502, 3246, 3778, 1232, 3501, - 1234, 1056, 1050, 1048, 657, 4034, 659, 673, 2439, 693, - 2948, 692, 663, 2498, 661, 665, 674, 666, 3498, 660, - 1159, 671, 2479, 1158, 662, 675, 676, 679, 683, 684, - 685, 681, 678, 2477, 669, 694, 1089, 3577, 1231, 1233, - 1090, 1681, 2323, 1090, 1431, 1428, 1429, 1430, 1435, 1437, - 1434, 3047, 1433, 1128, 1127, 1119, 1117, 2983, 164, 1126, - 1080, 1125, 1427, 1124, 2478, 1123, 1122, 1121, 1116, 1818, - 1129, 1473, 691, 1473, 159, 1199, 3163, 1205, 1206, 1207, - 1208, 691, 1081, 1065, 1073, 1065, 2526, 4072, 1102, 1063, - 1180, 1075, 1074, 1493, 2950, 4120, 2308, 2309, 1138, 691, - 2928, 1244, 1245, 1065, 1101, 3901, 1080, 2873, 1137, 1431, - 1428, 1429, 1430, 1435, 1437, 1434, 1101, 1433, 1735, 1951, - 1839, 2876, 1938, 2909, 4113, 2412, 1038, 1427, 2925, 2751, - 2753, 2465, 1484, 3497, 2464, 1947, 3183, 1049, 1812, 1413, - 1079, 1146, 1108, 2458, 1229, 159, 2906, 944, 1230, 944, - 2840, 1811, 1810, 1213, 3115, 2892, 1087, 2941, 
1235, 2540, - 1948, 1808, 654, 3413, 2960, 2959, 2958, 1474, 1475, 2952, - 649, 2956, 88, 2951, 2919, 2949, 4035, 2918, 3886, 2456, - 2954, 152, 3450, 1228, 153, 1403, 3431, 2822, 1480, 2953, - 944, 1404, 1405, 2786, 1237, 2723, 2074, 1725, 1633, 1193, - 3056, 3214, 3215, 1120, 1118, 2781, 3055, 2955, 2957, 2402, - 3461, 3462, 124, 655, 3218, 165, 1476, 1477, 1478, 1479, - 2353, 1473, 177, 1470, 3139, 2896, 1490, 2551, 3988, 1838, - 3265, 1453, 1919, 1918, 1920, 1921, 1922, 1108, 1021, 3571, - 1701, 3530, 1240, 3915, 1226, 1107, 1131, 3571, 3493, 1937, - 2403, 2931, 152, 1108, 1198, 153, 2930, 2401, 3988, 4025, - 3987, 3214, 3215, 185, 3425, 2931, 2476, 1049, 1080, 2895, - 2930, 119, 1962, 1504, 3218, 1505, 1506, 1145, 2013, 1944, - 93, 1740, 2968, 2106, 1509, 1510, 165, 2106, 2865, 2560, - 3987, 2404, 2527, 177, 93, 4076, 1420, 2752, 1108, 695, - 1441, 2400, 3616, 4024, 1669, 1190, 166, 171, 168, 174, - 175, 176, 178, 180, 181, 182, 183, 3358, 2551, 1200, - 688, 1187, 184, 186, 187, 188, 1440, 1441, 3468, 1108, - 3467, 1702, 1704, 93, 185, 689, 1941, 2443, 1939, 1940, - 1107, 1942, 1943, 120, 4111, 1111, 1101, 4112, 93, 4110, - 1113, 1848, 1847, 1837, 1114, 1112, 1107, 1212, 2453, 1682, - 1670, 1111, 1101, 1684, 2451, 190, 1113, 1688, 1214, 1685, - 1114, 1112, 1119, 1005, 2455, 1115, 2114, 166, 171, 168, - 174, 175, 176, 178, 180, 181, 182, 183, 1108, 129, - 2115, 1227, 2448, 184, 186, 187, 188, 1197, 1934, 1117, - 1935, 1107, 172, 1936, 1442, 1183, 717, 1101, 1104, 1105, - 4068, 1065, 3451, 2448, 3945, 1098, 1102, 3879, 1686, 1963, - 1687, 1017, 103, 1670, 104, 1639, 1640, 1641, 1642, 1643, - 1644, 2452, 1107, 2242, 2042, 4121, 1097, 1701, 1101, 1104, - 1105, 1459, 1065, 1675, 2111, 2831, 1098, 1102, 2043, 1471, - 1472, 2041, 2450, 1663, 2031, 2033, 2034, 1442, 169, 3946, - 106, 170, 3880, 2598, 4115, 1460, 1461, 1462, 1463, 1464, - 1465, 1466, 1468, 1467, 1469, 1470, 3786, 3785, 2032, 1815, - 1816, 1817, 3776, 3543, 189, 1465, 1466, 1468, 1467, 1469, - 1470, 
1107, 3542, 1144, 1442, 1927, 3241, 1141, 2532, 2533, - 2534, 3475, 1845, 1703, 1407, 947, 948, 949, 2242, 3474, - 1831, 1442, 2239, 3527, 1028, 3528, 2586, 1034, 1034, 3464, - 2112, 2241, 4122, 1002, 3191, 1683, 1004, 1824, 1956, 1880, - 3282, 1706, 1883, 3179, 1885, 942, 2847, 711, 2846, 1439, - 1843, 1440, 1441, 2845, 4085, 1902, 1892, 1893, 1459, 696, - 2940, 2408, 1898, 1899, 1884, 1928, 1853, 1912, 1854, 1926, - 1856, 1858, 1731, 1732, 1862, 1864, 1866, 1868, 1870, 1842, - 1911, 1442, 1460, 1461, 1462, 1463, 1464, 1465, 1466, 1468, - 1467, 1469, 1470, 1807, 1910, 1900, 1734, 1894, 1459, 1841, - 1841, 1455, 1439, 1456, 1440, 1441, 1701, 173, 1891, 1822, - 1820, 2970, 1689, 4084, 1821, 1834, 179, 1457, 1471, 1472, - 1454, 712, 1460, 1461, 1462, 1463, 1464, 1465, 1466, 1468, - 1467, 1469, 1470, 1890, 1889, 1860, 1925, 1966, 1914, 1439, - 4069, 1440, 1441, 1459, 1970, 2539, 1972, 1973, 1974, 1975, - 4054, 1888, 4052, 1979, 1701, 1710, 1439, 1442, 1440, 1441, - 190, 1039, 1442, 1952, 4009, 1991, 1701, 1460, 1461, 1462, - 1463, 1464, 1465, 1466, 1468, 1467, 1469, 1470, 3458, 696, - 3910, 1459, 1813, 3903, 129, 1461, 1462, 1463, 1464, 1465, - 1466, 1468, 1467, 1469, 1470, 128, 3812, 172, 1442, 1084, - 1924, 1711, 1913, 1083, 1082, 1460, 1461, 1462, 1463, 1464, - 1465, 1466, 1468, 1467, 1469, 1470, 1439, 1442, 1440, 1441, - 3909, 1442, 1968, 2829, 696, 2424, 2423, 2422, 2421, 1964, - 1965, 1463, 1464, 1465, 1466, 1468, 1467, 1469, 1470, 3883, - 164, 4081, 1701, 1969, 3882, 1990, 4079, 1701, 1989, 3881, - 1976, 1977, 1978, 169, 2420, 2419, 170, 1460, 1461, 1462, - 1463, 1464, 1465, 1466, 1468, 1467, 1469, 1470, 1446, 1447, - 1448, 1449, 1450, 1451, 1452, 1444, 2772, 4098, 100, 189, - 100, 3781, 3996, 1701, 3811, 102, 3765, 42, 3764, 101, - 42, 101, 1439, 1442, 1440, 1441, 3615, 1439, 1442, 1440, - 1441, 3994, 1701, 4056, 1701, 3992, 1701, 3429, 1701, 1442, - 1438, 1701, 3769, 2069, 2069, 2045, 3613, 2047, 2048, 2049, - 2050, 2051, 2052, 2054, 2056, 2057, 2058, 2059, 
2060, 2061, - 2596, 3539, 1442, 1439, 2109, 1440, 1441, 109, 1668, 2110, - 1438, 1701, 3768, 2594, 1701, 1442, 1701, 102, 108, 1667, - 107, 109, 1439, 1666, 1440, 1441, 1439, 3472, 1440, 1441, - 2772, 3981, 108, 2038, 107, 1669, 2772, 3960, 1442, 2133, - 2772, 3956, 1442, 102, 2151, 3936, 1701, 3858, 1701, 3511, - 3900, 3519, 3856, 1701, 3789, 1701, 1484, 2772, 3777, 3511, - 1701, 108, 173, 3853, 1701, 2772, 3509, 2883, 2147, 3457, - 3250, 179, 1442, 2107, 2448, 1701, 1701, 1442, 2678, 1701, - 3424, 3247, 1442, 3182, 2039, 1997, 1998, 1701, 1701, 2549, - 3181, 1670, 2856, 84, 3172, 3171, 84, 2014, 1439, 2548, - 1440, 1441, 1701, 1439, 2843, 1440, 1441, 3169, 3170, 3167, - 3168, 2035, 3167, 3166, 1439, 1665, 1440, 1441, 2797, 1701, - 2818, 2046, 3835, 1701, 2235, 2508, 3400, 1701, 2526, 2905, - 1438, 2044, 2768, 1442, 1803, 2886, 2861, 1439, 165, 1440, - 1441, 2879, 2880, 2772, 2771, 177, 2507, 102, 2253, 2252, - 1439, 2470, 1440, 1441, 2268, 2073, 3393, 1701, 2100, 2102, - 2469, 3390, 1701, 2553, 1701, 2361, 3388, 1701, 2072, 1701, - 1438, 1702, 2275, 1439, 1442, 1440, 1441, 1439, 2325, 1440, - 1441, 1442, 2305, 2819, 2287, 1993, 185, 2067, 2067, 1959, - 2251, 1923, 2238, 2821, 2818, 164, 2070, 2116, 2117, 2118, - 2119, 1442, 1915, 1905, 2299, 2553, 2240, 1439, 1901, 1440, - 1441, 2130, 1439, 1897, 1440, 1441, 3147, 1439, 4070, 1440, - 1441, 1896, 1442, 2292, 1895, 2293, 1803, 1802, 1442, 166, - 171, 168, 174, 175, 176, 178, 180, 181, 182, 183, - 2253, 2320, 2256, 2257, 1712, 184, 186, 187, 188, 1746, - 1745, 2788, 2315, 2316, 1238, 2796, 2362, 2819, 3350, 1701, - 3426, 3148, 103, 3134, 104, 3348, 1701, 2526, 1439, 2145, - 1440, 1441, 2334, 2526, 2788, 2449, 3103, 3941, 3914, 2156, - 2772, 103, 2251, 104, 1442, 3150, 3898, 3424, 2797, 3379, - 3169, 3076, 2345, 2553, 2678, 2583, 1442, 2255, 2298, 2582, - 2258, 2259, 2448, 3145, 2431, 1443, 3807, 2409, 2313, 1439, - 2797, 1440, 1441, 1705, 2277, 2072, 1439, 2015, 1440, 1441, - 3161, 3162, 3476, 2797, 1999, 1946, 
1876, 3146, 1733, 2371, - 2372, 2373, 2374, 2448, 1018, 1499, 1439, 2356, 1440, 1441, - 2357, 1442, 1007, 2274, 1092, 2338, 3424, 1091, 2286, 2229, - 2230, 2231, 2232, 2233, 2297, 1442, 94, 1439, 4030, 1440, - 1441, 3152, 1442, 1439, 3963, 1440, 1441, 2300, 3344, 1701, - 3800, 1708, 3766, 3477, 3478, 3479, 3631, 1877, 1878, 1879, - 3341, 1701, 3492, 2318, 3489, 3470, 3292, 2397, 3291, 2391, - 1442, 1805, 2437, 2344, 2366, 2360, 2367, 2368, 2369, 2370, - 1442, 1054, 2343, 2342, 2276, 2359, 2358, 2390, 1055, 2156, - 3239, 2557, 2377, 2378, 2379, 2380, 3196, 1442, 3192, 1439, - 94, 1440, 1441, 165, 2434, 2407, 3773, 2887, 2387, 3160, - 177, 1439, 2382, 1440, 1441, 2376, 2375, 1930, 2852, 3339, - 1701, 3163, 1442, 1836, 1832, 2392, 3337, 1701, 1442, 1801, - 2388, 2442, 121, 3194, 2445, 2406, 2446, 1442, 2410, 2381, - 2383, 2384, 1442, 2418, 3480, 2853, 1182, 3437, 3438, 1442, - 2547, 185, 3243, 3801, 3335, 1701, 1439, 2462, 1440, 1441, - 2392, 2441, 2440, 2444, 2556, 1109, 2853, 1714, 2405, 2290, - 1439, 4093, 1440, 1441, 1110, 4091, 2463, 1439, 1995, 1440, - 1441, 3333, 1701, 4063, 1841, 3922, 3840, 3440, 3188, 3443, - 3187, 3481, 3482, 3483, 166, 171, 168, 174, 175, 176, - 178, 180, 181, 182, 183, 1439, 3445, 1440, 1441, 3186, - 184, 186, 187, 188, 3103, 1439, 2866, 1440, 1441, 2502, - 3442, 3331, 1701, 2473, 3123, 2466, 3329, 1701, 3122, 2467, - 2468, 3126, 1439, 1713, 1440, 1441, 3127, 3149, 3918, 3124, - 1996, 3802, 2510, 2511, 3125, 2304, 3128, 2513, 2806, 2807, - 1872, 2536, 1709, 2538, 2545, 2296, 2514, 1439, 2550, 1440, - 1441, 3430, 2541, 1439, 2542, 1440, 1441, 3093, 3095, 2253, - 2252, 2501, 1439, 3081, 1440, 1441, 3096, 1439, 3080, 1440, - 1441, 2554, 3417, 2555, 1439, 3952, 1440, 1441, 2562, 3606, - 3416, 3878, 2564, 2565, 3608, 3954, 1717, 1873, 1874, 1875, - 2038, 2571, 2572, 2573, 2574, 2575, 2576, 2577, 2578, 2579, - 2580, 2546, 3420, 4007, 4004, 3890, 2587, 2588, 2589, 2590, - 2591, 2516, 2593, 1442, 1022, 2544, 2595, 3888, 1945, 3090, - 2600, 2601, 
1023, 2602, 113, 2524, 2605, 1806, 2606, 2608, - 2610, 2611, 2612, 2613, 2614, 2615, 2617, 2619, 2620, 2621, - 2623, 2039, 2625, 2626, 2628, 2630, 2632, 2634, 2636, 2638, - 2640, 2642, 2644, 2646, 2648, 2650, 2652, 2654, 2656, 2658, - 2660, 2662, 2663, 2664, 2535, 2666, 3165, 2668, 2836, 2670, - 2671, 2857, 2673, 2675, 2677, 1148, 1147, 2537, 2680, 1442, - 2114, 3258, 2684, 2852, 100, 2934, 2689, 2690, 2691, 2692, - 1442, 1699, 1695, 1406, 2115, 101, 2893, 3327, 1701, 2703, - 2704, 2705, 2706, 2707, 2708, 129, 1696, 2712, 2713, 3422, - 2795, 2559, 1442, 2315, 2316, 2715, 1442, 102, 4107, 3184, - 2721, 1442, 3597, 2505, 3596, 1442, 2724, 2725, 2726, 2727, - 2728, 2294, 2295, 1698, 2494, 1697, 1442, 2735, 2736, 4001, - 2737, 3899, 3796, 2740, 2742, 2299, 3164, 2744, 1439, 2592, - 1440, 1441, 1442, 2810, 100, 1960, 1442, 2756, 3153, 2301, - 3079, 102, 3157, 3325, 1701, 101, 2493, 2069, 3078, 3156, - 1030, 1031, 3595, 2492, 3323, 1701, 2802, 2805, 2806, 2807, - 2803, 1442, 2804, 2808, 2491, 2490, 3437, 3438, 3402, 2530, - 107, 109, 4053, 1442, 4051, 4008, 3321, 1701, 4005, 1442, - 3319, 1701, 108, 3158, 107, 3317, 1701, 3955, 3154, 3315, - 1701, 3866, 3865, 3155, 1439, 2566, 1440, 1441, 2755, 1442, - 3313, 1701, 3843, 3614, 1442, 1439, 3612, 1440, 1441, 3611, - 1442, 2792, 2581, 42, 3604, 108, 3311, 1701, 3490, 2758, - 3395, 2760, 3421, 2811, 3419, 2773, 2813, 1439, 1442, 1440, - 1441, 1439, 3197, 1440, 1441, 1442, 1439, 2432, 1440, 1441, - 1439, 1819, 1440, 1441, 109, 3297, 1701, 1029, 3603, 3410, - 2788, 1439, 2769, 1440, 1441, 108, 1675, 3280, 1701, 2812, - 3581, 2745, 2782, 2743, 1701, 4095, 4094, 1439, 1442, 1440, - 1441, 1439, 2768, 1440, 1441, 2984, 1442, 2584, 2765, 1670, - 2288, 2770, 4094, 2741, 1701, 1726, 1718, 2839, 2841, 3494, - 2785, 1699, 1695, 4095, 2716, 1701, 1439, 2832, 1440, 1441, - 2816, 3884, 1442, 3456, 2878, 1020, 1696, 1442, 1439, 3626, - 1440, 1441, 3391, 2820, 1439, 3, 1440, 1441, 2823, 2693, - 1701, 96, 2855, 1, 2902, 114, 115, 2858, 2859, 
2830, - 2397, 1692, 1693, 1698, 1439, 1697, 1440, 1441, 9, 1439, - 2012, 1440, 1441, 11, 2844, 1439, 2010, 1440, 1441, 10, - 1040, 2067, 2685, 1701, 2011, 4003, 2833, 8, 3951, 2757, - 2676, 1701, 2854, 1439, 3889, 1440, 1441, 3953, 4006, 3625, - 1439, 3887, 1440, 1441, 1409, 2025, 2026, 2027, 2028, 2863, - 2862, 2867, 2868, 2869, 1408, 3460, 3356, 2900, 4019, 668, - 2036, 2674, 1701, 2278, 1673, 4064, 1824, 2899, 4015, 4016, - 1916, 2944, 2945, 1439, 1906, 1440, 1441, 2888, 2889, 3522, - 2203, 1439, 3797, 1440, 1441, 3200, 2438, 2972, 2973, 2974, - 2975, 2976, 2946, 3488, 2898, 2075, 2076, 2395, 1100, 154, - 2963, 2099, 1034, 1034, 2103, 2354, 2981, 1439, 2108, 1440, - 1441, 2355, 1439, 3976, 1440, 1441, 118, 1058, 117, 1103, - 1211, 2433, 3512, 2120, 2121, 2122, 2123, 2124, 2125, 2126, - 2127, 2128, 2129, 2961, 2926, 2923, 2837, 2134, 2135, 2136, - 2137, 2138, 2139, 2141, 2363, 2146, 2977, 2148, 2149, 2150, - 1752, 2152, 2153, 2154, 1750, 2157, 2158, 2159, 2160, 2161, - 2162, 2163, 2164, 2165, 2166, 2167, 2168, 2169, 2170, 2171, - 2172, 2173, 2174, 2175, 2176, 2177, 2178, 2179, 2180, 2181, - 2182, 2183, 2184, 2185, 2186, 2187, 2188, 2189, 2190, 2191, - 2192, 2193, 2194, 2195, 2196, 2197, 2198, 2199, 2200, 2201, - 2202, 2206, 2207, 2208, 2209, 2210, 2211, 2212, 2213, 2214, - 2215, 2216, 2217, 2218, 2219, 2220, 2221, 2222, 2223, 2224, - 2225, 2226, 2227, 2228, 2943, 2942, 2962, 2932, 2965, 2234, - 2933, 2236, 1751, 2243, 2244, 2245, 2246, 2247, 2248, 1034, - 1749, 1034, 1034, 1034, 1034, 1034, 1754, 1442, 1753, 3266, - 2585, 3357, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2267, - 2003, 2269, 2270, 2271, 2272, 2273, 109, 1442, 2802, 2805, - 2806, 2807, 2803, 2986, 2804, 2808, 3049, 108, 703, 107, - 2809, 697, 192, 1741, 3051, 1719, 3371, 1142, 102, 658, - 1442, 3173, 2471, 664, 1491, 1994, 3077, 2824, 1034, 1052, - 3060, 2238, 1044, 2238, 2964, 2289, 2759, 1051, 3949, 3022, - 3774, 3414, 3089, 3091, 3112, 2240, 2775, 2240, 3094, 3087, - 2311, 2312, 3877, 3605, 3032, 
3033, 3034, 3035, 3036, 2334, - 3961, 2661, 1701, 3131, 2834, 1715, 3378, 2558, 2104, 1481, - 2331, 3083, 3050, 3576, 3052, 3100, 2030, 2351, 728, 3059, - 727, 2659, 1701, 725, 1009, 2761, 3133, 1956, 2334, 2334, - 2334, 2334, 2334, 3060, 2789, 3084, 1445, 932, 2749, 3116, - 1727, 2801, 2799, 2798, 3071, 2848, 2503, 2339, 2334, 3439, - 3435, 2334, 1439, 3106, 1440, 1441, 4011, 2333, 2329, 3106, - 3109, 3082, 2767, 3085, 3075, 883, 882, 737, 729, 719, - 3097, 3098, 1439, 881, 1440, 1441, 880, 3453, 2393, 3226, - 1010, 2908, 3240, 2910, 3135, 2835, 3117, 3136, 3129, 3120, - 3118, 3119, 1011, 3121, 103, 1439, 104, 1440, 1441, 3220, - 3137, 3236, 1423, 3024, 3114, 3026, 3072, 3073, 3074, 1691, - 713, 1071, 3263, 3143, 3905, 2529, 3177, 3178, 3286, 1690, - 3912, 3037, 3038, 3039, 3040, 3174, 3208, 3176, 3175, 3506, - 3260, 3189, 2884, 2425, 70, 46, 3872, 3942, 875, 872, - 3578, 3579, 3580, 3045, 3046, 3199, 3925, 3926, 871, 3927, - 3228, 3227, 3277, 3278, 2140, 3279, 1419, 3281, 3283, 3219, - 1416, 2397, 1442, 3198, 4032, 2005, 95, 37, 36, 35, - 34, 3290, 3234, 1442, 33, 27, 3294, 3295, 3296, 3298, - 3299, 3300, 3301, 3302, 3303, 3304, 3305, 3306, 3307, 3308, - 3309, 3310, 3312, 3314, 3316, 3318, 3320, 3322, 3324, 3326, - 3328, 3330, 3332, 3334, 3336, 3338, 3340, 3342, 3343, 3345, - 3346, 3347, 3349, 3254, 3251, 3351, 3253, 3353, 3354, 3355, - 3256, 3257, 3359, 3360, 3361, 3362, 3363, 3364, 3365, 3366, - 3367, 3368, 3369, 3271, 26, 25, 24, 23, 30, 20, - 22, 3376, 21, 19, 3211, 3381, 2657, 1701, 4059, 3385, - 3386, 1442, 3387, 3389, 4106, 3392, 3394, 3352, 3396, 3397, - 3398, 3399, 123, 56, 53, 51, 3405, 131, 130, 54, - 50, 1185, 3380, 48, 3382, 3383, 3384, 3373, 32, 3268, - 3269, 31, 3270, 18, 3377, 3272, 17, 3274, 16, 3276, - 15, 1459, 1442, 14, 13, 3262, 3261, 1439, 12, 1440, - 1441, 3427, 3428, 7, 6, 3432, 1442, 40, 1439, 39, - 1440, 1441, 29, 38, 28, 1460, 1461, 1462, 1463, 1464, - 1465, 1466, 1468, 1467, 1469, 1470, 1442, 3408, 4, 2871, - 1442, 2427, 0, 0, 1442, 
2655, 1701, 0, 0, 0, - 0, 0, 0, 0, 0, 3403, 3404, 3406, 1442, 0, - 0, 0, 3418, 1442, 0, 0, 0, 3411, 0, 0, - 0, 3434, 1442, 0, 0, 2334, 1442, 0, 0, 0, - 0, 3444, 3423, 0, 1442, 0, 2653, 1701, 1442, 0, - 3448, 3449, 3441, 2561, 0, 0, 1439, 0, 1440, 1441, - 2651, 1701, 0, 2567, 2568, 2569, 2570, 1442, 3446, 0, - 0, 3447, 0, 3228, 3227, 0, 3454, 0, 3495, 3496, - 2649, 1701, 3510, 3455, 2647, 1701, 0, 1499, 2645, 1701, - 0, 3465, 3466, 0, 3516, 3517, 3471, 1439, 3473, 1440, - 1441, 0, 2643, 1701, 0, 1442, 0, 2641, 1701, 1442, - 0, 1439, 0, 1440, 1441, 0, 2639, 1701, 0, 0, - 2637, 1701, 0, 0, 3529, 0, 0, 3533, 2635, 1701, - 0, 1439, 3289, 1440, 1441, 1439, 0, 1440, 1441, 1439, - 0, 1440, 1441, 0, 3499, 0, 3518, 0, 3503, 3504, - 3505, 2633, 1701, 1439, 3544, 1440, 1441, 0, 1439, 0, - 1440, 1441, 0, 2098, 1442, 0, 0, 1439, 0, 1440, - 1441, 1439, 1442, 1440, 1441, 0, 0, 0, 0, 1439, - 0, 1440, 1441, 1439, 3538, 1440, 1441, 0, 0, 2631, - 1701, 0, 0, 2629, 1701, 0, 1442, 0, 0, 3534, - 0, 0, 1439, 0, 1440, 1441, 0, 0, 0, 0, - 1442, 0, 0, 0, 1442, 0, 0, 0, 3567, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 3575, 0, 0, 943, 0, 0, 86, 0, 3582, - 1439, 0, 1440, 1441, 1439, 0, 1440, 1441, 2627, 1701, - 3584, 0, 3585, 3586, 3587, 0, 2622, 1701, 0, 0, - 1717, 2090, 2079, 2080, 2081, 2082, 2092, 2083, 2084, 2085, - 2097, 2093, 2086, 2087, 2094, 2095, 2096, 2088, 2089, 2091, - 2618, 1701, 0, 3574, 0, 0, 3600, 0, 0, 0, - 0, 0, 0, 0, 2616, 1701, 0, 0, 3288, 1439, - 0, 1440, 1441, 0, 0, 0, 0, 1439, 0, 1440, - 1441, 0, 0, 0, 2069, 0, 0, 0, 0, 1006, - 0, 86, 0, 0, 0, 0, 0, 3621, 0, 42, - 3601, 1439, 0, 1440, 1441, 3610, 0, 0, 3623, 3629, - 1006, 3609, 0, 3617, 3619, 1439, 0, 1440, 1441, 1439, - 0, 1440, 1441, 0, 0, 1442, 1070, 3106, 0, 0, - 0, 3634, 3635, 0, 0, 3109, 0, 3788, 3637, 3109, - 0, 0, 0, 0, 3775, 0, 3795, 0, 0, 0, - 0, 0, 1442, 0, 0, 0, 1442, 0, 0, 0, - 0, 0, 0, 0, 0, 3770, 3805, 3806, 1442, 3808, - 3772, 3809, 3810, 3771, 0, 1442, 3813, 3814, 3815, 3816, - 3817, 3818, 3819, 3820, 
3821, 3822, 3823, 3824, 3825, 3826, - 3827, 3828, 3829, 3830, 3831, 3832, 3833, 3834, 3787, 3836, - 3839, 3792, 0, 3791, 3782, 3783, 3784, 3779, 3799, 2609, - 1701, 0, 2069, 0, 3565, 3848, 3849, 3850, 3851, 3852, - 3854, 3855, 3857, 3859, 3860, 3862, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 2607, 1701, 0, 0, - 3285, 0, 0, 0, 0, 0, 0, 0, 2939, 0, - 1442, 3842, 2739, 0, 0, 0, 0, 3844, 0, 2738, - 1439, 3847, 1440, 1441, 0, 0, 0, 3896, 0, 0, - 1034, 1442, 0, 2966, 2967, 0, 3869, 2969, 3870, 0, - 2971, 0, 0, 3892, 3867, 3868, 0, 1439, 2067, 1440, - 1441, 1439, 0, 1440, 1441, 1442, 3633, 3891, 3885, 2978, - 2979, 2980, 0, 1439, 0, 1440, 1441, 0, 0, 3895, - 1439, 2985, 1440, 1441, 2987, 2988, 2989, 0, 0, 0, - 2990, 2991, 0, 0, 2992, 0, 2993, 0, 0, 3109, - 3837, 3838, 0, 2994, 2734, 2995, 1442, 0, 0, 2996, - 0, 2997, 0, 1442, 2998, 0, 2999, 0, 3000, 0, - 3001, 1442, 3002, 0, 3003, 2733, 3004, 0, 3005, 1442, - 3006, 0, 3007, 0, 3008, 0, 3009, 0, 3010, 1442, - 3011, 0, 3012, 1442, 3013, 0, 3014, 1442, 3015, 2732, - 0, 0, 3016, 0, 3017, 1439, 3018, 1440, 1441, 3019, - 0, 3020, 1442, 3021, 0, 2206, 3023, 0, 0, 3025, - 0, 1442, 3027, 3028, 3029, 3030, 1439, 1442, 1440, 1441, - 3031, 2206, 2206, 2206, 2206, 2206, 2067, 0, 3897, 1442, - 2731, 0, 1676, 0, 3841, 0, 3041, 2730, 0, 0, - 1439, 0, 1440, 1441, 3054, 2729, 0, 3058, 0, 1034, - 0, 0, 0, 2720, 1442, 0, 3061, 3062, 3063, 3064, - 3065, 3066, 3916, 2719, 0, 3067, 3068, 2718, 3069, 3911, - 3070, 2717, 0, 0, 0, 3913, 3876, 42, 3921, 1442, - 0, 1439, 0, 1440, 1441, 3908, 2714, 0, 1439, 0, - 1440, 1441, 652, 0, 0, 2709, 1439, 3904, 1440, 1441, - 0, 2702, 3937, 0, 1439, 0, 1440, 1441, 3938, 3939, - 1442, 996, 0, 2701, 1439, 3101, 1440, 1441, 1439, 1442, - 1440, 1441, 1439, 0, 1440, 1441, 1442, 3931, 0, 0, - 3932, 0, 0, 0, 3957, 1442, 0, 1439, 2700, 1440, - 1441, 3132, 1442, 0, 0, 0, 1439, 3940, 1440, 1441, - 0, 0, 1439, 1066, 1440, 1441, 0, 0, 0, 0, - 3983, 3984, 0, 2699, 1439, 0, 1440, 1441, 0, 0, - 3964, 0, 3989, 3959, 3991, 3993, 3995, 
3947, 3962, 0, - 42, 3975, 3967, 3972, 3969, 3968, 3966, 3971, 3970, 1439, - 1442, 1440, 1441, 0, 2698, 0, 0, 3106, 0, 1442, - 0, 0, 3195, 2697, 0, 0, 4031, 3999, 0, 0, - 2696, 3799, 3978, 0, 1439, 4002, 1440, 1441, 4010, 2695, - 0, 0, 4028, 0, 4018, 4023, 2694, 0, 0, 0, - 0, 3989, 4038, 0, 0, 0, 0, 0, 0, 0, - 4036, 0, 0, 4050, 0, 1439, 0, 1440, 1441, 0, - 0, 0, 4055, 0, 1439, 0, 1440, 1441, 4048, 4044, - 0, 1439, 0, 1440, 1441, 4058, 0, 0, 0, 0, - 1439, 0, 1440, 1441, 2688, 0, 0, 1439, 0, 1440, - 1441, 0, 0, 2687, 0, 0, 4078, 4080, 4082, 0, - 1956, 4073, 4071, 4074, 4077, 0, 0, 3284, 0, 0, - 0, 1247, 4083, 1247, 1247, 0, 4088, 3920, 2069, 4092, - 4086, 3293, 4090, 0, 0, 3930, 0, 0, 0, 0, - 0, 3989, 4100, 0, 4105, 1439, 0, 1440, 1441, 0, - 0, 0, 0, 4108, 1439, 0, 1440, 1441, 4114, 0, - 4116, 0, 0, 0, 4117, 4118, 0, 0, 0, 0, - 0, 0, 0, 1006, 1483, 1488, 1489, 0, 1492, 1494, - 1495, 1496, 1497, 0, 1500, 1501, 1503, 1503, 2069, 1503, - 1503, 1508, 1508, 1508, 1511, 1512, 1513, 1514, 1515, 1516, - 1517, 1518, 1519, 1520, 1521, 1522, 1523, 1524, 1525, 1526, - 1527, 1528, 1529, 1530, 1531, 1532, 1533, 1534, 1535, 1536, - 1537, 1538, 1539, 1540, 1541, 1542, 1543, 1544, 1545, 1546, - 1547, 1548, 1549, 1550, 1551, 1552, 1553, 1554, 1555, 1556, - 1557, 1558, 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, - 1567, 1568, 1569, 1570, 1571, 1572, 1573, 1574, 1575, 1576, - 1577, 1578, 1579, 1580, 1581, 1582, 1583, 1584, 1585, 1586, - 1587, 1588, 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, - 1597, 1598, 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, - 1607, 1608, 1609, 1610, 1611, 1612, 1613, 1614, 1615, 1616, - 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 1625, 1626, - 1627, 1628, 1629, 1630, 1631, 4125, 4126, 3838, 4124, 1632, - 0, 1634, 1635, 1636, 1637, 1638, 0, 0, 0, 0, - 0, 0, 2067, 1508, 1508, 1508, 1508, 1508, 1508, 3491, - 4089, 0, 0, 0, 0, 0, 0, 0, 1645, 1646, - 1647, 1648, 1649, 1650, 1651, 1652, 1653, 1654, 1655, 1656, - 1657, 1658, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 
1442, 3515, 4057, 0, 1442, 0, 0, 0, 1672, - 0, 1442, 0, 1769, 0, 1442, 0, 0, 0, 0, - 0, 0, 2067, 0, 0, 1442, 0, 0, 0, 0, - 4123, 0, 0, 0, 0, 0, 1442, 0, 0, 0, - 0, 0, 0, 0, 1442, 0, 0, 0, 0, 0, - 0, 1442, 0, 0, 0, 0, 0, 3535, 0, 3536, - 0, 1677, 3537, 0, 0, 3540, 3541, 1006, 0, 0, - 1442, 1006, 0, 0, 3545, 1442, 0, 1006, 0, 0, - 0, 0, 0, 0, 0, 2686, 0, 0, 3546, 2683, - 3547, 0, 3548, 0, 3549, 2682, 3550, 0, 3551, 2681, - 3552, 1442, 3553, 0, 3554, 0, 3555, 0, 3556, 2679, - 3557, 0, 3558, 0, 3559, 1442, 3560, 0, 3561, 1442, - 2672, 3562, 0, 0, 0, 3563, 0, 3564, 2669, 0, - 0, 0, 1186, 3566, 1192, 2667, 1439, 1194, 1440, 1441, - 1439, 1442, 1440, 1441, 0, 0, 1439, 1442, 1440, 1441, - 1439, 0, 1440, 1441, 2665, 0, 3583, 1442, 1757, 2624, - 1439, 0, 1440, 1441, 0, 3588, 0, 3589, 3590, 0, - 3591, 1439, 3592, 1440, 1441, 0, 0, 3593, 0, 1439, - 0, 1440, 1441, 0, 0, 2604, 1439, 0, 1440, 1441, - 0, 0, 1415, 0, 0, 85, 44, 45, 87, 2603, - 0, 0, 3618, 2599, 0, 1439, 0, 1440, 1441, 0, - 1439, 0, 1440, 1441, 3630, 91, 0, 3632, 0, 49, - 76, 77, 0, 75, 78, 2597, 0, 0, 0, 3636, - 0, 2563, 47, 0, 0, 0, 1439, 0, 1440, 1441, - 0, 2552, 1770, 0, 0, 0, 0, 3767, 0, 0, - 1439, 0, 1440, 1441, 1439, 0, 1440, 1441, 0, 0, - 0, 63, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 94, 0, 0, 1439, 0, 1440, 1441, - 0, 0, 1439, 0, 1440, 1441, 1769, 0, 0, 0, - 0, 0, 1439, 0, 1440, 1441, 0, 0, 0, 0, - 1247, 0, 0, 0, 1783, 1786, 1787, 1788, 1789, 1790, - 1791, 0, 1792, 1793, 1795, 1796, 1794, 1797, 1798, 1771, - 1772, 1773, 1774, 1755, 1756, 1784, 0, 1758, 0, 1759, - 1760, 1761, 1762, 1763, 1764, 1765, 1766, 1767, 0, 0, - 1768, 1775, 1776, 1777, 1778, 0, 1779, 1780, 1781, 1782, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 3875, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 52, 55, 58, 57, 60, 0, 74, 0, - 0, 82, 79, 0, 0, 0, 0, 0, 0, 0, - 0, 1757, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 62, 90, 89, 0, 0, - 72, 
73, 59, 0, 190, 0, 0, 0, 80, 81, - 0, 0, 0, 0, 0, 1823, 0, 1247, 1247, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 129, 0, - 151, 86, 0, 0, 86, 0, 0, 0, 0, 0, - 0, 172, 0, 0, 0, 0, 0, 0, 0, 0, - 64, 65, 0, 66, 67, 68, 69, 0, 0, 0, - 0, 0, 0, 0, 0, 1770, 0, 0, 0, 0, - 1729, 0, 162, 0, 0, 0, 0, 0, 150, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1747, 0, 0, 1785, 0, 169, 0, 0, - 170, 0, 3655, 3657, 3656, 3720, 3721, 3722, 3723, 3724, - 3725, 3726, 777, 0, 61, 0, 0, 928, 0, 1827, - 1828, 161, 160, 189, 0, 0, 0, 1783, 1786, 1787, - 1788, 1789, 1790, 1791, 3919, 1792, 1793, 1795, 1796, 1794, - 1797, 1798, 1771, 1772, 1773, 1774, 1755, 1756, 1784, 0, - 1758, 0, 1759, 1760, 1761, 1762, 1763, 1764, 1765, 1766, - 1767, 0, 0, 1768, 1775, 1776, 1777, 1778, 0, 1779, - 1780, 1781, 1782, 0, 195, 1886, 0, 195, 0, 0, - 3933, 701, 0, 3934, 0, 3935, 707, 0, 0, 0, - 0, 0, 0, 0, 0, 88, 195, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1931, 195, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 155, 1829, 158, 0, 1826, 0, 156, - 157, 0, 0, 0, 0, 0, 173, 707, 195, 707, - 0, 0, 0, 1967, 0, 179, 0, 0, 0, 0, - 1971, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 4029, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 93, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 4045, - 0, 4046, 0, 4047, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 3661, 0, 0, - 0, 0, 0, 0, 0, 0, 2332, 0, 0, 0, - 0, 0, 3669, 3670, 0, 0, 3745, 3744, 3743, 0, - 0, 3741, 3742, 3740, 0, 0, 0, 0, 1785, 164, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 4103, 0, 4104, 0, - 71, 0, 0, 0, 0, 0, 3746, 898, 1070, 754, - 755, 3747, 3748, 902, 3749, 757, 758, 899, 900, 0, - 752, 756, 901, 903, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1070, 0, 0, 0, 0, 159, 2018, 0, 775, - 776, 3654, 3658, 3659, 3660, 3671, 3718, 3719, 3727, 3729, - 854, 3728, 3730, 3731, 3732, 3735, 3736, 3737, 3738, 3733, - 3734, 
3739, 3638, 3642, 3639, 3640, 3641, 3653, 3643, 3644, - 3645, 3646, 3647, 3648, 3649, 3650, 3651, 3652, 3750, 3751, - 3752, 3753, 3754, 3755, 3664, 3668, 3667, 3665, 3666, 3662, - 3663, 3690, 3689, 3691, 3692, 3693, 3694, 3695, 3696, 3698, - 3697, 3699, 3700, 3701, 3702, 3703, 3704, 3672, 3673, 3676, - 3677, 3675, 3674, 3678, 3687, 3688, 3679, 3680, 3681, 3682, - 3683, 3684, 3686, 3685, 3705, 3706, 3707, 3708, 3709, 3711, - 3710, 3714, 3715, 3713, 3712, 3717, 3716, 0, 0, 0, - 0, 0, 0, 152, 0, 0, 153, 0, 0, 904, - 0, 905, 0, 0, 909, 0, 0, 0, 911, 910, - 0, 912, 0, 874, 873, 0, 0, 906, 907, 0, - 908, 0, 0, 0, 0, 0, 0, 165, 0, 0, - 0, 0, 0, 0, 177, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 945, - 0, 2242, 0, 0, 946, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 2068, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 2531, 185, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 3756, 3757, 3758, 3759, 3760, - 3761, 3762, 3763, 0, 0, 0, 0, 0, 1494, 0, - 1494, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 2543, 0, 166, 171, - 168, 174, 175, 176, 178, 180, 181, 182, 183, 0, - 0, 0, 0, 0, 184, 186, 187, 188, 952, 953, - 954, 955, 956, 957, 958, 959, 960, 961, 962, 963, - 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, - 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, - 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, - 0, 2317, 0, 0, 0, 0, 0, 0, 0, 2321, - 0, 2324, 0, 0, 0, 2018, 0, 195, 0, 195, - 0, 0, 195, 0, 0, 0, 0, 0, 0, 0, + 1718, 952, 4141, 944, 3612, 3938, 4154, 4106, 4030, 945, + 4107, 3610, 2390, 2097, 3414, 1267, 4057, 1972, 3562, 1766, + 3988, 4018, 3285, 3185, 3192, 2320, 3152, 3918, 2340, 3843, + 3243, 907, 736, 910, 3248, 3245, 43, 3244, 3242, 3247, + 3246, 3916, 2028, 3234, 3644, 5, 3148, 3145, 2750, 1041, + 3549, 1018, 3906, 2788, 2464, 3085, 1265, 739, 3200, 3146, + 3263, 3149, 2827, 3459, 3453, 2322, 3649, 906, 3265, 3480, + 766, 2344, 905, 2364, 2988, 911, 2360, 3143, 1147, 3133, + 3445, 2814, 2921, 2432, 2452, 734, 3262, 2890, 2495, 733, + 2427, 2891, 1077, 2378, 1042, 2889, 165, 1725, 2840, 1020, + 
3292, 2366, 44, 2970, 2365, 2820, 1712, 2806, 2790, 2243, + 2473, 2352, 2242, 2287, 42, 1851, 2451, 2962, 151, 1213, + 1022, 2434, 1026, 1822, 1869, 2512, 2882, 1755, 2857, 1105, + 2275, 1734, 749, 101, 2368, 1691, 2124, 1514, 105, 1426, + 1084, 1044, 1858, 106, 1441, 3611, 1968, 2449, 2056, 1081, + 1116, 1113, 1950, 2081, 1110, 1085, 1111, 2423, 1112, 1032, + 1062, 1739, 2288, 1754, 2193, 2120, 2345, 1498, 1123, 2424, + 744, 2132, 1064, 100, 2151, 1474, 1029, 108, 2027, 86, + 1980, 169, 1817, 1256, 737, 1263, 129, 127, 128, 1196, + 134, 1028, 1843, 1030, 1242, 135, 1054, 85, 94, 743, + 107, 1518, 192, 1027, 2036, 3947, 99, 2914, 4142, 2466, + 2467, 2468, 3973, 3550, 1523, 3231, 2466, 3253, 2944, 2943, + 2510, 2085, 2912, 3542, 1442, 96, 131, 3505, 96, 96, + 671, 1017, 1149, 1442, 1035, 1935, 130, 4084, 3974, 174, + 2978, 705, 2979, 1688, 2059, 1166, 1167, 1168, 136, 1171, + 1172, 1173, 1174, 1152, 3969, 1177, 1178, 1179, 1180, 1181, + 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, + 1192, 1193, 3968, 3251, 711, 1036, 1078, 2, 1049, 1053, + 3615, 2043, 2871, 1103, 1021, 1072, 1019, 2042, 3253, 1071, + 1126, 2041, 961, 962, 963, 171, 2040, 1102, 172, 3257, + 3615, 3250, 2317, 2318, 2039, 2038, 1043, 1127, 130, 1153, + 1156, 1157, 2011, 1101, 1100, 1099, 1212, 668, 1437, 669, + 729, 191, 2786, 112, 113, 114, 711, 4078, 118, 1160, + 96, 124, 1452, 2558, 193, 1058, 1169, 663, 192, 3129, + 2816, 1452, 4031, 2934, 3251, 1070, 1074, 909, 2499, 724, + 725, 1094, 961, 962, 963, 1089, 1016, 1719, 1011, 1012, + 1013, 1014, 131, 4147, 153, 1025, 4088, 3614, 3089, 4164, + 3257, 705, 4086, 4105, 3969, 174, 130, 1070, 1074, 909, + 1458, 3419, 4132, 3418, 2497, 2937, 3919, 3614, 4146, 2751, + 705, 4087, 2498, 2048, 1059, 1060, 4110, 4085, 1748, 3311, + 3839, 3838, 3555, 1151, 1150, 3556, 164, 4120, 87, 3849, + 4082, 89, 152, 3254, 702, 87, 3952, 2567, 4035, 87, + 3574, 1428, 705, 87, 3563, 4019, 4027, 2492, 3848, 2090, + 4062, 171, 3573, 3331, 172, 175, 1832, 2951, 
2952, 1756, + 1448, 1757, 705, 1440, 181, 2866, 3181, 2830, 2865, 1448, + 2787, 2867, 706, 140, 141, 163, 162, 191, 3182, 3183, + 2385, 2386, 687, 87, 45, 46, 89, 2977, 3662, 2020, + 2021, 2564, 2831, 2384, 2961, 685, 1232, 1009, 4035, 2443, + 1237, 1238, 2565, 93, 3254, 2348, 96, 50, 77, 78, + 1220, 76, 79, 96, 1260, 1221, 1008, 96, 3939, 3670, + 48, 96, 2437, 1219, 3669, 1218, 2915, 2878, 1098, 1220, + 1205, 1206, 1233, 1976, 1221, 682, 1455, 1226, 1456, 1457, + 2403, 2402, 705, 3289, 697, 3456, 3287, 2823, 2824, 64, + 3319, 705, 1438, 2556, 3317, 3023, 2019, 719, 1199, 692, + 2023, 96, 1208, 705, 723, 1195, 717, 3293, 2963, 695, + 1093, 1695, 2922, 1095, 1752, 3280, 2474, 2947, 157, 138, + 160, 145, 137, 3281, 158, 159, 1096, 3890, 166, 3891, + 2513, 175, 4144, 1170, 2319, 1951, 2519, 1925, 1235, 1236, + 181, 146, 706, 1427, 1239, 1253, 2348, 84, 1258, 2534, + 1956, 2535, 1241, 2536, 1240, 149, 147, 142, 143, 144, + 148, 706, 1201, 1063, 2965, 3544, 1234, 139, 3543, 1259, + 2537, 1227, 1176, 2517, 2515, 3290, 150, 672, 3288, 674, + 688, 1926, 708, 1927, 707, 678, 1175, 676, 680, 689, + 681, 2520, 675, 706, 686, 2477, 192, 677, 690, 691, + 694, 698, 699, 700, 696, 693, 3823, 684, 709, 4111, + 1977, 2436, 1098, 706, 1090, 2516, 2559, 2560, 2562, 2561, + 131, 1092, 1091, 3619, 2361, 1073, 1067, 1065, 2518, 1107, + 4112, 3540, 1136, 174, 1145, 4067, 1106, 2346, 2347, 1144, + 1107, 53, 56, 59, 58, 61, 1134, 75, 1143, 1836, + 83, 80, 1142, 4065, 1097, 1098, 1194, 1073, 1067, 1065, + 3024, 1141, 4071, 4072, 166, 1140, 1139, 1138, 1133, 1698, + 1096, 3202, 3203, 1146, 63, 92, 91, 1489, 4066, 73, + 74, 60, 2565, 3204, 4079, 4165, 4158, 81, 82, 171, + 1489, 4117, 172, 706, 1082, 1447, 1444, 1445, 1446, 1451, + 1453, 1450, 706, 1449, 1447, 1444, 1445, 1446, 1451, 1453, + 1450, 1857, 1449, 1443, 706, 191, 1082, 1118, 1125, 1969, + 1198, 1119, 1443, 1082, 2966, 2450, 1230, 1080, 1055, 65, + 66, 2503, 67, 68, 69, 70, 2502, 3088, 2346, 2347, + 1753, 1155, 1125, 2949, 1500, 
1965, 3539, 1118, 3224, 2969, + 1509, 1154, 1216, 1429, 1222, 1223, 1224, 1225, 1163, 3946, + 161, 2913, 2916, 2982, 2946, 1959, 167, 1957, 1958, 1830, + 1960, 1961, 1104, 179, 1829, 1249, 2579, 1251, 1261, 1262, + 3201, 1137, 1828, 3457, 2526, 2522, 2524, 2525, 2523, 2527, + 2528, 3156, 3204, 62, 2932, 1135, 1966, 1826, 1097, 958, + 2791, 2793, 958, 958, 1211, 662, 1955, 2960, 2494, 3097, + 2959, 1490, 1491, 4080, 187, 1248, 1250, 3096, 2496, 3931, + 1856, 1124, 3494, 3475, 4033, 1197, 1128, 1118, 90, 175, + 2862, 1130, 2826, 1419, 2763, 1131, 1129, 2093, 181, 1743, + 1650, 1097, 3255, 3256, 1496, 1124, 1254, 1210, 2972, 1420, + 1421, 2880, 2821, 2971, 1125, 3259, 4032, 168, 173, 170, + 176, 177, 178, 180, 182, 183, 184, 185, 154, 3503, + 3504, 155, 670, 186, 188, 189, 190, 1937, 1936, 1938, + 1939, 1940, 710, 90, 4033, 126, 2391, 1489, 1486, 3180, + 1492, 1493, 1494, 1495, 3572, 2590, 1469, 2936, 1038, 3613, + 1506, 1066, 167, 703, 1162, 1215, 1243, 4156, 2133, 179, + 4157, 1719, 4155, 3255, 3256, 2972, 4032, 1257, 704, 3613, + 2971, 1246, 2134, 1458, 1981, 1247, 3259, 1520, 3960, 1521, + 1522, 1229, 2566, 1066, 1148, 1252, 3535, 95, 3469, 2440, + 1436, 2935, 1231, 2514, 95, 1525, 1526, 1125, 95, 121, + 187, 1686, 95, 2032, 1962, 1758, 1952, 1124, 1953, 3009, + 1245, 1954, 166, 1118, 1121, 1122, 1125, 1082, 2125, 2792, + 2599, 1115, 1119, 2125, 1217, 1207, 2905, 1456, 1457, 1204, + 2441, 1481, 1482, 1484, 1483, 1485, 1486, 2439, 1720, 1722, + 2590, 4121, 95, 168, 173, 170, 176, 177, 178, 180, + 182, 183, 184, 185, 2493, 3658, 1457, 3402, 1458, 186, + 188, 189, 190, 1702, 1475, 3510, 1125, 1706, 3509, 2481, + 1866, 2442, 122, 1022, 1865, 1855, 3188, 2486, 1700, 2491, + 2626, 2438, 1687, 1703, 2486, 2489, 3308, 1458, 1476, 1477, + 1478, 1479, 1480, 1481, 1482, 1484, 1483, 1485, 1486, 1214, + 1124, 1136, 1134, 2131, 1244, 3990, 1118, 1121, 1122, 1455, + 1082, 1456, 1457, 4113, 1115, 1119, 2490, 730, 3011, 1124, + 3495, 3189, 1982, 2488, 1128, 1118, 1034, 1704, 3924, 
1130, + 1200, 105, 1705, 1131, 1129, 1114, 106, 1656, 1657, 1658, + 1659, 1660, 1661, 2061, 1719, 3191, 1458, 4166, 1687, 72, + 3991, 2130, 4160, 1692, 1132, 1458, 1945, 2062, 1487, 1488, + 2060, 3831, 1680, 3186, 2050, 2052, 2053, 1458, 1863, 1124, + 108, 1161, 1719, 3925, 3830, 1158, 2571, 2572, 2573, 2353, + 2354, 3202, 3203, 961, 962, 963, 2280, 3821, 3187, 2051, + 2277, 1833, 1834, 1835, 1455, 1898, 1456, 1457, 1901, 2279, + 1903, 1479, 1480, 1481, 1482, 1484, 1483, 1485, 1486, 3585, + 4070, 1721, 2160, 956, 3584, 2989, 1849, 3569, 3517, 3570, + 1944, 3516, 3193, 1455, 3506, 1456, 1457, 1045, 1701, 3232, + 1051, 1051, 1019, 1943, 4167, 1724, 1974, 1021, 1842, 1477, + 1478, 1479, 1480, 1481, 1482, 1484, 1483, 1485, 1486, 1861, + 167, 4126, 1719, 1932, 4069, 1920, 3220, 179, 1910, 1911, + 1902, 2887, 2886, 2885, 1916, 1917, 1749, 1750, 1871, 2446, + 1872, 1946, 1874, 1876, 1930, 1860, 1880, 1882, 1884, 1886, + 1888, 1929, 1455, 1928, 1456, 1457, 1918, 1912, 1825, 1909, + 3201, 1455, 1908, 1456, 1457, 1907, 1878, 1942, 187, 2991, + 1859, 1859, 3204, 1455, 1458, 1456, 1457, 1699, 1852, 1840, + 2152, 1839, 1707, 711, 1838, 2154, 2280, 1931, 3284, 2159, + 2155, 3500, 711, 2156, 2157, 2158, 1423, 1728, 2153, 2161, + 2162, 2163, 2164, 2165, 2166, 2167, 2168, 2169, 1752, 1458, + 1906, 168, 173, 170, 176, 177, 178, 180, 182, 183, + 184, 185, 2117, 4130, 1458, 4129, 1985, 186, 188, 189, + 190, 2586, 4114, 1989, 1970, 1991, 1992, 1993, 1994, 3001, + 3000, 2999, 1998, 1729, 2993, 4099, 2997, 1475, 2992, 2981, + 2990, 4097, 1831, 4054, 2010, 2995, 2869, 711, 4124, 1719, + 2812, 4143, 130, 3955, 2994, 726, 3954, 1101, 1100, 1099, + 3928, 1476, 1477, 1478, 1479, 1480, 1481, 1482, 1484, 1483, + 1485, 1486, 2996, 2998, 1458, 2462, 2461, 3927, 3190, 1987, + 2460, 2459, 3926, 1983, 1984, 1476, 1477, 1478, 1479, 1480, + 1481, 1482, 1484, 1483, 1485, 1486, 3826, 1988, 4041, 1719, + 1455, 3810, 1456, 1457, 1995, 1996, 1997, 2009, 2008, 3809, + 2109, 2098, 2099, 2100, 2101, 2111, 
2102, 2103, 2104, 2116, + 2112, 2105, 2106, 2113, 2114, 2115, 2107, 2108, 2110, 3657, + 727, 2458, 2457, 4101, 1719, 1455, 43, 1456, 1457, 43, + 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1460, 3655, 1458, + 1455, 3581, 1456, 1457, 1685, 102, 1684, 2636, 4039, 1719, + 1719, 3309, 2088, 2088, 2086, 2086, 103, 1454, 1719, 3473, + 1719, 1056, 1454, 1719, 3948, 2064, 1683, 2066, 2067, 2068, + 2069, 2070, 2071, 2073, 2075, 2076, 2077, 2078, 2079, 2080, + 2128, 111, 2812, 4026, 3857, 2129, 1458, 2812, 4005, 2812, + 4001, 1500, 110, 3514, 109, 3981, 1719, 3553, 3945, 3856, + 1455, 3499, 1456, 1457, 1686, 111, 2057, 2054, 2126, 1475, + 3294, 1719, 1471, 3291, 1472, 3814, 110, 3223, 109, 1458, + 2171, 2189, 3222, 4037, 1719, 3813, 1475, 104, 1473, 1487, + 1488, 1470, 2896, 1476, 1477, 1478, 1479, 1480, 1481, 1482, + 1484, 1483, 1485, 1486, 2883, 1458, 3834, 1719, 2638, 2185, + 1476, 1477, 1478, 1479, 1480, 1481, 1482, 1484, 1483, 1485, + 1486, 3561, 1719, 2058, 2016, 2017, 1475, 1458, 2578, 86, + 3903, 1719, 86, 1682, 2033, 1455, 2547, 1456, 1457, 2812, + 3822, 3553, 1719, 2923, 1458, 1687, 1719, 2812, 3551, 2901, + 1476, 1477, 1478, 1479, 1480, 1481, 1482, 1484, 1483, 1485, + 1486, 2273, 2546, 3901, 1719, 2399, 1458, 2063, 2508, 3194, + 102, 1458, 1475, 3198, 2507, 1458, 2858, 104, 2065, 2363, + 3197, 103, 1455, 2343, 1456, 1457, 2291, 2092, 2290, 3898, + 1719, 2306, 2325, 1458, 2119, 2121, 1476, 1477, 1478, 1479, + 1480, 1481, 1482, 1484, 1483, 1485, 1486, 2012, 1720, 2313, + 1458, 3880, 1719, 3468, 3199, 1455, 1978, 1456, 1457, 3195, + 2135, 2136, 2137, 2138, 3196, 2486, 1719, 2289, 3444, 1719, + 2276, 2718, 1719, 2089, 2149, 2858, 1719, 3213, 3212, 2859, + 2170, 1455, 2337, 1456, 1457, 3210, 3211, 1454, 1458, 2861, + 3437, 1719, 2278, 3208, 3209, 3434, 1719, 1458, 1941, 3432, + 1719, 3208, 3207, 1455, 104, 1456, 1457, 3326, 1458, 2837, + 1719, 2828, 1458, 2330, 1933, 2331, 1923, 3394, 1719, 2291, + 1455, 2358, 1456, 1457, 2565, 2945, 2294, 2295, 1821, 2926, + 2919, 2920, 
2812, 2811, 3392, 1719, 2592, 1719, 2859, 2400, + 2808, 105, 1455, 2372, 1456, 1457, 106, 1455, 2565, 1456, + 1457, 1455, 1919, 1456, 1457, 104, 2091, 1719, 2183, 1915, + 2289, 105, 1914, 1719, 2487, 1913, 106, 1730, 2194, 1455, + 1458, 1456, 1457, 1719, 2293, 1821, 1820, 2296, 2297, 2336, + 1458, 3388, 1719, 2837, 1458, 2447, 1455, 1255, 1456, 1457, + 3144, 2596, 3385, 1719, 1764, 1763, 3383, 1719, 2828, 1459, + 3470, 3468, 3986, 3959, 2812, 2837, 3175, 1458, 2836, 2409, + 2410, 2411, 2412, 2592, 1458, 1035, 2565, 2395, 110, 2324, + 2394, 2312, 2486, 3423, 1455, 2634, 1456, 1457, 1458, 1515, + 3210, 3117, 2335, 1455, 2383, 1456, 1457, 3518, 2267, 2268, + 2269, 2270, 2271, 2338, 1455, 1458, 1456, 1457, 1455, 2592, + 1456, 1457, 2718, 2623, 3852, 2404, 2622, 2405, 2406, 2407, + 2408, 2486, 2356, 2837, 2595, 2435, 2469, 2429, 3381, 1719, + 3468, 2475, 2381, 2415, 2416, 2417, 2418, 2382, 1072, 2398, + 1458, 2397, 1071, 2396, 2380, 2351, 1723, 1454, 3519, 3520, + 3521, 3379, 1719, 2314, 2445, 2315, 2091, 2034, 2194, 1719, + 2018, 2472, 1964, 1458, 1751, 1894, 1455, 1458, 1456, 1457, + 1109, 1458, 3377, 1719, 1108, 2376, 1455, 96, 1456, 1457, + 1455, 1024, 1456, 1457, 4075, 4008, 3845, 2430, 3286, 3375, + 1719, 2480, 2426, 1726, 2483, 3811, 2484, 2444, 2448, 2456, + 2419, 2421, 2422, 1455, 3673, 1456, 1457, 3534, 3235, 3531, + 1455, 3512, 1456, 1457, 2500, 2479, 1895, 1896, 1897, 1126, + 2482, 3336, 2430, 2478, 1455, 4115, 1456, 1457, 3335, 1823, + 2428, 3282, 3237, 2501, 3233, 2927, 1127, 2425, 2420, 2414, + 2413, 1455, 1948, 1456, 1457, 1854, 1859, 3373, 1719, 1850, + 1458, 3371, 1719, 1819, 1458, 3369, 1719, 123, 2893, 96, + 2892, 1199, 1475, 2842, 2845, 2846, 2847, 2843, 3846, 2844, + 2848, 3522, 2443, 3481, 3482, 2328, 1455, 4138, 1456, 1457, + 2504, 3481, 3482, 4136, 2505, 2506, 1476, 1477, 1478, 1479, + 1480, 1481, 1482, 1484, 1483, 1485, 1486, 1890, 4108, 1455, + 3967, 1456, 1457, 1455, 3885, 1456, 1457, 1455, 2893, 1456, + 1457, 3484, 3229, 2584, 2575, 2014, 
2577, 2589, 3523, 3524, + 3525, 3228, 3227, 2549, 2550, 2580, 3144, 2581, 2552, 2291, + 2906, 2290, 2541, 2540, 3367, 1719, 667, 2553, 3365, 1719, + 2593, 3487, 2594, 2583, 1891, 1892, 1893, 2601, 3167, 3165, + 3486, 2603, 2604, 3168, 3166, 3169, 2057, 2846, 2847, 3164, + 2610, 2611, 2612, 2613, 2614, 2615, 2616, 2617, 2618, 2619, + 2585, 2621, 3163, 2511, 1735, 3963, 3847, 2015, 2555, 2342, + 1727, 2334, 3134, 3136, 1458, 3474, 1455, 3122, 1456, 1457, + 1455, 3137, 1456, 1457, 2627, 2628, 2629, 2630, 2631, 2563, + 2633, 1458, 3121, 3997, 2635, 3923, 3648, 3650, 2640, 2641, + 3999, 2642, 728, 2058, 2645, 1824, 2646, 2648, 2650, 2651, + 2652, 2653, 2654, 2655, 2657, 2659, 2660, 2661, 2663, 2574, + 2665, 2666, 2668, 2670, 2672, 2674, 2676, 2678, 2680, 2682, + 2684, 2686, 2688, 2690, 2692, 2694, 2696, 2698, 2700, 2702, + 2703, 2704, 3639, 2706, 3638, 2708, 3464, 2710, 2711, 4052, + 2713, 2715, 2717, 1458, 2117, 3461, 2720, 2576, 3363, 1719, + 2724, 1039, 1458, 3460, 2729, 2730, 2731, 2732, 4049, 1040, + 2598, 1732, 3935, 3933, 3131, 3361, 1719, 2743, 2744, 2745, + 2746, 2747, 2748, 1963, 115, 2752, 2753, 3206, 2876, 1165, + 2897, 1458, 3637, 2755, 1422, 1458, 1164, 2133, 2761, 1458, + 3302, 2892, 2975, 1458, 2764, 2765, 2766, 2767, 2768, 1458, + 1455, 2134, 1456, 1457, 2933, 2775, 2776, 102, 2777, 2835, + 131, 2780, 2782, 2337, 104, 2784, 2632, 1455, 103, 1456, + 1457, 1458, 102, 1979, 3466, 2796, 1458, 1731, 2088, 4152, + 2086, 2353, 2354, 103, 104, 3225, 3359, 1719, 2544, 4046, + 3944, 3841, 2109, 2098, 2099, 2100, 2101, 2111, 2102, 2103, + 2104, 2116, 2112, 2105, 2106, 2113, 2114, 2115, 2107, 2108, + 2110, 3205, 1458, 2850, 2339, 3357, 1719, 1047, 1048, 3355, + 1719, 1458, 3120, 3341, 1719, 2533, 2605, 3324, 1719, 1455, + 3119, 1456, 1457, 2783, 1719, 2532, 2531, 111, 1455, 2795, + 1456, 1457, 1458, 2620, 43, 2530, 2529, 1458, 110, 3446, + 109, 2569, 109, 2832, 2851, 2781, 1719, 2853, 1458, 104, + 2756, 1719, 4098, 3645, 4096, 2813, 2798, 1455, 2800, 1456, + 1457, 
1455, 4053, 1456, 1457, 1455, 4050, 1456, 1457, 1455, + 1458, 1456, 1457, 4000, 3911, 1455, 3910, 1456, 1457, 3888, + 1458, 3656, 3654, 2809, 2852, 3653, 2733, 1719, 3646, 3532, + 1692, 2785, 3465, 2822, 1458, 2725, 1719, 1455, 3463, 1456, + 1457, 3238, 1455, 2470, 1456, 1457, 2588, 1837, 111, 2805, + 3454, 1046, 2810, 111, 110, 2828, 2587, 2879, 2881, 110, + 4139, 2716, 1719, 2825, 110, 1687, 109, 4140, 4139, 4140, + 2872, 1458, 2714, 1719, 3623, 2808, 3025, 1458, 1455, 2918, + 1456, 1457, 3668, 3, 2624, 2326, 2860, 1455, 3929, 1456, + 1457, 2863, 1717, 1713, 3489, 2895, 1744, 2942, 2435, 1736, + 2898, 2899, 2870, 3498, 2701, 1719, 1037, 1714, 1455, 2031, + 1456, 1457, 11, 1455, 2940, 1456, 1457, 2029, 2699, 1719, + 10, 2884, 116, 117, 1455, 98, 1456, 1457, 2797, 1, + 2873, 1015, 2332, 2333, 1716, 2894, 1715, 2030, 9, 2902, + 8, 1057, 2044, 2045, 2046, 2047, 1455, 4048, 1456, 1457, + 2907, 2908, 2909, 3996, 2903, 3439, 1455, 2055, 1456, 1457, + 2856, 2697, 1719, 3934, 2939, 3998, 4051, 3667, 1842, 3932, + 1455, 1425, 1456, 1457, 2985, 2986, 1424, 2928, 2929, 3502, + 4064, 683, 2316, 1690, 4109, 4060, 3013, 3014, 3015, 3016, + 3017, 2938, 2094, 2095, 4061, 2987, 1934, 1924, 2118, 1051, + 1051, 2122, 3564, 3004, 2241, 2127, 3022, 1455, 3842, 1456, + 1457, 3241, 2476, 1455, 3530, 1456, 1457, 2433, 1117, 156, + 2139, 2140, 2141, 2142, 2143, 2144, 2145, 2146, 2147, 2148, + 2964, 2150, 2392, 2393, 4021, 2172, 2173, 2174, 2175, 2176, + 2177, 2179, 3002, 2184, 2983, 2186, 2187, 2188, 120, 2190, + 2191, 2192, 3018, 2195, 2196, 2197, 2198, 2199, 2200, 2201, + 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211, + 2212, 2213, 2214, 2215, 2216, 2217, 2218, 2219, 2220, 2221, + 2222, 2223, 2224, 2225, 2226, 2227, 2228, 2229, 2230, 2231, + 2232, 2233, 2234, 2235, 2236, 2237, 2238, 2239, 2240, 2244, + 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, + 2255, 2256, 2257, 2258, 2259, 2260, 2261, 2262, 2263, 2264, + 2265, 2266, 3003, 2973, 1075, 2967, 2974, 2272, 
119, 2274, + 1120, 2281, 2282, 2283, 2284, 2285, 2286, 1051, 3006, 1051, + 1051, 1051, 1051, 1051, 2984, 1228, 2471, 1458, 3554, 2877, + 2298, 2299, 2300, 2301, 2302, 2303, 2304, 2305, 2401, 2307, + 2308, 2309, 2310, 2311, 1770, 2842, 2845, 2846, 2847, 2843, + 3027, 2844, 2848, 1768, 1769, 1767, 1772, 1771, 3310, 2625, + 3401, 2022, 3090, 718, 2849, 712, 194, 1759, 1737, 3415, + 1159, 3092, 673, 3214, 1458, 2509, 679, 1051, 1507, 2013, + 3101, 3118, 2276, 2864, 2276, 1069, 1061, 2327, 3063, 2799, + 1068, 3994, 3819, 3458, 3153, 3005, 3130, 1458, 3132, 2349, + 2350, 2815, 2372, 3135, 2278, 3128, 2278, 3073, 3074, 3075, + 3076, 3077, 3943, 3172, 1458, 3922, 3647, 4006, 2874, 1458, + 1733, 3422, 3147, 2597, 2123, 1497, 2389, 1026, 3147, 3124, + 2369, 2372, 2372, 2372, 2372, 2372, 3100, 3141, 1974, 1458, + 3618, 3174, 3125, 3101, 3157, 3091, 2049, 3093, 741, 740, + 738, 2372, 2801, 2829, 2372, 1461, 946, 2789, 3112, 3818, + 1745, 2841, 2839, 1455, 2838, 1456, 1457, 3150, 2542, 2377, + 3483, 3479, 4056, 3123, 2371, 2367, 3126, 2807, 897, 3116, + 896, 2695, 1719, 3138, 3139, 750, 742, 2431, 732, 895, + 894, 3176, 3268, 3269, 3177, 2948, 1028, 3283, 2693, 1719, + 3159, 3160, 3158, 3162, 3536, 3161, 105, 3170, 1027, 2950, + 1455, 106, 1456, 1457, 3240, 3178, 3261, 2875, 3279, 3155, + 3113, 3114, 3115, 2691, 1719, 3065, 1439, 3067, 3184, 1709, + 1088, 3218, 3219, 1455, 3307, 1456, 1457, 3215, 3950, 3217, + 2568, 3304, 3216, 3078, 3079, 3080, 3081, 3330, 1708, 3957, + 1455, 3249, 1456, 1457, 3548, 1455, 3230, 1456, 1457, 2924, + 2463, 1458, 3270, 3321, 3322, 3267, 3323, 3271, 3325, 3327, + 3260, 2435, 3239, 71, 47, 1455, 3917, 1456, 1457, 3987, + 889, 3277, 3334, 1458, 886, 3620, 3621, 3338, 3339, 3340, + 3342, 3343, 3344, 3345, 3346, 3347, 3348, 3349, 3350, 3351, + 3352, 3353, 3354, 3356, 3358, 3360, 3362, 3364, 3366, 3368, + 3370, 3372, 3374, 3376, 3378, 3380, 3382, 3384, 3386, 3387, + 3389, 3390, 3391, 3393, 3298, 3295, 3395, 3305, 3397, 3398, + 3399, 3297, 3622, 3403, 
3404, 3405, 3406, 3407, 3408, 3409, + 3410, 3411, 3412, 3413, 3315, 2689, 1719, 3086, 1458, 3087, + 3970, 3971, 3420, 3300, 3301, 885, 3425, 3972, 1458, 2178, + 3429, 3430, 1435, 3431, 3433, 1432, 3436, 3438, 2888, 3440, + 3441, 3442, 3443, 3312, 3313, 4077, 3314, 3449, 2024, 3316, + 97, 3318, 37, 3320, 36, 35, 34, 3424, 33, 3426, + 3427, 3428, 1458, 27, 3417, 1717, 1713, 1455, 26, 1456, + 1457, 3421, 25, 24, 23, 30, 20, 22, 21, 19, + 1714, 3252, 3471, 3472, 4104, 4151, 3476, 1458, 125, 1455, + 57, 1456, 1457, 1458, 54, 52, 133, 3306, 1458, 132, + 55, 51, 2687, 1719, 1202, 1710, 1711, 1716, 49, 1715, + 1458, 32, 2685, 1719, 31, 18, 3452, 17, 1458, 16, + 15, 14, 13, 12, 7, 3447, 3448, 6, 40, 39, + 38, 29, 28, 1458, 41, 4, 2911, 3450, 2465, 2372, + 3455, 0, 3462, 1458, 0, 0, 2683, 1719, 0, 0, + 3478, 0, 0, 3467, 1455, 1458, 1456, 1457, 0, 1458, + 0, 3485, 0, 2600, 1455, 0, 1456, 1457, 0, 3492, + 3493, 2681, 1719, 2606, 2607, 2608, 2609, 2679, 1719, 3491, + 0, 3490, 2677, 1719, 0, 0, 3270, 3497, 0, 3267, + 1458, 3271, 0, 3552, 2675, 1719, 3537, 3538, 1455, 3488, + 1456, 1457, 2673, 1719, 1458, 0, 0, 3507, 3508, 3513, + 0, 3515, 3558, 3559, 0, 0, 1515, 2671, 1719, 1458, + 0, 0, 0, 1455, 1458, 1456, 1457, 2669, 1719, 1455, + 0, 1456, 1457, 0, 1455, 0, 1456, 1457, 3571, 2667, + 1719, 3575, 0, 2662, 1719, 0, 1455, 1458, 1456, 1457, + 0, 0, 1458, 0, 1455, 0, 1456, 1457, 0, 0, + 3541, 0, 1458, 0, 3545, 3546, 3547, 0, 3586, 1455, + 1458, 1456, 1457, 0, 2658, 1719, 0, 1458, 0, 1455, + 0, 1456, 1457, 0, 0, 0, 0, 0, 2656, 1719, + 1458, 1455, 0, 1456, 1457, 1455, 0, 1456, 1457, 0, + 0, 0, 0, 2649, 1719, 0, 3560, 0, 2647, 1719, + 3580, 0, 0, 0, 0, 3576, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1455, 1458, 1456, 1457, + 0, 3435, 3609, 0, 0, 1458, 3400, 0, 0, 0, + 1455, 0, 1456, 1457, 0, 3617, 3396, 0, 957, 0, + 0, 88, 0, 3624, 3333, 1455, 1458, 1456, 1457, 0, + 1455, 3332, 1456, 1457, 1458, 0, 0, 0, 3626, 0, + 3627, 3628, 3629, 0, 3329, 0, 0, 0, 0, 0, + 0, 0, 0, 1455, 0, 1456, 1457, 0, 
1455, 1735, + 1456, 1457, 0, 0, 0, 0, 0, 0, 1455, 0, + 1456, 1457, 0, 0, 0, 3642, 1455, 3616, 1456, 1457, + 0, 2779, 0, 1455, 0, 1456, 1457, 0, 0, 2778, + 0, 2088, 0, 2086, 0, 0, 1455, 0, 1456, 1457, + 3147, 0, 0, 0, 0, 1023, 43, 88, 0, 0, + 2774, 0, 0, 0, 0, 3663, 0, 3643, 2773, 0, + 0, 3665, 3652, 0, 3651, 0, 1023, 0, 3659, 0, + 3671, 0, 3661, 1455, 0, 1456, 1457, 0, 0, 0, + 0, 1455, 1087, 1456, 1457, 0, 3150, 0, 0, 3833, + 3150, 0, 0, 3676, 3677, 0, 3679, 0, 3840, 1458, + 0, 0, 1455, 3820, 1456, 1457, 0, 0, 0, 0, + 1455, 0, 1456, 1457, 0, 0, 1458, 0, 3850, 3851, + 1458, 3853, 0, 3854, 3855, 0, 0, 3817, 3858, 3859, + 3860, 3861, 3862, 3863, 3864, 3865, 3866, 3867, 3868, 3869, + 3870, 3871, 3872, 3873, 3874, 3875, 3876, 3877, 3878, 3879, + 3836, 3881, 3884, 3832, 3837, 3883, 3824, 2088, 3816, 2086, + 3844, 3827, 3828, 3829, 3815, 0, 0, 3893, 3894, 3895, + 3896, 3897, 3899, 3900, 3902, 3904, 3905, 3907, 0, 0, + 3607, 0, 0, 2772, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 2771, 0, 0, 0, 2770, 0, 0, 0, 0, 0, + 3887, 0, 0, 0, 0, 0, 3889, 0, 2980, 3941, + 3892, 0, 0, 0, 0, 0, 0, 0, 3915, 0, + 0, 0, 0, 3912, 3913, 1455, 3914, 1456, 1457, 0, + 1051, 0, 3937, 3007, 3008, 0, 0, 3010, 0, 3930, + 3012, 3675, 1455, 0, 1456, 1457, 1455, 0, 1456, 1457, + 1458, 3936, 0, 0, 1458, 0, 0, 0, 3940, 0, + 3019, 3020, 3021, 0, 0, 0, 0, 0, 3150, 0, + 0, 0, 3026, 0, 0, 3028, 3029, 3030, 0, 0, + 0, 3031, 3032, 0, 0, 3033, 3882, 3034, 0, 0, + 0, 0, 0, 0, 3035, 1458, 3036, 0, 0, 0, + 3037, 0, 3038, 0, 0, 3039, 0, 3040, 0, 3041, + 1458, 3042, 0, 3043, 0, 3044, 0, 3045, 0, 3046, + 0, 3047, 1458, 3048, 0, 3049, 0, 3050, 0, 3051, + 0, 3052, 0, 3053, 2769, 3054, 0, 3055, 2760, 3056, + 1458, 0, 0, 3057, 1458, 3058, 0, 3059, 0, 0, + 3060, 0, 3061, 0, 3062, 0, 2244, 3064, 0, 0, + 3066, 0, 0, 3068, 3069, 3070, 3071, 3886, 0, 0, + 3942, 3072, 2244, 2244, 2244, 2244, 2244, 1693, 0, 2759, + 0, 0, 0, 0, 0, 0, 1455, 3082, 1456, 1457, + 1455, 0, 1456, 1457, 2758, 3095, 0, 1458, 3099, 0, + 1051, 
1458, 0, 0, 3961, 43, 2757, 3102, 3103, 3104, + 3105, 3106, 3107, 3966, 3958, 3956, 3108, 3109, 1458, 3110, + 3921, 3111, 0, 3953, 2754, 0, 0, 0, 2749, 3949, + 1458, 1455, 0, 1456, 1457, 0, 0, 3982, 665, 0, + 0, 0, 0, 3983, 3984, 1458, 1455, 0, 1456, 1457, + 1458, 0, 0, 0, 1458, 0, 0, 1010, 1455, 0, + 1456, 1457, 0, 0, 1458, 0, 3142, 0, 3976, 4002, + 0, 3977, 0, 0, 0, 0, 1455, 0, 1456, 1457, + 1455, 2742, 1456, 1457, 0, 2741, 0, 3985, 0, 0, + 0, 3147, 3173, 1458, 0, 4028, 4029, 0, 0, 0, + 1083, 0, 2740, 0, 0, 0, 0, 3992, 43, 4036, + 4038, 4040, 4034, 0, 2739, 0, 0, 4007, 4012, 4020, + 4004, 4009, 4017, 4014, 0, 4013, 4011, 4016, 4015, 2738, + 1458, 0, 0, 1455, 2737, 1456, 1457, 1455, 2736, 1456, + 1457, 4076, 0, 0, 4044, 0, 3844, 4023, 2735, 0, + 0, 1458, 4047, 3236, 1455, 0, 1456, 1457, 4055, 4073, + 4063, 4068, 0, 0, 0, 0, 1455, 0, 1456, 1457, + 4083, 4034, 0, 0, 0, 0, 0, 2734, 4095, 4081, + 0, 1455, 0, 1456, 1457, 0, 1455, 4100, 1456, 1457, + 1455, 0, 1456, 1457, 0, 0, 4093, 0, 4089, 0, + 1455, 0, 1456, 1457, 0, 0, 0, 0, 4103, 0, + 0, 0, 0, 0, 2728, 0, 0, 0, 0, 0, + 0, 4123, 4125, 4127, 0, 0, 1974, 0, 4118, 1455, + 4119, 1456, 1457, 4122, 4116, 2727, 2088, 0, 2086, 4128, + 0, 0, 0, 0, 0, 4137, 4133, 4135, 4131, 3328, + 1264, 0, 1264, 1264, 3965, 0, 0, 0, 0, 4150, + 4145, 4034, 3975, 3337, 0, 0, 1455, 0, 1456, 1457, + 4153, 0, 0, 0, 0, 0, 4161, 4159, 0, 4162, + 4163, 0, 0, 0, 0, 0, 0, 1455, 0, 1456, + 1457, 0, 0, 0, 0, 0, 2088, 0, 2086, 0, + 1023, 1499, 1504, 1505, 3883, 1508, 1510, 1511, 1512, 1513, + 4169, 1516, 1517, 1519, 1519, 0, 1519, 1519, 1524, 1524, + 1524, 1527, 1528, 1529, 1530, 1531, 1532, 1533, 1534, 1535, + 1536, 1537, 1538, 1539, 1540, 1541, 1542, 1543, 1544, 1545, + 1546, 1547, 1548, 1549, 1550, 1551, 1552, 1553, 1554, 1555, + 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, 1564, 1565, + 1566, 1567, 1568, 1569, 1570, 1571, 1572, 1573, 1574, 1575, + 1576, 1577, 1578, 1579, 1580, 1581, 1582, 1583, 1584, 1585, + 1586, 1587, 1588, 1589, 1590, 1591, 
1592, 1593, 1594, 1595, + 1596, 1597, 1598, 1599, 1600, 1601, 1602, 1603, 1604, 1605, + 1606, 1607, 1608, 1609, 1610, 1611, 1612, 1613, 1614, 1615, + 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 1625, + 1626, 1627, 1628, 1629, 1630, 1631, 1632, 1633, 1634, 1635, + 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 1644, 1645, + 1646, 1647, 1648, 0, 4170, 4171, 1458, 1649, 0, 1651, + 1652, 1653, 1654, 1655, 0, 0, 4134, 0, 0, 0, + 0, 1524, 1524, 1524, 1524, 1524, 1524, 0, 0, 0, + 0, 3533, 0, 0, 0, 0, 1662, 1663, 1664, 1665, + 1666, 1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674, 1675, + 0, 0, 4102, 0, 0, 0, 0, 1458, 0, 0, + 0, 0, 1787, 0, 3557, 0, 0, 1689, 1458, 0, + 0, 0, 0, 0, 1458, 0, 4168, 0, 0, 0, + 1458, 0, 0, 0, 1458, 0, 0, 0, 0, 0, + 2726, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1458, 0, 0, 0, 0, 1458, 0, 0, 0, 0, + 1458, 0, 0, 0, 0, 1458, 0, 0, 0, 1694, + 0, 1458, 3577, 0, 3578, 1458, 0, 3579, 1023, 0, + 3582, 3583, 1023, 0, 0, 0, 0, 0, 1023, 3587, + 0, 2723, 1455, 0, 1456, 1457, 0, 1458, 0, 0, + 0, 0, 2722, 3588, 0, 3589, 0, 3590, 2721, 3591, + 0, 3592, 0, 3593, 2719, 3594, 1458, 3595, 2712, 3596, + 1787, 3597, 0, 3598, 0, 3599, 0, 3600, 0, 3601, + 0, 3602, 0, 3603, 2709, 0, 3604, 0, 1458, 2707, + 3605, 0, 3606, 1455, 2705, 1456, 1457, 1458, 3608, 2664, + 1203, 0, 1209, 192, 1455, 2644, 1456, 1457, 1775, 2643, + 1455, 0, 1456, 1457, 2917, 0, 1455, 0, 1456, 1457, + 1455, 3625, 1456, 1457, 0, 0, 0, 131, 0, 153, + 3630, 2639, 3631, 3632, 0, 3633, 1455, 3634, 1456, 1457, + 174, 1455, 3635, 1456, 1457, 0, 1455, 0, 1456, 1457, + 2637, 1455, 0, 1456, 1457, 0, 0, 1455, 0, 1456, + 1457, 1455, 1431, 1456, 1457, 0, 0, 3660, 0, 0, + 0, 164, 2602, 0, 0, 0, 0, 152, 0, 3672, + 0, 2591, 3674, 1455, 0, 1456, 1457, 0, 0, 0, + 0, 0, 1788, 0, 3678, 0, 171, 0, 0, 172, + 0, 0, 1455, 0, 1456, 1457, 0, 0, 0, 0, + 3812, 0, 0, 0, 0, 0, 1775, 0, 1845, 1846, + 163, 162, 191, 0, 1455, 0, 1456, 1457, 0, 0, + 0, 0, 0, 1455, 0, 1456, 1457, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 
1801, 1804, 1805, 1806, 1807, 1808, + 1809, 1264, 1810, 1811, 1813, 1814, 1812, 1815, 1816, 1789, + 1790, 1791, 1792, 1773, 1774, 1802, 0, 1776, 0, 1777, + 1778, 1779, 1780, 1781, 1782, 1783, 1784, 1785, 0, 0, + 1786, 1793, 1794, 1795, 1796, 0, 1797, 1798, 1799, 1800, + 1788, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 157, 1847, 160, 0, 1844, 0, 158, + 159, 0, 0, 0, 0, 0, 175, 0, 0, 0, + 0, 0, 0, 3920, 0, 181, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1801, 1804, 1805, 1806, 1807, 1808, 1809, 0, + 1810, 1811, 1813, 1814, 1812, 1815, 1816, 1789, 1790, 1791, + 1792, 1773, 1774, 1802, 0, 1776, 0, 1777, 1778, 1779, + 1780, 1781, 1782, 1783, 1784, 1785, 0, 0, 1786, 1793, + 1794, 1795, 1796, 0, 1797, 1798, 1799, 1800, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 192, + 0, 0, 0, 0, 0, 0, 0, 1264, 1264, 0, + 1841, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 88, 0, 131, 88, 153, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 174, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 166, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 164, 0, 0, + 0, 0, 1747, 152, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1803, 942, 0, 0, 0, + 0, 0, 171, 0, 1765, 172, 0, 0, 0, 0, + 3700, 3702, 3701, 3765, 3766, 3767, 3768, 3769, 3770, 3771, + 791, 0, 0, 0, 1845, 1846, 163, 162, 191, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3964, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 197, 161, 0, 197, 0, 0, + 0, 716, 0, 0, 0, 0, 722, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 197, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3978, 1904, 0, 3979, + 0, 3980, 197, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1803, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 722, 197, + 722, 0, 1949, 0, 0, 0, 0, 0, 0, 157, + 1847, 160, 0, 1844, 0, 158, 159, 0, 0, 1975, + 0, 0, 175, 0, 0, 0, 0, 0, 0, 0, + 0, 181, 0, 0, 0, 1986, 0, 0, 0, 0, + 0, 0, 1990, 154, 0, 0, 155, 0, 0, 0, + 0, 0, 0, 2001, 2002, 2003, 2004, 2005, 2006, 2007, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 4074, 167, 0, 0, + 0, 0, 0, 0, 179, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 4090, 0, 
4091, 0, 4092, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 187, 3706, 0, 2370, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 3714, 3715, 0, 0, 3790, 3789, 3788, 0, 0, + 3786, 3787, 3785, 0, 0, 166, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 168, 173, + 170, 176, 177, 178, 180, 182, 183, 184, 185, 0, + 0, 0, 0, 0, 186, 188, 189, 190, 0, 0, + 0, 0, 4148, 0, 4149, 0, 0, 0, 0, 0, + 1087, 0, 0, 0, 0, 3791, 912, 0, 767, 768, + 3792, 3793, 916, 3794, 770, 771, 913, 914, 0, 765, + 769, 915, 917, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1087, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 2037, 0, + 0, 161, 0, 0, 0, 0, 0, 3697, 3698, 3699, + 3703, 3704, 3705, 3716, 3763, 3764, 3772, 3774, 868, 3773, + 3775, 3776, 3777, 3780, 3781, 3782, 3783, 3778, 3779, 3784, + 3680, 3684, 3681, 3682, 3683, 3695, 3685, 3686, 3687, 3688, + 3689, 3690, 3691, 3692, 3693, 3694, 3696, 3795, 3796, 3797, + 3798, 3799, 3800, 3709, 3713, 3712, 3710, 3711, 3707, 3708, + 3735, 3734, 3736, 3737, 3738, 3739, 3740, 3741, 3743, 3742, + 3744, 3745, 3746, 3747, 3748, 3749, 3717, 3718, 3721, 3722, + 3720, 3719, 3723, 3732, 3733, 3724, 3725, 3726, 3727, 3728, + 3729, 3731, 3730, 3750, 3751, 3752, 3753, 3754, 3756, 3755, + 3759, 3760, 3758, 3757, 3762, 3761, 0, 0, 0, 154, + 0, 0, 155, 0, 0, 0, 0, 0, 918, 0, + 919, 0, 0, 923, 0, 0, 0, 925, 924, 0, + 926, 0, 888, 887, 0, 0, 920, 921, 0, 922, + 0, 0, 0, 167, 0, 0, 0, 0, 0, 0, + 179, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 2570, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 187, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1510, 0, 1510, 3801, 3802, 3803, 3804, 3805, 3806, + 3807, 3808, 0, 0, 0, 0, 0, 0, 0, 2582, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 168, 173, 170, 176, 177, 178, + 180, 182, 183, 184, 185, 0, 0, 0, 0, 0, + 186, 188, 189, 190, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 197, + 0, 197, 0, 0, 2355, 0, 0, 0, 0, 0, + 0, 0, 2359, 0, 2362, 0, 0, 0, 2037, 0, + 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 722, 0, + 722, 722, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 722, 197, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1502, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 2762, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 2794, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1023, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 2833, + 2834, 0, 0, 0, 0, 0, 0, 0, 0, 2370, + 0, 0, 1023, 2854, 0, 0, 2037, 0, 0, 0, + 0, 0, 0, 2521, 0, 0, 0, 0, 0, 0, + 0, 0, 2538, 2539, 0, 0, 2543, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 2548, 0, + 0, 0, 0, 0, 0, 2551, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 2554, 0, 0, 0, 0, 0, 0, 0, 1502, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 707, 0, 707, 707, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 707, 195, 0, 0, + 0, 0, 0, 0, 0, 0, 2931, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 197, 0, 0, 0, 722, + 722, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 197, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 722, 0, + 0, 197, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 722, 0, 0, 0, + 0, 0, 0, 197, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1486, 0, 0, - 0, 0, 0, 0, 0, 0, 2722, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 2754, 0, + 722, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1502, 0, 0, 0, 0, 0, 722, + 722, 0, 722, 0, 722, 722, 0, 722, 722, 722, + 722, 722, 722, 0, 0, 0, 0, 0, 0, 0, + 1502, 0, 0, 1502, 722, 1502, 197, 0, 0, 0, + 0, 0, 959, 0, 2280, 0, 0, 960, 0, 0, + 0, 0, 0, 0, 0, 0, 197, 2087, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 722, + 0, 197, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 722, 0, 197, 197, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1006, 0, 0, 0, + 0, 0, 0, 0, 197, 0, 0, 0, 0, 0, + 0, 197, 0, 0, 0, 0, 0, 0, 0, 0, + 197, 197, 197, 197, 197, 197, 197, 197, 197, 722, + 3083, 0, 966, 967, 968, 969, 970, 971, 972, 973, + 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, + 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, + 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, + 1004, 1005, 1006, 1007, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 2855, 0, 0, 0, 2370, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 2793, 2794, 0, - 0, 0, 0, 0, 0, 0, 0, 2332, 0, 0, - 1006, 2814, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 2018, 0, 0, 0, 0, 0, 0, - 2482, 0, 0, 0, 0, 0, 945, 0, 0, 2499, - 2500, 946, 0, 2504, 0, 0, 0, 0, 0, 0, - 0, 2068, 0, 0, 2509, 0, 0, 0, 0, 0, - 0, 2512, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 2515, 0, 0, + 3151, 0, 88, 0, 0, 0, 2370, 2370, 2370, 2370, + 2370, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 2370, 0, 0, 2370, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 2904, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1486, 0, 0, 0, - 0, 0, 0, 0, 2891, 952, 953, 954, 955, 956, - 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, - 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, - 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, - 987, 988, 989, 990, 991, 992, 993, 0, 0, 0, - 0, 0, 195, 0, 0, 0, 707, 707, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 195, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 707, 0, 0, 195, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 707, 0, 0, 0, 0, 0, 0, 195, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 707, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1486, 0, 0, - 0, 0, 0, 707, 707, 0, 707, 0, 707, 707, - 0, 707, 707, 707, 707, 707, 707, 0, 0, 0, - 0, 0, 0, 0, 1486, 0, 0, 1486, 707, 1486, - 195, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 195, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 
0, 0, 707, 0, 195, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 707, - 0, 195, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 3042, 0, 195, 0, - 0, 0, 0, 0, 0, 195, 0, 0, 0, 0, - 0, 0, 0, 0, 195, 195, 195, 195, 195, 195, - 195, 195, 195, 707, 0, 0, 94, 0, 0, 945, - 0, 0, 0, 933, 946, 947, 948, 949, 934, 0, - 2815, 935, 936, 0, 937, 0, 0, 0, 0, 0, - 0, 0, 0, 2332, 0, 0, 0, 0, 942, 950, - 951, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 3110, 0, 86, 0, - 0, 0, 2332, 2332, 2332, 2332, 2332, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 2332, 0, 0, 2332, 3229, 3230, 0, 0, - 0, 0, 0, 0, 2864, 0, 0, 0, 952, 953, - 954, 955, 956, 957, 958, 959, 960, 961, 962, 963, - 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 722, 722, 0, 0, 0, + 0, 0, 0, 3258, 0, 0, 0, 0, 722, 0, + 0, 0, 0, 3266, 0, 0, 0, 197, 0, 0, + 0, 0, 0, 2953, 2954, 2955, 2956, 2957, 2958, 96, + 0, 0, 959, 0, 0, 0, 947, 960, 961, 962, + 963, 948, 0, 0, 949, 950, 0, 951, 2037, 2968, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 956, 964, 965, 0, 0, 722, 0, 0, 0, + 0, 0, 0, 2976, 0, 0, 1502, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1502, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3272, 3273, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 3217, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 3225, - 2913, 2914, 2915, 2916, 2917, 0, 0, 0, 0, 0, - 707, 707, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 3231, 0, 707, 2018, 2927, 0, 0, 0, 0, - 0, 0, 195, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 2935, + 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, + 1004, 1005, 1006, 1007, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1510, 0, 0, 0, 1510, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 3274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 2292, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 707, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1486, 0, 0, 0, 0, 0, 0, 3232, 3233, - 0, 0, 0, 0, 0, 0, 0, 0, 1486, 0, + 0, 0, 0, 0, 2370, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 3496, 0, 0, + 197, 0, 0, 0, 0, 722, 0, 0, 0, 0, + 0, 0, 3275, 3276, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 197, 0, 0, 722, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 197, 0, 0, 0, 722, 0, 0, + 2292, 197, 0, 197, 0, 0, 197, 197, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 707, 707, 0, 0, 0, + 0, 722, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1494, 0, - 0, 0, 1494, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 912, 0, 0, 0, + 0, 0, 916, 0, 0, 0, 913, 914, 0, 0, + 0, 915, 917, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 722, 0, + 0, 0, 0, 0, 0, 0, 941, 0, 0, 0, + 0, 0, 0, 0, 3226, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 722, 0, 0, 0, 0, + 0, 722, 0, 0, 0, 0, 0, 0, 0, 0, + 3264, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3278, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 898, 0, 0, 0, 0, 0, 902, 0, - 0, 0, 899, 900, 0, 0, 0, 901, 903, 0, + 701, 0, 0, 0, 3296, 0, 721, 3299, 722, 0, + 0, 0, 0, 722, 0, 0, 0, 722, 722, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 2332, - 0, 2254, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 3452, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3636, 197, 3640, 3641, 721, 0, + 721, 0, 197, 0, 0, 0, 0, 0, 0, 0, + 0, 197, 197, 0, 0, 197, 0, 197, 0, 3151, + 0, 88, 0, 3151, 0, 0, 0, 197, 0, 0, + 0, 0, 0, 0, 197, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 195, 0, 0, 0, 0, 707, + 197, 0, 0, 0, 0, 0, 0, 0, 0, 722, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 3825, + 0, 0, 0, 0, 0, 0, 3451, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 195, 0, 0, 707, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 195, 0, 0, 0, - 707, 0, 0, 2254, 195, 0, 195, 0, 0, 195, - 195, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 
0, 0, 707, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 3185, + 0, 0, 0, 0, 3477, 0, 0, 0, 1502, 0, + 2292, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 707, 0, 0, 0, 3223, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 3235, - 0, 0, 0, 0, 0, 0, 0, 0, 707, 0, - 0, 0, 0, 0, 707, 0, 0, 0, 3252, 0, - 0, 3255, 0, 927, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 3511, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 3526, 0, 0, 3527, 3528, 3529, 0, 0, + 0, 3151, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1527, + 1528, 1529, 1530, 1531, 1532, 1533, 1534, 1535, 1536, 1537, + 1538, 1539, 1540, 1541, 1542, 1543, 1544, 1545, 1547, 1548, + 1549, 1550, 1551, 1552, 1553, 1554, 1555, 1556, 1557, 1558, + 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568, + 1569, 1570, 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, + 1579, 1580, 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, + 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 1598, + 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1608, + 1609, 1610, 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618, + 1619, 1620, 1621, 1622, 1624, 1625, 1626, 1627, 1628, 1629, + 1630, 1631, 1632, 1633, 1634, 1635, 1636, 1637, 1638, 1639, + 1645, 1646, 1647, 1648, 1662, 1663, 1664, 1665, 1666, 1667, + 1668, 1669, 1670, 1671, 1672, 1673, 1674, 1675, 0, 0, + 0, 0, 197, 0, 0, 0, 0, 0, 0, 0, + 197, 0, 3951, 0, 0, 0, 0, 0, 0, 0, + 0, 722, 0, 0, 0, 0, 0, 0, 0, 0, + 88, 0, 722, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 707, 0, 0, 0, 0, 707, 0, 0, 0, - 707, 707, 0, 0, 0, 0, 3594, 0, 3598, 3599, - 0, 0, 0, 0, 0, 0, 686, 0, 0, 0, - 0, 0, 706, 0, 0, 0, 0, 0, 0, 0, - 0, 3110, 0, 86, 0, 3110, 0, 0, 195, 0, - 0, 0, 0, 0, 0, 195, 0, 0, 0, 0, - 0, 0, 0, 0, 195, 195, 0, 0, 195, 0, - 195, 0, 0, 0, 0, 0, 0, 0, 0, 195, - 0, 0, 0, 706, 0, 706, 195, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 197, 0, 0, + 0, 0, 197, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 3407, 0, 195, 3780, 0, 0, 0, 0, 0, 0, - 0, 707, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 3433, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 3995, 0, 0, 0, 898, 0, 0, 4003, 0, + 722, 0, 0, 88, 0, 0, 197, 0, 0, 0, + 0, 0, 0, 197, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 722, 0, 0, + 0, 0, 0, 0, 722, 0, 0, 0, 0, 0, + 0, 0, 0, 722, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1502, + 0, 0, 0, 0, 0, 720, 0, 0, 0, 0, + 0, 0, 197, 197, 197, 197, 197, 197, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 721, 1418, + 721, 721, 0, 0, 0, 0, 0, 197, 197, 0, + 0, 0, 0, 0, 0, 4094, 0, 0, 0, 0, + 721, 0, 0, 0, 0, 0, 0, 1079, 0, 1086, + 0, 0, 197, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1501, + 0, 0, 0, 0, 0, 722, 0, 0, 899, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1486, 0, 2254, 0, 0, 0, 3469, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 3484, 0, 0, 3485, - 3486, 3487, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 3110, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1511, 1512, 1513, 1514, 1515, 1516, 1517, - 1518, 1519, 1520, 1521, 1522, 1523, 1524, 1525, 1526, 1530, - 1531, 1532, 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, - 1541, 1542, 1543, 1544, 1545, 1546, 1547, 1548, 1549, 1550, - 1551, 1552, 1553, 1554, 1555, 1556, 1557, 1558, 1559, 1560, - 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, - 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, 1580, - 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590, - 1591, 1592, 1593, 1594, 1595, 1596, 1597, 1598, 1599, 1600, - 1601, 1602, 1603, 1604, 1605, 1607, 1608, 1609, 1610, 1611, - 1612, 1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, - 1622, 1628, 1629, 1630, 1631, 1645, 1646, 1647, 1648, 1649, - 1650, 1651, 1652, 1653, 1654, 1655, 1656, 1657, 1658, 0, + 0, 0, 0, 722, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 195, 0, 0, 666, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 3906, 0, 195, 0, 0, 0, 0, - 0, 0, 0, 195, 0, 0, 0, 0, 0, 0, - 0, 86, 0, 0, 707, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 707, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1033, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 195, 0, 0, 0, 0, 195, 0, 0, 0, 0, + 0, 1052, 1052, 0, 0, 0, 0, 0, 0, 0, + 0, 666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 884, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 722, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 3950, 0, 0, 0, 0, 0, 0, 3958, - 0, 0, 0, 0, 86, 0, 0, 0, 0, 0, - 0, 0, 0, 707, 0, 0, 0, 0, 0, 195, - 0, 0, 0, 0, 0, 0, 195, 0, 0, 0, + 722, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3962, 0, 0, 0, 0, 1501, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 707, 0, 0, 0, 0, 0, 705, 707, 0, 0, - 0, 0, 0, 0, 0, 0, 707, 0, 0, 0, + 0, 722, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 197, 0, 0, 0, 722, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1486, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 195, 195, 195, 195, 195, - 0, 0, 0, 0, 0, 0, 0, 1062, 0, 1069, - 0, 0, 0, 0, 0, 0, 4049, 0, 0, 195, - 195, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 722, 0, 0, 0, 1502, 0, 0, + 722, 722, 0, 1502, 197, 197, 197, 197, 197, 721, + 721, 0, 0, 0, 0, 0, 197, 0, 0, 0, + 0, 0, 197, 0, 197, 0, 0, 197, 197, 197, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 195, 0, 0, 0, 0, 0, - 706, 1402, 706, 706, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 707, 0, 0, - 0, 0, 706, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 721, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 885, 0, - 0, 0, 0, 1485, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 707, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 721, 0, 0, 0, + 0, 0, 0, 197, 0, 0, 0, 1818, 0, 0, + 0, 0, 0, 0, 0, 0, 722, 1827, 0, 1502, + 0, 0, 0, 0, 722, 0, 0, 0, 0, 197, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 721, 0, 1853, 197, 0, 0, 959, 0, 0, 0, + 1862, 960, 0, 1501, 1864, 0, 0, 1867, 1868, 721, + 721, 2087, 721, 197, 721, 721, 197, 721, 721, 721, + 721, 
721, 721, 0, 0, 0, 0, 0, 0, 0, + 1501, 1899, 1900, 1501, 721, 1501, 0, 1905, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 193, 0, 0, 653, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 721, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 653, 0, 0, + 0, 0, 1967, 0, 0, 721, 966, 967, 968, 969, + 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, + 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, + 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, + 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 0, 721, + 722, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1016, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1035, - 1035, 0, 0, 0, 0, 0, 0, 0, 0, 653, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 707, - 0, 0, 0, 0, 3917, 0, 0, 0, 0, 0, - 0, 707, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 197, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 707, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 195, 0, 0, 0, - 707, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1485, 0, 707, 0, 0, 0, 1486, 0, - 0, 707, 707, 0, 1486, 195, 195, 195, 195, 195, - 0, 0, 0, 0, 0, 0, 0, 195, 0, 0, - 0, 0, 0, 195, 0, 195, 0, 0, 195, 195, - 195, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1266, 0, 1266, + 1266, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 197, 0, 0, 0, 0, 0, 1430, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 706, 706, 0, 0, 0, 0, 0, 0, + 0, 0, 197, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 195, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 707, 706, 0, - 1486, 0, 0, 0, 0, 707, 0, 0, 0, 0, - 195, 0, 0, 0, 0, 0, 706, 0, 0, 0, - 0, 0, 0, 0, 195, 0, 0, 1800, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1809, 0, 0, - 0, 0, 0, 195, 0, 0, 195, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 706, 0, 1835, 0, 0, 0, 0, 0, 0, 0, - 1844, 0, 0, 1485, 1846, 0, 0, 1849, 1850, 706, - 706, 0, 706, 0, 706, 706, 0, 706, 706, 706, - 706, 706, 706, 0, 0, 0, 0, 0, 0, 0, - 1485, 1881, 1882, 1485, 706, 1485, 0, 1887, 0, 0, + 0, 197, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, + 0, 197, 0, 0, 197, 197, 197, 0, 0, 0, + 0, 0, 0, 0, 722, 722, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 721, 721, 0, 0, 0, + 0, 666, 0, 666, 0, 0, 0, 0, 721, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 706, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1949, 0, 0, 706, 0, 0, 0, 0, - 707, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1249, 0, 1249, 1249, 0, 0, + 0, 0, 0, 722, 722, 722, 722, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 195, 1414, 0, 0, 706, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 666, 0, 0, 721, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1501, 0, 0, 0, + 0, 0, 0, 0, 0, 2096, 0, 0, 0, 0, + 0, 1503, 0, 1501, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 195, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 195, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 195, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 195, 0, 0, 195, 195, 195, 0, 0, 0, - 0, 0, 0, 0, 707, 707, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 653, 0, - 653, 0, 0, 653, 0, 0, 0, 0, 0, 0, - 0, 707, 707, 707, 707, 0, 706, 706, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 706, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 197, 0, 1696, 1697, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 653, 0, + 0, 0, 0, 0, 0, 1502, 0, 0, 0, 0, + 722, 0, 722, 0, 0, 0, 0, 721, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1741, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1760, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 706, 1487, 0, - 0, 0, 0, 0, 0, 0, 0, 1485, 0, 0, - 0, 0, 0, 0, 0, 0, 2077, 0, 0, 0, - 0, 0, 0, 0, 1485, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 721, 0, 0, 0, 0, + 0, 722, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1503, 0, 0, 197, 0, 0, 722, 0, 1079, + 0, 0, 0, 721, 0, 0, 0, 0, 0, 0, + 722, 0, 0, 0, 0, 0, 0, 721, 1870, 1870, + 721, 1870, 0, 1870, 1870, 0, 1879, 1870, 1870, 1870, + 1870, 1870, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 721, 0, 1079, 0, 0, 
0, 666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1679, 1680, 0, 0, - 0, 706, 706, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1033, 0, 0, 0, 0, 0, 0, 0, 1947, 0, + 0, 0, 0, 722, 0, 0, 0, 722, 722, 0, + 0, 0, 0, 666, 1971, 0, 0, 0, 721, 0, + 0, 0, 0, 0, 0, 2453, 2454, 2455, 0, 0, + 0, 0, 0, 0, 0, 666, 0, 0, 0, 0, + 722, 0, 0, 0, 0, 721, 0, 0, 0, 0, + 0, 721, 1862, 0, 0, 1862, 0, 1862, 1266, 0, + 0, 0, 0, 2485, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1723, 195, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1503, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 721, 0, + 0, 0, 0, 721, 0, 0, 0, 721, 721, 0, + 0, 0, 1503, 0, 0, 1503, 0, 1503, 666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1742, 0, 1486, 0, 0, 0, 0, 707, 0, 707, + 0, 0, 0, 0, 0, 0, 0, 0, 1921, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 666, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1973, + 666, 0, 0, 0, 0, 0, 0, 0, 722, 0, + 0, 0, 0, 0, 0, 0, 666, 0, 0, 0, + 0, 0, 0, 666, 197, 0, 0, 0, 0, 0, + 0, 0, 1999, 2000, 666, 666, 666, 666, 666, 666, + 666, 0, 722, 197, 0, 0, 0, 0, 0, 721, + 0, 0, 0, 0, 1266, 1266, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 2025, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1062, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 706, 0, 0, - 0, 0, 0, 1852, 1852, 0, 1852, 0, 1852, 1852, - 707, 1861, 1852, 1852, 1852, 1852, 1852, 0, 0, 0, - 0, 0, 0, 195, 0, 0, 707, 0, 1062, 0, - 0, 0, 0, 0, 0, 0, 0, 1487, 0, 707, - 0, 0, 0, 0, 0, 706, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1929, 0, 0, 0, 0, 0, 0, - 0, 0, 706, 0, 0, 0, 0, 0, 0, 1953, - 0, 0, 0, 0, 0, 0, 706, 0, 0, 706, - 0, 0, 0, 653, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 706, 0, 707, 1016, 0, 0, 707, 707, 0, 0, - 0, 0, 0, 1249, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 653, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 707, - 0, 0, 0, 0, 0, 0, 0, 0, 653, 0, - 0, 0, 0, 0, 0, 0, 0, 706, 0, 0, - 0, 0, 0, 0, 2415, 2416, 2417, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 722, 0, 1501, 0, + 721, 0, 0, 0, 0, 
2082, 1502, 0, 722, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 706, 0, 0, 0, 0, 0, - 706, 1844, 0, 0, 1844, 0, 1844, 0, 1487, 0, - 0, 0, 2447, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1487, 0, 0, 1487, 0, - 1487, 653, 0, 0, 0, 0, 0, 706, 0, 0, - 0, 0, 706, 0, 0, 0, 706, 706, 0, 0, - 0, 1903, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 653, 0, 0, 0, - 0, 0, 0, 0, 707, 0, 0, 0, 0, 0, - 1249, 1249, 1955, 0, 0, 0, 0, 0, 0, 0, - 195, 0, 0, 2006, 0, 0, 0, 0, 0, 653, - 0, 0, 0, 0, 0, 0, 653, 0, 707, 195, - 0, 0, 0, 0, 0, 1980, 1981, 653, 653, 653, - 653, 653, 653, 653, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 722, 2292, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 722, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 2063, 0, 0, 0, 0, 0, 706, 0, 0, + 0, 0, 722, 0, 0, 0, 0, 0, 0, 197, + 722, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 707, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1486, 0, 707, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 2131, 2132, 0, 0, 0, + 0, 0, 722, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 707, 2254, 0, 0, 1485, 0, 706, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 707, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 707, 0, - 0, 0, 0, 0, 0, 195, 707, 0, 0, 0, + 0, 0, 722, 0, 0, 0, 0, 197, 0, 0, + 0, 0, 0, 0, 0, 0, 1266, 0, 1503, 0, + 0, 722, 0, 0, 0, 0, 0, 0, 722, 0, + 1052, 1052, 0, 0, 0, 1503, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 2329, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 707, 0, - 0, 1249, 0, 653, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 707, 0, - 0, 0, 0, 195, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 707, 0, 2291, - 0, 0, 0, 0, 707, 0, 0, 0, 0, 0, - 0, 0, 1487, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1035, 1035, 2303, 0, 0, 1487, + 0, 721, 2341, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 721, 0, 0, 0, 1741, 0, 0, 1266, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, - 1723, 0, 0, 1249, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1062, 0, 0, 0, 0, 0, + 1079, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 2868, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1052, 1973, + 1052, 1052, 1052, 1052, 1052, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1086, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 706, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 706, 0, 0, 0, 0, 0, 0, 0, 0, + 721, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1921, 0, 1079, 0, 0, 0, 0, 0, + 1086, 0, 0, 0, 0, 0, 0, 721, 1052, 0, + 0, 0, 0, 0, 721, 0, 0, 0, 1862, 1862, + 0, 0, 1033, 721, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 666, 0, 0, 0, 1501, + 2941, 0, 1973, 666, 0, 666, 0, 1079, 666, 2379, + 0, 0, 2082, 0, 0, 0, 2082, 2082, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1069, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 2828, 1062, 0, - 0, 0, 0, 0, 1069, 0, 0, 0, 0, 0, - 0, 1035, 1955, 1035, 1035, 1035, 1035, 1035, 0, 0, - 94, 0, 0, 945, 0, 0, 0, 933, 946, 947, - 948, 949, 934, 0, 0, 935, 936, 0, 937, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 706, - 0, 1062, 942, 950, 951, 1903, 2063, 0, 0, 0, - 2063, 2063, 0, 0, 0, 0, 0, 0, 0, 0, - 1035, 0, 0, 0, 0, 0, 706, 0, 0, 0, - 0, 0, 0, 706, 1016, 0, 0, 1844, 1844, 0, - 0, 0, 706, 0, 0, 0, 0, 653, 0, 0, - 3229, 3230, 0, 0, 1955, 653, 0, 653, 1485, 2901, - 653, 2341, 952, 953, 954, 955, 956, 957, 958, 959, - 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, - 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, - 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, - 990, 991, 992, 993, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 2518, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 706, 0, 3231, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 721, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 2557, 0, + 0, 0, 0, 721, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1266, + 0, 0, 0, 0, 0, 0, 0, 666, 0, 0, + 0, 0, 0, 0, 666, 0, 0, 0, 0, 0, + 0, 0, 0, 666, 666, 0, 0, 666, 0, 2545, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 666, + 0, 0, 0, 0, 0, 0, 666, 0, 721, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 721, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 706, 0, 1249, 0, 0, 0, 0, 0, 0, 0, + 0, 721, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 721, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 721, 0, 0, 0, 1501, 0, 0, + 721, 721, 0, 1501, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 3232, 3233, 0, 0, 0, 0, 0, 0, + 1503, 0, 1973, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 653, - 0, 0, 0, 0, 0, 0, 653, 0, 0, 0, - 0, 0, 0, 0, 0, 653, 653, 0, 0, 653, - 0, 2506, 0, 0, 0, 0, 0, 0, 0, 0, - 653, 0, 0, 0, 0, 0, 0, 653, 0, 0, - 0, 0, 0, 0, 0, 706, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 706, 0, 0, - 0, 0, 0, 653, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 706, 0, + 0, 0, 0, 0, 3221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 706, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 721, 0, 0, 1501, + 0, 0, 0, 0, 721, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 706, 0, 0, 0, 1485, 0, 0, 706, 706, 0, - 1485, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1487, 0, 1955, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 2762, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 2777, 0, 0, 0, 0, + 2802, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 2817, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3303, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 3180, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 706, 0, 0, 1485, 0, 0, 0, - 0, 706, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 2860, 0, 0, 0, 0, 
0, 0, - 0, 0, 3259, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 2303, 0, 0, 0, 0, 0, 0, 2885, 0, 0, - 0, 0, 0, 0, 0, 0, 2890, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 2900, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 666, 0, 2341, 0, 0, 0, + 721, 0, 1921, 2925, 0, 0, 0, 0, 0, 0, + 0, 0, 2930, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 653, 0, 0, 0, - 0, 0, 0, 0, 1903, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 706, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 666, + 0, 0, 0, 0, 666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 653, 0, 0, 0, 0, 653, 2063, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3501, 0, 0, 0, 2082, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 666, 0, + 0, 0, 0, 0, 0, 2910, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 2063, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 721, 721, 0, 0, 0, 0, + 0, 0, 2082, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1503, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 666, 666, 666, 666, 666, 666, + 0, 0, 0, 721, 721, 721, 721, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 666, + 666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 653, 0, 0, 0, 0, 0, 3459, 2870, 0, 0, + 0, 0, 0, 0, 666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1052, 0, 0, 0, 0, 0, 3084, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1266, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 706, 706, 0, 1487, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 653, 653, 653, 653, - 653, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 3043, - 653, 653, 0, 0, 0, 0, 0, 706, 706, 706, - 706, 1249, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 653, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1852, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1035, 0, 0, 0, 0, 0, 0, 0, - 3086, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1249, 0, 0, 0, 0, 0, - 0, 3113, 1852, 0, 0, 0, 0, 0, 0, 0, + 
1870, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 3127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1266, 0, 0, 0, 0, 0, 0, 3154, + 1870, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1501, 0, 0, 0, 0, + 721, 0, 721, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1052, 0, 0, 0, 1079, 0, 0, 0, 0, + 0, 721, 0, 2341, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 721, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1062, 0, 0, - 0, 0, 0, 0, 0, 2303, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1485, 0, - 0, 0, 0, 706, 0, 706, 0, 0, 0, 0, + 721, 0, 0, 0, 0, 0, 0, 666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1503, + 0, 0, 0, 0, 0, 1503, 666, 666, 666, 666, + 666, 0, 0, 0, 0, 0, 0, 0, 3171, 0, + 0, 0, 0, 0, 1921, 0, 666, 0, 0, 666, + 3179, 1973, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 721, 0, 0, 0, 721, 721, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1035, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 666, 0, 0, 0, 0, + 721, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1503, 0, 0, 0, 0, 0, 0, 0, 3416, + 0, 666, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 706, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 653, 0, 0, - 0, 0, 706, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 706, 0, 0, 0, 1487, - 0, 0, 0, 0, 0, 1487, 653, 653, 653, 653, - 653, 0, 0, 0, 0, 0, 0, 0, 3130, 0, - 0, 0, 0, 0, 1903, 0, 653, 0, 0, 653, - 3138, 1955, 0, 0, 0, 0, 0, 0, 0, 0, - 3372, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 666, 0, 0, 666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 706, 0, - 0, 0, 706, 706, 0, 653, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1487, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 653, 0, 0, 0, 706, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 653, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 653, 0, 0, 653, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 721, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 2303, 2303, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 721, 2341, 2341, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 706, 3523, 3524, 3525, 3526, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 3565, 3566, 3567, 3568, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 706, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 653, 0, 0, 0, + 0, 0, 0, 0, 0, 666, 721, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1501, 0, 721, 0, + 0, 0, 0, 0, 666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 721, 721, 0, 0, + 0, 0, 0, 666, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 721, 0, + 0, 0, 0, 666, 0, 0, 666, 666, 666, 0, + 0, 0, 721, 0, 0, 0, 0, 0, 0, 0, + 721, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 653, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 653, 0, 0, 0, 0, 706, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1485, 0, - 706, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 653, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 706, 706, - 0, 0, 653, 0, 0, 653, 653, 653, 0, 0, + 0, 0, 721, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 706, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 706, 0, 0, 3622, 0, 3624, - 0, 0, 706, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 721, 0, 0, 0, 0, 0, 0, 3664, + 0, 3666, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 721, 0, 0, 0, 0, 0, 0, 721, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 706, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 2303, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 706, 0, 3790, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1249, - 0, 0, 0, 706, 0, 0, 0, 0, 0, 0, - 706, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 2341, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3835, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 
1266, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1921, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 3863, 0, 0, 0, 3863, 3863, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1503, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1903, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 2303, - 0, 0, 0, 1487, 0, 0, 0, 0, 0, 0, + 0, 0, 3908, 0, 0, 0, 3908, 3908, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 2341, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1921, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1903, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 2303, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 2303, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 2341, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 2341, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 3944, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 3948, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1249, 1249, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 3997, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 3113, 0, - 0, 0, 0, 0, 0, 0, 4013, 0, 0, 0, + 0, 0, 0, 0, 0, 3989, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 3993, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1266, 1266, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1903, 0, 0, 0, 0, 0, 0, 3944, 0, + 0, 0, 0, 0, 0, 0, 1921, 4042, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 653, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 2303, 0, + 0, 3154, 0, 0, 0, 666, 0, 0, 0, 4058, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, - 0, 0, 0, 0, 0, 0, 0, 3372, 0, 0, - 0, 0, 0, 0, 4013, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1487, 0, 0, 0, 0, 0, 0, + 0, 3989, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 3977, 0, 0, 0, 0, 0, + 0, 2341, 0, 0, 0, 0, 0, 0, 1503, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3416, 0, 0, 0, 0, 0, 0, 4058, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 4022, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 391, - 0, 0, 0, 1385, 1369, 511, 1903, 1312, 1388, 1281, - 1300, 1398, 1303, 1306, 1348, 1259, 1326, 410, 1297, 1252, - 1285, 1254, 1292, 1255, 1283, 1314, 268, 1280, 1371, 1330, - 1387, 361, 265, 1261, 1286, 424, 1302, 203, 1350, 480, - 250, 372, 369, 566, 280, 271, 267, 248, 314, 380, - 422, 501, 416, 1394, 365, 1336, 0, 490, 395, 0, - 0, 0, 1316, 1375, 1324, 1362, 1311, 1349, 1269, 1335, - 1389, 1298, 1345, 1390, 320, 246, 322, 202, 407, 491, - 284, 0, 0, 0, 1955, 3979, 929, 0, 0, 0, - 0, 3980, 0, 0, 0, 0, 236, 0, 0, 243, - 0, 0, 0, 346, 355, 354, 335, 336, 338, 340, - 345, 352, 358, 1294, 1342, 1384, 1295, 1344, 263, 318, - 270, 262, 563, 1395, 1374, 1258, 1323, 1383, 0, 0, - 227, 1386, 1318, 0, 1347, 0, 1401, 1253, 1338, 0, - 1256, 1260, 1397, 1379, 1289, 273, 0, 0, 0, 0, - 0, 0, 0, 1315, 1325, 1359, 1363, 1309, 0, 0, - 0, 0, 0, 0, 0, 0, 1287, 0, 1334, 0, - 0, 0, 1265, 1257, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 394, 0, 0, 0, 1401, 1387, + 522, 1921, 1329, 1404, 1298, 1317, 1414, 1320, 1323, 1366, + 1276, 1344, 413, 1314, 1269, 1302, 1271, 1309, 1272, 1300, + 1331, 271, 1297, 1389, 1348, 1403, 364, 268, 1278, 1303, + 427, 1319, 205, 1368, 483, 253, 375, 372, 577, 283, + 274, 270, 251, 317, 383, 425, 512, 419, 1410, 368, + 1354, 0, 493, 398, 0, 0, 0, 1333, 1393, 1342, + 1380, 1328, 1367, 1286, 1353, 1405, 1315, 1363, 1406, 323, + 249, 325, 204, 410, 494, 287, 0, 0, 0, 1973, + 4024, 943, 
0, 0, 0, 0, 4025, 0, 0, 0, + 0, 239, 0, 0, 246, 0, 0, 0, 349, 358, + 357, 338, 339, 341, 343, 348, 355, 361, 1311, 1360, + 1400, 1312, 1362, 266, 321, 273, 265, 574, 1411, 1392, + 1275, 1341, 1399, 1336, 0, 0, 230, 1402, 1335, 0, + 1365, 0, 1417, 1270, 1356, 0, 1273, 1277, 1413, 1397, + 1306, 276, 0, 0, 0, 0, 0, 0, 0, 1332, + 1343, 1377, 1381, 1326, 0, 0, 0, 0, 0, 0, + 0, 0, 1304, 0, 1352, 0, 0, 0, 1282, 1274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1313, 0, 0, 0, - 0, 1268, 0, 1288, 1360, 0, 1251, 295, 1262, 396, - 255, 0, 447, 1367, 1378, 1310, 605, 1382, 1308, 1307, - 1354, 1266, 1373, 1301, 360, 1264, 327, 197, 223, 0, - 1299, 406, 455, 467, 1372, 1284, 1293, 251, 1291, 465, - 420, 583, 231, 282, 452, 426, 463, 434, 285, 1333, - 1352, 464, 367, 568, 444, 580, 606, 607, 261, 400, - 592, 505, 600, 624, 224, 258, 414, 498, 586, 487, - 392, 564, 565, 326, 486, 293, 201, 364, 612, 222, - 473, 366, 240, 229, 570, 589, 287, 450, 619, 211, - 500, 578, 237, 477, 0, 0, 627, 245, 497, 213, - 575, 496, 388, 323, 324, 212, 0, 451, 266, 291, - 0, 0, 256, 409, 572, 573, 254, 628, 226, 599, - 218, 1263, 598, 402, 567, 576, 389, 378, 217, 574, - 387, 377, 331, 350, 351, 278, 304, 441, 370, 442, - 303, 305, 398, 397, 399, 205, 587, 0, 206, 0, - 492, 588, 629, 446, 210, 232, 233, 235, 1279, 277, - 281, 289, 292, 300, 301, 310, 362, 413, 440, 436, - 445, 1368, 562, 581, 593, 604, 610, 611, 613, 614, - 615, 616, 617, 620, 618, 401, 308, 488, 330, 368, - 1357, 1400, 419, 466, 238, 585, 489, 199, 1273, 1278, - 1271, 0, 252, 253, 1339, 558, 1274, 1272, 1328, 1329, - 1275, 1391, 1392, 1393, 1376, 630, 631, 632, 633, 634, - 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, - 645, 646, 647, 625, 1361, 1267, 0, 1276, 1277, 1370, - 1380, 1381, 648, 379, 479, 582, 332, 344, 347, 337, - 356, 0, 357, 333, 334, 339, 341, 342, 343, 348, - 349, 353, 359, 247, 208, 385, 393, 561, 309, 214, - 215, 216, 507, 508, 509, 510, 596, 597, 601, 456, - 457, 458, 459, 290, 591, 306, 462, 461, 328, 
329, - 374, 443, 523, 525, 536, 540, 542, 544, 550, 553, - 524, 526, 537, 541, 543, 545, 551, 554, 513, 515, - 517, 519, 532, 531, 528, 556, 557, 534, 539, 518, - 530, 535, 548, 555, 552, 512, 516, 520, 529, 547, - 546, 527, 538, 549, 533, 521, 514, 522, 1332, 196, - 219, 363, 1396, 448, 286, 626, 595, 590, 204, 221, - 1270, 260, 1282, 1290, 0, 1296, 1304, 1305, 1317, 1319, - 1320, 1321, 1322, 0, 1340, 1341, 1343, 1351, 1353, 1356, - 1358, 1365, 1377, 1399, 198, 200, 207, 220, 230, 234, - 241, 259, 274, 276, 283, 296, 307, 315, 316, 319, - 325, 375, 381, 382, 383, 384, 403, 404, 405, 408, - 411, 412, 415, 417, 418, 421, 425, 429, 430, 431, - 433, 435, 437, 449, 454, 468, 469, 470, 471, 472, - 475, 476, 481, 482, 483, 484, 485, 493, 494, 499, - 569, 571, 584, 602, 608, 474, 298, 299, 438, 439, - 311, 312, 622, 623, 297, 579, 609, 577, 621, 603, - 432, 373, 1331, 1337, 376, 279, 302, 317, 1346, 594, - 495, 225, 460, 288, 249, 1364, 1366, 209, 244, 228, - 257, 272, 275, 321, 386, 394, 423, 428, 294, 269, - 242, 453, 239, 478, 502, 503, 504, 506, 390, 264, - 427, 1327, 1355, 371, 559, 560, 313, 391, 0, 0, - 0, 1385, 1369, 511, 0, 1312, 1388, 1281, 1300, 1398, - 1303, 1306, 1348, 1259, 1326, 410, 1297, 1252, 1285, 1254, - 1292, 1255, 1283, 1314, 268, 1280, 1371, 1330, 1387, 361, - 265, 1261, 1286, 424, 1302, 203, 1350, 480, 250, 372, - 369, 566, 280, 271, 267, 248, 314, 380, 422, 501, - 416, 1394, 365, 1336, 0, 490, 395, 0, 0, 0, - 1316, 1375, 1324, 1362, 1311, 1349, 1269, 1335, 1389, 1298, - 1345, 1390, 320, 246, 322, 202, 407, 491, 284, 0, - 0, 0, 0, 0, 194, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 236, 0, 0, 243, 0, 0, - 0, 346, 355, 354, 335, 336, 338, 340, 345, 352, - 358, 1294, 1342, 1384, 1295, 1344, 263, 318, 270, 262, - 563, 1395, 1374, 1258, 1323, 1383, 0, 0, 227, 1386, - 1318, 0, 1347, 0, 1401, 1253, 1338, 0, 1256, 1260, - 1397, 1379, 1289, 273, 0, 0, 0, 0, 0, 0, - 0, 1315, 1325, 1359, 1363, 1309, 0, 0, 0, 0, - 0, 0, 3139, 0, 1287, 0, 1334, 0, 0, 0, - 1265, 1257, 0, 0, 
0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1313, 0, 0, 0, 0, 1268, - 0, 1288, 1360, 0, 1251, 295, 1262, 396, 255, 0, - 447, 1367, 1378, 1310, 605, 1382, 1308, 1307, 1354, 1266, - 1373, 1301, 360, 1264, 327, 197, 223, 0, 1299, 406, - 455, 467, 1372, 1284, 1293, 251, 1291, 465, 420, 583, - 231, 282, 452, 426, 463, 434, 285, 1333, 1352, 464, - 367, 568, 444, 580, 606, 607, 261, 400, 592, 505, - 600, 624, 224, 258, 414, 498, 586, 487, 392, 564, - 565, 326, 486, 293, 201, 364, 612, 222, 473, 366, - 240, 229, 570, 589, 287, 450, 619, 211, 500, 578, - 237, 477, 0, 0, 627, 245, 497, 213, 575, 496, - 388, 323, 324, 212, 0, 451, 266, 291, 0, 0, - 256, 409, 572, 573, 254, 628, 226, 599, 218, 1263, - 598, 402, 567, 576, 389, 378, 217, 574, 387, 377, - 331, 350, 351, 278, 304, 441, 370, 442, 303, 305, - 398, 397, 399, 205, 587, 0, 206, 0, 492, 588, - 629, 446, 210, 232, 233, 235, 1279, 277, 281, 289, - 292, 300, 301, 310, 362, 413, 440, 436, 445, 1368, - 562, 581, 593, 604, 610, 611, 613, 614, 615, 616, - 617, 620, 618, 401, 308, 488, 330, 368, 1357, 1400, - 419, 466, 238, 585, 489, 199, 1273, 1278, 1271, 0, - 252, 253, 1339, 558, 1274, 1272, 1328, 1329, 1275, 1391, - 1392, 1393, 1376, 630, 631, 632, 633, 634, 635, 636, - 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, - 647, 625, 1361, 1267, 0, 1276, 1277, 1370, 1380, 1381, - 648, 379, 479, 582, 332, 344, 347, 337, 356, 0, - 357, 333, 334, 339, 341, 342, 343, 348, 349, 353, - 359, 247, 208, 385, 393, 561, 309, 214, 215, 216, - 507, 508, 509, 510, 596, 597, 601, 456, 457, 458, - 459, 290, 591, 306, 462, 461, 328, 329, 374, 443, - 523, 525, 536, 540, 542, 544, 550, 553, 524, 526, - 537, 541, 543, 545, 551, 554, 513, 515, 517, 519, - 532, 531, 528, 556, 557, 534, 539, 518, 530, 535, - 548, 555, 552, 512, 516, 520, 529, 547, 546, 527, - 538, 549, 533, 521, 514, 522, 1332, 196, 219, 363, - 1396, 448, 286, 626, 595, 590, 204, 221, 1270, 260, - 
1282, 1290, 0, 1296, 1304, 1305, 1317, 1319, 1320, 1321, - 1322, 0, 1340, 1341, 1343, 1351, 1353, 1356, 1358, 1365, - 1377, 1399, 198, 200, 207, 220, 230, 234, 241, 259, - 274, 276, 283, 296, 307, 315, 316, 319, 325, 375, - 381, 382, 383, 384, 403, 404, 405, 408, 411, 412, - 415, 417, 418, 421, 425, 429, 430, 431, 433, 435, - 437, 449, 454, 468, 469, 470, 471, 472, 475, 476, - 481, 482, 483, 484, 485, 493, 494, 499, 569, 571, - 584, 602, 608, 474, 298, 299, 438, 439, 311, 312, - 622, 623, 297, 579, 609, 577, 621, 603, 432, 373, - 1331, 1337, 376, 279, 302, 317, 1346, 594, 495, 225, - 460, 288, 249, 1364, 1366, 209, 244, 228, 257, 272, - 275, 321, 386, 394, 423, 428, 294, 269, 242, 453, - 239, 478, 502, 503, 504, 506, 390, 264, 427, 1327, - 1355, 371, 559, 560, 313, 391, 0, 0, 0, 1385, - 1369, 511, 0, 1312, 1388, 1281, 1300, 1398, 1303, 1306, - 1348, 1259, 1326, 410, 1297, 1252, 1285, 1254, 1292, 1255, - 1283, 1314, 268, 1280, 1371, 1330, 1387, 361, 265, 1261, - 1286, 424, 1302, 203, 1350, 480, 250, 372, 369, 566, - 280, 271, 267, 248, 314, 380, 422, 501, 416, 1394, - 365, 1336, 0, 490, 395, 0, 0, 0, 1316, 1375, - 1324, 1362, 1311, 1349, 1269, 1335, 1389, 1298, 1345, 1390, - 320, 246, 322, 202, 407, 491, 284, 0, 0, 0, - 0, 0, 696, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 236, 0, 0, 243, 0, 0, 0, 346, - 355, 354, 335, 336, 338, 340, 345, 352, 358, 1294, - 1342, 1384, 1295, 1344, 263, 318, 270, 262, 563, 1395, - 1374, 1258, 1323, 1383, 0, 0, 227, 1386, 1318, 0, - 1347, 0, 1401, 1253, 1338, 0, 1256, 1260, 1397, 1379, - 1289, 273, 0, 0, 0, 0, 0, 0, 0, 1315, - 1325, 1359, 1363, 1309, 0, 0, 0, 0, 0, 0, - 3099, 0, 1287, 0, 1334, 0, 0, 0, 1265, 1257, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1313, 0, 0, 0, 0, 1268, 0, 1288, - 1360, 0, 1251, 295, 1262, 396, 255, 0, 447, 1367, - 1378, 1310, 605, 1382, 1308, 1307, 1354, 1266, 1373, 1301, - 360, 1264, 327, 197, 223, 0, 1299, 406, 455, 467, - 1372, 
1284, 1293, 251, 1291, 465, 420, 583, 231, 282, - 452, 426, 463, 434, 285, 1333, 1352, 464, 367, 568, - 444, 580, 606, 607, 261, 400, 592, 505, 600, 624, - 224, 258, 414, 498, 586, 487, 392, 564, 565, 326, - 486, 293, 201, 364, 612, 222, 473, 366, 240, 229, - 570, 589, 287, 450, 619, 211, 500, 578, 237, 477, - 0, 0, 627, 245, 497, 213, 575, 496, 388, 323, - 324, 212, 0, 451, 266, 291, 0, 0, 256, 409, - 572, 573, 254, 628, 226, 599, 218, 1263, 598, 402, - 567, 576, 389, 378, 217, 574, 387, 377, 331, 350, - 351, 278, 304, 441, 370, 442, 303, 305, 398, 397, - 399, 205, 587, 0, 206, 0, 492, 588, 629, 446, - 210, 232, 233, 235, 1279, 277, 281, 289, 292, 300, - 301, 310, 362, 413, 440, 436, 445, 1368, 562, 581, - 593, 604, 610, 611, 613, 614, 615, 616, 617, 620, - 618, 401, 308, 488, 330, 368, 1357, 1400, 419, 466, - 238, 585, 489, 199, 1273, 1278, 1271, 0, 252, 253, - 1339, 558, 1274, 1272, 1328, 1329, 1275, 1391, 1392, 1393, - 1376, 630, 631, 632, 633, 634, 635, 636, 637, 638, - 639, 640, 641, 642, 643, 644, 645, 646, 647, 625, - 1361, 1267, 0, 1276, 1277, 1370, 1380, 1381, 648, 379, - 479, 582, 332, 344, 347, 337, 356, 0, 357, 333, - 334, 339, 341, 342, 343, 348, 349, 353, 359, 247, - 208, 385, 393, 561, 309, 214, 215, 216, 507, 508, - 509, 510, 596, 597, 601, 456, 457, 458, 459, 290, - 591, 306, 462, 461, 328, 329, 374, 443, 523, 525, - 536, 540, 542, 544, 550, 553, 524, 526, 537, 541, - 543, 545, 551, 554, 513, 515, 517, 519, 532, 531, - 528, 556, 557, 534, 539, 518, 530, 535, 548, 555, - 552, 512, 516, 520, 529, 547, 546, 527, 538, 549, - 533, 521, 514, 522, 1332, 196, 219, 363, 1396, 448, - 286, 626, 595, 590, 204, 221, 1270, 260, 1282, 1290, - 0, 1296, 1304, 1305, 1317, 1319, 1320, 1321, 1322, 0, - 1340, 1341, 1343, 1351, 1353, 1356, 1358, 1365, 1377, 1399, - 198, 200, 207, 220, 230, 234, 241, 259, 274, 276, - 283, 296, 307, 315, 316, 319, 325, 375, 381, 382, - 383, 384, 403, 404, 405, 408, 411, 412, 415, 417, - 418, 421, 425, 429, 430, 431, 433, 435, 437, 449, - 
454, 468, 469, 470, 471, 472, 475, 476, 481, 482, - 483, 484, 485, 493, 494, 499, 569, 571, 584, 602, - 608, 474, 298, 299, 438, 439, 311, 312, 622, 623, - 297, 579, 609, 577, 621, 603, 432, 373, 1331, 1337, - 376, 279, 302, 317, 1346, 594, 495, 225, 460, 288, - 249, 1364, 1366, 209, 244, 228, 257, 272, 275, 321, - 386, 394, 423, 428, 294, 269, 242, 453, 239, 478, - 502, 503, 504, 506, 390, 264, 427, 1327, 1355, 371, - 559, 560, 313, 391, 0, 0, 0, 1385, 1369, 511, - 0, 1312, 1388, 1281, 1300, 1398, 1303, 1306, 1348, 1259, - 1326, 410, 1297, 1252, 1285, 1254, 1292, 1255, 1283, 1314, - 268, 1280, 1371, 1330, 1387, 361, 265, 1261, 1286, 424, - 1302, 203, 1350, 480, 250, 372, 369, 566, 280, 271, - 267, 248, 314, 380, 422, 501, 416, 1394, 365, 1336, - 0, 490, 395, 0, 0, 0, 1316, 1375, 1324, 1362, - 1311, 1349, 1269, 1335, 1389, 1298, 1345, 1390, 320, 246, - 322, 202, 407, 491, 284, 0, 0, 0, 0, 0, - 929, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 236, 0, 0, 243, 0, 0, 0, 346, 355, 354, - 335, 336, 338, 340, 345, 352, 358, 1294, 1342, 1384, - 1295, 1344, 263, 318, 270, 262, 563, 1395, 1374, 1258, - 1323, 1383, 0, 0, 227, 1386, 1318, 0, 1347, 0, - 1401, 1253, 1338, 0, 1256, 1260, 1397, 1379, 1289, 273, - 0, 0, 0, 0, 0, 0, 0, 1315, 1325, 1359, - 1363, 1309, 0, 0, 0, 0, 0, 0, 2319, 0, - 1287, 0, 1334, 0, 0, 0, 1265, 1257, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1313, 0, 0, 0, 0, 1268, 0, 1288, 1360, 0, - 1251, 295, 1262, 396, 255, 0, 447, 1367, 1378, 1310, - 605, 1382, 1308, 1307, 1354, 1266, 1373, 1301, 360, 1264, - 327, 197, 223, 0, 1299, 406, 455, 467, 1372, 1284, - 1293, 251, 1291, 465, 420, 583, 231, 282, 452, 426, - 463, 434, 285, 1333, 1352, 464, 367, 568, 444, 580, - 606, 607, 261, 400, 592, 505, 600, 624, 224, 258, - 414, 498, 586, 487, 392, 564, 565, 326, 486, 293, - 201, 364, 612, 222, 473, 366, 240, 229, 570, 589, - 287, 450, 619, 211, 500, 578, 237, 477, 0, 0, - 627, 245, 497, 213, 
575, 496, 388, 323, 324, 212, - 0, 451, 266, 291, 0, 0, 256, 409, 572, 573, - 254, 628, 226, 599, 218, 1263, 598, 402, 567, 576, - 389, 378, 217, 574, 387, 377, 331, 350, 351, 278, - 304, 441, 370, 442, 303, 305, 398, 397, 399, 205, - 587, 0, 206, 0, 492, 588, 629, 446, 210, 232, - 233, 235, 1279, 277, 281, 289, 292, 300, 301, 310, - 362, 413, 440, 436, 445, 1368, 562, 581, 593, 604, - 610, 611, 613, 614, 615, 616, 617, 620, 618, 401, - 308, 488, 330, 368, 1357, 1400, 419, 466, 238, 585, - 489, 199, 1273, 1278, 1271, 0, 252, 253, 1339, 558, - 1274, 1272, 1328, 1329, 1275, 1391, 1392, 1393, 1376, 630, - 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, - 641, 642, 643, 644, 645, 646, 647, 625, 1361, 1267, - 0, 1276, 1277, 1370, 1380, 1381, 648, 379, 479, 582, - 332, 344, 347, 337, 356, 0, 357, 333, 334, 339, - 341, 342, 343, 348, 349, 353, 359, 247, 208, 385, - 393, 561, 309, 214, 215, 216, 507, 508, 509, 510, - 596, 597, 601, 456, 457, 458, 459, 290, 591, 306, - 462, 461, 328, 329, 374, 443, 523, 525, 536, 540, - 542, 544, 550, 553, 524, 526, 537, 541, 543, 545, - 551, 554, 513, 515, 517, 519, 532, 531, 528, 556, - 557, 534, 539, 518, 530, 535, 548, 555, 552, 512, - 516, 520, 529, 547, 546, 527, 538, 549, 533, 521, - 514, 522, 1332, 196, 219, 363, 1396, 448, 286, 626, - 595, 590, 204, 221, 1270, 260, 1282, 1290, 0, 1296, - 1304, 1305, 1317, 1319, 1320, 1321, 1322, 0, 1340, 1341, - 1343, 1351, 1353, 1356, 1358, 1365, 1377, 1399, 198, 200, - 207, 220, 230, 234, 241, 259, 274, 276, 283, 296, - 307, 315, 316, 319, 325, 375, 381, 382, 383, 384, - 403, 404, 405, 408, 411, 412, 415, 417, 418, 421, - 425, 429, 430, 431, 433, 435, 437, 449, 454, 468, - 469, 470, 471, 472, 475, 476, 481, 482, 483, 484, - 485, 493, 494, 499, 569, 571, 584, 602, 608, 474, - 298, 299, 438, 439, 311, 312, 622, 623, 297, 579, - 609, 577, 621, 603, 432, 373, 1331, 1337, 376, 279, - 302, 317, 1346, 594, 495, 225, 460, 288, 249, 1364, - 1366, 209, 244, 228, 257, 272, 275, 321, 386, 394, - 423, 428, 
294, 269, 242, 453, 239, 478, 502, 503, - 504, 506, 390, 264, 427, 1327, 1355, 371, 559, 560, - 313, 391, 0, 0, 0, 1385, 1369, 511, 0, 1312, - 1388, 1281, 1300, 1398, 1303, 1306, 1348, 1259, 1326, 410, - 1297, 1252, 1285, 1254, 1292, 1255, 1283, 1314, 268, 1280, - 1371, 1330, 1387, 361, 265, 1261, 1286, 424, 1302, 203, - 1350, 480, 250, 372, 369, 566, 280, 271, 267, 248, - 314, 380, 422, 501, 416, 1394, 365, 1336, 0, 490, - 395, 0, 0, 0, 1316, 1375, 1324, 1362, 1311, 1349, - 1269, 1335, 1389, 1298, 1345, 1390, 320, 246, 322, 202, - 407, 491, 284, 0, 94, 0, 0, 0, 696, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 236, 0, - 0, 243, 0, 0, 0, 346, 355, 354, 335, 336, - 338, 340, 345, 352, 358, 1294, 1342, 1384, 1295, 1344, - 263, 318, 270, 262, 563, 1395, 1374, 1258, 1323, 1383, - 0, 0, 227, 1386, 1318, 0, 1347, 0, 1401, 1253, - 1338, 0, 1256, 1260, 1397, 1379, 1289, 273, 0, 0, - 0, 0, 0, 0, 0, 1315, 1325, 1359, 1363, 1309, - 0, 0, 0, 0, 0, 0, 0, 0, 1287, 0, - 1334, 0, 0, 0, 1265, 1257, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1313, 0, - 0, 0, 0, 1268, 0, 1288, 1360, 0, 1251, 295, - 1262, 396, 255, 0, 447, 1367, 1378, 1310, 605, 1382, - 1308, 1307, 1354, 1266, 1373, 1301, 360, 1264, 327, 197, - 223, 0, 1299, 406, 455, 467, 1372, 1284, 1293, 251, - 1291, 465, 420, 583, 231, 282, 452, 426, 463, 434, - 285, 1333, 1352, 464, 367, 568, 444, 580, 606, 607, - 261, 400, 592, 505, 600, 624, 224, 258, 414, 498, - 586, 487, 392, 564, 565, 326, 486, 293, 201, 364, - 612, 222, 473, 366, 240, 229, 570, 589, 287, 450, - 619, 211, 500, 578, 237, 477, 0, 0, 627, 245, - 497, 213, 575, 496, 388, 323, 324, 212, 0, 451, - 266, 291, 0, 0, 256, 409, 572, 573, 254, 628, - 226, 599, 218, 1263, 598, 402, 567, 576, 389, 378, - 217, 574, 387, 377, 331, 350, 351, 278, 304, 441, - 370, 442, 303, 305, 398, 397, 399, 205, 587, 0, - 206, 0, 492, 588, 629, 446, 210, 232, 233, 235, - 1279, 277, 281, 289, 292, 300, 301, 310, 362, 
413, - 440, 436, 445, 1368, 562, 581, 593, 604, 610, 611, - 613, 614, 615, 616, 617, 620, 618, 401, 308, 488, - 330, 368, 1357, 1400, 419, 466, 238, 585, 489, 199, - 1273, 1278, 1271, 0, 252, 253, 1339, 558, 1274, 1272, - 1328, 1329, 1275, 1391, 1392, 1393, 1376, 630, 631, 632, - 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, - 643, 644, 645, 646, 647, 625, 1361, 1267, 0, 1276, - 1277, 1370, 1380, 1381, 648, 379, 479, 582, 332, 344, - 347, 337, 356, 0, 357, 333, 334, 339, 341, 342, - 343, 348, 349, 353, 359, 247, 208, 385, 393, 561, - 309, 214, 215, 216, 507, 508, 509, 510, 596, 597, - 601, 456, 457, 458, 459, 290, 591, 306, 462, 461, - 328, 329, 374, 443, 523, 525, 536, 540, 542, 544, - 550, 553, 524, 526, 537, 541, 543, 545, 551, 554, - 513, 515, 517, 519, 532, 531, 528, 556, 557, 534, - 539, 518, 530, 535, 548, 555, 552, 512, 516, 520, - 529, 547, 546, 527, 538, 549, 533, 521, 514, 522, - 1332, 196, 219, 363, 1396, 448, 286, 626, 595, 590, - 204, 221, 1270, 260, 1282, 1290, 0, 1296, 1304, 1305, - 1317, 1319, 1320, 1321, 1322, 0, 1340, 1341, 1343, 1351, - 1353, 1356, 1358, 1365, 1377, 1399, 198, 200, 207, 220, - 230, 234, 241, 259, 274, 276, 283, 296, 307, 315, - 316, 319, 325, 375, 381, 382, 383, 384, 403, 404, - 405, 408, 411, 412, 415, 417, 418, 421, 425, 429, - 430, 431, 433, 435, 437, 449, 454, 468, 469, 470, - 471, 472, 475, 476, 481, 482, 483, 484, 485, 493, - 494, 499, 569, 571, 584, 602, 608, 474, 298, 299, - 438, 439, 311, 312, 622, 623, 297, 579, 609, 577, - 621, 603, 432, 373, 1331, 1337, 376, 279, 302, 317, - 1346, 594, 495, 225, 460, 288, 249, 1364, 1366, 209, - 244, 228, 257, 272, 275, 321, 386, 394, 423, 428, - 294, 269, 242, 453, 239, 478, 502, 503, 504, 506, - 390, 264, 427, 1327, 1355, 371, 559, 560, 313, 391, - 0, 0, 0, 1385, 1369, 511, 0, 1312, 1388, 1281, - 1300, 1398, 1303, 1306, 1348, 1259, 1326, 410, 1297, 1252, - 1285, 1254, 1292, 1255, 1283, 1314, 268, 1280, 1371, 1330, - 1387, 361, 265, 1261, 1286, 424, 1302, 203, 1350, 480, - 250, 
372, 369, 566, 280, 271, 267, 248, 314, 380, - 422, 501, 416, 1394, 365, 1336, 0, 490, 395, 0, - 0, 0, 1316, 1375, 1324, 1362, 1311, 1349, 1269, 1335, - 1389, 1298, 1345, 1390, 320, 246, 322, 202, 407, 491, - 284, 0, 0, 0, 0, 0, 194, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 236, 0, 0, 243, - 0, 0, 0, 346, 355, 354, 335, 336, 338, 340, - 345, 352, 358, 1294, 1342, 1384, 1295, 1344, 263, 318, - 270, 262, 563, 1395, 1374, 1258, 1323, 1383, 0, 0, - 227, 1386, 1318, 0, 1347, 0, 1401, 1253, 1338, 0, - 1256, 1260, 1397, 1379, 1289, 273, 0, 0, 0, 0, - 0, 0, 0, 1315, 1325, 1359, 1363, 1309, 0, 0, - 0, 0, 0, 0, 0, 0, 1287, 0, 1334, 0, - 0, 0, 1265, 1257, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1313, 0, 0, 0, - 0, 1268, 0, 1288, 1360, 0, 1251, 295, 1262, 396, - 255, 0, 447, 1367, 1378, 1310, 605, 1382, 1308, 1307, - 1354, 1266, 1373, 1301, 360, 1264, 327, 197, 223, 0, - 1299, 406, 455, 467, 1372, 1284, 1293, 251, 1291, 465, - 420, 583, 231, 282, 452, 426, 463, 434, 285, 1333, - 1352, 464, 367, 568, 444, 580, 606, 607, 261, 400, - 592, 505, 600, 624, 224, 258, 414, 498, 586, 487, - 392, 564, 565, 326, 486, 293, 201, 364, 612, 222, - 473, 366, 240, 229, 570, 589, 287, 450, 619, 211, - 500, 578, 237, 477, 0, 0, 627, 245, 497, 213, - 575, 496, 388, 323, 324, 212, 0, 451, 266, 291, - 0, 0, 256, 409, 572, 573, 254, 628, 226, 599, - 218, 1263, 598, 402, 567, 576, 389, 378, 217, 574, - 387, 377, 331, 350, 351, 278, 304, 441, 370, 442, - 303, 305, 398, 397, 399, 205, 587, 0, 206, 0, - 492, 588, 629, 446, 210, 232, 233, 235, 1279, 277, - 281, 289, 292, 300, 301, 310, 362, 413, 440, 436, - 445, 1368, 562, 581, 593, 604, 610, 611, 613, 614, - 615, 616, 617, 620, 618, 401, 308, 488, 330, 368, - 1357, 1400, 419, 466, 238, 585, 489, 199, 1273, 1278, - 1271, 0, 252, 253, 1339, 558, 1274, 1272, 1328, 1329, - 1275, 1391, 1392, 1393, 1376, 630, 631, 632, 633, 634, - 635, 636, 637, 638, 639, 640, 641, 642, 643, 
644, - 645, 646, 647, 625, 1361, 1267, 0, 1276, 1277, 1370, - 1380, 1381, 648, 379, 479, 582, 332, 344, 347, 337, - 356, 0, 357, 333, 334, 339, 341, 342, 343, 348, - 349, 353, 359, 247, 208, 385, 393, 561, 309, 214, - 215, 216, 507, 508, 509, 510, 596, 597, 601, 456, - 457, 458, 459, 290, 591, 306, 462, 461, 328, 329, - 374, 443, 523, 525, 536, 540, 542, 544, 550, 553, - 524, 526, 537, 541, 543, 545, 551, 554, 513, 515, - 517, 519, 532, 531, 528, 556, 557, 534, 539, 518, - 530, 535, 548, 555, 552, 512, 516, 520, 529, 547, - 546, 527, 538, 549, 533, 521, 514, 522, 1332, 196, - 219, 363, 1396, 448, 286, 626, 595, 590, 204, 221, - 1270, 260, 1282, 1290, 0, 1296, 1304, 1305, 1317, 1319, - 1320, 1321, 1322, 0, 1340, 1341, 1343, 1351, 1353, 1356, - 1358, 1365, 1377, 1399, 198, 200, 207, 220, 230, 234, - 241, 259, 274, 276, 283, 296, 307, 315, 316, 319, - 325, 375, 381, 382, 383, 384, 403, 404, 405, 408, - 411, 412, 415, 417, 418, 421, 425, 429, 430, 431, - 433, 435, 437, 449, 454, 468, 469, 470, 471, 472, - 475, 476, 481, 482, 483, 484, 485, 493, 494, 499, - 569, 571, 584, 602, 608, 474, 298, 299, 438, 439, - 311, 312, 622, 623, 297, 579, 609, 577, 621, 603, - 432, 373, 1331, 1337, 376, 279, 302, 317, 1346, 594, - 495, 225, 460, 288, 249, 1364, 1366, 209, 244, 228, - 257, 272, 275, 321, 386, 394, 423, 428, 294, 269, - 242, 453, 239, 478, 502, 503, 504, 506, 390, 264, - 427, 1327, 1355, 371, 559, 560, 313, 391, 0, 0, - 0, 1385, 1369, 511, 0, 1312, 1388, 1281, 1300, 1398, - 1303, 1306, 1348, 1259, 1326, 410, 1297, 1252, 1285, 1254, - 1292, 1255, 1283, 1314, 268, 1280, 1371, 1330, 1387, 361, - 265, 1261, 1286, 424, 1302, 203, 1350, 480, 250, 372, - 369, 566, 280, 271, 267, 248, 314, 380, 422, 501, - 416, 1394, 365, 1336, 0, 490, 395, 0, 0, 0, - 1316, 1375, 1324, 1362, 1311, 1349, 1269, 1335, 1389, 1298, - 1345, 1390, 320, 246, 322, 202, 407, 491, 284, 0, - 0, 0, 0, 0, 696, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 236, 0, 0, 243, 0, 0, - 0, 346, 355, 354, 335, 336, 338, 340, 345, 352, - 
358, 1294, 1342, 1384, 1295, 1344, 263, 318, 270, 262, - 563, 1395, 1374, 1258, 1323, 1383, 0, 0, 227, 1386, - 1318, 0, 1347, 0, 1401, 1253, 1338, 0, 1256, 1260, - 1397, 1379, 1289, 273, 0, 0, 0, 0, 0, 0, - 0, 1315, 1325, 1359, 1363, 1309, 0, 0, 0, 0, - 0, 0, 0, 0, 1287, 0, 1334, 0, 0, 0, - 1265, 1257, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1313, 0, 0, 0, 0, 1268, - 0, 1288, 1360, 0, 1251, 295, 1262, 396, 255, 0, - 447, 1367, 1378, 1310, 605, 1382, 1308, 1307, 1354, 1266, - 1373, 1301, 360, 1264, 327, 197, 223, 0, 1299, 406, - 455, 467, 1372, 1284, 1293, 251, 1291, 465, 420, 583, - 231, 282, 452, 426, 463, 434, 285, 1333, 1352, 464, - 367, 568, 444, 580, 606, 607, 261, 400, 592, 505, - 600, 624, 224, 258, 414, 498, 586, 487, 392, 564, - 565, 326, 486, 293, 201, 364, 612, 222, 473, 366, - 240, 229, 570, 589, 287, 450, 619, 211, 500, 578, - 237, 477, 0, 0, 627, 245, 497, 213, 575, 496, - 388, 323, 324, 212, 0, 451, 266, 291, 0, 0, - 256, 409, 572, 573, 254, 628, 226, 599, 218, 1263, - 598, 402, 567, 576, 389, 378, 217, 574, 387, 377, - 331, 350, 351, 278, 304, 441, 370, 442, 303, 305, - 398, 397, 399, 205, 587, 0, 206, 0, 492, 588, - 629, 446, 210, 232, 233, 235, 1279, 277, 281, 289, - 292, 300, 301, 310, 362, 413, 440, 436, 445, 1368, - 562, 581, 593, 604, 610, 611, 613, 614, 615, 616, - 617, 620, 618, 401, 308, 488, 330, 368, 1357, 1400, - 419, 466, 238, 585, 489, 199, 1273, 1278, 1271, 0, - 252, 253, 1339, 558, 1274, 1272, 1328, 1329, 1275, 1391, - 1392, 1393, 1376, 630, 631, 632, 633, 634, 635, 636, - 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, - 647, 625, 1361, 1267, 0, 1276, 1277, 1370, 1380, 1381, - 648, 379, 479, 582, 332, 344, 347, 337, 356, 0, - 357, 333, 334, 339, 341, 342, 343, 348, 349, 353, - 359, 247, 208, 385, 393, 561, 309, 214, 215, 216, - 507, 508, 509, 510, 596, 597, 601, 456, 457, 458, - 459, 290, 591, 306, 462, 461, 328, 329, 374, 443, - 523, 
525, 536, 540, 542, 544, 550, 553, 524, 526, - 537, 541, 543, 545, 551, 554, 513, 515, 517, 519, - 532, 531, 528, 556, 557, 534, 539, 518, 530, 535, - 548, 555, 552, 512, 516, 520, 529, 547, 546, 527, - 538, 549, 533, 521, 514, 522, 1332, 196, 219, 363, - 1396, 448, 286, 626, 595, 590, 204, 221, 1270, 260, - 1282, 1290, 0, 1296, 1304, 1305, 1317, 1319, 1320, 1321, - 1322, 0, 1340, 1341, 1343, 1351, 1353, 1356, 1358, 1365, - 1377, 1399, 198, 200, 207, 220, 230, 234, 241, 259, - 274, 276, 283, 296, 307, 315, 316, 319, 325, 375, - 381, 382, 383, 384, 403, 404, 405, 408, 411, 412, - 415, 417, 418, 421, 425, 429, 430, 431, 433, 435, - 437, 449, 454, 468, 469, 470, 471, 472, 475, 476, - 481, 482, 483, 484, 485, 493, 494, 499, 569, 571, - 584, 602, 608, 474, 298, 299, 438, 439, 311, 312, - 622, 623, 297, 579, 609, 577, 621, 603, 432, 373, - 1331, 1337, 376, 279, 302, 317, 1346, 594, 495, 225, - 460, 288, 249, 1364, 1366, 209, 244, 228, 257, 272, - 275, 321, 386, 394, 423, 428, 294, 269, 242, 453, - 239, 478, 502, 503, 504, 506, 390, 264, 427, 1327, - 1355, 371, 559, 560, 313, 391, 0, 0, 0, 1385, - 1369, 511, 0, 1312, 1388, 1281, 1300, 1398, 1303, 1306, - 1348, 1259, 1326, 410, 1297, 1252, 1285, 1254, 1292, 1255, - 1283, 1314, 268, 1280, 1371, 1330, 1387, 361, 265, 1261, - 1286, 424, 1302, 203, 1350, 480, 250, 372, 369, 566, - 280, 271, 267, 248, 314, 380, 422, 501, 416, 1394, - 365, 1336, 0, 490, 395, 0, 0, 0, 1316, 1375, - 1324, 1362, 1311, 1349, 1269, 1335, 1389, 1298, 1345, 1390, - 320, 246, 322, 202, 407, 491, 284, 0, 0, 0, - 0, 0, 929, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 236, 0, 0, 243, 0, 0, 0, 346, - 355, 354, 335, 336, 338, 340, 345, 352, 358, 1294, - 1342, 1384, 1295, 1344, 263, 318, 270, 262, 563, 1395, - 1374, 1258, 1323, 1383, 0, 0, 227, 1386, 1318, 0, - 1347, 0, 1401, 1253, 1338, 0, 1256, 1260, 1397, 1379, - 1289, 273, 0, 0, 0, 0, 0, 0, 0, 1315, - 1325, 1359, 1363, 1309, 0, 0, 0, 0, 0, 0, - 0, 0, 1287, 0, 1334, 0, 0, 0, 1265, 1257, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1313, 0, 0, 0, 0, 1268, 0, 1288, - 1360, 0, 1251, 295, 1262, 396, 255, 0, 447, 1367, - 1378, 1310, 605, 1382, 1308, 1307, 1354, 1266, 1373, 1301, - 360, 1264, 327, 197, 223, 0, 1299, 406, 455, 467, - 1372, 1284, 1293, 251, 1291, 465, 420, 583, 231, 282, - 452, 426, 463, 434, 285, 1333, 1352, 464, 367, 568, - 444, 580, 606, 607, 261, 400, 592, 505, 600, 624, - 224, 258, 414, 498, 586, 487, 392, 564, 565, 326, - 486, 293, 201, 364, 612, 222, 473, 366, 240, 229, - 570, 589, 287, 450, 619, 211, 500, 578, 237, 477, - 0, 0, 627, 245, 497, 213, 575, 496, 388, 323, - 324, 212, 0, 451, 266, 291, 0, 0, 256, 409, - 572, 573, 254, 628, 226, 599, 218, 1263, 598, 402, - 567, 576, 389, 378, 217, 574, 387, 377, 331, 350, - 351, 278, 304, 441, 370, 442, 303, 305, 398, 397, - 399, 205, 587, 0, 206, 0, 492, 588, 629, 446, - 210, 232, 233, 235, 1279, 277, 281, 289, 292, 300, - 301, 310, 362, 413, 440, 436, 445, 1368, 562, 581, - 593, 604, 610, 611, 613, 614, 615, 616, 617, 620, - 618, 401, 308, 488, 330, 368, 1357, 1400, 419, 466, - 238, 585, 489, 199, 1273, 1278, 1271, 0, 252, 253, - 1339, 558, 1274, 1272, 1328, 1329, 1275, 1391, 1392, 1393, - 1376, 630, 631, 632, 633, 634, 635, 636, 637, 638, - 639, 640, 641, 642, 643, 644, 645, 646, 647, 625, - 1361, 1267, 0, 1276, 1277, 1370, 1380, 1381, 648, 379, - 479, 582, 332, 344, 347, 337, 356, 0, 357, 333, - 334, 339, 341, 342, 343, 348, 349, 353, 359, 247, - 208, 385, 393, 561, 309, 214, 215, 216, 507, 508, - 509, 510, 596, 597, 601, 456, 457, 458, 459, 290, - 591, 306, 462, 461, 328, 329, 374, 443, 523, 525, - 536, 540, 542, 544, 550, 553, 524, 526, 537, 541, - 543, 545, 551, 554, 513, 515, 517, 519, 532, 531, - 528, 556, 557, 534, 539, 518, 530, 535, 548, 555, - 552, 512, 516, 520, 529, 547, 546, 527, 538, 549, - 533, 521, 514, 522, 1332, 196, 219, 363, 1396, 448, - 286, 626, 595, 590, 204, 221, 1270, 260, 1282, 1290, - 0, 1296, 1304, 
1305, 1317, 1319, 1320, 1321, 1322, 0, - 1340, 1341, 1343, 1351, 1353, 1356, 1358, 1365, 1377, 1399, - 198, 200, 207, 220, 230, 234, 241, 259, 274, 276, - 283, 296, 307, 315, 316, 319, 325, 375, 381, 382, - 383, 384, 403, 404, 405, 408, 411, 412, 415, 417, - 418, 421, 425, 429, 430, 431, 433, 435, 437, 449, - 454, 468, 469, 470, 471, 472, 475, 476, 481, 482, - 483, 484, 485, 493, 494, 499, 569, 571, 584, 602, - 608, 474, 298, 299, 438, 439, 311, 312, 622, 623, - 297, 579, 609, 577, 621, 603, 432, 373, 1331, 1337, - 376, 279, 302, 317, 1346, 594, 495, 225, 460, 288, - 249, 1364, 1366, 209, 244, 228, 257, 272, 275, 321, - 386, 394, 423, 428, 294, 269, 242, 453, 239, 478, - 502, 503, 504, 506, 390, 264, 427, 1327, 1355, 371, - 559, 560, 313, 391, 0, 0, 0, 0, 0, 511, - 0, 750, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 410, 0, 0, 0, 0, 738, 0, 0, 0, - 268, 743, 0, 0, 0, 361, 265, 0, 0, 424, - 0, 203, 0, 480, 250, 372, 369, 566, 280, 271, - 267, 248, 314, 380, 422, 501, 416, 749, 365, 0, - 0, 490, 395, 0, 0, 0, 0, 0, 745, 746, - 0, 0, 0, 0, 0, 0, 0, 0, 320, 246, - 322, 202, 407, 491, 284, 0, 94, 0, 0, 945, - 929, 722, 895, 933, 946, 947, 948, 949, 934, 0, - 236, 935, 936, 243, 937, 0, 894, 779, 781, 780, - 844, 845, 846, 847, 848, 849, 850, 777, 942, 950, - 951, 0, 263, 318, 270, 262, 563, 0, 0, 2142, - 2143, 2144, 0, 0, 227, 0, 0, 0, 0, 0, - 0, 0, 718, 735, 0, 748, 0, 0, 0, 273, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 732, 733, 0, 0, - 0, 0, 889, 0, 734, 0, 0, 742, 952, 953, - 954, 955, 956, 957, 958, 959, 960, 961, 962, 963, - 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1330, 0, 0, 0, 0, 1285, 0, 1305, + 1378, 0, 1268, 298, 1279, 399, 258, 0, 450, 1385, + 1396, 1327, 618, 1398, 1325, 1324, 1372, 1283, 1391, 1318, + 363, 1281, 330, 199, 226, 0, 1316, 409, 458, 470, + 1390, 1301, 1310, 254, 1308, 468, 423, 596, 234, 285, + 455, 429, 466, 437, 288, 1351, 1370, 467, 
370, 579, + 447, 593, 619, 620, 264, 403, 605, 516, 613, 637, + 227, 261, 417, 501, 599, 490, 395, 575, 576, 329, + 489, 296, 203, 367, 625, 225, 476, 369, 243, 232, + 581, 602, 290, 453, 632, 214, 511, 591, 240, 480, + 0, 0, 640, 248, 500, 216, 588, 499, 391, 326, + 327, 215, 0, 454, 269, 294, 0, 0, 259, 412, + 583, 584, 257, 641, 229, 612, 221, 1280, 611, 405, + 578, 589, 392, 381, 220, 587, 390, 380, 334, 353, + 354, 281, 307, 444, 373, 445, 306, 308, 401, 400, + 402, 208, 600, 0, 209, 0, 495, 601, 642, 449, + 213, 235, 236, 238, 1296, 280, 284, 292, 295, 303, + 304, 313, 365, 416, 443, 439, 448, 1386, 573, 594, + 606, 617, 623, 624, 626, 627, 628, 629, 630, 633, + 631, 404, 311, 491, 333, 371, 1375, 1416, 422, 469, + 241, 598, 492, 201, 1290, 1295, 1288, 0, 255, 256, + 1357, 569, 1291, 1289, 1346, 1347, 1292, 1407, 1408, 1409, + 1394, 643, 644, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 659, 660, 638, + 502, 508, 503, 504, 505, 506, 507, 0, 509, 1379, + 1284, 0, 1293, 1294, 1388, 585, 586, 661, 382, 482, + 595, 335, 347, 350, 340, 359, 0, 360, 336, 337, + 342, 344, 345, 346, 351, 352, 356, 362, 250, 211, + 388, 396, 572, 312, 217, 218, 219, 518, 519, 520, + 521, 609, 610, 614, 206, 459, 460, 461, 462, 293, + 604, 309, 465, 464, 331, 332, 377, 446, 534, 536, + 547, 551, 553, 555, 561, 564, 535, 537, 548, 552, + 554, 556, 562, 565, 524, 526, 528, 530, 543, 542, + 539, 567, 568, 545, 550, 529, 541, 546, 559, 566, + 563, 523, 527, 531, 540, 558, 557, 538, 549, 560, + 544, 532, 525, 533, 1350, 198, 222, 366, 1412, 451, + 289, 639, 608, 603, 207, 224, 1287, 263, 1299, 1307, + 0, 1313, 1321, 1322, 1334, 1337, 1338, 1339, 1340, 0, + 1358, 1359, 1361, 1369, 1371, 1374, 1376, 1383, 1395, 1415, + 200, 202, 210, 223, 233, 237, 244, 262, 277, 279, + 286, 299, 310, 318, 319, 322, 328, 378, 384, 385, + 386, 387, 406, 407, 408, 411, 414, 415, 418, 420, + 421, 424, 428, 432, 433, 434, 436, 438, 440, 452, + 457, 471, 472, 473, 474, 475, 478, 479, 
484, 485, + 486, 487, 488, 496, 497, 510, 580, 582, 597, 615, + 621, 477, 301, 302, 441, 442, 314, 315, 635, 636, + 300, 592, 622, 590, 634, 616, 435, 376, 1349, 1355, + 379, 282, 305, 320, 1364, 607, 498, 228, 463, 291, + 252, 1382, 1384, 212, 247, 231, 260, 275, 278, 324, + 389, 397, 426, 431, 297, 272, 245, 456, 242, 481, + 513, 514, 515, 517, 393, 267, 430, 1345, 1373, 374, + 570, 571, 316, 394, 0, 0, 0, 1401, 1387, 522, + 0, 1329, 1404, 1298, 1317, 1414, 1320, 1323, 1366, 1276, + 1344, 413, 1314, 1269, 1302, 1271, 1309, 1272, 1300, 1331, + 271, 1297, 1389, 1348, 1403, 364, 268, 1278, 1303, 427, + 1319, 205, 1368, 483, 253, 375, 372, 577, 283, 274, + 270, 251, 317, 383, 425, 512, 419, 1410, 368, 1354, + 0, 493, 398, 0, 0, 0, 1333, 1393, 1342, 1380, + 1328, 1367, 1286, 1353, 1405, 1315, 1363, 1406, 323, 249, + 325, 204, 410, 494, 287, 0, 0, 0, 0, 0, + 196, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 239, 0, 0, 246, 0, 0, 0, 349, 358, 357, + 338, 339, 341, 343, 348, 355, 361, 1311, 1360, 1400, + 1312, 1362, 266, 321, 273, 265, 574, 1411, 1392, 1275, + 1341, 1399, 1336, 0, 0, 230, 1402, 1335, 0, 1365, + 0, 1417, 1270, 1356, 0, 1273, 1277, 1413, 1397, 1306, + 276, 0, 0, 0, 0, 0, 0, 0, 1332, 1343, + 1377, 1381, 1326, 0, 0, 0, 0, 0, 0, 3180, + 0, 1304, 0, 1352, 0, 0, 0, 1282, 1274, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1330, 0, 0, 0, 0, 1285, 0, 1305, 1378, + 0, 1268, 298, 1279, 399, 258, 0, 450, 1385, 1396, + 1327, 618, 1398, 1325, 1324, 1372, 1283, 1391, 1318, 363, + 1281, 330, 199, 226, 0, 1316, 409, 458, 470, 1390, + 1301, 1310, 254, 1308, 468, 423, 596, 234, 285, 455, + 429, 466, 437, 288, 1351, 1370, 467, 370, 579, 447, + 593, 619, 620, 264, 403, 605, 516, 613, 637, 227, + 261, 417, 501, 599, 490, 395, 575, 576, 329, 489, + 296, 203, 367, 625, 225, 476, 369, 243, 232, 581, + 602, 290, 453, 632, 214, 511, 591, 240, 480, 0, + 0, 640, 248, 500, 216, 588, 499, 391, 326, 327, + 215, 
0, 454, 269, 294, 0, 0, 259, 412, 583, + 584, 257, 641, 229, 612, 221, 1280, 611, 405, 578, + 589, 392, 381, 220, 587, 390, 380, 334, 353, 354, + 281, 307, 444, 373, 445, 306, 308, 401, 400, 402, + 208, 600, 0, 209, 0, 495, 601, 642, 449, 213, + 235, 236, 238, 1296, 280, 284, 292, 295, 303, 304, + 313, 365, 416, 443, 439, 448, 1386, 573, 594, 606, + 617, 623, 624, 626, 627, 628, 629, 630, 633, 631, + 404, 311, 491, 333, 371, 1375, 1416, 422, 469, 241, + 598, 492, 201, 1290, 1295, 1288, 0, 255, 256, 1357, + 569, 1291, 1289, 1346, 1347, 1292, 1407, 1408, 1409, 1394, + 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, + 653, 654, 655, 656, 657, 658, 659, 660, 638, 502, + 508, 503, 504, 505, 506, 507, 0, 509, 1379, 1284, + 0, 1293, 1294, 1388, 585, 586, 661, 382, 482, 595, + 335, 347, 350, 340, 359, 0, 360, 336, 337, 342, + 344, 345, 346, 351, 352, 356, 362, 250, 211, 388, + 396, 572, 312, 217, 218, 219, 518, 519, 520, 521, + 609, 610, 614, 206, 459, 460, 461, 462, 293, 604, + 309, 465, 464, 331, 332, 377, 446, 534, 536, 547, + 551, 553, 555, 561, 564, 535, 537, 548, 552, 554, + 556, 562, 565, 524, 526, 528, 530, 543, 542, 539, + 567, 568, 545, 550, 529, 541, 546, 559, 566, 563, + 523, 527, 531, 540, 558, 557, 538, 549, 560, 544, + 532, 525, 533, 1350, 198, 222, 366, 1412, 451, 289, + 639, 608, 603, 207, 224, 1287, 263, 1299, 1307, 0, + 1313, 1321, 1322, 1334, 1337, 1338, 1339, 1340, 0, 1358, + 1359, 1361, 1369, 1371, 1374, 1376, 1383, 1395, 1415, 200, + 202, 210, 223, 233, 237, 244, 262, 277, 279, 286, + 299, 310, 318, 319, 322, 328, 378, 384, 385, 386, + 387, 406, 407, 408, 411, 414, 415, 418, 420, 421, + 424, 428, 432, 433, 434, 436, 438, 440, 452, 457, + 471, 472, 473, 474, 475, 478, 479, 484, 485, 486, + 487, 488, 496, 497, 510, 580, 582, 597, 615, 621, + 477, 301, 302, 441, 442, 314, 315, 635, 636, 300, + 592, 622, 590, 634, 616, 435, 376, 1349, 1355, 379, + 282, 305, 320, 1364, 607, 498, 228, 463, 291, 252, + 1382, 1384, 212, 247, 231, 260, 275, 278, 324, 389, + 
397, 426, 431, 297, 272, 245, 456, 242, 481, 513, + 514, 515, 517, 393, 267, 430, 1345, 1373, 374, 570, + 571, 316, 394, 0, 0, 0, 1401, 1387, 522, 0, + 1329, 1404, 1298, 1317, 1414, 1320, 1323, 1366, 1276, 1344, + 413, 1314, 1269, 1302, 1271, 1309, 1272, 1300, 1331, 271, + 1297, 1389, 1348, 1403, 364, 268, 1278, 1303, 427, 1319, + 205, 1368, 483, 253, 375, 372, 577, 283, 274, 270, + 251, 317, 383, 425, 512, 419, 1410, 368, 1354, 0, + 493, 398, 0, 0, 0, 1333, 1393, 1342, 1380, 1328, + 1367, 1286, 1353, 1405, 1315, 1363, 1406, 323, 249, 325, + 204, 410, 494, 287, 0, 0, 0, 0, 0, 711, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 239, + 0, 0, 246, 0, 0, 0, 349, 358, 357, 338, + 339, 341, 343, 348, 355, 361, 1311, 1360, 1400, 1312, + 1362, 266, 321, 273, 265, 574, 1411, 1392, 1275, 1341, + 1399, 1336, 0, 0, 230, 1402, 1335, 0, 1365, 0, + 1417, 1270, 1356, 0, 1273, 1277, 1413, 1397, 1306, 276, + 0, 0, 0, 0, 0, 0, 0, 1332, 1343, 1377, + 1381, 1326, 0, 0, 0, 0, 0, 0, 3140, 0, + 1304, 0, 1352, 0, 0, 0, 1282, 1274, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1330, 0, 0, 0, 0, 1285, 0, 1305, 1378, 0, + 1268, 298, 1279, 399, 258, 0, 450, 1385, 1396, 1327, + 618, 1398, 1325, 1324, 1372, 1283, 1391, 1318, 363, 1281, + 330, 199, 226, 0, 1316, 409, 458, 470, 1390, 1301, + 1310, 254, 1308, 468, 423, 596, 234, 285, 455, 429, + 466, 437, 288, 1351, 1370, 467, 370, 579, 447, 593, + 619, 620, 264, 403, 605, 516, 613, 637, 227, 261, + 417, 501, 599, 490, 395, 575, 576, 329, 489, 296, + 203, 367, 625, 225, 476, 369, 243, 232, 581, 602, + 290, 453, 632, 214, 511, 591, 240, 480, 0, 0, + 640, 248, 500, 216, 588, 499, 391, 326, 327, 215, + 0, 454, 269, 294, 0, 0, 259, 412, 583, 584, + 257, 641, 229, 612, 221, 1280, 611, 405, 578, 589, + 392, 381, 220, 587, 390, 380, 334, 353, 354, 281, + 307, 444, 373, 445, 306, 308, 401, 400, 402, 208, + 600, 0, 209, 0, 495, 601, 642, 449, 213, 235, + 236, 238, 1296, 280, 284, 292, 
295, 303, 304, 313, + 365, 416, 443, 439, 448, 1386, 573, 594, 606, 617, + 623, 624, 626, 627, 628, 629, 630, 633, 631, 404, + 311, 491, 333, 371, 1375, 1416, 422, 469, 241, 598, + 492, 201, 1290, 1295, 1288, 0, 255, 256, 1357, 569, + 1291, 1289, 1346, 1347, 1292, 1407, 1408, 1409, 1394, 643, + 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 659, 660, 638, 502, 508, + 503, 504, 505, 506, 507, 0, 509, 1379, 1284, 0, + 1293, 1294, 1388, 585, 586, 661, 382, 482, 595, 335, + 347, 350, 340, 359, 0, 360, 336, 337, 342, 344, + 345, 346, 351, 352, 356, 362, 250, 211, 388, 396, + 572, 312, 217, 218, 219, 518, 519, 520, 521, 609, + 610, 614, 206, 459, 460, 461, 462, 293, 604, 309, + 465, 464, 331, 332, 377, 446, 534, 536, 547, 551, + 553, 555, 561, 564, 535, 537, 548, 552, 554, 556, + 562, 565, 524, 526, 528, 530, 543, 542, 539, 567, + 568, 545, 550, 529, 541, 546, 559, 566, 563, 523, + 527, 531, 540, 558, 557, 538, 549, 560, 544, 532, + 525, 533, 1350, 198, 222, 366, 1412, 451, 289, 639, + 608, 603, 207, 224, 1287, 263, 1299, 1307, 0, 1313, + 1321, 1322, 1334, 1337, 1338, 1339, 1340, 0, 1358, 1359, + 1361, 1369, 1371, 1374, 1376, 1383, 1395, 1415, 200, 202, + 210, 223, 233, 237, 244, 262, 277, 279, 286, 299, + 310, 318, 319, 322, 328, 378, 384, 385, 386, 387, + 406, 407, 408, 411, 414, 415, 418, 420, 421, 424, + 428, 432, 433, 434, 436, 438, 440, 452, 457, 471, + 472, 473, 474, 475, 478, 479, 484, 485, 486, 487, + 488, 496, 497, 510, 580, 582, 597, 615, 621, 477, + 301, 302, 441, 442, 314, 315, 635, 636, 300, 592, + 622, 590, 634, 616, 435, 376, 1349, 1355, 379, 282, + 305, 320, 1364, 607, 498, 228, 463, 291, 252, 1382, + 1384, 212, 247, 231, 260, 275, 278, 324, 389, 397, + 426, 431, 297, 272, 245, 456, 242, 481, 513, 514, + 515, 517, 393, 267, 430, 1345, 1373, 374, 570, 571, + 316, 394, 0, 0, 0, 1401, 1387, 522, 0, 1329, + 1404, 1298, 1317, 1414, 1320, 1323, 1366, 1276, 1344, 413, + 1314, 1269, 1302, 1271, 1309, 1272, 1300, 1331, 271, 1297, + 
1389, 1348, 1403, 364, 268, 1278, 1303, 427, 1319, 205, + 1368, 483, 253, 375, 372, 577, 283, 274, 270, 251, + 317, 383, 425, 512, 419, 1410, 368, 1354, 0, 493, + 398, 0, 0, 0, 1333, 1393, 1342, 1380, 1328, 1367, + 1286, 1353, 1405, 1315, 1363, 1406, 323, 249, 325, 204, + 410, 494, 287, 0, 0, 0, 0, 0, 943, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 239, 0, + 0, 246, 0, 0, 0, 349, 358, 357, 338, 339, + 341, 343, 348, 355, 361, 1311, 1360, 1400, 1312, 1362, + 266, 321, 273, 265, 574, 1411, 1392, 1275, 1341, 1399, + 1336, 0, 0, 230, 1402, 1335, 0, 1365, 0, 1417, + 1270, 1356, 0, 1273, 1277, 1413, 1397, 1306, 276, 0, + 0, 0, 0, 0, 0, 0, 1332, 1343, 1377, 1381, + 1326, 0, 0, 0, 0, 0, 0, 2357, 0, 1304, + 0, 1352, 0, 0, 0, 1282, 1274, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1330, + 0, 0, 0, 0, 1285, 0, 1305, 1378, 0, 1268, + 298, 1279, 399, 258, 0, 450, 1385, 1396, 1327, 618, + 1398, 1325, 1324, 1372, 1283, 1391, 1318, 363, 1281, 330, + 199, 226, 0, 1316, 409, 458, 470, 1390, 1301, 1310, + 254, 1308, 468, 423, 596, 234, 285, 455, 429, 466, + 437, 288, 1351, 1370, 467, 370, 579, 447, 593, 619, + 620, 264, 403, 605, 516, 613, 637, 227, 261, 417, + 501, 599, 490, 395, 575, 576, 329, 489, 296, 203, + 367, 625, 225, 476, 369, 243, 232, 581, 602, 290, + 453, 632, 214, 511, 591, 240, 480, 0, 0, 640, + 248, 500, 216, 588, 499, 391, 326, 327, 215, 0, + 454, 269, 294, 0, 0, 259, 412, 583, 584, 257, + 641, 229, 612, 221, 1280, 611, 405, 578, 589, 392, + 381, 220, 587, 390, 380, 334, 353, 354, 281, 307, + 444, 373, 445, 306, 308, 401, 400, 402, 208, 600, + 0, 209, 0, 495, 601, 642, 449, 213, 235, 236, + 238, 1296, 280, 284, 292, 295, 303, 304, 313, 365, + 416, 443, 439, 448, 1386, 573, 594, 606, 617, 623, + 624, 626, 627, 628, 629, 630, 633, 631, 404, 311, + 491, 333, 371, 1375, 1416, 422, 469, 241, 598, 492, + 201, 1290, 1295, 1288, 0, 255, 256, 1357, 569, 1291, + 1289, 1346, 1347, 1292, 1407, 1408, 
1409, 1394, 643, 644, + 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 659, 660, 638, 502, 508, 503, + 504, 505, 506, 507, 0, 509, 1379, 1284, 0, 1293, + 1294, 1388, 585, 586, 661, 382, 482, 595, 335, 347, + 350, 340, 359, 0, 360, 336, 337, 342, 344, 345, + 346, 351, 352, 356, 362, 250, 211, 388, 396, 572, + 312, 217, 218, 219, 518, 519, 520, 521, 609, 610, + 614, 206, 459, 460, 461, 462, 293, 604, 309, 465, + 464, 331, 332, 377, 446, 534, 536, 547, 551, 553, + 555, 561, 564, 535, 537, 548, 552, 554, 556, 562, + 565, 524, 526, 528, 530, 543, 542, 539, 567, 568, + 545, 550, 529, 541, 546, 559, 566, 563, 523, 527, + 531, 540, 558, 557, 538, 549, 560, 544, 532, 525, + 533, 1350, 198, 222, 366, 1412, 451, 289, 639, 608, + 603, 207, 224, 1287, 263, 1299, 1307, 0, 1313, 1321, + 1322, 1334, 1337, 1338, 1339, 1340, 0, 1358, 1359, 1361, + 1369, 1371, 1374, 1376, 1383, 1395, 1415, 200, 202, 210, + 223, 233, 237, 244, 262, 277, 279, 286, 299, 310, + 318, 319, 322, 328, 378, 384, 385, 386, 387, 406, + 407, 408, 411, 414, 415, 418, 420, 421, 424, 428, + 432, 433, 434, 436, 438, 440, 452, 457, 471, 472, + 473, 474, 475, 478, 479, 484, 485, 486, 487, 488, + 496, 497, 510, 580, 582, 597, 615, 621, 477, 301, + 302, 441, 442, 314, 315, 635, 636, 300, 592, 622, + 590, 634, 616, 435, 376, 1349, 1355, 379, 282, 305, + 320, 1364, 607, 498, 228, 463, 291, 252, 1382, 1384, + 212, 247, 231, 260, 275, 278, 324, 389, 397, 426, + 431, 297, 272, 245, 456, 242, 481, 513, 514, 515, + 517, 393, 267, 430, 1345, 1373, 374, 570, 571, 316, + 394, 0, 0, 0, 1401, 1387, 522, 0, 1329, 1404, + 1298, 1317, 1414, 1320, 1323, 1366, 1276, 1344, 413, 1314, + 1269, 1302, 1271, 1309, 1272, 1300, 1331, 271, 1297, 1389, + 1348, 1403, 364, 268, 1278, 1303, 427, 1319, 205, 1368, + 483, 253, 375, 372, 577, 283, 274, 270, 251, 317, + 383, 425, 512, 419, 1410, 368, 1354, 0, 493, 398, + 0, 0, 0, 1333, 1393, 1342, 1380, 1328, 1367, 1286, + 1353, 1405, 1315, 1363, 1406, 323, 249, 325, 204, 410, + 
494, 287, 0, 96, 0, 0, 0, 711, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 239, 0, 0, + 246, 0, 0, 0, 349, 358, 357, 338, 339, 341, + 343, 348, 355, 361, 1311, 1360, 1400, 1312, 1362, 266, + 321, 273, 265, 574, 1411, 1392, 1275, 1341, 1399, 1336, + 0, 0, 230, 1402, 1335, 0, 1365, 0, 1417, 1270, + 1356, 0, 1273, 1277, 1413, 1397, 1306, 276, 0, 0, + 0, 0, 0, 0, 0, 1332, 1343, 1377, 1381, 1326, + 0, 0, 0, 0, 0, 0, 0, 0, 1304, 0, + 1352, 0, 0, 0, 1282, 1274, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1330, 0, + 0, 0, 0, 1285, 0, 1305, 1378, 0, 1268, 298, + 1279, 399, 258, 0, 450, 1385, 1396, 1327, 618, 1398, + 1325, 1324, 1372, 1283, 1391, 1318, 363, 1281, 330, 199, + 226, 0, 1316, 409, 458, 470, 1390, 1301, 1310, 254, + 1308, 468, 423, 596, 234, 285, 455, 429, 466, 437, + 288, 1351, 1370, 467, 370, 579, 447, 593, 619, 620, + 264, 403, 605, 516, 613, 637, 227, 261, 417, 501, + 599, 490, 395, 575, 576, 329, 489, 296, 203, 367, + 625, 225, 476, 369, 243, 232, 581, 602, 290, 453, + 632, 214, 511, 591, 240, 480, 0, 0, 640, 248, + 500, 216, 588, 499, 391, 326, 327, 215, 0, 454, + 269, 294, 0, 0, 259, 412, 583, 584, 257, 641, + 229, 612, 221, 1280, 611, 405, 578, 589, 392, 381, + 220, 587, 390, 380, 334, 353, 354, 281, 307, 444, + 373, 445, 306, 308, 401, 400, 402, 208, 600, 0, + 209, 0, 495, 601, 642, 449, 213, 235, 236, 238, + 1296, 280, 284, 292, 295, 303, 304, 313, 365, 416, + 443, 439, 448, 1386, 573, 594, 606, 617, 623, 624, + 626, 627, 628, 629, 630, 633, 631, 404, 311, 491, + 333, 371, 1375, 1416, 422, 469, 241, 598, 492, 201, + 1290, 1295, 1288, 0, 255, 256, 1357, 569, 1291, 1289, + 1346, 1347, 1292, 1407, 1408, 1409, 1394, 643, 644, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 659, 660, 638, 502, 508, 503, 504, + 505, 506, 507, 0, 509, 1379, 1284, 0, 1293, 1294, + 1388, 585, 586, 661, 382, 482, 595, 335, 347, 350, + 340, 359, 0, 360, 336, 337, 342, 344, 345, 
346, + 351, 352, 356, 362, 250, 211, 388, 396, 572, 312, + 217, 218, 219, 518, 519, 520, 521, 609, 610, 614, + 206, 459, 460, 461, 462, 293, 604, 309, 465, 464, + 331, 332, 377, 446, 534, 536, 547, 551, 553, 555, + 561, 564, 535, 537, 548, 552, 554, 556, 562, 565, + 524, 526, 528, 530, 543, 542, 539, 567, 568, 545, + 550, 529, 541, 546, 559, 566, 563, 523, 527, 531, + 540, 558, 557, 538, 549, 560, 544, 532, 525, 533, + 1350, 198, 222, 366, 1412, 451, 289, 639, 608, 603, + 207, 224, 1287, 263, 1299, 1307, 0, 1313, 1321, 1322, + 1334, 1337, 1338, 1339, 1340, 0, 1358, 1359, 1361, 1369, + 1371, 1374, 1376, 1383, 1395, 1415, 200, 202, 210, 223, + 233, 237, 244, 262, 277, 279, 286, 299, 310, 318, + 319, 322, 328, 378, 384, 385, 386, 387, 406, 407, + 408, 411, 414, 415, 418, 420, 421, 424, 428, 432, + 433, 434, 436, 438, 440, 452, 457, 471, 472, 473, + 474, 475, 478, 479, 484, 485, 486, 487, 488, 496, + 497, 510, 580, 582, 597, 615, 621, 477, 301, 302, + 441, 442, 314, 315, 635, 636, 300, 592, 622, 590, + 634, 616, 435, 376, 1349, 1355, 379, 282, 305, 320, + 1364, 607, 498, 228, 463, 291, 252, 1382, 1384, 212, + 247, 231, 260, 275, 278, 324, 389, 397, 426, 431, + 297, 272, 245, 456, 242, 481, 513, 514, 515, 517, + 393, 267, 430, 1345, 1373, 374, 570, 571, 316, 394, + 0, 0, 0, 1401, 1387, 522, 0, 1329, 1404, 1298, + 1317, 1414, 1320, 1323, 1366, 1276, 1344, 413, 1314, 1269, + 1302, 1271, 1309, 1272, 1300, 1331, 271, 1297, 1389, 1348, + 1403, 364, 268, 1278, 1303, 427, 1319, 205, 1368, 483, + 253, 375, 372, 577, 283, 274, 270, 251, 317, 383, + 425, 512, 419, 1410, 368, 1354, 0, 493, 398, 0, + 0, 0, 1333, 1393, 1342, 1380, 1328, 1367, 1286, 1353, + 1405, 1315, 1363, 1406, 323, 249, 325, 204, 410, 494, + 287, 0, 0, 0, 0, 0, 196, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 239, 0, 0, 246, + 0, 0, 0, 349, 358, 357, 338, 339, 341, 343, + 348, 355, 361, 1311, 1360, 1400, 1312, 1362, 266, 321, + 273, 265, 574, 1411, 1392, 1275, 1341, 1399, 1336, 0, + 0, 230, 1402, 1335, 0, 1365, 0, 1417, 1270, 
1356, + 0, 1273, 1277, 1413, 1397, 1306, 276, 0, 0, 0, + 0, 0, 0, 0, 1332, 1343, 1377, 1381, 1326, 0, + 0, 0, 0, 0, 0, 0, 0, 1304, 0, 1352, + 0, 0, 0, 1282, 1274, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1330, 0, 0, + 0, 0, 1285, 0, 1305, 1378, 0, 1268, 298, 1279, + 399, 258, 0, 450, 1385, 1396, 1327, 618, 1398, 1325, + 1324, 1372, 1283, 1391, 1318, 363, 1281, 330, 199, 226, + 0, 1316, 409, 458, 470, 1390, 1301, 1310, 254, 1308, + 468, 423, 596, 234, 285, 455, 429, 466, 437, 288, + 1351, 1370, 467, 370, 579, 447, 593, 619, 620, 264, + 403, 605, 516, 613, 637, 227, 261, 417, 501, 599, + 490, 395, 575, 576, 329, 489, 296, 203, 367, 625, + 225, 476, 369, 243, 232, 581, 602, 290, 453, 632, + 214, 511, 591, 240, 480, 0, 0, 640, 248, 500, + 216, 588, 499, 391, 326, 327, 215, 0, 454, 269, + 294, 0, 0, 259, 412, 583, 584, 257, 641, 229, + 612, 221, 1280, 611, 405, 578, 589, 392, 381, 220, + 587, 390, 380, 334, 353, 354, 281, 307, 444, 373, + 445, 306, 308, 401, 400, 402, 208, 600, 0, 209, + 0, 495, 601, 642, 449, 213, 235, 236, 238, 1296, + 280, 284, 292, 295, 303, 304, 313, 365, 416, 443, + 439, 448, 1386, 573, 594, 606, 617, 623, 624, 626, + 627, 628, 629, 630, 633, 631, 404, 311, 491, 333, + 371, 1375, 1416, 422, 469, 241, 598, 492, 201, 1290, + 1295, 1288, 0, 255, 256, 1357, 569, 1291, 1289, 1346, + 1347, 1292, 1407, 1408, 1409, 1394, 643, 644, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 638, 502, 508, 503, 504, 505, + 506, 507, 0, 509, 1379, 1284, 0, 1293, 1294, 1388, + 585, 586, 661, 382, 482, 595, 335, 347, 350, 340, + 359, 0, 360, 336, 337, 342, 344, 345, 346, 351, + 352, 356, 362, 250, 211, 388, 396, 572, 312, 217, + 218, 219, 518, 519, 520, 521, 609, 610, 614, 206, + 459, 460, 461, 462, 293, 604, 309, 465, 464, 331, + 332, 377, 446, 534, 536, 547, 551, 553, 555, 561, + 564, 535, 537, 548, 552, 554, 556, 562, 565, 524, + 526, 528, 
530, 543, 542, 539, 567, 568, 545, 550, + 529, 541, 546, 559, 566, 563, 523, 527, 531, 540, + 558, 557, 538, 549, 560, 544, 532, 525, 533, 1350, + 198, 222, 366, 1412, 451, 289, 639, 608, 603, 207, + 224, 1287, 263, 1299, 1307, 0, 1313, 1321, 1322, 1334, + 1337, 1338, 1339, 1340, 0, 1358, 1359, 1361, 1369, 1371, + 1374, 1376, 1383, 1395, 1415, 200, 202, 210, 223, 233, + 237, 244, 262, 277, 279, 286, 299, 310, 318, 319, + 322, 328, 378, 384, 385, 386, 387, 406, 407, 408, + 411, 414, 415, 418, 420, 421, 424, 428, 432, 433, + 434, 436, 438, 440, 452, 457, 471, 472, 473, 474, + 475, 478, 479, 484, 485, 486, 487, 488, 496, 497, + 510, 580, 582, 597, 615, 621, 477, 301, 302, 441, + 442, 314, 315, 635, 636, 300, 592, 622, 590, 634, + 616, 435, 376, 1349, 1355, 379, 282, 305, 320, 1364, + 607, 498, 228, 463, 291, 252, 1382, 1384, 212, 247, + 231, 260, 275, 278, 324, 389, 397, 426, 431, 297, + 272, 245, 456, 242, 481, 513, 514, 515, 517, 393, + 267, 430, 1345, 1373, 374, 570, 571, 316, 394, 0, + 0, 0, 1401, 1387, 522, 0, 1329, 1404, 1298, 1317, + 1414, 1320, 1323, 1366, 1276, 1344, 413, 1314, 1269, 1302, + 1271, 1309, 1272, 1300, 1331, 271, 1297, 1389, 1348, 1403, + 364, 268, 1278, 1303, 427, 1319, 205, 1368, 483, 253, + 375, 372, 577, 283, 274, 270, 251, 317, 383, 425, + 512, 419, 1410, 368, 1354, 0, 493, 398, 0, 0, + 0, 1333, 1393, 1342, 1380, 1328, 1367, 1286, 1353, 1405, + 1315, 1363, 1406, 323, 249, 325, 204, 410, 494, 287, + 0, 0, 0, 0, 0, 711, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 239, 0, 0, 246, 0, + 0, 0, 349, 358, 357, 338, 339, 341, 343, 348, + 355, 361, 1311, 1360, 1400, 1312, 1362, 266, 321, 273, + 265, 574, 1411, 1392, 1275, 1341, 1399, 1336, 0, 0, + 230, 1402, 1335, 0, 1365, 0, 1417, 1270, 1356, 0, + 1273, 1277, 1413, 1397, 1306, 276, 0, 0, 0, 0, + 0, 0, 0, 1332, 1343, 1377, 1381, 1326, 0, 0, + 0, 0, 0, 0, 0, 0, 1304, 0, 1352, 0, + 0, 0, 1282, 1274, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 
0, 0, 0, 0, 0, 1330, 0, 0, 0, + 0, 1285, 0, 1305, 1378, 0, 1268, 298, 1279, 399, + 258, 0, 450, 1385, 1396, 1327, 618, 1398, 1325, 1324, + 1372, 1283, 1391, 1318, 363, 1281, 330, 199, 226, 0, + 1316, 409, 458, 470, 1390, 1301, 1310, 254, 1308, 468, + 423, 596, 234, 285, 455, 429, 466, 437, 288, 1351, + 1370, 467, 370, 579, 447, 593, 619, 620, 264, 403, + 605, 516, 613, 637, 227, 261, 417, 501, 599, 490, + 395, 575, 576, 329, 489, 296, 203, 367, 625, 225, + 476, 369, 243, 232, 581, 602, 290, 453, 632, 214, + 511, 591, 240, 480, 0, 0, 640, 248, 500, 216, + 588, 499, 391, 326, 327, 215, 0, 454, 269, 294, + 0, 0, 259, 412, 583, 584, 257, 641, 229, 612, + 221, 1280, 611, 405, 578, 589, 392, 381, 220, 587, + 390, 380, 334, 353, 354, 281, 307, 444, 373, 445, + 306, 308, 401, 400, 402, 208, 600, 0, 209, 0, + 495, 601, 642, 449, 213, 235, 236, 238, 1296, 280, + 284, 292, 295, 303, 304, 313, 365, 416, 443, 439, + 448, 1386, 573, 594, 606, 617, 623, 624, 626, 627, + 628, 629, 630, 633, 631, 404, 311, 491, 333, 371, + 1375, 1416, 422, 469, 241, 598, 492, 201, 1290, 1295, + 1288, 0, 255, 256, 1357, 569, 1291, 1289, 1346, 1347, + 1292, 1407, 1408, 1409, 1394, 643, 644, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 638, 502, 508, 503, 504, 505, 506, + 507, 0, 509, 1379, 1284, 0, 1293, 1294, 1388, 585, + 586, 661, 382, 482, 595, 335, 347, 350, 340, 359, + 0, 360, 336, 337, 342, 344, 345, 346, 351, 352, + 356, 362, 250, 211, 388, 396, 572, 312, 217, 218, + 219, 518, 519, 520, 521, 609, 610, 614, 206, 459, + 460, 461, 462, 293, 604, 309, 465, 464, 331, 332, + 377, 446, 534, 536, 547, 551, 553, 555, 561, 564, + 535, 537, 548, 552, 554, 556, 562, 565, 524, 526, + 528, 530, 543, 542, 539, 567, 568, 545, 550, 529, + 541, 546, 559, 566, 563, 523, 527, 531, 540, 558, + 557, 538, 549, 560, 544, 532, 525, 533, 1350, 198, + 222, 366, 1412, 451, 289, 639, 608, 603, 207, 224, + 1287, 263, 1299, 1307, 0, 1313, 1321, 1322, 1334, 1337, + 1338, 1339, 1340, 0, 
1358, 1359, 1361, 1369, 1371, 1374, + 1376, 1383, 1395, 1415, 200, 202, 210, 223, 233, 237, + 244, 262, 277, 279, 286, 299, 310, 318, 319, 322, + 328, 378, 384, 385, 386, 387, 406, 407, 408, 411, + 414, 415, 418, 420, 421, 424, 428, 432, 433, 434, + 436, 438, 440, 452, 457, 471, 472, 473, 474, 475, + 478, 479, 484, 485, 486, 487, 488, 496, 497, 510, + 580, 582, 597, 615, 621, 477, 301, 302, 441, 442, + 314, 315, 635, 636, 300, 592, 622, 590, 634, 616, + 435, 376, 1349, 1355, 379, 282, 305, 320, 1364, 607, + 498, 228, 463, 291, 252, 1382, 1384, 212, 247, 231, + 260, 275, 278, 324, 389, 397, 426, 431, 297, 272, + 245, 456, 242, 481, 513, 514, 515, 517, 393, 267, + 430, 1345, 1373, 374, 570, 571, 316, 394, 0, 0, + 0, 1401, 1387, 522, 0, 1329, 1404, 1298, 1317, 1414, + 1320, 1323, 1366, 1276, 1344, 413, 1314, 1269, 1302, 1271, + 1309, 1272, 1300, 1331, 271, 1297, 1389, 1348, 1403, 364, + 268, 1278, 1303, 427, 1319, 205, 1368, 483, 253, 375, + 372, 577, 283, 274, 270, 251, 317, 383, 425, 512, + 419, 1410, 368, 1354, 0, 493, 398, 0, 0, 0, + 1333, 1393, 1342, 1380, 1328, 1367, 1286, 1353, 1405, 1315, + 1363, 1406, 323, 249, 325, 204, 410, 494, 287, 0, + 0, 0, 0, 0, 943, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 239, 0, 0, 246, 0, 0, + 0, 349, 358, 357, 338, 339, 341, 343, 348, 355, + 361, 1311, 1360, 1400, 1312, 1362, 266, 321, 273, 265, + 574, 1411, 1392, 1275, 1341, 1399, 1336, 0, 0, 230, + 1402, 1335, 0, 1365, 0, 1417, 1270, 1356, 0, 1273, + 1277, 1413, 1397, 1306, 276, 0, 0, 0, 0, 0, + 0, 0, 1332, 1343, 1377, 1381, 1326, 0, 0, 0, + 0, 0, 0, 0, 0, 1304, 0, 1352, 0, 0, + 0, 1282, 1274, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1330, 0, 0, 0, 0, + 1285, 0, 1305, 1378, 0, 1268, 298, 1279, 399, 258, + 0, 450, 1385, 1396, 1327, 618, 1398, 1325, 1324, 1372, + 1283, 1391, 1318, 363, 1281, 330, 199, 226, 0, 1316, + 409, 458, 470, 1390, 1301, 1310, 254, 1308, 468, 423, + 596, 234, 285, 455, 429, 466, 
437, 288, 1351, 1370, + 467, 370, 579, 447, 593, 619, 620, 264, 403, 605, + 516, 613, 637, 227, 261, 417, 501, 599, 490, 395, + 575, 576, 329, 489, 296, 203, 367, 625, 225, 476, + 369, 243, 232, 581, 602, 290, 453, 632, 214, 511, + 591, 240, 480, 0, 0, 640, 248, 500, 216, 588, + 499, 391, 326, 327, 215, 0, 454, 269, 294, 0, + 0, 259, 412, 583, 584, 257, 641, 229, 612, 221, + 1280, 611, 405, 578, 589, 392, 381, 220, 587, 390, + 380, 334, 353, 354, 281, 307, 444, 373, 445, 306, + 308, 401, 400, 402, 208, 600, 0, 209, 0, 495, + 601, 642, 449, 213, 235, 236, 238, 1296, 280, 284, + 292, 295, 303, 304, 313, 365, 416, 443, 439, 448, + 1386, 573, 594, 606, 617, 623, 624, 626, 627, 628, + 629, 630, 633, 631, 404, 311, 491, 333, 371, 1375, + 1416, 422, 469, 241, 598, 492, 201, 1290, 1295, 1288, + 0, 255, 256, 1357, 569, 1291, 1289, 1346, 1347, 1292, + 1407, 1408, 1409, 1394, 643, 644, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, + 659, 660, 638, 502, 508, 503, 504, 505, 506, 507, + 0, 509, 1379, 1284, 0, 1293, 1294, 1388, 585, 586, + 661, 382, 482, 595, 335, 347, 350, 340, 359, 0, + 360, 336, 337, 342, 344, 345, 346, 351, 352, 356, + 362, 250, 211, 388, 396, 572, 312, 217, 218, 219, + 518, 519, 520, 521, 609, 610, 614, 206, 459, 460, + 461, 462, 293, 604, 309, 465, 464, 331, 332, 377, + 446, 534, 536, 547, 551, 553, 555, 561, 564, 535, + 537, 548, 552, 554, 556, 562, 565, 524, 526, 528, + 530, 543, 542, 539, 567, 568, 545, 550, 529, 541, + 546, 559, 566, 563, 523, 527, 531, 540, 558, 557, + 538, 549, 560, 544, 532, 525, 533, 1350, 198, 222, + 366, 1412, 451, 289, 639, 608, 603, 207, 224, 1287, + 263, 1299, 1307, 0, 1313, 1321, 1322, 1334, 1337, 1338, + 1339, 1340, 0, 1358, 1359, 1361, 1369, 1371, 1374, 1376, + 1383, 1395, 1415, 200, 202, 210, 223, 233, 237, 244, + 262, 277, 279, 286, 299, 310, 318, 319, 322, 328, + 378, 384, 385, 386, 387, 406, 407, 408, 411, 414, + 415, 418, 420, 421, 424, 428, 432, 433, 434, 436, + 438, 440, 452, 457, 471, 472, 
473, 474, 475, 478, + 479, 484, 485, 486, 487, 488, 496, 497, 510, 580, + 582, 597, 615, 621, 477, 301, 302, 441, 442, 314, + 315, 635, 636, 300, 592, 622, 590, 634, 616, 435, + 376, 1349, 1355, 379, 282, 305, 320, 1364, 607, 498, + 228, 463, 291, 252, 1382, 1384, 212, 247, 231, 260, + 275, 278, 324, 389, 397, 426, 431, 297, 272, 245, + 456, 242, 481, 513, 514, 515, 517, 393, 267, 430, + 1345, 1373, 374, 570, 571, 316, 394, 0, 0, 0, + 0, 0, 522, 0, 763, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 413, 0, 0, 0, 0, 751, + 0, 0, 0, 271, 756, 0, 0, 0, 364, 268, + 0, 0, 427, 0, 205, 0, 483, 253, 375, 372, + 577, 283, 274, 270, 251, 317, 383, 425, 512, 419, + 762, 368, 0, 0, 493, 398, 0, 0, 0, 0, + 0, 758, 759, 0, 0, 0, 0, 0, 0, 0, + 0, 323, 249, 325, 204, 410, 494, 287, 0, 96, + 0, 0, 959, 943, 735, 909, 947, 960, 961, 962, + 963, 948, 0, 239, 949, 950, 246, 951, 0, 908, + 793, 795, 794, 858, 859, 860, 861, 862, 863, 864, + 791, 956, 964, 965, 0, 266, 321, 273, 265, 574, + 0, 0, 2180, 2181, 2182, 0, 0, 0, 230, 0, + 0, 0, 0, 0, 0, 0, 731, 748, 0, 761, + 0, 0, 0, 276, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 745, 746, 0, 0, 0, 0, 903, 0, 747, 0, + 0, 755, 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, - 744, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 295, 0, 396, 255, 0, 447, 888, 0, 0, - 605, 0, 0, 886, 0, 0, 0, 0, 360, 0, - 327, 197, 223, 0, 0, 406, 455, 467, 0, 0, - 0, 939, 0, 465, 420, 583, 231, 282, 452, 426, - 463, 434, 285, 0, 0, 464, 367, 568, 444, 580, - 606, 607, 261, 400, 592, 505, 600, 624, 224, 258, - 414, 498, 586, 487, 392, 564, 565, 326, 486, 293, - 201, 364, 612, 222, 473, 366, 240, 229, 570, 589, - 287, 450, 619, 211, 500, 578, 237, 477, 0, 0, - 627, 245, 497, 213, 575, 496, 388, 323, 324, 212, - 0, 451, 266, 291, 0, 0, 256, 409, 940, 941, - 254, 628, 785, 599, 218, 0, 598, 402, 567, 576, - 389, 378, 217, 574, 387, 377, 331, 793, 794, 278, - 304, 870, 869, 868, 303, 305, 866, 
867, 865, 205, - 587, 0, 206, 0, 492, 588, 629, 446, 210, 232, - 233, 235, 0, 277, 281, 289, 292, 300, 301, 310, - 362, 413, 440, 436, 445, 0, 562, 581, 593, 604, - 610, 611, 613, 614, 615, 616, 617, 620, 618, 401, - 308, 488, 330, 368, 0, 0, 419, 466, 238, 585, - 489, 876, 898, 887, 754, 755, 877, 878, 902, 879, - 757, 758, 899, 900, 751, 752, 756, 901, 903, 630, - 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, - 641, 642, 643, 644, 645, 646, 647, 625, 890, 741, - 740, 0, 747, 0, 775, 776, 778, 782, 783, 784, - 795, 842, 843, 851, 853, 854, 852, 855, 856, 857, - 860, 861, 862, 863, 858, 859, 864, 759, 763, 760, - 761, 762, 774, 764, 765, 766, 767, 768, 769, 770, - 771, 772, 773, 913, 914, 915, 916, 917, 918, 788, - 792, 791, 789, 790, 786, 787, 814, 813, 815, 816, - 817, 818, 819, 820, 822, 821, 823, 824, 825, 826, - 827, 828, 796, 797, 800, 801, 799, 798, 802, 811, - 812, 803, 804, 805, 806, 807, 808, 810, 809, 829, - 830, 831, 832, 833, 835, 834, 838, 839, 837, 836, - 841, 840, 739, 196, 219, 363, 0, 448, 286, 626, - 595, 590, 204, 221, 904, 260, 905, 0, 0, 909, - 0, 0, 0, 911, 910, 0, 912, 0, 874, 873, - 0, 0, 906, 907, 0, 908, 0, 0, 198, 200, - 207, 220, 230, 234, 241, 259, 274, 276, 283, 296, - 307, 315, 316, 319, 325, 375, 381, 382, 383, 384, - 403, 404, 405, 408, 411, 412, 415, 417, 418, 421, - 425, 429, 430, 431, 433, 435, 437, 449, 454, 468, - 469, 470, 471, 472, 475, 476, 481, 482, 483, 484, - 485, 493, 494, 499, 569, 571, 584, 602, 608, 474, - 919, 920, 921, 922, 923, 924, 925, 926, 297, 579, - 609, 577, 621, 603, 432, 373, 0, 0, 376, 279, - 302, 317, 0, 594, 495, 225, 460, 288, 249, 944, - 0, 209, 244, 228, 257, 272, 275, 321, 386, 394, - 423, 428, 294, 269, 242, 453, 239, 478, 502, 503, - 504, 506, 390, 264, 427, 391, 0, 371, 559, 560, - 313, 511, 0, 750, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 410, 0, 0, 0, 0, 738, 0, - 0, 0, 268, 743, 0, 0, 0, 361, 265, 0, - 0, 424, 0, 203, 0, 480, 250, 372, 369, 566, - 280, 271, 267, 248, 314, 380, 422, 501, 416, 749, - 
365, 0, 0, 490, 395, 0, 0, 0, 0, 0, - 745, 746, 0, 0, 0, 0, 0, 0, 2349, 0, - 320, 246, 322, 202, 407, 491, 284, 0, 94, 0, - 0, 945, 929, 722, 895, 933, 946, 947, 948, 949, - 934, 0, 236, 935, 936, 243, 937, 0, 894, 779, - 781, 780, 844, 845, 846, 847, 848, 849, 850, 777, - 942, 950, 951, 2350, 263, 318, 270, 262, 563, 0, - 0, 0, 0, 0, 0, 0, 227, 0, 0, 0, - 0, 0, 0, 0, 718, 735, 0, 748, 0, 0, - 0, 273, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 732, 733, - 0, 0, 0, 0, 889, 0, 734, 0, 0, 742, - 952, 953, 954, 955, 956, 957, 958, 959, 960, 961, - 962, 963, 964, 965, 966, 967, 968, 969, 970, 971, - 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, - 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, - 992, 993, 744, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 295, 0, 396, 255, 0, 447, 888, - 0, 0, 605, 0, 0, 886, 0, 0, 0, 0, - 360, 0, 327, 197, 223, 0, 0, 406, 455, 467, - 0, 0, 0, 939, 0, 465, 420, 583, 231, 282, - 452, 426, 463, 434, 285, 0, 0, 464, 367, 568, - 444, 580, 606, 607, 261, 400, 592, 505, 600, 624, - 224, 258, 414, 498, 586, 487, 392, 564, 565, 326, - 486, 293, 201, 364, 612, 222, 473, 366, 240, 229, - 570, 589, 287, 450, 619, 211, 500, 578, 237, 477, - 0, 0, 627, 245, 497, 213, 575, 496, 388, 323, - 324, 212, 0, 451, 266, 291, 0, 0, 256, 409, - 940, 941, 254, 628, 785, 599, 218, 0, 598, 402, - 567, 576, 389, 378, 217, 574, 387, 377, 331, 793, - 794, 278, 304, 870, 869, 868, 303, 305, 866, 867, - 865, 205, 587, 0, 206, 0, 492, 588, 629, 446, - 210, 232, 233, 235, 0, 277, 281, 289, 292, 300, - 301, 310, 362, 413, 440, 436, 445, 0, 562, 581, - 593, 604, 610, 611, 613, 614, 615, 616, 617, 620, - 618, 401, 308, 488, 330, 368, 0, 0, 419, 466, - 238, 585, 489, 876, 898, 887, 754, 755, 877, 878, - 902, 879, 757, 758, 899, 900, 751, 752, 756, 901, - 903, 630, 631, 632, 633, 634, 635, 636, 637, 638, - 639, 640, 641, 642, 643, 644, 645, 646, 647, 625, - 890, 741, 740, 0, 747, 0, 775, 776, 778, 782, - 783, 784, 795, 842, 843, 851, 853, 854, 852, 855, - 856, 857, 860, 861, 
862, 863, 858, 859, 864, 759, - 763, 760, 761, 762, 774, 764, 765, 766, 767, 768, - 769, 770, 771, 772, 773, 913, 914, 915, 916, 917, - 918, 788, 792, 791, 789, 790, 786, 787, 814, 813, - 815, 816, 817, 818, 819, 820, 822, 821, 823, 824, - 825, 826, 827, 828, 796, 797, 800, 801, 799, 798, - 802, 811, 812, 803, 804, 805, 806, 807, 808, 810, - 809, 829, 830, 831, 832, 833, 835, 834, 838, 839, - 837, 836, 841, 840, 739, 196, 219, 363, 0, 448, - 286, 626, 595, 590, 204, 221, 904, 260, 905, 0, - 0, 909, 0, 0, 0, 911, 910, 0, 912, 0, - 874, 873, 0, 0, 906, 907, 0, 908, 0, 0, - 198, 200, 207, 220, 230, 234, 241, 259, 274, 276, - 283, 296, 307, 315, 316, 319, 325, 375, 381, 382, - 383, 384, 403, 404, 405, 408, 411, 412, 415, 417, - 418, 421, 425, 429, 430, 431, 433, 435, 437, 449, - 454, 468, 469, 470, 471, 472, 475, 476, 481, 482, - 483, 484, 485, 493, 494, 499, 569, 571, 584, 602, - 608, 474, 919, 920, 921, 922, 923, 924, 925, 926, - 297, 579, 609, 577, 621, 603, 432, 373, 0, 0, - 376, 279, 302, 317, 0, 594, 495, 225, 460, 288, - 249, 944, 0, 209, 244, 228, 257, 272, 275, 321, - 386, 394, 423, 428, 294, 269, 242, 453, 239, 478, - 502, 503, 504, 506, 390, 264, 427, 0, 391, 371, - 559, 560, 313, 85, 511, 0, 750, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 410, 0, 0, 0, - 0, 738, 0, 0, 0, 268, 743, 0, 0, 0, - 361, 265, 0, 0, 424, 0, 203, 0, 480, 250, - 372, 369, 566, 280, 271, 267, 248, 314, 380, 422, - 501, 416, 749, 365, 0, 0, 490, 395, 0, 0, - 0, 0, 0, 745, 746, 0, 0, 0, 0, 0, - 0, 0, 0, 320, 246, 322, 202, 407, 491, 284, - 0, 94, 0, 0, 945, 929, 722, 895, 933, 946, - 947, 948, 949, 934, 0, 236, 935, 936, 243, 937, - 0, 894, 779, 781, 780, 844, 845, 846, 847, 848, - 849, 850, 777, 942, 950, 951, 0, 263, 318, 270, - 262, 563, 0, 0, 0, 0, 0, 0, 0, 227, - 0, 0, 0, 0, 0, 0, 0, 718, 735, 0, - 748, 0, 0, 0, 273, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 732, 733, 0, 0, 0, 0, 889, 0, 734, - 0, 0, 742, 952, 953, 954, 955, 956, 957, 958, - 959, 960, 961, 962, 963, 964, 965, 966, 967, 
968, - 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, - 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, - 989, 990, 991, 992, 993, 744, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 295, 0, 396, 255, - 0, 447, 888, 0, 0, 605, 0, 0, 886, 0, - 0, 0, 0, 360, 0, 327, 197, 223, 0, 0, - 406, 455, 467, 0, 0, 0, 939, 0, 465, 420, - 583, 231, 282, 452, 426, 463, 434, 285, 0, 0, - 464, 367, 568, 444, 580, 606, 607, 261, 400, 592, - 505, 600, 624, 224, 258, 414, 498, 586, 487, 392, - 564, 565, 326, 486, 293, 201, 364, 612, 222, 473, - 366, 240, 229, 570, 589, 287, 450, 619, 211, 500, - 578, 237, 477, 0, 0, 627, 245, 497, 213, 575, - 496, 388, 323, 324, 212, 0, 451, 266, 291, 0, - 0, 256, 409, 940, 941, 254, 628, 785, 599, 218, - 0, 598, 402, 567, 576, 389, 378, 217, 574, 387, - 377, 331, 793, 794, 278, 304, 870, 869, 868, 303, - 305, 866, 867, 865, 205, 587, 0, 206, 0, 492, - 588, 629, 446, 210, 232, 233, 235, 0, 277, 281, - 289, 292, 300, 301, 310, 362, 413, 440, 436, 445, - 0, 562, 581, 593, 604, 610, 611, 613, 614, 615, - 616, 617, 620, 618, 401, 308, 488, 330, 368, 0, - 0, 419, 466, 238, 585, 489, 876, 898, 887, 754, - 755, 877, 878, 902, 879, 757, 758, 899, 900, 751, - 752, 756, 901, 903, 630, 631, 632, 633, 634, 635, - 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, - 646, 647, 625, 890, 741, 740, 0, 747, 0, 775, - 776, 778, 782, 783, 784, 795, 842, 843, 851, 853, - 854, 852, 855, 856, 857, 860, 861, 862, 863, 858, - 859, 864, 759, 763, 760, 761, 762, 774, 764, 765, - 766, 767, 768, 769, 770, 771, 772, 773, 913, 914, - 915, 916, 917, 918, 788, 792, 791, 789, 790, 786, - 787, 814, 813, 815, 816, 817, 818, 819, 820, 822, - 821, 823, 824, 825, 826, 827, 828, 796, 797, 800, - 801, 799, 798, 802, 811, 812, 803, 804, 805, 806, - 807, 808, 810, 809, 829, 830, 831, 832, 833, 835, - 834, 838, 839, 837, 836, 841, 840, 739, 196, 219, - 363, 93, 448, 286, 626, 595, 590, 204, 221, 904, - 260, 905, 0, 0, 909, 0, 0, 0, 911, 910, - 0, 912, 0, 874, 873, 0, 0, 906, 907, 0, - 908, 0, 0, 198, 
200, 207, 220, 230, 234, 241, - 259, 274, 276, 283, 296, 307, 315, 316, 319, 325, - 375, 381, 382, 383, 384, 403, 404, 405, 408, 411, - 412, 415, 417, 418, 421, 425, 429, 430, 431, 433, - 435, 437, 449, 454, 468, 469, 470, 471, 472, 475, - 476, 481, 482, 483, 484, 485, 493, 494, 499, 569, - 571, 584, 602, 608, 474, 919, 920, 921, 922, 923, - 924, 925, 926, 297, 579, 609, 577, 621, 603, 432, - 373, 0, 0, 376, 279, 302, 317, 0, 594, 495, - 225, 460, 288, 249, 944, 0, 209, 244, 228, 257, - 272, 275, 321, 386, 394, 423, 428, 294, 269, 242, - 453, 239, 478, 502, 503, 504, 506, 390, 264, 427, - 391, 0, 371, 559, 560, 313, 511, 0, 750, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 410, 0, - 0, 0, 0, 738, 0, 0, 0, 268, 743, 0, - 0, 0, 361, 265, 0, 0, 424, 0, 203, 0, - 480, 250, 372, 369, 566, 280, 271, 267, 248, 314, - 380, 422, 501, 416, 749, 365, 0, 0, 490, 395, - 0, 0, 0, 0, 0, 745, 746, 0, 0, 0, - 0, 0, 0, 0, 0, 320, 246, 322, 202, 407, - 491, 284, 0, 94, 0, 0, 945, 929, 722, 895, - 933, 946, 947, 948, 949, 934, 0, 236, 935, 936, - 243, 937, 0, 894, 779, 781, 780, 844, 845, 846, - 847, 848, 849, 850, 777, 942, 950, 951, 0, 263, - 318, 270, 262, 563, 0, 0, 0, 0, 0, 0, - 0, 227, 0, 0, 0, 0, 0, 0, 0, 718, - 735, 0, 748, 0, 0, 0, 273, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 732, 733, 0, 0, 0, 0, 889, - 0, 734, 0, 0, 742, 952, 953, 954, 955, 956, - 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, + 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, + 1004, 1005, 1006, 1007, 757, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 298, 0, 399, 258, 0, + 450, 902, 0, 0, 618, 0, 0, 900, 0, 0, + 0, 0, 363, 0, 330, 199, 226, 0, 0, 409, + 458, 470, 0, 0, 0, 953, 0, 468, 423, 596, + 234, 285, 455, 429, 466, 437, 288, 0, 0, 467, + 370, 579, 447, 593, 619, 620, 264, 403, 605, 516, + 613, 637, 227, 261, 417, 501, 599, 490, 395, 575, + 576, 329, 489, 296, 203, 367, 625, 225, 476, 369, + 243, 232, 581, 602, 290, 453, 632, 214, 511, 591, + 240, 480, 0, 0, 640, 248, 500, 216, 588, 499, + 391, 326, 327, 215, 
0, 454, 269, 294, 0, 0, + 259, 412, 954, 955, 257, 641, 799, 612, 221, 0, + 611, 405, 578, 589, 392, 381, 220, 587, 390, 380, + 334, 807, 808, 281, 307, 884, 883, 882, 306, 308, + 880, 881, 879, 208, 600, 0, 209, 0, 495, 601, + 642, 449, 213, 235, 236, 238, 0, 280, 284, 292, + 295, 303, 304, 313, 365, 416, 443, 439, 448, 0, + 573, 594, 606, 617, 623, 624, 626, 627, 628, 629, + 630, 633, 631, 404, 311, 491, 333, 371, 0, 0, + 422, 469, 241, 598, 492, 890, 912, 901, 767, 768, + 891, 892, 916, 893, 770, 771, 913, 914, 764, 765, + 769, 915, 917, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, + 660, 638, 502, 508, 503, 504, 505, 506, 507, 0, + 509, 904, 754, 753, 0, 760, 0, 789, 790, 792, + 796, 797, 798, 809, 856, 857, 865, 867, 868, 866, + 869, 870, 871, 874, 875, 876, 877, 872, 873, 878, + 772, 776, 773, 774, 775, 787, 777, 778, 779, 780, + 781, 782, 783, 784, 785, 786, 788, 927, 928, 929, + 930, 931, 932, 802, 806, 805, 803, 804, 800, 801, + 828, 827, 829, 830, 831, 832, 833, 834, 836, 835, + 837, 838, 839, 840, 841, 842, 810, 811, 814, 815, + 813, 812, 816, 825, 826, 817, 818, 819, 820, 821, + 822, 824, 823, 843, 844, 845, 846, 847, 849, 848, + 852, 853, 851, 850, 855, 854, 752, 198, 222, 366, + 0, 451, 289, 639, 608, 603, 207, 224, 918, 263, + 919, 0, 0, 923, 0, 0, 0, 925, 924, 0, + 926, 0, 888, 887, 0, 0, 920, 921, 0, 922, + 0, 0, 200, 202, 210, 223, 233, 237, 244, 262, + 277, 279, 286, 299, 310, 318, 319, 322, 328, 378, + 384, 385, 386, 387, 406, 407, 408, 411, 414, 415, + 418, 420, 421, 424, 428, 432, 433, 434, 436, 438, + 440, 452, 457, 471, 472, 473, 474, 475, 478, 479, + 484, 485, 486, 487, 488, 496, 497, 510, 580, 582, + 597, 615, 621, 477, 933, 934, 935, 936, 937, 938, + 939, 940, 300, 592, 622, 590, 634, 616, 435, 376, + 0, 0, 379, 282, 305, 320, 0, 607, 498, 228, + 463, 291, 252, 958, 0, 212, 247, 231, 260, 275, + 278, 324, 389, 397, 426, 431, 297, 272, 245, 456, + 242, 481, 513, 514, 515, 517, 393, 267, 430, 394, + 
0, 374, 570, 571, 316, 522, 0, 763, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 413, 0, 0, + 0, 0, 751, 0, 0, 0, 271, 756, 0, 0, + 0, 364, 268, 0, 0, 427, 0, 205, 0, 483, + 253, 375, 372, 577, 283, 274, 270, 251, 317, 383, + 425, 512, 419, 762, 368, 0, 0, 493, 398, 0, + 0, 0, 0, 0, 758, 759, 0, 0, 0, 0, + 0, 0, 2387, 0, 323, 249, 325, 204, 410, 494, + 287, 0, 96, 0, 0, 959, 943, 735, 909, 947, + 960, 961, 962, 963, 948, 0, 239, 949, 950, 246, + 951, 0, 908, 793, 795, 794, 858, 859, 860, 861, + 862, 863, 864, 791, 956, 964, 965, 2388, 266, 321, + 273, 265, 574, 0, 0, 0, 0, 0, 0, 0, + 0, 230, 0, 0, 0, 0, 0, 0, 0, 731, + 748, 0, 761, 0, 0, 0, 276, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 745, 746, 0, 0, 0, 0, 903, + 0, 747, 0, 0, 755, 966, 967, 968, 969, 970, + 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, + 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, + 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, + 1001, 1002, 1003, 1004, 1005, 1006, 1007, 757, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 298, 0, + 399, 258, 0, 450, 902, 0, 0, 618, 0, 0, + 900, 0, 0, 0, 0, 363, 0, 330, 199, 226, + 0, 0, 409, 458, 470, 0, 0, 0, 953, 0, + 468, 423, 596, 234, 285, 455, 429, 466, 437, 288, + 0, 0, 467, 370, 579, 447, 593, 619, 620, 264, + 403, 605, 516, 613, 637, 227, 261, 417, 501, 599, + 490, 395, 575, 576, 329, 489, 296, 203, 367, 625, + 225, 476, 369, 243, 232, 581, 602, 290, 453, 632, + 214, 511, 591, 240, 480, 0, 0, 640, 248, 500, + 216, 588, 499, 391, 326, 327, 215, 0, 454, 269, + 294, 0, 0, 259, 412, 954, 955, 257, 641, 799, + 612, 221, 0, 611, 405, 578, 589, 392, 381, 220, + 587, 390, 380, 334, 807, 808, 281, 307, 884, 883, + 882, 306, 308, 880, 881, 879, 208, 600, 0, 209, + 0, 495, 601, 642, 449, 213, 235, 236, 238, 0, + 280, 284, 292, 295, 303, 304, 313, 365, 416, 443, + 439, 448, 0, 573, 594, 606, 617, 623, 624, 626, + 627, 628, 629, 630, 633, 631, 404, 311, 491, 333, + 371, 0, 0, 422, 469, 241, 598, 492, 890, 912, + 901, 767, 768, 891, 892, 916, 893, 770, 771, 913, + 914, 
764, 765, 769, 915, 917, 643, 644, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 638, 502, 508, 503, 504, 505, + 506, 507, 0, 509, 904, 754, 753, 0, 760, 0, + 789, 790, 792, 796, 797, 798, 809, 856, 857, 865, + 867, 868, 866, 869, 870, 871, 874, 875, 876, 877, + 872, 873, 878, 772, 776, 773, 774, 775, 787, 777, + 778, 779, 780, 781, 782, 783, 784, 785, 786, 788, + 927, 928, 929, 930, 931, 932, 802, 806, 805, 803, + 804, 800, 801, 828, 827, 829, 830, 831, 832, 833, + 834, 836, 835, 837, 838, 839, 840, 841, 842, 810, + 811, 814, 815, 813, 812, 816, 825, 826, 817, 818, + 819, 820, 821, 822, 824, 823, 843, 844, 845, 846, + 847, 849, 848, 852, 853, 851, 850, 855, 854, 752, + 198, 222, 366, 0, 451, 289, 639, 608, 603, 207, + 224, 918, 263, 919, 0, 0, 923, 0, 0, 0, + 925, 924, 0, 926, 0, 888, 887, 0, 0, 920, + 921, 0, 922, 0, 0, 200, 202, 210, 223, 233, + 237, 244, 262, 277, 279, 286, 299, 310, 318, 319, + 322, 328, 378, 384, 385, 386, 387, 406, 407, 408, + 411, 414, 415, 418, 420, 421, 424, 428, 432, 433, + 434, 436, 438, 440, 452, 457, 471, 472, 473, 474, + 475, 478, 479, 484, 485, 486, 487, 488, 496, 497, + 510, 580, 582, 597, 615, 621, 477, 933, 934, 935, + 936, 937, 938, 939, 940, 300, 592, 622, 590, 634, + 616, 435, 376, 0, 0, 379, 282, 305, 320, 0, + 607, 498, 228, 463, 291, 252, 958, 0, 212, 247, + 231, 260, 275, 278, 324, 389, 397, 426, 431, 297, + 272, 245, 456, 242, 481, 513, 514, 515, 517, 393, + 267, 430, 0, 394, 374, 570, 571, 316, 87, 522, + 0, 763, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 413, 0, 0, 0, 0, 751, 0, 0, 0, + 271, 756, 0, 0, 0, 364, 268, 0, 0, 427, + 0, 205, 0, 483, 253, 375, 372, 577, 283, 274, + 270, 251, 317, 383, 425, 512, 419, 762, 368, 0, + 0, 493, 398, 0, 0, 0, 0, 0, 758, 759, + 0, 0, 0, 0, 0, 0, 0, 0, 323, 249, + 325, 204, 410, 494, 287, 0, 96, 0, 0, 959, + 943, 735, 909, 947, 960, 961, 962, 963, 948, 0, + 239, 949, 950, 246, 951, 0, 908, 793, 795, 794, + 858, 859, 860, 861, 862, 863, 864, 791, 956, 964, + 965, 
0, 266, 321, 273, 265, 574, 0, 0, 0, + 0, 0, 0, 0, 0, 230, 0, 0, 0, 0, + 0, 0, 0, 731, 748, 0, 761, 0, 0, 0, + 276, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 745, 746, 0, + 0, 0, 0, 903, 0, 747, 0, 0, 755, 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, - 987, 988, 989, 990, 991, 992, 993, 744, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 295, 0, - 396, 255, 0, 447, 888, 0, 0, 605, 0, 0, - 886, 0, 0, 0, 0, 360, 0, 327, 197, 223, - 0, 0, 406, 455, 467, 0, 0, 0, 939, 0, - 465, 420, 583, 231, 282, 452, 426, 463, 434, 285, - 3965, 0, 464, 367, 568, 444, 580, 606, 607, 261, - 400, 592, 505, 600, 624, 224, 258, 414, 498, 586, - 487, 392, 564, 565, 326, 486, 293, 201, 364, 612, - 222, 473, 366, 240, 229, 570, 589, 287, 450, 619, - 211, 500, 578, 237, 477, 0, 0, 627, 245, 497, - 213, 575, 496, 388, 323, 324, 212, 0, 451, 266, - 291, 0, 0, 256, 409, 940, 941, 254, 628, 785, - 599, 218, 0, 598, 402, 567, 576, 389, 378, 217, - 574, 387, 377, 331, 793, 794, 278, 304, 870, 869, - 868, 303, 305, 866, 867, 865, 205, 587, 0, 206, - 0, 492, 588, 629, 446, 210, 232, 233, 235, 0, - 277, 281, 289, 292, 300, 301, 310, 362, 413, 440, - 436, 445, 0, 562, 581, 593, 604, 610, 611, 613, - 614, 615, 616, 617, 620, 618, 401, 308, 488, 330, - 368, 0, 0, 419, 466, 238, 585, 489, 876, 898, - 887, 754, 755, 877, 878, 902, 879, 757, 758, 899, - 900, 751, 752, 756, 901, 903, 630, 631, 632, 633, - 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, - 644, 645, 646, 647, 625, 890, 741, 740, 0, 747, - 0, 775, 776, 778, 782, 783, 784, 795, 842, 843, - 851, 853, 854, 852, 855, 856, 857, 860, 861, 862, - 863, 858, 859, 864, 759, 763, 760, 761, 762, 774, - 764, 765, 766, 767, 768, 769, 770, 771, 772, 773, - 913, 914, 915, 916, 917, 918, 788, 792, 791, 789, - 790, 786, 787, 814, 813, 815, 816, 817, 818, 819, - 820, 822, 821, 823, 824, 825, 826, 827, 828, 796, - 797, 800, 801, 799, 798, 802, 811, 812, 803, 804, - 805, 806, 807, 808, 810, 809, 829, 830, 831, 
832, - 833, 835, 834, 838, 839, 837, 836, 841, 840, 739, - 196, 219, 363, 0, 448, 286, 626, 595, 590, 204, - 221, 904, 260, 905, 0, 0, 909, 0, 0, 0, - 911, 910, 0, 912, 0, 874, 873, 0, 0, 906, - 907, 0, 908, 0, 0, 198, 200, 207, 220, 230, - 234, 241, 259, 274, 276, 283, 296, 307, 315, 316, - 319, 325, 375, 381, 382, 383, 384, 403, 404, 405, - 408, 411, 412, 415, 417, 418, 421, 425, 429, 430, - 431, 433, 435, 437, 449, 454, 468, 469, 470, 471, - 472, 475, 476, 481, 482, 483, 484, 485, 493, 494, - 499, 569, 571, 584, 602, 608, 474, 919, 920, 921, - 922, 923, 924, 925, 926, 297, 579, 609, 577, 621, - 603, 432, 373, 0, 0, 376, 279, 302, 317, 0, - 594, 495, 225, 460, 288, 249, 944, 0, 209, 244, - 228, 257, 272, 275, 321, 386, 394, 423, 428, 294, - 269, 242, 453, 239, 478, 502, 503, 504, 506, 390, - 264, 427, 391, 0, 371, 559, 560, 313, 511, 0, - 750, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 410, 0, 0, 0, 0, 738, 0, 0, 0, 268, - 743, 0, 0, 0, 361, 265, 0, 0, 424, 0, - 203, 0, 480, 250, 372, 369, 566, 280, 271, 267, - 248, 314, 380, 422, 501, 416, 749, 365, 0, 0, - 490, 395, 0, 0, 0, 0, 0, 745, 746, 0, - 0, 0, 0, 0, 0, 0, 0, 320, 246, 322, - 202, 407, 491, 284, 0, 94, 0, 1701, 945, 929, - 722, 895, 933, 946, 947, 948, 949, 934, 0, 236, - 935, 936, 243, 937, 0, 894, 779, 781, 780, 844, - 845, 846, 847, 848, 849, 850, 777, 942, 950, 951, - 0, 263, 318, 270, 262, 563, 0, 0, 0, 0, - 0, 0, 0, 227, 0, 0, 0, 0, 0, 0, - 0, 718, 735, 0, 748, 0, 0, 0, 273, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 732, 733, 0, 0, 0, - 0, 889, 0, 734, 0, 0, 742, 952, 953, 954, - 955, 956, 957, 958, 959, 960, 961, 962, 963, 964, - 965, 966, 967, 968, 969, 970, 971, 972, 973, 974, - 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, - 985, 986, 987, 988, 989, 990, 991, 992, 993, 744, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 295, 0, 396, 255, 0, 447, 888, 0, 0, 605, - 0, 0, 886, 0, 0, 0, 0, 360, 0, 327, - 197, 223, 0, 0, 406, 455, 467, 0, 0, 0, - 939, 0, 465, 420, 583, 231, 282, 452, 426, 463, - 434, 285, 0, 0, 
464, 367, 568, 444, 580, 606, - 607, 261, 400, 592, 505, 600, 624, 224, 258, 414, - 498, 586, 487, 392, 564, 565, 326, 486, 293, 201, - 364, 612, 222, 473, 366, 240, 229, 570, 589, 287, - 450, 619, 211, 500, 578, 237, 477, 0, 0, 627, - 245, 497, 213, 575, 496, 388, 323, 324, 212, 0, - 451, 266, 291, 0, 0, 256, 409, 940, 941, 254, - 628, 785, 599, 218, 0, 598, 402, 567, 576, 389, - 378, 217, 574, 387, 377, 331, 793, 794, 278, 304, - 870, 869, 868, 303, 305, 866, 867, 865, 205, 587, - 0, 206, 0, 492, 588, 629, 446, 210, 232, 233, - 235, 0, 277, 281, 289, 292, 300, 301, 310, 362, - 413, 440, 436, 445, 0, 562, 581, 593, 604, 610, - 611, 613, 614, 615, 616, 617, 620, 618, 401, 308, - 488, 330, 368, 0, 0, 419, 466, 238, 585, 489, - 876, 898, 887, 754, 755, 877, 878, 902, 879, 757, - 758, 899, 900, 751, 752, 756, 901, 903, 630, 631, - 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, - 642, 643, 644, 645, 646, 647, 625, 890, 741, 740, - 0, 747, 0, 775, 776, 778, 782, 783, 784, 795, - 842, 843, 851, 853, 854, 852, 855, 856, 857, 860, - 861, 862, 863, 858, 859, 864, 759, 763, 760, 761, - 762, 774, 764, 765, 766, 767, 768, 769, 770, 771, - 772, 773, 913, 914, 915, 916, 917, 918, 788, 792, - 791, 789, 790, 786, 787, 814, 813, 815, 816, 817, - 818, 819, 820, 822, 821, 823, 824, 825, 826, 827, - 828, 796, 797, 800, 801, 799, 798, 802, 811, 812, - 803, 804, 805, 806, 807, 808, 810, 809, 829, 830, - 831, 832, 833, 835, 834, 838, 839, 837, 836, 841, - 840, 739, 196, 219, 363, 0, 448, 286, 626, 595, - 590, 204, 221, 904, 260, 905, 0, 0, 909, 0, - 0, 0, 911, 910, 0, 912, 0, 874, 873, 0, - 0, 906, 907, 0, 908, 0, 0, 198, 200, 207, - 220, 230, 234, 241, 259, 274, 276, 283, 296, 307, - 315, 316, 319, 325, 375, 381, 382, 383, 384, 403, - 404, 405, 408, 411, 412, 415, 417, 418, 421, 425, - 429, 430, 431, 433, 435, 437, 449, 454, 468, 469, - 470, 471, 472, 475, 476, 481, 482, 483, 484, 485, - 493, 494, 499, 569, 571, 584, 602, 608, 474, 919, - 920, 921, 922, 923, 924, 925, 926, 297, 579, 
609, - 577, 621, 603, 432, 373, 0, 0, 376, 279, 302, - 317, 0, 594, 495, 225, 460, 288, 249, 944, 0, - 209, 244, 228, 257, 272, 275, 321, 386, 394, 423, - 428, 294, 269, 242, 453, 239, 478, 502, 503, 504, - 506, 390, 264, 427, 391, 0, 371, 559, 560, 313, - 511, 0, 750, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 410, 0, 0, 0, 0, 738, 0, 0, - 0, 268, 743, 0, 0, 0, 361, 265, 0, 0, - 424, 0, 203, 0, 480, 250, 372, 369, 566, 280, - 271, 267, 248, 314, 380, 422, 501, 416, 749, 365, - 0, 0, 490, 395, 0, 0, 0, 0, 0, 745, - 746, 0, 0, 0, 0, 0, 0, 0, 0, 320, - 246, 322, 202, 407, 491, 284, 0, 94, 0, 0, - 945, 929, 722, 895, 933, 946, 947, 948, 949, 934, - 0, 236, 935, 936, 243, 937, 0, 894, 779, 781, - 780, 844, 845, 846, 847, 848, 849, 850, 777, 942, - 950, 951, 0, 263, 318, 270, 262, 563, 0, 0, - 0, 0, 0, 0, 0, 227, 0, 0, 0, 0, - 0, 0, 0, 718, 735, 0, 748, 0, 0, 0, - 273, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 732, 733, 1033, - 0, 0, 0, 889, 0, 734, 0, 0, 742, 952, - 953, 954, 955, 956, 957, 958, 959, 960, 961, 962, - 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, - 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, - 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, - 993, 744, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 295, 0, 396, 255, 0, 447, 888, 0, - 0, 605, 0, 0, 886, 0, 0, 0, 0, 360, - 0, 327, 197, 223, 0, 0, 406, 455, 467, 0, - 0, 0, 939, 0, 465, 420, 583, 231, 282, 452, - 426, 463, 434, 285, 0, 0, 464, 367, 568, 444, - 580, 606, 607, 261, 400, 592, 505, 600, 624, 224, - 258, 414, 498, 586, 487, 392, 564, 565, 326, 486, - 293, 201, 364, 612, 222, 473, 366, 240, 229, 570, - 589, 287, 450, 619, 211, 500, 578, 237, 477, 0, - 0, 627, 245, 497, 213, 575, 496, 388, 323, 324, - 212, 0, 451, 266, 291, 0, 0, 256, 409, 940, - 941, 254, 628, 785, 599, 218, 0, 598, 402, 567, - 576, 389, 378, 217, 574, 387, 377, 331, 793, 794, - 278, 304, 870, 869, 868, 303, 305, 866, 867, 865, - 205, 587, 0, 206, 0, 492, 588, 629, 446, 210, - 232, 233, 235, 0, 277, 281, 289, 292, 300, 301, - 310, 362, 
413, 440, 436, 445, 0, 562, 581, 593, - 604, 610, 611, 613, 614, 615, 616, 617, 620, 618, - 401, 308, 488, 330, 368, 0, 0, 419, 466, 238, - 585, 489, 876, 898, 887, 754, 755, 877, 878, 902, - 879, 757, 758, 899, 900, 751, 752, 756, 901, 903, - 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, - 640, 641, 642, 643, 644, 645, 646, 647, 625, 890, - 741, 740, 0, 747, 0, 775, 776, 778, 782, 783, - 784, 795, 842, 843, 851, 853, 854, 852, 855, 856, - 857, 860, 861, 862, 863, 858, 859, 864, 759, 763, - 760, 761, 762, 774, 764, 765, 766, 767, 768, 769, - 770, 771, 772, 773, 913, 914, 915, 916, 917, 918, - 788, 792, 791, 789, 790, 786, 787, 814, 813, 815, - 816, 817, 818, 819, 820, 822, 821, 823, 824, 825, - 826, 827, 828, 796, 797, 800, 801, 799, 798, 802, - 811, 812, 803, 804, 805, 806, 807, 808, 810, 809, - 829, 830, 831, 832, 833, 835, 834, 838, 839, 837, - 836, 841, 840, 739, 196, 219, 363, 0, 448, 286, - 626, 595, 590, 204, 221, 904, 260, 905, 0, 0, - 909, 0, 0, 0, 911, 910, 0, 912, 0, 874, - 873, 0, 0, 906, 907, 0, 908, 0, 0, 198, - 200, 207, 220, 230, 234, 241, 259, 274, 276, 283, - 296, 307, 315, 316, 319, 325, 375, 381, 382, 383, - 384, 403, 404, 405, 408, 411, 412, 415, 417, 418, - 421, 425, 429, 430, 431, 433, 435, 437, 449, 454, - 468, 469, 470, 471, 472, 475, 476, 481, 482, 483, - 484, 485, 493, 494, 499, 569, 571, 584, 602, 608, - 474, 919, 920, 921, 922, 923, 924, 925, 926, 297, - 579, 609, 577, 621, 603, 432, 373, 0, 0, 376, - 279, 302, 317, 0, 594, 495, 225, 460, 288, 249, - 944, 0, 209, 244, 228, 257, 272, 275, 321, 386, - 394, 423, 428, 294, 269, 242, 453, 239, 478, 502, - 503, 504, 506, 390, 264, 427, 391, 0, 371, 559, - 560, 313, 511, 0, 750, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 410, 0, 0, 0, 0, 738, - 0, 0, 0, 268, 743, 0, 0, 0, 361, 265, - 0, 0, 424, 0, 203, 0, 480, 250, 372, 369, - 566, 280, 271, 267, 248, 314, 380, 422, 501, 416, - 749, 365, 0, 0, 490, 395, 0, 0, 0, 0, - 0, 745, 746, 0, 0, 0, 0, 0, 0, 0, - 0, 320, 246, 322, 202, 407, 491, 284, 0, 94, - 0, 
0, 945, 929, 722, 895, 933, 946, 947, 948, - 949, 934, 0, 236, 935, 936, 243, 937, 0, 894, - 779, 781, 780, 844, 845, 846, 847, 848, 849, 850, - 777, 942, 950, 951, 0, 263, 318, 270, 262, 563, - 0, 0, 0, 0, 0, 0, 0, 227, 0, 0, - 0, 0, 0, 0, 0, 718, 735, 0, 748, 0, - 0, 0, 273, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 732, - 733, 0, 0, 0, 0, 889, 0, 734, 0, 0, - 742, 952, 953, 954, 955, 956, 957, 958, 959, 960, - 961, 962, 963, 964, 965, 966, 967, 968, 969, 970, + 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, + 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, + 1007, 757, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 298, 0, 399, 258, 0, 450, 902, 0, + 0, 618, 0, 0, 900, 0, 0, 0, 0, 363, + 0, 330, 199, 226, 0, 0, 409, 458, 470, 0, + 0, 0, 953, 0, 468, 423, 596, 234, 285, 455, + 429, 466, 437, 288, 0, 0, 467, 370, 579, 447, + 593, 619, 620, 264, 403, 605, 516, 613, 637, 227, + 261, 417, 501, 599, 490, 395, 575, 576, 329, 489, + 296, 203, 367, 625, 225, 476, 369, 243, 232, 581, + 602, 290, 453, 632, 214, 511, 591, 240, 480, 0, + 0, 640, 248, 500, 216, 588, 499, 391, 326, 327, + 215, 0, 454, 269, 294, 0, 0, 259, 412, 954, + 955, 257, 641, 799, 612, 221, 0, 611, 405, 578, + 589, 392, 381, 220, 587, 390, 380, 334, 807, 808, + 281, 307, 884, 883, 882, 306, 308, 880, 881, 879, + 208, 600, 0, 209, 0, 495, 601, 642, 449, 213, + 235, 236, 238, 0, 280, 284, 292, 295, 303, 304, + 313, 365, 416, 443, 439, 448, 0, 573, 594, 606, + 617, 623, 624, 626, 627, 628, 629, 630, 633, 631, + 404, 311, 491, 333, 371, 0, 0, 422, 469, 241, + 598, 492, 890, 912, 901, 767, 768, 891, 892, 916, + 893, 770, 771, 913, 914, 764, 765, 769, 915, 917, + 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, + 653, 654, 655, 656, 657, 658, 659, 660, 638, 502, + 508, 503, 504, 505, 506, 507, 0, 509, 904, 754, + 753, 0, 760, 0, 789, 790, 792, 796, 797, 798, + 809, 856, 857, 865, 867, 868, 866, 869, 870, 871, + 874, 875, 876, 877, 872, 873, 878, 772, 776, 773, + 774, 775, 787, 777, 778, 779, 780, 781, 
782, 783, + 784, 785, 786, 788, 927, 928, 929, 930, 931, 932, + 802, 806, 805, 803, 804, 800, 801, 828, 827, 829, + 830, 831, 832, 833, 834, 836, 835, 837, 838, 839, + 840, 841, 842, 810, 811, 814, 815, 813, 812, 816, + 825, 826, 817, 818, 819, 820, 821, 822, 824, 823, + 843, 844, 845, 846, 847, 849, 848, 852, 853, 851, + 850, 855, 854, 752, 198, 222, 366, 95, 451, 289, + 639, 608, 603, 207, 224, 918, 263, 919, 0, 0, + 923, 0, 0, 0, 925, 924, 0, 926, 0, 888, + 887, 0, 0, 920, 921, 0, 922, 0, 0, 200, + 202, 210, 223, 233, 237, 244, 262, 277, 279, 286, + 299, 310, 318, 319, 322, 328, 378, 384, 385, 386, + 387, 406, 407, 408, 411, 414, 415, 418, 420, 421, + 424, 428, 432, 433, 434, 436, 438, 440, 452, 457, + 471, 472, 473, 474, 475, 478, 479, 484, 485, 486, + 487, 488, 496, 497, 510, 580, 582, 597, 615, 621, + 477, 933, 934, 935, 936, 937, 938, 939, 940, 300, + 592, 622, 590, 634, 616, 435, 376, 0, 0, 379, + 282, 305, 320, 0, 607, 498, 228, 463, 291, 252, + 958, 0, 212, 247, 231, 260, 275, 278, 324, 389, + 397, 426, 431, 297, 272, 245, 456, 242, 481, 513, + 514, 515, 517, 393, 267, 430, 394, 0, 374, 570, + 571, 316, 522, 0, 763, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 413, 0, 0, 0, 0, 751, + 0, 0, 0, 271, 756, 0, 0, 0, 364, 268, + 0, 0, 427, 0, 205, 0, 483, 253, 375, 372, + 577, 283, 274, 270, 251, 317, 383, 425, 512, 419, + 762, 368, 0, 0, 493, 398, 0, 0, 0, 0, + 0, 758, 759, 0, 0, 0, 0, 0, 0, 0, + 0, 323, 249, 325, 204, 410, 494, 287, 0, 96, + 0, 0, 959, 943, 735, 909, 947, 960, 961, 962, + 963, 948, 0, 239, 949, 950, 246, 951, 0, 908, + 793, 795, 794, 858, 859, 860, 861, 862, 863, 864, + 791, 956, 964, 965, 0, 266, 321, 273, 265, 574, + 0, 0, 0, 0, 0, 0, 0, 0, 230, 0, + 0, 0, 0, 0, 0, 0, 731, 748, 0, 761, + 0, 0, 0, 276, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 745, 746, 0, 0, 0, 0, 903, 0, 747, 0, + 0, 755, 966, 967, 968, 969, 970, 971, 972, 973, + 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, + 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, + 994, 995, 
996, 997, 998, 999, 1000, 1001, 1002, 1003, + 1004, 1005, 1006, 1007, 757, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 298, 0, 399, 258, 0, + 450, 902, 0, 0, 618, 0, 0, 900, 0, 0, + 0, 0, 363, 0, 330, 199, 226, 0, 0, 409, + 458, 470, 0, 0, 0, 953, 0, 468, 423, 596, + 234, 285, 455, 429, 466, 437, 288, 4010, 0, 467, + 370, 579, 447, 593, 619, 620, 264, 403, 605, 516, + 613, 637, 227, 261, 417, 501, 599, 490, 395, 575, + 576, 329, 489, 296, 203, 367, 625, 225, 476, 369, + 243, 232, 581, 602, 290, 453, 632, 214, 511, 591, + 240, 480, 0, 0, 640, 248, 500, 216, 588, 499, + 391, 326, 327, 215, 0, 454, 269, 294, 0, 0, + 259, 412, 954, 955, 257, 641, 799, 612, 221, 0, + 611, 405, 578, 589, 392, 381, 220, 587, 390, 380, + 334, 807, 808, 281, 307, 884, 883, 882, 306, 308, + 880, 881, 879, 208, 600, 0, 209, 0, 495, 601, + 642, 449, 213, 235, 236, 238, 0, 280, 284, 292, + 295, 303, 304, 313, 365, 416, 443, 439, 448, 0, + 573, 594, 606, 617, 623, 624, 626, 627, 628, 629, + 630, 633, 631, 404, 311, 491, 333, 371, 0, 0, + 422, 469, 241, 598, 492, 890, 912, 901, 767, 768, + 891, 892, 916, 893, 770, 771, 913, 914, 764, 765, + 769, 915, 917, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, + 660, 638, 502, 508, 503, 504, 505, 506, 507, 0, + 509, 904, 754, 753, 0, 760, 0, 789, 790, 792, + 796, 797, 798, 809, 856, 857, 865, 867, 868, 866, + 869, 870, 871, 874, 875, 876, 877, 872, 873, 878, + 772, 776, 773, 774, 775, 787, 777, 778, 779, 780, + 781, 782, 783, 784, 785, 786, 788, 927, 928, 929, + 930, 931, 932, 802, 806, 805, 803, 804, 800, 801, + 828, 827, 829, 830, 831, 832, 833, 834, 836, 835, + 837, 838, 839, 840, 841, 842, 810, 811, 814, 815, + 813, 812, 816, 825, 826, 817, 818, 819, 820, 821, + 822, 824, 823, 843, 844, 845, 846, 847, 849, 848, + 852, 853, 851, 850, 855, 854, 752, 198, 222, 366, + 0, 451, 289, 639, 608, 603, 207, 224, 918, 263, + 919, 0, 0, 923, 0, 0, 0, 925, 924, 0, + 926, 0, 888, 887, 0, 0, 920, 921, 0, 922, + 0, 0, 200, 202, 210, 223, 
233, 237, 244, 262, + 277, 279, 286, 299, 310, 318, 319, 322, 328, 378, + 384, 385, 386, 387, 406, 407, 408, 411, 414, 415, + 418, 420, 421, 424, 428, 432, 433, 434, 436, 438, + 440, 452, 457, 471, 472, 473, 474, 475, 478, 479, + 484, 485, 486, 487, 488, 496, 497, 510, 580, 582, + 597, 615, 621, 477, 933, 934, 935, 936, 937, 938, + 939, 940, 300, 592, 622, 590, 634, 616, 435, 376, + 0, 0, 379, 282, 305, 320, 0, 607, 498, 228, + 463, 291, 252, 958, 0, 212, 247, 231, 260, 275, + 278, 324, 389, 397, 426, 431, 297, 272, 245, 456, + 242, 481, 513, 514, 515, 517, 393, 267, 430, 394, + 0, 374, 570, 571, 316, 522, 0, 763, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 413, 0, 0, + 0, 0, 751, 0, 0, 0, 271, 756, 0, 0, + 0, 364, 268, 0, 0, 427, 0, 205, 0, 483, + 253, 375, 372, 577, 283, 274, 270, 251, 317, 383, + 425, 512, 419, 762, 368, 0, 0, 493, 398, 0, + 0, 0, 0, 0, 758, 759, 0, 0, 0, 0, + 0, 0, 0, 0, 323, 249, 325, 204, 410, 494, + 287, 0, 96, 0, 1719, 959, 943, 735, 909, 947, + 960, 961, 962, 963, 948, 0, 239, 949, 950, 246, + 951, 0, 908, 793, 795, 794, 858, 859, 860, 861, + 862, 863, 864, 791, 956, 964, 965, 0, 266, 321, + 273, 265, 574, 0, 0, 0, 0, 0, 0, 0, + 0, 230, 0, 0, 0, 0, 0, 0, 0, 731, + 748, 0, 761, 0, 0, 0, 276, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 745, 746, 0, 0, 0, 0, 903, + 0, 747, 0, 0, 755, 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, - 991, 992, 993, 744, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 295, 0, 396, 255, 0, 447, - 888, 0, 0, 605, 0, 0, 886, 0, 0, 0, - 0, 360, 0, 327, 197, 223, 0, 0, 406, 455, - 467, 0, 0, 0, 939, 0, 465, 420, 583, 231, - 282, 452, 426, 463, 434, 285, 0, 0, 464, 367, - 568, 444, 580, 606, 607, 261, 400, 592, 505, 600, - 624, 224, 258, 414, 498, 586, 487, 392, 564, 565, - 326, 486, 293, 201, 364, 612, 222, 473, 366, 240, - 229, 570, 589, 287, 450, 619, 211, 500, 578, 237, - 477, 0, 0, 627, 245, 497, 213, 575, 496, 388, - 323, 324, 212, 0, 451, 266, 291, 0, 0, 
256, - 409, 940, 941, 254, 628, 785, 599, 218, 0, 598, - 402, 567, 576, 389, 378, 217, 574, 387, 377, 331, - 793, 794, 278, 304, 870, 869, 868, 303, 305, 866, - 867, 865, 205, 587, 0, 206, 0, 492, 588, 629, - 446, 210, 232, 233, 235, 0, 277, 281, 289, 292, - 300, 301, 310, 362, 413, 440, 436, 445, 0, 562, - 581, 593, 604, 610, 611, 613, 614, 615, 616, 617, - 620, 618, 401, 308, 488, 330, 368, 0, 0, 419, - 466, 238, 585, 489, 876, 898, 887, 754, 755, 877, - 878, 902, 879, 757, 758, 899, 900, 751, 752, 756, - 901, 903, 630, 631, 632, 633, 634, 635, 636, 637, - 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, - 625, 890, 741, 740, 0, 747, 0, 775, 776, 778, - 782, 783, 784, 795, 842, 843, 851, 853, 854, 852, - 855, 856, 857, 860, 861, 862, 863, 858, 859, 864, - 759, 763, 760, 761, 762, 774, 764, 765, 766, 767, - 768, 769, 770, 771, 772, 773, 913, 914, 915, 916, - 917, 918, 788, 792, 791, 789, 790, 786, 787, 814, - 813, 815, 816, 817, 818, 819, 820, 822, 821, 823, - 824, 825, 826, 827, 828, 796, 797, 800, 801, 799, - 798, 802, 811, 812, 803, 804, 805, 806, 807, 808, - 810, 809, 829, 830, 831, 832, 833, 835, 834, 838, - 839, 837, 836, 841, 840, 739, 196, 219, 363, 0, - 448, 286, 626, 595, 590, 204, 221, 904, 260, 905, - 0, 0, 909, 0, 0, 0, 911, 910, 0, 912, - 0, 874, 873, 0, 0, 906, 907, 0, 908, 0, - 0, 198, 200, 207, 220, 230, 234, 241, 259, 274, - 276, 283, 296, 307, 315, 316, 319, 325, 375, 381, - 382, 383, 384, 403, 404, 405, 408, 411, 412, 415, - 417, 418, 421, 425, 429, 430, 431, 433, 435, 437, - 449, 454, 468, 469, 470, 471, 472, 475, 476, 481, - 482, 483, 484, 485, 493, 494, 499, 569, 571, 584, - 602, 608, 474, 919, 920, 921, 922, 923, 924, 925, - 926, 297, 579, 609, 577, 621, 603, 432, 373, 0, - 0, 376, 279, 302, 317, 0, 594, 495, 225, 460, - 288, 249, 944, 0, 209, 244, 228, 257, 272, 275, - 321, 386, 394, 423, 428, 294, 269, 242, 453, 239, - 478, 502, 503, 504, 506, 390, 264, 427, 391, 0, - 371, 559, 560, 313, 511, 0, 750, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 410, 0, 
0, 0, - 0, 738, 0, 0, 0, 268, 743, 0, 0, 0, - 361, 265, 0, 0, 424, 0, 203, 0, 480, 250, - 372, 369, 566, 280, 271, 267, 248, 314, 380, 422, - 501, 416, 749, 365, 0, 0, 490, 395, 0, 0, - 0, 0, 0, 745, 746, 0, 0, 0, 0, 0, - 0, 0, 0, 320, 246, 322, 202, 407, 491, 284, - 0, 94, 0, 0, 945, 929, 722, 895, 933, 946, - 947, 948, 949, 934, 0, 236, 935, 936, 243, 937, - 0, 894, 779, 781, 780, 844, 845, 846, 847, 848, - 849, 850, 777, 942, 950, 951, 0, 263, 318, 270, - 262, 563, 0, 0, 0, 0, 0, 0, 0, 227, - 0, 0, 0, 0, 0, 0, 0, 718, 735, 0, - 748, 0, 0, 0, 273, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 732, 733, 0, 0, 0, 0, 889, 0, 734, - 0, 0, 742, 952, 953, 954, 955, 956, 957, 958, - 959, 960, 961, 962, 963, 964, 965, 966, 967, 968, + 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, + 1001, 1002, 1003, 1004, 1005, 1006, 1007, 757, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 298, 0, + 399, 258, 0, 450, 902, 0, 0, 618, 0, 0, + 900, 0, 0, 0, 0, 363, 0, 330, 199, 226, + 0, 0, 409, 458, 470, 0, 0, 0, 953, 0, + 468, 423, 596, 234, 285, 455, 429, 466, 437, 288, + 0, 0, 467, 370, 579, 447, 593, 619, 620, 264, + 403, 605, 516, 613, 637, 227, 261, 417, 501, 599, + 490, 395, 575, 576, 329, 489, 296, 203, 367, 625, + 225, 476, 369, 243, 232, 581, 602, 290, 453, 632, + 214, 511, 591, 240, 480, 0, 0, 640, 248, 500, + 216, 588, 499, 391, 326, 327, 215, 0, 454, 269, + 294, 0, 0, 259, 412, 954, 955, 257, 641, 799, + 612, 221, 0, 611, 405, 578, 589, 392, 381, 220, + 587, 390, 380, 334, 807, 808, 281, 307, 884, 883, + 882, 306, 308, 880, 881, 879, 208, 600, 0, 209, + 0, 495, 601, 642, 449, 213, 235, 236, 238, 0, + 280, 284, 292, 295, 303, 304, 313, 365, 416, 443, + 439, 448, 0, 573, 594, 606, 617, 623, 624, 626, + 627, 628, 629, 630, 633, 631, 404, 311, 491, 333, + 371, 0, 0, 422, 469, 241, 598, 492, 890, 912, + 901, 767, 768, 891, 892, 916, 893, 770, 771, 913, + 914, 764, 765, 769, 915, 917, 643, 644, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 638, 
502, 508, 503, 504, 505, + 506, 507, 0, 509, 904, 754, 753, 0, 760, 0, + 789, 790, 792, 796, 797, 798, 809, 856, 857, 865, + 867, 868, 866, 869, 870, 871, 874, 875, 876, 877, + 872, 873, 878, 772, 776, 773, 774, 775, 787, 777, + 778, 779, 780, 781, 782, 783, 784, 785, 786, 788, + 927, 928, 929, 930, 931, 932, 802, 806, 805, 803, + 804, 800, 801, 828, 827, 829, 830, 831, 832, 833, + 834, 836, 835, 837, 838, 839, 840, 841, 842, 810, + 811, 814, 815, 813, 812, 816, 825, 826, 817, 818, + 819, 820, 821, 822, 824, 823, 843, 844, 845, 846, + 847, 849, 848, 852, 853, 851, 850, 855, 854, 752, + 198, 222, 366, 0, 451, 289, 639, 608, 603, 207, + 224, 918, 263, 919, 0, 0, 923, 0, 0, 0, + 925, 924, 0, 926, 0, 888, 887, 0, 0, 920, + 921, 0, 922, 0, 0, 200, 202, 210, 223, 233, + 237, 244, 262, 277, 279, 286, 299, 310, 318, 319, + 322, 328, 378, 384, 385, 386, 387, 406, 407, 408, + 411, 414, 415, 418, 420, 421, 424, 428, 432, 433, + 434, 436, 438, 440, 452, 457, 471, 472, 473, 474, + 475, 478, 479, 484, 485, 486, 487, 488, 496, 497, + 510, 580, 582, 597, 615, 621, 477, 933, 934, 935, + 936, 937, 938, 939, 940, 300, 592, 622, 590, 634, + 616, 435, 376, 0, 0, 379, 282, 305, 320, 0, + 607, 498, 228, 463, 291, 252, 958, 0, 212, 247, + 231, 260, 275, 278, 324, 389, 397, 426, 431, 297, + 272, 245, 456, 242, 481, 513, 514, 515, 517, 393, + 267, 430, 394, 0, 374, 570, 571, 316, 522, 0, + 763, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 413, 0, 0, 0, 0, 751, 0, 0, 0, 271, + 756, 0, 0, 0, 364, 268, 0, 0, 427, 0, + 205, 0, 483, 253, 375, 372, 577, 283, 274, 270, + 251, 317, 383, 425, 512, 419, 762, 368, 0, 0, + 493, 398, 0, 0, 0, 0, 0, 758, 759, 0, + 0, 0, 0, 0, 0, 0, 0, 323, 249, 325, + 204, 410, 494, 287, 0, 96, 0, 0, 959, 943, + 735, 909, 947, 960, 961, 962, 963, 948, 0, 239, + 949, 950, 246, 951, 0, 908, 793, 795, 794, 858, + 859, 860, 861, 862, 863, 864, 791, 956, 964, 965, + 0, 266, 321, 273, 265, 574, 0, 0, 0, 0, + 0, 0, 0, 0, 230, 0, 0, 0, 0, 0, + 0, 0, 731, 748, 0, 761, 0, 0, 0, 276, + 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 745, 746, 1050, 0, + 0, 0, 903, 0, 747, 0, 0, 755, 966, 967, + 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, + 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, + 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, + 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, + 757, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 298, 0, 399, 258, 0, 450, 902, 0, 0, + 618, 0, 0, 900, 0, 0, 0, 0, 363, 0, + 330, 199, 226, 0, 0, 409, 458, 470, 0, 0, + 0, 953, 0, 468, 423, 596, 234, 285, 455, 429, + 466, 437, 288, 0, 0, 467, 370, 579, 447, 593, + 619, 620, 264, 403, 605, 516, 613, 637, 227, 261, + 417, 501, 599, 490, 395, 575, 576, 329, 489, 296, + 203, 367, 625, 225, 476, 369, 243, 232, 581, 602, + 290, 453, 632, 214, 511, 591, 240, 480, 0, 0, + 640, 248, 500, 216, 588, 499, 391, 326, 327, 215, + 0, 454, 269, 294, 0, 0, 259, 412, 954, 955, + 257, 641, 799, 612, 221, 0, 611, 405, 578, 589, + 392, 381, 220, 587, 390, 380, 334, 807, 808, 281, + 307, 884, 883, 882, 306, 308, 880, 881, 879, 208, + 600, 0, 209, 0, 495, 601, 642, 449, 213, 235, + 236, 238, 0, 280, 284, 292, 295, 303, 304, 313, + 365, 416, 443, 439, 448, 0, 573, 594, 606, 617, + 623, 624, 626, 627, 628, 629, 630, 633, 631, 404, + 311, 491, 333, 371, 0, 0, 422, 469, 241, 598, + 492, 890, 912, 901, 767, 768, 891, 892, 916, 893, + 770, 771, 913, 914, 764, 765, 769, 915, 917, 643, + 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 659, 660, 638, 502, 508, + 503, 504, 505, 506, 507, 0, 509, 904, 754, 753, + 0, 760, 0, 789, 790, 792, 796, 797, 798, 809, + 856, 857, 865, 867, 868, 866, 869, 870, 871, 874, + 875, 876, 877, 872, 873, 878, 772, 776, 773, 774, + 775, 787, 777, 778, 779, 780, 781, 782, 783, 784, + 785, 786, 788, 927, 928, 929, 930, 931, 932, 802, + 806, 805, 803, 804, 800, 801, 828, 827, 829, 830, + 831, 832, 833, 834, 836, 835, 837, 838, 839, 840, + 841, 842, 810, 811, 814, 815, 813, 812, 816, 825, + 826, 817, 818, 819, 820, 821, 822, 824, 823, 843, + 
844, 845, 846, 847, 849, 848, 852, 853, 851, 850, + 855, 854, 752, 198, 222, 366, 0, 451, 289, 639, + 608, 603, 207, 224, 918, 263, 919, 0, 0, 923, + 0, 0, 0, 925, 924, 0, 926, 0, 888, 887, + 0, 0, 920, 921, 0, 922, 0, 0, 200, 202, + 210, 223, 233, 237, 244, 262, 277, 279, 286, 299, + 310, 318, 319, 322, 328, 378, 384, 385, 386, 387, + 406, 407, 408, 411, 414, 415, 418, 420, 421, 424, + 428, 432, 433, 434, 436, 438, 440, 452, 457, 471, + 472, 473, 474, 475, 478, 479, 484, 485, 486, 487, + 488, 496, 497, 510, 580, 582, 597, 615, 621, 477, + 933, 934, 935, 936, 937, 938, 939, 940, 300, 592, + 622, 590, 634, 616, 435, 376, 0, 0, 379, 282, + 305, 320, 0, 607, 498, 228, 463, 291, 252, 958, + 0, 212, 247, 231, 260, 275, 278, 324, 389, 397, + 426, 431, 297, 272, 245, 456, 242, 481, 513, 514, + 515, 517, 393, 267, 430, 394, 0, 374, 570, 571, + 316, 522, 0, 763, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 413, 0, 0, 0, 0, 751, 0, + 0, 0, 271, 756, 0, 0, 0, 364, 268, 0, + 0, 427, 0, 205, 0, 483, 253, 375, 372, 577, + 283, 274, 270, 251, 317, 383, 425, 512, 419, 762, + 368, 0, 0, 493, 398, 0, 0, 0, 0, 0, + 758, 759, 0, 0, 0, 0, 0, 0, 0, 0, + 323, 249, 325, 204, 410, 494, 287, 0, 96, 0, + 0, 959, 943, 735, 909, 947, 960, 961, 962, 963, + 948, 0, 239, 949, 950, 246, 951, 0, 908, 793, + 795, 794, 858, 859, 860, 861, 862, 863, 864, 791, + 956, 964, 965, 0, 266, 321, 273, 265, 574, 0, + 0, 0, 0, 0, 0, 0, 0, 230, 0, 0, + 0, 0, 0, 0, 0, 731, 748, 0, 761, 0, + 0, 0, 276, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 745, + 746, 0, 0, 0, 0, 903, 0, 747, 0, 0, + 755, 966, 967, 968, 969, 970, 971, 972, 973, 974, + 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, + 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, + 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, + 1005, 1006, 1007, 757, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 298, 0, 399, 258, 0, 450, + 902, 0, 0, 618, 0, 0, 900, 0, 0, 0, + 0, 363, 0, 330, 199, 226, 0, 0, 409, 458, + 470, 0, 0, 0, 953, 0, 468, 423, 596, 234, + 285, 455, 429, 466, 
437, 288, 0, 0, 467, 370, + 579, 447, 593, 619, 620, 264, 403, 605, 516, 613, + 637, 227, 261, 417, 501, 599, 490, 395, 575, 576, + 329, 489, 296, 203, 367, 625, 225, 476, 369, 243, + 232, 581, 602, 290, 453, 632, 214, 511, 591, 240, + 480, 0, 0, 640, 248, 500, 216, 588, 499, 391, + 326, 327, 215, 0, 454, 269, 294, 0, 0, 259, + 412, 954, 955, 257, 641, 799, 612, 221, 0, 611, + 405, 578, 589, 392, 381, 220, 587, 390, 380, 334, + 807, 808, 281, 307, 884, 883, 882, 306, 308, 880, + 881, 879, 208, 600, 0, 209, 0, 495, 601, 642, + 449, 213, 235, 236, 238, 0, 280, 284, 292, 295, + 303, 304, 313, 365, 416, 443, 439, 448, 0, 573, + 594, 606, 617, 623, 624, 626, 627, 628, 629, 630, + 633, 631, 404, 311, 491, 333, 371, 0, 0, 422, + 469, 241, 598, 492, 890, 912, 901, 767, 768, 891, + 892, 916, 893, 770, 771, 913, 914, 764, 765, 769, + 915, 917, 643, 644, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, + 638, 502, 508, 503, 504, 505, 506, 507, 0, 509, + 904, 754, 753, 0, 760, 0, 789, 790, 792, 796, + 797, 798, 809, 856, 857, 865, 867, 868, 866, 869, + 870, 871, 874, 875, 876, 877, 872, 873, 878, 772, + 776, 773, 774, 775, 787, 777, 778, 779, 780, 781, + 782, 783, 784, 785, 786, 788, 927, 928, 929, 930, + 931, 932, 802, 806, 805, 803, 804, 800, 801, 828, + 827, 829, 830, 831, 832, 833, 834, 836, 835, 837, + 838, 839, 840, 841, 842, 810, 811, 814, 815, 813, + 812, 816, 825, 826, 817, 818, 819, 820, 821, 822, + 824, 823, 843, 844, 845, 846, 847, 849, 848, 852, + 853, 851, 850, 855, 854, 752, 198, 222, 366, 0, + 451, 289, 639, 608, 603, 207, 224, 918, 263, 919, + 0, 0, 923, 0, 0, 0, 925, 924, 0, 926, + 0, 888, 887, 0, 0, 920, 921, 0, 922, 0, + 0, 200, 202, 210, 223, 233, 237, 244, 262, 277, + 279, 286, 299, 310, 318, 319, 322, 328, 378, 384, + 385, 386, 387, 406, 407, 408, 411, 414, 415, 418, + 420, 421, 424, 428, 432, 433, 434, 436, 438, 440, + 452, 457, 471, 472, 473, 474, 475, 478, 479, 484, + 485, 486, 487, 488, 496, 497, 510, 580, 582, 597, + 
615, 621, 477, 933, 934, 935, 936, 937, 938, 939, + 940, 300, 592, 622, 590, 634, 616, 435, 376, 0, + 0, 379, 282, 305, 320, 0, 607, 498, 228, 463, + 291, 252, 958, 0, 212, 247, 231, 260, 275, 278, + 324, 389, 397, 426, 431, 297, 272, 245, 456, 242, + 481, 513, 514, 515, 517, 393, 267, 430, 394, 0, + 374, 570, 571, 316, 522, 0, 763, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 413, 0, 0, 0, + 0, 751, 0, 0, 0, 271, 756, 0, 0, 0, + 364, 268, 0, 0, 427, 0, 205, 0, 483, 253, + 375, 372, 577, 283, 274, 270, 251, 317, 383, 425, + 512, 419, 762, 368, 0, 0, 493, 398, 0, 0, + 0, 0, 0, 758, 759, 0, 0, 0, 0, 0, + 0, 0, 0, 323, 249, 325, 204, 410, 494, 287, + 0, 96, 0, 0, 959, 943, 735, 909, 947, 960, + 961, 962, 963, 948, 0, 239, 949, 950, 246, 951, + 0, 908, 793, 795, 794, 858, 859, 860, 861, 862, + 863, 864, 791, 956, 964, 965, 0, 266, 321, 273, + 265, 574, 0, 0, 0, 0, 0, 0, 0, 0, + 230, 0, 0, 0, 0, 0, 0, 0, 731, 748, + 0, 761, 0, 0, 0, 276, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 745, 746, 0, 0, 0, 0, 903, 0, + 747, 0, 0, 755, 966, 967, 968, 969, 970, 971, + 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, + 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, + 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, + 1002, 1003, 1004, 1005, 1006, 1007, 3098, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 298, 0, 399, + 258, 0, 450, 902, 0, 0, 618, 0, 0, 900, + 0, 0, 0, 0, 363, 0, 330, 199, 226, 0, + 0, 409, 458, 470, 0, 0, 0, 953, 0, 468, + 423, 596, 234, 285, 455, 429, 466, 437, 288, 0, + 0, 467, 370, 579, 447, 593, 619, 620, 264, 403, + 605, 516, 613, 637, 227, 261, 417, 501, 599, 490, + 395, 575, 576, 329, 489, 296, 203, 367, 625, 225, + 476, 369, 243, 232, 581, 602, 290, 453, 632, 214, + 511, 591, 240, 480, 0, 0, 640, 248, 500, 216, + 588, 499, 391, 326, 327, 215, 0, 454, 269, 294, + 0, 0, 259, 412, 954, 955, 257, 641, 799, 612, + 221, 0, 611, 405, 578, 589, 392, 381, 220, 587, + 390, 380, 334, 807, 808, 281, 307, 884, 883, 882, + 306, 308, 880, 881, 879, 208, 600, 0, 209, 0, + 495, 601, 
642, 449, 213, 235, 236, 238, 0, 280, + 284, 292, 295, 303, 304, 313, 365, 416, 443, 439, + 448, 0, 573, 594, 606, 617, 623, 624, 626, 627, + 628, 629, 630, 633, 631, 404, 311, 491, 333, 371, + 0, 0, 422, 469, 241, 598, 492, 890, 912, 901, + 767, 768, 891, 892, 916, 893, 770, 771, 913, 914, + 764, 765, 769, 915, 917, 643, 644, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 638, 502, 508, 503, 504, 505, 506, + 507, 0, 509, 904, 754, 753, 0, 760, 0, 789, + 790, 792, 796, 797, 798, 809, 856, 857, 865, 867, + 868, 866, 869, 870, 871, 874, 875, 876, 877, 872, + 873, 878, 772, 776, 773, 774, 775, 787, 777, 778, + 779, 780, 781, 782, 783, 784, 785, 786, 788, 927, + 928, 929, 930, 931, 932, 802, 806, 805, 803, 804, + 800, 801, 828, 827, 829, 830, 831, 832, 833, 834, + 836, 835, 837, 838, 839, 840, 841, 842, 810, 811, + 814, 815, 813, 812, 816, 825, 826, 817, 818, 819, + 820, 821, 822, 824, 823, 843, 844, 845, 846, 847, + 849, 848, 852, 853, 851, 850, 855, 854, 752, 198, + 222, 366, 0, 451, 289, 639, 608, 603, 207, 224, + 918, 263, 919, 0, 0, 923, 0, 0, 0, 925, + 924, 0, 926, 0, 888, 887, 0, 0, 920, 921, + 0, 922, 0, 0, 200, 202, 210, 223, 233, 237, + 244, 262, 277, 279, 286, 299, 310, 318, 319, 322, + 328, 378, 384, 385, 386, 387, 406, 407, 408, 411, + 414, 415, 418, 420, 421, 424, 428, 432, 433, 434, + 436, 438, 440, 452, 457, 471, 472, 473, 474, 475, + 478, 479, 484, 485, 486, 487, 488, 496, 497, 510, + 580, 582, 597, 615, 621, 477, 933, 934, 935, 936, + 937, 938, 939, 940, 300, 592, 622, 590, 634, 616, + 435, 376, 0, 0, 379, 282, 305, 320, 0, 607, + 498, 228, 463, 291, 252, 958, 0, 212, 247, 231, + 260, 275, 278, 324, 389, 397, 426, 431, 297, 272, + 245, 456, 242, 481, 513, 514, 515, 517, 393, 267, + 430, 394, 0, 374, 570, 571, 316, 522, 0, 763, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 413, + 0, 0, 0, 0, 751, 0, 0, 0, 271, 756, + 0, 0, 0, 364, 268, 0, 0, 427, 0, 205, + 0, 483, 253, 375, 372, 577, 283, 274, 270, 251, + 317, 383, 425, 512, 419, 
762, 368, 0, 0, 493, + 398, 0, 0, 0, 0, 0, 758, 759, 0, 0, + 0, 0, 0, 0, 0, 0, 323, 249, 325, 204, + 410, 494, 287, 0, 96, 0, 0, 959, 943, 735, + 909, 947, 960, 961, 962, 963, 948, 0, 239, 949, + 950, 246, 951, 0, 908, 793, 795, 794, 858, 859, + 860, 861, 862, 863, 864, 791, 956, 964, 965, 0, + 266, 321, 273, 265, 574, 0, 0, 0, 0, 0, + 0, 0, 0, 230, 0, 0, 0, 0, 0, 0, + 0, 731, 748, 0, 761, 0, 0, 0, 276, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 745, 746, 0, 0, 0, + 0, 903, 0, 747, 0, 0, 755, 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, - 989, 990, 991, 992, 993, 3057, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 295, 0, 396, 255, - 0, 447, 888, 0, 0, 605, 0, 0, 886, 0, - 0, 0, 0, 360, 0, 327, 197, 223, 0, 0, - 406, 455, 467, 0, 0, 0, 939, 0, 465, 420, - 583, 231, 282, 452, 426, 463, 434, 285, 0, 0, - 464, 367, 568, 444, 580, 606, 607, 261, 400, 592, - 505, 600, 624, 224, 258, 414, 498, 586, 487, 392, - 564, 565, 326, 486, 293, 201, 364, 612, 222, 473, - 366, 240, 229, 570, 589, 287, 450, 619, 211, 500, - 578, 237, 477, 0, 0, 627, 245, 497, 213, 575, - 496, 388, 323, 324, 212, 0, 451, 266, 291, 0, - 0, 256, 409, 940, 941, 254, 628, 785, 599, 218, - 0, 598, 402, 567, 576, 389, 378, 217, 574, 387, - 377, 331, 793, 794, 278, 304, 870, 869, 868, 303, - 305, 866, 867, 865, 205, 587, 0, 206, 0, 492, - 588, 629, 446, 210, 232, 233, 235, 0, 277, 281, - 289, 292, 300, 301, 310, 362, 413, 440, 436, 445, - 0, 562, 581, 593, 604, 610, 611, 613, 614, 615, - 616, 617, 620, 618, 401, 308, 488, 330, 368, 0, - 0, 419, 466, 238, 585, 489, 876, 898, 887, 754, - 755, 877, 878, 902, 879, 757, 758, 899, 900, 751, - 752, 756, 901, 903, 630, 631, 632, 633, 634, 635, - 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, - 646, 647, 625, 890, 741, 740, 0, 747, 0, 775, - 776, 778, 782, 783, 784, 795, 842, 843, 851, 853, - 854, 852, 855, 856, 857, 860, 861, 862, 863, 858, - 859, 864, 759, 763, 760, 761, 762, 774, 764, 765, - 766, 
767, 768, 769, 770, 771, 772, 773, 913, 914, - 915, 916, 917, 918, 788, 792, 791, 789, 790, 786, - 787, 814, 813, 815, 816, 817, 818, 819, 820, 822, - 821, 823, 824, 825, 826, 827, 828, 796, 797, 800, - 801, 799, 798, 802, 811, 812, 803, 804, 805, 806, - 807, 808, 810, 809, 829, 830, 831, 832, 833, 835, - 834, 838, 839, 837, 836, 841, 840, 739, 196, 219, - 363, 0, 448, 286, 626, 595, 590, 204, 221, 904, - 260, 905, 0, 0, 909, 0, 0, 0, 911, 910, - 0, 912, 0, 874, 873, 0, 0, 906, 907, 0, - 908, 0, 0, 198, 200, 207, 220, 230, 234, 241, - 259, 274, 276, 283, 296, 307, 315, 316, 319, 325, - 375, 381, 382, 383, 384, 403, 404, 405, 408, 411, - 412, 415, 417, 418, 421, 425, 429, 430, 431, 433, - 435, 437, 449, 454, 468, 469, 470, 471, 472, 475, - 476, 481, 482, 483, 484, 485, 493, 494, 499, 569, - 571, 584, 602, 608, 474, 919, 920, 921, 922, 923, - 924, 925, 926, 297, 579, 609, 577, 621, 603, 432, - 373, 0, 0, 376, 279, 302, 317, 0, 594, 495, - 225, 460, 288, 249, 944, 0, 209, 244, 228, 257, - 272, 275, 321, 386, 394, 423, 428, 294, 269, 242, - 453, 239, 478, 502, 503, 504, 506, 390, 264, 427, - 391, 0, 371, 559, 560, 313, 511, 0, 750, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 410, 0, - 0, 0, 0, 738, 0, 0, 0, 268, 743, 0, - 0, 0, 361, 265, 0, 0, 424, 0, 203, 0, - 480, 250, 372, 369, 566, 280, 271, 267, 248, 314, - 380, 422, 501, 416, 749, 365, 0, 0, 490, 395, - 0, 0, 0, 0, 0, 745, 746, 0, 0, 0, - 0, 0, 0, 0, 0, 320, 246, 322, 202, 407, - 491, 284, 0, 94, 0, 0, 945, 929, 722, 895, - 933, 946, 947, 948, 949, 934, 0, 236, 935, 936, - 243, 937, 0, 894, 779, 781, 780, 844, 845, 846, - 847, 848, 849, 850, 777, 942, 950, 951, 0, 263, - 318, 270, 262, 563, 0, 0, 0, 0, 0, 0, - 0, 227, 0, 0, 0, 0, 0, 0, 0, 718, - 735, 0, 748, 0, 0, 0, 273, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 732, 733, 0, 0, 0, 0, 889, - 0, 734, 0, 0, 742, 952, 953, 954, 955, 956, - 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, - 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, - 977, 978, 979, 980, 981, 982, 
983, 984, 985, 986, - 987, 988, 989, 990, 991, 992, 993, 3053, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 295, 0, - 396, 255, 0, 447, 888, 0, 0, 605, 0, 0, - 886, 0, 0, 0, 0, 360, 0, 327, 197, 223, - 0, 0, 406, 455, 467, 0, 0, 0, 939, 0, - 465, 420, 583, 231, 282, 452, 426, 463, 434, 285, - 0, 0, 464, 367, 568, 444, 580, 606, 607, 261, - 400, 592, 505, 600, 624, 224, 258, 414, 498, 586, - 487, 392, 564, 565, 326, 486, 293, 201, 364, 612, - 222, 473, 366, 240, 229, 570, 589, 287, 450, 619, - 211, 500, 578, 237, 477, 0, 0, 627, 245, 497, - 213, 575, 496, 388, 323, 324, 212, 0, 451, 266, - 291, 0, 0, 256, 409, 940, 941, 254, 628, 785, - 599, 218, 0, 598, 402, 567, 576, 389, 378, 217, - 574, 387, 377, 331, 793, 794, 278, 304, 870, 869, - 868, 303, 305, 866, 867, 865, 205, 587, 0, 206, - 0, 492, 588, 629, 446, 210, 232, 233, 235, 0, - 277, 281, 289, 292, 300, 301, 310, 362, 413, 440, - 436, 445, 0, 562, 581, 593, 604, 610, 611, 613, - 614, 615, 616, 617, 620, 618, 401, 308, 488, 330, - 368, 0, 0, 419, 466, 238, 585, 489, 876, 898, - 887, 754, 755, 877, 878, 902, 879, 757, 758, 899, - 900, 751, 752, 756, 901, 903, 630, 631, 632, 633, - 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, - 644, 645, 646, 647, 625, 890, 741, 740, 0, 747, - 0, 775, 776, 778, 782, 783, 784, 795, 842, 843, - 851, 853, 854, 852, 855, 856, 857, 860, 861, 862, - 863, 858, 859, 864, 759, 763, 760, 761, 762, 774, - 764, 765, 766, 767, 768, 769, 770, 771, 772, 773, - 913, 914, 915, 916, 917, 918, 788, 792, 791, 789, - 790, 786, 787, 814, 813, 815, 816, 817, 818, 819, - 820, 822, 821, 823, 824, 825, 826, 827, 828, 796, - 797, 800, 801, 799, 798, 802, 811, 812, 803, 804, - 805, 806, 807, 808, 810, 809, 829, 830, 831, 832, - 833, 835, 834, 838, 839, 837, 836, 841, 840, 739, - 196, 219, 363, 0, 448, 286, 626, 595, 590, 204, - 221, 904, 260, 905, 0, 0, 909, 0, 0, 0, - 911, 910, 0, 912, 0, 874, 873, 0, 0, 906, - 907, 0, 908, 0, 0, 198, 200, 207, 220, 230, - 234, 241, 259, 274, 276, 283, 296, 307, 315, 316, - 319, 
325, 375, 381, 382, 383, 384, 403, 404, 405, - 408, 411, 412, 415, 417, 418, 421, 425, 429, 430, - 431, 433, 435, 437, 449, 454, 468, 469, 470, 471, - 472, 475, 476, 481, 482, 483, 484, 485, 493, 494, - 499, 569, 571, 584, 602, 608, 474, 919, 920, 921, - 922, 923, 924, 925, 926, 297, 579, 609, 577, 621, - 603, 432, 373, 0, 0, 376, 279, 302, 317, 0, - 594, 495, 225, 460, 288, 249, 944, 0, 209, 244, - 228, 257, 272, 275, 321, 386, 394, 423, 428, 294, - 269, 242, 453, 239, 478, 502, 503, 504, 506, 390, - 264, 427, 391, 0, 371, 559, 560, 313, 511, 0, - 750, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 410, 0, 0, 0, 0, 738, 0, 0, 0, 268, - 743, 0, 0, 0, 361, 265, 0, 0, 424, 0, - 203, 0, 480, 250, 372, 369, 566, 280, 271, 267, - 248, 314, 380, 422, 501, 416, 749, 365, 0, 0, - 490, 395, 0, 0, 0, 0, 0, 745, 746, 0, - 0, 0, 0, 0, 0, 0, 0, 320, 246, 322, - 202, 407, 491, 284, 0, 94, 0, 0, 945, 929, - 1057, 895, 933, 946, 947, 948, 949, 934, 0, 236, - 935, 936, 243, 937, 0, 894, 779, 781, 780, 844, - 845, 846, 847, 848, 849, 850, 777, 942, 950, 951, - 0, 263, 318, 270, 262, 563, 0, 0, 0, 0, - 0, 0, 0, 227, 0, 0, 0, 0, 0, 0, - 0, 0, 735, 0, 748, 0, 0, 0, 273, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 732, 733, 0, 0, 0, - 0, 889, 0, 734, 0, 0, 742, 952, 953, 954, - 955, 956, 957, 958, 959, 960, 961, 962, 963, 964, - 965, 966, 967, 968, 969, 970, 971, 972, 973, 974, - 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, - 985, 986, 987, 988, 989, 990, 991, 992, 993, 744, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 295, 0, 396, 255, 0, 447, 888, 0, 0, 605, - 0, 0, 886, 0, 0, 0, 0, 360, 0, 327, - 197, 223, 0, 0, 406, 455, 467, 0, 0, 0, - 939, 0, 465, 420, 583, 231, 282, 452, 426, 463, - 434, 285, 0, 0, 464, 367, 568, 444, 580, 606, - 607, 261, 400, 592, 505, 600, 624, 224, 258, 414, - 498, 586, 487, 392, 564, 565, 326, 486, 293, 201, - 364, 612, 222, 473, 366, 240, 229, 570, 589, 287, - 450, 619, 211, 500, 578, 237, 477, 0, 0, 627, - 245, 497, 213, 575, 496, 388, 323, 324, 212, 0, - 451, 266, 291, 
0, 0, 256, 409, 940, 941, 254, - 628, 785, 599, 218, 0, 598, 402, 567, 576, 389, - 378, 217, 574, 387, 377, 331, 793, 794, 278, 304, - 870, 869, 868, 303, 305, 866, 867, 865, 205, 587, - 0, 206, 0, 492, 588, 629, 446, 210, 232, 233, - 235, 0, 277, 281, 289, 292, 300, 301, 310, 362, - 413, 440, 436, 445, 0, 562, 581, 593, 604, 610, - 611, 613, 614, 615, 616, 617, 620, 618, 401, 308, - 488, 330, 368, 0, 0, 419, 466, 238, 585, 489, - 876, 898, 887, 754, 755, 877, 878, 902, 879, 757, - 758, 899, 900, 751, 752, 756, 901, 903, 630, 631, - 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, - 642, 643, 644, 645, 646, 647, 625, 890, 741, 740, - 0, 747, 0, 775, 776, 778, 782, 783, 784, 795, - 842, 843, 851, 853, 854, 852, 855, 856, 857, 860, - 861, 862, 863, 858, 859, 864, 759, 763, 760, 761, - 762, 774, 764, 765, 766, 767, 768, 769, 770, 771, - 772, 773, 913, 914, 915, 916, 917, 918, 788, 792, - 791, 789, 790, 786, 787, 814, 813, 815, 816, 817, - 818, 819, 820, 822, 821, 823, 824, 825, 826, 827, - 828, 796, 797, 800, 801, 799, 798, 802, 811, 812, - 803, 804, 805, 806, 807, 808, 810, 809, 829, 830, - 831, 832, 833, 835, 834, 838, 839, 837, 836, 841, - 840, 739, 196, 219, 363, 0, 448, 286, 626, 595, - 590, 204, 221, 904, 260, 905, 0, 0, 909, 0, - 0, 0, 911, 910, 0, 912, 0, 874, 873, 0, - 0, 906, 907, 0, 908, 0, 0, 198, 200, 207, - 220, 230, 234, 241, 259, 274, 276, 283, 296, 307, - 315, 316, 319, 325, 375, 381, 382, 383, 384, 403, - 404, 405, 408, 411, 412, 415, 417, 418, 421, 425, - 429, 430, 431, 433, 435, 437, 449, 454, 468, 469, - 470, 471, 472, 475, 476, 481, 482, 483, 484, 485, - 493, 494, 499, 569, 571, 584, 602, 608, 474, 919, - 920, 921, 922, 923, 924, 925, 926, 297, 579, 609, - 577, 621, 603, 432, 373, 0, 0, 376, 279, 302, - 317, 0, 594, 495, 225, 460, 288, 249, 944, 0, - 209, 244, 228, 257, 272, 275, 321, 386, 394, 423, - 428, 294, 269, 242, 453, 239, 478, 502, 503, 504, - 506, 390, 264, 427, 391, 0, 371, 559, 560, 313, - 511, 0, 750, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 
410, 0, 0, 0, 0, 738, 0, 0, - 0, 268, 743, 0, 0, 0, 361, 265, 0, 0, - 424, 0, 203, 0, 480, 250, 372, 369, 566, 280, - 271, 267, 248, 314, 380, 422, 501, 416, 749, 365, - 0, 0, 490, 395, 0, 0, 0, 0, 0, 745, - 746, 0, 0, 0, 0, 0, 0, 0, 0, 320, - 246, 322, 202, 407, 491, 284, 0, 94, 0, 0, - 945, 929, 1057, 895, 933, 946, 947, 948, 949, 934, - 0, 236, 935, 936, 243, 937, 0, 894, 779, 781, - 780, 844, 845, 846, 847, 848, 849, 850, 777, 942, - 950, 951, 0, 263, 318, 270, 262, 563, 0, 0, - 0, 0, 0, 0, 0, 227, 0, 0, 0, 0, - 0, 0, 0, 0, 735, 0, 748, 0, 0, 0, - 273, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 732, 733, 0, - 0, 0, 0, 889, 0, 734, 0, 0, 742, 952, - 953, 954, 955, 956, 957, 958, 959, 960, 961, 962, - 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, + 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, + 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 3094, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 298, 0, 399, 258, 0, 450, 902, 0, 0, 618, + 0, 0, 900, 0, 0, 0, 0, 363, 0, 330, + 199, 226, 0, 0, 409, 458, 470, 0, 0, 0, + 953, 0, 468, 423, 596, 234, 285, 455, 429, 466, + 437, 288, 0, 0, 467, 370, 579, 447, 593, 619, + 620, 264, 403, 605, 516, 613, 637, 227, 261, 417, + 501, 599, 490, 395, 575, 576, 329, 489, 296, 203, + 367, 625, 225, 476, 369, 243, 232, 581, 602, 290, + 453, 632, 214, 511, 591, 240, 480, 0, 0, 640, + 248, 500, 216, 588, 499, 391, 326, 327, 215, 0, + 454, 269, 294, 0, 0, 259, 412, 954, 955, 257, + 641, 799, 612, 221, 0, 611, 405, 578, 589, 392, + 381, 220, 587, 390, 380, 334, 807, 808, 281, 307, + 884, 883, 882, 306, 308, 880, 881, 879, 208, 600, + 0, 209, 0, 495, 601, 642, 449, 213, 235, 236, + 238, 0, 280, 284, 292, 295, 303, 304, 313, 365, + 416, 443, 439, 448, 0, 573, 594, 606, 617, 623, + 624, 626, 627, 628, 629, 630, 633, 631, 404, 311, + 491, 333, 371, 0, 0, 422, 469, 241, 598, 492, + 890, 912, 901, 767, 768, 891, 892, 916, 893, 770, + 771, 913, 914, 764, 765, 769, 915, 917, 643, 644, + 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, + 
655, 656, 657, 658, 659, 660, 638, 502, 508, 503, + 504, 505, 506, 507, 0, 509, 904, 754, 753, 0, + 760, 0, 789, 790, 792, 796, 797, 798, 809, 856, + 857, 865, 867, 868, 866, 869, 870, 871, 874, 875, + 876, 877, 872, 873, 878, 772, 776, 773, 774, 775, + 787, 777, 778, 779, 780, 781, 782, 783, 784, 785, + 786, 788, 927, 928, 929, 930, 931, 932, 802, 806, + 805, 803, 804, 800, 801, 828, 827, 829, 830, 831, + 832, 833, 834, 836, 835, 837, 838, 839, 840, 841, + 842, 810, 811, 814, 815, 813, 812, 816, 825, 826, + 817, 818, 819, 820, 821, 822, 824, 823, 843, 844, + 845, 846, 847, 849, 848, 852, 853, 851, 850, 855, + 854, 752, 198, 222, 366, 0, 451, 289, 639, 608, + 603, 207, 224, 918, 263, 919, 0, 0, 923, 0, + 0, 0, 925, 924, 0, 926, 0, 888, 887, 0, + 0, 920, 921, 0, 922, 0, 0, 200, 202, 210, + 223, 233, 237, 244, 262, 277, 279, 286, 299, 310, + 318, 319, 322, 328, 378, 384, 385, 386, 387, 406, + 407, 408, 411, 414, 415, 418, 420, 421, 424, 428, + 432, 433, 434, 436, 438, 440, 452, 457, 471, 472, + 473, 474, 475, 478, 479, 484, 485, 486, 487, 488, + 496, 497, 510, 580, 582, 597, 615, 621, 477, 933, + 934, 935, 936, 937, 938, 939, 940, 300, 592, 622, + 590, 634, 616, 435, 376, 0, 0, 379, 282, 305, + 320, 0, 607, 498, 228, 463, 291, 252, 958, 0, + 212, 247, 231, 260, 275, 278, 324, 389, 397, 426, + 431, 297, 272, 245, 456, 242, 481, 513, 514, 515, + 517, 393, 267, 430, 394, 0, 374, 570, 571, 316, + 522, 0, 763, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 413, 0, 0, 0, 0, 751, 0, 0, + 0, 271, 756, 0, 0, 0, 364, 268, 0, 0, + 427, 0, 205, 0, 483, 253, 375, 372, 577, 283, + 274, 270, 251, 317, 383, 425, 512, 419, 762, 368, + 0, 0, 493, 398, 0, 0, 0, 0, 0, 758, + 759, 0, 0, 0, 0, 0, 0, 0, 0, 323, + 249, 325, 204, 410, 494, 287, 0, 96, 0, 0, + 959, 943, 1074, 909, 947, 960, 961, 962, 963, 948, + 0, 239, 949, 950, 246, 951, 0, 908, 793, 795, + 794, 858, 859, 860, 861, 862, 863, 864, 791, 956, + 964, 965, 0, 266, 321, 273, 265, 574, 0, 0, + 0, 0, 0, 0, 0, 0, 230, 0, 0, 0, + 0, 0, 0, 0, 0, 748, 
0, 761, 0, 0, + 0, 276, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 745, 746, + 0, 0, 0, 0, 903, 0, 747, 0, 0, 755, + 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, + 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, + 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, + 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, + 1006, 1007, 757, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 298, 0, 399, 258, 0, 450, 902, + 0, 0, 618, 0, 0, 900, 0, 0, 0, 0, + 363, 0, 330, 199, 226, 0, 0, 409, 458, 470, + 0, 0, 0, 953, 0, 468, 423, 596, 234, 285, + 455, 429, 466, 437, 288, 0, 0, 467, 370, 579, + 447, 593, 619, 620, 264, 403, 605, 516, 613, 637, + 227, 261, 417, 501, 599, 490, 395, 575, 576, 329, + 489, 296, 203, 367, 625, 225, 476, 369, 243, 232, + 581, 602, 290, 453, 632, 214, 511, 591, 240, 480, + 0, 0, 640, 248, 500, 216, 588, 499, 391, 326, + 327, 215, 0, 454, 269, 294, 0, 0, 259, 412, + 954, 955, 257, 641, 799, 612, 221, 0, 611, 405, + 578, 589, 392, 381, 220, 587, 390, 380, 334, 807, + 808, 281, 307, 884, 883, 882, 306, 308, 880, 881, + 879, 208, 600, 0, 209, 0, 495, 601, 642, 449, + 213, 235, 236, 238, 0, 280, 284, 292, 295, 303, + 304, 313, 365, 416, 443, 439, 448, 0, 573, 594, + 606, 617, 623, 624, 626, 627, 628, 629, 630, 633, + 631, 404, 311, 491, 333, 371, 0, 0, 422, 469, + 241, 598, 492, 890, 912, 901, 767, 768, 891, 892, + 916, 893, 770, 771, 913, 914, 764, 765, 769, 915, + 917, 643, 644, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 659, 660, 638, + 502, 508, 503, 504, 505, 506, 507, 0, 509, 904, + 754, 753, 0, 760, 0, 789, 790, 792, 796, 797, + 798, 809, 856, 857, 865, 867, 868, 866, 869, 870, + 871, 874, 875, 876, 877, 872, 873, 878, 772, 776, + 773, 774, 775, 787, 777, 778, 779, 780, 781, 782, + 783, 784, 785, 786, 788, 927, 928, 929, 930, 931, + 932, 802, 806, 805, 803, 804, 800, 801, 828, 827, + 829, 830, 831, 832, 833, 834, 836, 835, 837, 838, + 839, 840, 841, 842, 810, 811, 814, 815, 813, 812, + 816, 825, 826, 817, 818, 819, 
820, 821, 822, 824, + 823, 843, 844, 845, 846, 847, 849, 848, 852, 853, + 851, 850, 855, 854, 752, 198, 222, 366, 0, 451, + 289, 639, 608, 603, 207, 224, 918, 263, 919, 0, + 0, 923, 0, 0, 0, 925, 924, 0, 926, 0, + 888, 887, 0, 0, 920, 921, 0, 922, 0, 0, + 200, 202, 210, 223, 233, 237, 244, 262, 277, 279, + 286, 299, 310, 318, 319, 322, 328, 378, 384, 385, + 386, 387, 406, 407, 408, 411, 414, 415, 418, 420, + 421, 424, 428, 432, 433, 434, 436, 438, 440, 452, + 457, 471, 472, 473, 474, 475, 478, 479, 484, 485, + 486, 487, 488, 496, 497, 510, 580, 582, 597, 615, + 621, 477, 933, 934, 935, 936, 937, 938, 939, 940, + 300, 592, 622, 590, 634, 616, 435, 376, 0, 0, + 379, 282, 305, 320, 0, 607, 498, 228, 463, 291, + 252, 958, 0, 212, 247, 231, 260, 275, 278, 324, + 389, 397, 426, 431, 297, 272, 245, 456, 242, 481, + 513, 514, 515, 517, 393, 267, 430, 394, 0, 374, + 570, 571, 316, 522, 0, 763, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 413, 0, 0, 0, 0, + 751, 0, 0, 0, 271, 756, 0, 0, 0, 364, + 268, 0, 0, 427, 0, 205, 0, 483, 253, 375, + 372, 577, 283, 274, 270, 251, 317, 383, 425, 512, + 419, 762, 368, 0, 0, 493, 398, 0, 0, 0, + 0, 0, 758, 759, 0, 0, 0, 0, 0, 0, + 0, 0, 323, 249, 325, 204, 410, 494, 287, 0, + 96, 0, 0, 959, 943, 1074, 909, 947, 960, 961, + 962, 963, 948, 0, 239, 949, 950, 246, 951, 0, + 908, 793, 795, 794, 858, 859, 860, 861, 862, 863, + 864, 791, 956, 964, 965, 0, 266, 321, 273, 265, + 574, 0, 0, 0, 0, 0, 0, 0, 0, 230, + 0, 0, 0, 0, 0, 0, 0, 0, 748, 0, + 761, 0, 0, 0, 276, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 745, 746, 0, 0, 0, 0, 903, 0, 747, + 0, 0, 755, 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, - 993, 2055, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 295, 0, 396, 255, 0, 447, 888, 0, - 0, 605, 0, 0, 886, 0, 0, 0, 0, 360, - 0, 327, 197, 223, 0, 0, 406, 455, 467, 0, - 0, 0, 939, 0, 465, 420, 583, 231, 282, 452, - 426, 463, 434, 285, 0, 0, 464, 367, 568, 444, - 580, 606, 607, 
261, 400, 592, 505, 600, 624, 224, - 258, 414, 498, 586, 487, 392, 564, 565, 326, 486, - 293, 201, 364, 612, 222, 473, 366, 240, 229, 570, - 589, 287, 450, 619, 211, 500, 578, 237, 477, 0, - 0, 627, 245, 497, 213, 575, 496, 388, 323, 324, - 212, 0, 451, 266, 291, 0, 0, 256, 409, 940, - 941, 254, 628, 785, 599, 218, 0, 598, 402, 567, - 576, 389, 378, 217, 574, 387, 377, 331, 793, 794, - 278, 304, 870, 869, 868, 303, 305, 866, 867, 865, - 205, 587, 0, 206, 0, 492, 588, 629, 446, 210, - 232, 233, 235, 0, 277, 281, 289, 292, 300, 301, - 310, 362, 413, 440, 436, 445, 0, 562, 581, 593, - 604, 610, 611, 613, 614, 615, 616, 617, 620, 618, - 401, 308, 488, 330, 368, 0, 0, 419, 466, 238, - 585, 489, 876, 898, 887, 754, 755, 877, 878, 902, - 879, 757, 758, 899, 900, 751, 752, 756, 901, 903, - 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, - 640, 641, 642, 643, 644, 645, 646, 647, 625, 890, - 741, 740, 0, 747, 0, 775, 776, 778, 782, 783, - 784, 795, 842, 843, 851, 853, 854, 852, 855, 856, - 857, 860, 861, 862, 863, 858, 859, 864, 759, 763, - 760, 761, 762, 774, 764, 765, 766, 767, 768, 769, - 770, 771, 772, 773, 913, 914, 915, 916, 917, 918, - 788, 792, 791, 789, 790, 786, 787, 814, 813, 815, - 816, 817, 818, 819, 820, 822, 821, 823, 824, 825, - 826, 827, 828, 796, 797, 800, 801, 799, 798, 802, - 811, 812, 803, 804, 805, 806, 807, 808, 810, 809, - 829, 830, 831, 832, 833, 835, 834, 838, 839, 837, - 836, 841, 840, 739, 196, 219, 363, 0, 448, 286, - 626, 595, 590, 204, 221, 904, 260, 905, 0, 0, - 909, 0, 0, 0, 911, 910, 0, 912, 0, 874, - 873, 0, 0, 906, 907, 0, 908, 0, 0, 198, - 200, 207, 220, 230, 234, 241, 259, 274, 276, 283, - 296, 307, 315, 316, 319, 325, 375, 381, 382, 383, - 384, 403, 404, 405, 408, 411, 412, 415, 417, 418, - 421, 425, 429, 430, 431, 433, 435, 437, 449, 454, - 468, 469, 470, 471, 472, 475, 476, 481, 482, 483, - 484, 485, 493, 494, 499, 569, 571, 584, 602, 608, - 474, 919, 920, 921, 922, 923, 924, 925, 926, 297, - 579, 609, 577, 621, 603, 432, 373, 0, 0, 
376, - 279, 302, 317, 0, 594, 495, 225, 460, 288, 249, - 944, 0, 209, 244, 228, 257, 272, 275, 321, 386, - 394, 423, 428, 294, 269, 242, 453, 239, 478, 502, - 503, 504, 506, 390, 264, 427, 391, 0, 371, 559, - 560, 313, 511, 0, 750, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 410, 0, 0, 0, 0, 738, - 0, 0, 0, 268, 743, 0, 0, 0, 361, 265, - 0, 0, 424, 0, 203, 0, 480, 250, 372, 369, - 566, 280, 271, 267, 248, 314, 380, 422, 501, 416, - 749, 365, 0, 0, 490, 395, 0, 0, 0, 0, - 0, 745, 746, 0, 0, 0, 0, 0, 0, 0, - 0, 320, 246, 322, 202, 407, 491, 284, 0, 94, - 0, 0, 945, 929, 1057, 895, 933, 946, 947, 948, - 949, 934, 0, 236, 935, 936, 243, 937, 0, 894, - 779, 781, 780, 844, 845, 846, 847, 848, 849, 850, - 777, 942, 950, 951, 0, 263, 318, 270, 262, 563, - 0, 0, 0, 0, 0, 0, 0, 227, 0, 0, - 0, 0, 0, 0, 0, 0, 735, 0, 748, 0, - 0, 0, 273, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 732, - 733, 0, 0, 0, 0, 889, 0, 734, 0, 0, - 742, 952, 953, 954, 955, 956, 957, 958, 959, 960, - 961, 962, 963, 964, 965, 966, 967, 968, 969, 970, - 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, - 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, - 991, 992, 993, 2053, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 295, 0, 396, 255, 0, 447, - 888, 0, 0, 605, 0, 0, 886, 0, 0, 0, - 0, 360, 0, 327, 197, 223, 0, 0, 406, 455, - 467, 0, 0, 0, 939, 0, 465, 420, 583, 231, - 282, 452, 426, 463, 434, 285, 0, 0, 464, 367, - 568, 444, 580, 606, 607, 261, 400, 592, 505, 600, - 624, 224, 258, 414, 498, 586, 487, 392, 564, 565, - 326, 486, 293, 201, 364, 612, 222, 473, 366, 240, - 229, 570, 589, 287, 450, 619, 211, 500, 578, 237, - 477, 0, 0, 627, 245, 497, 213, 575, 496, 388, - 323, 324, 212, 0, 451, 266, 291, 0, 0, 256, - 409, 940, 941, 254, 628, 785, 599, 218, 0, 598, - 402, 567, 576, 389, 378, 217, 574, 387, 377, 331, - 793, 794, 278, 304, 870, 869, 868, 303, 305, 866, - 867, 865, 205, 587, 0, 206, 0, 492, 588, 629, - 446, 210, 232, 233, 235, 0, 277, 281, 289, 292, - 300, 301, 310, 362, 413, 440, 436, 445, 0, 562, - 581, 593, 
604, 610, 611, 613, 614, 615, 616, 617, - 620, 618, 401, 308, 488, 330, 368, 0, 0, 419, - 466, 238, 585, 489, 876, 898, 887, 754, 755, 877, - 878, 902, 879, 757, 758, 899, 900, 751, 752, 756, - 901, 903, 630, 631, 632, 633, 634, 635, 636, 637, - 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, - 625, 890, 741, 740, 0, 747, 0, 775, 776, 778, - 782, 783, 784, 795, 842, 843, 851, 853, 854, 852, - 855, 856, 857, 860, 861, 862, 863, 858, 859, 864, - 759, 763, 760, 761, 762, 774, 764, 765, 766, 767, - 768, 769, 770, 771, 772, 773, 913, 914, 915, 916, - 917, 918, 788, 792, 791, 789, 790, 786, 787, 814, - 813, 815, 816, 817, 818, 819, 820, 822, 821, 823, - 824, 825, 826, 827, 828, 796, 797, 800, 801, 799, - 798, 802, 811, 812, 803, 804, 805, 806, 807, 808, - 810, 809, 829, 830, 831, 832, 833, 835, 834, 838, - 839, 837, 836, 841, 840, 739, 196, 219, 363, 0, - 448, 286, 626, 595, 590, 204, 221, 904, 260, 905, - 0, 0, 909, 0, 0, 0, 911, 910, 0, 912, - 0, 874, 873, 0, 0, 906, 907, 0, 908, 0, - 0, 198, 200, 207, 220, 230, 234, 241, 259, 274, - 276, 283, 296, 307, 315, 316, 319, 325, 375, 381, - 382, 383, 384, 403, 404, 405, 408, 411, 412, 415, - 417, 418, 421, 425, 429, 430, 431, 433, 435, 437, - 449, 454, 468, 469, 470, 471, 472, 475, 476, 481, - 482, 483, 484, 485, 493, 494, 499, 569, 571, 584, - 602, 608, 474, 919, 920, 921, 922, 923, 924, 925, - 926, 297, 579, 609, 577, 621, 603, 432, 373, 0, - 0, 376, 279, 302, 317, 0, 594, 495, 225, 460, - 288, 249, 944, 0, 209, 244, 228, 257, 272, 275, - 321, 386, 394, 423, 428, 294, 269, 242, 453, 239, - 478, 502, 503, 504, 506, 390, 264, 427, 391, 0, - 371, 559, 560, 313, 511, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 410, 0, 0, 0, - 0, 0, 0, 0, 0, 268, 0, 0, 0, 0, - 361, 265, 0, 0, 424, 0, 203, 0, 480, 250, - 372, 369, 566, 280, 271, 267, 248, 314, 380, 422, - 501, 416, 0, 365, 0, 0, 490, 395, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 320, 246, 322, 202, 407, 491, 284, - 0, 0, 0, 0, 0, 696, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 236, 0, 0, 
243, 0, - 0, 0, 346, 355, 354, 335, 336, 338, 340, 345, - 352, 358, 0, 0, 0, 0, 0, 263, 318, 270, - 262, 563, 0, 0, 0, 0, 0, 0, 0, 227, - 0, 1108, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 273, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 295, 0, 396, 255, - 0, 447, 0, 0, 1107, 605, 0, 0, 0, 0, - 0, 1104, 1105, 360, 1065, 327, 197, 223, 1098, 1102, - 406, 455, 467, 0, 0, 0, 251, 0, 465, 420, - 583, 231, 282, 452, 426, 463, 434, 285, 0, 0, - 464, 367, 568, 444, 580, 606, 607, 261, 400, 592, - 505, 600, 624, 224, 258, 414, 498, 586, 487, 392, - 564, 565, 326, 486, 293, 201, 364, 612, 222, 473, - 366, 240, 229, 570, 589, 287, 450, 619, 211, 500, - 578, 237, 477, 0, 0, 627, 245, 497, 213, 575, - 496, 388, 323, 324, 212, 0, 451, 266, 291, 0, - 0, 256, 409, 572, 573, 254, 628, 226, 599, 218, - 0, 598, 402, 567, 576, 389, 378, 217, 574, 387, - 377, 331, 350, 351, 278, 304, 441, 370, 442, 303, - 305, 398, 397, 399, 205, 587, 0, 206, 0, 492, - 588, 629, 446, 210, 232, 233, 235, 0, 277, 281, - 289, 292, 300, 301, 310, 362, 413, 440, 436, 445, - 0, 562, 581, 593, 604, 610, 611, 613, 614, 615, - 616, 617, 620, 618, 401, 308, 488, 330, 368, 0, - 0, 419, 466, 238, 585, 489, 199, 0, 0, 0, - 0, 252, 253, 0, 558, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 630, 631, 632, 633, 634, 635, - 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, - 646, 647, 625, 0, 0, 0, 0, 0, 0, 0, - 0, 648, 379, 479, 582, 332, 344, 347, 337, 356, - 0, 357, 333, 334, 339, 341, 342, 343, 348, 349, - 353, 359, 247, 208, 385, 393, 561, 309, 214, 215, - 216, 507, 508, 509, 510, 596, 597, 601, 456, 457, - 458, 459, 290, 591, 306, 462, 461, 328, 329, 374, - 443, 523, 525, 536, 540, 542, 544, 550, 553, 524, - 526, 537, 541, 543, 545, 551, 554, 513, 515, 517, - 519, 532, 531, 528, 556, 557, 534, 539, 518, 530, - 535, 548, 
555, 552, 512, 516, 520, 529, 547, 546, - 527, 538, 549, 533, 521, 514, 522, 0, 196, 219, - 363, 0, 448, 286, 626, 595, 590, 204, 221, 0, - 260, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 198, 200, 207, 220, 230, 234, 241, - 259, 274, 276, 283, 296, 307, 315, 316, 319, 325, - 375, 381, 382, 383, 384, 403, 404, 405, 408, 411, - 412, 415, 417, 418, 421, 425, 429, 430, 431, 433, - 435, 437, 449, 454, 468, 469, 470, 471, 472, 475, - 476, 481, 482, 483, 484, 485, 493, 494, 499, 569, - 571, 584, 602, 608, 474, 298, 299, 438, 439, 311, - 312, 622, 623, 297, 579, 609, 577, 621, 603, 432, - 373, 0, 0, 376, 279, 302, 317, 0, 594, 495, - 225, 460, 288, 249, 0, 0, 209, 244, 228, 257, - 272, 275, 321, 386, 394, 423, 428, 294, 269, 242, - 453, 239, 478, 502, 503, 504, 506, 390, 264, 427, - 391, 0, 371, 559, 560, 313, 511, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 410, 0, - 0, 0, 0, 0, 0, 0, 0, 268, 0, 0, - 0, 0, 361, 265, 0, 0, 424, 0, 203, 0, - 480, 250, 372, 369, 566, 280, 271, 267, 248, 314, - 380, 422, 501, 416, 0, 365, 0, 0, 490, 395, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 320, 246, 322, 202, 407, - 491, 284, 0, 0, 0, 0, 1665, 929, 0, 0, - 1662, 0, 0, 0, 0, 1660, 0, 236, 1661, 1659, - 243, 1664, 0, 894, 346, 355, 354, 335, 336, 338, - 340, 345, 352, 358, 0, 0, 0, 0, 0, 263, - 318, 270, 262, 563, 0, 0, 0, 0, 0, 0, - 0, 227, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 273, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 295, 0, - 396, 255, 0, 447, 0, 0, 0, 605, 0, 0, - 0, 0, 0, 0, 0, 360, 0, 327, 197, 223, - 0, 0, 406, 455, 467, 0, 0, 0, 251, 0, - 465, 420, 583, 231, 282, 452, 426, 463, 434, 285, - 0, 0, 464, 367, 568, 444, 580, 606, 607, 261, - 400, 592, 505, 600, 624, 224, 258, 414, 498, 586, - 487, 392, 564, 565, 326, 486, 293, 201, 
364, 612, - 222, 473, 366, 240, 229, 570, 589, 287, 450, 619, - 211, 500, 578, 237, 477, 0, 0, 627, 245, 497, - 213, 575, 496, 388, 323, 324, 212, 0, 451, 266, - 291, 0, 0, 256, 409, 572, 573, 254, 628, 226, - 599, 218, 0, 598, 402, 567, 576, 389, 378, 217, - 574, 387, 377, 331, 350, 351, 278, 304, 441, 370, - 442, 303, 305, 398, 397, 399, 205, 587, 0, 206, - 0, 492, 588, 629, 446, 210, 232, 233, 235, 0, - 277, 281, 289, 292, 300, 301, 310, 362, 413, 440, - 436, 445, 0, 562, 581, 593, 604, 610, 611, 613, - 614, 615, 616, 617, 620, 618, 401, 308, 488, 330, - 368, 0, 0, 419, 466, 238, 585, 489, 199, 0, - 0, 0, 0, 252, 253, 0, 558, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 630, 631, 632, 633, - 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, - 644, 645, 646, 647, 625, 0, 0, 0, 0, 0, - 0, 0, 0, 648, 379, 479, 582, 332, 344, 347, - 337, 356, 0, 357, 333, 334, 339, 341, 342, 343, - 348, 349, 353, 359, 247, 208, 385, 393, 561, 309, - 214, 215, 216, 507, 508, 509, 510, 596, 597, 601, - 456, 457, 458, 459, 290, 591, 306, 462, 461, 328, - 329, 374, 443, 523, 525, 536, 540, 542, 544, 550, - 553, 524, 526, 537, 541, 543, 545, 551, 554, 513, - 515, 517, 519, 532, 531, 528, 556, 557, 534, 539, - 518, 530, 535, 548, 555, 552, 512, 516, 520, 529, - 547, 546, 527, 538, 549, 533, 521, 514, 522, 0, - 196, 219, 363, 0, 448, 286, 626, 595, 590, 204, - 221, 0, 260, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 198, 200, 207, 220, 230, - 234, 241, 259, 274, 276, 283, 296, 307, 315, 316, - 319, 325, 375, 381, 382, 383, 384, 403, 404, 405, - 408, 411, 412, 415, 417, 418, 421, 425, 429, 430, - 431, 433, 435, 437, 449, 454, 468, 469, 470, 471, - 472, 475, 476, 481, 482, 483, 484, 485, 493, 494, - 499, 569, 571, 584, 602, 608, 474, 298, 299, 438, - 439, 311, 312, 622, 623, 297, 579, 609, 577, 621, - 603, 432, 373, 0, 0, 376, 279, 302, 317, 0, - 594, 495, 225, 460, 288, 249, 0, 0, 209, 244, - 228, 257, 272, 275, 321, 386, 394, 423, 428, 294, - 269, 242, 453, 239, 478, 502, 503, 
504, 506, 390, - 264, 427, 0, 391, 371, 559, 560, 313, 85, 511, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 410, 0, 0, 0, 0, 0, 0, 0, 0, - 268, 0, 0, 0, 0, 361, 265, 0, 0, 424, - 0, 203, 0, 480, 250, 372, 369, 566, 280, 271, - 267, 248, 314, 380, 422, 501, 416, 0, 365, 0, - 0, 490, 395, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 320, 246, - 322, 202, 407, 491, 284, 0, 94, 0, 0, 0, - 194, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 236, 0, 0, 243, 0, 0, 0, 346, 355, 354, - 335, 336, 338, 340, 345, 352, 358, 0, 0, 0, - 0, 0, 263, 318, 270, 262, 563, 0, 0, 0, - 0, 0, 0, 0, 227, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 273, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 295, 0, 396, 255, 0, 447, 0, 0, 0, - 605, 0, 0, 0, 0, 0, 0, 0, 360, 0, - 327, 197, 223, 0, 0, 406, 455, 467, 0, 0, - 0, 251, 0, 465, 420, 583, 231, 282, 452, 426, - 463, 434, 285, 0, 0, 464, 367, 568, 444, 580, - 606, 607, 261, 400, 592, 505, 600, 624, 224, 258, - 414, 498, 586, 487, 392, 564, 565, 326, 486, 293, - 201, 364, 612, 222, 473, 366, 240, 229, 570, 589, - 287, 450, 619, 211, 500, 578, 237, 477, 0, 0, - 627, 245, 497, 213, 575, 496, 388, 323, 324, 212, - 0, 451, 266, 291, 0, 0, 256, 409, 572, 573, - 254, 628, 226, 599, 218, 0, 598, 402, 567, 576, - 389, 378, 217, 574, 387, 377, 331, 350, 351, 278, - 304, 441, 370, 442, 303, 305, 398, 397, 399, 205, - 587, 0, 206, 0, 492, 588, 629, 446, 210, 232, - 233, 235, 0, 277, 281, 289, 292, 300, 301, 310, - 362, 413, 440, 436, 445, 0, 562, 581, 593, 604, - 610, 611, 613, 614, 615, 616, 617, 620, 618, 401, - 308, 488, 330, 368, 0, 0, 419, 466, 238, 585, - 489, 199, 0, 0, 0, 0, 252, 253, 0, 558, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 630, - 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, - 641, 642, 643, 644, 645, 646, 647, 625, 0, 0, - 0, 0, 0, 0, 0, 0, 648, 379, 479, 
582, - 332, 344, 347, 337, 356, 0, 357, 333, 334, 339, - 341, 342, 343, 348, 349, 353, 359, 247, 208, 385, - 393, 561, 309, 214, 215, 216, 507, 508, 509, 510, - 596, 597, 601, 456, 457, 458, 459, 290, 591, 306, - 462, 461, 328, 329, 374, 443, 523, 525, 536, 540, - 542, 544, 550, 553, 524, 526, 537, 541, 543, 545, - 551, 554, 513, 515, 517, 519, 532, 531, 528, 556, - 557, 534, 539, 518, 530, 535, 548, 555, 552, 512, - 516, 520, 529, 547, 546, 527, 538, 549, 533, 521, - 514, 522, 0, 196, 219, 363, 93, 448, 286, 626, - 595, 590, 204, 221, 0, 260, 0, 0, 0, 0, - 0, 0, 2336, 0, 0, 2335, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 198, 200, - 207, 220, 230, 234, 241, 259, 274, 276, 283, 296, - 307, 315, 316, 319, 325, 375, 381, 382, 383, 384, - 403, 404, 405, 408, 411, 412, 415, 417, 418, 421, - 425, 429, 430, 431, 433, 435, 437, 449, 454, 468, - 469, 470, 471, 472, 475, 476, 481, 482, 483, 484, - 485, 493, 494, 499, 569, 571, 584, 602, 608, 474, - 298, 299, 438, 439, 311, 312, 622, 623, 297, 579, - 609, 577, 621, 603, 432, 373, 0, 0, 376, 279, - 302, 317, 0, 594, 495, 225, 460, 288, 249, 0, - 0, 209, 244, 228, 257, 272, 275, 321, 386, 394, - 423, 428, 294, 269, 242, 453, 239, 478, 502, 503, - 504, 506, 390, 264, 427, 1720, 0, 371, 559, 560, - 313, 511, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 410, 0, 0, 0, 1722, 0, 0, - 0, 0, 268, 0, 0, 0, 0, 361, 265, 0, - 0, 424, 0, 203, 0, 480, 250, 372, 369, 566, - 280, 271, 267, 248, 314, 380, 422, 501, 416, 0, - 365, 0, 0, 490, 395, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 320, 246, 322, 202, 407, 491, 284, 0, 0, 0, - 0, 1724, 696, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 236, 0, 0, 243, 0, 0, 0, 346, - 355, 354, 335, 336, 338, 340, 345, 352, 358, 0, - 0, 0, 0, 0, 263, 318, 270, 262, 563, 0, - 0, 0, 0, 0, 0, 0, 227, 0, 0, 0, - 1439, 0, 1440, 1441, 0, 0, 0, 0, 0, 0, - 0, 273, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 295, 0, 396, 255, 0, 447, 0, - 0, 0, 605, 0, 0, 0, 0, 0, 0, 0, - 360, 0, 327, 197, 223, 0, 0, 406, 455, 467, - 0, 0, 0, 251, 0, 465, 420, 583, 231, 282, - 452, 426, 463, 434, 285, 0, 0, 464, 367, 568, - 444, 580, 606, 607, 261, 400, 592, 505, 600, 624, - 224, 258, 414, 498, 586, 487, 392, 564, 565, 326, - 486, 293, 201, 364, 612, 222, 473, 366, 240, 229, - 570, 589, 287, 450, 619, 211, 500, 578, 237, 477, - 0, 0, 627, 245, 497, 213, 575, 496, 388, 323, - 324, 212, 0, 451, 266, 291, 0, 0, 256, 409, - 572, 573, 254, 628, 226, 599, 218, 0, 598, 402, - 567, 576, 389, 378, 217, 574, 387, 377, 331, 350, - 351, 278, 304, 441, 370, 442, 303, 305, 398, 397, - 399, 205, 587, 0, 206, 0, 492, 588, 629, 446, - 210, 232, 233, 235, 0, 277, 281, 289, 292, 300, - 301, 310, 362, 413, 440, 436, 445, 0, 562, 581, - 593, 604, 610, 611, 613, 614, 615, 616, 617, 620, - 618, 401, 308, 488, 330, 368, 0, 0, 419, 466, - 238, 585, 489, 199, 0, 0, 0, 0, 252, 253, - 0, 558, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 630, 631, 632, 633, 634, 635, 636, 637, 638, - 639, 640, 641, 642, 643, 644, 645, 646, 647, 625, - 0, 0, 0, 0, 0, 0, 0, 0, 648, 379, - 479, 582, 332, 344, 347, 337, 356, 0, 357, 333, - 334, 339, 341, 342, 343, 348, 349, 353, 359, 247, - 208, 385, 393, 561, 309, 214, 215, 216, 507, 508, - 509, 510, 596, 597, 601, 456, 457, 458, 459, 290, - 591, 306, 462, 461, 328, 329, 374, 443, 523, 525, - 536, 540, 542, 544, 550, 553, 524, 526, 537, 541, - 543, 545, 551, 554, 513, 515, 517, 519, 532, 531, - 528, 556, 557, 534, 539, 518, 530, 535, 548, 555, - 552, 512, 516, 520, 529, 547, 546, 527, 538, 549, - 533, 521, 514, 522, 0, 196, 219, 363, 0, 448, - 286, 626, 595, 590, 204, 221, 0, 260, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 198, 200, 207, 220, 230, 234, 241, 259, 274, 276, - 283, 296, 307, 315, 316, 319, 325, 375, 381, 382, - 383, 384, 403, 404, 405, 408, 411, 412, 415, 417, - 418, 421, 
425, 429, 430, 431, 433, 435, 437, 449, - 454, 468, 469, 470, 471, 472, 475, 476, 481, 482, - 483, 484, 485, 493, 494, 499, 569, 571, 584, 602, - 608, 474, 298, 299, 438, 439, 311, 312, 622, 623, - 297, 579, 609, 577, 621, 603, 432, 373, 0, 0, - 376, 279, 302, 317, 0, 594, 495, 225, 460, 288, - 249, 0, 0, 209, 244, 228, 257, 272, 275, 321, - 386, 394, 423, 428, 294, 269, 242, 453, 239, 478, - 502, 503, 504, 506, 390, 264, 427, 0, 391, 371, - 559, 560, 313, 85, 511, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 410, 0, 0, 0, - 0, 0, 0, 0, 0, 268, 0, 0, 0, 0, - 361, 265, 0, 0, 424, 0, 203, 0, 480, 250, - 372, 369, 566, 280, 271, 267, 248, 314, 380, 422, - 501, 416, 0, 365, 0, 0, 490, 395, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 320, 246, 322, 202, 407, 491, 284, - 0, 94, 0, 1701, 0, 696, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 236, 0, 0, 243, 0, - 0, 0, 346, 355, 354, 335, 336, 338, 340, 345, - 352, 358, 0, 0, 0, 0, 0, 263, 318, 270, - 262, 563, 0, 0, 0, 0, 0, 0, 0, 227, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 273, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 295, 0, 396, 255, - 0, 447, 0, 0, 0, 605, 0, 0, 0, 0, - 0, 0, 0, 360, 0, 327, 197, 223, 0, 0, - 406, 455, 467, 0, 0, 0, 251, 0, 465, 420, - 583, 231, 282, 452, 426, 463, 434, 285, 0, 0, - 464, 367, 568, 444, 580, 606, 607, 261, 400, 592, - 505, 600, 624, 224, 258, 414, 498, 586, 487, 392, - 564, 565, 326, 486, 293, 201, 364, 612, 222, 473, - 366, 240, 229, 570, 589, 287, 450, 619, 211, 500, - 578, 237, 477, 0, 0, 627, 245, 497, 213, 575, - 496, 388, 323, 324, 212, 0, 451, 266, 291, 0, - 0, 256, 409, 572, 573, 254, 628, 226, 599, 218, - 0, 598, 402, 567, 576, 389, 378, 217, 574, 387, - 377, 331, 350, 351, 278, 304, 441, 370, 442, 303, - 305, 398, 397, 399, 205, 587, 0, 206, 0, 492, - 588, 629, 446, 210, 232, 
233, 235, 0, 277, 281, - 289, 292, 300, 301, 310, 362, 413, 440, 436, 445, - 0, 562, 581, 593, 604, 610, 611, 613, 614, 615, - 616, 617, 620, 618, 401, 308, 488, 330, 368, 0, - 0, 419, 466, 238, 585, 489, 199, 0, 0, 0, - 0, 252, 253, 0, 558, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 630, 631, 632, 633, 634, 635, - 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, - 646, 647, 625, 0, 0, 0, 0, 0, 0, 0, - 0, 648, 379, 479, 582, 332, 344, 347, 337, 356, - 0, 357, 333, 334, 339, 341, 342, 343, 348, 349, - 353, 359, 247, 208, 385, 393, 561, 309, 214, 215, - 216, 507, 508, 509, 510, 596, 597, 601, 456, 457, - 458, 459, 290, 591, 306, 462, 461, 328, 329, 374, - 443, 523, 525, 536, 540, 542, 544, 550, 553, 524, - 526, 537, 541, 543, 545, 551, 554, 513, 515, 517, - 519, 532, 531, 528, 556, 557, 534, 539, 518, 530, - 535, 548, 555, 552, 512, 516, 520, 529, 547, 546, - 527, 538, 549, 533, 521, 514, 522, 0, 196, 219, - 363, 93, 448, 286, 626, 595, 590, 204, 221, 0, - 260, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 198, 200, 207, 220, 230, 234, 241, - 259, 274, 276, 283, 296, 307, 315, 316, 319, 325, - 375, 381, 382, 383, 384, 403, 404, 405, 408, 411, - 412, 415, 417, 418, 421, 425, 429, 430, 431, 433, - 435, 437, 449, 454, 468, 469, 470, 471, 472, 475, - 476, 481, 482, 483, 484, 485, 493, 494, 499, 569, - 571, 584, 602, 608, 474, 298, 299, 438, 439, 311, - 312, 622, 623, 297, 579, 609, 577, 621, 603, 432, - 373, 0, 0, 376, 279, 302, 317, 0, 594, 495, - 225, 460, 288, 249, 0, 0, 209, 244, 228, 257, - 272, 275, 321, 386, 394, 423, 428, 294, 269, 242, - 453, 239, 478, 502, 503, 504, 506, 390, 264, 427, - 391, 0, 371, 559, 560, 313, 511, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 410, 0, - 0, 0, 0, 0, 0, 0, 0, 268, 0, 0, - 0, 0, 361, 265, 0, 0, 424, 0, 203, 0, - 480, 250, 372, 369, 566, 280, 271, 267, 248, 314, - 380, 422, 501, 416, 0, 365, 0, 0, 490, 395, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 320, 246, 322, 202, 407, - 491, 284, 0, 94, 0, 0, 0, 194, 0, 0, - 0, 0, 
0, 0, 0, 0, 0, 236, 0, 0, - 243, 0, 0, 0, 346, 355, 354, 335, 336, 338, - 340, 345, 352, 358, 0, 0, 0, 0, 0, 263, - 318, 270, 262, 563, 0, 0, 0, 0, 0, 0, - 0, 227, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 273, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 295, 0, - 396, 255, 0, 447, 0, 0, 0, 605, 0, 0, - 0, 0, 0, 0, 0, 360, 0, 327, 197, 223, - 0, 0, 406, 455, 467, 0, 0, 0, 251, 0, - 465, 420, 583, 231, 282, 452, 426, 463, 434, 285, - 0, 0, 464, 367, 568, 444, 580, 606, 607, 261, - 400, 592, 505, 600, 624, 224, 258, 414, 498, 586, - 487, 392, 564, 565, 326, 486, 293, 201, 364, 612, - 222, 473, 366, 240, 229, 570, 589, 287, 450, 619, - 211, 500, 578, 237, 477, 0, 0, 627, 245, 497, - 213, 575, 496, 388, 323, 324, 212, 0, 451, 266, - 291, 0, 0, 256, 409, 572, 573, 254, 628, 226, - 599, 218, 0, 598, 402, 567, 576, 389, 378, 217, - 574, 387, 377, 331, 350, 351, 278, 304, 441, 370, - 442, 303, 305, 398, 397, 399, 205, 587, 0, 206, - 0, 492, 588, 629, 446, 210, 232, 233, 235, 0, - 277, 281, 289, 292, 300, 301, 310, 362, 413, 440, - 436, 445, 0, 562, 581, 593, 604, 610, 611, 613, - 614, 615, 616, 617, 620, 618, 401, 308, 488, 330, - 368, 0, 0, 419, 466, 238, 585, 489, 199, 0, - 0, 0, 0, 252, 253, 0, 558, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 630, 631, 632, 633, - 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, - 644, 645, 646, 647, 625, 0, 0, 0, 0, 0, - 0, 0, 0, 648, 379, 479, 582, 332, 344, 347, - 337, 356, 0, 357, 333, 334, 339, 341, 342, 343, - 348, 349, 353, 359, 247, 208, 385, 393, 561, 309, - 214, 215, 216, 507, 508, 509, 510, 596, 597, 601, - 456, 457, 458, 459, 290, 591, 306, 462, 461, 328, - 329, 374, 443, 523, 525, 536, 540, 542, 544, 550, - 553, 524, 526, 537, 541, 543, 545, 551, 554, 513, - 515, 517, 519, 532, 531, 528, 556, 557, 534, 539, - 518, 530, 535, 
548, 555, 552, 512, 516, 520, 529, - 547, 546, 527, 538, 549, 533, 521, 514, 522, 0, - 196, 219, 363, 0, 448, 286, 626, 595, 590, 204, - 221, 0, 260, 0, 0, 0, 0, 0, 0, 2336, - 0, 0, 2335, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 198, 200, 207, 220, 230, - 234, 241, 259, 274, 276, 283, 296, 307, 315, 316, - 319, 325, 375, 381, 382, 383, 384, 403, 404, 405, - 408, 411, 412, 415, 417, 418, 421, 425, 429, 430, - 431, 433, 435, 437, 449, 454, 468, 469, 470, 471, - 472, 475, 476, 481, 482, 483, 484, 485, 493, 494, - 499, 569, 571, 584, 602, 608, 474, 298, 299, 438, - 439, 311, 312, 622, 623, 297, 579, 609, 577, 621, - 603, 432, 373, 0, 0, 376, 279, 302, 317, 0, - 594, 495, 225, 460, 288, 249, 0, 0, 209, 244, - 228, 257, 272, 275, 321, 386, 394, 423, 428, 294, - 269, 242, 453, 239, 478, 502, 503, 504, 506, 390, - 264, 427, 391, 0, 371, 559, 560, 313, 511, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 410, 0, 0, 0, 2285, 0, 0, 0, 0, 268, - 0, 0, 0, 0, 361, 265, 0, 0, 424, 0, - 203, 0, 480, 250, 372, 369, 566, 280, 271, 267, - 248, 314, 380, 422, 501, 416, 0, 365, 0, 0, - 490, 395, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 320, 246, 322, - 202, 407, 491, 284, 0, 0, 0, 0, 1904, 194, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 236, - 0, 0, 243, 0, 0, 0, 346, 355, 354, 335, - 336, 338, 340, 345, 352, 358, 0, 0, 0, 0, - 0, 263, 318, 270, 262, 563, 0, 0, 0, 0, - 0, 0, 0, 227, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 273, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 295, 0, 396, 255, 0, 447, 0, 0, 0, 605, - 0, 0, 0, 0, 0, 0, 0, 360, 0, 327, - 197, 223, 0, 0, 406, 455, 467, 0, 0, 0, - 251, 0, 465, 420, 583, 231, 282, 452, 426, 463, - 434, 285, 0, 2283, 464, 367, 568, 444, 580, 606, - 607, 261, 400, 592, 505, 600, 624, 224, 258, 414, - 498, 586, 487, 392, 564, 565, 326, 486, 293, 201, - 
364, 612, 222, 473, 366, 240, 229, 570, 589, 287, - 450, 619, 211, 500, 578, 237, 477, 0, 0, 627, - 245, 497, 213, 575, 496, 388, 323, 324, 212, 0, - 451, 266, 291, 0, 0, 256, 409, 572, 573, 254, - 628, 226, 599, 218, 0, 598, 402, 567, 576, 389, - 378, 217, 574, 387, 377, 331, 350, 351, 278, 304, - 441, 370, 442, 303, 305, 398, 397, 399, 205, 587, - 0, 206, 0, 492, 588, 629, 446, 210, 232, 233, - 235, 0, 277, 281, 289, 292, 300, 301, 310, 362, - 413, 440, 436, 445, 0, 562, 581, 593, 604, 610, - 611, 613, 614, 615, 616, 617, 620, 618, 401, 308, - 488, 330, 368, 0, 0, 419, 466, 238, 585, 489, - 199, 0, 0, 0, 0, 252, 253, 0, 558, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 630, 631, - 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, - 642, 643, 644, 645, 646, 647, 625, 0, 0, 0, - 0, 0, 0, 0, 0, 648, 379, 479, 582, 332, - 344, 347, 337, 356, 0, 357, 333, 334, 339, 341, - 342, 343, 348, 349, 353, 359, 247, 208, 385, 393, - 561, 309, 214, 215, 216, 507, 508, 509, 510, 596, - 597, 601, 456, 457, 458, 459, 290, 591, 306, 462, - 461, 328, 329, 374, 443, 523, 525, 536, 540, 542, - 544, 550, 553, 524, 526, 537, 541, 543, 545, 551, - 554, 513, 515, 517, 519, 532, 531, 528, 556, 557, - 534, 539, 518, 530, 535, 548, 555, 552, 512, 516, - 520, 529, 547, 546, 527, 538, 549, 533, 521, 514, - 522, 0, 196, 219, 363, 0, 448, 286, 626, 595, - 590, 204, 221, 0, 260, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 198, 200, 207, - 220, 230, 234, 241, 259, 274, 276, 283, 296, 307, - 315, 316, 319, 325, 375, 381, 382, 383, 384, 403, - 404, 405, 408, 411, 412, 415, 417, 418, 421, 425, - 429, 430, 431, 433, 435, 437, 449, 454, 468, 469, - 470, 471, 472, 475, 476, 481, 482, 483, 484, 485, - 493, 494, 499, 569, 571, 584, 602, 608, 474, 298, - 299, 438, 439, 311, 312, 622, 623, 297, 579, 609, - 577, 621, 603, 432, 373, 0, 0, 376, 279, 302, - 317, 0, 594, 495, 225, 460, 288, 249, 0, 0, - 209, 244, 228, 257, 272, 275, 321, 386, 394, 423, - 428, 294, 269, 242, 453, 239, 478, 502, 503, 504, 
- 506, 390, 264, 427, 391, 0, 371, 559, 560, 313, - 511, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 410, 0, 0, 0, 0, 0, 0, 0, - 0, 268, 0, 0, 0, 0, 361, 265, 0, 0, - 424, 0, 203, 0, 480, 250, 372, 369, 566, 280, - 271, 267, 248, 314, 380, 422, 501, 416, 0, 365, - 0, 0, 490, 395, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 320, - 246, 322, 202, 407, 491, 284, 0, 0, 0, 0, - 0, 696, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 236, 0, 0, 243, 0, 0, 0, 346, 355, - 354, 335, 336, 338, 340, 345, 352, 358, 0, 0, - 0, 0, 0, 263, 318, 270, 262, 563, 0, 0, - 0, 0, 0, 0, 0, 227, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 273, 0, 0, 0, 0, 0, 0, 0, 0, 1059, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 295, 0, 396, 255, 0, 447, 0, 0, - 0, 605, 0, 0, 0, 0, 0, 0, 0, 360, - 1065, 327, 197, 223, 1063, 0, 406, 455, 467, 0, - 0, 0, 251, 0, 465, 420, 583, 231, 282, 452, - 426, 463, 434, 285, 0, 0, 464, 367, 568, 444, - 580, 606, 607, 261, 400, 592, 505, 600, 624, 224, - 258, 414, 498, 586, 487, 392, 564, 565, 326, 486, - 293, 201, 364, 612, 222, 473, 366, 240, 229, 570, - 589, 287, 450, 619, 211, 500, 578, 237, 477, 0, - 0, 627, 245, 497, 213, 575, 496, 388, 323, 324, - 212, 0, 451, 266, 291, 0, 0, 256, 409, 572, - 573, 254, 628, 226, 599, 218, 0, 598, 402, 567, - 576, 389, 378, 217, 574, 387, 377, 331, 350, 351, - 278, 304, 441, 370, 442, 303, 305, 398, 397, 399, - 205, 587, 0, 206, 0, 492, 588, 629, 446, 210, - 232, 233, 235, 0, 277, 281, 289, 292, 300, 301, - 310, 362, 413, 440, 436, 445, 0, 562, 581, 593, - 604, 610, 611, 613, 614, 615, 616, 617, 620, 618, - 401, 308, 488, 330, 368, 0, 0, 419, 466, 238, - 585, 489, 199, 0, 0, 0, 0, 252, 253, 0, - 558, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, - 640, 641, 642, 643, 644, 645, 646, 647, 625, 0, - 0, 0, 0, 0, 0, 0, 0, 648, 379, 479, 
- 582, 332, 344, 347, 337, 356, 0, 357, 333, 334, - 339, 341, 342, 343, 348, 349, 353, 359, 247, 208, - 385, 393, 561, 309, 214, 215, 216, 507, 508, 509, - 510, 596, 597, 601, 456, 457, 458, 459, 290, 591, - 306, 462, 461, 328, 329, 374, 443, 523, 525, 536, - 540, 542, 544, 550, 553, 524, 526, 537, 541, 543, - 545, 551, 554, 513, 515, 517, 519, 532, 531, 528, - 556, 557, 534, 539, 518, 530, 535, 548, 555, 552, - 512, 516, 520, 529, 547, 546, 527, 538, 549, 533, - 521, 514, 522, 0, 196, 219, 363, 0, 448, 286, - 626, 595, 590, 204, 221, 0, 260, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, - 200, 207, 220, 230, 234, 241, 259, 274, 276, 283, - 296, 307, 315, 316, 319, 325, 375, 381, 382, 383, - 384, 403, 404, 405, 408, 411, 412, 415, 417, 418, - 421, 425, 429, 430, 431, 433, 435, 437, 449, 454, - 468, 469, 470, 471, 472, 475, 476, 481, 482, 483, - 484, 485, 493, 494, 499, 569, 571, 584, 602, 608, - 474, 298, 299, 438, 439, 311, 312, 622, 623, 297, - 579, 609, 577, 621, 603, 432, 373, 0, 0, 376, - 279, 302, 317, 0, 594, 495, 225, 460, 288, 249, - 0, 0, 209, 244, 228, 257, 272, 275, 321, 386, - 394, 423, 428, 294, 269, 242, 453, 239, 478, 502, - 503, 504, 506, 390, 264, 427, 391, 0, 371, 559, - 560, 313, 511, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 410, 0, 0, 0, 2285, 0, - 0, 0, 0, 268, 0, 0, 0, 0, 361, 265, - 0, 0, 424, 0, 203, 0, 480, 250, 372, 369, - 566, 280, 271, 267, 248, 314, 380, 422, 501, 416, - 0, 365, 0, 0, 490, 395, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 320, 246, 322, 202, 407, 491, 284, 0, 0, - 0, 0, 1904, 194, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 236, 0, 0, 243, 0, 0, 0, - 346, 355, 354, 335, 336, 338, 340, 345, 352, 358, - 0, 0, 0, 0, 0, 263, 318, 270, 262, 563, - 0, 0, 0, 0, 0, 0, 0, 227, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 273, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 295, 0, 396, 255, 0, 447, - 0, 0, 0, 605, 0, 0, 0, 0, 0, 0, - 0, 360, 0, 327, 197, 223, 0, 0, 406, 455, - 467, 0, 0, 0, 251, 0, 465, 420, 583, 231, - 282, 452, 426, 463, 434, 285, 0, 0, 464, 367, - 568, 444, 580, 606, 607, 261, 400, 592, 505, 600, - 624, 224, 258, 414, 498, 586, 487, 392, 564, 565, - 326, 486, 293, 201, 364, 612, 222, 473, 366, 240, - 229, 570, 589, 287, 450, 619, 211, 500, 578, 237, - 477, 0, 0, 627, 245, 497, 213, 575, 496, 388, - 323, 324, 212, 0, 451, 266, 291, 0, 0, 256, - 409, 572, 573, 254, 628, 226, 599, 218, 0, 598, - 402, 567, 576, 389, 378, 217, 574, 387, 377, 331, - 350, 351, 278, 304, 441, 370, 442, 303, 305, 398, - 397, 399, 205, 587, 0, 206, 0, 492, 588, 629, - 446, 210, 232, 233, 235, 0, 277, 281, 289, 292, - 300, 301, 310, 362, 413, 440, 436, 445, 0, 562, - 581, 593, 604, 610, 611, 613, 614, 615, 616, 617, - 620, 618, 401, 308, 488, 330, 368, 0, 0, 419, - 466, 238, 585, 489, 199, 0, 0, 0, 0, 252, - 253, 0, 558, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 630, 631, 632, 633, 634, 635, 636, 637, - 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, - 625, 0, 0, 0, 0, 0, 0, 0, 0, 648, - 379, 479, 582, 332, 344, 347, 337, 356, 0, 357, - 333, 334, 339, 341, 342, 343, 348, 349, 353, 359, - 247, 208, 385, 393, 561, 309, 214, 215, 216, 507, - 508, 509, 510, 596, 597, 601, 456, 457, 458, 459, - 290, 591, 306, 462, 461, 328, 329, 374, 443, 523, - 525, 536, 540, 542, 544, 550, 553, 524, 526, 537, - 541, 543, 545, 551, 554, 513, 515, 517, 519, 532, - 531, 528, 556, 557, 534, 539, 518, 530, 535, 548, - 555, 552, 512, 516, 520, 529, 547, 546, 527, 538, - 549, 533, 521, 514, 522, 0, 196, 219, 363, 0, - 448, 286, 626, 595, 590, 204, 221, 0, 260, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 198, 200, 207, 220, 230, 234, 241, 259, 274, - 276, 283, 296, 307, 315, 316, 319, 325, 375, 381, - 382, 383, 384, 403, 404, 405, 408, 411, 412, 415, - 417, 418, 421, 425, 429, 430, 431, 
433, 435, 437, - 449, 454, 468, 469, 470, 471, 472, 475, 476, 481, - 482, 483, 484, 485, 493, 494, 499, 569, 571, 584, - 602, 608, 474, 298, 299, 438, 439, 311, 312, 622, - 623, 297, 579, 609, 577, 621, 603, 432, 373, 0, - 0, 376, 279, 302, 317, 0, 594, 495, 225, 460, - 288, 249, 0, 0, 209, 244, 228, 257, 272, 275, - 321, 386, 394, 423, 428, 294, 269, 242, 453, 239, - 478, 502, 503, 504, 506, 390, 264, 427, 391, 0, - 371, 559, 560, 313, 511, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 410, 0, 0, 0, - 0, 0, 0, 0, 0, 268, 0, 0, 0, 0, - 361, 265, 0, 0, 424, 0, 203, 0, 480, 250, - 372, 369, 566, 280, 271, 267, 248, 314, 380, 422, - 501, 416, 0, 365, 0, 0, 490, 395, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 320, 246, 322, 202, 407, 491, 284, - 0, 0, 0, 1701, 0, 696, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 236, 0, 0, 243, 0, - 0, 0, 346, 355, 354, 335, 336, 338, 340, 345, - 352, 358, 0, 0, 0, 0, 0, 263, 318, 270, - 262, 563, 0, 0, 0, 0, 0, 0, 0, 227, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 273, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 295, 0, 396, 255, - 0, 447, 0, 0, 0, 605, 0, 0, 0, 3864, - 0, 0, 0, 360, 0, 327, 197, 223, 0, 0, - 406, 455, 467, 0, 0, 0, 251, 0, 465, 420, - 583, 231, 282, 452, 426, 463, 434, 285, 0, 0, - 464, 367, 568, 444, 580, 606, 607, 261, 400, 592, - 505, 600, 624, 224, 258, 414, 498, 586, 487, 392, - 564, 565, 326, 486, 293, 201, 364, 612, 222, 473, - 366, 240, 229, 570, 589, 287, 450, 619, 211, 500, - 578, 237, 477, 0, 0, 627, 245, 497, 213, 575, - 496, 388, 323, 324, 212, 0, 451, 266, 291, 0, - 0, 256, 409, 572, 573, 254, 628, 226, 599, 218, - 0, 598, 402, 567, 576, 389, 378, 217, 574, 387, - 377, 331, 350, 351, 278, 304, 441, 370, 442, 303, - 305, 398, 397, 399, 205, 587, 0, 206, 0, 492, - 588, 629, 446, 210, 232, 233, 235, 0, 277, 281, 
- 289, 292, 300, 301, 310, 362, 413, 440, 436, 445, - 0, 562, 581, 593, 604, 610, 611, 613, 614, 615, - 616, 617, 620, 618, 401, 308, 488, 330, 368, 0, - 0, 419, 466, 238, 585, 489, 199, 0, 0, 0, - 0, 252, 253, 0, 558, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 630, 631, 632, 633, 634, 635, - 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, - 646, 647, 625, 0, 0, 0, 0, 0, 0, 0, - 0, 648, 379, 479, 582, 332, 344, 347, 337, 356, - 0, 357, 333, 334, 339, 341, 342, 343, 348, 349, - 353, 359, 247, 208, 385, 393, 561, 309, 214, 215, - 216, 507, 508, 509, 510, 596, 597, 601, 456, 457, - 458, 459, 290, 591, 306, 462, 461, 328, 329, 374, - 443, 523, 525, 536, 540, 542, 544, 550, 553, 524, - 526, 537, 541, 543, 545, 551, 554, 513, 515, 517, - 519, 532, 531, 528, 556, 557, 534, 539, 518, 530, - 535, 548, 555, 552, 512, 516, 520, 529, 547, 546, - 527, 538, 549, 533, 521, 514, 522, 0, 196, 219, - 363, 0, 448, 286, 626, 595, 590, 204, 221, 0, - 260, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 198, 200, 207, 220, 230, 234, 241, - 259, 274, 276, 283, 296, 307, 315, 316, 319, 325, - 375, 381, 382, 383, 384, 403, 404, 405, 408, 411, - 412, 415, 417, 418, 421, 425, 429, 430, 431, 433, - 435, 437, 449, 454, 468, 469, 470, 471, 472, 475, - 476, 481, 482, 483, 484, 485, 493, 494, 499, 569, - 571, 584, 602, 608, 474, 298, 299, 438, 439, 311, - 312, 622, 623, 297, 579, 609, 577, 621, 603, 432, - 373, 0, 0, 376, 279, 302, 317, 0, 594, 495, - 225, 460, 288, 249, 0, 0, 209, 244, 228, 257, - 272, 275, 321, 386, 394, 423, 428, 294, 269, 242, - 453, 239, 478, 502, 503, 504, 506, 390, 264, 427, - 391, 0, 371, 559, 560, 313, 511, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 410, 0, - 0, 0, 0, 0, 0, 0, 0, 268, 0, 0, - 0, 0, 361, 265, 0, 0, 424, 0, 203, 0, - 480, 250, 372, 369, 566, 280, 271, 267, 248, 314, - 380, 422, 501, 416, 0, 365, 0, 0, 490, 395, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 320, 246, 322, 202, 407, - 491, 284, 0, 0, 0, 0, 2064, 696, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 236, 0, 
0, - 243, 0, 0, 0, 346, 355, 354, 335, 336, 338, - 340, 345, 352, 358, 0, 0, 0, 0, 0, 263, - 318, 270, 262, 563, 0, 0, 0, 0, 0, 0, - 0, 227, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 273, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 2065, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 295, 0, - 396, 255, 0, 447, 0, 0, 0, 605, 0, 0, - 0, 0, 0, 0, 0, 360, 0, 327, 197, 223, - 0, 0, 406, 455, 467, 0, 0, 0, 251, 0, - 465, 420, 583, 231, 282, 452, 426, 463, 434, 285, - 0, 0, 464, 367, 568, 444, 580, 606, 607, 261, - 400, 592, 505, 600, 624, 224, 258, 414, 498, 586, - 487, 392, 564, 565, 326, 486, 293, 201, 364, 612, - 222, 473, 366, 240, 229, 570, 589, 287, 450, 619, - 211, 500, 578, 237, 477, 0, 0, 627, 245, 497, - 213, 575, 496, 388, 323, 324, 212, 0, 451, 266, - 291, 0, 0, 256, 409, 572, 573, 254, 628, 226, - 599, 218, 0, 598, 402, 567, 576, 389, 378, 217, - 574, 387, 377, 331, 350, 351, 278, 304, 441, 370, - 442, 303, 305, 398, 397, 399, 205, 587, 0, 206, - 0, 492, 588, 629, 446, 210, 232, 233, 235, 0, - 277, 281, 289, 292, 300, 301, 310, 362, 413, 440, - 436, 445, 0, 562, 581, 593, 604, 610, 611, 613, - 614, 615, 616, 617, 620, 618, 401, 308, 488, 330, - 368, 0, 0, 419, 466, 238, 585, 489, 199, 0, - 0, 0, 0, 252, 253, 0, 558, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 630, 631, 632, 633, - 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, - 644, 645, 646, 647, 625, 0, 0, 0, 0, 0, - 0, 0, 0, 648, 379, 479, 582, 332, 344, 347, - 337, 356, 0, 357, 333, 334, 339, 341, 342, 343, - 348, 349, 353, 359, 247, 208, 385, 393, 561, 309, - 214, 215, 216, 507, 508, 509, 510, 596, 597, 601, - 456, 457, 458, 459, 290, 591, 306, 462, 461, 328, - 329, 374, 443, 523, 525, 536, 540, 542, 544, 550, - 553, 524, 526, 537, 541, 543, 545, 551, 554, 513, - 515, 517, 519, 532, 531, 528, 556, 557, 534, 539, - 518, 530, 535, 548, 555, 552, 512, 
516, 520, 529, - 547, 546, 527, 538, 549, 533, 521, 514, 522, 0, - 196, 219, 363, 0, 448, 286, 626, 595, 590, 204, - 221, 0, 260, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 198, 200, 207, 220, 230, - 234, 241, 259, 274, 276, 283, 296, 307, 315, 316, - 319, 325, 375, 381, 382, 383, 384, 403, 404, 405, - 408, 411, 412, 415, 417, 418, 421, 425, 429, 430, - 431, 433, 435, 437, 449, 454, 468, 469, 470, 471, - 472, 475, 476, 481, 482, 483, 484, 485, 493, 494, - 499, 569, 571, 584, 602, 608, 474, 298, 299, 438, - 439, 311, 312, 622, 623, 297, 579, 609, 577, 621, - 603, 432, 373, 0, 0, 376, 279, 302, 317, 0, - 594, 495, 225, 460, 288, 249, 0, 0, 209, 244, - 228, 257, 272, 275, 321, 386, 394, 423, 428, 294, - 269, 242, 453, 239, 478, 502, 503, 504, 506, 390, - 264, 427, 391, 0, 371, 559, 560, 313, 511, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 410, 0, 0, 0, 0, 0, 0, 0, 0, 268, - 0, 0, 0, 0, 361, 265, 0, 0, 424, 0, - 203, 0, 480, 250, 372, 369, 566, 280, 271, 267, - 248, 314, 380, 422, 501, 416, 0, 365, 0, 0, - 490, 395, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 320, 246, 322, - 202, 407, 491, 284, 0, 0, 0, 0, 2778, 696, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 236, - 0, 0, 243, 0, 0, 0, 346, 355, 354, 335, - 336, 338, 340, 345, 352, 358, 0, 0, 0, 0, - 0, 263, 318, 270, 262, 563, 0, 0, 0, 0, - 0, 0, 0, 227, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 273, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 2779, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 295, 0, 396, 255, 0, 447, 0, 0, 0, 605, - 0, 0, 0, 0, 0, 0, 0, 360, 0, 327, - 197, 223, 0, 0, 406, 455, 467, 0, 0, 0, - 251, 0, 465, 420, 583, 231, 282, 452, 426, 463, - 434, 285, 0, 0, 464, 367, 568, 444, 580, 606, - 607, 261, 400, 592, 505, 600, 624, 224, 258, 414, - 498, 586, 487, 392, 564, 565, 326, 486, 293, 201, - 364, 612, 222, 473, 366, 240, 
229, 570, 589, 287, - 450, 619, 211, 500, 578, 237, 477, 0, 0, 627, - 245, 497, 213, 575, 496, 388, 323, 324, 212, 0, - 451, 266, 291, 0, 0, 256, 409, 572, 573, 254, - 628, 226, 599, 218, 0, 598, 402, 567, 576, 389, - 378, 217, 574, 387, 377, 331, 350, 351, 278, 304, - 441, 370, 442, 303, 305, 398, 397, 399, 205, 587, - 0, 206, 0, 492, 588, 629, 446, 210, 232, 233, - 235, 0, 277, 281, 289, 292, 300, 301, 310, 362, - 413, 440, 436, 445, 0, 562, 581, 593, 604, 610, - 611, 613, 614, 615, 616, 617, 620, 618, 401, 308, - 488, 330, 368, 0, 0, 419, 466, 238, 585, 489, - 199, 0, 0, 0, 0, 252, 253, 0, 558, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 630, 631, - 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, - 642, 643, 644, 645, 646, 647, 625, 0, 0, 0, - 0, 0, 0, 0, 0, 648, 379, 479, 582, 332, - 344, 347, 337, 356, 0, 357, 333, 334, 339, 341, - 342, 343, 348, 349, 353, 359, 247, 208, 385, 393, - 561, 309, 214, 215, 216, 507, 508, 509, 510, 596, - 597, 601, 456, 457, 458, 459, 290, 591, 306, 462, - 461, 328, 329, 374, 443, 523, 525, 536, 540, 542, - 544, 550, 553, 524, 526, 537, 541, 543, 545, 551, - 554, 513, 515, 517, 519, 532, 531, 528, 556, 557, - 534, 539, 518, 530, 535, 548, 555, 552, 512, 516, - 520, 529, 547, 546, 527, 538, 549, 533, 521, 514, - 522, 0, 196, 219, 363, 0, 448, 286, 626, 595, - 590, 204, 221, 0, 260, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 198, 200, 207, - 220, 230, 234, 241, 259, 274, 276, 283, 296, 307, - 315, 316, 319, 325, 375, 381, 382, 383, 384, 403, - 404, 405, 408, 411, 412, 415, 417, 418, 421, 425, - 429, 430, 431, 433, 435, 437, 449, 454, 468, 469, - 470, 471, 472, 475, 476, 481, 482, 483, 484, 485, - 493, 494, 499, 569, 571, 584, 602, 608, 474, 298, - 299, 438, 439, 311, 312, 622, 623, 297, 579, 609, - 577, 621, 603, 432, 373, 0, 0, 376, 279, 302, - 317, 0, 594, 495, 225, 460, 288, 249, 0, 0, - 209, 244, 228, 257, 272, 275, 321, 386, 394, 423, - 428, 294, 269, 242, 453, 239, 478, 502, 503, 504, - 506, 390, 264, 427, 391, 0, 
371, 559, 560, 313, - 511, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 410, 0, 0, 0, 0, 0, 0, 0, - 0, 268, 0, 0, 0, 0, 361, 265, 0, 0, - 424, 0, 203, 0, 480, 250, 372, 369, 566, 280, - 271, 267, 248, 314, 380, 422, 501, 416, 0, 365, - 0, 0, 490, 395, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 320, - 246, 322, 202, 407, 491, 284, 0, 0, 0, 0, - 0, 696, 0, 0, 0, 0, 2763, 0, 0, 0, - 0, 236, 0, 0, 243, 2764, 0, 0, 346, 355, - 354, 335, 336, 338, 340, 345, 352, 358, 0, 0, - 0, 0, 0, 263, 318, 270, 262, 563, 0, 0, - 0, 0, 0, 0, 0, 227, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 273, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 295, 0, 396, 255, 0, 447, 0, 0, - 0, 605, 0, 0, 0, 0, 0, 0, 0, 360, - 0, 327, 197, 223, 0, 0, 406, 455, 467, 0, - 0, 0, 251, 0, 465, 420, 583, 231, 282, 452, - 426, 463, 434, 285, 0, 0, 464, 367, 568, 444, - 580, 606, 607, 261, 400, 592, 505, 600, 624, 224, - 258, 414, 498, 586, 487, 392, 564, 565, 326, 486, - 293, 201, 364, 612, 222, 473, 366, 240, 229, 570, - 589, 287, 450, 619, 211, 500, 578, 237, 477, 0, - 0, 627, 245, 497, 213, 575, 496, 388, 323, 324, - 212, 0, 451, 266, 291, 0, 0, 256, 409, 572, - 573, 254, 628, 226, 599, 218, 0, 598, 402, 567, - 576, 389, 378, 217, 574, 387, 377, 331, 350, 351, - 278, 304, 441, 370, 442, 303, 305, 398, 397, 399, - 205, 587, 0, 206, 0, 492, 588, 629, 446, 210, - 232, 233, 235, 0, 277, 281, 289, 292, 300, 301, - 310, 362, 413, 440, 436, 445, 0, 562, 581, 593, - 604, 610, 611, 613, 614, 615, 616, 617, 620, 618, - 401, 308, 488, 330, 368, 0, 0, 419, 466, 238, - 585, 489, 199, 0, 0, 0, 0, 252, 253, 0, - 558, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, - 640, 641, 642, 643, 644, 645, 646, 647, 625, 0, - 0, 0, 0, 0, 0, 0, 0, 648, 379, 479, - 582, 332, 344, 347, 337, 356, 
0, 357, 333, 334, - 339, 341, 342, 343, 348, 349, 353, 359, 247, 208, - 385, 393, 561, 309, 214, 215, 216, 507, 508, 509, - 510, 596, 597, 601, 456, 457, 458, 459, 290, 591, - 306, 462, 461, 328, 329, 374, 443, 523, 525, 536, - 540, 542, 544, 550, 553, 524, 526, 537, 541, 543, - 545, 551, 554, 513, 515, 517, 519, 532, 531, 528, - 556, 557, 534, 539, 518, 530, 535, 548, 555, 552, - 512, 516, 520, 529, 547, 546, 527, 538, 549, 533, - 521, 514, 522, 0, 196, 219, 363, 0, 448, 286, - 626, 595, 590, 204, 221, 0, 260, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, - 200, 207, 220, 230, 234, 241, 259, 274, 276, 283, - 296, 307, 315, 316, 319, 325, 375, 381, 382, 383, - 384, 403, 404, 405, 408, 411, 412, 415, 417, 418, - 421, 425, 429, 430, 431, 433, 435, 437, 449, 454, - 468, 469, 470, 471, 472, 475, 476, 481, 482, 483, - 484, 485, 493, 494, 499, 569, 571, 584, 602, 608, - 474, 298, 299, 438, 439, 311, 312, 622, 623, 297, - 579, 609, 577, 621, 603, 432, 373, 0, 0, 376, - 279, 302, 317, 0, 594, 495, 225, 460, 288, 249, - 0, 0, 209, 244, 228, 257, 272, 275, 321, 386, - 394, 423, 428, 294, 269, 242, 453, 239, 478, 502, - 503, 504, 506, 390, 264, 427, 391, 0, 371, 559, - 560, 313, 511, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 410, 0, 0, 0, 0, 0, - 0, 0, 0, 268, 1744, 0, 0, 0, 361, 265, - 0, 0, 424, 0, 203, 0, 480, 250, 372, 369, - 566, 280, 271, 267, 248, 314, 380, 422, 501, 416, - 0, 365, 0, 0, 490, 395, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 320, 246, 322, 202, 407, 491, 284, 0, 0, - 0, 0, 1743, 696, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 236, 0, 0, 243, 0, 0, 0, - 346, 355, 354, 335, 336, 338, 340, 345, 352, 358, - 0, 0, 0, 0, 0, 263, 318, 270, 262, 563, - 0, 0, 0, 0, 0, 0, 0, 227, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 273, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 295, 0, 396, 255, 0, 447, - 0, 0, 0, 605, 0, 0, 0, 0, 0, 0, - 0, 360, 0, 327, 197, 223, 0, 0, 406, 455, - 467, 0, 0, 0, 251, 0, 465, 420, 583, 231, - 282, 452, 426, 463, 434, 285, 0, 0, 464, 367, - 568, 444, 580, 606, 607, 261, 400, 592, 505, 600, - 624, 224, 258, 414, 498, 586, 487, 392, 564, 565, - 326, 486, 293, 201, 364, 612, 222, 473, 366, 240, - 229, 570, 589, 287, 450, 619, 211, 500, 578, 237, - 477, 0, 0, 627, 245, 497, 213, 575, 496, 388, - 323, 324, 212, 0, 451, 266, 291, 0, 0, 256, - 409, 572, 573, 254, 628, 226, 599, 218, 0, 598, - 402, 567, 576, 389, 378, 217, 574, 387, 377, 331, - 350, 351, 278, 304, 441, 370, 442, 303, 305, 398, - 397, 399, 205, 587, 0, 206, 0, 492, 588, 629, - 446, 210, 232, 233, 235, 0, 277, 281, 289, 292, - 300, 301, 310, 362, 413, 440, 436, 445, 0, 562, - 581, 593, 604, 610, 611, 613, 614, 615, 616, 617, - 620, 618, 401, 308, 488, 330, 368, 0, 0, 419, - 466, 238, 585, 489, 199, 0, 0, 0, 0, 252, - 253, 0, 558, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 630, 631, 632, 633, 634, 635, 636, 637, - 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, - 625, 0, 0, 0, 0, 0, 0, 0, 0, 648, - 379, 479, 582, 332, 344, 347, 337, 356, 0, 357, - 333, 334, 339, 341, 342, 343, 348, 349, 353, 359, - 247, 208, 385, 393, 561, 309, 214, 215, 216, 507, - 508, 509, 510, 596, 597, 601, 456, 457, 458, 459, - 290, 591, 306, 462, 461, 328, 329, 374, 443, 523, - 525, 536, 540, 542, 544, 550, 553, 524, 526, 537, - 541, 543, 545, 551, 554, 513, 515, 517, 519, 532, - 531, 528, 556, 557, 534, 539, 518, 530, 535, 548, - 555, 552, 512, 516, 520, 529, 547, 546, 527, 538, - 549, 533, 521, 514, 522, 0, 196, 219, 363, 0, - 448, 286, 626, 595, 590, 204, 221, 0, 260, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 198, 200, 207, 220, 230, 234, 241, 259, 274, - 276, 283, 296, 307, 315, 316, 319, 325, 375, 381, - 382, 383, 384, 403, 404, 405, 408, 411, 412, 415, - 417, 418, 421, 425, 429, 430, 431, 433, 435, 437, - 449, 454, 468, 
469, 470, 471, 472, 475, 476, 481, - 482, 483, 484, 485, 493, 494, 499, 569, 571, 584, - 602, 608, 474, 298, 299, 438, 439, 311, 312, 622, - 623, 297, 579, 609, 577, 621, 603, 432, 373, 0, - 0, 376, 279, 302, 317, 0, 594, 495, 225, 460, - 288, 249, 0, 0, 209, 244, 228, 257, 272, 275, - 321, 386, 394, 423, 428, 294, 269, 242, 453, 239, - 478, 502, 503, 504, 506, 390, 264, 427, 391, 0, - 371, 559, 560, 313, 511, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 410, 0, 0, 0, - 0, 0, 0, 0, 0, 268, 0, 0, 0, 0, - 361, 265, 0, 0, 424, 0, 203, 0, 480, 250, - 372, 369, 566, 280, 271, 267, 248, 314, 380, 422, - 501, 416, 0, 365, 0, 0, 490, 395, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 320, 246, 322, 202, 407, 491, 284, - 0, 0, 0, 0, 0, 698, 699, 700, 0, 0, - 0, 0, 0, 0, 0, 236, 0, 0, 243, 0, - 0, 0, 346, 355, 354, 335, 336, 338, 340, 345, - 352, 358, 0, 0, 0, 0, 0, 263, 318, 270, - 262, 563, 0, 0, 0, 0, 0, 0, 0, 227, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 273, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 295, 0, 396, 255, - 0, 447, 0, 0, 0, 605, 0, 0, 0, 0, - 0, 0, 0, 360, 0, 327, 197, 223, 0, 0, - 406, 455, 467, 0, 0, 0, 251, 0, 465, 420, - 583, 231, 282, 452, 426, 463, 434, 285, 0, 0, - 464, 367, 568, 444, 580, 606, 607, 261, 400, 592, - 505, 600, 624, 224, 258, 414, 498, 586, 487, 392, - 564, 565, 326, 486, 293, 201, 364, 612, 222, 473, - 366, 240, 229, 570, 589, 287, 450, 619, 211, 500, - 578, 237, 477, 0, 0, 627, 245, 497, 213, 575, - 496, 388, 323, 324, 212, 0, 451, 266, 291, 0, - 0, 256, 409, 572, 573, 254, 628, 226, 599, 218, - 0, 598, 402, 567, 576, 389, 378, 217, 574, 387, - 377, 331, 350, 351, 278, 304, 441, 370, 442, 303, - 305, 398, 397, 399, 205, 587, 0, 206, 0, 492, - 588, 629, 446, 210, 232, 233, 235, 0, 277, 281, - 289, 292, 300, 301, 310, 362, 
413, 440, 436, 445, - 0, 562, 581, 593, 604, 610, 611, 613, 614, 615, - 616, 617, 620, 618, 401, 308, 488, 330, 368, 0, - 0, 419, 466, 238, 585, 489, 199, 0, 0, 0, - 0, 252, 253, 0, 558, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 630, 631, 632, 633, 634, 635, - 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, - 646, 647, 625, 0, 0, 0, 0, 0, 0, 0, - 0, 648, 379, 479, 582, 332, 344, 347, 337, 356, - 0, 357, 333, 334, 339, 341, 342, 343, 348, 349, - 353, 359, 247, 208, 385, 393, 561, 309, 214, 215, - 216, 507, 508, 509, 510, 596, 597, 601, 456, 457, - 458, 459, 290, 591, 306, 462, 461, 328, 329, 374, - 443, 523, 525, 536, 540, 542, 544, 550, 553, 524, - 526, 537, 541, 543, 545, 551, 554, 513, 515, 517, - 519, 532, 531, 528, 556, 557, 534, 539, 518, 530, - 535, 548, 555, 552, 512, 516, 520, 529, 547, 546, - 527, 538, 549, 533, 521, 514, 522, 0, 196, 219, - 363, 0, 448, 286, 626, 595, 590, 204, 221, 0, - 260, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 198, 200, 207, 220, 230, 234, 241, - 259, 274, 276, 283, 296, 307, 315, 316, 319, 325, - 375, 381, 382, 383, 384, 403, 404, 405, 408, 411, - 412, 415, 417, 418, 421, 425, 429, 430, 431, 433, - 435, 437, 449, 454, 468, 469, 470, 471, 472, 475, - 476, 481, 482, 483, 484, 485, 493, 494, 499, 569, - 571, 584, 602, 608, 474, 298, 299, 438, 439, 311, - 312, 622, 623, 297, 579, 609, 577, 621, 603, 432, - 373, 0, 0, 376, 279, 302, 317, 0, 594, 495, - 225, 460, 288, 249, 0, 0, 209, 244, 228, 257, - 272, 275, 321, 386, 394, 423, 428, 294, 269, 242, - 453, 239, 478, 502, 503, 504, 506, 390, 264, 427, - 391, 0, 371, 559, 560, 313, 511, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 410, 0, - 0, 0, 0, 0, 0, 0, 0, 268, 0, 0, - 0, 0, 361, 265, 0, 0, 424, 0, 203, 0, - 480, 250, 372, 369, 566, 280, 271, 267, 248, 314, - 380, 422, 501, 416, 0, 365, 0, 0, 490, 395, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 320, 246, 322, 202, 407, - 491, 284, 0, 0, 0, 0, 0, 696, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 236, 0, 0, - 243, 0, 0, 0, 346, 355, 354, 
335, 336, 338, - 340, 345, 352, 358, 0, 0, 0, 0, 0, 263, - 318, 270, 262, 563, 0, 0, 0, 0, 0, 0, - 0, 227, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 273, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 295, 0, - 396, 255, 0, 447, 0, 0, 0, 605, 0, 0, - 0, 3998, 0, 0, 0, 360, 0, 327, 197, 223, - 0, 0, 406, 455, 467, 0, 0, 0, 251, 0, - 465, 420, 583, 231, 282, 452, 426, 463, 434, 285, - 0, 0, 464, 367, 568, 444, 580, 606, 607, 261, - 400, 592, 505, 600, 624, 224, 258, 414, 498, 586, - 487, 392, 564, 565, 326, 486, 293, 201, 364, 612, - 222, 473, 366, 240, 229, 570, 589, 287, 450, 619, - 211, 500, 578, 237, 477, 0, 0, 627, 245, 497, - 213, 575, 496, 388, 323, 324, 212, 0, 451, 266, - 291, 0, 0, 256, 409, 572, 573, 254, 628, 226, - 599, 218, 0, 598, 402, 567, 576, 389, 378, 217, - 574, 387, 377, 331, 350, 351, 278, 304, 441, 370, - 442, 303, 305, 398, 397, 399, 205, 587, 0, 206, - 0, 492, 588, 629, 446, 210, 232, 233, 235, 0, - 277, 281, 289, 292, 300, 301, 310, 362, 413, 440, - 436, 445, 0, 562, 581, 593, 604, 610, 611, 613, - 614, 615, 616, 617, 620, 618, 401, 308, 488, 330, - 368, 0, 0, 419, 466, 238, 585, 489, 199, 0, - 0, 0, 0, 252, 253, 0, 558, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 630, 631, 632, 633, - 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, - 644, 645, 646, 647, 625, 0, 0, 0, 0, 0, - 0, 0, 0, 648, 379, 479, 582, 332, 344, 347, - 337, 356, 0, 357, 333, 334, 339, 341, 342, 343, - 348, 349, 353, 359, 247, 208, 385, 393, 561, 309, - 214, 215, 216, 507, 508, 509, 510, 596, 597, 601, - 456, 457, 458, 459, 290, 591, 306, 462, 461, 328, - 329, 374, 443, 523, 525, 536, 540, 542, 544, 550, - 553, 524, 526, 537, 541, 543, 545, 551, 554, 513, - 515, 517, 519, 532, 531, 528, 556, 557, 534, 539, - 518, 530, 535, 548, 555, 552, 512, 516, 520, 529, - 547, 546, 527, 
538, 549, 533, 521, 514, 522, 0, - 196, 219, 363, 0, 448, 286, 626, 595, 590, 204, - 221, 0, 260, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 198, 200, 207, 220, 230, - 234, 241, 259, 274, 276, 283, 296, 307, 315, 316, - 319, 325, 375, 381, 382, 383, 384, 403, 404, 405, - 408, 411, 412, 415, 417, 418, 421, 425, 429, 430, - 431, 433, 435, 437, 449, 454, 468, 469, 470, 471, - 472, 475, 476, 481, 482, 483, 484, 485, 493, 494, - 499, 569, 571, 584, 602, 608, 474, 298, 299, 438, - 439, 311, 312, 622, 623, 297, 579, 609, 577, 621, - 603, 432, 373, 0, 0, 376, 279, 302, 317, 0, - 594, 495, 225, 460, 288, 249, 0, 0, 209, 244, - 228, 257, 272, 275, 321, 386, 394, 423, 428, 294, - 269, 242, 453, 239, 478, 502, 503, 504, 506, 390, - 264, 427, 391, 0, 371, 559, 560, 313, 511, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 410, 0, 0, 0, 0, 0, 0, 0, 0, 268, - 0, 0, 0, 0, 361, 265, 0, 0, 424, 0, - 203, 0, 480, 250, 372, 369, 566, 280, 271, 267, - 248, 314, 380, 422, 501, 416, 0, 365, 0, 0, - 490, 395, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 320, 246, 322, - 202, 407, 491, 284, 0, 0, 0, 0, 1904, 194, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 236, - 0, 0, 243, 0, 0, 0, 346, 355, 354, 335, - 336, 338, 340, 345, 352, 358, 0, 0, 0, 0, - 0, 263, 318, 270, 262, 563, 0, 0, 0, 0, - 0, 0, 0, 227, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 273, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 295, 0, 396, 255, 0, 447, 0, 0, 0, 605, - 0, 0, 0, 0, 0, 0, 0, 360, 0, 327, - 197, 223, 0, 0, 406, 455, 467, 0, 0, 0, - 251, 0, 465, 420, 583, 231, 282, 452, 426, 463, - 434, 285, 0, 0, 464, 367, 568, 444, 580, 606, - 607, 261, 400, 592, 505, 600, 624, 224, 258, 414, - 498, 586, 487, 392, 564, 565, 326, 486, 293, 201, - 364, 612, 222, 473, 366, 240, 229, 570, 589, 287, - 450, 619, 
211, 500, 578, 237, 477, 0, 0, 627, - 245, 497, 213, 575, 496, 388, 323, 324, 212, 0, - 451, 266, 291, 0, 0, 256, 409, 572, 573, 254, - 628, 226, 599, 218, 0, 598, 402, 567, 576, 389, - 378, 217, 574, 387, 377, 331, 350, 351, 278, 304, - 441, 370, 442, 303, 305, 398, 397, 399, 205, 587, - 0, 206, 0, 492, 588, 629, 446, 210, 232, 233, - 235, 0, 277, 281, 289, 292, 300, 301, 310, 362, - 413, 440, 436, 445, 0, 562, 581, 593, 604, 610, - 611, 613, 614, 615, 616, 617, 620, 618, 401, 308, - 488, 330, 368, 0, 0, 419, 466, 238, 585, 489, - 199, 0, 0, 0, 0, 252, 253, 0, 558, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 630, 631, - 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, - 642, 643, 644, 645, 646, 647, 625, 0, 0, 0, - 0, 0, 0, 0, 0, 648, 379, 479, 582, 332, - 344, 347, 337, 356, 0, 357, 333, 334, 339, 341, - 342, 343, 348, 349, 353, 359, 247, 208, 385, 393, - 561, 309, 214, 215, 216, 507, 508, 509, 510, 596, - 597, 601, 456, 457, 458, 459, 290, 591, 306, 462, - 461, 328, 329, 374, 443, 523, 525, 536, 540, 542, - 544, 550, 553, 524, 526, 537, 541, 543, 545, 551, - 554, 513, 515, 517, 519, 532, 531, 528, 556, 557, - 534, 539, 518, 530, 535, 548, 555, 552, 512, 516, - 520, 529, 547, 546, 527, 538, 549, 533, 521, 514, - 522, 0, 196, 219, 363, 0, 448, 286, 626, 595, - 590, 204, 221, 0, 260, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 198, 200, 207, - 220, 230, 234, 241, 259, 274, 276, 283, 296, 307, - 315, 316, 319, 325, 375, 381, 382, 383, 384, 403, - 404, 405, 408, 411, 412, 415, 417, 418, 421, 425, - 429, 430, 431, 433, 435, 437, 449, 454, 468, 469, - 470, 471, 472, 475, 476, 481, 482, 483, 484, 485, - 493, 494, 499, 569, 571, 584, 602, 608, 474, 298, - 299, 438, 439, 311, 312, 622, 623, 297, 579, 609, - 577, 621, 603, 432, 373, 0, 0, 376, 279, 302, - 317, 0, 594, 495, 225, 460, 288, 249, 0, 0, - 209, 244, 228, 257, 272, 275, 321, 386, 394, 423, - 428, 294, 269, 242, 453, 239, 478, 502, 503, 504, - 506, 390, 264, 427, 391, 0, 371, 559, 560, 313, - 511, 0, 
0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 410, 0, 0, 0, 0, 0, 0, 0, - 0, 268, 0, 0, 0, 0, 361, 265, 0, 0, - 424, 0, 203, 0, 480, 250, 372, 369, 566, 280, - 271, 267, 248, 314, 380, 422, 501, 416, 0, 365, - 0, 0, 490, 395, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 320, - 246, 322, 202, 407, 491, 284, 0, 0, 0, 0, - 0, 696, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 236, 0, 0, 243, 0, 0, 0, 346, 355, - 354, 335, 336, 338, 340, 345, 352, 358, 0, 0, - 0, 0, 0, 263, 318, 270, 262, 563, 0, 0, - 0, 0, 0, 0, 0, 227, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 273, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 295, 0, 396, 255, 0, 447, 0, 0, - 0, 605, 0, 0, 0, 3864, 0, 0, 0, 360, - 0, 327, 197, 223, 0, 0, 406, 455, 467, 0, - 0, 0, 251, 0, 465, 420, 583, 231, 282, 452, - 426, 463, 434, 285, 0, 0, 464, 367, 568, 444, - 580, 606, 607, 261, 400, 592, 505, 600, 624, 224, - 258, 414, 498, 586, 487, 392, 564, 565, 326, 486, - 293, 201, 364, 612, 222, 473, 366, 240, 229, 570, - 589, 287, 450, 619, 211, 500, 578, 237, 477, 0, - 0, 627, 245, 497, 213, 575, 496, 388, 323, 324, - 212, 0, 451, 266, 291, 0, 0, 256, 409, 572, - 573, 254, 628, 226, 599, 218, 0, 598, 402, 567, - 576, 389, 378, 217, 574, 387, 377, 331, 350, 351, - 278, 304, 441, 370, 442, 303, 305, 398, 397, 399, - 205, 587, 0, 206, 0, 492, 588, 629, 446, 210, - 232, 233, 235, 0, 277, 281, 289, 292, 300, 301, - 310, 362, 413, 440, 436, 445, 0, 562, 581, 593, - 604, 610, 611, 613, 614, 615, 616, 617, 620, 618, - 401, 308, 488, 330, 368, 0, 0, 419, 466, 238, - 585, 489, 199, 0, 0, 0, 0, 252, 253, 0, - 558, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, - 640, 641, 642, 643, 644, 645, 646, 647, 625, 0, - 0, 0, 0, 0, 0, 0, 0, 648, 379, 479, - 582, 332, 344, 347, 337, 356, 0, 357, 333, 334, - 339, 341, 342, 
343, 348, 349, 353, 359, 247, 208, - 385, 393, 561, 309, 214, 215, 216, 507, 508, 509, - 510, 596, 597, 601, 456, 457, 458, 459, 290, 591, - 306, 462, 461, 328, 329, 374, 443, 523, 525, 536, - 540, 542, 544, 550, 553, 524, 526, 537, 541, 543, - 545, 551, 554, 513, 515, 517, 519, 532, 531, 528, - 556, 557, 534, 539, 518, 530, 535, 548, 555, 552, - 512, 516, 520, 529, 547, 546, 527, 538, 549, 533, - 521, 514, 522, 0, 196, 219, 363, 0, 448, 286, - 626, 595, 590, 204, 221, 0, 260, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, - 200, 207, 220, 230, 234, 241, 259, 274, 276, 283, - 296, 307, 315, 316, 319, 325, 375, 381, 382, 383, - 384, 403, 404, 405, 408, 411, 412, 415, 417, 418, - 421, 425, 429, 430, 431, 433, 435, 437, 449, 454, - 468, 469, 470, 471, 472, 475, 476, 481, 482, 483, - 484, 485, 493, 494, 499, 569, 571, 584, 602, 608, - 474, 298, 299, 438, 439, 311, 312, 622, 623, 297, - 579, 609, 577, 621, 603, 432, 373, 0, 0, 376, - 279, 302, 317, 0, 594, 495, 225, 460, 288, 249, - 0, 0, 209, 244, 228, 257, 272, 275, 321, 386, - 394, 423, 428, 294, 269, 242, 453, 239, 478, 502, - 503, 504, 506, 390, 264, 427, 391, 0, 371, 559, - 560, 313, 511, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 410, 0, 0, 0, 0, 0, - 0, 0, 0, 268, 0, 0, 0, 0, 361, 265, - 0, 0, 424, 0, 203, 0, 480, 250, 372, 369, - 566, 280, 271, 267, 248, 314, 380, 422, 501, 416, - 0, 365, 0, 0, 490, 395, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 320, 246, 322, 202, 407, 491, 284, 0, 94, - 0, 0, 0, 696, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 236, 0, 0, 243, 0, 0, 0, - 346, 355, 354, 335, 336, 338, 340, 345, 352, 358, - 0, 0, 0, 0, 0, 263, 318, 270, 262, 563, - 0, 0, 0, 0, 0, 0, 0, 227, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 273, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 295, 
0, 396, 255, 0, 447, - 0, 0, 0, 605, 0, 0, 0, 0, 0, 0, - 0, 360, 0, 327, 197, 223, 0, 0, 406, 455, - 467, 0, 0, 0, 251, 0, 465, 420, 583, 231, - 282, 452, 426, 463, 434, 285, 0, 0, 464, 367, - 568, 444, 580, 606, 607, 261, 400, 592, 505, 600, - 624, 224, 258, 414, 498, 586, 487, 392, 564, 565, - 326, 486, 293, 201, 364, 612, 222, 473, 366, 240, - 229, 570, 589, 287, 450, 619, 211, 500, 578, 237, - 477, 0, 0, 627, 245, 497, 213, 575, 496, 388, - 323, 324, 212, 0, 451, 266, 291, 0, 0, 256, - 409, 572, 573, 254, 628, 226, 599, 218, 0, 598, - 402, 567, 576, 389, 378, 217, 574, 387, 377, 331, - 350, 351, 278, 304, 441, 370, 442, 303, 305, 398, - 397, 399, 205, 587, 0, 206, 0, 492, 588, 629, - 446, 210, 232, 233, 235, 0, 277, 281, 289, 292, - 300, 301, 310, 362, 413, 440, 436, 445, 0, 562, - 581, 593, 604, 610, 611, 613, 614, 615, 616, 617, - 620, 618, 401, 308, 488, 330, 368, 0, 0, 419, - 466, 238, 585, 489, 199, 0, 0, 0, 0, 252, - 253, 0, 558, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 630, 631, 632, 633, 634, 635, 636, 637, - 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, - 625, 0, 0, 0, 0, 0, 0, 0, 0, 648, - 379, 479, 582, 332, 344, 347, 337, 356, 0, 357, - 333, 334, 339, 341, 342, 343, 348, 349, 353, 359, - 247, 208, 385, 393, 561, 309, 214, 215, 216, 507, - 508, 509, 510, 596, 597, 601, 456, 457, 458, 459, - 290, 591, 306, 462, 461, 328, 329, 374, 443, 523, - 525, 536, 540, 542, 544, 550, 553, 524, 526, 537, - 541, 543, 545, 551, 554, 513, 515, 517, 519, 532, - 531, 528, 556, 557, 534, 539, 518, 530, 535, 548, - 555, 552, 512, 516, 520, 529, 547, 546, 527, 538, - 549, 533, 521, 514, 522, 0, 196, 219, 363, 0, - 448, 286, 626, 595, 590, 204, 221, 0, 260, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 198, 200, 207, 220, 230, 234, 241, 259, 274, - 276, 283, 296, 307, 315, 316, 319, 325, 375, 381, - 382, 383, 384, 403, 404, 405, 408, 411, 412, 415, - 417, 418, 421, 425, 429, 430, 431, 433, 435, 437, - 449, 454, 468, 469, 470, 471, 472, 475, 476, 481, - 
482, 483, 484, 485, 493, 494, 499, 569, 571, 584, - 602, 608, 474, 298, 299, 438, 439, 311, 312, 622, - 623, 297, 579, 609, 577, 621, 603, 432, 373, 0, - 0, 376, 279, 302, 317, 0, 594, 495, 225, 460, - 288, 249, 0, 0, 209, 244, 228, 257, 272, 275, - 321, 386, 394, 423, 428, 294, 269, 242, 453, 239, - 478, 502, 503, 504, 506, 390, 264, 427, 391, 0, - 371, 559, 560, 313, 511, 0, 0, 0, 0, 2337, - 0, 0, 0, 0, 0, 0, 410, 0, 0, 0, - 0, 0, 0, 0, 0, 268, 0, 0, 0, 0, - 361, 265, 0, 0, 424, 0, 203, 0, 480, 250, - 372, 369, 566, 280, 271, 267, 248, 314, 380, 422, - 501, 416, 0, 365, 0, 0, 490, 395, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 320, 246, 322, 202, 407, 491, 284, - 0, 0, 0, 0, 0, 194, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 236, 0, 0, 243, 0, - 0, 0, 346, 355, 354, 335, 336, 338, 340, 345, - 352, 358, 0, 0, 0, 0, 0, 263, 318, 270, - 262, 563, 0, 0, 0, 0, 0, 0, 0, 227, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 273, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 295, 0, 396, 255, - 0, 447, 0, 0, 0, 605, 0, 0, 0, 0, - 0, 0, 0, 360, 0, 327, 197, 223, 0, 0, - 406, 455, 467, 0, 0, 0, 251, 0, 465, 420, - 583, 231, 282, 452, 426, 463, 434, 285, 0, 0, - 464, 367, 568, 444, 580, 606, 607, 261, 400, 592, - 505, 600, 624, 224, 258, 414, 498, 586, 487, 392, - 564, 565, 326, 486, 293, 201, 364, 612, 222, 473, - 366, 240, 229, 570, 589, 287, 450, 619, 211, 500, - 578, 237, 477, 0, 0, 627, 245, 497, 213, 575, - 496, 388, 323, 324, 212, 0, 451, 266, 291, 0, - 0, 256, 409, 572, 573, 254, 628, 226, 599, 218, - 0, 598, 402, 567, 576, 389, 378, 217, 574, 387, - 377, 331, 350, 351, 278, 304, 441, 370, 442, 303, - 305, 398, 397, 399, 205, 587, 0, 206, 0, 492, - 588, 629, 446, 210, 232, 233, 235, 0, 277, 281, - 289, 292, 300, 301, 310, 362, 413, 440, 436, 445, - 0, 562, 581, 593, 
604, 610, 611, 613, 614, 615, - 616, 617, 620, 618, 401, 308, 488, 330, 368, 0, - 0, 419, 466, 238, 585, 489, 199, 0, 0, 0, - 0, 252, 253, 0, 558, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 630, 631, 632, 633, 634, 635, - 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, - 646, 647, 625, 0, 0, 0, 0, 0, 0, 0, - 0, 648, 379, 479, 582, 332, 344, 347, 337, 356, - 0, 357, 333, 334, 339, 341, 342, 343, 348, 349, - 353, 359, 247, 208, 385, 393, 561, 309, 214, 215, - 216, 507, 508, 509, 510, 596, 597, 601, 456, 457, - 458, 459, 290, 591, 306, 462, 461, 328, 329, 374, - 443, 523, 525, 536, 540, 542, 544, 550, 553, 524, - 526, 537, 541, 543, 545, 551, 554, 513, 515, 517, - 519, 532, 531, 528, 556, 557, 534, 539, 518, 530, - 535, 548, 555, 552, 512, 516, 520, 529, 547, 546, - 527, 538, 549, 533, 521, 514, 522, 0, 196, 219, - 363, 0, 448, 286, 626, 595, 590, 204, 221, 0, - 260, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 198, 200, 207, 220, 230, 234, 241, - 259, 274, 276, 283, 296, 307, 315, 316, 319, 325, - 375, 381, 382, 383, 384, 403, 404, 405, 408, 411, - 412, 415, 417, 418, 421, 425, 429, 430, 431, 433, - 435, 437, 449, 454, 468, 469, 470, 471, 472, 475, - 476, 481, 482, 483, 484, 485, 493, 494, 499, 569, - 571, 584, 602, 608, 474, 298, 299, 438, 439, 311, - 312, 622, 623, 297, 579, 609, 577, 621, 603, 432, - 373, 0, 0, 376, 279, 302, 317, 0, 594, 495, - 225, 460, 288, 249, 0, 0, 209, 244, 228, 257, - 272, 275, 321, 386, 394, 423, 428, 294, 269, 242, - 453, 239, 478, 502, 503, 504, 506, 390, 264, 427, - 391, 0, 371, 559, 560, 313, 511, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 410, 0, - 0, 0, 0, 0, 0, 0, 0, 268, 0, 0, - 0, 0, 361, 265, 0, 0, 424, 0, 203, 0, - 480, 250, 372, 369, 566, 280, 271, 267, 248, 314, - 380, 422, 501, 416, 0, 365, 0, 0, 490, 395, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 320, 246, 322, 202, 407, - 491, 284, 0, 0, 0, 0, 1724, 696, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 236, 0, 0, - 243, 0, 0, 0, 346, 355, 354, 335, 336, 338, - 340, 345, 352, 358, 
0, 0, 0, 0, 0, 263, - 318, 270, 262, 563, 0, 0, 0, 0, 0, 0, - 0, 227, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 273, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 295, 0, - 396, 255, 0, 447, 0, 0, 0, 605, 0, 0, - 0, 0, 0, 0, 0, 360, 0, 327, 197, 223, - 0, 0, 406, 455, 467, 0, 0, 0, 251, 0, - 465, 420, 583, 231, 282, 452, 426, 463, 434, 285, - 0, 0, 464, 367, 568, 444, 580, 606, 607, 261, - 400, 592, 505, 600, 624, 224, 258, 414, 498, 586, - 487, 392, 564, 565, 326, 486, 293, 201, 364, 612, - 222, 473, 366, 240, 229, 570, 589, 287, 450, 619, - 211, 500, 578, 237, 477, 0, 0, 627, 245, 497, - 213, 575, 496, 388, 323, 324, 212, 0, 451, 266, - 291, 0, 0, 256, 409, 572, 573, 254, 628, 226, - 599, 218, 0, 598, 402, 567, 576, 389, 378, 217, - 574, 387, 377, 331, 350, 351, 278, 304, 441, 370, - 442, 303, 305, 398, 397, 399, 205, 587, 0, 206, - 0, 492, 588, 629, 446, 210, 232, 233, 235, 0, - 277, 281, 289, 292, 300, 301, 310, 362, 413, 440, - 436, 445, 0, 562, 581, 593, 604, 610, 611, 613, - 614, 615, 616, 617, 620, 618, 401, 308, 488, 330, - 368, 0, 0, 419, 466, 238, 585, 489, 199, 0, - 0, 0, 0, 252, 253, 0, 558, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 630, 631, 632, 633, - 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, - 644, 645, 646, 647, 625, 0, 0, 0, 0, 0, - 0, 0, 0, 648, 379, 479, 582, 332, 344, 347, - 337, 356, 0, 357, 333, 334, 339, 341, 342, 343, - 348, 349, 353, 359, 247, 208, 385, 393, 561, 309, - 214, 215, 216, 507, 508, 509, 510, 596, 597, 601, - 456, 457, 458, 459, 290, 591, 306, 462, 461, 328, - 329, 374, 443, 523, 525, 536, 540, 542, 544, 550, - 553, 524, 526, 537, 541, 543, 545, 551, 554, 513, - 515, 517, 519, 532, 531, 528, 556, 557, 534, 539, - 518, 530, 535, 548, 555, 552, 512, 516, 520, 529, - 547, 546, 527, 538, 549, 533, 521, 514, 522, 0, - 196, 
219, 363, 0, 448, 286, 626, 595, 590, 204, - 221, 0, 260, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 198, 200, 207, 220, 230, - 234, 241, 259, 274, 276, 283, 296, 307, 315, 316, - 319, 325, 375, 381, 382, 383, 384, 403, 404, 405, - 408, 411, 412, 415, 417, 418, 421, 425, 429, 430, - 431, 433, 435, 437, 449, 454, 468, 469, 470, 471, - 472, 475, 476, 481, 482, 483, 484, 485, 493, 494, - 499, 569, 571, 584, 602, 608, 474, 298, 299, 438, - 439, 311, 312, 622, 623, 297, 579, 609, 577, 621, - 603, 432, 373, 0, 0, 376, 279, 302, 317, 0, - 594, 495, 225, 460, 288, 249, 0, 0, 209, 244, - 228, 257, 272, 275, 321, 386, 394, 423, 428, 294, - 269, 242, 453, 239, 478, 502, 503, 504, 506, 390, - 264, 427, 391, 0, 371, 559, 560, 313, 511, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 410, 0, 0, 0, 0, 0, 0, 0, 0, 268, - 0, 0, 0, 0, 361, 265, 0, 0, 424, 0, - 203, 0, 480, 250, 372, 369, 566, 280, 271, 267, - 248, 314, 380, 422, 501, 416, 0, 365, 0, 0, - 490, 395, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 320, 246, 322, - 202, 407, 491, 284, 0, 0, 0, 0, 0, 194, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 236, - 0, 0, 243, 0, 0, 0, 346, 355, 354, 335, - 336, 338, 340, 345, 352, 358, 0, 0, 0, 0, - 0, 263, 318, 270, 262, 563, 0, 0, 0, 0, - 0, 0, 0, 227, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 273, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 295, 0, 396, 255, 0, 447, 0, 0, 0, 605, - 0, 0, 0, 0, 0, 0, 0, 360, 0, 327, - 197, 223, 0, 0, 406, 455, 467, 0, 0, 0, - 251, 0, 465, 420, 583, 231, 282, 452, 426, 463, - 434, 285, 0, 0, 464, 367, 568, 444, 580, 606, - 607, 261, 400, 592, 505, 600, 624, 224, 258, 414, - 498, 586, 487, 392, 564, 565, 326, 486, 293, 201, - 364, 612, 222, 473, 366, 240, 229, 570, 589, 287, - 450, 619, 211, 500, 578, 237, 477, 0, 0, 627, - 245, 
497, 213, 575, 496, 388, 323, 324, 212, 0, - 451, 266, 291, 0, 0, 256, 409, 572, 573, 254, - 628, 226, 599, 218, 0, 598, 402, 567, 576, 389, - 378, 217, 574, 387, 377, 331, 350, 351, 278, 304, - 441, 370, 442, 303, 305, 398, 397, 399, 205, 587, - 0, 206, 0, 492, 588, 629, 446, 210, 232, 233, - 235, 0, 277, 281, 289, 292, 300, 301, 310, 362, - 413, 440, 436, 445, 0, 562, 581, 593, 604, 610, - 611, 613, 614, 615, 616, 617, 620, 618, 401, 308, - 488, 330, 368, 0, 0, 419, 466, 238, 585, 489, - 199, 0, 0, 0, 0, 252, 253, 0, 558, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 630, 631, - 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, - 642, 643, 644, 645, 646, 647, 625, 0, 0, 0, - 0, 0, 0, 0, 0, 648, 379, 479, 582, 332, - 344, 347, 337, 356, 0, 357, 333, 334, 339, 341, - 342, 343, 348, 349, 353, 359, 247, 208, 385, 393, - 561, 309, 214, 215, 216, 507, 508, 509, 510, 596, - 597, 601, 456, 457, 458, 459, 290, 591, 306, 462, - 461, 328, 329, 374, 443, 523, 525, 536, 540, 542, - 544, 550, 553, 524, 526, 537, 541, 543, 545, 551, - 554, 513, 515, 517, 519, 532, 531, 528, 556, 557, - 534, 539, 518, 530, 535, 548, 555, 552, 512, 516, - 520, 529, 547, 546, 527, 538, 549, 533, 521, 514, - 522, 0, 196, 219, 363, 2016, 448, 286, 626, 595, - 590, 204, 221, 0, 260, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 198, 200, 207, - 220, 230, 234, 241, 259, 274, 276, 283, 296, 307, - 315, 316, 319, 325, 375, 381, 382, 383, 384, 403, - 404, 405, 408, 411, 412, 415, 417, 418, 421, 425, - 429, 430, 431, 433, 435, 437, 449, 454, 468, 469, - 470, 471, 472, 475, 476, 481, 482, 483, 484, 485, - 493, 494, 499, 569, 571, 584, 602, 608, 474, 298, - 299, 438, 439, 311, 312, 622, 623, 297, 579, 609, - 577, 621, 603, 432, 373, 0, 0, 376, 279, 302, - 317, 0, 594, 495, 225, 460, 288, 249, 0, 0, - 209, 244, 228, 257, 272, 275, 321, 386, 394, 423, - 428, 294, 269, 242, 453, 239, 478, 502, 503, 504, - 506, 390, 264, 427, 391, 0, 371, 559, 560, 313, - 511, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 410, 0, 
0, 0, 0, 0, 0, 0, - 0, 268, 0, 0, 0, 0, 361, 265, 0, 0, - 424, 0, 203, 0, 480, 250, 372, 369, 566, 280, - 271, 267, 248, 314, 380, 422, 501, 416, 0, 365, - 0, 0, 490, 395, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 320, - 246, 322, 202, 407, 491, 284, 0, 0, 0, 0, - 2007, 696, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 236, 0, 0, 243, 0, 0, 0, 346, 355, - 354, 335, 336, 338, 340, 345, 352, 358, 0, 0, - 0, 0, 0, 263, 318, 270, 262, 563, 0, 0, - 0, 0, 0, 0, 0, 227, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 273, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 295, 0, 396, 255, 0, 447, 0, 0, - 0, 605, 0, 0, 0, 0, 0, 0, 0, 360, - 0, 327, 197, 223, 0, 0, 406, 455, 467, 0, - 0, 0, 251, 0, 465, 420, 583, 231, 282, 452, - 426, 463, 434, 285, 0, 0, 464, 367, 568, 444, - 580, 606, 607, 261, 400, 592, 505, 600, 624, 224, - 258, 414, 498, 586, 487, 392, 564, 565, 326, 486, - 293, 201, 364, 612, 222, 473, 366, 240, 229, 570, - 589, 287, 450, 619, 211, 500, 578, 237, 477, 0, - 0, 627, 245, 497, 213, 575, 496, 388, 323, 324, - 212, 0, 451, 266, 291, 0, 0, 256, 409, 572, - 573, 254, 628, 226, 599, 218, 0, 598, 402, 567, - 576, 389, 378, 217, 574, 387, 377, 331, 350, 351, - 278, 304, 441, 370, 442, 303, 305, 398, 397, 399, - 205, 587, 0, 206, 0, 492, 588, 629, 446, 210, - 232, 233, 235, 0, 277, 281, 289, 292, 300, 301, - 310, 362, 413, 440, 436, 445, 0, 562, 581, 593, - 604, 610, 611, 613, 614, 615, 616, 617, 620, 618, - 401, 308, 488, 330, 368, 0, 0, 419, 466, 238, - 585, 489, 199, 0, 0, 0, 0, 252, 253, 0, - 558, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, - 640, 641, 642, 643, 644, 645, 646, 647, 625, 0, - 0, 0, 0, 0, 0, 0, 0, 648, 379, 479, - 582, 332, 344, 347, 337, 356, 0, 357, 333, 334, - 339, 341, 342, 343, 348, 349, 353, 359, 247, 208, - 
385, 393, 561, 309, 214, 215, 216, 507, 508, 509, - 510, 596, 597, 601, 456, 457, 458, 459, 290, 591, - 306, 462, 461, 328, 329, 374, 443, 523, 525, 536, - 540, 542, 544, 550, 553, 524, 526, 537, 541, 543, - 545, 551, 554, 513, 515, 517, 519, 532, 531, 528, - 556, 557, 534, 539, 518, 530, 535, 548, 555, 552, - 512, 516, 520, 529, 547, 546, 527, 538, 549, 533, - 521, 514, 522, 0, 196, 219, 363, 0, 448, 286, - 626, 595, 590, 204, 221, 0, 260, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, - 200, 207, 220, 230, 234, 241, 259, 274, 276, 283, - 296, 307, 315, 316, 319, 325, 375, 381, 382, 383, - 384, 403, 404, 405, 408, 411, 412, 415, 417, 418, - 421, 425, 429, 430, 431, 433, 435, 437, 449, 454, - 468, 469, 470, 471, 472, 475, 476, 481, 482, 483, - 484, 485, 493, 494, 499, 569, 571, 584, 602, 608, - 474, 298, 299, 438, 439, 311, 312, 622, 623, 297, - 579, 609, 577, 621, 603, 432, 373, 0, 0, 376, - 279, 302, 317, 0, 594, 495, 225, 460, 288, 249, - 0, 0, 209, 244, 228, 257, 272, 275, 321, 386, - 394, 423, 428, 294, 269, 242, 453, 239, 478, 502, - 503, 504, 506, 390, 264, 427, 391, 0, 371, 559, - 560, 313, 511, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 410, 0, 1871, 0, 0, 0, - 0, 0, 0, 268, 0, 0, 0, 0, 361, 265, - 0, 0, 424, 0, 203, 0, 480, 250, 372, 369, - 566, 280, 271, 267, 248, 314, 380, 422, 501, 416, - 0, 365, 0, 0, 490, 395, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 320, 246, 322, 202, 407, 491, 284, 0, 0, - 0, 0, 0, 696, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 236, 0, 0, 243, 0, 0, 0, - 346, 355, 354, 335, 336, 338, 340, 345, 352, 358, - 0, 0, 0, 0, 0, 263, 318, 270, 262, 563, - 0, 0, 0, 0, 0, 0, 0, 227, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 273, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 295, 0, 396, 255, 0, 447, - 0, 0, 0, 
605, 0, 0, 0, 0, 0, 0, - 0, 360, 0, 327, 197, 223, 0, 0, 406, 455, - 467, 0, 0, 0, 251, 0, 465, 420, 583, 231, - 282, 452, 426, 463, 434, 285, 0, 0, 464, 367, - 568, 444, 580, 606, 607, 261, 400, 592, 505, 600, - 624, 224, 258, 414, 498, 586, 487, 392, 564, 565, - 326, 486, 293, 201, 364, 612, 222, 473, 366, 240, - 229, 570, 589, 287, 450, 619, 211, 500, 578, 237, - 477, 0, 0, 627, 245, 497, 213, 575, 496, 388, - 323, 324, 212, 0, 451, 266, 291, 0, 0, 256, - 409, 572, 573, 254, 628, 226, 599, 218, 0, 598, - 402, 567, 576, 389, 378, 217, 574, 387, 377, 331, - 350, 351, 278, 304, 441, 370, 442, 303, 305, 398, - 397, 399, 205, 587, 0, 206, 0, 492, 588, 629, - 446, 210, 232, 233, 235, 0, 277, 281, 289, 292, - 300, 301, 310, 362, 413, 440, 436, 445, 0, 562, - 581, 593, 604, 610, 611, 613, 614, 615, 616, 617, - 620, 618, 401, 308, 488, 330, 368, 0, 0, 419, - 466, 238, 585, 489, 199, 0, 0, 0, 0, 252, - 253, 0, 558, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 630, 631, 632, 633, 634, 635, 636, 637, - 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, - 625, 0, 0, 0, 0, 0, 0, 0, 0, 648, - 379, 479, 582, 332, 344, 347, 337, 356, 0, 357, - 333, 334, 339, 341, 342, 343, 348, 349, 353, 359, - 247, 208, 385, 393, 561, 309, 214, 215, 216, 507, - 508, 509, 510, 596, 597, 601, 456, 457, 458, 459, - 290, 591, 306, 462, 461, 328, 329, 374, 443, 523, - 525, 536, 540, 542, 544, 550, 553, 524, 526, 537, - 541, 543, 545, 551, 554, 513, 515, 517, 519, 532, - 531, 528, 556, 557, 534, 539, 518, 530, 535, 548, - 555, 552, 512, 516, 520, 529, 547, 546, 527, 538, - 549, 533, 521, 514, 522, 0, 196, 219, 363, 0, - 448, 286, 626, 595, 590, 204, 221, 0, 260, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 198, 200, 207, 220, 230, 234, 241, 259, 274, - 276, 283, 296, 307, 315, 316, 319, 325, 375, 381, - 382, 383, 384, 403, 404, 405, 408, 411, 412, 415, - 417, 418, 421, 425, 429, 430, 431, 433, 435, 437, - 449, 454, 468, 469, 470, 471, 472, 475, 476, 481, - 482, 483, 484, 485, 493, 494, 499, 
569, 571, 584, - 602, 608, 474, 298, 299, 438, 439, 311, 312, 622, - 623, 297, 579, 609, 577, 621, 603, 432, 373, 0, - 0, 376, 279, 302, 317, 0, 594, 495, 225, 460, - 288, 249, 0, 0, 209, 244, 228, 257, 272, 275, - 321, 386, 394, 423, 428, 294, 269, 242, 453, 239, - 478, 502, 503, 504, 506, 390, 264, 427, 391, 0, - 371, 559, 560, 313, 511, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 410, 0, 1869, 0, - 0, 0, 0, 0, 0, 268, 0, 0, 0, 0, - 361, 265, 0, 0, 424, 0, 203, 0, 480, 250, - 372, 369, 566, 280, 271, 267, 248, 314, 380, 422, - 501, 416, 0, 365, 0, 0, 490, 395, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 320, 246, 322, 202, 407, 491, 284, - 0, 0, 0, 0, 0, 696, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 236, 0, 0, 243, 0, - 0, 0, 346, 355, 354, 335, 336, 338, 340, 345, - 352, 358, 0, 0, 0, 0, 0, 263, 318, 270, - 262, 563, 0, 0, 0, 0, 0, 0, 0, 227, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 273, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 295, 0, 396, 255, - 0, 447, 0, 0, 0, 605, 0, 0, 0, 0, - 0, 0, 0, 360, 0, 327, 197, 223, 0, 0, - 406, 455, 467, 0, 0, 0, 251, 0, 465, 420, - 583, 231, 282, 452, 426, 463, 434, 285, 0, 0, - 464, 367, 568, 444, 580, 606, 607, 261, 400, 592, - 505, 600, 624, 224, 258, 414, 498, 586, 487, 392, - 564, 565, 326, 486, 293, 201, 364, 612, 222, 473, - 366, 240, 229, 570, 589, 287, 450, 619, 211, 500, - 578, 237, 477, 0, 0, 627, 245, 497, 213, 575, - 496, 388, 323, 324, 212, 0, 451, 266, 291, 0, - 0, 256, 409, 572, 573, 254, 628, 226, 599, 218, - 0, 598, 402, 567, 576, 389, 378, 217, 574, 387, - 377, 331, 350, 351, 278, 304, 441, 370, 442, 303, - 305, 398, 397, 399, 205, 587, 0, 206, 0, 492, - 588, 629, 446, 210, 232, 233, 235, 0, 277, 281, - 289, 292, 300, 301, 310, 362, 413, 440, 436, 445, - 0, 562, 581, 593, 604, 610, 611, 613, 614, 615, - 
616, 617, 620, 618, 401, 308, 488, 330, 368, 0, - 0, 419, 466, 238, 585, 489, 199, 0, 0, 0, - 0, 252, 253, 0, 558, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 630, 631, 632, 633, 634, 635, - 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, - 646, 647, 625, 0, 0, 0, 0, 0, 0, 0, - 0, 648, 379, 479, 582, 332, 344, 347, 337, 356, - 0, 357, 333, 334, 339, 341, 342, 343, 348, 349, - 353, 359, 247, 208, 385, 393, 561, 309, 214, 215, - 216, 507, 508, 509, 510, 596, 597, 601, 456, 457, - 458, 459, 290, 591, 306, 462, 461, 328, 329, 374, - 443, 523, 525, 536, 540, 542, 544, 550, 553, 524, - 526, 537, 541, 543, 545, 551, 554, 513, 515, 517, - 519, 532, 531, 528, 556, 557, 534, 539, 518, 530, - 535, 548, 555, 552, 512, 516, 520, 529, 547, 546, - 527, 538, 549, 533, 521, 514, 522, 0, 196, 219, - 363, 0, 448, 286, 626, 595, 590, 204, 221, 0, - 260, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 198, 200, 207, 220, 230, 234, 241, - 259, 274, 276, 283, 296, 307, 315, 316, 319, 325, - 375, 381, 382, 383, 384, 403, 404, 405, 408, 411, - 412, 415, 417, 418, 421, 425, 429, 430, 431, 433, - 435, 437, 449, 454, 468, 469, 470, 471, 472, 475, - 476, 481, 482, 483, 484, 485, 493, 494, 499, 569, - 571, 584, 602, 608, 474, 298, 299, 438, 439, 311, - 312, 622, 623, 297, 579, 609, 577, 621, 603, 432, - 373, 0, 0, 376, 279, 302, 317, 0, 594, 495, - 225, 460, 288, 249, 0, 0, 209, 244, 228, 257, - 272, 275, 321, 386, 394, 423, 428, 294, 269, 242, - 453, 239, 478, 502, 503, 504, 506, 390, 264, 427, - 391, 0, 371, 559, 560, 313, 511, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 410, 0, - 1867, 0, 0, 0, 0, 0, 0, 268, 0, 0, - 0, 0, 361, 265, 0, 0, 424, 0, 203, 0, - 480, 250, 372, 369, 566, 280, 271, 267, 248, 314, - 380, 422, 501, 416, 0, 365, 0, 0, 490, 395, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 320, 246, 322, 202, 407, - 491, 284, 0, 0, 0, 0, 0, 696, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 236, 0, 0, - 243, 0, 0, 0, 346, 355, 354, 335, 336, 338, - 340, 345, 352, 358, 0, 0, 0, 0, 0, 263, - 318, 270, 
262, 563, 0, 0, 0, 0, 0, 0, - 0, 227, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 273, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 295, 0, - 396, 255, 0, 447, 0, 0, 0, 605, 0, 0, - 0, 0, 0, 0, 0, 360, 0, 327, 197, 223, - 0, 0, 406, 455, 467, 0, 0, 0, 251, 0, - 465, 420, 583, 231, 282, 452, 426, 463, 434, 285, - 0, 0, 464, 367, 568, 444, 580, 606, 607, 261, - 400, 592, 505, 600, 624, 224, 258, 414, 498, 586, - 487, 392, 564, 565, 326, 486, 293, 201, 364, 612, - 222, 473, 366, 240, 229, 570, 589, 287, 450, 619, - 211, 500, 578, 237, 477, 0, 0, 627, 245, 497, - 213, 575, 496, 388, 323, 324, 212, 0, 451, 266, - 291, 0, 0, 256, 409, 572, 573, 254, 628, 226, - 599, 218, 0, 598, 402, 567, 576, 389, 378, 217, - 574, 387, 377, 331, 350, 351, 278, 304, 441, 370, - 442, 303, 305, 398, 397, 399, 205, 587, 0, 206, - 0, 492, 588, 629, 446, 210, 232, 233, 235, 0, - 277, 281, 289, 292, 300, 301, 310, 362, 413, 440, - 436, 445, 0, 562, 581, 593, 604, 610, 611, 613, - 614, 615, 616, 617, 620, 618, 401, 308, 488, 330, - 368, 0, 0, 419, 466, 238, 585, 489, 199, 0, - 0, 0, 0, 252, 253, 0, 558, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 630, 631, 632, 633, - 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, - 644, 645, 646, 647, 625, 0, 0, 0, 0, 0, - 0, 0, 0, 648, 379, 479, 582, 332, 344, 347, - 337, 356, 0, 357, 333, 334, 339, 341, 342, 343, - 348, 349, 353, 359, 247, 208, 385, 393, 561, 309, - 214, 215, 216, 507, 508, 509, 510, 596, 597, 601, - 456, 457, 458, 459, 290, 591, 306, 462, 461, 328, - 329, 374, 443, 523, 525, 536, 540, 542, 544, 550, - 553, 524, 526, 537, 541, 543, 545, 551, 554, 513, - 515, 517, 519, 532, 531, 528, 556, 557, 534, 539, - 518, 530, 535, 548, 555, 552, 512, 516, 520, 529, - 547, 546, 527, 538, 549, 533, 521, 514, 522, 0, - 196, 219, 363, 0, 448, 286, 626, 595, 
590, 204, - 221, 0, 260, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 198, 200, 207, 220, 230, - 234, 241, 259, 274, 276, 283, 296, 307, 315, 316, - 319, 325, 375, 381, 382, 383, 384, 403, 404, 405, - 408, 411, 412, 415, 417, 418, 421, 425, 429, 430, - 431, 433, 435, 437, 449, 454, 468, 469, 470, 471, - 472, 475, 476, 481, 482, 483, 484, 485, 493, 494, - 499, 569, 571, 584, 602, 608, 474, 298, 299, 438, - 439, 311, 312, 622, 623, 297, 579, 609, 577, 621, - 603, 432, 373, 0, 0, 376, 279, 302, 317, 0, - 594, 495, 225, 460, 288, 249, 0, 0, 209, 244, - 228, 257, 272, 275, 321, 386, 394, 423, 428, 294, - 269, 242, 453, 239, 478, 502, 503, 504, 506, 390, - 264, 427, 391, 0, 371, 559, 560, 313, 511, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 410, 0, 1865, 0, 0, 0, 0, 0, 0, 268, - 0, 0, 0, 0, 361, 265, 0, 0, 424, 0, - 203, 0, 480, 250, 372, 369, 566, 280, 271, 267, - 248, 314, 380, 422, 501, 416, 0, 365, 0, 0, - 490, 395, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 320, 246, 322, - 202, 407, 491, 284, 0, 0, 0, 0, 0, 696, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 236, - 0, 0, 243, 0, 0, 0, 346, 355, 354, 335, - 336, 338, 340, 345, 352, 358, 0, 0, 0, 0, - 0, 263, 318, 270, 262, 563, 0, 0, 0, 0, - 0, 0, 0, 227, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 273, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 295, 0, 396, 255, 0, 447, 0, 0, 0, 605, - 0, 0, 0, 0, 0, 0, 0, 360, 0, 327, - 197, 223, 0, 0, 406, 455, 467, 0, 0, 0, - 251, 0, 465, 420, 583, 231, 282, 452, 426, 463, - 434, 285, 0, 0, 464, 367, 568, 444, 580, 606, - 607, 261, 400, 592, 505, 600, 624, 224, 258, 414, - 498, 586, 487, 392, 564, 565, 326, 486, 293, 201, - 364, 612, 222, 473, 366, 240, 229, 570, 589, 287, - 450, 619, 211, 500, 578, 237, 477, 0, 0, 627, - 245, 497, 213, 575, 496, 388, 323, 
324, 212, 0, - 451, 266, 291, 0, 0, 256, 409, 572, 573, 254, - 628, 226, 599, 218, 0, 598, 402, 567, 576, 389, - 378, 217, 574, 387, 377, 331, 350, 351, 278, 304, - 441, 370, 442, 303, 305, 398, 397, 399, 205, 587, - 0, 206, 0, 492, 588, 629, 446, 210, 232, 233, - 235, 0, 277, 281, 289, 292, 300, 301, 310, 362, - 413, 440, 436, 445, 0, 562, 581, 593, 604, 610, - 611, 613, 614, 615, 616, 617, 620, 618, 401, 308, - 488, 330, 368, 0, 0, 419, 466, 238, 585, 489, - 199, 0, 0, 0, 0, 252, 253, 0, 558, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 630, 631, - 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, - 642, 643, 644, 645, 646, 647, 625, 0, 0, 0, - 0, 0, 0, 0, 0, 648, 379, 479, 582, 332, - 344, 347, 337, 356, 0, 357, 333, 334, 339, 341, - 342, 343, 348, 349, 353, 359, 247, 208, 385, 393, - 561, 309, 214, 215, 216, 507, 508, 509, 510, 596, - 597, 601, 456, 457, 458, 459, 290, 591, 306, 462, - 461, 328, 329, 374, 443, 523, 525, 536, 540, 542, - 544, 550, 553, 524, 526, 537, 541, 543, 545, 551, - 554, 513, 515, 517, 519, 532, 531, 528, 556, 557, - 534, 539, 518, 530, 535, 548, 555, 552, 512, 516, - 520, 529, 547, 546, 527, 538, 549, 533, 521, 514, - 522, 0, 196, 219, 363, 0, 448, 286, 626, 595, - 590, 204, 221, 0, 260, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 198, 200, 207, - 220, 230, 234, 241, 259, 274, 276, 283, 296, 307, - 315, 316, 319, 325, 375, 381, 382, 383, 384, 403, - 404, 405, 408, 411, 412, 415, 417, 418, 421, 425, - 429, 430, 431, 433, 435, 437, 449, 454, 468, 469, - 470, 471, 472, 475, 476, 481, 482, 483, 484, 485, - 493, 494, 499, 569, 571, 584, 602, 608, 474, 298, - 299, 438, 439, 311, 312, 622, 623, 297, 579, 609, - 577, 621, 603, 432, 373, 0, 0, 376, 279, 302, - 317, 0, 594, 495, 225, 460, 288, 249, 0, 0, - 209, 244, 228, 257, 272, 275, 321, 386, 394, 423, - 428, 294, 269, 242, 453, 239, 478, 502, 503, 504, - 506, 390, 264, 427, 391, 0, 371, 559, 560, 313, - 511, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 410, 0, 1863, 0, 0, 0, 0, 0, - 0, 268, 0, 
0, 0, 0, 361, 265, 0, 0, - 424, 0, 203, 0, 480, 250, 372, 369, 566, 280, - 271, 267, 248, 314, 380, 422, 501, 416, 0, 365, - 0, 0, 490, 395, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 320, - 246, 322, 202, 407, 491, 284, 0, 0, 0, 0, - 0, 696, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 236, 0, 0, 243, 0, 0, 0, 346, 355, - 354, 335, 336, 338, 340, 345, 352, 358, 0, 0, - 0, 0, 0, 263, 318, 270, 262, 563, 0, 0, - 0, 0, 0, 0, 0, 227, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 273, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 295, 0, 396, 255, 0, 447, 0, 0, - 0, 605, 0, 0, 0, 0, 0, 0, 0, 360, - 0, 327, 197, 223, 0, 0, 406, 455, 467, 0, - 0, 0, 251, 0, 465, 420, 583, 231, 282, 452, - 426, 463, 434, 285, 0, 0, 464, 367, 568, 444, - 580, 606, 607, 261, 400, 592, 505, 600, 624, 224, - 258, 414, 498, 586, 487, 392, 564, 565, 326, 486, - 293, 201, 364, 612, 222, 473, 366, 240, 229, 570, - 589, 287, 450, 619, 211, 500, 578, 237, 477, 0, - 0, 627, 245, 497, 213, 575, 496, 388, 323, 324, - 212, 0, 451, 266, 291, 0, 0, 256, 409, 572, - 573, 254, 628, 226, 599, 218, 0, 598, 402, 567, - 576, 389, 378, 217, 574, 387, 377, 331, 350, 351, - 278, 304, 441, 370, 442, 303, 305, 398, 397, 399, - 205, 587, 0, 206, 0, 492, 588, 629, 446, 210, - 232, 233, 235, 0, 277, 281, 289, 292, 300, 301, - 310, 362, 413, 440, 436, 445, 0, 562, 581, 593, - 604, 610, 611, 613, 614, 615, 616, 617, 620, 618, - 401, 308, 488, 330, 368, 0, 0, 419, 466, 238, - 585, 489, 199, 0, 0, 0, 0, 252, 253, 0, - 558, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, - 640, 641, 642, 643, 644, 645, 646, 647, 625, 0, - 0, 0, 0, 0, 0, 0, 0, 648, 379, 479, - 582, 332, 344, 347, 337, 356, 0, 357, 333, 334, - 339, 341, 342, 343, 348, 349, 353, 359, 247, 208, - 385, 393, 561, 309, 214, 215, 216, 
507, 508, 509, - 510, 596, 597, 601, 456, 457, 458, 459, 290, 591, - 306, 462, 461, 328, 329, 374, 443, 523, 525, 536, - 540, 542, 544, 550, 553, 524, 526, 537, 541, 543, - 545, 551, 554, 513, 515, 517, 519, 532, 531, 528, - 556, 557, 534, 539, 518, 530, 535, 548, 555, 552, - 512, 516, 520, 529, 547, 546, 527, 538, 549, 533, - 521, 514, 522, 0, 196, 219, 363, 0, 448, 286, - 626, 595, 590, 204, 221, 0, 260, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, - 200, 207, 220, 230, 234, 241, 259, 274, 276, 283, - 296, 307, 315, 316, 319, 325, 375, 381, 382, 383, - 384, 403, 404, 405, 408, 411, 412, 415, 417, 418, - 421, 425, 429, 430, 431, 433, 435, 437, 449, 454, - 468, 469, 470, 471, 472, 475, 476, 481, 482, 483, - 484, 485, 493, 494, 499, 569, 571, 584, 602, 608, - 474, 298, 299, 438, 439, 311, 312, 622, 623, 297, - 579, 609, 577, 621, 603, 432, 373, 0, 0, 376, - 279, 302, 317, 0, 594, 495, 225, 460, 288, 249, - 0, 0, 209, 244, 228, 257, 272, 275, 321, 386, - 394, 423, 428, 294, 269, 242, 453, 239, 478, 502, - 503, 504, 506, 390, 264, 427, 391, 0, 371, 559, - 560, 313, 511, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 410, 0, 1859, 0, 0, 0, - 0, 0, 0, 268, 0, 0, 0, 0, 361, 265, - 0, 0, 424, 0, 203, 0, 480, 250, 372, 369, - 566, 280, 271, 267, 248, 314, 380, 422, 501, 416, - 0, 365, 0, 0, 490, 395, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 320, 246, 322, 202, 407, 491, 284, 0, 0, - 0, 0, 0, 696, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 236, 0, 0, 243, 0, 0, 0, - 346, 355, 354, 335, 336, 338, 340, 345, 352, 358, - 0, 0, 0, 0, 0, 263, 318, 270, 262, 563, - 0, 0, 0, 0, 0, 0, 0, 227, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 273, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 295, 0, 396, 255, 0, 447, - 0, 0, 0, 605, 0, 0, 0, 0, 0, 0, - 0, 360, 0, 
327, 197, 223, 0, 0, 406, 455, - 467, 0, 0, 0, 251, 0, 465, 420, 583, 231, - 282, 452, 426, 463, 434, 285, 0, 0, 464, 367, - 568, 444, 580, 606, 607, 261, 400, 592, 505, 600, - 624, 224, 258, 414, 498, 586, 487, 392, 564, 565, - 326, 486, 293, 201, 364, 612, 222, 473, 366, 240, - 229, 570, 589, 287, 450, 619, 211, 500, 578, 237, - 477, 0, 0, 627, 245, 497, 213, 575, 496, 388, - 323, 324, 212, 0, 451, 266, 291, 0, 0, 256, - 409, 572, 573, 254, 628, 226, 599, 218, 0, 598, - 402, 567, 576, 389, 378, 217, 574, 387, 377, 331, - 350, 351, 278, 304, 441, 370, 442, 303, 305, 398, - 397, 399, 205, 587, 0, 206, 0, 492, 588, 629, - 446, 210, 232, 233, 235, 0, 277, 281, 289, 292, - 300, 301, 310, 362, 413, 440, 436, 445, 0, 562, - 581, 593, 604, 610, 611, 613, 614, 615, 616, 617, - 620, 618, 401, 308, 488, 330, 368, 0, 0, 419, - 466, 238, 585, 489, 199, 0, 0, 0, 0, 252, - 253, 0, 558, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 630, 631, 632, 633, 634, 635, 636, 637, - 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, - 625, 0, 0, 0, 0, 0, 0, 0, 0, 648, - 379, 479, 582, 332, 344, 347, 337, 356, 0, 357, - 333, 334, 339, 341, 342, 343, 348, 349, 353, 359, - 247, 208, 385, 393, 561, 309, 214, 215, 216, 507, - 508, 509, 510, 596, 597, 601, 456, 457, 458, 459, - 290, 591, 306, 462, 461, 328, 329, 374, 443, 523, - 525, 536, 540, 542, 544, 550, 553, 524, 526, 537, - 541, 543, 545, 551, 554, 513, 515, 517, 519, 532, - 531, 528, 556, 557, 534, 539, 518, 530, 535, 548, - 555, 552, 512, 516, 520, 529, 547, 546, 527, 538, - 549, 533, 521, 514, 522, 0, 196, 219, 363, 0, - 448, 286, 626, 595, 590, 204, 221, 0, 260, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 198, 200, 207, 220, 230, 234, 241, 259, 274, - 276, 283, 296, 307, 315, 316, 319, 325, 375, 381, - 382, 383, 384, 403, 404, 405, 408, 411, 412, 415, - 417, 418, 421, 425, 429, 430, 431, 433, 435, 437, - 449, 454, 468, 469, 470, 471, 472, 475, 476, 481, - 482, 483, 484, 485, 493, 494, 499, 569, 571, 584, - 602, 608, 474, 
298, 299, 438, 439, 311, 312, 622, - 623, 297, 579, 609, 577, 621, 603, 432, 373, 0, - 0, 376, 279, 302, 317, 0, 594, 495, 225, 460, - 288, 249, 0, 0, 209, 244, 228, 257, 272, 275, - 321, 386, 394, 423, 428, 294, 269, 242, 453, 239, - 478, 502, 503, 504, 506, 390, 264, 427, 391, 0, - 371, 559, 560, 313, 511, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 410, 0, 1857, 0, - 0, 0, 0, 0, 0, 268, 0, 0, 0, 0, - 361, 265, 0, 0, 424, 0, 203, 0, 480, 250, - 372, 369, 566, 280, 271, 267, 248, 314, 380, 422, - 501, 416, 0, 365, 0, 0, 490, 395, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 320, 246, 322, 202, 407, 491, 284, - 0, 0, 0, 0, 0, 696, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 236, 0, 0, 243, 0, - 0, 0, 346, 355, 354, 335, 336, 338, 340, 345, - 352, 358, 0, 0, 0, 0, 0, 263, 318, 270, - 262, 563, 0, 0, 0, 0, 0, 0, 0, 227, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 273, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 295, 0, 396, 255, - 0, 447, 0, 0, 0, 605, 0, 0, 0, 0, - 0, 0, 0, 360, 0, 327, 197, 223, 0, 0, - 406, 455, 467, 0, 0, 0, 251, 0, 465, 420, - 583, 231, 282, 452, 426, 463, 434, 285, 0, 0, - 464, 367, 568, 444, 580, 606, 607, 261, 400, 592, - 505, 600, 624, 224, 258, 414, 498, 586, 487, 392, - 564, 565, 326, 486, 293, 201, 364, 612, 222, 473, - 366, 240, 229, 570, 589, 287, 450, 619, 211, 500, - 578, 237, 477, 0, 0, 627, 245, 497, 213, 575, - 496, 388, 323, 324, 212, 0, 451, 266, 291, 0, - 0, 256, 409, 572, 573, 254, 628, 226, 599, 218, - 0, 598, 402, 567, 576, 389, 378, 217, 574, 387, - 377, 331, 350, 351, 278, 304, 441, 370, 442, 303, - 305, 398, 397, 399, 205, 587, 0, 206, 0, 492, - 588, 629, 446, 210, 232, 233, 235, 0, 277, 281, - 289, 292, 300, 301, 310, 362, 413, 440, 436, 445, - 0, 562, 581, 593, 604, 610, 611, 613, 614, 615, - 616, 617, 620, 618, 401, 308, 488, 
330, 368, 0, - 0, 419, 466, 238, 585, 489, 199, 0, 0, 0, - 0, 252, 253, 0, 558, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 630, 631, 632, 633, 634, 635, - 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, - 646, 647, 625, 0, 0, 0, 0, 0, 0, 0, - 0, 648, 379, 479, 582, 332, 344, 347, 337, 356, - 0, 357, 333, 334, 339, 341, 342, 343, 348, 349, - 353, 359, 247, 208, 385, 393, 561, 309, 214, 215, - 216, 507, 508, 509, 510, 596, 597, 601, 456, 457, - 458, 459, 290, 591, 306, 462, 461, 328, 329, 374, - 443, 523, 525, 536, 540, 542, 544, 550, 553, 524, - 526, 537, 541, 543, 545, 551, 554, 513, 515, 517, - 519, 532, 531, 528, 556, 557, 534, 539, 518, 530, - 535, 548, 555, 552, 512, 516, 520, 529, 547, 546, - 527, 538, 549, 533, 521, 514, 522, 0, 196, 219, - 363, 0, 448, 286, 626, 595, 590, 204, 221, 0, - 260, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 198, 200, 207, 220, 230, 234, 241, - 259, 274, 276, 283, 296, 307, 315, 316, 319, 325, - 375, 381, 382, 383, 384, 403, 404, 405, 408, 411, - 412, 415, 417, 418, 421, 425, 429, 430, 431, 433, - 435, 437, 449, 454, 468, 469, 470, 471, 472, 475, - 476, 481, 482, 483, 484, 485, 493, 494, 499, 569, - 571, 584, 602, 608, 474, 298, 299, 438, 439, 311, - 312, 622, 623, 297, 579, 609, 577, 621, 603, 432, - 373, 0, 0, 376, 279, 302, 317, 0, 594, 495, - 225, 460, 288, 249, 0, 0, 209, 244, 228, 257, - 272, 275, 321, 386, 394, 423, 428, 294, 269, 242, - 453, 239, 478, 502, 503, 504, 506, 390, 264, 427, - 391, 0, 371, 559, 560, 313, 511, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 410, 0, - 1855, 0, 0, 0, 0, 0, 0, 268, 0, 0, - 0, 0, 361, 265, 0, 0, 424, 0, 203, 0, - 480, 250, 372, 369, 566, 280, 271, 267, 248, 314, - 380, 422, 501, 416, 0, 365, 0, 0, 490, 395, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 320, 246, 322, 202, 407, - 491, 284, 0, 0, 0, 0, 0, 696, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 236, 0, 0, - 243, 0, 0, 0, 346, 355, 354, 335, 336, 338, - 340, 345, 352, 358, 0, 0, 0, 0, 0, 263, - 318, 270, 262, 563, 0, 0, 0, 0, 0, 0, - 0, 
227, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 273, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 295, 0, - 396, 255, 0, 447, 0, 0, 0, 605, 0, 0, - 0, 0, 0, 0, 0, 360, 0, 327, 197, 223, - 0, 0, 406, 455, 467, 0, 0, 0, 251, 0, - 465, 420, 583, 231, 282, 452, 426, 463, 434, 285, - 0, 0, 464, 367, 568, 444, 580, 606, 607, 261, - 400, 592, 505, 600, 624, 224, 258, 414, 498, 586, - 487, 392, 564, 565, 326, 486, 293, 201, 364, 612, - 222, 473, 366, 240, 229, 570, 589, 287, 450, 619, - 211, 500, 578, 237, 477, 0, 0, 627, 245, 497, - 213, 575, 496, 388, 323, 324, 212, 0, 451, 266, - 291, 0, 0, 256, 409, 572, 573, 254, 628, 226, - 599, 218, 0, 598, 402, 567, 576, 389, 378, 217, - 574, 387, 377, 331, 350, 351, 278, 304, 441, 370, - 442, 303, 305, 398, 397, 399, 205, 587, 0, 206, - 0, 492, 588, 629, 446, 210, 232, 233, 235, 0, - 277, 281, 289, 292, 300, 301, 310, 362, 413, 440, - 436, 445, 0, 562, 581, 593, 604, 610, 611, 613, - 614, 615, 616, 617, 620, 618, 401, 308, 488, 330, - 368, 0, 0, 419, 466, 238, 585, 489, 199, 0, - 0, 0, 0, 252, 253, 0, 558, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 630, 631, 632, 633, - 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, - 644, 645, 646, 647, 625, 0, 0, 0, 0, 0, - 0, 0, 0, 648, 379, 479, 582, 332, 344, 347, - 337, 356, 0, 357, 333, 334, 339, 341, 342, 343, - 348, 349, 353, 359, 247, 208, 385, 393, 561, 309, - 214, 215, 216, 507, 508, 509, 510, 596, 597, 601, - 456, 457, 458, 459, 290, 591, 306, 462, 461, 328, - 329, 374, 443, 523, 525, 536, 540, 542, 544, 550, - 553, 524, 526, 537, 541, 543, 545, 551, 554, 513, - 515, 517, 519, 532, 531, 528, 556, 557, 534, 539, - 518, 530, 535, 548, 555, 552, 512, 516, 520, 529, - 547, 546, 527, 538, 549, 533, 521, 514, 522, 0, - 196, 219, 363, 0, 448, 286, 626, 595, 590, 204, - 221, 0, 260, 0, 0, 0, 
0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 198, 200, 207, 220, 230, - 234, 241, 259, 274, 276, 283, 296, 307, 315, 316, - 319, 325, 375, 381, 382, 383, 384, 403, 404, 405, - 408, 411, 412, 415, 417, 418, 421, 425, 429, 430, - 431, 433, 435, 437, 449, 454, 468, 469, 470, 471, - 472, 475, 476, 481, 482, 483, 484, 485, 493, 494, - 499, 569, 571, 584, 602, 608, 474, 298, 299, 438, - 439, 311, 312, 622, 623, 297, 579, 609, 577, 621, - 603, 432, 373, 0, 0, 376, 279, 302, 317, 0, - 594, 495, 225, 460, 288, 249, 0, 0, 209, 244, - 228, 257, 272, 275, 321, 386, 394, 423, 428, 294, - 269, 242, 453, 239, 478, 502, 503, 504, 506, 390, - 264, 427, 391, 0, 371, 559, 560, 313, 511, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 410, 0, 0, 0, 0, 0, 0, 0, 0, 268, - 0, 0, 0, 0, 361, 265, 0, 0, 424, 0, - 203, 0, 480, 250, 372, 369, 566, 280, 271, 267, - 248, 314, 380, 422, 501, 416, 0, 365, 0, 0, - 490, 395, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 320, 246, 322, - 202, 407, 491, 284, 0, 1830, 0, 0, 0, 696, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 236, - 0, 0, 243, 0, 0, 0, 346, 355, 354, 335, - 336, 338, 340, 345, 352, 358, 0, 0, 0, 0, - 0, 263, 318, 270, 262, 563, 0, 0, 0, 0, - 0, 0, 0, 227, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 273, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 295, 0, 396, 255, 0, 447, 0, 0, 0, 605, - 0, 0, 0, 0, 0, 0, 0, 360, 0, 327, - 197, 223, 0, 0, 406, 455, 467, 0, 0, 0, - 251, 0, 465, 420, 583, 231, 282, 452, 426, 463, - 434, 285, 0, 0, 464, 367, 568, 444, 580, 606, - 607, 261, 400, 592, 505, 600, 624, 224, 258, 414, - 498, 586, 487, 392, 564, 565, 326, 486, 293, 201, - 364, 612, 222, 473, 366, 240, 229, 570, 589, 287, - 450, 619, 211, 500, 578, 237, 477, 0, 0, 627, - 245, 497, 213, 575, 496, 388, 323, 324, 212, 0, - 451, 266, 291, 0, 0, 
256, 409, 572, 573, 254, - 628, 226, 599, 218, 0, 598, 402, 567, 576, 389, - 378, 217, 574, 387, 377, 331, 350, 351, 278, 304, - 441, 370, 442, 303, 305, 398, 397, 399, 205, 587, - 0, 206, 0, 492, 588, 629, 446, 210, 232, 233, - 235, 0, 277, 281, 289, 292, 300, 301, 310, 362, - 413, 440, 436, 445, 0, 562, 581, 593, 604, 610, - 611, 613, 614, 615, 616, 617, 620, 618, 401, 308, - 488, 330, 368, 0, 0, 419, 466, 238, 585, 489, - 199, 0, 0, 0, 0, 252, 253, 0, 558, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 630, 631, - 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, - 642, 643, 644, 645, 646, 647, 625, 0, 0, 0, - 0, 0, 0, 0, 0, 648, 379, 479, 582, 332, - 344, 347, 337, 356, 0, 357, 333, 334, 339, 341, - 342, 343, 348, 349, 353, 359, 247, 208, 385, 393, - 561, 309, 214, 215, 216, 507, 508, 509, 510, 596, - 597, 601, 456, 457, 458, 459, 290, 591, 306, 462, - 461, 328, 329, 374, 443, 523, 525, 536, 540, 542, - 544, 550, 553, 524, 526, 537, 541, 543, 545, 551, - 554, 513, 515, 517, 519, 532, 531, 528, 556, 557, - 534, 539, 518, 530, 535, 548, 555, 552, 512, 516, - 520, 529, 547, 546, 527, 538, 549, 533, 521, 514, - 522, 0, 196, 219, 363, 0, 448, 286, 626, 595, - 590, 204, 221, 0, 260, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 198, 200, 207, - 220, 230, 234, 241, 259, 274, 276, 283, 296, 307, - 315, 316, 319, 325, 375, 381, 382, 383, 384, 403, - 404, 405, 408, 411, 412, 415, 417, 418, 421, 425, - 429, 430, 431, 433, 435, 437, 449, 454, 468, 469, - 470, 471, 472, 475, 476, 481, 482, 483, 484, 485, - 493, 494, 499, 569, 571, 584, 602, 608, 474, 298, - 299, 438, 439, 311, 312, 622, 623, 297, 579, 609, - 577, 621, 603, 432, 373, 0, 0, 376, 279, 302, - 317, 0, 594, 495, 225, 460, 288, 249, 0, 0, - 209, 244, 228, 257, 272, 275, 321, 386, 394, 423, - 428, 294, 269, 242, 453, 239, 478, 502, 503, 504, - 506, 390, 264, 427, 391, 0, 371, 559, 560, 313, - 511, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 410, 0, 0, 0, 0, 0, 0, 0, - 1728, 268, 0, 0, 0, 0, 361, 265, 0, 0, - 424, 0, 
203, 0, 480, 250, 372, 369, 566, 280, - 271, 267, 248, 314, 380, 422, 501, 416, 0, 365, - 0, 0, 490, 395, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 320, - 246, 322, 202, 407, 491, 284, 0, 0, 0, 0, - 0, 194, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 236, 0, 0, 243, 0, 0, 0, 346, 355, - 354, 335, 336, 338, 340, 345, 352, 358, 0, 0, - 0, 0, 0, 263, 318, 270, 262, 563, 0, 0, - 0, 0, 0, 0, 0, 227, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 273, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 295, 0, 396, 255, 0, 447, 0, 0, - 0, 605, 0, 0, 0, 0, 0, 0, 0, 360, - 0, 327, 197, 223, 0, 0, 406, 455, 467, 0, - 0, 0, 251, 0, 465, 420, 583, 231, 282, 452, - 426, 463, 434, 285, 0, 0, 464, 367, 568, 444, - 580, 606, 607, 261, 400, 592, 505, 600, 624, 224, - 258, 414, 498, 586, 487, 392, 564, 565, 326, 486, - 293, 201, 364, 612, 222, 473, 366, 240, 229, 570, - 589, 287, 450, 619, 211, 500, 578, 237, 477, 0, - 0, 627, 245, 497, 213, 575, 496, 388, 323, 324, - 212, 0, 451, 266, 291, 0, 0, 256, 409, 572, - 573, 254, 628, 226, 599, 218, 0, 598, 402, 567, - 576, 389, 378, 217, 574, 387, 377, 331, 350, 351, - 278, 304, 441, 370, 442, 303, 305, 398, 397, 399, - 205, 587, 0, 206, 0, 492, 588, 629, 446, 210, - 232, 233, 235, 0, 277, 281, 289, 292, 300, 301, - 310, 362, 413, 440, 436, 445, 0, 562, 581, 593, - 604, 610, 611, 613, 614, 615, 616, 617, 620, 618, - 401, 308, 488, 330, 368, 0, 0, 419, 466, 238, - 585, 489, 199, 0, 0, 0, 0, 252, 253, 0, - 558, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, - 640, 641, 642, 643, 644, 645, 646, 647, 625, 0, - 0, 0, 0, 0, 0, 0, 0, 648, 379, 479, - 582, 332, 344, 347, 337, 356, 0, 357, 333, 334, - 339, 341, 342, 343, 348, 349, 353, 359, 247, 208, - 385, 393, 561, 309, 214, 215, 216, 507, 508, 509, - 510, 596, 597, 601, 
456, 457, 458, 459, 290, 591, - 306, 462, 461, 328, 329, 374, 443, 523, 525, 536, - 540, 542, 544, 550, 553, 524, 526, 537, 541, 543, - 545, 551, 554, 513, 515, 517, 519, 532, 531, 528, - 556, 557, 534, 539, 518, 530, 535, 548, 555, 552, - 512, 516, 520, 529, 547, 546, 527, 538, 549, 533, - 521, 514, 522, 0, 196, 219, 363, 0, 448, 286, - 626, 595, 590, 204, 221, 0, 260, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, - 200, 207, 220, 230, 234, 241, 259, 274, 276, 283, - 296, 307, 315, 316, 319, 325, 375, 381, 382, 383, - 384, 403, 404, 405, 408, 411, 412, 415, 417, 418, - 421, 425, 429, 430, 431, 433, 435, 437, 449, 454, - 468, 469, 470, 471, 472, 475, 476, 481, 482, 483, - 484, 485, 493, 494, 499, 569, 571, 584, 602, 608, - 474, 298, 299, 438, 439, 311, 312, 622, 623, 297, - 579, 609, 577, 621, 603, 432, 373, 0, 0, 376, - 279, 302, 317, 0, 594, 495, 225, 460, 288, 249, - 0, 0, 209, 244, 228, 257, 272, 275, 321, 386, - 394, 423, 428, 294, 269, 242, 453, 239, 478, 502, - 503, 504, 506, 390, 264, 427, 391, 0, 371, 559, - 560, 313, 511, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 410, 0, 0, 0, 0, 0, - 0, 0, 0, 268, 0, 0, 0, 0, 361, 265, - 0, 0, 424, 0, 203, 0, 480, 250, 372, 369, - 566, 280, 271, 267, 248, 314, 380, 422, 501, 416, - 0, 365, 0, 0, 490, 395, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 320, 246, 322, 202, 407, 491, 284, 0, 94, - 0, 0, 0, 929, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 236, 0, 0, 243, 0, 0, 0, - 346, 355, 354, 335, 336, 338, 340, 345, 352, 358, - 0, 0, 0, 0, 0, 263, 318, 270, 262, 563, - 0, 0, 0, 0, 0, 0, 0, 227, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 273, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 295, 0, 396, 255, 0, 447, - 0, 0, 0, 605, 0, 0, 0, 0, 0, 0, - 0, 360, 0, 327, 197, 223, 0, 0, 406, 455, - 467, 0, 
0, 0, 251, 0, 465, 420, 583, 231, - 282, 452, 426, 463, 434, 285, 0, 0, 464, 367, - 568, 444, 580, 606, 607, 261, 400, 592, 505, 600, - 624, 224, 258, 414, 498, 586, 487, 392, 564, 565, - 326, 486, 293, 201, 364, 612, 222, 473, 366, 240, - 229, 570, 589, 287, 450, 619, 211, 500, 578, 237, - 477, 0, 0, 627, 245, 497, 213, 575, 496, 388, - 323, 324, 212, 0, 451, 266, 291, 0, 0, 256, - 409, 572, 573, 254, 628, 226, 599, 218, 0, 598, - 402, 567, 576, 389, 378, 217, 574, 387, 377, 331, - 350, 351, 278, 304, 441, 370, 442, 303, 305, 398, - 397, 399, 205, 587, 0, 206, 0, 492, 588, 629, - 446, 210, 232, 233, 235, 0, 277, 281, 289, 292, - 300, 301, 310, 362, 413, 440, 436, 445, 0, 562, - 581, 593, 604, 610, 611, 613, 614, 615, 616, 617, - 620, 618, 401, 308, 488, 330, 368, 0, 0, 419, - 466, 238, 585, 489, 199, 0, 0, 0, 0, 252, - 253, 0, 558, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 630, 631, 632, 633, 634, 635, 636, 637, - 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, - 625, 0, 0, 0, 0, 0, 0, 0, 0, 648, - 379, 479, 582, 332, 344, 347, 337, 356, 0, 357, - 333, 334, 339, 341, 342, 343, 348, 349, 353, 359, - 247, 208, 385, 393, 561, 309, 214, 215, 216, 507, - 508, 509, 510, 596, 597, 601, 456, 457, 458, 459, - 290, 591, 306, 462, 461, 328, 329, 374, 443, 523, - 525, 536, 540, 542, 544, 550, 553, 524, 526, 537, - 541, 543, 545, 551, 554, 513, 515, 517, 519, 532, - 531, 528, 556, 557, 534, 539, 518, 530, 535, 548, - 555, 552, 512, 516, 520, 529, 547, 546, 527, 538, - 549, 533, 521, 514, 522, 0, 196, 219, 363, 0, - 448, 286, 626, 595, 590, 204, 221, 0, 260, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 198, 200, 207, 220, 230, 234, 241, 259, 274, - 276, 283, 296, 307, 315, 316, 319, 325, 375, 381, - 382, 383, 384, 403, 404, 405, 408, 411, 412, 415, - 417, 418, 421, 425, 429, 430, 431, 433, 435, 437, - 449, 454, 468, 469, 470, 471, 472, 475, 476, 481, - 482, 483, 484, 485, 493, 494, 499, 569, 571, 584, - 602, 608, 474, 298, 299, 438, 439, 311, 312, 622, - 623, 
297, 579, 609, 577, 621, 603, 432, 373, 0, - 0, 376, 279, 302, 317, 0, 594, 495, 225, 460, - 288, 249, 0, 0, 209, 244, 228, 257, 272, 275, - 321, 386, 394, 423, 428, 294, 269, 242, 453, 239, - 478, 502, 503, 504, 506, 390, 264, 427, 391, 0, - 371, 559, 560, 313, 511, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 410, 0, 0, 0, - 0, 0, 0, 0, 0, 268, 0, 0, 0, 0, - 361, 265, 0, 0, 424, 0, 203, 0, 480, 250, - 372, 369, 566, 280, 271, 267, 248, 314, 380, 422, - 501, 416, 0, 365, 0, 0, 490, 395, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 320, 246, 322, 202, 407, 491, 284, - 0, 0, 0, 0, 0, 194, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 236, 0, 0, 243, 0, - 0, 0, 346, 355, 354, 335, 336, 338, 340, 345, - 352, 358, 0, 0, 0, 0, 0, 263, 318, 270, - 262, 563, 0, 0, 0, 0, 0, 0, 0, 227, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 273, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1418, 0, 295, 0, 396, 255, - 0, 447, 0, 0, 0, 605, 0, 0, 0, 0, - 0, 0, 0, 360, 0, 327, 197, 223, 0, 0, - 406, 455, 467, 0, 0, 0, 251, 0, 465, 420, - 583, 231, 282, 452, 426, 463, 434, 285, 0, 0, - 464, 367, 568, 444, 580, 606, 607, 261, 400, 592, - 505, 600, 624, 224, 258, 414, 498, 586, 487, 392, - 564, 565, 326, 486, 293, 201, 364, 612, 222, 473, - 366, 240, 229, 570, 589, 287, 450, 619, 211, 500, - 578, 237, 477, 0, 0, 627, 245, 497, 213, 575, - 496, 388, 323, 324, 212, 0, 451, 266, 291, 0, - 0, 256, 409, 572, 573, 254, 628, 226, 599, 218, - 0, 598, 402, 567, 576, 389, 378, 217, 574, 387, - 377, 331, 350, 351, 278, 304, 441, 370, 442, 303, - 305, 398, 397, 399, 205, 587, 0, 206, 0, 492, - 588, 629, 446, 210, 232, 233, 235, 0, 277, 281, - 289, 292, 300, 301, 310, 362, 413, 440, 436, 445, - 0, 562, 581, 593, 604, 610, 611, 613, 614, 615, - 616, 617, 620, 618, 401, 308, 488, 330, 368, 0, - 0, 419, 466, 238, 585, 489, 
199, 0, 0, 0, - 0, 252, 253, 0, 558, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 630, 631, 632, 633, 634, 635, - 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, - 646, 647, 625, 0, 0, 0, 0, 0, 0, 0, - 0, 648, 379, 479, 582, 332, 344, 347, 337, 356, - 0, 357, 333, 334, 339, 341, 342, 343, 348, 349, - 353, 359, 247, 208, 385, 393, 561, 309, 214, 215, - 216, 507, 508, 509, 510, 596, 597, 601, 456, 457, - 458, 459, 290, 591, 306, 462, 461, 328, 329, 374, - 443, 523, 525, 536, 540, 542, 544, 550, 553, 524, - 526, 537, 541, 543, 545, 551, 554, 513, 515, 517, - 519, 532, 531, 528, 556, 557, 534, 539, 518, 530, - 535, 548, 555, 552, 512, 516, 520, 529, 547, 546, - 527, 538, 549, 533, 521, 514, 522, 0, 196, 219, - 363, 0, 448, 286, 626, 595, 590, 204, 221, 0, - 260, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 198, 200, 207, 220, 230, 234, 241, - 259, 274, 276, 283, 296, 307, 315, 316, 319, 325, - 375, 381, 382, 383, 384, 403, 404, 405, 408, 411, - 412, 415, 417, 418, 421, 425, 429, 430, 431, 433, - 435, 437, 449, 454, 468, 469, 470, 471, 472, 475, - 476, 481, 482, 483, 484, 485, 493, 494, 499, 569, - 571, 584, 602, 608, 474, 298, 299, 438, 439, 311, - 312, 622, 623, 1417, 579, 609, 577, 621, 603, 432, - 373, 0, 0, 376, 279, 302, 317, 0, 594, 495, - 225, 460, 288, 249, 0, 0, 209, 244, 228, 257, - 272, 275, 321, 386, 394, 423, 428, 294, 269, 242, - 453, 239, 478, 502, 503, 504, 506, 390, 264, 427, - 391, 0, 371, 559, 560, 313, 511, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 410, 0, - 0, 0, 0, 0, 0, 0, 0, 268, 0, 0, - 0, 0, 361, 265, 0, 0, 424, 0, 203, 0, - 480, 250, 372, 369, 566, 280, 271, 267, 248, 314, - 380, 422, 501, 416, 0, 365, 0, 0, 490, 395, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 320, 246, 322, 202, 407, - 491, 284, 0, 0, 0, 0, 0, 194, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 236, 0, 0, - 243, 0, 0, 0, 346, 355, 354, 335, 336, 338, - 340, 345, 352, 358, 0, 0, 0, 0, 0, 263, - 318, 270, 262, 563, 0, 0, 0, 0, 0, 0, - 0, 227, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 
0, 273, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 295, 0, - 396, 255, 0, 447, 0, 0, 0, 605, 0, 0, - 0, 0, 0, 0, 0, 360, 0, 327, 197, 223, - 0, 0, 406, 455, 467, 0, 0, 0, 251, 0, - 465, 420, 583, 231, 282, 452, 426, 463, 434, 285, - 0, 0, 464, 367, 568, 444, 580, 606, 607, 261, - 400, 592, 505, 600, 624, 224, 258, 414, 498, 586, - 487, 392, 564, 565, 326, 486, 293, 201, 364, 612, - 222, 473, 366, 240, 229, 570, 589, 287, 450, 619, - 211, 500, 578, 237, 477, 0, 0, 627, 245, 497, - 213, 575, 496, 388, 323, 324, 212, 0, 451, 266, - 291, 0, 0, 256, 409, 572, 573, 254, 628, 226, - 599, 218, 0, 598, 402, 567, 576, 389, 378, 217, - 574, 387, 377, 331, 350, 351, 278, 304, 441, 370, - 442, 303, 305, 398, 397, 399, 205, 587, 0, 206, - 0, 492, 588, 629, 446, 210, 232, 233, 235, 0, - 277, 281, 289, 292, 300, 301, 310, 362, 413, 440, - 436, 445, 0, 562, 581, 593, 604, 610, 611, 613, - 614, 615, 616, 617, 620, 618, 401, 308, 488, 330, - 368, 0, 0, 419, 466, 238, 585, 489, 199, 0, - 0, 0, 0, 252, 253, 0, 558, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 630, 631, 632, 633, - 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, - 644, 645, 646, 647, 625, 0, 0, 0, 0, 0, - 0, 0, 0, 648, 379, 479, 582, 332, 344, 347, - 337, 356, 0, 357, 333, 334, 339, 341, 342, 343, - 348, 349, 353, 359, 247, 208, 385, 393, 561, 309, - 214, 215, 216, 507, 508, 509, 510, 596, 597, 601, - 456, 457, 458, 459, 290, 591, 306, 462, 461, 328, - 329, 374, 443, 523, 525, 536, 540, 542, 544, 550, - 553, 524, 526, 537, 541, 543, 545, 551, 554, 513, - 515, 517, 519, 532, 531, 528, 556, 557, 534, 539, - 518, 530, 535, 548, 555, 552, 512, 516, 520, 529, - 547, 546, 527, 538, 549, 533, 521, 514, 522, 0, - 196, 219, 363, 0, 448, 286, 626, 595, 590, 204, - 221, 0, 260, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 
0, 1014, 0, 0, 0, 198, 200, 207, 220, 230, - 234, 241, 259, 274, 276, 283, 296, 307, 315, 316, - 319, 325, 375, 381, 382, 383, 384, 403, 404, 405, - 408, 411, 412, 415, 417, 418, 421, 425, 429, 430, - 431, 433, 435, 437, 449, 454, 468, 469, 470, 471, - 472, 475, 476, 481, 482, 483, 484, 485, 493, 494, - 499, 569, 571, 584, 602, 608, 474, 298, 299, 438, - 439, 311, 312, 622, 623, 297, 579, 609, 577, 621, - 603, 432, 373, 0, 0, 376, 279, 302, 317, 0, - 594, 495, 225, 460, 288, 249, 0, 0, 209, 244, - 228, 257, 272, 275, 321, 386, 394, 423, 428, 294, - 269, 242, 453, 239, 478, 502, 503, 504, 506, 390, - 264, 427, 391, 0, 371, 559, 560, 313, 511, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 410, 0, 0, 0, 0, 0, 0, 0, 0, 268, - 0, 0, 0, 0, 361, 265, 0, 0, 424, 0, - 203, 0, 480, 250, 372, 369, 566, 280, 271, 267, - 248, 314, 380, 422, 501, 416, 0, 365, 0, 0, - 490, 395, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 320, 246, 322, - 202, 407, 491, 284, 0, 0, 0, 0, 0, 194, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 236, - 0, 0, 243, 0, 0, 0, 346, 355, 354, 335, - 336, 338, 340, 345, 352, 358, 0, 0, 0, 0, - 0, 263, 318, 270, 262, 563, 0, 0, 0, 0, - 0, 0, 0, 227, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 273, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 295, 0, 396, 255, 0, 447, 0, 651, 0, 605, - 0, 0, 0, 0, 0, 0, 0, 360, 0, 327, - 197, 223, 0, 0, 406, 455, 467, 0, 0, 0, - 251, 0, 465, 420, 583, 231, 282, 452, 426, 463, - 434, 285, 0, 0, 464, 367, 568, 444, 580, 606, - 607, 261, 400, 592, 505, 600, 624, 224, 258, 414, - 498, 586, 487, 392, 564, 565, 326, 486, 293, 201, - 364, 612, 222, 473, 366, 240, 229, 570, 589, 287, - 450, 619, 211, 500, 578, 237, 477, 0, 0, 627, - 245, 497, 213, 575, 496, 388, 323, 324, 212, 0, - 451, 266, 291, 0, 0, 256, 409, 572, 573, 254, - 628, 226, 599, 
218, 0, 598, 402, 567, 576, 389, - 378, 217, 574, 387, 377, 331, 350, 351, 278, 304, - 441, 370, 442, 303, 305, 398, 397, 399, 205, 587, - 0, 206, 0, 492, 588, 629, 446, 210, 232, 233, - 235, 0, 277, 281, 289, 292, 300, 301, 310, 362, - 413, 440, 436, 445, 0, 562, 581, 593, 604, 610, - 611, 613, 614, 615, 616, 617, 620, 618, 401, 308, - 488, 330, 368, 0, 0, 419, 466, 238, 585, 489, - 199, 0, 0, 0, 0, 252, 253, 0, 558, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 630, 631, - 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, - 642, 643, 644, 645, 646, 647, 625, 0, 0, 0, - 0, 0, 0, 0, 0, 648, 379, 479, 582, 332, - 344, 347, 337, 356, 0, 357, 333, 334, 339, 341, - 342, 343, 348, 349, 353, 359, 247, 208, 385, 393, - 561, 309, 214, 215, 216, 507, 508, 509, 510, 596, - 597, 601, 456, 457, 458, 459, 290, 591, 306, 462, - 461, 328, 329, 374, 443, 523, 525, 536, 540, 542, - 544, 550, 553, 524, 526, 537, 541, 543, 545, 551, - 554, 513, 515, 517, 519, 532, 531, 528, 556, 557, - 534, 539, 518, 530, 535, 548, 555, 552, 512, 516, - 520, 529, 547, 546, 527, 538, 549, 533, 521, 514, - 522, 0, 196, 219, 363, 0, 448, 286, 626, 595, - 590, 204, 221, 0, 260, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 198, 200, 207, - 220, 230, 234, 241, 259, 274, 276, 283, 296, 307, - 315, 316, 319, 325, 375, 381, 382, 383, 384, 403, - 404, 405, 408, 411, 412, 415, 417, 418, 421, 425, - 429, 430, 431, 433, 435, 437, 449, 454, 468, 469, - 470, 471, 472, 475, 476, 481, 482, 483, 484, 485, - 493, 494, 499, 569, 571, 584, 602, 608, 474, 298, - 299, 438, 439, 311, 312, 622, 623, 297, 579, 609, - 577, 621, 603, 432, 373, 0, 0, 376, 279, 302, - 317, 0, 594, 495, 225, 460, 288, 249, 0, 0, - 209, 244, 228, 257, 272, 275, 321, 386, 394, 423, - 428, 294, 269, 242, 453, 239, 478, 502, 503, 504, - 506, 390, 264, 427, 391, 0, 371, 559, 560, 313, - 511, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 410, 0, 0, 0, 0, 0, 0, 0, - 0, 268, 0, 0, 0, 0, 361, 265, 0, 0, - 424, 0, 203, 0, 480, 250, 372, 369, 566, 280, - 271, 
267, 248, 314, 380, 422, 501, 416, 0, 365, - 0, 0, 490, 395, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 320, - 246, 322, 202, 407, 491, 284, 0, 0, 0, 0, - 0, 696, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 236, 0, 0, 243, 0, 0, 0, 346, 355, - 354, 335, 336, 338, 340, 345, 352, 358, 0, 0, - 0, 0, 0, 263, 318, 270, 262, 563, 0, 0, - 0, 0, 0, 0, 0, 227, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 273, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 295, 0, 396, 255, 0, 447, 0, 0, - 0, 605, 0, 0, 0, 0, 0, 0, 0, 360, - 0, 327, 197, 223, 0, 0, 406, 455, 467, 0, - 0, 0, 251, 0, 465, 420, 583, 231, 282, 452, - 426, 463, 434, 285, 0, 0, 464, 367, 568, 444, - 580, 606, 607, 261, 400, 592, 505, 600, 624, 224, - 258, 414, 498, 586, 487, 392, 564, 565, 326, 486, - 293, 201, 364, 612, 222, 473, 366, 240, 229, 570, - 589, 287, 450, 619, 211, 500, 578, 237, 477, 0, - 0, 627, 245, 497, 213, 575, 496, 388, 323, 324, - 212, 0, 451, 266, 291, 0, 0, 256, 409, 572, - 573, 254, 628, 226, 599, 218, 0, 598, 402, 567, - 576, 389, 378, 217, 574, 387, 377, 331, 350, 351, - 278, 304, 441, 370, 442, 303, 305, 398, 397, 399, - 205, 587, 0, 206, 0, 492, 588, 629, 446, 210, - 232, 233, 235, 0, 277, 281, 289, 292, 300, 301, - 310, 362, 413, 440, 436, 445, 0, 562, 581, 593, - 604, 610, 611, 613, 614, 615, 616, 617, 620, 618, - 401, 308, 488, 330, 368, 0, 0, 419, 466, 238, - 585, 489, 199, 0, 0, 0, 0, 252, 253, 0, - 558, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, - 640, 641, 642, 643, 644, 645, 646, 647, 625, 0, - 0, 0, 0, 0, 0, 0, 0, 648, 379, 479, - 582, 332, 344, 347, 337, 356, 0, 357, 333, 334, - 339, 341, 342, 343, 348, 349, 353, 359, 247, 208, - 385, 393, 561, 309, 214, 215, 216, 507, 508, 509, - 510, 596, 597, 601, 456, 457, 458, 459, 290, 591, - 306, 462, 
461, 328, 329, 374, 443, 523, 525, 536, - 540, 542, 544, 550, 553, 524, 526, 537, 541, 543, - 545, 551, 554, 513, 515, 517, 519, 532, 531, 528, - 556, 557, 534, 539, 518, 530, 535, 548, 555, 552, - 512, 516, 520, 529, 547, 546, 527, 538, 549, 533, - 521, 514, 522, 0, 196, 219, 363, 0, 448, 286, - 626, 595, 590, 204, 221, 0, 260, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, - 200, 207, 220, 230, 234, 241, 259, 274, 276, 283, - 296, 307, 315, 316, 319, 325, 375, 381, 382, 383, - 384, 4014, 404, 405, 408, 411, 412, 415, 417, 418, - 421, 425, 429, 430, 431, 433, 435, 437, 449, 454, - 468, 469, 470, 471, 472, 475, 476, 481, 482, 483, - 484, 485, 493, 494, 499, 569, 571, 584, 602, 608, - 474, 298, 299, 438, 439, 311, 312, 622, 623, 297, - 579, 609, 577, 621, 603, 432, 373, 0, 0, 376, - 279, 302, 317, 0, 594, 495, 225, 460, 288, 249, - 0, 0, 209, 244, 228, 257, 272, 275, 321, 386, - 394, 423, 428, 294, 269, 242, 453, 239, 478, 502, - 503, 504, 506, 390, 264, 427, 391, 0, 371, 559, - 560, 313, 511, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 410, 0, 0, 0, 0, 0, - 0, 0, 0, 268, 0, 0, 0, 0, 361, 265, - 0, 0, 424, 0, 203, 0, 480, 250, 372, 369, - 566, 280, 271, 267, 248, 314, 380, 422, 501, 416, - 0, 365, 0, 0, 490, 395, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 320, 246, 322, 202, 407, 491, 284, 0, 0, - 0, 0, 0, 696, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 236, 0, 0, 243, 0, 0, 0, - 346, 355, 354, 335, 336, 338, 340, 345, 352, 358, - 0, 0, 0, 0, 0, 263, 318, 270, 262, 563, - 0, 0, 0, 0, 0, 0, 0, 227, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 273, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 295, 0, 396, 255, 0, 447, - 0, 0, 0, 605, 0, 0, 0, 0, 0, 0, - 0, 360, 0, 327, 197, 223, 0, 0, 406, 455, - 467, 0, 0, 0, 251, 0, 465, 420, 583, 231, - 282, 
452, 426, 463, 434, 285, 0, 0, 464, 367, - 568, 444, 580, 606, 607, 261, 400, 592, 505, 600, - 624, 224, 258, 414, 498, 586, 487, 392, 564, 565, - 326, 486, 293, 201, 364, 612, 222, 473, 366, 240, - 229, 570, 589, 287, 450, 619, 211, 500, 578, 237, - 477, 0, 0, 627, 245, 497, 213, 575, 496, 388, - 323, 324, 212, 0, 451, 266, 291, 0, 0, 256, - 409, 572, 573, 254, 628, 226, 599, 218, 0, 598, - 402, 567, 576, 389, 378, 217, 574, 387, 377, 331, - 350, 351, 278, 304, 441, 370, 442, 303, 305, 398, - 397, 399, 205, 587, 0, 206, 0, 492, 588, 629, - 446, 210, 232, 233, 235, 0, 277, 281, 289, 292, - 300, 301, 310, 362, 413, 440, 436, 445, 0, 562, - 581, 593, 604, 610, 611, 613, 614, 615, 616, 617, - 620, 618, 401, 308, 488, 330, 368, 0, 0, 419, - 466, 238, 585, 489, 199, 0, 0, 0, 0, 252, - 253, 0, 558, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 630, 631, 632, 633, 634, 635, 636, 637, - 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, - 625, 0, 0, 0, 0, 0, 0, 0, 0, 648, - 379, 479, 582, 332, 344, 347, 337, 356, 0, 357, - 333, 334, 339, 341, 342, 343, 348, 349, 353, 359, - 247, 208, 385, 393, 561, 309, 214, 215, 216, 507, - 508, 509, 510, 596, 597, 601, 456, 457, 458, 459, - 290, 591, 306, 462, 461, 328, 329, 374, 443, 523, - 525, 536, 540, 542, 544, 550, 553, 524, 526, 537, - 541, 543, 545, 551, 554, 513, 515, 517, 519, 532, - 531, 528, 556, 557, 534, 539, 518, 530, 535, 548, - 555, 552, 512, 516, 520, 529, 547, 546, 527, 538, - 549, 533, 521, 514, 522, 0, 196, 219, 363, 0, - 448, 286, 626, 595, 590, 204, 221, 0, 260, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 198, 200, 207, 220, 230, 234, 241, 259, 274, - 276, 283, 296, 307, 315, 316, 319, 325, 375, 381, - 382, 383, 384, 403, 404, 405, 408, 411, 412, 415, - 417, 418, 421, 425, 429, 430, 431, 433, 435, 437, - 449, 454, 468, 469, 470, 471, 472, 475, 476, 481, - 482, 483, 484, 485, 493, 494, 499, 569, 571, 584, - 602, 608, 474, 298, 299, 438, 439, 311, 312, 622, - 623, 297, 579, 609, 577, 621, 603, 432, 373, 0, 
- 0, 376, 279, 302, 317, 0, 594, 495, 225, 460, - 288, 249, 0, 0, 209, 244, 228, 257, 272, 275, - 321, 386, 394, 423, 428, 294, 269, 242, 453, 239, - 478, 502, 503, 504, 506, 390, 264, 427, 391, 0, - 371, 559, 560, 313, 511, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 410, 0, 0, 0, - 0, 0, 0, 0, 0, 268, 0, 0, 0, 0, - 361, 265, 0, 0, 424, 0, 203, 0, 480, 250, - 372, 369, 566, 280, 271, 267, 248, 314, 380, 422, - 501, 416, 0, 365, 0, 0, 490, 395, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 320, 246, 322, 202, 407, 491, 284, - 0, 0, 0, 0, 0, 929, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 236, 0, 0, 243, 0, - 0, 0, 346, 355, 354, 335, 336, 338, 340, 345, - 352, 358, 0, 0, 0, 0, 0, 263, 318, 270, - 262, 563, 0, 0, 0, 0, 0, 0, 0, 227, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 273, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 295, 0, 396, 255, - 0, 447, 0, 0, 0, 605, 0, 0, 0, 0, - 0, 0, 0, 360, 0, 327, 197, 223, 0, 0, - 406, 455, 467, 0, 0, 0, 251, 0, 465, 420, - 583, 231, 282, 452, 426, 463, 434, 285, 0, 0, - 464, 367, 568, 444, 580, 606, 607, 261, 400, 592, - 505, 600, 624, 224, 258, 414, 498, 586, 487, 392, - 564, 565, 326, 486, 293, 201, 364, 612, 222, 473, - 366, 240, 229, 570, 589, 287, 450, 619, 211, 500, - 578, 237, 477, 0, 0, 627, 245, 497, 213, 575, - 496, 388, 323, 324, 212, 0, 451, 266, 291, 0, - 0, 256, 409, 572, 573, 254, 628, 226, 599, 218, - 0, 598, 402, 567, 576, 389, 378, 217, 574, 387, - 377, 331, 350, 351, 278, 304, 441, 370, 442, 303, - 305, 398, 397, 399, 205, 587, 0, 206, 0, 492, - 588, 629, 446, 210, 232, 233, 235, 0, 277, 281, - 289, 292, 300, 301, 310, 362, 413, 440, 436, 445, - 0, 562, 581, 593, 604, 610, 611, 613, 614, 615, - 616, 617, 620, 618, 401, 308, 488, 330, 368, 0, - 0, 419, 466, 238, 585, 489, 199, 0, 0, 0, - 0, 252, 253, 0, 558, 0, 0, 0, 
0, 0, - 0, 0, 0, 0, 630, 631, 632, 633, 634, 635, - 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, - 646, 647, 625, 0, 0, 0, 0, 0, 0, 0, - 0, 648, 379, 479, 582, 332, 344, 347, 337, 356, - 0, 357, 333, 334, 339, 341, 342, 343, 348, 349, - 353, 359, 247, 208, 385, 393, 561, 309, 214, 215, - 216, 507, 508, 509, 510, 596, 597, 601, 456, 457, - 458, 459, 290, 591, 306, 462, 461, 328, 329, 374, - 443, 523, 525, 536, 540, 542, 544, 550, 553, 524, - 526, 537, 541, 543, 545, 551, 554, 513, 515, 517, - 519, 532, 531, 528, 556, 557, 534, 539, 518, 530, - 535, 548, 555, 552, 512, 516, 520, 529, 547, 546, - 527, 538, 549, 533, 521, 514, 522, 0, 196, 219, - 363, 0, 448, 286, 626, 595, 590, 204, 221, 0, - 260, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 198, 200, 207, 220, 230, 234, 241, - 259, 274, 276, 283, 296, 307, 315, 316, 319, 325, - 375, 381, 382, 383, 384, 403, 404, 405, 408, 411, - 412, 415, 417, 418, 421, 425, 429, 430, 431, 433, - 435, 437, 449, 454, 468, 469, 470, 471, 472, 475, - 476, 481, 482, 483, 484, 485, 493, 494, 499, 569, - 571, 584, 602, 608, 474, 298, 299, 438, 439, 311, - 312, 622, 623, 297, 579, 609, 577, 621, 603, 432, - 373, 0, 0, 376, 279, 302, 317, 0, 594, 495, - 225, 460, 288, 249, 0, 0, 209, 244, 228, 257, - 272, 275, 321, 386, 394, 423, 428, 294, 269, 242, - 453, 239, 478, 502, 503, 504, 506, 390, 264, 427, - 391, 0, 371, 559, 560, 313, 511, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 410, 0, - 0, 0, 0, 0, 0, 0, 0, 268, 0, 0, - 0, 0, 361, 265, 0, 0, 424, 0, 203, 0, - 480, 250, 372, 369, 566, 280, 271, 267, 248, 314, - 380, 422, 501, 416, 0, 365, 0, 0, 490, 395, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 320, 246, 322, 202, 407, - 491, 284, 0, 0, 0, 0, 0, 194, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 236, 0, 0, - 243, 0, 0, 0, 346, 355, 354, 335, 336, 338, - 340, 345, 352, 358, 0, 0, 0, 0, 0, 263, - 318, 270, 262, 563, 0, 0, 0, 0, 0, 0, - 0, 227, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 273, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 295, 0, - 396, 255, 0, 447, 0, 0, 0, 605, 0, 0, - 0, 0, 0, 0, 0, 360, 0, 327, 197, 223, - 0, 0, 406, 455, 467, 0, 0, 0, 251, 0, - 465, 420, 583, 231, 282, 452, 426, 463, 434, 285, - 0, 0, 464, 367, 568, 444, 580, 606, 607, 261, - 400, 592, 505, 600, 624, 224, 258, 414, 498, 586, - 487, 392, 564, 565, 326, 486, 293, 201, 364, 612, - 222, 473, 366, 240, 229, 570, 589, 287, 450, 619, - 211, 500, 578, 237, 477, 0, 0, 627, 245, 497, - 213, 575, 496, 388, 323, 324, 212, 0, 451, 266, - 291, 0, 0, 256, 409, 572, 573, 254, 628, 226, - 599, 218, 0, 598, 402, 567, 576, 389, 378, 217, - 574, 387, 377, 331, 350, 351, 278, 304, 441, 370, - 442, 303, 305, 398, 397, 399, 205, 587, 0, 206, - 0, 492, 588, 629, 446, 210, 232, 233, 235, 0, - 277, 281, 289, 292, 300, 301, 310, 362, 413, 440, - 436, 445, 0, 562, 581, 593, 604, 610, 611, 613, - 614, 615, 616, 617, 620, 618, 401, 308, 488, 330, - 368, 0, 0, 419, 466, 238, 585, 489, 199, 0, - 0, 0, 0, 252, 253, 0, 558, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 630, 631, 632, 633, - 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, - 644, 645, 646, 647, 625, 0, 0, 0, 0, 0, - 0, 0, 0, 648, 379, 479, 582, 332, 344, 347, - 337, 356, 0, 357, 333, 334, 339, 341, 342, 343, - 348, 349, 353, 359, 247, 208, 385, 393, 561, 309, - 214, 215, 216, 507, 508, 509, 510, 596, 597, 601, - 456, 457, 458, 459, 290, 591, 306, 462, 461, 328, - 329, 374, 443, 523, 525, 536, 540, 542, 544, 550, - 553, 524, 526, 537, 541, 543, 545, 551, 554, 513, - 515, 517, 519, 532, 531, 528, 556, 557, 534, 539, - 518, 530, 535, 548, 555, 552, 512, 516, 520, 529, - 547, 546, 527, 538, 549, 533, 521, 514, 522, 0, - 196, 219, 363, 0, 448, 286, 626, 595, 590, 204, - 221, 0, 260, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 198, 200, 207, 220, 230, - 234, 
241, 259, 274, 276, 283, 296, 307, 315, 316, - 319, 325, 375, 381, 382, 383, 384, 403, 404, 405, - 408, 411, 412, 415, 417, 418, 421, 425, 429, 430, - 431, 433, 435, 437, 449, 454, 468, 469, 470, 471, - 472, 475, 476, 481, 482, 483, 484, 485, 493, 494, - 499, 569, 571, 584, 602, 608, 474, 298, 299, 438, - 439, 311, 312, 622, 623, 297, 579, 609, 577, 621, - 603, 432, 373, 0, 0, 376, 279, 302, 317, 0, - 594, 495, 225, 460, 288, 249, 0, 0, 209, 244, - 228, 257, 272, 275, 321, 386, 394, 423, 428, 294, - 269, 242, 453, 239, 478, 502, 503, 504, 506, 390, - 264, 427, 0, 0, 371, 559, 560, 313, + 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, + 1003, 1004, 1005, 1006, 1007, 2074, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 298, 0, 399, 258, + 0, 450, 902, 0, 0, 618, 0, 0, 900, 0, + 0, 0, 0, 363, 0, 330, 199, 226, 0, 0, + 409, 458, 470, 0, 0, 0, 953, 0, 468, 423, + 596, 234, 285, 455, 429, 466, 437, 288, 0, 0, + 467, 370, 579, 447, 593, 619, 620, 264, 403, 605, + 516, 613, 637, 227, 261, 417, 501, 599, 490, 395, + 575, 576, 329, 489, 296, 203, 367, 625, 225, 476, + 369, 243, 232, 581, 602, 290, 453, 632, 214, 511, + 591, 240, 480, 0, 0, 640, 248, 500, 216, 588, + 499, 391, 326, 327, 215, 0, 454, 269, 294, 0, + 0, 259, 412, 954, 955, 257, 641, 799, 612, 221, + 0, 611, 405, 578, 589, 392, 381, 220, 587, 390, + 380, 334, 807, 808, 281, 307, 884, 883, 882, 306, + 308, 880, 881, 879, 208, 600, 0, 209, 0, 495, + 601, 642, 449, 213, 235, 236, 238, 0, 280, 284, + 292, 295, 303, 304, 313, 365, 416, 443, 439, 448, + 0, 573, 594, 606, 617, 623, 624, 626, 627, 628, + 629, 630, 633, 631, 404, 311, 491, 333, 371, 0, + 0, 422, 469, 241, 598, 492, 890, 912, 901, 767, + 768, 891, 892, 916, 893, 770, 771, 913, 914, 764, + 765, 769, 915, 917, 643, 644, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, + 659, 660, 638, 502, 508, 503, 504, 505, 506, 507, + 0, 509, 904, 754, 753, 0, 760, 0, 789, 790, + 792, 796, 797, 798, 809, 856, 857, 865, 867, 868, + 866, 869, 870, 871, 874, 
875, 876, 877, 872, 873, + 878, 772, 776, 773, 774, 775, 787, 777, 778, 779, + 780, 781, 782, 783, 784, 785, 786, 788, 927, 928, + 929, 930, 931, 932, 802, 806, 805, 803, 804, 800, + 801, 828, 827, 829, 830, 831, 832, 833, 834, 836, + 835, 837, 838, 839, 840, 841, 842, 810, 811, 814, + 815, 813, 812, 816, 825, 826, 817, 818, 819, 820, + 821, 822, 824, 823, 843, 844, 845, 846, 847, 849, + 848, 852, 853, 851, 850, 855, 854, 752, 198, 222, + 366, 0, 451, 289, 639, 608, 603, 207, 224, 918, + 263, 919, 0, 0, 923, 0, 0, 0, 925, 924, + 0, 926, 0, 888, 887, 0, 0, 920, 921, 0, + 922, 0, 0, 200, 202, 210, 223, 233, 237, 244, + 262, 277, 279, 286, 299, 310, 318, 319, 322, 328, + 378, 384, 385, 386, 387, 406, 407, 408, 411, 414, + 415, 418, 420, 421, 424, 428, 432, 433, 434, 436, + 438, 440, 452, 457, 471, 472, 473, 474, 475, 478, + 479, 484, 485, 486, 487, 488, 496, 497, 510, 580, + 582, 597, 615, 621, 477, 933, 934, 935, 936, 937, + 938, 939, 940, 300, 592, 622, 590, 634, 616, 435, + 376, 0, 0, 379, 282, 305, 320, 0, 607, 498, + 228, 463, 291, 252, 958, 0, 212, 247, 231, 260, + 275, 278, 324, 389, 397, 426, 431, 297, 272, 245, + 456, 242, 481, 513, 514, 515, 517, 393, 267, 430, + 394, 0, 374, 570, 571, 316, 522, 0, 763, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 413, 0, + 0, 0, 0, 751, 0, 0, 0, 271, 756, 0, + 0, 0, 364, 268, 0, 0, 427, 0, 205, 0, + 483, 253, 375, 372, 577, 283, 274, 270, 251, 317, + 383, 425, 512, 419, 762, 368, 0, 0, 493, 398, + 0, 0, 0, 0, 0, 758, 759, 0, 0, 0, + 0, 0, 0, 0, 0, 323, 249, 325, 204, 410, + 494, 287, 0, 96, 0, 0, 959, 943, 1074, 909, + 947, 960, 961, 962, 963, 948, 0, 239, 949, 950, + 246, 951, 0, 908, 793, 795, 794, 858, 859, 860, + 861, 862, 863, 864, 791, 956, 964, 965, 0, 266, + 321, 273, 265, 574, 0, 0, 0, 0, 0, 0, + 0, 0, 230, 0, 0, 0, 0, 0, 0, 0, + 0, 748, 0, 761, 0, 0, 0, 276, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 745, 746, 0, 0, 0, 0, + 903, 0, 747, 0, 0, 755, 966, 967, 968, 969, + 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, + 
980, 981, 982, 983, 984, 985, 986, 987, 988, 989, + 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, + 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 2072, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 298, + 0, 399, 258, 0, 450, 902, 0, 0, 618, 0, + 0, 900, 0, 0, 0, 0, 363, 0, 330, 199, + 226, 0, 0, 409, 458, 470, 0, 0, 0, 953, + 0, 468, 423, 596, 234, 285, 455, 429, 466, 437, + 288, 0, 0, 467, 370, 579, 447, 593, 619, 620, + 264, 403, 605, 516, 613, 637, 227, 261, 417, 501, + 599, 490, 395, 575, 576, 329, 489, 296, 203, 367, + 625, 225, 476, 369, 243, 232, 581, 602, 290, 453, + 632, 214, 511, 591, 240, 480, 0, 0, 640, 248, + 500, 216, 588, 499, 391, 326, 327, 215, 0, 454, + 269, 294, 0, 0, 259, 412, 954, 955, 257, 641, + 799, 612, 221, 0, 611, 405, 578, 589, 392, 381, + 220, 587, 390, 380, 334, 807, 808, 281, 307, 884, + 883, 882, 306, 308, 880, 881, 879, 208, 600, 0, + 209, 0, 495, 601, 642, 449, 213, 235, 236, 238, + 0, 280, 284, 292, 295, 303, 304, 313, 365, 416, + 443, 439, 448, 0, 573, 594, 606, 617, 623, 624, + 626, 627, 628, 629, 630, 633, 631, 404, 311, 491, + 333, 371, 0, 0, 422, 469, 241, 598, 492, 890, + 912, 901, 767, 768, 891, 892, 916, 893, 770, 771, + 913, 914, 764, 765, 769, 915, 917, 643, 644, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 659, 660, 638, 502, 508, 503, 504, + 505, 506, 507, 0, 509, 904, 754, 753, 0, 760, + 0, 789, 790, 792, 796, 797, 798, 809, 856, 857, + 865, 867, 868, 866, 869, 870, 871, 874, 875, 876, + 877, 872, 873, 878, 772, 776, 773, 774, 775, 787, + 777, 778, 779, 780, 781, 782, 783, 784, 785, 786, + 788, 927, 928, 929, 930, 931, 932, 802, 806, 805, + 803, 804, 800, 801, 828, 827, 829, 830, 831, 832, + 833, 834, 836, 835, 837, 838, 839, 840, 841, 842, + 810, 811, 814, 815, 813, 812, 816, 825, 826, 817, + 818, 819, 820, 821, 822, 824, 823, 843, 844, 845, + 846, 847, 849, 848, 852, 853, 851, 850, 855, 854, + 752, 198, 222, 366, 0, 451, 289, 639, 608, 603, + 207, 224, 918, 263, 919, 0, 0, 923, 0, 0, + 0, 925, 
924, 0, 926, 0, 888, 887, 0, 0, + 920, 921, 0, 922, 0, 0, 200, 202, 210, 223, + 233, 237, 244, 262, 277, 279, 286, 299, 310, 318, + 319, 322, 328, 378, 384, 385, 386, 387, 406, 407, + 408, 411, 414, 415, 418, 420, 421, 424, 428, 432, + 433, 434, 436, 438, 440, 452, 457, 471, 472, 473, + 474, 475, 478, 479, 484, 485, 486, 487, 488, 496, + 497, 510, 580, 582, 597, 615, 621, 477, 933, 934, + 935, 936, 937, 938, 939, 940, 300, 592, 622, 590, + 634, 616, 435, 376, 0, 0, 379, 282, 305, 320, + 0, 607, 498, 228, 463, 291, 252, 958, 0, 212, + 247, 231, 260, 275, 278, 324, 389, 397, 426, 431, + 297, 272, 245, 456, 242, 481, 513, 514, 515, 517, + 393, 267, 430, 394, 0, 374, 570, 571, 316, 522, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 413, 0, 0, 0, 0, 0, 0, 0, 0, + 271, 0, 0, 0, 0, 364, 268, 0, 0, 427, + 0, 205, 0, 483, 253, 375, 372, 577, 283, 274, + 270, 251, 317, 383, 425, 512, 419, 0, 368, 0, + 0, 493, 398, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 323, 249, + 325, 204, 410, 494, 287, 0, 0, 0, 0, 0, + 711, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 239, 0, 0, 246, 0, 0, 0, 349, 358, 357, + 338, 339, 341, 343, 348, 355, 361, 0, 0, 0, + 0, 0, 266, 321, 273, 265, 574, 0, 0, 0, + 0, 0, 0, 0, 0, 230, 0, 1125, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 276, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 298, 0, 399, 258, 0, 450, 0, 0, + 1124, 618, 0, 0, 0, 0, 0, 1121, 1122, 363, + 1082, 330, 199, 226, 1115, 1119, 409, 458, 470, 0, + 0, 0, 254, 0, 468, 423, 596, 234, 285, 455, + 429, 466, 437, 288, 0, 0, 467, 370, 579, 447, + 593, 619, 620, 264, 403, 605, 516, 613, 637, 227, + 261, 417, 501, 599, 490, 395, 575, 576, 329, 489, + 296, 203, 367, 625, 225, 476, 369, 243, 232, 581, + 602, 290, 453, 632, 214, 511, 591, 240, 480, 0, + 0, 640, 248, 500, 216, 588, 499, 391, 326, 327, + 215, 0, 
454, 269, 294, 0, 0, 259, 412, 583, + 584, 257, 641, 229, 612, 221, 0, 611, 405, 578, + 589, 392, 381, 220, 587, 390, 380, 334, 353, 354, + 281, 307, 444, 373, 445, 306, 308, 401, 400, 402, + 208, 600, 0, 209, 0, 495, 601, 642, 449, 213, + 235, 236, 238, 0, 280, 284, 292, 295, 303, 304, + 313, 365, 416, 443, 439, 448, 0, 573, 594, 606, + 617, 623, 624, 626, 627, 628, 629, 630, 633, 631, + 404, 311, 491, 333, 371, 0, 0, 422, 469, 241, + 598, 492, 201, 0, 0, 0, 0, 255, 256, 0, + 569, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, + 653, 654, 655, 656, 657, 658, 659, 660, 638, 502, + 508, 503, 504, 505, 506, 507, 0, 509, 0, 0, + 0, 0, 0, 0, 585, 586, 661, 382, 482, 595, + 335, 347, 350, 340, 359, 0, 360, 336, 337, 342, + 344, 345, 346, 351, 352, 356, 362, 250, 211, 388, + 396, 572, 312, 217, 218, 219, 518, 519, 520, 521, + 609, 610, 614, 206, 459, 460, 461, 462, 293, 604, + 309, 465, 464, 331, 332, 377, 446, 534, 536, 547, + 551, 553, 555, 561, 564, 535, 537, 548, 552, 554, + 556, 562, 565, 524, 526, 528, 530, 543, 542, 539, + 567, 568, 545, 550, 529, 541, 546, 559, 566, 563, + 523, 527, 531, 540, 558, 557, 538, 549, 560, 544, + 532, 525, 533, 0, 198, 222, 366, 0, 451, 289, + 639, 608, 603, 207, 224, 0, 263, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 200, + 202, 210, 223, 233, 237, 244, 262, 277, 279, 286, + 299, 310, 318, 319, 322, 328, 378, 384, 385, 386, + 387, 406, 407, 408, 411, 414, 415, 418, 420, 421, + 424, 428, 432, 433, 434, 436, 438, 440, 452, 457, + 471, 472, 473, 474, 475, 478, 479, 484, 485, 486, + 487, 488, 496, 497, 510, 580, 582, 597, 615, 621, + 477, 301, 302, 441, 442, 314, 315, 635, 636, 300, + 592, 622, 590, 634, 616, 435, 376, 0, 0, 379, + 282, 305, 320, 0, 607, 498, 228, 463, 291, 252, + 0, 0, 212, 247, 231, 260, 275, 278, 324, 389, + 397, 426, 431, 297, 272, 245, 456, 242, 481, 513, + 514, 515, 517, 393, 267, 430, 394, 0, 374, 570, + 571, 316, 522, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 
413, 0, 0, 0, 0, 0, + 0, 0, 0, 271, 0, 0, 0, 0, 364, 268, + 0, 0, 427, 0, 205, 0, 483, 253, 375, 372, + 577, 283, 274, 270, 251, 317, 383, 425, 512, 419, + 0, 368, 0, 0, 493, 398, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 323, 249, 325, 204, 410, 494, 287, 0, 0, + 0, 0, 1682, 943, 0, 0, 1679, 0, 0, 0, + 0, 1677, 0, 239, 1678, 1676, 246, 1681, 0, 908, + 349, 358, 357, 338, 339, 341, 343, 348, 355, 361, + 0, 0, 0, 0, 0, 266, 321, 273, 265, 574, + 0, 0, 0, 0, 0, 0, 0, 0, 230, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 276, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 298, 0, 399, 258, 0, + 450, 0, 0, 0, 618, 0, 0, 0, 0, 0, + 0, 0, 363, 0, 330, 199, 226, 0, 0, 409, + 458, 470, 0, 0, 0, 254, 0, 468, 423, 596, + 234, 285, 455, 429, 466, 437, 288, 0, 0, 467, + 370, 579, 447, 593, 619, 620, 264, 403, 605, 516, + 613, 637, 227, 261, 417, 501, 599, 490, 395, 575, + 576, 329, 489, 296, 203, 367, 625, 225, 476, 369, + 243, 232, 581, 602, 290, 453, 632, 214, 511, 591, + 240, 480, 0, 0, 640, 248, 500, 216, 588, 499, + 391, 326, 327, 215, 0, 454, 269, 294, 0, 0, + 259, 412, 583, 584, 257, 641, 229, 612, 221, 0, + 611, 405, 578, 589, 392, 381, 220, 587, 390, 380, + 334, 353, 354, 281, 307, 444, 373, 445, 306, 308, + 401, 400, 402, 208, 600, 0, 209, 0, 495, 601, + 642, 449, 213, 235, 236, 238, 0, 280, 284, 292, + 295, 303, 304, 313, 365, 416, 443, 439, 448, 0, + 573, 594, 606, 617, 623, 624, 626, 627, 628, 629, + 630, 633, 631, 404, 311, 491, 333, 371, 0, 0, + 422, 469, 241, 598, 492, 201, 0, 0, 0, 0, + 255, 256, 0, 569, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, + 660, 638, 502, 508, 503, 504, 505, 506, 507, 0, + 509, 0, 0, 0, 0, 0, 0, 585, 586, 661, + 382, 482, 595, 335, 347, 350, 340, 
359, 0, 360, + 336, 337, 342, 344, 345, 346, 351, 352, 356, 362, + 250, 211, 388, 396, 572, 312, 217, 218, 219, 518, + 519, 520, 521, 609, 610, 614, 206, 459, 460, 461, + 462, 293, 604, 309, 465, 464, 331, 332, 377, 446, + 534, 536, 547, 551, 553, 555, 561, 564, 535, 537, + 548, 552, 554, 556, 562, 565, 524, 526, 528, 530, + 543, 542, 539, 567, 568, 545, 550, 529, 541, 546, + 559, 566, 563, 523, 527, 531, 540, 558, 557, 538, + 549, 560, 544, 532, 525, 533, 0, 198, 222, 366, + 0, 451, 289, 639, 608, 603, 207, 224, 0, 263, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 200, 202, 210, 223, 233, 237, 244, 262, + 277, 279, 286, 299, 310, 318, 319, 322, 328, 378, + 384, 385, 386, 387, 406, 407, 408, 411, 414, 415, + 418, 420, 421, 424, 428, 432, 433, 434, 436, 438, + 440, 452, 457, 471, 472, 473, 474, 475, 478, 479, + 484, 485, 486, 487, 488, 496, 497, 510, 580, 582, + 597, 615, 621, 477, 301, 302, 441, 442, 314, 315, + 635, 636, 300, 592, 622, 590, 634, 616, 435, 376, + 0, 0, 379, 282, 305, 320, 0, 607, 498, 228, + 463, 291, 252, 0, 0, 212, 247, 231, 260, 275, + 278, 324, 389, 397, 426, 431, 297, 272, 245, 456, + 242, 481, 513, 514, 515, 517, 393, 267, 430, 0, + 394, 374, 570, 571, 316, 87, 522, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 413, 0, + 0, 0, 0, 0, 0, 0, 0, 271, 0, 0, + 0, 0, 364, 268, 0, 0, 427, 0, 205, 0, + 483, 253, 375, 372, 577, 283, 274, 270, 251, 317, + 383, 425, 512, 419, 0, 368, 0, 0, 493, 398, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 323, 249, 325, 204, 410, + 494, 287, 0, 96, 0, 0, 0, 196, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 239, 0, 0, + 246, 0, 0, 0, 349, 358, 357, 338, 339, 341, + 343, 348, 355, 361, 0, 0, 0, 0, 0, 266, + 321, 273, 265, 574, 0, 0, 0, 0, 0, 0, + 0, 0, 230, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 276, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 298, + 0, 399, 258, 0, 450, 0, 0, 0, 618, 0, + 0, 0, 0, 0, 0, 0, 363, 0, 330, 199, + 226, 0, 0, 409, 458, 470, 0, 0, 0, 254, + 0, 468, 423, 596, 234, 285, 455, 429, 466, 437, + 288, 0, 0, 467, 370, 579, 447, 593, 619, 620, + 264, 403, 605, 516, 613, 637, 227, 261, 417, 501, + 599, 490, 395, 575, 576, 329, 489, 296, 203, 367, + 625, 225, 476, 369, 243, 232, 581, 602, 290, 453, + 632, 214, 511, 591, 240, 480, 0, 0, 640, 248, + 500, 216, 588, 499, 391, 326, 327, 215, 0, 454, + 269, 294, 0, 0, 259, 412, 583, 584, 257, 641, + 229, 612, 221, 0, 611, 405, 578, 589, 392, 381, + 220, 587, 390, 380, 334, 353, 354, 281, 307, 444, + 373, 445, 306, 308, 401, 400, 402, 208, 600, 0, + 209, 0, 495, 601, 642, 449, 213, 235, 236, 238, + 0, 280, 284, 292, 295, 303, 304, 313, 365, 416, + 443, 439, 448, 0, 573, 594, 606, 617, 623, 624, + 626, 627, 628, 629, 630, 633, 631, 404, 311, 491, + 333, 371, 0, 0, 422, 469, 241, 598, 492, 201, + 0, 0, 0, 0, 255, 256, 0, 569, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 643, 644, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 659, 660, 638, 502, 508, 503, 504, + 505, 506, 507, 0, 509, 0, 0, 0, 0, 0, + 0, 585, 586, 661, 382, 482, 595, 335, 347, 350, + 340, 359, 0, 360, 336, 337, 342, 344, 345, 346, + 351, 352, 356, 362, 250, 211, 388, 396, 572, 312, + 217, 218, 219, 518, 519, 520, 521, 609, 610, 614, + 206, 459, 460, 461, 462, 293, 604, 309, 465, 464, + 331, 332, 377, 446, 534, 536, 547, 551, 553, 555, + 561, 564, 535, 537, 548, 552, 554, 556, 562, 565, + 524, 526, 528, 530, 543, 542, 539, 567, 568, 545, + 550, 529, 541, 546, 559, 566, 563, 523, 527, 531, + 540, 558, 557, 538, 549, 560, 544, 532, 525, 533, + 0, 198, 222, 366, 95, 451, 289, 639, 608, 603, + 207, 224, 0, 263, 0, 0, 0, 0, 0, 0, + 2374, 0, 0, 2373, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 200, 202, 210, 223, + 233, 237, 244, 262, 277, 279, 286, 299, 310, 318, + 319, 322, 328, 378, 384, 385, 386, 387, 406, 407, + 408, 411, 414, 
415, 418, 420, 421, 424, 428, 432, + 433, 434, 436, 438, 440, 452, 457, 471, 472, 473, + 474, 475, 478, 479, 484, 485, 486, 487, 488, 496, + 497, 510, 580, 582, 597, 615, 621, 477, 301, 302, + 441, 442, 314, 315, 635, 636, 300, 592, 622, 590, + 634, 616, 435, 376, 0, 0, 379, 282, 305, 320, + 0, 607, 498, 228, 463, 291, 252, 0, 0, 212, + 247, 231, 260, 275, 278, 324, 389, 397, 426, 431, + 297, 272, 245, 456, 242, 481, 513, 514, 515, 517, + 393, 267, 430, 1738, 0, 374, 570, 571, 316, 522, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 413, 0, 0, 0, 1740, 0, 0, 0, 0, + 271, 0, 0, 0, 0, 364, 268, 0, 0, 427, + 0, 205, 0, 483, 253, 375, 372, 577, 283, 274, + 270, 251, 317, 383, 425, 512, 419, 0, 368, 0, + 0, 493, 398, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 323, 249, + 325, 204, 410, 494, 287, 0, 0, 0, 0, 1742, + 711, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 239, 0, 0, 246, 0, 0, 0, 349, 358, 357, + 338, 339, 341, 343, 348, 355, 361, 0, 0, 0, + 0, 0, 266, 321, 273, 265, 574, 0, 0, 0, + 0, 0, 0, 0, 0, 230, 0, 0, 0, 1455, + 0, 1456, 1457, 0, 0, 0, 0, 0, 0, 0, + 276, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 298, 0, 399, 258, 0, 450, 0, 0, + 0, 618, 0, 0, 0, 0, 0, 0, 0, 363, + 0, 330, 199, 226, 0, 0, 409, 458, 470, 0, + 0, 0, 254, 0, 468, 423, 596, 234, 285, 455, + 429, 466, 437, 288, 0, 0, 467, 370, 579, 447, + 593, 619, 620, 264, 403, 605, 516, 613, 637, 227, + 261, 417, 501, 599, 490, 395, 575, 576, 329, 489, + 296, 203, 367, 625, 225, 476, 369, 243, 232, 581, + 602, 290, 453, 632, 214, 511, 591, 240, 480, 0, + 0, 640, 248, 500, 216, 588, 499, 391, 326, 327, + 215, 0, 454, 269, 294, 0, 0, 259, 412, 583, + 584, 257, 641, 229, 612, 221, 0, 611, 405, 578, + 589, 392, 381, 220, 587, 390, 380, 334, 353, 354, + 281, 307, 444, 373, 445, 306, 308, 401, 400, 402, + 208, 600, 0, 
209, 0, 495, 601, 642, 449, 213, + 235, 236, 238, 0, 280, 284, 292, 295, 303, 304, + 313, 365, 416, 443, 439, 448, 0, 573, 594, 606, + 617, 623, 624, 626, 627, 628, 629, 630, 633, 631, + 404, 311, 491, 333, 371, 0, 0, 422, 469, 241, + 598, 492, 201, 0, 0, 0, 0, 255, 256, 0, + 569, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, + 653, 654, 655, 656, 657, 658, 659, 660, 638, 502, + 508, 503, 504, 505, 506, 507, 0, 509, 0, 0, + 0, 0, 0, 0, 585, 586, 661, 382, 482, 595, + 335, 347, 350, 340, 359, 0, 360, 336, 337, 342, + 344, 345, 346, 351, 352, 356, 362, 250, 211, 388, + 396, 572, 312, 217, 218, 219, 518, 519, 520, 521, + 609, 610, 614, 206, 459, 460, 461, 462, 293, 604, + 309, 465, 464, 331, 332, 377, 446, 534, 536, 547, + 551, 553, 555, 561, 564, 535, 537, 548, 552, 554, + 556, 562, 565, 524, 526, 528, 530, 543, 542, 539, + 567, 568, 545, 550, 529, 541, 546, 559, 566, 563, + 523, 527, 531, 540, 558, 557, 538, 549, 560, 544, + 532, 525, 533, 0, 198, 222, 366, 0, 451, 289, + 639, 608, 603, 207, 224, 0, 263, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 200, + 202, 210, 223, 233, 237, 244, 262, 277, 279, 286, + 299, 310, 318, 319, 322, 328, 378, 384, 385, 386, + 387, 406, 407, 408, 411, 414, 415, 418, 420, 421, + 424, 428, 432, 433, 434, 436, 438, 440, 452, 457, + 471, 472, 473, 474, 475, 478, 479, 484, 485, 486, + 487, 488, 496, 497, 510, 580, 582, 597, 615, 621, + 477, 301, 302, 441, 442, 314, 315, 635, 636, 300, + 592, 622, 590, 634, 616, 435, 376, 0, 0, 379, + 282, 305, 320, 0, 607, 498, 228, 463, 291, 252, + 0, 0, 212, 247, 231, 260, 275, 278, 324, 389, + 397, 426, 431, 297, 272, 245, 456, 242, 481, 513, + 514, 515, 517, 393, 267, 430, 0, 394, 374, 570, + 571, 316, 87, 522, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 413, 0, 0, 0, 0, + 0, 0, 0, 0, 271, 0, 0, 0, 0, 364, + 268, 0, 0, 427, 0, 205, 0, 483, 253, 375, + 372, 577, 283, 274, 270, 251, 317, 383, 425, 512, + 419, 0, 368, 0, 0, 493, 398, 0, 0, 0, + 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, + 0, 0, 323, 249, 325, 204, 410, 494, 287, 0, + 96, 0, 1719, 0, 711, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 239, 0, 0, 246, 0, 0, + 0, 349, 358, 357, 338, 339, 341, 343, 348, 355, + 361, 0, 0, 0, 0, 0, 266, 321, 273, 265, + 574, 0, 0, 0, 0, 0, 0, 0, 0, 230, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 276, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 298, 0, 399, 258, + 0, 450, 0, 0, 0, 618, 0, 0, 0, 0, + 0, 0, 0, 363, 0, 330, 199, 226, 0, 0, + 409, 458, 470, 0, 0, 0, 254, 0, 468, 423, + 596, 234, 285, 455, 429, 466, 437, 288, 0, 0, + 467, 370, 579, 447, 593, 619, 620, 264, 403, 605, + 516, 613, 637, 227, 261, 417, 501, 599, 490, 395, + 575, 576, 329, 489, 296, 203, 367, 625, 225, 476, + 369, 243, 232, 581, 602, 290, 453, 632, 214, 511, + 591, 240, 480, 0, 0, 640, 248, 500, 216, 588, + 499, 391, 326, 327, 215, 0, 454, 269, 294, 0, + 0, 259, 412, 583, 584, 257, 641, 229, 612, 221, + 0, 611, 405, 578, 589, 392, 381, 220, 587, 390, + 380, 334, 353, 354, 281, 307, 444, 373, 445, 306, + 308, 401, 400, 402, 208, 600, 0, 209, 0, 495, + 601, 642, 449, 213, 235, 236, 238, 0, 280, 284, + 292, 295, 303, 304, 313, 365, 416, 443, 439, 448, + 0, 573, 594, 606, 617, 623, 624, 626, 627, 628, + 629, 630, 633, 631, 404, 311, 491, 333, 371, 0, + 0, 422, 469, 241, 598, 492, 201, 0, 0, 0, + 0, 255, 256, 0, 569, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 643, 644, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, + 659, 660, 638, 502, 508, 503, 504, 505, 506, 507, + 0, 509, 0, 0, 0, 0, 0, 0, 585, 586, + 661, 382, 482, 595, 335, 347, 350, 340, 359, 0, + 360, 336, 337, 342, 344, 345, 346, 351, 352, 356, + 362, 250, 211, 388, 396, 572, 312, 217, 218, 219, + 518, 519, 520, 521, 609, 610, 614, 206, 459, 460, + 461, 462, 293, 604, 309, 465, 464, 331, 332, 377, + 
446, 534, 536, 547, 551, 553, 555, 561, 564, 535, + 537, 548, 552, 554, 556, 562, 565, 524, 526, 528, + 530, 543, 542, 539, 567, 568, 545, 550, 529, 541, + 546, 559, 566, 563, 523, 527, 531, 540, 558, 557, + 538, 549, 560, 544, 532, 525, 533, 0, 198, 222, + 366, 95, 451, 289, 639, 608, 603, 207, 224, 0, + 263, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 200, 202, 210, 223, 233, 237, 244, + 262, 277, 279, 286, 299, 310, 318, 319, 322, 328, + 378, 384, 385, 386, 387, 406, 407, 408, 411, 414, + 415, 418, 420, 421, 424, 428, 432, 433, 434, 436, + 438, 440, 452, 457, 471, 472, 473, 474, 475, 478, + 479, 484, 485, 486, 487, 488, 496, 497, 510, 580, + 582, 597, 615, 621, 477, 301, 302, 441, 442, 314, + 315, 635, 636, 300, 592, 622, 590, 634, 616, 435, + 376, 0, 0, 379, 282, 305, 320, 0, 607, 498, + 228, 463, 291, 252, 0, 0, 212, 247, 231, 260, + 275, 278, 324, 389, 397, 426, 431, 297, 272, 245, + 456, 242, 481, 513, 514, 515, 517, 393, 267, 430, + 394, 0, 374, 570, 571, 316, 522, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 413, 0, + 0, 0, 0, 0, 0, 0, 0, 271, 0, 0, + 0, 0, 364, 268, 0, 0, 427, 0, 205, 0, + 483, 253, 375, 372, 577, 283, 274, 270, 251, 317, + 383, 425, 512, 419, 0, 368, 0, 0, 493, 398, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 323, 249, 325, 204, 410, + 494, 287, 0, 96, 0, 0, 0, 196, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 239, 0, 0, + 246, 0, 0, 0, 349, 358, 357, 338, 339, 341, + 343, 348, 355, 361, 0, 0, 0, 0, 0, 266, + 321, 273, 265, 574, 0, 0, 0, 0, 0, 0, + 0, 0, 230, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 276, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 298, + 0, 399, 258, 0, 450, 0, 0, 0, 618, 0, + 0, 0, 0, 0, 0, 0, 363, 0, 330, 199, + 226, 0, 0, 409, 458, 470, 0, 0, 0, 254, + 0, 468, 423, 596, 234, 285, 455, 429, 466, 437, 
+ 288, 0, 0, 467, 370, 579, 447, 593, 619, 620, + 264, 403, 605, 516, 613, 637, 227, 261, 417, 501, + 599, 490, 395, 575, 576, 329, 489, 296, 203, 367, + 625, 225, 476, 369, 243, 232, 581, 602, 290, 453, + 632, 214, 511, 591, 240, 480, 0, 0, 640, 248, + 500, 216, 588, 499, 391, 326, 327, 215, 0, 454, + 269, 294, 0, 0, 259, 412, 583, 584, 257, 641, + 229, 612, 221, 0, 611, 405, 578, 589, 392, 381, + 220, 587, 390, 380, 334, 353, 354, 281, 307, 444, + 373, 445, 306, 308, 401, 400, 402, 208, 600, 0, + 209, 0, 495, 601, 642, 449, 213, 235, 236, 238, + 0, 280, 284, 292, 295, 303, 304, 313, 365, 416, + 443, 439, 448, 0, 573, 594, 606, 617, 623, 624, + 626, 627, 628, 629, 630, 633, 631, 404, 311, 491, + 333, 371, 0, 0, 422, 469, 241, 598, 492, 201, + 0, 0, 0, 0, 255, 256, 0, 569, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 643, 644, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 659, 660, 638, 502, 508, 503, 504, + 505, 506, 507, 0, 509, 0, 0, 0, 0, 0, + 0, 585, 586, 661, 382, 482, 595, 335, 347, 350, + 340, 359, 0, 360, 336, 337, 342, 344, 345, 346, + 351, 352, 356, 362, 250, 211, 388, 396, 572, 312, + 217, 218, 219, 518, 519, 520, 521, 609, 610, 614, + 206, 459, 460, 461, 462, 293, 604, 309, 465, 464, + 331, 332, 377, 446, 534, 536, 547, 551, 553, 555, + 561, 564, 535, 537, 548, 552, 554, 556, 562, 565, + 524, 526, 528, 530, 543, 542, 539, 567, 568, 545, + 550, 529, 541, 546, 559, 566, 563, 523, 527, 531, + 540, 558, 557, 538, 549, 560, 544, 532, 525, 533, + 0, 198, 222, 366, 0, 451, 289, 639, 608, 603, + 207, 224, 0, 263, 0, 0, 0, 0, 0, 0, + 2374, 0, 0, 2373, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 200, 202, 210, 223, + 233, 237, 244, 262, 277, 279, 286, 299, 310, 318, + 319, 322, 328, 378, 384, 385, 386, 387, 406, 407, + 408, 411, 414, 415, 418, 420, 421, 424, 428, 432, + 433, 434, 436, 438, 440, 452, 457, 471, 472, 473, + 474, 475, 478, 479, 484, 485, 486, 487, 488, 496, + 497, 510, 580, 582, 597, 615, 621, 477, 301, 302, + 441, 442, 314, 315, 635, 636, 
300, 592, 622, 590, + 634, 616, 435, 376, 0, 0, 379, 282, 305, 320, + 0, 607, 498, 228, 463, 291, 252, 0, 0, 212, + 247, 231, 260, 275, 278, 324, 389, 397, 426, 431, + 297, 272, 245, 456, 242, 481, 513, 514, 515, 517, + 393, 267, 430, 394, 0, 374, 570, 571, 316, 522, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 413, 0, 0, 0, 2323, 0, 0, 0, 0, + 271, 0, 0, 0, 0, 364, 268, 0, 0, 427, + 0, 205, 0, 483, 253, 375, 372, 577, 283, 274, + 270, 251, 317, 383, 425, 512, 419, 0, 368, 0, + 0, 493, 398, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 323, 249, + 325, 204, 410, 494, 287, 0, 0, 0, 0, 1922, + 196, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 239, 0, 0, 246, 0, 0, 0, 349, 358, 357, + 338, 339, 341, 343, 348, 355, 361, 0, 0, 0, + 0, 0, 266, 321, 273, 265, 574, 0, 0, 0, + 0, 0, 0, 0, 0, 230, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 276, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 298, 0, 399, 258, 0, 450, 0, 0, + 0, 618, 0, 0, 0, 0, 0, 0, 0, 363, + 0, 330, 199, 226, 0, 0, 409, 458, 470, 0, + 0, 0, 254, 0, 468, 423, 596, 234, 285, 455, + 429, 466, 437, 288, 0, 2321, 467, 370, 579, 447, + 593, 619, 620, 264, 403, 605, 516, 613, 637, 227, + 261, 417, 501, 599, 490, 395, 575, 576, 329, 489, + 296, 203, 367, 625, 225, 476, 369, 243, 232, 581, + 602, 290, 453, 632, 214, 511, 591, 240, 480, 0, + 0, 640, 248, 500, 216, 588, 499, 391, 326, 327, + 215, 0, 454, 269, 294, 0, 0, 259, 412, 583, + 584, 257, 641, 229, 612, 221, 0, 611, 405, 578, + 589, 392, 381, 220, 587, 390, 380, 334, 353, 354, + 281, 307, 444, 373, 445, 306, 308, 401, 400, 402, + 208, 600, 0, 209, 0, 495, 601, 642, 449, 213, + 235, 236, 238, 0, 280, 284, 292, 295, 303, 304, + 313, 365, 416, 443, 439, 448, 0, 573, 594, 606, + 617, 623, 624, 626, 627, 628, 629, 630, 633, 631, + 404, 311, 491, 333, 371, 0, 0, 422, 469, 241, 
+ 598, 492, 201, 0, 0, 0, 0, 255, 256, 0, + 569, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, + 653, 654, 655, 656, 657, 658, 659, 660, 638, 502, + 508, 503, 504, 505, 506, 507, 0, 509, 0, 0, + 0, 0, 0, 0, 585, 586, 661, 382, 482, 595, + 335, 347, 350, 340, 359, 0, 360, 336, 337, 342, + 344, 345, 346, 351, 352, 356, 362, 250, 211, 388, + 396, 572, 312, 217, 218, 219, 518, 519, 520, 521, + 609, 610, 614, 206, 459, 460, 461, 462, 293, 604, + 309, 465, 464, 331, 332, 377, 446, 534, 536, 547, + 551, 553, 555, 561, 564, 535, 537, 548, 552, 554, + 556, 562, 565, 524, 526, 528, 530, 543, 542, 539, + 567, 568, 545, 550, 529, 541, 546, 559, 566, 563, + 523, 527, 531, 540, 558, 557, 538, 549, 560, 544, + 532, 525, 533, 0, 198, 222, 366, 0, 451, 289, + 639, 608, 603, 207, 224, 0, 263, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 200, + 202, 210, 223, 233, 237, 244, 262, 277, 279, 286, + 299, 310, 318, 319, 322, 328, 378, 384, 385, 386, + 387, 406, 407, 408, 411, 414, 415, 418, 420, 421, + 424, 428, 432, 433, 434, 436, 438, 440, 452, 457, + 471, 472, 473, 474, 475, 478, 479, 484, 485, 486, + 487, 488, 496, 497, 510, 580, 582, 597, 615, 621, + 477, 301, 302, 441, 442, 314, 315, 635, 636, 300, + 592, 622, 590, 634, 616, 435, 376, 0, 0, 379, + 282, 305, 320, 0, 607, 498, 228, 463, 291, 252, + 0, 0, 212, 247, 231, 260, 275, 278, 324, 389, + 397, 426, 431, 297, 272, 245, 456, 242, 481, 513, + 514, 515, 517, 393, 267, 430, 394, 0, 374, 570, + 571, 316, 522, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 413, 0, 0, 0, 0, 0, + 0, 0, 0, 271, 0, 0, 0, 0, 364, 268, + 0, 0, 427, 0, 205, 0, 483, 253, 375, 372, + 577, 283, 274, 270, 251, 317, 383, 425, 512, 419, + 0, 368, 0, 0, 493, 398, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 323, 249, 325, 204, 410, 494, 287, 0, 0, + 0, 0, 0, 711, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 239, 0, 0, 246, 0, 0, 0, + 349, 358, 357, 338, 339, 341, 343, 348, 355, 361, + 0, 0, 0, 0, 0, 266, 321, 273, 265, 574, + 0, 0, 
0, 0, 0, 0, 0, 0, 230, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 276, 0, 0, 0, 0, 0, 0, + 0, 0, 1076, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 298, 0, 399, 258, 0, + 450, 0, 0, 0, 618, 0, 0, 0, 0, 0, + 0, 0, 363, 1082, 330, 199, 226, 1080, 0, 409, + 458, 470, 0, 0, 0, 254, 0, 468, 423, 596, + 234, 285, 455, 429, 466, 437, 288, 0, 0, 467, + 370, 579, 447, 593, 619, 620, 264, 403, 605, 516, + 613, 637, 227, 261, 417, 501, 599, 490, 395, 575, + 576, 329, 489, 296, 203, 367, 625, 225, 476, 369, + 243, 232, 581, 602, 290, 453, 632, 214, 511, 591, + 240, 480, 0, 0, 640, 248, 500, 216, 588, 499, + 391, 326, 327, 215, 0, 454, 269, 294, 0, 0, + 259, 412, 583, 584, 257, 641, 229, 612, 221, 0, + 611, 405, 578, 589, 392, 381, 220, 587, 390, 380, + 334, 353, 354, 281, 307, 444, 373, 445, 306, 308, + 401, 400, 402, 208, 600, 0, 209, 0, 495, 601, + 642, 449, 213, 235, 236, 238, 0, 280, 284, 292, + 295, 303, 304, 313, 365, 416, 443, 439, 448, 0, + 573, 594, 606, 617, 623, 624, 626, 627, 628, 629, + 630, 633, 631, 404, 311, 491, 333, 371, 0, 0, + 422, 469, 241, 598, 492, 201, 0, 0, 0, 0, + 255, 256, 0, 569, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, + 660, 638, 502, 508, 503, 504, 505, 506, 507, 0, + 509, 0, 0, 0, 0, 0, 0, 585, 586, 661, + 382, 482, 595, 335, 347, 350, 340, 359, 0, 360, + 336, 337, 342, 344, 345, 346, 351, 352, 356, 362, + 250, 211, 388, 396, 572, 312, 217, 218, 219, 518, + 519, 520, 521, 609, 610, 614, 206, 459, 460, 461, + 462, 293, 604, 309, 465, 464, 331, 332, 377, 446, + 534, 536, 547, 551, 553, 555, 561, 564, 535, 537, + 548, 552, 554, 556, 562, 565, 524, 526, 528, 530, + 543, 542, 539, 567, 568, 545, 550, 529, 541, 546, + 559, 566, 563, 523, 527, 531, 540, 558, 557, 538, + 549, 560, 544, 532, 525, 
533, 0, 198, 222, 366, + 0, 451, 289, 639, 608, 603, 207, 224, 0, 263, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 200, 202, 210, 223, 233, 237, 244, 262, + 277, 279, 286, 299, 310, 318, 319, 322, 328, 378, + 384, 385, 386, 387, 406, 407, 408, 411, 414, 415, + 418, 420, 421, 424, 428, 432, 433, 434, 436, 438, + 440, 452, 457, 471, 472, 473, 474, 475, 478, 479, + 484, 485, 486, 487, 488, 496, 497, 510, 580, 582, + 597, 615, 621, 477, 301, 302, 441, 442, 314, 315, + 635, 636, 300, 592, 622, 590, 634, 616, 435, 376, + 0, 0, 379, 282, 305, 320, 0, 607, 498, 228, + 463, 291, 252, 0, 0, 212, 247, 231, 260, 275, + 278, 324, 389, 397, 426, 431, 297, 272, 245, 456, + 242, 481, 513, 514, 515, 517, 393, 267, 430, 394, + 0, 374, 570, 571, 316, 522, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 413, 0, 0, + 0, 2323, 0, 0, 0, 0, 271, 0, 0, 0, + 0, 364, 268, 0, 0, 427, 0, 205, 0, 483, + 253, 375, 372, 577, 283, 274, 270, 251, 317, 383, + 425, 512, 419, 0, 368, 0, 0, 493, 398, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 323, 249, 325, 204, 410, 494, + 287, 0, 0, 0, 0, 1922, 196, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 239, 0, 0, 246, + 0, 0, 0, 349, 358, 357, 338, 339, 341, 343, + 348, 355, 361, 0, 0, 0, 0, 0, 266, 321, + 273, 265, 574, 0, 0, 0, 0, 0, 0, 0, + 0, 230, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 276, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 298, 0, + 399, 258, 0, 450, 0, 0, 0, 618, 0, 0, + 0, 0, 0, 0, 0, 363, 0, 330, 199, 226, + 0, 0, 409, 458, 470, 0, 0, 0, 254, 0, + 468, 423, 596, 234, 285, 455, 429, 466, 437, 288, + 0, 0, 467, 370, 579, 447, 593, 619, 620, 264, + 403, 605, 516, 613, 637, 227, 261, 417, 501, 599, + 490, 395, 575, 576, 329, 489, 296, 203, 367, 625, + 225, 476, 369, 243, 232, 581, 602, 290, 453, 632, + 214, 511, 591, 240, 
480, 0, 0, 640, 248, 500, + 216, 588, 499, 391, 326, 327, 215, 0, 454, 269, + 294, 0, 0, 259, 412, 583, 584, 257, 641, 229, + 612, 221, 0, 611, 405, 578, 589, 392, 381, 220, + 587, 390, 380, 334, 353, 354, 281, 307, 444, 373, + 445, 306, 308, 401, 400, 402, 208, 600, 0, 209, + 0, 495, 601, 642, 449, 213, 235, 236, 238, 0, + 280, 284, 292, 295, 303, 304, 313, 365, 416, 443, + 439, 448, 0, 573, 594, 606, 617, 623, 624, 626, + 627, 628, 629, 630, 633, 631, 404, 311, 491, 333, + 371, 0, 0, 422, 469, 241, 598, 492, 201, 0, + 0, 0, 0, 255, 256, 0, 569, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 643, 644, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 638, 502, 508, 503, 504, 505, + 506, 507, 0, 509, 0, 0, 0, 0, 0, 0, + 585, 586, 661, 382, 482, 595, 335, 347, 350, 340, + 359, 0, 360, 336, 337, 342, 344, 345, 346, 351, + 352, 356, 362, 250, 211, 388, 396, 572, 312, 217, + 218, 219, 518, 519, 520, 521, 609, 610, 614, 206, + 459, 460, 461, 462, 293, 604, 309, 465, 464, 331, + 332, 377, 446, 534, 536, 547, 551, 553, 555, 561, + 564, 535, 537, 548, 552, 554, 556, 562, 565, 524, + 526, 528, 530, 543, 542, 539, 567, 568, 545, 550, + 529, 541, 546, 559, 566, 563, 523, 527, 531, 540, + 558, 557, 538, 549, 560, 544, 532, 525, 533, 0, + 198, 222, 366, 0, 451, 289, 639, 608, 603, 207, + 224, 0, 263, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 200, 202, 210, 223, 233, + 237, 244, 262, 277, 279, 286, 299, 310, 318, 319, + 322, 328, 378, 384, 385, 386, 387, 406, 407, 408, + 411, 414, 415, 418, 420, 421, 424, 428, 432, 433, + 434, 436, 438, 440, 452, 457, 471, 472, 473, 474, + 475, 478, 479, 484, 485, 486, 487, 488, 496, 497, + 510, 580, 582, 597, 615, 621, 477, 301, 302, 441, + 442, 314, 315, 635, 636, 300, 592, 622, 590, 634, + 616, 435, 376, 0, 0, 379, 282, 305, 320, 0, + 607, 498, 228, 463, 291, 252, 0, 0, 212, 247, + 231, 260, 275, 278, 324, 389, 397, 426, 431, 297, + 272, 245, 456, 242, 481, 513, 514, 515, 517, 393, + 267, 430, 394, 
0, 374, 570, 571, 316, 522, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 413, 0, 0, 0, 0, 0, 0, 0, 0, 271, + 0, 0, 0, 0, 364, 268, 0, 0, 427, 0, + 205, 0, 483, 253, 375, 372, 577, 283, 274, 270, + 251, 317, 383, 425, 512, 419, 0, 368, 0, 0, + 493, 398, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 323, 249, 325, + 204, 410, 494, 287, 0, 0, 0, 1719, 0, 711, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 239, + 0, 0, 246, 0, 0, 0, 349, 358, 357, 338, + 339, 341, 343, 348, 355, 361, 0, 0, 0, 0, + 0, 266, 321, 273, 265, 574, 0, 0, 0, 0, + 0, 0, 0, 0, 230, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 276, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 298, 0, 399, 258, 0, 450, 0, 0, 0, + 618, 0, 0, 0, 3909, 0, 0, 0, 363, 0, + 330, 199, 226, 0, 0, 409, 458, 470, 0, 0, + 0, 254, 0, 468, 423, 596, 234, 285, 455, 429, + 466, 437, 288, 0, 0, 467, 370, 579, 447, 593, + 619, 620, 264, 403, 605, 516, 613, 637, 227, 261, + 417, 501, 599, 490, 395, 575, 576, 329, 489, 296, + 203, 367, 625, 225, 476, 369, 243, 232, 581, 602, + 290, 453, 632, 214, 511, 591, 240, 480, 0, 0, + 640, 248, 500, 216, 588, 499, 391, 326, 327, 215, + 0, 454, 269, 294, 0, 0, 259, 412, 583, 584, + 257, 641, 229, 612, 221, 0, 611, 405, 578, 589, + 392, 381, 220, 587, 390, 380, 334, 353, 354, 281, + 307, 444, 373, 445, 306, 308, 401, 400, 402, 208, + 600, 0, 209, 0, 495, 601, 642, 449, 213, 235, + 236, 238, 0, 280, 284, 292, 295, 303, 304, 313, + 365, 416, 443, 439, 448, 0, 573, 594, 606, 617, + 623, 624, 626, 627, 628, 629, 630, 633, 631, 404, + 311, 491, 333, 371, 0, 0, 422, 469, 241, 598, + 492, 201, 0, 0, 0, 0, 255, 256, 0, 569, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 643, + 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 659, 660, 638, 502, 508, + 503, 504, 505, 506, 507, 0, 509, 0, 0, 0, + 0, 0, 0, 
585, 586, 661, 382, 482, 595, 335, + 347, 350, 340, 359, 0, 360, 336, 337, 342, 344, + 345, 346, 351, 352, 356, 362, 250, 211, 388, 396, + 572, 312, 217, 218, 219, 518, 519, 520, 521, 609, + 610, 614, 206, 459, 460, 461, 462, 293, 604, 309, + 465, 464, 331, 332, 377, 446, 534, 536, 547, 551, + 553, 555, 561, 564, 535, 537, 548, 552, 554, 556, + 562, 565, 524, 526, 528, 530, 543, 542, 539, 567, + 568, 545, 550, 529, 541, 546, 559, 566, 563, 523, + 527, 531, 540, 558, 557, 538, 549, 560, 544, 532, + 525, 533, 0, 198, 222, 366, 0, 451, 289, 639, + 608, 603, 207, 224, 0, 263, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 200, 202, + 210, 223, 233, 237, 244, 262, 277, 279, 286, 299, + 310, 318, 319, 322, 328, 378, 384, 385, 386, 387, + 406, 407, 408, 411, 414, 415, 418, 420, 421, 424, + 428, 432, 433, 434, 436, 438, 440, 452, 457, 471, + 472, 473, 474, 475, 478, 479, 484, 485, 486, 487, + 488, 496, 497, 510, 580, 582, 597, 615, 621, 477, + 301, 302, 441, 442, 314, 315, 635, 636, 300, 592, + 622, 590, 634, 616, 435, 376, 0, 0, 379, 282, + 305, 320, 0, 607, 498, 228, 463, 291, 252, 0, + 0, 212, 247, 231, 260, 275, 278, 324, 389, 397, + 426, 431, 297, 272, 245, 456, 242, 481, 513, 514, + 515, 517, 393, 267, 430, 394, 0, 374, 570, 571, + 316, 522, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 413, 0, 0, 0, 0, 0, 0, + 0, 0, 271, 0, 0, 0, 0, 364, 268, 0, + 0, 427, 0, 205, 0, 483, 253, 375, 372, 577, + 283, 274, 270, 251, 317, 383, 425, 512, 419, 0, + 368, 0, 0, 493, 398, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 323, 249, 325, 204, 410, 494, 287, 0, 0, 0, + 0, 2083, 711, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 239, 0, 0, 246, 0, 0, 0, 349, + 358, 357, 338, 339, 341, 343, 348, 355, 361, 0, + 0, 0, 0, 0, 266, 321, 273, 265, 574, 0, + 0, 0, 0, 0, 0, 0, 0, 230, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 276, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 2084, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 298, 0, 399, 258, 0, 450, + 0, 0, 0, 618, 0, 0, 0, 0, 0, 0, + 0, 363, 0, 330, 199, 226, 0, 0, 409, 458, + 470, 0, 0, 0, 254, 0, 468, 423, 596, 234, + 285, 455, 429, 466, 437, 288, 0, 0, 467, 370, + 579, 447, 593, 619, 620, 264, 403, 605, 516, 613, + 637, 227, 261, 417, 501, 599, 490, 395, 575, 576, + 329, 489, 296, 203, 367, 625, 225, 476, 369, 243, + 232, 581, 602, 290, 453, 632, 214, 511, 591, 240, + 480, 0, 0, 640, 248, 500, 216, 588, 499, 391, + 326, 327, 215, 0, 454, 269, 294, 0, 0, 259, + 412, 583, 584, 257, 641, 229, 612, 221, 0, 611, + 405, 578, 589, 392, 381, 220, 587, 390, 380, 334, + 353, 354, 281, 307, 444, 373, 445, 306, 308, 401, + 400, 402, 208, 600, 0, 209, 0, 495, 601, 642, + 449, 213, 235, 236, 238, 0, 280, 284, 292, 295, + 303, 304, 313, 365, 416, 443, 439, 448, 0, 573, + 594, 606, 617, 623, 624, 626, 627, 628, 629, 630, + 633, 631, 404, 311, 491, 333, 371, 0, 0, 422, + 469, 241, 598, 492, 201, 0, 0, 0, 0, 255, + 256, 0, 569, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 643, 644, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, + 638, 502, 508, 503, 504, 505, 506, 507, 0, 509, + 0, 0, 0, 0, 0, 0, 585, 586, 661, 382, + 482, 595, 335, 347, 350, 340, 359, 0, 360, 336, + 337, 342, 344, 345, 346, 351, 352, 356, 362, 250, + 211, 388, 396, 572, 312, 217, 218, 219, 518, 519, + 520, 521, 609, 610, 614, 206, 459, 460, 461, 462, + 293, 604, 309, 465, 464, 331, 332, 377, 446, 534, + 536, 547, 551, 553, 555, 561, 564, 535, 537, 548, + 552, 554, 556, 562, 565, 524, 526, 528, 530, 543, + 542, 539, 567, 568, 545, 550, 529, 541, 546, 559, + 566, 563, 523, 527, 531, 540, 558, 557, 538, 549, + 560, 544, 532, 525, 533, 0, 198, 222, 366, 0, + 451, 289, 639, 608, 603, 207, 224, 0, 263, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 200, 202, 210, 223, 233, 237, 244, 262, 277, + 279, 286, 299, 310, 318, 319, 322, 328, 378, 384, + 
385, 386, 387, 406, 407, 408, 411, 414, 415, 418, + 420, 421, 424, 428, 432, 433, 434, 436, 438, 440, + 452, 457, 471, 472, 473, 474, 475, 478, 479, 484, + 485, 486, 487, 488, 496, 497, 510, 580, 582, 597, + 615, 621, 477, 301, 302, 441, 442, 314, 315, 635, + 636, 300, 592, 622, 590, 634, 616, 435, 376, 0, + 0, 379, 282, 305, 320, 0, 607, 498, 228, 463, + 291, 252, 0, 0, 212, 247, 231, 260, 275, 278, + 324, 389, 397, 426, 431, 297, 272, 245, 456, 242, + 481, 513, 514, 515, 517, 393, 267, 430, 394, 0, + 374, 570, 571, 316, 522, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 413, 0, 0, 0, + 0, 0, 0, 0, 0, 271, 0, 0, 0, 0, + 364, 268, 0, 0, 427, 0, 205, 0, 483, 253, + 375, 372, 577, 283, 274, 270, 251, 317, 383, 425, + 512, 419, 0, 368, 0, 0, 493, 398, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 323, 249, 325, 204, 410, 494, 287, + 0, 0, 0, 0, 2818, 711, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 239, 0, 0, 246, 0, + 0, 0, 349, 358, 357, 338, 339, 341, 343, 348, + 355, 361, 0, 0, 0, 0, 0, 266, 321, 273, + 265, 574, 0, 0, 0, 0, 0, 0, 0, 0, + 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 276, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 2819, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 298, 0, 399, + 258, 0, 450, 0, 0, 0, 618, 0, 0, 0, + 0, 0, 0, 0, 363, 0, 330, 199, 226, 0, + 0, 409, 458, 470, 0, 0, 0, 254, 0, 468, + 423, 596, 234, 285, 455, 429, 466, 437, 288, 0, + 0, 467, 370, 579, 447, 593, 619, 620, 264, 403, + 605, 516, 613, 637, 227, 261, 417, 501, 599, 490, + 395, 575, 576, 329, 489, 296, 203, 367, 625, 225, + 476, 369, 243, 232, 581, 602, 290, 453, 632, 214, + 511, 591, 240, 480, 0, 0, 640, 248, 500, 216, + 588, 499, 391, 326, 327, 215, 0, 454, 269, 294, + 0, 0, 259, 412, 583, 584, 257, 641, 229, 612, + 221, 0, 611, 405, 578, 589, 392, 381, 220, 587, + 390, 380, 334, 353, 354, 281, 307, 444, 373, 445, + 306, 308, 
401, 400, 402, 208, 600, 0, 209, 0, + 495, 601, 642, 449, 213, 235, 236, 238, 0, 280, + 284, 292, 295, 303, 304, 313, 365, 416, 443, 439, + 448, 0, 573, 594, 606, 617, 623, 624, 626, 627, + 628, 629, 630, 633, 631, 404, 311, 491, 333, 371, + 0, 0, 422, 469, 241, 598, 492, 201, 0, 0, + 0, 0, 255, 256, 0, 569, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 643, 644, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 638, 502, 508, 503, 504, 505, 506, + 507, 0, 509, 0, 0, 0, 0, 0, 0, 585, + 586, 661, 382, 482, 595, 335, 347, 350, 340, 359, + 0, 360, 336, 337, 342, 344, 345, 346, 351, 352, + 356, 362, 250, 211, 388, 396, 572, 312, 217, 218, + 219, 518, 519, 520, 521, 609, 610, 614, 206, 459, + 460, 461, 462, 293, 604, 309, 465, 464, 331, 332, + 377, 446, 534, 536, 547, 551, 553, 555, 561, 564, + 535, 537, 548, 552, 554, 556, 562, 565, 524, 526, + 528, 530, 543, 542, 539, 567, 568, 545, 550, 529, + 541, 546, 559, 566, 563, 523, 527, 531, 540, 558, + 557, 538, 549, 560, 544, 532, 525, 533, 0, 198, + 222, 366, 0, 451, 289, 639, 608, 603, 207, 224, + 0, 263, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 200, 202, 210, 223, 233, 237, + 244, 262, 277, 279, 286, 299, 310, 318, 319, 322, + 328, 378, 384, 385, 386, 387, 406, 407, 408, 411, + 414, 415, 418, 420, 421, 424, 428, 432, 433, 434, + 436, 438, 440, 452, 457, 471, 472, 473, 474, 475, + 478, 479, 484, 485, 486, 487, 488, 496, 497, 510, + 580, 582, 597, 615, 621, 477, 301, 302, 441, 442, + 314, 315, 635, 636, 300, 592, 622, 590, 634, 616, + 435, 376, 0, 0, 379, 282, 305, 320, 0, 607, + 498, 228, 463, 291, 252, 0, 0, 212, 247, 231, + 260, 275, 278, 324, 389, 397, 426, 431, 297, 272, + 245, 456, 242, 481, 513, 514, 515, 517, 393, 267, + 430, 394, 0, 374, 570, 571, 316, 522, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 413, + 0, 0, 0, 0, 0, 0, 0, 0, 271, 0, + 0, 0, 0, 364, 268, 0, 0, 427, 0, 205, + 0, 483, 253, 375, 372, 577, 283, 274, 270, 251, + 317, 383, 425, 512, 419, 0, 368, 0, 0, 493, + 
398, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 323, 249, 325, 204, + 410, 494, 287, 0, 0, 0, 0, 0, 711, 0, + 0, 0, 0, 2803, 0, 0, 0, 0, 239, 0, + 0, 246, 2804, 0, 0, 349, 358, 357, 338, 339, + 341, 343, 348, 355, 361, 0, 0, 0, 0, 0, + 266, 321, 273, 265, 574, 0, 0, 0, 0, 0, + 0, 0, 0, 230, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 276, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 298, 0, 399, 258, 0, 450, 0, 0, 0, 618, + 0, 0, 0, 0, 0, 0, 0, 363, 0, 330, + 199, 226, 0, 0, 409, 458, 470, 0, 0, 0, + 254, 0, 468, 423, 596, 234, 285, 455, 429, 466, + 437, 288, 0, 0, 467, 370, 579, 447, 593, 619, + 620, 264, 403, 605, 516, 613, 637, 227, 261, 417, + 501, 599, 490, 395, 575, 576, 329, 489, 296, 203, + 367, 625, 225, 476, 369, 243, 232, 581, 602, 290, + 453, 632, 214, 511, 591, 240, 480, 0, 0, 640, + 248, 500, 216, 588, 499, 391, 326, 327, 215, 0, + 454, 269, 294, 0, 0, 259, 412, 583, 584, 257, + 641, 229, 612, 221, 0, 611, 405, 578, 589, 392, + 381, 220, 587, 390, 380, 334, 353, 354, 281, 307, + 444, 373, 445, 306, 308, 401, 400, 402, 208, 600, + 0, 209, 0, 495, 601, 642, 449, 213, 235, 236, + 238, 0, 280, 284, 292, 295, 303, 304, 313, 365, + 416, 443, 439, 448, 0, 573, 594, 606, 617, 623, + 624, 626, 627, 628, 629, 630, 633, 631, 404, 311, + 491, 333, 371, 0, 0, 422, 469, 241, 598, 492, + 201, 0, 0, 0, 0, 255, 256, 0, 569, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 643, 644, + 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 659, 660, 638, 502, 508, 503, + 504, 505, 506, 507, 0, 509, 0, 0, 0, 0, + 0, 0, 585, 586, 661, 382, 482, 595, 335, 347, + 350, 340, 359, 0, 360, 336, 337, 342, 344, 345, + 346, 351, 352, 356, 362, 250, 211, 388, 396, 572, + 312, 217, 218, 219, 518, 519, 520, 521, 609, 610, + 614, 206, 459, 460, 461, 462, 293, 604, 309, 
465, + 464, 331, 332, 377, 446, 534, 536, 547, 551, 553, + 555, 561, 564, 535, 537, 548, 552, 554, 556, 562, + 565, 524, 526, 528, 530, 543, 542, 539, 567, 568, + 545, 550, 529, 541, 546, 559, 566, 563, 523, 527, + 531, 540, 558, 557, 538, 549, 560, 544, 532, 525, + 533, 0, 198, 222, 366, 0, 451, 289, 639, 608, + 603, 207, 224, 0, 263, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 200, 202, 210, + 223, 233, 237, 244, 262, 277, 279, 286, 299, 310, + 318, 319, 322, 328, 378, 384, 385, 386, 387, 406, + 407, 408, 411, 414, 415, 418, 420, 421, 424, 428, + 432, 433, 434, 436, 438, 440, 452, 457, 471, 472, + 473, 474, 475, 478, 479, 484, 485, 486, 487, 488, + 496, 497, 510, 580, 582, 597, 615, 621, 477, 301, + 302, 441, 442, 314, 315, 635, 636, 300, 592, 622, + 590, 634, 616, 435, 376, 0, 0, 379, 282, 305, + 320, 0, 607, 498, 228, 463, 291, 252, 0, 0, + 212, 247, 231, 260, 275, 278, 324, 389, 397, 426, + 431, 297, 272, 245, 456, 242, 481, 513, 514, 515, + 517, 393, 267, 430, 394, 0, 374, 570, 571, 316, + 522, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 413, 0, 0, 0, 0, 0, 0, 0, + 0, 271, 1762, 0, 0, 0, 364, 268, 0, 0, + 427, 0, 205, 0, 483, 253, 375, 372, 577, 283, + 274, 270, 251, 317, 383, 425, 512, 419, 0, 368, + 0, 0, 493, 398, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 323, + 249, 325, 204, 410, 494, 287, 0, 0, 0, 0, + 1761, 711, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 239, 0, 0, 246, 0, 0, 0, 349, 358, + 357, 338, 339, 341, 343, 348, 355, 361, 0, 0, + 0, 0, 0, 266, 321, 273, 265, 574, 0, 0, + 0, 0, 0, 0, 0, 0, 230, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 276, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 298, 0, 399, 258, 0, 450, 0, + 0, 0, 618, 0, 0, 0, 0, 0, 0, 0, + 363, 0, 330, 199, 226, 0, 0, 409, 458, 470, + 0, 0, 0, 254, 0, 468, 423, 
596, 234, 285, + 455, 429, 466, 437, 288, 0, 0, 467, 370, 579, + 447, 593, 619, 620, 264, 403, 605, 516, 613, 637, + 227, 261, 417, 501, 599, 490, 395, 575, 576, 329, + 489, 296, 203, 367, 625, 225, 476, 369, 243, 232, + 581, 602, 290, 453, 632, 214, 511, 591, 240, 480, + 0, 0, 640, 248, 500, 216, 588, 499, 391, 326, + 327, 215, 0, 454, 269, 294, 0, 0, 259, 412, + 583, 584, 257, 641, 229, 612, 221, 0, 611, 405, + 578, 589, 392, 381, 220, 587, 390, 380, 334, 353, + 354, 281, 307, 444, 373, 445, 306, 308, 401, 400, + 402, 208, 600, 0, 209, 0, 495, 601, 642, 449, + 213, 235, 236, 238, 0, 280, 284, 292, 295, 303, + 304, 313, 365, 416, 443, 439, 448, 0, 573, 594, + 606, 617, 623, 624, 626, 627, 628, 629, 630, 633, + 631, 404, 311, 491, 333, 371, 0, 0, 422, 469, + 241, 598, 492, 201, 0, 0, 0, 0, 255, 256, + 0, 569, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 643, 644, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 659, 660, 638, + 502, 508, 503, 504, 505, 506, 507, 0, 509, 0, + 0, 0, 0, 0, 0, 585, 586, 661, 382, 482, + 595, 335, 347, 350, 340, 359, 0, 360, 336, 337, + 342, 344, 345, 346, 351, 352, 356, 362, 250, 211, + 388, 396, 572, 312, 217, 218, 219, 518, 519, 520, + 521, 609, 610, 614, 206, 459, 460, 461, 462, 293, + 604, 309, 465, 464, 331, 332, 377, 446, 534, 536, + 547, 551, 553, 555, 561, 564, 535, 537, 548, 552, + 554, 556, 562, 565, 524, 526, 528, 530, 543, 542, + 539, 567, 568, 545, 550, 529, 541, 546, 559, 566, + 563, 523, 527, 531, 540, 558, 557, 538, 549, 560, + 544, 532, 525, 533, 0, 198, 222, 366, 0, 451, + 289, 639, 608, 603, 207, 224, 0, 263, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 200, 202, 210, 223, 233, 237, 244, 262, 277, 279, + 286, 299, 310, 318, 319, 322, 328, 378, 384, 385, + 386, 387, 406, 407, 408, 411, 414, 415, 418, 420, + 421, 424, 428, 432, 433, 434, 436, 438, 440, 452, + 457, 471, 472, 473, 474, 475, 478, 479, 484, 485, + 486, 487, 488, 496, 497, 510, 580, 582, 597, 615, + 621, 477, 301, 302, 441, 
442, 314, 315, 635, 636, + 300, 592, 622, 590, 634, 616, 435, 376, 0, 0, + 379, 282, 305, 320, 0, 607, 498, 228, 463, 291, + 252, 0, 0, 212, 247, 231, 260, 275, 278, 324, + 389, 397, 426, 431, 297, 272, 245, 456, 242, 481, + 513, 514, 515, 517, 393, 267, 430, 394, 0, 374, + 570, 571, 316, 522, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 413, 0, 0, 0, 0, + 0, 0, 0, 0, 271, 0, 0, 0, 0, 364, + 268, 0, 0, 427, 0, 205, 0, 483, 253, 375, + 372, 577, 283, 274, 270, 251, 317, 383, 425, 512, + 419, 0, 368, 0, 0, 493, 398, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 323, 249, 325, 204, 410, 494, 287, 0, + 0, 0, 0, 0, 713, 714, 715, 0, 0, 0, + 0, 0, 0, 0, 239, 0, 0, 246, 0, 0, + 0, 349, 358, 357, 338, 339, 341, 343, 348, 355, + 361, 0, 0, 0, 0, 0, 266, 321, 273, 265, + 574, 0, 0, 0, 0, 0, 0, 0, 0, 230, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 276, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 298, 0, 399, 258, + 0, 450, 0, 0, 0, 618, 0, 0, 0, 0, + 0, 0, 0, 363, 0, 330, 199, 226, 0, 0, + 409, 458, 470, 0, 0, 0, 254, 0, 468, 423, + 596, 234, 285, 455, 429, 466, 437, 288, 0, 0, + 467, 370, 579, 447, 593, 619, 620, 264, 403, 605, + 516, 613, 637, 227, 261, 417, 501, 599, 490, 395, + 575, 576, 329, 489, 296, 203, 367, 625, 225, 476, + 369, 243, 232, 581, 602, 290, 453, 632, 214, 511, + 591, 240, 480, 0, 0, 640, 248, 500, 216, 588, + 499, 391, 326, 327, 215, 0, 454, 269, 294, 0, + 0, 259, 412, 583, 584, 257, 641, 229, 612, 221, + 0, 611, 405, 578, 589, 392, 381, 220, 587, 390, + 380, 334, 353, 354, 281, 307, 444, 373, 445, 306, + 308, 401, 400, 402, 208, 600, 0, 209, 0, 495, + 601, 642, 449, 213, 235, 236, 238, 0, 280, 284, + 292, 295, 303, 304, 313, 365, 416, 443, 439, 448, + 0, 573, 594, 606, 617, 623, 624, 626, 627, 628, + 629, 630, 633, 631, 404, 311, 491, 333, 371, 
0, + 0, 422, 469, 241, 598, 492, 201, 0, 0, 0, + 0, 255, 256, 0, 569, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 643, 644, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, + 659, 660, 638, 502, 508, 503, 504, 505, 506, 507, + 0, 509, 0, 0, 0, 0, 0, 0, 585, 586, + 661, 382, 482, 595, 335, 347, 350, 340, 359, 0, + 360, 336, 337, 342, 344, 345, 346, 351, 352, 356, + 362, 250, 211, 388, 396, 572, 312, 217, 218, 219, + 518, 519, 520, 521, 609, 610, 614, 206, 459, 460, + 461, 462, 293, 604, 309, 465, 464, 331, 332, 377, + 446, 534, 536, 547, 551, 553, 555, 561, 564, 535, + 537, 548, 552, 554, 556, 562, 565, 524, 526, 528, + 530, 543, 542, 539, 567, 568, 545, 550, 529, 541, + 546, 559, 566, 563, 523, 527, 531, 540, 558, 557, + 538, 549, 560, 544, 532, 525, 533, 0, 198, 222, + 366, 0, 451, 289, 639, 608, 603, 207, 224, 0, + 263, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 200, 202, 210, 223, 233, 237, 244, + 262, 277, 279, 286, 299, 310, 318, 319, 322, 328, + 378, 384, 385, 386, 387, 406, 407, 408, 411, 414, + 415, 418, 420, 421, 424, 428, 432, 433, 434, 436, + 438, 440, 452, 457, 471, 472, 473, 474, 475, 478, + 479, 484, 485, 486, 487, 488, 496, 497, 510, 580, + 582, 597, 615, 621, 477, 301, 302, 441, 442, 314, + 315, 635, 636, 300, 592, 622, 590, 634, 616, 435, + 376, 0, 0, 379, 282, 305, 320, 0, 607, 498, + 228, 463, 291, 252, 0, 0, 212, 247, 231, 260, + 275, 278, 324, 389, 397, 426, 431, 297, 272, 245, + 456, 242, 481, 513, 514, 515, 517, 393, 267, 430, + 394, 0, 374, 570, 571, 316, 522, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 413, 0, + 0, 0, 0, 0, 0, 0, 0, 271, 0, 0, + 0, 0, 364, 268, 0, 0, 427, 0, 205, 0, + 483, 253, 375, 372, 577, 283, 274, 270, 251, 317, + 383, 425, 512, 419, 0, 368, 0, 0, 493, 398, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 323, 249, 325, 204, 410, + 494, 287, 0, 0, 0, 0, 0, 711, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 239, 0, 0, + 246, 0, 0, 0, 349, 358, 357, 338, 339, 341, + 343, 348, 355, 361, 0, 0, 0, 0, 0, 266, + 321, 
273, 265, 574, 0, 0, 0, 0, 0, 0, + 0, 0, 230, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 276, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 298, + 0, 399, 258, 0, 450, 0, 0, 0, 618, 0, + 0, 0, 4043, 0, 0, 0, 363, 0, 330, 199, + 226, 0, 0, 409, 458, 470, 0, 0, 0, 254, + 0, 468, 423, 596, 234, 285, 455, 429, 466, 437, + 288, 0, 0, 467, 370, 579, 447, 593, 619, 620, + 264, 403, 605, 516, 613, 637, 227, 261, 417, 501, + 599, 490, 395, 575, 576, 329, 489, 296, 203, 367, + 625, 225, 476, 369, 243, 232, 581, 602, 290, 453, + 632, 214, 511, 591, 240, 480, 0, 0, 640, 248, + 500, 216, 588, 499, 391, 326, 327, 215, 0, 454, + 269, 294, 0, 0, 259, 412, 583, 584, 257, 641, + 229, 612, 221, 0, 611, 405, 578, 589, 392, 381, + 220, 587, 390, 380, 334, 353, 354, 281, 307, 444, + 373, 445, 306, 308, 401, 400, 402, 208, 600, 0, + 209, 0, 495, 601, 642, 449, 213, 235, 236, 238, + 0, 280, 284, 292, 295, 303, 304, 313, 365, 416, + 443, 439, 448, 0, 573, 594, 606, 617, 623, 624, + 626, 627, 628, 629, 630, 633, 631, 404, 311, 491, + 333, 371, 0, 0, 422, 469, 241, 598, 492, 201, + 0, 0, 0, 0, 255, 256, 0, 569, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 643, 644, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 659, 660, 638, 502, 508, 503, 504, + 505, 506, 507, 0, 509, 0, 0, 0, 0, 0, + 0, 585, 586, 661, 382, 482, 595, 335, 347, 350, + 340, 359, 0, 360, 336, 337, 342, 344, 345, 346, + 351, 352, 356, 362, 250, 211, 388, 396, 572, 312, + 217, 218, 219, 518, 519, 520, 521, 609, 610, 614, + 206, 459, 460, 461, 462, 293, 604, 309, 465, 464, + 331, 332, 377, 446, 534, 536, 547, 551, 553, 555, + 561, 564, 535, 537, 548, 552, 554, 556, 562, 565, + 524, 526, 528, 530, 543, 542, 539, 567, 568, 545, + 550, 529, 541, 546, 559, 566, 563, 523, 527, 531, + 540, 558, 557, 538, 549, 560, 
544, 532, 525, 533, + 0, 198, 222, 366, 0, 451, 289, 639, 608, 603, + 207, 224, 0, 263, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 200, 202, 210, 223, + 233, 237, 244, 262, 277, 279, 286, 299, 310, 318, + 319, 322, 328, 378, 384, 385, 386, 387, 406, 407, + 408, 411, 414, 415, 418, 420, 421, 424, 428, 432, + 433, 434, 436, 438, 440, 452, 457, 471, 472, 473, + 474, 475, 478, 479, 484, 485, 486, 487, 488, 496, + 497, 510, 580, 582, 597, 615, 621, 477, 301, 302, + 441, 442, 314, 315, 635, 636, 300, 592, 622, 590, + 634, 616, 435, 376, 0, 0, 379, 282, 305, 320, + 0, 607, 498, 228, 463, 291, 252, 0, 0, 212, + 247, 231, 260, 275, 278, 324, 389, 397, 426, 431, + 297, 272, 245, 456, 242, 481, 513, 514, 515, 517, + 393, 267, 430, 394, 0, 374, 570, 571, 316, 522, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 413, 0, 0, 0, 0, 0, 0, 0, 0, + 271, 0, 0, 0, 0, 364, 268, 0, 0, 427, + 0, 205, 0, 483, 253, 375, 372, 577, 283, 274, + 270, 251, 317, 383, 425, 512, 419, 0, 368, 0, + 0, 493, 398, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 323, 249, + 325, 204, 410, 494, 287, 0, 0, 0, 0, 1922, + 196, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 239, 0, 0, 246, 0, 0, 0, 349, 358, 357, + 338, 339, 341, 343, 348, 355, 361, 0, 0, 0, + 0, 0, 266, 321, 273, 265, 574, 0, 0, 0, + 0, 0, 0, 0, 0, 230, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 276, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 298, 0, 399, 258, 0, 450, 0, 0, + 0, 618, 0, 0, 0, 0, 0, 0, 0, 363, + 0, 330, 199, 226, 0, 0, 409, 458, 470, 0, + 0, 0, 254, 0, 468, 423, 596, 234, 285, 455, + 429, 466, 437, 288, 0, 0, 467, 370, 579, 447, + 593, 619, 620, 264, 403, 605, 516, 613, 637, 227, + 261, 417, 501, 599, 490, 395, 575, 576, 329, 489, + 296, 203, 367, 625, 225, 476, 369, 243, 232, 581, + 602, 290, 453, 632, 214, 511, 
591, 240, 480, 0, + 0, 640, 248, 500, 216, 588, 499, 391, 326, 327, + 215, 0, 454, 269, 294, 0, 0, 259, 412, 583, + 584, 257, 641, 229, 612, 221, 0, 611, 405, 578, + 589, 392, 381, 220, 587, 390, 380, 334, 353, 354, + 281, 307, 444, 373, 445, 306, 308, 401, 400, 402, + 208, 600, 0, 209, 0, 495, 601, 642, 449, 213, + 235, 236, 238, 0, 280, 284, 292, 295, 303, 304, + 313, 365, 416, 443, 439, 448, 0, 573, 594, 606, + 617, 623, 624, 626, 627, 628, 629, 630, 633, 631, + 404, 311, 491, 333, 371, 0, 0, 422, 469, 241, + 598, 492, 201, 0, 0, 0, 0, 255, 256, 0, + 569, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, + 653, 654, 655, 656, 657, 658, 659, 660, 638, 502, + 508, 503, 504, 505, 506, 507, 0, 509, 0, 0, + 0, 0, 0, 0, 585, 586, 661, 382, 482, 595, + 335, 347, 350, 340, 359, 0, 360, 336, 337, 342, + 344, 345, 346, 351, 352, 356, 362, 250, 211, 388, + 396, 572, 312, 217, 218, 219, 518, 519, 520, 521, + 609, 610, 614, 206, 459, 460, 461, 462, 293, 604, + 309, 465, 464, 331, 332, 377, 446, 534, 536, 547, + 551, 553, 555, 561, 564, 535, 537, 548, 552, 554, + 556, 562, 565, 524, 526, 528, 530, 543, 542, 539, + 567, 568, 545, 550, 529, 541, 546, 559, 566, 563, + 523, 527, 531, 540, 558, 557, 538, 549, 560, 544, + 532, 525, 533, 0, 198, 222, 366, 0, 451, 289, + 639, 608, 603, 207, 224, 0, 263, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 200, + 202, 210, 223, 233, 237, 244, 262, 277, 279, 286, + 299, 310, 318, 319, 322, 328, 378, 384, 385, 386, + 387, 406, 407, 408, 411, 414, 415, 418, 420, 421, + 424, 428, 432, 433, 434, 436, 438, 440, 452, 457, + 471, 472, 473, 474, 475, 478, 479, 484, 485, 486, + 487, 488, 496, 497, 510, 580, 582, 597, 615, 621, + 477, 301, 302, 441, 442, 314, 315, 635, 636, 300, + 592, 622, 590, 634, 616, 435, 376, 0, 0, 379, + 282, 305, 320, 0, 607, 498, 228, 463, 291, 252, + 0, 0, 212, 247, 231, 260, 275, 278, 324, 389, + 397, 426, 431, 297, 272, 245, 456, 242, 481, 513, + 514, 515, 517, 393, 267, 
430, 394, 0, 374, 570, + 571, 316, 522, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 413, 0, 0, 0, 0, 0, + 0, 0, 0, 271, 0, 0, 0, 0, 364, 268, + 0, 0, 427, 0, 205, 0, 483, 253, 375, 372, + 577, 283, 274, 270, 251, 317, 383, 425, 512, 419, + 0, 368, 0, 0, 493, 398, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 323, 249, 325, 204, 410, 494, 287, 0, 0, + 0, 0, 0, 711, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 239, 0, 0, 246, 0, 0, 0, + 349, 358, 357, 338, 339, 341, 343, 348, 355, 361, + 0, 0, 0, 0, 0, 266, 321, 273, 265, 574, + 0, 0, 0, 0, 0, 0, 0, 0, 230, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 276, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 298, 0, 399, 258, 0, + 450, 0, 0, 0, 618, 0, 0, 0, 3909, 0, + 0, 0, 363, 0, 330, 199, 226, 0, 0, 409, + 458, 470, 0, 0, 0, 254, 0, 468, 423, 596, + 234, 285, 455, 429, 466, 437, 288, 0, 0, 467, + 370, 579, 447, 593, 619, 620, 264, 403, 605, 516, + 613, 637, 227, 261, 417, 501, 599, 490, 395, 575, + 576, 329, 489, 296, 203, 367, 625, 225, 476, 369, + 243, 232, 581, 602, 290, 453, 632, 214, 511, 591, + 240, 480, 0, 0, 640, 248, 500, 216, 588, 499, + 391, 326, 327, 215, 0, 454, 269, 294, 0, 0, + 259, 412, 583, 584, 257, 641, 229, 612, 221, 0, + 611, 405, 578, 589, 392, 381, 220, 587, 390, 380, + 334, 353, 354, 281, 307, 444, 373, 445, 306, 308, + 401, 400, 402, 208, 600, 0, 209, 0, 495, 601, + 642, 449, 213, 235, 236, 238, 0, 280, 284, 292, + 295, 303, 304, 313, 365, 416, 443, 439, 448, 0, + 573, 594, 606, 617, 623, 624, 626, 627, 628, 629, + 630, 633, 631, 404, 311, 491, 333, 371, 0, 0, + 422, 469, 241, 598, 492, 201, 0, 0, 0, 0, + 255, 256, 0, 569, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, + 660, 638, 502, 508, 503, 504, 505, 506, 507, 0, + 509, 0, 0, 0, 0, 0, 
0, 585, 586, 661, + 382, 482, 595, 335, 347, 350, 340, 359, 0, 360, + 336, 337, 342, 344, 345, 346, 351, 352, 356, 362, + 250, 211, 388, 396, 572, 312, 217, 218, 219, 518, + 519, 520, 521, 609, 610, 614, 206, 459, 460, 461, + 462, 293, 604, 309, 465, 464, 331, 332, 377, 446, + 534, 536, 547, 551, 553, 555, 561, 564, 535, 537, + 548, 552, 554, 556, 562, 565, 524, 526, 528, 530, + 543, 542, 539, 567, 568, 545, 550, 529, 541, 546, + 559, 566, 563, 523, 527, 531, 540, 558, 557, 538, + 549, 560, 544, 532, 525, 533, 0, 198, 222, 366, + 0, 451, 289, 639, 608, 603, 207, 224, 0, 263, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 200, 202, 210, 223, 233, 237, 244, 262, + 277, 279, 286, 299, 310, 318, 319, 322, 328, 378, + 384, 385, 386, 387, 406, 407, 408, 411, 414, 415, + 418, 420, 421, 424, 428, 432, 433, 434, 436, 438, + 440, 452, 457, 471, 472, 473, 474, 475, 478, 479, + 484, 485, 486, 487, 488, 496, 497, 510, 580, 582, + 597, 615, 621, 477, 301, 302, 441, 442, 314, 315, + 635, 636, 300, 592, 622, 590, 634, 616, 435, 376, + 0, 0, 379, 282, 305, 320, 0, 607, 498, 228, + 463, 291, 252, 0, 0, 212, 247, 231, 260, 275, + 278, 324, 389, 397, 426, 431, 297, 272, 245, 456, + 242, 481, 513, 514, 515, 517, 393, 267, 430, 394, + 0, 374, 570, 571, 316, 522, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 413, 0, 0, + 0, 0, 0, 0, 0, 0, 271, 0, 0, 0, + 0, 364, 268, 0, 0, 427, 0, 205, 0, 483, + 253, 375, 372, 577, 283, 274, 270, 251, 317, 383, + 425, 512, 419, 0, 368, 0, 0, 493, 398, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 323, 249, 325, 204, 410, 494, + 287, 0, 96, 0, 0, 0, 711, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 239, 0, 0, 246, + 0, 0, 0, 349, 358, 357, 338, 339, 341, 343, + 348, 355, 361, 0, 0, 0, 0, 0, 266, 321, + 273, 265, 574, 0, 0, 0, 0, 0, 0, 0, + 0, 230, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 276, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 298, 0, + 399, 258, 0, 450, 0, 0, 0, 618, 0, 0, + 0, 0, 0, 0, 0, 363, 0, 330, 199, 226, + 0, 0, 409, 458, 470, 0, 0, 0, 254, 0, + 468, 423, 596, 234, 285, 455, 429, 466, 437, 288, + 0, 0, 467, 370, 579, 447, 593, 619, 620, 264, + 403, 605, 516, 613, 637, 227, 261, 417, 501, 599, + 490, 395, 575, 576, 329, 489, 296, 203, 367, 625, + 225, 476, 369, 243, 232, 581, 602, 290, 453, 632, + 214, 511, 591, 240, 480, 0, 0, 640, 248, 500, + 216, 588, 499, 391, 326, 327, 215, 0, 454, 269, + 294, 0, 0, 259, 412, 583, 584, 257, 641, 229, + 612, 221, 0, 611, 405, 578, 589, 392, 381, 220, + 587, 390, 380, 334, 353, 354, 281, 307, 444, 373, + 445, 306, 308, 401, 400, 402, 208, 600, 0, 209, + 0, 495, 601, 642, 449, 213, 235, 236, 238, 0, + 280, 284, 292, 295, 303, 304, 313, 365, 416, 443, + 439, 448, 0, 573, 594, 606, 617, 623, 624, 626, + 627, 628, 629, 630, 633, 631, 404, 311, 491, 333, + 371, 0, 0, 422, 469, 241, 598, 492, 201, 0, + 0, 0, 0, 255, 256, 0, 569, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 643, 644, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 638, 502, 508, 503, 504, 505, + 506, 507, 0, 509, 0, 0, 0, 0, 0, 0, + 585, 586, 661, 382, 482, 595, 335, 347, 350, 340, + 359, 0, 360, 336, 337, 342, 344, 345, 346, 351, + 352, 356, 362, 250, 211, 388, 396, 572, 312, 217, + 218, 219, 518, 519, 520, 521, 609, 610, 614, 206, + 459, 460, 461, 462, 293, 604, 309, 465, 464, 331, + 332, 377, 446, 534, 536, 547, 551, 553, 555, 561, + 564, 535, 537, 548, 552, 554, 556, 562, 565, 524, + 526, 528, 530, 543, 542, 539, 567, 568, 545, 550, + 529, 541, 546, 559, 566, 563, 523, 527, 531, 540, + 558, 557, 538, 549, 560, 544, 532, 525, 533, 0, + 198, 222, 366, 0, 451, 289, 639, 608, 603, 207, + 224, 0, 263, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 200, 202, 210, 223, 233, + 237, 244, 262, 277, 279, 286, 299, 310, 318, 319, + 322, 328, 378, 384, 
385, 386, 387, 406, 407, 408, + 411, 414, 415, 418, 420, 421, 424, 428, 432, 433, + 434, 436, 438, 440, 452, 457, 471, 472, 473, 474, + 475, 478, 479, 484, 485, 486, 487, 488, 496, 497, + 510, 580, 582, 597, 615, 621, 477, 301, 302, 441, + 442, 314, 315, 635, 636, 300, 592, 622, 590, 634, + 616, 435, 376, 0, 0, 379, 282, 305, 320, 0, + 607, 498, 228, 463, 291, 252, 0, 0, 212, 247, + 231, 260, 275, 278, 324, 389, 397, 426, 431, 297, + 272, 245, 456, 242, 481, 513, 514, 515, 517, 393, + 267, 430, 394, 0, 374, 570, 571, 316, 522, 0, + 0, 0, 0, 2375, 0, 0, 0, 0, 0, 0, + 413, 0, 0, 0, 0, 0, 0, 0, 0, 271, + 0, 0, 0, 0, 364, 268, 0, 0, 427, 0, + 205, 0, 483, 253, 375, 372, 577, 283, 274, 270, + 251, 317, 383, 425, 512, 419, 0, 368, 0, 0, + 493, 398, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 323, 249, 325, + 204, 410, 494, 287, 0, 0, 0, 0, 0, 196, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 239, + 0, 0, 246, 0, 0, 0, 349, 358, 357, 338, + 339, 341, 343, 348, 355, 361, 0, 0, 0, 0, + 0, 266, 321, 273, 265, 574, 0, 0, 0, 0, + 0, 0, 0, 0, 230, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 276, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 298, 0, 399, 258, 0, 450, 0, 0, 0, + 618, 0, 0, 0, 0, 0, 0, 0, 363, 0, + 330, 199, 226, 0, 0, 409, 458, 470, 0, 0, + 0, 254, 0, 468, 423, 596, 234, 285, 455, 429, + 466, 437, 288, 0, 0, 467, 370, 579, 447, 593, + 619, 620, 264, 403, 605, 516, 613, 637, 227, 261, + 417, 501, 599, 490, 395, 575, 576, 329, 489, 296, + 203, 367, 625, 225, 476, 369, 243, 232, 581, 602, + 290, 453, 632, 214, 511, 591, 240, 480, 0, 0, + 640, 248, 500, 216, 588, 499, 391, 326, 327, 215, + 0, 454, 269, 294, 0, 0, 259, 412, 583, 584, + 257, 641, 229, 612, 221, 0, 611, 405, 578, 589, + 392, 381, 220, 587, 390, 380, 334, 353, 354, 281, + 307, 444, 373, 445, 306, 308, 401, 
400, 402, 208, + 600, 0, 209, 0, 495, 601, 642, 449, 213, 235, + 236, 238, 0, 280, 284, 292, 295, 303, 304, 313, + 365, 416, 443, 439, 448, 0, 573, 594, 606, 617, + 623, 624, 626, 627, 628, 629, 630, 633, 631, 404, + 311, 491, 333, 371, 0, 0, 422, 469, 241, 598, + 492, 201, 0, 0, 0, 0, 255, 256, 0, 569, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 643, + 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 659, 660, 638, 502, 508, + 503, 504, 505, 506, 507, 0, 509, 0, 0, 0, + 0, 0, 0, 585, 586, 661, 382, 482, 595, 335, + 347, 350, 340, 359, 0, 360, 336, 337, 342, 344, + 345, 346, 351, 352, 356, 362, 250, 211, 388, 396, + 572, 312, 217, 218, 219, 518, 519, 520, 521, 609, + 610, 614, 206, 459, 460, 461, 462, 293, 604, 309, + 465, 464, 331, 332, 377, 446, 534, 536, 547, 551, + 553, 555, 561, 564, 535, 537, 548, 552, 554, 556, + 562, 565, 524, 526, 528, 530, 543, 542, 539, 567, + 568, 545, 550, 529, 541, 546, 559, 566, 563, 523, + 527, 531, 540, 558, 557, 538, 549, 560, 544, 532, + 525, 533, 0, 198, 222, 366, 0, 451, 289, 639, + 608, 603, 207, 224, 0, 263, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 200, 202, + 210, 223, 233, 237, 244, 262, 277, 279, 286, 299, + 310, 318, 319, 322, 328, 378, 384, 385, 386, 387, + 406, 407, 408, 411, 414, 415, 418, 420, 421, 424, + 428, 432, 433, 434, 436, 438, 440, 452, 457, 471, + 472, 473, 474, 475, 478, 479, 484, 485, 486, 487, + 488, 496, 497, 510, 580, 582, 597, 615, 621, 477, + 301, 302, 441, 442, 314, 315, 635, 636, 300, 592, + 622, 590, 634, 616, 435, 376, 0, 0, 379, 282, + 305, 320, 0, 607, 498, 228, 463, 291, 252, 0, + 0, 212, 247, 231, 260, 275, 278, 324, 389, 397, + 426, 431, 297, 272, 245, 456, 242, 481, 513, 514, + 515, 517, 393, 267, 430, 394, 0, 374, 570, 571, + 316, 522, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 413, 0, 0, 0, 0, 0, 0, + 0, 0, 271, 0, 0, 0, 0, 364, 268, 0, + 0, 427, 0, 205, 0, 483, 253, 375, 372, 577, + 283, 274, 270, 251, 317, 383, 425, 512, 419, 0, + 368, 0, 0, 493, 398, 0, 
0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 323, 249, 325, 204, 410, 494, 287, 0, 0, 0, + 0, 1742, 711, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 239, 0, 0, 246, 0, 0, 0, 349, + 358, 357, 338, 339, 341, 343, 348, 355, 361, 0, + 0, 0, 0, 0, 266, 321, 273, 265, 574, 0, + 0, 0, 0, 0, 0, 0, 0, 230, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 276, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 298, 0, 399, 258, 0, 450, + 0, 0, 0, 618, 0, 0, 0, 0, 0, 0, + 0, 363, 0, 330, 199, 226, 0, 0, 409, 458, + 470, 0, 0, 0, 254, 0, 468, 423, 596, 234, + 285, 455, 429, 466, 437, 288, 0, 0, 467, 370, + 579, 447, 593, 619, 620, 264, 403, 605, 516, 613, + 637, 227, 261, 417, 501, 599, 490, 395, 575, 576, + 329, 489, 296, 203, 367, 625, 225, 476, 369, 243, + 232, 581, 602, 290, 453, 632, 214, 511, 591, 240, + 480, 0, 0, 640, 248, 500, 216, 588, 499, 391, + 326, 327, 215, 0, 454, 269, 294, 0, 0, 259, + 412, 583, 584, 257, 641, 229, 612, 221, 0, 611, + 405, 578, 589, 392, 381, 220, 587, 390, 380, 334, + 353, 354, 281, 307, 444, 373, 445, 306, 308, 401, + 400, 402, 208, 600, 0, 209, 0, 495, 601, 642, + 449, 213, 235, 236, 238, 0, 280, 284, 292, 295, + 303, 304, 313, 365, 416, 443, 439, 448, 0, 573, + 594, 606, 617, 623, 624, 626, 627, 628, 629, 630, + 633, 631, 404, 311, 491, 333, 371, 0, 0, 422, + 469, 241, 598, 492, 201, 0, 0, 0, 0, 255, + 256, 0, 569, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 643, 644, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, + 638, 502, 508, 503, 504, 505, 506, 507, 0, 509, + 0, 0, 0, 0, 0, 0, 585, 586, 661, 382, + 482, 595, 335, 347, 350, 340, 359, 0, 360, 336, + 337, 342, 344, 345, 346, 351, 352, 356, 362, 250, + 211, 388, 396, 572, 312, 217, 218, 219, 518, 519, + 520, 521, 609, 610, 614, 206, 459, 460, 461, 462, + 293, 604, 309, 465, 464, 
331, 332, 377, 446, 534, + 536, 547, 551, 553, 555, 561, 564, 535, 537, 548, + 552, 554, 556, 562, 565, 524, 526, 528, 530, 543, + 542, 539, 567, 568, 545, 550, 529, 541, 546, 559, + 566, 563, 523, 527, 531, 540, 558, 557, 538, 549, + 560, 544, 532, 525, 533, 0, 198, 222, 366, 0, + 451, 289, 639, 608, 603, 207, 224, 0, 263, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 200, 202, 210, 223, 233, 237, 244, 262, 277, + 279, 286, 299, 310, 318, 319, 322, 328, 378, 384, + 385, 386, 387, 406, 407, 408, 411, 414, 415, 418, + 420, 421, 424, 428, 432, 433, 434, 436, 438, 440, + 452, 457, 471, 472, 473, 474, 475, 478, 479, 484, + 485, 486, 487, 488, 496, 497, 510, 580, 582, 597, + 615, 621, 477, 301, 302, 441, 442, 314, 315, 635, + 636, 300, 592, 622, 590, 634, 616, 435, 376, 0, + 0, 379, 282, 305, 320, 0, 607, 498, 228, 463, + 291, 252, 0, 0, 212, 247, 231, 260, 275, 278, + 324, 389, 397, 426, 431, 297, 272, 245, 456, 242, + 481, 513, 514, 515, 517, 393, 267, 430, 394, 0, + 374, 570, 571, 316, 522, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 413, 0, 0, 0, + 0, 0, 0, 0, 0, 271, 0, 0, 0, 0, + 364, 268, 0, 0, 427, 0, 205, 0, 483, 253, + 375, 372, 577, 283, 274, 270, 251, 317, 383, 425, + 512, 419, 0, 368, 0, 0, 493, 398, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 323, 249, 325, 204, 410, 494, 287, + 0, 0, 0, 0, 0, 196, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 239, 0, 0, 246, 0, + 0, 0, 349, 358, 357, 338, 339, 341, 343, 348, + 355, 361, 0, 0, 0, 0, 0, 266, 321, 273, + 265, 574, 0, 0, 0, 0, 0, 0, 0, 0, + 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 276, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 298, 0, 399, + 258, 0, 450, 0, 0, 0, 618, 0, 0, 0, + 0, 0, 0, 0, 363, 0, 330, 199, 226, 0, + 0, 409, 458, 470, 0, 0, 0, 254, 0, 468, + 423, 596, 234, 285, 455, 
429, 466, 437, 288, 0, + 0, 467, 370, 579, 447, 593, 619, 620, 264, 403, + 605, 516, 613, 637, 227, 261, 417, 501, 599, 490, + 395, 575, 576, 329, 489, 296, 203, 367, 625, 225, + 476, 369, 243, 232, 581, 602, 290, 453, 632, 214, + 511, 591, 240, 480, 0, 0, 640, 248, 500, 216, + 588, 499, 391, 326, 327, 215, 0, 454, 269, 294, + 0, 0, 259, 412, 583, 584, 257, 641, 229, 612, + 221, 0, 611, 405, 578, 589, 392, 381, 220, 587, + 390, 380, 334, 353, 354, 281, 307, 444, 373, 445, + 306, 308, 401, 400, 402, 208, 600, 0, 209, 0, + 495, 601, 642, 449, 213, 235, 236, 238, 0, 280, + 284, 292, 295, 303, 304, 313, 365, 416, 443, 439, + 448, 0, 573, 594, 606, 617, 623, 624, 626, 627, + 628, 629, 630, 633, 631, 404, 311, 491, 333, 371, + 0, 0, 422, 469, 241, 598, 492, 201, 0, 0, + 0, 0, 255, 256, 0, 569, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 643, 644, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 638, 502, 508, 503, 504, 505, 506, + 507, 0, 509, 0, 0, 0, 0, 0, 0, 585, + 586, 661, 382, 482, 595, 335, 347, 350, 340, 359, + 0, 360, 336, 337, 342, 344, 345, 346, 351, 352, + 356, 362, 250, 211, 388, 396, 572, 312, 217, 218, + 219, 518, 519, 520, 521, 609, 610, 614, 206, 459, + 460, 461, 462, 293, 604, 309, 465, 464, 331, 332, + 377, 446, 534, 536, 547, 551, 553, 555, 561, 564, + 535, 537, 548, 552, 554, 556, 562, 565, 524, 526, + 528, 530, 543, 542, 539, 567, 568, 545, 550, 529, + 541, 546, 559, 566, 563, 523, 527, 531, 540, 558, + 557, 538, 549, 560, 544, 532, 525, 533, 0, 198, + 222, 366, 2035, 451, 289, 639, 608, 603, 207, 224, + 0, 263, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 200, 202, 210, 223, 233, 237, + 244, 262, 277, 279, 286, 299, 310, 318, 319, 322, + 328, 378, 384, 385, 386, 387, 406, 407, 408, 411, + 414, 415, 418, 420, 421, 424, 428, 432, 433, 434, + 436, 438, 440, 452, 457, 471, 472, 473, 474, 475, + 478, 479, 484, 485, 486, 487, 488, 496, 497, 510, + 580, 582, 597, 615, 621, 477, 301, 302, 441, 442, + 314, 315, 
635, 636, 300, 592, 622, 590, 634, 616, + 435, 376, 0, 0, 379, 282, 305, 320, 0, 607, + 498, 228, 463, 291, 252, 0, 0, 212, 247, 231, + 260, 275, 278, 324, 389, 397, 426, 431, 297, 272, + 245, 456, 242, 481, 513, 514, 515, 517, 393, 267, + 430, 394, 0, 374, 570, 571, 316, 522, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 413, + 0, 0, 0, 0, 0, 0, 0, 0, 271, 0, + 0, 0, 0, 364, 268, 0, 0, 427, 0, 205, + 0, 483, 253, 375, 372, 577, 283, 274, 270, 251, + 317, 383, 425, 512, 419, 0, 368, 0, 0, 493, + 398, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 323, 249, 325, 204, + 410, 494, 287, 0, 0, 0, 0, 2026, 711, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 239, 0, + 0, 246, 0, 0, 0, 349, 358, 357, 338, 339, + 341, 343, 348, 355, 361, 0, 0, 0, 0, 0, + 266, 321, 273, 265, 574, 0, 0, 0, 0, 0, + 0, 0, 0, 230, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 276, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 298, 0, 399, 258, 0, 450, 0, 0, 0, 618, + 0, 0, 0, 0, 0, 0, 0, 363, 0, 330, + 199, 226, 0, 0, 409, 458, 470, 0, 0, 0, + 254, 0, 468, 423, 596, 234, 285, 455, 429, 466, + 437, 288, 0, 0, 467, 370, 579, 447, 593, 619, + 620, 264, 403, 605, 516, 613, 637, 227, 261, 417, + 501, 599, 490, 395, 575, 576, 329, 489, 296, 203, + 367, 625, 225, 476, 369, 243, 232, 581, 602, 290, + 453, 632, 214, 511, 591, 240, 480, 0, 0, 640, + 248, 500, 216, 588, 499, 391, 326, 327, 215, 0, + 454, 269, 294, 0, 0, 259, 412, 583, 584, 257, + 641, 229, 612, 221, 0, 611, 405, 578, 589, 392, + 381, 220, 587, 390, 380, 334, 353, 354, 281, 307, + 444, 373, 445, 306, 308, 401, 400, 402, 208, 600, + 0, 209, 0, 495, 601, 642, 449, 213, 235, 236, + 238, 0, 280, 284, 292, 295, 303, 304, 313, 365, + 416, 443, 439, 448, 0, 573, 594, 606, 617, 623, + 624, 626, 627, 628, 629, 630, 633, 631, 404, 311, + 491, 333, 371, 0, 0, 422, 469, 
241, 598, 492, + 201, 0, 0, 0, 0, 255, 256, 0, 569, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 643, 644, + 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 659, 660, 638, 502, 508, 503, + 504, 505, 506, 507, 0, 509, 0, 0, 0, 0, + 0, 0, 585, 586, 661, 382, 482, 595, 335, 347, + 350, 340, 359, 0, 360, 336, 337, 342, 344, 345, + 346, 351, 352, 356, 362, 250, 211, 388, 396, 572, + 312, 217, 218, 219, 518, 519, 520, 521, 609, 610, + 614, 206, 459, 460, 461, 462, 293, 604, 309, 465, + 464, 331, 332, 377, 446, 534, 536, 547, 551, 553, + 555, 561, 564, 535, 537, 548, 552, 554, 556, 562, + 565, 524, 526, 528, 530, 543, 542, 539, 567, 568, + 545, 550, 529, 541, 546, 559, 566, 563, 523, 527, + 531, 540, 558, 557, 538, 549, 560, 544, 532, 525, + 533, 0, 198, 222, 366, 0, 451, 289, 639, 608, + 603, 207, 224, 0, 263, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 200, 202, 210, + 223, 233, 237, 244, 262, 277, 279, 286, 299, 310, + 318, 319, 322, 328, 378, 384, 385, 386, 387, 406, + 407, 408, 411, 414, 415, 418, 420, 421, 424, 428, + 432, 433, 434, 436, 438, 440, 452, 457, 471, 472, + 473, 474, 475, 478, 479, 484, 485, 486, 487, 488, + 496, 497, 510, 580, 582, 597, 615, 621, 477, 301, + 302, 441, 442, 314, 315, 635, 636, 300, 592, 622, + 590, 634, 616, 435, 376, 0, 0, 379, 282, 305, + 320, 0, 607, 498, 228, 463, 291, 252, 0, 0, + 212, 247, 231, 260, 275, 278, 324, 389, 397, 426, + 431, 297, 272, 245, 456, 242, 481, 513, 514, 515, + 517, 393, 267, 430, 394, 0, 374, 570, 571, 316, + 522, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 413, 0, 1889, 0, 0, 0, 0, 0, + 0, 271, 0, 0, 0, 0, 364, 268, 0, 0, + 427, 0, 205, 0, 483, 253, 375, 372, 577, 283, + 274, 270, 251, 317, 383, 425, 512, 419, 0, 368, + 0, 0, 493, 398, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 323, + 249, 325, 204, 410, 494, 287, 0, 0, 0, 0, + 0, 711, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 239, 0, 0, 246, 0, 0, 0, 349, 358, + 357, 338, 339, 341, 343, 348, 355, 361, 0, 0, + 0, 0, 0, 266, 321, 273, 265, 574, 
0, 0, + 0, 0, 0, 0, 0, 0, 230, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 276, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 298, 0, 399, 258, 0, 450, 0, + 0, 0, 618, 0, 0, 0, 0, 0, 0, 0, + 363, 0, 330, 199, 226, 0, 0, 409, 458, 470, + 0, 0, 0, 254, 0, 468, 423, 596, 234, 285, + 455, 429, 466, 437, 288, 0, 0, 467, 370, 579, + 447, 593, 619, 620, 264, 403, 605, 516, 613, 637, + 227, 261, 417, 501, 599, 490, 395, 575, 576, 329, + 489, 296, 203, 367, 625, 225, 476, 369, 243, 232, + 581, 602, 290, 453, 632, 214, 511, 591, 240, 480, + 0, 0, 640, 248, 500, 216, 588, 499, 391, 326, + 327, 215, 0, 454, 269, 294, 0, 0, 259, 412, + 583, 584, 257, 641, 229, 612, 221, 0, 611, 405, + 578, 589, 392, 381, 220, 587, 390, 380, 334, 353, + 354, 281, 307, 444, 373, 445, 306, 308, 401, 400, + 402, 208, 600, 0, 209, 0, 495, 601, 642, 449, + 213, 235, 236, 238, 0, 280, 284, 292, 295, 303, + 304, 313, 365, 416, 443, 439, 448, 0, 573, 594, + 606, 617, 623, 624, 626, 627, 628, 629, 630, 633, + 631, 404, 311, 491, 333, 371, 0, 0, 422, 469, + 241, 598, 492, 201, 0, 0, 0, 0, 255, 256, + 0, 569, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 643, 644, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 659, 660, 638, + 502, 508, 503, 504, 505, 506, 507, 0, 509, 0, + 0, 0, 0, 0, 0, 585, 586, 661, 382, 482, + 595, 335, 347, 350, 340, 359, 0, 360, 336, 337, + 342, 344, 345, 346, 351, 352, 356, 362, 250, 211, + 388, 396, 572, 312, 217, 218, 219, 518, 519, 520, + 521, 609, 610, 614, 206, 459, 460, 461, 462, 293, + 604, 309, 465, 464, 331, 332, 377, 446, 534, 536, + 547, 551, 553, 555, 561, 564, 535, 537, 548, 552, + 554, 556, 562, 565, 524, 526, 528, 530, 543, 542, + 539, 567, 568, 545, 550, 529, 541, 546, 559, 566, + 563, 523, 527, 531, 540, 558, 557, 538, 549, 560, + 544, 532, 525, 
533, 0, 198, 222, 366, 0, 451, + 289, 639, 608, 603, 207, 224, 0, 263, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 200, 202, 210, 223, 233, 237, 244, 262, 277, 279, + 286, 299, 310, 318, 319, 322, 328, 378, 384, 385, + 386, 387, 406, 407, 408, 411, 414, 415, 418, 420, + 421, 424, 428, 432, 433, 434, 436, 438, 440, 452, + 457, 471, 472, 473, 474, 475, 478, 479, 484, 485, + 486, 487, 488, 496, 497, 510, 580, 582, 597, 615, + 621, 477, 301, 302, 441, 442, 314, 315, 635, 636, + 300, 592, 622, 590, 634, 616, 435, 376, 0, 0, + 379, 282, 305, 320, 0, 607, 498, 228, 463, 291, + 252, 0, 0, 212, 247, 231, 260, 275, 278, 324, + 389, 397, 426, 431, 297, 272, 245, 456, 242, 481, + 513, 514, 515, 517, 393, 267, 430, 394, 0, 374, + 570, 571, 316, 522, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 413, 0, 1887, 0, 0, + 0, 0, 0, 0, 271, 0, 0, 0, 0, 364, + 268, 0, 0, 427, 0, 205, 0, 483, 253, 375, + 372, 577, 283, 274, 270, 251, 317, 383, 425, 512, + 419, 0, 368, 0, 0, 493, 398, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 323, 249, 325, 204, 410, 494, 287, 0, + 0, 0, 0, 0, 711, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 239, 0, 0, 246, 0, 0, + 0, 349, 358, 357, 338, 339, 341, 343, 348, 355, + 361, 0, 0, 0, 0, 0, 266, 321, 273, 265, + 574, 0, 0, 0, 0, 0, 0, 0, 0, 230, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 276, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 298, 0, 399, 258, + 0, 450, 0, 0, 0, 618, 0, 0, 0, 0, + 0, 0, 0, 363, 0, 330, 199, 226, 0, 0, + 409, 458, 470, 0, 0, 0, 254, 0, 468, 423, + 596, 234, 285, 455, 429, 466, 437, 288, 0, 0, + 467, 370, 579, 447, 593, 619, 620, 264, 403, 605, + 516, 613, 637, 227, 261, 417, 501, 599, 490, 395, + 575, 576, 329, 489, 296, 203, 367, 625, 225, 476, + 369, 243, 232, 581, 602, 290, 453, 632, 214, 511, + 591, 240, 480, 
0, 0, 640, 248, 500, 216, 588, + 499, 391, 326, 327, 215, 0, 454, 269, 294, 0, + 0, 259, 412, 583, 584, 257, 641, 229, 612, 221, + 0, 611, 405, 578, 589, 392, 381, 220, 587, 390, + 380, 334, 353, 354, 281, 307, 444, 373, 445, 306, + 308, 401, 400, 402, 208, 600, 0, 209, 0, 495, + 601, 642, 449, 213, 235, 236, 238, 0, 280, 284, + 292, 295, 303, 304, 313, 365, 416, 443, 439, 448, + 0, 573, 594, 606, 617, 623, 624, 626, 627, 628, + 629, 630, 633, 631, 404, 311, 491, 333, 371, 0, + 0, 422, 469, 241, 598, 492, 201, 0, 0, 0, + 0, 255, 256, 0, 569, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 643, 644, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, + 659, 660, 638, 502, 508, 503, 504, 505, 506, 507, + 0, 509, 0, 0, 0, 0, 0, 0, 585, 586, + 661, 382, 482, 595, 335, 347, 350, 340, 359, 0, + 360, 336, 337, 342, 344, 345, 346, 351, 352, 356, + 362, 250, 211, 388, 396, 572, 312, 217, 218, 219, + 518, 519, 520, 521, 609, 610, 614, 206, 459, 460, + 461, 462, 293, 604, 309, 465, 464, 331, 332, 377, + 446, 534, 536, 547, 551, 553, 555, 561, 564, 535, + 537, 548, 552, 554, 556, 562, 565, 524, 526, 528, + 530, 543, 542, 539, 567, 568, 545, 550, 529, 541, + 546, 559, 566, 563, 523, 527, 531, 540, 558, 557, + 538, 549, 560, 544, 532, 525, 533, 0, 198, 222, + 366, 0, 451, 289, 639, 608, 603, 207, 224, 0, + 263, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 200, 202, 210, 223, 233, 237, 244, + 262, 277, 279, 286, 299, 310, 318, 319, 322, 328, + 378, 384, 385, 386, 387, 406, 407, 408, 411, 414, + 415, 418, 420, 421, 424, 428, 432, 433, 434, 436, + 438, 440, 452, 457, 471, 472, 473, 474, 475, 478, + 479, 484, 485, 486, 487, 488, 496, 497, 510, 580, + 582, 597, 615, 621, 477, 301, 302, 441, 442, 314, + 315, 635, 636, 300, 592, 622, 590, 634, 616, 435, + 376, 0, 0, 379, 282, 305, 320, 0, 607, 498, + 228, 463, 291, 252, 0, 0, 212, 247, 231, 260, + 275, 278, 324, 389, 397, 426, 431, 297, 272, 245, + 456, 242, 481, 513, 514, 515, 517, 393, 267, 430, + 394, 0, 
374, 570, 571, 316, 522, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 413, 0, + 1885, 0, 0, 0, 0, 0, 0, 271, 0, 0, + 0, 0, 364, 268, 0, 0, 427, 0, 205, 0, + 483, 253, 375, 372, 577, 283, 274, 270, 251, 317, + 383, 425, 512, 419, 0, 368, 0, 0, 493, 398, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 323, 249, 325, 204, 410, + 494, 287, 0, 0, 0, 0, 0, 711, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 239, 0, 0, + 246, 0, 0, 0, 349, 358, 357, 338, 339, 341, + 343, 348, 355, 361, 0, 0, 0, 0, 0, 266, + 321, 273, 265, 574, 0, 0, 0, 0, 0, 0, + 0, 0, 230, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 276, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 298, + 0, 399, 258, 0, 450, 0, 0, 0, 618, 0, + 0, 0, 0, 0, 0, 0, 363, 0, 330, 199, + 226, 0, 0, 409, 458, 470, 0, 0, 0, 254, + 0, 468, 423, 596, 234, 285, 455, 429, 466, 437, + 288, 0, 0, 467, 370, 579, 447, 593, 619, 620, + 264, 403, 605, 516, 613, 637, 227, 261, 417, 501, + 599, 490, 395, 575, 576, 329, 489, 296, 203, 367, + 625, 225, 476, 369, 243, 232, 581, 602, 290, 453, + 632, 214, 511, 591, 240, 480, 0, 0, 640, 248, + 500, 216, 588, 499, 391, 326, 327, 215, 0, 454, + 269, 294, 0, 0, 259, 412, 583, 584, 257, 641, + 229, 612, 221, 0, 611, 405, 578, 589, 392, 381, + 220, 587, 390, 380, 334, 353, 354, 281, 307, 444, + 373, 445, 306, 308, 401, 400, 402, 208, 600, 0, + 209, 0, 495, 601, 642, 449, 213, 235, 236, 238, + 0, 280, 284, 292, 295, 303, 304, 313, 365, 416, + 443, 439, 448, 0, 573, 594, 606, 617, 623, 624, + 626, 627, 628, 629, 630, 633, 631, 404, 311, 491, + 333, 371, 0, 0, 422, 469, 241, 598, 492, 201, + 0, 0, 0, 0, 255, 256, 0, 569, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 643, 644, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 659, 660, 638, 502, 508, 503, 504, + 505, 506, 507, 0, 509, 0, 0, 0, 0, 0, + 0, 585, 586, 
661, 382, 482, 595, 335, 347, 350, + 340, 359, 0, 360, 336, 337, 342, 344, 345, 346, + 351, 352, 356, 362, 250, 211, 388, 396, 572, 312, + 217, 218, 219, 518, 519, 520, 521, 609, 610, 614, + 206, 459, 460, 461, 462, 293, 604, 309, 465, 464, + 331, 332, 377, 446, 534, 536, 547, 551, 553, 555, + 561, 564, 535, 537, 548, 552, 554, 556, 562, 565, + 524, 526, 528, 530, 543, 542, 539, 567, 568, 545, + 550, 529, 541, 546, 559, 566, 563, 523, 527, 531, + 540, 558, 557, 538, 549, 560, 544, 532, 525, 533, + 0, 198, 222, 366, 0, 451, 289, 639, 608, 603, + 207, 224, 0, 263, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 200, 202, 210, 223, + 233, 237, 244, 262, 277, 279, 286, 299, 310, 318, + 319, 322, 328, 378, 384, 385, 386, 387, 406, 407, + 408, 411, 414, 415, 418, 420, 421, 424, 428, 432, + 433, 434, 436, 438, 440, 452, 457, 471, 472, 473, + 474, 475, 478, 479, 484, 485, 486, 487, 488, 496, + 497, 510, 580, 582, 597, 615, 621, 477, 301, 302, + 441, 442, 314, 315, 635, 636, 300, 592, 622, 590, + 634, 616, 435, 376, 0, 0, 379, 282, 305, 320, + 0, 607, 498, 228, 463, 291, 252, 0, 0, 212, + 247, 231, 260, 275, 278, 324, 389, 397, 426, 431, + 297, 272, 245, 456, 242, 481, 513, 514, 515, 517, + 393, 267, 430, 394, 0, 374, 570, 571, 316, 522, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 413, 0, 1883, 0, 0, 0, 0, 0, 0, + 271, 0, 0, 0, 0, 364, 268, 0, 0, 427, + 0, 205, 0, 483, 253, 375, 372, 577, 283, 274, + 270, 251, 317, 383, 425, 512, 419, 0, 368, 0, + 0, 493, 398, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 323, 249, + 325, 204, 410, 494, 287, 0, 0, 0, 0, 0, + 711, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 239, 0, 0, 246, 0, 0, 0, 349, 358, 357, + 338, 339, 341, 343, 348, 355, 361, 0, 0, 0, + 0, 0, 266, 321, 273, 265, 574, 0, 0, 0, + 0, 0, 0, 0, 0, 230, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 276, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 298, 0, 399, 258, 0, 450, 0, 0, + 0, 618, 0, 0, 0, 0, 0, 0, 0, 363, + 0, 330, 199, 226, 0, 0, 409, 458, 470, 0, + 0, 0, 254, 0, 468, 423, 596, 234, 285, 455, + 429, 466, 437, 288, 0, 0, 467, 370, 579, 447, + 593, 619, 620, 264, 403, 605, 516, 613, 637, 227, + 261, 417, 501, 599, 490, 395, 575, 576, 329, 489, + 296, 203, 367, 625, 225, 476, 369, 243, 232, 581, + 602, 290, 453, 632, 214, 511, 591, 240, 480, 0, + 0, 640, 248, 500, 216, 588, 499, 391, 326, 327, + 215, 0, 454, 269, 294, 0, 0, 259, 412, 583, + 584, 257, 641, 229, 612, 221, 0, 611, 405, 578, + 589, 392, 381, 220, 587, 390, 380, 334, 353, 354, + 281, 307, 444, 373, 445, 306, 308, 401, 400, 402, + 208, 600, 0, 209, 0, 495, 601, 642, 449, 213, + 235, 236, 238, 0, 280, 284, 292, 295, 303, 304, + 313, 365, 416, 443, 439, 448, 0, 573, 594, 606, + 617, 623, 624, 626, 627, 628, 629, 630, 633, 631, + 404, 311, 491, 333, 371, 0, 0, 422, 469, 241, + 598, 492, 201, 0, 0, 0, 0, 255, 256, 0, + 569, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, + 653, 654, 655, 656, 657, 658, 659, 660, 638, 502, + 508, 503, 504, 505, 506, 507, 0, 509, 0, 0, + 0, 0, 0, 0, 585, 586, 661, 382, 482, 595, + 335, 347, 350, 340, 359, 0, 360, 336, 337, 342, + 344, 345, 346, 351, 352, 356, 362, 250, 211, 388, + 396, 572, 312, 217, 218, 219, 518, 519, 520, 521, + 609, 610, 614, 206, 459, 460, 461, 462, 293, 604, + 309, 465, 464, 331, 332, 377, 446, 534, 536, 547, + 551, 553, 555, 561, 564, 535, 537, 548, 552, 554, + 556, 562, 565, 524, 526, 528, 530, 543, 542, 539, + 567, 568, 545, 550, 529, 541, 546, 559, 566, 563, + 523, 527, 531, 540, 558, 557, 538, 549, 560, 544, + 532, 525, 533, 0, 198, 222, 366, 0, 451, 289, + 639, 608, 603, 207, 224, 0, 263, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 200, + 202, 210, 223, 233, 237, 244, 262, 277, 279, 286, + 299, 310, 318, 319, 322, 328, 378, 384, 385, 386, + 
387, 406, 407, 408, 411, 414, 415, 418, 420, 421, + 424, 428, 432, 433, 434, 436, 438, 440, 452, 457, + 471, 472, 473, 474, 475, 478, 479, 484, 485, 486, + 487, 488, 496, 497, 510, 580, 582, 597, 615, 621, + 477, 301, 302, 441, 442, 314, 315, 635, 636, 300, + 592, 622, 590, 634, 616, 435, 376, 0, 0, 379, + 282, 305, 320, 0, 607, 498, 228, 463, 291, 252, + 0, 0, 212, 247, 231, 260, 275, 278, 324, 389, + 397, 426, 431, 297, 272, 245, 456, 242, 481, 513, + 514, 515, 517, 393, 267, 430, 394, 0, 374, 570, + 571, 316, 522, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 413, 0, 1881, 0, 0, 0, + 0, 0, 0, 271, 0, 0, 0, 0, 364, 268, + 0, 0, 427, 0, 205, 0, 483, 253, 375, 372, + 577, 283, 274, 270, 251, 317, 383, 425, 512, 419, + 0, 368, 0, 0, 493, 398, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 323, 249, 325, 204, 410, 494, 287, 0, 0, + 0, 0, 0, 711, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 239, 0, 0, 246, 0, 0, 0, + 349, 358, 357, 338, 339, 341, 343, 348, 355, 361, + 0, 0, 0, 0, 0, 266, 321, 273, 265, 574, + 0, 0, 0, 0, 0, 0, 0, 0, 230, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 276, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 298, 0, 399, 258, 0, + 450, 0, 0, 0, 618, 0, 0, 0, 0, 0, + 0, 0, 363, 0, 330, 199, 226, 0, 0, 409, + 458, 470, 0, 0, 0, 254, 0, 468, 423, 596, + 234, 285, 455, 429, 466, 437, 288, 0, 0, 467, + 370, 579, 447, 593, 619, 620, 264, 403, 605, 516, + 613, 637, 227, 261, 417, 501, 599, 490, 395, 575, + 576, 329, 489, 296, 203, 367, 625, 225, 476, 369, + 243, 232, 581, 602, 290, 453, 632, 214, 511, 591, + 240, 480, 0, 0, 640, 248, 500, 216, 588, 499, + 391, 326, 327, 215, 0, 454, 269, 294, 0, 0, + 259, 412, 583, 584, 257, 641, 229, 612, 221, 0, + 611, 405, 578, 589, 392, 381, 220, 587, 390, 380, + 334, 353, 354, 281, 307, 444, 373, 445, 306, 308, + 401, 400, 402, 
208, 600, 0, 209, 0, 495, 601, + 642, 449, 213, 235, 236, 238, 0, 280, 284, 292, + 295, 303, 304, 313, 365, 416, 443, 439, 448, 0, + 573, 594, 606, 617, 623, 624, 626, 627, 628, 629, + 630, 633, 631, 404, 311, 491, 333, 371, 0, 0, + 422, 469, 241, 598, 492, 201, 0, 0, 0, 0, + 255, 256, 0, 569, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, + 660, 638, 502, 508, 503, 504, 505, 506, 507, 0, + 509, 0, 0, 0, 0, 0, 0, 585, 586, 661, + 382, 482, 595, 335, 347, 350, 340, 359, 0, 360, + 336, 337, 342, 344, 345, 346, 351, 352, 356, 362, + 250, 211, 388, 396, 572, 312, 217, 218, 219, 518, + 519, 520, 521, 609, 610, 614, 206, 459, 460, 461, + 462, 293, 604, 309, 465, 464, 331, 332, 377, 446, + 534, 536, 547, 551, 553, 555, 561, 564, 535, 537, + 548, 552, 554, 556, 562, 565, 524, 526, 528, 530, + 543, 542, 539, 567, 568, 545, 550, 529, 541, 546, + 559, 566, 563, 523, 527, 531, 540, 558, 557, 538, + 549, 560, 544, 532, 525, 533, 0, 198, 222, 366, + 0, 451, 289, 639, 608, 603, 207, 224, 0, 263, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 200, 202, 210, 223, 233, 237, 244, 262, + 277, 279, 286, 299, 310, 318, 319, 322, 328, 378, + 384, 385, 386, 387, 406, 407, 408, 411, 414, 415, + 418, 420, 421, 424, 428, 432, 433, 434, 436, 438, + 440, 452, 457, 471, 472, 473, 474, 475, 478, 479, + 484, 485, 486, 487, 488, 496, 497, 510, 580, 582, + 597, 615, 621, 477, 301, 302, 441, 442, 314, 315, + 635, 636, 300, 592, 622, 590, 634, 616, 435, 376, + 0, 0, 379, 282, 305, 320, 0, 607, 498, 228, + 463, 291, 252, 0, 0, 212, 247, 231, 260, 275, + 278, 324, 389, 397, 426, 431, 297, 272, 245, 456, + 242, 481, 513, 514, 515, 517, 393, 267, 430, 394, + 0, 374, 570, 571, 316, 522, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 413, 0, 1877, + 0, 0, 0, 0, 0, 0, 271, 0, 0, 0, + 0, 364, 268, 0, 0, 427, 0, 205, 0, 483, + 253, 375, 372, 577, 283, 274, 270, 251, 317, 383, + 425, 512, 419, 0, 368, 0, 0, 493, 398, 0, + 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 323, 249, 325, 204, 410, 494, + 287, 0, 0, 0, 0, 0, 711, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 239, 0, 0, 246, + 0, 0, 0, 349, 358, 357, 338, 339, 341, 343, + 348, 355, 361, 0, 0, 0, 0, 0, 266, 321, + 273, 265, 574, 0, 0, 0, 0, 0, 0, 0, + 0, 230, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 276, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 298, 0, + 399, 258, 0, 450, 0, 0, 0, 618, 0, 0, + 0, 0, 0, 0, 0, 363, 0, 330, 199, 226, + 0, 0, 409, 458, 470, 0, 0, 0, 254, 0, + 468, 423, 596, 234, 285, 455, 429, 466, 437, 288, + 0, 0, 467, 370, 579, 447, 593, 619, 620, 264, + 403, 605, 516, 613, 637, 227, 261, 417, 501, 599, + 490, 395, 575, 576, 329, 489, 296, 203, 367, 625, + 225, 476, 369, 243, 232, 581, 602, 290, 453, 632, + 214, 511, 591, 240, 480, 0, 0, 640, 248, 500, + 216, 588, 499, 391, 326, 327, 215, 0, 454, 269, + 294, 0, 0, 259, 412, 583, 584, 257, 641, 229, + 612, 221, 0, 611, 405, 578, 589, 392, 381, 220, + 587, 390, 380, 334, 353, 354, 281, 307, 444, 373, + 445, 306, 308, 401, 400, 402, 208, 600, 0, 209, + 0, 495, 601, 642, 449, 213, 235, 236, 238, 0, + 280, 284, 292, 295, 303, 304, 313, 365, 416, 443, + 439, 448, 0, 573, 594, 606, 617, 623, 624, 626, + 627, 628, 629, 630, 633, 631, 404, 311, 491, 333, + 371, 0, 0, 422, 469, 241, 598, 492, 201, 0, + 0, 0, 0, 255, 256, 0, 569, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 643, 644, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 638, 502, 508, 503, 504, 505, + 506, 507, 0, 509, 0, 0, 0, 0, 0, 0, + 585, 586, 661, 382, 482, 595, 335, 347, 350, 340, + 359, 0, 360, 336, 337, 342, 344, 345, 346, 351, + 352, 356, 362, 250, 211, 388, 396, 572, 312, 217, + 218, 219, 518, 519, 520, 521, 609, 610, 614, 206, + 459, 460, 461, 462, 293, 604, 309, 465, 464, 331, + 332, 
377, 446, 534, 536, 547, 551, 553, 555, 561, + 564, 535, 537, 548, 552, 554, 556, 562, 565, 524, + 526, 528, 530, 543, 542, 539, 567, 568, 545, 550, + 529, 541, 546, 559, 566, 563, 523, 527, 531, 540, + 558, 557, 538, 549, 560, 544, 532, 525, 533, 0, + 198, 222, 366, 0, 451, 289, 639, 608, 603, 207, + 224, 0, 263, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 200, 202, 210, 223, 233, + 237, 244, 262, 277, 279, 286, 299, 310, 318, 319, + 322, 328, 378, 384, 385, 386, 387, 406, 407, 408, + 411, 414, 415, 418, 420, 421, 424, 428, 432, 433, + 434, 436, 438, 440, 452, 457, 471, 472, 473, 474, + 475, 478, 479, 484, 485, 486, 487, 488, 496, 497, + 510, 580, 582, 597, 615, 621, 477, 301, 302, 441, + 442, 314, 315, 635, 636, 300, 592, 622, 590, 634, + 616, 435, 376, 0, 0, 379, 282, 305, 320, 0, + 607, 498, 228, 463, 291, 252, 0, 0, 212, 247, + 231, 260, 275, 278, 324, 389, 397, 426, 431, 297, + 272, 245, 456, 242, 481, 513, 514, 515, 517, 393, + 267, 430, 394, 0, 374, 570, 571, 316, 522, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 413, 0, 1875, 0, 0, 0, 0, 0, 0, 271, + 0, 0, 0, 0, 364, 268, 0, 0, 427, 0, + 205, 0, 483, 253, 375, 372, 577, 283, 274, 270, + 251, 317, 383, 425, 512, 419, 0, 368, 0, 0, + 493, 398, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 323, 249, 325, + 204, 410, 494, 287, 0, 0, 0, 0, 0, 711, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 239, + 0, 0, 246, 0, 0, 0, 349, 358, 357, 338, + 339, 341, 343, 348, 355, 361, 0, 0, 0, 0, + 0, 266, 321, 273, 265, 574, 0, 0, 0, 0, + 0, 0, 0, 0, 230, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 276, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 298, 0, 399, 258, 0, 450, 0, 0, 0, + 618, 0, 0, 0, 0, 0, 0, 0, 363, 0, + 330, 199, 226, 0, 0, 409, 458, 470, 0, 0, + 0, 254, 0, 468, 423, 596, 234, 285, 455, 429, + 
466, 437, 288, 0, 0, 467, 370, 579, 447, 593, + 619, 620, 264, 403, 605, 516, 613, 637, 227, 261, + 417, 501, 599, 490, 395, 575, 576, 329, 489, 296, + 203, 367, 625, 225, 476, 369, 243, 232, 581, 602, + 290, 453, 632, 214, 511, 591, 240, 480, 0, 0, + 640, 248, 500, 216, 588, 499, 391, 326, 327, 215, + 0, 454, 269, 294, 0, 0, 259, 412, 583, 584, + 257, 641, 229, 612, 221, 0, 611, 405, 578, 589, + 392, 381, 220, 587, 390, 380, 334, 353, 354, 281, + 307, 444, 373, 445, 306, 308, 401, 400, 402, 208, + 600, 0, 209, 0, 495, 601, 642, 449, 213, 235, + 236, 238, 0, 280, 284, 292, 295, 303, 304, 313, + 365, 416, 443, 439, 448, 0, 573, 594, 606, 617, + 623, 624, 626, 627, 628, 629, 630, 633, 631, 404, + 311, 491, 333, 371, 0, 0, 422, 469, 241, 598, + 492, 201, 0, 0, 0, 0, 255, 256, 0, 569, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 643, + 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 659, 660, 638, 502, 508, + 503, 504, 505, 506, 507, 0, 509, 0, 0, 0, + 0, 0, 0, 585, 586, 661, 382, 482, 595, 335, + 347, 350, 340, 359, 0, 360, 336, 337, 342, 344, + 345, 346, 351, 352, 356, 362, 250, 211, 388, 396, + 572, 312, 217, 218, 219, 518, 519, 520, 521, 609, + 610, 614, 206, 459, 460, 461, 462, 293, 604, 309, + 465, 464, 331, 332, 377, 446, 534, 536, 547, 551, + 553, 555, 561, 564, 535, 537, 548, 552, 554, 556, + 562, 565, 524, 526, 528, 530, 543, 542, 539, 567, + 568, 545, 550, 529, 541, 546, 559, 566, 563, 523, + 527, 531, 540, 558, 557, 538, 549, 560, 544, 532, + 525, 533, 0, 198, 222, 366, 0, 451, 289, 639, + 608, 603, 207, 224, 0, 263, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 200, 202, + 210, 223, 233, 237, 244, 262, 277, 279, 286, 299, + 310, 318, 319, 322, 328, 378, 384, 385, 386, 387, + 406, 407, 408, 411, 414, 415, 418, 420, 421, 424, + 428, 432, 433, 434, 436, 438, 440, 452, 457, 471, + 472, 473, 474, 475, 478, 479, 484, 485, 486, 487, + 488, 496, 497, 510, 580, 582, 597, 615, 621, 477, + 301, 302, 441, 442, 314, 315, 635, 636, 
300, 592, + 622, 590, 634, 616, 435, 376, 0, 0, 379, 282, + 305, 320, 0, 607, 498, 228, 463, 291, 252, 0, + 0, 212, 247, 231, 260, 275, 278, 324, 389, 397, + 426, 431, 297, 272, 245, 456, 242, 481, 513, 514, + 515, 517, 393, 267, 430, 394, 0, 374, 570, 571, + 316, 522, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 413, 0, 1873, 0, 0, 0, 0, + 0, 0, 271, 0, 0, 0, 0, 364, 268, 0, + 0, 427, 0, 205, 0, 483, 253, 375, 372, 577, + 283, 274, 270, 251, 317, 383, 425, 512, 419, 0, + 368, 0, 0, 493, 398, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 323, 249, 325, 204, 410, 494, 287, 0, 0, 0, + 0, 0, 711, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 239, 0, 0, 246, 0, 0, 0, 349, + 358, 357, 338, 339, 341, 343, 348, 355, 361, 0, + 0, 0, 0, 0, 266, 321, 273, 265, 574, 0, + 0, 0, 0, 0, 0, 0, 0, 230, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 276, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 298, 0, 399, 258, 0, 450, + 0, 0, 0, 618, 0, 0, 0, 0, 0, 0, + 0, 363, 0, 330, 199, 226, 0, 0, 409, 458, + 470, 0, 0, 0, 254, 0, 468, 423, 596, 234, + 285, 455, 429, 466, 437, 288, 0, 0, 467, 370, + 579, 447, 593, 619, 620, 264, 403, 605, 516, 613, + 637, 227, 261, 417, 501, 599, 490, 395, 575, 576, + 329, 489, 296, 203, 367, 625, 225, 476, 369, 243, + 232, 581, 602, 290, 453, 632, 214, 511, 591, 240, + 480, 0, 0, 640, 248, 500, 216, 588, 499, 391, + 326, 327, 215, 0, 454, 269, 294, 0, 0, 259, + 412, 583, 584, 257, 641, 229, 612, 221, 0, 611, + 405, 578, 589, 392, 381, 220, 587, 390, 380, 334, + 353, 354, 281, 307, 444, 373, 445, 306, 308, 401, + 400, 402, 208, 600, 0, 209, 0, 495, 601, 642, + 449, 213, 235, 236, 238, 0, 280, 284, 292, 295, + 303, 304, 313, 365, 416, 443, 439, 448, 0, 573, + 594, 606, 617, 623, 624, 626, 627, 628, 629, 630, + 633, 631, 404, 311, 491, 333, 371, 0, 0, 422, + 469, 241, 598, 
492, 201, 0, 0, 0, 0, 255, + 256, 0, 569, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 643, 644, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, + 638, 502, 508, 503, 504, 505, 506, 507, 0, 509, + 0, 0, 0, 0, 0, 0, 585, 586, 661, 382, + 482, 595, 335, 347, 350, 340, 359, 0, 360, 336, + 337, 342, 344, 345, 346, 351, 352, 356, 362, 250, + 211, 388, 396, 572, 312, 217, 218, 219, 518, 519, + 520, 521, 609, 610, 614, 206, 459, 460, 461, 462, + 293, 604, 309, 465, 464, 331, 332, 377, 446, 534, + 536, 547, 551, 553, 555, 561, 564, 535, 537, 548, + 552, 554, 556, 562, 565, 524, 526, 528, 530, 543, + 542, 539, 567, 568, 545, 550, 529, 541, 546, 559, + 566, 563, 523, 527, 531, 540, 558, 557, 538, 549, + 560, 544, 532, 525, 533, 0, 198, 222, 366, 0, + 451, 289, 639, 608, 603, 207, 224, 0, 263, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 200, 202, 210, 223, 233, 237, 244, 262, 277, + 279, 286, 299, 310, 318, 319, 322, 328, 378, 384, + 385, 386, 387, 406, 407, 408, 411, 414, 415, 418, + 420, 421, 424, 428, 432, 433, 434, 436, 438, 440, + 452, 457, 471, 472, 473, 474, 475, 478, 479, 484, + 485, 486, 487, 488, 496, 497, 510, 580, 582, 597, + 615, 621, 477, 301, 302, 441, 442, 314, 315, 635, + 636, 300, 592, 622, 590, 634, 616, 435, 376, 0, + 0, 379, 282, 305, 320, 0, 607, 498, 228, 463, + 291, 252, 0, 0, 212, 247, 231, 260, 275, 278, + 324, 389, 397, 426, 431, 297, 272, 245, 456, 242, + 481, 513, 514, 515, 517, 393, 267, 430, 394, 0, + 374, 570, 571, 316, 522, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 413, 0, 0, 0, + 0, 0, 0, 0, 0, 271, 0, 0, 0, 0, + 364, 268, 0, 0, 427, 0, 205, 0, 483, 253, + 375, 372, 577, 283, 274, 270, 251, 317, 383, 425, + 512, 419, 0, 368, 0, 0, 493, 398, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 323, 249, 325, 204, 410, 494, 287, + 0, 1848, 0, 0, 0, 711, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 239, 0, 0, 246, 0, + 0, 0, 349, 358, 357, 338, 339, 341, 343, 348, + 355, 361, 0, 0, 0, 0, 0, 266, 321, 273, + 265, 574, 0, 0, 0, 
0, 0, 0, 0, 0, + 230, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 276, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 298, 0, 399, + 258, 0, 450, 0, 0, 0, 618, 0, 0, 0, + 0, 0, 0, 0, 363, 0, 330, 199, 226, 0, + 0, 409, 458, 470, 0, 0, 0, 254, 0, 468, + 423, 596, 234, 285, 455, 429, 466, 437, 288, 0, + 0, 467, 370, 579, 447, 593, 619, 620, 264, 403, + 605, 516, 613, 637, 227, 261, 417, 501, 599, 490, + 395, 575, 576, 329, 489, 296, 203, 367, 625, 225, + 476, 369, 243, 232, 581, 602, 290, 453, 632, 214, + 511, 591, 240, 480, 0, 0, 640, 248, 500, 216, + 588, 499, 391, 326, 327, 215, 0, 454, 269, 294, + 0, 0, 259, 412, 583, 584, 257, 641, 229, 612, + 221, 0, 611, 405, 578, 589, 392, 381, 220, 587, + 390, 380, 334, 353, 354, 281, 307, 444, 373, 445, + 306, 308, 401, 400, 402, 208, 600, 0, 209, 0, + 495, 601, 642, 449, 213, 235, 236, 238, 0, 280, + 284, 292, 295, 303, 304, 313, 365, 416, 443, 439, + 448, 0, 573, 594, 606, 617, 623, 624, 626, 627, + 628, 629, 630, 633, 631, 404, 311, 491, 333, 371, + 0, 0, 422, 469, 241, 598, 492, 201, 0, 0, + 0, 0, 255, 256, 0, 569, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 643, 644, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 638, 502, 508, 503, 504, 505, 506, + 507, 0, 509, 0, 0, 0, 0, 0, 0, 585, + 586, 661, 382, 482, 595, 335, 347, 350, 340, 359, + 0, 360, 336, 337, 342, 344, 345, 346, 351, 352, + 356, 362, 250, 211, 388, 396, 572, 312, 217, 218, + 219, 518, 519, 520, 521, 609, 610, 614, 206, 459, + 460, 461, 462, 293, 604, 309, 465, 464, 331, 332, + 377, 446, 534, 536, 547, 551, 553, 555, 561, 564, + 535, 537, 548, 552, 554, 556, 562, 565, 524, 526, + 528, 530, 543, 542, 539, 567, 568, 545, 550, 529, + 541, 546, 559, 566, 563, 523, 527, 531, 540, 558, + 557, 538, 549, 560, 544, 532, 525, 533, 0, 
198, + 222, 366, 0, 451, 289, 639, 608, 603, 207, 224, + 0, 263, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 200, 202, 210, 223, 233, 237, + 244, 262, 277, 279, 286, 299, 310, 318, 319, 322, + 328, 378, 384, 385, 386, 387, 406, 407, 408, 411, + 414, 415, 418, 420, 421, 424, 428, 432, 433, 434, + 436, 438, 440, 452, 457, 471, 472, 473, 474, 475, + 478, 479, 484, 485, 486, 487, 488, 496, 497, 510, + 580, 582, 597, 615, 621, 477, 301, 302, 441, 442, + 314, 315, 635, 636, 300, 592, 622, 590, 634, 616, + 435, 376, 0, 0, 379, 282, 305, 320, 0, 607, + 498, 228, 463, 291, 252, 0, 0, 212, 247, 231, + 260, 275, 278, 324, 389, 397, 426, 431, 297, 272, + 245, 456, 242, 481, 513, 514, 515, 517, 393, 267, + 430, 394, 0, 374, 570, 571, 316, 522, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 413, + 0, 0, 0, 0, 0, 0, 0, 1746, 271, 0, + 0, 0, 0, 364, 268, 0, 0, 427, 0, 205, + 0, 483, 253, 375, 372, 577, 283, 274, 270, 251, + 317, 383, 425, 512, 419, 0, 368, 0, 0, 493, + 398, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 323, 249, 325, 204, + 410, 494, 287, 0, 0, 0, 0, 0, 196, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 239, 0, + 0, 246, 0, 0, 0, 349, 358, 357, 338, 339, + 341, 343, 348, 355, 361, 0, 0, 0, 0, 0, + 266, 321, 273, 265, 574, 0, 0, 0, 0, 0, + 0, 0, 0, 230, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 276, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 298, 0, 399, 258, 0, 450, 0, 0, 0, 618, + 0, 0, 0, 0, 0, 0, 0, 363, 0, 330, + 199, 226, 0, 0, 409, 458, 470, 0, 0, 0, + 254, 0, 468, 423, 596, 234, 285, 455, 429, 466, + 437, 288, 0, 0, 467, 370, 579, 447, 593, 619, + 620, 264, 403, 605, 516, 613, 637, 227, 261, 417, + 501, 599, 490, 395, 575, 576, 329, 489, 296, 203, + 367, 625, 225, 476, 369, 243, 232, 581, 602, 290, + 453, 632, 214, 511, 591, 240, 480, 0, 0, 
640, + 248, 500, 216, 588, 499, 391, 326, 327, 215, 0, + 454, 269, 294, 0, 0, 259, 412, 583, 584, 257, + 641, 229, 612, 221, 0, 611, 405, 578, 589, 392, + 381, 220, 587, 390, 380, 334, 353, 354, 281, 307, + 444, 373, 445, 306, 308, 401, 400, 402, 208, 600, + 0, 209, 0, 495, 601, 642, 449, 213, 235, 236, + 238, 0, 280, 284, 292, 295, 303, 304, 313, 365, + 416, 443, 439, 448, 0, 573, 594, 606, 617, 623, + 624, 626, 627, 628, 629, 630, 633, 631, 404, 311, + 491, 333, 371, 0, 0, 422, 469, 241, 598, 492, + 201, 0, 0, 0, 0, 255, 256, 0, 569, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 643, 644, + 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 659, 660, 638, 502, 508, 503, + 504, 505, 506, 507, 0, 509, 0, 0, 0, 0, + 0, 0, 585, 586, 661, 382, 482, 595, 335, 347, + 350, 340, 359, 0, 360, 336, 337, 342, 344, 345, + 346, 351, 352, 356, 362, 250, 211, 388, 396, 572, + 312, 217, 218, 219, 518, 519, 520, 521, 609, 610, + 614, 206, 459, 460, 461, 462, 293, 604, 309, 465, + 464, 331, 332, 377, 446, 534, 536, 547, 551, 553, + 555, 561, 564, 535, 537, 548, 552, 554, 556, 562, + 565, 524, 526, 528, 530, 543, 542, 539, 567, 568, + 545, 550, 529, 541, 546, 559, 566, 563, 523, 527, + 531, 540, 558, 557, 538, 549, 560, 544, 532, 525, + 533, 0, 198, 222, 366, 0, 451, 289, 639, 608, + 603, 207, 224, 0, 263, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 200, 202, 210, + 223, 233, 237, 244, 262, 277, 279, 286, 299, 310, + 318, 319, 322, 328, 378, 384, 385, 386, 387, 406, + 407, 408, 411, 414, 415, 418, 420, 421, 424, 428, + 432, 433, 434, 436, 438, 440, 452, 457, 471, 472, + 473, 474, 475, 478, 479, 484, 485, 486, 487, 488, + 496, 497, 510, 580, 582, 597, 615, 621, 477, 301, + 302, 441, 442, 314, 315, 635, 636, 300, 592, 622, + 590, 634, 616, 435, 376, 0, 0, 379, 282, 305, + 320, 0, 607, 498, 228, 463, 291, 252, 0, 0, + 212, 247, 231, 260, 275, 278, 324, 389, 397, 426, + 431, 297, 272, 245, 456, 242, 481, 513, 514, 515, + 517, 393, 267, 430, 394, 0, 374, 
570, 571, 316, + 522, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 413, 0, 0, 0, 0, 0, 0, 0, + 0, 271, 0, 0, 0, 0, 364, 268, 0, 0, + 427, 0, 205, 0, 483, 253, 375, 372, 577, 283, + 274, 270, 251, 317, 383, 425, 512, 419, 0, 368, + 0, 0, 493, 398, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 323, + 249, 325, 204, 410, 494, 287, 0, 96, 0, 0, + 0, 943, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 239, 0, 0, 246, 0, 0, 0, 349, 358, + 357, 338, 339, 341, 343, 348, 355, 361, 0, 0, + 0, 0, 0, 266, 321, 273, 265, 574, 0, 0, + 0, 0, 0, 0, 0, 0, 230, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 276, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 298, 0, 399, 258, 0, 450, 0, + 0, 0, 618, 0, 0, 0, 0, 0, 0, 0, + 363, 0, 330, 199, 226, 0, 0, 409, 458, 470, + 0, 0, 0, 254, 0, 468, 423, 596, 234, 285, + 455, 429, 466, 437, 288, 0, 0, 467, 370, 579, + 447, 593, 619, 620, 264, 403, 605, 516, 613, 637, + 227, 261, 417, 501, 599, 490, 395, 575, 576, 329, + 489, 296, 203, 367, 625, 225, 476, 369, 243, 232, + 581, 602, 290, 453, 632, 214, 511, 591, 240, 480, + 0, 0, 640, 248, 500, 216, 588, 499, 391, 326, + 327, 215, 0, 454, 269, 294, 0, 0, 259, 412, + 583, 584, 257, 641, 229, 612, 221, 0, 611, 405, + 578, 589, 392, 381, 220, 587, 390, 380, 334, 353, + 354, 281, 307, 444, 373, 445, 306, 308, 401, 400, + 402, 208, 600, 0, 209, 0, 495, 601, 642, 449, + 213, 235, 236, 238, 0, 280, 284, 292, 295, 303, + 304, 313, 365, 416, 443, 439, 448, 0, 573, 594, + 606, 617, 623, 624, 626, 627, 628, 629, 630, 633, + 631, 404, 311, 491, 333, 371, 0, 0, 422, 469, + 241, 598, 492, 201, 0, 0, 0, 0, 255, 256, + 0, 569, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 643, 644, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 659, 660, 638, + 502, 508, 503, 504, 505, 506, 507, 0, 509, 0, + 0, 0, 0, 0, 0, 585, 586, 661, 
382, 482, + 595, 335, 347, 350, 340, 359, 0, 360, 336, 337, + 342, 344, 345, 346, 351, 352, 356, 362, 250, 211, + 388, 396, 572, 312, 217, 218, 219, 518, 519, 520, + 521, 609, 610, 614, 206, 459, 460, 461, 462, 293, + 604, 309, 465, 464, 331, 332, 377, 446, 534, 536, + 547, 551, 553, 555, 561, 564, 535, 537, 548, 552, + 554, 556, 562, 565, 524, 526, 528, 530, 543, 542, + 539, 567, 568, 545, 550, 529, 541, 546, 559, 566, + 563, 523, 527, 531, 540, 558, 557, 538, 549, 560, + 544, 532, 525, 533, 0, 198, 222, 366, 0, 451, + 289, 639, 608, 603, 207, 224, 0, 263, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 200, 202, 210, 223, 233, 237, 244, 262, 277, 279, + 286, 299, 310, 318, 319, 322, 328, 378, 384, 385, + 386, 387, 406, 407, 408, 411, 414, 415, 418, 420, + 421, 424, 428, 432, 433, 434, 436, 438, 440, 452, + 457, 471, 472, 473, 474, 475, 478, 479, 484, 485, + 486, 487, 488, 496, 497, 510, 580, 582, 597, 615, + 621, 477, 301, 302, 441, 442, 314, 315, 635, 636, + 300, 592, 622, 590, 634, 616, 435, 376, 0, 0, + 379, 282, 305, 320, 0, 607, 498, 228, 463, 291, + 252, 0, 0, 212, 247, 231, 260, 275, 278, 324, + 389, 397, 426, 431, 297, 272, 245, 456, 242, 481, + 513, 514, 515, 517, 393, 267, 430, 394, 0, 374, + 570, 571, 316, 522, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 413, 0, 0, 0, 0, + 0, 0, 0, 0, 271, 0, 0, 0, 0, 364, + 268, 0, 0, 427, 0, 205, 0, 483, 253, 375, + 372, 577, 283, 274, 270, 251, 317, 383, 425, 512, + 419, 0, 368, 0, 0, 493, 398, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 323, 249, 325, 204, 410, 494, 287, 0, + 0, 0, 0, 0, 196, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 239, 0, 0, 246, 0, 0, + 0, 349, 358, 357, 338, 339, 341, 343, 348, 355, + 361, 0, 0, 0, 0, 0, 266, 321, 273, 265, + 574, 0, 0, 0, 0, 0, 0, 0, 0, 230, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 276, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1434, 0, 298, 0, 399, 258, + 0, 450, 0, 0, 0, 618, 0, 0, 0, 0, + 0, 0, 0, 363, 0, 330, 199, 226, 0, 0, + 409, 458, 470, 0, 0, 0, 254, 0, 468, 423, + 596, 234, 285, 455, 429, 466, 437, 288, 0, 0, + 467, 370, 579, 447, 593, 619, 620, 264, 403, 605, + 516, 613, 637, 227, 261, 417, 501, 599, 490, 395, + 575, 576, 329, 489, 296, 203, 367, 625, 225, 476, + 369, 243, 232, 581, 602, 290, 453, 632, 214, 511, + 591, 240, 480, 0, 0, 640, 248, 500, 216, 588, + 499, 391, 326, 327, 215, 0, 454, 269, 294, 0, + 0, 259, 412, 583, 584, 257, 641, 229, 612, 221, + 0, 611, 405, 578, 589, 392, 381, 220, 587, 390, + 380, 334, 353, 354, 281, 307, 444, 373, 445, 306, + 308, 401, 400, 402, 208, 600, 0, 209, 0, 495, + 601, 642, 449, 213, 235, 236, 238, 0, 280, 284, + 292, 295, 303, 304, 313, 365, 416, 443, 439, 448, + 0, 573, 594, 606, 617, 623, 624, 626, 627, 628, + 629, 630, 633, 631, 404, 311, 491, 333, 371, 0, + 0, 422, 469, 241, 598, 492, 201, 0, 0, 0, + 0, 255, 256, 0, 569, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 643, 644, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, + 659, 660, 638, 502, 508, 503, 504, 505, 506, 507, + 0, 509, 0, 0, 0, 0, 0, 0, 585, 586, + 661, 382, 482, 595, 335, 347, 350, 340, 359, 0, + 360, 336, 337, 342, 344, 345, 346, 351, 352, 356, + 362, 250, 211, 388, 396, 572, 312, 217, 218, 219, + 518, 519, 520, 521, 609, 610, 614, 206, 459, 460, + 461, 462, 293, 604, 309, 465, 464, 331, 332, 377, + 446, 534, 536, 547, 551, 553, 555, 561, 564, 535, + 537, 548, 552, 554, 556, 562, 565, 524, 526, 528, + 530, 543, 542, 539, 567, 568, 545, 550, 529, 541, + 546, 559, 566, 563, 523, 527, 531, 540, 558, 557, + 538, 549, 560, 544, 532, 525, 533, 0, 198, 222, + 366, 0, 451, 289, 639, 608, 603, 207, 224, 0, + 263, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 200, 202, 210, 223, 233, 237, 244, + 262, 277, 279, 286, 299, 310, 318, 319, 322, 328, + 378, 384, 385, 386, 387, 
406, 407, 408, 411, 414, + 415, 418, 420, 421, 424, 428, 432, 433, 434, 436, + 438, 440, 452, 457, 471, 472, 473, 474, 475, 478, + 479, 484, 485, 486, 487, 488, 496, 497, 510, 580, + 582, 597, 615, 621, 477, 301, 302, 441, 442, 314, + 315, 635, 636, 1433, 592, 622, 590, 634, 616, 435, + 376, 0, 0, 379, 282, 305, 320, 0, 607, 498, + 228, 463, 291, 252, 0, 0, 212, 247, 231, 260, + 275, 278, 324, 389, 397, 426, 431, 297, 272, 245, + 456, 242, 481, 513, 514, 515, 517, 393, 267, 430, + 394, 0, 374, 570, 571, 316, 522, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 413, 0, + 0, 0, 0, 0, 0, 0, 0, 271, 0, 0, + 0, 0, 364, 268, 0, 0, 427, 0, 205, 0, + 483, 253, 375, 372, 577, 283, 274, 270, 251, 317, + 383, 425, 512, 419, 0, 368, 0, 0, 493, 398, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 323, 249, 325, 204, 410, + 494, 287, 0, 0, 0, 0, 0, 196, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 239, 0, 0, + 246, 0, 0, 0, 349, 358, 357, 338, 339, 341, + 343, 348, 355, 361, 0, 0, 0, 0, 0, 266, + 321, 273, 265, 574, 0, 0, 0, 0, 0, 0, + 0, 0, 230, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 276, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 298, + 0, 399, 258, 0, 450, 0, 0, 0, 618, 0, + 0, 0, 0, 0, 0, 0, 363, 0, 330, 199, + 226, 0, 0, 409, 458, 470, 0, 0, 0, 254, + 0, 468, 423, 596, 234, 285, 455, 429, 466, 437, + 288, 0, 0, 467, 370, 579, 447, 593, 619, 620, + 264, 403, 605, 516, 613, 637, 227, 261, 417, 501, + 599, 490, 395, 575, 576, 329, 489, 296, 203, 367, + 625, 225, 476, 369, 243, 232, 581, 602, 290, 453, + 632, 214, 511, 591, 240, 480, 0, 0, 640, 248, + 500, 216, 588, 499, 391, 326, 327, 215, 0, 454, + 269, 294, 0, 0, 259, 412, 583, 584, 257, 641, + 229, 612, 221, 0, 611, 405, 578, 589, 392, 381, + 220, 587, 390, 380, 334, 353, 354, 281, 307, 444, + 373, 445, 306, 308, 401, 400, 402, 208, 
600, 0, + 209, 0, 495, 601, 642, 449, 213, 235, 236, 238, + 0, 280, 284, 292, 295, 303, 304, 313, 365, 416, + 443, 439, 448, 0, 573, 594, 606, 617, 623, 624, + 626, 627, 628, 629, 630, 633, 631, 404, 311, 491, + 333, 371, 0, 0, 422, 469, 241, 598, 492, 201, + 0, 0, 0, 0, 255, 256, 0, 569, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 643, 644, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 659, 660, 638, 502, 508, 503, 504, + 505, 506, 507, 0, 509, 0, 0, 0, 0, 0, + 0, 585, 586, 661, 382, 482, 595, 335, 347, 350, + 340, 359, 0, 360, 336, 337, 342, 344, 345, 346, + 351, 352, 356, 362, 250, 211, 388, 396, 572, 312, + 217, 218, 219, 518, 519, 520, 521, 609, 610, 614, + 206, 459, 460, 461, 462, 293, 604, 309, 465, 464, + 331, 332, 377, 446, 534, 536, 547, 551, 553, 555, + 561, 564, 535, 537, 548, 552, 554, 556, 562, 565, + 524, 526, 528, 530, 543, 542, 539, 567, 568, 545, + 550, 529, 541, 546, 559, 566, 563, 523, 527, 531, + 540, 558, 557, 538, 549, 560, 544, 532, 525, 533, + 0, 198, 222, 366, 0, 451, 289, 639, 608, 603, + 207, 224, 0, 263, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1031, 0, 0, 0, 200, 202, 210, 223, + 233, 237, 244, 262, 277, 279, 286, 299, 310, 318, + 319, 322, 328, 378, 384, 385, 386, 387, 406, 407, + 408, 411, 414, 415, 418, 420, 421, 424, 428, 432, + 433, 434, 436, 438, 440, 452, 457, 471, 472, 473, + 474, 475, 478, 479, 484, 485, 486, 487, 488, 496, + 497, 510, 580, 582, 597, 615, 621, 477, 301, 302, + 441, 442, 314, 315, 635, 636, 300, 592, 622, 590, + 634, 616, 435, 376, 0, 0, 379, 282, 305, 320, + 0, 607, 498, 228, 463, 291, 252, 0, 0, 212, + 247, 231, 260, 275, 278, 324, 389, 397, 426, 431, + 297, 272, 245, 456, 242, 481, 513, 514, 515, 517, + 393, 267, 430, 394, 0, 374, 570, 571, 316, 522, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 413, 0, 0, 0, 0, 0, 0, 0, 0, + 271, 0, 0, 0, 0, 364, 268, 0, 0, 427, + 0, 205, 0, 483, 253, 375, 372, 577, 283, 274, + 270, 251, 317, 383, 425, 512, 419, 0, 368, 0, + 0, 493, 398, 0, 0, 0, 0, 0, 
0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 323, 249, + 325, 204, 410, 494, 287, 0, 0, 0, 0, 0, + 196, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 239, 0, 0, 246, 0, 0, 0, 349, 358, 357, + 338, 339, 341, 343, 348, 355, 361, 0, 0, 0, + 0, 0, 266, 321, 273, 265, 574, 0, 0, 0, + 0, 0, 0, 0, 0, 230, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 276, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 298, 0, 399, 258, 0, 450, 0, 664, + 0, 618, 0, 0, 0, 0, 0, 0, 0, 363, + 0, 330, 199, 226, 0, 0, 409, 458, 470, 0, + 0, 0, 254, 0, 468, 423, 596, 234, 285, 455, + 429, 466, 437, 288, 0, 0, 467, 370, 579, 447, + 593, 619, 620, 264, 403, 605, 516, 613, 637, 227, + 261, 417, 501, 599, 490, 395, 575, 576, 329, 489, + 296, 203, 367, 625, 225, 476, 369, 243, 232, 581, + 602, 290, 453, 632, 214, 511, 591, 240, 480, 0, + 0, 640, 248, 500, 216, 588, 499, 391, 326, 327, + 215, 0, 454, 269, 294, 0, 0, 259, 412, 583, + 584, 257, 641, 229, 612, 221, 0, 611, 405, 578, + 589, 392, 381, 220, 587, 390, 380, 334, 353, 354, + 281, 307, 444, 373, 445, 306, 308, 401, 400, 402, + 208, 600, 0, 209, 0, 495, 601, 642, 449, 213, + 235, 236, 238, 0, 280, 284, 292, 295, 303, 304, + 313, 365, 416, 443, 439, 448, 0, 573, 594, 606, + 617, 623, 624, 626, 627, 628, 629, 630, 633, 631, + 404, 311, 491, 333, 371, 0, 0, 422, 469, 241, + 598, 492, 201, 0, 0, 0, 0, 255, 256, 0, + 569, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, + 653, 654, 655, 656, 657, 658, 659, 660, 638, 502, + 508, 503, 504, 505, 506, 507, 0, 509, 0, 0, + 0, 0, 0, 0, 585, 586, 661, 382, 482, 595, + 335, 347, 350, 340, 359, 0, 360, 336, 337, 342, + 344, 345, 346, 351, 352, 356, 362, 250, 211, 388, + 396, 572, 312, 217, 218, 219, 518, 519, 520, 521, + 609, 610, 614, 206, 459, 460, 461, 462, 293, 604, + 309, 465, 464, 331, 332, 377, 
446, 534, 536, 547, + 551, 553, 555, 561, 564, 535, 537, 548, 552, 554, + 556, 562, 565, 524, 526, 528, 530, 543, 542, 539, + 567, 568, 545, 550, 529, 541, 546, 559, 566, 563, + 523, 527, 531, 540, 558, 557, 538, 549, 560, 544, + 532, 525, 533, 0, 198, 222, 366, 0, 451, 289, + 639, 608, 603, 207, 224, 0, 263, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 200, + 202, 210, 223, 233, 237, 244, 262, 277, 279, 286, + 299, 310, 318, 319, 322, 328, 378, 384, 385, 386, + 387, 406, 407, 408, 411, 414, 415, 418, 420, 421, + 424, 428, 432, 433, 434, 436, 438, 440, 452, 457, + 471, 472, 473, 474, 475, 478, 479, 484, 485, 486, + 487, 488, 496, 497, 510, 580, 582, 597, 615, 621, + 477, 301, 302, 441, 442, 314, 315, 635, 636, 300, + 592, 622, 590, 634, 616, 435, 376, 0, 0, 379, + 282, 305, 320, 0, 607, 498, 228, 463, 291, 252, + 0, 0, 212, 247, 231, 260, 275, 278, 324, 389, + 397, 426, 431, 297, 272, 245, 456, 242, 481, 513, + 514, 515, 517, 393, 267, 430, 394, 0, 374, 570, + 571, 316, 522, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 413, 0, 0, 0, 0, 0, + 0, 0, 0, 271, 0, 0, 0, 0, 364, 268, + 0, 0, 427, 0, 205, 0, 483, 253, 375, 372, + 577, 283, 274, 270, 251, 317, 383, 425, 512, 419, + 0, 368, 0, 0, 493, 398, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 323, 249, 325, 204, 410, 494, 287, 0, 0, + 0, 0, 0, 711, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 239, 0, 0, 246, 0, 0, 0, + 349, 358, 357, 338, 339, 341, 343, 348, 355, 361, + 0, 0, 0, 0, 0, 266, 321, 273, 265, 574, + 0, 0, 0, 0, 0, 0, 0, 0, 230, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 276, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 298, 0, 399, 258, 0, + 450, 0, 0, 0, 618, 0, 0, 0, 0, 0, + 0, 0, 363, 0, 330, 199, 226, 0, 0, 409, + 458, 470, 0, 0, 0, 254, 0, 468, 423, 596, + 234, 285, 455, 429, 466, 437, 
288, 0, 0, 467, + 370, 579, 447, 593, 619, 620, 264, 403, 605, 516, + 613, 637, 227, 261, 417, 501, 599, 490, 395, 575, + 576, 329, 489, 296, 203, 367, 625, 225, 476, 369, + 243, 232, 581, 602, 290, 453, 632, 214, 511, 591, + 240, 480, 0, 0, 640, 248, 500, 216, 588, 499, + 391, 326, 327, 215, 0, 454, 269, 294, 0, 0, + 259, 412, 583, 584, 257, 641, 229, 612, 221, 0, + 611, 405, 578, 589, 392, 381, 220, 587, 390, 380, + 334, 353, 354, 281, 307, 444, 373, 445, 306, 308, + 401, 400, 402, 208, 600, 0, 209, 0, 495, 601, + 642, 449, 213, 235, 236, 238, 0, 280, 284, 292, + 295, 303, 304, 313, 365, 416, 443, 439, 448, 0, + 573, 594, 606, 617, 623, 624, 626, 627, 628, 629, + 630, 633, 631, 404, 311, 491, 333, 371, 0, 0, + 422, 469, 241, 598, 492, 201, 0, 0, 0, 0, + 255, 256, 0, 569, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, + 660, 638, 502, 508, 503, 504, 505, 506, 507, 0, + 509, 0, 0, 0, 0, 0, 0, 585, 586, 661, + 382, 482, 595, 335, 347, 350, 340, 359, 0, 360, + 336, 337, 342, 344, 345, 346, 351, 352, 356, 362, + 250, 211, 388, 396, 572, 312, 217, 218, 219, 518, + 519, 520, 521, 609, 610, 614, 206, 459, 460, 461, + 462, 293, 604, 309, 465, 464, 331, 332, 377, 446, + 534, 536, 547, 551, 553, 555, 561, 564, 535, 537, + 548, 552, 554, 556, 562, 565, 524, 526, 528, 530, + 543, 542, 539, 567, 568, 545, 550, 529, 541, 546, + 559, 566, 563, 523, 527, 531, 540, 558, 557, 538, + 549, 560, 544, 532, 525, 533, 0, 198, 222, 366, + 0, 451, 289, 639, 608, 603, 207, 224, 0, 263, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 200, 202, 210, 223, 233, 237, 244, 262, + 277, 279, 286, 299, 310, 318, 319, 322, 328, 378, + 384, 385, 386, 387, 4059, 407, 408, 411, 414, 415, + 418, 420, 421, 424, 428, 432, 433, 434, 436, 438, + 440, 452, 457, 471, 472, 473, 474, 475, 478, 479, + 484, 485, 486, 487, 488, 496, 497, 510, 580, 582, + 597, 615, 621, 477, 301, 302, 441, 442, 314, 315, + 635, 636, 300, 
592, 622, 590, 634, 616, 435, 376, + 0, 0, 379, 282, 305, 320, 0, 607, 498, 228, + 463, 291, 252, 0, 0, 212, 247, 231, 260, 275, + 278, 324, 389, 397, 426, 431, 297, 272, 245, 456, + 242, 481, 513, 514, 515, 517, 393, 267, 430, 394, + 0, 374, 570, 571, 316, 522, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 413, 0, 0, + 0, 0, 0, 0, 0, 0, 271, 0, 0, 0, + 0, 364, 268, 0, 0, 427, 0, 205, 0, 483, + 253, 375, 372, 577, 283, 274, 270, 251, 317, 383, + 425, 512, 419, 0, 368, 0, 0, 493, 398, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 323, 249, 325, 204, 410, 494, + 287, 0, 0, 0, 0, 0, 711, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 239, 0, 0, 246, + 0, 0, 0, 349, 358, 357, 338, 339, 341, 343, + 348, 355, 361, 0, 0, 0, 0, 0, 266, 321, + 273, 265, 574, 0, 0, 0, 0, 0, 0, 0, + 0, 230, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 276, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 298, 0, + 399, 258, 0, 450, 0, 0, 0, 618, 0, 0, + 0, 0, 0, 0, 0, 363, 0, 330, 199, 226, + 0, 0, 409, 458, 470, 0, 0, 0, 254, 0, + 468, 423, 596, 234, 285, 455, 429, 466, 437, 288, + 0, 0, 467, 370, 579, 447, 593, 619, 620, 264, + 403, 605, 516, 613, 637, 227, 261, 417, 501, 599, + 490, 395, 575, 576, 329, 489, 296, 203, 367, 625, + 225, 476, 369, 243, 232, 581, 602, 290, 453, 632, + 214, 511, 591, 240, 480, 0, 0, 640, 248, 500, + 216, 588, 499, 391, 326, 327, 215, 0, 454, 269, + 294, 0, 0, 259, 412, 583, 584, 257, 641, 229, + 612, 221, 0, 611, 405, 578, 589, 392, 381, 220, + 587, 390, 380, 334, 353, 354, 281, 307, 444, 373, + 445, 306, 308, 401, 400, 402, 208, 600, 0, 209, + 0, 495, 601, 642, 449, 213, 235, 236, 238, 0, + 280, 284, 292, 295, 303, 304, 313, 365, 416, 443, + 439, 448, 0, 573, 594, 606, 617, 623, 624, 626, + 627, 628, 629, 630, 633, 631, 404, 311, 491, 333, + 371, 0, 0, 422, 469, 241, 598, 492, 201, 
0, + 0, 0, 0, 255, 256, 0, 569, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 643, 644, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 638, 502, 508, 503, 504, 505, + 506, 507, 0, 509, 0, 0, 0, 0, 0, 0, + 585, 586, 661, 382, 482, 595, 335, 347, 350, 340, + 359, 0, 360, 336, 337, 342, 344, 345, 346, 351, + 352, 356, 362, 250, 211, 388, 396, 572, 312, 217, + 218, 219, 518, 519, 520, 521, 609, 610, 614, 206, + 459, 460, 461, 462, 293, 604, 309, 465, 464, 331, + 332, 377, 446, 534, 536, 547, 551, 553, 555, 561, + 564, 535, 537, 548, 552, 554, 556, 562, 565, 524, + 526, 528, 530, 543, 542, 539, 567, 568, 545, 550, + 529, 541, 546, 559, 566, 563, 523, 527, 531, 540, + 558, 557, 538, 549, 560, 544, 532, 525, 533, 0, + 198, 222, 366, 0, 451, 289, 639, 608, 603, 207, + 224, 0, 263, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 200, 202, 210, 223, 233, + 237, 244, 262, 277, 279, 286, 299, 310, 318, 319, + 322, 328, 378, 384, 385, 386, 387, 406, 407, 408, + 411, 414, 415, 418, 420, 421, 424, 428, 432, 433, + 434, 436, 438, 440, 452, 457, 471, 472, 473, 474, + 475, 478, 479, 484, 485, 486, 487, 488, 496, 497, + 510, 580, 582, 597, 615, 621, 477, 301, 302, 441, + 442, 314, 315, 635, 636, 300, 592, 622, 590, 634, + 616, 435, 376, 0, 0, 379, 282, 305, 320, 0, + 607, 498, 228, 463, 291, 252, 0, 0, 212, 247, + 231, 260, 275, 278, 324, 389, 397, 426, 431, 297, + 272, 245, 456, 242, 481, 513, 514, 515, 517, 393, + 267, 430, 394, 0, 374, 570, 571, 316, 522, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 413, 0, 0, 0, 0, 0, 0, 0, 0, 271, + 0, 0, 0, 0, 364, 268, 0, 0, 427, 0, + 205, 0, 483, 253, 375, 372, 577, 283, 274, 270, + 251, 317, 383, 425, 512, 419, 0, 368, 0, 0, + 493, 398, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 323, 249, 325, + 204, 410, 494, 287, 0, 0, 0, 0, 0, 943, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 239, + 0, 0, 246, 0, 0, 0, 349, 358, 357, 338, + 339, 341, 343, 348, 355, 361, 0, 0, 0, 0, + 0, 266, 321, 273, 265, 574, 0, 0, 0, 0, + 0, 0, 0, 
0, 230, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 276, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 298, 0, 399, 258, 0, 450, 0, 0, 0, + 618, 0, 0, 0, 0, 0, 0, 0, 363, 0, + 330, 199, 226, 0, 0, 409, 458, 470, 0, 0, + 0, 254, 0, 468, 423, 596, 234, 285, 455, 429, + 466, 437, 288, 0, 0, 467, 370, 579, 447, 593, + 619, 620, 264, 403, 605, 516, 613, 637, 227, 261, + 417, 501, 599, 490, 395, 575, 576, 329, 489, 296, + 203, 367, 625, 225, 476, 369, 243, 232, 581, 602, + 290, 453, 632, 214, 511, 591, 240, 480, 0, 0, + 640, 248, 500, 216, 588, 499, 391, 326, 327, 215, + 0, 454, 269, 294, 0, 0, 259, 412, 583, 584, + 257, 641, 229, 612, 221, 0, 611, 405, 578, 589, + 392, 381, 220, 587, 390, 380, 334, 353, 354, 281, + 307, 444, 373, 445, 306, 308, 401, 400, 402, 208, + 600, 0, 209, 0, 495, 601, 642, 449, 213, 235, + 236, 238, 0, 280, 284, 292, 295, 303, 304, 313, + 365, 416, 443, 439, 448, 0, 573, 594, 606, 617, + 623, 624, 626, 627, 628, 629, 630, 633, 631, 404, + 311, 491, 333, 371, 0, 0, 422, 469, 241, 598, + 492, 201, 0, 0, 0, 0, 255, 256, 0, 569, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 643, + 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 659, 660, 638, 502, 508, + 503, 504, 505, 506, 507, 0, 509, 0, 0, 0, + 0, 0, 0, 585, 586, 661, 382, 482, 595, 335, + 347, 350, 340, 359, 0, 360, 336, 337, 342, 344, + 345, 346, 351, 352, 356, 362, 250, 211, 388, 396, + 572, 312, 217, 218, 219, 518, 519, 520, 521, 609, + 610, 614, 206, 459, 460, 461, 462, 293, 604, 309, + 465, 464, 331, 332, 377, 446, 534, 536, 547, 551, + 553, 555, 561, 564, 535, 537, 548, 552, 554, 556, + 562, 565, 524, 526, 528, 530, 543, 542, 539, 567, + 568, 545, 550, 529, 541, 546, 559, 566, 563, 523, + 527, 531, 540, 558, 557, 538, 549, 560, 544, 532, + 525, 533, 0, 198, 222, 366, 
0, 451, 289, 639, + 608, 603, 207, 224, 0, 263, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 200, 202, + 210, 223, 233, 237, 244, 262, 277, 279, 286, 299, + 310, 318, 319, 322, 328, 378, 384, 385, 386, 387, + 406, 407, 408, 411, 414, 415, 418, 420, 421, 424, + 428, 432, 433, 434, 436, 438, 440, 452, 457, 471, + 472, 473, 474, 475, 478, 479, 484, 485, 486, 487, + 488, 496, 497, 510, 580, 582, 597, 615, 621, 477, + 301, 302, 441, 442, 314, 315, 635, 636, 300, 592, + 622, 590, 634, 616, 435, 376, 0, 0, 379, 282, + 305, 320, 0, 607, 498, 228, 463, 291, 252, 0, + 0, 212, 247, 231, 260, 275, 278, 324, 389, 397, + 426, 431, 297, 272, 245, 456, 242, 481, 513, 514, + 515, 517, 393, 267, 430, 394, 0, 374, 570, 571, + 316, 522, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 413, 0, 0, 0, 0, 0, 0, + 0, 0, 271, 0, 0, 0, 0, 364, 268, 0, + 0, 427, 0, 205, 0, 483, 253, 375, 372, 577, + 283, 274, 270, 251, 317, 383, 425, 512, 419, 0, + 368, 0, 0, 493, 398, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 323, 249, 325, 204, 410, 494, 287, 0, 0, 0, + 0, 0, 196, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 239, 0, 0, 246, 0, 0, 0, 349, + 358, 357, 338, 339, 341, 343, 348, 355, 361, 0, + 0, 0, 0, 0, 266, 321, 273, 265, 574, 0, + 0, 0, 0, 0, 0, 0, 0, 230, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 276, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 298, 0, 399, 258, 0, 450, + 0, 0, 0, 618, 0, 0, 0, 0, 0, 0, + 0, 363, 0, 330, 199, 226, 0, 0, 409, 458, + 470, 0, 0, 0, 254, 0, 468, 423, 596, 234, + 285, 455, 429, 466, 437, 288, 0, 0, 467, 370, + 579, 447, 593, 619, 620, 264, 403, 605, 516, 613, + 637, 227, 261, 417, 501, 599, 490, 395, 575, 576, + 329, 489, 296, 203, 367, 625, 225, 476, 369, 243, + 232, 581, 602, 290, 453, 632, 214, 511, 591, 240, + 480, 0, 0, 640, 248, 500, 216, 
588, 499, 391, + 326, 327, 215, 0, 454, 269, 294, 0, 0, 259, + 412, 583, 584, 257, 641, 229, 612, 221, 0, 611, + 405, 578, 589, 392, 381, 220, 587, 390, 380, 334, + 353, 354, 281, 307, 444, 373, 445, 306, 308, 401, + 400, 402, 208, 600, 0, 209, 0, 495, 601, 642, + 449, 213, 235, 236, 238, 0, 280, 284, 292, 295, + 303, 304, 313, 365, 416, 443, 439, 448, 0, 573, + 594, 606, 617, 623, 624, 626, 627, 628, 629, 630, + 633, 631, 404, 311, 491, 333, 371, 0, 0, 422, + 469, 241, 598, 492, 201, 0, 0, 0, 0, 255, + 256, 0, 569, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 643, 644, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, + 638, 502, 508, 503, 504, 505, 506, 507, 0, 509, + 0, 0, 0, 0, 0, 0, 585, 586, 661, 382, + 482, 595, 335, 347, 350, 340, 359, 0, 360, 336, + 337, 342, 344, 345, 346, 351, 352, 356, 362, 250, + 211, 388, 396, 572, 312, 217, 218, 219, 518, 519, + 520, 521, 609, 610, 614, 206, 459, 460, 461, 462, + 293, 604, 309, 465, 464, 331, 332, 377, 446, 534, + 536, 547, 551, 553, 555, 561, 564, 535, 537, 548, + 552, 554, 556, 562, 565, 524, 526, 528, 530, 543, + 542, 539, 567, 568, 545, 550, 529, 541, 546, 559, + 566, 563, 523, 527, 531, 540, 558, 557, 538, 549, + 560, 544, 532, 525, 533, 0, 198, 222, 366, 0, + 451, 289, 639, 608, 603, 207, 224, 0, 263, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 200, 202, 210, 223, 233, 237, 244, 262, 277, + 279, 286, 299, 310, 318, 319, 322, 328, 378, 384, + 385, 386, 387, 406, 407, 408, 411, 414, 415, 418, + 420, 421, 424, 428, 432, 433, 434, 436, 438, 440, + 452, 457, 471, 472, 473, 474, 475, 478, 479, 484, + 485, 486, 487, 488, 496, 497, 510, 580, 582, 597, + 615, 621, 477, 301, 302, 441, 442, 314, 315, 635, + 636, 300, 592, 622, 590, 634, 616, 435, 376, 0, + 0, 379, 282, 305, 320, 0, 607, 498, 228, 463, + 291, 252, 0, 0, 212, 247, 231, 260, 275, 278, + 324, 389, 397, 426, 431, 297, 272, 245, 456, 242, + 481, 513, 514, 515, 517, 393, 267, 430, 0, 0, + 374, 570, 571, 316, } var 
yyPact = [...]int{ - -1000, -1000, 4496, -1000, -518, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 454, -1000, -527, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 2261, 2738, -1000, -1000, -1000, -1000, 2109, 2462, -1000, - 864, 1865, -1000, 2203, 294, -1000, 54176, 611, -1000, 51328, - 603, 715, 303, 35664, -1000, 262, -1000, 254, 52752, 258, - -1000, -1000, -1000, -1000, -379, 21422, 88, 87, 54176, -1000, - -1000, -1000, -1000, 2386, 1833, -1000, 446, -1000, -1000, -1000, - -1000, -1000, -1000, 50616, -1000, 1001, -1000, -1000, 2217, 2191, - 2442, 756, 2101, -1000, 2299, 1833, -1000, 21422, 2377, 2276, - 20710, 20710, 563, -360, -1000, -1000, 141, -1000, -1000, 30680, - 54176, 38512, 524, -1000, 2203, -1000, -1000, -1000, 140, -1000, - 445, 1759, -1000, 1756, -1000, 841, 780, 473, 609, 608, - 472, 471, 470, 468, 466, 464, 459, 458, 480, -1000, - 775, 775, -168, -183, 1322, 538, 550, 550, 931, 579, - 2170, 2169, -1000, -1000, 775, 775, 775, 433, 775, 775, - 775, 775, 420, 417, 775, 775, 775, 775, 775, 775, - 775, 775, 775, 775, 775, 775, 775, 775, 775, 775, - 775, 362, 2203, 366, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 2284, 2379, -1000, -1000, -1000, -1000, 2229, 2529, + -1000, 902, 1950, -1000, 2278, 330, -1000, 54931, 605, -1000, + 52039, -370, 733, 235, 36133, -1000, 229, -1000, 215, 53485, + 224, -1000, -1000, -1000, -1000, -370, 21671, 98, 79, 54931, + -1000, -1000, -1000, -1000, -348, 2460, 1952, -1000, 406, -1000, + -1000, -1000, -1000, -1000, -1000, 51316, -1000, 1016, -1000, -1000, + 2304, 2299, 2513, 775, 2208, -1000, 2381, 1952, -1000, 21671, + 2451, 2343, 20948, 20948, 524, -352, -1000, -1000, 286, -1000, + -1000, 31072, 54931, 39025, 413, -1000, 2278, -1000, -1000, -1000, + 105, -1000, 404, 1866, -1000, 1862, -1000, 849, 
868, 432, + 559, 545, 431, 430, 429, 425, 416, 412, 403, 398, + 442, -1000, 822, 822, -178, -179, 628, 540, 510, 510, + 918, 555, 2250, 2243, -1000, -1000, 822, 822, 822, 395, + 822, 822, 822, 822, 342, 328, 822, 822, 822, 822, + 822, 822, 822, 822, 822, 822, 822, 822, 822, 822, + 822, 822, 822, 456, 2278, 316, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, @@ -7197,69 +7296,69 @@ var yyPact = [...]int{ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 54176, - 316, 54176, -1000, 657, 54176, -383, 918, 918, 150, 918, - 918, 918, 918, 278, 797, 80, -1000, 245, 375, 213, - 371, 909, 439, -1000, -1000, 360, 909, 1664, -1000, 763, - 367, 193, -1000, 918, 918, -1000, 14277, 186, 14277, 14277, - -1000, 2190, -1000, -1000, -1000, -1000, -1000, 1092, -1000, -1000, - -1000, -1000, -2, 577, -1000, -1000, -1000, -1000, 52752, 49904, - 301, -1000, -1000, 45, -1000, -1000, 1582, 1080, 21422, 1287, - 748, -1000, -1000, 1128, 724, -1000, -1000, -1000, -1000, -1000, - 623, -1000, 23558, 23558, 23558, 23558, -1000, -1000, 1769, 49192, - 1769, 1769, 23558, 1769, 23558, 1769, 1769, 1769, 21422, 1769, - 1769, 1769, 1769, -1000, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 
1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - -1000, -1000, -1000, -1000, 1769, 656, 1769, 1769, 1769, 1769, - 1769, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1769, 1769, - 1769, 1769, 1769, 1769, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 26406, 1433, - 1429, 1418, -1000, 18574, 1769, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 54931, 269, 54931, -1000, 664, 604, -1000, -1000, + -374, 929, 929, 107, 929, 929, 929, 929, 230, 730, + 78, -1000, 225, 291, 193, 305, 931, 575, -1000, -1000, + 297, 931, 1737, -1000, 797, 301, 211, -1000, 929, 929, + -1000, 14416, 183, 14416, 14416, -1000, 2251, -1000, -1000, -1000, + -1000, -1000, 1224, -1000, -1000, -1000, -1000, 5, 550, -1000, + -1000, -1000, -1000, 53485, 50593, 294, -1000, -1000, 45, 1639, + 1132, 21671, 1318, 772, -1000, -1000, 1408, 749, -1000, -1000, + -1000, -1000, -1000, 626, -1000, 23840, 23840, 23840, 23840, -1000, + -1000, 1870, 49870, 1870, 1870, 23840, 1870, 23840, 1870, 1870, + 1870, 21671, 1870, 1870, 1870, 1870, -1000, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 
1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, -1000, -1000, -1000, -1000, 1870, 657, + 1870, 1870, 1870, 1870, 1870, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 1870, 1870, 1870, 1870, 1870, 1870, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 26732, 1416, 1396, 1394, -1000, 18779, 1870, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 54176, -1000, 1769, 282, 52752, 52752, - 449, 2299, 1833, -1000, 2386, 2313, 446, -1000, 2417, 1513, - 1425, 1235, 1833, 1725, 54176, -1000, 1784, -1000, -1000, -1000, - -1000, 2010, 1281, 1654, -1000, -1000, -1000, -1000, 1965, 21422, - -1000, -1000, 2421, -1000, 27831, 655, 2420, 48480, -1000, -1000, - -206, -1000, 563, 563, 1740, 482, 41, -1000, -1000, -1000, - -1000, 810, 34952, -1000, -1000, -1000, -1000, -1000, 1661, 54176, - -1000, -1000, 4563, 1138, -1000, 1862, -1000, 1638, -1000, 1804, - 21422, 1884, 602, 1138, 592, 591, 578, -1000, -24, -1000, - -1000, -1000, -1000, -1000, -1000, 775, 775, 775, -1000, 479, - 2371, 294, 4756, -1000, -1000, -1000, 47768, 1857, 1138, -1000, - 1856, -1000, 896, 664, 685, 685, 1138, -1000, -1000, 53464, - 1138, 895, 894, 1138, 1138, 52752, 52752, -1000, 47056, -1000, - 46344, 45632, 1209, 52752, 44920, 44208, 43496, 42784, 42072, -1000, - 2038, -1000, 1794, -1000, -1000, -1000, 53464, 1138, 1138, 53464, - 52752, 53464, 54176, 
1138, -1000, -1000, 415, -1000, -1000, 1208, - 1207, 1182, 775, 775, 1171, 1634, 1631, 1623, 775, 775, - 1169, 1618, 37088, 1613, 348, 1168, 1154, 1141, 1276, 1612, - 209, 1601, 1274, 1143, 1139, 52752, 1850, 54176, -1000, 334, - 852, 712, 808, 2203, 2103, 1737, 573, 601, 1138, 555, - 555, 52752, -1000, 14995, -1000, 276, -1000, 1599, 21422, -1000, - 937, 909, 909, -1000, -1000, -1000, -1000, -1000, -1000, 918, - 54176, 937, -1000, -1000, -1000, 909, 918, 54176, 918, 918, - 918, 918, 909, 909, 909, 918, 54176, 54176, 54176, 54176, - 54176, 54176, 54176, 54176, 54176, 14277, 763, 918, -384, -1000, - 1595, -1000, -1000, 1983, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 54931, -1000, + 1870, 247, 53485, 53485, 436, 1201, -1000, -1000, 2381, 1952, + -1000, 2460, 2465, 406, -1000, 3111, 1537, 1627, 1401, 1952, + 1838, 54931, -1000, 1886, -1000, -1000, -1000, -1000, 2098, 1273, + 1717, -1000, -1000, -1000, -1000, 2239, 21671, -1000, -1000, 2504, + -1000, 28179, 656, 2501, 49147, -1000, -1000, -201, -1000, 524, + 524, 1856, 463, 25, -1000, -1000, -1000, -1000, 843, 35410, + -1000, -1000, -1000, -1000, -1000, 1746, 54931, -1000, -1000, 4447, + 1212, -1000, 1946, -1000, 1727, -1000, 1922, 21671, 1959, 597, + 1212, 581, 573, 568, -1000, -33, -1000, -1000, -1000, -1000, + -1000, -1000, 822, 822, 822, -1000, 418, 2447, 330, 4821, + -1000, -1000, -1000, 48424, 1942, 1212, -1000, 1938, -1000, 927, + 614, 644, 644, 1212, -1000, -1000, 54208, 1212, 926, 922, + 1212, 1212, 53485, 53485, -1000, 47701, -1000, 46978, 46255, 1190, + 53485, 45532, 44809, 44086, 43363, 42640, -1000, 2045, -1000, 1913, + -1000, -1000, -1000, 54208, 1212, 1212, 54208, 53485, 54208, 54931, + 1212, -1000, -1000, 386, -1000, -1000, 1189, 1186, 1183, 822, + 822, 1181, 1715, 1712, 1709, 822, 822, 1180, 1702, 37579, + 
1666, 320, 1177, 1175, 1168, 1211, 1664, 203, 1648, 1191, + 1114, 1165, 53485, 1935, 54931, -1000, 284, 769, 500, 842, + 2278, 2228, 1854, 542, 596, 1212, 514, 514, 53485, -1000, + 15145, 54931, 270, -1000, 1606, 21671, -1000, 949, 931, 931, + -1000, -1000, -1000, -1000, -1000, -1000, 929, 54931, 949, -1000, + -1000, -1000, 931, 929, 54931, 929, 929, 929, 929, 931, + 931, 931, 929, 54931, 54931, 54931, 54931, 54931, 54931, 54931, + 54931, 54931, 14416, 797, 929, -378, -1000, 1597, -1000, -1000, + 2080, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, @@ -7273,328 +7372,333 @@ var yyPact = [...]int{ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 14277, 14277, -1000, -1000, -1000, -1000, -1000, 1736, - -1000, 252, 46, 257, -1000, 41360, 393, 807, -1000, 393, - -1000, -1000, -1000, 1729, 40648, -1000, -389, -390, -394, -398, - -1000, -1000, -1000, -399, -406, -1000, -1000, -1000, 21422, 21422, - 21422, 21422, -211, -1000, 1015, 23558, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 21422, 180, 969, 23558, 23558, 23558, 23558, - 23558, 23558, 23558, 24982, 24270, 23558, 23558, 23558, 23558, 23558, - 23558, -1000, -1000, 32816, 5646, 5646, 724, 724, 724, 724, - -1000, -133, 1727, 53464, -1000, -1000, -1000, 654, 21422, 21422, - 724, -1000, 1138, 2961, 18574, 20710, 20710, 21422, 816, 1080, - 53464, 21422, -1000, 1235, -1000, -1000, -1000, -1000, 1028, -1000, - -1000, 1031, 2185, 2185, 2185, 2185, 21422, 21422, 21422, 21422, - 21422, 21422, 21422, 21422, 21422, 21422, 2185, 52752, 52752, 188, - 21422, 21422, 21422, 21422, 21422, 21422, 17149, 21422, 21422, 23558, - 21422, 
21422, 21422, 1235, 21422, 21422, 21422, 21422, 21422, 21422, - 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, - 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, - 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, - 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, - 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, - 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, - 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, 21422, - 21422, 21422, 21422, 1235, 21422, 1096, 21422, 21422, 21422, 21422, - 21422, 21422, 20710, 16431, 20710, 20710, 20710, 20710, 20710, -1000, - -1000, -1000, -1000, -1000, -1000, 21422, 21422, 21422, 21422, 21422, - 21422, 21422, 21422, 1235, 21422, 21422, 21422, 21422, 21422, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1499, - 1427, 1392, 21422, 1726, -1000, -122, 29968, 21422, 1594, 2415, - 1911, 52752, -1000, -1000, -1000, 2299, -1000, 2299, 1499, 2197, - 2015, 20710, -1000, -1000, 2197, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 1487, -1000, 54176, 1725, 2263, 52752, 2002, - 1592, 406, -1000, 21422, 21422, 1720, -1000, 1724, 54176, -1000, - -211, -1000, 39936, -1000, -1000, 13559, 54176, 447, 54176, -1000, - 1588, 29256, 39224, 271, -1000, 41, 1704, -1000, 59, 33, - 17861, 723, -1000, -1000, -1000, 1322, 25694, 1576, 723, 160, - -1000, -1000, -1000, 1804, -1000, 1804, 1804, 1804, 1804, 406, - 406, 406, 406, -1000, -1000, -1000, -1000, -1000, 1849, 1848, - -1000, 1804, 1804, 1804, 1804, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 1845, 1845, 1845, 1841, 1841, 1820, 1820, 533, - -1000, 21422, 322, 38512, 2215, 1135, 1247, 334, 561, 1910, - 1138, 1138, 1138, 561, -1000, 1334, 1307, 1305, -1000, -509, - 1716, -1000, -1000, 2367, -1000, -1000, 872, 952, 925, 796, - 52752, 289, 413, -1000, 513, 
-1000, 38512, 1138, 880, 685, - 1138, -1000, 1138, -1000, -1000, -1000, -1000, -1000, 1138, -1000, - -1000, 1714, -1000, 1745, 1005, 917, 984, 911, 1714, -1000, - -1000, -140, 1714, -1000, 1714, -1000, 1714, -1000, 1714, -1000, - 1714, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 826, 216, -278, 52752, 289, 572, -1000, 569, 32816, -1000, - -1000, -1000, 32816, 32816, -1000, -1000, -1000, -1000, 1570, 1561, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 14416, 14416, + -1000, -1000, -1000, -1000, -1000, 1852, -1000, 213, 52, 220, + -1000, 41917, 399, 841, -1000, 399, -1000, -1000, -1000, 1849, + 41194, -1000, -385, -386, -394, -399, -1000, -1000, -1000, -403, + -409, -1000, -1000, -1000, 21671, 21671, 21671, 21671, -209, -1000, + 1035, 23840, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 21671, + 138, 987, 23840, 23840, 23840, 23840, 23840, 23840, 23840, 25286, + 24563, 23840, 23840, 23840, 23840, 23840, 23840, -1000, -1000, 33241, + 7776, 7776, 749, 749, 749, 749, -1000, -146, 1848, 54208, + -1000, -1000, -1000, 654, 21671, 21671, 749, -1000, 1212, 1901, + 18779, 20948, 20948, 21671, 865, 1132, 54208, 21671, -1000, 1401, + -1000, -1000, -1000, -1000, 1045, -1000, -1000, 923, 2262, 2262, + 2262, 2262, 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, + 21671, 21671, 2262, 21671, 859, 859, 999, 21671, 21671, 21671, + 21671, 21671, 21671, 17332, 21671, 21671, 23840, 21671, 21671, 21671, + 1401, 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, + 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, + 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, + 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, + 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, + 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, + 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, + 21671, 21671, 21671, 21671, 21671, 21671, 
21671, 21671, 21671, 21671, + 1401, 21671, 1084, 21671, 21671, 21671, 21671, 21671, 21671, 20948, + 16603, 20948, 20948, 20948, 20948, 20948, -1000, -1000, -1000, -1000, + -1000, -1000, 21671, 21671, 21671, 21671, 21671, 21671, 21671, 21671, + 1401, 21671, 21671, 21671, 21671, 21671, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 1513, 1462, 1414, 21671, + 1847, -1000, -114, 30349, 21671, 1582, 2490, 1977, 53485, -1000, + -1000, -1000, -1000, 2381, -1000, 2381, 1513, 2488, 2101, 20948, + -1000, -1000, 2488, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 1714, -1000, 54931, 1838, 2338, 53485, 2096, 1573, 407, + -1000, 21671, 21671, 1837, -1000, 1141, 54931, -1000, -209, -1000, + 40471, -1000, -1000, 13687, 54931, 388, 54931, -1000, 1569, 29626, + 39748, 254, -1000, 25, 1786, -1000, 58, 43, 18055, 748, + -1000, -1000, -1000, 628, 26009, 1556, 748, 144, -1000, -1000, + -1000, 1922, -1000, 1922, 1922, 1922, 1922, 407, 407, 407, + 407, -1000, -1000, -1000, -1000, -1000, 1933, 1932, -1000, 1922, + 1922, 1922, 1922, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 1931, 1931, 1931, 1930, 1930, 1923, 1923, 483, -1000, 21671, + 401, 39025, 2303, 1163, 1073, 284, 520, 1974, 1212, 1212, + 1212, 520, -1000, 1371, 1320, 1315, -1000, -518, 1818, -1000, + -1000, 2443, -1000, -1000, 756, 965, 964, 620, 53485, 255, + 359, -1000, 505, -1000, 39025, 1212, 921, 644, 1212, -1000, + 1212, -1000, -1000, -1000, -1000, -1000, 1212, -1000, -1000, 1813, + -1000, 1774, 986, 948, 979, 942, 1813, -1000, -1000, -151, + 1813, -1000, 1813, -1000, 1813, -1000, 1813, -1000, 1813, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 815, 240, + -278, 53485, 255, 533, -1000, 528, 33241, -1000, -1000, -1000, + 33241, 33241, -1000, -1000, -1000, -1000, 1564, 1558, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 
-1000, -1000, -1000, -1000, -495, - 54176, -1000, 330, 795, 432, 463, 372, 54176, 364, 2291, - 2290, 2279, 2272, 2240, 359, 410, 54176, 54176, 555, 1962, - 54176, 2226, 54176, -1000, -1000, -1000, -1000, 1556, 1535, -1000, - 1080, 54176, -1000, -1000, 918, 918, -1000, -1000, 54176, 918, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 918, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 54176, -1000, -1000, -1000, -1000, -2, - 243, -1000, -1000, 52752, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -52, -1000, 110, 56, 405, -1000, -1000, - -1000, -1000, -1000, 2296, -1000, 1080, 865, 838, -1000, 1769, - -1000, -1000, 1059, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 180, 23558, 23558, 23558, 1173, 576, 1211, 1263, 1190, - 1234, 1234, 996, 23558, 996, 23558, 729, 729, 729, 729, - 729, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1525, - -1000, 1769, 53464, 1580, 16431, 1944, 1511, 1235, 740, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -503, 54931, -1000, + 277, 831, 332, 373, 336, 54931, 519, 2372, 2371, 2362, + 2361, 2351, 295, 326, 54931, 54931, 514, 2035, 54931, 2311, + 54931, -1000, -1000, -1000, -1000, -1000, 1552, 1526, -1000, 1132, + 54931, -1000, -1000, 929, 929, -1000, -1000, 54931, 929, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 929, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 4463, - 1575, -1000, 1575, 1896, 820, -1000, 21422, 1235, 4453, -1000, - -1000, 1235, 1235, 21422, -1000, -1000, 21422, 21422, 21422, 21422, - 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - 21422, 1711, 1707, 2412, 1170, 1247, 1247, 1247, 1247, 1247, - 21422, 1498, -1000, -1000, -1000, 1422, 4447, 981, 4425, 1247, - 1247, -1000, 1247, 4421, 4407, 1235, 1582, 3518, 3491, 1247, - 1247, 1247, 1247, 1247, 3346, 3332, 1247, 1247, 
3308, 1247, - 4381, 1247, 3300, 3245, 3241, 3203, 3180, 3172, 3168, 3159, - 3154, 3140, 3136, 3132, 3112, 3098, 3057, 2968, 2743, 2723, - 1247, 1247, 1247, 4376, 1247, 4357, 1247, 4350, 1247, 1247, - 4342, 2453, 2422, 1235, 1706, -1000, 4331, 1247, 4321, 4317, - 4311, 2414, 4307, 3955, 3946, 1247, 1247, 1247, 2381, 3898, - 3891, 3882, 3875, 3866, 3835, 3810, 3785, 3773, 1247, 1392, - 1392, 1392, 1392, 1392, 3767, -217, 1247, 1235, -1000, -1000, - -1000, -1000, -1000, 3758, 2356, 3743, 3739, 3735, 3725, 1235, - 1705, 1769, 653, -1000, -1000, 1575, 1235, 1235, 1575, 1575, - 3717, 3709, 3702, 3661, 3637, 3616, 1247, 1247, -1000, 1247, - 3541, 3534, 2345, 2325, 1235, -1000, 1392, 54176, -1000, -372, - -1000, 48, 734, 1769, -1000, 37088, 1235, -1000, 5289, -1000, - 1021, -1000, -1000, -1000, -1000, -1000, 34240, 1617, 2197, -1000, - -1000, 1769, 1555, -1000, -1000, 406, 116, 33528, 707, 707, - 169, 1080, 1080, 21422, -1000, -1000, -1000, -1000, -1000, -1000, - 651, 2384, 407, 1769, -1000, 2210, 1722, 2681, -1000, -1000, - -1000, 2257, 27119, -1000, -1000, 1769, 1769, 54176, 1679, 1605, - -1000, 645, -1000, 1175, 1704, 41, 30, -1000, -1000, -1000, - -1000, 1080, -1000, 1303, 448, 1037, -1000, 515, -1000, -1000, - -1000, -1000, 2157, 120, -1000, -1000, -1000, 256, 406, -1000, - -1000, -1000, -1000, -1000, -1000, 1514, 1514, -1000, -1000, -1000, - -1000, -1000, 1127, -1000, -1000, -1000, -1000, 1122, -1000, -1000, - 1120, -1000, -1000, 2766, 1904, 322, -1000, -1000, 775, 1502, - -1000, -1000, 2161, 775, 775, 52752, -1000, -1000, 1547, 2215, - 330, 54176, 825, 1959, -1000, 1910, 1910, 1910, 54176, -1000, - -1000, -1000, -1000, -1000, -1000, -497, 66, 419, -1000, -1000, - -1000, 365, 52752, 1553, -1000, 285, -1000, 1478, -1000, 52752, - -1000, 1546, 1840, 1138, 1138, -1000, -1000, -1000, 52752, 1769, - -1000, -1000, -1000, -1000, 596, 2194, 290, -1000, -1000, -235, - -1000, -1000, 289, 285, 53464, 1138, 723, -1000, -1000, -1000, - -1000, -1000, -498, 1540, 585, 310, 368, 
54176, 54176, 54176, - 54176, 54176, 626, -1000, -1000, 69, -1000, -1000, 272, -1000, - -1000, -1000, -1000, 272, -1000, -1000, -1000, -1000, 380, 566, - -1000, 54176, 54176, 692, -1000, -1000, -1000, -1000, -1000, 909, - -1000, -1000, 909, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 2181, 54176, 53, -460, -1000, - -456, 21422, -1000, -1000, -1000, -1000, 1088, 574, 1211, 23558, - 23558, 2961, 2961, 23558, -1000, -1000, -1000, 512, 512, 32816, - -1000, 23558, 21422, 20710, -1000, -1000, 21422, 21422, 812, -1000, - 21422, 1133, -1000, 21422, -1000, -1000, 1392, 1247, 1247, 1247, - 1247, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 1542, 21422, 21422, 21422, 1235, 394, -1000, -1000, -1000, - -1000, -1000, 2410, -1000, 21422, -1000, 32816, 21422, 21422, 21422, - -1000, -1000, -1000, 21422, 21422, -1000, -1000, 21422, -1000, 21422, - -1000, -1000, -1000, -1000, -1000, -1000, 21422, -1000, 21422, -1000, - -1000, -1000, 21422, -1000, 21422, -1000, -1000, 21422, -1000, 21422, - -1000, 21422, -1000, 21422, -1000, 21422, -1000, 21422, -1000, 21422, - -1000, 21422, -1000, 21422, -1000, 21422, -1000, 21422, -1000, 21422, - -1000, 21422, -1000, 21422, -1000, 21422, -1000, 21422, -1000, 21422, - -1000, 21422, -1000, -1000, -1000, 21422, -1000, 21422, -1000, 21422, - -1000, -1000, 21422, -1000, 21422, -1000, 21422, -1000, 21422, 21422, - -1000, 21422, 21422, 21422, -1000, 21422, 21422, 21422, 21422, -1000, - -1000, -1000, -1000, 21422, 21422, 21422, 21422, 21422, 21422, 21422, - 21422, 21422, 21422, -1000, -1000, -1000, -1000, -1000, -1000, 21422, - -1000, 38512, 89, -217, 1096, 89, 1096, 22846, 665, 659, - 22134, -1000, 20710, 15713, -1000, -1000, -1000, -1000, -1000, 21422, - 21422, 21422, 21422, 21422, 21422, -1000, -1000, -1000, 21422, 21422, - -1000, 21422, -1000, 21422, -1000, -1000, -1000, -1000, -1000, 734, - -1000, 685, 685, 685, 52752, -1000, -1000, -1000, -1000, 1703, - -1000, 2275, -1000, 2040, 
2035, 2407, 2384, -1000, 29256, 2197, - -1000, -1000, 52752, -351, -1000, 2102, 2039, 707, 707, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 12841, 2299, 21422, 1957, - 53464, 139, -1000, 28544, 52752, 595, 53464, 29256, 29256, 29256, - 29256, 29256, -1000, 1981, 1977, -1000, 1992, 1984, 1999, 54176, - -1000, 1499, 1530, -1000, 21422, 31392, 1685, 29256, -1000, -1000, - 29256, 54176, 12123, -1000, -1000, 31, 17, -1000, -1000, -1000, - -1000, 1322, -1000, -1000, 1684, 2250, 2154, -1000, -1000, -1000, - -1000, -1000, 1524, -1000, 1521, 1702, 1519, 1506, 216, -1000, - 1883, 2179, 775, 775, -1000, 1117, -1000, 1138, 1500, 1493, - -1000, -1000, -1000, 575, -1000, 2222, 54176, 1952, 1933, 1931, - -1000, -506, 1108, 1831, 1871, 21422, 1829, 2362, 1692, 52752, - -1000, -1000, 53464, -1000, 242, -1000, 322, 52752, -1000, -1000, - -1000, 413, 54176, -1000, 9013, -1000, -1000, -1000, 285, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 54176, 317, -1000, 1823, - 1084, -1000, -1000, 1894, -1000, -1000, -1000, -1000, 286, 395, - 1491, 268, 1480, 268, -1000, 54176, 678, 1904, 54176, -1000, - -1000, -1000, 918, 918, -1000, -1000, 2177, -1000, 1138, 1247, - 23558, 23558, -1000, 724, -1000, -1000, 343, -190, 1804, 1804, - -1000, 1804, 1820, -1000, 1804, 220, 1804, 191, 1804, -1000, - -1000, 1235, 1235, -1000, 1392, -1000, 2319, 1187, -1000, 1080, - 21422, 3522, -1000, -1000, -1000, -1000, -1000, -31, 3350, 3184, - 1247, -1000, 1801, 1799, 21422, 1247, 1235, 2307, 1247, 1247, - 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - 2278, 2262, 2251, 2247, 2242, 2238, 2216, 2205, 2139, 1958, - 1953, 1913, 1886, 1858, 1851, 1792, 1247, 1247, 1780, 1247, - 1677, 1670, -1000, 1080, 1392, 2979, 1392, 1247, 1247, 2448, - 337, 1247, 1490, 1490, 1490, 1490, 1490, 1392, 1392, 1392, - 1392, 1247, 52752, -1000, -217, -1000, -1000, -271, -272, -1000, - 1235, -217, 1701, 23558, 1247, 23558, 23558, 23558, 1247, 1235, - -1000, 1578, 1573, 2374, 1568, 1247, 2282, 1247, 1247, 1247, - 
1538, -1000, 2295, 2295, 2295, 1486, 1021, 54176, -1000, -1000, - -1000, -1000, 2384, 2382, 1700, -1000, -1000, 116, 494, -1000, - 2054, 2039, -1000, 2354, 2082, 2352, -1000, -1000, -1000, -1000, - -1000, 1080, -1000, 2208, 1699, -1000, 793, 1672, -1000, -1000, - 19998, 1389, 2023, 644, 1486, 54176, 1758, 2681, 1890, 1930, - 2239, -1000, -1000, -1000, -1000, 1973, -1000, 1942, -1000, -1000, - 1784, -1000, 1938, 447, 29256, 1735, 1735, -1000, 640, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 973, 6029, 2440, -1000, - 1479, -1000, 1258, 187, 1103, -1000, -1000, 775, 775, -1000, - 873, 871, -1000, 54176, 1798, -1000, 406, 1437, 406, 1093, - -1000, -1000, 1085, -1000, -1000, -1000, -1000, 1790, 1932, -1000, - -1000, -1000, -1000, 54176, -1000, -1000, 54176, 54176, 54176, 1797, - 2348, -1000, 21422, 1795, 777, 2350, 52752, 52752, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 536, - 775, -476, 396, 392, 775, 775, 775, -508, -1000, -1000, - 1477, 1471, -1000, -166, -1000, 21422, -1000, -1000, -1000, 1089, - 1089, 1433, 1429, 1418, -1000, 1784, -1000, -1000, -1000, 1462, - -1000, -1000, -153, 52752, 52752, 52752, 52752, -1000, -1000, 1097, + -1000, -1000, -1000, 54931, -1000, -1000, -1000, -1000, 5, 209, + -1000, -1000, 53485, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -25, -1000, 54, 55, 394, -1000, -1000, -1000, + -1000, -1000, 2378, -1000, 1132, 875, 903, -1000, 1870, -1000, + -1000, 1047, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 138, 23840, 23840, 23840, 1465, 552, 1511, 1250, 1063, 1023, + 1023, 851, 23840, 851, 23840, 753, 753, 753, 753, 753, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1523, -1000, + 1870, 54208, 1708, 16603, 1325, 2388, 1401, 767, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 724, 1235, 356, -156, 1235, -1000, -1000, 406, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 4503, 1688, + -1000, 1688, 
1816, 860, -1000, 21671, 1401, 4494, -1000, -1000, + 1401, 1401, 21671, -1000, -1000, 21671, 21671, 21671, 21671, 1073, + 1073, 1073, 1073, 1073, 1073, 1073, 1073, 1073, 1073, 21671, + 1073, 1808, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 21422, -1000, 21422, -1000, 1080, 21422, 2299, 1411, 21422, 21422, - -1000, 1076, 1067, 1247, -1000, -1000, -1000, 21422, -1000, -1000, + 1805, 2489, 1044, 1073, 1073, 1073, 1073, 1073, 21671, 1850, + -1000, -1000, -1000, 1409, 4472, 1425, 4453, 1073, 1073, -1000, + 1073, 4431, 4427, 1401, 1639, 3290, 3285, 1073, 1073, 1073, + 1073, 1073, 3270, 3256, 1073, 1073, 3225, 1073, 4421, 1073, + 3221, 3209, 3199, 3184, 3176, 3164, 3159, 3153, 3128, 3094, + 3084, 2997, 2875, 2850, 2833, 2493, 2450, 2436, 1073, 1073, + 1073, 4416, 1073, 4411, 1073, 4406, 1073, 1073, 4390, 2404, + 2393, 1401, 1804, -1000, 4386, 1073, 4380, 4374, 4363, 2367, + 4312, 4017, 3996, 1073, 1073, 1073, 2358, 3959, 3930, 3920, + 3916, 3911, 3896, 3884, 3867, 3863, 1073, 1414, 1414, 1414, + 1414, 1414, 3810, -214, 1073, 1401, -1000, -1000, -1000, -1000, + -1000, 3806, 2322, 3788, 3776, 3761, 3720, 1401, 1801, 1870, + 651, -1000, -1000, 1688, 1401, 1401, 1688, 1688, 3716, 3576, + 3572, 3555, 3430, 3422, 1073, 1073, -1000, 1073, 3401, 3393, + 2317, 2295, 1401, -1000, 1414, 54931, -1000, -365, -1000, 33, + 765, 1870, -1000, 37579, 1401, -1000, 5992, -1000, 1214, -1000, + -1000, -1000, -1000, -1000, 34687, 1765, 2488, -1000, -1000, 1870, + 1684, -1000, -1000, 407, 126, 33964, 713, 713, 156, 1132, + 1132, 21671, -1000, -1000, -1000, -1000, -1000, -1000, 649, 2459, + 414, 1870, -1000, 2279, 1815, 2718, -1000, -1000, -1000, 2337, + 27456, -1000, -1000, 1870, 1870, 54931, 1700, 1641, -1000, 647, + -1000, 1237, 1786, 25, 29, -1000, -1000, -1000, -1000, 1132, + -1000, 1286, 393, 194, -1000, 512, -1000, -1000, -1000, -1000, + 2237, 129, -1000, -1000, -1000, 306, 407, -1000, -1000, -1000, + 
-1000, -1000, -1000, 1494, 1494, -1000, -1000, -1000, -1000, -1000, + 1157, -1000, -1000, -1000, -1000, 1156, -1000, -1000, 1155, -1000, + -1000, 3019, 2006, 401, -1000, -1000, 822, 1482, -1000, -1000, + 2240, 822, 822, 53485, -1000, -1000, 1540, 2303, 277, 54931, + 872, 2033, -1000, 1974, 1974, 1974, 54931, -1000, -1000, -1000, + -1000, -1000, -1000, -504, 69, 419, -1000, -1000, -1000, 4505, + 53485, 1682, -1000, 250, -1000, 1534, -1000, 53485, -1000, 1680, + 1928, 1212, 1212, -1000, -1000, -1000, 53485, 1870, -1000, -1000, + -1000, -1000, 594, 2272, 311, -1000, -1000, -233, -1000, -1000, + 255, 250, 54208, 1212, 748, -1000, -1000, -1000, -1000, -1000, + -506, 1676, 562, 257, 347, 54931, 54931, 54931, 54931, 54931, + 54931, 618, -1000, -1000, 68, -1000, -1000, 236, -1000, -1000, + -1000, -1000, 236, -1000, -1000, -1000, -1000, 319, 521, -1000, + 54931, 54931, 701, -1000, -1000, -1000, -1000, -1000, 931, -1000, + -1000, 931, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 2258, 54931, 51, -459, -1000, -455, + 21671, -1000, -1000, -1000, -1000, 1226, 539, 1511, 23840, 23840, + 1901, 1901, 23840, -1000, -1000, -1000, 1026, 1026, 33241, -1000, + 23840, 21671, 20948, -1000, -1000, 21671, 21671, 848, -1000, 21671, + 959, -1000, 21671, -1000, -1000, 1414, 1073, 1073, 1073, 1073, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 21422, -1000, 21422, -1000, 21422, -1000, 21422, -1000, 21422, - -1000, 21422, -1000, 21422, -1000, 21422, -1000, 21422, -1000, 21422, - -1000, 21422, -1000, 21422, -1000, 21422, -1000, 21422, -1000, 21422, - -1000, 21422, -1000, -1000, 21422, -1000, -1000, -1000, 21422, -1000, - 21422, -1000, 21422, -1000, -1000, -1000, 21422, 247, 512, -1000, + 1839, -1000, 21671, 21671, 21671, 1401, 356, -1000, -1000, -1000, + -1000, -1000, 2481, -1000, 21671, -1000, 33241, 21671, 21671, 21671, + -1000, -1000, -1000, 21671, 21671, -1000, -1000, 21671, -1000, 21671, + -1000, -1000, -1000, -1000, 
-1000, -1000, 21671, -1000, 21671, -1000, + -1000, -1000, 21671, -1000, 21671, -1000, -1000, 21671, -1000, 21671, + -1000, 21671, -1000, 21671, -1000, 21671, -1000, 21671, -1000, 21671, + -1000, 21671, -1000, 21671, -1000, 21671, -1000, 21671, -1000, 21671, + -1000, 21671, -1000, 21671, -1000, 21671, -1000, 21671, -1000, 21671, + -1000, 21671, -1000, -1000, -1000, 21671, -1000, 21671, -1000, 21671, + -1000, -1000, 21671, -1000, 21671, -1000, 21671, -1000, 21671, 21671, + -1000, 21671, 21671, 21671, -1000, 21671, 21671, 21671, 21671, -1000, + -1000, -1000, -1000, 21671, 21671, 21671, 21671, 21671, 21671, 21671, + 21671, 21671, 21671, -1000, -1000, -1000, -1000, -1000, -1000, 21671, + -1000, 39025, 124, -214, 1084, 124, 1084, 23117, 635, 627, + 22394, -1000, 20948, 15874, -1000, -1000, -1000, -1000, -1000, 21671, + 21671, 21671, 21671, 21671, 21671, -1000, -1000, -1000, 21671, 21671, + -1000, 21671, -1000, 21671, -1000, -1000, -1000, -1000, -1000, 765, + -1000, 644, 644, 644, 53485, -1000, -1000, -1000, -1000, 1783, + -1000, 2357, -1000, 2124, 2109, 2480, 2459, -1000, 29626, 2488, + -1000, -1000, 53485, -341, -1000, 2217, 2114, 713, 713, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 12958, 2381, 21671, 2029, + 54208, 142, -1000, 28903, 53485, 591, 54208, 29626, 29626, 29626, + 29626, 29626, -1000, 2085, 2072, -1000, 2062, 2061, 2068, 54931, + -1000, 1513, 1661, -1000, 21671, 31795, 1768, 29626, -1000, -1000, + 29626, 54931, 12229, -1000, -1000, 30, 38, -1000, -1000, -1000, + -1000, 628, -1000, -1000, 1034, 2335, 2235, -1000, -1000, -1000, + -1000, -1000, 1653, -1000, 1645, 1782, 1637, 1629, 240, -1000, + 1956, 2257, 822, 822, -1000, 1150, -1000, 1212, 1472, 1467, + -1000, -1000, -1000, 546, -1000, 2308, 54931, 2025, 2024, 2015, + -1000, -515, 1123, 1927, 1906, 21671, 1925, 2441, 1756, 53485, + -1000, -1000, 54208, -1000, 243, -1000, 401, 53485, -1000, -1000, + -1000, 359, 54931, -1000, 6282, -1000, -1000, -1000, 250, -1000, + -1000, -1000, -1000, -1000, -1000, 
-1000, 54931, 264, -1000, 1924, + 1216, -1000, -1000, 1890, -1000, -1000, -1000, -1000, -1000, 244, + 241, 1463, 234, 1460, 234, -1000, 54931, 634, 2006, 54931, + -1000, -1000, -1000, 929, 929, -1000, -1000, 2256, -1000, 1212, + 1073, 23840, 23840, -1000, 749, -1000, -1000, 913, -185, 1922, + 1922, -1000, 1922, 1923, -1000, 1922, 204, 1922, 200, 1922, + -1000, -1000, 1401, 1401, -1000, 1414, -1000, 2289, 1724, -1000, + 1132, 21671, 3356, -1000, -1000, -1000, -1000, -1000, -41, 3343, + 3336, 1073, -1000, 1921, 1914, 21671, 1073, 1401, 2285, 1073, + 1073, 1073, 1073, 1073, 1073, 1073, 1073, 1073, 1073, 1073, + 1073, 2281, 2277, 2248, 2177, 2160, 2030, 2026, 1947, 1943, + 1939, 1881, 1864, 1843, 1820, 1748, 1744, 1073, 1073, 1733, + 1073, 1686, 1669, -1000, 1132, 1414, 3328, 1414, 1073, 1073, + 3318, 376, 1073, 1623, 1623, 1623, 1623, 1623, 1414, 1414, + 1414, 1414, 1073, 53485, -1000, -214, -1000, -1000, -252, -254, + -1000, 1401, -214, 1775, 23840, 1073, 23840, 23840, 23840, 1073, + 1401, -1000, 1651, 1647, 3313, 1642, 1073, 2487, 1073, 1073, + 1073, 1620, -1000, 2376, 2376, 2376, 1617, 1214, 54931, -1000, + -1000, -1000, -1000, 2459, 2453, 1757, -1000, -1000, 126, 473, + -1000, 2197, 2114, -1000, 2438, 2186, 2432, -1000, -1000, -1000, + -1000, -1000, 1132, -1000, 2293, 1753, -1000, 826, 1752, -1000, + -1000, 20225, 1411, 2107, 640, 1617, 54931, 1822, 2718, 1984, + 2014, 1976, -1000, -1000, -1000, -1000, 2063, -1000, 2054, -1000, + -1000, 1886, -1000, 2426, 388, 29626, 1735, 1735, -1000, 639, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 991, 6282, 2510, + -1000, 1451, -1000, 1221, 195, 1118, -1000, -1000, 822, 822, + -1000, 920, 917, -1000, 54931, 1904, -1000, 407, 1443, 407, + 1115, -1000, -1000, 1112, -1000, -1000, -1000, -1000, 1845, 2019, + -1000, -1000, -1000, -1000, 54931, -1000, -1000, 54931, 54931, 54931, + 1902, 2429, -1000, 21671, 1900, 824, 2855, 53485, 53485, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 1235, 442, 
-1000, -1000, -1000, -1000, 2395, -1000, 1235, 21422, - 2961, -1000, 2961, 2961, 2961, -1000, -1000, -1000, 21422, -1000, - 21422, 21422, -1000, 21422, -1000, 21422, -1000, -1000, -1000, -1000, - 21422, 1769, 2235, 1769, 1769, 31392, -1000, -1000, 2382, 2380, - 2344, 2055, 2061, 2061, 2054, -1000, 2339, 2336, -1000, 1396, - 2333, 1376, 845, -1000, 53464, 21422, 139, -1000, 461, 52752, - 139, 52752, -1000, 114, 2346, -1000, -1000, 21422, 1789, -1000, - 21422, -1000, -1000, -1000, -1000, 5646, 2384, 1735, -1000, -1000, - 735, -1000, 21422, -1000, -1000, -1000, 4754, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 1368, 1366, -1000, -1000, 1785, - 21422, -1000, -1000, -1000, 1423, 1393, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 1784, -1000, -1000, -1000, -1000, 413, - -502, 1837, 52752, 1066, -1000, 1469, 1692, 399, 139, 1361, - 775, 775, 775, 1061, 1060, 37088, 1466, -1000, 52752, 499, - -1000, 413, -1000, -184, -186, 1247, -1000, -1000, 2246, -1000, - -1000, 15713, -1000, -1000, 1783, 1895, -1000, -1000, -1000, -1000, - 1998, -137, -161, -1000, -1000, 1247, 1247, 1718, 1235, -1000, - 1247, 1247, 1365, 1277, -1000, 1247, 1247, 1247, 1247, 1247, - 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, 1247, - 1247, 1247, 1247, 1247, 1247, 1392, 1534, -1000, 247, 1235, - 1929, -1000, -1000, 5646, -1000, -1000, 2346, 2332, 89, -1000, - -1000, 295, 89, 1080, 831, 1235, 1235, 831, 1475, 1247, - 1464, 1459, 1247, 1247, 32104, -1000, 2322, 2321, 37800, 37800, - 734, 2380, -233, 21422, 21422, 2056, 1020, -1000, -1000, -1000, - -1000, 1329, 1324, -1000, 1319, -1000, 2438, -1000, 1080, -1000, - 139, -1000, 636, 1672, -1000, 2100, 2087, -1000, -1000, 2299, - 1080, 52752, 1080, 113, 2346, -1000, 1247, -1000, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 
1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, 1769, - 1769, 1769, 1769, 1769, -1000, -1000, 52752, 1697, -1000, -1000, - 2245, 1461, 64, -1000, 1264, 1692, -1000, -1000, 137, -1000, - 21422, -1000, 37088, 1300, 1260, -1000, -1000, -1000, -1000, -508, - -1000, -1000, -1000, -1000, -1000, -1000, 446, 1690, -1000, 772, - 52752, 54176, -1000, 1995, -1000, -1000, -1000, 21422, -1000, -1000, + 508, 822, -485, 324, 321, 822, 822, 822, -517, -1000, + -1000, 1539, 1533, -1000, -176, -1000, 21671, -1000, -1000, -1000, + -1000, -1000, 1077, 1077, 1416, 1396, 1394, -1000, 1886, -1000, + -1000, -1000, 1512, -1000, -1000, -154, 53485, 53485, 53485, 53485, + -1000, -1000, -1000, 1111, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 749, 1401, 358, -159, + 1401, -1000, -1000, 407, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 21671, -1000, 21671, -1000, 1132, 21671, + 2381, 1391, 21671, 21671, -1000, 1108, 1103, 1073, -1000, -1000, + -1000, 21671, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 21671, -1000, 21671, -1000, 21671, + -1000, 21671, -1000, 21671, -1000, 21671, -1000, 21671, -1000, 21671, + -1000, 21671, -1000, 21671, -1000, 21671, -1000, 21671, -1000, 21671, + -1000, 21671, -1000, 21671, -1000, 21671, -1000, -1000, 21671, -1000, + -1000, -1000, 21671, -1000, 21671, -1000, 21671, -1000, -1000, -1000, + 21671, 268, 1026, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 1401, 387, -1000, 
-1000, -1000, -1000, + 2479, -1000, 1401, 21671, 1901, -1000, 1901, 1901, 1901, -1000, + -1000, -1000, 21671, -1000, 21671, 21671, -1000, 21671, -1000, 21671, + -1000, -1000, -1000, -1000, 21671, 1870, 2215, 1870, 1870, 31795, + -1000, -1000, 2453, 2395, 2428, 2132, 2134, 2134, 2197, -1000, + 2425, 2422, -1000, 1388, 2421, 1369, 907, -1000, 54208, 21671, + 142, -1000, 410, 53485, 142, 53485, -1000, 116, 2455, -1000, + -1000, 21671, 1897, -1000, 21671, -1000, -1000, -1000, -1000, 7776, + 2459, 1735, -1000, -1000, 759, -1000, 21671, -1000, 4822, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1349, 1341, -1000, + -1000, 1888, 21671, -1000, -1000, -1000, 1476, 1466, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 1886, -1000, -1000, -1000, + -1000, 359, -511, 2810, 53485, 1081, -1000, 1531, 1756, 367, + 142, 1336, 822, 822, 822, 1068, 1055, 37579, 1498, -1000, + 53485, 465, -1000, 359, -1000, -181, -182, 1073, -1000, -1000, + 2315, -1000, -1000, 15874, -1000, -1000, 1879, 1970, -1000, -1000, + -1000, -1000, 2093, -149, -171, -1000, -1000, 1073, 1073, 1806, + 1401, -1000, 1073, 1073, 1450, 1435, -1000, 1073, 1073, 1073, + 1073, 1073, 1073, 1073, 1073, 1073, 1073, 1073, 1073, 1073, + 1073, 1073, 1073, 1073, 1073, 1073, 1073, 1414, 1603, -1000, + 268, 1401, 2007, -1000, -1000, 7776, -1000, -1000, 2455, 2419, + 124, -1000, -1000, 266, 124, 1132, 862, 1401, 1401, 862, + 1581, 1073, 1555, 1522, 1073, 1073, 32518, -1000, 2416, 2414, + 38302, 38302, 765, 2395, -223, 21671, 21671, 2130, 1041, -1000, + -1000, -1000, -1000, 1322, 1317, -1000, 1300, -1000, 2495, -1000, + 1132, -1000, 142, -1000, 636, 1752, -1000, 2216, 2214, -1000, + -1000, 2381, 1132, 53485, 1132, 115, 2455, -1000, 1073, -1000, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 
1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, + 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, 1870, -1000, + -1000, 53485, 2763, -1000, -1000, 2314, 1449, 67, -1000, 1415, + 1756, -1000, -1000, 141, -1000, 21671, -1000, 37579, 1296, 1293, + -1000, -1000, -1000, -1000, -517, -1000, -1000, -1000, -1000, -1000, + -1000, 406, 1755, -1000, 816, 53485, 54931, -1000, 2092, -1000, + -1000, -1000, 21671, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 21422, -1000, 1235, 1928, -1000, - -309, -1000, -481, 21422, -217, -1000, -1000, -217, -1000, -1000, - -1000, -1000, -1000, 21422, -1000, -1000, 21422, -1000, 21422, -1000, - -1000, 1457, -1000, -1000, -1000, -1000, -1000, 1457, 1457, -1000, - -233, -1000, 1689, -1000, 52752, 1080, 1582, -1000, 1017, -1000, - -1000, -1000, -1000, -1000, 53464, 1672, 52752, 1769, 2049, 2065, - 2317, -1000, 1452, 1235, 1769, 2299, -1000, 1448, -1000, 446, - -1000, 1777, 1871, -1000, -1000, -1000, 19286, -1000, -1000, -1000, - -1000, -1000, 182, -152, 15713, 11405, 1442, -1000, -143, 1247, - 1392, -1000, -410, -1000, -1000, -1000, -1000, 281, -1000, -1000, - 1582, -1000, -1000, 1387, 1383, 1364, 36376, -1000, -1000, -1000, - -1000, -233, -1000, -1000, 2243, -1000, -1000, 1492, -1000, -1000, - 52752, 2086, 2308, 2084, 2305, 1244, -1000, 31392, 52040, -1000, - -131, 324, -152, 21422, 1771, 1235, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 
-1000, 9, -1000, -1000, 634, -1000, -1000, - -1000, 1894, -159, -1000, -1000, -1000, 251, -468, -242, -243, - 23558, -1000, 21422, -1000, 21422, -1000, 21422, -1000, -1000, -1000, - 52752, 1769, 1389, -1000, 2304, 1232, -1000, 2302, 1230, -1000, - -1000, 1385, -1000, 4290, -294, 1926, -1000, -44, -1000, -1000, - -1000, 971, 1220, -1000, -1000, -1000, -1000, -1000, -1000, 1629, - 52752, -1000, 517, -1000, -1000, 14995, -153, -162, 833, -1000, - -1000, -1000, -1000, -1000, 2961, 1328, 1323, 1247, -1000, 52752, - -1000, 1193, -1000, 1134, -1000, -1000, 52040, -277, 723, 5646, - -1000, 1918, 1914, 2402, -1000, -1000, -1000, -1000, -1000, -1000, - -515, 1358, 332, -1000, -1000, -1000, 251, -249, -1000, 21422, - -1000, 21422, -1000, 1235, -1000, -1000, -1000, -1000, 2221, 113, - -1000, 2430, -1000, 2418, 771, 771, -1000, 1048, -515, -1000, - -1000, -1000, -1000, 1247, 1247, -1000, -297, -1000, -1000, -1000, - -1000, -1000, 523, 1083, -1000, -1000, -1000, -1000, -1000, 5646, - -1000, -1000, -1000, 239, 239, -1000, -1000, + 21671, -1000, 1401, 2003, -1000, -317, -1000, -490, 21671, -214, + -1000, -1000, -214, -1000, -1000, -1000, -1000, -1000, 21671, -1000, + -1000, 21671, -1000, 21671, -1000, -1000, 1447, -1000, -1000, -1000, + -1000, -1000, 1447, 1447, -1000, -223, -1000, 1754, -1000, 53485, + 1132, 1639, -1000, 1018, -1000, -1000, -1000, -1000, -1000, 54208, + 1752, 53485, 1870, 2127, 2140, 2413, -1000, 1441, 1401, 1870, + 2381, -1000, 1439, -1000, 406, -1000, 1878, 1906, -1000, -1000, + -1000, 19502, -1000, -1000, -1000, -1000, -1000, 172, -153, 15874, + 11500, 1434, -1000, -152, 1073, 1414, -1000, -437, -1000, -1000, + -1000, -1000, 196, -1000, -1000, 1639, -1000, -1000, 1475, 1400, + 1340, 36856, -1000, -1000, -1000, -1000, -223, -1000, -1000, 2313, + -1000, -1000, 1605, -1000, -1000, 53485, 2210, 2406, 2190, 2402, + 1283, -1000, 31795, 52762, -1000, -144, 574, -153, 21671, 1877, + 1401, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 37, + -1000, -1000, 630, 
-1000, -1000, -1000, 1890, -169, -1000, -1000, + -1000, 256, -464, -249, -255, 23840, -1000, 21671, -1000, 21671, + -1000, 21671, -1000, -1000, -1000, 53485, 1870, 1411, -1000, 2394, + 1281, -1000, 2392, 1275, -1000, -1000, 1375, -1000, 4339, -270, + 2001, -1000, 7, -1000, -1000, -1000, 984, 1262, -1000, -1000, + -1000, -1000, -1000, -1000, 1916, 53485, -1000, 480, -1000, -1000, + 15145, -154, -173, 888, -1000, -1000, -1000, -1000, -1000, 1901, + 1290, 1153, 1073, -1000, 53485, -1000, 1255, -1000, 1253, -1000, + -1000, 52762, -257, 748, 7776, -1000, 1986, 1980, 2474, -1000, + -1000, -1000, -1000, -1000, -1000, -523, 1292, 280, -1000, -1000, + -1000, 256, -258, -1000, 21671, -1000, 21671, -1000, 1401, -1000, + -1000, -1000, -1000, 2302, 115, -1000, 2476, -1000, 2466, 693, + 693, -1000, 1046, -523, -1000, -1000, -1000, -1000, 1073, 1073, + -1000, -274, -1000, -1000, -1000, -1000, -1000, 472, 1105, -1000, + -1000, -1000, -1000, -1000, 7776, -1000, -1000, -1000, 248, 248, + -1000, -1000, } var yyPgo = [...]int{ - 0, 3141, 3139, 27, 1, 35, 34, 3138, 40, 96, - 193, 38, 202, 108, 3124, 177, 3123, 3122, 3119, 3117, - 3114, 3113, 2504, 2496, 2490, 3108, 3104, 3103, 3100, 3098, - 3096, 3093, 3091, 3088, 3083, 176, 161, 196, 3081, 3080, - 3079, 114, 192, 85, 87, 188, 3078, 3077, 95, 3075, - 3074, 3073, 190, 189, 185, 892, 3072, 181, 111, 49, - 3064, 3058, 3054, 3053, 3052, 3050, 3049, 3048, 3047, 3046, - 3045, 3044, 2985, 2984, 2980, 2979, 2978, 2977, 274, 2976, - 2975, 19, 2974, 99, 2970, 2966, 2964, 2959, 2958, 5, - 2957, 2956, 12, 42, 2954, 2953, 48, 2952, 2951, 2950, - 2949, 2948, 17, 2947, 25, 2946, 36, 2945, 2944, 124, - 2943, 2942, 2941, 39, 2939, 2936, 2930, 9, 2929, 2928, - 139, 2925, 2924, 2922, 184, 198, 2921, 2920, 162, 106, - 154, 2919, 2912, 103, 191, 2911, 118, 2895, 2893, 2892, - 150, 2891, 1086, 2889, 75, 66, 2887, 28, 2886, 2883, - 163, 65, 4, 212, 220, 2879, 2878, 68, 165, 2877, - 107, 2876, 2875, 105, 76, 2872, 98, 101, 2868, 2867, - 13, 3, 2866, 131, 134, 
132, 71, 2860, 2859, 112, - 2857, 2856, 2853, 89, 2852, 2851, 3792, 2850, 88, 128, - 104, 81, 2848, 45, 54, 2847, 2846, 2844, 2835, 2833, - 51, 2830, 2828, 2826, 152, 231, 170, 2823, 47, 78, - 57, 129, 2820, 72, 83, 194, 171, 2819, 2818, 136, - 138, 2817, 2816, 61, 46, 44, 2815, 94, 126, 120, - 37, 92, 130, 2814, 2810, 67, 79, 2803, 2802, 2799, - 2798, 175, 2796, 2793, 70, 2792, 60, 2791, 172, 24, - 21, 50, 2790, 55, 2788, 167, 2787, 80, 2786, 2785, - 74, 119, 77, 52, 2782, 157, 169, 122, 174, 2779, - 2777, 58, 2776, 2775, 2774, 199, 340, 2773, 2772, 97, - 178, 142, 149, 86, 2771, 350, 2769, 2767, 14, 4877, - 7247, 2766, 56, 160, 2765, 2763, 7458, 15, 62, 20, - 2762, 197, 2761, 2760, 2758, 2740, 1217, 182, 156, 168, - 64, 2731, 2730, 2729, 11, 2728, 2726, 2720, 2712, 2634, - 2630, 140, 33, 32, 31, 206, 69, 7, 102, 166, - 153, 90, 2624, 2616, 2602, 117, 93, 2601, 159, 158, - 148, 164, 2600, 180, 143, 113, 2599, 73, 30, 2598, - 2597, 2596, 2593, 91, 2591, 2585, 2579, 2578, 151, 144, - 116, 84, 2577, 82, 115, 147, 145, 53, 2573, 59, - 2566, 2565, 29, 187, 26, 2562, 16, 110, 109, 2560, - 6723, 186, 2559, 18, 334, 155, 2554, 2550, 2, 6, - 10, 2549, 2548, 2545, 2544, 133, 2543, 2539, 2538, 2535, - 23, 63, 22, 8, 121, 100, 2534, 2524, 141, 2521, - 2519, 2518, 2517, 2514, 2508, 2505, 2500, 2488, 2473, 2471, - 3374, 0, 123, 2465, 200, 2459, + 0, 3208, 3206, 43, 2, 40, 39, 3205, 3204, 3202, + 178, 3201, 3200, 3199, 3198, 3197, 3194, 2557, 2537, 2529, + 3193, 3192, 3191, 3190, 3189, 3187, 3185, 3184, 3181, 42, + 102, 33, 114, 197, 198, 3178, 176, 159, 193, 3174, + 3171, 3170, 118, 192, 87, 91, 195, 3169, 3166, 95, + 3165, 3164, 3160, 188, 187, 186, 925, 3158, 181, 110, + 54, 3155, 3154, 3151, 3149, 3148, 3147, 3146, 3145, 3144, + 3143, 3142, 3138, 3133, 3128, 3126, 3125, 3124, 3122, 277, + 3120, 3118, 21, 3115, 103, 3105, 3102, 3099, 3097, 3095, + 8, 3091, 3090, 14, 48, 3089, 3087, 55, 3072, 3026, + 3025, 3024, 3020, 20, 3019, 27, 3016, 41, 3014, 3013, + 129, 3000, 2999, 
2996, 50, 2994, 2991, 2989, 13, 174, + 2988, 2987, 144, 2980, 2978, 2974, 171, 200, 2970, 2116, + 162, 113, 165, 2969, 2966, 106, 189, 2958, 125, 2957, + 2949, 2937, 152, 2935, 1107, 2933, 2932, 67, 68, 32, + 2930, 2929, 184, 72, 9, 3, 221, 2928, 2926, 70, + 85, 2925, 130, 2920, 2918, 107, 73, 2917, 104, 101, + 2915, 2914, 16, 5, 2912, 145, 11, 4, 69, 2911, + 2910, 204, 2909, 2908, 2904, 98, 2902, 2901, 3847, 2900, + 93, 134, 108, 80, 2897, 53, 62, 2896, 2895, 2893, + 2892, 2890, 57, 2889, 2888, 2886, 148, 243, 164, 2880, + 46, 75, 61, 132, 2870, 31, 82, 199, 167, 2865, + 2864, 136, 137, 2863, 2861, 64, 44, 51, 2860, 99, + 131, 111, 49, 94, 133, 2858, 2857, 66, 81, 2856, + 2855, 2845, 2843, 175, 2841, 2838, 79, 2836, 63, 2833, + 173, 26, 28, 97, 2832, 52, 2831, 170, 2830, 89, + 2829, 2827, 76, 124, 77, 47, 2826, 163, 172, 127, + 166, 2825, 2823, 59, 2821, 2819, 2818, 196, 340, 2816, + 2815, 119, 183, 146, 147, 88, 2813, 355, 2812, 2810, + 15, 4916, 7355, 2809, 56, 161, 2808, 2807, 7508, 17, + 65, 25, 2806, 194, 2805, 2804, 2803, 2801, 1385, 185, + 153, 160, 74, 2800, 2799, 2798, 19, 2797, 2796, 2795, + 2794, 2793, 2784, 123, 38, 37, 35, 214, 71, 12, + 105, 169, 157, 90, 2778, 2769, 2768, 126, 92, 2766, + 158, 156, 154, 168, 2765, 180, 142, 115, 2750, 78, + 34, 2748, 2744, 2668, 2654, 96, 2653, 2652, 2639, 2638, + 155, 140, 121, 84, 2637, 83, 116, 150, 149, 86, + 2634, 60, 2632, 2631, 30, 190, 29, 2628, 18, 109, + 112, 2624, 6746, 182, 2622, 22, 351, 151, 2617, 2616, + 6, 7, 10, 2614, 2605, 2604, 2603, 135, 2602, 2601, + 2600, 2599, 24, 58, 23, 1, 117, 100, 2596, 2591, + 139, 2589, 2587, 2586, 2585, 2583, 2573, 2567, 2561, 2558, + 2551, 2549, 2545, 3418, 0, 128, 2503, 201, 2502, } -//line sql.y:8586 +//line sql.y:8709 type yySymType struct { union any empty struct{} @@ -7883,8 +7987,8 @@ func (st *yySymType) integerUnion() int { return v } -func (st *yySymType) intervalTypeUnion() IntervalTypes { - v, _ := st.union.(IntervalTypes) +func (st *yySymType) 
intervalTypeUnion() IntervalType { + v, _ := st.union.(IntervalType) return v } @@ -7923,6 +8027,11 @@ func (st *yySymType) jtOnResponseUnion() *JtOnResponse { return v } +func (st *yySymType) killTypeUnion() KillType { + v, _ := st.union.(KillType) + return v +} + func (st *yySymType) lagLeadExprTypeUnion() LagLeadExprType { v, _ := st.union.(LagLeadExprType) return v @@ -8279,229 +8388,232 @@ func (st *yySymType) withUnion() *With { } var yyR1 = [...]int{ - 0, 418, 419, 419, 7, 7, 7, 7, 7, 7, + 0, 421, 422, 422, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 257, 380, 381, 381, 255, 255, 33, 73, 35, - 35, 34, 34, 37, 37, 36, 8, 8, 8, 9, - 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, - 10, 10, 11, 11, 11, 11, 13, 13, 13, 13, - 13, 20, 21, 12, 12, 22, 22, 107, 107, 23, - 24, 24, 24, 24, 422, 422, 181, 181, 179, 179, - 180, 180, 260, 260, 25, 264, 264, 266, 266, 266, - 266, 256, 256, 256, 26, 26, 265, 265, 267, 267, - 267, 270, 270, 270, 270, 309, 309, 309, 27, 27, - 27, 27, 27, 126, 126, 383, 383, 382, 376, 376, - 375, 375, 374, 379, 379, 378, 378, 377, 39, 40, - 49, 49, 49, 49, 50, 51, 384, 384, 349, 56, - 56, 55, 55, 55, 55, 55, 55, 57, 57, 53, - 53, 52, 52, 54, 54, 351, 351, 337, 337, 350, - 350, 350, 350, 350, 350, 350, 336, 336, 137, 137, - 233, 233, 233, 233, 233, 233, 233, 233, 233, 233, - 233, 233, 233, 233, 233, 233, 233, 399, 399, 399, - 398, 398, 234, 234, 234, 234, 234, 234, 234, 234, - 146, 146, 157, 157, 157, 157, 157, 157, 144, 144, - 145, 143, 143, 143, 151, 151, 151, 151, 151, 151, - 151, 151, 151, 151, 151, 151, 151, 151, 151, 151, - 151, 403, 403, 403, 403, 403, 403, 403, 403, 403, - 403, 403, 403, 403, 403, 403, 403, 403, 403, 403, - 403, 403, 403, 403, 403, 403, 403, 403, 403, 403, - 403, 403, 403, 403, 403, 403, 403, 403, 403, 403, - 403, 403, 403, 156, 156, 152, 152, 152, 153, 153, - 153, 154, 154, 400, 400, 400, 400, 314, 314, 314, - 314, 317, 317, 315, 315, 315, 315, 315, 315, 315, - 
315, 315, 316, 316, 316, 316, 316, 316, 316, 318, - 318, 318, 318, 318, 319, 319, 319, 319, 319, 319, - 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - 320, 320, 320, 320, 320, 320, 320, 320, 335, 335, - 321, 321, 329, 329, 330, 330, 331, 331, 331, 332, - 332, 332, 333, 333, 326, 326, 326, 326, 326, 326, - 326, 326, 326, 328, 328, 327, 327, 327, 338, 363, - 363, 362, 362, 360, 360, 360, 360, 360, 360, 360, - 360, 347, 347, 357, 357, 357, 357, 357, 346, 346, - 342, 342, 342, 343, 343, 344, 344, 341, 341, 345, - 345, 359, 359, 358, 358, 339, 339, 340, 340, 365, - 401, 401, 401, 401, 401, 402, 402, 366, 391, 393, - 393, 393, 392, 392, 389, 390, 388, 388, 388, 388, - 388, 83, 83, 83, 283, 283, 284, 284, 355, 355, - 354, 354, 354, 356, 356, 353, 353, 353, 353, 353, - 353, 353, 353, 353, 353, 353, 353, 353, 353, 353, - 353, 353, 353, 353, 353, 353, 353, 353, 353, 353, - 353, 353, 353, 353, 353, 353, 278, 278, 278, 387, - 387, 387, 387, 387, 387, 386, 386, 386, 352, 352, - 352, 352, 385, 385, 58, 58, 214, 214, 404, 404, - 405, 405, 405, 46, 46, 46, 46, 46, 46, 45, - 45, 45, 41, 41, 41, 41, 41, 41, 41, 41, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 41, 41, 41, 47, 47, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 28, 28, 28, 28, 28, - 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, - 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, - 28, 28, 28, 109, 109, 110, 110, 110, 110, 112, - 112, 112, 368, 368, 59, 59, 3, 3, 169, 171, - 172, 172, 170, 170, 170, 170, 170, 170, 61, 61, - 60, 60, 174, 173, 175, 175, 175, 1, 1, 2, - 2, 4, 4, 373, 373, 373, 373, 373, 373, 373, - 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, - 373, 373, 373, 373, 373, 334, 334, 334, 367, 367, - 369, 111, 111, 111, 111, 111, 111, 111, 111, 111, - 111, 115, 114, 114, 113, 116, 116, 116, 116, 116, - 116, 116, 116, 371, 371, 371, 62, 62, 372, 322, - 323, 324, 5, 6, 348, 370, 122, 122, 29, 38, - 38, 30, 30, 30, 30, 31, 31, 63, 66, 66, - 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, - 
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, - 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, - 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, - 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, - 64, 64, 277, 277, 286, 286, 276, 276, 301, 301, - 301, 279, 279, 279, 280, 280, 397, 397, 397, 273, - 273, 65, 65, 65, 302, 302, 302, 302, 68, 68, - 406, 406, 407, 407, 408, 408, 408, 69, 70, 70, - 304, 304, 305, 305, 71, 72, 84, 84, 84, 84, - 84, 84, 84, 85, 85, 85, 85, 108, 108, 108, - 15, 15, 15, 15, 80, 80, 80, 14, 14, 17, - 67, 67, 74, 394, 394, 395, 396, 396, 396, 396, - 75, 77, 32, 32, 32, 32, 32, 32, 132, 132, - 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, - 120, 120, 127, 127, 127, 121, 121, 423, 78, 79, - 79, 125, 125, 125, 118, 118, 118, 124, 124, 124, - 16, 16, 18, 259, 259, 19, 19, 129, 129, 131, - 131, 131, 131, 131, 133, 133, 133, 133, 133, 133, - 133, 128, 128, 130, 130, 130, 130, 294, 294, 294, - 293, 293, 163, 163, 165, 164, 164, 166, 166, 167, - 167, 167, 167, 212, 212, 189, 189, 251, 251, 252, - 252, 250, 250, 258, 258, 253, 253, 253, 253, 261, - 261, 168, 168, 168, 168, 176, 176, 177, 177, 178, - 178, 303, 303, 299, 299, 299, 298, 298, 182, 182, - 182, 184, 183, 183, 183, 183, 185, 185, 187, 187, - 186, 186, 188, 193, 193, 192, 192, 190, 190, 190, - 190, 191, 191, 191, 191, 194, 194, 142, 142, 142, - 142, 142, 142, 142, 142, 155, 155, 155, 155, 158, - 158, 158, 158, 158, 158, 158, 158, 158, 158, 158, - 241, 241, 147, 147, 147, 147, 147, 147, 147, 147, - 147, 147, 147, 147, 147, 147, 147, 150, 150, 150, - 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, - 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, - 150, 150, 217, 217, 216, 216, 86, 86, 86, 87, - 87, 88, 88, 88, 88, 88, 89, 89, 89, 89, - 89, 89, 89, 91, 91, 90, 90, 207, 207, 291, - 291, 92, 93, 93, 96, 96, 95, 94, 94, 100, - 100, 97, 97, 99, 99, 98, 101, 101, 102, 103, - 103, 274, 274, 195, 195, 203, 203, 203, 203, 196, - 196, 196, 196, 196, 196, 196, 204, 204, 204, 211, - 205, 205, 201, 201, 199, 199, 199, 199, 199, 
199, - 199, 199, 199, 199, 200, 200, 200, 200, 200, 200, - 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, - 200, 200, 200, 200, 160, 160, 160, 160, 222, 222, - 148, 148, 148, 148, 148, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 149, 149, 161, 161, 161, - 161, 162, 162, 162, 162, 162, 162, 162, 311, 311, - 117, 117, 117, 117, 117, 117, 117, 117, 117, 117, - 117, 117, 117, 117, 117, 117, 117, 117, 117, 117, - 424, 424, 325, 325, 325, 202, 202, 202, 202, 202, - 123, 123, 123, 123, 123, 308, 308, 308, 312, 312, - 312, 310, 310, 310, 310, 310, 310, 310, 310, 310, - 310, 310, 310, 310, 310, 310, 313, 313, 220, 220, - 119, 119, 218, 218, 219, 221, 221, 213, 213, 213, - 213, 215, 215, 198, 198, 198, 223, 223, 224, 224, - 104, 105, 105, 106, 106, 225, 225, 227, 226, 226, - 228, 229, 229, 229, 230, 230, 231, 231, 231, 48, - 48, 48, 48, 48, 43, 43, 43, 43, 44, 44, - 44, 44, 134, 134, 134, 134, 136, 136, 135, 135, - 81, 81, 82, 82, 82, 140, 140, 141, 141, 141, - 138, 138, 139, 139, 248, 248, 232, 232, 232, 239, - 239, 239, 235, 235, 237, 237, 237, 238, 238, 238, - 236, 245, 245, 247, 247, 246, 246, 
242, 242, 243, - 243, 244, 244, 244, 240, 240, 197, 197, 197, 197, - 197, 249, 249, 249, 249, 262, 262, 208, 208, 210, - 210, 209, 209, 159, 263, 263, 271, 268, 268, 269, - 269, 295, 295, 295, 272, 272, 285, 285, 281, 281, - 282, 282, 275, 275, 287, 287, 287, 76, 206, 206, - 364, 364, 361, 290, 290, 292, 292, 296, 296, 300, - 300, 297, 297, 416, 416, 254, 254, 410, 410, 409, - 409, 413, 413, 412, 412, 411, 411, 414, 414, 415, - 415, 425, 425, 417, 288, 288, 288, 288, 288, 288, - 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, - 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, - 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, - 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, - 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, - 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, - 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, - 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, - 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, - 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, - 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, - 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, - 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, - 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, - 288, 288, 288, 288, 288, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 
289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 420, 421, - 306, 307, 307, 307, + 7, 7, 259, 382, 383, 383, 257, 257, 28, 74, + 36, 36, 35, 35, 38, 38, 37, 31, 31, 31, + 32, 32, 32, 32, 32, 32, 32, 32, 33, 33, + 33, 33, 33, 29, 29, 29, 29, 30, 30, 30, + 30, 30, 15, 16, 34, 34, 17, 17, 108, 108, + 18, 19, 19, 19, 19, 425, 425, 183, 183, 181, + 181, 182, 182, 262, 262, 20, 266, 266, 268, 268, + 268, 268, 258, 258, 258, 21, 21, 267, 267, 269, + 269, 
269, 272, 272, 272, 272, 311, 311, 311, 22, + 22, 22, 22, 22, 128, 128, 385, 385, 384, 378, + 378, 377, 377, 376, 381, 381, 380, 380, 379, 40, + 41, 50, 50, 50, 50, 51, 52, 386, 386, 351, + 57, 57, 56, 56, 56, 56, 56, 56, 58, 58, + 54, 54, 53, 53, 55, 55, 353, 353, 339, 339, + 352, 352, 352, 352, 352, 352, 352, 338, 338, 139, + 139, 235, 235, 235, 235, 235, 235, 235, 235, 235, + 235, 235, 235, 235, 235, 235, 235, 235, 401, 401, + 401, 400, 400, 236, 236, 236, 236, 236, 236, 236, + 236, 148, 148, 159, 159, 159, 159, 159, 159, 146, + 146, 147, 145, 145, 145, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 405, 405, 405, 405, 405, 405, 405, 405, + 405, 405, 405, 405, 405, 405, 405, 405, 405, 405, + 405, 405, 405, 405, 405, 405, 405, 405, 405, 405, + 405, 405, 405, 405, 405, 405, 405, 405, 405, 405, + 405, 405, 405, 405, 158, 158, 154, 154, 154, 155, + 155, 155, 156, 156, 402, 402, 402, 402, 316, 316, + 316, 316, 319, 319, 317, 317, 317, 317, 317, 317, + 317, 317, 317, 318, 318, 318, 318, 318, 318, 318, + 320, 320, 320, 320, 320, 321, 321, 321, 321, 321, + 321, 321, 321, 321, 321, 321, 321, 321, 321, 321, + 321, 322, 322, 322, 322, 322, 322, 322, 322, 337, + 337, 323, 323, 331, 331, 332, 332, 333, 333, 333, + 334, 334, 334, 335, 335, 328, 328, 328, 328, 328, + 328, 328, 328, 328, 330, 330, 329, 329, 329, 340, + 365, 365, 364, 364, 362, 362, 362, 362, 362, 362, + 362, 362, 349, 349, 359, 359, 359, 359, 359, 348, + 348, 344, 344, 344, 345, 345, 346, 346, 343, 343, + 347, 347, 361, 361, 360, 360, 341, 341, 342, 342, + 367, 403, 403, 403, 403, 403, 404, 404, 368, 393, + 395, 395, 395, 394, 394, 391, 392, 390, 390, 390, + 390, 390, 84, 84, 84, 285, 285, 286, 286, 357, + 357, 356, 356, 356, 358, 358, 355, 355, 355, 355, + 355, 355, 355, 355, 355, 355, 355, 355, 355, 355, + 355, 355, 355, 355, 355, 355, 355, 355, 355, 355, + 355, 355, 355, 355, 355, 355, 355, 280, 280, 280, + 389, 389, 389, 389, 389, 389, 388, 388, 388, 354, + 354, 
354, 354, 387, 387, 59, 59, 216, 216, 406, + 406, 407, 407, 407, 47, 47, 47, 47, 47, 47, + 46, 46, 46, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 48, 48, 43, 43, 43, 43, + 43, 43, 43, 43, 43, 43, 23, 23, 23, 23, + 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, + 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, + 23, 23, 23, 23, 23, 23, 110, 110, 111, 111, + 111, 111, 113, 113, 113, 370, 370, 60, 60, 3, + 3, 171, 173, 174, 174, 172, 172, 172, 172, 172, + 172, 62, 62, 61, 61, 176, 175, 177, 177, 177, + 1, 1, 2, 2, 4, 4, 375, 375, 375, 375, + 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, + 375, 375, 375, 375, 375, 375, 375, 375, 336, 336, + 336, 369, 369, 371, 112, 112, 112, 112, 112, 112, + 112, 112, 112, 112, 116, 115, 115, 114, 117, 117, + 117, 117, 117, 117, 117, 117, 373, 373, 373, 63, + 63, 374, 324, 325, 326, 5, 6, 350, 372, 124, + 124, 24, 39, 39, 25, 25, 25, 25, 26, 26, + 64, 67, 67, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 279, 279, 288, 288, 278, + 278, 303, 303, 303, 281, 281, 281, 282, 282, 399, + 399, 399, 275, 275, 66, 66, 66, 304, 304, 304, + 304, 69, 69, 408, 408, 409, 409, 410, 410, 410, + 70, 71, 71, 306, 306, 307, 307, 72, 73, 85, + 85, 85, 85, 85, 85, 85, 86, 86, 86, 86, + 109, 109, 109, 10, 10, 10, 10, 81, 81, 81, + 9, 9, 11, 68, 68, 75, 396, 396, 397, 398, + 398, 398, 398, 76, 78, 27, 27, 27, 27, 27, + 27, 134, 134, 122, 122, 122, 122, 122, 122, 122, + 122, 122, 122, 122, 122, 129, 129, 129, 123, 123, + 426, 79, 80, 80, 127, 127, 127, 120, 120, 120, + 126, 126, 126, 12, 12, 13, 261, 261, 14, 14, + 131, 131, 133, 133, 133, 133, 133, 135, 135, 135, + 135, 135, 135, 135, 130, 130, 132, 132, 132, 132, + 296, 296, 296, 295, 295, 165, 165, 167, 166, 166, + 168, 168, 169, 169, 169, 169, 214, 214, 191, 
191, + 253, 253, 254, 254, 252, 252, 260, 260, 255, 255, + 255, 255, 263, 263, 170, 170, 170, 170, 178, 178, + 179, 179, 180, 180, 305, 305, 301, 301, 301, 300, + 300, 184, 184, 184, 186, 185, 185, 185, 185, 187, + 187, 189, 189, 188, 188, 190, 195, 195, 194, 194, + 192, 192, 192, 192, 193, 193, 193, 193, 196, 196, + 144, 144, 144, 144, 144, 144, 144, 144, 157, 157, + 157, 157, 160, 160, 160, 160, 160, 160, 160, 160, + 160, 160, 160, 243, 243, 149, 149, 149, 149, 149, + 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, + 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, + 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, + 152, 152, 152, 152, 152, 219, 219, 218, 218, 87, + 87, 87, 88, 88, 89, 89, 89, 89, 89, 90, + 90, 90, 90, 90, 90, 90, 92, 92, 91, 91, + 209, 209, 293, 293, 93, 94, 94, 97, 97, 96, + 95, 95, 101, 101, 98, 98, 100, 100, 99, 102, + 102, 103, 104, 104, 276, 276, 197, 197, 205, 205, + 205, 205, 198, 198, 198, 198, 198, 198, 198, 206, + 206, 206, 213, 207, 207, 203, 203, 201, 201, 201, + 201, 201, 201, 201, 201, 201, 201, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 
202, + 202, 202, 202, 202, 202, 202, 202, 202, 162, 162, + 162, 162, 224, 224, 150, 150, 150, 150, 150, 150, + 150, 150, 150, 150, 150, 150, 150, 150, 150, 151, + 151, 163, 163, 163, 163, 164, 164, 164, 164, 164, + 164, 164, 313, 313, 118, 118, 118, 118, 118, 118, + 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, + 118, 118, 118, 118, 119, 119, 119, 119, 119, 119, + 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, + 119, 119, 427, 427, 327, 327, 327, 204, 204, 204, + 204, 204, 125, 125, 125, 125, 125, 310, 310, 310, + 314, 314, 314, 312, 312, 312, 312, 312, 312, 312, + 312, 312, 312, 312, 312, 312, 312, 312, 315, 315, + 222, 222, 121, 121, 220, 220, 221, 223, 223, 215, + 215, 215, 215, 217, 217, 200, 200, 200, 225, 225, + 226, 226, 105, 106, 106, 107, 107, 227, 227, 229, + 228, 228, 230, 231, 231, 231, 232, 232, 233, 233, + 233, 49, 49, 49, 49, 49, 44, 44, 44, 44, + 45, 45, 45, 45, 136, 136, 136, 136, 138, 138, + 137, 137, 82, 82, 83, 83, 83, 142, 142, 143, + 143, 143, 140, 140, 141, 141, 250, 250, 234, 234, + 234, 241, 241, 241, 237, 237, 239, 239, 239, 240, + 240, 240, 238, 247, 247, 249, 249, 248, 248, 244, + 244, 245, 245, 246, 246, 246, 242, 242, 199, 199, + 199, 199, 199, 251, 251, 251, 251, 264, 264, 210, + 210, 212, 212, 211, 211, 161, 265, 265, 273, 270, + 270, 271, 271, 297, 297, 297, 274, 274, 287, 287, + 283, 283, 284, 284, 277, 277, 289, 289, 289, 77, + 208, 208, 366, 366, 363, 292, 292, 294, 294, 298, + 298, 302, 302, 299, 299, 418, 418, 256, 256, 412, + 412, 411, 411, 415, 415, 414, 414, 413, 413, 416, + 416, 417, 417, 428, 428, 419, 8, 420, 420, 420, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 
290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 
291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 423, 424, 308, 309, 309, 309, } var yyR2 = [...]int{ @@ -8509,164 +8621,167 @@ var yyR2 = [...]int{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 0, 1, 1, 0, 1, 1, 1, 2, 3, 2, - 3, 0, 1, 3, 1, 4, 3, 3, 4, 3, - 2, 3, 4, 3, 4, 2, 7, 1, 3, 3, - 3, 3, 1, 2, 1, 1, 3, 2, 3, 3, - 2, 5, 7, 10, 9, 7, 8, 1, 1, 10, - 11, 9, 8, 8, 1, 1, 1, 3, 1, 3, - 1, 3, 0, 4, 3, 1, 3, 3, 3, 3, - 3, 1, 1, 2, 5, 4, 1, 3, 3, 2, - 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, - 6, 12, 2, 0, 2, 0, 2, 1, 0, 2, - 1, 3, 3, 0, 1, 1, 3, 3, 6, 4, - 7, 8, 8, 8, 6, 3, 1, 1, 5, 0, - 1, 1, 1, 1, 2, 2, 2, 0, 1, 4, - 4, 4, 4, 4, 4, 2, 4, 1, 3, 1, - 1, 3, 4, 3, 3, 3, 5, 10, 0, 2, - 0, 2, 3, 5, 3, 4, 2, 3, 2, 3, - 3, 3, 3, 2, 2, 4, 4, 1, 1, 1, - 1, 1, 0, 2, 2, 3, 3, 2, 2, 2, - 1, 1, 2, 2, 2, 2, 2, 2, 1, 1, - 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, - 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 0, 1, 1, 0, 1, 1, 1, 2, 3, + 2, 3, 0, 1, 3, 1, 4, 3, 3, 4, + 3, 2, 3, 4, 3, 4, 2, 7, 1, 3, + 3, 3, 3, 1, 2, 1, 1, 3, 2, 3, + 3, 2, 5, 7, 10, 9, 7, 8, 1, 1, + 10, 11, 9, 8, 8, 1, 1, 1, 3, 1, + 3, 1, 3, 0, 4, 3, 1, 3, 3, 3, + 3, 3, 1, 1, 2, 5, 4, 1, 3, 3, + 2, 2, 2, 2, 2, 1, 1, 1, 1, 2, + 2, 6, 12, 2, 
0, 2, 0, 2, 1, 0, + 2, 1, 3, 3, 0, 1, 1, 3, 3, 6, + 4, 7, 8, 8, 8, 6, 3, 1, 1, 5, + 0, 1, 1, 1, 1, 2, 2, 2, 0, 1, + 4, 4, 4, 4, 4, 4, 2, 4, 1, 3, + 1, 1, 3, 4, 3, 3, 3, 5, 10, 0, + 2, 0, 2, 3, 5, 3, 4, 2, 3, 2, + 3, 3, 3, 3, 2, 2, 4, 4, 1, 1, + 1, 1, 1, 0, 2, 2, 3, 3, 2, 2, + 2, 1, 1, 2, 2, 2, 2, 2, 2, 1, + 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, + 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 2, 1, 1, 2, 1, 2, 1, 3, 1, 1, - 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, - 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, - 2, 2, 2, 1, 1, 1, 1, 1, 5, 5, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, - 0, 3, 0, 5, 1, 3, 0, 3, 5, 0, - 1, 1, 0, 1, 0, 3, 3, 2, 2, 2, - 1, 2, 2, 0, 1, 0, 2, 2, 5, 0, - 1, 1, 2, 1, 3, 2, 1, 1, 3, 3, - 3, 0, 1, 4, 3, 3, 4, 2, 0, 2, - 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, - 1, 1, 3, 3, 4, 3, 1, 3, 1, 7, - 6, 7, 7, 8, 8, 0, 1, 5, 2, 1, - 1, 1, 0, 1, 3, 3, 1, 1, 2, 2, - 2, 0, 1, 1, 1, 2, 0, 1, 0, 1, - 1, 3, 2, 1, 2, 3, 3, 3, 4, 4, - 3, 3, 3, 3, 4, 4, 3, 3, 3, 3, + 1, 2, 1, 1, 2, 1, 2, 1, 3, 1, + 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, + 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, + 2, 2, 2, 2, 1, 1, 1, 1, 1, 5, + 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 3, 0, 3, 0, 5, 1, 3, 0, 3, 5, + 0, 1, 1, 0, 1, 0, 3, 3, 2, 2, + 2, 1, 2, 2, 0, 1, 0, 2, 2, 5, + 0, 1, 1, 2, 1, 3, 2, 1, 1, 3, + 3, 3, 0, 1, 4, 3, 3, 4, 2, 0, + 2, 1, 1, 1, 1, 1, 0, 1, 1, 1, + 0, 1, 1, 3, 3, 4, 3, 1, 3, 1, + 7, 6, 7, 7, 8, 8, 0, 1, 5, 2, + 1, 1, 1, 0, 1, 3, 3, 1, 1, 2, + 2, 2, 0, 1, 1, 1, 2, 0, 1, 0, + 1, 1, 3, 2, 1, 2, 3, 3, 3, 4, + 4, 3, 3, 3, 3, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 4, 5, 0, 2, 2, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, - 1, 1, 0, 1, 0, 1, 0, 2, 0, 2, - 0, 2, 2, 0, 1, 5, 1, 3, 7, 1, - 3, 3, 1, 2, 2, 2, 5, 5, 5, 6, - 8, 5, 5, 4, 4, 4, 6, 5, 5, 5, - 2, 2, 2, 2, 3, 3, 3, 4, 3, 3, - 1, 3, 5, 1, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 2, 2, 3, 4, 4, 2, 11, - 3, 
6, 8, 6, 6, 6, 13, 8, 6, 10, - 5, 5, 5, 7, 5, 5, 5, 5, 5, 7, - 7, 5, 5, 0, 6, 5, 6, 4, 5, 0, - 8, 9, 0, 3, 0, 1, 0, 3, 8, 4, - 1, 3, 3, 6, 7, 7, 8, 4, 0, 1, - 0, 1, 3, 3, 1, 1, 2, 1, 1, 0, - 2, 0, 2, 5, 3, 7, 4, 4, 4, 4, - 3, 3, 3, 7, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 2, 0, 2, 2, 1, 3, - 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 3, 1, 3, 3, 0, 2, 2, 2, 2, - 2, 2, 2, 4, 4, 3, 0, 1, 4, 3, - 4, 4, 3, 3, 3, 2, 1, 3, 3, 3, - 5, 7, 7, 6, 5, 3, 2, 3, 5, 5, - 3, 3, 7, 3, 3, 3, 3, 4, 7, 5, - 2, 4, 4, 4, 4, 4, 5, 5, 4, 4, - 4, 4, 4, 4, 4, 4, 2, 2, 4, 4, - 4, 4, 4, 2, 3, 3, 3, 3, 5, 2, - 3, 3, 2, 3, 4, 4, 4, 3, 4, 4, - 5, 3, 0, 1, 0, 1, 1, 1, 0, 2, - 2, 0, 2, 2, 0, 2, 0, 1, 1, 1, - 1, 2, 1, 3, 1, 1, 1, 1, 1, 3, - 0, 1, 1, 3, 3, 2, 2, 1, 1, 5, - 0, 1, 0, 1, 2, 3, 0, 3, 3, 3, - 3, 3, 1, 0, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 0, 1, 1, 4, 4, 4, - 2, 2, 3, 1, 3, 2, 1, 2, 1, 2, - 2, 4, 3, 3, 6, 4, 7, 6, 1, 3, - 2, 2, 2, 2, 1, 1, 1, 3, 2, 1, - 1, 1, 0, 1, 1, 0, 3, 0, 2, 0, - 2, 1, 2, 2, 0, 1, 1, 0, 1, 1, - 5, 5, 4, 0, 2, 4, 4, 0, 1, 0, - 1, 2, 3, 4, 1, 1, 1, 1, 1, 1, - 1, 1, 3, 1, 2, 3, 5, 0, 1, 2, - 1, 1, 0, 1, 2, 1, 3, 1, 1, 1, - 4, 3, 1, 1, 2, 3, 7, 0, 3, 0, - 1, 1, 3, 1, 3, 1, 1, 3, 3, 1, - 3, 4, 4, 4, 3, 2, 4, 0, 1, 0, - 2, 0, 1, 0, 1, 2, 1, 1, 1, 2, - 2, 1, 2, 3, 2, 3, 2, 2, 2, 1, - 1, 3, 3, 0, 1, 1, 2, 6, 5, 6, - 6, 0, 2, 3, 3, 0, 2, 3, 3, 3, - 2, 3, 1, 3, 6, 3, 4, 3, 1, 3, - 4, 5, 6, 3, 4, 5, 6, 3, 4, 1, - 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, - 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, - 1, 3, 1, 1, 1, 2, 2, 2, 2, 1, - 1, 2, 7, 7, 6, 6, 2, 2, 5, 6, - 3, 3, 1, 3, 1, 3, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, - 4, 2, 4, 0, 1, 2, 5, 0, 3, 0, - 1, 4, 4, 2, 0, 1, 1, 2, 2, 1, - 1, 2, 2, 0, 1, 1, 1, 1, 5, 1, - 3, 0, 3, 1, 1, 1, 2, 1, 2, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 3, 4, 6, 4, 4, 8, 6, 8, 6, - 5, 4, 10, 2, 2, 1, 2, 2, 2, 2, - 2, 4, 5, 5, 5, 5, 5, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, - 6, 5, 4, 4, 4, 4, 4, 7, 4, 4, - 6, 6, 6, 8, 6, 6, 4, 4, 3, 4, - 6, 6, 4, 4, 6, 4, 6, 4, 4, 4, - 4, 4, 4, 6, 4, 
6, 4, 4, 4, 6, - 4, 6, 4, 4, 6, 4, 6, 4, 6, 8, + 3, 3, 3, 3, 3, 4, 5, 0, 2, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, + 1, 1, 1, 0, 1, 0, 1, 0, 2, 0, + 2, 0, 2, 2, 0, 1, 5, 1, 3, 7, + 1, 3, 3, 1, 2, 2, 2, 5, 5, 5, + 6, 8, 5, 5, 4, 4, 4, 6, 5, 5, + 5, 2, 2, 2, 2, 3, 3, 3, 4, 3, + 3, 1, 3, 5, 1, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 2, 2, 3, 4, 4, 2, + 11, 3, 6, 8, 6, 6, 6, 13, 8, 6, + 6, 10, 7, 5, 5, 5, 7, 5, 5, 5, + 5, 5, 7, 7, 5, 5, 0, 6, 5, 6, + 4, 5, 0, 8, 9, 0, 3, 0, 1, 0, + 3, 8, 4, 1, 3, 3, 6, 7, 7, 8, + 4, 0, 1, 0, 1, 3, 3, 1, 1, 2, + 1, 1, 0, 2, 0, 2, 5, 3, 7, 4, + 4, 4, 4, 3, 3, 3, 7, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 2, 0, 2, + 2, 1, 3, 2, 0, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 3, 1, 3, 3, 0, 2, + 2, 2, 2, 2, 2, 2, 4, 4, 3, 0, + 1, 4, 3, 4, 4, 3, 3, 3, 2, 1, + 3, 3, 3, 5, 7, 7, 6, 5, 3, 2, + 4, 5, 5, 3, 3, 7, 3, 3, 3, 3, + 4, 7, 5, 2, 4, 4, 4, 4, 4, 5, + 5, 4, 4, 4, 4, 4, 4, 4, 4, 2, + 2, 4, 4, 4, 4, 4, 2, 3, 3, 3, + 3, 5, 2, 3, 3, 2, 3, 4, 4, 4, + 3, 4, 4, 5, 3, 0, 1, 0, 1, 1, + 1, 0, 2, 2, 0, 2, 2, 0, 2, 0, + 1, 1, 1, 1, 2, 1, 3, 1, 1, 1, + 1, 1, 3, 0, 1, 1, 3, 3, 2, 2, + 1, 1, 5, 0, 1, 0, 1, 2, 3, 0, + 3, 3, 3, 3, 3, 1, 0, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, + 4, 4, 4, 2, 2, 3, 1, 3, 2, 1, + 2, 1, 2, 2, 4, 3, 3, 6, 4, 7, + 6, 1, 3, 2, 2, 2, 2, 1, 1, 1, + 3, 2, 1, 1, 1, 0, 1, 1, 0, 3, + 0, 2, 0, 2, 1, 2, 2, 0, 1, 1, + 0, 1, 1, 5, 5, 4, 0, 2, 4, 4, + 0, 1, 0, 1, 2, 3, 4, 1, 1, 1, + 1, 1, 1, 1, 1, 3, 1, 2, 3, 5, + 0, 1, 2, 1, 1, 0, 1, 2, 1, 3, + 1, 1, 1, 4, 3, 1, 1, 2, 3, 7, + 0, 3, 0, 1, 1, 3, 1, 3, 1, 1, + 3, 3, 1, 3, 4, 4, 4, 3, 2, 4, + 0, 1, 0, 2, 0, 1, 0, 1, 2, 1, + 1, 1, 2, 2, 1, 2, 3, 2, 3, 2, + 2, 2, 1, 1, 3, 3, 0, 1, 1, 2, + 6, 5, 6, 6, 0, 2, 3, 3, 0, 2, + 3, 3, 3, 2, 3, 1, 3, 6, 3, 4, + 3, 1, 3, 4, 5, 6, 3, 4, 5, 6, + 3, 4, 1, 1, 1, 3, 3, 3, 3, 3, + 3, 5, 5, 3, 3, 3, 3, 3, 3, 1, + 1, 1, 1, 1, 3, 1, 1, 1, 2, 2, + 2, 2, 1, 1, 2, 7, 7, 6, 6, 2, + 2, 5, 6, 3, 3, 1, 3, 1, 3, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, + 2, 2, 2, 4, 2, 4, 0, 1, 2, 5, + 0, 3, 0, 1, 4, 4, 2, 0, 1, 
1, + 2, 2, 1, 1, 2, 2, 0, 1, 1, 1, + 1, 5, 1, 3, 0, 3, 1, 1, 1, 2, + 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 3, 4, 6, 4, 4, 8, + 6, 8, 6, 5, 4, 10, 2, 2, 1, 2, + 2, 2, 2, 2, 4, 5, 5, 5, 5, 5, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 8, 4, 8, 8, 6, 5, 4, 4, 4, 4, + 4, 7, 4, 4, 6, 6, 6, 8, 6, 6, + 4, 4, 3, 4, 6, 6, 4, 4, 6, 4, + 6, 4, 4, 4, 4, 4, 4, 6, 4, 6, + 4, 4, 4, 6, 4, 6, 4, 4, 6, 4, + 6, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, - 6, 8, 4, 6, 8, 4, 4, 4, 6, 4, - 6, 4, 8, 6, 4, 4, 6, 4, 6, 8, - 4, 6, 8, 4, 4, 6, 8, 6, 4, 6, - 6, 8, 10, 7, 8, 8, 9, 4, 4, 4, - 4, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 4, 4, 4, 4, 4, 4, 6, 4, 6, - 5, 9, 6, 9, 8, 6, 8, 8, 8, 6, - 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, - 6, 8, 10, 12, 14, 6, 8, 8, 10, 12, - 14, 6, 8, 10, 12, 6, 8, 4, 4, 3, - 4, 6, 6, 4, 6, 4, 6, 8, 0, 2, + 4, 4, 6, 4, 6, 4, 8, 6, 4, 4, + 6, 4, 6, 8, 4, 6, 8, 4, 4, 6, + 8, 6, 4, 6, 6, 8, 10, 7, 8, 8, + 9, 4, 4, 4, 4, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 4, 4, 4, 4, 4, + 4, 6, 4, 6, 5, 9, 6, 9, 8, 6, + 8, 8, 8, 6, 1, 1, 1, 1, 1, 1, + 1, 1, 0, 2, 6, 8, 10, 12, 14, 6, + 8, 8, 10, 12, 14, 6, 8, 10, 12, 6, + 8, 4, 4, 3, 4, 6, 6, 4, 6, 4, + 6, 8, 0, 2, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 0, 2, 0, 2, 3, 4, 4, 4, + 4, 4, 0, 3, 4, 7, 3, 1, 1, 1, + 0, 5, 5, 2, 3, 1, 2, 2, 1, 2, + 1, 2, 2, 1, 2, 2, 1, 1, 0, 1, + 0, 1, 0, 2, 1, 2, 4, 0, 2, 1, + 1, 3, 5, 1, 1, 1, 2, 2, 0, 3, + 0, 2, 2, 1, 3, 0, 1, 0, 1, 3, + 1, 3, 2, 0, 1, 1, 0, 1, 2, 4, + 4, 0, 2, 2, 1, 1, 3, 3, 3, 3, + 3, 3, 3, 3, 0, 3, 3, 3, 0, 3, + 1, 1, 0, 4, 0, 1, 1, 0, 3, 1, + 3, 2, 1, 1, 0, 1, 2, 4, 9, 3, + 5, 0, 3, 3, 0, 1, 0, 2, 2, 0, + 2, 2, 2, 0, 2, 1, 2, 3, 3, 0, + 2, 1, 2, 3, 4, 3, 0, 1, 2, 1, + 5, 4, 4, 1, 3, 3, 5, 0, 5, 1, + 3, 1, 2, 3, 4, 1, 1, 3, 3, 1, + 2, 1, 1, 1, 1, 1, 1, 1, 0, 1, + 0, 2, 0, 3, 0, 1, 0, 1, 1, 5, + 0, 1, 0, 1, 2, 1, 1, 1, 1, 1, + 
1, 0, 1, 1, 1, 0, 1, 0, 3, 0, + 4, 0, 3, 0, 3, 0, 3, 0, 3, 0, + 3, 0, 3, 1, 1, 11, 3, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 0, 2, 0, 2, 3, 4, 4, 4, 4, 4, - 0, 3, 4, 7, 3, 1, 1, 1, 0, 5, - 5, 2, 3, 1, 2, 2, 1, 2, 1, 2, - 2, 1, 2, 2, 1, 1, 0, 1, 0, 1, - 0, 2, 1, 2, 4, 0, 2, 1, 1, 3, - 5, 1, 1, 1, 2, 2, 0, 3, 0, 2, - 2, 1, 3, 0, 1, 0, 1, 3, 1, 3, - 2, 0, 1, 1, 0, 1, 2, 4, 4, 0, - 2, 2, 1, 1, 3, 3, 3, 3, 3, 3, - 3, 3, 0, 3, 3, 3, 0, 3, 1, 1, - 0, 4, 0, 1, 1, 0, 3, 1, 3, 2, - 1, 1, 0, 1, 2, 4, 9, 3, 5, 0, - 3, 3, 0, 1, 0, 2, 2, 0, 2, 2, - 2, 0, 2, 1, 2, 3, 3, 0, 2, 1, - 2, 3, 4, 3, 0, 1, 2, 1, 5, 4, - 4, 1, 3, 3, 5, 0, 5, 1, 3, 1, - 2, 3, 4, 1, 1, 3, 3, 1, 2, 1, - 1, 1, 1, 1, 1, 1, 0, 1, 0, 2, - 0, 3, 0, 1, 0, 1, 1, 5, 0, 1, - 0, 1, 2, 1, 1, 1, 1, 1, 1, 0, - 1, 1, 1, 0, 1, 0, 3, 0, 4, 0, - 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, - 3, 1, 1, 11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, @@ -8727,855 +8842,865 @@ var yyR2 = [...]int{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 0, 0, 1, 1, + 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, } var yyChk = [...]int{ - -1000, -418, -78, -423, -7, -11, -20, -21, -22, -417, - -23, -24, -25, -26, -27, -28, -29, -30, -31, -63, - -66, -64, -65, -68, -69, -70, -71, -72, -14, -17, - -67, -32, -33, -73, -74, -75, -76, -77, -16, -18, - -19, -9, -8, -13, 10, 11, -107, 46, -34, 33, - -39, -49, 226, -50, -40, 227, -51, 229, 228, 266, - 230, 378, 259, 75, 314, 315, 317, 318, 319, 320, - -108, 674, 264, 265, 232, 37, 34, 35, 38, 236, - 272, 273, 235, -10, -35, 9, -420, 12, 459, 261, - 260, 29, -12, 567, 87, -79, -419, 722, -248, -232, - 23, 34, 30, -231, -227, -125, -232, 21, 19, 8, - -78, -78, -78, 45, 13, 14, -78, -349, -351, 87, - 159, 87, -78, -56, -55, -53, -52, -54, -57, 32, - -46, -47, -373, -45, -42, 231, 228, 276, 123, 124, - 266, 267, 268, 230, 250, 265, 269, 264, 285, -41, - 82, 34, 567, 570, -356, 227, 233, 234, 229, 460, - 126, 125, 76, -353, 
373, 601, 692, -57, 694, 101, - 104, 693, 45, 240, 695, 696, 697, 608, 698, 249, - 699, 700, 701, 702, 708, 649, 709, 710, 711, 127, - 8, -78, -300, -296, 91, -289, 564, 252, 599, 422, - 600, 301, 82, 42, 573, 370, 373, 601, 489, 692, - 379, 314, 330, 324, 494, 495, 496, 353, 345, 565, - 602, 574, 304, 253, 289, 686, 343, 135, 694, 308, - 603, 267, 380, 381, 604, 382, 101, 317, 419, 707, - 307, 605, 705, 104, 693, 322, 80, 488, 52, 689, - 45, 262, 427, 428, 341, 235, 337, 695, 290, 606, - 576, 283, 126, 123, 714, 37, 333, 51, 31, 704, - 125, 50, 696, 150, 607, 697, 608, 384, 360, 680, - 49, 385, 268, 609, 85, 273, 569, 311, 688, 386, - 508, 334, 387, 300, 703, 232, 610, 669, 661, 662, - 388, 389, 681, 365, 361, 366, 510, 611, 411, 493, - 390, 665, 666, 721, 53, 612, 613, 682, 124, 614, - 79, 698, 81, 328, 329, 615, 298, 251, 513, 514, - 413, 357, 471, 478, 479, 111, 112, 474, 113, 480, - 114, 481, 482, 483, 472, 115, 108, 473, 484, 485, - 358, 359, 116, 486, 110, 109, 475, 477, 117, 487, - 249, 36, 391, 566, 302, 59, 306, 277, 414, 47, - 363, 718, 46, 676, 515, 616, 679, 356, 352, 468, - 54, 617, 618, 619, 620, 490, 699, 355, 327, 351, - 713, 4, 295, 491, 700, 63, 234, 368, 367, 369, - 284, 410, 348, 621, 622, 623, 256, 83, 624, 338, - 22, 625, 626, 392, 291, 627, 57, 628, 629, 417, - 265, 630, 55, 701, 40, 631, 270, 715, 702, 632, - 633, 634, 675, 635, 272, 636, 394, 637, 663, 664, - 393, 362, 364, 516, 279, 395, 378, 237, 568, 638, - 312, 332, 269, 706, 639, 257, 504, 505, 506, 507, - 687, 512, 511, 271, 276, 264, 418, 258, 640, 641, - 642, 643, 644, 305, 660, 645, 646, 318, 708, 469, - 44, 647, 648, 649, 650, 651, 299, 294, 412, 421, - 62, 84, 375, 652, 653, 685, 326, 323, 292, 654, - 315, 56, 709, 710, 711, 286, 712, 497, 498, 499, - 500, 10, 550, 533, 561, 534, 551, 535, 544, 536, - 552, 560, 562, 517, 525, 518, 526, 556, 539, 553, - 545, 538, 537, 559, 542, 546, 519, 527, 557, 543, - 520, 528, 521, 529, 522, 530, 555, 554, 547, 558, - 523, 531, 549, 524, 532, 
548, 540, 541, 430, 719, - 720, 492, 397, 127, 296, 297, 48, 349, 278, 655, - 309, 656, 339, 340, 354, 325, 350, 672, 316, 670, - 280, 398, 470, 266, 657, 420, 293, 371, 376, 310, - 572, 509, 285, 399, 684, 571, 501, 502, 347, 344, - 287, 503, 658, 674, 400, 241, 281, 282, 659, 671, - 401, 402, 303, 403, 404, 405, 406, 407, 409, 313, - 408, 673, 667, 668, 288, 458, 570, 321, 342, 377, - 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, - 450, 451, 452, 453, 454, 455, 456, 457, 467, 239, - -78, 239, -186, -296, 239, 178, -268, 381, -286, 383, - 396, 391, 401, 389, -277, 392, 394, 279, -397, 411, - 239, 398, 226, 384, 393, 402, 403, 303, 409, 404, - 313, 408, 288, 405, 406, 407, -380, 178, 697, 712, - 135, 346, 388, 386, 412, 676, 91, -302, 91, 92, - 93, -289, 316, -304, 321, -290, -380, -289, 319, -78, - -78, -306, -306, -127, 676, 678, -205, -142, 143, -155, - -257, -158, 92, -147, -150, -199, -200, -201, -202, -156, - -215, -255, 167, 168, 175, 144, -211, -159, 27, 563, - 461, 460, 178, 32, 221, 69, 70, 463, 146, 58, - 12, 435, 436, -157, 425, 426, 437, 431, 432, 488, - 490, 491, 492, 489, 494, 495, 496, 497, 498, 499, - 500, 501, 502, 503, 493, 465, 466, 118, 467, 108, - 110, 109, 468, 469, 470, 343, 515, 516, 510, 513, - 514, 512, 511, 358, 359, 471, 533, 534, 538, 537, - 535, 536, 539, 542, 543, 544, 545, 546, 547, 549, - 548, 540, 541, 518, 517, 519, 520, 521, 522, 523, - 524, 526, 525, 527, 528, 529, 530, 531, 532, 550, - 551, 552, 553, 554, 556, 555, 560, 559, 557, 558, - 562, 561, 472, 473, 111, 112, 113, 114, 115, 116, - 117, 474, 477, 475, 476, 478, 479, 480, 485, 486, - 481, 482, 483, 484, 487, 369, 367, 368, 364, 363, - 362, -88, -100, 590, 589, -101, 422, 427, 428, 430, - -148, -149, -161, -162, -290, -296, 244, 424, 238, 173, - 459, -151, -145, -213, 107, 93, -8, -209, 423, 433, - 434, 438, 429, 439, 575, 577, 593, 594, 596, 580, - 585, 584, 587, 504, 505, 506, 507, 508, 509, 661, - 662, 663, 664, 665, 666, 667, 668, -380, -289, 91, - -153, -152, 
-195, 94, 99, 102, 103, 105, -403, 262, - 339, 340, 119, -420, 690, 90, 95, 96, 97, 98, - 120, 121, 179, 180, 181, 182, 183, 184, 185, 186, - 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, - 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, - 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, - 217, 218, 219, 220, 397, 397, -186, -78, -78, -78, - -78, -225, -125, -227, -10, -8, -420, 9, -78, -8, - -9, -13, -35, -37, 595, -36, -296, 100, -232, -248, - 13, 162, 43, 51, -230, -231, -12, -8, -142, 20, - 24, 25, -130, 169, -142, -296, -130, -275, 243, -306, - -416, 676, -78, -78, -264, -309, 316, -266, 412, 676, - 411, -256, -269, 91, -255, -268, 410, 92, -350, 159, - -336, -340, -290, 254, -366, 250, -186, -359, -358, -290, - -420, -126, -285, 240, 248, 247, 136, -384, 139, 296, - 424, 238, -52, -53, -54, -268, 177, 696, -109, 271, - 275, 88, 88, -340, -339, -338, -385, 275, 254, -365, - -357, 246, 255, -346, 247, 248, -341, 240, 137, -385, - -341, 245, 255, 250, 254, 275, 275, 127, 275, 127, - 275, 275, 275, 275, 275, 275, 275, 275, 275, 270, - -347, 151, -347, 571, 571, -353, -385, 250, 240, -385, - -385, 246, -287, -341, 242, 26, 242, 36, 36, -347, - -347, -347, -268, 177, -347, -347, -347, -347, 283, 283, - -347, -347, -347, -347, -347, -347, -347, -347, -347, -347, - -347, -347, -347, -347, -347, -347, -347, 239, -384, -134, - 408, 303, 82, -55, 285, -38, -186, -285, 240, 241, - -384, 272, -186, 222, -186, 679, -279, 159, 16, -279, - -276, 397, 395, 382, 387, -279, -279, -279, -279, 286, - 380, -342, 240, 36, 251, 397, 286, 380, 286, 287, - 286, 287, 390, 400, 286, -301, 15, 162, 424, 385, - 389, 279, 239, 280, 241, 399, 287, -301, 90, -280, - 159, 286, 397, 282, -279, -279, -307, -420, -292, -290, - -288, 231, 24, 142, 26, 28, 145, 178, 130, 20, - 146, 38, 233, 346, 250, 177, 246, 460, 226, 73, - 575, 425, 432, 423, 431, 435, 462, 463, 424, 383, - 32, 14, 577, 29, 260, 25, 39, 171, 228, 149, - 578, 263, 27, 261, 118, 121, 580, 23, 76, 255, - 15, 248, 41, 17, 
581, 582, 18, 244, 243, 162, - 240, 71, 12, 221, 30, 158, 67, 583, 137, 584, - 585, 586, 587, 131, 69, 159, 21, 716, 433, 434, - 34, 677, 563, 274, 173, 74, 60, 678, 143, 429, - 589, 590, 119, 591, 122, 77, 683, 139, 19, 72, - 43, 592, 275, 593, 245, 717, 594, 415, 595, 160, - 229, 459, 70, 161, 690, 596, 691, 238, 396, 9, - 464, 33, 259, 247, 129, 68, 439, 597, 239, 148, - 465, 466, 242, 132, 120, 8, 136, 35, 13, 75, - 78, 436, 437, 438, 58, 128, 567, 147, 16, 598, - 416, 141, -380, 679, -307, -307, 33, 92, -406, -407, - -408, 567, 415, 242, -290, -186, -84, 669, 230, -85, - 675, 24, 237, -132, 397, -120, 178, 697, 680, 681, - 682, 679, 394, 687, 685, 683, 286, 684, 88, 139, - 141, 142, 4, -142, 158, -196, 151, 152, 153, 154, - 155, 156, 157, 163, 162, 143, 145, 159, -241, 140, - 164, 165, 166, 167, 168, 169, 170, 172, 171, 173, - 174, 160, 161, 177, 224, 225, -150, -150, -150, -150, - -211, -217, -216, -420, -213, -380, -289, -296, -420, -420, - -150, -274, -420, -147, -420, -420, -420, -420, -220, -142, - -420, -420, -424, -420, -424, -424, -424, -325, -420, -325, - -325, -420, -420, -420, -420, -420, -420, -420, -420, -420, - -420, -420, -420, -420, -420, -420, -420, -420, -420, -420, - -420, -420, -420, -420, -420, -420, -420, -420, -420, -420, - -420, -420, -420, -420, -420, -420, -420, -420, -420, -420, - -420, -420, -420, -420, -420, -420, -420, -420, -420, -420, - -420, -420, -420, -420, -420, -420, -420, -420, -420, -420, - -420, -420, -420, -420, -420, -420, -420, -420, -420, -420, - -420, -420, -420, -420, -420, -420, -420, -420, -420, -420, - -420, -420, -420, -420, -420, -420, -420, -420, -420, -420, - -420, -420, -420, -420, -420, -420, -420, -420, -420, -420, - -420, -420, -420, -420, -420, -420, -420, -420, -420, -420, - -420, -420, -420, -420, -420, -420, -420, -420, -420, -420, - -420, -420, -420, 222, -420, -420, -420, -420, -420, -325, - -325, -325, -325, -325, -325, -420, -420, -420, -420, -420, - -420, -420, -420, -420, -420, -420, -420, 
-420, -420, 103, - 99, 102, 94, -215, 105, 90, 90, 90, 90, -8, - -9, -205, -420, -394, -395, -189, -186, -420, 303, -290, - -290, 272, -230, -12, -8, -225, -231, -227, -8, -78, - -118, -131, 64, 65, -133, 25, 39, 68, 66, 24, - -421, 89, -421, -248, -421, 88, -37, -251, 87, 62, - 44, 90, 90, 88, 22, -226, -228, -142, 15, -294, - 4, -293, 26, -290, 90, 222, 15, -187, 30, -186, - 588, -275, -275, 88, 91, 316, -265, -267, 413, 415, - 151, -295, -290, 90, 32, 89, 88, -186, -314, -317, - -319, -318, -320, -315, -316, 343, 344, 178, 347, 349, - 350, 351, 352, 353, 354, 355, 356, 357, 360, 33, - 262, 339, 340, 341, 342, 361, 362, 363, 364, 366, - 367, 368, 369, 324, 345, 565, 325, 326, 327, 328, - 329, 330, 332, 333, 336, 334, 335, 337, 338, -381, - -380, 87, 89, 88, -321, 87, -142, -134, 239, -380, - 240, 240, 240, -78, 459, -347, -347, -347, 270, 20, - -45, -42, -373, 19, -41, -42, 231, 123, 124, 228, - 87, -336, 87, -345, -381, -380, 87, 137, 245, 136, - -344, -341, -344, -345, -380, -213, -380, 137, 137, -380, - -380, -261, -290, -261, -261, 24, -261, 24, -261, 24, - 96, -290, -261, 24, -261, 24, -261, 24, -261, 24, - -261, 24, 32, 79, 80, 81, 32, 83, 84, 85, - -213, -380, -380, -213, -336, -213, -186, -380, -268, 96, - 96, 96, -347, -347, 96, 90, 90, 90, -347, -347, - 96, 90, -298, -296, 90, 90, -386, 256, 300, 302, - 96, 96, 96, 96, 32, 90, -387, 32, 704, 703, - 705, 706, 707, 90, 96, 32, 96, 32, 96, -290, - 87, -186, -140, 290, 226, 228, 231, 77, 90, 306, - 307, 304, 309, 310, 151, 45, 88, 242, 239, -380, - -281, 244, -281, -290, -297, -296, -288, 242, 379, 90, - -142, -343, 15, 162, -301, -301, -279, -186, -343, -301, - -279, -186, -279, -279, -279, -279, -301, -301, -301, -279, - -296, -296, -186, -186, -186, -186, -186, -186, -186, -307, - -280, -279, 679, 90, -273, 15, 77, -307, -307, 88, - 322, 416, 417, -305, 319, -80, -290, 90, -15, -11, - -23, -22, -24, 151, -15, 88, 567, -179, -186, 679, - 679, 679, 679, 679, 679, -142, -142, -142, -142, 591, - -203, 119, 
143, 120, 121, -158, -142, -204, -209, -211, - 106, 162, 145, 159, -241, -147, -150, -147, -147, -147, - -147, -147, -147, 221, -147, 221, -147, -147, -147, -147, - -147, -147, -308, -290, 90, 178, -154, -153, 105, -403, - -154, 564, 88, -216, 222, -142, -142, -380, -117, 441, - 442, 443, 444, 446, 447, 448, 451, 452, 456, 457, - 440, 458, 445, 450, 453, 454, 455, 449, 342, -142, - -128, -130, -128, -142, -218, -219, 147, -213, -142, -421, - -421, 96, 169, -124, 25, 39, -124, -124, -124, -124, - -142, -142, -142, -142, -142, -142, -142, -142, -142, -142, - -124, -290, -290, -117, -142, -142, -142, -142, -142, -142, - -86, -142, 130, 131, 132, -205, -142, -147, -142, -142, - -142, -421, -142, -142, -142, -206, -205, -142, -142, -142, - -142, -142, -142, -142, -142, -142, -142, -142, -142, -142, - -142, -142, -142, -142, -142, -142, -142, -142, -142, -142, - -142, -142, -142, -142, -142, -142, -142, -142, -142, -142, - -142, -142, -142, -142, -142, -142, -142, -142, -142, -142, - -142, -142, -142, -379, -378, -377, -142, -142, -142, -142, - -142, -142, -142, -142, -142, -142, -142, -142, -142, -142, - -142, -142, -142, -142, -142, -142, -142, -142, -142, -205, - -205, -205, -205, -205, -142, -421, -142, -160, -145, 96, - -257, 105, 92, -142, -142, -142, -142, -142, -142, -129, - -128, -292, -297, -288, -289, -128, -129, -129, -128, -128, - -142, -142, -142, -142, -142, -142, -142, -142, -421, -142, - -142, -142, -142, -142, -248, -421, -205, 88, -396, 415, - 416, 677, -299, 275, -298, 26, -206, 90, 15, -259, - 78, -290, -230, -230, 64, 65, 60, -128, -133, -421, - -36, 26, -250, -290, 63, 90, -326, -268, 370, 371, - 178, -142, -142, 88, -229, 28, 29, -186, -293, 169, - -297, -186, -260, 275, -186, 90, -164, -166, -167, -168, - -189, -212, -420, -169, -8, 586, 583, 15, -179, -180, - -188, -296, -266, -309, -265, 88, 414, 416, 417, 77, - 122, -142, -327, 177, -355, -354, -353, -336, -338, -339, - -340, 89, -327, -332, 376, 375, -321, -321, -321, -321, - -321, -326, 
-326, -326, -326, 87, 87, -321, -321, -321, - -321, -329, 87, -329, -329, -330, -329, 87, -330, -331, - 87, -331, -366, -142, -363, -362, -360, -361, 249, 101, - 659, 615, 567, 608, 649, 78, -358, -229, 96, -421, - -140, -282, 244, -364, -361, -380, -380, -380, -282, 91, - 90, 91, 90, 91, 90, -110, -59, -1, 716, 717, - 718, 88, 20, -337, -336, -58, 300, -369, -370, 275, - -365, -359, -345, 137, -344, -345, -345, -380, 88, 30, - 127, 127, 127, 127, 567, 228, 33, -283, 607, 143, - 659, 615, -336, -58, 242, 242, -308, -308, -308, 90, - 90, -278, 712, -179, -136, 292, 151, 281, 281, 239, - 239, 294, -186, 305, 308, 306, 307, 304, 309, 310, - 24, 24, 24, 24, 24, 293, 295, 297, 283, -186, - -186, -281, 77, -181, -186, 27, -296, 90, 90, -186, - -279, -279, -186, -279, -279, -186, -408, 323, -290, 357, - 670, 671, 673, 672, -120, 415, 88, 567, 23, -121, - 23, -420, 119, 120, 121, -204, -147, -150, -147, 142, - 263, -147, -147, -420, -213, -421, -292, 26, 88, 78, - -421, 167, 88, 88, -421, -421, 88, 15, -221, -219, - 149, -142, -421, 88, -421, -421, -205, -142, -142, -142, - -142, -421, -421, -421, -421, -421, -421, -421, -421, -421, - -421, -205, 88, 88, 15, -312, 26, -421, -421, -421, - -421, -421, -220, -421, 15, -421, 78, 88, 162, 88, - -421, -421, -421, 88, 88, -421, -421, 88, -421, 88, - -421, -421, -421, -421, -421, -421, 88, -421, 88, -421, - -421, -421, 88, -421, 88, -421, -421, 88, -421, 88, - -421, 88, -421, 88, -421, 88, -421, 88, -421, 88, - -421, 88, -421, 88, -421, 88, -421, 88, -421, 88, - -421, 88, -421, 88, -421, 88, -421, 88, -421, 88, - -421, 88, -421, -421, -421, 88, -421, 88, -421, 88, - -421, -421, 88, -421, 88, -421, 88, -421, 88, 88, - -421, 88, 88, 88, -421, 88, 88, 88, 88, -421, - -421, -421, -421, 88, 88, 88, 88, 88, 88, 88, - 88, 88, 88, -421, -421, -421, -421, -421, -421, 88, - -93, 592, -421, -421, 88, -421, 88, 88, 88, 88, - 88, -421, -420, 222, -421, -421, -421, -421, -421, 88, - 88, 88, 88, 88, 88, -421, -421, -421, 88, 88, - -421, 88, 
-421, 88, -421, -395, 676, 416, -193, -192, - -190, 75, 243, 76, -420, -298, -421, -154, -257, -258, - -257, -198, -290, 96, 105, -232, -163, -165, 15, -133, - -211, 89, 88, -326, -236, -242, -276, -290, 90, 178, - -328, 178, -328, 370, 371, -228, 222, -194, 16, -197, - 33, 58, -11, -420, -420, 30, 33, 88, -182, -184, - -183, -185, 67, 71, 73, 68, 69, 70, 74, -303, - 26, -8, -164, -8, -420, -186, -179, -422, 15, 78, - -422, 88, 222, -267, -270, 418, 415, 421, -380, 90, - -109, 88, -353, -340, -233, -137, 41, -333, 377, -326, - 574, -326, -335, 90, -335, 96, 96, 96, 89, -48, - -43, -44, 34, 82, -360, -347, 90, 40, -347, -347, - -290, 89, -229, -136, -186, 143, 77, -364, -364, -364, - -296, -2, 715, 721, 137, 87, 382, 19, -250, 88, - 89, -214, 301, 89, -111, -290, 89, 87, -345, -345, - -290, -420, 239, 32, 32, 659, 615, 607, -58, -214, - -213, -380, -327, 714, 713, 89, 241, 299, -141, 435, - -138, 90, 91, -186, -186, -186, -186, -186, 231, 228, - 405, -404, 311, -404, 284, 242, -179, -186, 88, -83, - 258, 253, -301, -301, 34, -186, 415, 688, 686, -142, - 142, 263, -158, -150, -117, -117, -147, -310, 178, 343, - 262, 341, 337, 357, 348, 375, 339, 376, 334, 333, - 332, -310, -308, -147, -205, -130, -142, -142, 150, -142, - 148, -142, -421, -421, -421, -421, -421, -225, -142, -142, - -142, -421, 178, 343, 15, -142, -308, -142, -142, -142, - -142, -142, -142, -142, -142, -142, -142, -142, -142, -142, - -142, -142, -142, -142, -142, -142, -142, -142, -142, -142, - -142, -142, -142, -142, -142, -142, -142, -142, -142, -142, - -142, -142, -377, -142, -205, -142, -205, -142, -142, -142, - -142, -142, -378, -378, -378, -378, -378, -205, -205, -205, - -205, -142, -420, -290, -96, -95, -94, 642, 243, -93, - -160, -96, -160, 221, -142, 221, 221, 221, -142, -129, - -292, -142, -142, -142, -142, -142, -142, -142, -142, -142, - -142, -190, -341, -341, -341, -261, 88, -272, 23, 15, - 58, 58, -163, -194, -164, -133, -290, -239, 669, -245, - 47, -243, -244, 48, -240, 49, 57, -328, 
-328, 169, - -230, -142, -262, 77, -263, -271, -213, -208, -210, -209, - -420, -249, -421, -290, -261, 239, -263, -166, -167, -167, - -166, -167, 67, 67, 67, 72, 67, 72, 67, -183, - -296, -421, -142, -299, 78, -164, -164, -188, -296, 169, - 415, 419, 420, -353, -402, 119, 143, 32, 77, 373, - 101, -400, 177, 604, 654, 659, 615, 608, 649, -401, - 245, 136, 137, 257, 26, 42, 89, 88, 89, 88, - 89, 89, 88, -284, -283, -44, -43, -347, -347, 96, - -380, 90, 90, 241, 27, -186, 77, 77, 77, -112, - 719, 96, 87, -3, 82, -142, 87, 20, -336, -213, - -371, -322, -372, -323, -324, -5, -6, -348, -115, 58, - 101, -62, 45, 240, 699, 700, 127, -420, 712, -363, - -250, -367, -369, -186, -144, -420, -143, -145, -151, 167, - 168, 262, 339, 340, -214, -186, -135, 290, 298, 87, - -139, 92, -383, 78, 281, 373, 281, 90, -405, 312, - 90, -405, -186, -83, -48, -186, -279, -279, 34, -380, - -421, -158, -150, -123, 162, 567, -313, 573, -321, -321, - -321, -331, -321, 329, -321, 329, -321, -421, -421, -421, - 88, -421, 23, -421, -142, 88, -119, 464, 88, 88, - -421, 87, 87, -142, -421, -421, -421, 88, -421, -421, - -421, -421, -421, -421, -421, -421, -421, -421, -421, -421, - -421, 88, -421, 88, -421, 88, -421, 88, -421, 88, - -421, 88, -421, 88, -421, 88, -421, 88, -421, 88, - -421, 88, -421, 88, -421, 88, -421, 88, -421, 88, - -421, 88, -421, -421, 88, -421, -421, -421, 88, -421, - 88, -421, 88, -421, -421, -421, 88, -311, 660, -421, - -421, -421, -421, -421, -421, -421, -421, -421, -421, -421, - -92, -291, -290, -93, 624, 624, -421, -93, -222, 88, - -147, -421, -147, -147, -147, -421, -421, -421, 88, -421, - 88, 88, -421, 88, -421, 88, -421, -421, -421, -421, - 88, -191, 23, -191, -191, -421, -257, -186, -194, -223, - 17, -236, 52, 349, -247, -246, 56, 48, -244, 20, - 50, 20, 31, -262, 88, 151, 88, -421, -421, 88, - 58, 222, -421, -186, -194, -177, -176, 77, 78, -178, - 77, -176, 67, 67, -251, 88, -260, -164, -194, -194, - 222, 119, -420, -146, -157, -144, 13, 90, 90, -380, - -399, 703, 704, 
32, 96, -347, -347, 137, 137, -186, - 87, -326, 90, -326, 96, 96, 32, 83, 84, 85, - 32, 79, 80, 81, -186, -186, -186, -186, -368, 87, - 20, -142, 87, 151, 89, -250, -250, 277, 162, -347, - 697, 283, 283, -347, -347, -347, -114, -113, 719, 89, - -421, 88, -334, 567, 570, -142, -152, -152, -251, 89, - -376, 567, -382, -290, -290, -290, -290, 96, 98, -421, - 565, 74, 568, -421, -326, -142, -142, -142, -230, 90, - -142, -142, 96, 96, -421, -142, -142, -142, -142, -142, - -142, -142, -142, -142, -142, -142, -142, -142, -142, -142, - -142, -142, -142, -142, -142, -205, -142, -421, -174, -173, - -175, 680, 119, 32, -310, -421, -207, 275, -99, -98, - -97, 15, -421, -142, -117, -117, -117, -117, -142, -142, - -142, -142, -142, -142, -420, 67, 19, 17, -420, -420, - -299, -223, -224, 18, 20, -237, 54, -235, 53, -235, - -246, 20, 20, 90, 20, 90, 137, -271, -142, -210, - 58, -11, -290, -208, -290, -410, -425, 387, 382, -225, - -142, 87, -142, -154, -194, -194, -142, -200, 488, 490, - 491, 492, 489, 494, 495, 496, 497, 498, 499, 500, - 501, 502, 503, 493, 467, 108, 110, 109, 468, 469, - 470, 343, 515, 516, 510, 513, 514, 512, 511, 358, - 359, 471, 533, 534, 538, 537, 535, 536, 539, 542, - 543, 544, 545, 546, 547, 549, 548, 540, 541, 518, - 517, 519, 520, 521, 522, 523, 524, 526, 525, 527, - 528, 529, 530, 531, 532, 550, 551, 552, 553, 554, - 556, 555, 560, 559, 557, 558, 562, 561, 472, 473, - 111, 112, 113, 114, 115, 116, 117, 474, 477, 475, - 478, 479, 480, 485, 486, 481, 482, 483, 484, 487, - 369, 367, 368, 364, 363, 362, 422, 427, 428, 430, - 504, 505, 506, 507, 508, 509, 661, 662, 663, 664, - 665, 666, 667, 668, 90, 90, 87, -142, 89, 89, - -251, -367, -59, 89, -252, -250, 96, 89, 278, -209, - -420, 90, -347, -347, -347, 96, 96, -298, -421, 88, - -290, -401, -369, 571, 571, -421, 26, -375, -374, -292, - 87, 78, 63, 566, 569, -421, -421, 88, -421, -421, - -421, 89, 89, -421, -421, -421, -421, -421, -421, -421, - -421, -421, -421, -421, -421, -421, -421, -421, -421, -421, - 
-421, -421, -421, -421, -421, 88, -421, -173, -175, -421, - 77, -154, -225, 20, -96, 300, 302, -96, -421, -421, - -421, -421, -421, 88, -421, -421, 88, -421, 88, -421, - -421, -253, -421, -290, 245, 20, 20, -253, -253, -193, - -224, -106, -105, -104, 598, -142, -205, -238, 55, 77, - 122, 90, 90, 90, 13, -208, 222, -409, 47, -413, - 48, -230, -250, -171, 382, -225, -421, -250, 89, 26, - 89, 721, 137, 89, -209, -122, -420, 274, -298, 90, - 90, -113, -116, -11, 88, 151, -250, -186, 63, -142, - -205, -421, 77, 578, 680, -91, -90, -87, 691, 717, - -205, -93, -93, -142, -142, -142, 88, -421, -421, -421, - -106, 88, -103, -102, -290, 77, 122, -263, -290, -254, - -420, -414, 56, -412, 50, 20, 89, -421, -420, -230, - 89, -234, -11, 87, -3, 274, -322, -372, -323, -324, - -5, -6, -348, -81, 567, -374, -352, -296, -292, 90, - 96, 89, 567, -421, -421, -89, 145, 689, 657, -152, - 221, -421, 88, -421, 88, -421, 88, -290, 245, -104, - 88, 26, -249, -415, 48, 20, -411, 49, 20, 90, - -299, -172, -170, -290, 621, -392, -391, 563, -402, -398, - 119, 143, 101, -400, 659, 615, 128, 129, -81, -142, - 87, -421, -82, 289, 676, 222, -383, 568, -89, 690, - 635, 610, 635, 610, -147, -142, -142, -142, -102, -420, - -421, 20, 90, 20, 90, -421, 88, 23, -314, -61, - 632, -389, -390, 77, -393, 388, 631, 652, 119, 90, - 89, -250, 250, -297, -376, 569, 142, -117, -421, 88, - -421, 88, -421, -92, 90, 90, -170, 628, -327, -154, - -390, 77, -389, 77, 14, 13, -4, 720, 89, 291, - -89, 635, 610, -142, -142, -421, -60, 27, -171, -388, - 258, 253, 256, 33, -388, 96, -4, -421, -421, 632, - 252, 32, 119, -154, -174, -173, -173, + -1000, -421, -79, -426, -7, -29, -15, -16, -17, -419, + -18, -19, -20, -21, -22, -23, -24, -25, -26, -64, + -67, -65, -66, -69, -70, -71, -72, -73, -9, -11, + -68, -27, -28, -74, -75, -76, -77, -78, -12, -13, + -14, -8, -32, -31, -30, 10, 11, -108, 46, -35, + 33, -40, -50, 227, -51, -41, 228, -52, 230, 229, + 267, 231, 379, 260, 75, 315, 316, 318, 319, 320, + 321, -109, 685, 265, 
266, 233, 37, 34, 35, 38, + 237, 273, 274, 236, 133, -33, -36, 9, -423, 12, + 469, 262, 261, 29, -34, 578, 87, -80, -422, 733, + -250, -234, 23, 34, 30, -233, -229, -127, -234, 21, + 19, 8, -79, -79, -79, 45, 13, 14, -79, -351, + -353, 87, 160, 87, -79, -57, -56, -54, -53, -55, + -58, 32, -47, -48, -375, -46, -43, 232, 229, 277, + 123, 124, 267, 268, 269, 231, 251, 266, 270, 265, + 286, -42, 82, 34, 578, 581, -358, 228, 234, 235, + 230, 470, 126, 125, 76, -355, 374, 612, 703, -58, + 705, 101, 104, 704, 45, 241, 706, 707, 708, 619, + 709, 250, 710, 711, 712, 713, 719, 660, 720, 721, + 722, 127, 8, -79, -302, -298, 91, -291, 575, 253, + 610, 423, 611, 302, 82, 42, 514, 584, 371, 374, + 612, 499, 703, 380, 315, 331, 325, 504, 505, 506, + 354, 346, 576, 613, 585, 305, 254, 290, 697, 344, + 136, 705, 309, 614, 268, 381, 382, 615, 383, 101, + 318, 420, 718, 308, 616, 716, 104, 704, 323, 80, + 498, 52, 700, 45, 263, 428, 429, 342, 236, 338, + 706, 291, 617, 587, 284, 126, 123, 725, 37, 334, + 51, 31, 715, 125, 50, 707, 151, 618, 708, 619, + 385, 361, 691, 49, 386, 269, 620, 85, 274, 580, + 312, 699, 387, 519, 335, 388, 301, 714, 233, 621, + 680, 672, 673, 389, 390, 692, 366, 362, 367, 521, + 622, 412, 503, 391, 676, 677, 732, 53, 623, 624, + 693, 124, 625, 79, 709, 81, 329, 330, 626, 299, + 252, 524, 525, 414, 358, 481, 488, 489, 111, 112, + 484, 113, 490, 114, 491, 492, 493, 482, 115, 108, + 483, 494, 495, 359, 360, 116, 496, 110, 109, 485, + 487, 117, 497, 250, 36, 392, 577, 303, 59, 307, + 278, 415, 47, 364, 729, 46, 687, 526, 627, 690, + 357, 353, 478, 54, 628, 629, 630, 631, 500, 710, + 356, 328, 352, 724, 4, 296, 501, 711, 63, 235, + 369, 368, 370, 285, 411, 349, 632, 633, 634, 257, + 83, 635, 339, 22, 636, 637, 393, 292, 638, 57, + 639, 640, 418, 266, 641, 55, 712, 40, 642, 271, + 726, 713, 643, 644, 645, 686, 646, 273, 647, 395, + 648, 674, 675, 394, 363, 365, 527, 280, 396, 379, + 238, 579, 649, 313, 333, 270, 717, 650, 258, 515, + 516, 517, 518, 698, 523, 522, 
272, 277, 265, 419, + 259, 651, 652, 653, 654, 655, 306, 671, 656, 657, + 319, 719, 479, 44, 658, 659, 660, 661, 662, 300, + 295, 413, 422, 62, 84, 376, 663, 664, 696, 327, + 324, 293, 460, 462, 463, 464, 465, 466, 461, 468, + 665, 316, 56, 720, 721, 722, 287, 723, 507, 508, + 509, 510, 10, 561, 544, 572, 545, 562, 546, 555, + 547, 563, 571, 573, 528, 536, 529, 537, 567, 550, + 564, 556, 549, 548, 570, 553, 557, 530, 538, 568, + 554, 531, 539, 532, 540, 533, 541, 566, 565, 558, + 569, 534, 542, 560, 535, 543, 559, 551, 552, 431, + 730, 731, 502, 398, 127, 297, 298, 48, 350, 279, + 666, 310, 667, 340, 341, 475, 476, 355, 326, 351, + 683, 317, 681, 281, 399, 480, 267, 668, 421, 294, + 372, 377, 311, 583, 520, 286, 400, 695, 582, 511, + 512, 348, 345, 288, 513, 669, 685, 401, 242, 282, + 283, 670, 682, 402, 403, 304, 404, 405, 406, 407, + 408, 410, 314, 409, 684, 678, 679, 289, 459, 581, + 322, 343, 378, 441, 442, 443, 444, 445, 446, 447, + 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, + 458, 477, 240, -79, 240, -188, -298, -129, 687, 689, + 179, -270, 382, -288, 384, 397, 392, 402, 390, -279, + 393, 395, 280, -399, 412, 240, 399, 227, 385, 394, + 403, 404, 304, 410, 405, 314, 409, 289, 406, 407, + 408, -382, 179, 708, 723, 136, 347, 389, 387, 413, + 687, 91, -304, 91, 92, 93, -291, 317, -306, 322, + -292, -382, -291, 320, -79, -79, -308, -308, -129, -207, + -144, 144, -157, -259, -160, 92, -149, -152, -201, -202, + -203, -204, -158, -217, -257, 168, 169, 176, 145, -213, + -161, 27, 574, 471, 470, 179, 32, 222, 69, 70, + 473, 147, 58, 12, 436, 437, -159, 426, 427, 438, + 432, 433, 498, 500, 501, 502, 499, 504, 505, 506, + 507, 508, 509, 510, 511, 512, 513, 503, 514, 475, + 476, 118, 477, 108, 110, 109, 478, 479, 480, 344, + 526, 527, 521, 524, 525, 523, 522, 359, 360, 481, + 544, 545, 549, 548, 546, 547, 550, 553, 554, 555, + 556, 557, 558, 560, 559, 551, 552, 529, 528, 530, + 531, 532, 533, 534, 535, 537, 536, 538, 539, 540, + 541, 542, 543, 561, 562, 563, 564, 
565, 567, 566, + 571, 570, 568, 569, 573, 572, 482, 483, 111, 112, + 113, 114, 115, 116, 117, 484, 487, 485, 486, 488, + 489, 490, 495, 496, 491, 492, 493, 494, 497, 370, + 368, 369, 365, 364, 363, -89, -101, 601, 600, -102, + 423, 428, 429, 431, -150, -151, -163, -164, -292, -298, + 245, 425, 239, 174, 469, -153, -147, -215, 107, 93, + -31, -211, 424, 434, 435, 439, 430, 440, 586, 588, + 604, 605, 607, 591, 596, 595, 598, 515, 516, 517, + 518, 519, 520, 672, 673, 674, 675, 676, 677, 678, + 679, -382, -291, 91, -155, -154, -197, 94, 99, 102, + 103, 105, -405, 263, 340, 341, 119, -423, 701, 90, + 95, 96, 97, 98, 120, 121, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 220, 221, 398, 398, + -188, -79, -79, -79, -79, -420, 704, 579, -227, -127, + -229, -33, -31, -423, 9, -79, -31, -32, -30, -36, + -38, 606, -37, -298, 100, -234, -250, 13, 163, 43, + 51, -232, -233, -34, -31, -144, 20, 24, 25, -132, + 170, -144, -298, -132, -277, 244, -308, -418, 687, -79, + -79, -266, -311, 317, -268, 413, 687, 412, -258, -271, + 91, -257, -270, 411, 92, -352, 160, -338, -342, -292, + 255, -368, 251, -188, -361, -360, -292, -423, -128, -287, + 241, 249, 248, 137, -386, 140, 297, 425, 239, -53, + -54, -55, -270, 178, 707, -110, 272, 276, 88, 88, + -342, -341, -340, -387, 276, 255, -367, -359, 247, 256, + -348, 248, 249, -343, 241, 138, -387, -343, 246, 256, + 251, 255, 276, 276, 127, 276, 127, 276, 276, 276, + 276, 276, 276, 276, 276, 276, 271, -349, 152, -349, + 582, 582, -355, -387, 251, 241, -387, -387, 247, -289, + -343, 243, 26, 243, 36, 36, -349, -349, -349, -270, + 178, -349, -349, -349, -349, 284, 284, -349, -349, -349, + -349, -349, -349, -349, -349, -349, -349, -349, -349, -349, + -349, -349, -349, -349, 240, -386, -136, 409, 304, 82, + -56, 286, -39, -188, -287, 241, 242, -386, 273, -188, + 223, 240, 690, -281, 160, 
16, -281, -278, 398, 396, + 383, 388, -281, -281, -281, -281, 287, 381, -344, 241, + 36, 252, 398, 287, 381, 287, 288, 287, 288, 391, + 401, 287, -303, 15, 163, 425, 386, 390, 280, 240, + 281, 242, 400, 288, -303, 90, -282, 160, 287, 398, + 283, -281, -281, -309, -423, -294, -292, -290, 232, 24, + 143, 26, 28, 146, 179, 130, 20, 147, 38, 234, + 347, 251, 178, 247, 470, 227, 73, 586, 426, 433, + 424, 432, 436, 472, 473, 425, 384, 32, 14, 588, + 29, 261, 25, 39, 172, 229, 150, 589, 264, 27, + 262, 118, 121, 591, 23, 76, 256, 15, 249, 41, + 17, 592, 593, 18, 245, 244, 163, 241, 71, 12, + 222, 30, 159, 67, 594, 138, 133, 595, 596, 597, + 598, 131, 69, 160, 21, 727, 434, 435, 34, 688, + 574, 275, 174, 74, 60, 689, 144, 430, 600, 601, + 119, 602, 122, 77, 694, 140, 19, 72, 43, 603, + 276, 604, 246, 728, 605, 416, 606, 161, 230, 469, + 70, 162, 701, 607, 702, 239, 397, 9, 474, 33, + 260, 248, 129, 68, 440, 608, 240, 149, 243, 132, + 120, 8, 137, 35, 13, 75, 78, 437, 438, 439, + 58, 128, 578, 148, 16, 609, 417, 142, -382, 690, + -309, -309, 33, 92, -408, -409, -410, 578, 416, 243, + -292, -188, -85, 680, 231, -86, 686, 24, 238, -134, + 398, -122, 179, 708, 691, 692, 693, 690, 395, 698, + 696, 694, 287, 695, 88, 140, 142, 143, 4, -144, + 159, -198, 152, 153, 154, 155, 156, 157, 158, 164, + 163, 144, 146, 160, -243, 141, 165, 166, 167, 168, + 169, 170, 171, 173, 172, 174, 175, 161, 162, 178, + 225, 226, -152, -152, -152, -152, -213, -219, -218, -423, + -215, -382, -291, -298, -423, -423, -152, -276, -423, -149, + -423, -423, -423, -423, -222, -144, -423, -423, -427, -423, + -427, -427, -427, -327, -423, -327, -327, -423, -423, -423, + -423, -423, -423, -423, -423, -423, -423, -423, -423, -423, + -423, -423, -423, -423, -423, -423, -423, -423, -423, -423, + -423, -423, -423, -423, -423, -423, -423, -423, -423, -423, + -423, -423, -423, -423, -423, -423, -423, -423, -423, -423, + -423, -423, -423, -423, -423, -423, -423, -423, -423, -423, + -423, -423, -423, -423, -423, -423, 
-423, -423, -423, -423, + -423, -423, -423, -423, -423, -423, -423, -423, -423, -423, + -423, -423, -423, -423, -423, -423, -423, -423, -423, -423, + -423, -423, -423, -423, -423, -423, -423, -423, -423, -423, + -423, -423, -423, -423, -423, -423, -423, -423, -423, -423, + -423, -423, -423, -423, -423, -423, -423, -423, -423, -423, + -423, -423, -423, -423, -423, -423, -423, -423, -423, -423, + 223, -423, -423, -423, -423, -423, -327, -327, -327, -327, + -327, -327, -423, -423, -423, -423, -423, -423, -423, -423, + -423, -423, -423, -423, -423, -423, 103, 99, 102, 94, + -217, 105, 90, 90, 90, 90, -31, -32, -207, -423, + -396, -397, -191, -188, -423, 304, -292, -292, 273, 96, + -232, -34, -31, -227, -233, -229, -31, -79, -120, -133, + 64, 65, -135, 25, 39, 68, 66, 24, -424, 89, + -424, -250, -424, 88, -38, -253, 87, 62, 44, 90, + 90, 88, 22, -228, -230, -144, 15, -296, 4, -295, + 26, -292, 90, 223, 15, -189, 30, -188, 599, -277, + -277, 88, 91, 317, -267, -269, 414, 416, 152, -297, + -292, 90, 32, 89, 88, -188, -316, -319, -321, -320, + -322, -317, -318, 344, 345, 179, 348, 350, 351, 352, + 353, 354, 355, 356, 357, 358, 361, 33, 263, 340, + 341, 342, 343, 362, 363, 364, 365, 367, 368, 369, + 370, 325, 346, 576, 326, 327, 328, 329, 330, 331, + 333, 334, 337, 335, 336, 338, 339, -383, -382, 87, + 89, 88, -323, 87, -144, -136, 240, -382, 241, 241, + 241, -79, 469, -349, -349, -349, 271, 20, -46, -43, + -375, 19, -42, -43, 232, 123, 124, 229, 87, -338, + 87, -347, -383, -382, 87, 138, 246, 137, -346, -343, + -346, -347, -382, -215, -382, 138, 138, -382, -382, -263, + -292, -263, -263, 24, -263, 24, -263, 24, 96, -292, + -263, 24, -263, 24, -263, 24, -263, 24, -263, 24, + 32, 79, 80, 81, 32, 83, 84, 85, -215, -382, + -382, -215, -338, -215, -188, -382, -270, 96, 96, 96, + -349, -349, 96, 90, 90, 90, -349, -349, 96, 90, + -300, -298, 90, 90, -388, 257, 301, 303, 96, 96, + 96, 96, 32, 90, -389, 32, 715, 714, 716, 717, + 718, 90, 96, 32, 96, 32, 96, -292, 87, -188, + -142, 
291, 227, 229, 232, 77, 90, 307, 308, 305, + 310, 311, 152, 45, 88, 243, 240, -382, -283, 245, + -283, -292, -299, -298, -290, -188, 243, 380, 90, -144, + -345, 15, 163, -303, -303, -281, -188, -345, -303, -281, + -188, -281, -281, -281, -281, -303, -303, -303, -281, -298, + -298, -188, -188, -188, -188, -188, -188, -188, -309, -282, + -281, 690, 90, -275, 15, 77, -309, -309, 88, 323, + 417, 418, -307, 320, -81, -292, 90, -10, -29, -18, + -17, -19, 152, -10, 88, 578, -181, -188, 690, 690, + 690, 690, 690, 690, -144, -144, -144, -144, 602, -205, + 119, 144, 120, 121, -160, -144, -206, -211, -213, 106, + 163, 146, 160, -243, -149, -152, -149, -149, -149, -149, + -149, -149, 222, -149, 222, -149, -149, -149, -149, -149, + -149, -310, -292, 90, 179, -156, -155, 105, -405, -156, + 575, 88, -218, 223, -144, -144, -382, -118, 442, 443, + 444, 445, 447, 448, 449, 452, 453, 457, 458, 441, + 459, 446, 451, 454, 455, 456, 450, 343, -144, -130, + -132, -130, -144, -220, -221, 148, -215, -144, -424, -424, + 96, 170, -126, 25, 39, -126, -126, -126, -126, -144, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -126, + -144, -119, 441, 459, 446, 451, 454, 455, 456, 450, + 343, 460, 461, 462, 463, 464, 465, 466, 467, 468, + -119, -118, -144, -144, -144, -144, -144, -144, -87, -144, + 130, 131, 132, -207, -144, -149, -144, -144, -144, -424, + -144, -144, -144, -208, -207, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -381, -380, -379, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -144, -144, -144, -207, -207, -207, + -207, -207, -144, -424, -144, -162, -147, 96, -259, 105, + 92, -144, -144, -144, -144, -144, -144, -131, -130, -294, + -299, -290, -291, -130, -131, 
-131, -130, -130, -144, -144, + -144, -144, -144, -144, -144, -144, -424, -144, -144, -144, + -144, -144, -250, -424, -207, 88, -398, 416, 417, 688, + -301, 276, -300, 26, -208, 90, 15, -261, 78, -292, + -232, -232, 64, 65, 60, -130, -135, -424, -37, 26, + -252, -292, 63, 90, -328, -270, 371, 372, 179, -144, + -144, 88, -231, 28, 29, -188, -295, 170, -299, -188, + -262, 276, -188, 90, -166, -168, -169, -170, -191, -214, + -423, -171, -31, 597, 594, 15, -181, -182, -190, -298, + -268, -311, -267, 88, 415, 417, 418, 77, 122, -144, + -329, 178, -357, -356, -355, -338, -340, -341, -342, 89, + -329, -334, 377, 376, -323, -323, -323, -323, -323, -328, + -328, -328, -328, 87, 87, -323, -323, -323, -323, -331, + 87, -331, -331, -332, -331, 87, -332, -333, 87, -333, + -368, -144, -365, -364, -362, -363, 250, 101, 670, 626, + 578, 619, 660, 78, -360, -231, 96, -424, -142, -284, + 245, -366, -363, -382, -382, -382, -284, 91, 90, 91, + 90, 91, 90, -111, -60, -1, 727, 728, 729, 88, + 20, -339, -338, -59, 301, -371, -372, 276, -367, -361, + -347, 138, -346, -347, -347, -382, 88, 30, 127, 127, + 127, 127, 578, 229, 33, -285, 618, 144, 670, 626, + -338, -59, 243, 243, -310, -310, -310, 90, 90, -280, + 723, -181, -138, 293, 152, 282, 282, 240, 295, 240, + 295, -188, 306, 309, 307, 308, 305, 310, 311, 24, + 24, 24, 24, 24, 294, 296, 298, 284, -188, -188, + -283, 77, -183, -188, 27, -298, 90, 90, -188, -281, + -281, -188, -281, -281, -188, -410, 324, -292, 358, 681, + 682, 684, 683, -122, 416, 88, 578, 23, -123, 23, + -423, 119, 120, 121, -206, -149, -152, -149, 143, 264, + -149, -149, -423, -215, -424, -294, 26, 88, 78, -424, + 168, 88, 88, -424, -424, 88, 15, -223, -221, 150, + -144, -424, 88, -424, -424, -207, -144, -144, -144, -144, + -424, -424, -424, -424, -424, -424, -424, -424, -424, -424, + -207, -424, 88, 88, 15, -314, 26, -424, -424, -424, + -424, -424, -222, -424, 15, -424, 78, 88, 163, 88, + -424, -424, -424, 88, 88, -424, -424, 88, -424, 88, + -424, -424, -424, -424, 
-424, -424, 88, -424, 88, -424, + -424, -424, 88, -424, 88, -424, -424, 88, -424, 88, + -424, 88, -424, 88, -424, 88, -424, 88, -424, 88, + -424, 88, -424, 88, -424, 88, -424, 88, -424, 88, + -424, 88, -424, 88, -424, 88, -424, 88, -424, 88, + -424, 88, -424, -424, -424, 88, -424, 88, -424, 88, + -424, -424, 88, -424, 88, -424, 88, -424, 88, 88, + -424, 88, 88, 88, -424, 88, 88, 88, 88, -424, + -424, -424, -424, 88, 88, 88, 88, 88, 88, 88, + 88, 88, 88, -424, -424, -424, -424, -424, -424, 88, + -94, 603, -424, -424, 88, -424, 88, 88, 88, 88, + 88, -424, -423, 223, -424, -424, -424, -424, -424, 88, + 88, 88, 88, 88, 88, -424, -424, -424, 88, 88, + -424, 88, -424, 88, -424, -397, 687, 417, -195, -194, + -192, 75, 244, 76, -423, -300, -424, -156, -259, -260, + -259, -200, -292, 96, 105, -234, -165, -167, 15, -135, + -213, 89, 88, -328, -238, -244, -278, -292, 90, 179, + -330, 179, -330, 371, 372, -230, 223, -196, 16, -199, + 33, 58, -29, -423, -423, 30, 33, 88, -184, -186, + -185, -187, 67, 71, 73, 68, 69, 70, 74, -305, + 26, -31, -166, -31, -423, -188, -181, -425, 15, 78, + -425, 88, 223, -269, -272, 419, 416, 422, -382, 90, + -110, 88, -355, -342, -235, -139, 41, -335, 378, -328, + 585, -328, -337, 90, -337, 96, 96, 96, 89, -49, + -44, -45, 34, 82, -362, -349, 90, 40, -349, -349, + -292, 89, -231, -138, -188, 144, 77, -366, -366, -366, + -298, -2, 726, 732, 138, 87, 383, 19, -252, 88, + 89, -216, 302, 89, -112, -292, 89, 87, -347, -347, + -292, -423, 240, 32, 32, 670, 626, 618, -59, -216, + -215, -382, -329, 725, 724, 89, 242, 300, -143, 436, + -140, 90, 91, -188, -188, -188, -188, -188, -188, 232, + 229, 406, -406, 312, -406, 285, 243, -181, -188, 88, + -84, 259, 254, -303, -303, 34, -188, 416, 699, 697, + -144, 143, 264, -160, -152, -118, -118, -149, -312, 179, + 344, 263, 342, 338, 358, 349, 376, 340, 377, 335, + 334, 333, -312, -310, -149, -207, -132, -144, -144, 151, + -144, 149, -144, -424, -424, -424, -424, -424, -227, -144, + -144, -144, -424, 179, 344, 15, 
-144, -310, -144, -144, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -379, -144, -207, -144, -207, -144, -144, + -144, -144, -144, -380, -380, -380, -380, -380, -207, -207, + -207, -207, -144, -423, -292, -97, -96, -95, 653, 244, + -94, -162, -97, -162, 222, -144, 222, 222, 222, -144, + -131, -294, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -192, -343, -343, -343, -263, 88, -274, 23, + 15, 58, 58, -165, -196, -166, -135, -292, -241, 680, + -247, 47, -245, -246, 48, -242, 49, 57, -330, -330, + 170, -232, -144, -264, 77, -265, -273, -215, -210, -212, + -211, -423, -251, -424, -292, -263, 240, -265, -168, -169, + -169, -168, -169, 67, 67, 67, 72, 67, 72, 67, + -185, -298, -424, -144, -301, 78, -166, -166, -190, -298, + 170, 416, 420, 421, -355, -404, 119, 144, 32, 77, + 374, 101, -402, 178, 615, 665, 670, 626, 619, 660, + -403, 246, 137, 138, 258, 26, 42, 89, 88, 89, + 88, 89, 89, 88, -286, -285, -45, -44, -349, -349, + 96, -382, 90, 90, 242, 27, -188, 77, 77, 77, + -113, 730, 96, 87, -3, 82, -144, 87, 20, -338, + -215, -373, -324, -374, -325, -326, -5, -6, -350, -116, + 58, 101, -63, 45, 241, 710, 711, 127, -423, 723, + -365, -252, -369, -371, -188, -148, -423, -159, -146, -145, + -147, -153, 168, 169, 263, 340, 341, -216, -188, -137, + 291, 299, 87, -141, 92, -385, 78, 282, 374, 282, + 374, 90, -407, 313, 90, -407, -188, -84, -49, -188, + -281, -281, 34, -382, -424, -160, -152, -125, 163, 578, + -315, 584, -323, -323, -323, -333, -323, 330, -323, 330, + -323, -424, -424, -424, 88, -424, 23, -424, -144, 88, + -121, 474, 88, 88, -424, 87, 87, -144, -424, -424, + -424, 88, -424, -424, -424, -424, -424, -424, -424, -424, + -424, -424, -424, -424, -424, 88, -424, 88, -424, 88, + -424, 88, -424, 88, -424, 88, -424, 88, -424, 88, + -424, 88, -424, 88, -424, 88, -424, 88, -424, 88, + -424, 88, 
-424, 88, -424, 88, -424, -424, 88, -424, + -424, -424, 88, -424, 88, -424, 88, -424, -424, -424, + 88, -313, 671, -424, -424, -424, -424, -424, -424, -424, + -424, -424, -424, -424, -93, -293, -292, -94, 635, 635, + -424, -94, -224, 88, -149, -424, -149, -149, -149, -424, + -424, -424, 88, -424, 88, 88, -424, 88, -424, 88, + -424, -424, -424, -424, 88, -193, 23, -193, -193, -424, + -259, -188, -196, -225, 17, -238, 52, 350, -249, -248, + 56, 48, -246, 20, 50, 20, 31, -264, 88, 152, + 88, -424, -424, 88, 58, 223, -424, -188, -196, -179, + -178, 77, 78, -180, 77, -178, 67, 67, -253, 88, + -262, -166, -196, -196, 223, 119, -423, -148, 13, 90, + 90, -382, -401, 714, 715, 32, 96, -349, -349, 138, + 138, -188, 87, -328, 90, -328, 96, 96, 32, 83, + 84, 85, 32, 79, 80, 81, -188, -188, -188, -188, + -370, 87, 20, -144, 87, 152, 89, -252, -252, 278, + 163, -349, 708, 284, 284, -349, -349, -349, -115, -114, + 730, 89, -424, 88, -336, 578, 581, -144, -154, -154, + -253, 89, -378, 578, -384, -292, -292, -292, -292, 96, + 98, -424, 576, 74, 579, -424, -328, -144, -144, -144, + -232, 90, -144, -144, 96, 96, -424, -144, -144, -144, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -144, -144, -144, -207, -144, -424, + -176, -175, -177, 691, 119, 32, -312, -424, -209, 276, + -100, -99, -98, 15, -424, -144, -118, -118, -118, -118, + -144, -144, -144, -144, -144, -144, -423, 67, 19, 17, + -423, -423, -301, -225, -226, 18, 20, -239, 54, -237, + 53, -237, -248, 20, 20, 90, 20, 90, 138, -273, + -144, -212, 58, -29, -292, -210, -292, -412, -428, 388, + 383, -227, -144, 87, -144, -156, -196, -196, -144, -202, + 498, 500, 501, 502, 499, 504, 505, 506, 507, 508, + 509, 510, 511, 512, 513, 503, 514, 475, 476, 477, + 108, 110, 109, 478, 479, 480, 344, 526, 527, 521, + 524, 525, 523, 522, 359, 360, 481, 544, 545, 549, + 548, 546, 547, 550, 553, 554, 555, 556, 557, 558, + 560, 559, 551, 552, 529, 528, 530, 531, 532, 533, + 534, 535, 537, 536, 538, 539, 
540, 541, 542, 543, + 561, 562, 563, 564, 565, 567, 566, 571, 570, 568, + 569, 573, 572, 482, 483, 111, 112, 113, 114, 115, + 116, 117, 484, 487, 485, 488, 489, 490, 495, 496, + 491, 492, 493, 494, 497, 370, 368, 369, 365, 364, + 363, 423, 428, 429, 431, 515, 516, 517, 518, 519, + 520, 672, 673, 674, 675, 676, 677, 678, 679, 90, + 90, 87, -144, 89, 89, -253, -369, -60, 89, -254, + -252, 96, 89, 279, -211, -423, 90, -349, -349, -349, + 96, 96, -300, -424, 88, -292, -403, -371, 582, 582, + -424, 26, -377, -376, -294, 87, 78, 63, 577, 580, + -424, -424, 88, -424, -424, -424, 89, 89, -424, -424, + -424, -424, -424, -424, -424, -424, -424, -424, -424, -424, + -424, -424, -424, -424, -424, -424, -424, -424, -424, -424, + 88, -424, -175, -177, -424, 77, -156, -227, 20, -97, + 301, 303, -97, -424, -424, -424, -424, -424, 88, -424, + -424, 88, -424, 88, -424, -424, -255, -424, -292, 246, + 20, 20, -255, -255, -195, -226, -107, -106, -105, 609, + -144, -207, -240, 55, 77, 122, 90, 90, 90, 13, + -210, 223, -411, 47, -415, 48, -232, -252, -173, 383, + -227, -424, -252, 89, 26, 89, 732, 138, 89, -211, + -124, -423, 275, -300, 90, 90, -114, -117, -29, 88, + 152, -252, -188, 63, -144, -207, -424, 77, 589, 691, + -92, -91, -88, 702, 728, -207, -94, -94, -144, -144, + -144, 88, -424, -424, -424, -107, 88, -104, -103, -292, + 77, 122, -265, -292, -256, -423, -416, 56, -414, 50, + 20, 89, -424, -423, -232, 89, -236, -29, 87, -3, + 275, -324, -374, -325, -326, -5, -6, -350, -82, 578, + -376, -354, -298, -294, 90, 96, 89, 578, -424, -424, + -90, 146, 700, 668, -154, 222, -424, 88, -424, 88, + -424, 88, -292, 246, -105, 88, 26, -251, -417, 48, + 20, -413, 49, 20, 90, -301, -174, -172, -292, 632, + -394, -393, 574, -404, -400, 119, 144, 101, -402, 670, + 626, 128, 129, -82, -144, 87, -424, -83, 290, 687, + 223, -385, 579, -90, 701, 646, 621, 646, 621, -149, + -144, -144, -144, -103, -423, -424, 20, 90, 20, 90, + -424, 88, 23, -316, -62, 643, -391, -392, 77, -395, + 389, 642, 663, 119, 
90, 89, -252, 251, -299, -378, + 580, 143, -118, -424, 88, -424, 88, -424, -93, 90, + 90, -172, 639, -329, -156, -392, 77, -391, 77, 14, + 13, -4, 731, 89, 292, -90, 646, 621, -144, -144, + -424, -61, 27, -173, -390, 259, 254, 257, 33, -390, + 96, -4, -424, -424, 643, 253, 32, 119, -156, -176, + -175, -175, } var yyDef = [...]int{ - 877, -2, -2, 879, 2, 4, 5, 6, 7, 8, + 880, -2, -2, 882, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, - 39, 72, 74, 75, 877, 877, 877, 0, 0, 877, - 0, 0, 877, -2, -2, 877, 1589, 0, 877, 0, - 0, 0, -2, 792, 798, 0, 807, -2, 0, 0, - 877, 877, 2220, 2220, 872, 0, 0, 0, 0, 877, - 877, 877, 877, 1455, 52, 877, 0, 87, 88, 827, - 828, 829, 67, 0, 2218, 878, 1, 3, 73, 77, - 0, 0, 0, 60, 1464, 0, 80, 0, 0, 881, - 0, 0, 1572, -2, 877, 877, 0, 128, 129, 0, - 0, 0, -2, 132, -2, 161, 162, 163, 0, 168, - 603, 526, 578, 524, 563, -2, 512, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 529, - 401, 401, 0, 0, -2, 512, 512, 512, 1574, 0, - 0, 0, 560, 463, 401, 401, 401, 0, 401, 401, - 401, 401, 0, 0, 401, 401, 401, 401, 401, 401, - 401, 401, 401, 401, 401, 401, 401, 401, 401, 401, - 401, 1482, 167, 1590, 1587, 1588, 1765, 1766, 1767, 1768, - 1769, 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, 1778, - 1779, 1780, 1781, 1782, 1783, 1784, 1785, 1786, 1787, 1788, - 1789, 1790, 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, - 1799, 1800, 1801, 1802, 1803, 1804, 1805, 1806, 1807, 1808, - 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, 1817, 1818, - 1819, 1820, 1821, 1822, 1823, 1824, 1825, 1826, 1827, 1828, - 1829, 1830, 1831, 1832, 1833, 1834, 1835, 1836, 1837, 1838, - 1839, 1840, 1841, 1842, 1843, 1844, 1845, 1846, 1847, 1848, - 1849, 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, 1858, - 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1866, 1867, 1868, - 1869, 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1877, 1878, - 1879, 1880, 1881, 1882, 1883, 1884, 1885, 1886, 1887, 1888, - 
1889, 1890, 1891, 1892, 1893, 1894, 1895, 1896, 1897, 1898, - 1899, 1900, 1901, 1902, 1903, 1904, 1905, 1906, 1907, 1908, - 1909, 1910, 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, - 1919, 1920, 1921, 1922, 1923, 1924, 1925, 1926, 1927, 1928, - 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, 1937, 1938, - 1939, 1940, 1941, 1942, 1943, 1944, 1945, 1946, 1947, 1948, - 1949, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, - 1959, 1960, 1961, 1962, 1963, 1964, 1965, 1966, 1967, 1968, - 1969, 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, - 1979, 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, - 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, - 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, - 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, - 2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, - 2029, 2030, 2031, 2032, 2033, 2034, 2035, 2036, 2037, 2038, - 2039, 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, - 2049, 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, - 2059, 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, - 2069, 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, - 2079, 2080, 2081, 2082, 2083, 2084, 2085, 2086, 2087, 2088, - 2089, 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, - 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2107, 2108, - 2109, 2110, 2111, 2112, 2113, 2114, 2115, 2116, 2117, 2118, - 2119, 2120, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, - 2129, 2130, 2131, 2132, 2133, 2134, 2135, 2136, 2137, 2138, - 2139, 2140, 2141, 2142, 2143, 2144, 2145, 2146, 2147, 2148, - 2149, 2150, 2151, 2152, 2153, 2154, 2155, 2156, 2157, 2158, - 2159, 2160, 2161, 2162, 2163, 2164, 2165, 2166, 2167, 2168, - 2169, 2170, 2171, 2172, 2173, 2174, 2175, 2176, 2177, 2178, - 2179, 2180, 2181, 2182, 2183, 2184, 2185, 2186, 2187, 2188, - 2189, 2190, 2191, 2192, 2193, 2194, 2195, 2196, 2197, 2198, - 2199, 2200, 2201, 2202, 2203, 2204, 2205, 2206, 2207, 2208, - 2209, 2210, 
2211, 2212, 2213, 2214, 2215, 2216, 2217, 0, - 1566, 0, 716, 980, 0, 0, 781, 781, 0, 781, - 781, 781, 781, 0, 0, 0, 730, 0, 0, 0, - 0, 778, 0, 746, 747, 0, 778, 0, 753, 784, - 0, 0, 759, 781, 781, 762, 2221, 0, 2221, 2221, - 1557, 0, 775, 773, 787, 788, 42, 791, 794, 795, - 796, 797, 800, 0, 811, 814, 1583, 1584, 0, 816, - 823, 840, 841, 0, 873, 874, 47, 1130, 0, 1002, - 0, 1008, -2, 1019, 1036, 1037, 1038, 1039, 1040, 1042, - 1043, 1044, 0, 0, 0, 0, 1049, 1050, 0, 0, - 0, 0, 0, 1111, 0, 0, 0, 0, 1428, 0, - 0, 1390, 1390, 1145, 1390, 1390, 1392, 1392, 1392, 1816, - 1954, 1962, 2130, 1777, 1783, 1784, 1785, 2076, 2077, 2078, - 2079, 2165, 2166, 2170, 1878, 0, 0, 0, 2217, 1915, - 1923, 1924, 1948, 2048, 2151, 1795, 1943, 2012, 1875, 1897, - 1898, 2030, 2031, 1919, 1920, 1901, 2082, 2084, 2100, 2101, - 2086, 2088, 2097, 2103, 2108, 2087, 2099, 2104, 2117, 2121, - 2124, 2125, 2126, 2094, 2092, 2105, 2109, 2111, 2113, 2119, - 2122, 2095, 2093, 2106, 2110, 2112, 2114, 2120, 2123, 2081, - 2085, 2089, 2098, 2116, 2096, 2115, 2090, 2102, 2107, 2118, - 2091, 2083, 1913, 1916, 1904, 1905, 1907, 1909, 1914, 1921, - 1927, 1906, 1926, 1925, 0, 1902, 1903, 1908, 1918, 1922, - 1910, 1911, 1912, 1917, 1928, 1968, 1967, 1966, 2011, 1939, - 2010, 0, 0, 0, 0, 0, 1768, 1821, 1822, 2127, - 1330, 1331, 1332, 1333, 0, 0, 0, 0, 0, 0, - 0, 293, 294, 1441, 1442, 46, 1129, 1553, 1392, 1392, - 1392, 1392, 1392, 1392, 1071, 1072, 1073, 1074, 1075, 1099, - 1100, 1106, 1107, 2025, 2026, 2027, 2028, 1859, 2160, 1867, - 1868, 2007, 2008, 1880, 1881, 2191, 2192, -2, -2, -2, - 234, 235, 236, 237, 238, 239, 240, 241, 0, 1820, - 2141, 2142, 230, 0, 0, 298, 299, 295, 296, 297, - 1113, 1114, 251, 252, 253, 254, 255, 256, 257, 258, - 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, - 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, - 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, - 289, 290, 291, 292, 0, 850, 0, 0, 0, 0, - 0, 1464, 0, 1456, 1455, 65, 0, 877, -2, 0, - 0, 0, 0, 49, 0, 54, 937, 880, 79, 
78, - 1504, 0, 0, 0, 61, 1465, 69, 71, 1466, 0, - 882, 883, 0, 913, 917, 0, 0, 0, 1573, 48, - 0, 1594, 1572, 1572, 104, 0, 0, 105, 125, 126, - 127, 0, 0, 111, 112, 1559, 1560, 45, 0, 0, - 179, 180, 0, 43, 428, 0, 175, 0, 421, 360, - 0, 1482, 0, 0, 0, 0, 0, 877, 0, 1567, - 156, 157, 164, 165, 166, 401, 401, 401, 575, 0, - 0, 167, 167, 533, 534, 535, 0, 0, -2, 426, - 0, 513, 0, 0, 415, 415, 419, 417, 418, 0, - 0, 0, 0, 0, 0, 0, 0, 552, 0, 553, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 664, - 0, 402, 0, 573, 574, 464, 0, 0, 0, 0, - 0, 0, 0, 0, 1575, 1576, 0, 550, 551, 0, - 0, 0, 401, 401, 0, 0, 0, 0, 401, 401, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 155, 1495, - 0, 0, 0, -2, 0, 708, 0, 0, 0, 1568, - 1568, 0, 715, 0, 717, 0, 720, 0, 0, 721, - 0, 778, 778, 776, 777, 723, 724, 725, 726, 781, - 0, 0, 410, 411, 412, 778, 781, 0, 781, 781, - 781, 781, 778, 778, 778, 781, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 2221, 784, 781, 0, 754, - 0, 755, 756, 757, 760, 761, 763, 2222, 2223, 1585, - 1586, 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, - 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, 1631, 1632, - 1633, 1634, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642, - 1643, 1644, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, - 1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660, 1661, 1662, - 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1672, - 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, 1681, 1682, - 1683, 1684, 1685, 1686, 1687, 1688, 1689, 1690, 1691, 1692, - 1693, 1694, 1695, 1696, 1697, 1698, 1699, 1700, 1701, 1702, - 1703, 1704, 1705, 1706, 1707, 1708, 1709, 1710, 1711, 1712, - 1713, 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1722, - 1723, 1724, 1725, 1726, 1727, 1728, 1729, 1730, 1731, 1732, - 1733, 1734, 1735, 1736, 1737, 1738, 1739, 1740, 1741, 1742, - 1743, 1744, 1745, 1746, 1747, 1748, 1749, 1750, 1751, 1752, - 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762, - 1763, 1764, 2221, 2221, 767, 771, 1558, 793, 799, 801, - 802, 0, 0, 812, 815, 
834, 51, 1866, 822, 51, - 824, 825, 826, 852, 853, 858, 0, 0, 0, 0, - 864, 865, 866, 0, 0, 869, 870, 871, 0, 0, - 0, 0, 0, 1000, 0, 0, 1119, 1120, 1121, 1122, - 1123, 1124, 1125, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1020, 1021, 0, 0, 0, 1045, 1046, 1047, 1048, - 1051, 0, 1062, 0, 1064, 1437, -2, 0, 0, 0, - 1056, 1057, 0, 0, 0, 0, 0, 0, 0, 1429, - 0, 0, 1143, 0, 1144, 1146, 1147, 1148, 0, 1149, - 1150, 887, 887, 887, 887, 887, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 887, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1578, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 143, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 897, 0, 0, 897, 897, 0, 0, 222, - 223, 224, 225, 226, 227, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 242, - 243, 244, 245, 246, 247, 300, 248, 249, 250, 1129, - 0, 0, 0, 842, 843, 0, 963, 1578, 0, 0, - 893, 0, 59, 68, 70, 1464, 63, 1464, 0, 899, - 0, 0, -2, -2, 900, 906, 907, 908, 909, 910, - 56, 2219, 57, 0, 76, 0, 50, 0, 0, 0, - 0, 374, 1507, 0, 0, 1457, 1458, 1461, 0, 914, - 1960, 918, 0, 920, 921, 0, 0, 102, 0, 979, - 0, 0, 0, 0, 113, 0, 115, 116, 0, 0, - 0, 385, 1561, 1562, 1563, -2, 408, 0, 385, 369, - 308, 309, 310, 360, 312, 360, 360, 360, 360, 374, - 374, 374, 374, 343, 344, 345, 346, 347, 0, 0, - 329, 360, 360, 360, 360, 350, 351, 352, 353, 354, - 355, 356, 357, 313, 314, 315, 316, 317, 318, 319, - 320, 321, 362, 362, 362, 362, 362, 366, 366, 0, - 44, 0, 389, 0, 1461, 0, 0, 1495, 1570, 1580, - 0, 0, 0, 1570, 134, 0, 0, 0, 576, 614, - 527, 564, 577, 0, 530, 531, -2, 0, 0, 512, - 0, 514, 0, 409, 0, -2, 0, 419, 0, 415, - 419, 416, 419, 407, 420, 554, 555, 556, 0, 558, - 559, 644, 949, 0, 0, 0, 0, 0, 650, 651, - 652, 0, 654, 655, 656, 657, 658, 659, 660, 661, - 662, 663, 565, 566, 567, 568, 569, 570, 571, 572, - 0, 0, 0, 0, 514, 0, 561, 0, 0, 
465, - 466, 467, 0, 0, 470, 471, 472, 473, 0, 0, - 476, 477, 478, 966, 967, 479, 480, 505, 506, 507, - 481, 482, 483, 484, 485, 486, 487, 499, 500, 501, - 502, 503, 504, 488, 489, 490, 491, 492, 493, 496, - 0, 149, 1486, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1568, 0, - 0, 0, 0, 896, 981, 1591, 1592, 0, 0, 782, - 783, 0, 413, 414, 781, 781, 727, 768, 0, 781, - 731, 769, 732, 734, 733, 735, 748, 749, 781, 738, - 779, 780, 739, 740, 741, 742, 743, 744, 745, 764, - 750, 751, 752, 785, 0, 789, 790, 765, 766, 0, - 0, 805, 806, 0, 813, 837, 835, 836, 838, 830, - 831, 832, 833, 0, 839, 0, 0, 855, 98, 860, - 861, 862, 863, 875, 868, 1131, 997, 998, 999, 0, - 1001, 1005, 0, 1115, 1117, 1007, 1003, 1009, 1126, 1127, - 1128, 0, 0, 0, 0, 0, 1013, 1017, 1022, 1023, - 1024, 1025, 1026, 0, 1027, 0, 1030, 1031, 1032, 1033, - 1034, 1035, 1041, 1405, 1406, 1407, 1060, 301, 302, 0, - 1061, 0, 0, 0, 0, 0, 0, 0, 0, 1370, - 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, - 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1130, - 0, 911, 0, 0, 1435, 1432, 0, 0, 0, 1391, - 1393, 0, 0, 0, 888, 889, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1408, 0, 0, 0, 0, 0, - 1428, 0, 1066, 1067, 1068, 0, 0, 0, 0, 0, - 0, 1188, 0, 0, 0, 0, 1579, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 144, 145, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1334, 1335, - 1336, 1337, 41, 0, 0, 0, 0, 0, 0, 0, - 898, 1439, 0, -2, -2, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1359, 0, - 0, 0, 0, 0, 0, 1551, 0, 0, 845, 846, - 848, 0, 983, 0, 964, 0, 0, 851, 0, 892, - 0, 895, 62, 64, 904, 905, 0, 922, 901, 58, - 53, 0, 0, 941, 1505, 374, 1527, 0, 383, 383, - 380, 1467, 1468, 0, 1460, 1462, 1463, 81, 919, 915, - 0, 995, 0, 0, 978, 0, 0, 925, 927, 928, - 929, 961, 0, 932, 933, 0, 0, 0, 0, 0, - 100, 980, 106, 0, 114, 0, 0, 
119, 120, 107, - 108, 109, 110, 0, 603, -2, 460, 181, 183, 184, - 185, 176, -2, 372, 370, 371, 311, 374, 374, 337, - 338, 339, 340, 341, 342, 0, 0, 330, 331, 332, - 333, 322, 0, 323, 324, 325, 364, 0, 326, 327, - 0, 328, 427, 0, 1469, 390, 391, 393, 401, 0, - 396, 397, 0, 401, 401, 0, 422, 423, 0, 1461, - 1486, 0, 0, 0, 1581, 1580, 1580, 1580, 0, 169, - 170, 171, 172, 173, 174, 639, 0, 0, 615, 637, - 638, 167, 0, 0, 177, 516, 515, 0, 671, 0, - 425, 0, 0, 419, 419, 404, 405, 557, 0, 0, - 646, 647, 648, 649, 0, 0, 0, 543, 454, 0, - 544, 545, 514, 516, 0, 0, 385, 468, 469, 474, - 475, 494, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 590, 591, 592, 595, 597, 518, 601, - 594, 596, 598, 518, 602, 1483, 1484, 1485, 0, 0, - 709, 0, 0, 451, 96, 1569, 714, 718, 719, 778, - 737, 770, 778, 729, 736, 758, 803, 804, 809, 817, - 818, 819, 820, 821, 859, 0, 0, 0, 0, 867, - 0, 0, 1006, 1116, 1118, 1010, 0, 1014, 1018, 0, - 0, 0, 0, 0, 1065, 1063, 1439, 0, 0, 0, - 1112, 0, 0, 0, 1134, 1135, 0, 0, 0, 1433, - 0, 0, 1141, 0, 1394, 1151, 0, 0, 0, 0, - 0, 1157, 1158, 1159, 1160, 1161, 1162, 1163, 1164, 1165, - 1166, 1455, 0, 0, 0, 0, 0, 1172, 1173, 1174, - 1175, 1176, 0, 1178, 0, 1179, 0, 0, 0, 0, - 1186, 1187, 1189, 0, 0, 1192, 1193, 0, 1195, 0, - 1197, 1198, 1199, 1200, 1201, 1202, 0, 1204, 0, 1206, - 1207, 1208, 0, 1210, 0, 1212, 1213, 0, 1215, 0, - 1217, 0, 1220, 0, 1223, 0, 1226, 0, 1229, 0, - 1232, 0, 1235, 0, 1238, 0, 1241, 0, 1244, 0, - 1247, 0, 1250, 0, 1253, 0, 1256, 0, 1259, 0, - 1262, 0, 1265, 1266, 1267, 0, 1269, 0, 1271, 0, - 1274, 1275, 0, 1277, 0, 1280, 0, 1283, 0, 0, - 1284, 0, 0, 0, 1288, 0, 0, 0, 0, 1297, - 1298, 1299, 1300, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1311, 1312, 1313, 1314, 1315, 1316, 0, - 1318, 0, 1094, 0, 0, 1094, 0, 0, 0, 0, - 0, 1132, 897, 0, 1395, 1396, 1397, 1398, 1399, 0, - 0, 0, 0, 0, 0, 1357, 1358, 1360, 0, 0, - 1363, 0, 1365, 0, 1552, 844, 847, 849, 935, 984, - 985, 0, 0, 0, 0, 965, 1577, 890, 891, 894, - 943, 0, 1443, 0, 0, 922, 995, 923, 0, 902, - 55, 938, 0, 
1509, 1508, 1521, 1534, 383, 383, 377, - 378, 384, 379, 381, 382, 1459, 0, 1464, 0, 1545, - 0, 0, 1537, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 968, 0, 0, 971, 0, 0, 0, 0, - 962, 933, 0, 934, 0, -2, 0, 0, 94, 95, - 0, 0, 0, 117, 118, 0, 0, 124, 386, 387, - 158, 167, 462, 182, 435, 0, 0, 307, 373, 334, - 335, 336, 0, 358, 0, 0, 0, 0, 456, 130, - 1473, 1472, 401, 401, 392, 0, 395, 0, 0, 0, - 1582, 361, 424, 0, 148, 0, 0, 0, 0, 0, - 154, 609, 0, 0, 616, 0, 0, 0, 525, 0, - 536, 537, 0, 643, -2, 705, 389, 0, 403, 406, - 950, 0, 0, 538, 0, 541, 542, 455, 516, 547, - 548, 562, 549, 497, 498, 495, 0, 0, 1496, 1497, - 1502, 1500, 1501, 135, 583, 585, 584, 588, 0, 0, - 0, 520, 0, 520, 581, 0, 451, 1469, 0, 713, - 452, 453, 781, 781, 854, 99, 0, 857, 0, 0, - 0, 0, 1011, 1015, 1028, 1029, 1400, 1426, 360, 360, - 1413, 360, 366, 1416, 360, 1418, 360, 1421, 360, 1424, - 1425, 0, 0, 1058, 0, 912, 0, 0, 1140, 1436, - 0, 0, 1152, 1153, 1154, 1155, 1156, 1430, 0, 0, - 0, 1171, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 146, 147, 0, 0, 0, 0, 0, 0, - 1368, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1089, 1093, 0, 1095, 1096, 0, 0, 1320, - 0, 0, 1338, 0, 0, 0, 0, 0, 0, 0, - 1440, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 986, 991, 991, 991, 0, 0, 0, 1564, 1565, - 1444, 1445, 995, 1446, 924, 903, 942, 1527, 0, 1520, - 0, -2, 1529, 0, 0, 0, 1535, 375, 376, 916, - 82, 996, 85, 0, 1545, 1554, 0, 1536, 1547, 1549, - 0, 0, 0, 1541, 0, 0, 995, 926, 957, 959, - 0, 954, 969, 970, 972, 0, 974, 0, 976, 977, - 937, 931, 0, 102, 0, 995, 995, 101, 0, 982, - 121, 122, 123, 461, 186, 191, 0, 0, 0, 196, - 0, 198, 0, 0, 0, 203, 204, 401, 401, 436, - 0, 304, 306, 0, 0, 189, 374, 0, 374, 0, - 365, 367, 0, 437, 457, 1470, 1471, 0, 0, 394, - 398, 399, 400, 0, 1571, 150, 0, 0, 0, 612, - 0, 640, 0, 0, 0, 0, 0, 0, 178, 517, - 672, 673, 674, 675, 676, 677, 678, 679, 680, 0, - 401, 0, 0, 0, 401, 401, 401, 0, 697, 388, - 0, 0, 668, 665, 539, 0, 228, 229, 
231, 0, - 0, 0, 0, 0, 546, 937, 1487, 1488, 1489, 0, - 1499, 1503, 138, 0, 0, 0, 0, 593, 599, 0, - 519, 600, 710, 711, 712, 97, 722, 728, 856, 876, - 1004, 1012, 1016, 0, 0, 0, 0, 1427, 1411, 374, - 1414, 1415, 1417, 1419, 1420, 1422, 1423, 1054, 1055, 1059, - 0, 1137, 0, 1139, 1434, 0, 1464, 0, 0, 0, - 1170, 0, 0, 0, 1181, 1180, 1182, 0, 1184, 1185, - 1190, 1191, 1194, 1196, 1203, 1205, 1209, 1211, 1214, 1216, - 1218, 0, 1221, 0, 1224, 0, 1227, 0, 1230, 0, - 1233, 0, 1236, 0, 1239, 0, 1242, 0, 1245, 0, - 1248, 0, 1251, 0, 1254, 0, 1257, 0, 1260, 0, - 1263, 0, 1268, 1270, 0, 1273, 1276, 1278, 0, 1281, - 0, 1285, 0, 1287, 1289, 1290, 0, 0, 0, 1301, - 1302, 1303, 1304, 1305, 1306, 1307, 1308, 1309, 1310, 1317, - 0, 1087, 1090, 1319, 1097, 1098, 1103, 1322, 0, 0, - 0, 1325, 0, 0, 0, 1329, 1133, 1340, 0, 1345, - 0, 0, 1351, 0, 1355, 0, 1361, 1362, 1364, 1366, - 0, 0, 0, 0, 0, 963, 944, 66, 1446, 1448, - 0, 1514, 1512, 1512, 1522, 1523, 0, 0, 1530, 0, - 0, 0, 0, 86, 0, 0, 0, 1550, 0, 0, - 0, 0, 103, 1597, 1455, 951, 958, 0, 0, 952, - 0, 953, 973, 975, 930, 0, 995, 995, 92, 93, - 0, 192, 0, 194, 220, 221, 0, 197, 199, 200, - 201, 207, 208, 209, 202, 0, 0, 303, 305, 0, - 0, 348, 359, 349, 0, 0, 1474, 1475, 1476, 1477, - 1478, 1479, 1480, 1481, 937, 151, 152, 153, 604, 0, - 614, 0, 939, 0, 607, 0, 528, 0, 0, 0, - 401, 401, 401, 0, 0, 0, 0, 682, 0, 0, - 645, 0, 653, 0, 0, 0, 232, 233, 0, 1498, - 582, 0, 136, 137, 0, 0, 587, 521, 522, 1052, - 0, 0, 0, 1053, 1412, 0, 0, 0, 0, 1431, - 0, 0, 0, 0, 1177, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1293, 0, 0, - 0, 634, 635, 0, 1369, 1092, 1455, 0, 1094, 1104, - 1105, 0, 1094, 1339, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 992, 0, 0, 0, 0, - 983, 1448, 1453, 0, 0, 1517, 0, 1510, 1513, 1511, - 1524, 0, 0, 1531, 0, 1533, 0, 1555, 1556, 1548, - 0, 1540, 1543, 1539, 1542, 1599, 1601, 1611, 1612, 1464, - 955, 0, 960, 0, 1455, 91, 0, 195, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 205, 206, 0, 0, 363, 368, - 0, 0, 0, 605, 0, 940, 617, 608, 0, 695, - 0, 699, 0, 0, 0, 702, 703, 704, 681, 0, - 685, 429, 669, 666, 667, 540, 0, 139, 140, 0, - 0, 0, 1401, 0, 1404, 1136, 1138, 0, 1167, 1168, - 1169, 1409, 1410, 1183, 1219, 1222, 1225, 1228, 1231, 1234, - 1237, 1240, 1243, 1246, 1249, 1252, 1255, 1258, 1261, 1264, - 1272, 1279, 1282, 1286, 1291, 0, 1294, 0, 0, 1295, - 0, 636, 1083, 0, 0, 1101, 1102, 0, 1324, 1326, - 1327, 1328, 1341, 0, 1346, 1347, 0, 1352, 0, 1356, - 1367, 0, 988, 945, 946, 993, 994, 0, 0, 936, - 1453, 84, 1454, 1451, 0, 1449, 1447, 1506, 0, 1515, - 1516, 1525, 1526, 1532, 0, 1538, 0, 1595, 1607, 1603, - 0, 89, 0, 0, 0, 1464, 193, 0, 212, 0, - 613, 0, 616, 606, 693, 694, 0, 706, 698, 700, - 701, 683, -2, 1490, 0, 0, 0, 589, 1402, 0, - 0, 1296, 0, 632, 633, 1091, 1084, 0, 1069, 1070, - 1088, 1321, 1323, 0, 0, 0, 0, 987, 989, 990, - 83, 0, 1450, 1109, 0, 1518, 1519, 1546, 1544, 1613, - 0, 1609, 0, 1605, 0, 0, 956, 963, 0, 90, - 442, 435, 1490, 0, 0, 0, 686, 687, 688, 689, - 690, 691, 692, 579, 1492, 141, 142, 0, 509, 510, - 511, 135, 0, 1142, 1292, 1085, 0, 0, 0, 0, - 0, 1342, 0, 1348, 0, 1353, 0, 947, 948, 1452, - 0, 0, 0, 1600, 0, 0, 1598, 0, 0, 1602, - 618, 0, 620, 0, -2, 430, 443, 0, 187, 213, - 214, 0, 0, 217, 218, 219, 210, 211, 131, 0, - 0, 707, 0, 1493, 1494, 0, 138, 0, 0, 1076, - 1077, 1078, 1079, 1081, 0, 0, 0, 0, 1110, 1089, - 1596, 0, 1608, 0, 1604, 619, 0, 0, 385, 0, - 629, 431, 432, 0, 438, 439, 440, 441, 215, 216, - 641, 0, 0, 508, 586, 1403, 0, 0, 1343, 0, - 1349, 0, 1354, 0, 1610, 1606, 621, 622, 630, 0, - 433, 0, 434, 0, 0, 0, 610, 0, 641, 1491, - 1086, 1080, 1082, 0, 0, 1108, 0, 631, 627, 444, - 
446, 447, 0, 0, 445, 642, 611, 1344, 1350, 0, - 448, 449, 450, 623, 624, 625, 626, + 39, 40, 73, 75, 76, 880, 880, 880, 0, 0, + 880, 0, 0, 880, -2, -2, 880, 1611, 0, 880, + 0, 875, 0, -2, 795, 801, 0, 810, -2, 0, + 0, 880, 880, 2256, 2256, 875, 0, 0, 0, 0, + 880, 880, 880, 880, 1637, 1477, 53, 880, 0, 88, + 89, 830, 831, 832, 68, 0, 2254, 881, 1, 3, + 74, 78, 0, 0, 0, 61, 1486, 0, 81, 0, + 0, 884, 0, 0, 1594, -2, 880, 880, 0, 129, + 130, 0, 0, 0, -2, 133, -2, 162, 163, 164, + 0, 169, 606, 527, 579, 525, 564, -2, 513, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 530, 402, 402, 0, 0, -2, 513, 513, 513, + 1596, 0, 0, 0, 561, 464, 402, 402, 402, 0, + 402, 402, 402, 402, 0, 0, 402, 402, 402, 402, + 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, + 402, 402, 402, 1504, 168, 1612, 1609, 1610, 1790, 1791, + 1792, 1793, 1794, 1795, 1796, 1797, 1798, 1799, 1800, 1801, + 1802, 1803, 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, + 1812, 1813, 1814, 1815, 1816, 1817, 1818, 1819, 1820, 1821, + 1822, 1823, 1824, 1825, 1826, 1827, 1828, 1829, 1830, 1831, + 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, 1841, + 1842, 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1850, 1851, + 1852, 1853, 1854, 1855, 1856, 1857, 1858, 1859, 1860, 1861, + 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, 1870, 1871, + 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1879, 1880, 1881, + 1882, 1883, 1884, 1885, 1886, 1887, 1888, 1889, 1890, 1891, + 1892, 1893, 1894, 1895, 1896, 1897, 1898, 1899, 1900, 1901, + 1902, 1903, 1904, 1905, 1906, 1907, 1908, 1909, 1910, 1911, + 1912, 1913, 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, + 1922, 1923, 1924, 1925, 1926, 1927, 1928, 1929, 1930, 1931, + 1932, 1933, 1934, 1935, 1936, 1937, 1938, 1939, 1940, 1941, + 1942, 1943, 1944, 1945, 1946, 1947, 1948, 1949, 1950, 1951, + 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, + 1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, + 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, + 1982, 1983, 
1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, + 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, + 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, + 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, + 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2030, 2031, + 2032, 2033, 2034, 2035, 2036, 2037, 2038, 2039, 2040, 2041, + 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2050, 2051, + 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059, 2060, 2061, + 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2071, + 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2081, + 2082, 2083, 2084, 2085, 2086, 2087, 2088, 2089, 2090, 2091, + 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, 2100, 2101, + 2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, + 2112, 2113, 2114, 2115, 2116, 2117, 2118, 2119, 2120, 2121, + 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, + 2132, 2133, 2134, 2135, 2136, 2137, 2138, 2139, 2140, 2141, + 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149, 2150, 2151, + 2152, 2153, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, + 2162, 2163, 2164, 2165, 2166, 2167, 2168, 2169, 2170, 2171, + 2172, 2173, 2174, 2175, 2176, 2177, 2178, 2179, 2180, 2181, + 2182, 2183, 2184, 2185, 2186, 2187, 2188, 2189, 2190, 2191, + 2192, 2193, 2194, 2195, 2196, 2197, 2198, 2199, 2200, 2201, + 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211, + 2212, 2213, 2214, 2215, 2216, 2217, 2218, 2219, 2220, 2221, + 2222, 2223, 2224, 2225, 2226, 2227, 2228, 2229, 2230, 2231, + 2232, 2233, 2234, 2235, 2236, 2237, 2238, 2239, 2240, 2241, + 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, + 2252, 2253, 0, 1588, 0, 719, 983, 0, 876, 877, + 0, 784, 784, 0, 784, 784, 784, 784, 0, 0, + 0, 733, 0, 0, 0, 0, 781, 0, 749, 750, + 0, 781, 0, 756, 787, 0, 0, 762, 784, 784, + 765, 2257, 0, 2257, 2257, 1579, 0, 778, 776, 790, + 791, 43, 794, 797, 798, 799, 800, 803, 0, 814, + 817, 1605, 1606, 0, 819, 826, 843, 844, 0, 48, + 1133, 0, 
1005, 0, 1011, -2, 1022, 1039, 1040, 1041, + 1042, 1043, 1045, 1046, 1047, 0, 0, 0, 0, 1052, + 1053, 0, 0, 0, 0, 0, 1114, 0, 0, 0, + 0, 1450, 0, 0, 1412, 1412, 1148, 1412, 1412, 1414, + 1414, 1414, 1842, 1980, 1988, 2164, 1803, 1809, 1810, 1811, + 2110, 2111, 2112, 2113, 2201, 2202, 2206, 1904, 1798, 2177, + 2178, 0, 2253, 1941, 1949, 1950, 1974, 2074, 2187, 1821, + 1969, 2038, 1901, 1923, 1924, 2056, 2057, 1945, 1946, 1927, + 2116, 2118, 2134, 2135, 2120, 2122, 2131, 2137, 2142, 2121, + 2133, 2138, 2151, 2155, 2158, 2159, 2160, 2128, 2126, 2139, + 2143, 2145, 2147, 2153, 2156, 2129, 2127, 2140, 2144, 2146, + 2148, 2154, 2157, 2115, 2119, 2123, 2132, 2150, 2130, 2149, + 2124, 2136, 2141, 2152, 2125, 2117, 1939, 1942, 1930, 1931, + 1933, 1935, 1940, 1947, 1953, 1932, 1952, 1951, 0, 1928, + 1929, 1934, 1944, 1948, 1936, 1937, 1938, 1943, 1954, 1994, + 1993, 1992, 2037, 1965, 2036, 0, 0, 0, 0, 0, + 1793, 1847, 1848, 2161, 1334, 1335, 1336, 1337, 0, 0, + 0, 0, 0, 0, 0, 294, 295, 1463, 1464, 47, + 1132, 1575, 1414, 1414, 1414, 1414, 1414, 1414, 1074, 1075, + 1076, 1077, 1078, 1102, 1103, 1109, 1110, 2051, 2052, 2053, + 2054, 1885, 2196, 1893, 1894, 2033, 2034, 1906, 1907, 2227, + 2228, -2, -2, -2, 235, 236, 237, 238, 239, 240, + 241, 242, 0, 1846, 2175, 2176, 231, 0, 0, 299, + 300, 296, 297, 298, 1116, 1117, 252, 253, 254, 255, + 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, + 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, + 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, + 286, 287, 288, 289, 290, 291, 292, 293, 0, 853, + 0, 0, 0, 0, 0, 0, 1638, 1639, 1486, 0, + 1478, 1477, 66, 0, 880, -2, 0, 0, 0, 0, + 50, 0, 55, 940, 883, 80, 79, 1526, 0, 0, + 0, 62, 1487, 70, 72, 1488, 0, 885, 886, 0, + 916, 920, 0, 0, 0, 1595, 49, 0, 1616, 1594, + 1594, 105, 0, 0, 106, 126, 127, 128, 0, 0, + 112, 113, 1581, 1582, 46, 0, 0, 180, 181, 0, + 44, 429, 0, 176, 0, 422, 361, 0, 1504, 0, + 0, 0, 0, 0, 880, 0, 1589, 157, 158, 165, + 166, 167, 402, 402, 402, 576, 0, 0, 168, 168, + 
534, 535, 536, 0, 0, -2, 427, 0, 514, 0, + 0, 416, 416, 420, 418, 419, 0, 0, 0, 0, + 0, 0, 0, 0, 553, 0, 554, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 667, 0, 403, 0, + 574, 575, 465, 0, 0, 0, 0, 0, 0, 0, + 0, 1597, 1598, 0, 551, 552, 0, 0, 0, 402, + 402, 0, 0, 0, 0, 402, 402, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 156, 1517, 0, 0, 0, + -2, 0, 711, 0, 0, 0, 1590, 1590, 0, 718, + 0, 0, 0, 723, 0, 0, 724, 0, 781, 781, + 779, 780, 726, 727, 728, 729, 784, 0, 0, 411, + 412, 413, 781, 784, 0, 784, 784, 784, 784, 781, + 781, 781, 784, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 2257, 787, 784, 0, 757, 0, 758, 759, + 760, 763, 764, 766, 2258, 2259, 1607, 1608, 1640, 1641, + 1642, 1643, 1644, 1645, 1646, 1647, 1648, 1649, 1650, 1651, + 1652, 1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660, 1661, + 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, + 1672, 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, 1681, + 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, 1690, 1691, + 1692, 1693, 1694, 1695, 1696, 1697, 1698, 1699, 1700, 1701, + 1702, 1703, 1704, 1705, 1706, 1707, 1708, 1709, 1710, 1711, + 1712, 1713, 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, + 1722, 1723, 1724, 1725, 1726, 1727, 1728, 1729, 1730, 1731, + 1732, 1733, 1734, 1735, 1736, 1737, 1738, 1739, 1740, 1741, + 1742, 1743, 1744, 1745, 1746, 1747, 1748, 1749, 1750, 1751, + 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, + 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, 1770, 1771, + 1772, 1773, 1774, 1775, 1776, 1777, 1778, 1779, 1780, 1781, + 1782, 1783, 1784, 1785, 1786, 1787, 1788, 1789, 2257, 2257, + 770, 774, 1580, 796, 802, 804, 805, 0, 0, 815, + 818, 837, 52, 1892, 825, 52, 827, 828, 829, 855, + 856, 861, 0, 0, 0, 0, 867, 868, 869, 0, + 0, 872, 873, 874, 0, 0, 0, 0, 0, 1003, + 0, 0, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1023, 1024, 0, + 0, 0, 1048, 1049, 1050, 1051, 1054, 0, 1065, 0, + 1067, 1459, -2, 0, 0, 0, 1059, 1060, 0, 0, + 
0, 0, 0, 0, 0, 1451, 0, 0, 1146, 0, + 1147, 1149, 1150, 1151, 0, 1152, 1153, 890, 890, 890, + 890, 890, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 890, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1600, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 144, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 900, + 0, 0, 900, 900, 0, 0, 223, 224, 225, 226, + 227, 228, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 243, 244, 245, 246, + 247, 248, 301, 249, 250, 251, 1132, 0, 0, 0, + 845, 846, 0, 966, 1600, 0, 0, 896, 0, 1636, + 60, 69, 71, 1486, 64, 1486, 0, 902, 0, 0, + -2, -2, 903, 909, 910, 911, 912, 913, 57, 2255, + 58, 0, 77, 0, 51, 0, 0, 0, 0, 375, + 1529, 0, 0, 1479, 1480, 1483, 0, 917, 1986, 921, + 0, 923, 924, 0, 0, 103, 0, 982, 0, 0, + 0, 0, 114, 0, 116, 117, 0, 0, 0, 386, + 1583, 1584, 1585, -2, 409, 0, 386, 370, 309, 310, + 311, 361, 313, 361, 361, 361, 361, 375, 375, 375, + 375, 344, 345, 346, 347, 348, 0, 0, 330, 361, + 361, 361, 361, 351, 352, 353, 354, 355, 356, 357, + 358, 314, 315, 316, 317, 318, 319, 320, 321, 322, + 363, 363, 363, 363, 363, 367, 367, 0, 45, 0, + 390, 0, 1483, 0, 0, 1517, 1592, 1602, 0, 0, + 0, 1592, 135, 0, 0, 0, 577, 617, 528, 565, + 578, 0, 531, 532, -2, 0, 0, 513, 0, 515, + 0, 410, 0, -2, 0, 420, 0, 416, 420, 417, + 420, 408, 421, 555, 556, 557, 0, 559, 560, 647, + 952, 0, 0, 0, 0, 0, 653, 654, 655, 0, + 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, + 566, 567, 568, 569, 570, 571, 572, 573, 0, 0, + 0, 0, 515, 0, 562, 0, 0, 466, 467, 468, + 0, 0, 471, 472, 473, 474, 0, 0, 477, 478, + 479, 969, 970, 480, 481, 506, 507, 508, 482, 483, + 484, 485, 486, 487, 488, 500, 501, 502, 503, 504, + 505, 489, 490, 491, 492, 493, 494, 497, 0, 150, + 1508, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1590, 0, 0, 0, + 0, 899, 984, 1613, 1614, 720, 0, 0, 785, 786, + 0, 
414, 415, 784, 784, 730, 771, 0, 784, 734, + 772, 735, 737, 736, 738, 751, 752, 784, 741, 782, + 783, 742, 743, 744, 745, 746, 747, 748, 767, 753, + 754, 755, 788, 0, 792, 793, 768, 769, 0, 0, + 808, 809, 0, 816, 840, 838, 839, 841, 833, 834, + 835, 836, 0, 842, 0, 0, 858, 99, 863, 864, + 865, 866, 878, 871, 1134, 1000, 1001, 1002, 0, 1004, + 1008, 0, 1118, 1120, 1010, 1006, 1012, 1129, 1130, 1131, + 0, 0, 0, 0, 0, 1016, 1020, 1025, 1026, 1027, + 1028, 1029, 0, 1030, 0, 1033, 1034, 1035, 1036, 1037, + 1038, 1044, 1427, 1428, 1429, 1063, 302, 303, 0, 1064, + 0, 0, 0, 0, 0, 0, 0, 0, 1374, 1375, + 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, + 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1133, 0, + 914, 0, 0, 1457, 1454, 0, 0, 0, 1413, 1415, + 0, 0, 0, 891, 892, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, + 1402, 1403, 1404, 1405, 1406, 1407, 1408, 1409, 1410, 1411, + 0, 0, 1430, 0, 0, 0, 0, 0, 1450, 0, + 1069, 1070, 1071, 0, 0, 0, 0, 0, 0, 1192, + 0, 0, 0, 0, 1601, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 145, 146, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1338, 1339, 1340, 1341, + 42, 0, 0, 0, 0, 0, 0, 0, 901, 1461, + 0, -2, -2, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1363, 0, 0, 0, + 0, 0, 0, 1573, 0, 0, 848, 849, 851, 0, + 986, 0, 967, 0, 0, 854, 0, 895, 0, 898, + 63, 65, 907, 908, 0, 925, 904, 59, 54, 0, + 0, 944, 1527, 375, 1549, 0, 384, 384, 381, 1489, + 1490, 0, 1482, 1484, 1485, 82, 922, 918, 0, 998, + 0, 0, 981, 0, 0, 928, 930, 931, 932, 964, + 0, 935, 936, 0, 0, 0, 0, 0, 101, 983, + 107, 0, 115, 0, 0, 120, 121, 108, 109, 110, + 111, 0, 606, -2, 461, 182, 184, 185, 186, 177, + -2, 373, 371, 372, 312, 375, 375, 338, 339, 340, + 341, 342, 343, 0, 0, 331, 332, 333, 334, 323, + 0, 324, 325, 326, 365, 0, 327, 328, 0, 
329, + 428, 0, 1491, 391, 392, 394, 402, 0, 397, 398, + 0, 402, 402, 0, 423, 424, 0, 1483, 1508, 0, + 0, 0, 1603, 1602, 1602, 1602, 0, 170, 171, 172, + 173, 174, 175, 642, 0, 0, 618, 640, 641, 168, + 0, 0, 178, 517, 516, 0, 674, 0, 426, 0, + 0, 420, 420, 405, 406, 558, 0, 0, 649, 650, + 651, 652, 0, 0, 0, 544, 455, 0, 545, 546, + 515, 517, 0, 0, 386, 469, 470, 475, 476, 495, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 593, 594, 595, 598, 600, 519, 604, 597, + 599, 601, 519, 605, 1505, 1506, 1507, 0, 0, 712, + 0, 0, 452, 97, 1591, 717, 721, 722, 781, 740, + 773, 781, 732, 739, 761, 806, 807, 812, 820, 821, + 822, 823, 824, 862, 0, 0, 0, 0, 870, 0, + 0, 1009, 1119, 1121, 1013, 0, 1017, 1021, 0, 0, + 0, 0, 0, 1068, 1066, 1461, 0, 0, 0, 1115, + 0, 0, 0, 1137, 1138, 0, 0, 0, 1455, 0, + 0, 1144, 0, 1416, 1154, 0, 0, 0, 0, 0, + 1160, 1161, 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, + 1477, 1171, 0, 0, 0, 0, 0, 1176, 1177, 1178, + 1179, 1180, 0, 1182, 0, 1183, 0, 0, 0, 0, + 1190, 1191, 1193, 0, 0, 1196, 1197, 0, 1199, 0, + 1201, 1202, 1203, 1204, 1205, 1206, 0, 1208, 0, 1210, + 1211, 1212, 0, 1214, 0, 1216, 1217, 0, 1219, 0, + 1221, 0, 1224, 0, 1227, 0, 1230, 0, 1233, 0, + 1236, 0, 1239, 0, 1242, 0, 1245, 0, 1248, 0, + 1251, 0, 1254, 0, 1257, 0, 1260, 0, 1263, 0, + 1266, 0, 1269, 1270, 1271, 0, 1273, 0, 1275, 0, + 1278, 1279, 0, 1281, 0, 1284, 0, 1287, 0, 0, + 1288, 0, 0, 0, 1292, 0, 0, 0, 0, 1301, + 1302, 1303, 1304, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1315, 1316, 1317, 1318, 1319, 1320, 0, + 1322, 0, 1097, 0, 0, 1097, 0, 0, 0, 0, + 0, 1135, 900, 0, 1417, 1418, 1419, 1420, 1421, 0, + 0, 0, 0, 0, 0, 1361, 1362, 1364, 0, 0, + 1367, 0, 1369, 0, 1574, 847, 850, 852, 938, 987, + 988, 0, 0, 0, 0, 968, 1599, 893, 894, 897, + 946, 0, 1465, 0, 0, 925, 998, 926, 0, 905, + 56, 941, 0, 1531, 1530, 1543, 1556, 384, 384, 378, + 379, 385, 380, 382, 383, 1481, 0, 1486, 0, 1567, + 0, 0, 1559, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 971, 0, 0, 974, 0, 0, 0, 0, + 965, 936, 0, 937, 0, -2, 0, 0, 95, 96, + 
0, 0, 0, 118, 119, 0, 0, 125, 387, 388, + 159, 168, 463, 183, 436, 0, 0, 308, 374, 335, + 336, 337, 0, 359, 0, 0, 0, 0, 457, 131, + 1495, 1494, 402, 402, 393, 0, 396, 0, 0, 0, + 1604, 362, 425, 0, 149, 0, 0, 0, 0, 0, + 155, 612, 0, 0, 619, 0, 0, 0, 526, 0, + 537, 538, 0, 646, -2, 708, 390, 0, 404, 407, + 953, 0, 0, 539, 0, 542, 543, 456, 517, 548, + 549, 563, 550, 498, 499, 496, 0, 0, 1518, 1519, + 1524, 1522, 1523, 136, 584, 586, 590, 585, 589, 0, + 0, 0, 521, 0, 521, 582, 0, 452, 1491, 0, + 716, 453, 454, 784, 784, 857, 100, 0, 860, 0, + 0, 0, 0, 1014, 1018, 1031, 1032, 1422, 1448, 361, + 361, 1435, 361, 367, 1438, 361, 1440, 361, 1443, 361, + 1446, 1447, 0, 0, 1061, 0, 915, 0, 0, 1143, + 1458, 0, 0, 1155, 1156, 1157, 1158, 1159, 1452, 0, + 0, 0, 1175, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 147, 148, 0, 0, 0, 0, 0, + 0, 1372, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1092, 1096, 0, 1098, 1099, 0, 0, + 1324, 0, 0, 1342, 0, 0, 0, 0, 0, 0, + 0, 1462, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 989, 994, 994, 994, 0, 0, 0, 1586, + 1587, 1466, 1467, 998, 1468, 927, 906, 945, 1549, 0, + 1542, 0, -2, 1551, 0, 0, 0, 1557, 376, 377, + 919, 83, 999, 86, 0, 1567, 1576, 0, 1558, 1569, + 1571, 0, 0, 0, 1563, 0, 0, 998, 929, 960, + 962, 0, 957, 972, 973, 975, 0, 977, 0, 979, + 980, 940, 934, 0, 103, 0, 998, 998, 102, 0, + 985, 122, 123, 124, 462, 187, 192, 0, 0, 0, + 197, 0, 199, 0, 0, 0, 204, 205, 402, 402, + 437, 0, 305, 307, 0, 0, 190, 375, 0, 375, + 0, 366, 368, 0, 438, 458, 1492, 1493, 0, 0, + 395, 399, 400, 401, 0, 1593, 151, 0, 0, 0, + 615, 0, 643, 0, 0, 0, 0, 0, 0, 179, + 518, 675, 676, 677, 678, 679, 680, 681, 682, 683, + 0, 402, 0, 0, 0, 402, 402, 402, 0, 700, + 389, 0, 0, 671, 668, 540, 0, 221, 222, 229, + 230, 232, 0, 0, 0, 0, 0, 547, 940, 1509, + 1510, 1511, 0, 1521, 1525, 139, 0, 0, 0, 0, + 592, 596, 602, 0, 520, 603, 713, 714, 715, 98, + 725, 731, 859, 879, 1007, 1015, 1019, 0, 0, 0, + 0, 
1449, 1433, 375, 1436, 1437, 1439, 1441, 1442, 1444, + 1445, 1057, 1058, 1062, 0, 1140, 0, 1142, 1456, 0, + 1486, 0, 0, 0, 1174, 0, 0, 0, 1185, 1184, + 1186, 0, 1188, 1189, 1194, 1195, 1198, 1200, 1207, 1209, + 1213, 1215, 1218, 1220, 1222, 0, 1225, 0, 1228, 0, + 1231, 0, 1234, 0, 1237, 0, 1240, 0, 1243, 0, + 1246, 0, 1249, 0, 1252, 0, 1255, 0, 1258, 0, + 1261, 0, 1264, 0, 1267, 0, 1272, 1274, 0, 1277, + 1280, 1282, 0, 1285, 0, 1289, 0, 1291, 1293, 1294, + 0, 0, 0, 1305, 1306, 1307, 1308, 1309, 1310, 1311, + 1312, 1313, 1314, 1321, 0, 1090, 1093, 1323, 1100, 1101, + 1106, 1326, 0, 0, 0, 1329, 0, 0, 0, 1333, + 1136, 1344, 0, 1349, 0, 0, 1355, 0, 1359, 0, + 1365, 1366, 1368, 1370, 0, 0, 0, 0, 0, 966, + 947, 67, 1468, 1470, 0, 1536, 1534, 1534, 1544, 1545, + 0, 0, 1552, 0, 0, 0, 0, 87, 0, 0, + 0, 1572, 0, 0, 0, 0, 104, 1619, 1477, 954, + 961, 0, 0, 955, 0, 956, 976, 978, 933, 0, + 998, 998, 93, 94, 0, 193, 0, 195, 0, 198, + 200, 201, 202, 208, 209, 210, 203, 0, 0, 304, + 306, 0, 0, 349, 360, 350, 0, 0, 1496, 1497, + 1498, 1499, 1500, 1501, 1502, 1503, 940, 152, 153, 154, + 607, 0, 617, 0, 942, 0, 610, 0, 529, 0, + 0, 0, 402, 402, 402, 0, 0, 0, 0, 685, + 0, 0, 648, 0, 656, 0, 0, 0, 233, 234, + 0, 1520, 583, 0, 137, 138, 0, 0, 588, 522, + 523, 1055, 0, 0, 0, 1056, 1434, 0, 0, 0, + 0, 1453, 0, 0, 0, 0, 1181, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1297, + 0, 0, 0, 637, 638, 0, 1373, 1095, 1477, 0, + 1097, 1107, 1108, 0, 1097, 1343, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 995, 0, 0, + 0, 0, 986, 1470, 1475, 0, 0, 1539, 0, 1532, + 1535, 1533, 1546, 0, 0, 1553, 0, 1555, 0, 1577, + 1578, 1570, 0, 1562, 1565, 1561, 1564, 1621, 1623, 1633, + 1634, 1486, 958, 0, 963, 0, 1477, 92, 0, 196, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 206, + 207, 0, 0, 364, 369, 0, 0, 0, 608, 0, + 943, 620, 611, 0, 698, 0, 702, 0, 0, 0, + 705, 706, 707, 684, 0, 688, 430, 672, 669, 670, + 541, 0, 140, 141, 0, 0, 0, 1423, 0, 1426, + 1139, 1141, 0, 1170, 1172, 1173, 1431, 1432, 1187, 1223, + 1226, 1229, 1232, 1235, 1238, 1241, 1244, 1247, 1250, 1253, + 1256, 1259, 1262, 1265, 1268, 1276, 1283, 1286, 1290, 1295, + 0, 1298, 0, 0, 1299, 0, 639, 1086, 0, 0, + 1104, 1105, 0, 1328, 1330, 1331, 1332, 1345, 0, 1350, + 1351, 0, 1356, 0, 1360, 1371, 0, 991, 948, 949, + 996, 997, 0, 0, 939, 1475, 85, 1476, 1473, 0, + 1471, 1469, 1528, 0, 1537, 1538, 1547, 1548, 1554, 0, + 1560, 0, 1617, 1629, 1625, 0, 90, 0, 0, 0, + 1486, 194, 0, 213, 0, 616, 0, 619, 609, 696, + 697, 0, 709, 701, 703, 704, 686, -2, 1512, 0, + 0, 0, 591, 1424, 0, 0, 1300, 0, 635, 636, + 1094, 1087, 0, 1072, 1073, 1091, 1325, 1327, 0, 0, + 0, 0, 990, 992, 993, 84, 0, 1472, 1112, 0, + 1540, 1541, 1568, 1566, 1635, 0, 1631, 0, 1627, 0, + 0, 959, 966, 0, 91, 443, 436, 1512, 0, 0, + 0, 689, 690, 691, 692, 693, 694, 695, 580, 1514, + 142, 143, 0, 510, 511, 512, 136, 0, 1145, 1296, + 1088, 0, 0, 0, 0, 0, 1346, 0, 1352, 0, + 1357, 0, 950, 951, 1474, 0, 0, 0, 1622, 0, + 0, 1620, 0, 0, 1624, 621, 0, 623, 0, -2, + 431, 444, 0, 188, 214, 215, 0, 0, 218, 219, + 220, 211, 212, 132, 0, 0, 710, 0, 1515, 1516, + 0, 139, 0, 0, 1079, 1080, 1081, 1082, 1084, 0, + 0, 0, 0, 1113, 1092, 1618, 0, 1630, 0, 1626, + 622, 0, 0, 386, 0, 632, 432, 433, 0, 439, + 440, 441, 442, 216, 217, 644, 0, 0, 509, 587, + 1425, 0, 0, 1347, 0, 1353, 0, 1358, 0, 1632, + 1628, 624, 625, 633, 0, 434, 0, 435, 0, 0, + 0, 613, 0, 644, 1513, 1089, 1083, 1085, 0, 0, + 1111, 0, 634, 630, 445, 447, 448, 0, 0, 446, + 645, 614, 1348, 1354, 0, 449, 450, 451, 626, 627, + 628, 629, } var yyTok1 = [...]int{ 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, - 3, 3, 3, 144, 3, 3, 3, 172, 164, 3, - 87, 89, 169, 167, 88, 168, 222, 170, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 722, - 152, 151, 153, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 145, 3, 3, 3, 173, 165, 3, + 87, 89, 170, 168, 88, 169, 223, 171, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 733, + 153, 152, 154, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 174, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 175, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 140, 3, 175, + 3, 3, 3, 3, 141, 3, 176, } var yyTok2 = [...]int{ @@ -9592,14 +9717,14 @@ var yyTok2 = [...]int{ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, - 135, 136, 137, 138, 139, 141, 142, 143, 145, 146, - 147, 148, 149, 150, 154, 155, 156, 157, 158, 159, - 160, 161, 162, 163, 165, 166, 171, 173, 176, 177, + 135, 136, 137, 138, 139, 140, 142, 143, 144, 146, + 147, 148, 149, 150, 151, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 166, 167, 172, 174, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, - 218, 219, 220, 221, 223, 224, 225, 226, 227, 228, + 218, 219, 220, 221, 222, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, @@ -9697,7 +9822,9 @@ var yyTok3 = [...]int{ 58030, 705, 58031, 706, 58032, 707, 58033, 708, 58034, 709, 58035, 710, 58036, 711, 58037, 712, 58038, 713, 58039, 714, 58040, 715, 58041, 716, 58042, 717, 58043, 718, 58044, 719, - 58045, 720, 58046, 721, 0, + 58045, 720, 58046, 721, 58047, 722, 58048, 723, 58049, 724, + 58050, 725, 58051, 726, 58052, 727, 58053, 728, 58054, 729, + 58055, 730, 58056, 731, 58057, 732, 0, } var yyErrorMessages 
= [...]struct { @@ -10047,7 +10174,7 @@ yydefault: case 1: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:604 +//line sql.y:605 { stmt := yyDollar[2].statementUnion() // If the statement is empty and we have comments @@ -10061,199 +10188,199 @@ yydefault: } case 2: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:617 +//line sql.y:618 { } case 3: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:618 +//line sql.y:619 { } case 4: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:622 +//line sql.y:623 { yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 40: + case 41: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:661 +//line sql.y:663 { setParseTree(yylex, nil) } - case 41: + case 42: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Variable -//line sql.y:667 +//line sql.y:669 { yyLOCAL = NewVariableExpression(yyDollar[1].str, SingleAt) } yyVAL.union = yyLOCAL - case 42: + case 43: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:673 +//line sql.y:675 { yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str)) } - case 43: + case 44: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:678 +//line sql.y:680 { yyVAL.identifierCI = NewIdentifierCI("") } - case 44: + case 45: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:682 +//line sql.y:684 { yyVAL.identifierCI = yyDollar[1].identifierCI } - case 45: + case 46: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Variable -//line sql.y:688 +//line sql.y:690 { yyLOCAL = NewVariableExpression(string(yyDollar[1].str), SingleAt) } yyVAL.union = yyLOCAL - case 46: + case 47: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Variable -//line sql.y:692 +//line sql.y:694 { yyLOCAL = NewVariableExpression(string(yyDollar[1].str), DoubleAt) } yyVAL.union = yyLOCAL - case 47: + case 48: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:698 +//line sql.y:700 { yyLOCAL = &OtherAdmin{} } yyVAL.union = yyLOCAL - case 48: + case 49: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:704 +//line 
sql.y:706 { yyLOCAL = &Load{} } yyVAL.union = yyLOCAL - case 49: + case 50: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *With -//line sql.y:710 +//line sql.y:712 { yyLOCAL = &With{ctes: yyDollar[2].ctesUnion(), Recursive: false} } yyVAL.union = yyLOCAL - case 50: + case 51: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *With -//line sql.y:714 +//line sql.y:716 { yyLOCAL = &With{ctes: yyDollar[3].ctesUnion(), Recursive: true} } yyVAL.union = yyLOCAL - case 51: + case 52: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *With -//line sql.y:719 +//line sql.y:721 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 52: + case 53: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *With -//line sql.y:723 +//line sql.y:725 { yyLOCAL = yyDollar[1].withUnion() } yyVAL.union = yyLOCAL - case 53: + case 54: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:729 +//line sql.y:731 { yySLICE := (*[]*CommonTableExpr)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].cteUnion()) } - case 54: + case 55: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*CommonTableExpr -//line sql.y:733 +//line sql.y:735 { yyLOCAL = []*CommonTableExpr{yyDollar[1].cteUnion()} } yyVAL.union = yyLOCAL - case 55: + case 56: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *CommonTableExpr -//line sql.y:739 +//line sql.y:741 { yyLOCAL = &CommonTableExpr{ID: yyDollar[1].identifierCS, Columns: yyDollar[2].columnsUnion(), Subquery: yyDollar[4].subqueryUnion()} } yyVAL.union = yyLOCAL - case 56: + case 57: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:745 +//line sql.y:747 { yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 57: + case 58: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:749 +//line sql.y:751 { yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 58: + case 59: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:753 +//line sql.y:755 { setLockInSelect(yyDollar[2].selStmtUnion(), yyDollar[3].lockUnion()) 
yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 59: + case 60: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:776 +//line sql.y:778 { yyDollar[1].selStmtUnion().SetOrderBy(yyDollar[2].orderByUnion()) yyDollar[1].selStmtUnion().SetLimit(yyDollar[3].limitUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 60: + case 61: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:782 +//line sql.y:784 { yyDollar[1].selStmtUnion().SetLimit(yyDollar[2].limitUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 61: + case 62: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:787 +//line sql.y:789 { yyDollar[1].selStmtUnion().SetOrderBy(yyDollar[2].orderByUnion()) yyDollar[1].selStmtUnion().SetLimit(yyDollar[3].limitUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 62: + case 63: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:793 +//line sql.y:795 { yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion()) yyDollar[2].selStmtUnion().SetOrderBy(yyDollar[3].orderByUnion()) @@ -10261,20 +10388,20 @@ yydefault: yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 63: + case 64: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:800 +//line sql.y:802 { yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion()) yyDollar[2].selStmtUnion().SetLimit(yyDollar[3].limitUnion()) yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 64: + case 65: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:806 +//line sql.y:808 { yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion()) yyDollar[2].selStmtUnion().SetOrderBy(yyDollar[3].orderByUnion()) @@ -10282,175 +10409,175 @@ yydefault: yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 65: + case 66: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:813 +//line 
sql.y:815 { yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion()) } - case 66: + case 67: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:817 +//line sql.y:819 { yyLOCAL = NewSelect(Comments(yyDollar[2].strs), SelectExprs{&Nextval{Expr: yyDollar[5].exprUnion()}}, []string{yyDollar[3].str} /*options*/, nil, TableExprs{&AliasedTableExpr{Expr: yyDollar[7].tableName}}, nil /*where*/, nil /*groupBy*/, nil /*having*/, nil) } yyVAL.union = yyLOCAL - case 67: + case 68: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:823 +//line sql.y:825 { yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 68: + case 69: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:827 +//line sql.y:829 { yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()} } yyVAL.union = yyLOCAL - case 69: + case 70: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:831 +//line sql.y:833 { yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()} } yyVAL.union = yyLOCAL - case 70: + case 71: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:835 +//line sql.y:837 { yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()} } yyVAL.union = yyLOCAL - case 71: + case 72: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:839 +//line sql.y:841 { yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()} } yyVAL.union = yyLOCAL - case 72: + case 73: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:845 +//line sql.y:847 { yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 73: + case 74: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL SelectStatement 
-//line sql.y:849 +//line sql.y:851 { setLockInSelect(yyDollar[1].selStmtUnion(), yyDollar[2].lockUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 74: + case 75: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:854 +//line sql.y:856 { yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 75: + case 76: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:858 +//line sql.y:860 { yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 76: + case 77: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:864 +//line sql.y:866 { yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 77: + case 78: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:868 +//line sql.y:870 { yyDollar[1].selStmtUnion().SetInto(yyDollar[2].selectIntoUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 78: + case 79: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:873 +//line sql.y:875 { yyDollar[1].selStmtUnion().SetInto(yyDollar[2].selectIntoUnion()) yyDollar[1].selStmtUnion().SetLock(yyDollar[3].lockUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 79: + case 80: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:879 +//line sql.y:881 { yyDollar[1].selStmtUnion().SetInto(yyDollar[3].selectIntoUnion()) yyDollar[1].selStmtUnion().SetLock(yyDollar[2].lockUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 80: + case 81: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:885 +//line sql.y:887 { yyDollar[1].selStmtUnion().SetInto(yyDollar[2].selectIntoUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 81: + case 82: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:892 +//line sql.y:894 { yyLOCAL = &Stream{Comments: Comments(yyDollar[2].strs).Parsed(), 
SelectExpr: yyDollar[3].selectExprUnion(), Table: yyDollar[5].tableName} } yyVAL.union = yyLOCAL - case 82: + case 83: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:898 +//line sql.y:900 { yyLOCAL = &VStream{Comments: Comments(yyDollar[2].strs).Parsed(), SelectExpr: yyDollar[3].selectExprUnion(), Table: yyDollar[5].tableName, Where: NewWhere(WhereClause, yyDollar[6].exprUnion()), Limit: yyDollar[7].limitUnion()} } yyVAL.union = yyLOCAL - case 83: + case 84: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:906 +//line sql.y:908 { yyLOCAL = NewSelect(Comments(yyDollar[2].strs), yyDollar[4].selectExprsUnion() /*SelectExprs*/, yyDollar[3].strs /*options*/, yyDollar[5].selectIntoUnion() /*into*/, yyDollar[6].tableExprsUnion() /*from*/, NewWhere(WhereClause, yyDollar[7].exprUnion()), GroupBy(yyDollar[8].exprsUnion()), NewWhere(HavingClause, yyDollar[9].exprUnion()), yyDollar[10].namedWindowsUnion()) } yyVAL.union = yyLOCAL - case 84: + case 85: yyDollar = yyS[yypt-9 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:910 +//line sql.y:912 { yyLOCAL = NewSelect(Comments(yyDollar[2].strs), yyDollar[4].selectExprsUnion() /*SelectExprs*/, yyDollar[3].strs /*options*/, nil, yyDollar[5].tableExprsUnion() /*from*/, NewWhere(WhereClause, yyDollar[6].exprUnion()), GroupBy(yyDollar[7].exprsUnion()), NewWhere(HavingClause, yyDollar[8].exprUnion()), yyDollar[9].namedWindowsUnion()) } yyVAL.union = yyLOCAL - case 85: + case 86: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:916 +//line sql.y:918 { // insert_data returns a *Insert pre-filled with Columns & Values ins := yyDollar[6].insUnion() @@ -10463,10 +10590,10 @@ yydefault: yyLOCAL = ins } yyVAL.union = yyLOCAL - case 86: + case 87: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Statement -//line sql.y:928 +//line sql.y:930 { cols := make(Columns, 0, len(yyDollar[7].updateExprsUnion())) vals := make(ValTuple, 0, len(yyDollar[8].updateExprsUnion())) @@ -10477,329 
+10604,329 @@ yydefault: yyLOCAL = &Insert{Action: yyDollar[1].insertActionUnion(), Comments: Comments(yyDollar[2].strs).Parsed(), Ignore: yyDollar[3].ignoreUnion(), Table: getAliasedTableExprFromTableName(yyDollar[4].tableName), Partitions: yyDollar[5].partitionsUnion(), Columns: cols, Rows: Values{vals}, OnDup: OnDup(yyDollar[8].updateExprsUnion())} } yyVAL.union = yyLOCAL - case 87: + case 88: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL InsertAction -//line sql.y:940 +//line sql.y:942 { yyLOCAL = InsertAct } yyVAL.union = yyLOCAL - case 88: + case 89: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL InsertAction -//line sql.y:944 +//line sql.y:946 { yyLOCAL = ReplaceAct } yyVAL.union = yyLOCAL - case 89: + case 90: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL Statement -//line sql.y:950 +//line sql.y:952 { yyLOCAL = &Update{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), TableExprs: yyDollar[5].tableExprsUnion(), Exprs: yyDollar[7].updateExprsUnion(), Where: NewWhere(WhereClause, yyDollar[8].exprUnion()), OrderBy: yyDollar[9].orderByUnion(), Limit: yyDollar[10].limitUnion()} } yyVAL.union = yyLOCAL - case 90: + case 91: yyDollar = yyS[yypt-11 : yypt+1] var yyLOCAL Statement -//line sql.y:956 +//line sql.y:958 { yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), TableExprs: TableExprs{&AliasedTableExpr{Expr: yyDollar[6].tableName, As: yyDollar[7].identifierCS}}, Partitions: yyDollar[8].partitionsUnion(), Where: NewWhere(WhereClause, yyDollar[9].exprUnion()), OrderBy: yyDollar[10].orderByUnion(), Limit: yyDollar[11].limitUnion()} } yyVAL.union = yyLOCAL - case 91: + case 92: yyDollar = yyS[yypt-9 : yypt+1] var yyLOCAL Statement -//line sql.y:960 +//line sql.y:962 { yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), Targets: 
yyDollar[6].tableNamesUnion(), TableExprs: yyDollar[8].tableExprsUnion(), Where: NewWhere(WhereClause, yyDollar[9].exprUnion())} } yyVAL.union = yyLOCAL - case 92: + case 93: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Statement -//line sql.y:964 +//line sql.y:966 { yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), Targets: yyDollar[5].tableNamesUnion(), TableExprs: yyDollar[7].tableExprsUnion(), Where: NewWhere(WhereClause, yyDollar[8].exprUnion())} } yyVAL.union = yyLOCAL - case 93: + case 94: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Statement -//line sql.y:968 +//line sql.y:970 { yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), Targets: yyDollar[5].tableNamesUnion(), TableExprs: yyDollar[7].tableExprsUnion(), Where: NewWhere(WhereClause, yyDollar[8].exprUnion())} } yyVAL.union = yyLOCAL - case 94: + case 95: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:973 +//line sql.y:975 { } - case 95: + case 96: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:974 +//line sql.y:976 { } - case 96: + case 97: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableNames -//line sql.y:978 +//line sql.y:980 { yyLOCAL = TableNames{yyDollar[1].tableName} } yyVAL.union = yyLOCAL - case 97: + case 98: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:982 +//line sql.y:984 { yySLICE := (*TableNames)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].tableName) } - case 98: + case 99: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableNames -//line sql.y:988 +//line sql.y:990 { yyLOCAL = TableNames{yyDollar[1].tableName} } yyVAL.union = yyLOCAL - case 99: + case 100: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:992 +//line sql.y:994 { yySLICE := (*TableNames)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].tableName) } - case 100: + case 101: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableNames -//line 
sql.y:998 +//line sql.y:1000 { yyLOCAL = TableNames{yyDollar[1].tableName} } yyVAL.union = yyLOCAL - case 101: + case 102: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1002 +//line sql.y:1004 { yySLICE := (*TableNames)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].tableName) } - case 102: + case 103: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Partitions -//line sql.y:1007 +//line sql.y:1009 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 103: + case 104: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Partitions -//line sql.y:1011 +//line sql.y:1013 { yyLOCAL = yyDollar[3].partitionsUnion() } yyVAL.union = yyLOCAL - case 104: + case 105: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:1017 +//line sql.y:1019 { yyLOCAL = NewSetStatement(Comments(yyDollar[2].strs).Parsed(), yyDollar[3].setExprsUnion()) } yyVAL.union = yyLOCAL - case 105: + case 106: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SetExprs -//line sql.y:1023 +//line sql.y:1025 { yyLOCAL = SetExprs{yyDollar[1].setExprUnion()} } yyVAL.union = yyLOCAL - case 106: + case 107: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1027 +//line sql.y:1029 { yySLICE := (*SetExprs)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].setExprUnion()) } - case 107: + case 108: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *SetExpr -//line sql.y:1033 +//line sql.y:1035 { yyLOCAL = &SetExpr{Var: yyDollar[1].variableUnion(), Expr: NewStrLiteral("on")} } yyVAL.union = yyLOCAL - case 108: + case 109: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *SetExpr -//line sql.y:1037 +//line sql.y:1039 { yyLOCAL = &SetExpr{Var: yyDollar[1].variableUnion(), Expr: NewStrLiteral("off")} } yyVAL.union = yyLOCAL - case 109: + case 110: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *SetExpr -//line sql.y:1041 +//line sql.y:1043 { yyLOCAL = &SetExpr{Var: yyDollar[1].variableUnion(), Expr: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 110: + case 111: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL 
*SetExpr -//line sql.y:1045 +//line sql.y:1047 { yyLOCAL = &SetExpr{Var: NewSetVariable(string(yyDollar[1].str), SessionScope), Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 111: + case 112: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Variable -//line sql.y:1051 +//line sql.y:1053 { yyLOCAL = NewSetVariable(string(yyDollar[1].str), SessionScope) } yyVAL.union = yyLOCAL - case 112: + case 113: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Variable -//line sql.y:1055 +//line sql.y:1057 { yyLOCAL = yyDollar[1].variableUnion() } yyVAL.union = yyLOCAL - case 113: + case 114: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Variable -//line sql.y:1059 +//line sql.y:1061 { yyLOCAL = NewSetVariable(string(yyDollar[2].str), yyDollar[1].scopeUnion()) } yyVAL.union = yyLOCAL - case 114: + case 115: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:1065 +//line sql.y:1067 { yyLOCAL = NewSetStatement(Comments(yyDollar[2].strs).Parsed(), UpdateSetExprsScope(yyDollar[5].setExprsUnion(), yyDollar[3].scopeUnion())) } yyVAL.union = yyLOCAL - case 115: + case 116: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:1069 +//line sql.y:1071 { yyLOCAL = NewSetStatement(Comments(yyDollar[2].strs).Parsed(), yyDollar[4].setExprsUnion()) } yyVAL.union = yyLOCAL - case 116: + case 117: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SetExprs -//line sql.y:1075 +//line sql.y:1077 { yyLOCAL = SetExprs{yyDollar[1].setExprUnion()} } yyVAL.union = yyLOCAL - case 117: + case 118: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1079 +//line sql.y:1081 { yySLICE := (*SetExprs)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].setExprUnion()) } - case 118: + case 119: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *SetExpr -//line sql.y:1085 +//line sql.y:1087 { yyLOCAL = &SetExpr{Var: NewSetVariable(TransactionIsolationStr, NextTxScope), Expr: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 119: + case 120: yyDollar = yyS[yypt-2 : 
yypt+1] var yyLOCAL *SetExpr -//line sql.y:1089 +//line sql.y:1091 { yyLOCAL = &SetExpr{Var: NewSetVariable(TransactionReadOnlyStr, NextTxScope), Expr: NewStrLiteral("off")} } yyVAL.union = yyLOCAL - case 120: + case 121: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SetExpr -//line sql.y:1093 +//line sql.y:1095 { yyLOCAL = &SetExpr{Var: NewSetVariable(TransactionReadOnlyStr, NextTxScope), Expr: NewStrLiteral("on")} } yyVAL.union = yyLOCAL - case 121: + case 122: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1099 +//line sql.y:1101 { yyVAL.str = RepeatableReadStr } - case 122: + case 123: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1103 +//line sql.y:1105 { yyVAL.str = ReadCommittedStr } - case 123: + case 124: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1107 +//line sql.y:1109 { yyVAL.str = ReadUncommittedStr } - case 124: + case 125: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1111 +//line sql.y:1113 { yyVAL.str = SerializableStr } - case 125: + case 126: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Scope -//line sql.y:1117 +//line sql.y:1119 { yyLOCAL = SessionScope } yyVAL.union = yyLOCAL - case 126: + case 127: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Scope -//line sql.y:1121 +//line sql.y:1123 { yyLOCAL = SessionScope } yyVAL.union = yyLOCAL - case 127: + case 128: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Scope -//line sql.y:1125 +//line sql.y:1127 { yyLOCAL = GlobalScope } yyVAL.union = yyLOCAL - case 128: + case 129: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:1131 +//line sql.y:1133 { yyDollar[1].createTableUnion().TableSpec = yyDollar[2].tableSpecUnion() yyDollar[1].createTableUnion().FullyParsed = true yyLOCAL = yyDollar[1].createTableUnion() } yyVAL.union = yyLOCAL - case 129: + case 130: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:1137 +//line sql.y:1139 { // Create table [name] like [name] yyDollar[1].createTableUnion().OptLike = yyDollar[2].optLikeUnion() @@ -10807,10 +10934,10 @@ yydefault: 
yyLOCAL = yyDollar[1].createTableUnion() } yyVAL.union = yyLOCAL - case 130: + case 131: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:1144 +//line sql.y:1146 { indexDef := yyDollar[1].alterTableUnion().AlterOptions[0].(*AddIndexDefinition).IndexDefinition indexDef.Columns = yyDollar[3].indexColumnsUnion() @@ -10820,413 +10947,413 @@ yydefault: yyLOCAL = yyDollar[1].alterTableUnion() } yyVAL.union = yyLOCAL - case 131: + case 132: yyDollar = yyS[yypt-12 : yypt+1] var yyLOCAL Statement -//line sql.y:1153 +//line sql.y:1155 { yyLOCAL = &CreateView{ViewName: yyDollar[8].tableName, Comments: Comments(yyDollar[2].strs).Parsed(), IsReplace: yyDollar[3].booleanUnion(), Algorithm: yyDollar[4].str, Definer: yyDollar[5].definerUnion(), Security: yyDollar[6].str, Columns: yyDollar[9].columnsUnion(), Select: yyDollar[11].selStmtUnion(), CheckOption: yyDollar[12].str} } yyVAL.union = yyLOCAL - case 132: + case 133: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:1157 +//line sql.y:1159 { yyDollar[1].createDatabaseUnion().FullyParsed = true yyDollar[1].createDatabaseUnion().CreateOptions = yyDollar[2].databaseOptionsUnion() yyLOCAL = yyDollar[1].createDatabaseUnion() } yyVAL.union = yyLOCAL - case 133: + case 134: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:1164 +//line sql.y:1166 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 134: + case 135: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:1168 +//line sql.y:1170 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 135: + case 136: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1173 +//line sql.y:1175 { yyVAL.identifierCI = NewIdentifierCI("") } - case 136: + case 137: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1177 +//line sql.y:1179 { yyVAL.identifierCI = yyDollar[2].identifierCI } - case 137: + case 138: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1183 +//line sql.y:1185 { yyVAL.identifierCI = yyDollar[1].identifierCI } - case 138: + case 139: 
yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []VindexParam -//line sql.y:1188 +//line sql.y:1190 { var v []VindexParam yyLOCAL = v } yyVAL.union = yyLOCAL - case 139: + case 140: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL []VindexParam -//line sql.y:1193 +//line sql.y:1195 { yyLOCAL = yyDollar[2].vindexParamsUnion() } yyVAL.union = yyLOCAL - case 140: + case 141: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []VindexParam -//line sql.y:1199 +//line sql.y:1201 { yyLOCAL = make([]VindexParam, 0, 4) yyLOCAL = append(yyLOCAL, yyDollar[1].vindexParam) } yyVAL.union = yyLOCAL - case 141: + case 142: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1204 +//line sql.y:1206 { yySLICE := (*[]VindexParam)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].vindexParam) } - case 142: + case 143: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1210 +//line sql.y:1212 { yyVAL.vindexParam = VindexParam{Key: yyDollar[1].identifierCI, Val: yyDollar[3].str} } - case 143: + case 144: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []*JSONObjectParam -//line sql.y:1215 +//line sql.y:1217 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 144: + case 145: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*JSONObjectParam -//line sql.y:1219 +//line sql.y:1221 { yyLOCAL = yyDollar[1].jsonObjectParamsUnion() } yyVAL.union = yyLOCAL - case 145: + case 146: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*JSONObjectParam -//line sql.y:1225 +//line sql.y:1227 { yyLOCAL = []*JSONObjectParam{yyDollar[1].jsonObjectParam} } yyVAL.union = yyLOCAL - case 146: + case 147: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1229 +//line sql.y:1231 { yySLICE := (*[]*JSONObjectParam)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].jsonObjectParam) } - case 147: + case 148: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1235 +//line sql.y:1237 { yyVAL.jsonObjectParam = &JSONObjectParam{Key: yyDollar[1].exprUnion(), Value: yyDollar[3].exprUnion()} } - case 148: + case 149: yyDollar = yyS[yypt-6 : yypt+1] 
var yyLOCAL *CreateTable -//line sql.y:1241 +//line sql.y:1243 { yyLOCAL = &CreateTable{Comments: Comments(yyDollar[2].strs).Parsed(), Table: yyDollar[6].tableName, IfNotExists: yyDollar[5].booleanUnion(), Temp: yyDollar[3].booleanUnion()} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 149: + case 150: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *AlterTable -//line sql.y:1248 +//line sql.y:1250 { yyLOCAL = &AlterTable{Comments: Comments(yyDollar[2].strs).Parsed(), Table: yyDollar[4].tableName} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 150: + case 151: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *AlterTable -//line sql.y:1255 +//line sql.y:1257 { yyLOCAL = &AlterTable{Table: yyDollar[7].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[4].identifierCI, Type: string(yyDollar[3].str)}, Options: yyDollar[5].indexOptionsUnion()}}}} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 151: + case 152: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *AlterTable -//line sql.y:1260 +//line sql.y:1262 { yyLOCAL = &AlterTable{Table: yyDollar[8].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[5].identifierCI, Type: string(yyDollar[3].str) + " " + string(yyDollar[4].str), Fulltext: true}, Options: yyDollar[6].indexOptionsUnion()}}}} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 152: + case 153: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *AlterTable -//line sql.y:1265 +//line sql.y:1267 { yyLOCAL = &AlterTable{Table: yyDollar[8].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[5].identifierCI, Type: string(yyDollar[3].str) + " " + string(yyDollar[4].str), Spatial: true}, Options: yyDollar[6].indexOptionsUnion()}}}} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 153: + case 154: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL 
*AlterTable -//line sql.y:1270 +//line sql.y:1272 { yyLOCAL = &AlterTable{Table: yyDollar[8].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[5].identifierCI, Type: string(yyDollar[3].str) + " " + string(yyDollar[4].str), Unique: true}, Options: yyDollar[6].indexOptionsUnion()}}}} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 154: + case 155: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *CreateDatabase -//line sql.y:1277 +//line sql.y:1279 { yyLOCAL = &CreateDatabase{Comments: Comments(yyDollar[4].strs).Parsed(), DBName: yyDollar[6].identifierCS, IfNotExists: yyDollar[5].booleanUnion()} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 155: + case 156: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *AlterDatabase -//line sql.y:1284 +//line sql.y:1286 { yyLOCAL = &AlterDatabase{} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 158: + case 159: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *TableSpec -//line sql.y:1295 +//line sql.y:1297 { yyLOCAL = yyDollar[2].tableSpecUnion() yyLOCAL.Options = yyDollar[4].tableOptionsUnion() yyLOCAL.PartitionOption = yyDollar[5].partitionOptionUnion() } yyVAL.union = yyLOCAL - case 159: + case 160: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []DatabaseOption -//line sql.y:1302 +//line sql.y:1304 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 160: + case 161: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []DatabaseOption -//line sql.y:1306 +//line sql.y:1308 { yyLOCAL = yyDollar[1].databaseOptionsUnion() } yyVAL.union = yyLOCAL - case 161: + case 162: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []DatabaseOption -//line sql.y:1312 +//line sql.y:1314 { yyLOCAL = []DatabaseOption{yyDollar[1].databaseOption} } yyVAL.union = yyLOCAL - case 162: + case 163: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []DatabaseOption -//line sql.y:1316 +//line sql.y:1318 { yyLOCAL = []DatabaseOption{yyDollar[1].databaseOption} } yyVAL.union = yyLOCAL - case 163: + case 
164: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []DatabaseOption -//line sql.y:1320 +//line sql.y:1322 { yyLOCAL = []DatabaseOption{yyDollar[1].databaseOption} } yyVAL.union = yyLOCAL - case 164: + case 165: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1324 +//line sql.y:1326 { yySLICE := (*[]DatabaseOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].databaseOption) } - case 165: + case 166: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1328 +//line sql.y:1330 { yySLICE := (*[]DatabaseOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].databaseOption) } - case 166: + case 167: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1332 +//line sql.y:1334 { yySLICE := (*[]DatabaseOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].databaseOption) } - case 167: + case 168: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:1338 +//line sql.y:1340 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 168: + case 169: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:1342 +//line sql.y:1344 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 169: + case 170: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1348 +//line sql.y:1350 { yyVAL.databaseOption = DatabaseOption{Type: CharacterSetType, Value: string(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()} } - case 170: + case 171: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1352 +//line sql.y:1354 { yyVAL.databaseOption = DatabaseOption{Type: CharacterSetType, Value: encodeSQLString(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()} } - case 171: + case 172: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1358 +//line sql.y:1360 { yyVAL.databaseOption = DatabaseOption{Type: CollateType, Value: string(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()} } - case 172: + case 173: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1362 +//line sql.y:1364 { yyVAL.databaseOption = DatabaseOption{Type: CollateType, Value: 
encodeSQLString(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()} } - case 173: + case 174: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1368 +//line sql.y:1370 { yyVAL.databaseOption = DatabaseOption{Type: EncryptionType, Value: string(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()} } - case 174: + case 175: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1372 +//line sql.y:1374 { yyVAL.databaseOption = DatabaseOption{Type: EncryptionType, Value: encodeSQLString(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()} } - case 175: + case 176: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *OptLike -//line sql.y:1378 +//line sql.y:1380 { yyLOCAL = &OptLike{LikeTable: yyDollar[2].tableName} } yyVAL.union = yyLOCAL - case 176: + case 177: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *OptLike -//line sql.y:1382 +//line sql.y:1384 { yyLOCAL = &OptLike{LikeTable: yyDollar[3].tableName} } yyVAL.union = yyLOCAL - case 177: + case 178: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*ColumnDefinition -//line sql.y:1388 +//line sql.y:1390 { yyLOCAL = []*ColumnDefinition{yyDollar[1].columnDefinitionUnion()} } yyVAL.union = yyLOCAL - case 178: + case 179: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1392 +//line sql.y:1394 { yySLICE := (*[]*ColumnDefinition)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].columnDefinitionUnion()) } - case 179: + case 180: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *TableSpec -//line sql.y:1398 +//line sql.y:1400 { yyLOCAL = &TableSpec{} yyLOCAL.AddColumn(yyDollar[1].columnDefinitionUnion()) } yyVAL.union = yyLOCAL - case 180: + case 181: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *TableSpec -//line sql.y:1403 +//line sql.y:1405 { yyLOCAL = &TableSpec{} yyLOCAL.AddConstraint(yyDollar[1].constraintDefinitionUnion()) } yyVAL.union = yyLOCAL - case 181: + case 182: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1408 +//line sql.y:1410 { yyVAL.tableSpecUnion().AddColumn(yyDollar[3].columnDefinitionUnion()) } - case 182: + 
case 183: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1412 +//line sql.y:1414 { yyVAL.tableSpecUnion().AddColumn(yyDollar[3].columnDefinitionUnion()) yyVAL.tableSpecUnion().AddConstraint(yyDollar[4].constraintDefinitionUnion()) } - case 183: + case 184: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1417 +//line sql.y:1419 { yyVAL.tableSpecUnion().AddIndex(yyDollar[3].indexDefinitionUnion()) } - case 184: + case 185: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1421 +//line sql.y:1423 { yyVAL.tableSpecUnion().AddConstraint(yyDollar[3].constraintDefinitionUnion()) } - case 185: + case 186: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1425 +//line sql.y:1427 { yyVAL.tableSpecUnion().AddConstraint(yyDollar[3].constraintDefinitionUnion()) } - case 186: + case 187: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *ColumnDefinition -//line sql.y:1436 +//line sql.y:1438 { yyDollar[2].columnType.Options = yyDollar[4].columnTypeOptionsUnion() if yyDollar[2].columnType.Options.Collate == "" { @@ -11236,10 +11363,10 @@ yydefault: yyLOCAL = &ColumnDefinition{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType} } yyVAL.union = yyLOCAL - case 187: + case 188: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL *ColumnDefinition -//line sql.y:1445 +//line sql.y:1447 { yyDollar[2].columnType.Options = yyDollar[9].columnTypeOptionsUnion() yyDollar[2].columnType.Options.As = yyDollar[7].exprUnion() @@ -11248,928 +11375,921 @@ yydefault: yyLOCAL = &ColumnDefinition{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType} } yyVAL.union = yyLOCAL - case 188: + case 189: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1454 +//line sql.y:1456 { yyVAL.str = "" } - case 189: + case 190: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1458 +//line sql.y:1460 { yyVAL.str = "" } - case 190: + case 191: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1467 +//line sql.y:1469 { yyLOCAL = &ColumnTypeOptions{Null: nil, Default: nil, OnUpdate: nil, Autoincrement: 
false, KeyOpt: ColKeyNone, Comment: nil, As: nil, Invisible: nil, Format: UnspecifiedFormat, EngineAttribute: nil, SecondaryEngineAttribute: nil} } yyVAL.union = yyLOCAL - case 191: + case 192: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1471 +//line sql.y:1473 { val := true yyDollar[1].columnTypeOptionsUnion().Null = &val yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 192: + case 193: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1477 +//line sql.y:1479 { val := false yyDollar[1].columnTypeOptionsUnion().Null = &val yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 193: + case 194: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1483 +//line sql.y:1485 { yyDollar[1].columnTypeOptionsUnion().Default = yyDollar[4].exprUnion() yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 194: + case 195: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1488 +//line sql.y:1490 { yyDollar[1].columnTypeOptionsUnion().Default = yyDollar[3].exprUnion() + yyDollar[1].columnTypeOptionsUnion().DefaultLiteral = true yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 195: + case 196: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1493 +//line sql.y:1496 { yyDollar[1].columnTypeOptionsUnion().OnUpdate = yyDollar[4].exprUnion() yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 196: + case 197: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1498 +//line sql.y:1501 { yyDollar[1].columnTypeOptionsUnion().Autoincrement = true yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 197: + case 198: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1503 +//line sql.y:1506 { yyDollar[1].columnTypeOptionsUnion().Comment = 
NewStrLiteral(yyDollar[3].str) yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 198: + case 199: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1508 +//line sql.y:1511 { yyDollar[1].columnTypeOptionsUnion().KeyOpt = yyDollar[2].colKeyOptUnion() yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 199: + case 200: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1513 +//line sql.y:1516 { yyDollar[1].columnTypeOptionsUnion().Collate = encodeSQLString(yyDollar[3].str) } - case 200: + case 201: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1517 +//line sql.y:1520 { yyDollar[1].columnTypeOptionsUnion().Collate = string(yyDollar[3].identifierCI.String()) yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 201: + case 202: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1522 +//line sql.y:1525 { yyDollar[1].columnTypeOptionsUnion().Format = yyDollar[3].columnFormatUnion() } - case 202: + case 203: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1526 +//line sql.y:1529 { yyDollar[1].columnTypeOptionsUnion().SRID = NewIntLiteral(yyDollar[3].str) yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 203: + case 204: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1531 +//line sql.y:1534 { val := false yyDollar[1].columnTypeOptionsUnion().Invisible = &val yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 204: + case 205: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1537 +//line sql.y:1540 { val := true yyDollar[1].columnTypeOptionsUnion().Invisible = &val yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 205: + case 206: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1543 +//line sql.y:1546 { yyDollar[1].columnTypeOptionsUnion().EngineAttribute = NewStrLiteral(yyDollar[4].str) 
} - case 206: + case 207: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1547 +//line sql.y:1550 { yyDollar[1].columnTypeOptionsUnion().SecondaryEngineAttribute = NewStrLiteral(yyDollar[4].str) } - case 207: + case 208: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnFormat -//line sql.y:1553 +//line sql.y:1556 { yyLOCAL = FixedFormat } yyVAL.union = yyLOCAL - case 208: + case 209: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnFormat -//line sql.y:1557 +//line sql.y:1560 { yyLOCAL = DynamicFormat } yyVAL.union = yyLOCAL - case 209: + case 210: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnFormat -//line sql.y:1561 +//line sql.y:1564 { yyLOCAL = DefaultFormat } yyVAL.union = yyLOCAL - case 210: + case 211: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnStorage -//line sql.y:1567 +//line sql.y:1570 { yyLOCAL = VirtualStorage } yyVAL.union = yyLOCAL - case 211: + case 212: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnStorage -//line sql.y:1571 +//line sql.y:1574 { yyLOCAL = StoredStorage } yyVAL.union = yyLOCAL - case 212: + case 213: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1576 +//line sql.y:1579 { yyLOCAL = &ColumnTypeOptions{} } yyVAL.union = yyLOCAL - case 213: + case 214: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1580 +//line sql.y:1583 { yyDollar[1].columnTypeOptionsUnion().Storage = yyDollar[2].columnStorageUnion() yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 214: + case 215: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1585 +//line sql.y:1588 { val := true yyDollar[1].columnTypeOptionsUnion().Null = &val yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 215: + case 216: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1591 +//line sql.y:1594 { val := false yyDollar[1].columnTypeOptionsUnion().Null = &val yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } 
yyVAL.union = yyLOCAL - case 216: + case 217: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1597 +//line sql.y:1600 { yyDollar[1].columnTypeOptionsUnion().Comment = NewStrLiteral(yyDollar[3].str) yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 217: + case 218: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1602 +//line sql.y:1605 { yyDollar[1].columnTypeOptionsUnion().KeyOpt = yyDollar[2].colKeyOptUnion() yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 218: + case 219: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1607 +//line sql.y:1610 { val := false yyDollar[1].columnTypeOptionsUnion().Invisible = &val yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 219: + case 220: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1613 +//line sql.y:1616 { val := true yyDollar[1].columnTypeOptionsUnion().Invisible = &val yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 220: + case 221: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1621 +//line sql.y:1624 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 222: + case 223: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1628 +//line sql.y:1631 { yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("current_timestamp"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 223: + case 224: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1632 +//line sql.y:1635 { yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("localtime"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 224: + case 225: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1636 +//line sql.y:1639 { yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("localtimestamp"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 225: + 
case 226: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1640 +//line sql.y:1643 { yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("utc_timestamp"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 226: + case 227: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1644 +//line sql.y:1647 { yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("now"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 227: + case 228: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1648 +//line sql.y:1651 { yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("sysdate"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 230: + case 231: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1658 +//line sql.y:1661 { yyLOCAL = &NullVal{} } yyVAL.union = yyLOCAL - case 232: + case 233: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1665 +//line sql.y:1668 { yyLOCAL = yyDollar[2].exprUnion() } yyVAL.union = yyLOCAL - case 233: + case 234: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1669 +//line sql.y:1672 { yyLOCAL = &UnaryExpr{Operator: UMinusOp, Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 234: + case 235: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1675 +//line sql.y:1678 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 235: + case 236: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1679 +//line sql.y:1682 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 236: + case 237: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1683 +//line sql.y:1686 { yyLOCAL = yyDollar[1].boolValUnion() } yyVAL.union = yyLOCAL - case 237: + case 238: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1687 +//line sql.y:1690 { yyLOCAL = NewHexLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 238: + case 239: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line 
sql.y:1691 +//line sql.y:1694 { yyLOCAL = NewHexNumLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 239: + case 240: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1695 +//line sql.y:1698 { yyLOCAL = NewBitLiteral(yyDollar[1].str[2:]) } yyVAL.union = yyLOCAL - case 240: + case 241: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1699 +//line sql.y:1702 { yyLOCAL = NewBitLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 241: + case 242: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1703 +//line sql.y:1706 { yyLOCAL = parseBindVariable(yylex, yyDollar[1].str[1:]) } yyVAL.union = yyLOCAL - case 242: + case 243: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1707 +//line sql.y:1710 { yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewBitLiteral(yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 243: + case 244: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1711 +//line sql.y:1714 { yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewHexNumLiteral(yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 244: + case 245: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1715 +//line sql.y:1718 { yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewBitLiteral(yyDollar[2].str[2:])} } yyVAL.union = yyLOCAL - case 245: + case 246: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1719 +//line sql.y:1722 { yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewHexLiteral(yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 246: + case 247: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1723 +//line sql.y:1726 { yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 247: + case 248: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1727 +//line sql.y:1730 { arg := parseBindVariable(yylex, yyDollar[2].str[1:]) yyLOCAL = 
&IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: arg} } yyVAL.union = yyLOCAL - case 248: + case 249: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1732 +//line sql.y:1735 { yyLOCAL = NewDateLiteral(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 249: + case 250: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1736 +//line sql.y:1739 { yyLOCAL = NewTimeLiteral(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 250: + case 251: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1740 +//line sql.y:1743 { yyLOCAL = NewTimestampLiteral(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 251: + case 252: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1746 +//line sql.y:1749 { yyVAL.str = Armscii8Str } - case 252: + case 253: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1750 +//line sql.y:1753 { yyVAL.str = ASCIIStr } - case 253: + case 254: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1754 +//line sql.y:1757 { yyVAL.str = Big5Str } - case 254: + case 255: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1758 +//line sql.y:1761 { yyVAL.str = UBinaryStr } - case 255: + case 256: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1762 +//line sql.y:1765 { yyVAL.str = Cp1250Str } - case 256: + case 257: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1766 +//line sql.y:1769 { yyVAL.str = Cp1251Str } - case 257: + case 258: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1770 +//line sql.y:1773 { yyVAL.str = Cp1256Str } - case 258: + case 259: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1774 +//line sql.y:1777 { yyVAL.str = Cp1257Str } - case 259: + case 260: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1778 +//line sql.y:1781 { yyVAL.str = Cp850Str } - case 260: + case 261: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1782 +//line sql.y:1785 { yyVAL.str = Cp852Str } - case 261: + case 262: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1786 +//line sql.y:1789 { yyVAL.str = Cp866Str } - case 262: + case 263: yyDollar = yyS[yypt-1 : yypt+1] 
-//line sql.y:1790 +//line sql.y:1793 { yyVAL.str = Cp932Str } - case 263: + case 264: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1794 +//line sql.y:1797 { yyVAL.str = Dec8Str } - case 264: + case 265: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1798 +//line sql.y:1801 { yyVAL.str = EucjpmsStr } - case 265: + case 266: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1802 +//line sql.y:1805 { yyVAL.str = EuckrStr } - case 266: + case 267: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1806 +//line sql.y:1809 { yyVAL.str = Gb18030Str } - case 267: + case 268: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1810 +//line sql.y:1813 { yyVAL.str = Gb2312Str } - case 268: + case 269: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1814 +//line sql.y:1817 { yyVAL.str = GbkStr } - case 269: + case 270: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1818 +//line sql.y:1821 { yyVAL.str = Geostd8Str } - case 270: + case 271: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1822 +//line sql.y:1825 { yyVAL.str = GreekStr } - case 271: + case 272: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1826 +//line sql.y:1829 { yyVAL.str = HebrewStr } - case 272: + case 273: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1830 +//line sql.y:1833 { yyVAL.str = Hp8Str } - case 273: + case 274: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1834 +//line sql.y:1837 { yyVAL.str = Keybcs2Str } - case 274: + case 275: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1838 +//line sql.y:1841 { yyVAL.str = Koi8rStr } - case 275: + case 276: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1842 +//line sql.y:1845 { yyVAL.str = Koi8uStr } - case 276: + case 277: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1846 +//line sql.y:1849 { yyVAL.str = Latin1Str } - case 277: + case 278: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1850 +//line sql.y:1853 { yyVAL.str = Latin2Str } - case 278: + case 279: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1854 +//line sql.y:1857 { yyVAL.str = Latin5Str } - case 279: + case 280: 
yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1858 +//line sql.y:1861 { yyVAL.str = Latin7Str } - case 280: + case 281: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1862 +//line sql.y:1865 { yyVAL.str = MacceStr } - case 281: + case 282: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1866 +//line sql.y:1869 { yyVAL.str = MacromanStr } - case 282: + case 283: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1870 +//line sql.y:1873 { yyVAL.str = SjisStr } - case 283: + case 284: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1874 +//line sql.y:1877 { yyVAL.str = Swe7Str } - case 284: + case 285: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1878 +//line sql.y:1881 { yyVAL.str = Tis620Str } - case 285: + case 286: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1882 +//line sql.y:1885 { yyVAL.str = Ucs2Str } - case 286: + case 287: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1886 +//line sql.y:1889 { yyVAL.str = UjisStr } - case 287: + case 288: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1890 +//line sql.y:1893 { yyVAL.str = Utf16Str } - case 288: + case 289: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1894 +//line sql.y:1897 { yyVAL.str = Utf16leStr } - case 289: + case 290: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1898 +//line sql.y:1901 { yyVAL.str = Utf32Str } - case 290: + case 291: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1902 +//line sql.y:1905 { - yyVAL.str = Utf8Str + yyVAL.str = Utf8mb3Str } - case 291: + case 292: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1906 +//line sql.y:1909 { yyVAL.str = Utf8mb4Str } - case 292: + case 293: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1910 +//line sql.y:1913 { - yyVAL.str = Utf8Str + yyVAL.str = Utf8mb3Str } - case 295: + case 296: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1920 +//line sql.y:1923 { yyLOCAL = NewIntLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 296: + case 297: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1924 +//line sql.y:1927 { yyLOCAL = 
NewFloatLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 297: + case 298: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1928 +//line sql.y:1931 { yyLOCAL = NewDecimalLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 298: + case 299: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1934 +//line sql.y:1937 { yyLOCAL = NewStrLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 299: + case 300: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1938 +//line sql.y:1941 { yyLOCAL = &UnaryExpr{Operator: NStringOp, Expr: NewStrLiteral(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 300: + case 301: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1942 +//line sql.y:1945 { yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewStrLiteral(yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 301: + case 302: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1948 +//line sql.y:1951 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 302: + case 303: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1952 +//line sql.y:1955 { yyLOCAL = parseBindVariable(yylex, yyDollar[1].str[1:]) } yyVAL.union = yyLOCAL - case 303: + case 304: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL ColumnKeyOption -//line sql.y:1958 +//line sql.y:1961 { yyLOCAL = ColKeyPrimary } yyVAL.union = yyLOCAL - case 304: + case 305: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnKeyOption -//line sql.y:1962 +//line sql.y:1965 { yyLOCAL = ColKeyUnique } yyVAL.union = yyLOCAL - case 305: + case 306: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL ColumnKeyOption -//line sql.y:1966 +//line sql.y:1969 { yyLOCAL = ColKeyUniqueKey } yyVAL.union = yyLOCAL - case 306: + case 307: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnKeyOption -//line sql.y:1970 +//line sql.y:1973 { yyLOCAL = ColKey } yyVAL.union = yyLOCAL - case 307: + case 308: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1976 +//line 
sql.y:1979 { yyVAL.columnType = yyDollar[1].columnType yyVAL.columnType.Unsigned = yyDollar[2].booleanUnion() yyVAL.columnType.Zerofill = yyDollar[3].booleanUnion() } - case 311: + case 312: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1987 +//line sql.y:1990 { yyVAL.columnType = yyDollar[1].columnType yyVAL.columnType.Length = yyDollar[2].literalUnion() } - case 312: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1992 - { - yyVAL.columnType = yyDollar[1].columnType - } case 313: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1998 +//line sql.y:1995 { - yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} + yyVAL.columnType = yyDollar[1].columnType } case 314: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2002 +//line sql.y:2001 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 315: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2006 +//line sql.y:2005 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 316: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2010 +//line sql.y:2009 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 317: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2014 +//line sql.y:2013 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 318: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2018 +//line sql.y:2017 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 319: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2022 +//line sql.y:2021 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 320: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2026 +//line sql.y:2025 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 321: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2030 +//line sql.y:2029 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 322: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2036 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2033 { yyVAL.columnType = &ColumnType{Type: 
string(yyDollar[1].str)} - yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length - yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } case 323: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2042 +//line sql.y:2039 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length @@ -12177,7 +12297,7 @@ yydefault: } case 324: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2048 +//line sql.y:2045 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length @@ -12185,7 +12305,7 @@ yydefault: } case 325: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2054 +//line sql.y:2051 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length @@ -12193,7 +12313,7 @@ yydefault: } case 326: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2060 +//line sql.y:2057 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length @@ -12201,7 +12321,7 @@ yydefault: } case 327: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2066 +//line sql.y:2063 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length @@ -12209,1759 +12329,1767 @@ yydefault: } case 328: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2072 +//line sql.y:2069 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } case 329: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2080 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2075 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } case 330: - yyDollar = yyS[yypt-2 : 
yypt+1] -//line sql.y:2084 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2083 { - yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} + yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 331: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2088 +//line sql.y:2087 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } case 332: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2092 +//line sql.y:2091 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } case 333: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2096 +//line sql.y:2095 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } case 334: + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2099 + { + yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} + } + case 335: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2102 +//line sql.y:2105 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: yyDollar[3].columnCharset} } - case 335: + case 336: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2106 +//line sql.y:2109 { // CHAR BYTE is an alias for binary. 
See also: // https://dev.mysql.com/doc/refman/8.0/en/string-type-syntax.html yyVAL.columnType = &ColumnType{Type: "binary", Length: yyDollar[2].literalUnion()} } - case 336: + case 337: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2112 +//line sql.y:2115 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: yyDollar[3].columnCharset} } - case 337: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2116 - { - yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} - } case 338: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2120 +//line sql.y:2119 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } case 339: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2124 +//line sql.y:2123 { - yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset} + yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } case 340: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2128 +//line sql.y:2127 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset} } case 341: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2132 +//line sql.y:2131 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset} } case 342: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2136 +//line sql.y:2135 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset} } case 343: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2140 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:2139 { - yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} + yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset} } case 344: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2144 +//line sql.y:2143 { yyVAL.columnType = &ColumnType{Type: 
string(yyDollar[1].str)} } case 345: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2148 +//line sql.y:2147 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 346: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2152 +//line sql.y:2151 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 347: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2156 +//line sql.y:2155 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 348: - yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2160 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2159 { - yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].columnCharset} + yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 349: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2165 +//line sql.y:2163 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].columnCharset} } case 350: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2171 + yyDollar = yyS[yypt-5 : yypt+1] +//line sql.y:2168 { - yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} + yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].columnCharset} } case 351: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2175 +//line sql.y:2174 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 352: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2179 +//line sql.y:2178 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 353: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2183 +//line sql.y:2182 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 354: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2187 +//line sql.y:2186 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 355: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2191 +//line sql.y:2190 { yyVAL.columnType = 
&ColumnType{Type: string(yyDollar[1].str)} } case 356: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2195 +//line sql.y:2194 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 357: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2199 +//line sql.y:2198 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 358: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2205 +//line sql.y:2202 + { + yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} + } + case 359: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2208 { yyVAL.strs = make([]string, 0, 4) yyVAL.strs = append(yyVAL.strs, encodeSQLString(yyDollar[1].str)) } - case 359: + case 360: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2210 +//line sql.y:2213 { yyVAL.strs = append(yyDollar[1].strs, encodeSQLString(yyDollar[3].str)) } - case 360: + case 361: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *Literal -//line sql.y:2215 +//line sql.y:2218 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 361: + case 362: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *Literal -//line sql.y:2219 +//line sql.y:2222 { yyLOCAL = NewIntLiteral(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 362: + case 363: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2224 +//line sql.y:2227 { yyVAL.LengthScaleOption = LengthScaleOption{} } - case 363: + case 364: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2228 +//line sql.y:2231 { yyVAL.LengthScaleOption = LengthScaleOption{ Length: NewIntLiteral(yyDollar[2].str), Scale: NewIntLiteral(yyDollar[4].str), } } - case 364: + case 365: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2237 +//line sql.y:2240 { yyVAL.LengthScaleOption = yyDollar[1].LengthScaleOption } - case 365: + case 366: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2241 +//line sql.y:2244 { yyVAL.LengthScaleOption = LengthScaleOption{ Length: NewIntLiteral(yyDollar[2].str), } } - case 366: + case 367: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2248 +//line sql.y:2251 { yyVAL.LengthScaleOption = 
LengthScaleOption{} } - case 367: + case 368: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2252 +//line sql.y:2255 { yyVAL.LengthScaleOption = LengthScaleOption{ Length: NewIntLiteral(yyDollar[2].str), } } - case 368: + case 369: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2258 +//line sql.y:2261 { yyVAL.LengthScaleOption = LengthScaleOption{ Length: NewIntLiteral(yyDollar[2].str), Scale: NewIntLiteral(yyDollar[4].str), } } - case 369: + case 370: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:2266 +//line sql.y:2269 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 370: + case 371: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2270 +//line sql.y:2273 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 371: + case 372: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2274 +//line sql.y:2277 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 372: + case 373: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:2279 +//line sql.y:2282 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 373: + case 374: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2283 +//line sql.y:2286 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 374: + case 375: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2288 +//line sql.y:2291 { yyVAL.columnCharset = ColumnCharset{} } - case 375: + case 376: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2292 +//line sql.y:2295 { yyVAL.columnCharset = ColumnCharset{Name: string(yyDollar[2].identifierCI.String()), Binary: yyDollar[3].booleanUnion()} } - case 376: + case 377: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2296 +//line sql.y:2299 { yyVAL.columnCharset = ColumnCharset{Name: encodeSQLString(yyDollar[2].str), Binary: yyDollar[3].booleanUnion()} } - case 377: + case 378: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2300 +//line sql.y:2303 { yyVAL.columnCharset = ColumnCharset{Name: string(yyDollar[2].str)} } - case 378: + case 379: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2304 
+//line sql.y:2307 { // ASCII: Shorthand for CHARACTER SET latin1. yyVAL.columnCharset = ColumnCharset{Name: "latin1", Binary: yyDollar[2].booleanUnion()} } - case 379: + case 380: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2309 +//line sql.y:2312 { // UNICODE: Shorthand for CHARACTER SET ucs2. yyVAL.columnCharset = ColumnCharset{Name: "ucs2", Binary: yyDollar[2].booleanUnion()} } - case 380: + case 381: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2314 +//line sql.y:2317 { // BINARY: Shorthand for default CHARACTER SET but with binary collation yyVAL.columnCharset = ColumnCharset{Name: "", Binary: true} } - case 381: + case 382: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2319 +//line sql.y:2322 { // BINARY ASCII: Shorthand for CHARACTER SET latin1 with binary collation yyVAL.columnCharset = ColumnCharset{Name: "latin1", Binary: true} } - case 382: + case 383: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2324 +//line sql.y:2327 { // BINARY UNICODE: Shorthand for CHARACTER SET ucs2 with binary collation yyVAL.columnCharset = ColumnCharset{Name: "ucs2", Binary: true} } - case 383: + case 384: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:2330 +//line sql.y:2333 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 384: + case 385: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2334 +//line sql.y:2337 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 385: + case 386: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2339 +//line sql.y:2342 { yyVAL.str = "" } - case 386: + case 387: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2343 +//line sql.y:2346 { yyVAL.str = string(yyDollar[2].identifierCI.String()) } - case 387: + case 388: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2347 +//line sql.y:2350 { yyVAL.str = encodeSQLString(yyDollar[2].str) } - case 388: + case 389: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *IndexDefinition -//line sql.y:2353 +//line sql.y:2356 { yyLOCAL = &IndexDefinition{Info: yyDollar[1].indexInfoUnion(), 
Columns: yyDollar[3].indexColumnsUnion(), Options: yyDollar[5].indexOptionsUnion()} } yyVAL.union = yyLOCAL - case 389: + case 390: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []*IndexOption -//line sql.y:2358 +//line sql.y:2361 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 390: + case 391: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*IndexOption -//line sql.y:2362 +//line sql.y:2365 { yyLOCAL = yyDollar[1].indexOptionsUnion() } yyVAL.union = yyLOCAL - case 391: + case 392: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*IndexOption -//line sql.y:2368 +//line sql.y:2371 { yyLOCAL = []*IndexOption{yyDollar[1].indexOptionUnion()} } yyVAL.union = yyLOCAL - case 392: + case 393: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2372 +//line sql.y:2375 { yySLICE := (*[]*IndexOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].indexOptionUnion()) } - case 393: + case 394: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2378 +//line sql.y:2381 { yyLOCAL = yyDollar[1].indexOptionUnion() } yyVAL.union = yyLOCAL - case 394: + case 395: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2382 +//line sql.y:2385 { // should not be string yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 395: + case 396: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2387 +//line sql.y:2390 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 396: + case 397: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2391 +//line sql.y:2394 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 397: + case 398: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2395 +//line sql.y:2398 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 398: + case 399: yyDollar = 
yyS[yypt-3 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2399 +//line sql.y:2402 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str) + " " + string(yyDollar[2].str), String: yyDollar[3].identifierCI.String()} } yyVAL.union = yyLOCAL - case 399: + case 400: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2403 +//line sql.y:2406 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 400: + case 401: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2407 +//line sql.y:2410 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 401: + case 402: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2413 +//line sql.y:2416 { yyVAL.str = "" } - case 402: + case 403: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2417 +//line sql.y:2420 { yyVAL.str = string(yyDollar[1].str) } - case 403: + case 404: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *IndexInfo -//line sql.y:2423 +//line sql.y:2426 { yyLOCAL = &IndexInfo{Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), ConstraintName: NewIdentifierCI(yyDollar[1].str), Name: NewIdentifierCI("PRIMARY"), Primary: true, Unique: true} } yyVAL.union = yyLOCAL - case 404: + case 405: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *IndexInfo -//line sql.y:2427 +//line sql.y:2430 { yyLOCAL = &IndexInfo{Type: string(yyDollar[1].str) + " " + string(yyDollar[2].str), Name: NewIdentifierCI(yyDollar[3].str), Spatial: true, Unique: false} } yyVAL.union = yyLOCAL - case 405: + case 406: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *IndexInfo -//line sql.y:2431 +//line sql.y:2434 { yyLOCAL = &IndexInfo{Type: string(yyDollar[1].str) + " " + string(yyDollar[2].str), Name: NewIdentifierCI(yyDollar[3].str), Fulltext: true, Unique: false} } yyVAL.union = yyLOCAL - case 406: + case 407: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *IndexInfo -//line sql.y:2435 
+//line sql.y:2438 { yyLOCAL = &IndexInfo{Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), ConstraintName: NewIdentifierCI(yyDollar[1].str), Name: NewIdentifierCI(yyDollar[4].str), Unique: true} } yyVAL.union = yyLOCAL - case 407: + case 408: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *IndexInfo -//line sql.y:2439 +//line sql.y:2442 { yyLOCAL = &IndexInfo{Type: string(yyDollar[1].str), Name: NewIdentifierCI(yyDollar[2].str), Unique: false} } yyVAL.union = yyLOCAL - case 408: + case 409: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2444 +//line sql.y:2447 { yyVAL.str = "" } - case 409: + case 410: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2448 +//line sql.y:2451 { yyVAL.str = yyDollar[2].str } - case 410: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2454 - { - yyVAL.str = string(yyDollar[1].str) - } case 411: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2458 +//line sql.y:2457 { yyVAL.str = string(yyDollar[1].str) } case 412: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2462 +//line sql.y:2461 { yyVAL.str = string(yyDollar[1].str) } case 413: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2468 +//line sql.y:2465 { yyVAL.str = string(yyDollar[1].str) } case 414: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2472 +//line sql.y:2471 { yyVAL.str = string(yyDollar[1].str) } case 415: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:2475 + { + yyVAL.str = string(yyDollar[1].str) + } + case 416: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2477 +//line sql.y:2480 { yyVAL.str = "key" } - case 416: + case 417: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2481 +//line sql.y:2484 { yyVAL.str = yyDollar[1].str } - case 417: + case 418: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2487 +//line sql.y:2490 { yyVAL.str = string(yyDollar[1].str) } - case 418: + case 419: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2491 +//line sql.y:2494 { yyVAL.str = string(yyDollar[1].str) } - case 419: + case 420: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2496 
+//line sql.y:2499 { yyVAL.str = "" } - case 420: + case 421: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2500 +//line sql.y:2503 { yyVAL.str = string(yyDollar[1].identifierCI.String()) } - case 421: + case 422: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*IndexColumn -//line sql.y:2506 +//line sql.y:2509 { yyLOCAL = []*IndexColumn{yyDollar[1].indexColumnUnion()} } yyVAL.union = yyLOCAL - case 422: + case 423: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2510 +//line sql.y:2513 { yySLICE := (*[]*IndexColumn)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].indexColumnUnion()) } - case 423: + case 424: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *IndexColumn -//line sql.y:2516 +//line sql.y:2519 { yyLOCAL = &IndexColumn{Column: yyDollar[1].identifierCI, Length: yyDollar[2].literalUnion(), Direction: yyDollar[3].orderDirectionUnion()} } yyVAL.union = yyLOCAL - case 424: + case 425: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *IndexColumn -//line sql.y:2520 +//line sql.y:2523 { yyLOCAL = &IndexColumn{Expression: yyDollar[2].exprUnion(), Direction: yyDollar[4].orderDirectionUnion()} } yyVAL.union = yyLOCAL - case 425: + case 426: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ConstraintDefinition -//line sql.y:2526 +//line sql.y:2529 { yyLOCAL = &ConstraintDefinition{Name: yyDollar[2].identifierCI, Details: yyDollar[3].constraintInfoUnion()} } yyVAL.union = yyLOCAL - case 426: + case 427: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConstraintDefinition -//line sql.y:2530 +//line sql.y:2533 { yyLOCAL = &ConstraintDefinition{Details: yyDollar[1].constraintInfoUnion()} } yyVAL.union = yyLOCAL - case 427: + case 428: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ConstraintDefinition -//line sql.y:2536 +//line sql.y:2539 { yyLOCAL = &ConstraintDefinition{Name: yyDollar[2].identifierCI, Details: yyDollar[3].constraintInfoUnion()} } yyVAL.union = yyLOCAL - case 428: + case 429: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConstraintDefinition -//line 
sql.y:2540 +//line sql.y:2543 { yyLOCAL = &ConstraintDefinition{Details: yyDollar[1].constraintInfoUnion()} } yyVAL.union = yyLOCAL - case 429: + case 430: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL ConstraintInfo -//line sql.y:2546 +//line sql.y:2549 { yyLOCAL = &ForeignKeyDefinition{IndexName: NewIdentifierCI(yyDollar[3].str), Source: yyDollar[5].columnsUnion(), ReferenceDefinition: yyDollar[7].referenceDefinitionUnion()} } yyVAL.union = yyLOCAL - case 430: + case 431: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2552 +//line sql.y:2555 { yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion()} } yyVAL.union = yyLOCAL - case 431: + case 432: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2556 +//line sql.y:2559 { yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnDelete: yyDollar[7].referenceActionUnion()} } yyVAL.union = yyLOCAL - case 432: + case 433: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2560 +//line sql.y:2563 { yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnUpdate: yyDollar[7].referenceActionUnion()} } yyVAL.union = yyLOCAL - case 433: + case 434: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2564 +//line sql.y:2567 { yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnDelete: yyDollar[7].referenceActionUnion(), OnUpdate: yyDollar[8].referenceActionUnion()} } yyVAL.union = yyLOCAL - case 434: + case 435: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2568 +//line 
sql.y:2571 { yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnUpdate: yyDollar[7].referenceActionUnion(), OnDelete: yyDollar[8].referenceActionUnion()} } yyVAL.union = yyLOCAL - case 435: + case 436: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2573 +//line sql.y:2576 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 436: + case 437: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2577 +//line sql.y:2580 { yyLOCAL = yyDollar[1].referenceDefinitionUnion() } yyVAL.union = yyLOCAL - case 437: + case 438: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL ConstraintInfo -//line sql.y:2583 +//line sql.y:2586 { yyLOCAL = &CheckConstraintDefinition{Expr: yyDollar[3].exprUnion(), Enforced: yyDollar[5].booleanUnion()} } yyVAL.union = yyLOCAL - case 438: + case 439: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL MatchAction -//line sql.y:2589 +//line sql.y:2592 { yyLOCAL = yyDollar[2].matchActionUnion() } yyVAL.union = yyLOCAL - case 439: + case 440: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL MatchAction -//line sql.y:2595 +//line sql.y:2598 { yyLOCAL = Full } yyVAL.union = yyLOCAL - case 440: + case 441: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL MatchAction -//line sql.y:2599 +//line sql.y:2602 { yyLOCAL = Partial } yyVAL.union = yyLOCAL - case 441: + case 442: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL MatchAction -//line sql.y:2603 +//line sql.y:2606 { yyLOCAL = Simple } yyVAL.union = yyLOCAL - case 442: + case 443: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL MatchAction -//line sql.y:2608 +//line sql.y:2611 { yyLOCAL = DefaultMatch } yyVAL.union = yyLOCAL - case 443: + case 444: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL MatchAction -//line sql.y:2612 +//line sql.y:2615 { yyLOCAL = yyDollar[1].matchActionUnion() } yyVAL.union = yyLOCAL - case 444: + case 445: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL 
ReferenceAction -//line sql.y:2618 +//line sql.y:2621 { yyLOCAL = yyDollar[3].referenceActionUnion() } yyVAL.union = yyLOCAL - case 445: + case 446: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ReferenceAction -//line sql.y:2624 +//line sql.y:2627 { yyLOCAL = yyDollar[3].referenceActionUnion() } yyVAL.union = yyLOCAL - case 446: + case 447: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ReferenceAction -//line sql.y:2630 +//line sql.y:2633 { yyLOCAL = Restrict } yyVAL.union = yyLOCAL - case 447: + case 448: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ReferenceAction -//line sql.y:2634 +//line sql.y:2637 { yyLOCAL = Cascade } yyVAL.union = yyLOCAL - case 448: + case 449: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL ReferenceAction -//line sql.y:2638 +//line sql.y:2641 { yyLOCAL = NoAction } yyVAL.union = yyLOCAL - case 449: + case 450: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL ReferenceAction -//line sql.y:2642 +//line sql.y:2645 { yyLOCAL = SetDefault } yyVAL.union = yyLOCAL - case 450: + case 451: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL ReferenceAction -//line sql.y:2646 +//line sql.y:2649 { yyLOCAL = SetNull } yyVAL.union = yyLOCAL - case 451: + case 452: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2651 +//line sql.y:2654 { yyVAL.str = "" } - case 452: + case 453: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2655 +//line sql.y:2658 { yyVAL.str = string(yyDollar[1].str) } - case 453: + case 454: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2659 +//line sql.y:2662 { yyVAL.str = string(yyDollar[1].str) } - case 454: + case 455: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2665 +//line sql.y:2668 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 455: + case 456: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:2669 +//line sql.y:2672 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 456: + case 457: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:2674 +//line sql.y:2677 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 
457: + case 458: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2678 +//line sql.y:2681 { yyLOCAL = yyDollar[1].booleanUnion() } yyVAL.union = yyLOCAL - case 458: + case 459: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL TableOptions -//line sql.y:2683 +//line sql.y:2686 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 459: + case 460: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableOptions -//line sql.y:2687 +//line sql.y:2690 { yyLOCAL = yyDollar[1].tableOptionsUnion() } yyVAL.union = yyLOCAL - case 460: + case 461: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableOptions -//line sql.y:2693 +//line sql.y:2696 { yyLOCAL = TableOptions{yyDollar[1].tableOptionUnion()} } yyVAL.union = yyLOCAL - case 461: + case 462: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2697 +//line sql.y:2700 { yySLICE := (*TableOptions)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].tableOptionUnion()) } - case 462: + case 463: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2701 +//line sql.y:2704 { yySLICE := (*TableOptions)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].tableOptionUnion()) } - case 463: + case 464: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableOptions -//line sql.y:2707 +//line sql.y:2710 { yyLOCAL = TableOptions{yyDollar[1].tableOptionUnion()} } yyVAL.union = yyLOCAL - case 464: + case 465: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2711 +//line sql.y:2714 { yySLICE := (*TableOptions)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].tableOptionUnion()) } - case 465: + case 466: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2717 +//line sql.y:2720 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 466: + case 467: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2721 +//line sql.y:2724 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union 
= yyLOCAL - case 467: + case 468: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2725 +//line sql.y:2728 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 468: + case 469: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2729 +//line sql.y:2732 { yyLOCAL = &TableOption{Name: (string(yyDollar[2].str)), String: yyDollar[4].str, CaseSensitive: true} } yyVAL.union = yyLOCAL - case 469: + case 470: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2733 +//line sql.y:2736 { yyLOCAL = &TableOption{Name: string(yyDollar[2].str), String: yyDollar[4].str, CaseSensitive: true} } yyVAL.union = yyLOCAL - case 470: + case 471: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2737 +//line sql.y:2740 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 471: + case 472: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2741 +//line sql.y:2744 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 472: + case 473: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2745 +//line sql.y:2748 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 473: + case 474: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2749 +//line sql.y:2752 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 474: + case 475: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2753 +//line sql.y:2756 { yyLOCAL = &TableOption{Name: (string(yyDollar[1].str) + " " + string(yyDollar[2].str)), Value: NewStrLiteral(yyDollar[4].str)} } yyVAL.union = yyLOCAL - case 475: + case 476: yyDollar = 
yyS[yypt-4 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2757 +//line sql.y:2760 { yyLOCAL = &TableOption{Name: (string(yyDollar[1].str) + " " + string(yyDollar[2].str)), Value: NewStrLiteral(yyDollar[4].str)} } yyVAL.union = yyLOCAL - case 476: + case 477: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2761 +//line sql.y:2764 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 477: + case 478: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2765 +//line sql.y:2768 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 478: + case 479: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2769 +//line sql.y:2772 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: yyDollar[3].identifierCS.String(), CaseSensitive: true} } yyVAL.union = yyLOCAL - case 479: + case 480: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2773 +//line sql.y:2776 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 480: + case 481: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2777 +//line sql.y:2780 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 481: + case 482: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2781 +//line sql.y:2784 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 482: + case 483: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2785 +//line sql.y:2788 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 483: + case 484: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL 
*TableOption -//line sql.y:2789 +//line sql.y:2792 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 484: + case 485: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2793 +//line sql.y:2796 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 485: + case 486: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2797 +//line sql.y:2800 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 486: + case 487: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2801 +//line sql.y:2804 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 487: + case 488: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2805 +//line sql.y:2808 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 488: + case 489: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2809 +//line sql.y:2812 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 489: + case 490: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2813 +//line sql.y:2816 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 490: + case 491: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2817 +//line sql.y:2820 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 491: + case 492: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2821 +//line sql.y:2824 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: 
NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 492: + case 493: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2825 +//line sql.y:2828 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 493: + case 494: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2829 +//line sql.y:2832 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 494: + case 495: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2833 +//line sql.y:2836 { - yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: (yyDollar[3].identifierCI.String() + yyDollar[4].str)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: (yyDollar[3].identifierCI.String() + yyDollar[4].str), CaseSensitive: true} } yyVAL.union = yyLOCAL - case 495: + case 496: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2837 +//line sql.y:2840 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Tables: yyDollar[4].tableNamesUnion()} } yyVAL.union = yyLOCAL - case 496: + case 497: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2842 +//line sql.y:2845 { yyVAL.str = "" } - case 497: + case 498: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2846 +//line sql.y:2849 { yyVAL.str = " " + string(yyDollar[1].str) + " " + string(yyDollar[2].str) } - case 498: + case 499: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2850 +//line sql.y:2853 { yyVAL.str = " " + string(yyDollar[1].str) + " " + string(yyDollar[2].str) } - case 508: + case 509: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2869 +//line sql.y:2872 { yyVAL.str = String(TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS}) } - case 509: + case 510: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2873 +//line sql.y:2876 { yyVAL.str = yyDollar[1].identifierCI.String() } - case 510: + case 511: yyDollar 
= yyS[yypt-1 : yypt+1] -//line sql.y:2877 +//line sql.y:2880 { yyVAL.str = encodeSQLString(yyDollar[1].str) } - case 511: + case 512: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2881 +//line sql.y:2884 { yyVAL.str = string(yyDollar[1].str) } - case 512: + case 513: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2886 +//line sql.y:2889 { yyVAL.str = "" } - case 514: + case 515: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:2892 +//line sql.y:2895 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 515: + case 516: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2896 +//line sql.y:2899 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 516: + case 517: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ColName -//line sql.y:2901 +//line sql.y:2904 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 517: + case 518: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColName -//line sql.y:2905 +//line sql.y:2908 { yyLOCAL = yyDollar[2].colNameUnion() } yyVAL.union = yyLOCAL - case 518: + case 519: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2910 +//line sql.y:2913 { yyVAL.str = "" } - case 519: + case 520: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2914 +//line sql.y:2917 { yyVAL.str = string(yyDollar[2].str) } - case 520: + case 521: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *Literal -//line sql.y:2919 +//line sql.y:2922 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 521: + case 522: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Literal -//line sql.y:2923 +//line sql.y:2926 { yyLOCAL = NewIntLiteral(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 522: + case 523: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Literal -//line sql.y:2927 +//line sql.y:2930 { yyLOCAL = NewDecimalLiteral(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 523: + case 524: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:2932 +//line sql.y:2935 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 524: + case 525: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL 
[]AlterOption -//line sql.y:2936 +//line sql.y:2939 { yyLOCAL = yyDollar[1].alterOptionsUnion() } yyVAL.union = yyLOCAL - case 525: + case 526: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2940 +//line sql.y:2943 { yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, &OrderByOption{Cols: yyDollar[5].columnsUnion()}) } - case 526: + case 527: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:2944 +//line sql.y:2947 { yyLOCAL = yyDollar[1].alterOptionsUnion() } yyVAL.union = yyLOCAL - case 527: + case 528: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2948 +//line sql.y:2951 { yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].alterOptionsUnion()...) } - case 528: + case 529: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:2952 +//line sql.y:2955 { yyLOCAL = append(append(yyDollar[1].alterOptionsUnion(), yyDollar[3].alterOptionsUnion()...), &OrderByOption{Cols: yyDollar[7].columnsUnion()}) } yyVAL.union = yyLOCAL - case 529: + case 530: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:2958 +//line sql.y:2961 { yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()} } yyVAL.union = yyLOCAL - case 530: + case 531: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2962 +//line sql.y:2965 { yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].alterOptionUnion()) } - case 531: + case 532: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2966 +//line sql.y:2969 { yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].alterOptionUnion()) } - case 532: + case 533: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2972 +//line sql.y:2975 { yyLOCAL = yyDollar[1].tableOptionsUnion() } yyVAL.union = yyLOCAL - case 533: + case 534: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2976 +//line sql.y:2979 { yyLOCAL = 
&AddConstraintDefinition{ConstraintDefinition: yyDollar[2].constraintDefinitionUnion()} } yyVAL.union = yyLOCAL - case 534: + case 535: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2980 +//line sql.y:2983 { yyLOCAL = &AddConstraintDefinition{ConstraintDefinition: yyDollar[2].constraintDefinitionUnion()} } yyVAL.union = yyLOCAL - case 535: + case 536: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2984 +//line sql.y:2987 { yyLOCAL = &AddIndexDefinition{IndexDefinition: yyDollar[2].indexDefinitionUnion()} } yyVAL.union = yyLOCAL - case 536: + case 537: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2988 +//line sql.y:2991 { yyLOCAL = &AddColumns{Columns: yyDollar[4].columnDefinitionsUnion()} } yyVAL.union = yyLOCAL - case 537: + case 538: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2992 +//line sql.y:2995 { yyLOCAL = &AddColumns{Columns: []*ColumnDefinition{yyDollar[3].columnDefinitionUnion()}, First: yyDollar[4].booleanUnion(), After: yyDollar[5].colNameUnion()} } yyVAL.union = yyLOCAL - case 538: + case 539: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2996 +//line sql.y:2999 { yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), DropDefault: true} } yyVAL.union = yyLOCAL - case 539: + case 540: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3000 +//line sql.y:3003 { - yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), DropDefault: false, DefaultVal: yyDollar[6].exprUnion()} + yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), DropDefault: false, DefaultVal: yyDollar[6].exprUnion(), DefaultLiteral: true} } yyVAL.union = yyLOCAL - case 540: + case 541: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3004 +//line sql.y:3007 { yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), DropDefault: false, DefaultVal: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 541: + case 
542: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3008 +//line sql.y:3011 { val := false yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), Invisible: &val} } yyVAL.union = yyLOCAL - case 542: + case 543: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3013 +//line sql.y:3016 { val := true yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), Invisible: &val} } yyVAL.union = yyLOCAL - case 543: + case 544: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3018 +//line sql.y:3021 { yyLOCAL = &AlterCheck{Name: yyDollar[3].identifierCI, Enforced: yyDollar[4].booleanUnion()} } yyVAL.union = yyLOCAL - case 544: + case 545: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3022 +//line sql.y:3025 { yyLOCAL = &AlterIndex{Name: yyDollar[3].identifierCI, Invisible: false} } yyVAL.union = yyLOCAL - case 545: + case 546: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3026 +//line sql.y:3029 { yyLOCAL = &AlterIndex{Name: yyDollar[3].identifierCI, Invisible: true} } yyVAL.union = yyLOCAL - case 546: + case 547: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3030 +//line sql.y:3033 { yyLOCAL = &ChangeColumn{OldColumn: yyDollar[3].colNameUnion(), NewColDefinition: yyDollar[4].columnDefinitionUnion(), First: yyDollar[5].booleanUnion(), After: yyDollar[6].colNameUnion()} } yyVAL.union = yyLOCAL - case 547: + case 548: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3034 +//line sql.y:3037 { yyLOCAL = &ModifyColumn{NewColDefinition: yyDollar[3].columnDefinitionUnion(), First: yyDollar[4].booleanUnion(), After: yyDollar[5].colNameUnion()} } yyVAL.union = yyLOCAL - case 548: + case 549: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3038 +//line sql.y:3041 { yyLOCAL = &RenameColumn{OldName: yyDollar[3].colNameUnion(), NewName: yyDollar[5].colNameUnion()} } yyVAL.union = yyLOCAL - case 549: + case 
550: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3042 +//line sql.y:3045 { yyLOCAL = &AlterCharset{CharacterSet: yyDollar[4].str, Collate: yyDollar[5].str} } yyVAL.union = yyLOCAL - case 550: + case 551: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3046 +//line sql.y:3049 { yyLOCAL = &KeyState{Enable: false} } yyVAL.union = yyLOCAL - case 551: + case 552: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3050 +//line sql.y:3053 { yyLOCAL = &KeyState{Enable: true} } yyVAL.union = yyLOCAL - case 552: + case 553: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3054 +//line sql.y:3057 { yyLOCAL = &TablespaceOperation{Import: false} } yyVAL.union = yyLOCAL - case 553: + case 554: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3058 +//line sql.y:3061 { yyLOCAL = &TablespaceOperation{Import: true} } yyVAL.union = yyLOCAL - case 554: + case 555: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3062 +//line sql.y:3065 { yyLOCAL = &DropColumn{Name: yyDollar[3].colNameUnion()} } yyVAL.union = yyLOCAL - case 555: + case 556: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3066 +//line sql.y:3069 { yyLOCAL = &DropKey{Type: NormalKeyType, Name: yyDollar[3].identifierCI} } yyVAL.union = yyLOCAL - case 556: + case 557: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3070 +//line sql.y:3073 { yyLOCAL = &DropKey{Type: PrimaryKeyType} } yyVAL.union = yyLOCAL - case 557: + case 558: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3074 +//line sql.y:3077 { yyLOCAL = &DropKey{Type: ForeignKeyType, Name: yyDollar[4].identifierCI} } yyVAL.union = yyLOCAL - case 558: + case 559: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3078 +//line sql.y:3081 { yyLOCAL = &DropKey{Type: CheckKeyType, Name: yyDollar[3].identifierCI} } yyVAL.union = yyLOCAL - case 559: + case 560: 
yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3082 +//line sql.y:3085 { yyLOCAL = &DropKey{Type: CheckKeyType, Name: yyDollar[3].identifierCI} } yyVAL.union = yyLOCAL - case 560: + case 561: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3086 +//line sql.y:3089 { yyLOCAL = &Force{} } yyVAL.union = yyLOCAL - case 561: + case 562: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3090 +//line sql.y:3093 { yyLOCAL = &RenameTableName{Table: yyDollar[3].tableName} } yyVAL.union = yyLOCAL - case 562: + case 563: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3094 +//line sql.y:3097 { yyLOCAL = &RenameIndex{OldName: yyDollar[3].identifierCI, NewName: yyDollar[5].identifierCI} } yyVAL.union = yyLOCAL - case 563: + case 564: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:3100 +//line sql.y:3103 { yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()} } yyVAL.union = yyLOCAL - case 564: + case 565: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3104 +//line sql.y:3107 { yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].alterOptionUnion()) } - case 565: + case 566: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3110 +//line sql.y:3113 { yyLOCAL = AlgorithmValue(string(yyDollar[3].str)) } yyVAL.union = yyLOCAL - case 566: + case 567: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3114 +//line sql.y:3117 { yyLOCAL = AlgorithmValue(string(yyDollar[3].str)) } yyVAL.union = yyLOCAL - case 567: + case 568: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3118 +//line sql.y:3121 { yyLOCAL = AlgorithmValue(string(yyDollar[3].str)) } yyVAL.union = yyLOCAL - case 568: + case 569: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3122 +//line sql.y:3125 { yyLOCAL = AlgorithmValue(string(yyDollar[3].str)) } yyVAL.union = yyLOCAL - case 569: + case 
570: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3126 +//line sql.y:3129 { yyLOCAL = &LockOption{Type: DefaultType} } yyVAL.union = yyLOCAL - case 570: + case 571: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3130 +//line sql.y:3133 { yyLOCAL = &LockOption{Type: NoneType} } yyVAL.union = yyLOCAL - case 571: + case 572: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3134 +//line sql.y:3137 { yyLOCAL = &LockOption{Type: SharedType} } yyVAL.union = yyLOCAL - case 572: + case 573: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3138 +//line sql.y:3141 { yyLOCAL = &LockOption{Type: ExclusiveType} } yyVAL.union = yyLOCAL - case 573: + case 574: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3142 +//line sql.y:3145 { yyLOCAL = &Validation{With: true} } yyVAL.union = yyLOCAL - case 574: + case 575: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3146 +//line sql.y:3149 { yyLOCAL = &Validation{With: false} } yyVAL.union = yyLOCAL - case 575: + case 576: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3152 +//line sql.y:3155 { yyDollar[1].alterTableUnion().FullyParsed = true yyDollar[1].alterTableUnion().AlterOptions = yyDollar[2].alterOptionsUnion() @@ -13969,10 +14097,10 @@ yydefault: yyLOCAL = yyDollar[1].alterTableUnion() } yyVAL.union = yyLOCAL - case 576: + case 577: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:3159 +//line sql.y:3162 { yyDollar[1].alterTableUnion().FullyParsed = true yyDollar[1].alterTableUnion().AlterOptions = yyDollar[2].alterOptionsUnion() @@ -13980,10 +14108,10 @@ yydefault: yyLOCAL = yyDollar[1].alterTableUnion() } yyVAL.union = yyLOCAL - case 577: + case 578: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:3166 +//line sql.y:3169 { yyDollar[1].alterTableUnion().FullyParsed = true yyDollar[1].alterTableUnion().AlterOptions = 
yyDollar[2].alterOptionsUnion() @@ -13991,28 +14119,28 @@ yydefault: yyLOCAL = yyDollar[1].alterTableUnion() } yyVAL.union = yyLOCAL - case 578: + case 579: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:3173 +//line sql.y:3176 { yyDollar[1].alterTableUnion().FullyParsed = true yyDollar[1].alterTableUnion().PartitionSpec = yyDollar[2].partSpecUnion() yyLOCAL = yyDollar[1].alterTableUnion() } yyVAL.union = yyLOCAL - case 579: + case 580: yyDollar = yyS[yypt-11 : yypt+1] var yyLOCAL Statement -//line sql.y:3179 +//line sql.y:3182 { yyLOCAL = &AlterView{ViewName: yyDollar[7].tableName, Comments: Comments(yyDollar[2].strs).Parsed(), Algorithm: yyDollar[3].str, Definer: yyDollar[4].definerUnion(), Security: yyDollar[5].str, Columns: yyDollar[8].columnsUnion(), Select: yyDollar[10].selStmtUnion(), CheckOption: yyDollar[11].str} } yyVAL.union = yyLOCAL - case 580: + case 581: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3189 +//line sql.y:3192 { yyDollar[1].alterDatabaseUnion().FullyParsed = true yyDollar[1].alterDatabaseUnion().DBName = yyDollar[2].identifierCS @@ -14020,10 +14148,10 @@ yydefault: yyLOCAL = yyDollar[1].alterDatabaseUnion() } yyVAL.union = yyLOCAL - case 581: + case 582: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:3196 +//line sql.y:3199 { yyDollar[1].alterDatabaseUnion().FullyParsed = true yyDollar[1].alterDatabaseUnion().DBName = yyDollar[2].identifierCS @@ -14031,10 +14159,10 @@ yydefault: yyLOCAL = yyDollar[1].alterDatabaseUnion() } yyVAL.union = yyLOCAL - case 582: + case 583: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Statement -//line sql.y:3203 +//line sql.y:3206 { yyLOCAL = &AlterVschema{ Action: CreateVindexDDLAction, @@ -14047,10 +14175,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 583: + case 584: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:3215 +//line sql.y:3218 { yyLOCAL = &AlterVschema{ Action: DropVindexDDLAction, @@ -14061,26 +14189,26 @@ 
yydefault: } } yyVAL.union = yyLOCAL - case 584: + case 585: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:3225 +//line sql.y:3228 { yyLOCAL = &AlterVschema{Action: AddVschemaTableDDLAction, Table: yyDollar[6].tableName} } yyVAL.union = yyLOCAL - case 585: + case 586: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:3229 +//line sql.y:3232 { yyLOCAL = &AlterVschema{Action: DropVschemaTableDDLAction, Table: yyDollar[6].tableName} } yyVAL.union = yyLOCAL - case 586: + case 587: yyDollar = yyS[yypt-13 : yypt+1] var yyLOCAL Statement -//line sql.y:3233 +//line sql.y:3236 { yyLOCAL = &AlterVschema{ Action: AddColVindexDDLAction, @@ -14094,10 +14222,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 587: + case 588: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Statement -//line sql.y:3246 +//line sql.y:3249 { yyLOCAL = &AlterVschema{ Action: DropColVindexDDLAction, @@ -14108,18 +14236,26 @@ yydefault: } } yyVAL.union = yyLOCAL - case 588: + case 589: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:3256 +//line sql.y:3259 { yyLOCAL = &AlterVschema{Action: AddSequenceDDLAction, Table: yyDollar[6].tableName} } yyVAL.union = yyLOCAL - case 589: + case 590: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Statement +//line sql.y:3263 + { + yyLOCAL = &AlterVschema{Action: DropSequenceDDLAction, Table: yyDollar[6].tableName} + } + yyVAL.union = yyLOCAL + case 591: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL Statement -//line sql.y:3260 +//line sql.y:3267 { yyLOCAL = &AlterVschema{ Action: AddAutoIncDDLAction, @@ -14131,10 +14267,21 @@ yydefault: } } yyVAL.union = yyLOCAL - case 590: + case 592: + yyDollar = yyS[yypt-7 : yypt+1] + var yyLOCAL Statement +//line sql.y:3278 + { + yyLOCAL = &AlterVschema{ + Action: DropAutoIncDDLAction, + Table: yyDollar[5].tableName, + } + } + yyVAL.union = yyLOCAL + case 593: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3271 +//line sql.y:3285 { yyLOCAL = 
&AlterMigration{ Type: RetryMigrationType, @@ -14142,10 +14289,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 591: + case 594: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3278 +//line sql.y:3292 { yyLOCAL = &AlterMigration{ Type: CleanupMigrationType, @@ -14153,10 +14300,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 592: + case 595: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3285 +//line sql.y:3299 { yyLOCAL = &AlterMigration{ Type: LaunchMigrationType, @@ -14164,10 +14311,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 593: + case 596: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3292 +//line sql.y:3306 { yyLOCAL = &AlterMigration{ Type: LaunchMigrationType, @@ -14176,20 +14323,20 @@ yydefault: } } yyVAL.union = yyLOCAL - case 594: + case 597: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3300 +//line sql.y:3314 { yyLOCAL = &AlterMigration{ Type: LaunchAllMigrationType, } } yyVAL.union = yyLOCAL - case 595: + case 598: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3306 +//line sql.y:3320 { yyLOCAL = &AlterMigration{ Type: CompleteMigrationType, @@ -14197,20 +14344,20 @@ yydefault: } } yyVAL.union = yyLOCAL - case 596: + case 599: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3313 +//line sql.y:3327 { yyLOCAL = &AlterMigration{ Type: CompleteAllMigrationType, } } yyVAL.union = yyLOCAL - case 597: + case 600: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3319 +//line sql.y:3333 { yyLOCAL = &AlterMigration{ Type: CancelMigrationType, @@ -14218,20 +14365,20 @@ yydefault: } } yyVAL.union = yyLOCAL - case 598: + case 601: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3326 +//line sql.y:3340 { yyLOCAL = &AlterMigration{ Type: CancelAllMigrationType, } } yyVAL.union = yyLOCAL - case 599: + case 602: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3332 +//line 
sql.y:3346 { yyLOCAL = &AlterMigration{ Type: ThrottleMigrationType, @@ -14241,10 +14388,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 600: + case 603: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3341 +//line sql.y:3355 { yyLOCAL = &AlterMigration{ Type: ThrottleAllMigrationType, @@ -14253,10 +14400,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 601: + case 604: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3349 +//line sql.y:3363 { yyLOCAL = &AlterMigration{ Type: UnthrottleMigrationType, @@ -14264,28 +14411,28 @@ yydefault: } } yyVAL.union = yyLOCAL - case 602: + case 605: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3356 +//line sql.y:3370 { yyLOCAL = &AlterMigration{ Type: UnthrottleAllMigrationType, } } yyVAL.union = yyLOCAL - case 603: + case 606: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *PartitionOption -//line sql.y:3363 +//line sql.y:3377 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 604: + case 607: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *PartitionOption -//line sql.y:3367 +//line sql.y:3381 { yyDollar[3].partitionOptionUnion().Partitions = yyDollar[4].integerUnion() yyDollar[3].partitionOptionUnion().SubPartition = yyDollar[5].subPartitionUnion() @@ -14293,10 +14440,10 @@ yydefault: yyLOCAL = yyDollar[3].partitionOptionUnion() } yyVAL.union = yyLOCAL - case 605: + case 608: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *PartitionOption -//line sql.y:3376 +//line sql.y:3390 { yyLOCAL = &PartitionOption{ IsLinear: yyDollar[1].booleanUnion(), @@ -14305,10 +14452,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 606: + case 609: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *PartitionOption -//line sql.y:3384 +//line sql.y:3398 { yyLOCAL = &PartitionOption{ IsLinear: yyDollar[1].booleanUnion(), @@ -14318,10 +14465,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 607: + case 610: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionOption -//line sql.y:3393 +//line sql.y:3407 { 
yyLOCAL = &PartitionOption{ Type: yyDollar[1].partitionByTypeUnion(), @@ -14329,10 +14476,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 608: + case 611: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *PartitionOption -//line sql.y:3400 +//line sql.y:3414 { yyLOCAL = &PartitionOption{ Type: yyDollar[1].partitionByTypeUnion(), @@ -14340,18 +14487,18 @@ yydefault: } } yyVAL.union = yyLOCAL - case 609: + case 612: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *SubPartition -//line sql.y:3408 +//line sql.y:3422 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 610: + case 613: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *SubPartition -//line sql.y:3412 +//line sql.y:3426 { yyLOCAL = &SubPartition{ IsLinear: yyDollar[3].booleanUnion(), @@ -14361,10 +14508,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 611: + case 614: yyDollar = yyS[yypt-9 : yypt+1] var yyLOCAL *SubPartition -//line sql.y:3421 +//line sql.y:3435 { yyLOCAL = &SubPartition{ IsLinear: yyDollar[3].booleanUnion(), @@ -14375,682 +14522,682 @@ yydefault: } } yyVAL.union = yyLOCAL - case 612: + case 615: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []*PartitionDefinition -//line sql.y:3432 +//line sql.y:3446 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 613: + case 616: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL []*PartitionDefinition -//line sql.y:3436 +//line sql.y:3450 { yyLOCAL = yyDollar[2].partDefsUnion() } yyVAL.union = yyLOCAL - case 614: + case 617: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:3441 +//line sql.y:3455 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 615: + case 618: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:3445 +//line sql.y:3459 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 616: + case 619: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL int -//line sql.y:3450 +//line sql.y:3464 { yyLOCAL = 0 } yyVAL.union = yyLOCAL - case 617: + case 620: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL int -//line sql.y:3454 +//line sql.y:3468 { yyLOCAL = 
convertStringToInt(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 618: + case 621: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL TableExpr -//line sql.y:3460 +//line sql.y:3474 { yyLOCAL = &JSONTableExpr{Expr: yyDollar[3].exprUnion(), Filter: yyDollar[5].exprUnion(), Columns: yyDollar[6].jtColumnListUnion(), Alias: yyDollar[8].identifierCS} } yyVAL.union = yyLOCAL - case 619: + case 622: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL []*JtColumnDefinition -//line sql.y:3466 +//line sql.y:3480 { yyLOCAL = yyDollar[3].jtColumnListUnion() } yyVAL.union = yyLOCAL - case 620: + case 623: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*JtColumnDefinition -//line sql.y:3472 +//line sql.y:3486 { yyLOCAL = []*JtColumnDefinition{yyDollar[1].jtColumnDefinitionUnion()} } yyVAL.union = yyLOCAL - case 621: + case 624: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3476 +//line sql.y:3490 { yySLICE := (*[]*JtColumnDefinition)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].jtColumnDefinitionUnion()) } - case 622: + case 625: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *JtColumnDefinition -//line sql.y:3482 +//line sql.y:3496 { yyLOCAL = &JtColumnDefinition{JtOrdinal: &JtOrdinalColDef{Name: yyDollar[1].identifierCI}} } yyVAL.union = yyLOCAL - case 623: + case 626: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *JtColumnDefinition -//line sql.y:3486 +//line sql.y:3500 { yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str} jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion()} yyLOCAL = &JtColumnDefinition{JtPath: jtPath} } yyVAL.union = yyLOCAL - case 624: + case 627: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *JtColumnDefinition -//line sql.y:3492 +//line sql.y:3506 { yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str} jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: 
yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion()} yyLOCAL = &JtColumnDefinition{JtPath: jtPath} } yyVAL.union = yyLOCAL - case 625: + case 628: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *JtColumnDefinition -//line sql.y:3498 +//line sql.y:3512 { yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str} jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion(), ErrorOnResponse: yyDollar[7].jtOnResponseUnion()} yyLOCAL = &JtColumnDefinition{JtPath: jtPath} } yyVAL.union = yyLOCAL - case 626: + case 629: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *JtColumnDefinition -//line sql.y:3504 +//line sql.y:3518 { yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str} jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion(), ErrorOnResponse: yyDollar[8].jtOnResponseUnion()} yyLOCAL = &JtColumnDefinition{JtPath: jtPath} } yyVAL.union = yyLOCAL - case 627: + case 630: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *JtColumnDefinition -//line sql.y:3510 +//line sql.y:3524 { jtNestedPath := &JtNestedPathColDef{Path: yyDollar[3].exprUnion(), Columns: yyDollar[4].jtColumnListUnion()} yyLOCAL = &JtColumnDefinition{JtNestedPath: jtNestedPath} } yyVAL.union = yyLOCAL - case 628: + case 631: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:3516 +//line sql.y:3530 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 629: + case 632: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:3520 +//line sql.y:3534 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 630: + case 633: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:3524 +//line sql.y:3538 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 631: + case 634: 
yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:3528 +//line sql.y:3542 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 632: + case 635: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *JtOnResponse -//line sql.y:3534 +//line sql.y:3548 { yyLOCAL = yyDollar[1].jtOnResponseUnion() } yyVAL.union = yyLOCAL - case 633: + case 636: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *JtOnResponse -//line sql.y:3540 +//line sql.y:3554 { yyLOCAL = yyDollar[1].jtOnResponseUnion() } yyVAL.union = yyLOCAL - case 634: + case 637: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *JtOnResponse -//line sql.y:3546 +//line sql.y:3560 { yyLOCAL = &JtOnResponse{ResponseType: ErrorJSONType} } yyVAL.union = yyLOCAL - case 635: + case 638: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *JtOnResponse -//line sql.y:3550 +//line sql.y:3564 { yyLOCAL = &JtOnResponse{ResponseType: NullJSONType} } yyVAL.union = yyLOCAL - case 636: + case 639: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *JtOnResponse -//line sql.y:3554 +//line sql.y:3568 { yyLOCAL = &JtOnResponse{ResponseType: DefaultJSONType, Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 637: + case 640: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL PartitionByType -//line sql.y:3560 +//line sql.y:3574 { yyLOCAL = RangeType } yyVAL.union = yyLOCAL - case 638: + case 641: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL PartitionByType -//line sql.y:3564 +//line sql.y:3578 { yyLOCAL = ListType } yyVAL.union = yyLOCAL - case 639: + case 642: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL int -//line sql.y:3569 +//line sql.y:3583 { yyLOCAL = -1 } yyVAL.union = yyLOCAL - case 640: + case 643: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL int -//line sql.y:3573 +//line sql.y:3587 { yyLOCAL = convertStringToInt(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 641: + case 644: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL int -//line sql.y:3578 +//line sql.y:3592 { yyLOCAL = -1 } yyVAL.union = yyLOCAL - case 642: + case 645: yyDollar = yyS[yypt-2 : 
yypt+1] var yyLOCAL int -//line sql.y:3582 +//line sql.y:3596 { yyLOCAL = convertStringToInt(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 643: + case 646: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3588 +//line sql.y:3602 { yyLOCAL = &PartitionSpec{Action: AddAction, Definitions: []*PartitionDefinition{yyDollar[4].partDefUnion()}} } yyVAL.union = yyLOCAL - case 644: + case 647: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3592 +//line sql.y:3606 { yyLOCAL = &PartitionSpec{Action: DropAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 645: + case 648: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3596 +//line sql.y:3610 { yyLOCAL = &PartitionSpec{Action: ReorganizeAction, Names: yyDollar[3].partitionsUnion(), Definitions: yyDollar[6].partDefsUnion()} } yyVAL.union = yyLOCAL - case 646: + case 649: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3600 +//line sql.y:3614 { yyLOCAL = &PartitionSpec{Action: DiscardAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 647: + case 650: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3604 +//line sql.y:3618 { yyLOCAL = &PartitionSpec{Action: DiscardAction, IsAll: true} } yyVAL.union = yyLOCAL - case 648: + case 651: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3608 +//line sql.y:3622 { yyLOCAL = &PartitionSpec{Action: ImportAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 649: + case 652: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3612 +//line sql.y:3626 { yyLOCAL = &PartitionSpec{Action: ImportAction, IsAll: true} } yyVAL.union = yyLOCAL - case 650: + case 653: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3616 +//line sql.y:3630 { yyLOCAL = &PartitionSpec{Action: TruncateAction, Names: yyDollar[3].partitionsUnion()} } 
yyVAL.union = yyLOCAL - case 651: + case 654: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3620 +//line sql.y:3634 { yyLOCAL = &PartitionSpec{Action: TruncateAction, IsAll: true} } yyVAL.union = yyLOCAL - case 652: + case 655: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3624 +//line sql.y:3638 { yyLOCAL = &PartitionSpec{Action: CoalesceAction, Number: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 653: + case 656: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3628 +//line sql.y:3642 { yyLOCAL = &PartitionSpec{Action: ExchangeAction, Names: Partitions{yyDollar[3].identifierCI}, TableName: yyDollar[6].tableName, WithoutValidation: yyDollar[7].booleanUnion()} } yyVAL.union = yyLOCAL - case 654: + case 657: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3632 +//line sql.y:3646 { yyLOCAL = &PartitionSpec{Action: AnalyzeAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 655: + case 658: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3636 +//line sql.y:3650 { yyLOCAL = &PartitionSpec{Action: AnalyzeAction, IsAll: true} } yyVAL.union = yyLOCAL - case 656: + case 659: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3640 +//line sql.y:3654 { yyLOCAL = &PartitionSpec{Action: CheckAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 657: + case 660: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3644 +//line sql.y:3658 { yyLOCAL = &PartitionSpec{Action: CheckAction, IsAll: true} } yyVAL.union = yyLOCAL - case 658: + case 661: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3648 +//line sql.y:3662 { yyLOCAL = &PartitionSpec{Action: OptimizeAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 659: + case 662: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line 
sql.y:3652 +//line sql.y:3666 { yyLOCAL = &PartitionSpec{Action: OptimizeAction, IsAll: true} } yyVAL.union = yyLOCAL - case 660: + case 663: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3656 +//line sql.y:3670 { yyLOCAL = &PartitionSpec{Action: RebuildAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 661: + case 664: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3660 +//line sql.y:3674 { yyLOCAL = &PartitionSpec{Action: RebuildAction, IsAll: true} } yyVAL.union = yyLOCAL - case 662: + case 665: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3664 +//line sql.y:3678 { yyLOCAL = &PartitionSpec{Action: RepairAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 663: + case 666: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3668 +//line sql.y:3682 { yyLOCAL = &PartitionSpec{Action: RepairAction, IsAll: true} } yyVAL.union = yyLOCAL - case 664: + case 667: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3672 +//line sql.y:3686 { yyLOCAL = &PartitionSpec{Action: UpgradeAction} } yyVAL.union = yyLOCAL - case 665: + case 668: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:3677 +//line sql.y:3691 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 666: + case 669: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:3681 +//line sql.y:3695 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 667: + case 670: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:3685 +//line sql.y:3699 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 668: + case 671: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*PartitionDefinition -//line sql.y:3691 +//line sql.y:3705 { yyLOCAL = []*PartitionDefinition{yyDollar[1].partDefUnion()} } yyVAL.union = yyLOCAL - case 669: + case 672: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3695 +//line sql.y:3709 { yySLICE := 
(*[]*PartitionDefinition)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].partDefUnion()) } - case 670: + case 673: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3701 +//line sql.y:3715 { yyVAL.partDefUnion().Options = yyDollar[2].partitionDefinitionOptionsUnion() } - case 671: + case 674: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3706 +//line sql.y:3720 { yyLOCAL = &PartitionDefinitionOptions{} } yyVAL.union = yyLOCAL - case 672: + case 675: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3710 +//line sql.y:3724 { yyDollar[1].partitionDefinitionOptionsUnion().ValueRange = yyDollar[2].partitionValueRangeUnion() yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 673: + case 676: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3715 +//line sql.y:3729 { yyDollar[1].partitionDefinitionOptionsUnion().Comment = yyDollar[2].literalUnion() yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 674: + case 677: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3720 +//line sql.y:3734 { yyDollar[1].partitionDefinitionOptionsUnion().Engine = yyDollar[2].partitionEngineUnion() yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 675: + case 678: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3725 +//line sql.y:3739 { yyDollar[1].partitionDefinitionOptionsUnion().DataDirectory = yyDollar[2].literalUnion() yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 676: + case 679: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3730 +//line sql.y:3744 { yyDollar[1].partitionDefinitionOptionsUnion().IndexDirectory = yyDollar[2].literalUnion() yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } 
yyVAL.union = yyLOCAL - case 677: + case 680: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3735 +//line sql.y:3749 { val := yyDollar[2].integerUnion() yyDollar[1].partitionDefinitionOptionsUnion().MaxRows = &val yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 678: + case 681: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3741 +//line sql.y:3755 { val := yyDollar[2].integerUnion() yyDollar[1].partitionDefinitionOptionsUnion().MinRows = &val yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 679: + case 682: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3747 +//line sql.y:3761 { yyDollar[1].partitionDefinitionOptionsUnion().TableSpace = yyDollar[2].str yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 680: + case 683: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3752 +//line sql.y:3766 { yyDollar[1].partitionDefinitionOptionsUnion().SubPartitionDefinitions = yyDollar[2].subPartitionDefinitionsUnion() yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 681: + case 684: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SubPartitionDefinitions -//line sql.y:3758 +//line sql.y:3772 { yyLOCAL = yyDollar[2].subPartitionDefinitionsUnion() } yyVAL.union = yyLOCAL - case 682: + case 685: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SubPartitionDefinitions -//line sql.y:3764 +//line sql.y:3778 { yyLOCAL = SubPartitionDefinitions{yyDollar[1].subPartitionDefinitionUnion()} } yyVAL.union = yyLOCAL - case 683: + case 686: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3768 +//line sql.y:3782 { yySLICE := (*SubPartitionDefinitions)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].subPartitionDefinitionUnion()) } - case 684: + case 687: yyDollar = yyS[yypt-3 : yypt+1] var 
yyLOCAL *SubPartitionDefinition -//line sql.y:3774 +//line sql.y:3788 { yyLOCAL = &SubPartitionDefinition{Name: yyDollar[2].identifierCI, Options: yyDollar[3].subPartitionDefinitionOptionsUnion()} } yyVAL.union = yyLOCAL - case 685: + case 688: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3779 +//line sql.y:3793 { yyLOCAL = &SubPartitionDefinitionOptions{} } yyVAL.union = yyLOCAL - case 686: + case 689: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3783 +//line sql.y:3797 { yyDollar[1].subPartitionDefinitionOptionsUnion().Comment = yyDollar[2].literalUnion() yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 687: + case 690: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3788 +//line sql.y:3802 { yyDollar[1].subPartitionDefinitionOptionsUnion().Engine = yyDollar[2].partitionEngineUnion() yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 688: + case 691: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3793 +//line sql.y:3807 { yyDollar[1].subPartitionDefinitionOptionsUnion().DataDirectory = yyDollar[2].literalUnion() yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 689: + case 692: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3798 +//line sql.y:3812 { yyDollar[1].subPartitionDefinitionOptionsUnion().IndexDirectory = yyDollar[2].literalUnion() yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 690: + case 693: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3803 +//line sql.y:3817 { val := yyDollar[2].integerUnion() yyDollar[1].subPartitionDefinitionOptionsUnion().MaxRows = &val yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL 
- case 691: + case 694: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3809 +//line sql.y:3823 { val := yyDollar[2].integerUnion() yyDollar[1].subPartitionDefinitionOptionsUnion().MinRows = &val yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 692: + case 695: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3815 +//line sql.y:3829 { yyDollar[1].subPartitionDefinitionOptionsUnion().TableSpace = yyDollar[2].str yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 693: + case 696: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionValueRange -//line sql.y:3822 +//line sql.y:3836 { yyLOCAL = &PartitionValueRange{ Type: LessThanType, @@ -15058,10 +15205,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 694: + case 697: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionValueRange -//line sql.y:3829 +//line sql.y:3843 { yyLOCAL = &PartitionValueRange{ Type: LessThanType, @@ -15069,10 +15216,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 695: + case 698: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionValueRange -//line sql.y:3836 +//line sql.y:3850 { yyLOCAL = &PartitionValueRange{ Type: InType, @@ -15080,131 +15227,131 @@ yydefault: } } yyVAL.union = yyLOCAL - case 696: + case 699: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:3844 +//line sql.y:3858 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 697: + case 700: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:3848 +//line sql.y:3862 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 698: + case 701: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionEngine -//line sql.y:3854 +//line sql.y:3868 { yyLOCAL = &PartitionEngine{Storage: yyDollar[1].booleanUnion(), Name: yyDollar[4].identifierCS.String()} } yyVAL.union = yyLOCAL - case 699: + case 702: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *Literal -//line 
sql.y:3860 +//line sql.y:3874 { yyLOCAL = NewStrLiteral(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 700: + case 703: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *Literal -//line sql.y:3866 +//line sql.y:3880 { yyLOCAL = NewStrLiteral(yyDollar[4].str) } yyVAL.union = yyLOCAL - case 701: + case 704: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *Literal -//line sql.y:3872 +//line sql.y:3886 { yyLOCAL = NewStrLiteral(yyDollar[4].str) } yyVAL.union = yyLOCAL - case 702: + case 705: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL int -//line sql.y:3878 +//line sql.y:3892 { yyLOCAL = convertStringToInt(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 703: + case 706: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL int -//line sql.y:3884 +//line sql.y:3898 { yyLOCAL = convertStringToInt(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 704: + case 707: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3890 +//line sql.y:3904 { yyVAL.str = yyDollar[3].identifierCS.String() } - case 705: + case 708: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinition -//line sql.y:3896 +//line sql.y:3910 { yyLOCAL = &PartitionDefinition{Name: yyDollar[2].identifierCI} } yyVAL.union = yyLOCAL - case 706: + case 709: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3902 +//line sql.y:3916 { yyVAL.str = "" } - case 707: + case 710: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3906 +//line sql.y:3920 { yyVAL.str = "" } - case 708: + case 711: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3912 +//line sql.y:3926 { yyLOCAL = &RenameTable{TablePairs: yyDollar[3].renameTablePairsUnion()} } yyVAL.union = yyLOCAL - case 709: + case 712: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL []*RenameTablePair -//line sql.y:3918 +//line sql.y:3932 { yyLOCAL = []*RenameTablePair{{FromTable: yyDollar[1].tableName, ToTable: yyDollar[3].tableName}} } yyVAL.union = yyLOCAL - case 710: + case 713: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:3922 +//line sql.y:3936 { yySLICE := 
(*[]*RenameTablePair)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, &RenameTablePair{FromTable: yyDollar[3].tableName, ToTable: yyDollar[5].tableName}) } - case 711: + case 714: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3928 +//line sql.y:3942 { yyLOCAL = &DropTable{FromTables: yyDollar[6].tableNamesUnion(), IfExists: yyDollar[5].booleanUnion(), Comments: Comments(yyDollar[2].strs).Parsed(), Temp: yyDollar[3].booleanUnion()} } yyVAL.union = yyLOCAL - case 712: + case 715: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3932 +//line sql.y:3946 { // Change this to an alter statement if yyDollar[4].identifierCI.Lowered() == "primary" { @@ -15214,1335 +15361,1335 @@ yydefault: } } yyVAL.union = yyLOCAL - case 713: + case 716: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:3941 +//line sql.y:3955 { yyLOCAL = &DropView{FromTables: yyDollar[5].tableNamesUnion(), Comments: Comments(yyDollar[2].strs).Parsed(), IfExists: yyDollar[4].booleanUnion()} } yyVAL.union = yyLOCAL - case 714: + case 717: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3945 +//line sql.y:3959 { yyLOCAL = &DropDatabase{Comments: Comments(yyDollar[2].strs).Parsed(), DBName: yyDollar[5].identifierCS, IfExists: yyDollar[4].booleanUnion()} } yyVAL.union = yyLOCAL - case 715: + case 718: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3951 +//line sql.y:3965 { yyLOCAL = &TruncateTable{Table: yyDollar[3].tableName} } yyVAL.union = yyLOCAL - case 716: + case 719: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:3955 +//line sql.y:3969 { yyLOCAL = &TruncateTable{Table: yyDollar[2].tableName} } yyVAL.union = yyLOCAL - case 717: - yyDollar = yyS[yypt-3 : yypt+1] + case 720: + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:3961 +//line sql.y:3975 { - yyLOCAL = &OtherRead{} + yyLOCAL = &Analyze{IsLocal: yyDollar[2].booleanUnion(), Table: yyDollar[4].tableName} } 
yyVAL.union = yyLOCAL - case 718: + case 721: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3967 +//line sql.y:3981 { yyLOCAL = &PurgeBinaryLogs{To: string(yyDollar[5].str)} } yyVAL.union = yyLOCAL - case 719: + case 722: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3971 +//line sql.y:3985 { yyLOCAL = &PurgeBinaryLogs{Before: string(yyDollar[5].str)} } yyVAL.union = yyLOCAL - case 720: + case 723: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3977 +//line sql.y:3991 { yyLOCAL = &Show{&ShowBasic{Command: Charset, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 721: + case 724: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3981 +//line sql.y:3995 { yyLOCAL = &Show{&ShowBasic{Command: Collation, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 722: + case 725: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3985 +//line sql.y:3999 { yyLOCAL = &Show{&ShowBasic{Full: yyDollar[2].booleanUnion(), Command: Column, Tbl: yyDollar[5].tableName, DbName: yyDollar[6].identifierCS, Filter: yyDollar[7].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 723: + case 726: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3989 +//line sql.y:4003 { yyLOCAL = &Show{&ShowBasic{Command: Database, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 724: + case 727: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3993 +//line sql.y:4007 { yyLOCAL = &Show{&ShowBasic{Command: Database, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 725: + case 728: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3997 +//line sql.y:4011 { yyLOCAL = &Show{&ShowBasic{Command: Keyspace, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 726: + case 729: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4001 +//line sql.y:4015 
{ yyLOCAL = &Show{&ShowBasic{Command: Keyspace, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 727: + case 730: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4005 +//line sql.y:4019 { yyLOCAL = &Show{&ShowBasic{Command: Function, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 728: + case 731: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:4009 +//line sql.y:4023 { yyLOCAL = &Show{&ShowBasic{Command: Index, Tbl: yyDollar[5].tableName, DbName: yyDollar[6].identifierCS, Filter: yyDollar[7].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 729: + case 732: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4013 +//line sql.y:4027 { yyLOCAL = &Show{&ShowBasic{Command: OpenTable, DbName: yyDollar[4].identifierCS, Filter: yyDollar[5].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 730: + case 733: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4017 +//line sql.y:4031 { yyLOCAL = &Show{&ShowBasic{Command: Privilege}} } yyVAL.union = yyLOCAL - case 731: + case 734: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4021 +//line sql.y:4035 { yyLOCAL = &Show{&ShowBasic{Command: Procedure, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 732: + case 735: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4025 +//line sql.y:4039 { yyLOCAL = &Show{&ShowBasic{Command: StatusSession, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 733: + case 736: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4029 +//line sql.y:4043 { yyLOCAL = &Show{&ShowBasic{Command: StatusGlobal, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 734: + case 737: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4033 +//line sql.y:4047 { yyLOCAL = &Show{&ShowBasic{Command: VariableSession, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union 
= yyLOCAL - case 735: + case 738: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4037 +//line sql.y:4051 { yyLOCAL = &Show{&ShowBasic{Command: VariableGlobal, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 736: + case 739: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4041 +//line sql.y:4055 { yyLOCAL = &Show{&ShowBasic{Command: TableStatus, DbName: yyDollar[4].identifierCS, Filter: yyDollar[5].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 737: + case 740: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4045 +//line sql.y:4059 { yyLOCAL = &Show{&ShowBasic{Command: Table, Full: yyDollar[2].booleanUnion(), DbName: yyDollar[4].identifierCS, Filter: yyDollar[5].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 738: + case 741: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4049 +//line sql.y:4063 { yyLOCAL = &Show{&ShowBasic{Command: Trigger, DbName: yyDollar[3].identifierCS, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 739: + case 742: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4053 +//line sql.y:4067 { yyLOCAL = &Show{&ShowCreate{Command: CreateDb, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 740: + case 743: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4057 +//line sql.y:4071 { yyLOCAL = &Show{&ShowCreate{Command: CreateE, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 741: + case 744: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4061 +//line sql.y:4075 { yyLOCAL = &Show{&ShowCreate{Command: CreateF, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 742: + case 745: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4065 +//line sql.y:4079 { yyLOCAL = &Show{&ShowCreate{Command: CreateProc, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 743: + case 746: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL 
Statement -//line sql.y:4069 +//line sql.y:4083 { yyLOCAL = &Show{&ShowCreate{Command: CreateTbl, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 744: + case 747: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4073 +//line sql.y:4087 { yyLOCAL = &Show{&ShowCreate{Command: CreateTr, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 745: + case 748: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4077 +//line sql.y:4091 { yyLOCAL = &Show{&ShowCreate{Command: CreateV, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 746: + case 749: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4081 +//line sql.y:4095 { yyLOCAL = &Show{&ShowBasic{Command: Engines}} } yyVAL.union = yyLOCAL - case 747: + case 750: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4085 +//line sql.y:4099 { yyLOCAL = &Show{&ShowBasic{Command: Plugins}} } yyVAL.union = yyLOCAL - case 748: + case 751: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4089 +//line sql.y:4103 { yyLOCAL = &Show{&ShowBasic{Command: GtidExecGlobal, DbName: yyDollar[4].identifierCS}} } yyVAL.union = yyLOCAL - case 749: + case 752: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4093 +//line sql.y:4107 { yyLOCAL = &Show{&ShowBasic{Command: VGtidExecGlobal, DbName: yyDollar[4].identifierCS}} } yyVAL.union = yyLOCAL - case 750: + case 753: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4097 +//line sql.y:4111 { yyLOCAL = &Show{&ShowBasic{Command: VitessVariables, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 751: + case 754: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4101 +//line sql.y:4115 { yyLOCAL = &Show{&ShowBasic{Command: VitessMigrations, Filter: yyDollar[4].showFilterUnion(), DbName: yyDollar[3].identifierCS}} } yyVAL.union = yyLOCAL - case 752: + case 755: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL 
Statement -//line sql.y:4105 +//line sql.y:4119 { yyLOCAL = &ShowMigrationLogs{UUID: string(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 753: + case 756: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4109 +//line sql.y:4123 { yyLOCAL = &ShowThrottledApps{} } yyVAL.union = yyLOCAL - case 754: + case 757: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4113 +//line sql.y:4127 { yyLOCAL = &Show{&ShowBasic{Command: VitessReplicationStatus, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 755: + case 758: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4117 +//line sql.y:4131 { yyLOCAL = &ShowThrottlerStatus{} } yyVAL.union = yyLOCAL - case 756: + case 759: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4121 +//line sql.y:4135 { yyLOCAL = &Show{&ShowBasic{Command: VschemaTables}} } yyVAL.union = yyLOCAL - case 757: + case 760: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4125 +//line sql.y:4139 { yyLOCAL = &Show{&ShowBasic{Command: VschemaVindexes}} } yyVAL.union = yyLOCAL - case 758: + case 761: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4129 +//line sql.y:4143 { yyLOCAL = &Show{&ShowBasic{Command: VschemaVindexes, Tbl: yyDollar[5].tableName}} } yyVAL.union = yyLOCAL - case 759: + case 762: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4133 +//line sql.y:4147 { yyLOCAL = &Show{&ShowBasic{Command: Warnings}} } yyVAL.union = yyLOCAL - case 760: + case 763: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4137 +//line sql.y:4151 { yyLOCAL = &Show{&ShowBasic{Command: VitessShards, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 761: + case 764: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4141 +//line sql.y:4155 { yyLOCAL = &Show{&ShowBasic{Command: VitessTablets, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 
762: + case 765: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4145 +//line sql.y:4159 { yyLOCAL = &Show{&ShowBasic{Command: VitessTarget}} } yyVAL.union = yyLOCAL - case 763: + case 766: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4152 +//line sql.y:4166 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].identifierCI.String())}} } yyVAL.union = yyLOCAL - case 764: + case 767: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4156 +//line sql.y:4170 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str)}} } yyVAL.union = yyLOCAL - case 765: + case 768: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4160 +//line sql.y:4174 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + yyDollar[3].identifierCI.String()}} } yyVAL.union = yyLOCAL - case 766: + case 769: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4164 +//line sql.y:4178 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str)}} } yyVAL.union = yyLOCAL - case 767: + case 770: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4168 +//line sql.y:4182 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str)}} } yyVAL.union = yyLOCAL - case 768: + case 771: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4172 +//line sql.y:4186 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str) + " " + String(yyDollar[4].tableName)}} } yyVAL.union = yyLOCAL - case 769: + case 772: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4176 +//line sql.y:4190 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str) + " " + String(yyDollar[4].tableName)}} } yyVAL.union = yyLOCAL - case 770: + case 773: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4180 +//line sql.y:4194 { 
yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[3].str)}} } yyVAL.union = yyLOCAL - case 771: + case 774: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4184 +//line sql.y:4198 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str)}} } yyVAL.union = yyLOCAL - case 772: + case 775: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4190 +//line sql.y:4204 { yyVAL.str = "" } - case 773: + case 776: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4194 +//line sql.y:4208 { yyVAL.str = "extended " } - case 774: + case 777: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:4200 +//line sql.y:4214 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 775: + case 778: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:4204 +//line sql.y:4218 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 776: + case 779: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4210 +//line sql.y:4224 { yyVAL.str = string(yyDollar[1].str) } - case 777: + case 780: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4214 +//line sql.y:4228 { yyVAL.str = string(yyDollar[1].str) } - case 778: + case 781: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4220 +//line sql.y:4234 { yyVAL.identifierCS = NewIdentifierCS("") } - case 779: + case 782: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4224 +//line sql.y:4238 { yyVAL.identifierCS = yyDollar[2].identifierCS } - case 780: + case 783: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4228 +//line sql.y:4242 { yyVAL.identifierCS = yyDollar[2].identifierCS } - case 781: + case 784: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ShowFilter -//line sql.y:4234 +//line sql.y:4248 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 782: + case 785: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ShowFilter -//line sql.y:4238 +//line sql.y:4252 { yyLOCAL = &ShowFilter{Like: string(yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 783: + case 786: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ShowFilter -//line sql.y:4242 +//line sql.y:4256 { 
yyLOCAL = &ShowFilter{Filter: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 784: + case 787: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ShowFilter -//line sql.y:4248 +//line sql.y:4262 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 785: + case 788: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ShowFilter -//line sql.y:4252 +//line sql.y:4266 { yyLOCAL = &ShowFilter{Like: string(yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 786: + case 789: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4258 +//line sql.y:4272 { yyVAL.empty = struct{}{} } - case 787: + case 790: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4262 +//line sql.y:4276 { yyVAL.empty = struct{}{} } - case 788: + case 791: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4266 +//line sql.y:4280 { yyVAL.empty = struct{}{} } - case 789: + case 792: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4272 +//line sql.y:4286 { yyVAL.str = string(yyDollar[1].str) } - case 790: + case 793: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4276 +//line sql.y:4290 { yyVAL.str = string(yyDollar[1].str) } - case 791: + case 794: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4282 +//line sql.y:4296 { yyLOCAL = &Use{DBName: yyDollar[2].identifierCS} } yyVAL.union = yyLOCAL - case 792: + case 795: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4286 +//line sql.y:4300 { yyLOCAL = &Use{DBName: IdentifierCS{v: ""}} } yyVAL.union = yyLOCAL - case 793: + case 796: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4290 +//line sql.y:4304 { yyLOCAL = &Use{DBName: NewIdentifierCS(yyDollar[2].identifierCS.String() + "@" + string(yyDollar[3].str))} } yyVAL.union = yyLOCAL - case 794: + case 797: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4297 +//line sql.y:4311 { yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str)) } - case 795: + case 798: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4301 +//line sql.y:4315 { yyVAL.identifierCS = NewIdentifierCS("@" + 
string(yyDollar[1].str)) } - case 796: + case 799: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4305 +//line sql.y:4319 { yyVAL.identifierCS = NewIdentifierCS("@@" + string(yyDollar[1].str)) } - case 797: + case 800: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4309 +//line sql.y:4323 { yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str)) } - case 798: + case 801: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4316 +//line sql.y:4330 { yyLOCAL = &Begin{} } yyVAL.union = yyLOCAL - case 799: + case 802: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4320 +//line sql.y:4334 { yyLOCAL = &Begin{TxAccessModes: yyDollar[3].txAccessModesUnion()} } yyVAL.union = yyLOCAL - case 800: + case 803: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []TxAccessMode -//line sql.y:4325 +//line sql.y:4339 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 801: + case 804: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []TxAccessMode -//line sql.y:4329 +//line sql.y:4343 { yyLOCAL = yyDollar[1].txAccessModesUnion() } yyVAL.union = yyLOCAL - case 802: + case 805: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []TxAccessMode -//line sql.y:4335 +//line sql.y:4349 { yyLOCAL = []TxAccessMode{yyDollar[1].txAccessModeUnion()} } yyVAL.union = yyLOCAL - case 803: + case 806: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4339 +//line sql.y:4353 { yySLICE := (*[]TxAccessMode)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].txAccessModeUnion()) } - case 804: + case 807: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL TxAccessMode -//line sql.y:4345 +//line sql.y:4359 { yyLOCAL = WithConsistentSnapshot } yyVAL.union = yyLOCAL - case 805: + case 808: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL TxAccessMode -//line sql.y:4349 +//line sql.y:4363 { yyLOCAL = ReadWrite } yyVAL.union = yyLOCAL - case 806: + case 809: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL TxAccessMode -//line sql.y:4353 +//line sql.y:4367 { yyLOCAL = ReadOnly } yyVAL.union = yyLOCAL - 
case 807: + case 810: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4360 +//line sql.y:4374 { yyLOCAL = &Commit{} } yyVAL.union = yyLOCAL - case 808: + case 811: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4366 +//line sql.y:4380 { yyLOCAL = &Rollback{} } yyVAL.union = yyLOCAL - case 809: + case 812: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4370 +//line sql.y:4384 { yyLOCAL = &SRollback{Name: yyDollar[5].identifierCI} } yyVAL.union = yyLOCAL - case 810: + case 813: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4375 +//line sql.y:4389 { yyVAL.empty = struct{}{} } - case 811: + case 814: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4377 +//line sql.y:4391 { yyVAL.empty = struct{}{} } - case 812: + case 815: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4380 +//line sql.y:4394 { yyVAL.empty = struct{}{} } - case 813: + case 816: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4382 +//line sql.y:4396 { yyVAL.empty = struct{}{} } - case 814: + case 817: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4386 +//line sql.y:4400 { yyLOCAL = &Savepoint{Name: yyDollar[2].identifierCI} } yyVAL.union = yyLOCAL - case 815: + case 818: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4392 +//line sql.y:4406 { yyLOCAL = &Release{Name: yyDollar[3].identifierCI} } yyVAL.union = yyLOCAL - case 816: + case 819: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL ExplainType -//line sql.y:4397 +//line sql.y:4411 { yyLOCAL = EmptyType } yyVAL.union = yyLOCAL - case 817: + case 820: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ExplainType -//line sql.y:4401 +//line sql.y:4415 { yyLOCAL = JSONType } yyVAL.union = yyLOCAL - case 818: + case 821: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ExplainType -//line sql.y:4405 +//line sql.y:4419 { yyLOCAL = TreeType } yyVAL.union = yyLOCAL - case 819: + case 822: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ExplainType -//line sql.y:4409 +//line 
sql.y:4423 { yyLOCAL = VitessType } yyVAL.union = yyLOCAL - case 820: + case 823: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ExplainType -//line sql.y:4413 +//line sql.y:4427 { yyLOCAL = VTExplainType } yyVAL.union = yyLOCAL - case 821: + case 824: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ExplainType -//line sql.y:4417 +//line sql.y:4431 { yyLOCAL = TraditionalType } yyVAL.union = yyLOCAL - case 822: + case 825: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ExplainType -//line sql.y:4421 +//line sql.y:4435 { yyLOCAL = AnalyzeType } yyVAL.union = yyLOCAL - case 823: + case 826: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL VExplainType -//line sql.y:4426 +//line sql.y:4440 { yyLOCAL = PlanVExplainType } yyVAL.union = yyLOCAL - case 824: + case 827: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL VExplainType -//line sql.y:4430 +//line sql.y:4444 { yyLOCAL = PlanVExplainType } yyVAL.union = yyLOCAL - case 825: + case 828: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL VExplainType -//line sql.y:4434 +//line sql.y:4448 { yyLOCAL = AllVExplainType } yyVAL.union = yyLOCAL - case 826: + case 829: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL VExplainType -//line sql.y:4438 +//line sql.y:4452 { yyLOCAL = QueriesVExplainType } yyVAL.union = yyLOCAL - case 827: + case 830: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4444 +//line sql.y:4458 { yyVAL.str = yyDollar[1].str } - case 828: + case 831: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4448 +//line sql.y:4462 { yyVAL.str = yyDollar[1].str } - case 829: + case 832: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4452 +//line sql.y:4466 { yyVAL.str = yyDollar[1].str } - case 830: + case 833: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4458 +//line sql.y:4472 { yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 831: + case 834: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4462 +//line sql.y:4476 { yyLOCAL = yyDollar[1].statementUnion() } yyVAL.union = yyLOCAL - case 
832: + case 835: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4466 +//line sql.y:4480 { yyLOCAL = yyDollar[1].statementUnion() } yyVAL.union = yyLOCAL - case 833: + case 836: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4470 +//line sql.y:4484 { yyLOCAL = yyDollar[1].statementUnion() } yyVAL.union = yyLOCAL - case 834: + case 837: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4475 +//line sql.y:4489 { yyVAL.str = "" } - case 835: + case 838: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4479 +//line sql.y:4493 { yyVAL.str = yyDollar[1].identifierCI.val } - case 836: + case 839: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4483 +//line sql.y:4497 { yyVAL.str = encodeSQLString(yyDollar[1].str) } - case 837: + case 840: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4489 +//line sql.y:4503 { yyLOCAL = &ExplainTab{Table: yyDollar[3].tableName, Wild: yyDollar[4].str} } yyVAL.union = yyLOCAL - case 838: + case 841: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4493 +//line sql.y:4507 { yyLOCAL = &ExplainStmt{Type: yyDollar[3].explainTypeUnion(), Statement: yyDollar[4].statementUnion(), Comments: Comments(yyDollar[2].strs).Parsed()} } yyVAL.union = yyLOCAL - case 839: + case 842: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4499 +//line sql.y:4513 { yyLOCAL = &VExplainStmt{Type: yyDollar[3].vexplainTypeUnion(), Statement: yyDollar[4].statementUnion(), Comments: Comments(yyDollar[2].strs).Parsed()} } yyVAL.union = yyLOCAL - case 840: + case 843: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4505 +//line sql.y:4519 { yyLOCAL = &OtherAdmin{} } yyVAL.union = yyLOCAL - case 841: + case 844: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4509 +//line sql.y:4523 { yyLOCAL = &OtherAdmin{} } yyVAL.union = yyLOCAL - case 842: + case 845: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4515 +//line sql.y:4529 
{ yyLOCAL = &LockTables{Tables: yyDollar[3].tableAndLockTypesUnion()} } yyVAL.union = yyLOCAL - case 843: + case 846: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableAndLockTypes -//line sql.y:4521 +//line sql.y:4535 { yyLOCAL = TableAndLockTypes{yyDollar[1].tableAndLockTypeUnion()} } yyVAL.union = yyLOCAL - case 844: + case 847: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4525 +//line sql.y:4539 { yySLICE := (*TableAndLockTypes)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].tableAndLockTypeUnion()) } - case 845: + case 848: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *TableAndLockType -//line sql.y:4531 +//line sql.y:4545 { yyLOCAL = &TableAndLockType{Table: yyDollar[1].aliasedTableNameUnion(), Lock: yyDollar[2].lockTypeUnion()} } yyVAL.union = yyLOCAL - case 846: + case 849: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL LockType -//line sql.y:4537 +//line sql.y:4551 { yyLOCAL = Read } yyVAL.union = yyLOCAL - case 847: + case 850: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL LockType -//line sql.y:4541 +//line sql.y:4555 { yyLOCAL = ReadLocal } yyVAL.union = yyLOCAL - case 848: + case 851: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL LockType -//line sql.y:4545 +//line sql.y:4559 { yyLOCAL = Write } yyVAL.union = yyLOCAL - case 849: + case 852: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL LockType -//line sql.y:4549 +//line sql.y:4563 { yyLOCAL = LowPriorityWrite } yyVAL.union = yyLOCAL - case 850: + case 853: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4555 +//line sql.y:4569 { yyLOCAL = &UnlockTables{} } yyVAL.union = yyLOCAL - case 851: + case 854: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4561 +//line sql.y:4575 { yyLOCAL = &RevertMigration{Comments: Comments(yyDollar[2].strs).Parsed(), UUID: string(yyDollar[4].str)} } yyVAL.union = yyLOCAL - case 852: + case 855: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4567 +//line sql.y:4581 { yyLOCAL = &Flush{IsLocal: 
yyDollar[2].booleanUnion(), FlushOptions: yyDollar[3].strs} } yyVAL.union = yyLOCAL - case 853: + case 856: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4571 +//line sql.y:4585 { yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion()} } yyVAL.union = yyLOCAL - case 854: + case 857: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:4575 +//line sql.y:4589 { yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), WithLock: true} } yyVAL.union = yyLOCAL - case 855: + case 858: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4579 +//line sql.y:4593 { yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), TableNames: yyDollar[4].tableNamesUnion()} } yyVAL.union = yyLOCAL - case 856: + case 859: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:4583 +//line sql.y:4597 { yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), TableNames: yyDollar[4].tableNamesUnion(), WithLock: true} } yyVAL.union = yyLOCAL - case 857: + case 860: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:4587 +//line sql.y:4601 { yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), TableNames: yyDollar[4].tableNamesUnion(), ForExport: true} } yyVAL.union = yyLOCAL - case 858: + case 861: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4593 +//line sql.y:4607 { yyVAL.strs = []string{yyDollar[1].str} } - case 859: + case 862: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4597 +//line sql.y:4611 { yyVAL.strs = append(yyDollar[1].strs, yyDollar[3].str) } - case 860: + case 863: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4603 +//line sql.y:4617 { yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) } - case 861: + case 864: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4607 +//line sql.y:4621 { yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) } - case 862: + case 865: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4611 +//line sql.y:4625 { yyVAL.str = string(yyDollar[1].str) + " 
" + string(yyDollar[2].str) } - case 863: + case 866: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4615 +//line sql.y:4629 { yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) } - case 864: + case 867: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4619 +//line sql.y:4633 { yyVAL.str = string(yyDollar[1].str) } - case 865: + case 868: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4623 +//line sql.y:4637 { yyVAL.str = string(yyDollar[1].str) } - case 866: + case 869: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4627 +//line sql.y:4641 { yyVAL.str = string(yyDollar[1].str) } - case 867: + case 870: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4631 +//line sql.y:4645 { yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) + yyDollar[3].str } - case 868: + case 871: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4635 +//line sql.y:4649 { yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) } - case 869: + case 872: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4639 +//line sql.y:4653 { yyVAL.str = string(yyDollar[1].str) } - case 870: + case 873: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4643 +//line sql.y:4657 { yyVAL.str = string(yyDollar[1].str) } - case 871: + case 874: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4647 +//line sql.y:4661 { yyVAL.str = string(yyDollar[1].str) } - case 872: + case 875: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:4652 +//line sql.y:4666 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 873: + case 876: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:4656 +//line sql.y:4670 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 874: + case 877: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:4660 +//line sql.y:4674 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 875: + case 878: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4665 +//line sql.y:4679 { yyVAL.str = "" } - case 876: + case 879: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4669 
+//line sql.y:4683 { yyVAL.str = " " + string(yyDollar[1].str) + " " + string(yyDollar[2].str) + " " + yyDollar[3].identifierCI.String() } - case 877: + case 880: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4674 +//line sql.y:4688 { setAllowComments(yylex, true) } - case 878: + case 881: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4678 +//line sql.y:4692 { yyVAL.strs = yyDollar[2].strs setAllowComments(yylex, false) } - case 879: + case 882: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4684 +//line sql.y:4698 { yyVAL.strs = nil } - case 880: + case 883: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4688 +//line sql.y:4702 { yyVAL.strs = append(yyDollar[1].strs, yyDollar[2].str) } - case 881: + case 884: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:4694 +//line sql.y:4708 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 882: + case 885: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:4698 +//line sql.y:4712 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 883: + case 886: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:4702 +//line sql.y:4716 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 884: + case 887: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4707 +//line sql.y:4721 { yyVAL.str = "" } - case 885: + case 888: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4711 +//line sql.y:4725 { yyVAL.str = SQLNoCacheStr } - case 886: + case 889: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4715 +//line sql.y:4729 { yyVAL.str = SQLCacheStr } - case 887: + case 890: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:4720 +//line sql.y:4734 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 888: + case 891: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:4724 +//line sql.y:4738 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 889: + case 892: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:4728 +//line sql.y:4742 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 890: + case 893: yyDollar 
= yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4734 +//line sql.y:4748 { yyLOCAL = &PrepareStmt{Name: yyDollar[3].identifierCI, Comments: Comments(yyDollar[2].strs).Parsed(), Statement: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 891: + case 894: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4738 +//line sql.y:4752 { yyLOCAL = &PrepareStmt{ Name: yyDollar[3].identifierCI, @@ -16551,595 +16698,595 @@ yydefault: } } yyVAL.union = yyLOCAL - case 892: + case 895: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4748 +//line sql.y:4762 { yyLOCAL = &ExecuteStmt{Name: yyDollar[3].identifierCI, Comments: Comments(yyDollar[2].strs).Parsed(), Arguments: yyDollar[4].variablesUnion()} } yyVAL.union = yyLOCAL - case 893: + case 896: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []*Variable -//line sql.y:4753 +//line sql.y:4767 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 894: + case 897: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL []*Variable -//line sql.y:4757 +//line sql.y:4771 { yyLOCAL = yyDollar[2].variablesUnion() } yyVAL.union = yyLOCAL - case 895: + case 898: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4763 +//line sql.y:4777 { yyLOCAL = &DeallocateStmt{Comments: Comments(yyDollar[2].strs).Parsed(), Name: yyDollar[4].identifierCI} } yyVAL.union = yyLOCAL - case 896: + case 899: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4767 +//line sql.y:4781 { yyLOCAL = &DeallocateStmt{Comments: Comments(yyDollar[2].strs).Parsed(), Name: yyDollar[4].identifierCI} } yyVAL.union = yyLOCAL - case 897: + case 900: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL SelectExprs -//line sql.y:4772 +//line sql.y:4786 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 898: + case 901: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectExprs -//line sql.y:4776 +//line sql.y:4790 { yyLOCAL = yyDollar[1].selectExprsUnion() } yyVAL.union = yyLOCAL - case 899: + case 902: yyDollar = yyS[yypt-0 : 
yypt+1] -//line sql.y:4781 +//line sql.y:4795 { yyVAL.strs = nil } - case 900: + case 903: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4785 +//line sql.y:4799 { yyVAL.strs = []string{yyDollar[1].str} } - case 901: + case 904: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4789 +//line sql.y:4803 { // TODO: This is a hack since I couldn't get it to work in a nicer way. I got 'conflicts: 8 shift/reduce' yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str} } - case 902: + case 905: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4793 +//line sql.y:4807 { yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str, yyDollar[3].str} } - case 903: + case 906: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:4797 +//line sql.y:4811 { yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str, yyDollar[3].str, yyDollar[4].str} } - case 904: + case 907: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4803 +//line sql.y:4817 { yyVAL.str = SQLNoCacheStr } - case 905: + case 908: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4807 +//line sql.y:4821 { yyVAL.str = SQLCacheStr } - case 906: + case 909: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4811 +//line sql.y:4825 { yyVAL.str = DistinctStr } - case 907: + case 910: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4815 +//line sql.y:4829 { yyVAL.str = DistinctStr } - case 908: + case 911: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4819 +//line sql.y:4833 { yyVAL.str = StraightJoinHint } - case 909: + case 912: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4823 +//line sql.y:4837 { yyVAL.str = SQLCalcFoundRowsStr } - case 910: + case 913: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4827 +//line sql.y:4841 { yyVAL.str = AllStr // These are not picked up by NewSelect, and so ALL will be dropped. 
But this is OK, since it's redundant anyway } - case 911: + case 914: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectExprs -//line sql.y:4833 +//line sql.y:4847 { yyLOCAL = SelectExprs{yyDollar[1].selectExprUnion()} } yyVAL.union = yyLOCAL - case 912: + case 915: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4837 +//line sql.y:4851 { yySLICE := (*SelectExprs)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].selectExprUnion()) } - case 913: + case 916: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectExpr -//line sql.y:4843 +//line sql.y:4857 { yyLOCAL = &StarExpr{} } yyVAL.union = yyLOCAL - case 914: + case 917: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL SelectExpr -//line sql.y:4847 +//line sql.y:4861 { yyLOCAL = &AliasedExpr{Expr: yyDollar[1].exprUnion(), As: yyDollar[2].identifierCI} } yyVAL.union = yyLOCAL - case 915: + case 918: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectExpr -//line sql.y:4851 +//line sql.y:4865 { yyLOCAL = &StarExpr{TableName: TableName{Name: yyDollar[1].identifierCS}} } yyVAL.union = yyLOCAL - case 916: + case 919: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL SelectExpr -//line sql.y:4855 +//line sql.y:4869 { yyLOCAL = &StarExpr{TableName: TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS}} } yyVAL.union = yyLOCAL - case 917: + case 920: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4860 +//line sql.y:4874 { yyVAL.identifierCI = IdentifierCI{} } - case 918: + case 921: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4864 +//line sql.y:4878 { yyVAL.identifierCI = yyDollar[1].identifierCI } - case 919: + case 922: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4868 +//line sql.y:4882 { yyVAL.identifierCI = yyDollar[2].identifierCI } - case 921: + case 924: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4875 +//line sql.y:4889 { yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str)) } - case 922: + case 925: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL TableExprs -//line sql.y:4880 +//line 
sql.y:4894 { yyLOCAL = TableExprs{&AliasedTableExpr{Expr: TableName{Name: NewIdentifierCS("dual")}}} } yyVAL.union = yyLOCAL - case 923: + case 926: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableExprs -//line sql.y:4884 +//line sql.y:4898 { yyLOCAL = yyDollar[1].tableExprsUnion() } yyVAL.union = yyLOCAL - case 924: + case 927: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL TableExprs -//line sql.y:4890 +//line sql.y:4904 { yyLOCAL = yyDollar[2].tableExprsUnion() } yyVAL.union = yyLOCAL - case 925: + case 928: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableExprs -//line sql.y:4896 +//line sql.y:4910 { yyLOCAL = TableExprs{yyDollar[1].tableExprUnion()} } yyVAL.union = yyLOCAL - case 926: + case 929: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4900 +//line sql.y:4914 { yySLICE := (*TableExprs)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].tableExprUnion()) } - case 929: + case 932: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableExpr -//line sql.y:4910 +//line sql.y:4924 { yyLOCAL = yyDollar[1].aliasedTableNameUnion() } yyVAL.union = yyLOCAL - case 930: + case 933: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL TableExpr -//line sql.y:4914 +//line sql.y:4928 { yyLOCAL = &AliasedTableExpr{Expr: yyDollar[1].derivedTableUnion(), As: yyDollar[3].identifierCS, Columns: yyDollar[4].columnsUnion()} } yyVAL.union = yyLOCAL - case 931: + case 934: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL TableExpr -//line sql.y:4918 +//line sql.y:4932 { yyLOCAL = &ParenTableExpr{Exprs: yyDollar[2].tableExprsUnion()} } yyVAL.union = yyLOCAL - case 932: + case 935: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableExpr -//line sql.y:4922 +//line sql.y:4936 { yyLOCAL = yyDollar[1].tableExprUnion() } yyVAL.union = yyLOCAL - case 933: + case 936: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *DerivedTable -//line sql.y:4928 +//line sql.y:4942 { yyLOCAL = &DerivedTable{Lateral: false, Select: yyDollar[1].selStmtUnion()} } yyVAL.union = yyLOCAL - case 934: + case 937: yyDollar = 
yyS[yypt-2 : yypt+1] var yyLOCAL *DerivedTable -//line sql.y:4932 +//line sql.y:4946 { yyLOCAL = &DerivedTable{Lateral: true, Select: yyDollar[2].selStmtUnion()} } yyVAL.union = yyLOCAL - case 935: + case 938: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *AliasedTableExpr -//line sql.y:4938 +//line sql.y:4952 { yyLOCAL = &AliasedTableExpr{Expr: yyDollar[1].tableName, As: yyDollar[2].identifierCS, Hints: yyDollar[3].indexHintsUnion()} } yyVAL.union = yyLOCAL - case 936: + case 939: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *AliasedTableExpr -//line sql.y:4942 +//line sql.y:4956 { yyLOCAL = &AliasedTableExpr{Expr: yyDollar[1].tableName, Partitions: yyDollar[4].partitionsUnion(), As: yyDollar[6].identifierCS, Hints: yyDollar[7].indexHintsUnion()} } yyVAL.union = yyLOCAL - case 937: + case 940: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Columns -//line sql.y:4947 +//line sql.y:4961 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 938: + case 941: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Columns -//line sql.y:4951 +//line sql.y:4965 { yyLOCAL = yyDollar[2].columnsUnion() } yyVAL.union = yyLOCAL - case 939: + case 942: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Columns -//line sql.y:4956 +//line sql.y:4970 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 940: + case 943: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Columns -//line sql.y:4960 +//line sql.y:4974 { yyLOCAL = yyDollar[1].columnsUnion() } yyVAL.union = yyLOCAL - case 941: + case 944: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Columns -//line sql.y:4966 +//line sql.y:4980 { yyLOCAL = Columns{yyDollar[1].identifierCI} } yyVAL.union = yyLOCAL - case 942: + case 945: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4970 +//line sql.y:4984 { yySLICE := (*Columns)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].identifierCI) } - case 943: + case 946: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*Variable -//line sql.y:4976 +//line sql.y:4990 { yyLOCAL = []*Variable{yyDollar[1].variableUnion()} } 
yyVAL.union = yyLOCAL - case 944: + case 947: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4980 +//line sql.y:4994 { yySLICE := (*[]*Variable)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].variableUnion()) } - case 945: + case 948: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Columns -//line sql.y:4986 +//line sql.y:5000 { yyLOCAL = Columns{yyDollar[1].identifierCI} } yyVAL.union = yyLOCAL - case 946: + case 949: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Columns -//line sql.y:4990 +//line sql.y:5004 { yyLOCAL = Columns{NewIdentifierCI(string(yyDollar[1].str))} } yyVAL.union = yyLOCAL - case 947: + case 950: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4994 +//line sql.y:5008 { yySLICE := (*Columns)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].identifierCI) } - case 948: + case 951: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4998 +//line sql.y:5012 { yySLICE := (*Columns)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, NewIdentifierCI(string(yyDollar[3].str))) } - case 949: + case 952: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Partitions -//line sql.y:5004 +//line sql.y:5018 { yyLOCAL = Partitions{yyDollar[1].identifierCI} } yyVAL.union = yyLOCAL - case 950: + case 953: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5008 +//line sql.y:5022 { yySLICE := (*Partitions)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].identifierCI) } - case 951: + case 954: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL TableExpr -//line sql.y:5021 +//line sql.y:5035 { yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion(), Condition: yyDollar[4].joinCondition} } yyVAL.union = yyLOCAL - case 952: + case 955: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL TableExpr -//line sql.y:5025 +//line sql.y:5039 { yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion(), Condition: 
yyDollar[4].joinCondition} } yyVAL.union = yyLOCAL - case 953: + case 956: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL TableExpr -//line sql.y:5029 +//line sql.y:5043 { yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion(), Condition: yyDollar[4].joinCondition} } yyVAL.union = yyLOCAL - case 954: + case 957: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL TableExpr -//line sql.y:5033 +//line sql.y:5047 { yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion()} } yyVAL.union = yyLOCAL - case 955: + case 958: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:5039 +//line sql.y:5053 { yyVAL.joinCondition = &JoinCondition{On: yyDollar[2].exprUnion()} } - case 956: + case 959: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:5041 +//line sql.y:5055 { yyVAL.joinCondition = &JoinCondition{Using: yyDollar[3].columnsUnion()} } - case 957: + case 960: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:5045 +//line sql.y:5059 { yyVAL.joinCondition = &JoinCondition{} } - case 958: + case 961: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5047 +//line sql.y:5061 { yyVAL.joinCondition = yyDollar[1].joinCondition } - case 959: + case 962: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:5051 +//line sql.y:5065 { yyVAL.joinCondition = &JoinCondition{} } - case 960: + case 963: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:5053 +//line sql.y:5067 { yyVAL.joinCondition = &JoinCondition{On: yyDollar[2].exprUnion()} } - case 961: + case 964: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:5056 +//line sql.y:5070 { yyVAL.empty = struct{}{} } - case 962: + case 965: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5058 +//line sql.y:5072 { yyVAL.empty = struct{}{} } - case 963: + case 966: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:5061 +//line sql.y:5075 { yyVAL.identifierCS = NewIdentifierCS("") } - case 964: + case 967: yyDollar = 
yyS[yypt-1 : yypt+1] -//line sql.y:5065 +//line sql.y:5079 { yyVAL.identifierCS = yyDollar[1].identifierCS } - case 965: + case 968: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:5069 +//line sql.y:5083 { yyVAL.identifierCS = yyDollar[2].identifierCS } - case 967: + case 970: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5076 +//line sql.y:5090 { yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str)) } - case 968: + case 971: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL JoinType -//line sql.y:5082 +//line sql.y:5096 { yyLOCAL = NormalJoinType } yyVAL.union = yyLOCAL - case 969: + case 972: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL JoinType -//line sql.y:5086 +//line sql.y:5100 { yyLOCAL = NormalJoinType } yyVAL.union = yyLOCAL - case 970: + case 973: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL JoinType -//line sql.y:5090 +//line sql.y:5104 { yyLOCAL = NormalJoinType } yyVAL.union = yyLOCAL - case 971: + case 974: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL JoinType -//line sql.y:5096 +//line sql.y:5110 { yyLOCAL = StraightJoinType } yyVAL.union = yyLOCAL - case 972: + case 975: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL JoinType -//line sql.y:5102 +//line sql.y:5116 { yyLOCAL = LeftJoinType } yyVAL.union = yyLOCAL - case 973: + case 976: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL JoinType -//line sql.y:5106 +//line sql.y:5120 { yyLOCAL = LeftJoinType } yyVAL.union = yyLOCAL - case 974: + case 977: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL JoinType -//line sql.y:5110 +//line sql.y:5124 { yyLOCAL = RightJoinType } yyVAL.union = yyLOCAL - case 975: + case 978: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL JoinType -//line sql.y:5114 +//line sql.y:5128 { yyLOCAL = RightJoinType } yyVAL.union = yyLOCAL - case 976: + case 979: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL JoinType -//line sql.y:5120 +//line sql.y:5134 { yyLOCAL = NaturalJoinType } yyVAL.union = yyLOCAL - case 977: + case 980: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL JoinType -//line sql.y:5124 
+//line sql.y:5138 { if yyDollar[2].joinTypeUnion() == LeftJoinType { yyLOCAL = NaturalLeftJoinType @@ -17148,617 +17295,617 @@ yydefault: } } yyVAL.union = yyLOCAL - case 978: + case 981: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:5134 +//line sql.y:5148 { yyVAL.tableName = yyDollar[2].tableName } - case 979: + case 982: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5138 +//line sql.y:5152 { yyVAL.tableName = yyDollar[1].tableName } - case 980: + case 983: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5144 +//line sql.y:5158 { yyVAL.tableName = TableName{Name: yyDollar[1].identifierCS} } - case 981: + case 984: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5148 +//line sql.y:5162 { yyVAL.tableName = TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS} } - case 982: + case 985: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5154 +//line sql.y:5168 { yyVAL.tableName = TableName{Name: yyDollar[1].identifierCS} } - case 983: + case 986: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL IndexHints -//line sql.y:5159 +//line sql.y:5173 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 984: + case 987: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IndexHints -//line sql.y:5163 +//line sql.y:5177 { yyLOCAL = yyDollar[1].indexHintsUnion() } yyVAL.union = yyLOCAL - case 985: + case 988: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IndexHints -//line sql.y:5169 +//line sql.y:5183 { yyLOCAL = IndexHints{yyDollar[1].indexHintUnion()} } yyVAL.union = yyLOCAL - case 986: + case 989: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:5173 +//line sql.y:5187 { yySLICE := (*IndexHints)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].indexHintUnion()) } - case 987: + case 990: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *IndexHint -//line sql.y:5179 +//line sql.y:5193 { yyLOCAL = &IndexHint{Type: UseOp, ForType: yyDollar[3].indexHintForTypeUnion(), Indexes: yyDollar[5].columnsUnion()} } yyVAL.union = yyLOCAL - case 988: + case 991: yyDollar = yyS[yypt-5 : 
yypt+1] var yyLOCAL *IndexHint -//line sql.y:5183 +//line sql.y:5197 { yyLOCAL = &IndexHint{Type: UseOp, ForType: yyDollar[3].indexHintForTypeUnion()} } yyVAL.union = yyLOCAL - case 989: + case 992: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *IndexHint -//line sql.y:5187 +//line sql.y:5201 { yyLOCAL = &IndexHint{Type: IgnoreOp, ForType: yyDollar[3].indexHintForTypeUnion(), Indexes: yyDollar[5].columnsUnion()} } yyVAL.union = yyLOCAL - case 990: + case 993: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *IndexHint -//line sql.y:5191 +//line sql.y:5205 { yyLOCAL = &IndexHint{Type: ForceOp, ForType: yyDollar[3].indexHintForTypeUnion(), Indexes: yyDollar[5].columnsUnion()} } yyVAL.union = yyLOCAL - case 991: + case 994: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL IndexHintForType -//line sql.y:5196 +//line sql.y:5210 { yyLOCAL = NoForType } yyVAL.union = yyLOCAL - case 992: + case 995: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL IndexHintForType -//line sql.y:5200 +//line sql.y:5214 { yyLOCAL = JoinForType } yyVAL.union = yyLOCAL - case 993: + case 996: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL IndexHintForType -//line sql.y:5204 +//line sql.y:5218 { yyLOCAL = OrderByForType } yyVAL.union = yyLOCAL - case 994: + case 997: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL IndexHintForType -//line sql.y:5208 +//line sql.y:5222 { yyLOCAL = GroupByForType } yyVAL.union = yyLOCAL - case 995: + case 998: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Expr -//line sql.y:5214 +//line sql.y:5228 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 996: + case 999: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5218 +//line sql.y:5232 { yyLOCAL = yyDollar[2].exprUnion() } yyVAL.union = yyLOCAL - case 997: + case 1000: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5225 +//line sql.y:5239 { yyLOCAL = &OrExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 998: + case 1001: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL 
Expr -//line sql.y:5229 +//line sql.y:5243 { yyLOCAL = &XorExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 999: + case 1002: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5233 +//line sql.y:5247 { yyLOCAL = &AndExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1000: + case 1003: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5237 +//line sql.y:5251 { yyLOCAL = &NotExpr{Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 1001: + case 1004: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5241 +//line sql.y:5255 { yyLOCAL = &IsExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].isExprOperatorUnion()} } yyVAL.union = yyLOCAL - case 1002: + case 1005: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5245 +//line sql.y:5259 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1003: + case 1006: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5249 +//line sql.y:5263 { yyLOCAL = &AssignmentExpr{Left: yyDollar[1].variableUnion(), Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1004: + case 1007: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5253 +//line sql.y:5267 { yyLOCAL = &MemberOfExpr{Value: yyDollar[1].exprUnion(), JSONArr: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1005: + case 1008: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5259 +//line sql.y:5273 { yyLOCAL = &IsExpr{Left: yyDollar[1].exprUnion(), Right: IsNullOp} } yyVAL.union = yyLOCAL - case 1006: + case 1009: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5263 +//line sql.y:5277 { yyLOCAL = &IsExpr{Left: yyDollar[1].exprUnion(), Right: IsNotNullOp} } yyVAL.union = yyLOCAL - case 1007: + case 1010: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5267 +//line sql.y:5281 { yyLOCAL = &ComparisonExpr{Left: 
yyDollar[1].exprUnion(), Operator: yyDollar[2].comparisonExprOperatorUnion(), Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1008: + case 1011: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5271 +//line sql.y:5285 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1009: + case 1012: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5277 +//line sql.y:5291 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: InOp, Right: yyDollar[3].colTupleUnion()} } yyVAL.union = yyLOCAL - case 1010: + case 1013: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5281 +//line sql.y:5295 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotInOp, Right: yyDollar[4].colTupleUnion()} } yyVAL.union = yyLOCAL - case 1011: + case 1014: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5285 +//line sql.y:5299 { yyLOCAL = &BetweenExpr{Left: yyDollar[1].exprUnion(), IsBetween: true, From: yyDollar[3].exprUnion(), To: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1012: + case 1015: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5289 +//line sql.y:5303 { yyLOCAL = &BetweenExpr{Left: yyDollar[1].exprUnion(), IsBetween: false, From: yyDollar[4].exprUnion(), To: yyDollar[6].exprUnion()} } yyVAL.union = yyLOCAL - case 1013: + case 1016: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5293 +//line sql.y:5307 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: LikeOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1014: + case 1017: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5297 +//line sql.y:5311 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotLikeOp, Right: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1015: + case 1018: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5301 +//line sql.y:5315 { yyLOCAL = &ComparisonExpr{Left: 
yyDollar[1].exprUnion(), Operator: LikeOp, Right: yyDollar[3].exprUnion(), Escape: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1016: + case 1019: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5305 +//line sql.y:5319 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotLikeOp, Right: yyDollar[4].exprUnion(), Escape: yyDollar[6].exprUnion()} } yyVAL.union = yyLOCAL - case 1017: + case 1020: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5309 +//line sql.y:5323 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: RegexpOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1018: + case 1021: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5313 +//line sql.y:5327 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotRegexpOp, Right: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1019: + case 1022: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5317 +//line sql.y:5331 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1020: + case 1023: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5323 +//line sql.y:5337 { } - case 1021: + case 1024: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5326 +//line sql.y:5340 { } - case 1022: + case 1025: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5332 +//line sql.y:5346 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: BitOrOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1023: + case 1026: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5336 +//line sql.y:5350 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: BitAndOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1024: + case 1027: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5340 +//line sql.y:5354 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ShiftLeftOp, Right: yyDollar[3].exprUnion()} } 
yyVAL.union = yyLOCAL - case 1025: + case 1028: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5344 +//line sql.y:5358 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ShiftRightOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1026: + case 1029: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5348 +//line sql.y:5362 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: PlusOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1027: + case 1030: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5352 +//line sql.y:5366 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: MinusOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1028: + case 1031: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5356 +//line sql.y:5370 { - yyLOCAL = &DateAddExpr{Type: PlusIntervalRightType, Date: yyDollar[1].exprUnion(), Unit: yyDollar[5].intervalTypeUnion(), Expr: yyDollar[4].exprUnion()} + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprBinaryAdd, Date: yyDollar[1].exprUnion(), Unit: yyDollar[5].intervalTypeUnion(), Interval: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1029: + case 1032: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5360 +//line sql.y:5374 { - yyLOCAL = &DateSubExpr{Type: MinusIntervalRightType, Date: yyDollar[1].exprUnion(), Unit: yyDollar[5].intervalTypeUnion(), Expr: yyDollar[4].exprUnion()} + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprBinarySub, Date: yyDollar[1].exprUnion(), Unit: yyDollar[5].intervalTypeUnion(), Interval: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1030: + case 1033: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5364 +//line sql.y:5378 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: MultOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1031: + case 1034: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL 
Expr -//line sql.y:5368 +//line sql.y:5382 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: DivOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1032: + case 1035: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5372 +//line sql.y:5386 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ModOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1033: + case 1036: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5376 +//line sql.y:5390 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: IntDivOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1034: + case 1037: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5380 +//line sql.y:5394 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ModOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1035: + case 1038: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5384 +//line sql.y:5398 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: BitXorOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1036: + case 1039: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5388 +//line sql.y:5402 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1037: + case 1040: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5394 +//line sql.y:5408 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1038: + case 1041: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5398 +//line sql.y:5412 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1039: + case 1042: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5402 +//line sql.y:5416 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1040: + case 1043: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5406 +//line sql.y:5420 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = 
yyLOCAL - case 1041: + case 1044: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5410 +//line sql.y:5424 { yyLOCAL = &CollateExpr{Expr: yyDollar[1].exprUnion(), Collation: yyDollar[3].str} } yyVAL.union = yyLOCAL - case 1042: + case 1045: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5414 +//line sql.y:5428 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1043: + case 1046: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5418 +//line sql.y:5432 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1044: + case 1047: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5422 +//line sql.y:5436 { yyLOCAL = yyDollar[1].variableUnion() } yyVAL.union = yyLOCAL - case 1045: + case 1048: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5426 +//line sql.y:5440 { yyLOCAL = yyDollar[2].exprUnion() // TODO: do we really want to ignore unary '+' before any kind of literals? } yyVAL.union = yyLOCAL - case 1046: + case 1049: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5430 +//line sql.y:5444 { yyLOCAL = &UnaryExpr{Operator: UMinusOp, Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 1047: + case 1050: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5434 +//line sql.y:5448 { yyLOCAL = &UnaryExpr{Operator: TildaOp, Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 1048: + case 1051: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5438 +//line sql.y:5452 { yyLOCAL = &UnaryExpr{Operator: BangOp, Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 1049: + case 1052: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5442 +//line sql.y:5456 { yyLOCAL = yyDollar[1].subqueryUnion() } yyVAL.union = yyLOCAL - case 1050: + case 1053: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5446 +//line sql.y:5460 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1051: + 
case 1054: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5450 +//line sql.y:5464 { yyLOCAL = &ExistsExpr{Subquery: yyDollar[2].subqueryUnion()} } yyVAL.union = yyLOCAL - case 1052: + case 1055: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Expr -//line sql.y:5454 +//line sql.y:5468 { yyLOCAL = &MatchExpr{Columns: yyDollar[2].colNamesUnion(), Expr: yyDollar[5].exprUnion(), Option: yyDollar[6].matchExprOptionUnion()} } yyVAL.union = yyLOCAL - case 1053: + case 1056: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Expr -//line sql.y:5458 +//line sql.y:5472 { yyLOCAL = &CastExpr{Expr: yyDollar[3].exprUnion(), Type: yyDollar[5].convertTypeUnion(), Array: yyDollar[6].booleanUnion()} } yyVAL.union = yyLOCAL - case 1054: + case 1057: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5462 +//line sql.y:5476 { yyLOCAL = &ConvertExpr{Expr: yyDollar[3].exprUnion(), Type: yyDollar[5].convertTypeUnion()} } yyVAL.union = yyLOCAL - case 1055: + case 1058: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5466 +//line sql.y:5480 { yyLOCAL = &ConvertUsingExpr{Expr: yyDollar[3].exprUnion(), Type: yyDollar[5].str} } yyVAL.union = yyLOCAL - case 1056: + case 1059: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5470 +//line sql.y:5484 { // From: https://dev.mysql.com/doc/refman/8.0/en/cast-functions.html#operator_binary // To convert a string expression to a binary string, these constructs are equivalent: @@ -17767,3017 +17914,3169 @@ yydefault: yyLOCAL = &ConvertExpr{Expr: yyDollar[2].exprUnion(), Type: &ConvertType{Type: yyDollar[1].str}} } yyVAL.union = yyLOCAL - case 1057: + case 1060: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5478 +//line sql.y:5492 { yyLOCAL = &Default{ColName: yyDollar[2].str} } yyVAL.union = yyLOCAL - case 1058: + case 1061: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5482 +//line sql.y:5496 { - yyLOCAL = &DateAddExpr{Type: PlusIntervalLeftType, Date: 
yyDollar[5].exprUnion(), Unit: yyDollar[3].intervalTypeUnion(), Expr: yyDollar[2].exprUnion()} + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprBinaryAddLeft, Date: yyDollar[5].exprUnion(), Unit: yyDollar[3].intervalTypeUnion(), Interval: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 1059: + case 1062: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5486 +//line sql.y:5500 { yyLOCAL = &IntervalFuncExpr{Expr: yyDollar[3].exprUnion(), Exprs: yyDollar[5].exprsUnion()} } yyVAL.union = yyLOCAL - case 1060: + case 1063: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5490 +//line sql.y:5504 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: JSONExtractOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1061: + case 1064: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5494 +//line sql.y:5508 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: JSONUnquoteExtractOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1062: + case 1065: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*ColName -//line sql.y:5500 +//line sql.y:5514 { yyLOCAL = yyDollar[1].colNamesUnion() } yyVAL.union = yyLOCAL - case 1063: + case 1066: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL []*ColName -//line sql.y:5504 +//line sql.y:5518 { yyLOCAL = yyDollar[2].colNamesUnion() } yyVAL.union = yyLOCAL - case 1064: + case 1067: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*ColName -//line sql.y:5510 +//line sql.y:5524 { yyLOCAL = []*ColName{yyDollar[1].colNameUnion()} } yyVAL.union = yyLOCAL - case 1065: + case 1068: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5514 +//line sql.y:5528 { yySLICE := (*[]*ColName)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].colNameUnion()) } - case 1066: + case 1069: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TrimType -//line sql.y:5520 +//line sql.y:5534 { yyLOCAL = BothTrimType } yyVAL.union = yyLOCAL - case 1067: + case 1070: yyDollar = 
yyS[yypt-1 : yypt+1] var yyLOCAL TrimType -//line sql.y:5524 +//line sql.y:5538 { yyLOCAL = LeadingTrimType } yyVAL.union = yyLOCAL - case 1068: + case 1071: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TrimType -//line sql.y:5528 +//line sql.y:5542 { yyLOCAL = TrailingTrimType } yyVAL.union = yyLOCAL - case 1069: + case 1072: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL FrameUnitType -//line sql.y:5534 +//line sql.y:5548 { yyLOCAL = FrameRowsType } yyVAL.union = yyLOCAL - case 1070: + case 1073: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL FrameUnitType -//line sql.y:5538 +//line sql.y:5552 { yyLOCAL = FrameRangeType } yyVAL.union = yyLOCAL - case 1071: + case 1074: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ArgumentLessWindowExprType -//line sql.y:5545 +//line sql.y:5559 { yyLOCAL = CumeDistExprType } yyVAL.union = yyLOCAL - case 1072: + case 1075: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ArgumentLessWindowExprType -//line sql.y:5549 +//line sql.y:5563 { yyLOCAL = DenseRankExprType } yyVAL.union = yyLOCAL - case 1073: + case 1076: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ArgumentLessWindowExprType -//line sql.y:5553 +//line sql.y:5567 { yyLOCAL = PercentRankExprType } yyVAL.union = yyLOCAL - case 1074: + case 1077: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ArgumentLessWindowExprType -//line sql.y:5557 +//line sql.y:5571 { yyLOCAL = RankExprType } yyVAL.union = yyLOCAL - case 1075: + case 1078: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ArgumentLessWindowExprType -//line sql.y:5561 +//line sql.y:5575 { yyLOCAL = RowNumberExprType } yyVAL.union = yyLOCAL - case 1076: + case 1079: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *FramePoint -//line sql.y:5567 +//line sql.y:5581 { yyLOCAL = &FramePoint{Type: CurrentRowType} } yyVAL.union = yyLOCAL - case 1077: + case 1080: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *FramePoint -//line sql.y:5571 +//line sql.y:5585 { yyLOCAL = &FramePoint{Type: UnboundedPrecedingType} } yyVAL.union = yyLOCAL - case 1078: + case 
1081: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *FramePoint -//line sql.y:5575 +//line sql.y:5589 { yyLOCAL = &FramePoint{Type: UnboundedFollowingType} } yyVAL.union = yyLOCAL - case 1079: + case 1082: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *FramePoint -//line sql.y:5579 +//line sql.y:5593 { yyLOCAL = &FramePoint{Type: ExprPrecedingType, Expr: yyDollar[1].exprUnion()} } yyVAL.union = yyLOCAL - case 1080: + case 1083: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *FramePoint -//line sql.y:5583 +//line sql.y:5597 { yyLOCAL = &FramePoint{Type: ExprPrecedingType, Expr: yyDollar[2].exprUnion(), Unit: yyDollar[3].intervalTypeUnion()} } yyVAL.union = yyLOCAL - case 1081: + case 1084: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *FramePoint -//line sql.y:5587 +//line sql.y:5601 { yyLOCAL = &FramePoint{Type: ExprFollowingType, Expr: yyDollar[1].exprUnion()} } yyVAL.union = yyLOCAL - case 1082: + case 1085: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *FramePoint -//line sql.y:5591 +//line sql.y:5605 { yyLOCAL = &FramePoint{Type: ExprFollowingType, Expr: yyDollar[2].exprUnion(), Unit: yyDollar[3].intervalTypeUnion()} } yyVAL.union = yyLOCAL - case 1083: + case 1086: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *FrameClause -//line sql.y:5596 +//line sql.y:5610 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1084: + case 1087: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *FrameClause -//line sql.y:5600 +//line sql.y:5614 { yyLOCAL = yyDollar[1].frameClauseUnion() } yyVAL.union = yyLOCAL - case 1085: + case 1088: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *FrameClause -//line sql.y:5606 +//line sql.y:5620 { yyLOCAL = &FrameClause{Unit: yyDollar[1].frameUnitTypeUnion(), Start: yyDollar[2].framePointUnion()} } yyVAL.union = yyLOCAL - case 1086: + case 1089: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *FrameClause -//line sql.y:5610 +//line sql.y:5624 { yyLOCAL = &FrameClause{Unit: yyDollar[1].frameUnitTypeUnion(), Start: yyDollar[3].framePointUnion(), End: 
yyDollar[5].framePointUnion()} } yyVAL.union = yyLOCAL - case 1087: + case 1090: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Exprs -//line sql.y:5615 +//line sql.y:5629 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1088: + case 1091: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Exprs -//line sql.y:5619 +//line sql.y:5633 { yyLOCAL = yyDollar[3].exprsUnion() } yyVAL.union = yyLOCAL - case 1089: + case 1092: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:5624 +//line sql.y:5638 { } - case 1090: + case 1093: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5627 +//line sql.y:5641 { yyVAL.identifierCI = yyDollar[1].identifierCI } - case 1091: + case 1094: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *WindowSpecification -//line sql.y:5633 +//line sql.y:5647 { yyLOCAL = &WindowSpecification{Name: yyDollar[1].identifierCI, PartitionClause: yyDollar[2].exprsUnion(), OrderClause: yyDollar[3].orderByUnion(), FrameClause: yyDollar[4].frameClauseUnion()} } yyVAL.union = yyLOCAL - case 1092: + case 1095: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *OverClause -//line sql.y:5639 +//line sql.y:5653 { yyLOCAL = &OverClause{WindowSpec: yyDollar[3].windowSpecificationUnion()} } yyVAL.union = yyLOCAL - case 1093: + case 1096: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *OverClause -//line sql.y:5643 +//line sql.y:5657 { yyLOCAL = &OverClause{WindowName: yyDollar[2].identifierCI} } yyVAL.union = yyLOCAL - case 1094: + case 1097: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *NullTreatmentClause -//line sql.y:5648 +//line sql.y:5662 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1096: + case 1099: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *NullTreatmentClause -//line sql.y:5655 +//line sql.y:5669 { yyLOCAL = &NullTreatmentClause{yyDollar[1].nullTreatmentTypeUnion()} } yyVAL.union = yyLOCAL - case 1097: + case 1100: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL NullTreatmentType -//line sql.y:5661 +//line sql.y:5675 { yyLOCAL = RespectNullsType } yyVAL.union = yyLOCAL - case 1098: + case 
1101: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL NullTreatmentType -//line sql.y:5665 +//line sql.y:5679 { yyLOCAL = IgnoreNullsType } yyVAL.union = yyLOCAL - case 1099: + case 1102: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL FirstOrLastValueExprType -//line sql.y:5671 +//line sql.y:5685 { yyLOCAL = FirstValueExprType } yyVAL.union = yyLOCAL - case 1100: + case 1103: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL FirstOrLastValueExprType -//line sql.y:5675 +//line sql.y:5689 { yyLOCAL = LastValueExprType } yyVAL.union = yyLOCAL - case 1101: + case 1104: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL FromFirstLastType -//line sql.y:5681 +//line sql.y:5695 { yyLOCAL = FromFirstType } yyVAL.union = yyLOCAL - case 1102: + case 1105: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL FromFirstLastType -//line sql.y:5685 +//line sql.y:5699 { yyLOCAL = FromLastType } yyVAL.union = yyLOCAL - case 1103: + case 1106: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *FromFirstLastClause -//line sql.y:5690 +//line sql.y:5704 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1105: + case 1108: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *FromFirstLastClause -//line sql.y:5697 +//line sql.y:5711 { yyLOCAL = &FromFirstLastClause{yyDollar[1].fromFirstLastTypeUnion()} } yyVAL.union = yyLOCAL - case 1106: + case 1109: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL LagLeadExprType -//line sql.y:5703 +//line sql.y:5717 { yyLOCAL = LagExprType } yyVAL.union = yyLOCAL - case 1107: + case 1110: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL LagLeadExprType -//line sql.y:5707 +//line sql.y:5721 { yyLOCAL = LeadExprType } yyVAL.union = yyLOCAL - case 1108: + case 1111: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *WindowDefinition -//line sql.y:5713 +//line sql.y:5727 { yyLOCAL = &WindowDefinition{Name: yyDollar[1].identifierCI, WindowSpec: yyDollar[4].windowSpecificationUnion()} } yyVAL.union = yyLOCAL - case 1109: + case 1112: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL WindowDefinitions -//line sql.y:5719 
+//line sql.y:5733 { yyLOCAL = WindowDefinitions{yyDollar[1].windowDefinitionUnion()} } yyVAL.union = yyLOCAL - case 1110: + case 1113: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5723 +//line sql.y:5737 { yySLICE := (*WindowDefinitions)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].windowDefinitionUnion()) } - case 1111: + case 1114: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:5729 +//line sql.y:5743 { yyVAL.str = "" } - case 1112: + case 1115: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5733 +//line sql.y:5747 { yyVAL.str = string(yyDollar[2].identifierCI.String()) } - case 1113: + case 1116: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL BoolVal -//line sql.y:5739 +//line sql.y:5753 { yyLOCAL = BoolVal(true) } yyVAL.union = yyLOCAL - case 1114: + case 1117: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL BoolVal -//line sql.y:5743 +//line sql.y:5757 { yyLOCAL = BoolVal(false) } yyVAL.union = yyLOCAL - case 1115: + case 1118: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IsExprOperator -//line sql.y:5750 +//line sql.y:5764 { yyLOCAL = IsTrueOp } yyVAL.union = yyLOCAL - case 1116: + case 1119: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL IsExprOperator -//line sql.y:5754 +//line sql.y:5768 { yyLOCAL = IsNotTrueOp } yyVAL.union = yyLOCAL - case 1117: + case 1120: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IsExprOperator -//line sql.y:5758 +//line sql.y:5772 { yyLOCAL = IsFalseOp } yyVAL.union = yyLOCAL - case 1118: + case 1121: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL IsExprOperator -//line sql.y:5762 +//line sql.y:5776 { yyLOCAL = IsNotFalseOp } yyVAL.union = yyLOCAL - case 1119: + case 1122: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5768 +//line sql.y:5782 { yyLOCAL = EqualOp } yyVAL.union = yyLOCAL - case 1120: + case 1123: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5772 +//line sql.y:5786 { yyLOCAL = LessThanOp } yyVAL.union = yyLOCAL - case 1121: + case 1124: yyDollar 
= yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5776 +//line sql.y:5790 { yyLOCAL = GreaterThanOp } yyVAL.union = yyLOCAL - case 1122: + case 1125: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5780 +//line sql.y:5794 { yyLOCAL = LessEqualOp } yyVAL.union = yyLOCAL - case 1123: + case 1126: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5784 +//line sql.y:5798 { yyLOCAL = GreaterEqualOp } yyVAL.union = yyLOCAL - case 1124: + case 1127: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5788 +//line sql.y:5802 { yyLOCAL = NotEqualOp } yyVAL.union = yyLOCAL - case 1125: + case 1128: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5792 +//line sql.y:5806 { yyLOCAL = NullSafeEqualOp } yyVAL.union = yyLOCAL - case 1126: + case 1129: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColTuple -//line sql.y:5798 +//line sql.y:5812 { yyLOCAL = yyDollar[1].valTupleUnion() } yyVAL.union = yyLOCAL - case 1127: + case 1130: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColTuple -//line sql.y:5802 +//line sql.y:5816 { yyLOCAL = yyDollar[1].subqueryUnion() } yyVAL.union = yyLOCAL - case 1128: + case 1131: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColTuple -//line sql.y:5806 +//line sql.y:5820 { yyLOCAL = ListArg(yyDollar[1].str[2:]) markBindVariable(yylex, yyDollar[1].str[2:]) } yyVAL.union = yyLOCAL - case 1129: + case 1132: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Subquery -//line sql.y:5813 +//line sql.y:5827 { yyLOCAL = &Subquery{yyDollar[1].selStmtUnion()} } yyVAL.union = yyLOCAL - case 1130: + case 1133: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Exprs -//line sql.y:5819 +//line sql.y:5833 { yyLOCAL = Exprs{yyDollar[1].exprUnion()} } yyVAL.union = yyLOCAL - case 1131: + case 1134: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5823 +//line sql.y:5837 { yySLICE := (*Exprs)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, 
yyDollar[3].exprUnion()) } - case 1132: + case 1135: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5833 +//line sql.y:5847 { yyLOCAL = &FuncExpr{Name: yyDollar[1].identifierCI, Exprs: yyDollar[3].selectExprsUnion()} } yyVAL.union = yyLOCAL - case 1133: + case 1136: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5837 +//line sql.y:5851 { yyLOCAL = &FuncExpr{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCI, Exprs: yyDollar[5].selectExprsUnion()} } yyVAL.union = yyLOCAL - case 1134: + case 1137: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5847 +//line sql.y:5861 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("left"), Exprs: yyDollar[3].selectExprsUnion()} } yyVAL.union = yyLOCAL - case 1135: + case 1138: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5851 +//line sql.y:5865 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("right"), Exprs: yyDollar[3].selectExprsUnion()} } yyVAL.union = yyLOCAL - case 1136: + case 1139: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:5855 +//line sql.y:5869 { yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion(), To: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1137: + case 1140: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5859 +//line sql.y:5873 { yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1138: + case 1141: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:5863 +//line sql.y:5877 { yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion(), To: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1139: + case 1142: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5867 +//line sql.y:5881 { yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1140: + case 1143: yyDollar = yyS[yypt-5 : yypt+1] var 
yyLOCAL Expr -//line sql.y:5871 +//line sql.y:5885 { yyLOCAL = &CaseExpr{Expr: yyDollar[2].exprUnion(), Whens: yyDollar[3].whensUnion(), Else: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1141: + case 1144: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5875 +//line sql.y:5889 { yyLOCAL = &ValuesFuncExpr{Name: yyDollar[3].colNameUnion()} } yyVAL.union = yyLOCAL - case 1142: + case 1145: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL Expr -//line sql.y:5879 +//line sql.y:5893 { yyLOCAL = &InsertExpr{Str: yyDollar[3].exprUnion(), Pos: yyDollar[5].exprUnion(), Len: yyDollar[7].exprUnion(), NewStr: yyDollar[9].exprUnion()} } yyVAL.union = yyLOCAL - case 1143: + case 1146: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5883 +//line sql.y:5897 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1144: + case 1147: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5894 +//line sql.y:5908 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("utc_date")} } yyVAL.union = yyLOCAL - case 1145: + case 1148: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5898 +//line sql.y:5912 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1146: + case 1149: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5904 +//line sql.y:5918 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("current_date")} } yyVAL.union = yyLOCAL - case 1147: + case 1150: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5908 +//line sql.y:5922 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("curdate")} } yyVAL.union = yyLOCAL - case 1148: + case 1151: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5912 +//line sql.y:5926 { yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("utc_time"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 1149: + case 1152: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5917 +//line sql.y:5931 { yyLOCAL = 
&CurTimeFuncExpr{Name: NewIdentifierCI("curtime"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 1150: + case 1153: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5922 +//line sql.y:5936 { yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("current_time"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 1151: + case 1154: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5926 +//line sql.y:5940 { yyLOCAL = &CountStar{} } yyVAL.union = yyLOCAL - case 1152: + case 1155: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5930 +//line sql.y:5944 { yyLOCAL = &Count{Distinct: yyDollar[3].booleanUnion(), Args: yyDollar[4].exprsUnion()} } yyVAL.union = yyLOCAL - case 1153: + case 1156: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5934 +//line sql.y:5948 { yyLOCAL = &Max{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1154: + case 1157: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5938 +//line sql.y:5952 { yyLOCAL = &Min{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1155: + case 1158: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5942 +//line sql.y:5956 { yyLOCAL = &Sum{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1156: + case 1159: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5946 +//line sql.y:5960 { yyLOCAL = &Avg{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1157: + case 1160: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5950 +//line sql.y:5964 { yyLOCAL = &BitAnd{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1158: + case 1161: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5954 +//line sql.y:5968 { yyLOCAL = &BitOr{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - 
case 1159: + case 1162: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5958 +//line sql.y:5972 { yyLOCAL = &BitXor{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1160: + case 1163: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5962 +//line sql.y:5976 { yyLOCAL = &Std{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1161: + case 1164: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5966 +//line sql.y:5980 { yyLOCAL = &StdDev{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1162: + case 1165: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5970 +//line sql.y:5984 { yyLOCAL = &StdPop{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1163: + case 1166: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5974 +//line sql.y:5988 { yyLOCAL = &StdSamp{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1164: + case 1167: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5978 +//line sql.y:5992 { yyLOCAL = &VarPop{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1165: + case 1168: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5982 +//line sql.y:5996 { yyLOCAL = &VarSamp{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1166: + case 1169: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5986 +//line sql.y:6000 { yyLOCAL = &Variance{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1167: + case 1170: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:5990 +//line sql.y:6004 { yyLOCAL = &GroupConcatExpr{Distinct: yyDollar[3].booleanUnion(), Exprs: yyDollar[4].exprsUnion(), OrderBy: yyDollar[5].orderByUnion(), Separator: yyDollar[6].str, Limit: yyDollar[7].limitUnion()} } yyVAL.union = yyLOCAL - case 1168: + case 1171: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6008 + { + yyLOCAL = &AnyValue{Arg: yyDollar[3].exprUnion()} + } + 
yyVAL.union = yyLOCAL + case 1172: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:5994 +//line sql.y:6012 { - yyLOCAL = &TimestampFuncExpr{Name: string("timestampadd"), Unit: yyDollar[3].identifierCI.String(), Expr1: yyDollar[5].exprUnion(), Expr2: yyDollar[7].exprUnion()} + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprTimestampadd, Date: yyDollar[7].exprUnion(), Interval: yyDollar[5].exprUnion(), Unit: yyDollar[3].intervalTypeUnion()} } yyVAL.union = yyLOCAL - case 1169: + case 1173: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:5998 +//line sql.y:6016 { - yyLOCAL = &TimestampFuncExpr{Name: string("timestampdiff"), Unit: yyDollar[3].identifierCI.String(), Expr1: yyDollar[5].exprUnion(), Expr2: yyDollar[7].exprUnion()} + yyLOCAL = &TimestampDiffExpr{Unit: yyDollar[3].intervalTypeUnion(), Expr1: yyDollar[5].exprUnion(), Expr2: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1170: + case 1174: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6002 +//line sql.y:6020 { - yyLOCAL = &ExtractFuncExpr{IntervalTypes: yyDollar[3].intervalTypeUnion(), Expr: yyDollar[5].exprUnion()} + yyLOCAL = &ExtractFuncExpr{IntervalType: yyDollar[3].intervalTypeUnion(), Expr: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1171: + case 1175: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:6006 +//line sql.y:6024 { yyLOCAL = &WeightStringFuncExpr{Expr: yyDollar[3].exprUnion(), As: yyDollar[4].convertTypeUnion()} } yyVAL.union = yyLOCAL - case 1172: + case 1176: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6010 +//line sql.y:6028 { yyLOCAL = &JSONPrettyExpr{JSONVal: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1173: + case 1177: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6014 +//line sql.y:6032 { yyLOCAL = &JSONStorageFreeExpr{JSONVal: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1174: + case 1178: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr 
-//line sql.y:6018 +//line sql.y:6036 { yyLOCAL = &JSONStorageSizeExpr{JSONVal: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1175: + case 1179: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6022 +//line sql.y:6040 { yyLOCAL = &TrimFuncExpr{TrimFuncType: LTrimType, Type: LeadingTrimType, StringArg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1176: + case 1180: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6026 +//line sql.y:6044 { yyLOCAL = &TrimFuncExpr{TrimFuncType: RTrimType, Type: TrailingTrimType, StringArg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1177: + case 1181: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Expr -//line sql.y:6030 +//line sql.y:6048 { yyLOCAL = &TrimFuncExpr{Type: yyDollar[3].trimTypeUnion(), TrimArg: yyDollar[4].exprUnion(), StringArg: yyDollar[6].exprUnion()} } yyVAL.union = yyLOCAL - case 1178: + case 1182: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6034 +//line sql.y:6052 { yyLOCAL = &TrimFuncExpr{StringArg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1179: + case 1183: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6038 +//line sql.y:6056 { yyLOCAL = &CharExpr{Exprs: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1180: + case 1184: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6042 +//line sql.y:6060 { yyLOCAL = &CharExpr{Exprs: yyDollar[3].exprsUnion(), Charset: yyDollar[5].str} } yyVAL.union = yyLOCAL - case 1181: + case 1185: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6046 +//line sql.y:6064 { yyLOCAL = &TrimFuncExpr{TrimArg: yyDollar[3].exprUnion(), StringArg: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1182: + case 1186: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6050 +//line sql.y:6068 { yyLOCAL = &LocateExpr{SubStr: yyDollar[3].exprUnion(), Str: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1183: + case 1187: yyDollar = 
yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6054 +//line sql.y:6072 { yyLOCAL = &LocateExpr{SubStr: yyDollar[3].exprUnion(), Str: yyDollar[5].exprUnion(), Pos: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1184: + case 1188: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6058 +//line sql.y:6076 { yyLOCAL = &LocateExpr{SubStr: yyDollar[3].exprUnion(), Str: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1185: + case 1189: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6062 +//line sql.y:6080 { yyLOCAL = &LockingFunc{Type: GetLock, Name: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1186: + case 1190: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6066 +//line sql.y:6084 { yyLOCAL = &LockingFunc{Type: IsFreeLock, Name: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1187: + case 1191: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6070 +//line sql.y:6088 { yyLOCAL = &LockingFunc{Type: IsUsedLock, Name: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1188: + case 1192: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:6074 +//line sql.y:6092 { yyLOCAL = &LockingFunc{Type: ReleaseAllLocks} } yyVAL.union = yyLOCAL - case 1189: + case 1193: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6078 +//line sql.y:6096 { yyLOCAL = &LockingFunc{Type: ReleaseLock, Name: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1190: + case 1194: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6082 +//line sql.y:6100 { yyLOCAL = &JSONSchemaValidFuncExpr{Schema: yyDollar[3].exprUnion(), Document: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1191: + case 1195: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6086 +//line sql.y:6104 { yyLOCAL = &JSONSchemaValidationReportFuncExpr{Schema: yyDollar[3].exprUnion(), Document: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - 
case 1192: + case 1196: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6090 +//line sql.y:6108 { yyLOCAL = &JSONArrayExpr{Params: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1193: + case 1197: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6094 +//line sql.y:6112 { yyLOCAL = &GeomFormatExpr{FormatType: BinaryFormat, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1194: + case 1198: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6098 +//line sql.y:6116 { yyLOCAL = &GeomFormatExpr{FormatType: BinaryFormat, Geom: yyDollar[3].exprUnion(), AxisOrderOpt: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1195: + case 1199: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6102 +//line sql.y:6120 { yyLOCAL = &GeomFormatExpr{FormatType: TextFormat, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1196: + case 1200: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6106 +//line sql.y:6124 { yyLOCAL = &GeomFormatExpr{FormatType: TextFormat, Geom: yyDollar[3].exprUnion(), AxisOrderOpt: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1197: + case 1201: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6110 +//line sql.y:6128 { yyLOCAL = &GeomPropertyFuncExpr{Property: IsEmpty, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1198: + case 1202: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6114 +//line sql.y:6132 { yyLOCAL = &GeomPropertyFuncExpr{Property: IsSimple, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1199: + case 1203: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6118 +//line sql.y:6136 { yyLOCAL = &GeomPropertyFuncExpr{Property: Dimension, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1200: + case 1204: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6122 +//line sql.y:6140 { yyLOCAL = &GeomPropertyFuncExpr{Property: Envelope, Geom: 
yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1201: + case 1205: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6126 +//line sql.y:6144 { yyLOCAL = &GeomPropertyFuncExpr{Property: GeometryType, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1202: + case 1206: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6130 +//line sql.y:6148 { yyLOCAL = &PointPropertyFuncExpr{Property: Latitude, Point: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1203: + case 1207: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6134 +//line sql.y:6152 { yyLOCAL = &PointPropertyFuncExpr{Property: Latitude, Point: yyDollar[3].exprUnion(), ValueToSet: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1204: + case 1208: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6138 +//line sql.y:6156 { yyLOCAL = &PointPropertyFuncExpr{Property: Longitude, Point: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1205: + case 1209: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6142 +//line sql.y:6160 { yyLOCAL = &PointPropertyFuncExpr{Property: Longitude, Point: yyDollar[3].exprUnion(), ValueToSet: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1206: + case 1210: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6146 +//line sql.y:6164 { yyLOCAL = &LinestrPropertyFuncExpr{Property: EndPoint, Linestring: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1207: + case 1211: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6150 +//line sql.y:6168 { yyLOCAL = &LinestrPropertyFuncExpr{Property: IsClosed, Linestring: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1208: + case 1212: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6154 +//line sql.y:6172 { yyLOCAL = &LinestrPropertyFuncExpr{Property: Length, Linestring: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1209: + case 1213: yyDollar = yyS[yypt-6 : yypt+1] var 
yyLOCAL Expr -//line sql.y:6158 +//line sql.y:6176 { yyLOCAL = &LinestrPropertyFuncExpr{Property: Length, Linestring: yyDollar[3].exprUnion(), PropertyDefArg: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1210: + case 1214: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6162 +//line sql.y:6180 { yyLOCAL = &LinestrPropertyFuncExpr{Property: NumPoints, Linestring: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1211: + case 1215: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6166 +//line sql.y:6184 { yyLOCAL = &LinestrPropertyFuncExpr{Property: PointN, Linestring: yyDollar[3].exprUnion(), PropertyDefArg: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1212: + case 1216: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6170 +//line sql.y:6188 { yyLOCAL = &LinestrPropertyFuncExpr{Property: StartPoint, Linestring: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1213: + case 1217: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6174 +//line sql.y:6192 { yyLOCAL = &PointPropertyFuncExpr{Property: XCordinate, Point: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1214: + case 1218: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6178 +//line sql.y:6196 { yyLOCAL = &PointPropertyFuncExpr{Property: XCordinate, Point: yyDollar[3].exprUnion(), ValueToSet: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1215: + case 1219: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6182 +//line sql.y:6200 { yyLOCAL = &PointPropertyFuncExpr{Property: YCordinate, Point: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1216: + case 1220: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6186 +//line sql.y:6204 { yyLOCAL = &PointPropertyFuncExpr{Property: YCordinate, Point: yyDollar[3].exprUnion(), ValueToSet: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1217: + case 1221: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr 
-//line sql.y:6190 +//line sql.y:6208 { yyLOCAL = &GeomFromTextExpr{Type: GeometryFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1218: + case 1222: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6194 +//line sql.y:6212 { yyLOCAL = &GeomFromTextExpr{Type: GeometryFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1219: + case 1223: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6198 +//line sql.y:6216 { yyLOCAL = &GeomFromTextExpr{Type: GeometryFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1220: + case 1224: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6202 +//line sql.y:6220 { yyLOCAL = &GeomFromTextExpr{Type: GeometryCollectionFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1221: + case 1225: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6206 +//line sql.y:6224 { yyLOCAL = &GeomFromTextExpr{Type: GeometryCollectionFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1222: + case 1226: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6210 +//line sql.y:6228 { yyLOCAL = &GeomFromTextExpr{Type: GeometryCollectionFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1223: + case 1227: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6214 +//line sql.y:6232 { yyLOCAL = &GeomFromTextExpr{Type: LineStringFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1224: + case 1228: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6218 +//line sql.y:6236 { yyLOCAL = &GeomFromTextExpr{Type: LineStringFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 
1225: + case 1229: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6222 +//line sql.y:6240 { yyLOCAL = &GeomFromTextExpr{Type: LineStringFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1226: + case 1230: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6226 +//line sql.y:6244 { yyLOCAL = &GeomFromTextExpr{Type: MultiLinestringFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1227: + case 1231: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6230 +//line sql.y:6248 { yyLOCAL = &GeomFromTextExpr{Type: MultiLinestringFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1228: + case 1232: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6234 +//line sql.y:6252 { yyLOCAL = &GeomFromTextExpr{Type: MultiLinestringFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1229: + case 1233: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6238 +//line sql.y:6256 { yyLOCAL = &GeomFromTextExpr{Type: MultiPointFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1230: + case 1234: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6242 +//line sql.y:6260 { yyLOCAL = &GeomFromTextExpr{Type: MultiPointFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1231: + case 1235: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6246 +//line sql.y:6264 { yyLOCAL = &GeomFromTextExpr{Type: MultiPointFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1232: + case 1236: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6250 +//line sql.y:6268 { yyLOCAL = 
&GeomFromTextExpr{Type: MultiPolygonFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1233: + case 1237: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6254 +//line sql.y:6272 { yyLOCAL = &GeomFromTextExpr{Type: MultiPolygonFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1234: + case 1238: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6258 +//line sql.y:6276 { yyLOCAL = &GeomFromTextExpr{Type: MultiPolygonFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1235: + case 1239: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6262 +//line sql.y:6280 { yyLOCAL = &GeomFromTextExpr{Type: PointFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1236: + case 1240: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6266 +//line sql.y:6284 { yyLOCAL = &GeomFromTextExpr{Type: PointFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1237: + case 1241: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6270 +//line sql.y:6288 { yyLOCAL = &GeomFromTextExpr{Type: PointFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1238: + case 1242: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6274 +//line sql.y:6292 { yyLOCAL = &GeomFromTextExpr{Type: PolygonFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1239: + case 1243: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6278 +//line sql.y:6296 { yyLOCAL = &GeomFromTextExpr{Type: PolygonFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1240: + case 1244: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line 
sql.y:6282 +//line sql.y:6300 { yyLOCAL = &GeomFromTextExpr{Type: PolygonFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1241: + case 1245: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6286 +//line sql.y:6304 { yyLOCAL = &GeomFromWKBExpr{Type: GeometryFromWKB, WkbBlob: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1242: + case 1246: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6290 +//line sql.y:6308 { yyLOCAL = &GeomFromWKBExpr{Type: GeometryFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1243: + case 1247: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6294 +//line sql.y:6312 { yyLOCAL = &GeomFromWKBExpr{Type: GeometryFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1244: + case 1248: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6298 +//line sql.y:6316 { yyLOCAL = &GeomFromWKBExpr{Type: GeometryCollectionFromWKB, WkbBlob: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1245: + case 1249: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6302 +//line sql.y:6320 { yyLOCAL = &GeomFromWKBExpr{Type: GeometryCollectionFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1246: + case 1250: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6306 +//line sql.y:6324 { yyLOCAL = &GeomFromWKBExpr{Type: GeometryCollectionFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1247: + case 1251: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6310 +//line sql.y:6328 { yyLOCAL = &GeomFromWKBExpr{Type: LineStringFromWKB, WkbBlob: yyDollar[3].exprUnion()} } yyVAL.union = 
yyLOCAL - case 1248: + case 1252: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6314 +//line sql.y:6332 { yyLOCAL = &GeomFromWKBExpr{Type: LineStringFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1249: + case 1253: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6318 +//line sql.y:6336 { yyLOCAL = &GeomFromWKBExpr{Type: LineStringFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1250: + case 1254: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6322 +//line sql.y:6340 { yyLOCAL = &GeomFromWKBExpr{Type: MultiLinestringFromWKB, WkbBlob: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1251: + case 1255: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6326 +//line sql.y:6344 { yyLOCAL = &GeomFromWKBExpr{Type: MultiLinestringFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1252: + case 1256: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6330 +//line sql.y:6348 { yyLOCAL = &GeomFromWKBExpr{Type: MultiLinestringFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1253: + case 1257: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6334 +//line sql.y:6352 { yyLOCAL = &GeomFromWKBExpr{Type: MultiPointFromWKB, WkbBlob: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1254: + case 1258: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6338 +//line sql.y:6356 { yyLOCAL = &GeomFromWKBExpr{Type: MultiPointFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1255: + case 1259: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6342 +//line sql.y:6360 { yyLOCAL = &GeomFromWKBExpr{Type: MultiPointFromWKB, 
WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1256: + case 1260: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6346 +//line sql.y:6364 { yyLOCAL = &GeomFromWKBExpr{Type: MultiPolygonFromWKB, WkbBlob: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1257: + case 1261: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6350 +//line sql.y:6368 { yyLOCAL = &GeomFromWKBExpr{Type: MultiPolygonFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1258: + case 1262: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6354 +//line sql.y:6372 { yyLOCAL = &GeomFromWKBExpr{Type: MultiPolygonFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1259: + case 1263: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6358 +//line sql.y:6376 { yyLOCAL = &GeomFromWKBExpr{Type: PointFromWKB, WkbBlob: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1260: + case 1264: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6362 +//line sql.y:6380 { yyLOCAL = &GeomFromWKBExpr{Type: PointFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1261: + case 1265: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6366 +//line sql.y:6384 { yyLOCAL = &GeomFromWKBExpr{Type: PointFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1262: + case 1266: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6370 +//line sql.y:6388 { yyLOCAL = &GeomFromWKBExpr{Type: PolygonFromWKB, WkbBlob: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1263: + case 1267: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6374 +//line 
sql.y:6392 { yyLOCAL = &GeomFromWKBExpr{Type: PolygonFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1264: + case 1268: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6378 +//line sql.y:6396 { yyLOCAL = &GeomFromWKBExpr{Type: PolygonFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1265: + case 1269: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6382 +//line sql.y:6400 { yyLOCAL = &PolygonPropertyFuncExpr{Property: Area, Polygon: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1266: + case 1270: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6386 +//line sql.y:6404 { yyLOCAL = &PolygonPropertyFuncExpr{Property: Centroid, Polygon: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1267: + case 1271: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6390 +//line sql.y:6408 { yyLOCAL = &PolygonPropertyFuncExpr{Property: ExteriorRing, Polygon: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1268: + case 1272: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6394 +//line sql.y:6412 { yyLOCAL = &PolygonPropertyFuncExpr{Property: InteriorRingN, Polygon: yyDollar[3].exprUnion(), PropertyDefArg: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1269: + case 1273: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6398 +//line sql.y:6416 { yyLOCAL = &PolygonPropertyFuncExpr{Property: NumInteriorRings, Polygon: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1270: + case 1274: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6402 +//line sql.y:6420 { yyLOCAL = &GeomCollPropertyFuncExpr{Property: GeometryN, GeomColl: yyDollar[3].exprUnion(), PropertyDefArg: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1271: + case 1275: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line 
sql.y:6406 +//line sql.y:6424 { yyLOCAL = &GeomCollPropertyFuncExpr{Property: NumGeometries, GeomColl: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1272: + case 1276: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6410 +//line sql.y:6428 { yyLOCAL = &GeoHashFromLatLongExpr{Longitude: yyDollar[3].exprUnion(), Latitude: yyDollar[5].exprUnion(), MaxLength: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1273: + case 1277: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6414 +//line sql.y:6432 { yyLOCAL = &GeoHashFromPointExpr{Point: yyDollar[3].exprUnion(), MaxLength: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1274: + case 1278: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6418 +//line sql.y:6436 { yyLOCAL = &GeomFromGeoHashExpr{GeomType: LatitudeFromHash, GeoHash: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1275: + case 1279: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6422 +//line sql.y:6440 { yyLOCAL = &GeomFromGeoHashExpr{GeomType: LongitudeFromHash, GeoHash: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1276: + case 1280: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6426 +//line sql.y:6444 { yyLOCAL = &GeomFromGeoHashExpr{GeomType: PointFromHash, GeoHash: yyDollar[3].exprUnion(), SridOpt: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1277: + case 1281: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6430 +//line sql.y:6448 { yyLOCAL = &GeomFromGeoJSONExpr{GeoJSON: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1278: + case 1282: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6434 +//line sql.y:6452 { yyLOCAL = &GeomFromGeoJSONExpr{GeoJSON: yyDollar[3].exprUnion(), HigherDimHandlerOpt: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1279: + case 1283: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6438 +//line sql.y:6456 { yyLOCAL = 
&GeomFromGeoJSONExpr{GeoJSON: yyDollar[3].exprUnion(), HigherDimHandlerOpt: yyDollar[5].exprUnion(), Srid: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1280: + case 1284: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6442 +//line sql.y:6460 { yyLOCAL = &GeoJSONFromGeomExpr{Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1281: + case 1285: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6446 +//line sql.y:6464 { yyLOCAL = &GeoJSONFromGeomExpr{Geom: yyDollar[3].exprUnion(), MaxDecimalDigits: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1282: + case 1286: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6450 +//line sql.y:6468 { yyLOCAL = &GeoJSONFromGeomExpr{Geom: yyDollar[3].exprUnion(), MaxDecimalDigits: yyDollar[5].exprUnion(), Bitmask: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1283: + case 1287: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6454 +//line sql.y:6472 { yyLOCAL = &JSONObjectExpr{Params: yyDollar[3].jsonObjectParamsUnion()} } yyVAL.union = yyLOCAL - case 1284: + case 1288: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6458 +//line sql.y:6476 { yyLOCAL = &JSONQuoteExpr{StringArg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1285: + case 1289: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6462 +//line sql.y:6480 { yyLOCAL = &JSONContainsExpr{Target: yyDollar[3].exprUnion(), Candidate: yyDollar[5].exprsUnion()[0], PathList: yyDollar[5].exprsUnion()[1:]} } yyVAL.union = yyLOCAL - case 1286: + case 1290: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6466 +//line sql.y:6484 { yyLOCAL = &JSONContainsPathExpr{JSONDoc: yyDollar[3].exprUnion(), OneOrAll: yyDollar[5].exprUnion(), PathList: yyDollar[7].exprsUnion()} } yyVAL.union = yyLOCAL - case 1287: + case 1291: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6470 +//line sql.y:6488 { yyLOCAL = &JSONExtractExpr{JSONDoc: 
yyDollar[3].exprUnion(), PathList: yyDollar[5].exprsUnion()} } yyVAL.union = yyLOCAL - case 1288: + case 1292: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6474 +//line sql.y:6492 { yyLOCAL = &JSONKeysExpr{JSONDoc: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1289: + case 1293: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6478 +//line sql.y:6496 { yyLOCAL = &JSONKeysExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1290: + case 1294: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6482 +//line sql.y:6500 { yyLOCAL = &JSONOverlapsExpr{JSONDoc1: yyDollar[3].exprUnion(), JSONDoc2: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1291: + case 1295: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6486 +//line sql.y:6504 { yyLOCAL = &JSONSearchExpr{JSONDoc: yyDollar[3].exprUnion(), OneOrAll: yyDollar[5].exprUnion(), SearchStr: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1292: + case 1296: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL Expr -//line sql.y:6490 +//line sql.y:6508 { yyLOCAL = &JSONSearchExpr{JSONDoc: yyDollar[3].exprUnion(), OneOrAll: yyDollar[5].exprUnion(), SearchStr: yyDollar[7].exprUnion(), EscapeChar: yyDollar[9].exprsUnion()[0], PathList: yyDollar[9].exprsUnion()[1:]} } yyVAL.union = yyLOCAL - case 1293: + case 1297: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Expr -//line sql.y:6494 +//line sql.y:6512 { yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion()} } yyVAL.union = yyLOCAL - case 1294: + case 1298: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6498 +//line sql.y:6516 { yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion()} } yyVAL.union = yyLOCAL - case 1295: + case 1299: 
yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6502 +//line sql.y:6520 { yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion(), ErrorOnResponse: yyDollar[7].jtOnResponseUnion()} } yyVAL.union = yyLOCAL - case 1296: + case 1300: yyDollar = yyS[yypt-9 : yypt+1] var yyLOCAL Expr -//line sql.y:6506 +//line sql.y:6524 { yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion(), ErrorOnResponse: yyDollar[8].jtOnResponseUnion()} } yyVAL.union = yyLOCAL - case 1297: + case 1301: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6510 +//line sql.y:6528 { yyLOCAL = &JSONAttributesExpr{Type: DepthAttributeType, JSONDoc: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1298: + case 1302: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6514 +//line sql.y:6532 { yyLOCAL = &JSONAttributesExpr{Type: ValidAttributeType, JSONDoc: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1299: + case 1303: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6518 +//line sql.y:6536 { yyLOCAL = &JSONAttributesExpr{Type: TypeAttributeType, JSONDoc: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1300: + case 1304: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6522 +//line sql.y:6540 { yyLOCAL = &JSONAttributesExpr{Type: LengthAttributeType, JSONDoc: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1301: + case 1305: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6526 +//line sql.y:6544 { yyLOCAL = &JSONAttributesExpr{Type: LengthAttributeType, JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1302: + case 1306: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6530 +//line sql.y:6548 { yyLOCAL = 
&JSONValueModifierExpr{Type: JSONArrayAppendType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} } yyVAL.union = yyLOCAL - case 1303: + case 1307: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6534 +//line sql.y:6552 { yyLOCAL = &JSONValueModifierExpr{Type: JSONArrayInsertType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} } yyVAL.union = yyLOCAL - case 1304: + case 1308: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6538 +//line sql.y:6556 { yyLOCAL = &JSONValueModifierExpr{Type: JSONInsertType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} } yyVAL.union = yyLOCAL - case 1305: + case 1309: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6542 +//line sql.y:6560 { yyLOCAL = &JSONValueModifierExpr{Type: JSONReplaceType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} } yyVAL.union = yyLOCAL - case 1306: + case 1310: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6546 +//line sql.y:6564 { yyLOCAL = &JSONValueModifierExpr{Type: JSONSetType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} } yyVAL.union = yyLOCAL - case 1307: + case 1311: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6550 +//line sql.y:6568 { yyLOCAL = &JSONValueMergeExpr{Type: JSONMergeType, JSONDoc: yyDollar[3].exprUnion(), JSONDocList: yyDollar[5].exprsUnion()} } yyVAL.union = yyLOCAL - case 1308: + case 1312: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6554 +//line sql.y:6572 { yyLOCAL = &JSONValueMergeExpr{Type: JSONMergePatchType, JSONDoc: yyDollar[3].exprUnion(), JSONDocList: yyDollar[5].exprsUnion()} } yyVAL.union = yyLOCAL - case 1309: + case 1313: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6558 +//line sql.y:6576 { yyLOCAL = &JSONValueMergeExpr{Type: JSONMergePreserveType, JSONDoc: yyDollar[3].exprUnion(), JSONDocList: 
yyDollar[5].exprsUnion()} } yyVAL.union = yyLOCAL - case 1310: + case 1314: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6562 +//line sql.y:6580 { yyLOCAL = &JSONRemoveExpr{JSONDoc: yyDollar[3].exprUnion(), PathList: yyDollar[5].exprsUnion()} } yyVAL.union = yyLOCAL - case 1311: + case 1315: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6566 +//line sql.y:6584 { yyLOCAL = &JSONUnquoteExpr{JSONValue: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1312: + case 1316: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6570 +//line sql.y:6588 { yyLOCAL = &MultiPolygonExpr{PolygonParams: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1313: + case 1317: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6574 +//line sql.y:6592 { yyLOCAL = &MultiPointExpr{PointParams: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1314: + case 1318: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6578 +//line sql.y:6596 { yyLOCAL = &MultiLinestringExpr{LinestringParams: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1315: + case 1319: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6582 +//line sql.y:6600 { yyLOCAL = &PolygonExpr{LinestringParams: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1316: + case 1320: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6586 +//line sql.y:6604 { yyLOCAL = &LineStringExpr{PointParams: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1317: + case 1321: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6590 +//line sql.y:6608 { yyLOCAL = &PointExpr{XCordinate: yyDollar[3].exprUnion(), YCordinate: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1318: + case 1322: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6594 +//line sql.y:6612 { yyLOCAL = &ArgumentLessWindowExpr{Type: yyDollar[1].argumentLessWindowExprTypeUnion(), OverClause: 
yyDollar[4].overClauseUnion()} } yyVAL.union = yyLOCAL - case 1319: + case 1323: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6598 +//line sql.y:6616 { yyLOCAL = &FirstOrLastValueExpr{Type: yyDollar[1].firstOrLastValueExprTypeUnion(), Expr: yyDollar[3].exprUnion(), NullTreatmentClause: yyDollar[5].nullTreatmentClauseUnion(), OverClause: yyDollar[6].overClauseUnion()} } yyVAL.union = yyLOCAL - case 1320: + case 1324: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:6602 +//line sql.y:6620 { yyLOCAL = &NtileExpr{N: yyDollar[3].exprUnion(), OverClause: yyDollar[5].overClauseUnion()} } yyVAL.union = yyLOCAL - case 1321: + case 1325: yyDollar = yyS[yypt-9 : yypt+1] var yyLOCAL Expr -//line sql.y:6606 +//line sql.y:6624 { yyLOCAL = &NTHValueExpr{Expr: yyDollar[3].exprUnion(), N: yyDollar[5].exprUnion(), FromFirstLastClause: yyDollar[7].fromFirstLastClauseUnion(), NullTreatmentClause: yyDollar[8].nullTreatmentClauseUnion(), OverClause: yyDollar[9].overClauseUnion()} } yyVAL.union = yyLOCAL - case 1322: + case 1326: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6610 +//line sql.y:6628 { yyLOCAL = &LagLeadExpr{Type: yyDollar[1].lagLeadExprTypeUnion(), Expr: yyDollar[3].exprUnion(), NullTreatmentClause: yyDollar[5].nullTreatmentClauseUnion(), OverClause: yyDollar[6].overClauseUnion()} } yyVAL.union = yyLOCAL - case 1323: + case 1327: yyDollar = yyS[yypt-9 : yypt+1] var yyLOCAL Expr -//line sql.y:6614 +//line sql.y:6632 { yyLOCAL = &LagLeadExpr{Type: yyDollar[1].lagLeadExprTypeUnion(), Expr: yyDollar[3].exprUnion(), N: yyDollar[5].exprUnion(), Default: yyDollar[6].exprUnion(), NullTreatmentClause: yyDollar[8].nullTreatmentClauseUnion(), OverClause: yyDollar[9].overClauseUnion()} } yyVAL.union = yyLOCAL - case 1324: + case 1328: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6618 +//line sql.y:6636 { - yyLOCAL = &DateAddExpr{Type: AdddateType, Date: yyDollar[3].exprUnion(), Expr: yyDollar[6].exprUnion(), Unit: 
yyDollar[7].intervalTypeUnion()} + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprAdddate, Date: yyDollar[3].exprUnion(), Interval: yyDollar[6].exprUnion(), Unit: yyDollar[7].intervalTypeUnion()} } yyVAL.union = yyLOCAL - case 1325: + case 1329: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6622 +//line sql.y:6640 { - yyLOCAL = &DateAddExpr{Type: AdddateType, Date: yyDollar[3].exprUnion(), Expr: yyDollar[5].exprUnion()} + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprAdddate, Date: yyDollar[3].exprUnion(), Interval: yyDollar[5].exprUnion(), Unit: IntervalNone} } yyVAL.union = yyLOCAL - case 1326: + case 1330: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6626 +//line sql.y:6644 { - yyLOCAL = &DateAddExpr{Type: DateAddType, Date: yyDollar[3].exprUnion(), Expr: yyDollar[6].exprUnion(), Unit: yyDollar[7].intervalTypeUnion()} + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprDateAdd, Date: yyDollar[3].exprUnion(), Interval: yyDollar[6].exprUnion(), Unit: yyDollar[7].intervalTypeUnion()} } yyVAL.union = yyLOCAL - case 1327: + case 1331: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6630 +//line sql.y:6648 { - yyLOCAL = &DateSubExpr{Type: DateSubType, Date: yyDollar[3].exprUnion(), Expr: yyDollar[6].exprUnion(), Unit: yyDollar[7].intervalTypeUnion()} + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprDateSub, Date: yyDollar[3].exprUnion(), Interval: yyDollar[6].exprUnion(), Unit: yyDollar[7].intervalTypeUnion()} } yyVAL.union = yyLOCAL - case 1328: + case 1332: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6634 +//line sql.y:6652 { - yyLOCAL = &DateSubExpr{Type: SubdateType, Date: yyDollar[3].exprUnion(), Expr: yyDollar[6].exprUnion(), Unit: yyDollar[7].intervalTypeUnion()} + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprSubdate, Date: yyDollar[3].exprUnion(), Interval: yyDollar[6].exprUnion(), Unit: yyDollar[7].intervalTypeUnion()} } yyVAL.union = yyLOCAL - case 1329: + case 
1333: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6638 +//line sql.y:6656 { - yyLOCAL = &DateSubExpr{Type: SubdateType, Date: yyDollar[3].exprUnion(), Expr: yyDollar[5].exprUnion()} + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprSubdate, Date: yyDollar[3].exprUnion(), Interval: yyDollar[5].exprUnion(), Unit: IntervalNone} } yyVAL.union = yyLOCAL - case 1334: + case 1338: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:6648 +//line sql.y:6666 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1335: + case 1339: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:6652 +//line sql.y:6670 { yyLOCAL = NewIntLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 1336: + case 1340: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:6656 +//line sql.y:6674 { yyLOCAL = yyDollar[1].variableUnion() } yyVAL.union = yyLOCAL - case 1337: + case 1341: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:6660 +//line sql.y:6678 { yyLOCAL = parseBindVariable(yylex, yyDollar[1].str[1:]) } yyVAL.union = yyLOCAL - case 1338: + case 1342: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Expr -//line sql.y:6665 +//line sql.y:6683 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1339: + case 1343: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:6669 +//line sql.y:6687 { yyLOCAL = yyDollar[2].exprUnion() } yyVAL.union = yyLOCAL - case 1340: + case 1344: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6675 +//line sql.y:6693 { yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1341: + case 1345: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6679 +//line sql.y:6697 { yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1342: + case 1346: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL Expr 
-//line sql.y:6683 +//line sql.y:6701 { yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion()} } yyVAL.union = yyLOCAL - case 1343: + case 1347: yyDollar = yyS[yypt-12 : yypt+1] var yyLOCAL Expr -//line sql.y:6687 +//line sql.y:6705 { yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion(), ReturnOption: yyDollar[11].exprUnion()} } yyVAL.union = yyLOCAL - case 1344: + case 1348: yyDollar = yyS[yypt-14 : yypt+1] var yyLOCAL Expr -//line sql.y:6691 +//line sql.y:6709 { // Match type is kept expression as TRIM( ' m ') is accepted yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion(), ReturnOption: yyDollar[11].exprUnion(), MatchType: yyDollar[13].exprUnion()} } yyVAL.union = yyLOCAL - case 1345: + case 1349: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6696 +//line sql.y:6714 { yyLOCAL = &RegexpLikeExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1346: + case 1350: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6700 +//line sql.y:6718 { yyLOCAL = &RegexpLikeExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), MatchType: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1347: + case 1351: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6704 +//line sql.y:6722 { yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1348: + case 1352: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL Expr -//line sql.y:6708 +//line sql.y:6726 { yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: 
yyDollar[7].exprUnion(), Position: yyDollar[9].exprUnion()} } yyVAL.union = yyLOCAL - case 1349: + case 1353: yyDollar = yyS[yypt-12 : yypt+1] var yyLOCAL Expr -//line sql.y:6712 +//line sql.y:6730 { yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion(), Position: yyDollar[9].exprUnion(), Occurrence: yyDollar[11].exprUnion()} } yyVAL.union = yyLOCAL - case 1350: + case 1354: yyDollar = yyS[yypt-14 : yypt+1] var yyLOCAL Expr -//line sql.y:6716 +//line sql.y:6734 { // Match type is kept expression as TRIM( ' m ') is accepted yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion(), Position: yyDollar[9].exprUnion(), Occurrence: yyDollar[11].exprUnion(), MatchType: yyDollar[13].exprUnion()} } yyVAL.union = yyLOCAL - case 1351: + case 1355: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6721 +//line sql.y:6739 { yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1352: + case 1356: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6725 +//line sql.y:6743 { yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1353: + case 1357: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL Expr -//line sql.y:6729 +//line sql.y:6747 { yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion()} } yyVAL.union = yyLOCAL - case 1354: + case 1358: yyDollar = yyS[yypt-12 : yypt+1] var yyLOCAL Expr -//line sql.y:6733 +//line sql.y:6751 { // Match type is kept expression as TRIM( ' m ') is accepted yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: 
yyDollar[9].exprUnion(), MatchType: yyDollar[11].exprUnion()} } yyVAL.union = yyLOCAL - case 1355: + case 1359: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6740 +//line sql.y:6758 { yyLOCAL = &ExtractValueExpr{Fragment: yyDollar[3].exprUnion(), XPathExpr: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1356: + case 1360: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6744 +//line sql.y:6762 { yyLOCAL = &UpdateXMLExpr{Target: yyDollar[3].exprUnion(), XPathExpr: yyDollar[5].exprUnion(), NewXML: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1357: + case 1361: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6750 +//line sql.y:6768 { yyLOCAL = &PerformanceSchemaFuncExpr{Type: FormatBytesType, Argument: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1358: + case 1362: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6754 +//line sql.y:6772 { yyLOCAL = &PerformanceSchemaFuncExpr{Type: FormatPicoTimeType, Argument: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1359: + case 1363: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:6758 +//line sql.y:6776 { yyLOCAL = &PerformanceSchemaFuncExpr{Type: PsCurrentThreadIDType} } yyVAL.union = yyLOCAL - case 1360: + case 1364: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6762 +//line sql.y:6780 { yyLOCAL = &PerformanceSchemaFuncExpr{Type: PsThreadIDType, Argument: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1361: + case 1365: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6768 +//line sql.y:6786 { yyLOCAL = >IDFuncExpr{Type: GTIDSubsetType, Set1: yyDollar[3].exprUnion(), Set2: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1362: + case 1366: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6772 +//line sql.y:6790 { yyLOCAL = >IDFuncExpr{Type: GTIDSubtractType, Set1: yyDollar[3].exprUnion(), Set2: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 
1363: + case 1367: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6776 +//line sql.y:6794 { yyLOCAL = >IDFuncExpr{Type: WaitForExecutedGTIDSetType, Set1: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1364: + case 1368: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6780 +//line sql.y:6798 { yyLOCAL = >IDFuncExpr{Type: WaitForExecutedGTIDSetType, Set1: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1365: + case 1369: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6784 +//line sql.y:6802 { yyLOCAL = >IDFuncExpr{Type: WaitUntilSQLThreadAfterGTIDSType, Set1: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1366: + case 1370: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6788 +//line sql.y:6806 { yyLOCAL = >IDFuncExpr{Type: WaitUntilSQLThreadAfterGTIDSType, Set1: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1367: + case 1371: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6792 +//line sql.y:6810 { yyLOCAL = >IDFuncExpr{Type: WaitUntilSQLThreadAfterGTIDSType, Set1: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion(), Channel: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1368: + case 1372: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6797 +//line sql.y:6815 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1369: + case 1373: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6801 +//line sql.y:6819 { yyLOCAL = yyDollar[2].convertTypeUnion() } yyVAL.union = yyLOCAL - case 1370: + case 1374: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6807 + var yyLOCAL IntervalType +//line sql.y:6825 { yyLOCAL = IntervalDayHour } yyVAL.union = yyLOCAL - case 1371: + case 1375: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6811 + var yyLOCAL IntervalType +//line sql.y:6829 { yyLOCAL = 
IntervalDayMicrosecond } yyVAL.union = yyLOCAL - case 1372: + case 1376: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6815 + var yyLOCAL IntervalType +//line sql.y:6833 { yyLOCAL = IntervalDayMinute } yyVAL.union = yyLOCAL - case 1373: + case 1377: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6819 + var yyLOCAL IntervalType +//line sql.y:6837 { yyLOCAL = IntervalDaySecond } yyVAL.union = yyLOCAL - case 1374: + case 1378: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6823 + var yyLOCAL IntervalType +//line sql.y:6841 { yyLOCAL = IntervalHourMicrosecond } yyVAL.union = yyLOCAL - case 1375: + case 1379: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6827 + var yyLOCAL IntervalType +//line sql.y:6845 { yyLOCAL = IntervalHourMinute } yyVAL.union = yyLOCAL - case 1376: + case 1380: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6831 + var yyLOCAL IntervalType +//line sql.y:6849 { yyLOCAL = IntervalHourSecond } yyVAL.union = yyLOCAL - case 1377: + case 1381: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6835 + var yyLOCAL IntervalType +//line sql.y:6853 { yyLOCAL = IntervalMinuteMicrosecond } yyVAL.union = yyLOCAL - case 1378: + case 1382: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6839 + var yyLOCAL IntervalType +//line sql.y:6857 { yyLOCAL = IntervalMinuteSecond } yyVAL.union = yyLOCAL - case 1379: + case 1383: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6843 + var yyLOCAL IntervalType +//line sql.y:6861 { yyLOCAL = IntervalSecondMicrosecond } yyVAL.union = yyLOCAL - case 1380: + case 1384: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6865 + { + yyLOCAL = IntervalYearMonth + } + yyVAL.union = yyLOCAL + case 1385: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6869 + { + yyLOCAL = 
IntervalDay + } + yyVAL.union = yyLOCAL + case 1386: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6873 + { + yyLOCAL = IntervalWeek + } + yyVAL.union = yyLOCAL + case 1387: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6877 + { + yyLOCAL = IntervalHour + } + yyVAL.union = yyLOCAL + case 1388: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6881 + { + yyLOCAL = IntervalMinute + } + yyVAL.union = yyLOCAL + case 1389: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6885 + { + yyLOCAL = IntervalMonth + } + yyVAL.union = yyLOCAL + case 1390: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6889 + { + yyLOCAL = IntervalQuarter + } + yyVAL.union = yyLOCAL + case 1391: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6893 + { + yyLOCAL = IntervalSecond + } + yyVAL.union = yyLOCAL + case 1392: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6897 + { + yyLOCAL = IntervalMicrosecond + } + yyVAL.union = yyLOCAL + case 1393: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6901 + { + yyLOCAL = IntervalYear + } + yyVAL.union = yyLOCAL + case 1394: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6907 + { + yyLOCAL = IntervalDay + } + yyVAL.union = yyLOCAL + case 1395: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6911 + { + yyLOCAL = IntervalWeek + } + yyVAL.union = yyLOCAL + case 1396: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6915 + { + yyLOCAL = IntervalHour + } + yyVAL.union = yyLOCAL + case 1397: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6919 + { + yyLOCAL = IntervalMinute + } + yyVAL.union = yyLOCAL + case 1398: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6923 + { + yyLOCAL = IntervalMonth + } + yyVAL.union = 
yyLOCAL + case 1399: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6927 + { + yyLOCAL = IntervalQuarter + } + yyVAL.union = yyLOCAL + case 1400: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6931 + { + yyLOCAL = IntervalSecond + } + yyVAL.union = yyLOCAL + case 1401: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6935 + { + yyLOCAL = IntervalMicrosecond + } + yyVAL.union = yyLOCAL + case 1402: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6847 + var yyLOCAL IntervalType +//line sql.y:6939 { - yyLOCAL = IntervalYearMonth + yyLOCAL = IntervalYear } yyVAL.union = yyLOCAL - case 1381: + case 1403: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6851 + var yyLOCAL IntervalType +//line sql.y:6943 { yyLOCAL = IntervalDay } yyVAL.union = yyLOCAL - case 1382: + case 1404: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6855 + var yyLOCAL IntervalType +//line sql.y:6947 { yyLOCAL = IntervalWeek } yyVAL.union = yyLOCAL - case 1383: + case 1405: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6859 + var yyLOCAL IntervalType +//line sql.y:6951 { yyLOCAL = IntervalHour } yyVAL.union = yyLOCAL - case 1384: + case 1406: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6863 + var yyLOCAL IntervalType +//line sql.y:6955 { yyLOCAL = IntervalMinute } yyVAL.union = yyLOCAL - case 1385: + case 1407: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6867 + var yyLOCAL IntervalType +//line sql.y:6959 { yyLOCAL = IntervalMonth } yyVAL.union = yyLOCAL - case 1386: + case 1408: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6871 + var yyLOCAL IntervalType +//line sql.y:6963 { yyLOCAL = IntervalQuarter } yyVAL.union = yyLOCAL - case 1387: + case 1409: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6875 
+ var yyLOCAL IntervalType +//line sql.y:6967 { yyLOCAL = IntervalSecond } yyVAL.union = yyLOCAL - case 1388: + case 1410: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6879 + var yyLOCAL IntervalType +//line sql.y:6971 { yyLOCAL = IntervalMicrosecond } yyVAL.union = yyLOCAL - case 1389: + case 1411: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6883 + var yyLOCAL IntervalType +//line sql.y:6975 { yyLOCAL = IntervalYear } yyVAL.union = yyLOCAL - case 1392: + case 1414: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL int -//line sql.y:6893 +//line sql.y:6985 { yyLOCAL = 0 } yyVAL.union = yyLOCAL - case 1393: + case 1415: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL int -//line sql.y:6897 +//line sql.y:6989 { yyLOCAL = 0 } yyVAL.union = yyLOCAL - case 1394: + case 1416: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL int -//line sql.y:6901 +//line sql.y:6993 { yyLOCAL = convertStringToInt(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 1395: + case 1417: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6911 +//line sql.y:7003 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("if"), Exprs: yyDollar[3].selectExprsUnion()} } yyVAL.union = yyLOCAL - case 1396: + case 1418: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6915 +//line sql.y:7007 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("database"), Exprs: yyDollar[3].selectExprsUnion()} } yyVAL.union = yyLOCAL - case 1397: + case 1419: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6919 +//line sql.y:7011 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("schema"), Exprs: yyDollar[3].selectExprsUnion()} } yyVAL.union = yyLOCAL - case 1398: + case 1420: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6923 +//line sql.y:7015 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("mod"), Exprs: yyDollar[3].selectExprsUnion()} } yyVAL.union = yyLOCAL - case 1399: + case 1421: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line 
sql.y:6927 +//line sql.y:7019 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("replace"), Exprs: yyDollar[3].selectExprsUnion()} } yyVAL.union = yyLOCAL - case 1400: + case 1422: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL MatchExprOption -//line sql.y:6933 +//line sql.y:7025 { yyLOCAL = NoOption } yyVAL.union = yyLOCAL - case 1401: + case 1423: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL MatchExprOption -//line sql.y:6937 +//line sql.y:7029 { yyLOCAL = BooleanModeOpt } yyVAL.union = yyLOCAL - case 1402: + case 1424: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL MatchExprOption -//line sql.y:6941 +//line sql.y:7033 { yyLOCAL = NaturalLanguageModeOpt } yyVAL.union = yyLOCAL - case 1403: + case 1425: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL MatchExprOption -//line sql.y:6945 +//line sql.y:7037 { yyLOCAL = NaturalLanguageModeWithQueryExpansionOpt } yyVAL.union = yyLOCAL - case 1404: + case 1426: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL MatchExprOption -//line sql.y:6949 +//line sql.y:7041 { yyLOCAL = QueryExpansionOpt } yyVAL.union = yyLOCAL - case 1405: + case 1427: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:6955 +//line sql.y:7047 { yyVAL.str = string(yyDollar[1].identifierCI.String()) } - case 1406: + case 1428: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:6959 +//line sql.y:7051 { yyVAL.str = string(yyDollar[1].str) } - case 1407: + case 1429: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:6963 +//line sql.y:7055 { yyVAL.str = string(yyDollar[1].str) } - case 1408: + case 1430: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6969 +//line sql.y:7061 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1409: + case 1431: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6973 +//line sql.y:7065 { yyLOCAL = &ConvertType{Type: string(yyDollar[2].str), Length: NewIntLiteral(yyDollar[4].str)} } yyVAL.union = yyLOCAL - case 1410: + case 1432: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6977 +//line 
sql.y:7069 { yyLOCAL = &ConvertType{Type: string(yyDollar[2].str), Length: NewIntLiteral(yyDollar[4].str)} } yyVAL.union = yyLOCAL - case 1411: + case 1433: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6983 +//line sql.y:7075 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } yyVAL.union = yyLOCAL - case 1412: + case 1434: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6987 +//line sql.y:7079 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: yyDollar[3].columnCharset} } yyVAL.union = yyLOCAL - case 1413: + case 1435: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6991 +//line sql.y:7083 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1414: + case 1436: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6995 +//line sql.y:7087 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } yyVAL.union = yyLOCAL - case 1415: + case 1437: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6999 +//line sql.y:7091 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} yyLOCAL.Length = yyDollar[2].LengthScaleOption.Length yyLOCAL.Scale = yyDollar[2].LengthScaleOption.Scale } yyVAL.union = yyLOCAL - case 1416: + case 1438: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7005 +//line sql.y:7097 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1417: + case 1439: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7009 +//line sql.y:7101 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } yyVAL.union = yyLOCAL - case 1418: + case 1440: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7013 +//line sql.y:7105 { yyLOCAL = &ConvertType{Type: 
string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1419: + case 1441: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7017 +//line sql.y:7109 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1420: + case 1442: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7021 +//line sql.y:7113 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } yyVAL.union = yyLOCAL - case 1421: + case 1443: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7025 +//line sql.y:7117 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1422: + case 1444: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7029 +//line sql.y:7121 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1423: + case 1445: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7033 +//line sql.y:7125 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } yyVAL.union = yyLOCAL - case 1424: + case 1446: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7037 +//line sql.y:7129 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1425: + case 1447: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7041 +//line sql.y:7133 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1426: + case 1448: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:7047 +//line sql.y:7139 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 1427: + case 1449: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:7051 +//line sql.y:7143 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 1428: + case 1450: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Expr -//line sql.y:7056 +//line sql.y:7148 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 
1429: + case 1451: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7060 +//line sql.y:7152 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1430: + case 1452: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7065 +//line sql.y:7157 { yyVAL.str = string("") } - case 1431: + case 1453: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7069 +//line sql.y:7161 { yyVAL.str = encodeSQLString(yyDollar[2].str) } - case 1432: + case 1454: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*When -//line sql.y:7075 +//line sql.y:7167 { yyLOCAL = []*When{yyDollar[1].whenUnion()} } yyVAL.union = yyLOCAL - case 1433: + case 1455: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7079 +//line sql.y:7171 { yySLICE := (*[]*When)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].whenUnion()) } - case 1434: + case 1456: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *When -//line sql.y:7085 +//line sql.y:7177 { yyLOCAL = &When{Cond: yyDollar[2].exprUnion(), Val: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1435: + case 1457: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Expr -//line sql.y:7090 +//line sql.y:7182 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1436: + case 1458: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:7094 +//line sql.y:7186 { yyLOCAL = yyDollar[2].exprUnion() } yyVAL.union = yyLOCAL - case 1437: + case 1459: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ColName -//line sql.y:7100 +//line sql.y:7192 { yyLOCAL = &ColName{Name: yyDollar[1].identifierCI} } yyVAL.union = yyLOCAL - case 1438: + case 1460: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ColName -//line sql.y:7104 +//line sql.y:7196 { yyLOCAL = &ColName{Name: NewIdentifierCI(string(yyDollar[1].str))} } yyVAL.union = yyLOCAL - case 1439: + case 1461: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColName -//line sql.y:7108 +//line sql.y:7200 { yyLOCAL = &ColName{Qualifier: TableName{Name: yyDollar[1].identifierCS}, Name: yyDollar[3].identifierCI} } 
yyVAL.union = yyLOCAL - case 1440: + case 1462: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *ColName -//line sql.y:7112 +//line sql.y:7204 { yyLOCAL = &ColName{Qualifier: TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS}, Name: yyDollar[5].identifierCI} } yyVAL.union = yyLOCAL - case 1441: + case 1463: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7118 +//line sql.y:7210 { yyLOCAL = yyDollar[1].colNameUnion() } yyVAL.union = yyLOCAL - case 1442: + case 1464: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7122 +//line sql.y:7214 { yyLOCAL = &Offset{V: convertStringToInt(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1443: + case 1465: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7128 +//line sql.y:7220 { // TODO(sougou): Deprecate this construct. if yyDollar[1].identifierCI.Lowered() != "value" { @@ -20787,426 +21086,426 @@ yydefault: yyLOCAL = NewIntLiteral("1") } yyVAL.union = yyLOCAL - case 1444: + case 1466: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:7137 +//line sql.y:7229 { yyLOCAL = NewIntLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 1445: + case 1467: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:7141 +//line sql.y:7233 { yyLOCAL = parseBindVariable(yylex, yyDollar[1].str[1:]) } yyVAL.union = yyLOCAL - case 1446: + case 1468: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Exprs -//line sql.y:7146 +//line sql.y:7238 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1447: + case 1469: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Exprs -//line sql.y:7150 +//line sql.y:7242 { yyLOCAL = yyDollar[3].exprsUnion() } yyVAL.union = yyLOCAL - case 1448: + case 1470: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Expr -//line sql.y:7155 +//line sql.y:7247 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1449: + case 1471: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:7159 +//line sql.y:7251 { yyLOCAL = yyDollar[2].exprUnion() } 
yyVAL.union = yyLOCAL - case 1450: + case 1472: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *NamedWindow -//line sql.y:7165 +//line sql.y:7257 { yyLOCAL = &NamedWindow{yyDollar[2].windowDefinitionsUnion()} } yyVAL.union = yyLOCAL - case 1451: + case 1473: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL NamedWindows -//line sql.y:7171 +//line sql.y:7263 { yyLOCAL = NamedWindows{yyDollar[1].namedWindowUnion()} } yyVAL.union = yyLOCAL - case 1452: + case 1474: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7175 +//line sql.y:7267 { yySLICE := (*NamedWindows)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].namedWindowUnion()) } - case 1453: + case 1475: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL NamedWindows -//line sql.y:7180 +//line sql.y:7272 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1454: + case 1476: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL NamedWindows -//line sql.y:7184 +//line sql.y:7276 { yyLOCAL = yyDollar[1].namedWindowsUnion() } yyVAL.union = yyLOCAL - case 1455: + case 1477: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL OrderBy -//line sql.y:7189 +//line sql.y:7281 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1456: + case 1478: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL OrderBy -//line sql.y:7193 +//line sql.y:7285 { yyLOCAL = yyDollar[1].orderByUnion() } yyVAL.union = yyLOCAL - case 1457: + case 1479: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL OrderBy -//line sql.y:7199 +//line sql.y:7291 { yyLOCAL = yyDollar[3].orderByUnion() } yyVAL.union = yyLOCAL - case 1458: + case 1480: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL OrderBy -//line sql.y:7205 +//line sql.y:7297 { yyLOCAL = OrderBy{yyDollar[1].orderUnion()} } yyVAL.union = yyLOCAL - case 1459: + case 1481: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7209 +//line sql.y:7301 { yySLICE := (*OrderBy)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].orderUnion()) } - case 1460: + case 1482: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Order -//line sql.y:7215 +//line 
sql.y:7307 { yyLOCAL = &Order{Expr: yyDollar[1].exprUnion(), Direction: yyDollar[2].orderDirectionUnion()} } yyVAL.union = yyLOCAL - case 1461: + case 1483: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL OrderDirection -//line sql.y:7220 +//line sql.y:7312 { yyLOCAL = AscOrder } yyVAL.union = yyLOCAL - case 1462: + case 1484: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL OrderDirection -//line sql.y:7224 +//line sql.y:7316 { yyLOCAL = AscOrder } yyVAL.union = yyLOCAL - case 1463: + case 1485: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL OrderDirection -//line sql.y:7228 +//line sql.y:7320 { yyLOCAL = DescOrder } yyVAL.union = yyLOCAL - case 1464: + case 1486: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *Limit -//line sql.y:7233 +//line sql.y:7325 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1465: + case 1487: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Limit -//line sql.y:7237 +//line sql.y:7329 { yyLOCAL = yyDollar[1].limitUnion() } yyVAL.union = yyLOCAL - case 1466: + case 1488: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Limit -//line sql.y:7243 +//line sql.y:7335 { yyLOCAL = &Limit{Rowcount: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 1467: + case 1489: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *Limit -//line sql.y:7247 +//line sql.y:7339 { yyLOCAL = &Limit{Offset: yyDollar[2].exprUnion(), Rowcount: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1468: + case 1490: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *Limit -//line sql.y:7251 +//line sql.y:7343 { yyLOCAL = &Limit{Offset: yyDollar[4].exprUnion(), Rowcount: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 1469: + case 1491: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:7256 +//line sql.y:7348 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1470: + case 1492: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:7260 +//line sql.y:7352 { yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion(), yyDollar[2].alterOptionUnion()} } yyVAL.union 
= yyLOCAL - case 1471: + case 1493: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:7264 +//line sql.y:7356 { yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion(), yyDollar[2].alterOptionUnion()} } yyVAL.union = yyLOCAL - case 1472: + case 1494: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:7268 +//line sql.y:7360 { yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()} } yyVAL.union = yyLOCAL - case 1473: + case 1495: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:7272 +//line sql.y:7364 { yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()} } yyVAL.union = yyLOCAL - case 1474: + case 1496: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:7279 +//line sql.y:7371 { yyLOCAL = &LockOption{Type: DefaultType} } yyVAL.union = yyLOCAL - case 1475: + case 1497: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:7283 +//line sql.y:7375 { yyLOCAL = &LockOption{Type: NoneType} } yyVAL.union = yyLOCAL - case 1476: + case 1498: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:7287 +//line sql.y:7379 { yyLOCAL = &LockOption{Type: SharedType} } yyVAL.union = yyLOCAL - case 1477: + case 1499: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:7291 +//line sql.y:7383 { yyLOCAL = &LockOption{Type: ExclusiveType} } yyVAL.union = yyLOCAL - case 1478: + case 1500: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:7297 +//line sql.y:7389 { yyLOCAL = AlgorithmValue(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 1479: + case 1501: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:7301 +//line sql.y:7393 { yyLOCAL = AlgorithmValue(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 1480: + case 1502: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:7305 +//line sql.y:7397 { yyLOCAL = AlgorithmValue(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 1481: + case 1503: yyDollar 
= yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:7309 +//line sql.y:7401 { yyLOCAL = AlgorithmValue(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 1482: + case 1504: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7314 +//line sql.y:7406 { yyVAL.str = "" } - case 1483: + case 1505: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7318 +//line sql.y:7410 { yyVAL.str = string(yyDollar[3].str) } - case 1484: + case 1506: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7322 +//line sql.y:7414 { yyVAL.str = string(yyDollar[3].str) } - case 1485: + case 1507: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7326 +//line sql.y:7418 { yyVAL.str = string(yyDollar[3].str) } - case 1486: + case 1508: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7331 +//line sql.y:7423 { yyVAL.str = "" } - case 1487: + case 1509: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7335 +//line sql.y:7427 { yyVAL.str = yyDollar[3].str } - case 1488: + case 1510: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7341 +//line sql.y:7433 { yyVAL.str = string(yyDollar[1].str) } - case 1489: + case 1511: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7345 +//line sql.y:7437 { yyVAL.str = string(yyDollar[1].str) } - case 1490: + case 1512: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7350 +//line sql.y:7442 { yyVAL.str = "" } - case 1491: + case 1513: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:7354 +//line sql.y:7446 { yyVAL.str = yyDollar[2].str } - case 1492: + case 1514: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7359 +//line sql.y:7451 { yyVAL.str = "cascaded" } - case 1493: + case 1515: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7363 +//line sql.y:7455 { yyVAL.str = string(yyDollar[1].str) } - case 1494: + case 1516: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7367 +//line sql.y:7459 { yyVAL.str = string(yyDollar[1].str) } - case 1495: + case 1517: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *Definer -//line sql.y:7372 +//line sql.y:7464 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1496: 
+ case 1518: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *Definer -//line sql.y:7376 +//line sql.y:7468 { yyLOCAL = yyDollar[3].definerUnion() } yyVAL.union = yyLOCAL - case 1497: + case 1519: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Definer -//line sql.y:7382 +//line sql.y:7474 { yyLOCAL = &Definer{ Name: string(yyDollar[1].str), } } yyVAL.union = yyLOCAL - case 1498: + case 1520: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *Definer -//line sql.y:7388 +//line sql.y:7480 { yyLOCAL = &Definer{ Name: string(yyDollar[1].str), } } yyVAL.union = yyLOCAL - case 1499: + case 1521: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Definer -//line sql.y:7394 +//line sql.y:7486 { yyLOCAL = &Definer{ Name: yyDollar[1].str, @@ -21214,369 +21513,369 @@ yydefault: } } yyVAL.union = yyLOCAL - case 1500: + case 1522: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7403 +//line sql.y:7495 { yyVAL.str = encodeSQLString(yyDollar[1].str) } - case 1501: + case 1523: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7407 +//line sql.y:7499 { yyVAL.str = formatIdentifier(yyDollar[1].str) } - case 1502: + case 1524: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7412 +//line sql.y:7504 { yyVAL.str = "" } - case 1503: + case 1525: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7416 +//line sql.y:7508 { yyVAL.str = formatAddress(yyDollar[1].str) } - case 1504: + case 1526: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Lock -//line sql.y:7422 +//line sql.y:7514 { yyLOCAL = ForUpdateLock } yyVAL.union = yyLOCAL - case 1505: + case 1527: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Lock -//line sql.y:7426 +//line sql.y:7518 { yyLOCAL = ShareModeLock } yyVAL.union = yyLOCAL - case 1506: + case 1528: yyDollar = yyS[yypt-9 : yypt+1] var yyLOCAL *SelectInto -//line sql.y:7432 +//line sql.y:7524 { yyLOCAL = &SelectInto{Type: IntoOutfileS3, FileName: encodeSQLString(yyDollar[4].str), Charset: yyDollar[5].columnCharset, FormatOption: yyDollar[6].str, ExportOption: yyDollar[7].str, Manifest: yyDollar[8].str, 
Overwrite: yyDollar[9].str} } yyVAL.union = yyLOCAL - case 1507: + case 1529: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *SelectInto -//line sql.y:7436 +//line sql.y:7528 { yyLOCAL = &SelectInto{Type: IntoDumpfile, FileName: encodeSQLString(yyDollar[3].str), Charset: ColumnCharset{}, FormatOption: "", ExportOption: "", Manifest: "", Overwrite: ""} } yyVAL.union = yyLOCAL - case 1508: + case 1530: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *SelectInto -//line sql.y:7440 +//line sql.y:7532 { yyLOCAL = &SelectInto{Type: IntoOutfile, FileName: encodeSQLString(yyDollar[3].str), Charset: yyDollar[4].columnCharset, FormatOption: "", ExportOption: yyDollar[5].str, Manifest: "", Overwrite: ""} } yyVAL.union = yyLOCAL - case 1509: + case 1531: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7445 +//line sql.y:7537 { yyVAL.str = "" } - case 1510: + case 1532: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7449 +//line sql.y:7541 { yyVAL.str = " format csv" + yyDollar[3].str } - case 1511: + case 1533: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7453 +//line sql.y:7545 { yyVAL.str = " format text" + yyDollar[3].str } - case 1512: + case 1534: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7458 +//line sql.y:7550 { yyVAL.str = "" } - case 1513: + case 1535: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7462 +//line sql.y:7554 { yyVAL.str = " header" } - case 1514: + case 1536: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7467 +//line sql.y:7559 { yyVAL.str = "" } - case 1515: + case 1537: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7471 +//line sql.y:7563 { yyVAL.str = " manifest on" } - case 1516: + case 1538: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7475 +//line sql.y:7567 { yyVAL.str = " manifest off" } - case 1517: + case 1539: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7480 +//line sql.y:7572 { yyVAL.str = "" } - case 1518: + case 1540: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7484 +//line sql.y:7576 { yyVAL.str = " overwrite on" } - case 1519: + case 1541: 
yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7488 +//line sql.y:7580 { yyVAL.str = " overwrite off" } - case 1520: + case 1542: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7494 +//line sql.y:7586 { yyVAL.str = yyDollar[1].str + yyDollar[2].str } - case 1521: + case 1543: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7499 +//line sql.y:7591 { yyVAL.str = "" } - case 1522: + case 1544: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7503 +//line sql.y:7595 { yyVAL.str = " lines" + yyDollar[2].str } - case 1523: + case 1545: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7509 +//line sql.y:7601 { yyVAL.str = yyDollar[1].str } - case 1524: + case 1546: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7513 +//line sql.y:7605 { yyVAL.str = yyDollar[1].str + yyDollar[2].str } - case 1525: + case 1547: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7519 +//line sql.y:7611 { yyVAL.str = " starting by " + encodeSQLString(yyDollar[3].str) } - case 1526: + case 1548: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7523 +//line sql.y:7615 { yyVAL.str = " terminated by " + encodeSQLString(yyDollar[3].str) } - case 1527: + case 1549: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7528 +//line sql.y:7620 { yyVAL.str = "" } - case 1528: + case 1550: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7532 +//line sql.y:7624 { yyVAL.str = " " + yyDollar[1].str + yyDollar[2].str } - case 1529: + case 1551: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7538 +//line sql.y:7630 { yyVAL.str = yyDollar[1].str } - case 1530: + case 1552: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7542 +//line sql.y:7634 { yyVAL.str = yyDollar[1].str + yyDollar[2].str } - case 1531: + case 1553: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7548 +//line sql.y:7640 { yyVAL.str = " terminated by " + encodeSQLString(yyDollar[3].str) } - case 1532: + case 1554: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:7552 +//line sql.y:7644 { yyVAL.str = yyDollar[1].str + " enclosed by " + encodeSQLString(yyDollar[4].str) } - case 
1533: + case 1555: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7556 +//line sql.y:7648 { yyVAL.str = " escaped by " + encodeSQLString(yyDollar[3].str) } - case 1534: + case 1556: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7561 +//line sql.y:7653 { yyVAL.str = "" } - case 1535: + case 1557: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7565 +//line sql.y:7657 { yyVAL.str = " optionally" } - case 1536: + case 1558: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Insert -//line sql.y:7578 +//line sql.y:7670 { yyLOCAL = &Insert{Rows: yyDollar[2].valuesUnion()} } yyVAL.union = yyLOCAL - case 1537: + case 1559: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Insert -//line sql.y:7582 +//line sql.y:7674 { yyLOCAL = &Insert{Rows: yyDollar[1].selStmtUnion()} } yyVAL.union = yyLOCAL - case 1538: + case 1560: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *Insert -//line sql.y:7586 +//line sql.y:7678 { yyLOCAL = &Insert{Columns: yyDollar[2].columnsUnion(), Rows: yyDollar[5].valuesUnion()} } yyVAL.union = yyLOCAL - case 1539: + case 1561: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *Insert -//line sql.y:7590 +//line sql.y:7682 { yyLOCAL = &Insert{Columns: []IdentifierCI{}, Rows: yyDollar[4].valuesUnion()} } yyVAL.union = yyLOCAL - case 1540: + case 1562: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *Insert -//line sql.y:7594 +//line sql.y:7686 { yyLOCAL = &Insert{Columns: yyDollar[2].columnsUnion(), Rows: yyDollar[4].selStmtUnion()} } yyVAL.union = yyLOCAL - case 1541: + case 1563: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Columns -//line sql.y:7600 +//line sql.y:7692 { yyLOCAL = Columns{yyDollar[1].identifierCI} } yyVAL.union = yyLOCAL - case 1542: + case 1564: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Columns -//line sql.y:7604 +//line sql.y:7696 { yyLOCAL = Columns{yyDollar[3].identifierCI} } yyVAL.union = yyLOCAL - case 1543: + case 1565: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7608 +//line sql.y:7700 { yySLICE := (*Columns)(yyIaddr(yyVAL.union)) *yySLICE = 
append(*yySLICE, yyDollar[3].identifierCI) } - case 1544: + case 1566: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:7612 +//line sql.y:7704 { yySLICE := (*Columns)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[5].identifierCI) } - case 1545: + case 1567: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL UpdateExprs -//line sql.y:7617 +//line sql.y:7709 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1546: + case 1568: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL UpdateExprs -//line sql.y:7621 +//line sql.y:7713 { yyLOCAL = yyDollar[5].updateExprsUnion() } yyVAL.union = yyLOCAL - case 1547: + case 1569: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Values -//line sql.y:7627 +//line sql.y:7719 { yyLOCAL = Values{yyDollar[1].valTupleUnion()} } yyVAL.union = yyLOCAL - case 1548: + case 1570: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7631 +//line sql.y:7723 { yySLICE := (*Values)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].valTupleUnion()) } - case 1549: + case 1571: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ValTuple -//line sql.y:7637 +//line sql.y:7729 { yyLOCAL = yyDollar[1].valTupleUnion() } yyVAL.union = yyLOCAL - case 1550: + case 1572: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL ValTuple -//line sql.y:7641 +//line sql.y:7733 { yyLOCAL = ValTuple{} } yyVAL.union = yyLOCAL - case 1551: + case 1573: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ValTuple -//line sql.y:7647 +//line sql.y:7739 { yyLOCAL = ValTuple(yyDollar[2].exprsUnion()) } yyVAL.union = yyLOCAL - case 1552: + case 1574: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL ValTuple -//line sql.y:7651 +//line sql.y:7743 { yyLOCAL = ValTuple(yyDollar[3].exprsUnion()) } yyVAL.union = yyLOCAL - case 1553: + case 1575: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7656 +//line sql.y:7748 { if len(yyDollar[1].valTupleUnion()) == 1 { yyLOCAL = yyDollar[1].valTupleUnion()[0] @@ -21585,269 +21884,269 @@ yydefault: } } yyVAL.union = yyLOCAL - case 1554: + case 1576: yyDollar = 
yyS[yypt-1 : yypt+1] var yyLOCAL UpdateExprs -//line sql.y:7666 +//line sql.y:7758 { yyLOCAL = UpdateExprs{yyDollar[1].updateExprUnion()} } yyVAL.union = yyLOCAL - case 1555: + case 1577: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7670 +//line sql.y:7762 { yySLICE := (*UpdateExprs)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].updateExprUnion()) } - case 1556: + case 1578: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *UpdateExpr -//line sql.y:7676 +//line sql.y:7768 { yyLOCAL = &UpdateExpr{Name: yyDollar[1].colNameUnion(), Expr: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1558: + case 1580: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7683 +//line sql.y:7775 { yyVAL.str = "charset" } - case 1561: + case 1583: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7693 +//line sql.y:7785 { yyLOCAL = NewStrLiteral(yyDollar[1].identifierCI.String()) } yyVAL.union = yyLOCAL - case 1562: + case 1584: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7697 +//line sql.y:7789 { yyLOCAL = NewStrLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 1563: + case 1585: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7701 +//line sql.y:7793 { yyLOCAL = &Default{} } yyVAL.union = yyLOCAL - case 1566: + case 1588: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:7710 +//line sql.y:7802 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 1567: + case 1589: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:7712 +//line sql.y:7804 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 1568: + case 1590: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:7715 +//line sql.y:7807 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 1569: + case 1591: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:7717 +//line sql.y:7809 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 1570: + case 1592: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:7720 +//line sql.y:7812 { 
yyLOCAL = false } yyVAL.union = yyLOCAL - case 1571: + case 1593: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL bool -//line sql.y:7722 +//line sql.y:7814 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 1572: + case 1594: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Ignore -//line sql.y:7725 +//line sql.y:7817 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 1573: + case 1595: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Ignore -//line sql.y:7727 +//line sql.y:7819 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 1574: + case 1596: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7730 +//line sql.y:7822 { yyVAL.empty = struct{}{} } - case 1575: + case 1597: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7732 +//line sql.y:7824 { yyVAL.empty = struct{}{} } - case 1576: + case 1598: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7734 +//line sql.y:7826 { yyVAL.empty = struct{}{} } - case 1577: + case 1599: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:7738 +//line sql.y:7830 { yyLOCAL = &CallProc{Name: yyDollar[2].tableName, Params: yyDollar[4].exprsUnion()} } yyVAL.union = yyLOCAL - case 1578: + case 1600: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Exprs -//line sql.y:7743 +//line sql.y:7835 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1579: + case 1601: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Exprs -//line sql.y:7747 +//line sql.y:7839 { yyLOCAL = yyDollar[1].exprsUnion() } yyVAL.union = yyLOCAL - case 1580: + case 1602: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []*IndexOption -//line sql.y:7752 +//line sql.y:7844 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1581: + case 1603: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*IndexOption -//line sql.y:7754 +//line sql.y:7846 { yyLOCAL = []*IndexOption{yyDollar[1].indexOptionUnion()} } yyVAL.union = yyLOCAL - case 1582: + case 1604: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:7758 +//line sql.y:7850 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), String: 
string(yyDollar[2].identifierCI.String())} } yyVAL.union = yyLOCAL - case 1583: + case 1605: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7764 +//line sql.y:7856 { yyVAL.identifierCI = yyDollar[1].identifierCI } - case 1584: + case 1606: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7768 +//line sql.y:7860 { yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str)) } - case 1586: + case 1608: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7775 +//line sql.y:7867 { yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str)) } - case 1587: + case 1609: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7781 +//line sql.y:7873 { yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str)) } - case 1588: + case 1610: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7785 +//line sql.y:7877 { yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str)) } - case 1589: + case 1611: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7791 +//line sql.y:7883 { yyVAL.identifierCS = NewIdentifierCS("") } - case 1590: + case 1612: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7795 +//line sql.y:7887 { yyVAL.identifierCS = yyDollar[1].identifierCS } - case 1592: + case 1614: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7802 +//line sql.y:7894 { yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str)) } - case 1593: + case 1615: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7807 +//line sql.y:7899 { yyVAL.str = "" } - case 1594: + case 1616: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7811 +//line sql.y:7903 { yyVAL.str = string(yyDollar[1].str) } - case 1595: + case 1617: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Columns -//line sql.y:7816 +//line sql.y:7908 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1596: + case 1618: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Columns -//line sql.y:7820 +//line sql.y:7912 { yyLOCAL = yyDollar[2].columnsUnion() } yyVAL.union = yyLOCAL - case 1597: + case 1619: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL interface{} -//line sql.y:7825 
+//line sql.y:7917 { escape := "\\" yyLOCAL = &FieldsClause{ @@ -21856,10 +22155,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 1598: + case 1620: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL interface{} -//line sql.y:7833 +//line sql.y:7925 { escape := yyDollar[4].itemUnion().(string) if escape != "\\" && len(escape) > 1 { @@ -21881,107 +22180,107 @@ yydefault: } } yyVAL.union = yyLOCAL - case 1599: + case 1621: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL interface{} -//line sql.y:7855 +//line sql.y:7947 { yyLOCAL = &LinesClause{Terminated: "\n"} } yyVAL.union = yyLOCAL - case 1600: + case 1622: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL interface{} -//line sql.y:7859 +//line sql.y:7951 { yyLOCAL = &LinesClause{Starting: yyDollar[2].itemUnion().(string), Terminated: yyDollar[3].itemUnion().(string)} } yyVAL.union = yyLOCAL - case 1601: + case 1623: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL interface{} -//line sql.y:7864 +//line sql.y:7956 { yyLOCAL = "\t" } yyVAL.union = yyLOCAL - case 1602: + case 1624: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL interface{} -//line sql.y:7868 +//line sql.y:7960 { yyLOCAL = string(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 1603: + case 1625: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL interface{} -//line sql.y:7873 +//line sql.y:7965 { yyLOCAL = "" } yyVAL.union = yyLOCAL - case 1604: + case 1626: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL interface{} -//line sql.y:7877 +//line sql.y:7969 { yyLOCAL = string(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 1605: + case 1627: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL interface{} -//line sql.y:7882 +//line sql.y:7974 { yyLOCAL = "\\" } yyVAL.union = yyLOCAL - case 1606: + case 1628: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL interface{} -//line sql.y:7886 +//line sql.y:7978 { yyLOCAL = string(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 1607: + case 1629: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL interface{} -//line sql.y:7891 +//line sql.y:7983 { yyLOCAL = "" } 
yyVAL.union = yyLOCAL - case 1608: + case 1630: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL interface{} -//line sql.y:7895 +//line sql.y:7987 { yyLOCAL = string(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 1609: + case 1631: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL interface{} -//line sql.y:7901 +//line sql.y:7993 { yyLOCAL = "\n" } yyVAL.union = yyLOCAL - case 1610: + case 1632: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL interface{} -//line sql.y:7905 +//line sql.y:7997 { yyLOCAL = string(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 1613: + case 1635: yyDollar = yyS[yypt-11 : yypt+1] var yyLOCAL Statement -//line sql.y:7914 +//line sql.y:8006 { x := &LoadDataStmt{ Action: string(yyDollar[1].str), @@ -22001,37 +22300,69 @@ yydefault: yyLOCAL = x } yyVAL.union = yyLOCAL - case 2218: + case 1636: + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:8027 + { + yyLOCAL = &Kill{Type: yyDollar[2].killTypeUnion(), ProcesslistID: convertStringToUInt64(yyDollar[3].str)} + } + yyVAL.union = yyLOCAL + case 1637: + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL KillType +//line sql.y:8033 + { + yyLOCAL = ConnectionType + } + yyVAL.union = yyLOCAL + case 1638: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL KillType +//line sql.y:8037 + { + yyLOCAL = ConnectionType + } + yyVAL.union = yyLOCAL + case 1639: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL KillType +//line sql.y:8041 + { + yyLOCAL = QueryType + } + yyVAL.union = yyLOCAL + case 2254: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:8561 +//line sql.y:8684 { } - case 2219: + case 2255: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:8566 +//line sql.y:8689 { } - case 2220: + case 2256: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:8570 +//line sql.y:8693 { skipToEnd(yylex) } - case 2221: + case 2257: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:8575 +//line sql.y:8698 { skipToEnd(yylex) } - case 2222: + case 2258: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:8579 +//line sql.y:8702 { 
skipToEnd(yylex) } - case 2223: + case 2259: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:8583 +//line sql.y:8706 { skipToEnd(yylex) } diff --git a/go/vt/sqlparser/sql.y b/go/vt/sqlparser/sql.y index d7ca61abe0c..547d0bd44c2 100644 --- a/go/vt/sqlparser/sql.y +++ b/go/vt/sqlparser/sql.y @@ -173,11 +173,12 @@ func markBindVariable(yylex yyLexer, bvar string) { orderDirection OrderDirection explainType ExplainType vexplainType VExplainType - intervalType IntervalTypes + intervalType IntervalType lockType LockType referenceDefinition *ReferenceDefinition txAccessModes []TxAccessMode txAccessMode TxAccessMode + killType KillType columnStorage ColumnStorage columnFormat ColumnFormat @@ -249,6 +250,7 @@ func markBindVariable(yylex yyLexer, bvar string) { %token DISCARD IMPORT ENABLE DISABLE TABLESPACE %token VIRTUAL STORED %token BOTH LEADING TRAILING +%token KILL %left EMPTY_FROM_CLAUSE %right INTO @@ -340,6 +342,7 @@ func markBindVariable(yylex yyLexer, bvar string) { %token CURTIME CURRENT_TIME LOCALTIME LOCALTIMESTAMP CURRENT_USER %token UTC_DATE UTC_TIME UTC_TIMESTAMP SYSDATE %token DAY DAY_HOUR DAY_MICROSECOND DAY_MINUTE DAY_SECOND HOUR HOUR_MICROSECOND HOUR_MINUTE HOUR_SECOND MICROSECOND MINUTE MINUTE_MICROSECOND MINUTE_SECOND MONTH QUARTER SECOND SECOND_MICROSECOND YEAR_MONTH WEEK +%token SQL_TSI_DAY SQL_TSI_WEEK SQL_TSI_HOUR SQL_TSI_MINUTE SQL_TSI_MONTH SQL_TSI_QUARTER SQL_TSI_SECOND SQL_TSI_MICROSECOND SQL_TSI_YEAR %token REPLACE %token CONVERT CAST %token SUBSTR SUBSTRING @@ -350,7 +353,7 @@ func markBindVariable(yylex yyLexer, bvar string) { %token JSON_ARRAY JSON_OBJECT JSON_QUOTE %token JSON_DEPTH JSON_TYPE JSON_LENGTH JSON_VALID %token JSON_ARRAY_APPEND JSON_ARRAY_INSERT JSON_INSERT JSON_MERGE JSON_MERGE_PATCH JSON_MERGE_PRESERVE JSON_REMOVE JSON_REPLACE JSON_SET JSON_UNQUOTE -%token COUNT AVG MAX MIN SUM GROUP_CONCAT BIT_AND BIT_OR BIT_XOR STD STDDEV STDDEV_POP STDDEV_SAMP VAR_POP VAR_SAMP VARIANCE +%token COUNT AVG MAX MIN SUM GROUP_CONCAT BIT_AND 
BIT_OR BIT_XOR STD STDDEV STDDEV_POP STDDEV_SAMP VAR_POP VAR_SAMP VARIANCE ANY_VALUE %token REGEXP_INSTR REGEXP_LIKE REGEXP_REPLACE REGEXP_SUBSTR %token ExtractValue UpdateXML %token GET_LOCK RELEASE_LOCK RELEASE_ALL_LOCKS IS_FREE_LOCK IS_USED_LOCK @@ -399,14 +402,12 @@ func markBindVariable(yylex yyLexer, bvar string) { %type range_or_list %type partitions_opt algorithm_opt subpartitions_opt partition_max_rows partition_min_rows -%type command -%type query_expression_parens query_expression query_expression_body select_statement query_primary select_stmt_with_into -%type explain_statement explainable_statement -%type prepare_statement -%type vexplain_statement -%type execute_statement deallocate_statement +%type command kill_statement +%type explain_statement explainable_statement vexplain_statement +%type prepare_statement execute_statement deallocate_statement %type stream_statement vstream_statement insert_statement update_statement delete_statement set_statement set_transaction_statement %type create_statement alter_statement rename_statement drop_statement truncate_statement flush_statement do_statement +%type select_statement select_stmt_with_into query_expression_parens query_expression query_expression_body query_primary %type with_clause_opt with_clause %type common_table_expr %type with_list @@ -454,7 +455,7 @@ func markBindVariable(yylex yyLexer, bvar string) { %type subpartition_definition %type subpartition_definition_list subpartition_definition_list_with_brackets %type subpartition_definition_attribute_list_opt -%type interval +%type interval timestampadd_interval %type cache_opt separator_opt flush_option for_channel_opt maxvalue %type match_option %type distinct_opt union_op replace_opt local_opt @@ -590,11 +591,11 @@ func markBindVariable(yylex yyLexer, bvar string) { %type ratio_opt %type tx_chacteristics_opt tx_chars %type tx_char - %type LinesLoad FieldsLoad Escaped Enclosed FieldsTerminated Starting LinesTerminated %type LocalOpt %type 
LoadDataStmt +%type kill_type_opt %start any_command %% @@ -657,6 +658,7 @@ command: | prepare_statement | execute_statement | deallocate_statement +| kill_statement | /*empty*/ { setParseTree(yylex, nil) @@ -1487,6 +1489,7 @@ column_attribute_list_opt: | column_attribute_list_opt DEFAULT now_or_signed_literal { $1.Default = $3 + $1.DefaultLiteral = true $$ = $1 } | column_attribute_list_opt ON UPDATE function_call_nonkeyword @@ -1900,7 +1903,7 @@ underscore_charsets: } | UNDERSCORE_UTF8 { - $$ = Utf8Str + $$ = Utf8mb3Str } | UNDERSCORE_UTF8MB4 { @@ -1908,7 +1911,7 @@ underscore_charsets: } | UNDERSCORE_UTF8MB3 { - $$ = Utf8Str + $$ = Utf8mb3Str } literal_or_null: @@ -2831,7 +2834,7 @@ table_option: } | TABLESPACE equal_opt sql_id storage_opt { - $$ = &TableOption{Name:string($1), String: ($3.String() + $4)} + $$ = &TableOption{Name:string($1), String: ($3.String() + $4), CaseSensitive: true} } | UNION equal_opt '(' table_name_list ')' { @@ -2996,9 +2999,9 @@ alter_option: { $$ = &AlterColumn{Column: $3, DropDefault:true} } -| ALTER column_opt column_name SET DEFAULT signed_literal_or_null +| ALTER column_opt column_name SET DEFAULT now_or_signed_literal { - $$ = &AlterColumn{Column: $3, DropDefault:false, DefaultVal:$6} + $$ = &AlterColumn{Column: $3, DropDefault:false, DefaultVal:$6, DefaultLiteral: true} } | ALTER column_opt column_name SET DEFAULT openb expression closeb { @@ -3256,6 +3259,10 @@ alter_statement: { $$ = &AlterVschema{Action: AddSequenceDDLAction, Table: $6} } +| ALTER comment_opt VSCHEMA DROP SEQUENCE table_name + { + $$ = &AlterVschema{Action: DropSequenceDDLAction, Table: $6} + } | ALTER comment_opt VSCHEMA ON table_name ADD AUTO_INCREMENT sql_id USING table_name { $$ = &AlterVschema{ @@ -3267,6 +3274,13 @@ alter_statement: }, } } +| ALTER comment_opt VSCHEMA ON table_name DROP AUTO_INCREMENT + { + $$ = &AlterVschema{ + Action: DropAutoIncDDLAction, + Table: $5, + } + } | ALTER comment_opt VITESS_MIGRATION STRING RETRY { $$ = &AlterMigration{ 
@@ -3957,9 +3971,9 @@ truncate_statement: } analyze_statement: - ANALYZE TABLE table_name + ANALYZE local_opt TABLE table_name { - $$ = &OtherRead{} + $$ = &Analyze{IsLocal: $2, Table: $4} } purge_statement: @@ -5354,11 +5368,11 @@ bit_expr '|' bit_expr %prec '|' } | bit_expr '+' INTERVAL bit_expr interval %prec '+' { - $$ = &DateAddExpr{Type: PlusIntervalRightType, Date: $1, Unit: $5, Expr: $4} + $$ = &IntervalDateExpr{Syntax: IntervalDateExprBinaryAdd, Date: $1, Unit: $5, Interval: $4} } | bit_expr '-' INTERVAL bit_expr interval %prec '-' { - $$ = &DateSubExpr{Type: MinusIntervalRightType, Date: $1, Unit: $5, Expr: $4} + $$ = &IntervalDateExpr{Syntax: IntervalDateExprBinarySub, Date: $1, Unit: $5, Interval: $4} } | bit_expr '*' bit_expr %prec '*' { @@ -5480,7 +5494,7 @@ function_call_keyword } | INTERVAL bit_expr interval '+' bit_expr %prec INTERVAL { - $$ = &DateAddExpr{Type: PlusIntervalLeftType, Date: $5, Unit: $3, Expr: $2} + $$ = &IntervalDateExpr{Syntax: IntervalDateExprBinaryAddLeft, Date: $5, Unit: $3, Interval: $2} } | INTERVAL openb expression ',' expression_list closeb { @@ -5990,17 +6004,21 @@ UTC_DATE func_paren_opt { $$ = &GroupConcatExpr{Distinct: $3, Exprs: $4, OrderBy: $5, Separator: $6, Limit: $7} } -| TIMESTAMPADD openb sql_id ',' expression ',' expression closeb +| ANY_VALUE openb expression closeb + { + $$ = &AnyValue{Arg:$3} + } +| TIMESTAMPADD openb timestampadd_interval ',' expression ',' expression closeb { - $$ = &TimestampFuncExpr{Name:string("timestampadd"), Unit:$3.String(), Expr1:$5, Expr2:$7} + $$ = &IntervalDateExpr{Syntax: IntervalDateExprTimestampadd, Date: $7, Interval: $5, Unit: $3} } -| TIMESTAMPDIFF openb sql_id ',' expression ',' expression closeb +| TIMESTAMPDIFF openb timestampadd_interval ',' expression ',' expression closeb { - $$ = &TimestampFuncExpr{Name:string("timestampdiff"), Unit:$3.String(), Expr1:$5, Expr2:$7} + $$ = &TimestampDiffExpr{Unit:$3, Expr1:$5, Expr2:$7} } | EXTRACT openb interval FROM expression closeb 
{ - $$ = &ExtractFuncExpr{IntervalTypes: $3, Expr: $5} + $$ = &ExtractFuncExpr{IntervalType: $3, Expr: $5} } | WEIGHT_STRING openb expression convert_type_weight_string closeb { @@ -6616,27 +6634,27 @@ UTC_DATE func_paren_opt } | ADDDATE openb expression ',' INTERVAL bit_expr interval closeb { - $$ = &DateAddExpr{Type: AdddateType, Date: $3, Expr: $6, Unit: $7} + $$ = &IntervalDateExpr{Syntax: IntervalDateExprAdddate, Date: $3, Interval: $6, Unit: $7} } | ADDDATE openb expression ',' expression closeb { - $$ = &DateAddExpr{Type: AdddateType, Date: $3, Expr: $5} + $$ = &IntervalDateExpr{Syntax: IntervalDateExprAdddate, Date: $3, Interval: $5, Unit: IntervalNone} } | DATE_ADD openb expression ',' INTERVAL bit_expr interval closeb { - $$ = &DateAddExpr{Type: DateAddType, Date: $3, Expr: $6, Unit: $7} + $$ = &IntervalDateExpr{Syntax: IntervalDateExprDateAdd, Date: $3, Interval: $6, Unit: $7} } | DATE_SUB openb expression ',' INTERVAL bit_expr interval closeb { - $$ = &DateSubExpr{Type: DateSubType, Date: $3, Expr: $6, Unit: $7} + $$ = &IntervalDateExpr{Syntax: IntervalDateExprDateSub, Date: $3, Interval: $6, Unit: $7} } | SUBDATE openb expression ',' INTERVAL bit_expr interval closeb { - $$ = &DateSubExpr{Type: SubdateType, Date: $3, Expr: $6, Unit: $7} + $$ = &IntervalDateExpr{Syntax: IntervalDateExprSubdate, Date: $3, Interval: $6, Unit: $7} } | SUBDATE openb expression ',' expression closeb { - $$ = &DateSubExpr{Type: SubdateType, Date: $3, Expr: $5} + $$ = &IntervalDateExpr{Syntax: IntervalDateExprSubdate, Date: $3, Interval: $5, Unit: IntervalNone} } | regular_expressions | xml_expressions @@ -6884,6 +6902,80 @@ interval: $$=IntervalYear } +timestampadd_interval: + DAY + { + $$=IntervalDay + } +| WEEK + { + $$=IntervalWeek + } +| HOUR + { + $$=IntervalHour + } +| MINUTE + { + $$=IntervalMinute + } +| MONTH + { + $$=IntervalMonth + } +| QUARTER + { + $$=IntervalQuarter + } +| SECOND + { + $$=IntervalSecond + } +| MICROSECOND + { + $$=IntervalMicrosecond + } +| YEAR 
+ { + $$=IntervalYear + } +| SQL_TSI_DAY + { + $$=IntervalDay + } +| SQL_TSI_WEEK + { + $$=IntervalWeek + } +| SQL_TSI_HOUR + { + $$=IntervalHour + } +| SQL_TSI_MINUTE + { + $$=IntervalMinute + } +| SQL_TSI_MONTH + { + $$=IntervalMonth + } +| SQL_TSI_QUARTER + { + $$=IntervalQuarter + } +| SQL_TSI_SECOND + { + $$=IntervalSecond + } +| SQL_TSI_MICROSECOND + { + $$=IntervalMicrosecond + } +| SQL_TSI_YEAR + { + $$=IntervalYear + } + func_paren_opt: /* empty */ | openb closeb @@ -7930,6 +8022,27 @@ LoadDataStmt: $$ = x } +kill_statement: + KILL kill_type_opt INTEGRAL + { + $$ = &Kill{Type: $2, ProcesslistID: convertStringToUInt64($3)} + } + +kill_type_opt: + /* empty */ + { + $$ = ConnectionType + } +| CONNECTION + { + $$ = ConnectionType + } +| QUERY + { + $$ = QueryType + } + + /* These are not all necessarily reserved in MySQL, but some are. @@ -8008,6 +8121,7 @@ reserved_keyword: | JOIN | JSON_TABLE | KEY +| KILL | LAG | LAST_VALUE | LATERAL @@ -8069,8 +8183,6 @@ reserved_keyword: | SYSTEM | TABLE | THEN -| TIMESTAMPADD -| TIMESTAMPDIFF | TO | TRAILING | TRUE @@ -8108,6 +8220,7 @@ non_reserved_keyword: | AFTER | ALGORITHM | ALWAYS +| ANY_VALUE %prec FUNCTION_CALL_NON_KEYWORD | ARRAY | ASCII | AUTO_INCREMENT @@ -8403,6 +8516,14 @@ non_reserved_keyword: | SMALLINT | SNAPSHOT | SQL +| SQL_TSI_DAY +| SQL_TSI_HOUR +| SQL_TSI_MINUTE +| SQL_TSI_MONTH +| SQL_TSI_QUARTER +| SQL_TSI_SECOND +| SQL_TSI_WEEK +| SQL_TSI_YEAR | SRID | START | STARTING @@ -8478,6 +8599,8 @@ non_reserved_keyword: | TIES | TIME %prec STRING_TYPE_PREFIX_NON_KEYWORD | TIMESTAMP %prec STRING_TYPE_PREFIX_NON_KEYWORD +| TIMESTAMPADD %prec FUNCTION_CALL_NON_KEYWORD +| TIMESTAMPDIFF %prec FUNCTION_CALL_NON_KEYWORD | TINYBLOB | TINYINT | TINYTEXT diff --git a/go/vt/sqlparser/testdata/select_cases.txt b/go/vt/sqlparser/testdata/select_cases.txt index 426001ecdf6..1112593cd13 100644 --- a/go/vt/sqlparser/testdata/select_cases.txt +++ b/go/vt/sqlparser/testdata/select_cases.txt @@ -8,7 +8,7 @@ INPUT select 
concat(a, if(b>10, _utf8 0xC3A6, _utf8 0xC3AF)) from t1; END OUTPUT -select concat(a, if(b > 10, _utf8 0xC3A6, _utf8 0xC3AF)) from t1 +select concat(a, if(b > 10, _utf8mb3 0xC3A6, _utf8mb3 0xC3AF)) from t1 END INPUT select a as 'x', t1.*, b as 'x' from t1; @@ -374,7 +374,7 @@ INPUT select timestampdiff(SQL_TSI_SECOND, '2001-02-01 12:59:59', '2001-05-01 12:58:58') as a; END OUTPUT -select timestampdiff(SQL_TSI_SECOND, '2001-02-01 12:59:59', '2001-05-01 12:58:58') as a from dual +select timestampdiff(second, '2001-02-01 12:59:59', '2001-05-01 12:58:58') as a from dual END INPUT select concat(f1, 2) a from t1 union select 'x' a from t1; @@ -404,7 +404,7 @@ INPUT select locate(_utf8 0xD091, _utf8 0xD0B0D0B1D0B2 collate utf8_bin); END OUTPUT -select locate(_utf8 0xD091, _utf8 0xD0B0D0B1D0B2 collate utf8_bin) from dual +select locate(_utf8mb3 0xD091, _utf8mb3 0xD0B0D0B1D0B2 collate utf8_bin) from dual END INPUT select hex('a'), hex('a '); @@ -1004,7 +1004,7 @@ INPUT select soundex(_utf8 0xE99885E8A788E99A8FE697B6E69BB4E696B0E79A84E696B0E997BB); END OUTPUT -select soundex(_utf8 0xE99885E8A788E99A8FE697B6E69BB4E696B0E79A84E696B0E997BB) from dual +select soundex(_utf8mb3 0xE99885E8A788E99A8FE697B6E69BB4E696B0E79A84E696B0E997BB) from dual END INPUT select t1.a, (case t1.a when 0 then 0 else t1.b end) d from t1 join t2 on t1.a=t2.c where b=11120436154190595086 order by d; @@ -1526,7 +1526,7 @@ INPUT select timestampdiff(YEAR, '2002-05-01', '2001-01-01') as a; END OUTPUT -select timestampdiff(YEAR, '2002-05-01', '2001-01-01') as a from dual +select timestampdiff(year, '2002-05-01', '2001-01-01') as a from dual END INPUT select ST_AsText(Polygon(LineString(Point(0, 0), Point(1, 0), Point(1,1), Point(0, 1), Point(0, 0)))); @@ -1676,7 +1676,7 @@ INPUT select hex(soundex(_utf8 0xD091D092D093)); END OUTPUT -select hex(soundex(_utf8 0xD091D092D093)) from dual +select hex(soundex(_utf8mb3 0xD091D092D093)) from dual END INPUT select * from t1 where btn like "ff%"; @@ -2450,7 +2450,7 
@@ INPUT select length(uuid()), charset(uuid()), length(unhex(replace(uuid(),_utf8'-',_utf8''))); END OUTPUT -select length(uuid()), charset(uuid()), length(unhex(replace(uuid(), _utf8 '-', _utf8 ''))) from dual +select length(uuid()), charset(uuid()), length(unhex(replace(uuid(), _utf8mb3 '-', _utf8mb3 ''))) from dual END INPUT select substring('hello', 4294967296, 4294967296); @@ -2588,7 +2588,7 @@ INPUT select _utf8 0xD0B0D0B1D0B2 like concat(_utf8'%',_utf8 0xD0B1,_utf8 '%'); END OUTPUT -select _utf8 0xD0B0D0B1D0B2 like concat(_utf8 '%', _utf8 0xD0B1, _utf8 '%') from dual +select _utf8mb3 0xD0B0D0B1D0B2 like concat(_utf8mb3 '%', _utf8mb3 0xD0B1, _utf8mb3 '%') from dual END INPUT select * from t1 where MATCH(a,b) AGAINST ("indexes"); @@ -2612,7 +2612,7 @@ INPUT select timestampdiff(SQL_TSI_HOUR, '2001-02-01', '2001-05-01') as a; END OUTPUT -select timestampdiff(SQL_TSI_HOUR, '2001-02-01', '2001-05-01') as a from dual +select timestampdiff(hour, '2001-02-01', '2001-05-01') as a from dual END INPUT select max(t2.a1) from t2 left outer join t1 on t2.a2=10 where t2.a2=10; @@ -2624,7 +2624,7 @@ INPUT select timestampadd(SQL_TSI_SECOND, 1, date) from t1; END OUTPUT -select timestampadd(SQL_TSI_SECOND, 1, `date`) from t1 +select timestampadd(second, 1, `date`) from t1 END INPUT select * from (select 1 as a) b left join (select 2 as a) c using(a); @@ -2666,7 +2666,7 @@ INPUT select concat(a, if(b>10, _utf8'æ', _utf8'ß')) from t1; END OUTPUT -select concat(a, if(b > 10, _utf8 'æ', _utf8 'ß')) from t1 +select concat(a, if(b > 10, _utf8mb3 'æ', _utf8mb3 'ß')) from t1 END INPUT select hex(group_concat(a separator ',')) from t1; @@ -2702,7 +2702,7 @@ INPUT select hex(_utf8 X'616263FF'); END OUTPUT -select hex(_utf8 X'616263FF') from dual +select hex(_utf8mb3 X'616263FF') from dual END INPUT select t2.count, t1.name from t2 inner join t1 using (color); @@ -2900,7 +2900,7 @@ INPUT select timestampdiff(QUARTER, '2002-05-01', '2001-01-01') as a; END OUTPUT -select 
timestampdiff(QUARTER, '2002-05-01', '2001-01-01') as a from dual +select timestampdiff(quarter, '2002-05-01', '2001-01-01') as a from dual END INPUT select * from information_schema.TABLE_CONSTRAINTS where TABLE_NAME= "vo"; @@ -3098,7 +3098,7 @@ INPUT select locate(_utf8 0xD091, _utf8 0xD0B0D0B1D0B2); END OUTPUT -select locate(_utf8 0xD091, _utf8 0xD0B0D0B1D0B2) from dual +select locate(_utf8mb3 0xD091, _utf8mb3 0xD0B0D0B1D0B2) from dual END INPUT select group_concat(distinct a, c order by a desc, c desc) from t1; @@ -3308,7 +3308,7 @@ INPUT select i from t1 where a=repeat(_utf8 0xD0B1,200); END OUTPUT -select i from t1 where a = repeat(_utf8 0xD0B1, 200) +select i from t1 where a = repeat(_utf8mb3 0xD0B1, 200) END INPUT select @@read_rnd_buffer_size; @@ -3422,7 +3422,7 @@ INPUT select t1.*,t2.* from t1 left join t2 on (t1.b=t2.b) where charset(t2.a) = _utf8'binary' order by t1.a,t2.a; END OUTPUT -select t1.*, t2.* from t1 left join t2 on t1.b = t2.b where charset(t2.a) = _utf8 'binary' order by t1.a asc, t2.a asc +select t1.*, t2.* from t1 left join t2 on t1.b = t2.b where charset(t2.a) = _utf8mb3 'binary' order by t1.a asc, t2.a asc END INPUT select 1 from (select 1) as a; @@ -4544,7 +4544,7 @@ INPUT select timestampadd(MINUTE, 1, date) from t1; END OUTPUT -select timestampadd(MINUTE, 1, `date`) from t1 +select timestampadd(minute, 1, `date`) from t1 END INPUT select ST_astext(ST_Intersection(ST_GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), ST_GeomFromText('LINESTRING(-10 -10, 200 200)'))); @@ -5462,7 +5462,7 @@ INPUT select timestampdiff(MONTH, '2000-03-28', '2000-02-29') as a; END OUTPUT -select timestampdiff(MONTH, '2000-03-28', '2000-02-29') as a from dual +select timestampdiff(month, '2000-03-28', '2000-02-29') as a from dual END INPUT select concat(_latin1'a',_latin2'b',_latin5'c' collate latin5_turkish_ci); @@ -5846,7 +5846,7 @@ INPUT select concat(a, if(b>10, _utf8'x', _utf8'y')) from t1; END OUTPUT -select concat(a, if(b > 10, _utf8 'x', _utf8 
'y')) from t1 +select concat(a, if(b > 10, _utf8mb3 'x', _utf8mb3 'y')) from t1 END INPUT select /lib32/ /libx32/ user, host, db, info from information_schema.processlist where state = 'User lock' and info = 'select get_lock('ee_16407_2', 60)'; @@ -6458,7 +6458,7 @@ INPUT select right(_utf8 0xD0B0D0B2D0B2,1); END OUTPUT -select right(_utf8 0xD0B0D0B2D0B2, 1) from dual +select right(_utf8mb3 0xD0B0D0B2D0B2, 1) from dual END INPUT select 5 div 2; @@ -6476,7 +6476,7 @@ INPUT select timestampdiff(MONTH, '1991-03-28', '2000-02-29') as a; END OUTPUT -select timestampdiff(MONTH, '1991-03-28', '2000-02-29') as a from dual +select timestampdiff(month, '1991-03-28', '2000-02-29') as a from dual END INPUT select date_format(f1, "%m") as d1, date_format(f1, "%M") as d2 from t1 order by date_format(f1, "%M"); @@ -6632,7 +6632,7 @@ INPUT select timestampdiff(MONTH, '2001-02-01', '2001-05-01') as a; END OUTPUT -select timestampdiff(MONTH, '2001-02-01', '2001-05-01') as a from dual +select timestampdiff(month, '2001-02-01', '2001-05-01') as a from dual END INPUT select inet_aton("255.255.255.255.255"),inet_aton("255.255.1.255"),inet_aton("0.1.255"); @@ -7790,7 +7790,7 @@ INPUT select user() like _utf8"%@%"; END OUTPUT -select user() like _utf8 '%@%' from dual +select user() like _utf8mb3 '%@%' from dual END INPUT select st_distance(linestring(point(26,87),point(13,95)), geometrycollection(point(4.297374e+307,8.433875e+307), point(1e308, 1e308))) as dist; @@ -8462,7 +8462,7 @@ INPUT select locate(_utf8 0xD0B1, _utf8 0xD0B0D091D0B2); END OUTPUT -select locate(_utf8 0xD0B1, _utf8 0xD0B0D091D0B2) from dual +select locate(_utf8mb3 0xD0B1, _utf8mb3 0xD0B0D091D0B2) from dual END INPUT select 18446744073709551615, 18446744073709551615 DIV 1, 18446744073709551615 DIV 2; @@ -9182,7 +9182,7 @@ INPUT select locate(_utf8 0xD0B1, _utf8 0xD0B0D0B1D0B2); END OUTPUT -select locate(_utf8 0xD0B1, _utf8 0xD0B0D0B1D0B2) from dual +select locate(_utf8mb3 0xD0B1, _utf8mb3 0xD0B0D0B1D0B2) from dual END 
INPUT select * from t1 where b like 'foob%'; @@ -11186,7 +11186,7 @@ INPUT select (_utf8 X'616263FF'); END OUTPUT -select _utf8 X'616263FF' from dual +select _utf8mb3 X'616263FF' from dual END INPUT select f1, group_concat(f1+1) from t1 group by f1 with rollup; @@ -11714,7 +11714,7 @@ INPUT select timestampdiff(SQL_TSI_WEEK, '2001-02-01', '2001-05-01') as a; END OUTPUT -select timestampdiff(SQL_TSI_WEEK, '2001-02-01', '2001-05-01') as a from dual +select timestampdiff(week, '2001-02-01', '2001-05-01') as a from dual END INPUT select a, MAX(b), INTERVAL (MAX(b), 1,3,10,30,39,40,50,60,100,1000) from t1 group by a; @@ -12368,7 +12368,7 @@ INPUT select concat(a, if(b>10, _utf8 0x78, _utf8 0x79)) from t1; END OUTPUT -select concat(a, if(b > 10, _utf8 0x78, _utf8 0x79)) from t1 +select concat(a, if(b > 10, _utf8mb3 0x78, _utf8mb3 0x79)) from t1 END INPUT select cast(19999999999999999999 as signed); @@ -12626,7 +12626,7 @@ INPUT select length(_utf8 0xD0B1), bit_length(_utf8 0xD0B1), char_length(_utf8 0xD0B1); END OUTPUT -select length(_utf8 0xD0B1), bit_length(_utf8 0xD0B1), char_length(_utf8 0xD0B1) from dual +select length(_utf8mb3 0xD0B1), bit_length(_utf8mb3 0xD0B1), char_length(_utf8mb3 0xD0B1) from dual END INPUT select @@keycache1.key_buffer_size; @@ -12794,7 +12794,7 @@ INPUT select version()>=_utf8"3.23.29"; END OUTPUT -select version() >= _utf8 '3.23.29' from dual +select version() >= _utf8mb3 '3.23.29' from dual END INPUT select table_name, column_name, privileges from information_schema.columns where table_schema = 'mysqltest' and table_name = 'v1' order by table_name, column_name; @@ -13538,7 +13538,7 @@ INPUT select left(_utf8 0xD0B0D0B1D0B2,1); END OUTPUT -select left(_utf8 0xD0B0D0B1D0B2, 1) from dual +select left(_utf8mb3 0xD0B0D0B1D0B2, 1) from dual END INPUT select * from information_schema.SCHEMA_PRIVILEGES where grantee like '%mysqltest_1%'; @@ -14426,7 +14426,7 @@ INPUT select i from t1 where a=repeat(_utf8 'a',200); END OUTPUT -select i from t1 
where a = repeat(_utf8 'a', 200) +select i from t1 where a = repeat(_utf8mb3 'a', 200) END INPUT select time_format('100:00:00', '%H %k %h %I %l'); @@ -14684,7 +14684,7 @@ INPUT select collation(charset(_utf8'a')), collation(collation(_utf8'a')); END OUTPUT -select collation(charset(_utf8 'a')), collation(collation(_utf8 'a')) from dual +select collation(charset(_utf8mb3 'a')), collation(collation(_utf8mb3 'a')) from dual END INPUT select last_day('2000-02-05') as f1, last_day('2002-12-31') as f2, last_day('2003-03-32') as f3, last_day('2003-04-01') as f4, last_day('2001-01-01 01:01:01') as f5, last_day(NULL), last_day('2001-02-12'); @@ -15397,8 +15397,8 @@ END INPUT select t1.time+0,t1.date+0,t1.timestamp+0,concat(date," ",time), t1.quarter+t1.week, t1.year+timestampadd, timestampdiff from t1; END -ERROR -syntax error at position 107 +OUTPUT +select t1.`time` + 0, t1.`date` + 0, t1.`timestamp` + 0, concat(`date`, ' ', `time`), t1.`quarter` + t1.`week`, t1.`year` + `timestampadd`, `timestampdiff` from t1 END INPUT select substring_index('aaaaaaaaa1','aa',1); @@ -15800,7 +15800,7 @@ INPUT select hex(soundex(_utf8 0xE99885E8A788E99A8FE697B6E69BB4E696B0E79A84E696B0E997BB)); END OUTPUT -select hex(soundex(_utf8 0xE99885E8A788E99A8FE697B6E69BB4E696B0E79A84E696B0E997BB)) from dual +select hex(soundex(_utf8mb3 0xE99885E8A788E99A8FE697B6E69BB4E696B0E79A84E696B0E997BB)) from dual END INPUT select a2 from t3 join (t1 join t2 using (a1)) on b=c1 join t4 using (c2); @@ -16802,7 +16802,7 @@ INPUT select locate(_utf8 0xD0B1, _utf8 0xD0B0D091D0B2 collate utf8_bin); END OUTPUT -select locate(_utf8 0xD0B1, _utf8 0xD0B0D091D0B2 collate utf8_bin) from dual +select locate(_utf8mb3 0xD0B1, _utf8mb3 0xD0B0D091D0B2 collate utf8_bin) from dual END INPUT select insert('hello', 1, -4294967295, 'hi'); @@ -17906,7 +17906,7 @@ INPUT select export_set(3, _latin1'foo', _utf8'bar', ',', 4); END OUTPUT -select export_set(3, _latin1 'foo', _utf8 'bar', ',', 4) from dual +select export_set(3, 
_latin1 'foo', _utf8mb3 'bar', ',', 4) from dual END INPUT select a,hex(a) from t1; @@ -17966,7 +17966,7 @@ INPUT select timestampdiff(SQL_TSI_DAY, '2001-02-01', '2001-05-01') as a; END OUTPUT -select timestampdiff(SQL_TSI_DAY, '2001-02-01', '2001-05-01') as a from dual +select timestampdiff(day, '2001-02-01', '2001-05-01') as a from dual END INPUT select elt(1,c1,'�'),elt(1,'�',c1) from t1; @@ -18116,7 +18116,7 @@ INPUT select database() = _utf8"test"; END OUTPUT -select database() = _utf8 'test' from dual +select database() = _utf8mb3 'test' from dual END INPUT select collation(char(123)), collation(char(123 using binary)); @@ -18746,7 +18746,7 @@ INPUT select charset(charset(_utf8'a')), charset(collation(_utf8'a')); END OUTPUT -select charset(charset(_utf8 'a')), charset(collation(_utf8 'a')) from dual +select charset(charset(_utf8mb3 'a')), charset(collation(_utf8mb3 'a')) from dual END INPUT select * from `information_schema`.`key_column_usage` where `TABLE_NAME` = NULL; @@ -18764,7 +18764,7 @@ INPUT select timestampadd(WEEK, 1, date) from t1; END OUTPUT -select timestampadd(WEEK, 1, `date`) from t1 +select timestampadd(week, 1, `date`) from t1 END INPUT select log2(-1); @@ -18944,7 +18944,7 @@ INPUT select i from t1 where b=repeat(_utf8 'b',310); END OUTPUT -select i from t1 where b = repeat(_utf8 'b', 310) +select i from t1 where b = repeat(_utf8mb3 'b', 310) END INPUT select * from t1 where not(not(a)); @@ -18962,13 +18962,13 @@ INPUT select ifnull(NULL, _utf8'string'); END OUTPUT -select ifnull(null, _utf8 'string') from dual +select ifnull(null, _utf8mb3 'string') from dual END INPUT select hex(_utf8 B'001111111111'); END OUTPUT -select hex(_utf8 B'001111111111') from dual +select hex(_utf8mb3 B'001111111111') from dual END INPUT select right('hello', -18446744073709551615); @@ -19430,7 +19430,7 @@ INPUT select t1.*,t2.* from t1 left join t2 on (t1.b=t2.b) where collation(t2.a) = _utf8'binary' order by t1.a,t2.a; END OUTPUT -select t1.*, t2.* from t1 left 
join t2 on t1.b = t2.b where collation(t2.a) = _utf8 'binary' order by t1.a asc, t2.a asc +select t1.*, t2.* from t1 left join t2 on t1.b = t2.b where collation(t2.a) = _utf8mb3 'binary' order by t1.a asc, t2.a asc END INPUT select * from t1 where i = 2; @@ -19496,7 +19496,7 @@ INPUT select timestampdiff(SQL_TSI_DAY, '1986-02-01', '1986-03-01') as a1, timestampdiff(SQL_TSI_DAY, '1900-02-01', '1900-03-01') as a2, timestampdiff(SQL_TSI_DAY, '1996-02-01', '1996-03-01') as a3, timestampdiff(SQL_TSI_DAY, '2000-02-01', '2000-03-01') as a4; END OUTPUT -select timestampdiff(SQL_TSI_DAY, '1986-02-01', '1986-03-01') as a1, timestampdiff(SQL_TSI_DAY, '1900-02-01', '1900-03-01') as a2, timestampdiff(SQL_TSI_DAY, '1996-02-01', '1996-03-01') as a3, timestampdiff(SQL_TSI_DAY, '2000-02-01', '2000-03-01') as a4 from dual +select timestampdiff(day, '1986-02-01', '1986-03-01') as a1, timestampdiff(day, '1900-02-01', '1900-03-01') as a2, timestampdiff(day, '1996-02-01', '1996-03-01') as a3, timestampdiff(day, '2000-02-01', '2000-03-01') as a4 from dual END INPUT select * from information_schema.COLLATION_CHARACTER_SET_APPLICABILITY where COLLATION_NAME like 'latin1%' ORDER BY COLLATION_NAME; @@ -19646,7 +19646,7 @@ INPUT select greatest(1,_utf16'.',_utf8''); END OUTPUT -select greatest(1, _utf16 '.', _utf8 '') from dual +select greatest(1, _utf16 '.', _utf8mb3 '') from dual END INPUT select round(1e1,308), truncate(1e1, 308); @@ -20552,7 +20552,7 @@ INPUT select soundex(_utf8 0xD091D092D093); END OUTPUT -select soundex(_utf8 0xD091D092D093) from dual +select soundex(_utf8mb3 0xD091D092D093) from dual END INPUT select sum(a) from t1 where a > 10; @@ -20840,7 +20840,7 @@ INPUT select repeat(_utf8'+',3) as h union select NULL; END OUTPUT -select repeat(_utf8 '+', 3) as h from dual union select null from dual +select repeat(_utf8mb3 '+', 3) as h from dual union select null from dual END INPUT select fld1,fld3 FROM t2 where fld1 like "25050%"; @@ -20978,7 +20978,7 @@ INPUT select hex(_utf8 
0x616263FF); END OUTPUT -select hex(_utf8 0x616263FF) from dual +select hex(_utf8mb3 0x616263FF) from dual END INPUT select avg(a) as x from t1 having x=2; @@ -22658,7 +22658,7 @@ INPUT select timestampdiff(SQL_TSI_MINUTE, '2001-02-01 12:59:59', '2001-05-01 12:58:59') as a; END OUTPUT -select timestampdiff(SQL_TSI_MINUTE, '2001-02-01 12:59:59', '2001-05-01 12:58:59') as a from dual +select timestampdiff(minute, '2001-02-01 12:59:59', '2001-05-01 12:58:59') as a from dual END INPUT select date_add("1997-12-31 23:59:59",INTERVAL 100000 SECOND); diff --git a/go/vt/sqlparser/testdata/union_cases.txt b/go/vt/sqlparser/testdata/union_cases.txt index 529ebdb5efd..8e2def0e04e 100644 --- a/go/vt/sqlparser/testdata/union_cases.txt +++ b/go/vt/sqlparser/testdata/union_cases.txt @@ -572,7 +572,7 @@ INPUT select repeat(_utf8'+',3) as h union select NULL; END OUTPUT -select repeat(_utf8 '+', 3) as h from dual union select null from dual +select repeat(_utf8mb3 '+', 3) as h from dual union select null from dual END INPUT SELECT * FROM t1 UNION SELECT /*+ MAX_EXECUTION_TIME(0) */ * FROM t1; diff --git a/go/vt/sqlparser/tracked_buffer_test.go b/go/vt/sqlparser/tracked_buffer_test.go index af1de7c843e..6924bf11911 100644 --- a/go/vt/sqlparser/tracked_buffer_test.go +++ b/go/vt/sqlparser/tracked_buffer_test.go @@ -104,6 +104,10 @@ func TestCanonicalOutput(t *testing.T) { "create table a (v varchar(32)) engine=InnoDB", "CREATE TABLE `a` (\n\t`v` varchar(32)\n) ENGINE InnoDB", }, + { // tablespace names are case-sensitive: https://dev.mysql.com/doc/refman/en/general-tablespaces.html + "create table a (v varchar(32)) engine=InnoDB tablespace innodb_system", + "CREATE TABLE `a` (\n\t`v` varchar(32)\n) ENGINE InnoDB,\n TABLESPACE innodb_system", + }, { "create table a (id int not null primary key) engine InnoDB, charset utf8mb4, collate utf8mb4_0900_ai_ci partition by range (`id`) (partition `p10` values less than(10) engine InnoDB tablespace foo)", "CREATE TABLE `a` (\n\t`id` int NOT 
NULL PRIMARY KEY\n) ENGINE InnoDB,\n CHARSET utf8mb4,\n COLLATE utf8mb4_0900_ai_ci\nPARTITION BY RANGE (`id`)\n(PARTITION `p10` VALUES LESS THAN (10) ENGINE InnoDB TABLESPACE foo)", diff --git a/go/vt/sqlparser/walker_test.go b/go/vt/sqlparser/walker_test.go index f8bf2b4792a..560ed2ff470 100644 --- a/go/vt/sqlparser/walker_test.go +++ b/go/vt/sqlparser/walker_test.go @@ -18,6 +18,7 @@ package sqlparser import ( "fmt" + "math/rand" "testing" "github.com/stretchr/testify/require" @@ -26,7 +27,7 @@ import ( func BenchmarkWalkLargeExpression(b *testing.B) { for i := 0; i < 10; i++ { b.Run(fmt.Sprintf("%d", i), func(b *testing.B) { - exp := newGenerator(int64(i*100), 5).expression() + exp := NewGenerator(rand.New(rand.NewSource(int64(i*100))), 5).Expression(ExprGeneratorConfig{}) count := 0 for i := 0; i < b.N; i++ { err := Walk(func(node SQLNode) (kontinue bool, err error) { @@ -42,7 +43,7 @@ func BenchmarkWalkLargeExpression(b *testing.B) { func BenchmarkRewriteLargeExpression(b *testing.B) { for i := 1; i < 7; i++ { b.Run(fmt.Sprintf("%d", i), func(b *testing.B) { - exp := newGenerator(int64(i*100), i).expression() + exp := NewGenerator(rand.New(rand.NewSource(int64(i*100))), i).Expression(ExprGeneratorConfig{}) count := 0 for i := 0; i < b.N; i++ { _ = Rewrite(exp, func(_ *Cursor) bool { diff --git a/go/vt/srvtopo/discover_test.go b/go/vt/srvtopo/discover_test.go index c076ba0e7b7..ca4774a1b84 100644 --- a/go/vt/srvtopo/discover_test.go +++ b/go/vt/srvtopo/discover_test.go @@ -48,8 +48,9 @@ func (a TargetArray) Less(i, j int) bool { } func TestFindAllTargets(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") srvTopoCacheRefresh = 0 srvTopoCacheTTL = 0 @@ -58,7 +59,7 @@ func TestFindAllTargets(t *testing.T) { srvTopoCacheTTL = 1 * time.Second }() - rs := NewResilientServer(ts, 
"TestFindAllKeyspaceShards") + rs := NewResilientServer(ctx, ts, "TestFindAllKeyspaceShards") // No keyspace / shards. ks, err := FindAllTargets(ctx, rs, "cell1", []topodatapb.TabletType{topodatapb.TabletType_PRIMARY}) diff --git a/go/vt/srvtopo/keyspace_filtering_server_test.go b/go/vt/srvtopo/keyspace_filtering_server_test.go index 83e1a18e062..bcd5681f3e8 100644 --- a/go/vt/srvtopo/keyspace_filtering_server_test.go +++ b/go/vt/srvtopo/keyspace_filtering_server_test.go @@ -48,10 +48,10 @@ var ( } ) -func newFiltering(filter []string) (*topo.Server, *srvtopotest.PassthroughSrvTopoServer, Server) { +func newFiltering(ctx context.Context, filter []string) (*topo.Server, *srvtopotest.PassthroughSrvTopoServer, Server) { testServer := srvtopotest.NewPassthroughSrvTopoServer() - testServer.TopoServer = memorytopo.NewServer(stockCell) + testServer.TopoServer = memorytopo.NewServer(ctx, stockCell) testServer.SrvKeyspaceNames = []string{"foo", "bar", "baz"} testServer.SrvKeyspace = &topodatapb.SrvKeyspace{} testServer.WatchedSrvVSchema = stockVSchema @@ -71,7 +71,9 @@ func TestFilteringServerHandlesNilUnderlying(t *testing.T) { } func TestFilteringServerReturnsUnderlyingServer(t *testing.T) { - _, _, f := newFiltering(nil) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, _, f := newFiltering(ctx, nil) got, gotErr := f.GetTopoServer() if gotErr != nil { t.Errorf("Got error getting topo.Server from FilteringServer") @@ -108,17 +110,23 @@ func doTestGetSrvKeyspaceNames( } func TestFilteringServerGetSrvKeyspameNamesFiltersEverythingOut(t *testing.T) { - _, _, f := newFiltering(nil) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, _, f := newFiltering(ctx, nil) doTestGetSrvKeyspaceNames(t, f, stockCell, []string{}, nil) } func TestFilteringServerGetSrvKeyspaceNamesFiltersKeyspaces(t *testing.T) { - _, _, f := newFiltering(stockFilters) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, _, 
f := newFiltering(ctx, stockFilters) doTestGetSrvKeyspaceNames(t, f, stockCell, stockFilters, nil) } func TestFilteringServerGetSrvKeyspaceNamesPassesThroughErrors(t *testing.T) { - _, mock, f := newFiltering(stockFilters) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, mock, f := newFiltering(ctx, stockFilters) wantErr := fmt.Errorf("some badcell error") mock.SrvKeyspaceNamesError = wantErr doTestGetSrvKeyspaceNames(t, f, "badcell", stockFilters, wantErr) @@ -140,28 +148,36 @@ func doTestGetSrvKeyspace( } func TestFilteringServerGetSrvKeyspaceReturnsSelectedKeyspaces(t *testing.T) { - _, mock, f := newFiltering(stockFilters) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, mock, f := newFiltering(ctx, stockFilters) mock.SrvKeyspace = stockKeyspaces["bar"] doTestGetSrvKeyspace(t, f, stockCell, "bar", stockKeyspaces["bar"], nil) } func TestFilteringServerGetSrvKeyspaceErrorPassthrough(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() wantErr := fmt.Errorf("some error") - _, mock, f := newFiltering(stockFilters) + _, mock, f := newFiltering(ctx, stockFilters) mock.SrvKeyspace = stockKeyspaces["bar"] mock.SrvKeyspaceError = wantErr doTestGetSrvKeyspace(t, f, "badcell", "bar", stockKeyspaces["bar"], wantErr) } func TestFilteringServerGetSrvKeyspaceFilters(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() wantErr := topo.NewError(topo.NoNode, "foo") - _, mock, f := newFiltering(stockFilters) + _, mock, f := newFiltering(ctx, stockFilters) mock.SrvKeyspaceError = wantErr doTestGetSrvKeyspace(t, f, stockCell, "foo", nil, wantErr) } func TestFilteringServerWatchSrvVSchemaFiltersPassthroughSrvVSchema(t *testing.T) { - _, mock, f := newFiltering(stockFilters) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, mock, f := newFiltering(ctx, stockFilters) allowed := map[string]bool{} for _, ks := range 
stockFilters { @@ -196,8 +212,11 @@ func TestFilteringServerWatchSrvVSchemaFiltersPassthroughSrvVSchema(t *testing.T } func TestFilteringServerWatchSrvVSchemaHandlesNilSchema(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + wantErr := fmt.Errorf("some err") - _, mock, f := newFiltering(stockFilters) + _, mock, f := newFiltering(ctx, stockFilters) mock.WatchedSrvVSchema = nil mock.WatchedSrvVSchemaError = wantErr diff --git a/go/vt/srvtopo/resilient_server.go b/go/vt/srvtopo/resilient_server.go index 2a5be9be188..081fcca6ffa 100644 --- a/go/vt/srvtopo/resilient_server.go +++ b/go/vt/srvtopo/resilient_server.go @@ -17,6 +17,7 @@ limitations under the License. package srvtopo import ( + "context" "time" "github.com/spf13/pflag" @@ -78,7 +79,7 @@ type ResilientServer struct { // NewResilientServer creates a new ResilientServer // based on the provided topo.Server. -func NewResilientServer(base *topo.Server, counterPrefix string) *ResilientServer { +func NewResilientServer(ctx context.Context, base *topo.Server, counterPrefix string) *ResilientServer { if srvTopoCacheRefresh > srvTopoCacheTTL { log.Fatalf("srv_topo_cache_refresh must be less than or equal to srv_topo_cache_ttl") } @@ -94,8 +95,8 @@ func NewResilientServer(base *topo.Server, counterPrefix string) *ResilientServe return &ResilientServer{ topoServer: base, counts: counts, - SrvKeyspaceWatcher: NewSrvKeyspaceWatcher(base, counts, srvTopoCacheRefresh, srvTopoCacheTTL), - SrvVSchemaWatcher: NewSrvVSchemaWatcher(base, counts, srvTopoCacheRefresh, srvTopoCacheTTL), + SrvKeyspaceWatcher: NewSrvKeyspaceWatcher(ctx, base, counts, srvTopoCacheRefresh, srvTopoCacheTTL), + SrvVSchemaWatcher: NewSrvVSchemaWatcher(ctx, base, counts, srvTopoCacheRefresh, srvTopoCacheTTL), SrvKeyspaceNamesQuery: NewSrvKeyspaceNamesQuery(base, counts, srvTopoCacheRefresh, srvTopoCacheTTL), } } diff --git a/go/vt/srvtopo/resilient_server_test.go b/go/vt/srvtopo/resilient_server_test.go index 
95640028050..c237d43f300 100644 --- a/go/vt/srvtopo/resilient_server_test.go +++ b/go/vt/srvtopo/resilient_server_test.go @@ -43,7 +43,9 @@ import ( // TestGetSrvKeyspace will test we properly return updated SrvKeyspace. func TestGetSrvKeyspace(t *testing.T) { - ts, factory := memorytopo.NewServerAndFactory("test_cell") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, factory := memorytopo.NewServerAndFactory(ctx, "test_cell") srvTopoCacheTTL = 200 * time.Millisecond srvTopoCacheRefresh = 80 * time.Millisecond defer func() { @@ -51,7 +53,7 @@ func TestGetSrvKeyspace(t *testing.T) { srvTopoCacheRefresh = 1 * time.Second }() - rs := NewResilientServer(ts, "TestGetSrvKeyspace") + rs := NewResilientServer(ctx, ts, "TestGetSrvKeyspace") // Ask for a not-yet-created keyspace _, err := rs.GetSrvKeyspace(context.Background(), "test_cell", "test_ks") @@ -359,17 +361,18 @@ func TestGetSrvKeyspace(t *testing.T) { // TestSrvKeyspaceCachedError will test we properly re-try to query // the topo server upon failure. func TestSrvKeyspaceCachedError(t *testing.T) { - ts := memorytopo.NewServer("test_cell") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "test_cell") srvTopoCacheTTL = 100 * time.Millisecond srvTopoCacheRefresh = 40 * time.Millisecond defer func() { srvTopoCacheTTL = 1 * time.Second srvTopoCacheRefresh = 1 * time.Second }() - rs := NewResilientServer(ts, "TestSrvKeyspaceCachedErrors") + rs := NewResilientServer(ctx, ts, "TestSrvKeyspaceCachedErrors") // Ask for an unknown keyspace, should get an error. - ctx := context.Background() _, err := rs.GetSrvKeyspace(ctx, "test_cell", "unknown_ks") if err == nil { t.Fatalf("First GetSrvKeyspace didn't return an error") @@ -382,8 +385,6 @@ func TestSrvKeyspaceCachedError(t *testing.T) { time.Sleep(srvTopoCacheTTL + 10*time.Millisecond) // Ask again with a different context, should get an error and // save that context. 
- ctx, cancel := context.WithCancel(ctx) - defer cancel() _, err2 := rs.GetSrvKeyspace(ctx, "test_cell", "unknown_ks") if err2 == nil { t.Fatalf("Second GetSrvKeyspace didn't return an error") @@ -396,8 +397,11 @@ func TestSrvKeyspaceCachedError(t *testing.T) { // TestGetSrvKeyspaceCreated will test we properly get the initial // value if the SrvKeyspace already exists. func TestGetSrvKeyspaceCreated(t *testing.T) { - ts := memorytopo.NewServer("test_cell") - rs := NewResilientServer(ts, "TestGetSrvKeyspaceCreated") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "test_cell") + defer ts.Close() + rs := NewResilientServer(ctx, ts, "TestGetSrvKeyspaceCreated") // Set SrvKeyspace with value. want := &topodatapb.SrvKeyspace{} @@ -428,9 +432,10 @@ func TestGetSrvKeyspaceCreated(t *testing.T) { func TestWatchSrvVSchema(t *testing.T) { srvTopoCacheRefresh = 10 * time.Millisecond - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - rs := NewResilientServer(ts, "TestWatchSrvVSchema") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "test_cell") + rs := NewResilientServer(ctx, ts, "TestWatchSrvVSchema") // mu protects watchValue and watchErr. 
mu := sync.Mutex{} @@ -512,14 +517,19 @@ func TestWatchSrvVSchema(t *testing.T) { } func TestGetSrvKeyspaceNames(t *testing.T) { - ts, factory := memorytopo.NewServerAndFactory("test_cell") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, factory := memorytopo.NewServerAndFactory(ctx, "test_cell") + + time.Sleep(1 * time.Second) + srvTopoCacheTTL = 100 * time.Millisecond srvTopoCacheRefresh = 40 * time.Millisecond defer func() { srvTopoCacheTTL = 1 * time.Second srvTopoCacheRefresh = 1 * time.Second }() - rs := NewResilientServer(ts, "TestGetSrvKeyspaceNames") + rs := NewResilientServer(ctx, ts, "TestGetSrvKeyspaceNames") // Set SrvKeyspace with value want := &topodatapb.SrvKeyspace{} @@ -529,7 +539,6 @@ func TestGetSrvKeyspaceNames(t *testing.T) { err = ts.UpdateSrvKeyspace(context.Background(), "test_cell", "test_ks2", want) require.NoError(t, err, "UpdateSrvKeyspace(test_cell, test_ks2, %s) failed", want) - ctx := context.Background() names, err := rs.GetSrvKeyspaceNames(ctx, "test_cell", false) if err != nil { t.Errorf("GetSrvKeyspaceNames unexpected error %v", err) @@ -644,8 +653,8 @@ func TestGetSrvKeyspaceNames(t *testing.T) { time.Sleep(srvTopoCacheTTL) - timeoutCtx, cancel := context.WithTimeout(context.Background(), srvTopoCacheRefresh*2) //nolint - defer cancel() + timeoutCtx, timeoutCancel := context.WithTimeout(context.Background(), srvTopoCacheRefresh*2) //nolint + defer timeoutCancel() _, err = rs.GetSrvKeyspaceNames(timeoutCtx, "test_cell", false) if err != context.DeadlineExceeded { t.Errorf("expected error '%v', got '%v'", context.DeadlineExceeded, err.Error()) @@ -666,7 +675,9 @@ func (w *watched) equals(other *watched) bool { } func TestSrvKeyspaceWatcher(t *testing.T) { - ts, factory := memorytopo.NewServerAndFactory("test_cell") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, factory := memorytopo.NewServerAndFactory(ctx, "test_cell") srvTopoCacheTTL = 100 * time.Millisecond 
srvTopoCacheRefresh = 40 * time.Millisecond defer func() { @@ -674,7 +685,7 @@ func TestSrvKeyspaceWatcher(t *testing.T) { srvTopoCacheRefresh = 1 * time.Second }() - rs := NewResilientServer(ts, "TestGetSrvKeyspaceWatcher") + rs := NewResilientServer(ctx, ts, "TestGetSrvKeyspaceWatcher") var wmu sync.Mutex var wseen []watched @@ -790,7 +801,9 @@ func TestSrvKeyspaceWatcher(t *testing.T) { } func TestSrvKeyspaceListener(t *testing.T) { - ts, _ := memorytopo.NewServerAndFactory("test_cell") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "test_cell") srvTopoCacheTTL = 100 * time.Millisecond srvTopoCacheRefresh = 40 * time.Millisecond defer func() { @@ -798,16 +811,16 @@ func TestSrvKeyspaceListener(t *testing.T) { srvTopoCacheRefresh = 1 * time.Second }() - rs := NewResilientServer(ts, "TestGetSrvKeyspaceWatcher") + rs := NewResilientServer(ctx, ts, "TestGetSrvKeyspaceWatcher") - ctx, cancel := context.WithCancel(context.Background()) + cancelCtx, cancelFunc := context.WithCancel(context.Background()) var callbackCount atomic.Int32 // adding listener will perform callback. 
- rs.WatchSrvKeyspace(context.Background(), "test_cell", "test_ks", func(srvKs *topodatapb.SrvKeyspace, err error) bool { + rs.WatchSrvKeyspace(ctx, "test_cell", "test_ks", func(srvKs *topodatapb.SrvKeyspace, err error) bool { callbackCount.Add(1) select { - case <-ctx.Done(): + case <-cancelCtx.Done(): return false default: return true @@ -816,16 +829,16 @@ func TestSrvKeyspaceListener(t *testing.T) { // First update (callback - 2) want := &topodatapb.SrvKeyspace{} - err := ts.UpdateSrvKeyspace(context.Background(), "test_cell", "test_ks", want) + err := ts.UpdateSrvKeyspace(ctx, "test_cell", "test_ks", want) require.NoError(t, err) // Next callback to remove from listener - cancel() + cancelFunc() // multi updates thereafter for i := 0; i < 5; i++ { want = &topodatapb.SrvKeyspace{} - err = ts.UpdateSrvKeyspace(context.Background(), "test_cell", "test_ks", want) + err = ts.UpdateSrvKeyspace(ctx, "test_cell", "test_ks", want) require.NoError(t, err) time.Sleep(100 * time.Millisecond) } diff --git a/go/vt/srvtopo/resolver.go b/go/vt/srvtopo/resolver.go index 2cb3fed676c..98d77e259ef 100644 --- a/go/vt/srvtopo/resolver.go +++ b/go/vt/srvtopo/resolver.go @@ -17,12 +17,11 @@ limitations under the License. package srvtopo import ( + "context" "sort" "vitess.io/vitess/go/sqltypes" - "context" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/key" @@ -43,6 +42,9 @@ type Gateway interface { // QueryServiceByAlias returns a QueryService QueryServiceByAlias(alias *topodatapb.TabletAlias, target *querypb.Target) (queryservice.QueryService, error) + + // GetServingKeyspaces returns list of serving keyspaces. + GetServingKeyspaces() []string } // A Resolver can resolve keyspace ids and key ranges into ResolvedShard* diff --git a/go/vt/srvtopo/resolver_test.go b/go/vt/srvtopo/resolver_test.go index 49b108fcffb..95e6dbe620c 100644 --- a/go/vt/srvtopo/resolver_test.go +++ b/go/vt/srvtopo/resolver_test.go @@ -17,12 +17,11 @@ limitations under the License. 
package srvtopo import ( + "context" "testing" "github.com/stretchr/testify/require" - "context" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/logutil" @@ -34,11 +33,10 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) -func initResolver(t *testing.T, name string) *Resolver { - ctx := context.Background() +func initResolver(t *testing.T, ctx context.Context, name string) *Resolver { cell := "cell1" - ts := memorytopo.NewServer(cell) - rs := NewResilientServer(ts, name) + ts := memorytopo.NewServer(ctx, cell) + rs := NewResilientServer(ctx, ts, name) // Create sharded keyspace and shards. if err := ts.CreateKeyspace(ctx, "sks", &topodatapb.Keyspace{}); err != nil { @@ -97,7 +95,9 @@ func initResolver(t *testing.T, name string) *Resolver { } func TestResolveDestinations(t *testing.T) { - resolver := initResolver(t, "TestResolveDestinations") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + resolver := initResolver(t, ctx, "TestResolveDestinations") id1 := &querypb.Value{ Type: sqltypes.VarChar, diff --git a/go/vt/srvtopo/watch.go b/go/vt/srvtopo/watch.go index 2d470327c4e..36d8fd428bd 100644 --- a/go/vt/srvtopo/watch.go +++ b/go/vt/srvtopo/watch.go @@ -33,6 +33,7 @@ const ( watchStateIdle watchState = iota watchStateStarting watchStateRunning + watchStateStopped ) type watchEntry struct { @@ -100,7 +101,10 @@ func (entry *watchEntry) addListener(ctx context.Context, callback func(any, err callback(v, err) } -func (entry *watchEntry) ensureWatchingLocked() { +func (entry *watchEntry) ensureWatchingLocked(ctx context.Context) { + if ctx.Err() != nil { + return + } switch entry.watchState { case watchStateRunning, watchStateStarting: case watchStateIdle: @@ -121,7 +125,7 @@ func (entry *watchEntry) currentValueLocked(ctx context.Context) (any, error) { return entry.value, entry.lastError } - entry.ensureWatchingLocked() + entry.ensureWatchingLocked(ctx) cacheValid := entry.value != nil && 
time.Since(entry.lastValueTime) < entry.rw.cacheTTL if cacheValid { @@ -146,12 +150,12 @@ func (entry *watchEntry) currentValueLocked(ctx context.Context) (any, error) { return nil, entry.lastError } -func (entry *watchEntry) update(value any, err error, init bool) { +func (entry *watchEntry) update(ctx context.Context, value any, err error, init bool) { entry.mutex.Lock() defer entry.mutex.Unlock() if err != nil { - entry.onErrorLocked(err, init) + entry.onErrorLocked(ctx, err, init) } else { entry.onValueLocked(value) } @@ -179,7 +183,7 @@ func (entry *watchEntry) onValueLocked(value any) { entry.lastErrorTime = time.Time{} } -func (entry *watchEntry) onErrorLocked(err error, init bool) { +func (entry *watchEntry) onErrorLocked(ctx context.Context, err error, init bool) { entry.rw.counts.Add(errorCategory, 1) entry.lastErrorTime = time.Now() @@ -217,12 +221,13 @@ func (entry *watchEntry) onErrorLocked(err error, init bool) { entry.watchState = watchStateIdle // only retry the watch if we haven't been explicitly interrupted + if len(entry.listeners) > 0 && !topo.IsErrType(err, topo.Interrupted) { go func() { time.Sleep(entry.rw.cacheRefreshInterval) entry.mutex.Lock() - entry.ensureWatchingLocked() + entry.ensureWatchingLocked(ctx) entry.mutex.Unlock() }() } diff --git a/go/vt/srvtopo/watch_srvkeyspace.go b/go/vt/srvtopo/watch_srvkeyspace.go index e47e810b615..cefe95c6951 100644 --- a/go/vt/srvtopo/watch_srvkeyspace.go +++ b/go/vt/srvtopo/watch_srvkeyspace.go @@ -37,25 +37,25 @@ func (k *srvKeyspaceKey) String() string { return k.cell + "." 
+ k.keyspace } -func NewSrvKeyspaceWatcher(topoServer *topo.Server, counts *stats.CountersWithSingleLabel, cacheRefresh, cacheTTL time.Duration) *SrvKeyspaceWatcher { +func NewSrvKeyspaceWatcher(ctx context.Context, topoServer *topo.Server, counts *stats.CountersWithSingleLabel, cacheRefresh, cacheTTL time.Duration) *SrvKeyspaceWatcher { watch := func(entry *watchEntry) { key := entry.key.(*srvKeyspaceKey) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + requestCtx, requestCancel := context.WithCancel(context.Background()) + defer requestCancel() - current, changes, err := topoServer.WatchSrvKeyspace(ctx, key.cell, key.keyspace) + current, changes, err := topoServer.WatchSrvKeyspace(requestCtx, key.cell, key.keyspace) if err != nil { - entry.update(nil, err, true) + entry.update(ctx, nil, err, true) return } - entry.update(current.Value, current.Err, true) + entry.update(ctx, current.Value, current.Err, true) if current.Err != nil { return } for c := range changes { - entry.update(c.Value, c.Err, false) + entry.update(ctx, c.Value, c.Err, false) if c.Err != nil { return } diff --git a/go/vt/srvtopo/watch_srvvschema.go b/go/vt/srvtopo/watch_srvvschema.go index 251f5e55644..1b5536e623d 100644 --- a/go/vt/srvtopo/watch_srvvschema.go +++ b/go/vt/srvtopo/watch_srvvschema.go @@ -35,26 +35,25 @@ func (k cellName) String() string { return string(k) } -func NewSrvVSchemaWatcher(topoServer *topo.Server, counts *stats.CountersWithSingleLabel, cacheRefresh, cacheTTL time.Duration) *SrvVSchemaWatcher { +func NewSrvVSchemaWatcher(ctx context.Context, topoServer *topo.Server, counts *stats.CountersWithSingleLabel, cacheRefresh, cacheTTL time.Duration) *SrvVSchemaWatcher { watch := func(entry *watchEntry) { key := entry.key.(cellName) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + requestCtx, requestCancel := context.WithCancel(ctx) + defer requestCancel() - current, changes, err := topoServer.WatchSrvVSchema(ctx, 
key.String()) + current, changes, err := topoServer.WatchSrvVSchema(requestCtx, key.String()) if err != nil { - entry.update(nil, err, true) + entry.update(ctx, nil, err, true) return } - entry.update(current.Value, current.Err, true) + entry.update(ctx, current.Value, current.Err, true) if current.Err != nil { return } - defer cancel() for c := range changes { - entry.update(c.Value, c.Err, false) + entry.update(ctx, c.Value, c.Err, false) if c.Err != nil { return } diff --git a/go/vt/sysvars/sysvars.go b/go/vt/sysvars/sysvars.go index c4939d5c63e..98da8ff07b7 100644 --- a/go/vt/sysvars/sysvars.go +++ b/go/vt/sysvars/sysvars.go @@ -73,7 +73,10 @@ var ( QueryTimeout = SystemVariable{Name: "query_timeout"} // Online DDL - DDLStrategy = SystemVariable{Name: "ddl_strategy", IdentifierAsString: true} + DDLStrategy = SystemVariable{Name: "ddl_strategy", IdentifierAsString: true} + MigrationContext = SystemVariable{Name: "migration_context", IdentifierAsString: true} + + // Version Version = SystemVariable{Name: "version"} VersionComment = SystemVariable{Name: "version_comment"} @@ -95,6 +98,7 @@ var ( Charset, Names, SessionUUID, + MigrationContext, SessionEnableSystemSettings, ReadAfterWriteGTID, ReadAfterWriteTimeOut, diff --git a/go/vt/tableacl/tableacl.go b/go/vt/tableacl/tableacl.go index 4ee46ae7739..9a6e6eeba4e 100644 --- a/go/vt/tableacl/tableacl.go +++ b/go/vt/tableacl/tableacl.go @@ -26,13 +26,11 @@ import ( "sync" "github.com/tchap/go-patricia/patricia" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/json2" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/tableacl/acl" - tableaclpb "vitess.io/vitess/go/vt/proto/tableacl" + "vitess.io/vitess/go/vt/tableacl/acl" ) // ACLResult embeds an acl.ACL and also tell which table group it belongs to. 
@@ -188,7 +186,7 @@ func (tacl *tableACL) Set(config *tableaclpb.Config) error { } tacl.Lock() tacl.entries = entries - tacl.config = proto.Clone(config).(*tableaclpb.Config) + tacl.config = config.CloneVT() callback := tacl.callback tacl.Unlock() if callback != nil { @@ -277,7 +275,7 @@ func GetCurrentConfig() *tableaclpb.Config { func (tacl *tableACL) Config() *tableaclpb.Config { tacl.RLock() defer tacl.RUnlock() - return proto.Clone(tacl.config).(*tableaclpb.Config) + return tacl.config.CloneVT() } // Register registers an AclFactory. diff --git a/go/vt/tableacl/testlib/testlib.go b/go/vt/tableacl/testlib/testlib.go index 3c30c43d8dc..bdde9ae800f 100644 --- a/go/vt/tableacl/testlib/testlib.go +++ b/go/vt/tableacl/testlib/testlib.go @@ -21,7 +21,6 @@ import ( "fmt" "math/rand" "testing" - "time" querypb "vitess.io/vitess/go/vt/proto/query" tableaclpb "vitess.io/vitess/go/vt/proto/tableacl" @@ -127,7 +126,3 @@ func checkAccess(config *tableaclpb.Config, tableName string, role tableacl.Role } return nil } - -func init() { - rand.Seed(time.Now().UnixNano()) -} diff --git a/go/vt/throttler/demo/throttler_demo.go b/go/vt/throttler/demo/throttler_demo.go index 615f7c4fc93..126b9098236 100644 --- a/go/vt/throttler/demo/throttler_demo.go +++ b/go/vt/throttler/demo/throttler_demo.go @@ -231,13 +231,13 @@ type client struct { healthcheckCh chan *discovery.TabletHealth } -func newClient(primary *primary, replica *replica, ts *topo.Server) *client { +func newClient(ctx context.Context, primary *primary, replica *replica, ts *topo.Server) *client { t, err := throttler.NewThrottler("client", "TPS", 1, throttler.MaxRateModuleDisabled, 5 /* seconds */) if err != nil { log.Fatal(err) } - healthCheck := discovery.NewHealthCheck(context.Background(), 5*time.Second, 1*time.Minute, ts, "cell1", "") + healthCheck := discovery.NewHealthCheck(ctx, 5*time.Second, 1*time.Minute, ts, "cell1", "") c := &client{ primary: primary, healthCheck: healthCheck, @@ -307,10 +307,10 @@ func main() { 
}) log.Infof("start rate set to: %v", rate) - ts := memorytopo.NewServer("cell1") + ts := memorytopo.NewServer(context.Background(), "cell1") replica := newReplica(lagUpdateInterval, replicaDegrationInterval, replicaDegrationDuration, ts) primary := &primary{replica: replica} - client := newClient(primary, replica, ts) + client := newClient(context.Background(), primary, replica, ts) client.run() time.Sleep(duration) diff --git a/go/vt/throttler/manager_test.go b/go/vt/throttler/manager_test.go index 8c0e6ae4563..e6c3359b242 100644 --- a/go/vt/throttler/manager_test.go +++ b/go/vt/throttler/manager_test.go @@ -24,8 +24,6 @@ import ( "testing" "time" - "google.golang.org/protobuf/proto" - throttlerdatapb "vitess.io/vitess/go/vt/proto/throttlerdata" ) @@ -225,7 +223,7 @@ func TestManager_UpdateConfiguration_ZeroValues(t *testing.T) { defer f.tearDown() // Test the explicit copy of zero values. - zeroValueConfig := proto.Clone(defaultMaxReplicationLagModuleConfig.Configuration).(*throttlerdatapb.Configuration) + zeroValueConfig := defaultMaxReplicationLagModuleConfig.Configuration.CloneVT() zeroValueConfig.IgnoreNSlowestReplicas = 0 names, err := f.m.UpdateConfiguration("t2", zeroValueConfig, true /* copyZeroValues */) if err != nil { diff --git a/go/vt/throttler/max_replication_lag_module.go b/go/vt/throttler/max_replication_lag_module.go index e492764e443..f08c9211205 100644 --- a/go/vt/throttler/max_replication_lag_module.go +++ b/go/vt/throttler/max_replication_lag_module.go @@ -207,7 +207,7 @@ func (m *MaxReplicationLagModule) applyLatestConfig() { func (m *MaxReplicationLagModule) getConfiguration() *throttlerdatapb.Configuration { m.mutableConfigMu.Lock() defer m.mutableConfigMu.Unlock() - return proto.Clone(m.mutableConfig.Configuration).(*throttlerdatapb.Configuration) + return m.mutableConfig.Configuration.CloneVT() } func (m *MaxReplicationLagModule) updateConfiguration(configuration *throttlerdatapb.Configuration, copyZeroValues bool) error { @@ -217,7 
+217,7 @@ func (m *MaxReplicationLagModule) updateConfiguration(configuration *throttlerda newConfig := m.mutableConfig if copyZeroValues { - newConfig.Configuration = proto.Clone(configuration).(*throttlerdatapb.Configuration) + newConfig.Configuration = configuration.CloneVT() } else { proto.Merge(newConfig.Configuration, configuration) } @@ -599,7 +599,7 @@ func (m *MaxReplicationLagModule) decreaseAndGuessRate(r *result, now time.Time, if replicationLagChange == equal { // The replication lag did not change. Keep going at the current rate. - r.Reason = fmt.Sprintf("did not decrease the rate because the lag did not change (assuming a 1s error margin)") // nolint + r.Reason = "did not decrease the rate because the lag did not change (assuming a 1s error margin)" return } diff --git a/go/vt/throttler/max_replication_lag_module_config.go b/go/vt/throttler/max_replication_lag_module_config.go index 775aa4639a4..e61909f57dc 100644 --- a/go/vt/throttler/max_replication_lag_module_config.go +++ b/go/vt/throttler/max_replication_lag_module_config.go @@ -20,8 +20,6 @@ import ( "fmt" "time" - "google.golang.org/protobuf/proto" - throttlerdatapb "vitess.io/vitess/go/vt/proto/throttlerdata" ) @@ -33,9 +31,7 @@ type MaxReplicationLagModuleConfig struct { } func (cfg MaxReplicationLagModuleConfig) Clone() MaxReplicationLagModuleConfig { - return MaxReplicationLagModuleConfig{ - proto.Clone(cfg.Configuration).(*throttlerdatapb.Configuration), - } + return MaxReplicationLagModuleConfig{cfg.Configuration.CloneVT()} } // Most of the values are based on the assumption that vttablet is started diff --git a/go/vt/throttler/throttlerlogz.go b/go/vt/throttler/throttlerlogz.go index 80ff09b1707..4023dcd7e68 100644 --- a/go/vt/throttler/throttlerlogz.go +++ b/go/vt/throttler/throttlerlogz.go @@ -20,11 +20,11 @@ import ( "fmt" "io" "net/http" + "slices" "strings" "time" "github.com/google/safehtml/template" - "golang.org/x/exp/slices" "vitess.io/vitess/go/vt/logz" 
"vitess.io/vitess/go/vt/servenv" diff --git a/go/vt/throttler/throttlerz.go b/go/vt/throttler/throttlerz.go index 42b9a18284f..84431aad62f 100644 --- a/go/vt/throttler/throttlerz.go +++ b/go/vt/throttler/throttlerz.go @@ -18,10 +18,10 @@ package throttler import ( "net/http" + "slices" "strings" "github.com/google/safehtml/template" - "golang.org/x/exp/slices" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" diff --git a/go/vt/tlstest/tlstest.go b/go/vt/tlstest/tlstest.go index 0529ea4ef09..ae560115e8d 100644 --- a/go/vt/tlstest/tlstest.go +++ b/go/vt/tlstest/tlstest.go @@ -348,8 +348,8 @@ func RevokeCertAndRegenerateCRL(root, parent, name string) { log.Fatal(err) } - revoked := crlList.RevokedCertificates - revoked = append(revoked, pkix.RevokedCertificate{ + revoked := crlList.RevokedCertificateEntries + revoked = append(revoked, x509.RevocationListEntry{ SerialNumber: certificate.SerialNumber, RevocationTime: time.Now(), }) @@ -365,8 +365,8 @@ func RevokeCertAndRegenerateCRL(root, parent, name string) { var crlNumber big.Int newCrl, err := x509.CreateRevocationList(rand.Reader, &x509.RevocationList{ - RevokedCertificates: revoked, - Number: crlNumber.Add(crlList.Number, big.NewInt(1)), + RevokedCertificateEntries: revoked, + Number: crlNumber.Add(crlList.Number, big.NewInt(1)), }, caCert, caKey.(crypto.Signer)) if err != nil { log.Fatal(err) diff --git a/go/vt/tlstest/tlstest_test.go b/go/vt/tlstest/tlstest_test.go index c12e65b8d88..5c79e45b906 100644 --- a/go/vt/tlstest/tlstest_test.go +++ b/go/vt/tlstest/tlstest_test.go @@ -162,7 +162,7 @@ func testClientServer(t *testing.T, combineCerts bool) { // With TLS 1.3, the Dial will succeed and the first Read will fail. 
clientConn, err := tls.DialWithDialer(dialer, "tcp", addr, badClientConfig) if err != nil { - if !strings.Contains(err.Error(), "bad certificate") { + if !strings.Contains(err.Error(), "certificate required") { t.Errorf("Wrong error returned: %v", err) } return @@ -177,7 +177,8 @@ func testClientServer(t *testing.T, combineCerts bool) { if err == nil { t.Fatalf("Dial or first Read was expected to fail") } - if !strings.Contains(err.Error(), "bad certificate") { + + if !strings.Contains(err.Error(), "certificate required") { t.Errorf("Wrong error returned: %v", err) } } diff --git a/go/vt/topo/cell_info.go b/go/vt/topo/cell_info.go index fd7a4a5249e..4a8112084cb 100644 --- a/go/vt/topo/cell_info.go +++ b/go/vt/topo/cell_info.go @@ -60,6 +60,9 @@ func (ts *Server) GetCellInfoNames(ctx context.Context) ([]string, error) { // GetCellInfo reads a CellInfo from the global Conn. func (ts *Server) GetCellInfo(ctx context.Context, cell string, strongRead bool) (*topodatapb.CellInfo, error) { conn := ts.globalCell + if ctx.Err() != nil { + return nil, ctx.Err() + } if !strongRead { conn = ts.globalReadOnlyCell } diff --git a/go/vt/topo/consultopo/server.go b/go/vt/topo/consultopo/server.go index 3e9192b0e46..a7a5446c274 100644 --- a/go/vt/topo/consultopo/server.go +++ b/go/vt/topo/consultopo/server.go @@ -21,7 +21,6 @@ package consultopo import ( "encoding/json" - "fmt" "os" "strings" "sync" @@ -90,7 +89,7 @@ func getClientCreds() (creds map[string]*ClientAuthCred, err error) { } if err := json.Unmarshal(data, &creds); err != nil { - err = vterrors.Wrapf(err, fmt.Sprintf("Error parsing consul_auth_static_file")) //nolint + err = vterrors.Wrapf(err, "Error parsing consul_auth_static_file") return creds, err } return creds, nil diff --git a/go/vt/topo/consultopo/server_flaky_test.go b/go/vt/topo/consultopo/server_flaky_test.go index 797ad4c955f..a987336dd01 100644 --- a/go/vt/topo/consultopo/server_flaky_test.go +++ b/go/vt/topo/consultopo/server_flaky_test.go @@ -144,7 +144,9 
@@ func TestConsulTopo(t *testing.T) { // Run the TopoServerTestSuite tests. testIndex := 0 - test.TopoServerTestSuite(t, func() *topo.Server { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + test.TopoServerTestSuite(t, ctx, func() *topo.Server { // Each test will use its own sub-directories. testRoot := fmt.Sprintf("test-%v", testIndex) testIndex++ @@ -190,7 +192,9 @@ func TestConsulTopoWithChecks(t *testing.T) { // Run the TopoServerTestSuite tests. testIndex := 0 - test.TopoServerTestSuite(t, func() *topo.Server { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + test.TopoServerTestSuite(t, ctx, func() *topo.Server { // Each test will use its own sub-directories. testRoot := fmt.Sprintf("test-%v", testIndex) testIndex++ @@ -247,7 +251,9 @@ func TestConsulTopoWithAuth(t *testing.T) { t.Fatalf("couldn't write temp file: %v", err) } - test.TopoServerTestSuite(t, func() *topo.Server { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + test.TopoServerTestSuite(t, ctx, func() *topo.Server { // Each test will use its own sub-directories. testRoot := fmt.Sprintf("test-%v", testIndex) testIndex++ diff --git a/go/vt/topo/etcd2topo/server_test.go b/go/vt/topo/etcd2topo/server_test.go index 2e6853d8458..116205bd93a 100644 --- a/go/vt/topo/etcd2topo/server_test.go +++ b/go/vt/topo/etcd2topo/server_test.go @@ -245,7 +245,9 @@ func TestEtcd2Topo(t *testing.T) { } // Run the TopoServerTestSuite tests. 
- test.TopoServerTestSuite(t, func() *topo.Server { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + test.TopoServerTestSuite(t, ctx, func() *topo.Server { return newServer() }, []string{}) diff --git a/go/vt/topo/faketopo/faketopo.go b/go/vt/topo/faketopo/faketopo.go index 9265ba699a3..8601d28f5b6 100644 --- a/go/vt/topo/faketopo/faketopo.go +++ b/go/vt/topo/faketopo/faketopo.go @@ -340,13 +340,13 @@ func (f *FakeConn) Close() { } // NewFakeTopoServer creates a new fake topo server -func NewFakeTopoServer(factory *FakeFactory) *topo.Server { +func NewFakeTopoServer(ctx context.Context, factory *FakeFactory) *topo.Server { ts, err := topo.NewWithFactory(factory, "" /*serverAddress*/, "" /*root*/) if err != nil { log.Exitf("topo.NewWithFactory() failed: %v", err) } for cell := range factory.cells { - if err := ts.CreateCellInfo(context.Background(), cell, &topodatapb.CellInfo{}); err != nil { + if err := ts.CreateCellInfo(ctx, cell, &topodatapb.CellInfo{}); err != nil { log.Exitf("ts.CreateCellInfo(%v) failed: %v", cell, err) } } diff --git a/go/vt/topo/helpers/copy.go b/go/vt/topo/helpers/copy.go index f0ae912243b..0df706eba31 100644 --- a/go/vt/topo/helpers/copy.go +++ b/go/vt/topo/helpers/copy.go @@ -20,28 +20,30 @@ package helpers import ( "context" + "fmt" "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtgate/vindexes" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) // CopyKeyspaces will create the keyspaces in the destination topo. 
-func CopyKeyspaces(ctx context.Context, fromTS, toTS *topo.Server) { +func CopyKeyspaces(ctx context.Context, fromTS, toTS *topo.Server) error { keyspaces, err := fromTS.GetKeyspaces(ctx) if err != nil { - log.Fatalf("GetKeyspaces: %v", err) + return fmt.Errorf("GetKeyspaces: %w", err) } for _, keyspace := range keyspaces { ki, err := fromTS.GetKeyspace(ctx, keyspace) if err != nil { - log.Fatalf("GetKeyspace(%v): %v", keyspace, err) + return fmt.Errorf("GetKeyspace(%v): %w", keyspace, err) } if err := toTS.CreateKeyspace(ctx, keyspace, ki.Keyspace); err != nil { @@ -55,6 +57,11 @@ func CopyKeyspaces(ctx context.Context, fromTS, toTS *topo.Server) { vs, err := fromTS.GetVSchema(ctx, keyspace) switch { case err == nil: + _, err = vindexes.BuildKeyspace(vs) + if err != nil { + log.Errorf("BuildKeyspace(%v): %v", keyspace, err) + break + } if err := toTS.SaveVSchema(ctx, keyspace, vs); err != nil { log.Errorf("SaveVSchema(%v): %v", keyspace, err) } @@ -64,64 +71,67 @@ func CopyKeyspaces(ctx context.Context, fromTS, toTS *topo.Server) { log.Errorf("GetVSchema(%v): %v", keyspace, err) } } + + return nil } // CopyShards will create the shards in the destination topo. 
-func CopyShards(ctx context.Context, fromTS, toTS *topo.Server) { +func CopyShards(ctx context.Context, fromTS, toTS *topo.Server) error { keyspaces, err := fromTS.GetKeyspaces(ctx) if err != nil { - log.Fatalf("fromTS.GetKeyspaces: %v", err) + return fmt.Errorf("fromTS.GetKeyspaces: %w", err) } for _, keyspace := range keyspaces { shards, err := fromTS.GetShardNames(ctx, keyspace) if err != nil { - log.Fatalf("GetShardNames(%v): %v", keyspace, err) - return + return fmt.Errorf("GetShardNames(%v): %w", keyspace, err) } for _, shard := range shards { si, err := fromTS.GetShard(ctx, keyspace, shard) if err != nil { - log.Fatalf("GetShard(%v, %v): %v", keyspace, shard, err) + return fmt.Errorf("GetShard(%v, %v): %w", keyspace, shard, err) } if err := toTS.CreateShard(ctx, keyspace, shard); err != nil { if topo.IsErrType(err, topo.NodeExists) { log.Warningf("shard %v/%v already exists", keyspace, shard) } else { - log.Fatalf("CreateShard(%v, %v): %v", keyspace, shard, err) + return fmt.Errorf("CreateShard(%v, %v): %w", keyspace, shard, err) } } if _, err := toTS.UpdateShardFields(ctx, keyspace, shard, func(toSI *topo.ShardInfo) error { - toSI.Shard = proto.Clone(si.Shard).(*topodatapb.Shard) + toSI.Shard = si.Shard.CloneVT() return nil }); err != nil { - log.Fatalf("UpdateShardFields(%v, %v): %v", keyspace, shard, err) + return fmt.Errorf("UpdateShardFields(%v, %v): %w", keyspace, shard, err) } } } + + return nil } // CopyTablets will create the tablets in the destination topo. 
-func CopyTablets(ctx context.Context, fromTS, toTS *topo.Server) { +func CopyTablets(ctx context.Context, fromTS, toTS *topo.Server) error { cells, err := fromTS.GetKnownCells(ctx) if err != nil { - log.Fatalf("fromTS.GetKnownCells: %v", err) + return fmt.Errorf("fromTS.GetKnownCells: %w", err) } for _, cell := range cells { tabletAliases, err := fromTS.GetTabletAliasesByCell(ctx, cell) if err != nil { - log.Fatalf("GetTabletsByCell(%v): %v", cell, err) + return fmt.Errorf("GetTabletsByCell(%v): %w", cell, err) } else { for _, tabletAlias := range tabletAliases { // read the source tablet ti, err := fromTS.GetTablet(ctx, tabletAlias) if err != nil { - log.Fatalf("GetTablet(%v): %v", tabletAlias, err) + return fmt.Errorf("GetTablet(%v): %w", tabletAlias, err) } // try to create the destination @@ -135,37 +145,39 @@ func CopyTablets(ctx context.Context, fromTS, toTS *topo.Server) { }) } if err != nil { - log.Fatalf("CreateTablet(%v): %v", tabletAlias, err) + return fmt.Errorf("CreateTablet(%v): %w", tabletAlias, err) } } } } + + return nil } // CopyShardReplications will create the ShardReplication objects in // the destination topo. 
-func CopyShardReplications(ctx context.Context, fromTS, toTS *topo.Server) { +func CopyShardReplications(ctx context.Context, fromTS, toTS *topo.Server) error { keyspaces, err := fromTS.GetKeyspaces(ctx) if err != nil { - log.Fatalf("fromTS.GetKeyspaces: %v", err) + return fmt.Errorf("fromTS.GetKeyspaces: %w", err) } cells, err := fromTS.GetCellInfoNames(ctx) if err != nil { - log.Fatalf("GetCellInfoNames(): %v", err) + return fmt.Errorf("GetCellInfoNames(): %w", err) } for _, keyspace := range keyspaces { shards, err := fromTS.GetShardNames(ctx, keyspace) if err != nil { - log.Fatalf("GetShardNames(%v): %v", keyspace, err) + return fmt.Errorf("GetShardNames(%v): %w", keyspace, err) } for _, shard := range shards { for _, cell := range cells { sri, err := fromTS.GetShardReplication(ctx, cell, keyspace, shard) if err != nil { - log.Fatalf("GetShardReplication(%v, %v, %v): %v", cell, keyspace, shard, err) + return fmt.Errorf("GetShardReplication(%v, %v, %v): %w", cell, keyspace, shard, err) } sriNodes := map[string]struct{}{} @@ -196,15 +208,19 @@ func CopyShardReplications(ctx context.Context, fromTS, toTS *topo.Server) { } } } + + return nil } // CopyRoutingRules will create the routing rules in the destination topo. -func CopyRoutingRules(ctx context.Context, fromTS, toTS *topo.Server) { +func CopyRoutingRules(ctx context.Context, fromTS, toTS *topo.Server) error { rr, err := fromTS.GetRoutingRules(ctx) if err != nil { - log.Fatalf("GetRoutingRules: %v", err) + return fmt.Errorf("GetRoutingRules: %w", err) } if err := toTS.SaveRoutingRules(ctx, rr); err != nil { log.Errorf("SaveRoutingRules(%v): %v", rr, err) } + + return nil } diff --git a/go/vt/topo/helpers/copy_test.go b/go/vt/topo/helpers/copy_test.go index 73ecfabf66b..2086a2e6552 100644 --- a/go/vt/topo/helpers/copy_test.go +++ b/go/vt/topo/helpers/copy_test.go @@ -17,12 +17,11 @@ limitations under the License. 
package helpers import ( + "context" "testing" "github.com/stretchr/testify/require" - "context" - "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -34,8 +33,8 @@ func createSetup(ctx context.Context, t *testing.T) (*topo.Server, *topo.Server) // Create a source and destination TS. They will have // different generations, so we test using the Version for // both works as expected. - fromTS := memorytopo.NewServer("test_cell") - toTS := memorytopo.NewServer("test_cell") + fromTS := memorytopo.NewServer(ctx, "test_cell") + toTS := memorytopo.NewServer(ctx, "test_cell") // create a keyspace and a couple tablets if err := fromTS.CreateKeyspace(ctx, "test_keyspace", &topodatapb.Keyspace{}); err != nil { diff --git a/go/vt/topo/helpers/tee_topo_test.go b/go/vt/topo/helpers/tee_topo_test.go index 519301eaafa..8a4c5690846 100644 --- a/go/vt/topo/helpers/tee_topo_test.go +++ b/go/vt/topo/helpers/tee_topo_test.go @@ -19,15 +19,17 @@ package helpers import ( "testing" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/test" ) func TestTeeTopo(t *testing.T) { - test.TopoServerTestSuite(t, func() *topo.Server { - s1 := memorytopo.NewServer(test.LocalCellName) - s2 := memorytopo.NewServer(test.LocalCellName) + ctx := utils.LeakCheckContext(t) + test.TopoServerTestSuite(t, ctx, func() *topo.Server { + s1 := memorytopo.NewServer(ctx, test.LocalCellName) + s2 := memorytopo.NewServer(ctx, test.LocalCellName) tee, err := NewTee(s1, s2, false) if err != nil { t.Fatalf("NewTee() failed: %v", err) diff --git a/go/vt/topo/k8stopo/VitessTopoNodes-crd.yaml b/go/vt/topo/k8stopo/VitessTopoNodes-crd.yaml deleted file mode 100644 index 44e89925817..00000000000 --- a/go/vt/topo/k8stopo/VitessTopoNodes-crd.yaml +++ /dev/null @@ -1,43 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: vitesstoponodes.topo.vitess.io -spec: - group: topo.vitess.io 
- versions: - - name: v1beta1 - served: true - storage: true - additionalPrinterColumns: - - name: Key - type: string - description: The full key path - jsonPath: .data.key - schema: - openAPIV3Schema: - type: object - required: - - data - properties: - data: - type: object - required: - - key - - value - properties: - key: - description: A file-path like key. Must be an absolute path. Must not end with a /. - type: string - pattern: '^\/.+[^\/]$' - value: - description: A base64 encoded value. Must be a base64 encoded string or empty string. - type: string - pattern: "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$" - ephemeral: - description: Whether or not the node is considered ephemeral. True for lock and election nodes. - type: boolean - scope: Namespaced - names: - plural: vitesstoponodes - singular: vitesstoponode - kind: VitessTopoNode diff --git a/go/vt/topo/k8stopo/apis/topo/v1beta1/doc.go b/go/vt/topo/k8stopo/apis/topo/v1beta1/doc.go deleted file mode 100644 index e2be94ae46e..00000000000 --- a/go/vt/topo/k8stopo/apis/topo/v1beta1/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// +k8s:deepcopy-gen=package -// +k8s:defaulter-gen=TypeMeta -// +groupName=topo.vitess.io - -package v1beta1 diff --git a/go/vt/topo/k8stopo/apis/topo/v1beta1/register.go b/go/vt/topo/k8stopo/apis/topo/v1beta1/register.go deleted file mode 100644 index 49a9ee9a2a5..00000000000 --- a/go/vt/topo/k8stopo/apis/topo/v1beta1/register.go +++ /dev/null @@ -1,38 +0,0 @@ -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var SchemeGroupVersion = schema.GroupVersion{Group: "topo.vitess.io", Version: "v1beta1"} - -var ( - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - localSchemeBuilder.Register(addKnownTypes) -} - -// Resource takes an unqualified resource and returns a Group 
qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &VitessTopoNode{}, - &VitessTopoNodeList{}, - ) - - scheme.AddKnownTypes(SchemeGroupVersion, - &metav1.Status{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/go/vt/topo/k8stopo/apis/topo/v1beta1/types.go b/go/vt/topo/k8stopo/apis/topo/v1beta1/types.go deleted file mode 100644 index 48d48001d64..00000000000 --- a/go/vt/topo/k8stopo/apis/topo/v1beta1/types.go +++ /dev/null @@ -1,32 +0,0 @@ -package v1beta1 - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VitessTopoNode is a container for Vitess topology data -type VitessTopoNode struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - Data VitessTopoNodeData `json:"data"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VitessTopoNodeList is a top-level list type. The client methods for lists are automatically created. 
-type VitessTopoNodeList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - Items []VitessTopoNode `json:"items"` -} - -// VitessTopoNodeData contains the basic data for the node -type VitessTopoNodeData struct { - Key string `json:"key"` - Value string `json:"value"` - Ephemeral bool `json:"ephemeral"` -} diff --git a/go/vt/topo/k8stopo/apis/topo/v1beta1/zz_generated.deepcopy.go b/go/vt/topo/k8stopo/apis/topo/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index b9fd77141c0..00000000000 --- a/go/vt/topo/k8stopo/apis/topo/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,102 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VitessTopoNode) DeepCopyInto(out *VitessTopoNode) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Data = in.Data - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitessTopoNode. 
-func (in *VitessTopoNode) DeepCopy() *VitessTopoNode { - if in == nil { - return nil - } - out := new(VitessTopoNode) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VitessTopoNode) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VitessTopoNodeData) DeepCopyInto(out *VitessTopoNodeData) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitessTopoNodeData. -func (in *VitessTopoNodeData) DeepCopy() *VitessTopoNodeData { - if in == nil { - return nil - } - out := new(VitessTopoNodeData) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VitessTopoNodeList) DeepCopyInto(out *VitessTopoNodeList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]VitessTopoNode, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitessTopoNodeList. -func (in *VitessTopoNodeList) DeepCopy() *VitessTopoNodeList { - if in == nil { - return nil - } - out := new(VitessTopoNodeList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *VitessTopoNodeList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/clientset.go b/go/vt/topo/k8stopo/client/clientset/versioned/clientset.go deleted file mode 100644 index 83ad7c4c839..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/clientset.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package versioned - -import ( - "fmt" - - discovery "k8s.io/client-go/discovery" - rest "k8s.io/client-go/rest" - flowcontrol "k8s.io/client-go/util/flowcontrol" - - topov1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1" -) - -type Interface interface { - Discovery() discovery.DiscoveryInterface - TopoV1beta1() topov1beta1.TopoV1beta1Interface -} - -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. 
-type Clientset struct { - *discovery.DiscoveryClient - topoV1beta1 *topov1beta1.TopoV1beta1Client -} - -// TopoV1beta1 retrieves the TopoV1beta1Client -func (c *Clientset) TopoV1beta1() topov1beta1.TopoV1beta1Interface { - return c.topoV1beta1 -} - -// Discovery retrieves the DiscoveryClient -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - if c == nil { - return nil - } - return c.DiscoveryClient -} - -// NewForConfig creates a new Clientset for the given config. -// If config's RateLimiter is not set and QPS and Burst are acceptable, -// NewForConfig will generate a rate-limiter in configShallowCopy. -func NewForConfig(c *rest.Config) (*Clientset, error) { - configShallowCopy := *c - if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { - if configShallowCopy.Burst <= 0 { - return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") - } - configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) - } - var cs Clientset - var err error - cs.topoV1beta1, err = topov1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - return &cs, nil -} - -// NewForConfigOrDie creates a new Clientset for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *Clientset { - var cs Clientset - cs.topoV1beta1 = topov1beta1.NewForConfigOrDie(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &cs -} - -// New creates a new Clientset for the given RESTClient. 
-func New(c rest.Interface) *Clientset { - var cs Clientset - cs.topoV1beta1 = topov1beta1.New(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClient(c) - return &cs -} diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/doc.go b/go/vt/topo/k8stopo/client/clientset/versioned/doc.go deleted file mode 100644 index 499efc0b13e..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. -package versioned diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/fake/clientset_generated.go b/go/vt/topo/k8stopo/client/clientset/versioned/fake/clientset_generated.go deleted file mode 100644 index 532eab2b2a2..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/fake/clientset_generated.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/discovery" - fakediscovery "k8s.io/client-go/discovery/fake" - "k8s.io/client-go/testing" - - clientset "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned" - topov1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1" - faketopov1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake" -) - -// NewSimpleClientset returns a clientset that will respond with the provided objects. -// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement -// for a real clientset and is mostly useful in simple unit tests. -func NewSimpleClientset(objects ...runtime.Object) *Clientset { - o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) - for _, obj := range objects { - if err := o.Add(obj); err != nil { - panic(err) - } - } - - cs := &Clientset{tracker: o} - cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} - cs.AddReactor("*", "*", testing.ObjectReaction(o)) - cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { - gvr := action.GetResource() - ns := action.GetNamespace() - watch, err := o.Watch(gvr, ns) - if err != nil { - return false, nil, err - } - return true, watch, nil - }) - - return cs -} - -// Clientset implements clientset.Interface. Meant to be embedded into a -// struct to get a default implementation. This makes faking out just the method -// you want to test easier. 
-type Clientset struct { - testing.Fake - discovery *fakediscovery.FakeDiscovery - tracker testing.ObjectTracker -} - -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - return c.discovery -} - -func (c *Clientset) Tracker() testing.ObjectTracker { - return c.tracker -} - -var _ clientset.Interface = &Clientset{} - -// TopoV1beta1 retrieves the TopoV1beta1Client -func (c *Clientset) TopoV1beta1() topov1beta1.TopoV1beta1Interface { - return &faketopov1beta1.FakeTopoV1beta1{Fake: &c.Fake} -} diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/fake/doc.go b/go/vt/topo/k8stopo/client/clientset/versioned/fake/doc.go deleted file mode 100644 index 97b3e5c56db..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated fake clientset. -package fake diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/fake/register.go b/go/vt/topo/k8stopo/client/clientset/versioned/fake/register.go deleted file mode 100644 index fea362af64c..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/fake/register.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - - topov1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" -) - -var scheme = runtime.NewScheme() -var codecs = serializer.NewCodecFactory(scheme) -var parameterCodec = runtime.NewParameterCodec(scheme) -var localSchemeBuilder = runtime.SchemeBuilder{ - topov1beta1.AddToScheme, -} - -// AddToScheme adds all types of this clientset into the given scheme. This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. 
-var AddToScheme = localSchemeBuilder.AddToScheme - -func init() { - v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(AddToScheme(scheme)) -} diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/scheme/doc.go b/go/vt/topo/k8stopo/client/clientset/versioned/scheme/doc.go deleted file mode 100644 index 35280ae27c6..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/scheme/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package contains the scheme of the automatically generated clientset. -package scheme diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/scheme/register.go b/go/vt/topo/k8stopo/client/clientset/versioned/scheme/register.go deleted file mode 100644 index 7a9084ecb67..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/scheme/register.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package scheme - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - - topov1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" -) - -var Scheme = runtime.NewScheme() -var Codecs = serializer.NewCodecFactory(Scheme) -var ParameterCodec = runtime.NewParameterCodec(Scheme) -var localSchemeBuilder = runtime.SchemeBuilder{ - topov1beta1.AddToScheme, -} - -// AddToScheme adds all types of this clientset into the given scheme. This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. -var AddToScheme = localSchemeBuilder.AddToScheme - -func init() { - v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(AddToScheme(Scheme)) -} diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/doc.go b/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/doc.go deleted file mode 100644 index 4911ad316da..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1beta1 diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/doc.go b/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/doc.go deleted file mode 100644 index e8f15d37732..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/fake_topo_client.go b/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/fake_topo_client.go deleted file mode 100644 index 76c43f0d8dd..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/fake_topo_client.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" - - v1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1" -) - -type FakeTopoV1beta1 struct { - *testing.Fake -} - -func (c *FakeTopoV1beta1) VitessTopoNodes(namespace string) v1beta1.VitessTopoNodeInterface { - return &FakeVitessTopoNodes{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeTopoV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/fake_vitesstoponode.go b/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/fake_vitesstoponode.go deleted file mode 100644 index 45841625eb9..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/fake_vitesstoponode.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" - - v1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" -) - -// FakeVitessTopoNodes implements VitessTopoNodeInterface -type FakeVitessTopoNodes struct { - Fake *FakeTopoV1beta1 - ns string -} - -var vitesstoponodesResource = schema.GroupVersionResource{Group: "topo.vitess.io", Version: "v1beta1", Resource: "vitesstoponodes"} - -var vitesstoponodesKind = schema.GroupVersionKind{Group: "topo.vitess.io", Version: "v1beta1", Kind: "VitessTopoNode"} - -// Get takes name of the vitessTopoNode, and returns the corresponding vitessTopoNode object, and an error if there is any. -func (c *FakeVitessTopoNodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VitessTopoNode, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(vitesstoponodesResource, c.ns, name), &v1beta1.VitessTopoNode{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VitessTopoNode), err -} - -// List takes label and field selectors, and returns the list of VitessTopoNodes that match those selectors. -func (c *FakeVitessTopoNodes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VitessTopoNodeList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(vitesstoponodesResource, vitesstoponodesKind, c.ns, opts), &v1beta1.VitessTopoNodeList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.VitessTopoNodeList{ListMeta: obj.(*v1beta1.VitessTopoNodeList).ListMeta} - for _, item := range obj.(*v1beta1.VitessTopoNodeList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested vitessTopoNodes. -func (c *FakeVitessTopoNodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(vitesstoponodesResource, c.ns, opts)) - -} - -// Create takes the representation of a vitessTopoNode and creates it. Returns the server's representation of the vitessTopoNode, and an error, if there is any. -func (c *FakeVitessTopoNodes) Create(ctx context.Context, vitessTopoNode *v1beta1.VitessTopoNode, opts v1.CreateOptions) (result *v1beta1.VitessTopoNode, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(vitesstoponodesResource, c.ns, vitessTopoNode), &v1beta1.VitessTopoNode{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VitessTopoNode), err -} - -// Update takes the representation of a vitessTopoNode and updates it. Returns the server's representation of the vitessTopoNode, and an error, if there is any. -func (c *FakeVitessTopoNodes) Update(ctx context.Context, vitessTopoNode *v1beta1.VitessTopoNode, opts v1.UpdateOptions) (result *v1beta1.VitessTopoNode, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(vitesstoponodesResource, c.ns, vitessTopoNode), &v1beta1.VitessTopoNode{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VitessTopoNode), err -} - -// Delete takes name of the vitessTopoNode and deletes it. 
Returns an error if one occurs. -func (c *FakeVitessTopoNodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(vitesstoponodesResource, c.ns, name), &v1beta1.VitessTopoNode{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeVitessTopoNodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(vitesstoponodesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.VitessTopoNodeList{}) - return err -} - -// Patch applies the patch and returns the patched vitessTopoNode. -func (c *FakeVitessTopoNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VitessTopoNode, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(vitesstoponodesResource, c.ns, name, pt, data, subresources...), &v1beta1.VitessTopoNode{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VitessTopoNode), err -} diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/generated_expansion.go b/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/generated_expansion.go deleted file mode 100644 index 1219ba9ee0c..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -type VitessTopoNodeExpansion any diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/topo_client.go b/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/topo_client.go deleted file mode 100644 index 55b78f6fd17..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/topo_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - rest "k8s.io/client-go/rest" - - v1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" - "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned/scheme" -) - -type TopoV1beta1Interface interface { - RESTClient() rest.Interface - VitessTopoNodesGetter -} - -// TopoV1beta1Client is used to interact with features provided by the topo.vitess.io group. -type TopoV1beta1Client struct { - restClient rest.Interface -} - -func (c *TopoV1beta1Client) VitessTopoNodes(namespace string) VitessTopoNodeInterface { - return newVitessTopoNodes(c, namespace) -} - -// NewForConfig creates a new TopoV1beta1Client for the given config. 
-func NewForConfig(c *rest.Config) (*TopoV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &TopoV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new TopoV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *TopoV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new TopoV1beta1Client for the given RESTClient. -func New(c rest.Interface) *TopoV1beta1Client { - return &TopoV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *TopoV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/vitesstoponode.go b/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/vitesstoponode.go deleted file mode 100644 index 7b458f7dfee..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/vitesstoponode.go +++ /dev/null @@ -1,179 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - - v1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" - scheme "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned/scheme" -) - -// VitessTopoNodesGetter has a method to return a VitessTopoNodeInterface. -// A group's client should implement this interface. -type VitessTopoNodesGetter interface { - VitessTopoNodes(namespace string) VitessTopoNodeInterface -} - -// VitessTopoNodeInterface has methods to work with VitessTopoNode resources. 
-type VitessTopoNodeInterface interface { - Create(ctx context.Context, vitessTopoNode *v1beta1.VitessTopoNode, opts v1.CreateOptions) (*v1beta1.VitessTopoNode, error) - Update(ctx context.Context, vitessTopoNode *v1beta1.VitessTopoNode, opts v1.UpdateOptions) (*v1beta1.VitessTopoNode, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.VitessTopoNode, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.VitessTopoNodeList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VitessTopoNode, err error) - VitessTopoNodeExpansion -} - -// vitessTopoNodes implements VitessTopoNodeInterface -type vitessTopoNodes struct { - client rest.Interface - ns string -} - -// newVitessTopoNodes returns a VitessTopoNodes -func newVitessTopoNodes(c *TopoV1beta1Client, namespace string) *vitessTopoNodes { - return &vitessTopoNodes{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the vitessTopoNode, and returns the corresponding vitessTopoNode object, and an error if there is any. -func (c *vitessTopoNodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VitessTopoNode, err error) { - result = &v1beta1.VitessTopoNode{} - err = c.client.Get(). - Namespace(c.ns). - Resource("vitesstoponodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VitessTopoNodes that match those selectors. 
-func (c *vitessTopoNodes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VitessTopoNodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.VitessTopoNodeList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("vitesstoponodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested vitessTopoNodes. -func (c *vitessTopoNodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("vitesstoponodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a vitessTopoNode and creates it. Returns the server's representation of the vitessTopoNode, and an error, if there is any. -func (c *vitessTopoNodes) Create(ctx context.Context, vitessTopoNode *v1beta1.VitessTopoNode, opts v1.CreateOptions) (result *v1beta1.VitessTopoNode, err error) { - result = &v1beta1.VitessTopoNode{} - err = c.client.Post(). - Namespace(c.ns). - Resource("vitesstoponodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(vitessTopoNode). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a vitessTopoNode and updates it. Returns the server's representation of the vitessTopoNode, and an error, if there is any. -func (c *vitessTopoNodes) Update(ctx context.Context, vitessTopoNode *v1beta1.VitessTopoNode, opts v1.UpdateOptions) (result *v1beta1.VitessTopoNode, err error) { - result = &v1beta1.VitessTopoNode{} - err = c.client.Put(). - Namespace(c.ns). - Resource("vitesstoponodes"). - Name(vitessTopoNode.Name). 
- VersionedParams(&opts, scheme.ParameterCodec). - Body(vitessTopoNode). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the vitessTopoNode and deletes it. Returns an error if one occurs. -func (c *vitessTopoNodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("vitesstoponodes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *vitessTopoNodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("vitesstoponodes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched vitessTopoNode. -func (c *vitessTopoNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VitessTopoNode, err error) { - result = &v1beta1.VitessTopoNode{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("vitesstoponodes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/go/vt/topo/k8stopo/client/informers/externalversions/factory.go b/go/vt/topo/k8stopo/client/informers/externalversions/factory.go deleted file mode 100644 index 5c5886daedd..00000000000 --- a/go/vt/topo/k8stopo/client/informers/externalversions/factory.go +++ /dev/null @@ -1,181 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package externalversions - -import ( - reflect "reflect" - sync "sync" - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - cache "k8s.io/client-go/tools/cache" - - versioned "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned" - internalinterfaces "vitess.io/vitess/go/vt/topo/k8stopo/client/informers/externalversions/internalinterfaces" - topo "vitess.io/vitess/go/vt/topo/k8stopo/client/informers/externalversions/topo" -) - -// SharedInformerOption defines the functional option type for SharedInformerFactory. -type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory - -type sharedInformerFactory struct { - client versioned.Interface - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc - lock sync.Mutex - defaultResync time.Duration - customResync map[reflect.Type]time.Duration - - informers map[reflect.Type]cache.SharedIndexInformer - // startedInformers is used for tracking which informers have been started. - // This allows Start() to be called multiple times safely. - startedInformers map[reflect.Type]bool -} - -// WithCustomResyncConfig sets a custom resync period for the specified informer types. 
-func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - for k, v := range resyncConfig { - factory.customResync[reflect.TypeOf(k)] = v - } - return factory - } -} - -// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. -func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.tweakListOptions = tweakListOptions - return factory - } -} - -// WithNamespace limits the SharedInformerFactory to the specified namespace. -func WithNamespace(namespace string) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.namespace = namespace - return factory - } -} - -// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. -func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { - return NewSharedInformerFactoryWithOptions(client, defaultResync) -} - -// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. -// Listers obtained via this SharedInformerFactory will be subject to the same filters -// as specified here. -// Deprecated: Please use NewSharedInformerFactoryWithOptions instead -func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { - return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) -} - -// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. 
-func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { - factory := &sharedInformerFactory{ - client: client, - namespace: v1.NamespaceAll, - defaultResync: defaultResync, - informers: make(map[reflect.Type]cache.SharedIndexInformer), - startedInformers: make(map[reflect.Type]bool), - customResync: make(map[reflect.Type]time.Duration), - } - - // Apply all options - for _, opt := range options { - factory = opt(factory) - } - - return factory -} - -// Start initializes all requested informers. -func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { - f.lock.Lock() - defer f.lock.Unlock() - - for informerType, informer := range f.informers { - if !f.startedInformers[informerType] { - go informer.Run(stopCh) - f.startedInformers[informerType] = true - } - } -} - -// WaitForCacheSync waits for all started informers' cache were synced. -func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { - informers := func() map[reflect.Type]cache.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informers := map[reflect.Type]cache.SharedIndexInformer{} - for informerType, informer := range f.informers { - if f.startedInformers[informerType] { - informers[informerType] = informer - } - } - return informers - }() - - res := map[reflect.Type]bool{} - for informType, informer := range informers { - res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) - } - return res -} - -// InternalInformerFor returns the SharedIndexInformer for obj using an internal -// client. 
-func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informerType := reflect.TypeOf(obj) - informer, exists := f.informers[informerType] - if exists { - return informer - } - - resyncPeriod, exists := f.customResync[informerType] - if !exists { - resyncPeriod = f.defaultResync - } - - informer = newFunc(f.client, resyncPeriod) - f.informers[informerType] = informer - - return informer -} - -// SharedInformerFactory provides shared informers for resources in all known -// API group versions. -type SharedInformerFactory interface { - internalinterfaces.SharedInformerFactory - ForResource(resource schema.GroupVersionResource) (GenericInformer, error) - WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool - - Topo() topo.Interface -} - -func (f *sharedInformerFactory) Topo() topo.Interface { - return topo.New(f, f.namespace, f.tweakListOptions) -} diff --git a/go/vt/topo/k8stopo/client/informers/externalversions/generic.go b/go/vt/topo/k8stopo/client/informers/externalversions/generic.go deleted file mode 100644 index 8064882a6bd..00000000000 --- a/go/vt/topo/k8stopo/client/informers/externalversions/generic.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
- -package externalversions - -import ( - "fmt" - - schema "k8s.io/apimachinery/pkg/runtime/schema" - cache "k8s.io/client-go/tools/cache" - - v1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" -) - -// GenericInformer is type of SharedIndexInformer which will locate and delegate to other -// sharedInformers based on type -type GenericInformer interface { - Informer() cache.SharedIndexInformer - Lister() cache.GenericLister -} - -type genericInformer struct { - informer cache.SharedIndexInformer - resource schema.GroupResource -} - -// Informer returns the SharedIndexInformer. -func (f *genericInformer) Informer() cache.SharedIndexInformer { - return f.informer -} - -// Lister returns the GenericLister. -func (f *genericInformer) Lister() cache.GenericLister { - return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) -} - -// ForResource gives generic access to a shared informer of the matching type -// TODO extend this to unknown resources with a client pool -func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { - switch resource { - // Group=topo.vitess.io, Version=v1beta1 - case v1beta1.SchemeGroupVersion.WithResource("vitesstoponodes"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Topo().V1beta1().VitessTopoNodes().Informer()}, nil - - } - - return nil, fmt.Errorf("no informer found for %v", resource) -} diff --git a/go/vt/topo/k8stopo/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/go/vt/topo/k8stopo/client/informers/externalversions/internalinterfaces/factory_interfaces.go deleted file mode 100644 index ca90aa8983f..00000000000 --- a/go/vt/topo/k8stopo/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package internalinterfaces - -import ( - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - cache "k8s.io/client-go/tools/cache" - - versioned "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned" -) - -// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. -type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer - -// SharedInformerFactory a small interface to allow for adding an informer without an import cycle -type SharedInformerFactory interface { - Start(stopCh <-chan struct{}) - InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer -} - -// TweakListOptionsFunc is a function that transforms a v1.ListOptions. -type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/go/vt/topo/k8stopo/client/informers/externalversions/topo/interface.go b/go/vt/topo/k8stopo/client/informers/externalversions/topo/interface.go deleted file mode 100644 index 655cf46d4da..00000000000 --- a/go/vt/topo/k8stopo/client/informers/externalversions/topo/interface.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package topo - -import ( - internalinterfaces "vitess.io/vitess/go/vt/topo/k8stopo/client/informers/externalversions/internalinterfaces" - v1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/client/informers/externalversions/topo/v1beta1" -) - -// Interface provides access to each of this group's versions. -type Interface interface { - // V1beta1 provides access to shared informers for resources in V1beta1. - V1beta1() v1beta1.Interface -} - -type group struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// V1beta1 returns a new v1beta1.Interface. -func (g *group) V1beta1() v1beta1.Interface { - return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) -} diff --git a/go/vt/topo/k8stopo/client/informers/externalversions/topo/v1beta1/interface.go b/go/vt/topo/k8stopo/client/informers/externalversions/topo/v1beta1/interface.go deleted file mode 100644 index f1bc2dc3db6..00000000000 --- a/go/vt/topo/k8stopo/client/informers/externalversions/topo/v1beta1/interface.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1beta1 - -import ( - internalinterfaces "vitess.io/vitess/go/vt/topo/k8stopo/client/informers/externalversions/internalinterfaces" -) - -// Interface provides access to all the informers in this group version. -type Interface interface { - // VitessTopoNodes returns a VitessTopoNodeInformer. - VitessTopoNodes() VitessTopoNodeInformer -} - -type version struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// VitessTopoNodes returns a VitessTopoNodeInformer. -func (v *version) VitessTopoNodes() VitessTopoNodeInformer { - return &vitessTopoNodeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} diff --git a/go/vt/topo/k8stopo/client/informers/externalversions/topo/v1beta1/vitesstoponode.go b/go/vt/topo/k8stopo/client/informers/externalversions/topo/v1beta1/vitesstoponode.go deleted file mode 100644 index af6e8b17acc..00000000000 --- a/go/vt/topo/k8stopo/client/informers/externalversions/topo/v1beta1/vitesstoponode.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" - - topov1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" - versioned "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned" - internalinterfaces "vitess.io/vitess/go/vt/topo/k8stopo/client/informers/externalversions/internalinterfaces" - v1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/client/listers/topo/v1beta1" -) - -// VitessTopoNodeInformer provides access to a shared informer and lister for -// VitessTopoNodes. -type VitessTopoNodeInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1beta1.VitessTopoNodeLister -} - -type vitessTopoNodeInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewVitessTopoNodeInformer constructs a new informer for VitessTopoNode type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewVitessTopoNodeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredVitessTopoNodeInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredVitessTopoNodeInformer constructs a new informer for VitessTopoNode type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredVitessTopoNodeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.TopoV1beta1().VitessTopoNodes(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.TopoV1beta1().VitessTopoNodes(namespace).Watch(context.TODO(), options) - }, - }, - &topov1beta1.VitessTopoNode{}, - resyncPeriod, - indexers, - ) -} - -func (f *vitessTopoNodeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredVitessTopoNodeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *vitessTopoNodeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&topov1beta1.VitessTopoNode{}, f.defaultInformer) -} - -func (f *vitessTopoNodeInformer) Lister() v1beta1.VitessTopoNodeLister { - return v1beta1.NewVitessTopoNodeLister(f.Informer().GetIndexer()) -} diff --git 
a/go/vt/topo/k8stopo/client/listers/topo/v1beta1/expansion_generated.go b/go/vt/topo/k8stopo/client/listers/topo/v1beta1/expansion_generated.go deleted file mode 100644 index b1602ecced9..00000000000 --- a/go/vt/topo/k8stopo/client/listers/topo/v1beta1/expansion_generated.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -// VitessTopoNodeListerExpansion allows custom methods to be added to -// VitessTopoNodeLister. -type VitessTopoNodeListerExpansion any - -// VitessTopoNodeNamespaceListerExpansion allows custom methods to be added to -// VitessTopoNodeNamespaceLister. -type VitessTopoNodeNamespaceListerExpansion any diff --git a/go/vt/topo/k8stopo/client/listers/topo/v1beta1/vitesstoponode.go b/go/vt/topo/k8stopo/client/listers/topo/v1beta1/vitesstoponode.go deleted file mode 100644 index 4ca6176b146..00000000000 --- a/go/vt/topo/k8stopo/client/listers/topo/v1beta1/vitesstoponode.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - - v1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" -) - -// VitessTopoNodeLister helps list VitessTopoNodes. -type VitessTopoNodeLister interface { - // List lists all VitessTopoNodes in the indexer. - List(selector labels.Selector) (ret []*v1beta1.VitessTopoNode, err error) - // VitessTopoNodes returns an object that can list and get VitessTopoNodes. - VitessTopoNodes(namespace string) VitessTopoNodeNamespaceLister - VitessTopoNodeListerExpansion -} - -// vitessTopoNodeLister implements the VitessTopoNodeLister interface. -type vitessTopoNodeLister struct { - indexer cache.Indexer -} - -// NewVitessTopoNodeLister returns a new VitessTopoNodeLister. -func NewVitessTopoNodeLister(indexer cache.Indexer) VitessTopoNodeLister { - return &vitessTopoNodeLister{indexer: indexer} -} - -// List lists all VitessTopoNodes in the indexer. -func (s *vitessTopoNodeLister) List(selector labels.Selector) (ret []*v1beta1.VitessTopoNode, err error) { - err = cache.ListAll(s.indexer, selector, func(m any) { - ret = append(ret, m.(*v1beta1.VitessTopoNode)) - }) - return ret, err -} - -// VitessTopoNodes returns an object that can list and get VitessTopoNodes. 
-func (s *vitessTopoNodeLister) VitessTopoNodes(namespace string) VitessTopoNodeNamespaceLister { - return vitessTopoNodeNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// VitessTopoNodeNamespaceLister helps list and get VitessTopoNodes. -type VitessTopoNodeNamespaceLister interface { - // List lists all VitessTopoNodes in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1beta1.VitessTopoNode, err error) - // Get retrieves the VitessTopoNode from the indexer for a given namespace and name. - Get(name string) (*v1beta1.VitessTopoNode, error) - VitessTopoNodeNamespaceListerExpansion -} - -// vitessTopoNodeNamespaceLister implements the VitessTopoNodeNamespaceLister -// interface. -type vitessTopoNodeNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all VitessTopoNodes in the indexer for a given namespace. -func (s vitessTopoNodeNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.VitessTopoNode, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m any) { - ret = append(ret, m.(*v1beta1.VitessTopoNode)) - }) - return ret, err -} - -// Get retrieves the VitessTopoNode from the indexer for a given namespace and name. -func (s vitessTopoNodeNamespaceLister) Get(name string) (*v1beta1.VitessTopoNode, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("vitesstoponode"), name) - } - return obj.(*v1beta1.VitessTopoNode), nil -} diff --git a/go/vt/topo/k8stopo/config.go b/go/vt/topo/k8stopo/config.go deleted file mode 100644 index 7db781d6497..00000000000 --- a/go/vt/topo/k8stopo/config.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo diff --git a/go/vt/topo/k8stopo/directory.go b/go/vt/topo/k8stopo/directory.go deleted file mode 100644 index abc18cf8bef..00000000000 --- a/go/vt/topo/k8stopo/directory.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo - -import ( - "context" - "path/filepath" - "sort" - "strings" - - "vitess.io/vitess/go/vt/topo" - vtv1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" -) - -// ListDir is part of the topo.Conn interface. 
-// It uses an internal cache to find all the objects matching a specific key and returns -// a slice of results sorted alphabetically to emulate the behavior of etcd, zk, consul, etc -func (s *Server) ListDir(ctx context.Context, dirPath string, full bool) ([]topo.DirEntry, error) { - dirPath = filepath.Join(s.root, dirPath) - dirMap := map[string]topo.DirEntry{} - - if children, err := s.memberIndexer.ByIndex("by_parent", dirPath); err == nil { - for _, obj := range children { - vtn := obj.(*vtv1beta1.VitessTopoNode) - - key := vtn.Data.Key - - // skip duplicates - if _, ok := dirMap[key]; ok { - continue - } - - // new empty entry - e := topo.DirEntry{ - Ephemeral: vtn.Data.Ephemeral, - } - - // Clean dirPath from key to get name - key = strings.TrimPrefix(key, dirPath+"/") - - // If the key represents a directory - if strings.Contains(key, "/") { - if full { - e.Type = topo.TypeDirectory - } - - // get first part of path as name - key = strings.Split(filepath.Dir(key), "/")[0] - } else if full { - e.Type = topo.TypeFile - } - - // set name - e.Name = key - - // add to results - dirMap[e.Name] = e - } - } else { - return nil, err - } - - // An empty map means not found - if len(dirMap) == 0 { - return nil, topo.NewError(topo.NoNode, dirPath) - } - - // Get slice of keys - var keys []string - for key := range dirMap { - keys = append(keys, key) - } - - // sort keys - sort.Strings(keys) - - // Get ordered result - var result []topo.DirEntry - for _, k := range keys { - result = append(result, dirMap[k]) - } - - return result, nil -} diff --git a/go/vt/topo/k8stopo/election.go b/go/vt/topo/k8stopo/election.go deleted file mode 100644 index 9c89faf445d..00000000000 --- a/go/vt/topo/k8stopo/election.go +++ /dev/null @@ -1,129 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo - -import ( - "context" - "path" - - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/topo" -) - -const electionsPath = "elections" - -// NewLeaderParticipation is part of the topo.Server interface -func (s *Server) NewLeaderParticipation(name, id string) (topo.LeaderParticipation, error) { - return &kubernetesLeaderParticipation{ - s: s, - name: name, - id: id, - stop: make(chan struct{}), - done: make(chan struct{}), - }, nil -} - -// kubernetesLeaderParticipation implements topo.LeaderParticipation. -// -// We use a directory (in global election path, with the name) with -// ephemeral files in it, that contains the id. The oldest revision -// wins the election. -type kubernetesLeaderParticipation struct { - // s is our parent kubernetes topo Server - s *Server - - // name is the name of this LeaderParticipation - name string - - // id is the process's current id. - id string - - // stop is a channel closed when Stop is called. - stop chan struct{} - - // done is a channel closed when we're done processing the Stop - done chan struct{} -} - -func (mp *kubernetesLeaderParticipation) getElectionPath() string { - return path.Join(mp.s.root, electionsPath, mp.name) -} - -// WaitForLeadership is part of the topo.LeaderParticipation interface. -func (mp *kubernetesLeaderParticipation) WaitForLeadership() (context.Context, error) { - // If Stop was already called, mp.done is closed, so we are interrupted. 
- select { - case <-mp.done: - return nil, topo.NewError(topo.Interrupted, "Leadership") - default: - } - - electionPath := mp.getElectionPath() - var ld topo.LockDescriptor - - // We use a cancelable context here. If stop is closed, - // we just cancel that context. - lockCtx, lockCancel := context.WithCancel(context.Background()) - go func() { - <-mp.stop - if ld != nil { - if err := ld.Unlock(context.Background()); err != nil { - log.Errorf("failed to unlock electionPath %v: %v", electionPath, err) - } - } - lockCancel() - close(mp.done) - }() - - // Try to get the primaryship, by getting a lock. - var err error - ld, err = mp.s.lock(lockCtx, electionPath, mp.id, true) - if err != nil { - // It can be that we were interrupted. - return nil, err - } - - // We got the lock. Return the lockContext. If Stop() is called, - // it will cancel the lockCtx, and cancel the returned context. - return lockCtx, nil -} - -// Stop is part of the topo.LeaderParticipation interface -func (mp *kubernetesLeaderParticipation) Stop() { - close(mp.stop) - <-mp.done -} - -// GetCurrentLeaderID is part of the topo.LeaderParticipation interface -func (mp *kubernetesLeaderParticipation) GetCurrentLeaderID(ctx context.Context) (string, error) { - id, _, err := mp.s.Get(ctx, mp.getElectionPath()) - if err != nil { - // NoNode means nobody is the primary - if topo.IsErrType(err, topo.NoNode) { - return "", nil - } - return "", err - } - return string(id), nil -} - -// WaitForNewLeader is part of the topo.LeaderParticipation interface -func (mp *kubernetesLeaderParticipation) WaitForNewLeader(context.Context) (<-chan string, error) { - // Kubernetes doesn't seem to provide a primitive that watches a prefix - // or directory, so this likely can never be implemented. 
- return nil, topo.NewError(topo.NoImplementation, "wait for leader not supported in K8s topo") -} diff --git a/go/vt/topo/k8stopo/error.go b/go/vt/topo/k8stopo/error.go deleted file mode 100644 index 32f44d0beef..00000000000 --- a/go/vt/topo/k8stopo/error.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo - -import ( - "context" - - "k8s.io/apimachinery/pkg/api/errors" - - "vitess.io/vitess/go/vt/topo" -) - -// convertError converts errors into a topo error. All errors -// are either application-level errors, or context errors. -func convertError(err error, nodePath string) error { - if err == nil { - return nil - } - - // Check for specific kubernetes errors - if errors.IsAlreadyExists(err) { - return topo.NewError(topo.NodeExists, nodePath) - } - if errors.IsNotFound(err) { - return topo.NewError(topo.NoNode, nodePath) - } - if errors.IsServerTimeout(err) { - return topo.NewError(topo.Timeout, nodePath) - } - - // Convert specific context sentinel values. - switch err { - case context.Canceled: - return topo.NewError(topo.Interrupted, nodePath) - case context.DeadlineExceeded: - return topo.NewError(topo.Timeout, nodePath) - } - - return err -} diff --git a/go/vt/topo/k8stopo/file.go b/go/vt/topo/k8stopo/file.go deleted file mode 100644 index 0a186235cde..00000000000 --- a/go/vt/topo/k8stopo/file.go +++ /dev/null @@ -1,301 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo - -import ( - "bytes" - "compress/gzip" - "context" - "encoding/base64" - "fmt" - "hash/fnv" - "io" - "path/filepath" - "strconv" - "strings" - "time" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/util/retry" - - "vitess.io/vitess/go/vt/topo" - vtv1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" -) - -// NodeReference contains the data relating to a node -type NodeReference struct { - id string - key string - value string -} - -func packValue(value []byte) ([]byte, error) { - encoded := &bytes.Buffer{} - encoder := base64.NewEncoder(base64.StdEncoding, encoded) - - zw := gzip.NewWriter(encoder) - _, err := zw.Write(value) - if err != nil { - return []byte{}, fmt.Errorf("gzip write error: %s", err) - } - - err = zw.Close() - if err != nil { - return []byte{}, fmt.Errorf("gzip close error: %s", err) - } - - err = encoder.Close() - if err != nil { - return []byte{}, fmt.Errorf("base64 encoder close error: %s", err) - } - - return encoded.Bytes(), nil -} - -func unpackValue(value []byte) ([]byte, error) { - decoder := base64.NewDecoder(base64.StdEncoding, bytes.NewBuffer(value)) - - zr, err := gzip.NewReader(decoder) - if err != nil { - return []byte{}, fmt.Errorf("unable to create new gzip reader: %s", err) - } - - decoded := &bytes.Buffer{} - if _, err := io.Copy(decoded, zr); err != nil { - return []byte{}, fmt.Errorf("error coppying 
uncompressed data: %s", err) - } - - if err := zr.Close(); err != nil { - return []byte{}, fmt.Errorf("unable to close gzip reader: %s", err) - } - - return decoded.Bytes(), nil -} - -// ToData converts a nodeReference to the data type used in the VitessTopoNode -func (n *NodeReference) ToData() vtv1beta1.VitessTopoNodeData { - return vtv1beta1.VitessTopoNodeData{ - Key: n.key, - Value: string(n.value), - } -} - -func getHash(parent string) string { - hasher := fnv.New64a() - hasher.Write([]byte(parent)) - return strconv.FormatUint(hasher.Sum64(), 10) -} - -func (s *Server) newNodeReference(key string) *NodeReference { - key = filepath.Join(s.root, key) - - node := &NodeReference{ - id: fmt.Sprintf("vt-%s", getHash(key)), - key: key, - } - - return node -} - -func (s *Server) buildFileResource(filePath string, contents []byte) (*vtv1beta1.VitessTopoNode, error) { - node := s.newNodeReference(filePath) - - value, err := packValue(contents) - if err != nil { - return nil, err - } - - // create data - node.value = string(value) - - // Create "file" object - return &vtv1beta1.VitessTopoNode{ - ObjectMeta: metav1.ObjectMeta{ - Name: node.id, - Namespace: s.namespace, - }, - Data: node.ToData(), - }, nil -} - -// Create is part of the topo.Conn interface. -func (s *Server) Create(ctx context.Context, filePath string, contents []byte) (topo.Version, error) { - resource, err := s.buildFileResource(filePath, contents) - if err != nil { - return nil, convertError(err, filePath) - } - - final, err := s.resourceClient.Create(ctx, resource, metav1.CreateOptions{}) - if err != nil { - return nil, convertError(err, filePath) - } - - // Update the internal cache - err = s.memberIndexer.Update(final) - if err != nil { - return nil, convertError(err, filePath) - } - - return KubernetesVersion(final.GetResourceVersion()), nil -} - -// Update is part of the topo.Conn interface. 
-func (s *Server) Update(ctx context.Context, filePath string, contents []byte, version topo.Version) (topo.Version, error) { - resource, err := s.buildFileResource(filePath, contents) - if err != nil { - return nil, convertError(err, filePath) - } - - var finalVersion KubernetesVersion - - err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { - result, err := s.resourceClient.Get(ctx, resource.Name, metav1.GetOptions{}) - if err != nil && errors.IsNotFound(err) && version == nil { - // Update should create objects when the version is nil and the object is not found - createdVersion, err := s.Create(ctx, filePath, contents) - if err != nil { - return err - } - finalVersion = KubernetesVersion(createdVersion.String()) - return nil - } - - // If a non-nil version is given to update, fail on mismatched version - if version != nil && KubernetesVersion(result.GetResourceVersion()) != version { - return topo.NewError(topo.BadVersion, filePath) - } - - // set new contents - result.Data.Value = resource.Data.Value - - // get result or err - final, err := s.resourceClient.Update(ctx, result, metav1.UpdateOptions{}) - if err != nil { - return convertError(err, filePath) - } - - // Update the internal cache - err = s.memberIndexer.Update(final) - if err != nil { - return convertError(err, filePath) - } - - finalVersion = KubernetesVersion(final.GetResourceVersion()) - - return nil - }) - if err != nil { - return nil, err - } - - return finalVersion, nil -} - -// Get is part of the topo.Conn interface. 
-func (s *Server) Get(ctx context.Context, filePath string) ([]byte, topo.Version, error) { - node := s.newNodeReference(filePath) - - result, err := s.resourceClient.Get(ctx, node.id, metav1.GetOptions{}) - if err != nil { - return []byte{}, nil, convertError(err, filePath) - } - - out, err := unpackValue([]byte(result.Data.Value)) - if err != nil { - return []byte{}, nil, convertError(err, filePath) - } - - return out, KubernetesVersion(result.GetResourceVersion()), nil -} - -// List is part of the topo.Conn interface. -func (s *Server) List(ctx context.Context, filePathPrefix string) ([]topo.KVInfo, error) { - nodeList, err := s.resourceClient.List(ctx, metav1.ListOptions{}) - - results := []topo.KVInfo{} - if err != nil { - return results, convertError(err, filePathPrefix) - } - nodes := nodeList.Items - if len(nodes) == 0 { - return results, topo.NewError(topo.NoNode, filePathPrefix) - } - rootPrefix := filepath.Join(s.root, filePathPrefix) - for _, node := range nodes { - if strings.HasPrefix(node.Data.Key, rootPrefix) { - out, err := unpackValue([]byte(node.Data.Value)) - if err != nil { - return results, convertError(err, node.Data.Key) - } - results = append(results, topo.KVInfo{ - Key: []byte(node.Data.Key), - Value: out, - Version: KubernetesVersion(node.GetResourceVersion()), - }) - } - } - - return results, nil -} - -// Delete is part of the topo.Conn interface. 
-func (s *Server) Delete(ctx context.Context, filePath string, version topo.Version) error { - node := s.newNodeReference(filePath) - - // Check version before delete - current, err := s.resourceClient.Get(ctx, node.id, metav1.GetOptions{}) - if err != nil { - return convertError(err, filePath) - } - if version != nil { - if KubernetesVersion(current.GetResourceVersion()) != version { - return topo.NewError(topo.BadVersion, filePath) - } - } - - err = s.resourceClient.Delete(ctx, node.id, metav1.DeleteOptions{}) - if err != nil { - return convertError(err, filePath) - } - - // Wait for one of the following conditions - // 1. Context is cancelled - // 2. The object is no longer in the cache - // 3. The object in the cache has a new uid (was deleted but recreated since we last checked) - for { - select { - case <-ctx.Done(): - return convertError(ctx.Err(), filePath) - case <-time.After(50 * time.Millisecond): - } - - obj, ok, err := s.memberIndexer.Get(current) - if err != nil { // error getting from cache - return convertError(err, filePath) - } - if !ok { // deleted from cache - break - } - cached := obj.(*vtv1beta1.VitessTopoNode) - if cached.GetUID() != current.GetUID() { - break // deleted and recreated - } - } - - return nil -} diff --git a/go/vt/topo/k8stopo/file_test.go b/go/vt/topo/k8stopo/file_test.go deleted file mode 100644 index 036c2c02f2a..00000000000 --- a/go/vt/topo/k8stopo/file_test.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo - -import ( - "reflect" - "testing" -) - -func Test_packValue(t *testing.T) { - tests := []struct { - name string - value []byte - want []byte - wantErr bool - }{ - { - // a gzip with an empty payload still has header bytes to identify the stream - "empty", - []byte{}, - []byte{72, 52, 115, 73, 65, 65, 65, 65, 65, 65, 65, 65, 47, 119, 69, 65, 65, 80, 47, 47, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 61}, - false, - }, - { - "valid payload", - []byte("test payload"), - []byte{72, 52, 115, 73, 65, 65, 65, 65, 65, 65, 65, 65, 47, 121, 112, 74, 76, 83, 53, 82, 75, 69, 105, 115, 122, 77, 108, 80, 84, 65, 69, 69, 65, 65, 68, 47, 47, 43, 69, 57, 72, 101, 115, 77, 65, 65, 65, 65}, - false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := packValue(tt.value) - if (err != nil) != tt.wantErr { - t.Errorf("packValue() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("packValue() = %v, want %v", got, tt.want) - } - }) - } -} - -func Test_unpackValue(t *testing.T) { - tests := []struct { - name string - value []byte - want []byte - wantErr bool - }{ - { - // a gzip with an empty payload still has header bytes to identify the stream - "empty", - []byte{72, 52, 115, 73, 65, 65, 65, 65, 65, 65, 65, 65, 47, 119, 69, 65, 65, 80, 47, 47, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 61}, - []byte{}, - false, - }, - { - "valid payload", - []byte{72, 52, 115, 73, 65, 65, 65, 65, 65, 65, 65, 65, 47, 121, 112, 74, 76, 83, 53, 82, 75, 69, 105, 115, 122, 77, 108, 80, 84, 65, 69, 69, 65, 65, 68, 47, 47, 43, 69, 57, 72, 101, 115, 77, 65, 65, 65, 65}, - []byte("test payload"), - false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := unpackValue(tt.value) - if (err != nil) != tt.wantErr { - t.Errorf("unpackValue() error = %v, wantErr 
%v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("unpackValue() = %v, want %v", got, tt.want) - } - }) - } -} - -func Test_packUnpackRoundTrip(t *testing.T) { - tests := []struct { - name string - value []byte - wantErr bool - }{ - { - "empty", - []byte{}, - false, - }, - { - "valid payload", - []byte("test payload"), - false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - packed, err := packValue(tt.value) - if (err != nil) != tt.wantErr { - t.Errorf("packValue() error = %v, wantErr %v", err, tt.wantErr) - return - } - - unpacked, err := unpackValue(packed) - if (err != nil) != tt.wantErr { - t.Errorf("packValue() error = %v, wantErr %v", err, tt.wantErr) - return - } - - if !reflect.DeepEqual(unpacked, tt.value) { - t.Errorf("unpacked value != original value original = %v, unpacked %v", tt.value, unpacked) - return - } - }) - } -} diff --git a/go/vt/topo/k8stopo/lock.go b/go/vt/topo/k8stopo/lock.go deleted file mode 100644 index e1321ea76e4..00000000000 --- a/go/vt/topo/k8stopo/lock.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo - -import ( - "time" - - "context" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "vitess.io/vitess/go/vt/topo" - vtv1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" -) - -// kubernetesLockDescriptor implements topo.LockDescriptor. 
-type kubernetesLockDescriptor struct { - s *Server - leaseID string - leasePath string -} - -// Lock is part of the topo.Conn interface. -func (s *Server) Lock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) { - return s.lock(ctx, dirPath, contents, false) -} - -// TryLock is part of the topo.Conn interface. Its implementation is same as Lock -func (s *Server) TryLock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) { - return s.Lock(ctx, dirPath, contents) -} - -// lock is used by both Lock() and primary election. -// it blocks until the lock is taken, interrupted, or times out -func (s *Server) lock(ctx context.Context, nodePath, contents string, createMissing bool) (topo.LockDescriptor, error) { - // Satisfy the topo.Conn interface - if !createMissing { - // Per the topo.Conn interface: - // "Returns ErrNoNode if the directory doesn't exist (meaning - // there is no existing file under that directory)." - if _, err := s.ListDir(ctx, nodePath, false); err != nil { - return nil, convertError(err, nodePath) - } - } - - resource, err := s.buildFileResource(nodePath, []byte(contents)) - if err != nil { - return nil, convertError(err, nodePath) - } - - // mark locks as ephemeral - resource.Data.Ephemeral = true - - var final *vtv1beta1.VitessTopoNode - - for { - // Try and and create the resource. 
The kube api will handle the actual atomic lock creation - final, err = s.resourceClient.Create(ctx, resource, metav1.CreateOptions{}) - if errors.IsAlreadyExists(err) { - select { - case <-time.After(10 * time.Millisecond): - continue // retry - case <-ctx.Done(): - return nil, convertError(ctx.Err(), nodePath) - } - } else if err != nil { - return nil, convertError(err, nodePath) - } - - break - } - - // Update the internal cache - err = s.memberIndexer.Update(final) - if err != nil { - return nil, convertError(err, nodePath) - } - - return &kubernetesLockDescriptor{ - s: s, - leaseID: resource.Name, - leasePath: resource.Data.Key, - }, nil -} - -// Check is part of the topo.LockDescriptor interface. -func (ld *kubernetesLockDescriptor) Check(ctx context.Context) error { - // Get the object and ensure the leaseid - _, err := ld.s.resourceClient.Get(ctx, ld.leaseID, metav1.GetOptions{}) // TODO namespacing - if err != nil { - return convertError(err, ld.leasePath) - - } - - return nil -} - -// Unlock is part of the topo.LockDescriptor interface. -func (ld *kubernetesLockDescriptor) Unlock(ctx context.Context) error { - err := ld.s.resourceClient.Delete(ctx, ld.leaseID, metav1.DeleteOptions{}) // TODO namespacing - if err != nil { - return convertError(err, ld.leasePath) - } - return nil -} diff --git a/go/vt/topo/k8stopo/server.go b/go/vt/topo/k8stopo/server.go deleted file mode 100644 index 2507ae254b1..00000000000 --- a/go/vt/topo/k8stopo/server.go +++ /dev/null @@ -1,241 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package k8stopo implements topo.Server with the Kubernetes API as the backend. - -We expect the following behavior from the kubernetes client library: - - - TODO - -We follow these conventions within this package: - - - TODO -*/ -package k8stopo - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "vitess.io/vitess/go/vt/servenv" - - "github.com/spf13/pflag" - - "k8s.io/apimachinery/pkg/fields" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/clientcmd" - - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/topo" - vtv1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" - vtkube "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned" - vttyped "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1" -) - -var kubeconfigPath, configContext, configNamespace string - -func init() { - servenv.RegisterFlagsForTopoBinaries(registerK8STopoFlags) -} - -func registerK8STopoFlags(fs *pflag.FlagSet) { - // kubeconfigPath is a string that gives the location of a valid kubeconfig file - fs.StringVar(&kubeconfigPath, "topo_k8s_kubeconfig", kubeconfigPath, "Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. 
https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod") - - // configContext is a string that can be used to override the default context - fs.StringVar(&configContext, "topo_k8s_context", configContext, "The kubeconfig context to use, overrides the 'current-context' from the config") - - // configNamespace is a string that can be used to override the default namespace for objects - fs.StringVar(&configNamespace, "topo_k8s_namespace", configNamespace, "The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config") -} - -// Factory is the Kubernetes topo.Factory implementation. -type Factory struct{} - -// HasGlobalReadOnlyCell is part of the topo.Factory interface. -func (f Factory) HasGlobalReadOnlyCell(serverAddr, root string) bool { - return false -} - -// Create is part of the topo.Factory interface. -func (f Factory) Create(cell, serverAddr, root string) (topo.Conn, error) { - return NewServer(serverAddr, root) -} - -// Server is the implementation of topo.Server for Kubernetes. -type Server struct { - // kubeClient is the entire kubernetes interface - kubeClient kubernetes.Interface - - // vtKubeClient is the client for vitess api types - vtKubeClient vtkube.Interface - - // resource is a scoped-down kubernetes.Interface used for convenience - resourceClient vttyped.VitessTopoNodeInterface - - // stopChan is used to tell the client-go informers to quit - stopChan chan struct{} - - // memberInformer is the controller that syncronized the cache of data - memberInformer cache.Controller - - // memberIndexer is the cache of tree data - memberIndexer cache.Indexer - - // namespace is the Kubernetes namespace to be used for all resources - namespace string - - // root is the root path for this client. - // used for resource prefixing - root string -} - -// Close implements topo.Server.Close. 
-func (s *Server) Close() { - close(s.stopChan) -} - -func getKeyParents(key string) []string { - parents := []string{""} - parent := []string{} - for _, segment := range strings.Split(filepath.Dir(key), "/") { - parent = append(parent, segment) - parents = append(parents, strings.Join(parent, "/")) - } - return parents -} - -func indexByParent(obj any) ([]string, error) { - return getKeyParents(obj.(*vtv1beta1.VitessTopoNode).Data.Key), nil -} - -// syncTree starts and syncs the member objects that form the directory "tree" -func (s *Server) syncTree() error { - // Create the informer / indexer - restClient := s.vtKubeClient.TopoV1beta1().RESTClient() - listwatch := cache.NewListWatchFromClient(restClient, "vitesstoponodes", s.namespace, fields.Everything()) - - // set up index funcs - indexers := cache.Indexers{} - indexers["by_parent"] = indexByParent - - s.memberIndexer, s.memberInformer = cache.NewIndexerInformer(listwatch, &vtv1beta1.VitessTopoNode{}, 0, - cache.ResourceEventHandlerFuncs{}, indexers) - - // Start indexer - go s.memberInformer.Run(s.stopChan) - - // Wait for sync - log.Info("Waiting for Kubernetes topo cache sync") - if !cache.WaitForCacheSync(s.stopChan, s.memberInformer.HasSynced) { - return fmt.Errorf("timed out waiting for caches to sync") - } - log.Info("Kubernetes topo cache sync completed") - - return nil -} - -// NewServer returns a new k8stopo.Server. 
-func NewServer(_, root string) (*Server, error) { - log.Info("Creating new Kubernetes topo server with root: ", root) - - var config *rest.Config - var err error - namespace := "default" //nolint - - if kubeconfigPath == "" { - log.Info("Creating new in-cluster Kubernetes config") - - config, err = rest.InClusterConfig() - if err != nil { - return nil, fmt.Errorf("error getting Kubernetes in-cluster client config: %s", err) - } - - // When running in the cluster, use the namespace file to detect the current namespace - nsBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") - if err != nil { - return nil, err - } - namespace = string(nsBytes) - } else { - log.Info("Creating new Kubernetes config from kubeconfig", kubeconfigPath) - - configOverrides := &clientcmd.ConfigOverrides{} - - // respect the context flag - if configContext != "" { - configOverrides.CurrentContext = configContext - } - - configLoader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( - &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}, - configOverrides, - ) - - config, err = configLoader.ClientConfig() - if err != nil { - return nil, fmt.Errorf("error getting Kubernetes client config: %s", err) - } - - // When given a kubeconfig file, use the namespace from the current context - namespace, _, err = configLoader.Namespace() - if err != nil { - return nil, fmt.Errorf("error getting namespace from Kubernetes client config: %s", err) - } - } - - // respect the namespace flag - if configNamespace != "" { - namespace = configNamespace - } - - // create the kubernetes client - kubeClientset, err := kubernetes.NewForConfig(config) - if err != nil { - return nil, fmt.Errorf("error creating official Kubernetes client: %s", err) - } - - vtKubeClientset, err := vtkube.NewForConfig(config) - if err != nil { - return nil, fmt.Errorf("error creating vitess Kubernetes client: %s", err) - } - - // Create the server - s := &Server{ - namespace: 
namespace, - kubeClient: kubeClientset, - vtKubeClient: vtKubeClientset, - resourceClient: vtKubeClientset.TopoV1beta1().VitessTopoNodes(namespace), - root: root, - stopChan: make(chan struct{}), - } - - // Sync cache - if err = s.syncTree(); err != nil { - return nil, err - } - - return s, nil -} - -func init() { - topo.RegisterFactory("k8s", Factory{}) -} diff --git a/go/vt/topo/k8stopo/server_flaky_test.go b/go/vt/topo/k8stopo/server_flaky_test.go deleted file mode 100644 index 5a9fce1ca80..00000000000 --- a/go/vt/topo/k8stopo/server_flaky_test.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo - -import ( - "context" - "fmt" - "os" - "os/exec" - "path" - "runtime" - "testing" - "time" - - "github.com/stretchr/testify/require" - extensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kubeyaml "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/tools/clientcmd" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/test" -) - -func TestKubernetesTopo(t *testing.T) { - if runtime.GOOS != "linux" { - t.Skip("k3s not supported on non-linux platforms. 
Skipping k8stopo integration tests") - } - - // Create a data dir for test data - testDataDir := t.TempDir() - - // Gen a temp file name for the config - testConfig, err := os.CreateTemp("", "vt-test-k3s-config") - if err != nil { - t.Fatal(err) - } - testConfigPath := testConfig.Name() - defer os.Remove(testConfigPath) // clean up - - k3sArgs := []string{ - "server", "start", - "--write-kubeconfig=" + testConfigPath, - "--data-dir=" + testDataDir, - "--https-listen-port=6663", - "--disable-agent", "--flannel-backend=none", - "--disable-network-policy", - "--disable-cloud-controller", - "--disable-scheduler", - "--no-deploy=coredns,servicelb,traefik,local-storage,metrics-server", - "--kube-controller-manager-arg=port=10253", - - "--log=/tmp/k3svtlog", - } - - // Start a minimal k3s daemon, and close it after all tests are done. - ctx, killK3s := context.WithCancel(context.Background()) - c := exec.CommandContext(ctx, "k3s", k3sArgs...) - - // Start in the background and kill when tests end - t.Log("Starting k3s") - err = c.Start() - if err != nil { - t.Fatal("Unable to start k3s", err) - } - defer killK3s() - - // Wait for server to be ready - for { - t.Log("Waiting for server to be ready") - time.Sleep(time.Second) - config, err := clientcmd.BuildConfigFromFlags("", testConfigPath) - if err != nil { - continue - } - - // Create the vitesstoponode crd - apiextensionsClientSet, err := apiextensionsclient.NewForConfig(config) - if err != nil { - t.Fatal(err) - } - - crdFile, err := os.Open("./VitessTopoNodes-crd.yaml") - require.NoError(t, err) - defer crdFile.Close() - - crd := &extensionsv1.CustomResourceDefinition{} - - kubeyaml.NewYAMLOrJSONDecoder(crdFile, 2048).Decode(crd) - - _, err = apiextensionsClientSet.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}) - if err != nil { - t.Fatal(err) - } - - break - } - - serverAddr := "default" - - oldKubeConfigPath := kubeconfigPath - kubeconfigPath = testConfigPath - 
defer func() { - kubeconfigPath = oldKubeConfigPath - }() - - // Run the test suite. - testIndex := 0 - test.TopoServerTestSuite(t, func() *topo.Server { - // Each test will use its own sub-directories. - // The directories will be created when used the first time. - testRoot := fmt.Sprintf("/test-%v", testIndex) - testIndex++ - - globalRoot := path.Join(testRoot, topo.GlobalCell) - cellRoot := path.Join(testRoot, test.LocalCellName) - - ts, err := topo.OpenServer("k8s", serverAddr, globalRoot) - if err != nil { - t.Fatalf("OpenServer() failed: %v", err) - } - if err := ts.CreateCellInfo(context.Background(), test.LocalCellName, &topodatapb.CellInfo{ - ServerAddress: serverAddr, - Root: cellRoot, - }); err != nil { - t.Fatalf("CreateCellInfo() failed: %v", err) - } - - return ts - }, []string{"checkTryLock", "checkShardWithLock"}) -} diff --git a/go/vt/topo/k8stopo/version.go b/go/vt/topo/k8stopo/version.go deleted file mode 100644 index c7e16bfffeb..00000000000 --- a/go/vt/topo/k8stopo/version.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo - -import ( - "fmt" - - "vitess.io/vitess/go/vt/topo" -) - -// KubernetesVersion is Kubernetes's idea of a version. -// It implements topo.Version. -type KubernetesVersion string - -// String is part of the topo.Version interface. 
-func (v KubernetesVersion) String() string { - return string(v) -} - -// VersionFromInt is used by old-style functions to create a proper -// Version: if version is -1, returns nil. Otherwise returns the -// KubernetesVersion object. -func VersionFromInt(version int64) topo.Version { - if version == -1 { - return nil - } - return KubernetesVersion(fmt.Sprint(version)) -} diff --git a/go/vt/topo/k8stopo/watch.go b/go/vt/topo/k8stopo/watch.go deleted file mode 100644 index 4cf1c7bc2c3..00000000000 --- a/go/vt/topo/k8stopo/watch.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo - -import ( - "context" - - "k8s.io/apimachinery/pkg/fields" - "k8s.io/client-go/tools/cache" - - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/topo" - vtv1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" -) - -// Watch is part of the topo.Conn interface. 
-func (s *Server) Watch(ctx context.Context, filePath string) (*topo.WatchData, <-chan *topo.WatchData, error) { - log.Info("Starting Kubernetes topo Watch on ", filePath) - - current := &topo.WatchData{} - - // get current - initialCtx, initialCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) - defer initialCancel() - - contents, ver, err := s.Get(initialCtx, filePath) - if err != nil { - return nil, nil, err - } - current.Contents = contents - current.Version = ver - - // Create the changes channel - changes := make(chan *topo.WatchData, 10) - - // Create a signal channel for non-interrupt shutdowns - gracefulShutdown := make(chan struct{}) - - resource, err := s.buildFileResource(filePath, []byte{}) - if err != nil { - return nil, nil, err - } - - // Create the informer / indexer to watch the single resource - restClient := s.vtKubeClient.TopoV1beta1().RESTClient() - listwatch := cache.NewListWatchFromClient(restClient, "vitesstoponodes", s.namespace, fields.OneTermEqualSelector("metadata.name", resource.Name)) - - // set up index funcs - indexers := cache.Indexers{} - indexers["by_parent"] = indexByParent - - _, memberInformer := cache.NewIndexerInformer(listwatch, &vtv1beta1.VitessTopoNode{}, 0, - cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj any) { - vtn := obj.(*vtv1beta1.VitessTopoNode) - out, err := unpackValue([]byte(vtn.Data.Value)) - if err != nil { - changes <- &topo.WatchData{Err: err} - close(gracefulShutdown) - } else { - changes <- &topo.WatchData{ - Contents: out, - Version: KubernetesVersion(vtn.GetResourceVersion()), - } - } - }, - UpdateFunc: func(oldObj, newObj any) { - vtn := newObj.(*vtv1beta1.VitessTopoNode) - out, err := unpackValue([]byte(vtn.Data.Value)) - if err != nil { - changes <- &topo.WatchData{Err: err} - close(gracefulShutdown) - } else { - changes <- &topo.WatchData{ - Contents: out, - Version: KubernetesVersion(vtn.GetResourceVersion()), - } - } - }, - DeleteFunc: func(obj any) { - vtn := 
obj.(*vtv1beta1.VitessTopoNode) - changes <- &topo.WatchData{Err: topo.NewError(topo.NoNode, vtn.Name)} - close(gracefulShutdown) - }, - }, indexers) - - // create control chan for informer and start it - informerChan := make(chan struct{}) - go memberInformer.Run(informerChan) - - // Handle interrupts - go closeOnDone(ctx, filePath, informerChan, gracefulShutdown, changes) - - return current, changes, nil -} - -func closeOnDone(ctx context.Context, filePath string, informerChan chan struct{}, gracefulShutdown chan struct{}, changes chan *topo.WatchData) { - select { - case <-ctx.Done(): - if err := ctx.Err(); err != nil && err == context.Canceled { - changes <- &topo.WatchData{Err: topo.NewError(topo.Interrupted, filePath)} - } - case <-gracefulShutdown: - } - close(informerChan) - close(changes) -} - -// WatchRecursive is part of the topo.Conn interface. -func (s *Server) WatchRecursive(_ context.Context, path string) ([]*topo.WatchDataRecursive, <-chan *topo.WatchDataRecursive, error) { - // Kubernetes doesn't seem to provide a primitive that watches a prefix - // or directory, so this likely can never be implemented. 
- return nil, nil, topo.NewError(topo.NoImplementation, path) -} diff --git a/go/vt/topo/keyspace.go b/go/vt/topo/keyspace.go index ddec9f4e0a8..feb80c374e5 100755 --- a/go/vt/topo/keyspace.go +++ b/go/vt/topo/keyspace.go @@ -20,7 +20,7 @@ import ( "context" "path" - "vitess.io/vitess/go/vt/sidecardb" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/event" @@ -233,7 +233,7 @@ func (ts *Server) GetSidecarDBName(ctx context.Context, keyspace string) (string if keyspaceInfo.SidecarDbName != "" { return keyspaceInfo.SidecarDbName, nil } - return sidecardb.DefaultName, nil + return sidecar.DefaultName, nil } func (ts *Server) GetThrottlerConfig(ctx context.Context, keyspace string) (*topodatapb.ThrottlerConfig, error) { diff --git a/go/vt/topo/memorytopo/file.go b/go/vt/topo/memorytopo/file.go index 0abfc56cb80..0007203799f 100644 --- a/go/vt/topo/memorytopo/file.go +++ b/go/vt/topo/memorytopo/file.go @@ -262,7 +262,6 @@ func (c *Conn) Delete(ctx context.Context, filePath string, version topo.Version // Check if it's a directory. if n.isDirectory() { - //lint:ignore ST1005 Delete is a function name return fmt.Errorf("delete(%v, %v) failed: it's a directory", c.cell, filePath) } diff --git a/go/vt/topo/memorytopo/memorytopo.go b/go/vt/topo/memorytopo/memorytopo.go index 504f1d4bd39..f24b2f6c89e 100644 --- a/go/vt/topo/memorytopo/memorytopo.go +++ b/go/vt/topo/memorytopo/memorytopo.go @@ -25,7 +25,6 @@ import ( "math/rand" "strings" "sync" - "time" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/topo" @@ -128,18 +127,16 @@ type Conn struct { } // dial returns immediately, unless the Conn points to the sentinel -// UnreachableServerAddr, in which case it will block until the context expires -// and return the context's error. +// UnreachableServerAddr, in which case it will block until the context expires. 
func (c *Conn) dial(ctx context.Context) error { if c.closed { return ErrConnectionClosed } if c.serverAddr == UnreachableServerAddr { <-ctx.Done() - return ctx.Err() } - return nil + return ctx.Err() } // Close is part of the topo.Conn interface. @@ -236,14 +233,13 @@ func (n *node) PropagateWatchError(err error) { // NewServerAndFactory returns a new MemoryTopo and the backing factory for all // the cells. It will create one cell for each parameter passed in. It will log.Exit out // in case of a problem. -func NewServerAndFactory(cells ...string) (*topo.Server, *Factory) { +func NewServerAndFactory(ctx context.Context, cells ...string) (*topo.Server, *Factory) { f := &Factory{ cells: make(map[string]*node), generation: uint64(rand.Int63n(1 << 60)), } f.cells[topo.GlobalCell] = f.newDirectory(topo.GlobalCell, nil) - ctx := context.Background() ts, err := topo.NewWithFactory(f, "" /*serverAddress*/, "" /*root*/) if err != nil { log.Exitf("topo.NewWithFactory() failed: %v", err) @@ -258,8 +254,8 @@ func NewServerAndFactory(cells ...string) (*topo.Server, *Factory) { } // NewServer returns the new server -func NewServer(cells ...string) *topo.Server { - server, _ := NewServerAndFactory(cells...) +func NewServer(ctx context.Context, cells ...string) *topo.Server { + server, _ := NewServerAndFactory(ctx, cells...) return server } @@ -352,7 +348,3 @@ func (f *Factory) recursiveDelete(n *node) { f.recursiveDelete(parent) } } - -func init() { - rand.Seed(time.Now().UnixNano()) -} diff --git a/go/vt/topo/memorytopo/server_test.go b/go/vt/topo/memorytopo/server_test.go index 5bfa41c8a5e..c2d1cf6cfb5 100644 --- a/go/vt/topo/memorytopo/server_test.go +++ b/go/vt/topo/memorytopo/server_test.go @@ -17,6 +17,7 @@ limitations under the License. package memorytopo import ( + "context" "testing" "vitess.io/vitess/go/vt/topo" @@ -25,7 +26,9 @@ import ( func TestMemoryTopo(t *testing.T) { // Run the TopoServerTestSuite tests. 
- test.TopoServerTestSuite(t, func() *topo.Server { - return NewServer(test.LocalCellName) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + test.TopoServerTestSuite(t, ctx, func() *topo.Server { + return NewServer(ctx, test.LocalCellName) }, []string{"checkTryLock", "checkShardWithLock"}) } diff --git a/go/vt/topo/server.go b/go/vt/topo/server.go index 6dfcecdec34..1995e8b6ec4 100644 --- a/go/vt/topo/server.go +++ b/go/vt/topo/server.go @@ -174,7 +174,7 @@ var ( } FlagBinaries = []string{"vttablet", "vtctl", "vtctld", "vtcombo", "vtgate", - "vtgr", "vtorc", "vtbackup"} + "vtorc", "vtbackup"} ) func init() { @@ -240,10 +240,7 @@ func OpenServer(implementation, serverAddress, root string) (*Server, error) { // Open returns a Server using the command line parameter flags // for implementation, address and root. It log.Exits out if an error occurs. func Open() *Server { - if topoImplementation == "k8s" { - log.Warningf("The `k8stopo` is deprecated. We recommend using the `etcd2topo` instead. The `k8stopo` will be removed in Vitess 18.") - } - if topoGlobalServerAddress == "" && topoImplementation != "k8s" { + if topoGlobalServerAddress == "" { log.Exitf("topo_global_server_address must be configured") } if topoGlobalRoot == "" { diff --git a/go/vt/topo/shard.go b/go/vt/topo/shard.go index cd98325cd3c..183ed409bbb 100644 --- a/go/vt/topo/shard.go +++ b/go/vt/topo/shard.go @@ -27,11 +27,9 @@ import ( "sync" "time" - "google.golang.org/protobuf/proto" - - "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/event" @@ -189,12 +187,12 @@ func (si *ShardInfo) HasPrimary() bool { // GetPrimaryTermStartTime returns the shard's primary term start time as a Time value. 
func (si *ShardInfo) GetPrimaryTermStartTime() time.Time { - return logutil.ProtoToTime(si.Shard.PrimaryTermStartTime) + return protoutil.TimeFromProto(si.Shard.PrimaryTermStartTime).UTC() } // SetPrimaryTermStartTime sets the shard's primary term start time as a Time value. func (si *ShardInfo) SetPrimaryTermStartTime(t time.Time) { - si.Shard.PrimaryTermStartTime = logutil.TimeToProto(t) + si.Shard.PrimaryTermStartTime = protoutil.TimeToProto(t) } // GetShard is a high level function to read shard data. @@ -365,7 +363,7 @@ func (ts *Server) GetOrCreateShard(ctx context.Context, keyspace, shard string) // fully initialize and perform certain operations (e.g. // OnlineDDL or VReplication workflows) if they are using a // different sidecar database name. - ksi := topodatapb.Keyspace{SidecarDbName: sidecardb.GetName()} + ksi := topodatapb.Keyspace{SidecarDbName: sidecar.GetName()} if err = ts.CreateKeyspace(ctx, keyspace, &ksi); err != nil && !IsErrType(err, NodeExists) { return nil, vterrors.Wrapf(err, "CreateKeyspace(%v) failed", keyspace) } @@ -412,7 +410,7 @@ func (si *ShardInfo) GetTabletControl(tabletType topodatapb.TabletType) *topodat return nil } -// UpdateSourceDeniedTables will add or remove the listed tables +// UpdateDeniedTables will add or remove the listed tables // in the shard record's TabletControl structures. Note we don't // support a lot of the corner cases: // - only support one table list per shard. If we encounter a different @@ -421,7 +419,7 @@ func (si *ShardInfo) GetTabletControl(tabletType topodatapb.TabletType) *topodat // because it's not used in the same context (vertical vs horizontal sharding) // // This function should be called while holding the keyspace lock. 
-func (si *ShardInfo) UpdateSourceDeniedTables(ctx context.Context, tabletType topodatapb.TabletType, cells []string, remove bool, tables []string) error { +func (si *ShardInfo) UpdateDeniedTables(ctx context.Context, tabletType topodatapb.TabletType, cells []string, remove bool, tables []string) error { if err := CheckKeyspaceLocked(ctx, si.keyspace); err != nil { return err } @@ -633,7 +631,7 @@ func (ts *Server) FindAllTabletAliasesInShardByCell(ctx context.Context, keyspac } for _, a := range resultAsMap { - result = append(result, proto.Clone(a).(*topodatapb.TabletAlias)) + result = append(result, a.CloneVT()) } sort.Sort(topoproto.TabletAliasList(result)) return result, err diff --git a/go/vt/topo/shard_test.go b/go/vt/topo/shard_test.go index d0ec08f94ea..2c0b9082816 100644 --- a/go/vt/topo/shard_test.go +++ b/go/vt/topo/shard_test.go @@ -106,14 +106,14 @@ func lockedKeyspaceContext(keyspace string) context.Context { } func addToDenyList(ctx context.Context, si *ShardInfo, tabletType topodatapb.TabletType, cells, tables []string) error { - if err := si.UpdateSourceDeniedTables(ctx, tabletType, cells, false, tables); err != nil { + if err := si.UpdateDeniedTables(ctx, tabletType, cells, false, tables); err != nil { return err } return nil } func removeFromDenyList(ctx context.Context, si *ShardInfo, tabletType topodatapb.TabletType, cells, tables []string) error { - if err := si.UpdateSourceDeniedTables(ctx, tabletType, cells, true, tables); err != nil { + if err := si.UpdateDeniedTables(ctx, tabletType, cells, true, tables); err != nil { return err } return nil @@ -161,13 +161,13 @@ func TestUpdateSourceDeniedTables(t *testing.T) { // check we enforce the keyspace lock ctx := context.Background() - if err := si.UpdateSourceDeniedTables(ctx, topodatapb.TabletType_RDONLY, nil, false, nil); err == nil || err.Error() != "keyspace ks is not locked (no locksInfo)" { + if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, nil, false, nil); err == nil 
|| err.Error() != "keyspace ks is not locked (no locksInfo)" { t.Fatalf("unlocked keyspace produced wrong error: %v", err) } ctx = lockedKeyspaceContext("ks") // add one cell - if err := si.UpdateSourceDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first"}, false, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ + if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first"}, false, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ { TabletType: topodatapb.TabletType_RDONLY, Cells: []string{"first"}, @@ -178,20 +178,20 @@ func TestUpdateSourceDeniedTables(t *testing.T) { } // remove that cell, going back - if err := si.UpdateSourceDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first"}, true, nil); err != nil || len(si.TabletControls) != 0 { + if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first"}, true, nil); err != nil || len(si.TabletControls) != 0 { t.Fatalf("going back should have remove the record: %v", si) } // re-add a cell, then another with different table list to // make sure it fails - if err := si.UpdateSourceDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first"}, false, []string{"t1", "t2"}); err != nil { + if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first"}, false, []string{"t1", "t2"}); err != nil { t.Fatalf("one cell add failed: %v", si) } - if err := si.UpdateSourceDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"second"}, false, []string{"t2", "t3"}); err == nil || err.Error() != "trying to use two different sets of denied tables for shard ks/sh: [t1 t2] and [t2 t3]" { + if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"second"}, false, []string{"t2", "t3"}); err == nil || err.Error() != "trying to use two different sets of denied tables for shard ks/sh: [t1 t2] and [t2 t3]" { 
t.Fatalf("different table list should fail: %v", err) } // add another cell, see the list grow - if err := si.UpdateSourceDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"second"}, false, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ + if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"second"}, false, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ { TabletType: topodatapb.TabletType_RDONLY, Cells: []string{"first", "second"}, @@ -202,7 +202,7 @@ func TestUpdateSourceDeniedTables(t *testing.T) { } // add all cells, see the list grow to all - if err := si.UpdateSourceDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first", "second", "third"}, false, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ + if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first", "second", "third"}, false, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ { TabletType: topodatapb.TabletType_RDONLY, Cells: []string{"first", "second", "third"}, @@ -213,7 +213,7 @@ func TestUpdateSourceDeniedTables(t *testing.T) { } // remove one cell from the full list - if err := si.UpdateSourceDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"second"}, true, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ + if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"second"}, true, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ { TabletType: topodatapb.TabletType_RDONLY, Cells: []string{"first", "third"}, diff --git a/go/vt/topo/tablet.go b/go/vt/topo/tablet.go index b68a48b0223..d17235f6948 100644 --- a/go/vt/topo/tablet.go +++ 
b/go/vt/topo/tablet.go @@ -24,18 +24,15 @@ import ( "sync" "time" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/key" - "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" - - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/event" "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/events" @@ -215,7 +212,7 @@ func (ti *TabletInfo) IsReplicaType() bool { // GetPrimaryTermStartTime returns the tablet's primary term start time as a Time value. func (ti *TabletInfo) GetPrimaryTermStartTime() time.Time { - return logutil.ProtoToTime(ti.Tablet.PrimaryTermStartTime) + return protoutil.TimeFromProto(ti.Tablet.PrimaryTermStartTime).UTC() } // NewTabletInfo returns a TabletInfo basing on tablet with the @@ -586,7 +583,7 @@ func (ts *Server) InitTablet(ctx context.Context, tablet *topodatapb.Tablet, all if tablet.Type == topodatapb.TabletType_PRIMARY { // we update primary_term_start_time even if the primary hasn't changed // because that means a new primary term with the same primary - tablet.PrimaryTermStartTime = logutil.TimeToProto(time.Now()) + tablet.PrimaryTermStartTime = protoutil.TimeToProto(time.Now()) } err = ts.CreateTablet(ctx, tablet) @@ -602,7 +599,7 @@ func (ts *Server) InitTablet(ctx context.Context, tablet *topodatapb.Tablet, all if oldTablet.Keyspace != tablet.Keyspace || oldTablet.Shard != tablet.Shard { return fmt.Errorf("old tablet has shard %v/%v. Cannot override with shard %v/%v. 
Delete and re-add tablet if you want to change the tablet's keyspace/shard", oldTablet.Keyspace, oldTablet.Shard, tablet.Keyspace, tablet.Shard) } - oldTablet.Tablet = proto.Clone(tablet).(*topodatapb.Tablet) + oldTablet.Tablet = tablet.CloneVT() if err := ts.UpdateTablet(ctx, oldTablet); err != nil { return fmt.Errorf("failed updating tablet %v: %v", topoproto.TabletAliasString(tablet.Alias), err) } diff --git a/go/vt/topo/test/directory.go b/go/vt/topo/test/directory.go index b33404f8643..88001a56d87 100644 --- a/go/vt/topo/test/directory.go +++ b/go/vt/topo/test/directory.go @@ -17,18 +17,15 @@ limitations under the License. package test import ( + "context" "reflect" "testing" - "context" - "vitess.io/vitess/go/vt/topo" ) // checkDirectory tests the directory part of the topo.Conn API. -func checkDirectory(t *testing.T, ts *topo.Server) { - ctx := context.Background() - +func checkDirectory(t *testing.T, ctx context.Context, ts *topo.Server) { // global cell t.Logf("=== checkDirectoryInCell global") conn, err := ts.ConnForCell(ctx, topo.GlobalCell) diff --git a/go/vt/topo/test/election.go b/go/vt/topo/test/election.go index 594e6562eb2..377ea21ae09 100644 --- a/go/vt/topo/test/election.go +++ b/go/vt/topo/test/election.go @@ -46,8 +46,8 @@ func waitForLeaderID(t *testing.T, mp topo.LeaderParticipation, expected string) // checkElection runs the tests on the LeaderParticipation part of the // topo.Conn API. -func checkElection(t *testing.T, ts *topo.Server) { - conn, err := ts.ConnForCell(context.Background(), topo.GlobalCell) +func checkElection(t *testing.T, ctx context.Context, ts *topo.Server) { + conn, err := ts.ConnForCell(ctx, topo.GlobalCell) if err != nil { t.Fatalf("ConnForCell(global) failed: %v", err) } @@ -71,7 +71,7 @@ func checkElection(t *testing.T, ts *topo.Server) { // A lot of implementations use a toplevel directory for their elections. // Make sure it is marked as 'Ephemeral'. 
- entries, err := conn.ListDir(context.Background(), "/", true /*full*/) + entries, err := conn.ListDir(ctx, "/", true /*full*/) if err != nil { t.Fatalf("ListDir(/) failed: %v", err) } @@ -148,8 +148,8 @@ func checkElection(t *testing.T, ts *topo.Server) { } // checkWaitForNewLeader runs the WaitForLeadership test on the LeaderParticipation -func checkWaitForNewLeader(t *testing.T, ts *topo.Server) { - conn, err := ts.ConnForCell(context.Background(), topo.GlobalCell) +func checkWaitForNewLeader(t *testing.T, ctx context.Context, ts *topo.Server) { + conn, err := ts.ConnForCell(ctx, topo.GlobalCell) if err != nil { t.Fatalf("ConnForCell(global) failed: %v", err) } @@ -195,7 +195,7 @@ func checkWaitForNewLeader(t *testing.T, ts *topo.Server) { t.Fatalf("cannot create mp2: %v", err) } - leaders, err := mp2.WaitForNewLeader(context.Background()) + leaders, err := mp2.WaitForNewLeader(ctx) if topo.IsErrType(err, topo.NoImplementation) { t.Logf("%T does not support WaitForNewLeader()", mp2) return diff --git a/go/vt/topo/test/file.go b/go/vt/topo/test/file.go index 8e5858d17a3..70bb386fd80 100644 --- a/go/vt/topo/test/file.go +++ b/go/vt/topo/test/file.go @@ -26,9 +26,7 @@ import ( ) // checkFile tests the file part of the Conn API. -func checkFile(t *testing.T, ts *topo.Server) { - ctx := context.Background() - +func checkFile(t *testing.T, ctx context.Context, ts *topo.Server) { // global cell t.Logf("=== checkFileInCell global") conn, err := ts.ConnForCell(ctx, topo.GlobalCell) @@ -203,8 +201,7 @@ func checkFileInCell(t *testing.T, conn topo.Conn, hasCells bool) { } // checkList tests the file part of the Conn API. 
-func checkList(t *testing.T, ts *topo.Server) { - ctx := context.Background() +func checkList(t *testing.T, ctx context.Context, ts *topo.Server) { // global cell conn, err := ts.ConnForCell(ctx, LocalCellName) if err != nil { diff --git a/go/vt/topo/test/keyspace.go b/go/vt/topo/test/keyspace.go index c5b9af68009..0458e7fd2d7 100644 --- a/go/vt/topo/test/keyspace.go +++ b/go/vt/topo/test/keyspace.go @@ -17,9 +17,8 @@ limitations under the License. package test import ( - "testing" - "context" + "testing" "vitess.io/vitess/go/vt/topo" @@ -27,8 +26,7 @@ import ( ) // checkKeyspace tests the keyspace part of the API -func checkKeyspace(t *testing.T, ts *topo.Server) { - ctx := context.Background() +func checkKeyspace(t *testing.T, ctx context.Context, ts *topo.Server) { keyspaces, err := ts.GetKeyspaces(ctx) if err != nil { t.Errorf("GetKeyspaces(empty): %v", err) diff --git a/go/vt/topo/test/lock.go b/go/vt/topo/test/lock.go index 69cdeff2a55..dce51ed859d 100644 --- a/go/vt/topo/test/lock.go +++ b/go/vt/topo/test/lock.go @@ -17,12 +17,11 @@ limitations under the License. package test import ( + "context" "path" "testing" "time" - "context" - "vitess.io/vitess/go/vt/topo" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -35,8 +34,7 @@ var timeUntilLockIsTaken = 10 * time.Millisecond // checkLock checks we can lock / unlock as expected. It's using a keyspace // as the lock target. -func checkLock(t *testing.T, ts *topo.Server) { - ctx := context.Background() +func checkLock(t *testing.T, ctx context.Context, ts *topo.Server) { if err := ts.CreateKeyspace(ctx, "test_keyspace", &topodatapb.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace: %v", err) } diff --git a/go/vt/topo/test/replication.go b/go/vt/topo/test/replication.go index e681749f68d..3080cb77145 100644 --- a/go/vt/topo/test/replication.go +++ b/go/vt/topo/test/replication.go @@ -17,20 +17,18 @@ limitations under the License. 
package test import ( + "context" "testing" "google.golang.org/protobuf/proto" - "context" - "vitess.io/vitess/go/vt/topo" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) // checkShardReplication tests ShardReplication objects -func checkShardReplication(t *testing.T, ts *topo.Server) { - ctx := context.Background() +func checkShardReplication(t *testing.T, ctx context.Context, ts *topo.Server) { if _, err := ts.GetShardReplication(ctx, LocalCellName, "test_keyspace", "-10"); !topo.IsErrType(err, topo.NoNode) { t.Errorf("GetShardReplication(not there): %v", err) } diff --git a/go/vt/topo/test/serving.go b/go/vt/topo/test/serving.go index dfeac442180..dd00f3da370 100644 --- a/go/vt/topo/test/serving.go +++ b/go/vt/topo/test/serving.go @@ -17,12 +17,11 @@ limitations under the License. package test import ( + "context" "testing" "google.golang.org/protobuf/proto" - "context" - "vitess.io/vitess/go/vt/topo" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -30,9 +29,7 @@ import ( ) // checkSrvKeyspace tests the SrvKeyspace methods (other than watch). -func checkSrvKeyspace(t *testing.T, ts *topo.Server) { - ctx := context.Background() - +func checkSrvKeyspace(t *testing.T, ctx context.Context, ts *topo.Server) { // Test GetSrvKeyspaceNames returns an empty list correctly. if names, err := ts.GetSrvKeyspaceNames(ctx, LocalCellName); err != nil || len(names) != 0 { t.Errorf("GetSrvKeyspace(not there): %v %v", names, err) @@ -91,9 +88,7 @@ func checkSrvKeyspace(t *testing.T, ts *topo.Server) { } // checkSrvVSchema tests the SrvVSchema methods (other than watch). 
-func checkSrvVSchema(t *testing.T, ts *topo.Server) { - ctx := context.Background() - +func checkSrvVSchema(t *testing.T, ctx context.Context, ts *topo.Server) { // check GetSrvVSchema returns topo.ErrNoNode if no SrvVSchema if _, err := ts.GetSrvVSchema(ctx, LocalCellName); !topo.IsErrType(err, topo.NoNode) { t.Errorf("GetSrvVSchema(not set): %v", err) diff --git a/go/vt/topo/test/shard.go b/go/vt/topo/test/shard.go index d285f382838..b5c92c4a3ec 100644 --- a/go/vt/topo/test/shard.go +++ b/go/vt/topo/test/shard.go @@ -31,8 +31,7 @@ import ( ) // checkShard verifies the Shard operations work correctly -func checkShard(t *testing.T, ts *topo.Server) { - ctx := context.Background() +func checkShard(t *testing.T, ctx context.Context, ts *topo.Server) { if err := ts.CreateKeyspace(ctx, "test_keyspace", &topodatapb.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace: %v", err) } @@ -100,8 +99,7 @@ func checkShard(t *testing.T, ts *topo.Server) { // checkShardWithLock verifies that `TryLockShard` will keep failing with `NodeExists` error if there is // a lock already taken for given shard. Once we unlock that shard, then subsequent call to `TryLockShard` // should succeed. -func checkShardWithLock(t *testing.T, ts *topo.Server) { - ctx := context.Background() +func checkShardWithLock(t *testing.T, ctx context.Context, ts *topo.Server) { if err := ts.CreateKeyspace(ctx, "test_keyspace", &topodatapb.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace: %v", err) } diff --git a/go/vt/topo/test/tablet.go b/go/vt/topo/test/tablet.go index 4a562b1e46a..63afc1abff0 100644 --- a/go/vt/topo/test/tablet.go +++ b/go/vt/topo/test/tablet.go @@ -17,21 +17,18 @@ limitations under the License. package test import ( + "context" "testing" "google.golang.org/protobuf/proto" - "context" - "vitess.io/vitess/go/vt/topo" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) // checkTablet verifies the topo server API is correct for managing tablets. 
-func checkTablet(t *testing.T, ts *topo.Server) { - ctx := context.Background() - +func checkTablet(t *testing.T, ctx context.Context, ts *topo.Server) { tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: LocalCellName, diff --git a/go/vt/topo/test/testing.go b/go/vt/topo/test/testing.go index e8d014242ad..d189a7f4cf1 100644 --- a/go/vt/topo/test/testing.go +++ b/go/vt/topo/test/testing.go @@ -22,6 +22,7 @@ limitations under the License. package test import ( + "context" "testing" "vitess.io/vitess/go/vt/topo" @@ -40,7 +41,7 @@ func newKeyRange(value string) *topodatapb.KeyRange { return result } -func executeTestSuite(f func(*testing.T, *topo.Server), t *testing.T, ts *topo.Server, ignoreList []string, name string) { +func executeTestSuite(f func(*testing.T, context.Context, *topo.Server), t *testing.T, ctx context.Context, ts *topo.Server, ignoreList []string, name string) { // some test does not apply every where therefore we ignore them for _, n := range ignoreList { if n == name { @@ -48,7 +49,7 @@ func executeTestSuite(f func(*testing.T, *topo.Server), t *testing.T, ts *topo.S return } } - f(t, ts) + f(t, ctx, ts) } // TopoServerTestSuite runs the full topo.Server/Conn test suite. @@ -57,101 +58,101 @@ func executeTestSuite(f func(*testing.T, *topo.Server), t *testing.T, ts *topo.S // Not all tests are applicable for each Topo server, therefore we provide ignoreList in order to // avoid them for given Topo server tests. For example `TryLock` implementation is same as `Lock` for some Topo servers. // Hence, for these Topo servers we ignore executing TryLock Tests. 
-func TopoServerTestSuite(t *testing.T, factory func() *topo.Server, ignoreList []string) { +func TopoServerTestSuite(t *testing.T, ctx context.Context, factory func() *topo.Server, ignoreList []string) { var ts *topo.Server t.Log("=== checkKeyspace") ts = factory() - executeTestSuite(checkKeyspace, t, ts, ignoreList, "checkKeyspace") + executeTestSuite(checkKeyspace, t, ctx, ts, ignoreList, "checkKeyspace") ts.Close() t.Log("=== checkShard") ts = factory() - executeTestSuite(checkShard, t, ts, ignoreList, "checkShard") + executeTestSuite(checkShard, t, ctx, ts, ignoreList, "checkShard") ts.Close() t.Log("=== checkShardWithLock") ts = factory() - executeTestSuite(checkShardWithLock, t, ts, ignoreList, "checkShardWithLock") + executeTestSuite(checkShardWithLock, t, ctx, ts, ignoreList, "checkShardWithLock") ts.Close() t.Log("=== checkTablet") ts = factory() - executeTestSuite(checkTablet, t, ts, ignoreList, "checkTablet") + executeTestSuite(checkTablet, t, ctx, ts, ignoreList, "checkTablet") ts.Close() t.Log("=== checkShardReplication") ts = factory() - executeTestSuite(checkShardReplication, t, ts, ignoreList, "checkShardReplication") + executeTestSuite(checkShardReplication, t, ctx, ts, ignoreList, "checkShardReplication") ts.Close() t.Log("=== checkSrvKeyspace") ts = factory() - executeTestSuite(checkSrvKeyspace, t, ts, ignoreList, "checkSrvKeyspace") + executeTestSuite(checkSrvKeyspace, t, ctx, ts, ignoreList, "checkSrvKeyspace") ts.Close() t.Log("=== checkSrvVSchema") ts = factory() - executeTestSuite(checkSrvVSchema, t, ts, ignoreList, "checkSrvVSchema") + executeTestSuite(checkSrvVSchema, t, ctx, ts, ignoreList, "checkSrvVSchema") ts.Close() t.Log("=== checkLock") ts = factory() - executeTestSuite(checkLock, t, ts, ignoreList, "checkLock") + executeTestSuite(checkLock, t, ctx, ts, ignoreList, "checkLock") ts.Close() t.Log("=== checkTryLock") ts = factory() - executeTestSuite(checkTryLock, t, ts, ignoreList, "checkTryLock") + executeTestSuite(checkTryLock, t, 
ctx, ts, ignoreList, "checkTryLock") ts.Close() t.Log("=== checkVSchema") ts = factory() - executeTestSuite(checkVSchema, t, ts, ignoreList, "checkVSchema") + executeTestSuite(checkVSchema, t, ctx, ts, ignoreList, "checkVSchema") ts.Close() t.Log("=== checkRoutingRules") ts = factory() - executeTestSuite(checkRoutingRules, t, ts, ignoreList, "checkRoutingRules") + executeTestSuite(checkRoutingRules, t, ctx, ts, ignoreList, "checkRoutingRules") ts.Close() t.Log("=== checkElection") ts = factory() - executeTestSuite(checkElection, t, ts, ignoreList, "checkElection") + executeTestSuite(checkElection, t, ctx, ts, ignoreList, "checkElection") ts.Close() t.Log("=== checkWaitForNewLeader") ts = factory() - executeTestSuite(checkWaitForNewLeader, t, ts, ignoreList, "checkWaitForNewLeader") + executeTestSuite(checkWaitForNewLeader, t, ctx, ts, ignoreList, "checkWaitForNewLeader") ts.Close() t.Log("=== checkDirectory") ts = factory() - executeTestSuite(checkDirectory, t, ts, ignoreList, "checkDirectory") + executeTestSuite(checkDirectory, t, ctx, ts, ignoreList, "checkDirectory") ts.Close() t.Log("=== checkFile") ts = factory() - executeTestSuite(checkFile, t, ts, ignoreList, "checkFile") + executeTestSuite(checkFile, t, ctx, ts, ignoreList, "checkFile") ts.Close() t.Log("=== checkWatch") ts = factory() - executeTestSuite(checkWatch, t, ts, ignoreList, "checkWatch") + executeTestSuite(checkWatch, t, ctx, ts, ignoreList, "checkWatch") ts.Close() ts = factory() t.Log("=== checkWatchInterrupt") - executeTestSuite(checkWatchInterrupt, t, ts, ignoreList, "checkWatchInterrupt") + executeTestSuite(checkWatchInterrupt, t, ctx, ts, ignoreList, "checkWatchInterrupt") ts.Close() ts = factory() t.Log("=== checkList") - executeTestSuite(checkList, t, ts, ignoreList, "checkList") + executeTestSuite(checkList, t, ctx, ts, ignoreList, "checkList") ts.Close() ts = factory() t.Log("=== checkWatchRecursive") - executeTestSuite(checkWatchRecursive, t, ts, ignoreList, "checkWatchRecursive") + 
executeTestSuite(checkWatchRecursive, t, ctx, ts, ignoreList, "checkWatchRecursive") ts.Close() } diff --git a/go/vt/topo/test/trylock.go b/go/vt/topo/test/trylock.go index cace3cccc61..4519d1bcaab 100644 --- a/go/vt/topo/test/trylock.go +++ b/go/vt/topo/test/trylock.go @@ -31,8 +31,7 @@ import ( // checkTryLock checks if we can lock / unlock as expected. It's using a keyspace // as the lock target. -func checkTryLock(t *testing.T, ts *topo.Server) { - ctx := context.Background() +func checkTryLock(t *testing.T, ctx context.Context, ts *topo.Server) { if err := ts.CreateKeyspace(ctx, "test_keyspace", &topodatapb.Keyspace{}); err != nil { require.Fail(t, "CreateKeyspace fail", err.Error()) } diff --git a/go/vt/topo/test/vschema.go b/go/vt/topo/test/vschema.go index 0c2d58bdba7..5063addaefd 100644 --- a/go/vt/topo/test/vschema.go +++ b/go/vt/topo/test/vschema.go @@ -17,9 +17,8 @@ limitations under the License. package test import ( - "testing" - "context" + "testing" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" @@ -31,8 +30,7 @@ import ( ) // checkVSchema runs the tests on the VSchema part of the API -func checkVSchema(t *testing.T, ts *topo.Server) { - ctx := context.Background() +func checkVSchema(t *testing.T, ctx context.Context, ts *topo.Server) { if err := ts.CreateKeyspace(ctx, "test_keyspace", &topodatapb.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace: %v", err) } @@ -92,9 +90,7 @@ func checkVSchema(t *testing.T, ts *topo.Server) { } // checkRoutingRules runs the tests on the routing rules part of the API -func checkRoutingRules(t *testing.T, ts *topo.Server) { - ctx := context.Background() - +func checkRoutingRules(t *testing.T, ctx context.Context, ts *topo.Server) { if _, err := ts.GetRoutingRules(ctx); err != nil { t.Fatal(err) } diff --git a/go/vt/topo/test/watch.go b/go/vt/topo/test/watch.go index 08dec8cd56f..a4caaaf742d 100644 --- a/go/vt/topo/test/watch.go +++ b/go/vt/topo/test/watch.go @@ -114,8 +114,8 @@ func 
waitForInitialValueRecursive(t *testing.T, conn topo.Conn, srvKeyspace *top // checkWatch runs the tests on the Watch part of the Conn API. // We use a SrvKeyspace object. -func checkWatch(t *testing.T, ts *topo.Server) { - ctx, cancel := context.WithCancel(context.Background()) +func checkWatch(t *testing.T, ctx context.Context, ts *topo.Server) { + ctx, cancel := context.WithCancel(ctx) defer cancel() conn, err := ts.ConnForCell(ctx, LocalCellName) if err != nil { @@ -227,8 +227,7 @@ func checkWatch(t *testing.T, ts *topo.Server) { } // checkWatchInterrupt tests we can interrupt a watch. -func checkWatchInterrupt(t *testing.T, ts *topo.Server) { - ctx := context.Background() +func checkWatchInterrupt(t *testing.T, ctx context.Context, ts *topo.Server) { conn, err := ts.ConnForCell(ctx, LocalCellName) if err != nil { t.Fatalf("ConnForCell(test) failed: %v", err) @@ -298,8 +297,8 @@ func checkWatchInterrupt(t *testing.T, ts *topo.Server) { } // checkWatchRecursive tests we can setup a recursive watch -func checkWatchRecursive(t *testing.T, ts *topo.Server) { - ctx, cancel := context.WithCancel(context.Background()) +func checkWatchRecursive(t *testing.T, ctx context.Context, ts *topo.Server) { + ctx, cancel := context.WithCancel(ctx) defer cancel() conn, err := ts.ConnForCell(ctx, LocalCellName) if err != nil { diff --git a/go/vt/topo/topoproto/tablet.go b/go/vt/topo/topoproto/tablet.go index 880637ad8d6..21d174c7a82 100644 --- a/go/vt/topo/topoproto/tablet.go +++ b/go/vt/topo/topoproto/tablet.go @@ -186,6 +186,9 @@ func ParseTabletType(param string) (topodatapb.TabletType, error) { // ParseTabletTypes parses a comma separated list of tablet types and returns a slice with the respective enums. 
func ParseTabletTypes(param string) ([]topodatapb.TabletType, error) { var tabletTypes []topodatapb.TabletType + if param == "" { + return tabletTypes, nil + } for _, typeStr := range strings.Split(param, ",") { t, err := ParseTabletType(typeStr) if err != nil { @@ -227,6 +230,21 @@ func MakeStringTypeList(types []topodatapb.TabletType) []string { return strs } +// MakeStringTypeUnsortedList returns a list of strings that match the input +// without modifying the order in the list. +func MakeStringTypeUnsortedList(types []topodatapb.TabletType) []string { + strs := make([]string, len(types)) + for i, t := range types { + strs[i] = strings.ToLower(t.String()) + } + return strs +} + +// MakeStringTypeCSV returns the tablet types in CSV format. +func MakeStringTypeCSV(types []topodatapb.TabletType) string { + return strings.Join(MakeStringTypeUnsortedList(types), ",") +} + // MakeUniqueStringTypeList returns a unique list of strings that match // the input list -- with duplicate types removed. // This is needed as some types are aliases for others, like BATCH and diff --git a/go/vt/topo/topotests/cell_info_test.go b/go/vt/topo/topotests/cell_info_test.go index 89ad68043a1..becdbd8d14a 100644 --- a/go/vt/topo/topotests/cell_info_test.go +++ b/go/vt/topo/topotests/cell_info_test.go @@ -36,8 +36,10 @@ import ( func TestCellInfo(t *testing.T) { cell := "cell1" - ctx := context.Background() - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + defer ts.Close() // Check GetCellInfo returns what memorytopo created. 
ci, err := ts.GetCellInfo(ctx, cell, true /*strongRead*/) @@ -135,7 +137,8 @@ func TestCellInfo(t *testing.T) { } func TestExpandCells(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() var cells []string var err error var allCells = "cell1,cell2,cell3" @@ -162,10 +165,12 @@ func TestExpandCells(t *testing.T) { topoCells := strings.Split(cellsIn, ",") var ts *topo.Server if tCase.name == "bad" { - ts = memorytopo.NewServer() + ts = memorytopo.NewServer(ctx) } else { - ts = memorytopo.NewServer(topoCells...) + ts = memorytopo.NewServer(ctx, topoCells...) } + defer ts.Close() + cells, err = ts.ExpandCells(ctx, cellsIn) if tCase.errString != "" { require.Error(t, err) @@ -179,7 +184,7 @@ func TestExpandCells(t *testing.T) { t.Run("aliases", func(t *testing.T) { cells := []string{"cell1", "cell2", "cell3"} - ts := memorytopo.NewServer(cells...) + ts := memorytopo.NewServer(ctx, cells...) err := ts.CreateCellsAlias(ctx, "alias", &topodatapb.CellsAlias{Cells: cells}) require.NoError(t, err) @@ -228,8 +233,10 @@ func TestExpandCells(t *testing.T) { } func TestDeleteCellInfo(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("zone1", "unreachable") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1", "unreachable") + defer ts.Close() err := ts.UpdateCellInfoFields(ctx, "unreachable", func(ci *topodatapb.CellInfo) error { ci.ServerAddress = memorytopo.UnreachableServerAddr @@ -254,11 +261,11 @@ func TestDeleteCellInfo(t *testing.T) { }, } for _, tt := range tests { - func() { - ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) - defer cancel() + t.Run(fmt.Sprintf("force:%t", tt.force), func(t *testing.T) { + requestCtx, requestCancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer requestCancel() - err := ts.DeleteCellInfo(ctx, "unreachable", tt.force) + err := ts.DeleteCellInfo(requestCtx, 
"unreachable", tt.force) if tt.shouldErr { assert.Error(t, err, "force=%t", tt.force) } else { @@ -272,6 +279,6 @@ func TestDeleteCellInfo(t *testing.T) { } else { assert.True(t, topo.IsErrType(err, topo.NoNode), "expected cell %q to not exist", "unreachable") } - }() + }) } } diff --git a/go/vt/topo/topotests/cells_aliases_test.go b/go/vt/topo/topotests/cells_aliases_test.go index d124dcd8d47..7b8f0ebe3f5 100644 --- a/go/vt/topo/topotests/cells_aliases_test.go +++ b/go/vt/topo/topotests/cells_aliases_test.go @@ -33,8 +33,10 @@ func TestCellsAliases(t *testing.T) { // Create an alias cell := "cell1" - ctx := context.Background() - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + defer ts.Close() if err := ts.CreateCellsAlias(ctx, "alias", &topodatapb.CellsAlias{Cells: []string{"cell1", "cell2"}}); err != nil { t.Fatalf("CreateCellsAlias failed: %v", err) diff --git a/go/vt/topo/topotests/keyspace_test.go b/go/vt/topo/topotests/keyspace_test.go index b0b35c1421c..96eb9938353 100644 --- a/go/vt/topo/topotests/keyspace_test.go +++ b/go/vt/topo/topotests/keyspace_test.go @@ -31,8 +31,10 @@ import ( ) func TestCreateKeyspace(t *testing.T) { - ts := memorytopo.NewServer("zone1") - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() t.Run("valid name", func(t *testing.T) { err := ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) @@ -46,8 +48,10 @@ func TestCreateKeyspace(t *testing.T) { } func TestGetKeyspace(t *testing.T) { - ts := memorytopo.NewServer("zone1") - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() t.Run("valid name", func(t *testing.T) { // First, create the keyspace. 
diff --git a/go/vt/topo/topotests/replication_test.go b/go/vt/topo/topotests/replication_test.go index 0d6e148fa3d..d45aaf36551 100644 --- a/go/vt/topo/topotests/replication_test.go +++ b/go/vt/topo/topotests/replication_test.go @@ -37,8 +37,10 @@ func TestFixShardReplication(t *testing.T) { cell := "cell1" keyspace := "ks1" shard := "shard1" - ctx := context.Background() - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + defer ts.Close() // Create a tablet. alias := &topodatapb.TabletAlias{ diff --git a/go/vt/topo/topotests/shard_watch_test.go b/go/vt/topo/topotests/shard_watch_test.go index a8333251a2d..80b696c106d 100644 --- a/go/vt/topo/topotests/shard_watch_test.go +++ b/go/vt/topo/topotests/shard_watch_test.go @@ -56,8 +56,10 @@ func waitForInitialShard(t *testing.T, ts *topo.Server, keyspace, shard string) func TestWatchShardNoNode(t *testing.T) { keyspace := "ks1" shard := "0" - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") + defer ts.Close() // No Shard -> ErrNoNode _, _, err := ts.WatchShard(ctx, keyspace, shard) @@ -70,8 +72,10 @@ func TestWatchShard(t *testing.T) { cell := "cell1" keyspace := "ks1" shard := "0" - ctx := context.Background() - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + defer ts.Close() // Create keyspace if err := ts.CreateKeyspace(ctx, keyspace, &topodatapb.Keyspace{}); err != nil { @@ -205,8 +209,10 @@ func TestWatchShardCancel(t *testing.T) { cell := "cell1" keyspace := "ks1" shard := "0" - ctx := context.Background() - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + defer ts.Close() // No Shard 
-> ErrNoNode _, _, err := ts.WatchShard(ctx, keyspace, shard) diff --git a/go/vt/topo/topotests/srv_keyspace_test.go b/go/vt/topo/topotests/srv_keyspace_test.go index 8eeaf3f07ac..97e44e3a82a 100644 --- a/go/vt/topo/topotests/srv_keyspace_test.go +++ b/go/vt/topo/topotests/srv_keyspace_test.go @@ -62,8 +62,10 @@ func waitForInitialSrvKeyspace(t *testing.T, ts *topo.Server, cell, keyspace str func TestWatchSrvKeyspaceNoNode(t *testing.T) { cell := "cell1" keyspace := "ks1" - ctx := context.Background() - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + defer ts.Close() // No SrvKeyspace -> ErrNoNode _, _, err := ts.WatchSrvKeyspace(ctx, cell, keyspace) @@ -76,8 +78,10 @@ func TestWatchSrvKeyspace(t *testing.T) { cell := "cell1" keyspace := "ks1" - ctx := context.Background() - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + defer ts.Close() // Create initial value if err := ts.UpdateSrvKeyspace(ctx, cell, keyspace, &topodatapb.SrvKeyspace{}); err != nil { @@ -175,8 +179,10 @@ func TestWatchSrvKeyspace(t *testing.T) { func TestWatchSrvKeyspaceCancel(t *testing.T) { cell := "cell1" keyspace := "ks1" - ctx := context.Background() - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + defer ts.Close() // No SrvKeyspace -> ErrNoNode _, _, err := ts.WatchSrvKeyspace(ctx, cell, keyspace) @@ -223,8 +229,10 @@ func TestUpdateSrvKeyspacePartitions(t *testing.T) { cell := "cell1" cell2 := "cell2" keyspace := "ks1" - ctx := context.Background() - ts := memorytopo.NewServer(cell, cell2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell, cell2) + defer ts.Close() keyRange, err := key.ParseShardingSpec("-") if err != nil || 
len(keyRange) != 1 { @@ -464,8 +472,10 @@ func TestUpdateUpdateDisableQueryService(t *testing.T) { cell := "cell1" cell2 := "cell2" keyspace := "ks1" - ctx := context.Background() - ts := memorytopo.NewServer(cell, cell2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell, cell2) + defer ts.Close() leftKeyRange, err := key.ParseShardingSpec("-80") if err != nil || len(leftKeyRange) != 1 { @@ -656,8 +666,10 @@ func TestGetShardServingTypes(t *testing.T) { cell := "cell1" cell2 := "cell2" keyspace := "ks1" - ctx := context.Background() - ts := memorytopo.NewServer(cell, cell2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell, cell2) + defer ts.Close() leftKeyRange, err := key.ParseShardingSpec("-80") if err != nil || len(leftKeyRange) != 1 { @@ -764,8 +776,10 @@ func TestGetShardServingCells(t *testing.T) { cell := "cell1" cell2 := "cell2" keyspace := "ks1" - ctx := context.Background() - ts := memorytopo.NewServer(cell, cell2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell, cell2) + defer ts.Close() leftKeyRange, err := key.ParseShardingSpec("-80") if err != nil || len(leftKeyRange) != 1 { @@ -868,8 +882,10 @@ func TestMasterMigrateServedType(t *testing.T) { cell := "cell1" cell2 := "cell2" keyspace := "ks1" - ctx := context.Background() - ts := memorytopo.NewServer(cell, cell2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell, cell2) + defer ts.Close() initialKeyRange, err := key.ParseShardingSpec("-") if err != nil || len(initialKeyRange) != 1 { @@ -1152,8 +1168,10 @@ func TestValidateSrvKeyspace(t *testing.T) { cell := "cell1" cell2 := "cell2" keyspace := "ks1" - ctx := context.Background() - ts := memorytopo.NewServer(cell, cell2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + 
ts := memorytopo.NewServer(ctx, cell, cell2) + defer ts.Close() leftKeyRange, err := key.ParseShardingSpec("-80") if err != nil || len(leftKeyRange) != 1 { diff --git a/go/vt/topo/topotests/srv_vschema_test.go b/go/vt/topo/topotests/srv_vschema_test.go index 73745854ae1..85a2d65c4ec 100644 --- a/go/vt/topo/topotests/srv_vschema_test.go +++ b/go/vt/topo/topotests/srv_vschema_test.go @@ -28,7 +28,6 @@ import ( ) func TestRebuildVSchema(t *testing.T) { - ctx := context.Background() emptySrvVSchema := &vschemapb.SrvVSchema{ RoutingRules: &vschemapb.RoutingRules{}, ShardRoutingRules: &vschemapb.ShardRoutingRules{}, @@ -36,7 +35,10 @@ func TestRebuildVSchema(t *testing.T) { // Set up topology. cells := []string{"cell1", "cell2"} - ts := memorytopo.NewServer(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cells...) + defer ts.Close() // Rebuild with no keyspace / no vschema if err := ts.RebuildSrvVSchema(ctx, cells); err != nil { diff --git a/go/vt/topo/topotests/tablet_test.go b/go/vt/topo/topotests/tablet_test.go index e59b4c6d060..96bcdba1ae5 100644 --- a/go/vt/topo/topotests/tablet_test.go +++ b/go/vt/topo/topotests/tablet_test.go @@ -34,8 +34,10 @@ func TestCreateTablet(t *testing.T) { cell := "cell1" keyspace := "ks1" shard := "shard1" - ctx := context.Background() - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + defer ts.Close() // Create a tablet. 
alias := &topodatapb.TabletAlias{ diff --git a/go/vt/topo/topotests/wildcards_test.go b/go/vt/topo/topotests/wildcards_test.go index a87992b28de..d373a91b686 100644 --- a/go/vt/topo/topotests/wildcards_test.go +++ b/go/vt/topo/topotests/wildcards_test.go @@ -49,10 +49,12 @@ func (l *topoLayout) initTopo(t *testing.T, ts *topo.Server) { } func validateKeyspaceWildcard(t *testing.T, l *topoLayout, param string, expected []string) { - ts := memorytopo.NewServer() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx) + defer ts.Close() l.initTopo(t, ts) - ctx := context.Background() r, err := ts.ResolveKeyspaceWildcard(ctx, param) if err != nil { if expected != nil { @@ -85,10 +87,12 @@ func TestResolveKeyspaceWildcard(t *testing.T) { } func validateShardWildcard(t *testing.T, l *topoLayout, param string, expected []topo.KeyspaceShard) { - ts := memorytopo.NewServer() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx) + defer ts.Close() l.initTopo(t, ts) - ctx := context.Background() r, err := ts.ResolveShardWildcard(ctx, param) if err != nil { if expected != nil { @@ -181,10 +185,12 @@ func TestResolveShardWildcard(t *testing.T) { } func validateWildcards(t *testing.T, l *topoLayout, param string, expected []string) { - ts := memorytopo.NewServer() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx) + defer ts.Close() l.initTopo(t, ts) - ctx := context.Background() r, err := ts.ResolveWildcards(ctx, topo.GlobalCell, []string{param}) if err != nil { if expected != nil { diff --git a/go/vt/topo/vschema.go b/go/vt/topo/vschema.go index a2503673deb..0f63a26c2ae 100644 --- a/go/vt/topo/vschema.go +++ b/go/vt/topo/vschema.go @@ -17,27 +17,20 @@ limitations under the License. 
package topo import ( + "context" "path" "google.golang.org/protobuf/proto" - "context" - "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vterrors" vschemapb "vitess.io/vitess/go/vt/proto/vschema" - "vitess.io/vitess/go/vt/vtgate/vindexes" ) -// SaveVSchema first validates the VSchema, then saves it. +// SaveVSchema saves a Vschema. A valid Vschema should be passed in. It does not verify its correctness. // If the VSchema is empty, just remove it. func (ts *Server) SaveVSchema(ctx context.Context, keyspace string, vschema *vschemapb.Keyspace) error { - err := vindexes.ValidateKeyspace(vschema) - if err != nil { - return err - } - nodePath := path.Join(KeyspacesPath, keyspace, VSchemaFile) data, err := vschema.MarshalVT() if err != nil { diff --git a/go/vt/topo/zk2topo/server_test.go b/go/vt/topo/zk2topo/server_test.go index 663f22f3914..f50221d91c1 100644 --- a/go/vt/topo/zk2topo/server_test.go +++ b/go/vt/topo/zk2topo/server_test.go @@ -17,12 +17,11 @@ limitations under the License. package zk2topo import ( + "context" "fmt" "path" "testing" - "context" - "vitess.io/vitess/go/testfiles" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/test" @@ -38,7 +37,9 @@ func testZk2Topo(t *testing.T) { // Run the test suite. testIndex := 0 - test.TopoServerTestSuite(t, func() *topo.Server { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + test.TopoServerTestSuite(t, ctx, func() *topo.Server { // Each test will use its own sub-directories. // The directories will be created when used the first time. 
testRoot := fmt.Sprintf("/test-%v", testIndex) diff --git a/go/vt/topotools/keyspace.go b/go/vt/topotools/keyspace.go index 82f4b133a50..d8a5740f3ae 100644 --- a/go/vt/topotools/keyspace.go +++ b/go/vt/topotools/keyspace.go @@ -118,22 +118,18 @@ func UpdateShardRecords( if err := ts.UpdateDisableQueryService(ctx, keyspace, shards, servedType, cells, disableQueryService); err != nil { return err } - for i, si := range shards { updatedShard, err := ts.UpdateShardFields(ctx, si.Keyspace(), si.ShardName(), func(si *topo.ShardInfo) error { if clearSourceShards { si.SourceShards = nil } - return nil }) if err != nil { return err } - shards[i] = updatedShard - // For 'to' shards, refresh to make them serve. The 'from' shards will // be refreshed after traffic has migrated. if !isFrom { @@ -142,7 +138,6 @@ func UpdateShardRecords( } } } - return nil } diff --git a/go/vt/topotools/routing_rules.go b/go/vt/topotools/routing_rules.go index 6dfa8b655ca..9eb64c936d7 100644 --- a/go/vt/topotools/routing_rules.go +++ b/go/vt/topotools/routing_rules.go @@ -27,6 +27,19 @@ import ( vschemapb "vitess.io/vitess/go/vt/proto/vschema" ) +//region routing rules + +func GetRoutingRulesMap(rules *vschemapb.RoutingRules) map[string][]string { + if rules == nil { + return nil + } + rulesMap := make(map[string][]string, len(rules.Rules)) + for _, rr := range rules.Rules { + rulesMap[rr.FromTable] = rr.ToTables + } + return rulesMap +} + // GetRoutingRules fetches routing rules from the topology server and returns a // mapping of fromTable=>[]toTables. 
func GetRoutingRules(ctx context.Context, ts *topo.Server) (map[string][]string, error) { @@ -35,10 +48,7 @@ func GetRoutingRules(ctx context.Context, ts *topo.Server) (map[string][]string, return nil, err } - rules := make(map[string][]string, len(rrs.Rules)) - for _, rr := range rrs.Rules { - rules[rr.FromTable] = rr.ToTables - } + rules := GetRoutingRulesMap(rrs) return rules, nil } @@ -59,6 +69,29 @@ func SaveRoutingRules(ctx context.Context, ts *topo.Server, rules map[string][]s return ts.SaveRoutingRules(ctx, rrs) } +//endregion + +//region shard routing rules + +func GetShardRoutingRuleKey(fromKeyspace, shard string) string { + return fmt.Sprintf("%s.%s", fromKeyspace, shard) +} +func ParseShardRoutingRuleKey(key string) (string, string) { + arr := strings.Split(key, ".") + return arr[0], arr[1] +} + +func GetShardRoutingRulesMap(rules *vschemapb.ShardRoutingRules) map[string]string { + if rules == nil { + return nil + } + rulesMap := make(map[string]string, len(rules.Rules)) + for _, rr := range rules.Rules { + rulesMap[GetShardRoutingRuleKey(rr.FromKeyspace, rr.Shard)] = rr.ToKeyspace + } + return rulesMap +} + // GetShardRoutingRules fetches shard routing rules from the topology server and returns a // mapping of fromKeyspace.Shard=>toKeyspace. 
func GetShardRoutingRules(ctx context.Context, ts *topo.Server) (map[string]string, error) { @@ -67,10 +100,7 @@ func GetShardRoutingRules(ctx context.Context, ts *topo.Server) (map[string]stri return nil, err } - rules := make(map[string]string, len(rrs.Rules)) - for _, rr := range rrs.Rules { - rules[fmt.Sprintf("%s.%s", rr.FromKeyspace, rr.Shard)] = rr.ToKeyspace - } + rules := GetShardRoutingRulesMap(rrs) return rules, nil } @@ -82,9 +112,7 @@ func SaveShardRoutingRules(ctx context.Context, ts *topo.Server, srr map[string] srs := &vschemapb.ShardRoutingRules{Rules: make([]*vschemapb.ShardRoutingRule, 0, len(srr))} for from, to := range srr { - arr := strings.Split(from, ".") - fromKeyspace := arr[0] - shard := arr[1] + fromKeyspace, shard := ParseShardRoutingRuleKey(from) srs.Rules = append(srs.Rules, &vschemapb.ShardRoutingRule{ FromKeyspace: fromKeyspace, ToKeyspace: to, diff --git a/go/vt/topotools/routing_rules_test.go b/go/vt/topotools/routing_rules_test.go index 6047bb441fe..0b4f265a77b 100644 --- a/go/vt/topotools/routing_rules_test.go +++ b/go/vt/topotools/routing_rules_test.go @@ -28,8 +28,10 @@ import ( ) func TestRoutingRulesRoundTrip(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() rules := map[string][]string{ "t1": {"t2", "t3"}, @@ -46,8 +48,10 @@ func TestRoutingRulesRoundTrip(t *testing.T) { } func TestRoutingRulesErrors(t *testing.T) { - ctx := context.Background() - ts, factory := memorytopo.NewServerAndFactory("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, factory := memorytopo.NewServerAndFactory(ctx, "zone1") + defer ts.Close() factory.SetError(errors.New("topo failure for testing")) t.Run("GetRoutingRules error", func(t *testing.T) { @@ -68,8 +72,10 @@ func TestRoutingRulesErrors(t *testing.T) { } func 
TestShardRoutingRulesRoundTrip(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() srr := map[string]string{ "ks1.shard1": "ks2", diff --git a/go/vt/topotools/shard_test.go b/go/vt/topotools/shard_test.go index 8904c984715..f2fb5f50340 100644 --- a/go/vt/topotools/shard_test.go +++ b/go/vt/topotools/shard_test.go @@ -17,13 +17,11 @@ limitations under the License. package topotools import ( + "context" "fmt" "math/rand" "sync" "testing" - "time" - - "context" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -31,10 +29,10 @@ import ( // TestCreateShard tests a few cases for topo.CreateShard func TestCreateShard(t *testing.T) { - ctx := context.Background() - - // Set up topology. - ts := memorytopo.NewServer("test_cell") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "test_cell") + defer ts.Close() keyspace := "test_keyspace" shard := "0" @@ -60,10 +58,10 @@ func TestCreateShard(t *testing.T) { // TODO(sougou): we should eventually disallow multiple shards // for unsharded keyspaces. func TestCreateShardMultiUnsharded(t *testing.T) { - ctx := context.Background() - - // Set up topology. - ts := memorytopo.NewServer("test_cell") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "test_cell") + defer ts.Close() // create keyspace keyspace := "test_keyspace" @@ -102,16 +100,14 @@ func TestCreateShardMultiUnsharded(t *testing.T) { // for a long time in parallel, making sure the locking and everything // works correctly. func TestGetOrCreateShard(t *testing.T) { - ctx := context.Background() - - // Set up topology. 
- cell := "test_cell" - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "test_cell") + defer ts.Close() // and do massive parallel GetOrCreateShard keyspace := "test_keyspace" wg := sync.WaitGroup{} - rand.Seed(time.Now().UnixNano()) for i := 0; i < 100; i++ { wg.Add(1) go func(i int) { diff --git a/go/vt/topotools/tablet.go b/go/vt/topotools/tablet.go index ef458ae7cd3..8bbca4b8c03 100644 --- a/go/vt/topotools/tablet.go +++ b/go/vt/topotools/tablet.go @@ -245,3 +245,29 @@ func TabletIdent(tablet *topodatapb.Tablet) string { func TargetIdent(target *querypb.Target) string { return fmt.Sprintf("%s/%s (%s)", target.Keyspace, target.Shard, target.TabletType) } + +// TabletEquality returns true iff two Tablets are identical for testing purposes +func TabletEquality(left, right *topodatapb.Tablet) bool { + if left.Keyspace != right.Keyspace { + return false + } + if left.Shard != right.Shard { + return false + } + if left.Hostname != right.Hostname { + return false + } + if left.Type != right.Type { + return false + } + if left.MysqlHostname != right.MysqlHostname { + return false + } + if left.MysqlPort != right.MysqlPort { + return false + } + if left.PrimaryTermStartTime.String() != right.PrimaryTermStartTime.String() { + return false + } + return topoproto.TabletAliasString(left.Alias) == topoproto.TabletAliasString(right.Alias) +} diff --git a/go/vt/topotools/vschema_ddl.go b/go/vt/topotools/vschema_ddl.go index e8da2734b4f..ff4d9f4ad04 100644 --- a/go/vt/topotools/vschema_ddl.go +++ b/go/vt/topotools/vschema_ddl.go @@ -214,6 +214,20 @@ func ApplyVSchemaDDL(ksName string, ks *vschemapb.Keyspace, alterVschema *sqlpar return ks, nil + case sqlparser.DropSequenceDDLAction: + if ks.Sharded { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "drop sequence table: unsupported on sharded keyspace %s", ksName) + } + + name := alterVschema.Table.Name.String() + if _, ok := 
ks.Tables[name]; !ok { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vschema does not contain sequence %s in keyspace %s", name, ksName) + } + + delete(ks.Tables, name) + + return ks, nil + case sqlparser.AddAutoIncDDLAction: name := alterVschema.Table.Name.String() table := ks.Tables[name] @@ -230,6 +244,21 @@ func ApplyVSchemaDDL(ksName string, ks *vschemapb.Keyspace, alterVschema *sqlpar Sequence: sqlparser.String(alterVschema.AutoIncSpec.Sequence), } + return ks, nil + + case sqlparser.DropAutoIncDDLAction: + name := alterVschema.Table.Name.String() + table := ks.Tables[name] + if table == nil { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vschema does not contain table %s in keyspace %s", name, ksName) + } + + if table.AutoIncrement == nil { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vschema does not contain auto increment %v on table %s in keyspace %s", table.AutoIncrement, name, ksName) + } + + table.AutoIncrement = nil + return ks, nil } diff --git a/go/vt/vitessdriver/convert.go b/go/vt/vitessdriver/convert.go index abb25beb000..7ba95db4147 100644 --- a/go/vt/vitessdriver/convert.go +++ b/go/vt/vitessdriver/convert.go @@ -21,10 +21,10 @@ import ( "fmt" "time" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" ) type converter struct { @@ -32,13 +32,27 @@ type converter struct { } func (cv *converter) ToNative(v sqltypes.Value) (any, error) { - switch v.Type() { - case sqltypes.Datetime, sqltypes.Timestamp: - return DatetimeToNative(v, cv.location) - case sqltypes.Date: - return DateToNative(v, cv.location) + var out any + var err error + switch { + case v.Type() == sqltypes.Null: + // no-op + case v.IsSigned(): + return v.ToInt64() + case v.IsUnsigned(): + return v.ToUint64() + case v.IsFloat(): + return v.ToFloat64() + case v.Type() == sqltypes.Datetime, v.Type() == 
sqltypes.Timestamp: + return datetimeToNative(v, cv.location) + case v.Type() == sqltypes.Date: + return dateToNative(v, cv.location) + case v.IsQuoted() || v.Type() == sqltypes.Bit || v.Type() == sqltypes.Decimal: + out, err = v.ToBytes() + case v.Type() == sqltypes.Expression: + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v cannot be converted to a go type", v) } - return evalengine.ToNative(v) + return out, err } func (cv *converter) BuildBindVariable(v any) (*querypb.BindVariable, error) { @@ -109,12 +123,16 @@ func (cv *converter) bindVarsFromNamedValues(args []driver.NamedValue) (map[stri return bindVars, nil } -func newConverter(cfg *Configuration) (c *converter, err error) { - c = &converter{ - location: time.UTC, +func newConverter(cfg *Configuration) (*converter, error) { + c := &converter{location: time.UTC} + if cfg.DefaultLocation == "" { + return c, nil } - if cfg.DefaultLocation != "" { - c.location, err = time.LoadLocation(cfg.DefaultLocation) + + loc, err := time.LoadLocation(cfg.DefaultLocation) + if err != nil { + return nil, err } - return + c.location = loc + return c, nil } diff --git a/go/vt/vitessdriver/convert_test.go b/go/vt/vitessdriver/convert_test.go index 2e6734e8da0..c1d5f46b247 100644 --- a/go/vt/vitessdriver/convert_test.go +++ b/go/vt/vitessdriver/convert_test.go @@ -33,35 +33,161 @@ func TestToNative(t *testing.T) { convert *converter in sqltypes.Value out any - }{{ - convert: &converter{}, - in: sqltypes.TestValue(sqltypes.Int32, "1"), - out: int64(1), - }, { - convert: &converter{}, - in: sqltypes.TestValue(sqltypes.Timestamp, "2012-02-24 23:19:43"), - out: time.Date(2012, 02, 24, 23, 19, 43, 0, time.UTC), - }, { - convert: &converter{}, - in: sqltypes.TestValue(sqltypes.Time, "23:19:43"), - out: []byte("23:19:43"), // TIME is not handled - }, { - convert: &converter{}, - in: sqltypes.TestValue(sqltypes.Date, "2012-02-24"), - out: time.Date(2012, 02, 24, 0, 0, 0, 0, time.UTC), - }, { - convert: &converter{}, - in: 
sqltypes.TestValue(sqltypes.Datetime, "2012-02-24 23:19:43"), - out: time.Date(2012, 02, 24, 23, 19, 43, 0, time.UTC), - }, { - convert: convertTimeLocal, - in: sqltypes.TestValue(sqltypes.Datetime, "2012-02-24 23:19:43"), - out: time.Date(2012, 02, 24, 23, 19, 43, 0, time.Local), - }, { - convert: convertTimeLocal, - in: sqltypes.TestValue(sqltypes.Date, "2012-02-24"), - out: time.Date(2012, 02, 24, 0, 0, 0, 0, time.Local), - }} + }{ + { + convert: &converter{}, + in: sqltypes.NULL, + out: nil, + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Int8, "1"), + out: int64(1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Int16, "1"), + out: int64(1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Int24, "1"), + out: int64(1), + }, { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Int32, "1"), + out: int64(1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Int64, "1"), + out: int64(1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Uint8, "1"), + out: uint64(1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Uint16, "1"), + out: uint64(1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Uint24, "1"), + out: uint64(1), + }, { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Uint32, "1"), + out: uint64(1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Uint64, "1"), + out: uint64(1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Float32, "1.1"), + out: float64(1.1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Float64, "1.1"), + out: float64(1.1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Timestamp, "2012-02-24 23:19:43"), + out: time.Date(2012, 02, 24, 23, 19, 43, 0, time.UTC), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Time, "23:19:43"), + out: []byte("23:19:43"), // TIME is not handled + }, 
+ { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Date, "2012-02-24"), + out: time.Date(2012, 02, 24, 0, 0, 0, 0, time.UTC), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Datetime, "2012-02-24 23:19:43"), + out: time.Date(2012, 02, 24, 23, 19, 43, 0, time.UTC), + }, + { + convert: convertTimeLocal, + in: sqltypes.TestValue(sqltypes.Datetime, "2012-02-24 23:19:43"), + out: time.Date(2012, 02, 24, 23, 19, 43, 0, time.Local), + }, + { + convert: convertTimeLocal, + in: sqltypes.TestValue(sqltypes.Date, "2012-02-24"), + out: time.Date(2012, 02, 24, 0, 0, 0, 0, time.Local), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Year, "1"), + out: uint64(1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Decimal, "1"), + out: []byte("1"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Text, "a"), + out: []byte("a"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Blob, "a"), + out: []byte("a"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.VarChar, "a"), + out: []byte("a"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.VarBinary, "a"), + out: []byte("a"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Char, "a"), + out: []byte("a"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Binary, "a"), + out: []byte("a"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.VarChar, "a"), + out: []byte("a"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Bit, "a"), + out: []byte("a"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Enum, "a"), + out: []byte("a"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Set, "a"), + out: []byte("a"), + }, + } for _, tcase := range testcases { v, err := tcase.convert.ToNative(tcase.in) diff --git a/go/vt/vitessdriver/driver.go b/go/vt/vitessdriver/driver.go index 
638e31523f3..4a965399e9c 100644 --- a/go/vt/vitessdriver/driver.go +++ b/go/vt/vitessdriver/driver.go @@ -41,10 +41,30 @@ var ( // Type-check interfaces. var ( - _ driver.QueryerContext = &conn{} - _ driver.ExecerContext = &conn{} - _ driver.StmtQueryContext = &stmt{} - _ driver.StmtExecContext = &stmt{} + _ interface { + driver.Connector + } = &connector{} + + _ interface { + driver.Driver + driver.DriverContext + } = drv{} + + _ interface { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.ExecerContext + driver.Pinger + driver.QueryerContext + driver.Tx + } = &conn{} + + _ interface { + driver.Stmt + driver.StmtExecContext + driver.StmtQueryContext + } = &stmt{} ) func init() { @@ -94,8 +114,7 @@ func OpenWithConfiguration(c Configuration) (*sql.DB, error) { return sql.Open(c.DriverName, json) } -type drv struct { -} +type drv struct{} // Open implements the database/sql/driver.Driver interface. // @@ -112,25 +131,65 @@ type drv struct { // // For a description of the available fields, see the Configuration struct. func (d drv) Open(name string) (driver.Conn, error) { - c := &conn{} - err := json.Unmarshal([]byte(name), c) + conn, err := d.OpenConnector(name) if err != nil { return nil, err } - c.setDefaults() + return conn.Connect(context.Background()) +} - if c.convert, err = newConverter(&c.Configuration); err != nil { +// OpenConnector implements the database/sql/driver.DriverContext interface. +// +// See the documentation of Open for details on the format of name. +func (d drv) OpenConnector(name string) (driver.Connector, error) { + var cfg Configuration + if err := json.Unmarshal([]byte(name), &cfg); err != nil { return nil, err } - if err = c.dial(); err != nil { + cfg.setDefaults() + return d.newConnector(cfg) +} + +// A connector holds immutable state for the creation of additional conns via +// the Connect method. 
+type connector struct { + drv drv + cfg Configuration + convert *converter +} + +func (d drv) newConnector(cfg Configuration) (driver.Connector, error) { + convert, err := newConverter(&cfg) + if err != nil { return nil, err } - return c, nil + return &connector{ + drv: d, + cfg: cfg, + convert: convert, + }, nil } +// Connect implements the database/sql/driver.Connector interface. +func (c *connector) Connect(ctx context.Context) (driver.Conn, error) { + conn := &conn{ + cfg: c.cfg, + convert: c.convert, + } + + if err := conn.dial(ctx); err != nil { + return nil, err + } + + return conn, nil +} + +// Driver implements the database/sql/driver.Connector interface. +func (c *connector) Driver() driver.Driver { return c.drv } + // Configuration holds all Vitess driver settings. // // Fields with documented default values do not have to be set explicitly. @@ -202,32 +261,32 @@ func (c *Configuration) setDefaults() { } type conn struct { - Configuration + cfg Configuration convert *converter conn *vtgateconn.VTGateConn session *vtgateconn.VTGateSession } -func (c *conn) dial() error { +func (c *conn) dial(ctx context.Context) error { var err error - c.conn, err = vtgateconn.DialProtocol(context.Background(), c.Protocol, c.Address) + c.conn, err = vtgateconn.DialProtocol(ctx, c.cfg.Protocol, c.cfg.Address) if err != nil { return err } - if c.Configuration.SessionToken != "" { - sessionFromToken, err := sessionTokenToSession(c.Configuration.SessionToken) + if c.cfg.SessionToken != "" { + sessionFromToken, err := sessionTokenToSession(c.cfg.SessionToken) if err != nil { return err } c.session = c.conn.SessionFromPb(sessionFromToken) } else { - c.session = c.conn.Session(c.Target, nil) + c.session = c.conn.Session(c.cfg.Target, nil) } return nil } func (c *conn) Ping(ctx context.Context) error { - if c.Streaming { + if c.cfg.Streaming { return errors.New("Ping not allowed for streaming connections") } @@ -378,7 +437,7 @@ func sessionTokenToSession(sessionToken string) 
(*vtgatepb.Session, error) { func (c *conn) Begin() (driver.Tx, error) { // if we're loading from an existing session, we need to avoid starting a new transaction - if c.Configuration.SessionToken != "" { + if c.cfg.SessionToken != "" { return c, nil } @@ -401,7 +460,7 @@ func (c *conn) Commit() error { // if we're loading from an existing session, disallow committing/rolling back the transaction // this isn't a technical limitation, but is enforced to prevent misuse, so that only // the original creator of the transaction can commit/rollback - if c.Configuration.SessionToken != "" { + if c.cfg.SessionToken != "" { return errors.New("calling Commit from a distributed tx is not allowed") } @@ -413,7 +472,7 @@ func (c *conn) Rollback() error { // if we're loading from an existing session, disallow committing/rolling back the transaction // this isn't a technical limitation, but is enforced to prevent misuse, so that only // the original creator of the transaction can commit/rollback - if c.Configuration.SessionToken != "" { + if c.cfg.SessionToken != "" { return errors.New("calling Rollback from a distributed tx is not allowed") } @@ -424,7 +483,7 @@ func (c *conn) Rollback() error { func (c *conn) Exec(query string, args []driver.Value) (driver.Result, error) { ctx := context.TODO() - if c.Streaming { + if c.cfg.Streaming { return nil, errors.New("Exec not allowed for streaming connections") } bindVars, err := c.convert.buildBindVars(args) @@ -440,7 +499,7 @@ func (c *conn) Exec(query string, args []driver.Value) (driver.Result, error) { } func (c *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { - if c.Streaming { + if c.cfg.Streaming { return nil, errors.New("Exec not allowed for streaming connections") } @@ -462,7 +521,7 @@ func (c *conn) Query(query string, args []driver.Value) (driver.Rows, error) { return nil, err } - if c.Streaming { + if c.cfg.Streaming { stream, err := c.session.StreamExecute(ctx, 
query, bindVars) if err != nil { return nil, err @@ -488,7 +547,7 @@ func (c *conn) QueryContext(ctx context.Context, query string, args []driver.Nam return nil, err } - if c.Streaming { + if c.cfg.Streaming { stream, err := c.session.StreamExecute(ctx, query, bv) if err != nil { return nil, err diff --git a/go/vt/vitessdriver/driver_test.go b/go/vt/vitessdriver/driver_test.go index b1bdd2a833f..bd49a0acd0a 100644 --- a/go/vt/vitessdriver/driver_test.go +++ b/go/vt/vitessdriver/driver_test.go @@ -38,9 +38,7 @@ import ( "vitess.io/vitess/go/vt/vtgate/grpcvtgateservice" ) -var ( - testAddress string -) +var testAddress string // TestMain tests the Vitess Go SQL driver. // @@ -71,7 +69,7 @@ func TestOpen(t *testing.T) { panic(err) } - var testcases = []struct { + testcases := []struct { desc string connStr string conn *conn @@ -80,7 +78,7 @@ func TestOpen(t *testing.T) { desc: "Open()", connStr: fmt.Sprintf(`{"address": "%s", "target": "@replica", "timeout": %d}`, testAddress, int64(30*time.Second)), conn: &conn{ - Configuration: Configuration{ + cfg: Configuration{ Protocol: "grpc", DriverName: "vitess", Target: "@replica", @@ -94,7 +92,7 @@ func TestOpen(t *testing.T) { desc: "Open() (defaults omitted)", connStr: fmt.Sprintf(`{"address": "%s", "timeout": %d}`, testAddress, int64(30*time.Second)), conn: &conn{ - Configuration: Configuration{ + cfg: Configuration{ Protocol: "grpc", DriverName: "vitess", }, @@ -107,7 +105,7 @@ func TestOpen(t *testing.T) { desc: "Open() with keyspace", connStr: fmt.Sprintf(`{"protocol": "grpc", "address": "%s", "target": "ks:0@replica", "timeout": %d}`, testAddress, int64(30*time.Second)), conn: &conn{ - Configuration: Configuration{ + cfg: Configuration{ Protocol: "grpc", DriverName: "vitess", Target: "ks:0@replica", @@ -123,7 +121,7 @@ func TestOpen(t *testing.T) { `{"address": "%s", "timeout": %d, "defaultlocation": "America/Los_Angeles"}`, testAddress, int64(30*time.Second)), conn: &conn{ - Configuration: Configuration{ + cfg: 
Configuration{ Protocol: "grpc", DriverName: "vitess", DefaultLocation: "America/Los_Angeles", @@ -144,7 +142,7 @@ func TestOpen(t *testing.T) { wantc := tc.conn newc := *(c.(*conn)) - newc.Address = "" + newc.cfg.Address = "" newc.conn = nil newc.session = nil if !reflect.DeepEqual(&newc, wantc) { @@ -255,7 +253,7 @@ func TestExecStreamingNotAllowed(t *testing.T) { } func TestQuery(t *testing.T) { - var testcases = []struct { + testcases := []struct { desc string config Configuration requestName string @@ -357,7 +355,7 @@ func TestQuery(t *testing.T) { } func TestBindVars(t *testing.T) { - var testcases = []struct { + testcases := []struct { desc string in []driver.NamedValue out map[string]*querypb.BindVariable @@ -440,7 +438,7 @@ func TestBindVars(t *testing.T) { } func TestDatetimeQuery(t *testing.T) { - var testcases = []struct { + testcases := []struct { desc string config Configuration requestName string @@ -763,3 +761,103 @@ func colList(fields []*querypb.Field) []string { } return cols } + +func TestConnSeparateSessions(t *testing.T) { + c := Configuration{ + Protocol: "grpc", + Address: testAddress, + Target: "@primary", + } + + db, err := OpenWithConfiguration(c) + if err != nil { + t.Fatal(err) + } + defer db.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Each new connection starts a fresh session pointed at @primary. When the + // USE statement is executed, we simulate a change to that individual + // connection's target string. + // + // No connections are returned to the pool during this test and therefore + // the connection state should not be shared. 
+ var conns []*sql.Conn + for i := 0; i < 3; i++ { + sconn, err := db.Conn(ctx) + if err != nil { + t.Fatal(err) + } + conns = append(conns, sconn) + + targets := []string{targetString(t, sconn)} + + _, err = sconn.ExecContext(ctx, "use @rdonly") + require.NoError(t, err) + + targets = append(targets, targetString(t, sconn)) + + require.Equal(t, []string{"@primary", "@rdonly"}, targets) + } + + for _, c := range conns { + require.NoError(t, c.Close()) + } +} + +func TestConnReuseSessions(t *testing.T) { + c := Configuration{ + Protocol: "grpc", + Address: testAddress, + Target: "@primary", + } + + db, err := OpenWithConfiguration(c) + if err != nil { + t.Fatal(err) + } + defer db.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Pull an individual connection from the pool and execute a USE, resulting + // in changing the target string. We return the connection to the pool + // continuously in this test and verify that we keep pulling the same + // connection with its target string altered. 
+ sconn, err := db.Conn(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = sconn.ExecContext(ctx, "use @rdonly") + require.NoError(t, err) + require.NoError(t, sconn.Close()) + + var targets []string + for i := 0; i < 3; i++ { + sconn, err := db.Conn(ctx) + if err != nil { + t.Fatal(err) + } + + targets = append(targets, targetString(t, sconn)) + require.NoError(t, sconn.Close()) + } + + require.Equal(t, []string{"@rdonly", "@rdonly", "@rdonly"}, targets) +} + +func targetString(t *testing.T, c *sql.Conn) string { + t.Helper() + + var target string + require.NoError(t, c.Raw(func(driverConn any) error { + target = driverConn.(*conn).session.SessionPb().TargetString + return nil + })) + + return target +} diff --git a/go/vt/vitessdriver/fakeserver_test.go b/go/vt/vitessdriver/fakeserver_test.go index 213ff0826d2..758bfbd4756 100644 --- a/go/vt/vitessdriver/fakeserver_test.go +++ b/go/vt/vitessdriver/fakeserver_test.go @@ -35,8 +35,7 @@ import ( ) // fakeVTGateService has the server side of this fake -type fakeVTGateService struct { -} +type fakeVTGateService struct{} // queryExecute contains all the fields we use to test Execute type queryExecute struct { @@ -52,7 +51,7 @@ func (q *queryExecute) Equal(q2 *queryExecute) bool { } // Execute is part of the VTGateService interface -func (f *fakeVTGateService) Execute(ctx context.Context, c *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { +func (f *fakeVTGateService) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, c *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { execCase, ok := execMap[sql] if !ok { return session, nil, fmt.Errorf("no match for: %s", sql) @@ -102,7 +101,7 @@ func (f *fakeVTGateService) ExecuteBatch(ctx context.Context, c *mysql.Conn, ses } // StreamExecute is part of the VTGateService 
interface -func (f *fakeVTGateService) StreamExecute(ctx context.Context, c *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { +func (f *fakeVTGateService) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, c *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { execCase, ok := execMap[sql] if !ok { return session, fmt.Errorf("no match for: %s", sql) @@ -284,6 +283,20 @@ var execMap = map[string]struct { TargetString: "@primary", }, }, + "use @rdonly": { + execQuery: &queryExecute{ + SQL: "use @rdonly", + Session: &vtgatepb.Session{ + TargetString: "@primary", + Autocommit: true, + }, + }, + result: &sqltypes.Result{}, + session: &vtgatepb.Session{ + TargetString: "@rdonly", + SessionUUID: "1111", + }, + }, } var result1 = sqltypes.Result{ diff --git a/go/vt/vitessdriver/time.go b/go/vt/vitessdriver/time.go index dc2d4453c31..70ec2d679ae 100644 --- a/go/vt/vitessdriver/time.go +++ b/go/vt/vitessdriver/time.go @@ -74,8 +74,8 @@ func parseISOTime(tstr string, loc *time.Location, minLen, maxLen int) (t time.T return time.ParseInLocation(isoTimeFormat[:tlen], tstr, loc) } -// DatetimeToNative converts a Datetime Value into a time.Time -func DatetimeToNative(v sqltypes.Value, loc *time.Location) (time.Time, error) { +// datetimeToNative converts a Datetime Value into a time.Time +func datetimeToNative(v sqltypes.Value, loc *time.Location) (time.Time, error) { // Valid format string offsets for a DATETIME // |DATETIME |19+ // |------------------|------| @@ -83,11 +83,11 @@ func DatetimeToNative(v sqltypes.Value, loc *time.Location) (time.Time, error) { return parseISOTime(v.ToString(), loc, 19, isoTimeLength) } -// DateToNative converts a Date Value into a time.Time. 
+// dateToNative converts a Date Value into a time.Time. // Note that there's no specific type in the Go stdlib to represent // dates without time components, so the returned Time will have // their hours/mins/seconds zeroed out. -func DateToNative(v sqltypes.Value, loc *time.Location) (time.Time, error) { +func dateToNative(v sqltypes.Value, loc *time.Location) (time.Time, error) { // Valid format string offsets for a DATE // |DATE |10 // |---------| diff --git a/go/vt/vitessdriver/time_test.go b/go/vt/vitessdriver/time_test.go index d2924fa343a..949d8f43354 100644 --- a/go/vt/vitessdriver/time_test.go +++ b/go/vt/vitessdriver/time_test.go @@ -113,15 +113,15 @@ func TestDatetimeToNative(t *testing.T) { }} for _, tcase := range tcases { - got, err := DatetimeToNative(tcase.val, tcase.loc) + got, err := datetimeToNative(tcase.val, tcase.loc) if tcase.err && err == nil { - t.Errorf("DatetimeToNative(%v, %#v) succeeded; expected error", tcase.val, tcase.loc) + t.Errorf("datetimeToNative(%v, %#v) succeeded; expected error", tcase.val, tcase.loc) } if !tcase.err && err != nil { - t.Errorf("DatetimeToNative(%v, %#v) failed: %v", tcase.val, tcase.loc, err) + t.Errorf("datetimeToNative(%v, %#v) failed: %v", tcase.val, tcase.loc, err) } if !reflect.DeepEqual(got, tcase.out) { - t.Errorf("DatetimeToNative(%v, %#v): %v, want %v", tcase.val, tcase.loc, got, tcase.out) + t.Errorf("datetimeToNative(%v, %#v): %v, want %v", tcase.val, tcase.loc, got, tcase.out) } } } @@ -161,15 +161,15 @@ func TestDateToNative(t *testing.T) { }} for _, tcase := range tcases { - got, err := DateToNative(tcase.val, tcase.loc) + got, err := dateToNative(tcase.val, tcase.loc) if tcase.err && err == nil { - t.Errorf("DateToNative(%v, %#v) succeeded; expected error", tcase.val, tcase.loc) + t.Errorf("dateToNative(%v, %#v) succeeded; expected error", tcase.val, tcase.loc) } if !tcase.err && err != nil { - t.Errorf("DateToNative(%v, %#v) failed: %v", tcase.val, tcase.loc, err) + t.Errorf("dateToNative(%v, 
%#v) failed: %v", tcase.val, tcase.loc, err) } if !reflect.DeepEqual(got, tcase.out) { - t.Errorf("DateToNative(%v, %#v): %v, want %v", tcase.val, tcase.loc, got, tcase.out) + t.Errorf("dateToNative(%v, %#v): %v, want %v", tcase.val, tcase.loc, got, tcase.out) } } } diff --git a/go/vt/vtadmin/api.go b/go/vt/vtadmin/api.go index de973fd283f..92d11ba18ea 100644 --- a/go/vt/vtadmin/api.go +++ b/go/vt/vtadmin/api.go @@ -2148,7 +2148,7 @@ func (api *API) VTExplain(ctx context.Context, req *vtadminpb.VTExplainRequest) return nil, er.Error() } - vte, err := vtexplain.Init(srvVSchema, schema, shardMap, &vtexplain.Options{ReplicationMode: "ROW"}) + vte, err := vtexplain.Init(ctx, srvVSchema, schema, shardMap, &vtexplain.Options{ReplicationMode: "ROW"}) if err != nil { return nil, fmt.Errorf("error initilaizing vtexplain: %w", err) } diff --git a/go/vt/vtadmin/api_test.go b/go/vt/vtadmin/api_test.go index 1ca21a8af31..b7c63d25d17 100644 --- a/go/vt/vtadmin/api_test.go +++ b/go/vt/vtadmin/api_test.go @@ -803,8 +803,7 @@ func TestFindSchema(t *testing.T) { if schema != nil { // Clone so our mutation below doesn't trip the race detector. - schema = proto.Clone(schema).(*vtadminpb.Schema) - + schema = schema.CloneVT() for _, td := range schema.TableDefinitions { // Zero these out because they're non-deterministic and also not // relevant to the final result. @@ -1050,19 +1049,19 @@ func TestGetKeyspace(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() topos := make([]*topo.Server, len(tt.clusterShards)) vtctlds := make([]vtctlservicepb.VtctldServer, len(tt.clusterShards)) for i, shards := range tt.clusterShards { - ts := memorytopo.NewServer("cell1") + ts := memorytopo.NewServer(ctx, "cell1") testutil.AddShards(ctx, t, ts, shards...) 
topos[i] = ts vtctlds[i] = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { @@ -1282,20 +1281,20 @@ func TestGetKeyspaces(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // Note that these test cases were written prior to the existence of // WithTestServers, so they are all written with the assumption that // there are exactly 2 clusters. topos := []*topo.Server{ - memorytopo.NewServer("c0_cell1"), - memorytopo.NewServer("c1_cell1"), + memorytopo.NewServer(ctx, "c0_cell1"), + memorytopo.NewServer(ctx, "c1_cell1"), } for cdx, cks := range tt.clusterKeyspaces { @@ -1346,7 +1345,8 @@ func TestGetKeyspaces(t *testing.T) { } func TestGetSchema(t *testing.T) { - t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []struct { name string @@ -1361,7 +1361,7 @@ func TestGetSchema(t *testing.T) { { name: "success", clusterID: 1, - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ GetSchemaResults: map[string]struct { Schema *tabletmanagerdatapb.SchemaDefinition @@ -1416,7 +1416,7 @@ func TestGetSchema(t *testing.T) { { name: "cluster not found", clusterID: 1, // results in clusterId == "c1" - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: nil, req: &vtadminpb.GetSchemaRequest{ ClusterId: "c2", @@ -1429,7 +1429,7 @@ func TestGetSchema(t *testing.T) { { name: "tablet not found for keyspace", clusterID: 1, - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: []*vtadminpb.Tablet{ { Cluster: &vtadminpb.Cluster{ @@ -1457,7 +1457,7 @@ func TestGetSchema(t *testing.T) { { name: "no serving tablet found for keyspace", clusterID: 1, - ts: memorytopo.NewServer("zone1"), + ts: 
memorytopo.NewServer(ctx, "zone1"), tablets: []*vtadminpb.Tablet{ { Cluster: &vtadminpb.Cluster{ @@ -1485,7 +1485,7 @@ func TestGetSchema(t *testing.T) { { name: "error in GetSchema call", clusterID: 1, - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ GetSchemaResults: map[string]struct { Schema *tabletmanagerdatapb.SchemaDefinition @@ -1535,13 +1535,13 @@ func TestGetSchema(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { return grpcvtctldserver.NewVtctldServer(ts) @@ -1570,7 +1570,7 @@ func TestGetSchema(t *testing.T) { if resp != nil { // Clone so our mutation below doesn't trip the race detector. - resp = proto.Clone(resp).(*vtadminpb.Schema) + resp = resp.CloneVT() } assert.NoError(t, err) @@ -1580,8 +1580,6 @@ func TestGetSchema(t *testing.T) { } t.Run("size aggregation", func(t *testing.T) { - t.Parallel() - c1pb := &vtadminpb.Cluster{ Id: "c1", Name: "cluster1", @@ -1730,8 +1728,7 @@ func TestGetSchema(t *testing.T) { if schema != nil { // Clone so our mutation below doesn't trip the race detector. - schema = proto.Clone(schema).(*vtadminpb.Schema) - + schema = schema.CloneVT() for _, td := range schema.TableDefinitions { // Zero these out because they're non-deterministic and also not // relevant to the final result. 
@@ -2176,8 +2173,6 @@ func TestGetSchemas(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { // Note that these test cases were written prior to the existence of // WithTestServers, so they are all written with the assumption that @@ -2186,10 +2181,12 @@ func TestGetSchemas(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() topos := []*topo.Server{ - memorytopo.NewServer("c0_cell1"), - memorytopo.NewServer("c1_cell1"), + memorytopo.NewServer(ctx, "c0_cell1"), + memorytopo.NewServer(ctx, "c1_cell1"), } tmc := testutil.TabletManagerClient{ @@ -2469,7 +2466,7 @@ func TestGetSchemas(t *testing.T) { api := NewAPI([]*cluster.Cluster{c1, c2}, Options{}) defer api.Close() - resp, err := api.GetSchemas(ctx, &vtadminpb.GetSchemasRequest{ + resp, err := api.GetSchemas(context.Background(), &vtadminpb.GetSchemasRequest{ TableSizeOptions: &vtadminpb.GetSchemaTableSizeOptions{ AggregateSizes: true, }, @@ -2539,8 +2536,7 @@ func TestGetSchemas(t *testing.T) { // Clone schemas so our mutations below don't trip the race detector. schemas := make([]*vtadminpb.Schema, len(resp.Schemas)) for i, schema := range resp.Schemas { - schema := proto.Clone(schema).(*vtadminpb.Schema) - + schema := schema.CloneVT() for _, td := range schema.TableDefinitions { // Zero these out because they're non-deterministic and also not // relevant to the final result. @@ -2627,17 +2623,18 @@ func TestGetSrvKeyspace(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tmc := testutil.TabletManagerClient{} - toposerver := memorytopo.NewServer(tt.cells...) + toposerver := memorytopo.NewServer(ctx, tt.cells...) 
vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { return grpcvtctldserver.NewVtctldServer(ts) @@ -2787,17 +2784,17 @@ func TestGetSrvKeyspaces(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tmc := testutil.TabletManagerClient{} - toposerver := memorytopo.NewServer(tt.cells...) + toposerver := memorytopo.NewServer(ctx, tt.cells...) for _, ks := range tt.keyspaces { testutil.AddKeyspace(ctx, t, toposerver, ks) @@ -2956,17 +2953,17 @@ func TestGetSrvVSchema(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tmc := testutil.TabletManagerClient{} - toposerver := memorytopo.NewServer(tt.cells...) + toposerver := memorytopo.NewServer(ctx, tt.cells...) vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { return grpcvtctldserver.NewVtctldServer(ts) @@ -3250,17 +3247,17 @@ func TestGetSrvVSchemas(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tmc := testutil.TabletManagerClient{} - toposerver := memorytopo.NewServer(tt.cells...) + toposerver := memorytopo.NewServer(ctx, tt.cells...) 
vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { return grpcvtctldserver.NewVtctldServer(ts) @@ -5099,13 +5096,13 @@ func TestVTExplain(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { - toposerver := memorytopo.NewServer("c0_cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + toposerver := memorytopo.NewServer(ctx, "c0_cell1") tmc := testutil.TabletManagerClient{ GetSchemaResults: map[string]struct { @@ -5202,6 +5199,7 @@ func TestServeHTTP(t *testing.T) { }, }, }.Cluster(context.Background()) + defer testCluster.Close() tests := []struct { name string diff --git a/go/vt/vtadmin/cluster/cluster.go b/go/vt/vtadmin/cluster/cluster.go index 917b64ff4d9..6f8e355c326 100644 --- a/go/vt/vtadmin/cluster/cluster.go +++ b/go/vt/vtadmin/cluster/cluster.go @@ -30,8 +30,6 @@ import ( "text/template" "time" - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/pools" "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sets" @@ -39,7 +37,6 @@ import ( "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtadmin/cache" "vitess.io/vitess/go/vt/vtadmin/cluster/discovery" @@ -231,12 +228,13 @@ func (c *Cluster) Close() error { rec.RecordError(closer.Close()) }(closer) } + wg.Wait() if rec.HasErrors() { return fmt.Errorf("failed to cleanly close cluster (id=%s): %w", c.ID, rec.Error()) } - return nil + return c.schemaCache.Close() } // ToProto returns a value-copy protobuf equivalent of the cluster. 
@@ -340,7 +338,7 @@ func (c *Cluster) parseTablet(rows *sql.Rows) (*vtadminpb.Tablet, error) { return nil, fmt.Errorf("failed parsing primary_term_start_time %s: %w", mtstStr, err) } - topotablet.PrimaryTermStartTime = logutil.TimeToProto(timeTime) + topotablet.PrimaryTermStartTime = protoutil.TimeToProto(timeTime) } if c.TabletFQDNTmpl != nil { @@ -507,6 +505,7 @@ func (c *Cluster) EmergencyFailoverShard(ctx context.Context, req *vtctldatapb.E span.Annotate("new_primary", topoproto.TabletAliasString(req.NewPrimary)) span.Annotate("ignore_replicas", strings.Join(topoproto.TabletAliasList(req.IgnoreReplicas).ToStringSlice(), ",")) span.Annotate("prevent_cross_cell_promotion", req.PreventCrossCellPromotion) + span.Annotate("wait_for_all_tablets", req.WaitForAllTablets) if d, ok, err := protoutil.DurationFromProto(req.WaitReplicasTimeout); ok && err == nil { span.Annotate("wait_replicas_timeout", d.String()) @@ -732,8 +731,9 @@ func (c *Cluster) findWorkflows(ctx context.Context, keyspaces []string, opts Fi } resp, err := c.Vtctld.GetWorkflows(ctx, &vtctldatapb.GetWorkflowsRequest{ - Keyspace: ks, - ActiveOnly: opts.ActiveOnly, + Keyspace: ks, + ActiveOnly: opts.ActiveOnly, + IncludeLogs: true, }) c.workflowReadPool.Release() @@ -1572,8 +1572,7 @@ func (c *Cluster) getSchemaFromTablets(ctx context.Context, keyspace string, tab span, ctx := trace.NewSpan(ctx, "Vtctld.GetSchema") defer span.Finish() - - req := proto.Clone(opts.BaseRequest).(*vtctldatapb.GetSchemaRequest) + req := opts.BaseRequest.CloneVT() req.TableSizesOnly = sizesOnly req.TabletAlias = tablet.Tablet.Alias diff --git a/go/vt/vtadmin/cluster/cluster_test.go b/go/vt/vtadmin/cluster/cluster_test.go index 4222876178c..698d827d499 100644 --- a/go/vt/vtadmin/cluster/cluster_test.go +++ b/go/vt/vtadmin/cluster/cluster_test.go @@ -27,9 +27,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/mysql" + 
"vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sets" "vitess.io/vitess/go/test/utils" @@ -49,9 +48,8 @@ import ( ) func TestCreateKeyspace(t *testing.T) { - t.Parallel() + defer utils.EnsureNoLeaks(t) - ctx := context.Background() tests := []struct { name string cfg testutil.TestClusterConfig @@ -157,11 +155,12 @@ func TestCreateKeyspace(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() cluster := testutil.BuildCluster(t, tt.cfg) + defer cluster.Close() resp, err := cluster.CreateKeyspace(ctx, tt.req) if tt.shouldErr { @@ -176,7 +175,7 @@ func TestCreateKeyspace(t *testing.T) { } func TestCreateShard(t *testing.T) { - t.Parallel() + ctx := utils.LeakCheckContext(t) type test struct { name string @@ -185,11 +184,11 @@ func TestCreateShard(t *testing.T) { shouldErr bool assertion func(t *testing.T, tt *test) } - ctx := context.Background() + tests := []*test{ { name: "ok", - tc: testutil.BuildIntegrationTestCluster(t, &vtadminpb.Cluster{ + tc: testutil.BuildIntegrationTestCluster(t, ctx, &vtadminpb.Cluster{ Id: "local", Name: "local", }, "zone1"), @@ -210,7 +209,7 @@ func TestCreateShard(t *testing.T) { }, { name: "nil request", - tc: testutil.BuildIntegrationTestCluster(t, &vtadminpb.Cluster{ + tc: testutil.BuildIntegrationTestCluster(t, ctx, &vtadminpb.Cluster{ Id: "local", Name: "local", }, "zone1"), @@ -219,7 +218,7 @@ func TestCreateShard(t *testing.T) { }, { name: "no keyspace in request", - tc: testutil.BuildIntegrationTestCluster(t, &vtadminpb.Cluster{ + tc: testutil.BuildIntegrationTestCluster(t, ctx, &vtadminpb.Cluster{ Id: "local", Name: "local", }, "zone1"), @@ -231,7 +230,7 @@ func TestCreateShard(t *testing.T) { }, { name: "no shard name in request", - tc: testutil.BuildIntegrationTestCluster(t, &vtadminpb.Cluster{ + tc: testutil.BuildIntegrationTestCluster(t, ctx, 
&vtadminpb.Cluster{ Id: "local", Name: "local", }, "zone1"), @@ -243,7 +242,7 @@ func TestCreateShard(t *testing.T) { }, { name: "vtctld.CreateShard fails", - tc: testutil.BuildIntegrationTestCluster(t, &vtadminpb.Cluster{ + tc: testutil.BuildIntegrationTestCluster(t, ctx, &vtadminpb.Cluster{ Id: "local", Name: "local", }, "zone1"), @@ -258,6 +257,7 @@ func TestCreateShard(t *testing.T) { for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { + defer tt.tc.Cluster.Close() _, err := tt.tc.Cluster.CreateShard(ctx, tt.req) if tt.shouldErr { assert.Error(t, err) @@ -276,9 +276,8 @@ func TestCreateShard(t *testing.T) { } func TestDeleteKeyspace(t *testing.T) { - t.Parallel() + ctx := utils.LeakCheckContext(t) - ctx := context.Background() tests := []struct { name string cfg testutil.TestClusterConfig @@ -343,11 +342,9 @@ func TestDeleteKeyspace(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - cluster := testutil.BuildCluster(t, tt.cfg) + defer cluster.Close() resp, err := cluster.DeleteKeyspace(ctx, tt.req) if tt.shouldErr { @@ -363,6 +360,7 @@ func TestDeleteKeyspace(t *testing.T) { func TestDeleteShards(t *testing.T) { t.Parallel() + ctx := utils.LeakCheckContext(t) type test struct { name string @@ -372,16 +370,15 @@ func TestDeleteShards(t *testing.T) { shouldErr bool assertion func(t *testing.T, tt *test) } - ctx := context.Background() + tests := []*test{ { name: "ok", - tc: testutil.BuildIntegrationTestCluster(t, &vtadminpb.Cluster{ + tc: testutil.BuildIntegrationTestCluster(t, ctx, &vtadminpb.Cluster{ Id: "local", Name: "local", }, "zone1"), setup: func(t *testing.T, tt *test) { - ctx := context.Background() shards := []string{"-80", "80-"} for _, shard := range shards { _, err := tt.tc.Cluster.CreateShard(ctx, &vtctldatapb.CreateShardRequest{ @@ -418,7 +415,7 @@ func TestDeleteShards(t *testing.T) { }, { name: "nil request", - tc: testutil.BuildIntegrationTestCluster(t, 
&vtadminpb.Cluster{ + tc: testutil.BuildIntegrationTestCluster(t, ctx, &vtadminpb.Cluster{ Id: "local", Name: "local", }, "zone1"), @@ -427,7 +424,7 @@ func TestDeleteShards(t *testing.T) { }, { name: "vtctld.DeleteShards fails", - tc: testutil.BuildIntegrationTestCluster(t, &vtadminpb.Cluster{ + tc: testutil.BuildIntegrationTestCluster(t, ctx, &vtadminpb.Cluster{ Id: "local", Name: "local", }, "zone1"), @@ -475,7 +472,6 @@ func TestDeleteShards(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { if tt.setup != nil { func() { @@ -484,6 +480,7 @@ func TestDeleteShards(t *testing.T) { }() } + defer tt.tc.Cluster.Close() _, err := tt.tc.Cluster.DeleteShards(ctx, tt.req) if tt.shouldErr { assert.Error(t, err) @@ -610,6 +607,7 @@ func TestFindTablet(t *testing.T) { }, Tablets: tt.tablets, }) + defer cluster.Close() tablet, err := cluster.FindTablet(ctx, tt.filter) if tt.expectedError != nil { @@ -821,6 +819,7 @@ func TestFindTablets(t *testing.T) { }, Tablets: tt.tablets, }) + defer cluster.Close() tablets, err := cluster.FindTablets(ctx, tt.filter, tt.n) assert.NoError(t, err) @@ -1202,6 +1201,7 @@ func TestFindWorkflows(t *testing.T) { t.Parallel() c := testutil.BuildCluster(t, tt.cfg) + defer c.Close() workflows, err := c.FindWorkflows(ctx, tt.keyspaces, tt.opts) if tt.shouldErr { assert.Error(t, err) @@ -1482,6 +1482,7 @@ func TestGetCellInfos(t *testing.T) { Cluster: cpb, VtctldClient: tt.vtctld, }) + defer c.Close() cellInfos, err := c.GetCellInfos(context.Background(), tt.req) if tt.shouldErr { assert.Error(t, err) @@ -1564,6 +1565,7 @@ func TestGetCellsAliases(t *testing.T) { Cluster: cpb, VtctldClient: tt.vtctld, }) + defer c.Close() cellsAliases, err := c.GetCellsAliases(context.Background()) if tt.shouldErr { assert.Error(t, err) @@ -1716,6 +1718,7 @@ func TestGetSchema(t *testing.T) { Tablets: []*vtadminpb.Tablet{tt.tablet}, DBConfig: testutil.Dbcfg{}, }) + defer c.Close() schema, err := c.GetSchema(ctx, 
"testkeyspace", cluster.GetSchemaOptions{ BaseRequest: tt.req, @@ -1767,6 +1770,7 @@ func TestGetSchema(t *testing.T) { }, VtctldClient: vtctld, }) + defer c.Close() _, _ = c.GetSchema(ctx, "testkeyspace", cluster.GetSchemaOptions{ BaseRequest: req, @@ -2689,6 +2693,7 @@ func TestGetSchema(t *testing.T) { } c := testutil.BuildCluster(t, tt.cfg) + defer c.Close() schema, err := c.GetSchema(ctx, tt.keyspace, tt.opts) if tt.shouldErr { assert.Error(t, err) @@ -2697,8 +2702,7 @@ func TestGetSchema(t *testing.T) { } // Clone so our mutation below doesn't trip the race detector. - schema = proto.Clone(schema).(*vtadminpb.Schema) - + schema = schema.CloneVT() if schema.TableDefinitions != nil { // For simplicity, we're going to assert only on the state // of the aggregated sizes (in schema.TableSizes), since the @@ -2772,18 +2776,18 @@ func TestGetShardReplicationPositions(t *testing.T) { Response: &vtctldatapb.ShardReplicationPositionsResponse{ ReplicationStatuses: map[string]*replicationdatapb.Status{ "zone1-001": { - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), Position: "MySQL56/08d0dbbb-be29-11eb-9fea-0aafb9701138:1-109848265", }, "zone1-002": { // Note: in reality other fields will be set on replicating hosts as well, but this is sufficient to illustrate in the testing. 
- IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), Position: "MySQL56/08d0dbbb-be29-11eb-9fea-0aafb9701138:1-109848265", }, "zone1-003": { - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), Position: "MySQL56/08d0dbbb-be29-11eb-9fea-0aafb9701138:1-109848265", }, }, @@ -2835,18 +2839,18 @@ func TestGetShardReplicationPositions(t *testing.T) { PositionInfo: &vtctldatapb.ShardReplicationPositionsResponse{ ReplicationStatuses: map[string]*replicationdatapb.Status{ "zone1-001": { - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), Position: "MySQL56/08d0dbbb-be29-11eb-9fea-0aafb9701138:1-109848265", }, "zone1-002": { - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), Position: "MySQL56/08d0dbbb-be29-11eb-9fea-0aafb9701138:1-109848265", }, "zone1-003": { - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), Position: "MySQL56/08d0dbbb-be29-11eb-9fea-0aafb9701138:1-109848265", }, }, @@ -2944,6 +2948,7 @@ func TestGetShardReplicationPositions(t *testing.T) { t.Parallel() c := testutil.BuildCluster(t, tt.cfg) + defer c.Close() resp, err := c.GetShardReplicationPositions(ctx, tt.req) if tt.shouldErr { @@ -3033,6 +3038,7 @@ func TestGetVSchema(t *testing.T) { t.Parallel() cluster := 
testutil.BuildCluster(t, tt.cfg) + defer cluster.Close() vschema, err := cluster.GetVSchema(ctx, tt.keyspace) if tt.shouldErr { @@ -3191,6 +3197,7 @@ func TestGetWorkflow(t *testing.T) { t.Parallel() c := testutil.BuildCluster(t, tt.cfg) + defer c.Close() workflow, err := c.GetWorkflow(ctx, tt.keyspace, tt.workflow, tt.opts) if tt.shouldErr { assert.Error(t, err) @@ -3357,6 +3364,7 @@ func TestGetWorkflows(t *testing.T) { t.Parallel() c := testutil.BuildCluster(t, tt.cfg) + defer c.Close() workflows, err := c.GetWorkflows(ctx, tt.keyspaces, tt.opts) if tt.shouldErr { assert.Error(t, err) @@ -3434,6 +3442,7 @@ func TestSetWritable(t *testing.T) { t.Parallel() c := testutil.BuildCluster(t, tt.cfg) + defer c.Close() err := c.SetWritable(ctx, tt.req) tt.assertion(t, err, tt.assertionMsgExtra...) }) @@ -3580,6 +3589,7 @@ func TestToggleTabletReplication(t *testing.T) { t.Parallel() c := testutil.BuildCluster(t, tt.cfg) + defer c.Close() err := c.ToggleTabletReplication(ctx, tt.tablet, bool(tt.state)) tt.assertion(t, err, tt.assertionMsgExtra...) 
}) diff --git a/go/vt/vtadmin/cluster/discovery/discovery_static_file_test.go b/go/vt/vtadmin/cluster/discovery/discovery_static_file_test.go index 8fa049f8540..344ee32863d 100644 --- a/go/vt/vtadmin/cluster/discovery/discovery_static_file_test.go +++ b/go/vt/vtadmin/cluster/discovery/discovery_static_file_test.go @@ -23,7 +23,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/vt/proto/vtadmin" vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" ) @@ -54,7 +53,7 @@ func TestDiscoverVTGate(t *testing.T) { }] } `), - expected: &vtadmin.VTGate{ + expected: &vtadminpb.VTGate{ Hostname: "127.0.0.1:12345", }, }, @@ -292,7 +291,7 @@ func TestDiscoverVtctld(t *testing.T) { }] } `), - expected: &vtadmin.Vtctld{ + expected: &vtadminpb.Vtctld{ Hostname: "127.0.0.1:12345", }, }, diff --git a/go/vt/vtadmin/cluster/internal/caches/schemacache/cache.go b/go/vt/vtadmin/cluster/internal/caches/schemacache/cache.go index 98801ab3951..ebc3899e82f 100644 --- a/go/vt/vtadmin/cluster/internal/caches/schemacache/cache.go +++ b/go/vt/vtadmin/cluster/internal/caches/schemacache/cache.go @@ -22,8 +22,6 @@ import ( "fmt" "time" - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/tmutils" "vitess.io/vitess/go/vt/vtadmin/cache" @@ -178,8 +176,7 @@ func LoadOne(c *schemaCache, key Key, opts LoadOptions) (schema *vtadminpb.Schem } func loadSchema(cachedSchema *vtadminpb.Schema, opts LoadOptions) *vtadminpb.Schema { - schema := proto.Clone(cachedSchema).(*vtadminpb.Schema) - + schema := cachedSchema.CloneVT() if !opts.AggregateSizes { schema.TableSizes = nil } diff --git a/go/vt/vtadmin/cluster/resolver/resolver_test.go b/go/vt/vtadmin/cluster/resolver/resolver_test.go index 720f26f9f6c..fd1dbab5f13 100644 --- a/go/vt/vtadmin/cluster/resolver/resolver_test.go +++ b/go/vt/vtadmin/cluster/resolver/resolver_test.go @@ -78,20 +78,6 @@ func (cc *mockClientConn) assertUpdateWithin(t testing.TB, 
timeout time.Duration } } -func (cc *mockClientConn) assertErrorReportedWithin(t testing.TB, timeout time.Duration, msgAndArgs ...any) bool { - t.Helper() - - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - select { - case <-ctx.Done(): - return assert.Fail(t, "failed to receive reported error", "did not receive reported error within %v: %s", timeout, ctx.Err()) - case actual := <-cc.errors: - return assert.Error(t, actual, msgAndArgs...) - } -} - func (cc *mockClientConn) UpdateState(state grpcresolver.State) error { select { case <-cc.ctx.Done(): diff --git a/go/vt/vtadmin/testutil/cluster.go b/go/vt/vtadmin/testutil/cluster.go index 2f825093e1f..9141d6b0c22 100644 --- a/go/vt/vtadmin/testutil/cluster.go +++ b/go/vt/vtadmin/testutil/cluster.go @@ -164,10 +164,10 @@ type IntegrationTestCluster struct { // // (TODO|@ajm188): Unify this with the BuildCluster API. Also this does not // support any cluster methods that involve vtgate/vitessdriver queries. -func BuildIntegrationTestCluster(t testing.TB, c *vtadminpb.Cluster, cells ...string) *IntegrationTestCluster { +func BuildIntegrationTestCluster(t testing.TB, ctx context.Context, c *vtadminpb.Cluster, cells ...string) *IntegrationTestCluster { t.Helper() - ts, factory := memorytopo.NewServerAndFactory(cells...) + ts, factory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := grpcvtctldtestutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return grpcvtctldserver.NewVtctldServer(ts) }) diff --git a/go/vt/vtadmin/vtsql/config.go b/go/vt/vtadmin/vtsql/config.go index bf3eee89fab..52b162236fd 100644 --- a/go/vt/vtadmin/vtsql/config.go +++ b/go/vt/vtadmin/vtsql/config.go @@ -97,13 +97,20 @@ func (c *Config) Parse(args []string) error { "a Username and Password. 
Templates are given the context of the vtsql.Config, and primarily "+ "interoplate the cluster name and ID variables.") effectiveUser := fs.String("effective-user", "", "username to send queries on behalf of") - + credentialsUsername := fs.String("credentials-username", "", + "A string specifying the Username to use for authenticating with vtgate. "+ + "Used with credentials-password in place of credentials-path-tmpl, in cases where providing a static file cannot be done.") + credentialsPassword := fs.String("credentials-password", "", + "A string specifying a Password to use for authenticating with vtgate. "+ + "Used with credentials-username in place of credentials-path-tmpl, in cases where providing a static file cannot be done.") if err := fs.Parse(args); err != nil { return err } - var creds *grpcclient.StaticAuthClientCreds + var username, password string + // First load credentials from credentials-path-tmpl, if provided + var tmplStrCreds *grpcclient.StaticAuthClientCreds if *credentialsTmplStr != "" { _creds, path, err := credentials.LoadFromTemplate(*credentialsTmplStr, c) if err != nil { @@ -111,20 +118,34 @@ func (c *Config) Parse(args []string) error { } c.CredentialsPath = path - creds = _creds + tmplStrCreds = _creds + } + if tmplStrCreds != nil { + username = tmplStrCreds.Username + password = tmplStrCreds.Password } - if creds != nil { - // If we did not receive an effective user, but loaded credentials, then the - // immediate user is the effective user. 
- if *effectiveUser == "" { - *effectiveUser = creds.Username - } + // If credentials-username and credentials-password are provided, use those credentials instead + if *credentialsUsername != "" { + username = *credentialsUsername + } + if *credentialsPassword != "" { + password = *credentialsPassword + } - c.Credentials = &StaticAuthCredentials{ - EffectiveUser: *effectiveUser, - StaticAuthClientCreds: creds, - } + // If we did not receive an effective user, but loaded user credentials, then the + // immediate user is the effective user. + if *effectiveUser == "" { + *effectiveUser = username + } + + // Set credentials to values potentially supplied by credentials-password and credentials-username + c.Credentials = &StaticAuthCredentials{ + EffectiveUser: *effectiveUser, + StaticAuthClientCreds: &grpcclient.StaticAuthClientCreds{ + Username: username, + Password: password, + }, } return nil diff --git a/go/vt/vtadmin/vtsql/config_test.go b/go/vt/vtadmin/vtsql/config_test.go index 2fe2cea5d22..f23874c5039 100644 --- a/go/vt/vtadmin/vtsql/config_test.go +++ b/go/vt/vtadmin/vtsql/config_test.go @@ -98,6 +98,97 @@ func TestConfigParse(t *testing.T) { assert.Equal(t, expectedCreds, cfg.Credentials) }) + t.Run("uses vtsql-credentials-password", func(t *testing.T) { + t.Parallel() + + f, err := os.CreateTemp("", "vtsql-config-test-testcluster-*") // testcluster is going to appear in the template + require.NoError(t, err) + + _, err = f.Write([]byte(`{ + "Username": "vtadmin", + "Password": "hunter2" +}`)) + require.NoError(t, err) + + path := f.Name() + defer os.Remove(path) + f.Close() + + dir := filepath.Dir(path) + baseParts := strings.Split(filepath.Base(path), "-") + tmplParts := append(baseParts[:3], "{{ .Cluster.Name }}", baseParts[4]) + + cfg := &Config{ + Cluster: &vtadminpb.Cluster{ + Name: "testcluster", + }, + } + + credsTmplStr := filepath.Join(dir, strings.Join(tmplParts, "-")) + + args := []string{ + "--discovery-tags=a:1,b:2", + 
"--effective-user=vt_appdebug", + "--discovery-tags=c:3", + "--credentials-password=my_password", + fmt.Sprintf("--credentials-path-tmpl=%s", credsTmplStr), + } + + expectedCreds := &StaticAuthCredentials{ + EffectiveUser: "vt_appdebug", + StaticAuthClientCreds: &grpcclient.StaticAuthClientCreds{ + Username: "vtadmin", + Password: "my_password", + }, + } + expectedTags := []string{ + "a:1", + "b:2", + "c:3", + } + + err = cfg.Parse(args) + assert.NoError(t, err) + assert.Equal(t, expectedTags, cfg.ResolverOptions.DiscoveryTags) + assert.Equal(t, expectedCreds, cfg.Credentials) + }) + + t.Run("it uses vtsql credentials passed as flags", func(t *testing.T) { + t.Parallel() + + cfg := &Config{ + Cluster: &vtadminpb.Cluster{ + Name: "testcluster", + }, + } + + args := []string{ + "--discovery-tags=a:1,b:2", + "--effective-user=vt_appdebug", + "--discovery-tags=c:3", + "--credentials-username=vtadmin", + "--credentials-password=my_password", + } + + expectedCreds := &StaticAuthCredentials{ + EffectiveUser: "vt_appdebug", + StaticAuthClientCreds: &grpcclient.StaticAuthClientCreds{ + Username: "vtadmin", + Password: "my_password", + }, + } + expectedTags := []string{ + "a:1", + "b:2", + "c:3", + } + + err = cfg.Parse(args) + assert.NoError(t, err) + assert.Equal(t, expectedTags, cfg.ResolverOptions.DiscoveryTags) + assert.Equal(t, expectedCreds, cfg.Credentials) + }) + t.Run("", func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtcombo/tablet_map.go b/go/vt/vtcombo/tablet_map.go index 5892462835b..914ced933f4 100644 --- a/go/vt/vtcombo/tablet_map.go +++ b/go/vt/vtcombo/tablet_map.go @@ -89,7 +89,7 @@ func CreateTablet( } log.Infof("Creating %v tablet %v for %v/%v", tabletType, topoproto.TabletAliasString(alias), keyspace, shard) - controller := tabletserver.NewServer(topoproto.TabletAliasString(alias), ts, alias) + controller := tabletserver.NewServer(ctx, topoproto.TabletAliasString(alias), ts, alias) initTabletType := tabletType if tabletType == 
topodatapb.TabletType_PRIMARY { initTabletType = topodatapb.TabletType_REPLICA @@ -394,6 +394,10 @@ func CreateKs( return 0, fmt.Errorf("cannot load vschema file %v for keyspace %v: %v", f, keyspace, err) } + _, err = vindexes.BuildKeyspace(formal) + if err != nil { + return 0, fmt.Errorf("BuildKeyspace(%v) failed: %v", keyspace, err) + } if err := ts.SaveVSchema(ctx, keyspace, formal); err != nil { return 0, fmt.Errorf("SaveVSchema(%v) failed: %v", keyspace, err) } @@ -703,6 +707,16 @@ func (itc *internalTabletConn) VStreamRows( return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } +// VStreamTables is part of the QueryService interface. +func (itc *internalTabletConn) VStreamTables( + ctx context.Context, + request *binlogdatapb.VStreamTablesRequest, + send func(*binlogdatapb.VStreamTablesResponse) error, +) error { + err := itc.tablet.qsc.QueryService().VStreamTables(ctx, request, send) + return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) +} + // VStreamResults is part of the QueryService interface. 
func (itc *internalTabletConn) VStreamResults( ctx context.Context, @@ -861,6 +875,22 @@ func (itmc *internalTabletManagerClient) WaitForPosition(context.Context, *topod return fmt.Errorf("not implemented in vtcombo") } +// +// VReplication related methods +// + +func (itmc *internalTabletManagerClient) CreateVReplicationWorkflow(context.Context, *topodatapb.Tablet, *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) { + return nil, fmt.Errorf("not implemented in vtcombo") +} + +func (itmc *internalTabletManagerClient) DeleteVReplicationWorkflow(context.Context, *topodatapb.Tablet, *tabletmanagerdatapb.DeleteVReplicationWorkflowRequest) (*tabletmanagerdatapb.DeleteVReplicationWorkflowResponse, error) { + return nil, fmt.Errorf("not implemented in vtcombo") +} + +func (itmc *internalTabletManagerClient) ReadVReplicationWorkflow(context.Context, *topodatapb.Tablet, *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { + return nil, fmt.Errorf("not implemented in vtcombo") +} + func (itmc *internalTabletManagerClient) VReplicationExec(context.Context, *topodatapb.Tablet, string) (*querypb.QueryResult, error) { return nil, fmt.Errorf("not implemented in vtcombo") } @@ -869,7 +899,7 @@ func (itmc *internalTabletManagerClient) VReplicationWaitForPos(context.Context, return fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) UpdateVRWorkflow(context.Context, *topodatapb.Tablet, *tabletmanagerdatapb.UpdateVRWorkflowRequest) (*tabletmanagerdatapb.UpdateVRWorkflowResponse, error) { +func (itmc *internalTabletManagerClient) UpdateVReplicationWorkflow(context.Context, *topodatapb.Tablet, *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowResponse, error) { return nil, fmt.Errorf("not implemented in vtcombo") } @@ -913,6 +943,10 @@ func (itmc 
*internalTabletManagerClient) RestoreFromBackup(context.Context, *top return nil, fmt.Errorf("not implemented in vtcombo") } +func (itmc *internalTabletManagerClient) CheckThrottler(context.Context, *topodatapb.Tablet, *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) { + return nil, fmt.Errorf("not implemented in vtcombo") +} + func (itmc *internalTabletManagerClient) Close() { } @@ -967,3 +1001,6 @@ func (itc *internalTabletConn) ExecuteLoadData(ctx context.Context, target *quer } return result, nil } +func (itmc *internalTabletManagerClient) ResetSequences(ctx context.Context, tablet *topodatapb.Tablet, tables []string) error { + return fmt.Errorf("not implemented in vtcombo") +} diff --git a/go/vt/vtctl/backup.go b/go/vt/vtctl/backup.go index 8087580125c..c2f90ec4b14 100644 --- a/go/vt/vtctl/backup.go +++ b/go/vt/vtctl/backup.go @@ -73,6 +73,7 @@ func commandBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.F concurrency := subFlags.Int("concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously") allowPrimary := subFlags.Bool("allow_primary", false, "Allows backups to be taken on primary. Warning!! If you are using the builtin backup engine, this will shutdown your primary mysql for as long as it takes to create a backup.") incrementalFromPos := subFlags.String("incremental_from_pos", "", "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. 
If value is 'auto', backup taken from last successful backup position") + upgradeSafe := subFlags.Bool("upgrade-safe", false, "Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades.") if err := subFlags.Parse(args); err != nil { return err @@ -91,6 +92,7 @@ func commandBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.F Concurrency: uint64(*concurrency), AllowPrimary: *allowPrimary, IncrementalFromPos: *incrementalFromPos, + UpgradeSafe: *upgradeSafe, }, &backupEventStreamLogger{logger: wr.Logger(), ctx: ctx}) } @@ -112,6 +114,8 @@ func (b *backupEventStreamLogger) Send(resp *vtctldatapb.BackupResponse) error { func commandBackupShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { concurrency := subFlags.Int("concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously") allowPrimary := subFlags.Bool("allow_primary", false, "Whether to use primary tablet for backup. Warning!! If you are using the builtin backup engine, this will shutdown your primary mysql for as long as it takes to create a backup.") + incrementalFromPos := subFlags.String("incremental_from_pos", "", "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. 
If value is 'auto', backup taken from last successful backup position") + upgradeSafe := subFlags.Bool("upgrade-safe", false, "Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades.") if err := subFlags.Parse(args); err != nil { return err @@ -126,10 +130,12 @@ func commandBackupShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *pf } return wr.VtctldServer().BackupShard(&vtctldatapb.BackupShardRequest{ - Keyspace: keyspace, - Shard: shard, - Concurrency: uint64(*concurrency), - AllowPrimary: *allowPrimary, + Keyspace: keyspace, + Shard: shard, + Concurrency: uint64(*concurrency), + AllowPrimary: *allowPrimary, + IncrementalFromPos: *incrementalFromPos, + UpgradeSafe: *upgradeSafe, }, &backupEventStreamLogger{logger: wr.Logger(), ctx: ctx}) } @@ -202,6 +208,7 @@ func (b *backupRestoreEventStreamLogger) Send(resp *vtctldatapb.RestoreFromBacku func commandRestoreFromBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { backupTimestampStr := subFlags.String("backup_timestamp", "", "Use the backup taken at or before this timestamp rather than using the latest backup.") restoreToPos := subFlags.String("restore_to_pos", "", "Run a point in time recovery that ends with the given position. This will attempt to use one full backup followed by zero or more incremental backups") + restoreToTimestampStr := subFlags.String("restore_to_timestamp", "", "Run a point in time recovery that restores up to, and excluding, given timestamp in RFC3339 format (`2006-01-02T15:04:05Z07:00`). 
This will attempt to use one full backup followed by zero or more incremental backups") dryRun := subFlags.Bool("dry_run", false, "Only validate restore steps, do not actually restore data") if err := subFlags.Parse(args); err != nil { return err @@ -227,10 +234,18 @@ func commandRestoreFromBackup(ctx context.Context, wr *wrangler.Wrangler, subFla return err } + var restoreToTimestamp time.Time + if *restoreToTimestampStr != "" { + restoreToTimestamp, err = mysqlctl.ParseRFC3339(*restoreToTimestampStr) + if err != nil { + return vterrors.Wrapf(err, "parsing --restore_to_timestamp args") + } + } req := &vtctldatapb.RestoreFromBackupRequest{ - TabletAlias: tabletAlias, - RestoreToPos: *restoreToPos, - DryRun: *dryRun, + TabletAlias: tabletAlias, + RestoreToPos: *restoreToPos, + RestoreToTimestamp: protoutil.TimeToProto(restoreToTimestamp), + DryRun: *dryRun, } if !backupTime.IsZero() { diff --git a/go/vt/vtctl/endtoend/get_schema_test.go b/go/vt/vtctl/endtoend/get_schema_test.go index da73f2ff74f..45a161348f1 100644 --- a/go/vt/vtctl/endtoend/get_schema_test.go +++ b/go/vt/vtctl/endtoend/get_schema_test.go @@ -26,9 +26,10 @@ import ( ) func TestGetSchema(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - topo := memorytopo.NewServer("zone1", "zone2", "zone3") + topo := memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ diff --git a/go/vt/vtctl/endtoend/onlineddl_show_test.go b/go/vt/vtctl/endtoend/onlineddl_show_test.go index 0b982bc7545..fe795af752d 100644 --- a/go/vt/vtctl/endtoend/onlineddl_show_test.go +++ b/go/vt/vtctl/endtoend/onlineddl_show_test.go @@ -94,9 +94,11 @@ func TestShowOnlineDDL_Cancel(t *testing.T) { func onlineDDLTest(t *testing.T, args []string, expectedQuery string) { t.Helper() - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - fakeTopo := 
memorytopo.NewServer("zone1", "zone2", "zone3") + fakeTopo := memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") + defer fakeTopo.Close() tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ diff --git a/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient.go b/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient.go index a11db6a4952..14147316508 100644 --- a/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient.go +++ b/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient.go @@ -23,6 +23,7 @@ import ( "sync" "time" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/logutil" logutilpb "vitess.io/vitess/go/vt/proto/logutil" @@ -111,7 +112,7 @@ type streamResultAdapter struct { func (s *streamResultAdapter) Recv() (*logutilpb.Event, error) { if s.index < len(s.lines) { result := &logutilpb.Event{ - Time: logutil.TimeToProto(time.Now()), + Time: protoutil.TimeToProto(time.Now()), Level: logutilpb.Level_CONSOLE, File: "fakevtctlclient", Line: -1, diff --git a/go/vt/vtctl/grpcvtctlclient/client_test.go b/go/vt/vtctl/grpcvtctlclient/client_test.go index a50a79ecdce..50e1968533e 100644 --- a/go/vt/vtctl/grpcvtctlclient/client_test.go +++ b/go/vt/vtctl/grpcvtctlclient/client_test.go @@ -17,6 +17,7 @@ limitations under the License. package grpcvtctlclient import ( + "context" "fmt" "io" "net" @@ -38,7 +39,9 @@ import ( // the test here creates a fake server implementation, a fake client // implementation, and runs the test suite against the setup. func TestVtctlServer(t *testing.T) { - ts := vtctlclienttest.CreateTopoServer(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := vtctlclienttest.CreateTopoServer(t, ctx) // Listen on a random port listener, err := net.Listen("tcp", "127.0.0.1:0") @@ -65,7 +68,9 @@ func TestVtctlServer(t *testing.T) { // the test here creates a fake server implementation, a fake client with auth // implementation, and runs the test suite against the setup. 
func TestVtctlAuthClient(t *testing.T) { - ts := vtctlclienttest.CreateTopoServer(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := vtctlclienttest.CreateTopoServer(t, ctx) // Listen on a random port listener, err := net.Listen("tcp", "127.0.0.1:0") diff --git a/go/vt/vtctl/grpcvtctldclient/client_gen.go b/go/vt/vtctl/grpcvtctldclient/client_gen.go index 7e04dcb693d..087b566fe5d 100644 --- a/go/vt/vtctl/grpcvtctldclient/client_gen.go +++ b/go/vt/vtctl/grpcvtctldclient/client_gen.go @@ -101,6 +101,15 @@ func (client *gRPCVtctldClient) BackupShard(ctx context.Context, in *vtctldatapb return client.c.BackupShard(ctx, in, opts...) } +// CancelSchemaMigration is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) CancelSchemaMigration(ctx context.Context, in *vtctldatapb.CancelSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.CancelSchemaMigrationResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.CancelSchemaMigration(ctx, in, opts...) +} + // ChangeTabletType is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) ChangeTabletType(ctx context.Context, in *vtctldatapb.ChangeTabletTypeRequest, opts ...grpc.CallOption) (*vtctldatapb.ChangeTabletTypeResponse, error) { if client.c == nil { @@ -110,6 +119,24 @@ func (client *gRPCVtctldClient) ChangeTabletType(ctx context.Context, in *vtctld return client.c.ChangeTabletType(ctx, in, opts...) } +// CleanupSchemaMigration is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) CleanupSchemaMigration(ctx context.Context, in *vtctldatapb.CleanupSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.CleanupSchemaMigrationResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.CleanupSchemaMigration(ctx, in, opts...) 
+} + +// CompleteSchemaMigration is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) CompleteSchemaMigration(ctx context.Context, in *vtctldatapb.CompleteSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.CompleteSchemaMigrationResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.CompleteSchemaMigration(ctx, in, opts...) +} + // CreateKeyspace is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) CreateKeyspace(ctx context.Context, in *vtctldatapb.CreateKeyspaceRequest, opts ...grpc.CallOption) (*vtctldatapb.CreateKeyspaceResponse, error) { if client.c == nil { @@ -317,6 +344,15 @@ func (client *gRPCVtctldClient) GetSchema(ctx context.Context, in *vtctldatapb.G return client.c.GetSchema(ctx, in, opts...) } +// GetSchemaMigrations is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) GetSchemaMigrations(ctx context.Context, in *vtctldatapb.GetSchemaMigrationsRequest, opts ...grpc.CallOption) (*vtctldatapb.GetSchemaMigrationsResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.GetSchemaMigrations(ctx, in, opts...) +} + // GetShard is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) GetShard(ctx context.Context, in *vtctldatapb.GetShardRequest, opts ...grpc.CallOption) (*vtctldatapb.GetShardResponse, error) { if client.c == nil { @@ -434,6 +470,105 @@ func (client *gRPCVtctldClient) InitShardPrimary(ctx context.Context, in *vtctld return client.c.InitShardPrimary(ctx, in, opts...) } +// LaunchSchemaMigration is part of the vtctlservicepb.VtctldClient interface. 
+func (client *gRPCVtctldClient) LaunchSchemaMigration(ctx context.Context, in *vtctldatapb.LaunchSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.LaunchSchemaMigrationResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.LaunchSchemaMigration(ctx, in, opts...) +} + +// LookupVindexCreate is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) LookupVindexCreate(ctx context.Context, in *vtctldatapb.LookupVindexCreateRequest, opts ...grpc.CallOption) (*vtctldatapb.LookupVindexCreateResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.LookupVindexCreate(ctx, in, opts...) +} + +// LookupVindexExternalize is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) LookupVindexExternalize(ctx context.Context, in *vtctldatapb.LookupVindexExternalizeRequest, opts ...grpc.CallOption) (*vtctldatapb.LookupVindexExternalizeResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.LookupVindexExternalize(ctx, in, opts...) +} + +// MaterializeCreate is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) MaterializeCreate(ctx context.Context, in *vtctldatapb.MaterializeCreateRequest, opts ...grpc.CallOption) (*vtctldatapb.MaterializeCreateResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.MaterializeCreate(ctx, in, opts...) +} + +// MigrateCreate is part of the vtctlservicepb.VtctldClient interface. 
+func (client *gRPCVtctldClient) MigrateCreate(ctx context.Context, in *vtctldatapb.MigrateCreateRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowStatusResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.MigrateCreate(ctx, in, opts...) +} + +// MountList is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) MountList(ctx context.Context, in *vtctldatapb.MountListRequest, opts ...grpc.CallOption) (*vtctldatapb.MountListResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.MountList(ctx, in, opts...) +} + +// MountRegister is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) MountRegister(ctx context.Context, in *vtctldatapb.MountRegisterRequest, opts ...grpc.CallOption) (*vtctldatapb.MountRegisterResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.MountRegister(ctx, in, opts...) +} + +// MountShow is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) MountShow(ctx context.Context, in *vtctldatapb.MountShowRequest, opts ...grpc.CallOption) (*vtctldatapb.MountShowResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.MountShow(ctx, in, opts...) +} + +// MountUnregister is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) MountUnregister(ctx context.Context, in *vtctldatapb.MountUnregisterRequest, opts ...grpc.CallOption) (*vtctldatapb.MountUnregisterResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.MountUnregister(ctx, in, opts...) +} + +// MoveTablesComplete is part of the vtctlservicepb.VtctldClient interface. 
+func (client *gRPCVtctldClient) MoveTablesComplete(ctx context.Context, in *vtctldatapb.MoveTablesCompleteRequest, opts ...grpc.CallOption) (*vtctldatapb.MoveTablesCompleteResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.MoveTablesComplete(ctx, in, opts...) +} + +// MoveTablesCreate is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) MoveTablesCreate(ctx context.Context, in *vtctldatapb.MoveTablesCreateRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowStatusResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.MoveTablesCreate(ctx, in, opts...) +} + // PingTablet is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) PingTablet(ctx context.Context, in *vtctldatapb.PingTabletRequest, opts ...grpc.CallOption) (*vtctldatapb.PingTabletResponse, error) { if client.c == nil { @@ -551,6 +686,15 @@ func (client *gRPCVtctldClient) ReparentTablet(ctx context.Context, in *vtctldat return client.c.ReparentTablet(ctx, in, opts...) } +// ReshardCreate is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) ReshardCreate(ctx context.Context, in *vtctldatapb.ReshardCreateRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowStatusResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.ReshardCreate(ctx, in, opts...) +} + // RestoreFromBackup is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) RestoreFromBackup(ctx context.Context, in *vtctldatapb.RestoreFromBackupRequest, opts ...grpc.CallOption) (vtctlservicepb.Vtctld_RestoreFromBackupClient, error) { if client.c == nil { @@ -560,6 +704,15 @@ func (client *gRPCVtctldClient) RestoreFromBackup(ctx context.Context, in *vtctl return client.c.RestoreFromBackup(ctx, in, opts...) 
} +// RetrySchemaMigration is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) RetrySchemaMigration(ctx context.Context, in *vtctldatapb.RetrySchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.RetrySchemaMigrationResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.RetrySchemaMigration(ctx, in, opts...) +} + // RunHealthCheck is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) RunHealthCheck(ctx context.Context, in *vtctldatapb.RunHealthCheckRequest, opts ...grpc.CallOption) (*vtctldatapb.RunHealthCheckResponse, error) { if client.c == nil { @@ -722,6 +875,51 @@ func (client *gRPCVtctldClient) UpdateThrottlerConfig(ctx context.Context, in *v return client.c.UpdateThrottlerConfig(ctx, in, opts...) } +// VDiffCreate is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) VDiffCreate(ctx context.Context, in *vtctldatapb.VDiffCreateRequest, opts ...grpc.CallOption) (*vtctldatapb.VDiffCreateResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.VDiffCreate(ctx, in, opts...) +} + +// VDiffDelete is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) VDiffDelete(ctx context.Context, in *vtctldatapb.VDiffDeleteRequest, opts ...grpc.CallOption) (*vtctldatapb.VDiffDeleteResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.VDiffDelete(ctx, in, opts...) +} + +// VDiffResume is part of the vtctlservicepb.VtctldClient interface. 
+func (client *gRPCVtctldClient) VDiffResume(ctx context.Context, in *vtctldatapb.VDiffResumeRequest, opts ...grpc.CallOption) (*vtctldatapb.VDiffResumeResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.VDiffResume(ctx, in, opts...) +} + +// VDiffShow is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) VDiffShow(ctx context.Context, in *vtctldatapb.VDiffShowRequest, opts ...grpc.CallOption) (*vtctldatapb.VDiffShowResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.VDiffShow(ctx, in, opts...) +} + +// VDiffStop is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) VDiffStop(ctx context.Context, in *vtctldatapb.VDiffStopRequest, opts ...grpc.CallOption) (*vtctldatapb.VDiffStopResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.VDiffStop(ctx, in, opts...) +} + // Validate is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) Validate(ctx context.Context, in *vtctldatapb.ValidateRequest, opts ...grpc.CallOption) (*vtctldatapb.ValidateResponse, error) { if client.c == nil { @@ -785,6 +983,33 @@ func (client *gRPCVtctldClient) ValidateVersionShard(ctx context.Context, in *vt return client.c.ValidateVersionShard(ctx, in, opts...) } +// WorkflowDelete is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) WorkflowDelete(ctx context.Context, in *vtctldatapb.WorkflowDeleteRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowDeleteResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.WorkflowDelete(ctx, in, opts...) +} + +// WorkflowStatus is part of the vtctlservicepb.VtctldClient interface. 
+func (client *gRPCVtctldClient) WorkflowStatus(ctx context.Context, in *vtctldatapb.WorkflowStatusRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowStatusResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.WorkflowStatus(ctx, in, opts...) +} + +// WorkflowSwitchTraffic is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) WorkflowSwitchTraffic(ctx context.Context, in *vtctldatapb.WorkflowSwitchTrafficRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowSwitchTrafficResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.WorkflowSwitchTraffic(ctx, in, opts...) +} + // WorkflowUpdate is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) WorkflowUpdate(ctx context.Context, in *vtctldatapb.WorkflowUpdateRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowUpdateResponse, error) { if client.c == nil { diff --git a/go/vt/vtctl/grpcvtctldclient/client_test.go b/go/vt/vtctl/grpcvtctldclient/client_test.go index 1b42f8e5270..93c95ffa607 100644 --- a/go/vt/vtctl/grpcvtctldclient/client_test.go +++ b/go/vt/vtctl/grpcvtctldclient/client_test.go @@ -36,8 +36,10 @@ import ( ) func TestFindAllShardsInKeyspace(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") + defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return grpcvtctldserver.NewVtctldServer(ts) }) @@ -80,9 +82,11 @@ func TestFindAllShardsInKeyspace(t *testing.T) { } func TestGetKeyspace(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ts := memorytopo.NewServer("cell1") + ts := 
memorytopo.NewServer(ctx, "cell1") + defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return grpcvtctldserver.NewVtctldServer(ts) }) @@ -107,9 +111,11 @@ func TestGetKeyspace(t *testing.T) { } func TestGetKeyspaces(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ts := memorytopo.NewServer("cell1") + ts := memorytopo.NewServer(ctx, "cell1") + defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return grpcvtctldserver.NewVtctldServer(ts) }) diff --git a/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go index db412284e7a..87433396913 100644 --- a/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go +++ b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go @@ -21,6 +21,7 @@ import ( "fmt" "testing" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/mysqlctl" "github.com/stretchr/testify/assert" @@ -41,11 +42,14 @@ import ( ) func TestInitShardPrimary(t *testing.T) { - ts := memorytopo.NewServer("cell1") + ctx := utils.LeakCheckContext(t) + ts := memorytopo.NewServer(ctx, "cell1") tmc := tmclient.NewTabletManagerClient() + defer tmc.Close() wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmc) primaryDb := fakesqldb.New(t) + defer primaryDb.Close() primaryDb.AddQuery("create database if not exists `test_keyspace`", &sqltypes.Result{InsertID: 0, RowsAffected: 0}) tablet1 := testlib.NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, primaryDb) @@ -101,11 +105,14 @@ func TestInitShardPrimary(t *testing.T) { } func TestInitShardPrimaryNoFormerPrimary(t *testing.T) { - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := 
memorytopo.NewServer(ctx, "cell1") tmc := tmclient.NewTabletManagerClient() wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmc) primaryDb := fakesqldb.New(t) + defer primaryDb.Close() primaryDb.AddQuery("create database if not exists `test_keyspace`", &sqltypes.Result{InsertID: 0, RowsAffected: 0}) tablet1 := testlib.NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_REPLICA, primaryDb) diff --git a/go/vt/vtctl/grpcvtctldserver/query.go b/go/vt/vtctl/grpcvtctldserver/query.go new file mode 100644 index 00000000000..2fac399bd4f --- /dev/null +++ b/go/vt/vtctl/grpcvtctldserver/query.go @@ -0,0 +1,243 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package grpcvtctldserver + +import ( + "fmt" + "strings" + "time" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/schematools" + + querypb "vitess.io/vitess/go/vt/proto/query" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + "vitess.io/vitess/go/vt/proto/vttime" +) + +const ( + alterSingleSchemaMigrationSql = `alter vitess_migration %a ` + alterAllSchemaMigrationSql = `alter vitess_migration %s all` + selectSchemaMigrationsSql = `select + * + from _vt.schema_migrations where %s %s %s` + AllMigrationsIndicator = "all" +) + +func alterSchemaMigrationQuery(command, uuid string) (string, error) { + if strings.ToLower(uuid) == AllMigrationsIndicator { + return fmt.Sprintf(alterAllSchemaMigrationSql, command), nil + } + return sqlparser.ParseAndBind(alterSingleSchemaMigrationSql+command, sqltypes.StringBindVariable(uuid)) +} + +func selectSchemaMigrationsQuery(condition, order, skipLimit string) string { + return fmt.Sprintf(selectSchemaMigrationsSql, condition, order, skipLimit) +} + +// rowToSchemaMigration converts a single row into a SchemaMigration protobuf. 
+func rowToSchemaMigration(row sqltypes.RowNamedValues) (sm *vtctldatapb.SchemaMigration, err error) { + sm = new(vtctldatapb.SchemaMigration) + sm.Uuid = row.AsString("migration_uuid", "") + sm.Keyspace = row.AsString("keyspace", "") + sm.Shard = row.AsString("shard", "") + sm.Schema = row.AsString("mysql_schema", "") + sm.Table = row.AsString("mysql_table", "") + sm.MigrationStatement = row.AsString("migration_statement", "") + + sm.Strategy, err = schematools.ParseSchemaMigrationStrategy(row.AsString("strategy", "")) + if err != nil { + return nil, err + } + + sm.Options = row.AsString("options", "") + + sm.AddedAt, err = valueToVTTime(row.AsString("added_timestamp", "")) + if err != nil { + return nil, err + } + + sm.RequestedAt, err = valueToVTTime(row.AsString("requested_timestamp", "")) + if err != nil { + return nil, err + } + + sm.ReadyAt, err = valueToVTTime(row.AsString("ready_timestamp", "")) + if err != nil { + return nil, err + } + + sm.StartedAt, err = valueToVTTime(row.AsString("started_timestamp", "")) + if err != nil { + return nil, err + } + + sm.LivenessTimestamp, err = valueToVTTime(row.AsString("liveness_timestamp", "")) + if err != nil { + return nil, err + } + + sm.CompletedAt, err = valueToVTTime(row.AsString("completed_timestamp", "")) + if err != nil { + return nil, err + } + + sm.CleanedUpAt, err = valueToVTTime(row.AsString("cleanup_timestamp", "")) + if err != nil { + return nil, err + } + + sm.Status, err = schematools.ParseSchemaMigrationStatus(row.AsString("migration_status", "unknown")) + if err != nil { + return nil, err + } + + sm.LogPath = row.AsString("log_path", "") + sm.Artifacts = row.AsString("artifacts", "") + sm.Retries = row.AsUint64("retries", 0) + + if alias := row.AsString("tablet", ""); alias != "" { + sm.Tablet, err = topoproto.ParseTabletAlias(alias) + if err != nil { + return nil, err + } + } + + sm.TabletFailure = row.AsBool("tablet_failure", false) + sm.Progress = float32(row.AsFloat64("progress", 0)) + 
sm.MigrationContext = row.AsString("migration_context", "") + sm.DdlAction = row.AsString("ddl_action", "") + sm.Message = row.AsString("message", "") + sm.EtaSeconds = row.AsInt64("eta_seconds", -1) + sm.RowsCopied = row.AsUint64("rows_copied", 0) + sm.TableRows = row.AsInt64("table_rows", 0) + sm.AddedUniqueKeys = uint32(row.AsUint64("added_unique_keys", 0)) + sm.RemovedUniqueKeys = uint32(row.AsUint64("removed_unique_keys", 0)) + sm.LogFile = row.AsString("log_file", "") + + sm.ArtifactRetention, err = valueToVTDuration(row.AsString("retain_artifacts_seconds", ""), "s") + if err != nil { + return nil, err + } + + sm.PostponeCompletion = row.AsBool("postpone_completion", false) + sm.RemovedUniqueKeyNames = row.AsString("removed_unique_key_names", "") + sm.DroppedNoDefaultColumnNames = row.AsString("dropped_no_default_column_names", "") + sm.ExpandedColumnNames = row.AsString("expanded_column_names", "") + sm.RevertibleNotes = row.AsString("revertible_notes", "") + sm.AllowConcurrent = row.AsBool("allow_concurrent", false) + sm.RevertedUuid = row.AsString("reverted_uuid", "") + sm.IsView = row.AsBool("is_view", false) + sm.ReadyToComplete = row.AsBool("ready_to_complete", false) + sm.VitessLivenessIndicator = row.AsInt64("vitess_liveness_indicator", 0) + sm.UserThrottleRatio = float32(row.AsFloat64("user_throttle_ratio", 0)) + sm.SpecialPlan = row.AsString("special_plan", "") + + sm.LastThrottledAt, err = valueToVTTime(row.AsString("last_throttled_timestamp", "")) + if err != nil { + return nil, err + } + + sm.ComponentThrottled = row.AsString("component_throttled", "") + + sm.CancelledAt, err = valueToVTTime(row.AsString("cancelled_at", "")) + if err != nil { + return nil, err + } + + sm.PostponeLaunch = row.AsBool("postpone_launch", false) + sm.Stage = row.AsString("stage", "") + sm.CutoverAttempts = uint32(row.AsUint64("cutover_attempts", 0)) + sm.IsImmediateOperation = row.AsBool("is_immediate_operation", false) + + sm.ReviewedAt, err = 
valueToVTTime(row.AsString("reviewed_timestamp", "")) + if err != nil { + return nil, err + } + + sm.ReadyToCompleteAt, err = valueToVTTime(row.AsString("ready_to_complete_timestamp", "")) + if err != nil { + return nil, err + } + + return sm, nil +} + +// valueToVTTime converts a SQL timestamp string into a vttime Time type, first +// parsing the raw string value into a Go Time type in the local timezone. This +// is a correct conversion only if the vtctld is set to the same timezone as the +// vttablet that stored the value. +func valueToVTTime(s string) (*vttime.Time, error) { + if s == "" { + return nil, nil + } + + gotime, err := time.ParseInLocation(sqltypes.TimestampFormat, s, time.Local) + if err != nil { + return nil, err + } + + return protoutil.TimeToProto(gotime), nil +} + +// valueToVTDuration converts a SQL string into a vttime Duration type. It takes +// a defaultUnit in the event the value is a bare numeral (e.g. 124 vs 124s). +func valueToVTDuration(s string, defaultUnit string) (*vttime.Duration, error) { + if s == "" { + return nil, nil + } + + switch s[len(s)-1] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + s += defaultUnit + } + + godur, err := time.ParseDuration(s) + if err != nil { + return nil, err + } + + return protoutil.DurationToProto(godur), nil +} + +// queryResultForTabletResults aggregates given results into a combined result set +func queryResultForTabletResults(results map[string]*sqltypes.Result) *sqltypes.Result { + var qr = &sqltypes.Result{} + defaultFields := []*querypb.Field{{ + Name: "Tablet", + Type: sqltypes.VarBinary, + Charset: collations.CollationBinaryID, + Flags: uint32(querypb.MySqlFlag_BINARY_FLAG), + }} + var row2 []sqltypes.Value + for tabletAlias, result := range results { + if qr.Fields == nil { + qr.Fields = append(qr.Fields, defaultFields...) + qr.Fields = append(qr.Fields, result.Fields...) 
+ } + for _, row := range result.Rows { + row2 = nil + row2 = append(row2, sqltypes.NewVarBinary(tabletAlias)) + row2 = append(row2, row...) + qr.Rows = append(qr.Rows, row2) + } + } + return qr +} diff --git a/go/vt/vtctl/grpcvtctldserver/query_test.go b/go/vt/vtctl/grpcvtctldserver/query_test.go new file mode 100644 index 00000000000..6073d3bc395 --- /dev/null +++ b/go/vt/vtctl/grpcvtctldserver/query_test.go @@ -0,0 +1,236 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package grpcvtctldserver + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/vtctl/schematools" + + "vitess.io/vitess/go/test/utils" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + "vitess.io/vitess/go/vt/proto/vttime" +) + +var now = time.Now() + +func TestRowToSchemaMigration(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + row sqltypes.RowNamedValues + expected *vtctldatapb.SchemaMigration + shouldErr bool + }{ + { + row: sqltypes.RowNamedValues(map[string]sqltypes.Value{ + "migration_uuid": sqltypes.NewVarChar("abc"), + "keyspace": sqltypes.NewVarChar("testks"), + "shard": sqltypes.NewVarChar("shard"), + "mysql_schema": sqltypes.NewVarChar("_vt"), + "mysql_table": sqltypes.NewVarChar("t1"), + "migration_statement": sqltypes.NewVarChar("alter table t1 rename foo to bar"), + "strategy": sqltypes.NewVarChar(schematools.SchemaMigrationStrategyName(vtctldatapb.SchemaMigration_ONLINE)), + "requested_timestamp": sqltypes.NewTimestamp(mysqlTimestamp(now)), + "eta_seconds": sqltypes.NewInt64(10), + }), + expected: &vtctldatapb.SchemaMigration{ + Uuid: "abc", + Keyspace: "testks", + Shard: "shard", + Schema: "_vt", + Table: "t1", + MigrationStatement: "alter table t1 rename foo to bar", + Strategy: vtctldatapb.SchemaMigration_ONLINE, + RequestedAt: protoutil.TimeToProto(now.Truncate(time.Second)), + EtaSeconds: 10, + }, + }, + { + name: "eta_seconds defaults to -1", + row: sqltypes.RowNamedValues(map[string]sqltypes.Value{}), + expected: &vtctldatapb.SchemaMigration{ + Strategy: vtctldatapb.SchemaMigration_DIRECT, + EtaSeconds: -1, + }, + }, + { + name: "bad data", + row: sqltypes.RowNamedValues(map[string]sqltypes.Value{ + "tablet": sqltypes.NewVarChar("not-an-alias"), + }), + shouldErr: true, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, 
func(t *testing.T) { + out, err := rowToSchemaMigration(test.row) + if test.shouldErr { + assert.Error(t, err) + return + } + + require.NoError(t, err) + utils.MustMatch(t, test.expected, out) + }) + } +} + +func mysqlTimestamp(t time.Time) string { + return t.Local().Format(sqltypes.TimestampFormat) +} + +func TestValueToVTTime(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + value string + expected *vttime.Time + shouldErr bool + }{ + { + value: mysqlTimestamp(now), + expected: protoutil.TimeToProto(now.Truncate(time.Second)), + }, + { + name: "empty string", + value: "", + expected: nil, + }, + { + name: "parse error", + value: "2006/01/02", + shouldErr: true, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + out, err := valueToVTTime(test.value) + if test.shouldErr { + assert.Error(t, err, "expected parse error") + return + } + + require.NoError(t, err) + utils.MustMatch(t, test.expected, out, "failed to convert %s into vttime", test.value) + }) + } +} + +func TestValueToVTDuration(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + value string + defaultUnit string + expected *vttime.Duration + shouldErr bool + }{ + { + value: "12s", + expected: protoutil.DurationToProto(12 * time.Second), + }, + { + value: "1h10m", + expected: protoutil.DurationToProto(time.Hour + 10*time.Minute), + }, + { + name: "no unit in value", + value: "120", + defaultUnit: "s", + expected: protoutil.DurationToProto(120 * time.Second), + }, + { + name: "empty", + expected: nil, + }, + { + name: "bad input", + value: "abcd", + shouldErr: true, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + out, err := valueToVTDuration(test.value, test.defaultUnit) + if test.shouldErr { + assert.Error(t, err, "expected parse error") + return + } + + require.NoError(t, err) + utils.MustMatch(t, test.expected, out, "failed to convert %s into vttime 
duration", test.value) + }) + } +} + +func TestAlterSchemaMigrationQuery(t *testing.T) { + uuid := "4e5dcf80_354b_11eb_82cd_f875a4d24e90" + + tcases := []struct { + command string + uuid string + expect string + }{ + { + command: "cleanup", + uuid: uuid, + expect: "alter vitess_migration '4e5dcf80_354b_11eb_82cd_f875a4d24e90' cleanup", + }, + { + command: "cancel", + uuid: uuid, + expect: "alter vitess_migration '4e5dcf80_354b_11eb_82cd_f875a4d24e90' cancel", + }, + { + command: "cancel", + uuid: "all", + expect: "alter vitess_migration cancel all", + }, + { + command: "cancel", + uuid: "ALL", + expect: "alter vitess_migration cancel all", + }, + } + for _, tcase := range tcases { + testName := fmt.Sprintf("%s %s", tcase.command, tcase.uuid) + t.Run(testName, func(t *testing.T) { + query, err := alterSchemaMigrationQuery(tcase.command, tcase.uuid) + assert.NoError(t, err) + assert.Equal(t, tcase.expect, query) + }) + } +} diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go index 834abe5bb75..cc3378172e9 100644 --- a/go/vt/vtctl/grpcvtctldserver/server.go +++ b/go/vt/vtctl/grpcvtctldserver/server.go @@ -24,6 +24,7 @@ import ( "io" "net/http" "path/filepath" + "runtime/debug" "sort" "strings" "sync" @@ -31,13 +32,13 @@ import ( "golang.org/x/sync/semaphore" "google.golang.org/grpc" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/event" "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sets" "vitess.io/vitess/go/sqlescape" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/concurrency" @@ -60,6 +61,7 @@ import ( "vitess.io/vitess/go/vt/vtctl/schematools" "vitess.io/vitess/go/vt/vtctl/workflow" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tmclient" logutilpb "vitess.io/vitess/go/vt/proto/logutil" @@ -71,11 +73,14 @@ import ( vschemapb 
"vitess.io/vitess/go/vt/proto/vschema" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice" - "vitess.io/vitess/go/vt/proto/vtrpc" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) const ( initShardPrimaryOperation = "InitShardPrimary" + + // DefaultWaitReplicasTimeout is the default value for waitReplicasTimeout, which is used when calling method ApplySchema. + DefaultWaitReplicasTimeout = 10 * time.Second ) // VtctldServer implements the Vtctld RPC service protocol. @@ -109,7 +114,7 @@ func NewTestVtctldServer(ts *topo.Server, tmc tmclient.TabletManagerClient) *Vtc func panicHandler(err *error) { if x := recover(); x != nil { - *err = fmt.Errorf("uncaught panic: %v", x) + *err = fmt.Errorf("uncaught panic: %v from: %v", x, string(debug.Stack())) } } @@ -121,7 +126,7 @@ func (s *VtctldServer) AddCellInfo(ctx context.Context, req *vtctldatapb.AddCell defer panicHandler(&err) if req.CellInfo.Root == "" { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "CellInfo.Root must be non-empty") + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "CellInfo.Root must be non-empty") return nil, err } @@ -225,7 +230,7 @@ func (s *VtctldServer) ApplySchema(ctx context.Context, req *vtctldatapb.ApplySc span.Annotate("ddl_strategy", req.DdlStrategy) if len(req.Sql) == 0 { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "Sql must be a non-empty array") + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "Sql must be a non-empty array") return nil, err } @@ -263,10 +268,7 @@ func (s *VtctldServer) ApplySchema(ctx context.Context, req *vtctldatapb.ApplySc logstream = append(logstream, e) }) - executor := schemamanager.NewTabletExecutor(migrationContext, s.ts, s.tmc, logger, waitReplicasTimeout) - if req.AllowLongUnavailability { - executor.AllowBigSchemaChange() - } + executor := schemamanager.NewTabletExecutor(migrationContext, s.ts, s.tmc, logger, waitReplicasTimeout, req.BatchSize) if err = 
executor.SetDDLStrategy(req.DdlStrategy); err != nil { err = vterrors.Wrapf(err, "invalid DdlStrategy: %s", req.DdlStrategy) @@ -287,12 +289,19 @@ func (s *VtctldServer) ApplySchema(ctx context.Context, req *vtctldatapb.ApplySc ) if err != nil { - return &vtctldatapb.ApplySchemaResponse{}, err + return nil, err } - return &vtctldatapb.ApplySchemaResponse{ - UuidList: execResult.UUIDs, - }, err + resp = &vtctldatapb.ApplySchemaResponse{ + UuidList: execResult.UUIDs, + RowsAffectedByShard: make(map[string]uint64, len(execResult.SuccessShards)), + } + + for _, shard := range execResult.SuccessShards { + resp.RowsAffectedByShard[shard.Shard] = shard.Result.RowsAffected + } + + return resp, err } // ApplyVSchema is part of the vtctlservicepb.VtctldServer interface. @@ -318,7 +327,7 @@ func (s *VtctldServer) ApplyVSchema(ctx context.Context, req *vtctldatapb.ApplyV } if (req.Sql != "" && req.VSchema != nil) || (req.Sql == "" && req.VSchema == nil) { - err = vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "must pass exactly one of req.VSchema and req.Sql") + err = vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "must pass exactly one of req.VSchema and req.Sql") return nil, err } @@ -335,7 +344,7 @@ func (s *VtctldServer) ApplyVSchema(ctx context.Context, req *vtctldatapb.ApplyV } ddl, ok := stmt.(*sqlparser.AlterVschema) if !ok { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "error parsing VSchema DDL statement `%s`", req.Sql) + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "error parsing VSchema DDL statement `%s`", req.Sql) return nil, err } @@ -359,6 +368,12 @@ func (s *VtctldServer) ApplyVSchema(ctx context.Context, req *vtctldatapb.ApplyV return &vtctldatapb.ApplyVSchemaResponse{VSchema: vs}, nil } + _, err = vindexes.BuildKeyspace(vs) + if err != nil { + err = vterrors.Wrapf(err, "BuildKeyspace(%s)", req.Keyspace) + return nil, err + } + if err = s.ts.SaveVSchema(ctx, req.Keyspace, vs); err != nil { err = vterrors.Wrapf(err, "SaveVSchema(%s, %v)", req.Keyspace, 
req.VSchema) return nil, err @@ -413,6 +428,7 @@ func (s *VtctldServer) BackupShard(req *vtctldatapb.BackupShardRequest, stream v span.Annotate("shard", req.Shard) span.Annotate("allow_primary", req.AllowPrimary) span.Annotate("concurrency", req.Concurrency) + span.Annotate("incremental_from_pos", req.IncrementalFromPos) tablets, stats, err := reparentutil.ShardReplicationStatuses(ctx, s.ts, s.tmc, req.Keyspace, req.Shard) if err != nil { @@ -449,13 +465,13 @@ func (s *VtctldServer) BackupShard(req *vtctldatapb.BackupShardRequest, stream v } if backupTablet == nil { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no tablet available for backup") + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no tablet available for backup") return err } span.Annotate("tablet_alias", topoproto.TabletAliasString(backupTablet.Alias)) - r := &vtctldatapb.BackupRequest{Concurrency: req.Concurrency, AllowPrimary: req.AllowPrimary} + r := &vtctldatapb.BackupRequest{Concurrency: req.Concurrency, AllowPrimary: req.AllowPrimary, UpgradeSafe: req.UpgradeSafe, IncrementalFromPos: req.IncrementalFromPos} err = s.backupTablet(ctx, backupTablet, r, stream) return err } @@ -467,6 +483,7 @@ func (s *VtctldServer) backupTablet(ctx context.Context, tablet *topodatapb.Tabl Concurrency: int64(req.Concurrency), AllowPrimary: req.AllowPrimary, IncrementalFromPos: req.IncrementalFromPos, + UpgradeSafe: req.UpgradeSafe, } logStream, err := s.tmc.Backup(ctx, tablet, r) if err != nil { @@ -496,6 +513,37 @@ func (s *VtctldServer) backupTablet(ctx context.Context, tablet *topodatapb.Tabl } } +// CancelSchemaMigration is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) CancelSchemaMigration(ctx context.Context, req *vtctldatapb.CancelSchemaMigrationRequest) (resp *vtctldatapb.CancelSchemaMigrationResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.CancelSchemaMigration") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("uuid", req.Uuid) + + query, err := alterSchemaMigrationQuery("cancel", req.Uuid) + if err != nil { + return nil, err + } + + log.Info("Calling ApplySchema to cancel migration") + qr, err := s.ApplySchema(ctx, &vtctldatapb.ApplySchemaRequest{ + Keyspace: req.Keyspace, + Sql: []string{query}, + WaitReplicasTimeout: protoutil.DurationToProto(DefaultWaitReplicasTimeout), + }) + if err != nil { + return nil, err + } + + resp = &vtctldatapb.CancelSchemaMigrationResponse{ + RowsAffectedByShard: qr.RowsAffectedByShard, + } + return resp, nil +} + // ChangeTabletType is part of the vtctlservicepb.VtctldServer interface. func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.ChangeTabletTypeRequest) (resp *vtctldatapb.ChangeTabletTypeResponse, err error) { span, ctx := trace.NewSpan(ctx, "VtctldServer.ChangeTabletType") @@ -523,7 +571,7 @@ func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.Ch } if req.DryRun { - afterTablet := proto.Clone(tablet.Tablet).(*topodatapb.Tablet) + afterTablet := tablet.Tablet.CloneVT() afterTablet.Type = req.DbType return &vtctldatapb.ChangeTabletTypeResponse{ @@ -549,7 +597,7 @@ func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.Ch } if !shard.HasPrimary() { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no primary tablet for shard %v/%v", tablet.Keyspace, tablet.Shard) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no primary tablet for shard %v/%v", tablet.Keyspace, tablet.Shard) return nil, err } @@ -560,18 +608,18 @@ func (s *VtctldServer) ChangeTabletType(ctx context.Context, req 
*vtctldatapb.Ch } if shardPrimary.Type != topodatapb.TabletType_PRIMARY { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "TopologyServer has incosistent state for shard primary %v", topoproto.TabletAliasString(shard.PrimaryAlias)) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "TopologyServer has incosistent state for shard primary %v", topoproto.TabletAliasString(shard.PrimaryAlias)) return nil, err } if shardPrimary.Keyspace != tablet.Keyspace || shardPrimary.Shard != tablet.Shard { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "primary %v and potential replica %v not in same keypace shard (%v/%v)", topoproto.TabletAliasString(shard.PrimaryAlias), req.TabletAlias, tablet.Keyspace, tablet.Shard) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "primary %v and potential replica %v not in same keypace shard (%v/%v)", topoproto.TabletAliasString(shard.PrimaryAlias), req.TabletAlias, tablet.Keyspace, tablet.Shard) return nil, err } // We should clone the tablet and change its type to the expected type before checking the durability rules // Since we want to check the durability rules for the desired state and not before we make that change - expectedTablet := proto.Clone(tablet.Tablet).(*topodatapb.Tablet) + expectedTablet := tablet.Tablet.CloneVT() expectedTablet.Type = req.DbType err = s.tmc.ChangeType(ctx, tablet.Tablet, req.DbType, reparentutil.IsReplicaSemiSync(durability, shardPrimary.Tablet, expectedTablet)) if err != nil { @@ -594,6 +642,68 @@ func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.Ch }, nil } +// CleanupSchemaMigration is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) CleanupSchemaMigration(ctx context.Context, req *vtctldatapb.CleanupSchemaMigrationRequest) (resp *vtctldatapb.CleanupSchemaMigrationResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.CleanupSchemaMigration") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("uuid", req.Uuid) + + query, err := alterSchemaMigrationQuery("cleanup", req.Uuid) + if err != nil { + return nil, err + } + + log.Info("Calling ApplySchema to cleanup migration") + qr, err := s.ApplySchema(ctx, &vtctldatapb.ApplySchemaRequest{ + Keyspace: req.Keyspace, + Sql: []string{query}, + WaitReplicasTimeout: protoutil.DurationToProto(DefaultWaitReplicasTimeout), + }) + if err != nil { + return nil, err + } + + resp = &vtctldatapb.CleanupSchemaMigrationResponse{ + RowsAffectedByShard: qr.RowsAffectedByShard, + } + return resp, nil +} + +// CompleteSchemaMigration is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) CompleteSchemaMigration(ctx context.Context, req *vtctldatapb.CompleteSchemaMigrationRequest) (resp *vtctldatapb.CompleteSchemaMigrationResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.CompleteSchemaMigration") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("uuid", req.Uuid) + + query, err := alterSchemaMigrationQuery("complete", req.Uuid) + if err != nil { + return nil, err + } + + log.Info("Calling ApplySchema to complete migration") + qr, err := s.ApplySchema(ctx, &vtctldatapb.ApplySchemaRequest{ + Keyspace: req.Keyspace, + Sql: []string{query}, + WaitReplicasTimeout: protoutil.DurationToProto(DefaultWaitReplicasTimeout), + }) + if err != nil { + return nil, err + } + + resp = &vtctldatapb.CompleteSchemaMigrationResponse{ + RowsAffectedByShard: qr.RowsAffectedByShard, + } + return resp, nil +} + // CreateKeyspace is part of the vtctlservicepb.VtctldServer interface. 
func (s *VtctldServer) CreateKeyspace(ctx context.Context, req *vtctldatapb.CreateKeyspaceRequest) (resp *vtctldatapb.CreateKeyspaceResponse, err error) { span, ctx := trace.NewSpan(ctx, "VtctldServer.CreateKeyspace") @@ -845,7 +955,7 @@ func (s *VtctldServer) DeleteKeyspace(ctx context.Context, req *vtctldatapb.Dele if len(shards) > 0 { if !req.Recursive { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "keyspace %v still has %d shards; use Recursive=true or remove them manually", req.Keyspace, len(shards)) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "keyspace %v still has %d shards; use Recursive=true or remove them manually", req.Keyspace, len(shards)) return nil, err } @@ -917,7 +1027,7 @@ func (s *VtctldServer) DeleteSrvVSchema(ctx context.Context, req *vtctldatapb.De defer panicHandler(&err) if req.Cell == "" { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell must be non-empty") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "cell must be non-empty") return nil, err } @@ -976,6 +1086,7 @@ func (s *VtctldServer) EmergencyReparentShard(ctx context.Context, req *vtctldat span.Annotate("wait_replicas_timeout_sec", waitReplicasTimeout.Seconds()) span.Annotate("prevent_cross_cell_promotion", req.PreventCrossCellPromotion) + span.Annotate("wait_for_all_tablets", req.WaitForAllTablets) m := sync.RWMutex{} logstream := []*logutilpb.Event{} @@ -993,6 +1104,7 @@ func (s *VtctldServer) EmergencyReparentShard(ctx context.Context, req *vtctldat NewPrimaryAlias: req.NewPrimary, IgnoreReplicas: sets.New(ignoreReplicaAliases...), WaitReplicasTimeout: waitReplicasTimeout, + WaitAllTablets: req.WaitForAllTablets, PreventCrossCellPromotion: req.PreventCrossCellPromotion, }, ) @@ -1087,14 +1199,14 @@ func (s *VtctldServer) ExecuteHook(ctx context.Context, req *vtctldatapb.Execute span.Annotate("tablet_alias", topoproto.TabletAliasString(req.TabletAlias)) if req.TabletHookRequest == nil { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, 
"TabletHookRequest cannot be nil") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "TabletHookRequest cannot be nil") return nil, err } span.Annotate("hook_name", req.TabletHookRequest.Name) if strings.Contains(req.TabletHookRequest.Name, "/") { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "hook name cannot contain a '/'; was %v", req.TabletHookRequest.Name) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "hook name cannot contain a '/'; was %v", req.TabletHookRequest.Name) return nil, err } @@ -1232,7 +1344,7 @@ func (s *VtctldServer) GetCellInfo(ctx context.Context, req *vtctldatapb.GetCell defer panicHandler(&err) if req.Cell == "" { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell field is required") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "cell field is required") return nil, err } @@ -1348,7 +1460,7 @@ func (s *VtctldServer) GetPermissions(ctx context.Context, req *vtctldatapb.GetP span.Annotate("tablet_alias", topoproto.TabletAliasString(req.TabletAlias)) ti, err := s.ts.GetTablet(ctx, req.TabletAlias) if err != nil { - err = vterrors.Errorf(vtrpc.Code_NOT_FOUND, "Failed to get tablet %v: %v", req.TabletAlias, err) + err = vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "Failed to get tablet %v: %v", req.TabletAlias, err) return nil, err } @@ -1445,6 +1557,124 @@ func (s *VtctldServer) GetSchema(ctx context.Context, req *vtctldatapb.GetSchema }, nil } +func (s *VtctldServer) GetSchemaMigrations(ctx context.Context, req *vtctldatapb.GetSchemaMigrationsRequest) (resp *vtctldatapb.GetSchemaMigrationsResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.GetShard") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + + var condition string + switch { + case req.Uuid != "": + span.Annotate("uuid", req.Uuid) + if !schema.IsOnlineDDLUUID(req.Uuid) { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%s is not a valid UUID", req.Uuid) + } + + 
condition, err = sqlparser.ParseAndBind("migration_uuid=%a", sqltypes.StringBindVariable(req.Uuid)) + case req.MigrationContext != "": + span.Annotate("migration_context", req.MigrationContext) + condition, err = sqlparser.ParseAndBind("migration_context=%a", sqltypes.StringBindVariable(req.MigrationContext)) + case req.Status != vtctldatapb.SchemaMigration_UNKNOWN: + span.Annotate("migration_status", schematools.SchemaMigrationStatusName(req.Status)) + condition, err = sqlparser.ParseAndBind("migration_status=%a", sqltypes.StringBindVariable(schematools.SchemaMigrationStatusName(req.Status))) + case req.Recent != nil: + var d time.Duration + d, _, err = protoutil.DurationFromProto(req.Recent) + if err != nil { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "error parsing duration: %s", err) + } + + span.Annotate("recent", d.String()) + condition = fmt.Sprintf("requested_timestamp > now() - interval %0.f second", d.Seconds()) + default: + condition = "migration_uuid like '%'" + } + + if err != nil { + return nil, fmt.Errorf("Error generating OnlineDDL query: %+v", err) + } + + order := " order by `id` " + switch req.Order { + case vtctldatapb.QueryOrdering_DESCENDING: + order += "DESC" + default: + order += "ASC" + } + + var skipLimit string + if req.Limit > 0 { + skipLimit = fmt.Sprintf("LIMIT %v,%v", req.Skip, req.Limit) + span.Annotate("skip_limit", skipLimit) + } + + query := selectSchemaMigrationsQuery(condition, order, skipLimit) + + tabletsResp, err := s.GetTablets(ctx, &vtctldatapb.GetTabletsRequest{ + Cells: nil, + Strict: false, + Keyspace: req.Keyspace, + TabletType: topodatapb.TabletType_PRIMARY, + }) + if err != nil { + return nil, err + } + + var ( + m sync.Mutex + wg sync.WaitGroup + rec concurrency.AllErrorRecorder + results = map[string]*sqltypes.Result{} + ) + for _, tablet := range tabletsResp.Tablets { + + wg.Add(1) + go func(tablet *topodatapb.Tablet) { + defer wg.Done() + + alias := topoproto.TabletAliasString(tablet.Alias) + 
fetchResp, err := s.ExecuteFetchAsDBA(ctx, &vtctldatapb.ExecuteFetchAsDBARequest{ + TabletAlias: tablet.Alias, + Query: query, + MaxRows: 10_000, + }) + if err != nil { + rec.RecordError(err) + return + } + + m.Lock() + defer m.Unlock() + + results[alias] = sqltypes.Proto3ToResult(fetchResp.Result) + }(tablet) + } + + wg.Wait() + if rec.HasErrors() { + return nil, rec.Error() + } + + // combine results. This loses sorting if there's more then 1 tablet + combinedResults := queryResultForTabletResults(results) + + resp = new(vtctldatapb.GetSchemaMigrationsResponse) + for _, row := range combinedResults.Named().Rows { + var m *vtctldatapb.SchemaMigration + m, err = rowToSchemaMigration(row) + if err != nil { + return nil, err + } + + resp.Migrations = append(resp.Migrations, m) + } + + return resp, err +} + // GetShard is part of the vtctlservicepb.VtctldServer interface. func (s *VtctldServer) GetShard(ctx context.Context, req *vtctldatapb.GetShardRequest) (resp *vtctldatapb.GetShardResponse, err error) { span, ctx := trace.NewSpan(ctx, "VtctldServer.GetShard") @@ -1569,6 +1799,9 @@ func (s *VtctldServer) UpdateThrottlerConfig(ctx context.Context, req *vtctldata if throttlerConfig == nil { throttlerConfig = &topodatapb.ThrottlerConfig{} } + if throttlerConfig.ThrottledApps == nil { + throttlerConfig.ThrottledApps = make(map[string]*topodatapb.ThrottledAppRule) + } if req.CustomQuerySet { // custom query provided throttlerConfig.CustomQuery = req.CustomQuery @@ -1591,6 +1824,9 @@ func (s *VtctldServer) UpdateThrottlerConfig(ctx context.Context, req *vtctldata if req.CheckAsCheckShard { throttlerConfig.CheckAsCheckSelf = false } + if req.ThrottledApp != nil && req.ThrottledApp.Name != "" { + throttlerConfig.ThrottledApps[req.ThrottledApp.Name] = req.ThrottledApp + } return throttlerConfig } @@ -1960,6 +2196,7 @@ func (s *VtctldServer) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWor span.Annotate("keyspace", req.Keyspace) span.Annotate("active_only", 
req.ActiveOnly) + span.Annotate("include_logs", req.IncludeLogs) resp, err = s.ws.GetWorkflows(ctx, req) return resp, err @@ -1973,12 +2210,12 @@ func (s *VtctldServer) InitShardPrimary(ctx context.Context, req *vtctldatapb.In defer panicHandler(&err) if req.Keyspace == "" { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "keyspace field is required") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "keyspace field is required") return nil, err } if req.Shard == "" { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "shard field is required") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "shard field is required") return nil, err } @@ -2077,7 +2314,7 @@ func (s *VtctldServer) InitShardPrimaryLocked( if !ok { return fmt.Errorf("primary-elect tablet %v is not in the shard", topoproto.TabletAliasString(req.PrimaryElectTabletAlias)) } - ev.NewPrimary = proto.Clone(primaryElectTabletInfo.Tablet).(*topodatapb.Tablet) + ev.NewPrimary = primaryElectTabletInfo.Tablet.CloneVT() // Check the primary is the only primary is the shard, or -force was used. _, primaryTabletMap := topotools.SortedTabletMap(tabletMap) @@ -2243,6 +2480,191 @@ func (s *VtctldServer) InitShardPrimaryLocked( return nil } +// LaunchSchemaMigration is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) LaunchSchemaMigration(ctx context.Context, req *vtctldatapb.LaunchSchemaMigrationRequest) (resp *vtctldatapb.LaunchSchemaMigrationResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.LaunchSchemaMigration") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("uuid", req.Uuid) + + query, err := alterSchemaMigrationQuery("launch", req.Uuid) + if err != nil { + return nil, err + } + + log.Info("Calling ApplySchema to launch migration") + qr, err := s.ApplySchema(ctx, &vtctldatapb.ApplySchemaRequest{ + Keyspace: req.Keyspace, + Sql: []string{query}, + WaitReplicasTimeout: protoutil.DurationToProto(DefaultWaitReplicasTimeout), + }) + if err != nil { + return nil, err + } + + resp = &vtctldatapb.LaunchSchemaMigrationResponse{ + RowsAffectedByShard: qr.RowsAffectedByShard, + } + return resp, nil +} + +// LookupVindexCreate is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) LookupVindexCreate(ctx context.Context, req *vtctldatapb.LookupVindexCreateRequest) (resp *vtctldatapb.LookupVindexCreateResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.LookupVindexCreate") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("workflow", req.Workflow) + span.Annotate("keyspace", req.Keyspace) + span.Annotate("continue_after_copy_with_owner", req.ContinueAfterCopyWithOwner) + span.Annotate("cells", req.Cells) + span.Annotate("tablet_types", req.TabletTypes) + + resp, err = s.ws.LookupVindexCreate(ctx, req) + return resp, err +} + +// LookupVindexExternalize is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) LookupVindexExternalize(ctx context.Context, req *vtctldatapb.LookupVindexExternalizeRequest) (resp *vtctldatapb.LookupVindexExternalizeResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.LookupVindexExternalize") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("name", req.Name) + span.Annotate("keyspace", req.Keyspace) + span.Annotate("table_keyspace", req.TableKeyspace) + + resp, err = s.ws.LookupVindexExternalize(ctx, req) + return resp, err +} + +// MaterializeCreate is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) MaterializeCreate(ctx context.Context, req *vtctldatapb.MaterializeCreateRequest) (resp *vtctldatapb.MaterializeCreateResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.MaterializeCreate") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("workflow", req.Settings.Workflow) + span.Annotate("source_keyspace", req.Settings.SourceKeyspace) + span.Annotate("target_keyspace", req.Settings.TargetKeyspace) + span.Annotate("cells", req.Settings.Cell) + span.Annotate("tablet_types", req.Settings.TabletTypes) + span.Annotate("table_settings", fmt.Sprintf("%+v", req.Settings.TableSettings)) + + err = s.ws.Materialize(ctx, req.Settings) + return resp, err +} + +// MigrateCreate is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) MigrateCreate(ctx context.Context, req *vtctldatapb.MigrateCreateRequest) (resp *vtctldatapb.WorkflowStatusResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.MigrateCreate") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("target_keyspace", req.TargetKeyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("cells", req.Cells) + span.Annotate("tablet_types", req.TabletTypes) + span.Annotate("on_ddl", req.OnDdl) + + resp, err = s.ws.MigrateCreate(ctx, req) + return resp, err +} + +// MountRegister is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) MountRegister(ctx context.Context, req *vtctldatapb.MountRegisterRequest) (resp *vtctldatapb.MountRegisterResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.MountRegister") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("topo_type", req.TopoType) + span.Annotate("topo_server", req.TopoServer) + span.Annotate("topo_root", req.TopoRoot) + span.Annotate("mount_name", req.Name) + + resp, err = s.ws.MountRegister(ctx, req) + return resp, err +} + +// MountUnregister is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) MountUnregister(ctx context.Context, req *vtctldatapb.MountUnregisterRequest) (resp *vtctldatapb.MountUnregisterResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.MountUnregister") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("mount_name", req.Name) + + resp, err = s.ws.MountUnregister(ctx, req) + return resp, err +} + +// MountList is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) MountList(ctx context.Context, req *vtctldatapb.MountListRequest) (resp *vtctldatapb.MountListResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.MountList") + defer span.Finish() + + defer panicHandler(&err) + + resp, err = s.ws.MountList(ctx, req) + return resp, err +} + +// MountShow is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) MountShow(ctx context.Context, req *vtctldatapb.MountShowRequest) (resp *vtctldatapb.MountShowResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.MountShow") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("mount_name", req.Name) + + resp, err = s.ws.MountShow(ctx, req) + return resp, err +} + +// MoveTablesCreate is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) MoveTablesCreate(ctx context.Context, req *vtctldatapb.MoveTablesCreateRequest) (resp *vtctldatapb.WorkflowStatusResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.MoveTablesCreate") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.TargetKeyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("cells", req.Cells) + span.Annotate("tablet_types", req.TabletTypes) + span.Annotate("on_ddl", req.OnDdl) + + resp, err = s.ws.MoveTablesCreate(ctx, req) + return resp, err +} + +// MoveTablesComplete is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) MoveTablesComplete(ctx context.Context, req *vtctldatapb.MoveTablesCompleteRequest) (resp *vtctldatapb.MoveTablesCompleteResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.MoveTablesComplete") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.TargetKeyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("keep_data", req.KeepData) + span.Annotate("keep_routing_rules", req.KeepRoutingRules) + span.Annotate("dry_run", req.DryRun) + + resp, err = s.ws.MoveTablesComplete(ctx, req) + return resp, err +} + // PingTablet is part of the vtctlservicepb.VtctldServer interface. func (s *VtctldServer) PingTablet(ctx context.Context, req *vtctldatapb.PingTabletRequest) (resp *vtctldatapb.PingTabletResponse, err error) { span, ctx := trace.NewSpan(ctx, "VtctldServer.PingTablet") @@ -2375,7 +2797,7 @@ func (s *VtctldServer) RefreshState(ctx context.Context, req *vtctldatapb.Refres defer panicHandler(&err) if req.TabletAlias == nil { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "RefreshState requires a tablet alias") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "RefreshState requires a tablet alias") return nil, err } @@ -2403,12 +2825,12 @@ func (s *VtctldServer) RefreshStateByShard(ctx context.Context, req *vtctldatapb defer panicHandler(&err) if req.Keyspace == "" { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "RefreshStateByShard requires a keyspace") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "RefreshStateByShard requires a keyspace") return nil, err } if req.Shard == "" { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "RefreshStateByShard requires a shard") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "RefreshStateByShard requires a shard") return nil, err } @@ -2452,7 +2874,7 @@ func (s *VtctldServer) ReloadSchema(ctx context.Context, req *vtctldatapb.Reload ti, err := s.ts.GetTablet(ctx, req.TabletAlias) if err != nil { - err = 
vterrors.Errorf(vtrpc.Code_NOT_FOUND, "GetTablet(%v) failed: %v", req.TabletAlias, err) + err = vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "GetTablet(%v) failed: %v", req.TabletAlias, err) return nil, err } @@ -2514,7 +2936,7 @@ func (s *VtctldServer) ReloadSchemaKeyspace(ctx context.Context, req *vtctldatap shards, err := s.ts.GetShardNames(ctx, req.Keyspace) if err != nil { - err = vterrors.Errorf(vtrpc.Code_INTERNAL, "GetShardNames(%v) failed: %v", req.Keyspace, err) + err = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "GetShardNames(%v) failed: %v", req.Keyspace, err) return nil, err } @@ -2639,7 +3061,7 @@ func (s *VtctldServer) ReparentTablet(ctx context.Context, req *vtctldatapb.Repa defer panicHandler(&err) if req.Tablet == nil { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "tablet alias must not be nil") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "tablet alias must not be nil") return nil, err } @@ -2656,7 +3078,7 @@ func (s *VtctldServer) ReparentTablet(ctx context.Context, req *vtctldatapb.Repa } if !shard.HasPrimary() { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no primary tablet for shard %v/%v", tablet.Keyspace, tablet.Shard) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no primary tablet for shard %v/%v", tablet.Keyspace, tablet.Shard) return nil, err } @@ -2667,17 +3089,17 @@ func (s *VtctldServer) ReparentTablet(ctx context.Context, req *vtctldatapb.Repa } if shardPrimary.Type != topodatapb.TabletType_PRIMARY { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "TopologyServer has incosistent state for shard primary %v", topoproto.TabletAliasString(shard.PrimaryAlias)) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "TopologyServer has incosistent state for shard primary %v", topoproto.TabletAliasString(shard.PrimaryAlias)) return nil, err } if shardPrimary.Keyspace != tablet.Keyspace || shardPrimary.Shard != tablet.Shard { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "primary %v and 
potential replica %v not in same keypace shard (%v/%v)", topoproto.TabletAliasString(shard.PrimaryAlias), topoproto.TabletAliasString(req.Tablet), tablet.Keyspace, tablet.Shard) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "primary %v and potential replica %v not in same keypace shard (%v/%v)", topoproto.TabletAliasString(shard.PrimaryAlias), topoproto.TabletAliasString(req.Tablet), tablet.Keyspace, tablet.Shard) return nil, err } if topoproto.TabletAliasEqual(req.Tablet, shardPrimary.Alias) { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cannot ReparentTablet current shard primary (%v) onto itself", topoproto.TabletAliasString(req.Tablet)) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cannot ReparentTablet current shard primary (%v) onto itself", topoproto.TabletAliasString(req.Tablet)) return nil, err } @@ -2702,6 +3124,24 @@ func (s *VtctldServer) ReparentTablet(ctx context.Context, req *vtctldatapb.Repa }, nil } +// ReshardCreate is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) ReshardCreate(ctx context.Context, req *vtctldatapb.ReshardCreateRequest) (resp *vtctldatapb.WorkflowStatusResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.ReshardCreate") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("cells", req.Cells) + span.Annotate("source_shards", req.SourceShards) + span.Annotate("target_shards", req.TargetShards) + span.Annotate("tablet_types", req.TabletTypes) + span.Annotate("on_ddl", req.OnDdl) + + resp, err = s.ws.ReshardCreate(ctx, req) + return resp, err +} func (s *VtctldServer) RestoreFromBackup(req *vtctldatapb.RestoreFromBackupRequest, stream vtctlservicepb.Vtctld_RestoreFromBackupServer) (err error) { span, ctx := trace.NewSpan(stream.Context(), "VtctldServer.RestoreFromBackup") defer span.Finish() @@ -2723,9 +3163,10 @@ func (s *VtctldServer) RestoreFromBackup(req *vtctldatapb.RestoreFromBackupReque span.Annotate("shard", ti.Shard) r := &tabletmanagerdatapb.RestoreFromBackupRequest{ - BackupTime: req.BackupTime, - RestoreToPos: req.RestoreToPos, - DryRun: req.DryRun, + BackupTime: req.BackupTime, + RestoreToPos: req.RestoreToPos, + RestoreToTimestamp: req.RestoreToTimestamp, + DryRun: req.DryRun, } logStream, err := s.tmc.RestoreFromBackup(ctx, ti.Tablet, r) if err != nil { @@ -2754,7 +3195,7 @@ func (s *VtctldServer) RestoreFromBackup(req *vtctldatapb.RestoreFromBackupReque if mysqlctl.DisableActiveReparents { return nil } - if req.RestoreToPos != "" && !req.DryRun { + if (req.RestoreToPos != "" || !protoutil.TimeFromProto(req.RestoreToTimestamp).UTC().IsZero()) && !req.DryRun { // point in time recovery. Do not restore replication return nil } @@ -2778,6 +3219,37 @@ func (s *VtctldServer) RestoreFromBackup(req *vtctldatapb.RestoreFromBackupReque } } +// RetrySchemaMigration is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) RetrySchemaMigration(ctx context.Context, req *vtctldatapb.RetrySchemaMigrationRequest) (resp *vtctldatapb.RetrySchemaMigrationResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.RetrySchemaMigration") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("uuid", req.Uuid) + + query, err := alterSchemaMigrationQuery("retry", req.Uuid) + if err != nil { + return nil, err + } + + log.Info("Calling ApplySchema to retry migration") + qr, err := s.ApplySchema(ctx, &vtctldatapb.ApplySchemaRequest{ + Keyspace: req.Keyspace, + Sql: []string{query}, + WaitReplicasTimeout: protoutil.DurationToProto(DefaultWaitReplicasTimeout), + }) + if err != nil { + return nil, err + } + + resp = &vtctldatapb.RetrySchemaMigrationResponse{ + RowsAffectedByShard: qr.RowsAffectedByShard, + } + return resp, nil +} + // RunHealthCheck is part of the vtctlservicepb.VtctldServer interface. func (s *VtctldServer) RunHealthCheck(ctx context.Context, req *vtctldatapb.RunHealthCheckRequest) (resp *vtctldatapb.RunHealthCheckResponse, err error) { span, ctx := trace.NewSpan(ctx, "VtctldServer.RunHealthCheck") @@ -2825,7 +3297,7 @@ func (s *VtctldServer) SetKeyspaceDurabilityPolicy(ctx context.Context, req *vtc policyValid := reparentutil.CheckDurabilityPolicyExists(req.DurabilityPolicy) if !policyValid { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "durability policy <%v> is not a valid policy. Please register it as a policy first", req.DurabilityPolicy) + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "durability policy <%v> is not a valid policy. 
Please register it as a policy first", req.DurabilityPolicy) return nil, err } @@ -2936,7 +3408,7 @@ func (s *VtctldServer) SetShardTabletControl(ctx context.Context, req *vtctldata defer unlock(&err) si, err := s.ts.UpdateShardFields(ctx, req.Keyspace, req.Shard, func(si *topo.ShardInfo) error { - return si.UpdateSourceDeniedTables(ctx, req.TabletType, req.Cells, req.Remove, req.DeniedTables) + return si.UpdateDeniedTables(ctx, req.TabletType, req.Cells, req.Remove, req.DeniedTables) }) switch { @@ -2972,7 +3444,7 @@ func (s *VtctldServer) SetWritable(ctx context.Context, req *vtctldatapb.SetWrit defer panicHandler(&err) if req.TabletAlias == nil { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "SetWritable.TabletAlias is required") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "SetWritable.TabletAlias is required") return nil, err } @@ -3341,7 +3813,7 @@ func (s *VtctldServer) StartReplication(ctx context.Context, req *vtctldatapb.St defer panicHandler(&err) if req.TabletAlias == nil { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "StartReplication.TabletAlias is required") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "StartReplication.TabletAlias is required") return nil, err } @@ -3360,7 +3832,7 @@ func (s *VtctldServer) StartReplication(ctx context.Context, req *vtctldatapb.St } if !shard.HasPrimary() { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no primary tablet for shard %v/%v", tablet.Keyspace, tablet.Shard) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no primary tablet for shard %v/%v", tablet.Keyspace, tablet.Shard) return nil, err } @@ -3371,12 +3843,12 @@ func (s *VtctldServer) StartReplication(ctx context.Context, req *vtctldatapb.St } if shardPrimary.Type != topodatapb.TabletType_PRIMARY { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "TopologyServer has incosistent state for shard primary %v", topoproto.TabletAliasString(shard.PrimaryAlias)) + err = 
vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "TopologyServer has incosistent state for shard primary %v", topoproto.TabletAliasString(shard.PrimaryAlias)) return nil, err } if shardPrimary.Keyspace != tablet.Keyspace || shardPrimary.Shard != tablet.Shard { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "primary %v and replica %v not in same keypace shard (%v/%v)", topoproto.TabletAliasString(shard.PrimaryAlias), topoproto.TabletAliasString(tablet.Alias), tablet.Keyspace, tablet.Shard) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "primary %v and replica %v not in same keypace shard (%v/%v)", topoproto.TabletAliasString(shard.PrimaryAlias), topoproto.TabletAliasString(tablet.Alias), tablet.Keyspace, tablet.Shard) return nil, err } @@ -3406,7 +3878,7 @@ func (s *VtctldServer) StopReplication(ctx context.Context, req *vtctldatapb.Sto defer panicHandler(&err) if req.TabletAlias == nil { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "StopReplication.TabletAlias is required") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "StopReplication.TabletAlias is required") return nil, err } @@ -3435,7 +3907,7 @@ func (s *VtctldServer) TabletExternallyReparented(ctx context.Context, req *vtct defer panicHandler(&err) if req.Tablet == nil { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "TabletExternallyReparentedRequest.Tablet must not be nil") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "TabletExternallyReparentedRequest.Tablet must not be nil") return nil, err } @@ -3469,7 +3941,7 @@ func (s *VtctldServer) TabletExternallyReparented(ctx context.Context, req *vtct log.Infof("TabletExternallyReparented: executing tablet type change %v -> PRIMARY on %v", tablet.Type, topoproto.TabletAliasString(req.Tablet)) ev := &events.Reparent{ ShardInfo: *shard, - NewPrimary: proto.Clone(tablet.Tablet).(*topodatapb.Tablet), + NewPrimary: tablet.Tablet.CloneVT(), OldPrimary: &topodatapb.Tablet{ Alias: shard.PrimaryAlias, Type: 
topodatapb.TabletType_PRIMARY, @@ -3521,9 +3993,7 @@ func (s *VtctldServer) UpdateCellInfo(ctx context.Context, req *vtctldatapb.Upda var updatedCi *topodatapb.CellInfo err = s.ts.UpdateCellInfoFields(ctx, req.Name, func(ci *topodatapb.CellInfo) error { - defer func() { - updatedCi = proto.Clone(ci).(*topodatapb.CellInfo) - }() + defer func() { updatedCi = ci.CloneVT() }() changed := false @@ -3569,9 +4039,7 @@ func (s *VtctldServer) UpdateCellsAlias(ctx context.Context, req *vtctldatapb.Up var updatedCa *topodatapb.CellsAlias err = s.ts.UpdateCellsAlias(ctx, req.Name, func(ca *topodatapb.CellsAlias) error { - defer func() { - updatedCa = proto.Clone(ca).(*topodatapb.CellsAlias) - }() + defer func() { updatedCa = ca.CloneVT() }() ca.Cells = req.CellsAlias.Cells return nil @@ -4342,6 +4810,131 @@ func (s *VtctldServer) ValidateVSchema(ctx context.Context, req *vtctldatapb.Val return resp, err } +// VDiffCreate is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) VDiffCreate(ctx context.Context, req *vtctldatapb.VDiffCreateRequest) (resp *vtctldatapb.VDiffCreateResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.VDiffCreate") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.TargetKeyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("uuid", req.Uuid) + span.Annotate("source_cells", req.SourceCells) + span.Annotate("target_cells", req.TargetCells) + span.Annotate("tablet_types", req.TabletTypes) + span.Annotate("tables", req.Tables) + span.Annotate("auto_retry", req.AutoRetry) + + resp, err = s.ws.VDiffCreate(ctx, req) + return resp, err +} + +// VDiffDelete is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) VDiffDelete(ctx context.Context, req *vtctldatapb.VDiffDeleteRequest) (resp *vtctldatapb.VDiffDeleteResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.VDiffDelete") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.TargetKeyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("argument", req.Arg) + + resp, err = s.ws.VDiffDelete(ctx, req) + return resp, err +} + +// VDiffResume is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) VDiffResume(ctx context.Context, req *vtctldatapb.VDiffResumeRequest) (resp *vtctldatapb.VDiffResumeResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.VDiffResume") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.TargetKeyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("uuid", req.Uuid) + + resp, err = s.ws.VDiffResume(ctx, req) + return resp, err +} + +// VDiffShow is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) VDiffShow(ctx context.Context, req *vtctldatapb.VDiffShowRequest) (resp *vtctldatapb.VDiffShowResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.VDiffShow") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.TargetKeyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("argument", req.Arg) + + resp, err = s.ws.VDiffShow(ctx, req) + return resp, err +} + +// VDiffStop is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) VDiffStop(ctx context.Context, req *vtctldatapb.VDiffStopRequest) (resp *vtctldatapb.VDiffStopResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.VDiffStop") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.TargetKeyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("uuid", req.Uuid) + + resp, err = s.ws.VDiffStop(ctx, req) + return resp, err +} + +// WorkflowDelete is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) WorkflowDelete(ctx context.Context, req *vtctldatapb.WorkflowDeleteRequest) (resp *vtctldatapb.WorkflowDeleteResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.WorkflowDelete") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("workflow", req.Workflow) + + resp, err = s.ws.WorkflowDelete(ctx, req) + return resp, err +} + +// WorkflowStatus is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) WorkflowStatus(ctx context.Context, req *vtctldatapb.WorkflowStatusRequest) (resp *vtctldatapb.WorkflowStatusResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.WorkflowStatus") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("workflow", req.Workflow) + + resp, err = s.ws.WorkflowStatus(ctx, req) + return resp, err +} + +// WorkflowSwitchTraffic is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) WorkflowSwitchTraffic(ctx context.Context, req *vtctldatapb.WorkflowSwitchTrafficRequest) (resp *vtctldatapb.WorkflowSwitchTrafficResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.WorkflowSwitchTraffic") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("tablet-types", req.TabletTypes) + span.Annotate("direction", req.Direction) + span.Annotate("enable-reverse-replication", req.EnableReverseReplication) + + resp, err = s.ws.WorkflowSwitchTraffic(ctx, req) + return resp, err +} + // WorkflowUpdate is part of the vtctlservicepb.VtctldServer interface. func (s *VtctldServer) WorkflowUpdate(ctx context.Context, req *vtctldatapb.WorkflowUpdateRequest) (resp *vtctldatapb.WorkflowUpdateResponse, err error) { span, ctx := trace.NewSpan(ctx, "VtctldServer.WorkflowUpdate") @@ -4354,6 +4947,7 @@ func (s *VtctldServer) WorkflowUpdate(ctx context.Context, req *vtctldatapb.Work span.Annotate("cells", req.TabletRequest.Cells) span.Annotate("tablet_types", req.TabletRequest.TabletTypes) span.Annotate("on_ddl", req.TabletRequest.OnDdl) + span.Annotate("state", req.TabletRequest.State) resp, err = s.ws.WorkflowUpdate(ctx, req) return resp, err @@ -4369,7 +4963,7 @@ func (s *VtctldServer) getTopologyCell(ctx context.Context, cellPath string) (*v // extract cell and relative path parts := strings.Split(cellPath, "/") if parts[0] != "" || len(parts) < 2 { - err := vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid path: %s", cellPath) + err := vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid path: %s", cellPath) return nil, err } cell := parts[1] @@ -4378,7 +4972,7 @@ func (s *VtctldServer) getTopologyCell(ctx context.Context, cellPath string) (*v conn, err := s.ts.ConnForCell(ctx, cell) if err != nil { - err := vterrors.Errorf(vtrpc.Code_UNAVAILABLE, "error fetching connection to cell %s: %v", cell, err) + err := 
vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "error fetching connection to cell %s: %v", cell, err) return nil, err } @@ -4387,7 +4981,7 @@ func (s *VtctldServer) getTopologyCell(ctx context.Context, cellPath string) (*v if dataErr == nil { result, err := topo.DecodeContent(relativePath, data, false) if err != nil { - err := vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "error decoding file content for cell %s: %v", cellPath, err) + err := vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "error decoding file content for cell %s: %v", cellPath, err) return nil, err } topoCell.Data = result @@ -4399,7 +4993,7 @@ func (s *VtctldServer) getTopologyCell(ctx context.Context, cellPath string) (*v children, childrenErr := conn.ListDir(ctx, relativePath, false /*full*/) if childrenErr != nil && dataErr != nil { - err := vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cell %s with path %s has no file contents and no children: %v", cell, cellPath, err) + err := vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cell %s with path %s has no file contents and no children: %v", cell, cellPath, err) return nil, err } diff --git a/go/vt/vtctl/grpcvtctldserver/server_slow_test.go b/go/vt/vtctl/grpcvtctldserver/server_slow_test.go index 858ac271a70..3100855e370 100644 --- a/go/vt/vtctl/grpcvtctldserver/server_slow_test.go +++ b/go/vt/vtctl/grpcvtctldserver/server_slow_test.go @@ -24,6 +24,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/topo" @@ -43,7 +45,6 @@ func TestEmergencyReparentShardSlow(t *testing.T) { tests := []struct { name string - ts *topo.Server tmc tmclient.TabletManagerClient tablets []*topodatapb.Tablet @@ -62,7 +63,6 @@ func TestEmergencyReparentShardSlow(t *testing.T) { // concurrently, so the total time is only around 30 seconds, but // that's still a long time for a unit test! 
name: "nil WaitReplicasTimeout and request takes 29 seconds is ok", - ts: memorytopo.NewServer("zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -140,7 +140,7 @@ func TestEmergencyReparentShardSlow(t *testing.T) { }, "zone1-0000000200": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", @@ -180,7 +180,6 @@ func TestEmergencyReparentShardSlow(t *testing.T) { }, { name: "nil WaitReplicasTimeout and request takes 31 seconds is error", - ts: memorytopo.NewServer("zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -258,7 +257,7 @@ func TestEmergencyReparentShardSlow(t *testing.T) { }, "zone1-0000000200": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", @@ -290,8 +289,6 @@ func TestEmergencyReparentShardSlow(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt @@ -302,13 +299,17 @@ func TestEmergencyReparentShardSlow(t *testing.T) { t.Skip("tt.EmergencyReparentShardRequest = nil implies test not ready to run") } - testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, ForceSetShardPrimary: true, SkipShardCreation: false, }, tt.tablets...) - vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) resp, err := vtctld.EmergencyReparentShard(ctx, tt.req) @@ -343,7 +344,6 @@ func TestPlannedReparentShardSlow(t *testing.T) { tests := []struct { name string - ts *topo.Server tmc tmclient.TabletManagerClient tablets []*topodatapb.Tablet @@ -357,7 +357,6 @@ func TestPlannedReparentShardSlow(t *testing.T) { // nil WaitReplicasTimeout in the request results in a default 30 // second WaitReplicasTimeout. name: "nil WaitReplicasTimeout and request takes 29 seconds is ok", - ts: memorytopo.NewServer("zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -402,6 +401,21 @@ func TestPlannedReparentShardSlow(t *testing.T) { Error: nil, }, }, + // This is only needed to verify reachability, so empty results are fine. 
+ PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000101": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, PrimaryPositionResults: map[string]struct { Position string Error error @@ -460,7 +474,6 @@ func TestPlannedReparentShardSlow(t *testing.T) { }, { name: "nil WaitReplicasTimeout and request takes 31 seconds is error", - ts: memorytopo.NewServer("zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -505,6 +518,21 @@ func TestPlannedReparentShardSlow(t *testing.T) { Error: nil, }, }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000101": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, PrimaryPositionResults: map[string]struct { Position string Error error @@ -563,21 +591,23 @@ func TestPlannedReparentShardSlow(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, ForceSetShardPrimary: true, SkipShardCreation: false, }, tt.tablets...) 
- vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) resp, err := vtctld.PlannedReparentShard(ctx, tt.req) @@ -610,8 +640,10 @@ func TestPlannedReparentShardSlow(t *testing.T) { func TestSleepTablet(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddTablet(ctx, t, ts, &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -704,10 +736,7 @@ func TestSleepTablet(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) diff --git a/go/vt/vtctl/grpcvtctldserver/server_test.go b/go/vt/vtctl/grpcvtctldserver/server_test.go index d737ede2fd9..864d86e45ee 100644 --- a/go/vt/vtctl/grpcvtctldserver/server_test.go +++ b/go/vt/vtctl/grpcvtctldserver/server_test.go @@ -23,6 +23,7 @@ import ( "io" "os" "sort" + "strings" "testing" "time" @@ -30,20 +31,20 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" hk "vitess.io/vitess/go/vt/hook" - "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" 
"vitess.io/vitess/go/vt/vtctl/localvtctldclient" + "vitess.io/vitess/go/vt/vtctl/schematools" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/vttablet/tmclienttest" @@ -94,7 +95,8 @@ func TestPanicHandler(t *testing.T) { func TestAddCellInfo(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []struct { name string ts *topo.Server @@ -102,7 +104,7 @@ func TestAddCellInfo(t *testing.T) { shouldErr bool }{ { - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), req: &vtctldatapb.AddCellInfoRequest{ Name: "zone2", CellInfo: &topodatapb.CellInfo{ @@ -113,7 +115,7 @@ func TestAddCellInfo(t *testing.T) { }, { name: "cell already exists", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), req: &vtctldatapb.AddCellInfoRequest{ Name: "zone1", CellInfo: &topodatapb.CellInfo{ @@ -125,7 +127,7 @@ func TestAddCellInfo(t *testing.T) { }, { name: "no cell root", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), req: &vtctldatapb.AddCellInfoRequest{ Name: "zone2", CellInfo: &topodatapb.CellInfo{ @@ -137,10 +139,7 @@ func TestAddCellInfo(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -161,7 +160,8 @@ func TestAddCellInfo(t *testing.T) { func TestAddCellsAlias(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []struct { name string ts *topo.Server @@ -170,7 +170,7 @@ func TestAddCellsAlias(t *testing.T) { shouldErr bool }{ { - ts: memorytopo.NewServer("zone1", "zone2", "zone3"), + ts: memorytopo.NewServer(ctx, "zone1", "zone2", "zone3"), req: &vtctldatapb.AddCellsAliasRequest{ Name: 
"zone", Cells: []string{"zone1", "zone2", "zone3"}, @@ -178,7 +178,7 @@ func TestAddCellsAlias(t *testing.T) { }, { name: "alias exists", - ts: memorytopo.NewServer("zone1", "zone2", "zone3"), + ts: memorytopo.NewServer(ctx, "zone1", "zone2", "zone3"), setup: func(ts *topo.Server) error { return ts.CreateCellsAlias(ctx, "zone", &topodatapb.CellsAlias{ Cells: []string{"zone1", "zone2"}, @@ -192,7 +192,7 @@ func TestAddCellsAlias(t *testing.T) { }, { name: "alias overlaps", - ts: memorytopo.NewServer("zone1", "zone2", "zone3"), + ts: memorytopo.NewServer(ctx, "zone1", "zone2", "zone3"), setup: func(ts *topo.Server) error { return ts.CreateCellsAlias(context.Background(), "zone_a", &topodatapb.CellsAlias{ Cells: []string{"zone1", "zone3"}, @@ -207,10 +207,7 @@ func TestAddCellsAlias(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - if tt.setup != nil { err := tt.setup(tt.ts) require.NoError(t, err, "test setup failed") @@ -236,7 +233,8 @@ func TestAddCellsAlias(t *testing.T) { func TestApplyRoutingRules(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []struct { name string cells []string @@ -321,11 +319,8 @@ func TestApplyRoutingRules(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - ts, factory := memorytopo.NewServerAndFactory(tt.cells...) + ts, factory := memorytopo.NewServerAndFactory(ctx, tt.cells...) 
if tt.topoDown { factory.SetError(errors.New("topo down for testing")) } @@ -421,12 +416,10 @@ func TestApplyVSchema(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -508,7 +501,8 @@ func TestApplyVSchema(t *testing.T) { } func TestBackup(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []struct { name string ts *topo.Server @@ -520,7 +514,7 @@ func TestBackup(t *testing.T) { }{ { name: "ok", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ Backups: map[string]struct { Events []*logutilpb.Event @@ -558,7 +552,7 @@ func TestBackup(t *testing.T) { }, { name: "cannot backup primary", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ Backups: map[string]struct { Events []*logutilpb.Event @@ -593,7 +587,7 @@ func TestBackup(t *testing.T) { }, { name: "allow-primary", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ Backups: map[string]struct { Events []*logutilpb.Event @@ -629,7 +623,7 @@ func TestBackup(t *testing.T) { }, { name: "no tablet", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ Backups: map[string]struct { Events []*logutilpb.Event @@ -664,7 +658,7 @@ func TestBackup(t *testing.T) { }, { name: "midstream error", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ 
Backups: map[string]struct { Events []*logutilpb.Event @@ -702,7 +696,6 @@ func TestBackup(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { if tt.tablet != nil { testutil.AddTablet(ctx, t, tt.ts, tt.tablet, nil) @@ -740,7 +733,8 @@ func TestBackup(t *testing.T) { } func TestBackupShard(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []struct { name string ts *topo.Server @@ -752,7 +746,7 @@ func TestBackupShard(t *testing.T) { }{ { name: "ok", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ Backups: map[string]struct { Events []*logutilpb.Event @@ -817,7 +811,7 @@ func TestBackupShard(t *testing.T) { }, { name: "cannot backup primary", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ Backups: map[string]struct { Events []*logutilpb.Event @@ -860,7 +854,7 @@ func TestBackupShard(t *testing.T) { }, { name: "allow-primary", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ Backups: map[string]struct { Events []*logutilpb.Event @@ -919,9 +913,75 @@ func TestBackupShard(t *testing.T) { assert.Equal(t, 3, len(responses), "expected 3 messages from backupclient stream") }, }, + { + name: "incremental-from-pos", + ts: memorytopo.NewServer(ctx, "zone1"), + tmc: &testutil.TabletManagerClient{ + Backups: map[string]struct { + Events []*logutilpb.Event + EventInterval time.Duration + EventJitter time.Duration + ErrorAfter time.Duration + }{ + "zone1-0000000100": { + Events: []*logutilpb.Event{{}, {}, {}}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000200": { + Position: "some-position", + }, + }, + ReplicationStatusResults: map[string]struct { + Position *replicationdatapb.Status + Error error + }{ 
+ "zone1-0000000100": { + Position: &replicationdatapb.Status{ + ReplicationLagSeconds: 0, + }, + }, + }, + SetReplicationSourceResults: map[string]error{ + "zone1-0000000100": nil, + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "ks", + Shard: "-", + Type: topodatapb.TabletType_REPLICA, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Keyspace: "ks", + Shard: "-", + Type: topodatapb.TabletType_PRIMARY, + }, + }, + req: &vtctldatapb.BackupShardRequest{ + Keyspace: "ks", + Shard: "-", + IncrementalFromPos: "auto", + }, + assertion: func(t *testing.T, responses []*vtctldatapb.BackupResponse, err error) { + assert.ErrorIs(t, err, io.EOF, "expected Recv loop to end with io.EOF") + assert.Equal(t, 3, len(responses), "expected 3 messages from backupclient stream") + }, + }, { name: "no available tablet", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ Backups: map[string]struct { Events []*logutilpb.Event @@ -974,7 +1034,6 @@ func TestBackupShard(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ @@ -1013,6 +1072,210 @@ func TestBackupShard(t *testing.T) { } } +func TestCancelSchemaMigration(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + tablets []*topodatapb.Tablet + tmc *testutil.TabletManagerClient + req *vtctldatapb.CancelSchemaMigrationRequest + expected *vtctldatapb.CancelSchemaMigrationResponse + shouldErr bool + }{ + { + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: 
&testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{ + RowsAffected: 1, + }, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.CancelSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + expected: &vtctldatapb.CancelSchemaMigrationResponse{ + RowsAffectedByShard: map[string]uint64{ + "-80": 1, + "80-": 0, + }, + }, + }, + { + name: "no shard primary", + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{}, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.CancelSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + shouldErr: true, + }, + { + name: "executeQuery failure", + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: 
"80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Error: assert.AnError, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.CancelSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + shouldErr: true, + }, + // execute query failure + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ + AlsoSetShardPrimary: true, + }, test.tablets...) 
+ + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + resp, err := vtctld.CancelSchemaMigration(ctx, test.req) + if test.shouldErr { + assert.Error(t, err) + return + } + + require.NoError(t, err) + utils.MustMatch(t, test.expected, resp) + }) + } +} + func TestChangeTabletType(t *testing.T) { t.Parallel() @@ -1183,129 +1446,537 @@ func TestChangeTabletType(t *testing.T) { Type: topodatapb.TabletType_PRIMARY, }, }, - req: &vtctldatapb.ChangeTabletTypeRequest{ - TabletAlias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 100, - }, - DbType: topodatapb.TabletType_PRIMARY, + req: &vtctldatapb.ChangeTabletTypeRequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + DbType: topodatapb.TabletType_PRIMARY, + }, + expected: nil, + shouldErr: true, + }, + { + name: "primary demotions not allowed", + cells: []string{"zone1"}, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_PRIMARY, + }, + }, + req: &vtctldatapb.ChangeTabletTypeRequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + DbType: topodatapb.TabletType_REPLICA, + }, + expected: nil, + shouldErr: true, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, tt.cells...) + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &testutil.TabletManagerClient{ + TopoServer: ts, + }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) + + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ + AlsoSetShardPrimary: true, + }, tt.tablets...) 
+ + resp, err := vtctld.ChangeTabletType(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + utils.MustMatch(t, tt.expected, resp) + + // If we are testing a dry-run, then the tablet in the actual + // topo should match the BeforeTablet in the response. Otherwise, + // the tablet in the actual topo should match the AfterTablet in + // the response. + expectedRealType := resp.AfterTablet.Type + msg := "ChangeTabletType did not cause topo update" + if tt.req.DryRun { + expectedRealType = resp.BeforeTablet.Type + msg = "dryrun type change resulted in real type change" + } + + tablet, err := ts.GetTablet(ctx, tt.req.TabletAlias) + assert.NoError(t, err, + "could not load tablet %s from topo after type change %v -> %v [dryrun=%t]", + topoproto.TabletAliasString(tt.req.TabletAlias), + resp.BeforeTablet.Type, + resp.AfterTablet.Type, + resp.WasDryRun, + ) + utils.MustMatch(t, expectedRealType, tablet.Type, msg) + }) + } + + t.Run("tabletmanager failure", func(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &testutil.TabletManagerClient{ + TopoServer: nil, + }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) + + testutil.AddTablet(ctx, t, ts, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_REPLICA, + }, nil) + testutil.AddTablet(ctx, t, ts, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_PRIMARY, + }, &testutil.AddTabletOptions{ + AlsoSetShardPrimary: true, + }) + + _, err := vtctld.ChangeTabletType(ctx, &vtctldatapb.ChangeTabletTypeRequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + DbType: 
topodatapb.TabletType_RDONLY, + }) + assert.Error(t, err) + }) +} + +func TestCleanupSchemaMigration(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + tablets []*topodatapb.Tablet + tmc *testutil.TabletManagerClient + req *vtctldatapb.CleanupSchemaMigrationRequest + expected *vtctldatapb.CleanupSchemaMigrationResponse + shouldErr bool + }{ + { + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{ + RowsAffected: 1, + }, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.CleanupSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + expected: &vtctldatapb.CleanupSchemaMigrationResponse{ + RowsAffectedByShard: map[string]uint64{ + "-80": 1, + "80-": 0, + }, + }, + }, + { + name: "no shard primary", + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + 
Response: &querypb.QueryResult{}, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.CleanupSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + shouldErr: true, + }, + { + name: "executeQuery failure", + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Error: assert.AnError, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.CleanupSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + shouldErr: true, + }, + // execute query failure + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ + AlsoSetShardPrimary: true, + }, test.tablets...) 
+ + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + resp, err := vtctld.CleanupSchemaMigration(ctx, test.req) + if test.shouldErr { + assert.Error(t, err) + return + } + + require.NoError(t, err) + utils.MustMatch(t, test.expected, resp) + }) + } +} + +func TestCompleteSchemaMigration(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + tablets []*topodatapb.Tablet + tmc *testutil.TabletManagerClient + req *vtctldatapb.CompleteSchemaMigrationRequest + expected *vtctldatapb.CompleteSchemaMigrationResponse + shouldErr bool + }{ + { + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{ + RowsAffected: 1, + }, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.CompleteSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + expected: &vtctldatapb.CompleteSchemaMigrationResponse{ + RowsAffectedByShard: map[string]uint64{ + "-80": 1, + "80-": 0, + }, + }, + }, + { + name: "no shard primary", + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + 
Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{}, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.CompleteSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", }, - expected: nil, shouldErr: true, }, { - name: "primary demotions not allowed", - cells: []string{"zone1"}, + name: "executeQuery failure", tablets: []*topodatapb.Tablet{ { + Keyspace: "ks", + Shard: "-80", Alias: &topodatapb.TabletAlias{ Cell: "zone1", Uid: 100, }, + Type: topodatapb.TabletType_PRIMARY, + }, + { Keyspace: "ks", - Shard: "0", - Type: topodatapb.TabletType_PRIMARY, + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, }, }, - req: &vtctldatapb.ChangeTabletTypeRequest{ - TabletAlias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 100, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Error: assert.AnError, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, }, - DbType: topodatapb.TabletType_REPLICA, }, - expected: nil, + req: &vtctldatapb.CompleteSchemaMigrationRequest{ + Keyspace: 
"ks", + Uuid: "abc", + }, shouldErr: true, }, + // execute query failure } - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer(tt.cells...) - vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &testutil.TabletManagerClient{ - TopoServer: ts, - }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) + ctx, Complete := context.WithCancel(context.Background()) + defer Complete() + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, - }, tt.tablets...) + }, test.tablets...) - resp, err := vtctld.ChangeTabletType(ctx, tt.req) - if tt.shouldErr { + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + resp, err := vtctld.CompleteSchemaMigration(ctx, test.req) + if test.shouldErr { assert.Error(t, err) return } - assert.NoError(t, err) - utils.MustMatch(t, tt.expected, resp) - - // If we are testing a dry-run, then the tablet in the actual - // topo should match the BeforeTablet in the response. Otherwise, - // the tablet in the actual topo should match the AfterTablet in - // the response. 
- expectedRealType := resp.AfterTablet.Type - msg := "ChangeTabletType did not cause topo update" - if tt.req.DryRun { - expectedRealType = resp.BeforeTablet.Type - msg = "dryrun type change resulted in real type change" - } - - tablet, err := ts.GetTablet(ctx, tt.req.TabletAlias) - assert.NoError(t, err, - "could not load tablet %s from topo after type change %v -> %v [dryrun=%t]", - topoproto.TabletAliasString(tt.req.TabletAlias), - resp.BeforeTablet.Type, - resp.AfterTablet.Type, - resp.WasDryRun, - ) - utils.MustMatch(t, expectedRealType, tablet.Type, msg) + require.NoError(t, err) + utils.MustMatch(t, test.expected, resp) }) } - - t.Run("tabletmanager failure", func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - ts := memorytopo.NewServer("zone1") - vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &testutil.TabletManagerClient{ - TopoServer: nil, - }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) - - testutil.AddTablet(ctx, t, ts, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 100, - }, - Keyspace: "ks", - Shard: "0", - Type: topodatapb.TabletType_REPLICA, - }, nil) - testutil.AddTablet(ctx, t, ts, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 101, - }, - Keyspace: "ks", - Shard: "0", - Type: topodatapb.TabletType_PRIMARY, - }, &testutil.AddTabletOptions{ - AlsoSetShardPrimary: true, - }) - - _, err := vtctld.ChangeTabletType(ctx, &vtctldatapb.ChangeTabletTypeRequest{ - TabletAlias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 100, - }, - DbType: topodatapb.TabletType_RDONLY, - }) - assert.Error(t, err) - }) } func TestCreateKeyspace(t *testing.T) { @@ -1545,8 +2216,9 @@ func TestCreateKeyspace(t *testing.T) { t.Skip("test not yet implemented") } - ctx := context.Background() - ts := memorytopo.NewServer(cells...) 
+ ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -1822,8 +2494,9 @@ func TestCreateShard(t *testing.T) { t.Skip("focusing on other tests") } - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -1853,7 +2526,8 @@ func TestCreateShard(t *testing.T) { func TestDeleteCellInfo(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []struct { name string ts *topo.Server @@ -1861,14 +2535,14 @@ func TestDeleteCellInfo(t *testing.T) { shouldErr bool }{ { - ts: memorytopo.NewServer("zone1", "zone2"), + ts: memorytopo.NewServer(ctx, "zone1", "zone2"), req: &vtctldatapb.DeleteCellInfoRequest{ Name: "zone2", }, }, { name: "cell does not exist", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), req: &vtctldatapb.DeleteCellInfoRequest{ Name: "zone2", }, @@ -1877,10 +2551,7 @@ func TestDeleteCellInfo(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -1900,7 +2571,8 @@ func TestDeleteCellInfo(t *testing.T) { func TestDeleteCellsAlias(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []struct { name string ts 
*topo.Server @@ -1909,7 +2581,7 @@ func TestDeleteCellsAlias(t *testing.T) { shouldErr bool }{ { - ts: memorytopo.NewServer("zone1", "zone2"), + ts: memorytopo.NewServer(ctx, "zone1", "zone2"), setup: func(ts *topo.Server) error { return ts.CreateCellsAlias(ctx, "zone", &topodatapb.CellsAlias{ Cells: []string{"zone1", "zone2"}, @@ -1921,7 +2593,7 @@ func TestDeleteCellsAlias(t *testing.T) { }, { name: "alias does not exist", - ts: memorytopo.NewServer("zone1", "zone2"), + ts: memorytopo.NewServer(ctx, "zone1", "zone2"), setup: func(ts *topo.Server) error { return ts.CreateCellsAlias(ctx, "zone_a", &topodatapb.CellsAlias{ Cells: []string{"zone1", "zone2"}, @@ -1935,10 +2607,7 @@ func TestDeleteCellsAlias(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - if tt.setup != nil { err := tt.setup(tt.ts) require.NoError(t, err, "test setup failed") @@ -1968,7 +2637,7 @@ func TestDeleteKeyspace(t *testing.T) { keyspaces []*vtctldatapb.Keyspace shards []*vtctldatapb.Shard srvKeyspaces map[string]map[string]*topodatapb.SrvKeyspace - before func(t *testing.T, ts *topo.Server, tt testcase) func() + before func(t *testing.T, ctx context.Context, ts *topo.Server, tt testcase) (context.Context, func()) topoErr error req *vtctldatapb.DeleteKeyspaceRequest expected *vtctldatapb.DeleteKeyspaceResponse @@ -2115,10 +2784,10 @@ func TestDeleteKeyspace(t *testing.T) { shards: nil, srvKeyspaces: nil, topoErr: nil, - before: func(t *testing.T, ts *topo.Server, tt testcase) func() { - _, unlock, err := ts.LockKeyspace(context.Background(), tt.req.Keyspace, "test.DeleteKeyspace") + before: func(t *testing.T, ctx context.Context, ts *topo.Server, tt testcase) (context.Context, func()) { + lctx, unlock, err := ts.LockKeyspace(ctx, tt.req.Keyspace, "test.DeleteKeyspace") require.NoError(t, err, "failed to lock keyspace %s before test", tt.req.Keyspace) - return func() { + return lctx, func() { unlock(&err) if !topo.IsErrType(err, 
topo.NoNode) { assert.NoError(t, err, "error while unlocking keyspace %s after test", tt.req.Keyspace) @@ -2146,10 +2815,10 @@ func TestDeleteKeyspace(t *testing.T) { shards: nil, srvKeyspaces: nil, topoErr: nil, - before: func(t *testing.T, ts *topo.Server, tt testcase) func() { - _, unlock, err := ts.LockKeyspace(context.Background(), tt.req.Keyspace, "test.DeleteKeyspace") + before: func(t *testing.T, ctx context.Context, ts *topo.Server, tt testcase) (context.Context, func()) { + lctx, unlock, err := ts.LockKeyspace(ctx, tt.req.Keyspace, "test.DeleteKeyspace") require.NoError(t, err, "failed to lock keyspace %s before test", tt.req.Keyspace) - return func() { + return lctx, func() { unlock(&err) if !topo.IsErrType(err, topo.NoNode) { assert.NoError(t, err, "error while unlocking keyspace %s after test", tt.req.Keyspace) @@ -2167,18 +2836,15 @@ func TestDeleteKeyspace(t *testing.T) { }, } - for _, tt := range tests { - tt := tt + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - t.Parallel() - cells := []string{"zone1", "zone2", "zone3"} - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) - defer cancel() - - ts, topofactory := memorytopo.NewServerAndFactory(cells...) + ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) 
+ defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -2219,12 +2885,15 @@ func TestDeleteKeyspace(t *testing.T) { }() if tt.before != nil { - if after := tt.before(t, ts, tt); after != nil { + var after func() + if ctx, after = tt.before(t, ctx, ts, tt); after != nil { defer after() } } - resp, err := vtctld.DeleteKeyspace(ctx, tt.req) + requestCtx, requestCancel := context.WithTimeout(ctx, time.Millisecond*50) + defer requestCancel() + resp, err := vtctld.DeleteKeyspace(requestCtx, tt.req) if tt.shouldErr { assert.Error(t, err) @@ -2247,7 +2916,7 @@ func TestDeleteShards(t *testing.T) { replicationGraphs []*topo.ShardReplicationInfo srvKeyspaces map[string]map[string]*topodatapb.SrvKeyspace topoErr error - before func(t *testing.T, ts *topo.Server, tt testcase) func() + before func(t *testing.T, ctx context.Context, ts *topo.Server, tt testcase) (context.Context, func()) req *vtctldatapb.DeleteShardsRequest expected *vtctldatapb.DeleteShardsResponse expectedRemainingShards []*vtctldatapb.Shard @@ -2614,11 +3283,11 @@ func TestDeleteShards(t *testing.T) { }, tablets: nil, topoErr: nil, - before: func(t *testing.T, ts *topo.Server, tt testcase) func() { + before: func(t *testing.T, ctx context.Context, ts *topo.Server, tt testcase) (context.Context, func()) { shard := tt.req.Shards[0] - _, unlock, err := ts.LockShard(context.Background(), shard.Keyspace, shard.Name, "test.DeleteShard") + lctx, unlock, err := ts.LockShard(ctx, shard.Keyspace, shard.Name, "test.DeleteShard") require.NoError(t, err, "failed to lock shard %s/%s before test", shard.Keyspace, shard.Name) - return func() { + return lctx, func() { unlock(&err) if !topo.IsErrType(err, topo.NoNode) { assert.NoError(t, err, "error while unlocking shard %s/%s after test", shard.Keyspace, shard.Name) @@ -2652,11 +3321,11 @@ func TestDeleteShards(t *testing.T) { }, tablets: nil, topoErr: nil, - 
before: func(t *testing.T, ts *topo.Server, tt testcase) func() { + before: func(t *testing.T, ctx context.Context, ts *topo.Server, tt testcase) (context.Context, func()) { shard := tt.req.Shards[0] - _, unlock, err := ts.LockShard(context.Background(), shard.Keyspace, shard.Name, "test.DeleteShard") + lctx, unlock, err := ts.LockShard(ctx, shard.Keyspace, shard.Name, "test.DeleteShard") require.NoError(t, err, "failed to lock shard %s/%s before test", shard.Keyspace, shard.Name) - return func() { + return lctx, func() { unlock(&err) if !topo.IsErrType(err, topo.NoNode) { assert.NoError(t, err, "error while unlocking shard %s/%s after test", shard.Keyspace, shard.Name) @@ -2689,7 +3358,7 @@ func TestDeleteShards(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) defer cancel() - ts, topofactory := memorytopo.NewServerAndFactory(cells...) + ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -2729,7 +3398,8 @@ func TestDeleteShards(t *testing.T) { } if tt.before != nil { - if after := tt.before(t, ts, tt); after != nil { + var after func() + if ctx, after = tt.before(t, ctx, ts, tt); after != nil { defer after() } } @@ -2750,7 +3420,6 @@ func TestDeleteShards(t *testing.T) { func TestDeleteSrvKeyspace(t *testing.T) { t.Parallel() - ctx := context.Background() tests := []struct { name string vschemas map[string]*vschemapb.SrvVSchema @@ -2825,7 +3494,9 @@ func TestDeleteSrvKeyspace(t *testing.T) { finalVSchemas[cell] = vschema } - ts := memorytopo.NewServer(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cells...) 
for cell, vschema := range tt.vschemas { err := ts.UpdateSrvVSchema(ctx, cell, vschema) require.NoError(t, err, "failed to update SrvVSchema in cell = %v, vschema = %+v", cell, vschema) @@ -3289,8 +3960,9 @@ func TestDeleteTablets(t *testing.T) { t.Skip("focusing on other tests") } - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -3354,6 +4026,9 @@ func TestDeleteTablets(t *testing.T) { func TestEmergencyReparentShard(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tests := []struct { name string ts *topo.Server @@ -3367,7 +4042,7 @@ func TestEmergencyReparentShard(t *testing.T) { }{ { name: "successful reparent", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -3442,7 +4117,7 @@ func TestEmergencyReparentShard(t *testing.T) { }, "zone1-0000000200": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", @@ -3487,7 +4162,7 @@ func TestEmergencyReparentShard(t *testing.T) { // the simplest way to trigger a failure is to attempt an ERS on a // shard that does not exist. 
name: "failed reparent", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: nil, req: &vtctldatapb.EmergencyReparentShardRequest{ @@ -3509,14 +4184,8 @@ func TestEmergencyReparentShard(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, ForceSetShardPrimary: true, @@ -3660,8 +4329,9 @@ func TestExecuteFetchAsApp(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddTablet(ctx, t, ts, tt.tablet, nil) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { @@ -3786,8 +4456,9 @@ func TestExecuteFetchAsDBA(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddTablet(ctx, t, ts, tt.tablet, nil) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { @@ -3808,6 +4479,9 @@ func TestExecuteFetchAsDBA(t *testing.T) { func TestExecuteHook(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tests := []struct { name string ts *topo.Server @@ -3818,7 +4492,7 @@ func TestExecuteHook(t *testing.T) { }{ { name: "ok", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ ExecuteHookResults: map[string]struct { Response *hk.HookResult @@ -3847,7 +4521,7 @@ func TestExecuteHook(t *testing.T) { }, { name: "nil 
hook request", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ ExecuteHookResults: map[string]struct { Response *hk.HookResult @@ -3877,7 +4551,7 @@ func TestExecuteHook(t *testing.T) { }, { name: "hook with slash", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ ExecuteHookResults: map[string]struct { Response *hk.HookResult @@ -3909,7 +4583,7 @@ func TestExecuteHook(t *testing.T) { }, { name: "no such tablet", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ ExecuteHookResults: map[string]struct { Response *hk.HookResult @@ -3939,7 +4613,7 @@ func TestExecuteHook(t *testing.T) { }, { name: "tablet hook failure", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ ExecuteHookResults: map[string]struct { Response *hk.HookResult @@ -3969,12 +4643,8 @@ func TestExecuteHook(t *testing.T) { }, } - ctx := context.Background() for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - testutil.AddTablets(ctx, t, tt.ts, nil, tt.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) @@ -3994,8 +4664,9 @@ func TestExecuteHook(t *testing.T) { func TestFindAllShardsInKeyspace(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4035,8 +4706,9 @@ func TestFindAllShardsInKeyspace(t *testing.T) { } func TestGetBackups(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4142,8 +4814,9 @@ func TestGetBackups(t *testing.T) { func TestGetKeyspace(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4167,8 +4840,9 @@ func TestGetKeyspace(t *testing.T) { func TestGetCellInfoNames(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2", "cell3") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2", "cell3") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4177,7 +4851,7 
@@ func TestGetCellInfoNames(t *testing.T) { assert.NoError(t, err) assert.ElementsMatch(t, []string{"cell1", "cell2", "cell3"}, resp.Names) - ts = memorytopo.NewServer() + ts = memorytopo.NewServer(ctx) vtctld = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4186,7 +4860,7 @@ func TestGetCellInfoNames(t *testing.T) { assert.NoError(t, err) assert.Empty(t, resp.Names) - ts, topofactory := memorytopo.NewServerAndFactory("cell1") + ts, topofactory := memorytopo.NewServerAndFactory(ctx, "cell1") vtctld = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4199,8 +4873,9 @@ func TestGetCellInfoNames(t *testing.T) { func TestGetCellInfo(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4209,7 +4884,7 @@ func TestGetCellInfo(t *testing.T) { ServerAddress: "example.com", Root: "vitess", } - input := proto.Clone(expected).(*topodatapb.CellInfo) + input := expected.CloneVT() require.NoError(t, ts.CreateCellInfo(ctx, "cell1", input)) resp, err := vtctld.GetCellInfo(ctx, &vtctldatapb.GetCellInfoRequest{Cell: "cell1"}) @@ -4226,8 +4901,9 @@ func TestGetCellInfo(t *testing.T) { func TestGetCellsAliases(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("c11", "c12", "c13", "c21", "c22") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "c11", "c12", "c13", "c21", "c22") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { 
return NewVtctldServer(ts) }) @@ -4240,7 +4916,7 @@ func TestGetCellsAliases(t *testing.T) { } for i, alias := range []*topodatapb.CellsAlias{alias1, alias2} { - input := proto.Clone(alias).(*topodatapb.CellsAlias) + input := alias.CloneVT() name := fmt.Sprintf("a%d", i+1) require.NoError(t, ts.CreateCellsAlias(ctx, name, input), "cannot create cells alias %d (idx = %d) = %+v", i+1, i, input) } @@ -4254,7 +4930,7 @@ func TestGetCellsAliases(t *testing.T) { assert.NoError(t, err) utils.MustMatch(t, expected, resp.Aliases) - ts, topofactory := memorytopo.NewServerAndFactory() + ts, topofactory := memorytopo.NewServerAndFactory(ctx) vtctld = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4328,8 +5004,9 @@ func TestGetFullStatus(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer(tt.cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, tt.cells...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &testutil.TabletManagerClient{ TopoServer: ts, FullStatusResult: &replicationdatapb.FullStatus{ @@ -4356,8 +5033,9 @@ func TestGetFullStatus(t *testing.T) { func TestGetKeyspaces(t *testing.T) { t.Parallel() - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4397,7 +5075,6 @@ func TestGetKeyspaces(t *testing.T) { func TestGetPermissions(t *testing.T) { t.Parallel() - ctx := context.Background() var testGetPermissionsReply = &tabletmanagerdatapb.Permissions{ UserPermissions: []*tabletmanagerdatapb.UserPermission{ { @@ -4521,7 +5198,10 @@ func TestGetPermissions(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { @@ -4544,7 +5224,6 @@ func TestGetPermissions(t *testing.T) { func TestGetRoutingRules(t *testing.T) { t.Parallel() - ctx := context.Background() tests := []struct { name string topoDown bool @@ -4588,7 +5267,10 @@ func TestGetRoutingRules(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts, factory := memorytopo.NewServerAndFactory() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts, factory := memorytopo.NewServerAndFactory(ctx) if tt.rrIn != nil { err := ts.SaveRoutingRules(ctx, tt.rrIn) require.NoError(t, err, "could not save routing rules: %+v", tt.rrIn) @@ -4614,8 +5296,9 @@ func TestGetRoutingRules(t *testing.T) { } func TestGetSchema(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") tmc := testutil.TabletManagerClient{ GetSchemaResults: map[string]struct { Schema *tabletmanagerdatapb.SchemaDefinition @@ -4768,40 +5451,241 @@ func TestGetSchema(t *testing.T) { }, shouldErr: false, }, - // error cases + // error cases + { + name: "no tablet", + req: &vtctldatapb.GetSchemaRequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "notfound", + Uid: 100, + }, + }, + expected: nil, + shouldErr: true, + }, + { + name: "no schema", + req: &vtctldatapb.GetSchemaRequest{ + TabletAlias: otherAlias, + }, + expected: nil, + shouldErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + setupSchema() + + resp, err := vtctld.GetSchema(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + utils.MustMatch(t, tt.expected, resp) + }) + } +} + +func TestGetSchemaMigrations(t *testing.T) { + t.Parallel() + + convertNamedRowsToProto3Result := func(rows []sqltypes.RowNamedValues) 
*querypb.QueryResult { + var ( + result sqltypes.Result + fieldNames, fieldTypes []string + ) + for i, row := range rows { + var unnamedRow sqltypes.Row + if i == 0 { + // Add to fields if this is the first row + for name, value := range row { + fieldNames = append(fieldNames, name) + fieldTypes = append(fieldTypes, strings.ToLower(querypb.Type_name[int32(value.Type())])) + } + } + + for _, name := range fieldNames { + value, ok := row[name] + if !ok { + value = sqltypes.NULL + } + + unnamedRow = append(unnamedRow, value) + } + + result.Rows = append(result.Rows, unnamedRow) + } + + result.Fields = sqltypes.MakeTestFields(strings.Join(fieldNames, "|"), strings.Join(fieldTypes, "|")) + return sqltypes.ResultToProto3(&result) + } + + tests := []struct { + name string + tablets []*topodatapb.Tablet + rowsByTablet map[string][]sqltypes.RowNamedValues + failTopo bool + req *vtctldatapb.GetSchemaMigrationsRequest + expected *vtctldatapb.GetSchemaMigrationsResponse + shouldErr bool + }{ + { + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "ks", + Shard: "-", + Type: topodatapb.TabletType_PRIMARY, + }, + }, + rowsByTablet: map[string][]sqltypes.RowNamedValues{ + "zone1-0000000100": { + map[string]sqltypes.Value{ + "migration_uuid": sqltypes.NewVarChar("uuid1"), + "keyspace": sqltypes.NewVarChar("ks"), + "shard": sqltypes.NewVarChar("-"), + "strategy": sqltypes.NewVarChar(schematools.SchemaMigrationStrategyName(vtctldatapb.SchemaMigration_ONLINE)), + }, + }, + }, + req: &vtctldatapb.GetSchemaMigrationsRequest{ + Keyspace: "ks", + }, + expected: &vtctldatapb.GetSchemaMigrationsResponse{ + Migrations: []*vtctldatapb.SchemaMigration{ + { + Uuid: "uuid1", + Keyspace: "ks", + Shard: "-", + Strategy: vtctldatapb.SchemaMigration_ONLINE, + EtaSeconds: -1, + }, + }, + }, + }, + { + name: "bad uuid input", + req: &vtctldatapb.GetSchemaMigrationsRequest{ + Uuid: "not-a-uuid", + }, + shouldErr: true, + }, + { + name: 
"gettablets failure", + failTopo: true, + req: &vtctldatapb.GetSchemaMigrationsRequest{ + Keyspace: "notfound", + }, + shouldErr: true, + }, { - name: "no tablet", - req: &vtctldatapb.GetSchemaRequest{ - TabletAlias: &topodatapb.TabletAlias{ - Cell: "notfound", - Uid: 100, + name: "execute fetch failure", + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "ks", + Shard: "-", + Type: topodatapb.TabletType_PRIMARY, }, }, - expected: nil, + rowsByTablet: map[string][]sqltypes.RowNamedValues{ + "zone1-0000000100": nil, + }, + req: &vtctldatapb.GetSchemaMigrationsRequest{ + Keyspace: "ks", + }, shouldErr: true, }, { - name: "no schema", - req: &vtctldatapb.GetSchemaRequest{ - TabletAlias: otherAlias, + name: "bad row data", + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "ks", + Shard: "-", + Type: topodatapb.TabletType_PRIMARY, + }, + }, + rowsByTablet: map[string][]sqltypes.RowNamedValues{ + "zone1-0000000100": { + {"requested_timestamp": sqltypes.NewVarChar("invalid timestamp")}, + }, + }, + req: &vtctldatapb.GetSchemaMigrationsRequest{ + Keyspace: "ks", }, - expected: nil, shouldErr: true, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - setupSchema() + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() - resp, err := vtctld.GetSchema(ctx, tt.req) - if tt.shouldErr { + tmc := &testutil.TabletManagerClient{ + ExecuteFetchAsDbaResults: make(map[string]struct { + Response *querypb.QueryResult + Error error + }, len(test.rowsByTablet)), + } + + if test.rowsByTablet == nil { + test.rowsByTablet = map[string][]sqltypes.RowNamedValues{} + } + for alias, rows := range test.rowsByTablet { + switch rows { + case nil: + tmc.ExecuteFetchAsDbaResults[alias] = struct { + Response *querypb.QueryResult + Error error + }{ + Error: assert.AnError, + } + default: + 
tmc.ExecuteFetchAsDbaResults[alias] = struct { + Response *querypb.QueryResult + Error error + }{ + Response: convertNamedRowsToProto3Result(rows), + } + } + } + + cells := []string{"zone1", "zone2", "zone3"} + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, factory := memorytopo.NewServerAndFactory(ctx, cells...) + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{AlsoSetShardPrimary: true}, test.tablets...) + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + if test.failTopo { + factory.SetError(assert.AnError) + } + + resp, err := vtctld.GetSchemaMigrations(ctx, test.req) + if test.shouldErr { assert.Error(t, err) return } - assert.NoError(t, err) - utils.MustMatch(t, tt.expected, resp) + require.NoError(t, err) + utils.MustMatch(t, test.expected, resp) }) } } @@ -4874,8 +5758,9 @@ func TestGetShard(t *testing.T) { cells := []string{"zone1", "zone2", "zone3"} - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4900,7 +5785,6 @@ func TestGetShard(t *testing.T) { func TestGetSrvKeyspaceNames(t *testing.T) { t.Parallel() - ctx := context.Background() tests := []struct { name string srvKeyspacesByCell map[string]map[string]*topodatapb.SrvKeyspace @@ -4999,7 +5883,9 @@ func TestGetSrvKeyspaceNames(t *testing.T) { cells = append(cells, cell) } - ts, factory := memorytopo.NewServerAndFactory(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, factory := memorytopo.NewServerAndFactory(ctx, cells...) 
for cell, srvKeyspaces := range tt.srvKeyspacesByCell { for ks, srvks := range srvKeyspaces { @@ -5153,8 +6039,6 @@ func TestGetSrvKeyspaces(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt @@ -5165,7 +6049,10 @@ func TestGetSrvKeyspaces(t *testing.T) { t.SkipNow() } - ts, topofactory := memorytopo.NewServerAndFactory(tt.cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts, topofactory := memorytopo.NewServerAndFactory(ctx, tt.cells...) testutil.AddSrvKeyspaces(t, ts, tt.srvKeyspaces...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { @@ -5192,8 +6079,9 @@ func TestGetSrvKeyspaces(t *testing.T) { func TestGetSrvVSchema(t *testing.T) { t.Parallel() - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory("zone1", "zone2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1", "zone2") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -5402,8 +6290,9 @@ func TestGetSrvVSchemas(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory("zone1", "zone2", "zone3") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1", "zone2", "zone3") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -5461,8 +6350,9 @@ func TestGetSrvVSchemas(t *testing.T) { func TestGetTablet(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts 
:= memorytopo.NewServer(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -5638,7 +6528,7 @@ func TestGetTablets(t *testing.T) { Keyspace: "ks2", Shard: "-", Type: topodatapb.TabletType_PRIMARY, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)), }, { Alias: &topodatapb.TabletAlias{ @@ -5649,7 +6539,7 @@ func TestGetTablets(t *testing.T) { Shard: "-", Hostname: "stale.primary", Type: topodatapb.TabletType_PRIMARY, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), }, }, req: &vtctldatapb.GetTabletsRequest{ @@ -5665,7 +6555,7 @@ func TestGetTablets(t *testing.T) { Keyspace: "ks2", Shard: "-", Type: topodatapb.TabletType_PRIMARY, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)), }, { Alias: &topodatapb.TabletAlias{ @@ -5676,7 +6566,7 @@ func TestGetTablets(t *testing.T) { Shard: "-", Hostname: "stale.primary", Type: topodatapb.TabletType_UNKNOWN, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), }, }, shouldErr: false, @@ -5694,7 +6584,7 @@ func TestGetTablets(t *testing.T) { Shard: "-", Hostname: "slightly less stale", Type: topodatapb.TabletType_PRIMARY, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 
5, 0, time.UTC)), }, { Alias: &topodatapb.TabletAlias{ @@ -5705,7 +6595,7 @@ func TestGetTablets(t *testing.T) { Keyspace: "ks1", Shard: "-", Type: topodatapb.TabletType_PRIMARY, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), }, { Alias: &topodatapb.TabletAlias{ @@ -5716,7 +6606,7 @@ func TestGetTablets(t *testing.T) { Keyspace: "ks1", Shard: "-", Type: topodatapb.TabletType_PRIMARY, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 16, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 16, 4, 5, 0, time.UTC)), }, }, req: &vtctldatapb.GetTabletsRequest{}, @@ -5730,7 +6620,7 @@ func TestGetTablets(t *testing.T) { Shard: "-", Hostname: "slightly less stale", Type: topodatapb.TabletType_UNKNOWN, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)), }, { Alias: &topodatapb.TabletAlias{ @@ -5741,7 +6631,7 @@ func TestGetTablets(t *testing.T) { Keyspace: "ks1", Shard: "-", Type: topodatapb.TabletType_UNKNOWN, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), }, { Alias: &topodatapb.TabletAlias{ @@ -5752,7 +6642,7 @@ func TestGetTablets(t *testing.T) { Keyspace: "ks1", Shard: "-", Type: topodatapb.TabletType_PRIMARY, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 16, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 16, 4, 5, 0, time.UTC)), }, }, shouldErr: false, @@ -5940,8 +6830,9 @@ func TestGetTablets(t *testing.T) { t.Run(tt.name, func(t *testing.T) { 
t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer(tt.cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, tt.cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -5963,8 +6854,9 @@ func TestGetTablets(t *testing.T) { func TestGetTopologyPath(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2", "cell3") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2", "cell3") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -6051,15 +6943,14 @@ func TestGetTopologyPath(t *testing.T) { func TestGetVSchema(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) t.Run("found", func(t *testing.T) { - t.Parallel() - err := ts.SaveVSchema(ctx, "testkeyspace", &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ @@ -6098,11 +6989,216 @@ func TestGetVSchema(t *testing.T) { }) } +func TestLaunchSchemaMigration(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + tablets []*topodatapb.Tablet + tmc *testutil.TabletManagerClient + req *vtctldatapb.LaunchSchemaMigrationRequest + expected *vtctldatapb.LaunchSchemaMigrationResponse + shouldErr bool + }{ + { + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: 
topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{ + RowsAffected: 1, + }, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.LaunchSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + expected: &vtctldatapb.LaunchSchemaMigrationResponse{ + RowsAffectedByShard: map[string]uint64{ + "-80": 1, + "80-": 0, + }, + }, + }, + { + name: "no shard primary", + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{}, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.LaunchSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + shouldErr: true, + }, + { + name: "executeQuery failure", + tablets: 
[]*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Error: assert.AnError, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.LaunchSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + shouldErr: true, + }, + // execute query failure + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ + AlsoSetShardPrimary: true, + }, test.tablets...) 
+ + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + resp, err := vtctld.LaunchSchemaMigration(ctx, test.req) + if test.shouldErr { + assert.Error(t, err) + return + } + + require.NoError(t, err) + utils.MustMatch(t, test.expected, resp) + }) + } +} + func TestPingTablet(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddTablet(ctx, t, ts, &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -6167,10 +7263,7 @@ func TestPingTablet(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -6191,6 +7284,9 @@ func TestPingTablet(t *testing.T) { func TestPlannedReparentShard(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tests := []struct { name string ts *topo.Server @@ -6204,7 +7300,7 @@ func TestPlannedReparentShard(t *testing.T) { }{ { name: "successful reparent", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -6249,6 +7345,21 @@ func TestPlannedReparentShard(t *testing.T) { Error: nil, }, }, + // This is only needed to verify reachability, so empty results are fine. 
+ PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000101": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, PrimaryPositionResults: map[string]struct { Position string Error error @@ -6309,7 +7420,7 @@ func TestPlannedReparentShard(t *testing.T) { // the simplest way to trigger a failure is to attempt an PRS on a // shard that does not exist. name: "failed reparent", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: nil, req: &vtctldatapb.PlannedReparentShardRequest{ Keyspace: "testkeyspace", @@ -6330,14 +7441,8 @@ func TestPlannedReparentShard(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, ForceSetShardPrimary: true, @@ -6380,8 +7485,10 @@ func TestRebuildKeyspaceGraph(t *testing.T) { t.Run("ok", func(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddKeyspace(ctx, t, ts, &vtctldatapb.Keyspace{ Name: "testkeyspace", }) @@ -6398,7 +7505,10 @@ func TestRebuildKeyspaceGraph(t *testing.T) { t.Run("no such keyspace", func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -6412,8 +7522,10 @@ func TestRebuildKeyspaceGraph(t *testing.T) { t.Run("topo unavailable", 
func(t *testing.T) { t.Parallel() - ctx := context.Background() - ts, factory := memorytopo.NewServerAndFactory("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts, factory := memorytopo.NewServerAndFactory(ctx, "zone1") testutil.AddKeyspace(ctx, t, ts, &vtctldatapb.Keyspace{ Name: "testkeyspace", }) @@ -6431,8 +7543,10 @@ func TestRebuildKeyspaceGraph(t *testing.T) { t.Run("lock error", func(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddKeyspace(ctx, t, ts, &vtctldatapb.Keyspace{ Name: "testkeyspace", }) @@ -6440,15 +7554,13 @@ func TestRebuildKeyspaceGraph(t *testing.T) { return NewVtctldServer(ts) }) - _, unlock, lerr := ts.LockKeyspace(context.Background(), "testkeyspace", "test lock") + lctx, unlock, lerr := ts.LockKeyspace(context.Background(), "testkeyspace", "test lock") require.NoError(t, lerr, "could not lock keyspace for testing") defer unlock(&lerr) defer func() { require.NoError(t, lerr, "could not unlock testkeyspace after test") }() - ctx, cancel := context.WithTimeout(ctx, time.Millisecond*50) - defer cancel() - _, err := vtctld.RebuildKeyspaceGraph(ctx, &vtctldatapb.RebuildKeyspaceGraphRequest{ + _, err := vtctld.RebuildKeyspaceGraph(lctx, &vtctldatapb.RebuildKeyspaceGraphRequest{ Keyspace: "testkeyspace", }) assert.Error(t, err) @@ -6458,7 +7570,6 @@ func TestRebuildKeyspaceGraph(t *testing.T) { func TestRebuildVSchemaGraph(t *testing.T) { t.Parallel() - ctx := context.Background() req := &vtctldatapb.RebuildVSchemaGraphRequest{} tests := []struct { name string @@ -6480,7 +7591,10 @@ func TestRebuildVSchemaGraph(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts, factory := memorytopo.NewServerAndFactory("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts, factory := 
memorytopo.NewServerAndFactory(ctx, "zone1") if tt.topoDown { factory.SetError(errors.New("topo down for testing")) } @@ -6502,7 +7616,9 @@ func TestRebuildVSchemaGraph(t *testing.T) { func TestRefreshState(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tests := []struct { name string ts *topo.Server @@ -6513,7 +7629,7 @@ func TestRefreshState(t *testing.T) { }{ { name: "success", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablet: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -6530,13 +7646,13 @@ func TestRefreshState(t *testing.T) { }, { name: "tablet alias nil", - ts: memorytopo.NewServer(), + ts: memorytopo.NewServer(ctx), req: &vtctldatapb.RefreshStateRequest{}, shouldErr: true, }, { name: "tablet not found", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablet: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -6554,7 +7670,7 @@ func TestRefreshState(t *testing.T) { }, { name: "RefreshState failed", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablet: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -6573,11 +7689,7 @@ func TestRefreshState(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - var tmc testutil.TabletManagerClient if tt.tablet != nil { testutil.AddTablet(ctx, t, tt.ts, tt.tablet, nil) @@ -6603,7 +7715,8 @@ func TestRefreshState(t *testing.T) { func TestRefreshStateByShard(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []struct { name string ts *topo.Server @@ -6615,7 +7728,7 @@ func TestRefreshStateByShard(t *testing.T) { }{ { name: "success", - ts: memorytopo.NewServer("zone1", "zone2"), + ts: memorytopo.NewServer(ctx, "zone1", "zone2"), 
tablets: []*topodatapb.Tablet{ { Hostname: "zone1-100", @@ -6648,7 +7761,7 @@ func TestRefreshStateByShard(t *testing.T) { }, { name: "cell filtering", - ts: memorytopo.NewServer("zone1", "zone2"), + ts: memorytopo.NewServer(ctx, "zone1", "zone2"), tablets: []*topodatapb.Tablet{ { Hostname: "zone1-100", @@ -6686,7 +7799,7 @@ func TestRefreshStateByShard(t *testing.T) { }, { name: "partial result", - ts: memorytopo.NewServer("zone1", "zone2"), + ts: memorytopo.NewServer(ctx, "zone1", "zone2"), tablets: []*topodatapb.Tablet{ { Hostname: "zone1-100", @@ -6723,13 +7836,13 @@ func TestRefreshStateByShard(t *testing.T) { }, { name: "missing keyspace argument", - ts: memorytopo.NewServer(), + ts: memorytopo.NewServer(ctx), req: &vtctldatapb.RefreshStateByShardRequest{}, shouldErr: true, }, { name: "missing shard argument", - ts: memorytopo.NewServer(), + ts: memorytopo.NewServer(ctx), req: &vtctldatapb.RefreshStateByShardRequest{ Keyspace: "ks", }, @@ -6737,7 +7850,7 @@ func TestRefreshStateByShard(t *testing.T) { }, { name: "shard not found", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -6758,11 +7871,7 @@ func TestRefreshStateByShard(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - require.Equal(t, len(tt.tablets), len(tt.refreshStateErrors), "Invalid test case: must have one refreshStateError for each tablet") tmc := &testutil.TabletManagerClient{ @@ -6869,11 +7978,13 @@ func TestReloadSchema(t *testing.T) { }, } - ctx := context.Background() for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { @@ -6961,13 +8072,15 @@ func TestReloadSchemaKeyspace(t *testing.T) { }, } - ctx := context.Background() for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer("zone1", "zone2", "zone3") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, }, tt.tablets...) @@ -7117,13 +8230,15 @@ func TestReloadSchemaShard(t *testing.T) { }, } - ctx := context.Background() for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer("zone1", "zone2", "zone3") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, }, tt.tablets...) @@ -7144,8 +8259,9 @@ func TestReloadSchemaShard(t *testing.T) { } func TestRemoveBackup(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -7334,8 +8450,9 @@ func TestRemoveKeyspaceCell(t *testing.T) { cells := []string{"zone1", "zone2", "zone3"} - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -7622,8 +8739,9 @@ func TestRemoveShardCell(t *testing.T) { cells := []string{"zone1", "zone2", "zone3"} - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -8231,8 +9349,9 @@ func TestReparentTablet(t *testing.T) { cells := []string{"zone1", "zone2", "zone3"} - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -8259,7 +9378,9 @@ func TestReparentTablet(t *testing.T) { } func TestRestoreFromBackup(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tests := []struct { name string ts *topo.Server @@ -8271,7 +9392,7 @@ func TestRestoreFromBackup(t *testing.T) { }{ { name: "ok", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ RestoreFromBackupResults: map[string]struct { Events []*logutilpb.Event @@ -8320,7 +9441,7 @@ func TestRestoreFromBackup(t *testing.T) { }, { name: "no such tablet", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ Backups: map[string]struct { Events []*logutilpb.Event @@ -8358,7 +9479,6 @@ func TestRestoreFromBackup(t *testing.T) 
{ } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ @@ -8397,10 +9517,211 @@ func TestRestoreFromBackup(t *testing.T) { } } +func TestRetrySchemaMigration(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + tablets []*topodatapb.Tablet + tmc *testutil.TabletManagerClient + req *vtctldatapb.RetrySchemaMigrationRequest + expected *vtctldatapb.RetrySchemaMigrationResponse + shouldErr bool + }{ + { + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{ + RowsAffected: 1, + }, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.RetrySchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + expected: &vtctldatapb.RetrySchemaMigrationResponse{ + RowsAffectedByShard: map[string]uint64{ + "-80": 1, + "80-": 0, + }, + }, + }, + { + name: "no shard primary", + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + tmc: 
&testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{}, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.RetrySchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + shouldErr: true, + }, + { + name: "executeQuery failure", + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Error: assert.AnError, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.RetrySchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + shouldErr: true, + }, + // execute query failure + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ + AlsoSetShardPrimary: true, + }, test.tablets...) 
+ + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + resp, err := vtctld.RetrySchemaMigration(ctx, test.req) + if test.shouldErr { + assert.Error(t, err) + return + } + + require.NoError(t, err) + utils.MustMatch(t, test.expected, resp) + }) + } +} + func TestRunHealthCheck(t *testing.T) { t.Parallel() - ctx := context.Background() tests := []struct { name string tablets []*topodatapb.Tablet @@ -8483,7 +9804,10 @@ func TestRunHealthCheck(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { @@ -8555,14 +9879,15 @@ func TestSetKeyspaceDurabilityPolicy(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddKeyspaces(ctx, t, ts, tt.keyspaces...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { @@ -8583,11 +9908,14 @@ func TestSetKeyspaceDurabilityPolicy(t *testing.T) { func TestSetShardIsPrimaryServing(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + type testcase struct { name string ctx context.Context ts *topo.Server - setup func(*testing.T, *testcase) + setup func(*testing.T, *testcase) context.Context teardown func(*testing.T, *testcase) req *vtctldatapb.SetShardIsPrimaryServingRequest expected *vtctldatapb.SetShardIsPrimaryServingResponse @@ -8597,14 +9925,15 @@ func TestSetShardIsPrimaryServing(t *testing.T) { tests := []*testcase{ { name: "ok", - setup: func(t *testing.T, tt *testcase) { - tt.ctx = context.Background() - tt.ts = memorytopo.NewServer("zone1") + setup: func(t *testing.T, tt *testcase) context.Context { + tt.ctx = ctx + tt.ts = memorytopo.NewServer(ctx, "zone1") testutil.AddShards(tt.ctx, t, tt.ts, &vtctldatapb.Shard{ Keyspace: "testkeyspace", Name: "-", Shard: &topodatapb.Shard{}, }) + return tt.ctx }, req: &vtctldatapb.SetShardIsPrimaryServingRequest{ Keyspace: "testkeyspace", @@ -8619,17 +9948,17 @@ func TestSetShardIsPrimaryServing(t *testing.T) { }, { name: "lock error", - setup: func(t *testing.T, tt *testcase) { + setup: func(t *testing.T, tt *testcase) context.Context { var cancel func() - tt.ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50) - tt.ts = memorytopo.NewServer("zone1") + tt.ctx, cancel = context.WithTimeout(ctx, time.Millisecond*50) + tt.ts = memorytopo.NewServer(ctx, "zone1") testutil.AddShards(tt.ctx, t, tt.ts, &vtctldatapb.Shard{ Keyspace: "testkeyspace", Name: "-", Shard: &topodatapb.Shard{}, }) - _, unlock, err := tt.ts.LockKeyspace(tt.ctx, "testkeyspace", "test lock") + lctx, unlock, err := tt.ts.LockKeyspace(tt.ctx, "testkeyspace", "test lock") require.NoError(t, err) tt.teardown = func(t *testing.T, tt 
*testcase) { var err error @@ -8637,6 +9966,7 @@ func TestSetShardIsPrimaryServing(t *testing.T) { assert.NoError(t, err) cancel() } + return lctx }, req: &vtctldatapb.SetShardIsPrimaryServingRequest{ Keyspace: "testkeyspace", @@ -8649,13 +9979,9 @@ func TestSetShardIsPrimaryServing(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - if tt.setup != nil { - tt.setup(t, tt) + tt.ctx = tt.setup(t, tt) } if tt.teardown != nil { defer tt.teardown(t, tt) @@ -8679,6 +10005,9 @@ func TestSetShardIsPrimaryServing(t *testing.T) { func TestSetShardTabletControl(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + type testcase struct { name string ctx context.Context @@ -8694,8 +10023,8 @@ func TestSetShardTabletControl(t *testing.T) { { name: "ok", setup: func(t *testing.T, tt *testcase) { - tt.ctx = context.Background() - tt.ts = memorytopo.NewServer("zone1", "zone2", "zone3") + tt.ctx = ctx + tt.ts = memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") testutil.AddShards(tt.ctx, t, tt.ts, &vtctldatapb.Shard{ Keyspace: "testkeyspace", @@ -8743,8 +10072,8 @@ func TestSetShardTabletControl(t *testing.T) { { name: "remove tabletcontrols", setup: func(t *testing.T, tt *testcase) { - tt.ctx = context.Background() - tt.ts = memorytopo.NewServer("zone1", "zone2", "zone3") + tt.ctx = ctx + tt.ts = memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") testutil.AddShards(tt.ctx, t, tt.ts, &vtctldatapb.Shard{ Keyspace: "testkeyspace", @@ -8778,8 +10107,8 @@ func TestSetShardTabletControl(t *testing.T) { { name: "disable queryservice", setup: func(t *testing.T, tt *testcase) { - tt.ctx = context.Background() - tt.ts = memorytopo.NewServer("zone1", "zone2", "zone3") + tt.ctx = ctx + tt.ts = memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") testutil.AddShards(tt.ctx, t, tt.ts, &vtctldatapb.Shard{ Keyspace: "testkeyspace", @@ -8873,8 +10202,8 @@ func 
TestSetShardTabletControl(t *testing.T) { name: "keyspace lock error", setup: func(t *testing.T, tt *testcase) { var cancel func() - tt.ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50) - tt.ts = memorytopo.NewServer("zone1") + tt.ctx, cancel = context.WithTimeout(ctx, time.Millisecond*50) + tt.ts = memorytopo.NewServer(ctx, "zone1") testutil.AddShards(tt.ctx, t, tt.ts, &vtctldatapb.Shard{ Keyspace: "testkeyspace", Name: "-", @@ -8900,10 +10229,7 @@ func TestSetShardTabletControl(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - if tt.setup != nil { tt.setup(t, tt) } @@ -9103,13 +10429,15 @@ func TestSetWritable(t *testing.T) { }, } - ctx := context.Background() for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer(tt.cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, tt.cells...) defer ts.Close() testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) 
@@ -9131,8 +10459,9 @@ func TestSetWritable(t *testing.T) { func TestShardReplicationAdd(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -9185,6 +10514,9 @@ func TestShardReplicationAdd(t *testing.T) { func TestShardReplicationPositions(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tests := []struct { name string ts *topo.Server @@ -9197,7 +10529,7 @@ func TestShardReplicationPositions(t *testing.T) { }{ { name: "success", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -9276,7 +10608,7 @@ func TestShardReplicationPositions(t *testing.T) { }, { name: "timeouts are nonfatal", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -9358,7 +10690,7 @@ func TestShardReplicationPositions(t *testing.T) { }, { name: "other rpc errors are fatal", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -9408,7 +10740,7 @@ func TestShardReplicationPositions(t *testing.T) { }, { name: "nonexistent shard", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), req: &vtctldatapb.ShardReplicationPositionsRequest{ Keyspace: "testkeyspace", Shard: "-", @@ -9419,13 +10751,7 @@ func TestShardReplicationPositions(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - testutil.AddTablets(ctx, t, tt.ts, 
&testutil.AddTabletOptions{ AlsoSetShardPrimary: true, SkipShardCreation: false, @@ -9435,14 +10761,14 @@ func TestShardReplicationPositions(t *testing.T) { return NewVtctldServer(ts) }) + requestCtx := ctx if tt.ctxTimeout > 0 { - _ctx, cancel := context.WithTimeout(ctx, tt.ctxTimeout) - defer cancel() - - ctx = _ctx + var requestCancel func() + requestCtx, requestCancel = context.WithTimeout(ctx, tt.ctxTimeout) + defer requestCancel() } - resp, err := vtctld.ShardReplicationPositions(ctx, tt.req) + resp, err := vtctld.ShardReplicationPositions(requestCtx, tt.req) if tt.shouldErr { assert.Error(t, err) @@ -9458,8 +10784,10 @@ func TestShardReplicationPositions(t *testing.T) { func TestShardReplicationRemove(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -9617,8 +10945,9 @@ func TestSourceShardAdd(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -9751,8 +11080,9 @@ func TestSourceShardDelete(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ 
-9931,13 +11261,15 @@ func TestStartReplication(t *testing.T) { }, } - ctx := context.Background() for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer(tt.cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, tt.cells...) defer ts.Close() testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ @@ -10068,13 +11400,15 @@ func TestStopReplication(t *testing.T) { }, } - ctx := context.Background() for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer(tt.cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, tt.cells...) defer ts.Close() testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) @@ -10459,8 +11793,9 @@ func TestTabletExternallyReparented(t *testing.T) { cells := []string{"zone1", "zone2", "zone3"} - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) 
tmc := testutil.TabletManagerClient{ TopoServer: ts, } @@ -10513,7 +11848,6 @@ func TestTabletExternallyReparented(t *testing.T) { func TestUpdateCellInfo(t *testing.T) { t.Parallel() - ctx := context.Background() tests := []struct { name string cells map[string]*topodatapb.CellInfo @@ -10637,7 +11971,10 @@ func TestUpdateCellInfo(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts, factory := memorytopo.NewServerAndFactory() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts, factory := memorytopo.NewServerAndFactory(ctx) for name, cell := range tt.cells { err := ts.CreateCellInfo(ctx, name, cell) require.NoError(t, err, "failed to create cell %s: %+v for test", name, cell) @@ -10665,7 +12002,6 @@ func TestUpdateCellInfo(t *testing.T) { func TestUpdateCellsAlias(t *testing.T) { t.Parallel() - ctx := context.Background() tests := []struct { name string cells []string @@ -10776,7 +12112,10 @@ func TestUpdateCellsAlias(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer(tt.cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, tt.cells...) 
for name, cells := range tt.aliases { for _, cell := range cells { // We use UpdateCellInfoFields rather than CreateCellInfo @@ -10813,8 +12152,9 @@ func TestUpdateCellsAlias(t *testing.T) { func TestValidate(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1", "zone2", "zone3") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") tablets := []*topodatapb.Tablet{ { Keyspace: "ks1", @@ -10928,8 +12268,9 @@ func TestValidate(t *testing.T) { } func TestValidateSchemaKeyspace(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("zone1", "zone2", "zone3") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") tmc := testutil.TabletManagerClient{ GetSchemaResults: map[string]struct { Schema *tabletmanagerdatapb.SchemaDefinition @@ -11155,8 +12496,9 @@ func TestValidateSchemaKeyspace(t *testing.T) { } func TestValidateVersionKeyspace(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("zone1", "zone2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1", "zone2") tmc := testutil.TabletManagerClient{ GetSchemaResults: map[string]struct { Schema *tabletmanagerdatapb.SchemaDefinition @@ -11274,8 +12616,9 @@ func TestValidateVersionKeyspace(t *testing.T) { func TestValidateVersionShard(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1", "zone2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1", "zone2") tmc := testutil.TabletManagerClient{ GetSchemaResults: map[string]struct { Schema *tabletmanagerdatapb.SchemaDefinition @@ -11394,11 +12737,12 @@ func TestValidateShard(t *testing.T) { shouldErr bool } - ctx := context.Background() + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() tests := []*testcase{ { name: "ok", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: nil, setup: func(t *testing.T, tt *testcase) { tablets := []*topodatapb.Tablet{ @@ -11435,7 +12779,7 @@ func TestValidateShard(t *testing.T) { }, { name: "no shard", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: nil, setup: func(t *testing.T, tt *testcase) { tablets := []*topodatapb.Tablet{ @@ -11476,7 +12820,7 @@ func TestValidateShard(t *testing.T) { }, { name: "no primary in shard", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: nil, setup: func(t *testing.T, tt *testcase) { tablets := []*topodatapb.Tablet{ @@ -11503,7 +12847,7 @@ func TestValidateShard(t *testing.T) { }, { name: "two primaries in shard", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: nil, setup: func(t *testing.T, tt *testcase) { tablets := []*topodatapb.Tablet{ @@ -11546,7 +12890,7 @@ func TestValidateShard(t *testing.T) { }, { name: "ping_tablets/ok", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ GetReplicasResults: map[string]struct { Replicas []string @@ -11615,7 +12959,7 @@ func TestValidateShard(t *testing.T) { }, { name: "ping_tablets/GetReplicas failed", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ GetReplicasResults: map[string]struct { Replicas []string @@ -11686,7 +13030,7 @@ func TestValidateShard(t *testing.T) { }, { name: "ping_tablets/no replicas", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ GetReplicasResults: map[string]struct { Replicas []string @@ -11757,7 +13101,7 @@ func TestValidateShard(t *testing.T) { }, { name: "ping_tablets/orphaned replica", - ts: memorytopo.NewServer("zone1"), 
+ ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ GetReplicasResults: map[string]struct { Replicas []string @@ -11831,7 +13175,7 @@ func TestValidateShard(t *testing.T) { }, { name: "ping_tablets/Ping failed", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ GetReplicasResults: map[string]struct { Replicas []string @@ -11903,10 +13247,7 @@ func TestValidateShard(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - if tt.setup != nil { tt.setup(t, tt) } diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/proto_compare.go b/go/vt/vtctl/grpcvtctldserver/testutil/proto_compare.go index 45692e70114..20ad0f692b0 100644 --- a/go/vt/vtctl/grpcvtctldserver/testutil/proto_compare.go +++ b/go/vt/vtctl/grpcvtctldserver/testutil/proto_compare.go @@ -23,10 +23,8 @@ import ( "testing" "github.com/stretchr/testify/assert" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/test/utils" - logutilpb "vitess.io/vitess/go/vt/proto/logutil" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" @@ -37,11 +35,9 @@ import ( // respective Events field in the comparison. func AssertEmergencyReparentShardResponsesEqual(t *testing.T, expected *vtctldatapb.EmergencyReparentShardResponse, actual *vtctldatapb.EmergencyReparentShardResponse, msgAndArgs ...any) { t.Helper() - - expected = proto.Clone(expected).(*vtctldatapb.EmergencyReparentShardResponse) + expected = expected.CloneVT() expected.Events = nil - - actual = proto.Clone(actual).(*vtctldatapb.EmergencyReparentShardResponse) + actual = actual.CloneVT() actual.Events = nil utils.MustMatch(t, expected, actual) @@ -104,11 +100,9 @@ func clearEvents(events []*logutilpb.Event, f func(*logutilpb.Event) *logutilpb. // respective Events field in the comparison. 
func AssertPlannedReparentShardResponsesEqual(t *testing.T, expected *vtctldatapb.PlannedReparentShardResponse, actual *vtctldatapb.PlannedReparentShardResponse) { t.Helper() - - expected = proto.Clone(expected).(*vtctldatapb.PlannedReparentShardResponse) + expected = expected.CloneVT() expected.Events = nil - - actual = proto.Clone(actual).(*vtctldatapb.PlannedReparentShardResponse) + actual = actual.CloneVT() actual.Events = nil utils.MustMatch(t, expected, actual) diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go index 020eed4bd81..ba7c8477d22 100644 --- a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go +++ b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go @@ -214,6 +214,13 @@ type TabletManagerClient struct { Response *hk.HookResult Error error } + // keyed by tablet alias. + ExecuteQueryDelays map[string]time.Duration + // keyed by tablet alias. + ExecuteQueryResults map[string]struct { + Response *querypb.QueryResult + Error error + } // FullStatus result FullStatusResult *replicationdatapb.FullStatus // keyed by tablet alias. @@ -281,6 +288,11 @@ type TabletManagerClient struct { Position *replicationdatapb.Status Error error } + PrimaryStatusDelays map[string]time.Duration + PrimaryStatusResults map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + } RestoreFromBackupResults map[string]struct { Events []*logutilpb.Event EventInterval time.Duration @@ -343,6 +355,10 @@ type TabletManagerClient struct { // WaitForPosition(tablet *topodatapb.Tablet, position string) error, so we // key by tablet alias and then by position. 
WaitForPositionResults map[string]map[string]error + // tablet alias => duration + CheckThrottlerDelays map[string]time.Duration + // keyed by tablet alias + CheckThrottlerResults map[string]*tabletmanagerdatapb.CheckThrottlerResponse } type backupStreamAdapter struct { @@ -559,6 +575,30 @@ func (fake *TabletManagerClient) ExecuteHook(ctx context.Context, tablet *topoda return nil, fmt.Errorf("%w: no ExecuteHook result set for tablet %s", assert.AnError, key) } +// ExecuteQuery is part of the tmclient.TabletManagerClient interface. +func (fake *TabletManagerClient) ExecuteQuery(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ExecuteQueryRequest) (*querypb.QueryResult, error) { + if fake.ExecuteQueryResults == nil { + return nil, fmt.Errorf("%w: no ExecuteQuery results on fake TabletManagerClient", assert.AnError) + } + + key := topoproto.TabletAliasString(tablet.Alias) + if fake.ExecuteQueryDelays != nil { + if delay, ok := fake.ExecuteQueryDelays[key]; ok { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + if result, ok := fake.ExecuteQueryResults[key]; ok { + return result.Response, result.Error + } + + return nil, fmt.Errorf("%w: no ExecuteQuery result set for tablet %s", assert.AnError, key) +} + // FullStatus is part of the tmclient.TabletManagerClient interface. func (fake *TabletManagerClient) FullStatus(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.FullStatus, error) { if fake.FullStatusResult != nil { @@ -572,7 +612,7 @@ func (fake *TabletManagerClient) FullStatus(ctx context.Context, tablet *topodat return nil, fmt.Errorf("no output set for FullStatus") } -// GetPermission is part of the tmclient.TabletManagerClient interface. +// GetPermissions is part of the tmclient.TabletManagerClient interface. 
func (fake *TabletManagerClient) GetPermissions(ctx context.Context, tablet *topodatapb.Tablet) (*tabletmanagerdatapb.Permissions, error) { if fake.GetPermissionsResults == nil { return nil, assert.AnError @@ -870,6 +910,32 @@ func (fake *TabletManagerClient) ReplicationStatus(ctx context.Context, tablet * return nil, assert.AnError } +// PrimaryStatus is part of the tmclient.TabletManagerClient interface. +func (fake *TabletManagerClient) PrimaryStatus(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.PrimaryStatus, error) { + if fake.PrimaryStatusResults == nil { + return nil, assert.AnError + } + + key := topoproto.TabletAliasString(tablet.Alias) + + if fake.PrimaryStatusDelays != nil { + if delay, ok := fake.PrimaryStatusDelays[key]; ok { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + + if result, ok := fake.PrimaryStatusResults[key]; ok { + return result.Status, result.Error + } + + return nil, assert.AnError +} + type backupRestoreStreamAdapter struct { *grpcshim.BidiStream ch chan *logutilpb.Event @@ -1326,3 +1392,33 @@ func (fake *TabletManagerClient) VReplicationExec(ctx context.Context, tablet *t return nil, assert.AnError } + +// CheckThrottler is part of the tmclient.TabletManagerClient interface. 
+func (fake *TabletManagerClient) CheckThrottler(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) { + if fake.CheckThrottlerResults == nil { + return nil, assert.AnError + } + + if tablet.Alias == nil { + return nil, assert.AnError + } + + key := topoproto.TabletAliasString(tablet.Alias) + + if fake.CheckThrottlerDelays != nil { + if delay, ok := fake.CheckThrottlerDelays[key]; ok { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + + if resultsForTablet, ok := fake.CheckThrottlerResults[key]; ok { + return resultsForTablet, nil + } + + return nil, assert.AnError +} diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/util.go b/go/vt/vtctl/grpcvtctldserver/testutil/util.go index 2b18d0bce68..97638e9c41e 100644 --- a/go/vt/vtctl/grpcvtctldserver/testutil/util.go +++ b/go/vt/vtctl/grpcvtctldserver/testutil/util.go @@ -24,8 +24,6 @@ import ( "fmt" "testing" - "google.golang.org/protobuf/proto" - "github.com/stretchr/testify/require" "golang.org/x/net/nettest" "google.golang.org/grpc" @@ -106,7 +104,7 @@ func WithTestServers( // could not be added. It shallow copies the proto struct to prevent XXX_ fields // from changing in the marshalling. func AddKeyspace(ctx context.Context, t *testing.T, ts *topo.Server, ks *vtctldatapb.Keyspace) { - err := ts.CreateKeyspace(ctx, ks.Name, proto.Clone(ks.Keyspace).(*topodatapb.Keyspace)) + err := ts.CreateKeyspace(ctx, ks.Name, ks.Keyspace.CloneVT()) require.NoError(t, err) } @@ -149,7 +147,8 @@ type AddTabletOptions struct { // shard to serving. If that shard record already has a serving primary, then // AddTablet will fail the test. 
func AddTablet(ctx context.Context, t *testing.T, ts *topo.Server, tablet *topodatapb.Tablet, opts *AddTabletOptions) { - tablet = proto.Clone(tablet).(*topodatapb.Tablet) + t.Helper() + tablet = tablet.CloneVT() if opts == nil { opts = &AddTabletOptions{} } @@ -200,6 +199,7 @@ func AddTablet(ctx context.Context, t *testing.T, ts *topo.Server, tablet *topod // AddTablets adds a list of tablets to the topology. See AddTablet for more // details. func AddTablets(ctx context.Context, t *testing.T, ts *topo.Server, opts *AddTabletOptions, tablets ...*topodatapb.Tablet) { + t.Helper() for _, tablet := range tablets { AddTablet(ctx, t, ts, tablet, opts) } diff --git a/go/vt/vtctl/localvtctldclient/client_gen.go b/go/vt/vtctl/localvtctldclient/client_gen.go index e516bb3063c..198fc12908f 100644 --- a/go/vt/vtctl/localvtctldclient/client_gen.go +++ b/go/vt/vtctl/localvtctldclient/client_gen.go @@ -161,11 +161,26 @@ func (client *localVtctldClient) BackupShard(ctx context.Context, in *vtctldatap return stream, nil } +// CancelSchemaMigration is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) CancelSchemaMigration(ctx context.Context, in *vtctldatapb.CancelSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.CancelSchemaMigrationResponse, error) { + return client.s.CancelSchemaMigration(ctx, in) +} + // ChangeTabletType is part of the vtctlservicepb.VtctldClient interface. func (client *localVtctldClient) ChangeTabletType(ctx context.Context, in *vtctldatapb.ChangeTabletTypeRequest, opts ...grpc.CallOption) (*vtctldatapb.ChangeTabletTypeResponse, error) { return client.s.ChangeTabletType(ctx, in) } +// CleanupSchemaMigration is part of the vtctlservicepb.VtctldClient interface. 
+func (client *localVtctldClient) CleanupSchemaMigration(ctx context.Context, in *vtctldatapb.CleanupSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.CleanupSchemaMigrationResponse, error) { + return client.s.CleanupSchemaMigration(ctx, in) +} + +// CompleteSchemaMigration is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) CompleteSchemaMigration(ctx context.Context, in *vtctldatapb.CompleteSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.CompleteSchemaMigrationResponse, error) { + return client.s.CompleteSchemaMigration(ctx, in) +} + // CreateKeyspace is part of the vtctlservicepb.VtctldClient interface. func (client *localVtctldClient) CreateKeyspace(ctx context.Context, in *vtctldatapb.CreateKeyspaceRequest, opts ...grpc.CallOption) (*vtctldatapb.CreateKeyspaceResponse, error) { return client.s.CreateKeyspace(ctx, in) @@ -281,6 +296,11 @@ func (client *localVtctldClient) GetSchema(ctx context.Context, in *vtctldatapb. return client.s.GetSchema(ctx, in) } +// GetSchemaMigrations is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) GetSchemaMigrations(ctx context.Context, in *vtctldatapb.GetSchemaMigrationsRequest, opts ...grpc.CallOption) (*vtctldatapb.GetSchemaMigrationsResponse, error) { + return client.s.GetSchemaMigrations(ctx, in) +} + // GetShard is part of the vtctlservicepb.VtctldClient interface. func (client *localVtctldClient) GetShard(ctx context.Context, in *vtctldatapb.GetShardRequest, opts ...grpc.CallOption) (*vtctldatapb.GetShardResponse, error) { return client.s.GetShard(ctx, in) @@ -346,6 +366,61 @@ func (client *localVtctldClient) InitShardPrimary(ctx context.Context, in *vtctl return client.s.InitShardPrimary(ctx, in) } +// LaunchSchemaMigration is part of the vtctlservicepb.VtctldClient interface. 
+func (client *localVtctldClient) LaunchSchemaMigration(ctx context.Context, in *vtctldatapb.LaunchSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.LaunchSchemaMigrationResponse, error) { + return client.s.LaunchSchemaMigration(ctx, in) +} + +// LookupVindexCreate is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) LookupVindexCreate(ctx context.Context, in *vtctldatapb.LookupVindexCreateRequest, opts ...grpc.CallOption) (*vtctldatapb.LookupVindexCreateResponse, error) { + return client.s.LookupVindexCreate(ctx, in) +} + +// LookupVindexExternalize is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) LookupVindexExternalize(ctx context.Context, in *vtctldatapb.LookupVindexExternalizeRequest, opts ...grpc.CallOption) (*vtctldatapb.LookupVindexExternalizeResponse, error) { + return client.s.LookupVindexExternalize(ctx, in) +} + +// MaterializeCreate is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) MaterializeCreate(ctx context.Context, in *vtctldatapb.MaterializeCreateRequest, opts ...grpc.CallOption) (*vtctldatapb.MaterializeCreateResponse, error) { + return client.s.MaterializeCreate(ctx, in) +} + +// MigrateCreate is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) MigrateCreate(ctx context.Context, in *vtctldatapb.MigrateCreateRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowStatusResponse, error) { + return client.s.MigrateCreate(ctx, in) +} + +// MountList is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) MountList(ctx context.Context, in *vtctldatapb.MountListRequest, opts ...grpc.CallOption) (*vtctldatapb.MountListResponse, error) { + return client.s.MountList(ctx, in) +} + +// MountRegister is part of the vtctlservicepb.VtctldClient interface. 
+func (client *localVtctldClient) MountRegister(ctx context.Context, in *vtctldatapb.MountRegisterRequest, opts ...grpc.CallOption) (*vtctldatapb.MountRegisterResponse, error) { + return client.s.MountRegister(ctx, in) +} + +// MountShow is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) MountShow(ctx context.Context, in *vtctldatapb.MountShowRequest, opts ...grpc.CallOption) (*vtctldatapb.MountShowResponse, error) { + return client.s.MountShow(ctx, in) +} + +// MountUnregister is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) MountUnregister(ctx context.Context, in *vtctldatapb.MountUnregisterRequest, opts ...grpc.CallOption) (*vtctldatapb.MountUnregisterResponse, error) { + return client.s.MountUnregister(ctx, in) +} + +// MoveTablesComplete is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) MoveTablesComplete(ctx context.Context, in *vtctldatapb.MoveTablesCompleteRequest, opts ...grpc.CallOption) (*vtctldatapb.MoveTablesCompleteResponse, error) { + return client.s.MoveTablesComplete(ctx, in) +} + +// MoveTablesCreate is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) MoveTablesCreate(ctx context.Context, in *vtctldatapb.MoveTablesCreateRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowStatusResponse, error) { + return client.s.MoveTablesCreate(ctx, in) +} + // PingTablet is part of the vtctlservicepb.VtctldClient interface. func (client *localVtctldClient) PingTablet(ctx context.Context, in *vtctldatapb.PingTabletRequest, opts ...grpc.CallOption) (*vtctldatapb.PingTabletResponse, error) { return client.s.PingTablet(ctx, in) @@ -411,6 +486,11 @@ func (client *localVtctldClient) ReparentTablet(ctx context.Context, in *vtctlda return client.s.ReparentTablet(ctx, in) } +// ReshardCreate is part of the vtctlservicepb.VtctldClient interface. 
+func (client *localVtctldClient) ReshardCreate(ctx context.Context, in *vtctldatapb.ReshardCreateRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowStatusResponse, error) { + return client.s.ReshardCreate(ctx, in) +} + type restoreFromBackupStreamAdapter struct { *grpcshim.BidiStream ch chan *vtctldatapb.RestoreFromBackupResponse @@ -462,6 +542,11 @@ func (client *localVtctldClient) RestoreFromBackup(ctx context.Context, in *vtct return stream, nil } +// RetrySchemaMigration is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) RetrySchemaMigration(ctx context.Context, in *vtctldatapb.RetrySchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.RetrySchemaMigrationResponse, error) { + return client.s.RetrySchemaMigration(ctx, in) +} + // RunHealthCheck is part of the vtctlservicepb.VtctldClient interface. func (client *localVtctldClient) RunHealthCheck(ctx context.Context, in *vtctldatapb.RunHealthCheckRequest, opts ...grpc.CallOption) (*vtctldatapb.RunHealthCheckResponse, error) { return client.s.RunHealthCheck(ctx, in) @@ -552,6 +637,31 @@ func (client *localVtctldClient) UpdateThrottlerConfig(ctx context.Context, in * return client.s.UpdateThrottlerConfig(ctx, in) } +// VDiffCreate is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) VDiffCreate(ctx context.Context, in *vtctldatapb.VDiffCreateRequest, opts ...grpc.CallOption) (*vtctldatapb.VDiffCreateResponse, error) { + return client.s.VDiffCreate(ctx, in) +} + +// VDiffDelete is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) VDiffDelete(ctx context.Context, in *vtctldatapb.VDiffDeleteRequest, opts ...grpc.CallOption) (*vtctldatapb.VDiffDeleteResponse, error) { + return client.s.VDiffDelete(ctx, in) +} + +// VDiffResume is part of the vtctlservicepb.VtctldClient interface. 
+func (client *localVtctldClient) VDiffResume(ctx context.Context, in *vtctldatapb.VDiffResumeRequest, opts ...grpc.CallOption) (*vtctldatapb.VDiffResumeResponse, error) { + return client.s.VDiffResume(ctx, in) +} + +// VDiffShow is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) VDiffShow(ctx context.Context, in *vtctldatapb.VDiffShowRequest, opts ...grpc.CallOption) (*vtctldatapb.VDiffShowResponse, error) { + return client.s.VDiffShow(ctx, in) +} + +// VDiffStop is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) VDiffStop(ctx context.Context, in *vtctldatapb.VDiffStopRequest, opts ...grpc.CallOption) (*vtctldatapb.VDiffStopResponse, error) { + return client.s.VDiffStop(ctx, in) +} + // Validate is part of the vtctlservicepb.VtctldClient interface. func (client *localVtctldClient) Validate(ctx context.Context, in *vtctldatapb.ValidateRequest, opts ...grpc.CallOption) (*vtctldatapb.ValidateResponse, error) { return client.s.Validate(ctx, in) @@ -587,6 +697,21 @@ func (client *localVtctldClient) ValidateVersionShard(ctx context.Context, in *v return client.s.ValidateVersionShard(ctx, in) } +// WorkflowDelete is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) WorkflowDelete(ctx context.Context, in *vtctldatapb.WorkflowDeleteRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowDeleteResponse, error) { + return client.s.WorkflowDelete(ctx, in) +} + +// WorkflowStatus is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) WorkflowStatus(ctx context.Context, in *vtctldatapb.WorkflowStatusRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowStatusResponse, error) { + return client.s.WorkflowStatus(ctx, in) +} + +// WorkflowSwitchTraffic is part of the vtctlservicepb.VtctldClient interface. 
+func (client *localVtctldClient) WorkflowSwitchTraffic(ctx context.Context, in *vtctldatapb.WorkflowSwitchTrafficRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowSwitchTrafficResponse, error) { + return client.s.WorkflowSwitchTraffic(ctx, in) +} + // WorkflowUpdate is part of the vtctlservicepb.VtctldClient interface. func (client *localVtctldClient) WorkflowUpdate(ctx context.Context, in *vtctldatapb.WorkflowUpdateRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowUpdateResponse, error) { return client.s.WorkflowUpdate(ctx, in) diff --git a/go/vt/vtctl/plugin_kubernetestopo.go b/go/vt/vtctl/plugin_kubernetestopo.go deleted file mode 100644 index 271633fc2bc..00000000000 --- a/go/vt/vtctl/plugin_kubernetestopo.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vtctl - -import ( - // Imports k8stopo to register the kubernetes implementation of - // TopoServer. - _ "vitess.io/vitess/go/vt/topo/k8stopo" -) diff --git a/go/vt/vtctl/reparent.go b/go/vt/vtctl/reparent.go index 43844eb9388..7ed0f6582b9 100644 --- a/go/vt/vtctl/reparent.go +++ b/go/vt/vtctl/reparent.go @@ -162,6 +162,7 @@ func commandEmergencyReparentShard(ctx context.Context, wr *wrangler.Wrangler, s newPrimary := subFlags.String("new_primary", "", "optional alias of a tablet that should be the new primary. 
If not specified, Vitess will select the best candidate") preventCrossCellPromotion := subFlags.Bool("prevent_cross_cell_promotion", false, "only promotes a new primary from the same cell as the previous primary") ignoreReplicasList := subFlags.String("ignore_replicas", "", "comma-separated list of replica tablet aliases to ignore during emergency reparent") + waitForAllTablets := subFlags.Bool("wait_for_all_tablets", false, "should ERS wait for all the tablets to respond. Useful when all the tablets are reachable") if err := subFlags.Parse(args); err != nil { return err @@ -189,7 +190,7 @@ func commandEmergencyReparentShard(ctx context.Context, wr *wrangler.Wrangler, s } } unreachableReplicas := topoproto.ParseTabletSet(*ignoreReplicasList) - return wr.EmergencyReparentShard(ctx, keyspace, shard, tabletAlias, *waitReplicasTimeout, unreachableReplicas, *preventCrossCellPromotion) + return wr.EmergencyReparentShard(ctx, keyspace, shard, tabletAlias, *waitReplicasTimeout, unreachableReplicas, *preventCrossCellPromotion, *waitForAllTablets) } func commandTabletExternallyReparented(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { diff --git a/go/vt/vtctl/reparentutil/durability.go b/go/vt/vtctl/reparentutil/durability.go index 735965c3afa..e68485a395c 100644 --- a/go/vt/vtctl/reparentutil/durability.go +++ b/go/vt/vtctl/reparentutil/durability.go @@ -43,10 +43,24 @@ func init() { return &durabilityNone{} }) RegisterDurability("semi_sync", func() Durabler { - return &durabilitySemiSync{} + return &durabilitySemiSync{ + rdonlySemiSync: false, + } }) RegisterDurability("cross_cell", func() Durabler { - return &durabilityCrossCell{} + return &durabilityCrossCell{ + rdonlySemiSync: false, + } + }) + RegisterDurability("semi_sync_with_rdonly_ack", func() Durabler { + return &durabilitySemiSync{ + rdonlySemiSync: true, + } + }) + RegisterDurability("cross_cell_with_rdonly_ack", func() Durabler { + return &durabilityCrossCell{ + 
rdonlySemiSync: true, + } }) RegisterDurability("test", func() Durabler { return &durabilityTest{} @@ -141,7 +155,9 @@ func (d *durabilityNone) isReplicaSemiSync(primary, replica *topodatapb.Tablet) // durabilitySemiSync has 1 semi-sync setup. It only allows Primary and Replica type servers to acknowledge semi sync // It returns NeutralPromoteRule for Primary and Replica tablet types, MustNotPromoteRule for everything else -type durabilitySemiSync struct{} +type durabilitySemiSync struct { + rdonlySemiSync bool +} // promotionRule implements the Durabler interface func (d *durabilitySemiSync) promotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule { @@ -162,6 +178,8 @@ func (d *durabilitySemiSync) isReplicaSemiSync(primary, replica *topodatapb.Tabl switch replica.Type { case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: return true + case topodatapb.TabletType_RDONLY: + return d.rdonlySemiSync } return false } @@ -171,7 +189,9 @@ func (d *durabilitySemiSync) isReplicaSemiSync(primary, replica *topodatapb.Tabl // durabilityCrossCell has 1 semi-sync setup. It only allows Primary and Replica type servers from a different cell to acknowledge semi sync. 
// This means that a transaction must be in two cells for it to be acknowledged // It returns NeutralPromoteRule for Primary and Replica tablet types, MustNotPromoteRule for everything else -type durabilityCrossCell struct{} +type durabilityCrossCell struct { + rdonlySemiSync bool +} // promotionRule implements the Durabler interface func (d *durabilityCrossCell) promotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule { @@ -192,6 +212,8 @@ func (d *durabilityCrossCell) isReplicaSemiSync(primary, replica *topodatapb.Tab switch replica.Type { case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: return primary.Alias.Cell != replica.Alias.Cell + case topodatapb.TabletType_RDONLY: + return d.rdonlySemiSync && primary.Alias.Cell != replica.Alias.Cell } return false } diff --git a/go/vt/vtctl/reparentutil/durability_test.go b/go/vt/vtctl/reparentutil/durability_test.go index 857718174c5..f1429b29621 100644 --- a/go/vt/vtctl/reparentutil/durability_test.go +++ b/go/vt/vtctl/reparentutil/durability_test.go @@ -73,146 +73,204 @@ func TestDurabilityNone(t *testing.T) { } func TestDurabilitySemiSync(t *testing.T) { - durability, err := GetDurabilityPolicy("semi_sync") - require.NoError(t, err) - - promoteRule := PromotionRule(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, + testcases := []struct { + durabilityPolicy string + rdonlySemiSync bool + }{ + { + durabilityPolicy: "semi_sync", + rdonlySemiSync: false, + }, { + durabilityPolicy: "semi_sync_with_rdonly_ack", + rdonlySemiSync: true, }, - Type: topodatapb.TabletType_PRIMARY, - }) - assert.Equal(t, promotionrule.Neutral, promoteRule) + } - promoteRule = PromotionRule(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, - }, - Type: topodatapb.TabletType_REPLICA, - }) - assert.Equal(t, promotionrule.Neutral, promoteRule) + for _, tt := range testcases { + t.Run(tt.durabilityPolicy, func(t *testing.T) 
{ + durability, err := GetDurabilityPolicy(tt.durabilityPolicy) + require.NoError(t, err) - promoteRule = PromotionRule(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, - }, - Type: topodatapb.TabletType_RDONLY, - }) - assert.Equal(t, promotionrule.MustNot, promoteRule) + promoteRule := PromotionRule(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }) + assert.Equal(t, promotionrule.Neutral, promoteRule) - promoteRule = PromotionRule(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, - }, - Type: topodatapb.TabletType_SPARE, - }) - assert.Equal(t, promotionrule.MustNot, promoteRule) - assert.Equal(t, 1, SemiSyncAckers(durability, nil)) - assert.Equal(t, true, IsReplicaSemiSync(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 101, - }, - Type: topodatapb.TabletType_PRIMARY, - }, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, - }, - Type: topodatapb.TabletType_REPLICA, - })) - assert.Equal(t, false, IsReplicaSemiSync(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 101, - }, - Type: topodatapb.TabletType_PRIMARY, - }, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, - }, - Type: topodatapb.TabletType_EXPERIMENTAL, - })) + promoteRule = PromotionRule(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + }) + assert.Equal(t, promotionrule.Neutral, promoteRule) + + promoteRule = PromotionRule(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_RDONLY, + }) + assert.Equal(t, promotionrule.MustNot, promoteRule) + + promoteRule = PromotionRule(durability, &topodatapb.Tablet{ + Alias: 
&topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_SPARE, + }) + assert.Equal(t, promotionrule.MustNot, promoteRule) + assert.Equal(t, 1, SemiSyncAckers(durability, nil)) + assert.Equal(t, true, IsReplicaSemiSync(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 101, + }, + Type: topodatapb.TabletType_PRIMARY, + }, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + })) + assert.Equal(t, false, IsReplicaSemiSync(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 101, + }, + Type: topodatapb.TabletType_PRIMARY, + }, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_EXPERIMENTAL, + })) + assert.Equal(t, tt.rdonlySemiSync, IsReplicaSemiSync(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 101, + }, + Type: topodatapb.TabletType_PRIMARY, + }, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_RDONLY, + })) + }) + } } func TestDurabilityCrossCell(t *testing.T) { - durability, err := GetDurabilityPolicy("cross_cell") - require.NoError(t, err) - - promoteRule := PromotionRule(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, + testcases := []struct { + durabilityPolicy string + rdonlySemiSync bool + }{ + { + durabilityPolicy: "cross_cell", + rdonlySemiSync: false, + }, { + durabilityPolicy: "cross_cell_with_rdonly_ack", + rdonlySemiSync: true, }, - Type: topodatapb.TabletType_PRIMARY, - }) - assert.Equal(t, promotionrule.Neutral, promoteRule) + } - promoteRule = PromotionRule(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, - }, - Type: topodatapb.TabletType_REPLICA, - }) - assert.Equal(t, promotionrule.Neutral, 
promoteRule) + for _, tt := range testcases { + t.Run(tt.durabilityPolicy, func(t *testing.T) { + durability, err := GetDurabilityPolicy(tt.durabilityPolicy) + require.NoError(t, err) - promoteRule = PromotionRule(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, - }, - Type: topodatapb.TabletType_RDONLY, - }) - assert.Equal(t, promotionrule.MustNot, promoteRule) + promoteRule := PromotionRule(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }) + assert.Equal(t, promotionrule.Neutral, promoteRule) - promoteRule = PromotionRule(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, - }, - Type: topodatapb.TabletType_SPARE, - }) - assert.Equal(t, promotionrule.MustNot, promoteRule) - assert.Equal(t, 1, SemiSyncAckers(durability, nil)) - assert.Equal(t, false, IsReplicaSemiSync(durability, &topodatapb.Tablet{ - Type: topodatapb.TabletType_PRIMARY, - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - }, - }, &topodatapb.Tablet{ - Type: topodatapb.TabletType_REPLICA, - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - }, - })) - assert.Equal(t, true, IsReplicaSemiSync(durability, &topodatapb.Tablet{ - Type: topodatapb.TabletType_PRIMARY, - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - }, - }, &topodatapb.Tablet{ - Type: topodatapb.TabletType_REPLICA, - Alias: &topodatapb.TabletAlias{ - Cell: "cell2", - }, - })) - assert.Equal(t, false, IsReplicaSemiSync(durability, &topodatapb.Tablet{ - Type: topodatapb.TabletType_PRIMARY, - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - }, - }, &topodatapb.Tablet{ - Type: topodatapb.TabletType_EXPERIMENTAL, - Alias: &topodatapb.TabletAlias{ - Cell: "cell2", - }, - })) + promoteRule = PromotionRule(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + }) + 
assert.Equal(t, promotionrule.Neutral, promoteRule) + + promoteRule = PromotionRule(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_RDONLY, + }) + assert.Equal(t, promotionrule.MustNot, promoteRule) + + promoteRule = PromotionRule(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_SPARE, + }) + assert.Equal(t, promotionrule.MustNot, promoteRule) + assert.Equal(t, 1, SemiSyncAckers(durability, nil)) + assert.Equal(t, false, IsReplicaSemiSync(durability, &topodatapb.Tablet{ + Type: topodatapb.TabletType_PRIMARY, + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + }, + }, &topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + }, + })) + assert.Equal(t, true, IsReplicaSemiSync(durability, &topodatapb.Tablet{ + Type: topodatapb.TabletType_PRIMARY, + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + }, + }, &topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "cell2", + }, + })) + assert.Equal(t, false, IsReplicaSemiSync(durability, &topodatapb.Tablet{ + Type: topodatapb.TabletType_PRIMARY, + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + }, + }, &topodatapb.Tablet{ + Type: topodatapb.TabletType_EXPERIMENTAL, + Alias: &topodatapb.TabletAlias{ + Cell: "cell2", + }, + })) + assert.Equal(t, tt.rdonlySemiSync, IsReplicaSemiSync(durability, &topodatapb.Tablet{ + Type: topodatapb.TabletType_PRIMARY, + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + }, + }, &topodatapb.Tablet{ + Type: topodatapb.TabletType_RDONLY, + Alias: &topodatapb.TabletAlias{ + Cell: "cell2", + }, + })) + }) + } } func TestError(t *testing.T) { diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter.go b/go/vt/vtctl/reparentutil/emergency_reparenter.go index 536d77bdaad..7f190a4d994 100644 --- 
a/go/vt/vtctl/reparentutil/emergency_reparenter.go +++ b/go/vt/vtctl/reparentutil/emergency_reparenter.go @@ -22,10 +22,9 @@ import ( "sync" "time" - "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/event" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sets" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/concurrency" @@ -54,8 +53,11 @@ type EmergencyReparenter struct { // EmergencyReparentShard operations. Options are passed by value, so it is safe // for callers to mutate and reuse options structs for multiple calls. type EmergencyReparentOptions struct { - NewPrimaryAlias *topodatapb.TabletAlias - IgnoreReplicas sets.Set[string] + NewPrimaryAlias *topodatapb.TabletAlias + IgnoreReplicas sets.Set[string] + // WaitAllTablets is used to specify whether ERS should wait for all the tablets to return and not proceed + // further after n-1 tablets have returned. + WaitAllTablets bool WaitReplicasTimeout time.Duration PreventCrossCellPromotion bool @@ -67,9 +69,14 @@ type EmergencyReparentOptions struct { // counters for Emergency Reparent Shard var ( - ersCounter = stats.NewGauge("ers_counter", "Number of times Emergency Reparent Shard has been run") - ersSuccessCounter = stats.NewGauge("ers_success_counter", "Number of times Emergency Reparent Shard has succeeded") - ersFailureCounter = stats.NewGauge("ers_failure_counter", "Number of times Emergency Reparent Shard has failed") + // TODO(timvaillancourt): remove legacyERS* gauges in v19+. 
+ legacyERSCounter = stats.NewGauge("ers_counter", "Number of times Emergency Reparent Shard has been run") + legacyERSSuccessCounter = stats.NewGauge("ers_success_counter", "Number of times Emergency Reparent Shard has succeeded") + legacyERSFailureCounter = stats.NewGauge("ers_failure_counter", "Number of times Emergency Reparent Shard has failed") + + ersCounter = stats.NewCountersWithMultiLabels("emergency_reparent_counts", "Number of times Emergency Reparent Shard has been run", + []string{"Keyspace", "Shard", "Result"}, + ) ) // NewEmergencyReparenter returns a new EmergencyReparenter object, ready to @@ -97,26 +104,33 @@ func NewEmergencyReparenter(ts *topo.Server, tmc tmclient.TabletManagerClient, l // keyspace and shard. func (erp *EmergencyReparenter) ReparentShard(ctx context.Context, keyspace string, shard string, opts EmergencyReparentOptions) (*events.Reparent, error) { var err error + statsLabels := []string{keyspace, shard} + + opts.lockAction = erp.getLockAction(opts.NewPrimaryAlias) // First step is to lock the shard for the given operation, if not already locked if err = topo.CheckShardLocked(ctx, keyspace, shard); err != nil { var unlock func(*error) - opts.lockAction = erp.getLockAction(opts.NewPrimaryAlias) ctx, unlock, err = erp.ts.LockShard(ctx, keyspace, shard, opts.lockAction) if err != nil { + ersCounter.Add(append(statsLabels, failureResult), 1) return nil, err } defer unlock(&err) } // dispatch success or failure of ERS + startTime := time.Now() ev := &events.Reparent{} defer func() { + reparentShardOpTimings.Add("EmergencyReparentShard", time.Since(startTime)) switch err { case nil: - ersSuccessCounter.Add(1) + legacyERSSuccessCounter.Add(1) + ersCounter.Add(append(statsLabels, successResult), 1) event.DispatchUpdate(ev, "finished EmergencyReparentShard") default: - ersFailureCounter.Add(1) + legacyERSFailureCounter.Add(1) + ersCounter.Add(append(statsLabels, failureResult), 1) event.DispatchUpdate(ev, "failed EmergencyReparentShard: 
"+err.Error()) } }() @@ -140,14 +154,14 @@ func (erp *EmergencyReparenter) getLockAction(newPrimaryAlias *topodatapb.Tablet func (erp *EmergencyReparenter) reparentShardLocked(ctx context.Context, ev *events.Reparent, keyspace, shard string, opts EmergencyReparentOptions) (err error) { // log the starting of the operation and increment the counter erp.logger.Infof("will initiate emergency reparent shard in keyspace - %s, shard - %s", keyspace, shard) - ersCounter.Add(1) + legacyERSCounter.Add(1) var ( stoppedReplicationSnapshot *replicationSnapshot shardInfo *topo.ShardInfo prevPrimary *topodatapb.Tablet tabletMap map[string]*topo.TabletInfo - validCandidates map[string]mysql.Position + validCandidates map[string]replication.Position intermediateSource *topodatapb.Tablet validCandidateTablets []*topodatapb.Tablet validReplacementCandidates []*topodatapb.Tablet @@ -191,7 +205,7 @@ func (erp *EmergencyReparenter) reparentShardLocked(ctx context.Context, ev *eve } // Stop replication on all the tablets and build their status map - stoppedReplicationSnapshot, err = stopReplicationAndBuildStatusMaps(ctx, erp.tmc, ev, tabletMap, topo.RemoteOperationTimeout, opts.IgnoreReplicas, opts.NewPrimaryAlias, opts.durability, erp.logger) + stoppedReplicationSnapshot, err = stopReplicationAndBuildStatusMaps(ctx, erp.tmc, ev, tabletMap, topo.RemoteOperationTimeout, opts.IgnoreReplicas, opts.NewPrimaryAlias, opts.durability, opts.WaitAllTablets, erp.logger) if err != nil { return vterrors.Wrapf(err, "failed to stop replication and build status maps: %v", err) } @@ -298,14 +312,13 @@ func (erp *EmergencyReparenter) reparentShardLocked(ctx context.Context, ev *eve if err != nil { return err } - - ev.NewPrimary = proto.Clone(newPrimary).(*topodatapb.Tablet) + ev.NewPrimary = newPrimary.CloneVT() return err } func (erp *EmergencyReparenter) waitForAllRelayLogsToApply( ctx context.Context, - validCandidates map[string]mysql.Position, + validCandidates map[string]replication.Position, 
tabletMap map[string]*topo.TabletInfo, statusMap map[string]*replicationdatapb.StopReplicationStatus, waitReplicasTimeout time.Duration, @@ -371,7 +384,7 @@ func (erp *EmergencyReparenter) waitForAllRelayLogsToApply( // findMostAdvanced finds the intermediate source for ERS. We always choose the most advanced one from our valid candidates list. Further ties are broken by looking at the promotion rules. func (erp *EmergencyReparenter) findMostAdvanced( - validCandidates map[string]mysql.Position, + validCandidates map[string]replication.Position, tabletMap map[string]*topo.TabletInfo, opts EmergencyReparentOptions, ) (*topodatapb.Tablet, []*topodatapb.Tablet, error) { diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go index fad4dfeb15b..d7f8bb6a1db 100644 --- a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go +++ b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go @@ -25,6 +25,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sets" "vitess.io/vitess/go/vt/logutil" @@ -116,7 +118,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { emergencyReparentOps EmergencyReparentOptions tmc *testutil.TabletManagerClient // setup - ts *topo.Server + cells []string keyspace string shard string unlockTopo bool @@ -161,7 +163,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: 
"MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -170,7 +172,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -179,7 +181,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", @@ -240,7 +242,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: false, }, { @@ -278,7 +280,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", 
RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -287,7 +289,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -296,7 +298,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", @@ -380,7 +382,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: false, }, { @@ -423,7 +425,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: 
"3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -432,7 +434,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -441,7 +443,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -501,7 +503,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: false, }, { @@ -552,7 +554,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ 
SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -561,7 +563,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", @@ -620,7 +622,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: false, }, { @@ -632,7 +634,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { shards: nil, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: true, errShouldContain: "node doesn't exist: keyspaces/testkeyspace/shards/-/Shard", }, @@ -691,7 +693,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: true, errShouldContain: "failed to stop replication and build status maps", }, @@ -705,13 +707,13 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Error error }{ "zone1-0000000100": { - StopStatus: &replicationdatapb.StopReplicationStatus{Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}}, + StopStatus: &replicationdatapb.StopReplicationStatus{Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: 
int32(replication.ReplicationStateRunning)}}, }, "zone1-0000000101": { - StopStatus: &replicationdatapb.StopReplicationStatus{Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}}, + StopStatus: &replicationdatapb.StopReplicationStatus{Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}}, }, "zone1-0000000102": { - StopStatus: &replicationdatapb.StopReplicationStatus{Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}}, + StopStatus: &replicationdatapb.StopReplicationStatus{Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}}, }, }, }, @@ -750,7 +752,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: true, errShouldContain: "lost topology lock, aborting", }, @@ -765,7 +767,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -774,7 +776,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: 
int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -783,7 +785,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{}, }, }, @@ -824,7 +826,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: true, errShouldContain: "encountered tablet zone1-0000000102 with no relay log position", }, @@ -842,7 +844,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { shouldErr: true, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, errShouldContain: "no valid candidates for emergency reparent", }, { @@ -859,7 +861,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -868,7 
+870,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -877,7 +879,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -937,7 +939,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { shouldErr: true, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, errShouldContain: "could not apply all relay logs within the provided waitReplicasTimeout", }, { @@ -954,7 +956,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: 
"3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -963,7 +965,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -972,7 +974,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1026,7 +1028,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: true, errShouldContain: "primary elect zone1-0000000200 has errant GTIDs", }, @@ -1039,7 +1041,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000102": nil, @@ -1076,7 +1078,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ 
"zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1085,7 +1087,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-20", @@ -1094,7 +1096,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-20", @@ -1189,7 +1191,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: 
int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1198,7 +1200,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1207,7 +1209,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1273,7 +1275,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: true, errShouldContain: "failed to be upgraded to primary", }, @@ -1312,7 +1314,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: 
&replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1321,7 +1323,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1330,7 +1332,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", @@ -1388,7 +1390,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: true, errShouldContain: "no valid candidates for emergency reparent", }, @@ -1432,7 +1434,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ 
"zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1441,7 +1443,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1450,7 +1452,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", @@ -1508,7 +1510,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: true, errShouldContain: "proposed primary zone1-0000000102 has a must not promotion 
rule", }, @@ -1547,7 +1549,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1556,7 +1558,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1565,7 +1567,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", @@ -1635,7 +1637,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1", "zone2"), + cells: 
[]string{"zone1", "zone2"}, shouldErr: true, errShouldContain: "no valid candidates for emergency reparent", }, @@ -1680,7 +1682,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1689,7 +1691,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1698,7 +1700,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", @@ -1768,7 +1770,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, 
keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1", "zone2"), + cells: []string{"zone1", "zone2"}, shouldErr: true, errShouldContain: "proposed primary zone1-0000000102 is is a different cell as the previous primary", }, @@ -1812,7 +1814,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1821,7 +1823,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1830,7 +1832,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: 
"MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", @@ -1891,7 +1893,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: true, errShouldContain: "proposed primary zone1-0000000102 will not be able to make forward progress on being promoted", }, @@ -1902,7 +1904,9 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := logutil.NewMemoryLogger() ev := &events.Reparent{} @@ -1913,12 +1917,14 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { tt.tablets[i] = tablet } - testutil.AddShards(ctx, t, tt.ts, tt.shards...) - testutil.AddTablets(ctx, t, tt.ts, nil, tt.tablets...) - reparenttestutil.SetKeyspaceDurability(ctx, t, tt.ts, tt.keyspace, tt.durability) + ts := memorytopo.NewServer(ctx, tt.cells...) + defer ts.Close() + testutil.AddShards(ctx, t, ts, tt.shards...) + testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) 
+ reparenttestutil.SetKeyspaceDurability(ctx, t, ts, tt.keyspace, tt.durability) if !tt.unlockTopo { - lctx, unlock, lerr := tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") + lctx, unlock, lerr := ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") require.NoError(t, lerr, "could not lock %s/%s for testing", tt.keyspace, tt.shard) defer func() { @@ -1929,7 +1935,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { ctx = lctx // make the reparentShardLocked call use the lock ctx } - erp := NewEmergencyReparenter(tt.ts, tt.tmc, logger) + erp := NewEmergencyReparenter(ts, tt.tmc, logger) err := erp.reparentShardLocked(ctx, ev, tt.keyspace, tt.shard, tt.emergencyReparentOps) if tt.shouldErr { @@ -1952,7 +1958,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { tmc *testutil.TabletManagerClient unlockTopo bool newPrimaryTabletAlias string - ts *topo.Server keyspace string shard string tablets []*topodatapb.Tablet @@ -2032,20 +2037,19 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { // forceStart = false Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), }, }, "zone1-0000000102": { // forceStart = true Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), }, }, }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: false, }, { @@ -2091,7 +2095,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - 
ts: memorytopo.NewServer("zone1"), shouldErr: true, errShouldContain: "primary position error", }, @@ -2141,7 +2144,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: true, errShouldContain: "failed to PopulateReparentJournal on primary", }, @@ -2205,7 +2207,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: true, errShouldContain: " replica(s) failed", }, @@ -2274,7 +2275,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { shouldErr: true, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), errShouldContain: "context deadline exceeded", }, { @@ -2336,7 +2336,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: false, }, { @@ -2410,20 +2409,19 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { // forceStart = false Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), }, }, "zone1-0000000102": { // forceStart = true Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), }, }, }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), 
shouldErr: false, }, } @@ -2435,7 +2433,8 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() logger := logutil.NewMemoryLogger() ev := &events.Reparent{ShardInfo: topo.ShardInfo{ Shard: &topodatapb.Shard{ @@ -2449,7 +2448,10 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { ev.ShardInfo.PrimaryAlias = nil } - testutil.AddShards(ctx, t, tt.ts, &vtctldatapb.Shard{ + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + + testutil.AddShards(ctx, t, ts, &vtctldatapb.Shard{ Keyspace: tt.keyspace, Name: tt.shard, }) @@ -2460,7 +2462,7 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { lerr error ) - ctx, unlock, lerr = tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") + ctx, unlock, lerr = ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") require.NoError(t, lerr, "could not lock %s/%s for test", tt.keyspace, tt.shard) defer func() { @@ -2472,7 +2474,7 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { tt.emergencyReparentOps.durability = durability - erp := NewEmergencyReparenter(tt.ts, tt.tmc, logger) + erp := NewEmergencyReparenter(ts, tt.tmc, logger) err := erp.promoteNewPrimary(ctx, ev, tabletInfo.Tablet, tt.emergencyReparentOps, tt.tabletMap, tt.statusMap) if tt.shouldErr { assert.Error(t, err) @@ -2494,7 +2496,7 @@ func TestEmergencyReparenter_waitForAllRelayLogsToApply(t *testing.T) { tests := []struct { name string tmc *testutil.TabletManagerClient - candidates map[string]mysql.Position + candidates map[string]replication.Position tabletMap map[string]*topo.TabletInfo statusMap map[string]*replicationdatapb.StopReplicationStatus shouldErr bool @@ -2511,7 +2513,7 @@ func TestEmergencyReparenter_waitForAllRelayLogsToApply(t *testing.T) { }, }, }, - candidates: map[string]mysql.Position{ + candidates: map[string]replication.Position{ 
"zone1-0000000100": {}, "zone1-0000000101": {}, }, @@ -2559,7 +2561,7 @@ func TestEmergencyReparenter_waitForAllRelayLogsToApply(t *testing.T) { }, }, }, - candidates: map[string]mysql.Position{ + candidates: map[string]replication.Position{ "zone1-0000000100": {}, "zone1-0000000101": {}, }, @@ -2610,7 +2612,7 @@ func TestEmergencyReparenter_waitForAllRelayLogsToApply(t *testing.T) { }, }, }, - candidates: map[string]mysql.Position{ + candidates: map[string]replication.Position{ "zone1-0000000100": {}, "zone1-0000000101": {}, "zone1-0000000102": {}, @@ -2675,7 +2677,7 @@ func TestEmergencyReparenter_waitForAllRelayLogsToApply(t *testing.T) { }, }, }, - candidates: map[string]mysql.Position{ + candidates: map[string]replication.Position{ "zone1-0000000100": {}, "zone1-0000000101": {}, }, @@ -2731,10 +2733,12 @@ func TestEmergencyReparenter_waitForAllRelayLogsToApply(t *testing.T) { } } -func TestEmergencyReparenterCounters(t *testing.T) { - ersCounter.Set(0) - ersSuccessCounter.Set(0) - ersFailureCounter.Set(0) +func TestEmergencyReparenterStats(t *testing.T) { + ersCounter.ResetAll() + legacyERSCounter.Reset() + legacyERSSuccessCounter.Reset() + legacyERSFailureCounter.Reset() + reparentShardOpTimings.Reset() emergencyReparentOps := EmergencyReparentOptions{} tmc := &testutil.TabletManagerClient{ @@ -2768,7 +2772,7 @@ func TestEmergencyReparenterCounters(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -2777,7 +2781,7 @@ func TestEmergencyReparenterCounters(t *testing.T) { }, 
"zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -2786,7 +2790,7 @@ func TestEmergencyReparenterCounters(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", @@ -2844,11 +2848,12 @@ func TestEmergencyReparenterCounters(t *testing.T) { } keyspace := "testkeyspace" shard := "-" - ts := memorytopo.NewServer("zone1") - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() logger := logutil.NewMemoryLogger() + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddShards(ctx, t, ts, shards...) 
testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, @@ -2862,9 +2867,13 @@ func TestEmergencyReparenterCounters(t *testing.T) { require.NoError(t, err) // check the counter values - require.EqualValues(t, 1, ersCounter.Get()) - require.EqualValues(t, 1, ersSuccessCounter.Get()) - require.EqualValues(t, 0, ersFailureCounter.Get()) + require.EqualValues(t, map[string]int64{"testkeyspace.-.success": 1}, ersCounter.Counts()) + require.EqualValues(t, map[string]int64{"All": 1, "EmergencyReparentShard": 1}, reparentShardOpTimings.Counts()) + + // check the legacy counter values + require.EqualValues(t, 1, legacyERSCounter.Get()) + require.EqualValues(t, 1, legacyERSSuccessCounter.Get()) + require.EqualValues(t, 0, legacyERSFailureCounter.Get()) // set emergencyReparentOps to request a non existent tablet emergencyReparentOps.NewPrimaryAlias = &topodatapb.TabletAlias{ @@ -2877,46 +2886,50 @@ func TestEmergencyReparenterCounters(t *testing.T) { require.Error(t, err) // check the counter values - require.EqualValues(t, 2, ersCounter.Get()) - require.EqualValues(t, 1, ersSuccessCounter.Get()) - require.EqualValues(t, 1, ersFailureCounter.Get()) + require.EqualValues(t, map[string]int64{"testkeyspace.-.success": 1, "testkeyspace.-.failure": 1}, ersCounter.Counts()) + require.EqualValues(t, map[string]int64{"All": 2, "EmergencyReparentShard": 2}, reparentShardOpTimings.Counts()) + + // check the legacy counter values + require.EqualValues(t, 2, legacyERSCounter.Get()) + require.EqualValues(t, 1, legacyERSSuccessCounter.Get()) + require.EqualValues(t, 1, legacyERSFailureCounter.Get()) } func TestEmergencyReparenter_findMostAdvanced(t *testing.T) { - sid1 := mysql.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - mysqlGTID1 := mysql.Mysql56GTID{ + sid1 := replication.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + mysqlGTID1 := replication.Mysql56GTID{ Server: sid1, Sequence: 9, } - mysqlGTID2 := mysql.Mysql56GTID{ + 
mysqlGTID2 := replication.Mysql56GTID{ Server: sid1, Sequence: 10, } - mysqlGTID3 := mysql.Mysql56GTID{ + mysqlGTID3 := replication.Mysql56GTID{ Server: sid1, Sequence: 11, } - positionMostAdvanced := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionMostAdvanced := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID1) positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID2) positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID3) - positionIntermediate1 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionIntermediate1 := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionIntermediate1.GTIDSet = positionIntermediate1.GTIDSet.AddGTID(mysqlGTID1) - positionIntermediate2 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionIntermediate2 := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionIntermediate2.GTIDSet = positionIntermediate2.GTIDSet.AddGTID(mysqlGTID1) positionIntermediate2.GTIDSet = positionIntermediate2.GTIDSet.AddGTID(mysqlGTID2) - positionOnly2 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionOnly2 := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionOnly2.GTIDSet = positionOnly2.GTIDSet.AddGTID(mysqlGTID2) - positionEmpty := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionEmpty := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} tests := []struct { name string - validCandidates map[string]mysql.Position + validCandidates map[string]replication.Position tabletMap map[string]*topo.TabletInfo emergencyReparentOps EmergencyReparentOptions result *topodatapb.Tablet @@ -2924,7 +2937,7 @@ func TestEmergencyReparenter_findMostAdvanced(t *testing.T) { }{ { name: "choose most advanced", - validCandidates: map[string]mysql.Position{ + validCandidates: map[string]replication.Position{ "zone1-0000000100": positionMostAdvanced, 
"zone1-0000000101": positionIntermediate1, "zone1-0000000102": positionIntermediate2, @@ -2972,7 +2985,7 @@ func TestEmergencyReparenter_findMostAdvanced(t *testing.T) { }, }, { name: "choose most advanced with the best promotion rule", - validCandidates: map[string]mysql.Position{ + validCandidates: map[string]replication.Position{ "zone1-0000000100": positionMostAdvanced, "zone1-0000000101": positionIntermediate1, "zone1-0000000102": positionMostAdvanced, @@ -3026,7 +3039,7 @@ func TestEmergencyReparenter_findMostAdvanced(t *testing.T) { Cell: "zone1", Uid: 102, }}, - validCandidates: map[string]mysql.Position{ + validCandidates: map[string]replication.Position{ "zone1-0000000100": positionMostAdvanced, "zone1-0000000101": positionIntermediate1, "zone1-0000000102": positionMostAdvanced, @@ -3080,7 +3093,7 @@ func TestEmergencyReparenter_findMostAdvanced(t *testing.T) { Cell: "zone1", Uid: 102, }}, - validCandidates: map[string]mysql.Position{ + validCandidates: map[string]replication.Position{ "zone1-0000000100": positionOnly2, "zone1-0000000101": positionIntermediate1, "zone1-0000000102": positionEmpty, @@ -3154,7 +3167,6 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { tmc *testutil.TabletManagerClient unlockTopo bool newPrimaryTabletAlias string - ts *topo.Server keyspace string shard string tablets []*topodatapb.Tablet @@ -3225,20 +3237,19 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { // forceStart = false Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), }, }, "zone1-0000000102": { // forceStart = true Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: 
int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), }, }, }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: false, }, { @@ -3276,7 +3287,6 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: true, errShouldContain: "primary position error", }, @@ -3318,7 +3328,6 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: true, errShouldContain: "failed to PopulateReparentJournal on primary", }, @@ -3374,7 +3383,6 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: true, errShouldContain: " replica(s) failed", }, @@ -3434,7 +3442,6 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { shouldErr: true, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), errShouldContain: "context deadline exceeded", }, { @@ -3487,7 +3494,6 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: false, }, { name: "single replica failing to SetReplicationSource does not fail the promotion", @@ -3530,7 +3536,6 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: false, }, } @@ -3542,11 +3547,15 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { 
t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := context.Background() logger := logutil.NewMemoryLogger() ev := &events.Reparent{} - testutil.AddShards(ctx, t, tt.ts, &vtctldatapb.Shard{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + + testutil.AddShards(ctx, t, ts, &vtctldatapb.Shard{ Keyspace: tt.keyspace, Name: tt.shard, }) @@ -3557,7 +3566,7 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { lerr error ) - ctx, unlock, lerr = tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") + ctx, unlock, lerr = ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") require.NoError(t, lerr, "could not lock %s/%s for test", tt.keyspace, tt.shard) defer func() { @@ -3569,7 +3578,7 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { tt.emergencyReparentOps.durability = durability - erp := NewEmergencyReparenter(tt.ts, tt.tmc, logger) + erp := NewEmergencyReparenter(ts, tt.tmc, logger) _, err := erp.reparentReplicas(ctx, ev, tabletInfo.Tablet, tt.tabletMap, tt.statusMap, tt.emergencyReparentOps, false /* waitForAllReplicas */, true /* populateReparentJournal */) if tt.shouldErr { assert.Error(t, err) @@ -3591,7 +3600,6 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { tmc *testutil.TabletManagerClient unlockTopo bool newSourceTabletAlias string - ts *topo.Server keyspace string shard string tablets []*topodatapb.Tablet @@ -3664,20 +3672,19 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { // forceStart = false Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), }, }, "zone1-0000000102": { // forceStart = true Before: 
&replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), }, }, }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: false, result: []*topodatapb.Tablet{ { @@ -3795,7 +3802,6 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: true, errShouldContain: " replica(s) failed", }, @@ -3849,7 +3855,6 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: false, validCandidateTablets: []*topodatapb.Tablet{ { @@ -3938,20 +3943,19 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { // forceStart = false Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), }, }, "zone1-0000000102": { // forceStart = true Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), }, }, }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: false, result: []*topodatapb.Tablet{ { @@ -3991,11 +3995,15 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := 
context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() logger := logutil.NewMemoryLogger() ev := &events.Reparent{} - testutil.AddShards(ctx, t, tt.ts, &vtctldatapb.Shard{ + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + + testutil.AddShards(ctx, t, ts, &vtctldatapb.Shard{ Keyspace: tt.keyspace, Name: tt.shard, }) @@ -4006,7 +4014,7 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { lerr error ) - ctx, unlock, lerr = tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") + ctx, unlock, lerr = ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") require.NoError(t, lerr, "could not lock %s/%s for test", tt.keyspace, tt.shard) defer func() { @@ -4018,7 +4026,7 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { tt.emergencyReparentOps.durability = durability - erp := NewEmergencyReparenter(tt.ts, tt.tmc, logger) + erp := NewEmergencyReparenter(ts, tt.tmc, logger) res, err := erp.promoteIntermediateSource(ctx, ev, tabletInfo.Tablet, tt.tabletMap, tt.statusMap, tt.validCandidateTablets, tt.emergencyReparentOps) if tt.shouldErr { assert.Error(t, err) @@ -4289,17 +4297,19 @@ func TestParentContextCancelled(t *testing.T) { statusMap := map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), }, }, } keyspace := "testkeyspace" shard := "-" - ts := memorytopo.NewServer("zone1") ctx, cancel := context.WithCancel(context.Background()) defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + logger := logutil.NewMemoryLogger() ev := &events.Reparent{} diff --git a/go/vt/vtctl/reparentutil/planned_reparenter.go b/go/vt/vtctl/reparentutil/planned_reparenter.go index 8633afa13d0..9fc933a8e35 100644 
--- a/go/vt/vtctl/reparentutil/planned_reparenter.go +++ b/go/vt/vtctl/reparentutil/planned_reparenter.go @@ -22,21 +22,28 @@ import ( "sync" "time" - "google.golang.org/protobuf/proto" + "golang.org/x/sync/errgroup" "vitess.io/vitess/go/event" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/logutil" + logutilpb "vitess.io/vitess/go/vt/proto/logutil" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools/events" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tmclient" +) - logutilpb "vitess.io/vitess/go/vt/proto/logutil" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/proto/vtrpc" +// counters for Planned Reparent Shard +var ( + prsCounter = stats.NewCountersWithMultiLabels("planned_reparent_counts", "Number of times Planned Reparent Shard has been run", + []string{"Keyspace", "Shard", "Result"}, + ) ) // PlannedReparenter performs PlannedReparentShard operations. @@ -88,11 +95,14 @@ func NewPlannedReparenter(ts *topo.Server, tmc tmclient.TabletManagerClient, log // both the current and desired primary are reachable and in a good state. 
func (pr *PlannedReparenter) ReparentShard(ctx context.Context, keyspace string, shard string, opts PlannedReparentOptions) (*events.Reparent, error) { var err error + statsLabels := []string{keyspace, shard} + if err = topo.CheckShardLocked(ctx, keyspace, shard); err != nil { var unlock func(*error) opts.lockAction = pr.getLockAction(opts) ctx, unlock, err = pr.ts.LockShard(ctx, keyspace, shard, opts.lockAction) if err != nil { + prsCounter.Add(append(statsLabels, failureResult), 1) return nil, err } defer unlock(&err) @@ -101,18 +111,23 @@ func (pr *PlannedReparenter) ReparentShard(ctx context.Context, keyspace string, if opts.NewPrimaryAlias == nil && opts.AvoidPrimaryAlias == nil { shardInfo, err := pr.ts.GetShard(ctx, keyspace, shard) if err != nil { + prsCounter.Add(append(statsLabels, failureResult), 1) return nil, err } opts.AvoidPrimaryAlias = shardInfo.PrimaryAlias } + startTime := time.Now() ev := &events.Reparent{} defer func() { + reparentShardOpTimings.Add("PlannedReparentShard", time.Since(startTime)) switch err { case nil: + prsCounter.Add(append(statsLabels, successResult), 1) event.DispatchUpdate(ev, "finished PlannedReparentShard") default: + prsCounter.Add(append(statsLabels, failureResult), 1) event.DispatchUpdate(ev, "failed PlannedReparentShard: "+err.Error()) } }() @@ -198,9 +213,7 @@ func (pr *PlannedReparenter) preflightChecks( if !canEstablishForTablet(opts.durability, newPrimaryTabletInfo.Tablet, tabletsReachable) { return true, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "primary-elect tablet %v won't be able to make forward progress on promotion", primaryElectAliasStr) } - - ev.NewPrimary = proto.Clone(newPrimaryTabletInfo.Tablet).(*topodatapb.Tablet) - + ev.NewPrimary = newPrimaryTabletInfo.Tablet.CloneVT() return false, nil } @@ -215,7 +228,7 @@ func (pr *PlannedReparenter) performGracefulPromotion( opts PlannedReparentOptions, ) error { primaryElectAliasStr := topoproto.TabletAliasString(primaryElect.Alias) - ev.OldPrimary = 
proto.Clone(currentPrimary.Tablet).(*topodatapb.Tablet) + ev.OldPrimary = currentPrimary.Tablet.CloneVT() // Before demoting the old primary, we're going to ensure that replication // is working from the old primary to the primary-elect. If replication is @@ -375,7 +388,7 @@ func (pr *PlannedReparenter) performPotentialPromotion( type tabletPos struct { alias string tablet *topodatapb.Tablet - pos mysql.Position + pos replication.Position } positions := make(chan tabletPos, len(tabletMap)) @@ -422,7 +435,7 @@ func (pr *PlannedReparenter) performPotentialPromotion( return } - pos, err := mysql.DecodePosition(primaryStatus.Position) + pos, err := replication.DecodePosition(primaryStatus.Position) if err != nil { rec.RecordError(vterrors.Wrapf(err, "cannot decode replication position (%v) for demoted tablet %v", primaryStatus.Position, alias)) @@ -518,6 +531,11 @@ func (pr *PlannedReparenter) reparentShardLocked( return err } + err = pr.verifyAllTabletsReachable(ctx, tabletMap) + if err != nil { + return err + } + // Check invariants that PlannedReparentShard depends on. if isNoop, err := pr.preflightChecks(ctx, ev, keyspace, shard, tabletMap, &opts); err != nil { return err @@ -572,12 +590,12 @@ func (pr *PlannedReparenter) reparentShardLocked( // inserted in the new primary's journal, so we can use it below to check // that all the replicas have attached to new primary successfully. switch { - case currentPrimary == nil && ev.ShardInfo.PrimaryAlias == nil: + case currentPrimary == nil && ev.ShardInfo.PrimaryTermStartTime == nil: // Case (1): no primary has been elected ever. Initialize // the primary-elect tablet reparentJournalPos, err = pr.performInitialPromotion(ctx, ev.NewPrimary, opts) needsRefresh = true - case currentPrimary == nil && ev.ShardInfo.PrimaryAlias != nil: + case currentPrimary == nil && ev.ShardInfo.PrimaryTermStartTime != nil: // Case (2): no clear current primary. Try to find a safe promotion // candidate, and promote to it. 
err = pr.performPotentialPromotion(ctx, keyspace, shard, ev.NewPrimary, tabletMap, opts) @@ -713,3 +731,20 @@ func (pr *PlannedReparenter) reparentTablets( return nil } + +// verifyAllTabletsReachable verifies that all the tablets are reachable when running PRS. +func (pr *PlannedReparenter) verifyAllTabletsReachable(ctx context.Context, tabletMap map[string]*topo.TabletInfo) error { + // Create a cancellable context for the entire set of RPCs to verify reachability. + verifyCtx, verifyCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) + defer verifyCancel() + + errorGroup, groupCtx := errgroup.WithContext(verifyCtx) + for _, info := range tabletMap { + tablet := info.Tablet + errorGroup.Go(func() error { + _, err := pr.tmc.PrimaryStatus(groupCtx, tablet) + return err + }) + } + return errorGroup.Wait() +} diff --git a/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go b/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go index 20815db3dfc..c564a95167e 100644 --- a/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go +++ b/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go @@ -78,7 +78,6 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { tests := []struct { name string - ts *topo.Server tmc tmclient.TabletManagerClient tablets []*topodatapb.Tablet lockShardBeforeTest bool @@ -92,7 +91,6 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { }{ { name: "success", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PrimaryPositionResults: map[string]struct { Position string @@ -112,6 +110,18 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { SetReadWriteResults: map[string]error{ "zone1-0000000100": nil, }, + // This is only needed to verify reachability, so empty results are fine. 
+ PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, }, tablets: []*topodatapb.Tablet{ { @@ -166,7 +176,6 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { }, { name: "success - new primary not provided", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ ReplicationStatusResults: map[string]struct { Position *replicationdatapb.Status @@ -222,6 +231,18 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { PopulateReparentJournalResults: map[string]error{ "zone1-0000000200": nil, }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, }, tablets: []*topodatapb.Tablet{ { @@ -280,7 +301,6 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { }, { name: "already locked shard", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PrimaryPositionResults: map[string]struct { Position string @@ -300,6 +320,18 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { SetReadWriteResults: map[string]error{ "zone1-0000000100": nil, }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, }, tablets: []*topodatapb.Tablet{ { @@ -359,8 +391,17 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { // fail the preflight checks. 
Other functions are unit-tested // thoroughly to cover all the cases. name: "reparent fails", - ts: memorytopo.NewServer("zone1"), - tmc: nil, + tmc: &testutil.TabletManagerClient{ + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, + }, tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -400,7 +441,6 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { }, } - ctx := context.Background() logger := logutil.NewMemoryLogger() for _, tt := range tests { @@ -409,15 +449,18 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := ctx + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() - testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, SkipShardCreation: false, }, tt.tablets...) 
if tt.lockShardBeforeTest { - lctx, unlock, err := tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "locking for test") + lctx, unlock, err := ts.LockShard(ctx, tt.keyspace, tt.shard, "locking for test") require.NoError(t, err, "could not lock %s/%s for test case", tt.keyspace, tt.shard) defer func() { @@ -428,7 +471,7 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { ctx = lctx } - pr := NewPlannedReparenter(tt.ts, tt.tmc, logger) + pr := NewPlannedReparenter(ts, tt.tmc, logger) ev, err := pr.ReparentShard(ctx, tt.keyspace, tt.shard, tt.opts) if tt.shouldErr { assert.Error(t, err) @@ -516,7 +559,6 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { tests := []struct { name string - ts *topo.Server tmc tmclient.TabletManagerClient tablets []*topodatapb.Tablet @@ -907,7 +949,6 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { }, } - ctx := context.Background() logger := logutil.NewMemoryLogger() for _, tt := range tests { @@ -916,6 +957,11 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + defer func() { if tt.expectedEvent != nil { AssertReparentEventsEqualWithMessage(t, tt.expectedEvent, tt.ev, "expected preflightChecks to mutate the passed-in event") @@ -926,7 +972,7 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { } }() - pr := NewPlannedReparenter(tt.ts, tt.tmc, logger) + pr := NewPlannedReparenter(ts, tt.tmc, logger) if tt.opts.durability == nil { durability, err := GetDurabilityPolicy("none") require.NoError(t, err) @@ -951,7 +997,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { tests := []struct { name string - ts *topo.Server tmc tmclient.TabletManagerClient unlockTopo bool ctxTimeout time.Duration @@ -973,7 +1018,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }{ { name: 
"successful promotion", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -1030,7 +1074,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }, { name: "cannot get snapshot of current primary", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PrimaryPositionResults: map[string]struct { Position string @@ -1064,7 +1107,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }, { name: "primary-elect fails to catch up to current primary snapshot position", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PrimaryPositionResults: map[string]struct { Position string @@ -1101,7 +1143,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }, { name: "primary-elect times out catching up to current primary snapshot position", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PrimaryPositionResults: map[string]struct { Position string @@ -1143,7 +1184,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }, { name: "lost topology lock", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PrimaryPositionResults: map[string]struct { Position string @@ -1181,7 +1221,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }, { name: "failed to demote current primary", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -1226,7 +1265,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }, { name: "primary-elect fails to catch up to current primary demotion position", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -1283,7 +1321,6 @@ func 
TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }, { name: "primary-elect times out catching up to current primary demotion position", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -1345,7 +1382,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }, { name: "demotion succeeds but parent context times out", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -1402,7 +1438,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }, { name: "rollback fails", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -1465,7 +1500,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }, { name: "rollback succeeds", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -1528,7 +1562,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }, } - ctx := context.Background() logger := logutil.NewMemoryLogger() for _, tt := range tests { @@ -1537,15 +1570,18 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := ctx + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() - testutil.AddShards(ctx, t, tt.ts, &vtctldatapb.Shard{ + testutil.AddShards(ctx, t, ts, &vtctldatapb.Shard{ Keyspace: tt.keyspace, Name: tt.shard, }) if !tt.unlockTopo { - lctx, unlock, err := tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") + lctx, unlock, err := ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") require.NoError(t, err, "could 
not lock %s/%s for testing", tt.keyspace, tt.shard) defer func() { @@ -1556,7 +1592,7 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { ctx = lctx } - pr := NewPlannedReparenter(tt.ts, tt.tmc, logger) + pr := NewPlannedReparenter(ts, tt.tmc, logger) if tt.ctxTimeout > 0 { _ctx, cancel := context.WithTimeout(ctx, tt.ctxTimeout) @@ -1600,7 +1636,6 @@ func TestPlannedReparenter_performInitialPromotion(t *testing.T) { tests := []struct { name string - ts *topo.Server tmc tmclient.TabletManagerClient ctxTimeout time.Duration @@ -1614,7 +1649,6 @@ func TestPlannedReparenter_performInitialPromotion(t *testing.T) { }{ { name: "successful promotion", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ InitPrimaryResults: map[string]struct { Result string @@ -1640,7 +1674,6 @@ func TestPlannedReparenter_performInitialPromotion(t *testing.T) { }, { name: "primary-elect fails to promote", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ InitPrimaryResults: map[string]struct { Result string @@ -1664,7 +1697,6 @@ func TestPlannedReparenter_performInitialPromotion(t *testing.T) { }, { name: "promotion succeeds but parent context times out", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ InitPrimaryPostDelays: map[string]time.Duration{ "zone1-0000000200": time.Millisecond * 100, // 10x the parent context timeout @@ -1692,7 +1724,6 @@ func TestPlannedReparenter_performInitialPromotion(t *testing.T) { }, } - ctx := context.Background() logger := logutil.NewMemoryLogger() for _, tt := range tests { @@ -1701,14 +1732,17 @@ func TestPlannedReparenter_performInitialPromotion(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := ctx + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() - testutil.AddShards(ctx, t, tt.ts, &vtctldatapb.Shard{ + testutil.AddShards(ctx, t, ts, &vtctldatapb.Shard{ 
Keyspace: tt.keyspace, Name: tt.shard, }) - pr := NewPlannedReparenter(tt.ts, tt.tmc, logger) + pr := NewPlannedReparenter(ts, tt.tmc, logger) if tt.ctxTimeout > 0 { _ctx, cancel := context.WithTimeout(ctx, tt.ctxTimeout) @@ -1897,7 +1931,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { tests := []struct { name string - ts *topo.Server tmc tmclient.TabletManagerClient timeout time.Duration unlockTopo bool @@ -1911,7 +1944,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { }{ { name: "success", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -1976,7 +2008,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { }, { name: "failed to DemotePrimary on a tablet", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -2011,7 +2042,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { }, { name: "timed out during DemotePrimary on a tablet", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryDelays: map[string]time.Duration{ "zone1-0000000100": time.Millisecond * 50, @@ -2052,7 +2082,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { }, { name: "failed to DecodePosition on a tablet's demote position", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -2089,7 +2118,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { }, { name: "primary-elect not in tablet map", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{}, unlockTopo: false, keyspace: "testkeyspace", @@ -2105,7 +2133,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { }, { name: "primary-elect 
not most at most advanced position", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -2170,7 +2197,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { }, { name: "lost topology lock", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -2235,7 +2261,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { }, } - ctx := context.Background() logger := logutil.NewMemoryLogger() for _, tt := range tests { @@ -2244,16 +2269,20 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := ctx + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + pr := NewPlannedReparenter(nil, tt.tmc, logger) - testutil.AddShards(ctx, t, tt.ts, &vtctldatapb.Shard{ + testutil.AddShards(ctx, t, ts, &vtctldatapb.Shard{ Keyspace: tt.keyspace, Name: tt.shard, }) if !tt.unlockTopo { - lctx, unlock, err := tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") + lctx, unlock, err := ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") require.NoError(t, err, "could not lock %s/%s for testing", tt.keyspace, tt.shard) defer func() { @@ -2290,7 +2319,6 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { tests := []struct { name string - ts *topo.Server tmc tmclient.TabletManagerClient tablets []*topodatapb.Tablet unlockTopo bool @@ -2305,7 +2333,6 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }{ { name: "success: current primary cannot be determined", // "Case (1)" - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -2324,6 +2351,18 @@ func 
TestPlannedReparenter_reparentShardLocked(t *testing.T) { Error: nil, }, }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, PopulateReparentJournalResults: map[string]error{ "zone1-0000000200": nil, // zone1-200 gets promoted }, @@ -2385,7 +2424,6 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, { name: "success: current primary is desired primary", // "Case (2)" - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PrimaryPositionResults: map[string]struct { Position string @@ -2405,6 +2443,18 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { SetReadWriteResults: map[string]error{ "zone1-0000000100": nil, }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, }, tablets: []*topodatapb.Tablet{ { @@ -2460,7 +2510,6 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, { name: "success: graceful promotion", // "Case (3)" - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -2474,6 +2523,18 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { Error: nil, }, }, + // This is only needed to verify reachability, so empty results are fine. 
+ PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, PrimaryPositionResults: map[string]struct { Position string Error error @@ -2544,7 +2605,6 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, { name: "shard not found", - ts: memorytopo.NewServer("zone1"), tmc: nil, tablets: nil, unlockTopo: true, @@ -2559,7 +2619,6 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, { name: "shard initialization", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000200": nil, @@ -2576,6 +2635,18 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { SetReplicationSourceResults: map[string]error{ "zone1-0000000100": nil, // called during reparentTablets to make this tablet a replica of newPrimary }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, }, tablets: []*topodatapb.Tablet{ // Shard has no current primary in the beginning. 
@@ -2628,7 +2699,6 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, { name: "shard initialization with no new primary provided", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000200": nil, @@ -2647,15 +2717,29 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { Error error }{ "zone1-0000000200": { - Error: mysql.ErrNotReplica, + Position: &replicationdatapb.Status{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-2", + }, }, "zone1-0000000100": { - Error: fmt.Errorf("not providing replication status, so that 200 wins"), + Error: mysql.ErrNotReplica, }, }, SetReplicationSourceResults: map[string]error{ "zone1-0000000100": nil, // called during reparentTablets to make this tablet a replica of newPrimary }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, }, tablets: []*topodatapb.Tablet{ // Shard has no current primary in the beginning. @@ -2702,8 +2786,20 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, { name: "preflight checks determine PRS is no-op", - ts: memorytopo.NewServer("zone1"), - tmc: nil, + tmc: &testutil.TabletManagerClient{ + // This is only needed to verify reachability, so empty results are fine. 
+ PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, + }, tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -2750,11 +2846,22 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, { name: "promotion step fails", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ SetReadWriteResults: map[string]error{ "zone1-0000000100": assert.AnError, }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, }, tablets: []*topodatapb.Tablet{ { @@ -2810,8 +2917,19 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, { name: "lost topology lock", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ + // This is only needed to verify reachability, so empty results are fine. 
+ PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, PrimaryPositionResults: map[string]struct { Position string Error error @@ -2881,7 +2999,6 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, { name: "failed to reparent tablets", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PrimaryPositionResults: map[string]struct { Position string @@ -2892,6 +3009,18 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { Error: nil, }, }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": assert.AnError, }, @@ -2953,7 +3082,6 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, } - ctx := context.Background() logger := logutil.NewMemoryLogger() for _, tt := range tests { @@ -2962,16 +3090,19 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := ctx + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() - testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, ForceSetShardPrimary: true, // Some of our test cases count on having multiple primaries, so let the last one "win". SkipShardCreation: false, }, tt.tablets...) 
if !tt.unlockTopo { - lctx, unlock, err := tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "locking for testing") + lctx, unlock, err := ts.LockShard(ctx, tt.keyspace, tt.shard, "locking for testing") require.NoError(t, err, "could not lock %s/%s for testing", tt.keyspace, tt.shard) defer func() { @@ -2988,7 +3119,7 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }() } - pr := NewPlannedReparenter(tt.ts, tt.tmc, logger) + pr := NewPlannedReparenter(ts, tt.tmc, logger) err := pr.reparentShardLocked(ctx, tt.ev, tt.keyspace, tt.shard, tt.opts) if tt.shouldErr { @@ -3638,3 +3769,294 @@ func AssertReparentEventsEqual(t *testing.T, expected *events.Reparent, actual * AssertReparentEventsEqualWithMessage(t, expected, actual, "") } + +// TestPlannedReparenter_verifyAllTabletsReachable tests the functionality of verifyAllTabletsReachable. +func TestPlannedReparenter_verifyAllTabletsReachable(t *testing.T) { + tests := []struct { + name string + tmc tmclient.TabletManagerClient + tabletMap map[string]*topo.TabletInfo + remoteOpTime time.Duration + wantErr string + }{ + { + name: "Success", + tmc: &testutil.TabletManagerClient{ + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000201": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + "zone1-0000000200": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000201": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 201, + }, + 
Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + }, { + name: "Failure", + tmc: &testutil.TabletManagerClient{ + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Error: fmt.Errorf("primary status failed"), + }, + "zone1-0000000201": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + "zone1-0000000200": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000201": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 201, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + wantErr: "primary status failed", + }, { + name: "Timeout", + tmc: &testutil.TabletManagerClient{ + PrimaryStatusDelays: map[string]time.Duration{ + "zone1-0000000100": 20 * time.Second, + }, + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000201": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, + }, + remoteOpTime: 100 * time.Millisecond, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + "zone1-0000000200": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000201": { + 
Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 201, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + wantErr: "context deadline exceeded", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + + pr := &PlannedReparenter{ + ts: ts, + tmc: tt.tmc, + } + if tt.remoteOpTime != 0 { + oldTime := topo.RemoteOperationTimeout + topo.RemoteOperationTimeout = tt.remoteOpTime + defer func() { + topo.RemoteOperationTimeout = oldTime + }() + } + err := pr.verifyAllTabletsReachable(context.Background(), tt.tabletMap) + if tt.wantErr == "" { + require.NoError(t, err) + return + } + require.ErrorContains(t, err, tt.wantErr) + }) + } +} + +func TestPlannedReparenterStats(t *testing.T) { + prsCounter.ResetAll() + reparentShardOpTimings.Reset() + + tmc := &testutil.TabletManagerClient{ + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": { + Position: "position1", + Error: nil, + }, + }, + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + SetReplicationSourceResults: map[string]error{ + "zone1-0000000101": nil, + }, + SetReadWriteResults: map[string]error{ + "zone1-0000000100": nil, + }, + // This is only needed to verify reachability, so empty results are fine. 
+ PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000101": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, + } + shards := []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + } + tablets := []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + } + plannedReparentOps := PlannedReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + } + keyspace := "testkeyspace" + shard := "-" + ts := memorytopo.NewServer(context.Background(), "zone1") + + ctx := context.Background() + logger := logutil.NewMemoryLogger() + + testutil.AddShards(ctx, t, ts, shards...) + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ + AlsoSetShardPrimary: true, + SkipShardCreation: false, + }, tablets...) 
+ + prp := NewPlannedReparenter(ts, tmc, logger) + // run a successful prs + _, err := prp.ReparentShard(ctx, keyspace, shard, plannedReparentOps) + require.NoError(t, err) + + // check the counter values + require.EqualValues(t, map[string]int64{"testkeyspace.-.success": 1}, prsCounter.Counts()) + require.EqualValues(t, map[string]int64{"All": 1, "PlannedReparentShard": 1}, reparentShardOpTimings.Counts()) + + // set plannedReparentOps to request a non existent tablet + plannedReparentOps.NewPrimaryAlias = &topodatapb.TabletAlias{ + Cell: "bogus", + Uid: 100, + } + + // run a failing prs + _, err = prp.ReparentShard(ctx, keyspace, shard, plannedReparentOps) + require.Error(t, err) + + // check the counter values + require.EqualValues(t, map[string]int64{"testkeyspace.-.success": 1, "testkeyspace.-.failure": 1}, prsCounter.Counts()) + require.EqualValues(t, map[string]int64{"All": 2, "PlannedReparentShard": 2}, reparentShardOpTimings.Counts()) +} diff --git a/go/vt/vtctl/reparentutil/reparent_sorter.go b/go/vt/vtctl/reparentutil/reparent_sorter.go index 77547827d37..e4461b78064 100644 --- a/go/vt/vtctl/reparentutil/reparent_sorter.go +++ b/go/vt/vtctl/reparentutil/reparent_sorter.go @@ -19,7 +19,7 @@ package reparentutil import ( "sort" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/vterrors" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -30,12 +30,12 @@ import ( // candidate for intermediate promotion in emergency reparent shard, and the new primary in planned reparent shard type reparentSorter struct { tablets []*topodatapb.Tablet - positions []mysql.Position + positions []replication.Position durability Durabler } // newReparentSorter creates a new reparentSorter -func newReparentSorter(tablets []*topodatapb.Tablet, positions []mysql.Position, durability Durabler) *reparentSorter { +func newReparentSorter(tablets []*topodatapb.Tablet, positions []replication.Position, durability Durabler) *reparentSorter { 
return &reparentSorter{ tablets: tablets, positions: positions, @@ -84,7 +84,7 @@ func (rs *reparentSorter) Less(i, j int) bool { // sortTabletsForReparent sorts the tablets, given their positions for emergency reparent shard and planned reparent shard. // Tablets are sorted first by their replication positions, with ties broken by the promotion rules. -func sortTabletsForReparent(tablets []*topodatapb.Tablet, positions []mysql.Position, durability Durabler) error { +func sortTabletsForReparent(tablets []*topodatapb.Tablet, positions []replication.Position, durability Durabler) error { // throw an error internal error in case of unequal number of tablets and positions // fail-safe code prevents panic in sorting in case the lengths are unequal if len(tablets) != len(positions) { diff --git a/go/vt/vtctl/reparentutil/reparent_sorter_test.go b/go/vt/vtctl/reparentutil/reparent_sorter_test.go index 469d9ac2c88..c21c95ad22b 100644 --- a/go/vt/vtctl/reparentutil/reparent_sorter_test.go +++ b/go/vt/vtctl/reparentutil/reparent_sorter_test.go @@ -21,14 +21,15 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) // TestReparentSorter tests that the sorting for tablets works correctly func TestReparentSorter(t *testing.T) { - sid1 := mysql.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - sid2 := mysql.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16} + sid1 := replication.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + sid2 := replication.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16} cell1 := "cell1" cell2 := "cell2" tabletReplica1_100 := &topodatapb.Tablet{ @@ -60,64 +61,64 @@ func TestReparentSorter(t *testing.T) { Type: topodatapb.TabletType_RDONLY, } - mysqlGTID1 := mysql.Mysql56GTID{ + mysqlGTID1 := replication.Mysql56GTID{ Server: sid1, Sequence: 9, } - mysqlGTID2 := mysql.Mysql56GTID{ + mysqlGTID2 := 
replication.Mysql56GTID{ Server: sid2, Sequence: 10, } - mysqlGTID3 := mysql.Mysql56GTID{ + mysqlGTID3 := replication.Mysql56GTID{ Server: sid1, Sequence: 11, } - positionMostAdvanced := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionMostAdvanced := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID1) positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID2) positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID3) - positionEmpty := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionEmpty := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} - positionIntermediate1 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionIntermediate1 := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionIntermediate1.GTIDSet = positionIntermediate1.GTIDSet.AddGTID(mysqlGTID1) - positionIntermediate2 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionIntermediate2 := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionIntermediate2.GTIDSet = positionIntermediate2.GTIDSet.AddGTID(mysqlGTID1) positionIntermediate2.GTIDSet = positionIntermediate2.GTIDSet.AddGTID(mysqlGTID2) testcases := []struct { name string tablets []*topodatapb.Tablet - positions []mysql.Position + positions []replication.Position containsErr string sortedTablets []*topodatapb.Tablet }{ { name: "all advanced, sort via promotion rules", tablets: []*topodatapb.Tablet{nil, tabletReplica1_100, tabletRdonly1_102}, - positions: []mysql.Position{positionMostAdvanced, positionMostAdvanced, positionMostAdvanced}, + positions: []replication.Position{positionMostAdvanced, positionMostAdvanced, positionMostAdvanced}, sortedTablets: []*topodatapb.Tablet{tabletReplica1_100, tabletRdonly1_102, nil}, }, { name: "ordering by position", tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletReplica2_100, tabletReplica1_100, 
tabletRdonly1_102}, - positions: []mysql.Position{positionEmpty, positionIntermediate1, positionIntermediate2, positionMostAdvanced}, + positions: []replication.Position{positionEmpty, positionIntermediate1, positionIntermediate2, positionMostAdvanced}, sortedTablets: []*topodatapb.Tablet{tabletRdonly1_102, tabletReplica1_100, tabletReplica2_100, tabletReplica1_101}, }, { name: "tablets and positions count error", tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletReplica2_100}, - positions: []mysql.Position{positionEmpty, positionIntermediate1, positionMostAdvanced}, + positions: []replication.Position{positionEmpty, positionIntermediate1, positionMostAdvanced}, containsErr: "unequal number of tablets and positions", }, { name: "promotion rule check", tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletRdonly1_102}, - positions: []mysql.Position{positionMostAdvanced, positionMostAdvanced}, + positions: []replication.Position{positionMostAdvanced, positionMostAdvanced}, sortedTablets: []*topodatapb.Tablet{tabletReplica1_101, tabletRdonly1_102}, }, { name: "mixed", tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletReplica2_100, tabletReplica1_100, tabletRdonly1_102}, - positions: []mysql.Position{positionEmpty, positionIntermediate1, positionMostAdvanced, positionIntermediate1}, + positions: []replication.Position{positionEmpty, positionIntermediate1, positionMostAdvanced, positionIntermediate1}, sortedTablets: []*topodatapb.Tablet{tabletReplica1_100, tabletReplica2_100, tabletRdonly1_102, tabletReplica1_101}, }, } diff --git a/go/vt/vtctl/reparentutil/replication.go b/go/vt/vtctl/reparentutil/replication.go index ddc83ad43f4..9b33a5b0536 100644 --- a/go/vt/vtctl/reparentutil/replication.go +++ b/go/vt/vtctl/reparentutil/replication.go @@ -22,7 +22,8 @@ import ( "time" "vitess.io/vitess/go/event" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sets" 
"vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" @@ -44,13 +45,13 @@ import ( func FindValidEmergencyReparentCandidates( statusMap map[string]*replicationdatapb.StopReplicationStatus, primaryStatusMap map[string]*replicationdatapb.PrimaryStatus, -) (map[string]mysql.Position, error) { - replicationStatusMap := make(map[string]*mysql.ReplicationStatus, len(statusMap)) - positionMap := make(map[string]mysql.Position) +) (map[string]replication.Position, error) { + replicationStatusMap := make(map[string]*replication.ReplicationStatus, len(statusMap)) + positionMap := make(map[string]replication.Position) // Build out replication status list from proto types. for alias, statuspb := range statusMap { - status := mysql.ProtoToReplicationStatus(statuspb.After) + status := replication.ProtoToReplicationStatus(statuspb.After) replicationStatusMap[alias] = &status } @@ -63,7 +64,7 @@ func FindValidEmergencyReparentCandidates( ) for alias, status := range replicationStatusMap { - if _, ok := status.RelayLogPosition.GTIDSet.(mysql.Mysql56GTIDSet); ok { + if _, ok := status.RelayLogPosition.GTIDSet.(replication.Mysql56GTIDSet); ok { isGTIDBased = true } else { isNonGTIDBased = true @@ -98,14 +99,14 @@ func FindValidEmergencyReparentCandidates( // This condition should really never happen, since we did the same cast // in the earlier loop, but let's be doubly sure. - relayLogGTIDSet, ok := status.RelayLogPosition.GTIDSet.(mysql.Mysql56GTIDSet) + relayLogGTIDSet, ok := status.RelayLogPosition.GTIDSet.(replication.Mysql56GTIDSet) if !ok { return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "we got a filled-in relay log position, but it's not of type Mysql56GTIDSet, even though we've determined we need to use GTID based assesment") } // We need to remove this alias's status from the list, otherwise the // GTID diff will always be empty. 
- statusList := make([]*mysql.ReplicationStatus, 0, len(replicationStatusMap)-1) + statusList := make([]*replication.ReplicationStatus, 0, len(replicationStatusMap)-1) for a, s := range replicationStatusMap { if a != alias { @@ -126,12 +127,12 @@ func FindValidEmergencyReparentCandidates( continue } - pos := mysql.Position{GTIDSet: relayLogGTIDSet} + pos := replication.Position{GTIDSet: relayLogGTIDSet} positionMap[alias] = pos } for alias, primaryStatus := range primaryStatusMap { - executedPosition, err := mysql.DecodePosition(primaryStatus.Position) + executedPosition, err := replication.DecodePosition(primaryStatus.Position) if err != nil { return nil, vterrors.Wrapf(err, "could not decode a primary status executed position for tablet %v: %v", alias, err) } @@ -150,9 +151,9 @@ func ReplicaWasRunning(stopStatus *replicationdatapb.StopReplicationStatus) (boo return false, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "could not determine Before state of StopReplicationStatus %v", stopStatus) } - replStatus := mysql.ProtoToReplicationStatus(stopStatus.Before) - return (replStatus.IOState == mysql.ReplicationStateRunning) || - (replStatus.SQLState == mysql.ReplicationStateRunning), nil + replStatus := replication.ProtoToReplicationStatus(stopStatus.Before) + return (replStatus.IOState == replication.ReplicationStateRunning) || + (replStatus.SQLState == replication.ReplicationStateRunning), nil } // SQLThreadWasRunning returns true if a StopReplicationStatus indicates that the @@ -163,8 +164,8 @@ func SQLThreadWasRunning(stopStatus *replicationdatapb.StopReplicationStatus) (b return false, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "could not determine Before state of StopReplicationStatus %v", stopStatus) } - replStatus := mysql.ProtoToReplicationStatus(stopStatus.Before) - return replStatus.SQLState == mysql.ReplicationStateRunning, nil + replStatus := replication.ProtoToReplicationStatus(stopStatus.Before) + return replStatus.SQLState == 
replication.ReplicationStateRunning, nil } // SetReplicationSource is used to set the replication source on the specified @@ -217,6 +218,7 @@ func stopReplicationAndBuildStatusMaps( ignoredTablets sets.Set[string], tabletToWaitFor *topodatapb.TabletAlias, durability Durabler, + waitForAllTablets bool, logger logutil.Logger, ) (*replicationSnapshot, error) { event.DispatchUpdate(ev, "stop replication on all replicas") @@ -248,8 +250,8 @@ func stopReplicationAndBuildStatusMaps( stopReplicationStatus, err := tmc.StopReplicationAndGetStatus(groupCtx, tabletInfo.Tablet, replicationdatapb.StopReplicationMode_IOTHREADONLY) if err != nil { - sqlErr, isSQLErr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) - if isSQLErr && sqlErr != nil && sqlErr.Number() == mysql.ERNotReplica { + sqlErr, isSQLErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) + if isSQLErr && sqlErr != nil && sqlErr.Number() == sqlerror.ERNotReplica { var primaryStatus *replicationdatapb.PrimaryStatus primaryStatus, err = tmc.DemotePrimary(groupCtx, tabletInfo.Tablet) @@ -291,6 +293,12 @@ func stopReplicationAndBuildStatusMaps( } } + // For the tablets that we want to get a response from necessarily, we + // get them to set the MustWaitFor boolean as part of the concurrency.Error message + // that we send to the waitGroup below. + // + // numErrorsToWaitFor corresponds to how many such tablets there are. This is the number + // of special messages with MustWaitFor set that the call errgroup.Wait will wait for. tabletAliasToWaitFor := "" numErrorsToWaitFor := 0 if tabletToWaitFor != nil { @@ -300,6 +308,10 @@ func stopReplicationAndBuildStatusMaps( allTablets = append(allTablets, tabletInfo.Tablet) if !ignoredTablets.Has(alias) { mustWaitFor := tabletAliasToWaitFor == alias + // If this is a tablet that we must wait for + // we increment numErrorsToWaitFor and pass in this to the + // fillStatus function to indicate we must send this with the boolean + // MustWaitFor specified. 
if mustWaitFor { numErrorsToWaitFor++ } @@ -307,9 +319,18 @@ func stopReplicationAndBuildStatusMaps( } } + numGoRoutines := len(tabletMap) - ignoredTablets.Len() + // In general we want to wait for n-1 tablets to respond, since we know the primary tablet is down. + requiredSuccesses := numGoRoutines - 1 + if waitForAllTablets { + // In the special case, where we are explicitly told to wait for all the tablets to return, + // we set the required success to all the go-routines. + requiredSuccesses = numGoRoutines + } + errgroup := concurrency.ErrorGroup{ - NumGoroutines: len(tabletMap) - ignoredTablets.Len(), - NumRequiredSuccesses: len(tabletMap) - ignoredTablets.Len() - 1, + NumGoroutines: numGoRoutines, + NumRequiredSuccesses: requiredSuccesses, NumAllowedErrors: len(tabletMap), // We set the number of allowed errors to a very high value, because we don't want to exit early // even in case of multiple failures. We rely on the revoke function below to determine if we have more failures than we can tolerate NumErrorsToWaitFor: numErrorsToWaitFor, diff --git a/go/vt/vtctl/reparentutil/replication_test.go b/go/vt/vtctl/reparentutil/replication_test.go index 5c7adc42ec4..ed7bd152e9c 100644 --- a/go/vt/vtctl/reparentutil/replication_test.go +++ b/go/vt/vtctl/reparentutil/replication_test.go @@ -25,6 +25,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + _flag "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sets" @@ -299,6 +301,8 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { stopReplicasTimeout time.Duration ignoredTablets sets.Set[string] tabletToWaitFor *topodatapb.TabletAlias + timeSpent time.Duration + waitForAllTablets bool expectedStatusMap map[string]*replicationdatapb.StopReplicationStatus expectedPrimaryStatusMap map[string]*replicationdatapb.PrimaryStatus expectedTabletsReachable []*topodatapb.Tablet @@ -314,13 +318,13 @@ func 
Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -349,11 +353,11 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000100": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, "zone1-0000000101": { - Before: &replicationdatapb.Status{Position: 
"MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -372,6 +376,159 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, }}, shouldErr: false, + }, { + name: "success with wait for all tablets", + durability: "none", + tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ + stopReplicationAndGetStatusResults: map[string]*struct { + StopStatus *replicationdatapb.StopReplicationStatus + Err error + }{ + "zone1-0000000100": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, + After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, + }, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, + After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, + }, + }, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, 
+ }, + }, + ignoredTablets: sets.New[string](), + expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000100": { + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, + After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, + }, + "zone1-0000000101": { + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, + After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, + }, + }, + expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{}, + expectedTabletsReachable: []*topodatapb.Tablet{{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, { + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }}, + waitForAllTablets: true, + shouldErr: false, + }, { + name: "timing check with wait for all tablets", + durability: "none", + tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ + stopReplicationAndGetStatusResults: map[string]*struct { + StopStatus *replicationdatapb.StopReplicationStatus + Err error + }{ + "zone1-0000000100": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, + After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, + }, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{Position: 
"MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, + After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, + }, + }, + }, + stopReplicationAndGetStatusDelays: map[string]time.Duration{ + // We want `zone1-0000000102` to take a lot of time to respond. + // Simulating a tablet being unreachable. + "zone1-0000000102": time.Hour, + }, + }, + stopReplicasTimeout: 1 * time.Second, + timeSpent: 900 * time.Millisecond, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + }, + ignoredTablets: sets.New[string](), + expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000100": { + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, + After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, + }, + "zone1-0000000101": { + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, + After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, + }, + }, + expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{}, + 
expectedTabletsReachable: []*topodatapb.Tablet{{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, { + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }}, + waitForAllTablets: true, + shouldErr: false, }, { name: "success - 2 rdonly failures", @@ -383,13 +540,13 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -442,11 +599,11 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000100": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: 
int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, "zone1-0000000101": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -476,13 +633,13 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: 
int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -535,11 +692,11 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000100": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, "zone1-0000000101": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -569,13 +726,13 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: 
"MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -604,7 +761,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { ignoredTablets: sets.New[string]("zone1-0000000100"), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -643,7 +800,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: 
"MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -672,7 +829,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -717,7 +874,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -746,7 +903,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: 
int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -826,13 +983,13 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -862,7 +1019,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, 
After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -889,7 +1046,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -918,7 +1075,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -1031,19 +1188,19 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: 
int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429102:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429102:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429102:1-9"}, }, }, @@ -1085,15 +1242,15 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000100": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: 
"MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, "zone1-0000000101": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, "zone1-0000000102": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429102:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429102:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429102:1-9"}, }, }, @@ -1128,7 +1285,12 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { t.Run(tt.name, func(t *testing.T) { durability, err := GetDurabilityPolicy(tt.durability) require.NoError(t, err) - res, err := stopReplicationAndBuildStatusMaps(ctx, tt.tmc, &events.Reparent{}, tt.tabletMap, tt.stopReplicasTimeout, tt.ignoredTablets, tt.tabletToWaitFor, durability, logger) + startTime := time.Now() + res, err := stopReplicationAndBuildStatusMaps(ctx, tt.tmc, &events.Reparent{}, tt.tabletMap, tt.stopReplicasTimeout, tt.ignoredTablets, tt.tabletToWaitFor, durability, tt.waitForAllTablets, logger) + totalTimeSpent := time.Since(startTime) + if tt.timeSpent != 0 { + assert.Greater(t, totalTimeSpent, tt.timeSpent) + } if tt.shouldErr { assert.Error(t, err) return @@ -1158,8 +1320,8 @@ func TestReplicaWasRunning(t *testing.T) { name: "io thread running", in: 
&replicationdatapb.StopReplicationStatus{ Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateStopped), }, }, expected: true, @@ -1169,8 +1331,8 @@ func TestReplicaWasRunning(t *testing.T) { name: "sql thread running", in: &replicationdatapb.StopReplicationStatus{ Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateRunning), }, }, expected: true, @@ -1180,8 +1342,8 @@ func TestReplicaWasRunning(t *testing.T) { name: "io and sql threads running", in: &replicationdatapb.StopReplicationStatus{ Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), }, }, expected: true, @@ -1191,8 +1353,8 @@ func TestReplicaWasRunning(t *testing.T) { name: "no replication threads running", in: &replicationdatapb.StopReplicationStatus{ Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), }, }, expected: false, @@ -1246,8 +1408,8 @@ func TestSQLThreadWasRunning(t *testing.T) { name: "io thread running", in: &replicationdatapb.StopReplicationStatus{ Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateStopped), }, }, expected: false, @@ -1257,8 +1419,8 @@ func 
TestSQLThreadWasRunning(t *testing.T) { name: "sql thread running", in: &replicationdatapb.StopReplicationStatus{ Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateRunning), }, }, expected: true, @@ -1268,8 +1430,8 @@ func TestSQLThreadWasRunning(t *testing.T) { name: "io and sql threads running", in: &replicationdatapb.StopReplicationStatus{ Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), }, }, expected: true, @@ -1279,8 +1441,8 @@ func TestSQLThreadWasRunning(t *testing.T) { name: "no replication threads running", in: &replicationdatapb.StopReplicationStatus{ Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), }, }, expected: false, diff --git a/go/vt/vtctl/reparentutil/util.go b/go/vt/vtctl/reparentutil/util.go index f4cebc3dd7d..cfde8f34508 100644 --- a/go/vt/vtctl/reparentutil/util.go +++ b/go/vt/vtctl/reparentutil/util.go @@ -22,7 +22,11 @@ import ( "sync" "time" - "vitess.io/vitess/go/mysql" + "golang.org/x/sync/errgroup" + + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" @@ -38,6 +42,12 @@ import ( "vitess.io/vitess/go/vt/proto/vtrpc" ) +var ( + reparentShardOpTimings = stats.NewTimings("reparent_shard_operation_timings", "Timings of reparent shard operations", "Operation") + failureResult = "failure" + successResult = "success" +) + 
// ChooseNewPrimary finds a tablet that should become a primary after reparent. // The criteria for the new primary-elect are (preferably) to be in the same // cell as the current primary, and to be different from avoidPrimaryAlias. The @@ -66,12 +76,12 @@ func ChooseNewPrimary( } var ( - wg sync.WaitGroup // mutex to secure the next two fields from concurrent access mu sync.Mutex // tablets that are possible candidates to be the new primary and their positions - validTablets []*topodatapb.Tablet - tabletPositions []mysql.Position + validTablets []*topodatapb.Tablet + tabletPositions []replication.Position + errorGroup, groupCtx = errgroup.WithContext(ctx) ) for _, tablet := range tabletMap { @@ -84,22 +94,24 @@ func ChooseNewPrimary( continue } - wg.Add(1) - - go func(tablet *topodatapb.Tablet) { - defer wg.Done() + tb := tablet.Tablet + errorGroup.Go(func() error { // find and store the positions for the tablet - pos, err := findPositionForTablet(ctx, tablet, logger, tmc, waitReplicasTimeout) + pos, err := findPositionForTablet(groupCtx, tb, logger, tmc, waitReplicasTimeout) mu.Lock() defer mu.Unlock() if err == nil { - validTablets = append(validTablets, tablet) + validTablets = append(validTablets, tb) tabletPositions = append(tabletPositions, pos) } - }(tablet.Tablet) + return err + }) } - wg.Wait() + err := errorGroup.Wait() + if err != nil { + return nil, err + } // return nothing if there are no valid tablets available if len(validTablets) == 0 { @@ -107,7 +119,7 @@ func ChooseNewPrimary( } // sort the tablets for finding the best primary - err := sortTabletsForReparent(validTablets, tabletPositions, durability) + err = sortTabletsForReparent(validTablets, tabletPositions, durability) if err != nil { return nil, err } @@ -117,7 +129,7 @@ func ChooseNewPrimary( // findPositionForTablet processes the replication position for a single tablet and // returns it. It is safe to call from multiple goroutines. 
-func findPositionForTablet(ctx context.Context, tablet *topodatapb.Tablet, logger logutil.Logger, tmc tmclient.TabletManagerClient, waitTimeout time.Duration) (mysql.Position, error) { +func findPositionForTablet(ctx context.Context, tablet *topodatapb.Tablet, logger logutil.Logger, tmc tmclient.TabletManagerClient, waitTimeout time.Duration) (replication.Position, error) { logger.Infof("getting replication position from %v", topoproto.TabletAliasString(tablet.Alias)) ctx, cancel := context.WithTimeout(ctx, waitTimeout) @@ -125,13 +137,13 @@ func findPositionForTablet(ctx context.Context, tablet *topodatapb.Tablet, logge status, err := tmc.ReplicationStatus(ctx, tablet) if err != nil { - sqlErr, isSQLErr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) - if isSQLErr && sqlErr != nil && sqlErr.Number() == mysql.ERNotReplica { + sqlErr, isSQLErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) + if isSQLErr && sqlErr != nil && sqlErr.Number() == sqlerror.ERNotReplica { logger.Warningf("no replication statue from %v, using empty gtid set", topoproto.TabletAliasString(tablet.Alias)) - return mysql.Position{}, nil + return replication.Position{}, nil } logger.Warningf("failed to get replication status from %v, ignoring tablet: %v", topoproto.TabletAliasString(tablet.Alias), err) - return mysql.Position{}, err + return replication.Position{}, err } // Use the relay log position if available, otherwise use the executed GTID set (binary log position). 
@@ -139,10 +151,10 @@ func findPositionForTablet(ctx context.Context, tablet *topodatapb.Tablet, logge if status.RelayLogPosition != "" { positionString = status.RelayLogPosition } - pos, err := mysql.DecodePosition(positionString) + pos, err := replication.DecodePosition(positionString) if err != nil { logger.Warningf("cannot decode replica position %v for tablet %v, ignoring tablet: %v", positionString, topoproto.TabletAliasString(tablet.Alias), err) - return mysql.Position{}, err + return replication.Position{}, err } return pos, nil @@ -247,9 +259,9 @@ func ShardReplicationStatuses(ctx context.Context, ts *topo.Server, tmc tmclient } // getValidCandidatesAndPositionsAsList converts the valid candidates from a map to a list of tablets, making it easier to sort -func getValidCandidatesAndPositionsAsList(validCandidates map[string]mysql.Position, tabletMap map[string]*topo.TabletInfo) ([]*topodatapb.Tablet, []mysql.Position, error) { +func getValidCandidatesAndPositionsAsList(validCandidates map[string]replication.Position, tabletMap map[string]*topo.TabletInfo) ([]*topodatapb.Tablet, []replication.Position, error) { var validTablets []*topodatapb.Tablet - var tabletPositions []mysql.Position + var tabletPositions []replication.Position for tabletAlias, position := range validCandidates { tablet, isFound := tabletMap[tabletAlias] if !isFound { @@ -262,8 +274,8 @@ func getValidCandidatesAndPositionsAsList(validCandidates map[string]mysql.Posit } // restrictValidCandidates is used to restrict some candidates from being considered eligible for becoming the intermediate source or the final promotion candidate -func restrictValidCandidates(validCandidates map[string]mysql.Position, tabletMap map[string]*topo.TabletInfo) (map[string]mysql.Position, error) { - restrictedValidCandidates := make(map[string]mysql.Position) +func restrictValidCandidates(validCandidates map[string]replication.Position, tabletMap map[string]*topo.TabletInfo) (map[string]replication.Position, 
error) { + restrictedValidCandidates := make(map[string]replication.Position) for candidate, position := range validCandidates { candidateInfo, ok := tabletMap[candidate] if !ok { diff --git a/go/vt/vtctl/reparentutil/util_test.go b/go/vt/vtctl/reparentutil/util_test.go index 29f7bb4ab7d..a9e6274d490 100644 --- a/go/vt/vtctl/reparentutil/util_test.go +++ b/go/vt/vtctl/reparentutil/util_test.go @@ -25,6 +25,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/logutil" @@ -559,7 +561,7 @@ func TestFindPositionForTablet(t *testing.T) { return } require.NoError(t, err) - posString := mysql.EncodePosition(pos) + posString := replication.EncodePosition(pos) require.Equal(t, test.expectedPosition, posString) }) } @@ -736,41 +738,41 @@ func TestFindCurrentPrimary(t *testing.T) { } func TestGetValidCandidatesAndPositionsAsList(t *testing.T) { - sid1 := mysql.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - mysqlGTID1 := mysql.Mysql56GTID{ + sid1 := replication.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + mysqlGTID1 := replication.Mysql56GTID{ Server: sid1, Sequence: 9, } - mysqlGTID2 := mysql.Mysql56GTID{ + mysqlGTID2 := replication.Mysql56GTID{ Server: sid1, Sequence: 10, } - mysqlGTID3 := mysql.Mysql56GTID{ + mysqlGTID3 := replication.Mysql56GTID{ Server: sid1, Sequence: 11, } - positionMostAdvanced := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionMostAdvanced := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID1) positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID2) positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID3) - positionIntermediate1 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionIntermediate1 := 
replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionIntermediate1.GTIDSet = positionIntermediate1.GTIDSet.AddGTID(mysqlGTID1) - positionIntermediate2 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionIntermediate2 := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionIntermediate2.GTIDSet = positionIntermediate2.GTIDSet.AddGTID(mysqlGTID1) positionIntermediate2.GTIDSet = positionIntermediate2.GTIDSet.AddGTID(mysqlGTID2) tests := []struct { name string - validCandidates map[string]mysql.Position + validCandidates map[string]replication.Position tabletMap map[string]*topo.TabletInfo tabletRes []*topodatapb.Tablet }{ { name: "test conversion", - validCandidates: map[string]mysql.Position{ + validCandidates: map[string]replication.Position{ "zone1-0000000100": positionMostAdvanced, "zone1-0000000101": positionIntermediate1, "zone1-0000000102": positionIntermediate2, @@ -968,13 +970,13 @@ func TestWaitForCatchUp(t *testing.T) { func TestRestrictValidCandidates(t *testing.T) { tests := []struct { name string - validCandidates map[string]mysql.Position + validCandidates map[string]replication.Position tabletMap map[string]*topo.TabletInfo - result map[string]mysql.Position + result map[string]replication.Position }{ { name: "remove invalid tablets", - validCandidates: map[string]mysql.Position{ + validCandidates: map[string]replication.Position{ "zone1-0000000100": {}, "zone1-0000000101": {}, "zone1-0000000102": {}, @@ -1038,7 +1040,7 @@ func TestRestrictValidCandidates(t *testing.T) { }, }, }, - result: map[string]mysql.Position{ + result: map[string]replication.Position{ "zone1-0000000100": {}, "zone1-0000000101": {}, "zone1-0000000104": {}, diff --git a/go/vt/vtctl/schematools/marshal.go b/go/vt/vtctl/schematools/marshal.go new file mode 100644 index 00000000000..0ebf3e65346 --- /dev/null +++ b/go/vt/vtctl/schematools/marshal.go @@ -0,0 +1,158 @@ +/* +Copyright 2023 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schematools + +import ( + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/topo/topoproto" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + "vitess.io/vitess/go/vt/proto/vttime" +) + +type tSchemaMigration struct { + *vtctldatapb.SchemaMigration + // Renamed fields + MigrationUuid string + MysqlSchema string + MysqlTable string + AddedTimestamp *vttime.Time + RequestedTimestamp *vttime.Time + ReadyTimestamp *vttime.Time + StartedTimestamp *vttime.Time + CompletedTimestamp *vttime.Time + CleanupTimestamp *vttime.Time + ArtifactRetentionSeconds int64 + LastThrottledTimestamp *vttime.Time + CancelledTimestamp *vttime.Time + ReviewedTimestamp *vttime.Time + ReadyToCompleteTimestamp *vttime.Time + + // Re-typed fields. These must have distinct names or the first-pass + // marshalling will not produce fields/rows for these. + Status_ string `sqltypes:"$$status"` + Tablet_ string `sqltypes:"$$tablet"` + Strategy_ string `sqltypes:"$$strategy"` +} + +func replaceSchemaMigrationFields(result *sqltypes.Result) *sqltypes.Result { + // NOTE: this depends entirely on (1) the ordering of the fields in the + // embedded protobuf message and (2) that MarshalResult walks fields in the + // order they are defined (via reflect.VisibleFields). 
+ // + // That half is stable, as it is part of the VisibleFields API, but if we + // were to remove or reorder fields in the SchemaMigration proto without + // updating this function, this could break. + return sqltypes.ReplaceFields(result, map[string]string{ + "uuid": "migration_uuid", + "schema": "mysql_schema", + "table": "mysql_table", + "added_at": "added_timestamp", + "requested_at": "requested_timestamp", + "ready_at": "ready_timestamp", + "started_at": "started_timestamp", + "completed_at": "completed_timestamp", + "cleaned_up_at": "cleanup_timestamp", + "artifact_retention": "artifact_retention_seconds", + "last_throttled_at": "last_throttled_timestamp", + "cancelled_at": "cancelled_timestamp", + "reviewed_at": "reviewed_timestamp", + "ready_to_complete_at": "ready_to_complete_timestamp", + "$$status": "status", + "$$tablet": "tablet", + "$$strategy": "strategy", + }) +} + +type MarshallableSchemaMigration vtctldatapb.SchemaMigration + +func (t *MarshallableSchemaMigration) MarshalResult() (*sqltypes.Result, error) { + artifactRetention, _, err := protoutil.DurationFromProto(t.ArtifactRetention) + if err != nil { + return nil, err + } + + tmp := tSchemaMigration{ + SchemaMigration: (*vtctldatapb.SchemaMigration)(t), + MigrationUuid: t.Uuid, + MysqlSchema: t.Schema, + MysqlTable: t.Table, + AddedTimestamp: t.AddedAt, + RequestedTimestamp: t.RequestedAt, + ReadyTimestamp: t.ReadyAt, + StartedTimestamp: t.StartedAt, + CompletedTimestamp: t.CompletedAt, + CleanupTimestamp: t.CleanedUpAt, + ArtifactRetentionSeconds: int64(artifactRetention.Seconds()), + LastThrottledTimestamp: t.LastThrottledAt, + CancelledTimestamp: t.CancelledAt, + ReviewedTimestamp: t.ReviewedAt, + ReadyToCompleteTimestamp: t.ReadyToCompleteAt, + Status_: SchemaMigrationStatusName(t.Status), + Tablet_: topoproto.TabletAliasString(t.Tablet), + Strategy_: SchemaMigrationStrategyName(t.Strategy), + } + + res, err := sqltypes.MarshalResult(&tmp) + if err != nil { + return nil, err + } + + 
return replaceSchemaMigrationFields(res), nil +} + +type MarshallableSchemaMigrations []*vtctldatapb.SchemaMigration + +func (ts MarshallableSchemaMigrations) MarshalResult() (*sqltypes.Result, error) { + s := make([]*tSchemaMigration, len(ts)) + for i, t := range ts { + artifactRetention, _, err := protoutil.DurationFromProto(t.ArtifactRetention) + if err != nil { + return nil, err + } + + tmp := &tSchemaMigration{ + SchemaMigration: (*vtctldatapb.SchemaMigration)(t), + MigrationUuid: t.Uuid, + MysqlSchema: t.Schema, + MysqlTable: t.Table, + AddedTimestamp: t.AddedAt, + RequestedTimestamp: t.RequestedAt, + ReadyTimestamp: t.ReadyAt, + StartedTimestamp: t.StartedAt, + CompletedTimestamp: t.CompletedAt, + CleanupTimestamp: t.CleanedUpAt, + ArtifactRetentionSeconds: int64(artifactRetention.Seconds()), + LastThrottledTimestamp: t.LastThrottledAt, + CancelledTimestamp: t.CancelledAt, + ReviewedTimestamp: t.ReviewedAt, + ReadyToCompleteTimestamp: t.ReadyToCompleteAt, + Status_: SchemaMigrationStatusName(t.Status), + Tablet_: topoproto.TabletAliasString(t.Tablet), + Strategy_: SchemaMigrationStrategyName(t.Strategy), + } + s[i] = tmp + } + + res, err := sqltypes.MarshalResult(s) + if err != nil { + return nil, err + } + + return replaceSchemaMigrationFields(res), nil +} diff --git a/go/vt/vtctl/schematools/marshal_test.go b/go/vt/vtctl/schematools/marshal_test.go new file mode 100644 index 00000000000..6a574af5974 --- /dev/null +++ b/go/vt/vtctl/schematools/marshal_test.go @@ -0,0 +1,68 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schematools + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/sqltypes" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +func TestMarshalResult(t *testing.T) { + t.Parallel() + + now := time.Now() + + sm := &vtctldatapb.SchemaMigration{ + Uuid: "abc", + RequestedAt: protoutil.TimeToProto(now), + Tablet: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Status: vtctldatapb.SchemaMigration_RUNNING, + Table: "t1", + } + + r, err := sqltypes.MarshalResult((*MarshallableSchemaMigration)(sm)) + require.NoError(t, err) + row := r.Named().Rows[0] + + assert.Equal(t, "abc", row.AsString("migration_uuid", "")) + assert.Equal(t, now.Format(sqltypes.TimestampFormat), row.AsString("requested_timestamp", "")) + assert.Equal(t, "zone1-0000000101", row.AsString("tablet", "")) + assert.Equal(t, "running", row.AsString("status", "")) + assert.Equal(t, "t1", row.AsString("mysql_table", "")) + + r, err = sqltypes.MarshalResult(MarshallableSchemaMigrations([]*vtctldatapb.SchemaMigration{sm})) + require.NoError(t, err) + row = r.Named().Rows[0] + + assert.Equal(t, "abc", row.AsString("migration_uuid", "")) + assert.Equal(t, now.Format(sqltypes.TimestampFormat), row.AsString("requested_timestamp", "")) + assert.Equal(t, "zone1-0000000101", row.AsString("tablet", "")) + assert.Equal(t, "running", row.AsString("status", "")) + assert.Equal(t, "t1", row.AsString("mysql_table", "")) +} diff --git a/go/vt/vtctl/schematools/reload_test.go b/go/vt/vtctl/schematools/reload_test.go index 6fbc7f152be..4f00e300d13 100644 --- a/go/vt/vtctl/schematools/reload_test.go +++ b/go/vt/vtctl/schematools/reload_test.go @@ -86,7 +86,6 @@ func (tmc *reloadSchemaTMC) ReloadSchema(ctx context.Context, 
tablet *topodatapb func TestReloadShard(t *testing.T) { t.Parallel() - ctx := context.Background() tests := []struct { name string @@ -330,7 +329,10 @@ func TestReloadShard(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer(tt.cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, tt.cells...) + defer ts.Close() testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, }, tt.tablets...) diff --git a/go/vt/vtctl/schematools/schematools.go b/go/vt/vtctl/schematools/schematools.go index 4b8543a394d..059b7ca3db8 100644 --- a/go/vt/vtctl/schematools/schematools.go +++ b/go/vt/vtctl/schematools/schematools.go @@ -18,6 +18,8 @@ package schematools import ( "context" + "fmt" + "strings" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vterrors" @@ -25,7 +27,8 @@ import ( tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/proto/vtrpc" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) // GetSchema makes an RPC to get the schema from a remote tablet, after @@ -33,7 +36,7 @@ import ( func GetSchema(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, alias *topodatapb.TabletAlias, request *tabletmanagerdatapb.GetSchemaRequest) (*tabletmanagerdatapb.SchemaDefinition, error) { ti, err := ts.GetTablet(ctx, alias) if err != nil { - return nil, vterrors.Errorf(vtrpc.Code_NOT_FOUND, "GetTablet(%v) failed: %v", alias, err) + return nil, vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "GetTablet(%v) failed: %v", alias, err) } sd, err := tmc.GetSchema(ctx, ti.Tablet, request) @@ -43,3 +46,61 @@ func GetSchema(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerC return sd, nil } + +// ParseSchemaMigrationStrategy parses the given strategy into the underlying enum type. 
+func ParseSchemaMigrationStrategy(name string) (vtctldatapb.SchemaMigration_Strategy, error) { + if name == "" { + // backward compatiblity and to handle unspecified values + return vtctldatapb.SchemaMigration_DIRECT, nil + + } + + upperName := strings.ToUpper(name) + switch upperName { + case "GH-OST", "PT-OSC": + // more compatibility since the protobuf message names don't + // have the dash. + upperName = strings.ReplaceAll(upperName, "-", "") + default: + } + + strategy, ok := vtctldatapb.SchemaMigration_Strategy_value[upperName] + if !ok { + return 0, fmt.Errorf("unknown schema migration strategy: '%v'", name) + } + + return vtctldatapb.SchemaMigration_Strategy(strategy), nil + +} + +// ParseSchemaMigrationStatus parses the given status into the underlying enum type. +func ParseSchemaMigrationStatus(name string) (vtctldatapb.SchemaMigration_Status, error) { + key := strings.ToUpper(name) + + val, ok := vtctldatapb.SchemaMigration_Status_value[key] + if !ok { + return 0, fmt.Errorf("unknown enum name for SchemaMigration_Status: %s", name) + } + + return vtctldatapb.SchemaMigration_Status(val), nil +} + +// SchemaMigrationStrategyName returns the text-based form of the strategy. +func SchemaMigrationStrategyName(strategy vtctldatapb.SchemaMigration_Strategy) string { + name, ok := vtctldatapb.SchemaMigration_Strategy_name[int32(strategy)] + if !ok { + return "unknown" + } + + switch strategy { + case vtctldatapb.SchemaMigration_GHOST, vtctldatapb.SchemaMigration_PTOSC: + name = strings.Join([]string{name[:2], name[2:]}, "-") + } + + return strings.ToLower(name) +} + +// SchemaMigrationStatusName returns the text-based form of the status. 
+func SchemaMigrationStatusName(status vtctldatapb.SchemaMigration_Status) string { + return strings.ToLower(vtctldatapb.SchemaMigration_Status_name[int32(status)]) +} diff --git a/go/vt/vtctl/schematools/schematools_test.go b/go/vt/vtctl/schematools/schematools_test.go new file mode 100644 index 00000000000..94909ab52b1 --- /dev/null +++ b/go/vt/vtctl/schematools/schematools_test.go @@ -0,0 +1,69 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schematools + +import ( + "testing" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + + "github.com/stretchr/testify/assert" +) + +func TestSchemaMigrationStrategyName(t *testing.T) { + t.Parallel() + + tests := []struct { + in vtctldatapb.SchemaMigration_Strategy + out string + }{ + { + in: vtctldatapb.SchemaMigration_ONLINE, + out: "vitess", + }, + { + in: vtctldatapb.SchemaMigration_VITESS, + out: "vitess", + }, + { + in: vtctldatapb.SchemaMigration_GHOST, + out: "gh-ost", + }, + { + in: vtctldatapb.SchemaMigration_PTOSC, + out: "pt-osc", + }, + { + in: vtctldatapb.SchemaMigration_DIRECT, + out: "direct", + }, + { + in: vtctldatapb.SchemaMigration_Strategy(-1), + out: "unknown", + }, + } + + for _, test := range tests { + test := test + t.Run(test.out, func(t *testing.T) { + t.Parallel() + + out := SchemaMigrationStrategyName(test.in) + assert.Equal(t, test.out, out) + }) + } +} diff --git a/go/vt/vtctl/testdata/unknown-params-logged-dry-run-vschema.json 
b/go/vt/vtctl/testdata/unknown-params-logged-dry-run-vschema.json new file mode 100644 index 00000000000..aefcfb13ae7 --- /dev/null +++ b/go/vt/vtctl/testdata/unknown-params-logged-dry-run-vschema.json @@ -0,0 +1,18 @@ +{ + "sharded": true, + "vindexes": { + "hash_vdx" : { + "type": "hash", + "params": { + "foo": "bar", + "hello": "world" + } + }, + "binary_vdx": { + "type": "binary", + "params": { + "hello": "world" + } + } + } +} diff --git a/go/vt/vtctl/testdata/unknown-params-logged-vschema.json b/go/vt/vtctl/testdata/unknown-params-logged-vschema.json new file mode 100644 index 00000000000..d3abc1c0e03 --- /dev/null +++ b/go/vt/vtctl/testdata/unknown-params-logged-vschema.json @@ -0,0 +1,18 @@ +{ + "sharded": true, + "vindexes": { + "binary_vdx": { + "type": "binary", + "params": { + "hello": "world" + } + }, + "hash_vdx": { + "type": "hash", + "params": { + "foo": "bar", + "hello": "world" + } + } + } +} diff --git a/go/vt/vtctl/vdiff2.go b/go/vt/vtctl/vdiff2.go index 9dab6c30517..7cd1c7e00ca 100644 --- a/go/vt/vtctl/vdiff2.go +++ b/go/vt/vtctl/vdiff2.go @@ -165,9 +165,6 @@ func commandVDiff2(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.F return fmt.Errorf("invalid action '%s'; %s", action, usage) } - type ErrorResponse struct { - Error string - } output, err := wr.VDiff2(ctx, keyspace, workflowName, action, actionArg, vdiffUUID.String(), options) if err != nil { log.Errorf("vdiff2 returning with error: %v", err) diff --git a/go/vt/vtctl/vdiff2_test.go b/go/vt/vtctl/vdiff2_test.go index 368f21eb93b..1348cd06448 100644 --- a/go/vt/vtctl/vdiff2_test.go +++ b/go/vt/vtctl/vdiff2_test.go @@ -35,7 +35,9 @@ var ( ) func TestVDiff2Unsharded(t *testing.T) { - env := newTestVDiffEnv(t, []string{"0"}, []string{"0"}, "", nil) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestVDiffEnv(t, ctx, []string{"0"}, []string{"0"}, "", nil) defer env.close() UUID := uuid.New().String() @@ -275,7 +277,9 @@ func 
TestVDiff2Unsharded(t *testing.T) { } func TestVDiff2Sharded(t *testing.T) { - env := newTestVDiffEnv(t, []string{"-40", "40-"}, []string{"-80", "80-"}, "", map[string]string{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestVDiffEnv(t, ctx, []string{"-40", "40-"}, []string{"-80", "80-"}, "", map[string]string{ "-80": "MySQL56/0e45e704-7cb9-11ed-a1eb-0242ac120002:1-890", "80-": "MySQL56/1497ddb0-7cb9-11ed-a1eb-0242ac120002:1-891", }) @@ -473,7 +477,7 @@ func TestBuildProgressReport(t *testing.T) { t.Run(tt.name, func(t *testing.T) { buildProgressReport(tt.args.summary, tt.args.rowsToCompare) // We always check the percentage - require.Equal(t, tt.want.Percentage, tt.args.summary.Progress.Percentage) + require.Equal(t, int(tt.want.Percentage), int(tt.args.summary.Progress.Percentage)) // We only check the ETA if there is one if tt.want.ETA != "" { diff --git a/go/vt/vtctl/vdiff_env_test.go b/go/vt/vtctl/vdiff_env_test.go index 2d662d8bdb1..b8791c1d99f 100644 --- a/go/vt/vtctl/vdiff_env_test.go +++ b/go/vt/vtctl/vdiff_env_test.go @@ -69,11 +69,11 @@ type testVDiffEnv struct { //---------------------------------------------- // testVDiffEnv -func newTestVDiffEnv(t testing.TB, sourceShards, targetShards []string, query string, positions map[string]string) *testVDiffEnv { +func newTestVDiffEnv(t testing.TB, ctx context.Context, sourceShards, targetShards []string, query string, positions map[string]string) *testVDiffEnv { env := &testVDiffEnv{ workflow: "vdiffTest", tablets: make(map[int]*testVDiffTablet), - topoServ: memorytopo.NewServer("cell"), + topoServ: memorytopo.NewServer(ctx, "cell"), cell: "cell", tabletType: topodatapb.TabletType_REPLICA, tmc: newTestVDiffTMClient(), diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index c7da949e4dd..11d3cf85e68 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -97,6 +97,7 @@ import ( "google.golang.org/protobuf/encoding/protojson" 
"google.golang.org/protobuf/proto" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/cmd/vtctldclient/cli" @@ -105,6 +106,7 @@ import ( "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/textutil" + "vitess.io/vitess/go/vt/discovery" hk "vitess.io/vitess/go/vt/hook" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" @@ -118,13 +120,15 @@ import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/proto/vttime" "vitess.io/vitess/go/vt/schema" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" "vitess.io/vitess/go/vt/vtctl/workflow" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" "vitess.io/vitess/go/vt/wrangler" ) @@ -440,7 +444,7 @@ var commands = []commandGroup{ { name: "MoveTables", method: commandMoveTables, - params: "[--source=] [--tables=] [--cells=] [--tablet_types=] [--all] [--exclude=] [--auto_start] [--stop_after_copy] [--defer-secondary-keys] [--on-ddl=] [--source_shards=] 'action must be one of the following: Create, Complete, Cancel, SwitchTraffic, ReverseTrafffic, Show, or Progress' ", + params: "[--source=] [--tables=] [--cells=] [--tablet_types=] [--all] [--exclude=] [--auto_start] [--stop_after_copy] [--defer-secondary-keys] [--on-ddl=] [--source_shards=] [--source_time_zone=] [--initialize-target-sequences] [--no-routing-rules] 'action must be one of the following: Create, Complete, Cancel, SwitchTraffic, ReverseTrafffic, Show, or Progress' ", help: `Move table(s) to another keyspace, table_specs is a list of tables or the tables section of the vschema for the target keyspace. 
Example: '{"t1":{"column_vindexes": [{"column": "id1", "name": "hash"}]}, "t2":{"column_vindexes": [{"column": "id2", "name": "hash"}]}}'. In the case of an unsharded target keyspace the vschema for each table may be empty. Example: '{"t1":{}, "t2":{}}'.`, }, { @@ -587,8 +591,8 @@ var commands = []commandGroup{ { name: "ApplySchema", method: commandApplySchema, - params: "[--allow_long_unavailability] [--wait_replicas_timeout=10s] [--ddl_strategy=] [--uuid_list=] [--migration_context=] {--sql= || --sql-file=} ", - help: "Applies the schema change to the specified keyspace on every primary, running in parallel on all shards. The changes are then propagated to replicas via replication. If --allow_long_unavailability is set, schema changes affecting a large number of rows (and possibly incurring a longer period of unavailability) will not be rejected. -ddl_strategy is used to instruct migrations via vreplication, gh-ost or pt-osc with optional parameters. -migration_context allows the user to specify a custom request context for online DDL migrations.", + params: "[--wait_replicas_timeout=10s] [--ddl_strategy=] [--uuid_list=] [--migration_context=] {--sql= || --sql-file=} [--batch-size=] ", + help: "Applies the schema change to the specified keyspace on every primary, running in parallel on all shards. The changes are then propagated to replicas via replication. -ddl_strategy is used to instruct migrations via vreplication, gh-ost or pt-osc with optional parameters. 
-migration_context allows the user to specify a custom request context for online DDL migrations.", }, { name: "CopySchemaShard", @@ -609,8 +613,19 @@ var commands = []commandGroup{ " \nvtctl OnlineDDL test_keyspace show running" + " \nvtctl OnlineDDL test_keyspace show complete" + " \nvtctl OnlineDDL test_keyspace show failed" + + " \nvtctl OnlineDDL test_keyspace cleanup 82fa54ac_e83e_11ea_96b7_f875a4d24e90" + " \nvtctl OnlineDDL test_keyspace retry 82fa54ac_e83e_11ea_96b7_f875a4d24e90" + - " \nvtctl OnlineDDL test_keyspace cancel 82fa54ac_e83e_11ea_96b7_f875a4d24e90", + " \nvtctl OnlineDDL test_keyspace cancel 82fa54ac_e83e_11ea_96b7_f875a4d24e90" + + " \nvtctl OnlineDDL test_keyspace cancel-all" + + " \nvtctl OnlineDDL test_keyspace launch 82fa54ac_e83e_11ea_96b7_f875a4d24e90" + + " \nvtctl OnlineDDL test_keyspace launch-all" + + " \nvtctl OnlineDDL test_keyspace complete 82fa54ac_e83e_11ea_96b7_f875a4d24e90" + + " \nvtctl OnlineDDL test_keyspace complete-all" + + " \nvtctl OnlineDDL test_keyspace throttle 82fa54ac_e83e_11ea_96b7_f875a4d24e90" + + " \nvtctl OnlineDDL test_keyspace throttle-all" + + " \nvtctl OnlineDDL test_keyspace unthrottle 82fa54ac_e83e_11ea_96b7_f875a4d24e90" + + " \nvtctl OnlineDDL test_keyspace unthrottle-all" + + "", }, { name: "ValidateVersionShard", @@ -691,7 +706,7 @@ var commands = []commandGroup{ { name: "UpdateThrottlerConfig", method: commandUpdateThrottlerConfig, - params: "[--enable|--disable] [--threshold=] [--custom-query=] [--check-as-check-self|--check-as-check-shard] ", + params: "[--enable|--disable] [--threshold=] [--custom-query=] [--check-as-check-self|--check-as-check-shard] [--throttle-app|unthrottle-app=] [--throttle-app-ratio=] [--throttle-app-duration=] [--throttle-app-exempt] ", help: "Update the table throttler configuration for all cells and tablets of a given keyspace", }, { @@ -785,7 +800,7 @@ func fmtTabletAwkable(ti *topo.TabletInfo) string { mtst := "" // special case for old primary that hasn't updated 
topo yet if ti.PrimaryTermStartTime != nil && ti.PrimaryTermStartTime.Seconds > 0 { - mtst = logutil.ProtoToTime(ti.PrimaryTermStartTime).Format(time.RFC3339) + mtst = protoutil.TimeFromProto(ti.PrimaryTermStartTime).UTC().Format(time.RFC3339) } return fmt.Sprintf("%v %v %v %v %v %v %v %v", topoproto.TabletAliasString(ti.Alias), keyspace, shard, topoproto.TabletTypeLString(ti.Type), ti.Addr(), ti.MysqlAddr(), fmtMapAwkable(ti.Tags), mtst) } @@ -1805,7 +1820,7 @@ func commandCreateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags baseKeyspace := subFlags.String("base_keyspace", "", "Specifies the base keyspace for a snapshot keyspace") timestampStr := subFlags.String("snapshot_time", "", "Specifies the snapshot time for this keyspace") durabilityPolicy := subFlags.String("durability-policy", "none", "Type of durability to enforce for this keyspace. Default is none. Possible values include 'semi_sync' and others as dictated by registered plugins.") - sidecarDBName := subFlags.String("sidecar-db-name", sidecardb.DefaultName, "(Experimental) Name of the Vitess sidecar database that tablets in this keyspace will use for internal metadata.") + sidecarDBName := subFlags.String("sidecar-db-name", sidecar.DefaultName, "(Experimental) Name of the Vitess sidecar database that tablets in this keyspace will use for internal metadata.") if err := subFlags.Parse(args); err != nil { return err } @@ -1846,7 +1861,7 @@ func commandCreateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags if timeTime.After(time.Now()) { return vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "snapshot_time can not be more than current time") } - snapshotTime = logutil.TimeToProto(timeTime) + snapshotTime = protoutil.TimeToProto(timeTime) } ki := &topodatapb.Keyspace{ KeyspaceType: ktype, @@ -2029,11 +2044,11 @@ func commandValidateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlag } func commandReshard(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, 
args []string) error { - return commandVRWorkflow(ctx, wr, subFlags, args, wrangler.ReshardWorkflow) + return commandVReplicationWorkflow(ctx, wr, subFlags, args, wrangler.ReshardWorkflow) } func commandMoveTables(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { - return commandVRWorkflow(ctx, wr, subFlags, args, wrangler.MoveTablesWorkflow) + return commandVReplicationWorkflow(ctx, wr, subFlags, args, wrangler.MoveTablesWorkflow) } // VReplicationWorkflowAction defines subcommands passed to vtctl for movetables or reshard @@ -2051,7 +2066,7 @@ const ( ) func commandMigrate(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { - return commandVRWorkflow(ctx, wr, subFlags, args, wrangler.MigrateWorkflow) + return commandVReplicationWorkflow(ctx, wr, subFlags, args, wrangler.MigrateWorkflow) } // getSourceKeyspace expects a keyspace of the form "externalClusterName.keyspaceName" and returns the components @@ -2063,9 +2078,9 @@ func getSourceKeyspace(clusterKeyspace string) (clusterName string, sourceKeyspa return splits[0], splits[1], nil } -// commandVRWorkflow is the common entry point for MoveTables/Reshard/Migrate workflows +// commandVReplicationWorkflow is the common entry point for MoveTables/Reshard/Migrate workflows // FIXME: this function needs a refactor. 
Also validations for params should to be done per workflow type -func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string, +func commandVReplicationWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string, workflowType wrangler.VReplicationWorkflowType) error { const defaultWaitTime = time.Duration(30 * time.Second) @@ -2074,7 +2089,7 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl const defaultMaxReplicationLagAllowed = defaultWaitTime cells := subFlags.String("cells", "", "Cell(s) or CellAlias(es) (comma-separated) to replicate from.") - tabletTypes := subFlags.String("tablet_types", "in_order:REPLICA,PRIMARY", "Source tablet types to replicate from (e.g. PRIMARY, REPLICA, RDONLY). Defaults to --vreplication_tablet_type parameter value for the tablet, which has the default value of in_order:REPLICA,PRIMARY. Note: SwitchTraffic overrides this default and uses in_order:RDONLY,REPLICA,PRIMARY to switch all traffic by default.") + tabletTypesStr := subFlags.String("tablet_types", "in_order:REPLICA,PRIMARY", "Source tablet types to replicate from (e.g. PRIMARY, REPLICA, RDONLY). Defaults to --vreplication_tablet_type parameter value for the tablet, which has the default value of in_order:REPLICA,PRIMARY. Note: SwitchTraffic overrides this default and uses in_order:RDONLY,REPLICA,PRIMARY to switch all traffic by default.") dryRun := subFlags.Bool("dry_run", false, "Does a dry run of SwitchTraffic and only reports the actions to be taken. --dry_run is only supported for SwitchTraffic, ReverseTraffic and Complete.") timeout := subFlags.Duration("timeout", defaultWaitTime, "Specifies the maximum time to wait, in seconds, for vreplication to catch up on primary migrations. The migration will be cancelled on a timeout. 
--timeout is only supported for SwitchTraffic and ReverseTraffic.") reverseReplication := subFlags.Bool("reverse_replication", true, "Also reverse the replication (default true). --reverse_replication is only supported for SwitchTraffic.") @@ -2084,6 +2099,7 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl stopAfterCopy := subFlags.Bool("stop_after_copy", false, "Streams will be stopped once the copy phase is completed") dropForeignKeys := subFlags.Bool("drop_foreign_keys", false, "If true, tables in the target keyspace will be created without foreign keys.") maxReplicationLagAllowed := subFlags.Duration("max_replication_lag_allowed", defaultMaxReplicationLagAllowed, "Allow traffic to be switched only if vreplication lag is below this (in seconds)") + atomicCopy := subFlags.Bool("atomic-copy", false, "(EXPERIMENTAL) Use this if your source keyspace has tables which use foreign key constraints. All tables from the source will be moved.") onDDL := "IGNORE" subFlags.StringVar(&onDDL, "on-ddl", onDDL, "What to do when DDL is encountered in the VReplication stream. Possible values are IGNORE, STOP, EXEC, and EXEC_IGNORE.") @@ -2093,6 +2109,7 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl allTables := subFlags.Bool("all", false, "MoveTables only. Move all tables from the source keyspace. Either table_specs or --all needs to be specified.") excludes := subFlags.String("exclude", "", "MoveTables only. Tables to exclude (comma-separated) if --all is specified") sourceKeyspace := subFlags.String("source", "", "MoveTables only. Source keyspace") + initializeTargetSequences := subFlags.Bool("initialize-target-sequences", false, "MoveTables only. 
When moving tables from an unsharded keyspace to a sharded keyspace, initialize any sequences that are being used on the target when switching writes.") // if sourceTimeZone is specified, the target needs to have time zones loaded // note we make an opinionated decision to not allow specifying a different target time zone than UTC. @@ -2100,6 +2117,7 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl // MoveTables-only params renameTables := subFlags.Bool("rename_tables", false, "MoveTables only. Rename tables instead of dropping them. --rename_tables is only supported for Complete.") + noRoutingRules := subFlags.Bool("no-routing-rules", false, "(Advanced) MoveTables Create only. Do not create routing rules while creating the workflow. See the reference documentation for limitations if you use this flag.") // MoveTables and Reshard params sourceShards := subFlags.String("source_shards", "", "Source shards") @@ -2142,6 +2160,7 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl DryRun: *dryRun, AutoStart: *autoStart, StopAfterCopy: *stopAfterCopy, + AtomicCopy: *atomicCopy, } printDetails := func() error { @@ -2162,7 +2181,7 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl statuses := res.ShardStatuses[ksShard].PrimaryReplicationStatuses for _, st := range statuses { msg := "" - if st.State == "Error" { + if st.State == binlogdatapb.VReplicationWorkflowState_Error.String() { msg += fmt.Sprintf(": %s.", st.Message) } else if st.Pos == "" { msg += ". VStream has not started." 
@@ -2233,6 +2252,24 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl wr.Logger().Errorf("keyspace %s not found", *sourceKeyspace) return err } + + if *atomicCopy { + var errors []string + if !*allTables { + errors = append(errors, "atomic copy requires --all.") + } + if *tables != "" { + errors = append(errors, "atomic copy does not support specifying tables.") + } + if *excludes != "" { + errors = append(errors, "atomic copy does not support specifying excludes.") + } + if len(errors) > 0 { + errors = append(errors, "Found options incompatible with atomic copy:") + return fmt.Errorf(strings.Join(errors, " ")) + } + } + if !*allTables && *tables == "" { return fmt.Errorf("no tables specified to move") } @@ -2244,6 +2281,7 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl vrwp.ExternalCluster = externalClusterName vrwp.SourceTimeZone = *sourceTimeZone vrwp.DropForeignKeys = *dropForeignKeys + vrwp.NoRoutingRules = *noRoutingRules if *sourceShards != "" { vrwp.SourceShards = strings.Split(*sourceShards, ",") } @@ -2261,11 +2299,11 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl vrwp.OnDDL = onDDL vrwp.DeferSecondaryKeys = *deferNonPKeys vrwp.Cells = *cells - vrwp.TabletTypes = *tabletTypes + vrwp.TabletTypes = *tabletTypesStr case vReplicationWorkflowActionSwitchTraffic, vReplicationWorkflowActionReverseTraffic: vrwp.Cells = *cells if subFlags.Changed("tablet_types") { - vrwp.TabletTypes = *tabletTypes + vrwp.TabletTypes = *tabletTypesStr } else { // When no tablet types are specified we are supposed to switch all traffic so // we override the normal default for tablet_types. 
@@ -2274,6 +2312,7 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl vrwp.Timeout = *timeout vrwp.EnableReverseReplication = *reverseReplication vrwp.MaxAllowedTransactionLagSeconds = int64(math.Ceil(maxReplicationLagAllowed.Seconds())) + vrwp.InitializeTargetSequences = *initializeTargetSequences case vReplicationWorkflowActionCancel: vrwp.KeepData = *keepData vrwp.KeepRoutingRules = *keepRoutingRules @@ -2476,7 +2515,7 @@ func commandExternalizeVindex(ctx context.Context, wr *wrangler.Wrangler, subFla func commandMaterialize(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { cells := subFlags.String("cells", "", "Source cells to replicate from.") - tabletTypes := subFlags.String("tablet_types", "", "Source tablet types to replicate from.") + tabletTypesStr := subFlags.String("tablet_types", "", "Source tablet types to replicate from.") if err := subFlags.Parse(args); err != nil { return err } @@ -2488,7 +2527,16 @@ func commandMaterialize(ctx context.Context, wr *wrangler.Wrangler, subFlags *pf return err } ms.Cell = *cells - ms.TabletTypes = *tabletTypes + tabletTypes, inorder, err := discovery.ParseTabletTypesAndOrder(*tabletTypesStr) + if err != nil { + return err + } + tsp := tabletmanagerdatapb.TabletSelectionPreference_ANY + if inorder { + tsp = tabletmanagerdatapb.TabletSelectionPreference_INORDER + } + ms.TabletTypes = topoproto.MakeStringTypeCSV(tabletTypes) + ms.TabletSelectionPreference = tsp return wr.Materialize(ctx, ms) } @@ -2509,7 +2557,7 @@ func commandVDiff(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.Fl sourceCell := subFlags.String("source_cell", "", "The source cell to compare from; default is any available cell") targetCell := subFlags.String("target_cell", "", "The target cell to compare with; default is any available cell") - tabletTypes := subFlags.String("tablet_types", "in_order:RDONLY,REPLICA,PRIMARY", "Tablet types for source and target") + 
tabletTypesStr := subFlags.String("tablet_types", "in_order:RDONLY,REPLICA,PRIMARY", "Tablet types for source and target") filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on primary migrations. The migration will be cancelled on a timeout.") maxRows := subFlags.Int64("limit", math.MaxInt64, "Max rows to stop comparing after") debugQuery := subFlags.Bool("debug_query", false, "Adds a mysql query to the report that can be used for further debugging") @@ -2539,7 +2587,7 @@ func commandVDiff(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.Fl } }() - _, err = wr.VDiff(ctx, keyspace, workflow, *sourceCell, *targetCell, *tabletTypes, *filteredReplicationWaitTime, *format, + _, err = wr.VDiff(ctx, keyspace, workflow, *sourceCell, *targetCell, *tabletTypesStr, *filteredReplicationWaitTime, *format, *maxRows, *tables, *debugQuery, *onlyPks, *maxExtraRowsToCompare) if err != nil { log.Errorf("vdiff returning with error: %v", err) @@ -2852,15 +2900,15 @@ func commandValidateSchemaKeyspace(ctx context.Context, wr *wrangler.Wrangler, s } func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { - allowLongUnavailability := subFlags.Bool("allow_long_unavailability", false, "Allow large schema changes which incur a longer unavailability of the database.") + subFlags.MarkDeprecated("allow_long_unavailability", "") sql := subFlags.String("sql", "", "A list of semicolon-delimited SQL commands") sqlFile := subFlags.String("sql-file", "", "Identifies the file that contains the SQL commands") ddlStrategy := subFlags.String("ddl_strategy", string(schema.DDLStrategyDirect), "Online DDL strategy, compatible with @@ddl_strategy session variable (examples: 'gh-ost', 'pt-osc', 'gh-ost --max-load=Threads_running=100'") uuidList := subFlags.String("uuid_list", "", "Optional: comma delimited 
explicit UUIDs for migration. If given, must match number of DDL changes") migrationContext := subFlags.String("migration_context", "", "For Online DDL, optionally supply a custom unique string used as context for the migration(s) in this command. By default a unique context is auto-generated by Vitess") requestContext := subFlags.String("request_context", "", "synonym for --migration_context") - waitReplicasTimeout := subFlags.Duration("wait_replicas_timeout", wrangler.DefaultWaitReplicasTimeout, "The amount of time to wait for replicas to receive the schema change via replication.") - skipPreflight := subFlags.Bool("skip_preflight", false, "Deprecated. Always assumed to be 'true'") + waitReplicasTimeout := subFlags.Duration("wait_replicas_timeout", grpcvtctldserver.DefaultWaitReplicasTimeout, "The amount of time to wait for replicas to receive the schema change via replication.") + batchSize := subFlags.Int64("batch_size", 0, "How many queries to batch together") callerID := subFlags.String("caller_id", "", "This is the effective caller ID used for the operation and should map to an ACL name which grants this identity the necessary permissions to perform the operation (this is only necessary when strict table ACLs are used)") if err := subFlags.Parse(args); err != nil { @@ -2869,9 +2917,6 @@ func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *pf if subFlags.NArg() != 1 { return fmt.Errorf("the argument is required for the commandApplySchema command") } - if *skipPreflight { - log.Warningf("--skip_preflight flag is deprecated. 
Always assumed to be 'true'") - } keyspace := subFlags.Arg(0) change, err := getFileParam(*sql, *sqlFile, "sql") @@ -2899,15 +2944,14 @@ func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *pf log.Info("Calling ApplySchema on VtctldServer") resp, err := wr.VtctldServer().ApplySchema(ctx, &vtctldatapb.ApplySchemaRequest{ - Keyspace: keyspace, - AllowLongUnavailability: *allowLongUnavailability, - DdlStrategy: *ddlStrategy, - Sql: parts, - SkipPreflight: true, - UuidList: textutil.SplitDelimitedList(*uuidList), - MigrationContext: *migrationContext, - WaitReplicasTimeout: protoutil.DurationToProto(*waitReplicasTimeout), - CallerId: cID, + Keyspace: keyspace, + DdlStrategy: *ddlStrategy, + Sql: parts, + UuidList: textutil.SplitDelimitedList(*uuidList), + MigrationContext: *migrationContext, + WaitReplicasTimeout: protoutil.DurationToProto(*waitReplicasTimeout), + CallerId: cID, + BatchSize: *batchSize, }) if err != nil { @@ -2922,6 +2966,34 @@ func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *pf return nil } +func generateOnlineDDLQuery(command string, arg string, allSupported bool) (string, error) { + // Accept inputs like so: + // "launch", "all" + // "launch", + // "launch-all", + if tokens := strings.Split(command, "-"); len(tokens) == 2 && tokens[1] == "all" { + // command is e.g. 
"launch-all" + if arg != "" { + return "", fmt.Errorf("UUID not allowed in '%s' command", command) + } + // transform "launch-all" into "launch", "all" + command = tokens[0] + arg = "all" + } + switch arg { + case "": + return "", fmt.Errorf("UUID|all required") + case "all": + if !allSupported { + return "", fmt.Errorf("'all' not supported for '%s' command", command) + } + return fmt.Sprintf(`alter vitess_migration %s all`, command), nil + default: + query := `alter vitess_migration %a ` + command + return sqlparser.ParseAndBind(query, sqltypes.StringBindVariable(arg)) + } +} + func commandOnlineDDL(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { json := subFlags.Bool("json", false, "Output JSON instead of human-readable table") orderBy := subFlags.String("order", "ascending", "Sort the results by `id` property of the Schema migration (default is ascending. Allowed values are `ascending` or `descending`.") @@ -2945,7 +3017,7 @@ func commandOnlineDDL(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfla applySchemaQuery := "" executeFetchQuery := "" - var bindErr error + var err error switch command { case "show": condition := "" @@ -2961,12 +3033,12 @@ func commandOnlineDDL(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfla string(schema.OnlineDDLStatusRunning), string(schema.OnlineDDLStatusComplete), string(schema.OnlineDDLStatusFailed): - condition, bindErr = sqlparser.ParseAndBind("migration_status=%a", sqltypes.StringBindVariable(arg)) + condition, err = sqlparser.ParseAndBind("migration_status=%a", sqltypes.StringBindVariable(arg)) default: if schema.IsOnlineDDLUUID(arg) { - condition, bindErr = sqlparser.ParseAndBind("migration_uuid=%a", sqltypes.StringBindVariable(arg)) + condition, err = sqlparser.ParseAndBind("migration_uuid=%a", sqltypes.StringBindVariable(arg)) } else { - condition, bindErr = sqlparser.ParseAndBind("migration_context=%a", sqltypes.StringBindVariable(arg)) + condition, err = 
sqlparser.ParseAndBind("migration_context=%a", sqltypes.StringBindVariable(arg)) } } order := " order by `id` " @@ -2985,31 +3057,29 @@ func commandOnlineDDL(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfla executeFetchQuery = fmt.Sprintf(`select * from _vt.schema_migrations where %s %s %s`, condition, order, skipLimit) - case "retry": - if arg == "" { - return fmt.Errorf("UUID required") - } - applySchemaQuery, bindErr = sqlparser.ParseAndBind(`alter vitess_migration %a retry`, sqltypes.StringBindVariable(arg)) - case "complete": - if arg == "" { - return fmt.Errorf("UUID required") - } - applySchemaQuery, bindErr = sqlparser.ParseAndBind(`alter vitess_migration %a complete`, sqltypes.StringBindVariable(arg)) - case "cancel": - if arg == "" { - return fmt.Errorf("UUID required") - } - applySchemaQuery, bindErr = sqlparser.ParseAndBind(`alter vitess_migration %a cancel`, sqltypes.StringBindVariable(arg)) - case "cancel-all": - if arg != "" { - return fmt.Errorf("UUID not allowed in %s", command) - } - applySchemaQuery = `alter vitess_migration cancel all` + case + "retry", + "cleanup": + // Do not support 'ALL' argument + applySchemaQuery, err = generateOnlineDDLQuery(command, arg, false) + case + "launch", + "launch-all", + "complete", + "complete-all", + "cancel", + "cancel-all", + "throttle", + "throttle-all", + "unthrottle", + "unthrottle-all": + // Support 'ALL' argument + applySchemaQuery, err = generateOnlineDDLQuery(command, arg, true) default: return fmt.Errorf("Unknown OnlineDDL command: %s", command) } - if bindErr != nil { - return fmt.Errorf("Error generating OnlineDDL query: %+v", bindErr) + if err != nil { + return fmt.Errorf("Error generating OnlineDDL query: %+v", err) } if applySchemaQuery != "" { @@ -3018,8 +3088,7 @@ func commandOnlineDDL(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfla resp, err := wr.VtctldServer().ApplySchema(ctx, &vtctldatapb.ApplySchemaRequest{ Keyspace: keyspace, Sql: []string{applySchemaQuery}, - 
SkipPreflight: true, - WaitReplicasTimeout: protoutil.DurationToProto(wrangler.DefaultWaitReplicasTimeout), + WaitReplicasTimeout: protoutil.DurationToProto(grpcvtctldserver.DefaultWaitReplicasTimeout), }) if err != nil { return err @@ -3064,7 +3133,7 @@ func commandCopySchemaShard(ctx context.Context, wr *wrangler.Wrangler, subFlags includeViews := subFlags.Bool("include-views", true, "Includes views in the output") skipVerify := subFlags.Bool("skip-verify", false, "Skip verification of source and target schema after copy") // for backwards compatibility - waitReplicasTimeout := subFlags.Duration("wait_replicas_timeout", wrangler.DefaultWaitReplicasTimeout, "The amount of time to wait for replicas to receive the schema change via replication.") + waitReplicasTimeout := subFlags.Duration("wait_replicas_timeout", grpcvtctldserver.DefaultWaitReplicasTimeout, "The amount of time to wait for replicas to receive the schema change via replication.") if err := subFlags.Parse(args); err != nil { return err } @@ -3337,6 +3406,27 @@ func commandApplyVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *p wr.Logger().Printf("New VSchema object:\n%s\nIf this is not what you expected, check the input data (as JSON parsing will skip unexpected fields).\n", b) } + // Validate the VSchema. + ksVs, err := vindexes.BuildKeyspace(vs) + if err != nil { + return err + } + + // Log unknown Vindex params as warnings. 
+ var vdxNames []string + for name := range ksVs.Vindexes { + vdxNames = append(vdxNames, name) + } + sort.Strings(vdxNames) + for _, name := range vdxNames { + vdx := ksVs.Vindexes[name] + if val, ok := vdx.(vindexes.ParamValidating); ok { + for _, param := range val.UnknownParams() { + wr.Logger().Warningf("Unknown param in vindex %s: %s", name, param) + } + } + } + if *dryRun { wr.Logger().Printf("Dry run: Skipping update of VSchema\n") return nil @@ -3349,6 +3439,10 @@ func commandApplyVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *p return err } + if _, err := vindexes.BuildKeyspace(vs); err != nil { + return err + } + if err := wr.TopoServer().SaveVSchema(ctx, keyspace, vs); err != nil { return err } @@ -3498,7 +3592,11 @@ func commandUpdateThrottlerConfig(ctx context.Context, wr *wrangler.Wrangler, su customQuery := subFlags.String("custom-query", "", "custom throttler check query") checkAsCheckSelf := subFlags.Bool("check-as-check-self", false, "/throttler/check requests behave as is /throttler/check-self was called") checkAsCheckShard := subFlags.Bool("check-as-check-shard", false, "use standard behavior for /throttler/check requests") - + unthrottledApp := subFlags.String("unthrottle-app", "", "an app name to unthrottle") + throttledApp := subFlags.String("throttle-app", "", "an app name to throttle") + throttledAppRatio := subFlags.Float64("throttle-app-ratio", throttle.DefaultThrottleRatio, "ratio to throttle app (app specififed in --throttled-app)") + throttledAppDuration := subFlags.Duration("throttle-app-duration", throttle.DefaultAppThrottleDuration, "duration after which throttled app rule expires (app specified in --throttled-app)") + throttledAppExempt := subFlags.Bool("throttle-app-exempt", false, "exempt this app from being at all throttled. 
WARNING: use with extreme care, as this is likely to push metrics beyond the throttler's threshold, and starve other apps (app specified in --throttled-app)") if err := subFlags.Parse(args); err != nil { return err } @@ -3513,9 +3611,22 @@ func commandUpdateThrottlerConfig(ctx context.Context, wr *wrangler.Wrangler, su return fmt.Errorf("--check-as-check-self and --check-as-check-shard are mutually exclusive") } + if *throttledApp != "" && *unthrottledApp != "" { + return fmt.Errorf("--throttle-app and --unthrottle-app are mutually exclusive") + } + if subFlags.Changed("throttle-app-ratio") && *throttledApp == "" { + return fmt.Errorf("--throttle-app-ratio requires --throttle-app") + } + if subFlags.Changed("throttle-app-duration") && *throttledApp == "" { + return fmt.Errorf("--throttle-app-duration requires --throttle-app") + } + if subFlags.Changed("throttle-app-exempt") && *throttledApp == "" { + return fmt.Errorf("--throttle-app-exempt requires --throttle-app") + } + keyspace := subFlags.Arg(0) - _, err = wr.VtctldServer().UpdateThrottlerConfig(ctx, &vtctldatapb.UpdateThrottlerConfigRequest{ + req := &vtctldatapb.UpdateThrottlerConfigRequest{ Keyspace: keyspace, Enable: *enable, Disable: *disable, @@ -3524,7 +3635,22 @@ func commandUpdateThrottlerConfig(ctx context.Context, wr *wrangler.Wrangler, su Threshold: *threshold, CheckAsCheckSelf: *checkAsCheckSelf, CheckAsCheckShard: *checkAsCheckShard, - }) + } + if *throttledApp != "" { + req.ThrottledApp = &topodatapb.ThrottledAppRule{ + Name: *throttledApp, + Ratio: *throttledAppRatio, + Exempt: *throttledAppExempt, + ExpiresAt: protoutil.TimeToProto(time.Now().Add(*throttledAppDuration)), + } + } else if *unthrottledApp != "" { + req.ThrottledApp = &topodatapb.ThrottledAppRule{ + Name: *unthrottledApp, + Ratio: 0, + ExpiresAt: protoutil.TimeToProto(time.Now()), + } + } + _, err = wr.VtctldServer().UpdateThrottlerConfig(ctx, req) return err } @@ -3598,7 +3724,7 @@ func commandWorkflow(ctx context.Context, wr 
*wrangler.Wrangler, subFlags *pflag usage := "usage: Workflow [--dry-run] [--cells] [--tablet-types] [.] start/stop/update/delete/show/listall/tags []" dryRun := subFlags.Bool("dry-run", false, "Does a dry run of the Workflow action and reports the query and list of tablets on which the operation will be applied") cells := subFlags.StringSlice("cells", []string{}, "New Cell(s) or CellAlias(es) (comma-separated) to replicate from. (Update only)") - tabletTypes := subFlags.StringSlice("tablet-types", []string{}, "New source tablet types to replicate from (e.g. PRIMARY, REPLICA, RDONLY). (Update only)") + tabletTypesStrs := subFlags.StringSlice("tablet-types", []string{}, "New source tablet types to replicate from (e.g. PRIMARY, REPLICA, RDONLY). (Update only)") onDDL := subFlags.String("on-ddl", "", "New instruction on what to do when DDL is encountered in the VReplication stream. Possible values are IGNORE, STOP, EXEC, and EXEC_IGNORE. (Update only)") if err := subFlags.Parse(args); err != nil { return err @@ -3651,16 +3777,22 @@ func commandWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag } else { cells = &textutil.SimulatedNullStringSlice } + tabletTypes := make([]topodatapb.TabletType, len(*tabletTypesStrs)) + inorder := false if subFlags.Lookup("tablet-types").Changed { // Validate the provided value(s) changes = true - for i, tabletType := range *tabletTypes { - (*tabletTypes)[i] = strings.ToUpper(strings.TrimSpace(tabletType)) - if _, err = topoproto.ParseTabletType((*tabletTypes)[i]); err != nil { + if len(*tabletTypesStrs) > 0 && strings.HasPrefix((*tabletTypesStrs)[0], discovery.InOrderHint) { + (*tabletTypesStrs)[0] = strings.TrimPrefix((*tabletTypesStrs)[0], discovery.InOrderHint) + inorder = true + } + for i, tabletType := range *tabletTypesStrs { + tabletTypes[i], err = topoproto.ParseTabletType(tabletType) + if err != nil { return err } } } else { - tabletTypes = &textutil.SimulatedNullStringSlice + tabletTypes = 
[]topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)} } onddl := int32(textutil.SimulatedNullInt) // To signify no value has been provided if subFlags.Lookup("on-ddl").Changed { // Validate the provided value @@ -3674,11 +3806,16 @@ func commandWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag if !changes { return fmt.Errorf(errWorkflowUpdateWithoutChanges) } - rpcReq = &tabletmanagerdatapb.UpdateVRWorkflowRequest{ - Workflow: workflow, - Cells: *cells, - TabletTypes: *tabletTypes, - OnDdl: binlogdatapb.OnDDLAction(onddl), + tsp := tabletmanagerdatapb.TabletSelectionPreference_UNKNOWN + if inorder { + tsp = tabletmanagerdatapb.TabletSelectionPreference_INORDER + } + rpcReq = &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ + Workflow: workflow, + Cells: *cells, + TabletTypes: tabletTypes, + TabletSelectionPreference: tsp, + OnDdl: binlogdatapb.OnDDLAction(onddl), } } results, err = wr.WorkflowAction(ctx, workflow, keyspace, action, *dryRun, rpcReq) // Only update currently uses the new RPC path diff --git a/go/vt/vtctl/vtctl_env_test.go b/go/vt/vtctl/vtctl_env_test.go index 570088b9d13..e502fbdf86a 100644 --- a/go/vt/vtctl/vtctl_env_test.go +++ b/go/vt/vtctl/vtctl_env_test.go @@ -68,12 +68,12 @@ func init() { //---------------------------------------------- // testVTCtlEnv -func newTestVTCtlEnv() *testVTCtlEnv { +func newTestVTCtlEnv(ctx context.Context) *testVTCtlEnv { tabletconntest.SetProtocol("go.vt.vtctl.vtctl_env_test", "VTCtlTest") cellName := "cell1" env := &testVTCtlEnv{ tablets: make(map[int]*testVTCtlTablet), - topoServ: memorytopo.NewServer(cellName), + topoServ: memorytopo.NewServer(ctx, cellName), cell: cellName, tabletType: topodatapb.TabletType_REPLICA, tmc: newTestVTCtlTMClient(), diff --git a/go/vt/vtctl/vtctl_test.go b/go/vt/vtctl/vtctl_test.go index 33c615c0a11..aee985c6955 100644 --- a/go/vt/vtctl/vtctl_test.go +++ b/go/vt/vtctl/vtctl_test.go @@ -18,12 +18,15 @@ package vtctl import ( "context" + _ 
"embed" "fmt" + "regexp" "strings" "testing" "time" "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -32,8 +35,112 @@ import ( "vitess.io/vitess/go/vt/wrangler" ) +var ( + //go:embed testdata/unknown-params-logged-vschema.json + unknownParamsLoggedVSchema string + + //go:embed testdata/unknown-params-logged-dry-run-vschema.json + unknownParamsLoggedDryRunVSchema string +) + +// TestApplyVSchema tests the the MoveTables client command +// via the commandVRApplyVSchema() cmd handler. +func TestApplyVSchema(t *testing.T) { + shard := "0" + ks := "ks" + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestVTCtlEnv(ctx) + defer env.close() + _ = env.addTablet(100, ks, shard, &topodatapb.KeyRange{}, topodatapb.TabletType_PRIMARY) + + tests := []struct { + name string + args []string + expectResults func() + want string + }{ + { + name: "EmptyVSchema", + args: []string{"--vschema", "{}", ks}, + want: "New VSchema object:\n{}\nIf this is not what you expected, check the input data (as JSON parsing will skip unexpected fields).\n\n", + }, + { + name: "UnknownParamsLogged", + args: []string{"--vschema", unknownParamsLoggedVSchema, ks}, + want: `/New VSchema object: +{ + "sharded": true, + "vindexes": { + "binary_vdx": { + "type": "binary", + "params": { + "hello": "world" + } + }, + "hash_vdx": { + "type": "hash", + "params": { + "foo": "bar", + "hello": "world" + } + } + } +} +If this is not what you expected, check the input data \(as JSON parsing will skip unexpected fields\)\. 
+ +.*W.* .* vtctl.go:.* Unknown param in vindex binary_vdx: hello +W.* .* vtctl.go:.* Unknown param in vindex hash_vdx: foo +W.* .* vtctl.go:.* Unknown param in vindex hash_vdx: hello`, + }, + { + name: "UnknownParamsLoggedWithDryRun", + args: []string{"--vschema", unknownParamsLoggedDryRunVSchema, "--dry-run", ks}, + want: `/New VSchema object: +{ + "sharded": true, + "vindexes": { + "binary_vdx": { + "type": "binary", + "params": { + "hello": "world" + } + }, + "hash_vdx": { + "type": "hash", + "params": { + "foo": "bar", + "hello": "world" + } + } + } +} +If this is not what you expected, check the input data \(as JSON parsing will skip unexpected fields\)\. + +.*W.* .* vtctl.go:.* Unknown param in vindex binary_vdx: hello +W.* .* vtctl.go:.* Unknown param in vindex hash_vdx: foo +W.* .* vtctl.go:.* Unknown param in vindex hash_vdx: hello +Dry run: Skipping update of VSchema`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + subFlags := pflag.NewFlagSet("test", pflag.ContinueOnError) + err := commandApplyVSchema(ctx, env.wr, subFlags, tt.args) + require.NoError(t, err) + if strings.HasPrefix(tt.want, "/") { + require.Regexp(t, regexp.MustCompile(tt.want[1:]), env.cmdlog.String()) + } else { + require.Equal(t, tt.want, env.cmdlog.String()) + } + env.cmdlog.Clear() + env.tmc.clearResults() + }) + } +} + // TestMoveTables tests the the MoveTables client command -// via the commandVRWorkflow() cmd handler. +// via the commandVReplicationWorkflow() cmd handler. // This currently only tests the Progress action (which is // a parent of the Show action) but it can be used to test // other actions as well. 
@@ -46,8 +153,9 @@ func TestMoveTables(t *testing.T) { wf := "testwf" ksWf := fmt.Sprintf("%s.%s", targetKs, wf) minTableSize := 16384 // a single 16KiB InnoDB page - ctx := context.Background() - env := newTestVTCtlEnv() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestVTCtlEnv(ctx) defer env.close() source := env.addTablet(100, sourceKs, shard, &topodatapb.KeyRange{}, topodatapb.TabletType_PRIMARY) target := env.addTablet(200, targetKs, shard, &topodatapb.KeyRange{}, topodatapb.TabletType_PRIMARY) @@ -243,7 +351,7 @@ func TestMoveTables(t *testing.T) { subFlags := pflag.NewFlagSet("test", pflag.ContinueOnError) expectGlobalResults() tt.expectResults() - err := commandVRWorkflow(ctx, env.wr, subFlags, tt.args, tt.workflowType) + err := commandVReplicationWorkflow(ctx, env.wr, subFlags, tt.args, tt.workflowType) require.NoError(t, err) if strings.HasPrefix(tt.want, "/") { require.Regexp(t, tt.want[1:], env.cmdlog.String()) @@ -255,3 +363,109 @@ func TestMoveTables(t *testing.T) { }) } } + +func TestGenerateOnlineDDLQuery(t *testing.T) { + tcases := []struct { + cmd string + arg string + allSupported bool + expectError bool + expectQuery string + }{ + { + "launch", + "all", + true, + false, + "alter vitess_migration launch all", + }, + { + "launch-all", + "", + true, + false, + "alter vitess_migration launch all", + }, + { + "launch", + "718169cc_1fea_11ee_82b1_0a43f95f28a3", + true, + false, + "alter vitess_migration '718169cc_1fea_11ee_82b1_0a43f95f28a3' launch", + }, + { + "cancel", + "718169cc_1fea_11ee_82b1_0a43f95f28a3", + true, + false, + "alter vitess_migration '718169cc_1fea_11ee_82b1_0a43f95f28a3' cancel", + }, + { + "unthrottle", + "718169cc_1fea_11ee_82b1_0a43f95f28a3", + true, + false, + "alter vitess_migration '718169cc_1fea_11ee_82b1_0a43f95f28a3' unthrottle", + }, + { + "unthrottle", + "", + true, + true, + "", + }, + { + "unthrottle-all", + "all", + true, + true, + "", + }, + { + "unthrottle-all", + 
"718169cc_1fea_11ee_82b1_0a43f95f28a3", + true, + true, + "", + }, + { + "retry", + "718169cc_1fea_11ee_82b1_0a43f95f28a3", + false, + false, + "alter vitess_migration '718169cc_1fea_11ee_82b1_0a43f95f28a3' retry", + }, + { + "retry-all", + "718169cc_1fea_11ee_82b1_0a43f95f28a3", + false, + true, + "", + }, + { + "retry-all", + "", + false, + true, + "", + }, + { + "retry", + "all", + false, + true, + "", + }, + } + for _, tcase := range tcases { + t.Run(fmt.Sprintf("%s %s", tcase.cmd, tcase.arg), func(t *testing.T) { + query, err := generateOnlineDDLQuery(tcase.cmd, tcase.arg, tcase.allSupported) + if tcase.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tcase.expectQuery, query) + } + }) + } +} diff --git a/go/vt/vtctl/vtctlclienttest/client.go b/go/vt/vtctl/vtctlclienttest/client.go index df192997cbb..8e77bed8f8a 100644 --- a/go/vt/vtctl/vtctlclienttest/client.go +++ b/go/vt/vtctl/vtctlclienttest/client.go @@ -25,13 +25,13 @@ package vtctlclienttest // zookeeper) won't be drawn into production binaries as well. 
import ( + "context" "io" "strings" "testing" "time" - "context" - + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -51,8 +51,8 @@ func init() { } // CreateTopoServer returns the test topo server properly configured -func CreateTopoServer(t *testing.T) *topo.Server { - return memorytopo.NewServer("cell1") +func CreateTopoServer(t *testing.T, ctx context.Context) *topo.Server { + return memorytopo.NewServer(ctx, "cell1") } // TestSuite runs the test suite on the given topo server and client @@ -67,7 +67,7 @@ func TestSuite(t *testing.T, ts *topo.Server, client vtctlclient.VtctlClient) { PortMap: map[string]int32{ "vt": 3333, }, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(1970, 1, 1, 1, 1, 1, 1, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(1970, 1, 1, 1, 1, 1, 1, time.UTC)), Tags: map[string]string{"tag": "value"}, Keyspace: "test_keyspace", Type: topodatapb.TabletType_PRIMARY, diff --git a/go/vt/vtctl/workflow/log_recorder.go b/go/vt/vtctl/workflow/log_recorder.go new file mode 100644 index 00000000000..c35ef562354 --- /dev/null +++ b/go/vt/vtctl/workflow/log_recorder.go @@ -0,0 +1,58 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "fmt" + "sort" +) + +// LogRecorder is used to collect logs for a specific purpose. 
+// Not thread-safe since it is expected to be generated in repeatable sequence +type LogRecorder struct { + logs []string +} + +// NewLogRecorder creates a new instance of LogRecorder +func NewLogRecorder() *LogRecorder { + lr := LogRecorder{} + return &lr +} + +// Log records a new log message +func (lr *LogRecorder) Log(log string) { + lr.logs = append(lr.logs, log) +} + +// Logf records a new log message with interpolation parameters using fmt.Sprintf. +func (lr *LogRecorder) Logf(log string, args ...any) { + lr.logs = append(lr.logs, fmt.Sprintf(log, args...)) +} + +// LogSlice sorts a given slice using natural sort, so that the result is predictable. +// Useful when logging arrays or maps where order of objects can vary +func (lr *LogRecorder) LogSlice(logs []string) { + sort.Strings(logs) + for _, log := range logs { + lr.Log(log) + } +} + +// GetLogs returns all recorded logs in sequence +func (lr *LogRecorder) GetLogs() []string { + return lr.logs +} diff --git a/go/cmd/vtctld/plugin_kubernetestopo.go b/go/vt/vtctl/workflow/log_recorder_test.go similarity index 57% rename from go/cmd/vtctld/plugin_kubernetestopo.go rename to go/vt/vtctl/workflow/log_recorder_test.go index 97612df6ed7..b58d1d42a79 100644 --- a/go/cmd/vtctld/plugin_kubernetestopo.go +++ b/go/vt/vtctl/workflow/log_recorder_test.go @@ -14,10 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main - -// Imports and register the 'kubernetes' topo.Server. 
+package workflow import ( - _ "vitess.io/vitess/go/vt/topo/k8stopo" + "testing" + "time" + + "github.com/magiconair/properties/assert" ) + +func TestLogRecorder(t *testing.T) { + lr := NewLogRecorder() + now := time.August + lr.Log("log 1") + lr.Log("log 2") + lr.Logf("log 3 with params: %s, %v, %d", "param1", now, 3) + lr.LogSlice([]string{"log 4", "log 5"}) + want := []string{"log 1", "log 2", "log 3 with params: param1, August, 3", "log 4", "log 5"} + assert.Equal(t, lr.GetLogs(), want) +} diff --git a/go/vt/vtctl/workflow/materializer.go b/go/vt/vtctl/workflow/materializer.go new file mode 100644 index 00000000000..152409540c8 --- /dev/null +++ b/go/vt/vtctl/workflow/materializer.go @@ -0,0 +1,739 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workflow + +import ( + "context" + "fmt" + "strings" + "sync" + "text/template" + "time" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/mysqlctl/tmutils" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtctl/schematools" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +const ( + createDDLAsCopy = "copy" + createDDLAsCopyDropConstraint = "copy:drop_constraint" + createDDLAsCopyDropForeignKeys = "copy:drop_foreign_keys" +) + +type materializer struct { + ctx context.Context + ts *topo.Server + sourceTs *topo.Server + tmc tmclient.TabletManagerClient + + ms *vtctldatapb.MaterializeSettings + targetVSchema *vindexes.KeyspaceSchema + sourceShards []*topo.ShardInfo + targetShards []*topo.ShardInfo + isPartial bool + primaryVindexesDiffer bool + workflowType binlogdatapb.VReplicationWorkflowType +} + +func (mz *materializer) getWorkflowSubType() (binlogdatapb.VReplicationWorkflowSubType, error) { + switch { + case mz.isPartial && mz.ms.AtomicCopy: + return binlogdatapb.VReplicationWorkflowSubType_None, + fmt.Errorf("both atomic copy and partial mode cannot be specified for the same workflow") + case mz.isPartial: + return binlogdatapb.VReplicationWorkflowSubType_Partial, nil + case mz.ms.AtomicCopy: + return binlogdatapb.VReplicationWorkflowSubType_AtomicCopy, nil + default: + return binlogdatapb.VReplicationWorkflowSubType_None, nil + } +} + +func (mz *materializer) createMoveTablesStreams(req 
*vtctldatapb.MoveTablesCreateRequest) error { + if err := validateNewWorkflow(mz.ctx, mz.ts, mz.tmc, mz.ms.TargetKeyspace, mz.ms.Workflow); err != nil { + return err + } + err := mz.buildMaterializer() + if err != nil { + return err + } + if err := mz.deploySchema(); err != nil { + return err + } + + var workflowSubType binlogdatapb.VReplicationWorkflowSubType + workflowSubType, err = mz.getWorkflowSubType() + if err != nil { + return err + } + + return mz.forAllTargets(func(target *topo.ShardInfo) error { + targetPrimary, err := mz.ts.GetTablet(mz.ctx, target.PrimaryAlias) + if err != nil { + return vterrors.Wrapf(err, "GetTablet(%v) failed", target.PrimaryAlias) + } + + sourceShards := mz.filterSourceShards(target) + blses, err := mz.generateBinlogSources(mz.ctx, target, sourceShards) + if err != nil { + return err + } + _, err = mz.tmc.CreateVReplicationWorkflow(mz.ctx, targetPrimary.Tablet, &tabletmanagerdatapb.CreateVReplicationWorkflowRequest{ + Workflow: req.Workflow, + BinlogSource: blses, + Cells: req.Cells, + TabletTypes: req.TabletTypes, + TabletSelectionPreference: req.TabletSelectionPreference, + WorkflowType: mz.workflowType, + WorkflowSubType: workflowSubType, + DeferSecondaryKeys: req.DeferSecondaryKeys, + AutoStart: req.AutoStart, + StopAfterCopy: req.StopAfterCopy, + }) + return err + }) +} + +// createMaterializerStreams creates the vreplication streams for Materialize +// and LookupVindex workflows. 
+func (mz *materializer) createMaterializerStreams() error { + if err := validateNewWorkflow(mz.ctx, mz.ts, mz.tmc, mz.ms.TargetKeyspace, mz.ms.Workflow); err != nil { + return err + } + err := mz.buildMaterializer() + if err != nil { + return err + } + if err := mz.deploySchema(); err != nil { + return err + } + insertMap := make(map[string]string, len(mz.targetShards)) + for _, targetShard := range mz.targetShards { + sourceShards := mz.filterSourceShards(targetShard) + inserts, err := mz.generateInserts(mz.ctx, sourceShards) + if err != nil { + return err + } + insertMap[key.KeyRangeString(targetShard.KeyRange)] = inserts + } + if err := mz.createStreams(mz.ctx, insertMap); err != nil { + return err + } + return nil +} + +func (mz *materializer) generateInserts(ctx context.Context, sourceShards []*topo.ShardInfo) (string, error) { + ig := vreplication.NewInsertGenerator(binlogdatapb.VReplicationWorkflowState_Stopped, "{{.dbname}}") + + for _, sourceShard := range sourceShards { + bls := &binlogdatapb.BinlogSource{ + Keyspace: mz.ms.SourceKeyspace, + Shard: sourceShard.ShardName(), + Filter: &binlogdatapb.Filter{}, + StopAfterCopy: mz.ms.StopAfterCopy, + ExternalCluster: mz.ms.ExternalCluster, + SourceTimeZone: mz.ms.SourceTimeZone, + TargetTimeZone: mz.ms.TargetTimeZone, + OnDdl: binlogdatapb.OnDDLAction(binlogdatapb.OnDDLAction_value[mz.ms.OnDdl]), + } + for _, ts := range mz.ms.TableSettings { + rule := &binlogdatapb.Rule{ + Match: ts.TargetTable, + } + + if ts.SourceExpression == "" { + bls.Filter.Rules = append(bls.Filter.Rules, rule) + continue + } + + // Validate non-empty query. 
+ stmt, err := sqlparser.Parse(ts.SourceExpression) + if err != nil { + return "", err + } + sel, ok := stmt.(*sqlparser.Select) + if !ok { + return "", fmt.Errorf("unrecognized statement: %s", ts.SourceExpression) + } + filter := ts.SourceExpression + if mz.targetVSchema.Keyspace.Sharded && mz.targetVSchema.Tables[ts.TargetTable].Type != vindexes.TypeReference { + cv, err := vindexes.FindBestColVindex(mz.targetVSchema.Tables[ts.TargetTable]) + if err != nil { + return "", err + } + mappedCols := make([]*sqlparser.ColName, 0, len(cv.Columns)) + for _, col := range cv.Columns { + colName, err := matchColInSelect(col, sel) + if err != nil { + return "", err + } + mappedCols = append(mappedCols, colName) + } + subExprs := make(sqlparser.SelectExprs, 0, len(mappedCols)+2) + for _, mappedCol := range mappedCols { + subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: mappedCol}) + } + vindexName := fmt.Sprintf("%s.%s", mz.ms.TargetKeyspace, cv.Name) + subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral(vindexName)}) + subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral("{{.keyrange}}")}) + inKeyRange := &sqlparser.FuncExpr{ + Name: sqlparser.NewIdentifierCI("in_keyrange"), + Exprs: subExprs, + } + if sel.Where != nil { + sel.Where = &sqlparser.Where{ + Type: sqlparser.WhereClause, + Expr: &sqlparser.AndExpr{ + Left: inKeyRange, + Right: sel.Where.Expr, + }, + } + } else { + sel.Where = &sqlparser.Where{ + Type: sqlparser.WhereClause, + Expr: inKeyRange, + } + } + + filter = sqlparser.String(sel) + } + + rule.Filter = filter + + bls.Filter.Rules = append(bls.Filter.Rules, rule) + } + workflowSubType := binlogdatapb.VReplicationWorkflowSubType_None + if mz.isPartial { + workflowSubType = binlogdatapb.VReplicationWorkflowSubType_Partial + } + var workflowType binlogdatapb.VReplicationWorkflowType + switch mz.ms.MaterializationIntent { + case vtctldatapb.MaterializationIntent_CUSTOM: + workflowType = 
binlogdatapb.VReplicationWorkflowType_Materialize + case vtctldatapb.MaterializationIntent_MOVETABLES: + workflowType = binlogdatapb.VReplicationWorkflowType_MoveTables + case vtctldatapb.MaterializationIntent_CREATELOOKUPINDEX: + workflowType = binlogdatapb.VReplicationWorkflowType_CreateLookupIndex + } + ig.AddRow(mz.ms.Workflow, bls, "", mz.ms.Cell, mz.ms.TabletTypes, + workflowType, + workflowSubType, mz.ms.DeferSecondaryKeys) + } + return ig.String(), nil +} + +func (mz *materializer) generateBinlogSources(ctx context.Context, targetShard *topo.ShardInfo, sourceShards []*topo.ShardInfo) ([]*binlogdatapb.BinlogSource, error) { + blses := make([]*binlogdatapb.BinlogSource, 0, len(mz.sourceShards)) + for _, sourceShard := range sourceShards { + bls := &binlogdatapb.BinlogSource{ + Keyspace: mz.ms.SourceKeyspace, + Shard: sourceShard.ShardName(), + Filter: &binlogdatapb.Filter{}, + StopAfterCopy: mz.ms.StopAfterCopy, + ExternalCluster: mz.ms.ExternalCluster, + SourceTimeZone: mz.ms.SourceTimeZone, + TargetTimeZone: mz.ms.TargetTimeZone, + OnDdl: binlogdatapb.OnDDLAction(binlogdatapb.OnDDLAction_value[mz.ms.OnDdl]), + } + for _, ts := range mz.ms.TableSettings { + rule := &binlogdatapb.Rule{ + Match: ts.TargetTable, + } + + if ts.SourceExpression == "" { + bls.Filter.Rules = append(bls.Filter.Rules, rule) + continue + } + + // Validate non-empty query. 
+ stmt, err := sqlparser.Parse(ts.SourceExpression) + if err != nil { + return nil, err + } + sel, ok := stmt.(*sqlparser.Select) + if !ok { + return nil, fmt.Errorf("unrecognized statement: %s", ts.SourceExpression) + } + filter := ts.SourceExpression + if mz.targetVSchema.Keyspace.Sharded && mz.targetVSchema.Tables[ts.TargetTable].Type != vindexes.TypeReference { + cv, err := vindexes.FindBestColVindex(mz.targetVSchema.Tables[ts.TargetTable]) + if err != nil { + return nil, err + } + mappedCols := make([]*sqlparser.ColName, 0, len(cv.Columns)) + for _, col := range cv.Columns { + colName, err := matchColInSelect(col, sel) + if err != nil { + return nil, err + } + mappedCols = append(mappedCols, colName) + } + subExprs := make(sqlparser.SelectExprs, 0, len(mappedCols)+2) + for _, mappedCol := range mappedCols { + subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: mappedCol}) + } + vindexName := fmt.Sprintf("%s.%s", mz.ms.TargetKeyspace, cv.Name) + subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral(vindexName)}) + subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral(key.KeyRangeString(targetShard.KeyRange))}) + inKeyRange := &sqlparser.FuncExpr{ + Name: sqlparser.NewIdentifierCI("in_keyrange"), + Exprs: subExprs, + } + if sel.Where != nil { + sel.Where = &sqlparser.Where{ + Type: sqlparser.WhereClause, + Expr: &sqlparser.AndExpr{ + Left: inKeyRange, + Right: sel.Where.Expr, + }, + } + } else { + sel.Where = &sqlparser.Where{ + Type: sqlparser.WhereClause, + Expr: inKeyRange, + } + } + + filter = sqlparser.String(sel) + } + + rule.Filter = filter + bls.Filter.Rules = append(bls.Filter.Rules, rule) + } + blses = append(blses, bls) + } + return blses, nil +} + +func (mz *materializer) deploySchema() error { + var sourceDDLs map[string]string + var mu sync.Mutex + + return forAllShards(mz.targetShards, func(target *topo.ShardInfo) error { + allTables := []string{"/.*/"} + + hasTargetTable := 
map[string]bool{} + req := &tabletmanagerdatapb.GetSchemaRequest{Tables: allTables} + targetSchema, err := schematools.GetSchema(mz.ctx, mz.ts, mz.tmc, target.PrimaryAlias, req) + if err != nil { + return err + } + + for _, td := range targetSchema.TableDefinitions { + hasTargetTable[td.Name] = true + } + + targetTablet, err := mz.ts.GetTablet(mz.ctx, target.PrimaryAlias) + if err != nil { + return err + } + + var applyDDLs []string + for _, ts := range mz.ms.TableSettings { + if hasTargetTable[ts.TargetTable] { + // Table already exists. + continue + } + if ts.CreateDdl == "" { + return fmt.Errorf("target table %v does not exist and there is no create ddl defined", ts.TargetTable) + } + + var err error + mu.Lock() + if len(sourceDDLs) == 0 { + // Only get DDLs for tables once and lazily: if we need to copy the schema from source + // to target then we copy schemas from primaries on the source keyspace; we have found + // use cases where the user just has a replica (no primary) in the source keyspace. + sourceDDLs, err = getSourceTableDDLs(mz.ctx, mz.sourceTs, mz.tmc, mz.sourceShards) + } + mu.Unlock() + if err != nil { + log.Errorf("Error getting DDLs of source tables: %s", err.Error()) + return err + } + + createDDL := ts.CreateDdl + if createDDL == createDDLAsCopy || createDDL == createDDLAsCopyDropConstraint || createDDL == createDDLAsCopyDropForeignKeys { + if ts.SourceExpression != "" { + // Check for table if non-empty SourceExpression. 
+ sourceTableName, err := sqlparser.TableFromStatement(ts.SourceExpression) + if err != nil { + return err + } + if sourceTableName.Name.String() != ts.TargetTable { + return fmt.Errorf("source and target table names must match for copying schema: %v vs %v", sqlparser.String(sourceTableName), ts.TargetTable) + + } + } + + ddl, ok := sourceDDLs[ts.TargetTable] + if !ok { + return fmt.Errorf("source table %v does not exist", ts.TargetTable) + } + + if createDDL == createDDLAsCopyDropConstraint { + strippedDDL, err := stripTableConstraints(ddl) + if err != nil { + return err + } + + ddl = strippedDDL + } + + if createDDL == createDDLAsCopyDropForeignKeys { + strippedDDL, err := stripTableForeignKeys(ddl) + if err != nil { + return err + } + + ddl = strippedDDL + } + createDDL = ddl + } + + applyDDLs = append(applyDDLs, createDDL) + } + + if len(applyDDLs) > 0 { + sql := strings.Join(applyDDLs, ";\n") + + _, err = mz.tmc.ApplySchema(mz.ctx, targetTablet.Tablet, &tmutils.SchemaChange{ + SQL: sql, + Force: false, + AllowReplication: true, + SQLMode: vreplication.SQLMode, + }) + if err != nil { + return err + } + } + + return nil + }) +} + +func (mz *materializer) buildMaterializer() error { + ctx := mz.ctx + ms := mz.ms + vschema, err := mz.ts.GetVSchema(ctx, ms.TargetKeyspace) + if err != nil { + return err + } + targetVSchema, err := vindexes.BuildKeyspaceSchema(vschema, ms.TargetKeyspace) + if err != nil { + return err + } + if targetVSchema.Keyspace.Sharded { + for _, ts := range ms.TableSettings { + if targetVSchema.Tables[ts.TargetTable] == nil { + return fmt.Errorf("table %s not found in vschema for keyspace %s", ts.TargetTable, ms.TargetKeyspace) + } + } + } + isPartial := false + sourceShards, err := mz.sourceTs.GetServingShards(ctx, ms.SourceKeyspace) + if err != nil { + return err + } + if len(ms.SourceShards) > 0 { + isPartial = true + var sourceShards2 []*topo.ShardInfo + for _, shard := range sourceShards { + for _, shard2 := range ms.SourceShards { + if 
shard.ShardName() == shard2 { + sourceShards2 = append(sourceShards2, shard) + break + } + } + } + sourceShards = sourceShards2 + } + if len(sourceShards) == 0 { + return fmt.Errorf("no source shards specified for workflow %s ", ms.Workflow) + } + + targetShards, err := mz.ts.GetServingShards(ctx, ms.TargetKeyspace) + if err != nil { + return err + } + if len(ms.SourceShards) > 0 { + var targetShards2 []*topo.ShardInfo + for _, shard := range targetShards { + for _, shard2 := range ms.SourceShards { + if shard.ShardName() == shard2 { + targetShards2 = append(targetShards2, shard) + break + } + } + } + targetShards = targetShards2 + } + if len(targetShards) == 0 { + return fmt.Errorf("no target shards specified for workflow %s ", ms.Workflow) + } + + sourceTs := mz.ts + if ms.ExternalCluster != "" { // when the source is an external mysql cluster mounted using the Mount command + externalTopo, err := mz.ts.OpenExternalVitessClusterServer(ctx, ms.ExternalCluster) + if err != nil { + return fmt.Errorf("failed to open external topo: %v", err) + } + sourceTs = externalTopo + } + differentPVs := false + sourceVSchema, err := sourceTs.GetVSchema(ctx, ms.SourceKeyspace) + if err != nil { + return fmt.Errorf("failed to get source keyspace vschema: %v", err) + } + differentPVs = primaryVindexesDiffer(ms, sourceVSchema, vschema) + + mz.targetVSchema = targetVSchema + mz.sourceShards = sourceShards + mz.targetShards = targetShards + mz.isPartial = isPartial + mz.primaryVindexesDiffer = differentPVs + return nil +} + +func (mz *materializer) createStreams(ctx context.Context, insertsMap map[string]string) error { + return forAllShards(mz.targetShards, func(target *topo.ShardInfo) error { + keyRange := key.KeyRangeString(target.KeyRange) + inserts := insertsMap[keyRange] + targetPrimary, err := mz.ts.GetTablet(ctx, target.PrimaryAlias) + if err != nil { + return vterrors.Wrapf(err, "GetTablet(%v) failed", target.PrimaryAlias) + } + buf := &strings.Builder{} + t := 
template.Must(template.New("").Parse(inserts)) + input := map[string]string{ + "keyrange": keyRange, + "dbname": targetPrimary.DbName(), + } + if err := t.Execute(buf, input); err != nil { + return err + } + if _, err := mz.tmc.VReplicationExec(ctx, targetPrimary.Tablet, buf.String()); err != nil { + return err + } + return nil + }) +} + +func (mz *materializer) startStreams(ctx context.Context) error { + return forAllShards(mz.targetShards, func(target *topo.ShardInfo) error { + targetPrimary, err := mz.ts.GetTablet(ctx, target.PrimaryAlias) + if err != nil { + return vterrors.Wrapf(err, "GetTablet(%v) failed", target.PrimaryAlias) + } + query := fmt.Sprintf("update _vt.vreplication set state='Running' where db_name=%s and workflow=%s", encodeString(targetPrimary.DbName()), encodeString(mz.ms.Workflow)) + if _, err := mz.tmc.VReplicationExec(ctx, targetPrimary.Tablet, query); err != nil { + return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", targetPrimary.Tablet, query) + } + return nil + }) +} + +func Materialize(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, ms *vtctldatapb.MaterializeSettings) error { + mz := &materializer{ + ctx: ctx, + ts: ts, + sourceTs: ts, + tmc: tmc, + ms: ms, + } + + err := mz.createMaterializerStreams() + if err != nil { + return err + } + return mz.startStreams(ctx) +} + +func (mz *materializer) forAllTargets(f func(*topo.ShardInfo) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, target := range mz.targetShards { + wg.Add(1) + go func(target *topo.ShardInfo) { + defer wg.Done() + + if err := f(target); err != nil { + allErrors.RecordError(err) + } + }(target) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} + +// checkTZConversion is a light-weight consistency check to validate that, if a source time zone is specified to MoveTables, +// that the current primary has the time zone loaded in order to run the convert_tz() function used by 
VReplication to do the +// datetime conversions. We only check the current primaries on each shard and note here that it is possible a new primary +// gets elected: in this case user will either see errors during vreplication or vdiff will report mismatches. +func (mz *materializer) checkTZConversion(ctx context.Context, tz string) error { + err := mz.forAllTargets(func(target *topo.ShardInfo) error { + targetPrimary, err := mz.ts.GetTablet(ctx, target.PrimaryAlias) + if err != nil { + return vterrors.Wrapf(err, "GetTablet(%v) failed", target.PrimaryAlias) + } + testDateTime := "2006-01-02 15:04:05" + query := fmt.Sprintf("select convert_tz(%s, %s, 'UTC')", encodeString(testDateTime), encodeString(tz)) + qrproto, err := mz.tmc.ExecuteFetchAsApp(ctx, targetPrimary.Tablet, false, &tabletmanagerdatapb.ExecuteFetchAsAppRequest{ + Query: []byte(query), + MaxRows: 1, + }) + if err != nil { + return vterrors.Wrapf(err, "ExecuteFetchAsApp(%v, %s)", targetPrimary.Tablet, query) + } + qr := sqltypes.Proto3ToResult(qrproto) + if gotDate, err := time.Parse(testDateTime, qr.Rows[0][0].ToString()); err != nil { + return fmt.Errorf("unable to perform time_zone conversions from %s to UTC — value from DB was: %+v and the result of the attempt was: %s. Either the specified source time zone is invalid or the time zone tables have not been loaded on the %s tablet", + tz, qr.Rows, gotDate, targetPrimary.Alias) + } + return nil + }) + return err +} + +// filterSourceShards filters out source shards that do not overlap with the +// provided target shard. This is an optimization to avoid copying unnecessary +// data between the shards. This optimization is only applied for MoveTables +// when the source and target shard have the same primary vindexes. +func (mz *materializer) filterSourceShards(targetShard *topo.ShardInfo) []*topo.ShardInfo { + if mz.primaryVindexesDiffer || mz.ms.MaterializationIntent != vtctldatapb.MaterializationIntent_MOVETABLES { + // Use all source shards. 
+ return mz.sourceShards + } + // Use intersecting source shards. + var filteredSourceShards []*topo.ShardInfo + for _, sourceShard := range mz.sourceShards { + if !key.KeyRangeIntersect(sourceShard.KeyRange, targetShard.KeyRange) { + continue + } + filteredSourceShards = append(filteredSourceShards, sourceShard) + } + return filteredSourceShards +} + +// primaryVindexesDiffer returns true if, for any tables defined in the provided +// materialize settings, the source and target vschema definitions for those +// tables have different primary vindexes. +// +// The result of this function is used to determine whether to apply a source +// shard selection optimization in MoveTables. +func primaryVindexesDiffer(ms *vtctldatapb.MaterializeSettings, source, target *vschemapb.Keyspace) bool { + // Unless both keyspaces are sharded, treat the answer to the question as + // trivially false. + if source.Sharded != target.Sharded { + return false + } + + // For source and target keyspaces that are sharded, we can optimize source + // shard selection if source and target tables' primary vindexes are equal. + // + // To determine this, iterate over all target tables, looking for primary + // vindexes that differ from the corresponding source table. + for _, ts := range ms.TableSettings { + sColumnVindexes := []*vschemapb.ColumnVindex{} + tColumnVindexes := []*vschemapb.ColumnVindex{} + if tt, ok := source.Tables[ts.TargetTable]; ok { + sColumnVindexes = tt.ColumnVindexes + } + if tt, ok := target.Tables[ts.TargetTable]; ok { + tColumnVindexes = tt.ColumnVindexes + } + + // If source does not have a primary vindex, but the target does, then + // the primary vindexes differ. + if len(sColumnVindexes) == 0 && len(tColumnVindexes) > 0 { + return true + } + // If source has a primary vindex, but the target does not, then the + // primary vindexes differ. 
+ if len(sColumnVindexes) > 0 && len(tColumnVindexes) == 0 { + return true + } + // If neither source nor target have any vindexes, treat the answer to + // the question as trivially false. + if len(sColumnVindexes) == 0 && len(tColumnVindexes) == 0 { + return true + } + + sPrimaryVindex := sColumnVindexes[0] + tPrimaryVindex := tColumnVindexes[0] + + // Compare source and target primary vindex columns. + var sColumns, tColumns []string + if sPrimaryVindex.Column != "" { + sColumns = []string{sPrimaryVindex.Column} + } else { + sColumns = sPrimaryVindex.Columns + } + if tPrimaryVindex.Column != "" { + tColumns = []string{tPrimaryVindex.Column} + } else { + tColumns = tPrimaryVindex.Columns + } + if len(sColumns) != len(tColumns) { + return true + } + for i := 0; i < len(sColumns); i++ { + if !strings.EqualFold(sColumns[i], tColumns[i]) { + return true + } + } + + // Get source and target vindex definitions. + spv := source.Vindexes[sColumnVindexes[0].Name] + tpv := target.Vindexes[tColumnVindexes[0].Name] + // If the source has vindex definition, but target does not, then the + // target vschema is invalid. Assume the primary vindexes differ. + if spv != nil && tpv == nil { + return true + } + // If the target has vindex definition, but source does not, then the + // source vschema is invalid. Assume the primary vindexes differ. + if spv == nil && tpv != nil { + return true + } + // If both target and source are missing vindex definitions, then both + // are equally invalid. + if spv == nil && tpv == nil { + continue + } + // Compare source and target vindex type. + if !strings.EqualFold(spv.Type, tpv.Type) { + return true + } + } + return false +} diff --git a/go/vt/vtctl/workflow/materializer_env_test.go b/go/vt/vtctl/workflow/materializer_env_test.go new file mode 100644 index 00000000000..f1ddf6be645 --- /dev/null +++ b/go/vt/vtctl/workflow/materializer_env_test.go @@ -0,0 +1,363 @@ +/* +Copyright 2023 The Vitess Authors. 
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package workflow

import (
	"context"
	"fmt"
	"os"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"testing"

	_flag "vitess.io/vitess/go/internal/flag"
	"vitess.io/vitess/go/sqltypes"
	"vitess.io/vitess/go/vt/mysqlctl/tmutils"
	"vitess.io/vitess/go/vt/sqlparser"
	"vitess.io/vitess/go/vt/topo"
	"vitess.io/vitess/go/vt/topo/memorytopo"
	"vitess.io/vitess/go/vt/vttablet/tmclient"

	binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
	querypb "vitess.io/vitess/go/vt/proto/query"
	tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
	vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
)

// queryResult pairs a query the fake tablet manager client expects to
// receive with the canned result it should return for that query.
type queryResult struct {
	query  string
	result *querypb.QueryResult
}

// testMaterializerEnv is the shared fixture for materializer/workflow tests:
// an in-memory topo server, a workflow Server wired to a fake tablet manager
// client, and the source/target tablets the tests operate on.
type testMaterializerEnv struct {
	ws      *Server
	ms      *vtctldatapb.MaterializeSettings
	sources []string
	targets []string
	tablets map[int]*topodatapb.Tablet
	// Importing the tabletmanager package causes a circular dependency. :-(
	//tms map[int]*tabletmanager.TabletManager
	topoServ *topo.Server
	cell     string
	tmc      *testMaterializerTMClient
}

//----------------------------------------------
// testMaterializerEnv

func TestMain(m *testing.M) {
	_flag.ParseFlagsForTest()
	os.Exit(m.Run())
}

// newTestMaterializerEnv builds a fixture for the given materialize settings.
// Source tablets get UIDs starting at 100 and target tablets at 200 (stepping
// by 10 per shard); targets are only created when source and target keyspaces
// differ. A fake "<table>_schema" schema is registered for every table in
// ms.TableSettings on both keyspaces.
func newTestMaterializerEnv(t *testing.T, ctx context.Context, ms *vtctldatapb.MaterializeSettings, sources, targets []string) *testMaterializerEnv {
	t.Helper()
	env := &testMaterializerEnv{
		ms:       ms,
		sources:  sources,
		targets:  targets,
		tablets:  make(map[int]*topodatapb.Tablet),
		topoServ: memorytopo.NewServer(ctx, "cell"),
		cell:     "cell",
		tmc:      newTestMaterializerTMClient(),
	}
	env.ws = NewServer(env.topoServ, env.tmc)
	tabletID := 100
	for _, shard := range sources {
		_ = env.addTablet(tabletID, env.ms.SourceKeyspace, shard, topodatapb.TabletType_PRIMARY)
		tabletID += 10
	}
	if ms.SourceKeyspace != ms.TargetKeyspace {
		tabletID = 200
		for _, shard := range targets {
			_ = env.addTablet(tabletID, env.ms.TargetKeyspace, shard, topodatapb.TabletType_PRIMARY)
			tabletID += 10
		}
	}

	for _, ts := range ms.TableSettings {
		tableName := ts.TargetTable
		// Prefer the table name parsed out of the source expression; fall
		// back to the target table name when parsing fails.
		table, err := sqlparser.TableFromStatement(ts.SourceExpression)
		if err == nil {
			tableName = table.Name.String()
		}
		env.tmc.schema[ms.SourceKeyspace+"."+tableName] = &tabletmanagerdatapb.SchemaDefinition{
			TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{
				Name:   tableName,
				Schema: fmt.Sprintf("%s_schema", tableName),
			}},
		}
		env.tmc.schema[ms.TargetKeyspace+"."+ts.TargetTable] = &tabletmanagerdatapb.SchemaDefinition{
			TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{
				Name:   ts.TargetTable,
				Schema: fmt.Sprintf("%s_schema", ts.TargetTable),
			}},
		}
	}
	if ms.Workflow != "" {
		env.expectValidation()
	}
	return env
}

// expectValidation queues, on every target tablet (UID >= 200), the
// workflow-existence check query that workflow creation performs.
func (env *testMaterializerEnv) expectValidation() {
	for _, tablet := range env.tablets {
		tabletID := int(tablet.Alias.Uid)
		if tabletID < 200 {
			continue
		}
		env.tmc.expectVRQuery(tabletID, fmt.Sprintf("select 1 from _vt.vreplication where db_name='vt_%s' and workflow='%s'", env.ms.TargetKeyspace, env.ms.Workflow), &sqltypes.Result{})
	}
}

// close removes all tablets created by the fixture from the topo server.
func (env *testMaterializerEnv) close() {
	for _, t := range env.tablets {
		env.deleteTablet(t)
	}
}

// addTablet registers a tablet with the topo server and, for PRIMARY tablets,
// also records it as the shard primary. It panics on topo errors since this
// is test setup.
func (env *testMaterializerEnv) addTablet(id int, keyspace, shard string, tabletType topodatapb.TabletType) *topodatapb.Tablet {
	tablet := &topodatapb.Tablet{
		Alias: &topodatapb.TabletAlias{
			Cell: env.cell,
			Uid:  uint32(id),
		},
		Keyspace: keyspace,
		Shard:    shard,
		KeyRange: &topodatapb.KeyRange{},
		Type:     tabletType,
		PortMap: map[string]int32{
			"test": int32(id),
		},
	}
	env.tablets[id] = tablet
	if err := env.ws.ts.InitTablet(context.Background(), tablet, false /* allowPrimaryOverride */, true /* createShardAndKeyspace */, false /* allowUpdate */); err != nil {
		panic(err)
	}
	if tabletType == topodatapb.TabletType_PRIMARY {
		_, err := env.ws.ts.UpdateShardFields(context.Background(), keyspace, shard, func(si *topo.ShardInfo) error {
			si.PrimaryAlias = tablet.Alias
			return nil
		})
		if err != nil {
			panic(err)
		}
	}
	return tablet
}

// deleteTablet removes the tablet from topo (best-effort) and from the
// fixture's tablet map.
func (env *testMaterializerEnv) deleteTablet(tablet *topodatapb.Tablet) {
	_ = env.topoServ.DeleteTablet(context.Background(), tablet.Alias)
	delete(env.tablets, int(tablet.Alias.Uid))
}

//----------------------------------------------
// testMaterializerTMClient

// testMaterializerTMClient is a fake tmclient.TabletManagerClient that serves
// canned schemas and scripted vreplication query results to the tests.
type testMaterializerTMClient struct {
	tmclient.TabletManagerClient
	schema map[string]*tabletmanagerdatapb.SchemaDefinition

	mu              sync.Mutex // guards vrQueries
	vrQueries       map[int][]*queryResult
	getSchemaCounts map[string]int
	muSchemaCount   sync.Mutex // guards getSchemaCounts

	// Used to confirm the number of times WorkflowDelete was called.
	workflowDeleteCalls int
}

// newTestMaterializerTMClient returns an empty fake TM client; tests register
// schemas and expected queries on it before use.
func newTestMaterializerTMClient() *testMaterializerTMClient {
	return &testMaterializerTMClient{
		schema:          make(map[string]*tabletmanagerdatapb.SchemaDefinition),
		vrQueries:       make(map[int][]*queryResult),
		getSchemaCounts: make(map[string]int),
	}
}

// schemaRequested bumps the per-tablet GetSchema call counter (keyed by the
// tablet UID rendered as a string).
func (tmc *testMaterializerTMClient) schemaRequested(uid uint32) {
	tmc.muSchemaCount.Lock()
	defer tmc.muSchemaCount.Unlock()
	key := strconv.Itoa(int(uid))
	n, ok := tmc.getSchemaCounts[key]
	if !ok {
		tmc.getSchemaCounts[key] = 1
	} else {
		tmc.getSchemaCounts[key] = n + 1
	}
}

// CreateVReplicationWorkflow always reports success with one row affected.
func (tmc *testMaterializerTMClient) CreateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) {
	res := sqltypes.MakeTestResult(sqltypes.MakeTestFields("rowsaffected", "int64"), "1")
	return &tabletmanagerdatapb.CreateVReplicationWorkflowResponse{Result: sqltypes.ResultToProto3(res)}, nil
}

// ReadVReplicationWorkflow returns a canned single-stream workflow sourced
// from sourceks/0. Workflows whose name contains "lookup" are reported as
// CreateLookupIndex, everything else as MoveTables.
func (tmc *testMaterializerTMClient) ReadVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) {
	workflowType := binlogdatapb.VReplicationWorkflowType_MoveTables
	if strings.Contains(request.Workflow, "lookup") {
		workflowType = binlogdatapb.VReplicationWorkflowType_CreateLookupIndex
	}
	return &tabletmanagerdatapb.ReadVReplicationWorkflowResponse{
		Workflow:     request.Workflow,
		WorkflowType: workflowType,
		Streams: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{
			{
				Id: 1,
				Bls: &binlogdatapb.BinlogSource{
					Keyspace: "sourceks",
					Shard:    "0",
					Filter: &binlogdatapb.Filter{
						Rules: []*binlogdatapb.Rule{
							{
								Match: ".*",
							},
						},
					},
				},
			},
		},
	}, nil
}

// DeleteVReplicationWorkflow records the call (see workflowDeleteCalls) and
// reports one row affected.
func (tmc *testMaterializerTMClient) DeleteVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.DeleteVReplicationWorkflowRequest) (response *tabletmanagerdatapb.DeleteVReplicationWorkflowResponse, err error) {
	tmc.mu.Lock()
	defer tmc.mu.Unlock()
	tmc.workflowDeleteCalls++
	return &tabletmanagerdatapb.DeleteVReplicationWorkflowResponse{
		Result: &querypb.QueryResult{
			RowsAffected: 1,
		},
	}, nil
}

// getSchemaRequestCount returns how many times GetSchema was called for the
// tablet with the given UID.
func (tmc *testMaterializerTMClient) getSchemaRequestCount(uid uint32) int {
	tmc.muSchemaCount.Lock()
	defer tmc.muSchemaCount.Unlock()
	key := strconv.Itoa(int(uid))
	return tmc.getSchemaCounts[key]
}

// GetSchema serves the canned schemas registered under "<keyspace>.<table>".
// The literal request pattern "/.*/" returns every registered table in the
// tablet's keyspace; unknown tables are silently skipped.
func (tmc *testMaterializerTMClient) GetSchema(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.GetSchemaRequest) (*tabletmanagerdatapb.SchemaDefinition, error) {
	tmc.schemaRequested(tablet.Alias.Uid)
	schemaDefn := &tabletmanagerdatapb.SchemaDefinition{}
	for _, table := range request.Tables {
		if table == "/.*/" {
			// Special case of all tables in keyspace.
			for key, tableDefn := range tmc.schema {
				if strings.HasPrefix(key, tablet.Keyspace+".") {
					schemaDefn.TableDefinitions = append(schemaDefn.TableDefinitions, tableDefn.TableDefinitions...)
				}
			}
			break
		}

		key := tablet.Keyspace + "." + table
		tableDefn := tmc.schema[key]
		if tableDefn == nil {
			continue
		}
		schemaDefn.TableDefinitions = append(schemaDefn.TableDefinitions, tableDefn.TableDefinitions...)
	}
	return schemaDefn, nil
}

// expectVRQuery queues an expected query (and its canned result) for the
// given tablet; VReplicationExec consumes the queue in FIFO order.
func (tmc *testMaterializerTMClient) expectVRQuery(tabletID int, query string, result *sqltypes.Result) {
	tmc.mu.Lock()
	defer tmc.mu.Unlock()

	tmc.vrQueries[tabletID] = append(tmc.vrQueries[tabletID], &queryResult{
		query:  query,
		result: sqltypes.ResultToProto3(result),
	})
}

// verifyQueries fails the test if any expected queries were never executed.
func (tmc *testMaterializerTMClient) verifyQueries(t *testing.T) {
	t.Helper()
	tmc.mu.Lock()
	defer tmc.mu.Unlock()

	for tabletID, qrs := range tmc.vrQueries {
		if len(qrs) != 0 {
			var list []string
			for _, qr := range qrs {
				list = append(list, qr.query)
			}
			t.Errorf("tablet %v: found queries that were expected but never got executed by the test: %v", tabletID, list)
		}
	}
}

// VReplicationExec matches the incoming query against the head of the
// tablet's expectation queue — an expectation starting with '/' is treated as
// a regular expression over the rest of the string, anything else must match
// exactly — and returns the queued result, erroring on any mismatch or on an
// empty queue.
func (tmc *testMaterializerTMClient) VReplicationExec(ctx context.Context, tablet *topodatapb.Tablet, query string) (*querypb.QueryResult, error) {
	tmc.mu.Lock()
	defer tmc.mu.Unlock()

	qrs := tmc.vrQueries[int(tablet.Alias.Uid)]
	if len(qrs) == 0 {
		return nil, fmt.Errorf("tablet %v does not expect any more queries: %s", tablet, query)
	}
	matched := false
	if qrs[0].query[0] == '/' {
		matched = regexp.MustCompile(qrs[0].query[1:]).MatchString(query)
	} else {
		matched = query == qrs[0].query
	}
	if !matched {
		return nil, fmt.Errorf("tablet %v:\nunexpected query\n%s\nwant:\n%s", tablet, query, qrs[0].query)
	}
	tmc.vrQueries[int(tablet.Alias.Uid)] = qrs[1:]
	return qrs[0].result, nil
}

// ExecuteFetchAsDba funnels the request through the same expectation queue as
// VReplicationExec.
func (tmc *testMaterializerTMClient) ExecuteFetchAsDba(ctx context.Context, tablet *topodatapb.Tablet, usePool bool, req *tabletmanagerdatapb.ExecuteFetchAsDbaRequest) (*querypb.QueryResult, error) {
	// Reuse VReplicationExec
	return tmc.VReplicationExec(ctx, tablet, string(req.Query))
}

// ExecuteFetchAsAllPrivs is a no-op stub.
func (tmc *testMaterializerTMClient) ExecuteFetchAsAllPrivs(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ExecuteFetchAsAllPrivsRequest) (*querypb.QueryResult, error) {
	return nil, nil
}

// Note: ONLY breaks up change.SQL
into individual statements and executes it. Does NOT fully implement ApplySchema.
// Each ';'-separated statement is routed through ExecuteFetchAsDba (and thus
// through the scripted query-expectation queue); the first error aborts.
func (tmc *testMaterializerTMClient) ApplySchema(ctx context.Context, tablet *topodatapb.Tablet, change *tmutils.SchemaChange) (*tabletmanagerdatapb.SchemaChangeResult, error) {
	stmts := strings.Split(change.SQL, ";")

	for _, stmt := range stmts {
		_, err := tmc.ExecuteFetchAsDba(ctx, tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{
			Query:        []byte(stmt),
			MaxRows:      0,
			ReloadSchema: true,
		})
		if err != nil {
			return nil, err
		}
	}

	return nil, nil
}

// VDiff returns a canned successful response echoing the request's UUID.
func (tmc *testMaterializerTMClient) VDiff(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.VDiffRequest) (*tabletmanagerdatapb.VDiffResponse, error) {
	return &tabletmanagerdatapb.VDiffResponse{
		Id:        1,
		VdiffUuid: req.VdiffUuid,
		Output: &querypb.QueryResult{
			RowsAffected: 1,
		},
	}, nil
}
diff --git a/go/vt/vtctl/workflow/materializer_test.go b/go/vt/vtctl/workflow/materializer_test.go
new file mode 100644
index 00000000000..39aabcf45ad
--- /dev/null
+++ b/go/vt/vtctl/workflow/materializer_test.go
@@ -0,0 +1,3286 @@
/*
Copyright 2023 The Vitess Authors.
*/

package workflow

import (
	"context"
	"fmt"
	"strings"
	"testing"

	"github.com/stretchr/testify/require"
	"google.golang.org/protobuf/proto"

	"vitess.io/vitess/go/sqltypes"
	"vitess.io/vitess/go/test/utils"
	"vitess.io/vitess/go/vt/topo/memorytopo"
	"vitess.io/vitess/go/vt/topo/topoproto"
	"vitess.io/vitess/go/vt/vtgate/vindexes"

	binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
	querypb "vitess.io/vitess/go/vt/proto/query"
	tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
	vschemapb "vitess.io/vitess/go/vt/proto/vschema"
	vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
)

// Canned _vt.vreplication queries the tests register with the fake TM
// client's expectation queue. A leading '/' marks a regex expectation
// (see testMaterializerTMClient.VReplicationExec).
const getWorkflowQuery = "select id from _vt.vreplication where db_name='vt_targetks' and workflow='workflow'"
const mzUpdateQuery = "update _vt.vreplication set state='Running' where db_name='vt_targetks' and workflow='workflow'"
const mzSelectFrozenQuery = "select 1 from _vt.vreplication where db_name='vt_targetks' and message='FROZEN' and workflow_sub_type != 1"
const mzCheckJournal = "/select val from _vt.resharding_journal where id="
const mzGetWorkflowStatusQuery = "select id, workflow, source, pos, stop_pos, max_replication_lag, state, db_name, time_updated, transaction_timestamp, message, tags, workflow_type, workflow_sub_type, time_heartbeat, defer_secondary_keys, component_throttled, time_throttled, rows_copied from _vt.vreplication where workflow = 'workflow' and db_name = 'vt_targetks'"
const mzGetCopyState = "select distinct table_name from _vt.copy_state cs, _vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = 1"
const mzGetLatestCopyState = "select table_name, lastpk from _vt.copy_state where vrepl_id = 1 and id in (select max(id) from _vt.copy_state where vrepl_id = 1 group by vrepl_id, table_name)"
const insertPrefix = `/insert into _vt.vreplication\(workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated,
transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys\) values `
const eol = "$"

var (
	defaultOnDDL = binlogdatapb.OnDDLAction_IGNORE.String()
	// binlogSource is the canonical single-table source used across tests.
	binlogSource = &binlogdatapb.BinlogSource{
		Keyspace: "sourceks",
		Shard:    "0",
		Filter: &binlogdatapb.Filter{
			Rules: []*binlogdatapb.Rule{{
				Match:  "t1",
				Filter: "select * from t1",
			}},
		},
	}
	// getWorkflowRes is the canned reply for getWorkflowQuery.
	getWorkflowRes = sqltypes.MakeTestResult(
		sqltypes.MakeTestFields(
			"id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys",
			"int64|blob|varchar|varchar|varchar|int64|int64|int64",
		),
		fmt.Sprintf("1|%s||zone1|replica|1|0|1", binlogSource),
	)
	// getWorkflowStatusRes is the canned reply for mzGetWorkflowStatusQuery.
	getWorkflowStatusRes = sqltypes.MakeTestResult(
		sqltypes.MakeTestFields(
			"id|workflow|source|pos|stop_pos|max_replication_log|state|db_name|time_updated|transaction_timestamp|message|tags|workflow_type|workflow_sub_type|time_heartbeat|defer_secondary_keys|component_throttled|time_throttled|rows_copied",
			"int64|varchar|blob|varchar|varchar|int64|varchar|varchar|int64|int64|varchar|varchar|int64|int64|int64|int64|varchar|int64|int64",
		),
		fmt.Sprintf("1|wf1|%s|MySQL56/9d10e6ec-07a0-11ee-ae73-8e53f4cf3083:1-97|NULL|0|running|vt_ks|1686577659|0|||1|0|0|0||0|10", binlogSource),
	)
)

// TestStripForeignKeys checks that stripTableForeignKeys removes FOREIGN KEY
// constraints from a CREATE TABLE statement while keeping plain keys and
// CHECK constraints.
func TestStripForeignKeys(t *testing.T) {
	tcs := []struct {
		desc string
		ddl  string

		hasErr bool
		newDDL string
	}{
		{
			desc: "has FK constraints",
			ddl: "CREATE TABLE `table1` (\n" +
				"`id` int(11) NOT NULL AUTO_INCREMENT,\n" +
				"`foreign_id` int(11) CHECK (foreign_id>10),\n" +
				"PRIMARY KEY (`id`),\n" +
				"KEY `fk_table1_ref_foreign_id` (`foreign_id`),\n" +
				"CONSTRAINT `fk_table1_ref_foreign_id` FOREIGN KEY (`foreign_id`) REFERENCES `foreign` (`id`)\n" +
				") ENGINE=InnoDB DEFAULT CHARSET=latin1;",

			newDDL: "create table table1 (\n" +
				"\tid int(11) not null auto_increment,\n" +
				"\tforeign_id int(11),\n" +
				"\tPRIMARY KEY (id),\n" +
				"\tKEY fk_table1_ref_foreign_id (foreign_id),\n" +
				"\tcheck (foreign_id > 10)\n" +
				") ENGINE InnoDB,\n" +
				" CHARSET latin1",

			hasErr: false,
		},
		{
			desc: "no FK constraints",
			ddl: "CREATE TABLE `table1` (\n" +
				"`id` int(11) NOT NULL AUTO_INCREMENT,\n" +
				"`foreign_id` int(11) NOT NULL CHECK (foreign_id>10),\n" +
				"`user_id` int(11) NOT NULL,\n" +
				"PRIMARY KEY (`id`),\n" +
				"KEY `fk_table1_ref_foreign_id` (`foreign_id`),\n" +
				"KEY `fk_table1_ref_user_id` (`user_id`)\n" +
				") ENGINE=InnoDB DEFAULT CHARSET=latin1;",

			newDDL: "create table table1 (\n" +
				"\tid int(11) not null auto_increment,\n" +
				"\tforeign_id int(11) not null,\n" +
				"\tuser_id int(11) not null,\n" +
				"\tPRIMARY KEY (id),\n" +
				"\tKEY fk_table1_ref_foreign_id (foreign_id),\n" +
				"\tKEY fk_table1_ref_user_id (user_id),\n" +
				"\tcheck (foreign_id > 10)\n" +
				") ENGINE InnoDB,\n" +
				" CHARSET latin1",
		},
	}

	for _, tc := range tcs {
		newDDL, err := stripTableForeignKeys(tc.ddl)
		if tc.hasErr != (err != nil) {
			t.Fatalf("hasErr does not match: err: %v, tc: %+v", err, tc)
		}

		if newDDL != tc.newDDL {
			utils.MustMatch(t, tc.newDDL, newDDL, fmt.Sprintf("newDDL does not match. tc: %+v", tc))
		}
	}
}

// TestStripConstraints checks that stripTableConstraints removes all table
// constraints from a CREATE TABLE statement and errors on unparseable DDL.
func TestStripConstraints(t *testing.T) {
	tcs := []struct {
		desc string
		ddl  string

		hasErr bool
		newDDL string
	}{
		{
			desc: "constraints",
			ddl: "CREATE TABLE `table1` (\n" +
				"`id` int(11) NOT NULL AUTO_INCREMENT,\n" +
				"`foreign_id` int(11) NOT NULL,\n" +
				"`user_id` int(11) NOT NULL,\n" +
				"PRIMARY KEY (`id`),\n" +
				"KEY `fk_table1_ref_foreign_id` (`foreign_id`),\n" +
				"KEY `fk_table1_ref_user_id` (`user_id`),\n" +
				"CONSTRAINT `fk_table1_ref_foreign_id` FOREIGN KEY (`foreign_id`) REFERENCES `foreign` (`id`),\n" +
				"CONSTRAINT `fk_table1_ref_user_id` FOREIGN KEY (`user_id`) REFERENCES `core_user` (`id`)\n" +
				") ENGINE=InnoDB DEFAULT CHARSET=latin1;",

			newDDL: "create table table1 (\n" +
				"\tid int(11) not null auto_increment,\n" +
				"\tforeign_id int(11) not null,\n" +
				"\tuser_id int(11) not null,\n" +
				"\tPRIMARY KEY (id),\n" +
				"\tKEY fk_table1_ref_foreign_id (foreign_id),\n" +
				"\tKEY fk_table1_ref_user_id (user_id)\n" +
				") ENGINE InnoDB,\n" +
				" CHARSET latin1",

			hasErr: false,
		},
		{
			desc: "no constraints",
			ddl: "CREATE TABLE `table1` (\n" +
				"`id` int(11) NOT NULL AUTO_INCREMENT,\n" +
				"`foreign_id` int(11) NOT NULL,\n" +
				"`user_id` int(11) NOT NULL,\n" +
				"PRIMARY KEY (`id`),\n" +
				"KEY `fk_table1_ref_foreign_id` (`foreign_id`),\n" +
				"KEY `fk_table1_ref_user_id` (`user_id`)\n" +
				") ENGINE=InnoDB DEFAULT CHARSET=latin1;",

			newDDL: "create table table1 (\n" +
				"\tid int(11) not null auto_increment,\n" +
				"\tforeign_id int(11) not null,\n" +
				"\tuser_id int(11) not null,\n" +
				"\tPRIMARY KEY (id),\n" +
				"\tKEY fk_table1_ref_foreign_id (foreign_id),\n" +
				"\tKEY fk_table1_ref_user_id (user_id)\n" +
				") ENGINE InnoDB,\n" +
				" CHARSET latin1",
		},
		{
			desc: "bad ddl has error",
			ddl:  "bad ddl",

			hasErr: true,
		},
	}

	for _, tc := range tcs {
		newDDL, err := stripTableConstraints(tc.ddl)
		if tc.hasErr != (err != nil) {
			t.Fatalf("hasErr does not match:
err: %v, tc: %+v", err, tc) + } + + if newDDL != tc.newDDL { + utils.MustMatch(t, tc.newDDL, newDDL, fmt.Sprintf("newDDL does not match. tc: %+v", tc)) + } + } +} + +func TestAddTablesToVSchema(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + srcks := "source" + ws := &Server{ + ts: ts, + } + tests := []struct { + name string + sourceVSchema *vschemapb.Keyspace + inTargetVSchema *vschemapb.Keyspace + tables []string + copyVSchema bool + wantTargetVSchema *vschemapb.Keyspace + }{ + { + name: "no target vschema; copy source vschema", + sourceVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": { + Type: vindexes.TypeSequence, + }, + "t3": { + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "t2", + }, + }, + }, + }, + inTargetVSchema: &vschemapb.Keyspace{}, + tables: []string{"t1", "t2", "t3", "t4"}, + copyVSchema: true, + wantTargetVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": { + Type: vindexes.TypeSequence, + }, + "t3": { + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "t2", + }, + }, + "t4": {}, + }, + }, + }, + { + name: "no target vschema; copy source vschema; sharded source", + sourceVSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": { + Type: vindexes.TypeSequence, + Pinned: "123456", + }, + "t3": { + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "t2", + }, + ColumnVindexes: []*vschemapb.ColumnVindex{ // Should be stripped on target + { + Column: "c1", + Name: "hash", + }, + }, + }, + "t4": { + ColumnVindexes: []*vschemapb.ColumnVindex{ // Should be stripped on target + { + 
Column: "c1", + Name: "hash", + }, + }, + }, + }, + }, + inTargetVSchema: &vschemapb.Keyspace{}, + tables: []string{"t1", "t2", "t3", "t4"}, + copyVSchema: true, + wantTargetVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": { + Type: vindexes.TypeSequence, + Pinned: "123456", + }, + "t3": { + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "t2", + }, + }, + "t4": {}, + }, + }, + }, + { + name: "target vschema; copy source vschema", + sourceVSchema: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": { + Type: vindexes.TypeSequence, + }, + "t3": { + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "t2", + }, + }, + "t4": { + ColumnVindexes: []*vschemapb.ColumnVindex{ // Should be stripped on target + { + Column: "c1", + Name: "hash", + }, + }, + }, + }, + }, + inTargetVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": {}, + "t3": {}, + "t4": {}, + }, + }, + tables: []string{"t1", "t2", "t3", "t4"}, + copyVSchema: true, + wantTargetVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": {}, + "t3": {}, + "t4": {}, + }, + }, + }, + { + name: "no target vschema; do not copy source vschema", + sourceVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": { + Type: vindexes.TypeSequence, + }, + "t3": { + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "t2", + }, + }, + }, + }, + inTargetVSchema: &vschemapb.Keyspace{}, + tables: []string{"t1", "t2"}, + copyVSchema: false, + wantTargetVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": {}, + "t2": {}, + }, + }, + }, + } 
+ for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := ts.SaveVSchema(ctx, srcks, tt.sourceVSchema) + require.NoError(t, err) + err = ws.addTablesToVSchema(ctx, srcks, tt.inTargetVSchema, tt.tables, tt.copyVSchema) + require.NoError(t, err) + require.Equal(t, tt.wantTargetVSchema, tt.inTargetVSchema) + }) + } +} + +func TestMigrateVSchema(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + Cell: "cell", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + }}, + } + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + + env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, getWorkflowQuery, getWorkflowRes) + env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzGetCopyState, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzGetWorkflowStatusQuery, getWorkflowStatusRes) + env.tmc.expectVRQuery(200, mzGetLatestCopyState, &sqltypes.Result{}) + + _, err := env.ws.MoveTablesCreate(ctx, &vtctldatapb.MoveTablesCreateRequest{ + Workflow: ms.Workflow, + Cells: []string{ms.Cell}, + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType_PRIMARY}, + SourceKeyspace: ms.SourceKeyspace, + TargetKeyspace: ms.TargetKeyspace, + IncludeTables: []string{"t1"}, + AutoStart: true, + OnDdl: defaultOnDDL, + }) + require.NoError(t, err) + vschema, err := env.ws.ts.GetSrvVSchema(ctx, env.cell) + require.NoError(t, err) + got := fmt.Sprintf("%v", vschema) + want := []string{`keyspaces:{key:"sourceks" value:{}}`, + `keyspaces:{key:"sourceks" value:{}} keyspaces:{key:"targetks" value:{tables:{key:"t1" value:{}}}}`, + `rules:{from_table:"t1" 
to_tables:"sourceks.t1"}`, + `rules:{from_table:"targetks.t1" to_tables:"sourceks.t1"}`, + } + for _, wantstr := range want { + require.Contains(t, got, wantstr) + } +} + +// TestMoveTablesDDLFlag tests that we save the on-ddl flag value in the workflow. +// Note: +// - TestPlayerDDL tests that the vplayer correctly implements the ddl behavior +// - We have a manual e2e test for the full behavior: TestVReplicationDDLHandling +func TestMoveTablesDDLFlag(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + }}, + } + + for onDDLAction := range binlogdatapb.OnDDLAction_value { + t.Run(fmt.Sprintf("OnDDL Flag:%v", onDDLAction), func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + // This is the default and go does not marshal defaults + // for prototext fields so we use the default insert stmt. + //insert = fmt.Sprintf(`/insert into .vreplication\(.*on_ddl:%s.*`, onDDLAction) + //env.tmc.expectVRQuery(100, "/.*", &sqltypes.Result{}) + + // TODO: we cannot test the actual query generated w/o having a + // TabletManager. Importing the tabletmanager package, however, causes + // a circular dependency. + // The TabletManager portion is tested in rpc_vreplication_test.go. 
+ env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, getWorkflowQuery, getWorkflowRes) + env.tmc.expectVRQuery(200, mzGetCopyState, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzGetWorkflowStatusQuery, getWorkflowStatusRes) + env.tmc.expectVRQuery(200, mzGetLatestCopyState, &sqltypes.Result{}) + + targetShard, err := env.topoServ.GetShardNames(ctx, ms.TargetKeyspace) + require.NoError(t, err) + sourceShard, err := env.topoServ.GetShardNames(ctx, ms.SourceKeyspace) + require.NoError(t, err) + want := fmt.Sprintf("shard_streams:{key:\"%s/%s\" value:{streams:{id:1 tablet:{cell:\"%s\" uid:200} source_shard:\"%s/%s\" position:\"9d10e6ec-07a0-11ee-ae73-8e53f4cf3083:1-97\" status:\"running\" info:\"VStream Lag: 0s\"}}} traffic_state:\"Reads Not Switched. Writes Not Switched\"", + ms.TargetKeyspace, targetShard[0], env.cell, ms.SourceKeyspace, sourceShard[0]) + + res, err := env.ws.MoveTablesCreate(ctx, &vtctldatapb.MoveTablesCreateRequest{ + Workflow: ms.Workflow, + SourceKeyspace: ms.SourceKeyspace, + TargetKeyspace: ms.TargetKeyspace, + IncludeTables: []string{"t1"}, + OnDdl: onDDLAction, + }) + require.NoError(t, err) + require.Equal(t, want, fmt.Sprintf("%+v", res)) + }) + } +} + +// TestMoveTablesNoRoutingRules confirms that MoveTables does not create routing rules if --no-routing-rules is specified. 
+func TestMoveTablesNoRoutingRules(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + }}, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + // This is the default and go does not marshal defaults + // for prototext fields so we use the default insert stmt. + //insert = fmt.Sprintf(`/insert into .vreplication\(.*on_ddl:%s.*`, onDDLAction) + //env.tmc.expectVRQuery(100, "/.*", &sqltypes.Result{}) + + // TODO: we cannot test the actual query generated w/o having a + // TabletManager. Importing the tabletmanager package, however, causes + // a circular dependency. + // The TabletManager portion is tested in rpc_vreplication_test.go. + env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, getWorkflowQuery, getWorkflowRes) + env.tmc.expectVRQuery(200, mzGetCopyState, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzGetWorkflowStatusQuery, getWorkflowStatusRes) + env.tmc.expectVRQuery(200, mzGetLatestCopyState, &sqltypes.Result{}) + + targetShard, err := env.topoServ.GetShardNames(ctx, ms.TargetKeyspace) + require.NoError(t, err) + sourceShard, err := env.topoServ.GetShardNames(ctx, ms.SourceKeyspace) + require.NoError(t, err) + want := fmt.Sprintf("shard_streams:{key:\"%s/%s\" value:{streams:{id:1 tablet:{cell:\"%s\" uid:200} source_shard:\"%s/%s\" position:\"9d10e6ec-07a0-11ee-ae73-8e53f4cf3083:1-97\" status:\"running\" info:\"VStream Lag: 0s\"}}} traffic_state:\"Reads Not Switched. 
Writes Not Switched\"", + ms.TargetKeyspace, targetShard[0], env.cell, ms.SourceKeyspace, sourceShard[0]) + + res, err := env.ws.MoveTablesCreate(ctx, &vtctldatapb.MoveTablesCreateRequest{ + Workflow: ms.Workflow, + SourceKeyspace: ms.SourceKeyspace, + TargetKeyspace: ms.TargetKeyspace, + IncludeTables: []string{"t1"}, + NoRoutingRules: true, + }) + require.NoError(t, err) + require.Equal(t, want, fmt.Sprintf("%+v", res)) + rr, err := env.ws.ts.GetRoutingRules(ctx) + require.NoError(t, err) + require.Zerof(t, len(rr.Rules), "routing rules should be empty, found %+v", rr.Rules) +} + +func TestCreateLookupVindexFull(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "lookup", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + + specs := &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.lookup", + "from": "c1", + "to": "c2", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + Column: "col2", + }}, + }, + }, + } + // Dummy sourceSchema + sourceSchema := "CREATE TABLE `t1` (\n" + + " `col1` int(11) NOT NULL AUTO_INCREMENT,\n" + + " `col2` int(11) DEFAULT NULL,\n" + + " PRIMARY KEY (`id`)\n" + + ") ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=latin1" + + sourceVSchema := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col1", + }}, + }, + }, + } + env.tmc.schema[ms.SourceKeyspace+".t1"] = &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{ + 
Fields: []*querypb.Field{{ + Name: "col1", + Type: querypb.Type_INT64, + }, { + Name: "col2", + Type: querypb.Type_INT64, + }}, + Schema: sourceSchema, + }}, + } + if err := env.topoServ.SaveVSchema(ctx, ms.TargetKeyspace, &vschemapb.Keyspace{}); err != nil { + t.Fatal(err) + } + if err := env.topoServ.SaveVSchema(ctx, ms.SourceKeyspace, sourceVSchema); err != nil { + t.Fatal(err) + } + + env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, "/CREATE TABLE `lookup`", &sqltypes.Result{}) + env.tmc.expectVRQuery(200, insertPrefix, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, "update _vt.vreplication set state='Running' where db_name='vt_targetks' and workflow='lookup'", &sqltypes.Result{}) + + req := &vtctldatapb.LookupVindexCreateRequest{ + Workflow: ms.Workflow, + Keyspace: ms.SourceKeyspace, + Cells: []string{"cell"}, + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType_PRIMARY}, + Vindex: specs, + } + + _, err := env.ws.LookupVindexCreate(ctx, req) + require.NoError(t, err) + + wantvschema := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.lookup", + "from": "c1", + "to": "c2", + "write_only": "true", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col1", + }, { + Name: "v", + Column: "col2", + }}, + }, + }, + } + vschema, err := env.topoServ.GetVSchema(ctx, ms.SourceKeyspace) + require.NoError(t, err) + utils.MustMatch(t, wantvschema, vschema) + + wantvschema = &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "lookup": {}, + }, + } + vschema, err = env.topoServ.GetVSchema(ctx, ms.TargetKeyspace) + require.NoError(t, err) + utils.MustMatch(t, wantvschema, vschema) +} + +func 
TestCreateLookupVindexCreateDDL(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + vs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "col1", + Name: "xxhash", + }}, + }, + }, + } + if err := env.topoServ.SaveVSchema(ctx, ms.SourceKeyspace, vs); err != nil { + t.Fatal(err) + } + + testcases := []struct { + description string + specs *vschemapb.Keyspace + sourceSchema string + out string + err string + }{{ + description: "unique lookup", + specs: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": fmt.Sprintf("%s.lkp", ms.TargetKeyspace), + "from": "c1", + "to": "c2", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + Column: "col2", + }}, + }, + }, + }, + sourceSchema: "CREATE TABLE `t1` (\n" + + " `col1` int(11) NOT NULL AUTO_INCREMENT,\n" + + " `col2` int(11) DEFAULT NULL,\n" + + " `col3` int(11) DEFAULT NULL,\n" + + " PRIMARY KEY (`id`)\n" + + ") ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=latin1", + out: "CREATE TABLE `lkp` (\n" + + " `c1` int(11),\n" + + " `c2` varbinary(128),\n" + + " PRIMARY KEY (`c1`)\n" + + ")", + }, { + description: "unique lookup, also pk", + specs: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": fmt.Sprintf("%s.lkp", ms.TargetKeyspace), + "from": "c1", + "to": "c2", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + 
ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + Column: "col2", + }}, + }, + }, + }, + sourceSchema: "CREATE TABLE `t1` (\n" + + " `col2` int(11) NOT NULL AUTO_INCREMENT,\n" + + " `col1` int(11) DEFAULT NULL,\n" + + " `col4` int(11) DEFAULT NULL,\n" + + " PRIMARY KEY (`id`)\n" + + ") ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=latin1", + out: "CREATE TABLE `lkp` (\n" + + " `c1` int(11) NOT NULL,\n" + + " `c2` varbinary(128),\n" + + " PRIMARY KEY (`c1`)\n" + + ")", + }, { + description: "non-unique lookup, also pk", + specs: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "lookup", + Params: map[string]string{ + "table": fmt.Sprintf("%s.lkp", ms.TargetKeyspace), + "from": "c1,c2", + "to": "c3", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + Columns: []string{"col2", "col1"}, + }}, + }, + }, + }, + sourceSchema: "CREATE TABLE `t1` (\n" + + " `col1` int(11) NOT NULL AUTO_INCREMENT,\n" + + " `col2` int(11) NOT NULL,\n" + + " `col3` int(11) DEFAULT NULL,\n" + + " PRIMARY KEY (`id`)\n" + + ") ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=latin1", + out: "CREATE TABLE `lkp` (\n" + + " `c1` int(11) NOT NULL,\n" + + " `c2` int(11) NOT NULL,\n" + + " `c3` varbinary(128),\n" + + " PRIMARY KEY (`c1`, `c2`)\n" + + ")", + }, { + description: "column missing", + specs: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": fmt.Sprintf("%s.lkp", ms.TargetKeyspace), + "from": "c1", + "to": "c2", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + Column: "nocol", + }}, + }, + }, + }, + sourceSchema: "CREATE TABLE `t1` (\n" + + " `col1` int(11) NOT NULL AUTO_INCREMENT,\n" + + " `col2` int(11) NOT NULL,\n" + + " `col3` int(11) DEFAULT NULL,\n" + + " PRIMARY KEY (`id`)\n" + + ") 
ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=latin1", + err: "column nocol not found in schema", + }, { + description: "no table in schema", + specs: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": fmt.Sprintf("%s.lkp", ms.TargetKeyspace), + "from": "c1", + "to": "c2", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + Column: "nocol", + }}, + }, + }, + }, + sourceSchema: "", + err: "unexpected number of tables (0) returned from sourceks schema", + }} + for _, tcase := range testcases { + if tcase.sourceSchema != "" { + env.tmc.schema[ms.SourceKeyspace+".t1"] = &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{ + Schema: tcase.sourceSchema, + }}, + } + } else { + delete(env.tmc.schema, ms.SourceKeyspace+".t1") + } + + outms, _, _, err := env.ws.prepareCreateLookup(ctx, "workflow", ms.SourceKeyspace, tcase.specs, false) + if tcase.err != "" { + if err == nil || !strings.Contains(err.Error(), tcase.err) { + t.Errorf("prepareCreateLookup(%s) err: %v, must contain %v", tcase.description, err, tcase.err) + } + continue + } + require.NoError(t, err) + want := strings.Split(tcase.out, "\n") + got := strings.Split(outms.TableSettings[0].CreateDdl, "\n") + require.Equal(t, want, got, tcase.description) + } +} + +func TestCreateLookupVindexSourceVSchema(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + + specs := &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.lkp", + "from": "c1", + "to": "c2", + }, + Owner: 
"t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + Column: "col2", + }}, + }, + }, + } + // Dummy sourceSchema + sourceSchema := "CREATE TABLE `t1` (\n" + + " `col1` int(11) NOT NULL AUTO_INCREMENT,\n" + + " `col2` int(11) DEFAULT NULL,\n" + + " PRIMARY KEY (`id`)\n" + + ") ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=latin1" + + testcases := []struct { + description string + sourceVSchema *vschemapb.Keyspace + out *vschemapb.Keyspace + }{{ + description: "source vschema has no prior info", + sourceVSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col1", + }}, + }, + }, + }, + out: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.lkp", + "from": "c1", + "to": "c2", + "write_only": "true", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col1", + }, { + Name: "v", + Column: "col2", + }}, + }, + }, + }, + }, { + description: "source vschema has the lookup vindex", + sourceVSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.lkp", + "from": "c1", + "to": "c2", + "write_only": "true", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col1", + }}, + }, + }, + }, + out: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + 
"v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.lkp", + "from": "c1", + "to": "c2", + "write_only": "true", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col1", + }, { + Name: "v", + Column: "col2", + }}, + }, + }, + }, + }, { + description: "source vschema table has a different vindex on same column", + sourceVSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.lkp", + "from": "c1", + "to": "c2", + "write_only": "true", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col1", + }, { + Name: "xxhash", + Column: "col2", + }}, + }, + }, + }, + out: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.lkp", + "from": "c1", + "to": "c2", + "write_only": "true", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col1", + }, { + Name: "xxhash", + Column: "col2", + }, { + Name: "v", + Column: "col2", + }}, + }, + }, + }, + }} + for _, tcase := range testcases { + env.tmc.schema[ms.SourceKeyspace+".t1"] = &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{ + Fields: []*querypb.Field{{ + Name: "col1", + Type: querypb.Type_INT64, + }, { + Name: "col2", + Type: querypb.Type_INT64, + }}, + Schema: sourceSchema, + }}, + } + if err := env.topoServ.SaveVSchema(ctx, ms.TargetKeyspace, &vschemapb.Keyspace{}); err != nil { + t.Fatal(err) + } + if err := env.topoServ.SaveVSchema(ctx, 
ms.SourceKeyspace, tcase.sourceVSchema); err != nil { + t.Fatal(err) + } + + _, got, _, err := env.ws.prepareCreateLookup(ctx, "workflow", ms.SourceKeyspace, specs, false) + require.NoError(t, err) + if !proto.Equal(got, tcase.out) { + t.Errorf("%s: got:\n%v, want\n%v", tcase.description, got, tcase.out) + } + } +} + +func TestCreateLookupVindexTargetVSchema(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + sourcevs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "col1", + Name: "xxhash", + }}, + }, + }, + } + if err := env.topoServ.SaveVSchema(context.Background(), ms.SourceKeyspace, sourcevs); err != nil { + t.Fatal(err) + } + + // withTable is a target vschema with a pre-existing table. 
+ withTable := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t2": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "xxhash", + }}, + }, + }, + } + + specs := &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "will be set by the test case", + "from": "c1", + "to": "c2", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + Column: "col2", + }}, + }, + }, + } + // Dummy sourceSchema + sourceSchema := "CREATE TABLE `t1` (\n" + + " `col1` int(11) NOT NULL AUTO_INCREMENT,\n" + + " `col2` int(11) DEFAULT NULL,\n" + + " PRIMARY KEY (`id`)\n" + + ") ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=latin1" + + testcases := []struct { + description string + targetTable string + sourceFieldType querypb.Type + targetVSchema *vschemapb.Keyspace + out *vschemapb.Keyspace + err string + }{{ + description: "sharded, int64, empty target", + targetTable: "lkp", + sourceFieldType: querypb.Type_INT64, + targetVSchema: &vschemapb.Keyspace{Sharded: true}, + out: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "lkp": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "xxhash", + }}, + }, + }, + }, + }, { + description: "sharded, varchar, empty target", + targetTable: "lkp", + sourceFieldType: querypb.Type_VARCHAR, + targetVSchema: &vschemapb.Keyspace{Sharded: true}, + out: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "unicode_loose_md5": { + Type: "unicode_loose_md5", + }, + }, + Tables: map[string]*vschemapb.Table{ + "lkp": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: 
"unicode_loose_md5", + }}, + }, + }, + }, + }, { + description: "sharded, int64, good vindex", + targetTable: "lkp", + sourceFieldType: querypb.Type_INT64, + targetVSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + }, + out: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "lkp": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "xxhash", + }}, + }, + }, + }, + }, { + description: "sharded, int64, bad vindex", + targetTable: "lkp", + sourceFieldType: querypb.Type_INT64, + targetVSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + // Create a misleading vindex name. + "xxhash": { + Type: "unicode_loose_md5", + }, + }, + }, + err: "a conflicting vindex named xxhash already exists in the targetks keyspace", + }, { + description: "sharded, int64, good table", + targetTable: "t2", + sourceFieldType: querypb.Type_INT64, + targetVSchema: withTable, + out: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t2": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "xxhash", + }}, + }, + }, + }, + }, { + description: "sharded, int64, table mismatch", + targetTable: "t2", + sourceFieldType: querypb.Type_VARCHAR, + targetVSchema: withTable, + err: "a conflicting table named t2 already exists in the targetks vschema", + }, { + description: "unsharded", + targetTable: "lkp", + sourceFieldType: querypb.Type_INT64, + targetVSchema: &vschemapb.Keyspace{}, + out: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{}, + Tables: map[string]*vschemapb.Table{ + "lkp": {}, + }, + }, + }, { + description: "invalid column type", + targetTable: "lkp", + sourceFieldType: querypb.Type_SET, + 
targetVSchema: &vschemapb.Keyspace{Sharded: true}, + err: "type SET is not recommended for a vindex", + }} + for _, tcase := range testcases { + env.tmc.schema[ms.SourceKeyspace+".t1"] = &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{ + Fields: []*querypb.Field{{ + Name: "col2", + Type: tcase.sourceFieldType, + }}, + Schema: sourceSchema, + }}, + } + specs.Vindexes["v"].Params["table"] = fmt.Sprintf("%s.%s", ms.TargetKeyspace, tcase.targetTable) + if err := env.topoServ.SaveVSchema(ctx, ms.TargetKeyspace, tcase.targetVSchema); err != nil { + t.Fatal(err) + } + + _, _, got, err := env.ws.prepareCreateLookup(ctx, "workflow", ms.SourceKeyspace, specs, false) + if tcase.err != "" { + if err == nil || !strings.Contains(err.Error(), tcase.err) { + t.Errorf("prepareCreateLookup(%s) err: %v, must contain %v", tcase.description, err, tcase.err) + } + continue + } + require.NoError(t, err) + utils.MustMatch(t, tcase.out, got, tcase.description) + } +} + +func TestCreateLookupVindexSameKeyspace(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + SourceKeyspace: "ks", + TargetKeyspace: "ks", + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + + specs := &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "ks.lkp", + "from": "c1", + "to": "c2", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + Column: "col2", + }}, + }, + }, + } + // Dummy sourceSchema + sourceSchema := "CREATE TABLE `t1` (\n" + + " `col1` int(11) NOT NULL AUTO_INCREMENT,\n" + + " `col2` int(11) DEFAULT NULL,\n" + + " PRIMARY KEY (`id`)\n" + + ") ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=latin1" + + vschema := &vschemapb.Keyspace{ + Sharded: true, + 
Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col1", + }}, + }, + }, + } + want := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "ks.lkp", + "from": "c1", + "to": "c2", + "write_only": "true", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col1", + }, { + Name: "v", + Column: "col2", + }}, + }, + "lkp": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "xxhash", + }}, + }, + }, + } + env.tmc.schema[ms.SourceKeyspace+".t1"] = &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{ + Fields: []*querypb.Field{{ + Name: "col1", + Type: querypb.Type_INT64, + }, { + Name: "col2", + Type: querypb.Type_INT64, + }}, + Schema: sourceSchema, + }}, + } + if err := env.topoServ.SaveVSchema(ctx, ms.TargetKeyspace, vschema); err != nil { + t.Fatal(err) + } + + _, got, _, err := env.ws.prepareCreateLookup(ctx, "keyspace", ms.TargetKeyspace, specs, false) + require.NoError(t, err) + if !proto.Equal(got, want) { + t.Errorf("same keyspace: got:\n%v, want\n%v", got, want) + } +} + +func TestCreateCustomizedVindex(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + SourceKeyspace: "ks", + TargetKeyspace: "ks", + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + + specs := &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "ks.lookup", + "from": "c1", + "to": "col2", + }, + Owner: "t1", + }, + }, + 
Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + Column: "col2", + }}, + }, + "lookup": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "unicode_loose_md5", + Column: "c1", + }}, + }, + }, + } + + // Dummy sourceSchema + sourceSchema := "CREATE TABLE `t1` (\n" + + " `col1` int(11) NOT NULL AUTO_INCREMENT,\n" + + " `col2` int(11) DEFAULT NULL,\n" + + " PRIMARY KEY (`id`)\n" + + ") ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=latin1" + + vschema := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + "unicode_loose_md5": { // Non default vindex type for the column. + Type: "unicode_loose_md5", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col1", + }}, + }, + }, + } + want := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + "unicode_loose_md5": { + Type: "unicode_loose_md5", + }, + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "ks.lookup", + "from": "c1", + "to": "col2", + "write_only": "true", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col1", + }, { + Name: "v", + Column: "col2", + }}, + }, + "lookup": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "unicode_loose_md5", + }}, + }, + }, + } + env.tmc.schema[ms.TargetKeyspace+".t1"] = &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{ + Fields: []*querypb.Field{{ + Name: "col1", + Type: querypb.Type_INT64, + }, { + Name: "col2", + Type: querypb.Type_INT64, + }}, + Schema: sourceSchema, + }}, + } + if err := env.topoServ.SaveVSchema(ctx, ms.TargetKeyspace, vschema); err != nil { + t.Fatal(err) + } + + _, got, _, err := 
env.ws.prepareCreateLookup(ctx, "workflow", ms.TargetKeyspace, specs, false) + require.NoError(t, err) + if !proto.Equal(got, want) { + t.Errorf("customize create lookup error same: got:\n%v, want\n%v", got, want) + } +} + +func TestCreateLookupVindexIgnoreNulls(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + SourceKeyspace: "ks", + TargetKeyspace: "ks", + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + + specs := &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "consistent_lookup", + Params: map[string]string{ + "table": "ks.lkp", + "from": "col2,col1", + "to": "keyspace_id", + "ignore_nulls": "true", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + Columns: []string{"col2", "col1"}, + }}, + }, + }, + } + // Dummy sourceSchema + sourceSchema := "CREATE TABLE `t1` (\n" + + " `col1` int(11) NOT NULL AUTO_INCREMENT,\n" + + " `col2` int(11) DEFAULT NULL,\n" + + " PRIMARY KEY (`id`)\n" + + ") ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=latin1" + + vschema := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col1", + }}, + }, + }, + } + + wantKs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + "v": { + Type: "consistent_lookup", + Params: map[string]string{ + "table": "ks.lkp", + "from": "col2,col1", + "to": "keyspace_id", + "write_only": "true", + "ignore_nulls": "true", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col1", + }, { + Name: 
"v", + Columns: []string{"col2", "col1"}, + }}, + }, + "lkp": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "col2", + Name: "xxhash", + }}, + }, + }, + } + wantQuery := "select col2 as col2, col1 as col1, keyspace_id() as keyspace_id from t1 where col2 is not null and col1 is not null group by col2, col1, keyspace_id" + + env.tmc.schema[ms.SourceKeyspace+".t1"] = &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{ + Fields: []*querypb.Field{{ + Name: "col1", + Type: querypb.Type_INT64, + }, { + Name: "col2", + Type: querypb.Type_INT64, + }}, + Schema: sourceSchema, + }}, + } + if err := env.topoServ.SaveVSchema(ctx, ms.TargetKeyspace, vschema); err != nil { + t.Fatal(err) + } + + ms, ks, _, err := env.ws.prepareCreateLookup(ctx, "workflow", ms.TargetKeyspace, specs, false) + require.NoError(t, err) + if !proto.Equal(wantKs, ks) { + t.Errorf("unexpected keyspace value: got:\n%v, want\n%v", ks, wantKs) + } + require.NotNil(t, ms) + require.GreaterOrEqual(t, len(ms.TableSettings), 1) + require.Equal(t, wantQuery, ms.TableSettings[0].SourceExpression, "unexpected query") +} + +func TestStopAfterCopyFlag(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + SourceKeyspace: "ks", + TargetKeyspace: "ks", + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + specs := &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "ks.lkp", + "from": "c1", + "to": "col2", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + Column: "col2", + }}, + }, + }, + } + // Dummy sourceSchema. 
+ sourceSchema := "CREATE TABLE `t1` (\n" + + " `col1` int(11) NOT NULL AUTO_INCREMENT,\n" + + " `col2` int(11) DEFAULT NULL,\n" + + " PRIMARY KEY (`id`)\n" + + ") ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=latin1" + + vschema := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col1", + }}, + }, + }, + } + env.tmc.schema[ms.SourceKeyspace+".t1"] = &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{ + Fields: []*querypb.Field{{ + Name: "col1", + Type: querypb.Type_INT64, + }, { + Name: "col2", + Type: querypb.Type_INT64, + }}, + Schema: sourceSchema, + }}, + } + if err := env.topoServ.SaveVSchema(ctx, ms.SourceKeyspace, vschema); err != nil { + t.Fatal(err) + } + + ms1, _, _, err := env.ws.prepareCreateLookup(ctx, "workflow", ms.TargetKeyspace, specs, false) + require.NoError(t, err) + require.Equal(t, ms1.StopAfterCopy, true) + + ms2, _, _, err := env.ws.prepareCreateLookup(ctx, "workflow", ms.TargetKeyspace, specs, true) + require.NoError(t, err) + require.Equal(t, ms2.StopAfterCopy, false) +} + +func TestCreateLookupVindexFailures(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + // Keyspace where the vindex is created. + SourceKeyspace: "sourceks", + // Keyspace where the lookup table and VReplication workflow is created. 
+ TargetKeyspace: "targetks", + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) + defer env.close() + + unique := map[string]*vschemapb.Vindex{ + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.t", + "from": "c1", + "to": "c2", + }, + }, + } + + vs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.t", + "from": "c1", + "to": "c2", + "write_only": "true", + }, + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + Column: "c1", + }}, + }, + }, + } + err := env.topoServ.SaveVSchema(ctx, ms.TargetKeyspace, vs) + require.NoError(t, err) + + testcases := []struct { + description string + input *vschemapb.Keyspace + err string + }{ + { + description: "dup vindex", + input: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v1": { + Type: "xxhash", + }, + "v2": { + Type: "xxhash", + }, + }, + }, + err: "only one vindex must be specified", + }, + { + description: "not a lookup", + input: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "xxhash", + }, + }, + }, + err: "vindex xxhash is not a lookup type", + }, + { + description: "unqualified table", + input: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "lookup", + Params: map[string]string{ + "table": "t", + }, + }, + }, + }, + err: "vindex table name (t) must be in the form .", + }, + { + description: "unique lookup should have only one from column", + input: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.t", + "from": "c1,c2", + "to": "c3", + }, + }, + }, + }, + err: "unique vindex 
'from' should have only one column", + }, + { + description: "non-unique lookup should have more than one column", + input: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "lookup", + Params: map[string]string{ + "table": "targetks.t", + "from": "c1", + "to": "c2", + }, + }, + }, + }, + err: "non-unique vindex 'from' should have more than one column", + }, + { + description: "vindex not found", + input: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "lookup_noexist", + Params: map[string]string{ + "table": "targetks.t", + "from": "c1,c2", + "to": "c2", + }, + }, + }, + }, + err: `vindexType "lookup_noexist" not found`, + }, + { + description: "no tables", + input: &vschemapb.Keyspace{ + Vindexes: unique, + }, + err: "one or two tables must be specified", + }, + { + description: "too many tables", + input: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.t", + "from": "c1", + "to": "c2", + }, + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + Column: "c1", + }}, + }, + "v": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "c2", + }}, + }, + "v2": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "c1", + }}, + }, + }, + }, + err: "one or two tables must be specified", + }, + { + description: "only one colvindex", + input: &vschemapb.Keyspace{ + Vindexes: unique, + Tables: map[string]*vschemapb.Table{ + "t1": {}, + }, + }, + err: "exactly one ColumnVindex must be specified for the t1 table", + }, + { + description: "vindex name must match", + input: &vschemapb.Keyspace{ + Vindexes: unique, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "other", + }}, + }, + }, + }, + err: "ColumnVindex name (other) must match vindex 
name (v)", + }, + { + description: "owner must match", + input: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.t", + "from": "c1", + "to": "c2", + }, + Owner: "otherTable", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + }}, + }, + }, + }, + err: "vindex owner (otherTable) must match table name (t1)", + }, + { + description: "owner must match", + input: &vschemapb.Keyspace{ + Vindexes: unique, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + }}, + }, + }, + }, + err: "at least one column must be specified in ColumnVindexes", + }, + { + description: "columnvindex length mismatch", + input: &vschemapb.Keyspace{ + Vindexes: unique, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + Columns: []string{"col1", "col2"}, + }}, + }, + }, + }, + err: "length of table columns (2) differs from length of vindex columns (1)", + }, + { + description: "vindex mismatches with what's in vschema", + input: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.t", + "from": "c1", + "to": "c2", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col", + }}, + }, + }, + }, + err: "a conflicting vindex named xxhash already exists in the targetks keyspace", + }, + { + description: "source table not in vschema", + input: &vschemapb.Keyspace{ + Vindexes: unique, + Tables: map[string]*vschemapb.Table{ + "other": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + Column: "col", + }}, + }, + }, + }, + err: "table other not found in the targetks keyspace", + }, + } + for _, tcase := range testcases { 
+ t.Run(tcase.description, func(t *testing.T) { + req := &vtctldatapb.LookupVindexCreateRequest{ + Workflow: "lookup", + Keyspace: ms.TargetKeyspace, + Vindex: tcase.input, + } + _, err := env.ws.LookupVindexCreate(ctx, req) + if !strings.Contains(err.Error(), tcase.err) { + t.Errorf("CreateLookupVindex(%s) err: %v, must contain %v", tcase.description, err, tcase.err) + } + }) + } +} + +func TestExternalizeLookupVindex(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + // Keyspace where the vindex is created. + SourceKeyspace: "sourceks", + // Keyspace where the lookup table and VReplication workflow is created. + TargetKeyspace: "targetks", + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) + defer env.close() + + sourceVschema := &vschemapb.Keyspace{ + Sharded: false, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + "owned_lookup": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.owned_lookup", + "from": "c1", + "to": "c2", + "write_only": "true", + }, + Owner: "t1", + }, + "unowned_lookup": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.unowned_lookup", + "from": "c1", + "to": "c2", + "write_only": "true", + }, + }, + "unqualified_lookup": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "unqualified", + "from": "c1", + "to": "c2", + }, + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col1", + }, { + Name: "owned_lookup", + Column: "col2", + }}, + }, + }, + } + fields := sqltypes.MakeTestFields( + "id|state|message|source", + "int64|varbinary|varbinary|blob", + ) + ownedSourceStopAfterCopy := fmt.Sprintf(`keyspace:"%s",shard:"0",filter:{rules:{match:"owned_lookup" filter:"select * from t1 where in_keyrange(col1, '%s.xxhash', '-80')"}} 
stop_after_copy:true`, + ms.SourceKeyspace, ms.SourceKeyspace) + ownedSourceKeepRunningAfterCopy := fmt.Sprintf(`keyspace:"%s",shard:"0",filter:{rules:{match:"owned_lookup" filter:"select * from t1 where in_keyrange(col1, '%s.xxhash', '-80')"}}`, + ms.SourceKeyspace, ms.SourceKeyspace) + ownedRunning := sqltypes.MakeTestResult(fields, "1|Running|msg|"+ownedSourceKeepRunningAfterCopy) + ownedStopped := sqltypes.MakeTestResult(fields, "1|Stopped|Stopped after copy|"+ownedSourceStopAfterCopy) + unownedSourceStopAfterCopy := fmt.Sprintf(`keyspace:"%s",shard:"0",filter:{rules:{match:"unowned_lookup" filter:"select * from t1 where in_keyrange(col1, '%s.xxhash', '-80')"}} stop_after_copy:true`, + ms.SourceKeyspace, ms.SourceKeyspace) + unownedSourceKeepRunningAfterCopy := fmt.Sprintf(`keyspace:"%s",shard:"0",filter:{rules:{match:"unowned_lookup" filter:"select * from t1 where in_keyrange(col1, '%s.xxhash', '-80')"}}`, + ms.SourceKeyspace, ms.SourceKeyspace) + unownedRunning := sqltypes.MakeTestResult(fields, "2|Running|msg|"+unownedSourceKeepRunningAfterCopy) + unownedStopped := sqltypes.MakeTestResult(fields, "2|Stopped|Stopped after copy|"+unownedSourceStopAfterCopy) + + testcases := []struct { + request *vtctldatapb.LookupVindexExternalizeRequest + vrResponse *sqltypes.Result + err string + expectedVschema *vschemapb.Keyspace + expectDelete bool + }{ + { + request: &vtctldatapb.LookupVindexExternalizeRequest{ + Name: "owned_lookup", + Keyspace: ms.SourceKeyspace, + TableKeyspace: ms.TargetKeyspace, + }, + vrResponse: ownedStopped, + expectedVschema: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "owned_lookup": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.owned_lookup", + "from": "c1", + "to": "c2", + }, + Owner: "t1", + }, + }, + }, + expectDelete: true, + }, + { + request: &vtctldatapb.LookupVindexExternalizeRequest{ + Name: "unowned_lookup", + Keyspace: ms.SourceKeyspace, + TableKeyspace: ms.TargetKeyspace, + }, + 
vrResponse: unownedStopped, + expectedVschema: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "unowned_lookup": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.unowned_lookup", + "from": "c1", + "to": "c2", + }, + }, + }, + }, + err: "is not in Running state", + }, + { + request: &vtctldatapb.LookupVindexExternalizeRequest{ + Name: "owned_lookup", + Keyspace: ms.SourceKeyspace, + TableKeyspace: ms.TargetKeyspace, + }, + vrResponse: ownedRunning, + expectedVschema: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "owned_lookup": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.owned_lookup", + "from": "c1", + "to": "c2", + }, + Owner: "t1", + }, + }, + }, + expectDelete: true, + }, + { + request: &vtctldatapb.LookupVindexExternalizeRequest{ + Name: "unowned_lookup", + Keyspace: ms.SourceKeyspace, + TableKeyspace: ms.TargetKeyspace, + }, + vrResponse: unownedRunning, + expectedVschema: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "unowned_lookup": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.unowned_lookup", + "from": "c1", + "to": "c2", + }, + }, + }, + }, + }, + { + request: &vtctldatapb.LookupVindexExternalizeRequest{ + Name: "absent_lookup", + Keyspace: ms.SourceKeyspace, + TableKeyspace: ms.TargetKeyspace, + }, + expectedVschema: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "absent_lookup": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.absent_lookup", + "from": "c1", + "to": "c2", + }, + }, + }, + }, + err: "vindex absent_lookup not found in the sourceks keyspace", + }, + } + for _, tcase := range testcases { + t.Run(tcase.request.Name, func(t *testing.T) { + // Resave the source schema for every iteration. 
+ err := env.topoServ.SaveVSchema(ctx, tcase.request.Keyspace, sourceVschema) + require.NoError(t, err) + err = env.topoServ.RebuildSrvVSchema(ctx, []string{env.cell}) + require.NoError(t, err) + + validationQuery := fmt.Sprintf("select id, state, message, source from _vt.vreplication where workflow='%s' and db_name='vt_%s'", + tcase.request.Name, ms.TargetKeyspace) + env.tmc.expectVRQuery(200, validationQuery, tcase.vrResponse) + env.tmc.expectVRQuery(210, validationQuery, tcase.vrResponse) + + preWorkflowDeleteCalls := env.tmc.workflowDeleteCalls + _, err = env.ws.LookupVindexExternalize(ctx, tcase.request) + if tcase.err != "" { + if err == nil || !strings.Contains(err.Error(), tcase.err) { + require.FailNow(t, "LookupVindexExternalize error", "ExternalizeVindex(%v) err: %v, must contain %v", tcase.request, err, tcase.err) + } + return + } + require.NoError(t, err) + expectedWorkflowDeleteCalls := preWorkflowDeleteCalls + if tcase.expectDelete { + // We expect the RPC to be called on each target shard. + expectedWorkflowDeleteCalls = preWorkflowDeleteCalls + (len(env.targets)) + } + require.Equal(t, expectedWorkflowDeleteCalls, env.tmc.workflowDeleteCalls) + + aftervschema, err := env.topoServ.GetVSchema(ctx, ms.SourceKeyspace) + require.NoError(t, err) + vindex := aftervschema.Vindexes[tcase.request.Name] + expectedVindex := tcase.expectedVschema.Vindexes[tcase.request.Name] + require.NotNil(t, vindex, "vindex %s not found in vschema", tcase.request.Name) + require.NotContains(t, vindex.Params, "write_only", tcase.request) + require.Equal(t, expectedVindex, vindex, "vindex mismatch. 
expected: %+v, got: %+v", expectedVindex, vindex) + }) + } +} + +func TestMaterializerOneToOne(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{ + { + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "t1ddl", + }, + { + TargetTable: "t2", + SourceExpression: "select * from t3", + CreateDdl: "t2ddl", + }, + { + TargetTable: "t4", + SourceExpression: "", // empty + CreateDdl: "t4ddl", + }, + }, + Cell: "zone1", + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery( + 200, + insertPrefix+ + `\(`+ + `'workflow', `+ + (`'keyspace:\\"sourceks\\" shard:\\"0\\" `+ + `filter:{`+ + `rules:{match:\\"t1\\" filter:\\"select.*t1\\"} `+ + `rules:{match:\\"t2\\" filter:\\"select.*t3\\"} `+ + `rules:{match:\\"t4\\"}`+ + `}', `)+ + `'', [0-9]*, [0-9]*, 'zone1', 'primary,rdonly', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false`+ + `\)`+eol, + &sqltypes.Result{}, + ) + env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) + + err := env.ws.Materialize(ctx, ms) + require.NoError(t, err) + env.tmc.verifyQueries(t) +} + +func TestMaterializerManyToOne(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "t1ddl", + }, { + TargetTable: "t2", + SourceExpression: "select * from t3", + CreateDdl: "t2ddl", + }}, + } + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"-80", "80-"}, []string{"0"}) + defer env.close() + + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery( + 200, + insertPrefix+ + `\('workflow', 'keyspace:\\"sourceks\\" shard:\\"-80\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1\\"} rules:{match:\\"t2\\" filter:\\"select.*t3\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false\)`+ + `, `+ + `\('workflow', 'keyspace:\\"sourceks\\" shard:\\"80-\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1\\"} rules:{match:\\"t2\\" filter:\\"select.*t3\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false\)`+ + eol, + &sqltypes.Result{}, + ) + env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) + + err := env.ws.Materialize(ctx, ms) + require.NoError(t, err) + env.tmc.verifyQueries(t) +} + +func TestMaterializerOneToMany(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "t1ddl", + }}, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) + defer env.close() + + vs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "xxhash", + }}, + }, + }, + } + + if err := env.topoServ.SaveVSchema(context.Background(), "targetks", vs); err != nil { + t.Fatal(err) + } + + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(210, mzSelectFrozenQuery, 
&sqltypes.Result{}) + env.tmc.expectVRQuery( + 200, + insertPrefix+ + `.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.xxhash.*-80.*`, + &sqltypes.Result{}, + ) + env.tmc.expectVRQuery( + 210, + insertPrefix+ + `.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.xxhash.*80-.*`, + &sqltypes.Result{}, + ) + env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(210, mzUpdateQuery, &sqltypes.Result{}) + + err := env.ws.Materialize(ctx, ms) + require.NoError(t, err) + env.tmc.verifyQueries(t) +} + +func TestMaterializerManyToMany(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "t1ddl", + }}, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"-40", "40-"}, []string{"-80", "80-"}) + defer env.close() + + vs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "xxhash", + }}, + }, + }, + } + + if err := env.topoServ.SaveVSchema(context.Background(), "targetks", vs); err != nil { + t.Fatal(err) + } + + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery( + 200, + insertPrefix+ + `.*shard:\\"-40\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.xxhash.*-80.*`+ + `.*shard:\\"40-\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.xxhash.*-80.*`, + &sqltypes.Result{}, + ) + 
env.tmc.expectVRQuery( + 210, + insertPrefix+ + `.*shard:\\"-40\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.xxhash.*80-.*`+ + `.*shard:\\"40-\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.xxhash.*80-.*`, + &sqltypes.Result{}, + ) + env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(210, mzUpdateQuery, &sqltypes.Result{}) + err := env.ws.Materialize(ctx, ms) + require.NoError(t, err) + env.tmc.verifyQueries(t) +} + +func TestMaterializerMulticolumnVindex(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "t1ddl", + }}, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) + defer env.close() + + vs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "region": { + Type: "region_experimental", + Params: map[string]string{ + "region_bytes": "1", + }, + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Columns: []string{"c1", "c2"}, + Name: "region", + }}, + }, + }, + } + + if err := env.topoServ.SaveVSchema(context.Background(), "targetks", vs); err != nil { + t.Fatal(err) + } + + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery( + 200, + insertPrefix+ + `.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1, c2.*targetks\.region.*-80.*`, + &sqltypes.Result{}, + ) + env.tmc.expectVRQuery( + 210, + insertPrefix+ + `.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where 
in_keyrange\(c1, c2.*targetks\.region.*80-.*`, + &sqltypes.Result{}, + ) + env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(210, mzUpdateQuery, &sqltypes.Result{}) + + err := env.ws.Materialize(ctx, ms) + require.NoError(t, err) + env.tmc.verifyQueries(t) +} + +func TestMaterializerDeploySchema(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "t1ddl", + }, { + TargetTable: "t2", + SourceExpression: "select * from t3", + CreateDdl: "t2ddl", + }}, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + + delete(env.tmc.schema, "targetks.t2") + + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, `t2ddl`, &sqltypes.Result{}) + env.tmc.expectVRQuery( + 200, + insertPrefix+ + `\('workflow', 'keyspace:\\"sourceks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1\\"} rules:{match:\\"t2\\" filter:\\"select.*t3\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false\)`+ + eol, + &sqltypes.Result{}, + ) + env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) + + err := env.ws.Materialize(ctx, ms) + require.NoError(t, err) + env.tmc.verifyQueries(t) + require.Equal(t, env.tmc.getSchemaRequestCount(100), 1) + require.Equal(t, env.tmc.getSchemaRequestCount(200), 1) +} + +func TestMaterializerCopySchema(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "copy", + }, { + TargetTable: 
"t2", + SourceExpression: "select * from t3", + CreateDdl: "t2ddl", + }}, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + + delete(env.tmc.schema, "targetks.t1") + + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, `t1_schema`, &sqltypes.Result{}) + env.tmc.expectVRQuery( + 200, + insertPrefix+ + `\('workflow', 'keyspace:\\"sourceks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1\\"} rules:{match:\\"t2\\" filter:\\"select.*t3\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false\)`+ + eol, + &sqltypes.Result{}, + ) + env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) + + err := env.ws.Materialize(ctx, ms) + require.NoError(t, err) + env.tmc.verifyQueries(t) + require.Equal(t, env.tmc.getSchemaRequestCount(100), 1) + require.Equal(t, env.tmc.getSchemaRequestCount(200), 1) + +} + +func TestMaterializerExplicitColumns(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select c1, c1+c2, c2 from t1", + CreateDdl: "t1ddl", + }}, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) + defer env.close() + + vs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "region": { + Type: "region_experimental", + Params: map[string]string{ + "region_bytes": "1", + }, + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Columns: []string{"c1", "c2"}, + Name: "region", + }}, + }, + }, + } + + if err := env.topoServ.SaveVSchema(context.Background(), "targetks", vs); 
err != nil { + t.Fatal(err) + } + + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery( + 200, + insertPrefix+ + `.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1, c2.*targetks\.region.*-80.*`, + &sqltypes.Result{}, + ) + env.tmc.expectVRQuery( + 210, + insertPrefix+ + `.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1, c2.*targetks\.region.*80-.*`, + &sqltypes.Result{}, + ) + env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(210, mzUpdateQuery, &sqltypes.Result{}) + + err := env.ws.Materialize(ctx, ms) + require.NoError(t, err) + env.tmc.verifyQueries(t) +} + +func TestMaterializerRenamedColumns(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select c3 as c1, c1+c2, c4 as c2 from t1", + CreateDdl: "t1ddl", + }}, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) + defer env.close() + + vs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "region": { + Type: "region_experimental", + Params: map[string]string{ + "region_bytes": "1", + }, + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Columns: []string{"c1", "c2"}, + Name: "region", + }}, + }, + }, + } + + if err := env.topoServ.SaveVSchema(context.Background(), "targetks", vs); err != nil { + t.Fatal(err) + } + + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery( + 200, + insertPrefix+ + 
`.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c3, c4.*targetks\.region.*-80.*`, + &sqltypes.Result{}, + ) + env.tmc.expectVRQuery( + 210, + insertPrefix+ + `.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c3, c4.*targetks\.region.*80-.*`, + &sqltypes.Result{}, + ) + env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(210, mzUpdateQuery, &sqltypes.Result{}) + + err := env.ws.Materialize(ctx, ms) + require.NoError(t, err) + env.tmc.verifyQueries(t) +} + +func TestMaterializerStopAfterCopy(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + StopAfterCopy: true, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "t1ddl", + }, { + TargetTable: "t2", + SourceExpression: "select * from t3", + CreateDdl: "t2ddl", + }}, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, insertPrefix+`.*stop_after_copy:true`, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) + + err := env.ws.Materialize(ctx, ms) + require.NoError(t, err) + env.tmc.verifyQueries(t) +} + +func TestMaterializerNoTargetVSchema(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "t1ddl", + }}, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) + 
defer env.close() + + vs := &vschemapb.Keyspace{ + Sharded: true, + } + + if err := env.topoServ.SaveVSchema(context.Background(), "targetks", vs); err != nil { + t.Fatal(err) + } + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) + err := env.ws.Materialize(ctx, ms) + require.EqualError(t, err, "table t1 not found in vschema for keyspace targetks") +} + +func TestMaterializerNoDDL(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "", + }}, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + + delete(env.tmc.schema, "targetks.t1") + + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + err := env.ws.Materialize(ctx, ms) + require.EqualError(t, err, "target table t1 does not exist and there is no create ddl defined") + require.Equal(t, env.tmc.getSchemaRequestCount(100), 0) + require.Equal(t, env.tmc.getSchemaRequestCount(200), 1) + +} + +func TestMaterializerNoSourcePrimary(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "copy", + }}, + } + sources := []string{"0"} + targets := []string{"0"} + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Copied from newTestMaterializerEnv + env := &testMaterializerEnv{ + ms: ms, + sources: sources, + targets: targets, + tablets: make(map[int]*topodatapb.Tablet), + topoServ: memorytopo.NewServer(ctx, "cell"), + cell: "cell", 
+ tmc: newTestMaterializerTMClient(), + } + env.ws = NewServer(env.topoServ, env.tmc) + defer env.close() + + tabletID := 100 + for _, shard := range sources { + _ = env.addTablet(tabletID, env.ms.SourceKeyspace, shard, topodatapb.TabletType_REPLICA) + tabletID += 10 + } + tabletID = 200 + for _, shard := range targets { + _ = env.addTablet(tabletID, env.ms.TargetKeyspace, shard, topodatapb.TabletType_PRIMARY) + tabletID += 10 + } + + // Skip the schema creation part. + + env.expectValidation() + + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + err := env.ws.Materialize(ctx, ms) + require.EqualError(t, err, "shard must have a primary for copying schema: 0") +} + +func TestMaterializerTableMismatchNonCopy(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t2", + CreateDdl: "", + }}, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + + delete(env.tmc.schema, "targetks.t1") + + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + err := env.ws.Materialize(ctx, ms) + require.EqualError(t, err, "target table t1 does not exist and there is no create ddl defined") +} + +func TestMaterializerTableMismatchCopy(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t2", + CreateDdl: "copy", + }}, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + + delete(env.tmc.schema, "targetks.t1") + 
+ env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + err := env.ws.Materialize(ctx, ms) + require.EqualError(t, err, "source and target table names must match for copying schema: t2 vs t1") +} + +func TestMaterializerNoSourceTable(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "copy", + }}, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + + delete(env.tmc.schema, "targetks.t1") + delete(env.tmc.schema, "sourceks.t1") + + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + err := env.ws.Materialize(ctx, ms) + require.EqualError(t, err, "source table t1 does not exist") +} + +func TestMaterializerSyntaxError(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "bad query", + CreateDdl: "t1ddl", + }}, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + err := env.ws.Materialize(ctx, ms) + require.EqualError(t, err, "syntax error at position 4 near 'bad'") +} + +func TestMaterializerNotASelect(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "update t1 set val=1", + CreateDdl: "t1ddl", + }}, + } + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + err := env.ws.Materialize(ctx, ms) + require.EqualError(t, err, "unrecognized statement: update t1 set val=1") +} + +func TestMaterializerNoGoodVindex(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "t1ddl", + }}, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) + defer env.close() + + vs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "lookup_unique": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "t1", + "from": "c1", + "to": "c2", + }, + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "lookup_unique", + }}, + }, + }, + } + + if err := env.topoServ.SaveVSchema(context.Background(), "targetks", vs); err != nil { + t.Fatal(err) + } + + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) + err := env.ws.Materialize(ctx, ms) + require.EqualError(t, err, "could not find a vindex to compute keyspace id for table t1") +} + +func TestMaterializerComplexVindexExpression(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select a+b as c1 from t1", + CreateDdl: "t1ddl", + }}, + } + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) + defer env.close() + + vs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "xxhash", + }}, + }, + }, + } + + if err := env.topoServ.SaveVSchema(context.Background(), "targetks", vs); err != nil { + t.Fatal(err) + } + + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) + err := env.ws.Materialize(ctx, ms) + require.EqualError(t, err, "vindex column cannot be a complex expression: a + b as c1") +} + +func TestMaterializerNoVindexInExpression(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select c2 from t1", + CreateDdl: "t1ddl", + }}, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) + defer env.close() + + vs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "xxhash", + }}, + }, + }, + } + + if err := env.topoServ.SaveVSchema(context.Background(), "targetks", vs); err != nil { + t.Fatal(err) + } + + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) + err := env.ws.Materialize(ctx, ms) + require.EqualError(t, err, "could not find vindex column c1") +} diff --git 
a/go/vt/vtctl/workflow/mount.go b/go/vt/vtctl/workflow/mount.go new file mode 100644 index 00000000000..c45dd9f29a3 --- /dev/null +++ b/go/vt/vtctl/workflow/mount.go @@ -0,0 +1,88 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "context" + + "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vterrors" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" +) + +func notExistsError(name string) error { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "there is no vitess cluster named %s", name) +} + +func (s *Server) MountRegister(ctx context.Context, req *vtctldatapb.MountRegisterRequest) (*vtctldatapb.MountRegisterResponse, error) { + vci, err := s.ts.GetExternalVitessCluster(ctx, req.Name) + if err != nil { + return &vtctldatapb.MountRegisterResponse{}, + vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get external vitess cluster in MountRegister: %v", err) + } + if vci != nil { + return &vtctldatapb.MountRegisterResponse{}, notExistsError(req.Name) + } + vc := &topodata.ExternalVitessCluster{ + TopoConfig: &topodata.TopoConfig{ + TopoType: req.TopoType, + Server: req.TopoServer, + Root: req.TopoRoot, + }, + } + return &vtctldatapb.MountRegisterResponse{}, s.ts.CreateExternalVitessCluster(ctx, req.Name, vc) +} + +func (s *Server) MountUnregister(ctx context.Context, req *vtctldatapb.MountUnregisterRequest) 
(*vtctldatapb.MountUnregisterResponse, error) { + vci, err := s.ts.GetExternalVitessCluster(ctx, req.Name) + if err != nil { + return &vtctldatapb.MountUnregisterResponse{}, + vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get external vitess cluster in MountUnregister: %v", err) + } + if vci == nil { + return &vtctldatapb.MountUnregisterResponse{}, notExistsError(req.Name) + } + return &vtctldatapb.MountUnregisterResponse{}, s.ts.DeleteExternalVitessCluster(ctx, req.Name) +} + +func (s *Server) MountList(ctx context.Context, req *vtctldatapb.MountListRequest) (*vtctldatapb.MountListResponse, error) { + vciList, err := s.ts.GetExternalVitessClusters(ctx) + if err != nil { + return &vtctldatapb.MountListResponse{}, + vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get external vitess cluster in MountList: %v", err) + } + return &vtctldatapb.MountListResponse{Names: vciList}, nil +} + +func (s *Server) MountShow(ctx context.Context, req *vtctldatapb.MountShowRequest) (*vtctldatapb.MountShowResponse, error) { + vci, err := s.ts.GetExternalVitessCluster(ctx, req.Name) + if err != nil { + return &vtctldatapb.MountShowResponse{}, + vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get external vitess cluster in MountShow: %v", err) + } + if vci == nil { + return &vtctldatapb.MountShowResponse{}, notExistsError(req.Name) + } + return &vtctldatapb.MountShowResponse{ + TopoType: vci.TopoConfig.TopoType, + TopoServer: vci.TopoConfig.Server, + TopoRoot: vci.TopoConfig.Root, + Name: req.Name, + }, nil +} diff --git a/go/vt/vtctl/workflow/resharder.go b/go/vt/vtctl/workflow/resharder.go new file mode 100644 index 00000000000..161b1c4567d --- /dev/null +++ b/go/vt/vtctl/workflow/resharder.go @@ -0,0 +1,348 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*/ + +package workflow + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "google.golang.org/protobuf/encoding/prototext" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" +) + +type resharder struct { + s *Server + keyspace string + workflow string + sourceShards []*topo.ShardInfo + sourcePrimaries map[string]*topo.TabletInfo + targetShards []*topo.ShardInfo + targetPrimaries map[string]*topo.TabletInfo + vschema *vschemapb.Keyspace + refStreams map[string]*refStream + // This can be single cell name or cell alias but it can + // also be a comma-separated list of cells. 
+ cell string + tabletTypes string + stopAfterCopy bool + onDDL string + deferSecondaryKeys bool +} + +type refStream struct { + workflow string + bls *binlogdatapb.BinlogSource + cell string + tabletTypes string +} + +func (s *Server) buildResharder(ctx context.Context, keyspace, workflow string, sources, targets []string, cell, tabletTypes string) (*resharder, error) { + ts := s.ts + rs := &resharder{ + s: s, + keyspace: keyspace, + workflow: workflow, + sourcePrimaries: make(map[string]*topo.TabletInfo), + targetPrimaries: make(map[string]*topo.TabletInfo), + cell: cell, + tabletTypes: tabletTypes, + } + for _, shard := range sources { + si, err := ts.GetShard(ctx, keyspace, shard) + if err != nil { + return nil, vterrors.Wrapf(err, "GetShard(%s) failed", shard) + } + if !si.IsPrimaryServing { + return nil, fmt.Errorf("source shard %v is not in serving state", shard) + } + rs.sourceShards = append(rs.sourceShards, si) + primary, err := ts.GetTablet(ctx, si.PrimaryAlias) + if err != nil { + return nil, vterrors.Wrapf(err, "GetTablet(%s) failed", si.PrimaryAlias) + } + rs.sourcePrimaries[si.ShardName()] = primary + } + for _, shard := range targets { + si, err := ts.GetShard(ctx, keyspace, shard) + if err != nil { + return nil, vterrors.Wrapf(err, "GetShard(%s) failed", shard) + } + if si.IsPrimaryServing { + return nil, fmt.Errorf("target shard %v is in serving state", shard) + } + rs.targetShards = append(rs.targetShards, si) + primary, err := ts.GetTablet(ctx, si.PrimaryAlias) + if err != nil { + return nil, vterrors.Wrapf(err, "GetTablet(%s) failed", si.PrimaryAlias) + } + rs.targetPrimaries[si.ShardName()] = primary + } + if err := topotools.ValidateForReshard(rs.sourceShards, rs.targetShards); err != nil { + return nil, vterrors.Wrap(err, "ValidateForReshard") + } + if err := rs.validateTargets(ctx); err != nil { + return nil, vterrors.Wrap(err, "validateTargets") + } + + vschema, err := ts.GetVSchema(ctx, keyspace) + if err != nil { + return nil, 
vterrors.Wrap(err, "GetVSchema") + } + rs.vschema = vschema + + if err := rs.readRefStreams(ctx); err != nil { + return nil, vterrors.Wrap(err, "readRefStreams") + } + return rs, nil +} + +func (rs *resharder) validateTargets(ctx context.Context) error { + err := rs.forAll(rs.targetShards, func(target *topo.ShardInfo) error { + targetPrimary := rs.targetPrimaries[target.ShardName()] + query := fmt.Sprintf("select 1 from _vt.vreplication where db_name=%s", encodeString(targetPrimary.DbName())) + p3qr, err := rs.s.tmc.VReplicationExec(ctx, targetPrimary.Tablet, query) + if err != nil { + return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", targetPrimary.Tablet, query) + } + if len(p3qr.Rows) != 0 { + return errors.New("some streams already exist in the target shards, please clean them up and retry the command") + } + return nil + }) + return err +} + +func (rs *resharder) readRefStreams(ctx context.Context) error { + var mu sync.Mutex + err := rs.forAll(rs.sourceShards, func(source *topo.ShardInfo) error { + sourcePrimary := rs.sourcePrimaries[source.ShardName()] + + query := fmt.Sprintf("select workflow, source, cell, tablet_types from _vt.vreplication where db_name=%s and message != 'FROZEN'", encodeString(sourcePrimary.DbName())) + p3qr, err := rs.s.tmc.VReplicationExec(ctx, sourcePrimary.Tablet, query) + if err != nil { + return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", sourcePrimary.Tablet, query) + } + qr := sqltypes.Proto3ToResult(p3qr) + + mu.Lock() + defer mu.Unlock() + + mustCreate := false + var ref map[string]bool + if rs.refStreams == nil { + rs.refStreams = make(map[string]*refStream) + mustCreate = true + } else { + // Copy the ref streams for comparison. 
+ ref = make(map[string]bool, len(rs.refStreams)) + for k := range rs.refStreams { + ref[k] = true + } + } + for _, row := range qr.Rows { + + workflow := row[0].ToString() + if workflow == "" { + return fmt.Errorf("VReplication streams must have named workflows for migration: shard: %s:%s", source.Keyspace(), source.ShardName()) + } + var bls binlogdatapb.BinlogSource + rowBytes, err := row[1].ToBytes() + if err != nil { + return err + } + if err := prototext.Unmarshal(rowBytes, &bls); err != nil { + return vterrors.Wrapf(err, "prototext.Unmarshal: %v", row) + } + isReference, err := rs.blsIsReference(&bls) + if err != nil { + return vterrors.Wrap(err, "blsIsReference") + } + if !isReference { + continue + } + refKey := fmt.Sprintf("%s:%s:%s", workflow, bls.Keyspace, bls.Shard) + if mustCreate { + rs.refStreams[refKey] = &refStream{ + workflow: workflow, + bls: &bls, + cell: row[2].ToString(), + tabletTypes: row[3].ToString(), + } + } else { + if !ref[refKey] { + return fmt.Errorf("streams are mismatched across source shards for workflow: %s", workflow) + } + delete(ref, refKey) + } + } + if len(ref) != 0 { + return fmt.Errorf("streams are mismatched across source shards: %v", ref) + } + return nil + }) + return err +} + +// blsIsReference is partially copied from streamMigrater.templatize. +// It reuses the constants from that function also. 
+func (rs *resharder) blsIsReference(bls *binlogdatapb.BinlogSource) (bool, error) { + streamType := StreamTypeUnknown + for _, rule := range bls.Filter.Rules { + typ, err := rs.identifyRuleType(rule) + if err != nil { + return false, err + } + + switch typ { + case StreamTypeSharded: + if streamType == StreamTypeReference { + return false, fmt.Errorf("cannot reshard streams with a mix of reference and sharded tables: %v", bls) + } + streamType = StreamTypeSharded + case StreamTypeReference: + if streamType == StreamTypeSharded { + return false, fmt.Errorf("cannot reshard streams with a mix of reference and sharded tables: %v", bls) + } + streamType = StreamTypeReference + } + } + return streamType == StreamTypeReference, nil +} + +func (rs *resharder) identifyRuleType(rule *binlogdatapb.Rule) (StreamType, error) { + vtable, ok := rs.vschema.Tables[rule.Match] + if !ok && !schema.IsInternalOperationTableName(rule.Match) { + return 0, fmt.Errorf("table %v not found in vschema", rule.Match) + } + if vtable != nil && vtable.Type == vindexes.TypeReference { + return StreamTypeReference, nil + } + // In this case, 'sharded' means that it's not a reference + // table. We don't care about any other subtleties. 
+ return StreamTypeSharded, nil +} + +func (rs *resharder) copySchema(ctx context.Context) error { + oneSource := rs.sourceShards[0].PrimaryAlias + err := rs.forAll(rs.targetShards, func(target *topo.ShardInfo) error { + return rs.s.CopySchemaShard(ctx, oneSource, []string{"/.*"}, nil, false, rs.keyspace, target.ShardName(), 1*time.Second, false) + }) + return err +} + +func (rs *resharder) createStreams(ctx context.Context) error { + var excludeRules []*binlogdatapb.Rule + for tableName, table := range rs.vschema.Tables { + if table.Type == vindexes.TypeReference { + excludeRules = append(excludeRules, &binlogdatapb.Rule{ + Match: tableName, + Filter: "exclude", + }) + } + } + + err := rs.forAll(rs.targetShards, func(target *topo.ShardInfo) error { + targetPrimary := rs.targetPrimaries[target.ShardName()] + + ig := vreplication.NewInsertGenerator(binlogdatapb.VReplicationWorkflowState_Stopped, targetPrimary.DbName()) + + // copy excludeRules to prevent data race. + copyExcludeRules := append([]*binlogdatapb.Rule(nil), excludeRules...) + for _, source := range rs.sourceShards { + if !key.KeyRangeIntersect(target.KeyRange, source.KeyRange) { + continue + } + filter := &binlogdatapb.Filter{ + Rules: append(copyExcludeRules, &binlogdatapb.Rule{ + Match: "/.*", + Filter: key.KeyRangeString(target.KeyRange), + }), + } + bls := &binlogdatapb.BinlogSource{ + Keyspace: rs.keyspace, + Shard: source.ShardName(), + Filter: filter, + StopAfterCopy: rs.stopAfterCopy, + OnDdl: binlogdatapb.OnDDLAction(binlogdatapb.OnDDLAction_value[rs.onDDL]), + } + ig.AddRow(rs.workflow, bls, "", rs.cell, rs.tabletTypes, + binlogdatapb.VReplicationWorkflowType_Reshard, + binlogdatapb.VReplicationWorkflowSubType_None, + rs.deferSecondaryKeys) + } + + for _, rstream := range rs.refStreams { + ig.AddRow(rstream.workflow, rstream.bls, "", rstream.cell, rstream.tabletTypes, + // TODO: fix based on original stream. 
+ binlogdatapb.VReplicationWorkflowType_Reshard, + binlogdatapb.VReplicationWorkflowSubType_None, + rs.deferSecondaryKeys) + } + query := ig.String() + if _, err := rs.s.tmc.VReplicationExec(ctx, targetPrimary.Tablet, query); err != nil { + return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", targetPrimary.Tablet, query) + } + return nil + }) + + return err +} + +func (rs *resharder) startStreams(ctx context.Context) error { + err := rs.forAll(rs.targetShards, func(target *topo.ShardInfo) error { + targetPrimary := rs.targetPrimaries[target.ShardName()] + query := fmt.Sprintf("update _vt.vreplication set state='Running' where db_name=%s", encodeString(targetPrimary.DbName())) + if _, err := rs.s.tmc.VReplicationExec(ctx, targetPrimary.Tablet, query); err != nil { + return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", targetPrimary.Tablet, query) + } + return nil + }) + return err +} + +func (rs *resharder) forAll(shards []*topo.ShardInfo, f func(*topo.ShardInfo) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, shard := range shards { + wg.Add(1) + go func(shard *topo.ShardInfo) { + defer wg.Done() + + if err := f(shard); err != nil { + allErrors.RecordError(err) + } + }(shard) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} diff --git a/go/vt/vtctl/workflow/server.go b/go/vt/vtctl/workflow/server.go index b29851a3f8a..6927b56b89d 100644 --- a/go/vt/vtctl/workflow/server.go +++ b/go/vt/vtctl/workflow/server.go @@ -17,33 +17,108 @@ limitations under the License. 
package workflow import ( + "bytes" "context" "errors" "fmt" + "math" + "reflect" + "slices" "sort" "strings" "sync" + "text/template" "time" + "golang.org/x/exp/maps" + "golang.org/x/sync/semaphore" "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sets" + "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/trace" + "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/mysqlctl/tmutils" + "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtctl/schematools" "vitess.io/vitess/go/vt/vtctl/workflow/vexec" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" "vitess.io/vitess/go/vt/vttablet/tmclient" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" - "vitess.io/vitess/go/vt/proto/vttime" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + vttimepb "vitess.io/vitess/go/vt/proto/vttime" +) + +// tableCopyProgress stores the row counts and disk sizes of the source and target tables +type tableCopyProgress struct { + TargetRowCount, TargetTableSize int64 + SourceRowCount, SourceTableSize int64 +} + +// copyProgress stores the 
tableCopyProgress for all tables still being copied +type copyProgress map[string]*tableCopyProgress + +// sequenceMetadata contains all of the relevant metadata for a sequence that +// is being used by a table involved in a vreplication workflow. +type sequenceMetadata struct { + // The name of the sequence table. + backingTableName string + // The keyspace where the backing table lives. + backingTableKeyspace string + // The dbName in use by the keyspace where the backing table lives. + backingTableDBName string + // The name of the table using the sequence. + usingTableName string + // The dbName in use by the keyspace where the using table lives. + usingTableDBName string + // The using table definition. + usingTableDefinition *vschemapb.Table +} + +// vdiffOutput holds the data from all shards that is needed to generate +// the full summary results of the vdiff in the vdiff show command output. +type vdiffOutput struct { + mu sync.Mutex + responses map[string]*tabletmanagerdatapb.VDiffResponse + err error +} + +const ( + cannotSwitchError = "workflow has errors" + cannotSwitchCopyIncomplete = "copy is still in progress" + cannotSwitchHighLag = "replication lag %ds is higher than allowed lag %ds" + cannotSwitchFailedTabletRefresh = "could not refresh all of the tablets involved in the operation:\n%s" + cannotSwitchFrozen = "workflow is frozen" + + // Number of LOCK TABLES cycles to perform on the sources during SwitchWrites. + lockTablesCycles = 2 + // Time to wait between LOCK TABLES cycles on the sources during SwitchWrites. + lockTablesCycleDelay = time.Duration(100 * time.Millisecond) + + // Default duration used for lag, timeout, etc. + defaultDuration = 30 * time.Second ) var ( @@ -57,19 +132,18 @@ var ( // ErrMultipleTargetKeyspaces occurs when a workflow somehow has multiple // target keyspaces across different shard primaries. This should be // impossible. 
- ErrMultipleTargetKeyspaces = errors.New("multiple target keyspaces for a single workflow") + ErrMultipleTargetKeyspaces = errors.New("multiple target keyspaces for a single workflow") + ErrWorkflowNotFullySwitched = errors.New("cannot complete workflow because you have not yet switched all read and write traffic") + ErrWorkflowPartiallySwitched = errors.New("cannot cancel workflow because you have already switched some or all read and write traffic") ) // Server provides an API to work with Vitess workflows, like vreplication // workflows (MoveTables, Reshard, etc) and schema migration workflows. -// -// NB: This is in alpha, and you probably don't want to depend on it (yet!). -// Currently, it provides only a read-only API to vreplication workflows. Write -// actions on vreplication workflows, and schema migration workflows entirely, -// are not yet supported, but planned. type Server struct { ts *topo.Server tmc tmclient.TabletManagerClient + // Limt the number of concurrent background goroutines if needed. 
+ sem *semaphore.Weighted } // NewServer returns a new server instance with the given topo.Server and @@ -213,7 +287,7 @@ func (s *Server) GetCellsWithTableReadsSwitched( getKeyspace := func(ruleTarget string) (string, error) { arr := strings.Split(ruleTarget, ".") if len(arr) != 2 { - return "", fmt.Errorf("rule target is not correctly formatted: %s", ruleTarget) + return "", vterrors.Errorf(vtrpcpb.Code_INTERNAL, "rule target is not correctly formatted: %s", ruleTarget) } return arr[0], nil @@ -264,6 +338,22 @@ func (s *Server) GetCellsWithTableReadsSwitched( return cellsSwitched, cellsNotSwitched, nil } +func (s *Server) GetWorkflow(ctx context.Context, keyspace, workflow string, includeLogs bool) (*vtctldatapb.Workflow, error) { + res, err := s.GetWorkflows(ctx, &vtctldatapb.GetWorkflowsRequest{ + Keyspace: keyspace, + Workflow: workflow, + IncludeLogs: includeLogs, + }) + if err != nil { + return nil, err + } + if len(res.Workflows) != 1 { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected number of workflows returned for %s.%s; expected 1, got %d", + keyspace, workflow, len(res.Workflows)) + } + return res.Workflows[0], nil +} + // GetWorkflows returns a list of all workflows that exist in a given keyspace, // with some additional filtering depending on the request parameters (for // example, ActiveOnly=true restricts the search to only workflows that are @@ -277,10 +367,18 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows span.Annotate("keyspace", req.Keyspace) span.Annotate("active_only", req.ActiveOnly) + span.Annotate("include_logs", req.IncludeLogs) where := "" + predicates := []string{} if req.ActiveOnly { - where = "WHERE state <> 'Stopped'" + predicates = append(predicates, "state <> 'Stopped'") + } + if req.Workflow != "" { + predicates = append(predicates, fmt.Sprintf("workflow = '%s'", req.Workflow)) + } + if len(predicates) > 0 { + where = fmt.Sprintf("WHERE %s", strings.Join(predicates, " AND ")) } 
query := fmt.Sprintf(` @@ -298,7 +396,12 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows message, tags, workflow_type, - workflow_sub_type + workflow_sub_type, + time_heartbeat, + defer_secondary_keys, + component_throttled, + time_throttled, + rows_copied FROM _vt.vreplication %s`, @@ -318,6 +421,7 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows targetKeyspaceByWorkflow := make(map[string]string, len(results)) targetShardsByWorkflow := make(map[string]sets.Set[string], len(results)) maxVReplicationLagByWorkflow := make(map[string]float64, len(results)) + maxVReplicationTransactionLagByWorkflow := make(map[string]float64, len(results)) // We guarantee the following invariants when this function is called for a // given workflow: @@ -336,7 +440,7 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows span.Annotate("workflow", workflow.Name) span.Annotate("tablet_alias", tablet.AliasString()) - id, err := evalengine.ToInt64(row["id"]) + id, err := row["id"].ToCastInt64() if err != nil { return err } @@ -350,17 +454,32 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows return err } - pos := row["pos"].ToString() + // The value in the pos column can be compressed and thus not + // have a valid GTID consisting of valid UTF-8 characters so we + // have to decode it so that it's properly decompressed first + // when needed. 
+ pos, err := row.ToString("pos") + if err != nil { + return err + } + if pos != "" { + mpos, err := binlogplayer.DecodePosition(pos) + if err != nil { + return err + } + pos = mpos.String() + } + stopPos := row["stop_pos"].ToString() state := row["state"].ToString() dbName := row["db_name"].ToString() - timeUpdatedSeconds, err := evalengine.ToInt64(row["time_updated"]) + timeUpdatedSeconds, err := row["time_updated"].ToCastInt64() if err != nil { return err } - transactionTimeSeconds, err := evalengine.ToInt64(row["transaction_timestamp"]) + transactionTimeSeconds, err := row["transaction_timestamp"].ToCastInt64() if err != nil { return err } @@ -372,8 +491,31 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows if tags != "" { tagArray = strings.Split(tags, ",") } + workflowType, _ := row["workflow_type"].ToInt32() workflowSubType, _ := row["workflow_sub_type"].ToInt32() + + timeHeartbeat, err := row["time_heartbeat"].ToCastInt64() + if err != nil { + return err + } + + componentThrottled := row["component_throttled"].ToString() + timeThrottled, err := row["time_throttled"].ToCastInt64() + if err != nil { + return err + } + + deferSecondaryKeys, err := row["defer_secondary_keys"].ToBool() + if err != nil { + return err + } + + rowsCopied, err := row["rows_copied"].ToCastInt64() + if err != nil { + return err + } + stream := &vtctldatapb.Workflow_Stream{ Id: id, Shard: tablet.Shard, @@ -383,17 +525,23 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows StopPosition: stopPos, State: state, DbName: dbName, - TransactionTimestamp: &vttime.Time{ + TransactionTimestamp: &vttimepb.Time{ Seconds: transactionTimeSeconds, }, - TimeUpdated: &vttime.Time{ + TimeUpdated: &vttimepb.Time{ Seconds: timeUpdatedSeconds, }, - Message: message, - Tags: tagArray, + Message: message, + Tags: tagArray, + RowsCopied: rowsCopied, + ThrottlerStatus: &vtctldatapb.Workflow_Stream_ThrottlerStatus{ + ComponentThrottled: 
componentThrottled, + TimeThrottled: &vttimepb.Time{ + Seconds: timeThrottled, + }, + }, } - workflow.WorkflowType = binlogdatapb.VReplicationWorkflowType_name[workflowType] - workflow.WorkflowSubType = binlogdatapb.VReplicationWorkflowSubType_name[workflowSubType] + stream.CopyStates, err = s.getWorkflowCopyStates(ctx, tablet, id) if err != nil { return err @@ -401,15 +549,6 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows span.Annotate("num_copy_states", len(stream.CopyStates)) - switch { - case strings.Contains(strings.ToLower(stream.Message), "error"): - stream.State = "Error" - case stream.State == "Running" && len(stream.CopyStates) > 0: - stream.State = "Copying" - case stream.State == "Running" && int64(time.Now().Second())-timeUpdatedSeconds > 10: - stream.State = "Lagging" - } - // At this point, we're going to start modifying the maps defined // outside this function, as well as fields on the passed-in Workflow // pointer. Since we're running concurrently, take the lock. 
@@ -419,6 +558,19 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows m.Lock() defer m.Unlock() + workflow.WorkflowType = binlogdatapb.VReplicationWorkflowType_name[workflowType] + workflow.WorkflowSubType = binlogdatapb.VReplicationWorkflowSubType_name[workflowSubType] + workflow.DeferSecondaryKeys = deferSecondaryKeys + + switch { + case strings.Contains(strings.ToLower(stream.Message), "error"): + stream.State = binlogdatapb.VReplicationWorkflowState_Error.String() + case stream.State == binlogdatapb.VReplicationWorkflowState_Running.String() && len(stream.CopyStates) > 0: + stream.State = binlogdatapb.VReplicationWorkflowState_Copying.String() + case stream.State == binlogdatapb.VReplicationWorkflowState_Running.String() && int64(time.Now().Second())-timeUpdatedSeconds > 10: + stream.State = binlogdatapb.VReplicationWorkflowState_Lagging.String() + } + shardStreamKey := fmt.Sprintf("%s/%s", tablet.Shard, tablet.AliasString()) shardStream, ok := workflow.ShardStreams[shardStreamKey] if !ok { @@ -444,13 +596,13 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows targetShardsByWorkflow[workflow.Name].Insert(tablet.Shard) if ks, ok := sourceKeyspaceByWorkflow[workflow.Name]; ok && ks != stream.BinlogSource.Keyspace { - return fmt.Errorf("%w: workflow = %v, ks1 = %v, ks2 = %v", ErrMultipleSourceKeyspaces, workflow.Name, ks, stream.BinlogSource.Keyspace) + return vterrors.Wrapf(ErrMultipleSourceKeyspaces, "workflow = %v, ks1 = %v, ks2 = %v", workflow.Name, ks, stream.BinlogSource.Keyspace) } sourceKeyspaceByWorkflow[workflow.Name] = stream.BinlogSource.Keyspace if ks, ok := targetKeyspaceByWorkflow[workflow.Name]; ok && ks != tablet.Keyspace { - return fmt.Errorf("%w: workflow = %v, ks1 = %v, ks2 = %v", ErrMultipleTargetKeyspaces, workflow.Name, ks, tablet.Keyspace) + return vterrors.Wrapf(ErrMultipleTargetKeyspaces, "workflow = %v, ks1 = %v, ks2 = %v", workflow.Name, ks, tablet.Keyspace) } 
targetKeyspaceByWorkflow[workflow.Name] = tablet.Keyspace @@ -458,6 +610,8 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows timeUpdated := time.Unix(timeUpdatedSeconds, 0) vreplicationLag := time.Since(timeUpdated) + // MaxVReplicationLag represents the time since we last processed any event + // in the workflow. if currentMaxLag, ok := maxVReplicationLagByWorkflow[workflow.Name]; ok { if vreplicationLag.Seconds() > currentMaxLag { maxVReplicationLagByWorkflow[workflow.Name] = vreplicationLag.Seconds() @@ -466,6 +620,34 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows maxVReplicationLagByWorkflow[workflow.Name] = vreplicationLag.Seconds() } + // MaxVReplicationTransactionLag estimates the actual statement processing lag + // between the source and the target. If we are still processing source events it + // is the difference b/w current time and the timestamp of the last event. If + // heartbeats are more recent than the last event, then the lag is the time since + // the last heartbeat as there can be an actual event immediately after the + // heartbeat, but which has not yet been processed on the target. + // We don't allow switching during the copy phase, so in that case we just return + // a large lag. All timestamps are in seconds since epoch. 
+ if _, ok := maxVReplicationTransactionLagByWorkflow[workflow.Name]; !ok { + maxVReplicationTransactionLagByWorkflow[workflow.Name] = 0 + } + lastTransactionTime := transactionTimeSeconds + lastHeartbeatTime := timeHeartbeat + if stream.State == binlogdatapb.VReplicationWorkflowState_Copying.String() { + maxVReplicationTransactionLagByWorkflow[workflow.Name] = math.MaxInt64 + } else { + if lastTransactionTime == 0 /* no new events after copy */ || + lastHeartbeatTime > lastTransactionTime /* no recent transactions, so all caught up */ { + + lastTransactionTime = lastHeartbeatTime + } + now := time.Now().Unix() /* seconds since epoch */ + transactionReplicationLag := float64(now - lastTransactionTime) + if transactionReplicationLag > maxVReplicationTransactionLagByWorkflow[workflow.Name] { + maxVReplicationTransactionLagByWorkflow[workflow.Name] = transactionReplicationLag + } + } + return nil } @@ -531,6 +713,7 @@ SELECT count FROM _vt.vreplication_log +WHERE vrepl_id IN %a ORDER BY vrepl_id ASC, id ASC @@ -538,13 +721,29 @@ ORDER BY ) fetchStreamLogs := func(ctx context.Context, workflow *vtctldatapb.Workflow) { - span, ctx := trace.NewSpan(ctx, "workflow.Server.scanWorkflow") + span, ctx := trace.NewSpan(ctx, "workflow.Server.fetchStreamLogs") defer span.Finish() span.Annotate("keyspace", req.Keyspace) span.Annotate("workflow", workflow.Name) - results, err := vx.WithWorkflow(workflow.Name).QueryContext(ctx, vrepLogQuery) + vreplIDs := make([]int64, 0, len(workflow.ShardStreams)) + for _, shardStream := range maps.Values(workflow.ShardStreams) { + for _, stream := range shardStream.Streams { + vreplIDs = append(vreplIDs, stream.Id) + } + } + idsBV, err := sqltypes.BuildBindVariable(vreplIDs) + if err != nil { + return + } + + query, err := sqlparser.ParseAndBind(vrepLogQuery, idsBV) + if err != nil { + return + } + + results, err := vx.WithWorkflow(workflow.Name).QueryContext(ctx, query) if err != nil { // Note that we do not return here. 
If there are any query results // in the map (i.e. some tablets returned successfully), we will @@ -579,13 +778,13 @@ ORDER BY } for _, row := range qr.Rows { - id, err := evalengine.ToInt64(row[0]) + id, err := row[0].ToCastInt64() if err != nil { markErrors(err) continue } - streamID, err := evalengine.ToInt64(row[1]) + streamID, err := row[1].ToCastInt64() if err != nil { markErrors(err) continue @@ -607,7 +806,7 @@ ORDER BY continue } - count, err := evalengine.ToInt64(row[7]) + count, err := row[7].ToCastInt64() if err != nil { markErrors(err) continue @@ -618,10 +817,10 @@ ORDER BY StreamId: streamID, Type: typ, State: state, - CreatedAt: &vttime.Time{ + CreatedAt: &vttimepb.Time{ Seconds: createdAt.Unix(), }, - UpdatedAt: &vttime.Time{ + UpdatedAt: &vttimepb.Time{ Seconds: updatedAt.Unix(), }, Message: message, @@ -659,27 +858,32 @@ ORDER BY for name, workflow := range workflowsMap { sourceShards, ok := sourceShardsByWorkflow[name] if !ok { - return nil, fmt.Errorf("%w: %s has no source shards", ErrInvalidWorkflow, name) + return nil, vterrors.Wrapf(ErrInvalidWorkflow, "%s has no source shards", name) } sourceKeyspace, ok := sourceKeyspaceByWorkflow[name] if !ok { - return nil, fmt.Errorf("%w: %s has no source keyspace", ErrInvalidWorkflow, name) + return nil, vterrors.Wrapf(ErrInvalidWorkflow, "%s has no source keyspace", name) } targetShards, ok := targetShardsByWorkflow[name] if !ok { - return nil, fmt.Errorf("%w: %s has no target shards", ErrInvalidWorkflow, name) + return nil, vterrors.Wrapf(ErrInvalidWorkflow, "%s has no target shards", name) } targetKeyspace, ok := targetKeyspaceByWorkflow[name] if !ok { - return nil, fmt.Errorf("%w: %s has no target keyspace", ErrInvalidWorkflow, name) + return nil, vterrors.Wrapf(ErrInvalidWorkflow, "%s has no target keyspace", name) } maxVReplicationLag, ok := maxVReplicationLagByWorkflow[name] if !ok { - return nil, fmt.Errorf("%w: %s has no tracked vreplication lag", ErrInvalidWorkflow, name) + return nil, 
vterrors.Wrapf(ErrInvalidWorkflow, "%s has no tracked vreplication lag", name) + } + + maxVReplicationTransactionLag, ok := maxVReplicationTransactionLagByWorkflow[name] + if !ok { + return nil, vterrors.Wrapf(ErrInvalidWorkflow, "%s has no tracked vreplication transaction lag", name) } workflow.Source = &vtctldatapb.Workflow_ReplicationLocation{ @@ -693,6 +897,7 @@ ORDER BY } workflow.MaxVReplicationLag = int64(maxVReplicationLag) + workflow.MaxVReplicationTransactionLag = int64(maxVReplicationTransactionLag) // Sort shard streams by stream_id ASC, to support an optimization // in fetchStreamLogs below. @@ -704,12 +909,14 @@ ORDER BY workflows = append(workflows, workflow) - // Fetch logs for all streams associated with this workflow in the background. - fetchLogsWG.Add(1) - go func(ctx context.Context, workflow *vtctldatapb.Workflow) { - defer fetchLogsWG.Done() - fetchStreamLogs(ctx, workflow) - }(ctx, workflow) + if req.IncludeLogs { + // Fetch logs for all streams associated with this workflow in the background. + fetchLogsWG.Add(1) + go func(ctx context.Context, workflow *vtctldatapb.Workflow) { + defer fetchLogsWG.Done() + fetchStreamLogs(ctx, workflow) + }(ctx, workflow) + } } // Wait for all the log fetchers to finish. @@ -720,6 +927,129 @@ ORDER BY }, nil } +func (s *Server) getWorkflowState(ctx context.Context, targetKeyspace, workflowName string) (*trafficSwitcher, *State, error) { + ts, err := s.buildTrafficSwitcher(ctx, targetKeyspace, workflowName) + + if err != nil { + log.Errorf("buildTrafficSwitcher failed: %v", err) + return nil, nil, err + } + + state := &State{ + Workflow: workflowName, + SourceKeyspace: ts.SourceKeyspaceName(), + TargetKeyspace: targetKeyspace, + IsPartialMigration: ts.isPartialMigration, + } + + if ts.workflowType == binlogdatapb.VReplicationWorkflowType_CreateLookupIndex { + // Nothing left to do. 
+ return ts, state, nil + } + + var ( + reverse bool + sourceKeyspace string + ) + + // We reverse writes by using the source_keyspace.workflowname_reverse workflow + // spec, so we need to use the source of the reverse workflow, which is the + // target of the workflow initiated by the user for checking routing rules. + // Similarly we use a target shard of the reverse workflow as the original + // source to check if writes have been switched. + + if strings.HasSuffix(workflowName, "_reverse") { + reverse = true + // Flip the source and target keyspaces. + sourceKeyspace = state.TargetKeyspace + targetKeyspace = state.SourceKeyspace + workflowName = ReverseWorkflowName(workflowName) + } else { + sourceKeyspace = state.SourceKeyspace + } + if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + state.WorkflowType = TypeMoveTables + + // We assume a consistent state, so only choose routing rule for one table. + if len(ts.Tables()) == 0 { + return nil, nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no tables in workflow %s.%s", targetKeyspace, workflowName) + + } + table := ts.Tables()[0] + + if ts.isPartialMigration { // shard level traffic switching is all or nothing + shardRoutingRules, err := s.ts.GetShardRoutingRules(ctx) + if err != nil { + return nil, nil, err + } + + rules := shardRoutingRules.Rules + for _, rule := range rules { + switch rule.ToKeyspace { + case sourceKeyspace: + state.ShardsNotYetSwitched = append(state.ShardsNotYetSwitched, rule.Shard) + case targetKeyspace: + state.ShardsAlreadySwitched = append(state.ShardsAlreadySwitched, rule.Shard) + default: + // Not a relevant rule. 
+ } + } + } else { + state.RdonlyCellsSwitched, state.RdonlyCellsNotSwitched, err = s.GetCellsWithTableReadsSwitched(ctx, targetKeyspace, table, topodatapb.TabletType_RDONLY) + if err != nil { + return nil, nil, err + } + + state.ReplicaCellsSwitched, state.ReplicaCellsNotSwitched, err = s.GetCellsWithTableReadsSwitched(ctx, targetKeyspace, table, topodatapb.TabletType_REPLICA) + if err != nil { + return nil, nil, err + } + globalRules, err := topotools.GetRoutingRules(ctx, ts.TopoServer()) + if err != nil { + return nil, nil, err + } + for _, table := range ts.Tables() { + rr := globalRules[table] + // If a rule exists for the table and points to the target keyspace, then + // writes have been switched. + if len(rr) > 0 && rr[0] == fmt.Sprintf("%s.%s", targetKeyspace, table) { + state.WritesSwitched = true + break + } + } + } + } else { + state.WorkflowType = TypeReshard + + // We assume a consistent state, so only choose one shard. + var shard *topo.ShardInfo + if reverse { + shard = ts.TargetShards()[0] + } else { + shard = ts.SourceShards()[0] + } + + state.RdonlyCellsSwitched, state.RdonlyCellsNotSwitched, err = s.GetCellsWithShardReadsSwitched(ctx, targetKeyspace, shard, topodatapb.TabletType_RDONLY) + if err != nil { + return nil, nil, err + } + + state.ReplicaCellsSwitched, state.ReplicaCellsNotSwitched, err = s.GetCellsWithShardReadsSwitched(ctx, targetKeyspace, shard, topodatapb.TabletType_REPLICA) + if err != nil { + return nil, nil, err + } + + if !shard.IsPrimaryServing { + state.WritesSwitched = true + } + } + if ts.workflowType == binlogdatapb.VReplicationWorkflowType_Migrate { + state.WorkflowType = TypeMigrate + } + + return ts, state, nil +} + func (s *Server) getWorkflowCopyStates(ctx context.Context, tablet *topo.TabletInfo, id int64) ([]*vtctldatapb.Workflow_Stream_CopyState, error) { span, ctx := trace.NewSpan(ctx, "workflow.Server.getWorkflowCopyStates") defer span.Finish() @@ -752,49 +1082,2697 @@ func (s *Server) getWorkflowCopyStates(ctx 
context.Context, tablet *topo.TabletI return copyStates, nil } -// WorkflowUpdate is part of the vtctlservicepb.VtctldServer interface. -// It passes the embedded TabletRequest object to the given keyspace's -// target primary tablets that are participating in the given workflow. -func (s *Server) WorkflowUpdate(ctx context.Context, req *vtctldatapb.WorkflowUpdateRequest) (*vtctldatapb.WorkflowUpdateResponse, error) { - span, ctx := trace.NewSpan(ctx, "workflow.Server.WorkflowUpdate") +// LookupVindexCreate creates the lookup vindex in the specified +// keyspace and creates a VReplication workflow to backfill that +// vindex from the keyspace to the target/lookup table specified. +func (s *Server) LookupVindexCreate(ctx context.Context, req *vtctldatapb.LookupVindexCreateRequest) (*vtctldatapb.LookupVindexCreateResponse, error) { + span, ctx := trace.NewSpan(ctx, "workflow.Server.LookupVindexCreate") defer span.Finish() + span.Annotate("workflow", req.Workflow) span.Annotate("keyspace", req.Keyspace) - span.Annotate("workflow", req.TabletRequest.Workflow) - span.Annotate("cells", req.TabletRequest.Cells) - span.Annotate("tablet_types", req.TabletRequest.TabletTypes) - span.Annotate("on_ddl", req.TabletRequest.OnDdl) + span.Annotate("continue_after_copy_with_owner", req.ContinueAfterCopyWithOwner) + span.Annotate("cells", req.Cells) + span.Annotate("tablet_types", req.TabletTypes) - vx := vexec.NewVExec(req.Keyspace, req.TabletRequest.Workflow, s.ts, s.tmc) - callback := func(ctx context.Context, tablet *topo.TabletInfo) (*querypb.QueryResult, error) { - res, err := s.tmc.UpdateVRWorkflow(ctx, tablet.Tablet, req.TabletRequest) - if err != nil { - return nil, err + ms, sourceVSchema, targetVSchema, err := s.prepareCreateLookup(ctx, req.Workflow, req.Keyspace, req.Vindex, req.ContinueAfterCopyWithOwner) + if err != nil { + return nil, err + } + if err := s.ts.SaveVSchema(ctx, ms.TargetKeyspace, targetVSchema); err != nil { + return nil, err + } + + ms.TabletTypes = 
topoproto.MakeStringTypeCSV(req.TabletTypes) + ms.TabletSelectionPreference = req.TabletSelectionPreference + if err := s.Materialize(ctx, ms); err != nil { + return nil, err + } + if err := s.ts.SaveVSchema(ctx, req.Keyspace, sourceVSchema); err != nil { + return nil, err + } + if err := s.ts.RebuildSrvVSchema(ctx, nil); err != nil { + return nil, err + } + + return &vtctldatapb.LookupVindexCreateResponse{}, nil +} + +// LookupVindexExternalize externalizes a lookup vindex that's +// finished backfilling or has caught up. If the vindex has an +// owner then the workflow will also be deleted. +func (s *Server) LookupVindexExternalize(ctx context.Context, req *vtctldatapb.LookupVindexExternalizeRequest) (*vtctldatapb.LookupVindexExternalizeResponse, error) { + span, ctx := trace.NewSpan(ctx, "workflow.Server.LookupVindexExternalize") + defer span.Finish() + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("name", req.Name) + span.Annotate("table_keyspace", req.TableKeyspace) + + // Find the lookup vindex by name. + sourceVschema, err := s.ts.GetVSchema(ctx, req.Keyspace) + if err != nil { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get vschema for the %s keyspace", req.Keyspace) + } + vindex := sourceVschema.Vindexes[req.Name] + if vindex == nil { + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "vindex %s not found in the %s keyspace", req.Name, req.Keyspace) + } + + targetShards, err := s.ts.GetServingShards(ctx, req.TableKeyspace) + if err != nil { + return nil, err + } + + // Create a parallelizer function. 
+ forAllTargets := func(f func(*topo.ShardInfo) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, targetShard := range targetShards { + wg.Add(1) + go func(targetShard *topo.ShardInfo) { + defer wg.Done() + + if err := f(targetShard); err != nil { + allErrors.RecordError(err) + } + }(targetShard) } - return res.Result, err + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) } - res, err := vx.CallbackContext(ctx, callback) + + err = forAllTargets(func(targetShard *topo.ShardInfo) error { + targetPrimary, err := s.ts.GetTablet(ctx, targetShard.PrimaryAlias) + if err != nil { + return err + } + p3qr, err := s.tmc.VReplicationExec(ctx, targetPrimary.Tablet, fmt.Sprintf("select id, state, message, source from _vt.vreplication where workflow=%s and db_name=%s", encodeString(req.Name), encodeString(targetPrimary.DbName()))) + if err != nil { + return err + } + qr := sqltypes.Proto3ToResult(p3qr) + if qr == nil || len(qr.Rows) == 0 { + return vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "workflow %s not found on %v", req.Name, targetPrimary.Alias) + } + for _, row := range qr.Rows { + id, err := row[0].ToCastInt64() + if err != nil { + return err + } + state := binlogdatapb.VReplicationWorkflowState(binlogdatapb.VReplicationWorkflowState_value[row[1].ToString()]) + message := row[2].ToString() + var bls binlogdatapb.BinlogSource + sourceBytes, err := row[3].ToBytes() + if err != nil { + return err + } + if err := prototext.Unmarshal(sourceBytes, &bls); err != nil { + return err + } + if bls.Filter == nil || len(bls.Filter.Rules) != 1 { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid binlog source") + } + if vindex.Owner == "" || !bls.StopAfterCopy { + // If there's no owner or we've requested that the workflow NOT be stopped + // after the copy phase completes, then all streams need to be running. 
+ if state != binlogdatapb.VReplicationWorkflowState_Running { + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "stream %d for %v.%v is not in Running state: %v", id, targetShard.Keyspace(), targetShard.ShardName(), state) + } + } else { + // If there is an owner, all streams need to be stopped after copy. + if state != binlogdatapb.VReplicationWorkflowState_Stopped || !strings.Contains(message, "Stopped after copy") { + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "stream %d for %v.%v is not in Stopped after copy state: %v, %v", id, targetShard.Keyspace(), targetShard.ShardName(), state, message) + } + } + } + return nil + }) if err != nil { - if topo.IsErrType(err, topo.NoNode) { - return nil, vterrors.Wrapf(err, "%s keyspace does not exist", req.Keyspace) + return nil, err + } + + resp := &vtctldatapb.LookupVindexExternalizeResponse{} + + if vindex.Owner != "" { + // If there is an owner, we have to delete the streams. Once we externalize it + // the VTGate will now be responsible for keeping the lookup table up to date + // with the owner table. + if _, derr := s.WorkflowDelete(ctx, &vtctldatapb.WorkflowDeleteRequest{ + Keyspace: req.TableKeyspace, + Workflow: req.Name, + KeepData: true, // Not relevant + KeepRoutingRules: true, // Not relevant + }); derr != nil { + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "failed to delete workflow %s: %v", req.Name, derr) } + resp.WorkflowDeleted = true + } + + // Remove the write_only param and save the source vschema. + delete(vindex.Params, "write_only") + if err := s.ts.SaveVSchema(ctx, req.Keyspace, sourceVschema); err != nil { return nil, err } + return resp, s.ts.RebuildSrvVSchema(ctx, nil) +} - if len(res) == 0 { - return nil, fmt.Errorf("the %s workflow does not exist in the %s keyspace", req.TabletRequest.Workflow, req.Keyspace) +// Materialize performs the steps needed to materialize a list of +// tables based on the materialization specs. 
+func (s *Server) Materialize(ctx context.Context, ms *vtctldatapb.MaterializeSettings) error { + mz := &materializer{ + ctx: ctx, + ts: s.ts, + sourceTs: s.ts, + tmc: s.tmc, + ms: ms, } - response := &vtctldatapb.WorkflowUpdateResponse{} - response.Summary = fmt.Sprintf("Successfully updated the %s workflow on (%d) target primary tablets in the %s keyspace", req.TabletRequest.Workflow, len(res), req.Keyspace) - details := make([]*vtctldatapb.WorkflowUpdateResponse_TabletInfo, 0, len(res)) - for tinfo, tres := range res { - result := &vtctldatapb.WorkflowUpdateResponse_TabletInfo{ - Tablet: fmt.Sprintf("%s-%d (%s/%s)", tinfo.Alias.Cell, tinfo.Alias.Uid, tinfo.Keyspace, tinfo.Shard), - Changed: tres.RowsAffected > 0, // Can be more than one with shard merges + err := mz.createMaterializerStreams() + if err != nil { + return err + } + return mz.startStreams(ctx) +} + +// MoveTablesCreate is part of the vtctlservicepb.VtctldServer interface. +// It passes the embedded TabletRequest object to the given keyspace's +// target primary tablets that will be executing the workflow. 
+func (s *Server) MoveTablesCreate(ctx context.Context, req *vtctldatapb.MoveTablesCreateRequest) (res *vtctldatapb.WorkflowStatusResponse, err error) { + return s.moveTablesCreate(ctx, req, binlogdatapb.VReplicationWorkflowType_MoveTables) +} + +func (s *Server) moveTablesCreate(ctx context.Context, req *vtctldatapb.MoveTablesCreateRequest, + workflowType binlogdatapb.VReplicationWorkflowType) (res *vtctldatapb.WorkflowStatusResponse, err error) { + + span, ctx := trace.NewSpan(ctx, "workflow.Server.MoveTablesCreate") + defer span.Finish() + + span.Annotate("keyspace", req.TargetKeyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("cells", req.Cells) + span.Annotate("tablet_types", req.TabletTypes) + span.Annotate("on_ddl", req.OnDdl) + + sourceKeyspace := req.SourceKeyspace + targetKeyspace := req.TargetKeyspace + //FIXME validate tableSpecs, allTables, excludeTables + var ( + tables = req.IncludeTables + externalTopo *topo.Server + sourceTopo = s.ts + ) + + // When the source is an external cluster mounted using the Mount command. 
+ if req.ExternalClusterName != "" { + externalTopo, err = s.ts.OpenExternalVitessClusterServer(ctx, req.ExternalClusterName) + if err != nil { + return nil, err } - details = append(details, result) + sourceTopo = externalTopo + log.Infof("Successfully opened external topo: %+v", externalTopo) } - response.Details = details - return response, nil + + var vschema *vschemapb.Keyspace + var origVSchema *vschemapb.Keyspace // If we need to rollback a failed create + vschema, err = s.ts.GetVSchema(ctx, targetKeyspace) + if err != nil { + return nil, err + } + if vschema == nil { + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no vschema found for target keyspace %s", targetKeyspace) + } + ksTables, err := getTablesInKeyspace(ctx, sourceTopo, s.tmc, sourceKeyspace) + if err != nil { + return nil, err + } + if len(tables) > 0 { + err = s.validateSourceTablesExist(ctx, sourceKeyspace, ksTables, tables) + if err != nil { + return nil, err + } + } else { + if req.AllTables { + tables = ksTables + } else { + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no tables to move") + } + } + if len(req.ExcludeTables) > 0 { + err = s.validateSourceTablesExist(ctx, sourceKeyspace, ksTables, req.ExcludeTables) + if err != nil { + return nil, err + } + } + var tables2 []string + for _, t := range tables { + if shouldInclude(t, req.ExcludeTables) { + tables2 = append(tables2, t) + } + } + tables = tables2 + if len(tables) == 0 { + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no tables to move") + } + log.Infof("Found tables to move: %s", strings.Join(tables, ",")) + + if !vschema.Sharded { + // Save the original in case we need to restore it for a late failure + // in the defer(). 
+ origVSchema = vschema.CloneVT() + if err := s.addTablesToVSchema(ctx, sourceKeyspace, vschema, tables, externalTopo == nil); err != nil { + return nil, err + } + } + + ms := &vtctldatapb.MaterializeSettings{ + Workflow: req.Workflow, + MaterializationIntent: vtctldatapb.MaterializationIntent_MOVETABLES, + SourceKeyspace: sourceKeyspace, + TargetKeyspace: targetKeyspace, + Cell: strings.Join(req.Cells, ","), + TabletTypes: topoproto.MakeStringTypeCSV(req.TabletTypes), + TabletSelectionPreference: req.TabletSelectionPreference, + StopAfterCopy: req.StopAfterCopy, + ExternalCluster: req.ExternalClusterName, + SourceShards: req.SourceShards, + OnDdl: req.OnDdl, + DeferSecondaryKeys: req.DeferSecondaryKeys, + AtomicCopy: req.AtomicCopy, + } + if req.SourceTimeZone != "" { + ms.SourceTimeZone = req.SourceTimeZone + ms.TargetTimeZone = "UTC" + } + createDDLMode := createDDLAsCopy + if req.DropForeignKeys { + createDDLMode = createDDLAsCopyDropForeignKeys + } + + for _, table := range tables { + buf := sqlparser.NewTrackedBuffer(nil) + buf.Myprintf("select * from %v", sqlparser.NewIdentifierCS(table)) + ms.TableSettings = append(ms.TableSettings, &vtctldatapb.TableMaterializeSettings{ + TargetTable: table, + SourceExpression: buf.String(), + CreateDdl: createDDLMode, + }) + } + mz := &materializer{ + ctx: ctx, + ts: s.ts, + sourceTs: sourceTopo, + tmc: s.tmc, + ms: ms, + workflowType: workflowType, + } + err = mz.createMoveTablesStreams(req) + if err != nil { + return nil, err + } + + // If we get an error after this point, where the vreplication streams/records + // have been created, then we clean up the workflow's artifacts. 
+ defer func() { + if err != nil { + ts, cerr := s.buildTrafficSwitcher(ctx, ms.TargetKeyspace, ms.Workflow) + if cerr != nil { + err = vterrors.Wrapf(err, "failed to cleanup workflow artifacts: %v", cerr) + } + if cerr := s.dropArtifacts(ctx, false, &switcher{s: s, ts: ts}); cerr != nil { + err = vterrors.Wrapf(err, "failed to cleanup workflow artifacts: %v", cerr) + } + if origVSchema == nil { // There's no previous version to restore + return + } + if cerr := s.ts.SaveVSchema(ctx, targetKeyspace, origVSchema); cerr != nil { + err = vterrors.Wrapf(err, "failed to restore original target vschema: %v", cerr) + } + } + }() + + // Now that the streams have been successfully created, let's put the associated + // routing rules in place. + if externalTopo == nil { + if req.NoRoutingRules { + log.Warningf("Found --no-routing-rules flag, not creating routing rules for workflow %s.%s", targetKeyspace, req.Workflow) + } else { + // Save routing rules before vschema. If we save vschema first, and routing + // rules fails to save, we may generate duplicate table errors. + if mz.isPartial { + if err := createDefaultShardRoutingRules(mz.ctx, mz.ms, mz.ts); err != nil { + return nil, err + } + } + + rules, err := topotools.GetRoutingRules(ctx, s.ts) + if err != nil { + return nil, err + } + for _, table := range tables { + toSource := []string{sourceKeyspace + "." + table} + rules[table] = toSource + rules[table+"@replica"] = toSource + rules[table+"@rdonly"] = toSource + rules[targetKeyspace+"."+table] = toSource + rules[targetKeyspace+"."+table+"@replica"] = toSource + rules[targetKeyspace+"."+table+"@rdonly"] = toSource + rules[sourceKeyspace+"."+table] = toSource + rules[sourceKeyspace+"."+table+"@replica"] = toSource + rules[sourceKeyspace+"."+table+"@rdonly"] = toSource + } + if err := topotools.SaveRoutingRules(ctx, s.ts, rules); err != nil { + return nil, err + } + } + if vschema != nil { + // We added to the vschema. 
+ if err := s.ts.SaveVSchema(ctx, targetKeyspace, vschema); err != nil { + return nil, err + } + } + + } + if err := s.ts.RebuildSrvVSchema(ctx, nil); err != nil { + return nil, err + } + + if ms.SourceTimeZone != "" { + if err := mz.checkTZConversion(ctx, ms.SourceTimeZone); err != nil { + return nil, err + } + } + + tabletShards, err := s.collectTargetStreams(ctx, mz) + if err != nil { + return nil, err + } + + migrationID, err := getMigrationID(targetKeyspace, tabletShards) + if err != nil { + return nil, err + } + + if mz.ms.ExternalCluster == "" { + exists, tablets, err := s.checkIfPreviousJournalExists(ctx, mz, migrationID) + if err != nil { + return nil, err + } + if exists { + log.Errorf("Found a previous journal entry for %d", migrationID) + msg := fmt.Sprintf("found an entry from a previous run for migration id %d in _vt.resharding_journal on tablets %s, ", + migrationID, strings.Join(tablets, ",")) + msg += fmt.Sprintf("please review and delete it before proceeding and then start the workflow using: MoveTables --workflow %s --target-keyspace %s start", + req.Workflow, req.TargetKeyspace) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, msg) + } + } + + if req.AutoStart { + if err := mz.startStreams(ctx); err != nil { + return nil, err + } + } + + return s.WorkflowStatus(ctx, &vtctldatapb.WorkflowStatusRequest{ + Keyspace: targetKeyspace, + Workflow: req.Workflow, + }) +} + +// MoveTablesComplete is part of the vtctlservicepb.VtctldServer interface. +// It cleans up a successful MoveTables workflow and its related artifacts. +// Note: this is currently re-used for Reshard as well. 
+func (s *Server) MoveTablesComplete(ctx context.Context, req *vtctldatapb.MoveTablesCompleteRequest) (*vtctldatapb.MoveTablesCompleteResponse, error) { + span, ctx := trace.NewSpan(ctx, "workflow.Server.MoveTablesComplete") + defer span.Finish() + + ts, state, err := s.getWorkflowState(ctx, req.TargetKeyspace, req.Workflow) + if err != nil { + return nil, err + } + + var summary string + if req.DryRun { + summary = fmt.Sprintf("Complete dry run results for workflow %s.%s at %v", req.TargetKeyspace, req.Workflow, time.Now().UTC().Format(time.RFC822)) + } else { + summary = fmt.Sprintf("Successfully completed the %s workflow in the %s keyspace", req.Workflow, req.TargetKeyspace) + } + var dryRunResults *[]string + + if state.WorkflowType == TypeMigrate { + dryRunResults, err = s.finalizeMigrateWorkflow(ctx, req.TargetKeyspace, req.Workflow, strings.Join(ts.tables, ","), + false, req.KeepData, req.KeepRoutingRules, req.DryRun) + if err != nil { + return nil, vterrors.Wrapf(err, "failed to finalize the %s workflow in the %s keyspace", + req.Workflow, req.TargetKeyspace) + } + resp := &vtctldatapb.MoveTablesCompleteResponse{ + Summary: summary, + } + if dryRunResults != nil { + resp.DryRunResults = *dryRunResults + } + return resp, nil + } + + if !state.WritesSwitched || len(state.ReplicaCellsNotSwitched) > 0 || len(state.RdonlyCellsNotSwitched) > 0 { + return nil, ErrWorkflowNotFullySwitched + } + var renameTable TableRemovalType + if req.RenameTables { + renameTable = RenameTable + } else { + renameTable = DropTable + } + if dryRunResults, err = s.dropSources(ctx, ts, renameTable, req.KeepData, req.KeepRoutingRules, false, req.DryRun); err != nil { + return nil, err + } + + resp := &vtctldatapb.MoveTablesCompleteResponse{ + Summary: summary, + } + if dryRunResults != nil { + resp.DryRunResults = *dryRunResults + } + + return resp, nil +} + +// ReshardCreate is part of the vtctlservicepb.VtctldServer interface. 
+func (s *Server) ReshardCreate(ctx context.Context, req *vtctldatapb.ReshardCreateRequest) (*vtctldatapb.WorkflowStatusResponse, error) { + span, ctx := trace.NewSpan(ctx, "workflow.Server.ReshardCreate") + defer span.Finish() + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("source_shards", req.SourceShards) + span.Annotate("target_shards", req.TargetShards) + span.Annotate("cells", req.Cells) + span.Annotate("tablet_types", req.TabletTypes) + span.Annotate("on_ddl", req.OnDdl) + + keyspace := req.Keyspace + cells := req.Cells + // TODO: validate workflow does not exist. + + if err := s.ts.ValidateSrvKeyspace(ctx, keyspace, strings.Join(cells, ",")); err != nil { + err2 := vterrors.Wrapf(err, "SrvKeyspace for keyspace %s is corrupt for cell(s) %s", keyspace, cells) + log.Errorf("%v", err2) + return nil, err + } + rs, err := s.buildResharder(ctx, keyspace, req.Workflow, req.SourceShards, req.TargetShards, strings.Join(cells, ","), "") + if err != nil { + return nil, vterrors.Wrap(err, "buildResharder") + } + rs.onDDL = req.OnDdl + rs.stopAfterCopy = req.StopAfterCopy + rs.deferSecondaryKeys = req.DeferSecondaryKeys + if !req.SkipSchemaCopy { + if err := rs.copySchema(ctx); err != nil { + return nil, vterrors.Wrap(err, "copySchema") + } + } + if err := rs.createStreams(ctx); err != nil { + return nil, vterrors.Wrap(err, "createStreams") + } + + if req.AutoStart { + if err := rs.startStreams(ctx); err != nil { + return nil, vterrors.Wrap(err, "startStreams") + } + } else { + log.Warningf("Streams will not be started since --auto-start is set to false") + } + return nil, nil +} + +// VDiffCreate is part of the vtctlservicepb.VtctldServer interface. +// It passes on the request to the target primary tablets that are +// participating in the given workflow and VDiff. 
+func (s *Server) VDiffCreate(ctx context.Context, req *vtctldatapb.VDiffCreateRequest) (*vtctldatapb.VDiffCreateResponse, error) { + span, ctx := trace.NewSpan(ctx, "workflow.Server.VDiffCreate") + defer span.Finish() + + span.Annotate("keyspace", req.TargetKeyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("uuid", req.Uuid) + span.Annotate("source_cells", req.SourceCells) + span.Annotate("target_cells", req.TargetCells) + span.Annotate("tablet_types", req.TabletTypes) + span.Annotate("tables", req.Tables) + span.Annotate("auto_retry", req.AutoRetry) + + tabletTypesStr := topoproto.MakeStringTypeCSV(req.TabletTypes) + if req.TabletSelectionPreference == tabletmanagerdatapb.TabletSelectionPreference_INORDER { + tabletTypesStr = discovery.InOrderHint + tabletTypesStr + } + + options := &tabletmanagerdatapb.VDiffOptions{ + PickerOptions: &tabletmanagerdatapb.VDiffPickerOptions{ + TabletTypes: tabletTypesStr, + SourceCell: strings.Join(req.SourceCells, ","), + TargetCell: strings.Join(req.TargetCells, ","), + }, + CoreOptions: &tabletmanagerdatapb.VDiffCoreOptions{ + Tables: strings.Join(req.Tables, ","), + AutoRetry: req.AutoRetry, + MaxRows: req.MaxExtraRowsToCompare, + TimeoutSeconds: req.FilteredReplicationWaitTime.Seconds, + MaxExtraRowsToCompare: req.MaxExtraRowsToCompare, + UpdateTableStats: req.UpdateTableStats, + }, + ReportOptions: &tabletmanagerdatapb.VDiffReportOptions{ + OnlyPks: req.OnlyPKs, + DebugQuery: req.DebugQuery, + }, + } + + tabletreq := &tabletmanagerdatapb.VDiffRequest{ + Keyspace: req.TargetKeyspace, + Workflow: req.Workflow, + Action: string(vdiff.CreateAction), + Options: options, + VdiffUuid: req.Uuid, + } + + ts, err := s.buildTrafficSwitcher(ctx, req.TargetKeyspace, req.Workflow) + if err != nil { + return nil, err + } + if ts.frozen { + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "invalid VDiff run: writes have been already been switched for workflow %s.%s", + req.TargetKeyspace, req.Workflow) + } + + 
err = ts.ForAllTargets(func(target *MigrationTarget) error { + _, err := s.tmc.VDiff(ctx, target.GetPrimary().Tablet, tabletreq) + return err + }) + if err != nil { + log.Errorf("Error executing vdiff create action: %v", err) + return nil, err + } + + return &vtctldatapb.VDiffCreateResponse{ + UUID: req.Uuid, + }, nil +} + +// VDiffDelete is part of the vtctlservicepb.VtctldServer interface. +func (s *Server) VDiffDelete(ctx context.Context, req *vtctldatapb.VDiffDeleteRequest) (*vtctldatapb.VDiffDeleteResponse, error) { + span, ctx := trace.NewSpan(ctx, "workflow.Server.VDiffDelete") + defer span.Finish() + + span.Annotate("keyspace", req.TargetKeyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("argument", req.Arg) + + tabletreq := &tabletmanagerdatapb.VDiffRequest{ + Keyspace: req.TargetKeyspace, + Workflow: req.Workflow, + Action: string(vdiff.DeleteAction), + ActionArg: req.Arg, + } + + ts, err := s.buildTrafficSwitcher(ctx, req.TargetKeyspace, req.Workflow) + if err != nil { + return nil, err + } + + err = ts.ForAllTargets(func(target *MigrationTarget) error { + _, err := s.tmc.VDiff(ctx, target.GetPrimary().Tablet, tabletreq) + return err + }) + if err != nil { + log.Errorf("Error executing vdiff delete action: %v", err) + return nil, err + } + + return &vtctldatapb.VDiffDeleteResponse{}, nil +} + +// VDiffResume is part of the vtctlservicepb.VtctldServer interface. 
+func (s *Server) VDiffResume(ctx context.Context, req *vtctldatapb.VDiffResumeRequest) (*vtctldatapb.VDiffResumeResponse, error) { + span, ctx := trace.NewSpan(ctx, "workflow.Server.VDiffResume") + defer span.Finish() + + span.Annotate("keyspace", req.TargetKeyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("uuid", req.Uuid) + + tabletreq := &tabletmanagerdatapb.VDiffRequest{ + Keyspace: req.TargetKeyspace, + Workflow: req.Workflow, + Action: string(vdiff.ResumeAction), + VdiffUuid: req.Uuid, + } + + ts, err := s.buildTrafficSwitcher(ctx, req.TargetKeyspace, req.Workflow) + if err != nil { + return nil, err + } + + err = ts.ForAllTargets(func(target *MigrationTarget) error { + _, err := s.tmc.VDiff(ctx, target.GetPrimary().Tablet, tabletreq) + return err + }) + if err != nil { + log.Errorf("Error executing vdiff resume action: %v", err) + return nil, err + } + + return &vtctldatapb.VDiffResumeResponse{}, nil +} + +// VDiffShow is part of the vtctlservicepb.VtctldServer interface. 
+func (s *Server) VDiffShow(ctx context.Context, req *vtctldatapb.VDiffShowRequest) (*vtctldatapb.VDiffShowResponse, error) { + span, ctx := trace.NewSpan(ctx, "workflow.Server.VDiffShow") + defer span.Finish() + + span.Annotate("keyspace", req.TargetKeyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("argument", req.Arg) + + tabletreq := &tabletmanagerdatapb.VDiffRequest{ + Keyspace: req.TargetKeyspace, + Workflow: req.Workflow, + Action: string(vdiff.ShowAction), + ActionArg: req.Arg, + } + + ts, err := s.buildTrafficSwitcher(ctx, req.TargetKeyspace, req.Workflow) + if err != nil { + return nil, err + } + + output := &vdiffOutput{ + responses: make(map[string]*tabletmanagerdatapb.VDiffResponse, len(ts.targets)), + err: nil, + } + output.err = ts.ForAllTargets(func(target *MigrationTarget) error { + resp, err := s.tmc.VDiff(ctx, target.GetPrimary().Tablet, tabletreq) + output.mu.Lock() + defer output.mu.Unlock() + output.responses[target.GetShard().ShardName()] = resp + return err + }) + if output.err != nil { + log.Errorf("Error executing vdiff show action: %v", output.err) + return nil, output.err + } + return &vtctldatapb.VDiffShowResponse{ + TabletResponses: output.responses, + }, nil +} + +// VDiffStop is part of the vtctlservicepb.VtctldServer interface. 
+func (s *Server) VDiffStop(ctx context.Context, req *vtctldatapb.VDiffStopRequest) (*vtctldatapb.VDiffStopResponse, error) { + span, ctx := trace.NewSpan(ctx, "workflow.Server.VDiffStop") + defer span.Finish() + + span.Annotate("keyspace", req.TargetKeyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("uuid", req.Uuid) + + tabletreq := &tabletmanagerdatapb.VDiffRequest{ + Keyspace: req.TargetKeyspace, + Workflow: req.Workflow, + Action: string(vdiff.StopAction), + VdiffUuid: req.Uuid, + } + + ts, err := s.buildTrafficSwitcher(ctx, req.TargetKeyspace, req.Workflow) + if err != nil { + return nil, err + } + + err = ts.ForAllTargets(func(target *MigrationTarget) error { + _, err := s.tmc.VDiff(ctx, target.GetPrimary().Tablet, tabletreq) + return err + }) + if err != nil { + log.Errorf("Error executing vdiff stop action: %v", err) + return nil, err + } + + return &vtctldatapb.VDiffStopResponse{}, nil +} + +// WorkflowDelete is part of the vtctlservicepb.VtctldServer interface. +// It passes on the request to the target primary tablets that are +// participating in the given workflow. +func (s *Server) WorkflowDelete(ctx context.Context, req *vtctldatapb.WorkflowDeleteRequest) (*vtctldatapb.WorkflowDeleteResponse, error) { + span, ctx := trace.NewSpan(ctx, "workflow.Server.WorkflowDelete") + defer span.Finish() + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("workflow", req.Workflow) + + // Cleanup related data and artifacts. 
+ if _, err := s.DropTargets(ctx, req.Keyspace, req.Workflow, req.KeepData, req.KeepRoutingRules, false); err != nil { + if topo.IsErrType(err, topo.NoNode) { + return nil, vterrors.Wrapf(err, "%s keyspace does not exist", req.Keyspace) + } + return nil, err + } + + deleteReq := &tabletmanagerdatapb.DeleteVReplicationWorkflowRequest{ + Workflow: req.Workflow, + } + vx := vexec.NewVExec(req.Keyspace, req.Workflow, s.ts, s.tmc) + callback := func(ctx context.Context, tablet *topo.TabletInfo) (*querypb.QueryResult, error) { + res, err := s.tmc.DeleteVReplicationWorkflow(ctx, tablet.Tablet, deleteReq) + if err != nil { + return nil, err + } + // Best effort cleanup and optimization of related data. + s.deleteWorkflowVDiffData(ctx, tablet.Tablet, req.Workflow) + s.optimizeCopyStateTable(tablet.Tablet) + return res.Result, err + } + res, err := vx.CallbackContext(ctx, callback) + if err != nil { + return nil, err + } + + if len(res) == 0 { + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "the %s workflow does not exist in the %s keyspace", req.Workflow, req.Keyspace) + } + + response := &vtctldatapb.WorkflowDeleteResponse{} + response.Summary = fmt.Sprintf("Successfully cancelled the %s workflow in the %s keyspace", req.Workflow, req.Keyspace) + details := make([]*vtctldatapb.WorkflowDeleteResponse_TabletInfo, 0, len(res)) + for tinfo, tres := range res { + result := &vtctldatapb.WorkflowDeleteResponse_TabletInfo{ + Tablet: tinfo.Alias, + Deleted: tres.RowsAffected > 0, // Can be more than one with shard merges + } + details = append(details, result) + } + response.Details = details + return response, nil +} + +func (s *Server) WorkflowStatus(ctx context.Context, req *vtctldatapb.WorkflowStatusRequest) (*vtctldatapb.WorkflowStatusResponse, error) { + ts, state, err := s.getWorkflowState(ctx, req.Keyspace, req.Workflow) + if err != nil { + return nil, err + } + copyProgress, err := s.GetCopyProgress(ctx, ts, state) + if err != nil { + return nil, err + } + 
resp := &vtctldatapb.WorkflowStatusResponse{ + TrafficState: state.String(), + } + if copyProgress != nil { + resp.TableCopyState = make(map[string]*vtctldatapb.WorkflowStatusResponse_TableCopyState, len(*copyProgress)) + // We sort the tables for intuitive and consistent output. + var tables []string + for table := range *copyProgress { + tables = append(tables, table) + } + sort.Strings(tables) + var progress tableCopyProgress + for _, table := range tables { + var rowCountPct, tableSizePct float32 + resp.TableCopyState[table] = &vtctldatapb.WorkflowStatusResponse_TableCopyState{} + progress = *(*copyProgress)[table] + if progress.SourceRowCount > 0 { + rowCountPct = float32(100.0 * float64(progress.TargetRowCount) / float64(progress.SourceRowCount)) + } + if progress.SourceTableSize > 0 { + tableSizePct = float32(100.0 * float64(progress.TargetTableSize) / float64(progress.SourceTableSize)) + } + resp.TableCopyState[table].RowsCopied = progress.TargetRowCount + resp.TableCopyState[table].RowsTotal = progress.SourceRowCount + resp.TableCopyState[table].RowsPercentage = rowCountPct + resp.TableCopyState[table].BytesCopied = progress.TargetTableSize + resp.TableCopyState[table].BytesTotal = progress.SourceTableSize + resp.TableCopyState[table].BytesPercentage = tableSizePct + } + } + + workflow, err := s.GetWorkflow(ctx, req.Keyspace, req.Workflow, false) + if err != nil { + return nil, err + } + + // The stream key is target keyspace/tablet alias, e.g. 0/test-0000000100. + // We sort the keys for intuitive and consistent output. 
+ streamKeys := make([]string, 0, len(workflow.ShardStreams)) + for streamKey := range workflow.ShardStreams { + streamKeys = append(streamKeys, streamKey) + } + sort.Strings(streamKeys) + resp.ShardStreams = make(map[string]*vtctldatapb.WorkflowStatusResponse_ShardStreams, len(streamKeys)) + for _, streamKey := range streamKeys { + streams := workflow.ShardStreams[streamKey].GetStreams() + keyParts := strings.Split(streamKey, "/") + if len(keyParts) != 2 { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected stream key format in: %s ; expect /", + streamKey) + } + // We want to use target keyspace/shard as the map key for the + // response, e.g. customer/-80. + ksShard := fmt.Sprintf("%s/%s", req.Keyspace, keyParts[0]) + resp.ShardStreams[ksShard] = &vtctldatapb.WorkflowStatusResponse_ShardStreams{} + resp.ShardStreams[ksShard].Streams = make([]*vtctldatapb.WorkflowStatusResponse_ShardStreamState, len(streams)) + for i, st := range streams { + info := []string{} + ts := &vtctldatapb.WorkflowStatusResponse_ShardStreamState{} + if st.State == binlogdatapb.VReplicationWorkflowState_Error.String() { + info = append(info, st.Message) + } else if st.Position == "" { + info = append(info, "VStream has not started") + } else { + now := time.Now().Nanosecond() + updateLag := int64(now) - st.TimeUpdated.Seconds + if updateLag > 0*1e9 { + info = append(info, "VStream may not be running") + } + txLag := int64(now) - st.TransactionTimestamp.Seconds + info = append(info, fmt.Sprintf("VStream Lag: %ds", txLag/1e9)) + if st.TransactionTimestamp.Seconds > 0 { // if no events occur after copy phase, TransactionTimeStamp can be 0 + info = append(info, fmt.Sprintf("; Tx time: %s.", time.Unix(st.TransactionTimestamp.Seconds, 0).Format(time.ANSIC))) + } + } + ts.Id = int32(st.Id) + ts.Tablet = st.Tablet + ts.SourceShard = fmt.Sprintf("%s/%s", st.BinlogSource.Keyspace, st.BinlogSource.Shard) + ts.Position = st.Position + ts.Status = st.State + ts.Info = strings.Join(info, "; 
") + resp.ShardStreams[ksShard].Streams[i] = ts + } + } + + return resp, nil +} + +// GetCopyProgress returns the progress of all tables being copied in the +// workflow. +func (s *Server) GetCopyProgress(ctx context.Context, ts *trafficSwitcher, state *State) (*copyProgress, error) { + getTablesQuery := "select distinct table_name from _vt.copy_state cs, _vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = %d" + getRowCountQuery := "select table_name, table_rows, data_length from information_schema.tables where table_schema = %s and table_name in (%s)" + tables := make(map[string]bool) + const MaxRows = 1000 + sourcePrimaries := make(map[*topodatapb.TabletAlias]bool) + for _, target := range ts.targets { + for id, bls := range target.Sources { + query := fmt.Sprintf(getTablesQuery, id) + p3qr, err := s.tmc.ExecuteFetchAsDba(ctx, target.GetPrimary().Tablet, true, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ + Query: []byte(query), + MaxRows: MaxRows, + }) + if err != nil { + return nil, err + } + if len(p3qr.Rows) < 1 { + continue + } + qr := sqltypes.Proto3ToResult(p3qr) + for i := 0; i < len(p3qr.Rows); i++ { + tables[qr.Rows[i][0].ToString()] = true + } + sourcesi, err := s.ts.GetShard(ctx, bls.Keyspace, bls.Shard) + if err != nil { + return nil, err + } + found := false + for existingSource := range sourcePrimaries { + if existingSource.Uid == sourcesi.PrimaryAlias.Uid { + found = true + } + } + if !found { + sourcePrimaries[sourcesi.PrimaryAlias] = true + } + } + } + if len(tables) == 0 { + return nil, nil + } + var tableList []string + targetRowCounts := make(map[string]int64) + sourceRowCounts := make(map[string]int64) + targetTableSizes := make(map[string]int64) + sourceTableSizes := make(map[string]int64) + + for table := range tables { + tableList = append(tableList, encodeString(table)) + targetRowCounts[table] = 0 + sourceRowCounts[table] = 0 + targetTableSizes[table] = 0 + sourceTableSizes[table] = 0 + } + + var getTableMetrics = func(tablet 
*topodatapb.Tablet, query string, rowCounts *map[string]int64, tableSizes *map[string]int64) error { + p3qr, err := s.tmc.ExecuteFetchAsDba(ctx, tablet, true, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ + Query: []byte(query), + MaxRows: uint64(len(tables)), + }) + if err != nil { + return err + } + qr := sqltypes.Proto3ToResult(p3qr) + for i := 0; i < len(qr.Rows); i++ { + table := qr.Rows[i][0].ToString() + rowCount, err := qr.Rows[i][1].ToCastInt64() + if err != nil { + return err + } + tableSize, err := qr.Rows[i][2].ToCastInt64() + if err != nil { + return err + } + (*rowCounts)[table] += rowCount + (*tableSizes)[table] += tableSize + } + return nil + } + sourceDbName := "" + for _, tsSource := range ts.sources { + sourceDbName = tsSource.GetPrimary().DbName() + break + } + if sourceDbName == "" { + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no sources found for workflow %s.%s", state.TargetKeyspace, state.Workflow) + } + targetDbName := "" + for _, tsTarget := range ts.targets { + targetDbName = tsTarget.GetPrimary().DbName() + break + } + if sourceDbName == "" || targetDbName == "" { + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "workflow %s.%s is incorrectly configured", state.TargetKeyspace, state.Workflow) + } + sort.Strings(tableList) // sort list for repeatability for mocking in tests + tablesStr := strings.Join(tableList, ",") + query := fmt.Sprintf(getRowCountQuery, encodeString(targetDbName), tablesStr) + for _, target := range ts.targets { + tablet := target.GetPrimary().Tablet + if err := getTableMetrics(tablet, query, &targetRowCounts, &targetTableSizes); err != nil { + return nil, err + } + } + + query = fmt.Sprintf(getRowCountQuery, encodeString(sourceDbName), tablesStr) + for source := range sourcePrimaries { + ti, err := s.ts.GetTablet(ctx, source) + tablet := ti.Tablet + if err != nil { + return nil, err + } + if err := getTableMetrics(tablet, query, &sourceRowCounts, &sourceTableSizes); err != nil { + 
return nil, err + } + } + + copyProgress := copyProgress{} + for table, rowCount := range targetRowCounts { + copyProgress[table] = &tableCopyProgress{ + TargetRowCount: rowCount, + TargetTableSize: targetTableSizes[table], + SourceRowCount: sourceRowCounts[table], + SourceTableSize: sourceTableSizes[table], + } + } + return ©Progress, nil +} + +// WorkflowUpdate is part of the vtctlservicepb.VtctldServer interface. +// It passes the embedded TabletRequest object to the given keyspace's +// target primary tablets that are participating in the given workflow. +func (s *Server) WorkflowUpdate(ctx context.Context, req *vtctldatapb.WorkflowUpdateRequest) (*vtctldatapb.WorkflowUpdateResponse, error) { + span, ctx := trace.NewSpan(ctx, "workflow.Server.WorkflowUpdate") + defer span.Finish() + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("workflow", req.TabletRequest.Workflow) + span.Annotate("cells", req.TabletRequest.Cells) + span.Annotate("tablet_types", req.TabletRequest.TabletTypes) + span.Annotate("on_ddl", req.TabletRequest.OnDdl) + span.Annotate("state", req.TabletRequest.State) + + vx := vexec.NewVExec(req.Keyspace, req.TabletRequest.Workflow, s.ts, s.tmc) + callback := func(ctx context.Context, tablet *topo.TabletInfo) (*querypb.QueryResult, error) { + res, err := s.tmc.UpdateVReplicationWorkflow(ctx, tablet.Tablet, req.TabletRequest) + if err != nil { + return nil, err + } + return res.Result, err + } + res, err := vx.CallbackContext(ctx, callback) + if err != nil { + if topo.IsErrType(err, topo.NoNode) { + return nil, vterrors.Wrapf(err, "%s keyspace does not exist", req.Keyspace) + } + return nil, err + } + + if len(res) == 0 { + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "the %s workflow does not exist in the %s keyspace", req.TabletRequest.Workflow, req.Keyspace) + } + + response := &vtctldatapb.WorkflowUpdateResponse{} + response.Summary = fmt.Sprintf("Successfully updated the %s workflow on (%d) target primary tablets in 
the %s keyspace", req.TabletRequest.Workflow, len(res), req.Keyspace) + details := make([]*vtctldatapb.WorkflowUpdateResponse_TabletInfo, 0, len(res)) + for tinfo, tres := range res { + result := &vtctldatapb.WorkflowUpdateResponse_TabletInfo{ + Tablet: tinfo.Alias, + Changed: tres.RowsAffected > 0, // Can be more than one with shard merges + } + details = append(details, result) + } + response.Details = details + return response, nil +} + +// validateSourceTablesExist validates that tables provided are present +// in the source keyspace. +func (s *Server) validateSourceTablesExist(ctx context.Context, sourceKeyspace string, ksTables, tables []string) error { + var missingTables []string + for _, table := range tables { + if schema.IsInternalOperationTableName(table) { + continue + } + found := false + + for _, ksTable := range ksTables { + if table == ksTable { + found = true + break + } + } + if !found { + missingTables = append(missingTables, table) + } + } + if len(missingTables) > 0 { + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "table(s) not found in source keyspace %s: %s", sourceKeyspace, strings.Join(missingTables, ",")) + } + return nil +} + +// addTablesToVSchema adds tables to an (unsharded) vschema if they are not already defined. +// If copyVSchema is true then we copy over the vschema table definitions from the source, +// otherwise we create empty ones. +// For a migrate workflow we do not copy the vschema since the source keyspace is just a +// proxy to import data into Vitess. 
+func (s *Server) addTablesToVSchema(ctx context.Context, sourceKeyspace string, targetVSchema *vschemapb.Keyspace, tables []string, copyVSchema bool) error { + if targetVSchema.Tables == nil { + targetVSchema.Tables = make(map[string]*vschemapb.Table) + } + if copyVSchema { + srcVSchema, err := s.ts.GetVSchema(ctx, sourceKeyspace) + if err != nil { + return vterrors.Wrapf(err, "failed to get vschema for source keyspace %s", sourceKeyspace) + } + for _, table := range tables { + srcTable, sok := srcVSchema.Tables[table] + if _, tok := targetVSchema.Tables[table]; sok && !tok { + targetVSchema.Tables[table] = srcTable + // If going from sharded to unsharded, then we need to remove the + // column vindexes as they are not valid for unsharded tables. + if srcVSchema.Sharded { + targetVSchema.Tables[table].ColumnVindexes = nil + } + } + } + } + // Ensure that each table at least has an empty definition on the target. + for _, table := range tables { + if _, tok := targetVSchema.Tables[table]; !tok { + targetVSchema.Tables[table] = &vschemapb.Table{} + } + } + return nil +} + +func (s *Server) collectTargetStreams(ctx context.Context, mz *materializer) ([]string, error) { + var shardTablets []string + var mu sync.Mutex + err := mz.forAllTargets(func(target *topo.ShardInfo) error { + var qrproto *querypb.QueryResult + var id int64 + var err error + targetPrimary, err := s.ts.GetTablet(ctx, target.PrimaryAlias) + if err != nil { + return vterrors.Wrapf(err, "GetTablet(%v) failed", target.PrimaryAlias) + } + query := fmt.Sprintf("select id from _vt.vreplication where db_name=%s and workflow=%s", encodeString(targetPrimary.DbName()), encodeString(mz.ms.Workflow)) + if qrproto, err = s.tmc.VReplicationExec(ctx, targetPrimary.Tablet, query); err != nil { + return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", targetPrimary.Tablet, query) + } + qr := sqltypes.Proto3ToResult(qrproto) + for i := 0; i < len(qr.Rows); i++ { + id, err = qr.Rows[i][0].ToCastInt64() + if err != nil 
{ + return err + } + mu.Lock() + shardTablets = append(shardTablets, fmt.Sprintf("%s:%d", target.ShardName(), id)) + mu.Unlock() + } + return nil + }) + if err != nil { + return nil, err + } + return shardTablets, nil +} + +func (s *Server) checkIfPreviousJournalExists(ctx context.Context, mz *materializer, migrationID int64) (bool, []string, error) { + forAllSources := func(f func(*topo.ShardInfo) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, sourceShard := range mz.sourceShards { + wg.Add(1) + go func(sourceShard *topo.ShardInfo) { + defer wg.Done() + + if err := f(sourceShard); err != nil { + allErrors.RecordError(err) + } + }(sourceShard) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) + } + + var ( + mu sync.Mutex + exists bool + tablets []string + ) + + err := forAllSources(func(si *topo.ShardInfo) error { + tablet, err := s.ts.GetTablet(ctx, si.PrimaryAlias) + if err != nil { + return err + } + if tablet == nil { + return nil + } + _, exists, err = s.CheckReshardingJournalExistsOnTablet(ctx, tablet.Tablet, migrationID) + if err != nil { + return err + } + if exists { + mu.Lock() + defer mu.Unlock() + tablets = append(tablets, tablet.AliasString()) + } + return nil + }) + return exists, tablets, err +} + +// deleteWorkflowVDiffData cleans up any potential VDiff related data associated +// with the workflow on the given tablet. 
+func (s *Server) deleteWorkflowVDiffData(ctx context.Context, tablet *topodatapb.Tablet, workflow string) { + if _, err := s.tmc.VDiff(ctx, tablet, &tabletmanagerdatapb.VDiffRequest{ + Keyspace: tablet.Keyspace, + Workflow: workflow, + Action: string(vdiff.DeleteAction), + ActionArg: vdiff.AllActionArg, + }); err != nil { + log.Errorf("Error deleting vdiff data for %s.%s workflow: %v", tablet.Keyspace, workflow, err) + } +} + +// optimizeCopyStateTable rebuilds the copy_state table to ensure the on-disk +// structures are minimal and optimized and resets the auto-inc value for +// subsequent inserts. +// This helps to ensure that the size, storage, and performance related factors +// for the table remain optimal over time and that we don't ever exhaust the +// available auto-inc values for the table. +// Note: it's not critical that this executes successfully any given time, it's +// only important that we try to do this periodically so that things stay in an +// optimal state over long periods of time. For this reason, the work is done +// asynchronously in the background on the given tablet and any failures are +// logged as warnings. Because it's done in the background we use the AllPrivs +// account to be sure that we don't execute the writes if READ_ONLY is set on +// the MySQL instance. 
func (s *Server) optimizeCopyStateTable(tablet *topodatapb.Tablet) {
	// Respect the optional concurrency limit on background jobs; if we cannot
	// acquire a slot we simply skip this round rather than block.
	if s.sem != nil {
		if !s.sem.TryAcquire(1) {
			log.Warningf("Deferring work to optimize the copy_state table on %q due to hitting the maximum concurrent background job limit.",
				tablet.Alias.String())
			return
		}
	}
	go func() {
		defer func() {
			if s.sem != nil {
				s.sem.Release(1)
			}
		}()
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
		defer cancel()
		sqlOptimizeTable := "optimize table _vt.copy_state"
		if _, err := s.tmc.ExecuteFetchAsAllPrivs(ctx, tablet, &tabletmanagerdatapb.ExecuteFetchAsAllPrivsRequest{
			Query:   []byte(sqlOptimizeTable),
			MaxRows: uint64(100), // always produces 1+rows with notes and status
		}); err != nil {
			if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Num == sqlerror.ERNoSuchTable { // the table may not exist
				return
			}
			log.Warningf("Failed to optimize the copy_state table on %q: %v", tablet.Alias.String(), err)
		}
		// NOTE(review): the auto-inc reset below is attempted even if OPTIMIZE
		// failed above — both steps are independent best-effort maintenance.
		// This will automatically set the value to 1 or the current max value in the
		// table, whichever is greater.
		sqlResetAutoInc := "alter table _vt.copy_state auto_increment = 1"
		if _, err := s.tmc.ExecuteFetchAsAllPrivs(ctx, tablet, &tabletmanagerdatapb.ExecuteFetchAsAllPrivsRequest{
			Query:   []byte(sqlResetAutoInc),
			MaxRows: uint64(0),
		}); err != nil {
			log.Warningf("Failed to reset the auto_increment value for the copy_state table on %q: %v",
				tablet.Alias.String(), err)
		}
	}()
}

// DropTargets cleans up target tables, shards and denied tables if a MoveTables/Reshard
// is cancelled.
func (s *Server) DropTargets(ctx context.Context, targetKeyspace, workflow string, keepData, keepRoutingRules, dryRun bool) (*[]string, error) {
	ts, state, err := s.getWorkflowState(ctx, targetKeyspace, workflow)
	if err != nil {
		log.Errorf("Failed to get VReplication workflow state for %s.%s: %v", targetKeyspace, workflow, err)
		return nil, err
	}

	// There is nothing to drop for a LookupVindex workflow.
	if ts.workflowType == binlogdatapb.VReplicationWorkflowType_CreateLookupIndex {
		return nil, nil
	}

	// Return an error if the workflow traffic is partially switched.
	if state.WritesSwitched || len(state.ReplicaCellsSwitched) > 0 || len(state.RdonlyCellsSwitched) > 0 {
		return nil, ErrWorkflowPartiallySwitched
	}

	// Migrate workflows have their own teardown path.
	if state.WorkflowType == TypeMigrate {
		_, err := s.finalizeMigrateWorkflow(ctx, targetKeyspace, workflow, "", true, keepData, keepRoutingRules, dryRun)
		return nil, err
	}

	ts.keepRoutingRules = keepRoutingRules
	var sw iswitcher
	if dryRun {
		sw = &switcherDryRun{ts: ts, drLog: NewLogRecorder()}
	} else {
		sw = &switcher{s: s, ts: ts}
	}
	// Hold the source (and, when different, target) keyspace locks for the
	// whole teardown. The deferred unlock takes &err so the unlock result can
	// be folded into the function's error.
	var tctx context.Context
	tctx, sourceUnlock, lockErr := sw.lockKeyspace(ctx, ts.SourceKeyspaceName(), "DropTargets")
	if lockErr != nil {
		ts.Logger().Errorf("Source LockKeyspace failed: %v", lockErr)
		return nil, lockErr
	}
	defer sourceUnlock(&err)
	ctx = tctx

	if ts.TargetKeyspaceName() != ts.SourceKeyspaceName() {
		tctx, targetUnlock, lockErr := sw.lockKeyspace(ctx, ts.TargetKeyspaceName(), "DropTargets")
		if lockErr != nil {
			ts.Logger().Errorf("Target LockKeyspace failed: %v", lockErr)
			return nil, lockErr
		}
		defer targetUnlock(&err)
		ctx = tctx
	}
	if !keepData {
		switch ts.MigrationType() {
		case binlogdatapb.MigrationType_TABLES:
			if err := sw.removeTargetTables(ctx); err != nil {
				return nil, err
			}
			if err := sw.dropSourceDeniedTables(ctx); err != nil {
				return nil, err
			}
			if err := sw.dropTargetDeniedTables(ctx); err != nil {
				return nil, err
			}
		case binlogdatapb.MigrationType_SHARDS:
			if err := sw.dropTargetShards(ctx); err != nil {
				return nil, err
			}
		}
	}
	if err := s.dropRelatedArtifacts(ctx, keepRoutingRules, sw); err != nil {
		return nil, err
	}
	if err := ts.TopoServer().RebuildSrvVSchema(ctx, nil); err != nil {
		return nil, err
	}
	return sw.logs(), nil
}

// buildTrafficSwitcher assembles a trafficSwitcher for the given workflow by
// reading its target streams and resolving the corresponding source shards
// and primaries (possibly from an external Vitess cluster).
func (s *Server) buildTrafficSwitcher(ctx context.Context, targetKeyspace, workflowName string) (*trafficSwitcher, error) {
	tgtInfo, err := BuildTargets(ctx, s.ts, s.tmc, targetKeyspace, workflowName)
	if err != nil {
		log.Infof("Error building targets: %s", err)
		return nil, err
	}
	targets, frozen, optCells, optTabletTypes := tgtInfo.Targets, tgtInfo.Frozen, tgtInfo.OptCells, tgtInfo.OptTabletTypes

	ts := &trafficSwitcher{
		ws:              s,
		logger:          logutil.NewConsoleLogger(),
		workflow:        workflowName,
		reverseWorkflow: ReverseWorkflowName(workflowName),
		id:              HashStreams(targetKeyspace, targets),
		targets:         targets,
		sources:         make(map[string]*MigrationSource),
		targetKeyspace:  targetKeyspace,
		frozen:          frozen,
		optCells:        optCells,
		optTabletTypes:  optTabletTypes,
		workflowType:    tgtInfo.WorkflowType,
		workflowSubType: tgtInfo.WorkflowSubType,
	}
	log.Infof("Migration ID for workflow %s: %d", workflowName, ts.id)
	sourceTopo := s.ts

	// Build the sources.
	for _, target := range targets {
		for _, bls := range target.Sources {
			// The first stream seen establishes the source keyspace, time
			// zones, and (optionally) the external cluster topo; all later
			// streams must agree with it.
			if ts.sourceKeyspace == "" {
				ts.sourceKeyspace = bls.Keyspace
				ts.sourceTimeZone = bls.SourceTimeZone
				ts.targetTimeZone = bls.TargetTimeZone
				ts.externalCluster = bls.ExternalCluster
				if ts.externalCluster != "" {
					externalTopo, err := s.ts.OpenExternalVitessClusterServer(ctx, ts.externalCluster)
					if err != nil {
						return nil, err
					}
					sourceTopo = externalTopo
					ts.externalTopo = externalTopo
				}
			} else if ts.sourceKeyspace != bls.Keyspace {
				return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "source keyspaces are mismatched across streams: %v vs %v", ts.sourceKeyspace, bls.Keyspace)
			}

			// Likewise the (sorted) table list must be identical across streams.
			if ts.tables == nil {
				for _, rule := range bls.Filter.Rules {
					ts.tables = append(ts.tables, rule.Match)
				}
				sort.Strings(ts.tables)
			} else {
				var tables []string
				for _, rule := range bls.Filter.Rules {
					tables = append(tables, rule.Match)
				}
				sort.Strings(tables)
				if !reflect.DeepEqual(ts.tables, tables) {
					return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "table lists are mismatched across streams: %v vs %v", ts.tables, tables)
				}
			}

			if _, ok := ts.sources[bls.Shard]; ok {
				continue
			}
			sourcesi, err := sourceTopo.GetShard(ctx, bls.Keyspace, bls.Shard)
			if err != nil {
				return nil, err
			}
			if sourcesi.PrimaryAlias == nil {
				return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "source shard %s/%s currently has no primary tablet",
					bls.Keyspace, bls.Shard)
			}
			sourcePrimary, err := sourceTopo.GetTablet(ctx, sourcesi.PrimaryAlias)
			if err != nil {
				return nil, err
			}
			ts.sources[bls.Shard] = NewMigrationSource(sourcesi, sourcePrimary)
		}
	}
	if ts.sourceKeyspace != ts.targetKeyspace || ts.externalCluster != "" {
		ts.migrationType = binlogdatapb.MigrationType_TABLES
	} else {
		// TODO(sougou): for shard migration, validate that source and target combined
		// keyranges match.
		ts.migrationType = binlogdatapb.MigrationType_SHARDS
		for sourceShard := range ts.sources {
			if _, ok := ts.targets[sourceShard]; ok {
				// If shards are overlapping, then this is a table migration.
				ts.migrationType = binlogdatapb.MigrationType_TABLES
				break
			}
		}
	}
	vs, err := sourceTopo.GetVSchema(ctx, ts.sourceKeyspace)
	if err != nil {
		return nil, err
	}
	ts.sourceKSSchema, err = vindexes.BuildKeyspaceSchema(vs, ts.sourceKeyspace)
	if err != nil {
		return nil, err
	}

	sourceShards, targetShards := ts.getSourceAndTargetShardsNames()

	ts.isPartialMigration, err = ts.isPartialMoveTables(sourceShards, targetShards)
	if err != nil {
		return nil, err
	}
	if ts.isPartialMigration {
		log.Infof("Migration is partial, for shards %+v", sourceShards)
	}
	return ts, nil
}

// dropRelatedArtifacts removes the reverse vreplication streams and,
// unless keepRoutingRules is set, the (shard) routing rules of the workflow.
func (s *Server) dropRelatedArtifacts(ctx context.Context, keepRoutingRules bool, sw iswitcher) error {
	if err := sw.dropSourceReverseVReplicationStreams(ctx); err != nil {
		return err
	}
	if !keepRoutingRules {
		if err := sw.deleteRoutingRules(ctx); err != nil {
			return err
		}
		if err := sw.deleteShardRoutingRules(ctx); err != nil {
			return err
		}
	}

	return nil
}

// dropSources cleans up source tables, shards and denied tables after a
// MoveTables/Reshard is completed.
func (s *Server) dropSources(ctx context.Context, ts *trafficSwitcher, removalType TableRemovalType, keepData, keepRoutingRules, force, dryRun bool) (*[]string, error) {
	var (
		sw  iswitcher
		err error
	)
	if dryRun {
		sw = &switcherDryRun{ts: ts, drLog: NewLogRecorder()}
	} else {
		sw = &switcher{ts: ts, s: s}
	}
	// Hold the source (and, when different, target) keyspace locks for the
	// whole teardown; the deferred unlocks fold their result into err.
	var tctx context.Context
	tctx, sourceUnlock, lockErr := sw.lockKeyspace(ctx, ts.SourceKeyspaceName(), "DropSources")
	if lockErr != nil {
		ts.Logger().Errorf("Source LockKeyspace failed: %v", lockErr)
		return nil, lockErr
	}
	defer sourceUnlock(&err)
	ctx = tctx
	if ts.TargetKeyspaceName() != ts.SourceKeyspaceName() {
		tctx, targetUnlock, lockErr := sw.lockKeyspace(ctx, ts.TargetKeyspaceName(), "DropSources")
		if lockErr != nil {
			ts.Logger().Errorf("Target LockKeyspace failed: %v", lockErr)
			return nil, lockErr
		}
		defer targetUnlock(&err)
		ctx = tctx
	}
	if !force {
		if err := sw.validateWorkflowHasCompleted(ctx); err != nil {
			ts.Logger().Errorf("Workflow has not completed, cannot DropSources: %v", err)
			return nil, err
		}
	}
	if !keepData {
		switch ts.MigrationType() {
		case binlogdatapb.MigrationType_TABLES:
			log.Infof("Deleting tables")
			if err := sw.removeSourceTables(ctx, removalType); err != nil {
				return nil, err
			}
			if err := sw.dropSourceDeniedTables(ctx); err != nil {
				return nil, err
			}
			if err := sw.dropTargetDeniedTables(ctx); err != nil {
				return nil, err
			}

		case binlogdatapb.MigrationType_SHARDS:
			log.Infof("Removing shards")
			if err := sw.dropSourceShards(ctx); err != nil {
				return nil, err
			}
		}
	}
	if err := s.dropArtifacts(ctx, keepRoutingRules, sw); err != nil {
		return nil, err
	}
	if err := ts.TopoServer().RebuildSrvVSchema(ctx, nil); err != nil {
		return nil, err
	}

	return sw.logs(), nil
}

// dropArtifacts removes the reverse and target vreplication streams and,
// unless keepRoutingRules is set, the (shard) routing rules of the workflow.
func (s *Server) dropArtifacts(ctx context.Context, keepRoutingRules bool, sw iswitcher) error {
	if err := sw.dropSourceReverseVReplicationStreams(ctx); err != nil {
		return err
	}
	if err := sw.dropTargetVReplicationStreams(ctx); err != nil {
		return err
	}
	if !keepRoutingRules {
		if err := sw.deleteRoutingRules(ctx); err != nil {
			return err
		}
		if err := sw.deleteShardRoutingRules(ctx); err != nil {
			return err
		}
	}

	return nil
}

// DeleteShard will do all the necessary changes in the topology server
// to entirely remove a shard.
func (s *Server) DeleteShard(ctx context.Context, keyspace, shard string, recursive, evenIfServing bool) error {
	// Read the Shard object. If it's not there, try to clean up
	// the topology anyway.
	shardInfo, err := s.ts.GetShard(ctx, keyspace, shard)
	if err != nil {
		if topo.IsErrType(err, topo.NoNode) {
			log.Infof("Shard %v/%v doesn't seem to exist, cleaning up any potential leftover", keyspace, shard)
			return s.ts.DeleteShard(ctx, keyspace, shard)
		}
		return err
	}

	servingCells, err := s.ts.GetShardServingCells(ctx, shardInfo)
	if err != nil {
		return err
	}
	// Check the Serving map for the shard, we don't want to
	// remove a serving shard if not absolutely sure.
	if !evenIfServing && len(servingCells) > 0 {
		return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "shard %v/%v is still serving, cannot delete it, use the even-if-serving flag if needed", keyspace, shard)
	}

	cells, err := s.ts.GetCellInfoNames(ctx)
	if err != nil {
		return err
	}

	// Go through all the cells.
	for _, cell := range cells {
		var aliases []*topodatapb.TabletAlias

		// Get the ShardReplication object for that cell. Try
		// to find all tablets that may belong to our shard.
		sri, err := s.ts.GetShardReplication(ctx, cell, keyspace, shard)
		switch {
		case topo.IsErrType(err, topo.NoNode):
			// No ShardReplication object. It means the
			// topo is inconsistent. Let's read all the
			// tablets for that cell, and if we find any
			// in our keyspace / shard, either abort or
			// try to delete them.
			aliases, err = s.ts.GetTabletAliasesByCell(ctx, cell)
			if err != nil {
				return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "GetTabletsByCell(%v) failed: %v", cell, err)
			}
		case err == nil:
			// We found a ShardReplication object. We
			// trust it to have all tablet records.
			aliases = make([]*topodatapb.TabletAlias, len(sri.Nodes))
			for i, n := range sri.Nodes {
				aliases[i] = n.TabletAlias
			}
		default:
			return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "GetShardReplication(%v, %v, %v) failed: %v", cell, keyspace, shard, err)
		}

		// Get the corresponding Tablet records. Note
		// GetTabletMap ignores ErrNoNode, and it's good for
		// our purpose, it means a tablet was deleted but is
		// still referenced.
		tabletMap, err := s.ts.GetTabletMap(ctx, aliases)
		if err != nil {
			return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "GetTabletMap() failed: %v", err)
		}

		// Remove the tablets that don't belong to our
		// keyspace/shard from the map.
		for a, ti := range tabletMap {
			if ti.Keyspace != keyspace || ti.Shard != shard {
				delete(tabletMap, a)
			}
		}

		// Now see if we need to DeleteTablet, and if we can, do it.
		if len(tabletMap) > 0 {
			if !recursive {
				return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "shard %v/%v still has %v tablets in cell %v; use --recursive or remove them manually", keyspace, shard, len(tabletMap), cell)
			}

			log.Infof("Deleting all tablets in shard %v/%v cell %v", keyspace, shard, cell)
			for tabletAlias, tabletInfo := range tabletMap {
				// We don't care about scrapping or updating the replication graph,
				// because we're about to delete the entire replication graph.
				log.Infof("Deleting tablet %v", tabletAlias)
				if err := s.ts.DeleteTablet(ctx, tabletInfo.Alias); err != nil && !topo.IsErrType(err, topo.NoNode) {
					// We don't want to continue if a DeleteTablet fails for
					// any good reason (other than missing tablet, in which
					// case it's just a topology server inconsistency we can
					// ignore). If we continue and delete the replication
					// graph, the tablet record will be orphaned, since
					// we'll no longer know it belongs to this shard.
					//
					// If the problem is temporary, or resolved externally, re-running
					// DeleteShard will skip over tablets that were already deleted.
					return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "can't delete tablet %v: %v", tabletAlias, err)
				}
			}
		}
	}

	// Try to remove the replication graph and serving graph in each cell,
	// regardless of its existence.
	for _, cell := range cells {
		if err := s.ts.DeleteShardReplication(ctx, cell, keyspace, shard); err != nil && !topo.IsErrType(err, topo.NoNode) {
			log.Warningf("Cannot delete ShardReplication in cell %v for %v/%v: %v", cell, keyspace, shard, err)
		}
	}

	return s.ts.DeleteShard(ctx, keyspace, shard)
}

// updateShardRecords updates the shard records based on 'from' or 'to' direction.
func (s *Server) updateShardRecords(ctx context.Context, keyspace string, shards []*topo.ShardInfo, cells []string,
	servedType topodatapb.TabletType, isFrom bool, clearSourceShards bool, logger logutil.Logger) (err error) {
	return topotools.UpdateShardRecords(ctx, s.ts, s.tmc, keyspace, shards, cells, servedType, isFrom, clearSourceShards, logger)
}

// refreshPrimaryTablets will just RPC-ping all the primary tablets with RefreshState
func (s *Server) refreshPrimaryTablets(ctx context.Context, shards []*topo.ShardInfo) error {
	wg := sync.WaitGroup{}
	rec := concurrency.AllErrorRecorder{}
	for _, si := range shards {
		wg.Add(1)
		go func(si *topo.ShardInfo) {
			defer wg.Done()
			ti, err := s.ts.GetTablet(ctx, si.PrimaryAlias)
			if err != nil {
				rec.RecordError(err)
				return
			}

			if err := s.tmc.RefreshState(ctx, ti.Tablet); err != nil {
				rec.RecordError(err)
			} else {
				log.Infof("%v responded", topoproto.TabletAliasString(si.PrimaryAlias))
			}
		}(si)
	}
	wg.Wait()
	return rec.Error()
}

// finalizeMigrateWorkflow deletes the streams for the
Migrate workflow. +// We only cleanup the target for external sources. +func (s *Server) finalizeMigrateWorkflow(ctx context.Context, targetKeyspace, workflow, tableSpecs string, cancel, keepData, keepRoutingRules, dryRun bool) (*[]string, error) { + ts, err := s.buildTrafficSwitcher(ctx, targetKeyspace, workflow) + if err != nil { + ts.Logger().Errorf("buildTrafficSwitcher failed: %v", err) + return nil, err + } + var sw iswitcher + if dryRun { + sw = &switcherDryRun{ts: ts, drLog: NewLogRecorder()} + } else { + sw = &switcher{s: s, ts: ts} + } + var tctx context.Context + tctx, targetUnlock, lockErr := sw.lockKeyspace(ctx, ts.TargetKeyspaceName(), "completeMigrateWorkflow") + if lockErr != nil { + ts.Logger().Errorf("Target LockKeyspace failed: %v", lockErr) + return nil, lockErr + } + defer targetUnlock(&err) + ctx = tctx + if err := sw.dropTargetVReplicationStreams(ctx); err != nil { + return nil, err + } + if !cancel { + if err := sw.addParticipatingTablesToKeyspace(ctx, targetKeyspace, tableSpecs); err != nil { + return nil, err + } + if err := ts.TopoServer().RebuildSrvVSchema(ctx, nil); err != nil { + return nil, err + } + } + log.Infof("cancel is %t, keepData %t", cancel, keepData) + if cancel && !keepData { + if err := sw.removeTargetTables(ctx); err != nil { + return nil, err + } + } + return sw.logs(), nil +} + +// WorkflowSwitchTraffic switches traffic in the direction passed for specified tablet types. 
func (s *Server) WorkflowSwitchTraffic(ctx context.Context, req *vtctldatapb.WorkflowSwitchTrafficRequest) (*vtctldatapb.WorkflowSwitchTrafficResponse, error) {
	var (
		dryRunResults                     []string
		rdDryRunResults, wrDryRunResults  *[]string
		hasReplica, hasRdonly, hasPrimary bool
	)
	// Resolve the operation timeout; fall back to the default when the request
	// does not set one.
	timeout, set, err := protoutil.DurationFromProto(req.Timeout)
	if err != nil {
		err = vterrors.Wrapf(err, "unable to parse Timeout into a valid duration")
		return nil, err
	}
	if !set {
		timeout = defaultDuration
	}
	ts, startState, err := s.getWorkflowState(ctx, req.Keyspace, req.Workflow)
	if err != nil {
		return nil, err
	}

	// Migrate workflows have their own completion flow; SwitchTraffic does not
	// apply to them.
	if startState.WorkflowType == TypeMigrate {
		return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid action for Migrate workflow: SwitchTraffic")
	}

	maxReplicationLagAllowed, set, err := protoutil.DurationFromProto(req.MaxReplicationLagAllowed)
	if err != nil {
		err = vterrors.Wrapf(err, "unable to parse MaxReplicationLagAllowed into a valid duration")
		return nil, err
	}
	if !set {
		maxReplicationLagAllowed = defaultDuration
	}
	direction := TrafficSwitchDirection(req.Direction)
	// When reversing, operate on the reverse workflow (which lives in the
	// source keyspace) instead of the one named in the request.
	if direction == DirectionBackward {
		ts, startState, err = s.getWorkflowState(ctx, startState.SourceKeyspace, ts.reverseWorkflow)
		if err != nil {
			return nil, err
		}
	}
	// Preflight: refuse to switch if the workflow is frozen, copying, errored,
	// too far behind, or if tablets cannot be refreshed.
	reason, err := s.canSwitch(ctx, ts, startState, direction, int64(maxReplicationLagAllowed.Seconds()))
	if err != nil {
		return nil, err
	}
	if reason != "" {
		return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cannot switch traffic for workflow %s at this time: %s", startState.Workflow, reason)
	}
	hasReplica, hasRdonly, hasPrimary, err = parseTabletTypes(req.TabletTypes)
	if err != nil {
		return nil, err
	}
	// Reads (REPLICA/RDONLY) are switched before writes (PRIMARY).
	if hasReplica || hasRdonly {
		if rdDryRunResults, err = s.switchReads(ctx, req, ts, startState, timeout, false, direction); err != nil {
			return nil, err
		}
		log.Infof("Switch Reads done for workflow %s.%s", req.Keyspace, req.Workflow)
	}
	if rdDryRunResults != nil {
		dryRunResults = append(dryRunResults, *rdDryRunResults...)
	}
	if hasPrimary {
		if _, wrDryRunResults, err = s.switchWrites(ctx, req, ts, timeout, false); err != nil {
			return nil, err
		}
		log.Infof("Switch Writes done for workflow %s.%s", req.Keyspace, req.Workflow)
	}

	if wrDryRunResults != nil {
		dryRunResults = append(dryRunResults, *wrDryRunResults...)
	}
	if req.DryRun && len(dryRunResults) == 0 {
		dryRunResults = append(dryRunResults, "No changes required")
	}
	cmd := "SwitchTraffic"
	if direction == DirectionBackward {
		cmd = "ReverseTraffic"
	}
	log.Infof("%s done for workflow %s.%s", cmd, req.Keyspace, req.Workflow)
	resp := &vtctldatapb.WorkflowSwitchTrafficResponse{}
	if req.DryRun {
		resp.Summary = fmt.Sprintf("%s dry run results for workflow %s.%s at %v", cmd, req.Keyspace, req.Workflow, time.Now().UTC().Format(time.RFC822))
		resp.DryRunResults = dryRunResults
	} else {
		log.Infof("SwitchTraffic done for workflow %s.%s", req.Keyspace, req.Workflow)
		resp.Summary = fmt.Sprintf("%s was successful for workflow %s.%s", cmd, req.Keyspace, req.Workflow)
		// Reload the state after the SwitchTraffic operation
		// and return that as a string.
		keyspace := req.Keyspace
		workflow := req.Workflow
		if direction == DirectionBackward {
			keyspace = startState.SourceKeyspace
			workflow = ts.reverseWorkflow
		}
		resp.StartState = startState.String()
		log.Infof("Before reloading workflow state after switching traffic: %+v\n", resp.StartState)
		_, currentState, err := s.getWorkflowState(ctx, keyspace, workflow)
		if err != nil {
			// Reporting the reload failure in CurrentState is deliberate: the
			// switch itself succeeded, so we don't fail the whole RPC here.
			resp.CurrentState = fmt.Sprintf("Error reloading workflow state after switching traffic: %v", err)
		} else {
			resp.CurrentState = currentState.String()
		}
	}
	return resp, nil
}

// switchReads is a generic way of switching read traffic for a workflow.
+func (s *Server) switchReads(ctx context.Context, req *vtctldatapb.WorkflowSwitchTrafficRequest, ts *trafficSwitcher, state *State, timeout time.Duration, cancel bool, direction TrafficSwitchDirection) (*[]string, error) { + var roTabletTypes []topodatapb.TabletType + // When we are switching all traffic we also get the primary tablet type, which we need to + // filter out for switching reads. + for _, tabletType := range req.TabletTypes { + if tabletType != topodatapb.TabletType_PRIMARY { + roTabletTypes = append(roTabletTypes, tabletType) + } + } + + roTypesToSwitchStr := topoproto.MakeStringTypeCSV(roTabletTypes) + var switchReplica, switchRdonly bool + for _, roType := range roTabletTypes { + switch roType { + case topodatapb.TabletType_REPLICA: + switchReplica = true + case topodatapb.TabletType_RDONLY: + switchRdonly = true + } + } + + // Consistently handle errors by logging and returning them. + handleError := func(message string, err error) (*[]string, error) { + ts.Logger().Error(err) + return nil, err + } + + log.Infof("Switching reads: %s.%s tablet types: %s, cells: %s, workflow state: %s", ts.targetKeyspace, ts.workflow, roTypesToSwitchStr, ts.optCells, state.String()) + if !switchReplica && !switchRdonly { + return handleError("invalid tablet types", vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "tablet types must be REPLICA or RDONLY: %s", roTypesToSwitchStr)) + } + if !ts.isPartialMigration { // shard level traffic switching is all or nothing + if direction == DirectionBackward && switchReplica && len(state.ReplicaCellsSwitched) == 0 { + return handleError("invalid request", vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "requesting reversal of read traffic for REPLICAs but REPLICA reads have not been switched")) + } + if direction == DirectionBackward && switchRdonly && len(state.RdonlyCellsSwitched) == 0 { + return handleError("invalid request", vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "requesting reversal of SwitchReads for RDONLYs 
but RDONLY reads have not been switched")) + } + } + var cells = req.Cells + // If no cells were provided in the command then use the value from the workflow. + if len(cells) == 0 && ts.optCells != "" { + cells = strings.Split(strings.TrimSpace(ts.optCells), ",") + } + + // If there are no rdonly tablets in the cells ask to switch rdonly tablets as well so that routing rules + // are updated for rdonly as well. Otherwise vitess will not know that the workflow has completed and will + // incorrectly report that not all reads have been switched. User currently is forced to switch non-existent + // rdonly tablets. + if switchReplica && !switchRdonly { + var err error + rdonlyTabletsExist, err := topotools.DoCellsHaveRdonlyTablets(ctx, s.ts, cells) + if err != nil { + return nil, err + } + if !rdonlyTabletsExist { + roTabletTypes = append(roTabletTypes, topodatapb.TabletType_RDONLY) + } + } + + // If journals exist notify user and fail. + journalsExist, _, err := ts.checkJournals(ctx) + if err != nil { + return handleError(fmt.Sprintf("failed to read journal in the %s keyspace", ts.SourceKeyspaceName()), err) + } + if journalsExist { + log.Infof("Found a previous journal entry for %d", ts.id) + } + var sw iswitcher + if req.DryRun { + sw = &switcherDryRun{ts: ts, drLog: NewLogRecorder()} + } else { + sw = &switcher{ts: ts, s: s} + } + + if err := ts.validate(ctx); err != nil { + return handleError("workflow validation failed", err) + } + + // For reads, locking the source keyspace is sufficient. 
+ ctx, unlock, lockErr := sw.lockKeyspace(ctx, ts.SourceKeyspaceName(), "SwitchReads") + if lockErr != nil { + return handleError(fmt.Sprintf("failed to lock the %s keyspace", ts.SourceKeyspaceName()), lockErr) + } + defer unlock(&err) + + if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + if ts.isPartialMigration { + ts.Logger().Infof("Partial migration, skipping switchTableReads as traffic is all or nothing per shard and overridden for reads AND writes in the ShardRoutingRule created when switching writes.") + } else if err := sw.switchTableReads(ctx, cells, roTabletTypes, direction); err != nil { + return handleError("failed to switch read traffic for the tables", err) + } + return sw.logs(), nil + } + ts.Logger().Infof("About to switchShardReads: %+v, %+s, %+v", cells, roTypesToSwitchStr, direction) + if err := sw.switchShardReads(ctx, cells, roTabletTypes, direction); err != nil { + return handleError("failed to switch read traffic for the shards", err) + } + + ts.Logger().Infof("switchShardReads Completed: %+v, %+s, %+v", cells, roTypesToSwitchStr, direction) + if err := s.ts.ValidateSrvKeyspace(ctx, ts.targetKeyspace, strings.Join(cells, ",")); err != nil { + err2 := vterrors.Wrapf(err, "after switching shard reads, found SrvKeyspace for %s is corrupt in cell %s", + ts.targetKeyspace, strings.Join(cells, ",")) + return handleError("failed to validate SrvKeyspace record", err2) + } + return sw.logs(), nil +} + +// switchWrites is a generic way of migrating write traffic for a workflow. +func (s *Server) switchWrites(ctx context.Context, req *vtctldatapb.WorkflowSwitchTrafficRequest, ts *trafficSwitcher, timeout time.Duration, + cancel bool) (journalID int64, dryRunResults *[]string, err error) { + + var sw iswitcher + if req.DryRun { + sw = &switcherDryRun{ts: ts, drLog: NewLogRecorder()} + } else { + sw = &switcher{ts: ts, s: s} + } + + // Consistently handle errors by logging and returning them. 
+ handleError := func(message string, err error) (int64, *[]string, error) { + werr := vterrors.Errorf(vtrpcpb.Code_INTERNAL, fmt.Sprintf("%s: %v", message, err)) + ts.Logger().Error(werr) + return 0, nil, werr + } + + if ts.frozen { + ts.Logger().Warningf("Writes have already been switched for workflow %s, nothing to do here", ts.WorkflowName()) + return 0, sw.logs(), nil + } + + if err := ts.validate(ctx); err != nil { + return handleError("workflow validation failed", err) + } + + if req.EnableReverseReplication { + if err := areTabletsAvailableToStreamFrom(ctx, req, ts, ts.TargetKeyspaceName(), ts.TargetShards()); err != nil { + return handleError(fmt.Sprintf("no tablets were available to stream from in the %s keyspace", ts.SourceKeyspaceName()), err) + } + } + + // Need to lock both source and target keyspaces. + tctx, sourceUnlock, lockErr := sw.lockKeyspace(ctx, ts.SourceKeyspaceName(), "SwitchWrites") + if lockErr != nil { + return handleError(fmt.Sprintf("failed to lock the %s keyspace", ts.SourceKeyspaceName()), lockErr) + } + ctx = tctx + defer sourceUnlock(&err) + if ts.TargetKeyspaceName() != ts.SourceKeyspaceName() { + tctx, targetUnlock, lockErr := sw.lockKeyspace(ctx, ts.TargetKeyspaceName(), "SwitchWrites") + if lockErr != nil { + return handleError(fmt.Sprintf("failed to lock the %s keyspace", ts.TargetKeyspaceName()), lockErr) + } + ctx = tctx + defer targetUnlock(&err) + } + + // Find out if the target is using any sequence tables for auto_increment + // value generation. If so, then we'll need to ensure that they are + // initialized properly before allowing new writes on the target. + sequenceMetadata := make(map[string]*sequenceMetadata) + // For sharded to sharded migrations the sequence must already be setup. + // For reshards the sequence usage is not changed. 
+ if req.InitializeTargetSequences && ts.workflowType == binlogdatapb.VReplicationWorkflowType_MoveTables && + ts.SourceKeyspaceSchema() != nil && ts.SourceKeyspaceSchema().Keyspace != nil && + !ts.SourceKeyspaceSchema().Keyspace.Sharded { + sequenceMetadata, err = ts.getTargetSequenceMetadata(ctx) + if err != nil { + return handleError(fmt.Sprintf("failed to get the sequence information in the %s keyspace", ts.TargetKeyspaceName()), err) + } + } + + // If no journals exist, sourceWorkflows will be initialized by sm.MigrateStreams. + journalsExist, sourceWorkflows, err := ts.checkJournals(ctx) + if err != nil { + return handleError(fmt.Sprintf("failed to read journal in the %s keyspace", ts.SourceKeyspaceName()), err) + } + if !journalsExist { + ts.Logger().Infof("No previous journals were found. Proceeding normally.") + sm, err := BuildStreamMigrator(ctx, ts, cancel) + if err != nil { + return handleError("failed to migrate the workflow streams", err) + } + if cancel { + sw.cancelMigration(ctx, sm) + return 0, sw.logs(), nil + } + + ts.Logger().Infof("Stopping streams") + sourceWorkflows, err = sw.stopStreams(ctx, sm) + if err != nil { + for key, streams := range sm.Streams() { + for _, stream := range streams { + ts.Logger().Errorf("stream in stopStreams: key %s shard %s stream %+v", key, stream.BinlogSource.Shard, stream.BinlogSource) + } + } + sw.cancelMigration(ctx, sm) + return handleError("failed to stop the workflow streams", err) + } + + ts.Logger().Infof("Stopping source writes") + if err := sw.stopSourceWrites(ctx); err != nil { + sw.cancelMigration(ctx, sm) + return handleError(fmt.Sprintf("failed to stop writes in the %s keyspace", ts.SourceKeyspaceName()), err) + } + + if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + ts.Logger().Infof("Executing LOCK TABLES on source tables %d times", lockTablesCycles) + // Doing this twice with a pause in-between to catch any writes that may have raced in between + // the tablet's deny list check and 
the first mysqld side table lock. + for cnt := 1; cnt <= lockTablesCycles; cnt++ { + if err := ts.executeLockTablesOnSource(ctx); err != nil { + sw.cancelMigration(ctx, sm) + return handleError(fmt.Sprintf("failed to execute LOCK TABLES (attempt %d of %d) on sources", cnt, lockTablesCycles), err) + } + // No need to UNLOCK the tables as the connection was closed once the locks were acquired + // and thus the locks released. + time.Sleep(lockTablesCycleDelay) + } + } + + ts.Logger().Infof("Waiting for streams to catchup") + if err := sw.waitForCatchup(ctx, timeout); err != nil { + sw.cancelMigration(ctx, sm) + return handleError("failed to sync up replication between the source and target", err) + } + + ts.Logger().Infof("Migrating streams") + if err := sw.migrateStreams(ctx, sm); err != nil { + sw.cancelMigration(ctx, sm) + return handleError("failed to migrate the workflow streams", err) + } + + ts.Logger().Infof("Resetting sequences") + if err := sw.resetSequences(ctx); err != nil { + sw.cancelMigration(ctx, sm) + return handleError("failed to reset the sequences", err) + } + + ts.Logger().Infof("Creating reverse streams") + if err := sw.createReverseVReplication(ctx); err != nil { + sw.cancelMigration(ctx, sm) + return handleError("failed to create the reverse vreplication streams", err) + } + } else { + if cancel { + return handleError("invalid cancel", vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "traffic switching has reached the point of no return, cannot cancel")) + } + ts.Logger().Infof("Journals were found. Completing the left over steps.") + // Need to gather positions in case all journals were not created. + if err := ts.gatherPositions(ctx); err != nil { + return handleError("failed to gather replication positions", err) + } + } + + // This is the point of no return. Once a journal is created, + // traffic can be redirected to target shards. 
+ if err := sw.createJournals(ctx, sourceWorkflows); err != nil { + return handleError("failed to create the journal", err) + } + // Initialize any target sequences, if there are any, before allowing new writes. + if req.InitializeTargetSequences && len(sequenceMetadata) > 0 { + // Writes are blocked so we can safely initialize the sequence tables but + // we also want to use a shorter timeout than the parent context. + // We use up at most half of the overall timeout. + initSeqCtx, cancel := context.WithTimeout(ctx, timeout/2) + defer cancel() + if err := sw.initializeTargetSequences(initSeqCtx, sequenceMetadata); err != nil { + return handleError(fmt.Sprintf("failed to initialize the sequences used in the %s keyspace", ts.TargetKeyspaceName()), err) + } + } + if err := sw.allowTargetWrites(ctx); err != nil { + return handleError(fmt.Sprintf("failed to allow writes in the %s keyspace", ts.TargetKeyspaceName()), err) + } + if err := sw.changeRouting(ctx); err != nil { + return handleError("failed to update the routing rules", err) + } + if err := sw.streamMigraterfinalize(ctx, ts, sourceWorkflows); err != nil { + return handleError("failed to finalize the traffic switch", err) + } + if req.EnableReverseReplication { + if err := sw.startReverseVReplication(ctx); err != nil { + return handleError("failed to start the reverse workflow", err) + } + } + + if err := sw.freezeTargetVReplication(ctx); err != nil { + return handleError(fmt.Sprintf("failed to freeze the workflow in the %s keyspace", ts.TargetKeyspaceName()), err) + } + + return ts.id, sw.logs(), nil +} + +func (s *Server) canSwitch(ctx context.Context, ts *trafficSwitcher, state *State, direction TrafficSwitchDirection, maxAllowedReplLagSecs int64) (reason string, err error) { + if direction == DirectionForward && state.WritesSwitched || + direction == DirectionBackward && !state.WritesSwitched { + log.Infof("writes already switched no need to check lag") + return "", nil + } + wf, err := s.GetWorkflow(ctx, 
state.TargetKeyspace, state.Workflow, false) + if err != nil { + return "", err + } + for _, stream := range wf.ShardStreams { + for _, st := range stream.GetStreams() { + if st.Message == Frozen { + return cannotSwitchFrozen, nil + } + // If no new events have been replicated after the copy phase then it will be 0. + if vreplLag := time.Now().Unix() - st.TimeUpdated.Seconds; vreplLag > maxAllowedReplLagSecs { + return fmt.Sprintf(cannotSwitchHighLag, vreplLag, maxAllowedReplLagSecs), nil + } + switch st.State { + case binlogdatapb.VReplicationWorkflowState_Copying.String(): + return cannotSwitchCopyIncomplete, nil + case binlogdatapb.VReplicationWorkflowState_Error.String(): + return cannotSwitchError, nil + } + } + } + + // Ensure that the tablets on both sides are in good shape as we make this same call in the + // process and an error will cause us to backout. + refreshErrors := strings.Builder{} + var m sync.Mutex + var wg sync.WaitGroup + rtbsCtx, cancel := context.WithTimeout(ctx, shardTabletRefreshTimeout) + defer cancel() + refreshTablets := func(shards []*topo.ShardInfo, stype string) { + defer wg.Done() + for _, si := range shards { + if partial, partialDetails, err := topotools.RefreshTabletsByShard(rtbsCtx, s.ts, s.tmc, si, nil, ts.Logger()); err != nil || partial { + m.Lock() + refreshErrors.WriteString(fmt.Sprintf("failed to successfully refresh all tablets in the %s/%s %s shard (%v):\n %v\n", + si.Keyspace(), si.ShardName(), stype, err, partialDetails)) + m.Unlock() + } + } + } + wg.Add(1) + go refreshTablets(ts.SourceShards(), "source") + wg.Add(1) + go refreshTablets(ts.TargetShards(), "target") + wg.Wait() + if refreshErrors.Len() > 0 { + return fmt.Sprintf(cannotSwitchFailedTabletRefresh, refreshErrors.String()), nil + } + return "", nil +} + +// VReplicationExec executes a query remotely using the DBA pool. 
+func (s *Server) VReplicationExec(ctx context.Context, tabletAlias *topodatapb.TabletAlias, query string) (*querypb.QueryResult, error) { + ti, err := s.ts.GetTablet(ctx, tabletAlias) + if err != nil { + return nil, err + } + return s.tmc.VReplicationExec(ctx, ti.Tablet, query) +} + +// CopySchemaShard copies the schema from a source tablet to the +// specified shard. The schema is applied directly on the primary of +// the destination shard, and is propagated to the replicas through +// binlogs. +func (s *Server) CopySchemaShard(ctx context.Context, sourceTabletAlias *topodatapb.TabletAlias, tables, excludeTables []string, includeViews bool, destKeyspace, destShard string, waitReplicasTimeout time.Duration, skipVerify bool) error { + destShardInfo, err := s.ts.GetShard(ctx, destKeyspace, destShard) + if err != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "GetShard(%v, %v) failed: %v", destKeyspace, destShard, err) + } + + if destShardInfo.PrimaryAlias == nil { + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no primary in shard record %v/%v. Consider running 'vtctl InitShardPrimary' in case of a new shard or reparenting the shard to fix the topology data", destKeyspace, destShard) + } + + diffs, err := schematools.CompareSchemas(ctx, s.ts, s.tmc, sourceTabletAlias, destShardInfo.PrimaryAlias, tables, excludeTables, includeViews) + if err != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "CopySchemaShard failed because schemas could not be compared initially: %v", err) + } + if diffs == nil { + // Return early because dest has already the same schema as source. 
+ return nil + } + + req := &tabletmanagerdatapb.GetSchemaRequest{Tables: tables, ExcludeTables: excludeTables, IncludeViews: includeViews} + sourceSd, err := schematools.GetSchema(ctx, s.ts, s.tmc, sourceTabletAlias, req) + if err != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "GetSchema(%v, %v, %v, %v) failed: %v", sourceTabletAlias, tables, excludeTables, includeViews, err) + } + + createSQLstmts := tmutils.SchemaDefinitionToSQLStrings(sourceSd) + + destTabletInfo, err := s.ts.GetTablet(ctx, destShardInfo.PrimaryAlias) + if err != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "GetTablet(%v) failed: %v", destShardInfo.PrimaryAlias, err) + } + for _, createSQL := range createSQLstmts { + err = s.applySQLShard(ctx, destTabletInfo, createSQL) + if err != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "creating a table failed."+ + " Most likely some tables already exist on the destination and differ from the source."+ + " Please remove all to be copied tables from the destination manually and run this command again."+ + " Full error: %v", err) + } + } + + // Remember the replication position after all the above were applied. + destPrimaryPos, err := s.tmc.PrimaryPosition(ctx, destTabletInfo.Tablet) + if err != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "CopySchemaShard: can't get replication position after schema applied: %v", err) + } + + // Although the copy was successful, we have to verify it to catch the case + // where the database already existed on the destination, but with different + // options e.g. a different character set. + // In that case, MySQL would have skipped our CREATE DATABASE IF NOT EXISTS + // statement. 
+ if !skipVerify { + diffs, err = schematools.CompareSchemas(ctx, s.ts, s.tmc, sourceTabletAlias, destShardInfo.PrimaryAlias, tables, excludeTables, includeViews) + if err != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "CopySchemaShard failed because schemas could not be compared finally: %v", err) + } + if diffs != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "CopySchemaShard was not successful because the schemas between the two tablets %v and %v differ: %v", sourceTabletAlias, destShardInfo.PrimaryAlias, diffs) + } + } + + // Notify Replicas to reload schema. This is best-effort. + reloadCtx, cancel := context.WithTimeout(ctx, waitReplicasTimeout) + defer cancel() + _, ok := schematools.ReloadShard(reloadCtx, s.ts, s.tmc, logutil.NewMemoryLogger(), destKeyspace, destShard, destPrimaryPos, nil, true) + if !ok { + log.Error(vterrors.Errorf(vtrpcpb.Code_INTERNAL, "CopySchemaShard: failed to reload schema on all replicas")) + } + + return err +} + +// applySQLShard applies a given SQL change on a given tablet alias. It allows executing arbitrary +// SQL statements, but doesn't return any results, so it's only useful for SQL statements +// that would be run for their effects (e.g., CREATE). +// It works by applying the SQL statement on the shard's primary tablet with replication turned on. +// Thus it should be used only for changes that can be applied on a live instance without causing issues; +// it shouldn't be used for anything that will require a pivot. +// The SQL statement string is expected to have {{.DatabaseName}} in place of the actual db name. 
+func (s *Server) applySQLShard(ctx context.Context, tabletInfo *topo.TabletInfo, change string) error { + filledChange, err := fillStringTemplate(change, map[string]string{"DatabaseName": tabletInfo.DbName()}) + if err != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "fillStringTemplate failed: %v", err) + } + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + // Need to make sure that replication is enabled since we're only applying + // the statement on primaries. + _, err = s.tmc.ApplySchema(ctx, tabletInfo.Tablet, &tmutils.SchemaChange{ + SQL: filledChange, + Force: false, + AllowReplication: true, + SQLMode: vreplication.SQLMode, + }) + return err +} + +// fillStringTemplate returns the string template filled. +func fillStringTemplate(tmpl string, vars any) (string, error) { + myTemplate := template.Must(template.New("").Parse(tmpl)) + data := new(bytes.Buffer) + if err := myTemplate.Execute(data, vars); err != nil { + return "", err + } + return data.String(), nil +} + +// prepareCreateLookup performs the preparatory steps for creating a +// Lookup Vindex. +func (s *Server) prepareCreateLookup(ctx context.Context, workflow, keyspace string, specs *vschemapb.Keyspace, continueAfterCopyWithOwner bool) (ms *vtctldatapb.MaterializeSettings, sourceVSchema, targetVSchema *vschemapb.Keyspace, err error) { + // Important variables are pulled out here. + var ( + vindexName string + vindex *vschemapb.Vindex + targetKeyspace string + targetTableName string + vindexFromCols []string + vindexToCol string + vindexIgnoreNulls bool + + sourceTableName string + // sourceTable is the supplied table info. + sourceTable *vschemapb.Table + // sourceVSchemaTable is the table info present in the vschema. + sourceVSchemaTable *vschemapb.Table + // sourceVindexColumns are computed from the input sourceTable. + sourceVindexColumns []string + + // Target table info. + createDDL string + materializeQuery string + ) + + // Validate input vindex. 
+ if specs == nil { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "no vindex provided") + } + if len(specs.Vindexes) != 1 { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "only one vindex must be specified") + } + vindexName = maps.Keys(specs.Vindexes)[0] + vindex = maps.Values(specs.Vindexes)[0] + if !strings.Contains(vindex.Type, "lookup") { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "vindex %s is not a lookup type", vindex.Type) + } + targetKeyspace, targetTableName, err = sqlparser.ParseTable(vindex.Params["table"]) + if err != nil || targetKeyspace == "" { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "vindex table name (%s) must be in the form .
", vindex.Params["table"]) + } + vindexFromCols = strings.Split(vindex.Params["from"], ",") + if strings.Contains(vindex.Type, "unique") { + if len(vindexFromCols) != 1 { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unique vindex 'from' should have only one column") + } + } else { + if len(vindexFromCols) < 2 { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "non-unique vindex 'from' should have more than one column") + } + } + vindexToCol = vindex.Params["to"] + // Make the vindex write_only. If one exists already in the vschema, + // it will need to match this vindex exactly, including the write_only setting. + vindex.Params["write_only"] = "true" + // See if we can create the vindex without errors. + if _, err := vindexes.CreateVindex(vindex.Type, vindexName, vindex.Params); err != nil { + return nil, nil, nil, err + } + if ignoreNullsStr, ok := vindex.Params["ignore_nulls"]; ok { + // This mirrors the behavior of vindexes.boolFromMap(). + switch ignoreNullsStr { + case "true": + vindexIgnoreNulls = true + case "false": + vindexIgnoreNulls = false + default: + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "ignore_nulls (%s) value must be 'true' or 'false'", + ignoreNullsStr) + } + } + + // Validate input table. + if len(specs.Tables) < 1 || len(specs.Tables) > 2 { + return nil, nil, nil, fmt.Errorf("one or two tables must be specified") + } + // Loop executes once or twice. + for tableName, table := range specs.Tables { + if len(table.ColumnVindexes) != 1 { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "exactly one ColumnVindex must be specified for the %s table", tableName) + } + if tableName != targetTableName { // This is the source table. + sourceTableName = tableName + sourceTable = table + continue + } + // This is a primary vindex definition for the target table + // which allows you to override the vindex type used. 
+ var vindexCols []string + if len(table.ColumnVindexes[0].Columns) != 0 { + vindexCols = table.ColumnVindexes[0].Columns + } else { + if table.ColumnVindexes[0].Column == "" { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "at least one column must be specified in ColumnVindexes for the %s table", tableName) + } + vindexCols = []string{table.ColumnVindexes[0].Column} + } + if !slices.Equal(vindexCols, vindexFromCols) { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "columns in the lookup table %s primary vindex (%s) don't match the 'from' columns specified (%s)", + tableName, strings.Join(vindexCols, ","), strings.Join(vindexFromCols, ",")) + } + } + + // Validate input table and vindex consistency. + if sourceTable == nil || len(sourceTable.ColumnVindexes) != 1 { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "No ColumnVindex found for the owner table in the %s keyspace", keyspace) + } + if sourceTable.ColumnVindexes[0].Name != vindexName { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "ColumnVindex name (%s) must match vindex name (%s)", sourceTable.ColumnVindexes[0].Name, vindexName) + } + if vindex.Owner != "" && vindex.Owner != sourceTableName { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "vindex owner (%s) must match table name (%s)", vindex.Owner, sourceTableName) + } + if len(sourceTable.ColumnVindexes[0].Columns) != 0 { + sourceVindexColumns = sourceTable.ColumnVindexes[0].Columns + } else { + if sourceTable.ColumnVindexes[0].Column == "" { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "at least one column must be specified in ColumnVindexes for the %s table", sourceTableName) + } + sourceVindexColumns = []string{sourceTable.ColumnVindexes[0].Column} + } + if len(sourceVindexColumns) != len(vindexFromCols) { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "length of table columns (%d) 
differs from length of vindex columns (%d)", len(sourceVindexColumns), len(vindexFromCols)) + } + + // Validate against source vschema. + sourceVSchema, err = s.ts.GetVSchema(ctx, keyspace) + if err != nil { + return nil, nil, nil, err + } + if sourceVSchema.Vindexes == nil { + sourceVSchema.Vindexes = make(map[string]*vschemapb.Vindex) + } + // If source and target keyspaces are the same, make vschemas point + // to the same object. + if keyspace == targetKeyspace { + targetVSchema = sourceVSchema + } else { + targetVSchema, err = s.ts.GetVSchema(ctx, targetKeyspace) + if err != nil { + return nil, nil, nil, err + } + } + if targetVSchema.Vindexes == nil { + targetVSchema.Vindexes = make(map[string]*vschemapb.Vindex) + } + if targetVSchema.Tables == nil { + targetVSchema.Tables = make(map[string]*vschemapb.Table) + } + if existing, ok := sourceVSchema.Vindexes[vindexName]; ok { + if !proto.Equal(existing, vindex) { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "a conflicting vindex named %s already exists in the %s keyspace", vindexName, keyspace) + } + } + sourceVSchemaTable = sourceVSchema.Tables[sourceTableName] + if sourceVSchemaTable == nil && !schema.IsInternalOperationTableName(sourceTableName) { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "table %s not found in the %s keyspace", sourceTableName, keyspace) + } + for _, colVindex := range sourceVSchemaTable.ColumnVindexes { + // For a conflict, the vindex name and column should match. + if colVindex.Name != vindexName { + continue + } + colName := colVindex.Column + if len(colVindex.Columns) != 0 { + colName = colVindex.Columns[0] + } + if colName == sourceVindexColumns[0] { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "a conflicting ColumnVindex on column %s in table %s already exists in the %s keyspace", + colName, sourceTableName, keyspace) + } + } + + // Validate against source schema. 
+ sourceShards, err := s.ts.GetServingShards(ctx, keyspace) + if err != nil { + return nil, nil, nil, err + } + onesource := sourceShards[0] + if onesource.PrimaryAlias == nil { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "source shard %s has no primary", onesource.ShardName()) + } + req := &tabletmanagerdatapb.GetSchemaRequest{Tables: []string{sourceTableName}} + tableSchema, err := schematools.GetSchema(ctx, s.ts, s.tmc, onesource.PrimaryAlias, req) + if err != nil { + return nil, nil, nil, err + } + if len(tableSchema.TableDefinitions) != 1 { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected number of tables (%d) returned from %s schema", len(tableSchema.TableDefinitions), keyspace) + } + + // Generate "create table" statement. + lines := strings.Split(tableSchema.TableDefinitions[0].Schema, "\n") + if len(lines) < 3 { + // Should never happen. + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "schema looks incorrect: %s, expecting at least four lines", tableSchema.TableDefinitions[0].Schema) + } + var modified []string + modified = append(modified, strings.Replace(lines[0], sourceTableName, targetTableName, 1)) + for i := range sourceVindexColumns { + line, err := generateColDef(lines, sourceVindexColumns[i], vindexFromCols[i]) + if err != nil { + return nil, nil, nil, err + } + modified = append(modified, line) + } + + if vindex.Params["data_type"] == "" || strings.EqualFold(vindex.Type, "consistent_lookup_unique") || strings.EqualFold(vindex.Type, "consistent_lookup") { + modified = append(modified, fmt.Sprintf(" %s varbinary(128),", sqlescape.EscapeID(vindexToCol))) + } else { + modified = append(modified, fmt.Sprintf(" %s %s,", sqlescape.EscapeID(vindexToCol), sqlescape.EscapeID(vindex.Params["data_type"]))) + } + buf := sqlparser.NewTrackedBuffer(nil) + fmt.Fprintf(buf, " PRIMARY KEY (") + prefix := "" + for _, col := range vindexFromCols { + fmt.Fprintf(buf, "%s%s", prefix, sqlescape.EscapeID(col)) 
+ prefix = ", " + } + fmt.Fprintf(buf, ")") + modified = append(modified, buf.String()) + modified = append(modified, ")") + createDDL = strings.Join(modified, "\n") + + // Generate vreplication query. + buf = sqlparser.NewTrackedBuffer(nil) + buf.Myprintf("select ") + for i := range vindexFromCols { + buf.Myprintf("%s as %s, ", sqlparser.String(sqlparser.NewIdentifierCI(sourceVindexColumns[i])), sqlparser.String(sqlparser.NewIdentifierCI(vindexFromCols[i]))) + } + if strings.EqualFold(vindexToCol, "keyspace_id") || strings.EqualFold(vindex.Type, "consistent_lookup_unique") || strings.EqualFold(vindex.Type, "consistent_lookup") { + buf.Myprintf("keyspace_id() as %s ", sqlparser.String(sqlparser.NewIdentifierCI(vindexToCol))) + } else { + buf.Myprintf("%s as %s ", sqlparser.String(sqlparser.NewIdentifierCI(vindexToCol)), sqlparser.String(sqlparser.NewIdentifierCI(vindexToCol))) + } + buf.Myprintf("from %s", sqlparser.String(sqlparser.NewIdentifierCS(sourceTableName))) + if vindexIgnoreNulls { + buf.Myprintf(" where ") + lastValIdx := len(vindexFromCols) - 1 + for i := range vindexFromCols { + buf.Myprintf("%s is not null", sqlparser.String(sqlparser.NewIdentifierCI(vindexFromCols[i]))) + if i != lastValIdx { + buf.Myprintf(" and ") + } + } + } + if vindex.Owner != "" { + // Only backfill. + buf.Myprintf(" group by ") + for i := range vindexFromCols { + buf.Myprintf("%s, ", sqlparser.String(sqlparser.NewIdentifierCI(vindexFromCols[i]))) + } + buf.Myprintf("%s", sqlparser.String(sqlparser.NewIdentifierCI(vindexToCol))) + } + materializeQuery = buf.String() + + // Update targetVSchema. + targetTable := specs.Tables[targetTableName] + if targetVSchema.Sharded { + // Choose a primary vindex type for the lookup table based on the source + // definition if one was not explicitly specified. 
+ var targetVindexType string + var targetVindex *vschemapb.Vindex + for _, field := range tableSchema.TableDefinitions[0].Fields { + if sourceVindexColumns[0] == field.Name { + if targetTable != nil && len(targetTable.ColumnVindexes) > 0 { + targetVindexType = targetTable.ColumnVindexes[0].Name + } + if targetVindexType == "" { + targetVindexType, err = vindexes.ChooseVindexForType(field.Type) + if err != nil { + return nil, nil, nil, err + } + } + targetVindex = &vschemapb.Vindex{ + Type: targetVindexType, + } + break + } + } + if targetVindex == nil { + // Unreachable. We validated column names when generating the DDL. + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "column %s not found in target schema %s", sourceVindexColumns[0], tableSchema.TableDefinitions[0].Schema) + } + if existing, ok := targetVSchema.Vindexes[targetVindexType]; ok { + if !proto.Equal(existing, targetVindex) { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "a conflicting vindex named %v already exists in the %s keyspace", targetVindexType, targetKeyspace) + } + } else { + targetVSchema.Vindexes[targetVindexType] = targetVindex + } + + targetTable = &vschemapb.Table{ + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: vindexFromCols[0], + Name: targetVindexType, + }}, + } + } else { + targetTable = &vschemapb.Table{} + } + if existing, ok := targetVSchema.Tables[targetTableName]; ok { + if !proto.Equal(existing, targetTable) { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "a conflicting table named %s already exists in the %s vschema", targetTableName, targetKeyspace) + } + } else { + targetVSchema.Tables[targetTableName] = targetTable + } + + ms = &vtctldatapb.MaterializeSettings{ + Workflow: workflow, + MaterializationIntent: vtctldatapb.MaterializationIntent_CREATELOOKUPINDEX, + SourceKeyspace: keyspace, + TargetKeyspace: targetKeyspace, + StopAfterCopy: vindex.Owner != "" && !continueAfterCopyWithOwner, + 
TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: targetTableName, + SourceExpression: materializeQuery, + CreateDdl: createDDL, + }}, + } + + // Update sourceVSchema + sourceVSchema.Vindexes[vindexName] = vindex + sourceVSchemaTable.ColumnVindexes = append(sourceVSchemaTable.ColumnVindexes, sourceTable.ColumnVindexes[0]) + + return ms, sourceVSchema, targetVSchema, nil +} + +func generateColDef(lines []string, sourceVindexCol, vindexFromCol string) (string, error) { + source := sqlescape.EscapeID(sourceVindexCol) + target := sqlescape.EscapeID(vindexFromCol) + + for _, line := range lines[1:] { + if strings.Contains(line, source) { + line = strings.Replace(line, source, target, 1) + line = strings.Replace(line, " AUTO_INCREMENT", "", 1) + line = strings.Replace(line, " DEFAULT NULL", "", 1) + return line, nil + } + } + return "", fmt.Errorf("column %s not found in schema %v", sourceVindexCol, lines) +} + +func (s *Server) MigrateCreate(ctx context.Context, req *vtctldatapb.MigrateCreateRequest) (*vtctldatapb.WorkflowStatusResponse, error) { + moveTablesCreateRequest := &vtctldatapb.MoveTablesCreateRequest{ + Workflow: req.Workflow, + SourceKeyspace: req.SourceKeyspace, + TargetKeyspace: req.TargetKeyspace, + ExternalClusterName: req.MountName, + Cells: req.Cells, + TabletTypes: req.TabletTypes, + TabletSelectionPreference: req.TabletSelectionPreference, + AllTables: req.AllTables, + IncludeTables: req.IncludeTables, + ExcludeTables: req.ExcludeTables, + SourceTimeZone: req.SourceTimeZone, + OnDdl: req.OnDdl, + StopAfterCopy: req.StopAfterCopy, + DeferSecondaryKeys: req.DeferSecondaryKeys, + DropForeignKeys: req.DropForeignKeys, + AutoStart: req.AutoStart, + NoRoutingRules: req.NoRoutingRules, + } + return s.moveTablesCreate(ctx, moveTablesCreateRequest, binlogdatapb.VReplicationWorkflowType_Migrate) } diff --git a/go/vt/vtctl/workflow/state.go b/go/vt/vtctl/workflow/state.go index 613f82d0b43..927f5a9db56 100644 --- 
a/go/vt/vtctl/workflow/state.go +++ b/go/vt/vtctl/workflow/state.go @@ -16,15 +16,45 @@ limitations under the License. package workflow -// Type is the type of a workflow. +import ( + "fmt" + "strings" +) + +// VReplicationWorkflowType specifies whether workflow is +// MoveTables or Reshard and maps directly to what is stored +// in the backend database. +type VReplicationWorkflowType int + +// VReplicationWorkflowType enums. +const ( + MoveTablesWorkflow = VReplicationWorkflowType(iota) + ReshardWorkflow + MigrateWorkflow +) + +// Type is the type of a workflow as a string and maps directly +// to what is provided and presented to the user. type Type string -// Workflow types. +// Workflow string types. const ( - TypeReshard Type = "Reshard" TypeMoveTables Type = "MoveTables" + TypeReshard Type = "Reshard" + TypeMigrate Type = "Migrate" ) +var TypeStrMap = map[VReplicationWorkflowType]Type{ + MoveTablesWorkflow: TypeMoveTables, + ReshardWorkflow: TypeReshard, + MigrateWorkflow: TypeMigrate, +} +var TypeIntMap = map[Type]VReplicationWorkflowType{ + TypeMoveTables: MoveTablesWorkflow, + TypeReshard: ReshardWorkflow, + TypeMigrate: MigrateWorkflow, +} + // State represents the state of a workflow. 
type State struct { Workflow string @@ -45,3 +75,52 @@ type State struct { ShardsAlreadySwitched []string ShardsNotYetSwitched []string } + +func (s *State) String() string { + var stateInfo []string + if !s.IsPartialMigration { // shard level traffic switching is all or nothing + if len(s.RdonlyCellsNotSwitched) == 0 && len(s.ReplicaCellsNotSwitched) == 0 && len(s.ReplicaCellsSwitched) > 0 { + stateInfo = append(stateInfo, "All Reads Switched") + } else if len(s.RdonlyCellsSwitched) == 0 && len(s.ReplicaCellsSwitched) == 0 { + stateInfo = append(stateInfo, "Reads Not Switched") + } else { + stateInfo = append(stateInfo, "Reads partially switched") + if len(s.ReplicaCellsNotSwitched) == 0 { + stateInfo = append(stateInfo, "All Replica Reads Switched") + } else if len(s.ReplicaCellsSwitched) == 0 { + stateInfo = append(stateInfo, "Replica not switched") + } else { + stateInfo = append(stateInfo, "Replica switched in cells: "+strings.Join(s.ReplicaCellsSwitched, ",")) + } + if len(s.RdonlyCellsNotSwitched) == 0 { + stateInfo = append(stateInfo, "All Rdonly Reads Switched") + } else if len(s.RdonlyCellsSwitched) == 0 { + stateInfo = append(stateInfo, "Rdonly not switched") + } else { + stateInfo = append(stateInfo, "Rdonly switched in cells: "+strings.Join(s.RdonlyCellsSwitched, ",")) + } + } + } + if s.WritesSwitched { + stateInfo = append(stateInfo, "Writes Switched") + } else if s.IsPartialMigration { + // For partial migrations, the traffic switching is all or nothing + // at the shard level, so reads are effectively switched on the + // shard when writes are switched. 
+ if len(s.ShardsAlreadySwitched) > 0 && len(s.ShardsNotYetSwitched) > 0 { + stateInfo = append(stateInfo, fmt.Sprintf("Reads partially switched, for shards: %s", strings.Join(s.ShardsAlreadySwitched, ","))) + stateInfo = append(stateInfo, fmt.Sprintf("Writes partially switched, for shards: %s", strings.Join(s.ShardsAlreadySwitched, ","))) + } else { + if len(s.ShardsAlreadySwitched) == 0 { + stateInfo = append(stateInfo, "Reads Not Switched") + stateInfo = append(stateInfo, "Writes Not Switched") + } else { + stateInfo = append(stateInfo, "All Reads Switched") + stateInfo = append(stateInfo, "All Writes Switched") + } + } + } else { + stateInfo = append(stateInfo, "Writes Not Switched") + } + return strings.Join(stateInfo, ". ") +} diff --git a/go/vt/vtctl/workflow/stream_migrator.go b/go/vt/vtctl/workflow/stream_migrator.go index 6d6929a9b4a..75d509614b7 100644 --- a/go/vt/vtctl/workflow/stream_migrator.go +++ b/go/vt/vtctl/workflow/stream_migrator.go @@ -25,9 +25,9 @@ import ( "google.golang.org/protobuf/encoding/prototext" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/logutil" @@ -262,7 +262,7 @@ func (sm *StreamMigrator) readTabletStreams(ctx context.Context, ti *topo.Tablet continue } - pos, err := mysql.DecodePosition(row["pos"].ToString()) + pos, err := replication.DecodePosition(row["pos"].ToString()) if err != nil { return nil, err } @@ -426,8 +426,8 @@ func (sm *StreamMigrator) stopSourceStreams(ctx context.Context) error { return nil } -func (sm *StreamMigrator) syncSourceStreams(ctx context.Context) (map[string]mysql.Position, error) { - stopPositions := make(map[string]mysql.Position) +func (sm *StreamMigrator) syncSourceStreams(ctx context.Context) (map[string]replication.Position, error) { + stopPositions := make(map[string]replication.Position) for _, 
tabletStreams := range sm.streams { for _, vrs := range tabletStreams { @@ -455,7 +455,7 @@ func (sm *StreamMigrator) syncSourceStreams(ctx context.Context) (map[string]mys } wg.Add(1) - go func(vrs *VReplicationStream, shard string, pos mysql.Position) { + go func(vrs *VReplicationStream, shard string, pos replication.Position) { defer wg.Done() sm.ts.Logger().Infof("syncSourceStreams beginning of go func %s %s %+v %d", shard, vrs.BinlogSource.Shard, pos, vrs.ID) @@ -471,14 +471,14 @@ func (sm *StreamMigrator) syncSourceStreams(ctx context.Context) (map[string]mys return } - query := fmt.Sprintf("update _vt.vreplication set state='Running', stop_pos='%s', message='synchronizing for cutover' where id=%d", mysql.EncodePosition(pos), vrs.ID) + query := fmt.Sprintf("update _vt.vreplication set state='Running', stop_pos='%s', message='synchronizing for cutover' where id=%d", replication.EncodePosition(pos), vrs.ID) if _, err := sm.ts.TabletManagerClient().VReplicationExec(ctx, primary.Tablet, query); err != nil { allErrors.RecordError(err) return } sm.ts.Logger().Infof("Waiting for keyspace:shard: %v:%v, position %v", sm.ts.SourceKeyspaceName(), shard, pos) - if err := sm.ts.TabletManagerClient().VReplicationWaitForPos(ctx, primary.Tablet, vrs.ID, mysql.EncodePosition(pos)); err != nil { + if err := sm.ts.TabletManagerClient().VReplicationWaitForPos(ctx, primary.Tablet, vrs.ID, replication.EncodePosition(pos)); err != nil { allErrors.RecordError(err) return } @@ -493,7 +493,7 @@ func (sm *StreamMigrator) syncSourceStreams(ctx context.Context) (map[string]mys return stopPositions, allErrors.AggrError(vterrors.Aggregate) } -func (sm *StreamMigrator) verifyStreamPositions(ctx context.Context, stopPositions map[string]mysql.Position) ([]string, error) { +func (sm *StreamMigrator) verifyStreamPositions(ctx context.Context, stopPositions map[string]replication.Position) ([]string, error) { var ( mu sync.Mutex stoppedStreams = make(map[string][]*VReplicationStream) @@ -538,7 
+538,7 @@ func (sm *StreamMigrator) verifyStreamPositions(ctx context.Context, stopPositio for _, vrs := range tabletStreams { key := fmt.Sprintf("%s:%s", vrs.BinlogSource.Keyspace, vrs.BinlogSource.Shard) if pos := stopPositions[key]; !vrs.Position.Equal(pos) { - allErrors.RecordError(fmt.Errorf("%s: stream %d position: %s does not match %s", key, vrs.ID, mysql.EncodePosition(vrs.Position), mysql.EncodePosition(pos))) + allErrors.RecordError(fmt.Errorf("%s: stream %d position: %s does not match %s", key, vrs.ID, replication.EncodePosition(vrs.Position), replication.EncodePosition(pos))) } } } @@ -564,7 +564,7 @@ func (sm *StreamMigrator) createTargetStreams(ctx context.Context, tmpl []*VRepl } return sm.ts.ForAllTargets(func(target *MigrationTarget) error { - ig := vreplication.NewInsertGenerator(binlogplayer.BlpStopped, target.GetPrimary().DbName()) + ig := vreplication.NewInsertGenerator(binlogdatapb.VReplicationWorkflowState_Stopped, target.GetPrimary().DbName()) tabletStreams := VReplicationStreams(tmpl).Copy().ToSlice() for _, vrs := range tabletStreams { @@ -579,7 +579,7 @@ func (sm *StreamMigrator) createTargetStreams(ctx context.Context, tmpl []*VRepl rule.Filter = buf.String() } - ig.AddRow(vrs.Workflow, vrs.BinlogSource, mysql.EncodePosition(vrs.Position), "", "", + ig.AddRow(vrs.Workflow, vrs.BinlogSource, replication.EncodePosition(vrs.Position), "", "", vrs.WorkflowType, vrs.WorkflowSubType, vrs.DeferSecondaryKeys) } diff --git a/go/vt/vtctl/workflow/stream_migrator_test.go b/go/vt/vtctl/workflow/stream_migrator_test.go index 903e873a130..04f787eb4d4 100644 --- a/go/vt/vtctl/workflow/stream_migrator_test.go +++ b/go/vt/vtctl/workflow/stream_migrator_test.go @@ -24,7 +24,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" @@ -282,20 +281,20 @@ func TestTemplatize(t 
*testing.T) { }} vs := &vschemapb.Keyspace{ Sharded: true, - Vindexes: map[string]*vschema.Vindex{ + Vindexes: map[string]*vschemapb.Vindex{ "thash": { Type: "hash", }, }, - Tables: map[string]*vschema.Table{ + Tables: map[string]*vschemapb.Table{ "t1": { - ColumnVindexes: []*vschema.ColumnVindex{{ + ColumnVindexes: []*vschemapb.ColumnVindex{{ Columns: []string{"c1"}, Name: "thash", }}, }, "t2": { - ColumnVindexes: []*vschema.ColumnVindex{{ + ColumnVindexes: []*vschemapb.ColumnVindex{{ Columns: []string{"c1"}, Name: "thash", }}, diff --git a/go/vt/vtctl/workflow/switcher.go b/go/vt/vtctl/workflow/switcher.go new file mode 100644 index 00000000000..0cbdce164dc --- /dev/null +++ b/go/vt/vtctl/workflow/switcher.go @@ -0,0 +1,151 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workflow + +import ( + "context" + "time" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +var _ iswitcher = (*switcher)(nil) + +type switcher struct { + s *Server + ts *trafficSwitcher +} + +func (r *switcher) addParticipatingTablesToKeyspace(ctx context.Context, keyspace, tableSpecs string) error { + return r.ts.addParticipatingTablesToKeyspace(ctx, keyspace, tableSpecs) +} + +func (r *switcher) deleteRoutingRules(ctx context.Context) error { + return r.ts.deleteRoutingRules(ctx) +} + +func (r *switcher) deleteShardRoutingRules(ctx context.Context) error { + return r.ts.deleteShardRoutingRules(ctx) +} + +func (r *switcher) dropSourceDeniedTables(ctx context.Context) error { + return r.ts.dropSourceDeniedTables(ctx) +} + +func (r *switcher) dropTargetDeniedTables(ctx context.Context) error { + return r.ts.dropTargetDeniedTables(ctx) +} + +func (r *switcher) validateWorkflowHasCompleted(ctx context.Context) error { + return r.ts.validateWorkflowHasCompleted(ctx) +} + +func (r *switcher) removeSourceTables(ctx context.Context, removalType TableRemovalType) error { + return r.ts.removeSourceTables(ctx, removalType) +} + +func (r *switcher) dropSourceShards(ctx context.Context) error { + return r.ts.dropSourceShards(ctx) +} + +func (r *switcher) switchShardReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, direction TrafficSwitchDirection) error { + return r.ts.switchShardReads(ctx, cells, servedTypes, direction) +} + +func (r *switcher) switchTableReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, direction TrafficSwitchDirection) error { + return r.ts.switchTableReads(ctx, cells, servedTypes, direction) +} + +func (r *switcher) startReverseVReplication(ctx context.Context) error { + return r.ts.startReverseVReplication(ctx) +} + +func (r *switcher) createJournals(ctx context.Context, sourceWorkflows []string) error { + return r.ts.createJournals(ctx, sourceWorkflows) +} + +func (r 
*switcher) allowTargetWrites(ctx context.Context) error { + return r.ts.allowTargetWrites(ctx) +} + +func (r *switcher) changeRouting(ctx context.Context) error { + return r.ts.changeRouting(ctx) +} + +func (r *switcher) streamMigraterfinalize(ctx context.Context, ts *trafficSwitcher, workflows []string) error { + return StreamMigratorFinalize(ctx, ts, workflows) +} + +func (r *switcher) createReverseVReplication(ctx context.Context) error { + return r.ts.createReverseVReplication(ctx) +} + +func (r *switcher) migrateStreams(ctx context.Context, sm *StreamMigrator) error { + return sm.MigrateStreams(ctx) +} + +func (r *switcher) waitForCatchup(ctx context.Context, filteredReplicationWaitTime time.Duration) error { + return r.ts.waitForCatchup(ctx, filteredReplicationWaitTime) +} + +func (r *switcher) stopSourceWrites(ctx context.Context) error { + return r.ts.stopSourceWrites(ctx) +} + +func (r *switcher) stopStreams(ctx context.Context, sm *StreamMigrator) ([]string, error) { + return sm.StopStreams(ctx) +} + +func (r *switcher) cancelMigration(ctx context.Context, sm *StreamMigrator) { + r.ts.cancelMigration(ctx, sm) +} + +func (r *switcher) lockKeyspace(ctx context.Context, keyspace, action string) (context.Context, func(*error), error) { + return r.s.ts.LockKeyspace(ctx, keyspace, action) +} + +func (r *switcher) freezeTargetVReplication(ctx context.Context) error { + return r.ts.freezeTargetVReplication(ctx) +} + +func (r *switcher) dropTargetVReplicationStreams(ctx context.Context) error { + return r.ts.dropTargetVReplicationStreams(ctx) +} + +func (r *switcher) dropSourceReverseVReplicationStreams(ctx context.Context) error { + return r.ts.dropSourceReverseVReplicationStreams(ctx) +} + +func (r *switcher) removeTargetTables(ctx context.Context) error { + return r.ts.removeTargetTables(ctx) +} + +func (r *switcher) dropTargetShards(ctx context.Context) error { + return r.ts.dropTargetShards(ctx) +} + +func (r *switcher) logs() *[]string { + return nil +} + 
+func (r *switcher) resetSequences(ctx context.Context) error { + return r.ts.resetSequences(ctx) +} + +func (r *switcher) initializeTargetSequences(ctx context.Context, sequencesByBackingTable map[string]*sequenceMetadata) error { + return r.ts.initializeTargetSequences(ctx, sequencesByBackingTable) +} diff --git a/go/vt/vtctl/workflow/switcher_dry_run.go b/go/vt/vtctl/workflow/switcher_dry_run.go new file mode 100644 index 00000000000..1c8a05e00c2 --- /dev/null +++ b/go/vt/vtctl/workflow/switcher_dry_run.go @@ -0,0 +1,388 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workflow + +import ( + "context" + "fmt" + "slices" + "sort" + "strings" + "time" + + "vitess.io/vitess/go/maps2" + "vitess.io/vitess/go/mysql/replication" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +var _ iswitcher = (*switcherDryRun)(nil) + +type switcherDryRun struct { + drLog *LogRecorder + ts *trafficSwitcher +} + +func (dr *switcherDryRun) addParticipatingTablesToKeyspace(ctx context.Context, keyspace, tableSpecs string) error { + dr.drLog.Log("All source tables will be added to the target keyspace vschema") + return nil +} + +func (dr *switcherDryRun) deleteRoutingRules(ctx context.Context) error { + dr.drLog.Log("Routing rules for participating tables will be deleted") + return nil +} + +func (dr *switcherDryRun) deleteShardRoutingRules(ctx context.Context) error { + if dr.ts.isPartialMigration { + dr.drLog.Log("Shard routing rules for participating shards will be deleted") + } + return nil +} + +func (dr *switcherDryRun) switchShardReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, direction TrafficSwitchDirection) error { + sourceShards := make([]string, 0) + targetShards := make([]string, 0) + for _, source := range dr.ts.Sources() { + sourceShards = append(sourceShards, source.GetShard().ShardName()) + } + for _, target := range dr.ts.Targets() { + targetShards = append(targetShards, target.GetShard().ShardName()) + } + sort.Strings(sourceShards) + sort.Strings(targetShards) + if direction == DirectionForward { + dr.drLog.Logf("Switch reads from keyspace %s to keyspace %s for shards [%s] to shards [%s]", + dr.ts.SourceKeyspaceName(), dr.ts.TargetKeyspaceName(), strings.Join(sourceShards, ","), strings.Join(targetShards, ",")) + } else { + dr.drLog.Logf("Switch reads from keyspace %s to keyspace %s for shards [%s] to shards [%s]", + dr.ts.TargetKeyspaceName(), dr.ts.SourceKeyspaceName(), strings.Join(targetShards, ","), 
strings.Join(sourceShards, ",")) + } + return nil +} + +func (dr *switcherDryRun) switchTableReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, direction TrafficSwitchDirection) error { + ks := dr.ts.TargetKeyspaceName() + if direction == DirectionBackward { + ks = dr.ts.SourceKeyspaceName() + } + var tabletTypes []string + for _, servedType := range servedTypes { + tabletTypes = append(tabletTypes, servedType.String()) + } + tables := strings.Join(dr.ts.Tables(), ",") + dr.drLog.Logf("Switch reads for tables [%s] to keyspace %s for tablet types [%s]", tables, ks, strings.Join(tabletTypes, ",")) + dr.drLog.Logf("Routing rules for tables [%s] will be updated", tables) + return nil +} + +func (dr *switcherDryRun) createJournals(ctx context.Context, sourceWorkflows []string) error { + dr.drLog.Log("Create journal entries on source databases") + if len(sourceWorkflows) > 0 { + dr.drLog.Logf("Source workflows found: [%s]", strings.Join(sourceWorkflows, ",")) + } + return nil +} + +func (dr *switcherDryRun) allowTargetWrites(ctx context.Context) error { + dr.drLog.Logf("Enable writes on keyspace %s for tables [%s]", dr.ts.TargetKeyspaceName(), strings.Join(dr.ts.Tables(), ",")) + return nil +} + +func (dr *switcherDryRun) changeRouting(ctx context.Context) error { + dr.drLog.Logf("Switch routing from keyspace %s to keyspace %s", dr.ts.SourceKeyspaceName(), dr.ts.TargetKeyspaceName()) + var deleteLogs, addLogs []string + if dr.ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + tables := strings.Join(dr.ts.Tables(), ",") + dr.drLog.Logf("Routing rules for tables [%s] will be updated", tables) + return nil + } + deleteLogs = nil + addLogs = nil + for _, source := range dr.ts.Sources() { + deleteLogs = append(deleteLogs, fmt.Sprintf("shard:%s;tablet:%d", source.GetShard().ShardName(), source.GetShard().PrimaryAlias.Uid)) + } + for _, target := range dr.ts.Targets() { + addLogs = append(addLogs, fmt.Sprintf("shard:%s;tablet:%d", 
target.GetShard().ShardName(), target.GetShard().PrimaryAlias.Uid)) + } + if len(deleteLogs) > 0 { + dr.drLog.Logf("IsPrimaryServing will be set to false for: [%s]", strings.Join(deleteLogs, ",")) + dr.drLog.Logf("IsPrimaryServing will be set to true for: [%s]", strings.Join(addLogs, ",")) + } + return nil +} + +func (dr *switcherDryRun) streamMigraterfinalize(ctx context.Context, ts *trafficSwitcher, workflows []string) error { + logs := make([]string, 0) + for _, t := range ts.Targets() { + logs = append(logs, fmt.Sprintf("tablet:%d", t.GetPrimary().Alias.Uid)) + } + dr.drLog.Logf("Switch writes completed, freeze and delete vreplication streams on: [%s]", strings.Join(logs, ",")) + return nil +} + +func (dr *switcherDryRun) startReverseVReplication(ctx context.Context) error { + logs := make([]string, 0) + for _, t := range dr.ts.Sources() { + logs = append(logs, fmt.Sprintf("tablet:%d", t.GetPrimary().Alias.Uid)) + } + dr.drLog.Logf("Start reverse vreplication streams on: [%s]", strings.Join(logs, ",")) + return nil +} + +func (dr *switcherDryRun) createReverseVReplication(ctx context.Context) error { + dr.drLog.Logf("Create reverse vreplication workflow %s", dr.ts.ReverseWorkflowName()) + return nil +} + +func (dr *switcherDryRun) migrateStreams(ctx context.Context, sm *StreamMigrator) error { + templates := sm.Templates() + + if len(templates) == 0 { + return nil + } + logs := make([]string, 0) + + dr.drLog.Logf("Migrate streams to %s:", dr.ts.TargetKeyspaceName()) + for key, streams := range sm.Streams() { + for _, stream := range streams { + logs = append(logs, fmt.Sprintf("shard:%s;id:%d;workflow:%s;position:%s;binlogsource:%v", key, stream.ID, stream.Workflow, replication.EncodePosition(stream.Position), stream.BinlogSource)) + } + } + if len(logs) > 0 { + dr.drLog.Logf("Migrate source streams: [%s]", strings.Join(logs, ",")) + logs = nil + } + for _, target := range dr.ts.Targets() { + tabletStreams := templates + for _, vrs := range tabletStreams { + 
logs = append(logs, fmt.Sprintf("keyspace:%s;shard:%s;tablet:%d;workflow:%s;id:%d,position:%v;binlogsource:%s", + vrs.BinlogSource.Keyspace, vrs.BinlogSource.Shard, target.GetPrimary().Alias.Uid, vrs.Workflow, vrs.ID, replication.EncodePosition(vrs.Position), vrs.BinlogSource)) + } + } + if len(logs) > 0 { + dr.drLog.Logf("Create target streams (as stopped): [%s]", strings.Join(logs, ",")) + } + return nil +} + +func (dr *switcherDryRun) waitForCatchup(ctx context.Context, filteredReplicationWaitTime time.Duration) error { + dr.drLog.Logf("Wait for vreplication on stopped streams to catchup for up to %v", filteredReplicationWaitTime) + return nil +} + +func (dr *switcherDryRun) stopSourceWrites(ctx context.Context) error { + logs := make([]string, 0) + for _, source := range dr.ts.Sources() { + position, _ := dr.ts.TabletManagerClient().PrimaryPosition(ctx, source.GetPrimary().Tablet) + logs = append(logs, fmt.Sprintf("keyspace:%s;shard:%s;position:%s", dr.ts.SourceKeyspaceName(), source.GetShard().ShardName(), position)) + } + if len(logs) > 0 { + dr.drLog.Logf("Stop writes on keyspace %s for tables [%s]: [%s]", dr.ts.SourceKeyspaceName(), + strings.Join(dr.ts.Tables(), ","), strings.Join(logs, ",")) + } + return nil +} + +func (dr *switcherDryRun) stopStreams(ctx context.Context, sm *StreamMigrator) ([]string, error) { + logs := make([]string, 0) + for _, streams := range sm.Streams() { + for _, stream := range streams { + logs = append(logs, fmt.Sprintf("id:%d;keyspace:%s;shard:%s;rules:%s;position:%v", + stream.ID, stream.BinlogSource.Keyspace, stream.BinlogSource.Shard, stream.BinlogSource.Filter, stream.Position)) + } + } + if len(logs) > 0 { + dr.drLog.Logf("Stop streams on keyspace %s: [%s]", dr.ts.SourceKeyspaceName(), strings.Join(logs, ",")) + } + return nil, nil +} + +func (dr *switcherDryRun) cancelMigration(ctx context.Context, sm *StreamMigrator) { + dr.drLog.Log("Cancel stream migrations as requested") +} + +func (dr *switcherDryRun) 
lockKeyspace(ctx context.Context, keyspace, _ string) (context.Context, func(*error), error) { + dr.drLog.Logf("Lock keyspace %s", keyspace) + return ctx, func(e *error) { + dr.drLog.Logf("Unlock keyspace %s", keyspace) + }, nil +} + +func (dr *switcherDryRun) removeSourceTables(ctx context.Context, removalType TableRemovalType) error { + logs := make([]string, 0) + for _, source := range dr.ts.Sources() { + for _, tableName := range dr.ts.Tables() { + logs = append(logs, fmt.Sprintf("keyspace:%s;shard:%s;dbname:%s;tablet:%d;table:%s", + source.GetPrimary().Keyspace, source.GetPrimary().Shard, source.GetPrimary().DbName(), source.GetPrimary().Alias.Uid, tableName)) + } + } + action := "Dropping" + if removalType == RenameTable { + action = "Renaming" + } + if len(logs) > 0 { + dr.drLog.Logf("%s these tables from the database and removing them from the vschema for keyspace %s: [%s]", + action, dr.ts.SourceKeyspaceName(), strings.Join(logs, ",")) + } + return nil +} + +func (dr *switcherDryRun) dropSourceShards(ctx context.Context) error { + logs := make([]string, 0) + tabletsList := make(map[string][]string) + for _, si := range dr.ts.SourceShards() { + tabletAliases, err := dr.ts.TopoServer().FindAllTabletAliasesInShard(ctx, si.Keyspace(), si.ShardName()) + if err != nil { + return err + } + tabletsList[si.ShardName()] = make([]string, 0) + for _, t := range tabletAliases { + tabletsList[si.ShardName()] = append(tabletsList[si.ShardName()], fmt.Sprintf("%d", t.Uid)) + } + sort.Strings(tabletsList[si.ShardName()]) + logs = append(logs, fmt.Sprintf("cell:%s;keyspace:%s;shards:[%s]", + si.Shard.PrimaryAlias.Cell, si.Keyspace(), si.ShardName()), strings.Join(tabletsList[si.ShardName()], ",")) + } + if len(logs) > 0 { + dr.drLog.Logf("Delete shards (and all related tablets): [%s]", strings.Join(logs, ",")) + } + + return nil +} + +func (dr *switcherDryRun) validateWorkflowHasCompleted(ctx context.Context) error { + return doValidateWorkflowHasCompleted(ctx, dr.ts) +} + 
+func (dr *switcherDryRun) dropTargetVReplicationStreams(ctx context.Context) error { + logs := make([]string, 0) + for _, t := range dr.ts.Targets() { + logs = append(logs, fmt.Sprintf("keyspace:%s;shard:%s;workflow:%s;dbname:%s;tablet:%d", + t.GetShard().Keyspace(), t.GetShard().ShardName(), dr.ts.WorkflowName(), t.GetPrimary().DbName(), t.GetPrimary().Alias.Uid)) + } + dr.drLog.Logf("Delete vreplication streams on targets: [%s]", strings.Join(logs, ",")) + return nil +} + +func (dr *switcherDryRun) dropSourceReverseVReplicationStreams(ctx context.Context) error { + logs := make([]string, 0) + for _, t := range dr.ts.Sources() { + logs = append(logs, fmt.Sprintf("keyspace:%s;shard:%s;workflow:%s;dbname:%s;tablet:%d", + t.GetShard().Keyspace(), t.GetShard().ShardName(), ReverseWorkflowName(dr.ts.WorkflowName()), t.GetPrimary().DbName(), t.GetPrimary().Alias.Uid)) + } + dr.drLog.Logf("Delete reverse vreplication streams on sources: [%s]", strings.Join(logs, ",")) + return nil +} + +func (dr *switcherDryRun) freezeTargetVReplication(ctx context.Context) error { + logs := make([]string, 0) + for _, target := range dr.ts.Targets() { + logs = append(logs, fmt.Sprintf("keyspace:%s;shard:%s;tablet:%d;workflow:%s;dbname:%s", + target.GetPrimary().Keyspace, target.GetPrimary().Shard, target.GetPrimary().Alias.Uid, dr.ts.WorkflowName(), target.GetPrimary().DbName())) + } + if len(logs) > 0 { + dr.drLog.Logf("Mark vreplication streams frozen on: [%s]", strings.Join(logs, ",")) + } + return nil +} + +func (dr *switcherDryRun) dropSourceDeniedTables(ctx context.Context) error { + logs := make([]string, 0) + for _, si := range dr.ts.SourceShards() { + logs = append(logs, fmt.Sprintf("keyspace:%s;shard:%s;tablet:%d", si.Keyspace(), si.ShardName(), si.PrimaryAlias.Uid)) + } + if len(logs) > 0 { + dr.drLog.Logf("Denied tables records on [%s] will be removed from: [%s]", strings.Join(dr.ts.Tables(), ","), strings.Join(logs, ",")) + } + return nil +} + +func (dr *switcherDryRun) 
dropTargetDeniedTables(ctx context.Context) error { + logs := make([]string, 0) + for _, si := range dr.ts.TargetShards() { + logs = append(logs, fmt.Sprintf("keyspace:%s;shard:%s;tablet:%d", si.Keyspace(), si.ShardName(), si.PrimaryAlias.Uid)) + } + if len(logs) > 0 { + dr.drLog.Logf("Denied tables records on [%s] will be removed from: [%s]", strings.Join(dr.ts.Tables(), ","), strings.Join(logs, ",")) + } + return nil +} + +func (dr *switcherDryRun) logs() *[]string { + return &dr.drLog.logs +} + +func (dr *switcherDryRun) removeTargetTables(ctx context.Context) error { + logs := make([]string, 0) + for _, target := range dr.ts.Targets() { + for _, tableName := range dr.ts.Tables() { + logs = append(logs, fmt.Sprintf("keyspace:%s;shard:%s;dbname:%s;tablet:%d;table:%s", + target.GetPrimary().Keyspace, target.GetPrimary().Shard, target.GetPrimary().DbName(), target.GetPrimary().Alias.Uid, tableName)) + } + } + if len(logs) > 0 { + dr.drLog.Logf("Dropping these tables from the database and removing from the vschema for keyspace %s: [%s]", + dr.ts.TargetKeyspaceName(), strings.Join(logs, ",")) + } + return nil +} + +func (dr *switcherDryRun) dropTargetShards(ctx context.Context) error { + logs := make([]string, 0) + tabletsList := make(map[string][]string) + for _, si := range dr.ts.TargetShards() { + tabletAliases, err := dr.ts.TopoServer().FindAllTabletAliasesInShard(ctx, si.Keyspace(), si.ShardName()) + if err != nil { + return err + } + tabletsList[si.ShardName()] = make([]string, 0) + for _, t := range tabletAliases { + tabletsList[si.ShardName()] = append(tabletsList[si.ShardName()], fmt.Sprintf("%d", t.Uid)) + } + sort.Strings(tabletsList[si.ShardName()]) + logs = append(logs, fmt.Sprintf("cell:%s;keyspace:%s;shards:[%s]", + si.Shard.PrimaryAlias.Cell, si.Keyspace(), si.ShardName()), strings.Join(tabletsList[si.ShardName()], ",")) + } + if len(logs) > 0 { + dr.drLog.Logf("Delete shards (and all related tablets): [%s]", strings.Join(logs, ",")) + } + + return 
nil +} + +func (dr *switcherDryRun) resetSequences(ctx context.Context) error { + var err error + mustReset := false + if mustReset, err = dr.ts.mustResetSequences(ctx); err != nil { + return err + } + if !mustReset { + return nil + } + dr.drLog.Log("The sequence caches will be reset on the source since sequence tables are being moved") + return nil +} + +func (dr *switcherDryRun) initializeTargetSequences(ctx context.Context, sequencesByBackingTable map[string]*sequenceMetadata) error { + sortedBackingTableNames := maps2.Keys(sequencesByBackingTable) + slices.Sort(sortedBackingTableNames) + dr.drLog.Log(fmt.Sprintf("The following sequence backing tables used by tables being moved will be initialized: %s", + strings.Join(sortedBackingTableNames, ","))) + return nil +} diff --git a/go/vt/vtctl/workflow/switcher_interface.go b/go/vt/vtctl/workflow/switcher_interface.go new file mode 100644 index 00000000000..8d0f9e847be --- /dev/null +++ b/go/vt/vtctl/workflow/switcher_interface.go @@ -0,0 +1,57 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workflow + +import ( + "context" + "time" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +type iswitcher interface { + lockKeyspace(ctx context.Context, keyspace, action string) (context.Context, func(*error), error) + cancelMigration(ctx context.Context, sm *StreamMigrator) + stopStreams(ctx context.Context, sm *StreamMigrator) ([]string, error) + stopSourceWrites(ctx context.Context) error + waitForCatchup(ctx context.Context, filteredReplicationWaitTime time.Duration) error + migrateStreams(ctx context.Context, sm *StreamMigrator) error + createReverseVReplication(ctx context.Context) error + createJournals(ctx context.Context, sourceWorkflows []string) error + allowTargetWrites(ctx context.Context) error + changeRouting(ctx context.Context) error + streamMigraterfinalize(ctx context.Context, ts *trafficSwitcher, workflows []string) error + startReverseVReplication(ctx context.Context) error + switchTableReads(ctx context.Context, cells []string, servedType []topodatapb.TabletType, direction TrafficSwitchDirection) error + switchShardReads(ctx context.Context, cells []string, servedType []topodatapb.TabletType, direction TrafficSwitchDirection) error + validateWorkflowHasCompleted(ctx context.Context) error + removeSourceTables(ctx context.Context, removalType TableRemovalType) error + dropSourceShards(ctx context.Context) error + dropSourceDeniedTables(ctx context.Context) error + dropTargetDeniedTables(ctx context.Context) error + freezeTargetVReplication(ctx context.Context) error + dropSourceReverseVReplicationStreams(ctx context.Context) error + dropTargetVReplicationStreams(ctx context.Context) error + removeTargetTables(ctx context.Context) error + dropTargetShards(ctx context.Context) error + deleteRoutingRules(ctx context.Context) error + deleteShardRoutingRules(ctx context.Context) error + addParticipatingTablesToKeyspace(ctx context.Context, keyspace, tableSpecs string) error + resetSequences(ctx context.Context) error + 
initializeTargetSequences(ctx context.Context, sequencesByBackingTable map[string]*sequenceMetadata) error + logs() *[]string +} diff --git a/go/vt/vtctl/workflow/traffic_switcher.go b/go/vt/vtctl/workflow/traffic_switcher.go index 53a6e0ede9d..35f1d1b966b 100644 --- a/go/vt/vtctl/workflow/traffic_switcher.go +++ b/go/vt/vtctl/workflow/traffic_switcher.go @@ -17,43 +17,73 @@ limitations under the License. package workflow import ( - "bytes" "context" "errors" "fmt" - "hash/fnv" - "math" "sort" "strings" + "sync" + "time" - "google.golang.org/protobuf/encoding/prototext" + "golang.org/x/sync/errgroup" - "vitess.io/vitess/go/sets" + "vitess.io/vitess/go/json2" + "vitess.io/vitess/go/maps2" + "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" "vitess.io/vitess/go/vt/vttablet/tmclient" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) const ( // Frozen is the message value of frozen vreplication streams. Frozen = "FROZEN" + // Running is the state value of a vreplication stream in the + // replicating state. + Running = "RUNNING" + + // How long to wait when refreshing the state of each tablet in a shard. Note that these + // are refreshed in parallel, non-topo errors are ignored (in the error handling) and we + // may only do a partial refresh. 
Because in some cases it's unsafe to switch the traffic + // if some tablets do not refresh, we may need to look for partial results and produce + // an error (with the provided details of WHY) if we see them. + // Side note: the default lock/lease TTL in etcd is 60s so the default tablet refresh + // timeout of 60s can cause us to lose our keyspace lock before completing the + // operation too. + shardTabletRefreshTimeout = time.Duration(30 * time.Second) + + // Use pt-osc's naming convention, this format also ensures vstreamer ignores such tables. + renameTableTemplate = "_%.59s_old" // limit table name to 64 characters + + sqlDeleteWorkflow = "delete from _vt.vreplication where db_name = %s and workflow = %s" + sqlGetMaxSequenceVal = "select max(%a) as maxval from %a.%a" + sqlInitSequenceTable = "insert into %a.%a (id, next_id, cache) values (0, %d, 1000) on duplicate key update next_id = if(next_id < %d, %d, next_id)" ) -var ( - // ErrNoStreams occurs when no target streams are found for a workflow in a - // target keyspace. - ErrNoStreams = errors.New("no streams found") -) +// accessType specifies the type of access for a shard (allow/disallow writes). +type accessType int -// TrafficSwitchDirection specifies the switching direction. -type TrafficSwitchDirection int +const ( + allowWrites = accessType(iota) + disallowWrites +) // The following constants define the switching direction. const ( @@ -61,20 +91,29 @@ const ( DirectionBackward ) -// TableRemovalType specifies the way the a table will be removed during a -// DropSource for a MoveTables workflow. -type TableRemovalType int - // The following consts define if DropSource will drop or rename the table. const ( DropTable = TableRemovalType(iota) RenameTable ) -var tableRemovalTypeStrs = [...]string{ - "DROP TABLE", - "RENAME TABLE", -} +// TrafficSwitchDirection specifies the switching direction. 
+type TrafficSwitchDirection int + +// TableRemovalType specifies the way the a table will be removed during a +// DropSource for a MoveTables workflow. +type TableRemovalType int + +var ( + // ErrNoStreams occurs when no target streams are found for a workflow in a + // target keyspace. + ErrNoStreams = errors.New("no streams found") + + tableRemovalTypeStrs = []string{ + "DROP TABLE", + "RENAME TABLE", + } +) // String returns a string representation of a TableRemovalType func (trt TableRemovalType) String() string { @@ -85,13 +124,13 @@ func (trt TableRemovalType) String() string { return tableRemovalTypeStrs[trt] } -// ITrafficSwitcher is a temporary hack to allow us to move streamMigrater out -// of package wrangler without also needing to move trafficSwitcher in the same -// changeset. +// ITrafficSwitcher is a hack to allow us to maintain the legacy wrangler +// package for vtctl/vtctlclient while migrating most of the TrafficSwitcher +// related code to the workflow package for vtctldclient usage. // -// After moving TrafficSwitcher to this package, this type should be removed, -// and StreamMigrator should be updated to contain a field of type -// *TrafficSwitcher instead of ITrafficSwitcher. +// After moving TrafficSwitcher to this package and removing the implementation +// in wrangler, this type should be removed, and StreamMigrator should be updated +// to contain a field of type *TrafficSwitcher instead of ITrafficSwitcher. type ITrafficSwitcher interface { /* Functions that expose types and behavior contained in *wrangler.Wrangler */ @@ -165,6 +204,96 @@ func (source *MigrationSource) GetPrimary() *topo.TabletInfo { return source.primary } +// trafficSwitcher contains the metadata for switching read and write traffic +// for vreplication streams. 
+type trafficSwitcher struct { + ws *Server + logger logutil.Logger + + migrationType binlogdatapb.MigrationType + isPartialMigration bool + workflow string + + // if frozen is true, the rest of the fields are not set. + frozen bool + reverseWorkflow string + id int64 + sources map[string]*MigrationSource + targets map[string]*MigrationTarget + sourceKeyspace string + targetKeyspace string + tables []string + keepRoutingRules bool + sourceKSSchema *vindexes.KeyspaceSchema + optCells string // cells option passed to MoveTables/Reshard Create + optTabletTypes string // tabletTypes option passed to MoveTables/Reshard Create + externalCluster string + externalTopo *topo.Server + sourceTimeZone string + targetTimeZone string + workflowType binlogdatapb.VReplicationWorkflowType + workflowSubType binlogdatapb.VReplicationWorkflowSubType +} + +func (ts *trafficSwitcher) TopoServer() *topo.Server { return ts.ws.ts } +func (ts *trafficSwitcher) TabletManagerClient() tmclient.TabletManagerClient { return ts.ws.tmc } +func (ts *trafficSwitcher) Logger() logutil.Logger { + if ts.logger == nil { + ts.logger = logutil.NewConsoleLogger() + } + return ts.logger +} +func (ts *trafficSwitcher) VReplicationExec(ctx context.Context, alias *topodatapb.TabletAlias, query string) (*querypb.QueryResult, error) { + return ts.ws.VReplicationExec(ctx, alias, query) +} +func (ts *trafficSwitcher) ExternalTopo() *topo.Server { return ts.externalTopo } +func (ts *trafficSwitcher) MigrationType() binlogdatapb.MigrationType { return ts.migrationType } +func (ts *trafficSwitcher) IsPartialMigration() bool { return ts.isPartialMigration } +func (ts *trafficSwitcher) ReverseWorkflowName() string { return ts.reverseWorkflow } +func (ts *trafficSwitcher) SourceKeyspaceName() string { return ts.sourceKSSchema.Keyspace.Name } +func (ts *trafficSwitcher) SourceKeyspaceSchema() *vindexes.KeyspaceSchema { return ts.sourceKSSchema } +func (ts *trafficSwitcher) Sources() map[string]*MigrationSource { return 
ts.sources } +func (ts *trafficSwitcher) Tables() []string { return ts.tables } +func (ts *trafficSwitcher) TargetKeyspaceName() string { return ts.targetKeyspace } +func (ts *trafficSwitcher) Targets() map[string]*MigrationTarget { return ts.targets } +func (ts *trafficSwitcher) WorkflowName() string { return ts.workflow } +func (ts *trafficSwitcher) SourceTimeZone() string { return ts.sourceTimeZone } +func (ts *trafficSwitcher) TargetTimeZone() string { return ts.targetTimeZone } + +func (ts *trafficSwitcher) ForAllSources(f func(source *MigrationSource) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, source := range ts.sources { + wg.Add(1) + go func(source *MigrationSource) { + defer wg.Done() + + if err := f(source); err != nil { + allErrors.RecordError(err) + } + }(source) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} + +func (ts *trafficSwitcher) ForAllTargets(f func(source *MigrationTarget) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, target := range ts.targets { + wg.Add(1) + go func(target *MigrationTarget) { + defer wg.Done() + + if err := f(target); err != nil { + allErrors.RecordError(err) + } + }(target) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} + // MigrationTarget contains the metadata for each migration target. type MigrationTarget struct { si *topo.ShardInfo @@ -184,204 +313,1241 @@ func (target *MigrationTarget) GetPrimary() *topo.TabletInfo { return target.primary } -// BuildTargets collects MigrationTargets and other metadata (see TargetInfo) -// from a workflow in the target keyspace. -// -// It returns ErrNoStreams if there are no targets found for the workflow. 
-func BuildTargets(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, targetKeyspace string, workflow string) (*TargetInfo, error) { - targetShards, err := ts.GetShardNames(ctx, targetKeyspace) +func (ts *trafficSwitcher) SourceShards() []*topo.ShardInfo { + shards := make([]*topo.ShardInfo, 0, len(ts.Sources())) + for _, source := range ts.Sources() { + shards = append(shards, source.GetShard()) + } + return shards +} + +func (ts *trafficSwitcher) TargetShards() []*topo.ShardInfo { + shards := make([]*topo.ShardInfo, 0, len(ts.Targets())) + for _, target := range ts.Targets() { + shards = append(shards, target.GetShard()) + } + return shards +} + +func (ts *trafficSwitcher) getSourceAndTargetShardsNames() ([]string, []string) { + var sourceShards, targetShards []string + for _, si := range ts.SourceShards() { + sourceShards = append(sourceShards, si.ShardName()) + } + for _, si := range ts.TargetShards() { + targetShards = append(targetShards, si.ShardName()) + } + return sourceShards, targetShards +} + +// isPartialMoveTables returns true if whe workflow is MoveTables, has the same +// number of shards, is not covering the entire shard range, and has one-to-one +// shards in source and target. +func (ts *trafficSwitcher) isPartialMoveTables(sourceShards, targetShards []string) (bool, error) { + if ts.MigrationType() != binlogdatapb.MigrationType_TABLES { + return false, nil + } + + skr, tkr, err := getSourceAndTargetKeyRanges(sourceShards, targetShards) if err != nil { - return nil, err + return false, err + } + + if key.KeyRangeIsComplete(skr) || key.KeyRangeIsComplete(tkr) || len(sourceShards) != len(targetShards) { + return false, nil } - var ( - frozen bool - optCells string - optTabletTypes string - targets = make(map[string]*MigrationTarget, len(targetShards)) - workflowType binlogdatapb.VReplicationWorkflowType - workflowSubType binlogdatapb.VReplicationWorkflowSubType - ) - - // We check all shards in the target keyspace. 
Not all of them may have a - // stream. For example, if we're splitting -80 to [-40,40-80], only those - // two target shards will have vreplication streams, and the other shards in - // the target keyspace will not. - for _, targetShard := range targetShards { - si, err := ts.GetShard(ctx, targetKeyspace, targetShard) + return key.KeyRangeEqual(skr, tkr), nil +} + +// addParticipatingTablesToKeyspace updates the vschema with the new tables that +// were created as part of the Migrate flow. It is called when the Migrate flow +// is Completed. +func (ts *trafficSwitcher) addParticipatingTablesToKeyspace(ctx context.Context, keyspace, tableSpecs string) error { + vschema, err := ts.TopoServer().GetVSchema(ctx, keyspace) + if err != nil { + return err + } + if vschema == nil { + return fmt.Errorf("no vschema found for keyspace %s", keyspace) + } + if vschema.Tables == nil { + vschema.Tables = make(map[string]*vschemapb.Table) + } + if strings.HasPrefix(tableSpecs, "{") { // user defined the vschema snippet, typically for a sharded target + wrap := fmt.Sprintf(`{"tables": %s}`, tableSpecs) + ks := &vschemapb.Keyspace{} + if err := json2.Unmarshal([]byte(wrap), ks); err != nil { + return err + } if err != nil { - return nil, err + return err + } + for table, vtab := range ks.Tables { + vschema.Tables[table] = vtab + } + } else { + if vschema.Sharded { + return fmt.Errorf("no sharded vschema was provided, so you will need to update the vschema of the target manually for the moved tables") + } + for _, table := range ts.tables { + vschema.Tables[table] = &vschemapb.Table{} + } + } + return ts.TopoServer().SaveVSchema(ctx, keyspace, vschema) +} + +func (ts *trafficSwitcher) deleteRoutingRules(ctx context.Context) error { + rules, err := topotools.GetRoutingRules(ctx, ts.TopoServer()) + if err != nil { + return err + } + for _, table := range ts.Tables() { + delete(rules, table) + delete(rules, table+"@replica") + delete(rules, table+"@rdonly") + delete(rules, 
ts.TargetKeyspaceName()+"."+table) + delete(rules, ts.TargetKeyspaceName()+"."+table+"@replica") + delete(rules, ts.TargetKeyspaceName()+"."+table+"@rdonly") + delete(rules, ts.SourceKeyspaceName()+"."+table) + delete(rules, ts.SourceKeyspaceName()+"."+table+"@replica") + delete(rules, ts.SourceKeyspaceName()+"."+table+"@rdonly") + } + if err := topotools.SaveRoutingRules(ctx, ts.TopoServer(), rules); err != nil { + return err + } + return nil +} + +func (ts *trafficSwitcher) deleteShardRoutingRules(ctx context.Context) error { + if !ts.isPartialMigration { + return nil + } + srr, err := topotools.GetShardRoutingRules(ctx, ts.TopoServer()) + if err != nil { + return err + } + for _, si := range ts.TargetShards() { + delete(srr, fmt.Sprintf("%s.%s", ts.targetKeyspace, si.ShardName())) + } + if err := topotools.SaveShardRoutingRules(ctx, ts.TopoServer(), srr); err != nil { + return err + } + return nil +} + +func (ts *trafficSwitcher) dropSourceDeniedTables(ctx context.Context) error { + return ts.ForAllSources(func(source *MigrationSource) error { + if _, err := ts.TopoServer().UpdateShardFields(ctx, ts.SourceKeyspaceName(), source.GetShard().ShardName(), func(si *topo.ShardInfo) error { + return si.UpdateDeniedTables(ctx, topodatapb.TabletType_PRIMARY, nil, true, ts.Tables()) + }); err != nil { + return err } + rtbsCtx, cancel := context.WithTimeout(ctx, shardTabletRefreshTimeout) + defer cancel() + _, _, err := topotools.RefreshTabletsByShard(rtbsCtx, ts.TopoServer(), ts.TabletManagerClient(), source.GetShard(), nil, ts.Logger()) + return err + }) +} - if si.PrimaryAlias == nil { - // This can happen if bad inputs are given. 
- return nil, fmt.Errorf("shard %v/%v doesn't have a primary set", targetKeyspace, targetShard) +func (ts *trafficSwitcher) dropTargetDeniedTables(ctx context.Context) error { + return ts.ForAllTargets(func(target *MigrationTarget) error { + if _, err := ts.TopoServer().UpdateShardFields(ctx, ts.TargetKeyspaceName(), target.GetShard().ShardName(), func(si *topo.ShardInfo) error { + return si.UpdateDeniedTables(ctx, topodatapb.TabletType_PRIMARY, nil, true, ts.Tables()) + }); err != nil { + return err } + rtbsCtx, cancel := context.WithTimeout(ctx, shardTabletRefreshTimeout) + defer cancel() + _, _, err := topotools.RefreshTabletsByShard(rtbsCtx, ts.TopoServer(), ts.TabletManagerClient(), target.GetShard(), nil, ts.Logger()) + return err + }) +} + +func (ts *trafficSwitcher) validateWorkflowHasCompleted(ctx context.Context) error { + return doValidateWorkflowHasCompleted(ctx, ts) +} + +func (ts *trafficSwitcher) dropParticipatingTablesFromKeyspace(ctx context.Context, keyspace string) error { + vschema, err := ts.TopoServer().GetVSchema(ctx, keyspace) + if err != nil { + return err + } + // VReplication does NOT create the vschema entries in SHARDED + // TARGET keyspaces -- as we cannot know the proper vindex + // definitions to use -- and we should not delete them either + // (on workflow Cancel) as the user must create them separately + // and they contain information about the vindex definitions, etc. 
+ if vschema.Sharded && keyspace == ts.TargetKeyspaceName() { + return nil + } + for _, tableName := range ts.Tables() { + delete(vschema.Tables, tableName) + } + return ts.TopoServer().SaveVSchema(ctx, keyspace, vschema) +} + +func (ts *trafficSwitcher) removeSourceTables(ctx context.Context, removalType TableRemovalType) error { + err := ts.ForAllSources(func(source *MigrationSource) error { + for _, tableName := range ts.Tables() { + query := fmt.Sprintf("drop table %s.%s", + sqlescape.EscapeID(sqlescape.UnescapeID(source.GetPrimary().DbName())), + sqlescape.EscapeID(sqlescape.UnescapeID(tableName))) + if removalType == DropTable { + ts.Logger().Infof("%s: Dropping table %s.%s\n", + source.GetPrimary().String(), source.GetPrimary().DbName(), tableName) + } else { + renameName := getRenameFileName(tableName) + ts.Logger().Infof("%s: Renaming table %s.%s to %s.%s\n", + source.GetPrimary().String(), source.GetPrimary().DbName(), tableName, source.GetPrimary().DbName(), renameName) + query = fmt.Sprintf("rename table %s.%s TO %s.%s", + sqlescape.EscapeID(sqlescape.UnescapeID(source.GetPrimary().DbName())), + sqlescape.EscapeID(sqlescape.UnescapeID(tableName)), + sqlescape.EscapeID(sqlescape.UnescapeID(source.GetPrimary().DbName())), + sqlescape.EscapeID(sqlescape.UnescapeID(renameName))) + } + _, err := ts.ws.tmc.ExecuteFetchAsDba(ctx, source.GetPrimary().Tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ + Query: []byte(query), + MaxRows: 1, + ReloadSchema: true, + }) + if err != nil { + ts.Logger().Errorf("%s: Error removing table %s: %v", source.GetPrimary().String(), tableName, err) + return err + } + ts.Logger().Infof("%s: Removed table %s.%s\n", source.GetPrimary().String(), source.GetPrimary().DbName(), tableName) - primary, err := ts.GetTablet(ctx, si.PrimaryAlias) - if err != nil { - return nil, err } + return nil + }) + if err != nil { + return err + } + + return ts.dropParticipatingTablesFromKeyspace(ctx, ts.SourceKeyspaceName()) +} - // NB: 
changing the whitespace of this query breaks tests for now. - // (TODO:@ajm188) extend FakeDBClient to be less whitespace-sensitive on - // expected queries. - query := fmt.Sprintf("select id, source, message, cell, tablet_types, workflow_type, workflow_sub_type, defer_secondary_keys from _vt.vreplication where workflow=%s and db_name=%s", encodeString(workflow), encodeString(primary.DbName())) - p3qr, err := tmc.VReplicationExec(ctx, primary.Tablet, query) +// FIXME: even after dropSourceShards there are still entries in the topo, need to research and fix +func (ts *trafficSwitcher) dropSourceShards(ctx context.Context) error { + return ts.ForAllSources(func(source *MigrationSource) error { + ts.Logger().Infof("Deleting shard %s.%s\n", source.GetShard().Keyspace(), source.GetShard().ShardName()) + err := ts.ws.DeleteShard(ctx, source.GetShard().Keyspace(), source.GetShard().ShardName(), true, false) if err != nil { - return nil, err + ts.Logger().Errorf("Error deleting shard %s: %v", source.GetShard().ShardName(), err) + return err } + ts.Logger().Infof("Deleted shard %s.%s\n", source.GetShard().Keyspace(), source.GetShard().ShardName()) + return nil + }) +} - if len(p3qr.Rows) < 1 { - continue +func (ts *trafficSwitcher) switchShardReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, direction TrafficSwitchDirection) error { + var fromShards, toShards []*topo.ShardInfo + if direction == DirectionForward { + fromShards, toShards = ts.SourceShards(), ts.TargetShards() + } else { + fromShards, toShards = ts.TargetShards(), ts.SourceShards() + } + if err := ts.TopoServer().ValidateSrvKeyspace(ctx, ts.TargetKeyspaceName(), strings.Join(cells, ",")); err != nil { + err2 := vterrors.Wrapf(err, "Before switching shard reads, found SrvKeyspace for %s is corrupt in cell %s", + ts.TargetKeyspaceName(), strings.Join(cells, ",")) + log.Errorf("%w", err2) + return err2 + } + for _, servedType := range servedTypes { + if err := 
ts.ws.updateShardRecords(ctx, ts.SourceKeyspaceName(), fromShards, cells, servedType, true /* isFrom */, false /* clearSourceShards */, ts.logger); err != nil { + return err + } + if err := ts.ws.updateShardRecords(ctx, ts.SourceKeyspaceName(), toShards, cells, servedType, false, false, ts.logger); err != nil { + return err } + err := ts.TopoServer().MigrateServedType(ctx, ts.SourceKeyspaceName(), toShards, fromShards, servedType, cells) + if err != nil { + return err + } + } + if err := ts.TopoServer().ValidateSrvKeyspace(ctx, ts.TargetKeyspaceName(), strings.Join(cells, ",")); err != nil { + err2 := vterrors.Wrapf(err, "after switching shard reads, found SrvKeyspace for %s is corrupt in cell %s", + ts.TargetKeyspaceName(), strings.Join(cells, ",")) + log.Errorf("%w", err2) + return err2 + } + return nil +} - target := &MigrationTarget{ - si: si, - primary: primary, - Sources: make(map[int32]*binlogdatapb.BinlogSource), +func (ts *trafficSwitcher) switchTableReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, direction TrafficSwitchDirection) error { + log.Infof("switchTableReads: servedTypes: %+v, direction %t", servedTypes, direction) + rules, err := topotools.GetRoutingRules(ctx, ts.TopoServer()) + if err != nil { + return err + } + // We assume that the following rules were setup when the targets were created: + // table -> sourceKeyspace.table + // targetKeyspace.table -> sourceKeyspace.table + // For forward migration, we add tablet type specific rules to redirect traffic to the target. + // For backward, we redirect to source. 
+ for _, servedType := range servedTypes { + if servedType != topodatapb.TabletType_REPLICA && servedType != topodatapb.TabletType_RDONLY { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid tablet type specified when switching reads: %v", servedType) } - qr := sqltypes.Proto3ToResult(p3qr) - for _, row := range qr.Named().Rows { - id, err := row["id"].ToInt32() - if err != nil { - return nil, err + tt := strings.ToLower(servedType.String()) + for _, table := range ts.Tables() { + if direction == DirectionForward { + log.Infof("Route direction forward") + } else { + log.Infof("Route direction backwards") } + toTarget := []string{ts.TargetKeyspaceName() + "." + table} + rules[table+"@"+tt] = toTarget + rules[ts.TargetKeyspaceName()+"."+table+"@"+tt] = toTarget + rules[ts.SourceKeyspaceName()+"."+table+"@"+tt] = toTarget + } + } + if err := topotools.SaveRoutingRules(ctx, ts.TopoServer(), rules); err != nil { + return err + } + return ts.TopoServer().RebuildSrvVSchema(ctx, cells) +} - var bls binlogdatapb.BinlogSource - rowBytes, err := row["source"].ToBytes() - if err != nil { - return nil, err - } - if err := prototext.Unmarshal(rowBytes, &bls); err != nil { - return nil, err - } +func (ts *trafficSwitcher) startReverseVReplication(ctx context.Context) error { + return ts.ForAllSources(func(source *MigrationSource) error { + query := fmt.Sprintf("update _vt.vreplication set state='Running', message='' where db_name=%s", encodeString(source.GetPrimary().DbName())) + _, err := ts.VReplicationExec(ctx, source.GetPrimary().Alias, query) + return err + }) +} - if row["message"].ToString() == Frozen { - frozen = true +func (ts *trafficSwitcher) createJournals(ctx context.Context, sourceWorkflows []string) error { + log.Infof("In createJournals for source workflows %+v", sourceWorkflows) + return ts.ForAllSources(func(source *MigrationSource) error { + if source.Journaled { + return nil + } + participants := make([]*binlogdatapb.KeyspaceShard, 0) + 
participantMap := make(map[string]bool) + journal := &binlogdatapb.Journal{ + Id: ts.id, + MigrationType: ts.MigrationType(), + Tables: ts.Tables(), + LocalPosition: source.Position, + Participants: participants, + SourceWorkflows: sourceWorkflows, + } + for targetShard, target := range ts.Targets() { + for _, tsource := range target.Sources { + participantMap[tsource.Shard] = true } + journal.ShardGtids = append(journal.ShardGtids, &binlogdatapb.ShardGtid{ + Keyspace: ts.TargetKeyspaceName(), + Shard: targetShard, + Gtid: target.Position, + }) + } + shards := make([]string, 0) + for shard := range participantMap { + shards = append(shards, shard) + } + sort.Sort(vreplication.ShardSorter(shards)) + for _, shard := range shards { + journal.Participants = append(journal.Participants, &binlogdatapb.KeyspaceShard{ + Keyspace: source.GetShard().Keyspace(), + Shard: shard, + }) + + } + log.Infof("Creating journal %v", journal) + ts.Logger().Infof("Creating journal: %v", journal) + statement := fmt.Sprintf("insert into _vt.resharding_journal "+ + "(id, db_name, val) "+ + "values (%v, %v, %v)", + ts.id, encodeString(source.GetPrimary().DbName()), encodeString(journal.String())) + if _, err := ts.TabletManagerClient().VReplicationExec(ctx, source.GetPrimary().Tablet, statement); err != nil { + return err + } + return nil + }) +} + +func (ts *trafficSwitcher) changeShardsAccess(ctx context.Context, keyspace string, shards []*topo.ShardInfo, access accessType) error { + if err := ts.TopoServer().UpdateDisableQueryService(ctx, keyspace, shards, topodatapb.TabletType_PRIMARY, nil, access == disallowWrites /* disable */); err != nil { + return err + } + return ts.ws.refreshPrimaryTablets(ctx, shards) +} + +func (ts *trafficSwitcher) allowTargetWrites(ctx context.Context) error { + if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + return ts.allowTableTargetWrites(ctx) + } + return ts.changeShardsAccess(ctx, ts.TargetKeyspaceName(), ts.TargetShards(), allowWrites) +} 
+ +func (ts *trafficSwitcher) allowTableTargetWrites(ctx context.Context) error { + return ts.ForAllTargets(func(target *MigrationTarget) error { + if _, err := ts.TopoServer().UpdateShardFields(ctx, ts.TargetKeyspaceName(), target.GetShard().ShardName(), func(si *topo.ShardInfo) error { + return si.UpdateDeniedTables(ctx, topodatapb.TabletType_PRIMARY, nil, true, ts.Tables()) + }); err != nil { + return err + } + rtbsCtx, cancel := context.WithTimeout(ctx, shardTabletRefreshTimeout) + defer cancel() + _, _, err := topotools.RefreshTabletsByShard(rtbsCtx, ts.TopoServer(), ts.TabletManagerClient(), target.GetShard(), nil, ts.Logger()) + return err + }) +} + +func (ts *trafficSwitcher) changeRouting(ctx context.Context) error { + if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + return ts.changeWriteRoute(ctx) + } + return ts.changeShardRouting(ctx) +} + +func (ts *trafficSwitcher) changeWriteRoute(ctx context.Context) error { + if ts.isPartialMigration { + srr, err := topotools.GetShardRoutingRules(ctx, ts.TopoServer()) + if err != nil { + return err + } + for _, si := range ts.SourceShards() { + delete(srr, fmt.Sprintf("%s.%s", ts.TargetKeyspaceName(), si.ShardName())) + ts.Logger().Infof("Deleted shard routing: %v:%v", ts.TargetKeyspaceName(), si.ShardName()) + srr[fmt.Sprintf("%s.%s", ts.SourceKeyspaceName(), si.ShardName())] = ts.TargetKeyspaceName() + ts.Logger().Infof("Added shard routing: %v:%v", ts.SourceKeyspaceName(), si.ShardName()) + } + if err := topotools.SaveShardRoutingRules(ctx, ts.TopoServer(), srr); err != nil { + return err + } + } else { + rules, err := topotools.GetRoutingRules(ctx, ts.TopoServer()) + if err != nil { + return err + } + for _, table := range ts.Tables() { + targetKsTable := fmt.Sprintf("%s.%s", ts.TargetKeyspaceName(), table) + sourceKsTable := fmt.Sprintf("%s.%s", ts.SourceKeyspaceName(), table) + delete(rules, targetKsTable) + ts.Logger().Infof("Deleted routing: %s", targetKsTable) + rules[table] = 
[]string{targetKsTable} + rules[sourceKsTable] = []string{targetKsTable} + ts.Logger().Infof("Added routing: %v %v", table, sourceKsTable) + } + if err := topotools.SaveRoutingRules(ctx, ts.TopoServer(), rules); err != nil { + return err + } + } + + return ts.TopoServer().RebuildSrvVSchema(ctx, nil) +} - target.Sources[id] = &bls - optCells = row["cell"].ToString() - optTabletTypes = row["tablet_types"].ToString() +func (ts *trafficSwitcher) changeShardRouting(ctx context.Context) error { + if err := ts.TopoServer().ValidateSrvKeyspace(ctx, ts.TargetKeyspaceName(), ""); err != nil { + err2 := vterrors.Wrapf(err, "Before changing shard routes, found SrvKeyspace for %s is corrupt", ts.TargetKeyspaceName()) + log.Errorf("%w", err2) + return err2 + } + err := ts.ForAllSources(func(source *MigrationSource) error { + _, err := ts.TopoServer().UpdateShardFields(ctx, ts.SourceKeyspaceName(), source.GetShard().ShardName(), func(si *topo.ShardInfo) error { + si.IsPrimaryServing = false + return nil + }) + return err + }) + if err != nil { + return err + } + err = ts.ForAllTargets(func(target *MigrationTarget) error { + _, err := ts.TopoServer().UpdateShardFields(ctx, ts.TargetKeyspaceName(), target.GetShard().ShardName(), func(si *topo.ShardInfo) error { + si.IsPrimaryServing = true + return nil + }) + return err + }) + if err != nil { + return err + } + err = ts.TopoServer().MigrateServedType(ctx, ts.TargetKeyspaceName(), ts.TargetShards(), ts.SourceShards(), topodatapb.TabletType_PRIMARY, nil) + if err != nil { + return err + } + if err := ts.TopoServer().ValidateSrvKeyspace(ctx, ts.TargetKeyspaceName(), ""); err != nil { + err2 := vterrors.Wrapf(err, "after changing shard routes, found SrvKeyspace for %s is corrupt", ts.TargetKeyspaceName()) + log.Errorf("%w", err2) + return err2 + } + return nil +} - workflowType = getVReplicationWorkflowType(row) - workflowSubType = getVReplicationWorkflowSubType(row) +func (ts *trafficSwitcher) 
getReverseVReplicationUpdateQuery(targetCell string, sourceCell string, dbname string) string { + // we try to be clever to understand what user intends: + // if target's cell is present in cells but not source's cell we replace it + // with the source's cell. + if ts.optCells != "" && targetCell != sourceCell && strings.Contains(ts.optCells+",", targetCell+",") && + !strings.Contains(ts.optCells+",", sourceCell+",") { + ts.optCells = strings.Replace(ts.optCells, targetCell, sourceCell, 1) + } + if ts.optCells != "" || ts.optTabletTypes != "" { + query := fmt.Sprintf("update _vt.vreplication set cell = '%s', tablet_types = '%s' where workflow = '%s' and db_name = '%s'", + ts.optCells, ts.optTabletTypes, ts.ReverseWorkflowName(), dbname) + return query + } + return "" +} + +func (ts *trafficSwitcher) deleteReverseVReplication(ctx context.Context) error { + return ts.ForAllSources(func(source *MigrationSource) error { + query := fmt.Sprintf(sqlDeleteWorkflow, encodeString(source.GetPrimary().DbName()), encodeString(ts.reverseWorkflow)) + if _, err := ts.TabletManagerClient().VReplicationExec(ctx, source.GetPrimary().Tablet, query); err != nil { + return err } + ts.ws.deleteWorkflowVDiffData(ctx, source.GetPrimary().Tablet, ts.reverseWorkflow) + ts.ws.optimizeCopyStateTable(source.GetPrimary().Tablet) + return nil + }) +} - targets[targetShard] = target +func (ts *trafficSwitcher) ForAllUIDs(f func(target *MigrationTarget, uid int32) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, target := range ts.Targets() { + for uid := range target.Sources { + wg.Add(1) + go func(target *MigrationTarget, uid int32) { + defer wg.Done() + + if err := f(target, uid); err != nil { + allErrors.RecordError(err) + } + }(target, uid) + } } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} - if len(targets) == 0 { - return nil, fmt.Errorf("%w in keyspace %s for %s", ErrNoStreams, targetKeyspace, workflow) +func (ts 
*trafficSwitcher) createReverseVReplication(ctx context.Context) error { + if err := ts.deleteReverseVReplication(ctx); err != nil { + return err } + err := ts.ForAllUIDs(func(target *MigrationTarget, uid int32) error { + bls := target.Sources[uid] + source := ts.Sources()[bls.Shard] + reverseBls := &binlogdatapb.BinlogSource{ + Keyspace: ts.TargetKeyspaceName(), + Shard: target.GetShard().ShardName(), + TabletType: bls.TabletType, + Filter: &binlogdatapb.Filter{}, + OnDdl: bls.OnDdl, + SourceTimeZone: bls.TargetTimeZone, + TargetTimeZone: bls.SourceTimeZone, + } - return &TargetInfo{ - Targets: targets, - Frozen: frozen, - OptCells: optCells, - OptTabletTypes: optTabletTypes, - WorkflowType: workflowType, - WorkflowSubType: workflowSubType, - }, nil + for _, rule := range bls.Filter.Rules { + if rule.Filter == "exclude" { + reverseBls.Filter.Rules = append(reverseBls.Filter.Rules, rule) + continue + } + var filter string + if strings.HasPrefix(rule.Match, "/") { + if ts.SourceKeyspaceSchema().Keyspace.Sharded { + filter = key.KeyRangeString(source.GetShard().KeyRange) + } + } else { + var inKeyrange string + if ts.SourceKeyspaceSchema().Keyspace.Sharded { + vtable, ok := ts.SourceKeyspaceSchema().Tables[rule.Match] + if !ok { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "table %s not found in vschema", rule.Match) + } + // We currently assume the primary vindex is the best way to filter rows + // for the table, which may not always be true. + // TODO: handle more of these edge cases explicitly, e.g. sequence tables. + switch vtable.Type { + case vindexes.TypeReference: + // For reference tables there are no vindexes and thus no filter to apply. + default: + // For non-reference tables we return an error if there's no primary + // vindex as it's not clear what to do. 
+ if len(vtable.ColumnVindexes) > 0 && len(vtable.ColumnVindexes[0].Columns) > 0 { + inKeyrange = fmt.Sprintf(" where in_keyrange(%s, '%s.%s', '%s')", sqlparser.String(vtable.ColumnVindexes[0].Columns[0]), + ts.SourceKeyspaceName(), vtable.ColumnVindexes[0].Name, key.KeyRangeString(source.GetShard().KeyRange)) + } else { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "no primary vindex found for the %s table in the %s keyspace", + vtable.Name.String(), ts.SourceKeyspaceName()) + } + } + } + filter = fmt.Sprintf("select * from %s%s", sqlescape.EscapeID(rule.Match), inKeyrange) + } + reverseBls.Filter.Rules = append(reverseBls.Filter.Rules, &binlogdatapb.Rule{ + Match: rule.Match, + Filter: filter, + }) + } + log.Infof("Creating reverse workflow vreplication stream on tablet %s: workflow %s, startPos %s", + source.GetPrimary().Alias, ts.ReverseWorkflowName(), target.Position) + _, err := ts.VReplicationExec(ctx, source.GetPrimary().Alias, + binlogplayer.CreateVReplicationState(ts.ReverseWorkflowName(), reverseBls, target.Position, + binlogdatapb.VReplicationWorkflowState_Stopped, source.GetPrimary().DbName(), ts.workflowType, ts.workflowSubType)) + if err != nil { + return err + } + + // if user has defined the cell/tablet_types parameters in the forward workflow, update the reverse workflow as well + updateQuery := ts.getReverseVReplicationUpdateQuery(target.GetPrimary().Alias.Cell, source.GetPrimary().Alias.Cell, source.GetPrimary().DbName()) + if updateQuery != "" { + log.Infof("Updating vreplication stream entry on %s with: %s", source.GetPrimary().Alias, updateQuery) + _, err = ts.VReplicationExec(ctx, source.GetPrimary().Alias, updateQuery) + return err + } + return nil + }) + return err } -func getVReplicationWorkflowType(row sqltypes.RowNamedValues) binlogdatapb.VReplicationWorkflowType { - i, _ := row["workflow_type"].ToInt32() - return binlogdatapb.VReplicationWorkflowType(i) +func (ts *trafficSwitcher) waitForCatchup(ctx context.Context, 
filteredReplicationWaitTime time.Duration) error { + ctx, cancel := context.WithTimeout(ctx, filteredReplicationWaitTime) + defer cancel() + // Source writes have been stopped, wait for all streams on targets to catch up. + if err := ts.ForAllUIDs(func(target *MigrationTarget, uid int32) error { + ts.Logger().Infof("Before Catchup: uid: %d, target primary %s, target position %s, shard %s", uid, + target.GetPrimary().AliasString(), target.Position, target.GetShard().String()) + bls := target.Sources[uid] + source := ts.Sources()[bls.Shard] + ts.Logger().Infof("Before Catchup: waiting for keyspace:shard: %v:%v to reach source position %v, uid %d", + ts.TargetKeyspaceName(), target.GetShard().ShardName(), source.Position, uid) + if err := ts.TabletManagerClient().VReplicationWaitForPos(ctx, target.GetPrimary().Tablet, uid, source.Position); err != nil { + return err + } + log.Infof("After catchup: target keyspace:shard: %v:%v, source position %v, uid %d", + ts.TargetKeyspaceName(), target.GetShard().ShardName(), source.Position, uid) + ts.Logger().Infof("After catchup: position for keyspace:shard: %v:%v reached, uid %d", + ts.TargetKeyspaceName(), target.GetShard().ShardName(), uid) + if _, err := ts.TabletManagerClient().VReplicationExec(ctx, target.GetPrimary().Tablet, binlogplayer.StopVReplication(uid, "stopped for cutover")); err != nil { + log.Infof("Error marking stopped for cutover on %s, uid %d", target.GetPrimary().AliasString(), uid) + return err + } + return nil + }); err != nil { + return err + } + // all targets have caught up, record their positions for setting up reverse workflows + return ts.ForAllTargets(func(target *MigrationTarget) error { + var err error + target.Position, err = ts.TabletManagerClient().PrimaryPosition(ctx, target.GetPrimary().Tablet) + ts.Logger().Infof("After catchup, position for target primary %s, %v", target.GetPrimary().AliasString(), target.Position) + return err + }) } -func getVReplicationWorkflowSubType(row 
// stopSourceWrites disables writes on the source side of the workflow:
// for MoveTables it adds the workflow's tables to the source shards'
// denied-tables list; for Reshard it disables query service on the source
// shards. It then records each source primary's current position, which is
// later used as the catch-up point for the target streams.
func (ts *trafficSwitcher) stopSourceWrites(ctx context.Context) error {
	var err error
	if ts.MigrationType() == binlogdatapb.MigrationType_TABLES {
		err = ts.changeTableSourceWrites(ctx, disallowWrites)
	} else {
		err = ts.changeShardsAccess(ctx, ts.SourceKeyspaceName(), ts.SourceShards(), disallowWrites)
	}
	if err != nil {
		log.Warningf("Error: %s", err)
		return err
	}
	return ts.ForAllSources(func(source *MigrationSource) error {
		var err error
		// Record the primary position; the position is logged even when the
		// call failed (it will be empty in that case).
		source.Position, err = ts.TabletManagerClient().PrimaryPosition(ctx, source.GetPrimary().Tablet)
		log.Infof("Stopped Source Writes. Position for source %v:%v: %v",
			ts.SourceKeyspaceName(), source.GetShard().ShardName(), source.Position)
		if err != nil {
			log.Warningf("Error: %s", err)
		}
		return err
	})
}
-func CompareShards(ctx context.Context, keyspace string, shards []*topo.ShardInfo, ts *topo.Server) error { - shardSet := sets.New[string]() - for _, si := range shards { - shardSet.Insert(si.ShardName()) +func (ts *trafficSwitcher) changeTableSourceWrites(ctx context.Context, access accessType) error { + err := ts.ForAllSources(func(source *MigrationSource) error { + if _, err := ts.TopoServer().UpdateShardFields(ctx, ts.SourceKeyspaceName(), source.GetShard().ShardName(), func(si *topo.ShardInfo) error { + return si.UpdateDeniedTables(ctx, topodatapb.TabletType_PRIMARY, nil, access == allowWrites /* remove */, ts.Tables()) + }); err != nil { + return err + } + rtbsCtx, cancel := context.WithTimeout(ctx, shardTabletRefreshTimeout) + defer cancel() + isPartial, partialDetails, err := topotools.RefreshTabletsByShard(rtbsCtx, ts.TopoServer(), ts.TabletManagerClient(), source.GetShard(), nil, ts.Logger()) + if isPartial { + err = fmt.Errorf("failed to successfully refresh all tablets in the %s/%s source shard (%v):\n %v", + source.GetShard().Keyspace(), source.GetShard().ShardName(), err, partialDetails) + } + return err + }) + if err != nil { + log.Warningf("Error in changeTableSourceWrites: %s", err) + return err } + // Note that the denied tables, which are being updated in this method, are not part of the SrvVSchema in the topo. + // However, we are using the notification of a SrvVSchema change in VTGate to recompute the state of a + // MoveTables workflow (which also looks up denied tables from the topo). So we need to trigger a SrvVSchema change here. 
+ return ts.TopoServer().RebuildSrvVSchema(ctx, nil) +} - topoShards, err := ts.GetShardNames(ctx, keyspace) +func (ts *trafficSwitcher) cancelMigration(ctx context.Context, sm *StreamMigrator) { + var err error + if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + err = ts.changeTableSourceWrites(ctx, allowWrites) + } else { + err = ts.changeShardsAccess(ctx, ts.SourceKeyspaceName(), ts.SourceShards(), allowWrites) + } if err != nil { + ts.Logger().Errorf("Cancel migration failed:", err) + } + + sm.CancelMigration(ctx) + + err = ts.ForAllTargets(func(target *MigrationTarget) error { + query := fmt.Sprintf("update _vt.vreplication set state='Running', message='' where db_name=%s and workflow=%s", + encodeString(target.GetPrimary().DbName()), encodeString(ts.WorkflowName())) + _, err := ts.TabletManagerClient().VReplicationExec(ctx, target.GetPrimary().Tablet, query) return err + }) + if err != nil { + ts.Logger().Errorf("Cancel migration failed: could not restart vreplication: %v", err) } - topoShardSet := sets.New[string](topoShards...) - if !shardSet.Equal(topoShardSet) { - wfExtra := shardSet.Difference(topoShardSet) - topoExtra := topoShardSet.Difference(shardSet) + err = ts.deleteReverseVReplication(ctx) + if err != nil { + ts.Logger().Errorf("Cancel migration failed: could not delete revers vreplication entries: %v", err) + } +} - var rec concurrency.AllErrorRecorder - if wfExtra.Len() > 0 { - wfExtraSorted := sets.List(wfExtra) - rec.RecordError(fmt.Errorf("switch command shards not in topo: %v", wfExtraSorted)) +func (ts *trafficSwitcher) freezeTargetVReplication(ctx context.Context) error { + // Mark target streams as frozen before deleting. 
If SwitchWrites gets + // re-invoked after a freeze, it will skip all the previous steps + err := ts.ForAllTargets(func(target *MigrationTarget) error { + ts.Logger().Infof("Marking target streams frozen for workflow %s db_name %s", ts.WorkflowName(), target.GetPrimary().DbName()) + query := fmt.Sprintf("update _vt.vreplication set message = '%s' where db_name=%s and workflow=%s", Frozen, + encodeString(target.GetPrimary().DbName()), encodeString(ts.WorkflowName())) + _, err := ts.TabletManagerClient().VReplicationExec(ctx, target.GetPrimary().Tablet, query) + return err + }) + if err != nil { + return err + } + return nil +} + +func (ts *trafficSwitcher) dropTargetVReplicationStreams(ctx context.Context) error { + return ts.ForAllTargets(func(target *MigrationTarget) error { + ts.Logger().Infof("Deleting target streams and related data for workflow %s db_name %s", ts.WorkflowName(), target.GetPrimary().DbName()) + query := fmt.Sprintf(sqlDeleteWorkflow, encodeString(target.GetPrimary().DbName()), encodeString(ts.WorkflowName())) + if _, err := ts.TabletManagerClient().VReplicationExec(ctx, target.GetPrimary().Tablet, query); err != nil { + return err } + ts.ws.deleteWorkflowVDiffData(ctx, target.GetPrimary().Tablet, ts.WorkflowName()) + ts.ws.optimizeCopyStateTable(target.GetPrimary().Tablet) + return nil + }) +} - if topoExtra.Len() > 0 { - topoExtraSorted := sets.List(topoExtra) - rec.RecordError(fmt.Errorf("topo shards not in switch command: %v", topoExtraSorted)) +func (ts *trafficSwitcher) dropSourceReverseVReplicationStreams(ctx context.Context) error { + return ts.ForAllSources(func(source *MigrationSource) error { + ts.Logger().Infof("Deleting reverse streams and related data for workflow %s db_name %s", ts.WorkflowName(), source.GetPrimary().DbName()) + query := fmt.Sprintf(sqlDeleteWorkflow, encodeString(source.GetPrimary().DbName()), encodeString(ReverseWorkflowName(ts.WorkflowName()))) + if _, err := ts.TabletManagerClient().VReplicationExec(ctx, 
source.GetPrimary().Tablet, query); err != nil { + return err } + ts.ws.deleteWorkflowVDiffData(ctx, source.GetPrimary().Tablet, ReverseWorkflowName(ts.WorkflowName())) + ts.ws.optimizeCopyStateTable(source.GetPrimary().Tablet) + return nil + }) +} + +func (ts *trafficSwitcher) removeTargetTables(ctx context.Context) error { + log.Flush() + err := ts.ForAllTargets(func(target *MigrationTarget) error { + log.Infof("ForAllTargets: %+v", target) + for _, tableName := range ts.Tables() { + query := fmt.Sprintf("drop table %s.%s", + sqlescape.EscapeID(sqlescape.UnescapeID(target.GetPrimary().DbName())), + sqlescape.EscapeID(sqlescape.UnescapeID(tableName))) + ts.Logger().Infof("%s: Dropping table %s.%s\n", + target.GetPrimary().String(), target.GetPrimary().DbName(), tableName) + res, err := ts.ws.tmc.ExecuteFetchAsDba(ctx, target.GetPrimary().Tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ + Query: []byte(query), + MaxRows: 1, + ReloadSchema: true, + }) + log.Infof("Removed target table with result: %+v", res) + log.Flush() + if err != nil { + ts.Logger().Errorf("%s: Error removing table %s: %v", + target.GetPrimary().String(), tableName, err) + return err + } + ts.Logger().Infof("%s: Removed table %s.%s\n", + target.GetPrimary().String(), target.GetPrimary().DbName(), tableName) - return fmt.Errorf("mismatched shards for keyspace %s: %s", keyspace, strings.Join(rec.ErrorStrings(), "; ")) + } + return nil + }) + if err != nil { + return err } + return ts.dropParticipatingTablesFromKeyspace(ctx, ts.TargetKeyspaceName()) +} + +func (ts *trafficSwitcher) dropTargetShards(ctx context.Context) error { + return ts.ForAllTargets(func(target *MigrationTarget) error { + ts.Logger().Infof("Deleting shard %s.%s\n", target.GetShard().Keyspace(), target.GetShard().ShardName()) + err := ts.ws.DeleteShard(ctx, target.GetShard().Keyspace(), target.GetShard().ShardName(), true, false) + if err != nil { + ts.Logger().Errorf("Error deleting shard %s: %v", 
target.GetShard().ShardName(), err) + return err + } + ts.Logger().Infof("Deleted shard %s.%s\n", target.GetShard().Keyspace(), target.GetShard().ShardName()) + return nil + }) +} + +func (ts *trafficSwitcher) validate(ctx context.Context) error { + if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + if ts.isPartialMigration { + return nil + } + sourceTopo := ts.ws.ts + if ts.externalTopo != nil { + sourceTopo = ts.externalTopo + } + + // All shards must be present. + if err := CompareShards(ctx, ts.SourceKeyspaceName(), ts.SourceShards(), sourceTopo); err != nil { + return err + } + if err := CompareShards(ctx, ts.TargetKeyspaceName(), ts.TargetShards(), ts.ws.ts); err != nil { + return err + } + // Wildcard table names not allowed. + for _, table := range ts.tables { + if strings.HasPrefix(table, "/") { + return fmt.Errorf("cannot migrate streams with wild card table names: %v", table) + } + } + } return nil } -// HashStreams produces a stable hash based on the target keyspace and migration -// targets. -func HashStreams(targetKeyspace string, targets map[string]*MigrationTarget) int64 { - var expanded []string - for shard, target := range targets { - for uid := range target.Sources { - expanded = append(expanded, fmt.Sprintf("%s:%d", shard, uid)) +// checkJournals returns true if at least one journal has been created. +// If so, it also returns the list of sourceWorkflows that need to be switched. 
// checkJournals reports whether any source shard has already written a
// resharding journal for this traffic switcher's id. If so, it also returns
// the source workflows recorded in that journal and marks the corresponding
// sources as Journaled. The mutex serializes the per-source callbacks, which
// ForAllSources runs concurrently, so the shared return values are safe.
func (ts *trafficSwitcher) checkJournals(ctx context.Context) (journalsExist bool, sourceWorkflows []string, err error) {
	var mu sync.Mutex

	err = ts.ForAllSources(func(source *MigrationSource) error {
		mu.Lock()
		defer mu.Unlock()
		journal, exists, err := ts.ws.CheckReshardingJournalExistsOnTablet(ctx, source.GetPrimary().Tablet, ts.id)
		if err != nil {
			return err
		}
		if exists {
			if journal.Id != 0 {
				sourceWorkflows = journal.SourceWorkflows
			}
			source.Journaled = true
			journalsExist = true
		}
		return nil
	})
	return journalsExist, sourceWorkflows, err
}

// executeLockTablesOnSource executes a LOCK TABLES tb1 READ, tbl2 READ,... statement on each
// source shard's primary tablet using a non-pooled connection as the DBA user. The connection
// is closed when the LOCK TABLES statement returns, so we immediately release the LOCKs.
func (ts *trafficSwitcher) executeLockTablesOnSource(ctx context.Context) error {
	ts.Logger().Infof("Locking (and then immediately unlocking) the following tables on source keyspace %v: %v", ts.SourceKeyspaceName(), ts.Tables())
	if len(ts.Tables()) == 0 {
		return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "no tables found in the source keyspace %v associated with the %s workflow", ts.SourceKeyspaceName(), ts.WorkflowName())
	}

	sb := strings.Builder{}
	sb.WriteString("LOCK TABLES ")
	for _, tableName := range ts.Tables() {
		sb.WriteString(fmt.Sprintf("%s READ,", sqlescape.EscapeID(tableName)))
	}
	// trim extra trailing comma
	lockStmt := sb.String()[:sb.Len()-1]

	return ts.ForAllSources(func(source *MigrationSource) error {
		primary := source.GetPrimary()
		if primary == nil {
			return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "no primary found for source shard %s", source.GetShard())
		}
		tablet := primary.Tablet
		// Non-pooled (true) DBA connection: closing it releases the locks.
		_, err := ts.ws.tmc.ExecuteFetchAsDba(ctx, tablet, true, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{
			Query:          []byte(lockStmt),
			MaxRows:        uint64(1),
			DisableBinlogs: false,
			ReloadSchema:   true,
		})
		if err != nil {
			ts.Logger().Errorf("Error executing %s on source tablet %v: %v", lockStmt, tablet, err)
			return err
		}
		return err
	})
}

// gatherPositions records the current primary position on every source and
// every target shard of the workflow.
func (ts *trafficSwitcher) gatherPositions(ctx context.Context) error {
	err := ts.ForAllSources(func(source *MigrationSource) error {
		var err error
		source.Position, err = ts.ws.tmc.PrimaryPosition(ctx, source.GetPrimary().Tablet)
		ts.Logger().Infof("Position for source %v:%v: %v", ts.SourceKeyspaceName(), source.GetShard().ShardName(), source.Position)
		return err
	})
	if err != nil {
		return err
	}
	return ts.ForAllTargets(func(target *MigrationTarget) error {
		var err error
		target.Position, err = ts.ws.tmc.PrimaryPosition(ctx, target.GetPrimary().Tablet)
		ts.Logger().Infof("Position for target %v:%v: %v", ts.TargetKeyspaceName(), target.GetShard().ShardName(), target.Position)
		return err
	})
}

// isSequenceParticipating reports whether any of the workflow's tables is
// defined as a sequence table in the target keyspace's vschema.
func (ts *trafficSwitcher) isSequenceParticipating(ctx context.Context) (bool, error) {
	vschema, err := ts.TopoServer().GetVSchema(ctx, ts.targetKeyspace)
	if err != nil {
		return false, err
	}
	if vschema == nil || len(vschema.Tables) == 0 {
		return false, nil
	}
	sequenceFound := false
	for _, table := range ts.Tables() {
		vs, ok := vschema.Tables[table]
		if !ok || vs == nil {
			continue
		}
		if vs.Type == vindexes.TypeSequence {
			sequenceFound = true
			break
		}
	}
	return sequenceFound, nil
}

// getTargetSequenceMetadata returns a map of sequence metadata keyed by the
// backing sequence table name. If the target keyspace has no tables
// defined that use sequences for auto_increment generation then a nil
// map will be returned.
// getTargetSequenceMetadata builds the map of sequence metadata (keyed by
// backing table name) for the target keyspace. If any backing sequence
// tables were not fully qualified in the vschema, it searches all other
// unsharded keyspaces concurrently to locate them, short-circuiting via the
// done channel once every backing table has been found. It returns an error
// if any backing table cannot be located.
func (ts *trafficSwitcher) getTargetSequenceMetadata(ctx context.Context) (map[string]*sequenceMetadata, error) {
	vschema, err := ts.TopoServer().GetVSchema(ctx, ts.targetKeyspace)
	if err != nil {
		return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get vschema for target keyspace %s: %v",
			ts.targetKeyspace, err)
	}
	if vschema == nil || len(vschema.Tables) == 0 { // Nothing to do
		return nil, nil
	}

	sequencesByBackingTable, backingTablesFound, err := ts.findSequenceUsageInKeyspace(vschema)
	if err != nil {
		return nil, err
	}
	// If all of the sequence tables were defined using qualified table
	// names then we don't need to search for them in other keyspaces.
	if len(sequencesByBackingTable) == 0 || backingTablesFound {
		return sequencesByBackingTable, nil
	}

	if err := ctx.Err(); err != nil {
		return nil, err
	}

	// Now we need to locate the backing sequence table(s) which will
	// be in another unsharded keyspace.
	smMu := sync.Mutex{}
	tableCount := len(sequencesByBackingTable)
	tablesFound := 0 // Used to short circuit the search
	// Define the function used to search each keyspace.
	searchKeyspace := func(sctx context.Context, done chan struct{}, keyspace string) error {
		kvs, kerr := ts.TopoServer().GetVSchema(sctx, keyspace)
		if kerr != nil {
			return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get vschema for keyspace %s: %v",
				keyspace, kerr)
		}
		// Sequence backing tables live in unsharded keyspaces only.
		if kvs == nil || kvs.Sharded || len(kvs.Tables) == 0 {
			return nil
		}
		for tableName, tableDef := range kvs.Tables {
			select {
			case <-sctx.Done():
				return sctx.Err()
			case <-done: // We've found everything we need in other goroutines
				return nil
			default:
			}
			if complete := func() bool {
				smMu.Lock() // Prevent concurrent access to the map
				defer smMu.Unlock()
				sm := sequencesByBackingTable[tableName]
				if tableDef != nil && tableDef.Type == vindexes.TypeSequence &&
					sm != nil && tableName == sm.backingTableName {
					tablesFound++ // This is also protected by the mutex
					sm.backingTableKeyspace = keyspace
					// Set the default keyspace name. We will later check to
					// see if the tablet we send requests to is using a dbname
					// override and use that if it is.
					sm.backingTableDBName = "vt_" + keyspace
					if tablesFound == tableCount { // Short circuit the search
						select {
						case <-done: // It's already been closed
							return true
						default:
							close(done) // Mark the search as completed
							return true
						}
					}
				}
				return false
			}(); complete {
				return nil
			}
		}
		return nil
	}
	keyspaces, err := ts.TopoServer().GetKeyspaces(ctx)
	if err != nil {
		return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get keyspaces: %v", err)
	}
	searchGroup, gctx := errgroup.WithContext(ctx)
	searchCompleted := make(chan struct{})
	for _, keyspace := range keyspaces {
		keyspace := keyspace // https://golang.org/doc/faq#closures_and_goroutines
		searchGroup.Go(func() error {
			return searchKeyspace(gctx, searchCompleted, keyspace)
		})
	}
	if err := searchGroup.Wait(); err != nil {
		return nil, err
	}

	if tablesFound != tableCount {
		return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to locate all of the backing sequence tables being used; sequence table metadata: %+v",
			sequencesByBackingTable)
	}
	return sequencesByBackingTable, nil
}

// findSequenceUsageInKeyspace searches the keyspace's vschema for usage
// of sequences. It returns a map of sequence metadata keyed by the backing
// sequence table name -- if any usage is found -- along with a boolean to
// indicate if all of the backing sequence tables were defined using
// qualified table names (so we know where they all live) along with an
// error if any is seen.
// findSequenceUsageInKeyspace searches the keyspace's vschema for usage
// of sequences. It returns a map of sequence metadata keyed by the backing
// sequence table name -- if any usage is found -- along with a boolean to
// indicate if all of the backing sequence tables were defined using
// qualified table names (so we know where they all live) along with an
// error if any is seen.
func (ts *trafficSwitcher) findSequenceUsageInKeyspace(vschema *vschemapb.Keyspace) (map[string]*sequenceMetadata, bool, error) {
	allFullyQualified := true
	targets := maps2.Values(ts.Targets())
	if len(targets) == 0 || targets[0].GetPrimary() == nil { // This should never happen
		return nil, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "no primary tablet found for target keyspace %s", ts.targetKeyspace)
	}
	// Any target primary's dbname works since all target shards share it.
	targetDBName := targets[0].GetPrimary().DbName()
	sequencesByBackingTable := make(map[string]*sequenceMetadata)

	for _, table := range ts.Tables() {
		vs, ok := vschema.Tables[table]
		// Only tables with an auto_increment backed by a sequence matter here.
		if !ok || vs == nil || vs.AutoIncrement == nil || vs.AutoIncrement.Sequence == "" {
			continue
		}
		sm := &sequenceMetadata{
			backingTableName:     vs.AutoIncrement.Sequence,
			usingTableName:       table,
			usingTableDefinition: vs,
			usingTableDBName:     targetDBName,
		}
		// If the sequence table is fully qualified in the vschema then
		// we don't need to find it later.
		if strings.Contains(vs.AutoIncrement.Sequence, ".") {
			keyspace, tableName, found := strings.Cut(vs.AutoIncrement.Sequence, ".")
			if !found {
				return nil, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid sequence table name %s defined in the %s keyspace",
					vs.AutoIncrement.Sequence, ts.targetKeyspace)
			}
			sm.backingTableName = tableName
			sm.backingTableKeyspace = keyspace
			// Set the default keyspace name. We will later check to
			// see if the tablet we send requests to is using a dbname
			// override and use that if it is.
			sm.backingTableDBName = "vt_" + keyspace
		} else {
			allFullyQualified = false
		}
		sequencesByBackingTable[sm.backingTableName] = sm
	}

	return sequencesByBackingTable, allFullyQualified, nil
}
-func encodeString(in string) string { - buf := bytes.NewBuffer(nil) - sqltypes.NewVarChar(in).EncodeSQL(buf) - return buf.String() +// initializeTargetSequences initializes the backing sequence tables +// using a map keyed by the backing sequence table name. +// +// The backing tables must have already been created. This function will +// then ensure that the next value is set to a value greater than any +// currently stored in the using table on the target keyspace. If the +// backing table is updated to a new higher value then it will also tell +// the primary tablet serving the sequence to refresh/reset its cache to +// be sure that it does not provide a value that is less than the current max. +func (ts *trafficSwitcher) initializeTargetSequences(ctx context.Context, sequencesByBackingTable map[string]*sequenceMetadata) error { + initSequenceTable := func(ictx context.Context, sequenceTableName string, sequenceMetadata *sequenceMetadata) error { + // Now we need to run this query on the target shards in order + // to get the max value and set the next id for the sequence to + // a higher value. 
+ shardResults := make([]int64, 0, len(ts.TargetShards())) + srMu := sync.Mutex{} + ierr := ts.ForAllTargets(func(target *MigrationTarget) error { + primary := target.GetPrimary() + if primary == nil || primary.GetAlias() == nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "no primary tablet found for target shard %s/%s", + ts.targetKeyspace, target.GetShard().ShardName()) + } + query := sqlparser.BuildParsedQuery(sqlGetMaxSequenceVal, + sqlescape.EscapeID(sequenceMetadata.usingTableDefinition.AutoIncrement.Column), + sqlescape.EscapeID(sequenceMetadata.usingTableDBName), + sqlescape.EscapeID(sequenceMetadata.usingTableName), + ) + qr, terr := ts.ws.tmc.ExecuteFetchAsApp(ictx, primary.Tablet, true, &tabletmanagerdatapb.ExecuteFetchAsAppRequest{ + Query: []byte(query.Query), + MaxRows: 1, + }) + if terr != nil || len(qr.Rows) != 1 { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the max used sequence value for target table %s.%s in order to initialize the backing sequence table: %v", + ts.targetKeyspace, sequenceMetadata.usingTableName, terr) + } + maxID, terr := sqltypes.Proto3ToResult(qr).Rows[0][0].ToInt64() + if terr != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the max used sequence value for target table %s.%s in order to initialize the backing sequence table: %v", + ts.targetKeyspace, sequenceMetadata.usingTableName, terr) + } + srMu.Lock() + defer srMu.Unlock() + shardResults = append(shardResults, maxID) + return nil + }) + if ierr != nil { + return ierr + } + select { + case <-ictx.Done(): + return ictx.Err() + default: + } + // Sort the values to find the max value across all shards. + sort.Slice(shardResults, func(i, j int) bool { + return shardResults[i] < shardResults[j] + }) + nextVal := shardResults[len(shardResults)-1] + 1 + // Now we need to update the sequence table, if needed, in order to + // ensure that that the next value it provides is > the current max. 
+ sequenceShard, ierr := ts.TopoServer().GetOnlyShard(ictx, sequenceMetadata.backingTableKeyspace) + if ierr != nil || sequenceShard == nil || sequenceShard.PrimaryAlias == nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the primary tablet for keyspace %s: %v", + sequenceMetadata.backingTableKeyspace, ierr) + } + sequenceTablet, ierr := ts.TopoServer().GetTablet(ictx, sequenceShard.PrimaryAlias) + if ierr != nil || sequenceTablet == nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the primary tablet for keyspace %s: %v", + sequenceMetadata.backingTableKeyspace, ierr) + } + select { + case <-ictx.Done(): + return ictx.Err() + default: + } + if sequenceTablet.DbNameOverride != "" { + sequenceMetadata.backingTableDBName = sequenceTablet.DbNameOverride + } + query := sqlparser.BuildParsedQuery(sqlInitSequenceTable, + sqlescape.EscapeID(sequenceMetadata.backingTableDBName), + sqlescape.EscapeID(sequenceMetadata.backingTableName), + nextVal, + nextVal, + nextVal, + ) + // Now execute this on the primary tablet of the unsharded keyspace + // housing the backing table. + primaryTablet, ierr := ts.TopoServer().GetTablet(ictx, sequenceShard.PrimaryAlias) + if ierr != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the primary tablet for %s.%s using alias %s: %v", + sequenceShard.Keyspace(), sequenceShard.ShardName(), sequenceShard.PrimaryAlias, ierr) + } + qr, ierr := ts.ws.tmc.ExecuteFetchAsApp(ictx, primaryTablet.Tablet, true, &tabletmanagerdatapb.ExecuteFetchAsAppRequest{ + Query: []byte(query.Query), + MaxRows: 1, + }) + if ierr != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to initialize the backing sequence table %s.%s: %v", + sequenceMetadata.backingTableDBName, sequenceMetadata.backingTableName, ierr) + } + // If we actually updated the backing sequence table, then we need + // to tell the primary tablet managing the sequence to refresh/reset + // its cache for the table. 
+ if qr.RowsAffected == 0 { + return nil + } + select { + case <-ictx.Done(): + return ictx.Err() + default: + } + ts.Logger().Infof("Resetting sequence cache for backing table %s on shard %s/%s using tablet %s", + sequenceMetadata.backingTableName, sequenceShard.Keyspace(), sequenceShard.ShardName(), sequenceShard.PrimaryAlias) + ti, ierr := ts.TopoServer().GetTablet(ictx, sequenceShard.PrimaryAlias) + if ierr != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get primary tablet for keyspace %s: %v", + sequenceMetadata.backingTableKeyspace, ierr) + } + ierr = ts.TabletManagerClient().ResetSequences(ictx, ti.Tablet, []string{sequenceMetadata.backingTableName}) + if ierr != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to reset the sequence cache for backing table %s on shard %s/%s using tablet %s: %v", + sequenceMetadata.backingTableName, sequenceShard.Keyspace(), sequenceShard.ShardName(), sequenceShard.PrimaryAlias, ierr) + } + return nil + } + + initGroup, gctx := errgroup.WithContext(ctx) + for sequenceTableName, sequenceMetadata := range sequencesByBackingTable { + sequenceTableName, sequenceMetadata := sequenceTableName, sequenceMetadata // https://golang.org/doc/faq#closures_and_goroutines + initGroup.Go(func() error { + return initSequenceTable(gctx, sequenceTableName, sequenceMetadata) + }) + } + return initGroup.Wait() +} + +func (ts *trafficSwitcher) mustResetSequences(ctx context.Context) (bool, error) { + switch ts.workflowType { + case binlogdatapb.VReplicationWorkflowType_Migrate, + binlogdatapb.VReplicationWorkflowType_MoveTables: + return ts.isSequenceParticipating(ctx) + default: + return false, nil + } +} + +func (ts *trafficSwitcher) resetSequences(ctx context.Context) error { + var err error + mustReset := false + if mustReset, err = ts.mustResetSequences(ctx); err != nil { + return err + } + if !mustReset { + return nil + } + return ts.ForAllSources(func(source *MigrationSource) error { + 
ts.Logger().Infof("Resetting sequences for source shard %s.%s on tablet %s", + source.GetShard().Keyspace(), source.GetShard().ShardName(), source.GetPrimary().String()) + return ts.TabletManagerClient().ResetSequences(ctx, source.GetPrimary().Tablet, ts.Tables()) + }) } diff --git a/go/vt/vtctl/workflow/traffic_switcher_test.go b/go/vt/vtctl/workflow/traffic_switcher_test.go index 447e47d7490..c416baa18f9 100644 --- a/go/vt/vtctl/workflow/traffic_switcher_test.go +++ b/go/vt/vtctl/workflow/traffic_switcher_test.go @@ -25,7 +25,7 @@ import ( ) type testTrafficSwitcher struct { - ITrafficSwitcher + trafficSwitcher sourceKeyspaceSchema *vindexes.KeyspaceSchema } diff --git a/go/vt/vtctl/workflow/utils.go b/go/vt/vtctl/workflow/utils.go new file mode 100644 index 00000000000..1a723c6192c --- /dev/null +++ b/go/vt/vtctl/workflow/utils.go @@ -0,0 +1,768 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workflow + +import ( + "bytes" + "context" + "fmt" + "hash/fnv" + "math" + "sort" + "strings" + "sync" + + "google.golang.org/protobuf/encoding/prototext" + + "vitess.io/vitess/go/sets" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" +) + +const reverseSuffix = "_reverse" + +func getTablesInKeyspace(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, keyspace string) ([]string, error) { + shards, err := ts.GetServingShards(ctx, keyspace) + if err != nil { + return nil, err + } + if len(shards) == 0 { + return nil, fmt.Errorf("keyspace %s has no shards", keyspace) + } + primary := shards[0].PrimaryAlias + if primary == nil { + return nil, fmt.Errorf("shard does not have a primary: %v", shards[0].ShardName()) + } + allTables := []string{"/.*/"} + + ti, err := ts.GetTablet(ctx, primary) + if err != nil { + return nil, err + } + req := &tabletmanagerdatapb.GetSchemaRequest{Tables: allTables} + schema, err := tmc.GetSchema(ctx, ti.Tablet, req) + if err != nil { + return nil, err + } + log.Infof("got table schemas: %+v from source primary %v.", schema, primary) + + var sourceTables []string + for _, td := range schema.TableDefinitions { + sourceTables = append(sourceTables, td.Name) + } + return sourceTables, nil +} + +// validateNewWorkflow ensures that the specified workflow 
doesn't already exist +// in the keyspace. +func validateNewWorkflow(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, keyspace, workflow string) error { + allshards, err := ts.FindAllShardsInKeyspace(ctx, keyspace) + if err != nil { + return err + } + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, si := range allshards { + if si.PrimaryAlias == nil { + allErrors.RecordError(fmt.Errorf("shard has no primary: %v", si.ShardName())) + continue + } + wg.Add(1) + go func(si *topo.ShardInfo) { + defer wg.Done() + + primary, err := ts.GetTablet(ctx, si.PrimaryAlias) + if err != nil { + allErrors.RecordError(vterrors.Wrap(err, "validateWorkflowName.GetTablet")) + return + } + validations := []struct { + query string + msg string + }{{ + fmt.Sprintf("select 1 from _vt.vreplication where db_name=%s and workflow=%s", encodeString(primary.DbName()), encodeString(workflow)), + fmt.Sprintf("workflow %s already exists in keyspace %s on tablet %v", workflow, keyspace, primary.Alias), + }, { + fmt.Sprintf("select 1 from _vt.vreplication where db_name=%s and message='FROZEN' and workflow_sub_type != %d", encodeString(primary.DbName()), binlogdatapb.VReplicationWorkflowSubType_Partial), + fmt.Sprintf("found previous frozen workflow on tablet %v, please review and delete it first before creating a new workflow", + primary.Alias), + }} + for _, validation := range validations { + p3qr, err := tmc.VReplicationExec(ctx, primary.Tablet, validation.query) + if err != nil { + allErrors.RecordError(vterrors.Wrap(err, "validateWorkflowName.VReplicationExec")) + return + } + if p3qr != nil && len(p3qr.Rows) != 0 { + allErrors.RecordError(vterrors.Wrap(fmt.Errorf(validation.msg), "validateWorkflowName.VReplicationExec")) + return + } + } + }(si) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} + +// createDefaultShardRoutingRules creates a reverse routing rule for +// each shard in a new partial keyspace migration workflow 
that does +// not already have an existing routing rule in place. +func createDefaultShardRoutingRules(ctx context.Context, ms *vtctldatapb.MaterializeSettings, ts *topo.Server) error { + srr, err := topotools.GetShardRoutingRules(ctx, ts) + if err != nil { + return err + } + allShards, err := ts.GetServingShards(ctx, ms.SourceKeyspace) + if err != nil { + return err + } + changed := false + for _, si := range allShards { + fromSource := fmt.Sprintf("%s.%s", ms.SourceKeyspace, si.ShardName()) + fromTarget := fmt.Sprintf("%s.%s", ms.TargetKeyspace, si.ShardName()) + if srr[fromSource] == "" && srr[fromTarget] == "" { + srr[fromTarget] = ms.SourceKeyspace + changed = true + log.Infof("Added default shard routing rule from %q to %q", fromTarget, fromSource) + } + } + if changed { + if err := topotools.SaveShardRoutingRules(ctx, ts, srr); err != nil { + return err + } + if err := ts.RebuildSrvVSchema(ctx, nil); err != nil { + return err + } + } + return nil +} + +func stripTableConstraints(ddl string) (string, error) { + ast, err := sqlparser.ParseStrictDDL(ddl) + if err != nil { + return "", err + } + + stripConstraints := func(cursor *sqlparser.Cursor) bool { + switch node := cursor.Node().(type) { + case sqlparser.DDLStatement: + if node.GetTableSpec() != nil { + node.GetTableSpec().Constraints = nil + } + } + return true + } + + noConstraintAST := sqlparser.Rewrite(ast, stripConstraints, nil) + newDDL := sqlparser.String(noConstraintAST) + + return newDDL, nil +} + +func stripTableForeignKeys(ddl string) (string, error) { + ast, err := sqlparser.ParseStrictDDL(ddl) + if err != nil { + return "", err + } + + stripFKConstraints := func(cursor *sqlparser.Cursor) bool { + switch node := cursor.Node().(type) { + case sqlparser.DDLStatement: + if node.GetTableSpec() != nil { + var noFKConstraints []*sqlparser.ConstraintDefinition + for _, constraint := range node.GetTableSpec().Constraints { + if constraint.Details != nil { + if _, ok := 
constraint.Details.(*sqlparser.ForeignKeyDefinition); !ok { + noFKConstraints = append(noFKConstraints, constraint) + } + } + } + node.GetTableSpec().Constraints = noFKConstraints + } + } + return true + } + + noFKConstraintAST := sqlparser.Rewrite(ast, stripFKConstraints, nil) + newDDL := sqlparser.String(noFKConstraintAST) + return newDDL, nil +} + +func getSourceTableDDLs(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, shards []*topo.ShardInfo) (map[string]string, error) { + sourceDDLs := make(map[string]string) + allTables := []string{"/.*/"} + + sourcePrimary := shards[0].PrimaryAlias + if sourcePrimary == nil { + return nil, fmt.Errorf("shard must have a primary for copying schema: %v", shards[0].ShardName()) + } + + ti, err := ts.GetTablet(ctx, sourcePrimary) + if err != nil { + return nil, err + } + req := &tabletmanagerdatapb.GetSchemaRequest{Tables: allTables} + sourceSchema, err := tmc.GetSchema(ctx, ti.Tablet, req) + if err != nil { + return nil, err + } + + for _, td := range sourceSchema.TableDefinitions { + sourceDDLs[td.Name] = td.Schema + } + return sourceDDLs, nil +} + +func forAllShards(shards []*topo.ShardInfo, f func(*topo.ShardInfo) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, target := range shards { + wg.Add(1) + go func(target *topo.ShardInfo) { + defer wg.Done() + + if err := f(target); err != nil { + allErrors.RecordError(err) + } + }(target) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} + +func matchColInSelect(col sqlparser.IdentifierCI, sel *sqlparser.Select) (*sqlparser.ColName, error) { + for _, selExpr := range sel.SelectExprs { + switch selExpr := selExpr.(type) { + case *sqlparser.StarExpr: + return &sqlparser.ColName{Name: col}, nil + case *sqlparser.AliasedExpr: + match := selExpr.As + if match.IsEmpty() { + if colExpr, ok := selExpr.Expr.(*sqlparser.ColName); ok { + match = colExpr.Name + } else { + // Cannot match against a complex 
expression. + continue + } + } + if match.Equal(col) { + colExpr, ok := selExpr.Expr.(*sqlparser.ColName) + if !ok { + return nil, fmt.Errorf("vindex column cannot be a complex expression: %v", sqlparser.String(selExpr)) + } + return colExpr, nil + } + default: + return nil, fmt.Errorf("unsupported select expression: %v", sqlparser.String(selExpr)) + } + } + return nil, fmt.Errorf("could not find vindex column %v", sqlparser.String(col)) +} + +func shouldInclude(table string, excludes []string) bool { + // We filter out internal tables elsewhere when processing SchemaDefinition + // structures built from the GetSchema database related API calls. In this + // case, however, the table list comes from the user via the -tables flag + // so we need to filter out internal table names here in case a user has + // explicitly specified some. + // This could happen if there's some automated tooling that creates the list of + // tables to explicitly specify. + // But given that this should never be done in practice, we ignore the request. + if schema.IsInternalOperationTableName(table) { + return false + } + for _, t := range excludes { + if t == table { + return false + } + } + return true +} + +// getMigrationID produces a reproducible hash based on the input parameters. +func getMigrationID(targetKeyspace string, shardTablets []string) (int64, error) { + sort.Strings(shardTablets) + hasher := fnv.New64() + hasher.Write([]byte(targetKeyspace)) + for _, str := range shardTablets { + hasher.Write([]byte(str)) + } + // Convert to int64 after dropping the highest bit. + return int64(hasher.Sum64() & math.MaxInt64), nil +} + +// BuildTargets collects MigrationTargets and other metadata (see TargetInfo) +// from a workflow in the target keyspace. +// +// It returns ErrNoStreams if there are no targets found for the workflow. 
+func BuildTargets(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, targetKeyspace string, workflow string) (*TargetInfo, error) { + targetShards, err := ts.GetShardNames(ctx, targetKeyspace) + if err != nil { + return nil, err + } + + var ( + frozen bool + optCells string + optTabletTypes string + targets = make(map[string]*MigrationTarget, len(targetShards)) + workflowType binlogdatapb.VReplicationWorkflowType + workflowSubType binlogdatapb.VReplicationWorkflowSubType + ) + + // We check all shards in the target keyspace. Not all of them may have a + // stream. For example, if we're splitting -80 to [-40,40-80], only those + // two target shards will have vreplication streams, and the other shards in + // the target keyspace will not. + for _, targetShard := range targetShards { + si, err := ts.GetShard(ctx, targetKeyspace, targetShard) + if err != nil { + return nil, err + } + + if si.PrimaryAlias == nil { + // This can happen if bad inputs are given. + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "shard %v/%v doesn't have a primary set", targetKeyspace, targetShard) + } + + primary, err := ts.GetTablet(ctx, si.PrimaryAlias) + if err != nil { + return nil, err + } + + wf, err := tmc.ReadVReplicationWorkflow(ctx, primary.Tablet, &tabletmanagerdatapb.ReadVReplicationWorkflowRequest{ + Workflow: workflow, + }) + if err != nil { + return nil, err + } + + if len(wf.Streams) < 1 { + continue + } + + target := &MigrationTarget{ + si: si, + primary: primary, + Sources: make(map[int32]*binlogdatapb.BinlogSource), + } + + optCells = wf.Cells + optTabletTypes = topoproto.MakeStringTypeCSV(wf.TabletTypes) + workflowType = wf.WorkflowType + workflowSubType = wf.WorkflowSubType + + for _, stream := range wf.Streams { + if stream.Message == Frozen { + frozen = true + } + target.Sources[stream.Id] = stream.Bls + } + + targets[targetShard] = target + } + + if len(targets) == 0 { + return nil, fmt.Errorf("%w in keyspace %s for %s", ErrNoStreams, 
targetKeyspace, workflow) + } + + return &TargetInfo{ + Targets: targets, + Frozen: frozen, + OptCells: optCells, + OptTabletTypes: optTabletTypes, + WorkflowType: workflowType, + WorkflowSubType: workflowSubType, + }, nil +} + +func getSourceAndTargetKeyRanges(sourceShards, targetShards []string) (*topodatapb.KeyRange, *topodatapb.KeyRange, error) { + if len(sourceShards) == 0 || len(targetShards) == 0 { + return nil, nil, fmt.Errorf("either source or target shards are missing") + } + + getKeyRange := func(shard string) (*topodatapb.KeyRange, error) { + krs, err := key.ParseShardingSpec(shard) + if err != nil { + return nil, err + } + return krs[0], nil + } + + // Happily string sorting of shards also sorts them in the ascending order of key + // ranges in vitess. + sort.Strings(sourceShards) + sort.Strings(targetShards) + getFullKeyRange := func(shards []string) (*topodatapb.KeyRange, error) { + // Expect sorted shards. + kr1, err := getKeyRange(sourceShards[0]) + if err != nil { + return nil, err + } + kr2, err := getKeyRange(sourceShards[len(sourceShards)-1]) + if err != nil { + return nil, err + } + return &topodatapb.KeyRange{ + Start: kr1.Start, + End: kr2.End, + }, nil + } + + skr, err := getFullKeyRange(sourceShards) + if err != nil { + return nil, nil, err + } + tkr, err := getFullKeyRange(targetShards) + if err != nil { + return nil, nil, err + } + + return skr, tkr, nil +} + +// CompareShards compares the list of shards in a workflow with the shards in +// that keyspace according to the topo. It returns an error if they do not match. +// +// This function is used to validate MoveTables workflows. +// +// (TODO|@ajm188): This function is temporarily-exported until *wrangler.trafficSwitcher +// has been fully moved over to this package. Once that refactor is finished, +// this function should be unexported. Consequently, YOU SHOULD NOT DEPEND ON +// THIS FUNCTION EXTERNALLY. 
+func CompareShards(ctx context.Context, keyspace string, shards []*topo.ShardInfo, ts *topo.Server) error { + shardSet := sets.New[string]() + for _, si := range shards { + shardSet.Insert(si.ShardName()) + } + + topoShards, err := ts.GetShardNames(ctx, keyspace) + if err != nil { + return err + } + + topoShardSet := sets.New[string](topoShards...) + if !shardSet.Equal(topoShardSet) { + wfExtra := shardSet.Difference(topoShardSet) + topoExtra := topoShardSet.Difference(shardSet) + + var rec concurrency.AllErrorRecorder + if wfExtra.Len() > 0 { + wfExtraSorted := sets.List(wfExtra) + rec.RecordError(fmt.Errorf("switch command shards not in topo: %v", wfExtraSorted)) + } + + if topoExtra.Len() > 0 { + topoExtraSorted := sets.List(topoExtra) + rec.RecordError(fmt.Errorf("topo shards not in switch command: %v", topoExtraSorted)) + } + + return fmt.Errorf("mismatched shards for keyspace %s: %s", keyspace, strings.Join(rec.ErrorStrings(), "; ")) + } + + return nil +} + +// HashStreams produces a stable hash based on the target keyspace and migration +// targets. +func HashStreams(targetKeyspace string, targets map[string]*MigrationTarget) int64 { + var expanded []string + for shard, target := range targets { + for uid := range target.Sources { + expanded = append(expanded, fmt.Sprintf("%s:%d", shard, uid)) + } + } + + sort.Strings(expanded) + + hasher := fnv.New64() + hasher.Write([]byte(targetKeyspace)) + + for _, s := range expanded { + hasher.Write([]byte(s)) + } + + // Convert to int64 after dropping the highest bit. 
+ return int64(hasher.Sum64() & math.MaxInt64) +} + +func doValidateWorkflowHasCompleted(ctx context.Context, ts *trafficSwitcher) error { + wg := sync.WaitGroup{} + rec := concurrency.AllErrorRecorder{} + if ts.MigrationType() == binlogdatapb.MigrationType_SHARDS { + _ = ts.ForAllSources(func(source *MigrationSource) error { + wg.Add(1) + if source.GetShard().IsPrimaryServing { + rec.RecordError(fmt.Errorf(fmt.Sprintf("Shard %s is still serving", source.GetShard().ShardName()))) + } + wg.Done() + return nil + }) + } else { + _ = ts.ForAllTargets(func(target *MigrationTarget) error { + wg.Add(1) + query := fmt.Sprintf("select 1 from _vt.vreplication where db_name='%s' and workflow='%s' and message!='FROZEN'", target.GetPrimary().DbName(), ts.WorkflowName()) + rs, _ := ts.VReplicationExec(ctx, target.GetPrimary().Alias, query) + if len(rs.Rows) > 0 { + rec.RecordError(fmt.Errorf("vreplication streams are not frozen on tablet %d", target.GetPrimary().Alias.Uid)) + } + wg.Done() + return nil + }) + } + wg.Wait() + + if !ts.keepRoutingRules { + // Check if table is routable. + if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + rules, err := topotools.GetRoutingRules(ctx, ts.TopoServer()) + if err != nil { + rec.RecordError(fmt.Errorf("could not get RoutingRules")) + } + for fromTable, toTables := range rules { + for _, toTable := range toTables { + for _, table := range ts.Tables() { + if toTable == fmt.Sprintf("%s.%s", ts.SourceKeyspaceName(), table) { + rec.RecordError(fmt.Errorf("routing still exists from keyspace %s table %s to %s", ts.SourceKeyspaceName(), table, fromTable)) + } + } + } + } + } + } + if rec.HasErrors() { + return fmt.Errorf("%s", strings.Join(rec.ErrorStrings(), "\n")) + } + return nil + +} + +// ReverseWorkflowName returns the "reversed" name of a workflow. 
For a +// "forward" workflow, this is the workflow name with "_reverse" appended, and +// for a "reversed" workflow, this is the workflow name with the "_reverse" +// suffix removed. +func ReverseWorkflowName(workflow string) string { + if strings.HasSuffix(workflow, reverseSuffix) { + return workflow[:len(workflow)-len(reverseSuffix)] + } + + return workflow + reverseSuffix +} + +// Straight copy-paste of encodeString from wrangler/keyspace.go. I want to make +// this public, but it doesn't belong in package workflow. Maybe package sqltypes, +// or maybe package sqlescape? +func encodeString(in string) string { + buf := bytes.NewBuffer(nil) + sqltypes.NewVarChar(in).EncodeSQL(buf) + return buf.String() +} + +func getRenameFileName(tableName string) string { + return fmt.Sprintf(renameTableTemplate, tableName) +} + +func parseTabletTypes(tabletTypes []topodatapb.TabletType) (hasReplica, hasRdonly, hasPrimary bool, err error) { + for _, tabletType := range tabletTypes { + switch { + case tabletType == topodatapb.TabletType_REPLICA: + hasReplica = true + case tabletType == topodatapb.TabletType_RDONLY: + hasRdonly = true + case tabletType == topodatapb.TabletType_PRIMARY: + hasPrimary = true + default: + return false, false, false, fmt.Errorf("invalid tablet type passed %s", tabletType) + } + } + return hasReplica, hasRdonly, hasPrimary, nil +} + +func areTabletsAvailableToStreamFrom(ctx context.Context, req *vtctldatapb.WorkflowSwitchTrafficRequest, ts *trafficSwitcher, keyspace string, shards []*topo.ShardInfo) error { + // We use the value from the workflow for the TabletPicker. + tabletTypesStr := ts.optTabletTypes + cells := req.Cells + // If no cells were provided in the command then use the value from the workflow. 
+ if len(cells) == 0 && ts.optCells != "" { + cells = strings.Split(strings.TrimSpace(ts.optCells), ",") + } + + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, shard := range shards { + wg.Add(1) + go func(cells []string, keyspace string, shard *topo.ShardInfo) { + defer wg.Done() + if cells == nil { + cells = append(cells, shard.PrimaryAlias.Cell) + } + tp, err := discovery.NewTabletPicker(ctx, ts.ws.ts, cells, shard.PrimaryAlias.Cell, keyspace, shard.ShardName(), tabletTypesStr, discovery.TabletPickerOptions{}) + if err != nil { + allErrors.RecordError(err) + return + } + tablets := tp.GetMatchingTablets(ctx) + if len(tablets) == 0 { + allErrors.RecordError(fmt.Errorf("no tablet found to source data in keyspace %s, shard %s", keyspace, shard.ShardName())) + return + } + }(cells, keyspace, shard) + } + + wg.Wait() + if allErrors.HasErrors() { + log.Errorf("%s", allErrors.Error()) + return allErrors.Error() + } + return nil +} + +// LegacyBuildTargets collects MigrationTargets and other metadata (see TargetInfo) +// from a workflow in the target keyspace. It uses VReplicationExec to get the workflow +// details rather than the new TabletManager ReadVReplicationWorkflow RPC. This is +// being used to slowly transition all of the older code, including unit tests, over to +// the new RPC and limit the impact of the new implementation to vtctldclient. You can see +// how the unit tests were being migrated here: https://gist.github.com/mattlord/738c12befe951f8d09304ff7fdc47c46 +// +// New callers should instead use the new BuildTargets function. +// +// It returns ErrNoStreams if there are no targets found for the workflow. 
func LegacyBuildTargets(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, targetKeyspace string, workflow string) (*TargetInfo, error) {
	targetShards, err := ts.GetShardNames(ctx, targetKeyspace)
	if err != nil {
		return nil, err
	}

	var (
		frozen          bool
		optCells        string
		optTabletTypes  string
		targets         = make(map[string]*MigrationTarget, len(targetShards))
		workflowType    binlogdatapb.VReplicationWorkflowType
		workflowSubType binlogdatapb.VReplicationWorkflowSubType
	)

	// Decode the numeric workflow type/sub-type columns into their proto
	// enum values. Conversion errors are deliberately ignored; a bad value
	// decodes to 0 (the unknown/default enum).
	getVReplicationWorkflowType := func(row sqltypes.RowNamedValues) binlogdatapb.VReplicationWorkflowType {
		i, _ := row["workflow_type"].ToInt32()
		return binlogdatapb.VReplicationWorkflowType(i)
	}

	getVReplicationWorkflowSubType := func(row sqltypes.RowNamedValues) binlogdatapb.VReplicationWorkflowSubType {
		i, _ := row["workflow_sub_type"].ToInt32()
		return binlogdatapb.VReplicationWorkflowSubType(i)
	}

	// We check all shards in the target keyspace. Not all of them may have a
	// stream. For example, if we're splitting -80 to [-40,40-80], only those
	// two target shards will have vreplication streams, and the other shards in
	// the target keyspace will not.
	for _, targetShard := range targetShards {
		si, err := ts.GetShard(ctx, targetKeyspace, targetShard)
		if err != nil {
			return nil, err
		}

		if si.PrimaryAlias == nil {
			// This can happen if bad inputs are given.
			return nil, fmt.Errorf("shard %v/%v doesn't have a primary set", targetKeyspace, targetShard)
		}

		primary, err := ts.GetTablet(ctx, si.PrimaryAlias)
		if err != nil {
			return nil, err
		}

		// NB: changing the whitespace of this query breaks tests for now.
		// (TODO:@ajm188) extend FakeDBClient to be less whitespace-sensitive on
		// expected queries.
		query := fmt.Sprintf("select id, source, message, cell, tablet_types, workflow_type, workflow_sub_type, defer_secondary_keys from _vt.vreplication where workflow=%s and db_name=%s", encodeString(workflow), encodeString(primary.DbName()))
		p3qr, err := tmc.VReplicationExec(ctx, primary.Tablet, query)
		if err != nil {
			return nil, err
		}

		// Shards without streams for this workflow are skipped entirely.
		if len(p3qr.Rows) < 1 {
			continue
		}

		target := &MigrationTarget{
			si:      si,
			primary: primary,
			Sources: make(map[int32]*binlogdatapb.BinlogSource),
		}

		qr := sqltypes.Proto3ToResult(p3qr)
		for _, row := range qr.Named().Rows {
			id, err := row["id"].ToInt32()
			if err != nil {
				return nil, err
			}

			// The binlog source is stored as prototext in the source column.
			var bls binlogdatapb.BinlogSource
			rowBytes, err := row["source"].ToBytes()
			if err != nil {
				return nil, err
			}
			if err := prototext.Unmarshal(rowBytes, &bls); err != nil {
				return nil, err
			}

			// Any single frozen stream marks the whole workflow as frozen.
			if row["message"].ToString() == Frozen {
				frozen = true
			}

			target.Sources[id] = &bls
			// NOTE(review): these options are overwritten by every stream
			// row processed; presumably they are identical across streams —
			// confirm before relying on per-stream differences.
			optCells = row["cell"].ToString()
			optTabletTypes = row["tablet_types"].ToString()

			workflowType = getVReplicationWorkflowType(row)
			workflowSubType = getVReplicationWorkflowSubType(row)

		}

		targets[targetShard] = target
	}

	if len(targets) == 0 {
		return nil, fmt.Errorf("%w in keyspace %s for %s", ErrNoStreams, targetKeyspace, workflow)
	}

	return &TargetInfo{
		Targets:         targets,
		Frozen:          frozen,
		OptCells:        optCells,
		OptTabletTypes:  optTabletTypes,
		WorkflowType:    workflowType,
		WorkflowSubType: workflowSubType,
	}, nil
}
diff --git a/go/vt/vtctl/workflow/vexec/vexec.go b/go/vt/vtctl/workflow/vexec/vexec.go
index aa55a168380..477b81a1a03 100644
--- a/go/vt/vtctl/workflow/vexec/vexec.go
+++ b/go/vt/vtctl/workflow/vexec/vexec.go
@@ -29,7 +29,6 @@ import (
 	"vitess.io/vitess/go/vt/topo"
 	"vitess.io/vitess/go/vt/topo/topoproto"
 	"vitess.io/vitess/go/vt/vterrors"
-	"vitess.io/vitess/go/vt/vtgate/evalengine"
 	"vitess.io/vitess/go/vt/vttablet/tmclient"
 
 	querypb "vitess.io/vitess/go/vt/proto/query"
@@
-277,7 +276,7 @@ func (vx *VExec) getPlanner(ctx context.Context, table string) (QueryPlanner, er tabletStreamIDMap[aliasStr] = make([]int64, len(qr.Rows)) for i, row := range qr.Rows { - id, err := evalengine.ToInt64(row[0]) + id, err := row[0].ToCastInt64() if err != nil { return nil, err } diff --git a/go/vt/vtctl/workflow/vreplication_stream.go b/go/vt/vtctl/workflow/vreplication_stream.go index 7d3c2b94145..980d686bae9 100644 --- a/go/vt/vtctl/workflow/vreplication_stream.go +++ b/go/vt/vtctl/workflow/vreplication_stream.go @@ -21,9 +21,7 @@ import ( "sort" "strings" - "google.golang.org/protobuf/proto" - - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) @@ -33,7 +31,7 @@ type VReplicationStream struct { ID int32 Workflow string BinlogSource *binlogdatapb.BinlogSource - Position mysql.Position + Position replication.Position WorkflowType binlogdatapb.VReplicationWorkflowType WorkflowSubType binlogdatapb.VReplicationWorkflowSubType DeferSecondaryKeys bool @@ -89,7 +87,7 @@ func (streams VReplicationStreams) Copy() VReplicationStreams { out[i] = &VReplicationStream{ ID: vrs.ID, Workflow: vrs.Workflow, - BinlogSource: proto.Clone(vrs.BinlogSource).(*binlogdatapb.BinlogSource), + BinlogSource: vrs.BinlogSource.CloneVT(), Position: vrs.Position, } } diff --git a/go/vt/vtctld/api.go b/go/vt/vtctld/api.go index ee2c970503b..43afcb29452 100644 --- a/go/vt/vtctld/api.go +++ b/go/vt/vtctld/api.go @@ -48,9 +48,8 @@ import ( ) var ( - localCell string - proxyTablets bool - showTopologyCRUD = true + localCell string + proxyTablets bool ) // This file implements a REST-style API for the vtctld web interface. 
@@ -86,8 +85,6 @@ func init() { func registerVtctldAPIFlags(fs *pflag.FlagSet) { fs.StringVar(&localCell, "cell", localCell, "cell to use") fs.BoolVar(&proxyTablets, "proxy_tablets", proxyTablets, "Setting this true will make vtctld proxy the tablet status instead of redirecting to them") - fs.BoolVar(&showTopologyCRUD, "vtctld_show_topology_crud", showTopologyCRUD, "Controls the display of the CRUD topology actions in the vtctld UI.") - fs.MarkDeprecated("vtctld_show_topology_crud", "It is no longer applicable because vtctld no longer provides a UI.") } func newTabletWithURL(t *topodatapb.Tablet) *TabletWithURL { @@ -534,7 +531,7 @@ func initAPI(ctx context.Context, ts *topo.Server, actions *ActionRepository) { } requestContext := fmt.Sprintf("vtctld/api:%s", apiCallUUID) - executor := schemamanager.NewTabletExecutor(requestContext, wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), time.Duration(req.ReplicaTimeoutSeconds)*time.Second) + executor := schemamanager.NewTabletExecutor(requestContext, wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), time.Duration(req.ReplicaTimeoutSeconds)*time.Second, 0) if err := executor.SetDDLStrategy(req.DDLStrategy); err != nil { return fmt.Errorf("error setting DDL strategy: %v", err) } diff --git a/go/vt/vtctld/api_test.go b/go/vt/vtctld/api_test.go index b00de6b0d09..6443d89a56b 100644 --- a/go/vt/vtctld/api_test.go +++ b/go/vt/vtctld/api_test.go @@ -42,9 +42,11 @@ func compactJSON(in []byte) string { } func TestAPI(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() cells := []string{"cell1", "cell2"} - ts := memorytopo.NewServer(cells...) + ts := memorytopo.NewServer(ctx, cells...) 
+ defer ts.Close() actionRepo := NewActionRepository(ts) server := testutils.HTTPTestServer() defer server.Close() diff --git a/go/vt/vtctld/explorer_test.go b/go/vt/vtctld/explorer_test.go index 62eb7c01642..95ce6c6c3d9 100644 --- a/go/vt/vtctld/explorer_test.go +++ b/go/vt/vtctld/explorer_test.go @@ -17,12 +17,11 @@ limitations under the License. package vtctld import ( + "context" "path" "reflect" "testing" - "context" - "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -35,7 +34,10 @@ func TestHandlePathRoot(t *testing.T) { cells := []string{"cell1", "cell2", "cell3"} want := []string{topo.GlobalCell, "cell1", "cell2", "cell3"} - ts := memorytopo.NewServer(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cells...) + defer ts.Close() ex := newBackendExplorer(ts) result := ex.HandlePath(input, nil) if got := result.Children; !reflect.DeepEqual(got, want) { @@ -52,8 +54,10 @@ func TestHandlePathKeyspace(t *testing.T) { KeyspaceType: topodatapb.KeyspaceType_SNAPSHOT, } - ctx := context.Background() - ts := memorytopo.NewServer(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cells...) + defer ts.Close() if err := ts.CreateKeyspace(ctx, "test_keyspace", keyspace); err != nil { t.Fatalf("CreateKeyspace error: %v", err) } @@ -101,8 +105,11 @@ func TestHandlePathShard(t *testing.T) { keyspace := &topodatapb.Keyspace{} want := "is_primary_serving:true" - ctx := context.Background() - ts := memorytopo.NewServer(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cells...) 
+ defer ts.Close() + if err := ts.CreateKeyspace(ctx, "test_keyspace", keyspace); err != nil { t.Fatalf("CreateKeyspace error: %v", err) } @@ -140,8 +147,11 @@ func TestHandlePathTablet(t *testing.T) { } want := "alias:{cell:\"cell1\" uid:123} hostname:\"example.com\" port_map:{key:\"vt\" value:4321}" - ctx := context.Background() - ts := memorytopo.NewServer(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cells...) + defer ts.Close() + if err := ts.CreateTablet(ctx, tablet); err != nil { t.Fatalf("CreateTablet error: %v", err) } @@ -164,7 +174,11 @@ func TestHandleBadPath(t *testing.T) { cells := []string{"cell1", "cell2", "cell3"} want := "Invalid cell: node doesn't exist: cells/foo/CellInfo" - ts := memorytopo.NewServer(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cells...) + defer ts.Close() + ex := newBackendExplorer(ts) result := ex.HandlePath(input, nil) if got := result.Error; !reflect.DeepEqual(got, want) { diff --git a/go/vt/vtctld/tablet_data_test.go b/go/vt/vtctld/tablet_data_test.go index 34428738e90..d40c6647ef3 100644 --- a/go/vt/vtctld/tablet_data_test.go +++ b/go/vt/vtctld/tablet_data_test.go @@ -17,13 +17,12 @@ limitations under the License. 
package vtctld import ( + "context" "io" "sync" "testing" "time" - "context" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/logutil" @@ -93,7 +92,7 @@ func (s *streamHealthTabletServer) streamHealthUnregister(id int) error { // BroadcastHealth will broadcast the current health to all listeners func (s *streamHealthTabletServer) BroadcastHealth() { shr := &querypb.StreamHealthResponse{ - TabletExternallyReparentedTimestamp: 42, + PrimaryTermStartTimestamp: 42, RealtimeStats: &querypb.RealtimeStats{ HealthError: "testHealthError", ReplicationLagSeconds: 72, @@ -109,7 +108,10 @@ func (s *streamHealthTabletServer) BroadcastHealth() { } func TestTabletData(t *testing.T) { - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") + defer ts.Close() wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) if err := ts.CreateKeyspace(context.Background(), "ks", &topodatapb.Keyspace{}); err != nil { @@ -138,9 +140,9 @@ func TestTabletData(t *testing.T) { }() // Start streaming and wait for the first result. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - result, err := thc.Get(ctx, tablet1.Tablet.Alias) - cancel() + requestCtx, requestCancel := context.WithTimeout(context.Background(), 5*time.Second) + defer requestCancel() + result, err := thc.Get(requestCtx, tablet1.Tablet.Alias) close(stop) if err != nil { diff --git a/go/vt/vtctld/vtctld.go b/go/vt/vtctld/vtctld.go index a265b013075..ab9cf24c9a5 100644 --- a/go/vt/vtctld/vtctld.go +++ b/go/vt/vtctld/vtctld.go @@ -34,7 +34,6 @@ import ( ) var ( - durabilityPolicy = "none" sanitizeLogMessages = false ) @@ -45,8 +44,6 @@ func init() { } func registerVtctldFlags(fs *pflag.FlagSet) { - fs.StringVar(&durabilityPolicy, "durability_policy", durabilityPolicy, "type of durability to enforce. Default is none. 
Other values are dictated by registered plugins") - fs.MarkDeprecated("durability_policy", "Set the correct durability policy in the keyspace information instead.") fs.BoolVar(&sanitizeLogMessages, "vtctld_sanitize_log_messages", sanitizeLogMessages, "When true, vtctld sanitizes logging.") } diff --git a/go/vt/vterrors/code.go b/go/vt/vterrors/code.go index 26abd85e49e..9749a311913 100644 --- a/go/vt/vterrors/code.go +++ b/go/vt/vterrors/code.go @@ -48,6 +48,7 @@ var ( VT03023 = errorWithoutState("VT03023", vtrpcpb.Code_INVALID_ARGUMENT, "INSERT not supported when targeting a key range: %s", "When targeting a range of shards, Vitess does not know which shard to send the INSERT to.") VT03024 = errorWithoutState("VT03024", vtrpcpb.Code_INVALID_ARGUMENT, "'%s' user defined variable does not exists", "The query cannot be prepared using the user defined variable as it does not exists for this session.") VT03025 = errorWithState("VT03025", vtrpcpb.Code_INVALID_ARGUMENT, WrongArguments, "Incorrect arguments to %s", "The execute statement have wrong number of arguments") + VT03026 = errorWithoutState("VT03026", vtrpcpb.Code_INVALID_ARGUMENT, "'%s' bind variable does not exists", "The query cannot be executed as missing the bind variable.") VT05001 = errorWithState("VT05001", vtrpcpb.Code_NOT_FOUND, DbDropExists, "cannot drop database '%s'; database does not exists", "The given database does not exist; Vitess cannot drop it.") VT05002 = errorWithState("VT05002", vtrpcpb.Code_NOT_FOUND, BadDb, "cannot alter database '%s'; unknown database", "The given database does not exist; Vitess cannot alter it.") @@ -59,6 +60,8 @@ var ( VT06001 = errorWithState("VT06001", vtrpcpb.Code_ALREADY_EXISTS, DbCreateExists, "cannot create database '%s'; database exists", "The given database name already exists.") + VT07001 = errorWithState("VT07001", vtrpcpb.Code_PERMISSION_DENIED, KillDeniedError, "%s", "Kill statement is not allowed. 
More in docs about how to enable it and its limitations.") + VT09001 = errorWithState("VT09001", vtrpcpb.Code_FAILED_PRECONDITION, RequiresPrimaryKey, PrimaryVindexNotSet, "the table does not have a primary vindex, the operation is impossible.") VT09002 = errorWithState("VT09002", vtrpcpb.Code_FAILED_PRECONDITION, InnodbReadOnly, "%s statement with a replica target", "This type of DML statement is not allowed on a replica target.") VT09003 = errorWithoutState("VT09003", vtrpcpb.Code_FAILED_PRECONDITION, "INSERT query does not have primary vindex column '%v' in the column list", "A vindex column is mandatory for the insert, please provide one.") @@ -73,10 +76,13 @@ var ( VT09012 = errorWithoutState("VT09012", vtrpcpb.Code_FAILED_PRECONDITION, "%s statement with %s tablet not allowed", "This type of statement is not allowed on the given tablet.") VT09013 = errorWithoutState("VT09013", vtrpcpb.Code_FAILED_PRECONDITION, "semi-sync plugins are not loaded", "Durability policy wants Vitess to use semi-sync, but the MySQL instances don't have the semi-sync plugin loaded.") VT09014 = errorWithoutState("VT09014", vtrpcpb.Code_FAILED_PRECONDITION, "vindex cannot be modified", "The vindex cannot be used as table in DML statement") + VT09015 = errorWithoutState("VT09015", vtrpcpb.Code_FAILED_PRECONDITION, "schema tracking required", "This query cannot be planned without more information on the SQL schema. 
Please turn on schema tracking or add authoritative columns information to your VSchema.") + VT09016 = errorWithState("VT09016", vtrpcpb.Code_FAILED_PRECONDITION, RowIsReferenced2, "Cannot delete or update a parent row: a foreign key constraint fails", "SET DEFAULT is not supported by InnoDB") VT10001 = errorWithoutState("VT10001", vtrpcpb.Code_ABORTED, "foreign key constraints are not allowed", "Foreign key constraints are not allowed, see https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/.") VT12001 = errorWithoutState("VT12001", vtrpcpb.Code_UNIMPLEMENTED, "unsupported: %s", "This statement is unsupported by Vitess. Please rewrite your query to use supported syntax.") + VT12002 = errorWithoutState("VT12002", vtrpcpb.Code_UNIMPLEMENTED, "unsupported: cross-shard foreign keys", "Vitess does not support cross shard foreign keys.") // VT13001 General Error VT13001 = errorWithoutState("VT13001", vtrpcpb.Code_INTERNAL, "[BUG] %s", "This error should not happen and is a bug. Please file an issue on GitHub: https://github.com/vitessio/vitess/issues/new/choose.") @@ -114,6 +120,7 @@ var ( VT03023, VT03024, VT03025, + VT03026, VT05001, VT05002, VT05003, @@ -122,6 +129,7 @@ var ( VT05006, VT05007, VT06001, + VT07001, VT09001, VT09002, VT09003, @@ -136,8 +144,11 @@ var ( VT09012, VT09013, VT09014, + VT09015, + VT09016, VT10001, VT12001, + VT12002, VT13001, VT13002, VT14001, diff --git a/go/vt/vterrors/errors_test.go b/go/vt/vterrors/errors_test.go index c115fb41686..8c039e5874f 100644 --- a/go/vt/vterrors/errors_test.go +++ b/go/vt/vterrors/errors_test.go @@ -257,8 +257,8 @@ func TestStackFormat(t *testing.T) { assertContains(t, got, "middle", false) assertContains(t, got, "outer", false) - logErrStacks = true - defer func() { logErrStacks = false }() + setLogErrStacks(true) + defer func() { setLogErrStacks(false) }() got = fmt.Sprintf("%v", err) assertContains(t, got, "innerMost", true) assertContains(t, got, "middle", true) @@ -340,9 +340,9 @@ func TestWrapping(t 
*testing.T) { err3 := Wrapf(err2, "baz") errorWithoutStack := fmt.Sprintf("%v", err3) - logErrStacks = true + setLogErrStacks(true) errorWithStack := fmt.Sprintf("%v", err3) - logErrStacks = false + setLogErrStacks(false) assertEquals(t, err3.Error(), "baz: bar: foo") assertContains(t, errorWithoutStack, "foo", true) diff --git a/go/vt/vterrors/state.go b/go/vt/vterrors/state.go index d7ed04e1c7b..5e3dcf22dfb 100644 --- a/go/vt/vterrors/state.go +++ b/go/vt/vterrors/state.go @@ -55,6 +55,8 @@ const ( CantDoThisInTransaction RequiresPrimaryKey OperandColumns + RowIsReferenced2 + NoReferencedRow2 UnknownStmtHandler // not found @@ -81,6 +83,7 @@ const ( // permission denied AccessDeniedError + KillDeniedError // server not available ServerNotAvailable @@ -88,6 +91,31 @@ const ( // unknown timezone UnknownTimeZone + // regexp errors + RegexpStringNotTerminated + RegexpBufferOverflow + RegexpIllegalArgument + RegexpIndexOutOfBounds + RegexpInternal + RegexpRuleSyntax + RegexpBadEscapeSequence + RegexpUnimplemented + RegexpMismatchParen + RegexpBadInterval + RegexpMaxLtMin + RegexpInvalidBackRef + RegexpLookBehindLimit + RegexpMissingCloseBracket + RegexpInvalidRange + RegexpStackOverflow + RegexpTimeOut + RegexpPatternTooBig + RegexpInvalidCaptureGroup + RegexpInvalidFlag + + CharacterSetMismatch + WrongParametersToNativeFct + // No state should be added below NumOfStates NumOfStates ) diff --git a/go/vt/vterrors/vterrors.go b/go/vt/vterrors/vterrors.go index 3b264b01104..6a322837de9 100644 --- a/go/vt/vterrors/vterrors.go +++ b/go/vt/vterrors/vterrors.go @@ -91,19 +91,35 @@ import ( "errors" "fmt" "io" + "sync" "github.com/spf13/pflag" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) -// logErrStacks controls whether or not printing errors includes the +// logErrStacks controls whether printing errors includes the // embedded stack trace in the output. 
var logErrStacks bool +var muLogErrStacks sync.Mutex + +func getLogErrStacks() bool { + muLogErrStacks.Lock() + defer muLogErrStacks.Unlock() + return logErrStacks +} + +func setLogErrStacks(val bool) { + muLogErrStacks.Lock() + defer muLogErrStacks.Unlock() + logErrStacks = val +} // RegisterFlags registers the command-line options that control vterror // behavior on the provided FlagSet. func RegisterFlags(fs *pflag.FlagSet) { + muLogErrStacks.Lock() + defer muLogErrStacks.Unlock() fs.BoolVar(&logErrStacks, "log_err_stacks", false, "log stack traces for errors") } @@ -161,7 +177,7 @@ func (f *fundamental) Format(s fmt.State, verb rune) { case 'v': panicIfError(io.WriteString(s, "Code: "+f.code.String()+"\n")) panicIfError(io.WriteString(s, f.msg+"\n")) - if logErrStacks { + if getLogErrStacks() { f.stack.Format(s, verb) } return @@ -278,7 +294,7 @@ func (w *wrapping) Format(s fmt.State, verb rune) { if rune('v') == verb { panicIfError(fmt.Fprintf(s, "%v\n", w.Cause())) panicIfError(io.WriteString(s, w.msg)) - if logErrStacks { + if getLogErrStacks() { w.stack.Format(s, verb) } return diff --git a/go/vt/vterrors/vterrorsgen/main.go b/go/vt/vterrors/vterrorsgen/main.go index f705813af8c..2aafee509e6 100644 --- a/go/vt/vterrors/vterrorsgen/main.go +++ b/go/vt/vterrors/vterrorsgen/main.go @@ -22,7 +22,7 @@ import ( "strings" "text/template" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/vterrors" ) @@ -44,8 +44,8 @@ const ( func main() { t := template.New("template") t.Funcs(map[string]any{ - "ConvertStateToMySQLErrorCode": mysql.ConvertStateToMySQLErrorCode, - "ConvertStateToMySQLState": mysql.ConvertStateToMySQLState, + "ConvertStateToMySQLErrorCode": sqlerror.ConvertStateToMySQLErrorCode, + "ConvertStateToMySQLState": sqlerror.ConvertStateToMySQLState, "FormatError": func(err error) string { s := err.Error() return strings.TrimSpace(strings.Join(strings.Split(s, ":")[1:], ":")) diff --git a/go/vt/vtexplain/vtexplain.go 
b/go/vt/vtexplain/vtexplain.go index 74810dc618f..55e76606e08 100644 --- a/go/vt/vtexplain/vtexplain.go +++ b/go/vt/vtexplain/vtexplain.go @@ -21,6 +21,7 @@ package vtexplain import ( "bytes" + "context" "fmt" "sort" "strings" @@ -180,7 +181,7 @@ type TabletActions struct { } // Init sets up the fake execution environment -func Init(vSchemaStr, sqlSchema, ksShardMapStr string, opts *Options) (*VTExplain, error) { +func Init(ctx context.Context, vSchemaStr, sqlSchema, ksShardMapStr string, opts *Options) (*VTExplain, error) { // Verify options if opts.ReplicationMode != "ROW" && opts.ReplicationMode != "STATEMENT" { return nil, fmt.Errorf("invalid replication mode \"%s\"", opts.ReplicationMode) @@ -200,7 +201,7 @@ func Init(vSchemaStr, sqlSchema, ksShardMapStr string, opts *Options) (*VTExplai Autocommit: true, }} vte.setGlobalTabletEnv(tabletEnv) - err = vte.initVtgateExecutor(vSchemaStr, ksShardMapStr, opts) + err = vte.initVtgateExecutor(ctx, vSchemaStr, ksShardMapStr, opts) if err != nil { return nil, fmt.Errorf("initVtgateExecutor: %v", err.Error()) } @@ -210,10 +211,15 @@ func Init(vSchemaStr, sqlSchema, ksShardMapStr string, opts *Options) (*VTExplai // Stop and cleans up fake execution environment func (vte *VTExplain) Stop() { + if vte.vtgateExecutor != nil { + vte.vtgateExecutor.Close() + } + // Cleanup all created fake dbs. if vte.explainTopo != nil { for _, conn := range vte.explainTopo.TabletConns { conn.tsv.StopService() + conn.tsv.Close(context.Background()) } for _, conn := range vte.explainTopo.TabletConns { conn.db.Close() diff --git a/go/vt/vtexplain/vtexplain_test.go b/go/vt/vtexplain/vtexplain_test.go index 30fd289b671..54f1efbc522 100644 --- a/go/vt/vtexplain/vtexplain_test.go +++ b/go/vt/vtexplain/vtexplain_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package vtexplain import ( + "context" "encoding/json" "fmt" "os" @@ -27,12 +28,12 @@ import ( "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv/tabletenvtest" - - querypb "vitess.io/vitess/go/vt/proto/query" ) func defaultTestOpts() *Options { @@ -48,7 +49,7 @@ type testopts struct { shardmap map[string]map[string]*topo.ShardInfo } -func initTest(mode string, opts *Options, topts *testopts, t *testing.T) *VTExplain { +func initTest(ctx context.Context, mode string, opts *Options, topts *testopts, t *testing.T) *VTExplain { schema, err := os.ReadFile("testdata/test-schema.sql") require.NoError(t, err) @@ -64,7 +65,7 @@ func initTest(mode string, opts *Options, topts *testopts, t *testing.T) *VTExpl } opts.ExecutionMode = mode - vte, err := Init(string(vSchema), string(schema), shardmap, opts) + vte, err := Init(ctx, string(vSchema), string(schema), shardmap, opts) require.NoError(t, err, "vtexplain Init error\n%s", string(schema)) return vte } @@ -85,7 +86,10 @@ func testExplain(testcase string, opts *Options, t *testing.T) { func runTestCase(testcase, mode string, opts *Options, topts *testopts, t *testing.T) { t.Run(testcase, func(t *testing.T) { - vte := initTest(mode, opts, topts, t) + ctx := utils.LeakCheckContext(t) + + vte := initTest(ctx, mode, opts, topts, t) + defer vte.Stop() sqlFile := fmt.Sprintf("testdata/%s-queries.sql", testcase) sql, err := os.ReadFile(sqlFile) @@ -141,28 +145,6 @@ func TestExplain(t *testing.T) { } tests := []test{ {"unsharded", defaultTestOpts()}, - {"selectsharded", defaultTestOpts()}, - {"insertsharded", defaultTestOpts()}, - {"updatesharded", defaultTestOpts()}, - {"deletesharded", defaultTestOpts()}, - {"comments", defaultTestOpts()}, - {"options", &Options{ - ReplicationMode: "STATEMENT", - NumShards: 4, - Normalize: 
false, - }}, - {"target", &Options{ - ReplicationMode: "ROW", - NumShards: 4, - Normalize: false, - Target: "ks_sharded/40-80", - }}, - {"gen4", &Options{ - ReplicationMode: "ROW", - NumShards: 4, - Normalize: true, - PlannerVersion: querypb.ExecuteOptions_Gen4, - }}, } for _, tst := range tests { @@ -171,7 +153,10 @@ func TestExplain(t *testing.T) { } func TestErrors(t *testing.T) { - vte := initTest(ModeMulti, defaultTestOpts(), &testopts{}, t) + ctx := utils.LeakCheckContext(t) + + vte := initTest(ctx, ModeMulti, defaultTestOpts(), &testopts{}, t) + defer vte.Stop() tests := []struct { SQL string @@ -208,7 +193,10 @@ func TestErrors(t *testing.T) { } func TestJSONOutput(t *testing.T) { - vte := initTest(ModeMulti, defaultTestOpts(), &testopts{}, t) + ctx := utils.LeakCheckContext(t) + + vte := initTest(ctx, ModeMulti, defaultTestOpts(), &testopts{}, t) + defer vte.Stop() sql := "select 1 from user where id = 1" explains, err := vte.Run(sql) require.NoError(t, err, "vtexplain error") @@ -344,6 +332,9 @@ func TestUsingKeyspaceShardMap(t *testing.T) { } func TestInit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vschema := `{ "ks1": { "sharded": true, @@ -353,7 +344,7 @@ func TestInit(t *testing.T) { } }` schema := "create table table_missing_primary_vindex (id int primary key)" - _, err := Init(vschema, schema, "", defaultTestOpts()) + _, err := Init(ctx, vschema, schema, "", defaultTestOpts()) require.Error(t, err) require.Contains(t, err.Error(), "missing primary col vindex") } diff --git a/go/vt/vtexplain/vtexplain_vtgate.go b/go/vt/vtexplain/vtexplain_vtgate.go index 682919e5569..aa219fdb1eb 100644 --- a/go/vt/vtexplain/vtexplain_vtgate.go +++ b/go/vt/vtexplain/vtexplain_vtgate.go @@ -25,10 +25,10 @@ import ( "sort" "strings" + "vitess.io/vitess/go/cache/theine" "vitess.io/vitess/go/vt/vtgate/logstats" "vitess.io/vitess/go/vt/vtgate/vindexes" - "vitess.io/vitess/go/cache" "vitess.io/vitess/go/vt/topo" 
"vitess.io/vitess/go/vt/topo/memorytopo" @@ -50,14 +50,14 @@ import ( vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" ) -func (vte *VTExplain) initVtgateExecutor(vSchemaStr, ksShardMapStr string, opts *Options) error { +func (vte *VTExplain) initVtgateExecutor(ctx context.Context, vSchemaStr, ksShardMapStr string, opts *Options) error { vte.explainTopo = &ExplainTopo{NumShards: opts.NumShards} - vte.explainTopo.TopoServer = memorytopo.NewServer(vtexplainCell) + vte.explainTopo.TopoServer = memorytopo.NewServer(ctx, vtexplainCell) vte.healthCheck = discovery.NewFakeHealthCheck(nil) - resolver := vte.newFakeResolver(opts, vte.explainTopo, vtexplainCell) + resolver := vte.newFakeResolver(ctx, opts, vte.explainTopo, vtexplainCell) - err := vte.buildTopology(opts, vSchemaStr, ksShardMapStr, opts.NumShards) + err := vte.buildTopology(ctx, opts, vSchemaStr, ksShardMapStr, opts.NumShards) if err != nil { return err } @@ -73,18 +73,17 @@ func (vte *VTExplain) initVtgateExecutor(vSchemaStr, ksShardMapStr string, opts streamSize := 10 var schemaTracker vtgate.SchemaInfo // no schema tracker for these tests - vte.vtgateExecutor = vtgate.NewExecutor(context.Background(), vte.explainTopo, vtexplainCell, resolver, opts.Normalize, false, streamSize, cache.DefaultConfig, schemaTracker, false, opts.PlannerVersion) - queryLogBufferSize := 10 - vtgate.SetQueryLogger(streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize)) + plans := theine.NewStore[vtgate.PlanCacheKey, *engine.Plan](4*1024*1024, false) + vte.vtgateExecutor = vtgate.NewExecutor(ctx, vte.explainTopo, vtexplainCell, resolver, opts.Normalize, false, streamSize, plans, schemaTracker, false, opts.PlannerVersion) + vte.vtgateExecutor.SetQueryLogger(streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize)) return nil } -func (vte *VTExplain) newFakeResolver(opts *Options, serv srvtopo.Server, cell string) *vtgate.Resolver { - ctx := context.Background() +func (vte *VTExplain) newFakeResolver(ctx context.Context, 
opts *Options, serv srvtopo.Server, cell string) *vtgate.Resolver { gw := vtgate.NewTabletGateway(ctx, vte.healthCheck, serv, cell) - _ = gw.WaitForTablets([]topodatapb.TabletType{topodatapb.TabletType_REPLICA}) + _ = gw.WaitForTablets(ctx, []topodatapb.TabletType{topodatapb.TabletType_REPLICA}) txMode := vtgatepb.TransactionMode_MULTI if opts.ExecutionMode == ModeTwoPC { @@ -96,7 +95,7 @@ func (vte *VTExplain) newFakeResolver(opts *Options, serv srvtopo.Server, cell s return vtgate.NewResolver(srvResolver, serv, cell, sc) } -func (vte *VTExplain) buildTopology(opts *Options, vschemaStr string, ksShardMapStr string, numShardsPerKeyspace int) error { +func (vte *VTExplain) buildTopology(ctx context.Context, opts *Options, vschemaStr string, ksShardMapStr string, numShardsPerKeyspace int) error { vte.explainTopo.Lock.Lock() defer vte.explainTopo.Lock.Unlock() @@ -144,7 +143,7 @@ func (vte *VTExplain) buildTopology(opts *Options, vschemaStr string, ksShardMap log.Infof("registering test tablet %s for keyspace %s shard %s", hostname, ks, shard.Name) tablet := vte.healthCheck.AddFakeTablet(vtexplainCell, hostname, 1, ks, shard.Name, topodatapb.TabletType_PRIMARY, true, 1, nil, func(t *topodatapb.Tablet) queryservice.QueryService { - return vte.newTablet(opts, t) + return vte.newTablet(ctx, opts, t) }) vte.explainTopo.TabletConns[hostname] = tablet.(*explainTablet) vte.explainTopo.KeyspaceShards[ks][shard.Name] = shard @@ -209,29 +208,27 @@ func (vte *VTExplain) vtgateExecute(sql string) ([]*engine.Plan, map[string]*Tab // This will ensure that the commit/rollback order is predictable. 
vte.sortShardSession() - // use the plan cache to get the set of plans used for this query, then - // clear afterwards for the next run - planCache := vte.vtgateExecutor.Plans() - - _, err := vte.vtgateExecutor.Execute(context.Background(), nil, "VtexplainExecute", vtgate.NewSafeSession(vte.vtgateSession), sql, nil) + _, err := vte.vtgateExecutor.Execute(context.Background(), nil, nil, "VtexplainExecute", vtgate.NewSafeSession(vte.vtgateSession), sql, nil) if err != nil { for _, tc := range vte.explainTopo.TabletConns { tc.tabletQueries = nil tc.mysqlQueries = nil } - planCache.Clear() - + vte.vtgateExecutor.ClearPlans() return nil, nil, vterrors.Wrapf(err, "vtexplain execute error in '%s'", sql) } var plans []*engine.Plan - planCache.ForEach(func(value any) bool { - plan := value.(*engine.Plan) + + // use the plan cache to get the set of plans used for this query, then + // clear afterwards for the next run + vte.vtgateExecutor.ForEachPlan(func(plan *engine.Plan) bool { plan.ExecTime = 0 plans = append(plans, plan) return true }) - planCache.Clear() + + vte.vtgateExecutor.ClearPlans() tabletActions := make(map[string]*TabletActions) for shard, tc := range vte.explainTopo.TabletConns { diff --git a/go/vt/vtexplain/vtexplain_vttablet.go b/go/vt/vtexplain/vtexplain_vttablet.go index 14349dd05f1..f902eca8b07 100644 --- a/go/vt/vtexplain/vtexplain_vttablet.go +++ b/go/vt/vtexplain/vtexplain_vttablet.go @@ -102,7 +102,7 @@ type explainTablet struct { var _ queryservice.QueryService = (*explainTablet)(nil) -func (vte *VTExplain) newTablet(opts *Options, t *topodatapb.Tablet) *explainTablet { +func (vte *VTExplain) newTablet(ctx context.Context, opts *Options, t *topodatapb.Tablet) *explainTablet { db := fakesqldb.New(nil) sidecardb.AddSchemaInitQueries(db, true) @@ -117,7 +117,7 @@ func (vte *VTExplain) newTablet(opts *Options, t *topodatapb.Tablet) *explainTab config.EnableTableGC = false // XXX much of this is cloned from the tabletserver tests - tsv := 
tabletserver.NewTabletServer(topoproto.TabletAliasString(t.Alias), config, memorytopo.NewServer(""), t.Alias) + tsv := tabletserver.NewTabletServer(ctx, topoproto.TabletAliasString(t.Alias), config, memorytopo.NewServer(ctx, ""), t.Alias) tablet := explainTablet{db: db, tsv: tsv, vte: vte} db.Handler = &tablet @@ -720,13 +720,13 @@ func (t *explainTablet) analyzeWhere(selStmt *sqlparser.Select, tableColumnMap m if !ok { continue } - value, err := evalengine.LiteralToValue(lit) + value, err := sqlparser.LiteralToValue(lit) if err != nil { return "", nil, 0, nil, err } // Cast the value in the tuple to the expected value of the column - castedValue, err := evalengine.Cast(value, colType) + castedValue, err := sqltypes.Cast(value, colType) if err != nil { return "", nil, 0, nil, err } diff --git a/go/vt/vtexplain/vtexplain_vttablet_test.go b/go/vt/vtexplain/vtexplain_vttablet_test.go index 9f6159ad2f9..614ad186224 100644 --- a/go/vt/vtexplain/vtexplain_vttablet_test.go +++ b/go/vt/vtexplain/vtexplain_vttablet_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package vtexplain import ( + "context" "encoding/json" "testing" @@ -67,7 +68,9 @@ create table t2 ( NumShards: 2, } - vte, err := Init(testVSchema, testSchema, "", opts) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vte, err := Init(ctx, testVSchema, testSchema, "", opts) require.NoError(t, err) defer vte.Stop() @@ -119,12 +122,16 @@ create table test_partitioned ( if err != nil { t.Fatalf("parseSchema: %v", err) } - vte := initTest(ModeMulti, defaultTestOpts(), &testopts{}, t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vte := initTest(ctx, ModeMulti, defaultTestOpts(), &testopts{}, t) + defer vte.Stop() tabletEnv, _ := newTabletEnvironment(ddls, defaultTestOpts()) vte.setGlobalTabletEnv(tabletEnv) - tablet := vte.newTablet(defaultTestOpts(), &topodatapb.Tablet{ + tablet := vte.newTablet(ctx, defaultTestOpts(), &topodatapb.Tablet{ Keyspace: "test_keyspace", Shard: "-80", Alias: &topodatapb.TabletAlias{}, diff --git a/go/vt/vtgate/autocommit_test.go b/go/vt/vtgate/autocommit_test.go index 06a0ef46619..2a6b3375954 100644 --- a/go/vt/vtgate/autocommit_test.go +++ b/go/vt/vtgate/autocommit_test.go @@ -35,7 +35,7 @@ import ( // TestAutocommitUpdateSharded: instant-commit. func TestAutocommitUpdateSharded(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) _, err := autocommitExec(executor, "update user set a=2 where id = 1") require.NoError(t, err) @@ -52,7 +52,7 @@ func TestAutocommitUpdateSharded(t *testing.T) { // TestAutocommitUpdateLookup: transaction: select before update. 
func TestAutocommitUpdateLookup(t *testing.T) { - executor, sbc1, _, sbclookup := createExecutorEnv() + executor, sbc1, _, sbclookup, _ := createExecutorEnv(t) sbclookup.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("b|a", "int64|varbinary"), "2|1", @@ -81,7 +81,7 @@ func TestAutocommitUpdateLookup(t *testing.T) { // TestAutocommitUpdateVindexChange: transaction: select & update before final update. func TestAutocommitUpdateVindexChange(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() + executor, sbc, _, sbclookup, _ := createExecutorEnv(t) sbc.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("id|name|lastname|name_lastname_keyspace_id_map", "int64|int32|varchar|int64"), "1|1|foo|0", @@ -120,7 +120,7 @@ func TestAutocommitUpdateVindexChange(t *testing.T) { // TestAutocommitDeleteSharded: instant-commit. func TestAutocommitDeleteSharded(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) _, err := autocommitExec(executor, "delete from user_extra where user_id = 1") require.NoError(t, err) @@ -137,7 +137,7 @@ func TestAutocommitDeleteSharded(t *testing.T) { // TestAutocommitDeleteLookup: transaction: select before update. func TestAutocommitDeleteLookup(t *testing.T) { - executor, sbc1, _, sbclookup := createExecutorEnv() + executor, sbc1, _, sbclookup, _ := createExecutorEnv(t) sbc1.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("id|name|lastname", "int64|int32|varchar"), "1|1|foo", @@ -179,7 +179,7 @@ func TestAutocommitDeleteLookup(t *testing.T) { // TestAutocommitDeleteIn: instant-commit. 
func TestAutocommitDeleteIn(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) _, err := autocommitExec(executor, "delete from user_extra where user_id in (1, 2)") require.NoError(t, err) @@ -196,7 +196,7 @@ func TestAutocommitDeleteIn(t *testing.T) { // TestAutocommitDeleteMultiShard: instant-commit. func TestAutocommitDeleteMultiShard(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) _, err := autocommitExec(executor, "delete from user_extra where user_id = user_id + 1") require.NoError(t, err) @@ -216,7 +216,7 @@ func TestAutocommitDeleteMultiShard(t *testing.T) { // TestAutocommitDeleteMultiShardAutoCommit: instant-commit. func TestAutocommitDeleteMultiShardAutoCommit(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) _, err := autocommitExec(executor, "delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from user_extra where user_id = user_id + 1") require.NoError(t, err) @@ -236,7 +236,7 @@ func TestAutocommitDeleteMultiShardAutoCommit(t *testing.T) { // TestAutocommitInsertSharded: instant-commit. func TestAutocommitInsertSharded(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) _, err := autocommitExec(executor, "insert into user_extra(user_id, v) values (1, 2)") require.NoError(t, err) @@ -255,7 +255,7 @@ func TestAutocommitInsertSharded(t *testing.T) { // TestAutocommitInsertLookup: transaction: select before update. 
func TestAutocommitInsertLookup(t *testing.T) { - executor, sbc1, _, sbclookup := createExecutorEnv() + executor, sbc1, _, sbclookup, _ := createExecutorEnv(t) _, err := autocommitExec(executor, "insert into user(id, v, name) values (1, 2, 'myname')") require.NoError(t, err) @@ -274,7 +274,6 @@ func TestAutocommitInsertLookup(t *testing.T) { BindVariables: map[string]*querypb.BindVariable{ "_Id_0": sqltypes.Int64BindVariable(1), "_name_0": sqltypes.StringBindVariable("myname"), - "__seq0": sqltypes.Int64BindVariable(1), }, }}) testCommitCount(t, "sbc1", sbc1, 1) @@ -282,51 +281,52 @@ func TestAutocommitInsertLookup(t *testing.T) { // TestAutocommitInsertShardAutoCommit: instant-commit. func TestAutocommitInsertMultishardAutoCommit(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - - _, err := autocommitExec(executor, "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (1, 2), (3, 4)") - require.NoError(t, err) - - assertQueries(t, sbc1, []*querypb.BoundQuery{{ - Sql: "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (:_user_id_0, 2)", - BindVariables: map[string]*querypb.BindVariable{ - "_user_id_0": sqltypes.Int64BindVariable(1), - "_user_id_1": sqltypes.Int64BindVariable(3), - }, - }}) - testCommitCount(t, "sbc1", sbc1, 0) - - assertQueries(t, sbc2, []*querypb.BoundQuery{{ - Sql: "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (:_user_id_1, 4)", - BindVariables: map[string]*querypb.BindVariable{ - "_user_id_0": sqltypes.Int64BindVariable(1), - "_user_id_1": sqltypes.Int64BindVariable(3), - }, - }}) - testCommitCount(t, "sbc2", sbc2, 0) + t.Run("1", func(t *testing.T) { + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) + + _, err := autocommitExec(executor, "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (1, 2), (3, 4)") + require.NoError(t, err) + + assertQueries(t, sbc1, []*querypb.BoundQuery{{ + Sql: "insert /*vt+ 
MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (:_user_id_0, 2)", + BindVariables: map[string]*querypb.BindVariable{ + "_user_id_0": sqltypes.Int64BindVariable(1), + }, + }}) + testCommitCount(t, "sbc1", sbc1, 0) + + assertQueries(t, sbc2, []*querypb.BoundQuery{{ + Sql: "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (:_user_id_1, 4)", + BindVariables: map[string]*querypb.BindVariable{ + "_user_id_1": sqltypes.Int64BindVariable(3), + }, + }}) + testCommitCount(t, "sbc2", sbc2, 0) + }) - executor, sbc1, sbc2, _ = createExecutorEnv() - // Make the first shard fail - the second completes anyway - sbc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 - _, err = autocommitExec(executor, "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (1, 2), (3, 4)") - require.Error(t, err) - require.Contains(t, err.Error(), "INVALID_ARGUMENT", "expected invalid argument error") + t.Run("2", func(t *testing.T) { + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) - testCommitCount(t, "sbc1", sbc1, 0) + // Make the first shard fail - the second completes anyway + sbc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 + _, err := autocommitExec(executor, "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (1, 2), (3, 4)") + require.Error(t, err) + require.Contains(t, err.Error(), "INVALID_ARGUMENT", "expected invalid argument error") - assertQueries(t, sbc2, []*querypb.BoundQuery{{ - Sql: "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (:_user_id_1, 4)", - BindVariables: map[string]*querypb.BindVariable{ - "_user_id_0": sqltypes.Int64BindVariable(1), - "_user_id_1": sqltypes.Int64BindVariable(3), - }, - }}) - testCommitCount(t, "sbc2", sbc2, 0) + testCommitCount(t, "sbc1", sbc1, 0) + assertQueries(t, sbc2, []*querypb.BoundQuery{{ + Sql: "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (:_user_id_1, 4)", + BindVariables: 
map[string]*querypb.BindVariable{ + "_user_id_1": sqltypes.Int64BindVariable(3), + }, + }}) + testCommitCount(t, "sbc2", sbc2, 0) + }) } func TestAutocommitInsertMultishard(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) _, err := autocommitExec(executor, "insert into user_extra(user_id, v) values (1, 2), (3, 4)") require.NoError(t, err) @@ -335,7 +335,6 @@ func TestAutocommitInsertMultishard(t *testing.T) { Sql: "insert into user_extra(user_id, v) values (:_user_id_0, 2)", BindVariables: map[string]*querypb.BindVariable{ "_user_id_0": sqltypes.Int64BindVariable(1), - "_user_id_1": sqltypes.Int64BindVariable(3), }, }}) testCommitCount(t, "sbc1", sbc1, 1) @@ -343,7 +342,6 @@ func TestAutocommitInsertMultishard(t *testing.T) { assertQueries(t, sbc2, []*querypb.BoundQuery{{ Sql: "insert into user_extra(user_id, v) values (:_user_id_1, 4)", BindVariables: map[string]*querypb.BindVariable{ - "_user_id_0": sqltypes.Int64BindVariable(1), "_user_id_1": sqltypes.Int64BindVariable(3), }, }}) @@ -352,7 +350,7 @@ func TestAutocommitInsertMultishard(t *testing.T) { // TestAutocommitInsertAutoinc: instant-commit: sequence fetch is not transactional. func TestAutocommitInsertAutoinc(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, _ := createExecutorEnv(t) _, err := autocommitExec(executor, "insert into main1(id, name) values (null, 'myname')") require.NoError(t, err) @@ -371,7 +369,7 @@ func TestAutocommitInsertAutoinc(t *testing.T) { // TestAutocommitTransactionStarted: no instant-commit. 
func TestAutocommitTransactionStarted(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) session := &vtgatepb.Session{ TargetString: "@primary", @@ -382,7 +380,7 @@ func TestAutocommitTransactionStarted(t *testing.T) { // single shard query - no savepoint needed sql := "update `user` set a = 2 where id = 1" - _, err := executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) + _, err := executor.Execute(context.Background(), nil, nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) require.Len(t, sbc1.Queries, 1) require.Equal(t, sql, sbc1.Queries[0].Sql) @@ -393,7 +391,7 @@ func TestAutocommitTransactionStarted(t *testing.T) { // multi shard query - savepoint needed sql = "update `user` set a = 2 where id in (1, 4)" - _, err = executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) require.Len(t, sbc1.Queries, 2) require.Contains(t, sbc1.Queries[0].Sql, "savepoint") @@ -403,7 +401,7 @@ func TestAutocommitTransactionStarted(t *testing.T) { // TestAutocommitDirectTarget: instant-commit. 
func TestAutocommitDirectTarget(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, _ := createExecutorEnv(t) session := &vtgatepb.Session{ TargetString: "TestUnsharded/0@primary", @@ -412,7 +410,7 @@ func TestAutocommitDirectTarget(t *testing.T) { } sql := "insert into `simple`(val) values ('val')" - _, err := executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) + _, err := executor.Execute(context.Background(), nil, nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) assertQueries(t, sbclookup, []*querypb.BoundQuery{{ @@ -424,7 +422,7 @@ func TestAutocommitDirectTarget(t *testing.T) { // TestAutocommitDirectRangeTarget: no instant-commit. func TestAutocommitDirectRangeTarget(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) session := &vtgatepb.Session{ TargetString: "TestExecutor[-]@primary", @@ -433,7 +431,7 @@ func TestAutocommitDirectRangeTarget(t *testing.T) { } sql := "delete from sharded_user_msgs limit 1000" - _, err := executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) + _, err := executor.Execute(context.Background(), nil, nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) assertQueries(t, sbc1, []*querypb.BoundQuery{{ @@ -450,5 +448,5 @@ func autocommitExec(executor *Executor, sql string) (*sqltypes.Result, error) { TransactionMode: vtgatepb.TransactionMode_MULTI, } - return executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) + return executor.Execute(context.Background(), nil, nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) } diff --git a/go/vt/vtgate/bench_test.go 
b/go/vt/vtgate/bench_test.go index daa3ee40f79..c0201a6e019 100644 --- a/go/vt/vtgate/bench_test.go +++ b/go/vt/vtgate/bench_test.go @@ -21,9 +21,6 @@ import ( "fmt" "testing" - "context" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" ) @@ -61,16 +58,12 @@ func init() { } func BenchmarkWithNormalizer(b *testing.B) { - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - _ = hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - saved := rpcVTGate.executor.normalize - rpcVTGate.executor.normalize = true - defer func() { rpcVTGate.executor.normalize = saved }() + vtgateInst, _, ctx := createVtgateEnv(b) for i := 0; i < b.N; i++ { - _, _, err := rpcVTGate.Execute( - context.Background(), + _, _, err := vtgateInst.Execute( + ctx, + nil, nil, &vtgatepb.Session{ TargetString: "@primary", @@ -86,16 +79,14 @@ func BenchmarkWithNormalizer(b *testing.B) { } func BenchmarkWithoutNormalizer(b *testing.B) { - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - _ = hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - saved := rpcVTGate.executor.normalize - rpcVTGate.executor.normalize = false - defer func() { rpcVTGate.executor.normalize = saved }() + vtgateInst, _, ctx := createVtgateEnv(b) + + vtgateInst.executor.normalize = false for i := 0; i < b.N; i++ { - _, _, err := rpcVTGate.Execute( - context.Background(), + _, _, err := vtgateInst.Execute( + ctx, + nil, nil, &vtgatepb.Session{ TargetString: "@primary", diff --git a/go/vt/vtgate/buffer/buffer.go b/go/vt/vtgate/buffer/buffer.go index f9618b6e0c7..622bb03b082 100644 --- a/go/vt/vtgate/buffer/buffer.go +++ b/go/vt/vtgate/buffer/buffer.go @@ -28,14 +28,13 @@ package buffer import ( "context" - "fmt" + "strings" "sync" "golang.org/x/sync/semaphore" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/log" - topodatapb 
"vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" @@ -67,12 +66,61 @@ const ( // currently retried. type RetryDoneFunc context.CancelFunc +const ( + ClusterEventReshardingInProgress = "current keyspace is being resharded" + ClusterEventReparentInProgress = "primary is not serving, there may be a reparent operation in progress" + ClusterEventMoveTables = "disallowed due to rule" +) + +var ClusterEvents []string + +func init() { + ClusterEvents = []string{ + ClusterEventReshardingInProgress, + ClusterEventReparentInProgress, + ClusterEventMoveTables, + } +} + // CausedByFailover returns true if "err" was supposedly caused by a failover. // To simplify things, we've merged the detection for different MySQL flavors // in one function. Supported flavors: MariaDB, MySQL func CausedByFailover(err error) bool { log.V(2).Infof("Checking error (type: %T) if it is caused by a failover. err: %v", err, err) - return vterrors.Code(err) == vtrpcpb.Code_CLUSTER_EVENT + reason, isFailover := isFailoverError(err) + if isFailover { + log.Infof("CausedByFailover signalling failover for reason: %s", reason) + } + return isFailover +} + +// for debugging purposes +func getReason(err error) string { + for _, ce := range ClusterEvents { + if strings.Contains(err.Error(), ce) { + return ce + } + } + return "" +} + +// isFailoverError looks at the error returned by the sql query execution to check if there is a cluster event +// (caused by resharding or reparenting) or a denied tables error seen during switch writes in MoveTables +func isFailoverError(err error) (string, bool) { + var reason string + var isFailover bool + switch vterrors.Code(err) { + case vtrpcpb.Code_CLUSTER_EVENT: + isFailover = true + case vtrpcpb.Code_FAILED_PRECONDITION: + if strings.Contains(err.Error(), ClusterEventMoveTables) { + isFailover = true + } + } + if isFailover { + reason = getReason(err) + } + return reason, isFailover } // Buffer is used to 
track ongoing PRIMARY tablet failovers and buffer @@ -140,36 +188,14 @@ func (b *Buffer) WaitForFailoverEnd(ctx context.Context, keyspace, shard string, requestsSkipped.Add([]string{keyspace, shard, skippedDisabled}, 1) return nil, nil } - return sb.waitForFailoverEnd(ctx, keyspace, shard, err) } -// ProcessPrimaryHealth notifies the buffer to record a new primary -// and end any failover buffering that may be in progress -func (b *Buffer) ProcessPrimaryHealth(th *discovery.TabletHealth) { - if th.Target.TabletType != topodatapb.TabletType_PRIMARY { - panic(fmt.Sprintf("BUG: non-PRIMARY TabletHealth object must not be forwarded: %#v", th)) - } - timestamp := th.PrimaryTermStartTime - if timestamp == 0 { - // Primarys where TabletExternallyReparented was never called will return 0. - // Ignore them. - return - } - - sb := b.getOrCreateBuffer(th.Target.Keyspace, th.Target.Shard) - if sb == nil { - // Buffer is shut down. Ignore all calls. - return - } - sb.recordExternallyReparentedTimestamp(timestamp, th.Tablet.Alias) -} - func (b *Buffer) HandleKeyspaceEvent(ksevent *discovery.KeyspaceEvent) { for _, shard := range ksevent.Shards { sb := b.getOrCreateBuffer(shard.Target.Keyspace, shard.Target.Shard) if sb != nil { - sb.recordKeyspaceEvent(shard.Tablet, shard.Serving) + sb.recordKeyspaceEvent(shard.Tablet, shard.Serving, ksevent) } } } diff --git a/go/vt/vtgate/buffer/buffer_helper_test.go b/go/vt/vtgate/buffer/buffer_helper_test.go index a6b7605d4da..2deb460fc39 100644 --- a/go/vt/vtgate/buffer/buffer_helper_test.go +++ b/go/vt/vtgate/buffer/buffer_helper_test.go @@ -20,17 +20,6 @@ type failover func(buf *Buffer, tablet *topodatapb.Tablet, keyspace, shard strin func testAllImplementations(t *testing.T, runTest func(t *testing.T, fail failover)) { t.Helper() - t.Run("HealthCheck", func(t *testing.T) { - t.Helper() - runTest(t, func(buf *Buffer, tablet *topodatapb.Tablet, keyspace, shard string, now time.Time) { - buf.ProcessPrimaryHealth(&discovery.TabletHealth{ - 
Tablet: tablet, - Target: &query.Target{Keyspace: keyspace, Shard: shard, TabletType: topodatapb.TabletType_PRIMARY}, - PrimaryTermStartTime: now.Unix(), - }) - }) - }) - t.Run("KeyspaceEvent", func(t *testing.T) { t.Helper() runTest(t, func(buf *Buffer, tablet *topodatapb.Tablet, keyspace, shard string, now time.Time) { diff --git a/go/vt/vtgate/buffer/flags.go b/go/vt/vtgate/buffer/flags.go index 742a5d5d412..a17cc09ccc3 100644 --- a/go/vt/vtgate/buffer/flags.go +++ b/go/vt/vtgate/buffer/flags.go @@ -70,9 +70,6 @@ func verifyFlags() error { if bufferSize < 1 { return fmt.Errorf("--buffer_size must be >= 1 (specified value: %d)", bufferSize) } - if bufferMinTimeBetweenFailovers < bufferMaxFailoverDuration*time.Duration(2) { - return fmt.Errorf("--buffer_min_time_between_failovers should be at least twice the length of --buffer_max_failover_duration: %v vs. %v", bufferMinTimeBetweenFailovers, bufferMaxFailoverDuration) - } if bufferDrainConcurrency < 1 { return fmt.Errorf("--buffer_drain_concurrency must be >= 1 (specified value: %d)", bufferDrainConcurrency) @@ -165,6 +162,16 @@ func NewDefaultConfig() *Config { } } +// EnableBuffering is used in tests where we require the keyspace event watcher to be created +func EnableBuffering() { + bufferEnabled = true +} + +// DisableBuffering is the counterpart of EnableBuffering +func DisableBuffering() { + bufferEnabled = false +} + func NewConfigFromFlags() *Config { if err := verifyFlags(); err != nil { log.Fatalf("Invalid buffer configuration: %v", err) diff --git a/go/vt/vtgate/buffer/shard_buffer.go b/go/vt/vtgate/buffer/shard_buffer.go index 1b829cb3ddd..ae33aabb399 100644 --- a/go/vt/vtgate/buffer/shard_buffer.go +++ b/go/vt/vtgate/buffer/shard_buffer.go @@ -23,6 +23,8 @@ import ( "sync" "time" + "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/vt/vtgate/errorsanitizer" "vitess.io/vitess/go/vt/log" @@ -93,14 +95,6 @@ type shardBuffer struct { state bufferState // queue is the list of buffered requests 
(ordered by arrival). queue []*entry - // externallyReparented is the maximum value of all seen - // "StreamHealthResponse.TabletexternallyReparentedTimestamp" values across - // all PRIMARY tablets of this shard. - // In practice, it is a) the last time the shard was reparented or b) the last - // time the TabletExternallyReparented RPC was called on the tablet to confirm - // that the tablet is the current PRIMARY. - // We assume the value is a Unix timestamp in seconds. - externallyReparented int64 // lastStart is the last time we saw the start of a failover. lastStart time.Time // lastEnd is the last time we saw the end of a failover. @@ -476,11 +470,12 @@ func (sb *shardBuffer) remove(toRemove *entry) { // Entry was already removed. Keep the queue as it is. } -func (sb *shardBuffer) recordKeyspaceEvent(alias *topodatapb.TabletAlias, stillServing bool) { +func (sb *shardBuffer) recordKeyspaceEvent(alias *topodatapb.TabletAlias, stillServing bool, keyspaceEvent *discovery.KeyspaceEvent) { sb.mu.Lock() defer sb.mu.Unlock() - log.Infof("disruption in shard %s/%s resolved (serving: %v)", sb.keyspace, sb.shard, stillServing) + log.Infof("disruption in shard %s/%s resolved (serving: %v), movetable state %#v", + sb.keyspace, sb.shard, stillServing, keyspaceEvent.MoveTablesState) if !topoproto.TabletAliasEqual(alias, sb.currentPrimary) { if sb.currentPrimary != nil { @@ -488,42 +483,26 @@ func (sb *shardBuffer) recordKeyspaceEvent(alias *topodatapb.TabletAlias, stillS } sb.currentPrimary = alias } - if stillServing { - sb.stopBufferingLocked(stopFailoverEndDetected, "a primary promotion has been detected") - } else { - sb.stopBufferingLocked(stopShardMissing, "the keyspace has been resharded") - } -} - -func (sb *shardBuffer) recordExternallyReparentedTimestamp(timestamp int64, alias *topodatapb.TabletAlias) { - // Fast path (read lock): Check if new timestamp is higher. - sb.mu.RLock() - if timestamp <= sb.externallyReparented { - // Do nothing. 
Equal values are reported if the primary has not changed. - // Smaller values can be reported during the failover by the old primary - // after the new primary already took over. - sb.mu.RUnlock() - return - } - sb.mu.RUnlock() + var reason stopReason + var msg string - // New timestamp is higher. Stop buffering if running. - sb.mu.Lock() - defer sb.mu.Unlock() - - // Re-check value after acquiring write lock. - if timestamp <= sb.externallyReparented { - return + // heuristically determine the reason why vtgate is currently buffering + moveTablesSwitched := false + if keyspaceEvent.MoveTablesState.State == discovery.MoveTablesSwitched { + moveTablesSwitched = true } - - sb.externallyReparented = timestamp - if !topoproto.TabletAliasEqual(alias, sb.currentPrimary) { - if sb.currentPrimary != nil { - sb.lastReparent = sb.timeNow() - } - sb.currentPrimary = alias + switch { + case moveTablesSwitched: + reason = stopMoveTablesSwitchingTraffic + msg = stopMoveTablesSwitchingTrafficMessage + case stillServing: + reason = stopFailoverEndDetected + msg = stopFailoverEndDetectedMessage + default: + reason = stopShardMissing + msg = stopShardMissingMessage } - sb.stopBufferingLocked(stopFailoverEndDetected, "failover end detected") + sb.stopBufferingLocked(reason, msg) } func (sb *shardBuffer) stopBufferingDueToMaxDuration() { @@ -569,7 +548,8 @@ func (sb *shardBuffer) stopBufferingLocked(reason stopReason, details string) { if sb.mode == bufferModeDryRun { msg = "Dry-run: Would have stopped buffering" } - log.Infof("%v for shard: %s after: %.1f seconds due to: %v. Draining %d buffered requests now.", msg, topoproto.KeyspaceShardString(sb.keyspace, sb.shard), d.Seconds(), details, len(q)) + log.Infof("%v for shard: %s after: %.1f seconds due to: %v. 
Draining %d buffered requests now.", + msg, topoproto.KeyspaceShardString(sb.keyspace, sb.shard), d.Seconds(), details, len(q)) var clientEntryError error if reason == stopShardMissing { diff --git a/go/vt/vtgate/buffer/variables.go b/go/vt/vtgate/buffer/variables.go index b4b036b0775..af99cb52220 100644 --- a/go/vt/vtgate/buffer/variables.go +++ b/go/vt/vtgate/buffer/variables.go @@ -112,6 +112,11 @@ const ( stopFailoverEndDetected stopReason = "NewPrimarySeen" stopMaxFailoverDurationExceeded stopReason = "MaxDurationExceeded" stopShutdown stopReason = "Shutdown" + stopMoveTablesSwitchingTraffic stopReason = "MoveTablesSwitchedTraffic" + + stopMoveTablesSwitchingTrafficMessage = "MoveTables has switched writes" + stopFailoverEndDetectedMessage = "a primary promotion has been detected" + stopShardMissingMessage = "the keyspace has been resharded" ) // evictedReason is used in "requestsEvicted" as "Reason" label. diff --git a/go/vt/vtgate/endtoend/last_insert_id_test.go b/go/vt/vtgate/endtoend/last_insert_id_test.go index 6d841fadd07..e3fbcdaa2dd 100644 --- a/go/vt/vtgate/endtoend/last_insert_id_test.go +++ b/go/vt/vtgate/endtoend/last_insert_id_test.go @@ -26,7 +26,6 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/utils" - "vitess.io/vitess/go/vt/vtgate/evalengine" ) func TestLastInsertId(t *testing.T) { @@ -40,7 +39,7 @@ func TestLastInsertId(t *testing.T) { // figure out the last inserted id before we run change anything qr := exec(t, conn, "select max(id) from t1_last_insert_id") - oldLastID, err := evalengine.ToUint64(qr.Rows[0][0]) + oldLastID, err := qr.Rows[0][0].ToCastUint64() require.NoError(t, err) exec(t, conn, "insert into t1_last_insert_id(id1) values(42)") @@ -66,7 +65,7 @@ func TestLastInsertIdWithRollback(t *testing.T) { // figure out the last inserted id before we run our tests qr := exec(t, conn, "select max(id) from t1_last_insert_id") - oldLastID, err := evalengine.ToUint64(qr.Rows[0][0]) + oldLastID, err := 
qr.Rows[0][0].ToCastUint64() require.NoError(t, err) // add row inside explicit transaction diff --git a/go/vt/vtgate/endtoend/lookup_test.go b/go/vt/vtgate/endtoend/lookup_test.go index 01fc3aee32d..d69bec8c0c6 100644 --- a/go/vt/vtgate/endtoend/lookup_test.go +++ b/go/vt/vtgate/endtoend/lookup_test.go @@ -23,6 +23,8 @@ import ( "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/mysql/sqlerror" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" @@ -61,8 +63,8 @@ func TestConsistentLookup(t *testing.T) { _, err = conn.ExecuteFetch("insert into t1(id1, id2) values(1, 4)", 1000, false) exec(t, conn, "rollback") require.Error(t, err) - mysqlErr := err.(*mysql.SQLError) - assert.Equal(t, mysql.ERDupEntry, mysqlErr.Num) + mysqlErr := err.(*sqlerror.SQLError) + assert.Equal(t, sqlerror.ERDupEntry, mysqlErr.Num) assert.Equal(t, "23000", mysqlErr.State) // Simple delete. diff --git a/go/vt/vtgate/endtoend/main_test.go b/go/vt/vtgate/endtoend/main_test.go index 08aae25420e..b471786b78e 100644 --- a/go/vt/vtgate/endtoend/main_test.go +++ b/go/vt/vtgate/endtoend/main_test.go @@ -153,6 +153,19 @@ var ( Name: "hash", }}, }, + "oltp_test": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "id", + Name: "hash", + }}, + Columns: []*vschemapb.Column{{ + Name: "c", + Type: sqltypes.Char, + }, { + Name: "pad", + Type: sqltypes.Char, + }}, + }, }, } diff --git a/go/vt/vtgate/endtoend/oltp_test.go b/go/vt/vtgate/endtoend/oltp_test.go new file mode 100644 index 00000000000..f8ca646f8c7 --- /dev/null +++ b/go/vt/vtgate/endtoend/oltp_test.go @@ -0,0 +1,132 @@ +package endtoend + +import ( + "bytes" + "context" + "fmt" + "math/rand" + "sync" + "testing" + + "vitess.io/vitess/go/mysql" +) + +// 10 groups, 119 characters +const cValueTemplate = "###########-###########-###########-" + + "###########-###########-###########-" + + "###########-###########-###########-" + + "###########" + +// 5 groups, 59 characters +const padValueTemplate = 
"###########-###########-###########-" + + "###########-###########" + +func sysbenchRandom(rng *rand.Rand, template string) []byte { + out := make([]byte, 0, len(template)) + for i := range template { + switch template[i] { + case '#': + out = append(out, '0'+byte(rng.Intn(10))) + default: + out = append(out, template[i]) + } + } + return out +} + +var oltpInitOnce sync.Once + +func BenchmarkOLTP(b *testing.B) { + const MaxRows = 10000 + const RangeSize = 100 + + rng := rand.New(rand.NewSource(1234)) + + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + if err != nil { + b.Fatal(err) + } + defer conn.Close() + + var query bytes.Buffer + + oltpInitOnce.Do(func() { + b.Logf("seeding database for benchmark...") + + var rows int = 1 + for i := 0; i < MaxRows/10; i++ { + query.Reset() + query.WriteString("insert into oltp_test(id, k, c, pad) values ") + for j := 0; j < 10; j++ { + if j > 0 { + query.WriteString(", ") + } + _, _ = fmt.Fprintf(&query, "(%d, %d, '%s', '%s')", rows, rng.Int31n(0xFFFF), sysbenchRandom(rng, cValueTemplate), sysbenchRandom(rng, padValueTemplate)) + rows++ + } + + _, err = conn.ExecuteFetch(query.String(), -1, false) + if err != nil { + b.Fatal(err) + } + } + b.Logf("finshed (inserted %d rows)", rows) + }) + + b.Run("SimpleRanges", func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + id := rng.Intn(MaxRows) + + query.Reset() + _, _ = fmt.Fprintf(&query, "SELECT c FROM oltp_test WHERE id BETWEEN %d AND %d", id, id+rng.Intn(RangeSize)-1) + _, err := conn.ExecuteFetch(query.String(), 1000, false) + if err != nil { + b.Error(err) + } + } + }) + + b.Run("SumRanges", func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + id := rng.Intn(MaxRows) + + query.Reset() + _, _ = fmt.Fprintf(&query, "SELECT SUM(k) FROM oltp_test WHERE id BETWEEN %d AND %d", id, id+rng.Intn(RangeSize)-1) + _, err := conn.ExecuteFetch(query.String(), 1000, false) + if err != nil { + b.Error(err) + } + } + }) + + 
b.Run("OrderRanges", func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + id := rng.Intn(MaxRows) + + query.Reset() + _, _ = fmt.Fprintf(&query, "SELECT c FROM oltp_test WHERE id BETWEEN %d AND %d ORDER BY c", id, id+rng.Intn(RangeSize)-1) + _, err := conn.ExecuteFetch(query.String(), 1000, false) + if err != nil { + b.Error(err) + } + } + }) + + b.Run("DistinctRanges", func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + id := rng.Intn(MaxRows) + + query.Reset() + _, _ = fmt.Fprintf(&query, "SELECT DISTINCT c FROM oltp_test WHERE id BETWEEN %d AND %d ORDER BY c", id, id+rng.Intn(RangeSize)-1) + _, err := conn.ExecuteFetch(query.String(), 1000, false) + if err != nil { + b.Error(err) + } + } + }) +} diff --git a/go/vt/vtgate/endtoend/schema.sql b/go/vt/vtgate/endtoend/schema.sql index 5fb1f52224f..7036d348771 100644 --- a/go/vt/vtgate/endtoend/schema.sql +++ b/go/vt/vtgate/endtoend/schema.sql @@ -1,74 +1,82 @@ create table t1( - id1 bigint, - id2 bigint, - primary key(id1) + id1 bigint, + id2 bigint, + primary key(id1) ) Engine=InnoDB; create table t1_copy_basic( - id1 bigint, - id2 bigint, - primary key(id1) + id1 bigint, + id2 bigint, + primary key(id1) ) Engine=InnoDB; create table t1_copy_all( - id1 bigint, - id2 bigint, - primary key(id1) + id1 bigint, + id2 bigint, +primary key(id1) ) Engine=InnoDB; create table t1_copy_resume( - id1 bigint, - id2 bigint, - primary key(id1) + id1 bigint, + id2 bigint, + primary key(id1) ) Engine=InnoDB; create table t1_id2_idx( - id2 bigint, - keyspace_id varbinary(10), - primary key(id2) + id2 bigint, + keyspace_id varbinary(10), + primary key(id2) ) Engine=InnoDB; create table vstream_test( - id bigint, - val bigint, - primary key(id) + id bigint, + val bigint, + primary key(id) ) Engine=InnoDB; create table aggr_test( - id bigint, - val1 varchar(16), - val2 bigint, - primary key(id) + id bigint, + val1 varchar(16), + val2 bigint, + primary key(id) ) Engine=InnoDB; create table t2( - id3 
bigint, - id4 bigint, - primary key(id3) + id3 bigint, + id4 bigint, + primary key(id3) ) Engine=InnoDB; create table t2_id4_idx( - id bigint not null auto_increment, - id4 bigint, - id3 bigint, - primary key(id), - key idx_id4(id4) + id bigint not null auto_increment, + id4 bigint, + id3 bigint, + primary key(id), + key idx_id4(id4) ) Engine=InnoDB; create table t1_last_insert_id( - id bigint not null auto_increment, - id1 bigint, - primary key(id) + id bigint not null auto_increment, + id1 bigint, + primary key(id) ) Engine=InnoDB; create table t1_row_count( - id bigint not null, - id1 bigint, - primary key(id) + id bigint not null, + id1 bigint, + primary key(id) ) Engine=InnoDB; create table t1_sharded( - id1 bigint, - id2 bigint, - primary key(id1) + id1 bigint, + id2 bigint, + primary key(id1) +) Engine=InnoDB; + +create table oltp_test( + id bigint not null auto_increment, + k bigint default 0 not null, + c char(120) default '' not null, + pad char(60) default '' not null, + primary key (id) ) Engine=InnoDB; diff --git a/go/vt/vtgate/endtoend/vstream_test.go b/go/vt/vtgate/endtoend/vstream_test.go index d2ae07697a9..9016f0ab538 100644 --- a/go/vt/vtgate/endtoend/vstream_test.go +++ b/go/vt/vtgate/endtoend/vstream_test.go @@ -25,20 +25,20 @@ import ( "sync" "testing" - "vitess.io/vitess/go/mysql/collations" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vtgate/vtgateconn" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - "vitess.io/vitess/go/vt/proto/query" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/vtgate/vtgateconn" ) func initialize(ctx context.Context, t *testing.T) (*vtgateconn.VTGateConn, *mysql.Conn, *mysql.Conn, func()) { @@ -151,11 +151,12 
@@ func TestVStream(t *testing.T) { Keyspace: "ks", Shard: "-80", RowChanges: []*binlogdatapb.RowChange{{ - After: &query.Row{ + After: &querypb.Row{ Lengths: []int64{1, 1}, Values: []byte("11"), }, }}, + Flags: 1, // foreign_key_checks are enabled by default. } gotRows := events[2].RowEvent if !proto.Equal(gotRows, wantRows) { @@ -177,7 +178,7 @@ func TestVStreamCopyBasic(t *testing.T) { } lastPK := sqltypes.Result{ - Fields: []*query.Field{{Name: "id1", Type: query.Type_INT32}}, + Fields: []*querypb.Field{{Name: "id1", Type: querypb.Type_INT32}}, Rows: [][]sqltypes.Value{{sqltypes.NewInt32(4)}}, } qr := sqltypes.ResultToProto3(&lastPK) @@ -405,7 +406,7 @@ func TestVStreamCopyResume(t *testing.T) { // lastPK is id1=4, meaning we should only copy rows for id1 IN(5,6,7,8,9) lastPK := sqltypes.Result{ - Fields: []*query.Field{{Name: "id1", Type: query.Type_INT64, Charset: collations.CollationBinaryID, Flags: uint32(query.MySqlFlag_NUM_FLAG | query.MySqlFlag_BINARY_FLAG)}}, + Fields: []*querypb.Field{{Name: "id1", Type: querypb.Type_INT64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_BINARY_FLAG)}}, Rows: [][]sqltypes.Value{{sqltypes.NewInt64(4)}}, } tableLastPK := []*binlogdatapb.TableLastPK{{ @@ -478,6 +479,7 @@ func TestVStreamCopyResume(t *testing.T) { case nil: for _, ev := range e { if ev.Type == binlogdatapb.VEventType_ROW { + ev.RowEvent.Flags = 0 // null Flags, so we don't have to define flags in every wanted row event. 
evs = append(evs, ev) if ev.Timestamp == 0 { rowCopyEvents++ @@ -616,9 +618,9 @@ func TestVStreamSharded(t *testing.T) { received bool } expectedEvents := []*expectedEvent{ - {`type:FIELD field_event:{table_name:"ks.t1_sharded" fields:{name:"id1" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"ks_-80" org_name:"id1" column_length:20 charset:63 flags:53251} fields:{name:"id2" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"ks_-80" org_name:"id2" column_length:20 charset:63 flags:32768} keyspace:"ks" shard:"-80"}`, false}, + {`type:FIELD field_event:{table_name:"ks.t1_sharded" fields:{name:"id1" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"ks_-80" org_name:"id1" column_length:20 charset:63 flags:53251 column_type:"bigint(20)"} fields:{name:"id2" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"ks_-80" org_name:"id2" column_length:20 charset:63 flags:32768 column_type:"bigint(20)"} keyspace:"ks" shard:"-80"}`, false}, {`type:ROW row_event:{table_name:"ks.t1_sharded" row_changes:{after:{lengths:1 lengths:1 values:"11"}} keyspace:"ks" shard:"-80"}`, false}, - {`type:FIELD field_event:{table_name:"ks.t1_sharded" fields:{name:"id1" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"ks_80-" org_name:"id1" column_length:20 charset:63 flags:53251} fields:{name:"id2" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"ks_80-" org_name:"id2" column_length:20 charset:63 flags:32768} keyspace:"ks" shard:"80-"}`, false}, + {`type:FIELD field_event:{table_name:"ks.t1_sharded" fields:{name:"id1" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"ks_80-" org_name:"id1" column_length:20 charset:63 flags:53251 column_type:"bigint(20)"} fields:{name:"id2" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"ks_80-" org_name:"id2" column_length:20 charset:63 flags:32768 column_type:"bigint(20)"} keyspace:"ks" shard:"80-"}`, false}, {`type:ROW row_event:{table_name:"ks.t1_sharded" 
row_changes:{after:{lengths:1 lengths:1 values:"44"}} keyspace:"ks" shard:"80-"}`, false}, } for { @@ -643,7 +645,7 @@ func TestVStreamSharded(t *testing.T) { for _, ev := range evs { s := fmt.Sprintf("%v", ev) for _, expectedEv := range expectedEvents { - if expectedEv.ev == s { + if removeAnyDeprecatedDisplayWidths(expectedEv.ev) == removeAnyDeprecatedDisplayWidths(s) { expectedEv.received = true break } diff --git a/go/vt/vtgate/engine/aggregations.go b/go/vt/vtgate/engine/aggregations.go new file mode 100644 index 00000000000..8037dda37a9 --- /dev/null +++ b/go/vt/vtgate/engine/aggregations.go @@ -0,0 +1,446 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "fmt" + "strconv" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/sqltypes" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + . "vitess.io/vitess/go/vt/vtgate/engine/opcode" + "vitess.io/vitess/go/vt/vtgate/evalengine" +) + +// AggregateParams specify the parameters for each aggregation. +// It contains the opcode and input column number. +type AggregateParams struct { + Opcode AggregateOpcode + Col int + + // These are used only for distinct opcodes. 
+ KeyCol int + WCol int + Type sqltypes.Type + CollationID collations.ID + + Alias string `json:",omitempty"` + Expr sqlparser.Expr + Original *sqlparser.AliasedExpr + + // This is based on the function passed in the select expression and + // not what we use to aggregate at the engine primitive level. + OrigOpcode AggregateOpcode +} + +func NewAggregateParam(opcode AggregateOpcode, col int, alias string) *AggregateParams { + out := &AggregateParams{ + Opcode: opcode, + Col: col, + Alias: alias, + WCol: -1, + Type: sqltypes.Unknown, + } + if opcode.NeedsComparableValues() { + out.KeyCol = col + } + return out +} + +func (ap *AggregateParams) WAssigned() bool { + return ap.WCol >= 0 +} + +func (ap *AggregateParams) String() string { + keyCol := strconv.Itoa(ap.Col) + if ap.WAssigned() { + keyCol = fmt.Sprintf("%s|%d", keyCol, ap.WCol) + } + if sqltypes.IsText(ap.Type) && ap.CollationID != collations.Unknown { + keyCol += " COLLATE " + collations.Local().LookupName(ap.CollationID) + } + dispOrigOp := "" + if ap.OrigOpcode != AggregateUnassigned && ap.OrigOpcode != ap.Opcode { + dispOrigOp = "_" + ap.OrigOpcode.String() + } + if ap.Alias != "" { + return fmt.Sprintf("%s%s(%s) AS %s", ap.Opcode.String(), dispOrigOp, keyCol, ap.Alias) + } + return fmt.Sprintf("%s%s(%s)", ap.Opcode.String(), dispOrigOp, keyCol) +} + +func (ap *AggregateParams) typ(inputType querypb.Type) querypb.Type { + if ap.OrigOpcode != AggregateUnassigned { + return ap.OrigOpcode.Type(inputType) + } + return ap.Opcode.Type(inputType) +} + +type aggregator interface { + add(row []sqltypes.Value) error + finish() sqltypes.Value + reset() +} + +type aggregatorDistinct struct { + column int + last sqltypes.Value + coll collations.ID +} + +func (a *aggregatorDistinct) shouldReturn(row []sqltypes.Value) (bool, error) { + if a.column >= 0 { + if !a.last.IsNull() { + cmp, err := evalengine.NullsafeCompare(a.last, row[a.column], a.coll) + if err != nil { + return true, err + } + if cmp == 0 { + return true, 
nil + } + } + a.last = row[a.column] + } + return false, nil +} + +func (a *aggregatorDistinct) reset() { + a.last = sqltypes.NULL +} + +type aggregatorCount struct { + from int + n int64 + distinct aggregatorDistinct +} + +func (a *aggregatorCount) add(row []sqltypes.Value) error { + if row[a.from].IsNull() { + return nil + } + if ret, err := a.distinct.shouldReturn(row); ret { + return err + } + a.n++ + return nil +} + +func (a *aggregatorCount) finish() sqltypes.Value { + return sqltypes.NewInt64(a.n) +} + +func (a *aggregatorCount) reset() { + a.n = 0 + a.distinct.reset() +} + +type aggregatorCountStar struct { + n int64 +} + +func (a *aggregatorCountStar) add(_ []sqltypes.Value) error { + a.n++ + return nil +} + +func (a *aggregatorCountStar) finish() sqltypes.Value { + return sqltypes.NewInt64(a.n) +} + +func (a *aggregatorCountStar) reset() { + a.n = 0 +} + +type aggregatorMinMax struct { + from int + minmax evalengine.MinMax +} + +type aggregatorMin struct { + aggregatorMinMax +} + +func (a *aggregatorMin) add(row []sqltypes.Value) (err error) { + return a.minmax.Min(row[a.from]) +} + +type aggregatorMax struct { + aggregatorMinMax +} + +func (a *aggregatorMax) add(row []sqltypes.Value) (err error) { + return a.minmax.Max(row[a.from]) +} + +func (a *aggregatorMinMax) finish() sqltypes.Value { + return a.minmax.Result() +} + +func (a *aggregatorMinMax) reset() { + a.minmax.Reset() +} + +type aggregatorSum struct { + from int + sum evalengine.Sum + distinct aggregatorDistinct +} + +func (a *aggregatorSum) add(row []sqltypes.Value) error { + if row[a.from].IsNull() { + return nil + } + if ret, err := a.distinct.shouldReturn(row); ret { + return err + } + return a.sum.Add(row[a.from]) +} + +func (a *aggregatorSum) finish() sqltypes.Value { + return a.sum.Result() +} + +func (a *aggregatorSum) reset() { + a.sum.Reset() + a.distinct.reset() +} + +type aggregatorScalar struct { + from int + current sqltypes.Value + init bool +} + +func (a *aggregatorScalar) 
add(row []sqltypes.Value) error { + if !a.init { + a.current = row[a.from] + a.init = true + } + return nil +} + +func (a *aggregatorScalar) finish() sqltypes.Value { + return a.current +} + +func (a *aggregatorScalar) reset() { + a.current = sqltypes.NULL + a.init = false +} + +type aggregatorGroupConcat struct { + from int + type_ sqltypes.Type + + concat []byte + n int +} + +func (a *aggregatorGroupConcat) add(row []sqltypes.Value) error { + if row[a.from].IsNull() { + return nil + } + if a.n > 0 { + a.concat = append(a.concat, ',') + } + a.concat = append(a.concat, row[a.from].Raw()...) + a.n++ + return nil +} + +func (a *aggregatorGroupConcat) finish() sqltypes.Value { + if a.n == 0 { + return sqltypes.NULL + } + return sqltypes.MakeTrusted(a.type_, a.concat) +} + +func (a *aggregatorGroupConcat) reset() { + a.n = 0 + a.concat = nil // not safe to reuse this byte slice as it's returned as MakeTrusted +} + +type aggregatorGtid struct { + from int + shards []*binlogdatapb.ShardGtid +} + +func (a *aggregatorGtid) add(row []sqltypes.Value) error { + a.shards = append(a.shards, &binlogdatapb.ShardGtid{ + Keyspace: row[a.from-1].ToString(), + Shard: row[a.from+1].ToString(), + Gtid: row[a.from].ToString(), + }) + return nil +} + +func (a *aggregatorGtid) finish() sqltypes.Value { + gtid := binlogdatapb.VGtid{ShardGtids: a.shards} + return sqltypes.NewVarChar(gtid.String()) +} + +func (a *aggregatorGtid) reset() { + a.shards = a.shards[:0] // safe to reuse because only the serialized form of a.shards is returned +} + +type aggregationState []aggregator + +func (a aggregationState) add(row []sqltypes.Value) error { + for _, st := range a { + if err := st.add(row); err != nil { + return err + } + } + return nil +} + +func (a aggregationState) finish() (row []sqltypes.Value) { + row = make([]sqltypes.Value, 0, len(a)) + for _, st := range a { + row = append(row, st.finish()) + } + return +} + +func (a aggregationState) reset() { + for _, st := range a { + st.reset() + } 
+} + +func isComparable(typ sqltypes.Type) bool { + if typ == sqltypes.Null || sqltypes.IsNumber(typ) || sqltypes.IsBinary(typ) { + return true + } + switch typ { + case sqltypes.Timestamp, + sqltypes.Date, + sqltypes.Time, + sqltypes.Datetime, + sqltypes.Enum, + sqltypes.Set, + sqltypes.TypeJSON, + sqltypes.Bit: + return true + } + return false +} + +func newAggregation(fields []*querypb.Field, aggregates []*AggregateParams) (aggregationState, []*querypb.Field, error) { + fields = slice.Map(fields, func(from *querypb.Field) *querypb.Field { return from.CloneVT() }) + + agstate := make([]aggregator, len(fields)) + for _, aggr := range aggregates { + sourceType := fields[aggr.Col].Type + targetType := aggr.typ(sourceType) + + var ag aggregator + var distinct = -1 + + if aggr.Opcode.IsDistinct() { + distinct = aggr.KeyCol + if aggr.WAssigned() && !isComparable(sourceType) { + distinct = aggr.WCol + } + } + + if aggr.Opcode == AggregateMin || aggr.Opcode == AggregateMax { + if aggr.WAssigned() && !isComparable(sourceType) { + return nil, nil, vterrors.VT12001("min/max on types that are not comparable is not supported") + } + } + + switch aggr.Opcode { + case AggregateCountStar: + ag = &aggregatorCountStar{} + + case AggregateCount, AggregateCountDistinct: + ag = &aggregatorCount{ + from: aggr.Col, + distinct: aggregatorDistinct{ + column: distinct, + coll: aggr.CollationID, + }, + } + + case AggregateSum, AggregateSumDistinct: + var sum evalengine.Sum + switch aggr.OrigOpcode { + case AggregateCount, AggregateCountStar, AggregateCountDistinct: + sum = evalengine.NewSumOfCounts() + default: + sum = evalengine.NewAggregationSum(sourceType) + } + + ag = &aggregatorSum{ + from: aggr.Col, + sum: sum, + distinct: aggregatorDistinct{ + column: distinct, + coll: aggr.CollationID, + }, + } + + case AggregateMin: + ag = &aggregatorMin{ + aggregatorMinMax{ + from: aggr.Col, + minmax: evalengine.NewAggregationMinMax(sourceType, aggr.CollationID), + }, + } + + case AggregateMax: + 
ag = &aggregatorMax{ + aggregatorMinMax{ + from: aggr.Col, + minmax: evalengine.NewAggregationMinMax(sourceType, aggr.CollationID), + }, + } + + case AggregateGtid: + ag = &aggregatorGtid{from: aggr.Col} + + case AggregateAnyValue: + ag = &aggregatorScalar{from: aggr.Col} + + case AggregateGroupConcat: + ag = &aggregatorGroupConcat{from: aggr.Col, type_: targetType} + + default: + panic("BUG: unexpected Aggregation opcode") + } + + agstate[aggr.Col] = ag + fields[aggr.Col].Type = targetType + if aggr.Alias != "" { + fields[aggr.Col].Name = aggr.Alias + } + } + + for i, a := range agstate { + if a == nil { + agstate[i] = &aggregatorScalar{from: i} + } + } + + return agstate, fields, nil +} diff --git a/go/vt/vtgate/engine/aggregations_test.go b/go/vt/vtgate/engine/aggregations_test.go new file mode 100644 index 00000000000..55ec59f73e1 --- /dev/null +++ b/go/vt/vtgate/engine/aggregations_test.go @@ -0,0 +1,181 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "fmt" + "math/rand" + "strings" + "testing" + + "github.com/google/uuid" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + . 
"vitess.io/vitess/go/vt/vtgate/engine/opcode" +) + +func makeTestResults(fields []*querypb.Field, gen []sqltypes.RandomGenerator, N int) []*sqltypes.Result { + result := &sqltypes.Result{Fields: fields} + + for i := 0; i < N; i++ { + row := make([]sqltypes.Value, 0, len(fields)) + for _, f := range gen { + row = append(row, f()) + } + result.Rows = append(result.Rows, row) + } + + return []*sqltypes.Result{result} +} + +func benchmarkName(fields []*querypb.Field) string { + var buf strings.Builder + for i, f := range fields { + if i > 0 { + buf.WriteByte('_') + } + fmt.Fprintf(&buf, "%s(%s)", f.Name, f.Type.String()) + } + return buf.String() +} + +func BenchmarkScalarAggregate(b *testing.B) { + var rand_i64 = sqltypes.RandomGenerators[sqltypes.Int64] + var rand_i64small = func() sqltypes.Value { + return sqltypes.NewInt64(rand.Int63n(1024)) + } + var rand_f64 = sqltypes.RandomGenerators[sqltypes.Float64] + var rand_dec = sqltypes.RandomGenerators[sqltypes.Decimal] + var rand_bin = sqltypes.RandomGenerators[sqltypes.VarBinary] + + var cases = []struct { + fields []*querypb.Field + gen []sqltypes.RandomGenerator + params []*AggregateParams + }{ + { + fields: sqltypes.MakeTestFields("count", "int64"), + gen: []sqltypes.RandomGenerator{rand_i64}, + params: []*AggregateParams{ + {Opcode: AggregateCount, Col: 0}, + }, + }, + { + fields: sqltypes.MakeTestFields("sum_small", "int64"), + gen: []sqltypes.RandomGenerator{rand_i64small}, + params: []*AggregateParams{ + {Opcode: AggregateSum, Col: 0}, + }, + }, + { + fields: sqltypes.MakeTestFields("sum", "int64"), + gen: []sqltypes.RandomGenerator{rand_i64}, + params: []*AggregateParams{ + {Opcode: AggregateSum, Col: 0}, + }, + }, + { + fields: sqltypes.MakeTestFields("sum", "float64"), + gen: []sqltypes.RandomGenerator{rand_f64}, + params: []*AggregateParams{ + {Opcode: AggregateSum, Col: 0}, + }, + }, + { + fields: sqltypes.MakeTestFields("sum", "decimal"), + gen: []sqltypes.RandomGenerator{rand_dec}, + params: 
[]*AggregateParams{ + {Opcode: AggregateSum, Col: 0}, + }, + }, + { + fields: sqltypes.MakeTestFields("min", "int64"), + gen: []sqltypes.RandomGenerator{rand_i64}, + params: []*AggregateParams{ + {Opcode: AggregateMin, Col: 0}, + }, + }, + { + fields: sqltypes.MakeTestFields("min", "float64"), + gen: []sqltypes.RandomGenerator{rand_f64}, + params: []*AggregateParams{ + {Opcode: AggregateMin, Col: 0}, + }, + }, + { + fields: sqltypes.MakeTestFields("min", "decimal"), + gen: []sqltypes.RandomGenerator{rand_dec}, + params: []*AggregateParams{ + {Opcode: AggregateMin, Col: 0}, + }, + }, + { + fields: sqltypes.MakeTestFields("min", "varbinary"), + gen: []sqltypes.RandomGenerator{rand_bin}, + params: []*AggregateParams{ + {Opcode: AggregateMin, Col: 0}, + }, + }, + { + fields: sqltypes.MakeTestFields("keyspace|gtid|shard", "varchar|varchar|varchar"), + gen: []sqltypes.RandomGenerator{ + func() sqltypes.Value { + return sqltypes.NewVarChar("keyspace") + }, + func() sqltypes.Value { + return sqltypes.NewVarChar(uuid.New().String()) + }, + func() sqltypes.Value { + return sqltypes.NewVarChar(fmt.Sprintf("%x-%x", rand.Intn(256), rand.Intn(256))) + }, + }, + params: []*AggregateParams{ + {Opcode: AggregateGtid, Col: 1}, + }, + }, + } + + for _, tc := range cases { + b.Run(benchmarkName(tc.fields), func(b *testing.B) { + results := makeTestResults(tc.fields, tc.gen, 10000) + + fp := &fakePrimitive{ + allResultsInOneCall: true, + results: results, + } + oa := &ScalarAggregate{ + Aggregates: tc.params, + Input: fp, + } + + b.Run("TryExecute", func(b *testing.B) { + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + fp.rewind() + _, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, true) + if err != nil { + panic(err) + } + } + }) + }) + } +} diff --git a/go/vt/vtgate/engine/cached_size.go b/go/vt/vtgate/engine/cached_size.go index f03c27d40b6..6625b79240e 100644 --- a/go/vt/vtgate/engine/cached_size.go +++ b/go/vt/vtgate/engine/cached_size.go @@ 
-145,7 +145,7 @@ func (cached *DML) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(112) + size += int64(128) } // field Query string size += hack.RuntimeAllocSize(int64(len(cached.Query))) @@ -153,10 +153,17 @@ func (cached *DML) CachedSize(alloc bool) int64 { if cc, ok := cached.KsidVindex.(cachedObject); ok { size += cc.CachedSize(true) } - // field Table []*vitess.io/vitess/go/vt/vtgate/vindexes.Table + // field TableNames []string { - size += hack.RuntimeAllocSize(int64(cap(cached.Table)) * int64(8)) - for _, elem := range cached.Table { + size += hack.RuntimeAllocSize(int64(cap(cached.TableNames)) * int64(16)) + for _, elem := range cached.TableNames { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } + // field Vindexes []*vitess.io/vitess/go/vt/vtgate/vindexes.ColumnVindex + { + size += hack.RuntimeAllocSize(int64(cap(cached.Vindexes)) * int64(8)) + for _, elem := range cached.Vindexes { size += elem.CachedSize(true) } } @@ -199,20 +206,6 @@ func (cached *Distinct) CachedSize(alloc bool) int64 { } return size } -func (cached *DistinctV3) CachedSize(alloc bool) int64 { - if cached == nil { - return int64(0) - } - size := int64(0) - if alloc { - size += int64(16) - } - // field Source vitess.io/vitess/go/vt/vtgate/engine.Primitive - if cc, ok := cached.Source.(cachedObject); ok { - size += cc.CachedSize(true) - } - return size -} func (cached *ExecStmt) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -240,7 +233,7 @@ func (cached *Filter) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(48) + size += int64(64) } // field Predicate vitess.io/vitess/go/vt/vtgate/evalengine.Expr if cc, ok := cached.Predicate.(cachedObject); ok { @@ -256,20 +249,68 @@ func (cached *Filter) CachedSize(alloc bool) int64 { } return size } -func (cached *Gen4CompareV3) CachedSize(alloc bool) int64 { +func (cached *FkCascade) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } size := 
int64(0) if alloc { - size += int64(48) + size += int64(64) } - // field V3 vitess.io/vitess/go/vt/vtgate/engine.Primitive - if cc, ok := cached.V3.(cachedObject); ok { + // field Selection vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Selection.(cachedObject); ok { size += cc.CachedSize(true) } - // field Gen4 vitess.io/vitess/go/vt/vtgate/engine.Primitive - if cc, ok := cached.Gen4.(cachedObject); ok { + // field Children []*vitess.io/vitess/go/vt/vtgate/engine.FkChild + { + size += hack.RuntimeAllocSize(int64(cap(cached.Children)) * int64(8)) + for _, elem := range cached.Children { + size += elem.CachedSize(true) + } + } + // field Parent vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Parent.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *FkChild) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(64) + } + // field BVName string + size += hack.RuntimeAllocSize(int64(len(cached.BVName))) + // field Cols []int + { + size += hack.RuntimeAllocSize(int64(cap(cached.Cols)) * int64(8)) + } + // field Exec vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Exec.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *FkVerify) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field Verify []*vitess.io/vitess/go/vt/vtgate/engine.Verify + { + size += hack.RuntimeAllocSize(int64(cap(cached.Verify)) * int64(8)) + for _, elem := range cached.Verify { + size += elem.CachedSize(true) + } + } + // field Exec vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Exec.(cachedObject); ok { size += cc.CachedSize(true) } return size @@ -338,7 +379,7 @@ func (cached *Insert) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(224) + size += int64(240) } // 
field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace size += cached.Keyspace.CachedSize(true) @@ -370,17 +411,24 @@ func (cached *Insert) CachedSize(alloc bool) int64 { size += elem.CachedSize(true) } } - // field Table *vitess.io/vitess/go/vt/vtgate/vindexes.Table - size += cached.Table.CachedSize(true) + // field TableName string + size += hack.RuntimeAllocSize(int64(len(cached.TableName))) // field Generate *vitess.io/vitess/go/vt/vtgate/engine.Generate size += cached.Generate.CachedSize(true) // field Prefix string size += hack.RuntimeAllocSize(int64(len(cached.Prefix))) - // field Mid []string + // field Mid vitess.io/vitess/go/vt/sqlparser.Values { - size += hack.RuntimeAllocSize(int64(cap(cached.Mid)) * int64(16)) + size += hack.RuntimeAllocSize(int64(cap(cached.Mid)) * int64(24)) for _, elem := range cached.Mid { - size += hack.RuntimeAllocSize(int64(len(elem))) + { + size += hack.RuntimeAllocSize(int64(cap(elem)) * int64(16)) + for _, elem := range elem { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } } } // field Suffix string @@ -533,7 +581,7 @@ func (cached *MemorySort) CachedSize(alloc bool) int64 { } // field OrderBy []vitess.io/vitess/go/vt/vtgate/engine.OrderByParams { - size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(42)) + size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(38)) } // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive if cc, ok := cached.Input.(cachedObject); ok { @@ -560,7 +608,7 @@ func (cached *MergeSort) CachedSize(alloc bool) int64 { } // field OrderBy []vitess.io/vitess/go/vt/vtgate/engine.OrderByParams { - size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(42)) + size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(38)) } return size } @@ -594,7 +642,7 @@ func (cached *OrderedAggregate) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(96) + size += int64(80) } // field Aggregates 
[]*vitess.io/vitess/go/vt/vtgate/engine.AggregateParams { @@ -678,28 +726,6 @@ func (cached *Projection) CachedSize(alloc bool) int64 { } return size } -func (cached *PulloutSubquery) CachedSize(alloc bool) int64 { - if cached == nil { - return int64(0) - } - size := int64(0) - if alloc { - size += int64(80) - } - // field SubqueryResult string - size += hack.RuntimeAllocSize(int64(len(cached.SubqueryResult))) - // field HasValues string - size += hack.RuntimeAllocSize(int64(len(cached.HasValues))) - // field Subquery vitess.io/vitess/go/vt/vtgate/engine.Primitive - if cc, ok := cached.Subquery.(cachedObject); ok { - size += cc.CachedSize(true) - } - // field Underlying vitess.io/vitess/go/vt/vtgate/engine.Primitive - if cc, ok := cached.Underlying.(cachedObject); ok { - size += cc.CachedSize(true) - } - return size -} func (cached *RenameFields) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -775,7 +801,7 @@ func (cached *Route) CachedSize(alloc bool) int64 { size += hack.RuntimeAllocSize(int64(len(cached.FieldQuery))) // field OrderBy []vitess.io/vitess/go/vt/vtgate/engine.OrderByParams { - size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(42)) + size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(38)) } // field RoutingParameters *vitess.io/vitess/go/vt/vtgate/engine.RoutingParameters size += cached.RoutingParameters.CachedSize(true) @@ -891,7 +917,7 @@ func (cached *ScalarAggregate) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(64) + size += int64(48) } // field Aggregates []*vitess.io/vitess/go/vt/vtgate/engine.AggregateParams { @@ -1097,6 +1123,42 @@ func (cached *SysVarSetAware) CachedSize(alloc bool) int64 { } return size } +func (cached *ThrottleApp) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace + size += 
cached.Keyspace.CachedSize(true) + // field ThrottledAppRule *vitess.io/vitess/go/vt/proto/topodata.ThrottledAppRule + size += cached.ThrottledAppRule.CachedSize(true) + return size +} +func (cached *UncorrelatedSubquery) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(80) + } + // field SubqueryResult string + size += hack.RuntimeAllocSize(int64(len(cached.SubqueryResult))) + // field HasValues string + size += hack.RuntimeAllocSize(int64(len(cached.HasValues))) + // field Subquery vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Subquery.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Outer vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Outer.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} //go:nocheckptr func (cached *Update) CachedSize(alloc bool) int64 { @@ -1188,6 +1250,22 @@ func (cached *VStream) CachedSize(alloc bool) int64 { size += hack.RuntimeAllocSize(int64(len(cached.Position))) return size } +func (cached *Verify) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Exec vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Exec.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Typ string + size += hack.RuntimeAllocSize(int64(len(cached.Typ))) + return size +} func (cached *VindexFunc) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1265,17 +1343,17 @@ func (cached *VindexValues) CachedSize(alloc bool) int64 { if alloc { size += int64(16) } - // field PvMap map[string]vitess.io/vitess/go/vt/vtgate/evalengine.Expr - if cached.PvMap != nil { + // field EvalExprMap map[string]vitess.io/vitess/go/vt/vtgate/evalengine.Expr + if cached.EvalExprMap != nil { size += int64(48) - hmap := reflect.ValueOf(cached.PvMap) + hmap := reflect.ValueOf(cached.EvalExprMap) 
numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) size += hack.RuntimeAllocSize(int64(numOldBuckets * 272)) - if len(cached.PvMap) > 0 || numBuckets > 1 { + if len(cached.EvalExprMap) > 0 || numBuckets > 1 { size += hack.RuntimeAllocSize(int64(numBuckets * 272)) } - for k, v := range cached.PvMap { + for k, v := range cached.EvalExprMap { size += hack.RuntimeAllocSize(int64(len(k))) if cc, ok := v.(cachedObject); ok { size += cc.CachedSize(true) diff --git a/go/vt/vtgate/engine/compare_utils.go b/go/vt/vtgate/engine/compare_utils.go deleted file mode 100644 index c854d6723d3..00000000000 --- a/go/vt/vtgate/engine/compare_utils.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2022 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package engine - -import ( - "encoding/json" - - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/log" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" -) - -func printMismatch(leftResult, rightResult *sqltypes.Result, leftPrimitive, rightPrimitive Primitive, leftName, rightName string) { - log.Errorf("Results of %s and %s are not equal. 
Displaying diff.", rightName, leftName) - - // get right plan and print it - rightplan := &Plan{ - Instructions: rightPrimitive, - } - rightJSON, _ := json.MarshalIndent(rightplan, "", " ") - log.Errorf("%s's plan:\n%s", rightName, string(rightJSON)) - - // get left's plan and print it - leftplan := &Plan{ - Instructions: leftPrimitive, - } - leftJSON, _ := json.MarshalIndent(leftplan, "", " ") - log.Errorf("%s's plan:\n%s", leftName, string(leftJSON)) - - log.Errorf("%s's results:\n", rightName) - log.Errorf("\t[rows affected: %d]\n", rightResult.RowsAffected) - for _, row := range rightResult.Rows { - log.Errorf("\t%s", row) - } - log.Errorf("%s's results:\n", leftName) - log.Errorf("\t[rows affected: %d]\n", leftResult.RowsAffected) - for _, row := range leftResult.Rows { - log.Errorf("\t%s", row) - } - log.Error("End of diff.") -} - -// CompareErrors compares the two errors, and if they don't match, produces an error -func CompareErrors(leftErr, rightErr error, leftName, rightName string) error { - if leftErr != nil && rightErr != nil { - if leftErr.Error() == rightErr.Error() { - return rightErr - } - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "%s and %s failed with different errors: %s: [%s], %s: [%s]", leftName, rightName, leftErr.Error(), rightErr.Error(), leftName, rightName) - } - if leftErr == nil && rightErr != nil { - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "%s failed while %s did not: %s", rightName, rightErr.Error(), leftName) - } - if leftErr != nil && rightErr == nil { - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "%s failed while %s did not: %s", leftName, leftErr.Error(), rightName) - } - return nil -} diff --git a/go/vt/vtgate/engine/concatenate.go b/go/vt/vtgate/engine/concatenate.go index 4cc5fcc440b..904a44ccb85 100644 --- a/go/vt/vtgate/engine/concatenate.go +++ b/go/vt/vtgate/engine/concatenate.go @@ -340,8 +340,8 @@ func (c *Concatenate) NeedsTransaction() bool { } // Inputs returns the input primitives for this -func (c 
*Concatenate) Inputs() []Primitive { - return c.Sources +func (c *Concatenate) Inputs() ([]Primitive, []map[string]any) { + return c.Sources, nil } func (c *Concatenate) description() PrimitiveDescription { diff --git a/go/vt/vtgate/engine/ddl.go b/go/vt/vtgate/engine/ddl.go index d0ac2cb457e..17aa7945537 100644 --- a/go/vt/vtgate/engine/ddl.go +++ b/go/vt/vtgate/engine/ddl.go @@ -95,6 +95,11 @@ func (ddl *DDL) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[st return vcursor.ExecutePrimitive(ctx, ddl.NormalDDL, bindVars, wantfields) } + // Commit any open transaction before executing the ddl query. + if err = vcursor.Session().Commit(ctx); err != nil { + return nil, err + } + ddlStrategySetting, err := schema.ParseDDLStrategy(vcursor.Session().GetDDLStrategy()) if err != nil { return nil, err diff --git a/go/vt/vtgate/engine/ddl_test.go b/go/vt/vtgate/engine/ddl_test.go new file mode 100644 index 00000000000..3f7ccb75f70 --- /dev/null +++ b/go/vt/vtgate/engine/ddl_test.go @@ -0,0 +1,85 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package engine + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +func TestDDL(t *testing.T) { + ddl := &DDL{ + DDL: &sqlparser.CreateTable{ + Table: sqlparser.NewTableName("a"), + }, + DirectDDLEnabled: true, + OnlineDDL: &OnlineDDL{}, + NormalDDL: &Send{ + Keyspace: &vindexes.Keyspace{ + Name: "ks", + Sharded: true, + }, + TargetDestination: key.DestinationAllShards{}, + Query: "ddl query", + }, + } + + vc := &loggingVCursor{} + _, err := ddl.TryExecute(context.Background(), vc, nil, true) + require.NoError(t, err) + + vc.ExpectLog(t, []string{ + "commit", + "ResolveDestinations ks [] Destinations:DestinationAllShards()", + "ExecuteMultiShard false false", + }) +} + +func TestDDLTempTable(t *testing.T) { + ddl := &DDL{ + CreateTempTable: true, + DDL: &sqlparser.CreateTable{ + Temp: true, + Table: sqlparser.NewTableName("a"), + }, + NormalDDL: &Send{ + Keyspace: &vindexes.Keyspace{ + Name: "ks", + Sharded: true, + }, + TargetDestination: key.DestinationAllShards{}, + Query: "ddl query", + }, + } + + vc := &loggingVCursor{} + _, err := ddl.TryExecute(context.Background(), vc, nil, true) + require.NoError(t, err) + + vc.ExpectLog(t, []string{ + "temp table getting created", + "Needs Reserved Conn", + "ResolveDestinations ks [] Destinations:DestinationAllShards()", + "ExecuteMultiShard false false", + }) +} diff --git a/go/vt/vtgate/engine/delete.go b/go/vt/vtgate/engine/delete.go index 1db717450f8..e931d665b44 100644 --- a/go/vt/vtgate/engine/delete.go +++ b/go/vt/vtgate/engine/delete.go @@ -108,11 +108,7 @@ func (del *Delete) deleteVindexEntries(ctx context.Context, vcursor VCursor, bin return err } colnum := del.KsidLength - vindexTable, err := del.GetSingleTable() - if err != nil { - return err - } - for _, colVindex := range vindexTable.Owned { + for _, colVindex := range del.Vindexes { // Fetch the column 
values. colnum must keep incrementing. fromIds := make([]sqltypes.Value, 0, len(colVindex.Columns)) for range colVindex.Columns { diff --git a/go/vt/vtgate/engine/delete_test.go b/go/vt/vtgate/engine/delete_test.go index 7b7e5e1557a..7312b4bd010 100644 --- a/go/vt/vtgate/engine/delete_test.go +++ b/go/vt/vtgate/engine/delete_test.go @@ -65,7 +65,7 @@ func TestDeleteUnsharded(t *testing.T) { } func TestDeleteEqual(t *testing.T) { - vindex, _ := vindexes.NewHash("", nil) + vindex, _ := vindexes.CreateVindex("hash", "", nil) del := &Delete{ DML: &DML{ RoutingParameters: &RoutingParameters{ @@ -97,7 +97,7 @@ func TestDeleteEqual(t *testing.T) { } func TestDeleteEqualMultiCol(t *testing.T) { - vindex, _ := vindexes.NewRegionExperimental("", map[string]string{"region_bytes": "1"}) + vindex, _ := vindexes.CreateVindex("region_experimental", "", map[string]string{"region_bytes": "1"}) del := &Delete{ DML: &DML{ RoutingParameters: &RoutingParameters{ @@ -129,7 +129,7 @@ func TestDeleteEqualMultiCol(t *testing.T) { } func TestDeleteEqualNoRoute(t *testing.T) { - vindex, _ := vindexes.NewLookupUnique("", map[string]string{ + vindex, _ := vindexes.CreateVindex("lookup_unique", "", map[string]string{ "table": "lkp", "from": "from", "to": "toc", @@ -161,7 +161,7 @@ func TestDeleteEqualNoRoute(t *testing.T) { func TestDeleteEqualNoScatter(t *testing.T) { t.Skip("planner does not produces this plan anymore") - vindex, _ := vindexes.NewLookupUnique("", map[string]string{ + vindex, _ := vindexes.CreateVindex("lookup_unique", "", map[string]string{ "table": "lkp", "from": "from", "to": "toc", @@ -197,10 +197,9 @@ func TestDeleteOwnedVindex(t *testing.T) { Vindex: ks.Vindexes["hash"], Values: []evalengine.Expr{evalengine.NewLiteralInt(1)}, }, - Query: "dummy_delete", - Table: []*vindexes.Table{ - ks.Tables["t1"], - }, + Query: "dummy_delete", + TableNames: []string{ks.Tables["t1"].Name.String()}, + Vindexes: ks.Tables["t1"].Owned, OwnedVindexQuery: "dummy_subquery", KsidVindex: 
ks.Vindexes["hash"], KsidLength: 1, @@ -285,10 +284,9 @@ func TestDeleteOwnedVindexMultiCol(t *testing.T) { Vindex: ks.Vindexes["rg_vdx"], Values: []evalengine.Expr{evalengine.NewLiteralInt(1), evalengine.NewLiteralInt(2)}, }, - Query: "dummy_delete", - Table: []*vindexes.Table{ - ks.Tables["rg_tbl"], - }, + Query: "dummy_delete", + TableNames: []string{ks.Tables["rg_tbl"].Name.String()}, + Vindexes: ks.Tables["rg_tbl"].Owned, OwnedVindexQuery: "dummy_subquery", KsidVindex: ks.Vindexes["rg_vdx"], KsidLength: 2, @@ -368,10 +366,9 @@ func TestDeleteSharded(t *testing.T) { Opcode: Scatter, Keyspace: ks.Keyspace, }, - Query: "dummy_delete", - Table: []*vindexes.Table{ - ks.Tables["t2"], - }, + Query: "dummy_delete", + TableNames: []string{ks.Tables["t2"].Name.String()}, + Vindexes: ks.Tables["t2"].Owned, }, } @@ -397,10 +394,9 @@ func TestDeleteShardedStreaming(t *testing.T) { Opcode: Scatter, Keyspace: ks.Keyspace, }, - Query: "dummy_delete", - Table: []*vindexes.Table{ - ks.Tables["t2"], - }, + Query: "dummy_delete", + TableNames: []string{ks.Tables["t2"].Name.String()}, + Vindexes: ks.Tables["t2"].Owned, }, } @@ -423,10 +419,9 @@ func TestDeleteScatterOwnedVindex(t *testing.T) { Opcode: Scatter, Keyspace: ks.Keyspace, }, - Query: "dummy_delete", - Table: []*vindexes.Table{ - ks.Tables["t1"], - }, + Query: "dummy_delete", + TableNames: []string{ks.Tables["t1"].Name.String()}, + Vindexes: ks.Tables["t1"].Owned, OwnedVindexQuery: "dummy_subquery", KsidVindex: ks.Vindexes["hash"], KsidLength: 1, @@ -515,10 +510,9 @@ func TestDeleteInChangedVindexMultiCol(t *testing.T) { evalengine.NewLiteralInt(3), }, }, - Query: "dummy_update", - Table: []*vindexes.Table{ - ks.Tables["rg_tbl"], - }, + Query: "dummy_update", + TableNames: []string{ks.Tables["rg_tbl"].Name.String()}, + Vindexes: ks.Tables["rg_tbl"].Owned, OwnedVindexQuery: "dummy_subquery", KsidVindex: ks.Vindexes["rg_vdx"], KsidLength: 2, @@ -556,7 +550,7 @@ func TestDeleteInChangedVindexMultiCol(t *testing.T) { } func 
TestDeleteEqualSubshard(t *testing.T) { - vindex, _ := vindexes.NewRegionExperimental("", map[string]string{"region_bytes": "1"}) + vindex, _ := vindexes.CreateVindex("region_experimental", "", map[string]string{"region_bytes": "1"}) del := &Delete{ DML: &DML{ RoutingParameters: &RoutingParameters{ diff --git a/go/vt/vtgate/engine/distinct.go b/go/vt/vtgate/engine/distinct.go index 0998bfa0e62..8608aec0d98 100644 --- a/go/vt/vtgate/engine/distinct.go +++ b/go/vt/vtgate/engine/distinct.go @@ -246,8 +246,8 @@ func (d *Distinct) NeedsTransaction() bool { } // Inputs implements the Primitive interface -func (d *Distinct) Inputs() []Primitive { - return []Primitive{d.Source} +func (d *Distinct) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{d.Source}, nil } func (d *Distinct) description() PrimitiveDescription { @@ -283,7 +283,7 @@ func (cc CheckCol) SwitchToWeightString() CheckCol { func (cc CheckCol) String() string { var collation string if sqltypes.IsText(cc.Type) && cc.Collation != collations.Unknown { - collation = ": " + cc.Collation.Get().Name() + collation = ": " + collations.Local().LookupName(cc.Collation) } var column string diff --git a/go/vt/vtgate/engine/distinctV3.go b/go/vt/vtgate/engine/distinctV3.go deleted file mode 100644 index 0506331d9c6..00000000000 --- a/go/vt/vtgate/engine/distinctV3.go +++ /dev/null @@ -1,180 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package engine - -import ( - "context" - - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/sqltypes" - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/vtgate/evalengine" -) - -// DistinctV3 Primitive is used to uniqueify results -// It does not always work, and should be removed once the V3 planner has been removed -var _ Primitive = (*DistinctV3)(nil) - -// Distinct Primitive is used to uniqueify results -type DistinctV3 struct { - Source Primitive -} - -type row = []sqltypes.Value - -type probeTableV3 struct { - m map[evalengine.HashCode][]row -} - -func (pt *probeTableV3) exists(inputRow row) (bool, error) { - // calculate hashcode from all column values in the input row - code := evalengine.HashCode(17) - for _, value := range inputRow { - hashcode, err := evalengine.NullsafeHashcode(value, collations.Unknown, value.Type()) - if err != nil { - return false, err - } - code = code*31 + hashcode - } - - existingRows, found := pt.m[code] - if !found { - // nothing with this hash code found, we can be sure it's a not seen row - pt.m[code] = []row{inputRow} - return false, nil - } - - // we found something in the map - still need to check all individual values - // so we don't just fall for a hash collision - for _, existingRow := range existingRows { - exists, err := equalV3(existingRow, inputRow) - if err != nil { - return false, err - } - if exists { - return true, nil - } - } - - pt.m[code] = append(existingRows, inputRow) - - return false, nil -} - -func equalV3(a, b []sqltypes.Value) (bool, error) { - for i, aVal := range a { - cmp, err := evalengine.NullsafeCompare(aVal, b[i], collations.Unknown) - if err != nil { - return false, err - } - if cmp != 0 { - return false, nil - } - } - return true, nil -} - -func newProbeTableV3() *probeTableV3 { - return &probeTableV3{m: map[evalengine.HashCode][]row{}} -} - -// TryExecute implements the Primitive interface -func (d *DistinctV3) TryExecute(ctx context.Context, vcursor 
VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { - input, err := vcursor.ExecutePrimitive(ctx, d.Source, bindVars, wantfields) - if err != nil { - return nil, err - } - - result := &sqltypes.Result{ - Fields: input.Fields, - InsertID: input.InsertID, - } - - pt := newProbeTableV3() - - for _, row := range input.Rows { - exists, err := pt.exists(row) - if err != nil { - return nil, err - } - if !exists { - result.Rows = append(result.Rows, row) - } - } - - return result, err -} - -// TryStreamExecute implements the Primitive interface -func (d *DistinctV3) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - pt := newProbeTableV3() - - err := vcursor.StreamExecutePrimitive(ctx, d.Source, bindVars, wantfields, func(input *sqltypes.Result) error { - result := &sqltypes.Result{ - Fields: input.Fields, - InsertID: input.InsertID, - } - for _, row := range input.Rows { - exists, err := pt.exists(row) - if err != nil { - return err - } - if !exists { - result.Rows = append(result.Rows, row) - } - } - return callback(result) - }) - - return err -} - -// RouteType implements the Primitive interface -func (d *DistinctV3) RouteType() string { - return d.Source.RouteType() -} - -// GetKeyspaceName implements the Primitive interface -func (d *DistinctV3) GetKeyspaceName() string { - return d.Source.GetKeyspaceName() -} - -// GetTableName implements the Primitive interface -func (d *DistinctV3) GetTableName() string { - return d.Source.GetTableName() -} - -// GetFields implements the Primitive interface -func (d *DistinctV3) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - return d.Source.GetFields(ctx, vcursor, bindVars) -} - -// NeedsTransaction implements the Primitive interface -func (d *DistinctV3) NeedsTransaction() bool { - return 
d.Source.NeedsTransaction() -} - -// Inputs implements the Primitive interface -func (d *DistinctV3) Inputs() []Primitive { - return []Primitive{d.Source} -} - -func (d *DistinctV3) description() PrimitiveDescription { - return PrimitiveDescription{ - OperatorType: "Distinct", - } -} diff --git a/go/vt/vtgate/engine/dml.go b/go/vt/vtgate/engine/dml.go index 5201fe9f81e..51177f41e08 100644 --- a/go/vt/vtgate/engine/dml.go +++ b/go/vt/vtgate/engine/dml.go @@ -45,8 +45,11 @@ type DML struct { // KsidLength is number of columns that represents KsidVindex KsidLength int - // Table specifies the table for the update. - Table []*vindexes.Table + // TableNames are the name of the tables involved in the query. + TableNames []string + + // Vindexes are the column vindexes modified by this DML. + Vindexes []*vindexes.ColumnVindex // OwnedVindexQuery is used for updating changes in lookup vindexes. OwnedVindexQuery string @@ -103,29 +106,16 @@ func (dml *DML) GetKeyspaceName() string { // GetTableName specifies the table that this primitive routes to. func (dml *DML) GetTableName() string { - if dml.Table != nil { - tableNameMap := map[string]any{} - for _, table := range dml.Table { - tableNameMap[table.Name.String()] = nil - } - - var tableNames []string - for name := range tableNameMap { + sort.Strings(dml.TableNames) + var tableNames []string + var previousTbl string + for _, name := range dml.TableNames { + if name != previousTbl { tableNames = append(tableNames, name) + previousTbl = name } - sort.Strings(tableNames) - - return strings.Join(tableNames, ", ") - } - return "" -} - -// GetSingleTable returns single table used in dml. 
-func (dml *DML) GetSingleTable() (*vindexes.Table, error) { - if len(dml.Table) > 1 { - return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported dml on complex table expression") } - return dml.Table[0], nil + return strings.Join(tableNames, ", ") } func allowOnlyPrimary(rss ...*srvtopo.ResolvedShard) error { diff --git a/go/vt/vtgate/engine/exec_prepared_statement.go b/go/vt/vtgate/engine/exec_prepared_statement.go index facdff5b681..1874350f7db 100644 --- a/go/vt/vtgate/engine/exec_prepared_statement.go +++ b/go/vt/vtgate/engine/exec_prepared_statement.go @@ -31,8 +31,10 @@ var _ Primitive = (*ExecStmt)(nil) type ExecStmt struct { Params []*sqlparser.Variable Input Primitive +} - noTxNeeded +func (e *ExecStmt) NeedsTransaction() bool { + return e.Input.NeedsTransaction() } func (e *ExecStmt) RouteType() string { @@ -61,8 +63,8 @@ func (e *ExecStmt) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVa return vcursor.StreamExecutePrimitive(ctx, e.Input, bindVars, wantfields, callback) } -func (e *ExecStmt) Inputs() []Primitive { - return []Primitive{e.Input} +func (e *ExecStmt) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{e.Input}, nil } func (e *ExecStmt) description() PrimitiveDescription { diff --git a/go/vt/vtgate/engine/fake_primitive_test.go b/go/vt/vtgate/engine/fake_primitive_test.go index 1a168dc3dc4..dcec32f1ffd 100644 --- a/go/vt/vtgate/engine/fake_primitive_test.go +++ b/go/vt/vtgate/engine/fake_primitive_test.go @@ -43,8 +43,8 @@ type fakePrimitive struct { allResultsInOneCall bool } -func (f *fakePrimitive) Inputs() []Primitive { - return []Primitive{} +func (f *fakePrimitive) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{}, nil } var _ Primitive = (*fakePrimitive)(nil) diff --git a/go/vt/vtgate/engine/fake_vcursor_test.go b/go/vt/vtgate/engine/fake_vcursor_test.go index a2602da4651..afc0b57963a 100644 --- a/go/vt/vtgate/engine/fake_vcursor_test.go +++ 
b/go/vt/vtgate/engine/fake_vcursor_test.go @@ -51,6 +51,10 @@ var _ SessionActions = (*noopVCursor)(nil) type noopVCursor struct { } +func (t *noopVCursor) Commit(ctx context.Context) error { + return nil +} + func (t *noopVCursor) GetUDV(key string) *querypb.BindVariable { // TODO implement me panic("implement me") @@ -93,6 +97,10 @@ func (t *noopVCursor) SetExec(ctx context.Context, name string, value string) er panic("implement me") } +func (t *noopVCursor) ThrottleApp(ctx context.Context, throttleAppRule *topodatapb.ThrottledAppRule) error { + panic("implement me") +} + func (t *noopVCursor) ShowExec(ctx context.Context, command sqlparser.ShowCommandType, filter *sqlparser.ShowFilter) (*sqltypes.Result, error) { panic("implement me") } @@ -152,6 +160,14 @@ func (t *noopVCursor) SetDDLStrategy(strategy string) { } func (t *noopVCursor) GetDDLStrategy() string { + return "" +} + +func (t *noopVCursor) SetMigrationContext(migrationContext string) { + panic("implement me") +} + +func (t *noopVCursor) GetMigrationContext() string { panic("implement me") } @@ -377,6 +393,15 @@ type loggingVCursor struct { shardSession []*srvtopo.ResolvedShard } +func (f *loggingVCursor) HasCreatedTempTable() { + f.log = append(f.log, "temp table getting created") +} + +func (f *loggingVCursor) Commit(_ context.Context) error { + f.log = append(f.log, "commit") + return nil +} + func (f *loggingVCursor) GetUDV(key string) *querypb.BindVariable { // TODO implement me panic("implement me") diff --git a/go/vt/vtgate/engine/filter.go b/go/vt/vtgate/engine/filter.go index bab94335e67..c0a54f2b6ac 100644 --- a/go/vt/vtgate/engine/filter.go +++ b/go/vt/vtgate/engine/filter.go @@ -19,12 +19,10 @@ package engine import ( "context" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" - querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/evalengine" ) var _ Primitive = 
(*Filter)(nil) @@ -35,6 +33,8 @@ type Filter struct { ASTPredicate sqlparser.Expr Input Primitive + Truncate int + noTxNeeded } @@ -73,7 +73,7 @@ func (f *Filter) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[s } } result.Rows = rows - return result, nil + return result.Truncate(f.Truncate), nil } // TryStreamExecute satisfies the Primitive interface. @@ -96,7 +96,7 @@ func (f *Filter) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars } } results.Rows = rows - return callback(results) + return callback(results.Truncate(f.Truncate)) } return vcursor.StreamExecutePrimitive(ctx, f.Input, bindVars, wantfields, filter) @@ -108,13 +108,14 @@ func (f *Filter) GetFields(ctx context.Context, vcursor VCursor, bindVars map[st } // Inputs returns the input to limit -func (f *Filter) Inputs() []Primitive { - return []Primitive{f.Input} +func (f *Filter) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{f.Input}, nil } func (f *Filter) description() PrimitiveDescription { other := map[string]any{ - "Predicate": sqlparser.String(f.ASTPredicate), + "Predicate": sqlparser.String(f.ASTPredicate), + "ResultColumns": f.Truncate, } return PrimitiveDescription{ diff --git a/go/vt/vtgate/engine/filter_test.go b/go/vt/vtgate/engine/filter_test.go index 01ba7175db4..9a8335e4d7e 100644 --- a/go/vt/vtgate/engine/filter_test.go +++ b/go/vt/vtgate/engine/filter_test.go @@ -29,7 +29,7 @@ import ( ) func TestFilterPass(t *testing.T) { - utf8mb4Bin := collationEnv.LookupByName("utf8mb4_bin").ID() + utf8mb4Bin := collationEnv.LookupByName("utf8mb4_bin") predicate := &sqlparser.ComparisonExpr{ Operator: sqlparser.GreaterThanOp, Left: sqlparser.NewColName("left"), diff --git a/go/vt/vtgate/engine/fk_cascade.go b/go/vt/vtgate/engine/fk_cascade.go new file mode 100644 index 00000000000..d0bddbea8f9 --- /dev/null +++ b/go/vt/vtgate/engine/fk_cascade.go @@ -0,0 +1,189 @@ +/* +Copyright 2023 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "fmt" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +// FkChild contains the Child Primitive to be executed collecting the values from the Selection Primitive using the column indexes. +// BVName is used to pass the value as bind variable to the Child Primitive. +type FkChild struct { + BVName string + Cols []int // indexes + Exec Primitive +} + +// FkCascade is a primitive that implements foreign key cascading using Selection as values required to execute the FkChild Primitives. +// On success, it executes the Parent Primitive. +type FkCascade struct { + // Selection is the Primitive that is used to find the rows that are going to be modified in the child tables. + Selection Primitive + // Children is a list of child foreign key Primitives that are executed using rows from the Selection Primitive. + Children []*FkChild + // Parent is the Primitive that is executed after the children are modified. + Parent Primitive + + txNeeded +} + +// RouteType implements the Primitive interface. +func (fkc *FkCascade) RouteType() string { + return "FkCascade" +} + +// GetKeyspaceName implements the Primitive interface. +func (fkc *FkCascade) GetKeyspaceName() string { + return fkc.Parent.GetKeyspaceName() +} + +// GetTableName implements the Primitive interface. 
+func (fkc *FkCascade) GetTableName() string { + return fkc.Parent.GetTableName() +} + +// GetFields implements the Primitive interface. +func (fkc *FkCascade) GetFields(_ context.Context, _ VCursor, _ map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] GetFields should not be called") +} + +// TryExecute implements the Primitive interface. +func (fkc *FkCascade) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { + // Execute the Selection primitive to find the rows that are going to modified. + // This will be used to find the rows that need modification on the children. + selectionRes, err := vcursor.ExecutePrimitive(ctx, fkc.Selection, bindVars, wantfields) + if err != nil { + return nil, err + } + + // If no rows are to be modified, there is nothing to do. + if len(selectionRes.Rows) == 0 { + return &sqltypes.Result{}, nil + } + + for _, child := range fkc.Children { + // We create a bindVariable for each Child + // that stores the tuple of columns involved in the fk constraint. + bv := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + } + for _, row := range selectionRes.Rows { + var tupleValues []sqltypes.Value + for _, colIdx := range child.Cols { + tupleValues = append(tupleValues, row[colIdx]) + } + bv.Values = append(bv.Values, sqltypes.TupleToProto(tupleValues)) + } + // Execute the child primitive, and bail out incase of failure. + // Since this Primitive is always executed in a transaction, the changes should + // be rolled back incase of an error. + bindVars[child.BVName] = bv + _, err = vcursor.ExecutePrimitive(ctx, child.Exec, bindVars, wantfields) + if err != nil { + return nil, err + } + delete(bindVars, child.BVName) + } + + // All the children are modified successfully, we can now execute the Parent Primitive. 
+ return vcursor.ExecutePrimitive(ctx, fkc.Parent, bindVars, wantfields) +} + +// TryStreamExecute implements the Primitive interface. +func (fkc *FkCascade) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { + // We create a bindVariable for each Child + // that stores the tuple of columns involved in the fk constraint. + var bindVariables []*querypb.BindVariable + for range fkc.Children { + bindVariables = append(bindVariables, &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + }) + } + + // Execute the Selection primitive to find the rows that are going to modified. + // This will be used to find the rows that need modification on the children. + err := vcursor.StreamExecutePrimitive(ctx, fkc.Selection, bindVars, wantfields, func(result *sqltypes.Result) error { + if len(result.Rows) == 0 { + return nil + } + for idx, child := range fkc.Children { + for _, row := range result.Rows { + var tupleValues []sqltypes.Value + for _, colIdx := range child.Cols { + tupleValues = append(tupleValues, row[colIdx]) + } + bindVariables[idx].Values = append(bindVariables[idx].Values, sqltypes.TupleToProto(tupleValues)) + } + } + return nil + }) + if err != nil { + return err + } + + // Execute the child primitive, and bail out incase of failure. + // Since this Primitive is always executed in a transaction, the changes should + // be rolled back incase of an error. + for idx, child := range fkc.Children { + bindVars[child.BVName] = bindVariables[idx] + err = vcursor.StreamExecutePrimitive(ctx, child.Exec, bindVars, wantfields, func(result *sqltypes.Result) error { + return nil + }) + if err != nil { + return err + } + delete(bindVars, child.BVName) + } + + // All the children are modified successfully, we can now execute the Parent Primitive. 
+ return vcursor.StreamExecutePrimitive(ctx, fkc.Parent, bindVars, wantfields, callback) +} + +// Inputs implements the Primitive interface. +func (fkc *FkCascade) Inputs() ([]Primitive, []map[string]any) { + var inputs []Primitive + var inputsMap []map[string]any + inputs = append(inputs, fkc.Selection) + inputsMap = append(inputsMap, map[string]any{ + inputName: "Selection", + }) + for idx, child := range fkc.Children { + inputsMap = append(inputsMap, map[string]any{ + inputName: fmt.Sprintf("CascadeChild-%d", idx+1), + "BvName": child.BVName, + "Cols": child.Cols, + }) + inputs = append(inputs, child.Exec) + } + inputs = append(inputs, fkc.Parent) + inputsMap = append(inputsMap, map[string]any{ + inputName: "Parent", + }) + return inputs, inputsMap +} + +func (fkc *FkCascade) description() PrimitiveDescription { + return PrimitiveDescription{OperatorType: fkc.RouteType()} +} + +var _ Primitive = (*FkCascade)(nil) diff --git a/go/vt/vtgate/engine/fk_cascade_test.go b/go/vt/vtgate/engine/fk_cascade_test.go new file mode 100644 index 00000000000..ddd381003b1 --- /dev/null +++ b/go/vt/vtgate/engine/fk_cascade_test.go @@ -0,0 +1,160 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package engine + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +// TestDeleteCascade tests that FkCascade executes the child and parent primitives for a delete cascade. +func TestDeleteCascade(t *testing.T) { + fakeRes := sqltypes.MakeTestResult(sqltypes.MakeTestFields("cola|colb", "int64|varchar"), "1|a", "2|b") + + inputP := &Route{ + Query: "select cola, colb from parent where foo = 48", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + } + childP := &Delete{ + DML: &DML{ + Query: "delete from child where (ca, cb) in ::__vals", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + }, + } + parentP := &Delete{ + DML: &DML{ + Query: "delete from parent where foo = 48", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + }, + } + fkc := &FkCascade{ + Selection: inputP, + Children: []*FkChild{{BVName: "__vals", Cols: []int{0, 1}, Exec: childP}}, + Parent: parentP, + } + + vc := newDMLTestVCursor("0") + vc.results = []*sqltypes.Result{fakeRes} + _, err := fkc.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: select cola, colb from parent where foo = 48 {} false false`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: delete from child where (ca, cb) in ::__vals {__vals: type:TUPLE values:{type:TUPLE value:"\x89\x02\x011\x950\x01a"} values:{type:TUPLE value:"\x89\x02\x012\x950\x01b"}} true true`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: delete from parent 
where foo = 48 {} true true`, + }) + + vc.Rewind() + err = fkc.TryStreamExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true, func(result *sqltypes.Result) error { return nil }) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `StreamExecuteMulti select cola, colb from parent where foo = 48 ks.0: {} `, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: delete from child where (ca, cb) in ::__vals {__vals: type:TUPLE values:{type:TUPLE value:"\x89\x02\x011\x950\x01a"} values:{type:TUPLE value:"\x89\x02\x012\x950\x01b"}} true true`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: delete from parent where foo = 48 {} true true`, + }) +} + +// TestUpdateCascade tests that FkCascade executes the child and parent primitives for an update cascade. +func TestUpdateCascade(t *testing.T) { + fakeRes := sqltypes.MakeTestResult(sqltypes.MakeTestFields("cola|colb", "int64|varchar"), "1|a", "2|b") + + inputP := &Route{ + Query: "select cola, colb from parent where foo = 48", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + } + childP := &Update{ + DML: &DML{ + Query: "update child set ca = :vtg1 where (ca, cb) in ::__vals", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + }, + } + parentP := &Update{ + DML: &DML{ + Query: "update parent set cola = 1 where foo = 48", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + }, + } + fkc := &FkCascade{ + Selection: inputP, + Children: []*FkChild{{BVName: "__vals", Cols: []int{0, 1}, Exec: childP}}, + Parent: parentP, + } + + vc := newDMLTestVCursor("0") + vc.results = []*sqltypes.Result{fakeRes} + _, err := fkc.TryExecute(context.Background(), vc, 
map[string]*querypb.BindVariable{}, true) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: select cola, colb from parent where foo = 48 {} false false`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: update child set ca = :vtg1 where (ca, cb) in ::__vals {__vals: type:TUPLE values:{type:TUPLE value:"\x89\x02\x011\x950\x01a"} values:{type:TUPLE value:"\x89\x02\x012\x950\x01b"}} true true`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: update parent set cola = 1 where foo = 48 {} true true`, + }) + + vc.Rewind() + err = fkc.TryStreamExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true, func(result *sqltypes.Result) error { return nil }) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `StreamExecuteMulti select cola, colb from parent where foo = 48 ks.0: {} `, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: update child set ca = :vtg1 where (ca, cb) in ::__vals {__vals: type:TUPLE values:{type:TUPLE value:"\x89\x02\x011\x950\x01a"} values:{type:TUPLE value:"\x89\x02\x012\x950\x01b"}} true true`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: update parent set cola = 1 where foo = 48 {} true true`, + }) +} + +// TestNeedsTransactionInExecPrepared tests that if we have a foreign key cascade inside an ExecStmt plan, then we do mark the plan to require a transaction. +func TestNeedsTransactionInExecPrepared(t *testing.T) { + // Even if FkCascade is wrapped in ExecStmt, the plan should be marked such that it requires a transaction. + // This is necessary because if we don't run the cascades for DMLs in a transaction, we might end up committing partial writes that should eventually be rolled back. 
+ execPrepared := &ExecStmt{ + Input: &FkCascade{}, + } + require.True(t, execPrepared.NeedsTransaction()) +} diff --git a/go/vt/vtgate/engine/fk_verify.go b/go/vt/vtgate/engine/fk_verify.go new file mode 100644 index 00000000000..350aeec59e0 --- /dev/null +++ b/go/vt/vtgate/engine/fk_verify.go @@ -0,0 +1,129 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "fmt" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +// Verify contains the verification primitve and its type i.e. parent or child +type Verify struct { + Exec Primitive + Typ string +} + +// FkVerify is a primitive that verifies that the foreign key constraints in parent tables are satisfied. +// It does this by executing a select distinct query on the parent table with the values that are being inserted/updated. +type FkVerify struct { + Verify []*Verify + Exec Primitive + + txNeeded +} + +// constants for verification type. 
+const ( + ParentVerify = "VerifyParent" + ChildVerify = "VerifyChild" +) + +// RouteType implements the Primitive interface +func (f *FkVerify) RouteType() string { + return "FKVerify" +} + +// GetKeyspaceName implements the Primitive interface +func (f *FkVerify) GetKeyspaceName() string { + return f.Exec.GetKeyspaceName() +} + +// GetTableName implements the Primitive interface +func (f *FkVerify) GetTableName() string { + return f.Exec.GetTableName() +} + +// GetFields implements the Primitive interface +func (f *FkVerify) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] GetFields should not be called") +} + +// TryExecute implements the Primitive interface +func (f *FkVerify) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { + for _, v := range f.Verify { + qr, err := vcursor.ExecutePrimitive(ctx, v.Exec, bindVars, wantfields) + if err != nil { + return nil, err + } + if len(qr.Rows) > 0 { + return nil, getError(v.Typ) + } + } + return vcursor.ExecutePrimitive(ctx, f.Exec, bindVars, wantfields) +} + +// TryStreamExecute implements the Primitive interface +func (f *FkVerify) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { + for _, v := range f.Verify { + err := vcursor.StreamExecutePrimitive(ctx, v.Exec, bindVars, wantfields, func(qr *sqltypes.Result) error { + if len(qr.Rows) > 0 { + return getError(v.Typ) + } + return nil + }) + if err != nil { + return err + } + } + return vcursor.StreamExecutePrimitive(ctx, f.Exec, bindVars, wantfields, callback) +} + +// Inputs implements the Primitive interface +func (f *FkVerify) Inputs() ([]Primitive, []map[string]any) { + var inputs []Primitive + var inputsMap []map[string]any + for idx, v := 
range f.Verify { + inputsMap = append(inputsMap, map[string]any{ + inputName: fmt.Sprintf("%s-%d", v.Typ, idx+1), + }) + inputs = append(inputs, v.Exec) + } + inputs = append(inputs, f.Exec) + inputsMap = append(inputsMap, map[string]any{ + inputName: "PostVerify", + }) + return inputs, inputsMap + +} + +func (f *FkVerify) description() PrimitiveDescription { + return PrimitiveDescription{OperatorType: f.RouteType()} +} + +var _ Primitive = (*FkVerify)(nil) + +func getError(typ string) error { + if typ == ParentVerify { + return vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.NoReferencedRow2, "Cannot add or update a child row: a foreign key constraint fails") + } + return vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.RowIsReferenced2, "Cannot delete or update a parent row: a foreign key constraint fails") +} diff --git a/go/vt/vtgate/engine/fk_verify_test.go b/go/vt/vtgate/engine/fk_verify_test.go new file mode 100644 index 00000000000..5635a32bc2c --- /dev/null +++ b/go/vt/vtgate/engine/fk_verify_test.go @@ -0,0 +1,125 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package engine + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +func TestFKVerifyUpdate(t *testing.T) { + verifyP := &Route{ + Query: "select 1 from child c left join parent p on p.cola = 1 and p.colb = 'a' where p.cola is null and p.colb is null", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + } + verifyC := &Route{ + Query: "select 1 from grandchild g join child c on g.cola = c.cola and g.colb = c.colb where c.foo = 48", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + } + childP := &Update{ + DML: &DML{ + Query: "update child set cola = 1, colb = 'a' where foo = 48", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + }, + } + fkc := &FkVerify{ + Verify: []*Verify{{Exec: verifyP, Typ: ParentVerify}}, + Exec: childP, + } + + t.Run("foreign key verification success", func(t *testing.T) { + fakeRes := sqltypes.MakeTestResult(sqltypes.MakeTestFields("1", "int64")) + vc := newDMLTestVCursor("0") + vc.results = []*sqltypes.Result{fakeRes} + _, err := fkc.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: select 1 from child c left join parent p on p.cola = 1 and p.colb = 'a' where p.cola is null and p.colb is null {} false false`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: update child set cola = 1, colb = 'a' where foo = 48 {} true true`, + }) + + vc.Rewind() + err = fkc.TryStreamExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true, func(result *sqltypes.Result) error { 
return nil }) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `StreamExecuteMulti select 1 from child c left join parent p on p.cola = 1 and p.colb = 'a' where p.cola is null and p.colb is null ks.0: {} `, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: update child set cola = 1, colb = 'a' where foo = 48 {} true true`, + }) + }) + + t.Run("parent foreign key verification failure", func(t *testing.T) { + // No results from select, should cause the foreign key verification to fail. + fakeRes := sqltypes.MakeTestResult(sqltypes.MakeTestFields("1", "int64"), "1", "1", "1") + vc := newDMLTestVCursor("0") + vc.results = []*sqltypes.Result{fakeRes} + _, err := fkc.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true) + require.ErrorContains(t, err, "Cannot add or update a child row: a foreign key constraint fails") + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: select 1 from child c left join parent p on p.cola = 1 and p.colb = 'a' where p.cola is null and p.colb is null {} false false`, + }) + + vc.Rewind() + err = fkc.TryStreamExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true, func(result *sqltypes.Result) error { return nil }) + require.ErrorContains(t, err, "Cannot add or update a child row: a foreign key constraint fails") + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `StreamExecuteMulti select 1 from child c left join parent p on p.cola = 1 and p.colb = 'a' where p.cola is null and p.colb is null ks.0: {} `, + }) + }) + + fkc.Verify[0] = &Verify{Exec: verifyC, Typ: ChildVerify} + t.Run("child foreign key verification failure", func(t *testing.T) { + // No results from select, should cause the foreign key verification to fail. 
+ fakeRes := sqltypes.MakeTestResult(sqltypes.MakeTestFields("1", "int64"), "1", "1", "1") + vc := newDMLTestVCursor("0") + vc.results = []*sqltypes.Result{fakeRes} + _, err := fkc.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true) + require.ErrorContains(t, err, "Cannot delete or update a parent row: a foreign key constraint fails") + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: select 1 from grandchild g join child c on g.cola = c.cola and g.colb = c.colb where c.foo = 48 {} false false`, + }) + + vc.Rewind() + err = fkc.TryStreamExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true, func(result *sqltypes.Result) error { return nil }) + require.ErrorContains(t, err, "Cannot delete or update a parent row: a foreign key constraint fails") + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `StreamExecuteMulti select 1 from grandchild g join child c on g.cola = c.cola and g.colb = c.colb where c.foo = 48 ks.0: {} `, + }) + }) +} diff --git a/go/vt/vtgate/engine/gen4_compare_v3.go b/go/vt/vtgate/engine/gen4_compare_v3.go deleted file mode 100644 index a913c442a2c..00000000000 --- a/go/vt/vtgate/engine/gen4_compare_v3.go +++ /dev/null @@ -1,144 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package engine - -import ( - "context" - "sync" - - "vitess.io/vitess/go/sqltypes" - querypb "vitess.io/vitess/go/vt/proto/query" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" -) - -// Gen4CompareV3 is a Primitive used to compare V3 and Gen4's plans. -type Gen4CompareV3 struct { - V3, Gen4 Primitive - HasOrderBy bool -} - -var _ Primitive = (*Gen4CompareV3)(nil) -var _ Gen4Comparer = (*Gen4CompareV3)(nil) - -// GetGen4Primitive implements the Gen4Comparer interface -func (gc *Gen4CompareV3) GetGen4Primitive() Primitive { - return gc.Gen4 -} - -// RouteType implements the Primitive interface -func (gc *Gen4CompareV3) RouteType() string { - return gc.Gen4.RouteType() -} - -// GetKeyspaceName implements the Primitive interface -func (gc *Gen4CompareV3) GetKeyspaceName() string { - return gc.Gen4.GetKeyspaceName() -} - -// GetTableName implements the Primitive interface -func (gc *Gen4CompareV3) GetTableName() string { - return gc.Gen4.GetTableName() -} - -// GetFields implements the Primitive interface -func (gc *Gen4CompareV3) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - return gc.Gen4.GetFields(ctx, vcursor, bindVars) -} - -// NeedsTransaction implements the Primitive interface -func (gc *Gen4CompareV3) NeedsTransaction() bool { - return gc.Gen4.NeedsTransaction() -} - -// TryExecute implements the Primitive interface -func (gc *Gen4CompareV3) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { - var v3Err, gen4Err error - v3Result, gen4Result := &sqltypes.Result{}, &sqltypes.Result{} - if gc.Gen4 != nil { - gen4Result, gen4Err = gc.Gen4.TryExecute(ctx, vcursor, bindVars, wantfields) - } - if gc.V3 != nil { - v3Result, v3Err = gc.V3.TryExecute(ctx, vcursor, bindVars, wantfields) - } - - if err := CompareErrors(v3Err, gen4Err, "v3", "Gen4"); err != nil { - return 
nil, err - } - - if err := gc.compareResults(v3Result, gen4Result); err != nil { - return nil, err - } - return gen4Result, nil -} - -// TryStreamExecute implements the Primitive interface -func (gc *Gen4CompareV3) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - var mu sync.Mutex - var v3Err, gen4Err error - v3Result, gen4Result := &sqltypes.Result{}, &sqltypes.Result{} - - if gc.Gen4 != nil { - gen4Err = gc.Gen4.TryStreamExecute(ctx, vcursor, bindVars, wantfields, func(result *sqltypes.Result) error { - mu.Lock() - defer mu.Unlock() - gen4Result.AppendResult(result) - return nil - }) - } - if gc.V3 != nil { - v3Err = gc.V3.TryStreamExecute(ctx, vcursor, bindVars, wantfields, func(result *sqltypes.Result) error { - mu.Lock() - defer mu.Unlock() - v3Result.AppendResult(result) - return nil - }) - } - - if err := CompareErrors(v3Err, gen4Err, "v3", "Gen4"); err != nil { - return err - } - - if err := gc.compareResults(v3Result, gen4Result); err != nil { - return err - } - return callback(gen4Result) -} - -func (gc *Gen4CompareV3) compareResults(v3Result *sqltypes.Result, gen4Result *sqltypes.Result) error { - var match bool - if gc.HasOrderBy { - match = sqltypes.ResultsEqual([]sqltypes.Result{*v3Result}, []sqltypes.Result{*gen4Result}) - } else { - match = sqltypes.ResultsEqualUnordered([]sqltypes.Result{*v3Result}, []sqltypes.Result{*gen4Result}) - } - if !match { - printMismatch(v3Result, gen4Result, gc.V3, gc.Gen4, "V3", "Gen4") - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "results did not match, see VTGate's logs for more information") - } - return nil -} - -// Inputs implements the Primitive interface -func (gc *Gen4CompareV3) Inputs() []Primitive { - return []Primitive{gc.Gen4, gc.V3} -} - -// description implements the Primitive interface -func (gc *Gen4CompareV3) description() PrimitiveDescription { - return 
PrimitiveDescription{OperatorType: "Gen4CompareV3"} -} diff --git a/go/vt/vtgate/engine/hash_join.go b/go/vt/vtgate/engine/hash_join.go index 1fb889c8fd4..a38fc21bf97 100644 --- a/go/vt/vtgate/engine/hash_join.go +++ b/go/vt/vtgate/engine/hash_join.go @@ -98,7 +98,7 @@ func (hj *HashJoin) TryExecute(ctx context.Context, vcursor VCursor, bindVars ma for _, currentLHSRow := range lftRows { lhsVal := currentLHSRow[hj.LHSKey] // hash codes can give false positives, so we need to check with a real comparison as well - cmp, err := evalengine.NullsafeCompare(joinVal, lhsVal, collations.Unknown) + cmp, err := evalengine.NullsafeCompare(joinVal, lhsVal, hj.Collation) if err != nil { return nil, err } @@ -234,8 +234,8 @@ func (hj *HashJoin) NeedsTransaction() bool { } // Inputs implements the Primitive interface -func (hj *HashJoin) Inputs() []Primitive { - return []Primitive{hj.Left, hj.Right} +func (hj *HashJoin) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{hj.Left, hj.Right}, nil } // description implements the Primitive interface @@ -246,9 +246,9 @@ func (hj *HashJoin) description() PrimitiveDescription { "Predicate": sqlparser.String(hj.ASTPred), "ComparisonType": hj.ComparisonType.String(), } - coll := hj.Collation.Get() - if coll != nil { - other["Collation"] = coll.Name() + coll := hj.Collation + if coll != collations.Unknown { + other["Collation"] = collations.Local().LookupName(coll) } return PrimitiveDescription{ OperatorType: "Join", diff --git a/go/vt/vtgate/engine/insert.go b/go/vt/vtgate/engine/insert.go index 21227c3d931..49aa96f62a7 100644 --- a/go/vt/vtgate/engine/insert.go +++ b/go/vt/vtgate/engine/insert.go @@ -25,6 +25,7 @@ import ( "sync" "time" + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" querypb "vitess.io/vitess/go/vt/proto/query" @@ -70,15 +71,15 @@ type ( // ColVindexes are the vindexes that will use the VindexValues ColVindexes []*vindexes.ColumnVindex - // Table specifies the table 
for the insert. - Table *vindexes.Table + // TableName is the name of the table on which row will be inserted. + TableName string // Generate is only set for inserts where a sequence must be generated. Generate *Generate // Prefix, Mid and Suffix are for sharded insert plans. Prefix string - Mid []string + Mid sqlparser.Values Suffix string // Option to override the standard behavior and allow a multi-shard insert @@ -111,11 +112,11 @@ type ( ksID = []byte ) -func (ins *Insert) Inputs() []Primitive { +func (ins *Insert) Inputs() ([]Primitive, []map[string]any) { if ins.Input == nil { - return nil + return nil, nil } - return []Primitive{ins.Input} + return []Primitive{ins.Input}, nil } // NewQueryInsert creates an Insert with a query string. @@ -127,15 +128,6 @@ func NewQueryInsert(opcode InsertOpcode, keyspace *vindexes.Keyspace, query stri } } -// NewSimpleInsert creates an Insert for a Table. -func NewSimpleInsert(opcode InsertOpcode, table *vindexes.Table, keyspace *vindexes.Keyspace) *Insert { - return &Insert{ - Opcode: opcode, - Table: table, - Keyspace: keyspace, - } -} - // NewInsert creates a new Insert. func NewInsert( opcode InsertOpcode, @@ -144,19 +136,28 @@ func NewInsert( vindexValues [][][]evalengine.Expr, table *vindexes.Table, prefix string, - mid []string, + mid sqlparser.Values, suffix string, ) *Insert { - return &Insert{ + ins := &Insert{ Opcode: opcode, Ignore: ignore, Keyspace: keyspace, VindexValues: vindexValues, - Table: table, Prefix: prefix, Mid: mid, Suffix: suffix, } + if table != nil { + ins.TableName = table.Name.String() + for _, colVindex := range table.ColumnVindexes { + if colVindex.IsPartialVindex() { + continue + } + ins.ColVindexes = append(ins.ColVindexes, colVindex) + } + } + return ins } // Generate represents the instruction to generate @@ -226,10 +227,7 @@ func (ins *Insert) GetKeyspaceName() string { // GetTableName specifies the table that this primitive routes to. 
func (ins *Insert) GetTableName() string { - if ins.Table != nil { - return ins.Table.Name.String() - } - return "" + return ins.TableName } // TryExecute performs a non-streaming exec. @@ -404,10 +402,6 @@ func (ins *Insert) getInsertSelectQueries( rows []sqltypes.Row, ) ([]*srvtopo.ResolvedShard, []*querypb.BoundQuery, error) { colVindexes := ins.ColVindexes - if colVindexes == nil { - colVindexes = ins.Table.ColumnVindexes - } - if len(colVindexes) != len(ins.VindexValueOffset) { return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vindex value offsets and vindex info do not match") } @@ -520,7 +514,7 @@ func shouldGenerate(v sqltypes.Value) bool { // Unless the NO_AUTO_VALUE_ON_ZERO sql mode is active in mysql, it also // treats 0 as a value that should generate a new sequence. - n, err := evalengine.ToUint64(v) + n, err := v.ToCastUint64() if err == nil && n == 0 { return true } @@ -577,7 +571,7 @@ func (ins *Insert) processGenerateFromValues( } // If no rows are returned, it's an internal error, and the code // must panic, which will be caught and reported. - insertID, err = evalengine.ToInt64(qr.Rows[0][0]) + insertID, err = qr.Rows[0][0].ToCastInt64() if err != nil { return 0, err } @@ -639,7 +633,7 @@ func (ins *Insert) processGenerateFromRows( } // If no rows are returned, it's an internal error, and the code // must panic, which will be caught and reported. 
- insertID, err = evalengine.ToInt64(qr.Rows[0][0]) + insertID, err = qr.Rows[0][0].ToCastInt64() if err != nil { return 0, err } @@ -682,9 +676,6 @@ func (ins *Insert) getInsertShardedRoute( rowCount := 0 env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) colVindexes := ins.ColVindexes - if colVindexes == nil { - colVindexes = ins.Table.ColumnVindexes - } for vIdx, vColValues := range ins.VindexValues { if len(vColValues) != len(colVindexes[vIdx].Columns) { return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] supplied vindex column values don't match vschema: %v %v", vColValues, colVindexes[vIdx].Columns) @@ -723,7 +714,7 @@ func (ins *Insert) getInsertShardedRoute( // results in an error. For 'ignore' type inserts, the keyspace // id is returned as nil, which is used later to drop the corresponding rows. if len(vindexRowsValues) == 0 || len(colVindexes) == 0 { - return nil, nil, vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.RequiresPrimaryKey, vterrors.PrimaryVindexNotSet, ins.Table.Name) + return nil, nil, vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.RequiresPrimaryKey, vterrors.PrimaryVindexNotSet, ins.TableName) } keyspaceIDs, err := ins.processPrimary(ctx, vcursor, vindexRowsValues[0], colVindexes[0]) if err != nil { @@ -785,17 +776,33 @@ func (ins *Insert) getInsertShardedRoute( queries := make([]*querypb.BoundQuery, len(rss)) for i := range rss { + shardBindVars := map[string]*querypb.BindVariable{} var mids []string for _, indexValue := range indexesPerRss[i] { index, _ := strconv.ParseInt(string(indexValue.Value), 0, 64) if keyspaceIDs[index] != nil { - mids = append(mids, ins.Mid[index]) + mids = append(mids, sqlparser.String(ins.Mid[index])) + for _, expr := range ins.Mid[index] { + err = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + if arg, ok := node.(*sqlparser.Argument); ok { + bv, exists := bindVars[arg.Name] + if !exists { + return false, vterrors.VT03026(arg.Name) + } 
+ shardBindVars[arg.Name] = bv + } + return true, nil + }, expr, nil) + if err != nil { + return nil, nil, err + } + } } } rewritten := ins.Prefix + strings.Join(mids, ",") + ins.Suffix queries[i] = &querypb.BoundQuery{ Sql: rewritten, - BindVariables: bindVars, + BindVariables: shardBindVars, } } @@ -1012,7 +1019,10 @@ func (ins *Insert) description() PrimitiveDescription { other["VindexOffsetFromSelect"] = valuesOffsets } if len(ins.Mid) > 0 { - shardQuery := fmt.Sprintf("%s%s%s", ins.Prefix, strings.Join(ins.Mid, ", "), ins.Suffix) + mids := slice.Map(ins.Mid, func(from sqlparser.ValTuple) string { + return sqlparser.String(from) + }) + shardQuery := fmt.Sprintf("%s%s%s", ins.Prefix, strings.Join(mids, ", "), ins.Suffix) if shardQuery != ins.Query { other["ShardedQuery"] = shardQuery } diff --git a/go/vt/vtgate/engine/insert_test.go b/go/vt/vtgate/engine/insert_test.go index 72dec39045d..014654f37d6 100644 --- a/go/vt/vtgate/engine/insert_test.go +++ b/go/vt/vtgate/engine/insert_test.go @@ -21,17 +21,15 @@ import ( "errors" "testing" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - - "vitess.io/vitess/go/vt/vtgate/evalengine" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/vtgate/vindexes" - querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/vindexes" ) func TestInsertUnsharded(t *testing.T) { @@ -212,7 +210,9 @@ func TestInsertShardedSimple(t *testing.T) { }}, ks.Tables["t1"], "prefix", - []string{" mid1"}, + sqlparser.Values{ + {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}}, + }, " suffix", ) vc := newDMLTestVCursor("-20", "20-") @@ -227,7 +227,7 @@ func TestInsertShardedSimple(t *testing.T) { `ResolveDestinations sharded [value:"0"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6)`, 
// Row 2 will go to -20, rows 1 & 3 will go to 20- `ExecuteMultiShard ` + - `sharded.20-: prefix mid1 suffix {_id_0: type:INT64 value:"1"} ` + + `sharded.20-: prefix(:_id_0 /* INT64 */) suffix {_id_0: type:INT64 value:"1"} ` + `true true`, }) @@ -247,7 +247,11 @@ func TestInsertShardedSimple(t *testing.T) { }}, ks.Tables["t1"], "prefix", - []string{" mid1", " mid2", " mid3"}, + sqlparser.Values{ + {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}}, + {&sqlparser.Argument{Name: "_id_1", Type: sqltypes.Int64}}, + {&sqlparser.Argument{Name: "_id_2", Type: sqltypes.Int64}}, + }, " suffix", ) vc = newDMLTestVCursor("-20", "20-") @@ -262,8 +266,8 @@ func TestInsertShardedSimple(t *testing.T) { `ResolveDestinations sharded [value:"0" value:"1" value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, // Row 2 will go to -20, rows 1 & 3 will go to 20- `ExecuteMultiShard ` + - `sharded.20-: prefix mid1, mid3 suffix {_id_0: type:INT64 value:"1" _id_1: type:INT64 value:"2" _id_2: type:INT64 value:"3"} ` + - `sharded.-20: prefix mid2 suffix {_id_0: type:INT64 value:"1" _id_1: type:INT64 value:"2" _id_2: type:INT64 value:"3"} ` + + `sharded.20-: prefix(:_id_0 /* INT64 */),(:_id_2 /* INT64 */) suffix {_id_0: type:INT64 value:"1" _id_2: type:INT64 value:"3"} ` + + `sharded.-20: prefix(:_id_1 /* INT64 */) suffix {_id_1: type:INT64 value:"2"} ` + `true false`, }) @@ -284,7 +288,11 @@ func TestInsertShardedSimple(t *testing.T) { ks.Tables["t1"], "prefix", - []string{" mid1", " mid2", " mid3"}, + sqlparser.Values{ + {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}}, + {&sqlparser.Argument{Name: "_id_1", Type: sqltypes.Int64}}, + {&sqlparser.Argument{Name: "_id_2", Type: sqltypes.Int64}}, + }, " suffix", ) ins.MultiShardAutocommit = true @@ -301,8 +309,8 @@ func TestInsertShardedSimple(t *testing.T) { `ResolveDestinations sharded [value:"0" value:"1" value:"2"] 
Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, // Row 2 will go to -20, rows 1 & 3 will go to 20- `ExecuteMultiShard ` + - `sharded.20-: prefix mid1, mid3 suffix {_id_0: type:INT64 value:"1" _id_1: type:INT64 value:"2" _id_2: type:INT64 value:"3"} ` + - `sharded.-20: prefix mid2 suffix {_id_0: type:INT64 value:"1" _id_1: type:INT64 value:"2" _id_2: type:INT64 value:"3"} ` + + `sharded.20-: prefix(:_id_0 /* INT64 */),(:_id_2 /* INT64 */) suffix {_id_0: type:INT64 value:"1" _id_2: type:INT64 value:"3"} ` + + `sharded.-20: prefix(:_id_1 /* INT64 */) suffix {_id_1: type:INT64 value:"2"} ` + `true true`, }) } @@ -349,7 +357,9 @@ func TestInsertShardedFail(t *testing.T) { ks.Tables["t1"], "prefix", - []string{" mid1", " mid2", " mid3"}, + sqlparser.Values{ + {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}}, + }, " suffix", ) @@ -399,7 +409,11 @@ func TestInsertShardedGenerate(t *testing.T) { }}, ks.Tables["t1"], "prefix", - []string{" mid1", " mid2", " mid3"}, + sqlparser.Values{ + {&sqlparser.Argument{Name: "__seq0", Type: sqltypes.Int64}}, + {&sqlparser.Argument{Name: "__seq1", Type: sqltypes.Int64}}, + {&sqlparser.Argument{Name: "__seq2", Type: sqltypes.Int64}}, + }, " suffix", ) @@ -412,7 +426,7 @@ func TestInsertShardedGenerate(t *testing.T) { Values: evalengine.NewTupleExpr( evalengine.NewLiteralInt(1), evalengine.NullExpr, - evalengine.NewLiteralInt(2), + evalengine.NewLiteralInt(3), ), } @@ -440,12 +454,10 @@ func TestInsertShardedGenerate(t *testing.T) { `ResolveDestinations sharded [value:"0" value:"1" value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, // Row 2 will go to -20, rows 1 & 3 will go to 20- `ExecuteMultiShard ` + - `sharded.20-: prefix mid1, mid3 suffix ` + - `{__seq0: type:INT64 value:"1" __seq1: type:INT64 value:"2" __seq2: type:INT64 value:"2" ` + - `_id_0: 
type:INT64 value:"1" _id_1: type:INT64 value:"2" _id_2: type:INT64 value:"3"} ` + - `sharded.-20: prefix mid2 suffix ` + - `{__seq0: type:INT64 value:"1" __seq1: type:INT64 value:"2" __seq2: type:INT64 value:"2" ` + - `_id_0: type:INT64 value:"1" _id_1: type:INT64 value:"2" _id_2: type:INT64 value:"3"} ` + + `sharded.20-: prefix(:__seq0 /* INT64 */),(:__seq2 /* INT64 */) suffix ` + + `{__seq0: type:INT64 value:"1" __seq2: type:INT64 value:"3"} ` + + `sharded.-20: prefix(:__seq1 /* INT64 */) suffix ` + + `{__seq1: type:INT64 value:"2"} ` + `true false`, }) @@ -535,7 +547,11 @@ func TestInsertShardedOwned(t *testing.T) { }}, ks.Tables["t1"], "prefix", - []string{" mid1", " mid2", " mid3"}, + sqlparser.Values{ + {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c2_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_0", Type: sqltypes.Int64}}, + {&sqlparser.Argument{Name: "_id_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c2_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_1", Type: sqltypes.Int64}}, + {&sqlparser.Argument{Name: "_id_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c2_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_2", Type: sqltypes.Int64}}, + }, " suffix", ) @@ -557,16 +573,15 @@ func TestInsertShardedOwned(t *testing.T) { // Based on shardForKsid, values returned will be 20-, -20, 20-. 
`ResolveDestinations sharded [value:"0" value:"1" value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, `ExecuteMultiShard ` + - `sharded.20-: prefix mid1, mid3 suffix ` + - `{_c1_0: type:INT64 value:"4" _c1_1: type:INT64 value:"5" _c1_2: type:INT64 value:"6" ` + - `_c2_0: type:INT64 value:"7" _c2_1: type:INT64 value:"8" _c2_2: type:INT64 value:"9" ` + - `_c3_0: type:INT64 value:"10" _c3_1: type:INT64 value:"11" _c3_2: type:INT64 value:"12" ` + - `_id_0: type:INT64 value:"1" _id_1: type:INT64 value:"2" _id_2: type:INT64 value:"3"} ` + - `sharded.-20: prefix mid2 suffix ` + - `{_c1_0: type:INT64 value:"4" _c1_1: type:INT64 value:"5" _c1_2: type:INT64 value:"6" ` + - `_c2_0: type:INT64 value:"7" _c2_1: type:INT64 value:"8" _c2_2: type:INT64 value:"9" ` + - `_c3_0: type:INT64 value:"10" _c3_1: type:INT64 value:"11" _c3_2: type:INT64 value:"12" ` + - `_id_0: type:INT64 value:"1" _id_1: type:INT64 value:"2" _id_2: type:INT64 value:"3"} ` + + `sharded.20-: prefix(:_id_0 /* INT64 */, :_c1_0 /* INT64 */, :_c2_0 /* INT64 */, :_c3_0 /* INT64 */)` + + `,(:_id_2 /* INT64 */, :_c1_2 /* INT64 */, :_c2_2 /* INT64 */, :_c3_2 /* INT64 */) suffix ` + + `{_c1_0: type:INT64 value:"4" _c1_2: type:INT64 value:"6" ` + + `_c2_0: type:INT64 value:"7" _c2_2: type:INT64 value:"9" ` + + `_c3_0: type:INT64 value:"10" _c3_2: type:INT64 value:"12" ` + + `_id_0: type:INT64 value:"1" _id_2: type:INT64 value:"3"} ` + + `sharded.-20: prefix(:_id_1 /* INT64 */, :_c1_1 /* INT64 */, :_c2_1 /* INT64 */, :_c3_1 /* INT64 */) suffix ` + + `{_c1_1: type:INT64 value:"5" _c2_1: type:INT64 value:"8" _c3_1: type:INT64 value:"11" ` + + `_id_1: type:INT64 value:"2"} ` + `true false`, }) } @@ -626,7 +641,9 @@ func TestInsertShardedOwnedWithNull(t *testing.T) { }}, ks.Tables["t1"], "prefix", - []string{" mid1", " mid2", " mid3"}, + sqlparser.Values{ + {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}, 
&sqlparser.Argument{Name: "_c3_0", Type: sqltypes.Null}}, + }, " suffix", ) @@ -639,7 +656,7 @@ func TestInsertShardedOwnedWithNull(t *testing.T) { } vc.ExpectLog(t, []string{ `ResolveDestinations sharded [value:"0"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6)`, - `ExecuteMultiShard sharded.20-: prefix mid1 suffix ` + + `ExecuteMultiShard sharded.20-: prefix(:_id_0 /* INT64 */, :_c3_0 /* NULL_TYPE */) suffix ` + `{_c3_0: _id_0: type:INT64 value:"1"} true true`, }) } @@ -709,15 +726,12 @@ func TestInsertShardedGeo(t *testing.T) { }}, ks.Tables["t1"], "prefix", - []string{" mid1", " mid2"}, + sqlparser.Values{ + {&sqlparser.Argument{Name: "_region_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}}, + {&sqlparser.Argument{Name: "_region_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_id_1", Type: sqltypes.Int64}}, + }, " suffix", ) - for _, colVindex := range ks.Tables["t1"].ColumnVindexes { - if colVindex.IsPartialVindex() { - continue - } - ins.ColVindexes = append(ins.ColVindexes, colVindex) - } vc := newDMLTestVCursor("-20", "20-") vc.shardForKsid = []string{"20-", "-20"} @@ -731,12 +745,10 @@ func TestInsertShardedGeo(t *testing.T) { `id_0: type:INT64 value:"1" id_1: type:INT64 value:"1" ` + `keyspace_id_0: type:VARBINARY value:"\x01\x16k@\xb4J\xbaK\xd6" keyspace_id_1: type:VARBINARY value:"\xff\x16k@\xb4J\xbaK\xd6" true`, `ResolveDestinations sharded [value:"0" value:"1"] Destinations:DestinationKeyspaceID(01166b40b44aba4bd6),DestinationKeyspaceID(ff166b40b44aba4bd6)`, - `ExecuteMultiShard sharded.20-: prefix mid1 suffix ` + - `{_id_0: type:INT64 value:"1" _id_1: type:INT64 value:"1" ` + - `_region_0: type:INT64 value:"1" _region_1: type:INT64 value:"255"} ` + - `sharded.-20: prefix mid2 suffix ` + - `{_id_0: type:INT64 value:"1" _id_1: type:INT64 value:"1" ` + - `_region_0: type:INT64 value:"1" _region_1: type:INT64 value:"255"} ` + + `ExecuteMultiShard sharded.20-: prefix(:_region_0 /* INT64 */, :_id_0 /* INT64 
*/) suffix ` + + `{_id_0: type:INT64 value:"1" _region_0: type:INT64 value:"1"} ` + + `sharded.-20: prefix(:_region_1 /* INT64 */, :_id_1 /* INT64 */) suffix ` + + `{_id_1: type:INT64 value:"1" _region_1: type:INT64 value:"255"} ` + `true false`, }) } @@ -836,7 +848,12 @@ func TestInsertShardedIgnoreOwned(t *testing.T) { }}, ks.Tables["t1"], "prefix", - []string{" mid1", " mid2", " mid3", " mid4"}, + sqlparser.Values{ + {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c2_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_0", Type: sqltypes.Int64}}, + {&sqlparser.Argument{Name: "_id_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c2_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_1", Type: sqltypes.Int64}}, + {&sqlparser.Argument{Name: "_id_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c2_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_2", Type: sqltypes.Int64}}, + {&sqlparser.Argument{Name: "_id_3", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_3", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c2_3", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_3", Type: sqltypes.Int64}}, + }, " suffix", ) @@ -900,16 +917,10 @@ func TestInsertShardedIgnoreOwned(t *testing.T) { `ResolveDestinations sharded [value:"0" value:"3"] Destinations:DestinationKeyspaceID(00),DestinationKeyspaceID(00)`, // Bind vars for rows 2 & 3 may be missing because they were not sent. 
`ExecuteMultiShard ` + - `sharded.20-: prefix mid1 suffix ` + - `{_c1_0: type:INT64 value:"5" _c1_3: type:INT64 value:"8" ` + - `_c2_0: type:INT64 value:"9" _c2_3: type:INT64 value:"12" ` + - `_c3_0: type:INT64 value:"13" _c3_3: type:INT64 value:"16" ` + - `_id_0: type:INT64 value:"1" _id_3: type:INT64 value:"4"} ` + - `sharded.-20: prefix mid4 suffix ` + - `{_c1_0: type:INT64 value:"5" _c1_3: type:INT64 value:"8" ` + - `_c2_0: type:INT64 value:"9" _c2_3: type:INT64 value:"12" ` + - `_c3_0: type:INT64 value:"13" _c3_3: type:INT64 value:"16" ` + - `_id_0: type:INT64 value:"1" _id_3: type:INT64 value:"4"} ` + + `sharded.20-: prefix(:_id_0 /* INT64 */, :_c1_0 /* INT64 */, :_c2_0 /* INT64 */, :_c3_0 /* INT64 */) suffix ` + + `{_c1_0: type:INT64 value:"5" _c2_0: type:INT64 value:"9" _c3_0: type:INT64 value:"13" _id_0: type:INT64 value:"1"} ` + + `sharded.-20: prefix(:_id_3 /* INT64 */, :_c1_3 /* INT64 */, :_c2_3 /* INT64 */, :_c3_3 /* INT64 */) suffix ` + + `{_c1_3: type:INT64 value:"8" _c2_3: type:INT64 value:"12" _c3_3: type:INT64 value:"16" _id_3: type:INT64 value:"4"} ` + `true false`, }) } @@ -970,7 +981,9 @@ func TestInsertShardedIgnoreOwnedWithNull(t *testing.T) { }}, ks.Tables["t1"], "prefix", - []string{" mid1", " mid2", " mid3", " mid4"}, + sqlparser.Values{ + {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_0", Type: sqltypes.Int64}}, + }, " suffix", ) @@ -996,7 +1009,7 @@ func TestInsertShardedIgnoreOwnedWithNull(t *testing.T) { vc.ExpectLog(t, []string{ `Execute select from from lkp1 where from = :from and toc = :toc from: toc: type:VARBINARY value:"\x16k@\xb4J\xbaK\xd6" false`, `ResolveDestinations sharded [value:"0"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6)`, - `ExecuteMultiShard sharded.-20: prefix mid1 suffix ` + + `ExecuteMultiShard sharded.-20: prefix(:_id_0 /* INT64 */, :_c3_0 /* INT64 */) suffix ` + `{_c3_0: _id_0: type:INT64 value:"1"} true true`, }) } @@ -1084,7 +1097,11 @@ func 
TestInsertShardedUnownedVerify(t *testing.T) { }}, ks.Tables["t1"], "prefix", - []string{" mid1", " mid2", " mid3"}, + sqlparser.Values{ + {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c2_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_0", Type: sqltypes.Int64}}, + {&sqlparser.Argument{Name: "_id_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c2_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_1", Type: sqltypes.Int64}}, + {&sqlparser.Argument{Name: "_id_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c2_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_2", Type: sqltypes.Int64}}, + }, " suffix", ) @@ -1123,16 +1140,15 @@ func TestInsertShardedUnownedVerify(t *testing.T) { // Based on shardForKsid, values returned will be 20-, -20, 20-. 
`ResolveDestinations sharded [value:"0" value:"1" value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, `ExecuteMultiShard ` + - `sharded.20-: prefix mid1, mid3 suffix ` + - `{_c1_0: type:INT64 value:"4" _c1_1: type:INT64 value:"5" _c1_2: type:INT64 value:"6" ` + - `_c2_0: type:INT64 value:"7" _c2_1: type:INT64 value:"8" _c2_2: type:INT64 value:"9" ` + - `_c3_0: type:INT64 value:"10" _c3_1: type:INT64 value:"11" _c3_2: type:INT64 value:"12" ` + - `_id_0: type:INT64 value:"1" _id_1: type:INT64 value:"2" _id_2: type:INT64 value:"3"} ` + - `sharded.-20: prefix mid2 suffix ` + - `{_c1_0: type:INT64 value:"4" _c1_1: type:INT64 value:"5" _c1_2: type:INT64 value:"6" ` + - `_c2_0: type:INT64 value:"7" _c2_1: type:INT64 value:"8" _c2_2: type:INT64 value:"9" ` + - `_c3_0: type:INT64 value:"10" _c3_1: type:INT64 value:"11" _c3_2: type:INT64 value:"12" ` + - `_id_0: type:INT64 value:"1" _id_1: type:INT64 value:"2" _id_2: type:INT64 value:"3"} ` + + `sharded.20-: prefix(:_id_0 /* INT64 */, :_c1_0 /* INT64 */, :_c2_0 /* INT64 */, :_c3_0 /* INT64 */),` + + `(:_id_2 /* INT64 */, :_c1_2 /* INT64 */, :_c2_2 /* INT64 */, :_c3_2 /* INT64 */) suffix ` + + `{_c1_0: type:INT64 value:"4" _c1_2: type:INT64 value:"6" ` + + `_c2_0: type:INT64 value:"7" _c2_2: type:INT64 value:"9" ` + + `_c3_0: type:INT64 value:"10" _c3_2: type:INT64 value:"12" ` + + `_id_0: type:INT64 value:"1" _id_2: type:INT64 value:"3"} ` + + `sharded.-20: prefix(:_id_1 /* INT64 */, :_c1_1 /* INT64 */, :_c2_1 /* INT64 */, :_c3_1 /* INT64 */) suffix ` + + `{_c1_1: type:INT64 value:"5" _c2_1: type:INT64 value:"8" ` + + `_c3_1: type:INT64 value:"11" _id_1: type:INT64 value:"2"} ` + `true false`, }) } @@ -1195,7 +1211,11 @@ func TestInsertShardedIgnoreUnownedVerify(t *testing.T) { }}, ks.Tables["t1"], "prefix", - []string{" mid1", " mid2", " mid3"}, + sqlparser.Values{ + {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}, 
&sqlparser.Argument{Name: "_c3_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "v1", Type: sqltypes.VarChar}}, + {&sqlparser.Argument{Name: "_id_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "v2", Type: sqltypes.VarChar}}, + {&sqlparser.Argument{Name: "_id_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "v3", Type: sqltypes.VarChar}}, + }, " suffix", ) @@ -1216,7 +1236,9 @@ func TestInsertShardedIgnoreUnownedVerify(t *testing.T) { {}, nonemptyResult, } - _, err := ins.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) + _, err := ins.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{ + "v1": sqltypes.StringBindVariable("a"), "v2": sqltypes.StringBindVariable("b"), "v3": sqltypes.StringBindVariable("c"), + }, false) if err != nil { t.Fatal(err) } @@ -1229,12 +1251,10 @@ func TestInsertShardedIgnoreUnownedVerify(t *testing.T) { // Based on shardForKsid, values returned will be 20-, -20. 
`ResolveDestinations sharded [value:"0" value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(4eb190c9a2fa169c)`, `ExecuteMultiShard ` + - `sharded.20-: prefix mid1 suffix ` + - `{_c3_0: type:INT64 value:"10" _c3_2: type:INT64 value:"12" ` + - `_id_0: type:INT64 value:"1" _id_2: type:INT64 value:"3"} ` + - `sharded.-20: prefix mid3 suffix ` + - `{_c3_0: type:INT64 value:"10" _c3_2: type:INT64 value:"12" ` + - `_id_0: type:INT64 value:"1" _id_2: type:INT64 value:"3"} ` + + `sharded.20-: prefix(:_id_0 /* INT64 */, :_c3_0 /* INT64 */, :v1 /* VARCHAR */) suffix ` + + `{_c3_0: type:INT64 value:"10" _id_0: type:INT64 value:"1" v1: type:VARCHAR value:"a"} ` + + `sharded.-20: prefix(:_id_2 /* INT64 */, :_c3_2 /* INT64 */, :v3 /* VARCHAR */) suffix ` + + `{_c3_2: type:INT64 value:"12" _id_2: type:INT64 value:"3" v3: type:VARCHAR value:"c"} ` + `true false`, }) } @@ -1293,7 +1313,9 @@ func TestInsertShardedIgnoreUnownedVerifyFail(t *testing.T) { }}, ks.Tables["t1"], "prefix", - []string{" mid1", " mid2", " mid3"}, + sqlparser.Values{ + {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_0", Type: sqltypes.Int64}}, + }, " suffix", ) @@ -1386,7 +1408,11 @@ func TestInsertShardedUnownedReverseMap(t *testing.T) { }}, ks.Tables["t1"], "prefix", - []string{" mid1", " mid2", " mid3"}, + sqlparser.Values{ + {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_0", Type: sqltypes.Null}, &sqlparser.Argument{Name: "_c2_0", Type: sqltypes.Null}, &sqlparser.Argument{Name: "_c3_0", Type: sqltypes.Null}}, + {&sqlparser.Argument{Name: "_id_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_1", Type: sqltypes.Null}, &sqlparser.Argument{Name: "_c2_1", Type: sqltypes.Null}, &sqlparser.Argument{Name: "_c3_1", Type: sqltypes.Null}}, + {&sqlparser.Argument{Name: "_id_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_2", Type: sqltypes.Null}, &sqlparser.Argument{Name: "_c2_2", 
Type: sqltypes.Null}, &sqlparser.Argument{Name: "_c3_2", Type: sqltypes.Null}}, + }, " suffix", ) @@ -1411,18 +1437,16 @@ func TestInsertShardedUnownedReverseMap(t *testing.T) { } vc.ExpectLog(t, []string{ `ResolveDestinations sharded [value:"0" value:"1" value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, - `ExecuteMultiShard ` + - `sharded.20-: prefix mid1, mid3 suffix ` + - `{_c1_0: type:UINT64 value:"1" _c1_1: type:UINT64 value:"2" _c1_2: type:UINT64 value:"3" ` + - `_c2_0: _c2_1: _c2_2: ` + - `_c3_0: type:UINT64 value:"1" _c3_1: type:UINT64 value:"2" _c3_2: type:UINT64 value:"3" ` + - `_id_0: type:INT64 value:"1" _id_1: type:INT64 value:"2" _id_2: type:INT64 value:"3"} ` + - `sharded.-20: prefix mid2 suffix ` + - `{_c1_0: type:UINT64 value:"1" _c1_1: type:UINT64 value:"2" _c1_2: type:UINT64 value:"3" ` + - `_c2_0: _c2_1: _c2_2: ` + - `_c3_0: type:UINT64 value:"1" _c3_1: type:UINT64 value:"2" _c3_2: type:UINT64 value:"3" ` + - `_id_0: type:INT64 value:"1" _id_1: type:INT64 value:"2" _id_2: type:INT64 value:"3"} ` + - `true false`, + `ExecuteMultiShard sharded.20-: ` + + `prefix(:_id_0 /* INT64 */, :_c1_0 /* NULL_TYPE */, :_c2_0 /* NULL_TYPE */, :_c3_0 /* NULL_TYPE */),` + + `(:_id_2 /* INT64 */, :_c1_2 /* NULL_TYPE */, :_c2_2 /* NULL_TYPE */, :_c3_2 /* NULL_TYPE */) suffix ` + + `{_c1_0: type:UINT64 value:"1" _c1_2: type:UINT64 value:"3" ` + + `_c2_0: _c2_2: ` + + `_c3_0: type:UINT64 value:"1" _c3_2: type:UINT64 value:"3" ` + + `_id_0: type:INT64 value:"1" _id_2: type:INT64 value:"3"} ` + + `sharded.-20: ` + + `prefix(:_id_1 /* INT64 */, :_c1_1 /* NULL_TYPE */, :_c2_1 /* NULL_TYPE */, :_c3_1 /* NULL_TYPE */) suffix ` + + `{_c1_1: type:UINT64 value:"2" _c2_1: _c3_1: type:UINT64 value:"2" _id_1: type:INT64 value:"2"} true false`, }) } @@ -1480,7 +1504,9 @@ func TestInsertShardedUnownedReverseMapSuccess(t *testing.T) { }}, ks.Tables["t1"], "prefix", - []string{" mid1", " 
mid2", " mid3"}, + sqlparser.Values{ + {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_0", Type: sqltypes.Null}}, + }, " suffix", ) @@ -1511,7 +1537,6 @@ func TestInsertSelectSimple(t *testing.T) { Opcode: InsertSelect, Keyspace: ks.Keyspace, Query: "dummy_insert", - Table: ks.Tables["t1"], VindexValueOffset: [][]int{{1}}, Input: &Route{ Query: "dummy_select", @@ -1602,7 +1627,6 @@ func TestInsertSelectOwned(t *testing.T) { Opcode: InsertSelect, Keyspace: ks.Keyspace, Query: "dummy_insert", - Table: ks.Tables["t1"], VindexValueOffset: [][]int{ {1}, // The primary vindex has a single column as sharding key {0}}, // the onecol vindex uses the 'name' column @@ -1699,19 +1723,23 @@ func TestInsertSelectGenerate(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSelect, - Keyspace: ks.Keyspace, - Query: "dummy_insert", - Table: ks.Tables["t1"], - VindexValueOffset: [][]int{ - {1}}, // The primary vindex has a single column as sharding key - Input: &Route{ - Query: "dummy_select", - FieldQuery: "dummy_field_query", - RoutingParameters: &RoutingParameters{ - Opcode: Scatter, - Keyspace: ks.Keyspace}}} + ins := NewInsert( + InsertSelect, + false, + ks.Keyspace, + nil, + ks.Tables["t1"], + "prefix ", + nil, + " suffix") + ins.Query = "dummy_insert" + ins.VindexValueOffset = [][]int{{1}} // The primary vindex has a single column as sharding key + ins.Input = &Route{ + Query: "dummy_select", + FieldQuery: "dummy_field_query", + RoutingParameters: &RoutingParameters{ + Opcode: Scatter, + Keyspace: ks.Keyspace}} ins.Generate = &Generate{ Keyspace: &vindexes.Keyspace{ @@ -1721,8 +1749,6 @@ func TestInsertSelectGenerate(t *testing.T) { Query: "dummy_generate", Offset: 1, } - ins.Prefix = "prefix " - ins.Suffix = " suffix" vc := newDMLTestVCursor("-20", "20-") vc.shardForKsid = []string{"20-", "-20", "20-"} @@ -1795,7 +1821,6 @@ func TestStreamingInsertSelectGenerate(t 
*testing.T) { Opcode: InsertSelect, Keyspace: ks.Keyspace, Query: "dummy_insert", - Table: ks.Tables["t1"], VindexValueOffset: [][]int{ {1}}, // The primary vindex has a single column as sharding key Input: &Route{ @@ -1804,6 +1829,7 @@ func TestStreamingInsertSelectGenerate(t *testing.T) { RoutingParameters: &RoutingParameters{ Opcode: Scatter, Keyspace: ks.Keyspace}}} + ins.ColVindexes = ks.Tables["t1"].ColumnVindexes ins.Generate = &Generate{ Keyspace: &vindexes.Keyspace{ @@ -1891,7 +1917,6 @@ func TestInsertSelectGenerateNotProvided(t *testing.T) { Opcode: InsertSelect, Keyspace: ks.Keyspace, Query: "dummy_insert", - Table: ks.Tables["t1"], VindexValueOffset: [][]int{ {1}}, // The primary vindex has a single column as sharding key Input: &Route{ @@ -1901,6 +1926,7 @@ func TestInsertSelectGenerateNotProvided(t *testing.T) { Opcode: Scatter, Keyspace: ks.Keyspace}}} + ins.ColVindexes = ks.Tables["t1"].ColumnVindexes ins.Generate = &Generate{ Keyspace: &vindexes.Keyspace{ Name: "ks2", @@ -1979,7 +2005,6 @@ func TestStreamingInsertSelectGenerateNotProvided(t *testing.T) { Opcode: InsertSelect, Keyspace: ks.Keyspace, Query: "dummy_insert", - Table: ks.Tables["t1"], VindexValueOffset: [][]int{ {1}}, // The primary vindex has a single column as sharding key Input: &Route{ @@ -1989,6 +2014,7 @@ func TestStreamingInsertSelectGenerateNotProvided(t *testing.T) { Opcode: Scatter, Keyspace: ks.Keyspace}}} + ins.ColVindexes = ks.Tables["t1"].ColumnVindexes ins.Generate = &Generate{ Keyspace: &vindexes.Keyspace{ Name: "ks2", @@ -2077,7 +2103,6 @@ func TestInsertSelectUnowned(t *testing.T) { Opcode: InsertSelect, Keyspace: ks.Keyspace, Query: "dummy_insert", - Table: ks.Tables["t2"], VindexValueOffset: [][]int{ {0}}, // the onecol vindex as unowned lookup sharding column Input: &Route{ @@ -2199,7 +2224,6 @@ func TestInsertSelectShardingCases(t *testing.T) { Opcode: InsertSelect, Keyspace: sks1.Keyspace, Query: "dummy_insert", - Table: sks1.Tables["s1"], Prefix: "prefix ", 
Suffix: " suffix", ColVindexes: sks1.Tables["s1"].ColumnVindexes, @@ -2278,7 +2302,6 @@ func TestInsertSelectShardingCases(t *testing.T) { Opcode: InsertUnsharded, Keyspace: uks1.Keyspace, Query: "dummy_insert", - Table: uks1.Tables["s1"], Prefix: "prefix ", Suffix: " suffix", Input: sRoute, diff --git a/go/vt/vtgate/engine/join.go b/go/vt/vtgate/engine/join.go index a4c7f66b174..ef50389c989 100644 --- a/go/vt/vtgate/engine/join.go +++ b/go/vt/vtgate/engine/join.go @@ -20,6 +20,8 @@ import ( "context" "fmt" "strings" + "sync" + "sync/atomic" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" @@ -114,34 +116,45 @@ func bindvarForType(t querypb.Type) *querypb.BindVariable { // TryStreamExecute performs a streaming exec. func (jn *Join) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - joinVars := make(map[string]*querypb.BindVariable) + var mu sync.Mutex + // We need to use this atomic since we're also reading this + // value outside of it being locked with the mu lock. + // This is still racy, but worst case it means that we may + // retrieve the right hand side fields twice instead of once. 
+ var fieldsSent atomic.Bool + fieldsSent.Store(!wantfields) err := vcursor.StreamExecutePrimitive(ctx, jn.Left, bindVars, wantfields, func(lresult *sqltypes.Result) error { + joinVars := make(map[string]*querypb.BindVariable) for _, lrow := range lresult.Rows { for k, col := range jn.Vars { joinVars[k] = sqltypes.ValueBindVariable(lrow[col]) } - rowSent := false - err := vcursor.StreamExecutePrimitive(ctx, jn.Right, combineVars(bindVars, joinVars), wantfields, func(rresult *sqltypes.Result) error { + var rowSent atomic.Bool + err := vcursor.StreamExecutePrimitive(ctx, jn.Right, combineVars(bindVars, joinVars), !fieldsSent.Load(), func(rresult *sqltypes.Result) error { + // This needs to be locking since it's not safe to just use + // fieldsSent. This is because we can't have a race between + // checking fieldsSent and then actually calling the callback + // and in parallel another goroutine doing the same. That + // can lead to out of order execution of the callback. So the callback + // itself and the check need to be covered by the same lock. + mu.Lock() + defer mu.Unlock() result := &sqltypes.Result{} - if wantfields { - // This code is currently unreachable because the first result - // will always be just the field info, which will cause the outer - // wantfields code path to be executed. But this may change in the future. 
- wantfields = false + if fieldsSent.CompareAndSwap(false, true) { result.Fields = joinFields(lresult.Fields, rresult.Fields, jn.Cols) } for _, rrow := range rresult.Rows { result.Rows = append(result.Rows, joinRows(lrow, rrow, jn.Cols)) } if len(rresult.Rows) != 0 { - rowSent = true + rowSent.Store(true) } return callback(result) }) if err != nil { return err } - if jn.Opcode == LeftJoin && !rowSent { + if jn.Opcode == LeftJoin && !rowSent.Load() { result := &sqltypes.Result{} result.Rows = [][]sqltypes.Value{joinRows( lrow, @@ -151,8 +164,15 @@ func (jn *Join) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars return callback(result) } } - if wantfields { - wantfields = false + // This needs to be locking since it's not safe to just use + // fieldsSent. This is because we can't have a race between + // checking fieldsSent and then actually calling the callback + // and in parallel another goroutine doing the same. That + // can lead to out of order execution of the callback. So the callback + // itself and the check need to be covered by the same lock. 
+ mu.Lock() + defer mu.Unlock() + if fieldsSent.CompareAndSwap(false, true) { for k := range jn.Vars { joinVars[k] = sqltypes.NullBindVariable } @@ -189,8 +209,8 @@ func (jn *Join) GetFields(ctx context.Context, vcursor VCursor, bindVars map[str } // Inputs returns the input primitives for this join -func (jn *Join) Inputs() []Primitive { - return []Primitive{jn.Left, jn.Right} +func (jn *Join) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{jn.Left, jn.Right}, nil } func joinFields(lfields, rfields []*querypb.Field, cols []int) []*querypb.Field { diff --git a/go/vt/vtgate/engine/limit.go b/go/vt/vtgate/engine/limit.go index 70cc7d2fb58..6a66bd56f82 100644 --- a/go/vt/vtgate/engine/limit.go +++ b/go/vt/vtgate/engine/limit.go @@ -154,8 +154,8 @@ func (l *Limit) GetFields(ctx context.Context, vcursor VCursor, bindVars map[str } // Inputs returns the input to limit -func (l *Limit) Inputs() []Primitive { - return []Primitive{l.Input} +func (l *Limit) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{l.Input}, nil } // NeedsTransaction implements the Primitive interface. 
diff --git a/go/vt/vtgate/engine/memory_sort.go b/go/vt/vtgate/engine/memory_sort.go index 91c431f0a61..b1770225211 100644 --- a/go/vt/vtgate/engine/memory_sort.go +++ b/go/vt/vtgate/engine/memory_sort.go @@ -150,8 +150,8 @@ func (ms *MemorySort) GetFields(ctx context.Context, vcursor VCursor, bindVars m } // Inputs returns the input to memory sort -func (ms *MemorySort) Inputs() []Primitive { - return []Primitive{ms.Input} +func (ms *MemorySort) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{ms.Input}, nil } // NeedsTransaction implements the Primitive interface diff --git a/go/vt/vtgate/engine/memory_sort_test.go b/go/vt/vtgate/engine/memory_sort_test.go index 0af0564dc7c..3b53ef11250 100644 --- a/go/vt/vtgate/engine/memory_sort_test.go +++ b/go/vt/vtgate/engine/memory_sort_test.go @@ -20,16 +20,14 @@ import ( "context" "testing" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vtgate/evalengine" - - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vtgate/evalengine" ) func init() { diff --git a/go/vt/vtgate/engine/merge_sort.go b/go/vt/vtgate/engine/merge_sort.go index 1ff4ca7e736..6c694ae9e37 100644 --- a/go/vt/vtgate/engine/merge_sort.go +++ b/go/vt/vtgate/engine/merge_sort.go @@ -21,7 +21,7 @@ import ( "context" "io" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" @@ -167,7 +167,7 @@ func (ms *MergeSort) TryStreamExecute(ctx context.Context, vcursor VCursor, bind if err != nil && ms.ScatterErrorsAsWarnings && len(errs) < len(handles) { // we got errors, but not all shards failed, so we can hide the error and just warn instead partialSuccessScatterQueries.Add(1) - sErr := 
mysql.NewSQLErrorFromError(err).(*mysql.SQLError) + sErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) vcursor.Session().RecordWarning(&querypb.QueryWarning{Code: uint32(sErr.Num), Message: err.Error()}) return nil } diff --git a/go/vt/vtgate/engine/merge_sort_test.go b/go/vt/vtgate/engine/merge_sort_test.go index 74c8d320d2b..e8823e9e6d5 100644 --- a/go/vt/vtgate/engine/merge_sort_test.go +++ b/go/vt/vtgate/engine/merge_sort_test.go @@ -370,7 +370,7 @@ func TestMergeSortDataFailures(t *testing.T) { }} err := testMergeSort(shardResults, orderBy, func(qr *sqltypes.Result) error { return nil }) - want := `strconv.ParseInt: parsing "2.1": invalid syntax` + want := `unparsed tail left after parsing int64 from "2.1": ".1"` require.EqualError(t, err, want) // Create a new VCursor because the previous MergeSort will still @@ -386,7 +386,7 @@ func TestMergeSortDataFailures(t *testing.T) { ), }} err = testMergeSort(shardResults, orderBy, func(qr *sqltypes.Result) error { return nil }) - want = `strconv.ParseInt: parsing "1.1": invalid syntax` + want = `unparsed tail left after parsing int64 from "1.1": ".1"` require.EqualError(t, err, want) } diff --git a/go/vt/vtgate/engine/online_ddl.go b/go/vt/vtgate/engine/online_ddl.go index 215b8d9be5f..c972fee66e9 100644 --- a/go/vt/vtgate/engine/online_ddl.go +++ b/go/vt/vtgate/engine/online_ddl.go @@ -84,8 +84,13 @@ func (v *OnlineDDL) TryExecute(ctx context.Context, vcursor VCursor, bindVars ma }, Rows: [][]sqltypes.Value{}, } + migrationContext := vcursor.Session().GetMigrationContext() + if migrationContext == "" { + // default to @@session_uuid + migrationContext = fmt.Sprintf("vtgate:%s", vcursor.Session().GetSessionUUID()) + } onlineDDLs, err := schema.NewOnlineDDLs(v.GetKeyspaceName(), v.SQL, v.DDL, - v.DDLStrategySetting, fmt.Sprintf("vtgate:%s", vcursor.Session().GetSessionUUID()), "", + v.DDLStrategySetting, migrationContext, "", ) if err != nil { return result, err diff --git 
a/go/vt/vtgate/engine/opcode/constants.go b/go/vt/vtgate/engine/opcode/constants.go index 37b5f9fd288..07a39020f8b 100644 --- a/go/vt/vtgate/engine/opcode/constants.go +++ b/go/vt/vtgate/engine/opcode/constants.go @@ -33,19 +33,25 @@ const ( PulloutIn PulloutNotIn PulloutExists + PulloutNotExists ) var pulloutName = map[PulloutOpcode]string{ - PulloutValue: "PulloutValue", - PulloutIn: "PulloutIn", - PulloutNotIn: "PulloutNotIn", - PulloutExists: "PulloutExists", + PulloutValue: "PulloutValue", + PulloutIn: "PulloutIn", + PulloutNotIn: "PulloutNotIn", + PulloutExists: "PulloutExists", + PulloutNotExists: "PulloutNotExists", } func (code PulloutOpcode) String() string { return pulloutName[code] } +func (code PulloutOpcode) NeedsListArg() bool { + return code == PulloutIn || code == PulloutNotIn +} + // MarshalJSON serializes the PulloutOpcode as a JSON string. // It's used for testing and diagnostics. func (code PulloutOpcode) MarshalJSON() ([]byte, error) { @@ -65,8 +71,10 @@ const ( AggregateCountDistinct AggregateSumDistinct AggregateGtid - AggregateRandom + AggregateAnyValue AggregateCountStar + AggregateGroupConcat + _NumOfOpCodes // This line must be last of the opcodes! 
) var ( @@ -94,16 +102,29 @@ var SupportedAggregates = map[string]AggregateOpcode{ "sum_distinct": AggregateSumDistinct, "vgtid": AggregateGtid, "count_star": AggregateCountStar, - "random": AggregateRandom, + "any_value": AggregateAnyValue, + "group_concat": AggregateGroupConcat, +} + +var AggregateName = map[AggregateOpcode]string{ + AggregateCount: "count", + AggregateSum: "sum", + AggregateMin: "min", + AggregateMax: "max", + AggregateCountDistinct: "count_distinct", + AggregateSumDistinct: "sum_distinct", + AggregateGtid: "vgtid", + AggregateCountStar: "count_star", + AggregateGroupConcat: "group_concat", + AggregateAnyValue: "any_value", } func (code AggregateOpcode) String() string { - for k, v := range SupportedAggregates { - if v == code { - return k - } + name := AggregateName[code] + if name == "" { + name = "ERROR" } - return "ERROR" + return name } // MarshalJSON serializes the AggregateOpcode as a JSON string. @@ -111,3 +132,47 @@ func (code AggregateOpcode) String() string { func (code AggregateOpcode) MarshalJSON() ([]byte, error) { return ([]byte)(fmt.Sprintf("\"%s\"", code.String())), nil } + +// Type returns the opcode return sql type, and a bool telling is we are sure about this type or not +func (code AggregateOpcode) Type(typ querypb.Type) querypb.Type { + switch code { + case AggregateUnassigned: + return sqltypes.Null + case AggregateGroupConcat: + if sqltypes.IsBinary(typ) { + return sqltypes.Blob + } + return sqltypes.Text + case AggregateMax, AggregateMin, AggregateAnyValue: + return typ + case AggregateSumDistinct, AggregateSum: + if sqltypes.IsIntegral(typ) || sqltypes.IsDecimal(typ) { + return sqltypes.Decimal + } + return sqltypes.Float64 + case AggregateCount, AggregateCountStar, AggregateCountDistinct: + return sqltypes.Int64 + case AggregateGtid: + return sqltypes.VarChar + default: + panic(code.String()) // we have a unit test checking we never reach here + } +} + +func (code AggregateOpcode) NeedsComparableValues() bool { + 
switch code { + case AggregateCountDistinct, AggregateSumDistinct, AggregateMin, AggregateMax: + return true + default: + return false + } +} + +func (code AggregateOpcode) IsDistinct() bool { + switch code { + case AggregateCountDistinct, AggregateSumDistinct: + return true + default: + return false + } +} diff --git a/go/vt/vtgate/engine/opcode/constants_test.go b/go/vt/vtgate/engine/opcode/constants_test.go new file mode 100644 index 00000000000..50cfc49a71c --- /dev/null +++ b/go/vt/vtgate/engine/opcode/constants_test.go @@ -0,0 +1,30 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package opcode + +import ( + "testing" + + "vitess.io/vitess/go/sqltypes" +) + +func TestCheckAllAggrOpCodes(t *testing.T) { + // This test is just checking that we never reach the panic when using Type() on valid opcodes + for i := AggregateOpcode(0); i < _NumOfOpCodes; i++ { + i.Type(sqltypes.Null) + } +} diff --git a/go/vt/vtgate/engine/ordered_aggregate.go b/go/vt/vtgate/engine/ordered_aggregate.go index 8b532bab605..e5cd7273aaa 100644 --- a/go/vt/vtgate/engine/ordered_aggregate.go +++ b/go/vt/vtgate/engine/ordered_aggregate.go @@ -22,15 +22,10 @@ import ( "strconv" "vitess.io/vitess/go/mysql/collations" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/sqltypes" - . 
"vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/evalengine" - - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/evalengine" ) var ( @@ -48,15 +43,10 @@ var _ Primitive = (*OrderedAggregate)(nil) // is that the underlying primitive is a scatter select with pre-sorted // rows. type OrderedAggregate struct { - // PreProcess is true if one of the aggregates needs preprocessing. - PreProcess bool `json:",omitempty"` - // Aggregates specifies the aggregation parameters for each // aggregation function: function opcode and input column number. Aggregates []*AggregateParams - AggrOnEngine bool - // GroupByKeys specifies the input values that must be used for // the aggregation key. GroupByKeys []*GroupByParams @@ -90,61 +80,12 @@ func (gbp GroupByParams) String() string { } if sqltypes.IsText(gbp.Type) && gbp.CollationID != collations.Unknown { - collation := gbp.CollationID.Get() - out += " COLLATE " + collation.Name() + out += " COLLATE " + collations.Local().LookupName(gbp.CollationID) } return out } -// AggregateParams specify the parameters for each aggregation. -// It contains the opcode and input column number. -type AggregateParams struct { - Opcode AggregateOpcode - Col int - - // These are used only for distinct opcodes. - KeyCol int - WCol int - WAssigned bool - Type sqltypes.Type - CollationID collations.ID - - Alias string `json:",omitempty"` - Expr sqlparser.Expr - Original *sqlparser.AliasedExpr - - // This is based on the function passed in the select expression and - // not what we use to aggregate at the engine primitive level. 
- OrigOpcode AggregateOpcode -} - -func (ap *AggregateParams) isDistinct() bool { - return ap.Opcode == AggregateCountDistinct || ap.Opcode == AggregateSumDistinct -} - -func (ap *AggregateParams) preProcess() bool { - return ap.Opcode == AggregateCountDistinct || ap.Opcode == AggregateSumDistinct || ap.Opcode == AggregateGtid || ap.Opcode == AggregateCount -} - -func (ap *AggregateParams) String() string { - keyCol := strconv.Itoa(ap.Col) - if ap.WAssigned { - keyCol = fmt.Sprintf("%s|%d", keyCol, ap.WCol) - } - if sqltypes.IsText(ap.Type) && ap.CollationID != collations.Unknown { - keyCol += " COLLATE " + ap.CollationID.Get().Name() - } - dispOrigOp := "" - if ap.OrigOpcode != AggregateUnassigned && ap.OrigOpcode != ap.Opcode { - dispOrigOp = "_" + ap.OrigOpcode.String() - } - if ap.Alias != "" { - return fmt.Sprintf("%s%s(%s) AS %s", ap.Opcode.String(), dispOrigOp, keyCol, ap.Alias) - } - return fmt.Sprintf("%s%s(%s)", ap.Opcode.String(), dispOrigOp, keyCol) -} - // RouteType returns a description of the query routing type used by the primitive func (oa *OrderedAggregate) RouteType() string { return oa.Input.RouteType() @@ -166,196 +107,208 @@ func (oa *OrderedAggregate) SetTruncateColumnCount(count int) { } // TryExecute is a Primitive function. 
-func (oa *OrderedAggregate) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { - qr, err := oa.execute(ctx, vcursor, bindVars, wantfields) +func (oa *OrderedAggregate) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, _ bool) (*sqltypes.Result, error) { + qr, err := oa.execute(ctx, vcursor, bindVars) if err != nil { return nil, err } return qr.Truncate(oa.TruncateColumnCount), nil } -func (oa *OrderedAggregate) execute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { - result, err := vcursor.ExecutePrimitive(ctx, oa.Input, bindVars, wantfields) +func (oa *OrderedAggregate) executeGroupBy(result *sqltypes.Result) (*sqltypes.Result, error) { + if len(result.Rows) < 1 { + return result, nil + } + + out := &sqltypes.Result{ + Fields: result.Fields, + Rows: result.Rows[:0], + } + + var currentKey []sqltypes.Value + var lastRow sqltypes.Row + var err error + for _, row := range result.Rows { + var nextGroup bool + + currentKey, nextGroup, err = oa.nextGroupBy(currentKey, row) + if err != nil { + return nil, err + } + if nextGroup { + out.Rows = append(out.Rows, lastRow) + } + lastRow = row + } + out.Rows = append(out.Rows, lastRow) + return out, nil +} + +func (oa *OrderedAggregate) execute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + result, err := vcursor.ExecutePrimitive( + ctx, + oa.Input, + bindVars, + true, /*wantFields - we need the input fields types to correctly calculate the output types*/ + ) if err != nil { return nil, err } + if len(oa.Aggregates) == 0 { + return oa.executeGroupBy(result) + } + + agg, fields, err := newAggregation(result.Fields, oa.Aggregates) + if err != nil { + return nil, err + } + out := &sqltypes.Result{ - Fields: convertFields(result.Fields, oa.PreProcess, 
oa.Aggregates, oa.AggrOnEngine), + Fields: fields, Rows: make([][]sqltypes.Value, 0, len(result.Rows)), } - // This code is similar to the one in StreamExecute. - var current []sqltypes.Value - var curDistincts []sqltypes.Value + + var currentKey []sqltypes.Value for _, row := range result.Rows { - if current == nil { - current, curDistincts = convertRow(row, oa.PreProcess, oa.Aggregates, oa.AggrOnEngine) - continue - } - equal, err := oa.keysEqual(current, row) + var nextGroup bool + + currentKey, nextGroup, err = oa.nextGroupBy(currentKey, row) if err != nil { return nil, err } - if equal { - current, curDistincts, err = merge(result.Fields, current, row, curDistincts, oa.Aggregates) - if err != nil { - return nil, err - } - continue + if nextGroup { + out.Rows = append(out.Rows, agg.finish()) + agg.reset() } - out.Rows = append(out.Rows, current) - current, curDistincts = convertRow(row, oa.PreProcess, oa.Aggregates, oa.AggrOnEngine) - } - if current != nil { - final, err := convertFinal(current, oa.Aggregates) - if err != nil { + if err := agg.add(row); err != nil { return nil, err } - out.Rows = append(out.Rows, final) } + + if currentKey != nil { + out.Rows = append(out.Rows, agg.finish()) + } + return out, nil } -// TryStreamExecute is a Primitive function. 
-func (oa *OrderedAggregate) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - var current []sqltypes.Value - var curDistincts []sqltypes.Value - var fields []*querypb.Field - +func (oa *OrderedAggregate) executeStreamGroupBy(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) error { cb := func(qr *sqltypes.Result) error { return callback(qr.Truncate(oa.TruncateColumnCount)) } - err := vcursor.StreamExecutePrimitive(ctx, oa.Input, bindVars, wantfields, func(qr *sqltypes.Result) error { - if len(qr.Fields) != 0 { - fields = convertFields(qr.Fields, oa.PreProcess, oa.Aggregates, oa.AggrOnEngine) - if err := cb(&sqltypes.Result{Fields: fields}); err != nil { + var fields []*querypb.Field + var currentKey []sqltypes.Value + var lastRow sqltypes.Row + + visitor := func(qr *sqltypes.Result) error { + var err error + if fields == nil && len(qr.Fields) > 0 { + fields = qr.Fields + if err = cb(&sqltypes.Result{Fields: fields}); err != nil { return err } } - // This code is similar to the one in Execute. for _, row := range qr.Rows { - if current == nil { - current, curDistincts = convertRow(row, oa.PreProcess, oa.Aggregates, oa.AggrOnEngine) - continue - } + var nextGroup bool - equal, err := oa.keysEqual(current, row) + currentKey, nextGroup, err = oa.nextGroupBy(currentKey, row) if err != nil { return err } - if equal { - current, curDistincts, err = merge(fields, current, row, curDistincts, oa.Aggregates) - if err != nil { + if nextGroup { + // this is a new grouping. 
let's yield the old one, and start a new + if err := cb(&sqltypes.Result{Rows: []sqltypes.Row{lastRow}}); err != nil { return err } - continue - } - if err := cb(&sqltypes.Result{Rows: [][]sqltypes.Value{current}}); err != nil { - return err } - current, curDistincts = convertRow(row, oa.PreProcess, oa.Aggregates, oa.AggrOnEngine) + + lastRow = row } return nil - }) + } + + /* we need the input fields types to correctly calculate the output types */ + err := vcursor.StreamExecutePrimitive(ctx, oa.Input, bindVars, true, visitor) if err != nil { return err } - if current != nil { - if err := cb(&sqltypes.Result{Rows: [][]sqltypes.Value{current}}); err != nil { + if lastRow != nil { + if err := cb(&sqltypes.Result{Rows: [][]sqltypes.Value{lastRow}}); err != nil { return err } } return nil } -func convertFields(fields []*querypb.Field, preProcess bool, aggrs []*AggregateParams, aggrOnEngine bool) []*querypb.Field { - if !preProcess { - return fields - } - for _, aggr := range aggrs { - if !aggr.preProcess() && !aggrOnEngine { - continue - } - fields[aggr.Col] = &querypb.Field{ - Name: aggr.Alias, - Type: OpcodeType[aggr.Opcode], - } - if aggr.isDistinct() { - aggr.KeyCol = aggr.Col - } +// TryStreamExecute is a Primitive function. +func (oa *OrderedAggregate) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, _ bool, callback func(*sqltypes.Result) error) error { + if len(oa.Aggregates) == 0 { + return oa.executeStreamGroupBy(ctx, vcursor, bindVars, callback) } - return fields -} -func convertRow(row []sqltypes.Value, preProcess bool, aggregates []*AggregateParams, aggrOnEngine bool) (newRow []sqltypes.Value, curDistincts []sqltypes.Value) { - if !preProcess { - return row, nil + cb := func(qr *sqltypes.Result) error { + return callback(qr.Truncate(oa.TruncateColumnCount)) } - newRow = append(newRow, row...) 
- curDistincts = make([]sqltypes.Value, len(aggregates)) - for index, aggr := range aggregates { - switch aggr.Opcode { - case AggregateCountStar: - newRow[aggr.Col] = countOne - case AggregateCount: - val := countOne - if row[aggr.Col].IsNull() { - val = countZero - } - newRow[aggr.Col] = val - case AggregateCountDistinct: - curDistincts[index] = findComparableCurrentDistinct(row, aggr) - // Type is int64. Ok to call MakeTrusted. - if row[aggr.KeyCol].IsNull() { - newRow[aggr.Col] = countZero - } else { - newRow[aggr.Col] = countOne - } - case AggregateSum: - if !aggrOnEngine { - break + + var agg aggregationState + var fields []*querypb.Field + var currentKey []sqltypes.Value + + visitor := func(qr *sqltypes.Result) error { + var err error + + if agg == nil && len(qr.Fields) != 0 { + agg, fields, err = newAggregation(qr.Fields, oa.Aggregates) + if err != nil { + return err } - if row[aggr.Col].IsNull() { - break + if err = cb(&sqltypes.Result{Fields: fields}); err != nil { + return err } - var err error - newRow[aggr.Col], err = evalengine.Cast(row[aggr.Col], OpcodeType[aggr.Opcode]) + } + + // This code is similar to the one in Execute. + for _, row := range qr.Rows { + var nextGroup bool + + currentKey, nextGroup, err = oa.nextGroupBy(currentKey, row) if err != nil { - newRow[aggr.Col] = sumZero + return err } - case AggregateSumDistinct: - curDistincts[index] = findComparableCurrentDistinct(row, aggr) - var err error - newRow[aggr.Col], err = evalengine.Cast(row[aggr.Col], OpcodeType[aggr.Opcode]) - if err != nil { - newRow[aggr.Col] = sumZero + + if nextGroup { + // this is a new grouping. 
let's yield the old one, and start a new + if err := cb(&sqltypes.Result{Rows: [][]sqltypes.Value{agg.finish()}}); err != nil { + return err + } + + agg.reset() + } + + if err := agg.add(row); err != nil { + return err } - case AggregateGtid: - vgtid := &binlogdatapb.VGtid{} - vgtid.ShardGtids = append(vgtid.ShardGtids, &binlogdatapb.ShardGtid{ - Keyspace: row[aggr.Col-1].ToString(), - Shard: row[aggr.Col+1].ToString(), - Gtid: row[aggr.Col].ToString(), - }) - data, _ := vgtid.MarshalVT() - val, _ := sqltypes.NewValue(sqltypes.VarBinary, data) - newRow[aggr.Col] = val } + return nil + } + + /* we need the input fields types to correctly calculate the output types */ + err := vcursor.StreamExecutePrimitive(ctx, oa.Input, bindVars, true, visitor) + if err != nil { + return err } - return newRow, curDistincts -} -func findComparableCurrentDistinct(row []sqltypes.Value, aggr *AggregateParams) sqltypes.Value { - curDistinct := row[aggr.KeyCol] - if aggr.WAssigned && !curDistinct.IsComparable() { - aggr.KeyCol = aggr.WCol - curDistinct = row[aggr.KeyCol] + if currentKey != nil { + if err := cb(&sqltypes.Result{Rows: [][]sqltypes.Value{agg.finish()}}); err != nil { + return err + } } - return curDistinct + return nil } // GetFields is a Primitive function. 
@@ -364,13 +317,19 @@ func (oa *OrderedAggregate) GetFields(ctx context.Context, vcursor VCursor, bind if err != nil { return nil, err } - qr = &sqltypes.Result{Fields: convertFields(qr.Fields, oa.PreProcess, oa.Aggregates, oa.AggrOnEngine)} + + _, fields, err := newAggregation(qr.Fields, oa.Aggregates) + if err != nil { + return nil, err + } + + qr = &sqltypes.Result{Fields: fields} return qr.Truncate(oa.TruncateColumnCount), nil } // Inputs returns the Primitive input for this aggregation -func (oa *OrderedAggregate) Inputs() []Primitive { - return []Primitive{oa.Input} +func (oa *OrderedAggregate) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{oa.Input}, nil } // NeedsTransaction implements the Primitive interface @@ -378,104 +337,30 @@ func (oa *OrderedAggregate) NeedsTransaction() bool { return oa.Input.NeedsTransaction() } -func (oa *OrderedAggregate) keysEqual(row1, row2 []sqltypes.Value) (bool, error) { +func (oa *OrderedAggregate) nextGroupBy(currentKey, nextRow []sqltypes.Value) (nextKey []sqltypes.Value, nextGroup bool, err error) { + if currentKey == nil { + return nextRow, false, nil + } + for _, gb := range oa.GroupByKeys { - cmp, err := evalengine.NullsafeCompare(row1[gb.KeyCol], row2[gb.KeyCol], gb.CollationID) + cmp, err := evalengine.NullsafeCompare(currentKey[gb.KeyCol], nextRow[gb.KeyCol], gb.CollationID) if err != nil { _, isComparisonErr := err.(evalengine.UnsupportedComparisonError) _, isCollationErr := err.(evalengine.UnsupportedCollationError) if !isComparisonErr && !isCollationErr || gb.WeightStringCol == -1 { - return false, err + return nil, false, err } gb.KeyCol = gb.WeightStringCol - cmp, err = evalengine.NullsafeCompare(row1[gb.WeightStringCol], row2[gb.WeightStringCol], gb.CollationID) + cmp, err = evalengine.NullsafeCompare(currentKey[gb.WeightStringCol], nextRow[gb.WeightStringCol], gb.CollationID) if err != nil { - return false, err + return nil, false, err } } if cmp != 0 { - return false, nil - } - } - return 
true, nil -} - -func merge( - fields []*querypb.Field, - row1, row2 []sqltypes.Value, - curDistincts []sqltypes.Value, - aggregates []*AggregateParams, -) ([]sqltypes.Value, []sqltypes.Value, error) { - result := sqltypes.CopyRow(row1) - for index, aggr := range aggregates { - if aggr.isDistinct() { - if row2[aggr.KeyCol].IsNull() { - continue - } - cmp, err := evalengine.NullsafeCompare(curDistincts[index], row2[aggr.KeyCol], aggr.CollationID) - if err != nil { - return nil, nil, err - } - if cmp == 0 { - continue - } - curDistincts[index] = findComparableCurrentDistinct(row2, aggr) - } - var err error - switch aggr.Opcode { - case AggregateCountStar: - value := row1[aggr.Col] - result[aggr.Col], err = evalengine.NullSafeAdd(value, countOne, fields[aggr.Col].Type) - case AggregateCount: - val := countOne - if row2[aggr.Col].IsNull() { - val = countZero - } - result[aggr.Col], err = evalengine.NullSafeAdd(row1[aggr.Col], val, fields[aggr.Col].Type) - case AggregateSum: - value := row1[aggr.Col] - v2 := row2[aggr.Col] - if value.IsNull() && v2.IsNull() { - result[aggr.Col] = sqltypes.NULL - break - } - result[aggr.Col], err = evalengine.NullSafeAdd(value, v2, fields[aggr.Col].Type) - case AggregateMin: - result[aggr.Col], err = evalengine.Min(row1[aggr.Col], row2[aggr.Col], aggr.CollationID) - case AggregateMax: - result[aggr.Col], err = evalengine.Max(row1[aggr.Col], row2[aggr.Col], aggr.CollationID) - case AggregateCountDistinct: - result[aggr.Col], err = evalengine.NullSafeAdd(row1[aggr.Col], countOne, OpcodeType[aggr.Opcode]) - case AggregateSumDistinct: - result[aggr.Col], err = evalengine.NullSafeAdd(row1[aggr.Col], row2[aggr.Col], OpcodeType[aggr.Opcode]) - case AggregateGtid: - vgtid := &binlogdatapb.VGtid{} - rowBytes, err := row1[aggr.Col].ToBytes() - if err != nil { - return nil, nil, err - } - err = vgtid.UnmarshalVT(rowBytes) - if err != nil { - return nil, nil, err - } - vgtid.ShardGtids = append(vgtid.ShardGtids, &binlogdatapb.ShardGtid{ - Keyspace: 
row2[aggr.Col-1].ToString(), - Shard: row2[aggr.Col+1].ToString(), - Gtid: row2[aggr.Col].ToString(), - }) - data, _ := vgtid.MarshalVT() - val, _ := sqltypes.NewValue(sqltypes.VarBinary, data) - result[aggr.Col] = val - case AggregateRandom: - // we just grab the first value per grouping. no need to do anything more complicated here - default: - return nil, nil, fmt.Errorf("BUG: Unexpected opcode: %v", aggr.Opcode) - } - if err != nil { - return nil, nil, err + return nextRow, true, nil } } - return result, curDistincts, nil + return currentKey, false, nil } func aggregateParamsToString(in any) string { @@ -502,23 +387,3 @@ func (oa *OrderedAggregate) description() PrimitiveDescription { Other: other, } } - -func convertFinal(current []sqltypes.Value, aggregates []*AggregateParams) ([]sqltypes.Value, error) { - result := sqltypes.CopyRow(current) - for _, aggr := range aggregates { - switch aggr.Opcode { - case AggregateGtid: - vgtid := &binlogdatapb.VGtid{} - currentBytes, err := current[aggr.Col].ToBytes() - if err != nil { - return nil, err - } - err = vgtid.UnmarshalVT(currentBytes) - if err != nil { - return nil, err - } - result[aggr.Col] = sqltypes.NewVarChar(vgtid.String()) - } - } - return result, nil -} diff --git a/go/vt/vtgate/engine/ordered_aggregate_test.go b/go/vt/vtgate/engine/ordered_aggregate_test.go index e915c6fd877..8aa0bf3c3b4 100644 --- a/go/vt/vtgate/engine/ordered_aggregate_test.go +++ b/go/vt/vtgate/engine/ordered_aggregate_test.go @@ -28,7 +28,6 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/servenv" @@ -45,7 +44,6 @@ func init() { } func TestOrderedAggregateExecute(t *testing.T) { - assert := assert.New(t) fields := sqltypes.MakeTestFields( "col|count(*)", "varbinary|decimal", @@ -62,16 +60,13 @@ func TestOrderedAggregateExecute(t *testing.T) { } 
oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } result, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) - assert.NoError(err) + assert.NoError(t, err) wantResult := sqltypes.MakeTestResult( fields, @@ -79,16 +74,15 @@ func TestOrderedAggregateExecute(t *testing.T) { "b|2", "c|7", ) - assert.Equal(wantResult, result) + utils.MustMatch(t, wantResult, result) } func TestOrderedAggregateExecuteTruncate(t *testing.T) { - assert := assert.New(t) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col|count(*)|weight_string(col)", - "varchar|decimal|varbinary", + "varchar|int64|varbinary", ), "a|1|A", "A|1|A", @@ -98,33 +92,59 @@ func TestOrderedAggregateExecuteTruncate(t *testing.T) { )}, } + aggr := NewAggregateParam(AggregateSum, 1, "") + aggr.OrigOpcode = AggregateCountStar + oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }}, + Aggregates: []*AggregateParams{aggr}, GroupByKeys: []*GroupByParams{{KeyCol: 2}}, TruncateColumnCount: 2, Input: fp, } result, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) - assert.NoError(err) + assert.NoError(t, err) wantResult := sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col|count(*)", - "varchar|decimal", + "varchar|int64", ), "a|2", "b|2", "C|7", ) - assert.Equal(wantResult, result) + utils.MustMatch(t, wantResult, result) +} + +func TestMinMaxFailsCorrectly(t *testing.T) { + fp := &fakePrimitive{ + results: []*sqltypes.Result{sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "col|weight_string(col)", + "varchar|varbinary", + ), + "a|A", + "A|A", + "b|B", + "C|C", + "c|C", + )}, + } + + aggr := NewAggregateParam(AggregateMax, 0, "") + aggr.WCol = 1 + oa := &ScalarAggregate{ + Aggregates: 
[]*AggregateParams{aggr}, + TruncateColumnCount: 1, + Input: fp, + } + + _, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) + assert.ErrorContains(t, err, "min/max on types that are not comparable is not supported") } func TestOrderedAggregateStreamExecute(t *testing.T) { - assert := assert.New(t) fields := sqltypes.MakeTestFields( "col|count(*)", "varbinary|decimal", @@ -141,10 +161,7 @@ func TestOrderedAggregateStreamExecute(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -154,7 +171,7 @@ func TestOrderedAggregateStreamExecute(t *testing.T) { results = append(results, qr) return nil }) - assert.NoError(err) + assert.NoError(t, err) wantResults := sqltypes.MakeTestStreamingResults( fields, @@ -164,11 +181,10 @@ func TestOrderedAggregateStreamExecute(t *testing.T) { "---", "c|7", ) - assert.Equal(wantResults, results) + utils.MustMatch(t, wantResults, results) } func TestOrderedAggregateStreamExecuteTruncate(t *testing.T) { - assert := assert.New(t) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -184,10 +200,7 @@ func TestOrderedAggregateStreamExecuteTruncate(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, GroupByKeys: []*GroupByParams{{KeyCol: 2}}, TruncateColumnCount: 2, Input: fp, @@ -198,7 +211,7 @@ func TestOrderedAggregateStreamExecuteTruncate(t *testing.T) { results = append(results, qr) return nil }) - assert.NoError(err) + assert.NoError(t, err) wantResults := sqltypes.MakeTestStreamingResults( sqltypes.MakeTestFields( @@ -211,11 +224,10 @@ func TestOrderedAggregateStreamExecuteTruncate(t *testing.T) { "---", "C|7", ) - 
assert.Equal(wantResults, results) + utils.MustMatch(t, wantResults, results) } func TestOrderedAggregateGetFields(t *testing.T) { - assert := assert.New(t) input := sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col|count(*)", @@ -227,34 +239,8 @@ func TestOrderedAggregateGetFields(t *testing.T) { oa := &OrderedAggregate{Input: fp} got, err := oa.GetFields(context.Background(), nil, nil) - assert.NoError(err) - assert.Equal(got, input) -} - -func TestOrderedAggregateGetFieldsTruncate(t *testing.T) { - assert := assert.New(t) - result := sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "col|count(*)|weight_string(col)", - "varchar|decimal|varbinary", - ), - ) - fp := &fakePrimitive{results: []*sqltypes.Result{result}} - - oa := &OrderedAggregate{ - TruncateColumnCount: 2, - Input: fp, - } - - got, err := oa.GetFields(context.Background(), nil, nil) - assert.NoError(err) - wantResult := sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "col|count(*)", - "varchar|decimal", - ), - ) - assert.Equal(wantResult, got) + assert.NoError(t, err) + assert.Equal(t, got, input) } func TestOrderedAggregateInputFail(t *testing.T) { @@ -279,7 +265,6 @@ func TestOrderedAggregateInputFail(t *testing.T) { } func TestOrderedAggregateExecuteCountDistinct(t *testing.T) { - assert := assert.New(t) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -318,23 +303,17 @@ func TestOrderedAggregateExecuteCountDistinct(t *testing.T) { )}, } + aggr1 := NewAggregateParam(AggregateCountDistinct, 1, "count(distinct col2)") + aggr2 := NewAggregateParam(AggregateSum, 2, "") + aggr2.OrigOpcode = AggregateCountStar oa := &OrderedAggregate{ - PreProcess: true, - Aggregates: []*AggregateParams{{ - Opcode: AggregateCountDistinct, - Col: 1, - Alias: "count(distinct col2)", - }, { - // Also add a count(*) - Opcode: AggregateSum, - Col: 2, - }}, + Aggregates: []*AggregateParams{aggr1, aggr2}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: 
fp, } result, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) - assert.NoError(err) + assert.NoError(t, err) wantResult := sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -351,11 +330,10 @@ func TestOrderedAggregateExecuteCountDistinct(t *testing.T) { "h|3|4", "i|2|2", ) - assert.Equal(wantResult, result) + utils.MustMatch(t, wantResult, result) } func TestOrderedAggregateStreamCountDistinct(t *testing.T) { - assert := assert.New(t) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -394,17 +372,13 @@ func TestOrderedAggregateStreamCountDistinct(t *testing.T) { )}, } + aggr2 := NewAggregateParam(AggregateSum, 2, "") + aggr2.OrigOpcode = AggregateCountDistinct + oa := &OrderedAggregate{ - PreProcess: true, - Aggregates: []*AggregateParams{{ - Opcode: AggregateCountDistinct, - Col: 1, - Alias: "count(distinct col2)", - }, { - // Also add a count(*) - Opcode: AggregateSum, - Col: 2, - }}, + Aggregates: []*AggregateParams{ + NewAggregateParam(AggregateCountDistinct, 1, "count(distinct col2)"), + aggr2}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -414,7 +388,7 @@ func TestOrderedAggregateStreamCountDistinct(t *testing.T) { results = append(results, qr) return nil }) - assert.NoError(err) + assert.NoError(t, err) wantResults := sqltypes.MakeTestStreamingResults( sqltypes.MakeTestFields( @@ -439,11 +413,10 @@ func TestOrderedAggregateStreamCountDistinct(t *testing.T) { "-----", "i|2|2", ) - assert.Equal(wantResults, results) + utils.MustMatch(t, wantResults, results) } func TestOrderedAggregateSumDistinctGood(t *testing.T) { - assert := assert.New(t) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -484,22 +457,16 @@ func TestOrderedAggregateSumDistinctGood(t *testing.T) { } oa := &OrderedAggregate{ - PreProcess: true, - Aggregates: []*AggregateParams{{ - Opcode: AggregateSumDistinct, - Col: 1, - Alias: "sum(distinct 
col2)", - }, { - // Also add a count(*) - Opcode: AggregateSum, - Col: 2, - }}, + Aggregates: []*AggregateParams{ + NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct col2)"), + NewAggregateParam(AggregateSum, 2, ""), + }, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } result, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) - assert.NoError(err) + assert.NoError(t, err) wantResult := sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -518,7 +485,7 @@ func TestOrderedAggregateSumDistinctGood(t *testing.T) { ) want := fmt.Sprintf("%v", wantResult.Rows) got := fmt.Sprintf("%v", result.Rows) - assert.Equal(want, got) + assert.Equal(t, want, got) } func TestOrderedAggregateSumDistinctTolerateError(t *testing.T) { @@ -535,12 +502,7 @@ func TestOrderedAggregateSumDistinctTolerateError(t *testing.T) { } oa := &OrderedAggregate{ - PreProcess: true, - Aggregates: []*AggregateParams{{ - Opcode: AggregateSumDistinct, - Col: 1, - Alias: "sum(distinct col2)", - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct col2)")}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -551,7 +513,7 @@ func TestOrderedAggregateSumDistinctTolerateError(t *testing.T) { wantResult := sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col1|sum(distinct col2)", - "varbinary|decimal", + "varbinary|float64", ), "a|1", ) @@ -572,10 +534,7 @@ func TestOrderedAggregateKeysFail(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -605,10 +564,7 @@ func TestOrderedAggregateMergeFail(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, 
Input: fp, } @@ -642,43 +598,6 @@ func TestOrderedAggregateMergeFail(t *testing.T) { require.NoError(t, err) } -func TestMerge(t *testing.T) { - assert := assert.New(t) - oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }, { - Opcode: AggregateSum, - Col: 2, - }, { - Opcode: AggregateMin, - Col: 3, - }, { - Opcode: AggregateMax, - Col: 4, - }}, - } - fields := sqltypes.MakeTestFields( - "a|b|c|d|e", - "int64|int64|decimal|in32|varbinary", - ) - r := sqltypes.MakeTestResult(fields, - "1|2|3.2|3|ab", - "1|3|2.8|2|bc", - ) - - merged, _, err := merge(fields, r.Rows[0], r.Rows[1], nil, oa.Aggregates) - assert.NoError(err) - want := sqltypes.MakeTestResult(fields, "1|5|6.0|2|bc").Rows[0] - assert.Equal(want, merged) - - // swap and retry - merged, _, err = merge(fields, r.Rows[1], r.Rows[0], nil, oa.Aggregates) - assert.NoError(err) - assert.Equal(want, merged) -} - func TestOrderedAggregateExecuteGtid(t *testing.T) { vgtid := binlogdatapb.VGtid{} vgtid.ShardGtids = append(vgtid.ShardGtids, &binlogdatapb.ShardGtid{ @@ -706,12 +625,7 @@ func TestOrderedAggregateExecuteGtid(t *testing.T) { } oa := &OrderedAggregate{ - PreProcess: true, - Aggregates: []*AggregateParams{{ - Opcode: AggregateGtid, - Col: 1, - Alias: "vgtid", - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateGtid, 1, "vgtid")}, TruncateColumnCount: 2, Input: fp, } @@ -726,7 +640,7 @@ func TestOrderedAggregateExecuteGtid(t *testing.T) { ), `ks|shard_gtids:{keyspace:"ks" shard:"-40" gtid:"a"} shard_gtids:{keyspace:"ks" shard:"40-80" gtid:"b"} shard_gtids:{keyspace:"ks" shard:"80-c0" gtid:"c"} shard_gtids:{keyspace:"ks" shard:"c0-" gtid:"d"}`, ) - assert.Equal(t, wantResult, result) + utils.MustMatch(t, wantResult, result) } func TestCountDistinctOnVarchar(t *testing.T) { @@ -744,15 +658,10 @@ func TestCountDistinctOnVarchar(t *testing.T) { )}, } + aggr := NewAggregateParam(AggregateCountDistinct, 1, "count(distinct c2)") + aggr.WCol = 2 oa := 
&OrderedAggregate{ - PreProcess: true, - Aggregates: []*AggregateParams{{ - Opcode: AggregateCountDistinct, - Col: 1, - WCol: 2, - WAssigned: true, - Alias: "count(distinct c2)", - }}, + Aggregates: []*AggregateParams{aggr}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, TruncateColumnCount: 2, @@ -769,7 +678,7 @@ func TestCountDistinctOnVarchar(t *testing.T) { qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) require.NoError(t, err) - assert.Equal(t, want, qr) + utils.MustMatch(t, want, qr) fp.rewind() results := &sqltypes.Result{} @@ -781,7 +690,7 @@ func TestCountDistinctOnVarchar(t *testing.T) { return nil }) require.NoError(t, err) - assert.Equal(t, want, results) + utils.MustMatch(t, want, results) } func TestCountDistinctOnVarcharWithNulls(t *testing.T) { @@ -809,15 +718,10 @@ func TestCountDistinctOnVarcharWithNulls(t *testing.T) { )}, } + aggr := NewAggregateParam(AggregateCountDistinct, 1, "count(distinct c2)") + aggr.WCol = 2 oa := &OrderedAggregate{ - PreProcess: true, - Aggregates: []*AggregateParams{{ - Opcode: AggregateCountDistinct, - Col: 1, - WCol: 2, - WAssigned: true, - Alias: "count(distinct c2)", - }}, + Aggregates: []*AggregateParams{aggr}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, TruncateColumnCount: 2, @@ -836,7 +740,7 @@ func TestCountDistinctOnVarcharWithNulls(t *testing.T) { qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) require.NoError(t, err) - assert.Equal(t, want, qr) + utils.MustMatch(t, want, qr) fp.rewind() results := &sqltypes.Result{} @@ -848,7 +752,7 @@ func TestCountDistinctOnVarcharWithNulls(t *testing.T) { return nil }) require.NoError(t, err) - assert.Equal(t, want, results) + utils.MustMatch(t, want, results) } func TestSumDistinctOnVarcharWithNulls(t *testing.T) { @@ -876,15 +780,10 @@ func TestSumDistinctOnVarcharWithNulls(t *testing.T) { )}, } + aggr := NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct c2)") + aggr.WCol = 2 oa := 
&OrderedAggregate{ - PreProcess: true, - Aggregates: []*AggregateParams{{ - Opcode: AggregateSumDistinct, - Col: 1, - WCol: 2, - WAssigned: true, - Alias: "sum(distinct c2)", - }}, + Aggregates: []*AggregateParams{aggr}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, TruncateColumnCount: 2, @@ -893,7 +792,7 @@ func TestSumDistinctOnVarcharWithNulls(t *testing.T) { want := sqltypes.MakeTestResult( sqltypes.MakeTestFields( "c1|sum(distinct c2)", - "int64|decimal", + "int64|float64", ), `null|0`, `10|0`, @@ -903,7 +802,7 @@ func TestSumDistinctOnVarcharWithNulls(t *testing.T) { qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) require.NoError(t, err) - assert.Equal(t, want, qr) + utils.MustMatch(t, want, qr) fp.rewind() results := &sqltypes.Result{} @@ -915,7 +814,7 @@ func TestSumDistinctOnVarcharWithNulls(t *testing.T) { return nil }) require.NoError(t, err) - assert.Equal(t, want, results) + utils.MustMatch(t, want, results) } func TestMultiDistinct(t *testing.T) { @@ -946,16 +845,10 @@ func TestMultiDistinct(t *testing.T) { } oa := &OrderedAggregate{ - PreProcess: true, - Aggregates: []*AggregateParams{{ - Opcode: AggregateCountDistinct, - Col: 1, - Alias: "count(distinct c2)", - }, { - Opcode: AggregateSumDistinct, - Col: 2, - Alias: "sum(distinct c3)", - }}, + Aggregates: []*AggregateParams{ + NewAggregateParam(AggregateCountDistinct, 1, "count(distinct c2)"), + NewAggregateParam(AggregateSumDistinct, 2, "sum(distinct c3)"), + }, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -974,7 +867,7 @@ func TestMultiDistinct(t *testing.T) { qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) require.NoError(t, err) - assert.Equal(t, want, qr) + utils.MustMatch(t, want, qr) fp.rewind() results := &sqltypes.Result{} @@ -986,11 +879,10 @@ func TestMultiDistinct(t *testing.T) { return nil }) require.NoError(t, err) - assert.Equal(t, want, results) + utils.MustMatch(t, want, results) } func 
TestOrderedAggregateCollate(t *testing.T) { - assert := assert.New(t) fields := sqltypes.MakeTestFields( "col|count(*)", "varchar|decimal", @@ -1012,16 +904,13 @@ func TestOrderedAggregateCollate(t *testing.T) { collationID, _ := collationEnv.LookupID("utf8mb4_0900_ai_ci") oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, GroupByKeys: []*GroupByParams{{KeyCol: 0, CollationID: collationID}}, Input: fp, } result, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) - assert.NoError(err) + assert.NoError(t, err) wantResult := sqltypes.MakeTestResult( fields, @@ -1030,11 +919,10 @@ func TestOrderedAggregateCollate(t *testing.T) { "c|7", "ß|13", ) - assert.Equal(wantResult, result) + utils.MustMatch(t, wantResult, result) } func TestOrderedAggregateCollateAS(t *testing.T) { - assert := assert.New(t) fields := sqltypes.MakeTestFields( "col|count(*)", "varchar|decimal", @@ -1054,16 +942,13 @@ func TestOrderedAggregateCollateAS(t *testing.T) { collationID, _ := collationEnv.LookupID("utf8mb4_0900_as_ci") oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, GroupByKeys: []*GroupByParams{{KeyCol: 0, CollationID: collationID}}, Input: fp, } result, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) - assert.NoError(err) + assert.NoError(t, err) wantResult := sqltypes.MakeTestResult( fields, @@ -1073,11 +958,10 @@ func TestOrderedAggregateCollateAS(t *testing.T) { "c|7", "Ç|4", ) - assert.Equal(wantResult, result) + utils.MustMatch(t, wantResult, result) } func TestOrderedAggregateCollateKS(t *testing.T) { - assert := assert.New(t) fields := sqltypes.MakeTestFields( "col|count(*)", "varchar|decimal", @@ -1098,16 +982,13 @@ func TestOrderedAggregateCollateKS(t *testing.T) { collationID, _ := 
collationEnv.LookupID("utf8mb4_ja_0900_as_cs_ks") oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, GroupByKeys: []*GroupByParams{{KeyCol: 0, CollationID: collationID}}, Input: fp, } result, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) - assert.NoError(err) + assert.NoError(t, err) wantResult := sqltypes.MakeTestResult( fields, @@ -1119,5 +1000,172 @@ func TestOrderedAggregateCollateKS(t *testing.T) { "\xE3\x83\x8F\xE3\x81\xAF|2", "\xE3\x83\x8F\xE3\x83\x8F|1", ) - assert.Equal(wantResult, result) + utils.MustMatch(t, wantResult, result) +} + +// TestGroupConcatWithAggrOnEngine tests group_concat with full aggregation on engine. +func TestGroupConcatWithAggrOnEngine(t *testing.T) { + fields := sqltypes.MakeTestFields( + "c1|c2", + "int64|varchar", + ) + + varbinaryFields := sqltypes.MakeTestFields( + "c1|c2", + "int64|varbinary", + ) + + textOutFields := sqltypes.MakeTestFields( + "c1|group_concat(c2)", + "int64|text", + ) + + var tcases = []struct { + name string + inputResult *sqltypes.Result + expResult *sqltypes.Result + }{{ + name: "multiple grouping keys", + inputResult: sqltypes.MakeTestResult(fields, + "10|a", "10|a", "10|b", + "20|b", + "30|null", + "40|null", "40|c", + "50|d", "50|null", "50|a", "50|", "50|"), + expResult: sqltypes.MakeTestResult(textOutFields, + `10|a,a,b`, + `20|b`, + `30|null`, + `40|c`, + `50|d,a,,`), + }, { + name: "empty result", + inputResult: sqltypes.MakeTestResult(fields), + expResult: sqltypes.MakeTestResult(textOutFields), + }, { + name: "null value for concat", + inputResult: sqltypes.MakeTestResult(fields, + "42|null", "42|null", "42|null"), + expResult: sqltypes.MakeTestResult(textOutFields, + `42|null`), + }, { + name: "concat on varbinary column", + inputResult: sqltypes.MakeTestResult(varbinaryFields, + "42|a", "42|b", "42|c"), + expResult: sqltypes.MakeTestResult( + 
sqltypes.MakeTestFields( + "c1|group_concat(c2)", + "int64|blob", + ), + `42|a,b,c`), + }} + + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + fp := &fakePrimitive{results: []*sqltypes.Result{tcase.inputResult}} + oa := &OrderedAggregate{ + Aggregates: []*AggregateParams{NewAggregateParam(AggregateGroupConcat, 1, "group_concat(c2)")}, + GroupByKeys: []*GroupByParams{{KeyCol: 0}}, + Input: fp, + } + qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) + require.NoError(t, err) + if len(qr.Rows) == 0 { + qr.Rows = nil // just to make the expectation. + // empty slice or nil both are valid and will not cause any issue. + } + utils.MustMatch(t, tcase.expResult, qr) + + fp.rewind() + results := &sqltypes.Result{} + err = oa.TryStreamExecute(context.Background(), &noopVCursor{}, nil, true, func(qr *sqltypes.Result) error { + if qr.Fields != nil { + results.Fields = qr.Fields + } + results.Rows = append(results.Rows, qr.Rows...) + return nil + }) + require.NoError(t, err) + utils.MustMatch(t, tcase.expResult, results) + }) + } +} + +// TestGroupConcat tests group_concat with partial aggregation on engine. 
+func TestGroupConcat(t *testing.T) { + fields := sqltypes.MakeTestFields( + "c1|group_concat(c2)", + "int64|text", + ) + + varbinaryFields := sqltypes.MakeTestFields( + "c1|group_concat(c2)", + "int64|blob", + ) + + var tcases = []struct { + name string + inputResult *sqltypes.Result + expResult *sqltypes.Result + }{{ + name: "multiple grouping keys", + inputResult: sqltypes.MakeTestResult(fields, + "10|a", "10|a", "10|b", + "20|b", + "30|null", + "40|null", "40|c", + "50|d", "50|null", "50|a", "50|", "50|"), + expResult: sqltypes.MakeTestResult(fields, + `10|a,a,b`, + `20|b`, + `30|null`, + `40|c`, + `50|d,a,,`), + }, { + name: "empty result", + inputResult: sqltypes.MakeTestResult(fields), + expResult: sqltypes.MakeTestResult(fields), + }, { + name: "null value for concat", + inputResult: sqltypes.MakeTestResult(fields, + "42|null", "42|null", "42|null"), + expResult: sqltypes.MakeTestResult(fields, + `42|null`), + }, { + name: "concat on varbinary column", + inputResult: sqltypes.MakeTestResult(varbinaryFields, + "42|a", "42|b", "42|c"), + expResult: sqltypes.MakeTestResult(varbinaryFields, + `42|a,b,c`), + }} + + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + fp := &fakePrimitive{results: []*sqltypes.Result{tcase.inputResult}} + oa := &OrderedAggregate{ + Aggregates: []*AggregateParams{NewAggregateParam(AggregateGroupConcat, 1, "")}, + GroupByKeys: []*GroupByParams{{KeyCol: 0}}, + Input: fp, + } + qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) + require.NoError(t, err) + if len(qr.Rows) == 0 { + qr.Rows = nil // just to make the expectation. + // empty slice or nil both are valid and will not cause any issue. 
+ } + assert.Equal(t, tcase.expResult, qr) + + fp.rewind() + results := &sqltypes.Result{} + err = oa.TryStreamExecute(context.Background(), &noopVCursor{}, nil, true, func(qr *sqltypes.Result) error { + if qr.Fields != nil { + results.Fields = qr.Fields + } + results.Rows = append(results.Rows, qr.Rows...) + return nil + }) + require.NoError(t, err) + assert.Equal(t, tcase.expResult, results) + }) + } } diff --git a/go/vt/vtgate/engine/plan_description.go b/go/vt/vtgate/engine/plan_description.go index ef8b4360dfb..72220fda460 100644 --- a/go/vt/vtgate/engine/plan_description.go +++ b/go/vt/vtgate/engine/plan_description.go @@ -28,6 +28,8 @@ import ( "vitess.io/vitess/go/vt/vtgate/vindexes" ) +const inputName = "InputName" + // PrimitiveDescription is used to create a serializable representation of the Primitive tree // Using this structure, all primitives can share json marshalling code, which gives us an uniform output type PrimitiveDescription struct { @@ -41,7 +43,9 @@ type PrimitiveDescription struct { // this is only used in conjunction with TargetDestination TargetTabletType topodatapb.TabletType Other map[string]any - Inputs []PrimitiveDescription + + InputName string + Inputs []PrimitiveDescription } // MarshalJSON serializes the PlanDescription into a JSON representation. 
@@ -51,16 +55,24 @@ func (pd PrimitiveDescription) MarshalJSON() ([]byte, error) { buf := &bytes.Buffer{} buf.WriteString("{") - if err := marshalAdd("", buf, "OperatorType", pd.OperatorType); err != nil { + prepend := "" + if pd.InputName != "" { + if err := marshalAdd(prepend, buf, "InputName", pd.InputName); err != nil { + return nil, err + } + prepend = "," + } + if err := marshalAdd(prepend, buf, "OperatorType", pd.OperatorType); err != nil { return nil, err } + prepend = "," if pd.Variant != "" { - if err := marshalAdd(",", buf, "Variant", pd.Variant); err != nil { + if err := marshalAdd(prepend, buf, "Variant", pd.Variant); err != nil { return nil, err } } if pd.Keyspace != nil { - if err := marshalAdd(",", buf, "Keyspace", pd.Keyspace); err != nil { + if err := marshalAdd(prepend, buf, "Keyspace", pd.Keyspace); err != nil { return nil, err } } @@ -68,12 +80,12 @@ func (pd PrimitiveDescription) MarshalJSON() ([]byte, error) { s := pd.TargetDestination.String() dest := s[11:] // TODO: All these start with Destination. 
We should fix that instead if trimming it out here - if err := marshalAdd(",", buf, "TargetDestination", dest); err != nil { + if err := marshalAdd(prepend, buf, "TargetDestination", dest); err != nil { return nil, err } } if pd.TargetTabletType != topodatapb.TabletType_UNKNOWN { - if err := marshalAdd(",", buf, "TargetTabletType", pd.TargetTabletType.String()); err != nil { + if err := marshalAdd(prepend, buf, "TargetTabletType", pd.TargetTabletType.String()); err != nil { return nil, err } } @@ -83,7 +95,7 @@ func (pd PrimitiveDescription) MarshalJSON() ([]byte, error) { } if len(pd.Inputs) > 0 { - if err := marshalAdd(",", buf, "Inputs", pd.Inputs); err != nil { + if err := marshalAdd(prepend, buf, "Inputs", pd.Inputs); err != nil { return nil, err } } @@ -172,11 +184,25 @@ func marshalAdd(prepend string, buf *bytes.Buffer, name string, obj any) error { func PrimitiveToPlanDescription(in Primitive) PrimitiveDescription { this := in.description() - for _, input := range in.Inputs() { - this.Inputs = append(this.Inputs, PrimitiveToPlanDescription(input)) + inputs, infos := in.Inputs() + for idx, input := range inputs { + pd := PrimitiveToPlanDescription(input) + if infos != nil { + for k, v := range infos[idx] { + if k == inputName { + pd.InputName = v.(string) + continue + } + if pd.Other == nil { + pd.Other = map[string]any{} + } + pd.Other[k] = v + } + } + this.Inputs = append(this.Inputs, pd) } - if len(in.Inputs()) == 0 { + if len(inputs) == 0 { this.Inputs = []PrimitiveDescription{} } diff --git a/go/vt/vtgate/engine/plan_description_test.go b/go/vt/vtgate/engine/plan_description_test.go index 0d985b9b606..b986cea59cf 100644 --- a/go/vt/vtgate/engine/plan_description_test.go +++ b/go/vt/vtgate/engine/plan_description_test.go @@ -50,7 +50,7 @@ func TestCreateRoutePlanDescription(t *testing.T) { } func createRoute() *Route { - hash, _ := vindexes.NewHash("vindex name", nil) + hash, _ := vindexes.CreateVindex("hash", "vindex name", nil) return &Route{ 
RoutingParameters: &RoutingParameters{ Opcode: Scatter, diff --git a/go/vt/vtgate/engine/primitive.go b/go/vt/vtgate/engine/primitive.go index ddded092887..44654f2850d 100644 --- a/go/vt/vtgate/engine/primitive.go +++ b/go/vt/vtgate/engine/primitive.go @@ -29,6 +29,7 @@ import ( binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" ) @@ -54,7 +55,6 @@ type ( // if the max memory rows override directive is set to true ExceedsMaxMemoryRows(numRows int) bool - // V3 functions. Execute(ctx context.Context, method string, query string, bindVars map[string]*querypb.BindVariable, rollbackOnError bool, co vtgatepb.CommitOrder) (*sqltypes.Result, error) AutocommitApproval() bool @@ -111,6 +111,8 @@ type ( ShowExec(ctx context.Context, command sqlparser.ShowCommandType, filter *sqlparser.ShowFilter) (*sqltypes.Result, error) // SetExec takes in k,v pair and use executor to set them in topo metadata. SetExec(ctx context.Context, name string, value string) error + // ThrottleApp sets a ThrottlerappRule in topo + ThrottleApp(ctx context.Context, throttleAppRule *topodatapb.ThrottledAppRule) error // CanUseSetVar returns true if system_settings can use SET_VAR hint. CanUseSetVar() bool @@ -154,6 +156,8 @@ type ( SetDDLStrategy(string) GetDDLStrategy() string + SetMigrationContext(string) + GetMigrationContext() string GetSessionUUID() string @@ -199,6 +203,8 @@ type ( // InTransaction returns true if the session has already opened transaction or // will start a transaction on the query execution. 
InTransaction() bool + + Commit(ctx context.Context) error } // Match is used to check if a Primitive matches @@ -218,8 +224,9 @@ type ( TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error - // Inputs is a slice containing the inputs to this Primitive - Inputs() []Primitive + // Inputs is a slice containing the inputs to this Primitive. + // The returned map has additional information about the inputs, that is used in the description. + Inputs() ([]Primitive, []map[string]any) // description is the description, sans the inputs, of this Primitive. // to get the plan description with all children, use PrimitiveToPlanDescription() @@ -234,12 +241,6 @@ type ( // txNeeded is a default implementation for Primitives that need transaction handling txNeeded struct{} - - // Gen4Comparer interfaces all Primitive used to compare Gen4 with other planners (V3, MySQL, ...). - Gen4Comparer interface { - Primitive - GetGen4Primitive() Primitive - } ) // Find will return the first Primitive that matches the evaluate function. 
If no match is found, nil will be returned @@ -247,7 +248,8 @@ func Find(isMatch Match, start Primitive) Primitive { if isMatch(start) { return start } - for _, input := range start.Inputs() { + inputs, _ := start.Inputs() + for _, input := range inputs { result := Find(isMatch, input) if result != nil { return result @@ -262,8 +264,8 @@ func Exists(m Match, p Primitive) bool { } // Inputs implements no inputs -func (noInputs) Inputs() []Primitive { - return nil +func (noInputs) Inputs() ([]Primitive, []map[string]any) { + return nil, nil } func (noTxNeeded) NeedsTransaction() bool { diff --git a/go/vt/vtgate/engine/projection.go b/go/vt/vtgate/engine/projection.go index 5438b8144a6..ad1be62ea53 100644 --- a/go/vt/vtgate/engine/projection.go +++ b/go/vt/vtgate/engine/projection.go @@ -165,8 +165,8 @@ func (p *Projection) evalFields(env *evalengine.ExpressionEnv, infields []*query } // Inputs implements the Primitive interface -func (p *Projection) Inputs() []Primitive { - return []Primitive{p.Input} +func (p *Projection) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{p.Input}, nil } // description implements the Primitive interface diff --git a/go/vt/vtgate/engine/rename_fields.go b/go/vt/vtgate/engine/rename_fields.go index 3eb1917abdd..e1dc7cbbb43 100644 --- a/go/vt/vtgate/engine/rename_fields.go +++ b/go/vt/vtgate/engine/rename_fields.go @@ -110,8 +110,8 @@ func (r *RenameFields) GetFields(ctx context.Context, vcursor VCursor, bindVars } // Inputs implements the primitive interface -func (r *RenameFields) Inputs() []Primitive { - return []Primitive{r.Input} +func (r *RenameFields) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{r.Input}, nil } // description implements the primitive interface diff --git a/go/vt/vtgate/engine/replace_variables.go b/go/vt/vtgate/engine/replace_variables.go index 5667e9bae10..66375266427 100644 --- a/go/vt/vtgate/engine/replace_variables.go +++ b/go/vt/vtgate/engine/replace_variables.go @@ -77,8 
+77,8 @@ func (r *ReplaceVariables) GetFields(ctx context.Context, vcursor VCursor, bindV } // Inputs implements the Primitive interface -func (r *ReplaceVariables) Inputs() []Primitive { - return []Primitive{r.Input} +func (r *ReplaceVariables) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{r.Input}, nil } // description implements the Primitive interface diff --git a/go/vt/vtgate/engine/route.go b/go/vt/vtgate/engine/route.go index bcbd51609ef..5604fc33ada 100644 --- a/go/vt/vtgate/engine/route.go +++ b/go/vt/vtgate/engine/route.go @@ -19,15 +19,16 @@ package engine import ( "context" "fmt" + "slices" "sort" "strconv" "strings" "time" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/key" @@ -89,16 +90,6 @@ type Route struct { noTxNeeded } -// NewSimpleRoute creates a Route with the bare minimum of parameters. -func NewSimpleRoute(opcode Opcode, keyspace *vindexes.Keyspace) *Route { - return &Route{ - RoutingParameters: &RoutingParameters{ - Opcode: opcode, - Keyspace: keyspace, - }, - } -} - // NewRoute creates a Route. func NewRoute(opcode Opcode, keyspace *vindexes.Keyspace, query, fieldQuery string) *Route { return &Route{ @@ -120,8 +111,6 @@ type OrderByParams struct { WeightStringCol int Desc bool StarColFixedIndex int - // v3 specific boolean. 
Used to also add weight strings originating from GroupBys to the Group by clause - FromGroupBy bool // Type for knowing if the collation is relevant Type querypb.Type // Collation ID for comparison using collation @@ -144,8 +133,7 @@ func (obp OrderByParams) String() string { } if sqltypes.IsText(obp.Type) && obp.CollationID != collations.Unknown { - collation := obp.CollationID.Get() - val += " COLLATE " + collation.Name() + val += " COLLATE " + collations.Local().LookupName(obp.CollationID) } return val } @@ -262,7 +250,7 @@ func (route *Route) executeShards( partialSuccessScatterQueries.Add(1) for _, err := range errs { - serr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) + serr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) vcursor.Session().RecordWarning(&querypb.QueryWarning{Code: uint32(serr.Num), Message: err.Error()}) } } @@ -351,7 +339,7 @@ func (route *Route) streamExecuteShards( } partialSuccessScatterQueries.Add(1) for _, err := range errs { - sErr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) + sErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) vcursor.Session().RecordWarning(&querypb.QueryWarning{Code: uint32(sErr.Num), Message: err.Error()}) } } @@ -431,10 +419,10 @@ func (route *Route) sort(in *sqltypes.Result) (*sqltypes.Result, error) { comparers := extractSlices(route.OrderBy) - sort.Slice(out.Rows, func(i, j int) bool { + slices.SortFunc(out.Rows, func(a, b sqltypes.Row) int { var cmp int if err != nil { - return true + return -1 } // If there are any errors below, the function sets // the external err and returns true. Once err is set, @@ -442,16 +430,15 @@ func (route *Route) sort(in *sqltypes.Result) (*sqltypes.Result, error) { // Slice think that all elements are in the correct // order and return more quickly. 
for _, c := range comparers { - cmp, err = c.compare(out.Rows[i], out.Rows[j]) + cmp, err = c.compare(a, b) if err != nil { - return true + return -1 } - if cmp == 0 { - continue + if cmp != 0 { + return cmp } - return cmp < 0 } - return true + return 0 }) return out.Truncate(route.TruncateColumnCount), err diff --git a/go/vt/vtgate/engine/route_test.go b/go/vt/vtgate/engine/route_test.go index 3f9a415a069..13fb0be656b 100644 --- a/go/vt/vtgate/engine/route_test.go +++ b/go/vt/vtgate/engine/route_test.go @@ -24,21 +24,17 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/sqlparser" - + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/sqltypes" - querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -236,7 +232,7 @@ func TestSelectScatter(t *testing.T) { } func TestSelectEqualUnique(t *testing.T) { - vindex, _ := vindexes.NewHash("", nil) + vindex, _ := vindexes.CreateVindex("hash", "", nil) sel := NewRoute( EqualUnique, &vindexes.Keyspace{ @@ -274,7 +270,7 @@ func TestSelectEqualUnique(t *testing.T) { } func TestSelectNone(t *testing.T) { - vindex, _ := vindexes.NewHash("", nil) + vindex, _ := vindexes.CreateVindex("hash", "", nil) sel := NewRoute( None, &vindexes.Keyspace{ @@ -325,7 +321,7 @@ func TestSelectNone(t *testing.T) { } func TestSelectEqualUniqueScatter(t *testing.T) { - vindex, _ := vindexes.NewLookupUnique("", map[string]string{ + vindex, _ := vindexes.CreateVindex("lookup_unique", "", map[string]string{ "table": "lkp", "from": "from", "to": "toc", @@ 
-368,7 +364,7 @@ func TestSelectEqualUniqueScatter(t *testing.T) { } func TestSelectEqual(t *testing.T) { - vindex, _ := vindexes.NewLookup("", map[string]string{ + vindex, _ := vindexes.CreateVindex("lookup", "", map[string]string{ "table": "lkp", "from": "from", "to": "toc", @@ -421,7 +417,7 @@ func TestSelectEqual(t *testing.T) { } func TestSelectEqualNoRoute(t *testing.T) { - vindex, _ := vindexes.NewLookupUnique("", map[string]string{ + vindex, _ := vindexes.CreateVindex("lookup_unique", "", map[string]string{ "table": "lkp", "from": "from", "to": "toc", @@ -485,7 +481,7 @@ func TestSelectEqualNoRoute(t *testing.T) { } func TestINUnique(t *testing.T) { - vindex, _ := vindexes.NewHash("", nil) + vindex, _ := vindexes.CreateVindex("hash", "", nil) sel := NewRoute( IN, &vindexes.Keyspace{ @@ -530,7 +526,7 @@ func TestINUnique(t *testing.T) { } func TestINNonUnique(t *testing.T) { - vindex, _ := vindexes.NewLookup("", map[string]string{ + vindex, _ := vindexes.CreateVindex("lookup", "", map[string]string{ "table": "lkp", "from": "from", "to": "toc", @@ -597,7 +593,7 @@ func TestINNonUnique(t *testing.T) { } func TestMultiEqual(t *testing.T) { - vindex, _ := vindexes.NewHash("", nil) + vindex, _ := vindexes.CreateVindex("hash", "", nil) sel := NewRoute( MultiEqual, &vindexes.Keyspace{ @@ -640,7 +636,7 @@ func TestMultiEqual(t *testing.T) { } func TestSelectLike(t *testing.T) { - subshard, _ := vindexes.NewCFC("cfc", map[string]string{"hash": "md5", "offsets": "[1,2]"}) + subshard, _ := vindexes.CreateVindex("cfc", "cfc", map[string]string{"hash": "md5", "offsets": "[1,2]"}) vindex := subshard.(*vindexes.CFC).PrefixVindex() vc := &loggingVCursor{ // we have shards '-0c80', '0c80-0d', '0d-40', '40-80', '80-' @@ -816,7 +812,7 @@ func TestSelectReference(t *testing.T) { } func TestRouteGetFields(t *testing.T) { - vindex, _ := vindexes.NewLookupUnique("", map[string]string{ + vindex, _ := vindexes.CreateVindex("lookup_unique", "", map[string]string{ "table": "lkp", 
"from": "from", "to": "toc", @@ -1441,7 +1437,7 @@ func TestExecFail(t *testing.T) { expectResult(t, "sel.Execute", result, defaultSelectResult) vc.Rewind() - vc.resultErr = mysql.NewSQLError(mysql.ERQueryInterrupted, "", "query timeout -20") + vc.resultErr = sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, "", "query timeout -20") // test when there is order by column sel.OrderBy = []OrderByParams{{ WeightStringCol: -1, @@ -1449,12 +1445,12 @@ func TestExecFail(t *testing.T) { }} _, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) require.NoError(t, err, "unexpected ScatterErrorsAsWarnings error %v", err) - vc.ExpectWarnings(t, []*querypb.QueryWarning{{Code: uint32(mysql.ERQueryInterrupted), Message: "query timeout -20 (errno 1317) (sqlstate HY000)"}}) + vc.ExpectWarnings(t, []*querypb.QueryWarning{{Code: uint32(sqlerror.ERQueryInterrupted), Message: "query timeout -20 (errno 1317) (sqlstate HY000)"}}) }) } func TestSelectEqualUniqueMultiColumnVindex(t *testing.T) { - vindex, _ := vindexes.NewRegionExperimental("", map[string]string{"region_bytes": "1"}) + vindex, _ := vindexes.CreateVindex("region_experimental", "", map[string]string{"region_bytes": "1"}) sel := NewRoute( EqualUnique, &vindexes.Keyspace{ @@ -1493,7 +1489,7 @@ func TestSelectEqualUniqueMultiColumnVindex(t *testing.T) { } func TestSelectEqualMultiColumnVindex(t *testing.T) { - vindex, _ := vindexes.NewRegionExperimental("", map[string]string{"region_bytes": "1"}) + vindex, _ := vindexes.CreateVindex("region_experimental", "", map[string]string{"region_bytes": "1"}) vc := &loggingVCursor{ shards: []string{"-20", "20-"}, shardForKsid: []string{"-20", "20-"}, @@ -1530,7 +1526,7 @@ func TestSelectEqualMultiColumnVindex(t *testing.T) { } func TestINMultiColumnVindex(t *testing.T) { - vindex, _ := vindexes.NewRegionExperimental("", map[string]string{"region_bytes": "1"}) + vindex, _ := vindexes.CreateVindex("region_experimental", "", map[string]string{"region_bytes": "1"}) sel 
:= NewRoute( IN, &vindexes.Keyspace{ @@ -1576,7 +1572,7 @@ func TestINMultiColumnVindex(t *testing.T) { } func TestINMixedMultiColumnComparision(t *testing.T) { - vindex, _ := vindexes.NewRegionExperimental("", map[string]string{"region_bytes": "1"}) + vindex, _ := vindexes.CreateVindex("region_experimental", "", map[string]string{"region_bytes": "1"}) sel := NewRoute( IN, &vindexes.Keyspace{ @@ -1619,7 +1615,7 @@ func TestINMixedMultiColumnComparision(t *testing.T) { } func TestMultiEqualMultiCol(t *testing.T) { - vindex, _ := vindexes.NewRegionExperimental("", map[string]string{"region_bytes": "1"}) + vindex, _ := vindexes.CreateVindex("region_experimental", "", map[string]string{"region_bytes": "1"}) sel := NewRoute( MultiEqual, &vindexes.Keyspace{Name: "ks", Sharded: true}, diff --git a/go/vt/vtgate/engine/scalar_aggregation.go b/go/vt/vtgate/engine/scalar_aggregation.go index 51e13aac8be..85e90420ff9 100644 --- a/go/vt/vtgate/engine/scalar_aggregation.go +++ b/go/vt/vtgate/engine/scalar_aggregation.go @@ -22,20 +22,12 @@ import ( "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" - . "vitess.io/vitess/go/vt/vtgate/engine/opcode" ) var _ Primitive = (*ScalarAggregate)(nil) // ScalarAggregate is a primitive used to do aggregations without grouping keys type ScalarAggregate struct { - // PreProcess is true if one of the aggregates needs preprocessing. - PreProcess bool `json:",omitempty"` - - AggrOnEngine bool - // Aggregates specifies the aggregation parameters for each // aggregation function: function opcode and input column number. 
Aggregates []*AggregateParams @@ -71,7 +63,13 @@ func (sa *ScalarAggregate) GetFields(ctx context.Context, vcursor VCursor, bindV if err != nil { return nil, err } - qr = &sqltypes.Result{Fields: convertFields(qr.Fields, sa.PreProcess, sa.Aggregates, sa.AggrOnEngine)} + + _, fields, err := newAggregation(qr.Fields, sa.Aggregates) + if err != nil { + return nil, err + } + + qr = &sqltypes.Result{Fields: fields} return qr.Truncate(sa.TruncateColumnCount), nil } @@ -86,38 +84,22 @@ func (sa *ScalarAggregate) TryExecute(ctx context.Context, vcursor VCursor, bind if err != nil { return nil, err } - out := &sqltypes.Result{ - Fields: convertFields(result.Fields, sa.PreProcess, sa.Aggregates, sa.AggrOnEngine), + + agg, fields, err := newAggregation(result.Fields, sa.Aggregates) + if err != nil { + return nil, err } - var resultRow []sqltypes.Value - var curDistincts []sqltypes.Value for _, row := range result.Rows { - if resultRow == nil { - resultRow, curDistincts = convertRow(row, sa.PreProcess, sa.Aggregates, sa.AggrOnEngine) - continue - } - resultRow, curDistincts, err = merge(result.Fields, resultRow, row, curDistincts, sa.Aggregates) - if err != nil { + if err := agg.add(row); err != nil { return nil, err } } - if resultRow == nil { - // When doing aggregation without grouping keys, we need to produce a single row containing zero-value for the - // different aggregation functions - resultRow, err = sa.createEmptyRow() - if err != nil { - return nil, err - } - } else { - resultRow, err = convertFinal(resultRow, sa.Aggregates) - if err != nil { - return nil, err - } + out := &sqltypes.Result{ + Fields: fields, + Rows: [][]sqltypes.Value{agg.finish()}, } - - out.Rows = [][]sqltypes.Value{resultRow} return out.Truncate(sa.TruncateColumnCount), nil } @@ -126,11 +108,11 @@ func (sa *ScalarAggregate) TryStreamExecute(ctx context.Context, vcursor VCursor cb := func(qr *sqltypes.Result) error { return callback(qr.Truncate(sa.TruncateColumnCount)) } - var current 
[]sqltypes.Value - var curDistincts []sqltypes.Value - var fields []*querypb.Field - fieldsSent := false + var mu sync.Mutex + var agg aggregationState + var fields []*querypb.Field + fieldsSent := !wantfields err := vcursor.StreamExecutePrimitive(ctx, sa.Input, bindVars, wantfields, func(result *sqltypes.Result) error { // as the underlying primitive call is not sync @@ -138,23 +120,23 @@ func (sa *ScalarAggregate) TryStreamExecute(ctx context.Context, vcursor VCursor // for correct aggregation. mu.Lock() defer mu.Unlock() - if len(result.Fields) != 0 && !fieldsSent { - fields = convertFields(result.Fields, sa.PreProcess, sa.Aggregates, sa.AggrOnEngine) + + if agg == nil && len(result.Fields) != 0 { + var err error + agg, fields, err = newAggregation(result.Fields, sa.Aggregates) + if err != nil { + return err + } + } + if !fieldsSent { if err := cb(&sqltypes.Result{Fields: fields}); err != nil { return err } fieldsSent = true } - // this code is very similar to the TryExecute method for _, row := range result.Rows { - if current == nil { - current, curDistincts = convertRow(row, sa.PreProcess, sa.Aggregates, sa.AggrOnEngine) - continue - } - var err error - current, curDistincts, err = merge(fields, current, row, curDistincts, sa.Aggregates) - if err != nil { + if err := agg.add(row); err != nil { return err } } @@ -164,62 +146,12 @@ func (sa *ScalarAggregate) TryStreamExecute(ctx context.Context, vcursor VCursor return err } - if current == nil { - // When doing aggregation without grouping keys, we need to produce a single row containing zero-value for the - // different aggregation functions - current, err = sa.createEmptyRow() - if err != nil { - return err - } - } else { - current, err = convertFinal(current, sa.Aggregates) - if err != nil { - return err - } - } - - return cb(&sqltypes.Result{Rows: [][]sqltypes.Value{current}}) -} - -// creates the empty row for the case when we are missing grouping keys and have empty input table -func (sa *ScalarAggregate) 
createEmptyRow() ([]sqltypes.Value, error) { - out := make([]sqltypes.Value, len(sa.Aggregates)) - for i, aggr := range sa.Aggregates { - op := aggr.Opcode - if aggr.OrigOpcode != AggregateUnassigned { - op = aggr.OrigOpcode - } - value, err := createEmptyValueFor(op) - if err != nil { - return nil, err - } - out[i] = value - } - return out, nil -} - -func createEmptyValueFor(opcode AggregateOpcode) (sqltypes.Value, error) { - switch opcode { - case - AggregateCountDistinct, - AggregateCount, - AggregateCountStar: - return countZero, nil - case - AggregateSumDistinct, - AggregateSum, - AggregateMin, - AggregateMax, - AggregateRandom: - return sqltypes.NULL, nil - - } - return sqltypes.NULL, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "unknown aggregation %v", opcode) + return cb(&sqltypes.Result{Rows: [][]sqltypes.Value{agg.finish()}}) } // Inputs implements the Primitive interface -func (sa *ScalarAggregate) Inputs() []Primitive { - return []Primitive{sa.Input} +func (sa *ScalarAggregate) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{sa.Input}, nil } // description implements the Primitive interface diff --git a/go/vt/vtgate/engine/scalar_aggregation_test.go b/go/vt/vtgate/engine/scalar_aggregation_test.go index ec2fa06c970..3329fc72d39 100644 --- a/go/vt/vtgate/engine/scalar_aggregation_test.go +++ b/go/vt/vtgate/engine/scalar_aggregation_test.go @@ -25,6 +25,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" . 
"vitess.io/vitess/go/vt/vtgate/engine/opcode" ) @@ -49,7 +50,7 @@ func TestEmptyRows(outer *testing.T) { }, { opcode: AggregateSum, expectedVal: "null", - expectedTyp: "int64", + expectedTyp: "decimal", }, { opcode: AggregateSum, expectedVal: "0", @@ -67,7 +68,6 @@ func TestEmptyRows(outer *testing.T) { for _, test := range testCases { outer.Run(test.opcode.String(), func(t *testing.T) { - assert := assert.New(t) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -79,7 +79,6 @@ func TestEmptyRows(outer *testing.T) { } oa := &ScalarAggregate{ - PreProcess: true, Aggregates: []*AggregateParams{{ Opcode: test.opcode, Col: 0, @@ -90,7 +89,7 @@ func TestEmptyRows(outer *testing.T) { } result, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) - assert.NoError(err) + assert.NoError(t, err) wantResult := sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -99,13 +98,12 @@ func TestEmptyRows(outer *testing.T) { ), test.expectedVal, ) - assert.Equal(wantResult, result) + utils.MustMatch(t, wantResult, result) }) } } func TestScalarAggregateStreamExecute(t *testing.T) { - assert := assert.New(t) fields := sqltypes.MakeTestFields( "col|weight_string(col)", "uint64|varbinary", @@ -127,7 +125,6 @@ func TestScalarAggregateStreamExecute(t *testing.T) { }}, Input: fp, TruncateColumnCount: 1, - PreProcess: true, } var results []*sqltypes.Result @@ -135,17 +132,16 @@ func TestScalarAggregateStreamExecute(t *testing.T) { results = append(results, qr) return nil }) - assert.NoError(err) + assert.NoError(t, err) // one for the fields, and one for the actual aggregation result require.EqualValues(t, 2, len(results), "number of results") got := fmt.Sprintf("%v", results[1].Rows) - assert.Equal("[[UINT64(4)]]", got) + assert.Equal(t, "[[DECIMAL(4)]]", got) } // TestScalarAggregateExecuteTruncate checks if truncate works func TestScalarAggregateExecuteTruncate(t *testing.T) { - assert := assert.New(t) fields := 
sqltypes.MakeTestFields( "col|weight_string(col)", "uint64|varbinary", @@ -166,10 +162,255 @@ func TestScalarAggregateExecuteTruncate(t *testing.T) { }}, Input: fp, TruncateColumnCount: 1, - PreProcess: true, } qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, true) - assert.NoError(err) - assert.Equal("[[UINT64(4)]]", fmt.Sprintf("%v", qr.Rows)) + assert.NoError(t, err) + assert.Equal(t, "[[DECIMAL(4)]]", fmt.Sprintf("%v", qr.Rows)) +} + +// TestScalarGroupConcatWithAggrOnEngine tests group_concat with full aggregation on engine. +func TestScalarGroupConcatWithAggrOnEngine(t *testing.T) { + fields := sqltypes.MakeTestFields( + "c2", + "varchar", + ) + + varbinaryFields := sqltypes.MakeTestFields( + "c2", + "varbinary", + ) + + textOutFields := sqltypes.MakeTestFields( + "group_concat(c2)", + "text", + ) + + var tcases = []struct { + name string + inputResult *sqltypes.Result + expResult *sqltypes.Result + }{{ + name: "ending with null", + inputResult: sqltypes.MakeTestResult(fields, + "a", "a", "b", "null", "null"), + expResult: sqltypes.MakeTestResult(textOutFields, + `a,a,b`), + }, { + name: "empty result", + inputResult: sqltypes.MakeTestResult(fields), + expResult: sqltypes.MakeTestResult(textOutFields, + `null`), + }, { + name: "only null value for concat", + inputResult: sqltypes.MakeTestResult(fields, + "null", "null", "null"), + expResult: sqltypes.MakeTestResult(textOutFields, + `null`), + }, { + name: "empty string value for concat", + inputResult: sqltypes.MakeTestResult(fields, + "", "", ""), + expResult: sqltypes.MakeTestResult(textOutFields, + `,,`), + }, { + name: "varbinary column", + inputResult: sqltypes.MakeTestResult(varbinaryFields, + "foo", "null", "bar"), + expResult: sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "group_concat(c2)", + "blob", + ), + `foo,bar`), + }} + + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + fp := &fakePrimitive{results: []*sqltypes.Result{tcase.inputResult}} + oa := 
&ScalarAggregate{ + Aggregates: []*AggregateParams{{ + Opcode: AggregateGroupConcat, + Col: 0, + Alias: "group_concat(c2)", + }}, + Input: fp, + } + qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) + require.NoError(t, err) + utils.MustMatch(t, tcase.expResult, qr) + + fp.rewind() + results := &sqltypes.Result{} + err = oa.TryStreamExecute(context.Background(), &noopVCursor{}, nil, true, func(qr *sqltypes.Result) error { + if qr.Fields != nil { + results.Fields = qr.Fields + } + results.Rows = append(results.Rows, qr.Rows...) + return nil + }) + require.NoError(t, err) + utils.MustMatch(t, tcase.expResult, results) + }) + } +} + +// TestScalarDistinctAggr tests distinct aggregation on engine. +func TestScalarDistinctAggrOnEngine(t *testing.T) { + fields := sqltypes.MakeTestFields( + "value|value", + "int64|int64", + ) + + fp := &fakePrimitive{results: []*sqltypes.Result{sqltypes.MakeTestResult( + fields, + "100|100", + "200|200", + "200|200", + "400|400", + "400|400", + "600|600", + )}} + + oa := &ScalarAggregate{ + Aggregates: []*AggregateParams{ + NewAggregateParam(AggregateCountDistinct, 0, "count(distinct value)"), + NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct value)"), + }, + Input: fp, + } + qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) + require.NoError(t, err) + require.Equal(t, `[[INT64(4) DECIMAL(1300)]]`, fmt.Sprintf("%v", qr.Rows)) + + fp.rewind() + results := &sqltypes.Result{} + err = oa.TryStreamExecute(context.Background(), &noopVCursor{}, nil, true, func(qr *sqltypes.Result) error { + if qr.Fields != nil { + results.Fields = qr.Fields + } + results.Rows = append(results.Rows, qr.Rows...) 
+ return nil + }) + require.NoError(t, err) + require.Equal(t, `[[INT64(4) DECIMAL(1300)]]`, fmt.Sprintf("%v", results.Rows)) +} + +func TestScalarDistinctPushedDown(t *testing.T) { + fields := sqltypes.MakeTestFields( + "count(distinct value)|sum(distinct value)", + "int64|decimal", + ) + + fp := &fakePrimitive{results: []*sqltypes.Result{sqltypes.MakeTestResult( + fields, + "2|200", + "6|400", + "3|700", + "1|10", + "7|30", + "8|90", + )}} + + countAggr := NewAggregateParam(AggregateSum, 0, "count(distinct value)") + countAggr.OrigOpcode = AggregateCountDistinct + sumAggr := NewAggregateParam(AggregateSum, 1, "sum(distinct value)") + sumAggr.OrigOpcode = AggregateSumDistinct + oa := &ScalarAggregate{ + Aggregates: []*AggregateParams{ + countAggr, + sumAggr, + }, + Input: fp, + } + qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) + require.NoError(t, err) + require.Equal(t, `[[INT64(27) DECIMAL(1430)]]`, fmt.Sprintf("%v", qr.Rows)) + + fp.rewind() + results := &sqltypes.Result{} + err = oa.TryStreamExecute(context.Background(), &noopVCursor{}, nil, true, func(qr *sqltypes.Result) error { + if qr.Fields != nil { + results.Fields = qr.Fields + } + results.Rows = append(results.Rows, qr.Rows...) + return nil + }) + require.NoError(t, err) + require.Equal(t, `[[INT64(27) DECIMAL(1430)]]`, fmt.Sprintf("%v", results.Rows)) +} + +// TestScalarGroupConcat tests group_concat with partial aggregation on engine. 
+func TestScalarGroupConcat(t *testing.T) { + fields := sqltypes.MakeTestFields( + "group_concat(c2)", + "text", + ) + + varbinaryFields := sqltypes.MakeTestFields( + "group_concat(c2)", + "blob", + ) + + var tcases = []struct { + name string + inputResult *sqltypes.Result + expResult *sqltypes.Result + }{{ + name: "ending with null", + inputResult: sqltypes.MakeTestResult(fields, + "a", "a", "b", "null", "null"), + expResult: sqltypes.MakeTestResult(fields, + `a,a,b`), + }, { + name: "empty result", + inputResult: sqltypes.MakeTestResult(fields), + expResult: sqltypes.MakeTestResult(fields, + `null`), + }, { + name: "only null value for concat", + inputResult: sqltypes.MakeTestResult(fields, + "null", "null", "null"), + expResult: sqltypes.MakeTestResult(fields, + `null`), + }, { + name: "empty string value for concat", + inputResult: sqltypes.MakeTestResult(fields, + "", "", ""), + expResult: sqltypes.MakeTestResult(fields, + `,,`), + }, { + name: "varbinary column", + inputResult: sqltypes.MakeTestResult(varbinaryFields, + "foo", "null", "bar"), + expResult: sqltypes.MakeTestResult(varbinaryFields, + `foo,bar`), + }} + + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + fp := &fakePrimitive{results: []*sqltypes.Result{tcase.inputResult}} + oa := &ScalarAggregate{ + Aggregates: []*AggregateParams{{ + Opcode: AggregateGroupConcat, + Col: 0, + }}, + Input: fp, + } + qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) + require.NoError(t, err) + assert.Equal(t, tcase.expResult, qr) + + fp.rewind() + results := &sqltypes.Result{} + err = oa.TryStreamExecute(context.Background(), &noopVCursor{}, nil, true, func(qr *sqltypes.Result) error { + if qr.Fields != nil { + results.Fields = qr.Fields + } + results.Rows = append(results.Rows, qr.Rows...) 
+ return nil + }) + require.NoError(t, err) + assert.Equal(t, tcase.expResult, results) + }) + } } diff --git a/go/vt/vtgate/engine/semi_join.go b/go/vt/vtgate/engine/semi_join.go index 2b08fe0f26e..d291b348da9 100644 --- a/go/vt/vtgate/engine/semi_join.go +++ b/go/vt/vtgate/engine/semi_join.go @@ -102,8 +102,12 @@ func (jn *SemiJoin) GetFields(ctx context.Context, vcursor VCursor, bindVars map } // Inputs returns the input primitives for this SemiJoin -func (jn *SemiJoin) Inputs() []Primitive { - return []Primitive{jn.Left, jn.Right} +func (jn *SemiJoin) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{jn.Left, jn.Right}, []map[string]any{{ + inputName: "Outer", + }, { + inputName: "SubQuery", + }} } // RouteType returns a description of the query routing type used by the primitive @@ -147,6 +151,9 @@ func projectFields(lfields []*querypb.Field, cols []int) []*querypb.Field { if lfields == nil { return nil } + if len(cols) == 0 { + return lfields + } fields := make([]*querypb.Field, len(cols)) for i, index := range cols { fields[i] = lfields[-index-1] @@ -155,6 +162,9 @@ func projectFields(lfields []*querypb.Field, cols []int) []*querypb.Field { } func projectRows(lrow []sqltypes.Value, cols []int) []sqltypes.Value { + if len(cols) == 0 { + return lrow + } row := make([]sqltypes.Value, len(cols)) for i, index := range cols { if index < 0 { diff --git a/go/vt/vtgate/engine/set.go b/go/vt/vtgate/engine/set.go index 09663bc5e31..768581a7504 100644 --- a/go/vt/vtgate/engine/set.go +++ b/go/vt/vtgate/engine/set.go @@ -153,8 +153,8 @@ func (s *Set) GetFields(context.Context, VCursor, map[string]*querypb.BindVariab } // Inputs implements the Primitive interface -func (s *Set) Inputs() []Primitive { - return []Primitive{s.Input} +func (s *Set) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{s.Input}, nil } func (s *Set) description() PrimitiveDescription { @@ -493,6 +493,15 @@ func (svss *SysVarSetAware) Execute(ctx context.Context, vcursor 
VCursor, env *e return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValueForVar, "invalid DDL strategy: %s", str) } vcursor.Session().SetDDLStrategy(str) + case sysvars.MigrationContext.Name: + str, err := svss.evalAsString(env, vcursor) + if err != nil { + return err + } + if err := schema.ValidateMigrationContext(str); err != nil { + return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValueForVar, "invalid migration_context: %s", str) + } + vcursor.Session().SetMigrationContext(str) case sysvars.QueryTimeout.Name: queryTimeout, err := svss.evalAsInt64(env, vcursor) if err != nil { diff --git a/go/vt/vtgate/engine/simple_projection.go b/go/vt/vtgate/engine/simple_projection.go index 774fabb4d4a..1a4f4ce92c4 100644 --- a/go/vt/vtgate/engine/simple_projection.go +++ b/go/vt/vtgate/engine/simple_projection.go @@ -79,8 +79,8 @@ func (sc *SimpleProjection) GetFields(ctx context.Context, vcursor VCursor, bind } // Inputs returns the input to this primitive -func (sc *SimpleProjection) Inputs() []Primitive { - return []Primitive{sc.Input} +func (sc *SimpleProjection) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{sc.Input}, nil } // buildResult builds a new result by pulling the necessary columns from diff --git a/go/vt/vtgate/engine/sql_calc_found_rows.go b/go/vt/vtgate/engine/sql_calc_found_rows.go index 9553023069c..2472bfd1d14 100644 --- a/go/vt/vtgate/engine/sql_calc_found_rows.go +++ b/go/vt/vtgate/engine/sql_calc_found_rows.go @@ -23,7 +23,6 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/evalengine" ) var _ Primitive = (*SQLCalcFoundRows)(nil) @@ -62,7 +61,7 @@ func (s SQLCalcFoundRows) TryExecute(ctx context.Context, vcursor VCursor, bindV if len(countQr.Rows) != 1 || len(countQr.Rows[0]) != 1 { return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "count query is not a scalar") } - fr, err 
:= evalengine.ToUint64(countQr.Rows[0][0]) + fr, err := countQr.Rows[0][0].ToCastUint64() if err != nil { return nil, err } @@ -87,7 +86,7 @@ func (s SQLCalcFoundRows) TryStreamExecute(ctx context.Context, vcursor VCursor, if len(countQr.Rows) != 1 || len(countQr.Rows[0]) != 1 { return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "count query is not a scalar") } - toUint64, err := evalengine.ToUint64(countQr.Rows[0][0]) + toUint64, err := countQr.Rows[0][0].ToCastUint64() if err != nil { return err } @@ -115,8 +114,8 @@ func (s SQLCalcFoundRows) NeedsTransaction() bool { } // Inputs implements the Primitive interface -func (s SQLCalcFoundRows) Inputs() []Primitive { - return []Primitive{s.LimitPrimitive, s.CountPrimitive} +func (s SQLCalcFoundRows) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{s.LimitPrimitive, s.CountPrimitive}, nil } func (s SQLCalcFoundRows) description() PrimitiveDescription { diff --git a/go/vt/vtgate/engine/throttle_app.go b/go/vt/vtgate/engine/throttle_app.go new file mode 100644 index 00000000000..db485e6bec3 --- /dev/null +++ b/go/vt/vtgate/engine/throttle_app.go @@ -0,0 +1,89 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package engine + +import ( + "context" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +var _ Primitive = (*ThrottleApp)(nil) + +// ThrottleApp represents the instructions to perform an online schema change via vtctld +type ThrottleApp struct { + Keyspace *vindexes.Keyspace + ThrottledAppRule *topodatapb.ThrottledAppRule + + noTxNeeded + + noInputs +} + +func (v *ThrottleApp) description() PrimitiveDescription { + return PrimitiveDescription{ + OperatorType: "ThrottleApp", + Keyspace: v.Keyspace, + Other: map[string]any{ + "appName": v.ThrottledAppRule.Name, + "expireAt": v.ThrottledAppRule.ExpiresAt, + "ratio": v.ThrottledAppRule.Ratio, + }, + } +} + +// RouteType implements the Primitive interface +func (v *ThrottleApp) RouteType() string { + return "ThrottleApp" +} + +// GetKeyspaceName implements the Primitive interface +func (v *ThrottleApp) GetKeyspaceName() string { + return v.Keyspace.Name +} + +// GetTableName implements the Primitive interface +func (v *ThrottleApp) GetTableName() string { + return "" +} + +// TryExecute implements the Primitive interface +func (v *ThrottleApp) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (result *sqltypes.Result, err error) { + if err := vcursor.ThrottleApp(ctx, v.ThrottledAppRule); err != nil { + return nil, err + } + return &sqltypes.Result{}, nil +} + +// TryStreamExecute implements the Primitive interface +func (v *ThrottleApp) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { + results, err := v.TryExecute(ctx, vcursor, bindVars, wantfields) + if err != nil { + return err + } + return callback(results) +} + +// GetFields 
implements the Primitive interface +func (v *ThrottleApp) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] GetFields is not reachable") +} diff --git a/go/vt/vtgate/engine/pullout_subquery.go b/go/vt/vtgate/engine/uncorrelated_subquery.go similarity index 71% rename from go/vt/vtgate/engine/pullout_subquery.go rename to go/vt/vtgate/engine/uncorrelated_subquery.go index 545e795ee60..311cd8d203a 100644 --- a/go/vt/vtgate/engine/pullout_subquery.go +++ b/go/vt/vtgate/engine/uncorrelated_subquery.go @@ -20,68 +20,71 @@ import ( "context" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/vterrors" - . "vitess.io/vitess/go/vt/vtgate/engine/opcode" - querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + . "vitess.io/vitess/go/vt/vtgate/engine/opcode" ) -var _ Primitive = (*PulloutSubquery)(nil) +var _ Primitive = (*UncorrelatedSubquery)(nil) -// PulloutSubquery executes a "pulled out" subquery and stores -// the results in a bind variable. -type PulloutSubquery struct { +// UncorrelatedSubquery executes a subquery once and uses +// the result as a bind variable for the underlying primitive. 
+type UncorrelatedSubquery struct { Opcode PulloutOpcode // SubqueryResult and HasValues are used to send in the bindvar used in the query to the underlying primitive SubqueryResult string HasValues string - Subquery Primitive - Underlying Primitive + Subquery Primitive + Outer Primitive } // Inputs returns the input primitives for this join -func (ps *PulloutSubquery) Inputs() []Primitive { - return []Primitive{ps.Subquery, ps.Underlying} +func (ps *UncorrelatedSubquery) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{ps.Subquery, ps.Outer}, []map[string]any{{ + inputName: "SubQuery", + }, { + inputName: "Outer", + }} } // RouteType returns a description of the query routing type used by the primitive -func (ps *PulloutSubquery) RouteType() string { +func (ps *UncorrelatedSubquery) RouteType() string { return ps.Opcode.String() } // GetKeyspaceName specifies the Keyspace that this primitive routes to. -func (ps *PulloutSubquery) GetKeyspaceName() string { - return ps.Underlying.GetKeyspaceName() +func (ps *UncorrelatedSubquery) GetKeyspaceName() string { + return ps.Outer.GetKeyspaceName() } // GetTableName specifies the table that this primitive routes to. -func (ps *PulloutSubquery) GetTableName() string { - return ps.Underlying.GetTableName() +func (ps *UncorrelatedSubquery) GetTableName() string { + return ps.Outer.GetTableName() } // TryExecute satisfies the Primitive interface. 
-func (ps *PulloutSubquery) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { +func (ps *UncorrelatedSubquery) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { combinedVars, err := ps.execSubquery(ctx, vcursor, bindVars) if err != nil { return nil, err } - return vcursor.ExecutePrimitive(ctx, ps.Underlying, combinedVars, wantfields) + return vcursor.ExecutePrimitive(ctx, ps.Outer, combinedVars, wantfields) } // TryStreamExecute performs a streaming exec. -func (ps *PulloutSubquery) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { +func (ps *UncorrelatedSubquery) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { combinedVars, err := ps.execSubquery(ctx, vcursor, bindVars) if err != nil { return err } - return vcursor.StreamExecutePrimitive(ctx, ps.Underlying, combinedVars, wantfields, callback) + return vcursor.StreamExecutePrimitive(ctx, ps.Outer, combinedVars, wantfields, callback) } // GetFields fetches the field info. 
-func (ps *PulloutSubquery) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { +func (ps *UncorrelatedSubquery) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { combinedVars := make(map[string]*querypb.BindVariable, len(bindVars)+1) for k, v := range bindVars { combinedVars[k] = v @@ -98,12 +101,12 @@ func (ps *PulloutSubquery) GetFields(ctx context.Context, vcursor VCursor, bindV case PulloutExists: combinedVars[ps.HasValues] = sqltypes.Int64BindVariable(0) } - return ps.Underlying.GetFields(ctx, vcursor, combinedVars) + return ps.Outer.GetFields(ctx, vcursor, combinedVars) } // NeedsTransaction implements the Primitive interface -func (ps *PulloutSubquery) NeedsTransaction() bool { - return ps.Subquery.NeedsTransaction() || ps.Underlying.NeedsTransaction() +func (ps *UncorrelatedSubquery) NeedsTransaction() bool { + return ps.Subquery.NeedsTransaction() || ps.Outer.NeedsTransaction() } var ( @@ -111,7 +114,7 @@ var ( errSqColumn = vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "subquery returned more than one column") ) -func (ps *PulloutSubquery) execSubquery(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (map[string]*querypb.BindVariable, error) { +func (ps *UncorrelatedSubquery) execSubquery(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (map[string]*querypb.BindVariable, error) { subqueryBindVars := make(map[string]*querypb.BindVariable, len(bindVars)) for k, v := range bindVars { subqueryBindVars[k] = v @@ -130,9 +133,6 @@ func (ps *PulloutSubquery) execSubquery(ctx context.Context, vcursor VCursor, bi case 0: combinedVars[ps.SubqueryResult] = sqltypes.NullBindVariable case 1: - if len(result.Rows[0]) != 1 { - return nil, errSqColumn - } combinedVars[ps.SubqueryResult] = sqltypes.ValueBindVariable(result.Rows[0][0]) default: return nil, errSqRow @@ -147,9 
+147,6 @@ func (ps *PulloutSubquery) execSubquery(ctx context.Context, vcursor VCursor, bi Values: []*querypb.Value{sqltypes.ValueToProto(sqltypes.NewInt64(0))}, } default: - if len(result.Rows[0]) != 1 { - return nil, errSqColumn - } combinedVars[ps.HasValues] = sqltypes.Int64BindVariable(1) values := &querypb.BindVariable{ Type: querypb.Type_TUPLE, @@ -171,7 +168,7 @@ func (ps *PulloutSubquery) execSubquery(ctx context.Context, vcursor VCursor, bi return combinedVars, nil } -func (ps *PulloutSubquery) description() PrimitiveDescription { +func (ps *UncorrelatedSubquery) description() PrimitiveDescription { other := map[string]any{} var pulloutVars []string if ps.HasValues != "" { @@ -184,7 +181,7 @@ func (ps *PulloutSubquery) description() PrimitiveDescription { other["PulloutVars"] = pulloutVars } return PrimitiveDescription{ - OperatorType: "Subquery", + OperatorType: "UncorrelatedSubquery", Variant: ps.Opcode.String(), Other: other, } diff --git a/go/vt/vtgate/engine/pullout_subquery_test.go b/go/vt/vtgate/engine/uncorrelated_subquery_test.go similarity index 83% rename from go/vt/vtgate/engine/pullout_subquery_test.go rename to go/vt/vtgate/engine/uncorrelated_subquery_test.go index 9b6e7c490f0..3e80c6369a7 100644 --- a/go/vt/vtgate/engine/pullout_subquery_test.go +++ b/go/vt/vtgate/engine/uncorrelated_subquery_test.go @@ -54,11 +54,11 @@ func TestPulloutSubqueryValueGood(t *testing.T) { ufp := &fakePrimitive{ results: []*sqltypes.Result{underlyingResult}, } - ps := &PulloutSubquery{ + ps := &UncorrelatedSubquery{ Opcode: PulloutValue, SubqueryResult: "sq", Subquery: sfp, - Underlying: ufp, + Outer: ufp, } result, err := ps.TryExecute(context.Background(), &noopVCursor{}, bindVars, false) @@ -79,11 +79,11 @@ func TestPulloutSubqueryValueNone(t *testing.T) { results: []*sqltypes.Result{sqResult}, } ufp := &fakePrimitive{} - ps := &PulloutSubquery{ + ps := &UncorrelatedSubquery{ Opcode: PulloutValue, SubqueryResult: "sq", Subquery: sfp, - Underlying: ufp, + 
Outer: ufp, } if _, err := ps.TryExecute(context.Background(), &noopVCursor{}, make(map[string]*querypb.BindVariable), false); err != nil { @@ -93,27 +93,6 @@ func TestPulloutSubqueryValueNone(t *testing.T) { ufp.ExpectLog(t, []string{`Execute sq: false`}) } -func TestPulloutSubqueryValueBadColumns(t *testing.T) { - sqResult := sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "col1|col2", - "int64|int64", - ), - "1|1", - ) - sfp := &fakePrimitive{ - results: []*sqltypes.Result{sqResult}, - } - ps := &PulloutSubquery{ - Opcode: PulloutValue, - SubqueryResult: "sq", - Subquery: sfp, - } - - _, err := ps.TryExecute(context.Background(), &noopVCursor{}, make(map[string]*querypb.BindVariable), false) - require.EqualError(t, err, "subquery returned more than one column") -} - func TestPulloutSubqueryValueBadRows(t *testing.T) { sqResult := sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -126,7 +105,7 @@ func TestPulloutSubqueryValueBadRows(t *testing.T) { sfp := &fakePrimitive{ results: []*sqltypes.Result{sqResult}, } - ps := &PulloutSubquery{ + ps := &UncorrelatedSubquery{ Opcode: PulloutValue, SubqueryResult: "sq", Subquery: sfp, @@ -149,12 +128,12 @@ func TestPulloutSubqueryInNotinGood(t *testing.T) { results: []*sqltypes.Result{sqResult}, } ufp := &fakePrimitive{} - ps := &PulloutSubquery{ + ps := &UncorrelatedSubquery{ Opcode: PulloutIn, SubqueryResult: "sq", HasValues: "has_values", Subquery: sfp, - Underlying: ufp, + Outer: ufp, } if _, err := ps.TryExecute(context.Background(), &noopVCursor{}, make(map[string]*querypb.BindVariable), false); err != nil { @@ -185,12 +164,12 @@ func TestPulloutSubqueryInNone(t *testing.T) { results: []*sqltypes.Result{sqResult}, } ufp := &fakePrimitive{} - ps := &PulloutSubquery{ + ps := &UncorrelatedSubquery{ Opcode: PulloutIn, SubqueryResult: "sq", HasValues: "has_values", Subquery: sfp, - Underlying: ufp, + Outer: ufp, } if _, err := ps.TryExecute(context.Background(), &noopVCursor{}, 
make(map[string]*querypb.BindVariable), false); err != nil { @@ -200,27 +179,6 @@ func TestPulloutSubqueryInNone(t *testing.T) { ufp.ExpectLog(t, []string{`Execute has_values: type:INT64 value:"0" sq: type:TUPLE values:{type:INT64 value:"0"} false`}) } -func TestPulloutSubqueryInBadColumns(t *testing.T) { - sqResult := sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "col1|col2", - "int64|int64", - ), - "1|1", - ) - sfp := &fakePrimitive{ - results: []*sqltypes.Result{sqResult}, - } - ps := &PulloutSubquery{ - Opcode: PulloutIn, - SubqueryResult: "sq", - Subquery: sfp, - } - - _, err := ps.TryExecute(context.Background(), &noopVCursor{}, make(map[string]*querypb.BindVariable), false) - require.EqualError(t, err, "subquery returned more than one column") -} - func TestPulloutSubqueryExists(t *testing.T) { sqResult := sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -233,11 +191,11 @@ func TestPulloutSubqueryExists(t *testing.T) { results: []*sqltypes.Result{sqResult}, } ufp := &fakePrimitive{} - ps := &PulloutSubquery{ - Opcode: PulloutExists, - HasValues: "has_values", - Subquery: sfp, - Underlying: ufp, + ps := &UncorrelatedSubquery{ + Opcode: PulloutExists, + HasValues: "has_values", + Subquery: sfp, + Outer: ufp, } if _, err := ps.TryExecute(context.Background(), &noopVCursor{}, make(map[string]*querypb.BindVariable), false); err != nil { @@ -258,11 +216,11 @@ func TestPulloutSubqueryExistsNone(t *testing.T) { results: []*sqltypes.Result{sqResult}, } ufp := &fakePrimitive{} - ps := &PulloutSubquery{ - Opcode: PulloutExists, - HasValues: "has_values", - Subquery: sfp, - Underlying: ufp, + ps := &UncorrelatedSubquery{ + Opcode: PulloutExists, + HasValues: "has_values", + Subquery: sfp, + Outer: ufp, } if _, err := ps.TryExecute(context.Background(), &noopVCursor{}, make(map[string]*querypb.BindVariable), false); err != nil { @@ -276,7 +234,7 @@ func TestPulloutSubqueryError(t *testing.T) { sfp := &fakePrimitive{ sendErr: errors.New("err"), } - ps := 
&PulloutSubquery{ + ps := &UncorrelatedSubquery{ Opcode: PulloutExists, SubqueryResult: "sq", Subquery: sfp, @@ -310,11 +268,11 @@ func TestPulloutSubqueryStream(t *testing.T) { ufp := &fakePrimitive{ results: []*sqltypes.Result{underlyingResult}, } - ps := &PulloutSubquery{ + ps := &UncorrelatedSubquery{ Opcode: PulloutValue, SubqueryResult: "sq", Subquery: sfp, - Underlying: ufp, + Outer: ufp, } result, err := wrapStreamExecute(ps, &noopVCursor{}, bindVars, true) @@ -329,11 +287,11 @@ func TestPulloutSubqueryGetFields(t *testing.T) { "aa": sqltypes.Int64BindVariable(1), } ufp := &fakePrimitive{} - ps := &PulloutSubquery{ + ps := &UncorrelatedSubquery{ Opcode: PulloutValue, SubqueryResult: "sq", HasValues: "has_values", - Underlying: ufp, + Outer: ufp, } if _, err := ps.GetFields(context.Background(), nil, bindVars); err != nil { diff --git a/go/vt/vtgate/engine/update.go b/go/vt/vtgate/engine/update.go index 72aef9005c6..3db7972fba5 100644 --- a/go/vt/vtgate/engine/update.go +++ b/go/vt/vtgate/engine/update.go @@ -36,8 +36,8 @@ var _ Primitive = (*Update)(nil) // VindexValues contains changed values for a vindex. type VindexValues struct { - PvMap map[string]evalengine.Expr - Offset int // Offset from ownedVindexQuery to provide input decision for vindex update. + EvalExprMap map[string]evalengine.Expr + Offset int // Offset from ownedVindexQuery to provide input decision for vindex update. } // Update represents the instructions to perform an update. 
@@ -128,11 +128,7 @@ func (upd *Update) updateVindexEntries(ctx context.Context, vcursor VCursor, bin return err } - vindexTable, err := upd.GetSingleTable() - if err != nil { - return err - } - for _, colVindex := range vindexTable.ColumnVindexes { + for _, colVindex := range upd.Vindexes { // Skip this vindex if no rows are being changed updColValues, ok := upd.ChangedVindexValues[colVindex.Name] if !ok { @@ -141,7 +137,7 @@ func (upd *Update) updateVindexEntries(ctx context.Context, vcursor VCursor, bin offset := updColValues.Offset if !row[offset].IsNull() { - val, err := evalengine.ToInt64(row[offset]) + val, err := row[offset].ToCastInt64() if err != nil { return err } @@ -156,7 +152,7 @@ func (upd *Update) updateVindexEntries(ctx context.Context, vcursor VCursor, bin // Fetch the column values. origColValue := row[fieldColNumMap[vCol.String()]] fromIds = append(fromIds, origColValue) - if colValue, exists := updColValues.PvMap[vCol.String()]; exists { + if colValue, exists := updColValues.EvalExprMap[vCol.String()]; exists { resolvedVal, err := env.Evaluate(colValue) if err != nil { return err diff --git a/go/vt/vtgate/engine/update_test.go b/go/vt/vtgate/engine/update_test.go index 6af186a92f3..313602668bc 100644 --- a/go/vt/vtgate/engine/update_test.go +++ b/go/vt/vtgate/engine/update_test.go @@ -69,7 +69,7 @@ func TestUpdateUnsharded(t *testing.T) { } func TestUpdateEqual(t *testing.T) { - vindex, _ := vindexes.NewHash("", nil) + vindex, _ := vindexes.CreateVindex("hash", "", nil) upd := &Update{ DML: &DML{ RoutingParameters: &RoutingParameters{ @@ -100,7 +100,7 @@ func TestUpdateEqual(t *testing.T) { } func TestUpdateEqualMultiCol(t *testing.T) { - vindex, _ := vindexes.NewRegionExperimental("", map[string]string{"region_bytes": "1"}) + vindex, _ := vindexes.CreateVindex("region_experimental", "", map[string]string{"region_bytes": "1"}) upd := &Update{ DML: &DML{ RoutingParameters: &RoutingParameters{ @@ -126,7 +126,7 @@ func TestUpdateEqualMultiCol(t 
*testing.T) { } func TestUpdateScatter(t *testing.T) { - vindex, _ := vindexes.NewHash("", nil) + vindex, _ := vindexes.CreateVindex("hash", "", nil) upd := &Update{ DML: &DML{ RoutingParameters: &RoutingParameters{ @@ -179,7 +179,7 @@ func TestUpdateScatter(t *testing.T) { } func TestUpdateEqualNoRoute(t *testing.T) { - vindex, _ := vindexes.NewLookupUnique("", map[string]string{ + vindex, _ := vindexes.CreateVindex("lookup_unique", "", map[string]string{ "table": "lkp", "from": "from", "to": "toc", @@ -211,7 +211,7 @@ func TestUpdateEqualNoRoute(t *testing.T) { func TestUpdateEqualNoScatter(t *testing.T) { t.Skip("planner does not produces this plan anymore") - vindex, _ := vindexes.NewLookupUnique("", map[string]string{ + vindex, _ := vindexes.CreateVindex("lookup_unique", "", map[string]string{ "table": "lkp", "from": "from", "to": "toc", @@ -247,24 +247,23 @@ func TestUpdateEqualChangedVindex(t *testing.T) { Vindex: ks.Vindexes["hash"], Values: []evalengine.Expr{evalengine.NewLiteralInt(1)}, }, - Query: "dummy_update", - Table: []*vindexes.Table{ - ks.Tables["t1"], - }, + Query: "dummy_update", + TableNames: []string{ks.Tables["t1"].Name.String()}, + Vindexes: ks.Tables["t1"].Owned, OwnedVindexQuery: "dummy_subquery", KsidVindex: ks.Vindexes["hash"], KsidLength: 1, }, ChangedVindexValues: map[string]*VindexValues{ "twocol": { - PvMap: map[string]evalengine.Expr{ + EvalExprMap: map[string]evalengine.Expr{ "c1": evalengine.NewLiteralInt(1), "c2": evalengine.NewLiteralInt(2), }, Offset: 4, }, "onecol": { - PvMap: map[string]evalengine.Expr{ + EvalExprMap: map[string]evalengine.Expr{ "c3": evalengine.NewLiteralInt(3), }, Offset: 5, @@ -392,17 +391,16 @@ func TestUpdateEqualMultiColChangedVindex(t *testing.T) { Vindex: ks.Vindexes["rg_vdx"], Values: []evalengine.Expr{evalengine.NewLiteralInt(1), evalengine.NewLiteralInt(2)}, }, - Query: "dummy_update", - Table: []*vindexes.Table{ - ks.Tables["rg_tbl"], - }, + Query: "dummy_update", + TableNames: 
[]string{ks.Tables["rg_tbl"].Name.String()}, + Vindexes: ks.Tables["rg_tbl"].Owned, OwnedVindexQuery: "dummy_subquery", KsidVindex: ks.Vindexes["rg_vdx"], KsidLength: 2, }, ChangedVindexValues: map[string]*VindexValues{ "lkp_rg": { - PvMap: map[string]evalengine.Expr{ + EvalExprMap: map[string]evalengine.Expr{ "colc": evalengine.NewLiteralInt(5), }, Offset: 3, @@ -513,24 +511,23 @@ func TestUpdateScatterChangedVindex(t *testing.T) { Opcode: Scatter, Keyspace: ks.Keyspace, }, - Query: "dummy_update", - Table: []*vindexes.Table{ - ks.Tables["t1"], - }, + Query: "dummy_update", + TableNames: []string{ks.Tables["t1"].Name.String()}, + Vindexes: ks.Tables["t1"].Owned, OwnedVindexQuery: "dummy_subquery", KsidVindex: ks.Vindexes["hash"], KsidLength: 1, }, ChangedVindexValues: map[string]*VindexValues{ "twocol": { - PvMap: map[string]evalengine.Expr{ + EvalExprMap: map[string]evalengine.Expr{ "c1": evalengine.NewLiteralInt(1), "c2": evalengine.NewLiteralInt(2), }, Offset: 4, }, "onecol": { - PvMap: map[string]evalengine.Expr{ + EvalExprMap: map[string]evalengine.Expr{ "c3": evalengine.NewLiteralInt(3), }, Offset: 5, @@ -709,24 +706,23 @@ func TestUpdateInChangedVindex(t *testing.T) { evalengine.NewLiteralInt(2), }}, }, - Query: "dummy_update", - Table: []*vindexes.Table{ - ks.Tables["t1"], - }, + Query: "dummy_update", + TableNames: []string{ks.Tables["t1"].Name.String()}, + Vindexes: ks.Tables["t1"].Owned, OwnedVindexQuery: "dummy_subquery", KsidVindex: ks.Vindexes["hash"], KsidLength: 1, }, ChangedVindexValues: map[string]*VindexValues{ "twocol": { - PvMap: map[string]evalengine.Expr{ + EvalExprMap: map[string]evalengine.Expr{ "c1": evalengine.NewLiteralInt(1), "c2": evalengine.NewLiteralInt(2), }, Offset: 4, }, "onecol": { - PvMap: map[string]evalengine.Expr{ + EvalExprMap: map[string]evalengine.Expr{ "c3": evalengine.NewLiteralInt(3), }, Offset: 5, @@ -840,17 +836,16 @@ func TestUpdateInChangedVindexMultiCol(t *testing.T) { evalengine.NewLiteralInt(3), }, }, - Query: 
"dummy_update", - Table: []*vindexes.Table{ - ks.Tables["rg_tbl"], - }, + Query: "dummy_update", + TableNames: []string{ks.Tables["rg_tbl"].Name.String()}, + Vindexes: ks.Tables["rg_tbl"].Owned, OwnedVindexQuery: "dummy_subquery", KsidVindex: ks.Vindexes["rg_vdx"], KsidLength: 2, }, ChangedVindexValues: map[string]*VindexValues{ "lkp_rg": { - PvMap: map[string]evalengine.Expr{ + EvalExprMap: map[string]evalengine.Expr{ "colc": evalengine.NewLiteralInt(5), }, Offset: 3, @@ -891,7 +886,7 @@ func TestUpdateInChangedVindexMultiCol(t *testing.T) { } func TestUpdateEqualSubshard(t *testing.T) { - vindex, _ := vindexes.NewRegionExperimental("", map[string]string{"region_bytes": "1"}) + vindex, _ := vindexes.CreateVindex("region_experimental", "", map[string]string{"region_bytes": "1"}) upd := &Update{ DML: &DML{ RoutingParameters: &RoutingParameters{ diff --git a/go/vt/vtgate/engine/vexplain.go b/go/vt/vtgate/engine/vexplain.go index da7b6100221..ad540f96c9c 100644 --- a/go/vt/vtgate/engine/vexplain.go +++ b/go/vt/vtgate/engine/vexplain.go @@ -170,11 +170,22 @@ func primitiveToPlanDescriptionWithSQLResults(in Primitive, res map[Primitive]st this.Other["mysql_explain_json"] = json.RawMessage(v) } - for _, input := range in.Inputs() { - this.Inputs = append(this.Inputs, primitiveToPlanDescriptionWithSQLResults(input, res)) + inputs, infos := in.Inputs() + for idx, input := range inputs { + pd := primitiveToPlanDescriptionWithSQLResults(input, res) + if infos != nil { + for k, v := range infos[idx] { + if k == inputName { + pd.InputName = v.(string) + continue + } + pd.Other[k] = v + } + } + this.Inputs = append(this.Inputs, pd) } - if len(in.Inputs()) == 0 { + if len(inputs) == 0 { this.Inputs = []PrimitiveDescription{} } @@ -206,8 +217,8 @@ func convertToVExplainQueriesResult(logs []ExecuteEntry) *sqltypes.Result { } // Inputs implements the Primitive interface -func (v *VExplain) Inputs() []Primitive { - return []Primitive{v.Input} +func (v *VExplain) Inputs() 
([]Primitive, []map[string]any) { + return []Primitive{v.Input}, nil } func (v *VExplain) description() PrimitiveDescription { diff --git a/go/vt/vtgate/engine/vindex_func.go b/go/vt/vtgate/engine/vindex_func.go index eb55ff32d8c..918bc9240ad 100644 --- a/go/vt/vtgate/engine/vindex_func.go +++ b/go/vt/vtgate/engine/vindex_func.go @@ -136,7 +136,7 @@ func (vf *VindexFunc) mapVindex(ctx context.Context, vcursor VCursor, bindVars m len(values), len(destinations)) } for i, value := range values { - vkey, err := evalengine.Cast(value, sqltypes.VarBinary) + vkey, err := sqltypes.Cast(value, sqltypes.VarBinary) if err != nil { return nil, err } diff --git a/go/vt/vtgate/engine/vindex_lookup.go b/go/vt/vtgate/engine/vindex_lookup.go index 2cb7f852be5..576cad14287 100644 --- a/go/vt/vtgate/engine/vindex_lookup.go +++ b/go/vt/vtgate/engine/vindex_lookup.go @@ -136,12 +136,12 @@ func (vr *VindexLookup) TryStreamExecute(ctx context.Context, vcursor VCursor, b } // Inputs implements the Primitive interface -func (vr *VindexLookup) Inputs() []Primitive { +func (vr *VindexLookup) Inputs() ([]Primitive, []map[string]any) { if vr.Lookup != nil { - return []Primitive{vr.Lookup, vr.SendTo} + return []Primitive{vr.Lookup, vr.SendTo}, nil } - return []Primitive{vr.SendTo} + return []Primitive{vr.SendTo}, nil } // description implements the Primitive interface diff --git a/go/vt/vtgate/evalengine/api_aggregation.go b/go/vt/vtgate/evalengine/api_aggregation.go new file mode 100644 index 00000000000..c0d490ced22 --- /dev/null +++ b/go/vt/vtgate/evalengine/api_aggregation.go @@ -0,0 +1,497 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "strconv" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/fastparse" + "vitess.io/vitess/go/mysql/format" + "vitess.io/vitess/go/sqltypes" +) + +// Sum implements a SUM() aggregation +type Sum interface { + Add(value sqltypes.Value) error + Result() sqltypes.Value + Reset() +} + +// MinMax implements a MIN() or MAX() aggregation +type MinMax interface { + Min(value sqltypes.Value) error + Max(value sqltypes.Value) error + Result() sqltypes.Value + Reset() +} + +// aggregationSumCount implements a sum of count values. +// This is a Vitess-specific optimization that allows our planner to push down +// some expensive cross-shard operations by summing counts from different result sets. +// The result of this operator is always an INT64 (like for the COUNT() operator); +// if no values were provided to the operator, the result will be 0 (not NULL). +// If the sum of counts overflows, an error will be returned (instead of transparently +// calculating the larger sum using decimals). 
+type aggregationSumCount struct { + n int64 +} + +func (s *aggregationSumCount) Add(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + n, err := value.ToInt64() + if err != nil { + return err + } + + result := s.n + n + if (result > s.n) != (n > 0) { + return dataOutOfRangeError(s.n, n, "BIGINT", "+") + } + + s.n = result + return nil +} + +func (s *aggregationSumCount) Result() sqltypes.Value { + return sqltypes.NewInt64(s.n) +} + +func (s *aggregationSumCount) Reset() { + s.n = 0 +} + +// aggregationInt implements SUM, MIN and MAX aggregation for Signed types, +// including INT64, INT32, INT24, INT16 and INT8. +// +// For SUM, the result of the operator is always a DECIMAL (matching MySQL's behavior), +// unless no values have been aggregated, in which case the result is NULL. +// For performance reasons, although the output of a SUM is a DECIMAL, the computations +// are performed using 64-bit arithmetic as long as they don't overflow. +// +// For MIN and MAX aggregations, the result of the operator is the same type as the values that +// have been aggregated. 
+type aggregationInt struct { + current int64 + dec decimal.Decimal + t sqltypes.Type + init bool +} + +func (s *aggregationInt) Add(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + n, err := value.ToInt64() + if err != nil { + return err + } + + s.init = true + + if s.dec.IsInitialized() { + s.dec = s.dec.Add(decimal.NewFromInt(n)) + return nil + } + + result := s.current + n + if (result > s.current) != (n > 0) { + s.dec = decimal.NewFromInt(s.current).Add(decimal.NewFromInt(n)) + } else { + s.current = result + } + + return nil +} + +func (s *aggregationInt) Min(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + n, err := value.ToInt64() + if err != nil { + return err + } + if !s.init || n < s.current { + s.current = n + } + s.init = true + return nil +} + +func (s *aggregationInt) Max(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + n, err := value.ToInt64() + if err != nil { + return err + } + if !s.init || n > s.current { + s.current = n + } + s.init = true + return nil +} + +func (s *aggregationInt) Result() sqltypes.Value { + if !s.init { + return sqltypes.NULL + } + + var b []byte + if s.dec.IsInitialized() { + b = s.dec.FormatMySQL(0) + } else { + b = strconv.AppendInt(nil, s.current, 10) + } + return sqltypes.MakeTrusted(s.t, b) +} + +func (s *aggregationInt) Reset() { + s.current = 0 + s.dec = decimal.Decimal{} + s.init = false +} + +// aggregationUint implements SUM, MIN and MAX aggregation for Unsigned types, +// including UINT64, UINT32, UINT24, UINT16 and UINT8. +// +// For SUM, the result of the operator is always a DECIMAL (matching MySQL's behavior), +// unless no values have been aggregated, in which case the result is NULL. +// For performance reasons, although the output of a SUM is a DECIMAL, the computations +// are performed using 64-bit arithmetic as long as they don't overflow. 
+// +// For MIN and MAX aggregations, the result of the operator is the same type as the values that +// have been aggregated. +type aggregationUint struct { + current uint64 + dec decimal.Decimal + t sqltypes.Type + init bool +} + +func (s *aggregationUint) Add(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + n, err := value.ToUint64() + if err != nil { + return err + } + + s.init = true + + if s.dec.IsInitialized() { + s.dec = s.dec.Add(decimal.NewFromUint(n)) + return nil + } + + result := s.current + n + if false { + s.dec = decimal.NewFromUint(s.current).Add(decimal.NewFromUint(n)) + } else { + s.current = result + } + + return nil +} + +func (s *aggregationUint) Min(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + n, err := value.ToUint64() + if err != nil { + return err + } + if !s.init || n < s.current { + s.current = n + } + s.init = true + return nil +} + +func (s *aggregationUint) Max(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + n, err := value.ToUint64() + if err != nil { + return err + } + if !s.init || n > s.current { + s.current = n + } + s.init = true + return nil +} + +func (s *aggregationUint) Result() sqltypes.Value { + if !s.init { + return sqltypes.NULL + } + + var b []byte + if s.dec.IsInitialized() { + b = s.dec.FormatMySQL(0) + } else { + b = strconv.AppendUint(nil, s.current, 10) + } + return sqltypes.MakeTrusted(s.t, b) +} + +func (s *aggregationUint) Reset() { + s.current = 0 + s.dec = decimal.Decimal{} + s.init = false +} + +// aggregationFloat implements SUM, MIN and MAX aggregations for FLOAT32 and FLOAT64 types. +// For SUM aggregations, the result is always a FLOAT64, unless no values have been aggregated, +// in which case the result is NULL. +// For MIN and MAX aggregations, the result is the same type as the aggregated values. 
+type aggregationFloat struct { + current float64 + t sqltypes.Type + init bool +} + +func (s *aggregationFloat) Add(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + f, err := value.ToFloat64() + if err != nil { + return err + } + s.current += f + s.init = true + return nil +} + +func (s *aggregationFloat) Min(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + n, err := value.ToFloat64() + if err != nil { + return err + } + if !s.init || n < s.current { + s.current = n + } + s.init = true + return nil +} + +func (s *aggregationFloat) Max(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + n, err := value.ToFloat64() + if err != nil { + return err + } + if !s.init || n > s.current { + s.current = n + } + s.init = true + return nil +} + +func (s *aggregationFloat) Result() sqltypes.Value { + if !s.init { + return sqltypes.NULL + } + return sqltypes.MakeTrusted(s.t, format.FormatFloat(s.current)) +} + +func (s *aggregationFloat) Reset() { + s.current = 0 + s.init = false +} + +// aggregationSumAny implements SUM aggregation for non-numeric values. +// Matching MySQL's behavior, all the values are best-effort parsed as FLOAT64 +// before being aggregated. +type aggregationSumAny struct { + aggregationFloat +} + +func (s *aggregationSumAny) Add(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + f, _ := fastparse.ParseFloat64(value.RawStr()) + s.current += f + s.init = true + return nil +} + +func (s *aggregationSumAny) Result() sqltypes.Value { + if !s.init { + return sqltypes.NULL + } + return sqltypes.NewFloat64(s.current) +} + +// aggregationDecimal implements SUM, MIN and MAX aggregations for the DECIMAL type. +// The return of all aggregations is always DECIMAL, except when no values have been +// aggregated, where the return is NULL. 
+type aggregationDecimal struct { + dec decimal.Decimal + prec int32 +} + +func (s *aggregationDecimal) Add(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + dec, err := decimal.NewFromMySQL(value.Raw()) + if err != nil { + return err + } + if !s.dec.IsInitialized() { + s.dec = dec + s.prec = -dec.Exponent() + } else { + s.dec = s.dec.Add(dec) + s.prec = max(s.prec, -dec.Exponent()) + } + return nil +} + +func (s *aggregationDecimal) Min(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + dec, err := decimal.NewFromMySQL(value.Raw()) + if err != nil { + return err + } + if !s.dec.IsInitialized() || dec.Cmp(s.dec) < 0 { + s.dec = dec + } + return nil +} + +func (s *aggregationDecimal) Max(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + dec, err := decimal.NewFromMySQL(value.Raw()) + if err != nil { + return err + } + if !s.dec.IsInitialized() || dec.Cmp(s.dec) > 0 { + s.dec = dec + } + return nil +} + +func (s *aggregationDecimal) Result() sqltypes.Value { + if !s.dec.IsInitialized() { + return sqltypes.NULL + } + return sqltypes.MakeTrusted(sqltypes.Decimal, s.dec.FormatMySQL(s.prec)) +} + +func (s *aggregationDecimal) Reset() { + s.dec = decimal.Decimal{} + s.prec = 0 +} + +func NewSumOfCounts() Sum { + return &aggregationSumCount{} +} + +func NewAggregationSum(type_ sqltypes.Type) Sum { + switch { + case sqltypes.IsSigned(type_): + return &aggregationInt{t: sqltypes.Decimal} + case sqltypes.IsUnsigned(type_): + return &aggregationUint{t: sqltypes.Decimal} + case sqltypes.IsFloat(type_): + return &aggregationFloat{t: sqltypes.Float64} + case sqltypes.IsDecimal(type_): + return &aggregationDecimal{} + default: + return &aggregationSumAny{} + } +} + +// aggregationMinMax implements MIN and MAX aggregations for all data types +// that cannot be more efficiently handled by one of the numeric aggregators. +// The aggregation is performed using the slow NullSafeComparison path of the +// evaluation engine. 
+type aggregationMinMax struct { + current sqltypes.Value + collation collations.ID +} + +func (a *aggregationMinMax) minmax(value sqltypes.Value, max bool) (err error) { + if value.IsNull() { + return nil + } + if a.current.IsNull() { + a.current = value + return nil + } + n, err := compare(a.current, value, a.collation) + if err != nil { + return err + } + if (n < 0) == max { + a.current = value + } + return nil +} + +func (a *aggregationMinMax) Min(value sqltypes.Value) (err error) { + return a.minmax(value, false) +} + +func (a *aggregationMinMax) Max(value sqltypes.Value) error { + return a.minmax(value, true) +} + +func (a *aggregationMinMax) Result() sqltypes.Value { + return a.current +} + +func (a *aggregationMinMax) Reset() { + a.current = sqltypes.NULL +} + +func NewAggregationMinMax(type_ sqltypes.Type, collation collations.ID) MinMax { + switch { + case sqltypes.IsSigned(type_): + return &aggregationInt{t: type_} + case sqltypes.IsUnsigned(type_): + return &aggregationUint{t: type_} + case sqltypes.IsFloat(type_): + return &aggregationFloat{t: type_} + case sqltypes.IsDecimal(type_): + return &aggregationDecimal{} + default: + return &aggregationMinMax{collation: collation} + } +} diff --git a/go/vt/vtgate/evalengine/api_aggregation_test.go b/go/vt/vtgate/evalengine/api_aggregation_test.go new file mode 100644 index 00000000000..aab49541e71 --- /dev/null +++ b/go/vt/vtgate/evalengine/api_aggregation_test.go @@ -0,0 +1,166 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "strconv" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +func TestMinMax(t *testing.T) { + tcases := []struct { + type_ sqltypes.Type + coll collations.ID + values []sqltypes.Value + min, max sqltypes.Value + err error + }{ + { + type_: sqltypes.Int64, + values: []sqltypes.Value{}, + min: sqltypes.NULL, + max: sqltypes.NULL, + }, + { + type_: sqltypes.Int64, + values: []sqltypes.Value{NULL, NULL}, + min: sqltypes.NULL, + max: sqltypes.NULL, + }, + { + type_: sqltypes.Int64, + values: []sqltypes.Value{NULL, NewInt64(1)}, + min: NewInt64(1), + max: NewInt64(1), + }, + { + type_: sqltypes.Int64, + values: []sqltypes.Value{NewInt64(1), NewInt64(2)}, + min: NewInt64(1), + max: NewInt64(2), + }, + { + type_: sqltypes.VarChar, + values: []sqltypes.Value{TestValue(sqltypes.VarChar, "aa"), TestValue(sqltypes.VarChar, "bb")}, + err: vterrors.New(vtrpcpb.Code_UNKNOWN, "cannot compare strings, collation is unknown or unsupported (collation ID: 0)"), + }, + { + type_: sqltypes.VarBinary, + values: []sqltypes.Value{sqltypes.NewVarBinary("a"), sqltypes.NewVarBinary("b")}, + min: sqltypes.NewVarBinary("a"), + max: sqltypes.NewVarBinary("b"), + }, + { + // accent insensitive + type_: sqltypes.VarChar, + coll: getCollationID("utf8mb4_0900_as_ci"), + values: []sqltypes.Value{ + sqltypes.NewVarChar("ǍḄÇ"), + sqltypes.NewVarChar("ÁḆĈ"), + }, + min: sqltypes.NewVarChar("ÁḆĈ"), + max: sqltypes.NewVarChar("ǍḄÇ"), + }, + { + // kana sensitive + type_: sqltypes.VarChar, + coll: getCollationID("utf8mb4_ja_0900_as_cs_ks"), + values: []sqltypes.Value{ + sqltypes.NewVarChar("\xE3\x81\xAB\xE3\x81\xBB\xE3\x82\x93\xE3\x81\x94"), + 
sqltypes.NewVarChar("\xE3\x83\x8B\xE3\x83\x9B\xE3\x83\xB3\xE3\x82\xB4"), + }, + min: sqltypes.NewVarChar("\xE3\x81\xAB\xE3\x81\xBB\xE3\x82\x93\xE3\x81\x94"), + max: sqltypes.NewVarChar("\xE3\x83\x8B\xE3\x83\x9B\xE3\x83\xB3\xE3\x82\xB4"), + }, + { + // non breaking space + type_: sqltypes.VarChar, + coll: getCollationID("utf8mb4_0900_as_cs"), + values: []sqltypes.Value{ + sqltypes.NewVarChar("abc "), + sqltypes.NewVarChar("abc\u00a0"), + }, + min: sqltypes.NewVarChar("abc "), + max: sqltypes.NewVarChar("abc\u00a0"), + }, + { + type_: sqltypes.VarChar, + coll: getCollationID("utf8mb4_hu_0900_ai_ci"), + // "cs" counts as a separate letter, where c < cs < d + values: []sqltypes.Value{ + sqltypes.NewVarChar("c"), + sqltypes.NewVarChar("cs"), + }, + min: sqltypes.NewVarChar("c"), + max: sqltypes.NewVarChar("cs"), + }, + { + type_: sqltypes.VarChar, + coll: getCollationID("utf8mb4_hu_0900_ai_ci"), + // "cs" counts as a separate letter, where c < cs < d + values: []sqltypes.Value{ + sqltypes.NewVarChar("cukor"), + sqltypes.NewVarChar("csak"), + }, + min: sqltypes.NewVarChar("cukor"), + max: sqltypes.NewVarChar("csak"), + }, + } + for i, tcase := range tcases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + t.Run("Min", func(t *testing.T) { + agg := NewAggregationMinMax(tcase.type_, tcase.coll) + + for _, v := range tcase.values { + err := agg.Min(v) + if err != nil { + if tcase.err != nil { + return + } + require.NoError(t, err) + } + } + + utils.MustMatch(t, agg.Result(), tcase.min) + }) + + t.Run("Max", func(t *testing.T) { + agg := NewAggregationMinMax(tcase.type_, tcase.coll) + + for _, v := range tcase.values { + err := agg.Max(v) + if err != nil { + if tcase.err != nil { + return + } + require.NoError(t, err) + } + } + + utils.MustMatch(t, agg.Result(), tcase.max) + }) + }) + } +} diff --git a/go/vt/vtgate/evalengine/api_arithmetic_test.go b/go/vt/vtgate/evalengine/api_arithmetic_test.go index 0a0abc84a30..40373423aa5 100644 --- 
a/go/vt/vtgate/evalengine/api_arithmetic_test.go +++ b/go/vt/vtgate/evalengine/api_arithmetic_test.go @@ -24,7 +24,6 @@ import ( "strconv" "testing" - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/vthash" @@ -117,12 +116,12 @@ func TestArithmetics(t *testing.T) { // testing for error for parsing float value to uint64 v1: TestValue(sqltypes.Uint64, "1.2"), v2: NewInt64(2), - err: "strconv.ParseUint: parsing \"1.2\": invalid syntax", + err: "unparsed tail left after parsing uint64 from \"1.2\": \".2\"", }, { // testing for error for parsing float value to uint64 v1: NewUint64(2), v2: TestValue(sqltypes.Uint64, "1.2"), - err: "strconv.ParseUint: parsing \"1.2\": invalid syntax", + err: "unparsed tail left after parsing uint64 from \"1.2\": \".2\"", }, { // uint64 - uint64 v1: NewUint64(8), @@ -253,11 +252,11 @@ func TestArithmetics(t *testing.T) { }, { v1: TestValue(sqltypes.Int64, "1.2"), v2: NewInt64(2), - err: "strconv.ParseInt: parsing \"1.2\": invalid syntax", + err: "unparsed tail left after parsing int64 from \"1.2\": \".2\"", }, { v1: NewInt64(2), v2: TestValue(sqltypes.Int64, "1.2"), - err: "strconv.ParseInt: parsing \"1.2\": invalid syntax", + err: "unparsed tail left after parsing int64 from \"1.2\": \".2\"", }, { // testing for uint64 overflow with max uint64 + int value v1: NewUint64(maxUint64), @@ -320,12 +319,12 @@ func TestArithmetics(t *testing.T) { // testing for error in types v1: TestValue(sqltypes.Int64, "1.2"), v2: NewInt64(2), - err: "strconv.ParseInt: parsing \"1.2\": invalid syntax", + err: "unparsed tail left after parsing int64 from \"1.2\": \".2\"", }, { // testing for error in types v1: NewInt64(2), v2: TestValue(sqltypes.Int64, "1.2"), - err: "strconv.ParseInt: parsing \"1.2\": invalid syntax", + err: "unparsed tail left after parsing int64 from \"1.2\": \".2\"", }, { // testing for uint/int v1: NewUint64(4), @@ -384,12 +383,12 @@ func TestArithmetics(t *testing.T) { // testing for error 
in types v1: TestValue(sqltypes.Int64, "1.2"), v2: NewInt64(2), - err: "strconv.ParseInt: parsing \"1.2\": invalid syntax", + err: "unparsed tail left after parsing int64 from \"1.2\": \".2\"", }, { // testing for error in types v1: NewInt64(2), v2: TestValue(sqltypes.Int64, "1.2"), - err: "strconv.ParseInt: parsing \"1.2\": invalid syntax", + err: "unparsed tail left after parsing int64 from \"1.2\": \".2\"", }, { // testing for uint*int v1: NewUint64(4), @@ -479,12 +478,12 @@ func TestNullSafeAdd(t *testing.T) { // Make sure underlying error is returned for LHS. v1: TestValue(sqltypes.Int64, "1.2"), v2: NewInt64(2), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseInt: parsing \"1.2\": invalid syntax"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unparsed tail left after parsing int64 from \"1.2\": \".2\""), }, { // Make sure underlying error is returned for RHS. v1: NewInt64(2), v2: TestValue(sqltypes.Int64, "1.2"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseInt: parsing \"1.2\": invalid syntax"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unparsed tail left after parsing int64 from \"1.2\": \".2\""), }, { // Make sure underlying error is returned while adding. 
v1: NewInt64(-1), @@ -515,313 +514,6 @@ func TestNullSafeAdd(t *testing.T) { } } -func TestCast(t *testing.T) { - tcases := []struct { - typ sqltypes.Type - v sqltypes.Value - out sqltypes.Value - err error - }{{ - typ: sqltypes.VarChar, - v: NULL, - out: NULL, - }, { - typ: sqltypes.VarChar, - v: TestValue(sqltypes.VarChar, "exact types"), - out: TestValue(sqltypes.VarChar, "exact types"), - }, { - typ: sqltypes.Int64, - v: TestValue(sqltypes.Int32, "32"), - out: TestValue(sqltypes.Int64, "32"), - }, { - typ: sqltypes.Int24, - v: TestValue(sqltypes.Uint64, "64"), - out: TestValue(sqltypes.Int24, "64"), - }, { - typ: sqltypes.Int24, - v: TestValue(sqltypes.VarChar, "bad int"), - err: vterrors.New(vtrpcpb.Code_UNKNOWN, `strconv.ParseInt: parsing "bad int": invalid syntax`), - }, { - typ: sqltypes.Uint64, - v: TestValue(sqltypes.Uint32, "32"), - out: TestValue(sqltypes.Uint64, "32"), - }, { - typ: sqltypes.Uint24, - v: TestValue(sqltypes.Int64, "64"), - out: TestValue(sqltypes.Uint24, "64"), - }, { - typ: sqltypes.Uint24, - v: TestValue(sqltypes.Int64, "-1"), - err: vterrors.New(vtrpcpb.Code_UNKNOWN, `strconv.ParseUint: parsing "-1": invalid syntax`), - }, { - typ: sqltypes.Float64, - v: TestValue(sqltypes.Int64, "64"), - out: TestValue(sqltypes.Float64, "64"), - }, { - typ: sqltypes.Float32, - v: TestValue(sqltypes.Float64, "64"), - out: TestValue(sqltypes.Float32, "64"), - }, { - typ: sqltypes.Float32, - v: TestValue(sqltypes.Decimal, "1.24"), - out: TestValue(sqltypes.Float32, "1.24"), - }, { - typ: sqltypes.Float64, - v: TestValue(sqltypes.VarChar, "1.25"), - out: TestValue(sqltypes.Float64, "1.25"), - }, { - typ: sqltypes.Float64, - v: TestValue(sqltypes.VarChar, "bad float"), - err: vterrors.New(vtrpcpb.Code_UNKNOWN, `strconv.ParseFloat: parsing "bad float": invalid syntax`), - }, { - typ: sqltypes.VarChar, - v: TestValue(sqltypes.Int64, "64"), - out: TestValue(sqltypes.VarChar, "64"), - }, { - typ: sqltypes.VarBinary, - v: TestValue(sqltypes.Float64, "64"), - 
out: TestValue(sqltypes.VarBinary, "64"), - }, { - typ: sqltypes.VarBinary, - v: TestValue(sqltypes.Decimal, "1.24"), - out: TestValue(sqltypes.VarBinary, "1.24"), - }, { - typ: sqltypes.VarBinary, - v: TestValue(sqltypes.VarChar, "1.25"), - out: TestValue(sqltypes.VarBinary, "1.25"), - }, { - typ: sqltypes.VarChar, - v: TestValue(sqltypes.VarBinary, "valid string"), - out: TestValue(sqltypes.VarChar, "valid string"), - }, { - typ: sqltypes.VarChar, - v: TestValue(sqltypes.Expression, "bad string"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "expression cannot be converted to bytes"), - }} - for _, tcase := range tcases { - got, err := Cast(tcase.v, tcase.typ) - if !vterrors.Equals(err, tcase.err) { - t.Errorf("Cast(%v) error: %v, want %v", tcase.v, vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err != nil { - continue - } - - if !reflect.DeepEqual(got, tcase.out) { - t.Errorf("Cast(%v): %v, want %v", tcase.v, got, tcase.out) - } - } -} - -func TestToUint64(t *testing.T) { - tcases := []struct { - v sqltypes.Value - out uint64 - err error - }{{ - v: TestValue(sqltypes.VarChar, "abcd"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "could not parse value: 'abcd'"), - }, { - v: NewInt64(-1), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "negative number cannot be converted to unsigned: -1"), - }, { - v: NewInt64(1), - out: 1, - }, { - v: NewUint64(1), - out: 1, - }} - for _, tcase := range tcases { - got, err := ToUint64(tcase.v) - if !vterrors.Equals(err, tcase.err) { - t.Errorf("ToUint64(%v) error: %v, want %v", tcase.v, vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err != nil { - continue - } - - if got != tcase.out { - t.Errorf("ToUint64(%v): %v, want %v", tcase.v, got, tcase.out) - } - } -} - -func TestToInt64(t *testing.T) { - tcases := []struct { - v sqltypes.Value - out int64 - err error - }{{ - v: TestValue(sqltypes.VarChar, "abcd"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "could not parse value: 
'abcd'"), - }, { - v: NewUint64(18446744073709551615), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unsigned number overflows int64 value: 18446744073709551615"), - }, { - v: NewInt64(1), - out: 1, - }, { - v: NewUint64(1), - out: 1, - }} - for _, tcase := range tcases { - got, err := ToInt64(tcase.v) - if !vterrors.Equals(err, tcase.err) { - t.Errorf("ToInt64(%v) error: %v, want %v", tcase.v, vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err != nil { - continue - } - - if got != tcase.out { - t.Errorf("ToInt64(%v): %v, want %v", tcase.v, got, tcase.out) - } - } -} - -func TestToFloat64(t *testing.T) { - tcases := []struct { - v sqltypes.Value - out float64 - err error - }{{ - v: TestValue(sqltypes.VarChar, "abcd"), - out: 0, - }, { - v: TestValue(sqltypes.VarChar, "1.2"), - out: 1.2, - }, { - v: NewInt64(1), - out: 1, - }, { - v: NewUint64(1), - out: 1, - }, { - v: NewFloat64(1.2), - out: 1.2, - }, { - v: TestValue(sqltypes.Int64, "1.2"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseInt: parsing \"1.2\": invalid syntax"), - }} - for _, tcase := range tcases { - t.Run(tcase.v.String(), func(t *testing.T) { - got, err := ToFloat64(tcase.v) - if tcase.err != nil { - require.EqualError(t, err, tcase.err.Error()) - } else { - require.Equal(t, tcase.out, got) - } - }) - } -} - -func TestToNative(t *testing.T) { - testcases := []struct { - in sqltypes.Value - out any - }{{ - in: NULL, - out: nil, - }, { - in: TestValue(sqltypes.Int8, "1"), - out: int64(1), - }, { - in: TestValue(sqltypes.Int16, "1"), - out: int64(1), - }, { - in: TestValue(sqltypes.Int24, "1"), - out: int64(1), - }, { - in: TestValue(sqltypes.Int32, "1"), - out: int64(1), - }, { - in: TestValue(sqltypes.Int64, "1"), - out: int64(1), - }, { - in: TestValue(sqltypes.Uint8, "1"), - out: uint64(1), - }, { - in: TestValue(sqltypes.Uint16, "1"), - out: uint64(1), - }, { - in: TestValue(sqltypes.Uint24, "1"), - out: uint64(1), - }, { - in: TestValue(sqltypes.Uint32, 
"1"), - out: uint64(1), - }, { - in: TestValue(sqltypes.Uint64, "1"), - out: uint64(1), - }, { - in: TestValue(sqltypes.Float32, "1"), - out: float64(1), - }, { - in: TestValue(sqltypes.Float64, "1"), - out: float64(1), - }, { - in: TestValue(sqltypes.Timestamp, "2012-02-24 23:19:43"), - out: []byte("2012-02-24 23:19:43"), - }, { - in: TestValue(sqltypes.Date, "2012-02-24"), - out: []byte("2012-02-24"), - }, { - in: TestValue(sqltypes.Time, "23:19:43"), - out: []byte("23:19:43"), - }, { - in: TestValue(sqltypes.Datetime, "2012-02-24 23:19:43"), - out: []byte("2012-02-24 23:19:43"), - }, { - in: TestValue(sqltypes.Year, "1"), - out: uint64(1), - }, { - in: TestValue(sqltypes.Decimal, "1"), - out: []byte("1"), - }, { - in: TestValue(sqltypes.Text, "a"), - out: []byte("a"), - }, { - in: TestValue(sqltypes.Blob, "a"), - out: []byte("a"), - }, { - in: TestValue(sqltypes.VarChar, "a"), - out: []byte("a"), - }, { - in: TestValue(sqltypes.VarBinary, "a"), - out: []byte("a"), - }, { - in: TestValue(sqltypes.Char, "a"), - out: []byte("a"), - }, { - in: TestValue(sqltypes.Binary, "a"), - out: []byte("a"), - }, { - in: TestValue(sqltypes.Bit, "1"), - out: []byte("1"), - }, { - in: TestValue(sqltypes.Enum, "a"), - out: []byte("a"), - }, { - in: TestValue(sqltypes.Set, "a"), - out: []byte("a"), - }} - for _, tcase := range testcases { - v, err := ToNative(tcase.in) - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(v, tcase.out) { - t.Errorf("%v.ToNative = %#v, want %#v", tcase.in, v, tcase.out) - } - } - - // Test Expression failure. 
- _, err := ToNative(TestValue(sqltypes.Expression, "aa")) - want := vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "EXPRESSION(aa) cannot be converted to a go type") - if !vterrors.Equals(err, want) { - t.Errorf("ToNative(EXPRESSION): %v, want %v", vterrors.Print(err), vterrors.Print(want)) - } -} - func TestNewIntegralNumeric(t *testing.T) { tcases := []struct { v sqltypes.Value @@ -847,11 +539,11 @@ func TestNewIntegralNumeric(t *testing.T) { }, { // Only valid Int64 allowed if type is Int64. v: TestValue(sqltypes.Int64, "1.2"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseInt: parsing \"1.2\": invalid syntax"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unparsed tail left after parsing int64 from \"1.2\": \".2\""), }, { // Only valid Uint64 allowed if type is Uint64. v: TestValue(sqltypes.Uint64, "1.2"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseUint: parsing \"1.2\": invalid syntax"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unparsed tail left after parsing uint64 from \"1.2\": \".2\""), }, { v: TestValue(sqltypes.VarChar, "abcd"), err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "could not parse value: 'abcd'"), @@ -1112,220 +804,6 @@ func TestCompareNumeric(t *testing.T) { } } -func TestMin(t *testing.T) { - tcases := []struct { - v1, v2 sqltypes.Value - min sqltypes.Value - err error - }{{ - v1: NULL, - v2: NULL, - min: NULL, - }, { - v1: NewInt64(1), - v2: NULL, - min: NewInt64(1), - }, { - v1: NULL, - v2: NewInt64(1), - min: NewInt64(1), - }, { - v1: NewInt64(1), - v2: NewInt64(2), - min: NewInt64(1), - }, { - v1: NewInt64(2), - v2: NewInt64(1), - min: NewInt64(1), - }, { - v1: NewInt64(1), - v2: NewInt64(1), - min: NewInt64(1), - }, { - v1: TestValue(sqltypes.VarChar, "aa"), - v2: TestValue(sqltypes.VarChar, "aa"), - err: vterrors.New(vtrpcpb.Code_UNKNOWN, "cannot compare strings, collation is unknown or unsupported (collation ID: 0)"), - }} - for _, tcase := range tcases { - v, err := 
Min(tcase.v1, tcase.v2, collations.Unknown) - if !vterrors.Equals(err, tcase.err) { - t.Errorf("Min error: %v, want %v", vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err != nil { - continue - } - - if !reflect.DeepEqual(v, tcase.min) { - t.Errorf("Min(%v, %v): %v, want %v", tcase.v1, tcase.v2, v, tcase.min) - } - } -} - -func TestMinCollate(t *testing.T) { - tcases := []struct { - v1, v2 string - collation collations.ID - out string - err error - }{ - { - // accent insensitive - v1: "ǍḄÇ", - v2: "ÁḆĈ", - out: "ǍḄÇ", - collation: getCollationID("utf8mb4_0900_as_ci"), - }, - { - // kana sensitive - v1: "\xE3\x81\xAB\xE3\x81\xBB\xE3\x82\x93\xE3\x81\x94", - v2: "\xE3\x83\x8B\xE3\x83\x9B\xE3\x83\xB3\xE3\x82\xB4", - out: "\xE3\x83\x8B\xE3\x83\x9B\xE3\x83\xB3\xE3\x82\xB4", - collation: getCollationID("utf8mb4_ja_0900_as_cs_ks"), - }, - { - // non breaking space - v1: "abc ", - v2: "abc\u00a0", - out: "abc\u00a0", - collation: getCollationID("utf8mb4_0900_as_cs"), - }, - { - // "cs" counts as a separate letter, where c < cs < d - v1: "c", - v2: "cs", - out: "cs", - collation: getCollationID("utf8mb4_hu_0900_ai_ci"), - }, - { - // "cs" counts as a separate letter, where c < cs < d - v1: "cukor", - v2: "csak", - out: "csak", - collation: getCollationID("utf8mb4_hu_0900_ai_ci"), - }, - } - for _, tcase := range tcases { - got, err := Min(TestValue(sqltypes.VarChar, tcase.v1), TestValue(sqltypes.VarChar, tcase.v2), tcase.collation) - if !vterrors.Equals(err, tcase.err) { - t.Errorf("NullsafeCompare(%v, %v) error: %v, want %v", tcase.v1, tcase.v2, vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err != nil { - continue - } - - if got.ToString() == tcase.out { - t.Errorf("NullsafeCompare(%v, %v): %v, want %v", tcase.v1, tcase.v2, got, tcase.out) - } - } -} - -func TestMax(t *testing.T) { - tcases := []struct { - v1, v2 sqltypes.Value - max sqltypes.Value - err error - }{{ - v1: NULL, - v2: NULL, - max: NULL, - }, { - v1: NewInt64(1), - v2: NULL, - 
max: NewInt64(1), - }, { - v1: NULL, - v2: NewInt64(1), - max: NewInt64(1), - }, { - v1: NewInt64(1), - v2: NewInt64(2), - max: NewInt64(2), - }, { - v1: NewInt64(2), - v2: NewInt64(1), - max: NewInt64(2), - }, { - v1: NewInt64(1), - v2: NewInt64(1), - max: NewInt64(1), - }, { - v1: TestValue(sqltypes.VarChar, "aa"), - v2: TestValue(sqltypes.VarChar, "aa"), - err: vterrors.New(vtrpcpb.Code_UNKNOWN, "cannot compare strings, collation is unknown or unsupported (collation ID: 0)"), - }} - for _, tcase := range tcases { - v, err := Max(tcase.v1, tcase.v2, collations.Unknown) - if !vterrors.Equals(err, tcase.err) { - t.Errorf("Max error: %v, want %v", vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err != nil { - continue - } - - if !reflect.DeepEqual(v, tcase.max) { - t.Errorf("Max(%v, %v): %v, want %v", tcase.v1, tcase.v2, v, tcase.max) - } - } -} - -func TestMaxCollate(t *testing.T) { - tcases := []struct { - v1, v2 string - collation collations.ID - out string - err error - }{ - { - // accent insensitive - v1: "ǍḄÇ", - v2: "ÁḆĈ", - out: "ǍḄÇ", - collation: getCollationID("utf8mb4_0900_as_ci"), - }, - { - // kana sensitive - v1: "\xE3\x81\xAB\xE3\x81\xBB\xE3\x82\x93\xE3\x81\x94", - v2: "\xE3\x83\x8B\xE3\x83\x9B\xE3\x83\xB3\xE3\x82\xB4", - out: "\xE3\x83\x8B\xE3\x83\x9B\xE3\x83\xB3\xE3\x82\xB4", - collation: getCollationID("utf8mb4_ja_0900_as_cs_ks"), - }, - { - // non breaking space - v1: "abc ", - v2: "abc\u00a0", - out: "abc\u00a0", - collation: getCollationID("utf8mb4_0900_as_cs"), - }, - { - // "cs" counts as a separate letter, where c < cs < d - v1: "c", - v2: "cs", - out: "cs", - collation: getCollationID("utf8mb4_hu_0900_ai_ci"), - }, - { - // "cs" counts as a separate letter, where c < cs < d - v1: "cukor", - v2: "csak", - out: "csak", - collation: getCollationID("utf8mb4_hu_0900_ai_ci"), - }, - } - for _, tcase := range tcases { - got, err := Max(TestValue(sqltypes.VarChar, tcase.v1), TestValue(sqltypes.VarChar, tcase.v2), tcase.collation) - 
if !vterrors.Equals(err, tcase.err) { - t.Errorf("NullsafeCompare(%v, %v) error: %v, want %v", tcase.v1, tcase.v2, vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err != nil { - continue - } - - if got.ToString() != tcase.out { - t.Errorf("NullsafeCompare(%v, %v): %v, want %v", tcase.v1, tcase.v2, got, tcase.out) - } - } -} - func printValue(v sqltypes.Value) string { vBytes, _ := v.ToBytes() return fmt.Sprintf("%v:%q", v.Type(), vBytes) @@ -1360,8 +838,8 @@ func BenchmarkAddNoNative(b *testing.B) { v1 := sqltypes.MakeTrusted(sqltypes.Int64, []byte("1")) v2 := sqltypes.MakeTrusted(sqltypes.Int64, []byte("12")) for i := 0; i < b.N; i++ { - iv1, _ := ToInt64(v1) - iv2, _ := ToInt64(v2) + iv1, _ := v1.ToInt64() + iv2, _ := v2.ToInt64() v1 = sqltypes.MakeTrusted(sqltypes.Int64, strconv.AppendInt(nil, iv1+iv2, 10)) } } diff --git a/go/vt/vtgate/evalengine/api_compare.go b/go/vt/vtgate/evalengine/api_compare.go index 9ecc03a3c6f..d05e86a12bb 100644 --- a/go/vt/vtgate/evalengine/api_compare.go +++ b/go/vt/vtgate/evalengine/api_compare.go @@ -21,6 +21,7 @@ import ( "fmt" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/sqltypes" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -50,56 +51,103 @@ func (err UnsupportedCollationError) Error() string { // UnsupportedCollationHashError is returned when we try to get the hash value and are missing the collation to use var UnsupportedCollationHashError = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "text type with an unknown/unsupported collation cannot be hashed") -// Min returns the minimum of v1 and v2. If one of the -// values is NULL, it returns the other value. If both -// are NULL, it returns NULL. -func Min(v1, v2 sqltypes.Value, collation collations.ID) (sqltypes.Value, error) { - return minmax(v1, v2, true, collation) -} - -// Max returns the maximum of v1 and v2. 
If one of the -// values is NULL, it returns the other value. If both -// are NULL, it returns NULL. -func Max(v1, v2 sqltypes.Value, collation collations.ID) (sqltypes.Value, error) { - return minmax(v1, v2, false, collation) -} - -func minmax(v1, v2 sqltypes.Value, min bool, collation collations.ID) (sqltypes.Value, error) { - if v1.IsNull() { - return v2, nil - } - if v2.IsNull() { - return v1, nil +func compare(v1, v2 sqltypes.Value, collationID collations.ID) (int, error) { + v1t := v1.Type() + + // We have a fast path here for the case where both values are + // the same type, and it's one of the basic types we can compare + // directly. This is a common case for equality checks. + if v1t == v2.Type() { + switch { + case sqltypes.IsText(v1t): + if collationID == collations.CollationBinaryID { + return bytes.Compare(v1.Raw(), v2.Raw()), nil + } + coll := colldata.Lookup(collationID) + if coll == nil { + return 0, UnsupportedCollationError{ID: collationID} + } + result := coll.Collate(v1.Raw(), v2.Raw(), false) + switch { + case result < 0: + return -1, nil + case result > 0: + return 1, nil + default: + return 0, nil + } + case sqltypes.IsBinary(v1t), v1t == sqltypes.Date, v1t == sqltypes.Datetime, v1t == sqltypes.Timestamp: + // We can't optimize for Time here, since Time is not sortable + // based on the raw bytes. This is because of cases like + // '24:00:00' and '101:00:00' which are both valid times and + // order wrong based on the raw bytes. 
+ return bytes.Compare(v1.Raw(), v2.Raw()), nil + case sqltypes.IsSigned(v1t): + i1, err := v1.ToInt64() + if err != nil { + return 0, err + } + i2, err := v2.ToInt64() + if err != nil { + return 0, err + } + switch { + case i1 < i2: + return -1, nil + case i1 > i2: + return 1, nil + default: + return 0, nil + } + case sqltypes.IsUnsigned(v1t): + u1, err := v1.ToUint64() + if err != nil { + return 0, err + } + u2, err := v2.ToUint64() + if err != nil { + return 0, err + } + switch { + case u1 < u2: + return -1, nil + case u1 > u2: + return 1, nil + default: + return 0, nil + } + } } - n, err := NullsafeCompare(v1, v2, collation) + v1eval, err := valueToEval(v1, collations.TypedCollation{ + Collation: collationID, + Coercibility: collations.CoerceImplicit, + Repertoire: collations.RepertoireUnicode, + }) if err != nil { - return sqltypes.NULL, err + return 0, err } - // XNOR construct. See tests. - v1isSmaller := n < 0 - if min == v1isSmaller { - return v1, nil + v2eval, err := valueToEval(v2, collations.TypedCollation{ + Collation: collationID, + Coercibility: collations.CoerceImplicit, + Repertoire: collations.RepertoireUnicode, + }) + if err != nil { + return 0, err } - return v2, nil -} -// isByteComparable returns true if the type is binary or date/time. -func isByteComparable(typ sqltypes.Type, collationID collations.ID) bool { - if sqltypes.IsBinary(typ) { - return true + out, err := evalCompare(v1eval, v2eval) + if err != nil { + return 0, err } - if sqltypes.IsText(typ) { - return collationID == collations.CollationBinaryID + if out == 0 { + return 0, nil } - switch typ { - case sqltypes.Timestamp, sqltypes.Date, sqltypes.Time, sqltypes.Datetime, sqltypes.Enum, - sqltypes.Set, sqltypes.TypeJSON, sqltypes.Bit, sqltypes.Geometry: - return true - default: - return false + if out > 0 { + return 1, nil } + return -1, nil } // NullsafeCompare returns 0 if v1==v2, -1 if v1v2. 
@@ -121,53 +169,5 @@ func NullsafeCompare(v1, v2 sqltypes.Value, collationID collations.ID) (int, err if v2.IsNull() { return 1, nil } - - if isByteComparable(v1.Type(), collationID) && isByteComparable(v2.Type(), collationID) { - return bytes.Compare(v1.Raw(), v2.Raw()), nil - } - - typ, err := CoerceTo(v1.Type(), v2.Type()) // TODO systay we should add a method where this decision is done at plantime - if err != nil { - return 0, err - } - - switch { - case sqltypes.IsText(typ): - collation := collationID.Get() - if collation == nil { - return 0, UnsupportedCollationError{ID: collationID} - } - - v1Bytes, err := v1.ToBytes() - if err != nil { - return 0, err - } - v2Bytes, err := v2.ToBytes() - if err != nil { - return 0, err - } - - switch result := collation.Collate(v1Bytes, v2Bytes, false); { - case result < 0: - return -1, nil - case result > 0: - return 1, nil - default: - return 0, nil - } - - case sqltypes.IsNumber(typ): - v1cast, err := valueToEvalCast(v1, typ) - if err != nil { - return 0, err - } - v2cast, err := valueToEvalCast(v2, typ) - if err != nil { - return 0, err - } - return compareNumeric(v1cast, v2cast) - - default: - return 0, UnsupportedComparisonError{Type1: v1.Type(), Type2: v2.Type()} - } + return compare(v1, v2, collationID) } diff --git a/go/vt/vtgate/evalengine/api_compare_test.go b/go/vt/vtgate/evalengine/api_compare_test.go index 603f3ebe676..bd87363b7e8 100644 --- a/go/vt/vtgate/evalengine/api_compare_test.go +++ b/go/vt/vtgate/evalengine/api_compare_test.go @@ -61,7 +61,7 @@ func init() { func defaultCollation() collations.TypedCollation { return collations.TypedCollation{ - Collation: collationEnv.LookupByName("utf8mb4_bin").ID(), + Collation: collationEnv.LookupByName("utf8mb4_bin"), Coercibility: collations.CoerceImplicit, Repertoire: collations.RepertoireASCII, } @@ -767,6 +767,18 @@ func TestCompareTime(t *testing.T) { out: &F, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewTime("02:46:02"), 
sqltypes.NewTime("10:42:50")}, }, + { + name: "time is greater than time", + v1: NewColumn(0, sqltypes.Time, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Time, collations.CollationBinaryID), + out: &T, op: sqlparser.GreaterThanOp, + row: []sqltypes.Value{sqltypes.NewTime("101:14:35"), sqltypes.NewTime("13:01:38")}, + }, + { + name: "time is not greater than time", + v1: NewColumn(0, sqltypes.Time, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Time, collations.CollationBinaryID), + out: &F, op: sqlparser.GreaterThanOp, + row: []sqltypes.Value{sqltypes.NewTime("24:46:02"), sqltypes.NewTime("101:42:50")}, + }, { name: "time is less than time", v1: NewColumn(0, sqltypes.Time, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Time, collations.CollationBinaryID), @@ -1095,47 +1107,55 @@ func TestNullComparisons(t *testing.T) { } func TestNullsafeCompare(t *testing.T) { - collation := collationEnv.LookupByName("utf8mb4_general_ci").ID() + collation := collationEnv.LookupByName("utf8mb4_general_ci") tcases := []struct { v1, v2 sqltypes.Value out int err error - }{{ - // All nulls. - v1: NULL, - v2: NULL, - out: 0, - }, { - // LHS null. - v1: NULL, - v2: NewInt64(1), - out: -1, - }, { - // RHS null. - v1: NewInt64(1), - v2: NULL, - out: 1, - }, { - // LHS Text - v1: TestValue(sqltypes.VarChar, "abcd"), - v2: TestValue(sqltypes.VarChar, "abcd"), - out: 0, - }, { - // Make sure underlying error is returned for LHS. 
- v1: TestValue(sqltypes.Float64, "0.0"), - v2: TestValue(sqltypes.VarChar, " 6736380880502626304.000000 aa"), - out: -1, - }} + }{ + { + v1: NULL, + v2: NULL, + out: 0, + }, + { + v1: NULL, + v2: NewInt64(1), + out: -1, + }, + { + v1: NewInt64(1), + v2: NULL, + out: 1, + }, + { + v1: TestValue(sqltypes.VarChar, "abcd"), + v2: TestValue(sqltypes.VarChar, "abcd"), + out: 0, + }, + { + v1: TestValue(sqltypes.Float64, "0.0"), + v2: TestValue(sqltypes.VarChar, " 6736380880502626304.000000 aa"), + out: -1, + }, + { + v1: TestValue(sqltypes.Enum, "foo"), + v2: TestValue(sqltypes.Enum, "bar"), + out: 1, + }, + } for _, tcase := range tcases { - got, err := NullsafeCompare(tcase.v1, tcase.v2, collation) - if tcase.err != nil { - require.EqualError(t, err, tcase.err.Error()) - continue - } - require.NoError(t, err) - if got != tcase.out { - t.Errorf("NullsafeCompare(%v, %v): %v, want %v", printValue(tcase.v1), printValue(tcase.v2), got, tcase.out) - } + t.Run(fmt.Sprintf("%v/%v", tcase.v1, tcase.v2), func(t *testing.T) { + got, err := NullsafeCompare(tcase.v1, tcase.v2, collation) + if tcase.err != nil { + require.EqualError(t, err, tcase.err.Error()) + return + } + require.NoError(t, err) + if got != tcase.out { + t.Errorf("NullsafeCompare(%v, %v): %v, want %v", printValue(tcase.v1), printValue(tcase.v2), got, tcase.out) + } + }) } } @@ -1214,17 +1234,24 @@ func TestNullsafeCompareCollate(t *testing.T) { }, } for _, tcase := range tcases { - got, err := NullsafeCompare(TestValue(sqltypes.VarChar, tcase.v1), TestValue(sqltypes.VarChar, tcase.v2), tcase.collation) - if !vterrors.Equals(err, tcase.err) { - t.Errorf("NullsafeCompare(%v, %v) error: %v, want %v", tcase.v1, tcase.v2, vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err != nil { - continue - } + t.Run(fmt.Sprintf("%v/%v", tcase.v1, tcase.v2), func(t *testing.T) { + got, err := NullsafeCompare(TestValue(sqltypes.VarChar, tcase.v1), TestValue(sqltypes.VarChar, tcase.v2), tcase.collation) + if tcase.err 
== nil { + require.NoError(t, err) + } else { + require.Error(t, err) + } + if !vterrors.Equals(err, tcase.err) { + t.Errorf("NullsafeCompare(%v, %v) error: %v, want %v", tcase.v1, tcase.v2, vterrors.Print(err), vterrors.Print(tcase.err)) + } + if tcase.err != nil { + return + } - if got != tcase.out { - t.Errorf("NullsafeCompare(%v, %v): %v, want %v", tcase.v1, tcase.v2, got, tcase.out) - } + if got != tcase.out { + t.Errorf("NullsafeCompare(%v, %v): %v, want %v", tcase.v1, tcase.v2, got, tcase.out) + } + }) } } diff --git a/go/vt/vtgate/evalengine/api_hash.go b/go/vt/vtgate/evalengine/api_hash.go index 60dce8232d2..209f766840d 100644 --- a/go/vt/vtgate/evalengine/api_hash.go +++ b/go/vt/vtgate/evalengine/api_hash.go @@ -20,6 +20,7 @@ import ( "math" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/mysql/decimal" "vitess.io/vitess/go/mysql/fastparse" "vitess.io/vitess/go/sqltypes" @@ -34,7 +35,7 @@ type HashCode = uint64 // NullsafeHashcode returns an int64 hashcode that is guaranteed to be the same // for two values that are considered equal by `NullsafeCompare`. 
func NullsafeHashcode(v sqltypes.Value, collation collations.ID, coerceType sqltypes.Type) (HashCode, error) { - e, err := valueToEvalCast(v, coerceType) + e, err := valueToEvalCast(v, coerceType, collation) if err != nil { return 0, err } @@ -45,7 +46,7 @@ func NullsafeHashcode(v sqltypes.Value, collation collations.ID, coerceType sqlt h := vthash.New() switch e := e.(type) { case *evalBytes: - if !collation.Valid() { + if collation == collations.Unknown { return 0, UnsupportedCollationHashError } e.col.Collation = collation @@ -93,10 +94,10 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat f = float64(uval) case v.IsFloat() || v.IsDecimal(): f, err = v.ToFloat64() - case v.IsQuoted(): + case v.IsText(), v.IsBinary(): f, _ = fastparse.ParseFloat64(v.RawStr()) default: - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected type %v", v.Type()) + return nullsafeHashcode128Default(hash, v, collation, coerceTo) } if err != nil { return err @@ -107,10 +108,12 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat case sqltypes.IsSigned(coerceTo): var i int64 var err error + var neg bool switch { case v.IsSigned(): i, err = v.ToInt64() + neg = i < 0 case v.IsUnsigned(): var uval uint64 uval, err = v.ToUint64() @@ -122,7 +125,8 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat return ErrHashCoercionIsNotExact } i = int64(fval) - case v.IsQuoted(): + neg = i < 0 + case v.IsText(), v.IsBinary(): i, err = fastparse.ParseInt64(v.RawStr(), 10) if err != nil { fval, _ := fastparse.ParseFloat64(v.RawStr()) @@ -131,13 +135,14 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat } i, err = int64(fval), nil } + neg = i < 0 default: - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected type %v", v.Type()) + return nullsafeHashcode128Default(hash, v, collation, coerceTo) } if err != nil { return err } - if i < 0 { + if neg { 
hash.Write16(hashPrefixIntegralNegative) } else { hash.Write16(hashPrefixIntegralPositive) @@ -147,11 +152,12 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat case sqltypes.IsUnsigned(coerceTo): var u uint64 var err error - + var neg bool switch { case v.IsSigned(): var ival int64 ival, err = v.ToInt64() + neg = ival < 0 u = uint64(ival) case v.IsUnsigned(): u, err = v.ToUint64() @@ -161,31 +167,37 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat if fval != math.Trunc(fval) || fval < 0 { return ErrHashCoercionIsNotExact } + neg = fval < 0 u = uint64(fval) - case v.IsQuoted(): + case v.IsText(), v.IsBinary(): u, err = fastparse.ParseUint64(v.RawStr(), 10) if err != nil { fval, _ := fastparse.ParseFloat64(v.RawStr()) if fval != math.Trunc(fval) || fval < 0 { return ErrHashCoercionIsNotExact } + neg = fval < 0 u, err = uint64(fval), nil } default: - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected type %v", v.Type()) + return nullsafeHashcode128Default(hash, v, collation, coerceTo) } if err != nil { return err } - hash.Write16(hashPrefixIntegralPositive) + if neg { + hash.Write16(hashPrefixIntegralNegative) + } else { + hash.Write16(hashPrefixIntegralPositive) + } hash.Write64(u) case sqltypes.IsBinary(coerceTo): hash.Write16(hashPrefixBytes) - collations.Binary.Hash(hash, v.Raw(), 0) + colldata.Lookup(collations.CollationBinaryID).Hash(hash, v.Raw(), 0) case sqltypes.IsText(coerceTo): - coll := collation.Get() + coll := colldata.Lookup(collation) if coll == nil { panic("cannot hash unsupported collation") } @@ -211,13 +223,30 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat fval, _ := fastparse.ParseFloat64(v.RawStr()) dec = decimal.NewFromFloat(fval) default: - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "coercion should not try to coerce this value to a decimal: %v", v) + return nullsafeHashcode128Default(hash, v, collation, coerceTo) } 
hash.Write16(hashPrefixDecimal) dec.Hash(hash) - default: - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected type %v", v.Type()) + return nullsafeHashcode128Default(hash, v, collation, coerceTo) } return nil } + +func nullsafeHashcode128Default(hash *vthash.Hasher, v sqltypes.Value, collation collations.ID, coerceTo sqltypes.Type) error { + // Slow path to handle all other types. This uses the generic + // logic for value casting to ensure we match MySQL here. + e, err := valueToEvalCast(v, coerceTo, collation) + if err != nil { + return err + } + switch e := e.(type) { + case nil: + hash.Write16(hashPrefixNil) + return nil + case hashable: + e.Hash(hash) + return nil + } + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected type %v", coerceTo) +} diff --git a/go/vt/vtgate/evalengine/api_hash_test.go b/go/vt/vtgate/evalengine/api_hash_test.go index 55200eb0619..832a1ed3b88 100644 --- a/go/vt/vtgate/evalengine/api_hash_test.go +++ b/go/vt/vtgate/evalengine/api_hash_test.go @@ -18,13 +18,15 @@ package evalengine import ( "fmt" - "math/rand" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vthash" "vitess.io/vitess/go/mysql/collations" @@ -37,24 +39,33 @@ func TestHashCodes(t *testing.T) { equal bool err error }{ - {sqltypes.NewInt64(-1), sqltypes.NewUint64(^uint64(0)), true, nil}, - {sqltypes.NewUint64(^uint64(0)), sqltypes.NewInt64(-1), true, nil}, {sqltypes.NewFloat64(-1), sqltypes.NewVarChar("-1"), true, nil}, {sqltypes.NewDecimal("-1"), sqltypes.NewVarChar("-1"), true, nil}, + {sqltypes.NewDate("2000-01-01"), sqltypes.NewInt64(20000101), true, nil}, + {sqltypes.NewDatetime("2000-01-01 11:22:33"), sqltypes.NewInt64(20000101112233), true, nil}, + {sqltypes.NewTime("11:22:33"), sqltypes.NewInt64(112233), true, nil}, + {sqltypes.NewInt64(20000101), sqltypes.NewDate("2000-01-01"), true, nil}, + 
{sqltypes.NewInt64(20000101112233), sqltypes.NewDatetime("2000-01-01 11:22:33"), true, nil}, + {sqltypes.NewInt64(112233), sqltypes.NewTime("11:22:33"), true, nil}, + {sqltypes.MakeTrusted(sqltypes.TypeJSON, []byte(`{"1": "foo", "2": "bar"}`)), sqltypes.MakeTrusted(sqltypes.TypeJSON, []byte(`{"2": "bar", "1": "foo"}`)), true, nil}, + {sqltypes.MakeTrusted(sqltypes.TypeJSON, []byte(`{"1": "foo", "2": "bar"}`)), sqltypes.NewVarChar(`{"2": "bar", "1": "foo"}`), false, nil}, + {sqltypes.NewVarChar(`{"2": "bar", "1": "foo"}`), sqltypes.MakeTrusted(sqltypes.TypeJSON, []byte(`{"1": "foo", "2": "bar"}`)), false, nil}, } for _, tc := range cases { - cmp, err := NullsafeCompare(tc.static, tc.dynamic, collations.CollationUtf8mb4ID) - require.NoError(t, err) - require.Equalf(t, tc.equal, cmp == 0, "got %v %s %v (expected %s)", tc.static, equality(cmp == 0).Operator(), tc.dynamic, equality(tc.equal)) + t.Run(fmt.Sprintf("%v %s %v", tc.static, equality(tc.equal).Operator(), tc.dynamic), func(t *testing.T) { + cmp, err := NullsafeCompare(tc.static, tc.dynamic, collations.CollationUtf8mb4ID) + require.NoError(t, err) + require.Equalf(t, tc.equal, cmp == 0, "got %v %s %v (expected %s)", tc.static, equality(cmp == 0).Operator(), tc.dynamic, equality(tc.equal)) - h1, err := NullsafeHashcode(tc.static, collations.CollationUtf8mb4ID, tc.static.Type()) - require.NoError(t, err) + h1, err := NullsafeHashcode(tc.static, collations.CollationUtf8mb4ID, tc.static.Type()) + require.NoError(t, err) - h2, err := NullsafeHashcode(tc.dynamic, collations.CollationUtf8mb4ID, tc.static.Type()) - require.ErrorIs(t, err, tc.err) + h2, err := NullsafeHashcode(tc.dynamic, collations.CollationUtf8mb4ID, tc.static.Type()) + require.ErrorIs(t, err, tc.err) - assert.Equalf(t, tc.equal, h1 == h2, "HASH(%v) %s HASH(%v) (expected %s)", tc.static, equality(h1 == h2).Operator(), tc.dynamic, equality(tc.equal)) + assert.Equalf(t, tc.equal, h1 == h2, "HASH(%v) %s HASH(%v) (expected %s)", tc.static, equality(h1 == 
h2).Operator(), tc.dynamic, equality(tc.equal)) + }) } } @@ -63,14 +74,14 @@ func TestHashCodes(t *testing.T) { func TestHashCodesRandom(t *testing.T) { tested := 0 equal := 0 - collation := collations.Local().LookupByName("utf8mb4_general_ci").ID() + collation := collations.Local().LookupByName("utf8mb4_general_ci") endTime := time.Now().Add(1 * time.Second) for time.Now().Before(endTime) { tested++ - v1, v2 := randomValues() + v1, v2 := sqltypes.TestRandomValues() cmp, err := NullsafeCompare(v1, v2, collation) require.NoErrorf(t, err, "%s compared with %s", v1.String(), v2.String()) - typ, err := CoerceTo(v1.Type(), v2.Type()) + typ, err := coerceTo(v1.Type(), v2.Type()) require.NoError(t, err) hash1, err := NullsafeHashcode(v1, collation, typ) @@ -107,32 +118,43 @@ func TestHashCodes128(t *testing.T) { equal bool err error }{ - {sqltypes.NewInt64(-1), sqltypes.NewUint64(^uint64(0)), true, nil}, - {sqltypes.NewUint64(^uint64(0)), sqltypes.NewInt64(-1), true, nil}, + {sqltypes.NewInt64(-1), sqltypes.NewUint64(^uint64(0)), false, nil}, + {sqltypes.NewUint64(^uint64(0)), sqltypes.NewInt64(-1), false, nil}, {sqltypes.NewInt64(-1), sqltypes.NewVarChar("-1"), true, nil}, {sqltypes.NewVarChar("-1"), sqltypes.NewInt64(-1), true, nil}, {sqltypes.NewInt64(23), sqltypes.NewFloat64(23.0), true, nil}, {sqltypes.NewInt64(23), sqltypes.NewFloat64(23.1), false, ErrHashCoercionIsNotExact}, {sqltypes.NewUint64(^uint64(0)), sqltypes.NewFloat64(-1.0), false, ErrHashCoercionIsNotExact}, {sqltypes.NewUint64(42), sqltypes.NewFloat64(42.0), true, nil}, + {sqltypes.NewDate("2000-01-01"), sqltypes.NewInt64(20000101), true, nil}, + {sqltypes.NewDatetime("2000-01-01 11:22:33"), sqltypes.NewInt64(20000101112233), true, nil}, + {sqltypes.NewTime("11:22:33"), sqltypes.NewInt64(112233), true, nil}, + {sqltypes.NewInt64(20000101), sqltypes.NewDate("2000-01-01"), true, nil}, + {sqltypes.NewInt64(20000101112233), sqltypes.NewDatetime("2000-01-01 11:22:33"), true, nil}, + 
{sqltypes.NewInt64(112233), sqltypes.NewTime("11:22:33"), true, nil}, + {sqltypes.MakeTrusted(sqltypes.TypeJSON, []byte(`{"1": "foo", "2": "bar"}`)), sqltypes.MakeTrusted(sqltypes.TypeJSON, []byte(`{"2": "bar", "1": "foo"}`)), true, nil}, + {sqltypes.MakeTrusted(sqltypes.TypeJSON, []byte(`{"1": "foo", "2": "bar"}`)), sqltypes.NewVarChar(`{"2": "bar", "1": "foo"}`), false, nil}, + {sqltypes.NewVarChar(`{"2": "bar", "1": "foo"}`), sqltypes.MakeTrusted(sqltypes.TypeJSON, []byte(`{"1": "foo", "2": "bar"}`)), false, nil}, } for _, tc := range cases { - cmp, err := NullsafeCompare(tc.static, tc.dynamic, collations.CollationUtf8mb4ID) - require.NoError(t, err) - require.Equalf(t, tc.equal, cmp == 0, "got %v %s %v (expected %s)", tc.static, equality(cmp == 0).Operator(), tc.dynamic, equality(tc.equal)) - - hasher1 := vthash.New() - err = NullsafeHashcode128(&hasher1, tc.static, collations.CollationUtf8mb4ID, tc.static.Type()) - require.NoError(t, err) - - hasher2 := vthash.New() - err = NullsafeHashcode128(&hasher2, tc.dynamic, collations.CollationUtf8mb4ID, tc.static.Type()) - require.ErrorIs(t, err, tc.err) - - h1 := hasher1.Sum128() - h2 := hasher2.Sum128() - assert.Equalf(t, tc.equal, h1 == h2, "HASH(%v) %s HASH(%v) (expected %s)", tc.static, equality(h1 == h2).Operator(), tc.dynamic, equality(tc.equal)) + t.Run(fmt.Sprintf("%v %s %v", tc.static, equality(tc.equal).Operator(), tc.dynamic), func(t *testing.T) { + cmp, err := NullsafeCompare(tc.static, tc.dynamic, collations.CollationUtf8mb4ID) + require.NoError(t, err) + require.Equalf(t, tc.equal, cmp == 0, "got %v %s %v (expected %s)", tc.static, equality(cmp == 0).Operator(), tc.dynamic, equality(tc.equal)) + + hasher1 := vthash.New() + err = NullsafeHashcode128(&hasher1, tc.static, collations.CollationUtf8mb4ID, tc.static.Type()) + require.NoError(t, err) + + hasher2 := vthash.New() + err = NullsafeHashcode128(&hasher2, tc.dynamic, collations.CollationUtf8mb4ID, tc.static.Type()) + require.ErrorIs(t, err, tc.err) + 
+ h1 := hasher1.Sum128() + h2 := hasher2.Sum128() + assert.Equalf(t, tc.equal, h1 == h2, "HASH(%v) %s HASH(%v) (expected %s)", tc.static, equality(h1 == h2).Operator(), tc.dynamic, equality(tc.equal)) + }) } } @@ -141,14 +163,14 @@ func TestHashCodes128(t *testing.T) { func TestHashCodesRandom128(t *testing.T) { tested := 0 equal := 0 - collation := collations.Local().LookupByName("utf8mb4_general_ci").ID() + collation := collations.Local().LookupByName("utf8mb4_general_ci") endTime := time.Now().Add(1 * time.Second) for time.Now().Before(endTime) { tested++ - v1, v2 := randomValues() + v1, v2 := sqltypes.TestRandomValues() cmp, err := NullsafeCompare(v1, v2, collation) require.NoErrorf(t, err, "%s compared with %s", v1.String(), v2.String()) - typ, err := CoerceTo(v1.Type(), v2.Type()) + typ, err := coerceTo(v1.Type(), v2.Type()) require.NoError(t, err) hasher1 := vthash.New() @@ -167,62 +189,47 @@ func TestHashCodesRandom128(t *testing.T) { t.Logf("tested %d values, with %d equalities found\n", tested, equal) } -func randomValues() (sqltypes.Value, sqltypes.Value) { - if rand.Int()%2 == 0 { - // create a single value, and turn it into two different types - v := rand.Int() - return randomNumericType(v), randomNumericType(v) +// coerceTo takes two input types, and decides how they should be coerced before compared +func coerceTo(v1, v2 sqltypes.Type) (sqltypes.Type, error) { + if v1 == v2 { + return v1, nil + } + if sqltypes.IsNull(v1) || sqltypes.IsNull(v2) { + return sqltypes.Null, nil + } + if (sqltypes.IsText(v1) || sqltypes.IsBinary(v1)) && (sqltypes.IsText(v2) || sqltypes.IsBinary(v2)) { + return sqltypes.VarChar, nil + } + if sqltypes.IsDateOrTime(v1) { + return v1, nil + } + if sqltypes.IsDateOrTime(v2) { + return v2, nil } - // just produce two arbitrary random values and compare - return randomValue(), randomValue() -} - -func randomNumericType(i int) sqltypes.Value { - r := rand.Intn(len(numericTypes)) - return numericTypes[r](i) - -} - -var numericTypes 
= []func(int) sqltypes.Value{ - func(i int) sqltypes.Value { return sqltypes.NULL }, - func(i int) sqltypes.Value { return sqltypes.NewInt8(int8(i)) }, - func(i int) sqltypes.Value { return sqltypes.NewInt32(int32(i)) }, - func(i int) sqltypes.Value { return sqltypes.NewInt64(int64(i)) }, - func(i int) sqltypes.Value { return sqltypes.NewUint64(uint64(i)) }, - func(i int) sqltypes.Value { return sqltypes.NewUint32(uint32(i)) }, - func(i int) sqltypes.Value { return sqltypes.NewFloat64(float64(i)) }, - func(i int) sqltypes.Value { return sqltypes.NewDecimal(fmt.Sprintf("%d", i)) }, - func(i int) sqltypes.Value { return sqltypes.NewVarChar(fmt.Sprintf("%d", i)) }, - func(i int) sqltypes.Value { return sqltypes.NewVarChar(fmt.Sprintf(" %f aa", float64(i))) }, -} - -var randomGenerators = []func() sqltypes.Value{ - randomNull, - randomInt8, - randomInt32, - randomInt64, - randomUint64, - randomUint32, - randomVarChar, - randomComplexVarChar, - randomDecimal, -} - -func randomValue() sqltypes.Value { - r := rand.Intn(len(randomGenerators)) - return randomGenerators[r]() -} - -func randomNull() sqltypes.Value { return sqltypes.NULL } -func randomInt8() sqltypes.Value { return sqltypes.NewInt8(int8(rand.Intn(255))) } -func randomInt32() sqltypes.Value { return sqltypes.NewInt32(rand.Int31()) } -func randomInt64() sqltypes.Value { return sqltypes.NewInt64(rand.Int63()) } -func randomUint32() sqltypes.Value { return sqltypes.NewUint32(rand.Uint32()) } -func randomUint64() sqltypes.Value { return sqltypes.NewUint64(rand.Uint64()) } -func randomDecimal() sqltypes.Value { return sqltypes.NewDecimal(fmt.Sprintf("%d", rand.Int63())) } -func randomVarChar() sqltypes.Value { return sqltypes.NewVarChar(fmt.Sprintf("%d", rand.Int63())) } - -func randomComplexVarChar() sqltypes.Value { - return sqltypes.NewVarChar(fmt.Sprintf(" \t %f apa", float64(rand.Intn(1000))*1.10)) + if sqltypes.IsNumber(v1) || sqltypes.IsNumber(v2) { + switch { + case sqltypes.IsText(v1) || 
sqltypes.IsBinary(v1) || sqltypes.IsText(v2) || sqltypes.IsBinary(v2): + return sqltypes.Float64, nil + case sqltypes.IsFloat(v2) || v2 == sqltypes.Decimal || sqltypes.IsFloat(v1) || v1 == sqltypes.Decimal: + return sqltypes.Float64, nil + case sqltypes.IsSigned(v1): + switch { + case sqltypes.IsUnsigned(v2): + return sqltypes.Uint64, nil + case sqltypes.IsSigned(v2): + return sqltypes.Int64, nil + default: + return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "types does not support hashcode yet: %v vs %v", v1, v2) + } + case sqltypes.IsUnsigned(v1): + switch { + case sqltypes.IsSigned(v2) || sqltypes.IsUnsigned(v2): + return sqltypes.Uint64, nil + default: + return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "types does not support hashcode yet: %v vs %v", v1, v2) + } + } + } + return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "types does not support hashcode yet: %v vs %v", v1, v2) } diff --git a/go/vt/vtgate/evalengine/api_literal.go b/go/vt/vtgate/evalengine/api_literal.go index 271798a5c7f..1b2ba6e2da2 100644 --- a/go/vt/vtgate/evalengine/api_literal.go +++ b/go/vt/vtgate/evalengine/api_literal.go @@ -17,14 +17,16 @@ limitations under the License. 
package evalengine import ( - "encoding/hex" + "errors" "math" "math/big" - "strconv" "unicode/utf8" + "vitess.io/vitess/go/hack" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/fastparse" + "vitess.io/vitess/go/mysql/hex" "vitess.io/vitess/go/sqltypes" ) @@ -38,9 +40,9 @@ func NewLiteralIntegralFromBytes(val []byte) (*Literal, error) { panic("NewLiteralIntegralFromBytes: negative value") } - uval, err := strconv.ParseUint(string(val), 10, 64) + uval, err := fastparse.ParseUint64(hack.String(val), 10) if err != nil { - if numError, ok := err.(*strconv.NumError); ok && numError.Err == strconv.ErrRange { + if errors.Is(err, fastparse.ErrOverflow) { return NewLiteralDecimalFromBytes(val) } return nil, err @@ -72,7 +74,7 @@ func NewLiteralFloat(val float64) *Literal { // NewLiteralFloatFromBytes returns a float literal expression from a slice of bytes func NewLiteralFloatFromBytes(val []byte) (*Literal, error) { - fval, err := strconv.ParseFloat(string(val), 64) + fval, err := fastparse.ParseFloat64(hack.String(val)) if err != nil { return nil, err } @@ -129,8 +131,8 @@ func NewLiteralDatetimeFromBytes(val []byte) (*Literal, error) { } func parseHexLiteral(val []byte) ([]byte, error) { - raw := make([]byte, hex.DecodedLen(len(val))) - if _, err := hex.Decode(raw, val); err != nil { + raw := make([]byte, hex.DecodedLen(val)) + if err := hex.DecodeBytes(raw, val); err != nil { return nil, err } return raw, nil @@ -194,39 +196,27 @@ func NewLiteralBinaryFromBit(val []byte) (*Literal, error) { // NewBindVar returns a bind variable func NewBindVar(key string, typ sqltypes.Type, col collations.ID) *BindVariable { return &BindVariable{ - Key: key, - Type: typ, - Collation: collations.TypedCollation{ - Collation: col, - Coercibility: collations.CoerceCoercible, - Repertoire: collations.RepertoireUnicode, - }, + Key: key, + Type: typ, + Collation: defaultCoercionCollation(col), } } // NewBindVarTuple returns a bind 
variable containing a tuple func NewBindVarTuple(key string, col collations.ID) *BindVariable { return &BindVariable{ - Key: key, - Type: sqltypes.Tuple, - Collation: collations.TypedCollation{ - Collation: col, - Coercibility: collations.CoerceCoercible, - Repertoire: collations.RepertoireUnicode, - }, + Key: key, + Type: sqltypes.Tuple, + Collation: defaultCoercionCollation(col), } } // NewColumn returns a column expression func NewColumn(offset int, typ sqltypes.Type, col collations.ID) *Column { return &Column{ - Offset: offset, - Type: typ, - Collation: collations.TypedCollation{ - Collation: col, - Coercibility: collations.CoerceImplicit, - Repertoire: collations.RepertoireUnicode, - }, + Offset: offset, + Type: typ, + Collation: defaultCoercionCollation(col), } } diff --git a/go/vt/vtgate/evalengine/api_types.go b/go/vt/vtgate/evalengine/api_types.go deleted file mode 100644 index 734f35b35aa..00000000000 --- a/go/vt/vtgate/evalengine/api_types.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2023 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package evalengine - -import ( - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/sqltypes" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" -) - -// CoerceTo takes two input types, and decides how they should be coerced before compared -func CoerceTo(v1, v2 sqltypes.Type) (sqltypes.Type, error) { - if v1 == v2 { - return v1, nil - } - if sqltypes.IsNull(v1) || sqltypes.IsNull(v2) { - return sqltypes.Null, nil - } - if (sqltypes.IsText(v1) || sqltypes.IsBinary(v1)) && (sqltypes.IsText(v2) || sqltypes.IsBinary(v2)) { - return sqltypes.VarChar, nil - } - if sqltypes.IsNumber(v1) || sqltypes.IsNumber(v2) { - switch { - case sqltypes.IsText(v1) || sqltypes.IsBinary(v1) || sqltypes.IsText(v2) || sqltypes.IsBinary(v2): - return sqltypes.Float64, nil - case sqltypes.IsFloat(v2) || v2 == sqltypes.Decimal || sqltypes.IsFloat(v1) || v1 == sqltypes.Decimal: - return sqltypes.Float64, nil - case sqltypes.IsSigned(v1): - switch { - case sqltypes.IsUnsigned(v2): - return sqltypes.Uint64, nil - case sqltypes.IsSigned(v2): - return sqltypes.Int64, nil - default: - return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "types does not support hashcode yet: %v vs %v", v1, v2) - } - case sqltypes.IsUnsigned(v1): - switch { - case sqltypes.IsSigned(v2) || sqltypes.IsUnsigned(v2): - return sqltypes.Uint64, nil - default: - return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "types does not support hashcode yet: %v vs %v", v1, v2) - } - } - } - return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "types does not support hashcode yet: %v vs %v", v1, v2) -} - -// Cast converts a Value to the target type. 
-func Cast(v sqltypes.Value, typ sqltypes.Type) (sqltypes.Value, error) { - if v.Type() == typ || v.IsNull() { - return v, nil - } - vBytes, err := v.ToBytes() - if err != nil { - return v, err - } - if sqltypes.IsSigned(typ) && v.IsSigned() { - return sqltypes.MakeTrusted(typ, vBytes), nil - } - if sqltypes.IsUnsigned(typ) && v.IsUnsigned() { - return sqltypes.MakeTrusted(typ, vBytes), nil - } - if (sqltypes.IsFloat(typ) || typ == sqltypes.Decimal) && (v.IsIntegral() || v.IsFloat() || v.Type() == sqltypes.Decimal) { - return sqltypes.MakeTrusted(typ, vBytes), nil - } - if sqltypes.IsQuoted(typ) && (v.IsIntegral() || v.IsFloat() || v.Type() == sqltypes.Decimal || v.IsQuoted()) { - return sqltypes.MakeTrusted(typ, vBytes), nil - } - - // Explicitly disallow Expression. - if v.Type() == sqltypes.Expression { - return sqltypes.NULL, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v cannot be cast to %v", v, typ) - } - - // If the above fast-paths were not possible, - // go through full validation. - return sqltypes.NewValue(typ, vBytes) -} - -// ToUint64 converts Value to uint64. -func ToUint64(v sqltypes.Value) (uint64, error) { - num, err := valueToEvalNumeric(v) - if err != nil { - return 0, err - } - switch num := num.(type) { - case *evalInt64: - if num.i < 0 { - return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "negative number cannot be converted to unsigned: %d", num.i) - } - return uint64(num.i), nil - case *evalUint64: - return num.u, nil - default: - return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected return from numeric evaluation (%T)", num) - } -} - -// ToInt64 converts Value to int64. 
-func ToInt64(v sqltypes.Value) (int64, error) { - num, err := valueToEvalNumeric(v) - if err != nil { - return 0, err - } - switch num := num.(type) { - case *evalInt64: - return num.i, nil - case *evalUint64: - ival := int64(num.u) - if ival < 0 { - return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsigned number overflows int64 value: %d", num.u) - } - return ival, nil - default: - return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected return from numeric evaluation (%T)", num) - } -} - -// ToFloat64 converts Value to float64. -func ToFloat64(v sqltypes.Value) (float64, error) { - num, err := valueToEval(v, collationNumeric) - if err != nil { - return 0, err - } - f, _ := evalToFloat(num) - return f.f, nil -} - -func LiteralToValue(literal *sqlparser.Literal) (sqltypes.Value, error) { - lit, err := translateLiteral(literal, collations.Default()) - if err != nil { - return sqltypes.Value{}, err - } - return evalToSQLValue(lit.inner), nil -} - -// ToNative converts Value to a native go type. -// Decimal is returned as []byte. 
-func ToNative(v sqltypes.Value) (any, error) { - var out any - var err error - switch { - case v.Type() == sqltypes.Null: - // no-op - case v.IsSigned(): - return ToInt64(v) - case v.IsUnsigned(): - return ToUint64(v) - case v.IsFloat(): - return ToFloat64(v) - case v.IsQuoted() || v.Type() == sqltypes.Bit || v.Type() == sqltypes.Decimal: - out, err = v.ToBytes() - case v.Type() == sqltypes.Expression: - err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v cannot be converted to a go type", v) - } - return out, err -} diff --git a/go/vt/vtgate/evalengine/arithmetic.go b/go/vt/vtgate/evalengine/arithmetic.go index 5117dabe045..d6ac81b7a58 100644 --- a/go/vt/vtgate/evalengine/arithmetic.go +++ b/go/vt/vtgate/evalengine/arithmetic.go @@ -19,16 +19,13 @@ package evalengine import ( "math" - "golang.org/x/exp/constraints" - "vitess.io/vitess/go/mysql/decimal" - "vitess.io/vitess/go/sqltypes" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) -func dataOutOfRangeError[N1, N2 constraints.Integer | constraints.Float](v1 N1, v2 N2, typ, sign string) error { +func dataOutOfRangeError[N1, N2 int | int64 | uint64 | float64](v1 N1, v2 N2, typ, sign string) error { return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "%s value is out of range in '(%v %s %v)'", typ, v1, sign, v2) } @@ -329,12 +326,12 @@ func mathAdd_dx(v1 *evalDecimal, v2 evalNumeric) *evalDecimal { } func mathAdd_dd(v1, v2 *evalDecimal) *evalDecimal { - return newEvalDecimalWithPrec(v1.dec.Add(v2.dec), maxprec(v1.length, v2.length)) + return newEvalDecimalWithPrec(v1.dec.Add(v2.dec), max(v1.length, v2.length)) } func mathAdd_dd0(v1, v2 *evalDecimal) { v1.dec = v1.dec.Add(v2.dec) - v1.length = maxprec(v1.length, v2.length) + v1.length = max(v1.length, v2.length) } func mathSub_ii(v1, v2 int64) (*evalInt64, error) { @@ -420,12 +417,12 @@ func mathSub_xd(v1 evalNumeric, v2 *evalDecimal) *evalDecimal { } func mathSub_dd(v1, v2 *evalDecimal) *evalDecimal 
{ - return newEvalDecimalWithPrec(v1.dec.Sub(v2.dec), maxprec(v1.length, v2.length)) + return newEvalDecimalWithPrec(v1.dec.Sub(v2.dec), max(v1.length, v2.length)) } func mathSub_dd0(v1, v2 *evalDecimal) { v1.dec = v1.dec.Sub(v2.dec) - v1.length = maxprec(v1.length, v2.length) + v1.length = max(v1.length, v2.length) } func mathMul_ii(v1, v2 int64) (*evalInt64, error) { @@ -484,13 +481,6 @@ func mathMul_ff(v1, v2 float64) *evalFloat { return newEvalFloat(v1 * v2) } -func maxprec(a, b int32) int32 { - if a > b { - return a - } - return b -} - func mathMul_dx(v1 *evalDecimal, v2 evalNumeric) *evalDecimal { return mathMul_dd(v1, v2.toDecimal(0, 0)) } diff --git a/go/vt/vtgate/evalengine/cached_size.go b/go/vt/vtgate/evalengine/cached_size.go index bfd2d9db9b4..69c39249fb9 100644 --- a/go/vt/vtgate/evalengine/cached_size.go +++ b/go/vt/vtgate/evalengine/cached_size.go @@ -279,7 +279,7 @@ func (cached *LikeExpr) CachedSize(alloc bool) int64 { } // field BinaryExpr vitess.io/vitess/go/vt/vtgate/evalengine.BinaryExpr size += cached.BinaryExpr.CachedSize(false) - // field Match vitess.io/vitess/go/mysql/collations.WildcardPattern + // field Match vitess.io/vitess/go/mysql/collations/colldata.WildcardPattern if cc, ok := cached.Match.(cachedObject); ok { size += cc.CachedSize(true) } @@ -669,6 +669,18 @@ func (cached *builtinDateFormat) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } +func (cached *builtinDateMath) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} func (cached *builtinDayOfMonth) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1257,6 +1269,54 @@ func (cached *builtinRandomBytes) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } +func (cached 
*builtinRegexpInstr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinRegexpLike) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinRegexpReplace) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinRegexpSubstr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} func (cached *builtinRepeat) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1565,8 +1625,8 @@ func (cached *builtinWeightString) CachedSize(alloc bool) int64 { if alloc { size += int64(48) } - // field String vitess.io/vitess/go/vt/vtgate/evalengine.Expr - if cc, ok := cached.String.(cachedObject); ok { + // field Expr vitess.io/vitess/go/vt/vtgate/evalengine.Expr + if cc, ok := cached.Expr.(cachedObject); ok { size += cc.CachedSize(true) } // field Cast string diff --git a/go/vt/vtgate/evalengine/collation.go b/go/vt/vtgate/evalengine/collation.go new file mode 100644 index 00000000000..9d53a9d8ea9 --- /dev/null +++ b/go/vt/vtgate/evalengine/collation.go @@ -0,0 +1,27 @@ +/* +Copyright 2023 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import "vitess.io/vitess/go/mysql/collations" + +func defaultCoercionCollation(id collations.ID) collations.TypedCollation { + return collations.TypedCollation{ + Collation: id, + Coercibility: collations.CoerceCoercible, + Repertoire: collations.RepertoireUnicode, + } +} diff --git a/go/vt/vtgate/evalengine/compare.go b/go/vt/vtgate/evalengine/compare.go index f2262cf8730..aa452c61729 100644 --- a/go/vt/vtgate/evalengine/compare.go +++ b/go/vt/vtgate/evalengine/compare.go @@ -19,6 +19,7 @@ package evalengine import ( "bytes" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/mysql/decimal" "vitess.io/vitess/go/mysql/json" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -137,9 +138,9 @@ func compareStrings(l, r eval) (int, error) { if err != nil { return 0, err } - collation := col.Get() + collation := colldata.Lookup(col.Collation) if collation == nil { - panic("unknown collation after coercion") + return 0, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "cannot compare strings, collation is unknown or unsupported (collation ID: %d)", col.Collation) } return collation.Collate(l.ToRawBytes(), r.ToRawBytes(), false), nil } @@ -180,7 +181,7 @@ func compareJSONValue(lj, rj *json.Value) (int, error) { } return ld.Cmp(rd), nil case json.TypeString: - return collationJSON.Collation.Get().Collate(lj.ToRawBytes(), rj.ToRawBytes(), false), nil + return 
colldata.Lookup(collationJSON.Collation).Collate(lj.ToRawBytes(), rj.ToRawBytes(), false), nil case json.TypeBlob, json.TypeBit, json.TypeOpaque: return bytes.Compare(lj.ToUnencodedBytes(), rj.ToUnencodedBytes()), nil case json.TypeBoolean: diff --git a/go/vt/vtgate/evalengine/compiler.go b/go/vt/vtgate/evalengine/compiler.go index 58fb02dae9f..23f7a9f10aa 100644 --- a/go/vt/vtgate/evalengine/compiler.go +++ b/go/vt/vtgate/evalengine/compiler.go @@ -19,6 +19,7 @@ package evalengine import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/mysql/json" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/proto/vtrpc" @@ -38,9 +39,9 @@ type CompilerLog interface { } type compiledCoercion struct { - col collations.Collation - left collations.Coercion - right collations.Coercion + col colldata.Collation + left colldata.Coercion + right colldata.Coercion } type ctype struct { @@ -349,7 +350,7 @@ func (c *compiler) compareAsStrings(lt ctype, rt ctype) error { return err } if coerceLeft == nil && coerceRight == nil { - c.asm.CmpString_collate(merged.Collation.Get()) + c.asm.CmpString_collate(colldata.Lookup(merged.Collation)) } else { if coerceLeft == nil { coerceLeft = func(dst, in []byte) ([]byte, error) { return in, nil } @@ -358,7 +359,7 @@ func (c *compiler) compareAsStrings(lt ctype, rt ctype) error { coerceRight = func(dst, in []byte) ([]byte, error) { return in, nil } } c.asm.CmpString_coerce(&compiledCoercion{ - col: merged.Collation.Get(), + col: colldata.Lookup(merged.Collation), left: coerceLeft, right: coerceRight, }) @@ -367,7 +368,7 @@ func (c *compiler) compareAsStrings(lt ctype, rt ctype) error { } func isEncodingJSONSafe(col collations.ID) bool { - switch col.Get().Charset().(type) { + switch colldata.Lookup(col).Charset().(type) { case charset.Charset_utf8mb4, charset.Charset_utf8mb3, charset.Charset_binary: return true default: diff --git 
a/go/vt/vtgate/evalengine/compiler_asm.go b/go/vt/vtgate/evalengine/compiler_asm.go index fe3927792a4..2a881760fb2 100644 --- a/go/vt/vtgate/evalengine/compiler_asm.go +++ b/go/vt/vtgate/evalengine/compiler_asm.go @@ -35,6 +35,7 @@ import ( "github.com/google/uuid" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/mysql/hex" "vitess.io/vitess/go/hack" @@ -43,8 +44,9 @@ import ( "vitess.io/vitess/go/mysql/datetime" "vitess.io/vitess/go/mysql/decimal" "vitess.io/vitess/go/mysql/fastparse" + "vitess.io/vitess/go/mysql/icuregex" "vitess.io/vitess/go/mysql/json" - "vitess.io/vitess/go/slices2" + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/proto/vtrpc" @@ -692,7 +694,7 @@ func (asm *assembler) CmpString_coerce(coercion *compiledCoercion) { }, "CMP VARCHAR(SP-2), VARCHAR(SP-1) COERCE AND COLLATE '%s'", coercion.col.Name()) } -func (asm *assembler) CmpString_collate(collation collations.Collation) { +func (asm *assembler) CmpString_collate(collation colldata.Collation) { asm.adjustStack(-2) asm.emit(func(env *ExpressionEnv) int { @@ -732,10 +734,10 @@ func (asm *assembler) CmpTupleNullsafe() { l := env.vm.stack[env.vm.sp-2].(*evalTuple) r := env.vm.stack[env.vm.sp-1].(*evalTuple) - var equals bool + var equals int equals, env.vm.err = evalCompareTuplesNullSafe(l.t, r.t) - env.vm.stack[env.vm.sp-2] = env.vm.arena.newEvalBool(equals) + env.vm.stack[env.vm.sp-2] = env.vm.arena.newEvalBool(equals == 0) env.vm.sp -= 1 return 1 }, "CMP NULLSAFE TUPLE(SP-2), TUPLE(SP-1)") @@ -2024,7 +2026,7 @@ func (asm *assembler) Fn_CONV_uc(t sqltypes.Type, col collations.TypedCollation) func (asm *assembler) Fn_COLLATION(col collations.TypedCollation) { asm.emit(func(env *ExpressionEnv) int { v := evalCollation(env.vm.stack[env.vm.sp-1]) - env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalText([]byte(v.Collation.Get().Name()), col) + env.vm.stack[env.vm.sp-1] = 
env.vm.arena.newEvalText([]byte(collations.Local().LookupName(v.Collation)), col) return 1 }, "FN COLLATION (SP-1)") } @@ -2040,6 +2042,7 @@ func (asm *assembler) Fn_FROM_BASE64(t sqltypes.Type) { } str.tt = int16(t) str.bytes = decoded + str.col = collationBinary return 1 }, "FN FROM_BASE64 VARCHAR(SP-1)") } @@ -2100,8 +2103,8 @@ func (asm *assembler) Fn_UNHEX_b(tt sqltypes.Type) { arg := env.vm.stack[env.vm.sp-1].(*evalBytes) decoded := make([]byte, hex.DecodedLen(arg.bytes)) - ok := hex.DecodeBytes(decoded, arg.bytes) - if !ok { + err := hex.DecodeBytes(decoded, arg.bytes) + if err != nil { env.vm.stack[env.vm.sp-1] = nil return 1 } @@ -2170,7 +2173,7 @@ func (asm *assembler) Fn_JSON_CONTAINS_PATH(match jsonMatch, paths []*json.Path) } func (asm *assembler) Fn_JSON_EXTRACT0(jp []*json.Path) { - multi := len(jp) > 1 || slices2.Any(jp, func(path *json.Path) bool { return path.ContainsWildcards() }) + multi := len(jp) > 1 || slice.Any(jp, func(path *json.Path) bool { return path.ContainsWildcards() }) if multi { asm.emit(func(env *ExpressionEnv) int { @@ -2292,7 +2295,7 @@ func (asm *assembler) Fn_CHAR_LENGTH() { if sqltypes.IsBinary(arg.SQLType()) { env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(len(arg.bytes))) } else { - coll := arg.col.Collation.Get() + coll := colldata.Lookup(arg.col.Collation) count := charset.Length(coll.Charset(), arg.bytes) env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(count)) } @@ -2321,8 +2324,8 @@ func (asm *assembler) Fn_LUCASE(upcase bool) { asm.emit(func(env *ExpressionEnv) int { str := env.vm.stack[env.vm.sp-1].(*evalBytes) - coll := str.col.Collation.Get() - csa, ok := coll.(collations.CaseAwareCollation) + coll := colldata.Lookup(str.col.Collation) + csa, ok := coll.(colldata.CaseAwareCollation) if !ok { env.vm.err = vterrors.Errorf(vtrpc.Code_UNIMPLEMENTED, "not implemented") } else { @@ -2335,8 +2338,8 @@ func (asm *assembler) Fn_LUCASE(upcase bool) { asm.emit(func(env *ExpressionEnv) int { str := 
env.vm.stack[env.vm.sp-1].(*evalBytes) - coll := str.col.Collation.Get() - csa, ok := coll.(collations.CaseAwareCollation) + coll := colldata.Lookup(str.col.Collation) + csa, ok := coll.(colldata.CaseAwareCollation) if !ok { env.vm.err = vterrors.Errorf(vtrpc.Code_UNIMPLEMENTED, "not implemented") } else { @@ -2366,7 +2369,7 @@ func (asm *assembler) Fn_MULTICMP_b(args int, lessThan bool) { } func (asm *assembler) Fn_MULTICMP_c(args int, lessThan bool, tc collations.TypedCollation) { - col := tc.Collation.Get() + col := colldata.Lookup(tc.Collation) asm.adjustStack(-(args - 1)) asm.emit(func(env *ExpressionEnv) int { @@ -2495,7 +2498,7 @@ func (asm *assembler) Fn_LEFT(col collations.TypedCollation) { return 1 } - cs := col.Collation.Get().Charset() + cs := colldata.Lookup(col.Collation).Charset() strLen := charset.Length(cs, str.bytes) str.tt = int16(sqltypes.VarChar) @@ -2526,7 +2529,7 @@ func (asm *assembler) Fn_RIGHT(col collations.TypedCollation) { return 1 } - cs := col.Collation.Get().Charset() + cs := colldata.Lookup(col.Collation).Charset() strLen := charset.Length(cs, str.bytes) str.tt = int16(sqltypes.VarChar) @@ -2563,7 +2566,7 @@ func (asm *assembler) Fn_LPAD(col collations.TypedCollation) { return 1 } - cs := col.Collation.Get().Charset() + cs := colldata.Lookup(col.Collation).Charset() strLen := charset.Length(cs, str.bytes) l := int(length.i) @@ -2617,7 +2620,7 @@ func (asm *assembler) Fn_RPAD(col collations.TypedCollation) { return 1 } - cs := col.Collation.Get().Charset() + cs := colldata.Lookup(col.Collation).Charset() strLen := charset.Length(cs, str.bytes) l := int(length.i) @@ -2730,22 +2733,17 @@ func (asm *assembler) Fn_TO_BASE64(t sqltypes.Type, col collations.TypedCollatio }, "FN TO_BASE64 VARCHAR(SP-1)") } -func (asm *assembler) Fn_WEIGHT_STRING_b(length int) { - asm.emit(func(env *ExpressionEnv) int { - str := env.vm.stack[env.vm.sp-1].(*evalBytes) - w := collations.Binary.WeightString(make([]byte, 0, length), str.bytes, 
collations.PadToMax) - env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBinary(w) - return 1 - }, "FN WEIGHT_STRING VARBINARY(SP-1)") -} - -func (asm *assembler) Fn_WEIGHT_STRING_c(col collations.Collation, length int) { +func (asm *assembler) Fn_WEIGHT_STRING(typ sqltypes.Type, length int) { asm.emit(func(env *ExpressionEnv) int { - str := env.vm.stack[env.vm.sp-1].(*evalBytes) - w := col.WeightString(nil, str.bytes, length) - env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBinary(w) + input := env.vm.stack[env.vm.sp-1] + w, _, err := evalWeightString(nil, input, length, 0) + if err != nil { + env.vm.err = err + return 1 + } + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalRaw(w, typ, collationBinary) return 1 - }, "FN WEIGHT_STRING VARCHAR(SP-1)") + }, "FN WEIGHT_STRING (SP-1)") } func (asm *assembler) In_table(not bool, table map[vthash.Hash]struct{}) { @@ -2973,7 +2971,7 @@ func (asm *assembler) Like_coerce(expr *LikeExpr, coercion *compiledCoercion) { }, "LIKE VARCHAR(SP-2), VARCHAR(SP-1) COERCE AND COLLATE '%s'", coercion.col.Name()) } -func (asm *assembler) Like_collate(expr *LikeExpr, collation collations.Collation) { +func (asm *assembler) Like_collate(expr *LikeExpr, collation colldata.Collation) { asm.adjustStack(-1) asm.emit(func(env *ExpressionEnv) int { @@ -3346,7 +3344,7 @@ func (asm *assembler) Fn_Sysdate(prec uint8) { if tz := env.currentTimezone(); tz != nil { now = now.In(tz) } - val.bytes = datetime.FromStdTime(now).Format(prec) + val.bytes = datetime.NewDateTimeFromStd(now).Format(prec) val.col = collationBinary env.vm.stack[env.vm.sp] = val env.vm.sp++ @@ -3597,7 +3595,7 @@ func (asm *assembler) Fn_FROM_UNIXTIME_i() { if tz := env.currentTimezone(); tz != nil { t = t.In(tz) } - env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalDateTime(datetime.FromStdTime(t), 0) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalDateTime(datetime.NewDateTimeFromStd(t), 0) return 1 }, "FN FROM_UNIXTIME INT64(SP-1)") } @@ -3613,7 +3611,7 @@ func (asm *assembler) 
Fn_FROM_UNIXTIME_u() { if tz := env.currentTimezone(); tz != nil { t = t.In(tz) } - env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalDateTime(datetime.FromStdTime(t), 0) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalDateTime(datetime.NewDateTimeFromStd(t), 0) return 1 }, "FN FROM_UNIXTIME UINT64(SP-1)") } @@ -3637,7 +3635,7 @@ func (asm *assembler) Fn_FROM_UNIXTIME_d() { if tz := env.currentTimezone(); tz != nil { t = t.In(tz) } - env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalDateTime(datetime.FromStdTime(t), int(arg.length)) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalDateTime(datetime.NewDateTimeFromStd(t), int(arg.length)) return 1 }, "FN FROM_UNIXTIME DECIMAL(SP-1)") } @@ -3654,7 +3652,7 @@ func (asm *assembler) Fn_FROM_UNIXTIME_f() { if tz := env.currentTimezone(); tz != nil { t = t.In(tz) } - env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalDateTime(datetime.FromStdTime(t), 6) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalDateTime(datetime.NewDateTimeFromStd(t), 6) return 1 }, "FN FROM_UNIXTIME FLOAT(SP-1)") } @@ -3680,7 +3678,7 @@ func (asm *assembler) Fn_MAKEDATE() { if t.IsZero() { env.vm.stack[env.vm.sp-2] = nil } else { - env.vm.stack[env.vm.sp-2] = env.vm.arena.newEvalDate(datetime.FromStdTime(t).Date) + env.vm.stack[env.vm.sp-2] = env.vm.arena.newEvalDate(datetime.NewDateTimeFromStd(t).Date) } env.vm.sp-- return 1 @@ -3948,10 +3946,6 @@ func (asm *assembler) Fn_YEARWEEK() { }, "FN YEARWEEK DATE(SP-1)") } -func intervalStackOffset(l, i int) int { - return l - i + 1 -} - func (asm *assembler) Interval_i(l int) { asm.adjustStack(-l) asm.emit(func(env *ExpressionEnv) int { @@ -4247,6 +4241,491 @@ func (asm *assembler) Fn_UUID_TO_BIN1() { }, "FN UUID_TO_BIN VARBINARY(SP-2) INT64(SP-1)") } +func (asm *assembler) Fn_DATEADD_D(unit datetime.IntervalType, sub bool) { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + interval := evalToInterval(env.vm.stack[env.vm.sp-1], unit, sub) + if interval == nil { + env.vm.stack[env.vm.sp-2] = 
nil + env.vm.sp-- + return 1 + } + + tmp := env.vm.stack[env.vm.sp-2].(*evalTemporal) + env.vm.stack[env.vm.sp-2] = tmp.addInterval(interval, collations.TypedCollation{}) + env.vm.sp-- + return 1 + }, "FN DATEADD TEMPORAL(SP-2), INTERVAL(SP-1)") +} + +func (asm *assembler) Fn_DATEADD_s(unit datetime.IntervalType, sub bool, col collations.TypedCollation) { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + var interval *datetime.Interval + var tmp *evalTemporal + + interval = evalToInterval(env.vm.stack[env.vm.sp-1], unit, sub) + if interval == nil { + goto baddate + } + + tmp = evalToTemporal(env.vm.stack[env.vm.sp-2]) + if tmp == nil { + goto baddate + } + + env.vm.stack[env.vm.sp-2] = tmp.addInterval(interval, col) + env.vm.sp-- + return 1 + + baddate: + env.vm.stack[env.vm.sp-2] = nil + env.vm.sp-- + return 1 + }, "FN DATEADD TEMPORAL(SP-2), INTERVAL(SP-1)") + +} + +func (asm *assembler) Fn_REGEXP_LIKE(m *icuregex.Matcher, negate bool, c charset.Charset, offset int) { + asm.adjustStack(-offset) + asm.emit(func(env *ExpressionEnv) int { + input := env.vm.stack[env.vm.sp-offset-1].(*evalBytes) + m.Reset(charset.Expand(nil, input.bytes, c)) + + ok, err := m.Find() + if err != nil { + env.vm.err = err + env.vm.sp -= offset + return 1 + } + if negate { + ok = !ok + } + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalBool(ok) + env.vm.sp -= offset + return 1 + }, "FN REGEXP_LIKE VARCHAR(SP-2), VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_REGEXP_LIKE_slow(negate bool, c colldata.Charset, flags icuregex.RegexpFlag, offset int) { + asm.adjustStack(-offset) + asm.emit(func(env *ExpressionEnv) int { + var err error + input := env.vm.stack[env.vm.sp-offset-1].(*evalBytes) + pattern := env.vm.stack[env.vm.sp-offset].(*evalBytes) + + if offset > 1 { + fe := env.vm.stack[env.vm.sp-offset+1] + flags, err = regexpFlags(fe, flags, "regexp_like") + if err != nil { + env.vm.err = err + env.vm.sp -= offset + return 1 + } + } + + p, err := 
compileRegex(pattern, c, flags) + if err != nil { + env.vm.err = err + env.vm.sp -= offset + return 1 + } + + m := icuregex.NewMatcher(p) + m.Reset(charset.Expand(nil, input.bytes, c)) + + ok, err := m.Find() + if err != nil { + env.vm.err = err + env.vm.sp-- + return 1 + } + if negate { + ok = !ok + } + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalBool(ok) + env.vm.sp -= offset + return 1 + }, "FN REGEXP_LIKE_SLOW VARCHAR(SP-2), VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_REGEXP_INSTR(m *icuregex.Matcher, c charset.Charset, offset int) { + asm.adjustStack(-offset) + asm.emit(func(env *ExpressionEnv) int { + input := env.vm.stack[env.vm.sp-offset-1].(*evalBytes) + runes := charset.Expand(nil, input.bytes, c) + + if len(runes) == 0 { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalInt64(0) + env.vm.sp -= offset + return 1 + } + + pos := int64(1) + if offset > 1 { + pos, env.vm.err = positionInstr(env.vm.stack[env.vm.sp-offset+1].(*evalInt64), int64(len(runes))) + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + } + + occ := int64(1) + if offset > 2 { + occ = occurrence(env.vm.stack[env.vm.sp-offset+2].(*evalInt64), occ) + } + + returnOpt := int64(0) + if offset > 3 { + returnOpt, env.vm.err = returnOption(env.vm.stack[env.vm.sp-offset+3].(*evalInt64), "regexp_instr") + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + } + + m.Reset(runes[pos-1:]) + + found := false + for i := int64(0); i < occ; i++ { + found, env.vm.err = m.Find() + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + if !found { + break + } + } + if !found { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalInt64(0) + } else if returnOpt == 0 { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalInt64(int64(m.Start()) + pos) + } else { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalInt64(int64(m.End()) + pos) + } + env.vm.sp -= offset + return 1 + }, "FN REGEXP_INSTR VARCHAR(SP-2), VARCHAR(SP-1)") +} + +func (asm 
*assembler) Fn_REGEXP_INSTR_slow(c colldata.Charset, flags icuregex.RegexpFlag, offset int) { + asm.adjustStack(-offset) + asm.emit(func(env *ExpressionEnv) int { + input := env.vm.stack[env.vm.sp-offset-1].(*evalBytes) + pattern := env.vm.stack[env.vm.sp-offset].(*evalBytes) + + if offset > 4 { + fe := env.vm.stack[env.vm.sp-offset+4] + flags, env.vm.err = regexpFlags(fe, flags, "regexp_instr") + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + } + + p, err := compileRegex(pattern, c, flags) + if err != nil { + env.vm.err = err + env.vm.sp -= offset + return 1 + } + + runes := charset.Expand(nil, input.bytes, c) + if len(runes) == 0 { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalInt64(0) + env.vm.sp -= offset + return 1 + } + + pos := int64(1) + if offset > 1 { + pos, env.vm.err = positionInstr(env.vm.stack[env.vm.sp-offset+1].(*evalInt64), int64(len(runes))) + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + } + + occ := int64(1) + if offset > 2 { + occ = occurrence(env.vm.stack[env.vm.sp-offset+2].(*evalInt64), occ) + } + + returnOpt := int64(0) + if offset > 3 { + returnOpt, env.vm.err = returnOption(env.vm.stack[env.vm.sp-offset+3].(*evalInt64), "regexp_instr") + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + } + + m := icuregex.NewMatcher(p) + m.Reset(runes[pos-1:]) + + found := false + for i := int64(0); i < occ; i++ { + found, env.vm.err = m.Find() + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + if !found { + break + } + } + if !found { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalInt64(0) + } else if returnOpt == 0 { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalInt64(int64(m.Start()) + pos) + } else { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalInt64(int64(m.End()) + pos) + } + env.vm.sp -= offset + return 1 + }, "FN REGEXP_INSTR_SLOW VARCHAR(SP-2), VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_REGEXP_SUBSTR(m *icuregex.Matcher, merged 
collations.TypedCollation, offset int) { + asm.adjustStack(-offset) + asm.emit(func(env *ExpressionEnv) int { + input := env.vm.stack[env.vm.sp-offset-1].(*evalBytes) + c := colldata.Lookup(merged.Collation).Charset() + runes := charset.Expand(nil, input.bytes, c) + + pos := int64(1) + if offset > 1 { + limit := int64(len(runes)) + pos, env.vm.err = position(env.vm.stack[env.vm.sp-offset+1].(*evalInt64), limit, "regexp_substr") + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + if pos-1 == limit { + env.vm.stack[env.vm.sp-offset-1] = nil + env.vm.sp -= offset + return 1 + } + } + + occ := int64(1) + if offset > 2 { + occ = occurrence(env.vm.stack[env.vm.sp-offset+2].(*evalInt64), occ) + } + + m.Reset(runes[pos-1:]) + + found := false + for i := int64(0); i < occ; i++ { + found, env.vm.err = m.Find() + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + if !found { + break + } + } + + if !found { + env.vm.stack[env.vm.sp-offset-1] = nil + } else { + out := runes[int64(m.Start())+pos-1 : int64(m.End())+pos-1] + b := charset.Collapse(nil, out, c) + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalText(b, resultCollation(merged)) + } + env.vm.sp -= offset + return 1 + }, "FN REGEXP_SUBSTR VARCHAR(SP-2), VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_REGEXP_SUBSTR_slow(merged collations.TypedCollation, flags icuregex.RegexpFlag, offset int) { + asm.adjustStack(-offset) + asm.emit(func(env *ExpressionEnv) int { + input := env.vm.stack[env.vm.sp-offset-1].(*evalBytes) + pattern := env.vm.stack[env.vm.sp-offset].(*evalBytes) + c := colldata.Lookup(merged.Collation).Charset() + runes := charset.Expand(nil, input.bytes, c) + + pos := int64(1) + if offset > 1 { + limit := int64(len(runes)) + pos, env.vm.err = position(env.vm.stack[env.vm.sp-offset+1].(*evalInt64), limit, "regexp_substr") + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + if pos-1 == limit { + env.vm.stack[env.vm.sp-offset-1] = nil + env.vm.sp -= offset + return 1 + } 
+ } + + occ := int64(1) + if offset > 2 { + occ = occurrence(env.vm.stack[env.vm.sp-offset+2].(*evalInt64), occ) + } + + if offset > 3 { + fe := env.vm.stack[env.vm.sp-offset+3] + flags, env.vm.err = regexpFlags(fe, flags, "regexp_substr") + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + } + + p, err := compileRegex(pattern, c, flags) + if err != nil { + env.vm.err = err + env.vm.sp -= offset + return 1 + } + + m := icuregex.NewMatcher(p) + m.Reset(runes[pos-1:]) + + found := false + for i := int64(0); i < occ; i++ { + found, env.vm.err = m.Find() + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + if !found { + break + } + } + + if !found { + env.vm.stack[env.vm.sp-offset-1] = nil + } else { + out := runes[int64(m.Start())+pos-1 : int64(m.End())+pos-1] + b := charset.Collapse(nil, out, c) + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalText(b, resultCollation(merged)) + } + env.vm.sp -= offset + return 1 + }, "FN REGEXP_SUBSTR_SLOW VARCHAR(SP-2), VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_REGEXP_REPLACE(m *icuregex.Matcher, merged collations.TypedCollation, offset int) { + asm.adjustStack(-offset) + asm.emit(func(env *ExpressionEnv) int { + input := env.vm.stack[env.vm.sp-offset-1].(*evalBytes) + repl := env.vm.stack[env.vm.sp-offset+1].(*evalBytes) + + c := colldata.Lookup(merged.Collation).Charset() + inputRunes := charset.Expand(nil, input.bytes, c) + replRunes := charset.Expand(nil, repl.bytes, c) + + pos := int64(1) + if offset > 2 { + limit := int64(len(inputRunes)) + pos, env.vm.err = position(env.vm.stack[env.vm.sp-offset+2].(*evalInt64), limit, "regexp_replace") + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + if pos-1 == limit { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalRaw(input.bytes, sqltypes.Text, resultCollation(merged)) + env.vm.sp -= offset + return 1 + } + } + + occ := int64(0) + if offset > 3 { + occ = occurrence(env.vm.stack[env.vm.sp-offset+3].(*evalInt64), occ) + } + + 
m.Reset(inputRunes[pos-1:]) + + cs := colldata.Lookup(merged.Collation).Charset() + b, replaced, err := regexpReplace(m, inputRunes, replRunes, pos, occ, cs) + if err != nil { + env.vm.err = err + env.vm.sp -= offset + return 1 + } + if !replaced { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalRaw(input.bytes, sqltypes.Text, resultCollation(merged)) + } else { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalRaw(b, sqltypes.Text, resultCollation(merged)) + } + env.vm.sp -= offset + return 1 + }, "FN REGEXP_REPLACE VARCHAR(SP-2), VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_REGEXP_REPLACE_slow(merged collations.TypedCollation, flags icuregex.RegexpFlag, offset int) { + asm.adjustStack(-offset) + asm.emit(func(env *ExpressionEnv) int { + input := env.vm.stack[env.vm.sp-offset-1].(*evalBytes) + pattern := env.vm.stack[env.vm.sp-offset].(*evalBytes) + repl := env.vm.stack[env.vm.sp-offset+1].(*evalBytes) + + c := colldata.Lookup(merged.Collation).Charset() + inputRunes := charset.Expand(nil, input.bytes, c) + replRunes := charset.Expand(nil, repl.bytes, c) + + pos := int64(1) + if offset > 2 { + limit := int64(len(inputRunes)) + pos, env.vm.err = position(env.vm.stack[env.vm.sp-offset+2].(*evalInt64), limit, "regexp_replace") + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + if pos-1 == limit { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalRaw(input.bytes, sqltypes.Text, resultCollation(merged)) + env.vm.sp -= offset + return 1 + } + } + + occ := int64(0) + if offset > 3 { + occ = occurrence(env.vm.stack[env.vm.sp-offset+3].(*evalInt64), 0) + } + + if offset > 4 { + fe := env.vm.stack[env.vm.sp-offset+4] + flags, env.vm.err = regexpFlags(fe, flags, "regexp_replace") + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + } + + p, err := compileRegex(pattern, c, flags) + if err != nil { + env.vm.err = err + env.vm.sp -= offset + return 1 + } + + m := icuregex.NewMatcher(p) + m.Reset(inputRunes[pos-1:]) + + b, 
replaced, err := regexpReplace(m, inputRunes, replRunes, pos, occ, colldata.Lookup(merged.Collation).Charset()) + if err != nil { + env.vm.err = err + env.vm.sp -= offset + return 1 + } + if !replaced { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalRaw(input.bytes, sqltypes.Text, resultCollation(merged)) + } else { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalRaw(b, sqltypes.Text, resultCollation(merged)) + } + env.vm.sp -= offset + return 1 + }, "FN REGEXP_REPLACE_SLOW VARCHAR(SP-2), VARCHAR(SP-1)") +} + func (asm *assembler) Introduce(offset int, t sqltypes.Type, col collations.TypedCollation) { asm.emit(func(env *ExpressionEnv) int { arg := evalToBinary(env.vm.stack[env.vm.sp-offset]) diff --git a/go/vt/vtgate/evalengine/compiler_test.go b/go/vt/vtgate/evalengine/compiler_test.go index 4e1eb479834..efcf0036acb 100644 --- a/go/vt/vtgate/evalengine/compiler_test.go +++ b/go/vt/vtgate/evalengine/compiler_test.go @@ -179,19 +179,6 @@ func TestCompilerReference(t *testing.T) { t.Logf("\n%s", track.String()) } -type debugCompiler struct { - t testing.TB -} - -func (d *debugCompiler) Instruction(ins string, args ...any) { - ins = fmt.Sprintf(ins, args...) 
- d.t.Logf("> %s", ins) -} - -func (d *debugCompiler) Stack(old, new int) { - d.t.Logf("\tsp = %d -> %d", old, new) -} - func TestCompilerSingle(t *testing.T) { var testCases = []struct { expression string @@ -444,6 +431,30 @@ func TestCompilerSingle(t *testing.T) { expression: `INTERVAL(0, 0, 0, -1, NULL, NULL, 1)`, result: `INT64(5)`, }, + { + expression: `REGEXP_REPLACE(1234, 12, 6, 1)`, + result: `TEXT("634")`, + }, + { + expression: `_latin1 0xFF`, + result: `VARCHAR("ÿ")`, + }, + { + expression: `TRIM(_latin1 0xA078A0 FROM _utf8mb4 0xC2A078C2A0)`, + result: `VARCHAR("")`, + }, + { + expression: `CONCAT_WS("😊😂🤢", date '2000-01-01', _latin1 0xFF)`, + result: `VARCHAR("2000-01-01😊😂🤢ÿ")`, + }, + { + expression: `concat('test', _latin1 0xff)`, + result: `VARCHAR("testÿ")`, + }, + { + expression: `WEIGHT_STRING('foobar' as char(3))`, + result: `VARBINARY("\x1c\xe5\x1d\xdd\x1d\xdd")`, + }, } for _, tc := range testCases { @@ -576,3 +587,65 @@ func TestBindVarLiteral(t *testing.T) { }) } } + +func TestCompilerNonConstant(t *testing.T) { + var testCases = []struct { + expression string + }{ + { + expression: "RANDOM_BYTES(4)", + }, + { + expression: "UUID()", + }, + } + + for _, tc := range testCases { + t.Run(tc.expression, func(t *testing.T) { + expr, err := sqlparser.ParseExpr(tc.expression) + if err != nil { + t.Fatal(err) + } + + cfg := &evalengine.Config{ + Collation: collations.CollationUtf8mb4ID, + Optimization: evalengine.OptimizationLevelCompile, + } + + converted, err := evalengine.Translate(expr, cfg) + if err != nil { + t.Fatal(err) + } + + env := evalengine.EmptyExpressionEnv() + var prev string + for i := 0; i < 1000; i++ { + expected, err := env.Evaluate(evalengine.Deoptimize(converted)) + if err != nil { + t.Fatal(err) + } + if expected.String() == prev { + t.Fatalf("constant evaluation from eval engine: got %s multiple times", expected.String()) + } + prev = expected.String() + } + + if cfg.CompilerErr != nil { + t.Fatalf("bad compilation: %v", 
cfg.CompilerErr) + } + + // re-run the same evaluation multiple times to ensure results are always consistent + for i := 0; i < 1000; i++ { + res, err := env.EvaluateVM(converted.(*evalengine.CompiledExpr)) + if err != nil { + t.Fatal(err) + } + + if res.String() == prev { + t.Fatalf("constant evaluation from eval engine: got %s multiple times", res.String()) + } + prev = res.String() + } + }) + } +} diff --git a/go/vt/vtgate/evalengine/eval.go b/go/vt/vtgate/evalengine/eval.go index d11bba24dde..fbc3cbca57d 100644 --- a/go/vt/vtgate/evalengine/eval.go +++ b/go/vt/vtgate/evalengine/eval.go @@ -17,7 +17,6 @@ limitations under the License. package evalengine import ( - "fmt" "strconv" "unicode/utf8" @@ -199,14 +198,18 @@ func evalCoerce(e eval, typ sqltypes.Type, col collations.ID) (eval, error) { return evalToInt64(e), nil case sqltypes.Uint8, sqltypes.Uint16, sqltypes.Uint32, sqltypes.Uint64: return evalToInt64(e).toUint64(), nil - case sqltypes.Date, sqltypes.Datetime, sqltypes.Year, sqltypes.TypeJSON, sqltypes.Time, sqltypes.Bit: - return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Unsupported type conversion: %s", typ.String()) + case sqltypes.Date: + return evalToDate(e), nil + case sqltypes.Datetime, sqltypes.Timestamp: + return evalToDateTime(e, -1), nil + case sqltypes.Time: + return evalToTime(e, -1), nil default: - panic(fmt.Sprintf("BUG: emitted unknown type: %s", typ)) + return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Unsupported type conversion: %s", typ.String()) } } -func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type) (eval, error) { +func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type, collation collations.ID) (eval, error) { switch { case typ == sqltypes.Null: return nil, nil @@ -226,7 +229,12 @@ func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type) (eval, error) { fval, _ := fastparse.ParseFloat64(v.RawStr()) return newEvalFloat(fval), nil default: - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "coercion should 
not try to coerce this value to a float: %v", v) + e, err := valueToEval(v, defaultCoercionCollation(collation)) + if err != nil { + return nil, err + } + f, _ := evalToFloat(e) + return f, nil } case sqltypes.IsDecimal(typ): @@ -248,7 +256,11 @@ func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type) (eval, error) { fval, _ := fastparse.ParseFloat64(v.RawStr()) dec = decimal.NewFromFloat(fval) default: - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "coercion should not try to coerce this value to a decimal: %v", v) + e, err := valueToEval(v, defaultCoercionCollation(collation)) + if err != nil { + return nil, err + } + return evalToDecimal(e, 0, 0), nil } return &evalDecimal{dec: dec, length: -dec.Exponent()}, nil @@ -260,8 +272,15 @@ func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type) (eval, error) { case v.IsUnsigned(): uval, err := v.ToUint64() return newEvalInt64(int64(uval)), err + case v.IsText() || v.IsBinary(): + i, err := fastparse.ParseInt64(v.RawStr(), 10) + return newEvalInt64(i), err default: - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "coercion should not try to coerce this value to a signed int: %v", v) + e, err := valueToEval(v, defaultCoercionCollation(collation)) + if err != nil { + return nil, err + } + return evalToInt64(e), nil } case sqltypes.IsUnsigned(typ): @@ -272,18 +291,71 @@ func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type) (eval, error) { case v.IsUnsigned(): uval, err := v.ToUint64() return newEvalUint64(uval), err + case v.IsText() || v.IsBinary(): + u, err := fastparse.ParseUint64(v.RawStr(), 10) + return newEvalUint64(u), err default: - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "coercion should not try to coerce this value to a unsigned int: %v", v) + e, err := valueToEval(v, defaultCoercionCollation(collation)) + if err != nil { + return nil, err + } + i := evalToInt64(e) + return newEvalUint64(uint64(i.i)), nil } case sqltypes.IsText(typ) || sqltypes.IsBinary(typ): switch { case v.IsText() 
|| v.IsBinary(): - // TODO: collation - return newEvalRaw(v.Type(), v.Raw(), collationBinary), nil + return newEvalRaw(v.Type(), v.Raw(), defaultCoercionCollation(collation)), nil + case sqltypes.IsText(typ): + e, err := valueToEval(v, defaultCoercionCollation(collation)) + if err != nil { + return nil, err + } + return evalToVarchar(e, collation, true) default: - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "coercion should not try to coerce this value to a text: %v", v) + e, err := valueToEval(v, defaultCoercionCollation(collation)) + if err != nil { + return nil, err + } + return evalToBinary(e), nil + } + + case typ == sqltypes.TypeJSON: + return json.NewFromSQL(v) + case typ == sqltypes.Date: + e, err := valueToEval(v, defaultCoercionCollation(collation)) + if err != nil { + return nil, err + } + // Separate return here to avoid nil wrapped in interface type + d := evalToDate(e) + if d == nil { + return nil, nil + } + return d, nil + case typ == sqltypes.Datetime || typ == sqltypes.Timestamp: + e, err := valueToEval(v, defaultCoercionCollation(collation)) + if err != nil { + return nil, err + } + // Separate return here to avoid nil wrapped in interface type + dt := evalToDateTime(e, -1) + if dt == nil { + return nil, nil + } + return dt, nil + case typ == sqltypes.Time: + e, err := valueToEval(v, defaultCoercionCollation(collation)) + if err != nil { + return nil, err + } + // Separate return here to avoid nil wrapped in interface type + t := evalToTime(e, -1) + if t == nil { + return nil, nil } + return t, nil } return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "coercion should not try to coerce this value: %v", v) } @@ -362,6 +434,8 @@ func valueToEval(value sqltypes.Value, collation collations.TypedCollation) (eva var p json.Parser j, err := p.ParseBytes(value.Raw()) return j, wrap(err) + case fallbackBinary(tt): + return newEvalRaw(tt, value.Raw(), collation), nil default: return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "Type is not supported: 
%q %s", value, value.Type()) } diff --git a/go/vt/vtgate/evalengine/eval_bytes.go b/go/vt/vtgate/evalengine/eval_bytes.go index adc9cb32f2d..455394e31e4 100644 --- a/go/vt/vtgate/evalengine/eval_bytes.go +++ b/go/vt/vtgate/evalengine/eval_bytes.go @@ -22,6 +22,7 @@ import ( "vitess.io/vitess/go/hack" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/mysql/datetime" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/vthash" @@ -82,8 +83,8 @@ func evalToVarchar(e eval, col collations.ID, convert bool) (*evalBytes, error) typedcol.Collation = col if col != collations.CollationBinaryID { - fromCollation := b.col.Collation.Get() - toCollation := col.Get() + fromCollation := colldata.Lookup(b.col.Collation) + toCollation := colldata.Lookup(col) var err error bytes, err = charset.Convert(nil, toCollation.Charset(), bytes, fromCollation.Charset()) @@ -109,7 +110,7 @@ func (e *evalBytes) Hash(h *vthash.Hasher) { _, _ = h.Write(e.bytes) default: h.Write16(hashPrefixBytes) - col := e.col.Collation.Get() + col := colldata.Lookup(e.col.Collation) col.Hash(h, e.bytes, 0) } } @@ -153,7 +154,7 @@ func (e *evalBytes) truncateInPlace(size int) { e.bytes = e.bytes[:size] } case sqltypes.IsText(tt): - collation := e.col.Collation.Get() + collation := colldata.Lookup(e.col.Collation) e.bytes = charset.Slice(collation.Charset(), e.bytes, 0, size) default: panic("called EvalResult.truncate on non-quoted") diff --git a/go/vt/vtgate/evalengine/eval_json.go b/go/vt/vtgate/evalengine/eval_json.go index 01cf69e5e99..8b19a27f92b 100644 --- a/go/vt/vtgate/evalengine/eval_json.go +++ b/go/vt/vtgate/evalengine/eval_json.go @@ -23,6 +23,7 @@ import ( "vitess.io/vitess/go/hack" "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/mysql/json" "vitess.io/vitess/go/sqltypes" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -98,7 
+99,7 @@ func evalConvert_nj(e evalNumeric) *evalJSON { } func evalConvert_cj(e *evalBytes) (*evalJSON, error) { - jsonText, err := charset.Convert(nil, charset.Charset_utf8mb4{}, e.bytes, e.col.Collation.Get().Charset()) + jsonText, err := charset.Convert(nil, charset.Charset_utf8mb4{}, e.bytes, colldata.Lookup(e.col.Collation).Charset()) if err != nil { return nil, err } @@ -107,7 +108,7 @@ func evalConvert_cj(e *evalBytes) (*evalJSON, error) { } func evalConvertArg_cj(e *evalBytes) (*evalJSON, error) { - jsonText, err := charset.Convert(nil, charset.Charset_utf8mb4{}, e.bytes, e.col.Collation.Get().Charset()) + jsonText, err := charset.Convert(nil, charset.Charset_utf8mb4{}, e.bytes, colldata.Lookup(e.col.Collation).Charset()) if err != nil { return nil, err } diff --git a/go/vt/vtgate/evalengine/eval_result.go b/go/vt/vtgate/evalengine/eval_result.go index fa48ce19a25..19a6ea59220 100644 --- a/go/vt/vtgate/evalengine/eval_result.go +++ b/go/vt/vtgate/evalengine/eval_result.go @@ -21,6 +21,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/sqltypes" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -39,7 +40,7 @@ func (er EvalResult) Value(id collations.ID) sqltypes.Value { return evalToSQLValue(er.v) } - dst, err := charset.Convert(nil, id.Get().Charset(), str.bytes, str.col.Collation.Get().Charset()) + dst, err := charset.Convert(nil, colldata.Lookup(id).Charset(), str.bytes, colldata.Lookup(str.col.Collation).Charset()) if err != nil { // If we can't convert, we just return what we have, but it's going // to be invalidly encoded. 
Should normally never happen as only utf8mb4 diff --git a/go/vt/vtgate/evalengine/eval_temporal.go b/go/vt/vtgate/evalengine/eval_temporal.go index e04f6174dd6..13acc5bd290 100644 --- a/go/vt/vtgate/evalengine/eval_temporal.go +++ b/go/vt/vtgate/evalengine/eval_temporal.go @@ -1,9 +1,8 @@ package evalengine import ( - "time" - "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/datetime" "vitess.io/vitess/go/mysql/decimal" "vitess.io/vitess/go/mysql/json" @@ -139,8 +138,28 @@ func (e *evalTemporal) isZero() bool { return e.dt.IsZero() } -func (e *evalTemporal) toStdTime(loc *time.Location) time.Time { - return e.dt.ToStdTime(loc) +func (e *evalTemporal) addInterval(interval *datetime.Interval, strcoll collations.TypedCollation) eval { + var tmp *evalTemporal + var ok bool + + switch tt := e.SQLType(); { + case tt == sqltypes.Date && !interval.Unit().HasTimeParts(): + tmp = &evalTemporal{t: e.t} + tmp.dt.Date, ok = e.dt.Date.AddInterval(interval) + case tt == sqltypes.Time && !interval.Unit().HasDateParts(): + tmp = &evalTemporal{t: e.t} + tmp.dt.Time, tmp.prec, ok = e.dt.Time.AddInterval(interval, strcoll.Valid()) + case tt == sqltypes.Datetime || tt == sqltypes.Timestamp || (tt == sqltypes.Date && interval.Unit().HasTimeParts()) || (tt == sqltypes.Time && interval.Unit().HasDateParts()): + tmp = e.toDateTime(int(e.prec)) + tmp.dt, tmp.prec, ok = e.dt.AddInterval(interval, strcoll.Valid()) + } + if !ok { + return nil + } + if strcoll.Valid() { + return newEvalRaw(sqltypes.Char, tmp.ToRawBytes(), strcoll) + } + return tmp } func newEvalDateTime(dt datetime.DateTime, l int) *evalTemporal { @@ -190,6 +209,74 @@ func precision(req, got int) int { return req } +func evalToTemporal(e eval) *evalTemporal { + switch e := e.(type) { + case *evalTemporal: + return e + case *evalBytes: + if t, l, ok := datetime.ParseDateTime(e.string(), -1); ok { + return newEvalDateTime(t, l) + } + if d, ok := datetime.ParseDate(e.string()); ok { + 
return newEvalDate(d) + } + if t, l, ok := datetime.ParseTime(e.string(), -1); ok { + return newEvalTime(t, l) + } + case *evalInt64: + if t, ok := datetime.ParseDateTimeInt64(e.i); ok { + return newEvalDateTime(t, 0) + } + if d, ok := datetime.ParseDateInt64(e.i); ok { + return newEvalDate(d) + } + if t, ok := datetime.ParseTimeInt64(e.i); ok { + return newEvalTime(t, 0) + } + case *evalUint64: + if t, ok := datetime.ParseDateTimeInt64(int64(e.u)); ok { + return newEvalDateTime(t, 0) + } + if d, ok := datetime.ParseDateInt64(int64(e.u)); ok { + return newEvalDate(d) + } + if t, ok := datetime.ParseTimeInt64(int64(e.u)); ok { + return newEvalTime(t, 0) + } + case *evalFloat: + if t, l, ok := datetime.ParseDateTimeFloat(e.f, -1); ok { + return newEvalDateTime(t, l) + } + if d, ok := datetime.ParseDateFloat(e.f); ok { + return newEvalDate(d) + } + if t, l, ok := datetime.ParseTimeFloat(e.f, -1); ok { + return newEvalTime(t, l) + } + case *evalDecimal: + if t, l, ok := datetime.ParseDateTimeDecimal(e.dec, e.length, -1); ok { + return newEvalDateTime(t, l) + } + if d, ok := datetime.ParseDateDecimal(e.dec); ok { + return newEvalDate(d) + } + if d, l, ok := datetime.ParseTimeDecimal(e.dec, e.length, -1); ok { + return newEvalTime(d, l) + } + case *evalJSON: + if dt, ok := e.DateTime(); ok { + if dt.Date.IsZero() { + return newEvalTime(dt.Time, datetime.DefaultPrecision) + } + if dt.Time.IsZero() { + return newEvalDate(dt.Date) + } + return newEvalDateTime(dt, datetime.DefaultPrecision) + } + } + return nil +} + func evalToTime(e eval, l int) *evalTemporal { switch e := e.(type) { case *evalTemporal: diff --git a/go/vt/vtgate/evalengine/expr_bvar.go b/go/vt/vtgate/evalengine/expr_bvar.go index 387438c4310..9172f8abc3c 100644 --- a/go/vt/vtgate/evalengine/expr_bvar.go +++ b/go/vt/vtgate/evalengine/expr_bvar.go @@ -57,11 +57,7 @@ func (bv *BindVariable) eval(env *ExpressionEnv) (eval, error) { tuple := make([]eval, 0, len(bvar.Values)) for _, value := range bvar.Values { - 
e, err := valueToEval(sqltypes.MakeTrusted(value.Type, value.Value), collations.TypedCollation{ - Collation: collations.DefaultCollationForType(value.Type), - Coercibility: collations.CoerceCoercible, - Repertoire: collations.RepertoireUnicode, - }) + e, err := valueToEval(sqltypes.MakeTrusted(value.Type, value.Value), defaultCoercionCollation(collations.DefaultCollationForType(value.Type))) if err != nil { return nil, err } @@ -77,11 +73,7 @@ func (bv *BindVariable) eval(env *ExpressionEnv) (eval, error) { if bv.typed() { typ = bv.Type } - return valueToEval(sqltypes.MakeTrusted(typ, bvar.Value), collations.TypedCollation{ - Collation: collations.DefaultCollationForType(typ), - Coercibility: collations.CoerceCoercible, - Repertoire: collations.RepertoireUnicode, - }) + return valueToEval(sqltypes.MakeTrusted(typ, bvar.Value), defaultCoercionCollation(collations.DefaultCollationForType(typ))) } } diff --git a/go/vt/vtgate/evalengine/expr_collate.go b/go/vt/vtgate/evalengine/expr_collate.go index c03a61b2178..9828a1d8722 100644 --- a/go/vt/vtgate/evalengine/expr_collate.go +++ b/go/vt/vtgate/evalengine/expr_collate.go @@ -18,6 +18,7 @@ package evalengine import ( "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -54,6 +55,12 @@ var collationUtf8mb3 = collations.TypedCollation{ Repertoire: collations.RepertoireUnicode, } +var collationRegexpFallback = collations.TypedCollation{ + Collation: collations.CollationLatin1Swedish, + Coercibility: collations.CoerceCoercible, + Repertoire: collations.RepertoireASCII, +} + type ( CollateExpr struct { UnaryExpr @@ -133,7 +140,7 @@ func evalCollation(e eval) collations.TypedCollation { } } -func mergeCollations(c1, c2 collations.TypedCollation, t1, t2 sqltypes.Type) (collations.TypedCollation, collations.Coercion, collations.Coercion, error) { +func 
mergeCollations(c1, c2 collations.TypedCollation, t1, t2 sqltypes.Type) (collations.TypedCollation, colldata.Coercion, colldata.Coercion, error) { if c1.Collation == c2.Collation { return c1, nil, nil, nil } @@ -151,22 +158,22 @@ func mergeCollations(c1, c2 collations.TypedCollation, t1, t2 sqltypes.Type) (co } env := collations.Local() - return env.MergeCollations(c1, c2, collations.CoercionOptions{ + return colldata.Merge(env, c1, c2, colldata.CoercionOptions{ ConvertToSuperset: true, ConvertWithCoercion: true, }) } -func mergeAndCoerceCollations(left, right eval) (eval, eval, collations.ID, error) { +func mergeAndCoerceCollations(left, right eval) (eval, eval, collations.TypedCollation, error) { lt := left.SQLType() rt := right.SQLType() mc, coerceLeft, coerceRight, err := mergeCollations(evalCollation(left), evalCollation(right), lt, rt) if err != nil { - return nil, nil, 0, err + return nil, nil, collations.TypedCollation{}, err } if coerceLeft == nil && coerceRight == nil { - return left, right, mc.Collation, nil + return left, right, mc, nil } left1 := newEvalRaw(lt, left.(*evalBytes).bytes, mc) @@ -175,16 +182,16 @@ func mergeAndCoerceCollations(left, right eval) (eval, eval, collations.ID, erro if coerceLeft != nil { left1.bytes, err = coerceLeft(nil, left1.bytes) if err != nil { - return nil, nil, 0, err + return nil, nil, collations.TypedCollation{}, err } } if coerceRight != nil { right1.bytes, err = coerceRight(nil, right1.bytes) if err != nil { - return nil, nil, 0, err + return nil, nil, collations.TypedCollation{}, err } } - return left1, right1, mc.Collation, nil + return left1, right1, mc, nil } type collationAggregation struct { @@ -196,7 +203,7 @@ func (ca *collationAggregation) add(env *collations.Environment, tc collations.T ca.cur = tc } else { var err error - ca.cur, _, _, err = env.MergeCollations(ca.cur, tc, collations.CoercionOptions{ConvertToSuperset: true, ConvertWithCoercion: true}) + ca.cur, _, _, err = colldata.Merge(env, ca.cur, tc, 
colldata.CoercionOptions{ConvertToSuperset: true, ConvertWithCoercion: true}) if err != nil { return err } diff --git a/go/vt/vtgate/evalengine/expr_compare.go b/go/vt/vtgate/evalengine/expr_compare.go index cef7493e026..e7490370a1b 100644 --- a/go/vt/vtgate/evalengine/expr_compare.go +++ b/go/vt/vtgate/evalengine/expr_compare.go @@ -17,7 +17,10 @@ limitations under the License. package evalengine import ( + "bytes" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -39,7 +42,7 @@ type ( LikeExpr struct { BinaryExpr Negate bool - Match collations.WildcardPattern + Match colldata.WildcardPattern MatchCollation collations.ID } @@ -108,7 +111,7 @@ func (compareGE) compare(left, right eval) (boolean, error) { func (compareNullSafeEQ) String() string { return "<=>" } func (compareNullSafeEQ) compare(left, right eval) (boolean, error) { cmp, err := evalCompareNullSafe(left, right) - return makeboolean(cmp), err + return makeboolean(cmp == 0), err } func typeIsTextual(tt sqltypes.Type) bool { @@ -162,15 +165,21 @@ func compareAsJSON(l, r sqltypes.Type) bool { return l == sqltypes.TypeJSON || r == sqltypes.TypeJSON } -func evalCompareNullSafe(lVal, rVal eval) (bool, error) { - if lVal == nil || rVal == nil { - return lVal == rVal, nil +func evalCompareNullSafe(lVal, rVal eval) (int, error) { + if lVal == nil { + if rVal == nil { + return 0, nil + } + return -1, nil + } + if rVal == nil { + return 1, nil } if left, right, ok := compareAsTuples(lVal, rVal); ok { return evalCompareTuplesNullSafe(left.t, right.t) } n, err := evalCompare(lVal, rVal) - return n == 0, err + return n, err } func evalCompareMany(left, right []eval, fulleq bool) (int, bool, error) { @@ -233,6 +242,8 @@ func evalCompare(left, right eval) (comp int, err error) { return compareJSON(left, right) case lt == sqltypes.Tuple || rt == 
sqltypes.Tuple: return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: evalCompare: tuple comparison should be handled early") + case lt == rt && fallbackBinary(lt): + return bytes.Compare(left.ToRawBytes(), right.ToRawBytes()), nil default: // Quoting MySQL Docs: // @@ -247,20 +258,32 @@ func evalCompare(left, right eval) (comp int, err error) { } } -func evalCompareTuplesNullSafe(left, right []eval) (bool, error) { +// fallbackBinary compares two values of the same type using the fallback binary comparison. +// This is for types we don't yet properly support otherwise but do end up being used +// for comparisons, for example when using vdiff. +// TODO: Clean this up as we add more properly supported types and comparisons. +func fallbackBinary(t sqltypes.Type) bool { + switch t { + case sqltypes.Bit, sqltypes.Enum, sqltypes.Set, sqltypes.Geometry: + return true + } + return false +} + +func evalCompareTuplesNullSafe(left, right []eval) (int, error) { if len(left) != len(right) { panic("did not typecheck cardinality") } for idx, lResult := range left { res, err := evalCompareNullSafe(lResult, right[idx]) if err != nil { - return false, err + return 0, err } - if !res { - return false, nil + if res != 0 { + return res, nil } } - return true, nil + return 0, nil } // eval implements the Expr interface @@ -547,7 +570,7 @@ func (l *LikeExpr) matchWildcard(left, right []byte, coll collations.ID) bool { if l.Match != nil && l.MatchCollation == coll { return l.Match.Match(left) } - fullColl := coll.Get() + fullColl := colldata.Lookup(coll) wc := fullColl.Wildcard(right, 0, 0, 0) return wc.Match(left) } @@ -558,7 +581,7 @@ func (l *LikeExpr) eval(env *ExpressionEnv) (eval, error) { return nil, err } - var col collations.ID + var col collations.TypedCollation left, right, col, err = mergeAndCoerceCollations(left, right) if err != nil { return nil, err @@ -567,11 +590,11 @@ func (l *LikeExpr) eval(env *ExpressionEnv) (eval, error) { var matched bool switch { case 
typeIsTextual(left.SQLType()) && typeIsTextual(right.SQLType()): - matched = l.matchWildcard(left.(*evalBytes).bytes, right.(*evalBytes).bytes, col) + matched = l.matchWildcard(left.(*evalBytes).bytes, right.(*evalBytes).bytes, col.Collation) case typeIsTextual(right.SQLType()): - matched = l.matchWildcard(left.ToRawBytes(), right.(*evalBytes).bytes, col) + matched = l.matchWildcard(left.ToRawBytes(), right.(*evalBytes).bytes, col.Collation) case typeIsTextual(left.SQLType()): - matched = l.matchWildcard(left.(*evalBytes).bytes, right.ToRawBytes(), col) + matched = l.matchWildcard(left.(*evalBytes).bytes, right.ToRawBytes(), col.Collation) default: matched = l.matchWildcard(left.ToRawBytes(), right.ToRawBytes(), collations.CollationBinaryID) } @@ -617,12 +640,12 @@ func (expr *LikeExpr) compile(c *compiler) (ctype, error) { } var merged collations.TypedCollation - var coerceLeft collations.Coercion - var coerceRight collations.Coercion + var coerceLeft colldata.Coercion + var coerceRight colldata.Coercion var env = collations.Local() if lt.Col.Collation != rt.Col.Collation { - merged, coerceLeft, coerceRight, err = env.MergeCollations(lt.Col, rt.Col, collations.CoercionOptions{ + merged, coerceLeft, coerceRight, err = colldata.Merge(env, lt.Col, rt.Col, colldata.CoercionOptions{ ConvertToSuperset: true, ConvertWithCoercion: true, }) @@ -634,7 +657,7 @@ func (expr *LikeExpr) compile(c *compiler) (ctype, error) { } if coerceLeft == nil && coerceRight == nil { - c.asm.Like_collate(expr, merged.Collation.Get()) + c.asm.Like_collate(expr, colldata.Lookup(merged.Collation)) } else { if coerceLeft == nil { coerceLeft = func(dst, in []byte) ([]byte, error) { return in, nil } @@ -643,7 +666,7 @@ func (expr *LikeExpr) compile(c *compiler) (ctype, error) { coerceRight = func(dst, in []byte) ([]byte, error) { return in, nil } } c.asm.Like_coerce(expr, &compiledCoercion{ - col: merged.Collation.Get(), + col: colldata.Lookup(merged.Collation), left: coerceLeft, right: 
coerceRight, }) diff --git a/go/vt/vtgate/evalengine/expr_convert.go b/go/vt/vtgate/evalengine/expr_convert.go index 8c7c079228b..6531cdd6fae 100644 --- a/go/vt/vtgate/evalengine/expr_convert.go +++ b/go/vt/vtgate/evalengine/expr_convert.go @@ -18,6 +18,7 @@ package evalengine import ( "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -120,6 +121,10 @@ func (c *ConvertExpr) eval(env *ExpressionEnv) (eval, error) { case "JSON": return evalToJSON(e) case "DATETIME": + switch p := c.Length; { + case p > 6: + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Too-big precision %d specified for 'CONVERT'. Maximum is 6.", p) + } if dt := evalToDateTime(e, c.Length); dt != nil { return dt, nil } @@ -130,6 +135,10 @@ func (c *ConvertExpr) eval(env *ExpressionEnv) (eval, error) { } return nil, nil case "TIME": + switch p := c.Length; { + case p > 6: + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Too-big precision %d specified for 'CONVERT'. 
Maximum is 6.", p) + } if t := evalToTime(e, c.Length); t != nil { return t, nil } @@ -187,7 +196,7 @@ func (c *ConvertExpr) convertToBinaryType(tt sqltypes.Type) sqltypes.Type { func (c *ConvertExpr) convertToCharType(tt sqltypes.Type) sqltypes.Type { if c.HasLength { - col := c.Collation.Get() + col := colldata.Lookup(c.Collation) length := c.Length * col.Charset().MaxWidth() if length > 64*1024 { return sqltypes.Text @@ -227,6 +236,9 @@ func (conv *ConvertExpr) compile(c *compiler) (ctype, error) { case "DOUBLE", "REAL": convt = c.compileToFloat(arg, 1) + case "FLOAT": + return ctype{}, c.unsupported(conv) + case "SIGNED", "SIGNED INTEGER": convt = c.compileToInt64(arg, 1) @@ -244,9 +256,17 @@ func (conv *ConvertExpr) compile(c *compiler) (ctype, error) { convt = c.compileToDate(arg, 1) case "DATETIME": + switch p := conv.Length; { + case p > 6: + return ctype{}, c.unsupported(conv) + } convt = c.compileToDateTime(arg, 1, conv.Length) case "TIME": + switch p := conv.Length; { + case p > 6: + return ctype{}, c.unsupported(conv) + } convt = c.compileToTime(arg, 1, conv.Length) default: @@ -256,7 +276,6 @@ func (conv *ConvertExpr) compile(c *compiler) (ctype, error) { c.asm.jumpDestination(skip) convt.Flag = arg.Flag | flagNullable return convt, nil - } func (c *ConvertUsingExpr) eval(env *ExpressionEnv) (eval, error) { diff --git a/go/vt/vtgate/evalengine/expr_env.go b/go/vt/vtgate/evalengine/expr_env.go index d4fb7f3b60a..e67e25e70a6 100644 --- a/go/vt/vtgate/evalengine/expr_env.go +++ b/go/vt/vtgate/evalengine/expr_env.go @@ -51,9 +51,9 @@ type ( func (env *ExpressionEnv) time(utc bool) datetime.DateTime { if utc { - return datetime.FromStdTime(env.now.UTC()) + return datetime.NewDateTimeFromStd(env.now.UTC()) } - return datetime.FromStdTime(env.now) + return datetime.NewDateTimeFromStd(env.now) } func (env *ExpressionEnv) currentUser() string { diff --git a/go/vt/vtgate/evalengine/fn_compare.go b/go/vt/vtgate/evalengine/fn_compare.go index 
ef8d7f4e3c8..ee4f61cb596 100644 --- a/go/vt/vtgate/evalengine/fn_compare.go +++ b/go/vt/vtgate/evalengine/fn_compare.go @@ -21,6 +21,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/proto/vtrpc" @@ -215,11 +216,11 @@ func compareAllText(args []eval, cmp int) (eval, error) { if err := ca.add(env, col); err != nil { return nil, err } - charsets = append(charsets, col.Collation.Get().Charset()) + charsets = append(charsets, colldata.Lookup(col.Collation).Charset()) } tc := ca.result() - col := tc.Collation.Get() + col := colldata.Lookup(tc.Collation) cs := col.Charset() b1, err := charset.Convert(nil, cs, args[0].ToRawBytes(), charsets[0]) diff --git a/go/vt/vtgate/evalengine/fn_hex.go b/go/vt/vtgate/evalengine/fn_hex.go index 3aa395f74cc..0045bfd6688 100644 --- a/go/vt/vtgate/evalengine/fn_hex.go +++ b/go/vt/vtgate/evalengine/fn_hex.go @@ -109,8 +109,8 @@ func hexDecodeJSON(j *evalJSON) ([]byte, bool) { default: b := j.ToRawBytes() decoded := make([]byte, hex.DecodedLen(b)) - ok := hex.DecodeBytes(decoded, b) - if !ok { + err := hex.DecodeBytes(decoded, b) + if err != nil { return nil, false } return decoded, true @@ -130,8 +130,8 @@ func (call *builtinUnhex) eval(env *ExpressionEnv) (eval, error) { switch arg := arg.(type) { case *evalBytes: decoded = make([]byte, hex.DecodedLen(arg.bytes)) - ok := hex.DecodeBytes(decoded, arg.bytes) - if !ok { + err := hex.DecodeBytes(decoded, arg.bytes) + if err != nil { return nil, nil } case *evalInt64: @@ -144,8 +144,8 @@ func (call *builtinUnhex) eval(env *ExpressionEnv) (eval, error) { case *evalDecimal: b := arg.ToRawBytes() decoded = make([]byte, hex.DecodedLen(b)) - ok := hex.DecodeBytes(decoded, b) - if !ok { + err := hex.DecodeBytes(decoded, b) + if err != nil { return nil, nil } case *evalFloat: @@ -163,8 +163,8 @@ func 
(call *builtinUnhex) eval(env *ExpressionEnv) (eval, error) { default: b := evalToBinary(arg) decoded = make([]byte, hex.DecodedLen(b.bytes)) - ok := hex.DecodeBytes(decoded, b.bytes) - if !ok { + err := hex.DecodeBytes(decoded, b.bytes) + if err != nil { return nil, nil } } diff --git a/go/vt/vtgate/evalengine/fn_json.go b/go/vt/vtgate/evalengine/fn_json.go index 31f568a3df5..7c7c6a67f8d 100644 --- a/go/vt/vtgate/evalengine/fn_json.go +++ b/go/vt/vtgate/evalengine/fn_json.go @@ -19,7 +19,7 @@ package evalengine import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/json" - "vitess.io/vitess/go/slices2" + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -131,7 +131,7 @@ func (call *builtinJSONExtract) compile(c *compiler) (ctype, error) { return ctype{}, err } - if slices2.All(call.Arguments[1:], func(expr Expr) bool { return expr.constant() }) { + if slice.All(call.Arguments[1:], func(expr Expr) bool { return expr.constant() }) { paths := make([]*json.Path, 0, len(call.Arguments[1:])) for _, arg := range call.Arguments[1:] { @@ -406,7 +406,7 @@ func (call *builtinJSONContainsPath) compile(c *compiler) (ctype, error) { return ctype{}, c.unsupported(call) } - if !slices2.All(call.Arguments[2:], func(expr Expr) bool { return expr.constant() }) { + if !slice.All(call.Arguments[2:], func(expr Expr) bool { return expr.constant() }) { return ctype{}, c.unsupported(call) } diff --git a/go/vt/vtgate/evalengine/fn_misc.go b/go/vt/vtgate/evalengine/fn_misc.go index 96522a2314f..04770c387af 100644 --- a/go/vt/vtgate/evalengine/fn_misc.go +++ b/go/vt/vtgate/evalengine/fn_misc.go @@ -586,6 +586,10 @@ func (call *builtinUUID) compile(c *compiler) (ctype, error) { return ctype{Type: sqltypes.VarChar, Flag: 0, Col: collationUtf8mb3}, nil } +func (call *builtinUUID) constant() bool { + return false +} + func (call *builtinUUIDToBin) eval(env *ExpressionEnv) 
(eval, error) { arg, err := call.arg1(env) if arg == nil || err != nil { diff --git a/go/vt/vtgate/evalengine/fn_numeric.go b/go/vt/vtgate/evalengine/fn_numeric.go index fe8eeffb2c4..3802fbd5630 100644 --- a/go/vt/vtgate/evalengine/fn_numeric.go +++ b/go/vt/vtgate/evalengine/fn_numeric.go @@ -1549,7 +1549,7 @@ func (expr *builtinConv) compile(c *compiler) (ctype, error) { c.asm.Fn_CONV_bu(3, 2) } - col := defaultCoercionCollation(n.Col.Collation) + col := defaultCoercionCollation(expr.collate) c.asm.Fn_CONV_uc(t, col) c.asm.jumpDestination(skip) diff --git a/go/vt/vtgate/evalengine/fn_regexp.go b/go/vt/vtgate/evalengine/fn_regexp.go new file mode 100644 index 00000000000..2ba5b97573f --- /dev/null +++ b/go/vt/vtgate/evalengine/fn_regexp.go @@ -0,0 +1,1064 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package evalengine + +import ( + "errors" + "strings" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql/icuregex" + icuerrors "vitess.io/vitess/go/mysql/icuregex/errors" + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +func regexpFlags(m eval, flags icuregex.RegexpFlag, f string) (icuregex.RegexpFlag, error) { + switch m := m.(type) { + case *evalBytes: + for _, b := range m.bytes { + switch b { + case 'c': + flags &= ^icuregex.CaseInsensitive + case 'i': + flags |= icuregex.CaseInsensitive + case 'm': + flags |= icuregex.Multiline + case 'n': + flags |= icuregex.DotAll + case 'u': + flags |= icuregex.UnixLines + default: + return flags, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongArguments, "Incorrect arguments to %s.", f) + } + } + default: + return flags, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongArguments, "Incorrect arguments to %s.", f) + } + + return flags, nil +} + +func occurrence(e *evalInt64, min int64) int64 { + if e.i < min { + return min + } + return e.i +} + +func returnOption(val *evalInt64, f string) (int64, error) { + switch val.i { + case 0, 1: + // Valid return options. 
+ return val.i, nil + } + return 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongArguments, "Incorrect arguments to %s: return_option must be 1 or 0.", f) +} + +func positionInstr(val *evalInt64, limit int64) (int64, error) { + pos := val.i + if pos < 1 || pos > limit { + return 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpIndexOutOfBounds, "Index out of bounds in regular expression search.") + } + return pos, nil +} + +func position(val *evalInt64, limit int64, f string) (int64, error) { + pos := val.i + if pos < 1 { + return 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongParametersToNativeFct, "Incorrect parameters in the call to native function '%s'", f) + } + if pos-1 > limit { + return 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpIndexOutOfBounds, "Index out of bounds in regular expression search.") + } + return pos, nil +} + +func evalRegexpCollation(input, pat eval, f string) (eval, eval, collations.TypedCollation, icuregex.RegexpFlag, error) { + var typedCol collations.TypedCollation + var err error + + if inputBytes, ok := input.(*evalBytes); ok { + if patBytes, ok := pat.(*evalBytes); ok { + inputCol := inputBytes.col.Collation + patCol := patBytes.col.Collation + if (inputCol == collations.CollationBinaryID && patCol != collations.CollationBinaryID) || + (inputCol != collations.CollationBinaryID && patCol == collations.CollationBinaryID) { + env := collations.Local() + inputColName := env.LookupName(inputCol) + patColName := env.LookupName(patCol) + return nil, nil, typedCol, 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.CharacterSetMismatch, "Character set '%s' cannot be used in conjunction with '%s' in call to %s.", inputColName, patColName, f) + } + } + } + + input, pat, typedCol, err = mergeAndCoerceCollations(input, pat) + if err != nil { + return nil, nil, collations.TypedCollation{}, 0, err + } + + var flags icuregex.RegexpFlag + var collation = 
collations.Local().LookupName(typedCol.Collation) + if strings.Contains(collation, "_ci") { + flags |= icuregex.CaseInsensitive + } + + return input, pat, typedCol, flags, nil +} + +func compileRegexpCollation(input, pat ctype, f string) (collations.TypedCollation, icuregex.RegexpFlag, error) { + var merged collations.TypedCollation + var err error + + env := collations.Local() + if input.isTextual() && pat.isTextual() { + inputCol := input.Col.Collation + patCol := pat.Col.Collation + if (inputCol == collations.CollationBinaryID && patCol != collations.CollationBinaryID) || + (inputCol != collations.CollationBinaryID && patCol == collations.CollationBinaryID) { + inputColName := env.LookupName(inputCol) + patColName := env.LookupName(patCol) + return input.Col, 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.CharacterSetMismatch, "Character set '%s' cannot be used in conjunction with '%s' in call to %s.", inputColName, patColName, f) + } + } + + if input.Col.Collation != pat.Col.Collation { + merged, _, _, err = mergeCollations(input.Col, pat.Col, input.Type, pat.Type) + } else { + merged = input.Col + } + if err != nil { + return input.Col, 0, err + } + + var flags icuregex.RegexpFlag + if strings.Contains(env.LookupName(merged.Collation), "_ci") { + flags |= icuregex.CaseInsensitive + } + return merged, flags, nil +} + +func compileRegex(pat eval, c colldata.Charset, flags icuregex.RegexpFlag) (*icuregex.Pattern, error) { + patRunes := charset.Expand(nil, pat.ToRawBytes(), c) + + if len(patRunes) == 0 { + return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpIllegalArgument, "Illegal argument to a regular expression.") + } + + regexp, err := icuregex.Compile(patRunes, flags) + if err == nil { + return regexp, nil + } + + var compileErr *icuregex.CompileError + if errors.Is(err, icuerrors.ErrUnsupported) { + err = vterrors.NewErrorf(vtrpcpb.Code_UNIMPLEMENTED, vterrors.RegexpUnimplemented, err.Error()) + } else if 
errors.Is(err, icuerrors.ErrIllegalArgument) { + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpIllegalArgument, err.Error()) + } else if errors.As(err, &compileErr) { + switch compileErr.Code { + case icuregex.InternalError: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpInternal, compileErr.Error()) + case icuregex.RuleSyntax: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpRuleSyntax, compileErr.Error()) + case icuregex.BadEscapeSequence: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpBadEscapeSequence, compileErr.Error()) + case icuregex.PropertySyntax: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpRuleSyntax, compileErr.Error()) + case icuregex.Unimplemented: + err = vterrors.NewErrorf(vtrpcpb.Code_UNIMPLEMENTED, vterrors.RegexpUnimplemented, compileErr.Error()) + case icuregex.MismatchedParen: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpMismatchParen, compileErr.Error()) + case icuregex.BadInterval: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpBadInterval, compileErr.Error()) + case icuregex.MaxLtMin: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpMaxLtMin, compileErr.Error()) + case icuregex.InvalidBackRef: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpInvalidBackRef, compileErr.Error()) + case icuregex.InvalidFlag: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpInvalidFlag, compileErr.Error()) + case icuregex.LookBehindLimit: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpLookBehindLimit, compileErr.Error()) + case icuregex.MissingCloseBracket: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpMissingCloseBracket, compileErr.Error()) + case icuregex.InvalidRange: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpInvalidRange, 
compileErr.Error()) + case icuregex.PatternTooBig: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpPatternTooBig, compileErr.Error()) + case icuregex.InvalidCaptureGroupName: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpInvalidCaptureGroup, compileErr.Error()) + default: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpInternal, compileErr.Error()) + } + } + + return nil, err +} + +func compileConstantRegex(c *compiler, args TupleExpr, pat, mt int, cs collations.TypedCollation, flags icuregex.RegexpFlag, f string) (*icuregex.Pattern, error) { + pattern := args[pat] + if !pattern.constant() { + return nil, c.unsupported(pattern) + } + var err error + staticEnv := EmptyExpressionEnv() + pattern, err = simplifyExpr(staticEnv, pattern) + if err != nil { + return nil, err + } + + if len(args) > mt { + fl := args[mt] + if !fl.constant() { + return nil, c.unsupported(fl) + } + fl, err = simplifyExpr(staticEnv, fl) + if err != nil { + return nil, err + } + flags, err = regexpFlags(fl.(*Literal).inner, flags, f) + if err != nil { + return nil, err + } + } + + if pattern.(*Literal).inner == nil { + return nil, c.unsupported(pattern) + } + + innerPat, err := evalToVarchar(pattern.(*Literal).inner, cs.Collation, true) + if err != nil { + return nil, err + } + + return compileRegex(innerPat, colldata.Lookup(cs.Collation).Charset(), flags) +} + +// resultCollation returns the collation to use for the result of a regexp. +// This falls back to latin1_swedish if the input collation is binary. This +// seems to be a side effect of how MySQL also works. Probably due to how it +// is using ICU and converting there. 
+func resultCollation(in collations.TypedCollation) collations.TypedCollation { + if in.Collation == collationBinary.Collation { + return collationRegexpFallback + } + return in +} + +type builtinRegexpLike struct { + CallExpr + Negate bool +} + +func (r *builtinRegexpLike) eval(env *ExpressionEnv) (eval, error) { + input, err := r.Arguments[0].eval(env) + if err != nil || input == nil { + return nil, err + } + + pat, err := r.Arguments[1].eval(env) + if err != nil || pat == nil { + return nil, err + } + + input, pat, typedCol, flags, err := evalRegexpCollation(input, pat, "regexp_like") + if err != nil { + return nil, err + } + collation := colldata.Lookup(typedCol.Collation) + + if len(r.Arguments) > 2 { + m, err := r.Arguments[2].eval(env) + if err != nil || m == nil { + return nil, err + } + flags, err = regexpFlags(m, flags, "regexp_like") + if err != nil { + return nil, err + } + } + + regexp, err := compileRegex(pat, collation.Charset(), flags) + if err != nil { + return nil, err + } + + inputRunes := charset.Expand(nil, input.ToRawBytes(), collation.Charset()) + m := icuregex.NewMatcher(regexp) + m.Reset(inputRunes) + + ok, err := m.Find() + if err != nil { + return nil, err + } + if r.Negate { + ok = !ok + } + return newEvalBool(ok), nil +} + +func (r *builtinRegexpLike) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f1 := r.Arguments[0].typeof(env, fields) + _, f2 := r.Arguments[1].typeof(env, fields) + var f3 typeFlag + if len(r.Arguments) > 2 { + _, f3 = r.Arguments[2].typeof(env, fields) + } + return sqltypes.Int64, f1 | f2 | f3 | flagIsBoolean +} + +func (r *builtinRegexpLike) compileSlow(c *compiler, input, pat, fl ctype, merged collations.TypedCollation, flags icuregex.RegexpFlag, skips ...*jump) (ctype, error) { + if !pat.isTextual() || pat.Col.Collation != merged.Collation { + c.asm.Convert_xce(len(r.Arguments)-1, sqltypes.VarChar, merged.Collation) + } + + c.asm.Fn_REGEXP_LIKE_slow(r.Negate, 
colldata.Lookup(merged.Collation).Charset(), flags, len(r.Arguments)-1) + c.asm.jumpDestination(skips...) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: input.Flag | pat.Flag | fl.Flag | flagIsBoolean}, nil +} + +func (r *builtinRegexpLike) compile(c *compiler) (ctype, error) { + input, err := r.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + var skips []*jump + skips = append(skips, c.compileNullCheckArg(input, 0)) + + pat, err := r.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(pat, 1)) + + var f ctype + + if len(r.Arguments) > 2 { + f, err = r.Arguments[2].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(f, 2)) + } + + merged, flags, err := compileRegexpCollation(input, pat, "regexp_like") + if err != nil { + return ctype{}, err + } + + if !input.isTextual() || input.Col.Collation != merged.Collation { + c.asm.Convert_xce(len(r.Arguments), sqltypes.VarChar, merged.Collation) + } + + // We optimize for the case where the pattern is a constant. If not, + // we fall back to the slow path. + p, err := compileConstantRegex(c, r.Arguments, 1, 2, merged, flags, "regexp_like") + if err != nil { + return r.compileSlow(c, input, pat, f, merged, flags, skips...) + } + + c.asm.Fn_REGEXP_LIKE(icuregex.NewMatcher(p), r.Negate, colldata.Lookup(merged.Collation).Charset(), len(r.Arguments)-1) + c.asm.jumpDestination(skips...) 
+ + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: input.Flag | pat.Flag | f.Flag | flagIsBoolean}, nil +} + +var _ Expr = (*builtinRegexpLike)(nil) + +type builtinRegexpInstr struct { + CallExpr +} + +func (r *builtinRegexpInstr) eval(env *ExpressionEnv) (eval, error) { + input, err := r.Arguments[0].eval(env) + if err != nil || input == nil { + return nil, err + } + + pat, err := r.Arguments[1].eval(env) + if err != nil || pat == nil { + return nil, err + } + + input, pat, typedCol, flags, err := evalRegexpCollation(input, pat, "regexp_instr") + if err != nil { + return nil, err + } + + var posExpr eval + if len(r.Arguments) > 2 { + posExpr, err = r.Arguments[2].eval(env) + if err != nil || posExpr == nil { + return nil, err + } + } + + var occExpr eval + if len(r.Arguments) > 3 { + occExpr, err = r.Arguments[3].eval(env) + if err != nil || occExpr == nil { + return nil, err + } + } + + var retExpr eval + if len(r.Arguments) > 4 { + retExpr, err = r.Arguments[4].eval(env) + if err != nil || retExpr == nil { + return nil, err + } + } + + var mtExpr eval + if len(r.Arguments) > 5 { + mtExpr, err = r.Arguments[5].eval(env) + if err != nil || mtExpr == nil { + return nil, err + } + } + + collation := colldata.Lookup(typedCol.Collation) + + pos := int64(1) + occ := int64(1) + returnOpt := int64(0) + + if mtExpr != nil { + flags, err = regexpFlags(mtExpr, flags, "regexp_instr") + if err != nil { + return nil, err + } + } + + regexp, err := compileRegex(pat, collation.Charset(), flags) + if err != nil { + return nil, err + } + + inputRunes := charset.Expand(nil, input.ToRawBytes(), collation.Charset()) + if len(inputRunes) == 0 { + return newEvalInt64(0), nil + } + + if posExpr != nil { + pos, err = positionInstr(evalToInt64(posExpr), int64(len(inputRunes))) + if err != nil { + return nil, err + } + } + + if occExpr != nil { + occ = occurrence(evalToInt64(occExpr), occ) + } + + if retExpr != nil { + returnOpt, err = returnOption(evalToInt64(retExpr), 
"regexp_instr") + if err != nil { + return nil, err + } + } + + m := icuregex.NewMatcher(regexp) + m.Reset(inputRunes[pos-1:]) + + found := false + for i := int64(0); i < occ; i++ { + found, err = m.Find() + if err != nil { + return nil, err + } + if !found { + break + } + } + if !found { + return newEvalInt64(0), nil + } + if returnOpt == 0 { + return newEvalInt64(int64(m.Start()) + pos), nil + } + return newEvalInt64(int64(m.End()) + pos), nil +} + +func (r *builtinRegexpInstr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f1 := r.Arguments[0].typeof(env, fields) + _, f2 := r.Arguments[1].typeof(env, fields) + var f3, f4, f5, f6 typeFlag + if len(r.Arguments) > 2 { + _, f3 = r.Arguments[2].typeof(env, fields) + } + if len(r.Arguments) > 3 { + _, f4 = r.Arguments[3].typeof(env, fields) + } + if len(r.Arguments) > 4 { + _, f5 = r.Arguments[4].typeof(env, fields) + } + if len(r.Arguments) > 5 { + _, f6 = r.Arguments[5].typeof(env, fields) + } + return sqltypes.Int64, f1 | f2 | f3 | f4 | f5 | f6 +} + +func (r *builtinRegexpInstr) compileSlow(c *compiler, input, pat, pos, occ, returnOption, matchType ctype, merged collations.TypedCollation, flags icuregex.RegexpFlag, skips ...*jump) (ctype, error) { + if !pat.isTextual() || pat.Col.Collation != merged.Collation { + c.asm.Convert_xce(len(r.Arguments)-1, sqltypes.VarChar, merged.Collation) + } + + c.asm.Fn_REGEXP_INSTR_slow(colldata.Lookup(merged.Collation).Charset(), flags, len(r.Arguments)-1) + c.asm.jumpDestination(skips...) 
+ return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: input.Flag | pat.Flag | pos.Flag | occ.Flag | returnOption.Flag | matchType.Flag}, nil +} + +func (r *builtinRegexpInstr) compile(c *compiler) (ctype, error) { + input, err := r.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + var skips []*jump + skips = append(skips, c.compileNullCheckArg(input, 0)) + + pat, err := r.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(pat, 1)) + + var pos ctype + if len(r.Arguments) > 2 { + pos, err = r.Arguments[2].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(pos, 2)) + _ = c.compileToInt64(pos, 1) + } + + var occ ctype + if len(r.Arguments) > 3 { + occ, err = r.Arguments[3].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(occ, 3)) + _ = c.compileToInt64(occ, 1) + } + + var returnOpt ctype + if len(r.Arguments) > 4 { + returnOpt, err = r.Arguments[4].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(returnOpt, 4)) + _ = c.compileToInt64(returnOpt, 1) + } + + var matchType ctype + if len(r.Arguments) > 5 { + matchType, err = r.Arguments[5].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(matchType, 5)) + switch { + case matchType.isTextual(): + default: + c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + } + } + + merged, flags, err := compileRegexpCollation(input, pat, "regexp_instr") + if err != nil { + return ctype{}, err + } + + if !input.isTextual() || input.Col.Collation != merged.Collation { + c.asm.Convert_xce(len(r.Arguments), sqltypes.VarChar, merged.Collation) + } + + // We optimize for the case where the pattern is a constant. If not, + // we fall back to the slow path. 
+ p, err := compileConstantRegex(c, r.Arguments, 1, 5, merged, flags, "regexp_instr") + if err != nil { + return r.compileSlow(c, input, pat, pos, occ, returnOpt, matchType, merged, flags, skips...) + } + + c.asm.Fn_REGEXP_INSTR(icuregex.NewMatcher(p), colldata.Lookup(merged.Collation).Charset(), len(r.Arguments)-1) + c.asm.jumpDestination(skips...) + + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: input.Flag | pat.Flag | flagIsBoolean}, nil +} + +var _ Expr = (*builtinRegexpInstr)(nil) + +type builtinRegexpSubstr struct { + CallExpr +} + +func (r *builtinRegexpSubstr) eval(env *ExpressionEnv) (eval, error) { + input, err := r.Arguments[0].eval(env) + if err != nil || input == nil { + return nil, err + } + + pat, err := r.Arguments[1].eval(env) + if err != nil || pat == nil { + return nil, err + } + + input, pat, typedCol, flags, err := evalRegexpCollation(input, pat, "regexp_substr") + if err != nil { + return nil, err + } + + var posExpr eval + // For some reason this gets checked before NULL checks of the other values + if len(r.Arguments) > 2 { + posExpr, err = r.Arguments[2].eval(env) + if err != nil || posExpr == nil { + return nil, err + } + } + + var occExpr eval + if len(r.Arguments) > 3 { + occExpr, err = r.Arguments[3].eval(env) + if err != nil || occExpr == nil { + return nil, err + } + } + + var mtExpr eval + if len(r.Arguments) > 4 { + mtExpr, err = r.Arguments[4].eval(env) + if err != nil || mtExpr == nil { + return nil, err + } + } + + collation := colldata.Lookup(typedCol.Collation) + pos := int64(1) + occ := int64(1) + inputRunes := charset.Expand(nil, input.ToRawBytes(), collation.Charset()) + + if posExpr != nil { + pos, err = position(evalToInt64(posExpr), int64(len(inputRunes)), "regexp_substr") + if err != nil { + return nil, err + } + + } + + if occExpr != nil { + occ = occurrence(evalToInt64(occExpr), occ) + } + + if mtExpr != nil { + flags, err = regexpFlags(mtExpr, flags, "regexp_substr") + if err != nil { + return nil, 
err + } + } + + regexp, err := compileRegex(pat, collation.Charset(), flags) + if err != nil { + return nil, err + } + + m := icuregex.NewMatcher(regexp) + m.Reset(inputRunes[pos-1:]) + + found := false + for i := int64(0); i < occ; i++ { + found, err = m.Find() + if err != nil { + return nil, err + } + if !found { + break + } + } + if !found { + return nil, nil + } + out := inputRunes[int64(m.Start())+pos-1 : int64(m.End())+pos-1] + b := charset.Collapse(nil, out, collation.Charset()) + return newEvalText(b, resultCollation(typedCol)), nil +} + +func (r *builtinRegexpSubstr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f1 := r.Arguments[0].typeof(env, fields) + _, f2 := r.Arguments[1].typeof(env, fields) + var f3, f4, f5 typeFlag + if len(r.Arguments) > 2 { + _, f3 = r.Arguments[2].typeof(env, fields) + } + if len(r.Arguments) > 3 { + _, f4 = r.Arguments[3].typeof(env, fields) + } + if len(r.Arguments) > 4 { + _, f5 = r.Arguments[4].typeof(env, fields) + } + return sqltypes.VarChar, f1 | f2 | f3 | f4 | f5 +} + +func (r *builtinRegexpSubstr) compileSlow(c *compiler, input, pat, pos, occ, matchType ctype, merged collations.TypedCollation, flags icuregex.RegexpFlag, skips ...*jump) (ctype, error) { + if !pat.isTextual() || pat.Col.Collation != merged.Collation { + c.asm.Convert_xce(len(r.Arguments)-1, sqltypes.VarChar, merged.Collation) + } + + c.asm.Fn_REGEXP_SUBSTR_slow(merged, flags, len(r.Arguments)-1) + c.asm.jumpDestination(skips...) 
+ return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: input.Flag | pat.Flag | pos.Flag | occ.Flag | matchType.Flag}, nil +} + +func (r *builtinRegexpSubstr) compile(c *compiler) (ctype, error) { + input, err := r.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + var skips []*jump + skips = append(skips, c.compileNullCheckArg(input, 0)) + + pat, err := r.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(pat, 1)) + + var pos ctype + if len(r.Arguments) > 2 { + pos, err = r.Arguments[2].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(pos, 2)) + _ = c.compileToInt64(pos, 1) + } + + var occ ctype + if len(r.Arguments) > 3 { + occ, err = r.Arguments[3].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(occ, 3)) + _ = c.compileToInt64(occ, 1) + } + + var matchType ctype + if len(r.Arguments) > 4 { + matchType, err = r.Arguments[4].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(matchType, 4)) + switch { + case matchType.isTextual(): + default: + c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + } + } + + merged, flags, err := compileRegexpCollation(input, pat, "regexp_substr") + if err != nil { + return ctype{}, err + } + + if !input.isTextual() || input.Col.Collation != merged.Collation { + c.asm.Convert_xce(len(r.Arguments), sqltypes.VarChar, merged.Collation) + } + + // We optimize for the case where the pattern is a constant. If not, + // we fall back to the slow path. + p, err := compileConstantRegex(c, r.Arguments, 1, 4, merged, flags, "regexp_substr") + if err != nil { + return r.compileSlow(c, input, pat, pos, occ, matchType, merged, flags, skips...) + } + + c.asm.Fn_REGEXP_SUBSTR(icuregex.NewMatcher(p), merged, len(r.Arguments)-1) + c.asm.jumpDestination(skips...) 
+ + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: input.Flag | pat.Flag | pos.Flag | occ.Flag | matchType.Flag}, nil +} + +var _ Expr = (*builtinRegexpSubstr)(nil) + +type builtinRegexpReplace struct { + CallExpr +} + +func regexpReplace(m *icuregex.Matcher, inputRunes, replRunes []rune, pos, occ int64, c colldata.Charset) ([]byte, bool, error) { + var err error + found := false + if occ > 0 { + for i := int64(0); i < occ; i++ { + found, err = m.Find() + if err != nil { + return nil, false, err + } + if !found { + break + } + } + if !found { + return nil, false, nil + } + + out := append(inputRunes[:int64(m.Start())+pos-1], replRunes...) + out = append(out, inputRunes[int64(m.End())+pos-1:]...) + return charset.Collapse(nil, out, c), true, nil + } + + found, err = m.Find() + if err != nil { + return nil, false, err + } + + if !found { + return nil, false, nil + } + + start := int64(m.Start()) + pos - 1 + out := append(inputRunes[:start], replRunes...) + end := int64(m.End()) + pos - 1 + for { + found, err = m.Find() + if err != nil { + return nil, false, err + } + if !found { + break + } + nextStart := int64(m.Start()) + pos - 1 + out = append(out, inputRunes[end:nextStart]...) + out = append(out, replRunes...) + end = int64(m.End()) + pos - 1 + } + + out = append(out, inputRunes[end:]...) 
+ return charset.Collapse(nil, out, c), true, nil +} + +func (r *builtinRegexpReplace) eval(env *ExpressionEnv) (eval, error) { + input, err := r.Arguments[0].eval(env) + if err != nil || input == nil { + return nil, err + } + + pat, err := r.Arguments[1].eval(env) + if err != nil || pat == nil { + return nil, err + } + + replArg, err := r.Arguments[2].eval(env) + if err != nil || replArg == nil { + return nil, err + } + + input, pat, typedCol, flags, err := evalRegexpCollation(input, pat, "regexp_replace") + if err != nil { + return nil, err + } + + var posExpr eval + // For some reason this gets checked before NULL checks of the other values + if len(r.Arguments) > 3 { + posExpr, err = r.Arguments[3].eval(env) + if err != nil || posExpr == nil { + return nil, err + } + } + + var occExpr eval + if len(r.Arguments) > 4 { + occExpr, err = r.Arguments[4].eval(env) + if err != nil || occExpr == nil { + return nil, err + } + } + + var mtExpr eval + if len(r.Arguments) > 5 { + mtExpr, err = r.Arguments[5].eval(env) + if err != nil || mtExpr == nil { + return nil, err + } + } + + collation := colldata.Lookup(typedCol.Collation) + + repl, ok := replArg.(*evalBytes) + if !ok { + repl, err = evalToVarchar(replArg, typedCol.Collation, true) + if err != nil { + return nil, err + } + } + pos := int64(1) + occ := int64(0) + inputRunes := charset.Expand(nil, input.ToRawBytes(), collation.Charset()) + replRunes := charset.Expand(nil, repl.ToRawBytes(), colldata.Lookup(repl.col.Collation).Charset()) + + if posExpr != nil { + pos, err = position(evalToInt64(posExpr), int64(len(inputRunes)), "regexp_replace") + if err != nil { + return nil, err + } + } + + if occExpr != nil { + occ = occurrence(evalToInt64(occExpr), occ) + } + + if mtExpr != nil { + flags, err = regexpFlags(mtExpr, flags, "regexp_replace") + if err != nil { + return nil, err + } + } + + regexp, err := compileRegex(pat, collation.Charset(), flags) + if err != nil { + return nil, err + } + + m := 
icuregex.NewMatcher(regexp) + m.Reset(inputRunes[pos-1:]) + + bytes, replaced, err := regexpReplace(m, inputRunes, replRunes, pos, occ, collation.Charset()) + if err != nil { + return nil, err + } + if !replaced { + return newEvalRaw(sqltypes.Text, input.ToRawBytes(), resultCollation(typedCol)), nil + } + return newEvalRaw(sqltypes.Text, bytes, resultCollation(typedCol)), nil +} + +func (r *builtinRegexpReplace) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f1 := r.Arguments[0].typeof(env, fields) + _, f2 := r.Arguments[1].typeof(env, fields) + _, f3 := r.Arguments[2].typeof(env, fields) + var f4, f5, f6 typeFlag + if len(r.Arguments) > 3 { + _, f4 = r.Arguments[3].typeof(env, fields) + } + if len(r.Arguments) > 4 { + _, f5 = r.Arguments[4].typeof(env, fields) + } + if len(r.Arguments) > 5 { + _, f6 = r.Arguments[5].typeof(env, fields) + } + return sqltypes.Text, f1 | f2 | f3 | f4 | f5 | f6 +} + +func (r *builtinRegexpReplace) compileSlow(c *compiler, input, pat, repl, pos, occ, matchType ctype, merged collations.TypedCollation, flags icuregex.RegexpFlag, skips ...*jump) (ctype, error) { + if !pat.isTextual() || pat.Col.Collation != merged.Collation { + c.asm.Convert_xce(len(r.Arguments)-1, sqltypes.VarChar, merged.Collation) + } + + c.asm.Fn_REGEXP_REPLACE_slow(merged, flags, len(r.Arguments)-1) + c.asm.jumpDestination(skips...) 
+ return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: input.Flag | pat.Flag | repl.Flag | pos.Flag | occ.Flag | matchType.Flag}, nil +} + +func (r *builtinRegexpReplace) compile(c *compiler) (ctype, error) { + input, err := r.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + var skips []*jump + skips = append(skips, c.compileNullCheckArg(input, 0)) + + pat, err := r.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(pat, 1)) + + repl, err := r.Arguments[2].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(repl, 2)) + + var pos ctype + if len(r.Arguments) > 3 { + pos, err = r.Arguments[3].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(pos, 3)) + _ = c.compileToInt64(pos, 1) + } + + var occ ctype + if len(r.Arguments) > 4 { + occ, err = r.Arguments[4].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(occ, 4)) + _ = c.compileToInt64(occ, 1) + } + + var matchType ctype + if len(r.Arguments) > 5 { + matchType, err = r.Arguments[5].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(matchType, 5)) + switch { + case matchType.isTextual(): + default: + c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + } + } + + merged, flags, err := compileRegexpCollation(input, pat, "regexp_replace") + if err != nil { + return ctype{}, err + } + + if !input.isTextual() || input.Col.Collation != merged.Collation { + c.asm.Convert_xce(len(r.Arguments), sqltypes.VarChar, merged.Collation) + } + + if !repl.isTextual() || repl.Col.Collation != merged.Collation { + c.asm.Convert_xce(len(r.Arguments)-2, sqltypes.VarChar, merged.Collation) + } + + // We optimize for the case where the pattern is a constant. If not, + // we fall back to the slow path. 
+ p, err := compileConstantRegex(c, r.Arguments, 1, 5, merged, flags, "regexp_replace") + if err != nil { + return r.compileSlow(c, input, pat, repl, pos, occ, matchType, merged, flags, skips...) + } + + c.asm.Fn_REGEXP_REPLACE(icuregex.NewMatcher(p), merged, len(r.Arguments)-1) + c.asm.jumpDestination(skips...) + + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: input.Flag | pat.Flag | repl.Flag | pos.Flag | occ.Flag | matchType.Flag}, nil +} + +var _ Expr = (*builtinRegexpReplace)(nil) diff --git a/go/vt/vtgate/evalengine/fn_string.go b/go/vt/vtgate/evalengine/fn_string.go index 7146ac03b68..b34618b00d2 100644 --- a/go/vt/vtgate/evalengine/fn_string.go +++ b/go/vt/vtgate/evalengine/fn_string.go @@ -21,6 +21,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -61,7 +62,7 @@ type ( } builtinWeightString struct { - String Expr + Expr Expr Cast string Len int HasLen bool @@ -117,8 +118,8 @@ func (call *builtinChangeCase) eval(env *ExpressionEnv) (eval, error) { return evalToVarchar(e, call.collate, false) case *evalBytes: - coll := e.col.Collation.Get() - csa, ok := coll.(collations.CaseAwareCollation) + coll := colldata.Lookup(e.col.Collation) + csa, ok := coll.(colldata.CaseAwareCollation) if !ok { return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "not implemented") } @@ -172,7 +173,7 @@ func (call *builtinCharLength) eval(env *ExpressionEnv) (eval, error) { if sqltypes.IsBinary(e.SQLType()) { return newEvalInt64(int64(len(e.bytes))), nil } - coll := e.col.Collation.Get() + coll := colldata.Lookup(e.col.Collation) count := charset.Length(coll.Charset(), e.bytes) return newEvalInt64(int64(count)), nil default: @@ -277,7 +278,7 @@ func charOrd(b []byte, coll collations.ID) int64 { if len(b) == 0 { return 0 } - cs := 
coll.Get().Charset() + cs := colldata.Lookup(coll).Charset() _, l := cs.DecodeRune(b) var r int64 for i := 0; i < l; i++ { @@ -429,11 +430,11 @@ func (c *builtinCollation) eval(env *ExpressionEnv) (eval, error) { return nil, err } - col := evalCollation(arg).Collation.Get() + col := evalCollation(arg) - // the collation of a `COLLATION` expr is hardcoded to `utf8_general_ci`, + // the collation of a `COLLATION` expr is hardcoded to `utf8mb3_general_ci`, // not to the default collation of our connection. this is probably a bug in MySQL, but we match it - return newEvalText([]byte(col.Name()), collationUtf8mb3), nil + return newEvalText([]byte(collations.Local().LookupName(col.Collation)), collationUtf8mb3), nil } func (*builtinCollation) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { @@ -455,76 +456,138 @@ func (expr *builtinCollation) compile(c *compiler) (ctype, error) { } func (c *builtinWeightString) callable() []Expr { - return []Expr{c.String} + return []Expr{c.Expr} } func (c *builtinWeightString) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { - _, f := c.String.typeof(env, fields) + tt, f := c.Expr.typeof(env, fields) + switch tt { + case sqltypes.Blob, sqltypes.Text, sqltypes.TypeJSON: + return sqltypes.Blob, f + } return sqltypes.VarBinary, f } func (c *builtinWeightString) eval(env *ExpressionEnv) (eval, error) { - var ( - tc collations.TypedCollation - text []byte - weights []byte - length = c.Len - ) - - str, err := c.String.eval(env) + var weights []byte + + input, err := c.Expr.eval(env) if err != nil { return nil, err } - switch str := str.(type) { - case *evalInt64, *evalUint64: - // when calling WEIGHT_STRING with an integral value, MySQL returns the - // internal sort key that would be used in an InnoDB table... 
we do not - // support that - return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "%s: %s", ErrEvaluatedExprNotSupported, FormatExpr(c)) + typ := sqltypes.VarBinary + + if c.Cast == "binary" { + switch input.SQLType() { + case sqltypes.Blob, sqltypes.Text, sqltypes.TypeJSON: + typ = sqltypes.Blob + } + + weights, _, err = evalWeightString(weights, evalToBinary(input), c.Len, 0) + if err != nil { + return nil, err + } + return newEvalRaw(typ, weights, collationBinary), nil + } + + switch val := input.(type) { + case *evalInt64, *evalUint64, *evalTemporal: + weights, _, err = evalWeightString(weights, val, 0, 0) + case *evalJSON: + // JSON doesn't actually use a sortable weight string for this function, but + // returns the weight string directly for the string based representation. This + // means that ordering etc. is not correct for JSON values, but that's how MySQL + // works here for this function. We still have the internal weight string logic + // that can order these correctly. 
+ out, err := evalToVarchar(val, collationJSON.Collation, false) + if err != nil { + return nil, err + } + weights, _, err = evalWeightString(weights, out, 0, 0) + if err != nil { + return nil, err + } + typ = sqltypes.Blob case *evalBytes: - text = str.bytes - tc = str.col + switch val.SQLType() { + case sqltypes.Blob, sqltypes.Text: + typ = sqltypes.Blob + } + if val.isBinary() { + weights, _, err = evalWeightString(weights, val, 0, 0) + } else { + var strLen int + if c.Cast == "char" { + strLen = c.Len + } + weights, _, err = evalWeightString(weights, val, strLen, 0) + } default: return nil, nil } - if c.Cast == "binary" { - tc = collationBinary - weights = make([]byte, 0, c.Len) - length = collations.PadToMax + if err != nil { + return nil, err } - collation := tc.Collation.Get() - weights = collation.WeightString(weights, text, length) - return newEvalBinary(weights), nil + return newEvalRaw(typ, weights, collationBinary), nil } func (call *builtinWeightString) compile(c *compiler) (ctype, error) { - str, err := call.String.compile(c) + str, err := call.Expr.compile(c) if err != nil { return ctype{}, err } - switch str.Type { - case sqltypes.Int64, sqltypes.Uint64: - return ctype{}, c.unsupported(call) - - case sqltypes.VarChar, sqltypes.VarBinary: - skip := c.compileNullCheck1(str) + var flag typeFlag + if str.Flag&flagNullable != 0 { + flag = flag | flagNullable + } - if call.Cast == "binary" { - c.asm.Fn_WEIGHT_STRING_b(call.Len) - } else { - c.asm.Fn_WEIGHT_STRING_c(str.Col.Collation.Get(), call.Len) + typ := sqltypes.VarBinary + skip := c.compileNullCheck1(str) + if call.Cast == "binary" { + if !sqltypes.IsBinary(str.Type) { + c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + } + switch str.Type { + case sqltypes.Blob, sqltypes.Text, sqltypes.TypeJSON: + typ = sqltypes.Blob } + + c.asm.Fn_WEIGHT_STRING(typ, call.Len) c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.VarBinary, Col: collationBinary}, nil + return ctype{Type: sqltypes.VarBinary, 
Flag: flagNullable | flagNull, Col: collationBinary}, nil + } + + switch str.Type { + case sqltypes.Int64, sqltypes.Uint64, sqltypes.Date, sqltypes.Datetime, sqltypes.Timestamp, sqltypes.Time, sqltypes.VarBinary, sqltypes.Binary, sqltypes.Blob: + if str.Type == sqltypes.Blob { + typ = sqltypes.Blob + } + c.asm.Fn_WEIGHT_STRING(typ, 0) + case sqltypes.TypeJSON: + typ = sqltypes.Blob + c.asm.Convert_xce(1, sqltypes.VarChar, collationJSON.Collation) + c.asm.Fn_WEIGHT_STRING(typ, 0) + case sqltypes.VarChar, sqltypes.Char, sqltypes.Text: + if str.Type == sqltypes.Text { + typ = sqltypes.Blob + } + var strLen int + if call.Cast == "char" { + strLen = call.Len + } + c.asm.Fn_WEIGHT_STRING(typ, strLen) default: c.asm.SetNull(1) - return ctype{Type: sqltypes.VarBinary, Flag: flagNullable | flagNull, Col: collationBinary}, nil + flag = flag | flagNull | flagNullable } + + c.asm.jumpDestination(skip) + return ctype{Type: typ, Flag: flag, Col: collationBinary}, nil } func (call builtinLeftRight) eval(env *ExpressionEnv) (eval, error) { @@ -550,7 +613,7 @@ func (call builtinLeftRight) eval(env *ExpressionEnv) (eval, error) { } // LEFT / RIGHT operates on characters, not bytes - cs := text.col.Collation.Get().Charset() + cs := colldata.Lookup(text.col.Collation).Charset() strLen := charset.Length(cs, text.bytes) if strLen <= int(length) { @@ -620,9 +683,9 @@ func (call builtinPad) eval(env *ExpressionEnv) (eval, error) { } } - cs := text.col.Collation.Get().Charset() + cs := colldata.Lookup(text.col.Collation).Charset() pad, ok := p.(*evalBytes) - if !ok || pad.col.Collation.Get().Charset() != cs { + if !ok || colldata.Lookup(pad.col.Collation).Charset() != cs { pad, err = evalToVarchar(p, text.col.Collation, true) if err != nil { return nil, err @@ -706,8 +769,8 @@ func (call builtinPad) compile(c *compiler) (ctype, error) { switch { case pad.isTextual(): - fromCharset := pad.Col.Collation.Get().Charset() - toCharset := col.Collation.Get().Charset() + fromCharset := 
colldata.Lookup(pad.Col.Collation).Charset() + toCharset := colldata.Lookup(col.Collation).Charset() if fromCharset != toCharset && !toCharset.IsSuperset(fromCharset) { c.asm.Convert_xce(1, sqltypes.VarChar, col.Collation) } @@ -725,7 +788,7 @@ func (call builtinPad) compile(c *compiler) (ctype, error) { } func strcmpCollate(left, right []byte, col collations.ID) int64 { - cmp := col.Get().Collate(left, right, false) + cmp := colldata.Lookup(col).Collate(left, right, false) switch { case cmp == 0: return 0 @@ -757,7 +820,7 @@ func (l *builtinStrcmp) eval(env *ExpressionEnv) (eval, error) { col1 := evalCollation(left) col2 := evalCollation(right) - mcol, _, _, err := collations.Local().MergeCollations(col1, col2, collations.CoercionOptions{ + mcol, _, _, err := colldata.Merge(collations.Local(), col1, col2, colldata.CoercionOptions{ ConvertToSuperset: true, ConvertWithCoercion: true, }) @@ -804,7 +867,7 @@ func (expr *builtinStrcmp) compile(c *compiler) (ctype, error) { if sqltypes.IsNumber(lt.Type) || sqltypes.IsNumber(rt.Type) { mcol = collationNumeric } else { - mcol, _, _, err = collations.Local().MergeCollations(lt.Col, rt.Col, collations.CoercionOptions{ + mcol, _, _, err = colldata.Merge(collations.Local(), lt.Col, rt.Col, colldata.CoercionOptions{ ConvertToSuperset: true, ConvertWithCoercion: true, }) @@ -864,7 +927,7 @@ func (call builtinTrim) eval(env *ExpressionEnv) (eval, error) { } pat, ok := p.(*evalBytes) - if !ok || pat.col.Collation.Get().Charset() != text.col.Collation.Get().Charset() { + if !ok || colldata.Lookup(pat.col.Collation).Charset() != colldata.Lookup(text.col.Collation).Charset() { pat, err = evalToVarchar(p, text.col.Collation, true) if err != nil { return nil, err @@ -924,8 +987,8 @@ func (call builtinTrim) compile(c *compiler) (ctype, error) { switch { case pat.isTextual(): - fromCharset := pat.Col.Collation.Get().Charset() - toCharset := col.Collation.Get().Charset() + fromCharset := colldata.Lookup(pat.Col.Collation).Charset() + 
toCharset := colldata.Lookup(col.Collation).Charset() if fromCharset != toCharset && !toCharset.IsSuperset(fromCharset) { c.asm.Convert_xce(1, sqltypes.VarChar, col.Collation) } @@ -971,8 +1034,8 @@ func concatConvert(buf []byte, str *evalBytes, tc collations.TypedCollation) ([] if tc.Collation == collations.CollationBinaryID { return append(buf, str.bytes...), nil } - fromCharset := str.col.Collation.Get().Charset() - toCharset := tc.Collation.Get().Charset() + fromCharset := colldata.Lookup(str.col.Collation).Charset() + toCharset := colldata.Lookup(tc.Collation).Charset() if fromCharset != toCharset { return charset.Convert(buf, toCharset, str.bytes, fromCharset) } @@ -1076,8 +1139,8 @@ func (call *builtinConcat) compile(c *compiler) (ctype, error) { c.asm.Convert_xce(len(args)-i, arg.Type, tc.Collation) } case sqltypes.VarChar, sqltypes.Char, sqltypes.Text: - fromCharset := arg.Col.Collation.Get().Charset() - toCharset := tc.Collation.Get().Charset() + fromCharset := colldata.Lookup(arg.Col.Collation).Charset() + toCharset := colldata.Lookup(tc.Collation).Charset() if fromCharset != toCharset && !toCharset.IsSuperset(fromCharset) { c.asm.Convert_xce(len(args)-i, arg.Type, tc.Collation) } @@ -1224,8 +1287,8 @@ func (call *builtinConcatWs) compile(c *compiler) (ctype, error) { c.asm.Convert_xce(offset, arg.Type, tc.Collation) } case sqltypes.VarChar, sqltypes.Char, sqltypes.Text: - fromCharset := arg.Col.Collation.Get().Charset() - toCharset := tc.Collation.Get().Charset() + fromCharset := colldata.Lookup(arg.Col.Collation).Charset() + toCharset := colldata.Lookup(tc.Collation).Charset() if fromCharset != toCharset && !toCharset.IsSuperset(fromCharset) { c.asm.Convert_xce(offset, arg.Type, tc.Collation) } diff --git a/go/vt/vtgate/evalengine/fn_time.go b/go/vt/vtgate/evalengine/fn_time.go index 483c27144c5..99e0f27f755 100644 --- a/go/vt/vtgate/evalengine/fn_time.go +++ b/go/vt/vtgate/evalengine/fn_time.go @@ -141,6 +141,13 @@ type ( builtinYearWeek struct { 
CallExpr } + + builtinDateMath struct { + CallExpr + sub bool + unit datetime.IntervalType + collate collations.ID + } ) var _ Expr = (*builtinNow)(nil) @@ -212,7 +219,7 @@ func (call *builtinSysdate) eval(env *ExpressionEnv) (eval, error) { if tz := env.currentTimezone(); tz != nil { now = now.In(tz) } - return newEvalRaw(sqltypes.Datetime, datetime.FromStdTime(now).Format(call.prec), collationBinary), nil + return newEvalRaw(sqltypes.Datetime, datetime.NewDateTimeFromStd(now).Format(call.prec), collationBinary), nil } func (call *builtinSysdate) typeof(_ *ExpressionEnv, _ []*querypb.Field) (sqltypes.Type, typeFlag) { @@ -340,7 +347,7 @@ func convertTz(dt datetime.DateTime, from, to *time.Location) (datetime.DateTime if err != nil { return datetime.DateTime{}, false } - return datetime.FromStdTime(ts.In(to)), true + return datetime.NewDateTimeFromStd(ts.In(to)), true } func (call *builtinConvertTz) eval(env *ExpressionEnv) (eval, error) { @@ -646,7 +653,7 @@ func (b *builtinFromUnixtime) eval(env *ExpressionEnv) (eval, error) { t = t.In(tz) } - dt := newEvalDateTime(datetime.FromStdTime(t), prec) + dt := newEvalDateTime(datetime.NewDateTimeFromStd(t), prec) if len(b.Arguments) == 1 { return dt, nil @@ -809,7 +816,7 @@ func (b *builtinMakedate) eval(env *ExpressionEnv) (eval, error) { if t.IsZero() { return nil, nil } - return newEvalDate(datetime.FromStdTime(t).Date), nil + return newEvalDate(datetime.NewDateTimeFromStd(t).Date), nil } func (b *builtinMakedate) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { @@ -1688,3 +1695,92 @@ func (call *builtinYearWeek) compile(c *compiler) (ctype, error) { c.asm.jumpDestination(skip1, skip2) return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag | flagNullable}, nil } + +func evalToInterval(itv eval, unit datetime.IntervalType, negate bool) *datetime.Interval { + switch itv := itv.(type) { + case *evalBytes: + return datetime.ParseInterval(itv.string(), unit, negate) + case 
*evalFloat: + return datetime.ParseIntervalFloat(itv.f, unit, negate) + case *evalDecimal: + return datetime.ParseIntervalDecimal(itv.dec, itv.length, unit, negate) + default: + return datetime.ParseIntervalInt64(evalToNumeric(itv, false).toInt64().i, unit, negate) + } +} + +func (call *builtinDateMath) eval(env *ExpressionEnv) (eval, error) { + date, err := call.Arguments[0].eval(env) + if err != nil || date == nil { + return date, err + } + + itv, err := call.Arguments[1].eval(env) + if err != nil || itv == nil { + return itv, err + } + + interval := evalToInterval(itv, call.unit, call.sub) + if interval == nil { + return nil, nil + } + + if tmp, ok := date.(*evalTemporal); ok { + return tmp.addInterval(interval, collations.TypedCollation{}), nil + } + + if tmp := evalToTemporal(date); tmp != nil { + return tmp.addInterval(interval, defaultCoercionCollation(call.collate)), nil + } + + return nil, nil +} + +func (call *builtinDateMath) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + tt, f := call.Arguments[0].typeof(env, fields) + + switch { + case tt == sqltypes.Date && !call.unit.HasTimeParts(): + return sqltypes.Date, f | flagNullable + case tt == sqltypes.Time && !call.unit.HasDateParts(): + return sqltypes.Time, f | flagNullable + case tt == sqltypes.Datetime || tt == sqltypes.Timestamp || (tt == sqltypes.Date && call.unit.HasTimeParts()) || (tt == sqltypes.Time && call.unit.HasDateParts()): + return sqltypes.Datetime, f | flagNullable + default: + return sqltypes.Char, f | flagNullable + } +} + +func (call *builtinDateMath) compile(c *compiler) (ctype, error) { + date, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + // TODO: constant propagation + _, err = call.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + var ret ctype + ret.Flag = date.Flag | flagNullable + ret.Col = collationBinary + + switch { + case date.Type == sqltypes.Date && !call.unit.HasTimeParts(): + 
ret.Type = sqltypes.Date + c.asm.Fn_DATEADD_D(call.unit, call.sub) + case date.Type == sqltypes.Time && !call.unit.HasDateParts(): + ret.Type = sqltypes.Time + c.asm.Fn_DATEADD_D(call.unit, call.sub) + case date.Type == sqltypes.Datetime || date.Type == sqltypes.Timestamp || (date.Type == sqltypes.Date && call.unit.HasTimeParts()) || (date.Type == sqltypes.Time && call.unit.HasDateParts()): + ret.Type = sqltypes.Datetime + c.asm.Fn_DATEADD_D(call.unit, call.sub) + default: + ret.Type = sqltypes.VarChar + ret.Col = defaultCoercionCollation(c.cfg.Collation) + c.asm.Fn_DATEADD_s(call.unit, call.sub, ret.Col) + } + return ret, nil +} diff --git a/go/vt/vtgate/evalengine/format.go b/go/vt/vtgate/evalengine/format.go index 4c043d399d4..446d3e0f28f 100644 --- a/go/vt/vtgate/evalengine/format.go +++ b/go/vt/vtgate/evalengine/format.go @@ -117,15 +117,13 @@ func (t TupleExpr) format(w *formatter, depth int) { func (c *CollateExpr) format(w *formatter, depth int) { c.Inner.format(w, depth) - coll := c.TypedCollation.Collation.Get() w.WriteString(" COLLATE ") - w.WriteString(coll.Name()) + w.WriteString(collations.Local().LookupName(c.TypedCollation.Collation)) } func (i *IntroducerExpr) format(w *formatter, depth int) { w.WriteString("_") - coll := i.TypedCollation.Collation.Get() - w.WriteString(coll.Name()) + w.WriteString(collations.Local().LookupName(i.TypedCollation.Collation)) i.Inner.format(w, depth) } @@ -170,7 +168,7 @@ func (c *CallExpr) format(w *formatter, depth int) { func (c *builtinWeightString) format(w *formatter, depth int) { w.WriteString("WEIGHT_STRING(") - c.String.format(w, depth) + c.Expr.format(w, depth) if c.Cast != "" { fmt.Fprintf(w, " AS %s(%d)", strings.ToUpper(c.Cast), c.Len) @@ -206,7 +204,7 @@ func (c *ConvertExpr) format(buf *formatter, depth int) { } if c.Collation != collations.Unknown { buf.WriteString(" CHARACTER SET ") - buf.WriteString(c.Collation.Get().Name()) + buf.WriteString(collations.Local().LookupName(c.Collation)) } 
buf.WriteByte(')') } @@ -215,6 +213,6 @@ func (c *ConvertUsingExpr) format(buf *formatter, depth int) { buf.WriteString("CONVERT(") c.Inner.format(buf, depth) buf.WriteString(" USING ") - buf.WriteString(c.Collation.Get().Name()) + buf.WriteString(collations.Local().LookupName(c.Collation)) buf.WriteByte(')') } diff --git a/go/vt/vtgate/evalengine/integration/comparison_test.go b/go/vt/vtgate/evalengine/integration/comparison_test.go index 06a6157b73b..bde8435f688 100644 --- a/go/vt/vtgate/evalengine/integration/comparison_test.go +++ b/go/vt/vtgate/evalengine/integration/comparison_test.go @@ -29,14 +29,14 @@ import ( "github.com/spf13/pflag" - "vitess.io/vitess/go/mysql/format" - "vitess.io/vitess/go/vt/callerid" - "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/format" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/callerid" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/evalengine/testcases" @@ -45,7 +45,7 @@ import ( var ( collationEnv *collations.Environment - debugPrintAll bool + debugGolden = false debugNormalize = true debugSimplify = time.Now().UnixNano()&1 != 0 debugCheckTypes = true @@ -53,7 +53,7 @@ var ( ) func registerFlags(fs *pflag.FlagSet) { - fs.BoolVar(&debugPrintAll, "print-all", debugPrintAll, "print all matching tests") + fs.BoolVar(&debugGolden, "golden", debugGolden, "print golden test files") fs.BoolVar(&debugNormalize, "normalize", debugNormalize, "normalize comparisons against MySQL values") fs.BoolVar(&debugSimplify, "simplify", debugSimplify, "simplify expressions before evaluating them") fs.BoolVar(&debugCheckTypes, "check-types", debugCheckTypes, "check the TypeOf operator for all queries") @@ -179,7 +179,7 @@ func compareRemoteExprEnv(t *testing.T, env 
*evalengine.ExpressionEnv, conn *mys // TODO: passthrough proper collations for nullable fields remoteCollation = collations.CollationBinaryID } else { - remoteCollation = collationEnv.LookupByName(remote.Rows[0][1].ToString()).ID() + remoteCollation = collationEnv.LookupByName(remote.Rows[0][1].ToString()) } } } @@ -195,13 +195,24 @@ func compareRemoteExprEnv(t *testing.T, env *evalengine.ExpressionEnv, conn *mys Collation: remoteCollation, } + if debugGolden { + g := GoldenTest{Query: localQuery} + if remoteErr != nil { + g.Error = remoteErr.Error() + } else { + g.Value = remoteVal.String() + } + seenGoldenTests = append(seenGoldenTests, g) + return + } + if err := compareResult(localResult, remoteResult, cmp); err != nil { t.Errorf("%s\nquery: %s (SIMPLIFY=%v)\nrow: %v", err, localQuery, debugSimplify, env.Row) - } else if debugPrintAll { - t.Logf("local=%s mysql=%s\nquery: %s\nrow: %v", localVal.String(), remoteVal.String(), localQuery, env.Row) } } +var seenGoldenTests []GoldenTest + type vcursor struct { } @@ -239,6 +250,7 @@ func initTimezoneData(t *testing.T, conn *mysql.Conn) { } func TestMySQL(t *testing.T) { + defer utils.EnsureNoLeaks(t) var conn = mysqlconn(t) defer conn.Close() @@ -261,4 +273,8 @@ func TestMySQL(t *testing.T) { }) }) } + + if debugGolden { + writeGolden(t, seenGoldenTests) + } } diff --git a/go/vt/vtgate/evalengine/integration/fuzz_test.go b/go/vt/vtgate/evalengine/integration/fuzz_test.go index bdcd6e5f4cb..ebfaa486b19 100644 --- a/go/vt/vtgate/evalengine/integration/fuzz_test.go +++ b/go/vt/vtgate/evalengine/integration/fuzz_test.go @@ -98,6 +98,11 @@ var ( regexp.MustCompile(`Invalid JSON text in argument (\d+) to function (\w+): (.*?)`), regexp.MustCompile(`Illegal mix of collations`), regexp.MustCompile(`Incorrect (DATE|DATETIME) value`), + regexp.MustCompile(`Syntax error in regular expression`), + regexp.MustCompile(`The regular expression contains an unclosed bracket expression`), + regexp.MustCompile(`Illegal argument to a 
regular expression`), + regexp.MustCompile(`Incorrect arguments to regexp_substr`), + regexp.MustCompile(`Incorrect arguments to regexp_replace`), } ) @@ -177,6 +182,12 @@ func evaluateLocalEvalengine(env *evalengine.ExpressionEnv, query string, fields const syntaxErr = `You have an error in your SQL syntax; (errno 1064) (sqlstate 42000) during query: SQL` const localSyntaxErr = `You have an error in your SQL syntax;` +type GoldenTest struct { + Query string + Value string `json:",omitempty"` + Error string `json:",omitempty"` +} + func TestGenerateFuzzCases(t *testing.T) { if fuzzMaxFailures <= 0 { t.Skipf("skipping fuzz test generation") @@ -254,12 +265,7 @@ func TestGenerateFuzzCases(t *testing.T) { return } - type evaltest struct { - Query string - Value string `json:",omitempty"` - Error string `json:",omitempty"` - } - var golden []evaltest + var golden []GoldenTest for _, fail := range failures { failErr := fail.Error() @@ -276,18 +282,22 @@ func TestGenerateFuzzCases(t *testing.T) { query := "SELECT " + sqlparser.String(simplified) if fail.remoteErr != nil { - golden = append(golden, evaltest{ + golden = append(golden, GoldenTest{ Query: query, Error: fail.remoteErr.Error(), }) } else { - golden = append(golden, evaltest{ + golden = append(golden, GoldenTest{ Query: query, Value: fail.remoteVal.String(), }) } } + writeGolden(t, golden) +} + +func writeGolden(t *testing.T, golden []GoldenTest) { out, err := os.Create(fmt.Sprintf("testdata/mysql_golden_%d.json", time.Now().Unix())) if err != nil { t.Fatal(err) @@ -334,11 +344,12 @@ func compareResult(local, remote Result, cmp *testcases.Comparison) error { var localCollationName string var remoteCollationName string - if coll := local.Collation.Get(); coll != nil { - localCollationName = coll.Name() + env := collations.Local() + if coll := local.Collation; coll != collations.Unknown { + localCollationName = env.LookupName(coll) } - if coll := remote.Collation.Get(); coll != nil { - remoteCollationName = 
coll.Name() + if coll := remote.Collation; coll != collations.Unknown { + remoteCollationName = env.LookupName(coll) } equals, err := cmp.Equals(local.Value, remote.Value) diff --git a/go/vt/vtgate/evalengine/integration/testdata/mysql_golden_1686149004.json b/go/vt/vtgate/evalengine/integration/testdata/mysql_golden_1686149004.json new file mode 100644 index 00000000000..1c12d2d8825 --- /dev/null +++ b/go/vt/vtgate/evalengine/integration/testdata/mysql_golden_1686149004.json @@ -0,0 +1,36046 @@ +[ + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01',INTERVAL 1 DAY)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01',INTERVAL 1 YEAR)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 SECOND)", + "Value": "DATETIME(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2018-12-31 23:59:59', INTERVAL 1 DAY)", + "Value": "DATETIME(\"2019-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2100-12-31 23:59:59', INTERVAL '1:1' MINUTE_SECOND)", + "Value": "DATETIME(\"2101-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' DAY_SECOND)", + "Value": "DATETIME(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'1900-01-01 00:00:00', INTERVAL '-1 10' DAY_HOUR)", + "Value": "DATETIME(\"1899-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'1998-01-02', INTERVAL 31 DAY)", + "Value": "DATE(\"1997-12-02\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'1992-12-31 23:59:59.000002', INTERVAL '1.999999' SECOND_MICROSECOND)", + "Value": "DATETIME(\"1993-01-01 00:00:01.000001\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2024-03-30', INTERVAL 1 MONTH)", + "Value": "DATE(\"2024-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2024-03-31', INTERVAL 1 MONTH)", + "Value": "DATE(\"2024-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 day)", + 
"Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 day)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 day)", + "Value": "DATE(\"2018-03-31\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 day)", + "Value": "DATE(\"2018-05-31\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 day)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 day)", + "Value": "DATE(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 day)", + "Value": "DATE(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' 
day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 day)", + "Value": "DATE(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 day)", + "Value": "DATE(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 day)", + "Value": "DATE(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 day)", + "Value": "DATE(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 day)", + "Value": "DATE(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 day)", + "Value": "DATE(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' day)", + "Value": "DATE(\"2018-05-07\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' day)", + "Value": "DATE(\"2018-04-25\")" + }, + { + "Query": "SELECT 
DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 day)", + "Value": "DATE(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 day)", + "Value": "DATE(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 day)", + "Value": "DATE(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 day)", + "Value": "DATE(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "DATE(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "DATE(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "DATE(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "DATE(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT 
DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 week)", + "Value": "DATE(\"2018-12-04\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 week)", + "Value": "DATE(\"2017-09-26\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 week)", + "Value": "DATE(\"2018-11-27\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 week)", + "Value": "DATE(\"2017-10-03\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 week)", + "Value": "DATE(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 week)", + "Value": "DATE(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' week)", + "Value": 
"DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 week)", + "Value": "DATE(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 week)", + "Value": "DATE(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 week)", + "Value": "DATE(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 week)", + "Value": "DATE(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 week)", + "Value": "DATE(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 week)", + "Value": "DATE(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' week)", + "Value": "DATE(\"2018-06-12\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' week)", + "Value": "DATE(\"2018-03-20\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 week)", + "Value": "DATE(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 week)", + "Value": "DATE(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', 
INTERVAL 1.5000e0 week)", + "Value": "DATE(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 week)", + "Value": "DATE(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "DATE(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "DATE(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "DATE(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "DATE(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', 
INTERVAL '1 1:1:1' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 month)", + "Value": "DATE(\"2020-12-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 month)", + "Value": "DATE(\"2015-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 month)", + "Value": "DATE(\"2020-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 month)", + "Value": "DATE(\"2015-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 month)", + "Value": "DATE(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 month)", + "Value": "DATE(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' month)", + "Value": 
"DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 month)", + "Value": "DATE(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 month)", + "Value": "DATE(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 month)", + "Value": "DATE(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 month)", + "Value": "DATE(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 month)", + "Value": "DATE(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 month)", + "Value": "DATE(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' month)", + "Value": "DATE(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' month)", + "Value": "DATE(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 month)", + "Value": "DATE(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 month)", + "Value": "DATE(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 month)", + "Value": "DATE(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 month)", + "Value": "DATE(\"2018-03-01\")" + }, + { + "Query": "SELECT 
DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "DATE(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "DATE(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "DATE(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "DATE(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": 
"SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 year)", + "Value": "DATE(\"2049-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 year)", + "Value": "DATE(\"1987-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 year)", + "Value": "DATE(\"2048-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 year)", + "Value": "DATE(\"1988-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 year)", + "Value": "DATE(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 year)", + "Value": "DATE(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' year)", + "Value": 
"DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 year)", + "Value": "DATE(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 year)", + "Value": "DATE(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 year)", + "Value": "DATE(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 year)", + "Value": "DATE(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 year)", + "Value": "DATE(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 year)", + "Value": "DATE(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' year)", + "Value": "DATE(\"2024-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' year)", + "Value": "DATE(\"2012-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 year)", + "Value": "DATE(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 year)", + "Value": "DATE(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 year)", + "Value": "DATE(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 year)", + "Value": "DATE(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "DATE(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "DATE(\"2016-05-01\")" + }, + { + "Query": "SELECT 
DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "DATE(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "DATE(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 day_hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 day_hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' day_hour)", + "Value": "DATETIME(\"2018-05-02 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' day_hour)", + "Value": "DATETIME(\"2018-04-29 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' day_hour)", + "Value": "DATETIME(\"2018-04-29 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' day_hour)", + "Value": "DATETIME(\"2018-05-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' day_hour)", + "Value": 
"DATETIME(\"2018-05-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' day_hour)", + "Value": "DATETIME(\"2018-04-29 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 day_hour)", + "Value": "DATETIME(\"2018-05-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 day_hour)", + "Value": "DATETIME(\"2018-04-29 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 day_hour)", + "Value": "DATETIME(\"2018-05-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 day_hour)", + "Value": "DATETIME(\"2018-04-29 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' day_hour)", + "Value": "DATETIME(\"2132-05-30 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' day_hour)", + "Value": "DATETIME(\"1904-04-01 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 day_hour)", + "Value": "DATETIME(\"2018-06-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 day_hour)", + "Value": "DATETIME(\"2018-03-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' day_hour)", + "Value": "DATETIME(\"2018-06-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' day_hour)", + "Value": "DATETIME(\"2018-03-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' day_hour)", + "Value": "DATETIME(\"2018-04-29 
14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' day_hour)", + "Value": "DATETIME(\"2018-05-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' day_hour)", + "Value": "DATETIME(\"2018-05-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' day_hour)", + "Value": "DATETIME(\"2018-04-29 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 day_hour)", + "Value": "DATETIME(\"2018-05-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 day_hour)", + "Value": "DATETIME(\"2018-04-29 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 day_hour)", + "Value": "DATETIME(\"2018-11-26 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 day_hour)", + "Value": "DATETIME(\"2017-10-03 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 day_hour)", + "Value": "DATETIME(\"2018-11-26 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 day_hour)", + "Value": "DATETIME(\"2017-10-03 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' day_hour)", + "Value": "DATETIME(\"2018-05-07 04:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' day_hour)", + "Value": "DATETIME(\"2018-04-24 20:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 day_hour)", + "Value": "DATETIME(\"2018-05-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 day_hour)", + "Value": "DATETIME(\"2018-04-29 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 day_hour)", + "Value": "DATETIME(\"2018-05-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 day_hour)", + "Value": "DATETIME(\"2018-04-29 19:00:00\")" + }, + { + "Query": "SELECT 
DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "DATETIME(\"2018-05-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "DATETIME(\"2018-04-29 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "DATETIME(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "DATETIME(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 day_hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 day_hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' day_hour)", + "Value": "DATETIME(\"2018-05-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' day_hour)", + "Value": "DATETIME(\"2018-04-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' day_hour)", + "Value": "DATETIME(\"2018-05-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' day_hour)", + "Value": "DATETIME(\"2018-04-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' 
day_microsecond)", + "Value": "DATETIME(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', 
INTERVAL '1.999' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT 
DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 day_microsecond)", + 
"Value": "DATETIME(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 day_minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 day_minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' day_minute)", + "Value": "DATETIME(\"2018-05-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' day_minute)", + "Value": "DATETIME(\"2018-04-30 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' day_minute)", + "Value": "DATETIME(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' day_minute)", + "Value": "DATETIME(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' day_minute)", + "Value": "DATETIME(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' day_minute)", + "Value": "DATETIME(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 day_minute)", + "Value": 
"DATETIME(\"2018-05-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 day_minute)", + "Value": "DATETIME(\"2018-04-30 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 day_minute)", + "Value": "DATETIME(\"2018-05-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 day_minute)", + "Value": "DATETIME(\"2018-04-30 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' day_minute)", + "Value": "DATETIME(\"2020-03-25 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' day_minute)", + "Value": "DATETIME(\"2016-06-05 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 day_minute)", + "Value": "DATETIME(\"2018-05-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 day_minute)", + "Value": "DATETIME(\"2018-04-30 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' day_minute)", + "Value": "DATETIME(\"2018-05-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' day_minute)", + "Value": "DATETIME(\"2018-04-30 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' day_minute)", + "Value": "DATETIME(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' day_minute)", + "Value": "DATETIME(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' day_minute)", + 
"Value": "DATETIME(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' day_minute)", + "Value": "DATETIME(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 day_minute)", + "Value": "DATETIME(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 day_minute)", + "Value": "DATETIME(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 day_minute)", + "Value": "DATETIME(\"2018-05-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 day_minute)", + "Value": "DATETIME(\"2018-04-27 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 day_minute)", + "Value": "DATETIME(\"2018-05-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 day_minute)", + "Value": "DATETIME(\"2018-04-27 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' day_minute)", + "Value": "DATETIME(\"2018-05-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' day_minute)", + "Value": "DATETIME(\"2018-04-30 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 day_minute)", + "Value": "DATETIME(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 day_minute)", + "Value": "DATETIME(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 day_minute)", + "Value": "DATETIME(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 day_minute)", + "Value": "DATETIME(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "DATETIME(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) 
day_minute)", + "Value": "DATETIME(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 day_minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 day_minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' day_minute)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' day_minute)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' day_minute)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' day_minute)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 day_second)", + "Value": "DATETIME(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 day_second)", + "Value": "DATETIME(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' day_second)", + "Value": "DATETIME(\"2018-04-30 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' day_second)", + "Value": "DATETIME(\"2018-05-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' day_second)", + "Value": "DATETIME(\"2018-04-29 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', 
INTERVAL '-1 10' day_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' day_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 day_second)", + "Value": "DATETIME(\"2018-05-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 day_second)", + "Value": "DATETIME(\"2018-04-30 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 day_second)", + "Value": "DATETIME(\"2018-05-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 day_second)", + "Value": "DATETIME(\"2018-04-30 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' day_second)", + "Value": "DATETIME(\"2018-05-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' day_second)", + "Value": "DATETIME(\"2018-04-19 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 day_second)", + "Value": "DATETIME(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 day_second)", + "Value": "DATETIME(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' day_second)", + "Value": "DATETIME(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' day_second)", + "Value": "DATETIME(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' day_second)", + "Value": "DATETIME(\"2018-05-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' 
day_second)", + "Value": "DATETIME(\"2018-04-29 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' day_second)", + "Value": "DATETIME(\"2018-05-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' day_second)", + "Value": "DATETIME(\"2018-04-29 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' day_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' day_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 day_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 day_second)", + "Value": "DATETIME(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 day_second)", + "Value": "DATETIME(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 day_second)", + "Value": "DATETIME(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 day_second)", + "Value": "DATETIME(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' day_second)", + "Value": "DATETIME(\"2018-05-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' day_second)", + "Value": "DATETIME(\"2018-04-30 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 day_second)", + "Value": 
"DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 day_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 day_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "DATETIME(\"2018-05-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "DATETIME(\"2018-04-30 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 day_second)", + "Value": "DATETIME(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 day_second)", + "Value": "DATETIME(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' day_second)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' day_second)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT 
DATE_SUB(DATE'2018-05-01', INTERVAL 1 hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 hour)", + "Value": "DATETIME(\"2018-05-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 hour)", + "Value": "DATETIME(\"2018-04-29 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 hour)", + "Value": "DATETIME(\"2018-05-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 hour)", + "Value": "DATETIME(\"2018-04-29 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 hour)", + "Value": "DATETIME(\"2018-05-01 02:00:00\")" + }, + { + "Query": 
"SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 hour)", + "Value": "DATETIME(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 hour)", + "Value": "DATETIME(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 hour)", + "Value": "DATETIME(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 hour)", + "Value": "DATETIME(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 hour)", + "Value": "DATETIME(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 hour)", + "Value": "DATETIME(\"2018-05-01 
02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 hour)", + "Value": "DATETIME(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' hour)", + "Value": "DATETIME(\"2018-05-01 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' hour)", + "Value": "DATETIME(\"2018-04-30 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 hour)", + "Value": "DATETIME(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 hour)", + "Value": "DATETIME(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 hour)", + "Value": "DATETIME(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 hour)", + "Value": "DATETIME(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "DATETIME(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "DATETIME(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "DATETIME(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "DATETIME(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT 
DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.690000\")" + }, + { + "Query": "SELECT 
DATE_ADD(DATE'2018-05-01', INTERVAL 30 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 
00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 hour_microsecond)", + "Value": 
"DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 hour_minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 hour_minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:01:00\")" + }, + { + "Query": "SELECT 
DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' hour_minute)", + "Value": "DATETIME(\"2018-04-30 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' hour_minute)", + "Value": "DATETIME(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' hour_minute)", + "Value": "DATETIME(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 hour_minute)", + "Value": "DATETIME(\"2018-05-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 hour_minute)", + "Value": "DATETIME(\"2018-04-30 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 hour_minute)", + "Value": "DATETIME(\"2018-05-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 hour_minute)", + "Value": "DATETIME(\"2018-04-30 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' hour_minute)", + "Value": "DATETIME(\"2020-03-25 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' hour_minute)", + "Value": "DATETIME(\"2016-06-05 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 hour_minute)", + "Value": "DATETIME(\"2018-05-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 hour_minute)", + "Value": "DATETIME(\"2018-04-30 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' hour_minute)", + 
"Value": "DATETIME(\"2018-05-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' hour_minute)", + "Value": "DATETIME(\"2018-04-30 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' hour_minute)", + "Value": "DATETIME(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' hour_minute)", + "Value": "DATETIME(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 hour_minute)", + "Value": "DATETIME(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 hour_minute)", + "Value": "DATETIME(\"2018-05-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 hour_minute)", + "Value": "DATETIME(\"2018-04-27 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 hour_minute)", + "Value": "DATETIME(\"2018-05-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 hour_minute)", + "Value": "DATETIME(\"2018-04-27 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' 
hour_minute)", + "Value": "DATETIME(\"2018-05-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' hour_minute)", + "Value": "DATETIME(\"2018-04-30 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 hour_minute)", + "Value": "DATETIME(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 hour_minute)", + "Value": "DATETIME(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "DATETIME(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 hour_minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 hour_minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' hour_minute)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + 
}, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' hour_minute)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 hour_second)", + "Value": "DATETIME(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 hour_second)", + "Value": "DATETIME(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' hour_second)", + "Value": "DATETIME(\"2018-04-30 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' hour_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' hour_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 hour_second)", + "Value": "DATETIME(\"2018-05-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 hour_second)", + "Value": "DATETIME(\"2018-04-30 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 hour_second)", + "Value": "DATETIME(\"2018-05-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 hour_second)", + "Value": "DATETIME(\"2018-04-30 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' 
hour_second)", + "Value": "DATETIME(\"2018-05-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' hour_second)", + "Value": "DATETIME(\"2018-04-19 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 hour_second)", + "Value": "DATETIME(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 hour_second)", + "Value": "DATETIME(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' hour_second)", + "Value": "DATETIME(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' hour_second)", + "Value": "DATETIME(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' hour_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' hour_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 hour_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT 
DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 hour_second)", + "Value": "DATETIME(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 hour_second)", + "Value": "DATETIME(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 hour_second)", + "Value": "DATETIME(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 hour_second)", + "Value": "DATETIME(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' hour_second)", + "Value": "DATETIME(\"2018-05-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' hour_second)", + "Value": "DATETIME(\"2018-04-30 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 hour_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 hour_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "DATETIME(\"2018-05-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "DATETIME(\"2018-04-30 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 hour_second)", + "Value": 
"DATETIME(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 hour_second)", + "Value": "DATETIME(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' hour_second)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' hour_second)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT 
DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000031\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999969\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000030\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999970\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 
'1:1 1:1' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000006\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999994\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 microsecond)", + "Value": 
"DATETIME(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 
minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 minute)", + "Value": "DATETIME(\"2018-05-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 minute)", + "Value": "DATETIME(\"2018-04-30 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 minute)", + "Value": "DATETIME(\"2018-05-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 minute)", + "Value": "DATETIME(\"2018-04-30 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 minute)", + "Value": "DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": 
"SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 minute)", + "Value": "DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 minute)", + "Value": "DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 minute)", + "Value": 
"DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' minute)", + "Value": "DATETIME(\"2018-05-01 00:06:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' minute)", + "Value": "DATETIME(\"2018-04-30 23:54:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 minute)", + "Value": "DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 minute)", + "Value": "DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' minute)", + "Value": 
"DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.690000\")" + }, + { + 
"Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' 
minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": 
"SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 minute_second)", + "Value": "DATETIME(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 minute_second)", + "Value": "DATETIME(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' 
minute_second)", + "Value": "DATETIME(\"2018-04-30 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' minute_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' minute_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 minute_second)", + "Value": "DATETIME(\"2018-05-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 minute_second)", + "Value": "DATETIME(\"2018-04-30 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 minute_second)", + "Value": "DATETIME(\"2018-05-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 minute_second)", + "Value": "DATETIME(\"2018-04-30 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' minute_second)", + "Value": "DATETIME(\"2018-05-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' minute_second)", + "Value": "DATETIME(\"2018-04-19 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 minute_second)", + "Value": "DATETIME(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 minute_second)", + "Value": "DATETIME(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' minute_second)", + "Value": 
"DATETIME(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' minute_second)", + "Value": "DATETIME(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' minute_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' minute_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 minute_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 minute_second)", + "Value": "DATETIME(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 minute_second)", + "Value": "DATETIME(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 minute_second)", + "Value": "DATETIME(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 minute_second)", + "Value": "DATETIME(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT 
DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' minute_second)", + "Value": "DATETIME(\"2018-05-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' minute_second)", + "Value": "DATETIME(\"2018-04-30 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 minute_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 minute_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "DATETIME(\"2018-05-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "DATETIME(\"2018-04-30 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 minute_second)", + "Value": "DATETIME(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 minute_second)", + "Value": "DATETIME(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' minute_second)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 
'1.0foobar' minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' minute_second)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 quarter)", + "Value": "DATE(\"2026-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 quarter)", + "Value": "DATE(\"2010-08-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 quarter)", + "Value": "DATE(\"2025-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 quarter)", + "Value": "DATE(\"2010-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', 
INTERVAL '1.999999' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 quarter)", + "Value": "DATE(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 quarter)", + "Value": "DATE(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 quarter)", + "Value": "DATE(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 quarter)", + "Value": "DATE(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 quarter)", + "Value": "DATE(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 quarter)", + "Value": "DATE(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 
quarter)", + "Value": "DATE(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 quarter)", + "Value": "DATE(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' quarter)", + "Value": "DATE(\"2019-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' quarter)", + "Value": "DATE(\"2016-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 quarter)", + "Value": "DATE(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 quarter)", + "Value": "DATE(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 quarter)", + "Value": "DATE(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 quarter)", + "Value": "DATE(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "DATE(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "DATE(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "DATE(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "DATE(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT 
DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 second)", + "Value": "DATETIME(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 second)", + "Value": "DATETIME(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 second)", + "Value": "DATETIME(\"2018-05-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 second)", + "Value": "DATETIME(\"2018-04-30 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 second)", + "Value": "DATETIME(\"2018-05-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 second)", + "Value": "DATETIME(\"2018-04-30 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' 
second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' second)", + "Value": "DATETIME(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 second)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' second)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.5\")" + }, + { + "Query": "SELECT 
DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 second)", + "Value": "DATETIME(\"2018-04-30 23:59:58.5\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.5000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 second)", + "Value": "DATETIME(\"2018-04-30 23:59:58.5000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.5000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 second)", + "Value": "DATETIME(\"2018-04-30 23:59:58.5000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' second)", + "Value": "DATETIME(\"2018-05-01 00:00:06.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' second)", + "Value": "DATETIME(\"2018-04-30 23:59:54.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 second)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 second)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.5\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "DATETIME(\"2018-04-30 23:59:58.5\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "DATETIME(\"2018-05-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": 
"DATETIME(\"2018-04-30 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT 
DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT 
DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:53.600000\")" + }, + { + "Query": "SELECT 
DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT 
DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 year_month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 year_month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' year_month)", + "Value": "DATE(\"2019-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' year_month)", + "Value": "DATE(\"2017-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' year_month)", + "Value": "DATE(\"2016-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' year_month)", + "Value": "DATE(\"2020-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' year_month)", + "Value": "DATE(\"2020-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' year_month)", + "Value": "DATE(\"2016-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 year_month)", + "Value": "DATE(\"2020-12-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 year_month)", + "Value": "DATE(\"2015-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 year_month)", + "Value": "DATE(\"2020-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 year_month)", + "Value": "DATE(\"2015-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT 
DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 year_month)", + "Value": "DATE(\"2102-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 year_month)", + "Value": "DATE(\"1934-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' year_month)", + "Value": "DATE(\"2102-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' year_month)", + "Value": "DATE(\"1934-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' year_month)", + "Value": "DATE(\"2016-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' year_month)", + "Value": "DATE(\"2020-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' year_month)", + "Value": "DATE(\"2020-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' year_month)", + "Value": "DATE(\"2016-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 year_month)", + "Value": "DATE(\"2019-10-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 year_month)", + "Value": "DATE(\"2016-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 year_month)", + "Value": "DATE(\"2436-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 year_month)", + "Value": "DATE(\"1600-09-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 year_month)", + "Value": "DATE(\"2436-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 
6/4 year_month)", + "Value": "DATE(\"1600-09-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' year_month)", + "Value": "DATE(\"2024-09-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' year_month)", + "Value": "DATE(\"2012-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 year_month)", + "Value": "DATE(\"2019-10-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 year_month)", + "Value": "DATE(\"2016-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 year_month)", + "Value": "DATE(\"2019-10-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 year_month)", + "Value": "DATE(\"2016-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "DATE(\"2019-10-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "DATE(\"2016-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "DATE(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "DATE(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 year_month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 year_month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' year_month)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' year_month)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' year_month)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' year_month)", 
+ "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 day)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 day)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day)", + "Value": "DATETIME(\"2021-01-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day)", + "Value": "DATETIME(\"2020-12-01 23:59:59\")" + }, + { + "Query": "SELECT 
DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day)", + "Value": "DATETIME(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day)", + "Value": "DATETIME(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 
23:59:59', INTERVAL 1.5 day)", + "Value": "DATETIME(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 day)", + "Value": "DATETIME(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 day)", + "Value": "DATETIME(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 day)", + "Value": "DATETIME(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day)", + "Value": "DATETIME(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day)", + "Value": "DATETIME(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day)", + "Value": "DATETIME(\"2021-01-06 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day)", + "Value": "DATETIME(\"2020-12-25 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day)", + "Value": "DATETIME(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day)", + "Value": "DATETIME(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day)", + "Value": "DATETIME(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day)", + "Value": "DATETIME(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "DATETIME(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "DATETIME(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 
CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "DATETIME(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "DATETIME(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' 
week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 week)", + "Value": "DATETIME(\"2021-08-05 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 week)", + "Value": "DATETIME(\"2020-05-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 week)", + "Value": "DATETIME(\"2021-07-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 week)", + "Value": "DATETIME(\"2020-06-04 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 week)", + "Value": "DATETIME(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 week)", + "Value": "DATETIME(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' week)", + "Value": "DATETIME(\"2021-01-07 
23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 week)", + "Value": "DATETIME(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 week)", + "Value": "DATETIME(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 week)", + "Value": "DATETIME(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 week)", + "Value": "DATETIME(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 week)", + "Value": "DATETIME(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 week)", + "Value": "DATETIME(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' week)", + "Value": "DATETIME(\"2021-02-11 23:59:59\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' week)", + "Value": "DATETIME(\"2020-11-19 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 week)", + "Value": "DATETIME(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 week)", + "Value": "DATETIME(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 week)", + "Value": "DATETIME(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 week)", + "Value": "DATETIME(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "DATETIME(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "DATETIME(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "DATETIME(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "DATETIME(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' week)", + "Value": "DATETIME(\"2021-01-07 
23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 month)", + "Value": "DATETIME(\"2023-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 month)", + "Value": "DATETIME(\"2018-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 month)", + "Value": "DATETIME(\"2023-06-30 23:59:59\")" + }, + { + "Query": 
"SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 month)", + "Value": "DATETIME(\"2018-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 month)", + "Value": "DATETIME(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 month)", + "Value": "DATETIME(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 month)", + "Value": "DATETIME(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 month)", + "Value": "DATETIME(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 month)", + "Value": "DATETIME(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 month)", + "Value": "DATETIME(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 month)", + "Value": "DATETIME(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 month)", + "Value": "DATETIME(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' month)", + "Value": "DATETIME(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' month)", + "Value": "DATETIME(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 month)", + "Value": "DATETIME(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 month)", + "Value": "DATETIME(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 month)", + "Value": "DATETIME(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 month)", + "Value": "DATETIME(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "DATETIME(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "DATETIME(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "DATETIME(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "DATETIME(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { 
+ "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 year)", + "Value": "DATETIME(\"2051-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 year)", + "Value": "DATETIME(\"1989-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 year)", + "Value": "DATETIME(\"2050-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 year)", + "Value": "DATETIME(\"1990-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 year)", + "Value": "DATETIME(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 year)", + "Value": "DATETIME(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 year)", + "Value": "DATETIME(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 year)", + "Value": "DATETIME(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 year)", + "Value": "DATETIME(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 year)", + "Value": "DATETIME(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 year)", + "Value": "DATETIME(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 
23:59:59', INTERVAL 6/4 year)", + "Value": "DATETIME(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' year)", + "Value": "DATETIME(\"2026-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' year)", + "Value": "DATETIME(\"2014-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 year)", + "Value": "DATETIME(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 year)", + "Value": "DATETIME(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 year)", + "Value": "DATETIME(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 year)", + "Value": "DATETIME(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "DATETIME(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "DATETIME(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "DATETIME(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "DATETIME(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day_hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day_hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day_hour)", + "Value": "DATETIME(\"2021-01-02 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day_hour)", + "Value": "DATETIME(\"2020-12-30 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day_hour)", + "Value": "DATETIME(\"2020-12-30 13:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day_hour)", + "Value": "DATETIME(\"2021-01-02 09:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' day_hour)", + "Value": "DATETIME(\"2021-01-02 09:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' day_hour)", + "Value": "DATETIME(\"2020-12-30 13:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 day_hour)", + "Value": "DATETIME(\"2021-01-02 06:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 
day_hour)", + "Value": "DATETIME(\"2020-12-30 16:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day_hour)", + "Value": "DATETIME(\"2021-01-02 05:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day_hour)", + "Value": "DATETIME(\"2020-12-30 17:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day_hour)", + "Value": "DATETIME(\"2135-01-31 14:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day_hour)", + "Value": "DATETIME(\"1906-12-03 08:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day_hour)", + "Value": "DATETIME(\"2021-02-12 14:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day_hour)", + "Value": "DATETIME(\"2020-11-19 08:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day_hour)", + "Value": "DATETIME(\"2021-02-12 14:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day_hour)", + "Value": "DATETIME(\"2020-11-19 08:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day_hour)", + "Value": "DATETIME(\"2020-12-30 13:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day_hour)", + "Value": "DATETIME(\"2021-01-02 09:59:59\")" + }, + { + "Query": 
"SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day_hour)", + "Value": "DATETIME(\"2021-01-02 09:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day_hour)", + "Value": "DATETIME(\"2020-12-30 13:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 day_hour)", + "Value": "DATETIME(\"2021-01-02 04:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 day_hour)", + "Value": "DATETIME(\"2020-12-30 18:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 day_hour)", + "Value": "DATETIME(\"2021-07-29 07:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 day_hour)", + "Value": "DATETIME(\"2020-06-05 15:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day_hour)", + "Value": "DATETIME(\"2021-07-29 07:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day_hour)", + "Value": "DATETIME(\"2020-06-05 15:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day_hour)", + "Value": "DATETIME(\"2021-01-07 03:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day_hour)", + "Value": "DATETIME(\"2020-12-25 19:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day_hour)", + "Value": "DATETIME(\"2021-01-02 04:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day_hour)", + "Value": "DATETIME(\"2020-12-30 18:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day_hour)", + "Value": "DATETIME(\"2021-01-02 04:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day_hour)", + "Value": "DATETIME(\"2020-12-30 18:59:59\")" + }, + { + 
"Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "DATETIME(\"2021-01-02 04:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "DATETIME(\"2020-12-30 18:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "DATETIME(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "DATETIME(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day_hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day_hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day_hour)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day_hour)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day_hour)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day_hour)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.310000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.690000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.300000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.700000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day_microsecond)", + "Value": 
"DATETIME(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT 
DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:05.400000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:52.600000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": 
"DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.200000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.800000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day_minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day_minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day_minute)", + "Value": "DATETIME(\"2021-01-01 01:00:59\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day_minute)", + "Value": "DATETIME(\"2020-12-31 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day_minute)", + "Value": "DATETIME(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day_minute)", + "Value": "DATETIME(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' day_minute)", + "Value": "DATETIME(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' day_minute)", + "Value": "DATETIME(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 day_minute)", + "Value": "DATETIME(\"2021-01-01 00:30:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 day_minute)", + "Value": "DATETIME(\"2020-12-31 23:28:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day_minute)", + "Value": "DATETIME(\"2021-01-01 00:29:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day_minute)", + "Value": "DATETIME(\"2020-12-31 23:29:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day_minute)", + "Value": "DATETIME(\"2022-11-26 11:38:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day_minute)", + "Value": "DATETIME(\"2019-02-06 12:20:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day_minute)", + "Value": "DATETIME(\"2021-01-01 17:38:59\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day_minute)", + "Value": "DATETIME(\"2020-12-31 06:20:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day_minute)", + "Value": "DATETIME(\"2021-01-01 17:38:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day_minute)", + "Value": "DATETIME(\"2020-12-31 06:20:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day_minute)", + "Value": "DATETIME(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day_minute)", + "Value": "DATETIME(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day_minute)", + "Value": "DATETIME(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day_minute)", + "Value": "DATETIME(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 day_minute)", + "Value": "DATETIME(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 day_minute)", + "Value": "DATETIME(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 day_minute)", + "Value": "DATETIME(\"2021-01-04 12:19:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 
day_minute)", + "Value": "DATETIME(\"2020-12-28 11:39:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day_minute)", + "Value": "DATETIME(\"2021-01-04 12:19:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day_minute)", + "Value": "DATETIME(\"2020-12-28 11:39:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day_minute)", + "Value": "DATETIME(\"2021-01-01 06:03:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day_minute)", + "Value": "DATETIME(\"2020-12-31 17:55:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day_minute)", + "Value": "DATETIME(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day_minute)", + "Value": "DATETIME(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day_minute)", + "Value": "DATETIME(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day_minute)", + "Value": "DATETIME(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "DATETIME(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "DATETIME(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day_minute)", + "Value": 
"DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day_minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day_minute)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day_minute)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day_minute)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day_minute)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day_second)", + "Value": "DATETIME(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day_second)", + "Value": "DATETIME(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day_second)", + "Value": "DATETIME(\"2021-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_second)", + "Value": "DATETIME(\"2021-01-02 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_second)", + "Value": "DATETIME(\"2020-12-30 22:58:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 
23:59:59', INTERVAL '1 10' day_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 day_second)", + "Value": "DATETIME(\"2021-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 day_second)", + "Value": "DATETIME(\"2020-12-31 23:59:28\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day_second)", + "Value": "DATETIME(\"2021-01-01 00:00:29\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day_second)", + "Value": "DATETIME(\"2020-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day_second)", + "Value": "DATETIME(\"2021-01-12 13:47:38\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day_second)", + "Value": "DATETIME(\"2020-12-20 10:12:20\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day_second)", + "Value": "DATETIME(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day_second)", + "Value": "DATETIME(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day_second)", + "Value": "DATETIME(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day_second)", + "Value": "DATETIME(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_second)", + "Value": "DATETIME(\"2021-01-02 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_second)", + "Value": "DATETIME(\"2020-12-30 22:58:58\")" + }, + { + 
"Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_second)", + "Value": "DATETIME(\"2021-01-02 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_second)", + "Value": "DATETIME(\"2020-12-30 22:58:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 day_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 day_second)", + "Value": "DATETIME(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 day_second)", + "Value": "DATETIME(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day_second)", + "Value": "DATETIME(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day_second)", + "Value": "DATETIME(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day_second)", + "Value": "DATETIME(\"2021-01-01 00:06:03\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day_second)", + "Value": 
"DATETIME(\"2020-12-31 23:53:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "DATETIME(\"2021-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "DATETIME(\"2020-12-31 23:59:57\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day_second)", + "Value": "DATETIME(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day_second)", + "Value": "DATETIME(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day_second)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day_second)", + "Value": "DATETIME(\"2021-01-01 
00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 hour)", + "Value": "DATETIME(\"2021-01-02 06:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 hour)", + "Value": "DATETIME(\"2020-12-30 16:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 hour)", + "Value": "DATETIME(\"2021-01-02 05:59:59\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 hour)", + "Value": "DATETIME(\"2020-12-30 17:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 hour)", + "Value": "DATETIME(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 hour)", + "Value": "DATETIME(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 hour)", + "Value": "DATETIME(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 hour)", + "Value": "DATETIME(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 hour)", + "Value": "DATETIME(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 hour)", + "Value": "DATETIME(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 hour)", + "Value": "DATETIME(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 hour)", + "Value": "DATETIME(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' hour)", + "Value": "DATETIME(\"2021-01-01 05:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' hour)", + "Value": "DATETIME(\"2020-12-31 17:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 hour)", + "Value": "DATETIME(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 hour)", + "Value": "DATETIME(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 hour)", + "Value": "DATETIME(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 hour)", + "Value": "DATETIME(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "DATETIME(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 
23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "DATETIME(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "DATETIME(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "DATETIME(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_microsecond)", + 
"Value": "DATETIME(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.310000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.690000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.300000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.700000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 hour_microsecond)", + 
"Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:05.400000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:52.600000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + 
{ + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.200000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.800000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 hour_minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 hour_minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' hour_minute)", + "Value": "DATETIME(\"2021-01-01 01:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_minute)", + "Value": 
"NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' hour_minute)", + "Value": "DATETIME(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' hour_minute)", + "Value": "DATETIME(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 hour_minute)", + "Value": "DATETIME(\"2021-01-01 00:30:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 hour_minute)", + "Value": "DATETIME(\"2020-12-31 23:28:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 hour_minute)", + "Value": "DATETIME(\"2021-01-01 00:29:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 hour_minute)", + "Value": "DATETIME(\"2020-12-31 23:29:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' hour_minute)", + "Value": "DATETIME(\"2022-11-26 11:38:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' hour_minute)", + "Value": "DATETIME(\"2019-02-06 12:20:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 hour_minute)", + "Value": "DATETIME(\"2021-01-01 17:38:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 hour_minute)", + "Value": "DATETIME(\"2020-12-31 06:20:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' hour_minute)", + "Value": 
"DATETIME(\"2021-01-01 17:38:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' hour_minute)", + "Value": "DATETIME(\"2020-12-31 06:20:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' hour_minute)", + "Value": "DATETIME(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' hour_minute)", + "Value": "DATETIME(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 hour_minute)", + "Value": "DATETIME(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 hour_minute)", + "Value": "DATETIME(\"2021-01-04 12:19:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 hour_minute)", + "Value": "DATETIME(\"2020-12-28 11:39:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 hour_minute)", + "Value": "DATETIME(\"2021-01-04 12:19:59\")" + }, + { + 
"Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 hour_minute)", + "Value": "DATETIME(\"2020-12-28 11:39:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' hour_minute)", + "Value": "DATETIME(\"2021-01-01 06:03:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' hour_minute)", + "Value": "DATETIME(\"2020-12-31 17:55:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 hour_minute)", + "Value": "DATETIME(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_minute)", + "Value": "DATETIME(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "DATETIME(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 hour_minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 hour_minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT 
DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' hour_minute)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_minute)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 hour_second)", + "Value": "DATETIME(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 hour_second)", + "Value": "DATETIME(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' hour_second)", + "Value": "DATETIME(\"2021-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' hour_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' hour_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": 
"SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 hour_second)", + "Value": "DATETIME(\"2021-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 hour_second)", + "Value": "DATETIME(\"2020-12-31 23:59:28\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 hour_second)", + "Value": "DATETIME(\"2021-01-01 00:00:29\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 hour_second)", + "Value": "DATETIME(\"2020-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' hour_second)", + "Value": "DATETIME(\"2021-01-12 13:47:38\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' hour_second)", + "Value": "DATETIME(\"2020-12-20 10:12:20\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 hour_second)", + "Value": "DATETIME(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 hour_second)", + "Value": "DATETIME(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' hour_second)", + "Value": "DATETIME(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' hour_second)", + "Value": "DATETIME(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', 
INTERVAL '-1:10' hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' hour_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' hour_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 hour_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 hour_second)", + "Value": "DATETIME(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 hour_second)", + "Value": "DATETIME(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 hour_second)", + "Value": "DATETIME(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 hour_second)", + "Value": "DATETIME(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' hour_second)", + "Value": "DATETIME(\"2021-01-01 00:06:03\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' hour_second)", + "Value": "DATETIME(\"2020-12-31 23:53:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 hour_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": 
"SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "DATETIME(\"2021-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "DATETIME(\"2020-12-31 23:59:57\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 hour_second)", + "Value": "DATETIME(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 hour_second)", + "Value": "DATETIME(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' hour_second)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_second)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": 
"SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000031\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999969\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000030\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999970\")" + }, + { + "Query": "SELECT 
DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + 
}, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000006\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999994\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999998\")" + }, + { + 
"Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' minute)", + "Value": 
"DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 minute)", + "Value": "DATETIME(\"2021-01-01 00:30:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 minute)", + "Value": "DATETIME(\"2020-12-31 23:28:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 minute)", + "Value": "DATETIME(\"2021-01-01 00:29:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 minute)", + "Value": "DATETIME(\"2020-12-31 23:29:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 minute)", + "Value": 
"DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 minute)", + "Value": "DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 minute)", + "Value": 
"DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 minute)", + "Value": "DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' minute)", + "Value": "DATETIME(\"2021-01-01 00:05:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' minute)", + "Value": "DATETIME(\"2020-12-31 23:53:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 minute)", + "Value": "DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 minute)", + "Value": "DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT 
DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' minute_microsecond)", + "Value": 
"DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.310000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.690000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.300000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.700000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT 
DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', 
INTERVAL 6/4 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:05.400000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:52.600000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.200000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.800000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 
23:59:59', INTERVAL 1e0 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 minute_second)", + "Value": "DATETIME(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 minute_second)", + "Value": "DATETIME(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' minute_second)", + "Value": "DATETIME(\"2021-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' minute_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' minute_second)", + "Value": 
"DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 minute_second)", + "Value": "DATETIME(\"2021-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 minute_second)", + "Value": "DATETIME(\"2020-12-31 23:59:28\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 minute_second)", + "Value": "DATETIME(\"2021-01-01 00:00:29\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 minute_second)", + "Value": "DATETIME(\"2020-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' minute_second)", + "Value": "DATETIME(\"2021-01-12 13:47:38\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' minute_second)", + "Value": "DATETIME(\"2020-12-20 10:12:20\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 minute_second)", + "Value": "DATETIME(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 minute_second)", + "Value": "DATETIME(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' minute_second)", + "Value": "DATETIME(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' minute_second)", + "Value": "DATETIME(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 
1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' minute_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' minute_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 minute_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 minute_second)", + "Value": "DATETIME(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 minute_second)", + "Value": "DATETIME(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 minute_second)", + "Value": "DATETIME(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 minute_second)", + "Value": "DATETIME(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' minute_second)", + "Value": "DATETIME(\"2021-01-01 00:06:03\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' minute_second)", + "Value": "DATETIME(\"2020-12-31 23:53:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 
23:59:59', INTERVAL 1.5e0 minute_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 minute_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "DATETIME(\"2021-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "DATETIME(\"2020-12-31 23:59:57\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 minute_second)", + "Value": "DATETIME(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 minute_second)", + "Value": "DATETIME(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' minute_second)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' minute_second)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 quarter)", + "Value": "DATETIME(\"2028-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 quarter)", + "Value": "DATETIME(\"2013-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 quarter)", + "Value": "DATETIME(\"2028-06-30 23:59:59\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 quarter)", + "Value": "DATETIME(\"2013-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 quarter)", + "Value": "DATETIME(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 quarter)", + "Value": "DATETIME(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + 
{ + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 quarter)", + "Value": "DATETIME(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 quarter)", + "Value": "DATETIME(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 quarter)", + "Value": "DATETIME(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 quarter)", + "Value": "DATETIME(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 quarter)", + "Value": "DATETIME(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 quarter)", + "Value": "DATETIME(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' quarter)", + "Value": "DATETIME(\"2022-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' quarter)", + "Value": "DATETIME(\"2019-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 quarter)", + "Value": "DATETIME(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 quarter)", + "Value": "DATETIME(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 quarter)", + "Value": "DATETIME(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 quarter)", + "Value": "DATETIME(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "DATETIME(\"2021-06-30 
23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "DATETIME(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "DATETIME(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "DATETIME(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 second)", + "Value": "DATETIME(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 second)", + "Value": "DATETIME(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', 
INTERVAL '1 1:1:1' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 second)", + "Value": "DATETIME(\"2021-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 second)", + "Value": "DATETIME(\"2020-12-31 23:59:28\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 second)", + "Value": "DATETIME(\"2021-01-01 00:00:29\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 second)", + "Value": "DATETIME(\"2020-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' second)", + "Value": "DATETIME(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 second)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001\")" + }, + { + 
"Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' second)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.5\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 second)", + "Value": "DATETIME(\"2020-12-31 23:59:57.5\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.5000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', 
INTERVAL 1.5000 second)", + "Value": "DATETIME(\"2020-12-31 23:59:57.5000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.5000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 second)", + "Value": "DATETIME(\"2020-12-31 23:59:57.5000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' second)", + "Value": "DATETIME(\"2021-01-01 00:00:05.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' second)", + "Value": "DATETIME(\"2020-12-31 23:59:53.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 second)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 second)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.5\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "DATETIME(\"2020-12-31 23:59:57.5\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "DATETIME(\"2021-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "DATETIME(\"2020-12-31 23:59:57\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 second)", 
+ "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 
00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.310000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.690000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.300000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.700000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT 
DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', 
INTERVAL 6/4 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:05.400000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:52.600000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.200000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.800000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 
23:59:59', INTERVAL 1e0 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 year_month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 year_month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' year_month)", + "Value": "DATETIME(\"2022-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' year_month)", + "Value": "DATETIME(\"2019-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' year_month)", + "Value": "DATETIME(\"2019-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' year_month)", + "Value": "DATETIME(\"2022-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' year_month)", + "Value": "DATETIME(\"2022-10-31 
23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' year_month)", + "Value": "DATETIME(\"2019-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 year_month)", + "Value": "DATETIME(\"2023-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 year_month)", + "Value": "DATETIME(\"2018-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 year_month)", + "Value": "DATETIME(\"2023-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 year_month)", + "Value": "DATETIME(\"2018-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 year_month)", + "Value": "DATETIME(\"2105-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 year_month)", + "Value": "DATETIME(\"1936-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' year_month)", + "Value": "DATETIME(\"2105-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' year_month)", + "Value": "DATETIME(\"1936-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 
1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' year_month)", + "Value": "DATETIME(\"2019-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' year_month)", + "Value": "DATETIME(\"2022-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' year_month)", + "Value": "DATETIME(\"2022-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' year_month)", + "Value": "DATETIME(\"2019-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 year_month)", + "Value": "DATETIME(\"2022-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 year_month)", + "Value": "DATETIME(\"2019-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 year_month)", + "Value": "DATETIME(\"2438-08-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 year_month)", + "Value": "DATETIME(\"1603-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 year_month)", + "Value": "DATETIME(\"2438-08-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 year_month)", + "Value": "DATETIME(\"1603-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' year_month)", + "Value": "DATETIME(\"2027-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' year_month)", + "Value": "DATETIME(\"2014-08-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 year_month)", + "Value": "DATETIME(\"2022-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 
1.5e0 year_month)", + "Value": "DATETIME(\"2019-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 year_month)", + "Value": "DATETIME(\"2022-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 year_month)", + "Value": "DATETIME(\"2019-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "DATETIME(\"2022-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "DATETIME(\"2019-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "DATETIME(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "DATETIME(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 year_month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 year_month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' year_month)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' year_month)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' year_month)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' year_month)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day)", + "Value": 
"DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day)", + "Value": "DATETIME(\"2025-01-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day)", + "Value": "DATETIME(\"2024-12-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day)", + "Value": "DATETIME(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day)", + "Value": "DATETIME(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 day)", + "Value": "DATETIME(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 
00:00:00', INTERVAL 1.5 day)", + "Value": "DATETIME(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day)", + "Value": "DATETIME(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day)", + "Value": "DATETIME(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 day)", + "Value": "DATETIME(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 day)", + "Value": "DATETIME(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day)", + "Value": "DATETIME(\"2025-01-07 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day)", + "Value": "DATETIME(\"2024-12-26 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day)", + "Value": "DATETIME(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day)", + "Value": "DATETIME(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day)", + "Value": "DATETIME(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day)", + "Value": "DATETIME(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "DATETIME(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "DATETIME(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "DATETIME(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 
00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "DATETIME(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' 
week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 week)", + "Value": "DATETIME(\"2025-08-06 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 week)", + "Value": "DATETIME(\"2024-05-29 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 week)", + "Value": "DATETIME(\"2025-07-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 week)", + "Value": "DATETIME(\"2024-06-05 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 week)", + "Value": "DATETIME(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 week)", + "Value": "DATETIME(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' week)", + "Value": "DATETIME(\"2024-12-25 
00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 week)", + "Value": "DATETIME(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 week)", + "Value": "DATETIME(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 week)", + "Value": "DATETIME(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 week)", + "Value": "DATETIME(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 week)", + "Value": "DATETIME(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 week)", + "Value": "DATETIME(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' week)", + "Value": "DATETIME(\"2025-02-12 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' week)", + "Value": "DATETIME(\"2024-11-20 00:00:00\")" + }, + { + "Query": "SELECT 
DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 week)", + "Value": "DATETIME(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 week)", + "Value": "DATETIME(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 week)", + "Value": "DATETIME(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 week)", + "Value": "DATETIME(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "DATETIME(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "DATETIME(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "DATETIME(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "DATETIME(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' week)", + "Value": "DATETIME(\"2024-12-25 
00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 month)", + "Value": "DATETIME(\"2027-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 month)", + "Value": "DATETIME(\"2022-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 month)", + "Value": "DATETIME(\"2027-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 month)", + "Value": "DATETIME(\"2022-07-01 00:00:00\")" + }, + { + "Query": "SELECT 
DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 month)", + "Value": "DATETIME(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 month)", + "Value": "DATETIME(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT 
DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 month)", + "Value": "DATETIME(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 month)", + "Value": "DATETIME(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 month)", + "Value": "DATETIME(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 month)", + "Value": "DATETIME(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 month)", + "Value": "DATETIME(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 month)", + "Value": "DATETIME(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' month)", + "Value": "DATETIME(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' month)", + "Value": "DATETIME(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 month)", + "Value": "DATETIME(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 month)", + "Value": "DATETIME(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 month)", + "Value": "DATETIME(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 month)", + "Value": "DATETIME(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "DATETIME(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "DATETIME(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT 
DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "DATETIME(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "DATETIME(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": 
"SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 year)", + "Value": "DATETIME(\"2056-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 year)", + "Value": "DATETIME(\"1994-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 year)", + "Value": "DATETIME(\"2055-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 year)", + "Value": "DATETIME(\"1995-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 year)", + "Value": "DATETIME(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 year)", + "Value": "DATETIME(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 
00:00:00', INTERVAL '1:1:1:1' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 year)", + "Value": "DATETIME(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 year)", + "Value": "DATETIME(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 year)", + "Value": "DATETIME(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 year)", + "Value": "DATETIME(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 year)", + "Value": "DATETIME(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 year)", + "Value": "DATETIME(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' year)", + 
"Value": "DATETIME(\"2031-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' year)", + "Value": "DATETIME(\"2019-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 year)", + "Value": "DATETIME(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 year)", + "Value": "DATETIME(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 year)", + "Value": "DATETIME(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 year)", + "Value": "DATETIME(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "DATETIME(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "DATETIME(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "DATETIME(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "DATETIME(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 
00:00:00', INTERVAL '1.0foobar' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day_hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day_hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day_hour)", + "Value": "DATETIME(\"2025-01-02 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day_hour)", + "Value": "DATETIME(\"2024-12-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day_hour)", + "Value": "DATETIME(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day_hour)", + "Value": "DATETIME(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' day_hour)", + "Value": "DATETIME(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' day_hour)", + "Value": "DATETIME(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day_hour)", + "Value": "DATETIME(\"2025-01-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day_hour)", + "Value": "DATETIME(\"2024-12-30 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day_hour)", + "Value": 
"DATETIME(\"2025-01-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day_hour)", + "Value": "DATETIME(\"2024-12-30 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day_hour)", + "Value": "DATETIME(\"2139-01-31 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day_hour)", + "Value": "DATETIME(\"1910-12-03 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day_hour)", + "Value": "DATETIME(\"2025-02-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day_hour)", + "Value": "DATETIME(\"2024-11-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day_hour)", + "Value": "DATETIME(\"2025-02-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day_hour)", + "Value": "DATETIME(\"2024-11-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day_hour)", + "Value": "DATETIME(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day_hour)", + "Value": "DATETIME(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day_hour)", + "Value": "DATETIME(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day_hour)", + "Value": "DATETIME(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 day_hour)", + "Value": "DATETIME(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 day_hour)", + "Value": "DATETIME(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day_hour)", + "Value": "DATETIME(\"2025-07-29 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day_hour)", + "Value": "DATETIME(\"2024-06-05 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 day_hour)", + "Value": "DATETIME(\"2025-07-29 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 day_hour)", + "Value": "DATETIME(\"2024-06-05 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day_hour)", + "Value": "DATETIME(\"2025-01-07 04:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day_hour)", + "Value": "DATETIME(\"2024-12-25 20:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day_hour)", + "Value": "DATETIME(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day_hour)", + "Value": "DATETIME(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day_hour)", + "Value": "DATETIME(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day_hour)", + "Value": "DATETIME(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "DATETIME(\"2025-01-02 05:00:00\")" + }, 
+ { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "DATETIME(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "DATETIME(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "DATETIME(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day_hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day_hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day_hour)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day_hour)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day_hour)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day_hour)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT 
DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day_microsecond)", + "Value": 
"DATETIME(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) 
day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day_minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day_minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day_minute)", + "Value": "DATETIME(\"2025-01-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day_minute)", + "Value": "DATETIME(\"2024-12-31 22:59:00\")" + }, + { + "Query": "SELECT 
DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day_minute)", + "Value": "DATETIME(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day_minute)", + "Value": "DATETIME(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' day_minute)", + "Value": "DATETIME(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' day_minute)", + "Value": "DATETIME(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day_minute)", + "Value": "DATETIME(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day_minute)", + "Value": "DATETIME(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day_minute)", + "Value": "DATETIME(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day_minute)", + "Value": "DATETIME(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day_minute)", + "Value": "DATETIME(\"2026-11-26 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day_minute)", + "Value": "DATETIME(\"2023-02-06 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day_minute)", + "Value": "DATETIME(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day_minute)", + "Value": "DATETIME(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT 
DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day_minute)", + "Value": "DATETIME(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day_minute)", + "Value": "DATETIME(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day_minute)", + "Value": "DATETIME(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day_minute)", + "Value": "DATETIME(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day_minute)", + "Value": "DATETIME(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day_minute)", + "Value": "DATETIME(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 day_minute)", + "Value": "DATETIME(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 day_minute)", + "Value": "DATETIME(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day_minute)", + "Value": "DATETIME(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day_minute)", + "Value": "DATETIME(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 
day_minute)", + "Value": "DATETIME(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 day_minute)", + "Value": "DATETIME(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day_minute)", + "Value": "DATETIME(\"2025-01-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day_minute)", + "Value": "DATETIME(\"2024-12-31 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day_minute)", + "Value": "DATETIME(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day_minute)", + "Value": "DATETIME(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day_minute)", + "Value": "DATETIME(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day_minute)", + "Value": "DATETIME(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "DATETIME(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "DATETIME(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day_minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day_minute)", + "Value": 
"DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day_minute)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day_minute)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day_minute)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day_minute)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day_second)", + "Value": "DATETIME(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day_second)", + "Value": "DATETIME(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day_second)", + "Value": "DATETIME(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_second)", + "Value": "DATETIME(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_second)", + "Value": "DATETIME(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 
00:00:00', INTERVAL '1 10' day_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day_second)", + "Value": "DATETIME(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day_second)", + "Value": "DATETIME(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day_second)", + "Value": "DATETIME(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day_second)", + "Value": "DATETIME(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day_second)", + "Value": "DATETIME(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day_second)", + "Value": "DATETIME(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day_second)", + "Value": "DATETIME(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day_second)", + "Value": "DATETIME(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day_second)", + "Value": "DATETIME(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day_second)", + "Value": "DATETIME(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_second)", + "Value": "DATETIME(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_second)", + "Value": "DATETIME(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_second)", + "Value": "DATETIME(\"2025-01-02 01:01:01\")" + }, + { + 
"Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_second)", + "Value": "DATETIME(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 day_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day_second)", + "Value": "DATETIME(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day_second)", + "Value": "DATETIME(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 day_second)", + "Value": "DATETIME(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 day_second)", + "Value": "DATETIME(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day_second)", + "Value": "DATETIME(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day_second)", + "Value": "DATETIME(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day_second)", + "Value": 
"DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "DATETIME(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "DATETIME(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day_second)", + "Value": "DATETIME(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day_second)", + "Value": "DATETIME(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day_second)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day_second)", + "Value": 
"DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 hour)", + "Value": "DATETIME(\"2025-01-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 hour)", + "Value": "DATETIME(\"2024-12-30 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 hour)", + "Value": "DATETIME(\"2025-01-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 hour)", + "Value": "DATETIME(\"2024-12-30 18:00:00\")" + }, + { + "Query": 
"SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 hour)", + "Value": "DATETIME(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 hour)", + "Value": "DATETIME(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT 
DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 hour)", + "Value": "DATETIME(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 hour)", + "Value": "DATETIME(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 hour)", + "Value": "DATETIME(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 hour)", + "Value": "DATETIME(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 hour)", + "Value": "DATETIME(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 hour)", + "Value": "DATETIME(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' hour)", + "Value": "DATETIME(\"2025-01-01 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' hour)", + "Value": "DATETIME(\"2024-12-31 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 hour)", + "Value": "DATETIME(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 hour)", + "Value": "DATETIME(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 hour)", + "Value": "DATETIME(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 hour)", + "Value": "DATETIME(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "DATETIME(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "DATETIME(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT 
DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "DATETIME(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "DATETIME(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 
1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001000\")" + }, + { + 
"Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 
hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": 
"DATETIME(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 hour_minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 hour_minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' hour_minute)", + "Value": "DATETIME(\"2024-12-31 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, 
+ { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' hour_minute)", + "Value": "DATETIME(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' hour_minute)", + "Value": "DATETIME(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 hour_minute)", + "Value": "DATETIME(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 hour_minute)", + "Value": "DATETIME(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 hour_minute)", + "Value": "DATETIME(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 hour_minute)", + "Value": "DATETIME(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' hour_minute)", + "Value": "DATETIME(\"2026-11-26 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' hour_minute)", + "Value": "DATETIME(\"2023-02-06 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 hour_minute)", + "Value": "DATETIME(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 hour_minute)", + "Value": "DATETIME(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' hour_minute)", + "Value": "DATETIME(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' hour_minute)", 
+ "Value": "DATETIME(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' hour_minute)", + "Value": "DATETIME(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' hour_minute)", + "Value": "DATETIME(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 hour_minute)", + "Value": "DATETIME(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 hour_minute)", + "Value": "DATETIME(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 hour_minute)", + "Value": "DATETIME(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 hour_minute)", + "Value": "DATETIME(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 hour_minute)", + "Value": "DATETIME(\"2024-12-28 11:40:00\")" + }, + { 
+ "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' hour_minute)", + "Value": "DATETIME(\"2025-01-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' hour_minute)", + "Value": "DATETIME(\"2024-12-31 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 hour_minute)", + "Value": "DATETIME(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_minute)", + "Value": "DATETIME(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "DATETIME(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 hour_minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 hour_minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' hour_minute)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_minute)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 hour_second)", + "Value": "DATETIME(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 hour_second)", + "Value": "DATETIME(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' hour_second)", + "Value": "DATETIME(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' hour_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' hour_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 hour_second)", + "Value": "DATETIME(\"2025-01-01 00:00:31\")" + }, + { + "Query": 
"SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 hour_second)", + "Value": "DATETIME(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 hour_second)", + "Value": "DATETIME(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 hour_second)", + "Value": "DATETIME(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' hour_second)", + "Value": "DATETIME(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' hour_second)", + "Value": "DATETIME(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 hour_second)", + "Value": "DATETIME(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 hour_second)", + "Value": "DATETIME(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' hour_second)", + "Value": "DATETIME(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' hour_second)", + "Value": "DATETIME(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' hour_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', 
INTERVAL '-1:10' hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' hour_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 hour_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 hour_second)", + "Value": "DATETIME(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 hour_second)", + "Value": "DATETIME(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 hour_second)", + "Value": "DATETIME(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 hour_second)", + "Value": "DATETIME(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' hour_second)", + "Value": "DATETIME(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' hour_second)", + "Value": "DATETIME(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 hour_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": 
"SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "DATETIME(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "DATETIME(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 hour_second)", + "Value": "DATETIME(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 hour_second)", + "Value": "DATETIME(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' hour_second)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_second)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": 
"SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000031\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999969\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000030\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999970\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": 
"SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" 
+ }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000006\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999994\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "DATETIME(\"2025-01-01 
00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' minute)", + "Value": 
"DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 minute)", + "Value": "DATETIME(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 minute)", + "Value": "DATETIME(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 minute)", + "Value": "DATETIME(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 minute)", + "Value": "DATETIME(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 minute)", + "Value": 
"DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 minute)", + "Value": "DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 minute)", + "Value": 
"DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 minute)", + "Value": "DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' minute)", + "Value": "DATETIME(\"2025-01-01 00:06:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' minute)", + "Value": "DATETIME(\"2024-12-31 23:54:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 minute)", + "Value": "DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 minute)", + "Value": "DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' 
minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT 
DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": 
"SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 minute_second)", + "Value": "DATETIME(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 minute_second)", + "Value": "DATETIME(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' minute_second)", + "Value": "DATETIME(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' minute_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' 
minute_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 minute_second)", + "Value": "DATETIME(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 minute_second)", + "Value": "DATETIME(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 minute_second)", + "Value": "DATETIME(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 minute_second)", + "Value": "DATETIME(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' minute_second)", + "Value": "DATETIME(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' minute_second)", + "Value": "DATETIME(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 minute_second)", + "Value": "DATETIME(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 minute_second)", + "Value": "DATETIME(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' minute_second)", + "Value": "DATETIME(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' minute_second)", + "Value": "DATETIME(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 
1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' minute_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' minute_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 minute_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 minute_second)", + "Value": "DATETIME(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 minute_second)", + "Value": "DATETIME(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 minute_second)", + "Value": "DATETIME(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 minute_second)", + "Value": "DATETIME(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' minute_second)", + "Value": "DATETIME(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' minute_second)", + "Value": "DATETIME(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 minute_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 minute_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "DATETIME(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "DATETIME(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 minute_second)", + "Value": "DATETIME(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 minute_second)", + "Value": "DATETIME(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' minute_second)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' minute_second)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { 
+ "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 quarter)", + "Value": "DATETIME(\"2032-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 quarter)", + "Value": "DATETIME(\"2017-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 quarter)", + "Value": "DATETIME(\"2032-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 quarter)", + "Value": "DATETIME(\"2017-07-01 00:00:00\")" + }, + { + "Query": 
"SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 quarter)", + "Value": "DATETIME(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 quarter)", + "Value": "DATETIME(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' quarter)", + "Value": "DATETIME(\"2024-10-01 
00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 quarter)", + "Value": "DATETIME(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 quarter)", + "Value": "DATETIME(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 quarter)", + "Value": "DATETIME(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 quarter)", + "Value": "DATETIME(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 quarter)", + "Value": "DATETIME(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 quarter)", + "Value": "DATETIME(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' quarter)", + "Value": "DATETIME(\"2026-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' quarter)", + "Value": "DATETIME(\"2023-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 quarter)", + "Value": "DATETIME(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 quarter)", + "Value": "DATETIME(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 quarter)", + "Value": "DATETIME(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 quarter)", + "Value": "DATETIME(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "DATETIME(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + 
"Value": "DATETIME(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "DATETIME(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "DATETIME(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 second)", + "Value": "DATETIME(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 second)", + "Value": "DATETIME(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 second)", + "Value": "DATETIME(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 second)", + "Value": "DATETIME(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 second)", + "Value": "DATETIME(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 second)", + "Value": "DATETIME(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' second)", + "Value": "DATETIME(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 second)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' second)", + "Value": "DATETIME(\"2025-01-01 
00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' second)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.5\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 second)", + "Value": "DATETIME(\"2024-12-31 23:59:58.5\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.5000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 second)", + "Value": "DATETIME(\"2024-12-31 23:59:58.5000\")" + }, + { + "Query": "SELECT 
DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.5000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 second)", + "Value": "DATETIME(\"2024-12-31 23:59:58.5000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' second)", + "Value": "DATETIME(\"2025-01-01 00:00:06.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' second)", + "Value": "DATETIME(\"2024-12-31 23:59:54.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 second)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 second)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.5\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "DATETIME(\"2024-12-31 23:59:58.5\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "DATETIME(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "DATETIME(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT 
DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 
00:00:00', INTERVAL '1 10' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, 
+ { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": 
"SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.900000\")" + }, + { + 
"Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 year_month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 year_month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' year_month)", + "Value": "DATETIME(\"2026-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' year_month)", + "Value": "DATETIME(\"2023-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' year_month)", + "Value": "DATETIME(\"2023-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' year_month)", + "Value": "DATETIME(\"2026-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' year_month)", + "Value": "DATETIME(\"2026-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' year_month)", + 
"Value": "DATETIME(\"2023-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 year_month)", + "Value": "DATETIME(\"2027-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 year_month)", + "Value": "DATETIME(\"2022-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 year_month)", + "Value": "DATETIME(\"2027-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 year_month)", + "Value": "DATETIME(\"2022-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 year_month)", + "Value": "DATETIME(\"2109-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 year_month)", + "Value": "DATETIME(\"1940-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' year_month)", + "Value": "DATETIME(\"2109-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' year_month)", + "Value": "DATETIME(\"1940-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 
'-1:10' year_month)", + "Value": "DATETIME(\"2023-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' year_month)", + "Value": "DATETIME(\"2026-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' year_month)", + "Value": "DATETIME(\"2026-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' year_month)", + "Value": "DATETIME(\"2023-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 year_month)", + "Value": "DATETIME(\"2026-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 year_month)", + "Value": "DATETIME(\"2023-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 year_month)", + "Value": "DATETIME(\"2442-09-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 year_month)", + "Value": "DATETIME(\"1607-05-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 year_month)", + "Value": "DATETIME(\"2442-09-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 year_month)", + "Value": "DATETIME(\"1607-05-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' year_month)", + "Value": "DATETIME(\"2031-05-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' year_month)", + "Value": "DATETIME(\"2018-09-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 year_month)", + "Value": "DATETIME(\"2026-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 year_month)", + "Value": "DATETIME(\"2023-08-01 00:00:00\")" + }, + { + "Query": "SELECT 
DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 year_month)", + "Value": "DATETIME(\"2026-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 year_month)", + "Value": "DATETIME(\"2023-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "DATETIME(\"2026-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "DATETIME(\"2023-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "DATETIME(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "DATETIME(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 year_month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 year_month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' year_month)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' year_month)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' year_month)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' year_month)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 day)", + "Value": 
"CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 day)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 day)", + "Value": "CHAR(\"2018-03-31\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 day)", + "Value": "CHAR(\"2018-05-31\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 day)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 day)", + "Value": "CHAR(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 day)", + "Value": "CHAR(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT 
DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 day)", + "Value": "CHAR(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 day)", + "Value": "CHAR(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 day)", + "Value": "CHAR(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 day)", + "Value": "CHAR(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 day)", + "Value": "CHAR(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 day)", + "Value": "CHAR(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' day)", + "Value": "CHAR(\"2018-05-07\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' day)", + "Value": "CHAR(\"2018-04-25\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 day)", + 
"Value": "CHAR(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 day)", + "Value": "CHAR(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "CHAR(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "CHAR(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', 
INTERVAL '-1 10' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 week)", + "Value": "CHAR(\"2018-12-04\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 week)", + "Value": "CHAR(\"2017-09-26\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 week)", + "Value": "CHAR(\"2018-11-27\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 week)", + "Value": "CHAR(\"2017-10-03\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 week)", + "Value": "CHAR(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 week)", + "Value": "CHAR(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' week)", + 
"Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 week)", + "Value": "CHAR(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 week)", + "Value": "CHAR(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 week)", + "Value": "CHAR(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 week)", + "Value": "CHAR(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 week)", + "Value": "CHAR(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 week)", + "Value": "CHAR(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' week)", + "Value": "CHAR(\"2018-06-12\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' week)", + "Value": "CHAR(\"2018-03-20\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 week)", + "Value": "CHAR(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 week)", + "Value": "CHAR(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) 
week)", + "Value": "CHAR(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "CHAR(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 month)", + 
"Value": "CHAR(\"2020-12-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 month)", + "Value": "CHAR(\"2015-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 month)", + "Value": "CHAR(\"2020-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 month)", + "Value": "CHAR(\"2015-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 month)", + "Value": "CHAR(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 month)", + "Value": "CHAR(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 month)", + "Value": 
"CHAR(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 month)", + "Value": "CHAR(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 month)", + "Value": "CHAR(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 month)", + "Value": "CHAR(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 month)", + "Value": "CHAR(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 month)", + "Value": "CHAR(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' month)", + "Value": "CHAR(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' month)", + "Value": "CHAR(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "CHAR(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "CHAR(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT 
DATE_ADD('2018-05-01', INTERVAL '1.0' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 year)", + "Value": "CHAR(\"2049-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 year)", + "Value": "CHAR(\"1987-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 year)", + "Value": "CHAR(\"2048-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 year)", + "Value": "CHAR(\"1988-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' 
year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 year)", + "Value": "CHAR(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 year)", + "Value": "CHAR(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 year)", + "Value": "CHAR(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 year)", + "Value": "CHAR(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 year)", + "Value": "CHAR(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 year)", + "Value": "CHAR(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 year)", + "Value": 
"CHAR(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 year)", + "Value": "CHAR(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' year)", + "Value": "CHAR(\"2024-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' year)", + "Value": "CHAR(\"2012-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 year)", + "Value": "CHAR(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 year)", + "Value": "CHAR(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT 
DATE_ADD('2018-05-01', INTERVAL 1 day_hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 day_hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2018-05-02 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2018-04-29 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2018-04-29 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2018-05-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2018-05-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2018-04-29 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 day_hour)", + "Value": "CHAR(\"2018-05-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 day_hour)", + "Value": "CHAR(\"2018-04-29 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 day_hour)", + "Value": "CHAR(\"2018-05-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 day_hour)", + "Value": "CHAR(\"2018-04-29 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"2132-05-30 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"1904-04-01 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2018-06-12 15:00:00\")" + }, + { + 
"Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2018-03-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2018-06-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2018-03-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' day_hour)", + "Value": "CHAR(\"2018-04-29 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' day_hour)", + "Value": "CHAR(\"2018-05-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2018-05-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2018-04-29 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2018-05-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2018-04-29 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2018-11-26 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2017-10-03 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2018-11-26 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2017-10-03 16:00:00\")" + }, + { + "Query": "SELECT 
DATE_ADD('2018-05-01', INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2018-05-07 04:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2018-04-24 20:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2018-05-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2018-04-29 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2018-05-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2018-04-29 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2018-05-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2018-04-29 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2018-05-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2018-04-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2018-05-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2018-04-30 
00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "CHAR(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "CHAR(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' day_microsecond)", + "Value": 
"CHAR(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "CHAR(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "CHAR(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT 
DATE_SUB('2018-05-01', INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS 
DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 day_minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 day_minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2018-05-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2018-04-30 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' day_minute)", + "Value": "CHAR(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 
10' day_minute)", + "Value": "CHAR(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 day_minute)", + "Value": "CHAR(\"2018-05-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 day_minute)", + "Value": "CHAR(\"2018-04-30 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 day_minute)", + "Value": "CHAR(\"2018-05-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 day_minute)", + "Value": "CHAR(\"2018-04-30 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2020-03-25 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2016-06-05 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2018-05-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2018-04-30 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2018-05-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2018-04-30 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT 
DATE_ADD('2018-05-01', INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2018-05-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2018-04-27 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2018-05-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2018-04-27 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2018-05-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2018-04-30 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2018-04-30 22:55:00\")" + }, + { + 
"Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 day_second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 day_second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2018-05-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2018-04-30 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2018-05-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2018-04-29 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2018-05-01 
00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 day_second)", + "Value": "CHAR(\"2018-05-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 day_second)", + "Value": "CHAR(\"2018-04-30 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 day_second)", + "Value": "CHAR(\"2018-05-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 day_second)", + "Value": "CHAR(\"2018-04-30 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2018-05-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2018-04-19 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 day_second)", + "Value": "CHAR(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 day_second)", + "Value": "CHAR(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2018-05-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2018-04-29 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2018-05-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2018-04-29 
22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 day_second)", + "Value": "CHAR(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 day_second)", + "Value": "CHAR(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 day_second)", + "Value": "CHAR(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 day_second)", + "Value": "CHAR(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2018-05-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2018-04-30 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + 
"Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2018-05-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2018-04-30 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' day_second)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' day_second)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2018-04-30 
23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 hour)", + "Value": "CHAR(\"2018-05-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 hour)", + "Value": "CHAR(\"2018-04-29 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 hour)", + "Value": "CHAR(\"2018-05-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 hour)", + "Value": "CHAR(\"2018-04-29 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 hour)", + "Value": "CHAR(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 hour)", + "Value": "CHAR(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' hour)", + "Value": 
"CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 hour)", + "Value": "CHAR(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 hour)", + "Value": "CHAR(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 hour)", + "Value": "CHAR(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 hour)", + "Value": "CHAR(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' hour)", + "Value": "CHAR(\"2018-05-01 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' hour)", + "Value": "CHAR(\"2018-04-30 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 hour)", + "Value": "CHAR(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 hour)", + "Value": 
"CHAR(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "CHAR(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "CHAR(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT 
DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' hour_microsecond)", + "Value": 
"CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT 
DATE_ADD('2018-05-01', INTERVAL '6/4' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', 
INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' hour_minute)", + "Value": "CHAR(\"2018-05-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' hour_minute)", + "Value": "CHAR(\"2018-04-30 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 hour_minute)", + "Value": "CHAR(\"2018-05-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 hour_minute)", + "Value": "CHAR(\"2018-04-30 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2018-05-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2018-04-30 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' hour_minute)", + "Value": 
"CHAR(\"2020-03-25 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' hour_minute)", + "Value": "CHAR(\"2016-06-05 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2018-05-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2018-04-30 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2018-05-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2018-04-30 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 hour_minute)", + "Value": "CHAR(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 hour_minute)", + "Value": "CHAR(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2018-05-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', 
INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2018-04-27 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 hour_minute)", + "Value": "CHAR(\"2018-05-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 hour_minute)", + "Value": "CHAR(\"2018-04-27 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2018-05-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2018-04-30 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 hour_minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 hour_minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2018-05-01 
01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 hour_second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 hour_second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2018-04-30 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' hour_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 hour_second)", + "Value": "CHAR(\"2018-05-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 hour_second)", + "Value": "CHAR(\"2018-04-30 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 hour_second)", + "Value": "CHAR(\"2018-05-01 00:00:30\")" + }, + { + "Query": "SELECT 
DATE_SUB('2018-05-01', INTERVAL 30 hour_second)", + "Value": "CHAR(\"2018-04-30 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2018-05-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2018-04-19 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' hour_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2018-04-30 
23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2018-05-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2018-04-30 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 hour_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2018-05-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2018-04-30 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT 
DATE_SUB('2018-05-01', INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 microsecond)", + 
"Value": "CHAR(\"2018-05-01 00:00:00.000031\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999969\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000030\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999970\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' microsecond)", + "Value": 
"CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000006\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999994\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": 
"CHAR(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" 
+ }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 minute)", + "Value": "CHAR(\"2018-05-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 minute)", + "Value": "CHAR(\"2018-04-30 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 minute)", + "Value": "CHAR(\"2018-05-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 minute)", + "Value": "CHAR(\"2018-04-30 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' minute)", 
+ "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' minute)", + "Value": "CHAR(\"2018-05-01 00:06:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' minute)", + "Value": "CHAR(\"2018-04-30 23:54:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT 
DATE_SUB('2018-05-01', INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' 
minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT 
DATE_SUB('2018-05-01', INTERVAL '1.999' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' 
minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 
'1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 minute_second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 minute_second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2018-04-30 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 minute_second)", + "Value": "CHAR(\"2018-05-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 minute_second)", + "Value": "CHAR(\"2018-04-30 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 minute_second)", + "Value": "CHAR(\"2018-05-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 minute_second)", + "Value": "CHAR(\"2018-04-30 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' 
minute_second)", + "Value": "CHAR(\"2018-05-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' minute_second)", + "Value": "CHAR(\"2018-04-19 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 minute_second)", + "Value": "CHAR(\"2018-05-01 
01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 minute_second)", + "Value": "CHAR(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2018-05-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2018-04-30 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2018-05-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2018-04-30 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 minute_second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 minute_second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + 
"Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' minute_second)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 quarter)", + "Value": "CHAR(\"2026-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 quarter)", + "Value": "CHAR(\"2010-08-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 quarter)", + "Value": "CHAR(\"2025-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', 
INTERVAL 30 quarter)", + "Value": "CHAR(\"2010-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2018-11-01\")" + }, + { + "Query": 
"SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 quarter)", + "Value": "CHAR(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 quarter)", + "Value": "CHAR(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2019-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2016-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 quarter)", + "Value": "CHAR(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 quarter)", + "Value": "CHAR(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', 
INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 second)", + "Value": "CHAR(\"2018-05-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 second)", + "Value": "CHAR(\"2018-04-30 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 second)", + "Value": "CHAR(\"2018-05-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 second)", + "Value": "CHAR(\"2018-04-30 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' second)", + "Value": "CHAR(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": 
"SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' second)", + "Value": "CHAR(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 second)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 second)", + "Value": "CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' second)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' second)", + "Value": "CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 second)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 second)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 second)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT 
DATE_SUB('2018-05-01', INTERVAL 1.5000 second)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 second)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 second)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' second)", + "Value": "CHAR(\"2018-05-01 00:00:06\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' second)", + "Value": "CHAR(\"2018-04-30 23:59:54\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 second)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 second)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2018-05-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2018-04-30 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' second)", + "Value": 
"CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.690000\")" + }, + 
{ + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT 
DATE_SUB('2018-05-01', INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS 
DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 year_month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 year_month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2019-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2017-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2016-07-01\")" + }, + { + "Query": 
"SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2020-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2020-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2016-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 year_month)", + "Value": "CHAR(\"2020-12-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 year_month)", + "Value": "CHAR(\"2015-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 year_month)", + "Value": "CHAR(\"2020-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 year_month)", + "Value": "CHAR(\"2015-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 year_month)", + "Value": "CHAR(\"2102-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 year_month)", + "Value": "CHAR(\"1934-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' year_month)", + "Value": "CHAR(\"2102-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' year_month)", + "Value": "CHAR(\"1934-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2016-07-01\")" + }, + { + "Query": "SELECT 
DATE_SUB('2018-05-01', INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2020-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2020-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2016-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2019-10-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2016-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"2436-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"1600-09-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 year_month)", + "Value": "CHAR(\"2436-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 year_month)", + "Value": "CHAR(\"1600-09-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2024-09-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2012-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2019-10-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2016-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 year_month)", + "Value": "CHAR(\"2019-10-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 year_month)", + "Value": "CHAR(\"2016-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2019-10-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2016-12-01\")" + }, + { + "Query": "SELECT 
DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' year_month)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' year_month)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', 
INTERVAL '1 10' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 day)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 day)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 day)", + "Value": "CHAR(\"2021-01-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 day)", + "Value": "CHAR(\"2020-12-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 day)", + "Value": "CHAR(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 day)", + "Value": "CHAR(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT 
DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 day)", + "Value": "CHAR(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 day)", + "Value": "CHAR(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 day)", + "Value": "CHAR(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 day)", + "Value": "CHAR(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 day)", + "Value": "CHAR(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 day)", + "Value": "CHAR(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' day)", + "Value": "CHAR(\"2021-01-06 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' day)", + "Value": "CHAR(\"2020-12-25 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 day)", + "Value": "CHAR(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 day)", + "Value": "CHAR(\"2020-12-29 23:59:59\")" + }, + { + "Query": 
"SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "CHAR(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "CHAR(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 
23:59:59', INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 week)", + "Value": "CHAR(\"2021-08-05 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 week)", + "Value": "CHAR(\"2020-05-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 week)", + "Value": "CHAR(\"2021-07-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 week)", + "Value": "CHAR(\"2020-06-04 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 week)", + "Value": "CHAR(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 week)", + "Value": "CHAR(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + 
"Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 week)", + "Value": "CHAR(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 week)", + "Value": "CHAR(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 week)", + "Value": "CHAR(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 week)", + "Value": "CHAR(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 week)", + "Value": "CHAR(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 week)", + "Value": "CHAR(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' week)", + "Value": "CHAR(\"2021-02-11 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' week)", + "Value": "CHAR(\"2020-11-19 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 week)", + "Value": 
"CHAR(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 week)", + "Value": "CHAR(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "CHAR(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "CHAR(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 month)", + "Value": "CHAR(\"2020-11-30 
23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 month)", + "Value": "CHAR(\"2023-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 month)", + "Value": "CHAR(\"2018-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 month)", + "Value": "CHAR(\"2023-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 month)", + "Value": "CHAR(\"2018-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 month)", + "Value": "CHAR(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 
1.999 month)", + "Value": "CHAR(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 month)", + "Value": "CHAR(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 month)", + "Value": "CHAR(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 month)", + "Value": "CHAR(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 month)", + "Value": "CHAR(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 month)", + "Value": "CHAR(\"2021-02-28 23:59:59\")" + }, + { + 
"Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 month)", + "Value": "CHAR(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' month)", + "Value": "CHAR(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' month)", + "Value": "CHAR(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "CHAR(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "CHAR(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": 
"SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 year)", + "Value": "CHAR(\"2051-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 year)", + "Value": "CHAR(\"1989-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 year)", + "Value": "CHAR(\"2050-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 year)", + "Value": "CHAR(\"1990-12-31 
23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 year)", + "Value": "CHAR(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 year)", + "Value": "CHAR(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 year)", + "Value": "CHAR(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', 
INTERVAL 1.5 year)", + "Value": "CHAR(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 year)", + "Value": "CHAR(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 year)", + "Value": "CHAR(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 year)", + "Value": "CHAR(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 year)", + "Value": "CHAR(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' year)", + "Value": "CHAR(\"2026-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' year)", + "Value": "CHAR(\"2014-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 year)", + "Value": "CHAR(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 year)", + "Value": "CHAR(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 year)", + 
"Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 day_hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 day_hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2021-01-02 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2020-12-30 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2020-12-30 13:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2021-01-02 09:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2021-01-02 09:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2020-12-30 13:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', 
INTERVAL 31 day_hour)", + "Value": "CHAR(\"2021-01-02 06:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 day_hour)", + "Value": "CHAR(\"2020-12-30 16:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 day_hour)", + "Value": "CHAR(\"2021-01-02 05:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 day_hour)", + "Value": "CHAR(\"2020-12-30 17:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"2135-01-31 14:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"1906-12-03 08:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2021-02-12 14:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2020-11-19 08:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2021-02-12 14:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2020-11-19 08:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' day_hour)", + "Value": "CHAR(\"2020-12-30 13:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' day_hour)", + "Value": "CHAR(\"2021-01-02 09:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 
23:59:59', INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2021-01-02 09:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2020-12-30 13:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2021-01-02 04:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2020-12-30 18:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2021-07-29 07:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2020-06-05 15:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2021-07-29 07:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2020-06-05 15:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2021-01-07 03:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2020-12-25 19:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2021-01-02 04:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2020-12-30 18:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2021-01-02 04:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2020-12-30 18:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2021-01-02 04:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 
CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2020-12-30 18:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "CHAR(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_microsecond)", + "Value": 
"CHAR(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' 
day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "CHAR(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "CHAR(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT 
DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:05.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:52.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' 
day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 day_minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 day_minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2021-01-01 01:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2020-12-31 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' day_minute)", + "Value": "CHAR(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' day_minute)", + "Value": "CHAR(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 day_minute)", + "Value": "CHAR(\"2021-01-01 00:30:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 day_minute)", + "Value": 
"CHAR(\"2020-12-31 23:28:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 day_minute)", + "Value": "CHAR(\"2021-01-01 00:29:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 day_minute)", + "Value": "CHAR(\"2020-12-31 23:29:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2022-11-26 11:38:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2019-02-06 12:20:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2021-01-01 17:38:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2020-12-31 06:20:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2021-01-01 17:38:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2020-12-31 06:20:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 
23:59:59', INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2021-01-04 12:19:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2020-12-28 11:39:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2021-01-04 12:19:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2020-12-28 11:39:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2021-01-01 06:03:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2020-12-31 17:55:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT 
DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 day_second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 day_second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2021-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2020-12-31 23:58:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2021-01-02 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2020-12-30 22:58:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2020-12-31 
23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 day_second)", + "Value": "CHAR(\"2021-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 day_second)", + "Value": "CHAR(\"2020-12-31 23:59:28\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 day_second)", + "Value": "CHAR(\"2021-01-01 00:00:29\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 day_second)", + "Value": "CHAR(\"2020-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2021-01-12 13:47:38\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2020-12-20 10:12:20\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 day_second)", + "Value": "CHAR(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 day_second)", + "Value": "CHAR(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2021-01-02 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2020-12-30 
22:58:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2021-01-02 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2020-12-30 22:58:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 day_second)", + "Value": "CHAR(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 day_second)", + "Value": "CHAR(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 day_second)", + "Value": "CHAR(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 day_second)", + "Value": "CHAR(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2021-01-01 00:06:03\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2020-12-31 23:53:55\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2021-01-01 
00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2021-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2020-12-31 23:59:57\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' day_second)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' day_second)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 
23:59:59', INTERVAL 1 hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 hour)", + "Value": "CHAR(\"2021-01-02 06:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 hour)", + "Value": "CHAR(\"2020-12-30 16:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 hour)", + "Value": "CHAR(\"2021-01-02 05:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 hour)", + "Value": "CHAR(\"2020-12-30 17:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 hour)", + "Value": "CHAR(\"2021-01-01 01:59:59\")" + }, + { + "Query": 
"SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 hour)", + "Value": "CHAR(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 hour)", + "Value": "CHAR(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 hour)", + "Value": "CHAR(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 hour)", + "Value": 
"CHAR(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 hour)", + "Value": "CHAR(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' hour)", + "Value": "CHAR(\"2021-01-01 05:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' hour)", + "Value": "CHAR(\"2020-12-31 17:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 hour)", + "Value": "CHAR(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 hour)", + "Value": "CHAR(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "CHAR(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "CHAR(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' hour)", + "Value": "CHAR(\"2020-12-31 
22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 
23:59:58.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 
'-1:10' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:05.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:52.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT 
DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' hour_minute)", + "Value": 
"CHAR(\"2021-01-01 01:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' hour_minute)", + "Value": "CHAR(\"2020-12-31 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 hour_minute)", + "Value": "CHAR(\"2021-01-01 00:30:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 hour_minute)", + "Value": "CHAR(\"2020-12-31 23:28:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2021-01-01 00:29:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2020-12-31 23:29:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' hour_minute)", + "Value": "CHAR(\"2022-11-26 11:38:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' hour_minute)", + "Value": "CHAR(\"2019-02-06 12:20:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2021-01-01 17:38:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2020-12-31 06:20:59\")" + }, + { + 
"Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2021-01-01 17:38:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2020-12-31 06:20:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 hour_minute)", + "Value": "CHAR(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 hour_minute)", + "Value": "CHAR(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2021-01-04 12:19:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2020-12-28 11:39:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 hour_minute)", + "Value": "CHAR(\"2021-01-04 12:19:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 hour_minute)", + 
"Value": "CHAR(\"2020-12-28 11:39:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2021-01-01 06:03:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2020-12-31 17:55:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 hour_minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 hour_minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + 
"Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 hour_second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 hour_second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2021-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' hour_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 hour_second)", + "Value": "CHAR(\"2021-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 hour_second)", + "Value": "CHAR(\"2020-12-31 23:59:28\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 hour_second)", + "Value": "CHAR(\"2021-01-01 00:00:29\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 
23:59:59', INTERVAL 30 hour_second)", + "Value": "CHAR(\"2020-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2021-01-12 13:47:38\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2020-12-20 10:12:20\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' hour_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2021-01-01 
00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2021-01-01 00:06:03\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2020-12-31 23:53:55\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2021-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', 
INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2020-12-31 23:59:57\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' microsecond)", + "Value": 
"CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000031\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999969\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000030\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999970\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2020-12-31 
23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000006\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999994\")" + }, + { + 
"Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' microsecond)", + 
"Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 minute)", + "Value": "CHAR(\"2021-01-01 00:30:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 minute)", + "Value": "CHAR(\"2020-12-31 23:28:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 minute)", + "Value": "CHAR(\"2021-01-01 00:29:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 minute)", + "Value": "CHAR(\"2020-12-31 23:29:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT 
DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 
minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' minute)", + "Value": "CHAR(\"2021-01-01 00:05:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' minute)", + "Value": "CHAR(\"2020-12-31 23:53:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 
minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + 
"Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', 
INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:05.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:52.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + 
}, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 minute_second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT 
DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 minute_second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2021-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 minute_second)", + "Value": "CHAR(\"2021-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 minute_second)", + "Value": "CHAR(\"2020-12-31 23:59:28\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 minute_second)", + "Value": "CHAR(\"2021-01-01 00:00:29\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 minute_second)", + "Value": "CHAR(\"2020-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' minute_second)", + "Value": "CHAR(\"2021-01-12 13:47:38\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' minute_second)", + "Value": "CHAR(\"2020-12-20 10:12:20\")" + }, + { + "Query": "SELECT 
DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 minute_second)", + "Value": "CHAR(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 
minute_second)", + "Value": "CHAR(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2021-01-01 00:06:03\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2020-12-31 23:53:55\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2021-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2020-12-31 23:59:57\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 minute_second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 minute_second)", + "Value": 
"CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' minute_second)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 quarter)", + "Value": "CHAR(\"2028-09-30 
23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 quarter)", + "Value": "CHAR(\"2013-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 quarter)", + "Value": "CHAR(\"2028-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 quarter)", + "Value": "CHAR(\"2013-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": 
"SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 quarter)", + "Value": "CHAR(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 quarter)", + "Value": "CHAR(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2022-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2019-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 quarter)", + "Value": "CHAR(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 quarter)", + "Value": "CHAR(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', 
INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', 
INTERVAL '-1 10' second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 second)", + "Value": "CHAR(\"2021-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 second)", + "Value": "CHAR(\"2020-12-31 23:59:28\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 second)", + "Value": "CHAR(\"2021-01-01 00:00:29\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 second)", + "Value": "CHAR(\"2020-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' second)", + "Value": "CHAR(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' second)", + "Value": "CHAR(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 second)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 second)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' second)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' second)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' 
second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 second)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 second)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 second)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 second)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 second)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 second)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' second)", + "Value": "CHAR(\"2021-01-01 00:00:05\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' second)", + "Value": "CHAR(\"2020-12-31 23:59:53\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 second)", + "Value": 
"CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 second)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2021-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2020-12-31 23:59:57\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": 
"SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": 
"SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', 
INTERVAL 1.5 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:05.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:52.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) 
second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 year_month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 year_month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2022-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2019-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2019-02-28 23:59:59\")" + }, + { + "Query": 
"SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2022-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2022-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2019-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 year_month)", + "Value": "CHAR(\"2023-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 year_month)", + "Value": "CHAR(\"2018-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 year_month)", + "Value": "CHAR(\"2023-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 year_month)", + "Value": "CHAR(\"2018-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 year_month)", + "Value": "CHAR(\"2105-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 year_month)", + "Value": "CHAR(\"1936-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' year_month)", + "Value": "CHAR(\"2105-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' year_month)", + "Value": "CHAR(\"1936-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + 
"Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2019-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2022-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2022-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2019-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2022-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2019-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"2438-08-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"1603-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 year_month)", + "Value": "CHAR(\"2438-08-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 year_month)", + "Value": "CHAR(\"1603-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2027-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2014-08-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2022-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2019-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 
23:59:59', INTERVAL 1.5000e0 year_month)", + "Value": "CHAR(\"2022-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 year_month)", + "Value": "CHAR(\"2019-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2022-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2019-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' year_month)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' year_month)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + 
}, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 day)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 day)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 day)", + "Value": "CHAR(\"2025-01-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 day)", + "Value": "CHAR(\"2024-12-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 day)", + "Value": "CHAR(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 day)", + "Value": "CHAR(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' day)", + "Value": "CHAR(\"2025-01-02 
00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 day)", + "Value": "CHAR(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 day)", + "Value": "CHAR(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 day)", + "Value": "CHAR(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 day)", + "Value": "CHAR(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 day)", + "Value": "CHAR(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 day)", + "Value": "CHAR(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' day)", + "Value": 
"CHAR(\"2025-01-07 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' day)", + "Value": "CHAR(\"2024-12-26 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 day)", + "Value": "CHAR(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 day)", + "Value": "CHAR(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "CHAR(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "CHAR(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2024-12-31 
00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 week)", + "Value": "CHAR(\"2025-08-06 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 week)", + "Value": "CHAR(\"2024-05-29 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 week)", + "Value": "CHAR(\"2025-07-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 week)", + "Value": "CHAR(\"2024-06-05 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' week)", + 
"Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 week)", + "Value": "CHAR(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 week)", + "Value": "CHAR(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 week)", + "Value": "CHAR(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 week)", + "Value": "CHAR(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 week)", + "Value": "CHAR(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT 
DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 week)", + "Value": "CHAR(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 week)", + "Value": "CHAR(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 week)", + "Value": "CHAR(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' week)", + "Value": "CHAR(\"2025-02-12 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' week)", + "Value": "CHAR(\"2024-11-20 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 week)", + "Value": "CHAR(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 week)", + "Value": "CHAR(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "CHAR(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "CHAR(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 
00:00:00', INTERVAL '1.0' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 month)", + "Value": "CHAR(\"2027-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 month)", + "Value": "CHAR(\"2022-06-01 00:00:00\")" + }, + { 
+ "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 month)", + "Value": "CHAR(\"2027-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 month)", + "Value": "CHAR(\"2022-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 month)", + "Value": "CHAR(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 month)", + "Value": "CHAR(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 
'1:10' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 month)", + "Value": "CHAR(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 month)", + "Value": "CHAR(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 month)", + "Value": "CHAR(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 month)", + "Value": "CHAR(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 month)", + "Value": "CHAR(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 month)", + "Value": "CHAR(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' month)", + "Value": "CHAR(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' month)", + "Value": "CHAR(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": 
"CHAR(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "CHAR(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT 
DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 year)", + "Value": "CHAR(\"2056-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 year)", + "Value": "CHAR(\"1994-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 year)", + "Value": "CHAR(\"2055-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 year)", + "Value": "CHAR(\"1995-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 year)", + "Value": "CHAR(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 year)", + "Value": "CHAR(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2024-01-01 
00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 year)", + "Value": "CHAR(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 year)", + "Value": "CHAR(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 year)", + "Value": "CHAR(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 year)", + "Value": "CHAR(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 year)", + "Value": "CHAR(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 year)", + "Value": "CHAR(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' year)", + "Value": "CHAR(\"2031-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' year)", + "Value": "CHAR(\"2019-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 year)", + "Value": "CHAR(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 year)", + 
"Value": "CHAR(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 day_hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 day_hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2025-01-02 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2024-12-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_hour)", + 
"Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 day_hour)", + "Value": "CHAR(\"2025-01-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 day_hour)", + "Value": "CHAR(\"2024-12-30 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 day_hour)", + "Value": "CHAR(\"2025-01-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 day_hour)", + "Value": "CHAR(\"2024-12-30 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"2139-01-31 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"1910-12-03 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2025-02-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2024-11-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2025-02-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2024-11-19 09:00:00\")" + }, + { + "Query": "SELECT 
DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2025-07-29 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2024-06-05 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2025-07-29 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2024-06-05 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2025-01-07 04:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2024-12-25 20:00:00\")" + }, + { + "Query": "SELECT 
DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 day_microsecond)", + "Value": 
"CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' 
day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT 
DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": 
"CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 day_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 day_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2025-01-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2024-12-31 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT 
DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 day_minute)", + "Value": "CHAR(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 day_minute)", + "Value": "CHAR(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 day_minute)", + "Value": "CHAR(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 day_minute)", + "Value": "CHAR(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2026-11-26 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2023-02-06 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', 
INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2025-01-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2024-12-31 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + 
"Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 
day_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 
day_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 day_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 day_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 
1.5000 day_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 day_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 day_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { 
+ "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' day_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 hour)", + "Value": "CHAR(\"2025-01-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 hour)", 
+ "Value": "CHAR(\"2024-12-30 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 hour)", + "Value": "CHAR(\"2025-01-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 hour)", + "Value": "CHAR(\"2024-12-30 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT 
DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' hour)", + "Value": "CHAR(\"2025-01-01 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' hour)", + "Value": "CHAR(\"2024-12-31 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) 
hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": 
"SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 
01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' 
hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' hour_microsecond)", + "Value": 
"CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:30:00\")" + 
}, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' hour_minute)", + "Value": "CHAR(\"2026-11-26 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' hour_minute)", + "Value": "CHAR(\"2023-02-06 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 
hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 hour_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 hour_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2025-01-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2024-12-31 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + 
"Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT 
DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' 
hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_second)", + "Value": 
"CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + 
}, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000031\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999969\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000030\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999970\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT 
DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT 
DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000006\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999994\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2025-01-01 
00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' minute)", + "Value": 
"CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 minute)", + "Value": "CHAR(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 minute)", + "Value": "CHAR(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 minute)", + "Value": "CHAR(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 minute)", + "Value": "CHAR(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT 
DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' minute)", + "Value": "CHAR(\"2025-01-01 00:06:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' minute)", + "Value": "CHAR(\"2024-12-31 23:54:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) 
minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT 
DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 
00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT 
DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT 
DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + 
"Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' minute_second)", + "Value": "CHAR(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' minute_second)", + "Value": "CHAR(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', 
INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 minute_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 minute_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + 
"Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' 
quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 quarter)", + "Value": "CHAR(\"2032-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 quarter)", + "Value": "CHAR(\"2017-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 quarter)", + "Value": "CHAR(\"2032-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 quarter)", + "Value": "CHAR(\"2017-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2025-04-01 
00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 quarter)", + "Value": "CHAR(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 quarter)", + "Value": "CHAR(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2026-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2023-07-01 00:00:00\")" + }, + { + "Query": "SELECT 
DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 quarter)", + "Value": "CHAR(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 quarter)", + "Value": "CHAR(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + 
}, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' second)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' second)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', 
INTERVAL 1.999 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' second)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' second)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', 
INTERVAL 1.5000 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' second)", + "Value": "CHAR(\"2025-01-01 00:00:06\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' second)", + "Value": "CHAR(\"2024-12-31 23:59:54\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 second)", + "Value": "CHAR(\"2024-12-31 
23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 
00:00:00', INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' second_microsecond)", + 
"Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT 
DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 year_month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 
1 year_month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2026-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2023-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2023-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2026-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2026-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2023-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 year_month)", + "Value": "CHAR(\"2027-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 year_month)", + "Value": "CHAR(\"2022-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 year_month)", + "Value": "CHAR(\"2027-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 year_month)", + "Value": "CHAR(\"2022-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 year_month)", + "Value": "CHAR(\"2109-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 
00:00:00', INTERVAL 1.999 year_month)", + "Value": "CHAR(\"1940-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' year_month)", + "Value": "CHAR(\"2109-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' year_month)", + "Value": "CHAR(\"1940-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2023-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2026-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2026-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2023-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2026-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2023-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"2442-09-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"1607-05-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 year_month)", + "Value": "CHAR(\"2442-09-01 00:00:00\")" + }, + { + 
"Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 year_month)", + "Value": "CHAR(\"1607-05-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2031-05-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2018-09-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2026-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2023-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 year_month)", + "Value": "CHAR(\"2026-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 year_month)", + "Value": "CHAR(\"2023-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2026-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2023-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' year_month)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' 
year_month)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 day)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 day)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 day)", + "Value": "CHAR(\"2025-01-31\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 day)", + "Value": "CHAR(\"2024-12-02\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + 
"Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' day)", + "Value": "CHAR(\"2025-01-07\")" + }, + { + "Query": "SELECT 
DATE_SUB(20250101, INTERVAL '6/4' day)", + "Value": "CHAR(\"2024-12-26\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' week)", + "Value": 
"CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 week)", + "Value": "CHAR(\"2025-08-06\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 week)", + "Value": "CHAR(\"2024-05-29\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 week)", + "Value": "CHAR(\"2025-07-30\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 week)", + "Value": "CHAR(\"2024-06-05\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' week)", + 
"Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' week)", + "Value": "CHAR(\"2025-02-12\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' week)", + "Value": "CHAR(\"2024-11-20\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 
AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' month)", + "Value": "CHAR(\"2024-12-01\")" + }, 
+ { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 month)", + "Value": "CHAR(\"2027-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 month)", + "Value": "CHAR(\"2022-06-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 month)", + "Value": "CHAR(\"2027-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 month)", + "Value": "CHAR(\"2022-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 month)", + "Value": 
"CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' month)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' month)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { 
+ "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 year)", + "Value": "CHAR(\"2056-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 year)", + "Value": "CHAR(\"1994-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 year)", + "Value": "CHAR(\"2055-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 year)", + "Value": "CHAR(\"1995-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + 
"Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' year)", + "Value": "CHAR(\"2031-01-01\")" + }, + { + 
"Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' year)", + "Value": "CHAR(\"2019-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 day_hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 day_hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2025-01-02 01:00:00\")" + }, + { + 
"Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2024-12-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 day_hour)", + "Value": "CHAR(\"2025-01-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 day_hour)", + "Value": "CHAR(\"2024-12-30 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 day_hour)", + "Value": "CHAR(\"2025-01-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 day_hour)", + "Value": "CHAR(\"2024-12-30 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"2139-01-31 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"1910-12-03 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2025-02-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2024-11-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2025-02-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2024-11-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 
'1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2025-07-29 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2024-06-05 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2025-07-29 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2024-06-05 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2025-01-07 04:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2024-12-25 20:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT 
DATE_ADD(20250101, INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + 
"Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT 
DATE_SUB(20250101, INTERVAL '1.999' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT 
DATE_ADD(20250101, INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2025-01-01 
00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 day_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 day_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2025-01-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2024-12-31 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 day_minute)", + "Value": "CHAR(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 day_minute)", + "Value": "CHAR(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 day_minute)", + "Value": "CHAR(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 day_minute)", + "Value": "CHAR(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2026-11-26 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2023-02-06 
12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT 
DATE_SUB(20250101, INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2025-01-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2024-12-31 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT 
DATE_SUB(20250101, INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2024-12-20 
10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 day_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 day_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 day_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 day_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 
6/4 day_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 day_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' 
day_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' day_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 hour)", + "Value": "CHAR(\"2025-01-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 hour)", + "Value": "CHAR(\"2024-12-30 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 hour)", + "Value": "CHAR(\"2025-01-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 hour)", + "Value": "CHAR(\"2024-12-30 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, 
+ { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 
hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' hour)", + "Value": "CHAR(\"2025-01-01 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' hour)", + "Value": "CHAR(\"2024-12-31 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, 
INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' 
hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 
1.5000 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 
00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 
hour_minute)", + "Value": "CHAR(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' hour_minute)", + "Value": "CHAR(\"2026-11-26 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' hour_minute)", + "Value": "CHAR(\"2023-02-06 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 hour_minute)", + "Value": 
"CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 hour_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 hour_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2025-01-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2024-12-31 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 hour_minute)", + "Value": 
"CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 hour_second)", + "Value": 
"CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" 
+ }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" 
+ }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 microsecond)", + "Value": "CHAR(\"2025-01-01 
00:00:00.000031\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999969\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000030\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999970\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, 
INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000006\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999994\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": 
"CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' minute)", + "Value": 
"CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 minute)", + "Value": "CHAR(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 minute)", + "Value": "CHAR(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 minute)", + "Value": "CHAR(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 minute)", + "Value": "CHAR(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2024-12-31 
23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' minute)", + "Value": "CHAR(\"2025-01-01 00:06:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' minute)", + "Value": "CHAR(\"2024-12-31 23:54:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "CHAR(\"2024-12-31 
23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + 
{ + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' 
minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 
minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT 
DATE_SUB(20250101, INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' minute_second)", + "Value": "CHAR(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' minute_second)", + "Value": "CHAR(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2024-12-31 
23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 minute_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 minute_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2025-01-01 
00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' quarter)", + "Value": 
"CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 quarter)", + "Value": "CHAR(\"2032-10-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 quarter)", + "Value": "CHAR(\"2017-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 quarter)", + "Value": "CHAR(\"2032-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 quarter)", + "Value": "CHAR(\"2017-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT 
DATE_SUB(20250101, INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2026-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2023-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 quarter)", + "Value": 
"CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + 
"Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' second)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' second)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' second)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' second)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2024-12-31 
23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' second)", + "Value": "CHAR(\"2025-01-01 00:00:06\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' second)", + "Value": "CHAR(\"2024-12-31 23:59:54\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS 
DECIMAL(3,1)) second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' 
second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' 
second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, 
+ { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 year_month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 year_month)", + "Value": 
"CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2026-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2023-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2023-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2026-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2026-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2023-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 year_month)", + "Value": "CHAR(\"2027-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 year_month)", + "Value": "CHAR(\"2022-06-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 year_month)", + "Value": "CHAR(\"2027-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 year_month)", + "Value": "CHAR(\"2022-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 year_month)", + "Value": "CHAR(\"2109-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 year_month)", + "Value": "CHAR(\"1940-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' year_month)", + "Value": "CHAR(\"2109-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' year_month)", + "Value": "CHAR(\"1940-10-01\")" + }, + { + "Query": "SELECT 
DATE_ADD(20250101, INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2023-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2026-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2026-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2023-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2026-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2023-08-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"2442-09-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"1607-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 year_month)", + "Value": "CHAR(\"2442-09-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 year_month)", + "Value": "CHAR(\"1607-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2031-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2018-09-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2026-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2023-08-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 year_month)", + "Value": 
"CHAR(\"2026-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 year_month)", + "Value": "CHAR(\"2023-08-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2026-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2023-08-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' year_month)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' year_month)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' day)", + "Value": 
"NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' day)", + "Value": "NULL" + }, + { + "Query": 
"SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' day)", + "Value": "NULL" + 
}, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 week)", + "Value": "NULL" + }, + { + "Query": "SELECT 
DATE_SUB('pokemon trainers', INTERVAL 1.999 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon 
trainers', INTERVAL 1.5e0 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' month)", 
+ "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' 
month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 month)", + "Value": "NULL" + }, + { + 
"Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' year)", + "Value": "NULL" + }, + { + "Query": "SELECT 
DATE_ADD('pokemon trainers', INTERVAL 1.999 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon 
trainers', INTERVAL 1.5e0 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 
1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' day_hour)", + "Value": 
"NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) 
day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT 
DATE_ADD('pokemon trainers', INTERVAL 30 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT 
DATE_ADD('pokemon trainers', INTERVAL 1.5000 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' day_microsecond)", + 
"Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 day_minute)", 
+ "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' day_minute)", + 
"Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' day_second)", + "Value": "NULL" + }, + { + "Query": 
"SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' day_second)", + "Value": "NULL" + }, + { + 
"Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + 
"Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 hour)", + "Value": "NULL" + }, + { + "Query": 
"SELECT DATE_SUB('pokemon trainers', INTERVAL 31 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT 
DATE_SUB('pokemon trainers', INTERVAL 1.5000 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 
hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' 
hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 
hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": 
"SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' hour_minute)", + "Value": 
"NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 
CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' hour_second)", + "Value": 
"NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 
hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' hour_second)", + "Value": 
"NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' microsecond)", 
+ "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' 
microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' minute)", + "Value": "NULL" + }, 
+ { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' 
minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "NULL" + }, + { + "Query": 
"SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon 
trainers', INTERVAL 31 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' minute_microsecond)", + "Value": "NULL" + }, 
+ { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": 
"SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', 
INTERVAL 30 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 minute_second)", + "Value": 
"NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' 
minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT 
DATE_SUB('pokemon trainers', INTERVAL '1.999' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 quarter)", + 
"Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' 
second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon 
trainers', INTERVAL '1:10' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 
'1.0' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' 
second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 second_microsecond)", + "Value": "NULL" + }, + { + "Query": 
"SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT 
DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT 
DATE_ADD('pokemon trainers', INTERVAL '1.999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT 
DATE_SUB('pokemon trainers', INTERVAL 1.5e0 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 
1:1:1' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 day)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 day)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 day)", + "Value": "CHAR(\"2025-01-31\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 day)", + "Value": "CHAR(\"2024-12-02\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT 
DATE_ADD('20250101', INTERVAL '-1:10' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' day)", + "Value": "CHAR(\"2025-01-07\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' day)", + "Value": "CHAR(\"2024-12-26\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", 
+ "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 week)", + "Value": "CHAR(\"2025-08-06\")" + }, + { + "Query": "SELECT 
DATE_SUB('20250101', INTERVAL 31 week)", + "Value": "CHAR(\"2024-05-29\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 week)", + "Value": "CHAR(\"2025-07-30\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 week)", + "Value": "CHAR(\"2024-06-05\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 week)", + "Value": 
"CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' week)", + "Value": "CHAR(\"2025-02-12\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' week)", + "Value": "CHAR(\"2024-11-20\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' week)", + "Value": 
"CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 month)", + "Value": "CHAR(\"2027-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 month)", + "Value": "CHAR(\"2022-06-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 month)", + "Value": "CHAR(\"2027-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 month)", + "Value": "CHAR(\"2022-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT 
DATE_ADD('20250101', INTERVAL 1.999 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' month)", + 
"Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' month)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' 
year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 year)", + "Value": "CHAR(\"2056-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 year)", + "Value": "CHAR(\"1994-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 year)", + "Value": "CHAR(\"2055-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 year)", + "Value": "CHAR(\"1995-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT 
DATE_SUB('20250101', INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' year)", + "Value": "CHAR(\"2031-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' year)", + "Value": "CHAR(\"2019-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 year)", + "Value": 
"CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 day_hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 day_hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2025-01-02 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2024-12-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT 
DATE_SUB('20250101', INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 day_hour)", + "Value": "CHAR(\"2025-01-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 day_hour)", + "Value": "CHAR(\"2024-12-30 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 day_hour)", + "Value": "CHAR(\"2025-01-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 day_hour)", + "Value": "CHAR(\"2024-12-30 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"2139-01-31 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"1910-12-03 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2025-02-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2024-11-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2025-02-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2024-11-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' day_hour)", + 
"Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2025-07-29 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2024-06-05 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2025-07-29 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2024-06-05 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2025-01-07 04:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2024-12-25 20:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT 
DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' day_microsecond)", + 
"Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' 
day_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 
1.5e0 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 day_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + 
"Query": "SELECT DATE_SUB('20250101', INTERVAL 1 day_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2025-01-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2024-12-31 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 day_minute)", + "Value": "CHAR(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 day_minute)", + "Value": "CHAR(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 day_minute)", + "Value": "CHAR(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 day_minute)", + "Value": "CHAR(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2026-11-26 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2023-02-06 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2024-12-31 
06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2025-01-01 06:04:00\")" + }, + { 
+ "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2024-12-31 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 day_second)", + "Value": 
"CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 day_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": 
"SELECT DATE_SUB('20250101', INTERVAL 1.999 day_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 day_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 day_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 day_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 
day_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT 
DATE_SUB('20250101', INTERVAL '1.0foobar' day_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 hour)", + "Value": "CHAR(\"2025-01-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 hour)", + "Value": "CHAR(\"2024-12-30 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 hour)", + "Value": "CHAR(\"2025-01-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 hour)", + "Value": "CHAR(\"2024-12-30 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', 
INTERVAL 1.999 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 hour)", + 
"Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' hour)", + "Value": "CHAR(\"2025-01-01 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' hour)", + "Value": "CHAR(\"2024-12-31 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": 
"SELECT DATE_ADD('20250101', INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + 
"Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 hour_microsecond)", + "Value": 
"CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" 
+ }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT 
DATE_ADD('20250101', INTERVAL 31 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' hour_minute)", + "Value": "CHAR(\"2026-11-26 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' hour_minute)", + "Value": "CHAR(\"2023-02-06 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT 
DATE_SUB('20250101', INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 hour_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 hour_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2025-01-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2024-12-31 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + 
"Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' 
hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' 
hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS 
DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { 
+ "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000031\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999969\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000030\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999970\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 
'1:1 1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000006\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999994\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2025-01-01 
00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { 
+ "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 minute)", + "Value": "CHAR(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 minute)", + "Value": "CHAR(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 minute)", + "Value": "CHAR(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 minute)", + "Value": "CHAR(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" 
+ }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' minute)", + "Value": "CHAR(\"2025-01-01 00:06:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' minute)", + "Value": "CHAR(\"2024-12-31 23:54:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2024-12-31 
23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' minute_microsecond)", + 
"Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' minute_microsecond)", + "Value": 
"CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT 
DATE_ADD('20250101', INTERVAL '6/4' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', 
INTERVAL '1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' minute_second)", + "Value": 
"CHAR(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' minute_second)", + "Value": "CHAR(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 minute_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', 
INTERVAL 1.5000 minute_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' minute_second)", + "Value": 
"CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 quarter)", + "Value": "CHAR(\"2032-10-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 quarter)", + "Value": "CHAR(\"2017-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 quarter)", + "Value": "CHAR(\"2032-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 quarter)", + "Value": "CHAR(\"2017-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 
'1.999999' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 
quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2026-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2023-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { 
+ "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' second)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' second)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 second)", + "Value": "CHAR(\"2025-01-01 
00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' second)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' second)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT 
DATE_SUB('20250101', INTERVAL 6/4 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' second)", + "Value": "CHAR(\"2025-01-01 00:00:06\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' second)", + "Value": "CHAR(\"2024-12-31 23:59:54\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": 
"SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 
'1.999999' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 second_microsecond)", + "Value": 
"CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": 
"CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 year_month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 year_month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2026-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2023-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2023-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2026-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2026-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2023-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 
year_month)", + "Value": "CHAR(\"2027-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 year_month)", + "Value": "CHAR(\"2022-06-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 year_month)", + "Value": "CHAR(\"2027-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 year_month)", + "Value": "CHAR(\"2022-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 year_month)", + "Value": "CHAR(\"2109-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 year_month)", + "Value": "CHAR(\"1940-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' year_month)", + "Value": "CHAR(\"2109-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' year_month)", + "Value": "CHAR(\"1940-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2023-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2026-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2026-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2023-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2026-06-01\")" + }, + 
{ + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2023-08-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"2442-09-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"1607-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 year_month)", + "Value": "CHAR(\"2442-09-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 year_month)", + "Value": "CHAR(\"1607-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2031-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2018-09-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2026-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2023-08-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 year_month)", + "Value": "CHAR(\"2026-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 year_month)", + "Value": "CHAR(\"2023-08-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2026-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2023-08-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": 
"SELECT DATE_ADD('20250101', INTERVAL '1.0' year_month)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' year_month)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2024-01-01\")" + } +] diff --git a/go/vt/vtgate/evalengine/mysql_test.go b/go/vt/vtgate/evalengine/mysql_test.go index bf5b8193409..d63962f78c2 100644 --- a/go/vt/vtgate/evalengine/mysql_test.go +++ b/go/vt/vtgate/evalengine/mysql_test.go @@ -20,9 +20,9 @@ import ( "context" "encoding/json" "errors" - "fmt" "os" "path/filepath" + "slices" "strings" "testing" @@ -81,7 +81,13 @@ func testSingle(t *testing.T, query string) (EvalResult, error) { } func TestMySQLGolden(t *testing.T) { + const Target = 0 + + var testcount int + golden, _ := filepath.Glob("integration/testdata/*.json") + slices.Sort(golden) + for _, gld := range golden { t.Run(filepath.Base(gld), func(t *testing.T) { var testcases []struct { @@ -102,7 +108,11 @@ func TestMySQLGolden(t *testing.T) { var ok int for _, tc := range testcases { - debug := fmt.Sprintf("\n// Debug\neval, err := testSingle(t, `%s`)\nt.Logf(\"eval=%%s err=%%v\", eval.Value(), err) // want value=%q\n", tc.Query, tc.Value) + testcount++ + if Target != 0 && Target != testcount { + continue + } + eval, err := testSingle(t, tc.Query) if err == errKnownBadQuery { ok++ @@ -110,20 +120,20 @@ func TestMySQLGolden(t *testing.T) { } if err != nil { if tc.Error == "" { - t.Errorf("query: %s\nmysql val: %s\nvitess err: %s\n%s", tc.Query, tc.Value, err.Error(), debug) + t.Errorf("query %d: %s\nmysql val: %s\nvitess err: %s", testcount, tc.Query, tc.Value, err.Error()) } else if !strings.HasPrefix(tc.Error, err.Error()) { - t.Errorf("query: %s\nmysql err: %s\nvitess err: %s\n%s", tc.Query, tc.Error, err.Error(), debug) + 
t.Errorf("query %d: %s\nmysql err: %s\nvitess err: %s", testcount, tc.Query, tc.Error, err.Error()) } else { ok++ } continue } if tc.Error != "" { - t.Errorf("query: %s\nmysql err: %s\nvitess val: %s\n%s", tc.Query, tc.Error, eval.String(), debug) + t.Errorf("query %d: %s\nmysql err: %s\nvitess val: %s", testcount, tc.Query, tc.Error, eval.Value(collations.Default())) continue } if eval.String() != tc.Value { - t.Errorf("query: %s\nmysql val: %s\nvitess val: %s\n%s", tc.Query, tc.Value, eval.String(), debug) + t.Errorf("query %d: %s\nmysql val: %s\nvitess val: %s", testcount, tc.Query, tc.Value, eval.Value(collations.Default())) continue } ok++ @@ -136,6 +146,6 @@ func TestMySQLGolden(t *testing.T) { func TestDebug1(t *testing.T) { // Debug - eval, err := testSingle(t, `SELECT DATE_FORMAT(TIMESTAMP '1999-12-31 23:59:58.999', "%a %b %c %D %d %e %f %H %h %I %i %j %k %l %M %m %p %r %S %s %T %U %u %V %v %W %w %X %x %Y %y %%")`) - t.Logf("eval=%s err=%v coll=%s", eval.String(), err, eval.Collation().Get().Name()) + eval, err := testSingle(t, `SELECT _latin1 0xFF regexp _latin1 '[[:lower:]]' COLLATE latin1_bin`) + t.Logf("eval=%s err=%v coll=%s", eval.String(), err, collations.Local().LookupName(eval.Collation())) } diff --git a/go/vt/vtgate/evalengine/testcases/cases.go b/go/vt/vtgate/evalengine/testcases/cases.go index cb03c6cb08f..cd52631c00c 100644 --- a/go/vt/vtgate/evalengine/testcases/cases.go +++ b/go/vt/vtgate/evalengine/testcases/cases.go @@ -150,6 +150,11 @@ var Cases = []TestCase{ {Run: FnIsUUID}, {Run: FnUUID}, {Run: FnUUIDToBin}, + {Run: DateMath}, + {Run: RegexpLike}, + {Run: RegexpInstr}, + {Run: RegexpSubstr}, + {Run: RegexpReplace}, } func JSONPathOperations(yield Query) { @@ -824,10 +829,47 @@ func BitwiseOperators(yield Query) { func WeightString(yield Query) { var inputs = []string{ `'foobar'`, `_latin1 'foobar'`, - `'foobar' as char(12)`, `'foobar' as binary(12)`, + `'foobar' as char(12)`, `'foobar' as char(3)`, `'foobar' as binary(12)`, `'foobar' 
as binary(3)`, + `'foobar' collate utf8mb4_bin as char(12)`, `'foobar' collate utf8mb4_bin as char(3)`, + `'foobar' collate binary as char(12)`, `'foobar' collate binary as char(3)`, `_latin1 'foobar' as char(12)`, `_latin1 'foobar' as binary(12)`, + `_binary 'foobar' as char(12)`, `_binary 'foobar' as binary(12)`, + `1`, `-1`, `9223372036854775807`, `18446744073709551615`, `-9223372036854775808`, + `1 as char(1)`, `-1 as char(1)`, `9223372036854775807 as char(1)`, `18446744073709551615 as char(1)`, `-9223372036854775808 as char(1)`, + `1 as char(32)`, `-1 as char(32)`, `9223372036854775807 as char(32)`, `18446744073709551615 as char(32)`, `-9223372036854775808 as char(32)`, + `1 as binary(1)`, `-1 as binary(1)`, `9223372036854775807 as binary(1)`, `18446744073709551615 as binary(1)`, `-9223372036854775808 as binary(1)`, + `1 as binary(32)`, `-1 as binary(32)`, `9223372036854775807 as binary(32)`, `18446744073709551615 as binary(32)`, `-9223372036854775808 as binary(32)`, `1234.0`, `12340e0`, `0x1234`, `0x1234 as char(12)`, `0x1234 as char(2)`, + `date'2000-01-01'`, `date'2000-01-01' as char(12)`, `date'2000-01-01' as char(2)`, `date'2000-01-01' as binary(12)`, `date'2000-01-01' as binary(2)`, + `timestamp'2000-01-01 11:22:33'`, `timestamp'2000-01-01 11:22:33' as char(12)`, `timestamp'2000-01-01 11:22:33' as char(2)`, `timestamp'2000-01-01 11:22:33' as binary(12)`, `timestamp'2000-01-01 11:22:33' as binary(2)`, + `timestamp'2000-01-01 11:22:33.123456'`, `timestamp'2000-01-01 11:22:33.123456' as char(12)`, `timestamp'2000-01-01 11:22:33.123456' as char(2)`, `timestamp'2000-01-01 11:22:33.123456' as binary(12)`, `timestamp'2000-01-01 11:22:33.123456' as binary(2)`, + `time'-11:22:33'`, `time'-11:22:33' as char(12)`, `time'-11:22:33' as char(2)`, `time'-11:22:33' as binary(12)`, `time'-11:22:33' as binary(2)`, + `time'11:22:33'`, `time'11:22:33' as char(12)`, `time'11:22:33' as char(2)`, `time'11:22:33' as binary(12)`, `time'11:22:33' as binary(2)`, + 
`time'101:22:33'`, `time'101:22:33' as char(12)`, `time'101:22:33' as char(2)`, `time'101:22:33' as binary(12)`, `time'101:22:33' as binary(2)`, + "cast(0 as json)", "cast(1 as json)", + "cast(true as json)", "cast(false as json)", + "cast('{}' as json)", "cast('[]' as json)", + "cast('null' as json)", "cast('true' as json)", "cast('false' as json)", + "cast('1' as json)", "cast('2' as json)", "cast('1.1' as json)", "cast('-1.1' as json)", + "cast('9223372036854775807' as json)", "cast('18446744073709551615' as json)", + // JSON strings + "cast('\"foo\"' as json)", "cast('\"bar\"' as json)", "cast('invalid' as json)", + // JSON binary values + "cast(_binary' \"foo\"' as json)", "cast(_binary '\"bar\"' as json)", + "cast(0xFF666F6F626172FF as json)", "cast(0x666F6F626172FF as json)", + "cast(0b01 as json)", "cast(0b001 as json)", + // JSON arrays + "cast('[\"a\"]' as json)", "cast('[\"ab\"]' as json)", + "cast('[\"ab\", \"cd\", \"ef\"]' as json)", "cast('[\"ab\", \"ef\"]' as json)", + // JSON objects + "cast('{\"a\": 1, \"b\": 2}' as json)", "cast('{\"b\": 2, \"a\": 1}' as json)", + "cast('{\"c\": 1, \"b\": 2}' as json)", "cast('{\"b\": 2, \"c\": 1}' as json)", + "cast(' \"b\": 2}' as json)", "cast('\"a\": 1' as json)", + // JSON date, datetime & time + "cast(date '2000-01-01' as json)", "cast(date '2000-01-02' as json)", + "cast(timestamp '2000-01-01 12:34:58' as json)", + "cast(time '12:34:56' as json)", "cast(time '12:34:58' as json)", "cast(time '5 12:34:58' as json)", } for _, i := range inputs { @@ -1847,3 +1889,337 @@ func FnUUIDToBin(yield Query) { } } } + +func DateMath(yield Query) { + dates := []string{ + `DATE'2018-05-01'`, + `TIMESTAMP'2020-12-31 23:59:59'`, + `TIMESTAMP'2025-01-01 00:00:00'`, + `'2018-05-01'`, + `'2020-12-31 23:59:59'`, + `'2025-01-01 00:00:00'`, + `20250101`, + `'pokemon trainers'`, + `'20250101'`, + } + intervalValues := []string{ + `1`, `'1:1'`, `'1 1:1:1'`, `'-1 10'`, `'1 10'`, `31`, `30`, `'1.999999'`, `1.999`, `'1.999'`, + 
`'1:1:1:1'`, `'1:1 1:1'`, `'-1:10'`, `'1:10'`, `1.5`, `1.5000`, `6/4`, `'6/4'`, `1.5e0`, `1.5000e0`, + `CAST(6/4 AS DECIMAL(3,1))`, `CAST(6/4 AS DECIMAL(3,0))`, `1e0`, `'1.0'`, `'1.0foobar'`, + } + mysqlDocSamples := []string{ + `DATE_ADD(DATE'2018-05-01',INTERVAL 1 DAY)`, + `DATE_SUB(DATE'2018-05-01',INTERVAL 1 YEAR)`, + `DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 SECOND)`, + `DATE_ADD(TIMESTAMP'2018-12-31 23:59:59', INTERVAL 1 DAY)`, + `DATE_ADD(TIMESTAMP'2100-12-31 23:59:59', INTERVAL '1:1' MINUTE_SECOND)`, + `DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' DAY_SECOND)`, + `DATE_ADD(TIMESTAMP'1900-01-01 00:00:00', INTERVAL '-1 10' DAY_HOUR)`, + `DATE_SUB(DATE'1998-01-02', INTERVAL 31 DAY)`, + `DATE_ADD(TIMESTAMP'1992-12-31 23:59:59.000002', INTERVAL '1.999999' SECOND_MICROSECOND)`, + `DATE_ADD(DATE'2024-03-30', INTERVAL 1 MONTH)`, + `DATE_ADD(DATE'2024-03-31', INTERVAL 1 MONTH)`, + `TIMESTAMPADD(MINUTE, 1, '2003-01-02')`, + `TIMESTAMPADD(WEEK,1,'2003-01-02')`, + `TIMESTAMPADD(MONTH, 1, DATE '2024-03-30')`, + `TIMESTAMPADD(MONTH, 1, DATE '2024-03-31')`, + } + + for _, q := range mysqlDocSamples { + yield(q, nil) + } + + for _, d := range dates { + for _, i := range inputIntervals { + for _, v := range intervalValues { + yield(fmt.Sprintf("DATE_ADD(%s, INTERVAL %s %s)", d, v, i), nil) + yield(fmt.Sprintf("DATE_SUB(%s, INTERVAL %s %s)", d, v, i), nil) + yield(fmt.Sprintf("TIMESTAMPADD(%v, %s, %s)", i, v, d), nil) + } + } + } +} + +func RegexpLike(yield Query) { + mysqlDocSamples := []string{ + `'Michael!' REGEXP '.*'`, + `'Michael!' RLIKE '.*'`, + `'Michael!' NOT REGEXP '.*'`, + `'Michael!' 
NOT RLIKE '.*'`, + `'new*\n*line' REGEXP 'new\\*.\\*line'`, + `'a' REGEXP '^[a-d]'`, + `REGEXP_LIKE('CamelCase', 'CAMELCASE')`, + `REGEXP_LIKE('CamelCase', 'CAMELCASE' COLLATE utf8mb4_0900_as_cs)`, + `REGEXP_LIKE('abc', 'ABC'`, + `REGEXP_LIKE('abc', 'ABC', 'c')`, + `REGEXP_LIKE(1234, 12)`, + `REGEXP_LIKE(1234, 12, 'c')`, + `' ' REGEXP '[[:blank:]]'`, + `'\t' REGEXP '[[:blank:]]'`, + `' ' REGEXP '[[:space:]]'`, + `'\t' REGEXP '[[:space:]]'`, + `_latin1 0xFF regexp _latin1 '[[:lower:]]' COLLATE latin1_bin`, + `_koi8r 0xFF regexp _koi8r '[[:lower:]]' COLLATE koi8r_bin`, + `_latin1 0xFF regexp _latin1 '[[:upper:]]' COLLATE latin1_bin`, + `_koi8r 0xFF regexp _koi8r '[[:upper:]]' COLLATE koi8r_bin`, + `_latin1 0xF7 regexp _latin1 '[[:alpha:]]'`, + `_koi8r 0xF7 regexp _koi8r '[[:alpha:]]'`, + `_latin1'a' regexp _latin1'A' collate latin1_general_ci`, + `_latin1'a' regexp _latin1'A' collate latin1_bin`, + + `_latin1 'ÿ' regexp _utf8mb4 'ÿ'`, + `_utf8mb4 'ÿ' regexp _latin1 'ÿ'`, + `convert('ÿ' as char character set latin1) regexp _utf8mb4 'ÿ'`, + `_utf8mb4 'ÿ' regexp convert('ÿ' as char character set latin1)`, + + `'a' regexp '\\p{alphabetic}'`, + `'a' regexp '\\P{alphabetic}'`, + `'👌🏾regexp '\\p{Emoji}\\p{Emoji_modifier}'`, + `'a' regexp '\\p{Lowercase_letter}'`, + `'a' regexp '\\p{Uppercase_letter}'`, + `'A' regexp '\\p{Lowercase_letter}'`, + `'A' regexp '\\p{Uppercase_letter}'`, + `'a' collate utf8mb4_0900_as_cs regexp '\\p{Lowercase_letter}'`, + `'A' collate utf8mb4_0900_as_cs regexp '\\p{Lowercase_letter}'`, + `'a' collate utf8mb4_0900_as_cs regexp '\\p{Uppercase_letter}'`, + `'A' collate utf8mb4_0900_as_cs regexp '\\p{Uppercase_letter}'`, + `0xff REGEXP 0xff`, + `0xff REGEXP 0xfe`, + `cast(time '12:34:58' as json) REGEXP 0xff`, + } + + for _, q := range mysqlDocSamples { + yield(q, nil) + } + + for _, i := range regexInputs { + for _, p := range regexInputs { + yield(fmt.Sprintf("%s REGEXP %s", i, p), nil) + yield(fmt.Sprintf("%s NOT REGEXP %s", i, p), nil) + for _, m 
:= range regexMatchStrings { + yield(fmt.Sprintf("REGEXP_LIKE(%s, %s, %s)", i, p, m), nil) + } + } + } +} + +func RegexpInstr(yield Query) { + mysqlDocSamples := []string{ + `REGEXP_INSTR('Michael!', '.*')`, + `REGEXP_INSTR('new*\n*line', 'new\\*.\\*line')`, + `REGEXP_INSTR('a', '^[a-d]')`, + `REGEXP_INSTR('CamelCase', 'CAMELCASE')`, + `REGEXP_INSTR('CamelCase', 'CAMELCASE' COLLATE utf8mb4_0900_as_cs)`, + `REGEXP_INSTR('abc', 'ABC'`, + `REGEXP_INSTR('abc', 'ABC', 'c')`, + `REGEXP_INSTR('0', '0', 1, 0)`, + `REGEXP_INSTR(' ', '[[:blank:]]')`, + `REGEXP_INSTR('\t', '[[:blank:]]')`, + `REGEXP_INSTR(' ', '[[:space:]]')`, + `REGEXP_INSTR('\t', '[[:space:]]')`, + `REGEXP_INSTR(_latin1 0xFF, _latin1 '[[:lower:]]' COLLATE latin1_bin)`, + `REGEXP_INSTR(_koi8r 0xFF, _koi8r '[[:lower:]]' COLLATE koi8r_bin)`, + `REGEXP_INSTR(_latin1 0xFF, _latin1 '[[:upper:]]' COLLATE latin1_bin)`, + `REGEXP_INSTR(_koi8r 0xFF, _koi8r '[[:upper:]]' COLLATE koi8r_bin)`, + `REGEXP_INSTR(_latin1 0xF7, _latin1 '[[:alpha:]]')`, + `REGEXP_INSTR(_koi8r 0xF7, _koi8r '[[:alpha:]]')`, + `REGEXP_INSTR(_latin1'a', _latin1'A' collate latin1_general_ci)`, + `REGEXP_INSTR(_latin1'a', _latin1'A' collate latin1_bin)`, + `REGEXP_INSTR('a', '\\p{alphabetic}')`, + `REGEXP_INSTR('a', '\\P{alphabetic}')`, + `REGEXP_INSTR('👌🏾, '\\p{Emoji}\\p{Emoji_modifier}')`, + `REGEXP_INSTR('a', '\\p{Lowercase_letter}')`, + `REGEXP_INSTR('a', '\\p{Uppercase_letter}')`, + `REGEXP_INSTR('A', '\\p{Lowercase_letter}')`, + `REGEXP_INSTR('A', '\\p{Uppercase_letter}')`, + `REGEXP_INSTR('a', collate utf8mb4_0900_as_cs regexp '\\p{Lowercase_letter}')`, + `REGEXP_INSTR('A', collate utf8mb4_0900_as_cs regexp '\\p{Lowercase_letter}')`, + `REGEXP_INSTR('a', collate utf8mb4_0900_as_cs regexp '\\p{Uppercase_letter}')`, + `REGEXP_INSTR('A', collate utf8mb4_0900_as_cs regexp '\\p{Uppercase_letter}')`, + `REGEXP_INSTR('dog cat dog', 'dog')`, + `REGEXP_INSTR('dog cat dog', 'dog', 2)`, + `REGEXP_INSTR('dog cat dog', 'dog', 1, 1)`, + `REGEXP_INSTR('dog 
cat dog', 'dog', 1, 1, 0)`, + `REGEXP_INSTR('dog cat dog', 'dog', 1, 1, 1)`, + `REGEXP_INSTR('dog cat dog', 'DOG', 1, 1, 1, 'i')`, + `REGEXP_INSTR('dog cat dog', 'DOG', 1, 1, 1, 'c')`, + `REGEXP_INSTR('dog cat dog', 'dog', 1, 2)`, + `REGEXP_INSTR('dog cat dog', 'dog', 1, 2, 0)`, + `REGEXP_INSTR('dog cat dog', 'dog', 1, 2, 1)`, + `REGEXP_INSTR('dog cat dog', 'DOG', 1, 2, 1, 'i')`, + `REGEXP_INSTR('dog cat dog', 'DOG', 1, 2, 1, 'c')`, + `REGEXP_INSTR('aa aaa aaaa', 'a{2}')`, + `REGEXP_INSTR('aa aaa aaaa', 'a{4}')`, + `REGEXP_INSTR(1234, 12)`, + `REGEXP_INSTR(1234, 12, 1)`, + `REGEXP_INSTR(1234, 12, 100)`, + `REGEXP_INSTR(1234, 12, 1, 1)`, + `REGEXP_INSTR(1234, 12, 1, 1, 1)`, + `REGEXP_INSTR(1234, 12, 1, 1, 1, 'c')`, + `REGEXP_INSTR('', ' ', 1000)`, + `REGEXP_INSTR(' ', ' ', 1000)`, + `REGEXP_INSTR(NULL, 'DOG', 1, 2, 1, 'c')`, + `REGEXP_INSTR('dog cat dog', NULL, 1, 2, 1, 'c')`, + `REGEXP_INSTR('dog cat dog', 'DOG', NULL, 2, 1, 'c')`, + `REGEXP_INSTR('dog cat dog', 'DOG', 1, NULL, 1, 'c')`, + `REGEXP_INSTR('dog cat dog', 'DOG', 1, 2, NULL, 'c')`, + `REGEXP_INSTR('dog cat dog', 'DOG', 1, 2, 1, NULL)`, + + `REGEXP_INSTR('dog cat dog', NULL, 1, 2, 1, 'c')`, + `REGEXP_INSTR('dog cat dog', _latin1 'DOG', NULL, 2, 1, 'c')`, + `REGEXP_INSTR('dog cat dog', _latin1 'DOG', 1, NULL, 1, 'c')`, + `REGEXP_INSTR('dog cat dog', _latin1 'DOG', 1, 2, NULL, 'c')`, + `REGEXP_INSTR('dog cat dog', _latin1 'DOG', 1, 2, 1, NULL)`, + } + + for _, q := range mysqlDocSamples { + yield(q, nil) + } +} + +func RegexpSubstr(yield Query) { + mysqlDocSamples := []string{ + `REGEXP_SUBSTR('Michael!', '.*')`, + `REGEXP_SUBSTR('new*\n*line', 'new\\*.\\*line')`, + `REGEXP_SUBSTR('a', '^[a-d]')`, + `REGEXP_SUBSTR('CamelCase', 'CAMELCASE')`, + `REGEXP_SUBSTR('CamelCase', 'CAMELCASE' COLLATE utf8mb4_0900_as_cs)`, + `REGEXP_SUBSTR('abc', 'ABC'`, + `REGEXP_SUBSTR(' ', '[[:blank:]]')`, + `REGEXP_SUBSTR('\t', '[[:blank:]]')`, + `REGEXP_SUBSTR(' ', '[[:space:]]')`, + `REGEXP_SUBSTR('\t', '[[:space:]]')`, + 
`REGEXP_SUBSTR(_latin1'a', _latin1'A' collate latin1_general_ci)`, + `REGEXP_SUBSTR(_latin1'a', _latin1'A' collate latin1_bin)`, + `REGEXP_SUBSTR('a', '\\p{alphabetic}')`, + `REGEXP_SUBSTR('a', '\\P{alphabetic}')`, + `REGEXP_SUBSTR('👌🏾, '\\p{Emoji}\\p{Emoji_modifier}')`, + `REGEXP_SUBSTR('a', '\\p{Lowercase_letter}')`, + `REGEXP_SUBSTR('a', '\\p{Uppercase_letter}')`, + `REGEXP_SUBSTR('A', '\\p{Lowercase_letter}')`, + `REGEXP_SUBSTR('A', '\\p{Uppercase_letter}')`, + `REGEXP_SUBSTR('a', collate utf8mb4_0900_as_cs regexp '\\p{Lowercase_letter}')`, + `REGEXP_SUBSTR('A', collate utf8mb4_0900_as_cs regexp '\\p{Lowercase_letter}')`, + `REGEXP_SUBSTR('a', collate utf8mb4_0900_as_cs regexp '\\p{Uppercase_letter}')`, + `REGEXP_SUBSTR('A', collate utf8mb4_0900_as_cs regexp '\\p{Uppercase_letter}')`, + `REGEXP_SUBSTR('dog cat dog', 'dog')`, + `REGEXP_SUBSTR('dog cat dog', 'dog', 2)`, + `REGEXP_SUBSTR('dog cat dog', 'dog', 1, 1)`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', 1, 1, 'i')`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', 1, 1, 'c')`, + `REGEXP_SUBSTR('dog cat dog', 'dog', 1, 2)`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', 1, 2, 'i')`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', 1, 2, 'c')`, + `REGEXP_SUBSTR('aa aaa aaaa', 'a{2}')`, + `REGEXP_SUBSTR('aa aaa aaaa', 'a{4}')`, + `REGEXP_SUBSTR(1234, 12)`, + `REGEXP_SUBSTR(1234, 12, 1)`, + `REGEXP_SUBSTR(1234, 12, 100)`, + `REGEXP_SUBSTR(1234, 12, 1, 1)`, + `REGEXP_SUBSTR(1234, 12, 1, 1, 'c')`, + + `REGEXP_SUBSTR(NULL, 'DOG', 1, 1, 'i')`, + `REGEXP_SUBSTR('dog cat dog', NULL, 1, 1, 'i')`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', NULL, 1, 'i')`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', 1, NULL, 'i')`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', 1, 1, NULL)`, + + `REGEXP_SUBSTR(NULL, '[', 1, 1, 'i')`, + `REGEXP_SUBSTR('dog cat dog', '[', NULL, 1, 'i')`, + `REGEXP_SUBSTR('dog cat dog', '[', 1, NULL, 'i')`, + `REGEXP_SUBSTR('dog cat dog', '[', 1, 1, NULL)`, + + `REGEXP_SUBSTR('dog cat dog', 'DOG', 0, 1, 'i')`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', -1, 1, 
'i')`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', 100, 1, 'i')`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', 1, 1, 0)`, + + `REGEXP_SUBSTR(' ', ' ', 1)`, + `REGEXP_SUBSTR(' ', ' ', 2)`, + `REGEXP_SUBSTR(' ', ' ', 3)`, + } + + for _, q := range mysqlDocSamples { + yield(q, nil) + } +} + +func RegexpReplace(yield Query) { + mysqlDocSamples := []string{ + `REGEXP_REPLACE('a b c', 'b', 'X')`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 1, 0)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 1, 1)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 1, 2)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 1, 3)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 2, 0)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 2, 1)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 2, 2)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 2, 3)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 3, 0)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 3, 1)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 3, 2)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 3, 3)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 4, 0)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 4, 1)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 4, 2)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 4, 3)`, + `REGEXP_REPLACE('a', '\\p{Lowercase_letter}', 'X')`, + `REGEXP_REPLACE('a', '\\p{Uppercase_letter}', 'X')`, + `REGEXP_REPLACE('A', '\\p{Lowercase_letter}', 'X')`, + `REGEXP_REPLACE('A', '\\p{Uppercase_letter}', 'X')`, + `REGEXP_REPLACE(1234, 12, 6)`, + `REGEXP_REPLACE(1234, 12, 6, 1)`, + `REGEXP_REPLACE(1234, 12, 6, 100)`, + `REGEXP_REPLACE(1234, 12, 6, 1, 1)`, + `REGEXP_REPLACE(1234, 12, 6, 1, 1, 'c')`, + + `REGEXP_REPLACE(NULL, 'DOG', 'bar', 1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', NULL, 'bar', 1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', 'DOG', NULL, 1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', 'DOG', 'bar', 1, NULL, 'i')`, + `REGEXP_REPLACE('dog cat dog', 'DOG', 'bar', 1, 1, NULL)`, + 
`REGEXP_REPLACE('dog cat dog', 'DOG', 'bar', '1', '1', 0)`, + + `REGEXP_REPLACE(NULL, _latin1'DOG', 'bar', 1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', _latin1'DOG', NULL, 1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', _latin1'DOG', 'bar', 1, NULL, 'i')`, + `REGEXP_REPLACE('dog cat dog', _latin1'DOG', 'bar', 1, 1, NULL)`, + `REGEXP_REPLACE('dog cat dog', _latin1'DOG', 'bar', '1', '1', 0)`, + + `REGEXP_REPLACE(NULL, '[', 'bar', 1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', '[', NULL, 1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', '[', 'bar', 1, NULL, 'i')`, + `REGEXP_REPLACE('dog cat dog', '[', 'bar', 1, 1, NULL)`, + + `REGEXP_REPLACE(NULL, _latin1'[', 'bar', 1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', _latin1'[', NULL, 1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', _latin1'[', 'bar', 1, NULL, 'i')`, + `REGEXP_REPLACE('dog cat dog', _latin1'[', 'bar', 1, 1, NULL)`, + + `REGEXP_REPLACE('dog cat dog', 'DOG', 'bar', 0, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', 'DOG', 'bar', -1, 1, 'i')`, + `REGEXP_REPLACE('', 'DOG', 'bar', -1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', 'DOG', 'bar', 100, 1, 'i')`, + `REGEXP_REPLACE('', 'DOG', 'bar', 100, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', 'DOG', 'bar', 1, 1, 0)`, + + `REGEXP_REPLACE('dog cat dog', _latin1'DOG', 'bar', 0, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', _latin1'DOG', 'bar', -1, 1, 'i')`, + `REGEXP_REPLACE('', _latin1'DOG', 'bar', -1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', _latin1'DOG', 'bar', 100, 1, 'i')`, + `REGEXP_REPLACE('', _latin1'DOG', 'bar', 100, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', _latin1'DOG', 'bar', 1, 1, 0)`, + + `REGEXP_REPLACE(' ', ' ', 'x', 1)`, + `REGEXP_REPLACE(' ', ' ', 'x', 2)`, + `REGEXP_REPLACE(' ', ' ', 'x', 3)`, + + `REGEXP_REPLACE(' ', _latin1' ', 'x', 1)`, + `REGEXP_REPLACE(' ', _latin1' ', 'x', 2)`, + `REGEXP_REPLACE(' ', _latin1' ', 'x', 3)`, + } + + for _, q := range mysqlDocSamples { + yield(q, nil) + } +} diff --git a/go/vt/vtgate/evalengine/testcases/helpers.go 
b/go/vt/vtgate/evalengine/testcases/helpers.go index b05a4c7dbb1..f7cf5b22dd8 100644 --- a/go/vt/vtgate/evalengine/testcases/helpers.go +++ b/go/vt/vtgate/evalengine/testcases/helpers.go @@ -78,13 +78,6 @@ func genSubsets(args []string, subsetLen int, yield func([]string)) { genSubsets1(args, subset, 0, 0, yield) } -func min(a, b int) int { - if a < b { - return a - } - return b -} - func mustJSON(j string) sqltypes.Value { v, err := sqltypes.NewJSON(j) if err != nil { diff --git a/go/vt/vtgate/evalengine/testcases/inputs.go b/go/vt/vtgate/evalengine/testcases/inputs.go index badf7bc9dc8..245318529c3 100644 --- a/go/vt/vtgate/evalengine/testcases/inputs.go +++ b/go/vt/vtgate/evalengine/testcases/inputs.go @@ -133,6 +133,35 @@ var inputConversions = []string{ "cast(time '12:34:56' as json)", "cast(time '12:34:58' as json)", "cast(time '5 12:34:58' as json)", } +var regexInputs = []string{ + "0", "1", "' 0 '", `'\t1foo\t'`, + `'foobar'`, `_utf8 'foobar'`, `''`, `_binary 'foobar'`, + `0x0`, `0x1`, `0xff`, + "NULL", "true", "false", + "0xFF666F6F626172FF", + "time '10:04:58'", "date '2000-01-01'", + "timestamp '2000-01-01 10:34:58'", + "cast(0 as json)", "cast(1 as json)", + "cast(true as json)", "cast(false as json)", + // JSON numbers + "cast(2 as json)", "cast(1.1 as json)", "cast(-1.1 as json)", + // JSON strings + "cast('\"foo\"' as json)", + // JSON binary values + "cast(_binary' \"foo\"' as json)", + "cast(0xFF666F6F626172FF as json)", + "cast(0b01 as json)", + // JSON arrays + "cast('[\"a\"]' as json)", + // JSON objects + "cast('{\"a\": 1, \"b\": 2}' as json)", +} + +var regexMatchStrings = []string{ + "NULL", + "'c'", "'i'", "'m'", "'n'", "'u'", "'cimnu'", "'cimnuunmic'", +} + const inputPi = "314159265358979323846264338327950288419716939937510582097494459" var inputStrings = []string{ @@ -285,3 +314,25 @@ var uuidInputs = []string{ "0x09DB81F6F26611EDA6F920FC8FD6830E", "0x11EDF26609DB81F6A6F920FC8FD6830E", } + +var inputIntervals = []string{"day", + "week", 
+ "month", + "year", + "day_hour", + "day_microsecond", + "day_minute", + "day_second", + "hour", + "hour_microsecond", + "hour_minute", + "hour_second", + "microsecond", + "minute", + "minute_microsecond", + "minute_second", + "quarter", + "second", + "second_microsecond", + "year_month", +} diff --git a/go/vt/vtgate/evalengine/translate.go b/go/vt/vtgate/evalengine/translate.go index 6b14d8d350c..3af97a183e3 100644 --- a/go/vt/vtgate/evalengine/translate.go +++ b/go/vt/vtgate/evalengine/translate.go @@ -75,6 +75,14 @@ func (ast *astCompiler) translateComparisonExpr2(op sqlparser.ComparisonExprOper return &LikeExpr{BinaryExpr: binaryExpr}, nil case sqlparser.NotLikeOp: return &LikeExpr{BinaryExpr: binaryExpr, Negate: true}, nil + case sqlparser.RegexpOp, sqlparser.NotRegexpOp: + return &builtinRegexpLike{ + CallExpr: CallExpr{ + Arguments: []Expr{left, right}, + Method: "REGEXP_LIKE", + }, + Negate: op == sqlparser.NotRegexpOp, + }, nil default: return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, op.ToString()) } @@ -175,14 +183,6 @@ func (ast *astCompiler) translateIsExpr(left sqlparser.Expr, op sqlparser.IsExpr }, nil } -func defaultCoercionCollation(id collations.ID) collations.TypedCollation { - return collations.TypedCollation{ - Collation: id, - Coercibility: collations.CoerceCoercible, - Repertoire: collations.RepertoireUnicode, - } -} - func (ast *astCompiler) translateBindVar(arg *sqlparser.Argument) (Expr, error) { bvar := NewBindVar(arg.Name, arg.Type, ast.cfg.Collation) @@ -211,7 +211,7 @@ func (ast *astCompiler) translateColOffset(col *sqlparser.Offset) (Expr, error) func (ast *astCompiler) translateColName(colname *sqlparser.ColName) (Expr, error) { if ast.cfg.ResolveColumn == nil { - return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "cannot lookup column (column access not supported here)") + return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "cannot lookup column '%s' (column access not supported here)", sqlparser.String(colname)) } 
idx, err := ast.cfg.ResolveColumn(colname) if err != nil { @@ -325,13 +325,13 @@ func (ast *astCompiler) translateCollateExpr(collate *sqlparser.CollateExpr) (Ex return nil, err } coll := collations.Local().LookupByName(collate.Collation) - if coll == nil { + if coll == collations.Unknown { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Unknown collation: '%s'", collate.Collation) } return &CollateExpr{ UnaryExpr: UnaryExpr{expr}, TypedCollation: collations.TypedCollation{ - Collation: coll.ID(), + Collation: coll, Coercibility: collations.CoerceExplicit, Repertoire: collations.RepertoireUnicode, }, @@ -349,10 +349,10 @@ func (ast *astCompiler) translateIntroducerExpr(introduced *sqlparser.Introducer collation = collations.CollationBinaryID } else { defaultCollation := collations.Local().DefaultCollationForCharset(introduced.CharacterSet[1:]) - if defaultCollation == nil { + if defaultCollation == collations.Unknown { panic(fmt.Sprintf("unknown character set: %s", introduced.CharacterSet)) } - collation = defaultCollation.ID() + collation = defaultCollation } switch lit := expr.(type) { diff --git a/go/vt/vtgate/evalengine/translate_builtin.go b/go/vt/vtgate/evalengine/translate_builtin.go index e5045a1900f..4a4c3f1d9d2 100644 --- a/go/vt/vtgate/evalengine/translate_builtin.go +++ b/go/vt/vtgate/evalengine/translate_builtin.go @@ -594,7 +594,7 @@ func (ast *astCompiler) translateCallable(call sqlparser.Callable) (Expr, error) var ws builtinWeightString var err error - ws.String, err = ast.translateExpr(call.Expr) + ws.Expr, err = ast.translateExpr(call.Expr) if err != nil { return nil, err } @@ -744,6 +744,188 @@ func (ast *astCompiler) translateCallable(call sqlparser.Callable) (Expr, error) trim: call.Type, }, nil + case *sqlparser.IntervalDateExpr: + var err error + args := make([]Expr, 2) + + args[0], err = ast.translateExpr(call.Date) + if err != nil { + return nil, err + } + args[1], err = ast.translateExpr(call.Interval) + if err != nil { + 
return nil, err + } + + cexpr := CallExpr{Arguments: args, Method: call.FnName()} + return &builtinDateMath{ + CallExpr: cexpr, + sub: call.IsSubtraction(), + unit: call.NormalizedUnit(), + collate: ast.cfg.Collation, + }, nil + + case *sqlparser.RegexpLikeExpr: + input, err := ast.translateExpr(call.Expr) + if err != nil { + return nil, err + } + + pattern, err := ast.translateExpr(call.Pattern) + if err != nil { + return nil, err + } + + args := []Expr{input, pattern} + + if call.MatchType != nil { + matchType, err := ast.translateExpr(call.MatchType) + if err != nil { + return nil, err + } + args = append(args, matchType) + } + + return &builtinRegexpLike{ + CallExpr: CallExpr{Arguments: args, Method: "REGEXP_LIKE"}, + Negate: false, + }, nil + + case *sqlparser.RegexpInstrExpr: + input, err := ast.translateExpr(call.Expr) + if err != nil { + return nil, err + } + + pattern, err := ast.translateExpr(call.Pattern) + if err != nil { + return nil, err + } + + args := []Expr{input, pattern} + + if call.Position != nil { + position, err := ast.translateExpr(call.Position) + if err != nil { + return nil, err + } + args = append(args, position) + } + + if call.Occurrence != nil { + occurrence, err := ast.translateExpr(call.Occurrence) + if err != nil { + return nil, err + } + args = append(args, occurrence) + } + + if call.ReturnOption != nil { + returnOption, err := ast.translateExpr(call.ReturnOption) + if err != nil { + return nil, err + } + args = append(args, returnOption) + } + + if call.MatchType != nil { + matchType, err := ast.translateExpr(call.MatchType) + if err != nil { + return nil, err + } + args = append(args, matchType) + } + + return &builtinRegexpInstr{ + CallExpr: CallExpr{Arguments: args, Method: "REGEXP_INSTR"}, + }, nil + + case *sqlparser.RegexpSubstrExpr: + input, err := ast.translateExpr(call.Expr) + if err != nil { + return nil, err + } + + pattern, err := ast.translateExpr(call.Pattern) + if err != nil { + return nil, err + } + + args := 
[]Expr{input, pattern} + + if call.Position != nil { + position, err := ast.translateExpr(call.Position) + if err != nil { + return nil, err + } + args = append(args, position) + } + + if call.Occurrence != nil { + occurrence, err := ast.translateExpr(call.Occurrence) + if err != nil { + return nil, err + } + args = append(args, occurrence) + } + + if call.MatchType != nil { + matchType, err := ast.translateExpr(call.MatchType) + if err != nil { + return nil, err + } + args = append(args, matchType) + } + + return &builtinRegexpSubstr{ + CallExpr: CallExpr{Arguments: args, Method: "REGEXP_SUBSTR"}, + }, nil + + case *sqlparser.RegexpReplaceExpr: + input, err := ast.translateExpr(call.Expr) + if err != nil { + return nil, err + } + + pattern, err := ast.translateExpr(call.Pattern) + if err != nil { + return nil, err + } + + repl, err := ast.translateExpr(call.Repl) + if err != nil { + return nil, err + } + + args := []Expr{input, pattern, repl} + + if call.Position != nil { + position, err := ast.translateExpr(call.Position) + if err != nil { + return nil, err + } + args = append(args, position) + } + + if call.Occurrence != nil { + occurrence, err := ast.translateExpr(call.Occurrence) + if err != nil { + return nil, err + } + args = append(args, occurrence) + } + + if call.MatchType != nil { + matchType, err := ast.translateExpr(call.MatchType) + if err != nil { + return nil, err + } + args = append(args, matchType) + } + + return &builtinRegexpReplace{ + CallExpr: CallExpr{Arguments: args, Method: "REGEXP_REPLACE"}, + }, nil default: return nil, translateExprNotSupported(call) } diff --git a/go/vt/vtgate/evalengine/translate_convert.go b/go/vt/vtgate/evalengine/translate_convert.go index 12b1fdfc9ab..5560315f8e2 100644 --- a/go/vt/vtgate/evalengine/translate_convert.go +++ b/go/vt/vtgate/evalengine/translate_convert.go @@ -20,6 +20,7 @@ import ( "strings" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/colldata" 
"vitess.io/vitess/go/mysql/decimal" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" @@ -27,15 +28,11 @@ import ( ) func (ast *astCompiler) binaryCollationForCollation(collation collations.ID) collations.ID { - binary := collation.Get() + binary := colldata.Lookup(collation) if binary == nil { return collations.Unknown } - binaryCollation := collations.Local().BinaryCollationForCharset(binary.Charset().Name()) - if binaryCollation == nil { - return collations.Unknown - } - return binaryCollation.ID() + return collations.Local().BinaryCollationForCharset(binary.Charset().Name()) } func (ast *astCompiler) translateConvertCharset(charset string, binary bool) (collations.ID, error) { @@ -50,11 +47,10 @@ func (ast *astCompiler) translateConvertCharset(charset string, binary bool) (co return collation, nil } charset = strings.ToLower(charset) - collation := collations.Local().DefaultCollationForCharset(charset) - if collation == nil { + collationID := collations.Local().DefaultCollationForCharset(charset) + if collationID == collations.Unknown { return collations.Unknown, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Unknown character set: '%s'", charset) } - collationID := collation.ID() if binary { collationID = ast.binaryCollationForCollation(collationID) if collationID == collations.Unknown { diff --git a/go/vt/vtgate/evalengine/translate_simplify.go b/go/vt/vtgate/evalengine/translate_simplify.go index 3e957a943fc..7f2261b4790 100644 --- a/go/vt/vtgate/evalengine/translate_simplify.go +++ b/go/vt/vtgate/evalengine/translate_simplify.go @@ -16,6 +16,8 @@ limitations under the License. 
package evalengine +import "vitess.io/vitess/go/mysql/collations/colldata" + func (expr *Literal) constant() bool { return true } @@ -78,7 +80,7 @@ func (expr *LikeExpr) simplify(env *ExpressionEnv) error { if lit, ok := expr.Right.(*Literal); ok { if b, ok := lit.inner.(*evalBytes); ok && (b.isVarChar() || b.isBinary()) { expr.MatchCollation = b.col.Collation - coll := expr.MatchCollation.Get() + coll := colldata.Lookup(expr.MatchCollation) expr.Match = coll.Wildcard(b.bytes, 0, 0, 0) } } @@ -123,12 +125,12 @@ func (c *CallExpr) simplify(env *ExpressionEnv) error { } func (c *builtinWeightString) constant() bool { - return c.String.constant() + return c.Expr.constant() } func (c *builtinWeightString) simplify(env *ExpressionEnv) error { var err error - c.String, err = simplifyExpr(env, c.String) + c.Expr, err = simplifyExpr(env, c.Expr) return err } diff --git a/go/vt/vtgate/evalengine/weights.go b/go/vt/vtgate/evalengine/weights.go new file mode 100644 index 00000000000..08ec844f357 --- /dev/null +++ b/go/vt/vtgate/evalengine/weights.go @@ -0,0 +1,178 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package evalengine + +import ( + "encoding/binary" + "math" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/json" + "vitess.io/vitess/go/sqltypes" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +// WeightString returns the weight string for a value. +// It appends to dst if an existing slice is given, otherwise it +// returns a new one. +// The returned boolean indicates whether the weight string is a +// fixed-width weight string, such as for fixed size integer values. +// Our WeightString implementation supports more types that MySQL +// externally communicates with the `WEIGHT_STRING` function, so that we +// can also use this to order / sort other types like Float and Decimal +// as well. +func WeightString(dst []byte, v sqltypes.Value, coerceTo sqltypes.Type, col collations.ID, length, precision int) ([]byte, bool, error) { + // We optimize here for the case where we already have the desired type. + // Otherwise, we fall back to the general evalengine conversion logic. 
+ if v.Type() != coerceTo { + return fallbackWeightString(dst, v, coerceTo, col, length, precision) + } + + switch { + case sqltypes.IsNull(coerceTo): + return nil, true, nil + + case sqltypes.IsSigned(coerceTo): + i, err := v.ToInt64() + if err != nil { + return dst, false, err + } + raw := uint64(i) + raw = raw ^ (1 << 63) + return binary.BigEndian.AppendUint64(dst, raw), true, nil + + case sqltypes.IsUnsigned(coerceTo): + u, err := v.ToUint64() + if err != nil { + return dst, false, err + } + return binary.BigEndian.AppendUint64(dst, u), true, nil + + case sqltypes.IsFloat(coerceTo): + f, err := v.ToFloat64() + if err != nil { + return dst, false, err + } + + raw := math.Float64bits(f) + if math.Signbit(f) { + raw = ^raw + } else { + raw = raw ^ (1 << 63) + } + return binary.BigEndian.AppendUint64(dst, raw), true, nil + + case sqltypes.IsBinary(coerceTo): + b := v.Raw() + if length != 0 { + if length > cap(b) { + b = append(b, make([]byte, length-len(b))...) + } else { + b = b[:length] + } + } + return append(dst, b...), false, nil + + case sqltypes.IsText(coerceTo): + coll := colldata.Lookup(col) + if coll == nil { + return dst, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "cannot hash unsupported collation") + } + b := v.Raw() + if length != 0 { + b = charset.Slice(coll.Charset(), b, 0, length) + } + return coll.WeightString(dst, b, length), false, nil + + case sqltypes.IsDecimal(coerceTo): + dec, err := decimal.NewFromMySQL(v.Raw()) + if err != nil { + return dst, false, err + } + return dec.WeightString(dst, int32(length), int32(precision)), true, nil + case coerceTo == sqltypes.TypeJSON: + j, err := json.NewFromSQL(v) + if err != nil { + return dst, false, err + } + return j.WeightString(dst), false, nil + default: + return fallbackWeightString(dst, v, coerceTo, col, length, precision) + } +} + +func fallbackWeightString(dst []byte, v sqltypes.Value, coerceTo sqltypes.Type, col collations.ID, length, precision int) ([]byte, bool, error) { + e, err := 
valueToEvalCast(v, coerceTo, col) + if err != nil { + return dst, false, err + } + return evalWeightString(dst, e, length, precision) +} + +func evalWeightString(dst []byte, e eval, length, precision int) ([]byte, bool, error) { + switch e := e.(type) { + case nil: + return nil, true, nil + case *evalInt64: + raw := uint64(e.i) + raw = raw ^ (1 << 63) + return binary.BigEndian.AppendUint64(dst, raw), true, nil + case *evalUint64: + return binary.BigEndian.AppendUint64(dst, e.u), true, nil + case *evalFloat: + raw := math.Float64bits(e.f) + if math.Signbit(e.f) { + raw = ^raw + } else { + raw = raw ^ (1 << 63) + } + return binary.BigEndian.AppendUint64(dst, raw), true, nil + case *evalDecimal: + return e.dec.WeightString(dst, int32(length), int32(precision)), true, nil + case *evalBytes: + if e.isBinary() { + b := e.bytes + if length != 0 { + if length > cap(b) { + b = append(b, make([]byte, length-len(b))...) + } else { + b = b[:length] + } + } + return append(dst, b...), false, nil + } + coll := colldata.Lookup(e.col.Collation) + if coll == nil { + return dst, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "cannot hash unsupported collation") + } + b := e.bytes + if length != 0 { + b = charset.Slice(coll.Charset(), b, 0, length) + } + return coll.WeightString(dst, b, length), false, nil + case *evalTemporal: + return e.dt.WeightString(dst), true, nil + case *evalJSON: + return e.WeightString(dst), false, nil + } + + return dst, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected type %v", e.SQLType()) +} diff --git a/go/vt/vtgate/evalengine/weights_test.go b/go/vt/vtgate/evalengine/weights_test.go new file mode 100644 index 00000000000..50a1d91f20c --- /dev/null +++ b/go/vt/vtgate/evalengine/weights_test.go @@ -0,0 +1,103 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "fmt" + "slices" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" +) + +func TestWeightStrings(t *testing.T) { + const Length = 1000 + + type item struct { + value sqltypes.Value + weight string + } + + var cases = []struct { + name string + gen func() sqltypes.Value + types []sqltypes.Type + col collations.ID + len int + prec int + }{ + {name: "int64", gen: sqltypes.RandomGenerators[sqltypes.Int64], types: []sqltypes.Type{sqltypes.Int64, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, + {name: "uint64", gen: sqltypes.RandomGenerators[sqltypes.Uint64], types: []sqltypes.Type{sqltypes.Uint64, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, + {name: "float64", gen: sqltypes.RandomGenerators[sqltypes.Float64], types: []sqltypes.Type{sqltypes.Float64, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, + {name: "varchar", gen: sqltypes.RandomGenerators[sqltypes.VarChar], types: []sqltypes.Type{sqltypes.VarChar, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationUtf8mb4ID}, + {name: "varbinary", gen: sqltypes.RandomGenerators[sqltypes.VarBinary], types: []sqltypes.Type{sqltypes.VarBinary, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, + {name: "decimal", gen: sqltypes.RandomGenerators[sqltypes.Decimal], types: []sqltypes.Type{sqltypes.Decimal, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID, len: 20, prec: 10}, + {name: 
"json", gen: sqltypes.RandomGenerators[sqltypes.TypeJSON], types: []sqltypes.Type{sqltypes.TypeJSON}, col: collations.CollationBinaryID}, + {name: "date", gen: sqltypes.RandomGenerators[sqltypes.Date], types: []sqltypes.Type{sqltypes.Date, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, + {name: "datetime", gen: sqltypes.RandomGenerators[sqltypes.Datetime], types: []sqltypes.Type{sqltypes.Datetime, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, + {name: "timestamp", gen: sqltypes.RandomGenerators[sqltypes.Timestamp], types: []sqltypes.Type{sqltypes.Timestamp, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, + {name: "time", gen: sqltypes.RandomGenerators[sqltypes.Time], types: []sqltypes.Type{sqltypes.Time, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, + } + + for _, tc := range cases { + for _, typ := range tc.types { + t.Run(fmt.Sprintf("%s/%v", tc.name, typ), func(t *testing.T) { + items := make([]item, 0, Length) + for i := 0; i < Length; i++ { + v := tc.gen() + w, _, err := WeightString(nil, v, typ, tc.col, tc.len, tc.prec) + require.NoError(t, err) + + items = append(items, item{value: v, weight: string(w)}) + } + + slices.SortFunc(items, func(a, b item) int { + if a.weight < b.weight { + return -1 + } else if a.weight > b.weight { + return 1 + } else { + return 0 + } + }) + + for i := 0; i < Length-1; i++ { + a := items[i] + b := items[i+1] + + v1, err := valueToEvalCast(a.value, typ, tc.col) + require.NoError(t, err) + v2, err := valueToEvalCast(b.value, typ, tc.col) + require.NoError(t, err) + + cmp, err := evalCompareNullSafe(v1, v2) + require.NoError(t, err) + + if cmp > 0 { + t.Fatalf("expected %v [pos=%d] to come after %v [pos=%d]\nav = %v\nbv = %v", + a.value, i, b.value, i+1, + []byte(a.weight), []byte(b.weight), + ) + } + } + }) + } + } +} diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go index 577e1738731..1edd10626f4 100644 --- 
a/go/vt/vtgate/executor.go +++ b/go/vt/vtgate/executor.go @@ -17,11 +17,8 @@ limitations under the License. package vtgate import ( - "bufio" "bytes" "context" - "crypto/sha256" - "encoding/hex" "encoding/json" "fmt" "io" @@ -33,10 +30,12 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/cache/theine" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/streamlog" + "vitess.io/vitess/go/vt/vthash" "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/cache" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" @@ -44,6 +43,11 @@ import ( "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" @@ -57,12 +61,7 @@ import ( "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vtgate/vschemaacl" - - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vtgate/vtgateservice" ) var ( @@ -105,9 +104,11 @@ type Executor struct { mu sync.Mutex vschema *vindexes.VSchema streamSize int - plans cache.Cache vschemaStats *VSchemaStats + plans *PlanCache + epoch atomic.Uint32 + normalize bool warnShardedOnly bool @@ -117,11 +118,11 @@ type Executor struct { // allowScatter will fail planning if set to false and a plan contains any scatter queries allowScatter bool - // truncateErrorLen truncates errors sent to client if they are above this value - // (0 means do not 
truncate). - truncateErrorLen int //auth authServer mysql.AuthServer + + // queryLogger is passed in for logging from this vtgate executor. + queryLogger *streamlog.StreamLogger[*logstats.LogStats] } var executorOnce sync.Once @@ -130,6 +131,15 @@ const pathQueryPlans = "/debug/query_plans" const pathScatterStats = "/debug/scatter_stats" const pathVSchema = "/debug/vschema" +type PlanCacheKey = theine.HashKey256 +type PlanCache = theine.Store[PlanCacheKey, *engine.Plan] + +func DefaultPlanCache() *PlanCache { + // when being endtoend tested, disable the doorkeeper to ensure reproducible results + doorkeeper := !servenv.TestingEndtoend + return theine.NewStore[PlanCacheKey, *engine.Plan](queryPlanCacheMemory, doorkeeper) +} + // NewExecutor creates a new Executor. func NewExecutor( ctx context.Context, @@ -138,7 +148,7 @@ func NewExecutor( resolver *Resolver, normalize, warnOnShardedOnly bool, streamSize int, - cacheCfg *cache.Config, + plans *PlanCache, schemaTracker SchemaInfo, noScatter bool, pv plancontext.PlannerVersion, @@ -149,13 +159,13 @@ func NewExecutor( resolver: resolver, scatterConn: resolver.scatterConn, txConn: resolver.scatterConn.txConn, - plans: cache.NewDefaultCacheImpl(cacheCfg), normalize: normalize, warnShardedOnly: warnOnShardedOnly, streamSize: streamSize, schemaTracker: schemaTracker, allowScatter: !noScatter, pv: pv, + plans: plans, } vschemaacl.Init() @@ -173,19 +183,19 @@ func NewExecutor( return int64(e.plans.Len()) }) stats.NewGaugeFunc("QueryPlanCacheSize", "Query plan cache size", func() int64 { - return e.plans.UsedCapacity() + return int64(e.plans.UsedCapacity()) }) stats.NewGaugeFunc("QueryPlanCacheCapacity", "Query plan cache capacity", func() int64 { - return e.plans.MaxCapacity() + return int64(e.plans.MaxCapacity()) }) stats.NewCounterFunc("QueryPlanCacheEvictions", "Query plan cache evictions", func() int64 { - return e.plans.Evictions() + return e.plans.Metrics.Evicted() }) stats.NewCounterFunc("QueryPlanCacheHits", "Query 
plan cache hits", func() int64 { - return e.plans.Hits() + return e.plans.Metrics.Hits() }) stats.NewCounterFunc("QueryPlanCacheMisses", "Query plan cache misses", func() int64 { - return e.plans.Misses() + return e.plans.Metrics.Misses() }) servenv.HTTPHandle(pathQueryPlans, e) servenv.HTTPHandle(pathScatterStats, e) @@ -195,7 +205,7 @@ func NewExecutor( } // Execute executes a non-streaming query. -func (e *Executor) Execute(ctx context.Context, c *mysql.Conn, method string, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable) (result *sqltypes.Result, err error) { +func (e *Executor) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, c *mysql.Conn, method string, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable) (result *sqltypes.Result, err error) { span, ctx := trace.NewSpan(ctx, "executor.Execute") span.Annotate("method", method) trace.AnnotateSQL(span, sqlparser.Preview(sql)) @@ -207,7 +217,7 @@ func (e *Executor) Execute(ctx context.Context, c *mysql.Conn, method string, sa case sqlparser.StmtLoadData: return e.executeLoad(ctx, c, safeSession, sql, bindVars, logStats) } - stmtType, result, err = e.execute(ctx, safeSession, sql, bindVars, logStats, c) + stmtType, result, err = e.execute(ctx, mysqlCtx, safeSession, sql, bindVars, logStats, c) logStats.Error = err if result == nil { saveSessionStats(safeSession, stmtType, 0, 0, 0, err) @@ -224,7 +234,7 @@ func (e *Executor) Execute(ctx context.Context, c *mysql.Conn, method string, sa } logStats.SaveEndTime() - QueryLogger.Send(logStats) + e.queryLogger.Send(logStats) err = vterrors.TruncateError(err, truncateErrorLen) return result, err } @@ -278,6 +288,7 @@ func (s *streaminResultReceiver) storeResultStats(typ sqlparser.StatementType, q // StreamExecute executes a streaming query. 
func (e *Executor) StreamExecute( ctx context.Context, + mysqlCtx vtgateservice.MySQLConnection, c *mysql.Conn, method string, safeSession *SafeSession, @@ -369,7 +380,7 @@ func (e *Executor) StreamExecute( return err } - err = e.newExecute(ctx, safeSession, sql, bindVars, logStats, resultHandler, srr.storeResultStats) + err = e.newExecute(ctx, mysqlCtx, safeSession, sql, bindVars, logStats, resultHandler, srr.storeResultStats) logStats.Error = err saveSessionStats(safeSession, srr.stmtType, srr.rowsAffected, srr.insertID, srr.rowsReturned, err) @@ -383,7 +394,7 @@ func (e *Executor) StreamExecute( } logStats.SaveEndTime() - QueryLogger.Send(logStats) + e.queryLogger.Send(logStats) return vterrors.TruncateError(err, truncateErrorLen) } @@ -416,11 +427,11 @@ func saveSessionStats(safeSession *SafeSession, stmtType sqlparser.StatementType } } -func (e *Executor) execute(ctx context.Context, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats, c *mysql.Conn) (sqlparser.StatementType, *sqltypes.Result, error) { +func (e *Executor) execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats, c *mysql.Conn) (sqlparser.StatementType, *sqltypes.Result, error) { var err error var qr *sqltypes.Result var stmtType sqlparser.StatementType - err = e.newExecute(ctx, safeSession, sql, bindVars, logStats, func(ctx context.Context, plan *engine.Plan, vc *vcursorImpl, bindVars map[string]*querypb.BindVariable, time time.Time) error { + err = e.newExecute(ctx, mysqlCtx, safeSession, sql, bindVars, logStats, func(ctx context.Context, plan *engine.Plan, vc *vcursorImpl, bindVars map[string]*querypb.BindVariable, time time.Time) error { stmtType = plan.Type qr, err = e.executePlan(ctx, c, safeSession, plan, vc, bindVars, logStats, time) return err @@ -485,6 +496,8 @@ func (e *Executor) addNeededBindVars(vcursor 
*vcursorImpl, bindVarNeeds *sqlpars bindVars[key] = sqltypes.StringBindVariable(v) case sysvars.DDLStrategy.Name: bindVars[key] = sqltypes.StringBindVariable(session.DDLStrategy) + case sysvars.MigrationContext.Name: + bindVars[key] = sqltypes.StringBindVariable(session.MigrationContext) case sysvars.SessionUUID.Name: bindVars[key] = sqltypes.StringBindVariable(session.SessionUUID) case sysvars.SessionEnableSystemSettings.Name: @@ -664,6 +677,36 @@ func (e *Executor) executeSPInAllSessions(ctx context.Context, safeSession *Safe return qr, nil } +// handleKill executes the kill statement. +func (e *Executor) handleKill(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, stmt sqlparser.Statement, logStats *logstats.LogStats) (result *sqltypes.Result, err error) { + execStart := time.Now() + logStats.PlanTime = execStart.Sub(logStats.StartTime) + e.updateQueryCounts("Kill", "", "", 0) + defer func() { + logStats.ExecuteTime = time.Since(execStart) + }() + + if !allowKillStmt { + return nil, vterrors.VT07001("kill statement execution not permitted.") + } + + if mysqlCtx == nil { + return nil, vterrors.VT12001("kill statement works with access through mysql protocol") + } + + killStmt := stmt.(*sqlparser.Kill) + switch killStmt.Type { + case sqlparser.QueryType: + err = mysqlCtx.KillQuery(uint32(killStmt.ProcesslistID)) + default: + err = mysqlCtx.KillConnection(ctx, uint32(killStmt.ProcesslistID)) + } + if err != nil { + return nil, err + } + return &sqltypes.Result{}, nil +} + // CloseSession releases the current connection, which rollbacks open transactions and closes reserved connections. // It is called then the MySQL servers closes the connection to its client. 
func (e *Executor) CloseSession(ctx context.Context, safeSession *SafeSession) error { @@ -978,12 +1021,19 @@ func (e *Executor) SaveVSchema(vschema *vindexes.VSchema, stats *VSchemaStats) { e.vschema = vschema } e.vschemaStats = stats - e.plans.Clear() + e.ClearPlans() if vschemaCounters != nil { vschemaCounters.Add("Reload", 1) } + if vindexUnknownParams != nil { + var unknownParams int + for _, ks := range stats.Keyspaces { + unknownParams += ks.VindexUnknownParamsCount + } + vindexUnknownParams.Set(int64(unknownParams)) + } } // ParseDestinationTarget parses destination target string and sets default keyspace if possible. @@ -1064,37 +1114,23 @@ func (e *Executor) getPlan( return e.cacheAndBuildStatement(ctx, vcursor, query, stmt, reservedVars, bindVarNeeds, logStats) } -func (e *Executor) hashPlan(ctx context.Context, vcursor *vcursorImpl, query string) string { - planHash := sha256.New() - - { - // use a bufio.Writer to accumulate writes instead of writing directly to the hasher - buf := bufio.NewWriter(planHash) - vcursor.keyForPlan(ctx, query, buf) - buf.Flush() - } +func (e *Executor) hashPlan(ctx context.Context, vcursor *vcursorImpl, query string) PlanCacheKey { + hasher := vthash.New256() + vcursor.keyForPlan(ctx, query, hasher) - return hex.EncodeToString(planHash.Sum(nil)) + var planKey PlanCacheKey + hasher.Sum(planKey[:0]) + return planKey } -func (e *Executor) cacheAndBuildStatement( +func (e *Executor) buildStatement( ctx context.Context, vcursor *vcursorImpl, query string, stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, bindVarNeeds *sqlparser.BindVarNeeds, - logStats *logstats.LogStats, ) (*engine.Plan, error) { - planKey := e.hashPlan(ctx, vcursor, query) - planCachable := sqlparser.CachePlan(stmt) && vcursor.safeSession.cachePlan() - if planCachable { - if plan, ok := e.plans.Get(planKey); ok { - logStats.CachedPlan = true - return plan.(*engine.Plan), nil - } - } - plan, err := planbuilder.BuildFromStmt(ctx, query, stmt, 
reservedVars, vcursor, bindVarNeeds, enableOnlineDDL, enableDirectDDL) if err != nil { return nil, err @@ -1104,13 +1140,32 @@ func (e *Executor) cacheAndBuildStatement( vcursor.warnings = nil err = e.checkThatPlanIsValid(stmt, plan) - // Only cache the plan if it is valid (i.e. does not scatter) - if err == nil && planCachable { - e.plans.Set(planKey, plan) - } return plan, err } +func (e *Executor) cacheAndBuildStatement( + ctx context.Context, + vcursor *vcursorImpl, + query string, + stmt sqlparser.Statement, + reservedVars *sqlparser.ReservedVars, + bindVarNeeds *sqlparser.BindVarNeeds, + logStats *logstats.LogStats, +) (*engine.Plan, error) { + planCachable := sqlparser.CachePlan(stmt) && vcursor.safeSession.cachePlan() + if planCachable { + planKey := e.hashPlan(ctx, vcursor, query) + + var plan *engine.Plan + var err error + plan, logStats.CachedPlan, err = e.plans.GetOrLoad(planKey, e.epoch.Load(), func() (*engine.Plan, error) { + return e.buildStatement(ctx, vcursor, query, stmt, reservedVars, bindVarNeeds) + }) + return plan, err + } + return e.buildStatement(ctx, vcursor, query, stmt, reservedVars, bindVarNeeds) +} + func (e *Executor) canNormalizeStatement(stmt sqlparser.Statement, setVarComment string) bool { return sqlparser.CanNormalize(stmt) || setVarComment != "" } @@ -1143,18 +1198,10 @@ func prepareSetVarComment(vcursor *vcursorImpl, stmt sqlparser.Statement) (strin return strings.TrimSpace(res.String()), nil } -type cacheItem struct { - Key string - Value *engine.Plan -} - -func (e *Executor) debugCacheEntries() (items []cacheItem) { - e.plans.ForEach(func(value any) bool { - plan := value.(*engine.Plan) - items = append(items, cacheItem{ - Key: plan.Original, - Value: plan, - }) +func (e *Executor) debugCacheEntries() (items map[string]*engine.Plan) { + items = make(map[string]*engine.Plan) + e.ForEachPlan(func(plan *engine.Plan) bool { + items[plan.Original] = plan return true }) return @@ -1192,10 +1239,20 @@ func returnAsJSON(response 
http.ResponseWriter, stuff any) { } // Plans returns the LRU plan cache -func (e *Executor) Plans() cache.Cache { +func (e *Executor) Plans() *PlanCache { return e.plans } +func (e *Executor) ForEachPlan(each func(plan *engine.Plan) bool) { + e.plans.Range(e.epoch.Load(), func(_ PlanCacheKey, value *engine.Plan) bool { + return each(value) + }) +} + +func (e *Executor) ClearPlans() { + e.epoch.Add(1) +} + func (e *Executor) updateQueryCounts(planType, keyspace, tableName string, shardQueries int64) { queriesProcessed.Add(planType, 1) queriesRouted.Add(planType, shardQueries) @@ -1264,7 +1321,7 @@ func (e *Executor) Prepare(ctx context.Context, method string, safeSession *Safe // it was a no-op record (i.e. didn't issue any queries) if !(logStats.StmtType == "ROLLBACK" && logStats.ShardQueries == 0) { logStats.SaveEndTime() - QueryLogger.Send(logStats) + e.queryLogger.Send(logStats) } return fld, vterrors.TruncateError(err, truncateErrorLen) } @@ -1309,14 +1366,13 @@ func (e *Executor) prepare(ctx context.Context, safeSession *SafeSession, sql st } return nil, err case sqlparser.StmtDDL, sqlparser.StmtBegin, sqlparser.StmtCommit, sqlparser.StmtRollback, sqlparser.StmtSet, sqlparser.StmtInsert, sqlparser.StmtReplace, sqlparser.StmtUpdate, sqlparser.StmtDelete, - sqlparser.StmtUse, sqlparser.StmtOther, sqlparser.StmtComment, sqlparser.StmtExplain, sqlparser.StmtFlush: + sqlparser.StmtUse, sqlparser.StmtOther, sqlparser.StmtAnalyze, sqlparser.StmtComment, sqlparser.StmtExplain, sqlparser.StmtFlush, sqlparser.StmtKill: return nil, nil } return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unrecognized prepare statement: %s", sql) } func (e *Executor) handlePrepare(ctx context.Context, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats) ([]*querypb.Field, error) { - // V3 mode. 
query, comments := sqlparser.SplitMarginComments(sql) vcursor, _ := newVCursorImpl(safeSession, comments, e, logStats, e.vm, e.VSchema(), e.resolver.resolver, e.serv, e.warnShardedOnly, e.pv) @@ -1844,3 +1900,12 @@ func intersect(lhs, rhs []string) []string { } return dst } +func (e *Executor) Close() { + e.scatterConn.Close() + topo, err := e.serv.GetTopoServer() + if err != nil { + panic(err) + } + topo.Close() + e.plans.Close() +} diff --git a/go/vt/vtgate/executor_ddl_test.go b/go/vt/vtgate/executor_ddl_test.go index 7948f1b6208..8b6c2ba9c25 100644 --- a/go/vt/vtgate/executor_ddl_test.go +++ b/go/vt/vtgate/executor_ddl_test.go @@ -26,7 +26,7 @@ import ( ) func TestDDLFlags(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) session := NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}) defer func() { enableOnlineDDL = true @@ -59,7 +59,7 @@ func TestDDLFlags(t *testing.T) { t.Run(fmt.Sprintf("%s-%v-%v", testcase.sql, testcase.enableDirectDDL, testcase.enableOnlineDDL), func(t *testing.T) { enableDirectDDL = testcase.enableDirectDDL enableOnlineDDL = testcase.enableOnlineDDL - _, err := executor.Execute(ctx, nil, "TestDDLFlags", session, testcase.sql, nil) + _, err := executor.Execute(ctx, nil, nil, "TestDDLFlags", session, testcase.sql, nil) if testcase.wantErr { require.EqualError(t, err, testcase.err) } else { diff --git a/go/vt/vtgate/executor_dml_test.go b/go/vt/vtgate/executor_dml_test.go index d61b419d063..0445a9c8f79 100644 --- a/go/vt/vtgate/executor_dml_test.go +++ b/go/vt/vtgate/executor_dml_test.go @@ -25,7 +25,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" querypb "vitess.io/vitess/go/vt/proto/query" @@ -37,13 +38,16 @@ import ( ) func TestUpdateEqual(t *testing.T) { - executor, sbc1, sbc2, sbclookup := 
createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) // Update by primary vindex. - _, err := executorExec(executor, "update user set a=2 where id = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "update user set a=2 where id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "update `user` set a = 2 where id = 1", @@ -51,10 +55,10 @@ func TestUpdateEqual(t *testing.T) { }} assertQueries(t, sbc1, wantQueries) assertQueries(t, sbc2, nil) - testQueryLog(t, logChan, "TestExecute", "UPDATE", "update `user` set a = 2 where id = 1", 1) + testQueryLog(t, executor, logChan, "TestExecute", "UPDATE", "update `user` set a = 2 where id = 1", 1) sbc1.Queries = nil - _, err = executorExec(executor, "update user set a=2 where id = 3", nil) + _, err = executorExec(ctx, executor, session, "update user set a=2 where id = 3", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "update `user` set a = 2 where id = 3", @@ -67,7 +71,7 @@ func TestUpdateEqual(t *testing.T) { sbc1.Queries = nil sbc2.Queries = nil sbclookup.SetResults([]*sqltypes.Result{{}}) - _, err = executorExec(executor, "update music set a=2 where id = 2", nil) + _, err = executorExec(ctx, executor, session, "update music set a=2 where id = 2", nil) require.NoError(t, err) vars, err := sqltypes.BuildBindVariable([]any{sqltypes.NewInt64(2)}) require.NoError(t, err) @@ -91,7 +95,7 @@ func TestUpdateEqual(t *testing.T) { ), }) - _, err = executorExec(executor, "update user2 set `name`='myname', lastname='mylastname' where id = 1", nil) + _, err = executorExec(ctx, executor, session, "update user2 set `name`='myname', lastname='mylastname' where id = 1", nil) require.NoError(t, err) 
wantQueries = []*querypb.BoundQuery{ { @@ -129,10 +133,11 @@ func TestUpdateEqual(t *testing.T) { } func TestUpdateFromSubQuery(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + executor.pv = querypb.ExecuteOptions_Gen4 - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) fields := []*querypb.Field{ {Name: "count(*)", Type: sqltypes.Int64}, @@ -145,7 +150,10 @@ func TestUpdateFromSubQuery(t *testing.T) { }}) // Update by primary vindex, but first execute subquery - _, err := executorExec(executor, "update user set a=(select count(*) from user where id = 3) where id = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "update user set a=(select count(*) from user where id = 3) where id = 1", nil) require.NoError(t, err) wantQueriesSbc1 := []*querypb.BoundQuery{{ Sql: "update `user` set a = :__sq1 where id = 1", @@ -159,7 +167,7 @@ func TestUpdateFromSubQuery(t *testing.T) { }} assertQueries(t, sbc1, wantQueriesSbc1) assertQueries(t, sbc2, wantQueriesSbc2) - testQueryLog(t, logChan, "TestExecute", "UPDATE", "update `user` set a = (select count(*) from `user` where id = 3) where id = 1", 2) + testQueryLog(t, executor, logChan, "TestExecute", "UPDATE", "update `user` set a = (select count(*) from `user` where id = 3) where id = 1", 2) } func TestUpdateEqualWithNoVerifyAndWriteOnlyLookupUniqueVindexes(t *testing.T) { @@ -170,9 +178,12 @@ func TestUpdateEqualWithNoVerifyAndWriteOnlyLookupUniqueVindexes(t *testing.T) { ), "1|2|2|2|2|2|1|0", )} - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, res) + executor, sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, res) - _, err := executorExec(executor, "update t2_lookup set lu_col = 5 where 
wo_lu_col = 2", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "update t2_lookup set lu_col = 5 where wo_lu_col = 2", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -221,10 +232,10 @@ func TestUpdateInTransactionLookupDefaultReadLock(t *testing.T) { ), "1|2|2|2|2|2|1|0", )} - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, res) + executor, sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, res) safeSession := NewSafeSession(&vtgatepb.Session{InTransaction: true}) - _, err := executorExecSession( + _, err := executorExecSession(ctx, executor, "update t2_lookup set lu_col = 5 where nv_lu_col = 2", nil, @@ -283,10 +294,10 @@ func TestUpdateInTransactionLookupExclusiveReadLock(t *testing.T) { ), "1|2|2|2|2|2|1|0", )} - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, res) + executor, sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, res) safeSession := NewSafeSession(&vtgatepb.Session{InTransaction: true}) - _, err := executorExecSession( + _, err := executorExecSession(ctx, executor, "update t2_lookup set lu_col = 5 where erl_lu_col = 2", nil, @@ -345,10 +356,10 @@ func TestUpdateInTransactionLookupSharedReadLock(t *testing.T) { ), "1|2|2|2|2|2|1|0", )} - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, res) + executor, sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, res) safeSession := NewSafeSession(&vtgatepb.Session{InTransaction: true}) - _, err := executorExecSession( + _, err := executorExecSession(ctx, executor, "update t2_lookup set lu_col = 5 where srl_lu_col = 2", nil, @@ -407,10 +418,10 @@ func TestUpdateInTransactionLookupNoReadLock(t *testing.T) { ), "1|2|2|2|2|2|1|0", )} - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, res) + executor, 
sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, res) safeSession := NewSafeSession(&vtgatepb.Session{InTransaction: true}) - _, err := executorExecSession( + _, err := executorExecSession(ctx, executor, "update t2_lookup set lu_col = 5 where nrl_lu_col = 2", nil, @@ -521,7 +532,7 @@ func TestUpdateMultiOwned(t *testing.T) { } } ` - executor, sbc1, sbc2, sbclookup := createCustomExecutor(vschema) + executor, sbc1, sbc2, sbclookup, ctx := createCustomExecutor(t, vschema) sbc1.SetResults([]*sqltypes.Result{ sqltypes.MakeTestResult( @@ -529,7 +540,10 @@ func TestUpdateMultiOwned(t *testing.T) { "1|10|20|30|40|50|60|0|0", ), }) - _, err := executorExec(executor, "update user set a=1, b=2, f=4, e=3 where id=1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "update user set a=1, b=2, f=4, e=3 where id=1", nil) if err != nil { t.Fatal(err) } @@ -577,9 +591,12 @@ func TestUpdateMultiOwned(t *testing.T) { } func TestUpdateComments(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "update user set a=2 where id = 1 /* trailing */", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "update user set a=2 where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "update `user` set a = 2 where id = 1 /* trailing */", @@ -590,10 +607,13 @@ func TestUpdateComments(t *testing.T) { } func TestUpdateNormalize(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) executor.normalize = true - _, err := executorExec(executor, "/* leading */ update user set a=2 where id = 1 /* trailing */", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "/* 
leading */ update user set a=2 where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "/* leading */ update `user` set a = :a /* INT64 */ where id = :id /* INT64 */ /* trailing */", @@ -607,8 +627,8 @@ func TestUpdateNormalize(t *testing.T) { sbc1.Queries = nil // Force the query to go to the "wrong" shard and ensure that normalization still happens - primarySession.TargetString = "TestExecutor/40-60" - _, err = executorExec(executor, "/* leading */ update user set a=2 where id = 1 /* trailing */", nil) + session.TargetString = "TestExecutor/40-60" + _, err = executorExec(ctx, executor, session, "/* leading */ update user set a=2 where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "/* leading */ update `user` set a = :a /* INT64 */ where id = :id /* INT64 */ /* trailing */", @@ -620,11 +640,10 @@ func TestUpdateNormalize(t *testing.T) { assertQueries(t, sbc1, nil) assertQueries(t, sbc2, wantQueries) sbc2.Queries = nil - primarySession.TargetString = "" } func TestDeleteEqual(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) sbc.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ @@ -638,7 +657,10 @@ func TestDeleteEqual(t *testing.T) { sqltypes.NewVarChar("myname"), }}, }}) - _, err := executorExec(executor, "delete from user where id = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "delete from user where id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select Id, `name` from `user` where id = 1 for update", @@ -661,7 +683,7 @@ func TestDeleteEqual(t *testing.T) { sbc.Queries = nil sbclookup.Queries = nil sbc.SetResults([]*sqltypes.Result{{}}) - _, err = executorExec(executor, "delete from user where id = 1", nil) + _, err = executorExec(ctx, executor, session, "delete from user 
where id = 1", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "select Id, `name` from `user` where id = 1 for update", @@ -676,7 +698,7 @@ func TestDeleteEqual(t *testing.T) { sbc.Queries = nil sbclookup.Queries = nil sbclookup.SetResults([]*sqltypes.Result{{}}) - _, err = executorExec(executor, "delete from music where id = 1", nil) + _, err = executorExec(ctx, executor, session, "delete from music where id = 1", nil) require.NoError(t, err) vars, err := sqltypes.BuildBindVariable([]any{sqltypes.NewInt64(1)}) require.NoError(t, err) @@ -692,7 +714,7 @@ func TestDeleteEqual(t *testing.T) { sbc.Queries = nil sbclookup.Queries = nil sbclookup.SetResults([]*sqltypes.Result{{}}) - _, err = executorExec(executor, "delete from user_extra where user_id = 1", nil) + _, err = executorExec(ctx, executor, session, "delete from user_extra where user_id = 1", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "delete from user_extra where user_id = 1", @@ -708,7 +730,7 @@ func TestDeleteEqual(t *testing.T) { "1|1|foo", ), }) - _, err = executorExec(executor, "delete from user2 where id = 1", nil) + _, err = executorExec(ctx, executor, session, "delete from user2 where id = 1", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{ { @@ -737,8 +759,12 @@ func TestDeleteEqual(t *testing.T) { } func TestUpdateScatter(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - _, err := executorExec(executor, "update user_extra set col = 2", nil) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "update user_extra set col = 2", nil) require.NoError(t, err) // Queries get annotatted. 
wantQueries := []*querypb.BoundQuery{{ @@ -750,8 +776,12 @@ func TestUpdateScatter(t *testing.T) { } func TestDeleteScatter(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - _, err := executorExec(executor, "delete from user_extra", nil) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "delete from user_extra", nil) require.NoError(t, err) // Queries get annotatted. wantQueries := []*querypb.BoundQuery{{ @@ -763,7 +793,7 @@ func TestDeleteScatter(t *testing.T) { } func TestUpdateEqualWithMultipleLookupVindex(t *testing.T) { - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, nil) + executor, sbc1, sbc2, sbcLookup, ctx := createExecutorEnv(t) sbcLookup.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("lu_col|keyspace_id", "int64|varbinary"), @@ -778,7 +808,10 @@ func TestUpdateEqualWithMultipleLookupVindex(t *testing.T) { "1|2|2|2|2|2|1|0", )}) - _, err := executorExec(executor, "update t2_lookup set lu_col = 5 where wo_lu_col = 2 and lu_col = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "update t2_lookup set lu_col = 5 where wo_lu_col = 2 and lu_col = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -816,7 +849,7 @@ func TestUpdateEqualWithMultipleLookupVindex(t *testing.T) { } func TestUpdateUseHigherCostVindexIfBackfilling(t *testing.T) { - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, nil) + executor, sbc1, sbc2, sbcLookup, ctx := createExecutorEnv(t) sbcLookup.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("lu_col|keyspace_id", "int64|varbinary"), @@ -833,7 +866,10 @@ func TestUpdateUseHigherCostVindexIfBackfilling(t *testing.T) { "1|2|2|2|2|2|2|0", )}) - _, err := executorExec(executor, "update 
t2_lookup set lu_col = 5 where wo_lu_col = 2 and lu_col in (1, 2)", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "update t2_lookup set lu_col = 5 where wo_lu_col = 2 and lu_col in (1, 2)", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -891,9 +927,12 @@ func TestDeleteEqualWithNoVerifyAndWriteOnlyLookupUniqueVindex(t *testing.T) { ), "1|1|1|1|1|1|1", )} - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, res) + executor, sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, res) - _, err := executorExec(executor, "delete from t2_lookup where wo_lu_col = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "delete from t2_lookup where wo_lu_col = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -962,7 +1001,7 @@ func TestDeleteEqualWithNoVerifyAndWriteOnlyLookupUniqueVindex(t *testing.T) { } func TestDeleteEqualWithMultipleLookupVindex(t *testing.T) { - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, nil) + executor, sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, nil) sbcLookup.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("lu_col|keyspace_id", "int64|varbinary"), @@ -977,7 +1016,10 @@ func TestDeleteEqualWithMultipleLookupVindex(t *testing.T) { "1|1|1|1|1|1|1", )}) - _, err := executorExec(executor, "delete from t2_lookup where wo_lu_col = 1 and lu_col = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "delete from t2_lookup where wo_lu_col = 1 and lu_col = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -1040,7 +1082,7 @@ func TestDeleteEqualWithMultipleLookupVindex(t *testing.T) { } func 
TestDeleteUseHigherCostVindexIfBackfilling(t *testing.T) { - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, nil) + executor, sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, nil) sbcLookup.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("lu_col|keyspace_id", "int64|varbinary"), @@ -1057,7 +1099,10 @@ func TestDeleteUseHigherCostVindexIfBackfilling(t *testing.T) { "1|1|1|1|1|1|2", )}) - _, err := executorExec(executor, "delete from t2_lookup where wo_lu_col = 1 and lu_col in (1, 2)", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "delete from t2_lookup where wo_lu_col = 1 and lu_col in (1, 2)", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -1157,9 +1202,12 @@ func TestDeleteUseHigherCostVindexIfBackfilling(t *testing.T) { } func TestDeleteByDestination(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - // This query is not supported in v3, so we know for sure is taking the DeleteByDestination route - _, err := executorExec(executor, "delete from `TestExecutor[-]`.user_extra limit 10", nil) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "delete from `TestExecutor[-]`.user_extra limit 10", nil) require.NoError(t, err) // Queries get annotatted. 
wantQueries := []*querypb.BoundQuery{{ @@ -1171,7 +1219,7 @@ func TestDeleteByDestination(t *testing.T) { } func TestDeleteComments(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) sbc.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ @@ -1185,7 +1233,10 @@ func TestDeleteComments(t *testing.T) { sqltypes.NewVarChar("myname"), }}, }}) - _, err := executorExec(executor, "delete from user where id = 1 /* trailing */", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "delete from user where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select Id, `name` from `user` where id = 1 for update /* trailing */", @@ -1207,19 +1258,21 @@ func TestDeleteComments(t *testing.T) { } func TestInsertSharded(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - _, err := executorExec(executor, "insert into user(id, v, name) values (1, 2, 'myname')", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "insert into user(id, v, name) values (1, 2, 'myname')", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into `user`(id, v, `name`) values (:_Id_0, 2, :_name_0)", BindVariables: map[string]*querypb.BindVariable{ "_Id_0": sqltypes.Int64BindVariable(1), "_name_0": sqltypes.StringBindVariable("myname"), - "__seq0": sqltypes.Int64BindVariable(1), }, }} assertQueries(t, sbc1, wantQueries) @@ -1233,19 +1286,18 @@ func TestInsertSharded(t *testing.T) { }} assertQueries(t, sbclookup, wantQueries) - testQueryLog(t, logChan, 
"MarkSavepoint", "SAVEPOINT", "savepoint x", 0) - testQueryLog(t, logChan, "VindexCreate", "INSERT", "insert into name_user_map(`name`, user_id) values (:name_0, :user_id_0)", 1) - testQueryLog(t, logChan, "TestExecute", "INSERT", "insert into `user`(id, v, `name`) values (1, 2, 'myname')", 1) + testQueryLog(t, executor, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 0) + testQueryLog(t, executor, logChan, "VindexCreate", "INSERT", "insert into name_user_map(`name`, user_id) values (:name_0, :user_id_0)", 1) + testQueryLog(t, executor, logChan, "TestExecute", "INSERT", "insert into `user`(id, v, `name`) values (1, 2, 'myname')", 1) sbc1.Queries = nil sbclookup.Queries = nil - _, err = executorExec(executor, "insert into user(id, v, name) values (3, 2, 'myname2')", nil) + _, err = executorExec(ctx, executor, session, "insert into user(id, v, name) values (3, 2, 'myname2')", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "insert into `user`(id, v, `name`) values (:_Id_0, 2, :_name_0)", BindVariables: map[string]*querypb.BindVariable{ "_Id_0": sqltypes.Int64BindVariable(3), - "__seq0": sqltypes.Int64BindVariable(3), "_name_0": sqltypes.StringBindVariable("myname2"), }, }} @@ -1259,12 +1311,12 @@ func TestInsertSharded(t *testing.T) { }, }} assertQueries(t, sbclookup, wantQueries) - testQueryLog(t, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 2) - testQueryLog(t, logChan, "VindexCreate", "INSERT", "insert into name_user_map(`name`, user_id) values (:name_0, :user_id_0)", 1) - testQueryLog(t, logChan, "TestExecute", "INSERT", "insert into `user`(id, v, `name`) values (3, 2, 'myname2')", 1) + testQueryLog(t, executor, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 2) + testQueryLog(t, executor, logChan, "VindexCreate", "INSERT", "insert into name_user_map(`name`, user_id) values (:name_0, :user_id_0)", 1) + testQueryLog(t, executor, logChan, "TestExecute", "INSERT", "insert into `user`(id, v, `name`) values (3, 2, 
'myname2')", 1) sbc1.Queries = nil - _, err = executorExec(executor, "insert into user2(id, name, lastname) values (2, 'myname', 'mylastname')", nil) + _, err = executorExec(ctx, executor, session, "insert into user2(id, name, lastname) values (2, 'myname', 'mylastname')", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "insert into user2(id, `name`, lastname) values (:_id_0, :_name_0, :_lastname_0)", @@ -1275,26 +1327,23 @@ func TestInsertSharded(t *testing.T) { }, }} assertQueries(t, sbc1, wantQueries) - testQueryLog(t, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 3) - testQueryLog(t, logChan, "VindexCreate", "INSERT", "insert into name_lastname_keyspace_id_map(`name`, lastname, keyspace_id) values (:name_0, :lastname_0, :keyspace_id_0)", 1) - testQueryLog(t, logChan, "TestExecute", "INSERT", "insert into user2(id, `name`, lastname) values (2, 'myname', 'mylastname')", 1) + testQueryLog(t, executor, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 3) + testQueryLog(t, executor, logChan, "VindexCreate", "INSERT", "insert into name_lastname_keyspace_id_map(`name`, lastname, keyspace_id) values (:name_0, :lastname_0, :keyspace_id_0)", 1) + testQueryLog(t, executor, logChan, "TestExecute", "INSERT", "insert into user2(id, `name`, lastname) values (2, 'myname', 'mylastname')", 1) // insert with binary values executor.normalize = true sbc1.Queries = nil sbc2.Queries = nil sbclookup.Queries = nil - _, err = executorExec(executor, "insert into user(id, v, name) values (1, 2, _binary 'myname')", nil) + _, err = executorExec(ctx, executor, session, "insert into user(id, v, name) values (1, 2, _binary 'myname')", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "insert into `user`(id, v, `name`) values (:_Id_0, :vtg2 /* INT64 */, :_name_0)", BindVariables: map[string]*querypb.BindVariable{ "_Id_0": sqltypes.Int64BindVariable(1), "_name_0": sqltypes.BytesBindVariable([]byte("myname")), - "__seq0": 
sqltypes.Int64BindVariable(1), - "vtg1": sqltypes.Int64BindVariable(1), "vtg2": sqltypes.Int64BindVariable(2), - "vtg3": sqltypes.StringBindVariable("myname"), }, }} assertQueries(t, sbc1, wantQueries) @@ -1308,16 +1357,55 @@ func TestInsertSharded(t *testing.T) { }} assertQueries(t, sbclookup, wantQueries) - testQueryLog(t, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 3) - testQueryLog(t, logChan, "VindexCreate", "INSERT", "insert into name_user_map(`name`, user_id) values (:name_0, :user_id_0)", 1) - testQueryLog(t, logChan, "TestExecute", "INSERT", "insert into `user`(id, v, `name`) values (:vtg1 /* INT64 */, :vtg2 /* INT64 */, _binary :vtg3 /* VARCHAR */)", 1) + testQueryLog(t, executor, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 3) + testQueryLog(t, executor, logChan, "VindexCreate", "INSERT", "insert into name_user_map(`name`, user_id) values (:name_0, :user_id_0)", 1) + testQueryLog(t, executor, logChan, "TestExecute", "INSERT", "insert into `user`(id, v, `name`) values (:vtg1 /* INT64 */, :vtg2 /* INT64 */, _binary :vtg3 /* VARCHAR */)", 1) +} + +func TestInsertNegativeValue(t *testing.T) { + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) + executor.normalize = true + + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) + + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "insert into user(id, v, name) values (1, -2, 'myname')", nil) + require.NoError(t, err) + wantQueries := []*querypb.BoundQuery{{ + Sql: "insert into `user`(id, v, `name`) values (:_Id_0, -:vtg2 /* INT64 */, :_name_0)", + BindVariables: map[string]*querypb.BindVariable{ + "_Id_0": sqltypes.Int64BindVariable(1), + "vtg2": sqltypes.Int64BindVariable(2), + "_name_0": sqltypes.StringBindVariable("myname"), + }, + }} + assertQueries(t, sbc1, wantQueries) + assertQueries(t, sbc2, nil) + wantQueries = []*querypb.BoundQuery{{ + Sql: "insert into 
name_user_map(`name`, user_id) values (:name_0, :user_id_0)", + BindVariables: map[string]*querypb.BindVariable{ + "name_0": sqltypes.StringBindVariable("myname"), + "user_id_0": sqltypes.Uint64BindVariable(1), + }, + }} + assertQueries(t, sbclookup, wantQueries) + + testQueryLog(t, executor, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 0) + testQueryLog(t, executor, logChan, "VindexCreate", "INSERT", "insert into name_user_map(`name`, user_id) values (:name_0, :user_id_0)", 1) + testQueryLog(t, executor, logChan, "TestExecute", "INSERT", "insert into `user`(id, v, `name`) values (:vtg1 /* INT64 */, -:vtg2 /* INT64 */, :vtg3 /* VARCHAR */)", 1) } func TestInsertShardedKeyrange(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) // If a unique vindex returns a keyrange, we fail the insert - _, err := executorExec(executor, "insert into keyrange_table(krcol_unique, krcol) values(1, 1)", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "insert into keyrange_table(krcol_unique, krcol) values(1, 1)", nil) require.EqualError(t, err, "could not map [INT64(1)] to a unique keyspace id: DestinationKeyRange(-10)") } @@ -1381,9 +1469,9 @@ func TestInsertShardedAutocommitLookup(t *testing.T) { } } ` - executor, sbc1, sbc2, sbclookup := createCustomExecutor(vschema) + executor, sbc1, sbc2, sbclookup, ctx := createCustomExecutor(t, vschema) - _, err := executorExecSession(executor, "insert into user(id, v, name, music) values (1, 2, 'myname', 'star')", nil, &vtgatepb.Session{}) + _, err := executorExecSession(ctx, executor, "insert into user(id, v, name, music) values (1, 2, 'myname', 'star')", nil, &vtgatepb.Session{}) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into `user`(id, v, `name`, music) values (:_Id_0, 2, :_name_0, :_music_0)", @@ -1391,7 +1479,6 @@ func TestInsertShardedAutocommitLookup(t *testing.T) { 
"_Id_0": sqltypes.Int64BindVariable(1), "_music_0": sqltypes.StringBindVariable("star"), "_name_0": sqltypes.StringBindVariable("myname"), - "__seq0": sqltypes.Int64BindVariable(1), }, }} assertQueries(t, sbc1, wantQueries) @@ -1414,7 +1501,7 @@ func TestInsertShardedAutocommitLookup(t *testing.T) { } func TestInsertShardedIgnore(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) // Build the sequence of responses for sbclookup. This should // match the sequence of queries we validate below. @@ -1443,34 +1530,28 @@ func TestInsertShardedIgnore(t *testing.T) { // Fifth row: first shard. // Sixth row: second shard (because 3 hash maps to 40-60). query := "insert ignore into insert_ignore_test(pv, owned, verify) values (1, 1, 1), (2, 2, 2), (3, 3, 1), (4, 4, 4), (5, 5, 1), (6, 6, 3)" - _, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert ignore into insert_ignore_test(pv, owned, verify) values (:_pv_0, :_owned_0, :_verify_0),(:_pv_4, :_owned_4, :_verify_4)", BindVariables: map[string]*querypb.BindVariable{ "_pv_0": sqltypes.Int64BindVariable(1), "_pv_4": sqltypes.Int64BindVariable(5), - "_pv_5": sqltypes.Int64BindVariable(6), "_owned_0": sqltypes.Int64BindVariable(1), "_owned_4": sqltypes.Int64BindVariable(5), - "_owned_5": sqltypes.Int64BindVariable(6), "_verify_0": sqltypes.Int64BindVariable(1), "_verify_4": sqltypes.Int64BindVariable(1), - "_verify_5": sqltypes.Int64BindVariable(3), }, }} assertQueries(t, sbc1, wantQueries) wantQueries = []*querypb.BoundQuery{{ Sql: "insert ignore into insert_ignore_test(pv, owned, verify) values (:_pv_5, :_owned_5, :_verify_5)", BindVariables: map[string]*querypb.BindVariable{ - "_pv_0": sqltypes.Int64BindVariable(1), - "_pv_4": 
sqltypes.Int64BindVariable(5), "_pv_5": sqltypes.Int64BindVariable(6), - "_owned_0": sqltypes.Int64BindVariable(1), - "_owned_4": sqltypes.Int64BindVariable(5), "_owned_5": sqltypes.Int64BindVariable(6), - "_verify_0": sqltypes.Int64BindVariable(1), - "_verify_4": sqltypes.Int64BindVariable(1), "_verify_5": sqltypes.Int64BindVariable(3), }, }} @@ -1545,7 +1626,7 @@ func TestInsertShardedIgnore(t *testing.T) { {}, }) query = "insert ignore into insert_ignore_test(pv, owned, verify) values (1, 1, 1)" - qr, err := executorExec(executor, query, nil) + qr, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) if !qr.Equal(&sqltypes.Result{}) { t.Errorf("qr: %v, want empty result", qr) @@ -1566,13 +1647,16 @@ func TestInsertShardedIgnore(t *testing.T) { func TestInsertOnDupKey(t *testing.T) { // This test just sanity checks that the statement is getting passed through // correctly. The full set of use cases are covered by TestInsertShardedIgnore. - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) sbclookup.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("b|a", "int64|varbinary"), "1|1", )}) query := "insert into insert_ignore_test(pv, owned, verify) values (1, 1, 1) on duplicate key update col = 2" - _, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into insert_ignore_test(pv, owned, verify) values (:_pv_0, :_owned_0, :_verify_0) on duplicate key update col = 2", @@ -1608,33 +1692,35 @@ func TestInsertOnDupKey(t *testing.T) { } func TestAutocommitFail(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) query := "insert into user (id) values (1)" sbc1.MustFailCodes[vtrpcpb.Code_ALREADY_EXISTS] = 
1 - primarySession.Reset() - primarySession.Autocommit = true - defer func() { - primarySession.Autocommit = false - }() - _, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + Autocommit: true, + } + + _, err := executorExec(ctx, executor, session, query, nil) require.Error(t, err) // make sure we have closed and rolled back any transactions started - assert.False(t, primarySession.InTransaction, "left with tx open") + assert.False(t, session.InTransaction, "left with tx open") } func TestInsertComments(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "insert into user(id, v, name) values (1, 2, 'myname') /* trailing */", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "insert into user(id, v, name) values (1, 2, 'myname') /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into `user`(id, v, `name`) values (:_Id_0, 2, :_name_0) /* trailing */", BindVariables: map[string]*querypb.BindVariable{ "_Id_0": sqltypes.Int64BindVariable(1), "_name_0": sqltypes.StringBindVariable("myname"), - "__seq0": sqltypes.Int64BindVariable(1), }, }} assertQueries(t, sbc1, wantQueries) @@ -1650,7 +1736,7 @@ func TestInsertComments(t *testing.T) { } func TestInsertGeneratorSharded(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) sbclookup.SetResults([]*sqltypes.Result{{ Rows: [][]sqltypes.Value{{ @@ -1659,13 +1745,15 @@ func TestInsertGeneratorSharded(t *testing.T) { RowsAffected: 1, InsertID: 1, }}) - result, err := executorExec(executor, "insert into user(v, `name`) values (2, 'myname')", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "insert 
into user(v, `name`) values (2, 'myname')", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into `user`(v, `name`, id) values (2, :_name_0, :_Id_0)", BindVariables: map[string]*querypb.BindVariable{ "_Id_0": sqltypes.Int64BindVariable(1), - "__seq0": sqltypes.Int64BindVariable(1), "_name_0": sqltypes.StringBindVariable("myname"), }, }} @@ -1689,7 +1777,7 @@ func TestInsertGeneratorSharded(t *testing.T) { } func TestInsertAutoincSharded(t *testing.T) { - router, sbc, _, _ := createExecutorEnv() + router, sbc, _, _, ctx := createExecutorEnv(t) // Fake a mysql auto-inc response. wantResult := &sqltypes.Result{ @@ -1700,7 +1788,10 @@ func TestInsertAutoincSharded(t *testing.T) { InsertID: 2, } sbc.SetResults([]*sqltypes.Result{wantResult}) - result, err := executorExec(router, "insert into user_extra(user_id) values (2)", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, router, session, "insert into user_extra(user_id) values (2)", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into user_extra(user_id) values (:_user_id_0)", @@ -1712,12 +1803,15 @@ func TestInsertAutoincSharded(t *testing.T) { if !result.Equal(wantResult) { t.Errorf("result: %+v, want %+v", result, wantResult) } - assert.EqualValues(t, 2, primarySession.LastInsertId) + assert.EqualValues(t, 2, session.LastInsertId) } func TestInsertGeneratorUnsharded(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() - result, err := executorExec(executor, "insert into main1(id, name) values (null, 'myname')", nil) + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "insert into main1(id, name) values (null, 'myname')", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select next :n /* INT64 */ values from user_seq", @@ -1737,10 +1831,10 
@@ func TestInsertGeneratorUnsharded(t *testing.T) { } func TestInsertAutoincUnsharded(t *testing.T) { - router, _, _, sbclookup := createExecutorEnv() + router, _, _, sbclookup, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := router.queryLogger.Subscribe("Test") + defer router.queryLogger.Unsubscribe(logChan) // Fake a mysql auto-inc response. query := "insert into `simple`(val) values ('val')" @@ -1753,7 +1847,10 @@ func TestInsertAutoincUnsharded(t *testing.T) { } sbclookup.SetResults([]*sqltypes.Result{wantResult}) - result, err := executorExec(router, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, router, session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: query, @@ -1762,20 +1859,22 @@ func TestInsertAutoincUnsharded(t *testing.T) { assertQueries(t, sbclookup, wantQueries) assert.Equal(t, result, wantResult) - testQueryLog(t, logChan, "TestExecute", "INSERT", "insert into `simple`(val) values ('val')", 1) + testQueryLog(t, router, logChan, "TestExecute", "INSERT", "insert into `simple`(val) values ('val')", 1) } func TestInsertLookupOwned(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "insert into music(user_id, id) values (2, 3)", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "insert into music(user_id, id) values (2, 3)", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into music(user_id, id) values (:_user_id_0, :_id_0)", BindVariables: map[string]*querypb.BindVariable{ "_user_id_0": sqltypes.Int64BindVariable(2), "_id_0": sqltypes.Int64BindVariable(3), - "__seq0": sqltypes.Int64BindVariable(3), }, }} assertQueries(t, sbc, wantQueries) @@ -1790,7 +1889,7 @@ func 
TestInsertLookupOwned(t *testing.T) { } func TestInsertLookupOwnedGenerator(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) sbclookup.SetResults([]*sqltypes.Result{{ Rows: [][]sqltypes.Value{{ @@ -1799,14 +1898,16 @@ func TestInsertLookupOwnedGenerator(t *testing.T) { RowsAffected: 1, InsertID: 1, }}) - result, err := executorExec(executor, "insert into music(user_id) values (2)", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "insert into music(user_id) values (2)", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into music(user_id, id) values (:_user_id_0, :_id_0)", BindVariables: map[string]*querypb.BindVariable{ "_user_id_0": sqltypes.Int64BindVariable(2), "_id_0": sqltypes.Int64BindVariable(4), - "__seq0": sqltypes.Int64BindVariable(4), }, }} assertQueries(t, sbc, wantQueries) @@ -1829,9 +1930,12 @@ func TestInsertLookupOwnedGenerator(t *testing.T) { } func TestInsertLookupUnowned(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "insert into music_extra(user_id, music_id) values (2, 3)", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "insert into music_extra(user_id, music_id) values (2, 3)", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into music_extra(user_id, music_id) values (:_user_id_0, :_music_id_0)", @@ -1852,12 +1956,15 @@ func TestInsertLookupUnowned(t *testing.T) { } func TestInsertLookupUnownedUnsupplied(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) sbclookup.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("b|a", "int64|varbinary"), 
"3|1", )}) - _, err := executorExec(executor, "insert into music_extra_reversed(music_id) values (3)", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "insert into music_extra_reversed(music_id) values (3)", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into music_extra_reversed(music_id, user_id) values (:_music_id_0, :_user_id_0)", @@ -1881,7 +1988,7 @@ func TestInsertLookupUnownedUnsupplied(t *testing.T) { // If a statement gets broken up into two, and the first one fails, // then an error should be returned normally. func TestInsertPartialFail1(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, _ := createExecutorEnv(t) // Make the first DML fail, there should be no rollback. sbclookup.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 @@ -1889,6 +1996,7 @@ func TestInsertPartialFail1(t *testing.T) { _, err := executor.Execute( context.Background(), nil, + nil, "TestExecute", NewSafeSession(&vtgatepb.Session{InTransaction: true}), "insert into user(id, v, name) values (1, 2, 'myname')", @@ -1901,7 +2009,7 @@ func TestInsertPartialFail1(t *testing.T) { // after successful execution of the first, then the transaction must // be rolled back due to partial execution. func TestInsertPartialFail2(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) // Make the second DML fail, it should result in a rollback. 
sbc1.MustFailExecute[sqlparser.StmtInsert] = 1 @@ -1910,6 +2018,7 @@ func TestInsertPartialFail2(t *testing.T) { _, err := executor.Execute( context.Background(), nil, + nil, "TestExecute", safeSession, "insert into user(id, v, name) values (1, 2, 'myname')", @@ -1930,7 +2039,6 @@ func TestInsertPartialFail2(t *testing.T) { Sql: "insert into `user`(id, v, `name`) values (:_Id_0, 2, :_name_0)", BindVariables: map[string]*querypb.BindVariable{ "_Id_0": sqltypes.Int64BindVariable(1), - "__seq0": sqltypes.Int64BindVariable(1), "_name_0": sqltypes.StringBindVariable("myname"), }, }, { @@ -1941,31 +2049,26 @@ func TestInsertPartialFail2(t *testing.T) { } func TestMultiInsertSharded(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "insert into user(id, v, name) values (1, 1, 'myname1'),(3, 3, 'myname3')", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "insert into user(id, v, name) values (1, 1, 'myname1'),(3, 3, 'myname3')", nil) require.NoError(t, err) wantQueries1 := []*querypb.BoundQuery{{ Sql: "insert into `user`(id, v, `name`) values (:_Id_0, 1, :_name_0)", BindVariables: map[string]*querypb.BindVariable{ "_Id_0": sqltypes.Int64BindVariable(1), "_name_0": sqltypes.StringBindVariable("myname1"), - "__seq0": sqltypes.Int64BindVariable(1), - "_Id_1": sqltypes.Int64BindVariable(3), - "_name_1": sqltypes.StringBindVariable("myname3"), - "__seq1": sqltypes.Int64BindVariable(3), }, }} wantQueries2 := []*querypb.BoundQuery{{ Sql: "insert into `user`(id, v, `name`) values (:_Id_1, 3, :_name_1)", BindVariables: map[string]*querypb.BindVariable{ - "_Id_0": sqltypes.Int64BindVariable(1), - "_name_0": sqltypes.StringBindVariable("myname1"), - "__seq0": sqltypes.Int64BindVariable(1), "_Id_1": sqltypes.Int64BindVariable(3), "_name_1": sqltypes.StringBindVariable("myname3"), - "__seq1": 
sqltypes.Int64BindVariable(3), }, }} assertQueries(t, sbc1, wantQueries1) @@ -1985,16 +2088,14 @@ func TestMultiInsertSharded(t *testing.T) { sbc1.Queries = nil sbclookup.Queries = nil sbc2.Queries = nil - _, err = executorExec(executor, "insert into user(id, v, name) values (1, 1, 'myname1'),(2, 2, 'myname2')", nil) + _, err = executorExec(ctx, executor, session, "insert into user(id, v, name) values (1, 1, 'myname1'),(2, 2, 'myname2')", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into `user`(id, v, `name`) values (:_Id_0, 1, :_name_0),(:_Id_1, 2, :_name_1)", BindVariables: map[string]*querypb.BindVariable{ "_Id_0": sqltypes.Int64BindVariable(1), - "__seq0": sqltypes.Int64BindVariable(1), "_name_0": sqltypes.StringBindVariable("myname1"), "_Id_1": sqltypes.Int64BindVariable(2), - "__seq1": sqltypes.Int64BindVariable(2), "_name_1": sqltypes.StringBindVariable("myname2"), }, }} @@ -2016,7 +2117,7 @@ func TestMultiInsertSharded(t *testing.T) { sbc1.Queries = nil sbclookup.Queries = nil sbc2.Queries = nil - _, err = executorExec(executor, "insert into user2(id, `name`, lastname) values (2, 'myname', 'mylastname'), (3, 'myname2', 'mylastname2')", nil) + _, err = executorExec(ctx, executor, session, "insert into user2(id, `name`, lastname) values (2, 'myname', 'mylastname'), (3, 'myname2', 'mylastname2')", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "insert into user2(id, `name`, lastname) values (:_id_0, :_name_0, :_lastname_0)", @@ -2024,12 +2125,18 @@ func TestMultiInsertSharded(t *testing.T) { "_id_0": sqltypes.Int64BindVariable(2), "_name_0": sqltypes.StringBindVariable("myname"), "_lastname_0": sqltypes.StringBindVariable("mylastname"), + }, + }} + assertQueries(t, sbc1, wantQueries) + wantQueries = []*querypb.BoundQuery{{ + Sql: "insert into user2(id, `name`, lastname) values (:_id_1, :_name_1, :_lastname_1)", + BindVariables: map[string]*querypb.BindVariable{ "_id_1": sqltypes.Int64BindVariable(3), 
"_name_1": sqltypes.StringBindVariable("myname2"), "_lastname_1": sqltypes.StringBindVariable("mylastname2"), }, }} - assertQueries(t, sbc1, wantQueries) + assertQueries(t, sbc2, wantQueries) wantQueries = []*querypb.BoundQuery{{ Sql: "insert into name_lastname_keyspace_id_map(`name`, lastname, keyspace_id) values (:name_0, :lastname_0, :keyspace_id_0), (:name_1, :lastname_1, :keyspace_id_1)", BindVariables: map[string]*querypb.BindVariable{ @@ -2045,7 +2152,7 @@ func TestMultiInsertSharded(t *testing.T) { } func TestMultiInsertGenerator(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) sbclookup.SetResults([]*sqltypes.Result{{ Rows: [][]sqltypes.Value{{ @@ -2054,17 +2161,17 @@ func TestMultiInsertGenerator(t *testing.T) { RowsAffected: 1, InsertID: 1, }}) - result, err := executorExec(executor, "insert into music(user_id, `name`) values (:u, 'myname1'),(:u, 'myname2')", map[string]*querypb.BindVariable{"u": sqltypes.Int64BindVariable(2)}) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "insert into music(user_id, `name`) values (:u, 'myname1'),(:u, 'myname2')", map[string]*querypb.BindVariable{"u": sqltypes.Int64BindVariable(2)}) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into music(user_id, `name`, id) values (:_user_id_0, 'myname1', :_id_0),(:_user_id_1, 'myname2', :_id_1)", BindVariables: map[string]*querypb.BindVariable{ - "u": sqltypes.Int64BindVariable(2), "_id_0": sqltypes.Int64BindVariable(1), - "__seq0": sqltypes.Int64BindVariable(1), "_user_id_0": sqltypes.Int64BindVariable(2), "_id_1": sqltypes.Int64BindVariable(2), - "__seq1": sqltypes.Int64BindVariable(2), "_user_id_1": sqltypes.Int64BindVariable(2), }, }} @@ -2090,7 +2197,7 @@ func TestMultiInsertGenerator(t *testing.T) { } func TestMultiInsertGeneratorSparse(t *testing.T) { - executor, sbc, _, sbclookup := 
createExecutorEnv() + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) sbclookup.SetResults([]*sqltypes.Result{{ Rows: [][]sqltypes.Value{{ @@ -2099,20 +2206,19 @@ func TestMultiInsertGeneratorSparse(t *testing.T) { RowsAffected: 1, InsertID: 1, }}) - result, err := executorExec(executor, "insert into music(id, user_id, name) values (NULL, :u, 'myname1'),(2, :u, 'myname2'), (NULL, :u, 'myname3')", map[string]*querypb.BindVariable{"u": sqltypes.Int64BindVariable(2)}) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "insert into music(id, user_id, name) values (NULL, :u, 'myname1'),(2, :u, 'myname2'), (NULL, :u, 'myname3')", map[string]*querypb.BindVariable{"u": sqltypes.Int64BindVariable(2)}) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into music(id, user_id, `name`) values (:_id_0, :_user_id_0, 'myname1'),(:_id_1, :_user_id_1, 'myname2'),(:_id_2, :_user_id_2, 'myname3')", BindVariables: map[string]*querypb.BindVariable{ - "u": sqltypes.Int64BindVariable(2), "_id_0": sqltypes.Int64BindVariable(1), - "__seq0": sqltypes.Int64BindVariable(1), "_user_id_0": sqltypes.Int64BindVariable(2), "_id_1": sqltypes.Int64BindVariable(2), - "__seq1": sqltypes.Int64BindVariable(2), "_user_id_1": sqltypes.Int64BindVariable(2), "_id_2": sqltypes.Int64BindVariable(2), - "__seq2": sqltypes.Int64BindVariable(2), "_user_id_2": sqltypes.Int64BindVariable(2), }, }} @@ -2164,10 +2270,13 @@ func TestInsertBadAutoInc(t *testing.T) { } } ` - executor, _, _, _ := createCustomExecutor(vschema) + executor, _, _, _, ctx := createCustomExecutor(t, vschema) // If auto inc table cannot be found, the table should not be added to vschema. 
- _, err := executorExec(executor, "insert into bad_auto(v, name) values (1, 'myname')", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "insert into bad_auto(v, name) values (1, 'myname')", nil) want := "table bad_auto not found" if err == nil || err.Error() != want { t.Errorf("bad auto inc err: %v, want %v", err, want) @@ -2234,10 +2343,12 @@ func TestKeyDestRangeQuery(t *testing.T) { for _, tc := range tests { t.Run(tc.targetString+" - "+tc.inputQuery, func(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) - primarySession.TargetString = tc.targetString - _, err := executorExec(executor, tc.inputQuery, nil) + session := &vtgatepb.Session{ + TargetString: tc.targetString, + } + _, err := executorExec(ctx, executor, session, tc.inputQuery, nil) require.NoError(t, err) if tc.expectedSbc1Query == "" { @@ -2255,13 +2366,13 @@ func TestKeyDestRangeQuery(t *testing.T) { } // it does not work for inserts - executor, _, _, _ := createExecutorEnv() - primarySession.TargetString = "TestExecutor[-]" - _, err := executorExec(executor, insertInput, nil) + executor, _, _, _, ctx := createExecutorEnv(t) + session := &vtgatepb.Session{ + TargetString: "TestExecutor[-]", + } + _, err := executorExec(ctx, executor, session, insertInput, nil) require.EqualError(t, err, "VT03023: INSERT not supported when targeting a key range: TestExecutor[-]") - - primarySession.TargetString = "" } func assertQueriesContain(t *testing.T, sql, sbcName string, sbc *sandboxconn.SandboxConn) { @@ -2275,12 +2386,15 @@ func assertQueriesContain(t *testing.T, sql, sbcName string, sbc *sandboxconn.Sa // Prepared statement tests func TestUpdateEqualWithPrepare(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer 
QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - _, err := executorPrepare(executor, "update music set a = :a0 where id = :id0", map[string]*querypb.BindVariable{ + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorPrepare(ctx, executor, session, "update music set a = :a0 where id = :id0", map[string]*querypb.BindVariable{ "a0": sqltypes.Int64BindVariable(3), "id0": sqltypes.Int64BindVariable(2), }) @@ -2293,12 +2407,15 @@ func TestUpdateEqualWithPrepare(t *testing.T) { assertQueries(t, sbc1, nil) } func TestInsertShardedWithPrepare(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - _, err := executorPrepare(executor, "insert into user(id, v, name) values (:_Id0, 2, ':_name_0')", map[string]*querypb.BindVariable{ + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorPrepare(ctx, executor, session, "insert into user(id, v, name) values (:_Id0, 2, ':_name_0')", map[string]*querypb.BindVariable{ "_Id0": sqltypes.Int64BindVariable(1), "_name_0": sqltypes.BytesBindVariable([]byte("myname")), "__seq0": sqltypes.Int64BindVariable(1), @@ -2314,8 +2431,12 @@ func TestInsertShardedWithPrepare(t *testing.T) { } func TestDeleteEqualWithPrepare(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() - _, err := executorPrepare(executor, "delete from user where id = :id0", map[string]*querypb.BindVariable{ + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) + + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorPrepare(ctx, executor, session, "delete from user where id = :id0", map[string]*querypb.BindVariable{ 
"id0": sqltypes.Int64BindVariable(1), }) require.NoError(t, err) @@ -2328,12 +2449,16 @@ func TestDeleteEqualWithPrepare(t *testing.T) { } func TestUpdateLastInsertID(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) + executor.normalize = true sql := "update user set a = last_insert_id() where id = 1" - primarySession.LastInsertId = 43 - _, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) + session := &vtgatepb.Session{ + TargetString: "@primary", + LastInsertId: 43, + } + _, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "update `user` set a = :__lastInsertId where id = :id /* INT64 */", @@ -2346,12 +2471,15 @@ func TestUpdateLastInsertID(t *testing.T) { } func TestUpdateReference(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - _, err := executorExec(executor, "update zip_detail set status = 'CLOSED' where id = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "update zip_detail set status = 'CLOSED' where id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "update zip_detail set `status` = 'CLOSED' where id = 1", @@ -2361,11 +2489,11 @@ func TestUpdateReference(t *testing.T) { assertQueries(t, sbc2, nil) assertQueries(t, sbclookup, wantQueries) - testQueryLog(t, logChan, "TestExecute", "UPDATE", "update zip_detail set `status` = 'CLOSED' where id = 1", 1) + testQueryLog(t, executor, logChan, "TestExecute", "UPDATE", "update zip_detail set `status` = 'CLOSED' where id = 1", 1) sbclookup.Queries = nil - 
_, err = executorExec(executor, "update TestUnsharded.zip_detail set status = 'CLOSED' where id = 1", nil) + _, err = executorExec(ctx, executor, session, "update TestUnsharded.zip_detail set status = 'CLOSED' where id = 1", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "update zip_detail set `status` = 'CLOSED' where id = 1", @@ -2375,22 +2503,25 @@ func TestUpdateReference(t *testing.T) { assertQueries(t, sbc2, nil) assertQueries(t, sbclookup, wantQueries) - testQueryLog(t, logChan, "TestExecute", "UPDATE", + testQueryLog(t, executor, logChan, "TestExecute", "UPDATE", "update TestUnsharded.zip_detail set `status` = 'CLOSED' where id = 1", 1) sbclookup.Queries = nil - _, err = executorExec(executor, "update TestExecutor.zip_detail set status = 'CLOSED' where id = 1", nil) - require.Error(t, err) + _, err = executorExec(ctx, executor, session, "update TestExecutor.zip_detail set status = 'CLOSED' where id = 1", nil) + require.NoError(t, err) // Gen4 planner can redirect the query to correct source for update when reference table is involved. 
} func TestDeleteLookupOwnedEqual(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) sbc1.SetResults([]*sqltypes.Result{ sqltypes.MakeTestResult(sqltypes.MakeTestFields("uniq_col|keyspace_id", "int64|varbinary"), "1|N±\u0090ɢú\u0016\u009C"), }) - _, err := executorExec(executor, "delete from t1 where unq_col = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "delete from t1 where unq_col = 1", nil) require.NoError(t, err) tupleBindVar, _ := sqltypes.BuildBindVariable([]int64{1}) sbc1wantQueries := []*querypb.BoundQuery{{ @@ -2412,12 +2543,15 @@ func TestDeleteLookupOwnedEqual(t *testing.T) { } func TestDeleteReference(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - _, err := executorExec(executor, "delete from zip_detail where id = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "delete from zip_detail where id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "delete from zip_detail where id = 1", @@ -2427,11 +2561,11 @@ func TestDeleteReference(t *testing.T) { assertQueries(t, sbc2, nil) assertQueries(t, sbclookup, wantQueries) - testQueryLog(t, logChan, "TestExecute", "DELETE", "delete from zip_detail where id = 1", 1) + testQueryLog(t, executor, logChan, "TestExecute", "DELETE", "delete from zip_detail where id = 1", 1) sbclookup.Queries = nil - _, err = executorExec(executor, "delete from zip_detail where id = 1", nil) + _, err = executorExec(ctx, executor, session, "delete from zip_detail where id = 1", nil) require.NoError(t, err) wantQueries = 
[]*querypb.BoundQuery{{ Sql: "delete from zip_detail where id = 1", @@ -2441,24 +2575,23 @@ func TestDeleteReference(t *testing.T) { assertQueries(t, sbc2, nil) assertQueries(t, sbclookup, wantQueries) - testQueryLog(t, logChan, "TestExecute", "DELETE", "delete from zip_detail where id = 1", 1) + testQueryLog(t, executor, logChan, "TestExecute", "DELETE", "delete from zip_detail where id = 1", 1) sbclookup.Queries = nil - _, err = executorExec(executor, "delete from TestExecutor.zip_detail where id = 1", nil) - require.Error(t, err) + _, err = executorExec(ctx, executor, session, "delete from TestExecutor.zip_detail where id = 1", nil) + require.NoError(t, err) // Gen4 planner can redirect the query to correct source for update when reference table is involved. } func TestReservedConnDML(t *testing.T) { - executor, _, _, sbc := createExecutorEnv() + executor, _, _, sbc, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("TestReservedConnDML") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("TestReservedConnDML") + defer executor.queryLogger.Unsubscribe(logChan) - ctx := context.Background() session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true}) - _, err := executor.Execute(ctx, nil, "TestReservedConnDML", session, "use "+KsTestUnsharded, nil) + _, err := executor.Execute(ctx, nil, nil, "TestReservedConnDML", session, "use "+KsTestUnsharded, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ @@ -2467,48 +2600,47 @@ func TestReservedConnDML(t *testing.T) { sbc.SetResults([]*sqltypes.Result{ sqltypes.MakeTestResult(sqltypes.MakeTestFields("id", "int64"), "1"), }) - _, err = executor.Execute(ctx, nil, "TestReservedConnDML", session, "set default_week_format = 1", nil) + _, err = executor.Execute(ctx, nil, nil, "TestReservedConnDML", session, "set default_week_format = 1", nil) require.NoError(t, err) assertQueries(t, sbc, wantQueries) - _, err = executor.Execute(ctx, nil, 
"TestReservedConnDML", session, "begin", nil) + _, err = executor.Execute(ctx, nil, nil, "TestReservedConnDML", session, "begin", nil) require.NoError(t, err) wantQueries = append(wantQueries, &querypb.BoundQuery{Sql: "set default_week_format = 1", BindVariables: map[string]*querypb.BindVariable{}}, &querypb.BoundQuery{Sql: "insert into `simple`() values ()", BindVariables: map[string]*querypb.BindVariable{}}) - _, err = executor.Execute(ctx, nil, "TestReservedConnDML", session, "insert into `simple`() values ()", nil) + _, err = executor.Execute(ctx, nil, nil, "TestReservedConnDML", session, "insert into `simple`() values ()", nil) require.NoError(t, err) assertQueries(t, sbc, wantQueries) - _, err = executor.Execute(ctx, nil, "TestReservedConnDML", session, "commit", nil) + _, err = executor.Execute(ctx, nil, nil, "TestReservedConnDML", session, "commit", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, nil, "TestReservedConnDML", session, "begin", nil) + _, err = executor.Execute(ctx, nil, nil, "TestReservedConnDML", session, "begin", nil) require.NoError(t, err) - sbc.EphemeralShardErr = mysql.NewSQLError(mysql.CRServerGone, mysql.SSNetError, "connection gone") + sbc.EphemeralShardErr = sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSNetError, "connection gone") // as the first time the query fails due to connection loss i.e. reserved conn lost. It will be recreated to set statement will be executed again. 
wantQueries = append(wantQueries, &querypb.BoundQuery{Sql: "set default_week_format = 1", BindVariables: map[string]*querypb.BindVariable{}}, &querypb.BoundQuery{Sql: "insert into `simple`() values ()", BindVariables: map[string]*querypb.BindVariable{}}) - _, err = executor.Execute(ctx, nil, "TestReservedConnDML", session, "insert into `simple`() values ()", nil) + _, err = executor.Execute(ctx, nil, nil, "TestReservedConnDML", session, "insert into `simple`() values ()", nil) require.NoError(t, err) assertQueries(t, sbc, wantQueries) - _, err = executor.Execute(ctx, nil, "TestReservedConnDML", session, "commit", nil) + _, err = executor.Execute(ctx, nil, nil, "TestReservedConnDML", session, "commit", nil) require.NoError(t, err) } func TestStreamingDML(t *testing.T) { method := "TestStreamingDML" - executor, _, _, sbc := createExecutorEnv() + executor, _, _, sbc, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe(method) - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe(method) + defer executor.queryLogger.Unsubscribe(logChan) - ctx := context.Background() session := NewAutocommitSession(&vtgatepb.Session{}) tcases := []struct { @@ -2569,7 +2701,7 @@ func TestStreamingDML(t *testing.T) { for _, tcase := range tcases { sbc.Queries = nil sbc.SetResults([]*sqltypes.Result{tcase.result}) - err := executor.StreamExecute(ctx, nil, method, session, tcase.query, nil, func(result *sqltypes.Result) error { + err := executor.StreamExecute(ctx, nil, nil, method, session, tcase.query, nil, func(result *sqltypes.Result) error { qr = result return nil }) @@ -2588,16 +2720,16 @@ func TestStreamingDML(t *testing.T) { } func TestPartialVindexInsertQueryFailure(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer 
executor.queryLogger.Unsubscribe(logChan) session := NewAutocommitSession(&vtgatepb.Session{}) require.True(t, session.GetAutocommit()) require.False(t, session.InTransaction()) - _, err := executorExecSession(executor, "begin", nil, session.Session) + _, err := executorExecSession(ctx, executor, "begin", nil, session.Session) require.NoError(t, err) require.True(t, session.GetAutocommit()) require.True(t, session.InTransaction()) @@ -2610,19 +2742,15 @@ func TestPartialVindexInsertQueryFailure(t *testing.T) { }, { Sql: "insert into t1_lkp_idx(unq_col, keyspace_id) values (:_unq_col_0, :keyspace_id_0)", BindVariables: map[string]*querypb.BindVariable{ - "unq_col_0": sqltypes.Int64BindVariable(1), "keyspace_id_0": sqltypes.BytesBindVariable([]byte("\x16k@\xb4J\xbaK\xd6")), - "unq_col_1": sqltypes.Int64BindVariable(3), - "keyspace_id_1": sqltypes.BytesBindVariable([]byte("\x06\xe7\xea\"Βp\x8f")), "_unq_col_0": sqltypes.Int64BindVariable(1), - "_unq_col_1": sqltypes.Int64BindVariable(3), }, }, { Sql: "rollback to x", BindVariables: map[string]*querypb.BindVariable{}, }} - _, err = executorExecSession(executor, "insert into t1(id, unq_col) values (1, 1), (2, 3)", nil, session.Session) + _, err = executorExecSession(ctx, executor, "insert into t1(id, unq_col) values (1, 1), (2, 3)", nil, session.Session) require.Error(t, err) require.Contains(t, err.Error(), "reverted partial DML execution failure") require.True(t, session.GetAutocommit()) @@ -2632,19 +2760,23 @@ func TestPartialVindexInsertQueryFailure(t *testing.T) { // only parameter in expected query changes wantQ[1].Sql = "insert into t1_lkp_idx(unq_col, keyspace_id) values (:_unq_col_1, :keyspace_id_1)" + wantQ[1].BindVariables = map[string]*querypb.BindVariable{ + "keyspace_id_1": sqltypes.BytesBindVariable([]byte("\x06\xe7\xea\"Βp\x8f")), + "_unq_col_1": sqltypes.Int64BindVariable(3), + } assertQueriesWithSavepoint(t, sbc2, wantQ) - testQueryLog(t, logChan, "TestExecute", "BEGIN", "begin", 0) - testQueryLog(t, 
logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 0) - testQueryLog(t, logChan, "VindexCreate", "SAVEPOINT_ROLLBACK", "rollback to x", 0) - testQueryLog(t, logChan, "TestExecute", "INSERT", "insert into t1(id, unq_col) values (1, 1), (2, 3)", 0) + testQueryLog(t, executor, logChan, "TestExecute", "BEGIN", "begin", 0) + testQueryLog(t, executor, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 0) + testQueryLog(t, executor, logChan, "VindexCreate", "SAVEPOINT_ROLLBACK", "rollback to x", 0) + testQueryLog(t, executor, logChan, "TestExecute", "INSERT", "insert into t1(id, unq_col) values (1, 1), (2, 3)", 0) } func TestPartialVindexInsertQueryFailureAutoCommit(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) session := NewAutocommitSession(&vtgatepb.Session{}) require.True(t, session.GetAutocommit()) @@ -2655,16 +2787,12 @@ func TestPartialVindexInsertQueryFailureAutoCommit(t *testing.T) { wantQ := []*querypb.BoundQuery{{ Sql: "insert into t1_lkp_idx(unq_col, keyspace_id) values (:_unq_col_0, :keyspace_id_0)", BindVariables: map[string]*querypb.BindVariable{ - "unq_col_0": sqltypes.Int64BindVariable(1), "keyspace_id_0": sqltypes.BytesBindVariable([]byte("\x16k@\xb4J\xbaK\xd6")), - "unq_col_1": sqltypes.Int64BindVariable(3), - "keyspace_id_1": sqltypes.BytesBindVariable([]byte("\x06\xe7\xea\"Βp\x8f")), "_unq_col_0": sqltypes.Int64BindVariable(1), - "_unq_col_1": sqltypes.Int64BindVariable(3), }, }} - _, err := executorExecSession(executor, "insert into t1(id, unq_col) values (1, 1), (2, 3)", nil, session.Session) + _, err := executorExecSession(ctx, executor, "insert into t1(id, unq_col) values (1, 1), (2, 3)", nil, session.Session) require.Error(t, err) assert.Contains(t, err.Error(), "transaction rolled 
back to reverse changes of partial DML execution") assert.True(t, session.GetAutocommit()) @@ -2674,10 +2802,14 @@ func TestPartialVindexInsertQueryFailureAutoCommit(t *testing.T) { // only parameter in expected query changes wantQ[0].Sql = "insert into t1_lkp_idx(unq_col, keyspace_id) values (:_unq_col_1, :keyspace_id_1)" + wantQ[0].BindVariables = map[string]*querypb.BindVariable{ + "keyspace_id_1": sqltypes.BytesBindVariable([]byte("\x06\xe7\xea\"Βp\x8f")), + "_unq_col_1": sqltypes.Int64BindVariable(3), + } assertQueriesWithSavepoint(t, sbc2, wantQ) - testQueryLog(t, logChan, "VindexCreate", "INSERT", "insert into t1_lkp_idx(unq_col, keyspace_id) values (:unq_col_0, :keyspace_id_0), (:unq_col_1, :keyspace_id_1)", 2) - testQueryLog(t, logChan, "TestExecute", "INSERT", "insert into t1(id, unq_col) values (1, 1), (2, 3)", 0) + testQueryLog(t, executor, logChan, "VindexCreate", "INSERT", "insert into t1_lkp_idx(unq_col, keyspace_id) values (:unq_col_0, :keyspace_id_0), (:unq_col_1, :keyspace_id_1)", 2) + testQueryLog(t, executor, logChan, "TestExecute", "INSERT", "insert into t1(id, unq_col) values (1, 1), (2, 3)", 0) } // TestMultiInternalSavepoint shows that the internal savepoint created for rolling back any partial dml changes on a failure is not removed from the savepoint list. @@ -2685,14 +2817,14 @@ func TestPartialVindexInsertQueryFailureAutoCommit(t *testing.T) { // The change for it cannot be done as the executor level and will be made at the VTGate entry point. // Test TestMultiInternalSavepointVtGate shows that it fixes the behaviour. 
func TestMultiInternalSavepoint(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) session := NewAutocommitSession(&vtgatepb.Session{}) - _, err := executorExecSession(executor, "begin", nil, session.Session) + _, err := executorExecSession(ctx, executor, "begin", nil, session.Session) require.NoError(t, err) // this query goes to multiple shards so internal savepoint will be created. - _, err = executorExecSession(executor, "insert into user_extra(user_id) values (1), (4)", nil, session.Session) + _, err = executorExecSession(ctx, executor, "insert into user_extra(user_id) values (1), (4)", nil, session.Session) require.NoError(t, err) wantQ := []*querypb.BoundQuery{{ @@ -2702,14 +2834,13 @@ func TestMultiInternalSavepoint(t *testing.T) { Sql: "insert into user_extra(user_id) values (:_user_id_0)", BindVariables: map[string]*querypb.BindVariable{ "_user_id_0": sqltypes.Int64BindVariable(1), - "_user_id_1": sqltypes.Int64BindVariable(4), }, }} assertQueriesWithSavepoint(t, sbc1, wantQ) require.Len(t, sbc2.Queries, 0) sbc1.Queries = nil - _, err = executorExecSession(executor, "insert into user_extra(user_id) values (3), (6)", nil, session.Session) + _, err = executorExecSession(ctx, executor, "insert into user_extra(user_id) values (3), (6)", nil, session.Session) require.NoError(t, err) wantQ = []*querypb.BoundQuery{{ Sql: "savepoint x", @@ -2721,7 +2852,6 @@ func TestMultiInternalSavepoint(t *testing.T) { Sql: "insert into user_extra(user_id) values (:_user_id_0)", BindVariables: map[string]*querypb.BindVariable{ "_user_id_0": sqltypes.Int64BindVariable(3), - "_user_id_1": sqltypes.Int64BindVariable(6), }, }} assertQueriesWithSavepoint(t, sbc2, wantQ) @@ -2733,15 +2863,18 @@ func TestMultiInternalSavepoint(t *testing.T) { } func TestInsertSelectFromDual(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, _ := createExecutorEnv(t) - logChan 
:= QueryLogger.Subscribe("TestInsertSelect") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("TestInsertSelect") + defer executor.queryLogger.Unsubscribe(logChan) session := NewAutocommitSession(&vtgatepb.Session{}) query := "insert into user(id, v, name) select 1, 2, 'myname' from dual" wantQueries := []*querypb.BoundQuery{{ + Sql: "select 1, 2, 'myname' from dual lock in share mode", + BindVariables: map[string]*querypb.BindVariable{}, + }, { Sql: "insert into `user`(id, v, `name`) values (:_c0_0, :_c0_1, :_c0_2)", BindVariables: map[string]*querypb.BindVariable{ "_c0_0": sqltypes.Int64BindVariable(1), @@ -2759,37 +2892,42 @@ func TestInsertSelectFromDual(t *testing.T) { }} for _, workload := range []string{"olap", "oltp"} { - sbc1.Queries = nil - sbc2.Queries = nil - sbclookup.Queries = nil - wQuery := fmt.Sprintf("set @@workload = %s", workload) - _, err := executor.Execute(context.Background(), nil, "TestInsertSelect", session, wQuery, nil) - require.NoError(t, err) + t.Run(workload, func(t *testing.T) { + sbc1.Queries = nil + sbc2.Queries = nil + sbclookup.Queries = nil + wQuery := fmt.Sprintf("set @@workload = %s", workload) + // set result for dual query. 
+ sbc1.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult(sqltypes.MakeTestFields("1|2|myname", "int64|int64|varchar"), "1|2|myname")}) + + _, err := executor.Execute(context.Background(), nil, nil, "TestInsertSelect", session, wQuery, nil) + require.NoError(t, err) - _, err = executor.Execute(context.Background(), nil, "TestInsertSelect", session, query, nil) - require.NoError(t, err) + _, err = executor.Execute(context.Background(), nil, nil, "TestInsertSelect", session, query, nil) + require.NoError(t, err) - assertQueries(t, sbc1, wantQueries) - assertQueries(t, sbc2, nil) - assertQueries(t, sbclookup, wantlkpQueries) + assertQueries(t, sbc1, wantQueries) + assertQueries(t, sbc2, nil) + assertQueries(t, sbclookup, wantlkpQueries) - testQueryLog(t, logChan, "TestInsertSelect", "SET", wQuery, 0) - testQueryLog(t, logChan, "VindexCreate", "INSERT", "insert into name_user_map(`name`, user_id) values (:name_0, :user_id_0)", 1) - testQueryLog(t, logChan, "TestInsertSelect", "INSERT", "insert into `user`(id, v, `name`) select 1, 2, 'myname' from dual", 1) + testQueryLog(t, executor, logChan, "TestInsertSelect", "SET", wQuery, 0) + testQueryLog(t, executor, logChan, "VindexCreate", "INSERT", "insert into name_user_map(`name`, user_id) values (:name_0, :user_id_0)", 1) + testQueryLog(t, executor, logChan, "TestInsertSelect", "INSERT", "insert into `user`(id, v, `name`) select 1, 2, 'myname' from dual", 2) + }) } } func TestInsertSelectFromTable(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, _ := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("TestInsertSelect") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("TestInsertSelect") + defer executor.queryLogger.Unsubscribe(logChan) session := NewAutocommitSession(&vtgatepb.Session{}) query := "insert into user(id, name) select c1, c2 from music" wantQueries := []*querypb.BoundQuery{{ - Sql: "select c1, c2 from music 
for update", + Sql: "select c1, c2 from music lock in share mode", BindVariables: map[string]*querypb.BindVariable{}, }, { Sql: "insert into `user`(id, `name`) values (:_c0_0, :_c0_1), (:_c1_0, :_c1_1), (:_c2_0, :_c2_1), (:_c3_0, :_c3_1), (:_c4_0, :_c4_1), (:_c5_0, :_c5_1), (:_c6_0, :_c6_1), (:_c7_0, :_c7_1)", @@ -2824,29 +2962,32 @@ func TestInsertSelectFromTable(t *testing.T) { sbc2.Queries = nil sbclookup.Queries = nil wQuery := fmt.Sprintf("set @@workload = %s", workload) - _, err := executor.Execute(context.Background(), nil, "TestInsertSelect", session, wQuery, nil) + _, err := executor.Execute(context.Background(), nil, nil, "TestInsertSelect", session, wQuery, nil) require.NoError(t, err) - _, err = executor.Execute(context.Background(), nil, "TestInsertSelect", session, query, nil) + _, err = executor.Execute(context.Background(), nil, nil, "TestInsertSelect", session, query, nil) require.NoError(t, err) assertQueries(t, sbc1, wantQueries) assertQueries(t, sbc2, wantQueries[:1]) // select scatter query went scatter. assertQueries(t, sbclookup, wantlkpQueries) - testQueryLog(t, logChan, "TestInsertSelect", "SET", wQuery, 0) - testQueryLog(t, logChan, "VindexCreate", "INSERT", "insert into name_user_map(`name`, user_id) values (:name_0, :user_id_0), (:name_1, :user_id_1), (:name_2, :user_id_2), (:name_3, :user_id_3), (:name_4, :user_id_4), (:name_5, :user_id_5), (:name_6, :user_id_6), (:name_7, :user_id_7)", 1) - testQueryLog(t, logChan, "TestInsertSelect", "INSERT", "insert into `user`(id, `name`) select c1, c2 from music", 9) // 8 from select and 1 from insert. 
+ testQueryLog(t, executor, logChan, "TestInsertSelect", "SET", wQuery, 0) + testQueryLog(t, executor, logChan, "VindexCreate", "INSERT", "insert into name_user_map(`name`, user_id) values (:name_0, :user_id_0), (:name_1, :user_id_1), (:name_2, :user_id_2), (:name_3, :user_id_3), (:name_4, :user_id_4), (:name_5, :user_id_5), (:name_6, :user_id_6), (:name_7, :user_id_7)", 1) + testQueryLog(t, executor, logChan, "TestInsertSelect", "INSERT", "insert into `user`(id, `name`) select c1, c2 from music", 9) // 8 from select and 1 from insert. } } func TestInsertReference(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - _, err := executorExec(executor, "insert into zip_detail(id, status) values (1, 'CLOSED')", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "insert into zip_detail(id, status) values (1, 'CLOSED')", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into zip_detail(id, `status`) values (1, 'CLOSED')", @@ -2856,11 +2997,11 @@ func TestInsertReference(t *testing.T) { assertQueries(t, sbc2, nil) assertQueries(t, sbclookup, wantQueries) - testQueryLog(t, logChan, "TestExecute", "INSERT", "insert into zip_detail(id, `status`) values (1, 'CLOSED')", 1) + testQueryLog(t, executor, logChan, "TestExecute", "INSERT", "insert into zip_detail(id, `status`) values (1, 'CLOSED')", 1) sbclookup.Queries = nil - _, err = executorExec(executor, "insert into TestUnsharded.zip_detail(id, status) values (1, 'CLOSED')", nil) + _, err = executorExec(ctx, executor, session, "insert into TestUnsharded.zip_detail(id, status) values (1, 'CLOSED')", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ 
Sql: "insert into zip_detail(id, `status`) values (1, 'CLOSED')", @@ -2870,11 +3011,11 @@ func TestInsertReference(t *testing.T) { assertQueries(t, sbc2, nil) assertQueries(t, sbclookup, wantQueries) - testQueryLog(t, logChan, "TestExecute", "INSERT", + testQueryLog(t, executor, logChan, "TestExecute", "INSERT", "insert into TestUnsharded.zip_detail(id, `status`) values (1, 'CLOSED')", 1) sbclookup.Queries = nil - _, err = executorExec(executor, "insert into TestExecutor.zip_detail(id, status) values (1, 'CLOSED')", nil) - require.Error(t, err) + _, err = executorExec(ctx, executor, session, "insert into TestExecutor.zip_detail(id, status) values (1, 'CLOSED')", nil) + require.NoError(t, err) // Gen4 planner can redirect the query to correct source for update when reference table is involved. } diff --git a/go/vt/vtgate/executor_framework_test.go b/go/vt/vtgate/executor_framework_test.go index 7f4c2dcfe97..c59c100fd36 100644 --- a/go/vt/vtgate/executor_framework_test.go +++ b/go/vt/vtgate/executor_framework_test.go @@ -25,27 +25,27 @@ import ( "strings" "testing" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/sidecardb" - "vitess.io/vitess/go/vt/vtgate/logstats" - - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/cache/theine" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/cache" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/log" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/vtgate/logstats" 
"vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/sandboxconn" - - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) //go:embed testdata/executorVSchema.json @@ -128,13 +128,23 @@ func init() { vindexes.Register("keyrange_lookuper_unique", newKeyRangeLookuperUnique) } -func createExecutorEnv() (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn.SandboxConn) { +func createExecutorEnv(t testing.TB) (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn.SandboxConn, ctx context.Context) { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(context.Background()) cell := "aa" - hc := discovery.NewFakeHealthCheck(nil) + hc := discovery.NewFakeHealthCheck(make(chan *discovery.TabletHealth)) + s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - serv := newSandboxForCells([]string{cell}) - serv.topoServer.CreateKeyspace(context.Background(), "TestExecutor", &topodatapb.Keyspace{SidecarDbName: sidecardb.DefaultName}) + sb := createSandbox(KsTestUnsharded) + sb.VSchema = unshardedVSchema + // Use the 'X' in the name to ensure it's not alphabetically first. + // Otherwise, it would become the default keyspace for the dual table. + bad := createSandbox("TestXBadSharding") + bad.VSchema = badVSchema + + serv := newSandboxForCells(ctx, []string{cell}) + serv.topoServer.CreateKeyspace(ctx, KsTestSharded, &topodatapb.Keyspace{SidecarDbName: sidecar.DefaultName}) // Force a new cache to use for lookups of the sidecar database identifier // in use by each keyspace -- as we want to use a different load function // than the one already created by the vtgate as it uses a different topo. 
@@ -151,7 +161,8 @@ func createExecutorEnv() (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn if !created { log.Fatal("Failed to [re]create a sidecar database identifier cache!") } - resolver := newTestResolver(hc, serv, cell) + + resolver := newTestResolver(ctx, hc, serv, cell) sbc1 = hc.AddTestTablet(cell, "-20", 1, "TestExecutor", "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) sbc2 = hc.AddTestTablet(cell, "40-60", 1, "TestExecutor", "40-60", topodatapb.TabletType_PRIMARY, true, 1, nil) // Create these connections so scatter queries don't fail. @@ -162,57 +173,76 @@ func createExecutorEnv() (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn _ = hc.AddTestTablet(cell, "c0-e0", 1, "TestExecutor", "c0-e0", topodatapb.TabletType_PRIMARY, true, 1, nil) _ = hc.AddTestTablet(cell, "e0-", 1, "TestExecutor", "e0-", topodatapb.TabletType_PRIMARY, true, 1, nil) // Below is needed so that SendAnyWherePlan doesn't fail - _ = hc.AddTestTablet(cell, "random", 1, "TestXBadVSchema", "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - createSandbox(KsTestUnsharded) sbclookup = hc.AddTestTablet(cell, "0", 1, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) _ = hc.AddTestTablet(cell, "2", 3, KsTestUnsharded, "0", topodatapb.TabletType_REPLICA, true, 1, nil) - // Ues the 'X' in the name to ensure it's not alphabetically first. - // Otherwise, it would become the default keyspace for the dual table. 
- bad := createSandbox("TestXBadSharding") - bad.VSchema = badVSchema + queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - executor = NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3) + // All these vtgate tests expect plans to be immediately cached after first use; + // this is not the actual behavior of the system in a production context because we use a doorkeeper + // that sometimes can cause a plan to not be cached the very first time it's seen, to prevent + // one-off queries from thrashing the cache. Disable the doorkeeper in the tests to prevent flakiness. + plans := theine.NewStore[PlanCacheKey, *engine.Plan](queryPlanCacheMemory, false) + + executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4) + executor.SetQueryLogger(queryLogger) key.AnyShardPicker = DestinationAnyShardPickerFirstShard{} - // create a new session each time so that ShardSessions don't get re-used across tests - primarySession = &vtgatepb.Session{ - TargetString: "@primary", - } - return executor, sbc1, sbc2, sbclookup + + t.Cleanup(func() { + defer utils.EnsureNoLeaks(t) + executor.Close() + cancel() + }) + + return executor, sbc1, sbc2, sbclookup, ctx } -func createCustomExecutor(vschema string) (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn.SandboxConn) { +func createCustomExecutor(t testing.TB, vschema string) (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn.SandboxConn, ctx context.Context) { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(context.Background()) cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + s := createSandbox(KsTestSharded) s.VSchema = vschema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) - sbc1 = hc.AddTestTablet(cell, "-20", 
1, "TestExecutor", "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - sbc2 = hc.AddTestTablet(cell, "40-60", 1, "TestExecutor", "40-60", topodatapb.TabletType_PRIMARY, true, 1, nil) + sb := createSandbox(KsTestUnsharded) + sb.VSchema = unshardedVSchema - createSandbox(KsTestUnsharded) + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) + sbc1 = hc.AddTestTablet(cell, "-20", 1, KsTestSharded, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) + sbc2 = hc.AddTestTablet(cell, "40-60", 1, KsTestSharded, "40-60", topodatapb.TabletType_PRIMARY, true, 1, nil) sbclookup = hc.AddTestTablet(cell, "0", 1, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - executor = NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3) - // create a new session each time so that ShardSessions don't get re-used across tests - primarySession = &vtgatepb.Session{ - TargetString: "@primary", - } - return executor, sbc1, sbc2, sbclookup + queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) + plans := DefaultPlanCache() + executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4) + executor.SetQueryLogger(queryLogger) + + t.Cleanup(func() { + defer utils.EnsureNoLeaks(t) + executor.Close() + cancel() + }) + + return executor, sbc1, sbc2, sbclookup, ctx } -func createCustomExecutorSetValues(vschema string, values []*sqltypes.Result) (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn.SandboxConn) { +func createCustomExecutorSetValues(t testing.TB, vschema string, values []*sqltypes.Result) (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn.SandboxConn, ctx context.Context) { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(context.Background()) cell := "aa" hc := 
discovery.NewFakeHealthCheck(nil) + s := createSandbox(KsTestSharded) s.VSchema = vschema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + sb := createSandbox(KsTestUnsharded) + sb.VSchema = unshardedVSchema + + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} sbcs := []*sandboxconn.SandboxConn{} for _, shard := range shards { @@ -222,22 +252,26 @@ func createCustomExecutorSetValues(vschema string, values []*sqltypes.Result) (e } sbcs = append(sbcs, sbc) } - - createSandbox(KsTestUnsharded) sbclookup = hc.AddTestTablet(cell, "0", 1, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - executor = NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3) - // create a new session each time so that ShardSessions don't get re-used across tests - primarySession = &vtgatepb.Session{ - TargetString: "@primary", - } - return executor, sbcs[0], sbcs[1], sbclookup + queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) + plans := DefaultPlanCache() + executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4) + executor.SetQueryLogger(queryLogger) + + t.Cleanup(func() { + defer utils.EnsureNoLeaks(t) + executor.Close() + cancel() + }) + + return executor, sbcs[0], sbcs[1], sbclookup, ctx } -func executorExecSession(executor *Executor, sql string, bv map[string]*querypb.BindVariable, session *vtgatepb.Session) (*sqltypes.Result, error) { +func executorExecSession(ctx context.Context, executor *Executor, sql string, bv map[string]*querypb.BindVariable, session *vtgatepb.Session) (*sqltypes.Result, error) { return executor.Execute( - 
context.Background(), + ctx, + nil, nil, "TestExecute", NewSafeSession(session), @@ -245,23 +279,24 @@ func executorExecSession(executor *Executor, sql string, bv map[string]*querypb. bv) } -func executorExec(executor *Executor, sql string, bv map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - return executorExecSession(executor, sql, bv, primarySession) +func executorExec(ctx context.Context, executor *Executor, session *vtgatepb.Session, sql string, bv map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + return executorExecSession(ctx, executor, sql, bv, session) } -func executorPrepare(executor *Executor, sql string, bv map[string]*querypb.BindVariable) ([]*querypb.Field, error) { +func executorPrepare(ctx context.Context, executor *Executor, session *vtgatepb.Session, sql string, bv map[string]*querypb.BindVariable) ([]*querypb.Field, error) { return executor.Prepare( - context.Background(), + ctx, "TestExecute", - NewSafeSession(primarySession), + NewSafeSession(session), sql, bv) } -func executorStream(executor *Executor, sql string) (qr *sqltypes.Result, err error) { +func executorStream(ctx context.Context, executor *Executor, sql string) (qr *sqltypes.Result, err error) { results := make(chan *sqltypes.Result, 100) err = executor.StreamExecute( - context.Background(), + ctx, + nil, nil, "TestExecuteStream", NewSafeSession(nil), @@ -299,8 +334,8 @@ func assertQueries(t *testing.T, sbc *sandboxconn.SandboxConn, wantQueries []*qu } got := query.Sql expected := wantQueries[idx].Sql - assert.Equal(t, expected, got) - assert.Equal(t, wantQueries[idx].BindVariables, query.BindVariables) + utils.MustMatch(t, expected, got) + utils.MustMatch(t, wantQueries[idx].BindVariables, query.BindVariables) idx++ } } @@ -366,14 +401,14 @@ func getQueryLog(logChan chan *logstats.LogStats) *logstats.LogStats { // is a repeat query. 
var testPlannedQueries = map[string]bool{} -func testQueryLog(t *testing.T, logChan chan *logstats.LogStats, method, stmtType, sql string, shardQueries int) *logstats.LogStats { +func testQueryLog(t *testing.T, executor *Executor, logChan chan *logstats.LogStats, method, stmtType, sql string, shardQueries int) *logstats.LogStats { t.Helper() logStats := getQueryLog(logChan) require.NotNil(t, logStats) var log bytes.Buffer - streamlog.GetFormatter(QueryLogger)(&log, nil, logStats) + streamlog.GetFormatter(executor.queryLogger)(&log, nil, logStats) fields := strings.Split(log.String(), "\t") // fields[0] is the method @@ -429,8 +464,8 @@ func testQueryLog(t *testing.T, logChan chan *logstats.LogStats, method, stmtTyp return logStats } -func newTestResolver(hc discovery.HealthCheck, serv srvtopo.Server, cell string) *Resolver { - sc := newTestScatterConn(hc, serv, cell) +func newTestResolver(ctx context.Context, hc discovery.HealthCheck, serv srvtopo.Server, cell string) *Resolver { + sc := newTestScatterConn(ctx, hc, serv, cell) srvResolver := srvtopo.NewResolver(serv, sc.gateway, cell) return NewResolver(srvResolver, serv, cell, sc) } diff --git a/go/vt/vtgate/executor_scatter_stats.go b/go/vt/vtgate/executor_scatter_stats.go index cfe0b7435f2..beaa60d7012 100644 --- a/go/vt/vtgate/executor_scatter_stats.go +++ b/go/vt/vtgate/executor_scatter_stats.go @@ -62,8 +62,7 @@ func (e *Executor) gatherScatterStats() (statsResults, error) { plans := make([]*engine.Plan, 0) routes := make([]*engine.Route, 0) // First we go over all plans and collect statistics and all query plans for scatter queries - e.plans.ForEach(func(value any) bool { - plan := value.(*engine.Plan) + e.ForEachPlan(func(plan *engine.Plan) bool { scatter := engine.Find(findScatter, plan.Instructions) readOnly := !engine.Exists(isUpdating, plan.Instructions) isScatter := scatter != nil diff --git a/go/vt/vtgate/executor_scatter_stats_test.go b/go/vt/vtgate/executor_scatter_stats_test.go index 
59eba522bb8..eea711588bf 100644 --- a/go/vt/vtgate/executor_scatter_stats_test.go +++ b/go/vt/vtgate/executor_scatter_stats_test.go @@ -17,9 +17,9 @@ limitations under the License. package vtgate import ( - "context" "net/http/httptest" "testing" + "time" "github.com/stretchr/testify/require" @@ -27,10 +27,11 @@ import ( ) func TestScatterStatsWithNoScatterQuery(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"}) - _, err := executor.Execute(context.Background(), nil, "TestExecutorResultsExceeded", session, "select * from main1", nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecutorResultsExceeded", session, "select * from main1", nil) require.NoError(t, err) result, err := executor.gatherScatterStats() @@ -39,10 +40,10 @@ func TestScatterStatsWithNoScatterQuery(t *testing.T) { } func TestScatterStatsWithSingleScatterQuery(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"}) - _, err := executor.Execute(context.Background(), nil, "TestExecutorResultsExceeded", session, "select * from user", nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecutorResultsExceeded", session, "select * from user", nil) require.NoError(t, err) result, err := executor.gatherScatterStats() @@ -51,23 +52,23 @@ func TestScatterStatsWithSingleScatterQuery(t *testing.T) { } func TestScatterStatsHttpWriting(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"}) - _, err := executor.Execute(context.Background(), nil, "TestExecutorResultsExceeded", session, "select * from user", nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecutorResultsExceeded", session, "select * from user", nil) 
require.NoError(t, err) - _, err = executor.Execute(context.Background(), nil, "TestExecutorResultsExceeded", session, "select * from user where Id = 15", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecutorResultsExceeded", session, "select * from user where Id = 15", nil) require.NoError(t, err) - _, err = executor.Execute(context.Background(), nil, "TestExecutorResultsExceeded", session, "select * from user where Id > 15", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecutorResultsExceeded", session, "select * from user where Id > 15", nil) require.NoError(t, err) query4 := "select * from user as u1 join user as u2 on u1.Id = u2.Id" - _, err = executor.Execute(context.Background(), nil, "TestExecutorResultsExceeded", session, query4, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecutorResultsExceeded", session, query4, nil) require.NoError(t, err) - executor.plans.Wait() + time.Sleep(500 * time.Millisecond) recorder := httptest.NewRecorder() executor.WriteScatterStats(recorder) diff --git a/go/vt/vtgate/executor_select_test.go b/go/vt/vtgate/executor_select_test.go index 258f48f0a78..a780f77dda1 100644 --- a/go/vt/vtgate/executor_select_test.go +++ b/go/vt/vtgate/executor_select_test.go @@ -28,15 +28,15 @@ import ( _flag "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/streamlog" + "vitess.io/vitess/go/vt/vtgate/logstats" "vitess.io/vitess/go/vt/sqlparser" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/cache" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/discovery" @@ -51,7 +51,7 @@ import ( ) func TestSelectNext(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, _ := createExecutorEnv(t) query := "select next :n values from user_seq" bv := map[string]*querypb.BindVariable{"n": 
sqltypes.Int64BindVariable(2)} @@ -62,7 +62,7 @@ func TestSelectNext(t *testing.T) { // Autocommit session := NewAutocommitSession(&vtgatepb.Session{}) - _, err := executor.Execute(context.Background(), nil, "TestSelectNext", session, query, bv) + _, err := executor.Execute(context.Background(), nil, nil, "TestSelectNext", session, query, bv) require.NoError(t, err) utils.MustMatch(t, wantQueries, sbclookup.Queries) @@ -73,7 +73,7 @@ func TestSelectNext(t *testing.T) { // Txn session = NewAutocommitSession(&vtgatepb.Session{}) session.Session.InTransaction = true - _, err = executor.Execute(context.Background(), nil, "TestSelectNext", session, query, bv) + _, err = executor.Execute(context.Background(), nil, nil, "TestSelectNext", session, query, bv) require.NoError(t, err) utils.MustMatch(t, wantQueries, sbclookup.Queries) @@ -84,7 +84,7 @@ func TestSelectNext(t *testing.T) { // Reserve session = NewAutocommitSession(&vtgatepb.Session{}) session.Session.InReservedConn = true - _, err = executor.Execute(context.Background(), nil, "TestSelectNext", session, query, bv) + _, err = executor.Execute(context.Background(), nil, nil, "TestSelectNext", session, query, bv) require.NoError(t, err) utils.MustMatch(t, wantQueries, sbclookup.Queries) @@ -96,7 +96,7 @@ func TestSelectNext(t *testing.T) { session = NewAutocommitSession(&vtgatepb.Session{}) session.Session.InReservedConn = true session.Session.InTransaction = true - _, err = executor.Execute(context.Background(), nil, "TestSelectNext", session, query, bv) + _, err = executor.Execute(context.Background(), nil, nil, "TestSelectNext", session, query, bv) require.NoError(t, err) utils.MustMatch(t, wantQueries, sbclookup.Queries) @@ -105,10 +105,10 @@ func TestSelectNext(t *testing.T) { } func TestSelectDBA(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) query := "select * from INFORMATION_SCHEMA.foo" - _, err := executor.Execute(context.Background(), nil, 
"TestSelectDBA", + _, err := executor.Execute(context.Background(), nil, nil, "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), query, map[string]*querypb.BindVariable{}, ) @@ -118,7 +118,7 @@ func TestSelectDBA(t *testing.T) { sbc1.Queries = nil query = "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES ist WHERE ist.table_schema = 'performance_schema' AND ist.table_name = 'foo'" - _, err = executor.Execute(context.Background(), nil, "TestSelectDBA", + _, err = executor.Execute(context.Background(), nil, nil, "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), query, map[string]*querypb.BindVariable{}, ) @@ -132,7 +132,7 @@ func TestSelectDBA(t *testing.T) { sbc1.Queries = nil query = "select 1 from information_schema.table_constraints where constraint_schema = 'ks' and table_name = 'user'" - _, err = executor.Execute(context.Background(), nil, "TestSelectDBA", + _, err = executor.Execute(context.Background(), nil, nil, "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), query, map[string]*querypb.BindVariable{}, ) @@ -146,7 +146,7 @@ func TestSelectDBA(t *testing.T) { sbc1.Queries = nil query = "select 1 from information_schema.table_constraints where constraint_schema = 'ks'" - _, err = executor.Execute(context.Background(), nil, "TestSelectDBA", + _, err = executor.Execute(context.Background(), nil, nil, "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), query, map[string]*querypb.BindVariable{}, ) @@ -159,7 +159,7 @@ func TestSelectDBA(t *testing.T) { } func TestSystemVariablesMySQLBelow80(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) executor.normalize = true sqlparser.SetParserVersion("57000") @@ -178,10 +178,10 @@ func TestSystemVariablesMySQLBelow80(t *testing.T) { }}, }}) - _, err := executor.Execute(context.Background(), nil, "TestSetStmt", session, "set @@sql_mode = 
only_full_group_by", map[string]*querypb.BindVariable{}) + _, err := executor.Execute(context.Background(), nil, nil, "TestSetStmt", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) require.NoError(t, err) - _, err = executor.Execute(context.Background(), nil, "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, nil, "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.True(t, session.InReservedConn()) @@ -195,7 +195,7 @@ func TestSystemVariablesMySQLBelow80(t *testing.T) { } func TestSystemVariablesWithSetVarDisabled(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) executor.normalize = true sqlparser.SetParserVersion("80000") @@ -216,10 +216,10 @@ func TestSystemVariablesWithSetVarDisabled(t *testing.T) { }}, }}) - _, err := executor.Execute(context.Background(), nil, "TestSetStmt", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) + _, err := executor.Execute(context.Background(), nil, nil, "TestSetStmt", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) require.NoError(t, err) - _, err = executor.Execute(context.Background(), nil, "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, nil, "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.True(t, session.InReservedConn()) @@ -233,17 +233,17 @@ func TestSystemVariablesWithSetVarDisabled(t *testing.T) { } func TestSetSystemVariablesTx(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) executor.normalize = true 
sqlparser.SetParserVersion("80001") session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: "TestExecutor"}) - _, err := executor.Execute(context.Background(), nil, "TestBegin", session, "begin", map[string]*querypb.BindVariable{}) + _, err := executor.Execute(context.Background(), nil, nil, "TestBegin", session, "begin", map[string]*querypb.BindVariable{}) require.NoError(t, err) - _, err = executor.Execute(context.Background(), nil, "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, nil, "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.NotZero(t, session.ShardSessions) @@ -258,14 +258,14 @@ func TestSetSystemVariablesTx(t *testing.T) { }}, }}) - _, err = executor.Execute(context.Background(), nil, "TestSetStmt", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, nil, "TestSetStmt", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.False(t, session.InReservedConn()) - _, err = executor.Execute(context.Background(), nil, "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, nil, "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) require.NoError(t, err) - _, err = executor.Execute(context.Background(), nil, "TestCommit", session, "commit", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, nil, "TestCommit", session, "commit", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.False(t, session.InReservedConn()) @@ -281,7 +281,7 @@ func TestSetSystemVariablesTx(t *testing.T) { } func 
TestSetSystemVariables(t *testing.T) { - executor, _, _, lookup := createExecutorEnv() + executor, _, _, lookup, _ := createExecutorEnv(t) executor.normalize = true sqlparser.SetParserVersion("80001") @@ -300,10 +300,10 @@ func TestSetSystemVariables(t *testing.T) { sqltypes.NewVarChar("only_full_group_by"), }}, }}) - _, err := executor.Execute(context.Background(), nil, "TestSetStmt", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) + _, err := executor.Execute(context.Background(), nil, nil, "TestSetStmt", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) require.NoError(t, err) - _, err = executor.Execute(context.Background(), nil, "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, nil, "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.False(t, session.InReservedConn()) wantQueries := []*querypb.BoundQuery{ @@ -315,7 +315,7 @@ func TestSetSystemVariables(t *testing.T) { // Execute a select with a comment that needs a query hint - _, err = executor.Execute(context.Background(), nil, "TestSelect", session, "select /* comment */ 1 from information_schema.table", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, nil, "TestSelect", session, "select /* comment */ 1 from information_schema.table", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.False(t, session.InReservedConn()) wantQueries = []*querypb.BoundQuery{ @@ -332,7 +332,7 @@ func TestSetSystemVariables(t *testing.T) { sqltypes.NewVarChar("0"), }}, }}) - _, err = executor.Execute(context.Background(), nil, "TestSetStmt", session, "set @@sql_safe_updates = 0", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, nil, "TestSetStmt", session, "set 
@@sql_safe_updates = 0", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.False(t, session.InReservedConn()) wantQueries = []*querypb.BoundQuery{ @@ -341,7 +341,7 @@ func TestSetSystemVariables(t *testing.T) { utils.MustMatch(t, wantQueries, lookup.Queries) lookup.Queries = nil - _, err = executor.Execute(context.Background(), nil, "TestSetStmt", session, "set @var = @@sql_mode", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, nil, "TestSetStmt", session, "set @var = @@sql_mode", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.False(t, session.InReservedConn()) require.Nil(t, lookup.Queries) @@ -355,7 +355,7 @@ func TestSetSystemVariables(t *testing.T) { sqltypes.NewVarChar("4"), }}, }}) - _, err = executor.Execute(context.Background(), nil, "TestSetStmt", session, "set @x = @@sql_mode, @y = @@max_tmp_tables", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, nil, "TestSetStmt", session, "set @x = @@sql_mode, @y = @@max_tmp_tables", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.False(t, session.InReservedConn()) wantQueries = []*querypb.BoundQuery{ @@ -378,11 +378,11 @@ func TestSetSystemVariables(t *testing.T) { sqltypes.NewVarChar("1"), }}, }}) - _, err = executor.Execute(context.Background(), nil, "TestSetStmt", session, "set @@max_tmp_tables = 1", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, nil, "TestSetStmt", session, "set @@max_tmp_tables = 1", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.True(t, session.InReservedConn()) - _, err = executor.Execute(context.Background(), nil, "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, nil, "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) 
require.NoError(t, err) wantQueries = []*querypb.BoundQuery{ @@ -394,7 +394,7 @@ func TestSetSystemVariables(t *testing.T) { } func TestSetSystemVariablesWithReservedConnection(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) executor.normalize = true session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, SystemVariables: map[string]string{}}) @@ -409,10 +409,10 @@ func TestSetSystemVariablesWithReservedConnection(t *testing.T) { sqltypes.NewVarChar(""), }}, }}) - _, err := executor.Execute(context.Background(), nil, "TestSetStmt", session, "set @@sql_mode = ''", map[string]*querypb.BindVariable{}) + _, err := executor.Execute(context.Background(), nil, nil, "TestSetStmt", session, "set @@sql_mode = ''", map[string]*querypb.BindVariable{}) require.NoError(t, err) - _, err = executor.Execute(context.Background(), nil, "TestSelect", session, "select age, city from user group by age", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, nil, "TestSelect", session, "select age, city from user group by age", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.True(t, session.InReservedConn()) wantQueries := []*querypb.BoundQuery{ @@ -422,7 +422,7 @@ func TestSetSystemVariablesWithReservedConnection(t *testing.T) { } utils.MustMatch(t, wantQueries, sbc1.Queries) - _, err = executor.Execute(context.Background(), nil, "TestSelect", session, "select age, city+1 from user group by age", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, nil, "TestSelect", session, "select age, city+1 from user group by age", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.True(t, session.InReservedConn()) wantQueries = []*querypb.BoundQuery{ @@ -437,13 +437,13 @@ func TestSetSystemVariablesWithReservedConnection(t *testing.T) { } func TestCreateTableValidTimestamp(t *testing.T) { - executor, 
sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) executor.normalize = true session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor", SystemVariables: map[string]string{"sql_mode": "ALLOW_INVALID_DATES"}}) query := "create table aa(t timestamp default 0)" - _, err := executor.Execute(context.Background(), nil, "TestSelect", session, query, map[string]*querypb.BindVariable{}) + _, err := executor.Execute(context.Background(), nil, nil, "TestSelect", session, query, map[string]*querypb.BindVariable{}) require.NoError(t, err) require.True(t, session.InReservedConn()) @@ -456,12 +456,12 @@ func TestCreateTableValidTimestamp(t *testing.T) { } func TestGen4SelectDBA(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) executor.normalize = true executor.pv = querypb.ExecuteOptions_Gen4 query := "select * from INFORMATION_SCHEMA.TABLE_CONSTRAINTS" - _, err := executor.Execute(context.Background(), nil, "TestSelectDBA", + _, err := executor.Execute(context.Background(), nil, nil, "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), query, map[string]*querypb.BindVariable{}, ) @@ -472,7 +472,7 @@ func TestGen4SelectDBA(t *testing.T) { sbc1.Queries = nil query = "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES ist WHERE ist.table_schema = 'performance_schema' AND ist.table_name = 'foo'" - _, err = executor.Execute(context.Background(), nil, "TestSelectDBA", + _, err = executor.Execute(context.Background(), nil, nil, "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), query, map[string]*querypb.BindVariable{}, ) @@ -488,7 +488,7 @@ func TestGen4SelectDBA(t *testing.T) { sbc1.Queries = nil query = "select 1 from information_schema.table_constraints where constraint_schema = 'ks' and table_name = 'user'" - _, err = executor.Execute(context.Background(), nil, "TestSelectDBA", + _, err = 
executor.Execute(context.Background(), nil, nil, "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), query, map[string]*querypb.BindVariable{}, ) @@ -505,10 +505,7 @@ func TestGen4SelectDBA(t *testing.T) { sbc1.Queries = nil query = "select 1 from information_schema.table_constraints where constraint_schema = 'ks'" - _, err = executor.Execute(context.Background(), nil, "TestSelectDBA", - NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), - query, map[string]*querypb.BindVariable{}, - ) + _, err = executor.Execute(context.Background(), nil, nil, "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), query, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{Sql: "select :vtg1 /* INT64 */ from information_schema.table_constraints where constraint_schema = :__vtschemaname /* VARCHAR */", BindVariables: map[string]*querypb.BindVariable{ @@ -520,7 +517,7 @@ func TestGen4SelectDBA(t *testing.T) { sbc1.Queries = nil query = "select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'TestExecutor' and c.table_schema = 'TestExecutor' order by t.table_schema,t.table_name,c.column_name" - _, err = executor.Execute(context.Background(), nil, "TestSelectDBA", + _, err = executor.Execute(context.Background(), nil, nil, "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "information_schema"}), query, map[string]*querypb.BindVariable{}, ) @@ -534,9 +531,12 @@ func TestGen4SelectDBA(t *testing.T) { } func TestUnsharded(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "select id from music_user_map where id = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, 
session, "select id from music_user_map where id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select id from music_user_map where id = 1", @@ -546,9 +546,12 @@ func TestUnsharded(t *testing.T) { } func TestUnshardedComments(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "/* leading */ select id from music_user_map where id = 1 /* trailing */", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "/* leading */ select id from music_user_map where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "/* leading */ select id from music_user_map where id = 1 /* trailing */", @@ -556,7 +559,7 @@ func TestUnshardedComments(t *testing.T) { }} utils.MustMatch(t, wantQueries, sbclookup.Queries) - _, err = executorExec(executor, "update music_user_map set id = 1 /* trailing */", nil) + _, err = executorExec(ctx, executor, session, "update music_user_map set id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "/* leading */ select id from music_user_map where id = 1 /* trailing */", @@ -568,7 +571,7 @@ func TestUnshardedComments(t *testing.T) { assertQueries(t, sbclookup, wantQueries) sbclookup.Queries = nil - _, err = executorExec(executor, "delete from music_user_map /* trailing */", nil) + _, err = executorExec(ctx, executor, session, "delete from music_user_map /* trailing */", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "delete from music_user_map /* trailing */", @@ -577,7 +580,7 @@ func TestUnshardedComments(t *testing.T) { assertQueries(t, sbclookup, wantQueries) sbclookup.Queries = nil - _, err = executorExec(executor, "insert into music_user_map values (1) /* trailing */", nil) + _, err = executorExec(ctx, executor, session, "insert into 
music_user_map values (1) /* trailing */", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "insert into music_user_map values (1) /* trailing */", @@ -587,23 +590,23 @@ func TestUnshardedComments(t *testing.T) { } func TestStreamUnsharded(t *testing.T) { - executor, _, _, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, _, _, _, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select id from music_user_map where id = 1" - result, err := executorStream(executor, sql) + result, err := executorStream(ctx, executor, sql) require.NoError(t, err) wantResult := sandboxconn.StreamRowResult if !result.Equal(wantResult) { diff := cmp.Diff(wantResult, result) t.Errorf("result: %+v, want %+v\ndiff: %s", result, wantResult, diff) } - testQueryLog(t, logChan, "TestExecuteStream", "SELECT", sql, 1) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", sql, 1) } func TestStreamBuffering(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, _ := createExecutorEnv(t) // This test is similar to TestStreamUnsharded except that it returns a Result > 10 bytes, // such that the splitting of the Result into multiple Result responses gets tested. 
@@ -622,11 +625,16 @@ func TestStreamBuffering(t *testing.T) { }}) var results []*sqltypes.Result + session := &vtgatepb.Session{ + TargetString: "@primary", + } + err := executor.StreamExecute( context.Background(), nil, + nil, "TestStreamBuffering", - NewSafeSession(primarySession), + NewSafeSession(session), "select id from music_user_map where id = 1", nil, func(qr *sqltypes.Result) error { @@ -655,7 +663,7 @@ func TestStreamBuffering(t *testing.T) { } func TestStreamLimitOffset(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) // This test is similar to TestStreamUnsharded except that it returns a Result > 10 bytes, // such that the splitting of the Result into multiple Result responses gets tested. @@ -690,11 +698,15 @@ func TestStreamLimitOffset(t *testing.T) { }}) results := make(chan *sqltypes.Result, 10) + session := &vtgatepb.Session{ + TargetString: "@primary", + } err := executor.StreamExecute( context.Background(), nil, + nil, "TestStreamLimitOffset", - NewSafeSession(primarySession), + NewSafeSession(session), "select id, textcol from user order by id limit 2 offset 2", nil, func(qr *sqltypes.Result) error { @@ -730,14 +742,17 @@ func TestStreamLimitOffset(t *testing.T) { } func TestSelectLastInsertId(t *testing.T) { - executor, _, _, _ := createExecutorEnv() - primarySession.LastInsertId = 52 + executor, _, _, _, ctx := createExecutorEnv(t) + session := &vtgatepb.Session{ + TargetString: "@primary", + LastInsertId: 52, + } executor.normalize = true - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select last_insert_id()" - result, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) + result, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) wantResult := &sqltypes.Result{ Fields: 
[]*querypb.Field{ {Name: "last_insert_id()", Type: sqltypes.Uint64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG)}, @@ -751,21 +766,25 @@ func TestSelectLastInsertId(t *testing.T) { } func TestSelectSystemVariables(t *testing.T) { - executor, _, _, _ := createExecutorEnv() - primarySession.ReadAfterWrite = &vtgatepb.ReadAfterWrite{ - ReadAfterWriteGtid: "a fine gtid", - ReadAfterWriteTimeout: 13, - SessionTrackGtids: true, + executor, _, _, _, ctx := createExecutorEnv(t) + + session := &vtgatepb.Session{ + TargetString: "@primary", + ReadAfterWrite: &vtgatepb.ReadAfterWrite{ + ReadAfterWriteGtid: "a fine gtid", + ReadAfterWriteTimeout: 13, + SessionTrackGtids: true, + }, } executor.normalize = true - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select @@autocommit, @@client_found_rows, @@skip_query_plan_cache, @@enable_system_settings, " + "@@sql_select_limit, @@transaction_mode, @@workload, @@read_after_write_gtid, " + - "@@read_after_write_timeout, @@session_track_gtids, @@ddl_strategy, @@socket, @@query_timeout" + "@@read_after_write_timeout, @@session_track_gtids, @@ddl_strategy, @@migration_context, @@socket, @@query_timeout" - result, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) + result, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ {Name: "@@autocommit", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, @@ -779,6 +798,7 @@ func TestSelectSystemVariables(t *testing.T) { {Name: "@@read_after_write_timeout", Type: sqltypes.Float64, Charset: collations.CollationBinaryID, Flags: 
uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, {Name: "@@session_track_gtids", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, {Name: "@@ddl_strategy", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, + {Name: "@@migration_context", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, {Name: "@@socket", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, {Name: "@@query_timeout", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, }, @@ -797,6 +817,7 @@ func TestSelectSystemVariables(t *testing.T) { sqltypes.NewVarChar("own_gtid"), sqltypes.NewVarChar(""), sqltypes.NewVarChar(""), + sqltypes.NewVarChar(""), sqltypes.NewInt64(0), }}, } @@ -805,24 +826,21 @@ func TestSelectSystemVariables(t *testing.T) { } func TestSelectInitializedVitessAwareVariable(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) executor.normalize = true - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) - - primarySession.Autocommit = true - primarySession.EnableSystemSettings = true - primarySession.QueryTimeout = 75 + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - defer func() { - primarySession.Autocommit = false - primarySession.EnableSystemSettings = false - primarySession.QueryTimeout = 0 - }() + session := &vtgatepb.Session{ + TargetString: "@primary", + Autocommit: true, + EnableSystemSettings: true, + QueryTimeout: 75, + } sql := "select @@autocommit, @@enable_system_settings, @@query_timeout" - result, err := executorExec(executor, sql, nil) + result, err := executorExec(ctx, executor, 
session, sql, nil) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ {Name: "@@autocommit", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, @@ -840,13 +858,16 @@ func TestSelectInitializedVitessAwareVariable(t *testing.T) { } func TestSelectUserDefinedVariable(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) executor.normalize = true - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select @foo" - result, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ @@ -858,8 +879,8 @@ func TestSelectUserDefinedVariable(t *testing.T) { } utils.MustMatch(t, wantResult, result, "Mismatch") - primarySession = &vtgatepb.Session{UserDefinedVariables: createMap([]string{"foo"}, []any{"bar"})} - result, err = executorExec(executor, sql, map[string]*querypb.BindVariable{}) + session = &vtgatepb.Session{UserDefinedVariables: createMap([]string{"foo"}, []any{"bar"})} + result, err = executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantResult = &sqltypes.Result{ Fields: []*querypb.Field{ @@ -873,17 +894,20 @@ func TestSelectUserDefinedVariable(t *testing.T) { } func TestFoundRows(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) executor.normalize = true - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer 
executor.queryLogger.Unsubscribe(logChan) + session := &vtgatepb.Session{ + TargetString: "@primary", + } // run this extra query so we can assert on the number of rows found - _, err := executorExec(executor, "select 42", map[string]*querypb.BindVariable{}) + _, err := executorExec(ctx, executor, session, "select 42", map[string]*querypb.BindVariable{}) require.NoError(t, err) sql := "select found_rows()" - result, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) + result, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ {Name: "found_rows()", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, @@ -897,23 +921,26 @@ func TestFoundRows(t *testing.T) { } func TestRowCount(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) executor.normalize = true - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - _, err := executorExec(executor, "select 42", map[string]*querypb.BindVariable{}) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "select 42", map[string]*querypb.BindVariable{}) require.NoError(t, err) - testRowCount(t, executor, -1) + testRowCount(t, ctx, executor, session, -1) - _, err = executorExec(executor, "delete from user where id in (42, 24)", map[string]*querypb.BindVariable{}) + _, err = executorExec(ctx, executor, session, "delete from user where id in (42, 24)", map[string]*querypb.BindVariable{}) require.NoError(t, err) - testRowCount(t, executor, 2) + testRowCount(t, ctx, executor, session, 2) } -func testRowCount(t *testing.T, executor *Executor, wantRowCount int64) { +func testRowCount(t *testing.T, 
ctx context.Context, executor *Executor, session *vtgatepb.Session, wantRowCount int64) { t.Helper() - result, err := executorExec(executor, "select row_count()", map[string]*querypb.BindVariable{}) + result, err := executorExec(ctx, executor, session, "select row_count()", map[string]*querypb.BindVariable{}) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ {Name: "row_count()", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, @@ -927,9 +954,13 @@ func testRowCount(t *testing.T, executor *Executor, wantRowCount int64) { } func TestSelectLastInsertIdInUnion(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) executor.normalize = true - primarySession.LastInsertId = 52 + + session := &vtgatepb.Session{ + TargetString: "@primary", + LastInsertId: 52, + } result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ @@ -943,7 +974,7 @@ func TestSelectLastInsertIdInUnion(t *testing.T) { sbc1.SetResults(result1) sql := "select last_insert_id() as id union select last_insert_id() as id" - got, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) + got, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ @@ -957,13 +988,16 @@ func TestSelectLastInsertIdInUnion(t *testing.T) { } func TestSelectLastInsertIdInWhere(t *testing.T) { - executor, _, _, lookup := createExecutorEnv() + executor, _, _, lookup, ctx := createExecutorEnv(t) executor.normalize = true - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select id from music_user_map where id = last_insert_id()" - _, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) + session := 
&vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select id from music_user_map where id = :__lastInsertId", @@ -974,7 +1008,7 @@ func TestSelectLastInsertIdInWhere(t *testing.T) { } func TestLastInsertIDInVirtualTable(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) executor.normalize = true result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ @@ -988,10 +1022,13 @@ func TestLastInsertIDInVirtualTable(t *testing.T) { }}, }} sbc1.SetResults(result1) - _, err := executorExec(executor, "select * from (select last_insert_id()) as t", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "select * from (select last_insert_id()) as t", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select * from (select :__lastInsertId as `last_insert_id()` from dual) as t", + Sql: "select t.`last_insert_id()` from (select :__lastInsertId as `last_insert_id()` from dual) as t", BindVariables: map[string]*querypb.BindVariable{"__lastInsertId": sqltypes.Uint64BindVariable(0)}, }} @@ -999,14 +1036,13 @@ func TestLastInsertIDInVirtualTable(t *testing.T) { } func TestLastInsertIDInSubQueryExpression(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) executor.normalize = true - primarySession.LastInsertId = 12345 - defer func() { - // clean up global state - primarySession.LastInsertId = 0 - }() - rs, err := executorExec(executor, "select (select last_insert_id()) as x", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + LastInsertId: 12345, + } + rs, err := executorExec(ctx, executor, session, "select (select last_insert_id()) as x", nil) require.NoError(t, err) wantResult := 
&sqltypes.Result{ Fields: []*querypb.Field{ @@ -1024,15 +1060,18 @@ func TestLastInsertIDInSubQueryExpression(t *testing.T) { } func TestSelectDatabase(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, _ := createExecutorEnv(t) executor.normalize = true sql := "select database()" - newSession := proto.Clone(primarySession).(*vtgatepb.Session) + newSession := &vtgatepb.Session{ + TargetString: "@primary", + } session := NewSafeSession(newSession) session.TargetString = "TestExecutor@primary" result, err := executor.Execute( context.Background(), nil, + nil, "TestExecute", session, sql, @@ -1051,9 +1090,9 @@ func TestSelectDatabase(t *testing.T) { } func TestSelectBindvars(t *testing.T) { - executor, sbc1, sbc2, lookup := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, lookup, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) lookup.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("b|a", "varbinary|varbinary"), @@ -1064,7 +1103,10 @@ func TestSelectBindvars(t *testing.T) { )}) sql := "select id from `user` where id = :id" - _, err := executorExec(executor, sql, map[string]*querypb.BindVariable{ + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{ "id": sqltypes.Int64BindVariable(1), }) require.NoError(t, err) @@ -1075,11 +1117,11 @@ func TestSelectBindvars(t *testing.T) { utils.MustMatch(t, sbc1.Queries, wantQueries) assert.Empty(t, sbc2.Queries) sbc1.Queries = nil - testQueryLog(t, logChan, "TestExecute", "SELECT", sql, 1) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", sql, 1) // Test with StringBindVariable sql = "select id from `user` where `name` in (:name1, :name2)" - _, err = executorExec(executor, sql, 
map[string]*querypb.BindVariable{ + _, err = executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{ "name1": sqltypes.StringBindVariable("foo1"), "name2": sqltypes.StringBindVariable("foo2"), }) @@ -1094,13 +1136,11 @@ func TestSelectBindvars(t *testing.T) { }} utils.MustMatch(t, wantQueries, sbc1.Queries) sbc1.Queries = nil - testQueryLog(t, logChan, "VindexLookup", "SELECT", "select `name`, user_id from name_user_map where `name` in ::name", 1) - testQueryLog(t, logChan, "VindexLookup", "SELECT", "select `name`, user_id from name_user_map where `name` in ::name", 1) - testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from `user` where `name` in (:name1, :name2)", 1) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from `user` where `name` in (:name1, :name2)", 3) // Test with BytesBindVariable sql = "select id from `user` where `name` in (:name1, :name2)" - _, err = executorExec(executor, sql, map[string]*querypb.BindVariable{ + _, err = executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{ "name1": sqltypes.BytesBindVariable([]byte("foo1")), "name2": sqltypes.BytesBindVariable([]byte("foo2")), }) @@ -1108,14 +1148,13 @@ func TestSelectBindvars(t *testing.T) { wantQueries = []*querypb.BoundQuery{{ Sql: "select id from `user` where 1 != 1", BindVariables: map[string]*querypb.BindVariable{ - "name1": sqltypes.BytesBindVariable([]byte("foo1")), - "name2": sqltypes.BytesBindVariable([]byte("foo2")), + "__vals": sqltypes.TestBindVariable([]any{[]byte("foo1"), []byte("foo2")}), + "name1": sqltypes.BytesBindVariable([]byte("foo1")), + "name2": sqltypes.BytesBindVariable([]byte("foo2")), }, }} utils.MustMatch(t, wantQueries, sbc1.Queries) - testQueryLog(t, logChan, "VindexLookup", "SELECT", "select `name`, user_id from name_user_map where `name` in ::name", 1) - testQueryLog(t, logChan, "VindexLookup", "SELECT", "select `name`, user_id from name_user_map where `name` in ::name", 1) - 
testQueryLog(t, logChan, "TestExecute", "SELECT", sql, 1) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", sql, 3) // Test no match in the lookup vindex sbc1.Queries = nil @@ -1130,7 +1169,7 @@ func TestSelectBindvars(t *testing.T) { }}) sql = "select id from user where name = :name" - _, err = executorExec(executor, sql, map[string]*querypb.BindVariable{ + _, err = executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{ "name": sqltypes.StringBindVariable("nonexistent"), }) require.NoError(t, err) @@ -1154,16 +1193,16 @@ func TestSelectBindvars(t *testing.T) { }} utils.MustMatch(t, wantLookupQueries, lookup.Queries) - - testQueryLog(t, logChan, "VindexLookup", "SELECT", "select `name`, user_id from name_user_map where `name` in ::name", 1) - testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from `user` where `name` = :name", 1) - + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from `user` where `name` = :name", 2) } func TestSelectEqual(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "select id from user where id = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "select id from user where id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select id from `user` where id = 1", @@ -1175,7 +1214,7 @@ func TestSelectEqual(t *testing.T) { } sbc1.Queries = nil - _, err = executorExec(executor, "select id from user where id = 3", nil) + _, err = executorExec(ctx, executor, session, "select id from user where id = 3", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "select id from `user` where id = 3", @@ -1190,7 +1229,7 @@ func TestSelectEqual(t *testing.T) { } sbc2.Queries = nil - _, err = executorExec(executor, "select id from user where id = '3'", nil) 
+ _, err = executorExec(ctx, executor, session, "select id from user where id = '3'", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "select id from `user` where id = '3'", @@ -1209,7 +1248,7 @@ func TestSelectEqual(t *testing.T) { sqltypes.MakeTestFields("b|a", "varbinary|varbinary"), "foo|1", )}) - _, err = executorExec(executor, "select id from user where name = 'foo'", nil) + _, err = executorExec(ctx, executor, session, "select id from user where name = 'foo'", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "select id from `user` where `name` = 'foo'", @@ -1228,10 +1267,13 @@ func TestSelectEqual(t *testing.T) { } func TestSelectINFromOR(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) executor.pv = querypb.ExecuteOptions_Gen4 - _, err := executorExec(executor, "select 1 from user where id = 1 and name = 'apa' or id = 2 and name = 'toto'", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "select 1 from user where id = 1 and name = 'apa' or id = 2 and name = 'toto'", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select 1 from `user` where id = 1 and `name` = 'apa' or id = 2 and `name` = 'toto'", @@ -1243,9 +1285,12 @@ func TestSelectINFromOR(t *testing.T) { } func TestSelectDual(t *testing.T) { - executor, sbc1, _, lookup := createExecutorEnv() + executor, sbc1, _, lookup, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "select @@aa.bb from dual", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "select @@aa.bb from dual", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select @@`aa.bb` from dual", @@ -1253,15 +1298,18 @@ func TestSelectDual(t *testing.T) { }} utils.MustMatch(t, wantQueries, sbc1.Queries) - _, err = executorExec(executor, 
"select @@aa.bb from TestUnsharded.dual", nil) + _, err = executorExec(ctx, executor, session, "select @@aa.bb from TestUnsharded.dual", nil) require.NoError(t, err) utils.MustMatch(t, wantQueries, lookup.Queries) } func TestSelectComments(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "/* leading */ select id from user where id = 1 /* trailing */", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "/* leading */ select id from user where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "/* leading */ select id from `user` where id = 1 /* trailing */", @@ -1275,10 +1323,13 @@ func TestSelectComments(t *testing.T) { } func TestSelectNormalize(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) executor.normalize = true - _, err := executorExec(executor, "/* leading */ select id from user where id = 1 /* trailing */", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "/* leading */ select id from user where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "/* leading */ select id from `user` where id = :id /* INT64 */ /* trailing */", @@ -1293,8 +1344,8 @@ func TestSelectNormalize(t *testing.T) { sbc1.Queries = nil // Force the query to go to the "wrong" shard and ensure that normalization still happens - primarySession.TargetString = "TestExecutor/40-60" - _, err = executorExec(executor, "/* leading */ select id from user where id = 1 /* trailing */", nil) + session.TargetString = "TestExecutor/40-60" + _, err = executorExec(ctx, executor, session, "/* leading */ select id from user where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries = 
[]*querypb.BoundQuery{{ Sql: "/* leading */ select id from `user` where id = :id /* INT64 */ /* trailing */", @@ -1305,13 +1356,15 @@ func TestSelectNormalize(t *testing.T) { require.Empty(t, sbc1.Queries) utils.MustMatch(t, wantQueries, sbc2.Queries, "sbc2.Queries") sbc2.Queries = nil - primarySession.TargetString = "" } func TestSelectCaseSensitivity(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "select Id from user where iD = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "select Id from user where iD = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select Id from `user` where iD = 1", @@ -1325,10 +1378,10 @@ func TestSelectCaseSensitivity(t *testing.T) { } func TestStreamSelectEqual(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) sql := "select id from user where id = 1" - result, err := executorStream(executor, sql) + result, err := executorStream(ctx, executor, sql) require.NoError(t, err) wantResult := sandboxconn.StreamRowResult if !result.Equal(wantResult) { @@ -1337,9 +1390,12 @@ func TestStreamSelectEqual(t *testing.T) { } func TestSelectKeyRange(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "select krcol_unique, krcol from keyrange_table where krcol = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "select krcol_unique, krcol from keyrange_table where krcol = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select krcol_unique, krcol from keyrange_table where krcol = 1", @@ -1353,9 +1409,12 @@ func TestSelectKeyRange(t *testing.T) { } func TestSelectKeyRangeUnique(t 
*testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "select krcol_unique, krcol from keyrange_table where krcol_unique = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "select krcol_unique, krcol from keyrange_table where krcol_unique = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select krcol_unique, krcol from keyrange_table where krcol_unique = 1", @@ -1369,16 +1428,17 @@ func TestSelectKeyRangeUnique(t *testing.T) { } func TestSelectIN(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) // Constant in IN clause is just a number, not a bind variable. - _, err := executorExec(executor, "select id from user where id in (1)", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "select id from user where id in (1)", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select id from `user` where id in ::__vals", - BindVariables: map[string]*querypb.BindVariable{ - "__vals": sqltypes.TestBindVariable([]any{int64(1)}), - }, + Sql: "select id from `user` where id in (1)", + BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) if sbc2.Queries != nil { @@ -1389,7 +1449,7 @@ func TestSelectIN(t *testing.T) { // They result in two different queries on two shards. 
sbc1.Queries = nil sbc2.Queries = nil - _, err = executorExec(executor, "select id from user where id in (1, 3)", nil) + _, err = executorExec(ctx, executor, session, "select id from user where id in (1, 3)", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "select id from `user` where id in ::__vals", @@ -1410,7 +1470,7 @@ func TestSelectIN(t *testing.T) { // This is using []any for the bind variable list. sbc1.Queries = nil sbc2.Queries = nil - _, err = executorExec(executor, "select id from user where id in ::vals", map[string]*querypb.BindVariable{ + _, err = executorExec(ctx, executor, session, "select id from user where id in ::vals", map[string]*querypb.BindVariable{ "vals": sqltypes.TestBindVariable([]any{int64(1), int64(3)}), }) require.NoError(t, err) @@ -1438,7 +1498,7 @@ func TestSelectIN(t *testing.T) { sqltypes.MakeTestFields("b|a", "varbinary|varbinary"), "foo|1", )}) - _, err = executorExec(executor, "select id from user where name = 'foo'", nil) + _, err = executorExec(ctx, executor, session, "select id from user where name = 'foo'", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "select id from `user` where `name` = 'foo'", @@ -1457,10 +1517,10 @@ func TestSelectIN(t *testing.T) { } func TestStreamSelectIN(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) sql := "select id from user where id in (1)" - result, err := executorStream(executor, sql) + result, err := executorStream(ctx, executor, sql) require.NoError(t, err) wantResult := sandboxconn.StreamRowResult if !result.Equal(wantResult) { @@ -1468,7 +1528,7 @@ func TestStreamSelectIN(t *testing.T) { } sql = "select id from user where id in (1, 3)" - result, err = executorStream(executor, sql) + result, err = executorStream(ctx, executor, sql) require.NoError(t, err) wantResult = &sqltypes.Result{ Fields: sandboxconn.StreamRowResult.Fields, @@ -1483,7 +1543,7 @@ func 
TestStreamSelectIN(t *testing.T) { } sql = "select id from user where name = 'foo'" - result, err = executorStream(executor, sql) + result, err = executorStream(ctx, executor, sql) require.NoError(t, err) wantResult = sandboxconn.StreamRowResult if !result.Equal(wantResult) { @@ -1501,31 +1561,42 @@ func TestStreamSelectIN(t *testing.T) { utils.MustMatch(t, wantQueries, sbclookup.Queries) } -func createExecutor(serv *sandboxTopo, cell string, resolver *Resolver) *Executor { - return NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3) +func createExecutor(ctx context.Context, serv *sandboxTopo, cell string, resolver *Resolver) *Executor { + queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) + plans := DefaultPlanCache() + ex := NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4) + ex.SetQueryLogger(queryLogger) + return ex } func TestSelectScatter(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. 
cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for _, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_PRIMARY, true, 1, nil) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select id from `user`" - _, err := executorExec(executor, sql, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, sql, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select id from `user`", @@ -1534,21 +1605,24 @@ func TestSelectScatter(t *testing.T) { for _, conn := range conns { utils.MustMatch(t, wantQueries, conn.Queries) } - testQueryLog(t, logChan, "TestExecute", "SELECT", sql, 8) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", sql, 8) } func TestSelectScatterPartial(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. 
- primarySession = &vtgatepb.Session{ + session := &vtgatepb.Session{ TargetString: "@primary", } cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for _, shard := range shards { @@ -1556,13 +1630,14 @@ func TestSelectScatterPartial(t *testing.T) { conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) // Fail 1 of N without the directive fails the whole operation conns[2].MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1000 - results, err := executorExec(executor, "select id from `user`", nil) + results, err := executorExec(ctx, executor, session, "select id from `user`", nil) wantErr := "TestExecutor.40-60.primary" if err == nil || !strings.Contains(err.Error(), wantErr) { t.Errorf("want error %v, got %v", wantErr, err) @@ -1573,15 +1648,15 @@ func TestSelectScatterPartial(t *testing.T) { if results != nil { t.Errorf("want nil results, got %v", results) } - testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from `user`", 8) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from `user`", 8) // Fail 1 of N with the directive succeeds with 7 rows - results, err = executorExec(executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user", 
nil) + results, err = executorExec(ctx, executor, session, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user", nil) require.NoError(t, err) if results == nil || len(results.Rows) != 7 { t.Errorf("want 7 results, got %v", results) } - testQueryLog(t, logChan, "TestExecute", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user`", 8) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user`", 8) // When all shards fail, the execution should also fail conns[0].MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1000 @@ -1592,23 +1667,26 @@ func TestSelectScatterPartial(t *testing.T) { conns[6].MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1000 conns[7].MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1000 - _, err = executorExec(executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user", nil) + _, err = executorExec(ctx, executor, session, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user", nil) require.Error(t, err) - testQueryLog(t, logChan, "TestExecute", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user`", 8) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user`", 8) - _, err = executorExec(executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user order by id", nil) + _, err = executorExec(ctx, executor, session, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user order by id", nil) require.Error(t, err) } func TestSelectScatterPartialOLAP(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. 
cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for _, shard := range shards { @@ -1616,23 +1694,24 @@ func TestSelectScatterPartialOLAP(t *testing.T) { conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) // Fail 1 of N without the directive fails the whole operation conns[2].MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1000 - results, err := executorStream(executor, "select id from `user`") + results, err := executorStream(ctx, executor, "select id from `user`") assert.EqualError(t, err, "target: TestExecutor.40-60.primary: RESOURCE_EXHAUSTED error") assert.Equal(t, vtrpcpb.Code_RESOURCE_EXHAUSTED, vterrors.Code(err)) assert.Nil(t, results) - testQueryLog(t, logChan, "TestExecuteStream", "SELECT", "select id from `user`", 8) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", "select id from `user`", 8) // Fail 1 of N with the directive succeeds with 7 rows - results, err = executorStream(executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user") + results, err = executorStream(ctx, executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user") require.NoError(t, err) assert.EqualValues(t, 7, len(results.Rows)) - testQueryLog(t, 
logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user`", 8) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user`", 8) // If all shards fail, the operation should also fail conns[0].MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1000 @@ -1643,23 +1722,26 @@ func TestSelectScatterPartialOLAP(t *testing.T) { conns[6].MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1000 conns[7].MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1000 - _, err = executorStream(executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user") + _, err = executorStream(ctx, executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user") require.Error(t, err) - testQueryLog(t, logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user`", 8) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user`", 8) - _, err = executorStream(executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user order by id") + _, err = executorStream(ctx, executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user order by id") require.Error(t, err) } func TestSelectScatterPartialOLAP2(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. 
cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for _, shard := range shards { @@ -1667,9 +1749,10 @@ func TestSelectScatterPartialOLAP2(t *testing.T) { conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) // Fail 1 of N without the directive fails the whole operation tablet0 := conns[2].Tablet() @@ -1681,49 +1764,53 @@ func TestSelectScatterPartialOLAP2(t *testing.T) { sbc0Th := ths[0] sbc0Th.Serving = false - results, err := executorStream(executor, "select id from `user`") + results, err := executorStream(ctx, executor, "select id from `user`") require.Error(t, err) assert.Contains(t, err.Error(), `no healthy tablet available for 'keyspace:"TestExecutor" shard:"40-60"`) assert.Equal(t, vtrpcpb.Code_UNAVAILABLE, vterrors.Code(err)) assert.Nil(t, results) - testQueryLog(t, logChan, "TestExecuteStream", "SELECT", "select id from `user`", 8) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", "select id from `user`", 8) // Fail 1 of N with the directive succeeds with 7 rows - results, err = executorStream(executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user") + results, err = executorStream(ctx, executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 
*/ id from user") require.NoError(t, err) assert.EqualValues(t, 7, len(results.Rows)) - testQueryLog(t, logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user`", 8) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user`", 8) // order by - results, err = executorStream(executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user order by id") + results, err = executorStream(ctx, executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user order by id") require.NoError(t, err) assert.EqualValues(t, 7, len(results.Rows)) - testQueryLog(t, logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user` order by id asc", 8) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user` order by id asc", 8) // order by and limit - results, err = executorStream(executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user order by id limit 5") + results, err = executorStream(ctx, executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user order by id limit 5") require.NoError(t, err) assert.EqualValues(t, 5, len(results.Rows)) - testQueryLog(t, logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user` order by id asc limit 5", 8) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user` order by id asc limit 5", 8) } func TestStreamSelectScatter(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. 
cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} for _, shard := range shards { _ = hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_PRIMARY, true, 1, nil) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() sql := "select id from `user`" - result, err := executorStream(executor, sql) + result, err := executorStream(ctx, executor, sql) require.NoError(t, err) wantResult := &sqltypes.Result{ Fields: sandboxconn.SingleRowResult.Fields, @@ -1743,14 +1830,17 @@ func TestStreamSelectScatter(t *testing.T) { // TestSelectScatterOrderBy will run an ORDER BY query that will scatter out to 8 shards and return the 8 rows (one per shard) sorted. func TestSelectScatterOrderBy(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. 
cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for i, shard := range shards { @@ -1773,10 +1863,14 @@ func TestSelectScatterOrderBy(t *testing.T) { }}) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() query := "select col1, col2 from user order by col2 desc" - gotResult, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + gotResult, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ @@ -1809,14 +1903,17 @@ func TestSelectScatterOrderBy(t *testing.T) { // TestSelectScatterOrderByVarChar will run an ORDER BY query that will scatter out to 8 shards and return the 8 rows (one per shard) sorted. func TestSelectScatterOrderByVarChar(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. 
cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for i, shard := range shards { @@ -1833,19 +1930,22 @@ func TestSelectScatterOrderByVarChar(t *testing.T) { // This will allow us to test that cross-shard ordering // still works correctly. sqltypes.NewVarChar(fmt.Sprintf("%d", i%4)), - sqltypes.NewVarBinary(fmt.Sprintf("%d", i%4)), }}, }}) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() query := "select col1, textcol from user order by textcol desc" - gotResult, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + gotResult, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select col1, textcol, weight_string(textcol) from `user` order by textcol desc", + Sql: "select col1, textcol from `user` order by textcol desc", BindVariables: map[string]*querypb.BindVariable{}, }} for _, conn := range conns { @@ -1873,14 +1973,17 @@ func TestSelectScatterOrderByVarChar(t *testing.T) { } func TestStreamSelectScatterOrderBy(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. 
cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for i, shard := range shards { @@ -1900,10 +2003,11 @@ func TestStreamSelectScatterOrderBy(t *testing.T) { }}) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() query := "select id, col from user order by col desc" - gotResult, err := executorStream(executor, query) + gotResult, err := executorStream(ctx, executor, query) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ @@ -1931,14 +2035,17 @@ func TestStreamSelectScatterOrderBy(t *testing.T) { } func TestStreamSelectScatterOrderByVarChar(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. 
cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for i, shard := range shards { @@ -1952,19 +2059,19 @@ func TestStreamSelectScatterOrderByVarChar(t *testing.T) { Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), sqltypes.NewVarChar(fmt.Sprintf("%d", i%4)), - sqltypes.NewVarBinary(fmt.Sprintf("%d", i%4)), }}, }}) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() query := "select id, textcol from user order by textcol desc" - gotResult, err := executorStream(executor, query) + gotResult, err := executorStream(ctx, executor, query) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select id, textcol, weight_string(textcol) from `user` order by textcol desc", + Sql: "select id, textcol from `user` order by textcol desc", BindVariables: map[string]*querypb.BindVariable{}, }} for _, conn := range conns { @@ -1989,14 +2096,17 @@ func TestStreamSelectScatterOrderByVarChar(t *testing.T) { // TestSelectScatterAggregate will run an aggregate query that will scatter out to 8 shards and return 4 aggregated rows. func TestSelectScatterAggregate(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. 
cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for i, shard := range shards { @@ -2004,7 +2114,7 @@ func TestSelectScatterAggregate(t *testing.T) { sbc.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "sum(foo)", Type: sqltypes.Decimal, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "sum(foo)", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, {Name: "weight_string(col)", Type: sqltypes.VarBinary, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, }, InsertID: 0, @@ -2016,10 +2126,14 @@ func TestSelectScatterAggregate(t *testing.T) { }}) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() query := "select col, sum(foo) from user group by col" - gotResult, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + gotResult, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ @@ -2048,14 +2162,17 @@ func TestSelectScatterAggregate(t *testing.T) { } func TestStreamSelectScatterAggregate(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use 
createExecutorEnv. cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for i, shard := range shards { @@ -2063,7 +2180,7 @@ func TestStreamSelectScatterAggregate(t *testing.T) { sbc.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "sum(foo)", Type: sqltypes.Decimal, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "sum(foo)", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, {Name: "weight_string(col)", Type: sqltypes.VarBinary, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, }, InsertID: 0, @@ -2075,10 +2192,11 @@ func TestStreamSelectScatterAggregate(t *testing.T) { }}) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() query := "select col, sum(foo) from user group by col" - gotResult, err := executorStream(executor, query) + gotResult, err := executorStream(ctx, executor, query) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ @@ -2108,14 +2226,17 @@ func TestStreamSelectScatterAggregate(t *testing.T) { // TestSelectScatterLimit will run a limit query (ordered for consistency) against // a scatter route and verify that the limit primitive works as intended. 
func TestSelectScatterLimit(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for i, shard := range shards { @@ -2135,10 +2256,14 @@ func TestSelectScatterLimit(t *testing.T) { }}) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() query := "select col1, col2 from user order by col2 desc limit 3" - gotResult, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + gotResult, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ @@ -2176,14 +2301,17 @@ func TestSelectScatterLimit(t *testing.T) { // TestStreamSelectScatterLimit will run a streaming limit query (ordered for consistency) against // a scatter route and verify that the limit primitive works as intended. func TestStreamSelectScatterLimit(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. 
cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for i, shard := range shards { @@ -2203,10 +2331,11 @@ func TestStreamSelectScatterLimit(t *testing.T) { }}) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() query := "select col1, col2 from user order by col2 desc limit 3" - gotResult, err := executorStream(executor, query) + gotResult, err := executorStream(ctx, executor, query) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ @@ -2243,12 +2372,15 @@ func TestStreamSelectScatterLimit(t *testing.T) { // TODO(sougou): stream and non-stream testing are very similar. 
// Could reuse code, func TestSimpleJoin(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select u1.id, u2.id from user u1 join user u2 where u1.id = 1 and u2.id = 3" - result, err := executorExec(executor, sql, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, sql, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select u1.id from `user` as u1 where u1.id = 1", @@ -2276,16 +2408,19 @@ func TestSimpleJoin(t *testing.T) { t.Errorf("result: %+v, want %+v", result, wantResult) } - testQueryLog(t, logChan, "TestExecute", "SELECT", "select u1.id, u2.id from `user` as u1 join `user` as u2 where u1.id = 1 and u2.id = 3", 2) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select u1.id, u2.id from `user` as u1 join `user` as u2 where u1.id = 1 and u2.id = 3", 2) } func TestJoinComments(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select u1.id, u2.id from user u1 join user u2 where u1.id = 1 and u2.id = 3 /* trailing */" - _, err := executorExec(executor, sql, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, sql, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select u1.id from `user` as u1 where u1.id = 1 /* trailing */", @@ -2298,16 +2433,16 @@ func TestJoinComments(t *testing.T) { }} utils.MustMatch(t, wantQueries, sbc2.Queries) - 
testQueryLog(t, logChan, "TestExecute", "SELECT", "select u1.id, u2.id from `user` as u1 join `user` as u2 where u1.id = 1 and u2.id = 3 /* trailing */", 2) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select u1.id, u2.id from `user` as u1 join `user` as u2 where u1.id = 1 and u2.id = 3 /* trailing */", 2) } func TestSimpleJoinStream(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select u1.id, u2.id from user u1 join user u2 where u1.id = 1 and u2.id = 3" - result, err := executorStream(executor, sql) + result, err := executorStream(ctx, executor, sql) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select u1.id from `user` as u1 where u1.id = 1", @@ -2336,13 +2471,13 @@ func TestSimpleJoinStream(t *testing.T) { t.Errorf("result: %+v, want %+v", result, wantResult) } - testQueryLog(t, logChan, "TestExecuteStream", "SELECT", "select u1.id, u2.id from `user` as u1 join `user` as u2 where u1.id = 1 and u2.id = 3", 2) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", "select u1.id, u2.id from `user` as u1 join `user` as u2 where u1.id = 1 and u2.id = 3", 2) } func TestVarJoin(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ @@ -2357,7 +2492,10 @@ func TestVarJoin(t *testing.T) { }} sbc1.SetResults(result1) sql := "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1" - _, err := executorExec(executor, sql, nil) + 
session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, sql, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select u1.id, u1.col from `user` as u1 where u1.id = 1", @@ -2371,13 +2509,13 @@ func TestVarJoin(t *testing.T) { t.Errorf("sbc2.Queries: %s, want %s\n", got, want) } - testQueryLog(t, logChan, "TestExecute", "SELECT", "select u1.id, u2.id from `user` as u1 join `user` as u2 on u2.id = u1.col where u1.id = 1", 2) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select u1.id, u2.id from `user` as u1 join `user` as u2 on u2.id = u1.col where u1.id = 1", 2) } func TestVarJoinStream(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ @@ -2392,7 +2530,7 @@ func TestVarJoinStream(t *testing.T) { }} sbc1.SetResults(result1) sql := "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1" - _, err := executorStream(executor, sql) + _, err := executorStream(ctx, executor, sql) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select u1.id, u1.col from `user` as u1 where u1.id = 1", @@ -2406,13 +2544,13 @@ func TestVarJoinStream(t *testing.T) { t.Errorf("sbc2.Queries: %s, want %s\n", got, want) } - testQueryLog(t, logChan, "TestExecuteStream", "SELECT", "select u1.id, u2.id from `user` as u1 join `user` as u2 on u2.id = u1.col where u1.id = 1", 2) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", "select u1.id, u2.id from `user` as u1 join `user` as u2 on u2.id = u1.col where u1.id = 1", 2) } func TestLeftJoin(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := 
QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, @@ -2432,7 +2570,10 @@ func TestLeftJoin(t *testing.T) { sbc1.SetResults(result1) sbc2.SetResults(emptyResult) sql := "select u1.id, u2.id from user u1 left join user u2 on u2.id = u1.col where u1.id = 1" - result, err := executorExec(executor, sql, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, sql, nil) require.NoError(t, err) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ @@ -2449,11 +2590,11 @@ func TestLeftJoin(t *testing.T) { if !result.Equal(wantResult) { t.Errorf("result: \n%+v, want \n%+v", result, wantResult) } - testQueryLog(t, logChan, "TestExecute", "SELECT", "select u1.id, u2.id from `user` as u1 left join `user` as u2 on u2.id = u1.col where u1.id = 1", 2) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select u1.id, u2.id from `user` as u1 left join `user` as u2 on u2.id = u1.col where u1.id = 1", 2) } func TestLeftJoinStream(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, @@ -2472,7 +2613,7 @@ func TestLeftJoinStream(t *testing.T) { }} sbc1.SetResults(result1) sbc2.SetResults(emptyResult) - result, err := executorStream(executor, "select u1.id, u2.id from user u1 left join user u2 on u2.id = u1.col where u1.id = 1") + result, err := executorStream(ctx, executor, "select u1.id, u2.id from user u1 left join 
user u2 on u2.id = u1.col where u1.id = 1") require.NoError(t, err) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ @@ -2493,7 +2634,7 @@ func TestLeftJoinStream(t *testing.T) { } func TestEmptyJoin(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) // Empty result requires a field query for the second part of join, // which is sent to shard 0. sbc1.SetResults([]*sqltypes.Result{{ @@ -2507,7 +2648,10 @@ func TestEmptyJoin(t *testing.T) { {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, }}) - result, err := executorExec(executor, "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select u1.id, u1.col from `user` as u1 where u1.id = 1", @@ -2531,7 +2675,7 @@ func TestEmptyJoin(t *testing.T) { } func TestEmptyJoinStream(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) // Empty result requires a field query for the second part of join, // which is sent to shard 0. 
sbc1.SetResults([]*sqltypes.Result{{ @@ -2543,7 +2687,7 @@ func TestEmptyJoinStream(t *testing.T) { {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, }}) - result, err := executorStream(executor, "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1") + result, err := executorStream(ctx, executor, "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1") require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select u1.id, u1.col from `user` as u1 where u1.id = 1", @@ -2567,7 +2711,7 @@ func TestEmptyJoinStream(t *testing.T) { } func TestEmptyJoinRecursive(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) // Make sure it also works recursively. sbc1.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ @@ -2583,7 +2727,10 @@ func TestEmptyJoinRecursive(t *testing.T) { {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, }}) - result, err := executorExec(executor, "select u1.id, u2.id, u3.id from user u1 join (user u2 join user u3 on u3.id = u2.col) where u1.id = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "select u1.id, u2.id, u3.id from user u1 join (user u2 join user u3 on u3.id = u2.col) where u1.id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select u1.id from `user` as u1 where u1.id = 1", @@ -2611,7 +2758,7 @@ func TestEmptyJoinRecursive(t *testing.T) { } func TestEmptyJoinRecursiveStream(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) // Make sure it also works recursively. 
sbc1.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ @@ -2627,7 +2774,7 @@ func TestEmptyJoinRecursiveStream(t *testing.T) { {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, }}) - result, err := executorStream(executor, "select u1.id, u2.id, u3.id from user u1 join (user u2 join user u3 on u3.id = u2.col) where u1.id = 1") + result, err := executorStream(ctx, executor, "select u1.id, u2.id, u3.id from user u1 join (user u2 join user u3 on u3.id = u2.col) where u1.id = 1") require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select u1.id from `user` as u1 where u1.id = 1", @@ -2655,7 +2802,7 @@ func TestEmptyJoinRecursiveStream(t *testing.T) { } func TestCrossShardSubquery(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32}, @@ -2668,35 +2815,31 @@ func TestCrossShardSubquery(t *testing.T) { }}, }} sbc1.SetResults(result1) - result, err := executorExec(executor, "select id1 from (select u1.id id1, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1) as t", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "select id1 from (select u1.id id1, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1) as t", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select u1.id as id1, u1.col from `user` as u1 where u1.id = 1", + Sql: "select id1, t.`u1.col` from (select u1.id as id1, u1.col as `u1.col` from `user` as u1 where u1.id = 1) as t", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) - // We have to use string representation because bindvars type is too complex. 
- got := fmt.Sprintf("%+v", sbc2.Queries) - want := `[sql:"select u2.id from ` + "`user`" + ` as u2 where u2.id = :u1_col" bind_variables:{key:"u1_col" value:{type:INT32 value:"3"}}]` - if got != want { - t.Errorf("sbc2.Queries: %s, want %s\n", got, want) - } - wantResult := &sqltypes.Result{ - Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - }, - Rows: [][]sqltypes.Value{{ - sqltypes.NewInt32(1), - }}, - } + wantQueries = []*querypb.BoundQuery{{ + Sql: "select 1 from (select u2.id from `user` as u2 where u2.id = :u1_col) as t", + BindVariables: map[string]*querypb.BindVariable{"u1_col": sqltypes.Int32BindVariable(3)}, + }} + utils.MustMatch(t, wantQueries, sbc2.Queries) + + wantResult := sqltypes.MakeTestResult(sqltypes.MakeTestFields("id", "int32"), "1") if !result.Equal(wantResult) { t.Errorf("result: %+v, want %+v", result, wantResult) } } func TestSubQueryAndQueryWithLimit(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, @@ -2738,7 +2881,7 @@ func TestSubQueryAndQueryWithLimit(t *testing.T) { } func TestCrossShardSubqueryStream(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, @@ -2751,19 +2894,18 @@ func TestCrossShardSubqueryStream(t *testing.T) { }}, }} sbc1.SetResults(result1) - result, err := executorStream(executor, "select id1 from (select u1.id id1, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1) as t") + result, err := executorStream(ctx, executor, "select id1 from (select u1.id id1, u2.id from user u1 join user u2 on u2.id = u1.col 
where u1.id = 1) as t") require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select u1.id as id1, u1.col from `user` as u1 where u1.id = 1", + Sql: "select id1, t.`u1.col` from (select u1.id as id1, u1.col as `u1.col` from `user` as u1 where u1.id = 1) as t", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) - // We have to use string representation because bindvars type is too complex. - got := fmt.Sprintf("%+v", sbc2.Queries) - want := `[sql:"select u2.id from ` + "`user`" + ` as u2 where u2.id = :u1_col" bind_variables:{key:"u1_col" value:{type:INT32 value:"3"}}]` - if got != want { - t.Errorf("sbc2.Queries:\n%s, want\n%s\n", got, want) - } + wantQueries = []*querypb.BoundQuery{{ + Sql: "select 1 from (select u2.id from `user` as u2 where u2.id = :u1_col) as t", + BindVariables: map[string]*querypb.BindVariable{"u1_col": sqltypes.Int32BindVariable(3)}, + }} + utils.MustMatch(t, wantQueries, sbc2.Queries) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ @@ -2779,7 +2921,7 @@ func TestCrossShardSubqueryStream(t *testing.T) { } func TestCrossShardSubqueryGetFields(t *testing.T) { - executor, sbc1, _, sbclookup := createExecutorEnv() + executor, sbc1, _, sbclookup, ctx := createExecutorEnv(t) sbclookup.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, @@ -2792,13 +2934,16 @@ func TestCrossShardSubqueryGetFields(t *testing.T) { }, }} sbc1.SetResults(result1) - result, err := executorExec(executor, "select main1.col, t.id1 from main1 join (select u1.id id1, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1) as t", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "select main1.col, t.id1 from main1 join (select u1.id id1, u2.id from user u1 join user u2 on u2.id = u1.col where 
u1.id = 1) as t", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select u1.id as id1, u1.col from `user` as u1 where 1 != 1", + Sql: "select t.id1, t.`u1.col` from (select u1.id as id1, u1.col as `u1.col` from `user` as u1 where 1 != 1) as t where 1 != 1", BindVariables: map[string]*querypb.BindVariable{}, }, { - Sql: "select u2.id from `user` as u2 where 1 != 1", + Sql: "select 1 from (select u2.id from `user` as u2 where 1 != 1) as t where 1 != 1", BindVariables: map[string]*querypb.BindVariable{ "u1_col": sqltypes.NullBindVariable, }, @@ -2817,12 +2962,15 @@ func TestCrossShardSubqueryGetFields(t *testing.T) { } func TestSelectBindvarswithPrepare(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select id from `user` where id = :id" - _, err := executorPrepare(executor, sql, map[string]*querypb.BindVariable{ + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorPrepare(ctx, executor, session, sql, map[string]*querypb.BindVariable{ "id": sqltypes.Int64BindVariable(1), }) require.NoError(t, err) @@ -2838,18 +2986,21 @@ func TestSelectBindvarswithPrepare(t *testing.T) { } func TestSelectDatabasePrepare(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) executor.normalize = true - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select database()" - _, err := executorPrepare(executor, sql, map[string]*querypb.BindVariable{}) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorPrepare(ctx, executor, session, sql, 
map[string]*querypb.BindVariable{}) require.NoError(t, err) } func TestSelectWithUnionAll(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) executor.normalize = true sql := "select id from user where id in (1, 2, 3) union all select id from user where id in (1, 2, 3)" bv, _ := sqltypes.BuildBindVariable([]int64{1, 2, 3}) @@ -2885,7 +3036,10 @@ func TestSelectWithUnionAll(t *testing.T) { "vtg2": bv, }, }} - _, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) utils.MustMatch(t, sbc1WantQueries, sbc1.Queries, "sbc1") utils.MustMatch(t, sbc2WantQueries, sbc2.Queries, "sbc2") @@ -2894,14 +3048,14 @@ func TestSelectWithUnionAll(t *testing.T) { sbc1.Queries = nil sbc2.Queries = nil - _, err = executorStream(executor, sql) + _, err = executorStream(ctx, executor, sql) require.NoError(t, err) utils.MustMatch(t, sbc1WantQueries, sbc1.Queries, "sbc1") utils.MustMatch(t, sbc2WantQueries, sbc2.Queries, "sbc2") } func TestSelectLock(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) session := NewSafeSession(nil) session.Session.InTransaction = true session.ShardSessions = []*vtgatepb.Session_ShardSession{{ @@ -2960,6 +3114,8 @@ func TestSelectLock(t *testing.T) { } func TestLockReserve(t *testing.T) { + executor, _, _, _, _ := createExecutorEnv(t) + // no connection should be reserved for these queries. 
tcases := []string{ "select is_free_lock('lock name') from dual", @@ -2968,7 +3124,6 @@ func TestLockReserve(t *testing.T) { "select release_lock('lock name') from dual", } - executor, _, _, _ := createExecutorEnv() session := NewAutocommitSession(&vtgatepb.Session{}) for _, sql := range tcases { @@ -2987,7 +3142,7 @@ func TestLockReserve(t *testing.T) { } func TestSelectFromInformationSchema(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) session := NewSafeSession(nil) // check failure when trying to query two keyspaces @@ -2999,25 +3154,30 @@ func TestSelectFromInformationSchema(t *testing.T) { session.TargetString = "TestExecutor" _, err = exec(executor, session, "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()") require.NoError(t, err) - assert.Equal(t, sbc1.StringQueries(), []string{"select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = database()"}) + assert.Equal(t, []string{"select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = database()"}, + sbc1.StringQueries()) // `USE TestXBadSharding` and then query info_schema about TestExecutor - should target TestExecutor and not use the default keyspace sbc1.Queries = nil session.TargetString = "TestXBadSharding" _, err = exec(executor, session, "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'TestExecutor'") require.NoError(t, err) - assert.Equal(t, sbc1.StringQueries(), []string{"select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */"}) + assert.Equal(t, []string{"select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, 
`AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */"}, + sbc1.StringQueries()) } func TestStreamOrderByLimitWithMultipleResults(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} count := 1 for _, shard := range shards { @@ -3029,11 +3189,17 @@ func TestStreamOrderByLimitWithMultipleResults(t *testing.T) { count++ } - executor := NewExecutor(context.Background(), serv, cell, resolver, true, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3) + queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) + plans := DefaultPlanCache() + executor := NewExecutor(ctx, serv, cell, resolver, true, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4) + executor.SetQueryLogger(queryLogger) + defer executor.Close() + // some sleep for all goroutines to start + time.Sleep(100 * time.Millisecond) before := runtime.NumGoroutine() query := "select id, col from user order by id limit 2" - gotResult, err := executorStream(executor, query) + gotResult, err := executorStream(ctx, executor, query) require.NoError(t, err) wantResult := sqltypes.MakeTestResult(sqltypes.MakeTestFields("id|col", "int32|int32"), "1|1", "2|2") @@ -3044,14 +3210,17 @@ 
func TestStreamOrderByLimitWithMultipleResults(t *testing.T) { } func TestSelectScatterFails(t *testing.T) { + ctx := utils.LeakCheckContext(t) + sess := &vtgatepb.Session{} cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} for i, shard := range shards { @@ -3071,40 +3240,41 @@ func TestSelectScatterFails(t *testing.T) { }}) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() executor.allowScatter = false - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - _, err := executorExecSession(executor, "select id from `user`", nil, sess) + _, err := executorExecSession(ctx, executor, "select id from `user`", nil, sess) require.Error(t, err) assert.Contains(t, err.Error(), "scatter") // Run the test again, to ensure it behaves the same for a cached query - _, err = executorExecSession(executor, "select id from `user`", nil, sess) + _, err = executorExecSession(ctx, executor, "select id from `user`", nil, sess) require.Error(t, err) assert.Contains(t, err.Error(), "scatter") - _, err = executorExecSession(executor, "select /*vt+ ALLOW_SCATTER */ id from user", nil, sess) + _, err = executorExecSession(ctx, executor, "select /*vt+ ALLOW_SCATTER */ id from user", nil, sess) require.NoError(t, err) - _, err = executorExecSession(executor, "begin", nil, sess) + _, err = executorExecSession(ctx, 
executor, "begin", nil, sess) require.NoError(t, err) - _, err = executorExecSession(executor, "commit", nil, sess) + _, err = executorExecSession(ctx, executor, "commit", nil, sess) require.NoError(t, err) - _, err = executorExecSession(executor, "savepoint a", nil, sess) + _, err = executorExecSession(ctx, executor, "savepoint a", nil, sess) require.NoError(t, err) } func TestGen4SelectStraightJoin(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) executor.normalize = true executor.pv = querypb.ExecuteOptions_Gen4 session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) query := "select u.id from user u straight_join user2 u2 on u.id = u2.id" - _, err := executor.Execute(context.Background(), + _, err := executor.Execute(context.Background(), nil, nil, "TestGen4SelectStraightJoin", session, @@ -3128,18 +3298,13 @@ func TestGen4SelectStraightJoin(t *testing.T) { } func TestGen4MultiColumnVindexEqual(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) executor.normalize = true executor.pv = querypb.ExecuteOptions_Gen4 session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) query := "select * from user_region where cola = 1 and colb = 2" - _, err := executor.Execute(context.Background(), - nil, - "TestGen4MultiColumnVindex", - session, - query, map[string]*querypb.BindVariable{}, - ) + _, err := executor.Execute(context.Background(), nil, nil, "TestGen4MultiColumnVindex", session, query, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -3156,12 +3321,7 @@ func TestGen4MultiColumnVindexEqual(t *testing.T) { sbc1.Queries = nil query = "select * from user_region where cola = 17984 and colb = 1" - _, err = executor.Execute(context.Background(), - nil, - "TestGen4MultiColumnVindex", - session, - query, map[string]*querypb.BindVariable{}, - ) + _, err = 
executor.Execute(context.Background(), nil, nil, "TestGen4MultiColumnVindex", session, query, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{ { @@ -3177,18 +3337,13 @@ func TestGen4MultiColumnVindexEqual(t *testing.T) { } func TestGen4MultiColumnVindexIn(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) executor.normalize = true executor.pv = querypb.ExecuteOptions_Gen4 session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) query := "select * from user_region where cola IN (1,17984) and colb IN (2,3,4)" - _, err := executor.Execute(context.Background(), - nil, - "TestGen4MultiColumnVindex", - session, - query, map[string]*querypb.BindVariable{}, - ) + _, err := executor.Execute(context.Background(), nil, nil, "TestGen4MultiColumnVindex", session, query, map[string]*querypb.BindVariable{}) require.NoError(t, err) bv1, _ := sqltypes.BuildBindVariable([]int64{1}) bv2, _ := sqltypes.BuildBindVariable([]int64{17984}) @@ -3221,18 +3376,13 @@ func TestGen4MultiColumnVindexIn(t *testing.T) { } func TestGen4MultiColMixedColComparision(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) executor.normalize = true executor.pv = querypb.ExecuteOptions_Gen4 session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) query := "select * from user_region where colb = 2 and cola IN (1,17984)" - _, err := executor.Execute(context.Background(), - nil, - "TestGen4MultiColMixedColComparision", - session, - query, map[string]*querypb.BindVariable{}, - ) + _, err := executor.Execute(context.Background(), nil, nil, "TestGen4MultiColMixedColComparision", session, query, map[string]*querypb.BindVariable{}) require.NoError(t, err) bvtg1 := sqltypes.Int64BindVariable(2) bvtg2, _ := sqltypes.BuildBindVariable([]int64{1, 17984}) @@ -3263,18 +3413,13 @@ func 
TestGen4MultiColMixedColComparision(t *testing.T) { } func TestGen4MultiColBestVindexSel(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) executor.normalize = true executor.pv = querypb.ExecuteOptions_Gen4 session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) query := "select * from user_region where colb = 2 and cola IN (1,17984) and cola = 1" - _, err := executor.Execute(context.Background(), - nil, - "TestGen4MultiColBestVindexSel", - session, - query, map[string]*querypb.BindVariable{}, - ) + _, err := executor.Execute(context.Background(), nil, nil, "TestGen4MultiColBestVindexSel", session, query, map[string]*querypb.BindVariable{}) require.NoError(t, err) bvtg2, _ := sqltypes.BuildBindVariable([]int64{1, 17984}) wantQueries := []*querypb.BoundQuery{ @@ -3294,12 +3439,7 @@ func TestGen4MultiColBestVindexSel(t *testing.T) { sbc1.Queries = nil query = "select * from user_region where colb in (10,20) and cola IN (1,17984) and cola = 1 and colb = 2" - _, err = executor.Execute(context.Background(), - nil, - "TestGen4MultiColBestVindexSel", - session, - query, map[string]*querypb.BindVariable{}, - ) + _, err = executor.Execute(context.Background(), nil, nil, "TestGen4MultiColBestVindexSel", session, query, map[string]*querypb.BindVariable{}) require.NoError(t, err) bvtg1, _ := sqltypes.BuildBindVariable([]int64{10, 20}) @@ -3319,18 +3459,13 @@ func TestGen4MultiColBestVindexSel(t *testing.T) { } func TestGen4MultiColMultiEqual(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) executor.normalize = true executor.pv = querypb.ExecuteOptions_Gen4 session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) query := "select * from user_region where (cola,colb) in ((17984,2),(17984,3))" - _, err := executor.Execute(context.Background(), - nil, - "TestGen4MultiColMultiEqual", - session, - query, 
map[string]*querypb.BindVariable{}, - ) + _, err := executor.Execute(context.Background(), nil, nil, "TestGen4MultiColMultiEqual", session, query, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -3347,11 +3482,14 @@ func TestGen4MultiColMultiEqual(t *testing.T) { } func TestGen4SelectUnqualifiedReferenceTable(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) executor.pv = querypb.ExecuteOptions_Gen4 query := "select * from zip_detail" - _, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -3365,11 +3503,14 @@ func TestGen4SelectUnqualifiedReferenceTable(t *testing.T) { } func TestGen4SelectQualifiedReferenceTable(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) executor.pv = querypb.ExecuteOptions_Gen4 query := fmt.Sprintf("select * from %s.zip_detail", KsTestSharded) - _, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -3383,11 +3524,14 @@ func TestGen4SelectQualifiedReferenceTable(t *testing.T) { } func TestGen4JoinUnqualifiedReferenceTable(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) executor.pv = querypb.ExecuteOptions_Gen4 query := "select * from user join zip_detail on user.zip_detail_id = zip_detail.id" - _, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, query, nil) 
require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -3403,7 +3547,7 @@ func TestGen4JoinUnqualifiedReferenceTable(t *testing.T) { sbc2.Queries = nil query = "select * from simple join zip_detail on simple.zip_detail_id = zip_detail.id" - _, err = executorExec(executor, query, nil) + _, err = executorExec(ctx, executor, session, query, nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{ { @@ -3417,11 +3561,14 @@ func TestGen4JoinUnqualifiedReferenceTable(t *testing.T) { } func TestGen4CrossShardJoinQualifiedReferenceTable(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) executor.pv = querypb.ExecuteOptions_Gen4 query := "select user.id from user join TestUnsharded.zip_detail on user.zip_detail_id = TestUnsharded.zip_detail.id" - _, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) shardedWantQueries := []*querypb.BoundQuery{ @@ -3439,7 +3586,7 @@ func TestGen4CrossShardJoinQualifiedReferenceTable(t *testing.T) { sbc2.Queries = nil query = "select simple.id from simple join TestExecutor.zip_detail on simple.zip_detail_id = TestExecutor.zip_detail.id" - _, err = executorExec(executor, query, nil) + _, err = executorExec(ctx, executor, session, query, nil) require.NoError(t, err) unshardedWantQueries := []*querypb.BoundQuery{ { @@ -3453,23 +3600,25 @@ func TestGen4CrossShardJoinQualifiedReferenceTable(t *testing.T) { } func TestRegionRange(t *testing.T) { - // Special setup: Don't use createExecutorEnv. + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. 
cell := "regioncell" ks := "TestExecutor" hc := discovery.NewFakeHealthCheck(nil) s := createSandbox(ks) s.ShardSpec = "-20-20a0-" s.VSchema = executorVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-20a0", "20a0-"} var conns []*sandboxconn.SandboxConn for _, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, ks, shard, topodatapb.TabletType_PRIMARY, true, 1, nil) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() executor.pv = querypb.ExecuteOptions_Gen4 tcases := []struct { @@ -3488,13 +3637,7 @@ func TestRegionRange(t *testing.T) { for _, tcase := range tcases { t.Run(strconv.Itoa(tcase.regionID), func(t *testing.T) { sql := fmt.Sprintf("select * from user_region where cola = %d", tcase.regionID) - _, err := executor.Execute( - context.Background(), - nil, - "TestRegionRange", - NewAutocommitSession(&vtgatepb.Session{}), - sql, - nil) + _, err := executor.Execute(context.Background(), nil, nil, "TestRegionRange", NewAutocommitSession(&vtgatepb.Session{}), sql, nil) require.NoError(t, err) count := 0 for _, sbc := range conns { @@ -3507,6 +3650,8 @@ func TestRegionRange(t *testing.T) { } func TestMultiCol(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createLegacyExecutorEnv. 
cell := "multicol" ks := "TestMultiCol" @@ -3514,15 +3659,16 @@ func TestMultiCol(t *testing.T) { s := createSandbox(ks) s.ShardSpec = "-20-20a0-" s.VSchema = multiColVschema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-20a0", "20a0-"} var conns []*sandboxconn.SandboxConn for _, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, ks, shard, topodatapb.TabletType_PRIMARY, true, 1, nil) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() executor.pv = querypb.ExecuteOptions_Gen4 tcases := []struct { @@ -3539,13 +3685,12 @@ func TestMultiCol(t *testing.T) { shards: []string{"20a0-"}, }} - ctx := context.Background() session := NewAutocommitSession(&vtgatepb.Session{}) for _, tcase := range tcases { t.Run(fmt.Sprintf("%d_%d_%d", tcase.cola, tcase.colb, tcase.colc), func(t *testing.T) { sql := fmt.Sprintf("select * from multicoltbl where cola = %d and colb = %d and colc = '%d'", tcase.cola, tcase.colb, tcase.colc) - _, err := executor.Execute(ctx, nil, "TestMultiCol", session, sql, nil) + _, err := executor.Execute(ctx, nil, nil, "TestMultiCol", session, sql, nil) require.NoError(t, err) var shards []string for _, sbc := range conns { @@ -3586,6 +3731,8 @@ var multiColVschema = ` ` func TestMultiColPartial(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createLegacyExecutorEnv. 
cell := "multicol" ks := "TestMultiCol" @@ -3593,15 +3740,16 @@ func TestMultiColPartial(t *testing.T) { s := createSandbox(ks) s.ShardSpec = "-20-20a0c0-" s.VSchema = multiColVschema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-20a0c0", "20a0c0-"} var conns []*sandboxconn.SandboxConn for _, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, ks, shard, topodatapb.TabletType_PRIMARY, true, 1, nil) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() executor.pv = querypb.ExecuteOptions_Gen4 tcases := []struct { @@ -3627,13 +3775,12 @@ func TestMultiColPartial(t *testing.T) { shards: []string{"20a0c0-"}, }} - ctx := context.Background() session := NewAutocommitSession(&vtgatepb.Session{}) for _, tcase := range tcases { t.Run(tcase.where, func(t *testing.T) { sql := fmt.Sprintf("select * from multicoltbl where %s", tcase.where) - _, err := executor.Execute(ctx, nil, "TestMultiCol", session, sql, nil) + _, err := executor.Execute(ctx, nil, nil, "TestMultiCol", session, sql, nil) require.NoError(t, err) var shards []string for _, sbc := range conns { @@ -3648,20 +3795,25 @@ func TestMultiColPartial(t *testing.T) { } func TestSelectAggregationNoData(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. 
cell := "aa" hc := discovery.NewFakeHealthCheck(nil) - createSandbox(KsTestSharded).VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u := createSandbox(KsTestUnsharded) + s := createSandbox(KsTestSharded) + s.VSchema = executorVSchema + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for _, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, KsTestSharded, shard, topodatapb.TabletType_PRIMARY, true, 1, nil) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() executor.pv = querypb.ExecuteOptions_Gen4 tcases := []struct { @@ -3701,16 +3853,16 @@ func TestSelectAggregationNoData(t *testing.T) { }, { sql: `select count(*) from (select col1, col2 from user limit 2) x`, - sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2", "int64|int64")), - expSandboxQ: "select col1, col2 from `user` limit :__upper_limit", + sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|1", "int64|int64|int64")), + expSandboxQ: "select col1, col2, 1 from (select col1, col2 from `user`) as x limit :__upper_limit", expField: `[name:"count(*)" type:INT64]`, expRow: `[[INT64(0)]]`, }, { sql: `select col2, count(*) from (select col1, col2 from user limit 2) x group by col2`, - sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col2)", "int64|int64|varbinary")), - expSandboxQ: "select col1, col2, weight_string(col2) from `user` order by col2 asc limit :__upper_limit", - expField: `[name:"col2" type:INT64 charset:63 flags:32768 name:"count(*)" type:INT64]`, + sandboxRes: 
sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|1|weight_string(col2)", "int64|int64|int64|varbinary")), + expSandboxQ: "select col1, col2, 1, weight_string(col2) from (select col1, col2 from `user`) as x limit :__upper_limit", + expField: `[name:"col2" type:INT64 name:"count(*)" type:INT64]`, expRow: `[]`, }, } @@ -3721,7 +3873,10 @@ func TestSelectAggregationNoData(t *testing.T) { sbc.SetResults([]*sqltypes.Result{tc.sandboxRes}) sbc.Queries = nil } - qr, err := executorExec(executor, tc.sql, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + qr, err := executorExec(ctx, executor, session, tc.sql, nil) require.NoError(t, err) assert.Equal(t, tc.expField, fmt.Sprintf("%v", qr.Fields)) assert.Equal(t, tc.expRow, fmt.Sprintf("%v", qr.Rows)) @@ -3732,20 +3887,25 @@ func TestSelectAggregationNoData(t *testing.T) { } func TestSelectAggregationData(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. cell := "aa" hc := discovery.NewFakeHealthCheck(nil) - createSandbox(KsTestSharded).VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u := createSandbox(KsTestUnsharded) + s := createSandbox(KsTestSharded) + s.VSchema = executorVSchema + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for _, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, KsTestSharded, shard, topodatapb.TabletType_PRIMARY, true, 1, nil) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() executor.pv = querypb.ExecuteOptions_Gen4 tcases := []struct { @@ -3785,72 +3945,72 @@ func 
TestSelectAggregationData(t *testing.T) { }, { sql: `select count(*) from (select col1, col2 from user limit 2) x`, - sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2", "int64|int64"), "1|2", "2|1"), - expSandboxQ: "select col1, col2 from `user` limit :__upper_limit", + sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|1", "int64|int64|int64"), "100|200|1", "200|300|1"), + expSandboxQ: "select col1, col2, 1 from (select col1, col2 from `user`) as x limit :__upper_limit", expField: `[name:"count(*)" type:INT64]`, expRow: `[[INT64(2)]]`, }, { sql: `select col2, count(*) from (select col1, col2 from user limit 9) x group by col2`, - sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col2)", "int64|int64|varbinary"), "3|1|NULL", "2|2|NULL"), - expSandboxQ: "select col1, col2, weight_string(col2) from `user` order by col2 asc limit :__upper_limit", - expField: `[name:"col2" type:INT64 charset:63 flags:32769 name:"count(*)" type:INT64]`, - expRow: `[[INT64(1) INT64(8)] [INT64(2) INT64(1)]]`, + sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|1|weight_string(col2)", "int64|int64|int64|varbinary"), "100|3|1|NULL", "200|2|1|NULL"), + expSandboxQ: "select col1, col2, 1, weight_string(col2) from (select col1, col2 from `user`) as x limit :__upper_limit", + expField: `[name:"col2" type:INT64 name:"count(*)" type:INT64]`, + expRow: `[[INT64(2) INT64(4)] [INT64(3) INT64(5)]]`, }, { sql: `select count(col1) from (select id, col1 from user limit 2) x`, - sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("id|col1", "int64|varchar"), "3|a", "2|b"), - expSandboxQ: "select id, col1 from `user` limit :__upper_limit", + sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("id|col1", "int64|varchar"), "1|a", "2|b"), + expSandboxQ: "select id, col1 from (select id, col1 from `user`) as x limit :__upper_limit", expField: `[name:"count(col1)" type:INT64]`, expRow: 
`[[INT64(2)]]`, }, { sql: `select count(col1), col2 from (select col2, col1 from user limit 9) x group by col2`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col2|col1|weight_string(col2)", "int64|varchar|varbinary"), "3|a|NULL", "2|b|NULL"), - expSandboxQ: "select col2, col1, weight_string(col2) from `user` order by col2 asc limit :__upper_limit", - expField: `[name:"count(col1)" type:INT64 name:"col2" type:INT64 charset:63 flags:32769]`, - expRow: `[[INT64(8) INT64(2)] [INT64(1) INT64(3)]]`, + expSandboxQ: "select col2, col1, weight_string(col2) from (select col2, col1 from `user`) as x limit :__upper_limit", + expField: `[name:"count(col1)" type:INT64 name:"col2" type:INT64]`, + expRow: `[[INT64(4) INT64(2)] [INT64(5) INT64(3)]]`, }, { sql: `select col1, count(col2) from (select col1, col2 from user limit 9) x group by col1`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col1)", "varchar|int64|varbinary"), "a|1|a", "b|null|b"), - expSandboxQ: "select col1, col2, weight_string(col1) from `user` order by col1 asc limit :__upper_limit", - expField: `[name:"col1" type:VARCHAR charset:255 flags:1 name:"count(col2)" type:INT64]`, - expRow: `[[VARCHAR("a") INT64(8)] [VARCHAR("b") INT64(0)]]`, + expSandboxQ: "select col1, col2, weight_string(col1) from (select col1, col2 from `user`) as x limit :__upper_limit", + expField: `[name:"col1" type:VARCHAR name:"count(col2)" type:INT64]`, + expRow: `[[VARCHAR("a") INT64(5)] [VARCHAR("b") INT64(0)]]`, }, { sql: `select col1, count(col2) from (select col1, col2 from user limit 32) x group by col1`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col1)", "varchar|int64|varbinary"), "null|1|null", "null|null|null", "a|1|a", "b|null|b"), - expSandboxQ: "select col1, col2, weight_string(col1) from `user` order by col1 asc limit :__upper_limit", - expField: `[name:"col1" type:VARCHAR charset:255 flags:1 name:"count(col2)" type:INT64]`, + 
expSandboxQ: "select col1, col2, weight_string(col1) from (select col1, col2 from `user`) as x limit :__upper_limit", + expField: `[name:"col1" type:VARCHAR name:"count(col2)" type:INT64]`, expRow: `[[NULL INT64(8)] [VARCHAR("a") INT64(8)] [VARCHAR("b") INT64(0)]]`, }, { sql: `select col1, sum(col2) from (select col1, col2 from user limit 4) x group by col1`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col1)", "varchar|int64|varbinary"), "a|3|a"), - expSandboxQ: "select col1, col2, weight_string(col1) from `user` order by col1 asc limit :__upper_limit", - expField: `[name:"col1" type:VARCHAR charset:255 flags:1 name:"sum(col2)" type:DECIMAL]`, + expSandboxQ: "select col1, col2, weight_string(col1) from (select col1, col2 from `user`) as x limit :__upper_limit", + expField: `[name:"col1" type:VARCHAR name:"sum(col2)" type:DECIMAL]`, expRow: `[[VARCHAR("a") DECIMAL(12)]]`, }, { sql: `select col1, sum(col2) from (select col1, col2 from user limit 4) x group by col1`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col1)", "varchar|varchar|varbinary"), "a|2|a"), - expSandboxQ: "select col1, col2, weight_string(col1) from `user` order by col1 asc limit :__upper_limit", - expField: `[name:"col1" type:VARCHAR charset:255 flags:1 name:"sum(col2)" type:DECIMAL]`, - expRow: `[[VARCHAR("a") DECIMAL(8)]]`, + expSandboxQ: "select col1, col2, weight_string(col1) from (select col1, col2 from `user`) as x limit :__upper_limit", + expField: `[name:"col1" type:VARCHAR name:"sum(col2)" type:FLOAT64]`, + expRow: `[[VARCHAR("a") FLOAT64(8)]]`, }, { sql: `select col1, sum(col2) from (select col1, col2 from user limit 4) x group by col1`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col1)", "varchar|varchar|varbinary"), "a|x|a"), - expSandboxQ: "select col1, col2, weight_string(col1) from `user` order by col1 asc limit :__upper_limit", - expField: `[name:"col1" 
type:VARCHAR charset:255 flags:1 name:"sum(col2)" type:DECIMAL]`, - expRow: `[[VARCHAR("a") DECIMAL(0)]]`, + expSandboxQ: "select col1, col2, weight_string(col1) from (select col1, col2 from `user`) as x limit :__upper_limit", + expField: `[name:"col1" type:VARCHAR name:"sum(col2)" type:FLOAT64]`, + expRow: `[[VARCHAR("a") FLOAT64(0)]]`, }, { sql: `select col1, sum(col2) from (select col1, col2 from user limit 4) x group by col1`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col1)", "varchar|varchar|varbinary"), "a|null|a"), - expSandboxQ: "select col1, col2, weight_string(col1) from `user` order by col1 asc limit :__upper_limit", - expField: `[name:"col1" type:VARCHAR charset:255 flags:1 name:"sum(col2)" type:DECIMAL]`, + expSandboxQ: "select col1, col2, weight_string(col1) from (select col1, col2 from `user`) as x limit :__upper_limit", + expField: `[name:"col1" type:VARCHAR name:"sum(col2)" type:FLOAT64]`, expRow: `[[VARCHAR("a") NULL]]`, }, } @@ -3861,7 +4021,10 @@ func TestSelectAggregationData(t *testing.T) { sbc.SetResults([]*sqltypes.Result{tc.sandboxRes}) sbc.Queries = nil } - qr, err := executorExec(executor, tc.sql, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + qr, err := executorExec(ctx, executor, session, tc.sql, nil) require.NoError(t, err) assert.Equal(t, tc.expField, fmt.Sprintf("%v", qr.Fields)) assert.Equal(t, tc.expRow, fmt.Sprintf("%v", qr.Rows)) @@ -3872,12 +4035,16 @@ func TestSelectAggregationData(t *testing.T) { } func TestSelectAggregationRandom(t *testing.T) { + ctx := utils.LeakCheckContext(t) + cell := "aa" hc := discovery.NewFakeHealthCheck(nil) - createSandbox(KsTestSharded).VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u := createSandbox(KsTestUnsharded) + s := createSandbox(KsTestSharded) + s.VSchema = executorVSchema + u.VSchema = 
unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for _, shard := range shards { @@ -3895,28 +4062,26 @@ func TestSelectAggregationRandom(t *testing.T) { "10|1", )}) - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() executor.pv = querypb.ExecuteOptions_Gen4 session := NewAutocommitSession(&vtgatepb.Session{}) - rs, err := executor.Execute(context.Background(), nil, "TestSelectCFC", session, - "select /*vt+ PLANNER=gen4 */ A.a, A.b, (A.a / A.b) as c from (select sum(a) as a, sum(b) as b from user) A", nil) + rs, err := executor.Execute(context.Background(), nil, nil, "TestSelectCFC", session, "select /*vt+ PLANNER=gen4 */ A.a, A.b, (A.a / A.b) as c from (select sum(a) as a, sum(b) as b from user) A", nil) require.NoError(t, err) - assert.Equal(t, `[[INT64(10) INT64(1) DECIMAL(10.0000)]]`, fmt.Sprintf("%v", rs.Rows)) + assert.Equal(t, `[[DECIMAL(10) DECIMAL(1) DECIMAL(10.0000)]]`, fmt.Sprintf("%v", rs.Rows)) } func TestSelectHexAndBit(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, _ := createExecutorEnv(t) executor.normalize = true session := NewAutocommitSession(&vtgatepb.Session{}) - qr, err := executor.Execute(context.Background(), nil, "TestSelectHexAndBit", session, - "select 0b1001, b'1001', 0x9, x'09'", nil) + qr, err := executor.Execute(context.Background(), nil, nil, "TestSelectHexAndBit", session, "select 0b1001, b'1001', 0x9, x'09'", nil) require.NoError(t, err) require.Equal(t, `[[VARBINARY("\t") VARBINARY("\t") VARBINARY("\t") VARBINARY("\t")]]`, fmt.Sprintf("%v", qr.Rows)) - qr, err = executor.Execute(context.Background(), nil, "TestSelectHexAndBit", session, - "select 1 + 0b1001, 1 + b'1001', 1 + 0x9, 1 + x'09'", nil) + qr, err = 
executor.Execute(context.Background(), nil, nil, "TestSelectHexAndBit", session, "select 1 + 0b1001, 1 + b'1001', 1 + 0x9, 1 + x'09'", nil) require.NoError(t, err) require.Equal(t, `[[UINT64(10) UINT64(10) UINT64(10) UINT64(10)]]`, fmt.Sprintf("%v", qr.Rows)) } @@ -3924,24 +4089,23 @@ func TestSelectHexAndBit(t *testing.T) { // TestSelectCFC tests validates that cfc vindex plan gets cached and same plan is getting reused. // This also validates that cache_size is able to calculate the cfc vindex plan size. func TestSelectCFC(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, _ := createExecutorEnv(t) executor.normalize = true session := NewAutocommitSession(&vtgatepb.Session{}) - _, err := executor.Execute(context.Background(), nil, "TestSelectCFC", session, - "select /*vt+ PLANNER=gen4 */ c2 from tbl_cfc where c1 like 'A%'", nil) + _, err := executor.Execute(context.Background(), nil, nil, "TestSelectCFC", session, "select /*vt+ PLANNER=gen4 */ c2 from tbl_cfc where c1 like 'A%'", nil) require.NoError(t, err) - timeout := time.After(10 * time.Second) + timeout := time.After(30 * time.Second) for { select { case <-timeout: - t.Fatal("not able to cache a plan withing 10 seconds.") + t.Fatal("not able to cache a plan within 30 seconds.") case <-time.After(5 * time.Millisecond): // should be able to find cache entry before the timeout. 
cacheItems := executor.debugCacheEntries() for _, item := range cacheItems { - if strings.Contains(item.Key, "c2 from tbl_cfc where c1 like") { + if strings.Contains(item.Original, "c2 from tbl_cfc where c1 like") { return } } @@ -3950,7 +4114,7 @@ func TestSelectCFC(t *testing.T) { } func TestSelectView(t *testing.T) { - executor, sbc, _, _ := createExecutorEnv() + executor, sbc, _, _, _ := createExecutorEnv(t) // add the view to local vschema err := executor.vschema.AddView(KsTestSharded, "user_details_view", "select user.id, user_extra.col from user join user_extra on user.id = user_extra.user_id") require.NoError(t, err) @@ -3958,21 +4122,19 @@ func TestSelectView(t *testing.T) { executor.normalize = true session := NewAutocommitSession(&vtgatepb.Session{}) - _, err = executor.Execute(context.Background(), nil, "TestSelectView", session, - "select * from user_details_view", nil) + _, err = executor.Execute(context.Background(), nil, nil, "TestSelectView", session, "select * from user_details_view", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id) as user_details_view", + Sql: "select user_details_view.id, user_details_view.col from (select `user`.id, user_extra.col from `user`, user_extra where `user`.id = user_extra.user_id) as user_details_view", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc.Queries) sbc.Queries = nil - _, err = executor.Execute(context.Background(), nil, "TestSelectView", session, - "select * from user_details_view where id = 2", nil) + _, err = executor.Execute(context.Background(), nil, nil, "TestSelectView", session, "select * from user_details_view where id = 2", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ - Sql: "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id) as 
user_details_view where id = :id /* INT64 */", + Sql: "select user_details_view.id, user_details_view.col from (select `user`.id, user_extra.col from `user`, user_extra where `user`.id = :id /* INT64 */ and `user`.id = user_extra.user_id) as user_details_view", BindVariables: map[string]*querypb.BindVariable{ "id": sqltypes.Int64BindVariable(2), }, @@ -3980,13 +4142,12 @@ func TestSelectView(t *testing.T) { utils.MustMatch(t, wantQueries, sbc.Queries) sbc.Queries = nil - _, err = executor.Execute(context.Background(), nil, "TestSelectView", session, - "select * from user_details_view where id in (1,2,3,4,5)", nil) + _, err = executor.Execute(context.Background(), nil, nil, "TestSelectView", session, "select * from user_details_view where id in (1,2,3,4,5)", nil) require.NoError(t, err) bvtg1, _ := sqltypes.BuildBindVariable([]int64{1, 2, 3, 4, 5}) bvals, _ := sqltypes.BuildBindVariable([]int64{1, 2}) wantQueries = []*querypb.BoundQuery{{ - Sql: "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id) as user_details_view where id in ::__vals", + Sql: "select user_details_view.id, user_details_view.col from (select `user`.id, user_extra.col from `user`, user_extra where `user`.id in ::__vals and `user`.id = user_extra.user_id) as user_details_view", BindVariables: map[string]*querypb.BindVariable{ "vtg1": bvtg1, "__vals": bvals, @@ -3999,3 +4160,38 @@ func TestMain(m *testing.M) { _flag.ParseFlagsForTest() os.Exit(m.Run()) } + +func TestStreamJoinQuery(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + // Special setup: Don't use createExecutorEnv. 
+ cell := "aa" + hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) + s := createSandbox(KsTestSharded) + s.VSchema = executorVSchema + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) + shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} + for _, shard := range shards { + _ = hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_PRIMARY, true, 1, nil) + } + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() + + sql := "select u.foo, u.apa, ue.bar, ue.apa from user u join user_extra ue on u.foo = ue.bar" + result, err := executorStream(ctx, executor, sql) + require.NoError(t, err) + wantResult := &sqltypes.Result{ + Fields: append(sandboxconn.SingleRowResult.Fields, sandboxconn.SingleRowResult.Fields...), + } + wantRow := append(sandboxconn.StreamRowResult.Rows[0], sandboxconn.StreamRowResult.Rows[0]...) + for i := 0; i < 64; i++ { + wantResult.Rows = append(wantResult.Rows, wantRow) + } + require.Equal(t, len(wantResult.Rows), len(result.Rows)) + for idx := 0; idx < 64; idx++ { + utils.MustMatch(t, wantResult.Rows[idx], result.Rows[idx], "mismatched on: ", strconv.Itoa(idx)) + } +} diff --git a/go/vt/vtgate/executor_set_test.go b/go/vt/vtgate/executor_set_test.go index 3a45750c744..395b6be06a1 100644 --- a/go/vt/vtgate/executor_set_test.go +++ b/go/vt/vtgate/executor_set_test.go @@ -17,11 +17,10 @@ limitations under the License. 
package vtgate import ( - "context" "fmt" "testing" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/sqlparser" querypb "vitess.io/vitess/go/vt/proto/query" @@ -41,7 +40,7 @@ import ( ) func TestExecutorSet(t *testing.T) { - executorEnv, _, _, _ := createExecutorEnv() + executorEnv, _, _, _, ctx := createExecutorEnv(t) testcases := []struct { in string @@ -228,14 +227,14 @@ func TestExecutorSet(t *testing.T) { in: "set transaction isolation level serializable", out: &vtgatepb.Session{ Autocommit: true, - Warnings: []*querypb.QueryWarning{{Code: uint32(mysql.ERNotSupportedYet), Message: "converted 'next transaction' scope to 'session' scope"}}, + Warnings: []*querypb.QueryWarning{{Code: uint32(sqlerror.ERNotSupportedYet), Message: "converted 'next transaction' scope to 'session' scope"}}, }, }, { in: "set transaction read only", - out: &vtgatepb.Session{Autocommit: true, Warnings: []*querypb.QueryWarning{{Code: uint32(mysql.ERNotSupportedYet), Message: "converted 'next transaction' scope to 'session' scope"}}}, + out: &vtgatepb.Session{Autocommit: true, Warnings: []*querypb.QueryWarning{{Code: uint32(sqlerror.ERNotSupportedYet), Message: "converted 'next transaction' scope to 'session' scope"}}}, }, { in: "set transaction read write", - out: &vtgatepb.Session{Autocommit: true, Warnings: []*querypb.QueryWarning{{Code: uint32(mysql.ERNotSupportedYet), Message: "converted 'next transaction' scope to 'session' scope"}}}, + out: &vtgatepb.Session{Autocommit: true, Warnings: []*querypb.QueryWarning{{Code: uint32(sqlerror.ERNotSupportedYet), Message: "converted 'next transaction' scope to 'session' scope"}}}, }, { in: "set session transaction read write", out: &vtgatepb.Session{Autocommit: true}, @@ -270,7 +269,7 @@ func TestExecutorSet(t *testing.T) { for i, tcase := range testcases { t.Run(fmt.Sprintf("%d-%s", i, tcase.in), func(t *testing.T) { session := NewSafeSession(&vtgatepb.Session{Autocommit: true}) - _, err := 
executorEnv.Execute(context.Background(), nil, "TestExecute", session, tcase.in, nil) + _, err := executorEnv.Execute(ctx, nil, nil, "TestExecute", session, tcase.in, nil) if tcase.err == "" { require.NoError(t, err) utils.MustMatch(t, tcase.out, session.Session, "new executor") @@ -282,7 +281,7 @@ func TestExecutorSet(t *testing.T) { } func TestExecutorSetOp(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) sysVarSetEnabled = true returnResult := func(columnName, typ, value string) *sqltypes.Result { @@ -366,17 +365,13 @@ func TestExecutorSetOp(t *testing.T) { }} for _, tcase := range testcases { t.Run(tcase.in, func(t *testing.T) { - session := NewAutocommitSession(primarySession) + session := NewAutocommitSession(&vtgatepb.Session{ + TargetString: "@primary", + }) session.TargetString = KsTestUnsharded session.EnableSystemSettings = !tcase.disallowResConn sbclookup.SetResults([]*sqltypes.Result{tcase.result}) - _, err := executor.Execute( - context.Background(), - nil, - "TestExecute", - session, - tcase.in, - nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, tcase.in, nil) require.NoError(t, err) utils.MustMatch(t, tcase.warning, session.Warnings, "") utils.MustMatch(t, tcase.sysVars, session.SystemVariables, "") @@ -385,64 +380,68 @@ func TestExecutorSetOp(t *testing.T) { } func TestExecutorSetMetadata(t *testing.T) { - executor, _, _, _ := createExecutorEnv() - session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true}) + t.Run("Session 1", func(t *testing.T) { + executor, _, _, _, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true}) - set := "set @@vitess_metadata.app_keyspace_v1= '1'" - _, err := executor.Execute(context.Background(), nil, "TestExecute", session, set, nil) - assert.Equalf(t, vtrpcpb.Code_PERMISSION_DENIED, vterrors.Code(err), "expected error %v, got 
error: %v", vtrpcpb.Code_PERMISSION_DENIED, err) - - vschemaacl.AuthorizedDDLUsers = "%" - defer func() { - vschemaacl.AuthorizedDDLUsers = "" - }() - - executor, _, _, _ = createExecutorEnv() - session = NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true}) - - set = "set @@vitess_metadata.app_keyspace_v1= '1'" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, set, nil) - assert.NoError(t, err, "%s error: %v", set, err) - - show := `show vitess_metadata variables like 'app\\_keyspace\\_v_'` - result, err := executor.Execute(context.Background(), nil, "TestExecute", session, show, nil) - assert.NoError(t, err) - - want := "1" - got := result.Rows[0][1].ToString() - assert.Equalf(t, want, got, "want migrations %s, result %s", want, got) - - // Update metadata - set = "set @@vitess_metadata.app_keyspace_v2='2'" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, set, nil) - assert.NoError(t, err, "%s error: %v", set, err) - - show = `show vitess_metadata variables like 'app\\_keyspace\\_v%'` - gotqr, err := executor.Execute(context.Background(), nil, "TestExecute", session, show, nil) - assert.NoError(t, err) + set := "set @@vitess_metadata.app_keyspace_v1= '1'" + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, set, nil) + assert.Equalf(t, vtrpcpb.Code_PERMISSION_DENIED, vterrors.Code(err), "expected error %v, got error: %v", vtrpcpb.Code_PERMISSION_DENIED, err) + }) - wantqr := &sqltypes.Result{ - Fields: buildVarCharFields("Key", "Value"), - Rows: [][]sqltypes.Value{ - buildVarCharRow("app_keyspace_v1", "1"), - buildVarCharRow("app_keyspace_v2", "2"), - }, - RowsAffected: 2, - } + t.Run("Session 2", func(t *testing.T) { + vschemaacl.AuthorizedDDLUsers = "%" + defer func() { + vschemaacl.AuthorizedDDLUsers = "" + }() + + executor, _, _, _, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true}) + + set := 
"set @@vitess_metadata.app_keyspace_v1= '1'" + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, set, nil) + assert.NoError(t, err, "%s error: %v", set, err) + + show := `show vitess_metadata variables like 'app\\_keyspace\\_v_'` + result, err := executor.Execute(ctx, nil, nil, "TestExecute", session, show, nil) + assert.NoError(t, err) + + want := "1" + got := result.Rows[0][1].ToString() + assert.Equalf(t, want, got, "want migrations %s, result %s", want, got) + + // Update metadata + set = "set @@vitess_metadata.app_keyspace_v2='2'" + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, set, nil) + assert.NoError(t, err, "%s error: %v", set, err) + + show = `show vitess_metadata variables like 'app\\_keyspace\\_v%'` + gotqr, err := executor.Execute(ctx, nil, nil, "TestExecute", session, show, nil) + assert.NoError(t, err) + + wantqr := &sqltypes.Result{ + Fields: buildVarCharFields("Key", "Value"), + Rows: [][]sqltypes.Value{ + buildVarCharRow("app_keyspace_v1", "1"), + buildVarCharRow("app_keyspace_v2", "2"), + }, + RowsAffected: 2, + } - assert.Equal(t, wantqr.Fields, gotqr.Fields) - assert.ElementsMatch(t, wantqr.Rows, gotqr.Rows) + assert.Equal(t, wantqr.Fields, gotqr.Fields) + assert.ElementsMatch(t, wantqr.Rows, gotqr.Rows) - show = "show vitess_metadata variables" - gotqr, err = executor.Execute(context.Background(), nil, "TestExecute", session, show, nil) - require.NoError(t, err) + show = "show vitess_metadata variables" + gotqr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, show, nil) + require.NoError(t, err) - assert.Equal(t, wantqr.Fields, gotqr.Fields) - assert.ElementsMatch(t, wantqr.Rows, gotqr.Rows) + assert.Equal(t, wantqr.Fields, gotqr.Fields) + assert.ElementsMatch(t, wantqr.Rows, gotqr.Rows) + }) } func TestPlanExecutorSetUDV(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) testcases := []struct { in string @@ -461,7 +460,7 @@ func 
TestPlanExecutorSetUDV(t *testing.T) { for _, tcase := range testcases { t.Run(tcase.in, func(t *testing.T) { session := NewSafeSession(&vtgatepb.Session{Autocommit: true}) - _, err := executor.Execute(context.Background(), nil, "TestExecute", session, tcase.in, nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, tcase.in, nil) if err != nil { require.EqualError(t, err, tcase.err) } else { @@ -472,7 +471,7 @@ func TestPlanExecutorSetUDV(t *testing.T) { } func TestSetUDVFromTabletInput(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) fields := sqltypes.MakeTestFields("some", "VARCHAR") sbc1.SetResults([]*sqltypes.Result{ @@ -482,15 +481,12 @@ func TestSetUDVFromTabletInput(t *testing.T) { ), }) - primarySession.TargetString = "TestExecutor" - defer func() { - primarySession.TargetString = "" - }() - _, err := executorExec(executor, "set @foo = concat('a','b','c')", nil) + session := &vtgatepb.Session{TargetString: "TestExecutor"} + _, err := executorExec(ctx, executor, session, "set @foo = concat('a','b','c')", nil) require.NoError(t, err) want := map[string]*querypb.BindVariable{"foo": sqltypes.StringBindVariable("abc")} - utils.MustMatch(t, want, primarySession.UserDefinedVariables, "") + utils.MustMatch(t, want, session.UserDefinedVariables, "") } func createMap(keys []string, values []any) map[string]*querypb.BindVariable { @@ -506,7 +502,7 @@ func createMap(keys []string, values []any) map[string]*querypb.BindVariable { } func TestSetVar(t *testing.T) { - executor, _, _, sbc := createExecutorEnv() + executor, _, _, sbc, ctx := createExecutorEnv(t) executor.normalize = true oldVersion := sqlparser.GetParserVersion() @@ -520,7 +516,7 @@ func TestSetVar(t *testing.T) { sqltypes.MakeTestFields("orig|new", "varchar|varchar"), "|only_full_group_by")}) - _, err := executor.Execute(context.Background(), nil, "TestSetVar", session, "set @@sql_mode = only_full_group_by", 
map[string]*querypb.BindVariable{}) + _, err := executor.Execute(ctx, nil, nil, "TestSetVar", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) require.NoError(t, err) tcases := []struct { @@ -542,7 +538,7 @@ func TestSetVar(t *testing.T) { // reset reserved conn need. session.SetReservedConn(false) - _, err = executor.Execute(context.Background(), nil, "TestSetVar", session, tc.sql, map[string]*querypb.BindVariable{}) + _, err = executor.Execute(ctx, nil, nil, "TestSetVar", session, tc.sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) assert.Equal(t, tc.rc, session.InReservedConn()) }) @@ -550,7 +546,7 @@ func TestSetVar(t *testing.T) { } func TestSetVarShowVariables(t *testing.T) { - executor, _, _, sbc := createExecutorEnv() + executor, _, _, sbc, ctx := createExecutorEnv(t) executor.normalize = true oldVersion := sqlparser.GetParserVersion() @@ -568,18 +564,18 @@ func TestSetVarShowVariables(t *testing.T) { sqltypes.MakeTestResult(sqltypes.MakeTestFields("Variable_name|Value", "varchar|varchar"), "sql_mode|ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE")}) - _, err := executor.Execute(context.Background(), nil, "TestSetVar", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) + _, err := executor.Execute(ctx, nil, nil, "TestSetVar", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) require.NoError(t, err) // this should return the updated value of sql_mode. 
- qr, err := executor.Execute(context.Background(), nil, "TestSetVar", session, "show variables like 'sql_mode'", map[string]*querypb.BindVariable{}) + qr, err := executor.Execute(ctx, nil, nil, "TestSetVar", session, "show variables like 'sql_mode'", map[string]*querypb.BindVariable{}) require.NoError(t, err) assert.False(t, session.InReservedConn(), "reserved connection should not be used") assert.Equal(t, `[[VARCHAR("sql_mode") VARCHAR("only_full_group_by")]]`, fmt.Sprintf("%v", qr.Rows)) } func TestExecutorSetAndSelect(t *testing.T) { - e, _, _, sbc := createExecutorEnv() + e, _, _, sbc, ctx := createExecutorEnv(t) e.normalize = true testcases := []struct { @@ -615,7 +611,7 @@ func TestExecutorSetAndSelect(t *testing.T) { sqltypes.MakeTestResult(nil)}) // third one for new set query setQ := fmt.Sprintf("set %s = '%s'", tcase.sysVar, tcase.val) - _, err := e.Execute(context.Background(), nil, "TestExecutorSetAndSelect", session, setQ, nil) + _, err := e.Execute(ctx, nil, nil, "TestExecutorSetAndSelect", session, setQ, nil) require.NoError(t, err) } @@ -623,7 +619,7 @@ func TestExecutorSetAndSelect(t *testing.T) { // if the query reaches the shard, it will return REPEATABLE-READ isolation level. sbc.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult(sqltypes.MakeTestFields(tcase.sysVar, "varchar"), "REPEATABLE-READ")}) - qr, err := e.Execute(context.Background(), nil, "TestExecutorSetAndSelect", session, selectQ, nil) + qr, err := e.Execute(ctx, nil, nil, "TestExecutorSetAndSelect", session, selectQ, nil) require.NoError(t, err) assert.Equal(t, tcase.exp, fmt.Sprintf("%v", qr.Rows)) }) diff --git a/go/vt/vtgate/executor_stream_test.go b/go/vt/vtgate/executor_stream_test.go index 88bfbc77b3f..46fe62c49ea 100644 --- a/go/vt/vtgate/executor_stream_test.go +++ b/go/vt/vtgate/executor_stream_test.go @@ -17,17 +17,19 @@ limitations under the License. 
package vtgate import ( + "context" "testing" "time" + "vitess.io/vitess/go/streamlog" + "vitess.io/vitess/go/test/utils" querypb "vitess.io/vitess/go/vt/proto/query" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/vtgate/logstats" - "vitess.io/vitess/go/cache" "vitess.io/vitess/go/vt/discovery" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "context" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -36,9 +38,9 @@ import ( ) func TestStreamSQLUnsharded(t *testing.T) { - executor, _, _, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, _, _, _, _ := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "stream * from user_msgs" result, err := executorStreamMessages(executor, sql) @@ -50,18 +52,26 @@ func TestStreamSQLUnsharded(t *testing.T) { } func TestStreamSQLSharded(t *testing.T) { + ctx := utils.LeakCheckContext(t) cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox("TestExecutor") s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} for _, shard := range shards { _ = hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_PRIMARY, true, 1, nil) } - executor := NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3) + queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) + plans := DefaultPlanCache() + + executor := NewExecutor(ctx, 
serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4) + executor.SetQueryLogger(queryLogger) + + defer executor.Close() sql := "stream * from sharded_user_msgs" result, err := executorStreamMessages(executor, sql) @@ -88,11 +98,13 @@ func executorStreamMessages(executor *Executor, sql string) (qr *sqltypes.Result results := make(chan *sqltypes.Result, 100) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel() + session := &vtgatepb.Session{TargetString: "@primary"} err = executor.StreamExecute( ctx, nil, + nil, "TestExecuteStream", - NewSafeSession(primarySession), + NewSafeSession(session), sql, nil, func(qr *sqltypes.Result) error { diff --git a/go/vt/vtgate/executor_test.go b/go/vt/vtgate/executor_test.go index 90270643980..11098bd0cf4 100644 --- a/go/vt/vtgate/executor_test.go +++ b/go/vt/vtgate/executor_test.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "net/http" "net/http/httptest" @@ -27,46 +28,45 @@ import ( "sort" "strings" "testing" + "time" + "github.com/google/go-cmp/cmp" "github.com/google/safehtml/template" - - "vitess.io/vitess/go/vt/vtgate/logstats" - + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/cache" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/topo" - "github.com/google/go-cmp/cmp" - - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/vindexes" - "vitess.io/vitess/go/vt/vtgate/vschemaacl" - + "vitess.io/vitess/go/vt/discovery" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtgatepb 
"vitess.io/vitess/go/vt/proto/vtgate" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/buffer" + "vitess.io/vitess/go/vt/vtgate/logstats" + "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vtgate/vschemaacl" + "vitess.io/vitess/go/vt/vtgate/vtgateservice" ) func TestExecutorResultsExceeded(t *testing.T) { + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + save := warnMemoryRows warnMemoryRows = 3 defer func() { warnMemoryRows = save }() - executor, _, _, sbclookup := createExecutorEnv() session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"}) initial := warnings.Counts()["ResultsExceeded"] @@ -75,21 +75,22 @@ func TestExecutorResultsExceeded(t *testing.T) { result2 := sqltypes.MakeTestResult(sqltypes.MakeTestFields("col", "int64"), "1", "2", "3", "4") sbclookup.SetResults([]*sqltypes.Result{result1, result2}) - _, err := executor.Execute(ctx, nil, "TestExecutorResultsExceeded", session, "select * from main1", nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecutorResultsExceeded", session, "select * from main1", nil) require.NoError(t, err) assert.Equal(t, initial, warnings.Counts()["ResultsExceeded"], "warnings count") - _, err = executor.Execute(ctx, nil, "TestExecutorResultsExceeded", session, "select * from main1", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecutorResultsExceeded", session, "select * from main1", nil) require.NoError(t, err) assert.Equal(t, initial+1, warnings.Counts()["ResultsExceeded"], "warnings count") } func TestExecutorMaxMemoryRowsExceeded(t *testing.T) { + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + save := maxMemoryRows maxMemoryRows = 3 defer func() { maxMemoryRows = save }() - executor, _, _, sbclookup := createExecutorEnv() session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"}) result := 
sqltypes.MakeTestResult(sqltypes.MakeTestFields("col", "int64"), "1", "2", "3", "4") fn := func(r *sqltypes.Result) error { @@ -108,7 +109,7 @@ func TestExecutorMaxMemoryRowsExceeded(t *testing.T) { stmt, err := sqlparser.Parse(test.query) require.NoError(t, err) - _, err = executor.Execute(ctx, nil, "TestExecutorMaxMemoryRowsExceeded", session, test.query, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecutorMaxMemoryRowsExceeded", session, test.query, nil) if sqlparser.IgnoreMaxMaxMemoryRowsDirective(stmt) { require.NoError(t, err, "no error when DirectiveIgnoreMaxMemoryRows is provided") } else { @@ -116,36 +117,37 @@ func TestExecutorMaxMemoryRowsExceeded(t *testing.T) { } sbclookup.SetResults([]*sqltypes.Result{result}) - err = executor.StreamExecute(ctx, nil, "TestExecutorMaxMemoryRowsExceeded", session, test.query, nil, fn) + err = executor.StreamExecute(ctx, nil, nil, "TestExecutorMaxMemoryRowsExceeded", session, test.query, nil, fn) require.NoError(t, err, "maxMemoryRows limit does not apply to StreamExecute") } } func TestExecutorTransactionsNoAutoCommit(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", SessionUUID: "suuid"}) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) // begin. 
- _, err := executor.Execute(ctx, nil, "TestExecute", session, "begin", nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, "begin", nil) require.NoError(t, err) wantSession := &vtgatepb.Session{InTransaction: true, TargetString: "@primary", SessionUUID: "suuid"} utils.MustMatch(t, wantSession, session.Session, "session") assert.EqualValues(t, 0, sbclookup.CommitCount.Load(), "commit count") - logStats := testQueryLog(t, logChan, "TestExecute", "BEGIN", "begin", 0) + logStats := testQueryLog(t, executor, logChan, "TestExecute", "BEGIN", "begin", 0) assert.EqualValues(t, 0, logStats.CommitTime, "logstats: expected zero CommitTime") assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") // commit. - _, err = executor.Execute(ctx, nil, "TestExecute", session, "select id from main1", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "select id from main1", nil) require.NoError(t, err) - logStats = testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from main1", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from main1", 1) assert.EqualValues(t, 0, logStats.CommitTime, "logstats: expected zero CommitTime") assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, "commit", nil) + _, err = executor.Execute(context.Background(), nil, nil, "TestExecute", session, "commit", nil) if err != nil { t.Fatal(err) } @@ -156,25 +158,25 @@ func TestExecutorTransactionsNoAutoCommit(t *testing.T) { if commitCount := sbclookup.CommitCount.Load(); commitCount != 1 { t.Errorf("want 1, got %d", commitCount) } - logStats = testQueryLog(t, logChan, "TestExecute", "COMMIT", "commit", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "COMMIT", "commit", 1) if logStats.CommitTime == 0 { t.Errorf("logstats: expected non-zero 
CommitTime") } assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") // rollback. - _, err = executor.Execute(ctx, nil, "TestExecute", session, "begin", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "begin", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, nil, "TestExecute", session, "select id from main1", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "select id from main1", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, nil, "TestExecute", session, "rollback", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "rollback", nil) require.NoError(t, err) wantSession = &vtgatepb.Session{TargetString: "@primary", SessionUUID: "suuid"} utils.MustMatch(t, wantSession, session.Session, "session") assert.EqualValues(t, 1, sbclookup.RollbackCount.Load(), "rollback count") - _ = testQueryLog(t, logChan, "TestExecute", "BEGIN", "begin", 0) - _ = testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from main1", 1) - logStats = testQueryLog(t, logChan, "TestExecute", "ROLLBACK", "rollback", 1) + _ = testQueryLog(t, executor, logChan, "TestExecute", "BEGIN", "begin", 0) + _ = testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from main1", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "ROLLBACK", "rollback", 1) if logStats.CommitTime == 0 { t.Errorf("logstats: expected non-zero CommitTime") } @@ -190,12 +192,13 @@ func TestExecutorTransactionsNoAutoCommit(t *testing.T) { // Prevent use of non-primary if in_transaction is on. 
session = NewSafeSession(&vtgatepb.Session{TargetString: "@primary", InTransaction: true}) - _, err = executor.Execute(ctx, nil, "TestExecute", session, "use @replica", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "use @replica", nil) require.EqualError(t, err, `can't execute the given command because you have an active transaction`) } func TestDirectTargetRewrites(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + executor.normalize = true session := &vtgatepb.Session{ @@ -205,7 +208,7 @@ func TestDirectTargetRewrites(t *testing.T) { } sql := "select database()" - _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) assertQueries(t, sbclookup, []*querypb.BoundQuery{{ Sql: "select :__vtdbname as `database()` from dual", @@ -214,59 +217,61 @@ func TestDirectTargetRewrites(t *testing.T) { } func TestExecutorTransactionsAutoCommit(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true, SessionUUID: "suuid"}) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) // begin. 
- _, err := executor.Execute(ctx, nil, "TestExecute", session, "begin", nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, "begin", nil) require.NoError(t, err) wantSession := &vtgatepb.Session{InTransaction: true, TargetString: "@primary", Autocommit: true, SessionUUID: "suuid"} utils.MustMatch(t, wantSession, session.Session, "session") if commitCount := sbclookup.CommitCount.Load(); commitCount != 0 { t.Errorf("want 0, got %d", commitCount) } - logStats := testQueryLog(t, logChan, "TestExecute", "BEGIN", "begin", 0) + logStats := testQueryLog(t, executor, logChan, "TestExecute", "BEGIN", "begin", 0) assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") // commit. - _, err = executor.Execute(ctx, nil, "TestExecute", session, "select id from main1", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "select id from main1", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, nil, "TestExecute", session, "commit", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "commit", nil) require.NoError(t, err) wantSession = &vtgatepb.Session{TargetString: "@primary", Autocommit: true, SessionUUID: "suuid"} utils.MustMatch(t, wantSession, session.Session, "session") assert.EqualValues(t, 1, sbclookup.CommitCount.Load()) - logStats = testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from main1", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from main1", 1) assert.EqualValues(t, 0, logStats.CommitTime) assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") - logStats = testQueryLog(t, logChan, "TestExecute", "COMMIT", "commit", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "COMMIT", "commit", 1) assert.NotEqual(t, 0, logStats.CommitTime) assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") // rollback. 
- _, err = executor.Execute(ctx, nil, "TestExecute", session, "begin", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "begin", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, nil, "TestExecute", session, "select id from main1", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "select id from main1", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, nil, "TestExecute", session, "rollback", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "rollback", nil) require.NoError(t, err) wantSession = &vtgatepb.Session{TargetString: "@primary", Autocommit: true, SessionUUID: "suuid"} utils.MustMatch(t, wantSession, session.Session, "session") if rollbackCount := sbclookup.RollbackCount.Load(); rollbackCount != 1 { t.Errorf("want 1, got %d", rollbackCount) } - _ = testQueryLog(t, logChan, "TestExecute", "BEGIN", "begin", 0) - _ = testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from main1", 1) - logStats = testQueryLog(t, logChan, "TestExecute", "ROLLBACK", "rollback", 1) + _ = testQueryLog(t, executor, logChan, "TestExecute", "BEGIN", "begin", 0) + _ = testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from main1", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "ROLLBACK", "rollback", 1) assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") } func TestExecutorTransactionsAutoCommitStreaming(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + oltpOptions := &querypb.ExecuteOptions{Workload: querypb.ExecuteOptions_OLTP} session := NewSafeSession(&vtgatepb.Session{ TargetString: "@primary", @@ -275,13 +280,13 @@ func TestExecutorTransactionsAutoCommitStreaming(t *testing.T) { SessionUUID: "suuid", }) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := 
executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) var results []*sqltypes.Result // begin. - err := executor.StreamExecute(ctx, nil, "TestExecute", session, "begin", nil, func(result *sqltypes.Result) error { + err := executor.StreamExecute(ctx, nil, nil, "TestExecute", session, "begin", nil, func(result *sqltypes.Result) error { results = append(results, result) return nil }) @@ -299,31 +304,31 @@ func TestExecutorTransactionsAutoCommitStreaming(t *testing.T) { } utils.MustMatch(t, wantSession, session.Session, "session") assert.Zero(t, sbclookup.CommitCount.Load()) - logStats := testQueryLog(t, logChan, "TestExecute", "BEGIN", "begin", 0) + logStats := testQueryLog(t, executor, logChan, "TestExecute", "BEGIN", "begin", 0) assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") // commit. - _, err = executor.Execute(ctx, nil, "TestExecute", session, "select id from main1", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "select id from main1", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, nil, "TestExecute", session, "commit", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "commit", nil) require.NoError(t, err) wantSession = &vtgatepb.Session{TargetString: "@primary", Autocommit: true, Options: oltpOptions, SessionUUID: "suuid"} utils.MustMatch(t, wantSession, session.Session, "session") assert.EqualValues(t, 1, sbclookup.CommitCount.Load()) - logStats = testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from main1", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from main1", 1) assert.EqualValues(t, 0, logStats.CommitTime) assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") - logStats = testQueryLog(t, logChan, "TestExecute", "COMMIT", "commit", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "COMMIT", 
"commit", 1) assert.NotEqual(t, 0, logStats.CommitTime) assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") // rollback. - _, err = executor.Execute(ctx, nil, "TestExecute", session, "begin", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "begin", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, nil, "TestExecute", session, "select id from main1", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "select id from main1", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, nil, "TestExecute", session, "rollback", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "rollback", nil) require.NoError(t, err) wantSession = &vtgatepb.Session{TargetString: "@primary", Autocommit: true, Options: oltpOptions, SessionUUID: "suuid"} utils.MustMatch(t, wantSession, session.Session, "session") @@ -336,49 +341,50 @@ func TestExecutorDeleteMetadata(t *testing.T) { vschemaacl.AuthorizedDDLUsers = "" }() - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true}) set := "set @@vitess_metadata.app_v1= '1'" - _, err := executor.Execute(ctx, nil, "TestExecute", session, set, nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, set, nil) assert.NoError(t, err, "%s error: %v", set, err) show := `show vitess_metadata variables like 'app\\_%'` - result, _ := executor.Execute(ctx, nil, "TestExecute", session, show, nil) + result, _ := executor.Execute(ctx, nil, nil, "TestExecute", session, show, nil) assert.Len(t, result.Rows, 1) // Fails if deleting key that doesn't exist delQuery := "set @@vitess_metadata.doesn't_exist=''" - _, err = executor.Execute(ctx, nil, "TestExecute", session, delQuery, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, delQuery, nil) assert.True(t, topo.IsErrType(err, 
topo.NoNode)) // Delete existing key, show should fail given the node doesn't exist delQuery = "set @@vitess_metadata.app_v1=''" - _, err = executor.Execute(ctx, nil, "TestExecute", session, delQuery, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, delQuery, nil) assert.NoError(t, err) show = `show vitess_metadata variables like 'app\\_%'` - _, err = executor.Execute(ctx, nil, "TestExecute", session, show, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, show, nil) assert.True(t, topo.IsErrType(err, topo.NoNode)) } func TestExecutorAutocommit(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"}) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) // autocommit = 0 startCount := sbclookup.CommitCount.Load() - _, err := executor.Execute(ctx, nil, "TestExecute", session, "select id from main1", nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, "select id from main1", nil) require.NoError(t, err) wantSession := &vtgatepb.Session{TargetString: "@primary", InTransaction: true, FoundRows: 1, RowCount: -1} - testSession := proto.Clone(session.Session).(*vtgatepb.Session) + testSession := session.Session.CloneVT() testSession.ShardSessions = nil utils.MustMatch(t, wantSession, testSession, "session does not match for autocommit=0") - logStats := testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from main1", 1) + logStats := testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from main1", 1) if logStats.CommitTime != 0 { t.Errorf("logstats: expected zero CommitTime") } @@ -387,42 +393,42 @@ func TestExecutorAutocommit(t *testing.T) { } // autocommit = 1 - _, err = executor.Execute(ctx, nil, "TestExecute", 
session, "set autocommit=1", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "set autocommit=1", nil) require.NoError(t, err) - _ = testQueryLog(t, logChan, "TestExecute", "SET", "set @@autocommit = 1", 0) + _ = testQueryLog(t, executor, logChan, "TestExecute", "SET", "set @@autocommit = 1", 0) // Setting autocommit=1 commits existing transaction. if got, want := sbclookup.CommitCount.Load(), startCount+1; got != want { t.Errorf("Commit count: %d, want %d", got, want) } - _, err = executor.Execute(ctx, nil, "TestExecute", session, "update main1 set id=1", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "update main1 set id=1", nil) require.NoError(t, err) wantSession = &vtgatepb.Session{Autocommit: true, TargetString: "@primary", FoundRows: 0, RowCount: 1} utils.MustMatch(t, wantSession, session.Session, "session does not match for autocommit=1") - logStats = testQueryLog(t, logChan, "TestExecute", "UPDATE", "update main1 set id = 1", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "UPDATE", "update main1 set id = 1", 1) assert.NotZero(t, logStats.CommitTime, "logstats: expected non-zero CommitTime") assert.NotEqual(t, uint64(0), logStats.RowsAffected, "logstats: expected non-zero RowsAffected") // autocommit = 1, "begin" session.ResetTx() startCount = sbclookup.CommitCount.Load() - _, err = executor.Execute(ctx, nil, "TestExecute", session, "begin", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "begin", nil) require.NoError(t, err) - _ = testQueryLog(t, logChan, "TestExecute", "BEGIN", "begin", 0) + _ = testQueryLog(t, executor, logChan, "TestExecute", "BEGIN", "begin", 0) - _, err = executor.Execute(ctx, nil, "TestExecute", session, "update main1 set id=1", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "update main1 set id=1", nil) require.NoError(t, err) wantSession = &vtgatepb.Session{InTransaction: true, Autocommit: true, TargetString: 
"@primary", FoundRows: 0, RowCount: 1} - testSession = proto.Clone(session.Session).(*vtgatepb.Session) + testSession = session.Session.CloneVT() testSession.ShardSessions = nil utils.MustMatch(t, wantSession, testSession, "session does not match for autocommit=1") if got, want := sbclookup.CommitCount.Load(), startCount; got != want { t.Errorf("Commit count: %d, want %d", got, want) } - logStats = testQueryLog(t, logChan, "TestExecute", "UPDATE", "update main1 set id = 1", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "UPDATE", "update main1 set id = 1", 1) if logStats.CommitTime != 0 { t.Errorf("logstats: expected zero CommitTime") } @@ -430,7 +436,7 @@ func TestExecutorAutocommit(t *testing.T) { t.Errorf("logstats: expected non-zero RowsAffected") } - _, err = executor.Execute(ctx, nil, "TestExecute", session, "commit", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "commit", nil) require.NoError(t, err) wantSession = &vtgatepb.Session{Autocommit: true, TargetString: "@primary"} if !proto.Equal(session.Session, wantSession) { @@ -439,19 +445,19 @@ func TestExecutorAutocommit(t *testing.T) { if got, want := sbclookup.CommitCount.Load(), startCount+1; got != want { t.Errorf("Commit count: %d, want %d", got, want) } - _ = testQueryLog(t, logChan, "TestExecute", "COMMIT", "commit", 1) + _ = testQueryLog(t, executor, logChan, "TestExecute", "COMMIT", "commit", 1) // transition autocommit from 0 to 1 in the middle of a transaction. 
startCount = sbclookup.CommitCount.Load() session = NewSafeSession(&vtgatepb.Session{TargetString: "@primary"}) - _, err = executor.Execute(ctx, nil, "TestExecute", session, "begin", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "begin", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, nil, "TestExecute", session, "update main1 set id=1", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "update main1 set id=1", nil) require.NoError(t, err) if got, want := sbclookup.CommitCount.Load(), startCount; got != want { t.Errorf("Commit count: %d, want %d", got, want) } - _, err = executor.Execute(ctx, nil, "TestExecute", session, "set autocommit=1", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "set autocommit=1", nil) require.NoError(t, err) wantSession = &vtgatepb.Session{Autocommit: true, TargetString: "@primary"} if !proto.Equal(session.Session, wantSession) { @@ -463,7 +469,8 @@ func TestExecutorAutocommit(t *testing.T) { } func TestExecutorShowColumns(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: ""}) queries := []string{ @@ -474,7 +481,7 @@ func TestExecutorShowColumns(t *testing.T) { } for _, query := range queries { t.Run(query, func(t *testing.T) { - _, err := executor.Execute(ctx, nil, "TestExecute", session, query, nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ @@ -514,32 +521,33 @@ func assertMatchesNoOrder(t *testing.T, expected, got string) { } func TestExecutorShow(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) for _, query := range []string{"show vitess_keyspaces", "show 
keyspaces"} { - qr, err := executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err := executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) assertMatchesNoOrder(t, `[[VARCHAR("TestUnsharded")] [VARCHAR("TestMultiCol")] [VARCHAR("TestXBadVSchema")] [VARCHAR("TestXBadSharding")] [VARCHAR("TestExecutor")]]`, fmt.Sprintf("%v", qr.Rows)) } for _, query := range []string{"show databases", "show DATABASES", "show schemas", "show SCHEMAS"} { - qr, err := executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err := executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) // Showing default tables (5+4[default]) assertMatchesNoOrder(t, `[[VARCHAR("TestUnsharded")] [VARCHAR("TestMultiCol")] [VARCHAR("TestXBadVSchema")] [VARCHAR("TestXBadSharding")] [VARCHAR("TestExecutor")]] [VARCHAR("information_schema")] [VARCHAR("mysql")] [VARCHAR("sys")] [VARCHAR("performance_schema")]`, fmt.Sprintf("%v", qr.Rows)) } - _, err := executor.Execute(ctx, nil, "TestExecute", session, "show variables", nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, "show variables", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, nil, "TestExecute", session, "show collation", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "show collation", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, nil, "TestExecute", session, "show collation where `Charset` = 'utf8' and `Collation` = 'utf8_bin'", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "show collation where `Charset` = 'utf8' and `Collation` = 'utf8_bin'", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, nil, "TestExecute", session, "use @primary", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "use @primary", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, nil, "TestExecute", session, "show tables", nil) + 
_, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "show tables", nil) assert.EqualError(t, err, errNoKeyspace.Error(), "'show tables' should fail without a keyspace") assert.Empty(t, sbclookup.Queries, "sbclookup unexpectedly has queries already") @@ -556,7 +564,7 @@ func TestExecutorShow(t *testing.T) { sbclookup.SetResults([]*sqltypes.Result{showResults}) query := fmt.Sprintf("show tables from %v", KsTestUnsharded) - qr, err := executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err := executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) assert.Equal(t, 1, len(sbclookup.Queries), "Tablet should have received one 'show' query. Instead received: %v", sbclookup.Queries) @@ -568,129 +576,130 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, fmt.Sprintf("unexpected results running query: %s", query)) wantErrNoTable := "table unknown_table not found" - _, err = executor.Execute(ctx, nil, "TestExecute", session, "show create table unknown_table", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "show create table unknown_table", nil) assert.EqualErrorf(t, err, wantErrNoTable, "Got: %v. Want: %v", wantErrNoTable) // SHOW CREATE table using vschema to find keyspace. - _, err = executor.Execute(ctx, nil, "TestExecute", session, "show create table user_seq", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "show create table user_seq", nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql wantQuery := "show create table user_seq" assert.Equal(t, wantQuery, lastQuery, "Got: %v. 
Want: %v", lastQuery, wantQuery) // SHOW CREATE table with query-provided keyspace - _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show create table %v.unknown", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, fmt.Sprintf("show create table %v.unknown", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql wantQuery = "show create table unknown" assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) // SHOW KEYS with two different syntax - _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show keys from %v.unknown", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, fmt.Sprintf("show keys from %v.unknown", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql wantQuery = "show indexes from unknown" assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) - _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show keys from unknown from %v", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, fmt.Sprintf("show keys from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) // SHOW INDEX with two different syntax - _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show index from %v.unknown", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, fmt.Sprintf("show index from %v.unknown", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql assert.Equal(t, wantQuery, lastQuery, "Got: %v. 
Want: %v", lastQuery, wantQuery) - _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show index from unknown from %v", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, fmt.Sprintf("show index from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) // SHOW INDEXES with two different syntax - _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show indexes from %v.unknown", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, fmt.Sprintf("show indexes from %v.unknown", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) - _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show indexes from unknown from %v", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, fmt.Sprintf("show indexes from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) // SHOW EXTENDED {INDEX | INDEXES | KEYS} - _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show extended index from unknown from %v", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, fmt.Sprintf("show extended index from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql assert.Equal(t, wantQuery, lastQuery, "Got: %v. 
Want: %v", lastQuery, wantQuery) - _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show extended indexes from unknown from %v", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, fmt.Sprintf("show extended indexes from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) - _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show extended keys from unknown from %v", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, fmt.Sprintf("show extended keys from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) // Set desitation keyspace in session session.TargetString = KsTestUnsharded - _, err = executor.Execute(ctx, nil, "TestExecute", session, "show create table unknown", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "show create table unknown", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, nil, "TestExecute", session, "show full columns from table1", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "show full columns from table1", nil) require.NoError(t, err) // Reset target string so other tests dont fail. 
session.TargetString = "@primary" - _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show full columns from unknown from %v", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, fmt.Sprintf("show full columns from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) - for _, query := range []string{"show charset", "show character set"} { - qr, err := executor.Execute(ctx, nil, "TestExecute", session, query, nil) + for _, query := range []string{"show charset like 'utf8%'", "show character set like 'utf8%'"} { + qr, err := executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr := &sqltypes.Result{ - Fields: append(buildVarCharFields("Charset", "Description", "Default collation"), &querypb.Field{Name: "Maxlen", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG | querypb.MySqlFlag_NO_DEFAULT_VALUE_FLAG)}), + Fields: append(buildVarCharFields("Charset", "Description", "Default collation"), &querypb.Field{Name: "Maxlen", Type: sqltypes.Uint32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG | querypb.MySqlFlag_NO_DEFAULT_VALUE_FLAG)}), Rows: [][]sqltypes.Value{ append(buildVarCharRow( - "utf8", + "utf8mb3", "UTF-8 Unicode", - "utf8_general_ci"), sqltypes.NewInt32(3)), + "utf8mb3_general_ci"), + sqltypes.NewUint32(3)), append(buildVarCharRow( "utf8mb4", "UTF-8 Unicode", - "utf8mb4_general_ci"), - sqltypes.NewInt32(4)), + collations.Local().LookupName(collations.Default())), + sqltypes.NewUint32(4)), }, } utils.MustMatch(t, wantqr, qr, query) } - for _, query := range []string{"show charset like '%foo'", "show character set like 'foo%'", "show charset like 'foo%'", "show character set where foo like 'utf8'", "show character set where 
charset like '%foo'", "show charset where charset = '%foo'"} { - qr, err := executor.Execute(ctx, nil, "TestExecute", session, query, nil) + for _, query := range []string{"show charset like '%foo'", "show character set like 'foo%'", "show charset like 'foo%'", "show character set where charset like '%foo'", "show charset where charset = '%foo'"} { + qr, err := executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr := &sqltypes.Result{ - Fields: append(buildVarCharFields("Charset", "Description", "Default collation"), &querypb.Field{Name: "Maxlen", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG | querypb.MySqlFlag_NO_DEFAULT_VALUE_FLAG)}), - Rows: [][]sqltypes.Value{}, + Fields: append(buildVarCharFields("Charset", "Description", "Default collation"), &querypb.Field{Name: "Maxlen", Type: sqltypes.Uint32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG | querypb.MySqlFlag_NO_DEFAULT_VALUE_FLAG)}), RowsAffected: 0, } utils.MustMatch(t, wantqr, qr, query) } - for _, query := range []string{"show charset like 'utf8'", "show character set like 'utf8'", "show charset where charset = 'utf8'", "show character set where charset = 'utf8'"} { - qr, err := executor.Execute(ctx, nil, "TestExecute", session, query, nil) + for _, query := range []string{"show charset like 'utf8mb3'", "show character set like 'utf8mb3'", "show charset where charset = 'utf8mb3'", "show character set where charset = 'utf8mb3'"} { + qr, err := executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr := &sqltypes.Result{ - Fields: append(buildVarCharFields("Charset", "Description", "Default collation"), &querypb.Field{Name: "Maxlen", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, 
Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG | querypb.MySqlFlag_NO_DEFAULT_VALUE_FLAG)}), + Fields: append(buildVarCharFields("Charset", "Description", "Default collation"), &querypb.Field{Name: "Maxlen", Type: sqltypes.Uint32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG | querypb.MySqlFlag_NO_DEFAULT_VALUE_FLAG)}), Rows: [][]sqltypes.Value{ append(buildVarCharRow( - "utf8", + "utf8mb3", "UTF-8 Unicode", - "utf8_general_ci"), sqltypes.NewInt32(3)), + "utf8mb3_general_ci"), + sqltypes.NewUint32(3)), }, } @@ -698,23 +707,28 @@ func TestExecutorShow(t *testing.T) { } for _, query := range []string{"show charset like 'utf8mb4'", "show character set like 'utf8mb4'", "show charset where charset = 'utf8mb4'", "show character set where charset = 'utf8mb4'"} { - qr, err := executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err := executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr := &sqltypes.Result{ - Fields: append(buildVarCharFields("Charset", "Description", "Default collation"), &querypb.Field{Name: "Maxlen", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG | querypb.MySqlFlag_NO_DEFAULT_VALUE_FLAG)}), + Fields: append(buildVarCharFields("Charset", "Description", "Default collation"), &querypb.Field{Name: "Maxlen", Type: sqltypes.Uint32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG | querypb.MySqlFlag_NO_DEFAULT_VALUE_FLAG)}), Rows: [][]sqltypes.Value{ append(buildVarCharRow( "utf8mb4", "UTF-8 Unicode", - "utf8mb4_general_ci"), - sqltypes.NewInt32(4)), + collations.Local().LookupName(collations.Default())), + 
sqltypes.NewUint32(4)), }, } utils.MustMatch(t, wantqr, qr, query) } + for _, query := range []string{"show character set where foo like '%foo'"} { + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) + require.Error(t, err) + } + query = "show engines" - qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Engine", "Support", "Comment", "Transactions", "XA", "Savepoints"), @@ -731,7 +745,7 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) query = "show plugins" - qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Name", "Status", "Type", "Library", "License"), @@ -747,7 +761,7 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) for _, sql := range []string{"show session status", "show session status like 'Ssl_cipher'"} { - qr, err = executor.Execute(ctx, nil, "TestExecute", session, sql, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, sql, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: []*querypb.Field{ @@ -763,11 +777,11 @@ func TestExecutorShow(t *testing.T) { } // Test SHOW FULL COLUMNS FROM where query has a qualifier - _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show full columns from %v.table1", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, fmt.Sprintf("show full columns from %v.table1", KsTestUnsharded), nil) require.NoError(t, err) query = "show vitess_shards" - qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, 
nil) require.NoError(t, err) // Just test for first & last. @@ -782,7 +796,7 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) query = "show vitess_shards like 'TestExecutor/%'" - qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) // Just test for first & last. @@ -797,7 +811,7 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) query = "show vitess_shards like 'TestExec%/%'" - qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) // Just test for first & last. @@ -812,7 +826,7 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) query = "show vitess_replication_status" - qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) qr.Rows = [][]sqltypes.Value{} wantqr = &sqltypes.Result{ @@ -821,7 +835,7 @@ func TestExecutorShow(t *testing.T) { } utils.MustMatch(t, wantqr, qr, query) query = "show vitess_replication_status like 'x'" - qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) qr.Rows = [][]sqltypes.Value{} wantqr = &sqltypes.Result{ @@ -831,7 +845,7 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) query = "show vitess_tablets" - qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) // Just test for first & last. 
qr.Rows = [][]sqltypes.Value{qr.Rows[0], qr.Rows[len(qr.Rows)-1]} @@ -839,13 +853,13 @@ func TestExecutorShow(t *testing.T) { Fields: buildVarCharFields("Cell", "Keyspace", "Shard", "TabletType", "State", "Alias", "Hostname", "PrimaryTermStartTime"), Rows: [][]sqltypes.Value{ buildVarCharRow("aa", "TestExecutor", "-20", "PRIMARY", "SERVING", "aa-0000000001", "-20", "1970-01-01T00:00:01Z"), - buildVarCharRow("aa", "TestXBadVSchema", "-20", "PRIMARY", "SERVING", "aa-0000000009", "random", "1970-01-01T00:00:01Z"), + buildVarCharRow("aa", "TestUnsharded", "0", "REPLICA", "SERVING", "aa-0000000010", "2", "1970-01-01T00:00:01Z"), }, } utils.MustMatch(t, wantqr, qr, query) query = "show vitess_tablets like 'x'" - qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Cell", "Keyspace", "Shard", "TabletType", "State", "Alias", "Hostname", "PrimaryTermStartTime"), @@ -854,7 +868,7 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, fmt.Sprintf("%q should be empty", query)) query = "show vitess_tablets like '-20%'" - qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Cell", "Keyspace", "Shard", "TabletType", "State", "Alias", "Hostname", "PrimaryTermStartTime"), @@ -865,7 +879,7 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) query = "show vschema vindexes" - qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Keyspace", "Name", "Type", "Params", "Owner"), @@ -895,7 +909,7 @@ func 
TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) query = "show vschema vindexes on TestExecutor.user" - qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), @@ -907,18 +921,18 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) query = "show vschema vindexes on user" - _, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) wantErr := errNoKeyspace.Error() assert.EqualError(t, err, wantErr, query) query = "show vschema vindexes on TestExecutor.garbage" - _, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) wantErr = "VT05005: table 'garbage' does not exist in keyspace 'TestExecutor'" assert.EqualError(t, err, wantErr, query) query = "show vschema vindexes on user" session.TargetString = "TestExecutor" - qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), @@ -931,7 +945,7 @@ func TestExecutorShow(t *testing.T) { query = "show vschema vindexes on user2" session.TargetString = "TestExecutor" - qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), @@ -943,12 +957,12 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) query = "show vschema 
vindexes on garbage" - _, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) wantErr = "VT05005: table 'garbage' does not exist in keyspace 'TestExecutor'" assert.EqualError(t, err, wantErr, query) query = "show warnings" - qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: []*querypb.Field{ @@ -962,7 +976,7 @@ func TestExecutorShow(t *testing.T) { query = "show warnings" session.Warnings = []*querypb.QueryWarning{} - qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: []*querypb.Field{ @@ -976,10 +990,10 @@ func TestExecutorShow(t *testing.T) { query = "show warnings" session.Warnings = []*querypb.QueryWarning{ - {Code: uint32(mysql.ERBadTable), Message: "bad table"}, - {Code: uint32(mysql.EROutOfResources), Message: "ks/-40: query timed out"}, + {Code: uint32(sqlerror.ERBadTable), Message: "bad table"}, + {Code: uint32(sqlerror.EROutOfResources), Message: "ks/-40: query timed out"}, } - qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: []*querypb.Field{ @@ -989,16 +1003,16 @@ func TestExecutorShow(t *testing.T) { }, Rows: [][]sqltypes.Value{ - {sqltypes.NewVarChar("Warning"), sqltypes.NewUint32(uint32(mysql.ERBadTable)), sqltypes.NewVarChar("bad table")}, - {sqltypes.NewVarChar("Warning"), sqltypes.NewUint32(uint32(mysql.EROutOfResources)), sqltypes.NewVarChar("ks/-40: query timed out")}, + {sqltypes.NewVarChar("Warning"), sqltypes.NewUint32(uint32(sqlerror.ERBadTable)), 
sqltypes.NewVarChar("bad table")}, + {sqltypes.NewVarChar("Warning"), sqltypes.NewUint32(uint32(sqlerror.EROutOfResources)), sqltypes.NewVarChar("ks/-40: query timed out")}, }, } utils.MustMatch(t, wantqr, qr, query) // Make sure it still works when one of the keyspaces is in a bad state - getSandbox("TestExecutor").SrvKeyspaceMustFail++ + getSandbox(KsTestSharded).SrvKeyspaceMustFail++ query = "show vitess_shards" - qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) // Just test for first & last. qr.Rows = [][]sqltypes.Value{qr.Rows[0], qr.Rows[len(qr.Rows)-1]} @@ -1013,7 +1027,7 @@ func TestExecutorShow(t *testing.T) { query = "show vschema tables" session = NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}) - qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Tables"), @@ -1039,34 +1053,35 @@ func TestExecutorShow(t *testing.T) { query = "show vschema tables" session = NewSafeSession(&vtgatepb.Session{}) - _, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) want = errNoKeyspace.Error() assert.EqualError(t, err, want, query) query = "show 10" - _, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) want = "syntax error at position 8 near '10'" assert.EqualError(t, err, want, query) query = "show vschema tables" session = NewSafeSession(&vtgatepb.Session{TargetString: "no_such_keyspace"}) - _, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) want = "VT05003: 
unknown database 'no_such_keyspace' in vschema" assert.EqualError(t, err, want, query) query = "show vitess_migrations" - _, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) want = "VT05003: unknown database 'no_such_keyspace' in vschema" assert.EqualError(t, err, want, query) query = "show vitess_migrations from ks like '9748c3b7_7fdb_11eb_ac2c_f875a4d24e90'" - _, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, query, nil) want = "VT05003: unknown database 'ks' in vschema" assert.EqualError(t, err, want, query) } func TestExecutorShowTargeted(t *testing.T) { - executor, _, sbc2, _ := createExecutorEnv() + executor, _, sbc2, _, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor/40-60"}) queries := []string{ @@ -1083,7 +1098,7 @@ func TestExecutorShowTargeted(t *testing.T) { } for _, sql := range queries { - _, err := executor.Execute(ctx, nil, "TestExecutorShowTargeted", session, sql, nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecutorShowTargeted", session, sql, nil) require.NoError(t, err) assert.NotZero(t, len(sbc2.Queries), "Tablet should have received 'show' query") lastQuery := sbc2.Queries[len(sbc2.Queries)-1].Sql @@ -1092,7 +1107,8 @@ func TestExecutorShowTargeted(t *testing.T) { } func TestExecutorUse(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{Autocommit: true, TargetString: "@primary"}) stmts := []string{ @@ -1104,7 +1120,7 @@ func TestExecutorUse(t *testing.T) { "TestExecutor:-80@primary", } for i, stmt := range stmts { - _, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) if err != nil { t.Error(err) } 
@@ -1112,18 +1128,18 @@ func TestExecutorUse(t *testing.T) { utils.MustMatch(t, wantSession, session.Session, "session does not match") } - _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{}), "use 1", nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{}), "use 1", nil) wantErr := "syntax error at position 6 near '1'" if err == nil || err.Error() != wantErr { t.Errorf("got: %v, want %v", err, wantErr) } - _, err = executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{}), "use UnexistentKeyspace", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{}), "use UnexistentKeyspace", nil) require.EqualError(t, err, "VT05003: unknown database 'UnexistentKeyspace' in vschema") } func TestExecutorComment(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) stmts := []string{ "/*! SET autocommit=1*/", @@ -1132,7 +1148,7 @@ func TestExecutorComment(t *testing.T) { wantResult := &sqltypes.Result{} for _, stmt := range stmts { - gotResult, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}), stmt, nil) + gotResult, err := executor.Execute(ctx, nil, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}), stmt, nil) if err != nil { t.Error(err) } @@ -1143,7 +1159,7 @@ func TestExecutorComment(t *testing.T) { } func TestExecutorOther(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) type cnts struct { Sbc1Cnt int64 @@ -1201,7 +1217,6 @@ func TestExecutorOther(t *testing.T) { } stmts := []string{ - "analyze table t1", "describe select * from t1", "explain select * from t1", "repair table t1", @@ -1215,7 +1230,7 @@ func TestExecutorOther(t *testing.T) { sbc2.ExecCount.Store(0) sbclookup.ExecCount.Store(0) - _, 
err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) if tc.hasNoKeyspaceErr { assert.Error(t, err, errNoKeyspace) } else if tc.hasDestinationShardErr { @@ -1235,10 +1250,10 @@ func TestExecutorOther(t *testing.T) { } func TestExecutorDDL(t *testing.T) { - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) type cnts struct { Sbc1Cnt int64 @@ -1309,7 +1324,7 @@ func TestExecutorDDL(t *testing.T) { sbc2.ExecCount.Store(0) sbclookup.ExecCount.Store(0) stmtType := "DDL" - _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) if tc.hasNoKeyspaceErr { require.EqualError(t, err, errNoKeyspace.Error(), "expect query to fail: %q", stmt) stmtType = "" // For error case, plan is not generated to query log will not contain any stmtType. 
@@ -1326,7 +1341,7 @@ func TestExecutorDDL(t *testing.T) { t.Errorf("stmt: %s\ntc: %+v\n-want,+got:\n%s", stmt, tc, diff) } - testQueryLog(t, logChan, "TestExecute", stmtType, stmt, tc.shardQueryCnt) + testQueryLog(t, executor, logChan, "TestExecute", stmtType, stmt, tc.shardQueryCnt) } } @@ -1347,19 +1362,19 @@ func TestExecutorDDL(t *testing.T) { sbc1.ExecCount.Store(0) sbc2.ExecCount.Store(0) sbclookup.ExecCount.Store(0) - _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: ""}), stmt.input, nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: ""}), stmt.input, nil) if stmt.hasErr { require.EqualError(t, err, errNoKeyspace.Error(), "expect query to fail") - testQueryLog(t, logChan, "TestExecute", "", stmt.input, 0) + testQueryLog(t, executor, logChan, "TestExecute", "", stmt.input, 0) } else { require.NoError(t, err) - testQueryLog(t, logChan, "TestExecute", "DDL", stmt.input, 8) + testQueryLog(t, executor, logChan, "TestExecute", "DDL", stmt.input, 8) } } } func TestExecutorDDLFk(t *testing.T) { - executor, _, _, sbc := createExecutorEnv() + executor, _, _, sbc, ctx := createExecutorEnv(t) mName := "TestExecutorDDLFk" stmts := []string{ @@ -1372,7 +1387,7 @@ func TestExecutorDDLFk(t *testing.T) { t.Run(stmt+fkMode, func(t *testing.T) { sbc.ExecCount.Store(0) foreignKeyMode = fkMode - _, err := executor.Execute(ctx, nil, mName, NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}), stmt, nil) + _, err := executor.Execute(ctx, nil, nil, mName, NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}), stmt, nil) if fkMode == "allow" { require.NoError(t, err) require.EqualValues(t, 1, sbc.ExecCount.Load()) @@ -1390,7 +1405,8 @@ func TestExecutorAlterVSchemaKeyspace(t *testing.T) { defer func() { vschemaacl.AuthorizedDDLUsers = "" }() - executor, _, _, _ := createExecutorEnv() + + executor, _, _, _, ctx := createExecutorEnv(t) session := 
NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true}) vschemaUpdates := make(chan *vschemapb.SrvVSchema, 2) @@ -1406,7 +1422,7 @@ func TestExecutorAlterVSchemaKeyspace(t *testing.T) { } stmt := "alter vschema create vindex TestExecutor.test_vindex using hash" - _, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _, vindex := waitForVindex(t, "TestExecutor", "test_vindex", vschemaUpdates, executor) @@ -1418,7 +1434,7 @@ func TestExecutorCreateVindexDDL(t *testing.T) { defer func() { vschemaacl.AuthorizedDDLUsers = "" }() - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) ks := "TestExecutor" vschemaUpdates := make(chan *vschemapb.SrvVSchema, 4) @@ -1435,7 +1451,7 @@ func TestExecutorCreateVindexDDL(t *testing.T) { session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) stmt := "alter vschema create vindex test_vindex using hash" - _, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _, vindex := waitForVindex(t, ks, "test_vindex", vschemaUpdates, executor) @@ -1443,7 +1459,7 @@ func TestExecutorCreateVindexDDL(t *testing.T) { t.Errorf("updated vschema did not contain test_vindex") } - _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) wantErr := "vindex test_vindex already exists in keyspace TestExecutor" if err == nil || err.Error() != wantErr { t.Errorf("create duplicate vindex: %v, want %s", err, wantErr) @@ -1459,7 +1475,7 @@ func TestExecutorCreateVindexDDL(t *testing.T) { // ksNew := "test_new_keyspace" session = NewSafeSession(&vtgatepb.Session{TargetString: ks}) stmt = "alter vschema create vindex test_vindex2 using hash" - 
_, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) if err != nil { t.Fatalf("error in %s: %v", stmt, err) } @@ -1488,7 +1504,7 @@ func TestExecutorAddDropVschemaTableDDL(t *testing.T) { defer func() { vschemaacl.AuthorizedDDLUsers = "" }() - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) ks := KsTestUnsharded vschemaUpdates := make(chan *vschemapb.SrvVSchema, 4) @@ -1510,19 +1526,19 @@ func TestExecutorAddDropVschemaTableDDL(t *testing.T) { session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) stmt := "alter vschema add table test_table" - _, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _ = waitForVschemaTables(t, ks, append([]string{"test_table"}, vschemaTables...), executor) stmt = "alter vschema add table test_table2" - _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _ = waitForVschemaTables(t, ks, append([]string{"test_table", "test_table2"}, vschemaTables...), executor) // Should fail adding a table on a sharded keyspace session = NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) stmt = "alter vschema add table test_table" - _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, "add vschema table: unsupported on sharded keyspace TestExecutor") // No queries should have gone to any tablets @@ -1536,7 +1552,8 @@ func TestExecutorAddDropVschemaTableDDL(t *testing.T) { } func TestExecutorVindexDDLACL(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) + 
ks := "TestExecutor" session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) @@ -1545,21 +1562,21 @@ func TestExecutorVindexDDLACL(t *testing.T) { // test that by default no users can perform the operation stmt := "alter vschema create vindex test_hash using hash" - _, err := executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctxRedUser, nil, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, `User 'redUser' is not authorized to perform vschema operations`) - _, err = executor.Execute(ctxBlueUser, nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctxBlueUser, nil, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, `User 'blueUser' is not authorized to perform vschema operations`) // test when all users are enabled vschemaacl.AuthorizedDDLUsers = "%" vschemaacl.Init() - _, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctxRedUser, nil, nil, "TestExecute", session, stmt, nil) if err != nil { t.Errorf("unexpected error '%v'", err) } stmt = "alter vschema create vindex test_hash2 using hash" - _, err = executor.Execute(ctxBlueUser, nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctxBlueUser, nil, nil, "TestExecute", session, stmt, nil) if err != nil { t.Errorf("unexpected error '%v'", err) } @@ -1567,11 +1584,11 @@ func TestExecutorVindexDDLACL(t *testing.T) { // test when only one user is enabled vschemaacl.AuthorizedDDLUsers = "orangeUser, blueUser, greenUser" vschemaacl.Init() - _, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctxRedUser, nil, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, `User 'redUser' is not authorized to perform vschema operations`) stmt = "alter vschema create vindex test_hash3 using hash" - _, err = executor.Execute(ctxBlueUser, nil, "TestExecute", session, stmt, nil) + _, err = 
executor.Execute(ctxBlueUser, nil, nil, "TestExecute", session, stmt, nil) if err != nil { t.Errorf("unexpected error '%v'", err) } @@ -1581,16 +1598,16 @@ func TestExecutorVindexDDLACL(t *testing.T) { } func TestExecutorUnrecognized(t *testing.T) { - executor, _, _, _ := createExecutorEnv() - _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{}), "invalid statement", nil) + + executor, _, _, _, ctx := createExecutorEnv(t) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{}), "invalid statement", nil) require.Error(t, err, "unrecognized statement: invalid statement'") } // TestVSchemaStats makes sure the building and displaying of the // VSchemaStats works. func TestVSchemaStats(t *testing.T) { - r, _, _, _ := createExecutorEnv() - + r, _, _, _, _ := createExecutorEnv(t) stats := r.VSchemaStats() templ := template.New("") @@ -1612,18 +1629,19 @@ func TestVSchemaStats(t *testing.T) { var pv = querypb.ExecuteOptions_Gen4 func TestGetPlanUnnormalized(t *testing.T) { - r, _, _, _ := createExecutorEnv() + r, _, _, _, ctx := createExecutorEnv(t) + emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) query1 := "select * from music_user_map where id = 1" - plan1, logStats1 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + plan1, logStats1 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) wantSQL := query1 + " /* comment */" if logStats1.SQL != wantSQL { t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats1.SQL) } - plan2, logStats2 := 
getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + plan2, logStats2 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) if plan1 != plan2 { t.Errorf("getPlan(query1): plans must be equal: %p %p", plan1, plan2) } @@ -1631,14 +1649,14 @@ func TestGetPlanUnnormalized(t *testing.T) { if logStats2.SQL != wantSQL { t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats2.SQL) } - plan3, logStats3 := getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + plan3, logStats3 := getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) if plan1 == plan3 { t.Errorf("getPlan(query1, ks): plans must not be equal: %p %p", plan1, plan3) } if logStats3.SQL != wantSQL { t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats3.SQL) } - plan4, logStats4 := getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + plan4, logStats4 := getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) if plan3 != plan4 { t.Errorf("getPlan(query1, ks): plans must be equal: %p %p", plan3, plan4) } @@ -1649,13 +1667,9 @@ func TestGetPlanUnnormalized(t *testing.T) { } } -func assertCacheSize(t *testing.T, c cache.Cache, expected int) { +func assertCacheSize(t *testing.T, c *PlanCache, expected int) { t.Helper() - var size int - c.ForEach(func(_ any) bool { - size++ - return true - }) + size := c.Len() if size != expected { t.Errorf("getPlan() expected cache to have size %d, but got: %d", expected, size) } @@ -1666,8 +1680,7 @@ func assertCacheContains(t *testing.T, e *Executor, vc *vcursorImpl, sql string) var plan *engine.Plan if vc == nil { - e.plans.ForEach(func(x any) bool { - p := x.(*engine.Plan) + 
e.ForEachPlan(func(p *engine.Plan) bool { if p.Original == sql { plan = p } @@ -1675,15 +1688,13 @@ func assertCacheContains(t *testing.T, e *Executor, vc *vcursorImpl, sql string) }) } else { h := e.hashPlan(context.Background(), vc, sql) - if p, ok := e.plans.Get(h); ok { - plan = p.(*engine.Plan) - } + plan, _ = e.plans.Get(h, e.epoch.Load()) } require.Truef(t, plan != nil, "plan not found for query: %s", sql) return plan } -func getPlanCached(t *testing.T, e *Executor, vcursor *vcursorImpl, sql string, comments sqlparser.MarginComments, bindVars map[string]*querypb.BindVariable, skipQueryPlanCache bool) (*engine.Plan, *logstats.LogStats) { +func getPlanCached(t *testing.T, ctx context.Context, e *Executor, vcursor *vcursorImpl, sql string, comments sqlparser.MarginComments, bindVars map[string]*querypb.BindVariable, skipQueryPlanCache bool) (*engine.Plan, *logstats.LogStats) { logStats := logstats.NewLogStats(ctx, "Test", "", "", nil) vcursor.safeSession = &SafeSession{ Session: &vtgatepb.Session{ @@ -1696,95 +1707,106 @@ func getPlanCached(t *testing.T, e *Executor, vcursor *vcursorImpl, sql string, require.NoError(t, err) // Wait for cache to settle - e.plans.Wait() + time.Sleep(100 * time.Millisecond) return plan, logStats } func TestGetPlanCacheUnnormalized(t *testing.T) { - r, _, _, _ := createExecutorEnv() - emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) - query1 := "select * from music_user_map where id = 1" - - _, logStats1 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, true) - assertCacheSize(t, r.plans, 0) - - wantSQL := query1 + " /* comment */" - if logStats1.SQL != wantSQL { - t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats1.SQL) - } + t.Run("Cache", func(t *testing.T) { + r, _, _, _, ctx := createExecutorEnv(t) - _, logStats2 := getPlanCached(t, 
r, emptyvc, query1, makeComments(" /* comment 2 */"), map[string]*querypb.BindVariable{}, false) - assertCacheSize(t, r.plans, 1) + emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + query1 := "select * from music_user_map where id = 1" - wantSQL = query1 + " /* comment 2 */" - if logStats2.SQL != wantSQL { - t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats2.SQL) - } + _, logStats1 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, true) + assertCacheSize(t, r.plans, 0) - // Skip cache using directive - r, _, _, _ = createExecutorEnv() - unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + wantSQL := query1 + " /* comment */" + if logStats1.SQL != wantSQL { + t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats1.SQL) + } - query1 = "insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)" - getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) - assertCacheSize(t, r.plans, 0) + _, logStats2 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment 2 */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 1) - query1 = "insert into user(id) values (1), (2)" - getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) - assertCacheSize(t, r.plans, 1) + wantSQL = query1 + " /* comment 2 */" + if logStats2.SQL != wantSQL { + t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats2.SQL) + } + }) - // the target string will be resolved and become part of the plan cache key, which adds a new entry - ksIDVc1, _ := 
newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) - getPlanCached(t, r, ksIDVc1, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) - assertCacheSize(t, r.plans, 2) + t.Run("Skip Cache", func(t *testing.T) { + // Skip cache using directive + r, _, _, _, ctx := createExecutorEnv(t) - // the target string will be resolved and become part of the plan cache key, as it's an unsharded ks, it will be the same entry as above - ksIDVc2, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) - getPlanCached(t, r, ksIDVc2, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) - assertCacheSize(t, r.plans, 2) -} + unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) -func TestGetPlanCacheNormalized(t *testing.T) { - r, _, _, _ := createExecutorEnv() - r.normalize = true - emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + query1 := "insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)" + getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 0) - query1 := "select * from music_user_map where id = 1" - _, logStats1 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, true /* skipQueryPlanCache */) - assertCacheSize(t, r.plans, 0) - wantSQL := "select * from music_user_map where id = :id /* INT64 */ /* comment */" - assert.Equal(t, wantSQL, 
logStats1.SQL) + query1 = "insert into user(id) values (1), (2)" + getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 1) - _, logStats2 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false /* skipQueryPlanCache */) - assertCacheSize(t, r.plans, 1) - assert.Equal(t, wantSQL, logStats2.SQL) + // the target string will be resolved and become part of the plan cache key, which adds a new entry + ksIDVc1, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + getPlanCached(t, ctx, r, ksIDVc1, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 2) - // Skip cache using directive - r, _, _, _ = createExecutorEnv() - r.normalize = true - unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) - - query1 = "insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)" - getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) - assertCacheSize(t, r.plans, 0) - - query1 = "insert into user(id) values (1), (2)" - getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) - assertCacheSize(t, r.plans, 1) + // the target string will be resolved and become part of the plan cache key, as it's an unsharded ks, it will be the same entry as above + ksIDVc2, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + getPlanCached(t, ctx, r, ksIDVc2, query1, 
makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 2) + }) +} - // the target string will be resolved and become part of the plan cache key, which adds a new entry - ksIDVc1, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) - getPlanCached(t, r, ksIDVc1, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) - assertCacheSize(t, r.plans, 2) +func TestGetPlanCacheNormalized(t *testing.T) { + t.Run("Cache", func(t *testing.T) { + r, _, _, _, ctx := createExecutorEnv(t) + r.normalize = true + emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + + query1 := "select * from music_user_map where id = 1" + _, logStats1 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, true /* skipQueryPlanCache */) + assertCacheSize(t, r.plans, 0) + wantSQL := "select * from music_user_map where id = :id /* INT64 */ /* comment */" + assert.Equal(t, wantSQL, logStats1.SQL) + + _, logStats2 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false /* skipQueryPlanCache */) + assertCacheSize(t, r.plans, 1) + assert.Equal(t, wantSQL, logStats2.SQL) + }) - // the target string will be resolved and become part of the plan cache key, as it's an unsharded ks, it will be the same entry as above - ksIDVc2, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) - getPlanCached(t, r, ksIDVc2, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) - assertCacheSize(t, r.plans, 2) + t.Run("Skip 
Cache", func(t *testing.T) { + // Skip cache using directive + r, _, _, _, ctx := createExecutorEnv(t) + r.normalize = true + unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + + query1 := "insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)" + getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 0) + + query1 = "insert into user(id) values (1), (2)" + getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 1) + + // the target string will be resolved and become part of the plan cache key, which adds a new entry + ksIDVc1, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + getPlanCached(t, ctx, r, ksIDVc1, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 2) + + // the target string will be resolved and become part of the plan cache key, as it's an unsharded ks, it will be the same entry as above + ksIDVc2, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + getPlanCached(t, ctx, r, ksIDVc2, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 2) + }) } func TestGetPlanNormalized(t *testing.T) { - r, _, _, _ := createExecutorEnv() + r, _, _, _, ctx := createExecutorEnv(t) + r.normalize = true emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), 
r.resolver.resolver, nil, false, pv) unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) @@ -1793,8 +1815,8 @@ func TestGetPlanNormalized(t *testing.T) { query2 := "select * from music_user_map where id = 2" normalized := "select * from music_user_map where id = :id /* INT64 */" - plan1, logStats1 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment 1 */"), map[string]*querypb.BindVariable{}, false) - plan2, logStats2 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment 2 */"), map[string]*querypb.BindVariable{}, false) + plan1, logStats1 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment 1 */"), map[string]*querypb.BindVariable{}, false) + plan2, logStats2 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment 2 */"), map[string]*querypb.BindVariable{}, false) assert.Equal(t, plan1, plan2) assertCacheContains(t, r, emptyvc, normalized) @@ -1804,18 +1826,18 @@ func TestGetPlanNormalized(t *testing.T) { wantSQL = normalized + " /* comment 2 */" assert.Equal(t, wantSQL, logStats2.SQL) - plan3, logStats3 := getPlanCached(t, r, emptyvc, query2, makeComments(" /* comment 3 */"), map[string]*querypb.BindVariable{}, false) + plan3, logStats3 := getPlanCached(t, ctx, r, emptyvc, query2, makeComments(" /* comment 3 */"), map[string]*querypb.BindVariable{}, false) assert.Equal(t, plan1, plan3) wantSQL = normalized + " /* comment 3 */" assert.Equal(t, wantSQL, logStats3.SQL) var logStats5 *logstats.LogStats - plan3, logStats5 = getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment 5 */"), map[string]*querypb.BindVariable{}, false) + plan3, logStats5 = getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment 5 */"), map[string]*querypb.BindVariable{}, false) assert.Equal(t, plan1, plan3) wantSQL = normalized + " /* comment 5 */" assert.Equal(t, 
wantSQL, logStats5.SQL) - plan4, _ := getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment 6 */"), map[string]*querypb.BindVariable{}, false) + plan4, _ := getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment 6 */"), map[string]*querypb.BindVariable{}, false) assert.Equal(t, plan1, plan4) assertCacheContains(t, r, emptyvc, normalized) assertCacheContains(t, r, unshardedvc, normalized) @@ -1840,7 +1862,8 @@ func TestGetPlanPriority(t *testing.T) { testCase := aTestCase t.Run(testCase.name, func(t *testing.T) { - r, _, _, _ := createExecutorEnv() + r, _, _, _, ctx := createExecutorEnv(t) + r.normalize = true logStats := logstats.NewLogStats(ctx, "Test", "", "", nil) vCursor, err := newVCursorImpl(session, makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) @@ -1864,11 +1887,13 @@ func TestGetPlanPriority(t *testing.T) { } func TestPassthroughDDL(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - primarySession.TargetString = "TestExecutor" + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + session := &vtgatepb.Session{ + TargetString: "TestExecutor", + } - alterDDL := "/* leading */ alter table passthrough_ddl add columne col bigint default 123 /* trailing */" - _, err := executorExec(executor, alterDDL, nil) + alterDDL := "/* leading */ alter table passthrough_ddl add column col bigint default 123 /* trailing */" + _, err := executorExec(ctx, executor, session, alterDDL, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: alterDDL, @@ -1884,23 +1909,22 @@ func TestPassthroughDDL(t *testing.T) { sbc2.Queries = nil // Force the query to go to only one shard. Normalization doesn't make any difference. 
- primarySession.TargetString = "TestExecutor/40-60" + session.TargetString = "TestExecutor/40-60" executor.normalize = true - _, err = executorExec(executor, alterDDL, nil) + _, err = executorExec(ctx, executor, session, alterDDL, nil) require.NoError(t, err) require.Nil(t, sbc1.Queries) if !reflect.DeepEqual(sbc2.Queries, wantQueries) { t.Errorf("sbc2.Queries: %+v, want %+v\n", sbc2.Queries, wantQueries) } sbc2.Queries = nil - primarySession.TargetString = "" // Use range query - primarySession.TargetString = "TestExecutor[-]" + session.TargetString = "TestExecutor[-]" executor.normalize = true - _, err = executorExec(executor, alterDDL, nil) + _, err = executorExec(ctx, executor, session, alterDDL, nil) require.NoError(t, err) if !reflect.DeepEqual(sbc1.Queries, wantQueries) { t.Errorf("sbc2.Queries: %+v, want %+v\n", sbc1.Queries, wantQueries) @@ -1909,11 +1933,11 @@ func TestPassthroughDDL(t *testing.T) { t.Errorf("sbc2.Queries: %+v, want %+v\n", sbc2.Queries, wantQueries) } sbc2.Queries = nil - primarySession.TargetString = "" } func TestParseEmptyTargetSingleKeyspace(t *testing.T) { - r, _, _, _ := createExecutorEnv() + r, _, _, _, _ := createExecutorEnv(t) + altVSchema := &vindexes.VSchema{ Keyspaces: map[string]*vindexes.KeyspaceSchema{ KsTestUnsharded: r.vschema.Keyspaces[KsTestUnsharded], @@ -1935,7 +1959,8 @@ func TestParseEmptyTargetSingleKeyspace(t *testing.T) { } func TestParseEmptyTargetMultiKeyspace(t *testing.T) { - r, _, _, _ := createExecutorEnv() + r, _, _, _, _ := createExecutorEnv(t) + altVSchema := &vindexes.VSchema{ Keyspaces: map[string]*vindexes.KeyspaceSchema{ KsTestUnsharded: r.vschema.Keyspaces[KsTestUnsharded], @@ -1958,7 +1983,8 @@ func TestParseEmptyTargetMultiKeyspace(t *testing.T) { } func TestParseTargetSingleKeyspace(t *testing.T) { - r, _, _, _ := createExecutorEnv() + r, _, _, _, _ := createExecutorEnv(t) + altVSchema := &vindexes.VSchema{ Keyspaces: map[string]*vindexes.KeyspaceSchema{ KsTestUnsharded: 
r.vschema.Keyspaces[KsTestUnsharded], @@ -1980,10 +2006,11 @@ func TestParseTargetSingleKeyspace(t *testing.T) { } func TestDebugVSchema(t *testing.T) { + executor, _, _, _, _ := createExecutorEnv(t) + resp := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/debug/vschema", nil) - executor, _, _, _ := createExecutorEnv() executor.ServeHTTP(resp, req) v := make(map[string]any) if err := json.Unmarshal(resp.Body.Bytes(), &v); err != nil { @@ -2007,7 +2034,8 @@ func TestExecutorMaxPayloadSizeExceeded(t *testing.T) { warnPayloadSize = saveWarn }() - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, _ := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"}) warningCount := warnings.Counts()["WarnPayloadSizeExceeded"] testMaxPayloadSizeExceeded := []string{ @@ -2017,7 +2045,7 @@ func TestExecutorMaxPayloadSizeExceeded(t *testing.T) { "delete from main1 where id=1", } for _, query := range testMaxPayloadSizeExceeded { - _, err := executor.Execute(context.Background(), nil, "TestExecutorMaxPayloadSizeExceeded", session, query, nil) + _, err := executor.Execute(context.Background(), nil, nil, "TestExecutorMaxPayloadSizeExceeded", session, query, nil) require.NotNil(t, err) assert.EqualError(t, err, "query payload size above threshold") } @@ -2030,21 +2058,22 @@ func TestExecutorMaxPayloadSizeExceeded(t *testing.T) { "delete /*vt+ IGNORE_MAX_PAYLOAD_SIZE=1 */ from main1 where id=1", } for _, query := range testMaxPayloadSizeOverride { - _, err := executor.Execute(context.Background(), nil, "TestExecutorMaxPayloadSizeWithOverride", session, query, nil) + _, err := executor.Execute(context.Background(), nil, nil, "TestExecutorMaxPayloadSizeWithOverride", session, query, nil) assert.Equal(t, nil, err, "err should be nil") } assert.Equal(t, warningCount, warnings.Counts()["WarnPayloadSizeExceeded"], "warnings count") maxPayloadSize = 1000 for _, query := range testMaxPayloadSizeExceeded { - _, err := 
executor.Execute(context.Background(), nil, "TestExecutorMaxPayloadSizeExceeded", session, query, nil) + _, err := executor.Execute(context.Background(), nil, nil, "TestExecutorMaxPayloadSizeExceeded", session, query, nil) assert.Equal(t, nil, err, "err should be nil") } assert.Equal(t, warningCount+4, warnings.Counts()["WarnPayloadSizeExceeded"], "warnings count") } func TestOlapSelectDatabase(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, _ := createExecutorEnv(t) + executor.normalize = true session := &vtgatepb.Session{Autocommit: true} @@ -2055,23 +2084,73 @@ func TestOlapSelectDatabase(t *testing.T) { cbInvoked = true return nil } - err := executor.StreamExecute(context.Background(), nil, "TestExecute", NewSafeSession(session), sql, nil, cb) + err := executor.StreamExecute(context.Background(), nil, nil, "TestExecute", NewSafeSession(session), sql, nil, cb) assert.NoError(t, err) assert.True(t, cbInvoked) } func TestExecutorClearsWarnings(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, _ := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{ Warnings: []*querypb.QueryWarning{{Code: 234, Message: "oh noes"}}, }) - _, err := executor.Execute(context.Background(), nil, "TestExecute", session, "select 42", nil) + _, err := executor.Execute(context.Background(), nil, nil, "TestExecute", session, "select 42", nil) require.NoError(t, err) require.Empty(t, session.Warnings) } +// TestServingKeyspaces tests that the dual queries are routed to the correct keyspaces from the list of serving keyspaces. 
+func TestServingKeyspaces(t *testing.T) { + buffer.SetBufferingModeInTestingEnv(true) + defer func() { + buffer.SetBufferingModeInTestingEnv(false) + }() + + executor, sbc1, _, sbclookup, ctx := createExecutorEnv(t) + + executor.pv = querypb.ExecuteOptions_Gen4 + gw, ok := executor.resolver.resolver.GetGateway().(*TabletGateway) + require.True(t, ok) + hc := gw.hc.(*discovery.FakeHealthCheck) + + // We broadcast twice because we want to ensure the keyspace event watcher has processed all the healthcheck updates + // from the first broadcast. Since we use a channel for broadcasting, it is blocking and hence the second call ensures + // all the updates (specifically the last one) has been processed by the keyspace-event-watcher. + hc.BroadcastAll() + hc.BroadcastAll() + + sbc1.SetResults([]*sqltypes.Result{ + sqltypes.MakeTestResult(sqltypes.MakeTestFields("keyspace", "varchar"), "TestExecutor"), + }) + sbclookup.SetResults([]*sqltypes.Result{ + sqltypes.MakeTestResult(sqltypes.MakeTestFields("keyspace", "varchar"), "TestUnsharded"), + }) + + require.ElementsMatch(t, []string{"TestExecutor", "TestUnsharded"}, gw.GetServingKeyspaces()) + result, err := executor.Execute(ctx, nil, nil, "TestServingKeyspaces", NewSafeSession(&vtgatepb.Session{}), "select keyspace_name from dual", nil) + require.NoError(t, err) + require.Equal(t, `[[VARCHAR("TestExecutor")]]`, fmt.Sprintf("%v", result.Rows)) + + for _, tablet := range hc.GetAllTablets() { + if tablet.Keyspace == "TestExecutor" { + hc.SetServing(tablet, false) + } + } + // Two broadcast calls for the same reason as above. + hc.BroadcastAll() + hc.BroadcastAll() + + // Clear plan cache, to force re-planning of the query. 
+ executor.ClearPlans() + require.ElementsMatch(t, []string{"TestUnsharded"}, gw.GetServingKeyspaces()) + result, err = executor.Execute(ctx, nil, nil, "TestServingKeyspaces", NewSafeSession(&vtgatepb.Session{}), "select keyspace_name from dual", nil) + require.NoError(t, err) + require.Equal(t, `[[VARCHAR("TestUnsharded")]]`, fmt.Sprintf("%v", result.Rows)) +} + func TestExecutorOtherRead(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, _ := createExecutorEnv(t) type cnts struct { Sbc1Cnt int64 @@ -2113,7 +2192,6 @@ func TestExecutorOtherRead(t *testing.T) { } stmts := []string{ - "analyze table t1", "describe select * from t1", "explain select * from t1", "do 1", @@ -2126,7 +2204,7 @@ func TestExecutorOtherRead(t *testing.T) { sbc2.ExecCount.Store(0) sbclookup.ExecCount.Store(0) - _, err := executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) + _, err := executor.Execute(context.Background(), nil, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) if tc.hasNoKeyspaceErr { assert.EqualError(t, err, errNoKeyspace.Error()) } else if tc.hasDestinationShardErr { @@ -2145,28 +2223,81 @@ func TestExecutorOtherRead(t *testing.T) { } } +func TestExecutorAnalyze(t *testing.T) { + executor, sbc1, sbc2, sbclookup, _ := createExecutorEnv(t) + + type cnts struct { + Sbc1Cnt int64 + Sbc2Cnt int64 + SbcLookupCnt int64 + } + + tcs := []struct { + targetStr string + + wantCnts cnts + }{{ + targetStr: "TestExecutor[-]", + wantCnts: cnts{Sbc1Cnt: 1, Sbc2Cnt: 1}, + }, { + targetStr: KsTestUnsharded, + wantCnts: cnts{SbcLookupCnt: 1}, + }, { + targetStr: "TestExecutor", + wantCnts: cnts{Sbc1Cnt: 1, Sbc2Cnt: 1}, + }, { + targetStr: "TestExecutor/-20", + wantCnts: cnts{Sbc1Cnt: 1}, + }, { + targetStr: "TestExecutor[00]", + wantCnts: cnts{Sbc1Cnt: 1}, + }} + + stmt := "analyze table t1" + for _, tc := range 
tcs { + t.Run(tc.targetStr, func(t *testing.T) { + sbc1.ExecCount.Store(0) + sbc2.ExecCount.Store(0) + sbclookup.ExecCount.Store(0) + + _, err := executor.Execute(context.Background(), nil, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) + require.NoError(t, err) + + utils.MustMatch(t, tc.wantCnts, cnts{ + Sbc1Cnt: sbc1.ExecCount.Load(), + Sbc2Cnt: sbc2.ExecCount.Load(), + SbcLookupCnt: sbclookup.ExecCount.Load(), + }, "count did not match") + }) + } +} + func TestExecutorVExplain(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) + executor.normalize = true - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) bindVars := map[string]*querypb.BindVariable{} - result, err := executorExec(executor, "vexplain plan select * from user", bindVars) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "vexplain plan select * from user", bindVars) require.NoError(t, err) require.Equal(t, `[[VARCHAR("{\n\t\"OperatorType\": \"Route\",\n\t\"Variant\": \"Scatter\",\n\t\"Keyspace\": {\n\t\t\"Name\": \"TestExecutor\",\n\t\t\"Sharded\": true\n\t},\n\t\"FieldQuery\": \"select * from `+"`user`"+` where 1 != 1\",\n\t\"Query\": \"select * from `+"`user`"+`\",\n\t\"Table\": \"`+"`user`"+`\"\n}")]]`, fmt.Sprintf("%v", result.Rows)) - result, err = executorExec(executor, "vexplain plan select 42", bindVars) + result, err = executorExec(ctx, executor, session, "vexplain plan select 42", bindVars) require.NoError(t, err) expected := `[[VARCHAR("{\n\t\"OperatorType\": \"Projection\",\n\t\"Expressions\": [\n\t\t\"INT64(42) as 42\"\n\t],\n\t\"Inputs\": [\n\t\t{\n\t\t\t\"OperatorType\": \"SingleRow\"\n\t\t}\n\t]\n}")]]` require.Equal(t, expected, fmt.Sprintf("%v", result.Rows)) } func 
TestExecutorOtherAdmin(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, _ := createExecutorEnv(t) type cnts struct { Sbc1Cnt int64 @@ -2218,7 +2349,7 @@ func TestExecutorOtherAdmin(t *testing.T) { sbc2.ExecCount.Store(0) sbclookup.ExecCount.Store(0) - _, err := executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) + _, err := executor.Execute(context.Background(), nil, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) if tc.hasNoKeyspaceErr { assert.Error(t, err, errNoKeyspace) } else if tc.hasDestinationShardErr { @@ -2240,9 +2371,10 @@ func TestExecutorOtherAdmin(t *testing.T) { } func TestExecutorSavepointInTx(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("TestExecutorSavepoint") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) + + logChan := executor.queryLogger.Subscribe("TestExecutorSavepoint") + defer executor.queryLogger.Unsubscribe(logChan) session := NewSafeSession(&vtgatepb.Session{Autocommit: false, TargetString: "@primary"}) _, err := exec(executor, session, "savepoint a") @@ -2310,21 +2442,22 @@ func TestExecutorSavepointInTx(t *testing.T) { }} utils.MustMatch(t, sbc1WantQueries, sbc1.Queries, "") utils.MustMatch(t, sbc2WantQueries, sbc2.Queries, "") - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT", "savepoint a", 0) - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT_ROLLBACK", "rollback to a", 0) - testQueryLog(t, logChan, "TestExecute", "RELEASE", "release savepoint a", 0) - testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from `user` where id = 1", 1) - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT", "savepoint b", 1) - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT_ROLLBACK", "rollback to b", 1) - testQueryLog(t, logChan, "TestExecute", 
"RELEASE", "release savepoint b", 1) - testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from `user` where id = 3", 1) - testQueryLog(t, logChan, "TestExecute", "ROLLBACK", "rollback", 2) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT", "savepoint a", 0) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT_ROLLBACK", "rollback to a", 0) + testQueryLog(t, executor, logChan, "TestExecute", "RELEASE", "release savepoint a", 0) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from `user` where id = 1", 1) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT", "savepoint b", 1) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT_ROLLBACK", "rollback to b", 1) + testQueryLog(t, executor, logChan, "TestExecute", "RELEASE", "release savepoint b", 1) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from `user` where id = 3", 1) + testQueryLog(t, executor, logChan, "TestExecute", "ROLLBACK", "rollback", 2) } func TestExecutorSavepointInTxWithReservedConn(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("TestExecutorSavepoint") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) + + logChan := executor.queryLogger.Subscribe("TestExecutorSavepoint") + defer executor.queryLogger.Unsubscribe(logChan) session := NewSafeSession(&vtgatepb.Session{Autocommit: true, TargetString: "TestExecutor", EnableSystemSettings: true}) sbc1.SetResults([]*sqltypes.Result{ @@ -2377,20 +2510,21 @@ func TestExecutorSavepointInTxWithReservedConn(t *testing.T) { utils.MustMatch(t, sbc1WantQueries, sbc1.Queries, "") utils.MustMatch(t, sbc2WantQueries, sbc2.Queries, "") - testQueryLog(t, logChan, "TestExecute", "SET", "set @@sql_mode = ''", 1) - testQueryLog(t, logChan, "TestExecute", "BEGIN", "begin", 0) - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT", "savepoint a", 0) - testQueryLog(t, logChan, 
"TestExecute", "SELECT", "select id from `user` where id = 1", 1) - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT", "savepoint b", 1) - testQueryLog(t, logChan, "TestExecute", "RELEASE", "release savepoint a", 1) - testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from `user` where id = 3", 1) - testQueryLog(t, logChan, "TestExecute", "COMMIT", "commit", 2) + testQueryLog(t, executor, logChan, "TestExecute", "SET", "set @@sql_mode = ''", 1) + testQueryLog(t, executor, logChan, "TestExecute", "BEGIN", "begin", 0) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT", "savepoint a", 0) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from `user` where id = 1", 1) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT", "savepoint b", 1) + testQueryLog(t, executor, logChan, "TestExecute", "RELEASE", "release savepoint a", 1) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from `user` where id = 3", 1) + testQueryLog(t, executor, logChan, "TestExecute", "COMMIT", "commit", 2) } func TestExecutorSavepointWithoutTx(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("TestExecutorSavepoint") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) + + logChan := executor.queryLogger.Subscribe("TestExecutorSavepoint") + defer executor.queryLogger.Unsubscribe(logChan) session := NewSafeSession(&vtgatepb.Session{Autocommit: true, TargetString: "@primary", InTransaction: false}) _, err := exec(executor, session, "savepoint a") @@ -2420,18 +2554,18 @@ func TestExecutorSavepointWithoutTx(t *testing.T) { }} utils.MustMatch(t, sbc1WantQueries, sbc1.Queries, "") utils.MustMatch(t, sbc2WantQueries, sbc2.Queries, "") - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT", "savepoint a", 0) - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT_ROLLBACK", "rollback to a", 0) - testQueryLog(t, logChan, "TestExecute", 
"RELEASE", "release savepoint a", 0) - testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from `user` where id = 1", 1) - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT", "savepoint b", 0) - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT_ROLLBACK", "rollback to b", 0) - testQueryLog(t, logChan, "TestExecute", "RELEASE", "release savepoint b", 0) - testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from `user` where id = 3", 1) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT", "savepoint a", 0) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT_ROLLBACK", "rollback to a", 0) + testQueryLog(t, executor, logChan, "TestExecute", "RELEASE", "release savepoint a", 0) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from `user` where id = 1", 1) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT", "savepoint b", 0) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT_ROLLBACK", "rollback to b", 0) + testQueryLog(t, executor, logChan, "TestExecute", "RELEASE", "release savepoint b", 0) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from `user` where id = 3", 1) } func TestExecutorCallProc(t *testing.T) { - executor, sbc1, sbc2, sbcUnsharded := createExecutorEnv() + executor, sbc1, sbc2, sbcUnsharded, _ := createExecutorEnv(t) type cnts struct { Sbc1Cnt int64 @@ -2477,7 +2611,7 @@ func TestExecutorCallProc(t *testing.T) { sbc2.ExecCount.Store(0) sbcUnsharded.ExecCount.Store(0) - _, err := executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), "CALL proc()", nil) + _, err := executor.Execute(context.Background(), nil, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), "CALL proc()", nil) if tc.hasNoKeyspaceErr { assert.EqualError(t, err, errNoKeyspace.Error()) } else if tc.unshardedOnlyErr { @@ -2496,52 +2630,53 @@ func TestExecutorCallProc(t 
*testing.T) { } func TestExecutorTempTable(t *testing.T) { - executor, _, _, sbcUnsharded := createExecutorEnv() + executor, _, _, sbcUnsharded, ctx := createExecutorEnv(t) + executor.warnShardedOnly = true creatQuery := "create temporary table temp_t(id bigint primary key)" session := NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}) - ctx := context.Background() - _, err := executor.Execute(ctx, nil, "TestExecutorTempTable", session, creatQuery, nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecutorTempTable", session, creatQuery, nil) require.NoError(t, err) assert.EqualValues(t, 1, sbcUnsharded.ExecCount.Load()) assert.NotEmpty(t, session.Warnings) before := executor.plans.Len() - _, err = executor.Execute(ctx, nil, "TestExecutorTempTable", session, "select * from temp_t", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecutorTempTable", session, "select * from temp_t", nil) require.NoError(t, err) assert.Equal(t, before, executor.plans.Len()) } func TestExecutorShowVitessMigrations(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + showQuery := "show vitess_migrations" session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) - ctx := context.Background() - _, err := executor.Execute(ctx, nil, "", session, showQuery, nil) + _, err := executor.Execute(ctx, nil, nil, "", session, showQuery, nil) require.NoError(t, err) - assert.Contains(t, sbc1.StringQueries(), "SELECT * FROM _vt.schema_migrations") - assert.Contains(t, sbc2.StringQueries(), "SELECT * FROM _vt.schema_migrations") + assert.Contains(t, sbc1.StringQueries(), "show vitess_migrations") + assert.Contains(t, sbc2.StringQueries(), "show vitess_migrations") } func TestExecutorDescHash(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) + showQuery := "desc hash_index" session := NewSafeSession(&vtgatepb.Session{TargetString: 
"TestExecutor"}) - ctx := context.Background() - _, err := executor.Execute(ctx, nil, "", session, showQuery, nil) + _, err := executor.Execute(ctx, nil, nil, "", session, showQuery, nil) require.NoError(t, err) } func TestExecutorVExplainQueries(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + session := NewAutocommitSession(&vtgatepb.Session{}) sbclookup.SetResults([]*sqltypes.Result{ sqltypes.MakeTestResult(sqltypes.MakeTestFields("name|user_id", "varchar|int64"), "apa|1", "apa|2"), }) - qr, err := executor.Execute(ctx, nil, "TestExecutorVExplainQueries", session, "vexplain queries select * from user where name = 'apa'", nil) + qr, err := executor.Execute(ctx, nil, nil, "TestExecutorVExplainQueries", session, "vexplain queries select * from user where name = 'apa'", nil) require.NoError(t, err) txt := fmt.Sprintf("%v\n", qr.Rows) lookupQuery := "select `name`, user_id from name_user_map where `name` in" @@ -2550,7 +2685,7 @@ func TestExecutorVExplainQueries(t *testing.T) { // Test the streaming side as well var results []sqltypes.Row session = NewAutocommitSession(&vtgatepb.Session{}) - err = executor.StreamExecute(ctx, nil, "TestExecutorVExplainQueries", session, "vexplain queries select * from user where name = 'apa'", nil, func(result *sqltypes.Result) error { + err = executor.StreamExecute(ctx, nil, nil, "TestExecutorVExplainQueries", session, "vexplain queries select * from user where name = 'apa'", nil, func(result *sqltypes.Result) error { results = append(results, result.Rows...) 
return nil }) @@ -2560,7 +2695,8 @@ func TestExecutorVExplainQueries(t *testing.T) { } func TestExecutorStartTxnStmt(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) + session := NewAutocommitSession(&vtgatepb.Session{}) tcases := []struct { @@ -2592,12 +2728,12 @@ func TestExecutorStartTxnStmt(t *testing.T) { for _, tcase := range tcases { t.Run(tcase.beginSQL, func(t *testing.T) { - _, err := executor.Execute(ctx, nil, "TestExecutorStartTxnStmt", session, tcase.beginSQL, nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecutorStartTxnStmt", session, tcase.beginSQL, nil) require.NoError(t, err) assert.Equal(t, tcase.expTxAccessMode, session.GetOrCreateOptions().TransactionAccessMode) - _, err = executor.Execute(ctx, nil, "TestExecutorStartTxnStmt", session, "rollback", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecutorStartTxnStmt", session, "rollback", nil) require.NoError(t, err) }) @@ -2605,17 +2741,13 @@ func TestExecutorStartTxnStmt(t *testing.T) { } func TestExecutorPrepareExecute(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, _ := createExecutorEnv(t) + executor.normalize = true session := NewAutocommitSession(&vtgatepb.Session{}) // prepare statement. 
- _, err := executor.Execute(context.Background(), - nil, - "TestExecutorPrepareExecute", - session, - "prepare prep_user from 'select * from user where id = ?'", - nil) + _, err := executor.Execute(context.Background(), nil, nil, "TestExecutorPrepareExecute", session, "prepare prep_user from 'select * from user where id = ?'", nil) require.NoError(t, err) prepData := session.PrepareStatement["prep_user"] require.NotNil(t, prepData) @@ -2623,20 +2755,10 @@ func TestExecutorPrepareExecute(t *testing.T) { require.EqualValues(t, 1, prepData.ParamsCount) // prepare statement using user defined variable - _, err = executor.Execute(context.Background(), - nil, - "TestExecutorPrepareExecute", - session, - "set @udv_query = 'select * from user where id in (?,?,?)'", - nil) + _, err = executor.Execute(context.Background(), nil, nil, "TestExecutorPrepareExecute", session, "set @udv_query = 'select * from user where id in (?,?,?)'", nil) require.NoError(t, err) - _, err = executor.Execute(context.Background(), - nil, - "TestExecutorPrepareExecute", - session, - "prepare prep_user2 from @udv_query", - nil) + _, err = executor.Execute(context.Background(), nil, nil, "TestExecutorPrepareExecute", session, "prepare prep_user2 from @udv_query", nil) require.NoError(t, err) prepData = session.PrepareStatement["prep_user2"] require.NotNil(t, prepData) @@ -2644,50 +2766,36 @@ func TestExecutorPrepareExecute(t *testing.T) { require.EqualValues(t, 3, prepData.ParamsCount) // syntax error on prepared query - _, err = executor.Execute(context.Background(), - nil, - "TestExecutorPrepareExecute", - session, - "prepare prep_user2 from 'select'", - nil) + _, err = executor.Execute(context.Background(), nil, nil, "TestExecutorPrepareExecute", session, "prepare prep_user2 from 'select'", nil) require.Error(t, err) require.Nil(t, session.PrepareStatement["prep_user2"]) // prepared statement is cleared from the session. 
// user defined variable does not exists on prepared query - _, err = executor.Execute(context.Background(), - nil, - "TestExecutorPrepareExecute", - session, - "prepare prep_user from @foo", - nil) + _, err = executor.Execute(context.Background(), nil, nil, "TestExecutorPrepareExecute", session, "prepare prep_user from @foo", nil) require.Error(t, err) require.Nil(t, session.PrepareStatement["prep_user"]) // prepared statement is cleared from the session. // empty prepared query - _, err = executor.Execute(context.Background(), - nil, - "TestExecutorPrepareExecute", - session, - "prepare prep_user from ''", - nil) + _, err = executor.Execute(context.Background(), nil, nil, "TestExecutorPrepareExecute", session, "prepare prep_user from ''", nil) require.Error(t, err) } func TestExecutorTruncateErrors(t *testing.T) { + executor, _, _, _, ctx := createExecutorEnv(t) + save := truncateErrorLen truncateErrorLen = 32 defer func() { truncateErrorLen = save }() - executor, _, _, _ := createExecutorEnv() session := NewSafeSession(&vtgatepb.Session{}) fn := func(r *sqltypes.Result) error { return nil } - _, err := executor.Execute(ctx, nil, "TestExecute", session, "invalid statement", nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, "invalid statement", nil) assert.EqualError(t, err, "syntax error at posi [TRUNCATED]") - err = executor.StreamExecute(ctx, nil, "TestExecute", session, "invalid statement", nil, fn) + err = executor.StreamExecute(ctx, nil, nil, "TestExecute", session, "invalid statement", nil, fn) assert.EqualError(t, err, "syntax error at posi [TRUNCATED]") _, err = executor.Prepare(context.Background(), "TestExecute", session, "invalid statement", nil) @@ -2695,7 +2803,7 @@ func TestExecutorTruncateErrors(t *testing.T) { } func TestExecutorFlushStmt(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, _ := createExecutorEnv(t) tcs := []struct { targetStr string @@ -2739,7 +2847,7 @@ func 
TestExecutorFlushStmt(t *testing.T) { for _, tc := range tcs { t.Run(tc.query+tc.targetStr, func(t *testing.T) { - _, err := executor.Execute(context.Background(), nil, "TestExecutorFlushStmt", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), tc.query, nil) + _, err := executor.Execute(context.Background(), nil, nil, "TestExecutorFlushStmt", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), tc.query, nil) if tc.expectedErr == "" { require.NoError(t, err) } else { @@ -2750,8 +2858,90 @@ func TestExecutorFlushStmt(t *testing.T) { } } +// TestExecutorKillStmt tests the kill statements on executor. +func TestExecutorKillStmt(t *testing.T) { + executor, _, _, _, _ := createExecutorEnv(t) + + tcs := []struct { + errStr string + query string + disallow bool + + expectedLog string + }{{ + query: "kill 42", + expectedLog: "kill connection: 42", + }, { + query: "kill query 42", + expectedLog: "kill query: 42", + }, { + query: "kill 42", + errStr: "connection does not exists: 42", + }, { + query: "kill query 24", + errStr: "connection does not exists: 24", + }, { + query: "kill connection 1", + disallow: true, + errStr: "VT07001: kill statement execution not permitted.", + }, { + query: "kill query 1", + disallow: true, + errStr: "VT07001: kill statement execution not permitted.", + }} + + for _, tc := range tcs { + allowKillStmt = !tc.disallow + t.Run("execute:"+tc.query+tc.errStr, func(t *testing.T) { + mysqlCtx := &fakeMysqlConnection{ErrMsg: tc.errStr} + _, err := executor.Execute(context.Background(), mysqlCtx, nil, "TestExecutorKillStmt", NewAutocommitSession(&vtgatepb.Session{}), tc.query, nil) + if tc.errStr != "" { + require.ErrorContains(t, err, tc.errStr) + } else { + require.NoError(t, err) + require.Equal(t, mysqlCtx.Log[0], tc.expectedLog) + } + }) + t.Run("stream:"+tc.query+tc.errStr, func(t *testing.T) { + mysqlCtx := &fakeMysqlConnection{ErrMsg: tc.errStr} + err := executor.StreamExecute(context.Background(), mysqlCtx, nil, 
"TestExecutorKillStmt", NewAutocommitSession(&vtgatepb.Session{}), tc.query, nil, func(result *sqltypes.Result) error { + return nil + }) + if tc.errStr != "" { + require.ErrorContains(t, err, tc.errStr) + } else { + require.NoError(t, err) + require.Contains(t, mysqlCtx.Log[0], tc.expectedLog) + } + }) + } +} + +type fakeMysqlConnection struct { + ErrMsg string + Log []string +} + +func (f *fakeMysqlConnection) KillQuery(connID uint32) error { + if f.ErrMsg != "" { + return errors.New(f.ErrMsg) + } + f.Log = append(f.Log, fmt.Sprintf("kill query: %d", connID)) + return nil +} + +func (f *fakeMysqlConnection) KillConnection(ctx context.Context, connID uint32) error { + if f.ErrMsg != "" { + return errors.New(f.ErrMsg) + } + f.Log = append(f.Log, fmt.Sprintf("kill connection: %d", connID)) + return nil +} + +var _ vtgateservice.MySQLConnection = (*fakeMysqlConnection)(nil) + func exec(executor *Executor, session *SafeSession, sql string) (*sqltypes.Result, error) { - return executor.Execute(context.Background(), nil, "TestExecute", session, sql, nil) + return executor.Execute(context.Background(), nil, nil, "TestExecute", session, sql, nil) } func makeComments(text string) sqlparser.MarginComments { diff --git a/go/vt/vtgate/executor_vschema_ddl_test.go b/go/vt/vtgate/executor_vschema_ddl_test.go index 3e3d445f21a..1cf8b6bb902 100644 --- a/go/vt/vtgate/executor_vschema_ddl_test.go +++ b/go/vt/vtgate/executor_vschema_ddl_test.go @@ -17,8 +17,9 @@ limitations under the License. 
package vtgate import ( + "context" "reflect" - "sort" + "slices" "testing" "time" @@ -28,8 +29,6 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "context" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/vtgate/vschemaacl" @@ -80,13 +79,19 @@ func waitForVschemaTables(t *testing.T, ks string, tables []string, executor *Ex // Wait up to 100ms until the vindex manager gets notified of the update for i := 0; i < 10; i++ { vschema := executor.vm.GetCurrentSrvVschema() - gotTables := []string{} + var gotTables []string for t := range vschema.Keyspaces[ks].Tables { gotTables = append(gotTables, t) } - sort.Strings(tables) - sort.Strings(gotTables) - if reflect.DeepEqual(tables, gotTables) { + + foundAll := true + for _, expTbl := range tables { + if !slices.Contains(gotTables, expTbl) { + foundAll = false + break + } + } + if foundAll { return vschema } time.Sleep(10 * time.Millisecond) @@ -96,7 +101,6 @@ func waitForVschemaTables(t *testing.T, ks string, tables []string, executor *Ex return nil } -// nolint func waitForColVindexes(t *testing.T, ks, table string, names []string, executor *Executor) *vschemapb.SrvVSchema { t.Helper() @@ -136,11 +140,11 @@ func TestPlanExecutorAlterVSchemaKeyspace(t *testing.T) { defer func() { vschemaacl.AuthorizedDDLUsers = "" }() - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true}) vschemaUpdates := make(chan *vschemapb.SrvVSchema, 2) - executor.serv.WatchSrvVSchema(context.Background(), "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { + executor.serv.WatchSrvVSchema(ctx, "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { vschemaUpdates <- vschema return true }) @@ -152,7 +156,7 @@ func TestPlanExecutorAlterVSchemaKeyspace(t *testing.T) { } stmt := "alter vschema create vindex TestExecutor.test_vindex using hash" - _, err := 
executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _, vindex := waitForVindex(t, "TestExecutor", "test_vindex", vschemaUpdates, executor) @@ -164,11 +168,11 @@ func TestPlanExecutorCreateVindexDDL(t *testing.T) { defer func() { vschemaacl.AuthorizedDDLUsers = "" }() - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) ks := "TestExecutor" vschemaUpdates := make(chan *vschemapb.SrvVSchema, 4) - executor.serv.WatchSrvVSchema(context.Background(), "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { + executor.serv.WatchSrvVSchema(ctx, "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { vschemaUpdates <- vschema return true }) @@ -181,7 +185,7 @@ func TestPlanExecutorCreateVindexDDL(t *testing.T) { session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) stmt := "alter vschema create vindex test_vindex using hash" - _, err := executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _, vindex := waitForVindex(t, ks, "test_vindex", vschemaUpdates, executor) @@ -189,7 +193,7 @@ func TestPlanExecutorCreateVindexDDL(t *testing.T) { t.Errorf("updated vschema did not contain test_vindex") } - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) wantErr := "vindex test_vindex already exists in keyspace TestExecutor" if err == nil || err.Error() != wantErr { t.Errorf("create duplicate vindex: %v, want %s", err, wantErr) @@ -206,11 +210,11 @@ func TestPlanExecutorDropVindexDDL(t *testing.T) { defer func() { vschemaacl.AuthorizedDDLUsers = "" }() - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) ks := "TestExecutor" 
vschemaUpdates := make(chan *vschemapb.SrvVSchema, 4) - executor.serv.WatchSrvVSchema(context.Background(), "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { + executor.serv.WatchSrvVSchema(ctx, "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { vschemaUpdates <- vschema return true }) @@ -223,14 +227,14 @@ func TestPlanExecutorDropVindexDDL(t *testing.T) { session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) stmt := "alter vschema drop vindex test_vindex" - _, err := executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) wantErr := "vindex test_vindex does not exists in keyspace TestExecutor" if err == nil || err.Error() != wantErr { t.Errorf("want error %v got %v", wantErr, err) } stmt = "alter vschema drop vindex TestExecutor.test_vindex" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) wantErr = "vindex test_vindex does not exists in keyspace TestExecutor" if err == nil || err.Error() != wantErr { t.Errorf("want error %v got %v", wantErr, err) @@ -238,7 +242,7 @@ func TestPlanExecutorDropVindexDDL(t *testing.T) { // add one vindex that has never been used by the tables stmt = "alter vschema create vindex test_vindex using hash" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _, vindex := waitForVindex(t, ks, "test_vindex", vschemaUpdates, executor) @@ -248,7 +252,7 @@ func TestPlanExecutorDropVindexDDL(t *testing.T) { // drop an existing vindex that has never been used by the tables stmt = "alter vschema drop vindex TestExecutor.test_vindex" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, 
"TestExecute", session, stmt, nil) require.NoError(t, err) vschema = <-vschemaUpdates _, ok = vschema.Keyspaces[ks].Vindexes["test_vindex"] @@ -258,7 +262,7 @@ func TestPlanExecutorDropVindexDDL(t *testing.T) { // drop an existing vindex that is used by at least one table stmt = "alter vschema drop vindex TestExecutor.keyspace_id" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) wantErr = "can not drop vindex cause keyspace_id still defined on table ksid_table" if err == nil || err.Error() != wantErr { t.Errorf("drop vindex still defined: %v, want %s", err, wantErr) @@ -275,11 +279,11 @@ func TestPlanExecutorAddDropVschemaTableDDL(t *testing.T) { defer func() { vschemaacl.AuthorizedDDLUsers = "" }() - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) ks := KsTestUnsharded vschemaUpdates := make(chan *vschemapb.SrvVSchema, 4) - executor.serv.WatchSrvVSchema(context.Background(), "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { + executor.serv.WatchSrvVSchema(ctx, "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { vschemaUpdates <- vschema return true }) @@ -297,19 +301,19 @@ func TestPlanExecutorAddDropVschemaTableDDL(t *testing.T) { session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) stmt := "alter vschema add table test_table" - _, err := executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _ = waitForVschemaTables(t, ks, append([]string{"test_table"}, vschemaTables...), executor) stmt = "alter vschema add table test_table2" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _ = 
waitForVschemaTables(t, ks, append([]string{"test_table", "test_table2"}, vschemaTables...), executor) // Should fail adding a table on a sharded keyspace session = NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) stmt = "alter vschema add table test_table" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) wantErr := "add vschema table: unsupported on sharded keyspace TestExecutor" if err == nil || err.Error() != wantErr { t.Errorf("want error %v got %v", wantErr, err) @@ -332,7 +336,7 @@ func TestExecutorAddSequenceDDL(t *testing.T) { defer func() { vschemaacl.AuthorizedDDLUsers = "" }() - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) ks := KsTestUnsharded vschema := executor.vm.GetCurrentSrvVschema() @@ -344,7 +348,7 @@ func TestExecutorAddSequenceDDL(t *testing.T) { session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) stmt := "alter vschema add sequence test_seq" - _, err := executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _ = waitForVschemaTables(t, ks, append(vschemaTables, []string{"test_seq"}...), executor) vschema = executor.vm.GetCurrentSrvVschema() @@ -359,7 +363,7 @@ func TestExecutorAddSequenceDDL(t *testing.T) { session = NewSafeSession(&vtgatepb.Session{TargetString: ksSharded}) stmt = "alter vschema add sequence sequence_table" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) wantErr := "add sequence table: unsupported on sharded keyspace TestExecutor" if err == nil || err.Error() != wantErr { @@ -368,13 +372,13 @@ func TestExecutorAddSequenceDDL(t *testing.T) { // Should be able to add autoincrement to table in sharded 
keyspace stmt = "alter vschema on test_table add vindex hash_index (id)" - if _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil); err != nil { + if _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil); err != nil { t.Error(err) } time.Sleep(10 * time.Millisecond) stmt = "alter vschema on test_table add auto_increment id using `db-name`.`test_seq`" - if _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil); err != nil { + if _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil); err != nil { t.Error(err) } time.Sleep(10 * time.Millisecond) @@ -387,16 +391,115 @@ func TestExecutorAddSequenceDDL(t *testing.T) { } } +func TestExecutorDropSequenceDDL(t *testing.T) { + vschemaacl.AuthorizedDDLUsers = "%" + defer func() { + vschemaacl.AuthorizedDDLUsers = "" + }() + executor, _, _, _, ctx := createExecutorEnv(t) + ks := KsTestUnsharded + + vschema := executor.vm.GetCurrentSrvVschema() + + _, ok := vschema.Keyspaces[ks].Tables["test_seq"] + if ok { + t.Fatalf("test_seq should not exist in original vschema") + } + + session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) + + // add test sequence + stmt := "alter vschema add sequence test_seq" + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) + require.NoError(t, err) + _ = waitForVschemaTables(t, ks, []string{"test_seq"}, executor) + vschema = executor.vm.GetCurrentSrvVschema() + table := vschema.Keyspaces[ks].Tables["test_seq"] + wantType := "sequence" + require.Equal(t, wantType, table.Type) + + // note the last vschema updated time. 
+ ts := executor.VSchema().GetCreated() + + // drop existing test sequence + stmt = "alter vschema drop sequence test_seq" + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) + require.NoError(t, err) + + ctxWithTimeout, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + if !waitForNewerVSchema(ctxWithTimeout, executor, ts) { + t.Fatalf("vschema did not drop the sequene 'test_seq'") + } + + // Should fail dropping a non-existing test sequence + session = NewSafeSession(&vtgatepb.Session{TargetString: ks}) + + stmt = "alter vschema drop sequence test_seq" + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) + + wantErr := "vschema does not contain sequence test_seq in keyspace TestUnsharded" + if err == nil || err.Error() != wantErr { + t.Errorf("want error %v got %v", wantErr, err) + } +} + +func TestExecutorDropAutoIncDDL(t *testing.T) { + vschemaacl.AuthorizedDDLUsers = "%" + defer func() { + vschemaacl.AuthorizedDDLUsers = "" + }() + executor, _, _, _, ctx := createExecutorEnv(t) + ks := KsTestUnsharded + + session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) + + stmt := "alter vschema add table test_table" + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) + require.NoError(t, err) + + _ = waitForVschemaTables(t, ks, []string{"test_table"}, executor) + ts := executor.VSchema().GetCreated() + + stmt = "alter vschema on test_table add auto_increment id using `db-name`.`test_seq`" + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) + require.NoError(t, err) + ctxWithTimeout, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + if !waitForNewerVSchema(ctxWithTimeout, executor, ts) { + t.Fatalf("vschema did not update with auto_increment for 'test_table'") + } + ts = executor.VSchema().GetCreated() + + wantAutoInc := &vschemapb.AutoIncrement{Column: "id", Sequence: "`db-name`.test_seq"} + gotAutoInc := 
executor.vm.GetCurrentSrvVschema().Keyspaces[ks].Tables["test_table"].AutoIncrement + + utils.MustMatch(t, wantAutoInc, gotAutoInc) + + stmt = "alter vschema on test_table drop auto_increment" + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) + require.NoError(t, err) + + ctxWithTimeout, cancel2 := context.WithTimeout(ctx, 5*time.Second) + defer cancel2() + if !waitForNewerVSchema(ctxWithTimeout, executor, ts) { + t.Fatalf("vschema did not drop the auto_increment for 'test_table'") + } + if executor.vm.GetCurrentSrvVschema().Keyspaces[ks].Tables["test_table"].AutoIncrement != nil { + t.Errorf("auto increment should be nil after drop") + } +} + func TestExecutorAddDropVindexDDL(t *testing.T) { vschemaacl.AuthorizedDDLUsers = "%" defer func() { vschemaacl.AuthorizedDDLUsers = "" }() - executor, sbc1, sbc2, sbclookup := createExecutorEnv() // nolint + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) ks := "TestExecutor" session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) vschemaUpdates := make(chan *vschemapb.SrvVSchema, 4) - executor.serv.WatchSrvVSchema(context.Background(), "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { + executor.serv.WatchSrvVSchema(ctx, "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { vschemaUpdates <- vschema return true }) @@ -407,14 +510,14 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { // Create a new vindex implicitly with the statement stmt := "alter vschema on test add vindex test_hash (id) using hash " - _, err := executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _, vindex := waitForVindex(t, ks, "test_hash", vschemaUpdates, executor) require.Equal(t, "hash", vindex.Type) _ = waitForColVindexes(t, ks, "test", []string{"test_hash"}, executor) - qr, err := executor.Execute(context.Background(), nil, "TestExecute", session, "show 
vschema vindexes on TestExecutor.test", nil) + qr, err := executor.Execute(ctx, nil, nil, "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) require.NoError(t, err) wantqr := &sqltypes.Result{ Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), @@ -426,17 +529,17 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { // Drop it stmt = "alter vschema on test drop vindex test_hash" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _, _ = waitForVindex(t, ks, "test_hash", vschemaUpdates, executor) _ = waitForColVindexes(t, ks, "test", []string{}, executor) - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) require.EqualError(t, err, "VT05005: table 'test' does not exist in keyspace 'TestExecutor'") // add it again using the same syntax stmt = "alter vschema on test add vindex test_hash (id) using hash " - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _, vindex = waitForVindex(t, ks, "test_hash", vschemaUpdates, executor) @@ -444,7 +547,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { _ = waitForColVindexes(t, ks, "test", []string{"test_hash"}, executor) - qr, err = executor.Execute(context.Background(), nil, "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), @@ -457,7 +560,7 @@ func 
TestExecutorAddDropVindexDDL(t *testing.T) { // add another stmt = "alter vschema on test add vindex test_lookup (c1,c2) using lookup with owner=`test`, from=`c1,c2`, table=test_lookup, to=keyspace_id" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) vschema, vindex = waitForVindex(t, ks, "test_lookup", vschemaUpdates, executor) @@ -474,7 +577,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { t.Fatalf("table test not defined in vschema") } - qr, err = executor.Execute(context.Background(), nil, "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), @@ -486,7 +589,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { utils.MustMatch(t, wantqr, qr) stmt = "alter vschema on test add vindex test_hash_id2 (id2) using hash" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) vschema, vindex = waitForVindex(t, ks, "test_hash_id2", vschemaUpdates, executor) @@ -503,7 +606,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { t.Fatalf("table test not defined in vschema") } - qr, err = executor.Execute(context.Background(), nil, "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), @@ -517,13 +620,13 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { // drop one stmt = 
"alter vschema on test drop vindex test_lookup" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) // wait for up to 50ms for it to disappear deadline := time.Now().Add(50 * time.Millisecond) for { - qr, err = executor.Execute(context.Background(), nil, "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), @@ -544,7 +647,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { // use the newly created vindex on a new table stmt = "alter vschema on test2 add vindex test_hash (id)" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) vschema, vindex = waitForVindex(t, ks, "test_hash", vschemaUpdates, executor) @@ -557,7 +660,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { // create an identical vindex definition on a different table stmt = "alter vschema on test2 add vindex test_lookup (c1,c2) using lookup with owner=`test`, from=`c1,c2`, table=test_lookup, to=keyspace_id" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) vschema, vindex = waitForVindex(t, ks, "test_lookup", vschemaUpdates, executor) @@ -568,7 +671,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { require.Len(t, table.ColumnVindexes, 2) require.Equal(t, "test_lookup", table.ColumnVindexes[1].Name) - qr, err = executor.Execute(context.Background(), nil, "TestExecute", session, "show vschema vindexes on 
TestExecutor.test2", nil) + qr, err = executor.Execute(ctx, nil, nil, "TestExecute", session, "show vschema vindexes on TestExecutor.test2", nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), @@ -581,7 +684,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { // now make sure we can create another vindex that references a table with dashes (i.e. escaping is necessary) stmt = "alter vschema on test2 add vindex test_lookup_fqn(c1,c2) using consistent_lookup_unique with owner=`test`, from=`c1,c2`, table=`test-keyspace`.`lookup-fqn`, to=`keyspace_id`" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _, vindex = waitForVindex(t, ks, "test_lookup_fqn", vschemaUpdates, executor) @@ -592,35 +695,36 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { require.Equal(t, "keyspace_id", vindex.Params["to"]) stmt = "alter vschema on test2 add vindex nonexistent (c1,c2)" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, "vindex nonexistent does not exist in keyspace TestExecutor") stmt = "alter vschema on test2 add vindex test_hash (c1,c2) using lookup" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, "vindex test_hash defined with type hash not lookup") stmt = "alter vschema on test2 add vindex test_lookup (c1,c2) using lookup with owner=xyz" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, "vindex test_lookup defined with owner 
test not xyz") stmt = "alter vschema on test2 add vindex test_lookup (c1,c2) using lookup with owner=`test`, foo=bar" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, "vindex test_lookup defined with different parameters") stmt = "alter vschema on nonexistent drop vindex test_lookup" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, "table TestExecutor.nonexistent not defined in vschema") stmt = "alter vschema on nonexistent drop vindex test_lookup" - _, err = executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: "InvalidKeyspace"}), stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: "InvalidKeyspace"}), stmt, nil) require.EqualError(t, err, "VT05003: unknown database 'InvalidKeyspace' in vschema") stmt = "alter vschema on nowhere.nohow drop vindex test_lookup" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, "VT05003: unknown database 'nowhere' in vschema") stmt = "alter vschema on test drop vindex test_lookup" - _, err = executor.Execute(context.Background(), nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, "vindex test_lookup not defined in table TestExecutor.test") // no queries should have gone to any tablets @@ -635,30 +739,30 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { func TestPlanExecutorVindexDDLACL(t *testing.T) { // t.Skip("not yet planned") - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, 
ctx := createExecutorEnv(t) ks := "TestExecutor" session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) - ctxRedUser := callerid.NewContext(context.Background(), &vtrpcpb.CallerID{}, &querypb.VTGateCallerID{Username: "redUser"}) - ctxBlueUser := callerid.NewContext(context.Background(), &vtrpcpb.CallerID{}, &querypb.VTGateCallerID{Username: "blueUser"}) + ctxRedUser := callerid.NewContext(ctx, &vtrpcpb.CallerID{}, &querypb.VTGateCallerID{Username: "redUser"}) + ctxBlueUser := callerid.NewContext(ctx, &vtrpcpb.CallerID{}, &querypb.VTGateCallerID{Username: "blueUser"}) // test that by default no users can perform the operation stmt := "alter vschema create vindex test_hash using hash" - _, err := executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctxRedUser, nil, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, `User 'redUser' is not authorized to perform vschema operations`) - _, err = executor.Execute(ctxBlueUser, nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctxBlueUser, nil, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, `User 'blueUser' is not authorized to perform vschema operations`) // test when all users are enabled vschemaacl.AuthorizedDDLUsers = "%" vschemaacl.Init() - _, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctxRedUser, nil, nil, "TestExecute", session, stmt, nil) if err != nil { t.Errorf("unexpected error '%v'", err) } stmt = "alter vschema create vindex test_hash2 using hash" - _, err = executor.Execute(ctxBlueUser, nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctxBlueUser, nil, nil, "TestExecute", session, stmt, nil) if err != nil { t.Errorf("unexpected error '%v'", err) } @@ -666,11 +770,11 @@ func TestPlanExecutorVindexDDLACL(t *testing.T) { // test when only one user is enabled vschemaacl.AuthorizedDDLUsers = "orangeUser, blueUser, greenUser" 
vschemaacl.Init() - _, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctxRedUser, nil, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, `User 'redUser' is not authorized to perform vschema operations`) stmt = "alter vschema create vindex test_hash3 using hash" - _, err = executor.Execute(ctxBlueUser, nil, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctxBlueUser, nil, nil, "TestExecute", session, stmt, nil) if err != nil { t.Errorf("unexpected error '%v'", err) } diff --git a/go/vt/vtgate/executor_vstream_test.go b/go/vt/vtgate/executor_vstream_test.go index 876961bf4ed..1f3294c1eca 100644 --- a/go/vt/vtgate/executor_vstream_test.go +++ b/go/vt/vtgate/executor_vstream_test.go @@ -28,8 +28,6 @@ import ( vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - "context" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -38,10 +36,10 @@ import ( // TestVStreamSQLUnsharded tests the experimental 'vstream * from' vtgate olap query func TestVStreamSQLUnsharded(t *testing.T) { - t.Skip("this test is failing due to races") //FIXME - executor, _, _, sbcLookup := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + t.Skip("this test is failing due to races") // FIXME + executor, _, _, sbcLookup, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) send1 := []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_GTID, Gtid: "gtid01"}, {Type: binlogdatapb.VEventType_FIELD, FieldEvent: &binlogdatapb.FieldEvent{TableName: "t1", Fields: []*querypb.Field{ @@ -77,21 +75,11 @@ func TestVStreamSQLUnsharded(t *testing.T) { sql := "vstream * from t1" results := make(chan *sqltypes.Result, 20) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() go func() { - err := executor.StreamExecute( - ctx, - nil, - "TestExecuteStream", - 
NewAutocommitSession(&vtgatepb.Session{TargetString: KsTestUnsharded}), - sql, - nil, - func(qr *sqltypes.Result) error { - results <- qr - return nil - }, - ) + err := executor.StreamExecute(ctx, nil, nil, "TestExecuteStream", NewAutocommitSession(&vtgatepb.Session{TargetString: KsTestUnsharded}), sql, nil, func(qr *sqltypes.Result) error { + results <- qr + return nil + }) require.NoError(t, err) }() timer := time.NewTimer(5 * time.Second) diff --git a/go/vt/vtgate/grpcvtgateconn/suite_test.go b/go/vt/vtgate/grpcvtgateconn/suite_test.go index 7cfbdc0429e..6e01f4e25e0 100644 --- a/go/vt/vtgate/grpcvtgateconn/suite_test.go +++ b/go/vt/vtgate/grpcvtgateconn/suite_test.go @@ -22,6 +22,7 @@ package grpcvtgateconn // moved back to its own package for reusability. import ( + "context" "errors" "fmt" "io" @@ -32,22 +33,19 @@ import ( "google.golang.org/protobuf/proto" - "context" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/tb" "vitess.io/vitess/go/vt/callerid" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/vtgateconn" - "vitess.io/vitess/go/vt/vtgate/vtgateservice" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vtgateconn" + "vitess.io/vitess/go/vt/vtgate/vtgateservice" ) // fakeVTGateService has the server side of this fake @@ -97,7 +95,7 @@ func (q *queryExecute) equal(q2 *queryExecute) bool { } // Execute is part of the VTGateService interface -func (f *fakeVTGateService) Execute(ctx context.Context, c *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { +func (f *fakeVTGateService) Execute(ctx context.Context, mysqlCtx 
vtgateservice.MySQLConnection, c *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { if f.hasError { return session, nil, errTestVtGateError } @@ -158,7 +156,7 @@ func (f *fakeVTGateService) ExecuteBatch(ctx context.Context, c *mysql.Conn, ses } // StreamExecute is part of the VTGateService interface -func (f *fakeVTGateService) StreamExecute(ctx context.Context, c *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { +func (f *fakeVTGateService) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, c *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { if f.panics { panic(fmt.Errorf("test forced panic")) } @@ -506,9 +504,9 @@ func testPrepare(t *testing.T, session *vtgateconn.VTGateSession) { execCase := execMap["request1"] _, err := session.Prepare(ctx, execCase.execQuery.SQL, execCase.execQuery.BindVariables) require.NoError(t, err) - //if !qr.Equal(execCase.result) { + // if !qr.Equal(execCase.result) { // t.Errorf("Unexpected result from Execute: got\n%#v want\n%#v", qr, execCase.result) - //} + // } _, err = session.Prepare(ctx, "none", nil) require.EqualError(t, err, "no match for: none") diff --git a/go/vt/vtgate/grpcvtgateservice/server.go b/go/vt/vtgate/grpcvtgateservice/server.go index 4797a9f5652..c97ea63a903 100644 --- a/go/vt/vtgate/grpcvtgateservice/server.go +++ b/go/vt/vtgate/grpcvtgateservice/server.go @@ -48,12 +48,15 @@ var ( useEffective bool useEffectiveGroups bool useStaticAuthenticationIdentity bool + + sendSessionInStreaming bool ) func registerFlags(fs *pflag.FlagSet) { fs.BoolVar(&useEffective, "grpc_use_effective_callerid", false, "If set, and SSL is not used, will set the immediate caller id from 
the effective caller id's principal.") fs.BoolVar(&useEffectiveGroups, "grpc-use-effective-groups", false, "If set, and SSL is not used, will set the immediate caller's security groups from the effective caller id's groups.") fs.BoolVar(&useStaticAuthenticationIdentity, "grpc-use-static-authentication-callerid", false, "If set, will set the immediate caller id to the username authenticated by the static auth plugin.") + fs.BoolVar(&sendSessionInStreaming, "grpc-send-session-in-streaming", false, "If set, will send the session as last packet in streaming api to support transactions in streaming") } func init() { @@ -142,7 +145,7 @@ func (vtg *VTGate) Execute(ctx context.Context, request *vtgatepb.ExecuteRequest if session == nil { session = &vtgatepb.Session{Autocommit: true} } - session, result, err := vtg.server.Execute(ctx, nil, session, request.Query.Sql, request.Query.BindVariables) + session, result, err := vtg.server.Execute(ctx, nil, nil, session, request.Query.Sql, request.Query.BindVariables) return &vtgatepb.ExecuteResponse{ Result: sqltypes.ResultToProto3(result), Session: session, @@ -184,7 +187,7 @@ func (vtg *VTGate) StreamExecute(request *vtgatepb.StreamExecuteRequest, stream session = &vtgatepb.Session{Autocommit: true} } - session, vtgErr := vtg.server.StreamExecute(ctx, nil, session, request.Query.Sql, request.Query.BindVariables, func(value *sqltypes.Result) error { + session, vtgErr := vtg.server.StreamExecute(ctx, nil, nil, session, request.Query.Sql, request.Query.BindVariables, func(value *sqltypes.Result) error { // Send is not safe to call concurrently, but vtgate // guarantees that it's not. return stream.Send(&vtgatepb.StreamExecuteResponse{ @@ -192,19 +195,22 @@ func (vtg *VTGate) StreamExecute(request *vtgatepb.StreamExecuteRequest, stream }) }) - // even if there is an error, session could have been modified. - // So, this needs to be sent back to the client. Session is sent in the last stream response. 
- lastErr := stream.Send(&vtgatepb.StreamExecuteResponse{ - Session: session, - }) - var errs []error if vtgErr != nil { errs = append(errs, vtgErr) } - if lastErr != nil { - errs = append(errs, lastErr) + + if sendSessionInStreaming { + // even if there is an error, session could have been modified. + // So, this needs to be sent back to the client. Session is sent in the last stream response. + lastErr := stream.Send(&vtgatepb.StreamExecuteResponse{ + Session: session, + }) + if lastErr != nil { + errs = append(errs, lastErr) + } } + return vterrors.ToGRPC(vterrors.Aggregate(errs)) } diff --git a/go/vt/vtgate/legacy_scatter_conn_test.go b/go/vt/vtgate/legacy_scatter_conn_test.go index ee5e9a2f5f6..7ada1c3ac31 100644 --- a/go/vt/vtgate/legacy_scatter_conn_test.go +++ b/go/vt/vtgate/legacy_scatter_conn_test.go @@ -44,10 +44,11 @@ import ( // This file uses the sandbox_test framework. func TestLegacyExecuteFailOnAutocommit(t *testing.T) { + ctx := utils.LeakCheckContext(t) createSandbox("TestExecuteFailOnAutocommit") hc := discovery.NewFakeHealthCheck(nil) - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 := hc.AddTestTablet("aa", "0", 1, "TestExecuteFailOnAutocommit", "0", topodatapb.TabletType_PRIMARY, true, 1, nil) sbc1 := hc.AddTestTablet("aa", "1", 1, "TestExecuteFailOnAutocommit", "1", topodatapb.TabletType_PRIMARY, true, 1, nil) @@ -107,8 +108,8 @@ func TestLegacyExecuteFailOnAutocommit(t *testing.T) { } func TestScatterConnExecuteMulti(t *testing.T) { - testScatterConnGeneric(t, "TestScatterConnExecuteMultiShard", func(sc *ScatterConn, shards []string) (*sqltypes.Result, error) { - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + testScatterConnGeneric(t, "TestScatterConnExecuteMultiShard", func(ctx context.Context, sc *ScatterConn, shards []string) (*sqltypes.Result, error) { + res := 
srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") rss, err := res.ResolveDestination(ctx, "TestScatterConnExecuteMultiShard", topodatapb.TabletType_REPLICA, key.DestinationShards(shards)) if err != nil { return nil, err @@ -128,8 +129,8 @@ func TestScatterConnExecuteMulti(t *testing.T) { } func TestScatterConnStreamExecuteMulti(t *testing.T) { - testScatterConnGeneric(t, "TestScatterConnStreamExecuteMulti", func(sc *ScatterConn, shards []string) (*sqltypes.Result, error) { - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + testScatterConnGeneric(t, "TestScatterConnStreamExecuteMulti", func(ctx context.Context, sc *ScatterConn, shards []string) (*sqltypes.Result, error) { + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") rss, err := res.ResolveDestination(ctx, "TestScatterConnStreamExecuteMulti", topodatapb.TabletType_REPLICA, key.DestinationShards(shards)) if err != nil { return nil, err @@ -155,13 +156,15 @@ func verifyScatterConnError(t *testing.T, err error, wantErr string, wantCode vt assert.Equal(t, wantCode, vterrors.Code(err)) } -func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, shards []string) (*sqltypes.Result, error)) { +func testScatterConnGeneric(t *testing.T, name string, f func(ctx context.Context, sc *ScatterConn, shards []string) (*sqltypes.Result, error)) { + ctx := utils.LeakCheckContext(t) + hc := discovery.NewFakeHealthCheck(nil) // no shard s := createSandbox(name) - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") - qr, err := f(sc, nil) + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") + qr, err := f(ctx, sc, nil) require.NoError(t, err) if qr.RowsAffected != 0 { t.Errorf("want 0, got %v", qr.RowsAffected) @@ -169,10 +172,10 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s // single shard s.Reset() - sc = 
newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc = newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc := hc.AddTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 - _, err = f(sc, []string{"0"}) + _, err = f(ctx, sc, []string{"0"}) want := fmt.Sprintf("target: %v.0.replica: INVALID_ARGUMENT error", name) // Verify server error string. if err == nil || err.Error() != want { @@ -186,12 +189,12 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s // two shards s.Reset() hc.Reset() - sc = newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc = newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 := hc.AddTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc1 := hc.AddTestTablet("aa", "1", 1, name, "1", topodatapb.TabletType_REPLICA, true, 1, nil) sbc0.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 sbc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 - _, err = f(sc, []string{"0", "1"}) + _, err = f(ctx, sc, []string{"0", "1"}) // Verify server errors are consolidated. 
want = fmt.Sprintf("target: %v.0.replica: INVALID_ARGUMENT error\ntarget: %v.1.replica: INVALID_ARGUMENT error", name, name) verifyScatterConnError(t, err, want, vtrpcpb.Code_INVALID_ARGUMENT) @@ -206,12 +209,12 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s // two shards with different errors s.Reset() hc.Reset() - sc = newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc = newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 = hc.AddTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc1 = hc.AddTestTablet("aa", "1", 1, name, "1", topodatapb.TabletType_REPLICA, true, 1, nil) sbc0.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 sbc1.MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1 - _, err = f(sc, []string{"0", "1"}) + _, err = f(ctx, sc, []string{"0", "1"}) // Verify server errors are consolidated. want = fmt.Sprintf("target: %v.0.replica: INVALID_ARGUMENT error\ntarget: %v.1.replica: RESOURCE_EXHAUSTED error", name, name) // We should only surface the higher priority error code @@ -227,9 +230,9 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s // duplicate shards s.Reset() hc.Reset() - sc = newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc = newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc = hc.AddTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil) - _, _ = f(sc, []string{"0", "0"}) + _, _ = f(ctx, sc, []string{"0", "0"}) // Ensure that we executed only once. 
if execCount := sbc.ExecCount.Load(); execCount != 1 { t.Errorf("want 1, got %v", execCount) @@ -238,10 +241,10 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s // no errors s.Reset() hc.Reset() - sc = newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc = newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 = hc.AddTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc1 = hc.AddTestTablet("aa", "1", 1, name, "1", topodatapb.TabletType_REPLICA, true, 1, nil) - qr, err = f(sc, []string{"0", "1"}) + qr, err = f(ctx, sc, []string{"0", "1"}) if err != nil { t.Fatalf("want nil, got %v", err) } @@ -260,17 +263,19 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s } func TestMaxMemoryRows(t *testing.T) { + ctx := utils.LeakCheckContext(t) + save := maxMemoryRows maxMemoryRows = 3 defer func() { maxMemoryRows = save }() createSandbox("TestMaxMemoryRows") hc := discovery.NewFakeHealthCheck(nil) - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 := hc.AddTestTablet("aa", "0", 1, "TestMaxMemoryRows", "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc1 := hc.AddTestTablet("aa", "1", 1, "TestMaxMemoryRows", "1", topodatapb.TabletType_REPLICA, true, 1, nil) - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") rss, _, err := res.ResolveDestinations(ctx, "TestMaxMemoryRows", topodatapb.TabletType_REPLICA, nil, []key.Destination{key.DestinationShard("0"), key.DestinationShard("1")}) require.NoError(t, err) @@ -315,12 +320,13 @@ func TestMaxMemoryRows(t *testing.T) { } func TestLegaceHealthCheckFailsOnReservedConnections(t *testing.T) { + ctx := utils.LeakCheckContext(t) keyspace := "keyspace" createSandbox(keyspace) 
hc := discovery.NewFakeHealthCheck(nil) - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") session := NewSafeSession(&vtgatepb.Session{InTransaction: false, InReservedConn: true}) destinations := []key.Destination{key.DestinationShard("0")} @@ -340,12 +346,12 @@ func TestLegaceHealthCheckFailsOnReservedConnections(t *testing.T) { require.Error(t, vterrors.Aggregate(errs)) } -func executeOnShards(t *testing.T, res *srvtopo.Resolver, keyspace string, sc *ScatterConn, session *SafeSession, destinations []key.Destination) { +func executeOnShards(t *testing.T, ctx context.Context, res *srvtopo.Resolver, keyspace string, sc *ScatterConn, session *SafeSession, destinations []key.Destination) { t.Helper() - require.Empty(t, executeOnShardsReturnsErr(t, res, keyspace, sc, session, destinations)) + require.Empty(t, executeOnShardsReturnsErr(t, ctx, res, keyspace, sc, session, destinations)) } -func executeOnShardsReturnsErr(t *testing.T, res *srvtopo.Resolver, keyspace string, sc *ScatterConn, session *SafeSession, destinations []key.Destination) error { +func executeOnShardsReturnsErr(t *testing.T, ctx context.Context, res *srvtopo.Resolver, keyspace string, sc *ScatterConn, session *SafeSession, destinations []key.Destination) error { t.Helper() rss, _, err := res.ResolveDestinations(ctx, keyspace, topodatapb.TabletType_REPLICA, nil, destinations) require.NoError(t, err) @@ -364,9 +370,10 @@ func executeOnShardsReturnsErr(t *testing.T, res *srvtopo.Resolver, keyspace str } func TestMultiExecs(t *testing.T) { + ctx := utils.LeakCheckContext(t) createSandbox("TestMultiExecs") hc := discovery.NewFakeHealthCheck(nil) - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := 
newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 := hc.AddTestTablet("aa", "0", 1, "TestMultiExecs", "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc1 := hc.AddTestTablet("aa", "1", 1, "TestMultiExecs", "1", topodatapb.TabletType_REPLICA, true, 1, nil) @@ -460,15 +467,16 @@ func TestMultiExecs(t *testing.T) { } func TestScatterConnSingleDB(t *testing.T) { + ctx := utils.LeakCheckContext(t) createSandbox("TestScatterConnSingleDB") hc := discovery.NewFakeHealthCheck(nil) hc.Reset() - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") hc.AddTestTablet("aa", "0", 1, "TestScatterConnSingleDB", "0", topodatapb.TabletType_PRIMARY, true, 1, nil) hc.AddTestTablet("aa", "1", 1, "TestScatterConnSingleDB", "1", topodatapb.TabletType_PRIMARY, true, 1, nil) - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") rss0, err := res.ResolveDestination(ctx, "TestScatterConnSingleDB", topodatapb.TabletType_PRIMARY, key.DestinationShard("0")) require.NoError(t, err) rss1, err := res.ResolveDestination(ctx, "TestScatterConnSingleDB", topodatapb.TabletType_PRIMARY, key.DestinationShard("1")) @@ -553,10 +561,11 @@ func TestAppendResult(t *testing.T) { } func TestReservePrequeries(t *testing.T) { + ctx := utils.LeakCheckContext(t) keyspace := "keyspace" createSandbox(keyspace) hc := discovery.NewFakeHealthCheck(nil) - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 := hc.AddTestTablet("aa", "0", 1, keyspace, "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc1 := hc.AddTestTablet("aa", "1", 1, keyspace, "1", topodatapb.TabletType_REPLICA, true, 1, nil) @@ -564,7 +573,7 @@ func TestReservePrequeries(t *testing.T) { 
sbc0.SetResults([]*sqltypes.Result{{}}) sbc1.SetResults([]*sqltypes.Result{{}}) - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") session := NewSafeSession(&vtgatepb.Session{ InTransaction: false, @@ -576,11 +585,11 @@ func TestReservePrequeries(t *testing.T) { }) destinations := []key.Destination{key.DestinationShard("0")} - executeOnShards(t, res, keyspace, sc, session, destinations) + executeOnShards(t, ctx, res, keyspace, sc, session, destinations) assert.Equal(t, 1+1, len(sbc0.StringQueries())) } -func newTestScatterConn(hc discovery.HealthCheck, serv srvtopo.Server, cell string) *ScatterConn { +func newTestScatterConn(ctx context.Context, hc discovery.HealthCheck, serv srvtopo.Server, cell string) *ScatterConn { // The topo.Server is used to start watching the cells described // in '-cells_to_watch' command line parameter, which is // empty by default. So it's unused in this test, set to nil. @@ -588,5 +597,3 @@ func newTestScatterConn(hc discovery.HealthCheck, serv srvtopo.Server, cell stri tc := NewTxConn(gw, vtgatepb.TransactionMode_TWOPC) return NewScatterConn("", tc, gw) } - -var ctx = context.Background() diff --git a/go/vt/vtgate/load_data.go b/go/vt/vtgate/load_data.go index e69ac240324..e19345da6ba 100644 --- a/go/vt/vtgate/load_data.go +++ b/go/vt/vtgate/load_data.go @@ -26,7 +26,6 @@ import ( "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -528,8 +527,7 @@ func (l *LoadDataInfo) GetGeneratedID(ctx context.Context, vcursor engine.VCurso } // If no rows are returned, it's an internal error, and the code // must panic, which will be caught and reported. 
- - minID, err = evalengine.ToInt64(qr.Rows[0][0]) + minID, err = qr.Rows[0][0].ToInt64() if err != nil { return 0, 0, err } diff --git a/go/vt/vtgate/mysql_protocol_test.go b/go/vt/vtgate/mysql_protocol_test.go deleted file mode 100644 index a0189f0c98f..00000000000 --- a/go/vt/vtgate/mysql_protocol_test.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vtgate - -import ( - "net" - "strconv" - "testing" - - "vitess.io/vitess/go/test/utils" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "context" - - "google.golang.org/protobuf/proto" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/vttablet/sandboxconn" - - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -func testMySQLProtocolExecute(t *testing.T) { - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - sbc := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - - c, err := mysqlConnect(&mysql.ConnParams{}) - if err != nil { - t.Fatal(err) - } - defer c.Close() - - qr, err := c.ExecuteFetch("select id from t1", 10, true /* wantfields */) - require.NoError(t, err) - utils.MustMatch(t, sandboxconn.SingleRowResult, qr, "mismatch in rows") - - options := &querypb.ExecuteOptions{ - IncludedFields: querypb.ExecuteOptions_ALL, - Workload: querypb.ExecuteOptions_OLTP, - } - if 
!proto.Equal(sbc.Options[0], options) { - t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc.Options[0], options) - } -} - -func testMySQLProtocolStreamExecute(t *testing.T) { - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - sbc := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - - c, err := mysqlConnect(&mysql.ConnParams{}) - if err != nil { - t.Fatal(err) - } - defer c.Close() - - _, err = c.ExecuteFetch("set workload='olap'", 1, true /* wantfields */) - require.NoError(t, err) - - qr, err := c.ExecuteFetch("select id from t1", 10, true /* wantfields */) - require.NoError(t, err) - utils.MustMatch(t, sandboxconn.SingleRowResult, qr, "mismatch in rows") - - options := &querypb.ExecuteOptions{ - IncludedFields: querypb.ExecuteOptions_ALL, - Workload: querypb.ExecuteOptions_OLAP, - } - if !proto.Equal(sbc.Options[0], options) { - t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc.Options[0], options) - } -} - -func TestMySQLProtocolExecuteUseStatement(t *testing.T) { - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - - c, err := mysqlConnect(&mysql.ConnParams{DbName: "@primary"}) - if err != nil { - t.Fatal(err) - } - defer c.Close() - - qr, err := c.ExecuteFetch("select id from t1", 10, true /* wantfields */) - require.NoError(t, err) - utils.MustMatch(t, sandboxconn.SingleRowResult, qr) - - qr, err = c.ExecuteFetch("show vitess_target", 1, false) - require.NoError(t, err) - assert.Equal(t, "VARCHAR(\"@primary\")", qr.Rows[0][0].String()) - - _, err = c.ExecuteFetch("use TestUnsharded", 0, false) - require.NoError(t, err) - - qr, err = c.ExecuteFetch("select id from t1", 10, true /* wantfields */) - require.NoError(t, err) - utils.MustMatch(t, sandboxconn.SingleRowResult, qr) - - // No such keyspace this will fail - _, err = c.ExecuteFetch("use 
InvalidKeyspace", 0, false) - require.Error(t, err) - assert.Contains(t, err.Error(), "VT05003: unknown database 'InvalidKeyspace' in vschema (errno 1049) (sqlstate 42000)") - - // That doesn't reset the vitess_target - qr, err = c.ExecuteFetch("show vitess_target", 1, false) - require.NoError(t, err) - assert.Equal(t, "VARCHAR(\"TestUnsharded\")", qr.Rows[0][0].String()) - - _, err = c.ExecuteFetch("use @replica", 0, false) - require.NoError(t, err) - - // No replica tablets, this should also fail - _, err = c.ExecuteFetch("select id from t1", 10, true /* wantfields */) - require.Error(t, err) - assert.Contains(t, err.Error(), `no healthy tablet available for 'keyspace:"TestUnsharded" shard:"0" tablet_type:REPLICA`) -} - -func TestMysqlProtocolInvalidDB(t *testing.T) { - _, err := mysqlConnect(&mysql.ConnParams{DbName: "invalidDB"}) - require.EqualError(t, err, "VT05003: unknown database 'invalidDB' in vschema (errno 1049) (sqlstate 42000)") -} - -func testMySQLProtocolClientFoundRows(t *testing.T) { - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - sbc := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - - c, err := mysqlConnect(&mysql.ConnParams{Flags: mysql.CapabilityClientFoundRows}) - if err != nil { - t.Fatal(err) - } - defer c.Close() - - qr, err := c.ExecuteFetch("select id from t1", 10, true /* wantfields */) - require.NoError(t, err) - utils.MustMatch(t, sandboxconn.SingleRowResult, qr) - - options := &querypb.ExecuteOptions{ - IncludedFields: querypb.ExecuteOptions_ALL, - ClientFoundRows: true, - Workload: querypb.ExecuteOptions_OLTP, - } - - if !proto.Equal(sbc.Options[0], options) { - t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc.Options[0], options) - } -} - -// mysqlConnect fills the host & port into params and connects -// to the mysql protocol port. 
-func mysqlConnect(params *mysql.ConnParams) (*mysql.Conn, error) { - host, port, err := net.SplitHostPort(mysqlListener.Addr().String()) - if err != nil { - return nil, err - } - portnum, _ := strconv.Atoi(port) - params.Host = host - params.Port = portnum - return mysql.Connect(context.Background(), params) -} diff --git a/go/vt/vtgate/plan_execute.go b/go/vt/vtgate/plan_execute.go index a58fc08c897..a1eec9d73ba 100644 --- a/go/vt/vtgate/plan_execute.go +++ b/go/vt/vtgate/plan_execute.go @@ -18,25 +18,48 @@ package vtgate import ( "context" + "fmt" + "strings" "time" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/vtgate/logstats" - "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/logstats" + "vitess.io/vitess/go/vt/vtgate/vtgateservice" ) type planExec func(ctx context.Context, plan *engine.Plan, vc *vcursorImpl, bindVars map[string]*querypb.BindVariable, startTime time.Time) error type txResult func(sqlparser.StatementType, *sqltypes.Result) error +func waitForNewerVSchema(ctx context.Context, e *Executor, lastVSchemaCreated time.Time) bool { + timeout := 30 * time.Second + pollingInterval := 10 * time.Millisecond + waitCtx, cancel := context.WithTimeout(ctx, timeout) + ticker := time.NewTicker(pollingInterval) + defer ticker.Stop() + defer cancel() + for { + select { + case <-waitCtx.Done(): + return false + case <-ticker.C: + if e.VSchema().GetCreated().After(lastVSchemaCreated) { + return true + } + } + } +} + func (e *Executor) newExecute( ctx context.Context, + mysqlCtx vtgateservice.MySQLConnection, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, @@ -57,10 +80,6 @@ func (e *Executor) newExecute( } query, comments := sqlparser.SplitMarginComments(sql) - vcursor, err := 
newVCursorImpl(safeSession, comments, e, logStats, e.vm, e.VSchema(), e.resolver.resolver, e.serv, e.warnShardedOnly, e.pv) - if err != nil { - return err - } // 2: Parse and Validate query stmt, reservedVars, err := parseAndValidateQuery(query) @@ -68,52 +87,95 @@ func (e *Executor) newExecute( return err } - // 3: Create a plan for the query - plan, err := e.getPlan(ctx, vcursor, query, stmt, comments, bindVars, reservedVars, e.normalize, logStats) - execStart := e.logPlanningFinished(logStats, plan) + var lastVSchemaCreated time.Time + vs := e.VSchema() + lastVSchemaCreated = vs.GetCreated() + for try := 0; try < MaxBufferingRetries; try++ { + if try > 0 && !vs.GetCreated().After(lastVSchemaCreated) { + // There is a race due to which the executor's vschema may not have been updated yet. + // Without a wait we fail non-deterministically since the previous vschema will not have the updated routing rules + if waitForNewerVSchema(ctx, e, lastVSchemaCreated) { + vs = e.VSchema() + } + } - if err != nil { - safeSession.ClearWarnings() - return err - } + vcursor, err := newVCursorImpl(safeSession, comments, e, logStats, e.vm, vs, e.resolver.resolver, e.serv, e.warnShardedOnly, e.pv) + if err != nil { + return err + } - if plan.Type != sqlparser.StmtShow { - safeSession.ClearWarnings() - } + // 3: Create a plan for the query + // If we are retrying, it is likely that the routing rules have changed and hence we need to + // replan the query since the target keyspace of the resolved shards may have changed as a + // result of MoveTables. So we cannot reuse the plan from the first try. + // When buffering ends, many queries might be getting planned at the same time. Ideally we + // should be able to reuse plans once the first drained query has been planned. For now, we + // punt on this and choose not to prematurely optimize since it is not clear how much caching + // will help and if it will result in hard-to-track edge cases. 
+ + var plan *engine.Plan + plan, err = e.getPlan(ctx, vcursor, query, stmt, comments, bindVars, reservedVars, e.normalize, logStats) + execStart := e.logPlanningFinished(logStats, plan) + + if err != nil { + safeSession.ClearWarnings() + return err + } - // add any warnings that the planner wants to add - for _, warning := range plan.Warnings { - safeSession.RecordWarning(warning) - } + if plan.Type != sqlparser.StmtShow { + safeSession.ClearWarnings() + } - result, err := e.handleTransactions(ctx, safeSession, plan, logStats, vcursor, stmt) - if err != nil { - return err - } - if result != nil { - return recResult(plan.Type, result) - } + // add any warnings that the planner wants to add + for _, warning := range plan.Warnings { + safeSession.RecordWarning(warning) + } - // 3: Prepare for execution - err = e.addNeededBindVars(vcursor, plan.BindVarNeeds, bindVars, safeSession) - if err != nil { - logStats.Error = err - return err - } + result, err := e.handleTransactions(ctx, mysqlCtx, safeSession, plan, logStats, vcursor, stmt) + if err != nil { + return err + } + if result != nil { + return recResult(plan.Type, result) + } - if plan.Instructions.NeedsTransaction() { - return e.insideTransaction(ctx, safeSession, logStats, - func() error { - return execPlan(ctx, plan, vcursor, bindVars, execStart) - }) - } + // 4: Prepare for execution + err = e.addNeededBindVars(vcursor, plan.BindVarNeeds, bindVars, safeSession) + if err != nil { + logStats.Error = err + return err + } + + // 5: Execute the plan and retry if needed + if plan.Instructions.NeedsTransaction() { + err = e.insideTransaction(ctx, safeSession, logStats, + func() error { + return execPlan(ctx, plan, vcursor, bindVars, execStart) + }) + } else { + err = execPlan(ctx, plan, vcursor, bindVars, execStart) + } + + if err == nil || safeSession.InTransaction() { + return err + } - return execPlan(ctx, plan, vcursor, bindVars, execStart) + rootCause := vterrors.RootCause(err) + if rootCause != nil && 
strings.Contains(rootCause.Error(), "enforce denied tables") { + log.V(2).Infof("Retry: %d, will retry query %s due to %v", try, query, err) + lastVSchemaCreated = vs.GetCreated() + continue + } + + return err + } + return vterrors.New(vtrpcpb.Code_INTERNAL, fmt.Sprintf("query %s failed after retries: %v ", query, err)) } // handleTransactions deals with transactional queries: begin, commit, rollback and savepoint management func (e *Executor) handleTransactions( ctx context.Context, + mysqlCtx vtgateservice.MySQLConnection, safeSession *SafeSession, plan *engine.Plan, logStats *logstats.LogStats, @@ -150,6 +212,8 @@ func (e *Executor) handleTransactions( return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.SPDoesNotExist, "SAVEPOINT does not exist: %s", query) }, vcursor.ignoreMaxMemoryRows) return qr, err + case sqlparser.StmtKill: + return e.handleKill(ctx, mysqlCtx, stmt, logStats) } return nil, nil } @@ -243,25 +307,37 @@ func (e *Executor) rollbackExecIfNeeded(ctx context.Context, c *mysql.Conn, safe // If it fails to rollback to the previous savepoint then, the transaction is forced to be rolled back. func (e *Executor) rollbackPartialExec(ctx context.Context, c *mysql.Conn, safeSession *SafeSession, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats) error { var err error + var errMsg strings.Builder + + // If the context got cancelled we still have to revert the partial DML execution. + // We cannot use the parent context here anymore. + if ctx.Err() != nil { + errMsg.WriteString("context canceled: ") + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + } // needs to rollback only once. 
rQuery := safeSession.rollbackOnPartialExec if rQuery != txRollback { safeSession.SavepointRollback() - _, _, err := e.execute(ctx, safeSession, rQuery, bindVars, logStats, c) + _, _, err = e.execute(ctx, nil, safeSession, rQuery, bindVars, logStats, c) + // If no error, the revert is successful with the savepoint. Notify the reason as error to the client. if err == nil { - return vterrors.New(vtrpcpb.Code_ABORTED, "reverted partial DML execution failure") + errMsg.WriteString("reverted partial DML execution failure") + return vterrors.New(vtrpcpb.Code_ABORTED, errMsg.String()) } // not able to rollback changes of the failed query, so have to abort the complete transaction. } // abort the transaction. _ = e.txConn.Rollback(ctx, safeSession) - var errMsg = "transaction rolled back to reverse changes of partial DML execution" + errMsg.WriteString("transaction rolled back to reverse changes of partial DML execution") if err != nil { - return vterrors.Wrap(err, errMsg) + return vterrors.Wrap(err, errMsg.String()) } - return vterrors.New(vtrpcpb.Code_ABORTED, errMsg) + return vterrors.New(vtrpcpb.Code_ABORTED, errMsg.String()) } func (e *Executor) setLogStats(logStats *logstats.LogStats, plan *engine.Plan, vcursor *vcursorImpl, execStart time.Time, err error, qr *sqltypes.Result) { diff --git a/go/vt/vtgate/planbuilder/aggregation_pushing.go b/go/vt/vtgate/planbuilder/aggregation_pushing.go deleted file mode 100644 index fd5c6d84735..00000000000 --- a/go/vt/vtgate/planbuilder/aggregation_pushing.go +++ /dev/null @@ -1,578 +0,0 @@ -/* -Copyright 2022 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - "strconv" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" -) - -// pushAggregation pushes grouping and aggregation as far down in the tree as possible -// the output `outputAggrsOffset` needs a little explaining: this is the offsets for aggregation - remember -// that aggregation can be broken down into multiple expressions that are later combined. -// this is why this output is a slice of slices -func (hp *horizonPlanning) pushAggregation( - ctx *plancontext.PlanningContext, - plan logicalPlan, - grouping []operators.GroupBy, - aggregations []operators.Aggr, - ignoreOutputOrder bool, -) (output logicalPlan, - groupingOffsets []offsets, - outputAggrsOffset [][]offsets, - pushed bool, - err error) { - pushed = true - switch plan := plan.(type) { - case *routeGen4: - output = plan - groupingOffsets, outputAggrsOffset, _, err = pushAggrOnRoute(ctx, plan, aggregations, grouping, ignoreOutputOrder) - return - - case *joinGen4: - output = plan - groupingOffsets, outputAggrsOffset, err = hp.pushAggrOnJoin(ctx, plan, grouping, aggregations) - return - - case *semiJoin: - output = plan - groupingOffsets, outputAggrsOffset, pushed, err = hp.pushAggrOnSemiJoin(ctx, plan, grouping, aggregations, ignoreOutputOrder) - return - - case *simpleProjection: - // we just remove the simpleProjection. 
We are doing an OA on top anyway, so no need to clean up the output columns - return hp.pushAggregation(ctx, plan.input, grouping, aggregations, ignoreOutputOrder) - - case *limit: - // if we are seeing a limit, it's because we are building on top of a derived table. - output = plan - pushed = false - - for _, grp := range grouping { - offset, wOffset, err := wrapAndPushExpr(ctx, grp.Inner, grp.SimplifiedExpr, plan.input) - if err != nil { - return nil, nil, nil, false, err - } - groupingOffsets = append(groupingOffsets, offsets{ - col: offset, - wsCol: wOffset, - }) - } - - for _, aggr := range aggregations { - var offset int - aggrExpr, ok := aggr.Original.Expr.(sqlparser.AggrFunc) - if !ok { - return nil, nil, nil, false, vterrors.VT13001(fmt.Sprintf("unexpected expression: %v", aggr.Original)) - } - - switch aggrExpr.(type) { - case *sqlparser.CountStar: - offset = 0 - default: - if len(aggrExpr.GetArgs()) != 1 { - return nil, nil, nil, false, vterrors.VT13001(fmt.Sprintf("unexpected expression: %v", aggrExpr)) - } - offset, _, err = pushProjection(ctx, &sqlparser.AliasedExpr{Expr: aggrExpr.GetArg() /*As: expr.As*/}, plan.input, true, true, false) - } - - if err != nil { - return nil, nil, nil, false, err - } - - outputAggrsOffset = append(outputAggrsOffset, []offsets{newOffset(offset)}) - } - - return - default: - err = vterrors.VT12001(fmt.Sprintf("using aggregation on top of a %T plan", plan)) - return - } -} - -func pushAggrOnRoute( - ctx *plancontext.PlanningContext, - plan *routeGen4, - aggregations []operators.Aggr, - grouping []operators.GroupBy, - ignoreOutputOrder bool, -) ( - groupingOffsets []offsets, - vtgateAggregation [][]offsets, - nonAggrOffsets []offsets, - err error, -) { - columnOrderMatters := !ignoreOutputOrder - sel, isSel := plan.Select.(*sqlparser.Select) - if !isSel { - return nil, nil, nil, vterrors.VT12001("plan aggregation on union") - } - - var groupingCols []int - var reorg = passThrough - - if columnOrderMatters { - // During 
this first run, we push the projections for the normal columns (not the weigh_string ones, that is) - // in the order that the user asked for it - // sortOffsets also returns a reorgFunc, - // that can be used to rearrange the produced outputs to the original order - var it *sortedIterator - var err error - grouping, reorg, it = sortOffsets(grouping, aggregations) - vtgateAggregation, groupingCols, err = pushAggrsAndGroupingInOrder(ctx, plan, it, sel, vtgateAggregation, groupingCols) - if err != nil { - return nil, nil, nil, err - } - } else { - // if we haven't already pushed the aggregations, now is the time - for _, aggregation := range aggregations { - param := addAggregationToSelect(ctx, sel, aggregation) - vtgateAggregation = append(vtgateAggregation, []offsets{param}) - } - } - - groupingOffsets = make([]offsets, 0, len(grouping)) - for idx, expr := range grouping { - sel.AddGroupBy(expr.Inner) - var pos offsets - if ignoreOutputOrder { - // we have not yet pushed anything, so we need to push the expression first - col, _, err := addExpressionToRoute(ctx, plan, &sqlparser.AliasedExpr{Expr: expr.Inner}, true) - if err != nil { - return nil, nil, nil, err - } - pos = newOffset(col) - } else { - pos = newOffset(groupingCols[idx]) - } - - if expr.SimplifiedExpr != nil && ctx.SemTable.NeedsWeightString(expr.Inner) { - wsExpr := weightStringFor(expr.SimplifiedExpr) - wsCol, _, err := addExpressionToRoute(ctx, plan, &sqlparser.AliasedExpr{Expr: wsExpr}, true) - if err != nil { - return nil, nil, nil, err - } - pos.wsCol = wsCol - sel.AddGroupBy(wsExpr) - } - groupingOffsets = append(groupingOffsets, pos) - } - - groupingOffsets, vtgateAggregation = reorg(groupingOffsets, vtgateAggregation) - return groupingOffsets, vtgateAggregation, nil, nil -} - -func pushAggrsAndGroupingInOrder( - ctx *plancontext.PlanningContext, - plan *routeGen4, - it *sortedIterator, - sel *sqlparser.Select, - vtgateAggregation [][]offsets, - groupingCols []int, -) ([][]offsets, []int, 
error) { - for it.next() { - groupBy, aggregation := it.current() - if aggregation != nil { - param := addAggregationToSelect(ctx, sel, *aggregation) - vtgateAggregation = append(vtgateAggregation, []offsets{param}) - continue - } - if groupBy != nil { - reuseCol := groupBy.InnerIndex == nil - col, _, err := addExpressionToRoute(ctx, plan, groupBy.AsAliasedExpr(), reuseCol) - groupingCols = append(groupingCols, col) - if err != nil { - return nil, nil, err - } - } - } - return vtgateAggregation, groupingCols, nil -} - -// addAggregationToSelect adds the aggregation to the SELECT statement and returns the AggregateParams to be used outside -func addAggregationToSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select, aggregation operators.Aggr) offsets { - // TODO: removing duplicated aggregation expression should also be done at the join level - for i, expr := range sel.SelectExprs { - aliasedExpr, isAliasedExpr := expr.(*sqlparser.AliasedExpr) - if !isAliasedExpr { - continue - } - if ctx.SemTable.EqualsExpr(aliasedExpr.Expr, aggregation.Original.Expr) { - return newOffset(i) - } - } - - sel.SelectExprs = append(sel.SelectExprs, aggregation.Original) - return newOffset(len(sel.SelectExprs) - 1) -} - -func countStarAggr() *operators.Aggr { - f := &sqlparser.CountStar{} - aggr := operators.NewAggr(popcode.AggregateCountStar, f, &sqlparser.AliasedExpr{Expr: f}, "count(*)") - return &aggr -} - -/* -We push down aggregations using the logic from the paper Orthogonal Optimization of Subqueries and Aggregation, by -Cesar A. Galindo-Legaria and Milind M. Joshi from Microsoft Corp. - -It explains how one can split an aggregation into local aggregates that depend on only one side of the join. -The local aggregates can then be gathered together to produce the global -group by/aggregate query that the user asked for. 
- -In Vitess, this is particularly useful because it allows us to push aggregation down to the routes, even when -we have to join the results at the vtgate level. Instead of doing all the grouping and aggregation at the -vtgate level, we can offload most of the work to MySQL, and at the vtgate just summarize the results. -*/ -func (hp *horizonPlanning) pushAggrOnJoin( - ctx *plancontext.PlanningContext, - join *joinGen4, - grouping []operators.GroupBy, - aggregations []operators.Aggr, -) ([]offsets, [][]offsets, error) { - // First we separate aggregations according to which side the dependencies are coming from - lhsAggrs, rhsAggrs, err := splitAggregationsToLeftAndRight(ctx, aggregations, join) - if err != nil { - return nil, nil, err - } - - // We need to group by the columns used in the join condition. - // If we don't, the LHS will not be able to return the column, and it can't be used to send down to the RHS - lhsCols, err := hp.createGroupingsForColumns(join.LHSColumns) - if err != nil { - return nil, nil, err - } - - // Here we split the grouping depending on if they should with the LHS or RHS of the query - // This is done by using the semantic table and checking dependencies - lhsGrouping, rhsGrouping, groupingOffsets, err := splitGroupingsToLeftAndRight(ctx, join, grouping, lhsCols) - if err != nil { - return nil, nil, err - } - - // If the rhs has no grouping column then a count(*) will return 0 from the query and will get mapped to the record from left hand side. - // This is an incorrect behaviour as the join condition has not matched, so we add a literal 1 to the select query and also group by on it. - // So that only if join condition matches the records will be mapped and returned. 
- if len(rhsGrouping) == 0 && len(rhsAggrs) != 0 { - l := sqlparser.NewIntLiteral("1") - aExpr := &sqlparser.AliasedExpr{ - Expr: l, - } - offset, _, err := pushProjection(ctx, aExpr, join.Right, true, true, false) - if err != nil { - return nil, nil, err - } - l = sqlparser.NewIntLiteral(strconv.Itoa(offset + 1)) - rhsGrouping = append(rhsGrouping, operators.NewGroupBy(l, nil, nil)) - } - - // Next we push the aggregations to both sides - newLHS, lhsOffsets, lhsAggrOffsets, _, err := hp.filteredPushAggregation(ctx, join.Left, lhsGrouping, lhsAggrs, true) - if err != nil { - return nil, nil, err - } - - newRHS, rhsOffsets, rhsAggrOffsets, _, err := hp.filteredPushAggregation(ctx, join.Right, rhsGrouping, rhsAggrs, true) - if err != nil { - return nil, nil, err - } - join.Left, join.Right = newLHS, newRHS - - // Next, we have to pass through the grouping values through the join and the projection we add on top - // We added new groupings to the LHS because of the join condition, so we don't want to pass through everything, - // just the groupings that are used by operators on top of this current one - wsOutputGrpOffset := len(groupingOffsets) + len(join.Cols) - outputGroupings := make([]offsets, 0, len(groupingOffsets)) - var wsOffsets []int - for _, groupBy := range groupingOffsets { - var offset offsets - var f func(i int) int - if groupBy < 0 { - offset = lhsOffsets[-groupBy-1] - f = func(i int) int { return -(i + 1) } - } else { - offset = rhsOffsets[groupBy-1] - f = func(i int) int { return i + 1 } - } - outputGrouping := newOffset(len(join.Cols)) - join.Cols = append(join.Cols, f(offset.col)) - if offset.wsCol > -1 { - // we add the weight_string calls at the end of the join columns - outputGrouping.wsCol = wsOutputGrpOffset + len(wsOffsets) - wsOffsets = append(wsOffsets, f(offset.wsCol)) - } - outputGroupings = append(outputGroupings, outputGrouping) - } - join.Cols = append(join.Cols, wsOffsets...) 
- - outputAggrOffsets := make([][]offsets, 0, len(aggregations)) - for idx := range aggregations { - l, r := lhsAggrOffsets[idx], rhsAggrOffsets[idx] - var offSlice []offsets - for _, off := range l { - offSlice = append(offSlice, newOffset(len(join.Cols))) - join.Cols = append(join.Cols, -(off.col + 1)) - } - for _, off := range r { - offSlice = append(offSlice, newOffset(len(join.Cols))) - join.Cols = append(join.Cols, off.col+1) - } - outputAggrOffsets = append(outputAggrOffsets, offSlice) - } - return outputGroupings, outputAggrOffsets, err -} - -/* -pushAggrOnSemiJoin works similarly to pushAggrOnJoin, but it's simpler, because we don't get any inputs from the RHS, -so there are no aggregations or groupings that have to be sent to the RHS - -We do however need to add the columns used in the subquery coming from the LHS to the grouping. -That way we get the aggregation grouped by the column we need to use to decide if the row should -*/ -func (hp *horizonPlanning) pushAggrOnSemiJoin( - ctx *plancontext.PlanningContext, - join *semiJoin, - grouping []operators.GroupBy, - aggregations []operators.Aggr, - ignoreOutputOrder bool, -) ([]offsets, [][]offsets, bool, error) { - // We need to group by the columns used in the join condition. - // If we don't, the LHS will not be able to return the column, and it can't be used to send down to the RHS - lhsCols, err := hp.createGroupingsForColumns(join.LHSColumns) - if err != nil { - return nil, nil, false, err - } - - totalGrouping := append(grouping, lhsCols...) 
- newLeft, groupingOffsets, aggrParams, pushed, err := hp.pushAggregation(ctx, join.lhs, totalGrouping, aggregations, ignoreOutputOrder) - if err != nil { - return nil, nil, false, err - } - join.lhs = newLeft - - outputGroupings := make([]offsets, 0, len(grouping)) - for idx := range grouping { - outputGroupings = append(outputGroupings, groupingOffsets[idx]) - } - - return outputGroupings, aggrParams, pushed, nil -} - -// this method takes a slice of aggregations that can have missing spots in the form of `nil`, -// and pushes the non-empty values down. -// during aggregation planning, it's important to know which of -// the incoming aggregations correspond to what is sent to the LHS and RHS. -// Some aggregations only need to be sent to one of the sides of the join, and in that case, -// the other side will have a nil in this offset of the aggregations -func (hp *horizonPlanning) filteredPushAggregation( - ctx *plancontext.PlanningContext, - plan logicalPlan, - grouping []operators.GroupBy, - aggregations []*operators.Aggr, - ignoreOutputOrder bool, -) (out logicalPlan, groupingOffsets []offsets, outputAggrs [][]offsets, pushed bool, err error) { - used := make([]bool, len(aggregations)) - var aggrs []operators.Aggr - - for idx, aggr := range aggregations { - if aggr != nil { - used[idx] = true - aggrs = append(aggrs, *aggr) - } - } - newplan, groupingOffsets, pushedAggrs, pushed, err := hp.pushAggregation(ctx, plan, grouping, aggrs, ignoreOutputOrder) - if err != nil { - return nil, nil, nil, pushed, err - } - idx := 0 - for _, b := range used { - if !b { - outputAggrs = append(outputAggrs, nil) - continue - } - outputAggrs = append(outputAggrs, pushedAggrs[idx]) - idx++ - } - return newplan, groupingOffsets, outputAggrs, pushed, nil -} - -func isMinOrMax(in popcode.AggregateOpcode) bool { - switch in { - case popcode.AggregateMin, popcode.AggregateMax: - return true - default: - return false - } -} - -func isRandom(in popcode.AggregateOpcode) bool { - return 
in == popcode.AggregateRandom -} - -func splitAggregationsToLeftAndRight( - ctx *plancontext.PlanningContext, - aggregations []operators.Aggr, - join *joinGen4, -) ([]*operators.Aggr, []*operators.Aggr, error) { - var lhsAggrs, rhsAggrs []*operators.Aggr - for _, aggr := range aggregations { - newAggr := aggr - if _, ok := aggr.Original.Expr.(*sqlparser.CountStar); ok { - lhsAggrs = append(lhsAggrs, &newAggr) - rhsAggrs = append(rhsAggrs, &newAggr) - } else { - deps := ctx.SemTable.RecursiveDeps(aggr.Original.Expr) - var other *operators.Aggr - // if we are sending down min/max/random, we don't have to multiply the results with anything - if !isMinOrMax(aggr.OpCode) && !isRandom(aggr.OpCode) { - other = countStarAggr() - } - switch { - case deps.IsSolvedBy(join.Left.ContainsTables()): - lhsAggrs = append(lhsAggrs, &newAggr) - rhsAggrs = append(rhsAggrs, other) - case deps.IsSolvedBy(join.Right.ContainsTables()): - rhsAggrs = append(rhsAggrs, &newAggr) - lhsAggrs = append(lhsAggrs, other) - default: - return nil, nil, vterrors.VT12001("aggregation on columns from different sources") - } - } - } - return lhsAggrs, rhsAggrs, nil -} - -func splitGroupingsToLeftAndRight( - ctx *plancontext.PlanningContext, - join *joinGen4, - grouping, lhsGrouping []operators.GroupBy, -) ([]operators.GroupBy, []operators.GroupBy, []int, error) { - var rhsGrouping []operators.GroupBy - - lhsTS := join.Left.ContainsTables() - rhsTS := join.Right.ContainsTables() - // here we store information about which side the grouping value is coming from. 
- // Negative values from the left operator and positive values are offsets into the RHS - var groupingOffsets []int - for _, groupBy := range grouping { - deps := ctx.SemTable.RecursiveDeps(groupBy.Inner) - switch { - case deps.IsSolvedBy(lhsTS): - groupingOffsets = append(groupingOffsets, -(len(lhsGrouping) + 1)) - lhsGrouping = append(lhsGrouping, groupBy) - case deps.IsSolvedBy(rhsTS): - groupingOffsets = append(groupingOffsets, len(rhsGrouping)+1) - rhsGrouping = append(rhsGrouping, groupBy) - default: - return nil, nil, nil, vterrors.VT12001("grouping on columns from different sources") - } - } - return lhsGrouping, rhsGrouping, groupingOffsets, nil -} - -type ( - reorgFunc = func(groupByOffsets []offsets, aggrOffsets [][]offsets) ([]offsets, [][]offsets) - sortedIterator struct { - grouping []operators.GroupBy - aggregations []operators.Aggr - valueGB *operators.GroupBy - valueA *operators.Aggr - groupbyIdx int - aggrIdx int - } -) - -func (it *sortedIterator) current() (*operators.GroupBy, *operators.Aggr) { - return it.valueGB, it.valueA -} - -func (it *sortedIterator) next() bool { - if it.aggrIdx < len(it.aggregations) && it.groupbyIdx < len(it.grouping) { - aggregation := it.aggregations[it.aggrIdx] - groupBy := it.grouping[it.groupbyIdx] - if operators.CompareRefInt(aggregation.Index, groupBy.InnerIndex) { - it.aggrIdx++ - it.valueA, it.valueGB = &aggregation, nil - return true - } - it.groupbyIdx++ - it.valueA, it.valueGB = nil, &groupBy - return true - } - - if it.groupbyIdx < len(it.grouping) { - groupBy := it.grouping[it.groupbyIdx] - it.groupbyIdx++ - it.valueA, it.valueGB = nil, &groupBy - return true - } - if it.aggrIdx < len(it.aggregations) { - aggregation := it.aggregations[it.aggrIdx] - it.aggrIdx++ - it.valueA, it.valueGB = &aggregation, nil - return true - } - return false -} - -func passThrough(groupByOffsets []offsets, aggrOffsets [][]offsets) ([]offsets, [][]offsets) { - return groupByOffsets, aggrOffsets -} - -func sortOffsets(grouping 
[]operators.GroupBy, aggregations []operators.Aggr) ([]operators.GroupBy, reorgFunc, *sortedIterator) { - originalGrouping := make([]operators.GroupBy, len(grouping)) - originalAggr := make([]operators.Aggr, len(aggregations)) - copy(originalAggr, aggregations) - copy(originalGrouping, grouping) - operators.SortAggregations(aggregations) - operators.SortGrouping(grouping) - - reorg := func(groupByOffsets []offsets, aggrOffsets [][]offsets) ([]offsets, [][]offsets) { - orderedGroupingOffsets := make([]offsets, 0, len(originalGrouping)) - for _, og := range originalGrouping { - for i, g := range grouping { - if og.Inner == g.Inner { - orderedGroupingOffsets = append(orderedGroupingOffsets, groupByOffsets[i]) - break - } - } - } - - orderedAggrs := make([][]offsets, 0, len(originalAggr)) - for _, og := range originalAggr { - for i, g := range aggregations { - if og.Original.Expr == g.Original.Expr { - orderedAggrs = append(orderedAggrs, aggrOffsets[i]) - break - } - } - } - - return orderedGroupingOffsets, orderedAggrs - } - - return grouping, reorg, &sortedIterator{ - grouping: grouping, - aggregations: aggregations, - } -} diff --git a/go/vt/vtgate/planbuilder/builder.go b/go/vt/vtgate/planbuilder/builder.go index 7bed56965d7..a8ba70c1d42 100644 --- a/go/vt/vtgate/planbuilder/builder.go +++ b/go/vt/vtgate/planbuilder/builder.go @@ -22,41 +22,29 @@ import ( "fmt" "sort" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - querypb "vitess.io/vitess/go/vt/proto/query" - - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/log" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/semantics" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" 
"vitess.io/vitess/go/vt/vtgate/vindexes" ) const ( - // V3 is also the default planner - V3 = querypb.ExecuteOptions_V3 // Gen4 uses the default Gen4 planner, which is the greedy planner Gen4 = querypb.ExecuteOptions_Gen4 // Gen4GreedyOnly uses only the faster greedy planner Gen4GreedyOnly = querypb.ExecuteOptions_Gen4Greedy - // Gen4Left2Right tries to emulate the V3 planner by only joining plans in the order they are listed in the FROM-clause + // Gen4Left2Right joins table in the order they are listed in the FROM-clause Gen4Left2Right = querypb.ExecuteOptions_Gen4Left2Right - // Gen4WithFallback first attempts to use the Gen4 planner, and if that fails, uses the V3 planner instead - Gen4WithFallback = querypb.ExecuteOptions_Gen4WithFallback - // Gen4CompareV3 executes queries on both Gen4 and V3 to compare their results. - Gen4CompareV3 = querypb.ExecuteOptions_Gen4CompareV3 - // V3Insert executes insert query on V3 and others on Gen4. - V3Insert = querypb.ExecuteOptions_V3Insert ) var ( - plannerVersions = []plancontext.PlannerVersion{V3, V3Insert, Gen4, Gen4GreedyOnly, Gen4Left2Right, Gen4WithFallback, Gen4CompareV3} + plannerVersions = []plancontext.PlannerVersion{Gen4, Gen4GreedyOnly, Gen4Left2Right} ) type ( @@ -80,24 +68,6 @@ func singleTable(ks, tbl string) string { return fmt.Sprintf("%s.%s", ks, tbl) } -func tablesFromSemantics(semTable *semantics.SemTable) []string { - tables := make(map[string]any, len(semTable.Tables)) - for _, info := range semTable.Tables { - vindexTable := info.GetVindexTable() - if vindexTable == nil { - continue - } - tables[vindexTable.String()] = nil - } - - names := make([]string, 0, len(tables)) - for tbl := range tables { - names = append(names, tbl) - } - sort.Strings(names) - return names -} - // TestBuilder builds a plan for a query based on the specified vschema. 
// This method is only used from tests func TestBuilder(query string, vschema plancontext.VSchema, keyspace string) (*engine.Plan, error) { @@ -140,59 +110,20 @@ func BuildFromStmt(ctx context.Context, query string, stmt sqlparser.Statement, return plan, nil } -func getConfiguredPlanner(vschema plancontext.VSchema, v3planner func(string) stmtPlanner, stmt sqlparser.Statement, query string) (stmtPlanner, error) { - planner, ok := getPlannerFromQuery(stmt) - if !ok { +func getConfiguredPlanner(vschema plancontext.VSchema, stmt sqlparser.Statement, query string) (stmtPlanner, error) { + planner, found := getPlannerFromQueryHint(stmt) + if !found { // if the query doesn't specify the planner, we check what the configuration is planner = vschema.Planner() } switch planner { - case Gen4CompareV3: - return gen4CompareV3Planner(query), nil - case Gen4Left2Right, Gen4GreedyOnly: - return gen4Planner(query, planner), nil - case Gen4WithFallback: - fp := &fallbackPlanner{ - primary: gen4Planner(query, Gen4), - fallback: v3planner(query), - } - return fp.plan, nil - case V3Insert: - if _, isInsert := stmt.(*sqlparser.Insert); isInsert { - return v3planner(query), nil - } - return gen4Planner(query, Gen4), nil - case V3: - return v3planner(query), nil + case Gen4Left2Right, Gen4GreedyOnly, Gen4: default: // default is gen4 plan - return gen4Planner(query, Gen4), nil + log.Infof("Using Gen4 planner instead of %s", planner.String()) + planner = Gen4 } -} - -// getPlannerFromQuery chooses the planner to use based on the query -// The default planner can be overridden using /*vt+ PLANNER=gen4 */ -// We will also fall back on the gen4 planner if we encounter outer join, -// since there are known problems with the v3 planner and outer joins -func getPlannerFromQuery(stmt sqlparser.Statement) (version plancontext.PlannerVersion, found bool) { - version, found = getPlannerFromQueryHint(stmt) - if found { - return - } - - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, 
err error) { - join, ok := node.(*sqlparser.JoinTableExpr) - if ok { - if join.Join == sqlparser.LeftJoinType || join.Join == sqlparser.RightJoinType { - version = querypb.ExecuteOptions_Gen4 - found = true - return false, nil - } - } - return true, nil - }, stmt) - - return + return gen4Planner(query, planner), nil } func getPlannerFromQueryHint(stmt sqlparser.Statement) (plancontext.PlannerVersion, bool) { @@ -219,31 +150,31 @@ func buildRoutePlan(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVa func createInstructionFor(ctx context.Context, query string, stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) { switch stmt := stmt.(type) { case *sqlparser.Select: - configuredPlanner, err := getConfiguredPlanner(vschema, buildSelectPlan, stmt, query) + configuredPlanner, err := getConfiguredPlanner(vschema, stmt, query) if err != nil { return nil, err } return buildRoutePlan(stmt, reservedVars, vschema, configuredPlanner) case *sqlparser.Insert: - configuredPlanner, err := getConfiguredPlanner(vschema, buildInsertPlan, stmt, query) + configuredPlanner, err := getConfiguredPlanner(vschema, stmt, query) if err != nil { return nil, err } return buildRoutePlan(stmt, reservedVars, vschema, configuredPlanner) case *sqlparser.Update: - configuredPlanner, err := getConfiguredPlanner(vschema, buildUpdatePlan, stmt, query) + configuredPlanner, err := getConfiguredPlanner(vschema, stmt, query) if err != nil { return nil, err } return buildRoutePlan(stmt, reservedVars, vschema, configuredPlanner) case *sqlparser.Delete: - configuredPlanner, err := getConfiguredPlanner(vschema, buildDeletePlan, stmt, query) + configuredPlanner, err := getConfiguredPlanner(vschema, stmt, query) if err != nil { return nil, err } return buildRoutePlan(stmt, reservedVars, vschema, configuredPlanner) case *sqlparser.Union: - configuredPlanner, err := getConfiguredPlanner(vschema, 
buildUnionPlan, stmt, query) + configuredPlanner, err := getConfiguredPlanner(vschema, stmt, query) if err != nil { return nil, err } @@ -251,7 +182,7 @@ func createInstructionFor(ctx context.Context, query string, stmt sqlparser.Stat case sqlparser.DDLStatement: return buildGeneralDDLPlan(ctx, query, stmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) case *sqlparser.AlterMigration: - return buildAlterMigrationPlan(query, vschema, enableOnlineDDL) + return buildAlterMigrationPlan(query, stmt, vschema, enableOnlineDDL) case *sqlparser.RevertMigration: return buildRevertMigrationPlan(query, stmt, vschema, enableOnlineDDL) case *sqlparser.ShowMigrationLogs: @@ -268,15 +199,19 @@ func createInstructionFor(ctx context.Context, query string, stmt sqlparser.Stat return buildExplainPlan(ctx, stmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) case *sqlparser.VExplainStmt: return buildVExplainPlan(ctx, stmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) - case *sqlparser.OtherRead, *sqlparser.OtherAdmin: + case *sqlparser.OtherAdmin: return buildOtherReadAndAdmin(query, vschema) + case *sqlparser.Analyze: + return buildRoutePlan(stmt, reservedVars, vschema, buildAnalyzePlan) case *sqlparser.Set: return buildSetPlan(stmt, vschema) case *sqlparser.Load: return buildLoadPlan(query, vschema) case sqlparser.DBDDLStatement: return buildRoutePlan(stmt, reservedVars, vschema, buildDBDDLPlan) - case *sqlparser.Begin, *sqlparser.Commit, *sqlparser.Rollback, *sqlparser.Savepoint, *sqlparser.SRollback, *sqlparser.Release: + case *sqlparser.Begin, *sqlparser.Commit, *sqlparser.Rollback, + *sqlparser.Savepoint, *sqlparser.SRollback, *sqlparser.Release, + *sqlparser.Kill: // Empty by design. 
Not executed by a plan return nil, nil case *sqlparser.Show: @@ -308,6 +243,43 @@ func createInstructionFor(ctx context.Context, query string, stmt sqlparser.Stat return nil, vterrors.VT13001(fmt.Sprintf("unexpected statement type: %T", stmt)) } +func buildAnalyzePlan(stmt sqlparser.Statement, _ *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { + analyzeStmt := stmt.(*sqlparser.Analyze) + + var ks *vindexes.Keyspace + var err error + dest := key.Destination(key.DestinationAllShards{}) + + if !analyzeStmt.Table.Qualifier.IsEmpty() && sqlparser.SystemSchema(analyzeStmt.Table.Qualifier.String()) { + ks, err = vschema.AnyKeyspace() + if err != nil { + return nil, err + } + } else { + tbl, _, _, _, destKs, err := vschema.FindTableOrVindex(analyzeStmt.Table) + if err != nil { + return nil, err + } + if tbl == nil { + return nil, vterrors.VT05004(sqlparser.String(analyzeStmt.Table)) + } + + ks = tbl.Keyspace + if destKs != nil { + dest = destKs + } + analyzeStmt.Table.Name = tbl.Name + } + analyzeStmt.Table.Qualifier = sqlparser.NewIdentifierCS("") + + prim := &engine.Send{ + Keyspace: ks, + TargetDestination: dest, + Query: sqlparser.String(analyzeStmt), + } + return newPlanResult(prim, sqlparser.String(analyzeStmt.Table)), nil +} + func buildDBDDLPlan(stmt sqlparser.Statement, _ *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { dbDDLstmt := stmt.(sqlparser.DBDDLStatement) ksName := dbDDLstmt.GetDatabaseName() @@ -509,26 +481,6 @@ func (tc *tableCollector) getTables() []string { return tableNames } -func (tc *tableCollector) addVindexTable(t *vindexes.Table) { - if t == nil { - return - } - ks, tbl := "", t.Name.String() - if t.Keyspace != nil { - ks = t.Keyspace.Name - } - tc.addTable(ks, tbl) -} - -func (tc *tableCollector) addAllTables(tables []string) { - if tc.tables == nil { - tc.tables = map[string]any{} - } - for _, tbl := range tables { - tc.tables[tbl] = nil - } -} - func newFlushStmt(stmt *sqlparser.Flush, 
tables sqlparser.TableNames) *sqlparser.Flush { return &sqlparser.Flush{ IsLocal: stmt.IsLocal, diff --git a/go/vt/vtgate/planbuilder/bypass.go b/go/vt/vtgate/planbuilder/bypass.go index a5490e2231e..52286816a11 100644 --- a/go/vt/vtgate/planbuilder/bypass.go +++ b/go/vt/vtgate/planbuilder/bypass.go @@ -30,7 +30,6 @@ func buildPlanForBypass(stmt sqlparser.Statement, _ *sqlparser.ReservedVars, vsc if err != nil { return nil, err } - switch dest := vschema.Destination().(type) { case key.DestinationExactKeyRange: if _, ok := stmt.(*sqlparser.Insert); ok { diff --git a/go/vt/vtgate/planbuilder/collations_test.go b/go/vt/vtgate/planbuilder/collations_test.go index 2a7ffebf91c..24fb038b4c2 100644 --- a/go/vt/vtgate/planbuilder/collations_test.go +++ b/go/vt/vtgate/planbuilder/collations_test.go @@ -20,6 +20,8 @@ import ( "fmt" "testing" + "vitess.io/vitess/go/test/vschemawrapper" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/collations" @@ -39,21 +41,21 @@ type collationTestCase struct { } func (tc *collationTestCase) run(t *testing.T) { - vschemaWrapper := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", false), - sysVarEnabled: true, - version: Gen4, + vschemaWrapper := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", false), + SysVarEnabled: true, + Version: Gen4, } tc.addCollationsToSchema(vschemaWrapper) - plan, err := TestBuilder(tc.query, vschemaWrapper, vschemaWrapper.currentDb()) + plan, err := TestBuilder(tc.query, vschemaWrapper, vschemaWrapper.CurrentDb()) require.NoError(t, err) tc.check(t, tc.collations, plan.Instructions) } -func (tc *collationTestCase) addCollationsToSchema(vschema *vschemaWrapper) { +func (tc *collationTestCase) addCollationsToSchema(vschema *vschemawrapper.VSchemaWrapper) { for _, collation := range tc.collations { - tbl := vschema.v.Keyspaces[collation.ks].Tables[collation.table] + tbl := vschema.V.Keyspaces[collation.ks].Tables[collation.table] for i, c := range tbl.Columns { 
if c.Name.EqualString(collation.colName) { tbl.Columns[i].CollationName = collation.collationName @@ -65,7 +67,7 @@ func (tc *collationTestCase) addCollationsToSchema(vschema *vschemaWrapper) { func TestOrderedAggregateCollations(t *testing.T) { collid := func(collname string) collations.ID { - return collations.Local().LookupByName(collname).ID() + return collations.Local().LookupByName(collname) } testCases := []collationTestCase{ { @@ -81,9 +83,9 @@ func TestOrderedAggregateCollations(t *testing.T) { collations: []collationInTable{{ks: "user", table: "user", collationName: "utf8mb4_bin", colName: "textcol1"}}, query: "select distinct textcol1 from user", check: func(t *testing.T, colls []collationInTable, primitive engine.Primitive) { - oa, isOA := primitive.(*engine.OrderedAggregate) - require.True(t, isOA, "should be an OrderedAggregate") - require.Equal(t, collid(colls[0].collationName), oa.GroupByKeys[0].CollationID) + distinct, isDistinct := primitive.(*engine.Distinct) + require.True(t, isDistinct, "should be a distinct") + require.Equal(t, collid(colls[0].collationName), distinct.CheckCols[0].Collation) }, }, { diff --git a/go/vt/vtgate/planbuilder/concatenate.go b/go/vt/vtgate/planbuilder/concatenate.go index 70b867b1146..b6ece23d010 100644 --- a/go/vt/vtgate/planbuilder/concatenate.go +++ b/go/vt/vtgate/planbuilder/concatenate.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Vitess Authors. +Copyright 2021 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -20,69 +20,65 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" ) type concatenate struct { - v3Plan - lhs, rhs logicalPlan - order int -} - -var _ logicalPlan = (*concatenate)(nil) - -func (c *concatenate) Order() int { - return c.order -} + sources []logicalPlan -func (c *concatenate) ResultColumns() []*resultColumn { - return c.lhs.ResultColumns() + // These column offsets do not need to be typed checked - they usually contain weight_string() + // columns that are not going to be returned to the user + noNeedToTypeCheck []int } -func (c *concatenate) Reorder(order int) { - c.lhs.Reorder(order) - c.rhs.Reorder(c.lhs.Order()) - c.order = c.rhs.Order() + 1 -} +var _ logicalPlan = (*concatenate)(nil) -func (c *concatenate) Wireup(plan logicalPlan, jt *jointab) error { - // TODO systay should we do something different here? 
- err := c.lhs.Wireup(plan, jt) - if err != nil { - return err +// Wireup implements the logicalPlan interface +func (c *concatenate) Wireup(ctx *plancontext.PlanningContext) error { + for _, source := range c.sources { + err := source.Wireup(ctx) + if err != nil { + return err + } } - return c.rhs.Wireup(plan, jt) -} - -func (c *concatenate) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { - panic("implement me") -} - -func (c *concatenate) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) { - panic("implement me") -} - -func (c *concatenate) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weightcolNumber int, err error) { - panic("implement me") + return nil } +// Primitive implements the logicalPlan interface func (c *concatenate) Primitive() engine.Primitive { - lhs := c.lhs.Primitive() - rhs := c.rhs.Primitive() + var sources []engine.Primitive + for _, source := range c.sources { + sources = append(sources, source.Primitive()) + } - return engine.NewConcatenate([]engine.Primitive{lhs, rhs}, nil) + return engine.NewConcatenate(sources, c.noNeedToTypeCheck) } // Rewrite implements the logicalPlan interface func (c *concatenate) Rewrite(inputs ...logicalPlan) error { - if len(inputs) != 2 { + if len(inputs) != len(c.sources) { return vterrors.VT13001("concatenate: wrong number of inputs") } - c.lhs = inputs[0] - c.rhs = inputs[1] + c.sources = inputs return nil } +// ContainsTables implements the logicalPlan interface +func (c *concatenate) ContainsTables() semantics.TableSet { + var tableSet semantics.TableSet + for _, source := range c.sources { + tableSet = tableSet.Merge(source.ContainsTables()) + } + return tableSet +} + // Inputs implements the logicalPlan interface func (c *concatenate) Inputs() []logicalPlan { - return []logicalPlan{c.lhs, c.rhs} + return c.sources +} + +// OutputColumns implements the logicalPlan interface +func (c *concatenate) OutputColumns() []sqlparser.SelectExpr { + return 
c.sources[0].OutputColumns() } diff --git a/go/vt/vtgate/planbuilder/concatenateGen4.go b/go/vt/vtgate/planbuilder/concatenateGen4.go deleted file mode 100644 index fa12d24cf73..00000000000 --- a/go/vt/vtgate/planbuilder/concatenateGen4.go +++ /dev/null @@ -1,119 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" -) - -type concatenateGen4 struct { - sources []logicalPlan - - // These column offsets do not need to be typed checked - they usually contain weight_string() - // columns that are not going to be returned to the user - noNeedToTypeCheck []int -} - -var _ logicalPlan = (*concatenateGen4)(nil) - -// Order implements the logicalPlan interface -func (c *concatenateGen4) Order() int { - panic("implement me") -} - -// ResultColumns implements the logicalPlan interface -func (c *concatenateGen4) ResultColumns() []*resultColumn { - panic("implement me") -} - -// Reorder implements the logicalPlan interface -func (c *concatenateGen4) Reorder(order int) { - panic("implement me") -} - -// Wireup implements the logicalPlan interface -func (c *concatenateGen4) Wireup(plan logicalPlan, jt *jointab) error { - panic("implement me") -} - -// WireupGen4 implements the logicalPlan interface -func (c 
*concatenateGen4) WireupGen4(ctx *plancontext.PlanningContext) error { - for _, source := range c.sources { - err := source.WireupGen4(ctx) - if err != nil { - return err - } - } - return nil -} - -// SupplyVar implements the logicalPlan interface -func (c *concatenateGen4) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { - panic("implement me") -} - -// SupplyCol implements the logicalPlan interface -func (c *concatenateGen4) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) { - panic("implement me") -} - -// SupplyWeightString implements the logicalPlan interface -func (c *concatenateGen4) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weightcolNumber int, err error) { - panic("implement me") -} - -// Primitive implements the logicalPlan interface -func (c *concatenateGen4) Primitive() engine.Primitive { - var sources []engine.Primitive - for _, source := range c.sources { - sources = append(sources, source.Primitive()) - } - - return engine.NewConcatenate(sources, c.noNeedToTypeCheck) -} - -// Rewrite implements the logicalPlan interface -func (c *concatenateGen4) Rewrite(inputs ...logicalPlan) error { - if len(inputs) != len(c.sources) { - return vterrors.VT13001("concatenateGen4: wrong number of inputs") - } - c.sources = inputs - return nil -} - -// ContainsTables implements the logicalPlan interface -func (c *concatenateGen4) ContainsTables() semantics.TableSet { - var tableSet semantics.TableSet - for _, source := range c.sources { - tableSet = tableSet.Merge(source.ContainsTables()) - } - return tableSet -} - -// Inputs implements the logicalPlan interface -func (c *concatenateGen4) Inputs() []logicalPlan { - return c.sources -} - -// OutputColumns implements the logicalPlan interface -func (c *concatenateGen4) OutputColumns() []sqlparser.SelectExpr { - return c.sources[0].OutputColumns() -} diff --git a/go/vt/vtgate/planbuilder/ddl.go b/go/vt/vtgate/planbuilder/ddl.go index 738595d7d6c..41e5d64346e 100644 --- 
a/go/vt/vtgate/planbuilder/ddl.go +++ b/go/vt/vtgate/planbuilder/ddl.go @@ -5,6 +5,7 @@ import ( "fmt" "vitess.io/vitess/go/vt/key" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" @@ -19,18 +20,6 @@ const ( DifferentDestinations string = "Tables or Views specified in the query do not belong to the same destination" ) -type fkStrategy int - -const ( - fkAllow fkStrategy = iota - fkDisallow -) - -var fkStrategyMap = map[string]fkStrategy{ - "allow": fkAllow, - "disallow": fkDisallow, -} - type fkContraint struct { found bool } @@ -55,7 +44,7 @@ func (fk *fkContraint) FkWalk(node sqlparser.SQLNode) (kontinue bool, err error) // and which chooses which of the two to invoke at runtime. func buildGeneralDDLPlan(ctx context.Context, sql string, ddlStatement sqlparser.DDLStatement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) { if vschema.Destination() != nil { - return buildByPassDDLPlan(sql, vschema) + return buildByPassPlan(sql, vschema) } normalDDLPlan, onlineDDLPlan, err := buildDDLPlans(ctx, sql, ddlStatement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) if err != nil { @@ -90,7 +79,7 @@ func buildGeneralDDLPlan(ctx context.Context, sql string, ddlStatement sqlparser return newPlanResult(eddl, tc.getTables()...), nil } -func buildByPassDDLPlan(sql string, vschema plancontext.VSchema) (*planResult, error) { +func buildByPassPlan(sql string, vschema plancontext.VSchema) (*planResult, error) { keyspace, err := vschema.DefaultKeyspace() if err != nil { return nil, err @@ -110,10 +99,6 @@ func buildDDLPlans(ctx context.Context, sql string, ddlStatement sqlparser.DDLSt switch ddl := ddlStatement.(type) { case *sqlparser.AlterTable, *sqlparser.CreateTable, *sqlparser.TruncateTable: - err = checkFKError(vschema, ddlStatement) - if err != nil { - return nil, nil, err - } // 
For ALTER TABLE and TRUNCATE TABLE, the table must already exist // // For CREATE TABLE, the table may (in the case of --declarative) @@ -121,6 +106,10 @@ func buildDDLPlans(ctx context.Context, sql string, ddlStatement sqlparser.DDLSt // // We should find the target of the query from this tables location. destination, keyspace, err = findTableDestinationAndKeyspace(vschema, ddlStatement) + if err != nil { + return nil, nil, err + } + err = checkFKError(vschema, ddlStatement, keyspace) case *sqlparser.CreateView: destination, keyspace, err = buildCreateView(ctx, vschema, ddl, reservedVars, enableOnlineDDL, enableDirectDDL) case *sqlparser.AlterView: @@ -161,8 +150,12 @@ func buildDDLPlans(ctx context.Context, sql string, ddlStatement sqlparser.DDLSt }, nil } -func checkFKError(vschema plancontext.VSchema, ddlStatement sqlparser.DDLStatement) error { - if fkStrategyMap[vschema.ForeignKeyMode()] == fkDisallow { +func checkFKError(vschema plancontext.VSchema, ddlStatement sqlparser.DDLStatement, keyspace *vindexes.Keyspace) error { + fkMode, err := vschema.ForeignKeyMode(keyspace.Name) + if err != nil { + return err + } + if fkMode == vschemapb.Keyspace_disallow { fk := &fkContraint{} _ = sqlparser.Walk(fk.FkWalk, ddlStatement) if fk.found { @@ -410,8 +403,6 @@ func tryToGetRoutePlan(selectPlan engine.Primitive) (valid bool, opCode engine.O switch plan := selectPlan.(type) { case *engine.Route: return true, plan.Opcode - case engine.Gen4Comparer: - return tryToGetRoutePlan(plan.GetGen4Primitive()) default: return false, engine.Opcode(0) } diff --git a/go/vt/vtgate/planbuilder/delete.go b/go/vt/vtgate/planbuilder/delete.go index 569bf455cce..e5e993d50e0 100644 --- a/go/vt/vtgate/planbuilder/delete.go +++ b/go/vt/vtgate/planbuilder/delete.go @@ -17,63 +17,98 @@ limitations under the License. 
package planbuilder import ( + querypb "vitess.io/vitess/go/vt/proto/query" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" + "vitess.io/vitess/go/vt/vtgate/vindexes" ) -// buildDeletePlan builds the instructions for a DELETE statement. -func buildDeletePlan(string) stmtPlanner { - return func(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { - del := stmt.(*sqlparser.Delete) - if del.With != nil { - return nil, vterrors.VT12001("WITH expression in DELETE statement") - } - err := checkUnsupportedExpressions(del) - if err != nil { - return nil, err - } - if len(del.TableExprs) == 1 && len(del.Targets) == 1 { - del, err = rewriteSingleTbl(del) - if err != nil { - return nil, err - } - } - dml, tables, ksidVindex, err := buildDMLPlan(vschema, "delete", del, reservedVars, del.TableExprs, del.Where, del.OrderBy, del.Limit, del.Comments, del.Targets) +func gen4DeleteStmtPlanner( + version querypb.ExecuteOptions_PlannerVersion, + deleteStmt *sqlparser.Delete, + reservedVars *sqlparser.ReservedVars, + vschema plancontext.VSchema, +) (*planResult, error) { + if deleteStmt.With != nil { + return nil, vterrors.VT12001("WITH expression in DELETE statement") + } + + var err error + if len(deleteStmt.TableExprs) == 1 && len(deleteStmt.Targets) == 1 { + deleteStmt, err = rewriteSingleTbl(deleteStmt) if err != nil { return nil, err } - edel := &engine.Delete{DML: dml} - if dml.Opcode == engine.Unsharded { - return newPlanResult(edel, tables...), nil - } + } - if len(del.Targets) > 1 { - return nil, vterrors.VT12001("multi-table DELETE statement in a sharded keyspace") + ctx, err := plancontext.CreatePlanningContext(deleteStmt, reservedVars, vschema, 
version) + if err != nil { + return nil, err + } + + err = rewriteRoutedTables(deleteStmt, vschema) + if err != nil { + return nil, err + } + + if ks, tables := ctx.SemTable.SingleUnshardedKeyspace(); ks != nil { + if fkManagementNotRequired(ctx, vschema, tables) { + plan := deleteUnshardedShortcut(deleteStmt, ks, tables) + return newPlanResult(plan.Primitive(), operators.QualifiedTables(ks, tables)...), nil } + } + + if err := checkIfDeleteSupported(deleteStmt, ctx.SemTable); err != nil { + return nil, err + } + + err = queryRewrite(ctx.SemTable, reservedVars, deleteStmt) + if err != nil { + return nil, err + } + + op, err := operators.PlanQuery(ctx, deleteStmt) + if err != nil { + return nil, err + } + + plan, err := transformToLogicalPlan(ctx, op) + if err != nil { + return nil, err + } + + plan = pushCommentDirectivesOnPlan(plan, deleteStmt) + + setLockOnAllSelect(plan) - edelTable, err := edel.GetSingleTable() + if err := plan.Wireup(ctx); err != nil { + return nil, err + } + + return newPlanResult(plan.Primitive(), operators.TablesUsed(op)...), nil +} + +func fkManagementNotRequired(ctx *plancontext.PlanningContext, vschema plancontext.VSchema, vTables []*vindexes.Table) bool { + // Find the foreign key mode and check for any managed child foreign keys. 
+ for _, vTable := range vTables { + ksMode, err := vschema.ForeignKeyMode(vTable.Keyspace.Name) if err != nil { - return nil, err + return false } - if len(del.Targets) == 1 && del.Targets[0].Name != edelTable.Name { - return nil, vterrors.VT03003(del.Targets[0].Name.String()) + if ksMode != vschemapb.Keyspace_managed { + continue } - - if len(edelTable.Owned) > 0 { - aTblExpr, ok := del.TableExprs[0].(*sqlparser.AliasedTableExpr) - if !ok { - return nil, vterrors.VT12001("deleting from a complex table expression") - } - tblExpr := &sqlparser.AliasedTableExpr{Expr: sqlparser.TableName{Name: edelTable.Name}, As: aTblExpr.As} - edel.OwnedVindexQuery = generateDMLSubquery(tblExpr, del.Where, del.OrderBy, del.Limit, edelTable, ksidVindex.Columns) - edel.KsidVindex = ksidVindex.Vindex - edel.KsidLength = len(ksidVindex.Columns) + childFks := vTable.ChildFKsNeedsHandling(ctx.VerifyAllFKs, vindexes.DeleteAction) + if len(childFks) > 0 { + return false } - - return newPlanResult(edel, tables...), nil } + return true } func rewriteSingleTbl(del *sqlparser.Delete) (*sqlparser.Delete, error) { @@ -112,3 +147,51 @@ func rewriteSingleTbl(del *sqlparser.Delete) (*sqlparser.Delete, error) { } return del, nil } + +func deleteUnshardedShortcut(stmt *sqlparser.Delete, ks *vindexes.Keyspace, tables []*vindexes.Table) logicalPlan { + edml := engine.NewDML() + edml.Keyspace = ks + edml.Opcode = engine.Unsharded + edml.Query = generateQuery(stmt) + for _, tbl := range tables { + edml.TableNames = append(edml.TableNames, tbl.Name.String()) + } + return &primitiveWrapper{prim: &engine.Delete{DML: edml}} +} + +// checkIfDeleteSupported checks if the delete query is supported or we must return an error. 
+func checkIfDeleteSupported(del *sqlparser.Delete, semTable *semantics.SemTable) error { + if semTable.NotUnshardedErr != nil { + return semTable.NotUnshardedErr + } + + // Delete is only supported for a single TableExpr which is supposed to be an aliased expression + multiShardErr := vterrors.VT12001("multi-shard or vindex write statement") + if len(del.TableExprs) != 1 { + return multiShardErr + } + _, isAliasedExpr := del.TableExprs[0].(*sqlparser.AliasedTableExpr) + if !isAliasedExpr { + return multiShardErr + } + + if len(del.Targets) > 1 { + return vterrors.VT12001("multi-table DELETE statement in a sharded keyspace") + } + + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node.(type) { + case *sqlparser.Subquery, *sqlparser.DerivedTable: + // We have a subquery, so we must fail the planning. + // If this subquery and the table expression were all belonging to the same unsharded keyspace, + // we would have already created a plan for them before doing these checks. 
+ return false, vterrors.VT12001("subqueries in DML") + } + return true, nil + }, del) + if err != nil { + return err + } + + return nil +} diff --git a/go/vt/vtgate/planbuilder/distinct.go b/go/vt/vtgate/planbuilder/distinct.go index dff6370078e..8b81fa4a8ce 100644 --- a/go/vt/vtgate/planbuilder/distinct.go +++ b/go/vt/vtgate/planbuilder/distinct.go @@ -41,24 +41,7 @@ func newDistinct(source logicalPlan, checkCols []engine.CheckCol, truncateColumn } } -func newDistinctGen4Legacy(source logicalPlan, checkCols []engine.CheckCol, needToTruncate bool) logicalPlan { - return &distinct{ - logicalPlanCommon: newBuilderCommon(source), - checkCols: checkCols, - needToTruncate: needToTruncate, - } -} - -func newDistinctV3(source logicalPlan) logicalPlan { - return &distinct{logicalPlanCommon: newBuilderCommon(source)} -} - func (d *distinct) Primitive() engine.Primitive { - if d.checkCols == nil { - // If we are missing the checkCols information, we are on the V3 planner and should produce a V3 Distinct - return &engine.DistinctV3{Source: d.input.Primitive()} - } - truncate := d.truncateColumn if d.needToTruncate { wsColFound := false diff --git a/go/vt/vtgate/planbuilder/dml_planner.go b/go/vt/vtgate/planbuilder/dml_planner.go index 8d4d3d2a1fe..a85d10b742a 100644 --- a/go/vt/vtgate/planbuilder/dml_planner.go +++ b/go/vt/vtgate/planbuilder/dml_planner.go @@ -17,387 +17,56 @@ limitations under the License. 
package planbuilder import ( - "fmt" - - "vitess.io/vitess/go/vt/key" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - "vitess.io/vitess/go/vt/vtgate/vindexes" -) - -type ( - // costDML is used to compare the cost of vindexOptionDML - costDML struct { - vindexCost int - isUnique bool - opCode engine.Opcode - } - - // vindexPlusPredicatesDML is a struct used to store all the predicates that the vindex can be used to query - vindexPlusPredicatesDML struct { - colVindex *vindexes.ColumnVindex - - // during planning, we store the alternatives found for this DML in this slice - options []*vindexOptionDML - } - - // vindexOptionDML stores the information needed to know if we have all the information needed to use a vindex - vindexOptionDML struct { - ready bool - values []evalengine.Expr - // columns that we have seen so far. Used only for multi-column vindexes so that we can track how many columns part of the vindex we have seen - colsSeen map[string]any - opcode engine.Opcode - foundVindex vindexes.Vindex - cost costDML - } ) -// getDMLRouting returns the vindex and values for the DML, -// If it cannot find a unique vindex match, it returns an error. 
-func getDMLRouting(where *sqlparser.Where, table *vindexes.Table) ( - engine.Opcode, - *vindexes.ColumnVindex, - vindexes.Vindex, - []evalengine.Expr, - error, -) { - // Check that we have a primary vindex which is valid - if len(table.ColumnVindexes) == 0 || !table.ColumnVindexes[0].IsUnique() { - return engine.Scatter, nil, nil, nil, vterrors.VT09001(table.Name) - } - // ksidVindex is the primary vindex - ksidVindex := table.ColumnVindexes[0] - if where == nil { - return engine.Scatter, ksidVindex, nil, nil, nil - } - - filters := sqlparser.SplitAndExpression(nil, where.Expr) - // go over the vindexes in the order of increasing cost - for _, colVindex := range table.Ordered { - if lu, isLu := colVindex.Vindex.(vindexes.LookupBackfill); isLu && lu.IsBackfilling() { - // Checking if the Vindex is currently backfilling or not, if it isn't we can read from the vindex table - // and we will be able to do a delete equal. Otherwise, we continue to look for next best vindex. - continue - } - // get the best vindex option that can be used for this vindexes.ColumnVindex - if vindexOption := getBestVindexOption(filters, colVindex); vindexOption != nil { - return vindexOption.opcode, ksidVindex, colVindex.Vindex, vindexOption.values, nil +func rewriteRoutedTables(stmt sqlparser.Statement, vschema plancontext.VSchema) error { + // Rewrite routed tables + return sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { + aliasTbl, isAlias := node.(*sqlparser.AliasedTableExpr) + if !isAlias { + return true, nil } - } - return engine.Scatter, ksidVindex, nil, nil, nil -} - -// getBestVindexOption returns the best vindex option that can be used for this vindexes.ColumnVindex -// It returns nil if there is no suitable way to use the ColumnVindex -func getBestVindexOption(exprs []sqlparser.Expr, index *vindexes.ColumnVindex) *vindexOptionDML { - vindexPlusPredicates := &vindexPlusPredicatesDML{ - colVindex: index, - } - for _, filter := range exprs { - comparison, ok := 
filter.(*sqlparser.ComparisonExpr) + tableName, ok := aliasTbl.Expr.(sqlparser.TableName) if !ok { - continue - } - var colName *sqlparser.ColName - var valExpr sqlparser.Expr - if col, ok := comparison.Left.(*sqlparser.ColName); ok { - colName = col - valExpr = comparison.Right - } else if col, ok := comparison.Right.(*sqlparser.ColName); ok { - colName = col - valExpr = comparison.Left - } else { - continue - } - - var opcode engine.Opcode - switch comparison.Operator { - case sqlparser.EqualOp: - if !sqlparser.IsValue(valExpr) { - continue - } - opcode = engine.Equal - case sqlparser.InOp: - if !sqlparser.IsSimpleTuple(valExpr) { - continue - } - opcode = engine.IN - default: - continue + return true, nil } - expr, err := evalengine.Translate(comparison.Right, nil) + vschemaTable, vindexTbl, _, _, _, err := vschema.FindTableOrVindex(tableName) if err != nil { - continue + return false, err } - addVindexOptions(colName, expr, opcode, vindexPlusPredicates) - } - return vindexPlusPredicates.bestOption() -} - -// bestOption returns the option which is ready and has the lowest associated cost -func (vpp *vindexPlusPredicatesDML) bestOption() *vindexOptionDML { - var best *vindexOptionDML - for _, option := range vpp.options { - if option.ready { - if best == nil || lessCostDML(option.cost, best.cost) { - best = option - } + if vindexTbl != nil { + // vindex cannot be present in a dml statement. 
+ return false, vterrors.VT09014() } - } - return best -} - -// lessCostDML compares two costDML and returns true if the first cost is cheaper than the second -func lessCostDML(c1, c2 costDML) bool { - switch { - case c1.opCode != c2.opCode: - return c1.opCode < c2.opCode - case c1.isUnique == c2.isUnique: - return c1.vindexCost <= c2.vindexCost - default: - return c1.isUnique - } -} -// addVindexOptions adds new vindexOptionDML if it matches any column of the vindexes.ColumnVindex -func addVindexOptions(column *sqlparser.ColName, value evalengine.Expr, opcode engine.Opcode, v *vindexPlusPredicatesDML) { - switch v.colVindex.Vindex.(type) { - case vindexes.SingleColumn: - col := v.colVindex.Columns[0] - if column.Name.Equal(col) { - // single column vindex - just add the option - vindex := v.colVindex - v.options = append(v.options, &vindexOptionDML{ - values: []evalengine.Expr{value}, - opcode: opcode, - foundVindex: vindex.Vindex, - cost: costForDML(v.colVindex, opcode), - ready: true, - }) - } - case vindexes.MultiColumn: - colLoweredName := "" - indexOfCol := -1 - for idx, col := range v.colVindex.Columns { - if column.Name.Equal(col) { - colLoweredName = column.Name.Lowered() - indexOfCol = idx - break + if vschemaTable.Name.String() != tableName.Name.String() { + name := tableName.Name + if aliasTbl.As.IsEmpty() { + // if the user hasn't specified an alias, we'll insert one here so the old table name still works + aliasTbl.As = sqlparser.NewIdentifierCS(name.String()) } + tableName.Name = sqlparser.NewIdentifierCS(vschemaTable.Name.String()) + aliasTbl.Expr = tableName } - if colLoweredName == "" { - break - } - - var newOption []*vindexOptionDML - for _, op := range v.options { - if op.ready { - continue - } - _, isPresent := op.colsSeen[colLoweredName] - if isPresent { - continue - } - option := copyOptionDML(op) - option.updateWithNewColumn(colLoweredName, indexOfCol, value, v.colVindex, opcode) - newOption = append(newOption, option) - } - v.options = 
append(v.options, newOption...) - - // multi column vindex - just always add as new option - option := createOptionDML(v.colVindex) - option.updateWithNewColumn(colLoweredName, indexOfCol, value, v.colVindex, opcode) - v.options = append(v.options, option) - } -} - -// copyOptionDML is used to copy vindexOptionDML -func copyOptionDML(orig *vindexOptionDML) *vindexOptionDML { - colsSeen := make(map[string]any, len(orig.colsSeen)) - values := make([]evalengine.Expr, len(orig.values)) - - copy(values, orig.values) - for k, v := range orig.colsSeen { - colsSeen[k] = v - } - vo := &vindexOptionDML{ - values: values, - colsSeen: colsSeen, - opcode: orig.opcode, - foundVindex: orig.foundVindex, - cost: orig.cost, - } - return vo -} - -// updateWithNewColumn is used to update vindexOptionDML with a new column that matches one of its unseen columns -func (option *vindexOptionDML) updateWithNewColumn(colLoweredName string, indexOfCol int, value evalengine.Expr, colVindex *vindexes.ColumnVindex, opcode engine.Opcode) { - option.colsSeen[colLoweredName] = true - option.values[indexOfCol] = value - option.ready = len(option.colsSeen) == len(colVindex.Columns) - if option.opcode < opcode { - option.opcode = opcode - option.cost = costForDML(colVindex, opcode) - } -} - -// createOptionDML is used to create a vindexOptionDML -func createOptionDML( - colVindex *vindexes.ColumnVindex, -) *vindexOptionDML { - values := make([]evalengine.Expr, len(colVindex.Columns)) - vindex := colVindex.Vindex - - return &vindexOptionDML{ - values: values, - colsSeen: map[string]any{}, - foundVindex: vindex, - } -} - -// costForDML returns a cost struct to make route choices easier to compare -func costForDML(foundVindex *vindexes.ColumnVindex, opcode engine.Opcode) costDML { - switch opcode { - // For these opcodes, we should not have a vindex, so we just return the opcode as the cost - case engine.Unsharded, engine.Scatter: - return costDML{ - opCode: opcode, - } - } - - return costDML{ - 
vindexCost: foundVindex.Cost(), - isUnique: foundVindex.IsUnique(), - opCode: opcode, - } -} - -func buildDMLPlan( - vschema plancontext.VSchema, - dmlType string, - stmt sqlparser.Statement, - reservedVars *sqlparser.ReservedVars, - tableExprs sqlparser.TableExprs, - where *sqlparser.Where, - orderBy sqlparser.OrderBy, - limit *sqlparser.Limit, - comments *sqlparser.ParsedComments, - nodes ...sqlparser.SQLNode, -) (*engine.DML, []string, *vindexes.ColumnVindex, error) { - edml := engine.NewDML() - pb := newPrimitiveBuilder(vschema, newJointab(reservedVars)) - rb, err := pb.processDMLTable(tableExprs, reservedVars, nil) - if err != nil { - return nil, nil, nil, err - } - edml.Keyspace = rb.eroute.Keyspace - tc := &tableCollector{} - for _, tval := range pb.st.tables { - tc.addVindexTable(tval.vschemaTable) - } - - edml.Table, err = pb.st.AllVschemaTableNames() - if err != nil { - return nil, nil, nil, err - } - if !edml.Keyspace.Sharded { - // We only validate non-table subexpressions because the previous analysis has already validated them. - var subqueryArgs []sqlparser.SQLNode - subqueryArgs = append(subqueryArgs, nodes...) - subqueryArgs = append(subqueryArgs, where, orderBy, limit) - subqueryIsUnsharded, subqueryTables := pb.finalizeUnshardedDMLSubqueries(reservedVars, subqueryArgs...) - if subqueryIsUnsharded { - vschema.WarnUnshardedOnly("subqueries can't be sharded in DML") - } else { - return nil, nil, nil, vterrors.VT12001("sharded subqueries in DML") - } - edml.Opcode = engine.Unsharded - // Generate query after all the analysis. Otherwise table name substitutions for - // routed tables won't happen. - edml.Query = generateQuery(stmt) - edml.Table = append(edml.Table, subqueryTables...) - return edml, tc.getTables(), nil, nil - } - - if hasSubquery(stmt) { - return nil, nil, nil, vterrors.VT12001("sharded subqueries in DML") - } - - // Generate query after all the analysis. Otherwise table name substitutions for - // routed tables won't happen. 
- edml.Query = generateQuery(stmt) - - directives := comments.Directives() - if directives.IsSet(sqlparser.DirectiveMultiShardAutocommit) { - edml.MultiShardAutocommit = true - } - - edml.QueryTimeout = queryTimeout(directives) - - if len(pb.st.tables) != 1 { - return nil, nil, nil, vterrors.VT12001(fmt.Sprintf("multi-table %s statement in a sharded keyspace", dmlType)) - } - edmlTable, err := edml.GetSingleTable() - if err != nil { - return nil, nil, nil, err - } - - if edmlTable.Pinned != nil { - edml.Opcode = engine.ByDestination - edml.TargetDestination = key.DestinationKeyspaceID(edmlTable.Pinned) - return edml, nil, nil, nil - } - - routingType, ksidVindex, vindex, values, err := getDMLRouting(where, edmlTable) - if err != nil { - return nil, nil, nil, err - } - if rb.eroute.TargetDestination != nil { - if rb.eroute.TargetTabletType != topodatapb.TabletType_PRIMARY { - return nil, nil, nil, vterrors.VT09002(dmlType) - } - edml.Opcode = engine.ByDestination - edml.TargetDestination = rb.eroute.TargetDestination - return edml, tc.getTables(), ksidVindex, nil - } - - edml.Opcode = routingType - if routingType == engine.Scatter { - if limit != nil { - return nil, nil, nil, vterrors.VT12001(fmt.Sprintf("multi-shard %s with LIMIT", dmlType)) - } - } else { - edml.Vindex = vindex - edml.Values = values - } - - return edml, tc.getTables(), ksidVindex, nil + return true, nil + }, stmt) } -func generateDMLSubquery(tblExpr sqlparser.TableExpr, where *sqlparser.Where, orderBy sqlparser.OrderBy, limit *sqlparser.Limit, table *vindexes.Table, ksidCols []sqlparser.IdentifierCI) string { - buf := sqlparser.NewTrackedBuffer(nil) - for idx, col := range ksidCols { - if idx == 0 { - buf.Myprintf("select %v", col) - } else { - buf.Myprintf(", %v", col) - } - } - for _, cv := range table.Owned { - for _, column := range cv.Columns { - buf.Myprintf(", %v", column) +func setLockOnAllSelect(plan logicalPlan) { + _, _ = visit(plan, func(plan logicalPlan) (bool, logicalPlan, error) { 
+ switch node := plan.(type) { + case *route: + if node.Select.GetLock() == sqlparser.NoLock { + node.Select.SetLock(sqlparser.ShareModeLock) + } + return true, node, nil } - } - buf.Myprintf(" from %v%v%v%v for update", tblExpr, where, orderBy, limit) - return buf.String() + return true, plan, nil + }) } func generateQuery(statement sqlparser.Statement) string { diff --git a/go/vt/vtgate/planbuilder/doc.go b/go/vt/vtgate/planbuilder/doc.go deleted file mode 100644 index 77a4a88ed7b..00000000000 --- a/go/vt/vtgate/planbuilder/doc.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package planbuilder allows you to build execution -plans that describe how to fulfill a query that may -span multiple keyspaces or shards. The main entry -points for this package are Build and BuildFromStmt. -*/ -package planbuilder - -/* -The main strategy of the planbuilder is to push down as -much of the work as possible down to the vttablets. The -special primitive for doing this is route, which can -execute any SQL on a single shard (or scatter). Any -work that cannot be done by a single route is stitched -together by VTGate using relational primitives. If -stitching is not possible using existing primitives, -then an "unsupported" error is returned. - -If a query is split into multiple parts, like a -cross-shard join, the latter parts may carry references -to the former parts. 
If this happens, the primitive -specifies how to build these cross-shard references as -"join variables" that will essentially be sent in -as bind vars during execution. For example: - - select ... from a join b on b.col = a.col - -will be executed as: - - select ... a.col from a (produce "a_col" from a.col) - select ... from b where b.col = :a_col - -The central design element for analyzing queries and -building plans is the symbol table (symtab). This data -structure evolves as a query is analyzed. Therefore, -searches are not repeatable. To resolve this, search -results are persisted inside the ColName as 'Metadata', -and reused as needed. - -The plan is built in two phases. In the -first phase (break-up and push-down), the query is -broken into smaller parts and pushed down into -various primitives. In the second phase (wire-up), -external references are wired up using bind vars, and -the individual ASTs are converted into actual queries. - -In current architecture, VTGate does not know the -underlying MySQL schema. Due to this, we assume that -any qualified or implicit column reference of a table -is valid and we rely on the underlying vttablet/MySQL -to eventually validate such references. - -Every 'logicalPlan' primitive must satisfy the logicalPlan -interface. This allows the planbuilder to outsource -primitive-specific handling into those implementations. - -Variable naming: The AST, planbuilder and engine -are three different worlds that use overloaded -names that are contextually similar, but different. -For example a join is: - Join is the AST node that represents the SQL construct - join is a logicalPlan in the current package - Join is a primitive in the engine package -In order to disambiguate, we'll use the 'a' prefix -for AST vars, and the 'e' prefix for engine vars. -So, 'ajoin' would be of type *sqlparser.Join, and -'ejoin' would be of type *engine.Join. For the planbuilder -join we'll use 'jb'. 
-*/ diff --git a/go/vt/vtgate/planbuilder/expr.go b/go/vt/vtgate/planbuilder/expr.go deleted file mode 100644 index fb92707b0ca..00000000000 --- a/go/vt/vtgate/planbuilder/expr.go +++ /dev/null @@ -1,337 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "bytes" - "fmt" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/vindexes" -) - -type subqueryInfo struct { - ast *sqlparser.Subquery - plan logicalPlan - origin logicalPlan -} - -// findOrigin identifies the right-most origin referenced by expr. In situations where -// the expression references columns from multiple origins, the expression will be -// pushed to the right-most origin, and the executor will use the results of -// the previous origins to feed the necessary values to the primitives on the right. -// -// If the expression contains a subquery, the right-most origin identification -// also follows the same rules of a normal expression. This is achieved by -// looking at the Externs field of its symbol table that contains the list of -// external references. -// -// Once the target origin is identified, we have to verify that the subquery's -// route can be merged with it. If it cannot, we fail the query. This is because -// we don't have the ability to wire up subqueries through expression evaluation -// primitives. 
Consequently, if the plan for a subquery comes out as a Join, -// we can immediately error out. -// -// Since findOrigin can itself be called from within a subquery, it has to assume -// that some of the external references may actually be pointing to an outer -// query. The isLocal response from the symtab is used to make sure that we -// only analyze symbols that point to the current symtab. -// -// If an expression has no references to the current query, then the left-most -// origin is chosen as the default. -func (pb *primitiveBuilder) findOrigin(expr sqlparser.Expr, reservedVars *sqlparser.ReservedVars) (pullouts []*pulloutSubquery, origin logicalPlan, pushExpr sqlparser.Expr, err error) { - // highestOrigin tracks the highest origin referenced by the expression. - // Default is the first. - highestOrigin := first(pb.plan) - - // subqueries tracks the list of subqueries encountered. - var subqueries []subqueryInfo - - // constructsMap tracks the sub-construct in which a subquery - // occurred. The construct type decides on how the query gets - // pulled out. 
- constructsMap := make(map[*sqlparser.Subquery]sqlparser.Expr) - - err = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - switch node := node.(type) { - case *sqlparser.ColName: - newOrigin, isLocal, err := pb.st.Find(node) - if err != nil { - return false, err - } - if isLocal && newOrigin.Order() > highestOrigin.Order() { - highestOrigin = newOrigin - } - case *sqlparser.ComparisonExpr: - if node.Operator == sqlparser.InOp || node.Operator == sqlparser.NotInOp { - if sq, ok := node.Right.(*sqlparser.Subquery); ok { - constructsMap[sq] = node - } - } - case *sqlparser.ExistsExpr: - constructsMap[node.Subquery] = node - case *sqlparser.Subquery: - spb := newPrimitiveBuilder(pb.vschema, pb.jt) - switch stmt := node.Select.(type) { - case *sqlparser.Select: - if err := spb.processSelect(stmt, reservedVars, pb.st, ""); err != nil { - return false, err - } - case *sqlparser.Union: - if err := spb.processUnion(stmt, reservedVars, pb.st); err != nil { - return false, err - } - default: - return false, vterrors.VT13001(fmt.Sprintf("unexpected SELECT type: %T", node)) - } - sqi := subqueryInfo{ - ast: node, - plan: spb.plan, - } - for _, extern := range spb.st.Externs { - // No error expected. These are resolved externs. 
- newOrigin, isLocal, _ := pb.st.Find(extern) - if !isLocal { - continue - } - if highestOrigin.Order() < newOrigin.Order() { - highestOrigin = newOrigin - } - if sqi.origin == nil { - sqi.origin = newOrigin - } else if sqi.origin.Order() < newOrigin.Order() { - sqi.origin = newOrigin - } - } - subqueries = append(subqueries, sqi) - return false, nil - } - return true, nil - }, expr) - if err != nil { - return nil, nil, nil, err - } - - highestRoute, _ := highestOrigin.(*route) - for _, sqi := range subqueries { - subroute, _ := sqi.plan.(*route) - if highestRoute != nil && subroute != nil && highestRoute.MergeSubquery(pb, subroute) { - continue - } - if sqi.origin != nil { - return nil, nil, nil, vterrors.VT12001("cross-shard correlated subquery") - } - - sqName, hasValues := pb.jt.GenerateSubqueryVars() - construct, ok := constructsMap[sqi.ast] - if !ok { - // (subquery) -> :_sq - expr = sqlparser.ReplaceExpr(expr, sqi.ast, sqlparser.NewArgument(sqName)) - pullouts = append(pullouts, newPulloutSubquery(popcode.PulloutValue, sqName, hasValues, sqi.plan)) - continue - } - switch construct := construct.(type) { - case *sqlparser.ComparisonExpr: - if construct.Operator == sqlparser.InOp { - // a in (subquery) -> (:__sq_has_values = 1 and (a in ::__sq)) - right := &sqlparser.ComparisonExpr{ - Operator: construct.Operator, - Left: construct.Left, - Right: sqlparser.ListArg(sqName), - } - left := &sqlparser.ComparisonExpr{ - Left: sqlparser.NewArgument(hasValues), - Operator: sqlparser.EqualOp, - Right: sqlparser.NewIntLiteral("1"), - } - newExpr := &sqlparser.AndExpr{ - Left: left, - Right: right, - } - expr = sqlparser.ReplaceExpr(expr, construct, newExpr) - pullouts = append(pullouts, newPulloutSubquery(popcode.PulloutIn, sqName, hasValues, sqi.plan)) - } else { - // a not in (subquery) -> (:__sq_has_values = 0 or (a not in ::__sq)) - left := &sqlparser.ComparisonExpr{ - Left: sqlparser.NewArgument(hasValues), - Operator: sqlparser.EqualOp, - Right: 
sqlparser.NewIntLiteral("0"), - } - right := &sqlparser.ComparisonExpr{ - Operator: construct.Operator, - Left: construct.Left, - Right: sqlparser.ListArg(sqName), - } - newExpr := &sqlparser.OrExpr{ - Left: left, - Right: right, - } - expr = sqlparser.ReplaceExpr(expr, construct, newExpr) - pullouts = append(pullouts, newPulloutSubquery(popcode.PulloutNotIn, sqName, hasValues, sqi.plan)) - } - case *sqlparser.ExistsExpr: - // exists (subquery) -> :__sq_has_values - expr = sqlparser.ReplaceExpr(expr, construct, sqlparser.NewArgument(hasValues)) - pullouts = append(pullouts, newPulloutSubquery(popcode.PulloutExists, sqName, hasValues, sqi.plan)) - } - } - return pullouts, highestOrigin, expr, nil -} - -var dummyErr = vterrors.VT13001("dummy") - -func hasSubquery(node sqlparser.SQLNode) bool { - has := false - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - switch node.(type) { - case *sqlparser.DerivedTable, *sqlparser.Subquery: - has = true - return false, dummyErr - } - return true, nil - }, node) - return has -} - -func (pb *primitiveBuilder) finalizeUnshardedDMLSubqueries(reservedVars *sqlparser.ReservedVars, nodes ...sqlparser.SQLNode) (bool, []*vindexes.Table) { - var keyspace string - var tables []*vindexes.Table - if rb, ok := pb.plan.(*route); ok { - keyspace = rb.eroute.Keyspace.Name - } else { - // This code is unreachable because the caller checks. 
- return false, nil - } - - for _, node := range nodes { - samePlan := true - inSubQuery := false - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - switch nodeType := node.(type) { - case *sqlparser.Subquery, *sqlparser.Insert: - inSubQuery = true - return true, nil - case *sqlparser.Select: - if !inSubQuery { - return true, nil - } - spb := newPrimitiveBuilder(pb.vschema, pb.jt) - if err := spb.processSelect(nodeType, reservedVars, pb.st, ""); err != nil { - samePlan = false - return false, err - } - innerRoute, ok := spb.plan.(*route) - if !ok { - samePlan = false - return false, dummyErr - } - if innerRoute.eroute.Keyspace.Name != keyspace { - samePlan = false - return false, dummyErr - } - for _, sub := range innerRoute.substitutions { - *sub.oldExpr = *sub.newExpr - } - spbTables, err := spb.st.AllVschemaTableNames() - if err != nil { - return false, err - } - tables = append(tables, spbTables...) - case *sqlparser.Union: - if !inSubQuery { - return true, nil - } - spb := newPrimitiveBuilder(pb.vschema, pb.jt) - if err := spb.processUnion(nodeType, reservedVars, pb.st); err != nil { - samePlan = false - return false, err - } - innerRoute, ok := spb.plan.(*route) - if !ok { - samePlan = false - return false, dummyErr - } - if innerRoute.eroute.Keyspace.Name != keyspace { - samePlan = false - return false, dummyErr - } - } - - return true, nil - }, node) - if !samePlan { - return false, nil - } - } - return true, tables -} - -func valEqual(a, b sqlparser.Expr) bool { - switch a := a.(type) { - case *sqlparser.ColName: - if b, ok := b.(*sqlparser.ColName); ok { - return a.Metadata == b.Metadata - } - case *sqlparser.Argument: - b, ok := b.(*sqlparser.Argument) - if !ok { - return false - } - return a.Name == b.Name - case *sqlparser.Literal: - b, ok := b.(*sqlparser.Literal) - if !ok { - return false - } - switch a.Type { - case sqlparser.StrVal: - switch b.Type { - case sqlparser.StrVal: - return a.Val == b.Val - case 
sqlparser.HexVal: - return hexEqual(b, a) - } - case sqlparser.HexVal: - return hexEqual(a, b) - case sqlparser.IntVal: - if b.Type == (sqlparser.IntVal) { - return a.Val == b.Val - } - } - } - return false -} - -func hexEqual(a, b *sqlparser.Literal) bool { - v, err := a.HexDecode() - if err != nil { - return false - } - switch b.Type { - case sqlparser.StrVal: - return bytes.Equal(v, b.Bytes()) - case sqlparser.HexVal: - v2, err := b.HexDecode() - if err != nil { - return false - } - return bytes.Equal(v, v2) - } - return false -} diff --git a/go/vt/vtgate/planbuilder/expr_test.go b/go/vt/vtgate/planbuilder/expr_test.go deleted file mode 100644 index b59bd034810..00000000000 --- a/go/vt/vtgate/planbuilder/expr_test.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "testing" - - "vitess.io/vitess/go/vt/sqlparser" -) - -func TestValEqual(t *testing.T) { - c1 := &column{} - c2 := &column{} - testcases := []struct { - in1, in2 sqlparser.Expr - out bool - }{{ - in1: &sqlparser.ColName{Metadata: c1, Name: sqlparser.NewIdentifierCI("c1")}, - in2: &sqlparser.ColName{Metadata: c1, Name: sqlparser.NewIdentifierCI("c1")}, - out: true, - }, { - // Objects that have the same name need not be the same because - // they might have appeared in different scopes and could have - // resolved to different columns. 
- in1: &sqlparser.ColName{Metadata: c1, Name: sqlparser.NewIdentifierCI("c1")}, - in2: &sqlparser.ColName{Metadata: c2, Name: sqlparser.NewIdentifierCI("c1")}, - out: false, - }, { - in1: sqlparser.NewArgument("aa"), - in2: &sqlparser.ColName{Metadata: c1, Name: sqlparser.NewIdentifierCI("c1")}, - out: false, - }, { - in1: sqlparser.NewArgument("aa"), - in2: sqlparser.NewArgument("aa"), - out: true, - }, { - in1: sqlparser.NewArgument("aa"), - in2: sqlparser.NewArgument("bb"), - }, { - in1: sqlparser.NewStrLiteral("aa"), - in2: sqlparser.NewStrLiteral("aa"), - out: true, - }, { - in1: sqlparser.NewStrLiteral("11"), - in2: sqlparser.NewHexLiteral("3131"), - out: true, - }, { - in1: sqlparser.NewHexLiteral("3131"), - in2: sqlparser.NewStrLiteral("11"), - out: true, - }, { - in1: sqlparser.NewHexLiteral("3131"), - in2: sqlparser.NewHexLiteral("3131"), - out: true, - }, { - in1: sqlparser.NewHexLiteral("3131"), - in2: sqlparser.NewHexLiteral("3132"), - out: false, - }, { - in1: sqlparser.NewHexLiteral("313"), - in2: sqlparser.NewHexLiteral("3132"), - out: false, - }, { - in1: sqlparser.NewHexLiteral("3132"), - in2: sqlparser.NewHexLiteral("313"), - out: false, - }, { - in1: sqlparser.NewIntLiteral("313"), - in2: sqlparser.NewHexLiteral("3132"), - out: false, - }, { - in1: sqlparser.NewHexLiteral("3132"), - in2: sqlparser.NewIntLiteral("313"), - out: false, - }, { - in1: sqlparser.NewIntLiteral("313"), - in2: sqlparser.NewIntLiteral("313"), - out: true, - }, { - in1: sqlparser.NewIntLiteral("313"), - in2: sqlparser.NewIntLiteral("314"), - out: false, - }} - for _, tc := range testcases { - out := valEqual(tc.in1, tc.in2) - if out != tc.out { - t.Errorf("valEqual(%#v, %#v): %v, want %v", tc.in1, tc.in2, out, tc.out) - } - } -} diff --git a/go/vt/vtgate/planbuilder/expression_converter.go b/go/vt/vtgate/planbuilder/expression_converter.go index f100d0d93e0..7a9dc374ea6 100644 --- a/go/vt/vtgate/planbuilder/expression_converter.go +++ 
b/go/vt/vtgate/planbuilder/expression_converter.go @@ -40,7 +40,7 @@ func booleanValues(astExpr sqlparser.Expr) evalengine.Expr { ) switch node := astExpr.(type) { case *sqlparser.Literal: - //set autocommit = 'on' + // set autocommit = 'on' if node.Type == sqlparser.StrVal { switch strings.ToLower(node.Val) { case "on": @@ -50,7 +50,7 @@ func booleanValues(astExpr sqlparser.Expr) evalengine.Expr { } } case *sqlparser.ColName: - //set autocommit = on + // set autocommit = on switch node.Name.Lowered() { case "on": return ON diff --git a/go/vt/vtgate/planbuilder/fallback_planner.go b/go/vt/vtgate/planbuilder/fallback_planner.go deleted file mode 100644 index 8b76efb2a21..00000000000 --- a/go/vt/vtgate/planbuilder/fallback_planner.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - "vitess.io/vitess/go/vt/sqlparser" -) - -type fallbackPlanner struct { - primary, fallback stmtPlanner -} - -var _ stmtPlanner = (*fallbackPlanner)(nil).plan - -func (fp *fallbackPlanner) safePrimary(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (res *planResult, err error) { - defer func() { - // if the primary planner panics, we want to catch it here so we can fall back - if r := recover(); r != nil { - err = fmt.Errorf("%v", r) // not using vterror since this will only be used for logging - } - }() - res, err = fp.primary(stmt, reservedVars, vschema) - return -} - -func (fp *fallbackPlanner) plan(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { - res, err := fp.safePrimary(sqlparser.CloneStatement(stmt), reservedVars, vschema) - if err != nil { - return fp.fallback(stmt, reservedVars, vschema) - } - return res, nil -} diff --git a/go/vt/vtgate/planbuilder/fallback_planner_test.go b/go/vt/vtgate/planbuilder/fallback_planner_test.go deleted file mode 100644 index d0110f72428..00000000000 --- a/go/vt/vtgate/planbuilder/fallback_planner_test.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package planbuilder - -import ( - "fmt" - "testing" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/vt/sqlparser" - - "vitess.io/vitess/go/vt/vtgate/engine" -) - -type testPlanner struct { - panic any - err error - res engine.Primitive - messWithAST func(sqlparser.Statement) - called bool -} - -var _ stmtPlanner = (*testPlanner)(nil).plan - -func (tp *testPlanner) plan(statement sqlparser.Statement, vars *sqlparser.ReservedVars, schema plancontext.VSchema) (*planResult, error) { - tp.called = true - if tp.panic != nil { - panic(tp.panic) - } - if tp.messWithAST != nil { - tp.messWithAST(statement) - } - return newPlanResult(tp.res), tp.err -} - -func TestFallbackPlanner(t *testing.T) { - a := &testPlanner{} - b := &testPlanner{} - fb := &fallbackPlanner{ - primary: a.plan, - fallback: b.plan, - } - - stmt := &sqlparser.Select{} - var vschema plancontext.VSchema - - // first planner succeeds - _, _ = fb.plan(stmt, nil, vschema) - assert.True(t, a.called) - assert.False(t, b.called) - a.called = false - - // first planner errors - a.err = fmt.Errorf("fail") - _, _ = fb.plan(stmt, nil, vschema) - assert.True(t, a.called) - assert.True(t, b.called) - - a.called = false - b.called = false - - // first planner panics - a.panic = "oh noes" - _, _ = fb.plan(stmt, nil, vschema) - assert.True(t, a.called) - assert.True(t, b.called) -} - -func TestFallbackClonesBeforePlanning(t *testing.T) { - a := &testPlanner{ - messWithAST: func(statement sqlparser.Statement) { - sel := statement.(*sqlparser.Select) - sel.SelectExprs = nil - }, - } - b := &testPlanner{} - fb := &fallbackPlanner{ - primary: a.plan, - fallback: b.plan, - } - - stmt := &sqlparser.Select{ - SelectExprs: sqlparser.SelectExprs{&sqlparser.StarExpr{}}, - } - var vschema plancontext.VSchema - - // first planner succeeds - _, _ = fb.plan(stmt, nil, vschema) - - assert.NotNilf(t, stmt.SelectExprs, "should not have changed") -} diff 
--git a/go/vt/vtgate/planbuilder/filter.go b/go/vt/vtgate/planbuilder/filter.go index 589287495a7..c3686380446 100644 --- a/go/vt/vtgate/planbuilder/filter.go +++ b/go/vt/vtgate/planbuilder/filter.go @@ -17,11 +17,7 @@ limitations under the License. package planbuilder import ( - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type ( @@ -34,38 +30,6 @@ type ( var _ logicalPlan = (*filter)(nil) -func resolveFromPlan(ctx *plancontext.PlanningContext, plan logicalPlan, canPushProjection bool) evalengine.ColumnResolver { - return func(expr *sqlparser.ColName) (int, error) { - offset, added, err := pushProjection(ctx, &sqlparser.AliasedExpr{Expr: expr}, plan, true, true, false) - if err != nil { - return 0, err - } - if added && !canPushProjection { - return 0, vterrors.VT13001("column should not be pushed to projection while doing a column lookup") - } - return offset, nil - } -} - -// newFilter builds a new filter. -func newFilter(ctx *plancontext.PlanningContext, plan logicalPlan, expr sqlparser.Expr) (*filter, error) { - predicate, err := evalengine.Translate(expr, &evalengine.Config{ - ResolveColumn: resolveFromPlan(ctx, plan, false), - ResolveType: ctx.SemTable.TypeForExpr, - Collation: ctx.SemTable.Collation, - }) - if err != nil { - return nil, err - } - return &filter{ - logicalPlanCommon: newBuilderCommon(plan), - efilter: &engine.Filter{ - Predicate: predicate, - ASTPredicate: expr, - }, - }, nil -} - // Primitive implements the logicalPlan interface func (l *filter) Primitive() engine.Primitive { l.efilter.Input = l.input.Primitive() diff --git a/go/vt/vtgate/planbuilder/filtering.go b/go/vt/vtgate/planbuilder/filtering.go deleted file mode 100644 index 429bdf93cc2..00000000000 --- a/go/vt/vtgate/planbuilder/filtering.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" -) - -// planFilter solves this particular expression, either by pushing it down to a child or changing this logicalPlan -func planFilter(pb *primitiveBuilder, input logicalPlan, filter sqlparser.Expr, whereType string, origin logicalPlan) (logicalPlan, error) { - switch node := input.(type) { - case *join: - isLeft := true - var in logicalPlan - if node.isOnLeft(origin.Order()) { - in = node.Left - } else { - if node.ejoin.Opcode == engine.LeftJoin { - return nil, vterrors.VT12001("cross-shard LEFT JOIN and WHERE clause") - } - isLeft = false - in = node.Right - } - - filtered, err := planFilter(pb, in, filter, whereType, origin) - if err != nil { - return nil, err - } - if isLeft { - node.Left = filtered - } else { - node.Right = filtered - } - return node, nil - - case *route: - sel := node.Select.(*sqlparser.Select) - switch whereType { - case sqlparser.WhereStr: - sel.AddWhere(filter) - case sqlparser.HavingStr: - sel.AddHaving(filter) - } - node.UpdatePlan(pb, filter) - return node, nil - case *pulloutSubquery: - plan, err := planFilter(pb, node.underlying, filter, whereType, origin) - if err != nil { - return nil, err - } - node.underlying = plan - return node, nil - case 
*vindexFunc: - return filterVindexFunc(node, filter) - case *simpleProjection: - return nil, vterrors.VT12001("filtering on results of cross-shard subquery") - case *orderedAggregate: - return nil, vterrors.VT12001("filtering on results of aggregates") - } - - return nil, vterrors.VT13001(fmt.Sprintf("unreachable %T.filtering", input)) -} - -func filterVindexFunc(node *vindexFunc, filter sqlparser.Expr) (logicalPlan, error) { - if node.eVindexFunc.Opcode != engine.VindexNone { - return nil, vterrors.VT12001(operators.VindexUnsupported + " (multiple filters)") - } - - // Check LHS. - comparison, ok := filter.(*sqlparser.ComparisonExpr) - if !ok { - return nil, vterrors.VT12001(operators.VindexUnsupported + " (not a comparison)") - } - if comparison.Operator != sqlparser.EqualOp && comparison.Operator != sqlparser.InOp { - return nil, vterrors.VT12001(operators.VindexUnsupported + " (not equality)") - } - colname, ok := comparison.Left.(*sqlparser.ColName) - if !ok { - return nil, vterrors.VT12001(operators.VindexUnsupported + " (lhs is not a column)") - } - if !colname.Name.EqualString("id") { - return nil, vterrors.VT12001(operators.VindexUnsupported + " (lhs is not id)") - } - - // Check RHS. - // We have to check before calling NewPlanValue because NewPlanValue allows lists also. 
- if !sqlparser.IsValue(comparison.Right) && !sqlparser.IsSimpleTuple(comparison.Right) { - return nil, vterrors.VT12001(operators.VindexUnsupported + " (rhs is not a value)") - } - var err error - node.eVindexFunc.Value, err = evalengine.Translate(comparison.Right, nil) - if err != nil { - return nil, vterrors.VT12001(fmt.Sprintf("%s: %v", operators.VindexUnsupported, err)) - } - - node.eVindexFunc.Opcode = engine.VindexMap - return node, nil -} diff --git a/go/vt/vtgate/planbuilder/fk_cascade.go b/go/vt/vtgate/planbuilder/fk_cascade.go new file mode 100644 index 00000000000..5a709156955 --- /dev/null +++ b/go/vt/vtgate/planbuilder/fk_cascade.go @@ -0,0 +1,85 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package planbuilder + +import ( + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +var _ logicalPlan = (*fkCascade)(nil) + +// fkCascade is the logicalPlan for engine.FkCascade. +type fkCascade struct { + parent logicalPlan + selection logicalPlan + children []*engine.FkChild +} + +// newFkCascade builds a new fkCascade. 
+func newFkCascade(parent, selection logicalPlan, children []*engine.FkChild) *fkCascade { + return &fkCascade{ + parent: parent, + selection: selection, + children: children, + } +} + +// Primitive implements the logicalPlan interface +func (fkc *fkCascade) Primitive() engine.Primitive { + return &engine.FkCascade{ + Parent: fkc.parent.Primitive(), + Selection: fkc.selection.Primitive(), + Children: fkc.children, + } +} + +// Wireup implements the logicalPlan interface +func (fkc *fkCascade) Wireup(ctx *plancontext.PlanningContext) error { + if err := fkc.parent.Wireup(ctx); err != nil { + return err + } + return fkc.selection.Wireup(ctx) +} + +// Rewrite implements the logicalPlan interface +func (fkc *fkCascade) Rewrite(inputs ...logicalPlan) error { + if len(inputs) != 2 { + return vterrors.VT13001("fkCascade: wrong number of inputs") + } + fkc.parent = inputs[0] + fkc.selection = inputs[1] + return nil +} + +// ContainsTables implements the logicalPlan interface +func (fkc *fkCascade) ContainsTables() semantics.TableSet { + return fkc.parent.ContainsTables() +} + +// Inputs implements the logicalPlan interface +func (fkc *fkCascade) Inputs() []logicalPlan { + return []logicalPlan{fkc.parent, fkc.selection} +} + +// OutputColumns implements the logicalPlan interface +func (fkc *fkCascade) OutputColumns() []sqlparser.SelectExpr { + return nil +} diff --git a/go/vt/vtgate/planbuilder/fk_verify.go b/go/vt/vtgate/planbuilder/fk_verify.go new file mode 100644 index 00000000000..71638f88b9b --- /dev/null +++ b/go/vt/vtgate/planbuilder/fk_verify.go @@ -0,0 +1,103 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package planbuilder + +import ( + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +var _ logicalPlan = (*fkVerify)(nil) + +type verifyLP struct { + verify logicalPlan + typ string +} + +// fkVerify is the logicalPlan for engine.FkVerify. +type fkVerify struct { + input logicalPlan + verify []*verifyLP +} + +// newFkVerify builds a new fkVerify. +func newFkVerify(input logicalPlan, verify []*verifyLP) *fkVerify { + return &fkVerify{ + input: input, + verify: verify, + } +} + +// Primitive implements the logicalPlan interface +func (fkc *fkVerify) Primitive() engine.Primitive { + var verify []*engine.Verify + for _, v := range fkc.verify { + verify = append(verify, &engine.Verify{ + Exec: v.verify.Primitive(), + Typ: v.typ, + }) + } + return &engine.FkVerify{ + Exec: fkc.input.Primitive(), + Verify: verify, + } +} + +// Wireup implements the logicalPlan interface +func (fkc *fkVerify) Wireup(ctx *plancontext.PlanningContext) error { + for _, v := range fkc.verify { + err := v.verify.Wireup(ctx) + if err != nil { + return err + } + } + return fkc.input.Wireup(ctx) +} + +// Rewrite implements the logicalPlan interface +func (fkc *fkVerify) Rewrite(inputs ...logicalPlan) error { + if len(fkc.verify) != len(inputs)-1 { + return vterrors.VT13001("fkVerify: wrong number of inputs") + } + fkc.input = inputs[0] + for i := 1; i < len(inputs); i++ { + fkc.verify[i-1].verify = inputs[i] + } + return nil +} + 
+// ContainsTables implements the logicalPlan interface +func (fkc *fkVerify) ContainsTables() semantics.TableSet { + return fkc.input.ContainsTables() +} + +// Inputs implements the logicalPlan interface +func (fkc *fkVerify) Inputs() []logicalPlan { + inputs := []logicalPlan{fkc.input} + for _, v := range fkc.verify { + inputs = append(inputs, v.verify) + } + return inputs +} + +// OutputColumns implements the logicalPlan interface +func (fkc *fkVerify) OutputColumns() []sqlparser.SelectExpr { + return nil +} diff --git a/go/vt/vtgate/planbuilder/from.go b/go/vt/vtgate/planbuilder/from.go deleted file mode 100644 index cdd5a801be2..00000000000 --- a/go/vt/vtgate/planbuilder/from.go +++ /dev/null @@ -1,430 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - "sort" - "strings" - - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/vtgate/evalengine" - - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/vterrors" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/vindexes" -) - -// This file has functions to analyze the FROM clause. - -// processDMLTable analyzes the FROM clause for DMLs and returns a route. 
-func (pb *primitiveBuilder) processDMLTable(tableExprs sqlparser.TableExprs, reservedVars *sqlparser.ReservedVars, where sqlparser.Expr) (*route, error) { - if err := pb.processTableExprs(tableExprs, reservedVars, where); err != nil { - return nil, err - } - rb, ok := pb.plan.(*route) - if !ok { - return nil, vterrors.VT12001("multi-shard or vindex write statement") - } - for _, sub := range rb.substitutions { - *sub.oldExpr = *sub.newExpr - } - return rb, nil -} - -// processTableExprs analyzes the FROM clause. It produces a logicalPlan -// with all the routes identified. -func (pb *primitiveBuilder) processTableExprs(tableExprs sqlparser.TableExprs, reservedVars *sqlparser.ReservedVars, where sqlparser.Expr) error { - if len(tableExprs) == 1 { - return pb.processTableExpr(tableExprs[0], reservedVars, where) - } - - if err := pb.processTableExpr(tableExprs[0], reservedVars, where); err != nil { - return err - } - rpb := newPrimitiveBuilder(pb.vschema, pb.jt) - if err := rpb.processTableExprs(tableExprs[1:], reservedVars, where); err != nil { - return err - } - return pb.join(rpb, nil, reservedVars, where) -} - -// processTableExpr produces a logicalPlan subtree for the given TableExpr. -func (pb *primitiveBuilder) processTableExpr(tableExpr sqlparser.TableExpr, reservedVars *sqlparser.ReservedVars, where sqlparser.Expr) error { - switch tableExpr := tableExpr.(type) { - case *sqlparser.AliasedTableExpr: - return pb.processAliasedTable(tableExpr, reservedVars) - case *sqlparser.ParenTableExpr: - err := pb.processTableExprs(tableExpr.Exprs, reservedVars, where) - // If it's a route, preserve the parenthesis so things - // don't associate differently when more things are pushed - // into it. FROM a, (b, c) should not become FROM a, b, c. 
- if rb, ok := pb.plan.(*route); ok { - sel, ok := rb.Select.(*sqlparser.Select) - if !ok { - return vterrors.VT13002(sqlparser.String(rb.Select)) - } - - sel.From = sqlparser.TableExprs{&sqlparser.ParenTableExpr{Exprs: sel.From}} - } - return err - case *sqlparser.JoinTableExpr: - return pb.processJoin(tableExpr, reservedVars, where) - case *sqlparser.JSONTableExpr: - return vterrors.VT12001("JSON_TABLE expressions") - } - return vterrors.VT13001(fmt.Sprintf("unexpected table expression type: %T", tableExpr)) -} - -// processAliasedTable produces a logicalPlan subtree for the given AliasedTableExpr. -// If the expression is a subquery, then the primitive will create a table -// for it in the symtab. If the subquery is a route, then we build a route -// primitive with the subquery in the From clause, because a route is more -// versatile than a subquery. If a subquery becomes a route, then any result -// columns that represent underlying vindex columns are also exposed as -// vindex columns. 
-func (pb *primitiveBuilder) processAliasedTable(tableExpr *sqlparser.AliasedTableExpr, reservedVars *sqlparser.ReservedVars) error { - if tableExpr.Columns != nil { - return vterrors.VT12001("column aliases in derived table") - } - switch expr := tableExpr.Expr.(type) { - case sqlparser.TableName: - return pb.buildTablePrimitive(tableExpr, expr) - case *sqlparser.DerivedTable: - if expr.Lateral { - return vterrors.VT12001("lateral derived tables") - } - spb := newPrimitiveBuilder(pb.vschema, pb.jt) - switch stmt := expr.Select.(type) { - case *sqlparser.Select: - if err := spb.processSelect(stmt, reservedVars, nil, ""); err != nil { - return err - } - case *sqlparser.Union: - if err := spb.processUnion(stmt, reservedVars, nil); err != nil { - return err - } - default: - return vterrors.VT13001(fmt.Sprintf("unexpected SELECT type: %T", stmt)) - } - - subroute, ok := spb.plan.(*route) - if !ok { - var err error - pb.plan, pb.st, err = newSimpleProjection(tableExpr.As, spb.plan) - if err != nil { - return err - } - pb.plan.Reorder(0) - return nil - } - - // Since a route is more versatile than a subquery, we - // build a route primitive that has the subquery in its - // FROM clause. This allows for other constructs to be - // later pushed into it. - rb, st := newRoute(&sqlparser.Select{From: []sqlparser.TableExpr{tableExpr}}) - rb.substitutions = subroute.substitutions - rb.condition = subroute.condition - rb.eroute = subroute.eroute - subroute.Redirect = rb - - // The subquery needs to be represented as a new logical table in the symtab. - // The new route will inherit the routeOptions of the underlying subquery. - // For this, we first build new vschema tables based on the columns returned - // by the subquery, and re-expose possible vindexes. When added to the symtab, - // a new set of column references will be generated against the new tables, - // and those vindex maps will be returned. 
They have to replace the old vindex - // maps of the inherited route options. - var tableNames []string - spbTables, err := spb.st.AllVschemaTableNames() - if err != nil { - return err - } - for _, table := range spbTables { - tableNames = append(tableNames, table.Name.String()) - } - sort.Strings(tableNames) - vschemaTable := &vindexes.Table{ - Keyspace: subroute.eroute.Keyspace, - Name: sqlparser.NewIdentifierCS(strings.Join(tableNames, ", ")), - } - for _, rc := range subroute.ResultColumns() { - if rc.column.vindex == nil { - continue - } - // Check if a colvindex of the same name already exists. - // Dups are not allowed in subqueries in this situation. - for _, colVindex := range vschemaTable.ColumnVindexes { - if colVindex.Columns[0].Equal(rc.alias) { - return vterrors.VT12001(fmt.Sprintf("duplicate column aliases: %v", rc.alias)) - } - } - vschemaTable.ColumnVindexes = append(vschemaTable.ColumnVindexes, &vindexes.ColumnVindex{ - Columns: []sqlparser.IdentifierCI{rc.alias}, - Vindex: rc.column.vindex, - }) - } - if err := st.AddVSchemaTable(sqlparser.TableName{Name: tableExpr.As}, vschemaTable, rb); err != nil { - return err - } - - pb.plan, pb.st = rb, st - return nil - } - return vterrors.VT13001(fmt.Sprintf("unexpected table expression type: %T", tableExpr.Expr)) -} - -// buildTablePrimitive builds a primitive based on the table name. 
-func (pb *primitiveBuilder) buildTablePrimitive(tableExpr *sqlparser.AliasedTableExpr, tableName sqlparser.TableName) error { - alias := tableName - if !tableExpr.As.IsEmpty() { - alias = sqlparser.TableName{Name: tableExpr.As} - } - sel := &sqlparser.Select{From: sqlparser.TableExprs([]sqlparser.TableExpr{tableExpr})} - - if sqlparser.SystemSchema(tableName.Qualifier.String()) { - ks, err := pb.vschema.AnyKeyspace() - if err != nil { - return err - } - rb, st := newRoute(sel) - rb.eroute = engine.NewSimpleRoute(engine.DBA, ks) - rb.eroute.TableName = sqlparser.String(tableName) - pb.plan, pb.st = rb, st - // Add the table to symtab - return st.AddTable(&table{ - alias: alias, - origin: rb, - }) - } - - vschemaTable, vindex, _, destTableType, destTarget, err := pb.vschema.FindTableOrVindex(tableName) - if err != nil { - return err - } - if vindex != nil { - single, ok := vindex.(vindexes.SingleColumn) - if !ok { - return vterrors.VT12001("multi-column vindexes") - } - pb.plan, pb.st = newVindexFunc(alias, single) - return nil - } - - sourceTable, err := pb.tryRedirectGen4InsertToSource(vschemaTable) - if err != nil { - return err - } - if sourceTable != nil { - vschemaTable = sourceTable - } - - rb, st := newRoute(sel) - pb.plan, pb.st = rb, st - if err := st.AddVSchemaTable(alias, vschemaTable, rb); err != nil { - return err - } - - sub := &tableSubstitution{ - oldExpr: tableExpr, - } - if tableExpr.As.IsEmpty() { - if tableName.Name != vschemaTable.Name { - // Table name does not match. Change and alias it to old name. - sub.newExpr = &sqlparser.AliasedTableExpr{ - Expr: sqlparser.TableName{Name: vschemaTable.Name}, - As: tableName.Name, - } - } - } else { - // Table is already aliased. - if tableName.Name != vschemaTable.Name { - // Table name does not match. Change it and reuse existing alias. 
- sub.newExpr = &sqlparser.AliasedTableExpr{ - Expr: sqlparser.TableName{Name: vschemaTable.Name}, - As: tableExpr.As, - } - } - } - if sub != nil && sub.newExpr != nil { - rb.substitutions = []*tableSubstitution{sub} - } - - var eroute *engine.Route - switch { - case vschemaTable.Type == vindexes.TypeSequence: - eroute = engine.NewSimpleRoute(engine.Next, vschemaTable.Keyspace) - vindex, _ = vindexes.NewBinary("binary", nil) - eroute.Vindex, _ = vindex.(vindexes.SingleColumn) - lit := evalengine.NewLiteralString(vschemaTable.Pinned, collations.TypedCollation{}) - eroute.Values = []evalengine.Expr{lit} - case vschemaTable.Type == vindexes.TypeReference: - eroute = engine.NewSimpleRoute(engine.Reference, vschemaTable.Keyspace) - case !vschemaTable.Keyspace.Sharded: - eroute = engine.NewSimpleRoute(engine.Unsharded, vschemaTable.Keyspace) - case vschemaTable.Pinned == nil || (vschemaTable.Pinned != nil && tableName.Name.String() == "dual"): - eroute = engine.NewSimpleRoute(engine.Scatter, vschemaTable.Keyspace) - eroute.TargetDestination = destTarget - eroute.TargetTabletType = destTableType - default: - // Pinned tables have their keyspace ids already assigned. - // Use the Binary vindex, which is the identity function - // for keyspace id. - eroute = engine.NewSimpleRoute(engine.EqualUnique, vschemaTable.Keyspace) - vindex, _ = vindexes.NewBinary("binary", nil) - eroute.Vindex = vindex - lit := evalengine.NewLiteralString(vschemaTable.Pinned, collations.TypedCollation{}) - eroute.Values = []evalengine.Expr{lit} - } - eroute.TableName = sqlparser.String(vschemaTable.Name) - rb.eroute = eroute - - return nil -} - -// processJoin produces a logicalPlan subtree for the given Join. -// If the left and right nodes can be part of the same route, -// then it's a route. Otherwise, it's a join. 
-func (pb *primitiveBuilder) processJoin(ajoin *sqlparser.JoinTableExpr, reservedVars *sqlparser.ReservedVars, where sqlparser.Expr) error { - switch ajoin.Join { - case sqlparser.NormalJoinType, sqlparser.StraightJoinType, sqlparser.LeftJoinType: - case sqlparser.RightJoinType: - convertToLeftJoin(ajoin) - default: - return vterrors.VT12001(ajoin.Join.ToString()) - } - if err := pb.processTableExpr(ajoin.LeftExpr, reservedVars, where); err != nil { - return err - } - rpb := newPrimitiveBuilder(pb.vschema, pb.jt) - if err := rpb.processTableExpr(ajoin.RightExpr, reservedVars, where); err != nil { - return err - } - return pb.join(rpb, ajoin, reservedVars, where) -} - -// If the primitiveBuilder context is a Gen4 planner, the statement is an -// INSERT, and the vschema table is a reference with a valid source reference, -// then redirect the INSERT back to the source. -func (pb *primitiveBuilder) tryRedirectGen4InsertToSource(vschemaTable *vindexes.Table) (*vindexes.Table, error) { - if pb.stmt == nil { - return nil, nil - } - if _, ok := pb.stmt.(*sqlparser.Insert); !ok { - return nil, nil - } - if pb.vschema.Planner() == querypb.ExecuteOptions_V3 { - return nil, nil - } - if vschemaTable.Type != vindexes.TypeReference || vschemaTable.Source == nil { - return nil, nil - } - vschemaTable, _, _, _, _, err := pb.vschema.FindTableOrVindex(vschemaTable.Source.TableName) - return vschemaTable, err -} - -// convertToLeftJoin converts a right join into a left join. -func convertToLeftJoin(ajoin *sqlparser.JoinTableExpr) { - newRHS := ajoin.LeftExpr - // If the LHS is a join, we have to parenthesize it. - // Otherwise, it can be used as is. 
- if _, ok := newRHS.(*sqlparser.JoinTableExpr); ok { - newRHS = &sqlparser.ParenTableExpr{ - Exprs: sqlparser.TableExprs{newRHS}, - } - } - ajoin.LeftExpr, ajoin.RightExpr = ajoin.RightExpr, newRHS - ajoin.Join = sqlparser.LeftJoinType -} - -func (pb *primitiveBuilder) join(rpb *primitiveBuilder, ajoin *sqlparser.JoinTableExpr, reservedVars *sqlparser.ReservedVars, where sqlparser.Expr) error { - // Merge the symbol tables. In the case of a left join, we have to - // ideally create new symbols that originate from the join primitive. - // However, this is not worth it for now, because the Push functions - // verify that only valid constructs are passed through in case of left join. - err := pb.st.Merge(rpb.st) - if err != nil { - return err - } - - lRoute, leftIsRoute := pb.plan.(*route) - rRoute, rightIsRoute := rpb.plan.(*route) - if !leftIsRoute || !rightIsRoute { - return newJoin(pb, rpb, ajoin, reservedVars) - } - - // Try merging the routes. - if !lRoute.JoinCanMerge(pb, rRoute, ajoin, where) { - return newJoin(pb, rpb, ajoin, reservedVars) - } - - if lRoute.eroute.Opcode == engine.Reference { - // Swap the conditions & eroutes, and then merge. - lRoute.condition, rRoute.condition = rRoute.condition, lRoute.condition - lRoute.eroute, rRoute.eroute = rRoute.eroute, lRoute.eroute - } - lRoute.substitutions = append(lRoute.substitutions, rRoute.substitutions...) - rRoute.Redirect = lRoute - - // Merge the AST. - sel, ok := lRoute.Select.(*sqlparser.Select) - if !ok { - return vterrors.VT13002(sqlparser.String(lRoute.Select)) - } - if ajoin == nil { - rhsSel, ok := rRoute.Select.(*sqlparser.Select) - if !ok { - return vterrors.VT13002(sqlparser.String(rRoute.Select)) - } - sel.From = append(sel.From, rhsSel.From...) 
- } else { - sel.From = sqlparser.TableExprs{ajoin} - } - // join table name - if lRoute.eroute.TableName != rRoute.eroute.TableName { - lRoute.eroute.TableName = strings.Join([]string{lRoute.eroute.TableName, rRoute.eroute.TableName}, ", ") - } - - // join sysTableNames - for tableName, expr := range rRoute.eroute.SysTableTableName { - _, ok := lRoute.eroute.SysTableTableName[tableName] - if !ok { - lRoute.eroute.SysTableTableName[tableName] = expr - } - } - - // Since the routes have merged, set st.singleRoute to point at - // the merged route. - pb.st.singleRoute = lRoute - if ajoin == nil { - return nil - } - pullouts, _, expr, err := pb.findOrigin(ajoin.Condition.On, reservedVars) - if err != nil { - return err - } - ajoin.Condition.On = expr - pb.addPullouts(pullouts) - for _, filter := range sqlparser.SplitAndExpression(nil, ajoin.Condition.On) { - lRoute.UpdatePlan(pb, filter) - } - return nil -} diff --git a/go/vt/vtgate/planbuilder/gen4_compare_v3_planner.go b/go/vt/vtgate/planbuilder/gen4_compare_v3_planner.go deleted file mode 100644 index 3165225db78..00000000000 --- a/go/vt/vtgate/planbuilder/gen4_compare_v3_planner.go +++ /dev/null @@ -1,134 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package planbuilder - -import ( - "context" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/engine" -) - -func gen4CompareV3Planner(query string) func(sqlparser.Statement, *sqlparser.ReservedVars, plancontext.VSchema) (*planResult, error) { - return func(statement sqlparser.Statement, vars *sqlparser.ReservedVars, ctxVSchema plancontext.VSchema) (*planResult, error) { - // we will be switching the planner version to Gen4 and V3 in order to - // create instructions using them, thus we make sure to switch back to - // the Gen4CompareV3 planner before exiting this method. - defer ctxVSchema.SetPlannerVersion(Gen4CompareV3) - switch statement.(type) { - case *sqlparser.Select, *sqlparser.Union: - // These we can compare. Everything else we'll just use the Gen4 planner - default: - return planWithPlannerVersion(statement, vars, ctxVSchema, query, Gen4) - } - - // preliminary checks on the given statement - onlyGen4, hasOrderBy, err := preliminaryChecks(statement) - if err != nil { - return nil, err - } - - // plan statement using Gen4 - gen4Primitive, gen4Err := planWithPlannerVersion(statement, vars, ctxVSchema, query, Gen4) - - // if onlyGen4 is set to true or Gen4's instruction contain a lock primitive, - // we use only Gen4's primitive and exit early without using V3's. - // since lock primitives can imply the creation or deletion of locks, - // we want to execute them once using Gen4 to avoid the duplicated locks - // or double lock-releases. 
- if onlyGen4 || (gen4Primitive != nil && hasLockPrimitive(gen4Primitive.primitive)) { - return gen4Primitive, gen4Err - } - - // get V3's plan - v3Primitive, v3Err := planWithPlannerVersion(statement, vars, ctxVSchema, query, V3) - - // check potential errors from Gen4 and V3 - err = engine.CompareErrors(v3Err, gen4Err, "v3", "Gen4") - if err != nil { - return nil, err - } - - primitive := &engine.Gen4CompareV3{ - V3: v3Primitive.primitive, - Gen4: gen4Primitive.primitive, - HasOrderBy: hasOrderBy, - } - - return newPlanResult(primitive, gen4Primitive.tables...), nil - } -} - -func preliminaryChecks(statement sqlparser.Statement) (bool, bool, error) { - var onlyGen4, hasOrderBy bool - switch s := statement.(type) { - case *sqlparser.Union: - hasOrderBy = len(s.OrderBy) > 0 - - // walk through the union and search for select statements that have - // a next val select expression, in which case we need to only use - // the Gen4 planner instead of using both Gen4 and V3 to avoid unintended - // double-incrementation of sequence. - err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - if _, isNextVal := node.(*sqlparser.Nextval); isNextVal { - onlyGen4 = true - return false, nil - } - return true, nil - }, s) - if err != nil { - return false, false, err - } - case *sqlparser.Select: - hasOrderBy = len(s.OrderBy) > 0 - - for _, expr := range s.SelectExprs { - // we are not executing the plan a second time if the query is a select next val, - // since the first execution might increment the `next` value, results will almost - // always be different between v3 and Gen4. 
- if _, nextVal := expr.(*sqlparser.Nextval); nextVal { - onlyGen4 = true - break - } - } - } - return onlyGen4, hasOrderBy, nil -} - -func planWithPlannerVersion(statement sqlparser.Statement, vars *sqlparser.ReservedVars, ctxVSchema plancontext.VSchema, query string, version plancontext.PlannerVersion) (*planResult, error) { - ctxVSchema.SetPlannerVersion(version) - stmt := sqlparser.CloneStatement(statement) - return createInstructionFor(context.Background(), query, stmt, vars, ctxVSchema, false, false) -} - -// hasLockPrimitive recursively walks through the given primitive and its children -// to see if there are any engine.Lock primitive. -func hasLockPrimitive(primitive engine.Primitive) bool { - switch primitive.(type) { - case *engine.Lock: - return true - default: - for _, p := range primitive.Inputs() { - if hasLockPrimitive(p) { - return true - } - } - } - return false -} diff --git a/go/vt/vtgate/planbuilder/gen4_planner.go b/go/vt/vtgate/planbuilder/gen4_planner.go deleted file mode 100644 index 190585e74ef..00000000000 --- a/go/vt/vtgate/planbuilder/gen4_planner.go +++ /dev/null @@ -1,708 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/vt/key" - - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" - "vitess.io/vitess/go/vt/vtgate/vindexes" -) - -func gen4Planner(query string, plannerVersion querypb.ExecuteOptions_PlannerVersion) stmtPlanner { - return func(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { - switch stmt := stmt.(type) { - case sqlparser.SelectStatement: - return gen4SelectStmtPlanner(query, plannerVersion, stmt, reservedVars, vschema) - case *sqlparser.Update: - return gen4UpdateStmtPlanner(plannerVersion, stmt, reservedVars, vschema) - case *sqlparser.Delete: - return gen4DeleteStmtPlanner(plannerVersion, stmt, reservedVars, vschema) - case *sqlparser.Insert: - return gen4InsertStmtPlanner(plannerVersion, stmt, reservedVars, vschema) - default: - return nil, vterrors.VT12001(fmt.Sprintf("%T", stmt)) - } - } -} - -func gen4SelectStmtPlanner( - query string, - plannerVersion querypb.ExecuteOptions_PlannerVersion, - stmt sqlparser.SelectStatement, - reservedVars *sqlparser.ReservedVars, - vschema plancontext.VSchema, -) (*planResult, error) { - switch node := stmt.(type) { - case *sqlparser.Select: - if node.With != nil { - return nil, vterrors.VT12001("WITH expression in SELECT statement") - } - case *sqlparser.Union: - if node.With != nil { - return nil, vterrors.VT12001("WITH expression in UNION statement") - } - } - - sel, isSel := stmt.(*sqlparser.Select) - if isSel { - // handle dual table for processing at vtgate. 
- p, err := handleDualSelects(sel, vschema) - if err != nil { - return nil, err - } - if p != nil { - used := "dual" - keyspace, ksErr := vschema.DefaultKeyspace() - if ksErr == nil { - // we are just getting the ks to log the correct table use. - // no need to fail this if we can't find the default keyspace - used = keyspace.Name + ".dual" - } - return newPlanResult(p, used), nil - } - - if sel.SQLCalcFoundRows && sel.Limit != nil { - return gen4planSQLCalcFoundRows(vschema, sel, query, reservedVars) - } - // if there was no limit, we can safely ignore the SQLCalcFoundRows directive - sel.SQLCalcFoundRows = false - } - - getPlan := func(selStatement sqlparser.SelectStatement) (logicalPlan, *semantics.SemTable, []string, error) { - return newBuildSelectPlan(selStatement, reservedVars, vschema, plannerVersion) - } - - plan, _, tablesUsed, err := getPlan(stmt) - if err != nil { - return nil, err - } - - if shouldRetryAfterPredicateRewriting(plan) { - // by transforming the predicates to CNF, the planner will sometimes find better plans - plan2, _, tablesUsed := gen4PredicateRewrite(stmt, getPlan) - if plan2 != nil { - return newPlanResult(plan2.Primitive(), tablesUsed...), nil - } - } - - primitive := plan.Primitive() - if !isSel { - return newPlanResult(primitive, tablesUsed...), nil - } - - // this is done because engine.Route doesn't handle the empty result well - // if it doesn't find a shard to send the query to. 
- // All other engine primitives can handle this, so we only need it when - // Route is the last (and only) instruction before the user sees a result - if isOnlyDual(sel) || (len(sel.GroupBy) == 0 && sel.SelectExprs.AllAggregation()) { - switch prim := primitive.(type) { - case *engine.Route: - prim.NoRoutesSpecialHandling = true - case *engine.VindexLookup: - prim.SendTo.NoRoutesSpecialHandling = true - } - } - return newPlanResult(primitive, tablesUsed...), nil -} - -func gen4planSQLCalcFoundRows(vschema plancontext.VSchema, sel *sqlparser.Select, query string, reservedVars *sqlparser.ReservedVars) (*planResult, error) { - ksName := "" - if ks, _ := vschema.DefaultKeyspace(); ks != nil { - ksName = ks.Name - } - semTable, err := semantics.Analyze(sel, ksName, vschema) - if err != nil { - return nil, err - } - // record any warning as planner warning. - vschema.PlannerWarning(semTable.Warning) - - plan, tablesUsed, err := buildSQLCalcFoundRowsPlan(query, sel, reservedVars, vschema, planSelectGen4) - if err != nil { - return nil, err - } - return newPlanResult(plan.Primitive(), tablesUsed...), nil -} - -func planSelectGen4(reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, sel *sqlparser.Select) (*jointab, logicalPlan, []string, error) { - plan, _, tablesUsed, err := newBuildSelectPlan(sel, reservedVars, vschema, 0) - if err != nil { - return nil, nil, nil, err - } - return nil, plan, tablesUsed, nil -} - -func gen4PredicateRewrite(stmt sqlparser.Statement, getPlan func(selStatement sqlparser.SelectStatement) (logicalPlan, *semantics.SemTable, []string, error)) (logicalPlan, *semantics.SemTable, []string) { - rewritten, isSel := sqlparser.RewritePredicate(stmt).(sqlparser.SelectStatement) - if !isSel { - // Fail-safe code, should never happen - return nil, nil, nil - } - plan2, st, op, err := getPlan(rewritten) - if err == nil && !shouldRetryAfterPredicateRewriting(plan2) { - // we only use this new plan if it's better than the old one we got - 
return plan2, st, op - } - return nil, nil, nil -} - -func newBuildSelectPlan( - selStmt sqlparser.SelectStatement, - reservedVars *sqlparser.ReservedVars, - vschema plancontext.VSchema, - version querypb.ExecuteOptions_PlannerVersion, -) (plan logicalPlan, semTable *semantics.SemTable, tablesUsed []string, err error) { - ksName := "" - if ks, _ := vschema.DefaultKeyspace(); ks != nil { - ksName = ks.Name - } - semTable, err = semantics.Analyze(selStmt, ksName, vschema) - if err != nil { - return nil, nil, nil, err - } - // record any warning as planner warning. - vschema.PlannerWarning(semTable.Warning) - - ctx := plancontext.NewPlanningContext(reservedVars, semTable, vschema, version) - - if ks, _ := semTable.SingleUnshardedKeyspace(); ks != nil { - plan, tablesUsed, err = selectUnshardedShortcut(ctx, selStmt, ks) - if err != nil { - return nil, nil, nil, err - } - plan = pushCommentDirectivesOnPlan(plan, selStmt) - return plan, semTable, tablesUsed, err - } - - // From this point on, we know it is not an unsharded query and return the NotUnshardedErr if there is any - if semTable.NotUnshardedErr != nil { - return nil, nil, nil, semTable.NotUnshardedErr - } - - err = queryRewrite(semTable, reservedVars, selStmt) - if err != nil { - return nil, nil, nil, err - } - - op, err := operators.PlanQuery(ctx, selStmt) - if err != nil { - return nil, nil, nil, err - } - - plan, err = transformToLogicalPlan(ctx, op, true) - if err != nil { - return nil, nil, nil, err - } - - optimizePlan(plan) - - sel, isSel := selStmt.(*sqlparser.Select) - if isSel { - if err = setMiscFunc(plan, sel); err != nil { - return nil, nil, nil, err - } - } - - if err = plan.WireupGen4(ctx); err != nil { - return nil, nil, nil, err - } - - plan = pushCommentDirectivesOnPlan(plan, selStmt) - - return plan, semTable, operators.TablesUsed(op), nil -} - -// optimizePlan removes unnecessary simpleProjections that have been created while planning -func optimizePlan(plan logicalPlan) { - for _, lp := 
range plan.Inputs() { - optimizePlan(lp) - } - - this, ok := plan.(*simpleProjection) - if !ok { - return - } - - input, ok := this.input.(*simpleProjection) - if !ok { - return - } - - for i, col := range this.eSimpleProj.Cols { - this.eSimpleProj.Cols[i] = input.eSimpleProj.Cols[col] - } - this.input = input.input -} - -func gen4UpdateStmtPlanner( - version querypb.ExecuteOptions_PlannerVersion, - updStmt *sqlparser.Update, - reservedVars *sqlparser.ReservedVars, - vschema plancontext.VSchema, -) (*planResult, error) { - if updStmt.With != nil { - return nil, vterrors.VT12001("WITH expression in UPDATE statement") - } - - ksName := "" - if ks, _ := vschema.DefaultKeyspace(); ks != nil { - ksName = ks.Name - } - semTable, err := semantics.Analyze(updStmt, ksName, vschema) - if err != nil { - return nil, err - } - // record any warning as planner warning. - vschema.PlannerWarning(semTable.Warning) - - err = rewriteRoutedTables(updStmt, vschema) - if err != nil { - return nil, err - } - - if ks, tables := semTable.SingleUnshardedKeyspace(); ks != nil { - plan := updateUnshardedShortcut(updStmt, ks, tables) - plan = pushCommentDirectivesOnPlan(plan, updStmt) - return newPlanResult(plan.Primitive(), operators.QualifiedTables(ks, tables)...), nil - } - - if semTable.NotUnshardedErr != nil { - return nil, semTable.NotUnshardedErr - } - - err = queryRewrite(semTable, reservedVars, updStmt) - if err != nil { - return nil, err - } - - ctx := plancontext.NewPlanningContext(reservedVars, semTable, vschema, version) - - op, err := operators.PlanQuery(ctx, updStmt) - if err != nil { - return nil, err - } - - plan, err := transformToLogicalPlan(ctx, op, true) - if err != nil { - return nil, err - } - - plan = pushCommentDirectivesOnPlan(plan, updStmt) - - setLockOnAllSelect(plan) - - if err := plan.WireupGen4(ctx); err != nil { - return nil, err - } - - return newPlanResult(plan.Primitive(), operators.TablesUsed(op)...), nil -} - -func updateUnshardedShortcut(stmt 
*sqlparser.Update, ks *vindexes.Keyspace, tables []*vindexes.Table) logicalPlan { - edml := engine.NewDML() - edml.Keyspace = ks - edml.Table = tables - edml.Opcode = engine.Unsharded - edml.Query = generateQuery(stmt) - return &primitiveWrapper{prim: &engine.Update{DML: edml}} -} - -func gen4DeleteStmtPlanner( - version querypb.ExecuteOptions_PlannerVersion, - deleteStmt *sqlparser.Delete, - reservedVars *sqlparser.ReservedVars, - vschema plancontext.VSchema, -) (*planResult, error) { - if deleteStmt.With != nil { - return nil, vterrors.VT12001("WITH expression in DELETE statement") - } - - var err error - if len(deleteStmt.TableExprs) == 1 && len(deleteStmt.Targets) == 1 { - deleteStmt, err = rewriteSingleTbl(deleteStmt) - if err != nil { - return nil, err - } - } - - ksName := "" - if ks, _ := vschema.DefaultKeyspace(); ks != nil { - ksName = ks.Name - } - semTable, err := semantics.Analyze(deleteStmt, ksName, vschema) - if err != nil { - return nil, err - } - - // record any warning as planner warning. 
- vschema.PlannerWarning(semTable.Warning) - err = rewriteRoutedTables(deleteStmt, vschema) - if err != nil { - return nil, err - } - if ks, tables := semTable.SingleUnshardedKeyspace(); ks != nil { - plan := deleteUnshardedShortcut(deleteStmt, ks, tables) - plan = pushCommentDirectivesOnPlan(plan, deleteStmt) - return newPlanResult(plan.Primitive(), operators.QualifiedTables(ks, tables)...), nil - } - - if err := checkIfDeleteSupported(deleteStmt, semTable); err != nil { - return nil, err - } - - err = queryRewrite(semTable, reservedVars, deleteStmt) - if err != nil { - return nil, err - } - - ctx := plancontext.NewPlanningContext(reservedVars, semTable, vschema, version) - op, err := operators.PlanQuery(ctx, deleteStmt) - if err != nil { - return nil, err - } - - plan, err := transformToLogicalPlan(ctx, op, true) - if err != nil { - return nil, err - } - - plan = pushCommentDirectivesOnPlan(plan, deleteStmt) - - setLockOnAllSelect(plan) - - if err := plan.WireupGen4(ctx); err != nil { - return nil, err - } - - return newPlanResult(plan.Primitive(), operators.TablesUsed(op)...), nil -} - -func deleteUnshardedShortcut(stmt *sqlparser.Delete, ks *vindexes.Keyspace, tables []*vindexes.Table) logicalPlan { - edml := engine.NewDML() - edml.Keyspace = ks - edml.Table = tables - edml.Opcode = engine.Unsharded - edml.Query = generateQuery(stmt) - return &primitiveWrapper{prim: &engine.Delete{DML: edml}} -} - -func gen4InsertStmtPlanner(version querypb.ExecuteOptions_PlannerVersion, insStmt *sqlparser.Insert, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { - ksName := "" - if ks, _ := vschema.DefaultKeyspace(); ks != nil { - ksName = ks.Name - } - semTable, err := semantics.Analyze(insStmt, ksName, vschema) - if err != nil { - return nil, err - } - // record any warning as planner warning. 
- vschema.PlannerWarning(semTable.Warning) - - err = rewriteRoutedTables(insStmt, vschema) - if err != nil { - return nil, err - } - // remove any alias added from routing table. - // insert query does not support table alias. - insStmt.Table.As = sqlparser.NewIdentifierCS("") - - // Check single unsharded. Even if the table is for single unsharded but sequence table is used. - // We cannot shortcut here as sequence column needs additional planning. - ks, tables := semTable.SingleUnshardedKeyspace() - if ks != nil && tables[0].AutoIncrement == nil { - plan := insertUnshardedShortcut(insStmt, ks, tables) - plan = pushCommentDirectivesOnPlan(plan, insStmt) - return newPlanResult(plan.Primitive(), operators.QualifiedTables(ks, tables)...), nil - } - - tblInfo, err := semTable.TableInfoFor(semTable.TableSetFor(insStmt.Table)) - if err != nil { - return nil, err - } - if tblInfo.GetVindexTable().Pinned != nil { - ks, err := vschema.DefaultKeyspace() - if err != nil { - return nil, err - } - plan := insertDestinationShortcut(insStmt, ks, tblInfo.GetVindexTable()) - plan = pushCommentDirectivesOnPlan(plan, insStmt) - return newPlanResult(plan.Primitive(), operators.QualifiedTables(ks, tables)...), nil - } - if tblInfo.GetVindexTable().Keyspace.Sharded && semTable.NotUnshardedErr != nil { - return nil, semTable.NotUnshardedErr - } - - err = queryRewrite(semTable, reservedVars, insStmt) - if err != nil { - return nil, err - } - - ctx := plancontext.NewPlanningContext(reservedVars, semTable, vschema, version) - - op, err := operators.PlanQuery(ctx, insStmt) - if err != nil { - return nil, err - } - - plan, err := transformToLogicalPlan(ctx, op, true) - if err != nil { - return nil, err - } - - plan = pushCommentDirectivesOnPlan(plan, insStmt) - - setLockOnAllSelect(plan) - - if err := plan.WireupGen4(ctx); err != nil { - return nil, err - } - - return newPlanResult(plan.Primitive(), operators.TablesUsed(op)...), nil -} - -func insertUnshardedShortcut(stmt *sqlparser.Insert, 
ks *vindexes.Keyspace, tables []*vindexes.Table) logicalPlan { - eIns := &engine.Insert{} - eIns.Keyspace = ks - eIns.Table = tables[0] - eIns.Opcode = engine.InsertUnsharded - eIns.Query = generateQuery(stmt) - return &insert{eInsert: eIns} -} - -func rewriteRoutedTables(stmt sqlparser.Statement, vschema plancontext.VSchema) error { - // Rewrite routed tables - return sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { - aliasTbl, isAlias := node.(*sqlparser.AliasedTableExpr) - if !isAlias { - return true, nil - } - tableName, ok := aliasTbl.Expr.(sqlparser.TableName) - if !ok { - return true, nil - } - vschemaTable, vindexTbl, _, _, _, err := vschema.FindTableOrVindex(tableName) - if err != nil { - return false, err - } - if vindexTbl != nil { - // vindex cannot be present in a dml statement. - return false, vterrors.VT09014() - } - - if vschemaTable.Name.String() != tableName.Name.String() { - name := tableName.Name - if aliasTbl.As.IsEmpty() { - // if the user hasn't specified an alias, we'll insert one here so the old table name still works - aliasTbl.As = sqlparser.NewIdentifierCS(name.String()) - } - tableName.Name = sqlparser.NewIdentifierCS(vschemaTable.Name.String()) - aliasTbl.Expr = tableName - } - - return true, nil - }, stmt) -} - -func setLockOnAllSelect(plan logicalPlan) { - _, _ = visit(plan, func(plan logicalPlan) (bool, logicalPlan, error) { - switch node := plan.(type) { - case *routeGen4: - node.Select.SetLock(sqlparser.ShareModeLock) - return true, node, nil - } - return true, plan, nil - }) -} - -func planLimit(limit *sqlparser.Limit, plan logicalPlan) (logicalPlan, error) { - if limit == nil { - return plan, nil - } - rb, ok := plan.(*routeGen4) - if ok && rb.isSingleShard() { - rb.SetLimit(limit) - return plan, nil - } - - lPlan, err := createLimit(plan, limit) - if err != nil { - return nil, err - } - - // visit does not modify the plan. 
- _, err = visit(lPlan, setUpperLimit) - if err != nil { - return nil, err - } - return lPlan, nil -} - -func planHorizon(ctx *plancontext.PlanningContext, plan logicalPlan, in sqlparser.SelectStatement, truncateColumns bool) (logicalPlan, error) { - switch node := in.(type) { - case *sqlparser.Select: - hp := horizonPlanning{ - sel: node, - } - - replaceSubQuery(ctx, node) - var err error - plan, err = hp.planHorizon(ctx, plan, truncateColumns) - if err != nil { - return nil, err - } - plan, err = planLimit(node.Limit, plan) - if err != nil { - return nil, err - } - case *sqlparser.Union: - var err error - rb, isRoute := plan.(*routeGen4) - if !isRoute && ctx.SemTable.NotSingleRouteErr != nil { - return nil, ctx.SemTable.NotSingleRouteErr - } - if isRoute && rb.isSingleShard() { - err = planSingleRoutePlan(node, rb) - } else { - plan, err = planOrderByOnUnion(ctx, plan, node) - } - if err != nil { - return nil, err - } - - plan, err = planLimit(node.Limit, plan) - if err != nil { - return nil, err - } - } - return plan, nil - -} - -func planOrderByOnUnion(ctx *plancontext.PlanningContext, plan logicalPlan, union *sqlparser.Union) (logicalPlan, error) { - qp, err := operators.CreateQPFromUnion(ctx, union) - if err != nil { - return nil, err - } - hp := horizonPlanning{ - qp: qp, - } - if len(qp.OrderExprs) > 0 { - plan, err = hp.planOrderBy(ctx, qp.OrderExprs, plan) - if err != nil { - return nil, err - } - } - return plan, nil -} - -func pushCommentDirectivesOnPlan(plan logicalPlan, stmt sqlparser.Statement) logicalPlan { - var directives *sqlparser.CommentDirectives - cmt, ok := stmt.(sqlparser.Commented) - if ok { - directives = cmt.GetParsedComments().Directives() - scatterAsWarns := directives.IsSet(sqlparser.DirectiveScatterErrorsAsWarnings) - timeout := queryTimeout(directives) - multiShardAutoCommit := directives.IsSet(sqlparser.DirectiveMultiShardAutocommit) - - if scatterAsWarns || timeout > 0 || multiShardAutoCommit { - _, _ = visit(plan, 
func(logicalPlan logicalPlan) (bool, logicalPlan, error) { - switch plan := logicalPlan.(type) { - case *routeGen4: - plan.eroute.ScatterErrorsAsWarnings = scatterAsWarns - plan.eroute.QueryTimeout = timeout - case *primitiveWrapper: - setDirective(plan.prim, multiShardAutoCommit, timeout) - case *insert: - setDirective(plan.eInsert, multiShardAutoCommit, timeout) - } - return true, logicalPlan, nil - }) - } - } - - return plan -} - -func setDirective(prim engine.Primitive, msac bool, timeout int) { - switch edml := prim.(type) { - case *engine.Insert: - edml.MultiShardAutocommit = msac - edml.QueryTimeout = timeout - case *engine.Update: - edml.MultiShardAutocommit = msac - edml.QueryTimeout = timeout - case *engine.Delete: - edml.MultiShardAutocommit = msac - edml.QueryTimeout = timeout - } -} - -// checkIfDeleteSupported checks if the delete query is supported or we must return an error. -func checkIfDeleteSupported(del *sqlparser.Delete, semTable *semantics.SemTable) error { - if semTable.NotUnshardedErr != nil { - return semTable.NotUnshardedErr - } - - // Delete is only supported for a single TableExpr which is supposed to be an aliased expression - multiShardErr := vterrors.VT12001("multi-shard or vindex write statement") - if len(del.TableExprs) != 1 { - return multiShardErr - } - _, isAliasedExpr := del.TableExprs[0].(*sqlparser.AliasedTableExpr) - if !isAliasedExpr { - return multiShardErr - } - - if len(del.Targets) > 1 { - return vterrors.VT12001("multi-table DELETE statement in a sharded keyspace") - } - - err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - switch node.(type) { - case *sqlparser.Subquery, *sqlparser.DerivedTable: - // We have a subquery, so we must fail the planning. - // If this subquery and the table expression were all belonging to the same unsharded keyspace, - // we would have already created a plan for them before doing these checks. 
- return false, vterrors.VT12001("subqueries in DML") - } - return true, nil - }, del) - if err != nil { - return err - } - - return nil -} - -func insertDestinationShortcut(stmt *sqlparser.Insert, ks *vindexes.Keyspace, tables *vindexes.Table) logicalPlan { - eIns := &engine.Insert{} - eIns.Keyspace = ks - eIns.Table = tables - eIns.Opcode = engine.InsertByDestination - eIns.TargetDestination = key.DestinationKeyspaceID(tables.Pinned) - eIns.Query = generateQuery(stmt) - return &insert{eInsert: eIns} -} diff --git a/go/vt/vtgate/planbuilder/grouping.go b/go/vt/vtgate/planbuilder/grouping.go deleted file mode 100644 index 0bd10666029..00000000000 --- a/go/vt/vtgate/planbuilder/grouping.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" -) - -func planGroupBy(pb *primitiveBuilder, input logicalPlan, groupBy sqlparser.GroupBy) (logicalPlan, error) { - if len(groupBy) == 0 { - // if we have no grouping declared, we only want to visit orderedAggregate - _, isOrdered := input.(*orderedAggregate) - if !isOrdered { - return input, nil - } - } - - switch node := input.(type) { - case *mergeSort, *pulloutSubquery, *distinct: - inputs := node.Inputs() - input := inputs[0] - - newInput, err := planGroupBy(pb, input, groupBy) - if err != nil { - return nil, err - } - inputs[0] = newInput - err = node.Rewrite(inputs...) - if err != nil { - return nil, err - } - return node, nil - case *route: - node.Select.(*sqlparser.Select).GroupBy = groupBy - return node, nil - case *orderedAggregate: - for _, expr := range groupBy { - colNumber := -1 - switch e := expr.(type) { - case *sqlparser.ColName: - c := e.Metadata.(*column) - if c.Origin() == node { - return nil, vterrors.VT03005(sqlparser.String(e)) - } - for i, rc := range node.resultColumns { - if rc.column == c { - colNumber = i - break - } - } - if colNumber == -1 { - return nil, vterrors.VT12001("in scatter query: GROUP BY column must reference column in SELECT list") - } - case *sqlparser.Literal: - num, err := ResultFromNumber(node.resultColumns, e, "group statement") - if err != nil { - return nil, err - } - colNumber = num - default: - return nil, vterrors.VT12001("in scatter query: only simple references are allowed") - } - node.groupByKeys = append(node.groupByKeys, &engine.GroupByParams{KeyCol: colNumber, WeightStringCol: -1, FromGroupBy: true}) - } - // Append the distinct aggregate if any. 
- if node.extraDistinct != nil { - groupBy = append(groupBy, node.extraDistinct) - } - - newInput, err := planGroupBy(pb, node.input, groupBy) - if err != nil { - return nil, err - } - node.input = newInput - - return node, nil - } - return nil, vterrors.VT13001(fmt.Sprintf("unreachable %T.groupBy: ", input)) -} - -// planDistinct makes the output distinct -func planDistinct(input logicalPlan) (logicalPlan, error) { - switch node := input.(type) { - case *route: - node.Select.MakeDistinct() - return node, nil - case *orderedAggregate: - for i, rc := range node.resultColumns { - // If the column origin is oa (and not the underlying route), - // it means that it's an aggregate function supplied by oa. - // So, the distinct 'operator' cannot be pushed down into the - // route. - if rc.column.Origin() == node { - return newDistinctV3(node), nil - } - node.groupByKeys = append(node.groupByKeys, &engine.GroupByParams{KeyCol: i, WeightStringCol: -1, FromGroupBy: false}) - } - newInput, err := planDistinct(node.input) - if err != nil { - return nil, err - } - node.input = newInput - return node, nil - - case *distinct: - return input, nil - } - - return nil, vterrors.VT13001(fmt.Sprintf("unreachable %T.distinct", input)) -} diff --git a/go/vt/vtgate/planbuilder/hash_join.go b/go/vt/vtgate/planbuilder/hash_join.go deleted file mode 100644 index cef2f30bead..00000000000 --- a/go/vt/vtgate/planbuilder/hash_join.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/mysql/collations" - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" -) - -var _ logicalPlan = (*hashJoin)(nil) - -// hashJoin is used to build a HashJoin primitive. -type hashJoin struct { - gen4Plan - - // Left and Right are the nodes for the join. - Left, Right logicalPlan - - Opcode engine.JoinOpcode - - Cols []int - - // The keys correspond to the column offset in the inputs where - // the join columns can be found - LHSKey, RHSKey int - - ComparisonType querypb.Type - - Collation collations.ID -} - -// WireupGen4 implements the logicalPlan interface -func (hj *hashJoin) WireupGen4(ctx *plancontext.PlanningContext) error { - err := hj.Left.WireupGen4(ctx) - if err != nil { - return err - } - return hj.Right.WireupGen4(ctx) -} - -// Primitive implements the logicalPlan interface -func (hj *hashJoin) Primitive() engine.Primitive { - return &engine.HashJoin{ - Left: hj.Left.Primitive(), - Right: hj.Right.Primitive(), - Cols: hj.Cols, - Opcode: hj.Opcode, - LHSKey: hj.LHSKey, - RHSKey: hj.RHSKey, - ComparisonType: hj.ComparisonType, - Collation: hj.Collation, - } -} - -// Inputs implements the logicalPlan interface -func (hj *hashJoin) Inputs() []logicalPlan { - return []logicalPlan{hj.Left, hj.Right} -} - -// Rewrite implements the logicalPlan interface -func (hj *hashJoin) Rewrite(inputs ...logicalPlan) error { - if len(inputs) != 2 { - return vterrors.VT13001(fmt.Sprintf("wrong number of children in hashJoin rewrite: %d; should be exactly 2", len(inputs))) - } - hj.Left = inputs[0] - hj.Right = inputs[1] - return nil -} - -// ContainsTables implements the logicalPlan interface -func 
(hj *hashJoin) ContainsTables() semantics.TableSet { - return hj.Left.ContainsTables().Merge(hj.Right.ContainsTables()) -} - -// OutputColumns implements the logicalPlan interface -func (hj *hashJoin) OutputColumns() []sqlparser.SelectExpr { - return getOutputColumnsFromJoin(hj.Cols, hj.Left.OutputColumns(), hj.Right.OutputColumns()) -} - -func getOutputColumnsFromJoin(ints []int, lhs []sqlparser.SelectExpr, rhs []sqlparser.SelectExpr) (cols []sqlparser.SelectExpr) { - for _, col := range ints { - if col < 0 { - col *= -1 - cols = append(cols, lhs[col-1]) - } else { - cols = append(cols, rhs[col-1]) - } - } - return -} diff --git a/go/vt/vtgate/planbuilder/horizon_planning.go b/go/vt/vtgate/planbuilder/horizon_planning.go deleted file mode 100644 index c3a46adbc17..00000000000 --- a/go/vt/vtgate/planbuilder/horizon_planning.go +++ /dev/null @@ -1,1208 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/sqltypes" - popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - "vitess.io/vitess/go/vt/vtgate/semantics" - - "vitess.io/vitess/go/vt/vterrors" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/engine" -) - -type horizonPlanning struct { - sel *sqlparser.Select - qp *operators.QueryProjection -} - -func (hp *horizonPlanning) planHorizon(ctx *plancontext.PlanningContext, plan logicalPlan, truncateColumns bool) (logicalPlan, error) { - rb, isRoute := plan.(*routeGen4) - if !isRoute && ctx.SemTable.NotSingleRouteErr != nil { - // If we got here, we don't have a single shard plan - return nil, ctx.SemTable.NotSingleRouteErr - } - - if isRoute && rb.isSingleShard() { - err := planSingleRoutePlan(hp.sel, rb) - if err != nil { - return nil, err - } - return plan, nil - } - - // If the current plan is a simpleProjection, we want to rewrite derived expression. - // In transformDerivedPlan (operator_transformers.go), derived tables that are not - // a simple route are put behind a simpleProjection. In this simple projection, - // every Route will represent the original derived table. Thus, pushing new expressions - // to those Routes require us to rewrite them. - // On the other hand, when a derived table is a simple Route, we do not put it under - // a simpleProjection. We create a new Route that contains the derived table in the - // FROM clause. Meaning that, when we push expressions to the select list of this - // new Route, we do not want them to rewrite them. 
- sp, derivedTable := plan.(*simpleProjection) - if derivedTable { - oldRewriteDerivedExpr := ctx.RewriteDerivedExpr - defer func() { - ctx.RewriteDerivedExpr = oldRewriteDerivedExpr - }() - ctx.RewriteDerivedExpr = true - } - - var err error - hp.qp, err = operators.CreateQPFromSelect(ctx, hp.sel) - if err != nil { - return nil, err - } - - needsOrdering := len(hp.qp.OrderExprs) > 0 - - // If we still have a HAVING clause, it's because it could not be pushed to the WHERE, - // so it probably has aggregations - canShortcut := isRoute && hp.sel.Having == nil && !needsOrdering - - switch { - case hp.qp.NeedsAggregation() || hp.sel.Having != nil: - plan, err = hp.planAggregations(ctx, plan) - if err != nil { - return nil, err - } - // if we already did sorting, we don't need to do it again - needsOrdering = needsOrdering && !hp.qp.CanPushDownSorting - case canShortcut: - err = planSingleRoutePlan(hp.sel, rb) - if err != nil { - return nil, err - } - case derivedTable: - pusher := func(ae *sqlparser.AliasedExpr) (int, error) { - offset, _, err := pushProjection(ctx, ae, sp.input, true, true, false) - return offset, err - } - needsVtGate, projections, colNames, err := hp.qp.NeedsProjecting(ctx, pusher) - if err != nil { - return nil, err - } - if !needsVtGate { - break - } - - // there were some expressions we could not push down entirely, - // so replace the simpleProjection with a real projection - plan = &projection{ - source: sp.input, - columns: projections, - columnNames: colNames, - } - default: - err = pushProjections(ctx, plan, hp.qp.SelectExprs) - if err != nil { - return nil, err - } - } - - // If we didn't already take care of ORDER BY during aggregation planning, we need to handle it now - if needsOrdering { - plan, err = hp.planOrderBy(ctx, hp.qp.OrderExprs, plan) - if err != nil { - return nil, err - } - } - - plan, err = hp.planDistinct(ctx, plan) - if err != nil { - return nil, err - } - - if !truncateColumns { - return plan, nil - } - - plan, err = 
hp.truncateColumnsIfNeeded(ctx, plan) - if err != nil { - return nil, err - } - - return plan, nil -} - -func pushProjections(ctx *plancontext.PlanningContext, plan logicalPlan, selectExprs []operators.SelectExpr) error { - for _, e := range selectExprs { - aliasExpr, err := e.GetAliasedExpr() - if err != nil { - return err - } - if _, _, err := pushProjection(ctx, aliasExpr, plan, true, false, false); err != nil { - return err - } - } - return nil -} - -func (hp *horizonPlanning) truncateColumnsIfNeeded(ctx *plancontext.PlanningContext, plan logicalPlan) (logicalPlan, error) { - if len(plan.OutputColumns()) == hp.qp.GetColumnCount() { - return plan, nil - } - switch p := plan.(type) { - case *routeGen4: - p.eroute.SetTruncateColumnCount(hp.qp.GetColumnCount()) - case *joinGen4, *semiJoin, *hashJoin: - // since this is a join, we can safely add extra columns and not need to truncate them - case *orderedAggregate: - p.truncateColumnCount = hp.qp.GetColumnCount() - case *memorySort: - p.truncater.SetTruncateColumnCount(hp.qp.GetColumnCount()) - case *pulloutSubquery: - newUnderlyingPlan, err := hp.truncateColumnsIfNeeded(ctx, p.underlying) - if err != nil { - return nil, err - } - p.underlying = newUnderlyingPlan - default: - plan = &simpleProjection{ - logicalPlanCommon: newBuilderCommon(plan), - eSimpleProj: &engine.SimpleProjection{}, - } - - exprs := hp.qp.SelectExprs[0:hp.qp.GetColumnCount()] - err := pushProjections(ctx, plan, exprs) - if err != nil { - return nil, err - } - } - return plan, nil -} - -func checkIfAlreadyExists(expr *sqlparser.AliasedExpr, node sqlparser.SelectStatement, semTable *semantics.SemTable) int { - // Here to find if the expr already exists in the SelectStatement, we have 3 cases - // input is a Select -> In this case we want to search in the select - // input is a Union -> In this case we want to search in the First Select of the Union - // input is a Parenthesised Select -> In this case we want to search in the select - // all these 
three cases are handled by the call to GetFirstSelect. - sel := sqlparser.GetFirstSelect(node) - - exprCol, isExprCol := expr.Expr.(*sqlparser.ColName) - - // first pass - search for aliased expressions - for i, selectExpr := range sel.SelectExprs { - if !isExprCol { - break - } - - selectExpr, ok := selectExpr.(*sqlparser.AliasedExpr) - if ok && selectExpr.As.Equal(exprCol.Name) { - return i - } - } - - // next pass - we are searching the actual expressions and not the aliases - for i, selectExpr := range sel.SelectExprs { - selectExpr, ok := selectExpr.(*sqlparser.AliasedExpr) - if !ok { - continue - } - - if semTable.EqualsExpr(expr.Expr, selectExpr.Expr) { - return i - } - } - return -1 -} - -func (hp *horizonPlanning) planAggregations(ctx *plancontext.PlanningContext, plan logicalPlan) (logicalPlan, error) { - isPushable := !isJoin(plan) - grouping := hp.qp.GetGrouping() - vindexOverlapWithGrouping := hasUniqueVindex(ctx.SemTable, grouping) - if isPushable && vindexOverlapWithGrouping { - // If we have a plan that we can push the group by and aggregation through, we don't need to do aggregation - // at the vtgate level at all - err := hp.planAggregationWithoutOA(ctx, plan) - if err != nil { - return nil, err - } - resultPlan, err := hp.planOrderBy(ctx, hp.qp.OrderExprs, plan) - if err != nil { - return nil, err - } - - newPlan, err := hp.planHaving(ctx, resultPlan) - if err != nil { - return nil, err - } - - return newPlan, nil - } - - return hp.planAggrUsingOA(ctx, plan, grouping) -} - -func (hp *horizonPlanning) planAggrUsingOA( - ctx *plancontext.PlanningContext, - plan logicalPlan, - grouping []operators.GroupBy, -) (logicalPlan, error) { - oa := &orderedAggregate{ - groupByKeys: make([]*engine.GroupByParams, 0, len(grouping)), - } - - var order []ops.OrderBy - if hp.qp.CanPushDownSorting { - hp.qp.OldAlignGroupByAndOrderBy(ctx) - // the grouping order might have changed, so we reload the grouping expressions - grouping = hp.qp.GetGrouping() - order = 
hp.qp.OrderExprs - } else { - for _, expr := range grouping { - order = append(order, expr.AsOrderBy()) - } - } - - // here we are building up the grouping keys for the OA, - // but they are lacking the input offsets because we have yet to push the columns down - for _, expr := range grouping { - typ, col, _ := ctx.SemTable.TypeForExpr(expr.Inner) - oa.groupByKeys = append(oa.groupByKeys, &engine.GroupByParams{ - Expr: expr.Inner, - FromGroupBy: true, - Type: typ, - CollationID: col, - }) - } - - if hp.sel.Having != nil { - rewriter := hp.qp.AggrRewriter(ctx) - sqlparser.SafeRewrite(hp.sel.Having.Expr, rewriter.RewriteDown(), rewriter.RewriteUp()) - if rewriter.Err != nil { - return nil, rewriter.Err - } - } - - aggregationExprs, err := hp.qp.AggregationExpressions(ctx) - if err != nil { - return nil, err - } - - // If we have a distinct aggregating expression, - // we handle it by pushing it down to the underlying input as a grouping column - distinctGroupBy, distinctOffsets, aggrs, err := hp.handleDistinctAggr(ctx, aggregationExprs) - if err != nil { - return nil, err - } - - if len(distinctGroupBy) > 0 { - grouping = append(grouping, distinctGroupBy...) 
- // all the distinct grouping aggregates use the same expression, so it should be OK to just add it once - order = append(order, distinctGroupBy[0].AsOrderBy()) - oa.preProcess = true - } - - newPlan, groupingOffsets, aggrParamOffsets, pushed, err := hp.pushAggregation(ctx, plan, grouping, aggrs, false) - if err != nil { - return nil, err - } - if !pushed { - oa.preProcess = true - oa.aggrOnEngine = true - } - - plan = newPlan - - _, isRoute := plan.(*routeGen4) - needsProj := !isRoute - var aggPlan = plan - var proj *projection - if needsProj { - length := getLengthOfProjection(groupingOffsets, aggrs) - proj = &projection{ - source: plan, - columns: make([]sqlparser.Expr, length), - columnNames: make([]string, length), - } - aggPlan = proj - } - - aggrParams, err := generateAggregateParams(aggrs, aggrParamOffsets, proj, pushed) - if err != nil { - return nil, err - } - - if proj != nil { - groupingOffsets, err = passGroupingColumns(proj, groupingOffsets, grouping) - if err != nil { - return nil, err - } - } - - // Next we add the aggregation expressions and grouping offsets to the OA - addColumnsToOA(ctx, oa, distinctGroupBy, aggrParams, distinctOffsets, groupingOffsets, aggregationExprs) - - aggPlan, err = hp.planOrderBy(ctx, order, aggPlan) - if err != nil { - return nil, err - } - - oa.resultsBuilder = resultsBuilder{ - logicalPlanCommon: newBuilderCommon(aggPlan), - weightStrings: make(map[*resultColumn]int), - } - - return hp.planHaving(ctx, oa) -} - -func passGroupingColumns(proj *projection, groupings []offsets, grouping []operators.GroupBy) (projGrpOffsets []offsets, err error) { - for idx, grp := range groupings { - origGrp := grouping[idx] - var offs offsets - expr := origGrp.AsAliasedExpr() - offs.col, err = proj.addColumn(origGrp.InnerIndex, sqlparser.NewOffset(grp.col, expr.Expr), expr.ColumnName()) - if err != nil { - return nil, err - } - if grp.wsCol != -1 { - offs.wsCol, err = proj.addColumn(nil, sqlparser.NewOffset(grp.wsCol, 
weightStringFor(expr.Expr)), "") - if err != nil { - return nil, err - } - } - projGrpOffsets = append(projGrpOffsets, offs) - } - return projGrpOffsets, nil -} - -func generateAggregateParams(aggrs []operators.Aggr, aggrParamOffsets [][]offsets, proj *projection, pushed bool) ([]*engine.AggregateParams, error) { - aggrParams := make([]*engine.AggregateParams, len(aggrs)) - for idx, paramOffset := range aggrParamOffsets { - aggr := aggrs[idx] - incomingOffset := paramOffset[0].col - var offset int - if proj != nil { - var aggrExpr sqlparser.Expr - for _, ofs := range paramOffset { - curr := sqlparser.NewOffset(ofs.col, aggr.Func) - if aggrExpr == nil { - aggrExpr = curr - } else { - aggrExpr = &sqlparser.BinaryExpr{ - Operator: sqlparser.MultOp, - Left: aggrExpr, - Right: &sqlparser.FuncExpr{ - Name: sqlparser.NewIdentifierCI("coalesce"), - Exprs: sqlparser.SelectExprs{ - &sqlparser.AliasedExpr{Expr: curr}, - &sqlparser.AliasedExpr{Expr: sqlparser.NewIntLiteral("1")}, - }, - }, - } - } - } - - pos, err := proj.addColumn(aggr.Index, aggrExpr, aggr.Alias) - if err != nil { - return nil, err - } - offset = pos - } else { - offset = incomingOffset - } - - opcode := popcode.AggregateSum - switch aggr.OpCode { - case popcode.AggregateMin, popcode.AggregateMax, popcode.AggregateRandom: - opcode = aggr.OpCode - case popcode.AggregateCount, popcode.AggregateCountStar, popcode.AggregateCountDistinct, popcode.AggregateSumDistinct: - if !pushed { - opcode = aggr.OpCode - } - } - - aggrParams[idx] = &engine.AggregateParams{ - Opcode: opcode, - Col: offset, - Alias: aggr.Alias, - Expr: aggr.Original.Expr, - Original: aggr.Original, - OrigOpcode: aggr.OpCode, - Type: sqltypes.Unknown, - CollationID: collations.Unknown, - } - } - return aggrParams, nil -} - -func addColumnsToOA( - ctx *plancontext.PlanningContext, - oa *orderedAggregate, - // these are the group by expressions that where added because we have unique aggregations - distinctGroupBy []operators.GroupBy, - // these 
are the aggregate params we already have for non-distinct aggregations - aggrParams []*engine.AggregateParams, - // distinctOffsets mark out where we need to use the distinctGroupBy offsets - // to create *engine.AggregateParams for the distinct aggregations - distinctOffsets []int, - // these are the offsets for the group by params - groupings []offsets, - // aggregationExprs are all the original aggregation expressions the query requested - aggregationExprs []operators.Aggr, -) { - if len(distinctGroupBy) == 0 { - // no distinct aggregations - oa.aggregates = aggrParams - } else { - count := len(groupings) - len(distinctOffsets) - addDistinctAggr := func(offset int) { - // the last grouping we pushed is the one we added for the distinct aggregation - o := groupings[count] - count++ - a := aggregationExprs[offset] - typ, collID, _ := ctx.SemTable.TypeForExpr(a.Func.GetArg()) - oa.aggregates = append(oa.aggregates, &engine.AggregateParams{ - Opcode: a.OpCode, - Col: o.col, - KeyCol: o.col, - WAssigned: o.wsCol >= 0, - WCol: o.wsCol, - Alias: a.Alias, - Original: a.Original, - Type: typ, - CollationID: collID, - }) - } - lastOffset := distinctOffsets[len(distinctOffsets)-1] - distinctIdx := 0 - for i := 0; i <= lastOffset || i <= len(aggrParams); i++ { - for distinctIdx < len(distinctOffsets) && i == distinctOffsets[distinctIdx] { - // we loop here since we could be dealing with multiple distinct aggregations after each other - addDistinctAggr(i) - distinctIdx++ - } - if i < len(aggrParams) { - oa.aggregates = append(oa.aggregates, aggrParams[i]) - } - } - - // we have to remove the tail of the grouping offsets, so we only have the offsets for the GROUP BY in the query - groupings = groupings[:len(groupings)-len(distinctOffsets)] - } - - for i, grouping := range groupings { - oa.groupByKeys[i].KeyCol = grouping.col - oa.groupByKeys[i].WeightStringCol = grouping.wsCol - } -} - -// handleDistinctAggr takes in a slice of aggregations and returns GroupBy elements that 
replace -// the distinct aggregations in the input, along with a slice of offsets and the non-distinct aggregations left, -// so we can later reify the original aggregations -func (hp *horizonPlanning) handleDistinctAggr(ctx *plancontext.PlanningContext, exprs []operators.Aggr) ( - distincts []operators.GroupBy, offsets []int, aggrs []operators.Aggr, err error) { - var distinctExpr sqlparser.Expr - for i, expr := range exprs { - if !expr.Distinct { - aggrs = append(aggrs, expr) - continue - } - - inner := expr.Func.GetArg() - innerWS := hp.qp.GetSimplifiedExpr(inner) - if err != nil { - return nil, nil, nil, err - } - if exprHasVindex(ctx.SemTable, innerWS, false) { - aggrs = append(aggrs, expr) - continue - } - if distinctExpr == nil { - distinctExpr = innerWS - } else { - if !ctx.SemTable.EqualsExpr(distinctExpr, innerWS) { - err = vterrors.VT12001(fmt.Sprintf("only one DISTINCT aggregation is allowed in a SELECT: %s", sqlparser.String(expr.Original))) - return nil, nil, nil, err - } - } - groupBy := operators.NewGroupBy(inner, innerWS, nil) - groupBy.InnerIndex = expr.Index - distincts = append(distincts, groupBy) - offsets = append(offsets, i) - } - return -} - -func (hp *horizonPlanning) planAggregationWithoutOA(ctx *plancontext.PlanningContext, plan logicalPlan) error { - for _, expr := range hp.qp.SelectExprs { - aliasedExpr, err := expr.GetAliasedExpr() - if err != nil { - return err - } - _, _, err = pushProjection(ctx, aliasedExpr, plan, true, false, false) - if err != nil { - return err - } - } - for _, expr := range hp.qp.GetGrouping() { - // since all the grouping will be done at the mysql level, - // we know that we won't need any weight_string() calls - err := planGroupByGen4(ctx, expr, plan /*weighString*/, false) - if err != nil { - return err - } - } - return nil -} - -type offsets struct { - col, wsCol int -} - -func newOffset(col int) offsets { - return offsets{col: col, wsCol: -1} -} - -func (hp *horizonPlanning) 
createGroupingsForColumns(columns []*sqlparser.ColName) ([]operators.GroupBy, error) { - var lhsGrouping []operators.GroupBy - for _, lhsColumn := range columns { - wsExpr := hp.qp.GetSimplifiedExpr(lhsColumn) - - lhsGrouping = append(lhsGrouping, operators.NewGroupBy(lhsColumn, wsExpr, nil)) - } - return lhsGrouping, nil -} - -func hasUniqueVindex(semTable *semantics.SemTable, groupByExprs []operators.GroupBy) bool { - for _, groupByExpr := range groupByExprs { - if exprHasUniqueVindex(semTable, groupByExpr.SimplifiedExpr) { - return true - } - } - return false -} - -func (hp *horizonPlanning) planOrderBy(ctx *plancontext.PlanningContext, orderExprs []ops.OrderBy, plan logicalPlan) (logicalPlan, error) { - switch plan := plan.(type) { - case *routeGen4: - return planOrderByForRoute(ctx, orderExprs, plan, hp.qp.HasStar) - case *joinGen4: - return hp.planOrderByForJoin(ctx, orderExprs, plan) - case *hashJoin: - return hp.planOrderByForHashJoin(ctx, orderExprs, plan) - case *orderedAggregate: - // remove ORDER BY NULL from the list of order by expressions since we will be doing the ordering on vtgate level so NULL is not useful - var orderExprsWithoutNils []ops.OrderBy - for _, expr := range orderExprs { - if sqlparser.IsNull(expr.Inner.Expr) { - continue - } - orderExprsWithoutNils = append(orderExprsWithoutNils, expr) - } - orderExprs = orderExprsWithoutNils - - for _, order := range orderExprs { - if sqlparser.ContainsAggregation(order.SimplifiedExpr) { - ms, err := createMemorySortPlanOnAggregation(ctx, plan, orderExprs) - if err != nil { - return nil, err - } - return ms, nil - } - } - newInput, err := hp.planOrderBy(ctx, orderExprs, plan.input) - if err != nil { - return nil, err - } - plan.input = newInput - return plan, nil - case *memorySort: - return plan, nil - case *simpleProjection: - return hp.createMemorySortPlan(ctx, plan, orderExprs, true) - case *vindexFunc: - // This is evaluated at VTGate only, so weight_string function cannot be used. 
- return hp.createMemorySortPlan(ctx, plan, orderExprs /* useWeightStr */, false) - case *limit, *semiJoin, *filter, *pulloutSubquery, *projection: - inputs := plan.Inputs() - if len(inputs) == 0 { - break - } - newFirstInput, err := hp.planOrderBy(ctx, orderExprs, inputs[0]) - if err != nil { - return nil, err - } - inputs[0] = newFirstInput - err = plan.Rewrite(inputs...) - if err != nil { - return nil, err - } - return plan, nil - } - return nil, vterrors.VT13001(fmt.Sprintf("ORDER BY in complex query %T", plan)) -} - -func isSpecialOrderBy(o ops.OrderBy) bool { - if sqlparser.IsNull(o.Inner.Expr) { - return true - } - f, isFunction := o.Inner.Expr.(*sqlparser.FuncExpr) - return isFunction && f.Name.Lowered() == "rand" -} - -func planOrderByForRoute(ctx *plancontext.PlanningContext, orderExprs []ops.OrderBy, plan *routeGen4, hasStar bool) (logicalPlan, error) { - for _, order := range orderExprs { - err := checkOrderExprCanBePlannedInScatter(ctx, plan, order, hasStar) - if err != nil { - return nil, err - } - plan.Select.AddOrder(order.Inner) - if isSpecialOrderBy(order) { - continue - } - var wsExpr sqlparser.Expr - if ctx.SemTable.NeedsWeightString(order.Inner.Expr) { - wsExpr = order.SimplifiedExpr - } - - offset, weightStringOffset, err := wrapAndPushExpr(ctx, order.Inner.Expr, wsExpr, plan) - if err != nil { - return nil, err - } - typ, col, _ := ctx.SemTable.TypeForExpr(order.Inner.Expr) - plan.eroute.OrderBy = append(plan.eroute.OrderBy, engine.OrderByParams{ - Col: offset, - WeightStringCol: weightStringOffset, - Desc: order.Inner.Direction == sqlparser.DescOrder, - Type: typ, - CollationID: col, - }) - } - return plan, nil -} - -// checkOrderExprCanBePlannedInScatter verifies that the given order by expression can be planned. -// It checks if the expression exists in the plan's select list when the query is a scatter. 
-func checkOrderExprCanBePlannedInScatter(ctx *plancontext.PlanningContext, plan *routeGen4, order ops.OrderBy, hasStar bool) error { - if !hasStar { - return nil - } - sel := sqlparser.GetFirstSelect(plan.Select) - found := false - for _, expr := range sel.SelectExprs { - aliasedExpr, isAliasedExpr := expr.(*sqlparser.AliasedExpr) - if isAliasedExpr && ctx.SemTable.EqualsExpr(aliasedExpr.Expr, order.Inner.Expr) { - found = true - break - } - } - if !found { - return vterrors.VT12001(fmt.Sprintf("in scatter query: ORDER BY must reference a column in the SELECT list: %s", sqlparser.String(order.Inner))) - } - return nil -} - -// wrapAndPushExpr pushes the expression and weighted_string function to the plan using semantics.SemTable -// It returns (expr offset, weight_string offset, error) -func wrapAndPushExpr(ctx *plancontext.PlanningContext, expr sqlparser.Expr, weightStrExpr sqlparser.Expr, plan logicalPlan) (int, int, error) { - offset, _, err := pushProjection(ctx, &sqlparser.AliasedExpr{Expr: expr}, plan, true, true, false) - if err != nil { - return 0, 0, err - } - if weightStrExpr == nil { - return offset, -1, nil - } - if !sqlparser.IsColName(expr) { - switch unary := expr.(type) { - case *sqlparser.CastExpr: - expr = unary.Expr - case *sqlparser.ConvertExpr: - expr = unary.Expr - } - if !sqlparser.IsColName(expr) { - return 0, 0, vterrors.VT13001(fmt.Sprintf("in scatter query: complex ORDER BY expression: %s", sqlparser.String(expr))) - } - } - qt, _, found := ctx.SemTable.TypeForExpr(expr) - wsNeeded := true - if found && sqltypes.IsNumber(qt) { - wsNeeded = false - } - - weightStringOffset := -1 - if wsNeeded { - aliasedExpr := &sqlparser.AliasedExpr{Expr: weightStringFor(weightStrExpr)} - weightStringOffset, _, err = pushProjection(ctx, aliasedExpr, plan, true, true, false) - if err != nil { - return 0, 0, err - } - } - return offset, weightStringOffset, nil -} - -func weightStringFor(expr sqlparser.Expr) sqlparser.Expr { - return 
&sqlparser.WeightStringFuncExpr{Expr: expr} -} - -func (hp *horizonPlanning) planOrderByForHashJoin(ctx *plancontext.PlanningContext, orderExprs []ops.OrderBy, plan *hashJoin) (logicalPlan, error) { - if len(orderExprs) == 1 && isSpecialOrderBy(orderExprs[0]) { - rhs, err := hp.planOrderBy(ctx, orderExprs, plan.Right) - if err != nil { - return nil, err - } - plan.Right = rhs - return plan, nil - } - if orderExprsDependsOnTableSet(orderExprs, ctx.SemTable, plan.Right.ContainsTables()) { - newRight, err := hp.planOrderBy(ctx, orderExprs, plan.Right) - if err != nil { - return nil, err - } - plan.Right = newRight - return plan, nil - } - sortPlan, err := hp.createMemorySortPlan(ctx, plan, orderExprs, true) - if err != nil { - return nil, err - } - return sortPlan, nil -} - -func (hp *horizonPlanning) planOrderByForJoin(ctx *plancontext.PlanningContext, orderExprs []ops.OrderBy, plan *joinGen4) (logicalPlan, error) { - if len(orderExprs) == 1 && isSpecialOrderBy(orderExprs[0]) { - lhs, err := hp.planOrderBy(ctx, orderExprs, plan.Left) - if err != nil { - return nil, err - } - rhs, err := hp.planOrderBy(ctx, orderExprs, plan.Right) - if err != nil { - return nil, err - } - plan.Left = lhs - plan.Right = rhs - return plan, nil - } - // We can only push down sorting on the LHS of the join. 
- // If the order is on the RHS, we need to do the sorting on the vtgate - if orderExprsDependsOnTableSet(orderExprs, ctx.SemTable, plan.Left.ContainsTables()) { - newLeft, err := hp.planOrderBy(ctx, orderExprs, plan.Left) - if err != nil { - return nil, err - } - plan.Left = newLeft - return plan, nil - } - sortPlan, err := hp.createMemorySortPlan(ctx, plan, orderExprs, true) - if err != nil { - return nil, err - } - return sortPlan, nil -} - -func createMemorySortPlanOnAggregation(ctx *plancontext.PlanningContext, plan *orderedAggregate, orderExprs []ops.OrderBy) (logicalPlan, error) { - primitive := &engine.MemorySort{} - ms := &memorySort{ - resultsBuilder: resultsBuilder{ - logicalPlanCommon: newBuilderCommon(plan), - weightStrings: make(map[*resultColumn]int), - truncater: primitive, - }, - eMemorySort: primitive, - } - - for _, order := range orderExprs { - offset, woffset, found := findExprInOrderedAggr(ctx, plan, order) - if !found { - return nil, vterrors.VT13001(fmt.Sprintf("expected to find ORDER BY expression (%s) in orderedAggregate", sqlparser.String(order.Inner))) - } - - typ, collationID, _ := ctx.SemTable.TypeForExpr(order.SimplifiedExpr) - ms.eMemorySort.OrderBy = append(ms.eMemorySort.OrderBy, engine.OrderByParams{ - Col: offset, - WeightStringCol: woffset, - Desc: order.Inner.Direction == sqlparser.DescOrder, - StarColFixedIndex: offset, - Type: typ, - CollationID: collationID, - }) - } - return ms, nil -} - -func findExprInOrderedAggr(ctx *plancontext.PlanningContext, plan *orderedAggregate, order ops.OrderBy) (keyCol int, weightStringCol int, found bool) { - for _, key := range plan.groupByKeys { - if ctx.SemTable.EqualsExpr(order.SimplifiedExpr, key.Expr) || - ctx.SemTable.EqualsExpr(order.Inner.Expr, key.Expr) { - return key.KeyCol, key.WeightStringCol, true - } - } - for _, aggregate := range plan.aggregates { - if ctx.SemTable.EqualsExpr(order.SimplifiedExpr, aggregate.Original.Expr) || - ctx.SemTable.EqualsExpr(order.Inner.Expr, 
aggregate.Original.Expr) { - return aggregate.Col, -1, true - } - } - return 0, 0, false -} - -func (hp *horizonPlanning) createMemorySortPlan(ctx *plancontext.PlanningContext, plan logicalPlan, orderExprs []ops.OrderBy, useWeightStr bool) (logicalPlan, error) { - primitive := &engine.MemorySort{} - ms := &memorySort{ - resultsBuilder: resultsBuilder{ - logicalPlanCommon: newBuilderCommon(plan), - weightStrings: make(map[*resultColumn]int), - truncater: primitive, - }, - eMemorySort: primitive, - } - - for _, order := range orderExprs { - wsExpr := order.SimplifiedExpr - if !useWeightStr { - wsExpr = nil - } - offset, weightStringOffset, err := wrapAndPushExpr(ctx, order.Inner.Expr, wsExpr, plan) - if err != nil { - return nil, err - } - typ, col, _ := ctx.SemTable.TypeForExpr(order.Inner.Expr) - ms.eMemorySort.OrderBy = append(ms.eMemorySort.OrderBy, engine.OrderByParams{ - Col: offset, - WeightStringCol: weightStringOffset, - Desc: order.Inner.Direction == sqlparser.DescOrder, - StarColFixedIndex: offset, - Type: typ, - CollationID: col, - }) - } - return ms, nil -} - -func orderExprsDependsOnTableSet(orderExprs []ops.OrderBy, semTable *semantics.SemTable, ts semantics.TableSet) bool { - for _, expr := range orderExprs { - exprDependencies := semTable.RecursiveDeps(expr.Inner.Expr) - if !exprDependencies.IsSolvedBy(ts) { - return false - } - } - return true -} - -func (hp *horizonPlanning) planDistinct(ctx *plancontext.PlanningContext, plan logicalPlan) (logicalPlan, error) { - if !hp.qp.NeedsDistinct() { - return plan, nil - } - switch p := plan.(type) { - case *routeGen4: - // we always make the underlying query distinct, - // and then we might also add a distinct operator on top if it is needed - p.Select.MakeDistinct() - if p.isSingleShard() || selectHasUniqueVindex(ctx.SemTable, hp.qp.SelectExprs) { - return plan, nil - } - - return hp.addDistinct(ctx, plan) - case *joinGen4, *pulloutSubquery: - return hp.addDistinct(ctx, plan) - case *orderedAggregate: - 
return hp.planDistinctOA(ctx.SemTable, p) - default: - return nil, vterrors.VT13001(fmt.Sprintf("unknown plan type for DISTINCT %T", plan)) - } -} - -func (hp *horizonPlanning) planDistinctOA(semTable *semantics.SemTable, currPlan *orderedAggregate) (logicalPlan, error) { - oa := &orderedAggregate{ - resultsBuilder: resultsBuilder{ - logicalPlanCommon: newBuilderCommon(currPlan), - weightStrings: make(map[*resultColumn]int), - }, - } - for _, sExpr := range hp.qp.SelectExprs { - expr, err := sExpr.GetExpr() - if err != nil { - return nil, err - } - found := false - for _, grpParam := range currPlan.groupByKeys { - if semTable.EqualsExpr(expr, grpParam.Expr) { - found = true - oa.groupByKeys = append(oa.groupByKeys, grpParam) - break - } - } - if found { - continue - } - for _, aggrParam := range currPlan.aggregates { - if semTable.EqualsExpr(expr, aggrParam.Expr) { - found = true - typ, col, _ := semTable.TypeForExpr(expr) - oa.groupByKeys = append(oa.groupByKeys, &engine.GroupByParams{KeyCol: aggrParam.Col, WeightStringCol: -1, Type: typ, CollationID: col}) - break - } - } - if !found { - return nil, vterrors.VT13001(fmt.Sprintf("unable to plan DISTINCT query as the column is not projected: %s", sqlparser.String(sExpr.Col))) - } - } - return oa, nil -} - -func (hp *horizonPlanning) addDistinct(ctx *plancontext.PlanningContext, plan logicalPlan) (logicalPlan, error) { - var orderExprs []ops.OrderBy - var groupByKeys []*engine.GroupByParams - for index, sExpr := range hp.qp.SelectExprs { - aliasExpr, err := sExpr.GetAliasedExpr() - if err != nil { - return nil, err - } - if isAmbiguousOrderBy(index, aliasExpr.As, hp.qp.SelectExprs) { - return nil, vterrors.VT13001(fmt.Sprintf("generating ORDER BY clause: ambiguous symbol reference: %s", sqlparser.String(aliasExpr.As))) - } - var inner sqlparser.Expr - if aliasExpr.As.IsEmpty() { - inner = aliasExpr.Expr - } else { - // If we have an alias, we need to use the alias and not the original expression - // to make sure 
dependencies work correctly, - // we simply copy the dependencies of the original expression here - inner = sqlparser.NewColName(aliasExpr.As.String()) - ctx.SemTable.CopyDependencies(aliasExpr.Expr, inner) - } - typ, col, _ := ctx.SemTable.TypeForExpr(inner) - grpParam := &engine.GroupByParams{KeyCol: index, WeightStringCol: -1, Type: typ, CollationID: col, Expr: inner} - _, wOffset, err := wrapAndPushExpr(ctx, aliasExpr.Expr, aliasExpr.Expr, plan) - if err != nil { - return nil, err - } - grpParam.WeightStringCol = wOffset - groupByKeys = append(groupByKeys, grpParam) - - orderExprs = append(orderExprs, ops.OrderBy{ - Inner: &sqlparser.Order{Expr: inner}, - SimplifiedExpr: aliasExpr.Expr}, - ) - } - innerPlan, err := hp.planOrderBy(ctx, orderExprs, plan) - if err != nil { - return nil, err - } - oa := &orderedAggregate{ - resultsBuilder: resultsBuilder{ - logicalPlanCommon: newBuilderCommon(innerPlan), - weightStrings: make(map[*resultColumn]int), - }, - groupByKeys: groupByKeys, - } - return oa, nil -} - -func isAmbiguousOrderBy(index int, col sqlparser.IdentifierCI, exprs []operators.SelectExpr) bool { - if col.String() == "" { - return false - } - for i, expr := range exprs { - if i == index { - continue - } - aliasExpr, isAlias := expr.Col.(*sqlparser.AliasedExpr) - if !isAlias { - // TODO: handle star expression error - return true - } - alias := aliasExpr.As - if alias.IsEmpty() { - if col, ok := aliasExpr.Expr.(*sqlparser.ColName); ok { - alias = col.Name - } - } - if col.Equal(alias) { - return true - } - } - return false -} - -func selectHasUniqueVindex(semTable *semantics.SemTable, sel []operators.SelectExpr) bool { - for _, expr := range sel { - exp, err := expr.GetExpr() - if err != nil { - // TODO: handle star expression error - return false - } - if exprHasUniqueVindex(semTable, exp) { - return true - } - } - return false -} - -func (hp *horizonPlanning) planHaving(ctx *plancontext.PlanningContext, plan logicalPlan) (logicalPlan, error) { - if 
hp.sel.Having == nil { - return plan, nil - } - return pushHaving(ctx, hp.sel.Having.Expr, plan) -} - -func pushHaving(ctx *plancontext.PlanningContext, expr sqlparser.Expr, plan logicalPlan) (logicalPlan, error) { - switch node := plan.(type) { - case *routeGen4: - sel := sqlparser.GetFirstSelect(node.Select) - sel.AddHaving(expr) - return plan, nil - case *pulloutSubquery: - return pushHaving(ctx, expr, node.underlying) - case *simpleProjection: - return nil, vterrors.VT13001("filtering on results of cross-shard derived table") - case *orderedAggregate: - return newFilter(ctx, plan, expr) - } - return nil, vterrors.VT13001(fmt.Sprintf("unreachable %T.filtering", plan)) -} - -func isJoin(plan logicalPlan) bool { - switch plan.(type) { - case *joinGen4, *hashJoin: - return true - default: - return false - } -} - -func exprHasUniqueVindex(semTable *semantics.SemTable, expr sqlparser.Expr) bool { - return exprHasVindex(semTable, expr, true) -} - -func exprHasVindex(semTable *semantics.SemTable, expr sqlparser.Expr, hasToBeUnique bool) bool { - col, isCol := expr.(*sqlparser.ColName) - if !isCol { - return false - } - ts := semTable.RecursiveDeps(expr) - tableInfo, err := semTable.TableInfoFor(ts) - if err != nil { - return false - } - vschemaTable := tableInfo.GetVindexTable() - for _, vindex := range vschemaTable.ColumnVindexes { - if len(vindex.Columns) > 1 || hasToBeUnique && !vindex.IsUnique() { - return false - } - if col.Name.Equal(vindex.Columns[0]) { - return true - } - } - return false -} - -func planSingleRoutePlan(sel sqlparser.SelectStatement, rb *routeGen4) error { - err := stripDownQuery(sel, rb.Select) - if err != nil { - return err - } - return sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - if aliasedExpr, ok := node.(sqlparser.SelectExpr); ok { - removeKeyspaceFromSelectExpr(aliasedExpr) - } - return true, nil - }, rb.Select) - -} - -func removeKeyspaceFromSelectExpr(expr sqlparser.SelectExpr) { - switch expr := 
expr.(type) { - case *sqlparser.AliasedExpr: - sqlparser.RemoveKeyspaceFromColName(expr.Expr) - case *sqlparser.StarExpr: - expr.TableName.Qualifier = sqlparser.NewIdentifierCS("") - } -} - -func stripDownQuery(from, to sqlparser.SelectStatement) error { - var err error - - switch node := from.(type) { - case *sqlparser.Select: - toNode, ok := to.(*sqlparser.Select) - if !ok { - return vterrors.VT13001("AST did not match") - } - toNode.Distinct = node.Distinct - toNode.GroupBy = node.GroupBy - toNode.Having = node.Having - toNode.OrderBy = node.OrderBy - toNode.Comments = node.Comments - toNode.SelectExprs = node.SelectExprs - for _, expr := range toNode.SelectExprs { - removeKeyspaceFromSelectExpr(expr) - } - case *sqlparser.Union: - toNode, ok := to.(*sqlparser.Union) - if !ok { - return vterrors.VT13001("AST did not match") - } - err = stripDownQuery(node.Left, toNode.Left) - if err != nil { - return err - } - err = stripDownQuery(node.Right, toNode.Right) - if err != nil { - return err - } - toNode.OrderBy = node.OrderBy - default: - return vterrors.VT13001(fmt.Sprintf("this should not happen - we have covered all implementations of SelectStatement %T", from)) - } - return nil -} - -func planGroupByGen4(ctx *plancontext.PlanningContext, groupExpr operators.GroupBy, plan logicalPlan, wsAdded bool) error { - switch node := plan.(type) { - case *routeGen4: - sel := node.Select.(*sqlparser.Select) - sel.AddGroupBy(groupExpr.Inner) - // If a weight_string function is added to the select list, - // then we need to add that to the group by clause otherwise the query will fail on mysql with full_group_by error - // as the weight_string function might not be functionally dependent on the group by. 
- if wsAdded { - sel.AddGroupBy(weightStringFor(groupExpr.SimplifiedExpr)) - } - return nil - case *pulloutSubquery: - return planGroupByGen4(ctx, groupExpr, node.underlying, wsAdded) - case *semiJoin: - return vterrors.VT13001("GROUP BY in a query having a correlated subquery") - default: - return vterrors.VT13001(fmt.Sprintf("GROUP BY on: %T", plan)) - } -} - -func getLengthOfProjection(groupingOffsets []offsets, aggregations []operators.Aggr) int { - length := 0 - for _, groupBy := range groupingOffsets { - if groupBy.wsCol != -1 { - length++ - } - length++ - } - length += len(aggregations) - return length -} diff --git a/go/vt/vtgate/planbuilder/horizon_planning_test.go b/go/vt/vtgate/planbuilder/horizon_planning_test.go deleted file mode 100644 index 94e51b6700a..00000000000 --- a/go/vt/vtgate/planbuilder/horizon_planning_test.go +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package planbuilder - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/vt/vtgate/semantics" - - "vitess.io/vitess/go/vt/sqlparser" -) - -func TestCheckIfAlreadyExists(t *testing.T) { - tests := []struct { - name string - expr *sqlparser.AliasedExpr - sel *sqlparser.Select - want int - }{ - { - name: "No alias, both ColName", - want: 0, - expr: &sqlparser.AliasedExpr{Expr: sqlparser.NewColName("id")}, - sel: &sqlparser.Select{SelectExprs: []sqlparser.SelectExpr{&sqlparser.AliasedExpr{Expr: sqlparser.NewColName("id")}}}, - }, - { - name: "Aliased expression and ColName", - want: 0, - expr: &sqlparser.AliasedExpr{Expr: sqlparser.NewColName("user_id")}, - sel: &sqlparser.Select{SelectExprs: []sqlparser.SelectExpr{&sqlparser.AliasedExpr{As: sqlparser.NewIdentifierCI("user_id"), Expr: sqlparser.NewColName("id")}}}, - }, - { - name: "Non-ColName expressions", - want: 0, - expr: &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral("test")}, - sel: &sqlparser.Select{SelectExprs: []sqlparser.SelectExpr{&sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral("test")}}}, - }, - { - name: "No alias, multiple ColName in projection", - want: 1, - expr: &sqlparser.AliasedExpr{Expr: sqlparser.NewColName("id")}, - sel: &sqlparser.Select{SelectExprs: []sqlparser.SelectExpr{&sqlparser.AliasedExpr{Expr: sqlparser.NewColName("foo")}, &sqlparser.AliasedExpr{Expr: sqlparser.NewColName("id")}}}, - }, - { - name: "No matching entry", - want: -1, - expr: &sqlparser.AliasedExpr{Expr: sqlparser.NewColName("id")}, - sel: &sqlparser.Select{SelectExprs: []sqlparser.SelectExpr{&sqlparser.AliasedExpr{Expr: sqlparser.NewColName("foo")}, &sqlparser.AliasedExpr{Expr: sqlparser.NewColName("name")}}}, - }, - { - name: "No AliasedExpr in projection", - want: -1, - expr: &sqlparser.AliasedExpr{Expr: sqlparser.NewColName("id")}, - sel: &sqlparser.Select{SelectExprs: []sqlparser.SelectExpr{&sqlparser.StarExpr{TableName: sqlparser.TableName{Name: 
sqlparser.NewIdentifierCS("user")}}, &sqlparser.StarExpr{TableName: sqlparser.TableName{Name: sqlparser.NewIdentifierCS("people")}}}}, - }, - } - for _, tt := range tests { - semTable := semantics.EmptySemTable() - t.Run(tt.name, func(t *testing.T) { - got := checkIfAlreadyExists(tt.expr, tt.sel, semTable) - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/go/vt/vtgate/planbuilder/insert.go b/go/vt/vtgate/planbuilder/insert.go index 53a4ee669be..f66fd5727b1 100644 --- a/go/vt/vtgate/planbuilder/insert.go +++ b/go/vt/vtgate/planbuilder/insert.go @@ -17,452 +17,136 @@ limitations under the License. package planbuilder import ( - "fmt" - "strconv" - "strings" - - "vitess.io/vitess/go/vt/key" - - "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) -// buildInsertPlan builds the route for an INSERT statement. -func buildInsertPlan(string) stmtPlanner { - return func(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { - pb := newStmtAwarePrimitiveBuilder(vschema, newJointab(reservedVars), stmt) - ins := stmt.(*sqlparser.Insert) - err := checkUnsupportedExpressions(ins) - if err != nil { - return nil, err - } - exprs := sqlparser.TableExprs{ins.Table} - rb, err := pb.processDMLTable(exprs, reservedVars, nil) - if err != nil { - return nil, err - } - // The table might have been routed to a different one. - ins.Table = exprs[0].(*sqlparser.AliasedTableExpr) - // remove any alias added from routing table. insert query does not support table alias. 
- ins.Table.As = sqlparser.NewIdentifierCS("") - if rb.eroute.TargetDestination != nil { - return nil, vterrors.VT12001("INSERT with a target destination") - } - - if len(pb.st.tables) != 1 { - // Unreachable. - return nil, vterrors.VT12001("multi-table INSERT statement in a sharded keyspace") - } - var vschemaTable *vindexes.Table - for _, tval := range pb.st.tables { - // There is only one table. - vschemaTable = tval.vschemaTable - } - if !rb.eroute.Keyspace.Sharded { - return buildInsertUnshardedPlan(ins, vschemaTable, reservedVars, vschema) - } - - if vschemaTable.Pinned != nil { - return buildInsertUnshardedPlan(ins, vschemaTable, reservedVars, vschema) - } - if ins.Action == sqlparser.ReplaceAct { - return nil, vterrors.VT12001("REPLACE INTO with sharded keyspace") - } - return buildInsertShardedPlan(ins, vschemaTable, reservedVars, vschema) - } -} - -func buildInsertUnshardedPlan(ins *sqlparser.Insert, table *vindexes.Table, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { - eins := engine.NewSimpleInsert( - engine.InsertUnsharded, - table, - table.Keyspace, - ) - if table.Pinned != nil { - eins.Opcode = engine.InsertByDestination - eins.TargetDestination = key.DestinationKeyspaceID(table.Pinned) - } - applyCommentDirectives(ins, eins) - - var rows sqlparser.Values - tc := &tableCollector{} - tc.addVindexTable(table) - switch insertValues := ins.Rows.(type) { - case *sqlparser.Select, *sqlparser.Union: - if eins.Table.AutoIncrement != nil { - return nil, vterrors.VT12001("auto-increment and SELECT in INSERT") - } - plan, err := subquerySelectPlan(ins, vschema, reservedVars, false) - if err != nil { - return nil, err - } - tc.addAllTables(plan.tables) - if route, ok := plan.primitive.(*engine.Route); ok && !route.Keyspace.Sharded && table.Keyspace.Name == route.Keyspace.Name { - eins.Query = generateQuery(ins) - } else { - eins.Input = plan.primitive - eins.Prefix, _, eins.Suffix = generateInsertShardedQuery(ins) - } 
- return newPlanResult(eins, tc.getTables()...), nil - case sqlparser.Values: - rows = insertValues - default: - return nil, vterrors.VT13001(fmt.Sprintf("unexpected construct in INSERT: %T", insertValues)) - } - if eins.Table.AutoIncrement == nil { - eins.Query = generateQuery(ins) - } else { - // Table has auto-inc and has a VALUES clause. - // If the column list is nil then add all the columns - // If the column list is empty then add only the auto-inc column and this happens on calling modifyForAutoinc - if ins.Columns == nil { - if table.ColumnListAuthoritative { - populateInsertColumnlist(ins, table) - } else { - return nil, vterrors.VT13001("column list required for tables with auto-inc columns") - } - } - for _, row := range rows { - if len(ins.Columns) != len(row) { - return nil, vterrors.VT13001("column list does not match values") - } - } - if err := modifyForAutoinc(ins, eins); err != nil { - return nil, err - } - eins.Query = generateQuery(ins) - } - - return newPlanResult(eins, tc.getTables()...), nil -} - -func buildInsertShardedPlan(ins *sqlparser.Insert, table *vindexes.Table, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { - eins := &engine.Insert{ - Table: table, - Keyspace: table.Keyspace, - } - tc := &tableCollector{} - tc.addVindexTable(table) - eins.Ignore = bool(ins.Ignore) - if ins.OnDup != nil { - if isVindexChanging(sqlparser.UpdateExprs(ins.OnDup), eins.Table.ColumnVindexes) { - return nil, vterrors.VT12001("DML cannot update vindex column") - } - eins.Ignore = true - } - if ins.Columns == nil && table.ColumnListAuthoritative { - populateInsertColumnlist(ins, table) - } - - applyCommentDirectives(ins, eins) - eins.ColVindexes = getColVindexes(eins.Table.ColumnVindexes) - - // Till here common plan building done for insert by providing values or select query. 
- - rows, isRowValues := ins.Rows.(sqlparser.Values) - if !isRowValues { - return buildInsertSelectPlan(ins, table, reservedVars, vschema, eins) - } - eins.Opcode = engine.InsertSharded - - for _, value := range rows { - if len(ins.Columns) != len(value) { - return nil, vterrors.VT13001("column list does not match values") - } - } - - if err := modifyForAutoinc(ins, eins); err != nil { +func gen4InsertStmtPlanner(version querypb.ExecuteOptions_PlannerVersion, insStmt *sqlparser.Insert, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { + ctx, err := plancontext.CreatePlanningContext(insStmt, reservedVars, vschema, version) + if err != nil { return nil, err } - // Fill out the 3-d Values structure. Please see documentation of Insert.Values for details. - colVindexes := eins.ColVindexes - routeValues := make([][][]evalengine.Expr, len(colVindexes)) - for vIdx, colVindex := range colVindexes { - routeValues[vIdx] = make([][]evalengine.Expr, len(colVindex.Columns)) - for colIdx, col := range colVindex.Columns { - routeValues[vIdx][colIdx] = make([]evalengine.Expr, len(rows)) - colNum := findOrAddColumn(ins, col) - for rowNum, row := range rows { - innerpv, err := evalengine.Translate(row[colNum], nil) - if err != nil { - return nil, err - } - routeValues[vIdx][colIdx][rowNum] = innerpv - } - } - } - for _, colVindex := range colVindexes { - for _, col := range colVindex.Columns { - colNum := findOrAddColumn(ins, col) - for rowNum, row := range rows { - name := engine.InsertVarName(col, rowNum) - row[colNum] = sqlparser.NewArgument(name) - } - } + err = rewriteRoutedTables(insStmt, vschema) + if err != nil { + return nil, err } - eins.VindexValues = routeValues - eins.Query = generateQuery(ins) - eins.Prefix, eins.Mid, eins.Suffix = generateInsertShardedQuery(ins) - return newPlanResult(eins, tc.getTables()...), nil -} - -// buildInsertSelectPlan builds an insert using select plan. 
-func buildInsertSelectPlan(ins *sqlparser.Insert, table *vindexes.Table, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, eins *engine.Insert) (*planResult, error) { - eins.Opcode = engine.InsertSelect - tc := &tableCollector{} - tc.addVindexTable(table) + // remove any alias added from routing table. + // insert query does not support table alias. + insStmt.Table.As = sqlparser.NewIdentifierCS("") - // check if column list is provided if not, then vschema should be able to provide the column list. - if len(ins.Columns) == 0 { - if !table.ColumnListAuthoritative { - return nil, vterrors.VT09004() + // Check single unsharded. Even if the table is for single unsharded but sequence table is used. + // We cannot shortcut here as sequence column needs additional planning. + ks, tables := ctx.SemTable.SingleUnshardedKeyspace() + fkPlanNeeded := false + if ks != nil { + noAutoInc := tables[0].AutoIncrement == nil + fkPlanNeeded = fkManagementRequiredForInsert(ctx, tables[0], sqlparser.UpdateExprs(insStmt.OnDup), insStmt.Action == sqlparser.ReplaceAct) + if noAutoInc && !fkPlanNeeded { + plan := insertUnshardedShortcut(insStmt, ks, tables) + plan = pushCommentDirectivesOnPlan(plan, insStmt) + return newPlanResult(plan.Primitive(), operators.QualifiedTables(ks, tables)...), nil } - populateInsertColumnlist(ins, table) } - // select plan will be taken as input to insert rows into the table. - plan, err := subquerySelectPlan(ins, vschema, reservedVars, true) + tblInfo, err := ctx.SemTable.TableInfoFor(ctx.SemTable.TableSetFor(insStmt.Table)) if err != nil { return nil, err } - tc.addAllTables(plan.tables) - eins.Input = plan.primitive - - // When the table you are steaming data from and table you are inserting from are same. - // Then due to locking of the index range on the table we might not be able to insert into the table. - // Therefore, instead of streaming, this flag will ensure the records are first read and then inserted. 
- if strings.Contains(plan.primitive.GetTableName(), table.Name.String()) { - eins.ForceNonStreaming = true - } - // auto-increment column is added explicitly if not provided. - if err := modifyForAutoinc(ins, eins); err != nil { + if err = errOutIfPlanCannotBeConstructed(ctx, tblInfo.GetVindexTable(), insStmt, fkPlanNeeded); err != nil { return nil, err } - // Fill out the 3-d Values structure - eins.VindexValueOffset, err = extractColVindexOffsets(ins, eins.ColVindexes) + err = queryRewrite(ctx.SemTable, reservedVars, insStmt) if err != nil { return nil, err } - eins.Prefix, _, eins.Suffix = generateInsertShardedQuery(ins) - return newPlanResult(eins, tc.getTables()...), nil -} - -func subquerySelectPlan(ins *sqlparser.Insert, vschema plancontext.VSchema, reservedVars *sqlparser.ReservedVars, sharded bool) (*planResult, error) { - selectStmt, queryPlanner, err := getStatementAndPlanner(ins, vschema) + op, err := operators.PlanQuery(ctx, insStmt) if err != nil { return nil, err } - // validate the columns to match on insert and select - // for sharded insert table only - if sharded { - if err := checkColumnCounts(ins, selectStmt); err != nil { - return nil, err - } - } - - // Override the locking with `for update` to lock the rows for inserting the data. 
- selectStmt.SetLock(sqlparser.ForUpdateLock) - - return queryPlanner(selectStmt, reservedVars, vschema) -} - -func getStatementAndPlanner( - ins *sqlparser.Insert, - vschema plancontext.VSchema, -) (selectStmt sqlparser.SelectStatement, configuredPlanner stmtPlanner, err error) { - switch stmt := ins.Rows.(type) { - case *sqlparser.Select: - configuredPlanner, err = getConfiguredPlanner(vschema, buildSelectPlan, stmt, "") - selectStmt = stmt - case *sqlparser.Union: - configuredPlanner, err = getConfiguredPlanner(vschema, buildUnionPlan, stmt, "") - selectStmt = stmt - default: - err = vterrors.VT12001(fmt.Sprintf("INSERT plan with %T", ins.Rows)) - } - + plan, err := transformToLogicalPlan(ctx, op) if err != nil { - return nil, nil, err + return nil, err } - return selectStmt, configuredPlanner, nil -} + plan = pushCommentDirectivesOnPlan(plan, insStmt) -func checkColumnCounts(ins *sqlparser.Insert, selectStmt sqlparser.SelectStatement) error { - if len(ins.Columns) < selectStmt.GetColumnCount() { - return vterrors.VT03006() - } - if len(ins.Columns) > selectStmt.GetColumnCount() { - sel := sqlparser.GetFirstSelect(selectStmt) - var hasStarExpr bool - for _, sExpr := range sel.SelectExprs { - if _, hasStarExpr = sExpr.(*sqlparser.StarExpr); hasStarExpr { - break - } - } - if !hasStarExpr { - return vterrors.VT03006() - } - } - return nil -} + setLockOnAllSelect(plan) -func applyCommentDirectives(ins *sqlparser.Insert, eins *engine.Insert) { - directives := ins.Comments.Directives() - if directives.IsSet(sqlparser.DirectiveMultiShardAutocommit) { - eins.MultiShardAutocommit = true + if err := plan.Wireup(ctx); err != nil { + return nil, err } - eins.QueryTimeout = queryTimeout(directives) -} -func getColVindexes(allColVindexes []*vindexes.ColumnVindex) (colVindexes []*vindexes.ColumnVindex) { - for _, colVindex := range allColVindexes { - if colVindex.IsPartialVindex() { - continue - } - colVindexes = append(colVindexes, colVindex) - } - return + return 
newPlanResult(plan.Primitive(), operators.TablesUsed(op)...), nil } -func extractColVindexOffsets(ins *sqlparser.Insert, colVindexes []*vindexes.ColumnVindex) ([][]int, error) { - vv := make([][]int, len(colVindexes)) - for idx, colVindex := range colVindexes { - for _, col := range colVindex.Columns { - colNum := findColumn(ins, col) - // sharding column values should be provided in the insert. - if colNum == -1 && idx == 0 { - return nil, vterrors.VT09003(col) - } - vv[idx] = append(vv[idx], colNum) - } +func errOutIfPlanCannotBeConstructed(ctx *plancontext.PlanningContext, vTbl *vindexes.Table, insStmt *sqlparser.Insert, fkPlanNeeded bool) error { + if vTbl.Keyspace.Sharded && ctx.SemTable.NotUnshardedErr != nil { + return ctx.SemTable.NotUnshardedErr } - return vv, nil -} - -// findColumn returns the column index where it is placed on the insert column list. -// Otherwise, return -1 when not found. -func findColumn(ins *sqlparser.Insert, col sqlparser.IdentifierCI) int { - for i, column := range ins.Columns { - if col.Equal(column) { - return i - } + if insStmt.Action != sqlparser.ReplaceAct { + return nil } - return -1 -} - -func populateInsertColumnlist(ins *sqlparser.Insert, table *vindexes.Table) { - cols := make(sqlparser.Columns, 0, len(table.Columns)) - for _, c := range table.Columns { - cols = append(cols, c.Name) + if fkPlanNeeded { + return vterrors.VT12001("REPLACE INTO with foreign keys") } - ins.Columns = cols + return nil } -// modifyForAutoinc modifies the AST and the plan to generate necessary autoinc values. -// For row values cases, bind variable names are generated using baseName. -func modifyForAutoinc(ins *sqlparser.Insert, eins *engine.Insert) error { - if eins.Table.AutoIncrement == nil { - return nil +// TODO: Handle all this in semantic analysis. 
+func fkManagementRequiredForInsert(ctx *plancontext.PlanningContext, vTbl *vindexes.Table, updateExprs sqlparser.UpdateExprs, replace bool) bool { + ksMode, err := ctx.VSchema.ForeignKeyMode(vTbl.Keyspace.Name) + if err != nil || ksMode != vschemapb.Keyspace_managed { + return false } - colNum := findOrAddColumn(ins, eins.Table.AutoIncrement.Column) - selNext := &sqlparser.Select{ - From: []sqlparser.TableExpr{&sqlparser.AliasedTableExpr{Expr: &sqlparser.TableName{Name: eins.Table.AutoIncrement.Sequence.Name}}}, - SelectExprs: sqlparser.SelectExprs{&sqlparser.Nextval{Expr: &sqlparser.Argument{Name: "n", Type: sqltypes.Int64}}}, - } - eins.Generate = &engine.Generate{ - Keyspace: eins.Table.AutoIncrement.Sequence.Keyspace, - Query: sqlparser.String(selNext), - Pinned: eins.Table.AutoIncrement.Sequence.Pinned, - } - switch rows := ins.Rows.(type) { - case sqlparser.SelectStatement: - eins.Generate.Offset = colNum - return nil - case sqlparser.Values: - autoIncValues := make([]evalengine.Expr, 0, len(rows)) - for rowNum, row := range rows { - // Support the DEFAULT keyword by treating it as null - if _, ok := row[colNum].(*sqlparser.Default); ok { - row[colNum] = &sqlparser.NullVal{} - } - pv, err := evalengine.Translate(row[colNum], nil) - if err != nil { - return err - } - autoIncValues = append(autoIncValues, pv) - row[colNum] = sqlparser.NewArgument(engine.SeqVarName + strconv.Itoa(rowNum)) - } - eins.Generate.Values = evalengine.NewTupleExpr(autoIncValues...) - return nil + if len(vTbl.ParentFKsNeedsHandling(ctx.VerifyAllFKs, "")) > 0 { + return true } - return vterrors.VT13001(fmt.Sprintf("unexpected construct in INSERT: %T", ins.Rows)) -} -// findOrAddColumn finds the position of a column in the insert. If it's -// absent it appends it to the with NULL values and returns that position. 
-func findOrAddColumn(ins *sqlparser.Insert, col sqlparser.IdentifierCI) int { - colNum := findColumn(ins, col) - if colNum >= 0 { - return colNum + childFks := vTbl.ChildFKsNeedsHandling(ctx.VerifyAllFKs, vindexes.UpdateAction) + if len(childFks) > 0 && replace { + return true } - colOffset := len(ins.Columns) - ins.Columns = append(ins.Columns, col) - if rows, ok := ins.Rows.(sqlparser.Values); ok { - for i := range rows { - rows[i] = append(rows[i], &sqlparser.NullVal{}) - } - } - return colOffset + + // Check if any column in the parent table is being updated which has a child foreign key. + return columnModified(updateExprs, func(expr *sqlparser.UpdateExpr) ([]vindexes.ParentFKInfo, []vindexes.ChildFKInfo) { + return nil, childFks + }) } -// isVindexChanging returns true if any of the update -// expressions modify a vindex column. -func isVindexChanging(setClauses sqlparser.UpdateExprs, colVindexes []*vindexes.ColumnVindex) bool { - for _, assignment := range setClauses { - for _, vcol := range colVindexes { - for _, col := range vcol.Columns { - if col.Equal(assignment.Name.Name) { - valueExpr, isValuesFuncExpr := assignment.Expr.(*sqlparser.ValuesFuncExpr) - if !isValuesFuncExpr { - return true - } - // update on duplicate key is changing the vindex column, not supported. 
- if !valueExpr.Name.Name.Equal(assignment.Name.Name) { - return true - } - } - } - } - } - return false +func insertUnshardedShortcut(stmt *sqlparser.Insert, ks *vindexes.Keyspace, tables []*vindexes.Table) logicalPlan { + eIns := &engine.Insert{} + eIns.Keyspace = ks + eIns.TableName = tables[0].Name.String() + eIns.Opcode = engine.InsertUnsharded + eIns.Query = generateQuery(stmt) + return &insert{eInsert: eIns} } type insert struct { eInsert *engine.Insert source logicalPlan - gen4Plan } var _ logicalPlan = (*insert)(nil) -func (i *insert) WireupGen4(ctx *plancontext.PlanningContext) error { +func (i *insert) Wireup(ctx *plancontext.PlanningContext) error { if i.source == nil { return nil } - return i.source.WireupGen4(ctx) + return i.source.Wireup(ctx) } func (i *insert) Primitive() engine.Primitive { diff --git a/go/vt/vtgate/planbuilder/join.go b/go/vt/vtgate/planbuilder/join.go index 0fc9b5f2ce3..2b438ce56a0 100644 --- a/go/vt/vtgate/planbuilder/join.go +++ b/go/vt/vtgate/planbuilder/join.go @@ -19,230 +19,90 @@ package planbuilder import ( "fmt" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" ) var _ logicalPlan = (*join)(nil) // join is used to build a Join primitive. -// It's used to build a normal join or a left join -// operation. +// It's used to build an inner join and only used by the Gen4 planner type join struct { - v3Plan - order int - resultColumns []*resultColumn - weightStrings map[*resultColumn]int - - // leftOrder stores the order number of the left node. This is - // used for a b-tree style traversal towards the target route. - // Let us assume the following execution tree: - // J9 - // / \ - // / \ - // J3 J8 - // / \ / \ - // R1 R2 J6 R7 - // / \ - // R4 R5 - // - // In the above trees, the suffix numbers indicate the - // execution order. 
The leftOrder for the joins will then - // be as follows: - // J3: 1 - // J6: 4 - // J8: 6 - // J9: 3 - // - // The route to R4 would be: - // Go right from J9->J8 because Left(J9)==3, which is <4. - // Go left from J8->J6 because Left(J8)==6, which is >=4. - // Go left from J6->R4 because Left(J6)==4, the destination. - // Look for 'isOnLeft' to see how these numbers are used. - leftOrder int - // Left and Right are the nodes for the join. Left, Right logicalPlan - ejoin *engine.Join -} + // The Opcode tells us if this is an inner or outer join + Opcode engine.JoinOpcode -// newJoin makes a new join using the two planBuilder. ajoin can be nil -// if the join is on a ',' operator. lpb will contain the resulting join. -// rpb will be discarded. -func newJoin(lpb, rpb *primitiveBuilder, ajoin *sqlparser.JoinTableExpr, reservedVars *sqlparser.ReservedVars) error { - // This function converts ON clauses to WHERE clauses. The WHERE clause - // scope can see all tables, whereas the ON clause can only see the - // participants of the JOIN. However, since the ON clause doesn't allow - // external references, and the FROM clause doesn't allow duplicates, - // it's safe to perform this conversion and still expect the same behavior. - - opcode := engine.InnerJoin - if ajoin != nil { - switch { - case ajoin.Join == sqlparser.LeftJoinType: - opcode = engine.LeftJoin - - // For left joins, we have to push the ON clause into the RHS. - // We do this before creating the join primitive. - // However, variables of LHS need to be visible. To allow this, - // we mark the LHS symtab as outer scope to the RHS, just like - // a subquery. This make the RHS treat the LHS symbols as external. - // This will prevent constructs from escaping out of the rpb scope. - // At this point, the LHS symtab also contains symbols of the RHS. - // But the RHS will hide those, as intended. 
- rpb.st.Outer = lpb.st - if err := rpb.pushFilter(ajoin.Condition.On, sqlparser.WhereStr, reservedVars); err != nil { - return err - } - case ajoin.Condition.Using != nil: - return vterrors.VT12001("JOIN with USING(column_list) clause for complex queries") - } - } - lpb.plan = &join{ - weightStrings: make(map[*resultColumn]int), - Left: lpb.plan, - Right: rpb.plan, - ejoin: &engine.Join{ - Opcode: opcode, - Vars: make(map[string]int), - }, - } - lpb.plan.Reorder(0) - if ajoin == nil || opcode == engine.LeftJoin { - return nil - } - return lpb.pushFilter(ajoin.Condition.On, sqlparser.WhereStr, reservedVars) -} + // These are the columns that will be produced by this plan. + // Negative offsets come from the LHS, and positive from the RHS + Cols []int -// Order implements the logicalPlan interface -func (jb *join) Order() int { - return jb.order -} + // Vars are the columns that will be sent from the LHS to the RHS + // the number is the offset on the LHS result, and the string is the bind variable name used in the RHS + Vars map[string]int -// Reorder implements the logicalPlan interface -func (jb *join) Reorder(order int) { - jb.Left.Reorder(order) - jb.leftOrder = jb.Left.Order() - jb.Right.Reorder(jb.leftOrder) - jb.order = jb.Right.Order() + 1 -} - -// Primitive implements the logicalPlan interface -func (jb *join) Primitive() engine.Primitive { - jb.ejoin.Left = jb.Left.Primitive() - jb.ejoin.Right = jb.Right.Primitive() - return jb.ejoin -} - -// ResultColumns implements the logicalPlan interface -func (jb *join) ResultColumns() []*resultColumn { - return jb.resultColumns + // LHSColumns are the columns from the LHS used for the join. 
+ // These are the same columns pushed on the LHS that are now used in the Vars field + LHSColumns []*sqlparser.ColName } // Wireup implements the logicalPlan interface -func (jb *join) Wireup(plan logicalPlan, jt *jointab) error { - err := jb.Right.Wireup(plan, jt) +func (j *join) Wireup(ctx *plancontext.PlanningContext) error { + err := j.Left.Wireup(ctx) if err != nil { return err } - return jb.Left.Wireup(plan, jt) -} - -// SupplyVar implements the logicalPlan interface -func (jb *join) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { - if !jb.isOnLeft(from) { - jb.Right.SupplyVar(from, to, col, varname) - return - } - if jb.isOnLeft(to) { - jb.Left.SupplyVar(from, to, col, varname) - return - } - if _, ok := jb.ejoin.Vars[varname]; ok { - // Looks like somebody else already requested this. - return - } - c := col.Metadata.(*column) - for i, rc := range jb.resultColumns { - if jb.ejoin.Cols[i] > 0 { - continue - } - if rc.column == c { - jb.ejoin.Vars[varname] = -jb.ejoin.Cols[i] - 1 - return - } - } - _, jb.ejoin.Vars[varname] = jb.Left.SupplyCol(col) + return j.Right.Wireup(ctx) } -// SupplyCol implements the logicalPlan interface -func (jb *join) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) { - c := col.Metadata.(*column) - for i, rc := range jb.resultColumns { - if rc.column == c { - return rc, i - } - } - - routeNumber := c.Origin().Order() - var sourceCol int - if jb.isOnLeft(routeNumber) { - rc, sourceCol = jb.Left.SupplyCol(col) - jb.ejoin.Cols = append(jb.ejoin.Cols, -sourceCol-1) - } else { - rc, sourceCol = jb.Right.SupplyCol(col) - jb.ejoin.Cols = append(jb.ejoin.Cols, sourceCol+1) +// Primitive implements the logicalPlan interface +func (j *join) Primitive() engine.Primitive { + return &engine.Join{ + Left: j.Left.Primitive(), + Right: j.Right.Primitive(), + Cols: j.Cols, + Vars: j.Vars, + Opcode: j.Opcode, } - jb.resultColumns = append(jb.resultColumns, rc) - return rc, len(jb.ejoin.Cols) - 1 } -// 
SupplyWeightString implements the logicalPlan interface -func (jb *join) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weightcolNumber int, err error) { - rc := jb.resultColumns[colNumber] - if weightcolNumber, ok := jb.weightStrings[rc]; ok { - return weightcolNumber, nil - } - routeNumber := rc.column.Origin().Order() - if jb.isOnLeft(routeNumber) { - sourceCol, err := jb.Left.SupplyWeightString(-jb.ejoin.Cols[colNumber]-1, alsoAddToGroupBy) - if err != nil { - return 0, err - } - jb.ejoin.Cols = append(jb.ejoin.Cols, -sourceCol-1) - } else { - sourceCol, err := jb.Right.SupplyWeightString(jb.ejoin.Cols[colNumber]-1, alsoAddToGroupBy) - if err != nil { - return 0, err - } - jb.ejoin.Cols = append(jb.ejoin.Cols, sourceCol+1) - } - jb.resultColumns = append(jb.resultColumns, rc) - jb.weightStrings[rc] = len(jb.ejoin.Cols) - 1 - return len(jb.ejoin.Cols) - 1, nil +// Inputs implements the logicalPlan interface +func (j *join) Inputs() []logicalPlan { + return []logicalPlan{j.Left, j.Right} } // Rewrite implements the logicalPlan interface -func (jb *join) Rewrite(inputs ...logicalPlan) error { +func (j *join) Rewrite(inputs ...logicalPlan) error { if len(inputs) != 2 { - return vterrors.VT13001(fmt.Sprintf("join: wrong number of inputs, got: %d, expect: 2", len(inputs))) + return vterrors.VT13001(fmt.Sprintf("wrong number of children in join rewrite, got: %d, expect: 2", len(inputs))) } - jb.Left = inputs[0] - jb.Right = inputs[1] + j.Left = inputs[0] + j.Right = inputs[1] return nil } -// Inputs implements the logicalPlan interface -func (jb *join) Inputs() []logicalPlan { - return []logicalPlan{jb.Left, jb.Right} +// ContainsTables implements the logicalPlan interface +func (j *join) ContainsTables() semantics.TableSet { + return j.Left.ContainsTables().Merge(j.Right.ContainsTables()) } -// isOnLeft returns true if the specified route number -// is on the left side of the join. If false, it means -// the node is on the right. 
-func (jb *join) isOnLeft(nodeNum int) bool { - return nodeNum <= jb.leftOrder +// OutputColumns implements the logicalPlan interface +func (j *join) OutputColumns() []sqlparser.SelectExpr { + return getOutputColumnsFromJoin(j.Cols, j.Left.OutputColumns(), j.Right.OutputColumns()) +} + +func getOutputColumnsFromJoin(ints []int, lhs []sqlparser.SelectExpr, rhs []sqlparser.SelectExpr) (cols []sqlparser.SelectExpr) { + for _, col := range ints { + if col < 0 { + col *= -1 + cols = append(cols, lhs[col-1]) + } else { + cols = append(cols, rhs[col-1]) + } + } + return } diff --git a/go/vt/vtgate/planbuilder/joinGen4.go b/go/vt/vtgate/planbuilder/joinGen4.go deleted file mode 100644 index 04a408b1fb4..00000000000 --- a/go/vt/vtgate/planbuilder/joinGen4.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" -) - -var _ logicalPlan = (*joinGen4)(nil) - -// joinGen4 is used to build a Join primitive. -// It's used to build an inner join and only used by the Gen4 planner -type joinGen4 struct { - // Left and Right are the nodes for the join. 
- Left, Right logicalPlan - - // The Opcode tells us if this is an inner or outer join - Opcode engine.JoinOpcode - - // These are the columns that will be produced by this plan. - // Negative offsets come from the LHS, and positive from the RHS - Cols []int - - // Vars are the columns that will be sent from the LHS to the RHS - // the number is the offset on the LHS result, and the string is the bind variable name used in the RHS - Vars map[string]int - - // LHSColumns are the columns from the LHS used for the join. - // These are the same columns pushed on the LHS that are now used in the Vars field - LHSColumns []*sqlparser.ColName - - gen4Plan -} - -// WireupGen4 implements the logicalPlan interface -func (j *joinGen4) WireupGen4(ctx *plancontext.PlanningContext) error { - err := j.Left.WireupGen4(ctx) - if err != nil { - return err - } - return j.Right.WireupGen4(ctx) -} - -// Primitive implements the logicalPlan interface -func (j *joinGen4) Primitive() engine.Primitive { - return &engine.Join{ - Left: j.Left.Primitive(), - Right: j.Right.Primitive(), - Cols: j.Cols, - Vars: j.Vars, - Opcode: j.Opcode, - } -} - -// Inputs implements the logicalPlan interface -func (j *joinGen4) Inputs() []logicalPlan { - return []logicalPlan{j.Left, j.Right} -} - -// Rewrite implements the logicalPlan interface -func (j *joinGen4) Rewrite(inputs ...logicalPlan) error { - if len(inputs) != 2 { - return vterrors.VT13001(fmt.Sprintf("wrong number of children in joinGen4 rewrite, got: %d, expect: 2", len(inputs))) - } - j.Left = inputs[0] - j.Right = inputs[1] - return nil -} - -// ContainsTables implements the logicalPlan interface -func (j *joinGen4) ContainsTables() semantics.TableSet { - return j.Left.ContainsTables().Merge(j.Right.ContainsTables()) -} - -// OutputColumns implements the logicalPlan interface -func (j *joinGen4) OutputColumns() []sqlparser.SelectExpr { - return getOutputColumnsFromJoin(j.Cols, j.Left.OutputColumns(), j.Right.OutputColumns()) -} diff --git 
a/go/vt/vtgate/planbuilder/jointab.go b/go/vt/vtgate/planbuilder/jointab.go deleted file mode 100644 index 956f7330bda..00000000000 --- a/go/vt/vtgate/planbuilder/jointab.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/vt/sqlparser" -) - -// jointab manages procurement and naming of join -// variables across primitives. -type jointab struct { - refs map[*column]string - reserved *sqlparser.ReservedVars - varIndex int -} - -// newJointab creates a new jointab for the current plan -// being built. It also needs the current list of bind vars -// used in the original query to make sure that the names -// it generates don't collide with those already in use. -func newJointab(reserved *sqlparser.ReservedVars) *jointab { - return &jointab{ - refs: make(map[*column]string), - reserved: reserved, - } -} - -// Procure requests for the specified column from the plan -// and returns the join var name for it. -func (jt *jointab) Procure(plan logicalPlan, col *sqlparser.ColName, to int) string { - from, joinVar := jt.Lookup(col) - // If joinVar is empty, generate a unique name. - if joinVar == "" { - joinVar = jt.reserved.ReserveColName(col) - jt.refs[col.Metadata.(*column)] = joinVar - } - plan.SupplyVar(from, to, col, joinVar) - return joinVar -} - -// GenerateSubqueryVars generates substitution variable names for -// a subquery. 
It returns two names based on: __sq, __sq_has_values. -// The appropriate names can be used for substitution -// depending on the scenario. -func (jt *jointab) GenerateSubqueryVars() (sq, hasValues string) { - for { - jt.varIndex++ - var1 := fmt.Sprintf("__sq%d", jt.varIndex) - var2 := fmt.Sprintf("__sq_has_values%d", jt.varIndex) - if !jt.reserved.ReserveAll(var1, var2) { - continue - } - return var1, var2 - } -} - -// Lookup returns the order of the route that supplies the column and -// the join var name if one has already been assigned for it. -func (jt *jointab) Lookup(col *sqlparser.ColName) (order int, joinVar string) { - c := col.Metadata.(*column) - return c.Origin().Order(), jt.refs[c] -} diff --git a/go/vt/vtgate/planbuilder/jointab_test.go b/go/vt/vtgate/planbuilder/jointab_test.go deleted file mode 100644 index 6bfc23c155c..00000000000 --- a/go/vt/vtgate/planbuilder/jointab_test.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package planbuilder - -import ( - "reflect" - "testing" - - "vitess.io/vitess/go/vt/sqlparser" -) - -func TestGenerateSubqueryVars(t *testing.T) { - reserved := sqlparser.NewReservedVars("vtg", map[string]struct{}{ - "__sq1": {}, - "__sq_has_values3": {}, - }) - jt := newJointab(reserved) - - v1, v2 := jt.GenerateSubqueryVars() - combined := []string{v1, v2} - want := []string{"__sq2", "__sq_has_values2"} - if !reflect.DeepEqual(combined, want) { - t.Errorf("jt.GenerateSubqueryVars: %v, want %v", combined, want) - } - - v1, v2 = jt.GenerateSubqueryVars() - combined = []string{v1, v2} - want = []string{"__sq4", "__sq_has_values4"} - if !reflect.DeepEqual(combined, want) { - t.Errorf("jt.GenerateSubqueryVars: %v, want %v", combined, want) - } -} diff --git a/go/vt/vtgate/planbuilder/logical_plan.go b/go/vt/vtgate/planbuilder/logical_plan.go index 363c012daf8..51ed8e72b0e 100644 --- a/go/vt/vtgate/planbuilder/logical_plan.go +++ b/go/vt/vtgate/planbuilder/logical_plan.go @@ -29,50 +29,9 @@ import ( // logicalPlan defines the interface that a primitive must // satisfy. type logicalPlan interface { - // Order is the execution order of the primitive. If there are subprimitives, - // the order is one above the order of the subprimitives. - // This is because the primitive executes its subprimitives first and - // processes their results to generate its own values. - // Please copy code from an existing primitive to define this function. - Order() int - // ResultColumns returns the list of result columns the - // primitive returns. - // Please copy code from an existing primitive to define this function. - ResultColumns() []*resultColumn - - // Reorder reassigns order for the primitive and its sub-primitives. - // The input is the order of the previous primitive that should - // execute before this one. - Reorder(int) - - // Wireup performs the wire-up work. 
Nodes should be traversed - // from right to left because the rhs nodes can request vars from - // the lhs nodes. - Wireup(lp logicalPlan, jt *jointab) error - - // WireupGen4 does the wire up work for the Gen4 planner - WireupGen4(*plancontext.PlanningContext) error - - // SupplyVar finds the common root between from and to. If it's - // the common root, it supplies the requested var to the rhs tree. - // If the primitive already has the column in its list, it should - // just supply it to the 'to' node. Otherwise, it should request - // for it by calling SupplyCol on the 'from' sub-tree to request the - // column, and then supply it to the 'to' node. - SupplyVar(from, to int, col *sqlparser.ColName, varname string) - - // SupplyCol is meant to be used for the wire-up process. This function - // changes the primitive to supply the requested column and returns - // the resultColumn and column number of the result. SupplyCol - // is different from PushSelect because it may reuse an existing - // resultColumn, whereas PushSelect guarantees the addition of a new - // result column and returns a distinct symbol for it. - SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) - - // SupplyWeightString must supply a weight_string expression of the - // specified column. It returns an error if we cannot supply a weight column for it. - SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weightcolNumber int, err error) + // Wireup does the wire up of primitive with the source. + Wireup(*plancontext.PlanningContext) error // Primitive returns the underlying primitive. // This function should only be called after Wireup is finished. @@ -92,59 +51,6 @@ type logicalPlan interface { OutputColumns() []sqlparser.SelectExpr } -// gen4Plan implements a few methods from logicalPlan that are unused by Gen4. -type gen4Plan struct{} - -// Order implements the logicalPlan interface -func (*gen4Plan) Order() int { - panic("[BUG]: should not be called. 
This is a Gen4 primitive") -} - -// ResultColumns implements the logicalPlan interface -func (*gen4Plan) ResultColumns() []*resultColumn { - panic("[BUG]: should not be called. This is a Gen4 primitive") -} - -// Reorder implements the logicalPlan interface -func (*gen4Plan) Reorder(int) { - panic("[BUG]: should not be called. This is a Gen4 primitive") -} - -// Wireup implements the logicalPlan interface -func (*gen4Plan) Wireup(logicalPlan, *jointab) error { - panic("[BUG]: should not be called. This is a Gen4 primitive") -} - -// SupplyVar implements the logicalPlan interface -func (*gen4Plan) SupplyVar(int, int, *sqlparser.ColName, string) { - panic("[BUG]: should not be called. This is a Gen4 primitive") -} - -// SupplyCol implements the logicalPlan interface -func (*gen4Plan) SupplyCol(*sqlparser.ColName) (rc *resultColumn, colNumber int) { - panic("[BUG]: should not be called. This is a Gen4 primitive") -} - -// SupplyWeightString implements the logicalPlan interface -func (*gen4Plan) SupplyWeightString(int, bool) (weightcolNumber int, err error) { - panic("[BUG]: should not be called. This is a Gen4 primitive") -} - -// v3Plan implements methods that are only used by gen4 -type v3Plan struct{} - -func (*v3Plan) WireupGen4(*plancontext.PlanningContext) error { - panic("[BUG]: should not be called. This is a V3 primitive") -} - -func (*v3Plan) ContainsTables() semantics.TableSet { - panic("[BUG]: should not be called. This is a V3 primitive") -} - -func (*v3Plan) OutputColumns() []sqlparser.SelectExpr { - panic("[BUG]: should not be called. This is a V3 primitive") -} - type planVisitor func(logicalPlan) (bool, logicalPlan, error) func visit(node logicalPlan, visitor planVisitor) (logicalPlan, error) { @@ -180,16 +86,6 @@ func visit(node logicalPlan, visitor planVisitor) (logicalPlan, error) { return node, nil } -// first returns the first logical plan of the tree, -// which is usually the left most leaf. 
-func first(input logicalPlan) logicalPlan { - inputs := input.Inputs() - if len(inputs) == 0 { - return input - } - return first(inputs[0]) -} - // ------------------------------------------------------------------------- // logicalPlanCommon implements some common functionality of builders. @@ -207,33 +103,8 @@ func (bc *logicalPlanCommon) Order() int { return bc.order } -func (bc *logicalPlanCommon) Reorder(order int) { - bc.input.Reorder(order) - bc.order = bc.input.Order() + 1 -} - -func (bc *logicalPlanCommon) ResultColumns() []*resultColumn { - return bc.input.ResultColumns() -} - -func (bc *logicalPlanCommon) Wireup(plan logicalPlan, jt *jointab) error { - return bc.input.Wireup(plan, jt) -} - -func (bc *logicalPlanCommon) WireupGen4(ctx *plancontext.PlanningContext) error { - return bc.input.WireupGen4(ctx) -} - -func (bc *logicalPlanCommon) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { - bc.input.SupplyVar(from, to, col, varname) -} - -func (bc *logicalPlanCommon) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) { - return bc.input.SupplyCol(col) -} - -func (bc *logicalPlanCommon) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weightcolNumber int, err error) { - return bc.input.SupplyWeightString(colNumber, alsoAddToGroupBy) +func (bc *logicalPlanCommon) Wireup(ctx *plancontext.PlanningContext) error { + return bc.input.Wireup(ctx) } // Rewrite implements the logicalPlan interface @@ -266,67 +137,12 @@ func (bc *logicalPlanCommon) OutputColumns() []sqlparser.SelectExpr { // resultsColumn functionality. 
type resultsBuilder struct { logicalPlanCommon - resultColumns []*resultColumn - weightStrings map[*resultColumn]int - truncater truncater + truncater truncater } func newResultsBuilder(input logicalPlan, truncater truncater) resultsBuilder { return resultsBuilder{ logicalPlanCommon: newBuilderCommon(input), - resultColumns: input.ResultColumns(), - weightStrings: make(map[*resultColumn]int), truncater: truncater, } } - -func (rsb *resultsBuilder) ResultColumns() []*resultColumn { - return rsb.resultColumns -} - -// SupplyCol is currently unreachable because the builders using resultsBuilder -// are currently above a join, which is the only logicalPlan that uses it for now. -// This can change if we start supporting correlated subqueries. -func (rsb *resultsBuilder) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) { - c := col.Metadata.(*column) - for i, rc := range rsb.resultColumns { - if rc.column == c { - return rc, i - } - } - rc, colNumber = rsb.input.SupplyCol(col) - if colNumber < len(rsb.resultColumns) { - return rc, colNumber - } - // Add result columns from input until colNumber is reached. - for colNumber >= len(rsb.resultColumns) { - rsb.resultColumns = append(rsb.resultColumns, rsb.input.ResultColumns()[len(rsb.resultColumns)]) - } - rsb.truncater.SetTruncateColumnCount(len(rsb.resultColumns)) - return rc, colNumber -} - -func (rsb *resultsBuilder) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weightcolNumber int, err error) { - rc := rsb.resultColumns[colNumber] - var ok bool - weightcolNumber, ok = rsb.weightStrings[rc] - if !alsoAddToGroupBy && ok { - return weightcolNumber, nil - } - weightcolNumber, err = rsb.input.SupplyWeightString(colNumber, alsoAddToGroupBy) - if err != nil { - return 0, nil - } - rsb.weightStrings[rc] = weightcolNumber - if weightcolNumber < len(rsb.resultColumns) { - return weightcolNumber, nil - } - // Add result columns from input until weightcolNumber is reached. 
- for weightcolNumber >= len(rsb.resultColumns) { - rsb.resultColumns = append(rsb.resultColumns, rsb.input.ResultColumns()[len(rsb.resultColumns)]) - } - rsb.truncater.SetTruncateColumnCount(len(rsb.resultColumns)) - return weightcolNumber, nil -} - -// ------------------------------------------------------------------------- diff --git a/go/vt/vtgate/planbuilder/memory_sort.go b/go/vt/vtgate/planbuilder/memory_sort.go index 20dd125ecd0..d32777ac123 100644 --- a/go/vt/vtgate/planbuilder/memory_sort.go +++ b/go/vt/vtgate/planbuilder/memory_sort.go @@ -17,14 +17,10 @@ limitations under the License. package planbuilder import ( - "fmt" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" ) @@ -41,67 +37,6 @@ type memorySort struct { eMemorySort *engine.MemorySort } -func findColNumber(ms *memorySort, expr *sqlparser.ColName) int { - c := expr.Metadata.(*column) - for i, rc := range ms.ResultColumns() { - if rc.column == c { - return i - } - } - return -1 -} - -// newMemorySort builds a new memorySort. 
-func newMemorySort(plan logicalPlan, orderBy v3OrderBy) (*memorySort, error) { - eMemorySort := &engine.MemorySort{} - ms := &memorySort{ - resultsBuilder: newResultsBuilder(plan, eMemorySort), - eMemorySort: eMemorySort, - } - for _, order := range orderBy { - var colNumber int - switch expr := order.Expr.(type) { - case *sqlparser.Literal: - var err error - if colNumber, err = ResultFromNumber(ms.ResultColumns(), expr, "order clause"); err != nil { - return nil, err - } - case *sqlparser.ColName: - colNumber = findColNumber(ms, expr) - case *sqlparser.CastExpr: - colName, ok := expr.Expr.(*sqlparser.ColName) - if !ok { - return nil, vterrors.VT12001(fmt.Sprintf("memory sort: complex ORDER BY expression: %s", sqlparser.String(expr))) - } - colNumber = findColNumber(ms, colName) - case *sqlparser.ConvertExpr: - colName, ok := expr.Expr.(*sqlparser.ColName) - if !ok { - return nil, vterrors.VT12001(fmt.Sprintf("memory sort: complex ORDER BY expression: %s", sqlparser.String(expr))) - } - colNumber = findColNumber(ms, colName) - default: - return nil, vterrors.VT12001(fmt.Sprintf("memory sort: complex ORDER BY expression: %s", sqlparser.String(expr))) - } - // If column is not found, then the order by is referencing - // a column that's not on the select list. 
- if colNumber == -1 { - return nil, vterrors.VT12001(fmt.Sprintf("memory sort: ORDER BY must reference a column in the SELECT list: %s", sqlparser.String(order))) - } - // TODO(king-11) need to pass in collation here - ob := engine.OrderByParams{ - Col: colNumber, - WeightStringCol: -1, - Desc: order.Direction == sqlparser.DescOrder, - StarColFixedIndex: colNumber, - FromGroupBy: order.fromGroupBy, - CollationID: collations.Unknown, - } - ms.eMemorySort.OrderBy = append(ms.eMemorySort.OrderBy, ob) - } - return ms, nil -} - // Primitive implements the logicalPlan interface func (ms *memorySort) Primitive() engine.Primitive { ms.eMemorySort.Input = ms.input.Primitive() @@ -113,32 +48,6 @@ func (ms *memorySort) SetLimit(limit *sqlparser.Limit) error { return vterrors.VT13001("memorySort.Limit: unreachable") } -// Wireup implements the logicalPlan interface -// If text columns are detected in the keys, then the function modifies -// the primitive to pull a corresponding weight_string from mysql and -// compare those instead. This is because we currently don't have the -// ability to mimic mysql's collation behavior. 
-func (ms *memorySort) Wireup(plan logicalPlan, jt *jointab) error { - for i, orderby := range ms.eMemorySort.OrderBy { - rc := ms.resultColumns[orderby.Col] - // Add a weight_string column if we know that the column is a textual column or if its type is unknown - if sqltypes.IsText(rc.column.typ) || rc.column.typ == sqltypes.Null { - weightcolNumber, err := ms.input.SupplyWeightString(orderby.Col, orderby.FromGroupBy) - if err != nil { - _, isUnsupportedErr := err.(UnsupportedSupplyWeightString) - if isUnsupportedErr { - continue - } - return err - } - ms.weightStrings[rc] = weightcolNumber - ms.eMemorySort.OrderBy[i].WeightStringCol = weightcolNumber - ms.eMemorySort.TruncateColumnCount = len(ms.resultColumns) - } - } - return ms.input.Wireup(plan, jt) -} - -func (ms *memorySort) WireupGen4(ctx *plancontext.PlanningContext) error { - return ms.input.WireupGen4(ctx) +func (ms *memorySort) Wireup(ctx *plancontext.PlanningContext) error { + return ms.input.Wireup(ctx) } diff --git a/go/vt/vtgate/planbuilder/merge_sort.go b/go/vt/vtgate/planbuilder/merge_sort.go index 4e72d062241..0da5b5fc135 100644 --- a/go/vt/vtgate/planbuilder/merge_sort.go +++ b/go/vt/vtgate/planbuilder/merge_sort.go @@ -17,7 +17,6 @@ limitations under the License. package planbuilder import ( - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" @@ -37,15 +36,6 @@ type mergeSort struct { truncateColumnCount int } -// newMergeSort builds a new mergeSort. -func newMergeSort(rb *route) *mergeSort { - ms := &mergeSort{ - resultsBuilder: newResultsBuilder(rb, nil), - } - ms.truncater = ms - return ms -} - // SetTruncateColumnCount satisfies the truncater interface. // This function records the truncate column count and sets // it later on the eroute during wire-up phase. 
@@ -58,35 +48,8 @@ func (ms *mergeSort) Primitive() engine.Primitive { return ms.input.Primitive() } -// Wireup implements the logicalPlan interface -func (ms *mergeSort) Wireup(plan logicalPlan, jt *jointab) error { - // If the route has to do the ordering, and if any columns are Text, - // we have to request the corresponding weight_string from mysql - // and use that value instead. This is because we cannot mimic - // mysql's collation behavior yet. - rb := ms.input.(*route) - for i, orderby := range rb.eroute.OrderBy { - rc := ms.resultColumns[orderby.Col] - // Add a weight_string column if we know that the column is a textual column or if its type is unknown - if sqltypes.IsText(rc.column.typ) || rc.column.typ == sqltypes.Null { - var err error - rb.eroute.OrderBy[i].WeightStringCol, err = rb.SupplyWeightString(orderby.Col, orderby.FromGroupBy) - if err != nil { - _, isUnsupportedErr := err.(UnsupportedSupplyWeightString) - if isUnsupportedErr { - continue - } - return err - } - ms.truncateColumnCount = len(ms.resultColumns) - } - } - rb.eroute.TruncateColumnCount = ms.truncateColumnCount - return ms.input.Wireup(plan, jt) -} - -func (ms *mergeSort) WireupGen4(ctx *plancontext.PlanningContext) error { - return ms.input.WireupGen4(ctx) +func (ms *mergeSort) Wireup(ctx *plancontext.PlanningContext) error { + return ms.input.Wireup(ctx) } // OutputColumns implements the logicalPlan interface diff --git a/go/vt/vtgate/planbuilder/migration.go b/go/vt/vtgate/planbuilder/migration.go index 468c86d3ffb..6fb73a9039d 100644 --- a/go/vt/vtgate/planbuilder/migration.go +++ b/go/vt/vtgate/planbuilder/migration.go @@ -17,19 +17,74 @@ limitations under the License. 
package planbuilder import ( + "strconv" + "time" + + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/key" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" ) -func buildAlterMigrationPlan(query string, vschema plancontext.VSchema, enableOnlineDDL bool) (*planResult, error) { +func validateThrottleParams(alterMigrationType sqlparser.AlterMigrationType, expireString string, ratioLiteral *sqlparser.Literal) (duration time.Duration, ratio float64, err error) { + switch alterMigrationType { + case sqlparser.UnthrottleMigrationType, + sqlparser.UnthrottleAllMigrationType: + // Unthrottling is like throttling with duration=0 + duration = 0 + default: + duration = throttle.DefaultAppThrottleDuration + if expireString != "" { + duration, err = time.ParseDuration(expireString) + if err != nil || duration < 0 { + return duration, ratio, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid EXPIRE value: %s. Try '120s', '30m', '1h', etc. Allowed units are (s)ec, (m)in, (h)hour", expireString) + } + } + } + ratio = 1.0 + if ratioLiteral != nil { + ratio, err = strconv.ParseFloat(ratioLiteral.Val, 64) + if err != nil || ratio < 0 || ratio > 1 { + return duration, ratio, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid RATIO value: %s. 
Try any decimal number between '0.0' (no throttle) and `1.0` (fully throttled)", ratioLiteral.Val) + } + } + return duration, ratio, nil +} + +func buildAlterMigrationThrottleAppPlan(query string, alterMigration *sqlparser.AlterMigration, keyspace *vindexes.Keyspace) (*planResult, error) { + duration, ratio, err := validateThrottleParams(alterMigration.Type, alterMigration.Expire, alterMigration.Ratio) + if err != nil { + return nil, err + } + expireAt := time.Now().Add(duration) + appName := alterMigration.UUID + if appName == "" { + appName = throttlerapp.OnlineDDLName.String() + } + throttledAppRule := &topodatapb.ThrottledAppRule{ + Name: appName, + ExpiresAt: protoutil.TimeToProto(expireAt), + Ratio: ratio, + } + return newPlanResult(&engine.ThrottleApp{ + Keyspace: keyspace, + ThrottledAppRule: throttledAppRule, + }), nil +} + +func buildAlterMigrationPlan(query string, alterMigration *sqlparser.AlterMigration, vschema plancontext.VSchema, enableOnlineDDL bool) (*planResult, error) { if !enableOnlineDDL { return nil, schema.ErrOnlineDDLDisabled } + dest, ks, tabletType, err := vschema.TargetDestination("") if err != nil { return nil, err @@ -38,6 +93,15 @@ func buildAlterMigrationPlan(query string, vschema plancontext.VSchema, enableOn return nil, vterrors.VT09005() } + switch alterMigration.Type { + case sqlparser.ThrottleMigrationType, + sqlparser.ThrottleAllMigrationType, + sqlparser.UnthrottleMigrationType, + sqlparser.UnthrottleAllMigrationType: + // ALTER VITESS_MIGRATION ... THROTTLE ... 
queries go to topo (similarly to `vtctldclient UpdateThrottlerConfig`) + return buildAlterMigrationThrottleAppPlan(query, alterMigration, ks) + } + if tabletType != topodatapb.TabletType_PRIMARY { return nil, vterrors.VT09006("ALTER") } diff --git a/go/vt/vtgate/planbuilder/operator_transformers.go b/go/vt/vtgate/planbuilder/operator_transformers.go index 6344ba3b0a0..8987906bda4 100644 --- a/go/vt/vtgate/planbuilder/operator_transformers.go +++ b/go/vt/vtgate/planbuilder/operator_transformers.go @@ -22,9 +22,9 @@ import ( "strconv" "strings" - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/slices2" + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" @@ -37,26 +37,22 @@ import ( "vitess.io/vitess/go/vt/vtgate/vindexes" ) -func transformToLogicalPlan(ctx *plancontext.PlanningContext, op ops.Operator, isRoot bool) (logicalPlan, error) { +func transformToLogicalPlan(ctx *plancontext.PlanningContext, op ops.Operator) (logicalPlan, error) { switch op := op.(type) { case *operators.Route: return transformRoutePlan(ctx, op) case *operators.ApplyJoin: return transformApplyJoinPlan(ctx, op) case *operators.Union: - return transformUnionPlan(ctx, op, isRoot) + return transformUnionPlan(ctx, op) case *operators.Vindex: return transformVindexPlan(ctx, op) - case *operators.SubQueryOp: - return transformSubQueryPlan(ctx, op) - case *operators.CorrelatedSubQueryOp: - return transformCorrelatedSubQueryPlan(ctx, op) - case *operators.Derived: - return transformDerivedPlan(ctx, op) + case *operators.SubQuery: + return transformSubQuery(ctx, op) case *operators.Filter: return transformFilter(ctx, op) case *operators.Horizon: - return transformHorizon(ctx, op, isRoot) + panic("should have been solved in the operator") case *operators.Projection: return transformProjection(ctx, op) case *operators.Limit: @@ -67,40 +63,128 @@ 
func transformToLogicalPlan(ctx *plancontext.PlanningContext, op ops.Operator, i return transformAggregator(ctx, op) case *operators.Distinct: return transformDistinct(ctx, op) + case *operators.FkCascade: + return transformFkCascade(ctx, op) + case *operators.FkVerify: + return transformFkVerify(ctx, op) } return nil, vterrors.VT13001(fmt.Sprintf("unknown type encountered: %T (transformToLogicalPlan)", op)) } +// transformFkCascade transforms a FkCascade operator into a logical plan. +func transformFkCascade(ctx *plancontext.PlanningContext, fkc *operators.FkCascade) (logicalPlan, error) { + // We convert the parent operator to a logical plan. + parentLP, err := transformToLogicalPlan(ctx, fkc.Parent) + if err != nil { + return nil, nil + } + + // Once we have the parent logical plan, we can create the selection logical plan and the primitives for the children operators. + // For all of these, we don't need the semTable anymore. We set it to nil, to avoid using an incorrect one. + ctx.SemTable = nil + selLP, err := transformToLogicalPlan(ctx, fkc.Selection) + if err != nil { + return nil, err + } + + // Go over the children and convert them to Primitives too. 
+ var children []*engine.FkChild + for _, child := range fkc.Children { + childLP, err := transformToLogicalPlan(ctx, child.Op) + if err != nil { + return nil, err + } + err = childLP.Wireup(ctx) + if err != nil { + return nil, err + } + childEngine := childLP.Primitive() + children = append(children, &engine.FkChild{ + BVName: child.BVName, + Cols: child.Cols, + Exec: childEngine, + }) + } + + return newFkCascade(parentLP, selLP, children), nil +} + +func transformSubQuery(ctx *plancontext.PlanningContext, op *operators.SubQuery) (logicalPlan, error) { + outer, err := transformToLogicalPlan(ctx, op.Outer) + if err != nil { + return nil, err + } + + inner, err := transformToLogicalPlan(ctx, op.Subquery) + if err != nil { + return nil, err + } + + cols, err := op.GetJoinColumns(ctx, op.Outer) + if err != nil { + return nil, err + } + if len(cols) == 0 { + // no correlation, so uncorrelated it is + return newUncorrelatedSubquery(op.FilterType, op.SubqueryValueName, op.HasValuesName, inner, outer), nil + } + + lhsCols, err := op.OuterExpressionsNeeded(ctx, op.Outer) + if err != nil { + return nil, err + } + return newSemiJoin(outer, inner, op.Vars, lhsCols), nil +} + +// transformFkVerify transforms a FkVerify operator into a logical plan. +func transformFkVerify(ctx *plancontext.PlanningContext, fkv *operators.FkVerify) (logicalPlan, error) { + inputLP, err := transformToLogicalPlan(ctx, fkv.Input) + if err != nil { + return nil, err + } + + // Once we have the input logical plan, we can create the primitives for the verification operators. + // For all of these, we don't need the semTable anymore. We set it to nil, to avoid using an incorrect one. + ctx.SemTable = nil + + // Go over the children and convert them to Primitives too. 
+ var verify []*verifyLP + for _, v := range fkv.Verify { + lp, err := transformToLogicalPlan(ctx, v.Op) + if err != nil { + return nil, err + } + verify = append(verify, &verifyLP{ + verify: lp, + typ: v.Typ, + }) + } + + return newFkVerify(inputLP, verify), nil +} + func transformAggregator(ctx *plancontext.PlanningContext, op *operators.Aggregator) (logicalPlan, error) { - plan, err := transformToLogicalPlan(ctx, op.Source, false) + plan, err := transformToLogicalPlan(ctx, op.Source) if err != nil { return nil, err } oa := &orderedAggregate{ - resultsBuilder: resultsBuilder{ - logicalPlanCommon: newBuilderCommon(plan), - weightStrings: make(map[*resultColumn]int), - }, + resultsBuilder: newResultsBuilder(plan, nil), } for _, aggr := range op.Aggregations { if aggr.OpCode == opcode.AggregateUnassigned { return nil, vterrors.VT12001(fmt.Sprintf("in scatter query: aggregation function '%s'", sqlparser.String(aggr.Original))) } - typ, col := aggr.GetTypeCollation(ctx) - oa.aggregates = append(oa.aggregates, &engine.AggregateParams{ - Opcode: aggr.OpCode, - Col: aggr.ColOffset, - Alias: aggr.Alias, - Expr: aggr.Func, - Original: aggr.Original, - OrigOpcode: aggr.OriginalOpCode, - WCol: aggr.WSOffset, - Type: typ, - CollationID: col, - }) + aggrParam := engine.NewAggregateParam(aggr.OpCode, aggr.ColOffset, aggr.Alias) + aggrParam.Expr = aggr.Func + aggrParam.Original = aggr.Original + aggrParam.OrigOpcode = aggr.OriginalOpCode + aggrParam.WCol = aggr.WSOffset + aggrParam.Type, aggrParam.CollationID = aggr.GetTypeCollation(ctx) + oa.aggregates = append(oa.aggregates, aggrParam) } for _, groupBy := range op.Grouping { typ, col, _ := ctx.SemTable.TypeForExpr(groupBy.SimplifiedExpr) @@ -121,7 +205,7 @@ func transformAggregator(ctx *plancontext.PlanningContext, op *operators.Aggrega } func transformDistinct(ctx *plancontext.PlanningContext, op *operators.Distinct) (logicalPlan, error) { - src, err := transformToLogicalPlan(ctx, op.Source, false) + src, err := 
transformToLogicalPlan(ctx, op.Source) if err != nil { return nil, err } @@ -129,7 +213,7 @@ func transformDistinct(ctx *plancontext.PlanningContext, op *operators.Distinct) } func transformOrdering(ctx *plancontext.PlanningContext, op *operators.Ordering) (logicalPlan, error) { - plan, err := transformToLogicalPlan(ctx, op.Source, false) + plan, err := transformToLogicalPlan(ctx, op.Source) if err != nil { return nil, err } @@ -142,12 +226,8 @@ func createMemorySort(ctx *plancontext.PlanningContext, src logicalPlan, orderin TruncateColumnCount: ordering.ResultColumns, } ms := &memorySort{ - resultsBuilder: resultsBuilder{ - logicalPlanCommon: newBuilderCommon(src), - weightStrings: make(map[*resultColumn]int), - truncater: primitive, - }, - eMemorySort: primitive, + resultsBuilder: newResultsBuilder(src, primitive), + eMemorySort: primitive, } for idx, order := range ordering.Order { @@ -166,7 +246,7 @@ func createMemorySort(ctx *plancontext.PlanningContext, src logicalPlan, orderin } func transformProjection(ctx *plancontext.PlanningContext, op *operators.Projection) (logicalPlan, error) { - src, err := transformToLogicalPlan(ctx, op.Source, false) + src, err := transformToLogicalPlan(ctx, op.Source) if err != nil { return nil, err } @@ -174,50 +254,57 @@ func transformProjection(ctx *plancontext.PlanningContext, op *operators.Project if cols := op.AllOffsets(); cols != nil { // if all this op is doing is passing through columns from the input, we // can use the faster SimpleProjection - return useSimpleProjection(op, cols, src) + return useSimpleProjection(ctx, op, cols, src) } - expressions := slices2.Map(op.Projections, func(from operators.ProjExpr) sqlparser.Expr { - return from.GetExpr() - }) + ap, err := op.GetAliasedProjections() + if err != nil { + return nil, err + } - failed := false - evalengineExprs := slices2.Map(op.Projections, func(from operators.ProjExpr) evalengine.Expr { - switch e := from.(type) { - case operators.Eval: - return e.EExpr - case 
operators.Offset: - typ, col, _ := ctx.SemTable.TypeForExpr(e.Expr) - return evalengine.NewColumn(e.Offset, typ, col) - default: - failed = true - return nil + var exprs []sqlparser.Expr + var evalengineExprs []evalengine.Expr + var columnNames []string + for _, pe := range ap { + ee, err := getEvalEngingeExpr(ctx, pe) + if err != nil { + return nil, err } - }) - var primitive *engine.Projection - columnNames := slices2.Map(op.Columns, func(from *sqlparser.AliasedExpr) string { - return from.ColumnName() - }) + evalengineExprs = append(evalengineExprs, ee) + exprs = append(exprs, pe.EvalExpr) + columnNames = append(columnNames, pe.Original.ColumnName()) + } - if !failed { - primitive = &engine.Projection{ - Cols: columnNames, - Exprs: evalengineExprs, - } + primitive := &engine.Projection{ + Cols: columnNames, + Exprs: evalengineExprs, } return &projection{ source: src, columnNames: columnNames, - columns: expressions, + columns: exprs, primitive: primitive, }, nil } +func getEvalEngingeExpr(ctx *plancontext.PlanningContext, pe *operators.ProjExpr) (evalengine.Expr, error) { + switch e := pe.Info.(type) { + case *operators.EvalEngine: + return e.EExpr, nil + case operators.Offset: + typ, col, _ := ctx.SemTable.TypeForExpr(pe.EvalExpr) + return evalengine.NewColumn(int(e), typ, col), nil + default: + return nil, vterrors.VT13001("project not planned for: %s", pe.String()) + } + +} + // useSimpleProjection uses nothing at all if the output is already correct, // or SimpleProjection when we have to reorder or truncate the columns -func useSimpleProjection(op *operators.Projection, cols []int, src logicalPlan) (logicalPlan, error) { - columns, err := op.Source.GetColumns() +func useSimpleProjection(ctx *plancontext.PlanningContext, op *operators.Projection, cols []int, src logicalPlan) (logicalPlan, error) { + columns, err := op.Source.GetColumns(ctx) if err != nil { return nil, err } @@ -246,24 +333,16 @@ func elementsMatchIndices(in []int) bool { } func 
transformFilter(ctx *plancontext.PlanningContext, op *operators.Filter) (logicalPlan, error) { - plan, err := transformToLogicalPlan(ctx, op.Source, false) + plan, err := transformToLogicalPlan(ctx, op.Source) if err != nil { return nil, err } - predicate := op.FinalPredicate + predicate := op.PredicateWithOffsets ast := ctx.SemTable.AndExpressions(op.Predicates...) - // this might already have been done on the operators if predicate == nil { - predicate, err = evalengine.Translate(ast, &evalengine.Config{ - ResolveType: ctx.SemTable.TypeForExpr, - ResolveColumn: resolveFromPlan(ctx, plan, true), - Collation: ctx.SemTable.Collation, - }) - if err != nil { - return nil, err - } + panic("this should have already been done") } return &filter{ @@ -271,55 +350,17 @@ func transformFilter(ctx *plancontext.PlanningContext, op *operators.Filter) (lo efilter: &engine.Filter{ Predicate: predicate, ASTPredicate: ast, + Truncate: op.Truncate, }, }, nil } -func transformHorizon(ctx *plancontext.PlanningContext, op *operators.Horizon, isRoot bool) (logicalPlan, error) { - source, err := transformToLogicalPlan(ctx, op.Source, isRoot) - if err != nil { - return nil, err - } - switch node := op.Select.(type) { - case *sqlparser.Select: - hp := horizonPlanning{ - sel: node, - } - - replaceSubQuery(ctx, node) - plan, err := hp.planHorizon(ctx, source, true) - if err != nil { - return nil, err - } - return planLimit(node.Limit, plan) - case *sqlparser.Union: - var err error - rb, isRoute := source.(*routeGen4) - if !isRoute && ctx.SemTable.NotSingleRouteErr != nil { - return nil, ctx.SemTable.NotSingleRouteErr - } - var plan logicalPlan - if isRoute && rb.isSingleShard() { - err = planSingleRoutePlan(node, rb) - plan = rb - } else { - plan, err = planOrderByOnUnion(ctx, source, node) - } - if err != nil { - return nil, err - } - - return planLimit(node.Limit, plan) - } - return nil, vterrors.VT13001("only SELECT and UNION implement the SelectStatement interface") -} - func 
transformApplyJoinPlan(ctx *plancontext.PlanningContext, n *operators.ApplyJoin) (logicalPlan, error) { - lhs, err := transformToLogicalPlan(ctx, n.LHS, false) + lhs, err := transformToLogicalPlan(ctx, n.LHS) if err != nil { return nil, err } - rhs, err := transformToLogicalPlan(ctx, n.RHS, false) + rhs, err := transformToLogicalPlan(ctx, n.RHS) if err != nil { return nil, err } @@ -328,13 +369,12 @@ func transformApplyJoinPlan(ctx *plancontext.PlanningContext, n *operators.Apply opCode = engine.LeftJoin } - return &joinGen4{ - Left: lhs, - Right: rhs, - Cols: n.Columns, - Vars: n.Vars, - LHSColumns: n.LHSColumns, - Opcode: opCode, + return &join{ + Left: lhs, + Right: rhs, + Cols: n.Columns, + Vars: n.Vars, + Opcode: opCode, }, nil } @@ -371,20 +411,34 @@ func newRoutingParams(ctx *plancontext.PlanningContext, opCode engine.Opcode) *e } func transformRoutePlan(ctx *plancontext.PlanningContext, op *operators.Route) (logicalPlan, error) { - switch src := op.Source.(type) { - case *operators.Insert: - return transformInsertPlan(ctx, op, src) - case *operators.Update: - return transformUpdatePlan(ctx, op, src) - case *operators.Delete: - return transformDeletePlan(ctx, op, src) - } - condition := getVindexPredicate(ctx, op) - sel, err := operators.ToSQL(ctx, op.Source) + stmt, dmlOp, err := operators.ToSQL(ctx, op.Source) if err != nil { return nil, err } - replaceSubQuery(ctx, sel) + + if stmtWithComments, ok := stmt.(sqlparser.Commented); ok && op.Comments != nil { + stmtWithComments.SetComments(op.Comments.GetComments()) + } + + switch stmt := stmt.(type) { + case sqlparser.SelectStatement: + if op.Lock != sqlparser.NoLock { + stmt.SetLock(op.Lock) + } + return buildRouteLogicalPlan(ctx, op, stmt) + case *sqlparser.Update: + return buildUpdateLogicalPlan(ctx, op, dmlOp, stmt) + case *sqlparser.Delete: + return buildDeleteLogicalPlan(ctx, op, dmlOp) + case *sqlparser.Insert: + return buildInsertLogicalPlan(ctx, op, dmlOp, stmt) + default: + return nil, 
vterrors.VT13001(fmt.Sprintf("dont know how to %T", stmt)) + } +} + +func buildRouteLogicalPlan(ctx *plancontext.PlanningContext, op *operators.Route, stmt sqlparser.SelectStatement) (logicalPlan, error) { + condition := getVindexPredicate(op) eroute, err := routeToEngineRoute(ctx, op) for _, order := range op.Ordering { typ, collation, _ := ctx.SemTable.TypeForExpr(order.AST) @@ -399,20 +453,20 @@ func transformRoutePlan(ctx *plancontext.PlanningContext, op *operators.Route) ( if err != nil { return nil, err } - return &routeGen4{ + return &route{ eroute: eroute, - Select: sel, + Select: stmt, tables: operators.TableID(op), condition: condition, }, nil - } -func transformInsertPlan(ctx *plancontext.PlanningContext, op *operators.Route, ins *operators.Insert) (i *insert, err error) { +func buildInsertLogicalPlan(ctx *plancontext.PlanningContext, rb *operators.Route, op ops.Operator, stmt *sqlparser.Insert) (logicalPlan, error) { + ins := op.(*operators.Insert) eins := &engine.Insert{ - Opcode: mapToInsertOpCode(op.Routing.OpCode(), ins.Input != nil), - Keyspace: op.Routing.Keyspace(), - Table: ins.VTable, + Opcode: mapToInsertOpCode(rb.Routing.OpCode(), ins.Input != nil, ins.VTable.Pinned != nil), + Keyspace: rb.Routing.Keyspace(), + TableName: ins.VTable.Name.String(), Ignore: ins.Ignore, ForceNonStreaming: ins.ForceNonStreaming, Generate: autoIncGenerate(ins.AutoIncrement), @@ -420,32 +474,41 @@ func transformInsertPlan(ctx *plancontext.PlanningContext, op *operators.Route, VindexValues: ins.VindexValues, VindexValueOffset: ins.VindexValueOffset, } - i = &insert{eInsert: eins} + lp := &insert{eInsert: eins} // we would need to generate the query on the fly. The only exception here is // when unsharded query with autoincrement for that there is no input operator. 
if eins.Opcode != engine.InsertUnsharded || ins.Input != nil { eins.Prefix, eins.Mid, eins.Suffix = generateInsertShardedQuery(ins.AST) } + // pinned table + if ins.VTable.Pinned != nil { + eins.TargetDestination = key.DestinationKeyspaceID(ins.VTable.Pinned) + } if ins.Input == nil { - eins.Query = generateQuery(ins.AST) + eins.Query = generateQuery(stmt) } else { - i.source, err = transformToLogicalPlan(ctx, ins.Input, true) + newSrc, err := transformToLogicalPlan(ctx, ins.Input) if err != nil { - return + return nil, err } + lp.source = newSrc } - return + + return lp, nil } -func mapToInsertOpCode(code engine.Opcode, insertSelect bool) engine.InsertOpcode { +func mapToInsertOpCode(code engine.Opcode, insertSelect bool, pinnedTable bool) engine.InsertOpcode { if code == engine.Unsharded { return engine.InsertUnsharded } if insertSelect { return engine.InsertSelect } + if pinnedTable { + return engine.InsertByDestination + } return engine.InsertSharded } @@ -466,8 +529,8 @@ func autoIncGenerate(gen *operators.Generate) *engine.Generate { } } -func generateInsertShardedQuery(ins *sqlparser.Insert) (prefix string, mid []string, suffix string) { - valueTuples, isValues := ins.Rows.(sqlparser.Values) +func generateInsertShardedQuery(ins *sqlparser.Insert) (prefix string, mids sqlparser.Values, suffix string) { + mids, isValues := ins.Rows.(sqlparser.Values) prefixFormat := "insert %v%sinto %v%v " if isValues { // the mid values are filled differently @@ -484,19 +547,6 @@ func generateInsertShardedQuery(ins *sqlparser.Insert) (prefix string, mid []str suffixBuf := sqlparser.NewTrackedBuffer(dmlFormatter) suffixBuf.Myprintf("%v", ins.OnDup) suffix = suffixBuf.String() - - if !isValues { - // this is a insert query using select to insert the rows. 
- return - } - - midBuf := sqlparser.NewTrackedBuffer(dmlFormatter) - mid = make([]string, len(valueTuples)) - for rowNum, val := range valueTuples { - midBuf.Myprintf("%v", val) - mid[rowNum] = midBuf.String() - midBuf.Reset() - } return } @@ -510,24 +560,31 @@ func dmlFormatter(buf *sqlparser.TrackedBuffer, node sqlparser.SQLNode) { node.Format(buf) } -func transformUpdatePlan(ctx *plancontext.PlanningContext, op *operators.Route, upd *operators.Update) (logicalPlan, error) { - ast := upd.AST - replaceSubQuery(ctx, ast) - rp := newRoutingParams(ctx, op.Routing.OpCode()) - err := op.Routing.UpdateRoutingParams(ctx, rp) +func buildUpdateLogicalPlan( + ctx *plancontext.PlanningContext, + rb *operators.Route, + dmlOp ops.Operator, + stmt *sqlparser.Update, +) (logicalPlan, error) { + upd := dmlOp.(*operators.Update) + rp := newRoutingParams(ctx, rb.Routing.OpCode()) + err := rb.Routing.UpdateRoutingParams(ctx, rp) if err != nil { return nil, err } edml := &engine.DML{ - Query: generateQuery(ast), - Table: []*vindexes.Table{ - upd.VTable, - }, + Query: generateQuery(stmt), + TableNames: []string{upd.VTable.Name.String()}, + Vindexes: upd.VTable.ColumnVindexes, OwnedVindexQuery: upd.OwnedVindexQuery, RoutingParameters: rp, } - transformDMLPlan(upd.VTable, edml, op.Routing, len(upd.ChangedVindexValues) > 0) + if upd.VTable.Pinned != nil { + edml.TargetDestination = key.DestinationKeyspaceID(upd.VTable.Pinned) + } + + transformDMLPlan(upd.VTable, edml, rb.Routing, len(upd.ChangedVindexValues) > 0) e := &engine.Update{ ChangedVindexValues: upd.ChangedVindexValues, @@ -537,24 +594,30 @@ func transformUpdatePlan(ctx *plancontext.PlanningContext, op *operators.Route, return &primitiveWrapper{prim: e}, nil } -func transformDeletePlan(ctx *plancontext.PlanningContext, op *operators.Route, del *operators.Delete) (logicalPlan, error) { - ast := del.AST - replaceSubQuery(ctx, ast) - rp := newRoutingParams(ctx, op.Routing.OpCode()) - err := op.Routing.UpdateRoutingParams(ctx, rp) 
+func buildDeleteLogicalPlan( + ctx *plancontext.PlanningContext, + rb *operators.Route, + dmlOp ops.Operator, +) (logicalPlan, error) { + del := dmlOp.(*operators.Delete) + rp := newRoutingParams(ctx, rb.Routing.OpCode()) + err := rb.Routing.UpdateRoutingParams(ctx, rp) if err != nil { return nil, err } edml := &engine.DML{ - Query: generateQuery(ast), - Table: []*vindexes.Table{ - del.VTable, - }, + Query: generateQuery(del.AST), + TableNames: []string{del.VTable.Name.String()}, + Vindexes: del.VTable.Owned, OwnedVindexQuery: del.OwnedVindexQuery, RoutingParameters: rp, } - transformDMLPlan(del.VTable, edml, op.Routing, del.OwnedVindexQuery != "") + if del.VTable.Pinned != nil { + edml.TargetDestination = key.DestinationKeyspaceID(del.VTable.Pinned) + } + + transformDMLPlan(del.VTable, edml, rb.Routing, del.OwnedVindexQuery != "") e := &engine.Delete{ DML: edml, @@ -571,21 +634,7 @@ func transformDMLPlan(vtable *vindexes.Table, edml *engine.DML, routing operator } } -func replaceSubQuery(ctx *plancontext.PlanningContext, sel sqlparser.Statement) { - extractedSubqueries := ctx.SemTable.GetSubqueryNeedingRewrite() - if len(extractedSubqueries) == 0 { - return - } - sqr := &subQReplacer{subqueryToReplace: extractedSubqueries} - sqlparser.SafeRewrite(sel, nil, sqr.replacer) - for sqr.replaced { - // to handle subqueries inside subqueries, we need to do this again and again until no replacements are left - sqr.replaced = false - sqlparser.SafeRewrite(sel, nil, sqr.replacer) - } -} - -func getVindexPredicate(ctx *plancontext.PlanningContext, op *operators.Route) sqlparser.Expr { +func getVindexPredicate(op *operators.Route) sqlparser.Expr { tr, ok := op.Routing.(*operators.ShardedRouting) if !ok || tr.Selected == nil { return nil @@ -612,12 +661,6 @@ func getVindexPredicate(ctx *plancontext.PlanningContext, op *operators.Route) s argName = engine.ListVarName } - if subq, isSubq := cmp.Right.(*sqlparser.Subquery); isSubq { - extractedSubquery := 
ctx.SemTable.FindSubqueryReference(subq) - if extractedSubquery != nil { - extractedSubquery.SetArgName(argName) - } - } cmp.Right = sqlparser.ListArg(argName) } return condition @@ -649,440 +692,33 @@ func getAllTableNames(op *operators.Route) ([]string, error) { return tableNames, nil } -func transformUnionPlan(ctx *plancontext.PlanningContext, op *operators.Union, isRoot bool) (logicalPlan, error) { - var sources []logicalPlan - var err error - if op.Distinct { - sources, err = transformAndMerge(ctx, op) - if err != nil { - return nil, err - } - for _, source := range sources { - pushDistinct(source) - } - } else { - sources, err = transformAndMergeInOrder(ctx, op) - if err != nil { - return nil, err - } - } - var result logicalPlan - if len(sources) == 1 { - src := sources[0] - if rb, isRoute := src.(*routeGen4); isRoute && rb.isSingleShard() { - // if we have a single shard route, we don't need to do anything to make it distinct - // TODO - // rb.Select.SetLimit(op.limit) - // rb.Select.SetOrderBy(op.ordering) - return src, nil - } - result = src - } else { - if len(op.Ordering) > 0 { - return nil, vterrors.VT12001("ORDER BY on top of UNION") - } - result = &concatenateGen4{sources: sources} - } - if op.Distinct { - colls := getCollationsFor(ctx, op) - checkCols, err := getCheckColsForUnion(ctx, result, colls) - if err != nil { - return nil, err - } - return newDistinctGen4Legacy(result, checkCols, isRoot), nil - } - return result, nil - -} - -func getWeightStringForSelectExpr(selectExpr sqlparser.SelectExpr) (*sqlparser.AliasedExpr, error) { - expr, isAliased := selectExpr.(*sqlparser.AliasedExpr) - if !isAliased { - return nil, vterrors.VT12001("get weight string expression for non-aliased expression") - } - return &sqlparser.AliasedExpr{Expr: weightStringFor(expr.Expr)}, nil -} - -func getCheckColsForUnion(ctx *plancontext.PlanningContext, result logicalPlan, colls []collationInfo) ([]engine.CheckCol, error) { - checkCols := make([]engine.CheckCol, 0, 
len(colls)) - for i, coll := range colls { - checkCol := engine.CheckCol{Col: i, Type: coll.typ, Collation: coll.col} - if coll.typ >= 0 { - checkCols = append(checkCols, checkCol) - continue - } - // We might need a weight string - let's push one - // `might` because we just don't know what type we are dealing with. - // If we encounter a numerical value, we don't need any weight_string values - newOffset, err := pushWeightStringForDistinct(ctx, result, i) - if err != nil { - return nil, err - } - checkCol.WsCol = &newOffset - checkCols = append(checkCols, checkCol) - } - return checkCols, nil -} - -// pushWeightStringForDistinct adds a weight_string projection -func pushWeightStringForDistinct(ctx *plancontext.PlanningContext, plan logicalPlan, offset int) (newOffset int, err error) { - switch node := plan.(type) { - case *routeGen4: - allSelects := sqlparser.GetAllSelects(node.Select) - for _, sel := range allSelects { - expr, err := getWeightStringForSelectExpr(sel.SelectExprs[offset]) - if err != nil { - return 0, err - } - if i := checkIfAlreadyExists(expr, sel, ctx.SemTable); i != -1 { - return i, nil - } - sel.SelectExprs = append(sel.SelectExprs, expr) - newOffset = len(sel.SelectExprs) - 1 - } - // we leave the responsibility of truncating to distinct - node.eroute.TruncateColumnCount = 0 - case *concatenateGen4: - for _, source := range node.sources { - newOffset, err = pushWeightStringForDistinct(ctx, source, offset) - if err != nil { - return 0, err - } - } - node.noNeedToTypeCheck = append(node.noNeedToTypeCheck, newOffset) - case *joinGen4: - joinOffset := node.Cols[offset] - switch { - case joinOffset < 0: - offset, err = pushWeightStringForDistinct(ctx, node.Left, -(joinOffset + 1)) - offset = -(offset + 1) - case joinOffset > 0: - offset, err = pushWeightStringForDistinct(ctx, node.Right, joinOffset-1) - offset = offset + 1 - default: - return 0, vterrors.VT13001("wrong column offset in join plan to push DISTINCT WEIGHT_STRING") - } - if err != 
nil { - return 0, err - } - newOffset = len(node.Cols) - node.Cols = append(node.Cols, offset) - default: - return 0, vterrors.VT13001(fmt.Sprintf("pushWeightStringForDistinct on %T", plan)) - } - return -} - -func transformAndMerge(ctx *plancontext.PlanningContext, op *operators.Union) (sources []logicalPlan, err error) { - for _, source := range op.Sources { - // first we go over all the operator inputs and turn them into logical plans, - // including horizon planning - plan, err := transformToLogicalPlan(ctx, source, false) +func transformUnionPlan(ctx *plancontext.PlanningContext, op *operators.Union) (logicalPlan, error) { + sources, err := slice.MapWithError(op.Sources, func(src ops.Operator) (logicalPlan, error) { + plan, err := transformToLogicalPlan(ctx, src) if err != nil { return nil, err } - sources = append(sources, plan) - } - - // next we'll go over all the plans from and check if any two can be merged. if they can, they are merged, - // and we continue checking for pairs of plans that can be merged into a single route - idx := 0 - for idx < len(sources) { - keep := make([]bool, len(sources)) - srcA := sources[idx] - merged := false - for j, srcB := range sources { - if j <= idx { - continue - } - newPlan := mergeUnionLogicalPlans(ctx, srcA, srcB) - if newPlan != nil { - sources[idx] = newPlan - srcA = newPlan - merged = true - } else { - keep[j] = true - } - } - if !merged { - return sources, nil - } - var phase []logicalPlan - for i, source := range sources { - if keep[i] || i <= idx { - phase = append(phase, source) - } - } - idx++ - sources = phase - } - return sources, nil -} - -func transformAndMergeInOrder(ctx *plancontext.PlanningContext, op *operators.Union) (sources []logicalPlan, err error) { - // We go over all the input operators and turn them into logical plans - for i, source := range op.Sources { - plan, err := transformToLogicalPlan(ctx, source, false) - if err != nil { - return nil, err - } - if i == 0 { - sources = append(sources, 
plan) - continue - } - - // next we check if the last plan we produced can be merged with this new plan - last := sources[len(sources)-1] - newPlan := mergeUnionLogicalPlans(ctx, last, plan) - if newPlan != nil { - // if we could merge them, let's replace the last plan with this new merged one - sources[len(sources)-1] = newPlan - continue - } - // else we just add the new plan to the end of list - sources = append(sources, plan) - } - return sources, nil -} - -type collationInfo struct { - typ sqltypes.Type - col collations.ID -} - -func getCollationsFor(ctx *plancontext.PlanningContext, n *operators.Union) []collationInfo { - // TODO: coerce selects' select expressions' collations - var colls []collationInfo - - sel, err := n.GetSelectFor(0) - if err != nil { - return nil - } - for _, expr := range sel.SelectExprs { - aliasedE, ok := expr.(*sqlparser.AliasedExpr) - if !ok { - return nil - } - typ, col, _ := ctx.SemTable.TypeForExpr(aliasedE.Expr) - colls = append(colls, collationInfo{typ: typ, col: col}) - } - return colls -} - -func transformDerivedPlan(ctx *plancontext.PlanningContext, op *operators.Derived) (logicalPlan, error) { - // transforming the inner part of the derived table into a logical plan - // so that we can do horizon planning on the inner. If the logical plan - // we've produced is a Route, we set its Select.From field to be an aliased - // expression containing our derived table's inner select and the derived - // table's alias. 
- - plan, err := transformToLogicalPlan(ctx, op.Source, false) + return plan, nil + }) if err != nil { return nil, err } - plan, err = planHorizon(ctx, plan, op.Query, false) - if err != nil { - return nil, err + if len(sources) == 1 { + return sources[0], nil } + return &concatenate{ + sources: sources, + noNeedToTypeCheck: nil, + }, nil - rb, isRoute := plan.(*routeGen4) - if !isRoute { - return &simpleProjection{ - logicalPlanCommon: newBuilderCommon(plan), - eSimpleProj: &engine.SimpleProjection{ - Cols: op.ColumnsOffset, - }, - }, nil - } - innerSelect := rb.Select - derivedTable := &sqlparser.DerivedTable{Select: innerSelect} - tblExpr := &sqlparser.AliasedTableExpr{ - Expr: derivedTable, - As: sqlparser.NewIdentifierCS(op.Alias), - Columns: op.ColumnAliases, - } - selectExprs := sqlparser.SelectExprs{} - for _, colName := range op.Columns { - selectExprs = append(selectExprs, &sqlparser.AliasedExpr{ - Expr: colName, - }) - } - rb.Select = &sqlparser.Select{ - From: []sqlparser.TableExpr{tblExpr}, - SelectExprs: selectExprs, - } - return plan, nil } func transformLimit(ctx *plancontext.PlanningContext, op *operators.Limit) (logicalPlan, error) { - plan, err := transformToLogicalPlan(ctx, op.Source, false) + plan, err := transformToLogicalPlan(ctx, op.Source) if err != nil { return nil, err } return createLimit(plan, op.AST) } - -type subQReplacer struct { - subqueryToReplace []*sqlparser.ExtractedSubquery - replaced bool -} - -func (sqr *subQReplacer) replacer(cursor *sqlparser.Cursor) bool { - ext, ok := cursor.Node().(*sqlparser.ExtractedSubquery) - if !ok { - return true - } - for _, replaceByExpr := range sqr.subqueryToReplace { - // we are comparing the ArgNames in case the expressions have been cloned - if ext.GetArgName() == replaceByExpr.GetArgName() { - cursor.Replace(ext.Original) - sqr.replaced = true - return true - } - } - return true -} - -func pushDistinct(plan logicalPlan) { - switch n := plan.(type) { - case *routeGen4: - 
n.Select.MakeDistinct() - case *concatenateGen4: - for _, source := range n.sources { - pushDistinct(source) - } - } -} - -func mergeUnionLogicalPlans(ctx *plancontext.PlanningContext, left logicalPlan, right logicalPlan) logicalPlan { - lroute, ok := left.(*routeGen4) - if !ok { - return nil - } - rroute, ok := right.(*routeGen4) - if !ok { - return nil - } - - if canMergeUnionPlans(ctx, lroute, rroute) { - lroute.Select = &sqlparser.Union{Left: lroute.Select, Distinct: false, Right: rroute.Select} - return mergeSystemTableInformation(lroute, rroute) - } - return nil -} - -func canMergeUnionPlans(ctx *plancontext.PlanningContext, a, b *routeGen4) bool { - // this method should be close to tryMerge below. it does the same thing, but on logicalPlans instead of queryTrees - if a.eroute.Keyspace.Name != b.eroute.Keyspace.Name { - return false - } - switch a.eroute.Opcode { - case engine.Unsharded, engine.Reference: - return a.eroute.Opcode == b.eroute.Opcode - case engine.DBA: - return canSelectDBAMerge(a, b) - case engine.EqualUnique: - // Check if they target the same shard. - if b.eroute.Opcode == engine.EqualUnique && - a.eroute.Vindex == b.eroute.Vindex && - a.condition != nil && - b.condition != nil && - gen4ValuesEqual(ctx, []sqlparser.Expr{a.condition}, []sqlparser.Expr{b.condition}) { - return true - } - case engine.Scatter: - return b.eroute.Opcode == engine.Scatter - case engine.Next: - return false - } - return false -} - -func canSelectDBAMerge(a, b *routeGen4) bool { - if a.eroute.Opcode != engine.DBA { - return false - } - if b.eroute.Opcode != engine.DBA { - return false - } - - // safe to merge when any 1 table name or schema matches, since either the routing will match or either side would be throwing an error - // during run-time which we want to preserve. 
For example outer side has User in sys table schema and inner side has User and Main in sys table schema - // Inner might end up throwing an error at runtime, but if it doesn't then it is safe to merge. - for _, aExpr := range a.eroute.SysTableTableSchema { - for _, bExpr := range b.eroute.SysTableTableSchema { - if evalengine.FormatExpr(aExpr) == evalengine.FormatExpr(bExpr) { - return true - } - } - } - for _, aExpr := range a.eroute.SysTableTableName { - for _, bExpr := range b.eroute.SysTableTableName { - if evalengine.FormatExpr(aExpr) == evalengine.FormatExpr(bExpr) { - return true - } - } - } - - // if either/both of the side does not have any routing information, then they can be merged. - return (len(a.eroute.SysTableTableSchema) == 0 && len(a.eroute.SysTableTableName) == 0) || - (len(b.eroute.SysTableTableSchema) == 0 && len(b.eroute.SysTableTableName) == 0) -} - -func gen4ValuesEqual(ctx *plancontext.PlanningContext, a, b []sqlparser.Expr) bool { - if len(a) != len(b) { - return false - } - - // TODO: check SemTable's columnEqualities for better plan - - for i, aExpr := range a { - bExpr := b[i] - if !gen4ValEqual(ctx, aExpr, bExpr) { - return false - } - } - return true -} - -func gen4ValEqual(ctx *plancontext.PlanningContext, a, b sqlparser.Expr) bool { - switch a := a.(type) { - case *sqlparser.ColName: - if b, ok := b.(*sqlparser.ColName); ok { - if !a.Name.Equal(b.Name) { - return false - } - - return ctx.SemTable.DirectDeps(a) == ctx.SemTable.DirectDeps(b) - } - case *sqlparser.Argument: - b, ok := b.(*sqlparser.Argument) - if !ok { - return false - } - return a.Name == b.Name - case *sqlparser.Literal: - b, ok := b.(*sqlparser.Literal) - if !ok { - return false - } - switch a.Type { - case sqlparser.StrVal: - switch b.Type { - case sqlparser.StrVal: - return a.Val == b.Val - case sqlparser.HexVal: - return hexEqual(b, a) - } - case sqlparser.HexVal: - return hexEqual(a, b) - case sqlparser.IntVal: - if b.Type == (sqlparser.IntVal) { - return a.Val 
== b.Val - } - } - } - return false -} diff --git a/go/vt/vtgate/planbuilder/operators/SQL_builder.go b/go/vt/vtgate/planbuilder/operators/SQL_builder.go index 07fa5fbbd9d..9802e374d87 100644 --- a/go/vt/vtgate/planbuilder/operators/SQL_builder.go +++ b/go/vt/vtgate/planbuilder/operators/SQL_builder.go @@ -18,8 +18,8 @@ package operators import ( "fmt" + "slices" "sort" - "strings" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" @@ -30,20 +30,27 @@ import ( type ( queryBuilder struct { - ctx *plancontext.PlanningContext - sel sqlparser.SelectStatement - tableNames []string + ctx *plancontext.PlanningContext + stmt sqlparser.Statement + tableNames []string + dmlOperator ops.Operator } ) -func ToSQL(ctx *plancontext.PlanningContext, op ops.Operator) (sqlparser.SelectStatement, error) { +func (qb *queryBuilder) asSelectStatement() sqlparser.SelectStatement { + return qb.stmt.(sqlparser.SelectStatement) +} + +func ToSQL(ctx *plancontext.PlanningContext, op ops.Operator) (sqlparser.Statement, ops.Operator, error) { q := &queryBuilder{ctx: ctx} err := buildQuery(op, q) if err != nil { - return nil, err + return nil, nil, err + } + if ctx.SemTable != nil { + q.sortTables() } - q.sortTables() - return q.sel, nil + return q.stmt, q.dmlOperator, nil } func (qb *queryBuilder) addTable(db, tableName, alias string, tableID semantics.TableSet, hints sqlparser.IndexHints) { @@ -61,10 +68,10 @@ func (qb *queryBuilder) addTableExpr( hints sqlparser.IndexHints, columnAliases sqlparser.Columns, ) { - if qb.sel == nil { - qb.sel = &sqlparser.Select{} + if qb.stmt == nil { + qb.stmt = &sqlparser.Select{} } - sel := qb.sel.(*sqlparser.Select) + sel := qb.stmt.(*sqlparser.Select) elems := &sqlparser.AliasedTableExpr{ Expr: tblExpr, Partitions: nil, @@ -74,7 +81,7 @@ func (qb *queryBuilder) addTableExpr( } qb.ctx.SemTable.ReplaceTableSetFor(tableID, elems) sel.From = append(sel.From, elems) - qb.sel = sel + qb.stmt = sel qb.tableNames = append(qb.tableNames, 
tableName) } @@ -85,38 +92,118 @@ func (qb *queryBuilder) addPredicate(expr sqlparser.Expr) { return } - sel := qb.sel.(*sqlparser.Select) - _, isSubQuery := expr.(*sqlparser.ExtractedSubquery) var addPred func(sqlparser.Expr) - if sqlparser.ContainsAggregation(expr) && !isSubQuery { - addPred = sel.AddHaving - } else { - addPred = sel.AddWhere + switch stmt := qb.stmt.(type) { + case *sqlparser.Select: + if containsAggr(expr) { + addPred = stmt.AddHaving + } else { + addPred = stmt.AddWhere + } + case *sqlparser.Update: + addPred = stmt.AddWhere + case *sqlparser.Delete: + addPred = stmt.AddWhere + default: + panic(fmt.Sprintf("cant add WHERE to %T", qb.stmt)) } + for _, exp := range sqlparser.SplitAndExpression(nil, expr) { addPred(exp) } } func (qb *queryBuilder) addGroupBy(original sqlparser.Expr) { - sel := qb.sel.(*sqlparser.Select) + sel := qb.stmt.(*sqlparser.Select) sel.GroupBy = append(sel.GroupBy, original) } -func (qb *queryBuilder) addProjection(projection *sqlparser.AliasedExpr) { - sel := qb.sel.(*sqlparser.Select) - sel.SelectExprs = append(sel.SelectExprs, projection) +func (qb *queryBuilder) addProjection(projection sqlparser.SelectExpr) error { + switch stmt := qb.stmt.(type) { + case *sqlparser.Select: + stmt.SelectExprs = append(stmt.SelectExprs, projection) + return nil + case *sqlparser.Union: + if ae, ok := projection.(*sqlparser.AliasedExpr); ok { + if col, ok := ae.Expr.(*sqlparser.ColName); ok { + return checkUnionColumnByName(col, stmt) + } + } + + qb.pushUnionInsideDerived() + return qb.addProjection(projection) + } + return vterrors.VT13001(fmt.Sprintf("unknown select statement type: %T", qb.stmt)) +} + +func (qb *queryBuilder) pushUnionInsideDerived() { + selStmt := qb.asSelectStatement() + dt := &sqlparser.DerivedTable{ + Lateral: false, + Select: selStmt, + } + sel := &sqlparser.Select{ + From: []sqlparser.TableExpr{&sqlparser.AliasedTableExpr{ + Expr: dt, + As: sqlparser.NewIdentifierCS("dt"), + }}, + } + sel.SelectExprs = 
unionSelects(sqlparser.GetFirstSelect(selStmt).SelectExprs) + qb.stmt = sel +} + +func unionSelects(exprs sqlparser.SelectExprs) (selectExprs sqlparser.SelectExprs) { + for _, col := range exprs { + switch col := col.(type) { + case *sqlparser.AliasedExpr: + expr := sqlparser.NewColName(col.ColumnName()) + selectExprs = append(selectExprs, &sqlparser.AliasedExpr{Expr: expr}) + default: + selectExprs = append(selectExprs, col) + } + } + return +} + +func checkUnionColumnByName(column *sqlparser.ColName, sel sqlparser.SelectStatement) error { + colName := column.Name.String() + exprs := sqlparser.GetFirstSelect(sel).SelectExprs + offset := slices.IndexFunc(exprs, func(expr sqlparser.SelectExpr) bool { + switch ae := expr.(type) { + case *sqlparser.StarExpr: + return true + case *sqlparser.AliasedExpr: + // When accessing columns on top of a UNION, we fall back to this simple strategy of string comparisons + return ae.ColumnName() == colName + } + return false + }) + if offset == -1 { + return vterrors.VT12001(fmt.Sprintf("did not find column [%s] on UNION", sqlparser.String(column))) + } + return nil } func (qb *queryBuilder) clearProjections() { - sel := qb.sel.(*sqlparser.Select) + sel, isSel := qb.stmt.(*sqlparser.Select) + if !isSel { + return + } sel.SelectExprs = nil } +func (qb *queryBuilder) unionWith(other *queryBuilder, distinct bool) { + qb.stmt = &sqlparser.Union{ + Left: qb.asSelectStatement(), + Right: other.asSelectStatement(), + Distinct: distinct, + } +} + func (qb *queryBuilder) joinInnerWith(other *queryBuilder, onCondition sqlparser.Expr) { - sel := qb.sel.(*sqlparser.Select) - otherSel := other.sel.(*sqlparser.Select) + sel := qb.stmt.(*sqlparser.Select) + otherSel := other.stmt.(*sqlparser.Select) sel.From = append(sel.From, otherSel.From...) sel.SelectExprs = append(sel.SelectExprs, otherSel.SelectExprs...) 
@@ -137,8 +224,8 @@ func (qb *queryBuilder) joinInnerWith(other *queryBuilder, onCondition sqlparser } func (qb *queryBuilder) joinOuterWith(other *queryBuilder, onCondition sqlparser.Expr) { - sel := qb.sel.(*sqlparser.Select) - otherSel := other.sel.(*sqlparser.Select) + sel := qb.stmt.(*sqlparser.Select) + otherSel := other.stmt.(*sqlparser.Select) var lhs sqlparser.TableExpr if len(sel.From) == 1 { lhs = sel.From[0] @@ -173,31 +260,6 @@ func (qb *queryBuilder) joinOuterWith(other *queryBuilder, onCondition sqlparser } } -func (qb *queryBuilder) rewriteExprForDerivedTable(expr sqlparser.Expr, dtName string) { - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - col, ok := node.(*sqlparser.ColName) - if !ok { - return true, nil - } - hasTable := qb.hasTable(col.Qualifier.Name.String()) - if hasTable { - col.Qualifier = sqlparser.TableName{ - Name: sqlparser.NewIdentifierCS(dtName), - } - } - return true, nil - }, expr) -} - -func (qb *queryBuilder) hasTable(tableName string) bool { - for _, name := range qb.tableNames { - if strings.EqualFold(tableName, name) { - return true - } - } - return false -} - func (qb *queryBuilder) sortTables() { _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { sel, isSel := node.(*sqlparser.Select) @@ -210,7 +272,7 @@ func (qb *queryBuilder) sortTables() { } sort.Sort(ts) return true, nil - }, qb.sel) + }, qb.stmt) } @@ -245,20 +307,6 @@ func (ts *tableSorter) Swap(i, j int) { ts.sel.From[i], ts.sel.From[j] = ts.sel.From[j], ts.sel.From[i] } -func (h *Horizon) toSQL(qb *queryBuilder) error { - err := stripDownQuery(h.Select, qb.sel) - if err != nil { - return err - } - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - if aliasedExpr, ok := node.(sqlparser.SelectExpr); ok { - removeKeyspaceFromSelectExpr(aliasedExpr) - } - return true, nil - }, qb.sel) - return nil -} - func removeKeyspaceFromSelectExpr(expr sqlparser.SelectExpr) { switch expr := 
expr.(type) { case *sqlparser.AliasedExpr: @@ -318,9 +366,10 @@ func buildQuery(op ops.Operator, qb *queryBuilder) error { return buildApplyJoin(op, qb) case *Filter: return buildFilter(op, qb) - case *Derived: - return buildDerived(op, qb) case *Horizon: + if op.TableId != nil { + return buildDerived(op, qb) + } return buildHorizon(op, qb) case *Limit: return buildLimit(op, qb) @@ -328,12 +377,65 @@ func buildQuery(op ops.Operator, qb *queryBuilder) error { return buildOrdering(op, qb) case *Aggregator: return buildAggregation(op, qb) + case *Union: + return buildUnion(op, qb) + case *Distinct: + err := buildQuery(op.Source, qb) + if err != nil { + return err + } + qb.asSelectStatement().MakeDistinct() + case *Update: + buildUpdate(op, qb) + case *Delete: + buildDML(op, qb) + case *Insert: + buildDML(op, qb) default: - return vterrors.VT13001(fmt.Sprintf("do not know how to turn %T into SQL", op)) + return vterrors.VT13001(fmt.Sprintf("unknown operator to convert to SQL: %T", op)) } return nil } +func buildUpdate(op *Update, qb *queryBuilder) { + tblName := sqlparser.NewTableName(op.QTable.Table.Name.String()) + aTblExpr := &sqlparser.AliasedTableExpr{ + Expr: tblName, + As: op.QTable.Alias.As, + } + updExprs := make(sqlparser.UpdateExprs, 0, len(op.Assignments)) + for _, se := range op.Assignments { + updExprs = append(updExprs, &sqlparser.UpdateExpr{ + Name: se.Name, + Expr: se.Expr.EvalExpr, + }) + } + + qb.stmt = &sqlparser.Update{ + Ignore: op.Ignore, + TableExprs: sqlparser.TableExprs{aTblExpr}, + Exprs: updExprs, + OrderBy: op.OrderBy, + Limit: op.Limit, + } + + for _, pred := range op.QTable.Predicates { + qb.addPredicate(pred) + } + + qb.dmlOperator = op +} + +type OpWithAST interface { + ops.Operator + Statement() sqlparser.Statement +} + +func buildDML(op OpWithAST, qb *queryBuilder) { + qb.stmt = op.Statement() + qb.dmlOperator = op +} + func buildAggregation(op *Aggregator, qb *queryBuilder) error { err := buildQuery(op.Source, qb) if err != nil { @@ 
-342,12 +444,15 @@ func buildAggregation(op *Aggregator, qb *queryBuilder) error { qb.clearProjections() - cols, err := op.GetColumns() + cols, err := op.GetColumns(qb.ctx) if err != nil { return err } for _, column := range cols { - qb.addProjection(column) + err := qb.addProjection(column) + if err != nil { + return err + } } for _, by := range op.Grouping { @@ -368,7 +473,7 @@ func buildOrdering(op *Ordering, qb *queryBuilder) error { } for _, order := range op.Order { - qb.sel.AddOrder(order.Inner) + qb.asSelectStatement().AddOrder(order.Inner) } return nil } @@ -378,7 +483,7 @@ func buildLimit(op *Limit, qb *queryBuilder) error { if err != nil { return err } - qb.sel.SetLimit(op.AST) + qb.asSelectStatement().SetLimit(op.AST) return nil } @@ -393,7 +498,10 @@ func buildTable(op *Table, qb *queryBuilder) { qb.addPredicate(pred) } for _, name := range op.Columns { - qb.addProjection(&sqlparser.AliasedExpr{Expr: name}) + err := qb.addProjection(&sqlparser.AliasedExpr{Expr: name}) + if err != nil { + return + } } } @@ -403,20 +511,42 @@ func buildProjection(op *Projection, qb *queryBuilder) error { return err } - qb.clearProjections() - - for _, column := range op.Columns { - qb.addProjection(column) + _, isSel := qb.stmt.(*sqlparser.Select) + if isSel { + qb.clearProjections() + cols, err := op.GetSelectExprs(qb.ctx) + if err != nil { + return err + } + for _, column := range cols { + err := qb.addProjection(column) + if err != nil { + return err + } + } } // if the projection is on derived table, we use the select we have // created above and transform it into a derived table - if op.TableID != nil { - sel := qb.sel.(*sqlparser.Select) - qb.sel = nil - qb.addTableExpr(op.Alias, op.Alias, TableID(op), &sqlparser.DerivedTable{ + if op.DT != nil { + sel := qb.asSelectStatement() + qb.stmt = nil + qb.addTableExpr(op.DT.Alias, op.DT.Alias, TableID(op), &sqlparser.DerivedTable{ Select: sel, - }, nil, nil) + }, nil, op.DT.Columns) + } + + if !isSel { + cols, err := 
op.GetSelectExprs(qb.ctx) + if err != nil { + return err + } + for _, column := range cols { + err := qb.addProjection(column) + if err != nil { + return err + } + } } return nil @@ -446,6 +576,30 @@ func buildApplyJoin(op *ApplyJoin, qb *queryBuilder) error { return nil } +func buildUnion(op *Union, qb *queryBuilder) error { + // the first input is built first + err := buildQuery(op.Sources[0], qb) + if err != nil { + return err + } + + for i, src := range op.Sources { + if i == 0 { + continue + } + + // now we can go over the remaining inputs and UNION them together + qbOther := &queryBuilder{ctx: qb.ctx} + err = buildQuery(src, qbOther) + if err != nil { + return err + } + qb.unionWith(qbOther, op.distinct) + } + + return nil +} + func buildFilter(op *Filter, qb *queryBuilder) error { err := buildQuery(op.Source, qb) if err != nil { @@ -457,15 +611,46 @@ func buildFilter(op *Filter, qb *queryBuilder) error { return nil } -func buildDerived(op *Derived, qb *queryBuilder) error { +func buildDerived(op *Horizon, qb *queryBuilder) error { err := buildQuery(op.Source, qb) if err != nil { return err } - sel := qb.sel.(*sqlparser.Select) // we can only handle SELECT in derived tables at the moment - qb.sel = nil sqlparser.RemoveKeyspace(op.Query) - opQuery := op.Query.(*sqlparser.Select) + + stmt := qb.stmt + qb.stmt = nil + switch sel := stmt.(type) { + case *sqlparser.Select: + return buildDerivedSelect(op, qb, sel) + case *sqlparser.Union: + return buildDerivedUnion(op, qb, sel) + } + panic(fmt.Sprintf("unknown select statement type: %T", stmt)) +} + +func buildDerivedUnion(op *Horizon, qb *queryBuilder, union *sqlparser.Union) error { + opQuery, ok := op.Query.(*sqlparser.Union) + if !ok { + return vterrors.VT12001("Horizon contained SELECT but statement was UNION") + } + + union.Limit = opQuery.Limit + union.OrderBy = opQuery.OrderBy + union.Distinct = opQuery.Distinct + + qb.addTableExpr(op.Alias, op.Alias, TableID(op), &sqlparser.DerivedTable{ + Select: union, + 
}, nil, op.ColumnAliases) + + return nil +} + +func buildDerivedSelect(op *Horizon, qb *queryBuilder, sel *sqlparser.Select) error { + opQuery, ok := op.Query.(*sqlparser.Select) + if !ok { + return vterrors.VT12001("Horizon contained UNION but statement was SELECT") + } sel.Limit = opQuery.Limit sel.OrderBy = opQuery.OrderBy sel.GroupBy = opQuery.GroupBy @@ -475,9 +660,13 @@ func buildDerived(op *Derived, qb *queryBuilder) error { Select: sel, }, nil, op.ColumnAliases) for _, col := range op.Columns { - qb.addProjection(&sqlparser.AliasedExpr{Expr: col}) + err := qb.addProjection(&sqlparser.AliasedExpr{Expr: col}) + if err != nil { + return err + } } return nil + } func buildHorizon(op *Horizon, qb *queryBuilder) error { @@ -486,7 +675,7 @@ func buildHorizon(op *Horizon, qb *queryBuilder) error { return err } - err = stripDownQuery(op.Select, qb.sel) + err = stripDownQuery(op.Query, qb.asSelectStatement()) if err != nil { return err } @@ -495,7 +684,7 @@ func buildHorizon(op *Horizon, qb *queryBuilder) error { removeKeyspaceFromSelectExpr(aliasedExpr) } return true, nil - }, qb.sel) + }, qb.stmt) return nil } diff --git a/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go b/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go index 22cb21426dd..c7fc8c790a7 100644 --- a/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go +++ b/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go @@ -17,7 +17,9 @@ limitations under the License. 
package operators import ( + "errors" "fmt" + "slices" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" @@ -28,43 +30,110 @@ import ( "vitess.io/vitess/go/vt/vtgate/semantics" ) -func tryPushingDownAggregator(ctx *plancontext.PlanningContext, aggregator *Aggregator) (output ops.Operator, applyResult *rewrite.ApplyResult, err error) { +func tryPushAggregator(ctx *plancontext.PlanningContext, aggregator *Aggregator) (output ops.Operator, applyResult *rewrite.ApplyResult, err error) { if aggregator.Pushed { return aggregator, rewrite.SameTree, nil } - aggregator.Pushed = true switch src := aggregator.Source.(type) { case *Route: - output, applyResult, err = pushDownAggregationThroughRoute(ctx, aggregator, src) + // if we have a single sharded route, we can push it down + output, applyResult, err = pushAggregationThroughRoute(ctx, aggregator, src) case *ApplyJoin: - output, applyResult, err = pushDownAggregationThroughJoin(ctx, aggregator, src) + if reachedPhase(ctx, delegateAggregation) { + output, applyResult, err = pushAggregationThroughJoin(ctx, aggregator, src) + } case *Filter: - output, applyResult, err = pushDownAggregationThroughFilter(ctx, aggregator, src) + if reachedPhase(ctx, delegateAggregation) { + output, applyResult, err = pushAggregationThroughFilter(ctx, aggregator, src) + } + case *SubQueryContainer: + if reachedPhase(ctx, delegateAggregation) { + output, applyResult, err = pushAggregationThroughSubquery(ctx, aggregator, src) + } default: return aggregator, rewrite.SameTree, nil } - if applyResult != rewrite.SameTree && aggregator.Original { - aggregator.aggregateTheAggregates() + if err != nil { + return nil, nil, err + } + + if output == nil { + return aggregator, rewrite.SameTree, nil } + aggregator.Pushed = true + return } -func (a *Aggregator) aggregateTheAggregates() { - for i, aggr := range a.Aggregations { - // Handle different aggregation operations when pushing down through a sharded route. 
- switch aggr.OpCode { - case opcode.AggregateCount, opcode.AggregateCountStar, opcode.AggregateCountDistinct: - // All count variations turn into SUM above the Route. - // Think of it as we are SUMming together a bunch of distributed COUNTs. - aggr.OriginalOpCode, aggr.OpCode = aggr.OpCode, opcode.AggregateSum - a.Aggregations[i] = aggr +func reachedPhase(ctx *plancontext.PlanningContext, p Phase) bool { + b := ctx.CurrentPhase >= int(p) + return b +} + +// pushAggregationThroughSubquery pushes an aggregation under a subquery. +// Any columns that are needed to evaluate the subquery needs to be added as +// grouping columns to the aggregation being pushed down, and then after the +// subquery evaluation we are free to reassemble the total aggregation values. +// This is very similar to how we push aggregation through an shouldRun-join. +func pushAggregationThroughSubquery( + ctx *plancontext.PlanningContext, + rootAggr *Aggregator, + src *SubQueryContainer, +) (ops.Operator, *rewrite.ApplyResult, error) { + pushedAggr := rootAggr.Clone([]ops.Operator{src.Outer}).(*Aggregator) + pushedAggr.Original = false + pushedAggr.Pushed = false + + for _, subQuery := range src.Inner { + lhsCols, err := subQuery.OuterExpressionsNeeded(ctx, src.Outer) + if err != nil { + return nil, nil, err + } + for _, colName := range lhsCols { + idx := slices.IndexFunc(pushedAggr.Columns, func(ae *sqlparser.AliasedExpr) bool { + return ctx.SemTable.EqualsExpr(ae.Expr, colName) + }) + if idx >= 0 { + continue + } + _, err := pushedAggr.addColumnWithoutPushing(ctx, aeWrap(colName), true) + if err != nil { + return nil, nil, err + } } } + + src.Outer = pushedAggr + + if !rootAggr.Original { + return src, rewrite.NewTree("push Aggregation under subquery - keep original", rootAggr), nil + } + + rootAggr.aggregateTheAggregates() + + return rootAggr, rewrite.NewTree("push Aggregation under subquery", rootAggr), nil } -func pushDownAggregationThroughRoute( +func (a *Aggregator) 
aggregateTheAggregates() { + for i := range a.Aggregations { + aggregateTheAggregate(a, i) + } +} + +func aggregateTheAggregate(a *Aggregator, i int) { + aggr := a.Aggregations[i] + switch aggr.OpCode { + case opcode.AggregateCount, opcode.AggregateCountStar, opcode.AggregateCountDistinct, opcode.AggregateSumDistinct: + // All count variations turn into SUM above the Route. This is also applied for Sum distinct when it is pushed down. + // Think of it as we are SUMming together a bunch of distributed COUNTs. + aggr.OriginalOpCode, aggr.OpCode = aggr.OpCode, opcode.AggregateSum + a.Aggregations[i] = aggr + } +} + +func pushAggregationThroughRoute( ctx *plancontext.PlanningContext, aggregator *Aggregator, route *Route, @@ -74,10 +143,18 @@ func pushDownAggregationThroughRoute( return rewrite.Swap(aggregator, route, "push down aggregation under route - remove original") } + if !reachedPhase(ctx, delegateAggregation) { + return nil, nil, nil + } + // Create a new aggregator to be placed below the route. - aggrBelowRoute := aggregator.Clone([]ops.Operator{route.Source}).(*Aggregator) - aggrBelowRoute.Pushed = false - aggrBelowRoute.Original = false + aggrBelowRoute := aggregator.SplitAggregatorBelowRoute(route.Inputs()) + aggrBelowRoute.Aggregations = nil + + err := pushAggregations(ctx, aggregator, aggrBelowRoute) + if err != nil { + return nil, nil, err + } // Set the source of the route to the new aggregator placed below the route. 
route.Source = aggrBelowRoute @@ -91,18 +168,80 @@ func pushDownAggregationThroughRoute( return aggregator, rewrite.NewTree("push aggregation under route - keep original", aggregator), nil } -func pushDownAggregationThroughFilter( +// pushAggregations splits aggregations between the original aggregator and the one we are pushing down +func pushAggregations(ctx *plancontext.PlanningContext, aggregator *Aggregator, aggrBelowRoute *Aggregator) error { + canPushDistinctAggr, distinctExpr, err := checkIfWeCanPush(ctx, aggregator) + if err != nil { + return err + } + + distinctAggrGroupByAdded := false + + for i, aggr := range aggregator.Aggregations { + if !aggr.Distinct || canPushDistinctAggr { + aggrBelowRoute.Aggregations = append(aggrBelowRoute.Aggregations, aggr) + aggregateTheAggregate(aggregator, i) + continue + } + + // We handle a distinct aggregation by turning it into a group by and + // doing the aggregating on the vtgate level instead + aeDistinctExpr := aeWrap(distinctExpr) + aggrBelowRoute.Columns[aggr.ColOffset] = aeDistinctExpr + + // We handle a distinct aggregation by turning it into a group by and + // doing the aggregating on the vtgate level instead + // Adding to group by can be done only once even though there are multiple distinct aggregation with same expression. 
+ if !distinctAggrGroupByAdded { + groupBy := NewGroupBy(distinctExpr, distinctExpr, aeDistinctExpr) + groupBy.ColOffset = aggr.ColOffset + aggrBelowRoute.Grouping = append(aggrBelowRoute.Grouping, groupBy) + distinctAggrGroupByAdded = true + } + } + + if !canPushDistinctAggr { + aggregator.DistinctExpr = distinctExpr + } + + return nil +} + +func checkIfWeCanPush(ctx *plancontext.PlanningContext, aggregator *Aggregator) (bool, sqlparser.Expr, error) { + canPush := true + var distinctExpr sqlparser.Expr + var differentExpr *sqlparser.AliasedExpr + + for _, aggr := range aggregator.Aggregations { + if !aggr.Distinct { + continue + } + + innerExpr := aggr.Func.GetArg() + if !exprHasUniqueVindex(ctx, innerExpr) { + canPush = false + } + if distinctExpr == nil { + distinctExpr = innerExpr + } + if !ctx.SemTable.EqualsExpr(distinctExpr, innerExpr) { + differentExpr = aggr.Original + } + } + + if !canPush && differentExpr != nil { + return false, nil, vterrors.VT12001(fmt.Sprintf("only one DISTINCT aggregation is allowed in a SELECT: %s", sqlparser.String(differentExpr))) + } + + return canPush, distinctExpr, nil +} + +func pushAggregationThroughFilter( ctx *plancontext.PlanningContext, aggregator *Aggregator, filter *Filter, ) (ops.Operator, *rewrite.ApplyResult, error) { - for _, predicate := range filter.Predicates { - if sqlparser.ContainsAggregation(predicate) { - return nil, nil, errHorizonNotPlanned() - } - } - columnsNeeded := collectColNamesNeeded(ctx, filter) // Create a new aggregator to be placed below the route. @@ -117,7 +256,10 @@ withNextColumn: continue withNextColumn } } - pushedAggr.addColumnWithoutPushing(aeWrap(col), true) + _, err := pushedAggr.addColumnWithoutPushing(ctx, aeWrap(col), true) + if err != nil { + return nil, nil, err + } } // Set the source of the filter to the new aggregator placed below the route. 
@@ -128,7 +270,7 @@ withNextColumn: // by splitting one and pushing under a join, we can get rid of this one return aggregator.Source, rewrite.NewTree("push aggregation under filter - remove original", aggregator), nil } - + aggregator.aggregateTheAggregates() return aggregator, rewrite.NewTree("push aggregation under filter - keep original", aggregator), nil } @@ -225,7 +367,7 @@ Transformed: / \ R1 R2 */ -func pushDownAggregationThroughJoin(ctx *plancontext.PlanningContext, rootAggr *Aggregator, join *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { +func pushAggregationThroughJoin(ctx *plancontext.PlanningContext, rootAggr *Aggregator, join *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { lhs := &joinPusher{ orig: rootAggr, pushed: &Aggregator{ @@ -247,6 +389,10 @@ func pushDownAggregationThroughJoin(ctx *plancontext.PlanningContext, rootAggr * joinColumns, output, err := splitAggrColumnsToLeftAndRight(ctx, rootAggr, join, lhs, rhs) if err != nil { + // if we get this error, we just abort the splitting and fall back on simpler ways of solving the same query + if errors.Is(err, errAbortAggrPushing) { + return nil, nil, nil + } return nil, nil, err } @@ -264,7 +410,7 @@ func pushDownAggregationThroughJoin(ctx *plancontext.PlanningContext, rootAggr * } join.LHS, join.RHS = lhs.pushed, rhs.pushed - join.ColumnsAST = joinColumns + join.JoinColumns = joinColumns if !rootAggr.Original { // we only keep the root aggregation, if this aggregator was created @@ -272,13 +418,17 @@ func pushDownAggregationThroughJoin(ctx *plancontext.PlanningContext, rootAggr * return output, rewrite.NewTree("push Aggregation under join - keep original", rootAggr), nil } + rootAggr.aggregateTheAggregates() rootAggr.Source = output return rootAggr, rewrite.NewTree("push Aggregation under join", rootAggr), nil } +var errAbortAggrPushing = fmt.Errorf("abort aggregation pushing") + func addColumnsFromLHSInJoinPredicates(ctx *plancontext.PlanningContext, rootAggr *Aggregator, 
join *ApplyJoin, lhs *joinPusher) error { for _, pred := range join.JoinPredicates { - for _, expr := range pred.LHSExprs { + for _, bve := range pred.LHSExprs { + expr := bve.Expr wexpr := rootAggr.QP.GetSimplifiedExpr(expr) idx, found := canReuseColumn(ctx, lhs.pushed.Columns, expr, extractExpr) if !found { @@ -315,7 +465,7 @@ func splitGroupingToLeftAndRight(ctx *plancontext.PlanningContext, rootAggr *Agg lhs.addGrouping(ctx, groupBy) groupingJCs = append(groupingJCs, JoinColumn{ Original: aeWrap(groupBy.Inner), - LHSExprs: []sqlparser.Expr{expr}, + LHSExprs: []BindVarExpr{{Expr: expr}}, }) case deps.IsSolvedBy(rhs.tableID): rhs.addGrouping(ctx, groupBy) @@ -323,8 +473,18 @@ func splitGroupingToLeftAndRight(ctx *plancontext.PlanningContext, rootAggr *Agg Original: aeWrap(groupBy.Inner), RHSExpr: expr, }) + case deps.IsSolvedBy(lhs.tableID.Merge(rhs.tableID)): + jc, err := BreakExpressionInLHSandRHS(ctx, groupBy.SimplifiedExpr, lhs.tableID) + if err != nil { + return nil, err + } + for _, lhsExpr := range jc.LHSExprs { + e := lhsExpr.Expr + lhs.addGrouping(ctx, NewGroupBy(e, e, aeWrap(e))) + } + rhs.addGrouping(ctx, NewGroupBy(jc.RHSExpr, jc.RHSExpr, aeWrap(jc.RHSExpr))) default: - return nil, vterrors.VT12001("grouping on columns from different sources") + return nil, vterrors.VT13001(fmt.Sprintf("grouping with bad dependencies %s", groupBy.SimplifiedExpr)) } } return groupingJCs, nil @@ -339,13 +499,27 @@ func splitAggrColumnsToLeftAndRight( join *ApplyJoin, lhs, rhs *joinPusher, ) ([]JoinColumn, ops.Operator, error) { + proj := newAliasedProjection(join) + proj.FromAggr = true builder := &aggBuilder{ lhs: lhs, rhs: rhs, - proj: &Projection{Source: join, FromAggr: true}, + proj: proj, outerJoin: join.LeftJoin, } + canPushDistinctAggr, distinctExpr, err := checkIfWeCanPush(ctx, aggregator) + if err != nil { + return nil, nil, err + } + + // Distinct aggregation cannot be pushed down in the join. 
+ // We keep node of the distinct aggregation expression to be used later for ordering. + if !canPushDistinctAggr { + aggregator.DistinctExpr = distinctExpr + return nil, nil, errAbortAggrPushing + } + outer: // we prefer adding the aggregations in the same order as the columns are declared for colIdx, col := range aggregator.Columns { @@ -358,7 +532,10 @@ outer: continue outer } } - builder.proj.addUnexploredExpr(col, col.Expr) + _, err := builder.proj.addUnexploredExpr(col, col.Expr) + if err != nil { + return nil, nil, err + } } return builder.joinColumns, builder.proj, nil } @@ -391,7 +568,7 @@ func (ab *aggBuilder) leftCountStar(ctx *plancontext.PlanningContext) *sqlparser if created { ab.joinColumns = append(ab.joinColumns, JoinColumn{ Original: ae, - LHSExprs: []sqlparser.Expr{ae.Expr}, + LHSExprs: []BindVarExpr{{Expr: ae.Expr}}, }) } return ae @@ -423,16 +600,30 @@ func (p *joinPusher) countStar(ctx *plancontext.PlanningContext) (*sqlparser.Ali func (ab *aggBuilder) handleAggr(ctx *plancontext.PlanningContext, aggr Aggr) error { switch aggr.OpCode { case opcode.AggregateCountStar: - ab.handleCountStar(ctx, aggr) - return nil - case opcode.AggregateMax, opcode.AggregateMin, opcode.AggregateRandom: - return ab.handlePushThroughAggregation(ctx, aggr) + return ab.handleCountStar(ctx, aggr) case opcode.AggregateCount, opcode.AggregateSum: return ab.handleAggrWithCountStarMultiplier(ctx, aggr) + case opcode.AggregateMax, opcode.AggregateMin, opcode.AggregateAnyValue: + return ab.handlePushThroughAggregation(ctx, aggr) + case opcode.AggregateGroupConcat: + f := aggr.Func.(*sqlparser.GroupConcatExpr) + if f.Distinct || len(f.OrderBy) > 0 || f.Separator != "" { + panic("fail here") + } + // this needs special handling, currently aborting the push of function + // and later will try pushing the column instead. + // TODO: this should be handled better by pushing the function down. 
+ return errAbortAggrPushing case opcode.AggregateUnassigned: return vterrors.VT12001(fmt.Sprintf("in scatter query: aggregation function '%s'", sqlparser.String(aggr.Original))) + case opcode.AggregateGtid: + // this is only used for SHOW GTID queries that will never contain joins + return vterrors.VT13001("cannot do join with vgtid") + case opcode.AggregateSumDistinct, opcode.AggregateCountDistinct: + // we are not going to see values multiple times, so we don't need to multiply with the count(*) from the other side + return ab.handlePushThroughAggregation(ctx, aggr) default: - return errHorizonNotPlanned() + return vterrors.VT12001(fmt.Sprintf("aggregation not planned: %s", aggr.OpCode.String())) } } @@ -443,9 +634,10 @@ func (ab *aggBuilder) pushThroughLeft(aggr Aggr) { ab.lhs.pushThroughAggr(aggr) ab.joinColumns = append(ab.joinColumns, JoinColumn{ Original: aggr.Original, - LHSExprs: []sqlparser.Expr{aggr.Original.Expr}, + LHSExprs: []BindVarExpr{{Expr: aggr.Original.Expr}}, }) } + func (ab *aggBuilder) pushThroughRight(aggr Aggr) { ab.rhs.pushThroughAggr(aggr) ab.joinColumns = append(ab.joinColumns, JoinColumn{ @@ -455,7 +647,10 @@ func (ab *aggBuilder) pushThroughRight(aggr Aggr) { } func (ab *aggBuilder) handlePushThroughAggregation(ctx *plancontext.PlanningContext, aggr Aggr) error { - ab.proj.addUnexploredExpr(aggr.Original, aggr.Original.Expr) + _, err := ab.proj.addUnexploredExpr(aggr.Original, aggr.Original.Expr) + if err != nil { + return err + } deps := ctx.SemTable.RecursiveDeps(aggr.Original.Expr) switch { @@ -464,17 +659,17 @@ func (ab *aggBuilder) handlePushThroughAggregation(ctx *plancontext.PlanningCont case deps.IsSolvedBy(ab.rhs.tableID): ab.pushThroughRight(aggr) default: - return vterrors.VT12001("aggregation on columns from different sources: " + sqlparser.String(aggr.Original.Expr)) + return errAbortAggrPushing } return nil } -func (ab *aggBuilder) handleCountStar(ctx *plancontext.PlanningContext, aggr Aggr) { +func (ab *aggBuilder) 
handleCountStar(ctx *plancontext.PlanningContext, aggr Aggr) error { // Add the aggregate to both sides of the join. lhsAE := ab.leftCountStar(ctx) rhsAE := ab.rightCountStar(ctx) - ab.buildProjectionForAggr(lhsAE, rhsAE, aggr, true) + return ab.buildProjectionForAggr(lhsAE, rhsAE, aggr, true) } func (ab *aggBuilder) handleAggrWithCountStarMultiplier(ctx *plancontext.PlanningContext, aggr Aggr) error { @@ -497,14 +692,13 @@ func (ab *aggBuilder) handleAggrWithCountStarMultiplier(ctx *plancontext.Plannin rhsAE = aggr.Original default: - return errHorizonNotPlanned() + return errAbortAggrPushing } - ab.buildProjectionForAggr(lhsAE, rhsAE, aggr, addCoalesce) - return nil + return ab.buildProjectionForAggr(lhsAE, rhsAE, aggr, addCoalesce) } -func (ab *aggBuilder) buildProjectionForAggr(lhsAE *sqlparser.AliasedExpr, rhsAE *sqlparser.AliasedExpr, aggr Aggr, coalesce bool) { +func (ab *aggBuilder) buildProjectionForAggr(lhsAE *sqlparser.AliasedExpr, rhsAE *sqlparser.AliasedExpr, aggr Aggr, coalesce bool) error { // We expect the expressions to be different on each side of the join, otherwise it's an error. if lhsAE.Expr == rhsAE.Expr { panic(fmt.Sprintf("Need the two produced expressions to be different. 
%T %T", lhsAE, rhsAE)) @@ -533,7 +727,8 @@ func (ab *aggBuilder) buildProjectionForAggr(lhsAE *sqlparser.AliasedExpr, rhsAE As: sqlparser.NewIdentifierCI(aggr.Original.ColumnName()), } - ab.proj.addUnexploredExpr(projAE, projExpr) + _, err := ab.proj.addUnexploredExpr(projAE, projExpr) + return err } func coalesceFunc(e sqlparser.Expr) sqlparser.Expr { diff --git a/go/vt/vtgate/planbuilder/operators/aggregator.go b/go/vt/vtgate/planbuilder/operators/aggregator.go index c9327b4e384..f83a1df9725 100644 --- a/go/vt/vtgate/planbuilder/operators/aggregator.go +++ b/go/vt/vtgate/planbuilder/operators/aggregator.go @@ -18,11 +18,10 @@ package operators import ( "fmt" + "slices" "strings" - "golang.org/x/exp/slices" - - "vitess.io/vitess/go/slices2" + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine/opcode" @@ -42,31 +41,32 @@ type ( Grouping []GroupBy Aggregations []Aggr + // We support a single distinct aggregation per aggregator. It is stored here. 
+ // When planning the ordering that the OrderedAggregate will require, + // this needs to be the last ORDER BY expression + DistinctExpr sqlparser.Expr + // Pushed will be set to true once this aggregation has been pushed deeper in the tree - Pushed bool + Pushed bool + offsetPlanned bool // Original will only be true for the original aggregator created from the AST Original bool ResultColumns int QP *QueryProjection - // TableID will be non-nil for derived tables - TableID *semantics.TableSet - Alias string + + DT *DerivedTable } ) func (a *Aggregator) Clone(inputs []ops.Operator) ops.Operator { - return &Aggregator{ - Source: inputs[0], - Columns: slices.Clone(a.Columns), - Grouping: slices.Clone(a.Grouping), - Aggregations: slices.Clone(a.Aggregations), - Pushed: a.Pushed, - Original: a.Original, - ResultColumns: a.ResultColumns, - QP: a.QP, - } + kopy := *a + kopy.Source = inputs[0] + kopy.Columns = slices.Clone(a.Columns) + kopy.Grouping = slices.Clone(a.Grouping) + kopy.Aggregations = slices.Clone(a.Aggregations) + return &kopy } func (a *Aggregator) Inputs() []ops.Operator { @@ -81,15 +81,13 @@ func (a *Aggregator) SetInputs(operators []ops.Operator) { } func (a *Aggregator) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { - newOp, err := a.Source.AddPredicate(ctx, expr) - if err != nil { - return nil, err - } - a.Source = newOp - return a, nil + return &Filter{ + Source: a, + Predicates: []sqlparser.Expr{expr}, + }, nil } -func (a *Aggregator) addColumnWithoutPushing(expr *sqlparser.AliasedExpr, addToGroupBy bool) int { +func (a *Aggregator) addColumnWithoutPushing(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, addToGroupBy bool) (int, error) { offset := len(a.Columns) a.Columns = append(a.Columns, expr) @@ -98,84 +96,177 @@ func (a *Aggregator) addColumnWithoutPushing(expr *sqlparser.AliasedExpr, addToG groupBy.ColOffset = offset a.Grouping = append(a.Grouping, groupBy) } else { - aggr := 
NewAggr(opcode.AggregateRandom, nil, expr, expr.As.String()) + var aggr Aggr + switch e := expr.Expr.(type) { + case sqlparser.AggrFunc: + aggr = createAggrFromAggrFunc(e, expr) + default: + aggr = NewAggr(opcode.AggregateAnyValue, nil, expr, expr.As.String()) + } aggr.ColOffset = offset a.Aggregations = append(a.Aggregations, aggr) } - return offset + return offset, nil +} + +func (a *Aggregator) addColumnsWithoutPushing(ctx *plancontext.PlanningContext, reuse bool, groupby []bool, exprs []*sqlparser.AliasedExpr) (offsets []int, err error) { + for i, ae := range exprs { + offset, err := a.addColumnWithoutPushing(ctx, ae, groupby[i]) + if err != nil { + return nil, err + } + offsets = append(offsets, offset) + } + return } func (a *Aggregator) isDerived() bool { - return a.TableID != nil + return a.DT != nil } -func (a *Aggregator) AddColumn(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, _, addToGroupBy bool) (ops.Operator, int, error) { - if addToGroupBy { - return nil, 0, vterrors.VT13001("did not expect to add group by here") +func (a *Aggregator) FindCol(ctx *plancontext.PlanningContext, in sqlparser.Expr, _ bool) (int, error) { + expr, err := a.DT.RewriteExpression(ctx, in) + if err != nil { + return 0, err } - if offset, found := canReuseColumn(ctx, a.Columns, expr.Expr, extractExpr); found { - return a, offset, nil + if offset, found := canReuseColumn(ctx, a.Columns, expr, extractExpr); found { + return offset, nil } - colName, isColName := expr.Expr.(*sqlparser.ColName) - for i, col := range a.Columns { - if isColName && colName.Name.EqualString(col.As.String()) { - return a, i, nil - } + return -1, nil +} + +func (a *Aggregator) AddColumn(ctx *plancontext.PlanningContext, reuse bool, groupBy bool, ae *sqlparser.AliasedExpr) (int, error) { + rewritten, err := a.DT.RewriteExpression(ctx, ae.Expr) + if err != nil { + return 0, err } - // If weight string function is received from above operator. 
Then check if we have a group on the expression used. - // If it is found, then continue to push it down but with addToGroupBy true so that is the added to group by sql down in the AddColumn. - // This also set the weight string column offset so that we would not need to add it later in aggregator operator planOffset. + ae = &sqlparser.AliasedExpr{ + Expr: rewritten, + As: ae.As, + } - // If the expression is a WeightStringFuncExpr, it checks if a GroupBy - // already exists on the argument of the expression. - // If it is found, the column offset for the WeightStringFuncExpr is set, - // and the column is marked to be added to the GroupBy in the recursive AddColumn call. - if wsExpr, isWS := expr.Expr.(*sqlparser.WeightStringFuncExpr); isWS { + if reuse { + offset, err := a.findColInternal(ctx, ae, groupBy) + if err != nil { + return 0, err + } + if offset >= 0 { + return offset, nil + } + } + + // Upon receiving a weight string function from an upstream operator, check for an existing grouping on the argument expression. + // If a grouping is found, continue to push the function down, marking it with 'addToGroupBy' to ensure it's correctly treated as a grouping column. + // This process also sets the weight string column offset, eliminating the need for a later addition in the aggregator operator's planOffset. 
+ if wsExpr, isWS := rewritten.(*sqlparser.WeightStringFuncExpr); isWS { idx := slices.IndexFunc(a.Grouping, func(by GroupBy) bool { return ctx.SemTable.EqualsExprWithDeps(wsExpr.Expr, by.SimplifiedExpr) }) if idx >= 0 { a.Grouping[idx].WSOffset = len(a.Columns) - addToGroupBy = true + groupBy = true } } - if !addToGroupBy { - aggr := NewAggr(opcode.AggregateRandom, nil, expr, expr.As.String()) + if !groupBy { + aggr := NewAggr(opcode.AggregateAnyValue, nil, ae, ae.As.String()) aggr.ColOffset = len(a.Columns) a.Aggregations = append(a.Aggregations, aggr) } - a.Columns = append(a.Columns, expr) - expectedOffset := len(a.Columns) - 1 - newSrc, offset, err := a.Source.AddColumn(ctx, expr, false, addToGroupBy) + + offset := len(a.Columns) + a.Columns = append(a.Columns, ae) + incomingOffset, err := a.Source.AddColumn(ctx, false, groupBy, ae) if err != nil { - return nil, 0, err + return 0, err } - if offset != expectedOffset { - return nil, 0, vterrors.VT13001("the offset needs to be aligned here") + + if offset != incomingOffset { + return 0, errFailedToPlan(ae) } - a.Source = newSrc - return a, offset, nil + + return offset, nil } -func (a *Aggregator) GetColumns() (columns []*sqlparser.AliasedExpr, err error) { - return a.Columns, nil +func (a *Aggregator) findColInternal(ctx *plancontext.PlanningContext, ae *sqlparser.AliasedExpr, addToGroupBy bool) (int, error) { + expr := ae.Expr + offset, err := a.FindCol(ctx, expr, false) + if err != nil { + return 0, err + } + if offset >= 0 { + return offset, err + } + expr, err = a.DT.RewriteExpression(ctx, expr) + if err != nil { + return 0, err + } + + // Aggregator is little special and cannot work if the input offset are not matched with the aggregation columns. + // So, before pushing anything from above the aggregator offset planning needs to be completed. 
+ err = a.planOffsets(ctx) + if err != nil { + return 0, err + } + + if offset, found := canReuseColumn(ctx, a.Columns, expr, extractExpr); found { + return offset, nil + } + colName, isColName := expr.(*sqlparser.ColName) + for i, col := range a.Columns { + if isColName && colName.Name.EqualString(col.As.String()) { + return i, nil + } + } + + if addToGroupBy { + return 0, vterrors.VT13001("did not expect to add group by here") + } + + return -1, nil } -func (a *Aggregator) Description() ops.OpDescription { - return ops.OpDescription{ - OperatorType: "Aggregator", +func (a *Aggregator) GetColumns(ctx *plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + if _, isSourceDerived := a.Source.(*Horizon); isSourceDerived { + return a.Columns, nil + } + + // we update the incoming columns, so we know about any new columns that have been added + // in the optimization phase, other operators could be pushed down resulting in additional columns for aggregator. + // Aggregator should be made aware of these to truncate them in final result. + columns, err := a.Source.GetColumns(ctx) + if err != nil { + return nil, err } + + // if this operator is producing more columns than expected, we want to know about it + if len(columns) > len(a.Columns) { + a.Columns = append(a.Columns, columns[len(a.Columns):]...) + } + + return a.Columns, nil +} + +func (a *Aggregator) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return transformColumnsToSelectExprs(ctx, a) } func (a *Aggregator) ShortDescription() string { - columnns := slices2.Map(a.Columns, func(from *sqlparser.AliasedExpr) string { + columns := slice.Map(a.Columns, func(from *sqlparser.AliasedExpr) string { return sqlparser.String(from) }) + if a.DT != nil { + columns = append([]string{a.DT.String()}, columns...) 
+ } + + org := "" + if a.Original { + org = "ORG " + } if len(a.Grouping) == 0 { - return strings.Join(columnns, ", ") + return fmt.Sprintf("%s%s", org, strings.Join(columns, ", ")) } var grouping []string @@ -183,12 +274,7 @@ func (a *Aggregator) ShortDescription() string { grouping = append(grouping, sqlparser.String(gb.SimplifiedExpr)) } - org := "" - if a.Original { - org = "ORG " - } - - return fmt.Sprintf("%s%s group by %s", org, strings.Join(columnns, ", "), strings.Join(grouping, ",")) + return fmt.Sprintf("%s%s group by %s", org, strings.Join(columns, ", "), strings.Join(grouping, ",")) } func (a *Aggregator) GetOrdering() ([]ops.OrderBy, error) { @@ -196,29 +282,19 @@ func (a *Aggregator) GetOrdering() ([]ops.OrderBy, error) { } func (a *Aggregator) planOffsets(ctx *plancontext.PlanningContext) error { - addColumn := func(aliasedExpr *sqlparser.AliasedExpr, addToGroupBy bool) (int, error) { - newSrc, offset, err := a.Source.AddColumn(ctx, aliasedExpr, true, addToGroupBy) - if err != nil { - return 0, err - } - a.Source = newSrc - if offset == len(a.Columns) { - // if we get an offset at the end of our current column list, it means we added a new column - a.Columns = append(a.Columns, aliasedExpr) - } - return offset, nil + if a.offsetPlanned { + return nil } - + defer func() { + a.offsetPlanned = true + }() if !a.Pushed { - err := a.planOffsetsNotPushed(ctx) - if err != nil { - return err - } + return a.planOffsetsNotPushed(ctx) } for idx, gb := range a.Grouping { if gb.ColOffset == -1 { - offset, err := addColumn(aeWrap(gb.Inner), false) + offset, err := a.internalAddColumn(ctx, aeWrap(gb.Inner), false) if err != nil { return err } @@ -228,7 +304,7 @@ func (a *Aggregator) planOffsets(ctx *plancontext.PlanningContext) error { continue } - offset, err := addColumn(aeWrap(weightStringFor(gb.SimplifiedExpr)), true) + offset, err := a.internalAddColumn(ctx, aeWrap(weightStringFor(gb.SimplifiedExpr)), true) if err != nil { return err } @@ -236,10 +312,10 @@ 
func (a *Aggregator) planOffsets(ctx *plancontext.PlanningContext) error { } for idx, aggr := range a.Aggregations { - if !aggr.NeedWeightString(ctx) { + if !aggr.NeedsWeightString(ctx) { continue } - offset, err := addColumn(aeWrap(weightStringFor(aggr.Func.GetArg())), true) + offset, err := a.internalAddColumn(ctx, aeWrap(weightStringFor(aggr.Func.GetArg())), true) if err != nil { return err } @@ -249,32 +325,35 @@ func (a *Aggregator) planOffsets(ctx *plancontext.PlanningContext) error { return nil } -func (aggr Aggr) getPushDownColumn() sqlparser.Expr { +func (aggr Aggr) getPushColumn() sqlparser.Expr { switch aggr.OpCode { - case opcode.AggregateRandom: + case opcode.AggregateAnyValue: return aggr.Original.Expr case opcode.AggregateCountStar: return sqlparser.NewIntLiteral("1") + case opcode.AggregateGroupConcat: + if len(aggr.Func.GetArgs()) > 1 { + panic("more than 1 column") + } + fallthrough default: return aggr.Func.GetArg() } } func (a *Aggregator) planOffsetsNotPushed(ctx *plancontext.PlanningContext) error { + a.Source = newAliasedProjection(a.Source) // we need to keep things in the column order, so we can't iterate over the aggregations or groupings - for colIdx, col := range a.Columns { - idx, err := a.addIfGroupingColumn(ctx, col) + for colIdx := range a.Columns { + idx, err := a.addIfGroupingColumn(ctx, colIdx) if err != nil { return err } if idx >= 0 { - if idx != colIdx { - return vterrors.VT13001(fmt.Sprintf("grouping column on wrong index: want: %d, got: %d", colIdx, idx)) - } continue } - idx, err = a.addIfAggregationColumn(ctx, col) + idx, err = a.addIfAggregationColumn(ctx, colIdx) if err != nil { return err } @@ -283,52 +362,120 @@ func (a *Aggregator) planOffsetsNotPushed(ctx *plancontext.PlanningContext) erro return vterrors.VT13001("failed to find the corresponding column") } } - return nil + + return a.pushRemainingGroupingColumnsAndWeightStrings(ctx) } -func (a *Aggregator) addIfAggregationColumn(ctx *plancontext.PlanningContext, col 
*sqlparser.AliasedExpr) (int, error) { - for aggIdx, aggr := range a.Aggregations { - if !ctx.SemTable.EqualsExprWithDeps(col.Expr, aggr.Original.Expr) { +func (a *Aggregator) addIfAggregationColumn(ctx *plancontext.PlanningContext, colIdx int) (int, error) { + for _, aggr := range a.Aggregations { + if aggr.ColOffset != colIdx { continue } - newSrc, offset, err := a.Source.AddColumn(ctx, aeWrap(aggr.getPushDownColumn()), false, false) + wrap := aeWrap(aggr.getPushColumn()) + offset, err := a.Source.AddColumn(ctx, false, false, wrap) if err != nil { return 0, err } - a.Aggregations[aggIdx].ColOffset = offset - a.Source = newSrc + if aggr.ColOffset != offset { + return -1, errFailedToPlan(aggr.Original) + } + return offset, nil } return -1, nil } -func (a *Aggregator) addIfGroupingColumn(ctx *plancontext.PlanningContext, col *sqlparser.AliasedExpr) (int, error) { - for gbIdx, gb := range a.Grouping { - if !ctx.SemTable.EqualsExprWithDeps(col.Expr, gb.SimplifiedExpr) { +func errFailedToPlan(original *sqlparser.AliasedExpr) *vterrors.VitessError { + return vterrors.VT12001(fmt.Sprintf("failed to plan aggregation on: %s", sqlparser.String(original))) +} + +func (a *Aggregator) addIfGroupingColumn(ctx *plancontext.PlanningContext, colIdx int) (int, error) { + for _, gb := range a.Grouping { + if gb.ColOffset != colIdx { continue } - newSrc, offset, err := a.Source.AddColumn(ctx, col, false, false) + expr := a.Columns[colIdx] + offset, err := a.Source.AddColumn(ctx, false, true, expr) if err != nil { - return 0, err + return -1, err } - a.Grouping[gbIdx].ColOffset = offset - a.Source = newSrc - - if !ctx.SemTable.NeedsWeightString(gb.SimplifiedExpr) { - return offset, nil + if gb.ColOffset != offset { + return -1, errFailedToPlan(expr) } - // TODO: we need to do stuff return offset, nil } return -1, nil } +// pushRemainingGroupingColumnsAndWeightStrings pushes any grouping column that is not part of the columns list and weight strings needed for performing grouping 
aggregations. +func (a *Aggregator) pushRemainingGroupingColumnsAndWeightStrings(ctx *plancontext.PlanningContext) error { + for idx, gb := range a.Grouping { + if gb.ColOffset == -1 { + offset, err := a.internalAddColumn(ctx, aeWrap(gb.Inner), false) + if err != nil { + return err + } + a.Grouping[idx].ColOffset = offset + } + if gb.WSOffset != -1 || !ctx.SemTable.NeedsWeightString(gb.SimplifiedExpr) { + continue + } + + offset, err := a.internalAddColumn(ctx, aeWrap(weightStringFor(gb.SimplifiedExpr)), false) + if err != nil { + return err + } + a.Grouping[idx].WSOffset = offset + } + for idx, aggr := range a.Aggregations { + if aggr.WSOffset != -1 || !aggr.NeedsWeightString(ctx) { + continue + } + + offset, err := a.internalAddColumn(ctx, aeWrap(weightStringFor(aggr.Func.GetArg())), false) + if err != nil { + return err + } + a.Aggregations[idx].WSOffset = offset + } + + return nil +} + func (a *Aggregator) setTruncateColumnCount(offset int) { a.ResultColumns = offset } +func (a *Aggregator) internalAddColumn(ctx *plancontext.PlanningContext, aliasedExpr *sqlparser.AliasedExpr, addToGroupBy bool) (int, error) { + offset, err := a.Source.AddColumn(ctx, true, addToGroupBy, aliasedExpr) + if err != nil { + return 0, err + } + + if offset == len(a.Columns) { + // if we get an offset at the end of our current column list, it means we added a new column + a.Columns = append(a.Columns, aliasedExpr) + } + return offset, nil +} + +// SplitAggregatorBelowRoute returns the aggregator that will live under the Route. 
+// This is used when we are splitting the aggregation so one part is done +// at the mysql level and one part at the vtgate level +func (a *Aggregator) SplitAggregatorBelowRoute(input []ops.Operator) *Aggregator { + newOp := a.Clone(input).(*Aggregator) + newOp.Pushed = false + newOp.Original = false + newOp.DT = nil + return newOp +} + +func (a *Aggregator) introducesTableID() semantics.TableSet { + return a.DT.introducesTableID() +} + var _ ops.Operator = (*Aggregator)(nil) diff --git a/go/vt/vtgate/planbuilder/operators/apply_join.go b/go/vt/vtgate/planbuilder/operators/apply_join.go index 6f67e549a7a..5e48fb4d5e3 100644 --- a/go/vt/vtgate/planbuilder/operators/apply_join.go +++ b/go/vt/vtgate/planbuilder/operators/apply_join.go @@ -18,66 +18,73 @@ package operators import ( "fmt" + "maps" + "slices" "strings" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" - - "vitess.io/vitess/go/slices2" + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) -// ApplyJoin is a nested loop join - for each row on the LHS, -// we'll execute the plan on the RHS, feeding data from left to right -type ApplyJoin struct { - LHS, RHS ops.Operator +type ( + // ApplyJoin is a nested loop join - for each row on the LHS, + // we'll execute the plan on the RHS, feeding data from left to right + ApplyJoin struct { + LHS, RHS ops.Operator - // LeftJoin will be true in the case of an outer join - LeftJoin bool + // LeftJoin will be true in the case of an outer join + LeftJoin bool - // JoinCols are the columns from the LHS used for the join. 
- // These are the same columns pushed on the LHS that are now used in the Vars field - LHSColumns []*sqlparser.ColName + // Before offset planning + Predicate sqlparser.Expr - // Before offset planning - Predicate sqlparser.Expr + // JoinColumns keeps track of what AST expression is represented in the Columns array + JoinColumns []JoinColumn - // ColumnsAST keeps track of what AST expression is represented in the Columns array - ColumnsAST []JoinColumn + // JoinPredicates are join predicates that have been broken up into left hand side and right hand side parts. + JoinPredicates []JoinColumn - // JoinPredicates are join predicates that have been broken up into left hand side and right hand side parts. - JoinPredicates []JoinColumn + // ExtraVars are columns we need to copy from left to right not needed by any predicates or projections, + // these are needed by other operators further down the right hand side of the join + ExtraLHSVars []BindVarExpr - // After offset planning + // After offset planning - // Columns stores the column indexes of the columns coming from the left and right side - // negative value comes from LHS and positive from RHS - Columns []int + // Columns stores the column indexes of the columns coming from the left and right side + // negative value comes from LHS and positive from RHS + Columns []int - // Vars are the arguments that need to be copied from the LHS to the RHS - Vars map[string]int -} + // Vars are the arguments that need to be copied from the LHS to the RHS + Vars map[string]int + } -// JoinColumn is where we store information about columns passing through the join operator -// It can be in one of three possible configurations: -// - Pure left -// We are projecting a column that comes from the left. The RHSExpr will be nil for these -// - Pure right -// We are projecting a column that comes from the right. 
The LHSExprs will be empty for these -// - Mix of data from left and right -// Here we need to transmit columns from the LHS to the RHS, -// so they can be used for the result of this expression that is using data from both sides. -// All fields will be used for these -type JoinColumn struct { - Original *sqlparser.AliasedExpr // this is the original expression being passed through - BvNames []string // the BvNames and LHSCols line up - LHSExprs []sqlparser.Expr - RHSExpr sqlparser.Expr - GroupBy bool // if this is true, we need to push this down to our inputs with addToGroupBy set to true -} + // JoinColumn is where we store information about columns passing through the join operator + // It can be in one of three possible configurations: + // - Pure left + // We are projecting a column that comes from the left. The RHSExpr will be nil for these + // - Pure right + // We are projecting a column that comes from the right. The LHSExprs will be empty for these + // - Mix of data from left and right + // Here we need to transmit columns from the LHS to the RHS, + // so they can be used for the result of this expression that is using data from both sides. + // All fields will be used for these + JoinColumn struct { + Original *sqlparser.AliasedExpr // this is the original expression being passed through + LHSExprs []BindVarExpr + RHSExpr sqlparser.Expr + GroupBy bool // if this is true, we need to push this down to our inputs with addToGroupBy set to true + } + + // BindVarExpr is an expression needed from one side of a join/subquery, and the argument name for it. + // TODO: Do we really need to store the name here? 
it could be found in the semantic state instead + BindVarExpr struct { + Name string + Expr sqlparser.Expr + } +) func NewApplyJoin(lhs, rhs ops.Operator, predicate sqlparser.Expr, leftOuterJoin bool) *ApplyJoin { return &ApplyJoin{ @@ -90,101 +97,100 @@ func NewApplyJoin(lhs, rhs ops.Operator, predicate sqlparser.Expr, leftOuterJoin } // Clone implements the Operator interface -func (a *ApplyJoin) Clone(inputs []ops.Operator) ops.Operator { - return &ApplyJoin{ - LHS: inputs[0], - RHS: inputs[1], - Columns: slices.Clone(a.Columns), - ColumnsAST: slices.Clone(a.ColumnsAST), - JoinPredicates: slices.Clone(a.JoinPredicates), - Vars: maps.Clone(a.Vars), - LeftJoin: a.LeftJoin, - Predicate: sqlparser.CloneExpr(a.Predicate), - LHSColumns: slices.Clone(a.LHSColumns), - } +func (aj *ApplyJoin) Clone(inputs []ops.Operator) ops.Operator { + kopy := *aj + kopy.LHS = inputs[0] + kopy.RHS = inputs[1] + kopy.Columns = slices.Clone(aj.Columns) + kopy.JoinColumns = slices.Clone(aj.JoinColumns) + kopy.JoinPredicates = slices.Clone(aj.JoinPredicates) + kopy.Vars = maps.Clone(aj.Vars) + kopy.Predicate = sqlparser.CloneExpr(aj.Predicate) + kopy.ExtraLHSVars = slices.Clone(aj.ExtraLHSVars) + return &kopy } -func (a *ApplyJoin) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { - return AddPredicate(ctx, a, expr, false, newFilter) +func (aj *ApplyJoin) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { + return AddPredicate(ctx, aj, expr, false, newFilter) } // Inputs implements the Operator interface -func (a *ApplyJoin) Inputs() []ops.Operator { - return []ops.Operator{a.LHS, a.RHS} +func (aj *ApplyJoin) Inputs() []ops.Operator { + return []ops.Operator{aj.LHS, aj.RHS} } // SetInputs implements the Operator interface -func (a *ApplyJoin) SetInputs(inputs []ops.Operator) { - a.LHS, a.RHS = inputs[0], inputs[1] +func (aj *ApplyJoin) SetInputs(inputs []ops.Operator) { + aj.LHS, aj.RHS = inputs[0], 
inputs[1] } -var _ JoinOp = (*ApplyJoin)(nil) - -func (a *ApplyJoin) GetLHS() ops.Operator { - return a.LHS +func (aj *ApplyJoin) GetLHS() ops.Operator { + return aj.LHS } -func (a *ApplyJoin) GetRHS() ops.Operator { - return a.RHS +func (aj *ApplyJoin) GetRHS() ops.Operator { + return aj.RHS } -func (a *ApplyJoin) SetLHS(operator ops.Operator) { - a.LHS = operator +func (aj *ApplyJoin) SetLHS(operator ops.Operator) { + aj.LHS = operator } -func (a *ApplyJoin) SetRHS(operator ops.Operator) { - a.RHS = operator +func (aj *ApplyJoin) SetRHS(operator ops.Operator) { + aj.RHS = operator } -func (a *ApplyJoin) MakeInner() { - a.LeftJoin = false +func (aj *ApplyJoin) MakeInner() { + aj.LeftJoin = false } -func (a *ApplyJoin) IsInner() bool { - return !a.LeftJoin +func (aj *ApplyJoin) IsInner() bool { + return !aj.LeftJoin } -func (a *ApplyJoin) AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) error { - a.Predicate = ctx.SemTable.AndExpressions(expr, a.Predicate) +func (aj *ApplyJoin) AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) error { + aj.Predicate = ctx.SemTable.AndExpressions(expr, aj.Predicate) - col, err := BreakExpressionInLHSandRHS(ctx, expr, TableID(a.LHS)) + col, err := BreakExpressionInLHSandRHS(ctx, expr, TableID(aj.LHS)) if err != nil { return err } - a.JoinPredicates = append(a.JoinPredicates, col) - rhs, err := a.RHS.AddPredicate(ctx, col.RHSExpr) + aj.JoinPredicates = append(aj.JoinPredicates, col) + rhs, err := aj.RHS.AddPredicate(ctx, col.RHSExpr) if err != nil { return err } - a.RHS = rhs + aj.RHS = rhs return nil } -func (a *ApplyJoin) pushColLeft(ctx *plancontext.PlanningContext, e *sqlparser.AliasedExpr, addToGroupBy bool) (int, error) { - newLHS, offset, err := a.LHS.AddColumn(ctx, e, true, addToGroupBy) +func (aj *ApplyJoin) pushColLeft(ctx *plancontext.PlanningContext, e *sqlparser.AliasedExpr, addToGroupBy bool) (int, error) { + offset, err := aj.LHS.AddColumn(ctx, true, addToGroupBy, e) if err 
!= nil { return 0, err } - a.LHS = newLHS return offset, nil } -func (a *ApplyJoin) pushColRight(ctx *plancontext.PlanningContext, e *sqlparser.AliasedExpr, addToGroupBy bool) (int, error) { - newRHS, offset, err := a.RHS.AddColumn(ctx, e, true, addToGroupBy) +func (aj *ApplyJoin) pushColRight(ctx *plancontext.PlanningContext, e *sqlparser.AliasedExpr, addToGroupBy bool) (int, error) { + offset, err := aj.RHS.AddColumn(ctx, true, addToGroupBy, e) if err != nil { return 0, err } - a.RHS = newRHS return offset, nil } -func (a *ApplyJoin) GetColumns() ([]*sqlparser.AliasedExpr, error) { - return slices2.Map(a.ColumnsAST, joinColumnToAliasedExpr), nil +func (aj *ApplyJoin) GetColumns(*plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return slice.Map(aj.JoinColumns, joinColumnToAliasedExpr), nil +} + +func (aj *ApplyJoin) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return transformColumnsToSelectExprs(ctx, aj) } -func (a *ApplyJoin) GetOrdering() ([]ops.OrderBy, error) { - return a.LHS.GetOrdering() +func (aj *ApplyJoin) GetOrdering() ([]ops.OrderBy, error) { + return aj.LHS.GetOrdering() } func joinColumnToAliasedExpr(c JoinColumn) *sqlparser.AliasedExpr { @@ -195,24 +201,23 @@ func joinColumnToExpr(column JoinColumn) sqlparser.Expr { return column.Original.Expr } -func (a *ApplyJoin) getJoinColumnFor(ctx *plancontext.PlanningContext, e *sqlparser.AliasedExpr, addToGroupBy bool) (col JoinColumn, err error) { +func (aj *ApplyJoin) getJoinColumnFor(ctx *plancontext.PlanningContext, orig *sqlparser.AliasedExpr, e sqlparser.Expr, addToGroupBy bool) (col JoinColumn, err error) { defer func() { - col.Original = e + col.Original = orig }() - lhs := TableID(a.LHS) - rhs := TableID(a.RHS) + lhs := TableID(aj.LHS) + rhs := TableID(aj.RHS) both := lhs.Merge(rhs) - expr := e.Expr - deps := ctx.SemTable.RecursiveDeps(expr) + deps := ctx.SemTable.RecursiveDeps(e) col.GroupBy = addToGroupBy switch { case deps.IsSolvedBy(lhs): 
- col.LHSExprs = []sqlparser.Expr{expr} + col.LHSExprs = []BindVarExpr{{Expr: e}} case deps.IsSolvedBy(rhs): - col.RHSExpr = expr + col.RHSExpr = e case deps.IsSolvedBy(both): - col, err = BreakExpressionInLHSandRHS(ctx, expr, TableID(a.LHS)) + col, err = BreakExpressionInLHSandRHS(ctx, e, TableID(aj.LHS)) if err != nil { return JoinColumn{}, err } @@ -223,91 +228,183 @@ func (a *ApplyJoin) getJoinColumnFor(ctx *plancontext.PlanningContext, e *sqlpar return } -func (a *ApplyJoin) AddColumn(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, _, addToGroupBy bool) (ops.Operator, int, error) { - if offset, found := canReuseColumn(ctx, a.ColumnsAST, expr.Expr, joinColumnToExpr); found { - return a, offset, nil +func (aj *ApplyJoin) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, _ bool) (int, error) { + offset, found := canReuseColumn(ctx, aj.JoinColumns, expr, joinColumnToExpr) + if !found { + return -1, nil } - col, err := a.getJoinColumnFor(ctx, expr, addToGroupBy) + return offset, nil +} + +func (aj *ApplyJoin) AddColumn( + ctx *plancontext.PlanningContext, + reuse bool, + groupBy bool, + expr *sqlparser.AliasedExpr, +) (int, error) { + if reuse { + offset, err := aj.FindCol(ctx, expr.Expr, false) + if err != nil { + return 0, err + } + if offset != -1 { + return offset, nil + } + } + col, err := aj.getJoinColumnFor(ctx, expr, expr.Expr, groupBy) if err != nil { - return nil, 0, err + return 0, err } - a.ColumnsAST = append(a.ColumnsAST, col) - return a, len(a.ColumnsAST) - 1, nil + offset := len(aj.JoinColumns) + aj.JoinColumns = append(aj.JoinColumns, col) + return offset, nil } -func (a *ApplyJoin) planOffsets(ctx *plancontext.PlanningContext) (err error) { - for _, col := range a.ColumnsAST { +func (aj *ApplyJoin) planOffsets(ctx *plancontext.PlanningContext) (err error) { + for _, col := range aj.JoinColumns { // Read the type description for JoinColumn to understand the following code - for i, lhsExpr := range col.LHSExprs { - 
offset, err := a.pushColLeft(ctx, aeWrap(lhsExpr), col.GroupBy) + for _, lhsExpr := range col.LHSExprs { + offset, err := aj.pushColLeft(ctx, aeWrap(lhsExpr.Expr), col.GroupBy) if err != nil { return err } if col.RHSExpr == nil { // if we don't have an RHS expr, it means that this is a pure LHS expression - a.addOffset(-offset - 1) + aj.addOffset(-offset - 1) } else { - a.Vars[col.BvNames[i]] = offset + aj.Vars[lhsExpr.Name] = offset } } if col.RHSExpr != nil { - offset, err := a.pushColRight(ctx, aeWrap(col.RHSExpr), col.GroupBy) + offset, err := aj.pushColRight(ctx, aeWrap(col.RHSExpr), col.GroupBy) if err != nil { return err } - a.addOffset(offset + 1) + aj.addOffset(offset + 1) } } - for _, col := range a.JoinPredicates { - for i, lhsExpr := range col.LHSExprs { - offset, err := a.pushColLeft(ctx, aeWrap(lhsExpr), false) + for _, col := range aj.JoinPredicates { + for _, lhsExpr := range col.LHSExprs { + offset, err := aj.pushColLeft(ctx, aeWrap(lhsExpr.Expr), false) if err != nil { return err } - a.Vars[col.BvNames[i]] = offset + aj.Vars[lhsExpr.Name] = offset } - lhsColumns := slices2.Map(col.LHSExprs, func(from sqlparser.Expr) *sqlparser.ColName { - col, ok := from.(*sqlparser.ColName) - if !ok { - // todo: there is no good reason to keep this limitation around - err = vterrors.VT13001("joins can only compare columns: %s", sqlparser.String(from)) - } - return col - }) if err != nil { return err } - a.LHSColumns = append(a.LHSColumns, lhsColumns...) 
+ } + + for _, lhsExpr := range aj.ExtraLHSVars { + offset, err := aj.pushColLeft(ctx, aeWrap(lhsExpr.Expr), false) + if err != nil { + return err + } + aj.Vars[lhsExpr.Name] = offset } return nil } -func (a *ApplyJoin) addOffset(offset int) { - a.Columns = append(a.Columns, offset) +func (aj *ApplyJoin) addOffset(offset int) { + aj.Columns = append(aj.Columns, offset) +} + +func (aj *ApplyJoin) ShortDescription() string { + pred := sqlparser.String(aj.Predicate) + columns := slice.Map(aj.JoinColumns, func(from JoinColumn) string { + return sqlparser.String(from.Original) + }) + firstPart := fmt.Sprintf("on %s columns: %s", pred, strings.Join(columns, ", ")) + if len(aj.ExtraLHSVars) == 0 { + return firstPart + } + extraCols := slice.Map(aj.ExtraLHSVars, func(s BindVarExpr) string { return s.String() }) + + return firstPart + " extra: " + strings.Join(extraCols, ", ") } -func (a *ApplyJoin) Description() ops.OpDescription { - other := map[string]any{} - if len(a.Columns) > 0 { - other["OutputColumns"] = a.Columns +func (aj *ApplyJoin) isColNameMovedFromL2R(bindVarName string) bool { + for _, jc := range aj.JoinColumns { + for _, bve := range jc.LHSExprs { + if bve.Name == bindVarName { + return true + } + } } - if a.Predicate != nil { - other["Predicate"] = sqlparser.String(a.Predicate) + for _, jp := range aj.JoinPredicates { + for _, bve := range jp.LHSExprs { + if bve.Name == bindVarName { + return true + } + } } - return ops.OpDescription{ - OperatorType: "Join", - Variant: "Apply", - Other: other, + for _, bve := range aj.ExtraLHSVars { + if bve.Name == bindVarName { + return true + } } + return false } -func (a *ApplyJoin) ShortDescription() string { - pred := sqlparser.String(a.Predicate) - columns := slices2.Map(a.ColumnsAST, func(from JoinColumn) string { - return sqlparser.String(from.Original) +// findOrAddColNameBindVarName goes through the JoinColumns and looks for the given colName coming from the LHS of the join +// and returns the argument name if 
found. if it's not found, a new JoinColumn passing this through will be added +func (aj *ApplyJoin) findOrAddColNameBindVarName(ctx *plancontext.PlanningContext, col *sqlparser.ColName) (string, error) { + for i, thisCol := range aj.JoinColumns { + idx := slices.IndexFunc(thisCol.LHSExprs, func(e BindVarExpr) bool { + return ctx.SemTable.EqualsExpr(e.Expr, col) + }) + + if idx != -1 { + if len(thisCol.LHSExprs) == 1 && thisCol.RHSExpr == nil { + // this is a ColName that was not being sent to the RHS, so it has no bindvar name. + // let's add one. + expr := thisCol.LHSExprs[idx] + bvname := ctx.GetReservedArgumentFor(expr.Expr) + expr.Name = bvname + aj.JoinColumns[i].LHSExprs[idx] = expr + } + return thisCol.LHSExprs[idx].Name, nil + } + } + for _, thisCol := range aj.JoinPredicates { + idx := slices.IndexFunc(thisCol.LHSExprs, func(e BindVarExpr) bool { + return ctx.SemTable.EqualsExpr(e.Expr, col) + }) + if idx != -1 { + return thisCol.LHSExprs[idx].Name, nil + } + } + + idx := slices.IndexFunc(aj.ExtraLHSVars, func(e BindVarExpr) bool { + return ctx.SemTable.EqualsExpr(e.Expr, col) + }) + if idx != -1 { + return aj.ExtraLHSVars[idx].Name, nil + } + + // we didn't find it, so we need to add it + bvName := ctx.GetReservedArgumentFor(col) + aj.ExtraLHSVars = append(aj.ExtraLHSVars, BindVarExpr{ + Name: bvName, + Expr: col, }) - return fmt.Sprintf("on %s columns: %s", pred, strings.Join(columns, ", ")) + return bvName, nil +} + +func (a *ApplyJoin) LHSColumnsNeeded(ctx *plancontext.PlanningContext) (needed sqlparser.Exprs) { + f := func(from BindVarExpr) sqlparser.Expr { + return from.Expr + } + for _, jc := range a.JoinColumns { + needed = append(needed, slice.Map(jc.LHSExprs, f)...) + } + for _, jc := range a.JoinPredicates { + needed = append(needed, slice.Map(jc.LHSExprs, f)...) + } + needed = append(needed, slice.Map(a.ExtraLHSVars, f)...) 
+ return ctx.SemTable.Uniquify(needed) } func (jc JoinColumn) IsPureLeft() bool { @@ -321,3 +418,11 @@ func (jc JoinColumn) IsPureRight() bool { func (jc JoinColumn) IsMixedLeftAndRight() bool { return len(jc.LHSExprs) > 0 && jc.RHSExpr != nil } + +func (bve BindVarExpr) String() string { + if bve.Name == "" { + return sqlparser.String(bve.Expr) + } + + return fmt.Sprintf(":%s|`%s`", bve.Name, sqlparser.String(bve.Expr)) +} diff --git a/go/vt/vtgate/planbuilder/operators/ast2op_test.go b/go/vt/vtgate/planbuilder/operators/ast2op_test.go new file mode 100644 index 00000000000..4dbcf49e80a --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/ast2op_test.go @@ -0,0 +1,205 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +// Test_fkNeedsHandlingForUpdates tests the functionality of the function fkNeedsHandlingForUpdates. +// It verifies the different cases in which foreign key handling is required on vtgate level. 
+func Test_fkNeedsHandlingForUpdates(t *testing.T) { + t1 := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: &vindexes.Keyspace{Name: "ks"}, + } + t2 := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t2"), + Keyspace: &vindexes.Keyspace{Name: "ks2"}, + } + t3 := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t3"), + Keyspace: &vindexes.Keyspace{Name: "ks"}, + } + + tests := []struct { + name string + verifyAllFks bool + parentFkToIgnore string + updateExprs sqlparser.UpdateExprs + parentFks []vindexes.ParentFKInfo + childFks []vindexes.ChildFKInfo + parentFKsWanted []bool + childFKsWanted []bool + }{{ + name: "No Fks filtered", + updateExprs: sqlparser.UpdateExprs{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("a"), Expr: sqlparser.NewIntLiteral("1")}, + }, + childFks: []vindexes.ChildFKInfo{ + {Table: t2, ParentColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFks: []vindexes.ParentFKInfo{ + {Table: t2, ChildColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFKsWanted: []bool{true}, + childFKsWanted: []bool{true}, + }, { + name: "Child Fks filtering", + updateExprs: sqlparser.UpdateExprs{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("a"), Expr: sqlparser.NewIntLiteral("1")}, + }, + childFks: []vindexes.ChildFKInfo{ + {Table: t2, ParentColumns: sqlparser.MakeColumns("b", "a", "c")}, + {Table: t2, ParentColumns: sqlparser.MakeColumns("d", "c")}, + }, + parentFks: []vindexes.ParentFKInfo{ + {Table: t2, ChildColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFKsWanted: []bool{true}, + childFKsWanted: []bool{true, false}, + }, { + name: "Parent Fks filtered based on columns", + updateExprs: sqlparser.UpdateExprs{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("a"), Expr: sqlparser.NewIntLiteral("1")}, + }, + childFks: []vindexes.ChildFKInfo{ + {Table: t2, ParentColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFks: []vindexes.ParentFKInfo{ + {Table: t2, ChildColumns: 
sqlparser.MakeColumns("b", "a", "c")}, + {Table: t2, ChildColumns: sqlparser.MakeColumns("d", "b")}, + }, + parentFKsWanted: []bool{true, false}, + childFKsWanted: []bool{true}, + }, { + name: "Parent Fks filtered because all null values", + updateExprs: sqlparser.UpdateExprs{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("a"), Expr: &sqlparser.NullVal{}}, + }, + childFks: []vindexes.ChildFKInfo{ + {Table: t2, ParentColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFks: []vindexes.ParentFKInfo{ + {Table: t2, ChildColumns: sqlparser.MakeColumns("b", "a", "c")}, + {Table: t2, ChildColumns: sqlparser.MakeColumns("a", "b")}, + }, + parentFKsWanted: []bool{false, false}, + childFKsWanted: []bool{true}, + }, { + name: "Parent Fks filtered because some column has null values", + updateExprs: sqlparser.UpdateExprs{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("a"), Expr: sqlparser.NewIntLiteral("1")}, + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("c"), Expr: &sqlparser.NullVal{}}, + }, + childFks: []vindexes.ChildFKInfo{ + {Table: t2, ParentColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFks: []vindexes.ParentFKInfo{ + {Table: t2, ChildColumns: sqlparser.MakeColumns("b", "a", "c")}, + {Table: t2, ChildColumns: sqlparser.MakeColumns("a", "b")}, + {Table: t3, ChildColumns: sqlparser.MakeColumns("a", "b")}, + }, + parentFKsWanted: []bool{false, true, false}, + childFKsWanted: []bool{true}, + }, { + name: "Unsharded fk with verifyAllFk", + verifyAllFks: true, + updateExprs: sqlparser.UpdateExprs{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("a"), Expr: sqlparser.NewIntLiteral("1")}, + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("c"), Expr: &sqlparser.NullVal{}}, + }, + childFks: []vindexes.ChildFKInfo{ + {Table: t2, ParentColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFks: []vindexes.ParentFKInfo{ + {Table: t2, ChildColumns: sqlparser.MakeColumns("b", "a", "c")}, + {Table: t2, ChildColumns: 
sqlparser.MakeColumns("a", "b")}, + {Table: t3, ChildColumns: sqlparser.MakeColumns("a", "b")}, + {Table: t3, ChildColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFKsWanted: []bool{false, true, true, false}, + childFKsWanted: []bool{true}, + }, { + name: "Mixed case", + verifyAllFks: true, + parentFkToIgnore: "ks.t1abks.t3", + updateExprs: sqlparser.UpdateExprs{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("a"), Expr: sqlparser.NewIntLiteral("1")}, + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("c"), Expr: &sqlparser.NullVal{}}, + }, + childFks: []vindexes.ChildFKInfo{ + {Table: t2, ParentColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFks: []vindexes.ParentFKInfo{ + {Table: t2, ChildColumns: sqlparser.MakeColumns("b", "a", "c")}, + {Table: t2, ChildColumns: sqlparser.MakeColumns("a", "b")}, + {Table: t3, ChildColumns: sqlparser.MakeColumns("a", "b")}, + {Table: t3, ChildColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFKsWanted: []bool{false, true, false, false}, + childFKsWanted: []bool{true}, + }, { + name: "Ignore Fk specified", + parentFkToIgnore: "ks.t1aefks2.t2", + updateExprs: sqlparser.UpdateExprs{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("a"), Expr: sqlparser.NewIntLiteral("1")}, + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("c"), Expr: &sqlparser.NullVal{}}, + }, + childFks: []vindexes.ChildFKInfo{ + {Table: t2, ParentColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFks: []vindexes.ParentFKInfo{ + {Table: t2, ChildColumns: sqlparser.MakeColumns("b", "a", "c")}, + {Table: t2, ChildColumns: sqlparser.MakeColumns("a", "b")}, + {Table: t2, ChildColumns: sqlparser.MakeColumns("a", "e", "f")}, + }, + parentFKsWanted: []bool{false, true, false}, + childFKsWanted: []bool{true}, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t1.ParentForeignKeys = tt.parentFks + t1.ChildForeignKeys = tt.childFks + ctx := &plancontext.PlanningContext{ + VerifyAllFKs: 
tt.verifyAllFks, + ParentFKToIgnore: tt.parentFkToIgnore, + } + parentFksGot, childFksGot := getFKRequirementsForUpdate(ctx, tt.updateExprs, t1) + var pFks []vindexes.ParentFKInfo + for idx, expected := range tt.parentFKsWanted { + if expected { + pFks = append(pFks, tt.parentFks[idx]) + } + } + var cFks []vindexes.ChildFKInfo + for idx, expected := range tt.childFKsWanted { + if expected { + cFks = append(cFks, tt.childFks[idx]) + } + } + require.EqualValues(t, pFks, parentFksGot) + require.EqualValues(t, cFks, childFksGot) + }) + } +} diff --git a/go/vt/vtgate/planbuilder/operators/ast_to_op.go b/go/vt/vtgate/planbuilder/operators/ast_to_op.go new file mode 100644 index 00000000000..e7628edacc5 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/ast_to_op.go @@ -0,0 +1,401 @@ +/* +Copyright 2022 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package operators + +import ( + "fmt" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +const foreignKeyConstraintValues = "fkc_vals" + +// translateQueryToOp creates an operator tree that represents the input SELECT or UNION query +func translateQueryToOp(ctx *plancontext.PlanningContext, selStmt sqlparser.Statement) (op ops.Operator, err error) { + switch node := selStmt.(type) { + case *sqlparser.Select: + op, err = createOperatorFromSelect(ctx, node) + case *sqlparser.Union: + op, err = createOperatorFromUnion(ctx, node) + case *sqlparser.Update: + op, err = createOperatorFromUpdate(ctx, node) + case *sqlparser.Delete: + op, err = createOperatorFromDelete(ctx, node) + case *sqlparser.Insert: + op, err = createOperatorFromInsert(ctx, node) + default: + err = vterrors.VT12001(fmt.Sprintf("operator: %T", selStmt)) + } + if err != nil { + return nil, err + } + + return op, nil +} + +func createOperatorFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select) (ops.Operator, error) { + op, err := crossJoin(ctx, sel.From) + if err != nil { + return nil, err + } + + if sel.Where != nil { + op, err = addWherePredicates(ctx, sel.Where.Expr, op) + if err != nil { + return nil, err + } + } + + if sel.Comments != nil || sel.Lock != sqlparser.NoLock { + op = &LockAndComment{ + Source: op, + Comments: sel.Comments, + Lock: sel.Lock, + } + } + + op = newHorizon(op, sel) + + return op, nil +} + +func addWherePredicates(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op ops.Operator) (ops.Operator, error) { + sqc := &SubQueryBuilder{} + outerID := TableID(op) + exprs := sqlparser.SplitAndExpression(nil, expr) + for _, expr := range exprs { + sqlparser.RemoveKeyspaceFromColName(expr) + subq, err := sqc.handleSubquery(ctx, 
expr, outerID) + if err != nil { + return nil, err + } + if subq != nil { + continue + } + op, err = op.AddPredicate(ctx, expr) + if err != nil { + return nil, err + } + addColumnEquality(ctx, expr) + } + return sqc.getRootOperator(op), nil +} + +// cloneASTAndSemState clones the AST and the semantic state of the input node. +func cloneASTAndSemState[T sqlparser.SQLNode](ctx *plancontext.PlanningContext, original T) T { + return sqlparser.CopyOnRewrite(original, nil, func(cursor *sqlparser.CopyOnWriteCursor) { + e, ok := cursor.Node().(sqlparser.Expr) + if !ok { + return + } + cursor.Replace(e) // We do this only to trigger the cloning of the AST + }, ctx.SemTable.CopySemanticInfo).(T) +} + +// findTablesContained returns the TableSet of all the contained +func findTablesContained(ctx *plancontext.PlanningContext, node sqlparser.SQLNode) (result semantics.TableSet) { + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + t, ok := node.(*sqlparser.AliasedTableExpr) + if !ok { + return true, nil + } + ts := ctx.SemTable.TableSetFor(t) + result = result.Merge(ts) + return true, nil + }, node) + return +} + +func rewriteRemainingColumns( + ctx *plancontext.PlanningContext, + stmt sqlparser.SelectStatement, + subqID semantics.TableSet, +) sqlparser.SelectStatement { + return sqlparser.CopyOnRewrite(stmt, nil, func(cursor *sqlparser.CopyOnWriteCursor) { + colname, isColname := cursor.Node().(*sqlparser.ColName) + if !isColname { + return + } + deps := ctx.SemTable.RecursiveDeps(colname) + if deps.IsSolvedBy(subqID) { + return + } + rsv := ctx.GetReservedArgumentFor(colname) + cursor.Replace(sqlparser.NewArgument(rsv)) + }, nil).(sqlparser.SelectStatement) +} + +// joinPredicateCollector is used to inspect the predicates inside the subquery, looking for any +// comparisons between the inner and the outer side. 
+// They can be used for merging the two parts of the query together +type joinPredicateCollector struct { + predicates sqlparser.Exprs + remainingPredicates sqlparser.Exprs + joinColumns []JoinColumn + + totalID, + subqID, + outerID semantics.TableSet +} + +func (jpc *joinPredicateCollector) inspectPredicate( + ctx *plancontext.PlanningContext, + predicate sqlparser.Expr, +) error { + pred := predicate + deps := ctx.SemTable.RecursiveDeps(predicate) + // if the subquery is not enough, but together we have all we need, + // then we can use this predicate to connect the subquery to the outer query + if !deps.IsSolvedBy(jpc.subqID) && deps.IsSolvedBy(jpc.totalID) { + jpc.predicates = append(jpc.predicates, predicate) + jc, err := BreakExpressionInLHSandRHS(ctx, predicate, jpc.outerID) + if err != nil { + return err + } + jpc.joinColumns = append(jpc.joinColumns, jc) + pred = jc.RHSExpr + } + jpc.remainingPredicates = append(jpc.remainingPredicates, pred) + return nil +} + +func createOperatorFromUnion(ctx *plancontext.PlanningContext, node *sqlparser.Union) (ops.Operator, error) { + opLHS, err := translateQueryToOp(ctx, node.Left) + if err != nil { + return nil, err + } + + _, isRHSUnion := node.Right.(*sqlparser.Union) + if isRHSUnion { + return nil, vterrors.VT12001("nesting of UNIONs on the right-hand side") + } + opRHS, err := translateQueryToOp(ctx, node.Right) + if err != nil { + return nil, err + } + + lexprs := ctx.SemTable.SelectExprs(node.Left) + rexprs := ctx.SemTable.SelectExprs(node.Right) + + unionCols := ctx.SemTable.SelectExprs(node) + union := newUnion([]ops.Operator{opLHS, opRHS}, []sqlparser.SelectExprs{lexprs, rexprs}, unionCols, node.Distinct) + return newHorizon(union, node), nil +} + +// createOpFromStmt creates an operator from the given statement. It takes in two additional arguments— +// 1. verifyAllFKs: For this given statement, do we need to verify validity of all the foreign keys on the vtgate level. +// 2. 
fkToIgnore: The foreign key constraint to specifically ignore while planning the statement. +func createOpFromStmt(ctx *plancontext.PlanningContext, stmt sqlparser.Statement, verifyAllFKs bool, fkToIgnore string) (ops.Operator, error) { + newCtx, err := plancontext.CreatePlanningContext(stmt, ctx.ReservedVars, ctx.VSchema, ctx.PlannerVersion) + if err != nil { + return nil, err + } + + newCtx.VerifyAllFKs = verifyAllFKs + newCtx.ParentFKToIgnore = fkToIgnore + + return PlanQuery(newCtx, stmt) +} + +func getOperatorFromTableExpr(ctx *plancontext.PlanningContext, tableExpr sqlparser.TableExpr, onlyTable bool) (ops.Operator, error) { + switch tableExpr := tableExpr.(type) { + case *sqlparser.AliasedTableExpr: + return getOperatorFromAliasedTableExpr(ctx, tableExpr, onlyTable) + case *sqlparser.JoinTableExpr: + return getOperatorFromJoinTableExpr(ctx, tableExpr) + case *sqlparser.ParenTableExpr: + return crossJoin(ctx, tableExpr.Exprs) + default: + return nil, vterrors.VT13001(fmt.Sprintf("unable to use: %T table type", tableExpr)) + } +} + +func getOperatorFromJoinTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.JoinTableExpr) (ops.Operator, error) { + lhs, err := getOperatorFromTableExpr(ctx, tableExpr.LeftExpr, false) + if err != nil { + return nil, err + } + rhs, err := getOperatorFromTableExpr(ctx, tableExpr.RightExpr, false) + if err != nil { + return nil, err + } + + switch tableExpr.Join { + case sqlparser.NormalJoinType: + return createInnerJoin(ctx, tableExpr, lhs, rhs) + case sqlparser.LeftJoinType, sqlparser.RightJoinType: + return createOuterJoin(tableExpr, lhs, rhs) + default: + return nil, vterrors.VT13001("unsupported: %s", tableExpr.Join.ToString()) + } +} + +func getOperatorFromAliasedTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.AliasedTableExpr, onlyTable bool) (ops.Operator, error) { + tableID := ctx.SemTable.TableSetFor(tableExpr) + switch tbl := tableExpr.Expr.(type) { + case sqlparser.TableName: + tableInfo, 
err := ctx.SemTable.TableInfoFor(tableID) + if err != nil { + return nil, err + } + + if vt, isVindex := tableInfo.(*semantics.VindexTable); isVindex { + solves := tableID + return &Vindex{ + Table: VindexTable{ + TableID: tableID, + Alias: tableExpr, + Table: tbl, + VTable: vt.Table.GetVindexTable(), + }, + Vindex: vt.Vindex, + Solved: solves, + }, nil + } + qg := newQueryGraph() + isInfSchema := tableInfo.IsInfSchema() + qt := &QueryTable{Alias: tableExpr, Table: tbl, ID: tableID, IsInfSchema: isInfSchema} + qg.Tables = append(qg.Tables, qt) + return qg, nil + case *sqlparser.DerivedTable: + if onlyTable && tbl.Select.GetLimit() == nil { + tbl.Select.SetOrderBy(nil) + } + + inner, err := translateQueryToOp(ctx, tbl.Select) + if err != nil { + return nil, err + } + if horizon, ok := inner.(*Horizon); ok { + horizon.TableId = &tableID + horizon.Alias = tableExpr.As.String() + horizon.ColumnAliases = tableExpr.Columns + qp, err := CreateQPFromSelectStatement(ctx, tbl.Select) + if err != nil { + return nil, err + } + horizon.QP = qp + } + + return inner, nil + default: + return nil, vterrors.VT13001(fmt.Sprintf("unable to use: %T", tbl)) + } +} + +func crossJoin(ctx *plancontext.PlanningContext, exprs sqlparser.TableExprs) (ops.Operator, error) { + var output ops.Operator + for _, tableExpr := range exprs { + op, err := getOperatorFromTableExpr(ctx, tableExpr, len(exprs) == 1) + if err != nil { + return nil, err + } + if output == nil { + output = op + } else { + output = createJoin(ctx, output, op) + } + } + return output, nil +} + +func createQueryTableForDML(ctx *plancontext.PlanningContext, tableExpr sqlparser.TableExpr, whereClause *sqlparser.Where) (semantics.TableInfo, *QueryTable, error) { + alTbl, ok := tableExpr.(*sqlparser.AliasedTableExpr) + if !ok { + return nil, nil, vterrors.VT13001("expected AliasedTableExpr") + } + tblName, ok := alTbl.Expr.(sqlparser.TableName) + if !ok { + return nil, nil, vterrors.VT13001("expected TableName") + } + + tableID := 
ctx.SemTable.TableSetFor(alTbl) + tableInfo, err := ctx.SemTable.TableInfoFor(tableID) + if err != nil { + return nil, nil, err + } + + if tableInfo.IsInfSchema() { + return nil, nil, vterrors.VT12001("update information schema tables") + } + + var predicates []sqlparser.Expr + if whereClause != nil { + predicates = sqlparser.SplitAndExpression(nil, whereClause.Expr) + } + qt := &QueryTable{ + ID: tableID, + Alias: alTbl, + Table: tblName, + Predicates: predicates, + } + return tableInfo, qt, nil +} + +func addColumnEquality(ctx *plancontext.PlanningContext, expr sqlparser.Expr) { + switch expr := expr.(type) { + case *sqlparser.ComparisonExpr: + if expr.Operator != sqlparser.EqualOp { + return + } + + if left, isCol := expr.Left.(*sqlparser.ColName); isCol { + ctx.SemTable.AddColumnEquality(left, expr.Right) + } + if right, isCol := expr.Right.(*sqlparser.ColName); isCol { + ctx.SemTable.AddColumnEquality(right, expr.Left) + } + } +} + +// createSelectionOp creates the selection operator to select the parent columns for the foreign key constraints. +// The Select statement looks something like this - `SELECT FROM WHERE ` +// TODO (@Harshit, @GuptaManan100): Compress the columns in the SELECT statement, if there are multiple foreign key constraints using the same columns. +func createSelectionOp( + ctx *plancontext.PlanningContext, + selectExprs []sqlparser.SelectExpr, + tableExprs sqlparser.TableExprs, + where *sqlparser.Where, + limit *sqlparser.Limit, + lock sqlparser.Lock, +) (ops.Operator, error) { + selectionStmt := &sqlparser.Select{ + SelectExprs: selectExprs, + From: tableExprs, + Where: where, + Limit: limit, + Lock: lock, + } + // There are no foreign keys to check for a select query, so we can pass anything for verifyAllFKs and fkToIgnore. 
+ return createOpFromStmt(ctx, selectionStmt, false /* verifyAllFKs */, "" /* fkToIgnore */) +} + +func selectParentColumns(fk vindexes.ChildFKInfo, lastOffset int) ([]int, []sqlparser.SelectExpr) { + var cols []int + var exprs []sqlparser.SelectExpr + for _, column := range fk.ParentColumns { + cols = append(cols, lastOffset) + exprs = append(exprs, aeWrap(sqlparser.NewColName(column.String()))) + lastOffset++ + } + return cols, exprs +} diff --git a/go/vt/vtgate/planbuilder/operators/comments.go b/go/vt/vtgate/planbuilder/operators/comments.go new file mode 100644 index 00000000000..46f9e8c7462 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/comments.go @@ -0,0 +1,85 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package operators + +import ( + "slices" + "strings" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" +) + +// LockAndComment contains any comments or locking directives we want on all queries down from this operator +type LockAndComment struct { + Source ops.Operator + Comments *sqlparser.ParsedComments + Lock sqlparser.Lock +} + +func (l *LockAndComment) Clone(inputs []ops.Operator) ops.Operator { + klon := *l + klon.Source = inputs[0] + return &klon +} + +func (l *LockAndComment) Inputs() []ops.Operator { + return []ops.Operator{l.Source} +} + +func (l *LockAndComment) SetInputs(operators []ops.Operator) { + l.Source = operators[0] +} + +func (l *LockAndComment) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { + newSrc, err := l.Source.AddPredicate(ctx, expr) + if err != nil { + return nil, err + } + l.Source = newSrc + return l, nil +} + +func (l *LockAndComment) AddColumn(ctx *plancontext.PlanningContext, reuseExisting bool, addToGroupBy bool, expr *sqlparser.AliasedExpr) (int, error) { + return l.Source.AddColumn(ctx, reuseExisting, addToGroupBy, expr) +} + +func (l *LockAndComment) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) { + return l.Source.FindCol(ctx, expr, underRoute) +} + +func (l *LockAndComment) GetColumns(ctx *plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return l.Source.GetColumns(ctx) +} + +func (l *LockAndComment) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return l.Source.GetSelectExprs(ctx) +} + +func (l *LockAndComment) ShortDescription() string { + s := slices.Clone(l.Comments.GetComments()) + if l.Lock != sqlparser.NoLock { + s = append(s, l.Lock.ToString()) + } + + return strings.Join(s, " ") +} + +func (l *LockAndComment) GetOrdering() ([]ops.OrderBy, error) { + return 
l.Source.GetOrdering() +} diff --git a/go/vt/vtgate/planbuilder/operators/correlated_subquery.go b/go/vt/vtgate/planbuilder/operators/correlated_subquery.go deleted file mode 100644 index 40a8497126a..00000000000 --- a/go/vt/vtgate/planbuilder/operators/correlated_subquery.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2022 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package operators - -import ( - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" -) - -type ( - CorrelatedSubQueryOp struct { - Outer, Inner ops.Operator - Extracted *sqlparser.ExtractedSubquery - - // JoinCols are the columns from the LHS used for the join. 
- // These are the same columns pushed on the LHS that are now used in the Vars field - LHSColumns []*sqlparser.ColName - - // arguments that need to be copied from the outer to inner - Vars map[string]int - - noColumns - noPredicates - } - - SubQueryOp struct { - Outer, Inner ops.Operator - Extracted *sqlparser.ExtractedSubquery - - noColumns - noPredicates - } -) - -// Clone implements the Operator interface -func (s *SubQueryOp) Clone(inputs []ops.Operator) ops.Operator { - result := &SubQueryOp{ - Outer: inputs[0], - Inner: inputs[1], - Extracted: s.Extracted, - } - return result -} - -func (s *SubQueryOp) GetOrdering() ([]ops.OrderBy, error) { - return s.Outer.GetOrdering() -} - -// Inputs implements the Operator interface -func (s *SubQueryOp) Inputs() []ops.Operator { - return []ops.Operator{s.Outer, s.Inner} -} - -// SetInputs implements the Operator interface -func (s *SubQueryOp) SetInputs(ops []ops.Operator) { - s.Outer, s.Inner = ops[0], ops[1] -} - -func (s *SubQueryOp) Description() ops.OpDescription { - return ops.OpDescription{ - OperatorType: "SubQuery", - Variant: "Apply", - } -} - -func (s *SubQueryOp) ShortDescription() string { - return "" -} - -// Clone implements the Operator interface -func (c *CorrelatedSubQueryOp) Clone(inputs []ops.Operator) ops.Operator { - columns := make([]*sqlparser.ColName, len(c.LHSColumns)) - copy(columns, c.LHSColumns) - vars := make(map[string]int, len(c.Vars)) - for k, v := range c.Vars { - vars[k] = v - } - - result := &CorrelatedSubQueryOp{ - Outer: inputs[0], - Inner: inputs[1], - Extracted: c.Extracted, - LHSColumns: columns, - Vars: vars, - } - return result -} - -func (c *CorrelatedSubQueryOp) GetOrdering() ([]ops.OrderBy, error) { - return c.Outer.GetOrdering() -} - -// Inputs implements the Operator interface -func (c *CorrelatedSubQueryOp) Inputs() []ops.Operator { - return []ops.Operator{c.Outer, c.Inner} -} - -// SetInputs implements the Operator interface -func (c *CorrelatedSubQueryOp) SetInputs(ops 
[]ops.Operator) { - c.Outer, c.Inner = ops[0], ops[1] -} - -func (c *CorrelatedSubQueryOp) Description() ops.OpDescription { - return ops.OpDescription{ - OperatorType: "SubQuery", - Variant: "Correlated", - } -} - -func (c *CorrelatedSubQueryOp) ShortDescription() string { - return "" -} diff --git a/go/vt/vtgate/planbuilder/operators/delete.go b/go/vt/vtgate/planbuilder/operators/delete.go index cd4f30f6d01..1c493c18300 100644 --- a/go/vt/vtgate/planbuilder/operators/delete.go +++ b/go/vt/vtgate/planbuilder/operators/delete.go @@ -19,8 +19,12 @@ package operators import ( "fmt" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -37,7 +41,7 @@ type Delete struct { } // Introduces implements the PhysicalOperator interface -func (d *Delete) Introduces() semantics.TableSet { +func (d *Delete) introducesTableID() semantics.TableSet { return d.QTable.ID } @@ -62,12 +66,199 @@ func (d *Delete) GetOrdering() ([]ops.OrderBy, error) { return nil, nil } -func (d *Delete) Description() ops.OpDescription { - return ops.OpDescription{ - OperatorType: "Delete", +func (d *Delete) ShortDescription() string { + return fmt.Sprintf("%s.%s %s", d.VTable.Keyspace.Name, d.VTable.Name.String(), sqlparser.String(d.AST.Where)) +} + +func (d *Delete) Statement() sqlparser.Statement { + return d.AST +} + +func createOperatorFromDelete(ctx *plancontext.PlanningContext, deleteStmt *sqlparser.Delete) (ops.Operator, error) { + tableInfo, qt, err := createQueryTableForDML(ctx, deleteStmt.TableExprs[0], deleteStmt.Where) + if err != nil { + return nil, err + } + + vindexTable, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "delete") + if err != nil { + return nil, err + } + 
+ delClone := sqlparser.CloneRefOfDelete(deleteStmt) + // Create the delete operator first. + delOp, err := createDeleteOperator(ctx, deleteStmt, qt, vindexTable, routing) + if err != nil { + return nil, err + } + + // Now we check for the foreign key mode and make changes if required. + ksMode, err := ctx.VSchema.ForeignKeyMode(vindexTable.Keyspace.Name) + if err != nil { + return nil, err + } + + // Unmanaged foreign-key-mode, we don't need to do anything. + if ksMode != vschemapb.Keyspace_managed { + return delOp, nil + } + + childFks := vindexTable.ChildFKsNeedsHandling(ctx.VerifyAllFKs, vindexes.DeleteAction) + // If there are no foreign key constraints, then we don't need to do anything. + if len(childFks) == 0 { + return delOp, nil + } + // If the delete statement has a limit, we don't support it yet. + if deleteStmt.Limit != nil { + return nil, vterrors.VT12001("foreign keys management at vitess with limit") } + + return createFkCascadeOpForDelete(ctx, delOp, delClone, childFks) } -func (d *Delete) ShortDescription() string { - return fmt.Sprintf("%s.%s %s", d.VTable.Keyspace.Name, d.VTable.Name.String(), sqlparser.String(d.AST.Where)) +func createDeleteOperator( + ctx *plancontext.PlanningContext, + deleteStmt *sqlparser.Delete, + qt *QueryTable, + vindexTable *vindexes.Table, + routing Routing) (ops.Operator, error) { + del := &Delete{ + QTable: qt, + VTable: vindexTable, + AST: deleteStmt, + } + route := &Route{ + Source: del, + Routing: routing, + } + + if !vindexTable.Keyspace.Sharded { + return route, nil + } + + if vindexTable.Pinned != nil { + return route, nil + } + + primaryVindex, vindexAndPredicates, err := getVindexInformation(qt.ID, vindexTable) + if err != nil { + return nil, err + } + + tr, ok := routing.(*ShardedRouting) + if ok { + tr.VindexPreds = vindexAndPredicates + } + + var ovq string + if len(vindexTable.Owned) > 0 { + tblExpr := &sqlparser.AliasedTableExpr{Expr: sqlparser.TableName{Name: vindexTable.Name}, As: qt.Alias.As} + ovq = 
generateOwnedVindexQuery(tblExpr, deleteStmt, vindexTable, primaryVindex.Columns) + } + + del.OwnedVindexQuery = ovq + + sqc := &SubQueryBuilder{} + for _, predicate := range qt.Predicates { + if subq, err := sqc.handleSubquery(ctx, predicate, qt.ID); err != nil { + return nil, err + } else if subq != nil { + continue + } + routing, err = UpdateRoutingLogic(ctx, predicate, routing) + if err != nil { + return nil, err + } + } + + if routing.OpCode() == engine.Scatter && deleteStmt.Limit != nil { + // TODO systay: we should probably check for other op code types - IN could also hit multiple shards (2022-04-07) + return nil, vterrors.VT12001("multi shard DELETE with LIMIT") + } + + return sqc.getRootOperator(route), nil +} + +func createFkCascadeOpForDelete(ctx *plancontext.PlanningContext, parentOp ops.Operator, delStmt *sqlparser.Delete, childFks []vindexes.ChildFKInfo) (ops.Operator, error) { + var fkChildren []*FkChild + var selectExprs []sqlparser.SelectExpr + for _, fk := range childFks { + // Any RESTRICT type foreign keys that arrive here, + // are cross-shard/cross-keyspace RESTRICT cases, which we don't currently support. + if fk.OnDelete.IsRestrict() { + return nil, vterrors.VT12002() + } + + // We need to select all the parent columns for the foreign key constraint, to use in the update of the child table. + cols, exprs := selectParentColumns(fk, len(selectExprs)) + selectExprs = append(selectExprs, exprs...) 
+ + fkChild, err := createFkChildForDelete(ctx, fk, cols) + if err != nil { + return nil, err + } + fkChildren = append(fkChildren, fkChild) + } + selectionOp, err := createSelectionOp(ctx, selectExprs, delStmt.TableExprs, delStmt.Where, nil, sqlparser.ForUpdateLock) + if err != nil { + return nil, err + } + + return &FkCascade{ + Selection: selectionOp, + Children: fkChildren, + Parent: parentOp, + }, nil +} + +func createFkChildForDelete(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, cols []int) (*FkChild, error) { + bvName := ctx.ReservedVars.ReserveVariable(foreignKeyConstraintValues) + + var childStmt sqlparser.Statement + switch fk.OnDelete { + case sqlparser.Cascade: + // We now construct the delete query for the child table. + // The query looks something like this - `DELETE FROM WHERE IN ()` + var valTuple sqlparser.ValTuple + for _, column := range fk.ChildColumns { + valTuple = append(valTuple, sqlparser.NewColName(column.String())) + } + compExpr := sqlparser.NewComparisonExpr(sqlparser.InOp, valTuple, sqlparser.NewListArg(bvName), nil) + childStmt = &sqlparser.Delete{ + TableExprs: []sqlparser.TableExpr{sqlparser.NewAliasedTableExpr(fk.Table.GetTableName(), "")}, + Where: &sqlparser.Where{Type: sqlparser.WhereClause, Expr: compExpr}, + } + case sqlparser.SetNull: + // We now construct the update query for the child table. + // The query looks something like this - `UPDATE SET = NULL [AND = NULL]... 
WHERE IN ()` + var valTuple sqlparser.ValTuple + var updExprs sqlparser.UpdateExprs + for _, column := range fk.ChildColumns { + valTuple = append(valTuple, sqlparser.NewColName(column.String())) + updExprs = append(updExprs, &sqlparser.UpdateExpr{ + Name: sqlparser.NewColName(column.String()), + Expr: &sqlparser.NullVal{}, + }) + } + compExpr := sqlparser.NewComparisonExpr(sqlparser.InOp, valTuple, sqlparser.NewListArg(bvName), nil) + childStmt = &sqlparser.Update{ + Exprs: updExprs, + TableExprs: []sqlparser.TableExpr{sqlparser.NewAliasedTableExpr(fk.Table.GetTableName(), "")}, + Where: &sqlparser.Where{Type: sqlparser.WhereClause, Expr: compExpr}, + } + case sqlparser.SetDefault: + return nil, vterrors.VT09016() + } + + // For the child statement of a DELETE query, we don't need to verify all the FKs on VTgate or ignore any foreign key explicitly. + childOp, err := createOpFromStmt(ctx, childStmt, false /* verifyAllFKs */, "" /* fkToIgnore */) + if err != nil { + return nil, err + } + + return &FkChild{ + BVName: bvName, + Cols: cols, + Op: childOp, + }, nil } diff --git a/go/vt/vtgate/planbuilder/operators/derived.go b/go/vt/vtgate/planbuilder/operators/derived.go deleted file mode 100644 index fd1d57cc63a..00000000000 --- a/go/vt/vtgate/planbuilder/operators/derived.go +++ /dev/null @@ -1,268 +0,0 @@ -/* -Copyright 2022 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package operators - -import ( - "io" - - "golang.org/x/exp/slices" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" -) - -type Derived struct { - Source ops.Operator - TableId semantics.TableSet - - // QP contains the QueryProjection for this op - QP *QueryProjection - - Query sqlparser.SelectStatement - Alias string - ColumnAliases sqlparser.Columns - - // Columns needed to feed other plans - Columns []*sqlparser.ColName - ColumnsOffset []int -} - -// Clone implements the Operator interface -func (d *Derived) Clone(inputs []ops.Operator) ops.Operator { - return &Derived{ - Source: inputs[0], - Query: d.Query, - Alias: d.Alias, - ColumnAliases: sqlparser.CloneColumns(d.ColumnAliases), - Columns: slices.Clone(d.Columns), - ColumnsOffset: slices.Clone(d.ColumnsOffset), - TableId: d.TableId, - } -} - -// findOutputColumn returns the index on which the given name is found in the slice of -// *sqlparser.SelectExprs of the derivedTree. The *sqlparser.SelectExpr must be of type -// *sqlparser.AliasedExpr and match the given name. -// If name is not present but the query's select expressions contain a *sqlparser.StarExpr -// the function will return no error and an index equal to -1. -// If name is not present and the query does not have a *sqlparser.StarExpr, the function -// will return an unknown column error. 
-func (d *Derived) findOutputColumn(name *sqlparser.ColName) (int, error) { - hasStar := false - for j, exp := range sqlparser.GetFirstSelect(d.Query).SelectExprs { - switch exp := exp.(type) { - case *sqlparser.AliasedExpr: - if !exp.As.IsEmpty() && exp.As.Equal(name.Name) { - return j, nil - } - if exp.As.IsEmpty() { - col, ok := exp.Expr.(*sqlparser.ColName) - if !ok { - return 0, vterrors.VT12001("complex expression needs column alias: %s", sqlparser.String(exp)) - } - if name.Name.Equal(col.Name) { - return j, nil - } - } - case *sqlparser.StarExpr: - hasStar = true - } - } - - // we have found a star but no matching *sqlparser.AliasedExpr, thus we return -1 with no error. - if hasStar { - return -1, nil - } - return 0, vterrors.VT03014(name.Name.String(), "field list") -} - -// IsMergeable is not a great name for this function. Suggestions for a better one are welcome! -// This function will return false if the derived table inside it has to run on the vtgate side, and so can't be merged with subqueries -// This logic can also be used to check if this is a derived table that can be had on the left hand side of a vtgate join. 
-// Since vtgate joins are always nested loop joins, we can't execute them on the RHS -// if they do some things, like LIMIT or GROUP BY on wrong columns -func (d *Derived) IsMergeable(ctx *plancontext.PlanningContext) bool { - return isMergeable(ctx, d.Query, d) -} - -// Inputs implements the Operator interface -func (d *Derived) Inputs() []ops.Operator { - return []ops.Operator{d.Source} -} - -// SetInputs implements the Operator interface -func (d *Derived) SetInputs(ops []ops.Operator) { - d.Source = ops[0] -} - -func (d *Derived) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { - if _, isUNion := d.Source.(*Union); isUNion { - // If we have a derived table on top of a UNION, we can let the UNION do the expression rewriting - var err error - d.Source, err = d.Source.AddPredicate(ctx, expr) - return d, err - } - tableInfo, err := ctx.SemTable.TableInfoForExpr(expr) - if err != nil { - if err == semantics.ErrNotSingleTable { - return &Filter{ - Source: d, - Predicates: []sqlparser.Expr{expr}, - }, nil - } - return nil, err - } - - newExpr := semantics.RewriteDerivedTableExpression(expr, tableInfo) - if !canBePushedDownIntoDerived(newExpr) { - // if we have an aggregation, we don't want to push it inside - return &Filter{Source: d, Predicates: []sqlparser.Expr{expr}}, nil - } - d.Source, err = d.Source.AddPredicate(ctx, newExpr) - if err != nil { - return nil, err - } - return d, nil -} - -func canBePushedDownIntoDerived(expr sqlparser.Expr) (canBePushed bool) { - canBePushed = true - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - switch node.(type) { - case *sqlparser.Max, *sqlparser.Min: - // empty by default - case sqlparser.AggrFunc: - canBePushed = false - return false, io.EOF - } - return true, nil - }, expr) - return -} - -func (d *Derived) AddColumn(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, _, addToGroupBy bool) (ops.Operator, int, error) { - col, ok := 
expr.Expr.(*sqlparser.ColName) - if !ok { - return nil, 0, vterrors.VT13001("cannot push non-colname expression to a derived table") - } - - identity := func(c *sqlparser.ColName) sqlparser.Expr { return c } - if offset, found := canReuseColumn(ctx, d.Columns, col, identity); found { - return d, offset, nil - } - - i, err := d.findOutputColumn(col) - if err != nil { - return nil, 0, err - } - var pos int - d.ColumnsOffset, pos = addToIntSlice(d.ColumnsOffset, i) - - d.Columns = append(d.Columns, col) - // add it to the source if we were not already passing it through - if i <= -1 { - newSrc, _, err := d.Source.AddColumn(ctx, aeWrap(sqlparser.NewColName(col.Name.String())), true, addToGroupBy) - if err != nil { - return nil, 0, err - } - d.Source = newSrc - } - return d, pos, nil -} - -// canReuseColumn is generic, so it can be used with slices of different types. -// We don't care about the actual type, as long as we know it's a sqlparser.Expr -func canReuseColumn[T any]( - ctx *plancontext.PlanningContext, - columns []T, - col sqlparser.Expr, - f func(T) sqlparser.Expr, -) (offset int, found bool) { - for offset, column := range columns { - if ctx.SemTable.EqualsExprWithDeps(col, f(column)) { - return offset, true - } - } - - return -} - -func (d *Derived) GetColumns() (exprs []*sqlparser.AliasedExpr, err error) { - for _, expr := range sqlparser.GetFirstSelect(d.Query).SelectExprs { - ae, ok := expr.(*sqlparser.AliasedExpr) - if !ok { - return nil, errHorizonNotPlanned() - } - exprs = append(exprs, ae) - } - return -} - -func (d *Derived) GetOrdering() ([]ops.OrderBy, error) { - if d.QP == nil { - return nil, vterrors.VT13001("QP should already be here") - } - return d.QP.OrderExprs, nil -} - -func addToIntSlice(columnOffset []int, valToAdd int) ([]int, int) { - for idx, val := range columnOffset { - if val == valToAdd { - return columnOffset, idx - } - } - columnOffset = append(columnOffset, valToAdd) - return columnOffset, len(columnOffset) - 1 -} - -// TODO: 
REMOVE -func (d *Derived) selectStatement() sqlparser.SelectStatement { - return d.Query -} - -func (d *Derived) src() ops.Operator { - return d.Source -} - -func (d *Derived) getQP(ctx *plancontext.PlanningContext) (*QueryProjection, error) { - if d.QP != nil { - return d.QP, nil - } - qp, err := CreateQPFromSelect(ctx, d.Query.(*sqlparser.Select)) - if err != nil { - return nil, err - } - d.QP = qp - return d.QP, nil -} - -func (d *Derived) setQP(qp *QueryProjection) { - d.QP = qp -} - -func (d *Derived) Description() ops.OpDescription { - return ops.OpDescription{ - OperatorType: "Derived", - } -} - -func (d *Derived) ShortDescription() string { - return d.Alias -} diff --git a/go/vt/vtgate/planbuilder/operators/distinct.go b/go/vt/vtgate/planbuilder/operators/distinct.go index f562e03adf7..f7f4b350fc7 100644 --- a/go/vt/vtgate/planbuilder/operators/distinct.go +++ b/go/vt/vtgate/planbuilder/operators/distinct.go @@ -17,7 +17,7 @@ limitations under the License. package operators import ( - "golang.org/x/exp/slices" + "slices" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" @@ -29,9 +29,17 @@ type ( Distinct struct { Source ops.Operator QP *QueryProjection - Pushed bool - // When offset planning, we'll fill in this field + // When we go from AST to operator, we place DISTINCT ops in the required places in the op tree + // These are marked as `Required`, because they are semantically important to the results of the query. + // During planning, when we can't push down the DISTINCT op any further, we sometimes create and push down + // additional DISTINCT ops that are not strictly required, but that limit the number of incoming rows so less + // work has to be done. 
When we have pushed down these performance DISTINCTs, we set the `PushedPerformance` + // field to true on the originating op + Required bool + PushedPerformance bool + + // This is only filled in during offset planning Columns []engine.CheckCol Truncate int @@ -39,48 +47,41 @@ type ( ) func (d *Distinct) planOffsets(ctx *plancontext.PlanningContext) error { - columns, err := d.GetColumns() + columns, err := d.GetColumns(ctx) if err != nil { return err } - d.Columns = nil - var exprs []sqlparser.Expr - for _, col := range columns { - newSrc, offset, err := d.Source.AddColumn(ctx, col, true, false) - if err != nil { - return err - } - d.Source = newSrc + for idx, col := range columns { e := d.QP.GetSimplifiedExpr(col.Expr) - exprs = append(exprs, e) + var wsCol *int typ, coll, _ := ctx.SemTable.TypeForExpr(e) + + if ctx.SemTable.NeedsWeightString(e) { + offset, err := d.Source.AddColumn(ctx, true, false, aeWrap(weightStringFor(e))) + if err != nil { + return err + } + wsCol = &offset + } + d.Columns = append(d.Columns, engine.CheckCol{ - Col: offset, + Col: idx, + WsCol: wsCol, Type: typ, Collation: coll, }) } - for i, e := range exprs { - if !ctx.SemTable.NeedsWeightString(e) { - continue - } - newSrc, offset, err := d.Source.AddColumn(ctx, aeWrap(weightStringFor(e)), true, false) - if err != nil { - return err - } - d.Source = newSrc - d.Columns[i].WsCol = &offset - } return nil } func (d *Distinct) Clone(inputs []ops.Operator) ops.Operator { return &Distinct{ - Source: inputs[0], - Columns: slices.Clone(d.Columns), - QP: d.QP, - Pushed: d.Pushed, - Truncate: d.Truncate, + Required: d.Required, + Source: inputs[0], + Columns: slices.Clone(d.Columns), + QP: d.QP, + PushedPerformance: d.PushedPerformance, + Truncate: d.Truncate, } } @@ -101,27 +102,27 @@ func (d *Distinct) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser return d, nil } -func (d *Distinct) AddColumn(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, reuseExisting, 
addToGroupBy bool) (ops.Operator, int, error) { - newSrc, offset, err := d.Source.AddColumn(ctx, expr, reuseExisting, addToGroupBy) - if err != nil { - return nil, 0, err - } - d.Source = newSrc - return d, offset, nil +func (d *Distinct) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool, expr *sqlparser.AliasedExpr) (int, error) { + return d.Source.AddColumn(ctx, reuse, gb, expr) } -func (d *Distinct) GetColumns() ([]*sqlparser.AliasedExpr, error) { - return d.Source.GetColumns() +func (d *Distinct) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) { + return d.Source.FindCol(ctx, expr, underRoute) } -func (d *Distinct) Description() ops.OpDescription { - return ops.OpDescription{ - OperatorType: "Distinct", - } +func (d *Distinct) GetColumns(ctx *plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return d.Source.GetColumns(ctx) +} + +func (d *Distinct) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return d.Source.GetSelectExprs(ctx) } func (d *Distinct) ShortDescription() string { - return "" + if d.Required { + return "Required" + } + return "Performance" } func (d *Distinct) GetOrdering() ([]ops.OrderBy, error) { diff --git a/go/vt/vtgate/planbuilder/operators/dml_planning.go b/go/vt/vtgate/planbuilder/operators/dml_planning.go index a9c5c4b8871..9618c34e21e 100644 --- a/go/vt/vtgate/planbuilder/operators/dml_planning.go +++ b/go/vt/vtgate/planbuilder/operators/dml_planning.go @@ -29,19 +29,16 @@ import ( // getVindexInformation returns the vindex and VindexPlusPredicates for the DML, // If it cannot find a unique vindex match, it returns an error. 
-func getVindexInformation( - id semantics.TableSet, - predicates []sqlparser.Expr, - table *vindexes.Table, -) (*vindexes.ColumnVindex, []*VindexPlusPredicates, error) { +func getVindexInformation(id semantics.TableSet, table *vindexes.Table) ( + *vindexes.ColumnVindex, + []*VindexPlusPredicates, + error) { + // Check that we have a primary vindex which is valid if len(table.ColumnVindexes) == 0 || !table.ColumnVindexes[0].IsUnique() { return nil, nil, vterrors.VT09001(table.Name) } primaryVindex := table.ColumnVindexes[0] - if len(predicates) == 0 { - return primaryVindex, nil, nil - } var vindexesAndPredicates []*VindexPlusPredicates for _, colVindex := range table.Ordered { @@ -59,7 +56,7 @@ func getVindexInformation( return primaryVindex, vindexesAndPredicates, nil } -func buildChangedVindexesValues(update *sqlparser.Update, table *vindexes.Table, ksidCols []sqlparser.IdentifierCI) (map[string]*engine.VindexValues, string, error) { +func buildChangedVindexesValues(update *sqlparser.Update, table *vindexes.Table, ksidCols []sqlparser.IdentifierCI, assignments []SetExpr) (vv map[string]*engine.VindexValues, ownedVindexQuery string, subQueriesArgOnChangedVindex []string, err error) { changedVindexes := make(map[string]*engine.VindexValues) buf, offset := initialQuery(ksidCols, table) for i, vindex := range table.ColumnVindexes { @@ -68,24 +65,34 @@ func buildChangedVindexesValues(update *sqlparser.Update, table *vindexes.Table, for _, vcol := range vindex.Columns { // Searching in order of columns in colvindex. 
found := false - for _, assignment := range update.Exprs { + for _, assignment := range assignments { if !vcol.Equal(assignment.Name.Name) { continue } if found { - return nil, "", vterrors.VT03015(assignment.Name.Name) + return nil, "", nil, vterrors.VT03015(assignment.Name.Name) } found = true - pv, err := extractValueFromUpdate(assignment) + pv, err := evalengine.Translate(assignment.Expr.EvalExpr, nil) if err != nil { - return nil, "", err + return nil, "", nil, invalidUpdateExpr(assignment.Name.Name.String(), assignment.Expr.EvalExpr) + } + + if assignment.Expr.Info != nil { + sqe, ok := assignment.Expr.Info.(SubQueryExpression) + if ok { + for _, sq := range sqe { + subQueriesArgOnChangedVindex = append(subQueriesArgOnChangedVindex, sq.ArgName) + } + } } + vindexValueMap[vcol.String()] = pv if first { - buf.Myprintf(", %v", assignment) + buf.Myprintf(", %s", assignment.String()) first = false } else { - buf.Myprintf(" and %v", assignment) + buf.Myprintf(" and %s", assignment.String()) } } } @@ -95,31 +102,31 @@ func buildChangedVindexesValues(update *sqlparser.Update, table *vindexes.Table, } if update.Limit != nil && len(update.OrderBy) == 0 { - return nil, "", vterrors.VT12001(fmt.Sprintf("you need to provide the ORDER BY clause when using LIMIT; invalid update on vindex: %v", vindex.Name)) + return nil, "", nil, vterrors.VT12001(fmt.Sprintf("you need to provide the ORDER BY clause when using LIMIT; invalid update on vindex: %v", vindex.Name)) } if i == 0 { - return nil, "", vterrors.VT12001(fmt.Sprintf("you cannot UPDATE primary vindex columns; invalid update on vindex: %v", vindex.Name)) + return nil, "", nil, vterrors.VT12001(fmt.Sprintf("you cannot UPDATE primary vindex columns; invalid update on vindex: %v", vindex.Name)) } if _, ok := vindex.Vindex.(vindexes.Lookup); !ok { - return nil, "", vterrors.VT12001(fmt.Sprintf("you can only UPDATE lookup vindexes; invalid update on vindex: %v", vindex.Name)) + return nil, "", nil, 
vterrors.VT12001(fmt.Sprintf("you can only UPDATE lookup vindexes; invalid update on vindex: %v", vindex.Name)) } changedVindexes[vindex.Name] = &engine.VindexValues{ - PvMap: vindexValueMap, - Offset: offset, + EvalExprMap: vindexValueMap, + Offset: offset, } offset++ } if len(changedVindexes) == 0 { - return nil, "", nil + return nil, "", nil, nil } // generate rest of the owned vindex query. aTblExpr, ok := update.TableExprs[0].(*sqlparser.AliasedTableExpr) if !ok { - return nil, "", vterrors.VT12001("UPDATE on complex table expression") + return nil, "", nil, vterrors.VT12001("UPDATE on complex table expression") } tblExpr := &sqlparser.AliasedTableExpr{Expr: sqlparser.TableName{Name: table.Name}, As: aTblExpr.As} buf.Myprintf(" from %v%v%v%v for update", tblExpr, update.Where, update.OrderBy, update.Limit) - return changedVindexes, buf.String(), nil + return changedVindexes, buf.String(), subQueriesArgOnChangedVindex, nil } func initialQuery(ksidCols []sqlparser.IdentifierCI, table *vindexes.Table) (*sqlparser.TrackedBuffer, int) { @@ -142,27 +149,6 @@ func initialQuery(ksidCols []sqlparser.IdentifierCI, table *vindexes.Table) (*sq return buf, offset } -// extractValueFromUpdate given an UpdateExpr, builds an evalengine.Expr -func extractValueFromUpdate(upd *sqlparser.UpdateExpr) (evalengine.Expr, error) { - expr := upd.Expr - if sq, ok := expr.(*sqlparser.ExtractedSubquery); ok { - // if we are planning an update that needs one or more values from the outside, we can trust that they have - // been correctly extracted from this query before we reach this far - // if Merged is true, it means that this subquery was happily merged with the outer. 
- // But in that case we should not be here, so we fail - if sq.Merged { - return nil, invalidUpdateExpr(upd, expr) - } - expr = sqlparser.NewArgument(sq.GetArgName()) - } - - pv, err := evalengine.Translate(expr, nil) - if err != nil || sqlparser.IsSimpleTuple(expr) { - return nil, invalidUpdateExpr(upd, expr) - } - return pv, nil -} - -func invalidUpdateExpr(upd *sqlparser.UpdateExpr, expr sqlparser.Expr) error { - return vterrors.VT12001(fmt.Sprintf("only values are supported; invalid update on column: `%s` with expr: [%s]", upd.Name.Name.String(), sqlparser.String(expr))) +func invalidUpdateExpr(upd string, expr sqlparser.Expr) error { + return vterrors.VT12001(fmt.Sprintf("only values are supported; invalid update on column: `%s` with expr: [%s]", upd, sqlparser.String(expr))) } diff --git a/go/vt/vtgate/planbuilder/operators/expressions.go b/go/vt/vtgate/planbuilder/operators/expressions.go index 246a6702142..7ab27e787e8 100644 --- a/go/vt/vtgate/planbuilder/operators/expressions.go +++ b/go/vt/vtgate/planbuilder/operators/expressions.go @@ -18,7 +18,6 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -31,28 +30,20 @@ func BreakExpressionInLHSandRHS( lhs semantics.TableSet, ) (col JoinColumn, err error) { rewrittenExpr := sqlparser.CopyOnRewrite(expr, nil, func(cursor *sqlparser.CopyOnWriteCursor) { - node := cursor.Node() - reservedName := getReservedBVName(node) - if reservedName == "" { + nodeExpr, ok := cursor.Node().(sqlparser.Expr) + if !ok || !fetchByOffset(nodeExpr) { return } - nodeExpr := node.(sqlparser.Expr) deps := ctx.SemTable.RecursiveDeps(nodeExpr) - if deps.IsEmpty() { - err = vterrors.VT13001("unknown column. 
has the AST been copied?") - cursor.StopTreeWalk() - return - } if !deps.IsSolvedBy(lhs) { return } - col.LHSExprs = append(col.LHSExprs, nodeExpr) - bvName := ctx.GetArgumentFor(nodeExpr, func() string { - return ctx.ReservedVars.ReserveVariable(reservedName) + bvName := ctx.GetReservedArgumentFor(nodeExpr) + col.LHSExprs = append(col.LHSExprs, BindVarExpr{ + Name: bvName, + Expr: nodeExpr, }) - - col.BvNames = append(col.BvNames, bvName) arg := sqlparser.NewArgument(bvName) // we are replacing one of the sides of the comparison with an argument, // but we don't want to lose the type information we have, so we copy it over @@ -67,14 +58,3 @@ func BreakExpressionInLHSandRHS( col.RHSExpr = rewrittenExpr return } - -func getReservedBVName(node sqlparser.SQLNode) string { - switch node := node.(type) { - case *sqlparser.ColName: - node.Qualifier.Qualifier = sqlparser.NewIdentifierCS("") - return node.CompliantName() - case sqlparser.AggrFunc: - return sqlparser.CompliantString(node) - } - return "" -} diff --git a/go/vt/vtgate/planbuilder/operators/filter.go b/go/vt/vtgate/planbuilder/operators/filter.go index f2f07b3ee19..874e799cf43 100644 --- a/go/vt/vtgate/planbuilder/operators/filter.go +++ b/go/vt/vtgate/planbuilder/operators/filter.go @@ -17,7 +17,12 @@ limitations under the License. package operators import ( + "slices" + "strings" + + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" @@ -29,9 +34,11 @@ type Filter struct { Source ops.Operator Predicates []sqlparser.Expr - // FinalPredicate is the evalengine expression that will finally be used. + // PredicateWithOffsets is the evalengine expression that will finally be used. 
// It contains the ANDed predicates in Predicates, with ColName:s replaced by Offset:s - FinalPredicate evalengine.Expr + PredicateWithOffsets evalengine.Expr + + Truncate int } func newFilter(op ops.Operator, expr sqlparser.Expr) ops.Operator { @@ -42,11 +49,11 @@ func newFilter(op ops.Operator, expr sqlparser.Expr) ops.Operator { // Clone implements the Operator interface func (f *Filter) Clone(inputs []ops.Operator) ops.Operator { - predicatesClone := make([]sqlparser.Expr, len(f.Predicates)) - copy(predicatesClone, f.Predicates) return &Filter{ - Source: inputs[0], - Predicates: predicatesClone, + Source: inputs[0], + Predicates: slices.Clone(f.Predicates), + PredicateWithOffsets: f.PredicateWithOffsets, + Truncate: f.Truncate, } } @@ -82,17 +89,20 @@ func (f *Filter) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.E return f, nil } -func (f *Filter) AddColumn(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, reuseExisting, addToGroupBy bool) (ops.Operator, int, error) { - newSrc, offset, err := f.Source.AddColumn(ctx, expr, reuseExisting, addToGroupBy) - if err != nil { - return nil, 0, err - } - f.Source = newSrc - return f, offset, nil +func (f *Filter) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool, expr *sqlparser.AliasedExpr) (int, error) { + return f.Source.AddColumn(ctx, reuse, gb, expr) } -func (f *Filter) GetColumns() ([]*sqlparser.AliasedExpr, error) { - return f.Source.GetColumns() +func (f *Filter) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) { + return f.Source.FindCol(ctx, expr, underRoute) +} + +func (f *Filter) GetColumns(ctx *plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return f.Source.GetColumns(ctx) +} + +func (f *Filter) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return f.Source.GetSelectExprs(ctx) } func (f *Filter) GetOrdering() ([]ops.OrderBy, error) { @@ -114,38 +124,32 @@ func (f 
*Filter) Compact(*plancontext.PlanningContext) (ops.Operator, *rewrite.A } func (f *Filter) planOffsets(ctx *plancontext.PlanningContext) error { - resolveColumn := func(col *sqlparser.ColName) (int, error) { - newSrc, offset, err := f.Source.AddColumn(ctx, aeWrap(col), true, false) - if err != nil { - return 0, err - } - f.Source = newSrc - return offset, nil - } cfg := &evalengine.Config{ - ResolveType: ctx.SemTable.TypeForExpr, - Collation: ctx.SemTable.Collation, - ResolveColumn: resolveColumn, + ResolveType: ctx.SemTable.TypeForExpr, + Collation: ctx.SemTable.Collation, } - eexpr, err := evalengine.Translate(sqlparser.AndExpressions(f.Predicates...), cfg) + predicate := sqlparser.AndExpressions(f.Predicates...) + rewritten, err := useOffsets(ctx, predicate, f) if err != nil { return err } + eexpr, err := evalengine.Translate(rewritten, cfg) + if err != nil { + if strings.HasPrefix(err.Error(), evalengine.ErrTranslateExprNotSupported) { + return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "%s: %s", evalengine.ErrTranslateExprNotSupported, sqlparser.String(predicate)) + } + return err + } - f.FinalPredicate = eexpr + f.PredicateWithOffsets = eexpr return nil } -func (f *Filter) Description() ops.OpDescription { - return ops.OpDescription{ - OperatorType: "Filter", - Other: map[string]any{ - "Predicate": sqlparser.String(sqlparser.AndExpressions(f.Predicates...)), - }, - } -} - func (f *Filter) ShortDescription() string { return sqlparser.String(sqlparser.AndExpressions(f.Predicates...)) } + +func (f *Filter) setTruncateColumnCount(offset int) { + f.Truncate = offset +} diff --git a/go/vt/vtgate/planbuilder/operators/fk_cascade.go b/go/vt/vtgate/planbuilder/operators/fk_cascade.go new file mode 100644 index 00000000000..a9afbde0a7c --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/fk_cascade.go @@ -0,0 +1,106 @@ +/* +Copyright 2023 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "slices" + + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" +) + +// FkChild is used to represent a foreign key child table operation +type FkChild struct { + BVName string + Cols []int // indexes + Op ops.Operator + + noColumns + noPredicates +} + +// FkCascade is used to represent a foreign key cascade operation +// as an operator. This operator is created for DML queries that require +// cascades (for example, ON DELETE CASCADE). 
+type FkCascade struct { + Selection ops.Operator + Children []*FkChild + Parent ops.Operator + + noColumns + noPredicates +} + +var _ ops.Operator = (*FkCascade)(nil) + +// Inputs implements the Operator interface +func (fkc *FkCascade) Inputs() []ops.Operator { + var inputs []ops.Operator + inputs = append(inputs, fkc.Parent) + inputs = append(inputs, fkc.Selection) + for _, child := range fkc.Children { + inputs = append(inputs, child.Op) + } + return inputs +} + +// SetInputs implements the Operator interface +func (fkc *FkCascade) SetInputs(operators []ops.Operator) { + if len(operators) < 2 { + panic("incorrect count of inputs for FkCascade") + } + fkc.Parent = operators[0] + fkc.Selection = operators[1] + for idx, operator := range operators { + if idx < 2 { + continue + } + fkc.Children[idx-2].Op = operator + } +} + +// Clone implements the Operator interface +func (fkc *FkCascade) Clone(inputs []ops.Operator) ops.Operator { + if len(inputs) < 2 { + panic("incorrect count of inputs for FkCascade") + } + newFkc := &FkCascade{ + Parent: inputs[0], + Selection: inputs[1], + } + for idx, operator := range inputs { + if idx < 2 { + continue + } + + newFkc.Children = append(newFkc.Children, &FkChild{ + BVName: fkc.Children[idx-2].BVName, + Cols: slices.Clone(fkc.Children[idx-2].Cols), + Op: operator, + }) + } + return newFkc +} + +// GetOrdering implements the Operator interface +func (fkc *FkCascade) GetOrdering() ([]ops.OrderBy, error) { + return nil, nil +} + +// ShortDescription implements the Operator interface +func (fkc *FkCascade) ShortDescription() string { + return "" +} diff --git a/go/vt/vtgate/planbuilder/operators/fk_verify.go b/go/vt/vtgate/planbuilder/operators/fk_verify.go new file mode 100644 index 00000000000..8c2431d26fc --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/fk_verify.go @@ -0,0 +1,80 @@ +/* +Copyright 2023 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" +) + +// VerifyOp keeps the information about the foreign key verification operation. +// It is a Parent verification or a Child verification. +type VerifyOp struct { + Op ops.Operator + Typ string +} + +// FkVerify is used to represent a foreign key verification operation +// as an operator. This operator is created for DML queries that require +// verifications on the existence of the rows in the parent table (for example, INSERT and UPDATE). 
+type FkVerify struct { + Verify []*VerifyOp + Input ops.Operator + + noColumns + noPredicates +} + +var _ ops.Operator = (*FkVerify)(nil) + +// Inputs implements the Operator interface +func (fkv *FkVerify) Inputs() []ops.Operator { + inputs := []ops.Operator{fkv.Input} + for _, v := range fkv.Verify { + inputs = append(inputs, v.Op) + } + return inputs +} + +// SetInputs implements the Operator interface +func (fkv *FkVerify) SetInputs(operators []ops.Operator) { + fkv.Input = operators[0] + if len(fkv.Verify) != len(operators)-1 { + panic("mismatched number of verify inputs") + } + for i := 1; i < len(operators); i++ { + fkv.Verify[i-1].Op = operators[i] + } +} + +// Clone implements the Operator interface +func (fkv *FkVerify) Clone(inputs []ops.Operator) ops.Operator { + newFkv := &FkVerify{ + Verify: fkv.Verify, + } + newFkv.SetInputs(inputs) + return newFkv +} + +// GetOrdering implements the Operator interface +func (fkv *FkVerify) GetOrdering() ([]ops.OrderBy, error) { + return nil, nil +} + +// ShortDescription implements the Operator interface +func (fkv *FkVerify) ShortDescription() string { + return "" +} diff --git a/go/vt/vtgate/planbuilder/operators/helpers.go b/go/vt/vtgate/planbuilder/operators/helpers.go index 1c472acf413..21be634d7d8 100644 --- a/go/vt/vtgate/planbuilder/operators/helpers.go +++ b/go/vt/vtgate/planbuilder/operators/helpers.go @@ -67,15 +67,15 @@ func Clone(op ops.Operator) ops.Operator { return op.Clone(clones) } -// TableIDIntroducer is used to signal that this operator introduces data from a new source -type TableIDIntroducer interface { - Introduces() semantics.TableSet +// tableIDIntroducer is used to signal that this operator introduces data from a new source +type tableIDIntroducer interface { + introducesTableID() semantics.TableSet } func TableID(op ops.Operator) (result semantics.TableSet) { _ = rewrite.Visit(op, func(this ops.Operator) error { - if tbl, ok := this.(TableIDIntroducer); ok { - result = 
result.Merge(tbl.Introduces()) + if tbl, ok := this.(tableIDIntroducer); ok { + result = result.Merge(tbl.introducesTableID()) } return nil }) diff --git a/go/vt/vtgate/planbuilder/operators/horizon.go b/go/vt/vtgate/planbuilder/operators/horizon.go index 39efe2b9956..2f9d574d99d 100644 --- a/go/vt/vtgate/planbuilder/operators/horizon.go +++ b/go/vt/vtgate/planbuilder/operators/horizon.go @@ -17,10 +17,13 @@ limitations under the License. package operators import ( + "slices" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" ) // Horizon is an operator that allows us to postpone planning things like SELECT/GROUP BY/ORDER BY/LIMIT until later. @@ -32,58 +35,150 @@ import ( // and some that have to be evaluated at the vtgate level. type Horizon struct { Source ops.Operator - Select sqlparser.SelectStatement - QP *QueryProjection + + // If this is a derived table, the two following fields will contain the tableID and name of it + TableId *semantics.TableSet + Alias string + ColumnAliases sqlparser.Columns // derived tables can have their column aliases specified outside the subquery + + // QP contains the QueryProjection for this op + QP *QueryProjection + + Query sqlparser.SelectStatement + + // Columns needed to feed other plans + Columns []*sqlparser.ColName + ColumnsOffset []int } -func (h *Horizon) AddColumn(*plancontext.PlanningContext, *sqlparser.AliasedExpr, bool, bool) (ops.Operator, int, error) { - return nil, 0, vterrors.VT13001("the Horizon operator cannot accept new columns") +func newHorizon(src ops.Operator, query sqlparser.SelectStatement) *Horizon { + return &Horizon{Source: src, Query: query} } -func (h *Horizon) GetColumns() (exprs []*sqlparser.AliasedExpr, err error) { - for _, expr := range sqlparser.GetFirstSelect(h.Select).SelectExprs { - ae, ok := 
expr.(*sqlparser.AliasedExpr) - if !ok { - return nil, errHorizonNotPlanned() - } - exprs = append(exprs, ae) - } - return +// Clone implements the Operator interface +func (h *Horizon) Clone(inputs []ops.Operator) ops.Operator { + klone := *h + klone.Source = inputs[0] + klone.ColumnAliases = sqlparser.CloneColumns(h.ColumnAliases) + klone.Columns = slices.Clone(h.Columns) + klone.ColumnsOffset = slices.Clone(h.ColumnsOffset) + klone.QP = h.QP + return &klone +} + +// IsMergeable is not a great name for this function. Suggestions for a better one are welcome! +// This function will return false if the derived table inside it has to run on the vtgate side, and so can't be merged with subqueries +// This logic can also be used to check if this is a derived table that can be had on the left hand side of a vtgate join. +// Since vtgate joins are always nested loop joins, we can't execute them on the RHS +// if they do some things, like LIMIT or GROUP BY on wrong columns +func (h *Horizon) IsMergeable(ctx *plancontext.PlanningContext) bool { + return isMergeable(ctx, h.Query, h) +} + +// Inputs implements the Operator interface +func (h *Horizon) Inputs() []ops.Operator { + return []ops.Operator{h.Source} } -var _ ops.Operator = (*Horizon)(nil) +// SetInputs implements the Operator interface +func (h *Horizon) SetInputs(ops []ops.Operator) { + h.Source = ops[0] +} func (h *Horizon) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { - newSrc, err := h.Source.AddPredicate(ctx, expr) + if _, isUNion := h.Source.(*Union); isUNion { + // If we have a derived table on top of a UNION, we can let the UNION do the expression rewriting + var err error + h.Source, err = h.Source.AddPredicate(ctx, expr) + return h, err + } + tableInfo, err := ctx.SemTable.TableInfoForExpr(expr) + if err != nil { + if err == semantics.ErrNotSingleTable { + return &Filter{ + Source: h, + Predicates: []sqlparser.Expr{expr}, + }, nil + } + return nil, err + } + 
+ newExpr := semantics.RewriteDerivedTableExpression(expr, tableInfo) + if sqlparser.ContainsAggregation(newExpr) { + return &Filter{Source: h, Predicates: []sqlparser.Expr{expr}}, nil + } + h.Source, err = h.Source.AddPredicate(ctx, newExpr) if err != nil { return nil, err } - h.Source = newSrc return h, nil } -func (h *Horizon) Clone(inputs []ops.Operator) ops.Operator { - return &Horizon{ - Source: inputs[0], - Select: h.Select, +func (h *Horizon) AddColumn(ctx *plancontext.PlanningContext, reuse bool, _ bool, expr *sqlparser.AliasedExpr) (int, error) { + if !reuse { + return 0, errNoNewColumns + } + col, ok := expr.Expr.(*sqlparser.ColName) + if !ok { + return 0, vterrors.VT13001("cannot push non-ColName expression to horizon") + } + offset, err := h.FindCol(ctx, col, false) + if err != nil { + return 0, err + } + if offset < 0 { + return 0, errNoNewColumns } + return offset, nil } -func (h *Horizon) Inputs() []ops.Operator { - return []ops.Operator{h.Source} +var errNoNewColumns = vterrors.VT13001("can't add new columns to Horizon") + +// canReuseColumn is generic, so it can be used with slices of different types. 
+// We don't care about the actual type, as long as we know it's a sqlparser.Expr +func canReuseColumn[T any]( + ctx *plancontext.PlanningContext, + columns []T, + col sqlparser.Expr, + f func(T) sqlparser.Expr, +) (offset int, found bool) { + for offset, column := range columns { + if ctx.SemTable.EqualsExprWithDeps(col, f(column)) { + return offset, true + } + } + + return } -// SetInputs implements the Operator interface -func (h *Horizon) SetInputs(ops []ops.Operator) { - h.Source = ops[0] +func (h *Horizon) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, _ bool) (int, error) { + for idx, se := range sqlparser.GetFirstSelect(h.Query).SelectExprs { + ae, ok := se.(*sqlparser.AliasedExpr) + if !ok { + return 0, vterrors.VT09015() + } + if ctx.SemTable.EqualsExprWithDeps(ae.Expr, expr) { + return idx, nil + } + } + + return -1, nil } -func (h *Horizon) selectStatement() sqlparser.SelectStatement { - return h.Select +func (h *Horizon) GetColumns(ctx *plancontext.PlanningContext) (exprs []*sqlparser.AliasedExpr, err error) { + for _, expr := range ctx.SemTable.SelectExprs(h.Query) { + ae, ok := expr.(*sqlparser.AliasedExpr) + if !ok { + return nil, vterrors.VT09015() + } + exprs = append(exprs, ae) + } + + return exprs, nil } -func (h *Horizon) src() ops.Operator { - return h.Source +func (h *Horizon) GetSelectExprs(*plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return sqlparser.GetFirstSelect(h.Query).SelectExprs, nil } func (h *Horizon) GetOrdering() ([]ops.OrderBy, error) { @@ -93,11 +188,20 @@ func (h *Horizon) GetOrdering() ([]ops.OrderBy, error) { return h.QP.OrderExprs, nil } +// TODO: REMOVE +func (h *Horizon) selectStatement() sqlparser.SelectStatement { + return h.Query +} + +func (h *Horizon) src() ops.Operator { + return h.Source +} + func (h *Horizon) getQP(ctx *plancontext.PlanningContext) (*QueryProjection, error) { if h.QP != nil { return h.QP, nil } - qp, err := CreateQPFromSelect(ctx, h.Select.(*sqlparser.Select)) + 
qp, err := CreateQPFromSelectStatement(ctx, h.Query) if err != nil { return nil, err } @@ -105,16 +209,18 @@ func (h *Horizon) getQP(ctx *plancontext.PlanningContext) (*QueryProjection, err return h.QP, nil } -func (h *Horizon) setQP(qp *QueryProjection) { - h.QP = qp +func (h *Horizon) ShortDescription() string { + return h.Alias } -func (h *Horizon) Description() ops.OpDescription { - return ops.OpDescription{ - OperatorType: "Horizon", +func (h *Horizon) introducesTableID() semantics.TableSet { + if h.TableId == nil { + return semantics.EmptyTableSet() } + + return *h.TableId } -func (h *Horizon) ShortDescription() string { - return "" +func (h *Horizon) IsDerived() bool { + return h.TableId != nil } diff --git a/go/vt/vtgate/planbuilder/operators/horizon_expanding.go b/go/vt/vtgate/planbuilder/operators/horizon_expanding.go new file mode 100644 index 00000000000..2714cb73ff1 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/horizon_expanding.go @@ -0,0 +1,303 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package operators + +import ( + "fmt" + "strings" + + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" +) + +func expandHorizon(ctx *plancontext.PlanningContext, horizon *Horizon) (ops.Operator, *rewrite.ApplyResult, error) { + statement := horizon.selectStatement() + switch sel := statement.(type) { + case *sqlparser.Select: + return expandSelectHorizon(ctx, horizon, sel) + case *sqlparser.Union: + return expandUnionHorizon(ctx, horizon, sel) + } + return nil, nil, vterrors.VT13001(fmt.Sprintf("unexpected statement type %T", statement)) +} + +func expandUnionHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, union *sqlparser.Union) (ops.Operator, *rewrite.ApplyResult, error) { + op := horizon.Source + + qp, err := horizon.getQP(ctx) + if err != nil { + return nil, nil, err + } + + if len(qp.OrderExprs) > 0 { + op = &Ordering{ + Source: op, + Order: qp.OrderExprs, + } + } + + if union.Limit != nil { + op = &Limit{ + Source: op, + AST: union.Limit, + } + } + + if horizon.TableId != nil { + proj := newAliasedProjection(op) + proj.DT = &DerivedTable{ + TableID: *horizon.TableId, + Alias: horizon.Alias, + Columns: horizon.ColumnAliases, + } + op = proj + } + + if op == horizon.Source { + return op, rewrite.NewTree("removed UNION horizon not used", op), nil + } + + return op, rewrite.NewTree("expand UNION horizon into smaller components", op), nil +} + +func expandSelectHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, sel *sqlparser.Select) (ops.Operator, *rewrite.ApplyResult, error) { + op, err := createProjectionFromSelect(ctx, horizon) + if err != nil { + return nil, nil, err + } + + qp, err := horizon.getQP(ctx) + if err != nil { + return nil, nil, err + } + + var extracted []string + if qp.HasAggr { + 
extracted = append(extracted, "Aggregation") + } else { + extracted = append(extracted, "Projection") + } + + if qp.NeedsDistinct() { + op = &Distinct{ + Required: true, + Source: op, + QP: qp, + } + extracted = append(extracted, "Distinct") + } + + if sel.Having != nil { + op, err = addWherePredicates(ctx, sel.Having.Expr, op) + if err != nil { + return nil, nil, err + } + extracted = append(extracted, "Filter") + } + + if len(qp.OrderExprs) > 0 { + op = &Ordering{ + Source: op, + Order: qp.OrderExprs, + } + extracted = append(extracted, "Ordering") + } + + if sel.Limit != nil { + op = &Limit{ + Source: op, + AST: sel.Limit, + } + extracted = append(extracted, "Limit") + } + + return op, rewrite.NewTree(fmt.Sprintf("expand SELECT horizon into (%s)", strings.Join(extracted, ", ")), op), nil +} + +func createProjectionFromSelect(ctx *plancontext.PlanningContext, horizon *Horizon) (out ops.Operator, err error) { + qp, err := horizon.getQP(ctx) + if err != nil { + return nil, err + } + + var dt *DerivedTable + if horizon.TableId != nil { + dt = &DerivedTable{ + TableID: *horizon.TableId, + Alias: horizon.Alias, + Columns: horizon.ColumnAliases, + } + } + + if !qp.NeedsAggregation() { + projX, err := createProjectionWithoutAggr(ctx, qp, horizon.src()) + if err != nil { + return nil, err + } + projX.DT = dt + out = projX + + return out, nil + } + + aggregations, complexAggr, err := qp.AggregationExpressions(ctx, true) + if err != nil { + return nil, err + } + + a := &Aggregator{ + Source: horizon.src(), + Original: true, + QP: qp, + Grouping: qp.GetGrouping(), + Aggregations: aggregations, + DT: dt, + } + + if complexAggr { + return createProjectionForComplexAggregation(a, qp) + } + return createProjectionForSimpleAggregation(ctx, a, qp) +} + +func createProjectionForSimpleAggregation(ctx *plancontext.PlanningContext, a *Aggregator, qp *QueryProjection) (ops.Operator, error) { +outer: + for colIdx, expr := range qp.SelectExprs { + ae, err := expr.GetAliasedExpr() + if 
err != nil { + return nil, err + } + addedToCol := false + for idx, groupBy := range a.Grouping { + if ctx.SemTable.EqualsExprWithDeps(groupBy.SimplifiedExpr, ae.Expr) { + if !addedToCol { + a.Columns = append(a.Columns, ae) + addedToCol = true + } + if groupBy.ColOffset < 0 { + a.Grouping[idx].ColOffset = colIdx + } + } + } + if addedToCol { + continue + } + for idx, aggr := range a.Aggregations { + if ctx.SemTable.EqualsExprWithDeps(aggr.Original.Expr, ae.Expr) && aggr.ColOffset < 0 { + a.Columns = append(a.Columns, ae) + a.Aggregations[idx].ColOffset = colIdx + continue outer + } + } + return nil, vterrors.VT13001(fmt.Sprintf("Could not find the %s in aggregation in the original query", sqlparser.String(ae))) + } + return a, nil +} + +func createProjectionForComplexAggregation(a *Aggregator, qp *QueryProjection) (ops.Operator, error) { + p := newAliasedProjection(a) + p.DT = a.DT + for _, expr := range qp.SelectExprs { + ae, err := expr.GetAliasedExpr() + if err != nil { + return nil, err + } + + _, err = p.addProjExpr(newProjExpr(ae)) + if err != nil { + return nil, err + } + } + for i, by := range a.Grouping { + a.Grouping[i].ColOffset = len(a.Columns) + a.Columns = append(a.Columns, aeWrap(by.SimplifiedExpr)) + } + for i, aggregation := range a.Aggregations { + a.Aggregations[i].ColOffset = len(a.Columns) + a.Columns = append(a.Columns, aggregation.Original) + } + return p, nil +} + +func createProjectionWithoutAggr(ctx *plancontext.PlanningContext, qp *QueryProjection, src ops.Operator) (*Projection, error) { + // first we need to check if we have all columns or there are still unexpanded stars + aes, err := slice.MapWithError(qp.SelectExprs, func(from SelectExpr) (*sqlparser.AliasedExpr, error) { + ae, ok := from.Col.(*sqlparser.AliasedExpr) + if !ok { + return nil, fmt.Errorf("star found") + } + return ae, nil + }) + + if err != nil { + // if we have unexpanded expressions, we take this shortcut and hope we don't need any offsets from this plan + return 
newStarProjection(src, qp) + } + + proj := newAliasedProjection(nil) + sqc := &SubQueryBuilder{} + outerID := TableID(src) + for _, ae := range aes { + org := sqlparser.CloneRefOfAliasedExpr(ae) + expr := ae.Expr + newExpr, subqs, err := sqc.pullOutValueSubqueries(ctx, expr, outerID, false) + if err != nil { + return nil, err + } + if newExpr == nil { + // there was no subquery in this expression + _, err := proj.addUnexploredExpr(org, expr) + if err != nil { + return nil, err + } + } else { + err := proj.addSubqueryExpr(org, newExpr, subqs...) + if err != nil { + return nil, err + } + } + } + proj.Source = sqc.getRootOperator(src) + return proj, nil +} + +func newStarProjection(src ops.Operator, qp *QueryProjection) (*Projection, error) { + cols := sqlparser.SelectExprs{} + + for _, expr := range qp.SelectExprs { + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + _, isSubQ := node.(*sqlparser.Subquery) + if !isSubQ { + return true, nil + } + return false, vterrors.VT09015() + }, expr.Col) + if err != nil { + return nil, err + } + cols = append(cols, expr.Col) + } + + return &Projection{ + Source: src, + Columns: StarProjections(cols), + }, nil +} diff --git a/go/vt/vtgate/planbuilder/operators/horizon_planning.go b/go/vt/vtgate/planbuilder/operators/horizon_planning.go deleted file mode 100644 index 75067f71d69..00000000000 --- a/go/vt/vtgate/planbuilder/operators/horizon_planning.go +++ /dev/null @@ -1,900 +0,0 @@ -/* -Copyright 2022 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package operators - -import ( - "fmt" - "io" - - "vitess.io/vitess/go/slices2" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" -) - -type ( - projector struct { - cols []ProjExpr - names []*sqlparser.AliasedExpr - } - - // horizonLike should be removed. we should use Horizon for both these cases - horizonLike interface { - ops.Operator - selectStatement() sqlparser.SelectStatement - src() ops.Operator - getQP(ctx *plancontext.PlanningContext) (*QueryProjection, error) - } -) - -func errHorizonNotPlanned() error { - return _errHorizonNotPlanned -} - -var _errHorizonNotPlanned = vterrors.VT12001("query cannot be fully operator planned") - -func tryHorizonPlanning(ctx *plancontext.PlanningContext, root ops.Operator) (output ops.Operator, err error) { - backup := Clone(root) - defer func() { - // If we encounter the _errHorizonNotPlanned error, we'll revert to using the old horizon planning strategy. - if err == _errHorizonNotPlanned { - // The only offset planning we did before was on joins. - // Therefore, we traverse the tree to find all joins and calculate the joinColumns offsets. - // Our fallback strategy is to clone the original operator tree, compute the join offsets, - // and allow the legacy horizonPlanner to handle this query using logical plans. 
- err = planOffsetsOnJoins(ctx, backup) - if err == nil { - output = backup - } - } - }() - - _, ok := root.(*Horizon) - - if !ok || len(ctx.SemTable.SubqueryMap) > 0 || len(ctx.SemTable.SubqueryRef) > 0 { - // we are not ready to deal with subqueries yet - return root, errHorizonNotPlanned() - } - - output, err = planHorizons(ctx, root) - if err != nil { - return nil, err - } - - output, err = planOffsets(ctx, output) - if err != nil { - return nil, err - } - - output, err = makeSureOutputIsCorrect(ctx, root, output) - if err != nil { - return nil, err - } - - return -} - -// planHorizons is the process of figuring out how to perform the operations in the Horizon -// If we can push it under a route - done. -// If we can't, we will instead expand the Horizon into -// smaller operators and try to push these down as far as possible -func planHorizons(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { - root, err := optimizeHorizonPlanning(ctx, root) - if err != nil { - return nil, err - } - - // Adding Ordering Op - This is needed if there is no explicit ordering and aggregation is performed on top of route. - // Adding Group by - This is needed if the grouping is performed on a join with a join condition then - // aggregation happening at route needs a group by to ensure only matching rows returns - // the aggregations otherwise returns no result. 
- root, err = addOrderBysAndGroupBysForAggregations(ctx, root) - if err != nil { - return nil, err - } - - root, err = optimizeHorizonPlanning(ctx, root) - if err != nil { - return nil, err - } - return root, nil -} - -func optimizeHorizonPlanning(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { - visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { - switch in := in.(type) { - case horizonLike: - return pushOrExpandHorizon(ctx, in) - case *Projection: - return tryPushingDownProjection(ctx, in) - case *Limit: - return tryPushingDownLimit(in) - case *Ordering: - return tryPushingDownOrdering(ctx, in) - case *Aggregator: - return tryPushingDownAggregator(ctx, in) - case *Filter: - return tryPushingDownFilter(ctx, in) - case *Distinct: - return tryPushingDownDistinct(in) - default: - return in, rewrite.SameTree, nil - } - } - - newOp, err := rewrite.FixedPointBottomUp(root, TableID, visitor, stopAtRoute) - if err != nil { - if vterr, ok := err.(*vterrors.VitessError); ok && vterr.ID == "VT13001" { - // we encountered a bug. 
let's try to back out - return nil, errHorizonNotPlanned() - } - return nil, err - } - - return newOp, nil -} - -func tryPushingDownFilter(ctx *plancontext.PlanningContext, in *Filter) (ops.Operator, *rewrite.ApplyResult, error) { - proj, ok := in.Source.(*Projection) - if !ok { - // we can only push filter under a projection - return in, rewrite.SameTree, nil - } - - for _, p := range in.Predicates { - cantPushDown := false - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - if !fetchByOffset(node) { - return true, nil - } - - if proj.needsEvaluation(ctx, node.(sqlparser.Expr)) { - cantPushDown = true - return false, io.EOF - } - - return true, nil - }, p) - - if cantPushDown { - return in, rewrite.SameTree, nil - } - } - - return rewrite.Swap(in, proj, "push filter under projection") -} - -func tryPushingDownDistinct(in *Distinct) (ops.Operator, *rewrite.ApplyResult, error) { - if in.Pushed { - return in, rewrite.SameTree, nil - } - switch src := in.Source.(type) { - case *Route: - if src.IsSingleShard() { - return rewrite.Swap(in, src, "push distinct under route") - } - case *Distinct: - return src, rewrite.NewTree("removed double distinct", src), nil - case *Aggregator: - return in, rewrite.SameTree, nil - } - - cols, err := in.Source.GetColumns() - if err != nil { - return nil, nil, err - } - - aggr := &Aggregator{ - Source: in.Source, - QP: in.QP, - Original: true, - } - - for _, col := range cols { - aggr.addColumnWithoutPushing(col, true) - } - - return aggr, rewrite.NewTree("replace distinct with aggregator", in), nil -} - -// addOrderBysAndGroupBysForAggregations runs after we have run horizonPlanning until the op tree stops changing -// this means that we have pushed aggregations and other ops as far down as they'll go -// addOrderBysAndGroupBysForAggregations will find Aggregators that have not been pushed under routes and -// add the necessary Ordering operators for them -func addOrderBysAndGroupBysForAggregations(ctx 
*plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { - visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { - switch in := in.(type) { - case *Aggregator: - if in.Pushed { - // first we update the incoming columns, so we know about any new columns that have been added - columns, err := in.Source.GetColumns() - if err != nil { - return nil, nil, err - } - in.Columns = columns - } - - requireOrdering, err := needsOrdering(in, ctx) - if err != nil { - return nil, nil, err - } - if !requireOrdering { - return in, rewrite.SameTree, nil - } - in.Source = &Ordering{ - Source: in.Source, - Order: slices2.Map(in.Grouping, func(from GroupBy) ops.OrderBy { - return from.AsOrderBy() - }), - } - return in, rewrite.NewTree("added ordering before aggregation", in), nil - case *ApplyJoin: - _ = rewrite.Visit(in.RHS, func(op ops.Operator) error { - aggr, isAggr := op.(*Aggregator) - if !isAggr { - return nil - } - if len(aggr.Grouping) == 0 { - gb := sqlparser.NewIntLiteral(".0") - aggr.Grouping = append(aggr.Grouping, NewGroupBy(gb, gb, aeWrap(gb))) - } - return nil - }) - } - return in, rewrite.SameTree, nil - } - - return rewrite.TopDown(root, TableID, visitor, stopAtRoute) -} - -func needsOrdering(in *Aggregator, ctx *plancontext.PlanningContext) (bool, error) { - if len(in.Grouping) == 0 { - return false, nil - } - srcOrdering, err := in.Source.GetOrdering() - if err != nil { - return false, err - } - if len(srcOrdering) < len(in.Grouping) { - return true, nil - } - for idx, gb := range in.Grouping { - if !ctx.SemTable.EqualsExprWithDeps(srcOrdering[idx].SimplifiedExpr, gb.SimplifiedExpr) { - return true, nil - } - } - return false, nil -} - -func tryPushingDownOrdering(ctx *plancontext.PlanningContext, in *Ordering) (ops.Operator, *rewrite.ApplyResult, error) { - switch src := in.Source.(type) { - case *Route: - return rewrite.Swap(in, src, "push ordering under route") - case *ApplyJoin: - if 
canPushLeft(ctx, src, in.Order) { - // ApplyJoin is stable in regard to the columns coming from the LHS, - // so if all the ordering columns come from the LHS, we can push down the Ordering there - src.LHS, in.Source = in, src.LHS - return src, rewrite.NewTree("push down ordering on the LHS of a join", in), nil - } - case *Ordering: - // we'll just remove the order underneath. The top order replaces whatever was incoming - in.Source = src.Source - return in, rewrite.NewTree("remove double ordering", src), nil - case *Projection: - // we can move ordering under a projection if it's not introducing a column we're sorting by - for _, by := range in.Order { - if !fetchByOffset(by.SimplifiedExpr) { - return in, rewrite.SameTree, nil - } - } - return rewrite.Swap(in, src, "push ordering under projection") - case *Aggregator: - if !(src.QP.AlignGroupByAndOrderBy(ctx) || overlaps(ctx, in.Order, src.Grouping)) { - return in, rewrite.SameTree, nil - } - - return pushOrderingUnderAggr(ctx, in, src) - - } - return in, rewrite.SameTree, nil -} - -func overlaps(ctx *plancontext.PlanningContext, order []ops.OrderBy, grouping []GroupBy) bool { -ordering: - for _, orderBy := range order { - for _, groupBy := range grouping { - if ctx.SemTable.EqualsExprWithDeps(orderBy.SimplifiedExpr, groupBy.SimplifiedExpr) { - continue ordering - } - } - return false - } - - return true -} - -func pushOrderingUnderAggr(ctx *plancontext.PlanningContext, order *Ordering, aggregator *Aggregator) (ops.Operator, *rewrite.ApplyResult, error) { - // Step 1: Align the GROUP BY and ORDER BY. - // Reorder the GROUP BY columns to match the ORDER BY columns. - // Since the GB clause is a set, we can reorder these columns freely. 
- var newGrouping []GroupBy - used := make([]bool, len(aggregator.Grouping)) - for _, orderExpr := range order.Order { - for grpIdx, by := range aggregator.Grouping { - if !used[grpIdx] && ctx.SemTable.EqualsExprWithDeps(by.SimplifiedExpr, orderExpr.SimplifiedExpr) { - newGrouping = append(newGrouping, by) - used[grpIdx] = true - } - } - } - - // Step 2: Add any missing columns from the ORDER BY. - // The ORDER BY column is not a set, but we can add more elements - // to the end without changing the semantics of the query. - if len(newGrouping) != len(aggregator.Grouping) { - // we are missing some groupings. We need to add them both to the new groupings list, but also to the ORDER BY - for i, added := range used { - if !added { - groupBy := aggregator.Grouping[i] - newGrouping = append(newGrouping, groupBy) - order.Order = append(order.Order, groupBy.AsOrderBy()) - } - } - } - - aggregator.Grouping = newGrouping - aggrSource, isOrdering := aggregator.Source.(*Ordering) - if isOrdering { - // Transform the query plan tree: - // From: Ordering(1) To: Aggregation - // | | - // Aggregation Ordering(1) - // | | - // Ordering(2) - // | - // - // - // Remove Ordering(2) from the plan tree, as it's redundant - // after pushing down the higher ordering. 
- order.Source = aggrSource.Source - aggrSource.Source = nil // removing from plan tree - aggregator.Source = order - return aggregator, rewrite.NewTree("push ordering under aggregation, removing extra ordering", aggregator), nil - } - return rewrite.Swap(order, aggregator, "push ordering under aggregation") -} - -func canPushLeft(ctx *plancontext.PlanningContext, aj *ApplyJoin, order []ops.OrderBy) bool { - lhs := TableID(aj.LHS) - for _, order := range order { - deps := ctx.SemTable.DirectDeps(order.Inner.Expr) - if !deps.IsSolvedBy(lhs) { - return false - } - } - return true -} - -func tryPushingDownProjection( - ctx *plancontext.PlanningContext, - p *Projection, -) (ops.Operator, *rewrite.ApplyResult, error) { - switch src := p.Source.(type) { - case *Route: - return rewrite.Swap(p, src, "pushed projection under route") - case *ApplyJoin: - if p.FromAggr { - return p, rewrite.SameTree, nil - } - return pushDownProjectionInApplyJoin(ctx, p, src) - case *Vindex: - return pushDownProjectionInVindex(ctx, p, src) - default: - return p, rewrite.SameTree, nil - } -} - -func pushDownProjectionInVindex( - ctx *plancontext.PlanningContext, - p *Projection, - src *Vindex, -) (ops.Operator, *rewrite.ApplyResult, error) { - for _, column := range p.Projections { - expr := column.GetExpr() - _, _, err := src.AddColumn(ctx, aeWrap(expr), true, false) - if err != nil { - return nil, nil, err - } - } - return src, rewrite.NewTree("push projection into vindex", p), nil -} - -func (p *projector) add(e ProjExpr, alias *sqlparser.AliasedExpr) { - p.cols = append(p.cols, e) - p.names = append(p.names, alias) -} - -// pushDownProjectionInApplyJoin pushes down a projection operation into an ApplyJoin operation. -// It processes each input column and creates new JoinColumns for the ApplyJoin operation based on -// the input column's expression. It also creates new Projection operators for the left and right -// children of the ApplyJoin operation, if needed. 
-func pushDownProjectionInApplyJoin( - ctx *plancontext.PlanningContext, - p *Projection, - src *ApplyJoin, -) (ops.Operator, *rewrite.ApplyResult, error) { - if src.LeftJoin { - // we can't push down expression evaluation to the rhs if we are not sure if it will even be executed - return p, rewrite.SameTree, nil - } - lhs, rhs := &projector{}, &projector{} - - src.ColumnsAST = nil - for idx := 0; idx < len(p.Projections); idx++ { - err := splitProjectionAcrossJoin(ctx, src, lhs, rhs, p.Projections[idx], p.Columns[idx]) - if err != nil { - return nil, nil, err - } - } - - if p.TableID != nil { - err := exposeColumnsThroughDerivedTable(ctx, p, src, lhs) - if err != nil { - return nil, nil, err - } - } - - var err error - - // Create and update the Projection operators for the left and right children, if needed. - src.LHS, err = createProjectionWithTheseColumns(src.LHS, lhs, p.TableID, p.Alias) - if err != nil { - return nil, nil, err - } - - src.RHS, err = createProjectionWithTheseColumns(src.RHS, rhs, p.TableID, p.Alias) - if err != nil { - return nil, nil, err - } - - return src, rewrite.NewTree("split projection to either side of join", src), nil -} - -// splitProjectionAcrossJoin creates JoinColumns for all projections, -// and pushes down columns as needed between the LHS and RHS of a join -func splitProjectionAcrossJoin( - ctx *plancontext.PlanningContext, - join *ApplyJoin, - lhs, rhs *projector, - in ProjExpr, - colName *sqlparser.AliasedExpr, -) error { - expr := in.GetExpr() - - // Check if the current expression can reuse an existing column in the ApplyJoin. - if _, found := canReuseColumn(ctx, join.ColumnsAST, expr, joinColumnToExpr); found { - return nil - } - - // Get a JoinColumn for the current expression. - col, err := join.getJoinColumnFor(ctx, colName, false) - if err != nil { - return err - } - - // Update the left and right child columns and names based on the JoinColumn type. 
- switch { - case col.IsPureLeft(): - lhs.add(in, colName) - case col.IsPureRight(): - rhs.add(in, colName) - case col.IsMixedLeftAndRight(): - for _, lhsExpr := range col.LHSExprs { - lhs.add(&UnexploredExpression{E: lhsExpr}, aeWrap(lhsExpr)) - } - rhs.add(&UnexploredExpression{E: col.RHSExpr}, &sqlparser.AliasedExpr{Expr: col.RHSExpr, As: colName.As}) - } - - // Add the new JoinColumn to the ApplyJoin's ColumnsAST. - join.ColumnsAST = append(join.ColumnsAST, col) - return nil -} - -// exposeColumnsThroughDerivedTable rewrites expressions within a join that is inside a derived table -// in order to make them accessible outside the derived table. This is necessary when swapping the -// positions of the derived table and join operation. -// -// For example, consider the input query: -// select ... from (select T1.foo from T1 join T2 on T1.id = T2.id) as t -// If we push the derived table under the join, with T1 on the LHS of the join, we need to expose -// the values of T1.id through the derived table, or they will not be accessible on the RHS. -// -// The function iterates through each join predicate, rewriting the expressions in the predicate's -// LHS expressions to include the derived table. This allows the expressions to be accessed outside -// the derived table. 
-func exposeColumnsThroughDerivedTable(ctx *plancontext.PlanningContext, p *Projection, src *ApplyJoin, lhs *projector) error { - derivedTbl, err := ctx.SemTable.TableInfoFor(*p.TableID) - if err != nil { - return err - } - derivedTblName, err := derivedTbl.Name() - if err != nil { - return err - } - for _, predicate := range src.JoinPredicates { - for idx, expr := range predicate.LHSExprs { - tbl, err := ctx.SemTable.TableInfoForExpr(expr) - if err != nil { - return err - } - tblExpr := tbl.GetExpr() - tblName, err := tblExpr.TableName() - if err != nil { - return err - } - - expr = semantics.RewriteDerivedTableExpression(expr, derivedTbl) - out, err := prefixColNames(tblName, expr) - if err != nil { - return err - } - - alias := sqlparser.UnescapedString(out) - predicate.LHSExprs[idx] = sqlparser.NewColNameWithQualifier(alias, derivedTblName) - lhs.add(&UnexploredExpression{E: out}, &sqlparser.AliasedExpr{Expr: out, As: sqlparser.NewIdentifierCI(alias)}) - } - } - return nil -} - -// prefixColNames adds qualifier prefixes to all ColName:s. 
-// We want to be more explicit than the user was to make sure we never produce invalid SQL -func prefixColNames(tblName sqlparser.TableName, e sqlparser.Expr) (out sqlparser.Expr, err error) { - out = sqlparser.CopyOnRewrite(e, nil, func(cursor *sqlparser.CopyOnWriteCursor) { - col, ok := cursor.Node().(*sqlparser.ColName) - if !ok { - return - } - col.Qualifier = tblName - }, nil).(sqlparser.Expr) - return -} - -func createProjectionWithTheseColumns( - src ops.Operator, - p *projector, - tableID *semantics.TableSet, - alias string, -) (ops.Operator, error) { - if len(p.cols) == 0 { - return src, nil - } - proj, err := createProjection(src) - if err != nil { - return nil, err - } - proj.Columns = p.names - proj.Projections = p.cols - proj.TableID = tableID - proj.Alias = alias - return proj, nil -} - -func stopAtRoute(operator ops.Operator) rewrite.VisitRule { - _, isRoute := operator.(*Route) - return rewrite.VisitRule(!isRoute) -} - -func tryPushingDownLimit(in *Limit) (ops.Operator, *rewrite.ApplyResult, error) { - switch src := in.Source.(type) { - case *Route: - return tryPushingDownLimitInRoute(in, src) - case *Projection: - return rewrite.Swap(in, src, "push limit under projection") - case *Aggregator: - return in, rewrite.SameTree, nil - default: - return setUpperLimit(in) - } -} - -func setUpperLimit(in *Limit) (ops.Operator, *rewrite.ApplyResult, error) { - if in.Pushed { - return in, rewrite.SameTree, nil - } - in.Pushed = true - visitor := func(op ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { - return op, rewrite.SameTree, nil - } - shouldVisit := func(op ops.Operator) rewrite.VisitRule { - switch op := op.(type) { - case *Join, *ApplyJoin: - // we can't push limits down on either side - return rewrite.SkipChildren - case *Route: - newSrc := &Limit{ - Source: op.Source, - AST: &sqlparser.Limit{Rowcount: sqlparser.NewArgument("__upper_limit")}, - Pushed: false, - } - op.Source = newSrc - return 
rewrite.SkipChildren - default: - return rewrite.VisitChildren - } - } - - _, err := rewrite.TopDown(in.Source, TableID, visitor, shouldVisit) - if err != nil { - return nil, nil, err - } - return in, rewrite.SameTree, nil -} - -func tryPushingDownLimitInRoute(in *Limit, src *Route) (ops.Operator, *rewrite.ApplyResult, error) { - if src.IsSingleShard() { - return rewrite.Swap(in, src, "limit pushed into single sharded route") - } - - return setUpperLimit(in) -} - -func pushOrExpandHorizon(ctx *plancontext.PlanningContext, in horizonLike) (ops.Operator, *rewrite.ApplyResult, error) { - if derived, ok := in.(*Derived); ok { - if len(derived.ColumnAliases) > 0 { - return nil, nil, errHorizonNotPlanned() - } - } - rb, isRoute := in.src().(*Route) - if isRoute && rb.IsSingleShard() { - return rewrite.Swap(in, rb, "push horizon into route") - } - - sel, isSel := in.selectStatement().(*sqlparser.Select) - if !isSel { - return nil, nil, errHorizonNotPlanned() - } - - qp, err := in.getQP(ctx) - if err != nil { - return nil, nil, err - } - - needsOrdering := len(qp.OrderExprs) > 0 - canPushDown := isRoute && sel.Having == nil && !needsOrdering && !qp.NeedsAggregation() && !sel.Distinct && sel.Limit == nil - - if canPushDown { - return rewrite.Swap(in, rb, "push horizon into route") - } - - return expandHorizon(ctx, in) -} - -func expandHorizon(ctx *plancontext.PlanningContext, horizon horizonLike) (ops.Operator, *rewrite.ApplyResult, error) { - sel, isSel := horizon.selectStatement().(*sqlparser.Select) - if !isSel { - return nil, nil, errHorizonNotPlanned() - } - - if sel.Having != nil { - return nil, nil, errHorizonNotPlanned() - } - - op, err := createProjectionFromSelect(ctx, horizon) - if err != nil { - return nil, nil, err - } - - qp, err := horizon.getQP(ctx) - if err != nil { - return nil, nil, err - } - - if qp.NeedsDistinct() { - op = &Distinct{ - Source: op, - QP: qp, - } - } - - if len(qp.OrderExprs) > 0 { - op = &Ordering{ - Source: op, - Order: qp.OrderExprs, - 
} - } - - if sel.Limit != nil { - op = &Limit{ - Source: op, - AST: sel.Limit, - } - } - - return op, rewrite.NewTree("expand horizon into smaller components", op), nil -} - -func checkInvalid(aggregations []Aggr, horizon horizonLike) error { - for _, aggregation := range aggregations { - if aggregation.Distinct { - return errHorizonNotPlanned() - } - } - if _, isDerived := horizon.(*Derived); isDerived { - return errHorizonNotPlanned() - } - return nil -} - -func createProjectionFromSelect(ctx *plancontext.PlanningContext, horizon horizonLike) (out ops.Operator, err error) { - qp, err := horizon.getQP(ctx) - if err != nil { - return nil, err - } - - if !qp.NeedsAggregation() { - projX, err := createProjectionWithoutAggr(qp, horizon.src()) - if err != nil { - return nil, err - } - if derived, isDerived := horizon.(*Derived); isDerived { - id := derived.TableId - projX.TableID = &id - projX.Alias = derived.Alias - } - out = projX - - return out, nil - } - - err = checkAggregationSupported(horizon) - if err != nil { - return nil, err - } - - aggregations, err := qp.AggregationExpressions(ctx) - if err != nil { - return nil, err - } - - if err := checkInvalid(aggregations, horizon); err != nil { - return nil, err - } - - a := &Aggregator{ - Source: horizon.src(), - Original: true, - QP: qp, - Grouping: qp.GetGrouping(), - Aggregations: aggregations, - } - - if derived, isDerived := horizon.(*Derived); isDerived { - id := derived.TableId - a.TableID = &id - a.Alias = derived.Alias - } - -outer: - for colIdx, expr := range qp.SelectExprs { - ae, err := expr.GetAliasedExpr() - if err != nil { - return nil, err - } - addedToCol := false - for idx, groupBy := range a.Grouping { - if ctx.SemTable.EqualsExprWithDeps(groupBy.SimplifiedExpr, ae.Expr) { - if !addedToCol { - a.Columns = append(a.Columns, ae) - addedToCol = true - } - if groupBy.ColOffset < 0 { - a.Grouping[idx].ColOffset = colIdx - } - } - } - if addedToCol { - continue - } - for idx, aggr := range 
a.Aggregations { - if ctx.SemTable.EqualsExprWithDeps(aggr.Original.Expr, ae.Expr) && aggr.ColOffset < 0 { - a.Columns = append(a.Columns, ae) - a.Aggregations[idx].ColOffset = colIdx - continue outer - } - } - return nil, vterrors.VT13001(fmt.Sprintf("Could not find the %s in aggregation in the original query", sqlparser.String(ae))) - } - - return a, nil -} - -func createProjectionWithoutAggr(qp *QueryProjection, src ops.Operator) (*Projection, error) { - proj := &Projection{ - Source: src, - } - - for _, e := range qp.SelectExprs { - if _, isStar := e.Col.(*sqlparser.StarExpr); isStar { - return nil, errHorizonNotPlanned() - } - ae, err := e.GetAliasedExpr() - - if err != nil { - return nil, err - } - expr := ae.Expr - if sqlparser.ContainsAggregation(expr) { - aggr, ok := expr.(sqlparser.AggrFunc) - if !ok { - // need to add logic to extract aggregations and pushed them to the top level - return nil, errHorizonNotPlanned() - } - expr = aggr.GetArg() - if expr == nil { - expr = sqlparser.NewIntLiteral("1") - } - } - - proj.addUnexploredExpr(ae, expr) - } - return proj, nil -} - -func aeWrap(e sqlparser.Expr) *sqlparser.AliasedExpr { - return &sqlparser.AliasedExpr{Expr: e} -} - -func makeSureOutputIsCorrect(ctx *plancontext.PlanningContext, oldHorizon ops.Operator, output ops.Operator) (ops.Operator, error) { - // next we use the original Horizon to make sure that the output columns line up with what the user asked for - // in the future, we'll tidy up the results. 
for now, we are just failing these queries and going back to the - // old horizon planning instead - cols, err := output.GetColumns() - if err != nil { - return nil, err - } - - horizon := oldHorizon.(*Horizon) - - sel := sqlparser.GetFirstSelect(horizon.Select) - - if len(sel.SelectExprs) == len(cols) { - return output, nil - } - - if tryTruncateColumnsAt(output, len(sel.SelectExprs)) { - return output, nil - } - - qp, err := horizon.getQP(ctx) - if err != nil { - return nil, err - } - proj, err := createProjectionWithoutAggr(qp, output) - if err != nil { - return nil, err - } - err = proj.passThroughAllColumns(ctx) - if err != nil { - return nil, err - } - return proj, nil -} diff --git a/go/vt/vtgate/planbuilder/operators/info_schema_planning.go b/go/vt/vtgate/planbuilder/operators/info_schema_planning.go index 20b3d29e427..054e978ef87 100644 --- a/go/vt/vtgate/planbuilder/operators/info_schema_planning.go +++ b/go/vt/vtgate/planbuilder/operators/info_schema_planning.go @@ -17,11 +17,10 @@ limitations under the License. 
package operators import ( + "maps" + "slices" "strings" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" @@ -30,7 +29,6 @@ import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -81,7 +79,7 @@ func (isr *InfoSchemaRouting) Clone() Routing { } func (isr *InfoSchemaRouting) updateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (Routing, error) { - isTableSchema, bvName, out := extractInfoSchemaRoutingPredicate(expr, ctx.ReservedVars) + isTableSchema, bvName, out := extractInfoSchemaRoutingPredicate(ctx, expr) if out == nil { return isr, nil } @@ -119,7 +117,7 @@ func (isr *InfoSchemaRouting) Keyspace() *vindexes.Keyspace { return nil } -func extractInfoSchemaRoutingPredicate(in sqlparser.Expr, reservedVars *sqlparser.ReservedVars) (bool, string, sqlparser.Expr) { +func extractInfoSchemaRoutingPredicate(ctx *plancontext.PlanningContext, in sqlparser.Expr) (bool, string, sqlparser.Expr) { cmp, ok := in.(*sqlparser.ComparisonExpr) if !ok || cmp.Operator != sqlparser.EqualOp { return false, "", nil @@ -147,7 +145,7 @@ func extractInfoSchemaRoutingPredicate(in sqlparser.Expr, reservedVars *sqlparse if isSchemaName { name = sqltypes.BvSchemaName } else { - name = reservedVars.ReserveColName(col) + name = ctx.GetReservedArgumentFor(col) } cmp.Right = sqlparser.NewTypedArgument(name, sqltypes.VarChar) return isSchemaName, name, rhs @@ -172,7 +170,7 @@ func isTableOrSchemaRoutable(cmp *sqlparser.ComparisonExpr) ( return false, nil } -func tryMergeInfoSchemaRoutings(routingA, routingB Routing, m merger, lhsRoute, rhsRoute *Route) (ops.Operator, error) { +func tryMergeInfoSchemaRoutings(ctx *plancontext.PlanningContext, routingA, routingB Routing, m 
merger, lhsRoute, rhsRoute *Route) (*Route, error) { // we have already checked type earlier, so this should always be safe isrA := routingA.(*InfoSchemaRouting) isrB := routingB.(*InfoSchemaRouting) @@ -182,9 +180,9 @@ func tryMergeInfoSchemaRoutings(routingA, routingB Routing, m merger, lhsRoute, switch { // if either side has no predicates to help us route, we can merge them case emptyA: - return m.merge(lhsRoute, rhsRoute, isrB) + return m.merge(ctx, lhsRoute, rhsRoute, isrB) case emptyB: - return m.merge(lhsRoute, rhsRoute, isrA) + return m.merge(ctx, lhsRoute, rhsRoute, isrA) // if we have no schema predicates on either side, we can merge if the table info is the same case len(isrA.SysTableTableSchema) == 0 && len(isrB.SysTableTableSchema) == 0: @@ -195,14 +193,14 @@ func tryMergeInfoSchemaRoutings(routingA, routingB Routing, m merger, lhsRoute, } isrA.SysTableTableName[k] = expr } - return m.merge(lhsRoute, rhsRoute, isrA) + return m.merge(ctx, lhsRoute, rhsRoute, isrA) // if both sides have the same schema predicate, we can safely merge them case sqlparser.Equals.Exprs(isrA.SysTableTableSchema, isrB.SysTableTableSchema): for k, expr := range isrB.SysTableTableName { isrA.SysTableTableName[k] = expr } - return m.merge(lhsRoute, rhsRoute, isrA) + return m.merge(ctx, lhsRoute, rhsRoute, isrA) // give up default: diff --git a/go/vt/vtgate/planbuilder/operators/insert.go b/go/vt/vtgate/planbuilder/operators/insert.go index 8af909a572b..f96d1e5cbb6 100644 --- a/go/vt/vtgate/planbuilder/operators/insert.go +++ b/go/vt/vtgate/planbuilder/operators/insert.go @@ -17,9 +17,16 @@ limitations under the License. 
package operators import ( + "strconv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -88,18 +95,12 @@ type Generate struct { Pinned []byte } -func (i *Insert) Description() ops.OpDescription { - return ops.OpDescription{ - OperatorType: "Insert", - } -} - func (i *Insert) ShortDescription() string { return i.VTable.String() } func (i *Insert) GetOrdering() ([]ops.OrderBy, error) { - panic("does not expect insert operator to receive get ordering call") + return nil, nil } var _ ops.Operator = (*Insert)(nil) @@ -125,3 +126,330 @@ func (i *Insert) Clone(inputs []ops.Operator) ops.Operator { func (i *Insert) TablesUsed() []string { return SingleQualifiedIdentifier(i.VTable.Keyspace, i.VTable.Name) } + +func (i *Insert) Statement() sqlparser.Statement { + return i.AST +} + +func createOperatorFromInsert(ctx *plancontext.PlanningContext, ins *sqlparser.Insert) (ops.Operator, error) { + tableInfo, qt, err := createQueryTableForDML(ctx, ins.Table, nil) + if err != nil { + return nil, err + } + + vindexTable, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "insert") + if err != nil { + return nil, err + } + + insOp, err := createInsertOperator(ctx, ins, vindexTable, routing) + if err != nil { + return nil, err + } + + // Find the foreign key mode and for unmanaged foreign-key-mode, we don't need to do anything. 
+ ksMode, err := ctx.VSchema.ForeignKeyMode(vindexTable.Keyspace.Name) + if err != nil { + return nil, err + } + if ksMode != vschemapb.Keyspace_managed { + return insOp, nil + } + + parentFKsForInsert := vindexTable.ParentFKsNeedsHandling(ctx.VerifyAllFKs, ctx.ParentFKToIgnore) + if len(parentFKsForInsert) > 0 { + return nil, vterrors.VT12002() + } + if len(ins.OnDup) == 0 { + return insOp, nil + } + + parentFksForUpdate, childFksForUpdate := getFKRequirementsForUpdate(ctx, sqlparser.UpdateExprs(ins.OnDup), vindexTable) + if len(parentFksForUpdate) == 0 && len(childFksForUpdate) == 0 { + return insOp, nil + } + return nil, vterrors.VT12001("ON DUPLICATE KEY UPDATE with foreign keys") +} + +func createInsertOperator(ctx *plancontext.PlanningContext, insStmt *sqlparser.Insert, vTbl *vindexes.Table, routing Routing) (ops.Operator, error) { + if _, target := routing.(*TargetedRouting); target && vTbl.Pinned == nil { + return nil, vterrors.VT12001("INSERT with a target destination") + } + + insOp := &Insert{ + VTable: vTbl, + AST: insStmt, + } + route := &Route{ + Source: insOp, + Routing: routing, + } + + // Table column list is nil then add all the columns + // If the column list is empty then add only the auto-inc column and + // this happens on calling modifyForAutoinc + if insStmt.Columns == nil && valuesProvided(insStmt.Rows) { + if vTbl.ColumnListAuthoritative { + insStmt = populateInsertColumnlist(insStmt, vTbl) + } else { + return nil, vterrors.VT09004() + } + } + + // modify column list or values for autoincrement column. + autoIncGen, err := modifyForAutoinc(insStmt, vTbl) + if err != nil { + return nil, err + } + insOp.AutoIncrement = autoIncGen + + // set insert ignore. 
+ insOp.Ignore = bool(insStmt.Ignore) || insStmt.OnDup != nil + + insOp.ColVindexes = getColVindexes(insOp) + switch rows := insStmt.Rows.(type) { + case sqlparser.Values: + route.Source, err = insertRowsPlan(insOp, insStmt, rows) + if err != nil { + return nil, err + } + case sqlparser.SelectStatement: + route.Source, err = insertSelectPlan(ctx, insOp, insStmt, rows) + if err != nil { + return nil, err + } + } + return route, nil +} + +func insertSelectPlan(ctx *plancontext.PlanningContext, insOp *Insert, ins *sqlparser.Insert, sel sqlparser.SelectStatement) (*Insert, error) { + if columnMismatch(insOp.AutoIncrement, ins, sel) { + return nil, vterrors.VT03006() + } + + selOp, err := PlanQuery(ctx, sel) + if err != nil { + return nil, err + } + + // select plan will be taken as input to insert rows into the table. + insOp.Input = selOp + + // When the table you are steaming data from and table you are inserting from are same. + // Then due to locking of the index range on the table we might not be able to insert into the table. + // Therefore, instead of streaming, this flag will ensure the records are first read and then inserted. + insertTbl := insOp.TablesUsed()[0] + selTables := TablesUsed(selOp) + for _, tbl := range selTables { + if insertTbl == tbl { + insOp.ForceNonStreaming = true + break + } + } + + if len(insOp.ColVindexes) == 0 { + return insOp, nil + } + + colVindexes := insOp.ColVindexes + vv := make([][]int, len(colVindexes)) + for idx, colVindex := range colVindexes { + for _, col := range colVindex.Columns { + err := checkAndErrIfVindexChanging(sqlparser.UpdateExprs(ins.OnDup), col) + if err != nil { + return nil, err + } + + colNum := findColumn(ins, col) + // sharding column values should be provided in the insert. 
+ if colNum == -1 && idx == 0 { + return nil, vterrors.VT09003(col) + } + vv[idx] = append(vv[idx], colNum) + } + } + insOp.VindexValueOffset = vv + return insOp, nil +} + +func columnMismatch(gen *Generate, ins *sqlparser.Insert, sel sqlparser.SelectStatement) bool { + origColCount := len(ins.Columns) + if gen != nil && gen.added { + // One column got added to the insert query ast for auto increment column. + // adjusting it here for comparison. + origColCount-- + } + if origColCount < sel.GetColumnCount() { + return true + } + if origColCount > sel.GetColumnCount() { + sel := sqlparser.GetFirstSelect(sel) + var hasStarExpr bool + for _, sExpr := range sel.SelectExprs { + if _, hasStarExpr = sExpr.(*sqlparser.StarExpr); hasStarExpr { + break + } + } + if !hasStarExpr { + return true + } + } + return false +} + +func insertRowsPlan(insOp *Insert, ins *sqlparser.Insert, rows sqlparser.Values) (*Insert, error) { + for _, row := range rows { + if len(ins.Columns) != len(row) { + return nil, vterrors.VT03006() + } + } + + if len(insOp.ColVindexes) == 0 { + return insOp, nil + } + + colVindexes := insOp.ColVindexes + routeValues := make([][][]evalengine.Expr, len(colVindexes)) + for vIdx, colVindex := range colVindexes { + routeValues[vIdx] = make([][]evalengine.Expr, len(colVindex.Columns)) + for colIdx, col := range colVindex.Columns { + err := checkAndErrIfVindexChanging(sqlparser.UpdateExprs(ins.OnDup), col) + if err != nil { + return nil, err + } + routeValues[vIdx][colIdx] = make([]evalengine.Expr, len(rows)) + colNum, _ := findOrAddColumn(ins, col) + for rowNum, row := range rows { + innerpv, err := evalengine.Translate(row[colNum], nil) + if err != nil { + return nil, err + } + routeValues[vIdx][colIdx][rowNum] = innerpv + } + } + } + // here we are replacing the row value with the argument. 
+ for _, colVindex := range colVindexes { + for _, col := range colVindex.Columns { + colNum, _ := findOrAddColumn(ins, col) + for rowNum, row := range rows { + name := engine.InsertVarName(col, rowNum) + row[colNum] = sqlparser.NewArgument(name) + } + } + } + insOp.VindexValues = routeValues + return insOp, nil +} + +func valuesProvided(rows sqlparser.InsertRows) bool { + switch values := rows.(type) { + case sqlparser.Values: + return len(values) >= 0 && len(values[0]) > 0 + case sqlparser.SelectStatement: + return true + } + return false +} + +func getColVindexes(insOp *Insert) (colVindexes []*vindexes.ColumnVindex) { + // For unsharded table the Column Vindex does not mean anything. + // And therefore should be ignored. + if !insOp.VTable.Keyspace.Sharded { + return + } + for _, colVindex := range insOp.VTable.ColumnVindexes { + if colVindex.IsPartialVindex() { + continue + } + colVindexes = append(colVindexes, colVindex) + } + return +} + +func checkAndErrIfVindexChanging(setClauses sqlparser.UpdateExprs, col sqlparser.IdentifierCI) error { + for _, assignment := range setClauses { + if col.Equal(assignment.Name.Name) { + valueExpr, isValuesFuncExpr := assignment.Expr.(*sqlparser.ValuesFuncExpr) + // update on duplicate key is changing the vindex column, not supported. + if !isValuesFuncExpr || !valueExpr.Name.Name.Equal(assignment.Name.Name) { + return vterrors.VT12001("DML cannot update vindex column") + } + return nil + } + } + return nil +} + +// findOrAddColumn finds the position of a column in the insert. If it's +// absent it appends it to the with NULL values. +// It returns the position of the column and also boolean representing whether it was added or already present. 
+func findOrAddColumn(ins *sqlparser.Insert, col sqlparser.IdentifierCI) (int, bool) { + colNum := findColumn(ins, col) + if colNum >= 0 { + return colNum, false + } + colOffset := len(ins.Columns) + ins.Columns = append(ins.Columns, col) + if rows, ok := ins.Rows.(sqlparser.Values); ok { + for i := range rows { + rows[i] = append(rows[i], &sqlparser.NullVal{}) + } + } + return colOffset, true +} + +// findColumn returns the column index where it is placed on the insert column list. +// Otherwise, return -1 when not found. +func findColumn(ins *sqlparser.Insert, col sqlparser.IdentifierCI) int { + for i, column := range ins.Columns { + if col.Equal(column) { + return i + } + } + return -1 +} + +func populateInsertColumnlist(ins *sqlparser.Insert, table *vindexes.Table) *sqlparser.Insert { + cols := make(sqlparser.Columns, 0, len(table.Columns)) + for _, c := range table.Columns { + cols = append(cols, c.Name) + } + ins.Columns = cols + return ins +} + +// modifyForAutoinc modifies the AST and the plan to generate necessary autoinc values. +// For row values cases, bind variable names are generated using baseName. 
+func modifyForAutoinc(ins *sqlparser.Insert, vTable *vindexes.Table) (*Generate, error) { + if vTable.AutoIncrement == nil { + return nil, nil + } + gen := &Generate{ + Keyspace: vTable.AutoIncrement.Sequence.Keyspace, + TableName: sqlparser.TableName{Name: vTable.AutoIncrement.Sequence.Name}, + Pinned: vTable.AutoIncrement.Sequence.Pinned, + } + colNum, newColAdded := findOrAddColumn(ins, vTable.AutoIncrement.Column) + switch rows := ins.Rows.(type) { + case sqlparser.SelectStatement: + gen.Offset = colNum + gen.added = newColAdded + case sqlparser.Values: + autoIncValues := make([]evalengine.Expr, 0, len(rows)) + for rowNum, row := range rows { + // Support the DEFAULT keyword by treating it as null + if _, ok := row[colNum].(*sqlparser.Default); ok { + row[colNum] = &sqlparser.NullVal{} + } + expr, err := evalengine.Translate(row[colNum], nil) + if err != nil { + return nil, err + } + autoIncValues = append(autoIncValues, expr) + row[colNum] = sqlparser.NewArgument(engine.SeqVarName + strconv.Itoa(rowNum)) + } + gen.Values = evalengine.NewTupleExpr(autoIncValues...) 
+ } + return gen, nil +} diff --git a/go/vt/vtgate/planbuilder/operators/join.go b/go/vt/vtgate/planbuilder/operators/join.go index 5470f3ac378..693b7a75d8e 100644 --- a/go/vt/vtgate/planbuilder/operators/join.go +++ b/go/vt/vtgate/planbuilder/operators/join.go @@ -18,6 +18,7 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" @@ -79,10 +80,7 @@ func (j *Join) Compact(ctx *plancontext.PlanningContext) (ops.Operator, *rewrite NoDeps: ctx.SemTable.AndExpressions(lqg.NoDeps, rqg.NoDeps), } if j.Predicate != nil { - err := newOp.collectPredicate(ctx, j.Predicate) - if err != nil { - return nil, rewrite.SameTree, err - } + newOp.collectPredicate(ctx, j.Predicate) } return newOp, rewrite.NewTree("merge querygraphs into a single one", newOp), nil } @@ -91,6 +89,10 @@ func createOuterJoin(tableExpr *sqlparser.JoinTableExpr, lhs, rhs ops.Operator) if tableExpr.Join == sqlparser.RightJoinType { lhs, rhs = rhs, lhs } + subq, _ := getSubQuery(tableExpr.Condition.On) + if subq != nil { + return nil, vterrors.VT12001("subquery in outer join predicate") + } predicate := tableExpr.Condition.On sqlparser.RemoveKeyspaceFromColName(predicate) return &Join{LHS: lhs, RHS: rhs, LeftJoin: true, Predicate: predicate}, nil @@ -112,16 +114,25 @@ func createJoin(ctx *plancontext.PlanningContext, LHS, RHS ops.Operator) ops.Ope func createInnerJoin(ctx *plancontext.PlanningContext, tableExpr *sqlparser.JoinTableExpr, lhs, rhs ops.Operator) (ops.Operator, error) { op := createJoin(ctx, lhs, rhs) - pred := tableExpr.Condition.On - if pred != nil { - var err error - sqlparser.RemoveKeyspaceFromColName(pred) + sqc := &SubQueryBuilder{} + outerID := TableID(op) + joinPredicate := tableExpr.Condition.On + sqlparser.RemoveKeyspaceFromColName(joinPredicate) + exprs := 
sqlparser.SplitAndExpression(nil, joinPredicate) + for _, pred := range exprs { + subq, err := sqc.handleSubquery(ctx, pred, outerID) + if err != nil { + return nil, err + } + if subq != nil { + continue + } op, err = op.AddPredicate(ctx, pred) if err != nil { return nil, err } } - return op, nil + return sqc.getRootOperator(op), nil } func (j *Join) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { @@ -159,15 +170,6 @@ func (j *Join) AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser return nil } -func (j *Join) Description() ops.OpDescription { - return ops.OpDescription{ - OperatorType: "Join", - Other: map[string]any{ - "Predicate": sqlparser.String(j.Predicate), - }, - } -} - func (j *Join) ShortDescription() string { return sqlparser.String(j.Predicate) } diff --git a/go/vt/vtgate/planbuilder/operators/join_merging.go b/go/vt/vtgate/planbuilder/operators/join_merging.go new file mode 100644 index 00000000000..f31259fddc5 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/join_merging.go @@ -0,0 +1,216 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "fmt" + "reflect" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" +) + +// mergeJoinInputs checks whether two operators can be merged into a single one. 
+// If they can be merged, a new operator with the merged routing is returned +// If they cannot be merged, nil is returned. +func mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, joinPredicates []sqlparser.Expr, m merger) (*Route, error) { + lhsRoute, rhsRoute, routingA, routingB, a, b, sameKeyspace := prepareInputRoutes(lhs, rhs) + if lhsRoute == nil { + return nil, nil + } + + switch { + // if either side is a dual query, we can always merge them together + case a == dual: + return m.merge(ctx, lhsRoute, rhsRoute, routingB) + case b == dual: + return m.merge(ctx, lhsRoute, rhsRoute, routingA) + + // an unsharded/reference route can be merged with anything going to that keyspace + case a == anyShard && sameKeyspace: + return m.merge(ctx, lhsRoute, rhsRoute, routingB) + case b == anyShard && sameKeyspace: + return m.merge(ctx, lhsRoute, rhsRoute, routingA) + + // None routing can always be merged, as long as we are aiming for the same keyspace + case a == none && sameKeyspace: + return m.merge(ctx, lhsRoute, rhsRoute, routingA) + case b == none && sameKeyspace: + return m.merge(ctx, lhsRoute, rhsRoute, routingB) + + // infoSchema routing is complex, so we handle it in a separate method + case a == infoSchema && b == infoSchema: + return tryMergeInfoSchemaRoutings(ctx, routingA, routingB, m, lhsRoute, rhsRoute) + + // sharded routing is complex, so we handle it in a separate method + case a == sharded && b == sharded: + return tryMergeJoinShardedRouting(ctx, lhsRoute, rhsRoute, m, joinPredicates) + + default: + return nil, nil + } +} + +func prepareInputRoutes(lhs ops.Operator, rhs ops.Operator) (*Route, *Route, Routing, Routing, routingType, routingType, bool) { + lhsRoute, rhsRoute := operatorsToRoutes(lhs, rhs) + if lhsRoute == nil || rhsRoute == nil { + return nil, nil, nil, nil, 0, 0, false + } + + lhsRoute, rhsRoute, routingA, routingB, sameKeyspace := getRoutesOrAlternates(lhsRoute, rhsRoute) + + a, b := getRoutingType(routingA), 
getRoutingType(routingB) + if getTypeName(routingA) < getTypeName(routingB) { + // while deciding if two routes can be merged, the LHS/RHS order of the routes is not important. + // for the actual merging, we still need to remember which side was inner and which was outer for subqueries + a, b = b, a + routingA, routingB = routingB, routingA + } + + return lhsRoute, rhsRoute, routingA, routingB, a, b, sameKeyspace +} + +type ( + merger interface { + mergeShardedRouting(ctx *plancontext.PlanningContext, r1, r2 *ShardedRouting, op1, op2 *Route) (*Route, error) + merge(ctx *plancontext.PlanningContext, op1, op2 *Route, r Routing) (*Route, error) + } + + joinMerger struct { + predicates []sqlparser.Expr + innerJoin bool + } + + routingType int +) + +const ( + sharded routingType = iota + infoSchema + anyShard + none + dual + targeted +) + +func (rt routingType) String() string { + switch rt { + case sharded: + return "sharded" + case infoSchema: + return "infoSchema" + case anyShard: + return "anyShard" + case none: + return "none" + case dual: + return "dual" + case targeted: + return "targeted" + } + panic("switch should be exhaustive") +} + +// getRoutesOrAlternates gets the Routings from each Route. 
If they are from different keyspaces, +// we check if this is a table with alternates in other keyspaces that we can use +func getRoutesOrAlternates(lhsRoute, rhsRoute *Route) (*Route, *Route, Routing, Routing, bool) { + routingA := lhsRoute.Routing + routingB := rhsRoute.Routing + sameKeyspace := routingA.Keyspace() == routingB.Keyspace() + + if sameKeyspace || + // if either of these is missing a keyspace, we are not going to be able to find an alternative + routingA.Keyspace() == nil || + routingB.Keyspace() == nil { + return lhsRoute, rhsRoute, routingA, routingB, sameKeyspace + } + + if refA, ok := routingA.(*AnyShardRouting); ok { + if altARoute := refA.AlternateInKeyspace(routingB.Keyspace()); altARoute != nil { + return altARoute, rhsRoute, altARoute.Routing, routingB, true + } + } + + if refB, ok := routingB.(*AnyShardRouting); ok { + if altBRoute := refB.AlternateInKeyspace(routingA.Keyspace()); altBRoute != nil { + return lhsRoute, altBRoute, routingA, altBRoute.Routing, true + } + } + + return lhsRoute, rhsRoute, routingA, routingB, sameKeyspace +} + +func getTypeName(myvar interface{}) string { + return reflect.TypeOf(myvar).String() +} + +func getRoutingType(r Routing) routingType { + switch r.(type) { + case *InfoSchemaRouting: + return infoSchema + case *AnyShardRouting: + return anyShard + case *DualRouting: + return dual + case *ShardedRouting: + return sharded + case *NoneRouting: + return none + case *TargetedRouting: + return targeted + } + panic(fmt.Sprintf("switch should be exhaustive, got %T", r)) +} + +func newJoinMerge(predicates []sqlparser.Expr, innerJoin bool) merger { + return &joinMerger{ + predicates: predicates, + innerJoin: innerJoin, + } +} + +func (jm *joinMerger) mergeShardedRouting(ctx *plancontext.PlanningContext, r1, r2 *ShardedRouting, op1, op2 *Route) (*Route, error) { + return jm.merge(ctx, op1, op2, mergeShardedRouting(r1, r2)) +} + +func mergeShardedRouting(r1 *ShardedRouting, r2 *ShardedRouting) *ShardedRouting { + tr 
:= &ShardedRouting{ + VindexPreds: append(r1.VindexPreds, r2.VindexPreds...), + keyspace: r1.keyspace, + RouteOpCode: r1.RouteOpCode, + SeenPredicates: append(r1.SeenPredicates, r2.SeenPredicates...), + } + if r1.SelectedVindex() == r2.SelectedVindex() { + tr.Selected = r1.Selected + } else { + tr.PickBestAvailableVindex() + } + return tr +} + +func (jm *joinMerger) getApplyJoin(ctx *plancontext.PlanningContext, op1, op2 *Route) *ApplyJoin { + return NewApplyJoin(op1.Source, op2.Source, ctx.SemTable.AndExpressions(jm.predicates...), !jm.innerJoin) +} + +func (jm *joinMerger) merge(ctx *plancontext.PlanningContext, op1, op2 *Route, r Routing) (*Route, error) { + return &Route{ + Source: jm.getApplyJoin(ctx, op1, op2), + MergedWith: []*Route{op2}, + Routing: r, + }, nil +} diff --git a/go/vt/vtgate/planbuilder/operators/limit.go b/go/vt/vtgate/planbuilder/operators/limit.go index a2531b4bde5..79a6980b937 100644 --- a/go/vt/vtgate/planbuilder/operators/limit.go +++ b/go/vt/vtgate/planbuilder/operators/limit.go @@ -56,35 +56,24 @@ func (l *Limit) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Ex return l, nil } -func (l *Limit) AddColumn(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, reuseExisting, addToGroupBy bool) (ops.Operator, int, error) { - newSrc, offset, err := l.Source.AddColumn(ctx, expr, reuseExisting, addToGroupBy) - if err != nil { - return nil, 0, err - } - l.Source = newSrc - return l, offset, nil +func (l *Limit) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool, expr *sqlparser.AliasedExpr) (int, error) { + return l.Source.AddColumn(ctx, reuse, gb, expr) } -func (l *Limit) GetColumns() ([]*sqlparser.AliasedExpr, error) { - return l.Source.GetColumns() +func (l *Limit) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) { + return l.Source.FindCol(ctx, expr, underRoute) } -func (l *Limit) GetOrdering() ([]ops.OrderBy, error) { - return l.Source.GetOrdering() +func (l 
*Limit) GetColumns(ctx *plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return l.Source.GetColumns(ctx) } -func (l *Limit) Description() ops.OpDescription { - other := map[string]any{} - if l.AST.Offset != nil { - other["Offset"] = sqlparser.String(l.AST.Offset) - } - if l.AST.Rowcount != nil { - other["RowCount"] = sqlparser.String(l.AST.Rowcount) - } - return ops.OpDescription{ - OperatorType: "Limit", - Other: other, - } +func (l *Limit) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return l.Source.GetSelectExprs(ctx) +} + +func (l *Limit) GetOrdering() ([]ops.OrderBy, error) { + return l.Source.GetOrdering() } func (l *Limit) ShortDescription() string { diff --git a/go/vt/vtgate/planbuilder/operators/logical.go b/go/vt/vtgate/planbuilder/operators/logical.go deleted file mode 100644 index 10ea729727d..00000000000 --- a/go/vt/vtgate/planbuilder/operators/logical.go +++ /dev/null @@ -1,678 +0,0 @@ -/* -Copyright 2022 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package operators - -import ( - "fmt" - "strconv" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" - "vitess.io/vitess/go/vt/vtgate/vindexes" -) - -// createLogicalOperatorFromAST creates an operator tree that represents the input SELECT or UNION query -func createLogicalOperatorFromAST(ctx *plancontext.PlanningContext, selStmt sqlparser.Statement) (op ops.Operator, err error) { - switch node := selStmt.(type) { - case *sqlparser.Select: - op, err = createOperatorFromSelect(ctx, node) - case *sqlparser.Union: - op, err = createOperatorFromUnion(ctx, node) - case *sqlparser.Update: - op, err = createOperatorFromUpdate(ctx, node) - case *sqlparser.Delete: - op, err = createOperatorFromDelete(ctx, node) - case *sqlparser.Insert: - op, err = createOperatorFromInsert(ctx, node) - default: - err = vterrors.VT12001(fmt.Sprintf("operator: %T", selStmt)) - } - if err != nil { - return nil, err - } - - return op, nil -} - -// createOperatorFromSelect creates an operator tree that represents the input SELECT query -func createOperatorFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select) (ops.Operator, error) { - subq, err := createSubqueryFromStatement(ctx, sel) - if err != nil { - return nil, err - } - op, err := crossJoin(ctx, sel.From) - if err != nil { - return nil, err - } - if sel.Where != nil { - exprs := sqlparser.SplitAndExpression(nil, sel.Where.Expr) - for _, expr := range exprs { - sqlparser.RemoveKeyspaceFromColName(expr) - op, err = op.AddPredicate(ctx, expr) - if err != nil { - return nil, err - } - addColumnEquality(ctx, expr) - } - } - if subq == nil { - return &Horizon{ - Source: op, - Select: sel, - }, nil - } - subq.Outer = op - return &Horizon{ - Source: subq, - 
Select: sel, - }, nil -} - -func createOperatorFromUnion(ctx *plancontext.PlanningContext, node *sqlparser.Union) (ops.Operator, error) { - opLHS, err := createLogicalOperatorFromAST(ctx, node.Left) - if err != nil { - return nil, err - } - - _, isRHSUnion := node.Right.(*sqlparser.Union) - if isRHSUnion { - return nil, vterrors.VT12001("nesting of UNIONs on the right-hand side") - } - opRHS, err := createLogicalOperatorFromAST(ctx, node.Right) - if err != nil { - return nil, err - } - - union := &Union{ - Distinct: node.Distinct, - Sources: []ops.Operator{opLHS, opRHS}, - } - return &Horizon{Source: union, Select: node}, nil -} - -func createOperatorFromUpdate(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update) (ops.Operator, error) { - tableInfo, qt, err := createQueryTableForDML(ctx, updStmt.TableExprs[0], updStmt.Where) - if err != nil { - return nil, err - } - - assignments := make(map[string]sqlparser.Expr) - for _, set := range updStmt.Exprs { - assignments[set.Name.Name.String()] = set.Expr - } - - vindexTable, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "update") - if err != nil { - return nil, err - } - - vp, cvv, ovq, err := getUpdateVindexInformation(updStmt, vindexTable, qt.ID, qt.Predicates) - if err != nil { - return nil, err - } - - tr, ok := routing.(*ShardedRouting) - if ok { - tr.VindexPreds = vp - } - - for _, predicate := range qt.Predicates { - var err error - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return nil, err - } - } - - if routing.OpCode() == engine.Scatter && updStmt.Limit != nil { - // TODO systay: we should probably check for other op code types - IN could also hit multiple shards (2022-04-07) - return nil, vterrors.VT12001("multi shard UPDATE with LIMIT") - } - - r := &Route{ - Source: &Update{ - QTable: qt, - VTable: vindexTable, - Assignments: assignments, - ChangedVindexValues: cvv, - OwnedVindexQuery: ovq, - AST: updStmt, - }, - Routing: routing, - } - - subq, err := 
createSubqueryFromStatement(ctx, updStmt) - if err != nil { - return nil, err - } - if subq == nil { - return r, nil - } - subq.Outer = r - return subq, nil -} - -func createOperatorFromDelete(ctx *plancontext.PlanningContext, deleteStmt *sqlparser.Delete) (ops.Operator, error) { - tableInfo, qt, err := createQueryTableForDML(ctx, deleteStmt.TableExprs[0], deleteStmt.Where) - if err != nil { - return nil, err - } - - vindexTable, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "delete") - if err != nil { - return nil, err - } - - del := &Delete{ - QTable: qt, - VTable: vindexTable, - AST: deleteStmt, - } - route := &Route{ - Source: del, - Routing: routing, - } - - if !vindexTable.Keyspace.Sharded || vindexTable.Pinned != nil { - return route, nil - } - - primaryVindex, vindexAndPredicates, err := getVindexInformation(qt.ID, qt.Predicates, vindexTable) - if err != nil { - return nil, err - } - - tr, ok := routing.(*ShardedRouting) - if ok { - tr.VindexPreds = vindexAndPredicates - } - - var ovq string - if len(vindexTable.Owned) > 0 { - tblExpr := &sqlparser.AliasedTableExpr{Expr: sqlparser.TableName{Name: vindexTable.Name}, As: qt.Alias.As} - ovq = generateOwnedVindexQuery(tblExpr, deleteStmt, vindexTable, primaryVindex.Columns) - } - - del.OwnedVindexQuery = ovq - - for _, predicate := range qt.Predicates { - var err error - route.Routing, err = UpdateRoutingLogic(ctx, predicate, route.Routing) - if err != nil { - return nil, err - } - } - - if routing.OpCode() == engine.Scatter && deleteStmt.Limit != nil { - // TODO systay: we should probably check for other op code types - IN could also hit multiple shards (2022-04-07) - return nil, vterrors.VT12001("multi shard DELETE with LIMIT") - } - - subq, err := createSubqueryFromStatement(ctx, deleteStmt) - if err != nil { - return nil, err - } - if subq == nil { - return route, nil - } - subq.Outer = route - return subq, nil -} - -func createOperatorFromInsert(ctx *plancontext.PlanningContext, ins 
*sqlparser.Insert) (ops.Operator, error) { - tableInfo, qt, err := createQueryTableForDML(ctx, ins.Table, nil) - if err != nil { - return nil, err - } - - vindexTable, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "insert") - if err != nil { - return nil, err - } - - if _, target := routing.(*TargetedRouting); target { - return nil, vterrors.VT12001("INSERT with a target destination") - } - - insOp := &Insert{ - VTable: vindexTable, - AST: ins, - } - route := &Route{ - Source: insOp, - Routing: routing, - } - - // Table column list is nil then add all the columns - // If the column list is empty then add only the auto-inc column and - // this happens on calling modifyForAutoinc - if ins.Columns == nil && valuesProvided(ins.Rows) { - if vindexTable.ColumnListAuthoritative { - ins = populateInsertColumnlist(ins, vindexTable) - } else { - return nil, vterrors.VT09004() - } - } - - // modify column list or values for autoincrement column. - autoIncGen, err := modifyForAutoinc(ins, vindexTable) - if err != nil { - return nil, err - } - insOp.AutoIncrement = autoIncGen - - // set insert ignore. - insOp.Ignore = bool(ins.Ignore) || ins.OnDup != nil - - insOp.ColVindexes = getColVindexes(insOp) - switch rows := ins.Rows.(type) { - case sqlparser.Values: - route.Source, err = insertRowsPlan(insOp, ins, rows) - if err != nil { - return nil, err - } - case sqlparser.SelectStatement: - route.Source, err = insertSelectPlan(ctx, insOp, ins, rows) - if err != nil { - return nil, err - } - } - return route, nil -} - -func insertSelectPlan(ctx *plancontext.PlanningContext, insOp *Insert, ins *sqlparser.Insert, sel sqlparser.SelectStatement) (*Insert, error) { - if columnMismatch(insOp.AutoIncrement, ins, sel) { - return nil, vterrors.VT03006() - } - - selOp, err := PlanQuery(ctx, sel) - if err != nil { - return nil, err - } - - // select plan will be taken as input to insert rows into the table. 
- insOp.Input = selOp - - // When the table you are steaming data from and table you are inserting from are same. - // Then due to locking of the index range on the table we might not be able to insert into the table. - // Therefore, instead of streaming, this flag will ensure the records are first read and then inserted. - insertTbl := insOp.TablesUsed()[0] - selTables := TablesUsed(selOp) - for _, tbl := range selTables { - if insertTbl == tbl { - insOp.ForceNonStreaming = true - break - } - } - - if len(insOp.ColVindexes) == 0 { - return insOp, nil - } - - colVindexes := insOp.ColVindexes - vv := make([][]int, len(colVindexes)) - for idx, colVindex := range colVindexes { - for _, col := range colVindex.Columns { - err := checkAndErrIfVindexChanging(sqlparser.UpdateExprs(ins.OnDup), col) - if err != nil { - return nil, err - } - - colNum := findColumn(ins, col) - // sharding column values should be provided in the insert. - if colNum == -1 && idx == 0 { - return nil, vterrors.VT09003(col) - } - vv[idx] = append(vv[idx], colNum) - } - } - insOp.VindexValueOffset = vv - return insOp, nil -} - -func columnMismatch(gen *Generate, ins *sqlparser.Insert, sel sqlparser.SelectStatement) bool { - origColCount := len(ins.Columns) - if gen != nil && gen.added { - // One column got added to the insert query ast for auto increment column. - // adjusting it here for comparison. 
- origColCount-- - } - if origColCount < sel.GetColumnCount() { - return true - } - if origColCount > sel.GetColumnCount() { - sel := sqlparser.GetFirstSelect(sel) - var hasStarExpr bool - for _, sExpr := range sel.SelectExprs { - if _, hasStarExpr = sExpr.(*sqlparser.StarExpr); hasStarExpr { - break - } - } - if !hasStarExpr { - return true - } - } - return false -} - -func insertRowsPlan(insOp *Insert, ins *sqlparser.Insert, rows sqlparser.Values) (*Insert, error) { - for _, row := range rows { - if len(ins.Columns) != len(row) { - return nil, vterrors.VT03006() - } - } - - if len(insOp.ColVindexes) == 0 { - return insOp, nil - } - - colVindexes := insOp.ColVindexes - routeValues := make([][][]evalengine.Expr, len(colVindexes)) - for vIdx, colVindex := range colVindexes { - routeValues[vIdx] = make([][]evalengine.Expr, len(colVindex.Columns)) - for colIdx, col := range colVindex.Columns { - err := checkAndErrIfVindexChanging(sqlparser.UpdateExprs(ins.OnDup), col) - if err != nil { - return nil, err - } - routeValues[vIdx][colIdx] = make([]evalengine.Expr, len(rows)) - colNum, _ := findOrAddColumn(ins, col) - for rowNum, row := range rows { - innerpv, err := evalengine.Translate(row[colNum], nil) - if err != nil { - return nil, err - } - routeValues[vIdx][colIdx][rowNum] = innerpv - } - } - } - // here we are replacing the row value with the argument. 
- for _, colVindex := range colVindexes { - for _, col := range colVindex.Columns { - colNum, _ := findOrAddColumn(ins, col) - for rowNum, row := range rows { - name := engine.InsertVarName(col, rowNum) - row[colNum] = sqlparser.NewArgument(name) - } - } - } - insOp.VindexValues = routeValues - return insOp, nil -} - -func valuesProvided(rows sqlparser.InsertRows) bool { - switch values := rows.(type) { - case sqlparser.Values: - return len(values) >= 0 && len(values[0]) > 0 - case sqlparser.SelectStatement: - return true - } - return false -} - -func getColVindexes(insOp *Insert) (colVindexes []*vindexes.ColumnVindex) { - // For unsharded table the Column Vindex does not mean anything. - // And therefore should be ignored. - if !insOp.VTable.Keyspace.Sharded { - return - } - for _, colVindex := range insOp.VTable.ColumnVindexes { - if colVindex.IsPartialVindex() { - continue - } - colVindexes = append(colVindexes, colVindex) - } - return -} - -func checkAndErrIfVindexChanging(setClauses sqlparser.UpdateExprs, col sqlparser.IdentifierCI) error { - for _, assignment := range setClauses { - if col.Equal(assignment.Name.Name) { - valueExpr, isValuesFuncExpr := assignment.Expr.(*sqlparser.ValuesFuncExpr) - // update on duplicate key is changing the vindex column, not supported. - if !isValuesFuncExpr || !valueExpr.Name.Name.Equal(assignment.Name.Name) { - return vterrors.VT12001("DML cannot update vindex column") - } - return nil - } - } - return nil -} - -// findOrAddColumn finds the position of a column in the insert. If it's -// absent it appends it to the with NULL values. -// It returns the position of the column and also boolean representing whether it was added or already present. 
-func findOrAddColumn(ins *sqlparser.Insert, col sqlparser.IdentifierCI) (int, bool) { - colNum := findColumn(ins, col) - if colNum >= 0 { - return colNum, false - } - colOffset := len(ins.Columns) - ins.Columns = append(ins.Columns, col) - if rows, ok := ins.Rows.(sqlparser.Values); ok { - for i := range rows { - rows[i] = append(rows[i], &sqlparser.NullVal{}) - } - } - return colOffset, true -} - -// findColumn returns the column index where it is placed on the insert column list. -// Otherwise, return -1 when not found. -func findColumn(ins *sqlparser.Insert, col sqlparser.IdentifierCI) int { - for i, column := range ins.Columns { - if col.Equal(column) { - return i - } - } - return -1 -} - -func populateInsertColumnlist(ins *sqlparser.Insert, table *vindexes.Table) *sqlparser.Insert { - cols := make(sqlparser.Columns, 0, len(table.Columns)) - for _, c := range table.Columns { - cols = append(cols, c.Name) - } - ins.Columns = cols - return ins -} - -// modifyForAutoinc modifies the AST and the plan to generate necessary autoinc values. -// For row values cases, bind variable names are generated using baseName. 
-func modifyForAutoinc(ins *sqlparser.Insert, vTable *vindexes.Table) (*Generate, error) { - if vTable.AutoIncrement == nil { - return nil, nil - } - gen := &Generate{ - Keyspace: vTable.AutoIncrement.Sequence.Keyspace, - TableName: sqlparser.TableName{Name: vTable.AutoIncrement.Sequence.Name}, - Pinned: vTable.AutoIncrement.Sequence.Pinned, - } - colNum, newColAdded := findOrAddColumn(ins, vTable.AutoIncrement.Column) - switch rows := ins.Rows.(type) { - case sqlparser.SelectStatement: - gen.Offset = colNum - gen.added = newColAdded - case sqlparser.Values: - autoIncValues := make([]evalengine.Expr, 0, len(rows)) - for rowNum, row := range rows { - // Support the DEFAULT keyword by treating it as null - if _, ok := row[colNum].(*sqlparser.Default); ok { - row[colNum] = &sqlparser.NullVal{} - } - expr, err := evalengine.Translate(row[colNum], nil) - if err != nil { - return nil, err - } - autoIncValues = append(autoIncValues, expr) - row[colNum] = sqlparser.NewArgument(engine.SeqVarName + strconv.Itoa(rowNum)) - } - gen.Values = evalengine.NewTupleExpr(autoIncValues...) 
- } - return gen, nil -} - -func getOperatorFromTableExpr(ctx *plancontext.PlanningContext, tableExpr sqlparser.TableExpr) (ops.Operator, error) { - switch tableExpr := tableExpr.(type) { - case *sqlparser.AliasedTableExpr: - return getOperatorFromAliasedTableExpr(ctx, tableExpr) - case *sqlparser.JoinTableExpr: - return getOperatorFromJoinTableExpr(ctx, tableExpr) - case *sqlparser.ParenTableExpr: - return crossJoin(ctx, tableExpr.Exprs) - default: - return nil, vterrors.VT13001(fmt.Sprintf("unable to use: %T table type", tableExpr)) - } -} - -func getOperatorFromJoinTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.JoinTableExpr) (ops.Operator, error) { - lhs, err := getOperatorFromTableExpr(ctx, tableExpr.LeftExpr) - if err != nil { - return nil, err - } - rhs, err := getOperatorFromTableExpr(ctx, tableExpr.RightExpr) - if err != nil { - return nil, err - } - - switch tableExpr.Join { - case sqlparser.NormalJoinType: - return createInnerJoin(ctx, tableExpr, lhs, rhs) - case sqlparser.LeftJoinType, sqlparser.RightJoinType: - return createOuterJoin(tableExpr, lhs, rhs) - default: - return nil, vterrors.VT13001("unsupported: %s", tableExpr.Join.ToString()) - } -} - -func getOperatorFromAliasedTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.AliasedTableExpr) (ops.Operator, error) { - tableID := ctx.SemTable.TableSetFor(tableExpr) - switch tbl := tableExpr.Expr.(type) { - case sqlparser.TableName: - tableInfo, err := ctx.SemTable.TableInfoFor(tableID) - if err != nil { - return nil, err - } - - if vt, isVindex := tableInfo.(*semantics.VindexTable); isVindex { - solves := tableID - return &Vindex{ - Table: VindexTable{ - TableID: tableID, - Alias: tableExpr, - Table: tbl, - VTable: vt.Table.GetVindexTable(), - }, - Vindex: vt.Vindex, - Solved: solves, - }, nil - } - qg := newQueryGraph() - isInfSchema := tableInfo.IsInfSchema() - qt := &QueryTable{Alias: tableExpr, Table: tbl, ID: tableID, IsInfSchema: isInfSchema} - qg.Tables = 
append(qg.Tables, qt) - return qg, nil - case *sqlparser.DerivedTable: - inner, err := createLogicalOperatorFromAST(ctx, tbl.Select) - if err != nil { - return nil, err - } - if horizon, ok := inner.(*Horizon); ok { - inner = horizon.Source - } - - return &Derived{ - TableId: tableID, - Alias: tableExpr.As.String(), - Source: inner, - Query: tbl.Select, - ColumnAliases: tableExpr.Columns, - }, nil - default: - return nil, vterrors.VT13001(fmt.Sprintf("unable to use: %T", tbl)) - } -} - -func crossJoin(ctx *plancontext.PlanningContext, exprs sqlparser.TableExprs) (ops.Operator, error) { - var output ops.Operator - for _, tableExpr := range exprs { - op, err := getOperatorFromTableExpr(ctx, tableExpr) - if err != nil { - return nil, err - } - if output == nil { - output = op - } else { - output = createJoin(ctx, output, op) - } - } - return output, nil -} - -func createQueryTableForDML(ctx *plancontext.PlanningContext, tableExpr sqlparser.TableExpr, whereClause *sqlparser.Where) (semantics.TableInfo, *QueryTable, error) { - alTbl, ok := tableExpr.(*sqlparser.AliasedTableExpr) - if !ok { - return nil, nil, vterrors.VT13001("expected AliasedTableExpr") - } - tblName, ok := alTbl.Expr.(sqlparser.TableName) - if !ok { - return nil, nil, vterrors.VT13001("expected TableName") - } - - tableID := ctx.SemTable.TableSetFor(alTbl) - tableInfo, err := ctx.SemTable.TableInfoFor(tableID) - if err != nil { - return nil, nil, err - } - - if tableInfo.IsInfSchema() { - return nil, nil, vterrors.VT12001("update information schema tables") - } - - var predicates []sqlparser.Expr - if whereClause != nil { - predicates = sqlparser.SplitAndExpression(nil, whereClause.Expr) - } - qt := &QueryTable{ - ID: tableID, - Alias: alTbl, - Table: tblName, - Predicates: predicates, - } - return tableInfo, qt, nil -} - -func addColumnEquality(ctx *plancontext.PlanningContext, expr sqlparser.Expr) { - switch expr := expr.(type) { - case *sqlparser.ComparisonExpr: - if expr.Operator != 
sqlparser.EqualOp { - return - } - - if left, isCol := expr.Left.(*sqlparser.ColName); isCol { - ctx.SemTable.AddColumnEquality(left, expr.Right) - } - if right, isCol := expr.Right.(*sqlparser.ColName); isCol { - ctx.SemTable.AddColumnEquality(right, expr.Left) - } - } -} diff --git a/go/vt/vtgate/planbuilder/operators/merging.go b/go/vt/vtgate/planbuilder/operators/merging.go deleted file mode 100644 index ef8da1ef280..00000000000 --- a/go/vt/vtgate/planbuilder/operators/merging.go +++ /dev/null @@ -1,323 +0,0 @@ -/* -Copyright 2023 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package operators - -import ( - "fmt" - "reflect" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" -) - -// Merge checks whether two operators can be merged into a single one. -// If they can be merged, a new operator with the merged routing is returned -// If they cannot be merged, nil is returned. 
-func Merge(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, joinPredicates []sqlparser.Expr, m merger) (ops.Operator, error) { - lhsRoute, rhsRoute := operatorsToRoutes(lhs, rhs) - if lhsRoute == nil || rhsRoute == nil { - return nil, nil - } - - lhsRoute, rhsRoute, routingA, routingB, sameKeyspace := getRoutesOrAlternates(lhsRoute, rhsRoute) - - a, b := getRoutingType(routingA), getRoutingType(routingB) - if getTypeName(routingA) < getTypeName(routingB) { - // while deciding if two routes can be merged, the LHS/RHS order of the routes is not important. - // for the actual merging, we still need to remember which side was inner and which was outer for subqueries - a, b = b, a - routingA, routingB = routingB, routingA - } - - switch { - // if either side is a dual query, we can always merge them together - case a == dual: - return m.merge(lhsRoute, rhsRoute, routingB) - case b == dual: - return m.merge(lhsRoute, rhsRoute, routingA) - - // an unsharded/reference route can be merged with anything going to that keyspace - case a == anyShard && sameKeyspace: - return m.merge(lhsRoute, rhsRoute, routingB) - case b == anyShard && sameKeyspace: - return m.merge(lhsRoute, rhsRoute, routingA) - - // None routing can always be merged, as long as we are aiming for the same keyspace - case a == none && sameKeyspace: - return m.merge(lhsRoute, rhsRoute, routingA) - case b == none && sameKeyspace: - return m.merge(lhsRoute, rhsRoute, routingB) - - // infoSchema routing is complex, so we handle it in a separate method - case a == infoSchema && b == infoSchema: - return tryMergeInfoSchemaRoutings(routingA, routingB, m, lhsRoute, rhsRoute) - - // sharded routing is complex, so we handle it in a separate method - case a == sharded && b == sharded: - return tryMergeShardedRouting(ctx, lhsRoute, rhsRoute, m, joinPredicates) - - default: - return nil, nil - } -} - -type ( - merger interface { - mergeTables(r1, r2 *ShardedRouting, op1, op2 *Route) (*Route, error) - merge(op1, 
op2 *Route, r Routing) (*Route, error) - } - - joinMerger struct { - ctx *plancontext.PlanningContext - predicates []sqlparser.Expr - innerJoin bool - } - - subQueryMerger struct { - ctx *plancontext.PlanningContext - subq *SubQueryInner - } - - // mergeDecorator runs the inner merge and also runs the additional function f. - mergeDecorator struct { - inner merger - f func() error - } - - routingType int -) - -const ( - sharded routingType = iota - infoSchema - anyShard - none - dual - targeted -) - -func (rt routingType) String() string { - switch rt { - case sharded: - return "sharded" - case infoSchema: - return "infoSchema" - case anyShard: - return "anyShard" - case none: - return "none" - case dual: - return "dual" - case targeted: - return "targeted" - } - panic("switch should be exhaustive") -} - -// getRoutesOrAlternates gets the Routings from each Route. If they are from different keyspaces, -// we check if this is a table with alternates in other keyspaces that we can use -func getRoutesOrAlternates(lhsRoute, rhsRoute *Route) (*Route, *Route, Routing, Routing, bool) { - routingA := lhsRoute.Routing - routingB := rhsRoute.Routing - sameKeyspace := routingA.Keyspace() == routingB.Keyspace() - - if sameKeyspace || - // if either of these is missing a keyspace, we are not going to be able to find an alternative - routingA.Keyspace() == nil || - routingB.Keyspace() == nil { - return lhsRoute, rhsRoute, routingA, routingB, sameKeyspace - } - - if refA, ok := routingA.(*AnyShardRouting); ok { - if altARoute := refA.AlternateInKeyspace(routingB.Keyspace()); altARoute != nil { - return altARoute, rhsRoute, altARoute.Routing, routingB, true - } - } - - if refB, ok := routingB.(*AnyShardRouting); ok { - if altBRoute := refB.AlternateInKeyspace(routingA.Keyspace()); altBRoute != nil { - return lhsRoute, altBRoute, routingA, altBRoute.Routing, true - } - } - - return lhsRoute, rhsRoute, routingA, routingB, sameKeyspace -} - -func getTypeName(myvar interface{}) string 
{ - return reflect.TypeOf(myvar).String() -} - -func getRoutingType(r Routing) routingType { - switch r.(type) { - case *InfoSchemaRouting: - return infoSchema - case *AnyShardRouting: - return anyShard - case *DualRouting: - return dual - case *ShardedRouting: - return sharded - case *NoneRouting: - return none - case *TargetedRouting: - return targeted - } - panic(fmt.Sprintf("switch should be exhaustive, got %T", r)) -} - -func newJoinMerge(ctx *plancontext.PlanningContext, predicates []sqlparser.Expr, innerJoin bool) merger { - return &joinMerger{ - ctx: ctx, - predicates: predicates, - innerJoin: innerJoin, - } -} - -func (jm *joinMerger) mergeTables(r1, r2 *ShardedRouting, op1, op2 *Route) (*Route, error) { - tr := &ShardedRouting{ - VindexPreds: append(r1.VindexPreds, r2.VindexPreds...), - keyspace: r1.keyspace, - RouteOpCode: r1.RouteOpCode, - SeenPredicates: append(r1.SeenPredicates, r2.SeenPredicates...), - } - if r1.SelectedVindex() == r2.SelectedVindex() { - tr.Selected = r1.Selected - } else { - tr.PickBestAvailableVindex() - } - - return &Route{ - Source: jm.getApplyJoin(op1, op2), - MergedWith: []*Route{op2}, - Routing: tr, - }, nil -} - -func (jm *joinMerger) getApplyJoin(op1, op2 *Route) *ApplyJoin { - return NewApplyJoin(op1.Source, op2.Source, jm.ctx.SemTable.AndExpressions(jm.predicates...), !jm.innerJoin) -} - -func (jm *joinMerger) merge(op1, op2 *Route, r Routing) (*Route, error) { - return &Route{ - Source: jm.getApplyJoin(op1, op2), - MergedWith: []*Route{op2}, - Routing: r, - }, nil -} - -func newSubQueryMerge(ctx *plancontext.PlanningContext, subq *SubQueryInner) merger { - return &subQueryMerger{ctx: ctx, subq: subq} -} - -// markPredicateInOuterRouting merges a subquery with the outer routing. 
-// If the subquery was a predicate on the outer side, we see if we can use -// predicates from the subquery to help with routing -func (s *subQueryMerger) markPredicateInOuterRouting(outer *ShardedRouting, inner Routing) (Routing, error) { - // When merging an inner query with its outer query, we can remove the - // inner query from the list of predicates that can influence routing of - // the outer query. - // - // Note that not all inner queries necessarily are part of the routing - // predicates list, so this might be a no-op. - subQueryWasPredicate := false - for i, predicate := range outer.SeenPredicates { - if s.ctx.SemTable.EqualsExprWithDeps(predicate, s.subq.ExtractedSubquery) { - outer.SeenPredicates = append(outer.SeenPredicates[:i], outer.SeenPredicates[i+1:]...) - - subQueryWasPredicate = true - - // The `ExtractedSubquery` of an inner query is unique (due to the uniqueness of bind variable names) - // so we can stop after the first match. - break - } - } - - if !subQueryWasPredicate { - // if the subquery was not a predicate, we are done here - return outer, nil - } - - switch inner := inner.(type) { - case *ShardedRouting: - // Copy Vindex predicates from the inner route to the upper route. - // If we can route based on some of these predicates, the routing can improve - outer.VindexPreds = append(outer.VindexPreds, inner.VindexPreds...) - outer.SeenPredicates = append(outer.SeenPredicates, inner.SeenPredicates...) 
- routing, err := outer.ResetRoutingLogic(s.ctx) - if err != nil { - return nil, err - } - return routing, nil - case *NoneRouting: - // if we have an ANDed subquery, and we know that it will not find anything, - // we can safely assume that the outer query will also not return anything - return &NoneRouting{keyspace: outer.keyspace}, nil - default: - return outer, nil - } -} - -func (s *subQueryMerger) mergeTables(outer, inner *ShardedRouting, op1, op2 *Route) (*Route, error) { - s.subq.ExtractedSubquery.Merged = true - - routing, err := s.markPredicateInOuterRouting(outer, inner) - if err != nil { - return nil, err - } - op1.Routing = routing - op1.MergedWith = append(op1.MergedWith, op2) - return op1, nil -} - -func (s *subQueryMerger) merge(outer, inner *Route, routing Routing) (*Route, error) { - s.subq.ExtractedSubquery.Merged = true - - if outerSR, ok := outer.Routing.(*ShardedRouting); ok { - var err error - routing, err = s.markPredicateInOuterRouting(outerSR, inner.Routing) - if err != nil { - return nil, err - } - } - - outer.Routing = routing - outer.MergedWith = append(outer.MergedWith, inner) - return outer, nil -} - -func (d *mergeDecorator) mergeTables(outer, inner *ShardedRouting, op1, op2 *Route) (*Route, error) { - merged, err := d.inner.mergeTables(outer, inner, op1, op2) - if err != nil { - return nil, err - } - if err := d.f(); err != nil { - return nil, err - } - return merged, nil -} - -func (d *mergeDecorator) merge(outer, inner *Route, r Routing) (*Route, error) { - merged, err := d.inner.merge(outer, inner, r) - if err != nil { - return nil, err - } - if err := d.f(); err != nil { - return nil, err - } - return merged, nil -} diff --git a/go/vt/vtgate/planbuilder/operators/offset_planning.go b/go/vt/vtgate/planbuilder/operators/offset_planning.go index f4287f281ed..502df5e9c82 100644 --- a/go/vt/vtgate/planbuilder/operators/offset_planning.go +++ b/go/vt/vtgate/planbuilder/operators/offset_planning.go @@ -36,7 +36,7 @@ func 
planOffsets(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Opera visitor := func(in ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { var err error switch op := in.(type) { - case *Derived, *Horizon: + case *Horizon: return nil, nil, vterrors.VT13001(fmt.Sprintf("should not see %T here", in)) case offsettable: err = op.planOffsets(ctx) @@ -47,51 +47,144 @@ func planOffsets(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Opera return in, rewrite.SameTree, nil } - op, err := rewrite.TopDown(root, TableID, visitor, stopAtRoute) - if err != nil { - if vterr, ok := err.(*vterrors.VitessError); ok && vterr.ID == "VT13001" { - // we encountered a bug. let's try to back out - return nil, errHorizonNotPlanned() - } - return nil, err - } + return rewrite.TopDown(root, TableID, visitor, stopAtRoute) +} - return op, nil +func fetchByOffset(e sqlparser.SQLNode) bool { + switch e.(type) { + case *sqlparser.ColName, sqlparser.AggrFunc: + return true + default: + return false + } } -func (p *Projection) passThroughAllColumns(ctx *plancontext.PlanningContext) error { +// useOffsets rewrites an expression to use values from the input +func useOffsets(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op ops.Operator) (sqlparser.Expr, error) { + var exprOffset *sqlparser.Offset - for i, col := range p.Projections { - newSrc, offset, err := p.Source.AddColumn(ctx, aeWrap(col.GetExpr()), true, false) + in := op.Inputs()[0] + found := func(e sqlparser.Expr, offset int) { exprOffset = sqlparser.NewOffset(offset, e) } + + notFound := func(e sqlparser.Expr) error { + _, addToGroupBy := e.(*sqlparser.ColName) + offset, err := in.AddColumn(ctx, true, addToGroupBy, aeWrap(e)) if err != nil { return err } - p.Source = newSrc - p.Projections[i] = Offset{ - Expr: col.GetExpr(), - Offset: offset, + exprOffset = sqlparser.NewOffset(offset, e) + return nil + } + + visitor := getOffsetRewritingVisitor(ctx, in.FindCol, found, notFound) 
+ + // The cursor replace is not available while walking `down`, so `up` is used to do the replacement. + up := func(cursor *sqlparser.CopyOnWriteCursor) { + if exprOffset != nil { + cursor.Replace(exprOffset) + exprOffset = nil } } - return nil + rewritten := sqlparser.CopyOnRewrite(expr, visitor, up, ctx.SemTable.CopySemanticInfo) + + return rewritten.(sqlparser.Expr), nil } -func fetchByOffset(e sqlparser.SQLNode) bool { - switch e.(type) { - case *sqlparser.ColName, sqlparser.AggrFunc: - return true - default: - return false +// addColumnsToInput adds columns needed by an operator to its input. +// This happens only when the filter expression can be retrieved as an offset from the underlying mysql. +func addColumnsToInput(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { + visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { + filter, ok := in.(*Filter) + if !ok { + return in, rewrite.SameTree, nil + } + + proj, areOnTopOfProj := filter.Source.(selectExpressions) + if !areOnTopOfProj { + // not much we can do here + return in, rewrite.SameTree, nil + } + addedColumns := false + found := func(expr sqlparser.Expr, i int) {} + notFound := func(e sqlparser.Expr) error { + _, addToGroupBy := e.(*sqlparser.ColName) + _, err := proj.addColumnWithoutPushing(ctx, aeWrap(e), addToGroupBy) + if err != nil { + return err + } + addedColumns = true + return nil + } + visitor := getOffsetRewritingVisitor(ctx, proj.FindCol, found, notFound) + + for _, expr := range filter.Predicates { + _ = sqlparser.CopyOnRewrite(expr, visitor, nil, ctx.SemTable.CopySemanticInfo) + } + if addedColumns { + return in, rewrite.NewTree("added columns because filter needs it", in), nil + } + + return in, rewrite.SameTree, nil + } + + return rewrite.TopDown(root, TableID, visitor, stopAtRoute) +} + +// addColumnsToInput adds columns needed by an operator to its input. 
+// This happens only when the filter expression can be retrieved as an offset from the underlying mysql. +func pullDistinctFromUNION(_ *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { + visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { + union, ok := in.(*Union) + if !ok || !union.distinct { + return in, rewrite.SameTree, nil + } + + union.distinct = false + + distinct := &Distinct{ + Required: true, + Source: union, + } + return distinct, rewrite.NewTree("pulled out DISTINCT from union", union), nil } + + return rewrite.TopDown(root, TableID, visitor, stopAtRoute) } -func planOffsetsOnJoins(ctx *plancontext.PlanningContext, op ops.Operator) error { - err := rewrite.Visit(op, func(current ops.Operator) error { - join, ok := current.(*ApplyJoin) +func getOffsetRewritingVisitor( + ctx *plancontext.PlanningContext, + // this is the function that will be called to try to find the offset for an expression + findCol func(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error), + // this function will be called when an expression has been found on the input + found func(sqlparser.Expr, int), + // if we have an expression that mush be fetched, this method will be called + notFound func(sqlparser.Expr) error, +) func(node, parent sqlparser.SQLNode) bool { + var err error + return func(node, parent sqlparser.SQLNode) bool { + if err != nil { + return false + } + e, ok := node.(sqlparser.Expr) if !ok { - return nil + return true + } + var offset int + offset, err = findCol(ctx, e, false) + if err != nil { + return false } - return join.planOffsets(ctx) - }) - return err + if offset >= 0 { + found(e, offset) + return false + } + + if fetchByOffset(e) { + err = notFound(e) + return false + } + + return true + } } diff --git a/go/vt/vtgate/planbuilder/operators/operator.go b/go/vt/vtgate/planbuilder/operators/operator.go index d2ce5cb77d0..a1cfbfd0cc0 100644 --- 
a/go/vt/vtgate/planbuilder/operators/operator.go +++ b/go/vt/vtgate/planbuilder/operators/operator.go @@ -17,26 +17,32 @@ limitations under the License. // Package operators contains the operators used to plan queries. /* The operators go through a few phases while planning: -1. Logical - In this first pass, we build an operator tree from the incoming parsed query. - It will contain logical joins - we still haven't decided on the join algorithm to use yet. - At the leaves, it will contain QueryGraphs - these are the tables in the FROM clause - that we can easily do join ordering on. The logical tree will represent the full query, - including projections, Grouping, ordering and so on. -2. Physical - Once the logical plan has been fully built, we go bottom up and plan which routes that will be used. - During this phase, we will also decide which join algorithms should be used on the vtgate level -3. Columns & Aggregation - Once we know which queries will be sent to the tablets, we go over the tree and decide which - columns each operator should output. At this point, we also do offset lookups, - so we know at runtime from which columns in the input table we need to read. +1. Initial plan + In this first pass, we build an operator tree from the incoming parsed query. + At the leaves, it will contain QueryGraphs - these are the tables in the FROM clause + that we can easily do join ordering on because they are all inner joins. + All the post-processing - aggregations, sorting, limit etc. are at this stage + contained in Horizon structs. We try to push these down under routes, and expand + the ones that can't be pushed down into individual operators such as Projection, + Agreggation, Limit, etc. +2. Planning + Once the initial plan has been fully built, we go through a number of phases. + recursively running rewriters on the tree in a fixed point fashion, until we've gone + over all phases and the tree has stop changing. +3. 
Offset planning + Now is the time to stop working with AST objects and transform remaining expressions being + used on top of vtgate to either offsets on inputs or evalengine expressions. */ package operators import ( + "fmt" + + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) @@ -53,24 +59,25 @@ type ( // PlanQuery creates a query plan for a given SQL statement func PlanQuery(ctx *plancontext.PlanningContext, stmt sqlparser.Statement) (ops.Operator, error) { - op, err := createLogicalOperatorFromAST(ctx, stmt) + op, err := translateQueryToOp(ctx, stmt) if err != nil { return nil, err } - if err = checkValid(op); err != nil { - return nil, err + if rewrite.DebugOperatorTree { + fmt.Println("Initial tree:") + fmt.Println(ops.ToTree(op)) } - if op, err = transformToPhysical(ctx, op); err != nil { + if op, err = compact(ctx, op); err != nil { return nil, err } - if op, err = tryHorizonPlanning(ctx, op); err != nil { + if err = checkValid(op); err != nil { return nil, err } - if op, err = compact(ctx, op); err != nil { + if op, err = planQuery(ctx, op); err != nil { return nil, err } @@ -96,12 +103,20 @@ func (noInputs) SetInputs(ops []ops.Operator) { } // AddColumn implements the Operator interface -func (noColumns) AddColumn(*plancontext.PlanningContext, *sqlparser.AliasedExpr, bool, bool) (ops.Operator, int, error) { - return nil, 0, vterrors.VT13001("the noColumns operator cannot accept columns") +func (noColumns) AddColumn(*plancontext.PlanningContext, bool, bool, *sqlparser.AliasedExpr) (int, error) { + return 0, vterrors.VT13001("noColumns operators have no column") +} + +func (noColumns) GetColumns(*plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return nil, vterrors.VT13001("noColumns operators have no column") } 
-func (noColumns) GetColumns() ([]*sqlparser.AliasedExpr, error) { - return nil, vterrors.VT13001("the noColumns operator cannot accept columns") +func (noColumns) FindCol(*plancontext.PlanningContext, sqlparser.Expr, bool) (int, error) { + return 0, vterrors.VT13001("noColumns operators have no column") +} + +func (noColumns) GetSelectExprs(*plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return nil, vterrors.VT13001("noColumns operators have no column") } // AddPredicate implements the Operator interface @@ -121,17 +136,28 @@ func tryTruncateColumnsAt(op ops.Operator, truncateAt int) bool { return true } - inputs := op.Inputs() - if len(inputs) != 1 { - return false - } - - switch op.(type) { + switch op := op.(type) { case *Limit: - // empty by design + return tryTruncateColumnsAt(op.Source, truncateAt) + case *SubQuery: + for _, offset := range op.Vars { + if offset >= truncateAt { + return false + } + } + return tryTruncateColumnsAt(op.Outer, truncateAt) default: return false } +} - return tryTruncateColumnsAt(inputs[0], truncateAt) +func transformColumnsToSelectExprs(ctx *plancontext.PlanningContext, op ops.Operator) (sqlparser.SelectExprs, error) { + columns, err := op.GetColumns(ctx) + if err != nil { + return nil, err + } + selExprs := slice.Map(columns, func(from *sqlparser.AliasedExpr) sqlparser.SelectExpr { + return from + }) + return selExprs, nil } diff --git a/go/vt/vtgate/planbuilder/operators/ops/op.go b/go/vt/vtgate/planbuilder/operators/ops/op.go index d68daed439e..87bf9d9e12f 100644 --- a/go/vt/vtgate/planbuilder/operators/ops/op.go +++ b/go/vt/vtgate/planbuilder/operators/ops/op.go @@ -23,11 +23,11 @@ import ( type ( // Operator forms the tree of operators, representing the declarative query provided. - // While planning, the operator tree starts with logical operators, and later moves to physical operators. 
- // The difference between the two is that when we get to a physical operator, we have made decisions on in - // which order to do the joins, and how to split them up across shards and keyspaces. - // In some situation we go straight to the physical operator - when there are no options to consider, - // we can go straight to the end result. + // The operator tree is no actually runnable, it's an intermediate representation used + // while query planning + // The mental model are operators that pull data from each other, the root being the + // full query output, and the leaves are most often `Route`s, representing communication + // with one or more shards. We want to push down as much work as possible under these Routes Operator interface { // Clone will return a copy of this operator, protected so changed to the original will not impact the clone Clone(inputs []Operator) Operator @@ -41,15 +41,16 @@ type ( // AddPredicate is used to push predicates. It pushed it as far down as is possible in the tree. // If we encounter a join and the predicate depends on both sides of the join, the predicate will be split into two parts, // where data is fetched from the LHS of the join to be used in the evaluation on the RHS + // TODO: we should remove this and replace it with rewriters AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (Operator, error) - // AddColumn tells an operator to also output an additional column specified. - // The offset to the column is returned. 
- AddColumn(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, reuseExisting, addToGroupBy bool) (Operator, int, error) + AddColumn(ctx *plancontext.PlanningContext, reuseExisting bool, addToGroupBy bool, expr *sqlparser.AliasedExpr) (int, error) - GetColumns() ([]*sqlparser.AliasedExpr, error) + FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) + + GetColumns(ctx *plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) + GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) - Description() OpDescription ShortDescription() string GetOrdering() ([]OrderBy, error) @@ -62,13 +63,15 @@ type ( // See GroupBy#SimplifiedExpr for more details about this SimplifiedExpr sqlparser.Expr } +) - OpDescription struct { - OperatorType string - Variant string `json:",omitempty"` - Other map[string]any `json:",omitempty"` - - // This field will be filled in by the JSON producer. No need to set it manually - Inputs []OpDescription `json:",omitempty"` +// Map takes in a mapping function and applies it to both the expression in OrderBy. +func (ob OrderBy) Map(mappingFunc func(sqlparser.Expr) sqlparser.Expr) OrderBy { + return OrderBy{ + Inner: &sqlparser.Order{ + Expr: mappingFunc(ob.Inner.Expr), + Direction: ob.Inner.Direction, + }, + SimplifiedExpr: mappingFunc(ob.SimplifiedExpr), } -) +} diff --git a/go/vt/vtgate/planbuilder/operators/ops/to_json.go b/go/vt/vtgate/planbuilder/operators/ops/to_json.go index 2d22e27d8cc..2b8b747f433 100644 --- a/go/vt/vtgate/planbuilder/operators/ops/to_json.go +++ b/go/vt/vtgate/planbuilder/operators/ops/to_json.go @@ -17,31 +17,13 @@ limitations under the License. package ops import ( - "encoding/json" "fmt" "reflect" "github.com/xlab/treeprint" ) -// ToJSON is a debug only function. 
It can panic, so do not use this in production code -func ToJSON(op Operator) string { - descr := buildDescriptionTree(op) - out, err := json.MarshalIndent(descr, "", " ") - if err != nil { - panic(err) - } - return string(out) -} - -func buildDescriptionTree(op Operator) OpDescription { - descr := op.Description() - for _, in := range op.Inputs() { - descr.Inputs = append(descr.Inputs, buildDescriptionTree(in)) - } - return descr -} - +// ToTree returns the operator as ascii tree. Should only be used for debugging func ToTree(op Operator) string { tree := asTree(op, nil) return tree.String() diff --git a/go/vt/vtgate/planbuilder/operators/ordering.go b/go/vt/vtgate/planbuilder/operators/ordering.go index a0c42fc8ecb..07f82239728 100644 --- a/go/vt/vtgate/planbuilder/operators/ordering.go +++ b/go/vt/vtgate/planbuilder/operators/ordering.go @@ -17,11 +17,10 @@ limitations under the License. package operators import ( + "slices" "strings" - "golang.org/x/exp/slices" - - "vitess.io/vitess/go/slices2" + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" @@ -38,10 +37,11 @@ type Ordering struct { func (o *Ordering) Clone(inputs []ops.Operator) ops.Operator { return &Ordering{ - Source: inputs[0], - Offset: slices.Clone(o.Offset), - WOffset: slices.Clone(o.WOffset), - Order: slices.Clone(o.Order), + Source: inputs[0], + Offset: slices.Clone(o.Offset), + WOffset: slices.Clone(o.WOffset), + Order: slices.Clone(o.Order), + ResultColumns: o.ResultColumns, } } @@ -62,17 +62,20 @@ func (o *Ordering) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser return o, nil } -func (o *Ordering) AddColumn(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, reuseExisting, addToGroupBy bool) (ops.Operator, int, error) { - newSrc, offset, err := o.Source.AddColumn(ctx, expr, reuseExisting, addToGroupBy) - if err != nil { - return nil, 0, err - 
} - o.Source = newSrc - return o, offset, nil +func (o *Ordering) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool, expr *sqlparser.AliasedExpr) (int, error) { + return o.Source.AddColumn(ctx, reuse, gb, expr) +} + +func (o *Ordering) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) { + return o.Source.FindCol(ctx, expr, underRoute) } -func (o *Ordering) GetColumns() ([]*sqlparser.AliasedExpr, error) { - return o.Source.GetColumns() +func (o *Ordering) GetColumns(ctx *plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return o.Source.GetColumns(ctx) +} + +func (o *Ordering) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return o.Source.GetSelectExprs(ctx) } func (o *Ordering) GetOrdering() ([]ops.OrderBy, error) { @@ -81,11 +84,10 @@ func (o *Ordering) GetOrdering() ([]ops.OrderBy, error) { func (o *Ordering) planOffsets(ctx *plancontext.PlanningContext) error { for _, order := range o.Order { - newSrc, offset, err := o.Source.AddColumn(ctx, aeWrap(order.SimplifiedExpr), true, false) + offset, err := o.Source.AddColumn(ctx, true, false, aeWrap(order.SimplifiedExpr)) if err != nil { return err } - o.Source = newSrc o.Offset = append(o.Offset, offset) if !ctx.SemTable.NeedsWeightString(order.SimplifiedExpr) { @@ -94,27 +96,19 @@ func (o *Ordering) planOffsets(ctx *plancontext.PlanningContext) error { } wsExpr := &sqlparser.WeightStringFuncExpr{Expr: order.SimplifiedExpr} - newSrc, offset, err = o.Source.AddColumn(ctx, aeWrap(wsExpr), true, false) + offset, err = o.Source.AddColumn(ctx, true, false, aeWrap(wsExpr)) if err != nil { return err } - o.Source = newSrc o.WOffset = append(o.WOffset, offset) } return nil } -func (o *Ordering) Description() ops.OpDescription { - return ops.OpDescription{ - OperatorType: "Ordering", - Other: map[string]any{}, - } -} - func (o *Ordering) ShortDescription() string { - ordering := slices2.Map(o.Order, func(o ops.OrderBy) 
string { - return sqlparser.String(o.Inner) + ordering := slice.Map(o.Order, func(o ops.OrderBy) string { + return sqlparser.String(o.SimplifiedExpr) }) return strings.Join(ordering, ", ") } diff --git a/go/vt/vtgate/planbuilder/operators/phases.go b/go/vt/vtgate/planbuilder/operators/phases.go new file mode 100644 index 00000000000..1eecc595c8f --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/phases.go @@ -0,0 +1,209 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package operators + +import ( + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +type ( + Phase int +) + +const ( + physicalTransform Phase = iota + initialPlanning + pullDistinctFromUnion + delegateAggregation + addAggrOrdering + cleanOutPerfDistinct + subquerySettling + DONE +) + +func (p Phase) String() string { + switch p { + case physicalTransform: + return "physicalTransform" + case initialPlanning: + return "initial horizon planning optimization" + case pullDistinctFromUnion: + return "pull distinct from UNION1" + case delegateAggregation: + return "split aggregation between vtgate and mysql" + case addAggrOrdering: + return "optimize aggregations with ORDER BY" + case cleanOutPerfDistinct: + return "optimize Distinct operations" + case subquerySettling: + return "settle subqueries" + } + + return "unknown" +} + +func (p Phase) shouldRun(s semantics.QuerySignature) bool { + switch p { + case pullDistinctFromUnion: + return s.Union + case delegateAggregation: + return s.Aggregation + case addAggrOrdering: + return s.Aggregation + case cleanOutPerfDistinct: + return s.Distinct + case subquerySettling: + return s.SubQueries + } + return true +} + +func (p Phase) act(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { + switch p { + case pullDistinctFromUnion: + return pullDistinctFromUNION(ctx, op) + case delegateAggregation: + return enableDelegateAggregation(ctx, op) + case addAggrOrdering: + return addOrderBysForAggregations(ctx, op) + case cleanOutPerfDistinct: + return removePerformanceDistinctAboveRoute(ctx, op) + case subquerySettling: + return settleSubqueries(ctx, op) + } + + return op, nil +} + +// getPhases returns the ordered phases that the planner will undergo. 
+// These phases ensure the appropriate collaboration between rewriters. +func getPhases(ctx *plancontext.PlanningContext) (phases []Phase) { + for p := Phase(0); p < DONE; p++ { + if p.shouldRun(ctx.SemTable.QuerySignature) { + phases = append(phases, p) + } + } + return +} + +func removePerformanceDistinctAboveRoute(_ *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { + return rewrite.BottomUp(op, TableID, func(innerOp ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { + d, ok := innerOp.(*Distinct) + if !ok || d.Required { + return innerOp, rewrite.SameTree, nil + } + + return d.Source, rewrite.NewTree("removed distinct not required that was not pushed under route", d), nil + }, stopAtRoute) +} + +func enableDelegateAggregation(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { + return addColumnsToInput(ctx, op) +} + +func addOrderBysForAggregations(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { + visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { + aggrOp, ok := in.(*Aggregator) + if !ok { + return in, rewrite.SameTree, nil + } + + requireOrdering, err := needsOrdering(ctx, aggrOp) + if err != nil { + return nil, nil, err + } + if !requireOrdering { + return in, rewrite.SameTree, nil + } + orderBys := slice.Map(aggrOp.Grouping, func(from GroupBy) ops.OrderBy { + return from.AsOrderBy() + }) + if aggrOp.DistinctExpr != nil { + orderBys = append(orderBys, ops.OrderBy{ + Inner: &sqlparser.Order{ + Expr: aggrOp.DistinctExpr, + }, + SimplifiedExpr: aggrOp.DistinctExpr, + }) + } + aggrOp.Source = &Ordering{ + Source: aggrOp.Source, + Order: orderBys, + } + return in, rewrite.NewTree("added ordering before aggregation", in), nil + } + + return rewrite.BottomUp(root, TableID, visitor, stopAtRoute) +} + +func needsOrdering(ctx *plancontext.PlanningContext, in *Aggregator) (bool, error) { + 
requiredOrder := slice.Map(in.Grouping, func(from GroupBy) sqlparser.Expr { + return from.SimplifiedExpr + }) + if in.DistinctExpr != nil { + requiredOrder = append(requiredOrder, in.DistinctExpr) + } + if len(requiredOrder) == 0 { + return false, nil + } + srcOrdering, err := in.Source.GetOrdering() + if err != nil { + return false, err + } + if len(srcOrdering) < len(requiredOrder) { + return true, nil + } + for idx, gb := range requiredOrder { + if !ctx.SemTable.EqualsExprWithDeps(srcOrdering[idx].SimplifiedExpr, gb) { + return true, nil + } + } + return false, nil +} + +func addGroupByOnRHSOfJoin(root ops.Operator) (ops.Operator, error) { + visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { + join, ok := in.(*ApplyJoin) + if !ok { + return in, rewrite.SameTree, nil + } + + return addLiteralGroupingToRHS(join) + } + + return rewrite.TopDown(root, TableID, visitor, stopAtRoute) +} + +func addLiteralGroupingToRHS(in *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { + _ = rewrite.Visit(in.RHS, func(op ops.Operator) error { + aggr, isAggr := op.(*Aggregator) + if !isAggr { + return nil + } + if len(aggr.Grouping) == 0 { + gb := sqlparser.NewIntLiteral(".0") + aggr.Grouping = append(aggr.Grouping, NewGroupBy(gb, gb, aeWrap(gb))) + } + return nil + }) + return in, rewrite.SameTree, nil +} diff --git a/go/vt/vtgate/planbuilder/operators/projection.go b/go/vt/vtgate/planbuilder/operators/projection.go index 1ed2e1cd681..1c1ef507f33 100644 --- a/go/vt/vtgate/planbuilder/operators/projection.go +++ b/go/vt/vtgate/planbuilder/operators/projection.go @@ -18,11 +18,12 @@ package operators import ( "fmt" + "slices" "strings" - "golang.org/x/exp/slices" - + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" 
"vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" @@ -30,94 +31,349 @@ import ( "vitess.io/vitess/go/vt/vtgate/semantics" ) -type ( - // Projection is used when we need to evaluate expressions on the vtgate - // It uses the evalengine to accomplish its goal - Projection struct { - Source ops.Operator +// Projection is used when we need to evaluate expressions on the vtgate +// It uses the evalengine to accomplish its goal +type Projection struct { + Source ops.Operator - // Columns contain the expressions as viewed from the outside of this operator - Columns []*sqlparser.AliasedExpr + // Columns contain the expressions as viewed from the outside of this operator + Columns ProjCols - // Projections will contain the actual evaluations we need to - // do if this operator is still above a route after optimisation - Projections []ProjExpr + // DT will hold all the necessary information if this is a derived table projection + DT *DerivedTable + FromAggr bool +} - // TableID will be non-nil for derived tables - TableID *semantics.TableSet +type ( + DerivedTable struct { + TableID semantics.TableSet Alias string + Columns sqlparser.Columns + } +) - FromAggr bool +func (dt *DerivedTable) String() string { + return fmt.Sprintf("DERIVED %s(%s)", dt.Alias, sqlparser.String(dt.Columns)) +} + +func (dt *DerivedTable) RewriteExpression(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (sqlparser.Expr, error) { + if dt == nil { + return expr, nil + } + tableInfo, err := ctx.SemTable.TableInfoFor(dt.TableID) + if err != nil { + return nil, err } + return semantics.RewriteDerivedTableExpression(expr, tableInfo), nil +} - ProjExpr interface { - GetExpr() sqlparser.Expr +func (dt *DerivedTable) introducesTableID() semantics.TableSet { + if dt == nil { + return semantics.EmptyTableSet() } + return dt.TableID +} - // Offset is used when we are only passing through data from an incoming column - Offset struct { - Expr sqlparser.Expr - Offset int +type ( + // ProjCols is 
used to enable projections that are only valid if we can push them into a route, and we never need to ask it about offsets + ProjCols interface { + GetColumns() ([]*sqlparser.AliasedExpr, error) + GetSelectExprs() sqlparser.SelectExprs + AddColumn(*sqlparser.AliasedExpr) (ProjCols, int, error) } - // Eval is used for expressions that have to be evaluated in the vtgate using the evalengine - Eval struct { - Expr sqlparser.Expr - EExpr evalengine.Expr + // Used when there are stars in the expressions that we were unable to expand + StarProjections sqlparser.SelectExprs + + // Used when we know all the columns + AliasedProjections []*ProjExpr + + ProjExpr struct { + Original *sqlparser.AliasedExpr // this is the expression the user asked for. should only be used to decide on the column alias + EvalExpr sqlparser.Expr // EvalExpr is the expression that will be evaluated at runtime + ColExpr sqlparser.Expr // ColExpr is used during planning to figure out which column this ProjExpr is representing + Info ExprInfo // Here we store information about evalengine, offsets or subqueries + } +) + +type ( + ExprInfo interface { + expr() } - // UnexploredExpression is used before we have planned - one of two end results are possible for it - // - we are able to push this projection under a route, and then this is not used at all - we'll just - // use the ColumnNames field of the Projection struct - // - we have to evaluate this on the vtgate, and either it's just a copy from the input, - // or it's an evalengine expression that we have to evaluate - UnexploredExpression struct { - E sqlparser.Expr + // Offset is used when we are only passing through data from an incoming column + Offset int + + // EvalEngine is used for expressions that have to be evaluated in the vtgate using the evalengine + EvalEngine struct { + EExpr evalengine.Expr } + + SubQueryExpression []*SubQuery ) +func newProjExpr(ae *sqlparser.AliasedExpr) *ProjExpr { + return &ProjExpr{ + Original: 
sqlparser.CloneRefOfAliasedExpr(ae), + EvalExpr: ae.Expr, + ColExpr: ae.Expr, + } +} + +func newProjExprWithInner(ae *sqlparser.AliasedExpr, in sqlparser.Expr) *ProjExpr { + return &ProjExpr{ + Original: ae, + EvalExpr: in, + ColExpr: ae.Expr, + } +} + +func newAliasedProjection(src ops.Operator) *Projection { + return &Projection{ + Source: src, + Columns: AliasedProjections{}, + } +} + +func (sp StarProjections) GetColumns() ([]*sqlparser.AliasedExpr, error) { + return nil, vterrors.VT09015() +} + +func (sp StarProjections) AddColumn(*sqlparser.AliasedExpr) (ProjCols, int, error) { + return nil, 0, vterrors.VT09015() +} + +func (sp StarProjections) GetSelectExprs() sqlparser.SelectExprs { + return sqlparser.SelectExprs(sp) +} + +func (ap AliasedProjections) GetColumns() ([]*sqlparser.AliasedExpr, error) { + return slice.Map(ap, func(from *ProjExpr) *sqlparser.AliasedExpr { + return aeWrap(from.ColExpr) + }), nil +} + +func (ap AliasedProjections) GetSelectExprs() sqlparser.SelectExprs { + return slice.Map(ap, func(from *ProjExpr) sqlparser.SelectExpr { + return aeWrap(from.ColExpr) + }) +} + +func (ap AliasedProjections) AddColumn(col *sqlparser.AliasedExpr) (ProjCols, int, error) { + offset := len(ap) + return append(ap, newProjExpr(col)), offset, nil +} + +func (pe *ProjExpr) String() string { + var alias, expr, info string + if !pe.Original.As.IsEmpty() { + alias = " AS " + pe.Original.As.String() + } + if pe.EvalExpr == pe.ColExpr { + expr = sqlparser.String(pe.EvalExpr) + } else { + expr = fmt.Sprintf("%s|%s", sqlparser.String(pe.EvalExpr), sqlparser.String(pe.ColExpr)) + } + switch pe.Info.(type) { + case Offset: + info = " [O]" + case *EvalEngine: + info = " [E]" + case SubQueryExpression: + info = " [SQ]" + } + + return expr + alias + info +} + +func (pe *ProjExpr) isSameInAndOut(ctx *plancontext.PlanningContext) bool { + return ctx.SemTable.EqualsExprWithDeps(pe.EvalExpr, pe.ColExpr) +} + var _ selectExpressions = (*Projection)(nil) -func (p *Projection) 
addUnexploredExpr(ae *sqlparser.AliasedExpr, e sqlparser.Expr) int { - p.Projections = append(p.Projections, UnexploredExpression{E: e}) - p.Columns = append(p.Columns, ae) - return len(p.Projections) - 1 +// createSimpleProjection returns a projection where all columns are offsets. +// used to change the name and order of the columns in the final output +func createSimpleProjection(ctx *plancontext.PlanningContext, qp *QueryProjection, src ops.Operator) (*Projection, error) { + p := newAliasedProjection(src) + for _, e := range qp.SelectExprs { + ae, err := e.GetAliasedExpr() + if err != nil { + return nil, err + } + offset, err := p.Source.AddColumn(ctx, true, false, ae) + if err != nil { + return nil, err + } + expr := newProjExpr(ae) + expr.Info = Offset(offset) + _, err = p.addProjExpr(expr) + if err != nil { + return nil, err + } + } + return p, nil +} + +// canPush returns false if the projection has subquery expressions in it and the subqueries have not yet +// been settled. Once they have settled, we know where to push the projection, but if we push too early +// the projection can end up in the wrong branch of joins +func (p *Projection) canPush(ctx *plancontext.PlanningContext) bool { + if reachedPhase(ctx, subquerySettling) { + return true + } + ap, ok := p.Columns.(AliasedProjections) + if !ok { + // we can't mix subqueries and unexpanded stars, so we know this does not contain any subqueries + return true + } + for _, projection := range ap { + if _, ok := projection.Info.(SubQueryExpression); ok { + return false + } + } + return true } -func (p *Projection) addColumnWithoutPushing(expr *sqlparser.AliasedExpr, _ bool) int { - return p.addUnexploredExpr(expr, expr.Expr) +func (p *Projection) GetAliasedProjections() (AliasedProjections, error) { + ap, ok := p.Columns.(AliasedProjections) + if !ok { + return nil, vterrors.VT09015() + } + return ap, nil } func (p *Projection) isDerived() bool { - return p.TableID != nil + return p.DT != nil +} + +func (p 
*Projection) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) { + ap, err := p.GetAliasedProjections() + if err != nil { + return 0, err + } + + if underRoute && p.isDerived() { + return -1, nil + } + + for offset, pe := range ap { + if ctx.SemTable.EqualsExprWithDeps(pe.ColExpr, expr) { + return offset, nil + } + } + + return -1, nil +} + +func (p *Projection) addProjExpr(pe *ProjExpr) (int, error) { + ap, err := p.GetAliasedProjections() + if err != nil { + return 0, err + } + + offset := len(ap) + ap = append(ap, pe) + p.Columns = ap + + return offset, nil } -func (p *Projection) AddColumn(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, _, addToGroupBy bool) (ops.Operator, int, error) { - if offset, found := canReuseColumn(ctx, p.Columns, expr.Expr, extractExpr); found { - return p, offset, nil +func (p *Projection) addUnexploredExpr(ae *sqlparser.AliasedExpr, e sqlparser.Expr) (int, error) { + return p.addProjExpr(newProjExprWithInner(ae, e)) +} + +func (p *Projection) addSubqueryExpr(ae *sqlparser.AliasedExpr, expr sqlparser.Expr, sqs ...*SubQuery) error { + pe := newProjExprWithInner(ae, expr) + pe.Info = SubQueryExpression(sqs) + + _, err := p.addProjExpr(pe) + return err +} + +func (p *Projection) addColumnWithoutPushing(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, _ bool) (int, error) { + return p.addColumn(ctx, true, false, expr, false) +} + +func (p *Projection) addColumnsWithoutPushing(ctx *plancontext.PlanningContext, reuse bool, _ []bool, exprs []*sqlparser.AliasedExpr) ([]int, error) { + offsets := make([]int, len(exprs)) + for idx, expr := range exprs { + offset, err := p.addColumn(ctx, reuse, false, expr, false) + if err != nil { + return nil, err + } + offsets[idx] = offset + } + return offsets, nil +} + +func (p *Projection) AddColumn(ctx *plancontext.PlanningContext, reuse bool, addToGroupBy bool, ae *sqlparser.AliasedExpr) (int, error) { + return p.addColumn(ctx, 
reuse, addToGroupBy, ae, true) +} + +func (p *Projection) addColumn( + ctx *plancontext.PlanningContext, + reuse bool, + addToGroupBy bool, + ae *sqlparser.AliasedExpr, + push bool, +) (int, error) { + expr, err := p.DT.RewriteExpression(ctx, ae.Expr) + if err != nil { + return 0, err + } + + if reuse { + offset, err := p.FindCol(ctx, expr, false) + if err != nil { + return 0, err + } + if offset >= 0 { + return offset, nil + } } - sourceOp, offset, err := p.Source.AddColumn(ctx, expr, true, addToGroupBy) + + // ok, we need to add the expression. let's check if we should rewrite a ws expression first + ws, ok := expr.(*sqlparser.WeightStringFuncExpr) + if ok { + cols, ok := p.Columns.(AliasedProjections) + if !ok { + return 0, vterrors.VT09015() + } + for _, projExpr := range cols { + if ctx.SemTable.EqualsExprWithDeps(ws.Expr, projExpr.ColExpr) { + // if someone is asking for the ws of something we are projecting, + // we need push down the ws of the eval expression + ws.Expr = projExpr.EvalExpr + } + } + } + + pe := newProjExprWithInner(ae, expr) + if !push { + return p.addProjExpr(pe) + } + + // we need to push down this column to our input + inputOffset, err := p.Source.AddColumn(ctx, true, addToGroupBy, ae) if err != nil { - return nil, 0, err + return 0, err } - p.Source = sourceOp - p.Projections = append(p.Projections, Offset{Offset: offset, Expr: expr.Expr}) - p.Columns = append(p.Columns, expr) - return p, len(p.Projections) - 1, nil + + pe.Info = Offset(inputOffset) // since we already know the offset, let's save the information + return p.addProjExpr(pe) } -func (po Offset) GetExpr() sqlparser.Expr { return po.Expr } -func (po Eval) GetExpr() sqlparser.Expr { return po.Expr } -func (po UnexploredExpression) GetExpr() sqlparser.Expr { return po.E } +func (po Offset) expr() {} +func (po *EvalEngine) expr() {} +func (po SubQueryExpression) expr() {} func (p *Projection) Clone(inputs []ops.Operator) ops.Operator { return &Projection{ - Source: inputs[0], - 
Columns: slices.Clone(p.Columns), - Projections: slices.Clone(p.Projections), - TableID: p.TableID, - Alias: p.Alias, - FromAggr: p.FromAggr, + Source: inputs[0], + Columns: p.Columns, // TODO don't think we need to deep clone here + DT: p.DT, + FromAggr: p.FromAggr, } } @@ -139,11 +395,29 @@ func (p *Projection) AddPredicate(ctx *plancontext.PlanningContext, expr sqlpars return p, nil } -func (p *Projection) GetColumns() ([]*sqlparser.AliasedExpr, error) { - if p.TableID != nil { - return nil, nil +func (p *Projection) GetColumns(*plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return p.Columns.GetColumns() +} + +func (p *Projection) GetSelectExprs(*plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + switch cols := p.Columns.(type) { + case StarProjections: + return sqlparser.SelectExprs(cols), nil + case AliasedProjections: + var output sqlparser.SelectExprs + for _, pe := range cols { + ae := &sqlparser.AliasedExpr{Expr: pe.EvalExpr} + if !pe.Original.As.IsEmpty() { + ae.As = pe.Original.As + } else if !sqlparser.Equals.Expr(ae.Expr, pe.Original.Expr) { + ae.As = sqlparser.NewIdentifierCI(pe.Original.ColumnName()) + } + output = append(output, ae) + } + return output, nil + default: + panic("unknown type") } - return p.Columns, nil } func (p *Projection) GetOrdering() ([]ops.OrderBy, error) { @@ -153,162 +427,176 @@ func (p *Projection) GetOrdering() ([]ops.OrderBy, error) { // AllOffsets returns a slice of integer offsets for all columns in the Projection // if all columns are of type Offset. If any column is not of type Offset, it returns nil. 
func (p *Projection) AllOffsets() (cols []int) { - for _, c := range p.Projections { - offset, ok := c.(Offset) + ap, err := p.GetAliasedProjections() + if err != nil { + return nil + } + for _, c := range ap { + offset, ok := c.Info.(Offset) if !ok { return nil } - cols = append(cols, offset.Offset) + cols = append(cols, int(offset)) } return } -func (p *Projection) Description() ops.OpDescription { - var columns []string - for i, col := range p.Projections { - aliasExpr := p.Columns[i] - if aliasExpr.Expr == col.GetExpr() { - columns = append(columns, sqlparser.String(aliasExpr)) - } else { - columns = append(columns, fmt.Sprintf("%s AS %s", sqlparser.String(col.GetExpr()), aliasExpr.ColumnName())) - } +func (p *Projection) ShortDescription() string { + var result []string + if p.DT != nil { + result = append(result, p.DT.String()) } - other := map[string]any{ - "OutputColumns": strings.Join(columns, ", "), + switch columns := p.Columns.(type) { + case StarProjections: + for _, se := range columns { + result = append(result, sqlparser.String(se)) + } + case AliasedProjections: + for _, col := range columns { + result = append(result, col.String()) + } } - if p.TableID != nil { - other["Derived"] = true - other["Alias"] = p.Alias + + return strings.Join(result, ", ") +} + +func (p *Projection) Compact(ctx *plancontext.PlanningContext) (ops.Operator, *rewrite.ApplyResult, error) { + if p.isDerived() { + return p, rewrite.SameTree, nil } - return ops.OpDescription{ - OperatorType: "Projection", - Other: other, + + ap, err := p.GetAliasedProjections() + if err != nil { + return p, rewrite.SameTree, nil } -} -func (p *Projection) ShortDescription() string { - var columns []string - if p.Alias != "" { - columns = append(columns, "derived["+p.Alias+"]") - } - for i, col := range p.Projections { - aliasExpr := p.Columns[i] - if aliasExpr.Expr == col.GetExpr() { - columns = append(columns, sqlparser.String(aliasExpr)) - } else { - columns = append(columns, fmt.Sprintf("%s 
AS %s", sqlparser.String(col.GetExpr()), aliasExpr.ColumnName())) + // for projections that are not derived tables, we can check if it is safe to remove or not + needed := false + for i, projection := range ap { + e, ok := projection.Info.(Offset) + if !ok || int(e) != i { + needed = true + break } } - return strings.Join(columns, ", ") -} -func (p *Projection) Compact(ctx *plancontext.PlanningContext) (ops.Operator, *rewrite.ApplyResult, error) { + if !needed { + return p.Source, rewrite.NewTree("removed projection only passing through the input", p), nil + } + switch src := p.Source.(type) { case *Route: - return p.compactWithRoute(src) + return p.compactWithRoute(ctx, src) case *ApplyJoin: return p.compactWithJoin(ctx, src) } return p, rewrite.SameTree, nil } -func (p *Projection) compactWithJoin(ctx *plancontext.PlanningContext, src *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { +func (p *Projection) compactWithJoin(ctx *plancontext.PlanningContext, join *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { + ap, err := p.GetAliasedProjections() + if err != nil { + return p, rewrite.SameTree, nil + } + var newColumns []int var newColumnsAST []JoinColumn - for idx, col := range p.Projections { - switch col := col.(type) { + for _, col := range ap { + switch colInfo := col.Info.(type) { case Offset: - newColumns = append(newColumns, src.Columns[col.Offset]) - newColumnsAST = append(newColumnsAST, src.ColumnsAST[col.Offset]) - case UnexploredExpression: - if !ctx.SemTable.EqualsExprWithDeps(col.E, p.Columns[idx].Expr) { + newColumns = append(newColumns, join.Columns[colInfo]) + newColumnsAST = append(newColumnsAST, join.JoinColumns[colInfo]) + case nil: + if !ctx.SemTable.EqualsExprWithDeps(col.EvalExpr, col.ColExpr) { // the inner expression is different from what we are presenting to the outside - this means we need to evaluate return p, rewrite.SameTree, nil } - offset := slices.IndexFunc(src.ColumnsAST, func(jc JoinColumn) bool { - return 
ctx.SemTable.EqualsExprWithDeps(jc.Original.Expr, col.E) + offset := slices.IndexFunc(join.JoinColumns, func(jc JoinColumn) bool { + return ctx.SemTable.EqualsExprWithDeps(jc.Original.Expr, col.ColExpr) }) if offset < 0 { return p, rewrite.SameTree, nil } - if len(src.Columns) > 0 { - newColumns = append(newColumns, src.Columns[offset]) + + if len(join.Columns) > 0 { + newColumns = append(newColumns, join.Columns[offset]) } - newColumnsAST = append(newColumnsAST, src.ColumnsAST[offset]) + newColumnsAST = append(newColumnsAST, join.JoinColumns[offset]) default: return p, rewrite.SameTree, nil } } - src.Columns = newColumns - src.ColumnsAST = newColumnsAST - return src, rewrite.NewTree("remove projection from before join", src), nil + join.Columns = newColumns + join.JoinColumns = newColumnsAST + return join, rewrite.NewTree("remove projection from before join", join), nil } -func (p *Projection) compactWithRoute(rb *Route) (ops.Operator, *rewrite.ApplyResult, error) { - for i, col := range p.Projections { - offset, ok := col.(Offset) - if !ok || offset.Offset != i { +func (p *Projection) compactWithRoute(ctx *plancontext.PlanningContext, rb *Route) (ops.Operator, *rewrite.ApplyResult, error) { + ap, err := p.GetAliasedProjections() + if err != nil { + return p, rewrite.SameTree, nil + } + + for i, col := range ap { + offset, ok := col.Info.(Offset) + if !ok || int(offset) != i { return p, rewrite.SameTree, nil } } - columns, err := rb.GetColumns() + columns, err := rb.GetColumns(ctx) if err != nil { return nil, nil, err } - if len(columns) == len(p.Projections) { + if len(columns) == len(ap) { return rb, rewrite.NewTree("remove projection from before route", rb), nil } rb.ResultColumns = len(columns) return rb, rewrite.SameTree, nil } -func stopAtAggregations(node, _ sqlparser.SQLNode) bool { - _, aggr := node.(sqlparser.AggrFunc) - b := !aggr - return b -} - +// needsEvaluation finds the expression given by this argument and checks if the inside and outside 
expressions match +// we can't rely on the content of the info field since it's not filled in until offset plan time func (p *Projection) needsEvaluation(ctx *plancontext.PlanningContext, e sqlparser.Expr) bool { - offset := slices.IndexFunc(p.Columns, func(expr *sqlparser.AliasedExpr) bool { - return ctx.SemTable.EqualsExprWithDeps(expr.Expr, e) - }) - - if offset < 0 { - return false + ap, err := p.GetAliasedProjections() + if err != nil { + return true } - inside := p.Projections[offset].GetExpr() - outside := p.Columns[offset].Expr - return inside != outside + for _, pe := range ap { + if !ctx.SemTable.EqualsExprWithDeps(pe.ColExpr, e) { + continue + } + return !ctx.SemTable.EqualsExprWithDeps(pe.ColExpr, pe.EvalExpr) + } + return false } func (p *Projection) planOffsets(ctx *plancontext.PlanningContext) error { - for i, col := range p.Projections { - _, unexplored := col.(UnexploredExpression) - if !unexplored { + ap, err := p.GetAliasedProjections() + if err != nil { + return err + } + + for _, pe := range ap { + switch pe.Info.(type) { + case *Offset, *EvalEngine: continue } // first step is to replace the expressions we expect to get from our input with the offsets for these - visitor, errCheck := offsetter(ctx, - func() ops.Operator { return p.Source }, - func(o ops.Operator) { p.Source = o }, - ) - rewritten := sqlparser.CopyOnRewrite(col.GetExpr(), stopAtAggregations, visitor, nil).(sqlparser.Expr) - if err := errCheck(); err != nil { + rewritten, err := useOffsets(ctx, pe.EvalExpr, p) + if err != nil { return err } + pe.EvalExpr = rewritten + // if we get a pure offset back. No need to do anything else offset, ok := rewritten.(*sqlparser.Offset) if ok { - // we got a pure offset back. 
No need to do anything else - p.Projections[i] = Offset{ - Expr: col.GetExpr(), - Offset: offset.V, - } + pe.Info = Offset(offset.V) continue } @@ -318,8 +606,7 @@ func (p *Projection) planOffsets(ctx *plancontext.PlanningContext) error { return err } - p.Projections[i] = Eval{ - Expr: rewritten, + pe.Info = &EvalEngine{ EExpr: eexpr, } } @@ -327,23 +614,6 @@ func (p *Projection) planOffsets(ctx *plancontext.PlanningContext) error { return nil } -func offsetter(ctx *plancontext.PlanningContext, src func() ops.Operator, setSource func(ops.Operator)) (func(cursor *sqlparser.CopyOnWriteCursor), func() error) { - var err error - return func(cursor *sqlparser.CopyOnWriteCursor) { - expr, ok := cursor.Node().(sqlparser.Expr) - if !ok || !fetchByOffset(expr) { - return - } - - newSrc, offset, terr := src().AddColumn(ctx, aeWrap(expr), true, false) - if terr != nil { - err = terr - return - } - setSource(newSrc) - cursor.Replace(sqlparser.NewOffset(offset, expr)) - - }, func() error { - return err - } +func (p *Projection) introducesTableID() semantics.TableSet { + return p.DT.introducesTableID() } diff --git a/go/vt/vtgate/planbuilder/operators/query_planning.go b/go/vt/vtgate/planbuilder/operators/query_planning.go new file mode 100644 index 00000000000..b4ecbdb4a7f --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/query_planning.go @@ -0,0 +1,896 @@ +/* +Copyright 2022 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package operators + +import ( + "fmt" + "io" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +type ( + projector struct { + columns []*ProjExpr + columnAliases sqlparser.Columns + explicitColumnAliases bool + } +) + +func planQuery(ctx *plancontext.PlanningContext, root ops.Operator) (output ops.Operator, err error) { + output, err = runPhases(ctx, root) + if err != nil { + return nil, err + } + + output, err = planOffsets(ctx, output) + if err != nil { + return nil, err + } + + if rewrite.DebugOperatorTree { + fmt.Println("After offset planning:") + fmt.Println(ops.ToTree(output)) + } + + output, err = compact(ctx, output) + if err != nil { + return nil, err + } + + return addTruncationOrProjectionToReturnOutput(ctx, root, output) +} + +// runPhases is the process of figuring out how to perform the operations in the Horizon +// If we can push it under a route - done. 
+// If we can't, we will instead expand the Horizon into +// smaller operators and try to push these down as far as possible +func runPhases(ctx *plancontext.PlanningContext, root ops.Operator) (op ops.Operator, err error) { + op = root + for _, phase := range getPhases(ctx) { + ctx.CurrentPhase = int(phase) + if rewrite.DebugOperatorTree { + fmt.Printf("PHASE: %s\n", phase.String()) + } + + op, err = phase.act(ctx, op) + if err != nil { + return nil, err + } + + op, err = runRewriters(ctx, op) + if err != nil { + return nil, err + } + + op, err = compact(ctx, op) + if err != nil { + return nil, err + } + } + + return addGroupByOnRHSOfJoin(op) +} + +func runRewriters(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { + visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { + switch in := in.(type) { + case *Horizon: + return pushOrExpandHorizon(ctx, in) + case *Join: + return optimizeJoin(ctx, in) + case *Projection: + return tryPushProjection(ctx, in) + case *Limit: + return tryPushLimit(in) + case *Ordering: + return tryPushOrdering(ctx, in) + case *Aggregator: + return tryPushAggregator(ctx, in) + case *Filter: + return tryPushFilter(ctx, in) + case *Distinct: + return tryPushDistinct(in) + case *Union: + return tryPushUnion(ctx, in) + case *SubQueryContainer: + return pushOrMergeSubQueryContainer(ctx, in) + case *QueryGraph: + return optimizeQueryGraph(ctx, in) + case *LockAndComment: + return pushLockAndComment(in) + default: + return in, rewrite.SameTree, nil + } + } + + return rewrite.FixedPointBottomUp(root, TableID, visitor, stopAtRoute) +} + +func pushLockAndComment(l *LockAndComment) (ops.Operator, *rewrite.ApplyResult, error) { + switch src := l.Source.(type) { + case *Horizon, *QueryGraph: + // we want to wait until the horizons have been pushed under a route or expanded + // that way we know that we've replaced the QueryGraphs with Routes + return src, 
rewrite.SameTree, nil + case *Route: + src.Comments = l.Comments + src.Lock = l.Lock + return src, rewrite.NewTree("put lock and comment into route", l), nil + default: + inputs := src.Inputs() + for i, op := range inputs { + inputs[i] = &LockAndComment{ + Source: op, + Comments: l.Comments, + Lock: l.Lock, + } + } + src.SetInputs(inputs) + return src, rewrite.NewTree("pushed down lock and comments", l), nil + } +} + +func pushOrExpandHorizon(ctx *plancontext.PlanningContext, in *Horizon) (ops.Operator, *rewrite.ApplyResult, error) { + if in.IsDerived() { + newOp, result, err := pushDerived(ctx, in) + if err != nil { + return nil, nil, err + } + if result != rewrite.SameTree { + return newOp, result, nil + } + } + + if !reachedPhase(ctx, initialPlanning) { + return in, rewrite.SameTree, nil + } + + if ctx.SemTable.QuerySignature.SubQueries { + return expandHorizon(ctx, in) + } + + rb, isRoute := in.src().(*Route) + if isRoute && rb.IsSingleShard() { + return rewrite.Swap(in, rb, "push horizon into route") + } + + sel, isSel := in.selectStatement().(*sqlparser.Select) + + qp, err := in.getQP(ctx) + if err != nil { + return nil, nil, err + } + + needsOrdering := len(qp.OrderExprs) > 0 + hasHaving := isSel && sel.Having != nil + + canPush := isRoute && + !hasHaving && + !needsOrdering && + !qp.NeedsAggregation() && + !in.selectStatement().IsDistinct() && + in.selectStatement().GetLimit() == nil + + if canPush { + return rewrite.Swap(in, rb, "push horizon into route") + } + + return expandHorizon(ctx, in) +} + +func tryPushProjection( + ctx *plancontext.PlanningContext, + p *Projection, +) (ops.Operator, *rewrite.ApplyResult, error) { + switch src := p.Source.(type) { + case *Route: + return rewrite.Swap(p, src, "push projection under route") + case *ApplyJoin: + if p.FromAggr || !p.canPush(ctx) { + return p, rewrite.SameTree, nil + } + return pushProjectionInApplyJoin(ctx, p, src) + case *Vindex: + if !p.canPush(ctx) { + return p, rewrite.SameTree, nil + } + return 
pushProjectionInVindex(ctx, p, src) + case *SubQueryContainer: + if !p.canPush(ctx) { + return p, rewrite.SameTree, nil + } + return pushProjectionToOuterContainer(ctx, p, src) + case *SubQuery: + return pushProjectionToOuter(ctx, p, src) + case *Limit: + return rewrite.Swap(p, src, "push projection under limit") + default: + return p, rewrite.SameTree, nil + } +} + +func pushProjectionToOuter(ctx *plancontext.PlanningContext, p *Projection, sq *SubQuery) (ops.Operator, *rewrite.ApplyResult, error) { + ap, err := p.GetAliasedProjections() + if err != nil { + return p, rewrite.SameTree, nil + } + + if !reachedPhase(ctx, subquerySettling) || err != nil { + return p, rewrite.SameTree, nil + } + + outer := TableID(sq.Outer) + for _, pe := range ap { + _, isOffset := pe.Info.(*Offset) + if isOffset { + continue + } + + if !ctx.SemTable.RecursiveDeps(pe.EvalExpr).IsSolvedBy(outer) { + return p, rewrite.SameTree, nil + } + + se, ok := pe.Info.(SubQueryExpression) + if ok { + pe.EvalExpr = rewriteColNameToArgument(ctx, pe.EvalExpr, se, sq) + } + } + // all projections can be pushed to the outer + sq.Outer, p.Source = p, sq.Outer + return sq, rewrite.NewTree("push projection into outer side of subquery", p), nil +} + +func pushProjectionInVindex( + ctx *plancontext.PlanningContext, + p *Projection, + src *Vindex, +) (ops.Operator, *rewrite.ApplyResult, error) { + ap, err := p.GetAliasedProjections() + if err != nil { + return nil, nil, err + } + for _, pe := range ap { + _, err = src.AddColumn(ctx, true, false, aeWrap(pe.EvalExpr)) + if err != nil { + return nil, nil, err + } + } + return src, rewrite.NewTree("push projection into vindex", p), nil +} + +func (p *projector) add(pe *ProjExpr, col *sqlparser.IdentifierCI) { + p.columns = append(p.columns, pe) + if col != nil { + p.columnAliases = append(p.columnAliases, *col) + } +} + +// pushProjectionInApplyJoin pushes down a projection operation into an ApplyJoin operation. 
+// It processes each input column and creates new JoinPredicates for the ApplyJoin operation based on +// the input column's expression. It also creates new Projection operators for the left and right +// children of the ApplyJoin operation, if needed. +func pushProjectionInApplyJoin( + ctx *plancontext.PlanningContext, + p *Projection, + src *ApplyJoin, +) (ops.Operator, *rewrite.ApplyResult, error) { + ap, err := p.GetAliasedProjections() + if src.LeftJoin || err != nil { + // we can't push down expression evaluation to the rhs if we are not sure if it will even be executed + return p, rewrite.SameTree, nil + } + lhs, rhs := &projector{}, &projector{} + if p.DT != nil && len(p.DT.Columns) > 0 { + lhs.explicitColumnAliases = true + rhs.explicitColumnAliases = true + } + + src.JoinColumns = nil + for idx, pe := range ap { + var col *sqlparser.IdentifierCI + if p.DT != nil && idx < len(p.DT.Columns) { + col = &p.DT.Columns[idx] + } + err := splitProjectionAcrossJoin(ctx, src, lhs, rhs, pe, col) + if err != nil { + return nil, nil, err + } + } + + if p.isDerived() { + err := exposeColumnsThroughDerivedTable(ctx, p, src, lhs) + if err != nil { + return nil, nil, err + } + } + + // Create and update the Projection operators for the left and right children, if needed. 
+ src.LHS, err = createProjectionWithTheseColumns(ctx, src.LHS, lhs, p.DT) + if err != nil { + return nil, nil, err + } + + src.RHS, err = createProjectionWithTheseColumns(ctx, src.RHS, rhs, p.DT) + if err != nil { + return nil, nil, err + } + + return src, rewrite.NewTree("split projection to either side of join", src), nil +} + +// splitProjectionAcrossJoin creates JoinPredicates for all projections, +// and pushes down columns as needed between the LHS and RHS of a join +func splitProjectionAcrossJoin( + ctx *plancontext.PlanningContext, + join *ApplyJoin, + lhs, rhs *projector, + pe *ProjExpr, + colAlias *sqlparser.IdentifierCI, +) error { + + // Check if the current expression can reuse an existing column in the ApplyJoin. + if _, found := canReuseColumn(ctx, join.JoinColumns, pe.EvalExpr, joinColumnToExpr); found { + return nil + } + + col, err := splitUnexploredExpression(ctx, join, lhs, rhs, pe, colAlias) + if err != nil { + return err + } + + // Add the new JoinColumn to the ApplyJoin's JoinPredicates. + join.JoinColumns = append(join.JoinColumns, col) + return nil +} + +func splitUnexploredExpression( + ctx *plancontext.PlanningContext, + join *ApplyJoin, + lhs, rhs *projector, + pe *ProjExpr, + colAlias *sqlparser.IdentifierCI, +) (JoinColumn, error) { + // Get a JoinColumn for the current expression. + col, err := join.getJoinColumnFor(ctx, pe.Original, pe.ColExpr, false) + if err != nil { + return JoinColumn{}, err + } + + // Update the left and right child columns and names based on the JoinColumn type. + switch { + case col.IsPureLeft(): + lhs.add(pe, colAlias) + case col.IsPureRight(): + rhs.add(pe, colAlias) + case col.IsMixedLeftAndRight(): + for _, lhsExpr := range col.LHSExprs { + var lhsAlias *sqlparser.IdentifierCI + if colAlias != nil { + // we need to add an explicit column alias here. 
let's try just the ColName as is first + ci := sqlparser.NewIdentifierCI(sqlparser.String(lhsExpr.Expr)) + lhsAlias = &ci + } + lhs.add(newProjExpr(aeWrap(lhsExpr.Expr)), lhsAlias) + } + innerPE := newProjExprWithInner(pe.Original, col.RHSExpr) + innerPE.ColExpr = col.RHSExpr + innerPE.Info = pe.Info + rhs.add(innerPE, colAlias) + } + return col, nil +} + +// exposeColumnsThroughDerivedTable rewrites expressions within a join that is inside a derived table +// in order to make them accessible outside the derived table. This is necessary when swapping the +// positions of the derived table and join operation. +// +// For example, consider the input query: +// select ... from (select T1.foo from T1 join T2 on T1.id = T2.id) as t +// If we push the derived table under the join, with T1 on the LHS of the join, we need to expose +// the values of T1.id through the derived table, or they will not be accessible on the RHS. +// +// The function iterates through each join predicate, rewriting the expressions in the predicate's +// LHS expressions to include the derived table. This allows the expressions to be accessed outside +// the derived table. 
+func exposeColumnsThroughDerivedTable(ctx *plancontext.PlanningContext, p *Projection, src *ApplyJoin, lhs *projector) error { + derivedTbl, err := ctx.SemTable.TableInfoFor(p.DT.TableID) + if err != nil { + return err + } + derivedTblName, err := derivedTbl.Name() + if err != nil { + return err + } + for _, predicate := range src.JoinPredicates { + for idx, bve := range predicate.LHSExprs { + expr := bve.Expr + tbl, err := ctx.SemTable.TableInfoForExpr(expr) + if err != nil { + return err + } + tblExpr := tbl.GetExpr() + tblName, err := tblExpr.TableName() + if err != nil { + return err + } + + expr = semantics.RewriteDerivedTableExpression(expr, derivedTbl) + out := prefixColNames(tblName, expr) + + alias := sqlparser.UnescapedString(out) + predicate.LHSExprs[idx].Expr = sqlparser.NewColNameWithQualifier(alias, derivedTblName) + identifierCI := sqlparser.NewIdentifierCI(alias) + projExpr := newProjExprWithInner(&sqlparser.AliasedExpr{Expr: out, As: identifierCI}, out) + var colAlias *sqlparser.IdentifierCI + if lhs.explicitColumnAliases { + colAlias = &identifierCI + } + lhs.add(projExpr, colAlias) + } + } + return nil +} + +// prefixColNames adds qualifier prefixes to all ColName:s. 
+// We want to be more explicit than the user was to make sure we never produce invalid SQL +func prefixColNames(tblName sqlparser.TableName, e sqlparser.Expr) sqlparser.Expr { + return sqlparser.CopyOnRewrite(e, nil, func(cursor *sqlparser.CopyOnWriteCursor) { + col, ok := cursor.Node().(*sqlparser.ColName) + if !ok { + return + } + col.Qualifier = tblName + }, nil).(sqlparser.Expr) +} + +func createProjectionWithTheseColumns( + ctx *plancontext.PlanningContext, + src ops.Operator, + p *projector, + dt *DerivedTable, +) (ops.Operator, error) { + if len(p.columns) == 0 { + return src, nil + } + proj, err := createProjection(ctx, src) + if err != nil { + return nil, err + } + proj.Columns = AliasedProjections(p.columns) + if dt != nil { + kopy := *dt + kopy.Columns = p.columnAliases + proj.DT = &kopy + } + + return proj, nil +} + +func tryPushLimit(in *Limit) (ops.Operator, *rewrite.ApplyResult, error) { + switch src := in.Source.(type) { + case *Route: + return tryPushingDownLimitInRoute(in, src) + case *Aggregator: + return in, rewrite.SameTree, nil + default: + return setUpperLimit(in) + } +} + +func tryPushingDownLimitInRoute(in *Limit, src *Route) (ops.Operator, *rewrite.ApplyResult, error) { + if src.IsSingleShard() { + return rewrite.Swap(in, src, "push limit under route") + } + + return setUpperLimit(in) +} + +func setUpperLimit(in *Limit) (ops.Operator, *rewrite.ApplyResult, error) { + if in.Pushed { + return in, rewrite.SameTree, nil + } + in.Pushed = true + visitor := func(op ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { + return op, rewrite.SameTree, nil + } + var result *rewrite.ApplyResult + shouldVisit := func(op ops.Operator) rewrite.VisitRule { + switch op := op.(type) { + case *Join, *ApplyJoin, *SubQueryContainer, *SubQuery: + // we can't push limits down on either side + return rewrite.SkipChildren + case *Route: + newSrc := &Limit{ + Source: op.Source, + AST: &sqlparser.Limit{Rowcount: 
sqlparser.NewArgument("__upper_limit")}, + Pushed: false, + } + op.Source = newSrc + result = result.Merge(rewrite.NewTree("push limit under route", newSrc)) + return rewrite.SkipChildren + default: + return rewrite.VisitChildren + } + } + + _, err := rewrite.TopDown(in.Source, TableID, visitor, shouldVisit) + if err != nil { + return nil, nil, err + } + return in, result, nil +} + +func tryPushOrdering(ctx *plancontext.PlanningContext, in *Ordering) (ops.Operator, *rewrite.ApplyResult, error) { + switch src := in.Source.(type) { + case *Route: + return rewrite.Swap(in, src, "push ordering under route") + case *ApplyJoin: + if canPushLeft(ctx, src, in.Order) { + // ApplyJoin is stable in regard to the columns coming from the LHS, + // so if all the ordering columns come from the LHS, we can push down the Ordering there + src.LHS, in.Source = in, src.LHS + return src, rewrite.NewTree("push down ordering on the LHS of a join", in), nil + } + case *Ordering: + // we'll just remove the order underneath. 
The top order replaces whatever was incoming + in.Source = src.Source + return in, rewrite.NewTree("remove double ordering", src), nil + case *Projection: + // we can move ordering under a projection if it's not introducing a column we're sorting by + for _, by := range in.Order { + if !fetchByOffset(by.SimplifiedExpr) { + return in, rewrite.SameTree, nil + } + } + return rewrite.Swap(in, src, "push ordering under projection") + case *Aggregator: + if !src.QP.AlignGroupByAndOrderBy(ctx) && !overlaps(ctx, in.Order, src.Grouping) { + return in, rewrite.SameTree, nil + } + + return pushOrderingUnderAggr(ctx, in, src) + case *SubQueryContainer: + outerTableID := TableID(src.Outer) + for _, order := range in.Order { + deps := ctx.SemTable.RecursiveDeps(order.Inner.Expr) + if !deps.IsSolvedBy(outerTableID) { + return in, rewrite.SameTree, nil + } + } + src.Outer, in.Source = in, src.Outer + return src, rewrite.NewTree("push ordering into outer side of subquery", in), nil + case *SubQuery: + outerTableID := TableID(src.Outer) + for _, order := range in.Order { + deps := ctx.SemTable.RecursiveDeps(order.Inner.Expr) + if !deps.IsSolvedBy(outerTableID) { + return in, rewrite.SameTree, nil + } + } + src.Outer, in.Source = in, src.Outer + return src, rewrite.NewTree("push ordering into outer side of subquery", in), nil + } + return in, rewrite.SameTree, nil +} + +func overlaps(ctx *plancontext.PlanningContext, order []ops.OrderBy, grouping []GroupBy) bool { +ordering: + for _, orderBy := range order { + for _, groupBy := range grouping { + if ctx.SemTable.EqualsExprWithDeps(orderBy.SimplifiedExpr, groupBy.SimplifiedExpr) { + continue ordering + } + } + return false + } + + return true +} + +func pushOrderingUnderAggr(ctx *plancontext.PlanningContext, order *Ordering, aggregator *Aggregator) (ops.Operator, *rewrite.ApplyResult, error) { + // If Aggregator is a derived table, then we should rewrite the ordering before pushing. 
+ if aggregator.isDerived() { + for idx, orderExpr := range order.Order { + ti, err := ctx.SemTable.TableInfoFor(aggregator.DT.TableID) + if err != nil { + return nil, nil, err + } + newOrderExpr := orderExpr.Map(func(expr sqlparser.Expr) sqlparser.Expr { + return semantics.RewriteDerivedTableExpression(expr, ti) + }) + order.Order[idx] = newOrderExpr + } + } + + // Step 1: Align the GROUP BY and ORDER BY. + // Reorder the GROUP BY columns to match the ORDER BY columns. + // Since the GB clause is a set, we can reorder these columns freely. + var newGrouping []GroupBy + used := make([]bool, len(aggregator.Grouping)) + for _, orderExpr := range order.Order { + for grpIdx, by := range aggregator.Grouping { + if !used[grpIdx] && ctx.SemTable.EqualsExprWithDeps(by.SimplifiedExpr, orderExpr.SimplifiedExpr) { + newGrouping = append(newGrouping, by) + used[grpIdx] = true + } + } + } + + // Step 2: Add any missing columns from the ORDER BY. + // The ORDER BY column is not a set, but we can add more elements + // to the end without changing the semantics of the query. + if len(newGrouping) != len(aggregator.Grouping) { + // we are missing some groupings. We need to add them both to the new groupings list, but also to the ORDER BY + for i, added := range used { + if !added { + groupBy := aggregator.Grouping[i] + newGrouping = append(newGrouping, groupBy) + order.Order = append(order.Order, groupBy.AsOrderBy()) + } + } + } + + aggregator.Grouping = newGrouping + aggrSource, isOrdering := aggregator.Source.(*Ordering) + if isOrdering { + // Transform the query plan tree: + // From: Ordering(1) To: Aggregation + // | | + // Aggregation Ordering(1) + // | | + // Ordering(2) + // | + // + // + // Remove Ordering(2) from the plan tree, as it's redundant + // after pushing down the higher ordering. 
+ order.Source = aggrSource.Source + aggrSource.Source = nil // removing from plan tree + aggregator.Source = order + return aggregator, rewrite.NewTree("push ordering under aggregation, removing extra ordering", aggregator), nil + } + return rewrite.Swap(order, aggregator, "push ordering under aggregation") +} + +func canPushLeft(ctx *plancontext.PlanningContext, aj *ApplyJoin, order []ops.OrderBy) bool { + lhs := TableID(aj.LHS) + for _, order := range order { + deps := ctx.SemTable.DirectDeps(order.Inner.Expr) + if !deps.IsSolvedBy(lhs) { + return false + } + } + return true +} + +func isOuterTable(op ops.Operator, ts semantics.TableSet) bool { + aj, ok := op.(*ApplyJoin) + if ok && aj.LeftJoin && TableID(aj.RHS).IsOverlapping(ts) { + return true + } + + for _, op := range op.Inputs() { + if isOuterTable(op, ts) { + return true + } + } + + return false +} + +func tryPushFilter(ctx *plancontext.PlanningContext, in *Filter) (ops.Operator, *rewrite.ApplyResult, error) { + switch src := in.Source.(type) { + case *Projection: + return pushFilterUnderProjection(ctx, in, src) + case *Route: + for _, pred := range in.Predicates { + var err error + deps := ctx.SemTable.RecursiveDeps(pred) + if !isOuterTable(src, deps) { + // we can only update based on predicates on inner tables + src.Routing, err = src.Routing.updateRoutingLogic(ctx, pred) + if err != nil { + return nil, nil, err + } + } + } + return rewrite.Swap(in, src, "push filter into Route") + case *SubQuery: + outerTableID := TableID(src.Outer) + for _, pred := range in.Predicates { + deps := ctx.SemTable.RecursiveDeps(pred) + if !deps.IsSolvedBy(outerTableID) { + return in, rewrite.SameTree, nil + } + } + src.Outer, in.Source = in, src.Outer + return src, rewrite.NewTree("push filter to outer query in subquery container", in), nil + } + + return in, rewrite.SameTree, nil +} + +func pushFilterUnderProjection(ctx *plancontext.PlanningContext, filter *Filter, projection *Projection) (ops.Operator, 
*rewrite.ApplyResult, error) { + for _, p := range filter.Predicates { + cantPush := false + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + if !fetchByOffset(node) { + return true, nil + } + + if projection.needsEvaluation(ctx, node.(sqlparser.Expr)) { + cantPush = true + return false, io.EOF + } + + return true, nil + }, p) + + if cantPush { + return filter, rewrite.SameTree, nil + } + } + return rewrite.Swap(filter, projection, "push filter under projection") + +} + +func tryPushDistinct(in *Distinct) (ops.Operator, *rewrite.ApplyResult, error) { + if in.Required && in.PushedPerformance { + return in, rewrite.SameTree, nil + } + switch src := in.Source.(type) { + case *Route: + if isDistinct(src.Source) && src.IsSingleShard() { + return src, rewrite.NewTree("distinct not needed", in), nil + } + if src.IsSingleShard() || !in.Required { + return rewrite.Swap(in, src, "push distinct under route") + } + + if isDistinct(src.Source) { + return in, rewrite.SameTree, nil + } + + src.Source = &Distinct{Source: src.Source} + in.PushedPerformance = true + + return in, rewrite.NewTree("added distinct under route - kept original", src), nil + case *Distinct: + src.Required = false + src.PushedPerformance = false + return src, rewrite.NewTree("remove double distinct", src), nil + case *Union: + for i := range src.Sources { + src.Sources[i] = &Distinct{Source: src.Sources[i]} + } + in.PushedPerformance = true + + return in, rewrite.NewTree("push down distinct under union", src), nil + case *ApplyJoin: + src.LHS = &Distinct{Source: src.LHS} + src.RHS = &Distinct{Source: src.RHS} + in.PushedPerformance = true + + if in.Required { + return in, rewrite.NewTree("push distinct under join - kept original", in.Source), nil + } + + return in.Source, rewrite.NewTree("push distinct under join", in.Source), nil + case *Ordering: + in.Source = src.Source + return in, rewrite.NewTree("remove ordering under distinct", in), nil + } + + return in, 
rewrite.SameTree, nil +} + +func isDistinct(op ops.Operator) bool { + switch op := op.(type) { + case *Distinct: + return true + case *Union: + return op.distinct + case *Horizon: + return op.Query.IsDistinct() + case *Limit: + return isDistinct(op.Source) + default: + return false + } +} + +func tryPushUnion(ctx *plancontext.PlanningContext, op *Union) (ops.Operator, *rewrite.ApplyResult, error) { + if res := compactUnion(op); res != rewrite.SameTree { + return op, res, nil + } + + var sources []ops.Operator + var selects []sqlparser.SelectExprs + var err error + + if op.distinct { + sources, selects, err = mergeUnionInputInAnyOrder(ctx, op) + } else { + sources, selects, err = mergeUnionInputsInOrder(ctx, op) + } + if err != nil { + return nil, nil, err + } + + if len(sources) == 1 { + result := sources[0].(*Route) + if result.IsSingleShard() || !op.distinct { + return result, rewrite.NewTree("push union under route", op), nil + } + + return &Distinct{ + Source: result, + Required: true, + }, rewrite.NewTree("push union under route", op), nil + } + + if len(sources) == len(op.Sources) { + return op, rewrite.SameTree, nil + } + return newUnion(sources, selects, op.unionColumns, op.distinct), rewrite.NewTree("merge union inputs", op), nil +} + +// addTruncationOrProjectionToReturnOutput uses the original Horizon to make sure that the output columns line up with what the user asked for +func addTruncationOrProjectionToReturnOutput(ctx *plancontext.PlanningContext, oldHorizon ops.Operator, output ops.Operator) (ops.Operator, error) { + horizon, ok := oldHorizon.(*Horizon) + if !ok { + return output, nil + } + + cols, err := output.GetSelectExprs(ctx) + if err != nil { + return nil, err + } + + sel := sqlparser.GetFirstSelect(horizon.Query) + if len(sel.SelectExprs) == len(cols) { + return output, nil + } + + if tryTruncateColumnsAt(output, len(sel.SelectExprs)) { + return output, nil + } + + qp, err := horizon.getQP(ctx) + if err != nil { + return nil, err + } + 
proj, err := createSimpleProjection(ctx, qp, output) + if err != nil { + return nil, err + } + return proj, nil +} + +func stopAtRoute(operator ops.Operator) rewrite.VisitRule { + _, isRoute := operator.(*Route) + return rewrite.VisitRule(!isRoute) +} + +func aeWrap(e sqlparser.Expr) *sqlparser.AliasedExpr { + return &sqlparser.AliasedExpr{Expr: e} +} diff --git a/go/vt/vtgate/planbuilder/operators/querygraph.go b/go/vt/vtgate/planbuilder/operators/querygraph.go index a764ca3db89..f384607fe10 100644 --- a/go/vt/vtgate/planbuilder/operators/querygraph.go +++ b/go/vt/vtgate/planbuilder/operators/querygraph.go @@ -65,8 +65,8 @@ type ( var _ ops.Operator = (*QueryGraph)(nil) -// Introduces implements the TableIDIntroducer interface -func (qg *QueryGraph) Introduces() semantics.TableSet { +// Introduces implements the tableIDIntroducer interface +func (qg *QueryGraph) introducesTableID() semantics.TableSet { var ts semantics.TableSet for _, table := range qg.Tables { ts = ts.Merge(table.ID) @@ -92,26 +92,6 @@ func newQueryGraph() *QueryGraph { return &QueryGraph{} } -func (qg *QueryGraph) collectPredicates(ctx *plancontext.PlanningContext, sel *sqlparser.Select) error { - predicates := sqlparser.SplitAndExpression(nil, sel.Where.Expr) - - for _, predicate := range predicates { - err := qg.collectPredicate(ctx, predicate) - if err != nil { - return err - } - } - return nil -} - -func (qg *QueryGraph) getPredicateByDeps(ts semantics.TableSet) ([]sqlparser.Expr, bool) { - for _, join := range qg.innerJoins { - if join.deps == ts { - return join.exprs, true - } - } - return nil, false -} func (qg *QueryGraph) addJoinPredicates(ctx *plancontext.PlanningContext, ts semantics.TableSet, predicate sqlparser.Expr) { for _, join := range qg.innerJoins { if join.deps == ts { @@ -130,7 +110,7 @@ func (qg *QueryGraph) addJoinPredicates(ctx *plancontext.PlanningContext, ts sem }) } -func (qg *QueryGraph) collectPredicate(ctx *plancontext.PlanningContext, predicate sqlparser.Expr) 
error { +func (qg *QueryGraph) collectPredicate(ctx *plancontext.PlanningContext, predicate sqlparser.Expr) { deps := ctx.SemTable.RecursiveDeps(predicate) switch deps.NumberOfTables() { case 0: @@ -144,7 +124,6 @@ func (qg *QueryGraph) collectPredicate(ctx *plancontext.PlanningContext, predica default: qg.addJoinPredicates(ctx, deps, predicate) } - return nil } func (qg *QueryGraph) addToSingleTable(ctx *plancontext.PlanningContext, table semantics.TableSet, predicate sqlparser.Expr) bool { @@ -203,10 +182,7 @@ func (qg *QueryGraph) GetOrdering() ([]ops.OrderBy, error) { func (qg *QueryGraph) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { for _, e := range sqlparser.SplitAndExpression(nil, expr) { - err := qg.collectPredicate(ctx, e) - if err != nil { - return nil, err - } + qg.collectPredicate(ctx, e) } return qg, nil } @@ -222,13 +198,6 @@ func (qt *QueryTable) Clone() *QueryTable { } } -func (qg *QueryGraph) Description() ops.OpDescription { - return ops.OpDescription{ - OperatorType: "QueryGraph", - Other: map[string]any{"Tables": qg.tableNames()}, - } -} - func (qg *QueryGraph) tableNames() (tables []string) { for _, table := range qg.Tables { tables = append(tables, sqlparser.String(table.Table)) diff --git a/go/vt/vtgate/planbuilder/operators/queryprojection.go b/go/vt/vtgate/planbuilder/operators/queryprojection.go index d18574bc1f3..f15d3642312 100644 --- a/go/vt/vtgate/planbuilder/operators/queryprojection.go +++ b/go/vt/vtgate/planbuilder/operators/queryprojection.go @@ -19,19 +19,17 @@ package operators import ( "encoding/json" "fmt" + "io" + "slices" "sort" "strings" - "golang.org/x/exp/slices" - - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - 
"vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -59,7 +57,7 @@ type ( hasCheckedAlignment bool // TODO Remove once all horizon planning is done on the operators - CanPushDownSorting bool + CanPushSorting bool } // GroupBy contains the expression to used in group by and also if grouping is needed at VTGate level then what the weight_string function expression to be sent down for evaluation. @@ -93,8 +91,11 @@ type ( OriginalOpCode opcode.AggregateOpcode Alias string + // The index at which the user expects to see this aggregated function. Set to nil, if the user does not ask for it - Index *int + // Only used in the old Horizon Planner + Index *int + Distinct bool // the offsets point to columns on the same aggregator @@ -109,16 +110,8 @@ type ( } ) -func (aggr Aggr) NeedWeightString(ctx *plancontext.PlanningContext) bool { - switch aggr.OpCode { - case opcode.AggregateCountDistinct, opcode.AggregateSumDistinct: - return ctx.SemTable.NeedsWeightString(aggr.Func.GetArg()) - case opcode.AggregateMin, opcode.AggregateMax: - // currently this returns false, as aggregation engine primitive does not support the usage of weight_string - // for comparison. If Min/Max column is non-comparable then it will fail at runtime. 
- return false - } - return false +func (aggr Aggr) NeedsWeightString(ctx *plancontext.PlanningContext) bool { + return aggr.OpCode.NeedsComparableValues() && ctx.SemTable.NeedsWeightString(aggr.Func.GetArg()) } func (aggr Aggr) GetTypeCollation(ctx *plancontext.PlanningContext) (sqltypes.Type, collations.ID) { @@ -203,14 +196,14 @@ func (s SelectExpr) GetAliasedExpr() (*sqlparser.AliasedExpr, error) { case *sqlparser.AliasedExpr: return expr, nil case *sqlparser.StarExpr: - return nil, vterrors.VT12001("'*' expression in cross-shard query") + return nil, vterrors.VT09015() default: return nil, vterrors.VT12001(fmt.Sprintf("not an aliased expression: %T", expr)) } } -// CreateQPFromSelect creates the QueryProjection for the input *sqlparser.Select -func CreateQPFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select) (*QueryProjection, error) { +// createQPFromSelect creates the QueryProjection for the input *sqlparser.Select +func createQPFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select) (*QueryProjection, error) { qp := &QueryProjection{ Distinct: sel.Distinct, } @@ -224,7 +217,9 @@ func CreateQPFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select) if err := qp.addOrderBy(ctx, sel.OrderBy); err != nil { return nil, err } - + if !qp.HasAggr && sel.Having != nil { + qp.HasAggr = containsAggr(sel.Having.Expr) + } qp.calculateDistinct(ctx) return qp, nil @@ -296,7 +291,7 @@ func (qp *QueryProjection) addSelectExpressions(sel *sqlparser.Select) error { col := SelectExpr{ Col: selExp, } - if sqlparser.ContainsAggregation(selExp.Expr) { + if containsAggr(selExp.Expr) { col.Aggr = true qp.HasAggr = true } @@ -315,8 +310,27 @@ func (qp *QueryProjection) addSelectExpressions(sel *sqlparser.Select) error { return nil } -// CreateQPFromUnion creates the QueryProjection for the input *sqlparser.Union -func CreateQPFromUnion(ctx *plancontext.PlanningContext, union *sqlparser.Union) (*QueryProjection, error) { +func containsAggr(e 
sqlparser.SQLNode) (hasAggr bool) { + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node.(type) { + case *sqlparser.Offset: + // offsets here indicate that a possible aggregation has already been handled by an input + // so we don't need to worry about aggregation in the original + return false, nil + case sqlparser.AggrFunc: + hasAggr = true + return false, io.EOF + case *sqlparser.Subquery: + return false, nil + } + + return true, nil + }, e) + return +} + +// createQPFromUnion creates the QueryProjection for the input *sqlparser.Union +func createQPFromUnion(ctx *plancontext.PlanningContext, union *sqlparser.Union) (*QueryProjection, error) { qp := &QueryProjection{} sel := sqlparser.GetFirstSelect(union) @@ -351,7 +365,7 @@ func (es *expressionSet) add(ctx *plancontext.PlanningContext, e sqlparser.Expr) } func (qp *QueryProjection) addOrderBy(ctx *plancontext.PlanningContext, orderBy sqlparser.OrderBy) error { - canPushDownSorting := true + canPushSorting := true es := &expressionSet{} for _, order := range orderBy { simpleExpr := qp.GetSimplifiedExpr(order.Expr) @@ -366,16 +380,21 @@ func (qp *QueryProjection) addOrderBy(ctx *plancontext.PlanningContext, orderBy Inner: sqlparser.CloneRefOfOrder(order), SimplifiedExpr: simpleExpr, }) - canPushDownSorting = canPushDownSorting && !sqlparser.ContainsAggregation(simpleExpr) + canPushSorting = canPushSorting && !containsAggr(simpleExpr) } - qp.CanPushDownSorting = canPushDownSorting + qp.CanPushSorting = canPushSorting return nil } func (qp *QueryProjection) calculateDistinct(ctx *plancontext.PlanningContext) { if qp.Distinct && !qp.HasAggr { - // grouping and distinct both lead to unique results, so we don't need - qp.groupByExprs = nil + if qp.useGroupingOverDistinct(ctx) { + // if order by exists with overlap with select expressions, we can use the aggregation with ordering over distinct. 
+ qp.Distinct = false + } else { + // grouping and distinct both lead to unique results, so we don't need + qp.groupByExprs = nil + } } if qp.HasAggr && len(qp.groupByExprs) == 0 { @@ -446,13 +465,9 @@ func checkForInvalidAggregations(exp *sqlparser.AliasedExpr) error { }, exp.Expr) } -func (qp *QueryProjection) isExprInGroupByExprs(ctx *plancontext.PlanningContext, expr SelectExpr) bool { +func (qp *QueryProjection) isExprInGroupByExprs(ctx *plancontext.PlanningContext, expr sqlparser.Expr) bool { for _, groupByExpr := range qp.groupByExprs { - exp, err := expr.GetExpr() - if err != nil { - return false - } - if ctx.SemTable.EqualsExprWithDeps(groupByExpr.SimplifiedExpr, exp) { + if ctx.SemTable.EqualsExprWithDeps(groupByExpr.SimplifiedExpr, expr) { return true } } @@ -461,6 +476,9 @@ func (qp *QueryProjection) isExprInGroupByExprs(ctx *plancontext.PlanningContext // GetSimplifiedExpr takes an expression used in ORDER BY or GROUP BY, and returns an expression that is simpler to evaluate func (qp *QueryProjection) GetSimplifiedExpr(e sqlparser.Expr) sqlparser.Expr { + if qp == nil { + return e + } // If the ORDER BY is against a column alias, we need to remember the expression // behind the alias. The weightstring(.) calls needs to be done against that expression and not the alias. 
// Eg - select music.foo as bar, weightstring(music.foo) from music order by bar @@ -569,7 +587,7 @@ func (qp *QueryProjection) NeedsProjecting( } rewritten := semantics.RewriteDerivedTableExpression(col, dt) - if sqlparser.ContainsAggregation(rewritten) { + if containsAggr(rewritten) { offset, tErr := pusher(&sqlparser.AliasedExpr{Expr: col}) if tErr != nil { err = tErr @@ -625,7 +643,7 @@ func (qp *QueryProjection) NeedsDistinct() bool { return true } -func (qp *QueryProjection) AggregationExpressions(ctx *plancontext.PlanningContext) (out []Aggr, err error) { +func (qp *QueryProjection) AggregationExpressions(ctx *plancontext.PlanningContext, allowComplexExpression bool) (out []Aggr, complex bool, err error) { orderBy: for _, orderExpr := range qp.OrderExprs { orderExpr := orderExpr.SimplifiedExpr @@ -640,7 +658,7 @@ orderBy: } qp.SelectExprs = append(qp.SelectExprs, SelectExpr{ Col: &sqlparser.AliasedExpr{Expr: orderExpr}, - Aggr: sqlparser.ContainsAggregation(orderExpr), + Aggr: containsAggr(orderExpr), }) qp.AddedColumn++ } @@ -651,49 +669,80 @@ orderBy: for idx, expr := range qp.SelectExprs { aliasedExpr, err := expr.GetAliasedExpr() if err != nil { - return nil, err + return nil, false, err } idxCopy := idx - if !sqlparser.ContainsAggregation(expr.Col) { - if !qp.isExprInGroupByExprs(ctx, expr) { - aggr := NewAggr(opcode.AggregateRandom, nil, aliasedExpr, aliasedExpr.ColumnName()) + if !containsAggr(expr.Col) { + getExpr, err := expr.GetExpr() + if err != nil { + return nil, false, err + } + if !qp.isExprInGroupByExprs(ctx, getExpr) { + aggr := NewAggr(opcode.AggregateAnyValue, nil, aliasedExpr, aliasedExpr.ColumnName()) aggr.Index = &idxCopy out = append(out, aggr) } continue } - fnc, isAggregate := aliasedExpr.Expr.(sqlparser.AggrFunc) - if !isAggregate { - return nil, vterrors.VT12001("in scatter query: complex aggregate expression") + _, isAggregate := aliasedExpr.Expr.(sqlparser.AggrFunc) + if !isAggregate && !allowComplexExpression { + return nil, 
false, vterrors.VT12001("in scatter query: complex aggregate expression") } - code := opcode.SupportedAggregates[strings.ToLower(fnc.AggrName())] - - if code == opcode.AggregateCount { - if _, isStar := fnc.(*sqlparser.CountStar); isStar { - code = opcode.AggregateCountStar + sqlparser.CopyOnRewrite(aliasedExpr.Expr, func(node, parent sqlparser.SQLNode) bool { + ex, isExpr := node.(sqlparser.Expr) + if !isExpr { + return true } - } + if aggr, isAggr := node.(sqlparser.AggrFunc); isAggr { + ae := aeWrap(aggr) + if aggr == aliasedExpr.Expr { + ae = aliasedExpr + } + aggrFunc := createAggrFromAggrFunc(aggr, ae) + aggrFunc.Index = &idxCopy + out = append(out, aggrFunc) + return false + } + if containsAggr(node) { + complex = true + return true + } + if !qp.isExprInGroupByExprs(ctx, ex) { + aggr := NewAggr(opcode.AggregateAnyValue, nil, aeWrap(ex), "") + aggr.Index = &idxCopy + out = append(out, aggr) + } + return false + }, nil, nil) + } + return +} - aggrF, _ := aliasedExpr.Expr.(sqlparser.AggrFunc) +func createAggrFromAggrFunc(fnc sqlparser.AggrFunc, aliasedExpr *sqlparser.AliasedExpr) Aggr { + code := opcode.SupportedAggregates[fnc.AggrName()] - if aggrF.IsDistinct() { - switch code { - case opcode.AggregateCount: - code = opcode.AggregateCountDistinct - case opcode.AggregateSum: - code = opcode.AggregateSumDistinct - } + if code == opcode.AggregateCount { + if _, isStar := fnc.(*sqlparser.CountStar); isStar { + code = opcode.AggregateCountStar } + } - aggr := NewAggr(code, aggrF, aliasedExpr, aliasedExpr.ColumnName()) - aggr.Index = &idxCopy - aggr.Distinct = aggrF.IsDistinct() - out = append(out, aggr) + distinct := sqlparser.IsDistinct(fnc) + if distinct { + switch code { + case opcode.AggregateCount: + code = opcode.AggregateCountDistinct + case opcode.AggregateSum: + code = opcode.AggregateSumDistinct + } } - return + + aggr := NewAggr(code, fnc, aliasedExpr, aliasedExpr.ColumnName()) + aggr.Distinct = distinct + return aggr } // FindSelectExprIndexForExpr 
returns the index of the given expression in the select expressions, if it is part of it @@ -765,6 +814,9 @@ func (qp *QueryProjection) OldAlignGroupByAndOrderBy(ctx *plancontext.PlanningCo // We are also free to add more ORDER BY columns than the user asked for which we leverage, // so the input is already ordered according to the GROUP BY columns used func (qp *QueryProjection) AlignGroupByAndOrderBy(ctx *plancontext.PlanningContext) bool { + if qp == nil { + return false + } if qp.hasCheckedAlignment { return false } @@ -804,17 +856,43 @@ func (qp *QueryProjection) GetColumnCount() int { return len(qp.SelectExprs) - qp.AddedColumn } -// checkAggregationSupported checks if the aggregation is supported on the given operator tree or not. -// We don't currently support planning for operators having derived tables. -func checkAggregationSupported(op ops.Operator) error { - return rewrite.Visit(op, func(operator ops.Operator) error { - _, isDerived := operator.(*Derived) - projection, isProjection := operator.(*Projection) - if isDerived || (isProjection && projection.TableID != nil) { - return errHorizonNotPlanned() +func (qp *QueryProjection) orderByOverlapWithSelectExpr(ctx *plancontext.PlanningContext) bool { + for _, expr := range qp.OrderExprs { + idx, _ := qp.FindSelectExprIndexForExpr(ctx, expr.SimplifiedExpr) + if idx != nil { + return true } - return nil - }) + } + return false +} + +func (qp *QueryProjection) useGroupingOverDistinct(ctx *plancontext.PlanningContext) bool { + if !qp.orderByOverlapWithSelectExpr(ctx) { + return false + } + var gbs []GroupBy + for idx, selExpr := range qp.SelectExprs { + ae, err := selExpr.GetAliasedExpr() + if err != nil { + // not an alias Expr, cannot continue forward. + return false + } + sExpr := qp.GetSimplifiedExpr(ae.Expr) + // check if the grouping already exists on that column. 
+ found := slices.IndexFunc(qp.groupByExprs, func(gb GroupBy) bool { + return ctx.SemTable.EqualsExprWithDeps(gb.SimplifiedExpr, sExpr) + }) + if found != -1 { + continue + } + groupBy := NewGroupBy(ae.Expr, sExpr, ae) + selectExprIdx := idx + groupBy.InnerIndex = &selectExprIdx + + gbs = append(gbs, groupBy) + } + qp.groupByExprs = append(qp.groupByExprs, gbs...) + return true } func checkForInvalidGroupingExpressions(expr sqlparser.Expr) error { @@ -854,3 +932,13 @@ func CompareRefInt(a *int, b *int) bool { } return *a < *b } + +func CreateQPFromSelectStatement(ctx *plancontext.PlanningContext, stmt sqlparser.SelectStatement) (*QueryProjection, error) { + switch sel := stmt.(type) { + case *sqlparser.Select: + return createQPFromSelect(ctx, sel) + case *sqlparser.Union: + return createQPFromUnion(ctx, sel) + } + return nil, vterrors.VT13001("can only create query projection from Union and Select statements") +} diff --git a/go/vt/vtgate/planbuilder/operators/queryprojection_test.go b/go/vt/vtgate/planbuilder/operators/queryprojection_test.go index 2a89cd10716..7c92b716d7c 100644 --- a/go/vt/vtgate/planbuilder/operators/queryprojection_test.go +++ b/go/vt/vtgate/planbuilder/operators/queryprojection_test.go @@ -87,7 +87,7 @@ func TestQP(t *testing.T) { _, err = semantics.Analyze(sel, "", &semantics.FakeSI{}) require.NoError(t, err) - qp, err := CreateQPFromSelect(ctx, sel) + qp, err := createQPFromSelect(ctx, sel) if tcase.expErr != "" { require.Error(t, err) require.Contains(t, err.Error(), tcase.expErr) @@ -194,7 +194,7 @@ func TestQPSimplifiedExpr(t *testing.T) { _, err = semantics.Analyze(sel, "", &semantics.FakeSI{}) require.NoError(t, err) ctx := &plancontext.PlanningContext{SemTable: semantics.EmptySemTable()} - qp, err := CreateQPFromSelect(ctx, sel) + qp, err := createQPFromSelect(ctx, sel) require.NoError(t, err) require.Equal(t, tc.expected[1:], qp.toString()) }) diff --git a/go/vt/vtgate/planbuilder/operators/rewrite/rewriters.go 
b/go/vt/vtgate/planbuilder/operators/rewrite/rewriters.go index 0d81e34fabf..c5a8b0a6fa2 100644 --- a/go/vt/vtgate/planbuilder/operators/rewrite/rewriters.go +++ b/go/vt/vtgate/planbuilder/operators/rewrite/rewriters.go @@ -18,8 +18,7 @@ package rewrite import ( "fmt" - - "golang.org/x/exp/slices" + "slices" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" @@ -113,6 +112,14 @@ func BottomUp( var DebugOperatorTree = false +func EnableDebugPrinting() (reset func()) { + t := DebugOperatorTree + DebugOperatorTree = true + return func() { + DebugOperatorTree = t + } +} + // FixedPointBottomUp rewrites an operator tree much like BottomUp does, // but does the rewriting repeatedly, until a tree walk is done with no changes to the tree. func FixedPointBottomUp( @@ -212,7 +219,7 @@ func bottomUp( shouldVisit ShouldVisit, isRoot bool, ) (ops.Operator, *ApplyResult, error) { - if !shouldVisit(root) { + if shouldVisit != nil && !shouldVisit(root) { return root, SameTree, nil } diff --git a/go/vt/vtgate/planbuilder/operators/route.go b/go/vt/vtgate/planbuilder/operators/route.go index 9f39afa0c29..214b34d1adc 100644 --- a/go/vt/vtgate/planbuilder/operators/route.go +++ b/go/vt/vtgate/planbuilder/operators/route.go @@ -21,6 +21,7 @@ import ( "strings" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" @@ -42,6 +43,9 @@ type ( Ordering []RouteOrdering + Comments *sqlparser.ParsedComments + Lock sqlparser.Lock + ResultColumns int } @@ -532,82 +536,140 @@ func (r *Route) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Ex return r, err } -func createProjection(src ops.Operator) (*Projection, error) { - proj := &Projection{Source: src} - cols, err := src.GetColumns() +func createProjection(ctx *plancontext.PlanningContext, src ops.Operator) (*Projection, error) { + proj := newAliasedProjection(src) + 
cols, err := src.GetColumns(ctx) if err != nil { return nil, err } for _, col := range cols { - proj.addUnexploredExpr(col, col.Expr) + _, err := proj.addUnexploredExpr(col, col.Expr) + if err != nil { + return nil, err + } } return proj, nil } -func (r *Route) AddColumn(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, _, addToGroupBy bool) (ops.Operator, int, error) { +func (r *Route) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool, expr *sqlparser.AliasedExpr) (int, error) { removeKeyspaceFromSelectExpr(expr) - // check if columns is already added. - cols, err := r.GetColumns() - if err != nil { - return nil, 0, err - } - colAsExpr := func(e *sqlparser.AliasedExpr) sqlparser.Expr { - return e.Expr - } - if offset, found := canReuseColumn(ctx, cols, expr.Expr, colAsExpr); found { - return r, offset, nil + if reuse { + offset, err := r.FindCol(ctx, expr.Expr, true) + if err != nil { + return 0, err + } + if offset != -1 { + return offset, nil + } } - // if column is not already present, we check if we can easily find a projection + // if at least one column is not already present, we check if we can easily find a projection // or aggregation in our source that we can add to - if ok, offset := addColumnToInput(r.Source, expr, addToGroupBy); ok { - return r, offset, nil + op, ok, offsets := addMultipleColumnsToInput(ctx, r.Source, reuse, []bool{gb}, []*sqlparser.AliasedExpr{expr}) + r.Source = op + if ok { + return offsets[0], nil } // If no-one could be found, we probably don't have one yet, so we add one here - src, err := createProjection(r.Source) + src, err := createProjection(ctx, r.Source) if err != nil { - return nil, 0, err + return 0, err } r.Source = src - // And since we are under the route, we don't need to continue pushing anything further down - offset := src.addColumnWithoutPushing(expr, false) - if err != nil { - return nil, 0, err - } - return r, offset, nil + offsets, _ = src.addColumnsWithoutPushing(ctx, reuse, 
[]bool{gb}, []*sqlparser.AliasedExpr{expr}) + return offsets[0], nil } type selectExpressions interface { - addColumnWithoutPushing(expr *sqlparser.AliasedExpr, addToGroupBy bool) int + ops.Operator + addColumnWithoutPushing(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, addToGroupBy bool) (int, error) + addColumnsWithoutPushing(ctx *plancontext.PlanningContext, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) ([]int, error) isDerived() bool } -func addColumnToInput(operator ops.Operator, expr *sqlparser.AliasedExpr, addToGroupBy bool) (bool, int) { +// addColumnToInput adds a column to an operator without pushing it down. +// It will return a bool indicating whether the addition was successful or not, +// and an offset to where the column can be found +func addMultipleColumnsToInput(ctx *plancontext.PlanningContext, operator ops.Operator, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) (ops.Operator, bool, []int) { switch op := operator.(type) { - case *CorrelatedSubQueryOp: - return addColumnToInput(op.Outer, expr, addToGroupBy) + case *SubQuery: + src, added, offset := addMultipleColumnsToInput(ctx, op.Outer, reuse, addToGroupBy, exprs) + if added { + op.Outer = src + } + return op, added, offset + + case *Distinct: + src, added, offset := addMultipleColumnsToInput(ctx, op.Source, reuse, addToGroupBy, exprs) + if added { + op.Source = src + } + return op, added, offset + case *Limit: - return addColumnToInput(op.Source, expr, addToGroupBy) + src, added, offset := addMultipleColumnsToInput(ctx, op.Source, reuse, addToGroupBy, exprs) + if added { + op.Source = src + } + return op, added, offset + case *Ordering: - return addColumnToInput(op.Source, expr, addToGroupBy) + src, added, offset := addMultipleColumnsToInput(ctx, op.Source, reuse, addToGroupBy, exprs) + if added { + op.Source = src + } + return op, added, offset + + case *LockAndComment: + src, added, offset := addMultipleColumnsToInput(ctx, op.Source, 
reuse, addToGroupBy, exprs) + if added { + op.Source = src + } + return op, added, offset + case selectExpressions: if op.isDerived() { // if the only thing we can push to is a derived table, // we have to add a new projection and can't build on this one - return false, 0 + return op, false, nil + } + offset, _ := op.addColumnsWithoutPushing(ctx, reuse, addToGroupBy, exprs) + return op, true, offset + + case *Union: + tableID := semantics.SingleTableSet(len(ctx.SemTable.Tables)) + ctx.SemTable.Tables = append(ctx.SemTable.Tables, nil) + unionColumns, err := op.GetColumns(ctx) + if err != nil { + return op, false, nil } - offset := op.addColumnWithoutPushing(expr, addToGroupBy) - return true, offset + proj := &Projection{ + Source: op, + Columns: AliasedProjections(slice.Map(unionColumns, newProjExpr)), + DT: &DerivedTable{ + TableID: tableID, + Alias: "dt", + }, + } + return addMultipleColumnsToInput(ctx, proj, reuse, addToGroupBy, exprs) default: - return false, 0 + return op, false, nil } } -func (r *Route) GetColumns() ([]*sqlparser.AliasedExpr, error) { - return r.Source.GetColumns() +func (r *Route) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, _ bool) (int, error) { + return r.Source.FindCol(ctx, expr, true) +} + +func (r *Route) GetColumns(ctx *plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return r.Source.GetColumns(ctx) +} + +func (r *Route) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return r.Source.GetSelectExprs(ctx) } func (r *Route) GetOrdering() ([]ops.OrderBy, error) { @@ -625,6 +687,7 @@ func (r *Route) TablesUsed() []string { } return collect() } + func isSpecialOrderBy(o ops.OrderBy) bool { if sqlparser.IsNull(o.Inner.Expr) { return true @@ -646,16 +709,11 @@ func (r *Route) planOffsets(ctx *plancontext.PlanningContext) (err error) { return err } - columns, err := r.Source.GetColumns() - if err != nil { - return err - } - for _, order := range ordering { if 
isSpecialOrderBy(order) { continue } - offset, err := r.getOffsetFor(ctx, order, columns) + offset, err := r.AddColumn(ctx, true, false, aeWrap(order.SimplifiedExpr)) if err != nil { return err } @@ -670,8 +728,8 @@ func (r *Route) planOffsets(ctx *plancontext.PlanningContext) (err error) { Direction: order.Inner.Direction, } if ctx.SemTable.NeedsWeightString(order.SimplifiedExpr) { - wrap := aeWrap(weightStringFor(order.SimplifiedExpr)) - _, offset, err = r.AddColumn(ctx, wrap, true, false) + ws := weightStringFor(order.SimplifiedExpr) + offset, err := r.AddColumn(ctx, true, false, aeWrap(ws)) if err != nil { return err } @@ -687,30 +745,6 @@ func weightStringFor(expr sqlparser.Expr) sqlparser.Expr { return &sqlparser.WeightStringFuncExpr{Expr: expr} } -func (r *Route) getOffsetFor(ctx *plancontext.PlanningContext, order ops.OrderBy, columns []*sqlparser.AliasedExpr) (int, error) { - for idx, column := range columns { - if sqlparser.Equals.Expr(order.SimplifiedExpr, column.Expr) { - return idx, nil - } - } - - _, offset, err := r.AddColumn(ctx, aeWrap(order.Inner.Expr), true, false) - if err != nil { - return 0, err - } - return offset, nil -} - -func (r *Route) Description() ops.OpDescription { - return ops.OpDescription{ - OperatorType: "Route", - Other: map[string]any{ - "OpCode": r.Routing.OpCode(), - "Keyspace": r.Routing.Keyspace(), - }, - } -} - func (r *Route) ShortDescription() string { first := r.Routing.OpCode().String() @@ -719,6 +753,13 @@ func (r *Route) ShortDescription() string { first = fmt.Sprintf("%s on %s", r.Routing.OpCode().String(), ks.Name) } + type extraInfo interface { + extraInfo() string + } + if info, ok := r.Routing.(extraInfo); ok { + first += " " + info.extraInfo() + } + orderBy, err := r.Source.GetOrdering() if err != nil { return first @@ -732,10 +773,25 @@ func (r *Route) ShortDescription() string { } ordering = " order by " + strings.Join(oo, ",") } - - return first + ordering + comments := "" + if r.Comments != nil { + comments 
= " comments: " + sqlparser.String(r.Comments) + } + lock := "" + if r.Lock != sqlparser.NoLock { + lock = " lock: " + r.Lock.ToString() + } + return first + ordering + comments + lock } func (r *Route) setTruncateColumnCount(offset int) { r.ResultColumns = offset } + +func (r *Route) introducesTableID() semantics.TableSet { + id := semantics.EmptyTableSet() + for _, route := range r.MergedWith { + id = id.Merge(TableID(route)) + } + return id +} diff --git a/go/vt/vtgate/planbuilder/operators/route_planning.go b/go/vt/vtgate/planbuilder/operators/route_planning.go index 354def4ee79..43ea3ab8d6f 100644 --- a/go/vt/vtgate/planbuilder/operators/route_planning.go +++ b/go/vt/vtgate/planbuilder/operators/route_planning.go @@ -41,44 +41,7 @@ type ( opCacheMap map[tableSetPair]ops.Operator ) -// TransformToPhysical takes an operator tree and rewrites any parts that have not yet been planned as physical operators. -// This is where a lot of the optimisations of the query plans are done. -// Here we try to merge query parts into the same route primitives. 
At the end of this process, -// all the operators in the tree are guaranteed to be PhysicalOperators -func transformToPhysical(ctx *plancontext.PlanningContext, in ops.Operator) (ops.Operator, error) { - op, err := rewrite.BottomUpAll(in, TableID, func(operator ops.Operator, ts semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { - switch op := operator.(type) { - case *QueryGraph: - return optimizeQueryGraph(ctx, op) - case *Join: - return optimizeJoin(ctx, op) - case *Derived: - return pushDownDerived(ctx, op) - case *SubQuery: - return optimizeSubQuery(ctx, op, ts) - case *Filter: - return pushDownFilter(op) - default: - return operator, rewrite.SameTree, nil - } - }) - - if err != nil { - return nil, err - } - - return compact(ctx, op) -} - -func pushDownFilter(op *Filter) (ops.Operator, *rewrite.ApplyResult, error) { - if _, ok := op.Source.(*Route); ok { - return rewrite.Swap(op, op.Source, "push filter into Route") - } - - return op, rewrite.SameTree, nil -} - -func pushDownDerived(ctx *plancontext.PlanningContext, op *Derived) (ops.Operator, *rewrite.ApplyResult, error) { +func pushDerived(ctx *plancontext.PlanningContext, op *Horizon) (ops.Operator, *rewrite.ApplyResult, error) { innerRoute, ok := op.Source.(*Route) if !ok { return op, rewrite.SameTree, nil @@ -193,22 +156,27 @@ func getUpdateVindexInformation( updStmt *sqlparser.Update, vindexTable *vindexes.Table, tableID semantics.TableSet, - predicates []sqlparser.Expr, -) ([]*VindexPlusPredicates, map[string]*engine.VindexValues, string, error) { - if !vindexTable.Keyspace.Sharded || vindexTable.Pinned != nil { - return nil, nil, "", nil + assignments []SetExpr, +) ([]*VindexPlusPredicates, map[string]*engine.VindexValues, string, []string, error) { + if !vindexTable.Keyspace.Sharded { + return nil, nil, "", nil, nil + } + + if vindexTable.Pinned != nil { + return nil, nil, "", nil, nil + } - primaryVindex, vindexAndPredicates, err := getVindexInformation(tableID, predicates, 
vindexTable) + primaryVindex, vindexAndPredicates, err := getVindexInformation(tableID, vindexTable) if err != nil { - return nil, nil, "", err + return nil, nil, "", nil, err } - changedVindexValues, ownedVindexQuery, err := buildChangedVindexesValues(updStmt, vindexTable, primaryVindex.Columns) + changedVindexValues, ownedVindexQuery, subQueriesArgOnChangedVindex, err := buildChangedVindexesValues(updStmt, vindexTable, primaryVindex.Columns, assignments) if err != nil { - return nil, nil, "", err + return nil, nil, "", nil, err } - return vindexAndPredicates, changedVindexValues, ownedVindexQuery, nil + return vindexAndPredicates, changedVindexValues, ownedVindexQuery, subQueriesArgOnChangedVindex, nil } /* @@ -392,9 +360,9 @@ func requiresSwitchingSides(ctx *plancontext.PlanningContext, op ops.Operator) b required := false _ = rewrite.Visit(op, func(current ops.Operator) error { - derived, isDerived := current.(*Derived) + horizon, isHorizon := current.(*Horizon) - if isDerived && !derived.IsMergeable(ctx) { + if isHorizon && horizon.IsDerived() && !horizon.IsMergeable(ctx) { required = true return io.EOF } @@ -406,7 +374,7 @@ func requiresSwitchingSides(ctx *plancontext.PlanningContext, op ops.Operator) b } func mergeOrJoin(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, joinPredicates []sqlparser.Expr, inner bool) (ops.Operator, *rewrite.ApplyResult, error) { - newPlan, err := Merge(ctx, lhs, rhs, joinPredicates, newJoinMerge(ctx, joinPredicates, inner)) + newPlan, err := mergeJoinInputs(ctx, lhs, rhs, joinPredicates, newJoinMerge(joinPredicates, inner)) if err != nil { return nil, nil, err } @@ -428,7 +396,7 @@ func mergeOrJoin(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, joinPr if err != nil { return nil, nil, err } - return newOp, rewrite.NewTree("merge routes, but switch sides", newOp), nil + return newOp, rewrite.NewTree("logical join to applyJoin, switching side because derived table", newOp), nil } join := NewApplyJoin(Clone(lhs), 
Clone(rhs), nil, !inner) @@ -503,11 +471,11 @@ func findColumnVindex(ctx *plancontext.PlanningContext, a ops.Operator, exp sqlp deps := ctx.SemTable.RecursiveDeps(expr) _ = rewrite.Visit(a, func(rel ops.Operator) error { - to, isTableOp := rel.(TableIDIntroducer) + to, isTableOp := rel.(tableIDIntroducer) if !isTableOp { return nil } - id := to.Introduces() + id := to.introducesTableID() if deps.IsSolvedBy(id) { tableInfo, err := ctx.SemTable.TableInfoFor(id) if err != nil { @@ -550,8 +518,9 @@ func unwrapDerivedTables(ctx *plancontext.PlanningContext, exp sqlparser.Expr) s } exp = semantics.RewriteDerivedTableExpression(exp, tbl) - exp = getColName(exp) - if exp == nil { + if col := getColName(exp); col != nil { + exp = col + } else { return nil } } @@ -564,10 +533,7 @@ func getColName(exp sqlparser.Expr) *sqlparser.ColName { return exp case *sqlparser.Max, *sqlparser.Min: aggr := exp.(sqlparser.AggrFunc).GetArg() - colName, ok := aggr.(*sqlparser.ColName) - if ok { - return colName - } + return getColName(aggr) } // for any other expression than a column, or the extremum of a column, we return nil return nil diff --git a/go/vt/vtgate/planbuilder/operators/sharded_routing.go b/go/vt/vtgate/planbuilder/operators/sharded_routing.go index f539432c5e0..215f9333e51 100644 --- a/go/vt/vtgate/planbuilder/operators/sharded_routing.go +++ b/go/vt/vtgate/planbuilder/operators/sharded_routing.go @@ -17,16 +17,16 @@ limitations under the License. 
package operators import ( - "golang.org/x/exp/slices" + "fmt" + "slices" "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/slices2" + + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -64,7 +64,7 @@ func newShardedRouting(vtable *vindexes.Table, id semantics.TableSet) Routing { // Use the Binary vindex, which is the identity function // for keyspace id. routing.RouteOpCode = engine.EqualUnique - vindex, _ := vindexes.NewBinary("binary", nil) + vindex, _ := vindexes.CreateVindex("binary", "binary", nil) routing.Selected = &VindexOption{ Ready: true, Values: []evalengine.Expr{evalengine.NewLiteralString(vtable.Pinned, collations.SystemCollation)}, @@ -155,7 +155,7 @@ func (tr *ShardedRouting) Clone() Routing { selected = &t } return &ShardedRouting{ - VindexPreds: slices2.Map(tr.VindexPreds, func(from *VindexPlusPredicates) *VindexPlusPredicates { + VindexPreds: slice.Map(tr.VindexPreds, func(from *VindexPlusPredicates) *VindexPlusPredicates { // we do this to create a copy of the struct p := *from return &p @@ -188,7 +188,7 @@ func (tr *ShardedRouting) updateRoutingLogic(ctx *plancontext.PlanningContext, e return tr, nil } -func (tr *ShardedRouting) ResetRoutingLogic(ctx *plancontext.PlanningContext) (Routing, error) { +func (tr *ShardedRouting) resetRoutingLogic(ctx *plancontext.PlanningContext) (Routing, error) { tr.RouteOpCode = engine.Scatter tr.Selected = nil for i, vp := range tr.VindexPreds { @@ -209,19 +209,6 @@ func (tr *ShardedRouting) ResetRoutingLogic(ctx *plancontext.PlanningContext) (R func (tr *ShardedRouting) searchForNewVindexes(ctx 
*plancontext.PlanningContext, predicate sqlparser.Expr) (Routing, bool, error) { newVindexFound := false switch node := predicate.(type) { - case *sqlparser.ExtractedSubquery: - originalCmp, ok := node.Original.(*sqlparser.ComparisonExpr) - if !ok { - break - } - - // using the node.subquery which is the rewritten version of our subquery - cmp := &sqlparser.ComparisonExpr{ - Left: node.OtherSide, - Right: &sqlparser.Subquery{Select: node.Subquery.Select}, - Operator: originalCmp.Operator, - } - return tr.planComparison(ctx, cmp) case *sqlparser.ComparisonExpr: return tr.planComparison(ctx, node) @@ -327,7 +314,7 @@ func (tr *ShardedRouting) Cost() int { switch tr.RouteOpCode { case engine.EqualUnique: return 1 - case engine.Equal: + case engine.Equal, engine.SubShard: return 5 case engine.IN: return 10 @@ -557,29 +544,6 @@ func (tr *ShardedRouting) hasVindex(column *sqlparser.ColName) bool { return false } -// Reset all vindex predicates on this route and re-build their options from -// the list of seen routing predicates. -func (tr *ShardedRouting) resetRoutingSelections(ctx *plancontext.PlanningContext) error { - tr.RouteOpCode = engine.Scatter - tr.Selected = nil - for i, vp := range tr.VindexPreds { - tr.VindexPreds[i] = &VindexPlusPredicates{ColVindex: vp.ColVindex, TableID: vp.TableID} - } - - var routing Routing = tr - for _, predicate := range tr.SeenPredicates { - var err error - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return err - } - } - if routing != tr { - return vterrors.VT13001("uh-oh. 
we ended up with a different type of routing") - } - return nil -} - func (tr *ShardedRouting) SelectedVindex() vindexes.Vindex { if tr.Selected == nil { return nil @@ -594,7 +558,28 @@ func (tr *ShardedRouting) VindexExpressions() []sqlparser.Expr { return tr.Selected.ValueExprs } -func tryMergeShardedRouting(ctx *plancontext.PlanningContext, routeA *Route, routeB *Route, m merger, joinPredicates []sqlparser.Expr) (ops.Operator, error) { +func (tr *ShardedRouting) extraInfo() string { + if tr.Selected == nil { + return fmt.Sprintf( + "Seen:[%s]", + sqlparser.String(sqlparser.AndExpressions(tr.SeenPredicates...)), + ) + } + + return fmt.Sprintf( + "Vindex[%s] Values[%s] Seen:[%s]", + tr.Selected.FoundVindex.String(), + sqlparser.String(sqlparser.Exprs(tr.Selected.ValueExprs)), + sqlparser.String(sqlparser.AndExpressions(tr.SeenPredicates...)), + ) +} + +func tryMergeJoinShardedRouting( + ctx *plancontext.PlanningContext, + routeA, routeB *Route, + m merger, + joinPredicates []sqlparser.Expr, +) (*Route, error) { sameKeyspace := routeA.Routing.Keyspace() == routeB.Routing.Keyspace() tblA := routeA.Routing.(*ShardedRouting) tblB := routeB.Routing.(*ShardedRouting) @@ -608,7 +593,7 @@ func tryMergeShardedRouting(ctx *plancontext.PlanningContext, routeA *Route, rou aExpr := tblA.VindexExpressions() bExpr := tblB.VindexExpressions() if aVdx == bVdx && gen4ValuesEqual(ctx, aExpr, bExpr) { - return m.mergeTables(tblA, tblB, routeA, routeB) + return m.mergeShardedRouting(ctx, tblA, tblB, routeA, routeB) } } @@ -632,30 +617,14 @@ func tryMergeShardedRouting(ctx *plancontext.PlanningContext, routeA *Route, rou if !canMerge { return nil, nil } - return m.mergeTables(tblA, tblB, routeA, routeB) + return m.mergeShardedRouting(ctx, tblA, tblB, routeA, routeB) } return nil, nil } // makeEvalEngineExpr transforms the given sqlparser.Expr into an evalengine expression func makeEvalEngineExpr(ctx *plancontext.PlanningContext, n sqlparser.Expr) evalengine.Expr { - if 
ctx.IsSubQueryToReplace(n) { - return nil - } - for _, expr := range ctx.SemTable.GetExprAndEqualities(n) { - if subq, isSubq := expr.(*sqlparser.Subquery); isSubq { - extractedSubquery := ctx.SemTable.FindSubqueryReference(subq) - if extractedSubquery == nil { - continue - } - switch popcode.PulloutOpcode(extractedSubquery.OpCode) { - case popcode.PulloutIn, popcode.PulloutNotIn: - expr = sqlparser.NewListArg(extractedSubquery.GetArgName()) - case popcode.PulloutValue, popcode.PulloutExists: - expr = sqlparser.NewArgument(extractedSubquery.GetArgName()) - } - } ee, _ := evalengine.Translate(expr, &evalengine.Config{ Collation: ctx.SemTable.Collation, ResolveType: ctx.SemTable.TypeForExpr, diff --git a/go/vt/vtgate/planbuilder/operators/subquery.go b/go/vt/vtgate/planbuilder/operators/subquery.go index c004d1a9510..55fcba6cd3b 100644 --- a/go/vt/vtgate/planbuilder/operators/subquery.go +++ b/go/vt/vtgate/planbuilder/operators/subquery.go @@ -17,132 +17,291 @@ limitations under the License. package operators import ( + "fmt" + "maps" + "slices" + + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" ) -type ( - // SubQuery stores the information about subquery - SubQuery struct { - Outer ops.Operator - Inner []*SubQueryInner +// SubQuery represents a subquery used for filtering rows in an +// outer query through a join. +type SubQuery struct { + // Fields filled in at the time of construction: + Outer ops.Operator // Outer query operator. + Subquery ops.Operator // Subquery operator. + FilterType opcode.PulloutOpcode // Type of subquery filter. 
+ Original sqlparser.Expr // This is the expression we should use if we can merge the inner to the outer + originalSubquery *sqlparser.Subquery // Subquery representation, e.g., (SELECT foo from user LIMIT 1). + Predicates sqlparser.Exprs // Predicates joining outer and inner queries. Empty for uncorrelated subqueries. + OuterPredicate sqlparser.Expr // This is the predicate that is using the subquery expression. It will not be empty for projections + ArgName string // This is the name of the ColName or Argument used to replace the subquery + TopLevel bool // will be false if the subquery is deeply nested + JoinColumns []JoinColumn // Broken up join predicates. + SubqueryValueName string // Value name returned by the subquery (uncorrelated queries). + HasValuesName string // Argument name passed to the subquery (uncorrelated queries). - noColumns - noPredicates - } + // Fields related to correlated subqueries: + Vars map[string]int // Arguments copied from outer to inner, set during offset planning. + outerID semantics.TableSet - // SubQueryInner stores the subquery information for a select statement - SubQueryInner struct { - // Inner is the Operator inside the parenthesis of the subquery. - // i.e: select (select 1 union select 1), the Inner here would be - // of type Concatenate since we have a Union. 
- Inner ops.Operator + IsProjection bool +} - // ExtractedSubquery contains all information we need about this subquery - ExtractedSubquery *sqlparser.ExtractedSubquery +func (sq *SubQuery) planOffsets(ctx *plancontext.PlanningContext) error { + sq.Vars = make(map[string]int) + columns, err := sq.GetJoinColumns(ctx, sq.Outer) + if err != nil { + return err + } + for _, jc := range columns { + for _, lhsExpr := range jc.LHSExprs { + offset, err := sq.Outer.AddColumn(ctx, true, false, aeWrap(lhsExpr.Expr)) + if err != nil { + return err + } + sq.Vars[lhsExpr.Name] = offset + } + } + return nil +} - noColumns - noPredicates +func (sq *SubQuery) OuterExpressionsNeeded(ctx *plancontext.PlanningContext, outer ops.Operator) (result []*sqlparser.ColName, err error) { + joinColumns, err := sq.GetJoinColumns(ctx, outer) + if err != nil { + return nil, err } -) + for _, jc := range joinColumns { + for _, lhsExpr := range jc.LHSExprs { + col, ok := lhsExpr.Expr.(*sqlparser.ColName) + if !ok { + return nil, vterrors.VT13001("joins can only compare columns: %s", sqlparser.String(lhsExpr.Expr)) + } + result = append(result, col) + } + } + return result, nil +} -var _ ops.Operator = (*SubQuery)(nil) -var _ ops.Operator = (*SubQueryInner)(nil) +func (sq *SubQuery) GetJoinColumns(ctx *plancontext.PlanningContext, outer ops.Operator) ([]JoinColumn, error) { + if outer == nil { + return nil, vterrors.VT13001("outer operator cannot be nil") + } + outerID := TableID(outer) + if sq.JoinColumns != nil { + if sq.outerID == outerID { + return sq.JoinColumns, nil + } + } + sq.outerID = outerID + mapper := func(in sqlparser.Expr) (JoinColumn, error) { + return BreakExpressionInLHSandRHS(ctx, in, outerID) + } + joinPredicates, err := slice.MapWithError(sq.Predicates, mapper) + if err != nil { + return nil, err + } + sq.JoinColumns = joinPredicates + return sq.JoinColumns, nil +} // Clone implements the Operator interface -func (s *SubQueryInner) Clone(inputs []ops.Operator) ops.Operator { - 
return &SubQueryInner{ - Inner: inputs[0], - ExtractedSubquery: s.ExtractedSubquery, +func (sq *SubQuery) Clone(inputs []ops.Operator) ops.Operator { + klone := *sq + switch len(inputs) { + case 1: + klone.Subquery = inputs[0] + case 2: + klone.Outer = inputs[0] + klone.Subquery = inputs[1] + default: + panic("wrong number of inputs") } + klone.JoinColumns = slices.Clone(sq.JoinColumns) + klone.Vars = maps.Clone(sq.Vars) + klone.Predicates = sqlparser.CloneExprs(sq.Predicates) + return &klone } -func (s *SubQueryInner) GetOrdering() ([]ops.OrderBy, error) { - return s.Inner.GetOrdering() +func (sq *SubQuery) GetOrdering() ([]ops.OrderBy, error) { + return sq.Outer.GetOrdering() } // Inputs implements the Operator interface -func (s *SubQueryInner) Inputs() []ops.Operator { - return []ops.Operator{s.Inner} +func (sq *SubQuery) Inputs() []ops.Operator { + if sq.Outer == nil { + return []ops.Operator{sq.Subquery} + } + + return []ops.Operator{sq.Outer, sq.Subquery} } // SetInputs implements the Operator interface -func (s *SubQueryInner) SetInputs(ops []ops.Operator) { - s.Inner = ops[0] +func (sq *SubQuery) SetInputs(inputs []ops.Operator) { + switch len(inputs) { + case 1: + sq.Subquery = inputs[0] + case 2: + sq.Outer = inputs[0] + sq.Subquery = inputs[1] + default: + panic("wrong number of inputs") + } } -// Clone implements the Operator interface -func (s *SubQuery) Clone(inputs []ops.Operator) ops.Operator { - result := &SubQuery{ - Outer: inputs[0], - } - for idx := range s.Inner { - inner, ok := inputs[idx+1].(*SubQueryInner) - if !ok { - panic("got bad input") - } - result.Inner = append(result.Inner, inner) +func (sq *SubQuery) ShortDescription() string { + var typ string + if sq.IsProjection { + typ = "PROJ" + } else { + typ = "FILTER" } - return result -} + var pred string -func (s *SubQuery) GetOrdering() ([]ops.OrderBy, error) { - return s.Outer.GetOrdering() + if len(sq.Predicates) > 0 || sq.OuterPredicate != nil { + preds := append(sq.Predicates, 
sq.OuterPredicate) + pred = " MERGE ON " + sqlparser.String(sqlparser.AndExpressions(preds...)) + } + return fmt.Sprintf("%s %v%s", typ, sq.FilterType.String(), pred) } -// Inputs implements the Operator interface -func (s *SubQuery) Inputs() []ops.Operator { - operators := []ops.Operator{s.Outer} - for _, inner := range s.Inner { - operators = append(operators, inner) +func (sq *SubQuery) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { + newOuter, err := sq.Outer.AddPredicate(ctx, expr) + if err != nil { + return nil, err } - return operators + sq.Outer = newOuter + return sq, nil } -// SetInputs implements the Operator interface -func (s *SubQuery) SetInputs(ops []ops.Operator) { - s.Outer = ops[0] +func (sq *SubQuery) AddColumn(ctx *plancontext.PlanningContext, reuseExisting bool, addToGroupBy bool, exprs *sqlparser.AliasedExpr) (int, error) { + return sq.Outer.AddColumn(ctx, reuseExisting, addToGroupBy, exprs) +} + +func (sq *SubQuery) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) { + return sq.Outer.FindCol(ctx, expr, underRoute) +} + +func (sq *SubQuery) GetColumns(ctx *plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return sq.Outer.GetColumns(ctx) +} + +func (sq *SubQuery) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return sq.Outer.GetSelectExprs(ctx) } -func createSubqueryFromStatement(ctx *plancontext.PlanningContext, stmt sqlparser.Statement) (*SubQuery, error) { - if len(ctx.SemTable.SubqueryMap[stmt]) == 0 { - return nil, nil +// GetMergePredicates returns the predicates that we can use to try to merge this subquery with the outer query. 
+func (sq *SubQuery) GetMergePredicates() []sqlparser.Expr { + if sq.OuterPredicate != nil { + return append(sq.Predicates, sq.OuterPredicate) } - subq := &SubQuery{} - for _, sq := range ctx.SemTable.SubqueryMap[stmt] { - opInner, err := createLogicalOperatorFromAST(ctx, sq.Subquery.Select) - if err != nil { - return nil, err + return sq.Predicates +} + +func (sq *SubQuery) settle(ctx *plancontext.PlanningContext, outer ops.Operator) (ops.Operator, error) { + if !sq.TopLevel { + return nil, subqueryNotAtTopErr + } + if sq.IsProjection { + if len(sq.GetMergePredicates()) > 0 { + // this means that we have a correlated subquery on our hands + return nil, correlatedSubqueryErr } - if horizon, ok := opInner.(*Horizon); ok { - opInner = horizon.Source + sq.SubqueryValueName = sq.ArgName + return outer, nil + } + return sq.settleFilter(ctx, outer) +} + +var correlatedSubqueryErr = vterrors.VT12001("correlated subquery is only supported for EXISTS") +var subqueryNotAtTopErr = vterrors.VT12001("unmergable subquery can not be inside complex expression") + +func (sq *SubQuery) settleFilter(ctx *plancontext.PlanningContext, outer ops.Operator) (ops.Operator, error) { + if len(sq.Predicates) > 0 { + if sq.FilterType != opcode.PulloutExists { + return nil, correlatedSubqueryErr } + return outer, nil + } - subq.Inner = append(subq.Inner, &SubQueryInner{ - ExtractedSubquery: sq, - Inner: opInner, - }) + hasValuesArg := func() string { + s := ctx.ReservedVars.ReserveVariable(string(sqlparser.HasValueSubQueryBaseName)) + sq.HasValuesName = s + return s } - return subq, nil -} + post := func(cursor *sqlparser.CopyOnWriteCursor) { + node := cursor.Node() + if _, ok := node.(*sqlparser.Subquery); !ok { + return + } + + var arg sqlparser.Expr + if sq.FilterType.NeedsListArg() { + arg = sqlparser.NewListArg(sq.ArgName) + } else { + arg = sqlparser.NewArgument(sq.ArgName) + } + cursor.Replace(arg) + } + rhsPred := sqlparser.CopyOnRewrite(sq.Original, dontEnterSubqueries, post, 
ctx.SemTable.CopySemanticInfo).(sqlparser.Expr) -func (s *SubQuery) Description() ops.OpDescription { - return ops.OpDescription{ - OperatorType: "SubQuery", + var predicates []sqlparser.Expr + switch sq.FilterType { + case opcode.PulloutExists: + predicates = append(predicates, sqlparser.NewArgument(hasValuesArg())) + case opcode.PulloutNotExists: + sq.FilterType = opcode.PulloutExists // it's the same pullout as EXISTS, just with a NOT in front of the predicate + predicates = append(predicates, sqlparser.NewNotExpr(sqlparser.NewArgument(hasValuesArg()))) + case opcode.PulloutIn: + predicates = append(predicates, sqlparser.NewArgument(hasValuesArg()), rhsPred) + sq.SubqueryValueName = sq.ArgName + case opcode.PulloutNotIn: + predicates = append(predicates, sqlparser.NewNotExpr(sqlparser.NewArgument(hasValuesArg())), rhsPred) + sq.SubqueryValueName = sq.ArgName + case opcode.PulloutValue: + predicates = append(predicates, rhsPred) + sq.SubqueryValueName = sq.ArgName } + return &Filter{ + Source: outer, + Predicates: predicates, + }, nil } -func (s *SubQueryInner) Description() ops.OpDescription { - return ops.OpDescription{ - OperatorType: "SubQueryInner", +func dontEnterSubqueries(node, _ sqlparser.SQLNode) bool { + if _, ok := node.(*sqlparser.Subquery); ok { + return false } + return true } -func (s *SubQuery) ShortDescription() string { - return "" +func (sq *SubQuery) isMerged(ctx *plancontext.PlanningContext) bool { + return slices.Index(ctx.MergedSubqueries, sq.originalSubquery) >= 0 } -func (s *SubQueryInner) ShortDescription() string { - return "" +// mapExpr rewrites all expressions according to the provided function +func (sq *SubQuery) mapExpr(f func(expr sqlparser.Expr) (sqlparser.Expr, error)) error { + newPredicates, err := slice.MapWithError(sq.Predicates, f) + if err != nil { + return err + } + sq.Predicates = newPredicates + + sq.Original, err = f(sq.Original) + if err != nil { + return err + } + + originalSubquery, err := f(sq.originalSubquery) + 
if err != nil { + return err + } + sq.originalSubquery = originalSubquery.(*sqlparser.Subquery) + return nil } diff --git a/go/vt/vtgate/planbuilder/operators/subquery_builder.go b/go/vt/vtgate/planbuilder/operators/subquery_builder.go new file mode 100644 index 00000000000..a0897b5ad4b --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/subquery_builder.go @@ -0,0 +1,424 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/engine/opcode" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +type SubQueryBuilder struct { + Inner []*SubQuery + + totalID, + subqID, + outerID semantics.TableSet +} + +func (sqb *SubQueryBuilder) getRootOperator(op ops.Operator) ops.Operator { + if len(sqb.Inner) == 0 { + return op + } + + return &SubQueryContainer{ + Outer: op, + Inner: sqb.Inner, + } +} + +func (sqb *SubQueryBuilder) handleSubquery( + ctx *plancontext.PlanningContext, + expr sqlparser.Expr, + outerID semantics.TableSet, +) (*SubQuery, error) { + subq, parentExpr := getSubQuery(expr) + if subq == nil { + return nil, nil + } + argName := ctx.GetReservedArgumentFor(subq) + sqInner, err := createSubqueryOp(ctx, parentExpr, expr, subq, outerID, argName) + if err != nil { + return nil, err + } + sqb.Inner = append(sqb.Inner, sqInner) + + 
return sqInner, nil +} + +func getSubQuery(expr sqlparser.Expr) (subqueryExprExists *sqlparser.Subquery, parentExpr sqlparser.Expr) { + flipped := false + _ = sqlparser.Rewrite(expr, func(cursor *sqlparser.Cursor) bool { + if subq, ok := cursor.Node().(*sqlparser.Subquery); ok { + subqueryExprExists = subq + parentExpr = subq + if expr, ok := cursor.Parent().(sqlparser.Expr); ok { + parentExpr = expr + } + flipped = true + return false + } + return true + }, func(cursor *sqlparser.Cursor) bool { + if !flipped { + return true + } + if not, isNot := cursor.Parent().(*sqlparser.NotExpr); isNot { + parentExpr = not + } + return false + }) + return +} + +func createSubqueryOp( + ctx *plancontext.PlanningContext, + parent, original sqlparser.Expr, + subq *sqlparser.Subquery, + outerID semantics.TableSet, + name string, +) (*SubQuery, error) { + switch parent := parent.(type) { + case *sqlparser.NotExpr: + switch parent.Expr.(type) { + case *sqlparser.ExistsExpr: + return createSubquery(ctx, original, subq, outerID, parent, name, opcode.PulloutNotExists, false) + case *sqlparser.ComparisonExpr: + panic("should have been rewritten") + } + case *sqlparser.ExistsExpr: + return createSubquery(ctx, original, subq, outerID, parent, name, opcode.PulloutExists, false) + case *sqlparser.ComparisonExpr: + return createComparisonSubQuery(ctx, parent, original, subq, outerID, name) + } + return createSubquery(ctx, original, subq, outerID, parent, name, opcode.PulloutValue, false) +} + +// inspectStatement goes through all the predicates contained in the AST +// and extracts subqueries into operators +func (sqb *SubQueryBuilder) inspectStatement(ctx *plancontext.PlanningContext, + stmt sqlparser.SelectStatement, +) (sqlparser.Exprs, []JoinColumn, error) { + switch stmt := stmt.(type) { + case *sqlparser.Select: + return sqb.inspectSelect(ctx, stmt) + case *sqlparser.Union: + exprs1, cols1, err := sqb.inspectStatement(ctx, stmt.Left) + if err != nil { + return nil, nil, err + } + 
exprs2, cols2, err := sqb.inspectStatement(ctx, stmt.Right) + if err != nil { + return nil, nil, err + } + return append(exprs1, exprs2...), append(cols1, cols2...), nil + } + panic("unknown type") +} + +// inspectSelect goes through all the predicates contained in the SELECT query +// and extracts subqueries into operators, and rewrites the original query to use +// arguments instead of subqueries. +func (sqb *SubQueryBuilder) inspectSelect( + ctx *plancontext.PlanningContext, + sel *sqlparser.Select, +) (sqlparser.Exprs, []JoinColumn, error) { + // first we need to go through all the places where one can find predicates + // and search for subqueries + newWhere, wherePreds, whereJoinCols, err := sqb.inspectWhere(ctx, sel.Where) + if err != nil { + return nil, nil, err + } + newHaving, havingPreds, havingJoinCols, err := sqb.inspectWhere(ctx, sel.Having) + if err != nil { + return nil, nil, err + } + + newFrom, onPreds, onJoinCols, err := sqb.inspectOnExpr(ctx, sel.From) + if err != nil { + return nil, nil, err + } + + // then we use the updated AST structs to build the operator + // these AST elements have any subqueries replace by arguments + sel.Where = newWhere + sel.Having = newHaving + sel.From = newFrom + + return append(append(wherePreds, havingPreds...), onPreds...), + append(append(whereJoinCols, havingJoinCols...), onJoinCols...), + nil +} + +func createSubquery( + ctx *plancontext.PlanningContext, + original sqlparser.Expr, + subq *sqlparser.Subquery, + outerID semantics.TableSet, + parent sqlparser.Expr, + argName string, + filterType opcode.PulloutOpcode, + isProjection bool, +) (*SubQuery, error) { + topLevel := ctx.SemTable.EqualsExpr(original, parent) + original = cloneASTAndSemState(ctx, original) + originalSq := cloneASTAndSemState(ctx, subq) + subqID := findTablesContained(ctx, subq.Select) + totalID := subqID.Merge(outerID) + sqc := &SubQueryBuilder{totalID: totalID, subqID: subqID, outerID: outerID} + + predicates, joinCols, err := 
sqc.inspectStatement(ctx, subq.Select) + if err != nil { + return nil, err + } + + stmt := rewriteRemainingColumns(ctx, subq.Select, subqID) + + // TODO: this should not be needed. We are using CopyOnRewrite above, but somehow this is not getting copied + ctx.SemTable.CopySemanticInfo(subq.Select, stmt) + + opInner, err := translateQueryToOp(ctx, stmt) + if err != nil { + return nil, err + } + + opInner = sqc.getRootOperator(opInner) + return &SubQuery{ + FilterType: filterType, + Subquery: opInner, + Predicates: predicates, + Original: original, + ArgName: argName, + originalSubquery: originalSq, + IsProjection: isProjection, + TopLevel: topLevel, + JoinColumns: joinCols, + }, nil +} + +func (sqb *SubQueryBuilder) inspectWhere( + ctx *plancontext.PlanningContext, + in *sqlparser.Where, +) (*sqlparser.Where, sqlparser.Exprs, []JoinColumn, error) { + if in == nil { + return nil, nil, nil, nil + } + jpc := &joinPredicateCollector{ + totalID: sqb.totalID, + subqID: sqb.subqID, + outerID: sqb.outerID, + } + for _, predicate := range sqlparser.SplitAndExpression(nil, in.Expr) { + sqlparser.RemoveKeyspaceFromColName(predicate) + subq, err := sqb.handleSubquery(ctx, predicate, sqb.totalID) + if err != nil { + return nil, nil, nil, err + } + if subq != nil { + continue + } + if err = jpc.inspectPredicate(ctx, predicate); err != nil { + return nil, nil, nil, err + } + } + + if len(jpc.remainingPredicates) == 0 { + in = nil + } else { + in.Expr = sqlparser.AndExpressions(jpc.remainingPredicates...) 
+ } + + return in, jpc.predicates, jpc.joinColumns, nil +} + +func (sqb *SubQueryBuilder) inspectOnExpr( + ctx *plancontext.PlanningContext, + from []sqlparser.TableExpr, +) (newFrom []sqlparser.TableExpr, onPreds sqlparser.Exprs, onJoinCols []JoinColumn, err error) { + for _, tbl := range from { + tbl := sqlparser.CopyOnRewrite(tbl, dontEnterSubqueries, func(cursor *sqlparser.CopyOnWriteCursor) { + cond, ok := cursor.Node().(*sqlparser.JoinCondition) + if !ok || cond.On == nil { + return + } + + jpc := &joinPredicateCollector{ + totalID: sqb.totalID, + subqID: sqb.subqID, + outerID: sqb.outerID, + } + + for _, pred := range sqlparser.SplitAndExpression(nil, cond.On) { + subq, innerErr := sqb.handleSubquery(ctx, pred, sqb.totalID) + if err != nil { + err = innerErr + cursor.StopTreeWalk() + return + } + if subq != nil { + continue + } + if err = jpc.inspectPredicate(ctx, pred); err != nil { + err = innerErr + cursor.StopTreeWalk() + return + } + } + if len(jpc.remainingPredicates) == 0 { + cond.On = nil + } else { + cond.On = sqlparser.AndExpressions(jpc.remainingPredicates...) + } + onPreds = append(onPreds, jpc.predicates...) + onJoinCols = append(onJoinCols, jpc.joinColumns...) 
+ }, ctx.SemTable.CopySemanticInfo) + if err != nil { + return + } + newFrom = append(newFrom, tbl.(sqlparser.TableExpr)) + } + return +} + +func createComparisonSubQuery( + ctx *plancontext.PlanningContext, + parent *sqlparser.ComparisonExpr, + original sqlparser.Expr, + subFromOutside *sqlparser.Subquery, + outerID semantics.TableSet, + name string, +) (*SubQuery, error) { + subq, outside := semantics.GetSubqueryAndOtherSide(parent) + if outside == nil || subq != subFromOutside { + panic("uh oh") + } + + filterType := opcode.PulloutValue + switch parent.Operator { + case sqlparser.InOp: + filterType = opcode.PulloutIn + case sqlparser.NotInOp: + filterType = opcode.PulloutNotIn + } + + subquery, err := createSubquery(ctx, original, subq, outerID, parent, name, filterType, false) + if err != nil { + return nil, err + } + + // if we are comparing with a column from the inner subquery, + // we add this extra predicate to check if the two sides are mergable or not + if ae, ok := subq.Select.GetColumns()[0].(*sqlparser.AliasedExpr); ok { + subquery.OuterPredicate = &sqlparser.ComparisonExpr{ + Operator: sqlparser.EqualOp, + Left: outside, + Right: ae.Expr, + } + } + + return subquery, err +} + +func (sqb *SubQueryBuilder) pullOutValueSubqueries( + ctx *plancontext.PlanningContext, + expr sqlparser.Expr, + outerID semantics.TableSet, + isDML bool, +) (sqlparser.Expr, []*SubQuery, error) { + original := sqlparser.CloneExpr(expr) + sqe := extractSubQueries(ctx, expr, isDML) + if sqe == nil { + return nil, nil, nil + } + var newSubqs []*SubQuery + + for idx, subq := range sqe.subq { + sqInner, err := createSubquery(ctx, original, subq, outerID, original, sqe.cols[idx], sqe.pullOutCode[idx], true) + if err != nil { + return nil, nil, err + } + newSubqs = append(newSubqs, sqInner) + } + + sqb.Inner = append(sqb.Inner, newSubqs...) 
+ + return sqe.new, newSubqs, nil +} + +type subqueryExtraction struct { + new sqlparser.Expr + subq []*sqlparser.Subquery + pullOutCode []opcode.PulloutOpcode + cols []string +} + +func getOpCodeFromParent(parent sqlparser.SQLNode) *opcode.PulloutOpcode { + code := opcode.PulloutValue + switch parent := parent.(type) { + case *sqlparser.ExistsExpr: + return nil + case *sqlparser.ComparisonExpr: + switch parent.Operator { + case sqlparser.InOp: + code = opcode.PulloutIn + case sqlparser.NotInOp: + code = opcode.PulloutNotIn + } + } + return &code +} + +func extractSubQueries(ctx *plancontext.PlanningContext, expr sqlparser.Expr, isDML bool) *subqueryExtraction { + sqe := &subqueryExtraction{} + replaceWithArg := func(cursor *sqlparser.Cursor, sq *sqlparser.Subquery, t opcode.PulloutOpcode) { + sqName := ctx.GetReservedArgumentFor(sq) + sqe.cols = append(sqe.cols, sqName) + if isDML { + if t.NeedsListArg() { + cursor.Replace(sqlparser.NewListArg(sqName)) + } else { + cursor.Replace(sqlparser.NewArgument(sqName)) + } + } else { + cursor.Replace(sqlparser.NewColName(sqName)) + } + sqe.subq = append(sqe.subq, sq) + } + + expr = sqlparser.Rewrite(expr, nil, func(cursor *sqlparser.Cursor) bool { + switch node := cursor.Node().(type) { + case *sqlparser.Subquery: + t := getOpCodeFromParent(cursor.Parent()) + if t == nil { + return true + } + replaceWithArg(cursor, node, *t) + sqe.pullOutCode = append(sqe.pullOutCode, *t) + case *sqlparser.ExistsExpr: + replaceWithArg(cursor, node.Subquery, opcode.PulloutExists) + sqe.pullOutCode = append(sqe.pullOutCode, opcode.PulloutExists) + } + return true + }).(sqlparser.Expr) + if len(sqe.subq) == 0 { + return nil + } + sqe.new = expr + return sqe +} diff --git a/go/vt/vtgate/planbuilder/operators/subquery_container.go b/go/vt/vtgate/planbuilder/operators/subquery_container.go new file mode 100644 index 00000000000..a2fba977436 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/subquery_container.go @@ -0,0 +1,94 @@ +/* 
+Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" +) + +type ( + // SubQueryContainer stores the information about a query and it's subqueries. + // The inner subqueries can be executed in any order, so we store them like this so we can see more opportunities + // for merging + SubQueryContainer struct { + Outer ops.Operator + Inner []*SubQuery + } +) + +var _ ops.Operator = (*SubQueryContainer)(nil) + +// Clone implements the Operator interface +func (sqc *SubQueryContainer) Clone(inputs []ops.Operator) ops.Operator { + result := &SubQueryContainer{ + Outer: inputs[0], + } + for idx := range sqc.Inner { + inner, ok := inputs[idx+1].(*SubQuery) + if !ok { + panic("got bad input") + } + result.Inner = append(result.Inner, inner) + } + return result +} + +func (sqc *SubQueryContainer) GetOrdering() ([]ops.OrderBy, error) { + return sqc.Outer.GetOrdering() +} + +// Inputs implements the Operator interface +func (sqc *SubQueryContainer) Inputs() []ops.Operator { + operators := []ops.Operator{sqc.Outer} + for _, inner := range sqc.Inner { + operators = append(operators, inner) + } + return operators +} + +// SetInputs implements the Operator interface +func (sqc *SubQueryContainer) SetInputs(ops []ops.Operator) { + sqc.Outer = ops[0] +} + +func (sqc *SubQueryContainer) 
ShortDescription() string { + return "" +} + +func (sqc *SubQueryContainer) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { + newSrc, err := sqc.Outer.AddPredicate(ctx, expr) + sqc.Outer = newSrc + return sqc, err +} + +func (sqc *SubQueryContainer) AddColumn(ctx *plancontext.PlanningContext, reuseExisting bool, addToGroupBy bool, exprs *sqlparser.AliasedExpr) (int, error) { + return sqc.Outer.AddColumn(ctx, reuseExisting, addToGroupBy, exprs) +} + +func (sqc *SubQueryContainer) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) { + return sqc.Outer.FindCol(ctx, expr, underRoute) +} + +func (sqc *SubQueryContainer) GetColumns(ctx *plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return sqc.Outer.GetColumns(ctx) +} + +func (sqc *SubQueryContainer) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return sqc.Outer.GetSelectExprs(ctx) +} diff --git a/go/vt/vtgate/planbuilder/operators/subquery_planning.go b/go/vt/vtgate/planbuilder/operators/subquery_planning.go index 28dd005e2f1..44bce0e0f2e 100644 --- a/go/vt/vtgate/planbuilder/operators/subquery_planning.go +++ b/go/vt/vtgate/planbuilder/operators/subquery_planning.go @@ -17,83 +17,20 @@ limitations under the License. 
package operators import ( + "io" + + "golang.org/x/exp/slices" + + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" + "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) -func optimizeSubQuery(ctx *plancontext.PlanningContext, op *SubQuery, ts semantics.TableSet) (ops.Operator, *rewrite.ApplyResult, error) { - var unmerged []*SubQueryOp - - // first loop over the subqueries and try to merge them into the outer plan - outer := op.Outer - for _, inner := range op.Inner { - innerOp := inner.Inner - - var preds []sqlparser.Expr - preds, innerOp = unresolvedAndSource(ctx, innerOp) - - newInner := &SubQueryInner{ - Inner: inner.Inner, - ExtractedSubquery: inner.ExtractedSubquery, - } - merged, err := tryMergeSubQueryOp(ctx, outer, innerOp, newInner, preds, newSubQueryMerge(ctx, newInner), ts) - if err != nil { - return nil, nil, err - } - - if merged != nil { - outer = merged - continue - } - - if len(preds) == 0 { - // uncorrelated queries - sq := &SubQueryOp{ - Extracted: inner.ExtractedSubquery, - Inner: innerOp, - } - unmerged = append(unmerged, sq) - continue - } - - if inner.ExtractedSubquery.OpCode == int(popcode.PulloutExists) { - correlatedTree, err := createCorrelatedSubqueryOp(ctx, innerOp, outer, preds, inner.ExtractedSubquery) - if err != nil { - return nil, nil, err - } - outer = correlatedTree - continue - } - - return nil, nil, vterrors.VT12001("cross-shard correlated subquery") - } - - for _, tree := range unmerged { - tree.Outer = outer - outer = tree - } - return outer, rewrite.NewTree("merged subqueries", outer), nil -} - -func unresolvedAndSource(ctx *plancontext.PlanningContext, op ops.Operator) 
([]sqlparser.Expr, ops.Operator) { - preds := UnresolvedPredicates(op, ctx.SemTable) - if filter, ok := op.(*Filter); ok { - if ctx.SemTable.ASTEquals().Exprs(preds, filter.Predicates) { - // if we are seeing a single filter with only these predicates, - // we can throw away the filter and just use the source - return preds, filter.Source - } - } - - return preds, op -} - func isMergeable(ctx *plancontext.PlanningContext, query sqlparser.SelectStatement, op ops.Operator) bool { validVindex := func(expr sqlparser.Expr) bool { sc := findColumnVindex(ctx, op, expr) @@ -134,309 +71,713 @@ func isMergeable(ctx *plancontext.PlanningContext, query sqlparser.SelectStateme return true } -func tryMergeSubQueryOp( - ctx *plancontext.PlanningContext, - outer, subq ops.Operator, - subQueryInner *SubQueryInner, - joinPredicates []sqlparser.Expr, - merger merger, - lhs semantics.TableSet, // these are the tables made available because we are on the RHS of a join -) (ops.Operator, error) { - switch outerOp := outer.(type) { - case *Filter: - op, err := tryMergeSubQueryOp(ctx, outerOp.Source, subq, subQueryInner, joinPredicates, merger, lhs) - if err != nil || op == nil { - return nil, err +func settleSubqueries(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { + visit := func(op ops.Operator, lhsTables semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { + switch op := op.(type) { + case *SubQueryContainer: + outer := op.Outer + for _, subq := range op.Inner { + newOuter, err := subq.settle(ctx, outer) + if err != nil { + return nil, nil, err + } + subq.Outer = newOuter + outer = subq + } + return outer, rewrite.NewTree("extracted subqueries from subquery container", outer), nil + case *Projection: + ap, err := op.GetAliasedProjections() + if err != nil { + return nil, nil, err + } + + for _, pe := range ap { + mergeSubqueryExpr(ctx, pe) + } + case *Update: + for _, setExpr := range op.Assignments { + mergeSubqueryExpr(ctx, 
setExpr.Expr) + } + } + return op, rewrite.SameTree, nil + } + return rewrite.BottomUp(op, TableID, visit, nil) +} + +func mergeSubqueryExpr(ctx *plancontext.PlanningContext, pe *ProjExpr) { + se, ok := pe.Info.(SubQueryExpression) + if !ok { + return + } + newExpr, rewritten := rewriteMergedSubqueryExpr(ctx, se, pe.EvalExpr) + if rewritten { + pe.EvalExpr = newExpr + } +} + +func rewriteMergedSubqueryExpr(ctx *plancontext.PlanningContext, se SubQueryExpression, expr sqlparser.Expr) (sqlparser.Expr, bool) { + rewritten := false + for _, sq := range se { + for _, sq2 := range ctx.MergedSubqueries { + if sq.originalSubquery == sq2 { + expr = sqlparser.Rewrite(expr, nil, func(cursor *sqlparser.Cursor) bool { + switch expr := cursor.Node().(type) { + case *sqlparser.ColName: + if expr.Name.String() != sq.ArgName { // TODO systay 2023.09.15 - This is not safe enough. We should figure out a better way. + return true + } + case *sqlparser.Argument: + if expr.Name != sq.ArgName { + return true + } + default: + return true + } + rewritten = true + if sq.FilterType == opcode.PulloutExists { + cursor.Replace(&sqlparser.ExistsExpr{Subquery: sq.originalSubquery}) + } else { + cursor.Replace(sq.originalSubquery) + } + return false + }).(sqlparser.Expr) + } } - outerOp.Source = op - return outerOp, nil - case *Route: - return tryMergeSubqueryWithRoute(ctx, subq, outerOp, joinPredicates, merger, subQueryInner, lhs) - case *ApplyJoin: - return tryMergeSubqueryWithJoin(ctx, subq, outerOp, joinPredicates, merger, subQueryInner, lhs) - default: - return nil, nil } + return expr, rewritten } -func tryMergeSubqueryWithRoute( +// tryPushSubQueryInJoin attempts to push down a SubQuery into an ApplyJoin +/* +For this query: + + select 1 from user u1, user u2 where exists ( + select 1 from user_extra ue where ue.col = u1.col and ue.col = u2.col + ) + +We can use a very simplified tree where the subquery starts at the top, like this: 
+┌──────────────────────────────────────────────────────────────────────┐ +│SQ WHERE ue.col = u1.col and ue.col = u2.col, JoinVars: u1.col. u2.col│ +└──┬────────────────────────────────────────────────────┬──────────────┘ + inner outer +┌──▼──┐ ┌───────────────▼──────────────┐ +│R(ue)│ │JOIN WHERE true JoinVars │ +└─────┘ └──┬───────────────────────┬───┘ + ┌──▼──┐ ┌─▼───┐ + │R(u1)│ │R(u2)│ + └─────┘ └─────┘ + +We transform it to: + ┌────────────────────────────────┐ + │JOIN WHERE true JoinVars: u1.col│ + ├─────────────────────────────┬──┘ +┌───▼─┐ ┌─────────────────────────▼────────────────────────────────────┐ +│R(u1)│ │SQ WHERE ue.col = :u1_col and ue.col = u2.col JoinVars: u2.col│ +└─────┘ └──┬───────────────────────────────────────────────────────┬───┘ + inner outer + ┌──▼──┐ ┌──▼──┐ + │R(ue)│ │R(u2)│ + └─────┘ └─────┘ +We are rewriting all expressions in the subquery to use arguments any columns +coming from the LHS. The join predicate is not affected, but we are adding +any new columns needed by the inner subquery to the JoinVars that the join +will handle. 
+*/ +func tryPushSubQueryInJoin( ctx *plancontext.PlanningContext, - subq ops.Operator, - outerOp *Route, - joinPredicates []sqlparser.Expr, - merger merger, - subQueryInner *SubQueryInner, - lhs semantics.TableSet, // these are the tables made available because we are on the RHS of a join -) (ops.Operator, error) { - subqueryRoute, isRoute := subq.(*Route) - if !isRoute { - return nil, nil + inner *SubQuery, + outer *ApplyJoin, +) (ops.Operator, *rewrite.ApplyResult, error) { + lhs := TableID(outer.LHS) + rhs := TableID(outer.RHS) + joinID := TableID(outer) + innerID := TableID(inner.Subquery) + + // Deps are the dependencies of the merge predicates - + // we want to push the subquery as close to its needs + // as possible, so that we can potentially merge them together + // TODO: we need to check dependencies and break apart all expressions in the subquery, not just the merge predicates + deps := semantics.EmptyTableSet() + for _, predicate := range inner.GetMergePredicates() { + deps = deps.Merge(ctx.SemTable.RecursiveDeps(predicate)) + } + deps = deps.Remove(innerID) + + // in general, we don't want to push down uncorrelated subqueries into the RHS of a join, + // since this side is executed once per row from the LHS, so we would unnecessarily execute + // the subquery multiple times. The exception is if we can merge the subquery with the RHS of the join. + merged, result, err := tryMergeWithRHS(ctx, inner, outer) + if err != nil { + return nil, nil, err + } + if merged != nil { + return merged, result, nil } - if outerOp.Routing.OpCode() == engine.Reference && !subqueryRoute.IsSingleShard() { - return nil, nil + _, ok := inner.Subquery.(*Projection) + if ok { + // This is a little hacky, but I could not find a better solution for it. + // Projections are easy to push down, so if this is still at the top, + // it means we have not tried pushing it yet. 
+ // Let's give it a chance to push down before we push it on the left + return nil, rewrite.SameTree, nil } - deps := ctx.SemTable.DirectDeps(subQueryInner.ExtractedSubquery.Subquery) - outer := lhs.Merge(TableID(outerOp)) - if !deps.IsSolvedBy(outer) { - return nil, nil + if deps.IsSolvedBy(lhs) { + // we can safely push down the subquery on the LHS + outer.LHS = addSubQuery(outer.LHS, inner) + return outer, rewrite.NewTree("push subquery into LHS of join", inner), nil } - merged, err := Merge(ctx, outerOp, subq, joinPredicates, merger) - if err != nil { - return nil, err + if outer.LeftJoin || len(inner.Predicates) == 0 { + // we can't push any filters on the RHS of an outer join, and + // we don't want to push uncorrelated subqueries to the RHS of a join + return nil, rewrite.SameTree, nil } - // If the subqueries could be merged here, we're done - if merged != nil { - return merged, err + if deps.IsSolvedBy(rhs) { + // we can push down the subquery filter on RHS of the join + outer.RHS = addSubQuery(outer.RHS, inner) + return outer, rewrite.NewTree("push subquery into RHS of join", inner), nil } - if !isMergeable(ctx, subQueryInner.ExtractedSubquery.Subquery.Select, subq) { - return nil, nil + if deps.IsSolvedBy(joinID) { + // we can rewrite the predicate to not use the values from the lhs, + // and instead use arguments for these dependencies. + // this way we can push the subquery into the RHS of this join + err := inner.mapExpr(extractLHSExpr(ctx, outer, lhs)) + if err != nil { + return nil, nil, err + } + + outer.RHS = addSubQuery(outer.RHS, inner) + return outer, rewrite.NewTree("push subquery into RHS of join rewriting predicates", inner), nil } - // Inner subqueries can be merged with the outer subquery as long as - // the inner query is a single column selection, and that single column has a matching - // vindex on the outer query's operand. 
- if canMergeSubqueryOnColumnSelection(ctx, outerOp, subqueryRoute, subQueryInner.ExtractedSubquery) { - // TODO: clean up. All this casting is not pretty - outerRouting, ok := outerOp.Routing.(*ShardedRouting) - if !ok { - return nil, nil + return nil, rewrite.SameTree, nil +} + +// extractLHSExpr will return a function that extracts any ColName coming from the LHS table, +// adding them to the ExtraLHSVars on the join if they are not already known +func extractLHSExpr( + ctx *plancontext.PlanningContext, + outer *ApplyJoin, + lhs semantics.TableSet, +) func(expr sqlparser.Expr) (sqlparser.Expr, error) { + return func(expr sqlparser.Expr) (sqlparser.Expr, error) { + col, err := BreakExpressionInLHSandRHS(ctx, expr, lhs) + if err != nil { + return nil, err } - innerRouting := subqueryRoute.Routing.(*ShardedRouting) - if !ok { - return nil, nil + if col.IsPureLeft() { + return nil, vterrors.VT13001("did not expect to find any predicates that do not need data from the inner here") + } + for _, bve := range col.LHSExprs { + if !outer.isColNameMovedFromL2R(bve.Name) { + outer.ExtraLHSVars = append(outer.ExtraLHSVars, bve) + } } - merged, err := merger.mergeTables(outerRouting, innerRouting, outerOp, subqueryRoute) - mergedRouting := merged.Routing.(*ShardedRouting) - mergedRouting.PickBestAvailableVindex() - return merged, err + return col.RHSExpr, nil } - return nil, nil } -func tryMergeSubqueryWithJoin( - ctx *plancontext.PlanningContext, - subq ops.Operator, - outerOp *ApplyJoin, - joinPredicates []sqlparser.Expr, - merger merger, - subQueryInner *SubQueryInner, - lhs semantics.TableSet, // these are the tables made available because we are on the RHS of a join -) (ops.Operator, error) { - // Trying to merge the subquery with the left-hand or right-hand side of the join - - if outerOp.LeftJoin { - return nil, nil +// tryMergeWithRHS attempts to merge a subquery with the RHS of a join +func tryMergeWithRHS(ctx *plancontext.PlanningContext, inner *SubQuery, outer 
*ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { + if outer.LeftJoin { + return nil, nil, nil } - newMergefunc := &mergeDecorator{ - inner: merger, - f: func() error { - var err error - outerOp.RHS, err = rewriteColumnsInSubqueryOpForJoin(ctx, outerOp.RHS, outerOp, subQueryInner) - return err - }, + // both sides need to be routes + outerRoute, ok := outer.RHS.(*Route) + if !ok { + return nil, nil, nil } - merged, err := tryMergeSubQueryOp(ctx, outerOp.LHS, subq, subQueryInner, joinPredicates, newMergefunc, lhs) + innerRoute, ok := inner.Subquery.(*Route) + if !ok { + return nil, nil, nil + } + + newExpr, err := rewriteOriginalPushedToRHS(ctx, inner.Original, outer) if err != nil { - return nil, err + return nil, nil, err } - if merged != nil { - outerOp.LHS = merged - return outerOp, nil + sqm := &subqueryRouteMerger{ + outer: outerRoute, + original: newExpr, + subq: inner, } + newOp, err := mergeSubqueryInputs(ctx, innerRoute, outerRoute, inner.GetMergePredicates(), sqm) + if err != nil || newOp == nil { + return nil, nil, err + } + + outer.RHS = newOp + ctx.MergedSubqueries = append(ctx.MergedSubqueries, inner.originalSubquery) + return outer, rewrite.NewTree("merged subquery with rhs of join", inner), nil +} - newMergefunc.f = func() error { - var err error - outerOp.RHS, err = rewriteColumnsInSubqueryOpForJoin(ctx, outerOp.LHS, outerOp, subQueryInner) - return err +// addSubQuery adds a SubQuery to the given operator. If the operator is a SubQueryContainer, +// it will add the SubQuery to the SubQueryContainer. If the operator is something else, it will +// create a new SubQueryContainer with the given operator as the outer and the SubQuery as the inner. 
+func addSubQuery(in ops.Operator, inner *SubQuery) ops.Operator { + sql, ok := in.(*SubQueryContainer) + if !ok { + return &SubQueryContainer{ + Outer: in, + Inner: []*SubQuery{inner}, + } } - merged, err = tryMergeSubQueryOp(ctx, outerOp.RHS, subq, subQueryInner, joinPredicates, newMergefunc, lhs.Merge(TableID(outerOp.LHS))) + sql.Inner = append(sql.Inner, inner) + return sql +} + +// rewriteOriginalPushedToRHS rewrites the original expression to use the argument names instead of the column names +// this is necessary because we are pushing the subquery into the RHS of the join, and we need to use the argument names +// instead of the column names +func rewriteOriginalPushedToRHS(ctx *plancontext.PlanningContext, expression sqlparser.Expr, outer *ApplyJoin) (sqlparser.Expr, error) { + var err error + outerID := TableID(outer.LHS) + result := sqlparser.CopyOnRewrite(expression, nil, func(cursor *sqlparser.CopyOnWriteCursor) { + col, ok := cursor.Node().(*sqlparser.ColName) + if !ok || ctx.SemTable.RecursiveDeps(col) != outerID { + // we are only interested in columns that are coming from the LHS of the join + return + } + // this is a dependency we are being fed from the LHS of the join, so we + // need to find the argument name for it and use that instead + // we can't use the column name directly, because we're in the RHS of the join + name, innerErr := outer.findOrAddColNameBindVarName(ctx, col) + if err != nil { + err = innerErr + cursor.StopTreeWalk() + return + } + cursor.Replace(sqlparser.NewArgument(name)) + }, nil) if err != nil { return nil, err } - if merged != nil { - outerOp.RHS = merged - return outerOp, nil - } - return nil, nil + return result.(sqlparser.Expr), nil } -// rewriteColumnsInSubqueryOpForJoin rewrites the columns that appear from the other side -// of the join. 
For example, let's say we merged a subquery on the right side of a join tree -// If it was using any columns from the left side then they need to be replaced by bind variables supplied -// from that side. -// outerTree is the joinTree within whose children the subquery lives in -// the child of joinTree which does not contain the subquery is the otherTree -func rewriteColumnsInSubqueryOpForJoin( - ctx *plancontext.PlanningContext, - innerOp ops.Operator, - outerTree *ApplyJoin, - subQueryInner *SubQueryInner, -) (ops.Operator, error) { - resultInnerOp := innerOp - var rewriteError error - // go over the entire expression in the subquery - sqlparser.SafeRewrite(subQueryInner.ExtractedSubquery.Original, nil, func(cursor *sqlparser.Cursor) bool { - node, ok := cursor.Node().(*sqlparser.ColName) - if !ok { - return true +func pushProjectionToOuterContainer(ctx *plancontext.PlanningContext, p *Projection, src *SubQueryContainer) (ops.Operator, *rewrite.ApplyResult, error) { + ap, err := p.GetAliasedProjections() + if err != nil { + return p, rewrite.SameTree, nil + } + + outer := TableID(src.Outer) + for _, pe := range ap { + _, isOffset := pe.Info.(*Offset) + if isOffset { + continue } - // check whether the column name belongs to the other side of the join tree - if !ctx.SemTable.RecursiveDeps(node).IsSolvedBy(TableID(resultInnerOp)) { - return true + if !ctx.SemTable.RecursiveDeps(pe.EvalExpr).IsSolvedBy(outer) { + return p, rewrite.SameTree, nil } - // get the bindVariable for that column name and replace it in the subquery - typ, _, _ := ctx.SemTable.TypeForExpr(node) - bindVar := ctx.GetArgumentFor(node, func() string { - return ctx.ReservedVars.ReserveColName(node) - }) - cursor.Replace(sqlparser.NewTypedArgument(bindVar, typ)) - // check whether the bindVariable already exists in the joinVars of the other tree - _, alreadyExists := outerTree.Vars[bindVar] - if alreadyExists { + if se, ok := pe.Info.(SubQueryExpression); ok { + pe.EvalExpr = 
rewriteColNameToArgument(ctx, pe.EvalExpr, se, src.Inner...) + } + } + // all projections can be pushed to the outer + src.Outer, p.Source = p, src.Outer + return src, rewrite.NewTree("push projection into outer side of subquery container", p), nil +} + +func rewriteColNameToArgument(ctx *plancontext.PlanningContext, in sqlparser.Expr, se SubQueryExpression, subqueries ...*SubQuery) sqlparser.Expr { + rewriteIt := func(s string) sqlparser.SQLNode { + for _, sq1 := range se { + if sq1.ArgName != s && sq1.HasValuesName != s { + continue + } + + for _, sq2 := range subqueries { + if s == sq2.ArgName { + switch { + case sq1.FilterType.NeedsListArg(): + return sqlparser.NewListArg(s) + case sq1.FilterType == opcode.PulloutExists: + if sq1.HasValuesName == "" { + sq1.HasValuesName = ctx.ReservedVars.ReserveHasValuesSubQuery() + sq2.HasValuesName = sq1.HasValuesName + } + return sqlparser.NewArgument(sq1.HasValuesName) + default: + return sqlparser.NewArgument(s) + } + } + } + } + return nil + } + + // replace the ColNames with Argument inside the subquery + result := sqlparser.Rewrite(in, nil, func(cursor *sqlparser.Cursor) bool { + col, ok := cursor.Node().(*sqlparser.ColName) + if !ok || !col.Qualifier.IsEmpty() { return true } - // if it does not exist, then push this as an output column there and add it to the joinVars - newInnerOp, offset, err := resultInnerOp.AddColumn(ctx, aeWrap(node), true, false) - if err != nil { - rewriteError = err - return false + arg := rewriteIt(col.Name.String()) + if arg == nil { + return true } - resultInnerOp = newInnerOp - outerTree.Vars[bindVar] = offset + cursor.Replace(arg) return true }) + return result.(sqlparser.Expr) +} - // update the dependencies for the subquery by removing the dependencies from the innerOp - tableSet := ctx.SemTable.Direct[subQueryInner.ExtractedSubquery.Subquery] - ctx.SemTable.Direct[subQueryInner.ExtractedSubquery.Subquery] = tableSet.Remove(TableID(resultInnerOp)) - tableSet = 
ctx.SemTable.Recursive[subQueryInner.ExtractedSubquery.Subquery] - ctx.SemTable.Recursive[subQueryInner.ExtractedSubquery.Subquery] = tableSet.Remove(TableID(resultInnerOp)) +func pushOrMergeSubQueryContainer(ctx *plancontext.PlanningContext, in *SubQueryContainer) (ops.Operator, *rewrite.ApplyResult, error) { + if !reachedPhase(ctx, initialPlanning) { + return in, rewrite.SameTree, nil + } - // return any error while rewriting - return resultInnerOp, rewriteError + var remaining []*SubQuery + var result *rewrite.ApplyResult + for _, inner := range in.Inner { + newOuter, _result, err := pushOrMerge(ctx, in.Outer, inner) + if err != nil { + return nil, nil, err + } + if _result == rewrite.SameTree { + remaining = append(remaining, inner) + continue + } + + in.Outer = newOuter + result = result.Merge(_result) + } + + if len(remaining) == 0 { + return in.Outer, result, nil + } + + in.Inner = remaining + + return in, result, nil +} + +func tryMergeSubQuery( + ctx *plancontext.PlanningContext, + subQuery *SubQuery, + outer *Route, +) (newOuter ops.Operator, result *rewrite.ApplyResult, err error) { + switch inner := subQuery.Subquery.(type) { + case *Route: + return tryMergeSubqueryWithOuter(ctx, subQuery, outer, inner) + case *SubQueryContainer: + return tryMergeSubqueriesRecursively(ctx, subQuery, outer, inner) + } + return outer, rewrite.SameTree, nil } -func createCorrelatedSubqueryOp( +// tryMergeSubqueriesRecursively attempts to merge a SubQueryContainer with the outer Route. 
+func tryMergeSubqueriesRecursively( ctx *plancontext.PlanningContext, - innerOp, outerOp ops.Operator, - preds []sqlparser.Expr, - extractedSubquery *sqlparser.ExtractedSubquery, -) (*CorrelatedSubQueryOp, error) { - newOuter, err := RemovePredicate(ctx, extractedSubquery, outerOp) + subQuery *SubQuery, + outer *Route, + inner *SubQueryContainer, +) (ops.Operator, *rewrite.ApplyResult, error) { + exprs := subQuery.GetMergePredicates() + merger := &subqueryRouteMerger{ + outer: outer, + original: subQuery.Original, + subq: subQuery, + } + op, err := mergeSubqueryInputs(ctx, inner.Outer, outer, exprs, merger) if err != nil { - return nil, vterrors.VT12001("EXISTS sub-queries are only supported with AND clause") - } - - resultOuterOp := newOuter - vars := map[string]int{} - bindVars := map[*sqlparser.ColName]string{} - var lhsCols []*sqlparser.ColName - for _, pred := range preds { - var rewriteError error - sqlparser.SafeRewrite(pred, nil, func(cursor *sqlparser.Cursor) bool { - node, ok := cursor.Node().(*sqlparser.ColName) - if !ok { - return true - } + return nil, nil, err + } + if op == nil { + return outer, rewrite.SameTree, nil + } - nodeDeps := ctx.SemTable.RecursiveDeps(node) - if !nodeDeps.IsSolvedBy(TableID(resultOuterOp)) { - return true - } + op = Clone(op).(*Route) + op.Source = outer.Source + var finalResult *rewrite.ApplyResult + for _, subq := range inner.Inner { + newOuter, res, err := tryMergeSubQuery(ctx, subq, op) + if err != nil { + return nil, nil, err + } + if res == rewrite.SameTree { + // we failed to merge one of the inners - we need to abort + return nil, rewrite.SameTree, nil + } + op = newOuter.(*Route) + finalResult = finalResult.Merge(res) + } - // check whether the bindVariable already exists in the map - // we do so by checking that the column names are the same and their recursive dependencies are the same - // so the column names `user.a` and `a` would be considered equal as long as both are bound to the same table - for colName, 
bindVar := range bindVars { - if ctx.SemTable.EqualsExprWithDeps(node, colName) { - cursor.Replace(sqlparser.NewArgument(bindVar)) - return true - } - } + op.Source = &Filter{Source: outer.Source, Predicates: []sqlparser.Expr{subQuery.Original}} + return op, finalResult.Merge(rewrite.NewTree("merge outer of two subqueries", subQuery)), nil +} - // get the bindVariable for that column name and replace it in the predicate - typ, _, _ := ctx.SemTable.TypeForExpr(node) - bindVar := ctx.ReservedVars.ReserveColName(node) - cursor.Replace(sqlparser.NewTypedArgument(bindVar, typ)) - // store it in the map for future comparisons - bindVars[node] = bindVar +func tryMergeSubqueryWithOuter(ctx *plancontext.PlanningContext, subQuery *SubQuery, outer *Route, inner ops.Operator) (ops.Operator, *rewrite.ApplyResult, error) { + if updOp, ok := outer.Source.(*Update); ok && mergingIsBlocked(subQuery, updOp) { + return outer, rewrite.SameTree, nil + } + exprs := subQuery.GetMergePredicates() + merger := &subqueryRouteMerger{ + outer: outer, + original: subQuery.Original, + subq: subQuery, + } + op, err := mergeSubqueryInputs(ctx, inner, outer, exprs, merger) + if err != nil { + return nil, nil, err + } + if op == nil { + return outer, rewrite.SameTree, nil + } + if !subQuery.IsProjection { + op.Source = &Filter{Source: outer.Source, Predicates: []sqlparser.Expr{subQuery.Original}} + } + ctx.MergedSubqueries = append(ctx.MergedSubqueries, subQuery.originalSubquery) + return op, rewrite.NewTree("merged subquery with outer", subQuery), nil +} - // if it does not exist, then push this as an output column in the outerOp and add it to the joinVars - newOuterOp, offset, err := resultOuterOp.AddColumn(ctx, aeWrap(node), true, false) - if err != nil { - rewriteError = err +// This checked if subquery is part of the changed vindex values. Subquery cannot be merged with the outer route. 
+func mergingIsBlocked(subQuery *SubQuery, updOp *Update) bool { + for _, sqArg := range updOp.SubQueriesArgOnChangedVindex { + if sqArg == subQuery.ArgName { + return true + } + } + return false +} + +func pushOrMerge(ctx *plancontext.PlanningContext, outer ops.Operator, inner *SubQuery) (ops.Operator, *rewrite.ApplyResult, error) { + switch o := outer.(type) { + case *Route: + return tryMergeSubQuery(ctx, inner, o) + case *ApplyJoin: + join, applyResult, err := tryPushSubQueryInJoin(ctx, inner, o) + if err != nil { + return nil, nil, err + } + if join == nil { + return outer, rewrite.SameTree, nil + } + return join, applyResult, nil + default: + return outer, rewrite.SameTree, nil + } +} + +type subqueryRouteMerger struct { + outer *Route + original sqlparser.Expr + subq *SubQuery +} + +func (s *subqueryRouteMerger) mergeShardedRouting(ctx *plancontext.PlanningContext, r1, r2 *ShardedRouting, old1, old2 *Route) (*Route, error) { + tr := &ShardedRouting{ + VindexPreds: append(r1.VindexPreds, r2.VindexPreds...), + keyspace: r1.keyspace, + RouteOpCode: r1.RouteOpCode, + } + + if !s.subq.TopLevel { + // if the subquery is not at the root level, we can't use it for routing, only for merging + tr.SeenPredicates = r2.SeenPredicates + } else { + tr.SeenPredicates = slice.Filter(append(r1.SeenPredicates, r2.SeenPredicates...), func(expr sqlparser.Expr) bool { + // There are two cases we can have - we can have predicates in the outer + // that are no longer valid, and predicates in the inner that are no longer valid + // For the case WHERE exists(select 1 from user where user.id = ue.user_id) + // Outer: ::has_values + // Inner: user.id = :ue_user_id + // + // And for the case WHERE id IN (select id FROM user WHERE id = 5) + // Outer: id IN ::__sq1 + // Inner: id = 5 + // + // We only keep SeenPredicates that are not bind variables in the join columns. 
+ // We have to remove the outer predicate since we merge both routes, and no one + // is producing the bind variable anymore. + if exprFromSubQ := ctx.SemTable.RecursiveDeps(expr).IsOverlapping(TableID(s.subq.Subquery)); !exprFromSubQ { return true } - resultOuterOp = newOuterOp - lhsCols = append(lhsCols, node) - vars[bindVar] = offset - return true + var argFound bool + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + arg, ok := node.(*sqlparser.Argument) + if !ok { + return true, nil + } + f := func(bve BindVarExpr) bool { return bve.Name == arg.Name } + for _, jc := range s.subq.JoinColumns { + if slices.ContainsFunc(jc.LHSExprs, f) { + argFound = true + return false, io.EOF + } + } + return true, nil + }, expr) + + return !argFound }) - if rewriteError != nil { - return nil, rewriteError + } + + routing, err := tr.resetRoutingLogic(ctx) + if err != nil { + return nil, err + } + return s.merge(ctx, old1, old2, routing) +} + +func (s *subqueryRouteMerger) merge(ctx *plancontext.PlanningContext, inner, outer *Route, r Routing) (*Route, error) { + if !s.subq.TopLevel { + // if the subquery we are merging isn't a top level predicate, we can't use it for routing + return &Route{ + Source: outer.Source, + MergedWith: mergedWith(inner, outer), + Routing: outer.Routing, + Ordering: outer.Ordering, + ResultColumns: outer.ResultColumns, + }, nil + + } + _, isSharded := r.(*ShardedRouting) + var src ops.Operator + var err error + if isSharded { + src = s.outer.Source + if !s.subq.IsProjection { + src = &Filter{ + Source: s.outer.Source, + Predicates: []sqlparser.Expr{s.original}, + } } - var err error - innerOp, err = innerOp.AddPredicate(ctx, pred) + } else { + src, err = s.rewriteASTExpression(ctx, inner) if err != nil { return nil, err } } - return &CorrelatedSubQueryOp{ - Outer: resultOuterOp, - Inner: innerOp, - Extracted: extractedSubquery, - Vars: vars, - LHSColumns: lhsCols, + return &Route{ + Source: src, + MergedWith: 
mergedWith(inner, outer), + Routing: r, + Ordering: s.outer.Ordering, + ResultColumns: s.outer.ResultColumns, }, nil } -// canMergeSubqueryOnColumnSelection will return true if the predicate used allows us to merge the two subqueries -// into a single Route. This can be done if we are comparing two columns that contain data that is guaranteed -// to exist on the same shard. -func canMergeSubqueryOnColumnSelection(ctx *plancontext.PlanningContext, a, b *Route, predicate *sqlparser.ExtractedSubquery) bool { - left := predicate.OtherSide - opCode := predicate.OpCode - if opCode != int(popcode.PulloutValue) && opCode != int(popcode.PulloutIn) { - return false +// rewriteASTExpression rewrites the subquery expression that is used in the merged output +// Any changes that have been done to the operator tree since it was extracted from the +// query need make it to the expression +// TODO: systay 2023-09-26 +// we should be able to use this method for all plan types, +// but using this method for sharded queries introduces bugs +// We really need to figure out why this is not working as expected +func (s *subqueryRouteMerger) rewriteASTExpression(ctx *plancontext.PlanningContext, inner *Route) (ops.Operator, error) { + src := s.outer.Source + stmt, _, err := ToSQL(ctx, inner.Source) + if err != nil { + return nil, err } - - lVindex := findColumnVindex(ctx, a, left) - if lVindex == nil || !lVindex.IsUnique() { - return false + subqStmt, ok := stmt.(sqlparser.SelectStatement) + if !ok { + return nil, vterrors.VT13001("subqueries should only be select statement") } - - rightSelection := extractSingleColumnSubquerySelection(predicate.Subquery) - if rightSelection == nil { - return false + subqID := TableID(s.subq.Subquery) + subqStmt = sqlparser.CopyOnRewrite(subqStmt, nil, func(cursor *sqlparser.CopyOnWriteCursor) { + arg, ok := cursor.Node().(*sqlparser.Argument) + if !ok { + return + } + var exprFound sqlparser.Expr + for expr, argName := range ctx.ReservedArguments { + if 
arg.Name == argName { + exprFound = expr + } + } + if exprFound == nil { + return + } + deps := ctx.SemTable.RecursiveDeps(exprFound) + if deps.IsEmpty() { + err = vterrors.VT13001("found colname that we dont have deps for") + cursor.StopTreeWalk() + return + } + if !deps.IsSolvedBy(subqID) { + cursor.Replace(exprFound) + } + }, nil).(sqlparser.SelectStatement) + if err != nil { + return nil, err } - rVindex := findColumnVindex(ctx, b, rightSelection) - if rVindex == nil { - return false + if s.subq.IsProjection { + ctx.SemTable.CopySemanticInfo(s.subq.originalSubquery.Select, subqStmt) + s.subq.originalSubquery.Select = subqStmt + } else { + sQuery := sqlparser.CopyOnRewrite(s.original, dontEnterSubqueries, func(cursor *sqlparser.CopyOnWriteCursor) { + if subq, ok := cursor.Node().(*sqlparser.Subquery); ok { + subq.Select = subqStmt + cursor.Replace(subq) + } + }, ctx.SemTable.CopySemanticInfo).(sqlparser.Expr) + src = &Filter{ + Source: s.outer.Source, + Predicates: []sqlparser.Expr{sQuery}, + } } - return rVindex == lVindex + return src, nil } -// Searches for the single column returned from a subquery, like the `col` in `(SELECT col FROM tbl)` -func extractSingleColumnSubquerySelection(subquery *sqlparser.Subquery) *sqlparser.ColName { - if subquery.Select.GetColumnCount() != 1 { - return nil +// mergeSubqueryInputs checks whether two operators can be merged into a single one. +// If they can be merged, a new operator with the merged routing is returned +// If they cannot be merged, nil is returned. 
+// These rules are similar but different from join merging +func mergeSubqueryInputs(ctx *plancontext.PlanningContext, in, out ops.Operator, joinPredicates []sqlparser.Expr, m *subqueryRouteMerger) (*Route, error) { + inRoute, outRoute := operatorsToRoutes(in, out) + if inRoute == nil || outRoute == nil { + return nil, nil } - columnExpr := subquery.Select.GetColumns()[0] + inRoute, outRoute, inRouting, outRouting, sameKeyspace := getRoutesOrAlternates(inRoute, outRoute) + inner, outer := getRoutingType(inRouting), getRoutingType(outRouting) - aliasedExpr, ok := columnExpr.(*sqlparser.AliasedExpr) - if !ok { - return nil + switch { + // We have to let the outer control how many rows are returned, + // which means that we have to be careful with merging when the outer side + case inner == dual || + (inner == anyShard && sameKeyspace): + return m.merge(ctx, inRoute, outRoute, outRouting) + + case inner == none && sameKeyspace: + return m.merge(ctx, inRoute, outRoute, inRouting) + + // we can merge dual-outer subqueries only if the + // inner is guaranteed to hit a single shard + case inRoute.IsSingleShard() && + (outer == dual || (outer == anyShard && sameKeyspace)): + return m.merge(ctx, inRoute, outRoute, inRouting) + + case outer == none && sameKeyspace: + return m.merge(ctx, inRoute, outRoute, outRouting) + + // infoSchema routing is complex, so we handle it in a separate method + case inner == infoSchema && outer == infoSchema: + return tryMergeInfoSchemaRoutings(ctx, inRouting, outRouting, m, inRoute, outRoute) + + // sharded routing is complex, so we handle it in a separate method + case inner == sharded && outer == sharded: + return tryMergeJoinShardedRouting(ctx, inRoute, outRoute, m, joinPredicates) + + default: + return nil, nil } +} - return getColName(aliasedExpr.Expr) +func mergedWith(inner *Route, outer *Route) []*Route { + mergedWith := append(inner.MergedWith, inner, outer) + mergedWith = append(mergedWith, outer.MergedWith...) 
+ return mergedWith } + +var _ merger = (*subqueryRouteMerger)(nil) diff --git a/go/vt/vtgate/planbuilder/operators/table.go b/go/vt/vtgate/planbuilder/operators/table.go index 92735a055cd..21f46286545 100644 --- a/go/vt/vtgate/planbuilder/operators/table.go +++ b/go/vt/vtgate/planbuilder/operators/table.go @@ -17,7 +17,9 @@ limitations under the License. package operators import ( - "vitess.io/vitess/go/slices2" + "fmt" + + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" @@ -54,7 +56,7 @@ func (to *Table) Clone([]ops.Operator) ops.Operator { } // Introduces implements the PhysicalOperator interface -func (to *Table) Introduces() semantics.TableSet { +func (to *Table) introducesTableID() semantics.TableSet { return to.QTable.ID } @@ -63,20 +65,31 @@ func (to *Table) AddPredicate(_ *plancontext.PlanningContext, expr sqlparser.Exp return newFilter(to, expr), nil } -func (to *Table) AddColumn(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, _, addToGroupBy bool) (ops.Operator, int, error) { - if addToGroupBy { - return nil, 0, vterrors.VT13001("tried to add group by to a table") +func (to *Table) AddColumn(*plancontext.PlanningContext, bool, bool, *sqlparser.AliasedExpr) (int, error) { + return 0, vterrors.VT13001("did not expect this method to be called") +} + +func (to *Table) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) { + colToFind, ok := expr.(*sqlparser.ColName) + if !ok { + return -1, nil } - offset, err := addColumn(ctx, to, expr.Expr) - if err != nil { - return nil, 0, err + + for idx, colName := range to.Columns { + if colName.Name.Equal(colToFind.Name) { + return idx, nil + } } - return to, offset, nil + return -1, nil +} + +func (to *Table) GetColumns(*plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return slice.Map(to.Columns, colNameToExpr), nil } -func (to *Table) 
GetColumns() ([]*sqlparser.AliasedExpr, error) { - return slices2.Map(to.Columns, colNameToExpr), nil +func (to *Table) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return transformColumnsToSelectExprs(ctx, to) } func (to *Table) GetOrdering() ([]ops.OrderBy, error) { @@ -86,6 +99,7 @@ func (to *Table) GetOrdering() ([]ops.OrderBy, error) { func (to *Table) GetColNames() []*sqlparser.ColName { return to.Columns } + func (to *Table) AddCol(col *sqlparser.ColName) { to.Columns = append(to.Columns, col) } @@ -100,7 +114,7 @@ func (to *Table) TablesUsed() []string { func addColumn(ctx *plancontext.PlanningContext, op ColNameColumns, e sqlparser.Expr) (int, error) { col, ok := e.(*sqlparser.ColName) if !ok { - return 0, vterrors.VT13001("cannot push this expression to a table/vindex: %s", sqlparser.String(e)) + return 0, vterrors.VT12001(fmt.Sprintf("cannot add '%s' expression to a table/vindex", sqlparser.String(e))) } sqlparser.RemoveKeyspaceFromColName(col) cols := op.GetColNames() @@ -113,17 +127,16 @@ func addColumn(ctx *plancontext.PlanningContext, op ColNameColumns, e sqlparser. 
return offset, nil } -func (to *Table) Description() ops.OpDescription { - var columns []string - for _, col := range to.Columns { - columns = append(columns, sqlparser.String(col)) +func (to *Table) ShortDescription() string { + tbl := to.VTable.String() + var alias, where string + if !to.QTable.Alias.As.IsEmpty() { + alias = " AS " + to.QTable.Alias.As.String() } - return ops.OpDescription{ - OperatorType: "Table", - Other: map[string]any{"Columns": columns}, + + if len(to.QTable.Predicates) > 0 { + where = " WHERE " + sqlparser.String(sqlparser.AndExpressions(to.QTable.Predicates...)) } -} -func (to *Table) ShortDescription() string { - return to.VTable.String() + return tbl + alias + where } diff --git a/go/vt/vtgate/planbuilder/operators/union.go b/go/vt/vtgate/planbuilder/operators/union.go index 4fff00ef819..54740e70a29 100644 --- a/go/vt/vtgate/planbuilder/operators/union.go +++ b/go/vt/vtgate/planbuilder/operators/union.go @@ -17,32 +17,49 @@ limitations under the License. package operators import ( + "fmt" + "slices" + + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type Union struct { - Sources []ops.Operator - Distinct bool + Sources []ops.Operator + + // These are the select expressions coming from each source + Selects []sqlparser.SelectExprs + distinct bool - // TODO this should be removed. 
For now it's used to fail queries - Ordering []ops.OrderBy + unionColumns sqlparser.SelectExprs + unionColumnsAsAlisedExprs []*sqlparser.AliasedExpr +} - noColumns +func newUnion(srcs []ops.Operator, sourceSelects []sqlparser.SelectExprs, columns sqlparser.SelectExprs, distinct bool) *Union { + if columns == nil { + panic("rt") + } + return &Union{ + Sources: srcs, + Selects: sourceSelects, + distinct: distinct, + unionColumns: columns, + } } // Clone implements the Operator interface func (u *Union) Clone(inputs []ops.Operator) ops.Operator { newOp := *u newOp.Sources = inputs + newOp.Selects = slices.Clone(u.Selects) return &newOp } func (u *Union) GetOrdering() ([]ops.OrderBy, error) { - return u.Ordering, nil + return nil, nil } // Inputs implements the Operator interface @@ -87,16 +104,33 @@ func (u *Union) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Ex if !ok { return nil, vterrors.VT12001("pushing predicates on UNION where the first SELECT contains * or NEXT") } - if !ae.As.IsEmpty() { - offsets[ae.As.String()] = i - continue - } - col, ok := ae.Expr.(*sqlparser.ColName) - if ok { - offsets[col.Name.Lowered()] = i + offsets[ae.ColumnName()] = i + } + + needsFilter, exprPerSource, err := u.predicatePerSource(expr, offsets) + if err != nil { + return nil, err + } + if needsFilter { + return &Filter{ + Source: u, + Predicates: []sqlparser.Expr{expr}, + }, nil + } + + for i, src := range u.Sources { + u.Sources[i], err = src.AddPredicate(ctx, exprPerSource[i]) + if err != nil { + return nil, err } } + return u, nil +} + +func (u *Union) predicatePerSource(expr sqlparser.Expr, offsets map[string]int) (bool, []sqlparser.Expr, error) { + needsFilter := false + exprPerSource := make([]sqlparser.Expr, len(u.Sources)) for i := range u.Sources { var err error predicate := sqlparser.CopyOnRewrite(expr, nil, func(cursor *sqlparser.CopyOnWriteCursor) { @@ -107,7 +141,7 @@ func (u *Union) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Ex 
idx, ok := offsets[col.Name.Lowered()] if !ok { - err = vterrors.VT13001("cannot push predicates on concatenate, missing columns from the UNION") + needsFilter = true cursor.StopTreeWalk() return } @@ -121,22 +155,18 @@ func (u *Union) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Ex ae, ok := sel.SelectExprs[idx].(*sqlparser.AliasedExpr) if !ok { - err = vterrors.VT12001("pushing non-aliased expression predicates on concatenate") + err = vterrors.VT09015() cursor.StopTreeWalk() return } cursor.Replace(ae.Expr) }, nil).(sqlparser.Expr) - if err != nil { - return nil, err - } - u.Sources[i], err = u.Sources[i].AddPredicate(ctx, predicate) - if err != nil { - return nil, err + if err != nil || needsFilter { + return needsFilter, nil, err } + exprPerSource[i] = predicate } - - return u, nil + return needsFilter, exprPerSource, nil } func (u *Union) GetSelectFor(source int) (*sqlparser.Select, error) { @@ -144,7 +174,7 @@ func (u *Union) GetSelectFor(source int) (*sqlparser.Select, error) { for { switch op := src.(type) { case *Horizon: - return sqlparser.GetFirstSelect(op.Select), nil + return sqlparser.GetFirstSelect(op.Query), nil case *Route: src = op.Source default: @@ -153,49 +183,144 @@ func (u *Union) GetSelectFor(source int) (*sqlparser.Select, error) { } } -func (u *Union) Compact(*plancontext.PlanningContext) (ops.Operator, *rewrite.ApplyResult, error) { - var newSources []ops.Operator - var anythingChanged *rewrite.ApplyResult - for _, source := range u.Sources { - var other *Union - horizon, ok := source.(*Horizon) - if ok { - union, ok := horizon.Source.(*Union) - if ok { - other = union +func (u *Union) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool, expr *sqlparser.AliasedExpr) (int, error) { + if reuse { + offset, err := u.FindCol(ctx, expr.Expr, false) + if err != nil { + return 0, err + } + + if offset >= 0 { + return offset, nil + } + } + cols, err := u.GetColumns(ctx) + if err != nil { + return 0, err + } + + switch 
e := expr.Expr.(type) { + case *sqlparser.ColName: + // here we deal with pure column access on top of the union + offset := slices.IndexFunc(cols, func(expr *sqlparser.AliasedExpr) bool { + return e.Name.EqualString(expr.ColumnName()) + }) + if offset == -1 { + return 0, vterrors.VT13001(fmt.Sprintf("could not find the column '%s' on the UNION", sqlparser.String(e))) + } + return offset, nil + case *sqlparser.WeightStringFuncExpr: + wsArg := e.Expr + argIdx := slices.IndexFunc(cols, func(expr *sqlparser.AliasedExpr) bool { + return ctx.SemTable.EqualsExprWithDeps(wsArg, expr.Expr) + }) + + if argIdx == -1 { + return 0, vterrors.VT13001(fmt.Sprintf("could not find the argument to the weight_string function: %s", sqlparser.String(wsArg))) + } + + outputOffset, err := u.addWeightStringToOffset(ctx, argIdx, gb) + if err != nil { + return 0, err + } + + return outputOffset, nil + default: + return 0, vterrors.VT13001(fmt.Sprintf("only weight_string function is expected - got %s", sqlparser.String(expr))) + } +} + +func (u *Union) addWeightStringToOffset(ctx *plancontext.PlanningContext, argIdx int, addToGroupBy bool) (outputOffset int, err error) { + for i, src := range u.Sources { + exprs := u.Selects[i] + selectExpr := exprs[argIdx] + ae, ok := selectExpr.(*sqlparser.AliasedExpr) + if !ok { + return 0, vterrors.VT09015() + } + thisOffset, err := src.AddColumn(ctx, false, addToGroupBy, aeWrap(weightStringFor(ae.Expr))) + if err != nil { + return 0, err + } + + // all offsets for the newly added ws need to line up + if i == 0 { + outputOffset = thisOffset + } else { + if thisOffset != outputOffset { + return 0, vterrors.VT12001("weight_string offsets did not line up for UNION") } } - if other == nil { - newSources = append(newSources, source) - continue + } + return +} + +func (u *Union) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) { + columns, err := u.GetColumns(ctx) + if err != nil { + return 0, err + } + + for idx, 
col := range columns { + if ctx.SemTable.EqualsExprWithDeps(expr, col.Expr) { + return idx, nil } - anythingChanged = anythingChanged.Merge(rewrite.NewTree("merged UNIONs", other)) - switch { - case len(other.Ordering) == 0 && !other.Distinct: - fallthrough - case u.Distinct: - // if the current UNION is a DISTINCT, we can safely ignore everything from children UNIONs, except LIMIT - newSources = append(newSources, other.Sources...) + } - default: - newSources = append(newSources, other) + return -1, nil +} + +func (u *Union) GetColumns(ctx *plancontext.PlanningContext) (result []*sqlparser.AliasedExpr, err error) { + if u.unionColumnsAsAlisedExprs == nil { + allOk := true + u.unionColumnsAsAlisedExprs = slice.Map(u.unionColumns, func(from sqlparser.SelectExpr) *sqlparser.AliasedExpr { + expr, ok := from.(*sqlparser.AliasedExpr) + allOk = allOk && ok + return expr + }) + if !allOk { + return nil, vterrors.VT09015() } } - if anythingChanged != rewrite.SameTree { - u.Sources = newSources + + // if any of the inputs has more columns that we expect, we want to show on top of UNION, so the results can + // be truncated to the expected result columns and nothing else + for _, src := range u.Sources { + columns, err := src.GetColumns(ctx) + if err != nil { + return nil, err + } + + for len(columns) > len(u.unionColumnsAsAlisedExprs) { + u.unionColumnsAsAlisedExprs = append(u.unionColumnsAsAlisedExprs, aeWrap(sqlparser.NewIntLiteral("0"))) + } } - return u, anythingChanged, nil + return u.unionColumnsAsAlisedExprs, nil } -func (u *Union) NoLHSTableSet() {} +func (u *Union) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + // if any of the inputs has more columns that we expect, we want to show on top of UNION, so the results can + // be truncated to the expected result columns and nothing else + for _, src := range u.Sources { + columns, err := src.GetSelectExprs(ctx) + if err != nil { + return nil, err + } -func (u *Union) Description() 
ops.OpDescription { - return ops.OpDescription{ - OperatorType: "Union", + for len(columns) > len(u.unionColumns) { + u.unionColumns = append(u.unionColumns, aeWrap(sqlparser.NewIntLiteral("0"))) + } } + + return u.unionColumns, nil } +func (u *Union) NoLHSTableSet() {} + func (u *Union) ShortDescription() string { + if u.distinct { + return "DISTINCT" + } return "" } diff --git a/go/vt/vtgate/planbuilder/operators/union_merging.go b/go/vt/vtgate/planbuilder/operators/union_merging.go new file mode 100644 index 00000000000..4c8b02f76d8 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/union_merging.go @@ -0,0 +1,259 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" +) + +// mergeUnionInputInAnyOrder merges sources the sources of the union in any order +// can be used for UNION DISTINCT +func mergeUnionInputInAnyOrder(ctx *plancontext.PlanningContext, op *Union) ([]ops.Operator, []sqlparser.SelectExprs, error) { + sources := op.Sources + selects := op.Selects + + // next we'll go over all the plans from and check if any two can be merged. 
if they can, they are merged, + // and we continue checking for pairs of plans that can be merged into a single route + idx := 0 + for idx < len(sources) { + keep := make([]bool, len(sources)) + srcA := sources[idx] + merged := false + for j, srcB := range sources { + if j <= idx { + continue + } + selA := selects[idx] + selB := selects[j] + newPlan, sel, err := mergeUnionInputs(ctx, srcA, srcB, selA, selB, op.distinct) + if err != nil { + return nil, nil, err + } + if newPlan != nil { + sources[idx] = newPlan + selects[idx] = sel + srcA = newPlan + merged = true + } else { + keep[j] = true + } + } + if !merged { + return sources, selects, nil + } + + var newSources []ops.Operator + var newSelects []sqlparser.SelectExprs + for i, source := range sources { + if keep[i] || i <= idx { + newSources = append(newSources, source) + newSelects = append(newSelects, selects[i]) + } + } + idx++ + sources = newSources + selects = newSelects + } + + return sources, selects, nil +} + +func mergeUnionInputsInOrder(ctx *plancontext.PlanningContext, op *Union) ([]ops.Operator, []sqlparser.SelectExprs, error) { + sources := op.Sources + selects := op.Selects + for { + merged := false + for i := 0; i < len(sources)-1; i++ { + j := i + 1 + srcA, selA := sources[i], selects[i] + srcB, selB := sources[j], selects[j] + newPlan, sel, err := mergeUnionInputs(ctx, srcA, srcB, selA, selB, op.distinct) + if err != nil { + return nil, nil, err + } + if newPlan != nil { + sources[i] = newPlan + selects[i] = sel + merged = true + sources = append(sources[:i+1], sources[j+1:]...) + selects = append(selects[:i+1], selects[j+1:]...) + } + } + if !merged { + break + } + } + + return sources, selects, nil +} + +// mergeUnionInputs checks whether two operators can be merged into a single one. +// If they can be merged, a new operator with the merged routing is returned +// If they cannot be merged, nil is returned. 
+// this function is very similar to mergeJoinInputs +func mergeUnionInputs( + ctx *plancontext.PlanningContext, + lhs, rhs ops.Operator, + lhsExprs, rhsExprs sqlparser.SelectExprs, + distinct bool, +) (ops.Operator, sqlparser.SelectExprs, error) { + lhsRoute, rhsRoute, routingA, routingB, a, b, sameKeyspace := prepareInputRoutes(lhs, rhs) + if lhsRoute == nil { + return nil, nil, nil + } + + switch { + // if either side is a dual query, we can always merge them together + // an unsharded/reference route can be merged with anything going to that keyspace + case b == dual || (b == anyShard && sameKeyspace): + return createMergedUnion(ctx, lhsRoute, rhsRoute, lhsExprs, rhsExprs, distinct, routingA) + case a == dual || (a == anyShard && sameKeyspace): + return createMergedUnion(ctx, lhsRoute, rhsRoute, lhsExprs, rhsExprs, distinct, routingB) + + case a == none: + return createMergedUnion(ctx, lhsRoute, rhsRoute, lhsExprs, rhsExprs, distinct, routingB) + case b == none: + return createMergedUnion(ctx, lhsRoute, rhsRoute, lhsExprs, rhsExprs, distinct, routingA) + + case a == sharded && b == sharded && sameKeyspace: + res, exprs, err := tryMergeUnionShardedRouting(ctx, lhsRoute, rhsRoute, lhsExprs, rhsExprs, distinct) + if err != nil || res != nil { + return res, exprs, err + } + } + return nil, nil, nil +} + +func tryMergeUnionShardedRouting( + ctx *plancontext.PlanningContext, + routeA, routeB *Route, + exprsA, exprsB sqlparser.SelectExprs, + distinct bool, +) (ops.Operator, sqlparser.SelectExprs, error) { + tblA := routeA.Routing.(*ShardedRouting) + tblB := routeB.Routing.(*ShardedRouting) + + scatterA := tblA.RouteOpCode == engine.Scatter + scatterB := tblB.RouteOpCode == engine.Scatter + uniqueA := tblA.RouteOpCode == engine.EqualUnique + uniqueB := tblB.RouteOpCode == engine.EqualUnique + + switch { + case scatterA: + return createMergedUnion(ctx, routeA, routeB, exprsA, exprsB, distinct, tblA) + + case scatterB: + return createMergedUnion(ctx, routeA, routeB, 
exprsA, exprsB, distinct, tblB) + + case uniqueA && uniqueB: + aVdx := tblA.SelectedVindex() + bVdx := tblB.SelectedVindex() + aExpr := tblA.VindexExpressions() + bExpr := tblB.VindexExpressions() + if aVdx == bVdx && gen4ValuesEqual(ctx, aExpr, bExpr) { + return createMergedUnion(ctx, routeA, routeB, exprsA, exprsB, distinct, tblA) + } + } + + return nil, nil, nil +} + +func createMergedUnion( + ctx *plancontext.PlanningContext, + lhsRoute, rhsRoute *Route, + lhsExprs, rhsExprs sqlparser.SelectExprs, + distinct bool, + routing Routing) (ops.Operator, sqlparser.SelectExprs, error) { + + // if there are `*` on either side, or a different number of SelectExpr items, + // we give up aligning the expressions and trust that we can push everything down + cols := make(sqlparser.SelectExprs, len(lhsExprs)) + noDeps := len(lhsExprs) != len(rhsExprs) + for idx, col := range lhsExprs { + ae, ok := col.(*sqlparser.AliasedExpr) + if !ok { + cols[idx] = col + noDeps = true + continue + } + col := sqlparser.NewColName(ae.ColumnName()) + cols[idx] = aeWrap(col) + if noDeps { + continue + } + + deps := ctx.SemTable.RecursiveDeps(ae.Expr) + ae, ok = rhsExprs[idx].(*sqlparser.AliasedExpr) + if !ok { + noDeps = true + continue + } + deps = deps.Merge(ctx.SemTable.RecursiveDeps(ae.Expr)) + ctx.SemTable.Recursive[col] = deps + } + + union := newUnion([]ops.Operator{lhsRoute.Source, rhsRoute.Source}, []sqlparser.SelectExprs{lhsExprs, rhsExprs}, cols, distinct) + selectExprs := unionSelects(lhsExprs) + return &Route{ + Source: union, + MergedWith: []*Route{rhsRoute}, + Routing: routing, + }, selectExprs, nil +} + +func compactUnion(u *Union) *rewrite.ApplyResult { + if u.distinct { + // first we remove unnecessary DISTINCTs + for idx, source := range u.Sources { + d, ok := source.(*Distinct) + if !ok || !d.Required { + continue + } + u.Sources[idx] = d.Source + } + } + + var newSources []ops.Operator + var newSelects []sqlparser.SelectExprs + merged := false + + for idx, source := range 
u.Sources { + other, ok := source.(*Union) + + if ok && (u.distinct || !other.distinct) { + newSources = append(newSources, other.Sources...) + newSelects = append(newSelects, other.Selects...) + merged = true + continue + } + + newSources = append(newSources, source) + newSelects = append(newSelects, u.Selects[idx]) + } + + if !merged { + return rewrite.SameTree + } + + u.Sources = newSources + u.Selects = newSelects + return rewrite.NewTree("merged UNIONs", u) +} diff --git a/go/vt/vtgate/planbuilder/operators/update.go b/go/vt/vtgate/planbuilder/operators/update.go index f9c831860f1..5a7716bdbeb 100644 --- a/go/vt/vtgate/planbuilder/operators/update.go +++ b/go/vt/vtgate/planbuilder/operators/update.go @@ -17,41 +17,63 @@ limitations under the License. package operators import ( + "fmt" + "maps" + "slices" + "strings" + + vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) -type Update struct { - QTable *QueryTable - VTable *vindexes.Table - Assignments map[string]sqlparser.Expr - ChangedVindexValues map[string]*engine.VindexValues - OwnedVindexQuery string - AST *sqlparser.Update +type ( + Update struct { + QTable *QueryTable + VTable *vindexes.Table + Assignments []SetExpr + ChangedVindexValues map[string]*engine.VindexValues + OwnedVindexQuery string + Ignore sqlparser.Ignore + OrderBy sqlparser.OrderBy + Limit *sqlparser.Limit + + // these subqueries cannot be merged as they are part of the changed vindex values + // these values are needed to be sent over to lookup vindex for update. + // On merging this information will be lost, so subquery merge is blocked. 
+ SubQueriesArgOnChangedVindex []string + + noInputs + noColumns + noPredicates + } + + SetExpr struct { + Name *sqlparser.ColName + Expr *ProjExpr + } +) - noInputs - noColumns - noPredicates +func (se SetExpr) String() string { + return fmt.Sprintf("%s = %s", sqlparser.String(se.Name), sqlparser.String(se.Expr.EvalExpr)) } // Introduces implements the PhysicalOperator interface -func (u *Update) Introduces() semantics.TableSet { +func (u *Update) introducesTableID() semantics.TableSet { return u.QTable.ID } // Clone implements the Operator interface func (u *Update) Clone([]ops.Operator) ops.Operator { - return &Update{ - QTable: u.QTable, - VTable: u.VTable, - Assignments: u.Assignments, - ChangedVindexValues: u.ChangedVindexValues, - OwnedVindexQuery: u.OwnedVindexQuery, - AST: u.AST, - } + upd := *u + upd.Assignments = slices.Clone(u.Assignments) + upd.ChangedVindexValues = maps.Clone(u.ChangedVindexValues) + return &upd } func (u *Update) GetOrdering() ([]ops.OrderBy, error) { @@ -65,12 +87,605 @@ func (u *Update) TablesUsed() []string { return nil } -func (u *Update) Description() ops.OpDescription { - return ops.OpDescription{ - OperatorType: "Update", +func (u *Update) ShortDescription() string { + s := []string{u.VTable.String()} + if u.Limit != nil { + s = append(s, sqlparser.String(u.Limit)) + } + if len(u.OrderBy) > 0 { + s = append(s, sqlparser.String(u.OrderBy)) } + return strings.Join(s, " ") } -func (u *Update) ShortDescription() string { - return u.VTable.String() +func createOperatorFromUpdate(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update) (ops.Operator, error) { + tableInfo, qt, err := createQueryTableForDML(ctx, updStmt.TableExprs[0], updStmt.Where) + if err != nil { + return nil, err + } + + vindexTable, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "update") + if err != nil { + return nil, err + } + + updClone := sqlparser.CloneRefOfUpdate(updStmt) + updOp, err := createUpdateOperator(ctx, updStmt, vindexTable, qt, 
routing) + if err != nil { + return nil, err + } + + ksMode, err := ctx.VSchema.ForeignKeyMode(vindexTable.Keyspace.Name) + if err != nil { + return nil, err + } + // Unmanaged foreign-key-mode, we don't need to do anything. + if ksMode != vschemapb.Keyspace_managed { + return updOp, nil + } + + parentFks, childFks := getFKRequirementsForUpdate(ctx, updStmt.Exprs, vindexTable) + if len(childFks) == 0 && len(parentFks) == 0 { + return updOp, nil + } + + // If the delete statement has a limit, we don't support it yet. + if updStmt.Limit != nil { + return nil, vterrors.VT12001("update with limit with foreign key constraints") + } + + return buildFkOperator(ctx, updOp, updClone, parentFks, childFks, vindexTable) +} + +func createUpdateOperator(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update, vindexTable *vindexes.Table, qt *QueryTable, routing Routing) (ops.Operator, error) { + sqc := &SubQueryBuilder{} + assignments := make([]SetExpr, len(updStmt.Exprs)) + for idx, updExpr := range updStmt.Exprs { + expr, subqs, err := sqc.pullOutValueSubqueries(ctx, updExpr.Expr, qt.ID, true) + if err != nil { + return nil, err + } + if len(subqs) == 0 { + expr = updExpr.Expr + } + proj := newProjExpr(aeWrap(expr)) + if len(subqs) != 0 { + proj.Info = SubQueryExpression(subqs) + } + assignments[idx] = SetExpr{ + Name: updExpr.Name, + Expr: proj, + } + } + + vp, cvv, ovq, subQueriesArgOnChangedVindex, err := getUpdateVindexInformation(updStmt, vindexTable, qt.ID, assignments) + if err != nil { + return nil, err + } + + tr, ok := routing.(*ShardedRouting) + if ok { + tr.VindexPreds = vp + } + + for _, predicate := range qt.Predicates { + if subq, err := sqc.handleSubquery(ctx, predicate, qt.ID); err != nil { + return nil, err + } else if subq != nil { + continue + } + routing, err = UpdateRoutingLogic(ctx, predicate, routing) + if err != nil { + return nil, err + } + } + + if routing.OpCode() == engine.Scatter && updStmt.Limit != nil { + // TODO systay: we should probably 
check for other op code types - IN could also hit multiple shards (2022-04-07) + return nil, vterrors.VT12001("multi shard UPDATE with LIMIT") + } + + route := &Route{ + Source: &Update{ + QTable: qt, + VTable: vindexTable, + Assignments: assignments, + ChangedVindexValues: cvv, + OwnedVindexQuery: ovq, + Ignore: updStmt.Ignore, + Limit: updStmt.Limit, + OrderBy: updStmt.OrderBy, + SubQueriesArgOnChangedVindex: subQueriesArgOnChangedVindex, + }, + Routing: routing, + Comments: updStmt.Comments, + } + + return sqc.getRootOperator(route), nil +} + +// getFKRequirementsForUpdate analyzes update expressions to determine which foreign key constraints needs management at the VTGate. +// It identifies parent and child foreign keys that require verification or cascade operations due to column updates. +func getFKRequirementsForUpdate(ctx *plancontext.PlanningContext, updateExprs sqlparser.UpdateExprs, vindexTable *vindexes.Table) ([]vindexes.ParentFKInfo, []vindexes.ChildFKInfo) { + parentFks := vindexTable.ParentFKsNeedsHandling(ctx.VerifyAllFKs, ctx.ParentFKToIgnore) + childFks := vindexTable.ChildFKsNeedsHandling(ctx.VerifyAllFKs, vindexes.UpdateAction) + if len(childFks) == 0 && len(parentFks) == 0 { + return nil, nil + } + + pFksRequired := make([]bool, len(parentFks)) + cFksRequired := make([]bool, len(childFks)) + // Go over all the update expressions + for _, updateExpr := range updateExprs { + // Any foreign key to a child table for a column that has been updated + // will require the cascade operations or restrict verification to happen, so we include all such foreign keys. + for idx, childFk := range childFks { + if childFk.ParentColumns.FindColumn(updateExpr.Name.Name) >= 0 { + cFksRequired[idx] = true + } + } + // If we are setting a column to NULL, then we don't need to verify the existance of an + // equivalent row in the parent table, even if this column was part of a foreign key to a parent table. 
+ if sqlparser.IsNull(updateExpr.Expr) { + continue + } + // We add all the possible parent foreign key constraints that need verification that an equivalent row + // exists, given that this column has changed. + for idx, parentFk := range parentFks { + if parentFk.ChildColumns.FindColumn(updateExpr.Name.Name) >= 0 { + pFksRequired[idx] = true + } + } + } + // For the parent foreign keys, if any of the columns part of the fk is set to NULL, + // then, we don't care for the existance of an equivalent row in the parent table. + for idx, parentFk := range parentFks { + for _, updateExpr := range updateExprs { + if !sqlparser.IsNull(updateExpr.Expr) { + continue + } + if parentFk.ChildColumns.FindColumn(updateExpr.Name.Name) >= 0 { + pFksRequired[idx] = false + } + } + } + // Get the filtered lists and return them. + var pFksNeedsHandling []vindexes.ParentFKInfo + var cFksNeedsHandling []vindexes.ChildFKInfo + for idx, parentFk := range parentFks { + if pFksRequired[idx] { + pFksNeedsHandling = append(pFksNeedsHandling, parentFk) + } + } + for idx, childFk := range childFks { + if cFksRequired[idx] { + cFksNeedsHandling = append(cFksNeedsHandling, childFk) + } + } + return pFksNeedsHandling, cFksNeedsHandling +} + +func buildFkOperator(ctx *plancontext.PlanningContext, updOp ops.Operator, updClone *sqlparser.Update, parentFks []vindexes.ParentFKInfo, childFks []vindexes.ChildFKInfo, updatedTable *vindexes.Table) (ops.Operator, error) { + // We only support simple expressions in update queries for foreign key handling. 
+ if isNonLiteral(updClone.Exprs, parentFks, childFks) { + return nil, vterrors.VT12001("update expression with non-literal values with foreign key constraints") + } + + restrictChildFks, cascadeChildFks := splitChildFks(childFks) + + op, err := createFKCascadeOp(ctx, updOp, updClone, cascadeChildFks, updatedTable) + if err != nil { + return nil, err + } + + return createFKVerifyOp(ctx, op, updClone, parentFks, restrictChildFks) +} + +func isNonLiteral(updExprs sqlparser.UpdateExprs, parentFks []vindexes.ParentFKInfo, childFks []vindexes.ChildFKInfo) bool { + for _, updateExpr := range updExprs { + if sqlparser.IsLiteral(updateExpr.Expr) { + continue + } + for _, parentFk := range parentFks { + if parentFk.ChildColumns.FindColumn(updateExpr.Name.Name) >= 0 { + return true + } + } + for _, childFk := range childFks { + if childFk.ParentColumns.FindColumn(updateExpr.Name.Name) >= 0 { + return true + } + } + } + return false +} + +// splitChildFks splits the child foreign keys into restrict and cascade list as restrict is handled through Verify operator and cascade is handled through Cascade operator. +func splitChildFks(fks []vindexes.ChildFKInfo) (restrictChildFks, cascadeChildFks []vindexes.ChildFKInfo) { + for _, fk := range fks { + // Any RESTRICT type foreign keys that arrive here for 2 reasons— + // 1. cross-shard/cross-keyspace RESTRICT cases. + // 2. shard-scoped/unsharded RESTRICT cases arising because we have to validate all the foreign keys on VTGate. + if fk.OnUpdate.IsRestrict() { + // For RESTRICT foreign keys, we need to verify that there are no child rows corresponding to the rows being updated. + // This is done using a FkVerify Operator. + restrictChildFks = append(restrictChildFks, fk) + } else { + // For all the other foreign keys like CASCADE, SET NULL, we have to cascade the update to the children, + // This is done by using a FkCascade Operator. 
+ cascadeChildFks = append(cascadeChildFks, fk) + } + } + return +} + +func createFKCascadeOp(ctx *plancontext.PlanningContext, parentOp ops.Operator, updStmt *sqlparser.Update, childFks []vindexes.ChildFKInfo, updatedTable *vindexes.Table) (ops.Operator, error) { + if len(childFks) == 0 { + return parentOp, nil + } + + var fkChildren []*FkChild + var selectExprs []sqlparser.SelectExpr + + for _, fk := range childFks { + // We should have already filtered out update restrict foreign keys. + if fk.OnUpdate.IsRestrict() { + return nil, vterrors.VT13001("ON UPDATE RESTRICT foreign keys should already be filtered") + } + + // We need to select all the parent columns for the foreign key constraint, to use in the update of the child table. + cols, exprs := selectParentColumns(fk, len(selectExprs)) + selectExprs = append(selectExprs, exprs...) + + fkChild, err := createFkChildForUpdate(ctx, fk, updStmt, cols, updatedTable) + if err != nil { + return nil, err + } + fkChildren = append(fkChildren, fkChild) + } + + selectionOp, err := createSelectionOp(ctx, selectExprs, updStmt.TableExprs, updStmt.Where, nil, sqlparser.ForUpdateLock) + if err != nil { + return nil, err + } + + return &FkCascade{ + Selection: selectionOp, + Children: fkChildren, + Parent: parentOp, + }, nil +} + +// createFkChildForUpdate creates the update query operator for the child table based on the foreign key constraints. 
+func createFkChildForUpdate(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, updStmt *sqlparser.Update, cols []int, updatedTable *vindexes.Table) (*FkChild, error) { + // Create a ValTuple of child column names + var valTuple sqlparser.ValTuple + for _, column := range fk.ChildColumns { + valTuple = append(valTuple, sqlparser.NewColName(column.String())) + } + + // Reserve a bind variable name + bvName := ctx.ReservedVars.ReserveVariable(foreignKeyConstraintValues) + // Create a comparison expression for WHERE clause + compExpr := sqlparser.NewComparisonExpr(sqlparser.InOp, valTuple, sqlparser.NewListArg(bvName), nil) + var childWhereExpr sqlparser.Expr = compExpr + + var childOp ops.Operator + var err error + switch fk.OnUpdate { + case sqlparser.Cascade: + childOp, err = buildChildUpdOpForCascade(ctx, fk, updStmt, childWhereExpr, updatedTable) + case sqlparser.SetNull: + childOp, err = buildChildUpdOpForSetNull(ctx, fk, updStmt, childWhereExpr) + case sqlparser.SetDefault: + return nil, vterrors.VT09016() + } + if err != nil { + return nil, err + } + + return &FkChild{ + BVName: bvName, + Cols: cols, + Op: childOp, + }, nil +} + +// buildChildUpdOpForCascade builds the child update statement operator for the CASCADE type foreign key constraint. +// The query looks like this - +// +// `UPDATE SET WHERE IN ()` +func buildChildUpdOpForCascade(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, updStmt *sqlparser.Update, childWhereExpr sqlparser.Expr, updatedTable *vindexes.Table) (ops.Operator, error) { + // The update expressions are the same as the update expressions in the parent update query + // with the column names replaced with the child column names. 
+ var childUpdateExprs sqlparser.UpdateExprs + for _, updateExpr := range updStmt.Exprs { + colIdx := fk.ParentColumns.FindColumn(updateExpr.Name.Name) + if colIdx == -1 { + continue + } + + // The where condition is the same as the comparison expression above + // with the column names replaced with the child column names. + childUpdateExprs = append(childUpdateExprs, &sqlparser.UpdateExpr{ + Name: sqlparser.NewColName(fk.ChildColumns[colIdx].String()), + Expr: updateExpr.Expr, + }) + } + // Because we could be updating the child to a non-null value, + // We have to run with foreign key checks OFF because the parent isn't guaranteed to have + // the data being updated to. + parsedComments := sqlparser.Comments{ + "/*+ SET_VAR(foreign_key_checks=OFF) */", + }.Parsed() + childUpdStmt := &sqlparser.Update{ + Comments: parsedComments, + Exprs: childUpdateExprs, + TableExprs: []sqlparser.TableExpr{sqlparser.NewAliasedTableExpr(fk.Table.GetTableName(), "")}, + Where: &sqlparser.Where{Type: sqlparser.WhereClause, Expr: childWhereExpr}, + } + // Since we are running the child update with foreign key checks turned off, + // we need to verify the validity of the remaining foreign keys on VTGate, + // while specifically ignoring the parent foreign key in question. + return createOpFromStmt(ctx, childUpdStmt, true, fk.String(updatedTable)) +} + +// buildChildUpdOpForSetNull builds the child update statement operator for the SET NULL type foreign key constraint. +// The query looks like this - +// +// `UPDATE SET +// WHERE IN () +// [AND ({ IS NULL OR}... NOT IN ())]` +func buildChildUpdOpForSetNull(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, updStmt *sqlparser.Update, childWhereExpr sqlparser.Expr) (ops.Operator, error) { + // For the SET NULL type constraint, we need to set all the child columns to NULL. 
+ var childUpdateExprs sqlparser.UpdateExprs + for _, column := range fk.ChildColumns { + childUpdateExprs = append(childUpdateExprs, &sqlparser.UpdateExpr{ + Name: sqlparser.NewColName(column.String()), + Expr: &sqlparser.NullVal{}, + }) + } + + // SET NULL cascade should be avoided for the case where the parent columns remains unchanged on the update. + // We need to add a condition to the where clause to handle this case. + // The additional condition looks like [AND ({ IS NULL OR}... NOT IN ())]. + // If any of the parent columns is being set to NULL, then we don't need this condition. + // However, we don't necessarily know on Plan time if the Expr being updated to is NULL or not. Specifically, bindVariables in Prepared statements can be NULL on runtime. + // Therefore, in the condition we create, we also need to make it resilient to NULL values. Therefore we check if each individual value is NULL or not and OR it with the main condition. + // For example, if we are setting `update parent cola = :v1 and colb = :v2`, then on the child, the where condition would look something like this - + // `:v1 IS NULL OR :v2 IS NULL OR (child_cola, child_colb) NOT IN ((:v1,:v2))` + // So, if either of :v1 or :v2 is NULL, then the entire condition is true (which is the same as not having the condition when :v1 or :v2 is NULL). + compExpr := nullSafeNotInComparison(updStmt.Exprs, fk) + if compExpr != nil { + childWhereExpr = &sqlparser.AndExpr{ + Left: childWhereExpr, + Right: compExpr, + } + } + childUpdStmt := &sqlparser.Update{ + Exprs: childUpdateExprs, + TableExprs: []sqlparser.TableExpr{sqlparser.NewAliasedTableExpr(fk.Table.GetTableName(), "")}, + Where: &sqlparser.Where{Type: sqlparser.WhereClause, Expr: childWhereExpr}, + } + return createOpFromStmt(ctx, childUpdStmt, false, "") +} + +// createFKVerifyOp creates the verify operator for the parent foreign key constraints. 
+func createFKVerifyOp(ctx *plancontext.PlanningContext, childOp ops.Operator, updStmt *sqlparser.Update, parentFks []vindexes.ParentFKInfo, restrictChildFks []vindexes.ChildFKInfo) (ops.Operator, error) { + if len(parentFks) == 0 && len(restrictChildFks) == 0 { + return childOp, nil + } + + var Verify []*VerifyOp + // This validates that new values exists on the parent table. + for _, fk := range parentFks { + op, err := createFkVerifyOpForParentFKForUpdate(ctx, updStmt, fk) + if err != nil { + return nil, err + } + Verify = append(Verify, &VerifyOp{ + Op: op, + Typ: engine.ParentVerify, + }) + } + // This validates that the old values don't exist on the child table. + for _, fk := range restrictChildFks { + op, err := createFkVerifyOpForChildFKForUpdate(ctx, updStmt, fk) + if err != nil { + return nil, err + } + Verify = append(Verify, &VerifyOp{ + Op: op, + Typ: engine.ChildVerify, + }) + } + + return &FkVerify{ + Verify: Verify, + Input: childOp, + }, nil +} + +// Each parent foreign key constraint is verified by an anti join query of the form: +// select 1 from child_tbl left join parent_tbl on +// where and and limit 1 +// E.g: +// Child (c1, c2) references Parent (p1, p2) +// update Child set c1 = 1 where id = 1 +// verify query: +// select 1 from Child left join Parent on Parent.p1 = 1 and Parent.p2 = Child.c2 +// where Parent.p1 is null and Parent.p2 is null and Child.id = 1 +// and Child.c2 is not null +// limit 1 +func createFkVerifyOpForParentFKForUpdate(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update, pFK vindexes.ParentFKInfo) (ops.Operator, error) { + childTblExpr := updStmt.TableExprs[0].(*sqlparser.AliasedTableExpr) + childTbl, err := childTblExpr.TableName() + if err != nil { + return nil, err + } + parentTbl := pFK.Table.GetTableName() + var whereCond sqlparser.Expr + var joinCond sqlparser.Expr + for idx, column := range pFK.ChildColumns { + var matchedExpr *sqlparser.UpdateExpr + for _, updateExpr := range updStmt.Exprs { + if 
column.Equal(updateExpr.Name.Name) { + matchedExpr = updateExpr + break + } + } + parentIsNullExpr := &sqlparser.IsExpr{ + Left: sqlparser.NewColNameWithQualifier(pFK.ParentColumns[idx].String(), parentTbl), + Right: sqlparser.IsNullOp, + } + var predicate sqlparser.Expr = parentIsNullExpr + var joinExpr sqlparser.Expr + if matchedExpr == nil { + predicate = &sqlparser.AndExpr{ + Left: parentIsNullExpr, + Right: &sqlparser.IsExpr{ + Left: sqlparser.NewColNameWithQualifier(pFK.ChildColumns[idx].String(), childTbl), + Right: sqlparser.IsNotNullOp, + }, + } + joinExpr = &sqlparser.ComparisonExpr{ + Operator: sqlparser.EqualOp, + Left: sqlparser.NewColNameWithQualifier(pFK.ParentColumns[idx].String(), parentTbl), + Right: sqlparser.NewColNameWithQualifier(pFK.ChildColumns[idx].String(), childTbl), + } + } else { + joinExpr = &sqlparser.ComparisonExpr{ + Operator: sqlparser.EqualOp, + Left: sqlparser.NewColNameWithQualifier(pFK.ParentColumns[idx].String(), parentTbl), + Right: prefixColNames(childTbl, matchedExpr.Expr), + } + } + + if idx == 0 { + joinCond, whereCond = joinExpr, predicate + continue + } + joinCond = &sqlparser.AndExpr{Left: joinCond, Right: joinExpr} + whereCond = &sqlparser.AndExpr{Left: whereCond, Right: predicate} + } + // add existing where condition on the update statement + if updStmt.Where != nil { + whereCond = &sqlparser.AndExpr{Left: whereCond, Right: prefixColNames(childTbl, updStmt.Where.Expr)} + } + return createSelectionOp(ctx, + sqlparser.SelectExprs{sqlparser.NewAliasedExpr(sqlparser.NewIntLiteral("1"), "")}, + []sqlparser.TableExpr{ + sqlparser.NewJoinTableExpr( + childTblExpr, + sqlparser.LeftJoinType, + sqlparser.NewAliasedTableExpr(parentTbl, ""), + sqlparser.NewJoinCondition(joinCond, nil)), + }, + sqlparser.NewWhere(sqlparser.WhereClause, whereCond), + sqlparser.NewLimitWithoutOffset(1), + sqlparser.ShareModeLock) +} + +// Each child foreign key constraint is verified by a join query of the form: +// select 1 from child_tbl join 
parent_tbl on where [AND ({ IS NULL OR}... NOT IN ())] limit 1 +// E.g: +// Child (c1, c2) references Parent (p1, p2) +// update Parent set p1 = 1 where id = 1 +// verify query: +// select 1 from Child join Parent on Parent.p1 = Child.c1 and Parent.p2 = Child.c2 +// where Parent.id = 1 and (1 IS NULL OR (child.c1) NOT IN ((1))) limit 1 +func createFkVerifyOpForChildFKForUpdate(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update, cFk vindexes.ChildFKInfo) (ops.Operator, error) { + // ON UPDATE RESTRICT foreign keys that require validation, should only be allowed in the case where we + // are verifying all the FKs on vtgate level. + if !ctx.VerifyAllFKs { + return nil, vterrors.VT12002() + } + parentTblExpr := updStmt.TableExprs[0].(*sqlparser.AliasedTableExpr) + parentTbl, err := parentTblExpr.TableName() + if err != nil { + return nil, err + } + childTbl := cFk.Table.GetTableName() + var joinCond sqlparser.Expr + for idx := range cFk.ParentColumns { + joinExpr := &sqlparser.ComparisonExpr{ + Operator: sqlparser.EqualOp, + Left: sqlparser.NewColNameWithQualifier(cFk.ParentColumns[idx].String(), parentTbl), + Right: sqlparser.NewColNameWithQualifier(cFk.ChildColumns[idx].String(), childTbl), + } + + if idx == 0 { + joinCond = joinExpr + continue + } + joinCond = &sqlparser.AndExpr{Left: joinCond, Right: joinExpr} + } + + var whereCond sqlparser.Expr + // add existing where condition on the update statement + if updStmt.Where != nil { + whereCond = prefixColNames(parentTbl, updStmt.Where.Expr) + } + + // We don't want to fail the RESTRICT for the case where the parent columns remains unchanged on the update. + // We need to add a condition to the where clause to handle this case. + // The additional condition looks like [AND ({ IS NULL OR}... NOT IN ())]. + // If any of the parent columns is being set to NULL, then we don't need this condition. + // However, we don't necessarily know on Plan time if the Expr being updated to is NULL or not. 
Specifically, bindVariables in Prepared statements can be NULL on runtime. + // Therefore, in the condition we create, we also need to make it resilient to NULL values. Therefore we check if each individual value is NULL or not and OR it with the main condition. + // For example, if we are setting `update child cola = :v1 and colb = :v2`, then on the parent, the where condition would look something like this - + // `:v1 IS NULL OR :v2 IS NULL OR (cola, colb) NOT IN ((:v1,:v2))` + // So, if either of :v1 or :v2 is NULL, then the entire condition is true (which is the same as not having the condition when :v1 or :v2 is NULL). + compExpr := nullSafeNotInComparison(updStmt.Exprs, cFk) + if compExpr != nil { + whereCond = sqlparser.AndExpressions(whereCond, compExpr) + } + + return createSelectionOp(ctx, + sqlparser.SelectExprs{sqlparser.NewAliasedExpr(sqlparser.NewIntLiteral("1"), "")}, + []sqlparser.TableExpr{ + sqlparser.NewJoinTableExpr( + parentTblExpr, + sqlparser.NormalJoinType, + sqlparser.NewAliasedTableExpr(childTbl, ""), + sqlparser.NewJoinCondition(joinCond, nil)), + }, + sqlparser.NewWhere(sqlparser.WhereClause, whereCond), + sqlparser.NewLimitWithoutOffset(1), + sqlparser.ShareModeLock) +} + +// nullSafeNotInComparison is used to compare the child columns in the foreign key constraint aren't the same as the updateExpressions exactly. +// This comparison has to be null safe so we create an expression which looks like the following for a query like `update child cola = :v1 and colb = :v2` - +// `:v1 IS NULL OR :v2 IS NULL OR (cola, colb) NOT IN ((:v1,:v2))` +// So, if either of :v1 or :v2 is NULL, then the entire condition is true (which is the same as not having the condition when :v1 or :v2 is NULL) +// This expression is used in cascading SET NULLs and in verifying whether an update should be restricted. 
+func nullSafeNotInComparison(updateExprs sqlparser.UpdateExprs, cFk vindexes.ChildFKInfo) sqlparser.Expr { + var updateValues sqlparser.ValTuple + for _, updateExpr := range updateExprs { + colIdx := cFk.ParentColumns.FindColumn(updateExpr.Name.Name) + if colIdx >= 0 { + if sqlparser.IsNull(updateExpr.Expr) { + return nil + } + updateValues = append(updateValues, updateExpr.Expr) + } + } + // Create a ValTuple of child column names + var valTuple sqlparser.ValTuple + for _, column := range cFk.ChildColumns { + valTuple = append(valTuple, sqlparser.NewColNameWithQualifier(column.String(), cFk.Table.GetTableName())) + } + var finalExpr sqlparser.Expr = sqlparser.NewComparisonExpr(sqlparser.NotInOp, valTuple, sqlparser.ValTuple{updateValues}, nil) + for _, value := range updateValues { + finalExpr = &sqlparser.OrExpr{ + Left: &sqlparser.IsExpr{ + Left: value, + Right: sqlparser.IsNullOp, + }, + Right: finalExpr, + } + } + + return finalExpr } diff --git a/go/vt/vtgate/planbuilder/operators/vindex.go b/go/vt/vtgate/planbuilder/operators/vindex.go index 79104fc7364..51671d4af70 100644 --- a/go/vt/vtgate/planbuilder/operators/vindex.go +++ b/go/vt/vtgate/planbuilder/operators/vindex.go @@ -17,7 +17,7 @@ limitations under the License. 
package operators import ( - "vitess.io/vitess/go/slices2" + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" @@ -52,7 +52,7 @@ type ( const VindexUnsupported = "WHERE clause for vindex function must be of the form id = or id in(,...)" // Introduces implements the Operator interface -func (v *Vindex) Introduces() semantics.TableSet { +func (v *Vindex) introducesTableID() semantics.TableSet { return v.Solved } @@ -62,17 +62,21 @@ func (v *Vindex) Clone([]ops.Operator) ops.Operator { return &clone } -func (v *Vindex) AddColumn(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, _, addToGroupBy bool) (ops.Operator, int, error) { - if addToGroupBy { - return nil, 0, vterrors.VT13001("tried to add group by to a table") +func (v *Vindex) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool, ae *sqlparser.AliasedExpr) (int, error) { + if gb { + return 0, vterrors.VT13001("tried to add group by to a table") } - - offset, err := addColumn(ctx, v, expr.Expr) - if err != nil { - return nil, 0, err + if reuse { + offset, err := v.FindCol(ctx, ae.Expr, true) + if err != nil { + return 0, err + } + if offset > -1 { + return offset, nil + } } - return v, offset, nil + return addColumn(ctx, v, ae.Expr) } func colNameToExpr(c *sqlparser.ColName) *sqlparser.AliasedExpr { @@ -82,8 +86,22 @@ func colNameToExpr(c *sqlparser.ColName) *sqlparser.AliasedExpr { } } -func (v *Vindex) GetColumns() ([]*sqlparser.AliasedExpr, error) { - return slices2.Map(v.Columns, colNameToExpr), nil +func (v *Vindex) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) { + for idx, col := range v.Columns { + if ctx.SemTable.EqualsExprWithDeps(expr, col) { + return idx, nil + } + } + + return -1, nil +} + +func (v *Vindex) GetColumns(*plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return slice.Map(v.Columns, colNameToExpr), nil +} + +func (v 
*Vindex) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return transformColumnsToSelectExprs(ctx, v) } func (v *Vindex) GetOrdering() ([]ops.OrderBy, error) { @@ -93,6 +111,7 @@ func (v *Vindex) GetOrdering() ([]ops.OrderBy, error) { func (v *Vindex) GetColNames() []*sqlparser.ColName { return v.Columns } + func (v *Vindex) AddCol(col *sqlparser.ColName) { v.Columns = append(v.Columns, col) } @@ -154,10 +173,6 @@ func (v *Vindex) TablesUsed() []string { return []string{v.Table.Table.Name.String()} } -func (v *Vindex) Description() ops.OpDescription { - return ops.OpDescription{OperatorType: "Vindex"} -} - func (v *Vindex) ShortDescription() string { return v.Vindex.String() } diff --git a/go/vt/vtgate/planbuilder/ordered_aggregate.go b/go/vt/vtgate/planbuilder/ordered_aggregate.go index 2c570782c31..6163900e674 100644 --- a/go/vt/vtgate/planbuilder/ordered_aggregate.go +++ b/go/vt/vtgate/planbuilder/ordered_aggregate.go @@ -17,16 +17,8 @@ limitations under the License. package planbuilder import ( - "fmt" - "strconv" - "strings" - - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) @@ -60,12 +52,6 @@ var _ logicalPlan = (*orderedAggregate)(nil) // } type orderedAggregate struct { resultsBuilder - extraDistinct *sqlparser.ColName - - // preProcess is true if one of the aggregates needs preprocessing. - preProcess bool - - aggrOnEngine bool // aggregates specifies the aggregation parameters for each // aggregation function: function opcode and input column number. @@ -78,158 +64,11 @@ type orderedAggregate struct { truncateColumnCount int } -// checkAggregates analyzes the select expression for aggregates. 
If it determines -// that a primitive is needed to handle the aggregation, it builds an orderedAggregate -// primitive and returns it. It returns a groupByHandler if there is aggregation it -// can handle. -func (pb *primitiveBuilder) checkAggregates(sel *sqlparser.Select) error { - rb, isRoute := pb.plan.(*route) - if isRoute && rb.isSingleShard() { - // since we can push down all of the aggregation to the route, - // we don't need to do anything else here - return nil - } - - // Check if we can allow aggregates. - hasAggregates := sqlparser.ContainsAggregation(sel.SelectExprs) || len(sel.GroupBy) > 0 - if !hasAggregates && !sel.Distinct { - return nil - } - - // The query has aggregates. We can proceed only - // if the underlying primitive is a route because - // we need the ability to push down group by and - // order by clauses. - if !isRoute { - if hasAggregates { - return vterrors.VT12001("cross-shard query with aggregates") - } - pb.plan = newDistinctV3(pb.plan) - return nil - } - - // If there is a distinct clause, we can check the select list - // to see if it has a unique vindex reference. For example, - // if the query was 'select distinct id, col from t' (with id - // as a unique vindex), then the distinct operation can be - // safely pushed down because the unique vindex guarantees - // that each id can only be in a single shard. Without the - // unique vindex property, the id could come from multiple - // shards, which will require us to perform the grouping - // at the vtgate level. - if sel.Distinct { - for _, selectExpr := range sel.SelectExprs { - switch selectExpr := selectExpr.(type) { - case *sqlparser.AliasedExpr: - vindex := pb.st.Vindex(selectExpr.Expr, rb) - if vindex != nil && vindex.IsUnique() { - return nil - } - } - } - } - - // The group by clause could also reference a unique vindex. 
The above - // example could itself have been written as - // 'select id, col from t group by id, col', or a query could be like - // 'select id, count(*) from t group by id'. In the above cases, - // the grouping can be done at the shard level, which allows the entire query - // to be pushed down. In order to perform this analysis, we're going to look - // ahead at the group by clause to see if it references a unique vindex. - if pb.groupByHasUniqueVindex(sel, rb) { - return nil - } - - // We need an aggregator primitive. - oa := &orderedAggregate{} - oa.resultsBuilder = newResultsBuilder(rb, oa) - pb.plan = oa - pb.plan.Reorder(0) - return nil -} - -// groupbyHasUniqueVindex looks ahead at the group by expression to see if -// it references a unique vindex. -// -// The vitess group by rules are different from MySQL because it's not possible -// to match the MySQL behavior without knowing the schema. For example: -// 'select id as val from t group by val' will have different interpretations -// under MySQL depending on whether t has a val column or not. -// In vitess, we always assume that 'val' references 'id'. This is achieved -// by the symbol table resolving against the select list before searching -// the tables. -// -// In order to look ahead, we have to overcome the chicken-and-egg problem: -// group by needs the select aliases to be built. Select aliases are built -// on push-down. But push-down decision depends on whether group by expressions -// reference a vindex. -// To overcome this, the look-ahead has to perform a search that matches -// the group by analyzer. The flow is similar to oa.PushGroupBy, except that -// we don't search the ResultColumns because they're not created yet. Also, -// error conditions are treated as no match for simplicity; They will be -// subsequently caught downstream. 
-func (pb *primitiveBuilder) groupByHasUniqueVindex(sel *sqlparser.Select, rb *route) bool { - for _, expr := range sel.GroupBy { - var matchedExpr sqlparser.Expr - switch node := expr.(type) { - case *sqlparser.ColName: - if expr := findAlias(node, sel.SelectExprs); expr != nil { - matchedExpr = expr - } else { - matchedExpr = node - } - case *sqlparser.Literal: - if node.Type != sqlparser.IntVal { - continue - } - num, err := strconv.ParseInt(string(node.Val), 0, 64) - if err != nil { - continue - } - if num < 1 || num > int64(len(sel.SelectExprs)) { - continue - } - expr, ok := sel.SelectExprs[num-1].(*sqlparser.AliasedExpr) - if !ok { - continue - } - matchedExpr = expr.Expr - default: - continue - } - vindex := pb.st.Vindex(matchedExpr, rb) - if vindex != nil && vindex.IsUnique() { - return true - } - } - return false -} - -func findAlias(colname *sqlparser.ColName, selects sqlparser.SelectExprs) sqlparser.Expr { - // Qualified column names cannot match an (unqualified) alias. - if !colname.Qualifier.IsEmpty() { - return nil - } - // See if this references an alias. 
- for _, selectExpr := range selects { - selectExpr, ok := selectExpr.(*sqlparser.AliasedExpr) - if !ok { - continue - } - if colname.Name.Equal(selectExpr.As) { - return selectExpr.Expr - } - } - return nil -} - // Primitive implements the logicalPlan interface func (oa *orderedAggregate) Primitive() engine.Primitive { input := oa.input.Primitive() if len(oa.groupByKeys) == 0 { return &engine.ScalarAggregate{ - PreProcess: oa.preProcess, - AggrOnEngine: oa.aggrOnEngine, Aggregates: oa.aggregates, TruncateColumnCount: oa.truncateColumnCount, Input: input, @@ -237,8 +76,6 @@ func (oa *orderedAggregate) Primitive() engine.Primitive { } return &engine.OrderedAggregate{ - PreProcess: oa.preProcess, - AggrOnEngine: oa.aggrOnEngine, Aggregates: oa.aggregates, GroupByKeys: oa.groupByKeys, TruncateColumnCount: oa.truncateColumnCount, @@ -246,138 +83,8 @@ func (oa *orderedAggregate) Primitive() engine.Primitive { } } -func (oa *orderedAggregate) pushAggr(pb *primitiveBuilder, expr *sqlparser.AliasedExpr, origin logicalPlan) (rc *resultColumn, colNumber int, err error) { - aggrFunc, _ := expr.Expr.(sqlparser.AggrFunc) - origOpcode := popcode.SupportedAggregates[strings.ToLower(aggrFunc.AggrName())] - opcode := origOpcode - if aggrFunc.GetArgs() != nil && - len(aggrFunc.GetArgs()) != 1 { - return nil, 0, vterrors.VT12001(fmt.Sprintf("only one expression is allowed inside aggregates: %s", sqlparser.String(expr))) - } - - handleDistinct, innerAliased, err := oa.needDistinctHandling(pb, expr, opcode) - if err != nil { - return nil, 0, err - } - if handleDistinct { - if oa.extraDistinct != nil { - return nil, 0, vterrors.VT12001(fmt.Sprintf("only one DISTINCT aggregation allowed in a SELECT: %s", sqlparser.String(expr))) - } - // Push the expression that's inside the aggregate. - // The column will eventually get added to the group by and order by clauses. 
- newBuilder, _, innerCol, err := planProjection(pb, oa.input, innerAliased, origin) - if err != nil { - return nil, 0, err - } - pb.plan = newBuilder - col, err := BuildColName(oa.input.ResultColumns(), innerCol) - if err != nil { - return nil, 0, err - } - oa.extraDistinct = col - oa.preProcess = true - switch opcode { - case popcode.AggregateCount: - opcode = popcode.AggregateCountDistinct - case popcode.AggregateSum: - opcode = popcode.AggregateSumDistinct - } - oa.aggregates = append(oa.aggregates, &engine.AggregateParams{ - Opcode: opcode, - Col: innerCol, - Alias: expr.ColumnName(), - OrigOpcode: origOpcode, - }) - } else { - newBuilder, _, innerCol, err := planProjection(pb, oa.input, expr, origin) - if err != nil { - return nil, 0, err - } - pb.plan = newBuilder - oa.aggregates = append(oa.aggregates, &engine.AggregateParams{ - Opcode: opcode, - Col: innerCol, - OrigOpcode: origOpcode, - }) - } - - // Build a new rc with oa as origin because it's semantically different - // from the expression we pushed down. - rc = newResultColumn(expr, oa) - oa.resultColumns = append(oa.resultColumns, rc) - return rc, len(oa.resultColumns) - 1, nil -} - -// needDistinctHandling returns true if oa needs to handle the distinct clause. -// If true, it will also return the aliased expression that needs to be pushed -// down into the underlying route. 
-func (oa *orderedAggregate) needDistinctHandling(pb *primitiveBuilder, expr *sqlparser.AliasedExpr, opcode popcode.AggregateOpcode) (bool, *sqlparser.AliasedExpr, error) { - var innerAliased *sqlparser.AliasedExpr - aggr, ok := expr.Expr.(sqlparser.AggrFunc) - - if !ok { - return false, nil, vterrors.VT03012(sqlparser.String(expr)) - } - - if !aggr.IsDistinct() { - return false, nil, nil - } - if opcode != popcode.AggregateCount && opcode != popcode.AggregateSum && opcode != popcode.AggregateCountStar { - return false, nil, nil - } - - innerAliased = &sqlparser.AliasedExpr{Expr: aggr.GetArg()} - - rb, ok := oa.input.(*route) - if !ok { - // Unreachable - return true, innerAliased, nil - } - vindex := pb.st.Vindex(innerAliased.Expr, rb) - if vindex != nil && vindex.IsUnique() { - return false, nil, nil - } - return true, innerAliased, nil -} - -// Wireup implements the logicalPlan interface -// If text columns are detected in the keys, then the function modifies -// the primitive to pull a corresponding weight_string from mysql and -// compare those instead. This is because we currently don't have the -// ability to mimic mysql's collation behavior. 
-func (oa *orderedAggregate) Wireup(plan logicalPlan, jt *jointab) error { - for i, gbk := range oa.groupByKeys { - rc := oa.resultColumns[gbk.KeyCol] - if sqltypes.IsText(rc.column.typ) { - weightcolNumber, err := oa.input.SupplyWeightString(gbk.KeyCol, gbk.FromGroupBy) - if err != nil { - _, isUnsupportedErr := err.(UnsupportedSupplyWeightString) - if isUnsupportedErr { - continue - } - return err - } - oa.weightStrings[rc] = weightcolNumber - oa.groupByKeys[i].WeightStringCol = weightcolNumber - oa.groupByKeys[i].KeyCol = weightcolNumber - oa.truncateColumnCount = len(oa.resultColumns) - } - } - for _, key := range oa.aggregates { - switch key.Opcode { - case popcode.AggregateCount: - if key.Alias == "" { - key.Alias = key.Opcode.String() - } - key.Opcode = popcode.AggregateSum - } - } - - return oa.input.Wireup(plan, jt) -} - -func (oa *orderedAggregate) WireupGen4(ctx *plancontext.PlanningContext) error { - return oa.input.WireupGen4(ctx) +func (oa *orderedAggregate) Wireup(ctx *plancontext.PlanningContext) error { + return oa.input.Wireup(ctx) } // OutputColumns implements the logicalPlan interface diff --git a/go/vt/vtgate/planbuilder/ordering.go b/go/vt/vtgate/planbuilder/ordering.go deleted file mode 100644 index 2a8613620e7..00000000000 --- a/go/vt/vtgate/planbuilder/ordering.go +++ /dev/null @@ -1,356 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" -) - -type v3Order struct { - *sqlparser.Order - fromGroupBy bool -} - -type v3OrderBy []*v3Order - -func planOrdering(pb *primitiveBuilder, input logicalPlan, orderBy v3OrderBy) (logicalPlan, error) { - switch node := input.(type) { - case *simpleProjection, *vindexFunc: - if len(orderBy) == 0 { - return node, nil - } - return newMemorySort(node, orderBy) - case *distinct: - // TODO: this is weird, but needed - newInput, err := planOrdering(pb, node.input, orderBy) - node.input = newInput - return node, err - case *pulloutSubquery: - plan, err := planOrdering(pb, node.underlying, orderBy) - if err != nil { - return nil, err - } - node.underlying = plan - return node, nil - case *route: - return planRouteOrdering(orderBy, node) - case *join: - return planJoinOrdering(pb, orderBy, node) - case *orderedAggregate: - return planOAOrdering(pb, orderBy, node) - case *mergeSort: - return nil, vterrors.VT12001("ORDER BY on top of ORDER BY") - case *concatenate: - if len(orderBy) == 0 { - return input, nil - } - return nil, vterrors.VT12001("ORDER BY on top of UNION") - } - return nil, vterrors.VT13001(fmt.Sprintf("unreachable %T.ordering", input)) -} - -func planOAOrdering(pb *primitiveBuilder, orderBy v3OrderBy, oa *orderedAggregate) (logicalPlan, error) { - // The requested order must be such that the ordering can be done - // before the group by, which will allow us to push it down to the - // route. This is actually true in most use cases, except for situations - // where ordering is requested on values of an aggregate result. - // Such constructs will need to be handled by a separate 'Sorter' - // primitive, after aggregation is done. 
For example, the following - // constructs are allowed: - // 'select a, b, count(*) from t group by a, b order by a desc, b asc' - // 'select a, b, count(*) from t group by a, b order by b' - // The following construct is not allowed: - // 'select a, count(*) from t group by a order by count(*)' - // Treat order by null as nil order by. - if len(orderBy) == 1 { - if _, ok := orderBy[0].Expr.(*sqlparser.NullVal); ok { - orderBy = nil - } - } - - // referenced tracks the keys referenced by the order by clause. - referenced := make([]bool, len(oa.groupByKeys)) - postSort := false - selOrderBy := make(v3OrderBy, 0, len(orderBy)) - for _, order := range orderBy { - // Identify the order by column. - var orderByCol *column - switch expr := order.Expr.(type) { - case *sqlparser.Literal: - num, err := ResultFromNumber(oa.resultColumns, expr, "order clause") - if err != nil { - return nil, err - } - orderByCol = oa.resultColumns[num].column - case *sqlparser.ColName: - orderByCol = expr.Metadata.(*column) - case *sqlparser.CastExpr: - col, ok := expr.Expr.(*sqlparser.ColName) - if !ok { - return nil, complexOrderBy(sqlparser.String(expr)) - } - orderByCol = col.Metadata.(*column) - case *sqlparser.ConvertExpr: - col, ok := expr.Expr.(*sqlparser.ColName) - if !ok { - return nil, complexOrderBy(sqlparser.String(expr)) - } - orderByCol = col.Metadata.(*column) - default: - return nil, complexOrderBy(sqlparser.String(expr)) - } - - // Match orderByCol against the group by columns. - found := false - for j, groupBy := range oa.groupByKeys { - if oa.resultColumns[groupBy.KeyCol].column != orderByCol { - continue - } - - found = true - referenced[j] = true - order.fromGroupBy = groupBy.FromGroupBy - selOrderBy = append(selOrderBy, order) - break - } - if !found { - postSort = true - } - } - - // Append any unreferenced keys at the end of the order by. - for i, groupByKey := range oa.groupByKeys { - if referenced[i] { - continue - } - // Build a brand new reference for the key. 
- col, err := BuildColName(oa.input.ResultColumns(), groupByKey.KeyCol) - if err != nil { - return nil, vterrors.Wrapf(err, "generating ORDER BY clause") - } - selOrderBy = append(selOrderBy, &v3Order{ - Order: &sqlparser.Order{Expr: col, Direction: sqlparser.AscOrder}, - fromGroupBy: groupByKey.FromGroupBy, - }) - } - - // Append the distinct aggregate if any. - if oa.extraDistinct != nil { - selOrderBy = append(selOrderBy, &v3Order{ - Order: &sqlparser.Order{Expr: oa.extraDistinct, Direction: sqlparser.AscOrder}, - fromGroupBy: true, - }) - } - - // Push down the order by. - // It's ok to push the original AST down because all references - // should point to the route. Only aggregate functions are originated - // by node, and we currently don't allow the ORDER BY to reference them. - plan, err := planOrdering(pb, oa.input, selOrderBy) - if err != nil { - return nil, err - } - oa.input = plan - if postSort { - return newMemorySort(oa, orderBy) - } - return oa, nil -} - -func planJoinOrdering(pb *primitiveBuilder, orderBy v3OrderBy, node *join) (logicalPlan, error) { - isSpecial := false - switch len(orderBy) { - case 0: - isSpecial = true - case 1: - if _, ok := orderBy[0].Expr.(*sqlparser.NullVal); ok { - isSpecial = true - } else if f, ok := orderBy[0].Expr.(*sqlparser.FuncExpr); ok { - if f.Name.Lowered() == "rand" { - isSpecial = true - } - } - } - if isSpecial { - l, err := planOrdering(pb, node.Left, orderBy) - if err != nil { - return nil, err - } - node.Left = l - r, err := planOrdering(pb, node.Right, orderBy) - if err != nil { - return nil, err - } - node.Right = r - return node, nil - } - - for _, order := range orderBy { - if e, ok := order.Expr.(*sqlparser.Literal); ok { - // This block handles constructs that use ordinals for 'ORDER BY'. For example: - // SELECT a, b, c FROM t1, t2 ORDER BY 1, 2, 3. 
- num, err := ResultFromNumber(node.ResultColumns(), e, "order clause") - if err != nil { - return nil, err - } - if node.ResultColumns()[num].column.Origin().Order() > node.Left.Order() { - return newMemorySort(node, orderBy) - } - } else { - // Analyze column references within the expression to make sure they all - // go to the left. - err := sqlparser.Walk(func(in sqlparser.SQLNode) (kontinue bool, err error) { - switch e := in.(type) { - case *sqlparser.ColName: - if e.Metadata.(*column).Origin().Order() > node.Left.Order() { - return false, vterrors.VT12001("ORDER BY spans across shards") - } - case *sqlparser.Subquery: - // Unreachable because ResolveSymbols perfoms this check up above. - return false, vterrors.VT12001("ORDER BY has subquery") - } - return true, nil - }, order.Expr) - if err != nil { - return newMemorySort(node, orderBy) - } - } - } - - // There were no errors. We can push the order by to the left-most route. - l, err := planOrdering(pb, node.Left, orderBy) - if err != nil { - return nil, err - } - node.Left = l - // Still need to push an empty order by to the right. - r, err := planOrdering(pb, node.Right, nil) - if err != nil { - return nil, err - } - node.Right = r - return node, nil -} - -func planRouteOrdering(orderBy v3OrderBy, node *route) (logicalPlan, error) { - switch len(orderBy) { - case 0: - return node, nil - case 1: - isSpecial := false - if _, ok := orderBy[0].Expr.(*sqlparser.NullVal); ok { - isSpecial = true - } else if f, ok := orderBy[0].Expr.(*sqlparser.FuncExpr); ok { - if f.Name.Lowered() == "rand" { - isSpecial = true - } - } - if isSpecial { - node.Select.AddOrder(orderBy[0].Order) - return node, nil - } - } - - if node.isSingleShard() { - for _, order := range orderBy { - node.Select.AddOrder(order.Order) - } - return node, nil - } - - // If it's a scatter, we have to populate the OrderBy field. 
- for _, order := range orderBy { - colNumber := -1 - switch expr := order.Expr.(type) { - case *sqlparser.Literal: - var err error - if colNumber, err = ResultFromNumber(node.resultColumns, expr, "order clause"); err != nil { - return nil, err - } - case *sqlparser.ColName: - c := expr.Metadata.(*column) - for i, rc := range node.resultColumns { - if rc.column == c { - colNumber = i - break - } - } - case *sqlparser.UnaryExpr: - col, ok := expr.Expr.(*sqlparser.ColName) - if !ok { - return nil, complexOrderBy(sqlparser.String(expr)) - } - c := col.Metadata.(*column) - for i, rc := range node.resultColumns { - if rc.column == c { - colNumber = i - break - } - } - default: - return nil, complexOrderBy(sqlparser.String(expr)) - } - // If column is not found, then the order by is referencing - // a column that's not on the select list. - if colNumber == -1 { - return nil, vterrors.VT12001(fmt.Sprintf("in scatter query: ORDER BY must reference a column in the SELECT list: %s", sqlparser.String(order))) - } - starColFixedIndex := colNumber - if selectStatement, ok := node.Select.(*sqlparser.Select); ok { - for i, selectExpr := range selectStatement.SelectExprs { - if starExpr, ok := selectExpr.(*sqlparser.StarExpr); ok { - if i < colNumber { - tableName := starExpr.TableName - tableMap := node.resultColumns[i].column.st.tables - var tableMeta *table - if tableName.IsEmpty() && len(tableMap) == 1 { - for j := range tableMap { - tableMeta = tableMap[j] - } - } else { - tableMeta = tableMap[tableName] - } - if tableMeta == nil || !tableMeta.isAuthoritative { - return nil, vterrors.VT12001("in scatter query, cannot ORDER BY a column that comes after `*` expressions in the SELECT list") - } - starColFixedIndex += len(tableMeta.columnNames) - 1 - } - } - } - } - - // TODO(king-11) pass in collation here - ob := engine.OrderByParams{ - Col: colNumber, - WeightStringCol: -1, - Desc: order.Direction == sqlparser.DescOrder, - StarColFixedIndex: starColFixedIndex, - FromGroupBy: 
order.fromGroupBy, - CollationID: collations.Unknown, - } - node.eroute.OrderBy = append(node.eroute.OrderBy, ob) - - node.Select.AddOrder(order.Order) - } - return newMergeSort(node), nil -} - -func complexOrderBy(s string) error { - return vterrors.VT12001(fmt.Sprintf("in scatter query: complex ORDER BY expression: %s", s)) -} diff --git a/go/vt/vtgate/planbuilder/other_read.go b/go/vt/vtgate/planbuilder/other_read.go index f9666ef0e4d..1f1c1a1a6ba 100644 --- a/go/vt/vtgate/planbuilder/other_read.go +++ b/go/vt/vtgate/planbuilder/other_read.go @@ -35,7 +35,7 @@ func buildOtherReadAndAdmin(sql string, vschema plancontext.VSchema) (*planResul return newPlanResult(&engine.Send{ Keyspace: keyspace, TargetDestination: destination, - Query: sql, //This is original sql query to be passed as the parser can provide partial ddl AST. + Query: sql, // This is original sql query to be passed as the parser can provide partial ddl AST. SingleShardOnly: true, }), nil } diff --git a/go/vt/vtgate/planbuilder/plan_test.go b/go/vt/vtgate/planbuilder/plan_test.go index e064620f5c8..38c579502fe 100644 --- a/go/vt/vtgate/planbuilder/plan_test.go +++ b/go/vt/vtgate/planbuilder/plan_test.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Vitess Authors. +Copyright 2021 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,919 +17,68 @@ limitations under the License. 
package planbuilder import ( - "bytes" - "context" - "encoding/json" - "fmt" - "math/rand" - "os" - "path/filepath" - "runtime/debug" - "strings" "testing" - "github.com/nsf/jsondiff" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/test/utils" - "vitess.io/vitess/go/vt/key" - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - vschemapb "vitess.io/vitess/go/vt/proto/vschema" - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - oprewriters "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) -func makeTestOutput(t *testing.T) string { - testOutputTempDir := utils.MakeTestOutput(t, "testdata", "plan_test") - - return testOutputTempDir -} - -func TestPlan(t *testing.T) { - vschemaWrapper := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - tabletType: topodatapb.TabletType_PRIMARY, - sysVarEnabled: true, - } - testOutputTempDir := makeTestOutput(t) - - // You will notice that some tests expect user.Id instead of user.id. - // This is because we now pre-create vindex columns in the symbol - // table, which come from vschema. In the test vschema, - // the column is named as Id. This is to make sure that - // column names are case-preserved, but treated as - // case-insensitive even if they come from the vschema. 
- testFile(t, "aggr_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "dml_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "from_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "filter_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "postprocess_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "select_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "symtab_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "unsupported_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "vindex_func_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "wireup_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "memory_sort_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "use_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "set_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "union_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "large_union_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "transaction_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "lock_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "large_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "ddl_cases_no_default_keyspace.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "flush_cases_no_default_keyspace.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "show_cases_no_default_keyspace.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "stream_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "info_schema80_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "reference_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "vexplain_cases.json", testOutputTempDir, vschemaWrapper, 
false) - testFile(t, "misc_cases.json", testOutputTempDir, vschemaWrapper, false) -} - -func TestSystemTables57(t *testing.T) { - // first we move everything to use 5.7 logic - servenv.SetMySQLServerVersionForTest("5.7") - defer servenv.SetMySQLServerVersionForTest("") - vschemaWrapper := &vschemaWrapper{v: loadSchema(t, "vschemas/schema.json", true)} - testOutputTempDir := makeTestOutput(t) - testFile(t, "info_schema57_cases.json", testOutputTempDir, vschemaWrapper, false) -} - -func TestSysVarSetDisabled(t *testing.T) { - vschemaWrapper := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - sysVarEnabled: false, - } - - testFile(t, "set_sysvar_disabled_cases.json", makeTestOutput(t), vschemaWrapper, false) -} - -func TestViews(t *testing.T) { - vschemaWrapper := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - enableViews: true, - } - - testFile(t, "view_cases.json", makeTestOutput(t), vschemaWrapper, false) -} - -func TestOne(t *testing.T) { - oprewriters.DebugOperatorTree = true - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - } - - testFile(t, "onecase.json", "", vschema, false) -} - -func TestOneTPCC(t *testing.T) { - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/tpcc_schema.json", true), - } - - testFile(t, "onecase.json", "", vschema, false) -} - -func TestOneWithMainAsDefault(t *testing.T) { - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{ - Name: "main", - Sharded: false, - }, - } - - testFile(t, "onecase.json", "", vschema, false) -} - -func TestOneWithSecondUserAsDefault(t *testing.T) { - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{ - Name: "second_user", - Sharded: true, - }, - } - - testFile(t, "onecase.json", "", vschema, false) -} - -func TestOneWithUserAsDefault(t *testing.T) { - vschema := &vschemaWrapper{ - v: loadSchema(t, 
"vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{ - Name: "user", - Sharded: true, - }, - } - - testFile(t, "onecase.json", "", vschema, false) -} - -func TestOneWithTPCHVSchema(t *testing.T) { - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/tpch_schema.json", true), - sysVarEnabled: true, - } - - testFile(t, "onecase.json", "", vschema, false) -} - -func TestOneWith57Version(t *testing.T) { - // first we move everything to use 5.7 logic - servenv.SetMySQLServerVersionForTest("5.7") - defer servenv.SetMySQLServerVersionForTest("") - vschema := &vschemaWrapper{v: loadSchema(t, "vschemas/schema.json", true)} - - testFile(t, "onecase.json", "", vschema, false) -} - -func TestRubyOnRailsQueries(t *testing.T) { - vschemaWrapper := &vschemaWrapper{ - v: loadSchema(t, "vschemas/rails_schema.json", true), - sysVarEnabled: true, - } - - testFile(t, "rails_cases.json", makeTestOutput(t), vschemaWrapper, false) -} - -func TestOLTP(t *testing.T) { - vschemaWrapper := &vschemaWrapper{ - v: loadSchema(t, "vschemas/oltp_schema.json", true), - sysVarEnabled: true, - } - - testFile(t, "oltp_cases.json", makeTestOutput(t), vschemaWrapper, false) -} - -func TestTPCC(t *testing.T) { - vschemaWrapper := &vschemaWrapper{ - v: loadSchema(t, "vschemas/tpcc_schema.json", true), - sysVarEnabled: true, - } - - testFile(t, "tpcc_cases.json", makeTestOutput(t), vschemaWrapper, false) -} - -func TestTPCH(t *testing.T) { - vschemaWrapper := &vschemaWrapper{ - v: loadSchema(t, "vschemas/tpch_schema.json", true), - sysVarEnabled: true, - } - - testFile(t, "tpch_cases.json", makeTestOutput(t), vschemaWrapper, false) -} - -func BenchmarkOLTP(b *testing.B) { - benchmarkWorkload(b, "oltp") -} - -func BenchmarkTPCC(b *testing.B) { - benchmarkWorkload(b, "tpcc") -} - -func BenchmarkTPCH(b *testing.B) { - benchmarkWorkload(b, "tpch") -} - -func benchmarkWorkload(b *testing.B, name string) { - vschemaWrapper := &vschemaWrapper{ - v: loadSchema(b, "vschemas/"+name+"_schema.json", 
true), - sysVarEnabled: true, - } - - testCases := readJSONTests(name + "_cases.json") - b.ResetTimer() - for _, version := range plannerVersions { - b.Run(version.String(), func(b *testing.B) { - benchmarkPlanner(b, version, testCases, vschemaWrapper) - }) - } -} - -func TestBypassPlanningShardTargetFromFile(t *testing.T) { - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{ - Name: "main", - Sharded: false, - }, - tabletType: topodatapb.TabletType_PRIMARY, - dest: key.DestinationShard("-80")} - - testFile(t, "bypass_shard_cases.json", makeTestOutput(t), vschema, false) -} - -func TestBypassPlanningKeyrangeTargetFromFile(t *testing.T) { - keyRange, _ := key.ParseShardingSpec("-") - - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{ - Name: "main", - Sharded: false, - }, - tabletType: topodatapb.TabletType_PRIMARY, - dest: key.DestinationExactKeyRange{KeyRange: keyRange[0]}, - } - - testFile(t, "bypass_keyrange_cases.json", makeTestOutput(t), vschema, false) -} - -func TestWithDefaultKeyspaceFromFile(t *testing.T) { - // We are testing this separately so we can set a default keyspace - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{ - Name: "main", - Sharded: false, - }, - tabletType: topodatapb.TabletType_PRIMARY, - } - ts := memorytopo.NewServer("cell1") - ts.CreateKeyspace(context.Background(), "main", &topodatapb.Keyspace{}) - ts.CreateKeyspace(context.Background(), "user", &topodatapb.Keyspace{}) - // Create a cache to use for lookups of the sidecar database identifier - // in use by each keyspace. 
- _, created := sidecardb.NewIdentifierCache(func(ctx context.Context, keyspace string) (string, error) { - ki, err := ts.GetKeyspace(ctx, keyspace) - if err != nil { - return "", err - } - return ki.SidecarDbName, nil - }) - require.True(t, created) - - testOutputTempDir := makeTestOutput(t) - testFile(t, "alterVschema_cases.json", testOutputTempDir, vschema, false) - testFile(t, "ddl_cases.json", testOutputTempDir, vschema, false) - testFile(t, "migration_cases.json", testOutputTempDir, vschema, false) - testFile(t, "flush_cases.json", testOutputTempDir, vschema, false) - testFile(t, "show_cases.json", testOutputTempDir, vschema, false) - testFile(t, "call_cases.json", testOutputTempDir, vschema, false) -} - -func TestWithDefaultKeyspaceFromFileSharded(t *testing.T) { - // We are testing this separately so we can set a default keyspace - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{ - Name: "second_user", - Sharded: true, - }, - tabletType: topodatapb.TabletType_PRIMARY, - } - - testOutputTempDir := makeTestOutput(t) - testFile(t, "select_cases_with_default.json", testOutputTempDir, vschema, false) -} - -func TestWithUserDefaultKeyspaceFromFileSharded(t *testing.T) { - // We are testing this separately so we can set a default keyspace - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{ - Name: "user", - Sharded: true, - }, - tabletType: topodatapb.TabletType_PRIMARY, - } - - testOutputTempDir := makeTestOutput(t) - testFile(t, "select_cases_with_user_as_default.json", testOutputTempDir, vschema, false) -} - -func TestWithSystemSchemaAsDefaultKeyspace(t *testing.T) { - // We are testing this separately so we can set a default keyspace - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{Name: "information_schema"}, - tabletType: topodatapb.TabletType_PRIMARY, - } - - testFile(t, 
"sysschema_default.json", makeTestOutput(t), vschema, false) -} - -func TestOtherPlanningFromFile(t *testing.T) { - // We are testing this separately so we can set a default keyspace - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{ - Name: "main", - Sharded: false, - }, - tabletType: topodatapb.TabletType_PRIMARY, - } - - testOutputTempDir := makeTestOutput(t) - testFile(t, "other_read_cases.json", testOutputTempDir, vschema, false) - testFile(t, "other_admin_cases.json", testOutputTempDir, vschema, false) -} - -func loadSchema(t testing.TB, filename string, setCollation bool) *vindexes.VSchema { - formal, err := vindexes.LoadFormal(locateFile(filename)) - if err != nil { - t.Fatal(err) - } - vschema := vindexes.BuildVSchema(formal) - if err != nil { - t.Fatal(err) - } - for _, ks := range vschema.Keyspaces { - if ks.Error != nil { - t.Fatal(ks.Error) - } - - // adding view in user keyspace - if ks.Keyspace.Name == "user" { - if err = vschema.AddView(ks.Keyspace.Name, - "user_details_view", - "select user.id, user_extra.col from user join user_extra on user.id = user_extra.user_id"); err != nil { - t.Fatal(err) - } - } - - // setting a default value to all the text columns in the tables of this keyspace - // so that we can "simulate" a real case scenario where the vschema is aware of - // columns' collations. 
- if setCollation { - for _, table := range ks.Tables { - for i, col := range table.Columns { - if sqltypes.IsText(col.Type) { - table.Columns[i].CollationName = "latin1_swedish_ci" - } - } - } - } - } - return vschema -} - -var _ plancontext.VSchema = (*vschemaWrapper)(nil) - -type vschemaWrapper struct { - v *vindexes.VSchema - keyspace *vindexes.Keyspace - tabletType topodatapb.TabletType - dest key.Destination - sysVarEnabled bool - version plancontext.PlannerVersion - enableViews bool -} - -func (vw *vschemaWrapper) GetPrepareData(stmtName string) *vtgatepb.PrepareData { - switch stmtName { - case "prep_one_param": - return &vtgatepb.PrepareData{ - PrepareStatement: "select 1 from user where id = :v1", - ParamsCount: 1, - } - case "prep_in_param": - return &vtgatepb.PrepareData{ - PrepareStatement: "select 1 from user where id in (:v1, :v2)", - ParamsCount: 2, - } - case "prep_no_param": - return &vtgatepb.PrepareData{ - PrepareStatement: "select 1 from user", - ParamsCount: 0, - } - } - return nil -} - -func (vw *vschemaWrapper) PlanPrepareStatement(ctx context.Context, query string) (*engine.Plan, sqlparser.Statement, error) { - plan, err := TestBuilder(query, vw, vw.currentDb()) - if err != nil { - return nil, nil, err - } - stmt, _, err := sqlparser.Parse2(query) - if err != nil { - return nil, nil, err - } - return plan, stmt, nil -} - -func (vw *vschemaWrapper) ClearPrepareData(lowered string) { -} - -func (vw *vschemaWrapper) StorePrepareData(string, *vtgatepb.PrepareData) {} - -func (vw *vschemaWrapper) GetUDV(name string) *querypb.BindVariable { - if strings.EqualFold(name, "prep_stmt") { - return sqltypes.StringBindVariable("select * from user where id in (?, ?, ?)") - } - return nil -} - -func (vw *vschemaWrapper) IsShardRoutingEnabled() bool { - return false -} - -func (vw *vschemaWrapper) GetVSchema() *vindexes.VSchema { - return vw.v -} - -func (vw *vschemaWrapper) GetSrvVschema() *vschemapb.SrvVSchema { - return &vschemapb.SrvVSchema{ - 
Keyspaces: map[string]*vschemapb.Keyspace{ - "user": { - Sharded: true, - Vindexes: map[string]*vschemapb.Vindex{}, - Tables: map[string]*vschemapb.Table{ - "user": {}, - }, +func TestBindingSubquery(t *testing.T) { + testcases := []struct { + query string + requiredTableSet semantics.TableSet + extractor func(p *sqlparser.Select) sqlparser.Expr + rewrite bool + }{ + { + query: "select (select col from tabl limit 1) as a from foo join tabl order by a + 1", + requiredTableSet: semantics.EmptyTableSet(), + extractor: func(sel *sqlparser.Select) sqlparser.Expr { + return sel.OrderBy[0].Expr }, + rewrite: true, + }, { + query: "select t.a from (select (select col from tabl limit 1) as a from foo join tabl) t", + requiredTableSet: semantics.EmptyTableSet(), + extractor: func(sel *sqlparser.Select) sqlparser.Expr { + return extractExpr(sel, 0) + }, + rewrite: true, + }, { + query: "select (select col from tabl where foo.id = 4 limit 1) as a from foo", + requiredTableSet: semantics.SingleTableSet(0), + extractor: func(sel *sqlparser.Select) sqlparser.Expr { + return extractExpr(sel, 0) + }, + rewrite: false, }, } -} - -func (vw *vschemaWrapper) ConnCollation() collations.ID { - return collations.Default() -} - -func (vw *vschemaWrapper) PlannerWarning(_ string) { -} - -func (vw *vschemaWrapper) ForeignKeyMode() string { - return "allow" -} - -func (vw *vschemaWrapper) AllKeyspace() ([]*vindexes.Keyspace, error) { - if vw.keyspace == nil { - return nil, vterrors.VT13001("keyspace not available") - } - return []*vindexes.Keyspace{vw.keyspace}, nil -} - -// FindKeyspace implements the VSchema interface -func (vw *vschemaWrapper) FindKeyspace(keyspace string) (*vindexes.Keyspace, error) { - if vw.keyspace == nil { - return nil, vterrors.VT13001("keyspace not available") - } - if vw.keyspace.Name == keyspace { - return vw.keyspace, nil - } - return nil, nil -} - -func (vw *vschemaWrapper) Planner() plancontext.PlannerVersion { - return vw.version -} - -// SetPlannerVersion 
implements the ContextVSchema interface -func (vw *vschemaWrapper) SetPlannerVersion(v plancontext.PlannerVersion) { - vw.version = v -} - -func (vw *vschemaWrapper) GetSemTable() *semantics.SemTable { - return nil -} - -func (vw *vschemaWrapper) KeyspaceExists(keyspace string) bool { - if vw.keyspace != nil { - return vw.keyspace.Name == keyspace - } - return false -} - -func (vw *vschemaWrapper) SysVarSetEnabled() bool { - return vw.sysVarEnabled -} - -func (vw *vschemaWrapper) TargetDestination(qualifier string) (key.Destination, *vindexes.Keyspace, topodatapb.TabletType, error) { - var keyspaceName string - if vw.keyspace != nil { - keyspaceName = vw.keyspace.Name - } - if vw.dest == nil && qualifier != "" { - keyspaceName = qualifier - } - if keyspaceName == "" { - return nil, nil, 0, vterrors.VT03007() - } - keyspace := vw.v.Keyspaces[keyspaceName] - if keyspace == nil { - return nil, nil, 0, vterrors.VT05003(keyspaceName) - } - return vw.dest, keyspace.Keyspace, vw.tabletType, nil - -} - -func (vw *vschemaWrapper) TabletType() topodatapb.TabletType { - return vw.tabletType -} - -func (vw *vschemaWrapper) Destination() key.Destination { - return vw.dest -} - -func (vw *vschemaWrapper) FindTable(tab sqlparser.TableName) (*vindexes.Table, string, topodatapb.TabletType, key.Destination, error) { - destKeyspace, destTabletType, destTarget, err := topoproto.ParseDestination(tab.Qualifier.String(), topodatapb.TabletType_PRIMARY) - if err != nil { - return nil, destKeyspace, destTabletType, destTarget, err - } - table, err := vw.v.FindTable(destKeyspace, tab.Name.String()) - if err != nil { - return nil, destKeyspace, destTabletType, destTarget, err - } - return table, destKeyspace, destTabletType, destTarget, nil -} - -func (vw *vschemaWrapper) FindView(tab sqlparser.TableName) sqlparser.SelectStatement { - destKeyspace, _, _, err := topoproto.ParseDestination(tab.Qualifier.String(), topodatapb.TabletType_PRIMARY) - if err != nil { - return nil - } - return 
vw.v.FindView(destKeyspace, tab.Name.String()) -} - -func (vw *vschemaWrapper) FindTableOrVindex(tab sqlparser.TableName) (*vindexes.Table, vindexes.Vindex, string, topodatapb.TabletType, key.Destination, error) { - if tab.Qualifier.IsEmpty() && tab.Name.String() == "dual" { - ksName := vw.getActualKeyspace() - var ks *vindexes.Keyspace - if ksName == "" { - ks = vw.getfirstKeyspace() - ksName = ks.Name - } else { - ks = vw.v.Keyspaces[ksName].Keyspace - } - tbl := &vindexes.Table{ - Name: sqlparser.NewIdentifierCS("dual"), - Keyspace: ks, - Type: vindexes.TypeReference, - } - return tbl, nil, ksName, topodatapb.TabletType_PRIMARY, nil, nil - } - destKeyspace, destTabletType, destTarget, err := topoproto.ParseDestination(tab.Qualifier.String(), topodatapb.TabletType_PRIMARY) - if err != nil { - return nil, nil, destKeyspace, destTabletType, destTarget, err - } - if destKeyspace == "" { - destKeyspace = vw.getActualKeyspace() - } - table, vindex, err := vw.v.FindTableOrVindex(destKeyspace, tab.Name.String(), topodatapb.TabletType_PRIMARY) - if err != nil { - return nil, nil, destKeyspace, destTabletType, destTarget, err - } - return table, vindex, destKeyspace, destTabletType, destTarget, nil -} - -func (vw *vschemaWrapper) getfirstKeyspace() (ks *vindexes.Keyspace) { - var f string - for name, schema := range vw.v.Keyspaces { - if f == "" || f > name { - f = name - ks = schema.Keyspace - } - } - return -} - -func (vw *vschemaWrapper) getActualKeyspace() string { - if vw.keyspace == nil { - return "" - } - if !sqlparser.SystemSchema(vw.keyspace.Name) { - return vw.keyspace.Name - } - ks, err := vw.AnyKeyspace() - if err != nil { - return "" - } - return ks.Name -} - -func (vw *vschemaWrapper) DefaultKeyspace() (*vindexes.Keyspace, error) { - return vw.v.Keyspaces["main"].Keyspace, nil -} - -func (vw *vschemaWrapper) AnyKeyspace() (*vindexes.Keyspace, error) { - return vw.DefaultKeyspace() -} - -func (vw *vschemaWrapper) FirstSortedKeyspace() (*vindexes.Keyspace, 
error) { - return vw.v.Keyspaces["main"].Keyspace, nil -} - -func (vw *vschemaWrapper) TargetString() string { - return "targetString" -} - -func (vw *vschemaWrapper) WarnUnshardedOnly(_ string, _ ...any) { - -} - -func (vw *vschemaWrapper) ErrorIfShardedF(keyspace *vindexes.Keyspace, _, errFmt string, params ...any) error { - if keyspace.Sharded { - return fmt.Errorf(errFmt, params...) - } - return nil -} - -func (vw *vschemaWrapper) currentDb() string { - ksName := "" - if vw.keyspace != nil { - ksName = vw.keyspace.Name - } - return ksName -} - -func (vw *vschemaWrapper) FindRoutedShard(keyspace, shard string) (string, error) { - return "", nil -} - -func (vw *vschemaWrapper) IsViewsEnabled() bool { - return vw.enableViews -} - -type ( - planTest struct { - Comment string `json:"comment,omitempty"` - Query string `json:"query,omitempty"` - Plan json.RawMessage `json:"plan,omitempty"` - V3Plan json.RawMessage `json:"v3-plan,omitempty"` - Gen4Plan json.RawMessage `json:"gen4-plan,omitempty"` - } -) - -func testFile(t *testing.T, filename, tempDir string, vschema *vschemaWrapper, render bool) { - opts := jsondiff.DefaultConsoleOptions() - - t.Run(filename, func(t *testing.T) { - var expected []planTest - var outFirstPlanner string - for _, tcase := range readJSONTests(filename) { - if tcase.V3Plan == nil { - tcase.V3Plan = tcase.Plan - tcase.Gen4Plan = tcase.Plan - } - current := planTest{} - testName := tcase.Comment - if testName == "" { - testName = tcase.Query - } - if tcase.Query == "" { - continue - } - t.Run(fmt.Sprintf("V3: %s", testName), func(t *testing.T) { - vschema.version = V3 - plan, err := TestBuilder(tcase.Query, vschema, vschema.currentDb()) - if render && plan != nil { - viz, err := engine.GraphViz(plan.Instructions) - if err == nil { - _ = viz.Render() - } - } - out := getPlanOrErrorOutput(err, plan) - - compare, s := jsondiff.Compare(tcase.V3Plan, []byte(out), &opts) - if compare != jsondiff.FullMatch { - t.Errorf("V3 - %s\nDiff:\n%s\n[%s] 
\n[%s]", filename, s, tcase.V3Plan, out) - } - - outFirstPlanner = out - current.Comment = testName - current.Query = tcase.Query - }) - - vschema.version = Gen4 - out, err := getPlanOutput(tcase, vschema, render) - if err != nil && len(tcase.Gen4Plan) == 0 && strings.HasPrefix(err.Error(), "gen4 does not yet support") { - continue - } - - // our expectation for the new planner on this query is one of three - // - it produces the same plan as V3 - this is shown using empty brackets: {\n} - // - it produces a different but accepted plan - this is shown using the accepted plan - // - or it produces a different plan that has not yet been accepted, or it fails to produce a plan - // this is shown by not having any info at all after the result for the V3 planner - // with this last expectation, it is an error if the Gen4 planner - // produces the same plan as the V3 planner does - t.Run(fmt.Sprintf("Gen4: %s", testName), func(t *testing.T) { - compare, s := jsondiff.Compare(tcase.Gen4Plan, []byte(out), &opts) - if compare != jsondiff.FullMatch { - t.Errorf("Gen4 - %s\nDiff:\n%s\n[%s] \n[%s]", filename, s, tcase.Gen4Plan, out) - } - - if outFirstPlanner == out { - current.Plan = []byte(out) - } else { - current.V3Plan = []byte(outFirstPlanner) - current.Gen4Plan = []byte(out) - } + for _, testcase := range testcases { + t.Run(testcase.query, func(t *testing.T) { + parse, err := sqlparser.Parse(testcase.query) + require.NoError(t, err) + selStmt := parse.(*sqlparser.Select) + semTable, err := semantics.Analyze(selStmt, "d", &semantics.FakeSI{ + Tables: map[string]*vindexes.Table{ + "tabl": {Name: sqlparser.NewIdentifierCS("tabl")}, + "foo": {Name: sqlparser.NewIdentifierCS("foo")}, + }, }) - expected = append(expected, current) - } - if tempDir != "" { - name := strings.TrimSuffix(filename, filepath.Ext(filename)) - name = filepath.Join(tempDir, name+".json") - file, err := os.Create(name) require.NoError(t, err) - enc := json.NewEncoder(file) - enc.SetEscapeHTML(false) - 
enc.SetIndent("", " ") - err = enc.Encode(expected) - if err != nil { + if testcase.rewrite { + err = queryRewrite(semTable, sqlparser.NewReservedVars("vt", make(sqlparser.BindVars)), selStmt) require.NoError(t, err) } - } - }) -} - -func readJSONTests(filename string) []planTest { - var output []planTest - file, err := os.Open(locateFile(filename)) - if err != nil { - panic(err) - } - dec := json.NewDecoder(file) - err = dec.Decode(&output) - if err != nil { - panic(err) - } - return output -} - -func getPlanOutput(tcase planTest, vschema *vschemaWrapper, render bool) (out string, err error) { - defer func() { - if r := recover(); r != nil { - out = fmt.Sprintf("panicked: %v\n%s", r, string(debug.Stack())) - } - }() - plan, err := TestBuilder(tcase.Query, vschema, vschema.currentDb()) - if render && plan != nil { - viz, err := engine.GraphViz(plan.Instructions) - if err == nil { - _ = viz.Render() - } - } - out = getPlanOrErrorOutput(err, plan) - return out, err -} - -func getPlanOrErrorOutput(err error, plan *engine.Plan) string { - if err != nil { - return "\"" + err.Error() + "\"" - } - b := new(bytes.Buffer) - enc := json.NewEncoder(b) - enc.SetEscapeHTML(false) - enc.SetIndent("", " ") - err = enc.Encode(plan) - if err != nil { - panic(err) - } - return b.String() -} - -func locateFile(name string) string { - return "testdata/" + name -} - -var benchMarkFiles = []string{"from_cases.json", "filter_cases.json", "large_cases.json", "aggr_cases.json", "select_cases.json", "union_cases.json"} - -func BenchmarkPlanner(b *testing.B) { - vschema := &vschemaWrapper{ - v: loadSchema(b, "vschemas/schema.json", true), - sysVarEnabled: true, - } - for _, filename := range benchMarkFiles { - testCases := readJSONTests(filename) - b.Run(filename+"-v3", func(b *testing.B) { - benchmarkPlanner(b, V3, testCases, vschema) - }) - b.Run(filename+"-gen4", func(b *testing.B) { - benchmarkPlanner(b, Gen4, testCases, vschema) + expr := testcase.extractor(selStmt) + tableset := 
semTable.RecursiveDeps(expr) + require.Equal(t, testcase.requiredTableSet, tableset) }) - b.Run(filename+"-gen4left2right", func(b *testing.B) { - benchmarkPlanner(b, Gen4Left2Right, testCases, vschema) - }) - } -} - -func BenchmarkSemAnalysis(b *testing.B) { - vschema := &vschemaWrapper{ - v: loadSchema(b, "vschemas/schema.json", true), - sysVarEnabled: true, - } - - for i := 0; i < b.N; i++ { - for _, filename := range benchMarkFiles { - for _, tc := range readJSONTests(filename) { - exerciseAnalyzer(tc.Query, vschema.currentDb(), vschema) - } - } } } -func exerciseAnalyzer(query, database string, s semantics.SchemaInformation) { - defer func() { - // if analysis panics, let's just continue. this is just a benchmark - recover() - }() - - ast, err := sqlparser.Parse(query) - if err != nil { - return - } - sel, ok := ast.(sqlparser.SelectStatement) - if !ok { - return - } - - _, _ = semantics.Analyze(sel, database, s) -} - -func BenchmarkSelectVsDML(b *testing.B) { - vschema := &vschemaWrapper{ - v: loadSchema(b, "vschemas/schema.json", true), - sysVarEnabled: true, - version: V3, - } - - dmlCases := readJSONTests("dml_cases.json") - selectCases := readJSONTests("select_cases.json") - - rand.Shuffle(len(dmlCases), func(i, j int) { - dmlCases[i], dmlCases[j] = dmlCases[j], dmlCases[i] - }) - - rand.Shuffle(len(selectCases), func(i, j int) { - selectCases[i], selectCases[j] = selectCases[j], selectCases[i] - }) - - b.Run("DML (random sample, N=32)", func(b *testing.B) { - benchmarkPlanner(b, V3, dmlCases[:32], vschema) - }) - - b.Run("Select (random sample, N=32)", func(b *testing.B) { - benchmarkPlanner(b, V3, selectCases[:32], vschema) - }) -} - -func benchmarkPlanner(b *testing.B, version plancontext.PlannerVersion, testCases []planTest, vschema *vschemaWrapper) { - b.ReportAllocs() - for n := 0; n < b.N; n++ { - for _, tcase := range testCases { - if len(tcase.Gen4Plan) > 0 { - vschema.version = version - _, _ = TestBuilder(tcase.Query, vschema, 
vschema.currentDb()) - } - } - } +func extractExpr(in *sqlparser.Select, idx int) sqlparser.Expr { + return in.SelectExprs[idx].(*sqlparser.AliasedExpr).Expr } diff --git a/go/vt/vtgate/planbuilder/plancontext/planning_context.go b/go/vt/vtgate/planbuilder/plancontext/planning_context.go index e8045ce0b04..d090a593a39 100644 --- a/go/vt/vtgate/planbuilder/plancontext/planning_context.go +++ b/go/vt/vtgate/planbuilder/plancontext/planning_context.go @@ -31,18 +31,51 @@ type PlanningContext struct { // e.g. [FROM tblA JOIN tblB ON a.colA = b.colB] will be rewritten to [FROM tblB WHERE :a_colA = b.colB], // if we assume that tblB is on the RHS of the join. This last predicate in the WHERE clause is added to the // map below - JoinPredicates map[sqlparser.Expr][]sqlparser.Expr - SkipPredicates map[sqlparser.Expr]any - PlannerVersion querypb.ExecuteOptions_PlannerVersion - RewriteDerivedExpr bool + JoinPredicates map[sqlparser.Expr][]sqlparser.Expr + SkipPredicates map[sqlparser.Expr]any + PlannerVersion querypb.ExecuteOptions_PlannerVersion // If we during planning have turned this expression into an argument name, // we can continue using the same argument name ReservedArguments map[sqlparser.Expr]string + + // VerifyAllFKs tells whether we need verification for all the fk constraints on VTGate. + // This is required for queries we are running with /*+ SET_VAR(foreign_key_checks=OFF) */ + VerifyAllFKs bool + + // ParentFKToIgnore stores a specific parent foreign key that we would need to ignore while planning + // a certain query. This field is used in UPDATE CASCADE planning, wherein while planning the child update + // query, we need to ignore the parent foreign key constraint that caused the cascade in question. 
+ ParentFKToIgnore string + + // Projected subqueries that have been merged + MergedSubqueries []*sqlparser.Subquery + + // CurrentPhase keeps track of how far we've gone in the planning process + // The type should be operators.Phase, but depending on that would lead to circular dependencies + CurrentPhase int } -func NewPlanningContext(reservedVars *sqlparser.ReservedVars, semTable *semantics.SemTable, vschema VSchema, version querypb.ExecuteOptions_PlannerVersion) *PlanningContext { - ctx := &PlanningContext{ +func CreatePlanningContext(stmt sqlparser.Statement, + reservedVars *sqlparser.ReservedVars, + + vschema VSchema, + version querypb.ExecuteOptions_PlannerVersion, +) (*PlanningContext, error) { + ksName := "" + if ks, _ := vschema.DefaultKeyspace(); ks != nil { + ksName = ks.Name + } + + semTable, err := semantics.Analyze(stmt, ksName, vschema) + if err != nil { + return nil, err + } + + // record any warning as planner warning. + vschema.PlannerWarning(semTable.Warning) + + return &PlanningContext{ ReservedVars: reservedVars, SemTable: semTable, VSchema: vschema, @@ -50,21 +83,27 @@ func NewPlanningContext(reservedVars *sqlparser.ReservedVars, semTable *semantic SkipPredicates: map[sqlparser.Expr]any{}, PlannerVersion: version, ReservedArguments: map[sqlparser.Expr]string{}, - } - return ctx + }, nil } -func (c *PlanningContext) IsSubQueryToReplace(e sqlparser.Expr) bool { - ext, ok := e.(*sqlparser.Subquery) - if !ok { - return false - } - for _, extractedSubq := range c.SemTable.GetSubqueryNeedingRewrite() { - if extractedSubq.Merged && c.SemTable.EqualsExpr(extractedSubq.Subquery, ext) { - return true +func (ctx *PlanningContext) GetReservedArgumentFor(expr sqlparser.Expr) string { + for key, name := range ctx.ReservedArguments { + if ctx.SemTable.EqualsExpr(key, expr) { + return name } } - return false + var bvName string + switch expr := expr.(type) { + case *sqlparser.ColName: + bvName = ctx.ReservedVars.ReserveColName(expr) + case 
*sqlparser.Subquery: + bvName = ctx.ReservedVars.ReserveSubQuery() + default: + bvName = ctx.ReservedVars.ReserveVariable(sqlparser.CompliantString(expr)) + } + ctx.ReservedArguments[expr] = bvName + + return bvName } func (ctx *PlanningContext) GetArgumentFor(expr sqlparser.Expr, f func() string) string { diff --git a/go/vt/vtgate/planbuilder/plancontext/vschema.go b/go/vt/vtgate/planbuilder/plancontext/vschema.go index 489a9eafe8f..fc5ee6d9207 100644 --- a/go/vt/vtgate/planbuilder/plancontext/vschema.go +++ b/go/vt/vtgate/planbuilder/plancontext/vschema.go @@ -4,7 +4,6 @@ import ( "context" "strings" - "vitess.io/vitess/go/vt/log" vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" "vitess.io/vitess/go/vt/vtgate/engine" @@ -56,7 +55,7 @@ type VSchema interface { PlannerWarning(message string) // ForeignKeyMode returns the foreign_key flag value - ForeignKeyMode() string + ForeignKeyMode(keyspace string) (vschemapb.Keyspace_ForeignKeyMode, error) // GetVSchema returns the latest cached vindexes.VSchema GetVSchema() *vindexes.VSchema @@ -91,25 +90,13 @@ type VSchema interface { // PlannerNameToVersion returns the numerical representation of the planner func PlannerNameToVersion(s string) (PlannerVersion, bool) { - deprecationMessage := "The V3 planner is deprecated and will be removed in future release of Vitess" switch strings.ToLower(s) { - case "v3": - log.Warning(deprecationMessage) - return querypb.ExecuteOptions_V3, true case "gen4": return querypb.ExecuteOptions_Gen4, true case "gen4greedy", "greedy": return querypb.ExecuteOptions_Gen4Greedy, true case "left2right": return querypb.ExecuteOptions_Gen4Left2Right, true - case "gen4fallback": - return querypb.ExecuteOptions_Gen4WithFallback, true - case "gen4comparev3": - log.Warning(deprecationMessage) - return querypb.ExecuteOptions_Gen4CompareV3, true - case "v3insert": - log.Warning(deprecationMessage) - return querypb.ExecuteOptions_V3Insert, true } return 0, 
false } diff --git a/go/vt/vtgate/planbuilder/planner.go b/go/vt/vtgate/planbuilder/planner.go new file mode 100644 index 00000000000..984fd83c4e0 --- /dev/null +++ b/go/vt/vtgate/planbuilder/planner.go @@ -0,0 +1,98 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package planbuilder + +import ( + "fmt" + "strconv" + + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" +) + +func gen4Planner(query string, plannerVersion querypb.ExecuteOptions_PlannerVersion) stmtPlanner { + return func(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { + switch stmt := stmt.(type) { + case sqlparser.SelectStatement: + return gen4SelectStmtPlanner(query, plannerVersion, stmt, reservedVars, vschema) + case *sqlparser.Update: + return gen4UpdateStmtPlanner(plannerVersion, stmt, reservedVars, vschema) + case *sqlparser.Delete: + return gen4DeleteStmtPlanner(plannerVersion, stmt, reservedVars, vschema) + case *sqlparser.Insert: + return gen4InsertStmtPlanner(plannerVersion, stmt, reservedVars, vschema) + default: + return nil, vterrors.VT12001(fmt.Sprintf("%T", stmt)) + } + } +} + +// pushCommentDirectivesOnPlan adds comments to queries +// TODO: this should move to the operator side of planning +func pushCommentDirectivesOnPlan(plan 
logicalPlan, stmt sqlparser.Statement) logicalPlan { + var directives *sqlparser.CommentDirectives + cmt, ok := stmt.(sqlparser.Commented) + if ok { + directives = cmt.GetParsedComments().Directives() + scatterAsWarns := directives.IsSet(sqlparser.DirectiveScatterErrorsAsWarnings) + timeout := queryTimeout(directives) + multiShardAutoCommit := directives.IsSet(sqlparser.DirectiveMultiShardAutocommit) + + if scatterAsWarns || timeout > 0 || multiShardAutoCommit { + _, _ = visit(plan, func(logicalPlan logicalPlan) (bool, logicalPlan, error) { + switch plan := logicalPlan.(type) { + case *route: + plan.eroute.ScatterErrorsAsWarnings = scatterAsWarns + plan.eroute.QueryTimeout = timeout + case *primitiveWrapper: + setDirective(plan.prim, multiShardAutoCommit, timeout) + case *insert: + setDirective(plan.eInsert, multiShardAutoCommit, timeout) + } + return true, logicalPlan, nil + }) + } + } + + return plan +} + +func setDirective(prim engine.Primitive, msac bool, timeout int) { + switch edml := prim.(type) { + case *engine.Insert: + edml.MultiShardAutocommit = msac + edml.QueryTimeout = timeout + case *engine.Update: + edml.MultiShardAutocommit = msac + edml.QueryTimeout = timeout + case *engine.Delete: + edml.MultiShardAutocommit = msac + edml.QueryTimeout = timeout + } +} + +// queryTimeout returns DirectiveQueryTimeout value if set, otherwise returns 0. 
+func queryTimeout(d *sqlparser.CommentDirectives) int { + val, _ := d.GetString(sqlparser.DirectiveQueryTimeout, "0") + if intVal, err := strconv.Atoi(val); err == nil { + return intVal + } + return 0 +} diff --git a/go/vt/vtgate/planbuilder/gen4_planner_test.go b/go/vt/vtgate/planbuilder/planner_test.go similarity index 100% rename from go/vt/vtgate/planbuilder/gen4_planner_test.go rename to go/vt/vtgate/planbuilder/planner_test.go diff --git a/go/vt/vtgate/planbuilder/postprocess.go b/go/vt/vtgate/planbuilder/postprocess.go index 2161e1ef414..850c68701f8 100644 --- a/go/vt/vtgate/planbuilder/postprocess.go +++ b/go/vt/vtgate/planbuilder/postprocess.go @@ -17,8 +17,6 @@ limitations under the License. package planbuilder import ( - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" @@ -27,115 +25,6 @@ import ( // This file has functions to analyze postprocessing // clauses like ORDER BY, etc. -// pushGroupBy processes the group by clause. It resolves all symbols -// and ensures that there are no subqueries. -func (pb *primitiveBuilder) pushGroupBy(sel *sqlparser.Select) error { - if sel.Distinct { - newBuilder, err := planDistinct(pb.plan) - if err != nil { - return err - } - pb.plan = newBuilder - } - - if err := pb.st.ResolveSymbols(sel.GroupBy); err != nil { - return err - } - - newInput, err := planGroupBy(pb, pb.plan, sel.GroupBy) - if err != nil { - return err - } - pb.plan = newInput - - return nil -} - -// pushOrderBy pushes the order by clause into the primitives. -// It resolves all symbols and ensures that there are no subqueries. 
-func (pb *primitiveBuilder) pushOrderBy(orderBy sqlparser.OrderBy) error { - if err := pb.st.ResolveSymbols(orderBy); err != nil { - return err - } - var v3OrderBylist v3OrderBy - for _, order := range orderBy { - v3OrderBylist = append(v3OrderBylist, &v3Order{Order: order}) - } - plan, err := planOrdering(pb, pb.plan, v3OrderBylist) - if err != nil { - return err - } - pb.plan = plan - pb.plan.Reorder(0) - return nil -} - -func (pb *primitiveBuilder) pushLimit(limit *sqlparser.Limit) error { - if limit == nil { - return nil - } - rb, ok := pb.plan.(*route) - if ok && rb.isSingleShard() { - rb.SetLimit(limit) - return nil - } - - lb, err := createLimit(pb.plan, limit) - if err != nil { - return err - } - - plan, err := visit(lb, setUpperLimit) - if err != nil { - return err - } - - pb.plan = plan - pb.plan.Reorder(0) - return nil -} - -// make sure we have the right signature for this function -var _ planVisitor = setUpperLimit - -// setUpperLimit is an optimization hint that tells that primitive -// that it does not need to return more than the specified number of rows. -// A primitive that cannot perform this can ignore the request. -func setUpperLimit(plan logicalPlan) (bool, logicalPlan, error) { - switch node := plan.(type) { - case *join, *joinGen4, *hashJoin: - return false, node, nil - case *memorySort: - pv := evalengine.NewBindVar("__upper_limit", sqltypes.Int64, collations.CollationBinaryID) - node.eMemorySort.UpperLimit = pv - // we don't want to go down to the rest of the tree - return false, node, nil - case *pulloutSubquery: - // we control the visitation manually here - - // we don't want to visit the subQuery side of this plan - newUnderlying, err := visit(node.underlying, setUpperLimit) - if err != nil { - return false, nil, err - } - - node.underlying = newUnderlying - return false, node, nil - case *route: - // The route pushes the limit regardless of the plan. 
- // If it's a scatter query, the rows returned will be - // more than the upper limit, but enough for the limit - node.Select.SetLimit(&sqlparser.Limit{Rowcount: sqlparser.NewArgument("__upper_limit")}) - case *routeGen4: - // The route pushes the limit regardless of the plan. - // If it's a scatter query, the rows returned will be - // more than the upper limit, but enough for the limit - node.Select.SetLimit(&sqlparser.Limit{Rowcount: sqlparser.NewArgument("__upper_limit")}) - case *concatenate: - return false, node, nil - } - return true, plan, nil -} - func createLimit(input logicalPlan, limit *sqlparser.Limit) (logicalPlan, error) { plan := newLimit(input) pv, err := evalengine.Translate(limit.Rowcount, nil) diff --git a/go/vt/vtgate/planbuilder/primitive_builder.go b/go/vt/vtgate/planbuilder/primitive_builder.go deleted file mode 100644 index b7c557518e5..00000000000 --- a/go/vt/vtgate/planbuilder/primitive_builder.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" -) - -// primitiveBuilder is the top level type for building plans. -// It contains the current logicalPlan tree, the symtab and -// the jointab. It can create transient planBuilders due -// to the recursive nature of SQL. 
-type primitiveBuilder struct { - vschema plancontext.VSchema - jt *jointab - plan logicalPlan - st *symtab - stmt sqlparser.Statement -} - -func newStmtAwarePrimitiveBuilder(vschema plancontext.VSchema, jt *jointab, stmt sqlparser.Statement) *primitiveBuilder { - return &primitiveBuilder{ - vschema: vschema, - jt: jt, - stmt: stmt, - } -} - -func newPrimitiveBuilder(vschema plancontext.VSchema, jt *jointab) *primitiveBuilder { - return &primitiveBuilder{ - vschema: vschema, - jt: jt, - } -} diff --git a/go/vt/vtgate/planbuilder/primitive_wrapper.go b/go/vt/vtgate/planbuilder/primitive_wrapper.go index b4ed7c8aa39..cb6a65aba04 100644 --- a/go/vt/vtgate/planbuilder/primitive_wrapper.go +++ b/go/vt/vtgate/planbuilder/primitive_wrapper.go @@ -27,10 +27,9 @@ import ( // primitiveWrapper is used when only need a logical plan that supports plan.Primitive() and nothing else type primitiveWrapper struct { prim engine.Primitive - gen4Plan } -func (p *primitiveWrapper) WireupGen4(*plancontext.PlanningContext) error { +func (p *primitiveWrapper) Wireup(*plancontext.PlanningContext) error { return nil } diff --git a/go/vt/vtgate/planbuilder/project.go b/go/vt/vtgate/planbuilder/project.go deleted file mode 100644 index 3a8d9e260c8..00000000000 --- a/go/vt/vtgate/planbuilder/project.go +++ /dev/null @@ -1,173 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package planbuilder - -import ( - "fmt" - "strings" - - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" -) - -// planProjection pushes the select expression to the specified -// originator. If successful, the originator must create -// a resultColumn entry and return it. The top level caller -// must accumulate these result columns and set the symtab -// after analysis. -func planProjection(pb *primitiveBuilder, in logicalPlan, expr *sqlparser.AliasedExpr, origin logicalPlan) (logicalPlan, *resultColumn, int, error) { - switch node := in.(type) { - case *join: - var rc *resultColumn - if node.isOnLeft(origin.Order()) { - newLeft, col, colNumber, err := planProjection(pb, node.Left, expr, origin) - if err != nil { - return nil, nil, 0, err - } - node.ejoin.Cols = append(node.ejoin.Cols, -colNumber-1) - rc = col - node.Left = newLeft - } else { - // Pushing of non-trivial expressions not allowed for RHS of left joins. - if _, ok := expr.Expr.(*sqlparser.ColName); !ok && node.ejoin.Opcode == engine.LeftJoin { - return nil, nil, 0, vterrors.VT12001("cross-shard LEFT JOIN and column expressions") - } - - newRight, col, colNumber, err := planProjection(pb, node.Right, expr, origin) - if err != nil { - return nil, nil, 0, err - } - node.ejoin.Cols = append(node.ejoin.Cols, colNumber+1) - rc = col - node.Right = newRight - } - node.resultColumns = append(node.resultColumns, rc) - return in, rc, len(node.resultColumns) - 1, nil - - // orderedAggregate can accept expressions that are normal (a+b), or aggregate (MAX(v)). - // Normal expressions are pushed through to the underlying route. But aggregate - // expressions require post-processing. 
In such cases, oa shares the work with - // the underlying route: It asks the scatter route to perform the MAX operation - // also, and only performs the final aggregation with what the route returns. - // Since the results are expected to be ordered, this is something that can - // be performed 'as they come'. In this respect, oa is the originator for - // aggregate expressions like MAX, which will be added to symtab. The underlying - // MAX sent to the route will not be added to symtab and will not be reachable by - // others. This functionality depends on the PushOrderBy to request that - // the rows be correctly ordered. - case *orderedAggregate: - if aggrFunc, isAggregate := expr.Expr.(sqlparser.AggrFunc); isAggregate { - if _, ok := popcode.SupportedAggregates[strings.ToLower(aggrFunc.AggrName())]; ok { - rc, colNumber, err := node.pushAggr(pb, expr, origin) - if err != nil { - return nil, nil, 0, err - } - return node, rc, colNumber, nil - } - } - - // Ensure that there are no aggregates in the expression. 
- if sqlparser.ContainsAggregation(expr.Expr) { - return nil, nil, 0, vterrors.VT12001("in scatter query: complex aggregate expression") - } - - newInput, innerRC, _, err := planProjection(pb, node.input, expr, origin) - if err != nil { - return nil, nil, 0, err - } - node.input = newInput - node.resultColumns = append(node.resultColumns, innerRC) - return node, innerRC, len(node.resultColumns) - 1, nil - case *route: - sel := node.Select.(*sqlparser.Select) - sel.SelectExprs = append(sel.SelectExprs, expr) - - rc := newResultColumn(expr, node) - node.resultColumns = append(node.resultColumns, rc) - - return node, rc, len(node.resultColumns) - 1, nil - case *mergeSort: - projectedInput, rc, idx, err := planProjection(pb, node.input, expr, origin) - if err != nil { - return nil, nil, 0, err - } - err = node.Rewrite(projectedInput) - if err != nil { - return nil, nil, 0, err - } - return node, rc, idx, nil - case *distinct: - projectedInput, rc, idx, err := planProjection(pb, node.input, expr, origin) - if err != nil { - return nil, nil, 0, err - } - err = node.Rewrite(projectedInput) - if err != nil { - return nil, nil, 0, err - } - return node, rc, idx, nil - case *pulloutSubquery: - projectedInput, rc, idx, err := planProjection(pb, node.underlying, expr, origin) - if err != nil { - return nil, nil, 0, err - } - err = node.Rewrite(projectedInput, node.subquery) - if err != nil { - return nil, nil, 0, err - } - return node, rc, idx, nil - case *simpleProjection: - col, ok := expr.Expr.(*sqlparser.ColName) - if !ok { - return nil, nil, 0, vterrors.VT12001("expression on results of a cross-shard subquery") - } - - // colNumber should already be set for subquery columns. - inner := col.Metadata.(*column).colNumber - node.eSimpleProj.Cols = append(node.eSimpleProj.Cols, inner) - - // Build a new column reference to represent the result column. 
- rc := newResultColumn(expr, node) - node.resultColumns = append(node.resultColumns, rc) - - return node, rc, len(node.resultColumns) - 1, nil - case *vindexFunc: - // Catch the case where no where clause was specified. If so, the opcode - // won't be set. - if node.eVindexFunc.Opcode == engine.VindexNone { - return nil, nil, 0, vterrors.VT12001(operators.VindexUnsupported + " (where clause missing)") - } - col, ok := expr.Expr.(*sqlparser.ColName) - if !ok { - return nil, nil, 0, vterrors.VT12001("expression on results of a vindex function") - } - rc := newResultColumn(expr, node) - node.resultColumns = append(node.resultColumns, rc) - node.eVindexFunc.Fields = append(node.eVindexFunc.Fields, &querypb.Field{ - Name: rc.alias.String(), - Type: querypb.Type_VARBINARY, - }) - node.eVindexFunc.Cols = append(node.eVindexFunc.Cols, col.Metadata.(*column).colNumber) - return node, rc, len(node.resultColumns) - 1, nil - - } - return nil, nil, 0, vterrors.VT13001(fmt.Sprintf("unreachable %T.projection", in)) -} diff --git a/go/vt/vtgate/planbuilder/projection.go b/go/vt/vtgate/planbuilder/projection.go index d211f16233a..70cb1979780 100644 --- a/go/vt/vtgate/planbuilder/projection.go +++ b/go/vt/vtgate/planbuilder/projection.go @@ -22,13 +22,11 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) type projection struct { - gen4Plan source logicalPlan columnNames []string columns []sqlparser.Expr @@ -40,33 +38,13 @@ type projection struct { var _ logicalPlan = (*projection)(nil) -// WireupGen4 implements the logicalPlan interface -func (p *projection) WireupGen4(ctx *plancontext.PlanningContext) error { - if p.primitive != nil { - // if primitive is not nil, it means that the horizon planning in the operator phase already - // created all the needed evalengine 
expressions. - // we don't need to do anything here, let's just shortcut out of this call - return p.source.WireupGen4(ctx) - } - - columns := make([]evalengine.Expr, 0, len(p.columns)) - for _, expr := range p.columns { - convert, err := evalengine.Translate(expr, &evalengine.Config{ - ResolveColumn: resolveFromPlan(ctx, p.source, false), - ResolveType: ctx.SemTable.TypeForExpr, - Collation: ctx.SemTable.Collation, - }) - if err != nil { - return err - } - columns = append(columns, convert) - } - p.primitive = &engine.Projection{ - Cols: p.columnNames, - Exprs: columns, +// Wireup implements the logicalPlan interface +func (p *projection) Wireup(ctx *plancontext.PlanningContext) error { + if p.primitive == nil { + return vterrors.VT13001("should already be done") } - return p.source.WireupGen4(ctx) + return p.source.Wireup(ctx) } // Inputs implements the logicalPlan interface @@ -108,21 +86,3 @@ func (p *projection) Primitive() engine.Primitive { p.primitive.Input = p.source.Primitive() return p.primitive } - -// addColumn is used to add a column output for the projection. -// This is the only function that should be used to add columns to projection -func (p *projection) addColumn(idx *int, column sqlparser.Expr, columnName string) (int, error) { - var offset int - if idx == nil { - p.unorderedColumnIdx++ - offset = len(p.columns) - p.unorderedColumnIdx - } else { - offset = *idx - } - if p.columnNames[offset] != "" || p.columns[offset] != nil { - return -1, vterrors.VT13001("overwriting columns in projection is not permitted") - } - p.columns[offset] = column - p.columnNames[offset] = columnName - return offset, nil -} diff --git a/go/vt/vtgate/planbuilder/projection_pushing.go b/go/vt/vtgate/planbuilder/projection_pushing.go deleted file mode 100644 index ca88a3fd57e..00000000000 --- a/go/vt/vtgate/planbuilder/projection_pushing.go +++ /dev/null @@ -1,335 +0,0 @@ -/* -Copyright 2022 The Vitess Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" -) - -// pushProjection pushes a projection to the plan. -func pushProjection( - ctx *plancontext.PlanningContext, - expr *sqlparser.AliasedExpr, - plan logicalPlan, - inner, reuseCol, hasAggregation bool, -) (offset int, added bool, err error) { - switch node := plan.(type) { - case *limit, *projection, *pulloutSubquery, *distinct, *filter: - // All of these either push to the single source, or push to the LHS - src := node.Inputs()[0] - return pushProjection(ctx, expr, src, inner, reuseCol, hasAggregation) - case *routeGen4: - return addExpressionToRoute(ctx, node, expr, reuseCol) - case *hashJoin: - return pushProjectionIntoHashJoin(ctx, expr, node, reuseCol, inner, hasAggregation) - case *joinGen4: - return pushProjectionIntoJoin(ctx, expr, node, reuseCol, inner, hasAggregation) - case *simpleProjection: - return pushProjectionIntoSimpleProj(ctx, expr, node, inner, hasAggregation, reuseCol) - case *orderedAggregate: - return pushProjectionIntoOA(ctx, expr, node, inner, hasAggregation) - case *vindexFunc: - return pushProjectionIntoVindexFunc(node, expr, reuseCol) - 
case *semiJoin: - return pushProjectionIntoSemiJoin(ctx, expr, reuseCol, node, inner, hasAggregation) - case *concatenateGen4: - return pushProjectionIntoConcatenate(ctx, expr, hasAggregation, node, inner, reuseCol) - default: - return 0, false, vterrors.VT13001(fmt.Sprintf("push projection does not yet support: %T", node)) - } -} - -func pushProjectionIntoVindexFunc(node *vindexFunc, expr *sqlparser.AliasedExpr, reuseCol bool) (int, bool, error) { - colsBefore := len(node.eVindexFunc.Cols) - i, err := node.SupplyProjection(expr, reuseCol) - if err != nil { - return 0, false, err - } - return i /* col added */, len(node.eVindexFunc.Cols) > colsBefore, nil -} - -func pushProjectionIntoConcatenate(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, hasAggregation bool, node *concatenateGen4, inner bool, reuseCol bool) (int, bool, error) { - if hasAggregation { - return 0, false, vterrors.VT12001("aggregation on UNIONs") - } - offset, added, err := pushProjection(ctx, expr, node.sources[0], inner, reuseCol, hasAggregation) - if err != nil { - return 0, false, err - } - if added && ctx.SemTable.DirectDeps(expr.Expr).NonEmpty() { - return 0, false, vterrors.VT13001(fmt.Sprintf("pushing projection %v on concatenate should reference an existing column", sqlparser.String(expr))) - } - if added { - for _, source := range node.sources[1:] { - _, _, err := pushProjection(ctx, expr, source, inner, reuseCol, hasAggregation) - if err != nil { - return 0, false, err - } - } - } - return offset, added, nil -} - -func pushProjectionIntoSemiJoin( - ctx *plancontext.PlanningContext, - expr *sqlparser.AliasedExpr, - reuseCol bool, - node *semiJoin, - inner, hasAggregation bool, -) (int, bool, error) { - passDownReuseCol := reuseCol - if !reuseCol { - passDownReuseCol = expr.As.IsEmpty() - } - offset, added, err := pushProjection(ctx, expr, node.lhs, inner, passDownReuseCol, hasAggregation) - if err != nil { - return 0, false, err - } - column := -(offset + 1) - if reuseCol 
&& !added { - for idx, col := range node.cols { - if column == col { - return idx, false, nil - } - } - } - node.cols = append(node.cols, column) - return len(node.cols) - 1, true, nil -} - -func pushProjectionIntoOA(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, node *orderedAggregate, inner, hasAggregation bool) (int, bool, error) { - colName, isColName := expr.Expr.(*sqlparser.ColName) - for _, aggregate := range node.aggregates { - if ctx.SemTable.EqualsExpr(aggregate.Expr, expr.Expr) { - return aggregate.Col, false, nil - } - if isColName && colName.Name.EqualString(aggregate.Alias) { - return aggregate.Col, false, nil - } - } - for _, key := range node.groupByKeys { - if ctx.SemTable.EqualsExpr(key.Expr, expr.Expr) { - return key.KeyCol, false, nil - } - } - offset, _, err := pushProjection(ctx, expr, node.input, inner, true, hasAggregation) - if err != nil { - return 0, false, err - } - node.aggregates = append(node.aggregates, &engine.AggregateParams{ - Opcode: popcode.AggregateRandom, - Col: offset, - Alias: expr.ColumnName(), - Expr: expr.Expr, - Original: expr, - }) - return offset, true, nil -} - -func pushProjectionIntoSimpleProj( - ctx *plancontext.PlanningContext, - expr *sqlparser.AliasedExpr, - node *simpleProjection, - inner, hasAggregation, reuseCol bool, -) (int, bool, error) { - offset, _, err := pushProjection(ctx, expr, node.input, inner, true, hasAggregation) - if err != nil { - return 0, false, err - } - for i, value := range node.eSimpleProj.Cols { - // we return early if we already have the column in the simple projection's - // output list so we do not add it again. 
- if reuseCol && value == offset { - return i, false, nil - } - } - node.eSimpleProj.Cols = append(node.eSimpleProj.Cols, offset) - return len(node.eSimpleProj.Cols) - 1, true, nil -} - -func pushProjectionIntoJoin( - ctx *plancontext.PlanningContext, - expr *sqlparser.AliasedExpr, - node *joinGen4, - reuseCol, inner, hasAggregation bool, -) (int, bool, error) { - lhsSolves := node.Left.ContainsTables() - rhsSolves := node.Right.ContainsTables() - deps := ctx.SemTable.RecursiveDeps(expr.Expr) - var column int - var appended bool - passDownReuseCol := reuseCol - if !reuseCol { - passDownReuseCol = expr.As.IsEmpty() - } - switch { - case deps.IsSolvedBy(lhsSolves): - offset, added, err := pushProjection(ctx, expr, node.Left, inner, passDownReuseCol, hasAggregation) - if err != nil { - return 0, false, err - } - column = -(offset + 1) - appended = added - case deps.IsSolvedBy(rhsSolves): - offset, added, err := pushProjection(ctx, expr, node.Right, inner && node.Opcode != engine.LeftJoin, passDownReuseCol, hasAggregation) - if err != nil { - return 0, false, err - } - column = offset + 1 - appended = added - default: - // if an expression has aggregation, then it should not be split up and pushed to both sides, - // for example an expression like count(*) will have dependencies on both sides, but we should not push it - // instead we should return an error - if hasAggregation { - return 0, false, vterrors.VT12001("cross-shard query with aggregates") - } - // now we break the expression into left and right side dependencies and rewrite the left ones to bind variables - joinCol, err := operators.BreakExpressionInLHSandRHS(ctx, expr.Expr, lhsSolves) - if err != nil { - return 0, false, err - } - // go over all the columns coming from the left side of the tree and push them down. While at it, also update the bind variable map. - // It is okay to reuse the columns on the left side since - // the final expression which will be selected will be pushed into the right side. 
- for i, col := range joinCol.LHSExprs { - colOffset, _, err := pushProjection(ctx, &sqlparser.AliasedExpr{Expr: col}, node.Left, inner, true, false) - if err != nil { - return 0, false, err - } - node.Vars[joinCol.BvNames[i]] = colOffset - } - // push the rewritten expression on the right side of the tree. Here we should take care whether we want to reuse the expression or not. - expr.Expr = joinCol.RHSExpr - offset, added, err := pushProjection(ctx, expr, node.Right, inner && node.Opcode != engine.LeftJoin, passDownReuseCol, false) - if err != nil { - return 0, false, err - } - column = offset + 1 - appended = added - } - if reuseCol && !appended { - for idx, col := range node.Cols { - if column == col { - return idx, false, nil - } - } - // the column was not appended to either child, but we could not find it in out cols list, - // so we'll still add it - } - node.Cols = append(node.Cols, column) - return len(node.Cols) - 1, true, nil -} - -func pushProjectionIntoHashJoin( - ctx *plancontext.PlanningContext, - expr *sqlparser.AliasedExpr, - node *hashJoin, - reuseCol, inner, hasAggregation bool, -) (int, bool, error) { - lhsSolves := node.Left.ContainsTables() - rhsSolves := node.Right.ContainsTables() - deps := ctx.SemTable.RecursiveDeps(expr.Expr) - var column int - var appended bool - passDownReuseCol := reuseCol - if !reuseCol { - passDownReuseCol = expr.As.IsEmpty() - } - switch { - case deps.IsSolvedBy(lhsSolves): - offset, added, err := pushProjection(ctx, expr, node.Left, inner, passDownReuseCol, hasAggregation) - if err != nil { - return 0, false, err - } - column = -(offset + 1) - appended = added - case deps.IsSolvedBy(rhsSolves): - offset, added, err := pushProjection(ctx, expr, node.Right, inner && node.Opcode != engine.LeftJoin, passDownReuseCol, hasAggregation) - if err != nil { - return 0, false, err - } - column = offset + 1 - appended = added - default: - // if an expression has aggregation, then it should not be split up and pushed to both 
sides, - // for example an expression like count(*) will have dependencies on both sides, but we should not push it - // instead we should return an error - if hasAggregation { - return 0, false, vterrors.VT12001("cross-shard query with aggregates") - } - return 0, false, vterrors.VT12001("hash join with projection from both sides of the join") - } - if reuseCol && !appended { - for idx, col := range node.Cols { - if column == col { - return idx, false, nil - } - } - // the column was not appended to either child, but we could not find it in out cols list, - // so we'll still add it - } - node.Cols = append(node.Cols, column) - return len(node.Cols) - 1, true, nil -} - -func addExpressionToRoute(ctx *plancontext.PlanningContext, rb *routeGen4, expr *sqlparser.AliasedExpr, reuseCol bool) (int, bool, error) { - if reuseCol { - if i := checkIfAlreadyExists(expr, rb.Select, ctx.SemTable); i != -1 { - return i, false, nil - } - } - sqlparser.RemoveKeyspaceFromColName(expr.Expr) - sel, isSel := rb.Select.(*sqlparser.Select) - if !isSel { - return 0, false, vterrors.VT12001(fmt.Sprintf("pushing projection '%s' on %T", sqlparser.String(expr), rb.Select)) - } - - if ctx.RewriteDerivedExpr { - // if we are trying to push a projection that belongs to a DerivedTable - // we rewrite that expression, so it matches the column name used inside - // that derived table. 
- err := rewriteProjectionOfDerivedTable(expr, ctx.SemTable) - if err != nil { - return 0, false, err - } - } - - offset := len(sel.SelectExprs) - sel.SelectExprs = append(sel.SelectExprs, expr) - return offset, true, nil -} - -func rewriteProjectionOfDerivedTable(expr *sqlparser.AliasedExpr, semTable *semantics.SemTable) error { - ti, err := semTable.TableInfoForExpr(expr.Expr) - if err != nil && err != semantics.ErrNotSingleTable { - return err - } - _, isDerivedTable := ti.(*semantics.DerivedTable) - if isDerivedTable { - expr.Expr = semantics.RewriteDerivedTableExpression(expr.Expr, ti) - } - return nil -} diff --git a/go/vt/vtgate/planbuilder/pullout_subquery.go b/go/vt/vtgate/planbuilder/pullout_subquery.go deleted file mode 100644 index 4e1008ff7ae..00000000000 --- a/go/vt/vtgate/planbuilder/pullout_subquery.go +++ /dev/null @@ -1,141 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" -) - -var _ logicalPlan = (*pulloutSubquery)(nil) - -// pulloutSubquery is the logicalPlan for engine.PulloutSubquery. -// This gets built if a subquery is not correlated and can -// therefore can be pulled out and executed upfront. 
-type pulloutSubquery struct { - order int - subquery logicalPlan - underlying logicalPlan - eSubquery *engine.PulloutSubquery -} - -// newPulloutSubquery builds a new pulloutSubquery. -func newPulloutSubquery(opcode popcode.PulloutOpcode, sqName, hasValues string, subquery logicalPlan) *pulloutSubquery { - return &pulloutSubquery{ - subquery: subquery, - eSubquery: &engine.PulloutSubquery{ - Opcode: opcode, - SubqueryResult: sqName, - HasValues: hasValues, - }, - } -} - -// setUnderlying sets the underlying primitive. -func (ps *pulloutSubquery) setUnderlying(underlying logicalPlan) { - ps.underlying = underlying - ps.underlying.Reorder(ps.subquery.Order()) - ps.order = ps.underlying.Order() + 1 -} - -// Order implements the logicalPlan interface -func (ps *pulloutSubquery) Order() int { - return ps.order -} - -// Reorder implements the logicalPlan interface -func (ps *pulloutSubquery) Reorder(order int) { - ps.subquery.Reorder(order) - ps.underlying.Reorder(ps.subquery.Order()) - ps.order = ps.underlying.Order() + 1 -} - -// Primitive implements the logicalPlan interface -func (ps *pulloutSubquery) Primitive() engine.Primitive { - ps.eSubquery.Subquery = ps.subquery.Primitive() - ps.eSubquery.Underlying = ps.underlying.Primitive() - return ps.eSubquery -} - -// ResultColumns implements the logicalPlan interface -func (ps *pulloutSubquery) ResultColumns() []*resultColumn { - return ps.underlying.ResultColumns() -} - -// Wireup implements the logicalPlan interface -func (ps *pulloutSubquery) Wireup(plan logicalPlan, jt *jointab) error { - if err := ps.underlying.Wireup(plan, jt); err != nil { - return err - } - return ps.subquery.Wireup(plan, jt) -} - -// Wireup2 implements the logicalPlan interface -func (ps *pulloutSubquery) WireupGen4(ctx *plancontext.PlanningContext) error { - if err := ps.underlying.WireupGen4(ctx); err != nil { - return err - } - return ps.subquery.WireupGen4(ctx) -} - -// SupplyVar implements the logicalPlan interface -func (ps 
*pulloutSubquery) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { - if from <= ps.subquery.Order() { - ps.subquery.SupplyVar(from, to, col, varname) - return - } - ps.underlying.SupplyVar(from, to, col, varname) -} - -// SupplyCol implements the logicalPlan interface -func (ps *pulloutSubquery) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) { - return ps.underlying.SupplyCol(col) -} - -// SupplyWeightString implements the logicalPlan interface -func (ps *pulloutSubquery) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weightcolNumber int, err error) { - return ps.underlying.SupplyWeightString(colNumber, alsoAddToGroupBy) -} - -// Rewrite implements the logicalPlan interface -func (ps *pulloutSubquery) Rewrite(inputs ...logicalPlan) error { - if len(inputs) != 2 { - return vterrors.VT13001("pulloutSubquery: wrong number of inputs") - } - ps.underlying = inputs[0] - ps.subquery = inputs[1] - return nil -} - -// ContainsTables implements the logicalPlan interface -func (ps *pulloutSubquery) ContainsTables() semantics.TableSet { - return ps.underlying.ContainsTables().Merge(ps.subquery.ContainsTables()) -} - -// Inputs implements the logicalPlan interface -func (ps *pulloutSubquery) Inputs() []logicalPlan { - return []logicalPlan{ps.underlying, ps.subquery} -} - -// OutputColumns implements the logicalPlan interface -func (ps *pulloutSubquery) OutputColumns() []sqlparser.SelectExpr { - return ps.underlying.OutputColumns() -} diff --git a/go/vt/vtgate/planbuilder/rewrite.go b/go/vt/vtgate/planbuilder/rewrite.go index 4a95696c0f0..f59441c77ac 100644 --- a/go/vt/vtgate/planbuilder/rewrite.go +++ b/go/vt/vtgate/planbuilder/rewrite.go @@ -18,8 +18,7 @@ package planbuilder import ( "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" + "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -39,21 +38,18 @@ func queryRewrite(semTable *semantics.SemTable, 
reservedVars *sqlparser.Reserved return nil } +func (r *rewriter) rewriteUp(cursor *sqlparser.Cursor) bool { + _, ok := cursor.Node().(*sqlparser.Subquery) + if ok { + r.inSubquery-- + } + return true +} + func (r *rewriter) rewriteDown(cursor *sqlparser.Cursor) bool { switch node := cursor.Node().(type) { case *sqlparser.Select: rewriteHavingClause(node) - case *sqlparser.ComparisonExpr: - err := rewriteInSubquery(cursor, r, node) - if err != nil { - r.err = err - } - case *sqlparser.ExistsExpr: - err := r.rewriteExistsSubquery(cursor, node) - if err != nil { - r.err = err - } - return false case *sqlparser.AliasedTableExpr: // rewrite names of the routed tables for the subquery // We only need to do this for non-derived tables and if they are in a subquery @@ -88,80 +84,12 @@ func (r *rewriter) rewriteDown(cursor *sqlparser.Cursor) bool { // replace the table name with the original table tableName.Name = vindexTable.Name node.Expr = tableName - case *sqlparser.ExtractedSubquery: - return false case *sqlparser.Subquery: - err := rewriteSubquery(cursor, r, node) - if err != nil { - r.err = err - } + r.inSubquery++ } return true } -func (r *rewriter) rewriteUp(cursor *sqlparser.Cursor) bool { - switch cursor.Node().(type) { - case *sqlparser.Subquery: - r.inSubquery-- - } - return r.err == nil -} - -func rewriteInSubquery(cursor *sqlparser.Cursor, r *rewriter, node *sqlparser.ComparisonExpr) error { - subq, exp := semantics.GetSubqueryAndOtherSide(node) - if subq == nil || exp == nil { - return nil - } - - semTableSQ, err := r.getSubQueryRef(subq) - if err != nil { - return err - } - - r.inSubquery++ - argName, hasValuesArg := r.reservedVars.ReserveSubQueryWithHasValues() - semTableSQ.SetArgName(argName) - semTableSQ.SetHasValuesArg(hasValuesArg) - cursor.Replace(semTableSQ) - return nil -} - -func rewriteSubquery(cursor *sqlparser.Cursor, r *rewriter, node *sqlparser.Subquery) error { - semTableSQ, err := r.getSubQueryRef(node) - if err != nil { - return err - } - 
if semTableSQ.GetArgName() != "" || popcode.PulloutOpcode(semTableSQ.OpCode) != popcode.PulloutValue { - return nil - } - r.inSubquery++ - argName := r.reservedVars.ReserveSubQuery() - semTableSQ.SetArgName(argName) - cursor.Replace(semTableSQ) - return nil -} - -func (r *rewriter) rewriteExistsSubquery(cursor *sqlparser.Cursor, node *sqlparser.ExistsExpr) error { - semTableSQ, err := r.getSubQueryRef(node.Subquery) - if err != nil { - return err - } - - r.inSubquery++ - hasValuesArg := r.reservedVars.ReserveHasValuesSubQuery() - semTableSQ.SetHasValuesArg(hasValuesArg) - cursor.Replace(semTableSQ) - return nil -} - -func (r *rewriter) getSubQueryRef(sq *sqlparser.Subquery) (*sqlparser.ExtractedSubquery, error) { - semTableSQ, found := r.semTable.SubqueryRef[sq] - if !found { - return nil, vterrors.VT13001("got subquery that was not in the subq map") - } - return semTableSQ, nil -} - func rewriteHavingClause(node *sqlparser.Select) { if node.Having == nil { return diff --git a/go/vt/vtgate/planbuilder/rewrite_test.go b/go/vt/vtgate/planbuilder/rewrite_test.go index b2e9fc7683d..292c94f448a 100644 --- a/go/vt/vtgate/planbuilder/rewrite_test.go +++ b/go/vt/vtgate/planbuilder/rewrite_test.go @@ -26,63 +26,6 @@ import ( "vitess.io/vitess/go/vt/vtgate/semantics" ) -func TestSubqueryRewrite(t *testing.T) { - tcases := []struct { - input string - output string - }{{ - input: "select 1 from t1", - output: "select 1 from t1", - }, { - input: "select (select 1) from t1", - output: "select :__sq1 from t1", - }, { - input: "select 1 from t1 where exists (select 1)", - output: "select 1 from t1 where :__sq_has_values1", - }, { - input: "select id from t1 where id in (select 1)", - output: "select id from t1 where :__sq_has_values1 = 1 and id in ::__sq1", - }, { - input: "select id from t1 where id not in (select 1)", - output: "select id from t1 where :__sq_has_values1 = 0 or id not in ::__sq1", - }, { - input: "select id from t1 where id = (select 1)", - output: "select id 
from t1 where id = :__sq1", - }, { - input: "select id from t1 where id >= (select 1)", - output: "select id from t1 where id >= :__sq1", - }, { - input: "select id from t1 where t1.id = (select 1 from t2 where t2.id = t1.id)", - output: "select id from t1 where t1.id = :__sq1", - }, { - input: "select id from t1 join t2 where t1.id = t2.id and exists (select 1)", - output: "select id from t1 join t2 where t1.id = t2.id and :__sq_has_values1", - }, { - input: "select id from t1 where not exists (select 1)", - output: "select id from t1 where not :__sq_has_values1", - }, { - input: "select id from t1 where not exists (select 1) and exists (select 2)", - output: "select id from t1 where not :__sq_has_values1 and :__sq_has_values2", - }, { - input: "select (select 1), (select 2) from t1 join t2 on t1.id = (select 1) where t1.id in (select 1)", - output: "select :__sq2, :__sq3 from t1 join t2 on t1.id = :__sq1 where :__sq_has_values4 = 1 and t1.id in ::__sq4", - }} - for _, tcase := range tcases { - t.Run(tcase.input, func(t *testing.T) { - ast, vars, err := sqlparser.Parse2(tcase.input) - require.NoError(t, err) - reservedVars := sqlparser.NewReservedVars("vtg", vars) - selectStatement, isSelectStatement := ast.(*sqlparser.Select) - require.True(t, isSelectStatement, "analyzer expects a select statement") - semTable, err := semantics.Analyze(selectStatement, "", &semantics.FakeSI{}) - require.NoError(t, err) - err = queryRewrite(semTable, reservedVars, selectStatement) - require.NoError(t, err) - assert.Equal(t, tcase.output, sqlparser.String(selectStatement)) - }) - } -} - func TestHavingRewrite(t *testing.T) { tcases := []struct { input string @@ -125,10 +68,7 @@ func TestHavingRewrite(t *testing.T) { input: "select count(*) k from t1 having k = 10", output: "select count(*) as k from t1 having count(*) = 10", }, { - input: "select 1 from t1 where x in (select 1 from t2 having a = 1)", - output: "select 1 from t1 where :__sq_has_values1 = 1 and x in ::__sq1", - sqs: 
map[string]string{"__sq1": "select 1 from t2 where a = 1"}, - }, {input: "select 1 from t1 group by a having a = 1 and count(*) > 1", + input: "select 1 from t1 group by a having a = 1 and count(*) > 1", output: "select 1 from t1 where a = 1 group by a having count(*) > 1", }} for _, tcase := range tcases { @@ -137,14 +77,6 @@ func TestHavingRewrite(t *testing.T) { err := queryRewrite(semTable, reservedVars, sel) require.NoError(t, err) assert.Equal(t, tcase.output, sqlparser.String(sel)) - squeries, found := semTable.SubqueryMap[sel] - if len(tcase.sqs) > 0 { - assert.True(t, found, "no subquery found in the query") - assert.Equal(t, len(tcase.sqs), len(squeries), "number of subqueries not matched") - } - for _, sq := range squeries { - assert.Equal(t, tcase.sqs[sq.GetArgName()], sqlparser.String(sq.Subquery.Select)) - } }) } } diff --git a/go/vt/vtgate/planbuilder/route.go b/go/vt/vtgate/planbuilder/route.go index ad48c9c4c31..3ad781f5235 100644 --- a/go/vt/vtgate/planbuilder/route.go +++ b/go/vt/vtgate/planbuilder/route.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Vitess Authors. +Copyright 2021 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,15 +17,12 @@ limitations under the License. package planbuilder import ( - "fmt" - "strconv" - - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -37,94 +34,28 @@ var _ logicalPlan = (*route)(nil) // are moved into this node, which will be used to build // the final SQL for this route. 
type route struct { - v3Plan - order int - - // Redirect may point to another route if this route - // was merged with it. The Resolve function chases - // this pointer till the last un-redirected route. - Redirect *route // Select is the AST for the query fragment that will be // executed by this route. Select sqlparser.SelectStatement - // resultColumns represent the columns returned by this route. - resultColumns []*resultColumn - - // weight_string keeps track of the weight_string expressions - // that were added additionally for each column. These expressions - // are added to be used for collation of text columns. - weightStrings map[*resultColumn]int - - // substitutions contain the list of table expressions that - // have to be substituted in the route's query. - substitutions []*tableSubstitution - // condition stores the AST condition that will be used // to resolve the ERoute Values field. condition sqlparser.Expr // eroute is the primitive being built. eroute *engine.Route -} - -type tableSubstitution struct { - newExpr, oldExpr *sqlparser.AliasedTableExpr -} -func newRoute(stmt sqlparser.SelectStatement) (*route, *symtab) { - rb := &route{ - Select: stmt, - order: 1, - weightStrings: make(map[*resultColumn]int), - } - return rb, newSymtabWithRoute(rb) -} - -// Resolve resolves redirects, and returns the last -// un-redirected route. -func (rb *route) Resolve() *route { - for rb.Redirect != nil { - rb = rb.Redirect - } - return rb -} - -// Order implements the logicalPlan interface -func (rb *route) Order() int { - return rb.order -} + // is the engine primitive we will return from the Primitive() method. 
Note that it could be different than eroute + enginePrimitive engine.Primitive -// Reorder implements the logicalPlan interface -func (rb *route) Reorder(order int) { - rb.order = order + 1 + // tables keeps track of which tables this route is covering + tables semantics.TableSet } // Primitive implements the logicalPlan interface func (rb *route) Primitive() engine.Primitive { - return rb.eroute -} - -// ResultColumns implements the logicalPlan interface -func (rb *route) ResultColumns() []*resultColumn { - return rb.resultColumns -} - -// PushAnonymous pushes an anonymous expression like '*' or NEXT VALUES -// into the select expression list of the route. This function is -// similar to PushSelect. -func (rb *route) PushAnonymous(expr sqlparser.SelectExpr) *resultColumn { - // TODO: we should not assume that the query is a SELECT - sel := rb.Select.(*sqlparser.Select) - sel.SelectExprs = append(sel.SelectExprs, expr) - - // We just create a place-holder resultColumn. It won't - // match anything. - rc := &resultColumn{column: &column{origin: rb}} - rb.resultColumns = append(rb.resultColumns, rc) - - return rc + return rb.enginePrimitive } // SetLimit adds a LIMIT clause to the route. @@ -133,81 +64,62 @@ func (rb *route) SetLimit(limit *sqlparser.Limit) { } // Wireup implements the logicalPlan interface -func (rb *route) Wireup(plan logicalPlan, jt *jointab) error { - // Precaution: update ERoute.Values only if it's not set already. - if rb.eroute.Values == nil { - // Resolve values stored in the logical plan. - switch vals := rb.condition.(type) { - case *sqlparser.ComparisonExpr: - pv, err := rb.procureValues(plan, jt, vals.Right) - if err != nil { - return err - } - rb.eroute.Values = []evalengine.Expr{pv} - vals.Right = sqlparser.ListArg(engine.ListVarName) - case nil: - // no-op. 
- default: - pv, err := rb.procureValues(plan, jt, vals) - if err != nil { - return err - } - rb.eroute.Values = []evalengine.Expr{pv} - } +func (rb *route) Wireup(ctx *plancontext.PlanningContext) error { + rb.prepareTheAST() + + // prepare the queries we will pass down + rb.eroute.Query = sqlparser.String(rb.Select) + buffer := sqlparser.NewTrackedBuffer(sqlparser.FormatImpossibleQuery) + node := buffer.WriteNode(rb.Select) + parsedQuery := node.ParsedQuery() + rb.eroute.FieldQuery = parsedQuery.Query + + // if we have a planable vindex lookup, let's extract it into its own primitive + planableVindex, ok := rb.eroute.RoutingParameters.Vindex.(vindexes.LookupPlanable) + if !ok { + rb.enginePrimitive = rb.eroute + return nil } - // Fix up the AST. - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { - switch node := node.(type) { - case *sqlparser.Select: - if len(node.SelectExprs) == 0 { - node.SelectExprs = []sqlparser.SelectExpr{ - &sqlparser.AliasedExpr{ - Expr: sqlparser.NewIntLiteral("1"), - }, - } - } - case *sqlparser.ComparisonExpr: - if node.Operator == sqlparser.EqualOp { - if rb.exprIsValue(node.Left) && !rb.exprIsValue(node.Right) { - node.Left, node.Right = node.Right, node.Left - } - } - } - return true, nil - }, rb.Select) + query, args := planableVindex.Query() + stmt, reserved, err := sqlparser.Parse2(query) + if err != nil { + return err + } + reservedVars := sqlparser.NewReservedVars("vtg", reserved) - // Substitute table names - for _, sub := range rb.substitutions { - *sub.oldExpr = *sub.newExpr + lookupPrimitive, err := gen4SelectStmtPlanner(query, querypb.ExecuteOptions_Gen4, stmt.(sqlparser.SelectStatement), reservedVars, ctx.VSchema) + if err != nil { + return vterrors.Wrapf(err, "failed to plan the lookup query: [%s]", query) } - // Generate query while simultaneously resolving values. 
- varFormatter := func(buf *sqlparser.TrackedBuffer, node sqlparser.SQLNode) { - switch node := node.(type) { - case *sqlparser.ColName: - if !rb.isLocal(node) { - joinVar := jt.Procure(plan, node, rb.Order()) - buf.WriteArg(":", joinVar) - return - } - case sqlparser.TableName: - if !sqlparser.SystemSchema(node.Qualifier.String()) { - node.Name.Format(buf) - return - } - node.Format(buf) - return - } - node.Format(buf) + rb.enginePrimitive = &engine.VindexLookup{ + Opcode: rb.eroute.Opcode, + Vindex: planableVindex, + Keyspace: rb.eroute.Keyspace, + Values: rb.eroute.Values, + SendTo: rb.eroute, + Arguments: args, + Lookup: lookupPrimitive.primitive, } - buf := sqlparser.NewTrackedBuffer(varFormatter) - varFormatter(buf, rb.Select) - rb.eroute.Query = buf.ParsedQuery().Query - rb.eroute.FieldQuery = rb.generateFieldQuery(rb.Select, jt) + + rb.eroute.RoutingParameters.Opcode = engine.ByDestination + rb.eroute.RoutingParameters.Values = nil + rb.eroute.RoutingParameters.Vindex = nil + return nil } +// ContainsTables implements the logicalPlan interface +func (rb *route) ContainsTables() semantics.TableSet { + return rb.tables +} + +// OutputColumns implements the logicalPlan interface +func (rb *route) OutputColumns() []sqlparser.SelectExpr { + return sqlparser.GetFirstSelect(rb.Select).SelectExprs +} + // prepareTheAST does minor fixups of the SELECT struct before producing the query string func (rb *route) prepareTheAST() { _ = sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { @@ -235,130 +147,6 @@ func (rb *route) prepareTheAST() { }, rb.Select) } -// procureValues procures and converts the input into -// the expected types for rb.Values. 
-func (rb *route) procureValues(plan logicalPlan, jt *jointab, val sqlparser.Expr) (evalengine.Expr, error) { - switch typedVal := val.(type) { - case sqlparser.ValTuple: - exprs := make([]evalengine.Expr, 0, len(typedVal)) - for _, item := range typedVal { - v, err := rb.procureValues(plan, jt, item) - if err != nil { - return nil, err - } - exprs = append(exprs, v) - } - return evalengine.NewTupleExpr(exprs...), nil - case *sqlparser.ColName: - joinVar := jt.Procure(plan, typedVal, rb.Order()) - return evalengine.NewBindVar(joinVar, sqltypes.Unknown, collations.Unknown), nil - default: - return evalengine.Translate(typedVal, nil) - } -} - -func (rb *route) isLocal(col *sqlparser.ColName) bool { - return col.Metadata.(*column).Origin() == rb -} - -// generateFieldQuery generates a query with an impossible where. -// This will be used on the RHS node to fetch field info if the LHS -// returns no result. -func (rb *route) generateFieldQuery(sel sqlparser.SelectStatement, jt *jointab) string { - formatter := func(buf *sqlparser.TrackedBuffer, node sqlparser.SQLNode) { - switch node := node.(type) { - case *sqlparser.ColName: - if !rb.isLocal(node) { - _, joinVar := jt.Lookup(node) - buf.WriteArg(":", joinVar) - return - } - case sqlparser.TableName: - if !sqlparser.SystemSchema(node.Qualifier.String()) { - node.Name.Format(buf) - return - } - node.Format(buf) - return - } - sqlparser.FormatImpossibleQuery(buf, node) - } - - buffer := sqlparser.NewTrackedBuffer(formatter) - node := buffer.WriteNode(sel) - query := node.ParsedQuery() - return query.Query -} - -// SupplyVar implements the logicalPlan interface -func (rb *route) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { - // route is an atomic primitive. So, SupplyVar cannot be - // called on it. 
- panic("BUG: route is an atomic node.") -} - -// SupplyCol implements the logicalPlan interface -func (rb *route) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) { - c := col.Metadata.(*column) - for i, rc := range rb.resultColumns { - if rc.column == c { - return rc, i - } - } - - // A new result has to be returned. - rc = &resultColumn{column: c} - rb.resultColumns = append(rb.resultColumns, rc) - // TODO: we should not assume that the query is a SELECT query - sel := rb.Select.(*sqlparser.Select) - sel.SelectExprs = append(sel.SelectExprs, &sqlparser.AliasedExpr{Expr: col}) - return rc, len(rb.resultColumns) - 1 -} - -// SupplyWeightString implements the logicalPlan interface -func (rb *route) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weightcolNumber int, err error) { - rc := rb.resultColumns[colNumber] - s, ok := rb.Select.(*sqlparser.Select) - if !ok { - return 0, vterrors.VT13001("unexpected AST struct for query") - } - - aliasExpr, ok := s.SelectExprs[colNumber].(*sqlparser.AliasedExpr) - if !ok { - return 0, vterrors.VT13001(fmt.Sprintf("unexpected AST struct for query %T", s.SelectExprs[colNumber])) - } - weightStringExpr := &sqlparser.FuncExpr{ - Name: sqlparser.NewIdentifierCI("weight_string"), - Exprs: []sqlparser.SelectExpr{ - &sqlparser.AliasedExpr{ - Expr: aliasExpr.Expr, - }, - }, - } - expr := &sqlparser.AliasedExpr{ - Expr: weightStringExpr, - } - if alsoAddToGroupBy { - sel, isSelect := rb.Select.(*sqlparser.Select) - if !isSelect { - return 0, vterrors.VT13001(fmt.Sprintf("cannot add weight string in %T", rb.Select)) - } - sel.AddGroupBy(weightStringExpr) - } - - if weightcolNumber, ok := rb.weightStrings[rc]; ok { - return weightcolNumber, nil - } - // It's ok to pass nil for pb and logicalPlan because PushSelect doesn't use them. - // TODO: we are ignoring a potential error here. 
need to clean this up - _, _, weightcolNumber, err = planProjection(nil, rb, expr, nil) - if err != nil { - return 0, err - } - rb.weightStrings[rc] = weightcolNumber - return weightcolNumber, nil -} - // Rewrite implements the logicalPlan interface func (rb *route) Rewrite(inputs ...logicalPlan) error { if len(inputs) != 0 { @@ -372,487 +160,6 @@ func (rb *route) Inputs() []logicalPlan { return []logicalPlan{} } -// MergeSubquery returns true if the subquery route could successfully be merged -// with the outer route. -func (rb *route) MergeSubquery(pb *primitiveBuilder, inner *route) bool { - if rb.SubqueryCanMerge(pb, inner) { - if inner.eroute.Opcode == engine.DBA && (len(inner.eroute.SysTableTableName) > 0 || len(inner.eroute.SysTableTableSchema) > 0) { - switch rb.eroute.Opcode { - case engine.DBA, engine.Reference: - rb.eroute.SysTableTableSchema = append(rb.eroute.SysTableTableSchema, inner.eroute.SysTableTableSchema...) - for k, v := range inner.eroute.SysTableTableName { - if rb.eroute.SysTableTableName == nil { - rb.eroute.SysTableTableName = map[string]evalengine.Expr{} - } - rb.eroute.SysTableTableName[k] = v - } - rb.eroute.Opcode = engine.DBA - default: - return false - } - } else { - if rb.eroute.Opcode == engine.Reference { - rb.eroute.RoutingParameters = inner.eroute.RoutingParameters - rb.condition = inner.condition - } - } - - rb.substitutions = append(rb.substitutions, inner.substitutions...) - inner.Redirect = rb - return true - } - return false -} - -// MergeUnion returns true if the rhs route could successfully be merged -// with the rb route. -func (rb *route) MergeUnion(right *route, isDistinct bool) bool { - if rb.unionCanMerge(right, isDistinct) { - rb.substitutions = append(rb.substitutions, right.substitutions...) 
- right.Redirect = rb - return true - } - return false -} - func (rb *route) isSingleShard() bool { return rb.eroute.Opcode.IsSingleShard() } - -// JoinCanMerge, SubqueryCanMerge and unionCanMerge have subtly different behaviors. -// The difference in behavior is around SelectReference. -// It's not worth trying to reuse the code between them. -func (rb *route) JoinCanMerge(pb *primitiveBuilder, rrb *route, ajoin *sqlparser.JoinTableExpr, where sqlparser.Expr) bool { - if rb.eroute.Keyspace.Name != rrb.eroute.Keyspace.Name { - return false - } - if rrb.eroute.Opcode == engine.Reference { - // Any opcode can join with a reference table. - return true - } - switch rb.eroute.Opcode { - case engine.Unsharded: - return rb.eroute.Opcode == rrb.eroute.Opcode - case engine.EqualUnique: - // Check if they target the same shard. - if rrb.eroute.Opcode == engine.EqualUnique && rb.eroute.Vindex == rrb.eroute.Vindex && valEqual(rb.condition, rrb.condition) { - return true - } - case engine.Reference: - return true - case engine.Next: - return false - case engine.DBA: - if rrb.eroute.Opcode != engine.DBA { - return false - } - if where == nil { - return true - } - return ajoin != nil - } - if ajoin == nil { - return false - } - for _, filter := range sqlparser.SplitAndExpression(nil, ajoin.Condition.On) { - if rb.canMergeOnFilter(pb, rrb, filter) { - return true - } - } - return false -} - -func (rb *route) SubqueryCanMerge(pb *primitiveBuilder, inner *route) bool { - if rb.eroute.Keyspace.Name != inner.eroute.Keyspace.Name { - return false - } - - // if either side is a reference table, and we know the other side will only run once, - // we can just merge them and use the opcode of the other side - if rb.eroute.Opcode == engine.Reference || inner.eroute.Opcode == engine.Reference { - return rb.isSingleShard() && inner.isSingleShard() - } - - switch rb.eroute.Opcode { - case engine.Unsharded, engine.DBA: - return rb.eroute.Opcode == inner.eroute.Opcode - case engine.EqualUnique: 
- // Check if they target the same shard. - if inner.eroute.Opcode == engine.EqualUnique && rb.eroute.Vindex == inner.eroute.Vindex && valEqual(rb.condition, inner.condition) { - return true - } - case engine.Next: - return false - } - - switch vals := inner.condition.(type) { - case *sqlparser.ColName: - if pb.st.Vindex(vals, rb) == inner.eroute.Vindex { - return true - } - } - return false -} - -func (rb *route) unionCanMerge(other *route, distinct bool) bool { - if rb.eroute.Keyspace.Name != other.eroute.Keyspace.Name { - return false - } - switch rb.eroute.Opcode { - case engine.Unsharded, engine.Reference: - return rb.eroute.Opcode == other.eroute.Opcode - case engine.DBA: - return other.eroute.Opcode == engine.DBA && - len(rb.eroute.SysTableTableSchema) == 0 && - len(rb.eroute.SysTableTableName) == 0 && - len(other.eroute.SysTableTableSchema) == 0 && - len(other.eroute.SysTableTableName) == 0 - case engine.EqualUnique: - // Check if they target the same shard. - if other.eroute.Opcode == engine.EqualUnique && rb.eroute.Vindex == other.eroute.Vindex && valEqual(rb.condition, other.condition) { - return true - } - case engine.Scatter: - return other.eroute.Opcode == engine.Scatter && !distinct - case engine.Next: - return false - } - return false -} - -// canMergeOnFilter returns true if the join constraint makes the routes -// mergeable by unique vindex. The constraint has to be an equality -// like a.id = b.id where both columns have the same unique vindex. 
-func (rb *route) canMergeOnFilter(pb *primitiveBuilder, rrb *route, filter sqlparser.Expr) bool { - comparison, ok := filter.(*sqlparser.ComparisonExpr) - if !ok { - return false - } - if comparison.Operator != sqlparser.EqualOp { - return false - } - left := comparison.Left - right := comparison.Right - lVindex := pb.st.Vindex(left, rb) - if lVindex == nil { - left, right = right, left - lVindex = pb.st.Vindex(left, rb) - } - if lVindex == nil || !lVindex.IsUnique() { - return false - } - rVindex := pb.st.Vindex(right, rrb) - if rVindex == nil { - return false - } - return rVindex == lVindex -} - -// UpdatePlan evaluates the primitive against the specified -// filter. If it's an improvement, the primitive is updated. -// We assume that the filter has already been pushed into -// the route. -func (rb *route) UpdatePlan(pb *primitiveBuilder, filter sqlparser.Expr) { - switch rb.eroute.Opcode { - // For these opcodes, a new filter will not make any difference, so we can just exit early - case engine.Unsharded, engine.Next, engine.DBA, engine.Reference, engine.None: - return - } - opcode, vindex, values := rb.computePlan(pb, filter) - if opcode == engine.Scatter { - return - } - // If we get SelectNone in next filters, override the previous route plan. 
- if opcode == engine.None { - rb.updateRoute(opcode, vindex, values) - return - } - switch rb.eroute.Opcode { - case engine.EqualUnique: - if opcode == engine.EqualUnique && vindex.Cost() < rb.eroute.Vindex.Cost() { - rb.updateRoute(opcode, vindex, values) - } - case engine.Equal: - switch opcode { - case engine.EqualUnique: - rb.updateRoute(opcode, vindex, values) - case engine.Equal: - if vindex.Cost() < rb.eroute.Vindex.Cost() { - rb.updateRoute(opcode, vindex, values) - } - } - case engine.IN: - switch opcode { - case engine.EqualUnique, engine.Equal: - rb.updateRoute(opcode, vindex, values) - case engine.IN: - if vindex.Cost() < rb.eroute.Vindex.Cost() { - rb.updateRoute(opcode, vindex, values) - } - } - case engine.MultiEqual: - switch opcode { - case engine.EqualUnique, engine.Equal, engine.IN: - rb.updateRoute(opcode, vindex, values) - case engine.MultiEqual: - if vindex.Cost() < rb.eroute.Vindex.Cost() { - rb.updateRoute(opcode, vindex, values) - } - } - case engine.Scatter: - switch opcode { - case engine.EqualUnique, engine.Equal, engine.IN, engine.MultiEqual, engine.None: - rb.updateRoute(opcode, vindex, values) - } - } -} - -func (rb *route) updateRoute(opcode engine.Opcode, vindex vindexes.SingleColumn, condition sqlparser.Expr) { - rb.eroute.Opcode = opcode - rb.eroute.Vindex = vindex - rb.condition = condition -} - -// computePlan computes the plan for the specified filter. 
-func (rb *route) computePlan(pb *primitiveBuilder, filter sqlparser.Expr) (opcode engine.Opcode, vindex vindexes.SingleColumn, condition sqlparser.Expr) { - switch node := filter.(type) { - case *sqlparser.ComparisonExpr: - switch node.Operator { - case sqlparser.EqualOp: - return rb.computeEqualPlan(pb, node) - case sqlparser.InOp: - return rb.computeINPlan(pb, node) - case sqlparser.NotInOp: - return rb.computeNotInPlan(node.Right), nil, nil - case sqlparser.LikeOp: - return rb.computeLikePlan(pb, node) - } - case *sqlparser.IsExpr: - return rb.computeISPlan(pb, node) - } - return engine.Scatter, nil, nil -} - -// computeLikePlan computes the plan for 'LIKE' constraint -func (rb *route) computeLikePlan(pb *primitiveBuilder, comparison *sqlparser.ComparisonExpr) (opcode engine.Opcode, vindex vindexes.SingleColumn, condition sqlparser.Expr) { - - left := comparison.Left - right := comparison.Right - - if sqlparser.IsNull(right) { - return engine.None, nil, nil - } - if !rb.exprIsValue(right) { - return engine.Scatter, nil, nil - } - vindex = pb.st.Vindex(left, rb) - if vindex == nil { - // if there is no vindex defined, scatter - return engine.Scatter, nil, nil - } - if subsharding, ok := vindex.(vindexes.Prefixable); ok { - return engine.Equal, subsharding.PrefixVindex(), right - } - - return engine.Scatter, nil, nil -} - -// computeEqualPlan computes the plan for an equality constraint. 
-func (rb *route) computeEqualPlan(pb *primitiveBuilder, comparison *sqlparser.ComparisonExpr) (opcode engine.Opcode, vindex vindexes.SingleColumn, condition sqlparser.Expr) { - left := comparison.Left - right := comparison.Right - - if sqlparser.IsNull(right) { - return engine.None, nil, nil - } - - vindex = pb.st.Vindex(left, rb) - if vindex == nil { - left, right = right, left - vindex = pb.st.Vindex(left, rb) - if vindex == nil { - return engine.Scatter, nil, nil - } - } - if !rb.exprIsValue(right) { - return engine.Scatter, nil, nil - } - if vindex.IsUnique() { - return engine.EqualUnique, vindex, right - } - return engine.Equal, vindex, right -} - -// computeIS computes the plan for an equality constraint. -func (rb *route) computeISPlan(pb *primitiveBuilder, comparison *sqlparser.IsExpr) (opcode engine.Opcode, vindex vindexes.SingleColumn, expr sqlparser.Expr) { - // we only handle IS NULL correct. IsExpr can contain other expressions as well - if comparison.Right != sqlparser.IsNullOp { - return engine.Scatter, nil, nil - } - - vindex = pb.st.Vindex(comparison.Left, rb) - // fallback to scatter gather if there is no vindex - if vindex == nil { - return engine.Scatter, nil, nil - } - if _, isLookup := vindex.(vindexes.Lookup); isLookup { - // the lookup table is keyed by the lookup value, so it does not support nulls - return engine.Scatter, nil, nil - } - if vindex.IsUnique() { - return engine.EqualUnique, vindex, &sqlparser.NullVal{} - } - return engine.Equal, vindex, &sqlparser.NullVal{} -} - -// computeINPlan computes the plan for an IN constraint. 
-func (rb *route) computeINPlan(pb *primitiveBuilder, comparison *sqlparser.ComparisonExpr) (opcode engine.Opcode, vindex vindexes.SingleColumn, expr sqlparser.Expr) { - switch comparison.Left.(type) { - case *sqlparser.ColName: - return rb.computeSimpleINPlan(pb, comparison) - case sqlparser.ValTuple: - return rb.computeCompositeINPlan(pb, comparison) - } - return engine.Scatter, nil, nil -} - -// computeSimpleINPlan computes the plan for a simple IN constraint. -func (rb *route) computeSimpleINPlan(pb *primitiveBuilder, comparison *sqlparser.ComparisonExpr) (opcode engine.Opcode, vindex vindexes.SingleColumn, expr sqlparser.Expr) { - vindex = pb.st.Vindex(comparison.Left, rb) - if vindex == nil { - return engine.Scatter, nil, nil - } - switch node := comparison.Right.(type) { - case sqlparser.ValTuple: - if len(node) == 1 && sqlparser.IsNull(node[0]) { - return engine.None, nil, nil - } - - for _, n := range node { - if !rb.exprIsValue(n) { - return engine.Scatter, nil, nil - } - } - return engine.IN, vindex, comparison - case sqlparser.ListArg: - return engine.IN, vindex, comparison - } - return engine.Scatter, nil, nil -} - -// computeCompositeINPlan computes the plan for a composite IN constraint. -func (rb *route) computeCompositeINPlan(pb *primitiveBuilder, comparison *sqlparser.ComparisonExpr) (opcode engine.Opcode, vindex vindexes.SingleColumn, values sqlparser.Expr) { - leftTuple := comparison.Left.(sqlparser.ValTuple) - return rb.iterateCompositeIN(pb, comparison, nil, leftTuple) -} - -// iterateCompositeIN recursively walks the LHS tuple of the IN clause looking -// for column names. For those that match a vindex, it builds a multi-value plan -// using the corresponding values in the RHS. It returns the best of the plans built. 
-func (rb *route) iterateCompositeIN( - pb *primitiveBuilder, - comparison *sqlparser.ComparisonExpr, - coordinates []int, - tuple sqlparser.ValTuple, -) (opcode engine.Opcode, vindex vindexes.SingleColumn, values sqlparser.Expr) { - opcode = engine.Scatter - - cindex := len(coordinates) - coordinates = append(coordinates, 0) - for idx, expr := range tuple { - coordinates[cindex] = idx - switch expr := expr.(type) { - case sqlparser.ValTuple: - newOpcode, newVindex, newValues := rb.iterateCompositeIN(pb, comparison, coordinates, expr) - opcode, vindex, values = bestOfComposite(opcode, newOpcode, vindex, newVindex, values, newValues) - case *sqlparser.ColName: - newVindex := pb.st.Vindex(expr, rb) - if newVindex != nil { - newOpcode, newValues := rb.compositePlanForCol(pb, comparison, coordinates) - opcode, vindex, values = bestOfComposite(opcode, newOpcode, vindex, newVindex, values, newValues) - } - } - } - return opcode, vindex, values -} - -// compositePlanForCol builds a plan for a matched column in the LHS -// of a composite IN clause. -func (rb *route) compositePlanForCol(pb *primitiveBuilder, comparison *sqlparser.ComparisonExpr, coordinates []int) (opcode engine.Opcode, values sqlparser.Expr) { - rightTuple, ok := comparison.Right.(sqlparser.ValTuple) - if !ok { - return engine.Scatter, nil - } - retVal := make(sqlparser.ValTuple, len(rightTuple)) - for i, rval := range rightTuple { - val := tupleAccess(rval, coordinates) - if val == nil { - return engine.Scatter, nil - } - if !rb.exprIsValue(val) { - return engine.Scatter, nil - } - retVal[i] = val - } - return engine.MultiEqual, retVal -} - -// tupleAccess returns the value of the expression that corresponds -// to the specified coordinates. 
-func tupleAccess(expr sqlparser.Expr, coordinates []int) sqlparser.Expr { - tuple, _ := expr.(sqlparser.ValTuple) - for _, idx := range coordinates { - if idx >= len(tuple) { - return nil - } - expr = tuple[idx] - tuple, _ = expr.(sqlparser.ValTuple) - } - return expr -} - -// bestOfComposite returns the best of two composite IN clause plans. -func bestOfComposite(opcode1, opcode2 engine.Opcode, vindex1, vindex2 vindexes.SingleColumn, values1, values2 sqlparser.Expr) (opcode engine.Opcode, vindex vindexes.SingleColumn, values sqlparser.Expr) { - if opcode1 == engine.Scatter { - return opcode2, vindex2, values2 - } - if opcode2 == engine.Scatter { - return opcode1, vindex1, values1 - } - if vindex1.Cost() < vindex2.Cost() { - return opcode1, vindex1, values1 - } - return opcode2, vindex2, values2 -} - -// computeNotInPlan looks for null values to produce a SelectNone if found -func (rb *route) computeNotInPlan(right sqlparser.Expr) engine.Opcode { - switch node := right.(type) { - case sqlparser.ValTuple: - for _, n := range node { - if sqlparser.IsNull(n) { - return engine.None - } - } - } - - return engine.Scatter -} - -// exprIsValue returns true if the expression can be treated as a value -// for the routeOption. External references are treated as value. -func (rb *route) exprIsValue(expr sqlparser.Expr) bool { - if node, ok := expr.(*sqlparser.ColName); ok { - return node.Metadata.(*column).Origin() != rb - } - return sqlparser.IsValue(expr) -} - -// queryTimeout returns DirectiveQueryTimeout value if set, otherwise returns 0. 
-func queryTimeout(d *sqlparser.CommentDirectives) int { - val, _ := d.GetString(sqlparser.DirectiveQueryTimeout, "0") - if intVal, err := strconv.Atoi(val); err == nil { - return intVal - } - return 0 -} diff --git a/go/vt/vtgate/planbuilder/routeGen4.go b/go/vt/vtgate/planbuilder/routeGen4.go deleted file mode 100644 index a5b6982319e..00000000000 --- a/go/vt/vtgate/planbuilder/routeGen4.go +++ /dev/null @@ -1,255 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/vindexes" -) - -var _ logicalPlan = (*routeGen4)(nil) - -// routeGen4 is used to build a Route primitive. -// It's used to build one of the Select routes like -// SelectScatter, etc. Portions of the original Select AST -// are moved into this node, which will be used to build -// the final SQL for this route. -type routeGen4 struct { - gen4Plan - - // Select is the AST for the query fragment that will be - // executed by this route. - Select sqlparser.SelectStatement - - // condition stores the AST condition that will be used - // to resolve the ERoute Values field. - condition sqlparser.Expr - - // eroute is the primitive being built. 
- eroute *engine.Route - - // is the engine primitive we will return from the Primitive() method. Note that it could be different than eroute - enginePrimitive engine.Primitive - - // tables keeps track of which tables this route is covering - tables semantics.TableSet -} - -// Primitive implements the logicalPlan interface -func (rb *routeGen4) Primitive() engine.Primitive { - return rb.enginePrimitive -} - -// SetLimit adds a LIMIT clause to the route. -func (rb *routeGen4) SetLimit(limit *sqlparser.Limit) { - rb.Select.SetLimit(limit) -} - -// WireupGen4 implements the logicalPlan interface -func (rb *routeGen4) WireupGen4(ctx *plancontext.PlanningContext) error { - rb.prepareTheAST() - - // prepare the queries we will pass down - rb.eroute.Query = sqlparser.String(rb.Select) - buffer := sqlparser.NewTrackedBuffer(sqlparser.FormatImpossibleQuery) - node := buffer.WriteNode(rb.Select) - parsedQuery := node.ParsedQuery() - rb.eroute.FieldQuery = parsedQuery.Query - - // if we have a planable vindex lookup, let's extract it into its own primitive - planableVindex, ok := rb.eroute.RoutingParameters.Vindex.(vindexes.LookupPlanable) - if !ok { - rb.enginePrimitive = rb.eroute - return nil - } - - query, args := planableVindex.Query() - stmt, reserved, err := sqlparser.Parse2(query) - if err != nil { - return err - } - reservedVars := sqlparser.NewReservedVars("vtg", reserved) - - lookupPrimitive, err := gen4SelectStmtPlanner(query, querypb.ExecuteOptions_Gen4, stmt.(sqlparser.SelectStatement), reservedVars, ctx.VSchema) - if err != nil { - return vterrors.Wrapf(err, "failed to plan the lookup query: [%s]", query) - } - - rb.enginePrimitive = &engine.VindexLookup{ - Opcode: rb.eroute.Opcode, - Vindex: planableVindex, - Keyspace: rb.eroute.Keyspace, - Values: rb.eroute.Values, - SendTo: rb.eroute, - Arguments: args, - Lookup: lookupPrimitive.primitive, - } - - rb.eroute.RoutingParameters.Opcode = engine.ByDestination - rb.eroute.RoutingParameters.Values = nil - 
rb.eroute.RoutingParameters.Vindex = nil - - return nil -} - -// ContainsTables implements the logicalPlan interface -func (rb *routeGen4) ContainsTables() semantics.TableSet { - return rb.tables -} - -// OutputColumns implements the logicalPlan interface -func (rb *routeGen4) OutputColumns() []sqlparser.SelectExpr { - return sqlparser.GetFirstSelect(rb.Select).SelectExprs -} - -// prepareTheAST does minor fixups of the SELECT struct before producing the query string -func (rb *routeGen4) prepareTheAST() { - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { - switch node := node.(type) { - case *sqlparser.Select: - if len(node.SelectExprs) == 0 { - node.SelectExprs = []sqlparser.SelectExpr{ - &sqlparser.AliasedExpr{ - Expr: sqlparser.NewIntLiteral("1"), - }, - } - } - case *sqlparser.ComparisonExpr: - // 42 = colName -> colName = 42 - b := node.Operator == sqlparser.EqualOp - value := sqlparser.IsValue(node.Left) - name := sqlparser.IsColName(node.Right) - if b && - value && - name { - node.Left, node.Right = node.Right, node.Left - } - } - return true, nil - }, rb.Select) -} - -func (rb *routeGen4) isLocal(col *sqlparser.ColName) bool { - return col.Metadata.(*column).Origin() == rb -} - -// generateFieldQuery generates a query with an impossible where. -// This will be used on the RHS node to fetch field info if the LHS -// returns no result. 
-func (rb *routeGen4) generateFieldQuery(sel sqlparser.SelectStatement, jt *jointab) string { - formatter := func(buf *sqlparser.TrackedBuffer, node sqlparser.SQLNode) { - switch node := node.(type) { - case *sqlparser.ColName: - if !rb.isLocal(node) { - _, joinVar := jt.Lookup(node) - buf.WriteArg(":", joinVar) - return - } - case sqlparser.TableName: - if !sqlparser.SystemSchema(node.Qualifier.String()) { - node.Name.Format(buf) - return - } - node.Format(buf) - return - } - sqlparser.FormatImpossibleQuery(buf, node) - } - - buffer := sqlparser.NewTrackedBuffer(formatter) - node := buffer.WriteNode(sel) - query := node.ParsedQuery() - return query.Query -} - -// Rewrite implements the logicalPlan interface -func (rb *routeGen4) Rewrite(inputs ...logicalPlan) error { - if len(inputs) != 0 { - return vterrors.VT13001("route: wrong number of inputs") - } - return nil -} - -// Inputs implements the logicalPlan interface -func (rb *routeGen4) Inputs() []logicalPlan { - return []logicalPlan{} -} - -func (rb *routeGen4) isSingleShard() bool { - return rb.eroute.Opcode.IsSingleShard() -} - -func (rb *routeGen4) unionCanMerge(other *routeGen4, distinct bool) bool { - if rb.eroute.Keyspace.Name != other.eroute.Keyspace.Name { - return false - } - switch rb.eroute.Opcode { - case engine.Unsharded, engine.Reference: - return rb.eroute.Opcode == other.eroute.Opcode - case engine.DBA: - return other.eroute.Opcode == engine.DBA && - len(rb.eroute.SysTableTableSchema) == 0 && - len(rb.eroute.SysTableTableName) == 0 && - len(other.eroute.SysTableTableSchema) == 0 && - len(other.eroute.SysTableTableName) == 0 - case engine.EqualUnique: - // Check if they target the same shard. 
- if other.eroute.Opcode == engine.EqualUnique && rb.eroute.Vindex == other.eroute.Vindex && valEqual(rb.condition, other.condition) { - return true - } - case engine.Scatter: - return other.eroute.Opcode == engine.Scatter && !distinct - case engine.Next: - return false - } - return false -} - -func (rb *routeGen4) updateRoute(opcode engine.Opcode, vindex vindexes.SingleColumn, condition sqlparser.Expr) { - rb.eroute.Opcode = opcode - rb.eroute.Vindex = vindex - rb.condition = condition -} - -// computeNotInPlan looks for null values to produce a SelectNone if found -func (rb *routeGen4) computeNotInPlan(right sqlparser.Expr) engine.Opcode { - switch node := right.(type) { - case sqlparser.ValTuple: - for _, n := range node { - if sqlparser.IsNull(n) { - return engine.None - } - } - } - - return engine.Scatter -} - -// exprIsValue returns true if the expression can be treated as a value -// for the routeOption. External references are treated as value. -func (rb *routeGen4) exprIsValue(expr sqlparser.Expr) bool { - if node, ok := expr.(*sqlparser.ColName); ok { - return node.Metadata.(*column).Origin() != rb - } - return sqlparser.IsValue(expr) -} diff --git a/go/vt/vtgate/planbuilder/route_test.go b/go/vt/vtgate/planbuilder/route_test.go deleted file mode 100644 index 9f4c8fa3b97..00000000000 --- a/go/vt/vtgate/planbuilder/route_test.go +++ /dev/null @@ -1,168 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package planbuilder - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/vindexes" -) - -/* - -This test file only tests the V3 planner. It does not test the Subshard opcode - -For easy reference, opcodes are: - Unsharded 0 - EqualUnique 1 - Equal 2 - IN 3 - MultiEqual 4 - Scatter 5 - Next 6 - DBA 7 - Reference 8 - None 9 -*/ - -func TestJoinCanMerge(t *testing.T) { - testcases := [][]bool{ - {true, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - {false, true, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - - {false, false, false, false, false, false, false, false, false, false, false, false}, // this whole line is not tested - - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, true, true, false, false}, - {true, true, true, true, true /*not tested*/, false, true, true, true, true, true, true}, - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - } - - ks := &vindexes.Keyspace{} - for left, vals := range testcases { - for right, val := range vals { - name := fmt.Sprintf("%s:%s", engine.Opcode(left).String(), engine.Opcode(right).String()) - if left == int(engine.SubShard) 
|| right == int(engine.SubShard) { - continue // not used by v3 - } - - t.Run(name, func(t *testing.T) { - lRoute := &route{ - // Setting condition will make SelectEqualUnique match itself. - condition: &sqlparser.ColName{}, - } - pb := &primitiveBuilder{ - plan: lRoute, - } - rRoute := &route{ - condition: &sqlparser.ColName{}, - } - lRoute.eroute = engine.NewSimpleRoute(engine.Opcode(left), ks) - rRoute.eroute = engine.NewSimpleRoute(engine.Opcode(right), ks) - assert.Equal(t, val, lRoute.JoinCanMerge(pb, rRoute, nil, nil), fmt.Sprintf("%v:%v", lRoute.eroute.RouteType(), rRoute.eroute.RouteType())) - }) - } - } -} - -func TestSubqueryCanMerge(t *testing.T) { - testcases := [][]bool{ - // US EU E IN ME subShard scatter nxt dba ref none byD - {true, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, // unsharded - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, // equalUnique - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, // equal - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, // in - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, // multiEqual - - {false, false, false, false, false, false, false, false, false, false, false, false, false}, // subshard - this whole line is not tested - - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, // scatter - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, // next - {false, false, false, false, false /*not tested*/, false, false, false, true, true, false, false}, // dba - {true, true, false, false, false /*not tested*/, false, false, true, true, true, false, false}, // reference - {false, false, false, false, false /*not tested*/, false, false, false, false, false, 
false, false}, // none - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, // byDestination - } - - ks := &vindexes.Keyspace{} - lRoute := &route{} - pb := &primitiveBuilder{ - plan: lRoute, - } - rRoute := &route{} - for left, vals := range testcases { - lRoute.eroute = engine.NewSimpleRoute(engine.Opcode(left), ks) - for right, val := range vals { - name := fmt.Sprintf("%s:%s", engine.Opcode(left).String(), engine.Opcode(right).String()) - t.Run(name, func(t *testing.T) { - if left == int(engine.SubShard) || right == int(engine.SubShard) { - t.Skip("not used by v3") - } - - rRoute.eroute = engine.NewSimpleRoute(engine.Opcode(right), ks) - assert.Equal(t, val, lRoute.SubqueryCanMerge(pb, rRoute), fmt.Sprintf("%v:%v", lRoute.eroute.RouteType(), rRoute.eroute.RouteType())) - }) - } - } -} - -func TestUnionCanMerge(t *testing.T) { - testcases := [][]bool{ - {true, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, - - {false, false, false, false, false, false, false, false, false, false, false, false, false}, // this whole line is not tested - - {false, false, false, false, false /*not tested*/, false, true, false, false, false, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, true, false, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - {false, false, false, 
false, false /*not tested*/, false, false, false, false, false, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, - } - - ks := &vindexes.Keyspace{} - lRoute := &route{} - rRoute := &route{} - for left, vals := range testcases { - lRoute.eroute = engine.NewSimpleRoute(engine.Opcode(left), ks) - for right, val := range vals { - name := fmt.Sprintf("%s:%s", engine.Opcode(left).String(), engine.Opcode(right).String()) - t.Run(name, func(t *testing.T) { - if left == int(engine.SubShard) || right == int(engine.SubShard) { - t.Skip("not used by v3") - } - - rRoute.eroute = engine.NewSimpleRoute(engine.Opcode(right), ks) - assert.Equal(t, val, lRoute.unionCanMerge(rRoute, false), fmt.Sprintf("can't create a single route from these two inputs %v:%v", lRoute.eroute.RouteType(), rRoute.eroute.RouteType())) - }) - } - } -} diff --git a/go/vt/vtgate/planbuilder/select.go b/go/vt/vtgate/planbuilder/select.go index f6de8264d85..a6ea8a7c8b1 100644 --- a/go/vt/vtgate/planbuilder/select.go +++ b/go/vt/vtgate/planbuilder/select.go @@ -19,280 +19,116 @@ package planbuilder import ( "fmt" - "vitess.io/vitess/go/vt/log" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/key" - - "vitess.io/vitess/go/vt/vterrors" - - "vitess.io/vitess/go/vt/vtgate/evalengine" - + querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" ) -func buildSelectPlan(query string) stmtPlanner { - return func(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { - sel := stmt.(*sqlparser.Select) 
- if sel.With != nil { +func gen4SelectStmtPlanner( + query string, + plannerVersion querypb.ExecuteOptions_PlannerVersion, + stmt sqlparser.SelectStatement, + reservedVars *sqlparser.ReservedVars, + vschema plancontext.VSchema, +) (*planResult, error) { + switch node := stmt.(type) { + case *sqlparser.Select: + if node.With != nil { return nil, vterrors.VT12001("WITH expression in SELECT statement") } - err := checkUnsupportedExpressions(sel) - if err != nil { - return nil, err + case *sqlparser.Union: + if node.With != nil { + return nil, vterrors.VT12001("WITH expression in UNION statement") } + } + sel, isSel := stmt.(*sqlparser.Select) + if isSel { + // handle dual table for processing at vtgate. p, err := handleDualSelects(sel, vschema) if err != nil { return nil, err } if p != nil { - return newPlanResult(p), nil - } - - getPlan := func(sel *sqlparser.Select) (logicalPlan, error) { - pb := newPrimitiveBuilder(vschema, newJointab(reservedVars)) - if err := pb.processSelect(sel, reservedVars, nil, query); err != nil { - return nil, err - } - if err := pb.plan.Wireup(pb.plan, pb.jt); err != nil { - return nil, err + used := "dual" + keyspace, ksErr := vschema.DefaultKeyspace() + if ksErr == nil { + // we are just getting the ks to log the correct table use. + // no need to fail this if we can't find the default keyspace + used = keyspace.Name + ".dual" } - return pb.plan, nil + return newPlanResult(p, used), nil } - plan, err := getPlan(sel) - if err != nil { - return nil, err - } - - if shouldRetryAfterPredicateRewriting(plan) { - // by transforming the predicates to CNF, the planner will sometimes find better plans - primitive := rewriteToCNFAndReplan(stmt, getPlan) - if primitive != nil { - return newPlanResult(primitive), nil - } - } - primitive := plan.Primitive() - if rb, ok := primitive.(*engine.Route); ok { - // this is done because engine.Route doesn't handle the empty result well - // if it doesn't find a shard to send the query to. 
- // All other engine primitives can handle this, so we only need it when - // Route is the last (and only) instruction before the user sees a result - if isOnlyDual(sel) || (len(sel.GroupBy) == 0 && sel.SelectExprs.AllAggregation()) { - rb.NoRoutesSpecialHandling = true - } - } - - return newPlanResult(primitive), nil - } -} - -// checkUnsupportedExpressions checks for unsupported expressions. -func checkUnsupportedExpressions(sel sqlparser.SQLNode) error { - var unsupportedErr error - sqlparser.Rewrite(sel, func(cursor *sqlparser.Cursor) bool { - switch cursor.Node().(type) { - case *sqlparser.AssignmentExpr: - unsupportedErr = vterrors.VT12001("Assignment expression") - return false - default: - return true - } - }, nil) - if unsupportedErr != nil { - return unsupportedErr - } - return nil -} - -func rewriteToCNFAndReplan(stmt sqlparser.Statement, getPlan func(sel *sqlparser.Select) (logicalPlan, error)) engine.Primitive { - rewritten := sqlparser.RewritePredicate(stmt) - sel2, isSelect := rewritten.(*sqlparser.Select) - if isSelect { - log.Infof("retrying plan after cnf: %s", sqlparser.String(sel2)) - plan2, err := getPlan(sel2) - if err == nil && !shouldRetryAfterPredicateRewriting(plan2) { - // we only use this new plan if it's better than the old one we got - return plan2.Primitive() - } - } - return nil -} - -func shouldRetryAfterPredicateRewriting(plan logicalPlan) bool { - // if we have a I_S query, but have not found table_schema or table_name, let's try CNF - var opcode engine.Opcode - var sysTableTableName map[string]evalengine.Expr - var sysTableTableSchema []evalengine.Expr - - switch routePlan := plan.(type) { - case *routeGen4: - opcode = routePlan.eroute.Opcode - sysTableTableName = routePlan.eroute.SysTableTableName - sysTableTableSchema = routePlan.eroute.SysTableTableSchema - case *route: - opcode = routePlan.eroute.Opcode - sysTableTableName = routePlan.eroute.SysTableTableName - sysTableTableSchema = routePlan.eroute.SysTableTableSchema - 
default: - return false - } - - return opcode == engine.DBA && - len(sysTableTableName) == 0 && - len(sysTableTableSchema) == 0 -} - -// processSelect builds a primitive tree for the given query or subquery. -// The tree built by this function has the following general structure: -// -// The leaf nodes can be a route, vindexFunc or subquery. In the symtab, -// the tables map has columns that point to these leaf nodes. A subquery -// itself contains a logicalPlan tree, but it's opaque and is made to look -// like a table for the analysis of the current tree. -// -// The leaf nodes are usually tied together by join nodes. While the join -// nodes are built, they have ON clauses. Those are analyzed and pushed -// down into the leaf nodes as the tree is formed. Join nodes are formed -// during analysis of the FROM clause. -// -// During the WHERE clause analysis, the target leaf node is identified -// for each part, and the PushFilter function is used to push the condition -// down. The same strategy is used for the other clauses. -// -// So, a typical plan would either be a simple leaf node, or may consist -// of leaf nodes tied together by join nodes. -// -// If a query has aggregates that cannot be pushed down, an aggregator -// primitive is built. The current orderedAggregate primitive can only -// be built on top of a route. The orderedAggregate expects the rows -// to be ordered as they are returned. This work is performed by the -// underlying route. This means that a compatible ORDER BY clause -// can also be handled by this combination of primitives. In this case, -// the tree would consist of an orderedAggregate whose input is a route. -// -// If a query has an ORDER BY, but the route is a scatter, then the -// ordering is pushed down into the route itself. This results in a simple -// route primitive. -// -// The LIMIT clause is the last construct of a query. 
If it cannot be -// pushed into a route, then a primitive is created on top of any -// of the above trees to make it discard unwanted rows. -func (pb *primitiveBuilder) processSelect(sel *sqlparser.Select, reservedVars *sqlparser.ReservedVars, outer *symtab, query string) error { - // Check and error if there is any locking function present in select expression. - for _, expr := range sel.SelectExprs { - if aExpr, ok := expr.(*sqlparser.AliasedExpr); ok && sqlparser.IsLockingFunc(aExpr.Expr) { - return vterrors.VT12001(fmt.Sprintf("%v is allowed only with dual", sqlparser.String(aExpr))) - } - } - if sel.SQLCalcFoundRows { - if outer != nil || query == "" { - return vterrors.VT03008("SQL_CALC_FOUND_ROWS") + if sel.SQLCalcFoundRows && sel.Limit != nil { + return gen4planSQLCalcFoundRows(vschema, sel, query, reservedVars) } + // if there was no limit, we can safely ignore the SQLCalcFoundRows directive sel.SQLCalcFoundRows = false - if sel.Limit != nil { - plan, _, err := buildSQLCalcFoundRowsPlan(query, sel, reservedVars, pb.vschema, planSelectV3) - if err != nil { - return err - } - pb.plan = plan - return nil - } } - // Into is not supported in subquery. - if sel.Into != nil && (outer != nil || query == "") { - return vterrors.VT03008("INTO") + getPlan := func(selStatement sqlparser.SelectStatement) (logicalPlan, []string, error) { + return newBuildSelectPlan(selStatement, reservedVars, vschema, plannerVersion) } - var where sqlparser.Expr - if sel.Where != nil { - where = sel.Where.Expr - } - if err := pb.processTableExprs(sel.From, reservedVars, where); err != nil { - return err + plan, tablesUsed, err := getPlan(stmt) + if err != nil { + return nil, err } - if rb, ok := pb.plan.(*route); ok { - // TODO(sougou): this can probably be improved. 
- directives := sel.Comments.Directives() - rb.eroute.QueryTimeout = queryTimeout(directives) - if rb.eroute.TargetDestination != nil { - return vterrors.VT12001("SELECT with a target destination") - } - if directives.IsSet(sqlparser.DirectiveScatterErrorsAsWarnings) { - rb.eroute.ScatterErrorsAsWarnings = true + if shouldRetryAfterPredicateRewriting(plan) { + // by transforming the predicates to CNF, the planner will sometimes find better plans + // TODO: this should move to the operator side of planning + plan2, tablesUsed := gen4PredicateRewrite(stmt, getPlan) + if plan2 != nil { + return newPlanResult(plan2.Primitive(), tablesUsed...), nil } } - // Set the outer symtab after processing of FROM clause. - // This is because correlation is not allowed there. - pb.st.Outer = outer - if sel.Where != nil { - if err := pb.pushFilter(sel.Where.Expr, sqlparser.WhereStr, reservedVars); err != nil { - return err - } + primitive := plan.Primitive() + if !isSel { + return newPlanResult(primitive, tablesUsed...), nil } - if err := pb.checkAggregates(sel); err != nil { - return err - } - if err := pb.pushSelectExprs(sel, reservedVars); err != nil { - return err - } - if sel.Having != nil { - if err := pb.pushFilter(sel.Having.Expr, sqlparser.HavingStr, reservedVars); err != nil { - return err + + // this is done because engine.Route doesn't handle the empty result well + // if it doesn't find a shard to send the query to. 
+ // All other engine primitives can handle this, so we only need it when + // Route is the last (and only) instruction before the user sees a result + if isOnlyDual(sel) || (len(sel.GroupBy) == 0 && sel.SelectExprs.AllAggregation()) { + switch prim := primitive.(type) { + case *engine.Route: + prim.NoRoutesSpecialHandling = true + case *engine.VindexLookup: + prim.SendTo.NoRoutesSpecialHandling = true } } - if err := pb.pushOrderBy(sel.OrderBy); err != nil { - return err - } - if err := pb.pushLimit(sel.Limit); err != nil { - return err - } - - return setMiscFunc(pb.plan, sel) + return newPlanResult(primitive, tablesUsed...), nil } -func setMiscFunc(in logicalPlan, sel *sqlparser.Select) error { - _, err := visit(in, func(plan logicalPlan) (bool, logicalPlan, error) { - switch node := plan.(type) { - case *route: - err := copyCommentsAndLocks(node.Select, sel, node.eroute.Opcode) - if err != nil { - return false, nil, err - } - return true, node, nil - case *routeGen4: - err := copyCommentsAndLocks(node.Select, sel, node.eroute.Opcode) - if err != nil { - return false, nil, err - } - return true, node, nil - } - return true, plan, nil - }) - +func gen4planSQLCalcFoundRows(vschema plancontext.VSchema, sel *sqlparser.Select, query string, reservedVars *sqlparser.ReservedVars) (*planResult, error) { + ksName := "" + if ks, _ := vschema.DefaultKeyspace(); ks != nil { + ksName = ks.Name + } + semTable, err := semantics.Analyze(sel, ksName, vschema) if err != nil { - return err + return nil, err } - return nil -} + // record any warning as planner warning. 
+ vschema.PlannerWarning(semTable.Warning) -func copyCommentsAndLocks(statement sqlparser.SelectStatement, sel *sqlparser.Select, opcode engine.Opcode) error { - query := sqlparser.GetFirstSelect(statement) - query.Comments = sel.Comments - query.Lock = sel.Lock - if sel.Into != nil { - if opcode != engine.Unsharded { - return vterrors.VT12001("INTO on sharded keyspace") - } - query.Into = sel.Into + plan, tablesUsed, err := buildSQLCalcFoundRowsPlan(query, sel, reservedVars, vschema) + if err != nil { + return nil, err } - return nil + return newPlanResult(plan.Primitive(), tablesUsed...), nil } func buildSQLCalcFoundRowsPlan( @@ -300,9 +136,8 @@ func buildSQLCalcFoundRowsPlan( sel *sqlparser.Select, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, - planSelect func(reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, sel *sqlparser.Select) (*jointab, logicalPlan, []string, error), ) (logicalPlan, []string, error) { - ljt, limitPlan, _, err := planSelect(reservedVars, vschema, sel) + limitPlan, _, err := newBuildSelectPlan(sel, reservedVars, vschema, Gen4) if err != nil { return nil, nil, err } @@ -342,18 +177,113 @@ func buildSQLCalcFoundRowsPlan( reservedVars2 := sqlparser.NewReservedVars("vtg", reserved2) - cjt, countPlan, tablesUsed, err := planSelect(reservedVars2, vschema, sel2) + countPlan, tablesUsed, err := newBuildSelectPlan(sel2, reservedVars2, vschema, Gen4) if err != nil { return nil, nil, err } - return &sqlCalcFoundRows{LimitQuery: limitPlan, CountQuery: countPlan, ljt: ljt, cjt: cjt}, tablesUsed, nil + return &sqlCalcFoundRows{LimitQuery: limitPlan, CountQuery: countPlan}, tablesUsed, nil +} + +func gen4PredicateRewrite(stmt sqlparser.Statement, getPlan func(selStatement sqlparser.SelectStatement) (logicalPlan, []string, error)) (logicalPlan, []string) { + rewritten, isSel := sqlparser.RewritePredicate(stmt).(sqlparser.SelectStatement) + if !isSel { + // Fail-safe code, should never happen + return nil, nil + } + 
plan2, op, err := getPlan(rewritten) + if err == nil && !shouldRetryAfterPredicateRewriting(plan2) { + // we only use this new plan if it's better than the old one we got + return plan2, op + } + return nil, nil } -func planSelectV3(reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, sel *sqlparser.Select) (*jointab, logicalPlan, []string, error) { - ljt := newJointab(reservedVars) - frpb := newPrimitiveBuilder(vschema, ljt) - err := frpb.processSelect(sel, reservedVars, nil, "") - return ljt, frpb.plan, nil, err +func newBuildSelectPlan( + selStmt sqlparser.SelectStatement, + reservedVars *sqlparser.ReservedVars, + vschema plancontext.VSchema, + version querypb.ExecuteOptions_PlannerVersion, +) (plan logicalPlan, tablesUsed []string, err error) { + ctx, err := plancontext.CreatePlanningContext(selStmt, reservedVars, vschema, version) + if err != nil { + return nil, nil, err + } + + if ks, _ := ctx.SemTable.SingleUnshardedKeyspace(); ks != nil { + plan, tablesUsed, err = selectUnshardedShortcut(ctx, selStmt, ks) + if err != nil { + return nil, nil, err + } + plan = pushCommentDirectivesOnPlan(plan, selStmt) + return plan, tablesUsed, err + } + + // From this point on, we know it is not an unsharded query and return the NotUnshardedErr if there is any + if ctx.SemTable.NotUnshardedErr != nil { + return nil, nil, ctx.SemTable.NotUnshardedErr + } + + op, err := createSelectOperator(ctx, selStmt, reservedVars) + if err != nil { + return nil, nil, err + } + + plan, err = transformToLogicalPlan(ctx, op) + if err != nil { + return nil, nil, err + } + + if err = plan.Wireup(ctx); err != nil { + return nil, nil, err + } + return pushCommentDirectivesOnPlan(plan, selStmt), operators.TablesUsed(op), nil +} + +func createSelectOperator(ctx *plancontext.PlanningContext, selStmt sqlparser.SelectStatement, reservedVars *sqlparser.ReservedVars) (ops.Operator, error) { + err := queryRewrite(ctx.SemTable, reservedVars, selStmt) + if err != nil { + return nil, err + } 
+ + return operators.PlanQuery(ctx, selStmt) +} + +func isOnlyDual(sel *sqlparser.Select) bool { + if sel.Where != nil || sel.GroupBy != nil || sel.Having != nil || sel.Limit != nil || sel.OrderBy != nil { + // we can only deal with queries without any other subclauses - just SELECT and FROM, nothing else is allowed + return false + } + + if len(sel.From) > 1 { + return false + } + table, ok := sel.From[0].(*sqlparser.AliasedTableExpr) + if !ok { + return false + } + tableName, ok := table.Expr.(sqlparser.TableName) + + return ok && tableName.Name.String() == "dual" && tableName.Qualifier.IsEmpty() +} + +func shouldRetryAfterPredicateRewriting(plan logicalPlan) bool { + // if we have a I_S query, but have not found table_schema or table_name, let's try CNF + var opcode engine.Opcode + var sysTableTableName map[string]evalengine.Expr + var sysTableTableSchema []evalengine.Expr + + switch routePlan := plan.(type) { + case *route: + opcode = routePlan.eroute.Opcode + sysTableTableName = routePlan.eroute.SysTableTableName + sysTableTableSchema = routePlan.eroute.SysTableTableSchema + default: + return false + } + + return opcode == engine.DBA && + len(sysTableTableName) == 0 && + len(sysTableTableSchema) == 0 } func handleDualSelects(sel *sqlparser.Select, vschema plancontext.VSchema) (engine.Primitive, error) { @@ -418,236 +348,3 @@ func buildLockingPrimitive(sel *sqlparser.Select, vschema plancontext.VSchema, l LockFunctions: lockFunctions, }, nil } - -func isOnlyDual(sel *sqlparser.Select) bool { - if sel.Where != nil || sel.GroupBy != nil || sel.Having != nil || sel.Limit != nil || sel.OrderBy != nil { - // we can only deal with queries without any other subclauses - just SELECT and FROM, nothing else is allowed - return false - } - - if len(sel.From) > 1 { - return false - } - table, ok := sel.From[0].(*sqlparser.AliasedTableExpr) - if !ok { - return false - } - tableName, ok := table.Expr.(sqlparser.TableName) - - return ok && tableName.Name.String() == "dual" && 
tableName.Qualifier.IsEmpty() -} - -// pushFilter identifies the target route for the specified bool expr, -// pushes it down, and updates the route info if the new constraint improves -// the primitive. This function can push to a WHERE or HAVING clause. -func (pb *primitiveBuilder) pushFilter(in sqlparser.Expr, whereType string, reservedVars *sqlparser.ReservedVars) error { - filters := sqlparser.SplitAndExpression(nil, in) - reorderBySubquery(filters) - for _, filter := range filters { - pullouts, origin, expr, err := pb.findOrigin(filter, reservedVars) - if err != nil { - return err - } - rut, isRoute := origin.(*route) - if isRoute && rut.eroute.Opcode == engine.DBA { - err := pb.findSysInfoRoutingPredicates(expr, rut, reservedVars) - if err != nil { - return err - } - } - // The returned expression may be complex. Resplit before pushing. - for _, subexpr := range sqlparser.SplitAndExpression(nil, expr) { - pb.plan, err = planFilter(pb, pb.plan, subexpr, whereType, origin) - if err != nil { - return err - } - } - pb.addPullouts(pullouts) - } - return nil -} - -// reorderBySubquery reorders the filters by pushing subqueries -// to the end. This allows the non-subquery filters to be -// pushed first because they can potentially improve the routing -// plan, which can later allow a filter containing a subquery -// to successfully merge with the corresponding route. -func reorderBySubquery(filters []sqlparser.Expr) { - max := len(filters) - for i := 0; i < max; i++ { - if !hasSubquery(filters[i]) { - continue - } - saved := filters[i] - for j := i; j < len(filters)-1; j++ { - filters[j] = filters[j+1] - } - filters[len(filters)-1] = saved - max-- - } -} - -// addPullouts adds the pullout subqueries to the primitiveBuilder. 
-func (pb *primitiveBuilder) addPullouts(pullouts []*pulloutSubquery) { - for _, pullout := range pullouts { - pullout.setUnderlying(pb.plan) - pb.plan = pullout - pb.plan.Reorder(0) - } -} - -// pushSelectExprs identifies the target route for the -// select expressions and pushes them down. -func (pb *primitiveBuilder) pushSelectExprs(sel *sqlparser.Select, reservedVars *sqlparser.ReservedVars) error { - resultColumns, err := pb.pushSelectRoutes(sel.SelectExprs, reservedVars) - if err != nil { - return err - } - pb.st.SetResultColumns(resultColumns) - return pb.pushGroupBy(sel) -} - -// pushSelectRoutes is a convenience function that pushes all the select -// expressions and returns the list of resultColumns generated for it. -func (pb *primitiveBuilder) pushSelectRoutes(selectExprs sqlparser.SelectExprs, reservedVars *sqlparser.ReservedVars) ([]*resultColumn, error) { - resultColumns := make([]*resultColumn, 0, len(selectExprs)) - for _, node := range selectExprs { - switch node := node.(type) { - case *sqlparser.AliasedExpr: - pullouts, origin, expr, err := pb.findOrigin(node.Expr, reservedVars) - if err != nil { - return nil, err - } - node.Expr = expr - newBuilder, rc, _, err := planProjection(pb, pb.plan, node, origin) - if err != nil { - return nil, err - } - pb.plan = newBuilder - resultColumns = append(resultColumns, rc) - pb.addPullouts(pullouts) - case *sqlparser.StarExpr: - var expanded bool - var err error - resultColumns, expanded, err = pb.expandStar(resultColumns, node) - if err != nil { - return nil, err - } - if expanded { - continue - } - // We'll allow select * for simple routes. - rb, ok := pb.plan.(*route) - if !ok { - return nil, vterrors.VT12001("'*' expression in cross-shard query") - } - // Validate keyspace reference if any. 
- if !node.TableName.IsEmpty() { - if _, err := pb.st.FindTable(node.TableName); err != nil { - return nil, err - } - } - resultColumns = append(resultColumns, rb.PushAnonymous(node)) - case *sqlparser.Nextval: - rb, ok := pb.plan.(*route) - if !ok { - // This code is unreachable because the parser doesn't allow joins for next val statements. - return nil, vterrors.VT12001("SELECT NEXT query in cross-shard query") - } - if rb.eroute.Opcode != engine.Next { - return nil, vterrors.VT03018() - } - rb.eroute.Opcode = engine.Next - resultColumns = append(resultColumns, rb.PushAnonymous(node)) - default: - return nil, vterrors.VT13001(fmt.Sprintf("unexpected SELECT expression type: %T", node)) - } - } - return resultColumns, nil -} - -// expandStar expands a StarExpr and pushes the expanded -// expressions down if the tables have authoritative column lists. -// If not, it returns false. -// This function breaks the abstraction a bit: it directly sets the -// the Metadata for newly created expressions. In all other cases, -// the Metadata is set through a symtab Find. -func (pb *primitiveBuilder) expandStar(inrcs []*resultColumn, expr *sqlparser.StarExpr) (outrcs []*resultColumn, expanded bool, err error) { - tables := pb.st.AllTables() - if tables == nil { - // no table metadata available. - return inrcs, false, nil - } - if expr.TableName.IsEmpty() { - for _, t := range tables { - // All tables must have authoritative column lists. - if !t.isAuthoritative { - return inrcs, false, nil - } - } - singleTable := false - if len(tables) == 1 { - singleTable = true - } - for _, t := range tables { - for _, col := range t.columnNames { - var expr *sqlparser.AliasedExpr - if singleTable { - // If there's only one table, we use unqualified column names. 
- expr = &sqlparser.AliasedExpr{ - Expr: &sqlparser.ColName{ - Metadata: t.columns[col.Lowered()], - Name: col, - }, - } - } else { - // If a and b have id as their column, then - // select * from a join b should result in - // select a.id as id, b.id as id from a join b. - expr = &sqlparser.AliasedExpr{ - Expr: &sqlparser.ColName{ - Metadata: t.columns[col.Lowered()], - Name: col, - Qualifier: t.alias, - }, - As: col, - } - } - newBuilder, rc, _, err := planProjection(pb, pb.plan, expr, t.Origin()) - if err != nil { - // Unreachable because PushSelect won't fail on ColName. - return inrcs, false, err - } - pb.plan = newBuilder - inrcs = append(inrcs, rc) - } - } - return inrcs, true, nil - } - - // Expression qualified with table name. - t, err := pb.st.FindTable(expr.TableName) - if err != nil { - return inrcs, false, err - } - if !t.isAuthoritative { - return inrcs, false, nil - } - for _, col := range t.columnNames { - expr := &sqlparser.AliasedExpr{ - Expr: &sqlparser.ColName{ - Metadata: t.columns[col.Lowered()], - Name: col, - Qualifier: expr.TableName, - }, - } - newBuilder, rc, _, err := planProjection(pb, pb.plan, expr, t.Origin()) - if err != nil { - // Unreachable because PushSelect won't fail on ColName. - return inrcs, false, err - } - pb.plan = newBuilder - inrcs = append(inrcs, rc) - } - return inrcs, true, nil -} diff --git a/go/vt/vtgate/planbuilder/semi_join.go b/go/vt/vtgate/planbuilder/semi_join.go index 44d99942fe4..2f9f0537f0f 100644 --- a/go/vt/vtgate/planbuilder/semi_join.go +++ b/go/vt/vtgate/planbuilder/semi_join.go @@ -30,7 +30,6 @@ var _ logicalPlan = (*semiJoin)(nil) // This gets built if a rhs is correlated and can // be pulled out but requires some variables to be supplied from outside. 
type semiJoin struct { - gen4Plan rhs logicalPlan lhs logicalPlan cols []int @@ -62,12 +61,12 @@ func (ps *semiJoin) Primitive() engine.Primitive { } } -// WireupGen4 implements the logicalPlan interface -func (ps *semiJoin) WireupGen4(ctx *plancontext.PlanningContext) error { - if err := ps.lhs.WireupGen4(ctx); err != nil { +// Wireup implements the logicalPlan interface +func (ps *semiJoin) Wireup(ctx *plancontext.PlanningContext) error { + if err := ps.lhs.Wireup(ctx); err != nil { return err } - return ps.rhs.WireupGen4(ctx) + return ps.rhs.Wireup(ctx) } // Rewrite implements the logicalPlan interface diff --git a/go/vt/vtgate/planbuilder/set.go b/go/vt/vtgate/planbuilder/set.go index 8508a791d41..7b1e584132d 100644 --- a/go/vt/vtgate/planbuilder/set.go +++ b/go/vt/vtgate/planbuilder/set.go @@ -261,7 +261,7 @@ func extractValue(expr *sqlparser.SetExpr, boolean bool) (string, error) { } case *sqlparser.ColName: // this is a little of a hack. it's used when the setting is not a normal expression, but rather - // an enumeration, such as utf8, utf8mb4, etc + // an enumeration, such as utf8mb3, utf8mb4, etc switch node.Name.Lowered() { case "on": return "1", nil diff --git a/go/vt/vtgate/planbuilder/show.go b/go/vt/vtgate/planbuilder/show.go index 367fe749b7d..debb631c92c 100644 --- a/go/vt/vtgate/planbuilder/show.go +++ b/go/vt/vtgate/planbuilder/show.go @@ -18,18 +18,17 @@ package planbuilder import ( "fmt" - "regexp" "sort" "strings" + "sync" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" - "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" - "vitess.io/vitess/go/vt/sidecardb" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" @@ -39,15 +38,12 @@ import ( ) const ( - utf8 = "utf8" - 
utf8mb4 = "utf8mb4" - both = "both" charset = "charset" ) func buildShowPlan(sql string, stmt *sqlparser.Show, _ *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { if vschema.Destination() != nil { - return buildByPassDDLPlan(sql, vschema) + return buildByPassPlan(sql, vschema) } var prim engine.Primitive @@ -99,7 +95,7 @@ func buildShowBasicPlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) case sqlparser.StatusGlobal, sqlparser.StatusSession: return buildSendAnywherePlan(show, vschema) case sqlparser.VitessMigrations: - return buildShowVMigrationsPlan(show, vschema) + return buildShowVitessMigrationsPlan(show, vschema) case sqlparser.VGtidExecGlobal: return buildShowVGtidPlan(show, vschema) case sqlparser.GtidExecGlobal: @@ -134,16 +130,13 @@ func buildShowTargetPlan(vschema plancontext.VSchema) (engine.Primitive, error) func buildCharsetPlan(show *sqlparser.ShowBasic) (engine.Primitive, error) { fields := buildVarCharFields("Charset", "Description", "Default collation") - maxLenField := &querypb.Field{Name: "Maxlen", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG | querypb.MySqlFlag_NO_DEFAULT_VALUE_FLAG)} + maxLenField := &querypb.Field{Name: "Maxlen", Type: sqltypes.Uint32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG | querypb.MySqlFlag_NO_DEFAULT_VALUE_FLAG)} fields = append(fields, maxLenField) - - charsets := []string{utf8, utf8mb4} - rows, err := generateCharsetRows(show.Filter, charsets) + cs, err := generateCharsetRows(show.Filter) if err != nil { return nil, err } - - return engine.NewRowsPrimitive(rows, fields), nil + return engine.NewRowsPrimitive(cs, fields), nil } func buildSendAnywherePlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) (engine.Primitive, error) { @@ -250,10 +243,9 
@@ func buildDBPlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) (engine */ } -// buildShowVMigrationsPlan serves `SHOW VITESS_MIGRATIONS ...` queries. -// It invokes queries on the sidecar database's schema_migrations table -// on all PRIMARY tablets in the keyspace's shards. -func buildShowVMigrationsPlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) (engine.Primitive, error) { +// buildShowVitessMigrationsPlan serves `SHOW VITESS_MIGRATIONS ...` queries. +// It sends down the SHOW command to the PRIMARY shard tablets (on all shards) +func buildShowVitessMigrationsPlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) (engine.Primitive, error) { dest, ks, tabletType, err := vschema.TargetDestination(show.DbName.String()) if err != nil { return nil, err @@ -270,26 +262,11 @@ func buildShowVMigrationsPlan(show *sqlparser.ShowBasic, vschema plancontext.VSc dest = key.DestinationAllShards{} } - sidecarDBID, err := sidecardb.GetIdentifierForKeyspace(ks.Name) - if err != nil { - log.Errorf("Failed to read sidecar database identifier for keyspace %q from the cache: %v", ks.Name, err) - return nil, vterrors.VT14005(ks.Name) - } - - sql := sqlparser.BuildParsedQuery("SELECT * FROM %s.schema_migrations", sidecarDBID).Query - - if show.Filter != nil { - if show.Filter.Filter != nil { - sql += fmt.Sprintf(" where %s", sqlparser.String(show.Filter.Filter)) - } else if show.Filter.Like != "" { - lit := sqlparser.String(sqlparser.NewStrLiteral(show.Filter.Like)) - sql += fmt.Sprintf(" where migration_uuid LIKE %s OR migration_context LIKE %s OR migration_status LIKE %s", lit, lit, lit) - } - } return &engine.Send{ Keyspace: ks, TargetDestination: dest, - Query: sql, + Query: sqlparser.String(show), + IsDML: false, }, nil } @@ -358,20 +335,13 @@ func buildVarCharRow(values ...string) []sqltypes.Value { return row } -func generateCharsetRows(showFilter *sqlparser.ShowFilter, colNames []string) ([][]sqltypes.Value, error) { +func 
generateCharsetRows(showFilter *sqlparser.ShowFilter) ([][]sqltypes.Value, error) { if showFilter == nil { - return buildCharsetRows(both), nil + return charsets(), nil } - var filteredColName string - var err error - if showFilter.Like != "" { - filteredColName, err = checkLikeOpt(showFilter.Like, colNames) - if err != nil { - return nil, err - } - + return filterLike(showFilter.Like, charsets()) } else { cmpExp, ok := showFilter.Filter.(*sqlparser.ComparisonExpr) if !ok { @@ -393,61 +363,84 @@ func generateCharsetRows(showFilter *sqlparser.ShowFilter, colNames []string) ([ switch cmpExp.Operator { case sqlparser.EqualOp: - for _, colName := range colNames { + for _, row := range charsets() { + colName := row[0].ToString() if rightString == colName { - filteredColName = colName + return [][]sqltypes.Value{row}, nil } } + return nil, nil case sqlparser.LikeOp: - filteredColName, err = checkLikeOpt(rightString, colNames) - if err != nil { - return nil, err - } + return filterLike(rightString, charsets()) } + } else { + return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.BadFieldError, "Unknown column '%s' in 'where clause'", left.Name.String()) } - } - return buildCharsetRows(filteredColName), nil + return charsets(), nil } -func buildCharsetRows(colName string) [][]sqltypes.Value { - row0 := buildVarCharRow( - "utf8", - "UTF-8 Unicode", - "utf8_general_ci") - row0 = append(row0, sqltypes.NewInt32(3)) - row1 := buildVarCharRow( - "utf8mb4", - "UTF-8 Unicode", - "utf8mb4_general_ci") - row1 = append(row1, sqltypes.NewInt32(4)) - - switch colName { - case utf8: - return [][]sqltypes.Value{row0} - case utf8mb4: - return [][]sqltypes.Value{row1} - case both: - return [][]sqltypes.Value{row0, row1} - } - - return [][]sqltypes.Value{} +var once sync.Once +var charsetRows [][]sqltypes.Value + +func charsets() [][]sqltypes.Value { + once.Do(func() { + charsetRows = [][]sqltypes.Value{ + append(buildVarCharRow("armscii8", "ARMSCII-8 Armenian", 
"armscii8_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("ascii", "US ASCII", "ascii_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("binary", "Binary pseudo charset", "binary"), sqltypes.NewUint32(1)), + append(buildVarCharRow("cp1250", "Windows Central European", "cp1250_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("cp1251", "Windows Cyrillic", "cp1251_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("cp1256", "Windows Arabic", "cp1256_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("cp1257", "Windows Baltic", "cp1257_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("cp850", "DOS West European", "cp850_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("cp852", "DOS Central European", "cp852_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("cp866", "DOS Russian", "cp866_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("cp932", "SJIS for Windows Japanese", "cp932_japanese_ci"), sqltypes.NewUint32(2)), + append(buildVarCharRow("dec8", "DEC West European", "dec8_swedish_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("eucjpms", "UJIS for Windows Japanese", "eucjpms_japanese_ci"), sqltypes.NewUint32(3)), + append(buildVarCharRow("euckr", "EUC-KR Korean", "euckr_korean_ci"), sqltypes.NewUint32(2)), + append(buildVarCharRow("gb2312", "GB2312 Simplified Chinese", "gb2312_chinese_ci"), sqltypes.NewUint32(2)), + append(buildVarCharRow("geostd8", "GEOSTD8 Georgian", "geostd8_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("greek", "ISO 8859-7 Greek", "greek_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("hebrew", "ISO 8859-8 Hebrew", "hebrew_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("hp8", "HP West European", "hp8_english_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("keybcs2", "DOS Kamenicky Czech-Slovak", "keybcs2_general_ci"), sqltypes.NewUint32(1)), + 
append(buildVarCharRow("koi8r", "KOI8-R Relcom Russian", "koi8r_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("koi8u", "KOI8-U Ukrainian", "koi8u_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("latin1", "cp1252 West European", "latin1_swedish_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("latin2", "ISO 8859-2 Central European", "latin2_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("latin5", "ISO 8859-9 Turkish", "latin5_turkish_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("latin7", "ISO 8859-13 Baltic", "latin7_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("macce", "Mac Central European", "macce_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("macroman", "Mac West European", "macroman_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("sjis", "Shift-JIS Japanese", "sjis_japanese_ci"), sqltypes.NewUint32(2)), + append(buildVarCharRow("swe7", "7bit Swedish", "swe7_swedish_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("ucs2", "UCS-2 Unicode", "ucs2_general_ci"), sqltypes.NewUint32(2)), + append(buildVarCharRow("ujis", "EUC-JP Japanese", "ujis_japanese_ci"), sqltypes.NewUint32(3)), + append(buildVarCharRow("utf16", "UTF-16 Unicode", "utf16_general_ci"), sqltypes.NewUint32(4)), + append(buildVarCharRow("utf16le", "UTF-16LE Unicode", "utf16le_general_ci"), sqltypes.NewUint32(4)), + append(buildVarCharRow("utf32", "UTF-32 Unicode", "utf32_general_ci"), sqltypes.NewUint32(4)), + append(buildVarCharRow("utf8mb3", "UTF-8 Unicode", "utf8mb3_general_ci"), sqltypes.NewUint32(3)), + append(buildVarCharRow("utf8mb4", "UTF-8 Unicode", "utf8mb4_0900_ai_ci"), sqltypes.NewUint32(4)), + } + }) + + return charsetRows } -func checkLikeOpt(likeOpt string, colNames []string) (string, error) { - likeRegexp := strings.ReplaceAll(likeOpt, "%", ".*") - for _, v := range colNames { - match, err := regexp.MatchString(likeRegexp, v) - if err != nil { - return "", err - } - if 
match { - return v, nil +func filterLike(likeOpt string, charsets [][]sqltypes.Value) ([][]sqltypes.Value, error) { + likeRegexp := sqlparser.LikeToRegexp(likeOpt) + var results [][]sqltypes.Value + for _, row := range charsets { + colName := row[0].ToString() + if likeRegexp.MatchString(colName) { + results = append(results, row) } } - return "", nil + return results, nil } func buildShowCreatePlan(show *sqlparser.ShowCreate, vschema plancontext.VSchema) (engine.Primitive, error) { @@ -566,13 +559,8 @@ func buildShowVGtidPlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) return nil, err } return &engine.OrderedAggregate{ - PreProcess: true, Aggregates: []*engine.AggregateParams{ - { - Opcode: popcode.AggregateGtid, - Col: 1, - Alias: "global vgtid_executed", - }, + engine.NewAggregateParam(popcode.AggregateGtid, 1, "global vgtid_executed"), }, TruncateColumnCount: 2, Input: send, diff --git a/go/vt/vtgate/planbuilder/show_test.go b/go/vt/vtgate/planbuilder/show_test.go index 406202678cb..b36133bb1c7 100644 --- a/go/vt/vtgate/planbuilder/show_test.go +++ b/go/vt/vtgate/planbuilder/show_test.go @@ -21,17 +21,20 @@ import ( "fmt" "testing" + "vitess.io/vitess/go/test/vschemawrapper" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/vindexes" ) func TestBuildDBPlan(t *testing.T) { - t.Skip() - vschema := &vschemaWrapper{ - keyspace: &vindexes.Keyspace{Name: "main"}, + vschema := &vschemawrapper.VSchemaWrapper{ + Keyspace: &vindexes.Keyspace{Name: "main"}, } testCases := []struct { @@ -62,56 +65,56 @@ func TestBuildDBPlan(t *testing.T) { } func TestGenerateCharsetRows(t *testing.T) { - rows := make([][]sqltypes.Value, 0, 4) rows0 := [][]sqltypes.Value{ append(buildVarCharRow( - "utf8", + "utf8mb3", "UTF-8 Unicode", - "utf8_general_ci"), - sqltypes.NewInt32(3)), + "utf8mb3_general_ci"), + sqltypes.NewUint32(3)), } rows1 := 
[][]sqltypes.Value{ append(buildVarCharRow( "utf8mb4", "UTF-8 Unicode", - "utf8mb4_general_ci"), - sqltypes.NewInt32(4)), + collations.Local().LookupName(collations.Default())), + sqltypes.NewUint32(4)), } rows2 := [][]sqltypes.Value{ append(buildVarCharRow( - "utf8", + "utf8mb3", "UTF-8 Unicode", - "utf8_general_ci"), - sqltypes.NewInt32(3)), + "utf8mb3_general_ci"), + sqltypes.NewUint32(3)), append(buildVarCharRow( "utf8mb4", "UTF-8 Unicode", - "utf8mb4_general_ci"), - sqltypes.NewInt32(4)), + collations.Local().LookupName(collations.Default())), + sqltypes.NewUint32(4)), } testcases := []struct { input string expected [][]sqltypes.Value }{ - {input: "show charset", expected: rows2}, - {input: "show character set", expected: rows2}, - {input: "show charset where charset like 'foo%'", expected: rows}, - {input: "show charset where charset like 'utf8%'", expected: rows0}, - {input: "show charset where charset = 'utf8'", expected: rows0}, - {input: "show charset where charset = 'foo%'", expected: rows}, + {input: "show charset", expected: charsets()}, + {input: "show character set", expected: charsets()}, + {input: "show charset where charset like 'foo%'", expected: nil}, + {input: "show charset where charset like 'utf8%'", expected: rows2}, + {input: "show charset where charset like 'utf8mb3%'", expected: rows0}, + {input: "show charset where charset like 'foo%'", expected: nil}, + {input: "show character set where charset like '%foo'", expected: nil}, + {input: "show charset where charset = 'utf8mb3'", expected: rows0}, + {input: "show charset where charset = 'foo%'", expected: nil}, {input: "show charset where charset = 'utf8mb4'", expected: rows1}, } - charsets := []string{"utf8", "utf8mb4"} - for _, tc := range testcases { t.Run(tc.input, func(t *testing.T) { stmt, err := sqlparser.Parse(tc.input) require.NoError(t, err) match := stmt.(*sqlparser.Show).Internal.(*sqlparser.ShowBasic) filter := match.Filter - actual, err := generateCharsetRows(filter, charsets) 
+ actual, err := generateCharsetRows(filter) require.NoError(t, err) require.Equal(t, tc.expected, actual) }) diff --git a/go/vt/vtgate/planbuilder/simple_projection.go b/go/vt/vtgate/planbuilder/simple_projection.go index c413630f386..e9e8a146b59 100644 --- a/go/vt/vtgate/planbuilder/simple_projection.go +++ b/go/vt/vtgate/planbuilder/simple_projection.go @@ -17,10 +17,7 @@ limitations under the License. package planbuilder import ( - "fmt" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" ) @@ -37,35 +34,7 @@ var _ logicalPlan = (*simpleProjection)(nil) // a simpleProjection. type simpleProjection struct { logicalPlanCommon - resultColumns []*resultColumn - eSimpleProj *engine.SimpleProjection -} - -// newSimpleProjection builds a new simpleProjection. -func newSimpleProjection(alias sqlparser.IdentifierCS, plan logicalPlan) (*simpleProjection, *symtab, error) { - sq := &simpleProjection{ - logicalPlanCommon: newBuilderCommon(plan), - eSimpleProj: &engine.SimpleProjection{}, - } - - // Create a 'table' that represents the derived table. - t := &table{ - alias: sqlparser.TableName{Name: alias}, - origin: sq, - } - - // Create column symbols based on the result column names. - for _, rc := range plan.ResultColumns() { - if _, ok := t.columns[rc.alias.Lowered()]; ok { - return nil, nil, vterrors.VT12001(fmt.Sprintf("duplicate column names in subquery: %s", sqlparser.String(rc.alias))) - } - t.addColumn(rc.alias, &column{origin: sq}) - } - t.isAuthoritative = true - st := newSymtab() - // AddTable will not fail because symtab is empty. 
- _ = st.AddTable(t) - return sq, st, nil + eSimpleProj *engine.SimpleProjection } // Primitive implements the logicalPlan interface @@ -74,27 +43,6 @@ func (sq *simpleProjection) Primitive() engine.Primitive { return sq.eSimpleProj } -// ResultColumns implements the logicalPlan interface -func (sq *simpleProjection) ResultColumns() []*resultColumn { - return sq.resultColumns -} - -// SupplyCol implements the logicalPlan interface -func (sq *simpleProjection) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) { - c := col.Metadata.(*column) - for i, rc := range sq.resultColumns { - if rc.column == c { - return rc, i - } - } - - // columns that reference subqueries will have their colNumber set. - // Let's use it here. - sq.eSimpleProj.Cols = append(sq.eSimpleProj.Cols, c.colNumber) - sq.resultColumns = append(sq.resultColumns, &resultColumn{column: c}) - return rc, len(sq.resultColumns) - 1 -} - // OutputColumns implements the logicalPlan interface func (sq *simpleProjection) OutputColumns() []sqlparser.SelectExpr { exprs := make([]sqlparser.SelectExpr, 0, len(sq.eSimpleProj.Cols)) diff --git a/go/vt/vtgate/planbuilder/simplifier_test.go b/go/vt/vtgate/planbuilder/simplifier_test.go index 509564225d7..1e106adacc0 100644 --- a/go/vt/vtgate/planbuilder/simplifier_test.go +++ b/go/vt/vtgate/planbuilder/simplifier_test.go @@ -21,6 +21,8 @@ import ( "fmt" "testing" + "vitess.io/vitess/go/test/vschemawrapper" + "vitess.io/vitess/go/vt/vterrors" "github.com/stretchr/testify/assert" @@ -37,19 +39,21 @@ import ( // TestSimplifyBuggyQuery should be used to whenever we get a planner bug reported // It will try to minimize the query to make it easier to understand and work with the bug. 
func TestSimplifyBuggyQuery(t *testing.T) { - query := "(select id from unsharded union select id from unsharded_auto) union (select id from user union select name from unsharded)" - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - version: Gen4, + query := "select distinct count(distinct a), count(distinct 4) from user left join unsharded on 0 limit 5" + // select 0 from unsharded union select 0 from `user` union select 0 from unsharded + // select 0 from unsharded union (select 0 from `user` union select 0 from unsharded) + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", true), + Version: Gen4, } stmt, reserved, err := sqlparser.Parse2(query) require.NoError(t, err) - rewritten, _ := sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.currentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) + rewritten, _ := sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) reservedVars := sqlparser.NewReservedVars("vtg", reserved) simplified := simplifier.SimplifyStatement( stmt.(sqlparser.SelectStatement), - vschema.currentDb(), + vschema.CurrentDb(), vschema, keepSameError(query, reservedVars, vschema, rewritten.BindVarNeeds), ) @@ -60,18 +64,18 @@ func TestSimplifyBuggyQuery(t *testing.T) { func TestSimplifyPanic(t *testing.T) { t.Skip("not needed to run") query := "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)" - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - version: Gen4, + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", true), + Version: Gen4, } stmt, reserved, err := sqlparser.Parse2(query) require.NoError(t, err) - rewritten, _ := sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.currentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) + rewritten, _ := 
sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) reservedVars := sqlparser.NewReservedVars("vtg", reserved) simplified := simplifier.SimplifyStatement( stmt.(sqlparser.SelectStatement), - vschema.currentDb(), + vschema.CurrentDb(), vschema, keepPanicking(query, reservedVars, vschema, rewritten.BindVarNeeds), ) @@ -81,9 +85,9 @@ func TestSimplifyPanic(t *testing.T) { func TestUnsupportedFile(t *testing.T) { t.Skip("run manually to see if any queries can be simplified") - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - version: Gen4, + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", true), + Version: Gen4, } fmt.Println(vschema) for _, tcase := range readJSONTests("unsupported_cases.txt") { @@ -96,11 +100,11 @@ func TestUnsupportedFile(t *testing.T) { t.Skip() return } - rewritten, err := sqlparser.RewriteAST(stmt, vschema.currentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) + rewritten, err := sqlparser.RewriteAST(stmt, vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) if err != nil { t.Skip() } - vschema.currentDb() + vschema.CurrentDb() reservedVars := sqlparser.NewReservedVars("vtg", reserved) ast := rewritten.AST @@ -108,7 +112,7 @@ func TestUnsupportedFile(t *testing.T) { stmt, _, _ = sqlparser.Parse2(tcase.Query) simplified := simplifier.SimplifyStatement( stmt.(sqlparser.SelectStatement), - vschema.currentDb(), + vschema.CurrentDb(), vschema, keepSameError(tcase.Query, reservedVars, vschema, rewritten.BindVarNeeds), ) @@ -125,12 +129,12 @@ func TestUnsupportedFile(t *testing.T) { } } -func keepSameError(query string, reservedVars *sqlparser.ReservedVars, vschema *vschemaWrapper, needs *sqlparser.BindVarNeeds) func(statement sqlparser.SelectStatement) bool { +func keepSameError(query string, reservedVars *sqlparser.ReservedVars, vschema *vschemawrapper.VSchemaWrapper, needs *sqlparser.BindVarNeeds) 
func(statement sqlparser.SelectStatement) bool { stmt, _, err := sqlparser.Parse2(query) if err != nil { panic(err) } - rewritten, _ := sqlparser.RewriteAST(stmt, vschema.currentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) + rewritten, _ := sqlparser.RewriteAST(stmt, vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) ast := rewritten.AST _, expected := BuildFromStmt(context.Background(), query, ast, reservedVars, vschema, rewritten.BindVarNeeds, true, true) if expected == nil { @@ -149,7 +153,7 @@ func keepSameError(query string, reservedVars *sqlparser.ReservedVars, vschema * } } -func keepPanicking(query string, reservedVars *sqlparser.ReservedVars, vschema *vschemaWrapper, needs *sqlparser.BindVarNeeds) func(statement sqlparser.SelectStatement) bool { +func keepPanicking(query string, reservedVars *sqlparser.ReservedVars, vschema *vschemawrapper.VSchemaWrapper, needs *sqlparser.BindVarNeeds) func(statement sqlparser.SelectStatement) bool { cmp := func(statement sqlparser.SelectStatement) (res bool) { defer func() { r := recover() diff --git a/go/vt/vtgate/planbuilder/single_sharded_shortcut.go b/go/vt/vtgate/planbuilder/single_sharded_shortcut.go index 56326c8b96f..daf19ced859 100644 --- a/go/vt/vtgate/planbuilder/single_sharded_shortcut.go +++ b/go/vt/vtgate/planbuilder/single_sharded_shortcut.go @@ -47,7 +47,7 @@ func selectUnshardedShortcut(ctx *plancontext.PlanningContext, stmt sqlparser.Se if err != nil { return nil, nil, err } - plan := &routeGen4{ + plan := &route{ eroute: &engine.Route{ RoutingParameters: &engine.RoutingParameters{ Opcode: engine.Unsharded, @@ -58,7 +58,7 @@ func selectUnshardedShortcut(ctx *plancontext.PlanningContext, stmt sqlparser.Se Select: stmt, } - if err := plan.WireupGen4(ctx); err != nil { + if err := plan.Wireup(ctx); err != nil { return nil, nil, err } return plan, operators.QualifiedTableNames(ks, tableNames), nil @@ -102,3 +102,12 @@ func getTableNames(semTable *semantics.SemTable) 
([]sqlparser.TableName, error) } return tableNames, nil } + +func removeKeyspaceFromSelectExpr(expr sqlparser.SelectExpr) { + switch expr := expr.(type) { + case *sqlparser.AliasedExpr: + sqlparser.RemoveKeyspaceFromColName(expr.Expr) + case *sqlparser.StarExpr: + expr.TableName.Qualifier = sqlparser.NewIdentifierCS("") + } +} diff --git a/go/vt/vtgate/planbuilder/sql_calc_found_rows.go b/go/vt/vtgate/planbuilder/sql_calc_found_rows.go index 72850361a9e..0657d6c2331 100644 --- a/go/vt/vtgate/planbuilder/sql_calc_found_rows.go +++ b/go/vt/vtgate/planbuilder/sql_calc_found_rows.go @@ -30,27 +30,15 @@ var _ logicalPlan = (*sqlCalcFoundRows)(nil) type sqlCalcFoundRows struct { LimitQuery, CountQuery logicalPlan - - // only used by WireUp for V3 - ljt, cjt *jointab } // Wireup implements the logicalPlan interface -func (s *sqlCalcFoundRows) Wireup(logicalPlan, *jointab) error { - err := s.LimitQuery.Wireup(s.LimitQuery, s.ljt) - if err != nil { - return err - } - return s.CountQuery.Wireup(s.CountQuery, s.cjt) -} - -// WireupGen4 implements the logicalPlan interface -func (s *sqlCalcFoundRows) WireupGen4(ctx *plancontext.PlanningContext) error { - err := s.LimitQuery.WireupGen4(ctx) +func (s *sqlCalcFoundRows) Wireup(ctx *plancontext.PlanningContext) error { + err := s.LimitQuery.Wireup(ctx) if err != nil { return err } - return s.CountQuery.WireupGen4(ctx) + return s.CountQuery.Wireup(ctx) } // ContainsTables implements the logicalPlan interface @@ -72,38 +60,6 @@ func (s *sqlCalcFoundRows) Primitive() engine.Primitive { } } -// All the methods below are not implemented. 
They should not be called on a sqlCalcFoundRows plan - -// Order implements the logicalPlan interface -func (s *sqlCalcFoundRows) Order() int { - return s.LimitQuery.Order() -} - -// ResultColumns implements the logicalPlan interface -func (s *sqlCalcFoundRows) ResultColumns() []*resultColumn { - return s.LimitQuery.ResultColumns() -} - -// Reorder implements the logicalPlan interface -func (s *sqlCalcFoundRows) Reorder(order int) { - s.LimitQuery.Reorder(order) -} - -// SupplyVar implements the logicalPlan interface -func (s *sqlCalcFoundRows) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { - s.LimitQuery.SupplyVar(from, to, col, varname) -} - -// SupplyCol implements the logicalPlan interface -func (s *sqlCalcFoundRows) SupplyCol(col *sqlparser.ColName) (*resultColumn, int) { - return s.LimitQuery.SupplyCol(col) -} - -// SupplyWeightString implements the logicalPlan interface -func (s *sqlCalcFoundRows) SupplyWeightString(int, bool) (weightcolNumber int, err error) { - return 0, UnsupportedSupplyWeightString{Type: "sqlCalcFoundRows"} -} - // Rewrite implements the logicalPlan interface func (s *sqlCalcFoundRows) Rewrite(inputs ...logicalPlan) error { if len(inputs) != 2 { diff --git a/go/vt/vtgate/planbuilder/subquery_op.go b/go/vt/vtgate/planbuilder/subquery_op.go deleted file mode 100644 index ed945cbc6ad..00000000000 --- a/go/vt/vtgate/planbuilder/subquery_op.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2022 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" -) - -func transformSubQueryPlan(ctx *plancontext.PlanningContext, op *operators.SubQueryOp) (logicalPlan, error) { - innerPlan, err := transformToLogicalPlan(ctx, op.Inner, false) - if err != nil { - return nil, err - } - innerPlan, err = planHorizon(ctx, innerPlan, op.Extracted.Subquery.Select, true) - if err != nil { - return nil, err - } - - argName := op.Extracted.GetArgName() - hasValuesArg := op.Extracted.GetHasValuesArg() - outerPlan, err := transformToLogicalPlan(ctx, op.Outer, false) - - merged := mergeSubQueryOpPlan(ctx, innerPlan, outerPlan, op) - if merged != nil { - return merged, nil - } - plan := newPulloutSubquery(opcode.PulloutOpcode(op.Extracted.OpCode), argName, hasValuesArg, innerPlan) - if err != nil { - return nil, err - } - plan.underlying = outerPlan - return plan, err -} - -func transformCorrelatedSubQueryPlan(ctx *plancontext.PlanningContext, op *operators.CorrelatedSubQueryOp) (logicalPlan, error) { - outer, err := transformToLogicalPlan(ctx, op.Outer, false) - if err != nil { - return nil, err - } - inner, err := transformToLogicalPlan(ctx, op.Inner, false) - if err != nil { - return nil, err - } - return newSemiJoin(outer, inner, op.Vars, op.LHSColumns), nil -} - -func mergeSubQueryOpPlan(ctx *plancontext.PlanningContext, inner, outer logicalPlan, n *operators.SubQueryOp) logicalPlan { - iroute, ok := inner.(*routeGen4) - if !ok { - return nil - } - oroute, ok := outer.(*routeGen4) - if !ok { - return nil - } - - if canMergeSubqueryPlans(ctx, iroute, oroute) { - // n.extracted is an expression that lives in oroute.Select. 
- // Instead of looking for it in the AST, we have a copy in the subquery tree that we can update - n.Extracted.Merged = true - replaceSubQuery(ctx, oroute.Select) - return mergeSystemTableInformation(oroute, iroute) - } - return nil -} - -// mergeSystemTableInformation copies over information from the second route to the first and appends to it -func mergeSystemTableInformation(a *routeGen4, b *routeGen4) logicalPlan { - // safe to append system table schema and system table names, since either the routing will match or either side would be throwing an error - // during run-time which we want to preserve. For example outer side has User in sys table schema and inner side has User and Main in sys table schema - // Inner might end up throwing an error at runtime, but if it doesn't then it is safe to merge. - a.eroute.SysTableTableSchema = append(a.eroute.SysTableTableSchema, b.eroute.SysTableTableSchema...) - for k, v := range b.eroute.SysTableTableName { - a.eroute.SysTableTableName[k] = v - } - return a -} - -func canMergeSubqueryPlans(ctx *plancontext.PlanningContext, a, b *routeGen4) bool { - // this method should be close to tryMerge below. it does the same thing, but on logicalPlans instead of queryTrees - if a.eroute.Keyspace.Name != b.eroute.Keyspace.Name { - return false - } - switch a.eroute.Opcode { - case engine.Unsharded, engine.Reference: - return a.eroute.Opcode == b.eroute.Opcode - case engine.DBA: - return canSelectDBAMerge(a, b) - case engine.EqualUnique: - // Check if they target the same shard. 
- if b.eroute.Opcode == engine.EqualUnique && - a.eroute.Vindex == b.eroute.Vindex && - a.condition != nil && - b.condition != nil && - gen4ValuesEqual(ctx, []sqlparser.Expr{a.condition}, []sqlparser.Expr{b.condition}) { - return true - } - } - return false -} diff --git a/go/vt/vtgate/planbuilder/symtab.go b/go/vt/vtgate/planbuilder/symtab.go deleted file mode 100644 index 7853899b4f6..00000000000 --- a/go/vt/vtgate/planbuilder/symtab.go +++ /dev/null @@ -1,617 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - "strconv" - "strings" - - "vitess.io/vitess/go/vt/vterrors" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/vindexes" - - querypb "vitess.io/vitess/go/vt/proto/query" -) - -// symtab represents the symbol table for a SELECT statement -// or a subquery. The symtab evolves over time. -// As a query is analyzed, multiple independent -// symtabs are created, and they are later merged as each -// sub-expression of a FROM clause is merged. -// -// A symtab maintains uniqueColumns, which is a list of unique -// vindex column names. These names can be resolved without the -// need to qualify them by their table names. If there are -// duplicates during a merge, those columns are removed from -// the unique list, thereby disallowing unqualified references -// to such columns. -// -// After a select expression is analyzed, the -// ResultColumns field is set. 
In the case of a subquery, the -// Outer field points to the outer symtab. Any symbols that -// are not resolved locally are added to the Externs field, -// which is later used to determine if the subquery can be -// merged with an outer route. -type symtab struct { - tables map[sqlparser.TableName]*table - tableNames []sqlparser.TableName - - // uniqueColumns has the column name as key - // and points at the columns that tables contains. - uniqueColumns map[string]*column - - // singleRoute is set only if all the symbols in - // the symbol table are part of the same route. - singleRoute *route - - ResultColumns []*resultColumn - Outer *symtab - Externs []*sqlparser.ColName -} - -// newSymtab creates a new symtab. -func newSymtab() *symtab { - return &symtab{ - tables: make(map[sqlparser.TableName]*table), - uniqueColumns: make(map[string]*column), - } -} - -// newSymtab creates a new symtab initialized -// to contain just one route. -func newSymtabWithRoute(rb *route) *symtab { - return &symtab{ - tables: make(map[sqlparser.TableName]*table), - uniqueColumns: make(map[string]*column), - singleRoute: rb, - } -} - -// AddVSchemaTable adds a vschema table to symtab. -func (st *symtab) AddVSchemaTable(alias sqlparser.TableName, vschemaTable *vindexes.Table, rb *route) error { - t := &table{ - alias: alias, - origin: rb, - vschemaTable: vschemaTable, - } - - for _, col := range vschemaTable.Columns { - if _, err := t.mergeColumn(col.Name, &column{ - origin: rb, - st: st, - typ: col.Type, - }); err != nil { - return err - } - } - if vschemaTable.ColumnListAuthoritative { - // This will prevent new columns from being added. 
- t.isAuthoritative = true - } - - for _, cv := range vschemaTable.ColumnVindexes { - single, ok := cv.Vindex.(vindexes.SingleColumn) - if !ok { - continue - } - for i, cvcol := range cv.Columns { - col, err := t.mergeColumn(cvcol, &column{ - origin: rb, - st: st, - }) - if err != nil { - return err - } - if i == 0 { - if col.vindex == nil || col.vindex.Cost() > single.Cost() { - col.vindex = single - } - } - } - } - - if ai := vschemaTable.AutoIncrement; ai != nil { - if _, ok := t.columns[ai.Column.Lowered()]; !ok { - if _, err := t.mergeColumn(ai.Column, &column{ - origin: rb, - st: st, - }); err != nil { - return err - } - } - } - if err := st.AddTable(t); err != nil { - return err - } - return nil -} - -// Merge merges the new symtab into the current one. -// Duplicate table aliases return an error. -// uniqueColumns is updated, but duplicates are removed. -// Merges are only performed during the FROM clause analysis. -// At this point, only tables and uniqueColumns are set. -// All other fields are ignored. -func (st *symtab) Merge(newsyms *symtab) error { - if st.tableNames == nil || newsyms.tableNames == nil { - // If any side of symtab has anonymous tables, - // we treat the merged symtab as having anonymous tables. - return nil - } - for _, t := range newsyms.tables { - if err := st.AddTable(t); err != nil { - return err - } - } - return nil -} - -// AddTable adds a table to symtab. -func (st *symtab) AddTable(t *table) error { - if rb, ok := t.origin.(*route); !ok || rb.Resolve() != st.singleRoute { - st.singleRoute = nil - } - if _, ok := st.tables[t.alias]; ok { - return vterrors.VT03013(t.alias.Name.String()) - } - st.tables[t.alias] = t - st.tableNames = append(st.tableNames, t.alias) - - // update the uniqueColumns list, and eliminate - // duplicate symbols if found. - for colname, c := range t.columns { - c.st = st - if _, ok := st.uniqueColumns[colname]; ok { - // Keep the entry, but make it nil. 
This will - // ensure that yet another column of the same name - // doesn't get added back in. - st.uniqueColumns[colname] = nil - continue - } - st.uniqueColumns[colname] = c - } - return nil -} - -// AllTables returns an ordered list of all current tables. -func (st *symtab) AllTables() []*table { - if len(st.tableNames) == 0 { - return nil - } - tables := make([]*table, 0, len(st.tableNames)) - for _, tname := range st.tableNames { - tables = append(tables, st.tables[tname]) - } - return tables -} - -// AllVschemaTableNames returns an ordered list of all current vschema tables. -func (st *symtab) AllVschemaTableNames() ([]*vindexes.Table, error) { - if len(st.tableNames) == 0 { - return nil, nil - } - tables := make([]*vindexes.Table, 0, len(st.tableNames)) - for _, tname := range st.tableNames { - t, ok := st.tables[tname] - if !ok { - return nil, vterrors.VT05004(sqlparser.String(tname)) - } - if t.vschemaTable != nil { - tables = append(tables, t.vschemaTable) - } - } - return tables, nil -} - -// FindTable finds a table in symtab. This function is specifically used -// for expanding 'select a.*' constructs. If you're in a subquery, -// you're most likely referring to a table in the local 'from' clause. -// For this reason, the search is only performed in the current scope. -// This may be a deviation from the formal definition of SQL, but there -// are currently no use cases that require the full support. -func (st *symtab) FindTable(tname sqlparser.TableName) (*table, error) { - if st.tableNames == nil { - // Unreachable because current code path checks for this condition - // before invoking this function. - return nil, vterrors.VT05007() - } - t, ok := st.tables[tname] - if !ok { - return nil, vterrors.VT05004(sqlparser.String(tname)) - } - return t, nil -} - -// SetResultColumns sets the result columns. 
-func (st *symtab) SetResultColumns(rcs []*resultColumn) { - for _, rc := range rcs { - rc.column.st = st - } - st.ResultColumns = rcs -} - -// Find returns the logicalPlan for the symbol referenced by col. -// If a reference is found, col.Metadata is set to point -// to it. Subsequent searches will reuse this metadata. -// -// Unqualified columns are searched in the following order: -// 1. ResultColumns -// 2. uniqueColumns -// 3. symtab has only one table. The column is presumed to -// belong to that table. -// 4. symtab has more than one table, but all tables belong -// to the same route. An anonymous column is created against -// the current route. -// If all the above fail, an error is returned. This means -// that an unqualified reference can only be locally resolved. -// -// For qualified columns, we first look for the table. If one -// is found, we look for a column in the pre-existing list. -// If one is not found, we optimistically create an entry -// presuming that the table has such a column. If this is -// not the case, the query will fail when sent to vttablet. -// If the table is not found in the local scope, the search -// is continued in the outer scope, but only if ResultColumns -// is not set (this is MySQL behavior). -// -// For symbols that were found locally, isLocal is returned -// as true. Otherwise, it's returned as false and the symbol -// gets added to the Externs list, which can later be used -// to decide where to push-down the subquery. -func (st *symtab) Find(col *sqlparser.ColName) (origin logicalPlan, isLocal bool, err error) { - // Return previously cached info if present. - if column, ok := col.Metadata.(*column); ok { - return column.Origin(), column.st == st, nil - } - - // Unqualified column case. - if col.Qualifier.IsEmpty() { - // Step 1. Search ResultColumns. 
- c, err := st.searchResultColumn(col) - if err != nil { - return nil, false, err - } - if c != nil { - col.Metadata = c - return c.Origin(), true, nil - } - } - - // Steps 2-4 performed by searchTables. - c, err := st.searchTables(col) - if err != nil { - return nil, false, err - } - if c != nil { - col.Metadata = c - return c.Origin(), true, nil - } - - if st.Outer == nil { - return nil, false, vterrors.VT03019(sqlparser.String(col)) - } - // Search is not continued if ResultColumns already has values: - // select a ... having ... (select b ... having a...). In this case, - // a (in having) should not match the outer-most 'a'. This is to - // match MySQL's behavior. - if len(st.ResultColumns) != 0 { - return nil, false, vterrors.VT03020(sqlparser.String(col)) - } - - if origin, _, err = st.Outer.Find(col); err != nil { - return nil, false, err - } - st.Externs = append(st.Externs, col) - return origin, false, nil -} - -// searchResultColumn looks for col in the results columns. -func (st *symtab) searchResultColumn(col *sqlparser.ColName) (c *column, err error) { - var cursym *resultColumn - for _, rc := range st.ResultColumns { - if rc.alias.Equal(col.Name) { - if cursym != nil { - return nil, vterrors.VT03021(sqlparser.String(col)) - } - cursym = rc - } - } - if cursym != nil { - return cursym.column, nil - } - return nil, nil -} - -// searchTables looks for the column in the tables. The search order -// is as described in Find. -func (st *symtab) searchTables(col *sqlparser.ColName) (*column, error) { - var t *table - // @@ syntax is only allowed for dual tables, in which case there should be - // only one in the symtab. So, such expressions will be implicitly matched. - if col.Qualifier.IsEmpty() || strings.HasPrefix(col.Qualifier.Name.String(), "@@") { - // Search uniqueColumns first. If found, our job is done. - // Check for nil because there can be nil entries if there - // are duplicate columns across multiple tables. 
- if c := st.uniqueColumns[col.Name.Lowered()]; c != nil { - return c, nil - } - - switch { - case len(st.tables) == 1: - // If there's only one table match against it. - // Loop executes once to match the only table. - for _, v := range st.tables { - t = v - } - // No return: break out. - case st.singleRoute != nil: - // If there's only one route, create an anonymous symbol. - return &column{origin: st.singleRoute, st: st}, nil - default: - // If none of the above, the symbol is unresolvable. - return nil, vterrors.VT03019(sqlparser.String(col)) - } - } else { - var ok bool - t, ok = st.tables[col.Qualifier] - if !ok { - return nil, nil - } - } - - // At this point, t should be set. - c, ok := t.columns[col.Name.Lowered()] - if !ok { - // We know all the column names of a subquery. Might as well return an error if it's not found. - if t.isAuthoritative { - return nil, vterrors.VT03019(sqlparser.String(col)) - } - c = &column{ - origin: t.Origin(), - st: st, - } - t.addColumn(col.Name, c) - } - return c, nil -} - -// ResultFromNumber returns the result column index based on the column -// order expression. -func ResultFromNumber(rcs []*resultColumn, val *sqlparser.Literal, caller string) (int, error) { - if val.Type != sqlparser.IntVal { - return 0, vterrors.VT13001("column number is not an INT") - } - num, err := strconv.ParseInt(val.Val, 0, 64) - if err != nil { - return 0, vterrors.VT13001(fmt.Sprintf("error parsing column number: %s", sqlparser.String(val))) - } - if num < 1 || num > int64(len(rcs)) { - return 0, vterrors.VT03014(num, caller) - } - return int(num - 1), nil -} - -// Vindex returns the vindex if the expression is a plain column reference -// that is part of the specified route, and has an associated vindex. -func (st *symtab) Vindex(expr sqlparser.Expr, scope *route) vindexes.SingleColumn { - col, ok := expr.(*sqlparser.ColName) - if !ok { - return nil - } - if col.Metadata == nil { - // Find will set the Metadata. 
- if _, _, err := st.Find(col); err != nil { - return nil - } - } - c := col.Metadata.(*column) - if c.Origin() != scope { - return nil - } - return c.vindex -} - -// BuildColName builds a *sqlparser.ColName for the resultColumn specified -// by the index. The built ColName will correctly reference the resultColumn -// it was built from. -func BuildColName(rcs []*resultColumn, index int) (*sqlparser.ColName, error) { - alias := rcs[index].alias - if alias.IsEmpty() { - return nil, vterrors.VT12001("reference a complex expression") - } - for i, rc := range rcs { - if i == index { - continue - } - if rc.alias.Equal(alias) { - return nil, vterrors.VT03021(alias) - } - } - return &sqlparser.ColName{ - Metadata: rcs[index].column, - Name: alias, - }, nil -} - -// ResolveSymbols resolves all column references against symtab. -// This makes sure that they all have their Metadata initialized. -// If a symbol cannot be resolved or if the expression contains -// a subquery, an error is returned. -func (st *symtab) ResolveSymbols(node sqlparser.SQLNode) error { - return sqlparser.Walk(func(currNode sqlparser.SQLNode) (kontinue bool, err error) { - switch currNode := currNode.(type) { - case *sqlparser.ColName: - if _, _, err := st.Find(currNode); err != nil { - return false, err - } - case *sqlparser.Subquery: - return false, vterrors.VT12001(fmt.Sprintf("subqueries disallowed in %T", node)) - } - return true, nil - }, node) -} - -// table is part of symtab. -// It represents a table alias in a FROM clause. It points -// to the logicalPlan that represents it. -type table struct { - alias sqlparser.TableName - columns map[string]*column - columnNames []sqlparser.IdentifierCI - isAuthoritative bool - origin logicalPlan - vschemaTable *vindexes.Table -} - -func (t *table) addColumn(alias sqlparser.IdentifierCI, c *column) { - if t.columns == nil { - t.columns = make(map[string]*column) - } - lowered := alias.Lowered() - // Dups are allowed, but first one wins if referenced. 
- if _, ok := t.columns[lowered]; !ok { - c.colNumber = len(t.columnNames) - t.columns[lowered] = c - } - t.columnNames = append(t.columnNames, alias) -} - -// mergeColumn merges or creates a new column for the table. -// If the table is authoritative and the column doesn't already -// exist, it returns an error. If the table is not authoritative, -// the column is added if not already present. -func (t *table) mergeColumn(alias sqlparser.IdentifierCI, c *column) (*column, error) { - if t.columns == nil { - t.columns = make(map[string]*column) - } - lowered := alias.Lowered() - if col, ok := t.columns[lowered]; ok { - return col, nil - } - if t.isAuthoritative { - return nil, vterrors.VT03022(sqlparser.String(alias), sqlparser.String(t.alias)) - } - c.colNumber = len(t.columnNames) - t.columns[lowered] = c - t.columnNames = append(t.columnNames, alias) - return c, nil -} - -// Origin returns the route that originates the table. -func (t *table) Origin() logicalPlan { - // If it's a route, we have to resolve it. - if rb, ok := t.origin.(*route); ok { - return rb.Resolve() - } - return t.origin -} - -// column represents a unique symbol in the query that other -// parts can refer to. -// Every column contains the logicalPlan it originates from. -// If a column has associated vindexes, then the one with the -// lowest cost is set. -// -// Two columns are equal if their pointer values match. -// -// For subquery and vindexFunc, the colNumber is also set because -// the column order is known and unchangeable. -type column struct { - origin logicalPlan - st *symtab - vindex vindexes.SingleColumn - typ querypb.Type - colNumber int -} - -// Origin returns the route that originates the column. -func (c *column) Origin() logicalPlan { - // If it's a route, we have to resolve it. - if rb, ok := c.origin.(*route); ok { - return rb.Resolve() - } - return c.origin -} - -// resultColumn contains symbol info about a select expression. 
If the -// expression represents an underlying column, then it points to it. -// Otherwise, an anonymous column is created as place-holder. -type resultColumn struct { - // alias will represent the unqualified symbol name for that expression. - // If the statement provides an explicit alias, that name will be used. - // If the expression is a simple column, then the base name of the - // column will be used as the alias. If the expression is non-trivial, - // alias will be empty, and cannot be referenced from other parts of - // the query. - alias sqlparser.IdentifierCI - column *column -} - -// NewResultColumn creates a new resultColumn based on the supplied expression. -// The created symbol is not remembered until it is later set as ResultColumns -// after all select expressions are analyzed. -func newResultColumn(expr *sqlparser.AliasedExpr, origin logicalPlan) *resultColumn { - rc := &resultColumn{ - alias: expr.As, - } - if col, ok := expr.Expr.(*sqlparser.ColName); ok { - // If no alias was specified, then the base name - // of the column becomes the alias. - if rc.alias.IsEmpty() { - rc.alias = col.Name - } - // If it's a col it should already have metadata. - rc.column = col.Metadata.(*column) - } else { - // We don't generate an alias if the expression is non-trivial. - // Just to be safe, generate an anonymous column for the expression. 
- typ, err := GetReturnType(expr.Expr) - rc.column = &column{ - origin: origin, - } - if err == nil { - rc.column.typ = typ - } - } - return rc -} - -// GetReturnType returns the type of the select expression that MySQL will return -func GetReturnType(input sqlparser.Expr) (querypb.Type, error) { - switch node := input.(type) { - case *sqlparser.FuncExpr: - functionName := strings.ToUpper(node.Name.String()) - switch functionName { - case "ABS": - // Returned value depends on the return type of the input - if len(node.Exprs) == 1 { - expr, isAliasedExpr := node.Exprs[0].(*sqlparser.AliasedExpr) - if isAliasedExpr { - return GetReturnType(expr.Expr) - } - } - } - case *sqlparser.ColName: - col := node.Metadata.(*column) - return col.typ, nil - case *sqlparser.Count, *sqlparser.CountStar: - return querypb.Type_INT64, nil - } - return 0, vterrors.VT12001(fmt.Sprintf("evaluate return type for %T", input)) -} diff --git a/go/vt/vtgate/planbuilder/symtab_test.go b/go/vt/vtgate/planbuilder/symtab_test.go deleted file mode 100644 index 725eeaa541a..00000000000 --- a/go/vt/vtgate/planbuilder/symtab_test.go +++ /dev/null @@ -1,227 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package planbuilder - -import ( - "testing" - - "github.com/stretchr/testify/require" - - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/sqlparser" -) - -/* -func TestSymtabAddVSchemaTable(t *testing.T) { - tname := sqlparser.TableName{Name: sqlparser.NewIdentifierCS("t")} - rb := &route{} - - null, _ := vindexes.CreateVindex("null", "null", nil) - - tcases := []struct { - in *vindexes.Table - authoritative bool - vindex []string - err string - }{{ - // Single table. - in: &vindexes.Table{ - Columns: []vindexes.Column{{ - Name: sqlparser.NewIdentifierCI("C1"), - }, { - Name: sqlparser.NewIdentifierCI("C2"), - }}, - }, - authoritative: false, - vindex: []string{}, - }, { - // Column vindex specified. - in: &vindexes.Table{ - ColumnVindexes: []*vindexes.ColumnVindex{{ - Columns: []sqlparser.IdentifierCI{sqlparser.NewIdentifierCI("C1")}, - Vindex: null, - }}, - Columns: []vindexes.Column{{ - Name: sqlparser.NewIdentifierCI("C1"), - }, { - Name: sqlparser.NewIdentifierCI("C2"), - }}, - }, - authoritative: false, - vindex: []string{"c1"}, - }, { - // Multi-column vindex. - in: &vindexes.Table{ - ColumnVindexes: []*vindexes.ColumnVindex{{ - Columns: []sqlparser.IdentifierCI{ - sqlparser.NewIdentifierCI("C1"), - sqlparser.NewIdentifierCI("C2"), - }, - Vindex: null, - }}, - Columns: []vindexes.Column{{ - Name: sqlparser.NewIdentifierCI("C1"), - }, { - Name: sqlparser.NewIdentifierCI("C2"), - }}, - }, - authoritative: false, - vindex: []string{"c1"}, - }, { - // AutoIncrement. - in: &vindexes.Table{ - AutoIncrement: &vindexes.AutoIncrement{ - Column: sqlparser.NewIdentifierCI("C1"), - }, - Columns: []vindexes.Column{{ - Name: sqlparser.NewIdentifierCI("C1"), - }, { - Name: sqlparser.NewIdentifierCI("C2"), - }}, - }, - authoritative: false, - vindex: []string{}, - }, { - // Column vindex specifies a column not in list. 
- in: &vindexes.Table{ - ColumnVindexes: []*vindexes.ColumnVindex{{ - Columns: []sqlparser.IdentifierCI{sqlparser.NewIdentifierCI("C1")}, - Vindex: null, - }}, - Columns: []vindexes.Column{{ - Name: sqlparser.NewIdentifierCI("C2"), - }}, - }, - authoritative: false, - vindex: []string{"c1"}, - }, { - // Column vindex specifies columns with none in list. - in: &vindexes.Table{ - ColumnVindexes: []*vindexes.ColumnVindex{{ - Columns: []sqlparser.IdentifierCI{ - sqlparser.NewIdentifierCI("C1"), - sqlparser.NewIdentifierCI("C2"), - }, - Vindex: null, - }}, - }, - authoritative: false, - vindex: []string{"c1"}, - }, { - // AutoIncrement specifies a column not in list. - in: &vindexes.Table{ - AutoIncrement: &vindexes.AutoIncrement{ - Column: sqlparser.NewIdentifierCI("C1"), - }, - Columns: []vindexes.Column{{ - Name: sqlparser.NewIdentifierCI("C2"), - }}, - }, - authoritative: false, - vindex: []string{}, - }, { - // Two column vindexes. - in: &vindexes.Table{ - ColumnVindexes: []*vindexes.ColumnVindex{{ - Columns: []sqlparser.IdentifierCI{ - sqlparser.NewIdentifierCI("C1"), - }, - Vindex: null, - }, { - Columns: []sqlparser.IdentifierCI{ - sqlparser.NewIdentifierCI("C2"), - }, - Vindex: null, - }}, - }, - authoritative: false, - vindex: []string{"c1", "c2"}, - }} - - out := []string{"c1", "c2"} - for _, tcase := range tcases { - st := newSymtab() - vindexMap, err := st.AddVSchemaTable(tname, tcase.in, rb) - tcasein, _ := json.Marshal(tcase.in) - if err != nil { - if err.Error() != tcase.err { - t.Errorf("st.AddVSchemaTable(%s) err: %v, want %s", tcasein, err, tcase.err) - } - continue - } else if tcase.err != "" { - t.Errorf("st.AddVSchemaTable(%s) succeeded, want error: %s", tcasein, tcase.err) - continue - } - tab := st.tables[tname] - for _, col := range out { - if tab.columns[col] == nil { - t.Errorf("st.AddVSchemaTable(%s): column %s not found", tcasein, col) - } - } - for _, col := range tcase.vindex { - c := tab.columns[col] - if c == nil { - 
t.Errorf("st.AddVSchemaTable(%s): column %s not found", tcasein, col) - } - if _, ok := vindexMap[c]; !ok { - t.Errorf("st.AddVSchemaTable(%s).vindexMap: column %s not found", tcasein, col) - } - } - if tab.isAuthoritative != tcase.authoritative { - t.Errorf("st.AddVSchemaTable(%s).authoritative: %v want %v", tcasein, tab.isAuthoritative, tcase.authoritative) - } - } -} -*/ - -func TestGetReturnType(t *testing.T) { - tests := []struct { - input sqlparser.Expr - output querypb.Type - expectedErr error - }{{ - input: &sqlparser.FuncExpr{Name: sqlparser.NewIdentifierCI("Abs"), Exprs: sqlparser.SelectExprs{ - &sqlparser.AliasedExpr{ - Expr: &sqlparser.ColName{ - Name: sqlparser.NewIdentifierCI("A"), - Metadata: &column{ - typ: querypb.Type_DECIMAL, - }, - }, - }, - }}, - output: querypb.Type_DECIMAL, - }, { - input: &sqlparser.Count{}, - output: querypb.Type_INT64, - }, { - input: &sqlparser.CountStar{}, - output: querypb.Type_INT64, - }} - - for _, test := range tests { - t.Run(sqlparser.String(test.input), func(t *testing.T) { - got, err := GetReturnType(test.input) - if test.expectedErr != nil { - require.EqualError(t, err, test.expectedErr.Error()) - } else { - require.NoError(t, err) - require.Equal(t, test.output, got) - } - }) - } -} diff --git a/go/vt/vtgate/planbuilder/system_tables.go b/go/vt/vtgate/planbuilder/system_tables.go deleted file mode 100644 index d8c429af6e9..00000000000 --- a/go/vt/vtgate/planbuilder/system_tables.go +++ /dev/null @@ -1,136 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "strings" - - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" -) - -func (pb *primitiveBuilder) findSysInfoRoutingPredicates(expr sqlparser.Expr, rut *route, reservedVars *sqlparser.ReservedVars) error { - isTableSchema, bvName, out, err := extractInfoSchemaRoutingPredicate(expr, reservedVars) - if err != nil { - return err - } - if out == nil { - // we didn't find a predicate to use for routing, so we just exit early - return nil - } - - if isTableSchema { - rut.eroute.SysTableTableSchema = append(rut.eroute.SysTableTableSchema, out) - } else { - if rut.eroute.SysTableTableName == nil { - rut.eroute.SysTableTableName = map[string]evalengine.Expr{} - } - rut.eroute.SysTableTableName[bvName] = out - } - - return nil -} - -func findOtherComparator(cmp *sqlparser.ComparisonExpr) (bool, sqlparser.Expr, sqlparser.Expr, func(arg *sqlparser.Argument)) { - if schema, table := isTableSchemaOrName(cmp.Left); schema || table { - return schema, cmp.Left, cmp.Right, func(arg *sqlparser.Argument) { - cmp.Right = arg - } - } - if schema, table := isTableSchemaOrName(cmp.Right); schema || table { - return schema, cmp.Right, cmp.Left, func(arg *sqlparser.Argument) { - cmp.Left = arg - } - } - - return false, nil, nil, nil -} - -func isTableSchemaOrName(e sqlparser.Expr) (isTableSchema bool, isTableName bool) { - col, ok := e.(*sqlparser.ColName) - if !ok { - return false, false - } - return isDbNameCol(col), isTableNameCol(col) -} - -var schemaColumns = map[string]any{ - "table_schema": nil, - "constraint_schema": nil, - "schema_name": nil, - "routine_schema": nil, - "specific_schema": nil, - "event_schema": nil, - "referenced_table_schema": nil, - "index_schema": nil, - "trigger_schema": nil, - "event_object_schema": nil, 
-} - -func isDbNameCol(col *sqlparser.ColName) bool { - _, found := schemaColumns[col.Name.Lowered()] - return found -} - -func isTableNameCol(col *sqlparser.ColName) bool { - return col.Name.EqualString("table_name") || col.Name.EqualString("referenced_table_name") -} - -func extractInfoSchemaRoutingPredicate( - in sqlparser.Expr, - reservedVars *sqlparser.ReservedVars, -) (isSchemaName bool, name string, evalExpr evalengine.Expr, err error) { - cmp, ok := in.(*sqlparser.ComparisonExpr) - if !ok || cmp.Operator != sqlparser.EqualOp { - return - } - - isSchemaName, col, other, replaceOther := findOtherComparator(cmp) - if col == nil || !shouldRewrite(other) { - return - } - - evalExpr, err = evalengine.Translate(other, &evalengine.Config{ResolveColumn: operators.NotImplementedSchemaInfoResolver}) - if err != nil { - if strings.Contains(err.Error(), evalengine.ErrTranslateExprNotSupported) { - // This just means we can't rewrite this particular expression, - // not that we have to exit altogether - err = nil - return - } - return false, "", nil, err - } - - if isSchemaName { - name = sqltypes.BvSchemaName - } else { - name = reservedVars.ReserveColName(col.(*sqlparser.ColName)) - } - replaceOther(sqlparser.NewTypedArgument(name, sqltypes.VarChar)) - return isSchemaName, name, evalExpr, nil -} - -func shouldRewrite(e sqlparser.Expr) bool { - switch node := e.(type) { - case *sqlparser.FuncExpr: - // we should not rewrite database() calls against information_schema - return !(node.Name.EqualString("database") || node.Name.EqualString("schema")) - } - return true -} diff --git a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json index 46e064599f0..684197c5202 100644 --- a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json @@ -2,8 +2,7 @@ { "comment": "count(*) spread across join", "query": "select count(*) from user join user_extra on user.foo = user_extra.bar", - 
"v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) from user join user_extra on user.foo = user_extra.bar", "Instructions": { @@ -63,8 +62,7 @@ { "comment": "sum spread across join", "query": "select sum(user.col) from user join user_extra on user.foo = user_extra.bar", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select sum(user.col) from user join user_extra on user.foo = user_extra.bar", "Instructions": { @@ -124,8 +122,7 @@ { "comment": "count spread across join", "query": "select count(user.col) from user join user_extra on user.foo = user_extra.bar", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(user.col) from user join user_extra on user.foo = user_extra.bar", "Instructions": { @@ -185,8 +182,7 @@ { "comment": "max spread across join", "query": "select max(user.col) from user join user_extra on user.foo = user_extra.bar", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select max(user.col) from user join user_extra on user.foo = user_extra.bar", "Instructions": { @@ -238,8 +234,7 @@ { "comment": "min spread across join RHS", "query": "select min(user_extra.col) from user join user_extra on user.foo = user_extra.bar", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select min(user_extra.col) from user join user_extra on user.foo = user_extra.bar", "Instructions": { @@ -291,22 +286,7 @@ { "comment": "group by a unique vindex should revert to simple route, and having clause should find the correct symbols.", "query": "select id, count(*) c from user group by id having max(col) > 10", - "v3-plan": 
{ - "QueryType": "SELECT", - "Original": "select id, count(*) c from user group by id having max(col) > 10", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, count(*) as c from `user` where 1 != 1 group by id", - "Query": "select id, count(*) as c from `user` group by id having max(col) > 10", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, count(*) c from user group by id having max(col) > 10", "Instructions": { @@ -328,80 +308,13 @@ { "comment": "scatter aggregate in a subquery", "query": "select a from (select count(*) as a from user) t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a from (select count(*) as a from user) t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) as a from `user` where 1 != 1", - "Query": "select count(*) as a from `user`", - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a from (select count(*) as a from user) t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count_star(0) AS a", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) as a from `user` where 1 != 1", - "Query": "select count(*) as a from `user`", - "Table": "`user`" - } - ] - } - ] - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "scatter aggregate with non-aggregate 
expressions.", - "query": "select id, count(*) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, count(*) from user", "Instructions": { "OperatorType": "Aggregate", "Variant": "Scalar", - "Aggregates": "sum_count(1) AS count", + "Aggregates": "sum_count_star(0) AS a", "Inputs": [ { "OperatorType": "Route", @@ -410,20 +323,27 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, count(*) from `user` where 1 != 1", - "Query": "select id, count(*) from `user`", + "FieldQuery": "select count(*) as a from `user` where 1 != 1", + "Query": "select count(*) as a from `user`", "Table": "`user`" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "scatter aggregate with non-aggregate expressions.", + "query": "select id, count(*) from user", + "plan": { "QueryType": "SELECT", "Original": "select id, count(*) from user", "Instructions": { "OperatorType": "Aggregate", "Variant": "Scalar", - "Aggregates": "random(0) AS id, sum_count_star(1) AS count(*)", + "Aggregates": "any_value(0) AS id, sum_count_star(1) AS count(*)", "Inputs": [ { "OperatorType": "Route", @@ -446,13 +366,14 @@ { "comment": "scatter aggregate using distinctdistinct", "query": "select distinct col from user", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select distinct col from user", "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "0", + "OperatorType": "Distinct", + "Collations": [ + "0" + ], "Inputs": [ { "OperatorType": "Route", @@ -462,31 +383,7 @@ "Sharded": true }, "FieldQuery": "select col from `user` where 1 != 1", - "OrderBy": "0 ASC", - "Query": "select distinct col from `user` order by col asc", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select distinct col from user", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - 
"Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1 group by col", - "OrderBy": "0 ASC", - "Query": "select col from `user` group by col order by col asc", + "Query": "select distinct col from `user`", "Table": "`user`" } ] @@ -499,30 +396,7 @@ { "comment": "scatter aggregate group by select col", "query": "select col from user group by col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user group by col", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1 group by col", - "OrderBy": "0 ASC", - "Query": "select col from `user` group by col order by col asc", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user group by col", "Instructions": { @@ -552,22 +426,7 @@ { "comment": "count with distinct group by unique vindex", "query": "select id, count(distinct col) from user group by id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, count(distinct col) from user group by id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, count(distinct col) from `user` where 1 != 1 group by id", - "Query": "select id, count(distinct col) from `user` group by id", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, count(distinct col) from user group by id", "Instructions": { @@ -588,38 +447,14 @@ }, { "comment": "count with distinct unique vindex", - "query": "select col, count(distinct id) from user group by col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col, count(distinct id) from user group 
by col", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col, count(distinct id) from `user` where 1 != 1 group by col", - "OrderBy": "0 ASC", - "Query": "select col, count(distinct id) from `user` group by col order by col asc", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "query": "select col, count(distinct id), sum(distinct id) from user group by col", + "plan": { "QueryType": "SELECT", - "Original": "select col, count(distinct id) from user group by col", + "Original": "select col, count(distinct id), sum(distinct id) from user group by col", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "sum_count_distinct(1) AS count(distinct id)", + "Aggregates": "sum_count_distinct(1) AS count(distinct id), sum_sum_distinct(2) AS sum(distinct id)", "GroupBy": "0", "Inputs": [ { @@ -629,9 +464,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col, count(distinct id) from `user` where 1 != 1 group by col", + "FieldQuery": "select col, count(distinct id), sum(distinct id) from `user` where 1 != 1 group by col", "OrderBy": "0 ASC", - "Query": "select col, count(distinct id) from `user` group by col order by col asc", + "Query": "select col, count(distinct id), sum(distinct id) from `user` group by col order by col asc", "Table": "`user`" } ] @@ -644,32 +479,7 @@ { "comment": "count with distinct no unique vindex", "query": "select col1, count(distinct col2) from user group by col1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col1, count(distinct col2) from user group by col1", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "count_distinct_count(1) AS count(distinct col2)", - "GroupBy": "0", - "Inputs": [ - { - 
"OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", - "OrderBy": "(0|2) ASC, (1|3) ASC", - "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1, count(distinct col2) from user group by col1", "Instructions": { @@ -686,9 +496,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)", + "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", "OrderBy": "(0|2) ASC, (1|3) ASC", - "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc", + "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc", "Table": "`user`" } ] @@ -701,31 +511,7 @@ { "comment": "count with distinct no unique vindex and no group by", "query": "select count(distinct col2) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select count(distinct col2) from user", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "count_distinct_count(0) AS count(distinct col2)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - 
"FieldQuery": "select col2, weight_string(col2) from `user` where 1 != 1 group by col2, weight_string(col2)", - "OrderBy": "(0|1) ASC", - "Query": "select col2, weight_string(col2) from `user` group by col2, weight_string(col2) order by col2 asc", - "ResultColumns": 1, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(distinct col2) from user", "Instructions": { @@ -756,32 +542,7 @@ { "comment": "count with distinct no unique vindex, count expression aliased", "query": "select col1, count(distinct col2) c2 from user group by col1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col1, count(distinct col2) c2 from user group by col1", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "count_distinct_count(1) AS c2", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", - "OrderBy": "(0|2) ASC, (1|3) ASC", - "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1, count(distinct col2) c2 from user group by col1", "Instructions": { @@ -798,9 +559,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)", + "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", "OrderBy": "(0|2) ASC, (1|3) ASC", - 
"Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc", + "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc", "Table": "`user`" } ] @@ -813,14 +574,15 @@ { "comment": "sum with distinct no unique vindex", "query": "select col1, sum(distinct col2) from user group by col1", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1, sum(distinct col2) from user group by col1", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "sum_distinct_sum(1) AS sum(distinct col2)", - "GroupBy": "0", + "Aggregates": "sum_distinct(1|3) AS sum(distinct col2)", + "GroupBy": "(0|2)", + "ResultColumns": 2, "Inputs": [ { "OperatorType": "Route", @@ -832,19 +594,25 @@ "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", "OrderBy": "(0|2) ASC, (1|3) ASC", "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc", - "ResultColumns": 2, "Table": "`user`" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "min with distinct no unique vindex. 
distinct is ignored.", + "query": "select col1, min(distinct col2) from user group by col1", + "plan": { "QueryType": "SELECT", - "Original": "select col1, sum(distinct col2) from user group by col1", + "Original": "select col1, min(distinct col2) from user group by col1", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "sum_distinct(1|3) AS sum(distinct col2)", + "Aggregates": "min(1|3) AS min(distinct col2)", "GroupBy": "(0|2)", "ResultColumns": 2, "Inputs": [ @@ -855,66 +623,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)", - "OrderBy": "(0|2) ASC, (1|3) ASC", - "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc", - "Table": "`user`" - } - ] - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "min with distinct no unique vindex. 
distinct is ignored.", - "query": "select col1, min(distinct col2) from user group by col1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col1, min(distinct col2) from user group by col1", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "min(1)", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, min(distinct col2), weight_string(col1) from `user` where 1 != 1 group by col1, weight_string(col1)", + "FieldQuery": "select col1, min(col2) as `min(distinct col2)`, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), weight_string(col2)", "OrderBy": "(0|2) ASC", - "Query": "select col1, min(distinct col2), weight_string(col1) from `user` group by col1, weight_string(col1) order by col1 asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select col1, min(distinct col2) from user group by col1", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "min(1|3) AS min(distinct col2)", - "GroupBy": "(0|2)", - "ResultColumns": 2, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)", - "OrderBy": "(0|2) ASC, (1|3) ASC", - "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc", + "Query": "select col1, min(col2) as `min(distinct col2)`, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), weight_string(col2) order by col1 asc", "Table": "`user`" } ] @@ -927,39 
+638,7 @@ { "comment": "order by count distinct", "query": "select col1, count(distinct col2) k from user group by col1 order by k", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col1, count(distinct col2) k from user group by col1 order by k", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "1 ASC", - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "count_distinct_count(1) AS k", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", - "OrderBy": "(0|2) ASC, (1|3) ASC", - "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1, count(distinct col2) k from user group by col1 order by k", "Instructions": { @@ -981,9 +660,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)", + "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", "OrderBy": "(0|2) ASC, (1|3) ASC", - "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc", + "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc", 
"Table": "`user`" } ] @@ -998,38 +677,12 @@ { "comment": "scatter aggregate group by aggregate function", "query": "select count(*) b from user group by b", - "v3-plan": "VT03005: cannot group on 'b'", - "gen4-plan": "VT03005: cannot group on 'count(*)'" + "plan": "VT03005: cannot group on 'count(*)'" }, { "comment": "scatter aggregate multiple group by (columns)", "query": "select a, b, count(*) from user group by a, b", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, count(*) from user group by a, b", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(2) AS count", - "GroupBy": "0, 1", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, b, weight_string(a), weight_string(b)", - "OrderBy": "(0|3) ASC, (1|4) ASC", - "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, b, weight_string(a), weight_string(b) order by a asc, b asc", - "ResultColumns": 3, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, count(*) from user group by a, b", "Instructions": { @@ -1061,32 +714,7 @@ { "comment": "scatter aggregate multiple group by (numbers)", "query": "select a, b, count(*) from user group by 2, 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, count(*) from user group by 2, 1", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(2) AS count", - "GroupBy": "1, 0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by 2, 1, weight_string(b), weight_string(a)", - "OrderBy": 
"(1|3) ASC, (0|4) ASC", - "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by 2, 1, weight_string(b), weight_string(a) order by b asc, a asc", - "ResultColumns": 3, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, count(*) from user group by 2, 1", "Instructions": { @@ -1118,32 +746,7 @@ { "comment": "scatter aggregate multiple group by columns inverse order", "query": "select a, b, count(*) from user group by b, a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, count(*) from user group by b, a", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(2) AS count", - "GroupBy": "1, 0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by b, a, weight_string(b), weight_string(a)", - "OrderBy": "(1|3) ASC, (0|4) ASC", - "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by b, a, weight_string(b), weight_string(a) order by b asc, a asc", - "ResultColumns": 3, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, count(*) from user group by b, a", "Instructions": { @@ -1175,30 +778,7 @@ { "comment": "scatter aggregate group by column number", "query": "select col from user group by 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user group by 1", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1 group by 1", - "OrderBy": "0 ASC", - "Query": "select col from `user` group by 1 order by 
col asc", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user group by 1", "Instructions": { @@ -1228,35 +808,12 @@ { "comment": "scatter aggregate group by invalid column number", "query": "select col from user group by 2", - "v3-plan": "VT03014: unknown column '2' in 'group statement'", - "gen4-plan": "Unknown column '2' in 'group statement'" + "plan": "Unknown column '2' in 'group statement'" }, { "comment": "scatter aggregate order by null", "query": "select count(*) from user order by null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select count(*) from user order by null", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from `user` where 1 != 1", - "Query": "select count(*) from `user`", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) from user order by null", "Instructions": { @@ -1285,38 +842,13 @@ { "comment": "scatter aggregate with numbered order by columns", "query": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by 1, 2, 3", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by 1, 2, 3", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(4) AS count", - "GroupBy": "0, 1, 2", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c)", - "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC", - "Query": 
"select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c) order by 1 asc, 2 asc, 3 asc", - "ResultColumns": 5, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by 1, 2, 3", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "random(3) AS d, sum_count_star(4) AS count(*)", + "Aggregates": "any_value(3) AS d, sum_count_star(4) AS count(*)", "GroupBy": "(0|5), (1|6), (2|7)", "ResultColumns": 5, "Inputs": [ @@ -1342,38 +874,13 @@ { "comment": "scatter aggregate with named order by columns", "query": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by a, b, c", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by a, b, c", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(4) AS count", - "GroupBy": "0, 1, 2", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c)", - "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC", - "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c) order by a asc, b asc, c asc", - "ResultColumns": 5, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by a, b, c", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "random(3) AS d, sum_count_star(4) 
AS count(*)", + "Aggregates": "any_value(3) AS d, sum_count_star(4) AS count(*)", "GroupBy": "(0|5), (1|6), (2|7)", "ResultColumns": 5, "Inputs": [ @@ -1399,32 +906,7 @@ { "comment": "scatter aggregate with jumbled order by columns", "query": "select a, b, c, d, count(*) from user group by 1, 2, 3, 4 order by d, b, a, c", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3, 4 order by d, b, a, c", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(4) AS count", - "GroupBy": "0, 1, 2, 3", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c)", - "OrderBy": "(3|5) ASC, (1|6) ASC, (0|7) ASC, (2|8) ASC", - "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by 1, 2, 3, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc", - "ResultColumns": 5, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3, 4 order by d, b, a, c", "Instructions": { @@ -1441,9 +923,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by a, b, c, d, weight_string(d), weight_string(b), weight_string(a), weight_string(c)", + "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by d, b, a, c, weight_string(d), weight_string(b), 
weight_string(a), weight_string(c)", "OrderBy": "(3|5) ASC, (1|6) ASC, (0|7) ASC, (2|8) ASC", - "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by a, b, c, d, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc", + "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by d, b, a, c, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc", "Table": "`user`" } ] @@ -1456,32 +938,7 @@ { "comment": "scatter aggregate with jumbled group by and order by columns", "query": "select a, b, c, d, count(*) from user group by 3, 2, 1, 4 order by d, b, a, c", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, c, d, count(*) from user group by 3, 2, 1, 4 order by d, b, a, c", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(4) AS count", - "GroupBy": "2, 1, 0, 3", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by 3, 2, 1, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c)", - "OrderBy": "(3|5) ASC, (1|6) ASC, (0|7) ASC, (2|8) ASC", - "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by 3, 2, 1, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc", - "ResultColumns": 5, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, c, d, count(*) from user group by 3, 2, 1, 4 order by d, b, a, c", "Instructions": { @@ -1498,9 
+955,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by c, b, a, d, weight_string(d), weight_string(b), weight_string(a), weight_string(c)", + "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by d, b, a, c, weight_string(d), weight_string(b), weight_string(a), weight_string(c)", "OrderBy": "(3|5) ASC, (1|6) ASC, (0|7) ASC, (2|8) ASC", - "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by c, b, a, d, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc", + "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by d, b, a, c, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc", "Table": "`user`" } ] @@ -1513,14 +970,15 @@ { "comment": "scatter aggregate with some descending order by cols", "query": "select a, b, c, count(*) from user group by 3, 2, 1 order by 1 desc, 3 desc, b", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, c, count(*) from user group by 3, 2, 1 order by 1 desc, 3 desc, b", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "sum_count(3) AS count", - "GroupBy": "2, 1, 0", + "Aggregates": "sum_count_star(3) AS count(*)", + "GroupBy": "(0|4), (2|5), (1|6)", + "ResultColumns": 4, "Inputs": [ { "OperatorType": "Route", @@ -1529,35 +987,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` where 1 != 1 group by 3, 2, 1, weight_string(a), weight_string(c), weight_string(b)", + "FieldQuery": "select a, b, c, 
count(*), weight_string(a), weight_string(c), weight_string(b) from `user` where 1 != 1 group by a, c, b, weight_string(a), weight_string(c), weight_string(b)", "OrderBy": "(0|4) DESC, (2|5) DESC, (1|6) ASC", - "Query": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` group by 3, 2, 1, weight_string(a), weight_string(c), weight_string(b) order by 1 desc, 3 desc, b asc", - "ResultColumns": 4, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select a, b, c, count(*) from user group by 3, 2, 1 order by 1 desc, 3 desc, b", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count_star(3) AS count(*)", - "GroupBy": "(0|4), (2|5), (1|6)", - "ResultColumns": 4, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` where 1 != 1 group by c, b, a, weight_string(a), weight_string(c), weight_string(b)", - "OrderBy": "(0|4) DESC, (2|5) DESC, (1|6) ASC", - "Query": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` group by c, b, a, weight_string(a), weight_string(c), weight_string(b) order by a desc, c desc, b asc", + "Query": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` group by a, c, b, weight_string(a), weight_string(c), weight_string(b) order by a desc, c desc, b asc", "Table": "`user`" } ] @@ -1570,43 +1002,12 @@ { "comment": "invalid order by column numner for scatter", "query": "select col, count(*) from user group by col order by 5 limit 10", - "v3-plan": "VT03014: unknown column '5' in 'order clause'", - "gen4-plan": "Unknown column '5' in 'order clause'" + "plan": "Unknown column '5' in 'order clause'" }, { "comment": "aggregate with limit", "query": "select col, count(*) 
from user group by col limit 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col, count(*) from user group by col limit 10", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col, count(*) from `user` where 1 != 1 group by col", - "OrderBy": "0 ASC", - "Query": "select col, count(*) from `user` group by col order by col asc limit :__upper_limit", - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, count(*) from user group by col limit 10", "Instructions": { @@ -1643,26 +1044,7 @@ { "comment": "Group by with collate operator", "query": "select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col1 as a from `user` where 1 != 1 group by a collate utf8_general_ci", - "Query": "select `user`.col1 as a from `user` where `user`.id = 5 group by a collate utf8_general_ci", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci", "Instructions": { @@ -1688,22 +1070,7 @@ { "comment": "routing rules for aggregates", "query": "select id, count(*) from route2 group by id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, count(*) from route2 group by id", - 
"Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select id, count(*) from unsharded as route2 where 1 != 1 group by id", - "Query": "select id, count(*) from unsharded as route2 group by id", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, count(*) from route2 group by id", "Instructions": { @@ -1725,22 +1092,7 @@ { "comment": "order by on a reference table", "query": "select col from ref order by col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from ref order by col", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from ref where 1 != 1", - "Query": "select col from ref order by col asc", - "Table": "ref" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from ref order by col", "Instructions": { @@ -1762,43 +1114,13 @@ { "comment": "distinct and aggregate functions missing group by", "query": "select distinct a, count(*) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select distinct a, count(*) from user", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1", - "OrderBy": "(0|2) ASC", - "Query": "select a, count(*), weight_string(a) from `user` order by a asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select distinct a, count(*) from user", "Instructions": { "OperatorType": "Aggregate", "Variant": "Scalar", 
- "Aggregates": "random(0) AS a, sum_count_star(1) AS count(*)", + "Aggregates": "any_value(0) AS a, sum_count_star(1) AS count(*)", "Inputs": [ { "OperatorType": "Route", @@ -1821,37 +1143,7 @@ { "comment": "distinct and aggregate functions", "query": "select distinct a, count(*) from user group by a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select distinct a, count(*) from user group by a", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "0, 0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)", - "OrderBy": "(0|2) ASC, (0|2) ASC", - "Query": "select a, count(*), weight_string(a) from `user` group by a, weight_string(a) order by a asc, a asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select distinct a, count(*) from user group by a", "Instructions": { @@ -1883,14 +1175,13 @@ { "comment": "Group by invalid column number (code is duplicated from symab).", "query": "select id from user group by 1.1", - "v3-plan": "VT13001: [BUG] column number is not an INT", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user group by 1.1", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "random(0) AS id", + "Aggregates": "any_value(0) AS id", "GroupBy": "1", "ResultColumns": 1, "Inputs": [ @@ -1916,14 +1207,12 @@ { "comment": "Group by out of range column number (code is duplicated from symab).", "query": "select id from user group by 2", - "v3-plan": "VT03014: unknown column '2' in 'group statement'", - "gen4-plan": "Unknown column '2' in 'group statement'" + "plan": "Unknown column '2' in 'group 
statement'" }, { "comment": "here it is safe to remove the order by on the derived table since it will not influence the output of the count(*)", "query": "select count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a", "Instructions": { @@ -1939,7 +1228,7 @@ "Sharded": true }, "FieldQuery": "select count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where 1 != 1) as a where 1 != 1", - "Query": "select count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id order by user_extra.extra asc) as a", + "Query": "select count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id) as a", "Table": "`user`, user_extra" } ] @@ -1953,32 +1242,7 @@ { "comment": "order by inside derived tables can be ignored", "query": "select col from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col, user_extra.extra, weight_string(user_extra.extra) from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1", - "OrderBy": "(1|2) ASC", - "Query": "select `user`.col, user_extra.extra, 
weight_string(user_extra.extra) from `user` join user_extra on `user`.id = user_extra.user_id order by user_extra.extra asc", - "ResultColumns": 2, - "Table": "`user`, user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a", "Instructions": { @@ -1989,7 +1253,7 @@ "Sharded": true }, "FieldQuery": "select col from (select `user`.col, user_extra.extra from `user`, user_extra where 1 != 1) as a where 1 != 1", - "Query": "select col from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id order by user_extra.extra asc) as a", + "Query": "select col from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id) as a", "Table": "`user`, user_extra" }, "TablesUsed": [ @@ -2001,8 +1265,7 @@ { "comment": "here we keep the order since the column is visible on the outside, and used by the orderedAggregate", "query": "select col, count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a group by col", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a group by col", "Instructions": { @@ -2020,7 +1283,7 @@ }, "FieldQuery": "select col, count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where 1 != 1) as a where 1 != 1 group by col", "OrderBy": "0 ASC", - "Query": "select col, count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id order by user_extra.extra asc) as a group by col order by col asc", + "Query": "select col, count(*) from 
(select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id) as a group by col order by col asc", "Table": "`user`, user_extra" } ] @@ -2034,37 +1297,15 @@ { "comment": "optimize group by when using distinct with no aggregation", "query": "select distinct col1, col2 from user group by col1, col2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select distinct col1, col2 from user group by col1, col2", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "0, 1, 0, 1", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", - "OrderBy": "(0|2) ASC, (1|3) ASC, (0|2) ASC, (1|3) ASC", - "Query": "select distinct col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc, col1 asc, col2 asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select distinct col1, col2 from user group by col1, col2", "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "(0|2), (1|3)", + "OperatorType": "Distinct", + "Collations": [ + "(0:2)", + "(1:3)" + ], "ResultColumns": 2, "Inputs": [ { @@ -2074,9 +1315,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", - "OrderBy": "(0|2) ASC, (1|3) ASC", - "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc", + "FieldQuery": "select col1, col2, weight_string(col1), 
weight_string(col2) from `user` where 1 != 1", + "Query": "select distinct col1, col2, weight_string(col1), weight_string(col2) from `user`", "Table": "`user`" } ] @@ -2089,34 +1329,7 @@ { "comment": "do not use distinct when using only aggregates and no group by", "query": "select distinct count(*) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select distinct count(*) from user", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from `user` where 1 != 1", - "Query": "select count(*) from `user`", - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select distinct count(*) from user", "Instructions": { @@ -2145,8 +1358,7 @@ { "comment": "Grouping on join", "query": "select user.a from user join user_extra group by user.a", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.a from user join user_extra group by user.a", "Instructions": { @@ -2194,17 +1406,10 @@ ] } }, - { - "comment": "Cannot have more than one aggr(distinct...", - "query": "select count(distinct a), count(distinct b) from user", - "v3-plan": "VT12001: unsupported: only one DISTINCT aggregation allowed in a SELECT: count(distinct b)", - "gen4-plan": "VT12001: unsupported: only one DISTINCT aggregation is allowed in a SELECT: count(distinct b)" - }, { "comment": "multiple distinct functions with grouping.", "query": "select col1, count(distinct col2), sum(distinct col2) from user group by col1", - "v3-plan": "VT12001: unsupported: only one DISTINCT aggregation allowed in a SELECT: sum(distinct col2)", - "gen4-plan": { + "plan": { "QueryType": "SELECT", 
"Original": "select col1, count(distinct col2), sum(distinct col2) from user group by col1", "Instructions": { @@ -2221,9 +1426,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, col2, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)", + "FieldQuery": "select col1, col2, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", "OrderBy": "(0|3) ASC, (1|4) ASC", - "Query": "select col1, col2, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc", + "Query": "select col1, col2, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc", "Table": "`user`" } ] @@ -2236,8 +1441,7 @@ { "comment": "aggregate query with order by aggregate column along with NULL", "query": "select col, count(*) k from user group by col order by null, k", - "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: null", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, count(*) k from user group by col order by null, k", "Instructions": { @@ -2275,31 +1479,7 @@ { "comment": "aggregate query with order by NULL", "query": "select col, count(*) k from user group by col order by null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col, count(*) k from user group by col order by null", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col, count(*) as k from `user` where 1 != 1 group by col", - "OrderBy": "0 ASC", - "Query": "select col, 
count(*) as k from `user` group by col order by col asc", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, count(*) k from user group by col order by null", "Instructions": { @@ -2330,8 +1510,7 @@ { "comment": "join query on sharding key with group by a unique vindex with having clause.", "query": "select user.id, count(*) c from user, user_extra where user.id = user_extra.user_id group by user.id having max(user.col) > 10", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.id, count(*) c from user, user_extra where user.id = user_extra.user_id group by user.id having max(user.col) > 10", "Instructions": { @@ -2354,29 +1533,7 @@ { "comment": "correlated subquery on sharding key with group by a unique vindex with having clause.", "query": "select count(*) from user where exists (select 1 from user_extra where user_id = user.id group by user_id having max(col) > 10)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select count(*) from user where exists (select 1 from user_extra where user_id = user.id group by user_id having max(col) > 10)", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from `user` where 1 != 1", - "Query": "select count(*) from `user` where exists (select 1 from user_extra where user_id = `user`.id group by user_id having max(col) > 10 limit 1)", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) from user where exists (select 1 from user_extra where user_id = user.id group by user_id having max(col) > 10)", "Instructions": { @@ -2392,7 +1549,7 @@ "Sharded": true }, "FieldQuery": "select 
count(*) from `user` where 1 != 1", - "Query": "select count(*) from `user` where exists (select 1 from user_extra where user_id = `user`.id group by user_id having max(col) > 10 limit 1)", + "Query": "select count(*) from `user` where exists (select 1 from user_extra where user_id = `user`.id group by user_id having max(col) > 10)", "Table": "`user`" } ] @@ -2406,7 +1563,7 @@ { "comment": "aggregation filtering by having on a route", "query": "select id from user group by id having count(id) = 10", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user group by id having count(id) = 10", "Instructions": { @@ -2419,56 +1576,16 @@ "FieldQuery": "select id from `user` where 1 != 1 group by id", "Query": "select id from `user` group by id having count(id) = 10", "Table": "`user`" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select id from user group by id having count(id) = 10", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1 group by id", - "Query": "select id from `user` group by id having count(id) = 10", - "Table": "`user`" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "weight_string addition to group by", - "query": "select lower(textcol1) as v, count(*) from user group by v", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select lower(textcol1) as v, count(*) from user group by v", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` where 1 != 1 group by v, weight_string(lower(textcol1))", - "OrderBy": "(0|2) ASC", - "Query": "select lower(textcol1) as v, 
count(*), weight_string(lower(textcol1)) from `user` group by v, weight_string(lower(textcol1)) order by v asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "weight_string addition to group by", + "query": "select lower(textcol1) as v, count(*) from user group by v", + "plan": { "QueryType": "SELECT", "Original": "select lower(textcol1) as v, count(*) from user group by v", "Instructions": { @@ -2500,32 +1617,7 @@ { "comment": "weight_string addition to group by when also there in order by", "query": "select char_length(texcol1) as a, count(*) from user group by a order by a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select char_length(texcol1) as a, count(*) from user group by a order by a", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` where 1 != 1 group by a, weight_string(char_length(texcol1))", - "OrderBy": "(0|2) ASC", - "Query": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` group by a, weight_string(char_length(texcol1)) order by a asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select char_length(texcol1) as a, count(*) from user group by a order by a", "Instructions": { @@ -2557,30 +1649,7 @@ { "comment": "order by inside and outside parenthesis select", "query": "(select id from user order by 1 desc) order by 1 asc limit 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(select id from user order by 1 desc) order by 1 asc limit 2", - "Instructions": { - "OperatorType": "Limit", - "Count": 
"INT64(2)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from `user` order by 1 asc limit :__upper_limit", - "ResultColumns": 1, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select id from user order by 1 desc) order by 1 asc limit 2", "Instructions": { @@ -2610,31 +1679,32 @@ { "comment": "correlated subquery in exists clause with an ordering", "query": "select col, id from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id) order by id", - "v3-plan": "VT12001: unsupported: cross-shard correlated subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, id from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id) order by id", "Instructions": { "OperatorType": "SemiJoin", "JoinVars": { - "user_id": 0 + "user_id": 1 }, - "ProjectedIndexes": "-2,-1", "TableName": "`user`_user_extra", "Inputs": [ { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.id, col, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|2) ASC", - "Query": "select `user`.id, col, weight_string(id) from `user` order by id asc", + "FieldQuery": "select col, id, weight_string(id) from `user` where 1 != 1", + "OrderBy": "(1|2) ASC", + "Query": "select col, id, weight_string(id) from `user` order by id asc", + "ResultColumns": 2, "Table": "`user`" }, { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -2660,13 +1730,12 @@ { "comment": "Column and Literal equality filter on scatter aggregates", "query": "select count(*) a from user having a = 10", - "v3-plan": "VT12001: 
unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) a from user having a = 10", "Instructions": { "OperatorType": "Filter", - "Predicate": ":0 = 10", + "Predicate": "count(*) = 10", "Inputs": [ { "OperatorType": "Aggregate", @@ -2696,13 +1765,12 @@ { "comment": "Equality filtering with column and string literal on scatter aggregates", "query": "select count(*) a from user having a = '1'", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) a from user having a = '1'", "Instructions": { "OperatorType": "Filter", - "Predicate": ":0 = '1'", + "Predicate": "count(*) = '1'", "Inputs": [ { "OperatorType": "Aggregate", @@ -2732,13 +1800,12 @@ { "comment": "Column and Literal not equal filter on scatter aggregates", "query": "select count(*) a from user having a != 10", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) a from user having a != 10", "Instructions": { "OperatorType": "Filter", - "Predicate": ":0 != 10", + "Predicate": "count(*) != 10", "Inputs": [ { "OperatorType": "Aggregate", @@ -2768,13 +1835,12 @@ { "comment": "Not equal filter with column and string literal on scatter aggregates", "query": "select count(*) a from user having a != '1'", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) a from user having a != '1'", "Instructions": { "OperatorType": "Filter", - "Predicate": ":0 != '1'", + "Predicate": "count(*) != '1'", "Inputs": [ { "OperatorType": "Aggregate", @@ -2804,13 +1870,12 @@ { "comment": "Greater than filter on scatter aggregates", "query": "select count(*) a from user having a > 10", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - 
"gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) a from user having a > 10", "Instructions": { "OperatorType": "Filter", - "Predicate": ":0 > 10", + "Predicate": "count(*) > 10", "Inputs": [ { "OperatorType": "Aggregate", @@ -2840,13 +1905,12 @@ { "comment": "Greater Equal filter on scatter aggregates", "query": "select count(*) a from user having a >= 10", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) a from user having a >= 10", "Instructions": { "OperatorType": "Filter", - "Predicate": ":0 >= 10", + "Predicate": "count(*) >= 10", "Inputs": [ { "OperatorType": "Aggregate", @@ -2876,13 +1940,12 @@ { "comment": "Less than filter on scatter aggregates", "query": "select count(*) a from user having a < 10", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) a from user having a < 10", "Instructions": { "OperatorType": "Filter", - "Predicate": ":0 < 10", + "Predicate": "count(*) < 10", "Inputs": [ { "OperatorType": "Aggregate", @@ -2912,13 +1975,12 @@ { "comment": "Less Equal filter on scatter aggregates", "query": "select count(*) a from user having a <= 10", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) a from user having a <= 10", "Instructions": { "OperatorType": "Filter", - "Predicate": ":0 <= 10", + "Predicate": "count(*) <= 10", "Inputs": [ { "OperatorType": "Aggregate", @@ -2948,13 +2010,12 @@ { "comment": "Less Equal filter on scatter with grouping", "query": "select col, count(*) a from user group by col having a <= 10", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, count(*) a from user group by col having a <= 
10", "Instructions": { "OperatorType": "Filter", - "Predicate": ":1 <= 10", + "Predicate": "count(*) <= 10", "Inputs": [ { "OperatorType": "Aggregate", @@ -2986,40 +2047,31 @@ { "comment": "We should be able to find grouping keys on ordered aggregates", "query": "select count(*) as a, val1 from user group by val1 having a = 1.00", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) as a, val1 from user group by val1 having a = 1.00", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0, - 1 - ], + "OperatorType": "Filter", + "Predicate": "count(*) = 1.00", + "ResultColumns": 2, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":0 = 1.00", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(0) AS a", + "GroupBy": "(1|2)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count_star(0) AS a", - "GroupBy": "(1|2)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) as a, val1, weight_string(val1) from `user` where 1 != 1 group by val1, weight_string(val1)", - "OrderBy": "(1|2) ASC", - "Query": "select count(*) as a, val1, weight_string(val1) from `user` group by val1, weight_string(val1) order by val1 asc", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) as a, val1, weight_string(val1) from `user` where 1 != 1 group by val1, weight_string(val1)", + "OrderBy": "(1|2) ASC", + "Query": "select count(*) as a, val1, weight_string(val1) from `user` group by val1, weight_string(val1) order by val1 asc", + "Table": "`user`" } ] } @@ -3033,32 +2085,7 @@ { "comment": "distinct on text column with collation", "query": "select col, count(distinct 
textcol1) from user group by col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col, count(distinct textcol1) from user group by col", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "count_distinct_count(1) AS count(distinct textcol1)", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col, textcol1, weight_string(textcol1) from `user` where 1 != 1 group by col, textcol1, weight_string(textcol1)", - "OrderBy": "0 ASC, (1|2) ASC", - "Query": "select col, textcol1, weight_string(textcol1) from `user` group by col, textcol1, weight_string(textcol1) order by col asc, textcol1 asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, count(distinct textcol1) from user group by col", "Instructions": { @@ -3089,82 +2116,56 @@ { "comment": "aggregation filtering by having on a route with no group by with non-unique vindex filter", "query": "select 1 from user having count(id) = 10 and name = 'a'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from user having count(id) = 10 and name = 'a'", - "Instructions": { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` having count(id) = 10 and `name` = 'a'", - "Table": "`user`", - "Values": [ - "VARCHAR(\"a\")" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 from user having count(id) = 10 and name = 'a'", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Filter", + "Predicate": "count(id) = 10", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":1 = 10", + "OperatorType": 
"Aggregate", + "Variant": "Scalar", + "Aggregates": "any_value(0) AS 1, sum_count(1) AS count(id)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "random(0) AS 1, sum_count(1) AS count(id)", + "OperatorType": "VindexLookup", + "Variant": "Equal", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Values": [ + "VARCHAR(\"a\")" + ], + "Vindex": "name_user_map", "Inputs": [ { - "OperatorType": "VindexLookup", - "Variant": "Equal", + "OperatorType": "Route", + "Variant": "IN", "Keyspace": { "Name": "user", "Sharded": true }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", "Values": [ - "VARCHAR(\"a\")" + "::name" ], - "Vindex": "name_user_map", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", - "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", - "Table": "name_user_vdx", - "Values": [ - "::name" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "ByDestination", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1, count(id) from `user` where 1 != 1", - "Query": "select 1, count(id) from `user` where `name` = 'a'", - "Table": "`user`" - } - ] + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1, count(id) from `user` where 1 != 1", + "Query": "select 1, count(id) from `user` where `name` = 'a'", + "Table": "`user`" } ] } @@ -3180,8 +2181,7 @@ { "comment": "Aggregates and joins", "query": "select count(*) from user join user_extra", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + 
"plan": { "QueryType": "SELECT", "Original": "select count(*) from user join user_extra", "Instructions": { @@ -3238,51 +2238,29 @@ { "comment": "aggregation filtering by having on a route with no group by", "query": "select 1 from user having count(id) = 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from user having count(id) = 10", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` having count(id) = 10", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 from user having count(id) = 10", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Filter", + "Predicate": "count(id) = 10", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":1 = 10", + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "any_value(0) AS 1, sum_count(1) AS count(id)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "random(0) AS 1, sum_count(1) AS count(id)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1, count(id) from `user` where 1 != 1", - "Query": "select 1, count(id) from `user`", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1, count(id) from `user` where 1 != 1", + "Query": "select 1, count(id) from `user`", + "Table": "`user`" } ] } @@ -3296,8 +2274,7 @@ { "comment": "Aggregate on join", "query": "select user.a, count(*) from user join user_extra group by user.a", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 
user.a, count(*) from user join user_extra group by user.a", "Instructions": { @@ -3359,8 +2336,7 @@ { "comment": "Aggregate on other table in join", "query": "select user.a, count(user_extra.a) from user join user_extra group by user.a", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.a, count(user_extra.a) from user join user_extra group by user.a", "Instructions": { @@ -3422,8 +2398,7 @@ { "comment": "aggregation spread out across three routes", "query": "select count(u.textcol1), count(ue.foo), us.bar from user u join user_extra ue on u.foo = ue.bar join unsharded us on ue.bar = us.baz group by us.bar", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(u.textcol1), count(ue.foo), us.bar from user u join user_extra ue on u.foo = ue.bar join unsharded us on ue.bar = us.baz group by us.bar", "Instructions": { @@ -3529,38 +2504,13 @@ { "comment": "using two distinct columns - min with distinct vindex, sum with distinct without vindex", "query": "select col1, min(distinct id), sum(distinct col3) from user group by col1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col1, min(distinct id), sum(distinct col3) from user group by col1", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "min(1), sum_distinct_sum(2) AS sum(distinct col3)", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` where 1 != 1 group by col1, col3, weight_string(col1), weight_string(col3)", - "OrderBy": "(0|3) ASC, (2|4) ASC", - "Query": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` group by col1, col3, 
weight_string(col1), weight_string(col3) order by col1 asc, col3 asc", - "ResultColumns": 3, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1, min(distinct id), sum(distinct col3) from user group by col1", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "min(1) AS min(distinct id), sum_distinct(2|4) AS sum(distinct col3)", + "Aggregates": "min(1|4) AS min(distinct id), sum_distinct(2|5) AS sum(distinct col3)", "GroupBy": "(0|3)", "ResultColumns": 3, "Inputs": [ @@ -3571,9 +2521,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` where 1 != 1 group by col1, weight_string(col1), col3, weight_string(col3)", - "OrderBy": "(0|3) ASC, (2|4) ASC", - "Query": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` group by col1, weight_string(col1), col3, weight_string(col3) order by col1 asc, col3 asc", + "FieldQuery": "select col1, min(id) as `min(distinct id)`, col3, weight_string(col1), weight_string(id), weight_string(col3) from `user` where 1 != 1 group by col1, col3, weight_string(col1), weight_string(id), weight_string(col3)", + "OrderBy": "(0|3) ASC, (2|5) ASC", + "Query": "select col1, min(id) as `min(distinct id)`, col3, weight_string(col1), weight_string(id), weight_string(col3) from `user` group by col1, col3, weight_string(col1), weight_string(id), weight_string(col3) order by col1 asc, col3 asc", "Table": "`user`" } ] @@ -3586,51 +2536,45 @@ { "comment": "aggregation on top of semijoin", "query": "select count(*) from user where exists (select 0 from user_extra where user.apa = user_extra.bar)", - "v3-plan": "VT12001: unsupported: cross-shard correlated subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) from user where exists (select 0 from user_extra where user.apa = user_extra.bar)", 
"Instructions": { "OperatorType": "Aggregate", "Variant": "Scalar", "Aggregates": "sum_count_star(0) AS count(*)", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 1] as count(*)" - ], + "OperatorType": "SemiJoin", + "JoinVars": { + "user_apa": 1 + }, + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "SemiJoin", - "JoinVars": { - "user_apa": 0 + "InputName": "Outer", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.apa, count(*), weight_string(`user`.apa) from `user` where 1 != 1 group by `user`.apa, weight_string(`user`.apa)", - "Query": "select `user`.apa, count(*), weight_string(`user`.apa) from `user` group by `user`.apa, weight_string(`user`.apa)", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.bar = :user_apa", - "Table": "user_extra" - } - ] + "FieldQuery": "select count(*), `user`.apa from `user` where 1 != 1 group by `user`.apa", + "Query": "select count(*), `user`.apa from `user` group by `user`.apa", + "Table": "`user`" + }, + { + "InputName": "SubQuery", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra where user_extra.bar = :user_apa", + "Table": "user_extra" } ] } @@ -3645,32 +2589,7 @@ { "comment": "we have to track the order of distinct aggregation expressions", "query": "select val2, count(distinct val1), count(*) from user group by val2", - "v3-plan": { - "QueryType": "SELECT", - 
"Original": "select val2, count(distinct val1), count(*) from user group by val2", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "count_distinct_count(1) AS count(distinct val1), sum_count(2) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` where 1 != 1 group by val2, val1, weight_string(val2), weight_string(val1)", - "OrderBy": "(0|3) ASC, (1|4) ASC", - "Query": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` group by val2, val1, weight_string(val2), weight_string(val1) order by val2 asc, val1 asc", - "ResultColumns": 3, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select val2, count(distinct val1), count(*) from user group by val2", "Instructions": { @@ -3687,9 +2606,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` where 1 != 1 group by val2, weight_string(val2), val1, weight_string(val1)", + "FieldQuery": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` where 1 != 1 group by val2, val1, weight_string(val2), weight_string(val1)", "OrderBy": "(0|3) ASC, (1|4) ASC", - "Query": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` group by val2, weight_string(val2), val1, weight_string(val1) order by val2 asc, val1 asc", + "Query": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` group by val2, val1, weight_string(val2), weight_string(val1) order by val2 asc, val1 asc", "Table": "`user`" } ] @@ -3702,32 +2621,7 @@ { "comment": "group by column alias", "query": "select ascii(val1) as a, count(*) from user group by a", - "v3-plan": { - "QueryType": 
"SELECT", - "Original": "select ascii(val1) as a, count(*) from user group by a", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` where 1 != 1 group by a, weight_string(ascii(val1))", - "OrderBy": "(0|2) ASC", - "Query": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` group by a, weight_string(ascii(val1)) order by a asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select ascii(val1) as a, count(*) from user group by a", "Instructions": { @@ -3759,8 +2653,7 @@ { "comment": "multiple distinct aggregations on the same column is allowed", "query": "select tcol1, count(distinct tcol2), sum(distinct tcol2) from user group by tcol1", - "v3-plan": "VT12001: unsupported: only one DISTINCT aggregation allowed in a SELECT: sum(distinct tcol2)", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select tcol1, count(distinct tcol2), sum(distinct tcol2) from user group by tcol1", "Instructions": { @@ -3777,9 +2670,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select tcol1, tcol2, tcol2, weight_string(tcol1), weight_string(tcol2) from `user` where 1 != 1 group by tcol1, weight_string(tcol1), tcol2, weight_string(tcol2)", + "FieldQuery": "select tcol1, tcol2, tcol2, weight_string(tcol1), weight_string(tcol2) from `user` where 1 != 1 group by tcol1, tcol2, weight_string(tcol1), weight_string(tcol2)", "OrderBy": "(0|3) ASC, (1|4) ASC", - "Query": "select tcol1, tcol2, tcol2, weight_string(tcol1), weight_string(tcol2) from `user` group by tcol1, weight_string(tcol1), tcol2, weight_string(tcol2) order by tcol1 asc, tcol2 asc", + "Query": "select tcol1, 
tcol2, tcol2, weight_string(tcol1), weight_string(tcol2) from `user` group by tcol1, tcol2, weight_string(tcol1), weight_string(tcol2) order by tcol1 asc, tcol2 asc", "Table": "`user`" } ] @@ -3792,15 +2685,14 @@ { "comment": "multiple distinct aggregations on the same column in different positions", "query": "select count(distinct tcol2), tcol1, count(*), sum(distinct tcol2) from user group by tcol1", - "v3-plan": "VT12001: unsupported: only one DISTINCT aggregation allowed in a SELECT: sum(distinct tcol2)", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(distinct tcol2), tcol1, count(*), sum(distinct tcol2) from user group by tcol1", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "count_distinct(0|4) AS count(distinct tcol2), sum_count_star(2) AS count(*), sum_distinct(3|4) AS sum(distinct tcol2)", - "GroupBy": "(1|5)", + "Aggregates": "count_distinct(0|5) AS count(distinct tcol2), sum_count_star(2) AS count(*), sum_distinct(3|5) AS sum(distinct tcol2)", + "GroupBy": "(1|4)", "ResultColumns": 4, "Inputs": [ { @@ -3810,9 +2702,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select tcol2, tcol1, count(*), tcol2, weight_string(tcol2), weight_string(tcol1) from `user` where 1 != 1 group by tcol2, weight_string(tcol2), tcol1, weight_string(tcol1)", - "OrderBy": "(1|5) ASC, (0|4) ASC", - "Query": "select tcol2, tcol1, count(*), tcol2, weight_string(tcol2), weight_string(tcol1) from `user` group by tcol2, weight_string(tcol2), tcol1, weight_string(tcol1) order by tcol1 asc, tcol2 asc", + "FieldQuery": "select tcol2, tcol1, count(*), tcol2, weight_string(tcol1), weight_string(tcol2) from `user` where 1 != 1 group by tcol1, tcol2, weight_string(tcol1), weight_string(tcol2)", + "OrderBy": "(1|4) ASC, (0|5) ASC", + "Query": "select tcol2, tcol1, count(*), tcol2, weight_string(tcol1), weight_string(tcol2) from `user` group by tcol1, tcol2, weight_string(tcol1), weight_string(tcol2) order by tcol1 
asc, tcol2 asc", "Table": "`user`" } ] @@ -3825,8 +2717,7 @@ { "comment": "distinct aggregation will 3 table join query", "query": "select u.textcol1, count(distinct u.val2) from user u join user u2 on u.val2 = u2.id join music m on u2.val2 = m.id group by u.textcol1", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.textcol1, count(distinct u.val2) from user u join user u2 on u.val2 = u2.id join music m on u2.val2 = m.id group by u.textcol1", "Instructions": { @@ -3837,59 +2728,34 @@ "ResultColumns": 2, "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 0] as textcol1", - "[COLUMN 1] as val2", - "[COLUMN 2]" - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,L:1,L:2", + "JoinVars": { + "u2_val2": 3 + }, + "TableName": "`user`_`user`_music", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:2,L:3,L:5", + "JoinColumnIndexes": "L:0,L:1,L:2,R:0", "JoinVars": { - "u2_val2": 0 + "u_val2": 1 }, - "TableName": "`user`_`user`_music", + "TableName": "`user`_`user`", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0,R:0,L:2,L:0,R:1,L:1", - "JoinVars": { - "u_val2": 0 + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.val2, weight_string(u.val2), u.textcol1 from `user` as u where 1 != 1 group by u.val2, weight_string(u.val2), u.textcol1", - "OrderBy": "2 ASC COLLATE latin1_swedish_ci, (0|1) ASC", - "Query": "select u.val2, weight_string(u.val2), u.textcol1 from `user` as u group by u.val2, weight_string(u.val2), u.textcol1 order by u.textcol1 asc, u.val2 asc", - "Table": "`user`" - }, - { - "OperatorType": "Route", - 
"Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u2.val2, weight_string(u2.val2) from `user` as u2 where 1 != 1 group by u2.val2, weight_string(u2.val2)", - "Query": "select u2.val2, weight_string(u2.val2) from `user` as u2 where u2.id = :u_val2 group by u2.val2, weight_string(u2.val2)", - "Table": "`user`", - "Values": [ - ":u_val2" - ], - "Vindex": "user_index" - } - ] + "FieldQuery": "select u.textcol1, u.val2, weight_string(u.val2) from `user` as u where 1 != 1", + "OrderBy": "0 ASC COLLATE latin1_swedish_ci, (1|2) ASC", + "Query": "select u.textcol1, u.val2, weight_string(u.val2) from `user` as u order by u.textcol1 asc, u.val2 asc", + "Table": "`user`" }, { "OperatorType": "Route", @@ -3898,15 +2764,30 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from music as m where 1 != 1", - "Query": "select 1 from music as m where m.id = :u2_val2", - "Table": "music", + "FieldQuery": "select u2.val2 from `user` as u2 where 1 != 1", + "Query": "select u2.val2 from `user` as u2 where u2.id = :u_val2", + "Table": "`user`", "Values": [ - ":u2_val2" + ":u_val2" ], - "Vindex": "music_user_map" + "Vindex": "user_index" } ] + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music as m where 1 != 1", + "Query": "select 1 from music as m where m.id = :u2_val2", + "Table": "music", + "Values": [ + ":u2_val2" + ], + "Vindex": "music_user_map" } ] } @@ -3921,22 +2802,7 @@ { "comment": "group_concat on single shards", "query": "select group_concat(user_id order by name), id from user group by id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select group_concat(user_id order by name), id from user group by id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select group_concat(user_id order by `name` 
asc), id from `user` where 1 != 1 group by id", - "Query": "select group_concat(user_id order by `name` asc), id from `user` group by id", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select group_concat(user_id order by name), id from user group by id", "Instructions": { @@ -3958,22 +2824,7 @@ { "comment": "select count(distinct user_id, name) from unsharded", "query": "select count(distinct user_id, name) from unsharded", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select count(distinct user_id, name) from unsharded", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select count(distinct user_id, `name`) from unsharded where 1 != 1", - "Query": "select count(distinct user_id, `name`) from unsharded", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(distinct user_id, name) from unsharded", "Instructions": { @@ -3995,14 +2846,12 @@ { "comment": "select count(distinct user_id, name) from user", "query": "select count(distinct user_id, name) from user", - "v3-plan": "VT12001: unsupported: only one expression is allowed inside aggregates: count(distinct user_id, `name`)", - "gen4-plan": "VT03001: aggregate functions take a single argument 'count(distinct user_id, `name`)'" + "plan": "VT03001: aggregate functions take a single argument 'count(distinct user_id, `name`)'" }, { "comment": "select sum(col) from (select user.col as col, 32 from user join user_extra) t", "query": "select sum(col) from (select user.col as col, 32 from user join user_extra) t", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select sum(col) from (select user.col as col, 32 from user join user_extra) t", "Instructions": { @@ -4013,13 +2862,13 @@ { "OperatorType": "Projection", "Expressions": 
[ - "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as sum(col)" + "[COLUMN 0] * [COLUMN 1] as sum(col)" ], "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,L:2,R:1", + "JoinColumnIndexes": "L:0,R:0", "TableName": "`user`_user_extra", "Inputs": [ { @@ -4029,8 +2878,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col as col, 32, sum(col) from `user` where 1 != 1", - "Query": "select `user`.col as col, 32, sum(col) from `user`", + "FieldQuery": "select sum(col), 32 from (select `user`.col as col, 32 from `user` where 1 != 1) as t where 1 != 1", + "Query": "select sum(col), 32 from (select `user`.col as col, 32 from `user`) as t", "Table": "`user`" }, { @@ -4040,8 +2889,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1", - "Query": "select 1, count(*) from user_extra group by 1", + "FieldQuery": "select count(*) from user_extra where 1 != 1 group by .0", + "Query": "select count(*) from user_extra group by .0", "Table": "user_extra" } ] @@ -4059,40 +2908,31 @@ { "comment": "find aggregation expression and use column offset in filter", "query": "select foo, count(*) from user group by foo having count(*) = 3", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select foo, count(*) from user group by foo having count(*) = 3", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0, - 1 - ], + "OperatorType": "Filter", + "Predicate": "count(*) = 3", + "ResultColumns": 2, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":1 = 3", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(1) AS count(*)", + "GroupBy": "(0|2)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count_star(1) AS count(*)", - "GroupBy": "(0|2)", - "Inputs": [ - { - "OperatorType": "Route", - 
"Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select foo, count(*), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)", - "OrderBy": "(0|2) ASC", - "Query": "select foo, count(*), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, count(*), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)", + "OrderBy": "(0|2) ASC", + "Query": "select foo, count(*), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc", + "Table": "`user`" } ] } @@ -4106,41 +2946,31 @@ { "comment": "find aggregation expression and use column offset in filter times two", "query": "select foo, sum(foo), sum(bar) from user group by foo having sum(foo)+sum(bar) = 42", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select foo, sum(foo), sum(bar) from user group by foo having sum(foo)+sum(bar) = 42", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0, - 1, - 2 - ], + "OperatorType": "Filter", + "Predicate": "sum(foo) + sum(bar) = 42", + "ResultColumns": 3, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":1 + :2 = 42", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum(1) AS sum(foo), sum(2) AS sum(bar)", + "GroupBy": "(0|3)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum(1) AS sum(foo), sum(2) AS sum(bar)", - "GroupBy": "(0|3)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select foo, sum(foo), sum(bar), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)", - "OrderBy": 
"(0|3) ASC", - "Query": "select foo, sum(foo), sum(bar), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, sum(foo), sum(bar), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)", + "OrderBy": "(0|3) ASC", + "Query": "select foo, sum(foo), sum(bar), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc", + "Table": "`user`" } ] } @@ -4154,41 +2984,31 @@ { "comment": "find aggregation expression and use column offset in filter times three", "query": "select foo, sum(foo) as fooSum, sum(bar) as barSum from user group by foo having fooSum+sum(bar) = 42", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select foo, sum(foo) as fooSum, sum(bar) as barSum from user group by foo having fooSum+sum(bar) = 42", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0, - 1, - 2 - ], + "OperatorType": "Filter", + "Predicate": "sum(foo) + sum(bar) = 42", + "ResultColumns": 3, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":1 + :2 = 42", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum(1) AS fooSum, sum(2) AS barSum", + "GroupBy": "(0|3)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum(1) AS fooSum, sum(2) AS barSum", - "GroupBy": "(0|3)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select foo, sum(foo) as fooSum, sum(bar) as barSum, weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)", - "OrderBy": "(0|3) ASC", - "Query": "select foo, sum(foo) as fooSum, sum(bar) as barSum, weight_string(foo) from `user` group by foo, 
weight_string(foo) order by foo asc", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, sum(foo) as fooSum, sum(bar) as barSum, weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)", + "OrderBy": "(0|3) ASC", + "Query": "select foo, sum(foo) as fooSum, sum(bar) as barSum, weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc", + "Table": "`user`" } ] } @@ -4202,39 +3022,31 @@ { "comment": "having should be able to add new aggregation expressions in having", "query": "select foo from user group by foo having count(*) = 3", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select foo from user group by foo having count(*) = 3", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Filter", + "Predicate": "count(*) = 3", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":1 = 3", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(1) AS count(*)", + "GroupBy": "(0|2)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count_star(1) AS count(*)", - "GroupBy": "(0|2)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select foo, count(*), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)", - "OrderBy": "(0|2) ASC", - "Query": "select foo, count(*), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, count(*), weight_string(foo) from `user` where 1 != 1 group by 
foo, weight_string(foo)", + "OrderBy": "(0|2) ASC", + "Query": "select foo, count(*), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc", + "Table": "`user`" } ] } @@ -4248,75 +3060,67 @@ { "comment": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(u.name) = 3", "query": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(u.name) = 3", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(u.name) = 3", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Filter", + "Predicate": "count(u.`name`) = 3", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":1 = 3", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count(1) AS count(u.`name`)", + "GroupBy": "(0|2)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count(u.`name`)", - "GroupBy": "(0|2)", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 2] as id", + "[COLUMN 1] * [COLUMN 0] as count(u.`name`)", + "[COLUMN 3] as weight_string(u.id)" + ], "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 0] as id", - "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as count(u.`name`)", - "[COLUMN 1]" - ], + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(2|3) ASC", "Inputs": [ { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(0|1) ASC", + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,L:0,R:1,R:2", + "JoinVars": { + "ue_id": 1 + }, + "TableName": "user_extra_`user`", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:1,R:2,L:1,R:0", - "JoinVars": { - "ue_id": 0 + "OperatorType": 
"Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "user_extra_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue where 1 != 1 group by ue.id, weight_string(ue.id)", - "Query": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue group by ue.id, weight_string(ue.id)", - "Table": "user_extra" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(u.`name`), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)", - "Query": "select count(u.`name`), u.id, weight_string(u.id) from `user` as u where u.id = :ue_id group by u.id, weight_string(u.id)", - "Table": "`user`", - "Values": [ - ":ue_id" - ], - "Vindex": "user_index" - } - ] + "FieldQuery": "select count(*), ue.id from user_extra as ue where 1 != 1 group by ue.id", + "Query": "select count(*), ue.id from user_extra as ue group by ue.id", + "Table": "user_extra" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(u.`name`), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)", + "Query": "select count(u.`name`), u.id, weight_string(u.id) from `user` as u where u.id = :ue_id group by u.id, weight_string(u.id)", + "Table": "`user`", + "Values": [ + ":ue_id" + ], + "Vindex": "user_index" } ] } @@ -4337,22 +3141,7 @@ { "comment": "select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3", "query": "select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.id from user 
u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id from `user` as u join user_extra as ue on ue.user_id = u.id where 1 != 1 group by u.id", - "Query": "select u.id from `user` as u join user_extra as ue on ue.user_id = u.id group by u.id having count(u.`name`) = 3", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3", "Instructions": { @@ -4375,75 +3164,67 @@ { "comment": "only extract the aggregation once, even if used twice", "query": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(*) < 3 and count(*) > 5", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(*) < 3 and count(*) > 5", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Filter", + "Predicate": "count(*) < 3 and count(*) > 5", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":1 < 3 and :1 > 5", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(1) AS count(*)", + "GroupBy": "(0|2)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count_star(1) AS count(*)", - "GroupBy": "(0|2)", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 2] as id", + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 3] as weight_string(u.id)" + ], "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 0] as id", - "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as count(*)", - "[COLUMN 1]" - ], + 
"OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(2|3) ASC", "Inputs": [ { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(0|1) ASC", + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,R:1,R:2", + "JoinVars": { + "ue_id": 1 + }, + "TableName": "user_extra_`user`", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:1,R:2,L:1,R:0", - "JoinVars": { - "ue_id": 0 + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "user_extra_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue where 1 != 1 group by ue.id, weight_string(ue.id)", - "Query": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue group by ue.id, weight_string(ue.id)", - "Table": "user_extra" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)", - "Query": "select count(*), u.id, weight_string(u.id) from `user` as u where u.id = :ue_id group by u.id, weight_string(u.id)", - "Table": "`user`", - "Values": [ - ":ue_id" - ], - "Vindex": "user_index" - } - ] + "FieldQuery": "select count(*), ue.id from user_extra as ue where 1 != 1 group by ue.id", + "Query": "select count(*), ue.id from user_extra as ue group by ue.id", + "Table": "user_extra" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)", + "Query": "select count(*), u.id, weight_string(u.id) from `user` as u where u.id = :ue_id group by 
u.id, weight_string(u.id)", + "Table": "`user`", + "Values": [ + ":ue_id" + ], + "Vindex": "user_index" } ] } @@ -4461,82 +3242,16 @@ ] } }, - { - "comment": "select (select 1 from user u having count(ue.col) > 10) from user_extra ue", - "query": "select (select 1 from user u having count(ue.col) > 10) from user_extra ue", - "v3-plan": "VT03020: column ue.col not found in subquery", - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select (select 1 from user u having count(ue.col) > 10) from user_extra ue", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Filter", - "Predicate": ":1 > 10", - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "random(0) AS 1, sum_count(1) AS count(ue.col)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1, count(ue.col) from `user` as u where 1 != 1", - "Query": "select 1, count(ue.col) from `user` as u", - "Table": "`user`" - } - ] - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :__sq1 from user_extra as ue where 1 != 1", - "Query": "select :__sq1 from user_extra as ue", - "Table": "user_extra" - } - ] - }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, { "comment": "group by and ',' joins with condition", "query": "select user.col from user join user_extra on user_extra.col = user.col group by user.id", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on user_extra.col = user.col group by user.id", "Instructions": { "OperatorType": "Aggregate", "Variant": 
"Ordered", - "Aggregates": "random(0) AS col", + "Aggregates": "any_value(0) AS col", "GroupBy": "(1|2)", "ResultColumns": 1, "Inputs": [ @@ -4585,8 +3300,7 @@ { "comment": "scatter aggregate symtab lookup error", "query": "select id, b as id, count(*) from user order by id", - "v3-plan": "VT03021: ambiguous column reference: id", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, b as id, count(*) from user order by id", "Instructions": { @@ -4598,7 +3312,7 @@ { "OperatorType": "Aggregate", "Variant": "Scalar", - "Aggregates": "random(0) AS id, random(1) AS id, sum_count_star(2) AS count(*), random(3)", + "Aggregates": "any_value(0) AS id, any_value(1) AS id, sum_count_star(2) AS count(*), any_value(3)", "Inputs": [ { "OperatorType": "Route", @@ -4623,35 +3337,13 @@ { "comment": "aggr and non-aggr without group by (with query does not give useful result out)", "query": "select id, count(*) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, count(*) from user", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(1) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, count(*) from `user` where 1 != 1", - "Query": "select id, count(*) from `user`", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, count(*) from user", "Instructions": { "OperatorType": "Aggregate", "Variant": "Scalar", - "Aggregates": "random(0) AS id, sum_count_star(1) AS count(*)", + "Aggregates": "any_value(0) AS id, sum_count_star(1) AS count(*)", "Inputs": [ { "OperatorType": "Route", @@ -4674,8 +3366,7 @@ { "comment": "group by and ',' joins", "query": "select user.id from user, user_extra group by id", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": 
"SELECT", "Original": "select user.id from user, user_extra group by id", "Instructions": { @@ -4726,8 +3417,7 @@ { "comment": "count on column from LIMIT", "query": "select count(city) from (select phone, id, city from user where id > 12 limit 10) as x", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(city) from (select phone, id, city from user where id > 12 limit 10) as x", "Instructions": { @@ -4736,9 +3426,9 @@ "Aggregates": "count(0) AS count(city)", "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 2] as count(city)" + "OperatorType": "SimpleProjection", + "Columns": [ + 2 ], "Inputs": [ { @@ -4752,8 +3442,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select phone, id, city from `user` where 1 != 1", - "Query": "select phone, id, city from `user` where id > 12 limit :__upper_limit", + "FieldQuery": "select phone, id, city from (select phone, id, city from `user` where 1 != 1) as x where 1 != 1", + "Query": "select phone, id, city from (select phone, id, city from `user` where id > 12) as x limit :__upper_limit", "Table": "`user`" } ] @@ -4770,8 +3460,7 @@ { "comment": "count(*) on column from LIMIT", "query": "select count(*) from (select phone, id, city from user where id > 12 limit 10) as x", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) from (select phone, id, city from user where id > 12 limit 10) as x", "Instructions": { @@ -4780,9 +3469,9 @@ "Aggregates": "count_star(0) AS count(*)", "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 0] as count(*)" + "OperatorType": "SimpleProjection", + "Columns": [ + 3 ], "Inputs": [ { @@ -4796,8 +3485,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select phone, id, city from `user` where 1 != 1", - "Query": "select phone, id, city from 
`user` where id > 12 limit :__upper_limit", + "FieldQuery": "select phone, id, city, 1 from (select phone, id, city from `user` where 1 != 1) as x where 1 != 1", + "Query": "select phone, id, city, 1 from (select phone, id, city from `user` where id > 12) as x limit :__upper_limit", "Table": "`user`" } ] @@ -4823,47 +3512,39 @@ "Aggregates": "count(0) AS count(col)", "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 0] as count(col)" - ], + "OperatorType": "Limit", + "Count": "INT64(10)", "Inputs": [ { - "OperatorType": "Limit", - "Count": "INT64(10)", + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "user_id": 0 + }, + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Join", - "Variant": "LeftJoin", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_id": 0 + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col as col from user_extra where 1 != 1", - "Query": "select user_extra.col as col from user_extra where user_extra.id = :user_id", - "Table": "user_extra" - } - ] + "FieldQuery": "select `user`.id from `user` where 1 != 1", + "Query": "select `user`.id from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.col from user_extra where 1 != 1", + "Query": "select user_extra.col from user_extra where user_extra.id = :user_id", + "Table": "user_extra" } ] } @@ 
-4880,8 +3561,7 @@ { "comment": "grouping on data from derived table", "query": "select val1, count(*) from (select id, val1 from user where val2 < 4 order by val1 limit 2) as x group by val1", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select val1, count(*) from (select id, val1 from user where val2 < 4 order by val1 limit 2) as x group by val1", "Instructions": { @@ -4892,11 +3572,11 @@ "ResultColumns": 2, "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 1] as val1", - "[COLUMN 0] as count(*)", - "[COLUMN 2]" + "OperatorType": "SimpleProjection", + "Columns": [ + 1, + 2, + 3 ], "Inputs": [ { @@ -4910,9 +3590,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, val1, weight_string(val1) from `user` where 1 != 1", - "OrderBy": "(1|2) ASC, (1|2) ASC", - "Query": "select id, val1, weight_string(val1) from `user` where val2 < 4 order by val1 asc, val1 asc limit :__upper_limit", + "FieldQuery": "select id, val1, 1, weight_string(val1) from (select id, val1 from `user` where 1 != 1) as x where 1 != 1", + "OrderBy": "(1|3) ASC", + "Query": "select id, val1, 1, weight_string(val1) from (select id, val1 from `user` where val2 < 4) as x order by val1 asc limit :__upper_limit", "Table": "`user`" } ] @@ -4929,51 +3609,29 @@ { "comment": "Can't inline derived table when it has HAVING with aggregation function", "query": "select * from (select id from user having count(*) = 1) s", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from (select id from user having count(*) = 1) s", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from (select id from `user` where 1 != 1) as s where 1 != 1", - "Query": "select * from (select id from `user` having count(*) = 1) as s", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { 
"QueryType": "SELECT", "Original": "select * from (select id from user having count(*) = 1) s", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Filter", + "Predicate": "count(*) = 1", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":1 = 1", + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "any_value(0) AS id, sum_count_star(1) AS count(*)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "random(0) AS id, sum_count_star(1) AS count(*)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, count(*) from `user` where 1 != 1", - "Query": "select id, count(*) from `user`", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, count(*) from `user` where 1 != 1", + "Query": "select id, count(*) from `user`", + "Table": "`user`" } ] } @@ -4987,8 +3645,7 @@ { "comment": "Group By X Order By X", "query": "SELECT user.intcol FROM user GROUP BY user.intcol ORDER BY COUNT(user.intcol)", - "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: count(`user`.intcol)", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT user.intcol FROM user GROUP BY user.intcol ORDER BY COUNT(user.intcol)", "Instructions": { @@ -5025,16 +3682,15 @@ } }, { - "comment": "AggregateRandom in non full group by query", + "comment": "AggregateAnyValue in non full group by query", "query": "select u.id, u.name, count(m.predef1) from user.user as u join user.user_extra as m on u.id = m.order group by u.id", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id, u.name, count(m.predef1) from user.user as u join 
user.user_extra as m on u.id = m.order group by u.id", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "random(1) AS name, sum_count(2) AS count(m.predef1)", + "Aggregates": "any_value(1) AS name, sum_count(2) AS count(m.predef1)", "GroupBy": "(0|3)", "ResultColumns": 3, "Inputs": [ @@ -5224,8 +3880,7 @@ { "comment": "Aggregations from derived table used in arithmetic outside derived table", "query": "select A.a, A.b, (A.a / A.b) as d from (select sum(a) as a, sum(b) as b from user) A", - "v3-plan": "VT12001: unsupported: expression on results of a cross-shard subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select A.a, A.b, (A.a / A.b) as d from (select sum(a) as a, sum(b) as b from user) A", "Instructions": { @@ -5264,22 +3919,7 @@ { "comment": "when pushing predicates into derived tables, make sure to put them in HAVING when they contain aggregations", "query": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as count from user_extra where localDate > :v1 group by user_id, flowId order by null) as t1 where count >= :v2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as count from user_extra where localDate > :v1 group by user_id, flowId order by null) as t1 where count >= :v2", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as `count` from user_extra where 1 != 1 group by user_id, flowId) as t1 where 1 != 1", - "Query": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as `count` from user_extra where localDate > :v1 group by user_id, flowId order by null) as t1 where `count` >= :v2", - "Table": "user_extra" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t1.portalId, t1.flowId from 
(select portalId, flowId, count(*) as count from user_extra where localDate > :v1 group by user_id, flowId order by null) as t1 where count >= :v2", "Instructions": { @@ -5290,7 +3930,7 @@ "Sharded": true }, "FieldQuery": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as `count` from user_extra where 1 != 1 group by user_id, flowId) as t1 where 1 != 1", - "Query": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as `count` from user_extra where localDate > :v1 group by user_id, flowId order by null) as t1 where `count` >= :v2", + "Query": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as `count` from user_extra where localDate > :v1 group by user_id, flowId) as t1 where `count` >= :v2", "Table": "user_extra" }, "TablesUsed": [ @@ -5301,20 +3941,18 @@ { "comment": "aggregation, where and derived tables - we can push extremums", "query": "SELECT foo FROM (SELECT foo, max(baz) as bazo FROM (SELECT foo, baz FROM user) f GROUP BY foo) tt WHERE bazo BETWEEN 100 AND 200", - "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT foo FROM (SELECT foo, max(baz) as bazo FROM (SELECT foo, baz FROM user) f GROUP BY foo) tt WHERE bazo BETWEEN 100 AND 200", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Filter", + "Predicate": "bazo between 100 and 200", + "ResultColumns": 1, "Inputs": [ { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "max(1) AS bazo", + "Aggregates": "max(1|3) AS bazo", "GroupBy": "(0|2)", "Inputs": [ { @@ -5324,9 +3962,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select foo, max(baz) as bazo, weight_string(foo) from (select foo, baz from `user` where 1 != 1) as f where 1 != 1 group by foo, weight_string(foo)", + "FieldQuery": "select foo, max(baz) as bazo, weight_string(foo), weight_string(baz) from (select foo, 
baz from `user` where 1 != 1) as f where 1 != 1 group by foo, weight_string(foo), weight_string(baz)", "OrderBy": "(0|2) ASC", - "Query": "select foo, max(baz) as bazo, weight_string(foo) from (select foo, baz from `user` having max(baz) between 100 and 200) as f group by foo, weight_string(foo) order by foo asc", + "Query": "select foo, max(baz) as bazo, weight_string(foo), weight_string(baz) from (select foo, baz from `user`) as f group by foo, weight_string(foo), weight_string(baz) order by foo asc", "Table": "`user`" } ] @@ -5341,48 +3979,31 @@ { "comment": "aggregation, where and derived tables - we can't push aggregations that might need a second layer of aggregation", "query": "SELECT foo FROM (SELECT foo, count(baz) as bazo FROM (SELECT foo, baz FROM user) f GROUP BY foo) tt WHERE bazo BETWEEN 100 AND 200", - "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT foo FROM (SELECT foo, count(baz) as bazo FROM (SELECT foo, baz FROM user) f GROUP BY foo) tt WHERE bazo BETWEEN 100 AND 200", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 1 - ], + "OperatorType": "Filter", + "Predicate": "bazo between 100 and 200", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": "bazo between 100 and 200", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count(1) AS bazo", + "GroupBy": "(0|2)", "Inputs": [ { - "OperatorType": "SimpleProjection", - "Columns": [ - 1, - 0 - ], - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS bazo", - "GroupBy": "(0|2)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select foo, count(baz) as bazo, weight_string(foo) from (select foo, baz from `user` where 1 != 1) as f where 1 != 1 group by foo, weight_string(foo)", - 
"OrderBy": "(0|2) ASC", - "Query": "select foo, count(baz) as bazo, weight_string(foo) from (select foo, baz from `user`) as f group by foo, weight_string(foo) order by foo asc", - "Table": "`user`" - } - ] - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, count(baz) as bazo, weight_string(foo) from (select foo, baz from `user` where 1 != 1) as f where 1 != 1 group by foo, weight_string(foo)", + "OrderBy": "(0|2) ASC", + "Query": "select foo, count(baz) as bazo, weight_string(foo) from (select foo, baz from `user`) as f group by foo, weight_string(foo) order by foo asc", + "Table": "`user`" } ] } @@ -5396,8 +4017,7 @@ { "comment": "Scatter order by is complex with aggregates in select", "query": "select col, count(*) from user group by col order by col+1", - "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: col + 1", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, count(*) from user group by col order by col+1", "Instructions": { @@ -5409,7 +4029,7 @@ { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "sum_count_star(1) AS count(*), random(2) AS col + 1, random(3)", + "Aggregates": "sum_count_star(1) AS count(*), any_value(2) AS col + 1, any_value(3)", "GroupBy": "0", "Inputs": [ { @@ -5436,8 +4056,7 @@ { "comment": "scatter aggregate complex order by", "query": "select id from user group by id order by id+1", - "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: id + 1", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user group by id order by id+1", "Instructions": { @@ -5461,14 +4080,13 @@ { "comment": "select expression does not directly depend on grouping expression", "query": "select a from user group by a+1", - "v3-plan": "VT12001: unsupported: in scatter query: only simple references are allowed", - "gen4-plan": { + "plan": { 
"QueryType": "SELECT", "Original": "select a from user group by a+1", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "random(0) AS a", + "Aggregates": "any_value(0) AS a", "GroupBy": "(1|2)", "ResultColumns": 1, "Inputs": [ @@ -5494,8 +4112,7 @@ { "comment": "inner join with scalar aggregation", "query": "select count(*) from user join music on user.foo = music.bar", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) from user join music on user.foo = music.bar", "Instructions": { @@ -5885,8 +4502,7 @@ { "comment": "3 table inner join with scalar aggregation", "query": "select count(*) from user join music on user.foo = music.bar join user_extra on user.foo = user_extra.baz", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) from user join music on user.foo = music.bar join user_extra on user.foo = user_extra.baz", "Instructions": { @@ -6069,8 +4685,7 @@ { "comment": "ordering have less column than grouping columns, grouping gets rearranged as order by and missing columns gets added to ordering", "query": "select u.col, u.intcol, count(*) from user u join music group by 1,2 order by 2", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.col, u.intcol, count(*) from user u join music group by 1,2 order by 2", "Instructions": { @@ -6082,8 +4697,8 @@ { "OperatorType": "Projection", "Expressions": [ - "[COLUMN 2] as col", - "[COLUMN 3] as intcol", + "[COLUMN 3] as col", + "[COLUMN 2] as intcol", "[COLUMN 0] * [COLUMN 1] as count(*)" ], "Inputs": [ @@ -6100,9 +4715,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select count(*), u.col, u.intcol from `user` as u where 1 != 1 group by u.col, u.intcol", - "OrderBy": "2 ASC, 1 ASC", 
- "Query": "select count(*), u.col, u.intcol from `user` as u group by u.col, u.intcol order by u.intcol asc, u.col asc", + "FieldQuery": "select count(*), u.intcol, u.col from `user` as u where 1 != 1 group by u.intcol, u.col", + "OrderBy": "1 ASC, 2 ASC", + "Query": "select count(*), u.intcol, u.col from `user` as u group by u.intcol, u.col order by u.intcol asc, u.col asc", "Table": "`user`" }, { @@ -6131,22 +4746,7 @@ { "comment": "redundant group by columns are not added", "query": "select col, val, id from user group by col, val, id, id, val, col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col, val, id from user group by col, val, id, id, val, col", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col, val, id from `user` where 1 != 1 group by col, val, id, id, val, col", - "Query": "select col, val, id from `user` group by col, val, id, id, val, col", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, val, id from user group by col, val, id, id, val, col", "Instructions": { @@ -6168,14 +4768,15 @@ { "comment": "scatter aggregate with ambiguous aliases", "query": "select distinct a, b as a from user", - "v3-plan": "generating ORDER BY clause: VT03021: ambiguous column reference: a", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select distinct a, b as a from user", "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "(0|2), (1|3)", + "OperatorType": "Distinct", + "Collations": [ + "(0:2)", + "(1:2)" + ], "ResultColumns": 2, "Inputs": [ { @@ -6185,9 +4786,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b as a, weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, b, weight_string(a), weight_string(b)", - "OrderBy": "(0|2) ASC, (1|3) ASC", - "Query": "select a, b as a, weight_string(a), 
weight_string(b) from `user` group by a, b, weight_string(a), weight_string(b) order by a asc, b asc", + "FieldQuery": "select a, b as a, weight_string(b) from `user` where 1 != 1", + "Query": "select distinct a, b as a, weight_string(b) from `user`", "Table": "`user`" } ] @@ -6200,14 +4800,14 @@ { "comment": "scatter aggregate with complex select list (can't build order by)", "query": "select distinct a+1 from user", - "v3-plan": "generating ORDER BY clause: VT12001: unsupported: reference a complex expression", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select distinct a+1 from user", "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "(0|1)", + "OperatorType": "Distinct", + "Collations": [ + "(0:1)" + ], "ResultColumns": 1, "Inputs": [ { @@ -6217,9 +4817,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a + 1, weight_string(a + 1) from `user` where 1 != 1 group by a + 1, weight_string(a + 1)", - "OrderBy": "(0|1) ASC", - "Query": "select a + 1, weight_string(a + 1) from `user` group by a + 1, weight_string(a + 1) order by a + 1 asc", + "FieldQuery": "select a + 1, weight_string(a + 1) from `user` where 1 != 1", + "Query": "select distinct a + 1, weight_string(a + 1) from `user`", "Table": "`user`" } ] @@ -6230,52 +4829,55 @@ } }, { - "QueryType": "SELECT", - "Original": "select distinct count(*) from user group by col", - "Instructions": { - "OperatorType": "Distinct", - "Collations": [ - "0" - ], - "ResultColumns": 1, - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count_star(0) AS count(*)", - "GroupBy": "1", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*), col from `user` where 1 != 1 group by col", - "OrderBy": "1 ASC", - "Query": "select count(*), col from `user` group by col order by col asc", - "Table": "`user`" - } - ] - } + 
"comment": "distinct on top of aggregation", + "query": "select distinct count(*) from user group by col", + "plan": { + "QueryType": "SELECT", + "Original": "select distinct count(*) from user group by col", + "Instructions": { + "OperatorType": "Distinct", + "Collations": [ + "0" + ], + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(0) AS count(*)", + "GroupBy": "1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), col from `user` where 1 != 1 group by col", + "OrderBy": "1 ASC", + "Query": "select count(*), col from `user` group by col order by col asc", + "Table": "`user`" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" ] - }, - "TablesUsed": [ - "user.user" - ] + } }, { "comment": "scalar aggregates with min, max, sum distinct and count distinct using collations", "query": "select min(textcol1), max(textcol2), sum(distinct textcol1), count(distinct textcol1) from user", - "v3-plan": "VT12001: unsupported: only one DISTINCT aggregation allowed in a SELECT: count(distinct textcol1)", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select min(textcol1), max(textcol2), sum(distinct textcol1), count(distinct textcol1) from user", "Instructions": { "OperatorType": "Aggregate", "Variant": "Scalar", - "Aggregates": "min(0) AS min(textcol1), max(1) AS max(textcol2), sum_distinct(2 COLLATE latin1_swedish_ci) AS sum(distinct textcol1), count_distinct(3 COLLATE latin1_swedish_ci) AS count(distinct textcol1)", + "Aggregates": "min(0 COLLATE latin1_swedish_ci) AS min(textcol1), max(1 COLLATE latin1_swedish_ci) AS max(textcol2), sum_distinct(2 COLLATE latin1_swedish_ci) AS sum(distinct textcol1), count_distinct(3 COLLATE latin1_swedish_ci) AS count(distinct textcol1)", "Inputs": [ { "OperatorType": "Route", @@ -6299,14 +4901,13 @@ { "comment": "grouping aggregates 
with mi, max, sum distinct and count distinct using collations", "query": "select col, min(textcol1), max(textcol2), sum(distinct textcol1), count(distinct textcol1) from user group by col", - "v3-plan": "VT12001: unsupported: only one DISTINCT aggregation allowed in a SELECT: count(distinct textcol1)", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, min(textcol1), max(textcol2), sum(distinct textcol1), count(distinct textcol1) from user group by col", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "min(1) AS min(textcol1), max(2) AS max(textcol2), sum_distinct(3 COLLATE latin1_swedish_ci) AS sum(distinct textcol1), count_distinct(4 COLLATE latin1_swedish_ci) AS count(distinct textcol1)", + "Aggregates": "min(1 COLLATE latin1_swedish_ci) AS min(textcol1), max(2 COLLATE latin1_swedish_ci) AS max(textcol2), sum_distinct(3 COLLATE latin1_swedish_ci) AS sum(distinct textcol1), count_distinct(4 COLLATE latin1_swedish_ci) AS count(distinct textcol1)", "GroupBy": "0", "Inputs": [ { @@ -6331,8 +4932,7 @@ { "comment": "using a grouping column multiple times should be OK", "query": "select col, col, count(*) from user group by col", - "v3-plan": "generating ORDER BY clause: VT03021: ambiguous column reference: col", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, col, count(*) from user group by col", "Instructions": { @@ -6363,8 +4963,7 @@ { "comment": "multiple count star and a count with 3 table join", "query": "select count(*), count(*), count(u.col) from user u, user u2, user_extra ue", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*), count(*), count(u.col) from user u, user u2, user_extra ue", "Instructions": { @@ -6451,21 +5050,20 @@ { "comment": "interleaving grouping, aggregation and join with min, max columns", "query": "select user.col, min(user_extra.foo), user.bar, 
max(user_extra.bar) from user join user_extra on user.col = user_extra.bar group by user.col, user.bar", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col, min(user_extra.foo), user.bar, max(user_extra.bar) from user join user_extra on user.col = user_extra.bar group by user.col, user.bar", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "min(1) AS min(user_extra.foo), max(3) AS max(user_extra.bar)", + "Aggregates": "min(1|5) AS min(user_extra.foo), max(3|6) AS max(user_extra.bar)", "GroupBy": "0, (2|4)", "ResultColumns": 4, "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0,L:1,R:1,L:2", + "JoinColumnIndexes": "L:0,R:0,L:1,R:1,L:2,R:2,R:3", "JoinVars": { "user_col": 0 }, @@ -6490,8 +5088,113 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select min(user_extra.foo), max(user_extra.bar) from user_extra where 1 != 1 group by .0", - "Query": "select min(user_extra.foo), max(user_extra.bar) from user_extra where user_extra.bar = :user_col group by .0", + "FieldQuery": "select min(user_extra.foo), max(user_extra.bar), weight_string(user_extra.foo), weight_string(user_extra.bar) from user_extra where 1 != 1 group by .0, weight_string(user_extra.foo), weight_string(user_extra.bar)", + "Query": "select min(user_extra.foo), max(user_extra.bar), weight_string(user_extra.foo), weight_string(user_extra.bar) from user_extra where user_extra.bar = :user_col group by .0, weight_string(user_extra.foo), weight_string(user_extra.bar)", + "Table": "user_extra" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "extremum on input from both sides", + "query": "select max(u.foo*ue.bar) from user u join user_extra ue", + "plan": { + "QueryType": "SELECT", + "Original": "select max(u.foo*ue.bar) from user u join user_extra ue", + "Instructions": { + 
"OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "max(0|1) AS max(u.foo * ue.bar)", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,R:1", + "JoinVars": { + "u_foo": 0 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.foo from `user` as u where 1 != 1", + "Query": "select u.foo from `user` as u", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select :u_foo * ue.bar, weight_string(:u_foo * ue.bar) from user_extra as ue where 1 != 1", + "Query": "select :u_foo * ue.bar, weight_string(:u_foo * ue.bar) from user_extra as ue", + "Table": "user_extra" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "aggregate on input from both sides - TODO optimize more", + "query": "select sum(user.foo+user_extra.bar) from user, user_extra", + "plan": { + "QueryType": "SELECT", + "Original": "select sum(user.foo+user_extra.bar) from user, user_extra", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum(0) AS sum(`user`.foo + user_extra.bar)", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "user_foo": 0 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.foo from `user` where 1 != 1", + "Query": "select `user`.foo from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select :user_foo + user_extra.bar from user_extra where 1 != 1", + "Query": 
"select :user_foo + user_extra.bar from user_extra", "Table": "user_extra" } ] @@ -6503,5 +5206,787 @@ "user.user_extra" ] } + }, + { + "comment": "grouping column could be coming from multiple sides", + "query": "select count(*) from user, user_extra group by user.id+user_extra.id", + "plan": { + "QueryType": "SELECT", + "Original": "select count(*) from user, user_extra group by user.id+user_extra.id", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(0) AS count(*)", + "GroupBy": "(1|2)", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(1|2) ASC", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 2] as `user`.id + user_extra.id", + "[COLUMN 3] as weight_string(`user`.id + user_extra.id)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,R:1,R:2", + "JoinVars": { + "user_id": 1 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), `user`.id from `user` where 1 != 1 group by `user`.id", + "Query": "select count(*), `user`.id from `user` group by `user`.id", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), :user_id + user_extra.id, weight_string(:user_id + user_extra.id) from user_extra where 1 != 1 group by :user_id + user_extra.id", + "Query": "select count(*), :user_id + user_extra.id, weight_string(:user_id + user_extra.id) from user_extra group by :user_id + user_extra.id", + "Table": "user_extra" + } + ] + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "Complex aggregate expression on scatter", + 
"query": "select 1+count(*) from user", + "plan": { + "QueryType": "SELECT", + "Original": "select 1+count(*) from user", + "Instructions": { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] + [COLUMN 1] as 1 + count(*)" + ], + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "any_value(0), sum_count_star(1) AS count(*)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1, count(*) from `user` where 1 != 1", + "Query": "select 1, count(*) from `user`", + "Table": "`user`" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "combine the output of two aggregations in the final result", + "query": "select greatest(sum(user.foo), sum(user_extra.bar)) from user join user_extra on user.col = user_extra.col", + "plan": { + "QueryType": "SELECT", + "Original": "select greatest(sum(user.foo), sum(user_extra.bar)) from user join user_extra on user.col = user_extra.col", + "Instructions": { + "OperatorType": "Projection", + "Expressions": [ + "GREATEST([COLUMN 0], [COLUMN 1]) as greatest(sum(`user`.foo), sum(user_extra.bar))" + ], + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum(0) AS sum(`user`.foo), sum(1) AS sum(user_extra.bar)", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as sum(`user`.foo)", + "[COLUMN 3] * [COLUMN 2] as sum(user_extra.bar)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,R:1,L:1", + "JoinVars": { + "user_col": 2 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select sum(`user`.foo), count(*), `user`.col from `user` where 1 != 1 group by `user`.col", + "Query": "select sum(`user`.foo), 
count(*), `user`.col from `user` group by `user`.col", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), sum(user_extra.bar) from user_extra where 1 != 1 group by .0", + "Query": "select count(*), sum(user_extra.bar) from user_extra where user_extra.col = :user_col group by .0", + "Table": "user_extra" + } + ] + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "Aggregate detection (group_concat)", + "query": "select group_concat(user.a) from user join user_extra", + "plan": { + "QueryType": "SELECT", + "Original": "select group_concat(user.a) from user join user_extra", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "group_concat(0) AS group_concat(`user`.a)", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.a from `user` where 1 != 1", + "Query": "select `user`.a from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra", + "Table": "user_extra" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "plan a query with any_value()", + "query": "select count(*), any_value(u.name), any_value(ue.title) from user u join user_extra ue on u.bar = ue.foo ", + "plan": { + "QueryType": "SELECT", + "Original": "select count(*), any_value(u.name), any_value(ue.title) from user u join user_extra ue on u.bar = ue.foo ", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": 
"Scalar", + "Aggregates": "sum_count_star(0) AS count(*), any_value(1) AS any_value(u.`name`), any_value(2) AS any_value(ue.title)", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 2] as any_value(u.`name`)", + "[COLUMN 3] as any_value(ue.title)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1,R:1", + "JoinVars": { + "u_bar": 2 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), any_value(u.`name`), u.bar from `user` as u where 1 != 1 group by u.bar", + "Query": "select count(*), any_value(u.`name`), u.bar from `user` as u group by u.bar", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), any_value(ue.title) from user_extra as ue where 1 != 1 group by .0", + "Query": "select count(*), any_value(ue.title) from user_extra as ue where ue.foo = :u_bar group by .0", + "Table": "user_extra" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "Rewrite derived expression while pushing order by underneath aggregation", + "query": "select d.a from music join (select id, count(*) as a from user) as d on music.user_id = d.id group by 1", + "plan": { + "QueryType": "SELECT", + "Original": "select d.a from music join (select id, count(*) as a from user) as d on music.user_id = d.id group by 1", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "GroupBy": "0", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", + "JoinVars": { + "d_id": 1 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "GroupBy": 
"0, (1|2)", + "Inputs": [ + { + "OperatorType": "SimpleProjection", + "Columns": [ + 1, + 0, + 2 + ], + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "any_value(0) AS id, sum_count_star(1) AS a, any_value(2)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, count(*) as a, weight_string(id) from `user` where 1 != 1", + "OrderBy": "1 ASC, (0|2) ASC", + "Query": "select id, count(*) as a, weight_string(id) from `user` order by count(*) asc, id asc", + "Table": "`user`" + } + ] + } + ] + } + ] + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music where 1 != 1 group by .0", + "Query": "select 1 from music where music.user_id = :d_id group by .0", + "Table": "music", + "Values": [ + ":d_id" + ], + "Vindex": "user_index" + } + ] + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "group_concat with group by without in select list", + "query": "select group_concat(user.id) from user, music where user.id = music.foo group by user.bar", + "plan": { + "QueryType": "SELECT", + "Original": "select group_concat(user.id) from user, music where user.id = music.foo group by user.bar", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "group_concat(0) AS group_concat(`user`.id)", + "GroupBy": "(1|2)", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(1|2) ASC", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,R:1,R:2", + "JoinVars": { + "music_foo": 0 + }, + "TableName": "music_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.foo from music where 1 
!= 1", + "Query": "select music.foo from music", + "Table": "music" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.id, `user`.bar, weight_string(`user`.bar) from `user` where 1 != 1", + "Query": "select `user`.id, `user`.bar, weight_string(`user`.bar) from `user` where `user`.id = :music_foo", + "Table": "`user`", + "Values": [ + ":music_foo" + ], + "Vindex": "user_index" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "group_concat aggregation on top of route", + "query": "select intcol, group_concat(foo) from user group by intcol", + "plan": { + "QueryType": "SELECT", + "Original": "select intcol, group_concat(foo) from user group by intcol", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "group_concat(1) AS group_concat(foo)", + "GroupBy": "0", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select intcol, group_concat(foo) from `user` where 1 != 1 group by intcol", + "OrderBy": "0 ASC", + "Query": "select intcol, group_concat(foo) from `user` group by intcol order by intcol asc", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "ordering on top of aggregator without pushing the column down during the horizon phase", + "query": "select u.foo, group_concat(u.bar) from user u, music m where u.col = m.col group by u.foo order by u.baz", + "plan": { + "QueryType": "SELECT", + "Original": "select u.foo, group_concat(u.bar) from user u, music m where u.col = m.col group by u.foo order by u.baz", + "Instructions": { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(2|4) ASC", + "ResultColumns": 2, + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "group_concat(1) AS 
group_concat(u.bar), any_value(2) AS baz, any_value(4)", + "GroupBy": "(0|3)", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4", + "JoinVars": { + "u_col": 5 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.foo, u.bar, u.baz, weight_string(u.foo), weight_string(u.baz), u.col from `user` as u where 1 != 1", + "OrderBy": "(0|3) ASC", + "Query": "select u.foo, u.bar, u.baz, weight_string(u.foo), weight_string(u.baz), u.col from `user` as u order by u.foo asc", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music as m where 1 != 1", + "Query": "select 1 from music as m where m.col = :u_col", + "Table": "music" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "count distinct and sum distinct on join query pushed down - unique vindex", + "query": "select u.col1, count(distinct m.user_id), sum(distinct m.user_id) from user u join music m group by u.col1", + "plan": { + "QueryType": "SELECT", + "Original": "select u.col1, count(distinct m.user_id), sum(distinct m.user_id) from user u join music m group by u.col1", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_distinct(1) AS count(distinct m.user_id), sum_sum_distinct(2) AS sum(distinct m.user_id)", + "GroupBy": "(0|3)", + "ResultColumns": 3, + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,R:1,L:1", + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.col1, weight_string(u.col1) from `user` as u where 1 != 1 group by 
u.col1, weight_string(u.col1)", + "OrderBy": "(0|1) ASC", + "Query": "select u.col1, weight_string(u.col1) from `user` as u group by u.col1, weight_string(u.col1) order by u.col1 asc", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(distinct m.user_id), sum(distinct m.user_id) from music as m where 1 != 1 group by .0", + "Query": "select count(distinct m.user_id), sum(distinct m.user_id) from music as m group by .0", + "Table": "music" + } + ] + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "count and sum distinct with min distinct on different expressions", + "query": "select foo, min(distinct bar), count(distinct baz), sum(distinct baz), max(distinct toto) from user group by foo", + "plan": { + "QueryType": "SELECT", + "Original": "select foo, min(distinct bar), count(distinct baz), sum(distinct baz), max(distinct toto) from user group by foo", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "min(1|6) AS min(distinct bar), count_distinct(2|7) AS count(distinct baz), sum_distinct(3|7) AS sum(distinct baz), max(4|8) AS max(distinct toto)", + "GroupBy": "(0|5)", + "ResultColumns": 5, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, min(bar) as `min(distinct bar)`, baz, baz, max(toto) as `max(distinct toto)`, weight_string(foo), weight_string(bar), weight_string(baz), weight_string(toto) from `user` where 1 != 1 group by foo, baz, weight_string(foo), weight_string(bar), weight_string(baz), weight_string(toto)", + "OrderBy": "(0|5) ASC, (2|7) ASC", + "Query": "select foo, min(bar) as `min(distinct bar)`, baz, baz, max(toto) as `max(distinct toto)`, weight_string(foo), weight_string(bar), weight_string(baz), weight_string(toto) from `user` group by foo, baz, 
weight_string(foo), weight_string(bar), weight_string(baz), weight_string(toto) order by foo asc, baz asc", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "aggregation on union", + "query": "select sum(col) from (select col from user union all select col from unsharded) t", + "plan": { + "QueryType": "SELECT", + "Original": "select sum(col) from (select col from user union all select col from unsharded) t", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum(0) AS sum(col)", + "Inputs": [ + { + "OperatorType": "Concatenate", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select col from unsharded where 1 != 1", + "Query": "select col from unsharded", + "Table": "unsharded" + } + ] + } + ] + }, + "TablesUsed": [ + "main.unsharded", + "user.user" + ] + } + }, + { + "comment": "aggregation on top of derived table with limit", + "query": "select count(val2), sum(val2) from (select id, val2 from user where val2 is null limit 2) as x", + "plan": { + "QueryType": "SELECT", + "Original": "select count(val2), sum(val2) from (select id, val2 from user where val2 is null limit 2) as x", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "count(0) AS count(val2), sum(1) AS sum(val2)", + "Inputs": [ + { + "OperatorType": "SimpleProjection", + "Columns": [ + 1, + 1 + ], + "Inputs": [ + { + "OperatorType": "Limit", + "Count": "INT64(2)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, val2 from (select id, val2 from `user` 
where 1 != 1) as x where 1 != 1", + "Query": "select id, val2 from (select id, val2 from `user` where val2 is null) as x limit :__upper_limit", + "Table": "`user`" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "aggregation on top of aggregation works fine", + "query": "select distinct count(*) from user, (select distinct count(*) from user) X", + "plan": { + "QueryType": "SELECT", + "Original": "select distinct count(*) from user, (select distinct count(*) from user) X", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count_star(0) AS count(*)", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0", + "TableName": "`user`_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) from `user` where 1 != 1", + "Query": "select count(*) from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "count_star(0)", + "GroupBy": "1", + "Inputs": [ + { + "OperatorType": "SimpleProjection", + "Columns": [ + 2, + 1 + ], + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(0) AS count(*), any_value(2)", + "GroupBy": "1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), .0, 1 from `user` where 1 != 1 group by .0", + "Query": "select count(*), .0, 1 from `user` group by .0", + "Table": "`user`" + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } } -] +] \ No newline at end of file diff --git a/go/vt/vtgate/planbuilder/testdata/ddl_cases.json 
b/go/vt/vtgate/planbuilder/testdata/ddl_cases.json index 8326922225c..eec1a0ce101 100644 --- a/go/vt/vtgate/planbuilder/testdata/ddl_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/ddl_cases.json @@ -251,22 +251,7 @@ { "comment": "create view with subquery in unsharded keyspace", "query": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a", - "v3-plan": { - "QueryType": "DDL", - "Original": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a", - "Instructions": { - "OperatorType": "DDL", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "Query": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a" - }, - "TablesUsed": [ - "main.view_a" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DDL", "Original": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a", "Instructions": { @@ -380,22 +365,7 @@ { "comment": "Create View with authoritative column", "query": "create view user.tmp_view as select * from user.authoritative", - "v3-plan": { - "QueryType": "DDL", - "Original": "create view user.tmp_view as select * from user.authoritative", - "Instructions": { - "OperatorType": "DDL", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "Query": "create view tmp_view as select * from authoritative" - }, - "TablesUsed": [ - "user.tmp_view" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DDL", "Original": "create view user.tmp_view as select * from user.authoritative", "Instructions": { diff --git a/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.json b/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.json index 30547db61c4..d05631cbff5 100644 --- 
a/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.json +++ b/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.json @@ -116,22 +116,7 @@ { "comment": "create view with select * from authoritative table", "query": "create view user.view_a as select * from authoritative", - "v3-plan": { - "QueryType": "DDL", - "Original": "create view user.view_a as select * from authoritative", - "Instructions": { - "OperatorType": "DDL", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "Query": "create view view_a as select * from authoritative" - }, - "TablesUsed": [ - "user.view_a" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DDL", "Original": "create view user.view_a as select * from authoritative", "Instructions": { @@ -150,22 +135,7 @@ { "comment": "create view with select * from join of authoritative tables", "query": "create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id", - "v3-plan": { - "QueryType": "DDL", - "Original": "create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id", - "Instructions": { - "OperatorType": "DDL", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "Query": "create view view_a as select * from authoritative as a join authoritative as b on a.user_id = b.user_id" - }, - "TablesUsed": [ - "user.view_a" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DDL", "Original": "create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id", "Instructions": { @@ -184,22 +154,7 @@ { "comment": "create view with select * from qualified authoritative table", "query": "create view user.view_a as select a.* from authoritative a", - "v3-plan": { - "QueryType": "DDL", - "Original": "create view user.view_a as select a.* from authoritative a", - "Instructions": { - "OperatorType": "DDL", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "Query": "create view view_a as select 
a.* from authoritative as a" - }, - "TablesUsed": [ - "user.view_a" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DDL", "Original": "create view user.view_a as select a.* from authoritative a", "Instructions": { @@ -237,22 +192,7 @@ { "comment": "create view with select authoritative.* with intermixing still expands", "query": "create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id", - "v3-plan": { - "QueryType": "DDL", - "Original": "create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id", - "Instructions": { - "OperatorType": "DDL", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "Query": "create view view_a as select `user`.id, a.*, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id" - }, - "TablesUsed": [ - "user.view_a" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DDL", "Original": "create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id", "Instructions": { @@ -697,11 +637,6 @@ ] } }, - { - "comment": "drop table with incompatible tables", - "query": "drop table user, unsharded_a", - "plan": "VT12001: unsupported: Tables or Views specified in the query do not belong to the same destination" - }, { "comment": "drop table with unknown table", "query": "drop table unknown", @@ -727,11 +662,6 @@ ] } }, - { - "comment": "drop view with incompatible views", - "query": "drop view user, unsharded_a", - "plan": "VT12001: unsupported: Tables or Views specified in the query do not belong to the same destination" - }, { "comment": "drop view with unknown view", "query": "drop view unknown", @@ -776,11 +706,6 @@ ] } }, - { - "comment": "Rename table with different keyspace tables", - "query": "rename table user_extra to b, main.a to b", - "plan": "VT12001: unsupported: Tables or Views specified in the query do not belong to the same destination" - }, { "comment": 
"Rename table with change in keyspace name", "query": "rename table user_extra to main.b", diff --git a/go/vt/vtgate/planbuilder/testdata/dml_cases.json b/go/vt/vtgate/planbuilder/testdata/dml_cases.json index 8b92ffe9336..18575c8dcf9 100644 --- a/go/vt/vtgate/planbuilder/testdata/dml_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/dml_cases.json @@ -144,29 +144,7 @@ { "comment": "routing rules: updated of a routed table", "query": "update route1 set a=1 where id=1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update route1 set a=1 where id=1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update `user` as route1 set a = 1 where id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update route1 set a=1 where id=1", "Instructions": { @@ -192,25 +170,7 @@ { "comment": "update: routing rules for subquery.", "query": "update unsharded_a set a=(select a from route2)", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update unsharded_a set a=(select a from route2)", - "Instructions": { - "OperatorType": "Update", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "Query": "update unsharded_a set a = (select a from unsharded as route2)", - "Table": "unsharded, unsharded_a" - }, - "TablesUsed": [ - "main.unsharded_a" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update unsharded_a set a=(select a from route2)", "Instructions": { @@ -299,29 +259,7 @@ { "comment": "update by primary keyspace id", "query": "update user set val = 1 where id = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user set val = 1 where id = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": 
"Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update `user` set val = 1 where id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set val = 1 where id = 1", "Instructions": { @@ -347,29 +285,7 @@ { "comment": "update by primary keyspace id with alias", "query": "update user as user_alias set val = 1 where user_alias.id = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user as user_alias set val = 1 where user_alias.id = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update `user` as user_alias set val = 1 where user_alias.id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user as user_alias set val = 1 where user_alias.id = 1", "Instructions": { @@ -395,29 +311,7 @@ { "comment": "update by primary keyspace id with parenthesized expression", "query": "update user set val = 1 where (id = 1)", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user set val = 1 where (id = 1)", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update `user` set val = 1 where id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set val = 1 where (id = 1)", "Instructions": { @@ -443,29 +337,7 @@ { "comment": "update by primary keyspace id with multi-part where clause with parens", "query": 
"update user set val = 1 where (name = 'foo' and id = 1)", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user set val = 1 where (name = 'foo' and id = 1)", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update `user` set val = 1 where `name` = 'foo' and id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set val = 1 where (name = 'foo' and id = 1)", "Instructions": { @@ -491,35 +363,7 @@ { "comment": "update by primary keyspace id, changing one vindex column", "query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "email_user_map:4" - ], - "KsidLength": 1, - "KsidVindex": "user_index", - "OwnedVindexQuery": "select user_id, email, address, non_planable, email = 'juan@vitess.io' from user_metadata where user_id = 1 for update", - "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1", - "Table": "user_metadata", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user_metadata" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1", "Instructions": { @@ -556,36 +400,7 @@ { "comment": "update by primary keyspace id, changing multiple vindex columns", "query": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1", - "v3-plan": { - "QueryType": "UPDATE", - 
"Original": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "address_user_map:5", - "email_user_map:4" - ], - "KsidLength": 1, - "KsidVindex": "user_index", - "OwnedVindexQuery": "select user_id, email, address, non_planable, email = 'juan@vitess.io', address = '155 5th street' from user_metadata where user_id = 1 for update", - "Query": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1", - "Table": "user_metadata", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user_metadata" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1", "Instructions": { @@ -618,35 +433,7 @@ { "comment": "update by primary keyspace id, changing one vindex column, using order by and limit", "query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "email_user_map:4" - ], - "KsidLength": 1, - "KsidVindex": "user_index", - "OwnedVindexQuery": "select user_id, email, address, non_planable, email = 'juan@vitess.io' from user_metadata where user_id = 1 order by user_id asc limit 10 for update", - "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10", - "Table": "user_metadata", - "Values": [ - "INT64(1)" - ], - 
"Vindex": "user_index" - }, - "TablesUsed": [ - "user.user_metadata" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10", "Instructions": { @@ -678,12 +465,12 @@ { "comment": "update changes non owned vindex column", "query": "update music_extra set music_id = 1 where user_id = 1", - "v3-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update music_extra set music_id = 1 where user_id = 1", "Instructions": { "OperatorType": "Update", - "Variant": "Equal", + "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true @@ -705,10 +492,14 @@ "TablesUsed": [ "user.music_extra" ] - }, - "gen4-plan": { + } + }, + { + "comment": "update by primary keyspace id, stray where clause", + "query": "update user set val = 1 where id = id2 and id = 1", + "plan": { "QueryType": "UPDATE", - "Original": "update music_extra set music_id = 1 where user_id = 1", + "Original": "update user set val = 1 where id = id2 and id = 1", "Instructions": { "OperatorType": "Update", "Variant": "EqualUnique", @@ -717,39 +508,33 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "music_user_map:1" - ], - "KsidLength": 1, - "KsidVindex": "user_index", - "OwnedVindexQuery": "select user_id, music_id = 1 from music_extra where user_id = 1 for update", - "Query": "update music_extra set music_id = 1 where user_id = 1", - "Table": "music_extra", + "Query": "update `user` set val = 1 where id = id2 and id = 1", + "Table": "user", "Values": [ "INT64(1)" ], "Vindex": "user_index" }, "TablesUsed": [ - "user.music_extra" + "user.user" ] } }, { - "comment": "update by primary keyspace id, stray where clause", - "query": "update user set val = 1 where id = id2 and id = 1", - "v3-plan": { + "comment": "update by primary keyspace id, stray where clause with conversion error", + "query": "update user set val = 1 where id = 18446744073709551616 and 
id = 1", + "plan": { "QueryType": "UPDATE", - "Original": "update user set val = 1 where id = id2 and id = 1", + "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1", "Instructions": { "OperatorType": "Update", - "Variant": "Equal", + "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, "TargetTabletType": "PRIMARY", - "Query": "update `user` set val = 1 where id = id2 and id = 1", + "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1", "Table": "user", "Values": [ "INT64(1)" @@ -759,121 +544,26 @@ "TablesUsed": [ "user.user" ] - }, - "gen4-plan": { - "QueryType": "UPDATE", - "Original": "update user set val = 1 where id = id2 and id = 1", + } + }, + { + "comment": "delete from by primary keyspace id", + "query": "delete from user where id = 1", + "plan": { + "QueryType": "DELETE", + "Original": "delete from user where id = 1", "Instructions": { - "OperatorType": "Update", + "OperatorType": "Delete", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, "TargetTabletType": "PRIMARY", - "Query": "update `user` set val = 1 where id = id2 and id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "update by primary keyspace id, stray where clause with conversion error", - "query": "update user set val = 1 where id = 18446744073709551616 and id = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { - "QueryType": "UPDATE", - 
"Original": "update user set val = 1 where id = 18446744073709551616 and id = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "delete from by primary keyspace id", - "query": "delete from user where id = 1", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from user where id = 1", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 1, - "KsidVindex": "user_index", - "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id = 1 for update", - "Query": "delete from `user` where id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { - "QueryType": "DELETE", - "Original": "delete from user where id = 1", - "Instructions": { - "OperatorType": "Delete", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 1, - "KsidVindex": "user_index", - "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id = 1 for update", - "Query": "delete from `user` where id = 1", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id = 1 for update", + "Query": "delete from `user` where id = 1", "Table": "user", "Values": [ "INT64(1)" @@ -956,32 +646,7 @@ { "comment": "routing rules: deleted from a routed table", "query": "delete from route1 where id = 1", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from route1 where id = 1", - "Instructions": { 
- "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 1, - "KsidVindex": "user_index", - "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as route1 where id = 1 for update", - "Query": "delete from `user` as route1 where id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from route1 where id = 1", "Instructions": { @@ -1010,25 +675,7 @@ { "comment": "delete: routing rules for subquery", "query": "delete from unsharded_a where a=(select a from route2)", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from unsharded_a where a=(select a from route2)", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "Query": "delete from unsharded_a where a = (select a from unsharded as route2)", - "Table": "unsharded, unsharded_a" - }, - "TablesUsed": [ - "main.unsharded_a" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from unsharded_a where a=(select a from route2)", "Instructions": { @@ -1051,29 +698,7 @@ { "comment": "update by lookup", "query": "update music set val = 1 where id = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update music set val = 1 where id = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update music set val = 1 where id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "music_user_map" - }, - "TablesUsed": [ - "user.music" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update music set val = 1 where id = 1", "Instructions": { @@ -1145,32 +770,7 @@ { 
"comment": "delete from by lookup", "query": "delete from music where id = 1", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from music where id = 1", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 1, - "KsidVindex": "user_index", - "OwnedVindexQuery": "select user_id, id from music where id = 1 for update", - "Query": "delete from music where id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "music_user_map" - }, - "TablesUsed": [ - "user.music" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from music where id = 1", "Instructions": { @@ -1199,29 +799,7 @@ { "comment": "delete from, no owned vindexes", "query": "delete from music_extra where user_id = 1", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from music_extra where user_id = 1", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "delete from music_extra where user_id = 1", - "Table": "music_extra", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.music_extra" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from music_extra where user_id = 1", "Instructions": { @@ -1390,25 +968,7 @@ { "comment": "insert unsharded with select", "query": "insert into unsharded select id from unsharded_auto", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into unsharded select id from unsharded_auto", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "Query": "insert into unsharded select id from unsharded_auto for update", - "TableName": "unsharded" - }, - "TablesUsed": [ - "main.unsharded" - ] 
- }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into unsharded select id from unsharded_auto", "Instructions": { @@ -1431,25 +991,7 @@ { "comment": "insert unsharded with select with join", "query": "insert into unsharded select id from unsharded join unsharded_auto", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into unsharded select id from unsharded join unsharded_auto", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "Query": "insert into unsharded select id from unsharded join unsharded_auto for update", - "TableName": "unsharded" - }, - "TablesUsed": [ - "main.unsharded" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into unsharded select id from unsharded join unsharded_auto", "Instructions": { @@ -1665,8 +1207,7 @@ { "comment": "insert with mimatched column list", "query": "insert into user(id) values (1, 2)", - "v3-plan": "VT13001: [BUG] column list does not match values", - "gen4-plan": "VT03006: column count does not match value count at row 1" + "plan": "VT03006: column count does not match value count at row 1" }, { "comment": "insert no column list for sharded authoritative table", @@ -2001,25 +1542,7 @@ { "comment": "unsharded insert from union", "query": "insert into unsharded select 1 from dual union select 1 from dual", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into unsharded select 1 from dual union select 1 from dual", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "Query": "insert into unsharded select 1 from dual union select 1 from dual for update", - "TableName": "unsharded" - }, - "TablesUsed": [ - "main.unsharded" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into unsharded 
select 1 from dual union select 1 from dual", "Instructions": { @@ -2094,7 +1617,7 @@ { "comment": "insert invalid index value", "query": "insert into music_extra(music_id, user_id) values(1, id)", - "plan": "cannot lookup column (column access not supported here)" + "plan": "cannot lookup column 'id' (column access not supported here)" }, { "comment": "insert invalid table", @@ -2190,8 +1713,7 @@ { "comment": "insert into a vindex not allowed", "query": "insert into user_index(id) values(1)", - "v3-plan": "VT12001: unsupported: multi-shard or vindex write statement", - "gen4-plan": "VT09014: vindex cannot be modified" + "plan": "VT09014: vindex cannot be modified" }, { "comment": "simple replace unsharded", @@ -2218,7 +1740,7 @@ { "comment": "replace unsharded with select", "query": "replace into unsharded select id from unsharded_auto", - "v3-plan": { + "plan": { "QueryType": "INSERT", "Original": "replace into unsharded select id from unsharded_auto", "Instructions": { @@ -2229,30 +1751,12 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "Query": "replace into unsharded select id from unsharded_auto for update", + "Query": "replace into unsharded select id from unsharded_auto", "TableName": "unsharded" }, "TablesUsed": [ - "main.unsharded" - ] - }, - "gen4-plan": { - "QueryType": "INSERT", - "Original": "replace into unsharded select id from unsharded_auto", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "Query": "replace into unsharded select id from unsharded_auto", - "TableName": "unsharded" - }, - "TablesUsed": [ - "main.unsharded", - "main.unsharded_auto" + "main.unsharded", + "main.unsharded_auto" ] } }, @@ -2436,32 +1940,7 @@ { "comment": "delete row in a multi column vindex table", "query": "delete from multicolvin where kid=1", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from multicolvin where kid=1", - 
"Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 1, - "KsidVindex": "kid_index", - "OwnedVindexQuery": "select kid, column_a, column_b, column_c from multicolvin where kid = 1 for update", - "Query": "delete from multicolvin where kid = 1", - "Table": "multicolvin", - "Values": [ - "INT64(1)" - ], - "Vindex": "kid_index" - }, - "TablesUsed": [ - "user.multicolvin" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from multicolvin where kid=1", "Instructions": { @@ -2490,35 +1969,7 @@ { "comment": "update columns of multi column vindex", "query": "update multicolvin set column_b = 1, column_c = 2 where kid = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update multicolvin set column_b = 1, column_c = 2 where kid = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "colb_colc_map:4" - ], - "KsidLength": 1, - "KsidVindex": "kid_index", - "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update", - "Query": "update multicolvin set column_b = 1, column_c = 2 where kid = 1", - "Table": "multicolvin", - "Values": [ - "INT64(1)" - ], - "Vindex": "kid_index" - }, - "TablesUsed": [ - "user.multicolvin" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update multicolvin set column_b = 1, column_c = 2 where kid = 1", "Instructions": { @@ -2550,36 +2001,7 @@ { "comment": "update multiple vindexes, with multi column vindex", "query": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1", - "Instructions": { - 
"OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "cola_map:4", - "colb_colc_map:5" - ], - "KsidLength": 1, - "KsidVindex": "kid_index", - "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_a = 0, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update", - "Query": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1", - "Table": "multicolvin", - "Values": [ - "INT64(1)" - ], - "Vindex": "kid_index" - }, - "TablesUsed": [ - "user.multicolvin" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1", "Instructions": { @@ -2976,25 +2398,7 @@ { "comment": "unsharded update where inner query references outer query", "query": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)", - "Instructions": { - "OperatorType": "Update", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "Query": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)", - "Table": "unsharded, unsharded_a, unsharded_b" - }, - "TablesUsed": [ - "main.unsharded" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)", "Instructions": { @@ -3018,25 +2422,7 @@ { "comment": "unsharded delete where inner query references outer query", "query": "delete from unsharded where col = (select id from unsharded_a where id = 
unsharded.col)", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "Query": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)", - "Table": "unsharded, unsharded_a" - }, - "TablesUsed": [ - "main.unsharded" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)", "Instructions": { @@ -3059,35 +2445,7 @@ { "comment": "update vindex value to null", "query": "update user set name = null where id = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user set name = null where id = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "name_user_map:3" - ], - "KsidLength": 1, - "KsidVindex": "user_index", - "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id = 1 for update", - "Query": "update `user` set `name` = null where id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set name = null where id = 1", "Instructions": { @@ -3308,32 +2666,7 @@ { "comment": "delete with single table targets", "query": "delete music from music where id = 1", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete music from music where id = 1", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 1, - "KsidVindex": 
"user_index", - "OwnedVindexQuery": "select user_id, id from music where id = 1 for update", - "Query": "delete from music where id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "music_user_map" - }, - "TablesUsed": [ - "user.music" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete music from music where id = 1", "Instructions": { @@ -3409,35 +2742,7 @@ { "comment": "update multi column vindex, without values for all the vindex columns", "query": "update multicolvin set column_c = 2 where kid = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update multicolvin set column_c = 2 where kid = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "colb_colc_map:4" - ], - "KsidLength": 1, - "KsidVindex": "kid_index", - "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_c = 2 from multicolvin where kid = 1 for update", - "Query": "update multicolvin set column_c = 2 where kid = 1", - "Table": "multicolvin", - "Values": [ - "INT64(1)" - ], - "Vindex": "kid_index" - }, - "TablesUsed": [ - "user.multicolvin" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update multicolvin set column_c = 2 where kid = 1", "Instructions": { @@ -3469,35 +2774,7 @@ { "comment": "update with binary value", "query": "update user set name = _binary 'abc' where id = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user set name = _binary 'abc' where id = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "name_user_map:3" - ], - "KsidLength": 1, - "KsidVindex": "user_index", - "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = _binary 'abc' from `user` where id = 1 for update", - "Query": 
"update `user` set `name` = _binary 'abc' where id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set name = _binary 'abc' where id = 1", "Instructions": { @@ -3529,28 +2806,7 @@ { "comment": "delete with binary value", "query": "delete from user where name = _binary 'abc'", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from user where name = _binary 'abc'", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 1, - "KsidVindex": "user_index", - "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `name` = _binary 'abc' for update", - "Query": "delete from `user` where `name` = _binary 'abc'", - "Table": "user" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from user where name = _binary 'abc'", "Instructions": { @@ -3694,7 +2950,7 @@ { "comment": "INSERT INTO main.user_privacy_consents (user_id, accepted_at) SELECT user_id, accepted_at FROM (SELECT 1 as user_id, 1629194864 as accepted_at) AS tmp WHERE NOT EXISTS (SELECT user_id FROM main.user_privacy_consents WHERE user_id = 1)", "query": "INSERT INTO main.user_privacy_consents (user_id, accepted_at) SELECT user_id, accepted_at FROM (SELECT 1 as user_id, 1629194864 as accepted_at) AS tmp WHERE NOT EXISTS (SELECT user_id FROM main.user_privacy_consents WHERE user_id = 1)", - "v3-plan": { + "plan": { "QueryType": "INSERT", "Original": "INSERT INTO main.user_privacy_consents (user_id, accepted_at) SELECT user_id, accepted_at FROM (SELECT 1 as user_id, 1629194864 as accepted_at) AS tmp WHERE NOT EXISTS (SELECT user_id FROM main.user_privacy_consents WHERE user_id = 1)", "Instructions": { @@ -3705,25 +2961,7 @@ "Sharded": false }, 
"TargetTabletType": "PRIMARY", - "Query": "insert into user_privacy_consents(user_id, accepted_at) select user_id, accepted_at from (select 1 as user_id, 1629194864 as accepted_at from dual) as tmp where not exists (select 1 from user_privacy_consents where user_id = 1 limit 1) for update", - "TableName": "user_privacy_consents" - }, - "TablesUsed": [ - "main.user_privacy_consents" - ] - }, - "gen4-plan": { - "QueryType": "INSERT", - "Original": "INSERT INTO main.user_privacy_consents (user_id, accepted_at) SELECT user_id, accepted_at FROM (SELECT 1 as user_id, 1629194864 as accepted_at) AS tmp WHERE NOT EXISTS (SELECT user_id FROM main.user_privacy_consents WHERE user_id = 1)", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "Query": "insert into user_privacy_consents(user_id, accepted_at) select user_id, accepted_at from (select 1 as user_id, 1629194864 as accepted_at from dual) as tmp where not exists (select 1 from user_privacy_consents where user_id = 1 limit 1)", + "Query": "insert into user_privacy_consents(user_id, accepted_at) select user_id, accepted_at from (select 1 as user_id, 1629194864 as accepted_at from dual) as tmp where not exists (select 1 from user_privacy_consents where user_id = 1)", "TableName": "user_privacy_consents" }, "TablesUsed": [ @@ -3788,32 +3026,7 @@ { "comment": "Delete on backfilling and non-backfilling unique lookup vindexes should be a delete equal", "query": "delete from zlookup_unique.t1 where c2 = 10 and c3 = 20", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from zlookup_unique.t1 where c2 = 10 and c3 = 20", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "zlookup_unique", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 1, - "KsidVindex": "xxhash", - "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 10 and c3 = 
20 for update", - "Query": "delete from t1 where c2 = 10 and c3 = 20", - "Table": "t1", - "Values": [ - "INT64(20)" - ], - "Vindex": "lookup_t1_2" - }, - "TablesUsed": [ - "zlookup_unique.t1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from zlookup_unique.t1 where c2 = 10 and c3 = 20", "Instructions": { @@ -3842,35 +3055,7 @@ { "comment": "Update on backfilling and non-backfilling unique lookup vindexes should be an equal", "query": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 = 20", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 = 20", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "zlookup_unique", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "lookup_t1:3" - ], - "KsidLength": 1, - "KsidVindex": "xxhash", - "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 10 and c3 = 20 for update", - "Query": "update t1 set c2 = 1 where c2 = 10 and c3 = 20", - "Table": "t1", - "Values": [ - "INT64(20)" - ], - "Vindex": "lookup_t1_2" - }, - "TablesUsed": [ - "zlookup_unique.t1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 = 20", "Instructions": { @@ -4016,30 +3201,7 @@ { "comment": "update with a multicol vindex", "query": "update multicol_tbl set x = 1 where cola = 1 and colb = 2", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update multicol_tbl set x = 1 where cola = 1 and colb = 2", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update multicol_tbl set x = 1 where cola = 1 and colb = 2", - "Table": "multicol_tbl", - "Values": [ - "INT64(1)", - "INT64(2)" - ], - "Vindex": "multicolIdx" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - }, - 
"gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update multicol_tbl set x = 1 where cola = 1 and colb = 2", "Instructions": { @@ -4066,30 +3228,7 @@ { "comment": "update with a multicol vindex - reverse order", "query": "update multicol_tbl set x = 1 where colb = 2 and cola = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update multicol_tbl set x = 1 where colb = 2 and cola = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update multicol_tbl set x = 1 where colb = 2 and cola = 1", - "Table": "multicol_tbl", - "Values": [ - "INT64(1)", - "INT64(2)" - ], - "Vindex": "multicolIdx" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update multicol_tbl set x = 1 where colb = 2 and cola = 1", "Instructions": { @@ -4170,33 +3309,7 @@ { "comment": "delete with a multicol vindex", "query": "delete from multicol_tbl where cola = 1 and colb = 2", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from multicol_tbl where cola = 1 and colb = 2", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 2, - "KsidVindex": "multicolIdx", - "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 and colb = 2 for update", - "Query": "delete from multicol_tbl where cola = 1 and colb = 2", - "Table": "multicol_tbl", - "Values": [ - "INT64(1)", - "INT64(2)" - ], - "Vindex": "multicolIdx" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from multicol_tbl where cola = 1 and colb = 2", "Instructions": { @@ -4226,33 +3339,7 @@ { "comment": "delete with a multicol vindex - reverse order", "query": "delete from multicol_tbl where colb = 2 
and cola = 1", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from multicol_tbl where colb = 2 and cola = 1", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 2, - "KsidVindex": "multicolIdx", - "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb = 2 and cola = 1 for update", - "Query": "delete from multicol_tbl where colb = 2 and cola = 1", - "Table": "multicol_tbl", - "Values": [ - "INT64(1)", - "INT64(2)" - ], - "Vindex": "multicolIdx" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from multicol_tbl where colb = 2 and cola = 1", "Instructions": { @@ -4342,36 +3429,7 @@ { "comment": "update with multicol and an owned vindex which changes", "query": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "colc_map:4" - ], - "KsidLength": 2, - "KsidVindex": "multicolIdx", - "OwnedVindexQuery": "select cola, colb, colc, `name`, colc = 1 from multicol_tbl where cola = 1 and colb = 2 for update", - "Query": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2", - "Table": "multicol_tbl", - "Values": [ - "INT64(1)", - "INT64(2)" - ], - "Vindex": "multicolIdx" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2", "Instructions": { @@ -4430,29 +3488,7 @@ { "comment": "update with routing using subsharding column", "query": "update multicol_tbl set x = 42 where cola = 1", - 
"v3-plan": { - "QueryType": "UPDATE", - "Original": "update multicol_tbl set x = 42 where cola = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update multicol_tbl set x = 42 where cola = 1", - "Table": "multicol_tbl", - "Values": [ - "INT64(1)" - ], - "Vindex": "multicolIdx" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update multicol_tbl set x = 42 where cola = 1", "Instructions": { @@ -4478,35 +3514,7 @@ { "comment": "update with routing using subsharding column on lookup vindex", "query": "update multicol_tbl set name = 'bar' where cola = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update multicol_tbl set name = 'bar' where cola = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "name_muticoltbl_map:4" - ], - "KsidLength": 2, - "KsidVindex": "multicolIdx", - "OwnedVindexQuery": "select cola, colb, colc, `name`, `name` = 'bar' from multicol_tbl where cola = 1 for update", - "Query": "update multicol_tbl set `name` = 'bar' where cola = 1", - "Table": "multicol_tbl", - "Values": [ - "INT64(1)" - ], - "Vindex": "multicolIdx" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update multicol_tbl set name = 'bar' where cola = 1", "Instructions": { @@ -4570,7 +3578,7 @@ { "comment": "update with routing using subsharding column with in query as lower cost over lookup vindex", "query": "update multicol_tbl set x = 1 where name = 'foo' and cola = 2", - "v3-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update multicol_tbl set x = 1 where name = 'foo' and cola = 2", "Instructions": { @@ -4584,45 +3592,23 @@ "Query": "update multicol_tbl 
set x = 1 where `name` = 'foo' and cola = 2", "Table": "multicol_tbl", "Values": [ - "INT64(2)" + "VARCHAR(\"foo\")" ], - "Vindex": "multicolIdx" + "Vindex": "name_muticoltbl_map" }, "TablesUsed": [ "user.multicol_tbl" ] - }, - "gen4-plan": { - "QueryType": "UPDATE", - "Original": "update multicol_tbl set x = 1 where name = 'foo' and cola = 2", + } + }, + { + "comment": "delete with routing using non-unique lookup vindex", + "query": "delete from multicol_tbl where name = 'foo'", + "plan": { + "QueryType": "DELETE", + "Original": "delete from multicol_tbl where name = 'foo'", "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update multicol_tbl set x = 1 where `name` = 'foo' and cola = 2", - "Table": "multicol_tbl", - "Values": [ - "VARCHAR(\"foo\")" - ], - "Vindex": "name_muticoltbl_map" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - } - }, - { - "comment": "delete with routing using non-unique lookup vindex", - "query": "delete from multicol_tbl where name = 'foo'", - "plan": { - "QueryType": "DELETE", - "Original": "delete from multicol_tbl where name = 'foo'", - "Instructions": { - "OperatorType": "Delete", + "OperatorType": "Delete", "Variant": "Equal", "Keyspace": { "Name": "user", @@ -4647,32 +3633,7 @@ { "comment": "delete with routing using subsharding column", "query": "delete from multicol_tbl where cola = 1", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from multicol_tbl where cola = 1", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 2, - "KsidVindex": "multicolIdx", - "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 for update", - "Query": "delete from multicol_tbl where cola = 1", - "Table": "multicol_tbl", - "Values": [ - "INT64(1)" - ], - "Vindex": 
"multicolIdx" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from multicol_tbl where cola = 1", "Instructions": { @@ -4730,32 +3691,7 @@ { "comment": "delete with routing using subsharding column with in query as lower cost over lookup vindex", "query": "delete from multicol_tbl where name = 'foo' and cola = 2", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from multicol_tbl where name = 'foo' and cola = 2", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 2, - "KsidVindex": "multicolIdx", - "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where `name` = 'foo' and cola = 2 for update", - "Query": "delete from multicol_tbl where `name` = 'foo' and cola = 2", - "Table": "multicol_tbl", - "Values": [ - "INT64(2)" - ], - "Vindex": "multicolIdx" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from multicol_tbl where name = 'foo' and cola = 2", "Instructions": { @@ -4784,41 +3720,7 @@ { "comment": "insert using select with simple table.", "query": "insert into music(id, user_id) select * from user", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into music(id, user_id) select * from user", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Select", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "TableName": "music", - "VindexOffsetFromSelect": { - "music_user_map": "[0]", - "user_index": "[1]" - }, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` for update", - "Table": "`user`" - } - ] - }, - "TablesUsed": [ - "user.music" - 
] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into music(id, user_id) select * from user", "Instructions": { @@ -4872,41 +3774,7 @@ { "comment": "insert using select with auto-inc column using vitess sequence, sequence column not present", "query": "insert into user_extra(user_id) select id from user", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into user_extra(user_id) select id from user", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Select", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "AutoIncrement": "select next :n /* INT64 */ values from seq:Offset(1)", - "TableName": "user_extra", - "VindexOffsetFromSelect": { - "user_index": "[0]" - }, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` for update", - "Table": "`user`" - } - ] - }, - "TablesUsed": [ - "user.user_extra" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into user_extra(user_id) select id from user", "Instructions": { @@ -4945,41 +3813,7 @@ { "comment": "insert using select with auto-inc column using vitess sequence, sequence column present", "query": "insert into user_extra(id, user_id) select null, id from user", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into user_extra(id, user_id) select null, id from user", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Select", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "AutoIncrement": "select next :n /* INT64 */ values from seq:Offset(2)", - "TableName": "user_extra", - "VindexOffsetFromSelect": { - "user_index": "[1]" - }, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": 
"select null, id from `user` where 1 != 1", - "Query": "select null, id from `user` for update", - "Table": "`user`" - } - ] - }, - "TablesUsed": [ - "user.user_extra" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into user_extra(id, user_id) select null, id from user", "Instructions": { @@ -5018,43 +3852,7 @@ { "comment": "sharded insert from select", "query": "insert into user(id) select 1 from dual", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into user(id) select 1 from dual", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Select", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "AutoIncrement": "select next :n /* INT64 */ values from seq:Offset(0)", - "TableName": "user", - "VindexOffsetFromSelect": { - "costly_map": "[-1]", - "name_user_map": "[-1]", - "user_index": "[0]" - }, - "Inputs": [ - { - "OperatorType": "Projection", - "Expressions": [ - "INT64(1) as 1" - ], - "Inputs": [ - { - "OperatorType": "SingleRow" - } - ] - } - ] - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into user(id) select 1 from dual", "Instructions": { @@ -5095,43 +3893,7 @@ { "comment": "insert using select with sharding column is autoinc and not present in the insert column query", "query": "insert into user(pattern) SELECT 1", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into user(pattern) SELECT 1", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Select", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "AutoIncrement": "select next :n /* INT64 */ values from seq:Offset(1)", - "TableName": "user", - "VindexOffsetFromSelect": { - "costly_map": "[-1]", - "name_user_map": "[-1]", - "user_index": "[1]" - }, - "Inputs": [ - { - "OperatorType": "Projection", - "Expressions": [ - "INT64(1) as 1" - ], - "Inputs": [ - { - 
"OperatorType": "SingleRow" - } - ] - } - ] - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into user(pattern) SELECT 1", "Instructions": { @@ -5177,41 +3939,7 @@ { "comment": "sharded same keyspace", "query": "insert into user_extra(user_id, col) select col1, col2 from user", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into user_extra(user_id, col) select col1, col2 from user", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Select", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "AutoIncrement": "select next :n /* INT64 */ values from seq:Offset(2)", - "TableName": "user_extra", - "VindexOffsetFromSelect": { - "user_index": "[0]" - }, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, col2 from `user` where 1 != 1", - "Query": "select col1, col2 from `user` for update", - "Table": "`user`" - } - ] - }, - "TablesUsed": [ - "user.user_extra" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into user_extra(user_id, col) select col1, col2 from user", "Instructions": { @@ -5250,25 +3978,7 @@ { "comment": "unsharded same keyspace", "query": "insert into unsharded(col) select col from unsharded_auto", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into unsharded(col) select col from unsharded_auto", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "Query": "insert into unsharded(col) select col from unsharded_auto for update", - "TableName": "unsharded" - }, - "TablesUsed": [ - "main.unsharded" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into unsharded(col) select col from unsharded_auto", "Instructions": { @@ -5291,41 
+4001,7 @@ { "comment": "sharded different keyspace", "query": "insert into user_extra(user_id, col) select col1, col2 from t1", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into user_extra(user_id, col) select col1, col2 from t1", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Select", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "AutoIncrement": "select next :n /* INT64 */ values from seq:Offset(2)", - "TableName": "user_extra", - "VindexOffsetFromSelect": { - "user_index": "[0]" - }, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "zlookup_unique", - "Sharded": true - }, - "FieldQuery": "select col1, col2 from t1 where 1 != 1", - "Query": "select col1, col2 from t1 for update", - "Table": "t1" - } - ] - }, - "TablesUsed": [ - "user.user_extra" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into user_extra(user_id, col) select col1, col2 from t1", "Instructions": { @@ -5351,54 +4027,20 @@ }, "FieldQuery": "select col1, col2 from t1 where 1 != 1", "Query": "select col1, col2 from t1 lock in share mode", - "Table": "t1" - } - ] - }, - "TablesUsed": [ - "user.user_extra", - "zlookup_unique.t1" - ] - } - }, - { - "comment": "sharded insert table, unsharded select table", - "query": "insert into user_extra(user_id, col) select col1, col2 from unsharded_tab", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into user_extra(user_id, col) select col1, col2 from unsharded_tab", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Select", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "AutoIncrement": "select next :n /* INT64 */ values from seq:Offset(2)", - "TableName": "user_extra", - "VindexOffsetFromSelect": { - "user_index": "[0]" - }, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main_2", - 
"Sharded": false - }, - "FieldQuery": "select col1, col2 from unsharded_tab where 1 != 1", - "Query": "select col1, col2 from unsharded_tab for update", - "Table": "unsharded_tab" + "Table": "t1" } ] }, "TablesUsed": [ - "user.user_extra" + "user.user_extra", + "zlookup_unique.t1" ] - }, - "gen4-plan": { + } + }, + { + "comment": "sharded insert table, unsharded select table", + "query": "insert into user_extra(user_id, col) select col1, col2 from unsharded_tab", + "plan": { "QueryType": "INSERT", "Original": "insert into user_extra(user_id, col) select col1, col2 from unsharded_tab", "Instructions": { @@ -5437,37 +4079,7 @@ { "comment": "unsharded different keyspace", "query": "insert into unsharded(col) select col from unsharded_tab", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into unsharded(col) select col from unsharded_tab", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "TableName": "unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main_2", - "Sharded": false - }, - "FieldQuery": "select col from unsharded_tab where 1 != 1", - "Query": "select col from unsharded_tab for update", - "Table": "unsharded_tab" - } - ] - }, - "TablesUsed": [ - "main.unsharded" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into unsharded(col) select col from unsharded_tab", "Instructions": { @@ -5502,37 +4114,7 @@ { "comment": "unsharded insert table, sharded select table", "query": "insert into unsharded(col) select col from t1", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into unsharded(col) select col from t1", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "TableName": "unsharded", - "Inputs": [ - { - 
"OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "zlookup_unique", - "Sharded": true - }, - "FieldQuery": "select col from t1 where 1 != 1", - "Query": "select col from t1 for update", - "Table": "t1" - } - ] - }, - "TablesUsed": [ - "main.unsharded" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into unsharded(col) select col from t1", "Instructions": { @@ -5567,18 +4149,18 @@ { "comment": "unsharded subquery in sharded update, not the same keyspace between outer and inner", "query": "update user set col = (select id from unsharded)", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set col = (select id from unsharded)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutValue", "PulloutVars": [ "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { @@ -5590,6 +4172,7 @@ "Table": "unsharded" }, { + "InputName": "Outer", "OperatorType": "Update", "Variant": "Scatter", "Keyspace": { @@ -5611,18 +4194,18 @@ { "comment": "sharded subquery in unsharded update, not the same keyspace", "query": "update unsharded set col = (select id from user)", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update unsharded set col = (select id from user)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutValue", "PulloutVars": [ "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -5634,6 +4217,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Update", "Variant": "Unsharded", "Keyspace": { @@ -5655,18 +4239,18 @@ { "comment": "sharded join unsharded subqueries in unsharded update", "query": "update 
unsharded set col = (select id from unsharded join user on unsharded.id = user.id)", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update unsharded set col = (select id from unsharded join user on unsharded.id = user.id)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutValue", "PulloutVars": [ "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "R:0", @@ -5704,6 +4288,7 @@ ] }, { + "InputName": "Outer", "OperatorType": "Update", "Variant": "Unsharded", "Keyspace": { @@ -5725,8 +4310,7 @@ { "comment": "sharded update with sub query where the sources can be merged into a single query", "query": "update user set col = (select count(*) from user_extra where user_extra.user_id = 5) where id = 5", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set col = (select count(*) from user_extra where user_extra.user_id = 5) where id = 5", "Instructions": { @@ -5753,8 +4337,7 @@ { "comment": "merge through correlated subquery", "query": "update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id = 5", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id = 5", "Instructions": { @@ -5781,8 +4364,7 @@ { "comment": "merge through correlated subquery #2", "query": "update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id > 5", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set col = (select count(*) from user_extra where user_extra.user_id = 
user.id) where id > 5", "Instructions": { @@ -5895,28 +4477,9 @@ } }, { - "comment": "Here V3 populates the TablesUsed incorrectly\n# delete with join from multi table join subquery.", + "comment": "delete with join from multi table join subquery", "query": "delete foo from unsharded as foo join (select id from unsharded a join unsharded_b b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete foo from unsharded as foo join (select id from unsharded a join unsharded_b b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "Query": "delete foo from unsharded as foo join (select id from unsharded as a join unsharded_b as b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000", - "Table": "unsharded, unsharded, unsharded_b" - }, - "TablesUsed": [ - "main.unsharded", - "main.unsharded, unsharded_b" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete foo from unsharded as foo join (select id from unsharded a join unsharded_b b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000", "Instructions": { @@ -5939,25 +4502,7 @@ { "comment": "update with routing using multi column vindex", "query": "update user set col = 1 where (name, col) in (('aa', 'bb'), ('cc', 'dd'))", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user set col = 1 where (name, col) in (('aa', 'bb'), ('cc', 'dd'))", - "Instructions": { - "OperatorType": "Update", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - 
}, - "TargetTabletType": "PRIMARY", - "Query": "update `user` set col = 1 where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))", - "Table": "user" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set col = 1 where (name, col) in (('aa', 'bb'), ('cc', 'dd'))", "Instructions": { @@ -5983,28 +4528,7 @@ { "comment": "delete with routing using multi column vindex", "query": "delete from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 1, - "KsidVindex": "user_index", - "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd')) for update", - "Query": "delete from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))", - "Table": "user" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))", "Instructions": { @@ -6077,25 +4601,7 @@ { "comment": "unsharded update query with comment directive", "query": "update /*vt+ QUERY_TIMEOUT_MS=1 */ unsharded set val = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update /*vt+ QUERY_TIMEOUT_MS=1 */ unsharded set val = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "Query": "update /*vt+ QUERY_TIMEOUT_MS=1 */ unsharded set val = 1", - "Table": "unsharded" - }, - "TablesUsed": [ - "main.unsharded" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update /*vt+ QUERY_TIMEOUT_MS=1 */ unsharded set val = 1", "Instructions": { @@ 
-6141,46 +4647,7 @@ { "comment": "insert with select using same tables, cannot stream parallel", "query": "insert into music(id, user_id) select id, user_id from music where user_id = 1", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into music(id, user_id) select id, user_id from music where user_id = 1", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Select", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "InputAsNonStreaming": true, - "TableName": "music", - "VindexOffsetFromSelect": { - "music_user_map": "[0]", - "user_index": "[1]" - }, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, user_id from music where 1 != 1", - "Query": "select id, user_id from music where user_id = 1 for update", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - ] - }, - "TablesUsed": [ - "user.music" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into music(id, user_id) select id, user_id from music where user_id = 1", "Instructions": { @@ -6277,46 +4744,7 @@ { "comment": "insert + lookup vindex + auto increment on lookup column + select - not provided", "query": "insert into mixed_tbl(shard_key) select foo from user where id = 1", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into mixed_tbl(shard_key) select foo from user where id = 1", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Select", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "AutoIncrement": "select next :n /* INT64 */ values from seq:Offset(1)", - "TableName": "mixed_tbl", - "VindexOffsetFromSelect": { - "lkp_shard_map": "[1]", - "shard_index": "[0]" - }, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - 
"FieldQuery": "select foo from `user` where 1 != 1", - "Query": "select foo from `user` where id = 1 for update", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - ] - }, - "TablesUsed": [ - "user.mixed_tbl" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into mixed_tbl(shard_key) select foo from user where id = 1", "Instructions": { @@ -6360,46 +4788,7 @@ { "comment": "insert + lookup vindex + auto increment on lookup column + select - provided", "query": "insert into mixed_tbl(shard_key, lkp_key) select foo, bar from user where id = 1", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into mixed_tbl(shard_key, lkp_key) select foo, bar from user where id = 1", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Select", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "AutoIncrement": "select next :n /* INT64 */ values from seq:Offset(1)", - "TableName": "mixed_tbl", - "VindexOffsetFromSelect": { - "lkp_shard_map": "[1]", - "shard_index": "[0]" - }, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select foo, bar from `user` where 1 != 1", - "Query": "select foo, bar from `user` where id = 1 for update", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - ] - }, - "TablesUsed": [ - "user.mixed_tbl" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into mixed_tbl(shard_key, lkp_key) select foo, bar from user where id = 1", "Instructions": { @@ -6441,16 +4830,16 @@ } }, { - "comment": "insert into a vindex not allowed - gen4 produces different error - forcing a v3 plan here will have same output", - "query": "insert /*vt+ planner=v3insert */ into user_index(id) values(1)", - "plan": "VT12001: unsupported: multi-shard or vindex write statement" + "comment": "insert into a vindex 
not allowed", + "query": "insert into user_index(id) values(1)", + "plan": "VT09014: vindex cannot be modified" }, { - "comment": "insert with select takes shared lock in gen4 and for update in v3, as forcing v3 plan the output will remain same", - "query": "insert /*vt+ planner=v3insert */ into user(id) select id from user", + "comment": "insert with select takes shared lock", + "query": "insert into user(id) select id from user", "plan": { "QueryType": "INSERT", - "Original": "insert /*vt+ planner=v3insert */ into user(id) select id from user", + "Original": "insert into user(id) select id from user", "Instructions": { "OperatorType": "Insert", "Variant": "Select", @@ -6476,7 +4865,7 @@ "Sharded": true }, "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` for update", + "Query": "select id from `user` lock in share mode", "Table": "`user`" } ] @@ -6486,4 +4875,4 @@ ] } } -] +] \ No newline at end of file diff --git a/go/vt/vtgate/planbuilder/testdata/filter_cases.json b/go/vt/vtgate/planbuilder/testdata/filter_cases.json index 790b1cdccb3..a3753375292 100644 --- a/go/vt/vtgate/planbuilder/testdata/filter_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/filter_cases.json @@ -2,22 +2,7 @@ { "comment": "No where clause", "query": "select id from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user", "Instructions": { @@ -39,22 +24,7 @@ { "comment": "Query that always return empty", "query": "select id from user where someColumn = null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where someColumn = null", - "Instructions": { - "OperatorType": 
"Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where someColumn = null", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where someColumn = null", "Instructions": { @@ -76,22 +46,7 @@ { "comment": "Null Safe Equality Operator is handled correctly", "query": "SELECT id from user where someColumn <=> null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT id from user where someColumn <=> null", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where someColumn <=> null", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT id from user where someColumn <=> null", "Instructions": { @@ -113,26 +68,7 @@ { "comment": "Single table unique vindex route", "query": "select id from user where user.id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.id = 5", "Instructions": { @@ -158,22 +94,7 @@ { "comment": "Single table unique vindex route, but complex expr", "query": "select id from user where user.id = 5+5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.id = 5+5", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - 
"Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.id = 5 + 5", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.id = 5+5", "Instructions": { @@ -199,26 +120,7 @@ { "comment": "Single table multiple unique vindex match", "query": "select id from music where id = 5 and user_id = 4", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where id = 5 and user_id = 4", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where id = 5 and user_id = 4", - "Table": "music", - "Values": [ - "INT64(4)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from music where id = 5 and user_id = 4", "Instructions": { @@ -244,26 +146,7 @@ { "comment": "Single table multiple non-unique vindex match", "query": "select id from user where costly = 'aa' and name = 'bb'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where costly = 'aa' and name = 'bb'", - "Instructions": { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where costly = 'aa' and `name` = 'bb'", - "Table": "`user`", - "Values": [ - "VARCHAR(\"bb\")" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where costly = 'aa' and name = 'bb'", "Instructions": { @@ -314,26 +197,7 @@ { "comment": "Single table multiple non-unique vindex match for IN clause", "query": "select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')", - "v3-plan": { - "QueryType": 
"SELECT", - "Original": "select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')", - "Instructions": { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where costly in ('aa', 'bb') and `name` in ::__vals", - "Table": "`user`", - "Values": [ - "(VARCHAR(\"aa\"), VARCHAR(\"bb\"))" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')", "Instructions": { @@ -384,26 +248,7 @@ { "comment": "Composite IN clause", "query": "select id from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))", - "Instructions": { - "OperatorType": "Route", - "Variant": "MultiEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))", - "Table": "`user`", - "Values": [ - "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))", "Instructions": { @@ -454,26 +299,7 @@ { "comment": "Composite IN clause, swapped columns", "query": "select id from user where (col, name) in (('aa', 'bb'), ('cc', 'dd'))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (col, name) in (('aa', 'bb'), ('cc', 'dd'))", - "Instructions": { - "OperatorType": "Route", - "Variant": "MultiEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (col, `name`) in (('aa', 'bb'), ('cc', 
'dd'))", - "Table": "`user`", - "Values": [ - "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (col, name) in (('aa', 'bb'), ('cc', 'dd'))", "Instructions": { @@ -524,26 +350,7 @@ { "comment": "Composite IN clause, choose cost within tuple", "query": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))", - "Instructions": { - "OperatorType": "Route", - "Variant": "MultiEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))", - "Table": "`user`", - "Values": [ - "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))", "Instructions": { @@ -594,26 +401,7 @@ { "comment": "Composite IN clause, choose cost within tuple, swapped", "query": "select id from user where (name, costly) in (('aa', 'bb'), ('cc', 'dd'))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (name, costly) in (('aa', 'bb'), ('cc', 'dd'))", - "Instructions": { - "OperatorType": "Route", - "Variant": "MultiEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (`name`, costly) in (('aa', 'bb'), ('cc', 'dd'))", - "Table": "`user`", - "Values": [ - "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (name, costly) in (('aa', 'bb'), ('cc', 'dd'))", "Instructions": { @@ -664,26 
+452,7 @@ { "comment": "Composite IN clause, choose cost", "query": "select id from user where (col, costly) in (('aa', 'bb')) and (col, name) in (('cc', 'dd'))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (col, costly) in (('aa', 'bb')) and (col, name) in (('cc', 'dd'))", - "Instructions": { - "OperatorType": "Route", - "Variant": "MultiEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (col, costly) in (('aa', 'bb')) and (col, `name`) in (('cc', 'dd'))", - "Table": "`user`", - "Values": [ - "(VARCHAR(\"dd\"))" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (col, costly) in (('aa', 'bb')) and (col, name) in (('cc', 'dd'))", "Instructions": { @@ -734,26 +503,7 @@ { "comment": "Composite IN clause vs equality", "query": "select id from user where (col, name) in (('aa', 'bb')) and id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (col, name) in (('aa', 'bb')) and id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (col, `name`) in (('aa', 'bb')) and id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (col, name) in (('aa', 'bb')) and id = 5", "Instructions": { @@ -779,26 +529,7 @@ { "comment": "Composite IN: multiple vindex matches", "query": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))", - "Instructions": { - "OperatorType": 
"Route", - "Variant": "MultiEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))", - "Table": "`user`", - "Values": [ - "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))", "Instructions": { @@ -849,26 +580,7 @@ { "comment": "Composite IN: tuple inside tuple", "query": "select id from user where ((col1, name), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where ((col1, name), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))", - "Instructions": { - "OperatorType": "Route", - "Variant": "MultiEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where ((col1, `name`), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))", - "Table": "`user`", - "Values": [ - "(VARCHAR(\"bb\"), VARCHAR(\"ee\"))" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where ((col1, name), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))", "Instructions": { @@ -919,26 +631,7 @@ { "comment": "Composite IN: tuple inside tuple, but no match in tuple", "query": "select id from user where (name, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (name, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))", - "Instructions": { - "OperatorType": "Route", - "Variant": "MultiEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from 
`user` where (`name`, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))", - "Table": "`user`", - "Values": [ - "(VARCHAR(\"aa\"), VARCHAR(\"dd\"))" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (name, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))", "Instructions": { @@ -989,22 +682,7 @@ { "comment": "Composite IN: tuple inside tuple, mismiatched values", "query": "select id from user where ((col1, name), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where ((col1, name), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where ((col1, `name`), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where ((col1, name), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))", "Instructions": { @@ -1026,22 +704,7 @@ { "comment": "Composite IN: RHS not tuple", "query": "select id from user where (col1, name) in (select * from music where music.user_id=user.id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (col1, name) in (select * from music where music.user_id=user.id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (col1, `name`) in (select * from music where music.user_id = `user`.id)", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (col1, name) in (select * from music 
where music.user_id=user.id)", "Instructions": { @@ -1064,22 +727,7 @@ { "comment": "Composite IN: RHS has no simple values", "query": "select id from user where (col1, name) in (('aa', 1+1))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (col1, name) in (('aa', 1+1))", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (col1, `name`) in (('aa', 1 + 1))", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (col1, name) in (('aa', 1+1))", "Instructions": { @@ -1130,22 +778,7 @@ { "comment": "IN clause: LHS is neither column nor composite tuple", "query": "select Id from user where 1 in ('aa', 'bb')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select Id from user where 1 in ('aa', 'bb')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select Id from `user` where 1 != 1", - "Query": "select Id from `user` where 1 in ('aa', 'bb')", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select Id from user where 1 in ('aa', 'bb')", "Instructions": { @@ -1167,22 +800,7 @@ { "comment": "Single table complex in clause", "query": "select id from user where name in (col, 'bb')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where name in (col, 'bb')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `name` in (col, 'bb')", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where name in (col, 
'bb')", "Instructions": { @@ -1204,26 +822,7 @@ { "comment": "Single table equality route with val arg", "query": "select id from user where name = :a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where name = :a", - "Instructions": { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `name` = :a", - "Table": "`user`", - "Values": [ - ":a" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where name = :a", "Instructions": { @@ -1274,26 +873,7 @@ { "comment": "Single table equality route with unsigned value", "query": "select id from user where name = 18446744073709551615", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where name = 18446744073709551615", - "Instructions": { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `name` = 18446744073709551615", - "Table": "`user`", - "Values": [ - "UINT64(18446744073709551615)" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where name = 18446744073709551615", "Instructions": { @@ -1344,26 +924,7 @@ { "comment": "Single table in clause list arg", "query": "select id from user where name in ::list", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where name in ::list", - "Instructions": { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `name` in ::__vals", - "Table": "`user`", - "Values": [ - "::list" - ], - "Vindex": "name_user_map" - } - }, - 
"gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where name in ::list", "Instructions": { @@ -1414,26 +975,7 @@ { "comment": "Multi-table unique vindex constraint", "query": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user.id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user.id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1", - "Query": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where `user`.id = 5", - "Table": "`user`, user_extra", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user.id = 5", "Instructions": { @@ -1460,26 +1002,7 @@ { "comment": "Multi-table unique vindex constraint on right table", "query": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1", - "Query": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where user_extra.user_id = 5", - "Table": "`user`, user_extra", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": 
"SELECT", "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5", "Instructions": { @@ -1560,48 +1083,7 @@ { "comment": "Multi-route unique vindex constraint", "query": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_col": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` where `user`.id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select user_extra.id from user_extra where user_extra.col = :user_col", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5", "Instructions": { @@ -1650,52 +1132,7 @@ { "comment": "Multi-route unique vindex route on both routes", "query": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - 
"user_col": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` where `user`.id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select user_extra.id from user_extra where user_extra.col = :user_col and user_extra.user_id = 5", - "Table": "user_extra", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5", "Instructions": { @@ -1722,7 +1159,7 @@ { "comment": "Multi-route with cross-route constraint", "query": "select user_extra.id from user join user_extra on user.col = user_extra.col where user_extra.user_id = user.col", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user_extra.user_id = user.col", "Instructions": { @@ -1753,48 +1190,7 @@ "Sharded": true }, "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select user_extra.id from user_extra where user_extra.col = :user_col and user_extra.user_id = :user_col", - "Table": "user_extra", - "Values": [ - ":user_col" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user_extra.user_id = user.col", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - 
"JoinVars": { - "user_col": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select user_extra.id from user_extra where user_extra.user_id = :user_col and user_extra.col = :user_col", + "Query": "select user_extra.id from user_extra where user_extra.user_id = :user_col and user_extra.col = :user_col", "Table": "user_extra", "Values": [ ":user_col" @@ -1812,44 +1208,7 @@ { "comment": "Multi-route with non-route constraint, should use first route.", "query": "select user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_col": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` where 1 = 1", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select user_extra.id from user_extra where user_extra.col = :user_col", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_extra.id from user join 
user_extra on user.col = user_extra.col where 1 = 1", "Instructions": { @@ -1894,26 +1253,7 @@ { "comment": "Route with multiple route constraints, SelectIN is the best constraint.", "query": "select id from user where user.col = 5 and user.id in (1, 2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.col = 5 and user.id in (1, 2)", - "Instructions": { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.col = 5 and `user`.id in ::__vals", - "Table": "`user`", - "Values": [ - "(INT64(1), INT64(2))" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.col = 5 and user.id in (1, 2)", "Instructions": { @@ -1939,26 +1279,7 @@ { "comment": "Route with multiple route constraints and boolean, SelectIN is the best constraint.", "query": "select id from user where user.col = case user.col when 'foo' then true else false end and user.id in (1, 2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.col = case user.col when 'foo' then true else false end and user.id in (1, 2)", - "Instructions": { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.col = case `user`.col when 'foo' then true else false end and `user`.id in ::__vals", - "Table": "`user`", - "Values": [ - "(INT64(1), INT64(2))" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.col = case user.col when 'foo' then true else false end and user.id in (1, 2)", "Instructions": { @@ -1984,26 +1305,7 @@ { "comment": "Route with multiple route constraints and boolean, SelectEqual is the 
best constraint.", "query": "select (id or col) as val from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select (id or col) as val from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa'", - "Instructions": { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id or col as val from `user` where 1 != 1", - "Query": "select id or col as val from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa'", - "Table": "`user`", - "Values": [ - "VARCHAR(\"aa\")" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select (id or col) as val from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa'", "Instructions": { @@ -2054,26 +1356,7 @@ { "comment": "Route with multiple route constraints, SelectEqual is the best constraint.", "query": "select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'", - "Instructions": { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.col = false and `user`.id in (1, 2) and `user`.`name` = 'aa'", - "Table": "`user`", - "Values": [ - "VARCHAR(\"aa\")" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'", "Instructions": { @@ -2124,26 +1407,7 @@ { "comment": "Route with multiple route constraints, SelectEqualUnique is the best constraint.", "query": "select id from user where user.col = 5 and user.id in (1, 2) and user.name = 
'aa' and user.id = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa' and user.id = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa' and `user`.id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa' and user.id = 1", "Instructions": { @@ -2169,26 +1433,7 @@ { "comment": "Route with multiple route constraints, SelectEqualUnique is the best constraint, order reversed.", "query": "select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.id = 1 and `user`.`name` = 'aa' and `user`.id in (1, 2) and `user`.col = 5", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5", "Instructions": { @@ -2214,22 +1459,7 @@ { "comment": "Route with OR and AND clause, must parenthesize correctly.", "query": "select id from user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from 
user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.id = 1 or `user`.`name` = 'aa' and `user`.id in (1, 2)", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)", "Instructions": { @@ -2255,44 +1485,7 @@ { "comment": "Unsharded route", "query": "select unsharded.id from user join unsharded where unsharded.id = user.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select unsharded.id from user join unsharded where unsharded.id = user.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.id from unsharded where 1 != 1", - "Query": "select unsharded.id from unsharded where unsharded.id = :user_id", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select unsharded.id from user join unsharded where unsharded.id = user.id", "Instructions": { @@ -2341,26 +1534,7 @@ { "comment": "routing rules: choose the redirected table", "query": "select col from route1 where id = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from route1 where id = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", 
- "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` as route1 where 1 != 1", - "Query": "select col from `user` as route1 where id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from route1 where id = 1", "Instructions": { @@ -2386,7 +1560,7 @@ { "comment": "subquery", "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col) and u.id in (user_extra.col, 1)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col) and u.id in (user_extra.col, 1)", "Instructions": { @@ -2425,11 +1599,19 @@ "Vindex": "user_index" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "correlated subquery merge-able into a route of a join tree", + "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)", + "plan": { "QueryType": "SELECT", - "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col) and u.id in (user_extra.col, 1)", + "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)", "Instructions": { "OperatorType": "Join", "Variant": "Join", @@ -2458,7 +1640,7 @@ "Sharded": true }, "FieldQuery": "select u.m from `user` as u where 1 != 1", - "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col /* INT16 */) and u.id in ::__vals", + "Query": "select u.m from `user` as u where u.id in ::__vals and u.id in (select m2 from `user` 
where `user`.id = u.id)", "Table": "`user`", "Values": [ "(:user_extra_col, INT64(1))" @@ -2474,101 +1656,11 @@ } }, { - "comment": "correlated subquery merge-able into a route of a join tree", - "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)", - "v3-plan": { + "comment": "ensure subquery reordering gets us a better plan", + "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5", + "plan": { "QueryType": "SELECT", - "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_extra_col": 0 - }, - "TableName": "user_extra_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra", - "Table": "user_extra" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.m from `user` as u where 1 != 1", - "Query": "select u.m from `user` as u where u.id in ::__vals and u.id in (select m2 from `user` where `user`.id = u.id)", - "Table": "`user`", - "Values": [ - "(:user_extra_col, INT64(1))" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_extra_col": 0 - }, - "TableName": "user_extra_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": 
"Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra", - "Table": "user_extra" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.m from `user` as u where 1 != 1", - "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id) and u.id in ::__vals", - "Table": "`user`", - "Values": [ - "(:user_extra_col, INT64(1))" - ], - "Vindex": "user_index" - } - ] - }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "ensure subquery reordering gets us a better plan", - "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5", + "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5", "Instructions": { "OperatorType": "Join", "Variant": "Join", @@ -2602,44 +1694,6 @@ "Vindex": "user_index" } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "TableName": "user_extra_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.m from `user` as u where 1 
!= 1", - "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = 5) and u.id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - ] }, "TablesUsed": [ "user.user", @@ -2650,7 +1704,7 @@ { "comment": "nested subquery", "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col and user.id in (select m3 from user_extra where user_extra.user_id = user.id)) and u.id in (user_extra.col, 1)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col and user.id in (select m3 from user_extra where user_extra.user_id = user.id)) and u.id in (user_extra.col, 1)", "Instructions": { @@ -2689,47 +1743,6 @@ "Vindex": "user_index" } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col and user.id in (select m3 from user_extra where user_extra.user_id = user.id)) and u.id in (user_extra.col, 1)", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_extra_col": 0 - }, - "TableName": "user_extra_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra", - "Table": "user_extra" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.m from `user` as u where 1 != 1", - "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col /* INT16 */ and `user`.id in 
(select m3 from user_extra where user_extra.user_id = `user`.id)) and u.id in ::__vals", - "Table": "`user`", - "Values": [ - "(:user_extra_col, INT64(1))" - ], - "Vindex": "user_index" - } - ] }, "TablesUsed": [ "user.user", @@ -2740,22 +1753,7 @@ { "comment": "Correlated subquery in where clause", "query": "select id from user where user.col in (select user_extra.col from user_extra where user_extra.user_id = user.id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.col in (select user_extra.col from user_extra where user_extra.user_id = user.id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.col in (select user_extra.col from user_extra where user_extra.user_id = `user`.id)", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.col in (select user_extra.col from user_extra where user_extra.user_id = user.id)", "Instructions": { @@ -2778,26 +1776,7 @@ { "comment": "outer and inner subquery route by same int val", "query": "select id from user where id = 5 and user.col in (select user_extra.col from user_extra where user_extra.user_id = 5)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id = 5 and user.col in (select user_extra.col from user_extra where user_extra.user_id = 5)", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 5 and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 5)", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", 
"Original": "select id from user where id = 5 and user.col in (select user_extra.col from user_extra where user_extra.user_id = 5)", "Instructions": { @@ -2824,26 +1803,7 @@ { "comment": "outer and inner subquery route by same str val", "query": "select id from user where id = 'aa' and user.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id = 'aa' and user.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 'aa' and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')", - "Table": "`user`", - "Values": [ - "VARCHAR(\"aa\")" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id = 'aa' and user.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')", "Instructions": { @@ -2870,26 +1830,7 @@ { "comment": "outer and inner subquery route by same val arg", "query": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a)", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = :a and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = :a)", - "Table": "`user`", - "Values": [ - ":a" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": 
{ + "plan": { "QueryType": "SELECT", "Original": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a)", "Instructions": { @@ -2916,28 +1857,12 @@ { "comment": "unresolved symbol in inner subquery.", "query": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a and foo.id = 1)", - "v3-plan": "VT03019: column foo.id not found", - "gen4-plan": "column 'foo.id' not found" + "plan": "column 'foo.id' not found" }, { "comment": "outer and inner subquery route by same outermost column value", "query": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id2 from `user` as uu where 1 != 1", - "Query": "select id2 from `user` as uu where id in (select id from `user` where id = uu.id and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))", "Instructions": { @@ -2960,18 +1885,19 @@ { "comment": "cross-shard subquery in IN clause.\n# Note the improved Underlying plan as SelectIN.", "query": "select id from user where id in (select col from user)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id in (select col from user)", 
"Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -2983,6 +1909,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -2990,7 +1917,7 @@ "Sharded": true }, "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals", + "Query": "select id from `user` where :__sq_has_values and id in ::__vals", "Table": "`user`", "Values": [ "::__sq1" @@ -2998,67 +1925,28 @@ "Vindex": "user_index" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "cross-shard subquery in NOT IN clause.", + "query": "select id from user where id not in (select col from user)", + "plan": { "QueryType": "SELECT", - "Original": "select id from user where id in (select col from user)", + "Original": "select id from user where id not in (select col from user)", "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals", - "Table": "`user`", - "Values": [ - "::__sq1" - ], - "Vindex": "user_index" - } - ] - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "cross-shard subquery in NOT IN clause.", - "query": "select id from user where 
id not in (select col from user)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id not in (select col from user)", - "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutNotIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3070,6 +1958,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3077,43 +1966,7 @@ "Sharded": true }, "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where :__sq_has_values1 = 0 or id not in ::__sq1", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id not in (select col from user)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutNotIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where :__sq_has_values1 = 0 or id not in ::__sq1", + "Query": "select id from `user` where not :__sq_has_values and id not in ::__sq1", "Table": "`user`" } ] @@ -3126,76 +1979,30 @@ { "comment": "cross-shard subquery in EXISTS clause.", "query": "select id from user where exists (select col from user)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where exists (select col from user)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": 
"UncorrelatedSubquery", "Variant": "PulloutExists", "PulloutVars": [ - "__sq_has_values1", - "__sq1" + "__sq_has_values" ], "Inputs": [ { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` limit :__upper_limit", - "Table": "`user`" - } - ] - }, - { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where :__sq_has_values1", + "FieldQuery": "select 1 from `user` where 1 != 1", + "Query": "select 1 from `user`", "Table": "`user`" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select id from user where exists (select col from user)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutExists", - "PulloutVars": [ - "__sq_has_values1" - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` limit :__upper_limit", - "Table": "`user`" - } - ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3203,7 +2010,7 @@ "Sharded": true }, "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where :__sq_has_values1", + "Query": "select id from `user` where :__sq_has_values", "Table": "`user`" } ] @@ -3216,58 +2023,18 @@ { "comment": "cross-shard subquery as expression", "query": "select id from user where id = (select col from user)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id = (select col from user)", - "Instructions": { - 
"OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = :__sq1", - "Table": "`user`", - "Values": [ - ":__sq1" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id = (select col from user)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutValue", "PulloutVars": [ - "__sq_has_values1", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3279,6 +2046,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -3303,87 +2071,27 @@ { "comment": "multi-level pullout", "query": "select id1 from user where id = (select id2 from user where id2 in (select id3 from user))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id1 from user where id = (select id2 from user where id2 in (select id3 from user))", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values2", - "__sq2" - ], - "Inputs": [ - { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id3 from `user` where 1 != 1", - "Query": "select id3 from `user`", - 
"Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id2 from `user` where 1 != 1", - "Query": "select id2 from `user` where :__sq_has_values1 = 1 and id2 in ::__sq1", - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id1 from `user` where 1 != 1", - "Query": "select id1 from `user` where id = :__sq2", - "Table": "`user`", - "Values": [ - ":__sq2" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id1 from user where id = (select id2 from user where id2 in (select id3 from user))", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutValue", "PulloutVars": [ - "__sq_has_values1", "__sq1" ], "Inputs": [ { - "OperatorType": "Subquery", + "InputName": "SubQuery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values2", + "__sq_has_values", "__sq2" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3395,6 +2103,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3402,104 +2111,21 @@ "Sharded": true }, "FieldQuery": "select id2 from `user` where 1 != 1", - "Query": "select id2 from `user` where :__sq_has_values2 = 1 and id2 in ::__sq2", + "Query": "select id2 from `user` where :__sq_has_values and id2 in ::__sq2", "Table": "`user`" } ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select id1 from `user` where 1 != 1", - "Query": "select id1 from `user` where id = :__sq1", - "Table": "`user`", - "Values": [ - ":__sq1" - ], - "Vindex": "user_index" - } - ] - 
}, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "routing rules subquery merge", - "query": "select col from user where id = (select id from route1 where route1.id = user.id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where id = (select id from route1 where route1.id = user.id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where id = (select id from `user` as route1 where route1.id = `user`.id)", - "Table": "`user`" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select col from user where id = (select id from route1 where route1.id = user.id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where id = (select id from `user` as route1 where route1.id = `user`.id)", - "Table": "`user`" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "routing rules subquery pullout", - "query": "select col from user where id = (select id from route2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where id = (select id from route2)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select id from unsharded as route2 where 1 != 1", - "Query": "select id from unsharded as route2", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from 
`user` where id = :__sq1", + "FieldQuery": "select id1 from `user` where 1 != 1", + "Query": "select id1 from `user` where id = :__sq1", "Table": "`user`", "Values": [ ":__sq1" @@ -3507,20 +2133,49 @@ "Vindex": "user_index" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "routing rules subquery merge", + "query": "select col from user where id = (select id from route1 where route1.id = user.id)", + "plan": { + "QueryType": "SELECT", + "Original": "select col from user where id = (select id from route1 where route1.id = user.id)", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user` where id = (select id from `user` as route1 where route1.id = `user`.id)", + "Table": "`user`" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "routing rules subquery pullout", + "query": "select col from user where id = (select id from route2)", + "plan": { "QueryType": "SELECT", "Original": "select col from user where id = (select id from route2)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutValue", "PulloutVars": [ - "__sq_has_values1", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { @@ -3532,6 +2187,7 @@ "Table": "unsharded" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -3557,26 +2213,7 @@ { "comment": "Case preservation test", "query": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { 
- "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.Id from `user` join user_extra on `user`.iD = user_extra.User_Id where 1 != 1", - "Query": "select user_extra.Id from `user` join user_extra on `user`.iD = user_extra.User_Id where `user`.Id = 5", - "Table": "`user`, user_extra", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5", "Instructions": { @@ -3603,22 +2240,7 @@ { "comment": "database() call in where clause.", "query": "select id from user where database()", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where database()", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where database()", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where database()", "Instructions": { @@ -3640,22 +2262,7 @@ { "comment": "Select with equals null", "query": "select id from music where id = null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where id = null", - "Instructions": { - "OperatorType": "Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where id = null", - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from music where id = null", "Instructions": { @@ -3677,22 +2284,7 @@ { "comment": "SELECT with IS NULL", "query": "select id from music where id is null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where id is null", - "Instructions": { - "OperatorType": "Route", - 
"Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where id is null", - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from music where id is null", "Instructions": { @@ -3714,22 +2306,7 @@ { "comment": "SELECT with IS NOT NULL", "query": "select id from music where id is not null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where id is not null", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where id is not null", - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from music where id is not null", "Instructions": { @@ -3751,22 +2328,7 @@ { "comment": "Single table with unique vindex match and null match", "query": "select id from music where user_id = 4 and id = null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where user_id = 4 and id = null", - "Instructions": { - "OperatorType": "Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where user_id = 4 and id = null", - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from music where user_id = 4 and id = null", "Instructions": { @@ -3788,22 +2350,7 @@ { "comment": "Single table with unique vindex match and IN (null)", "query": "select id from music where user_id = 4 and id IN (null)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where user_id = 4 and id IN (null)", - "Instructions": { - "OperatorType": "Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - 
"Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where user_id = 4 and id in (null)", - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from music where user_id = 4 and id IN (null)", "Instructions": { @@ -3825,26 +2372,7 @@ { "comment": "Single table with unique vindex match and IN (null, 1, 2)", "query": "select id from music where user_id = 4 and id IN (null, 1, 2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where user_id = 4 and id IN (null, 1, 2)", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where user_id = 4 and id in (null, 1, 2)", - "Table": "music", - "Values": [ - "INT64(4)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from music where user_id = 4 and id IN (null, 1, 2)", "Instructions": { @@ -3870,22 +2398,7 @@ { "comment": "Single table with unique vindex match and NOT IN (null, 1, 2)", "query": "select id from music where user_id = 4 and id NOT IN (null, 1, 2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where user_id = 4 and id NOT IN (null, 1, 2)", - "Instructions": { - "OperatorType": "Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where user_id = 4 and id not in (null, 1, 2)", - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from music where user_id = 4 and id NOT IN (null, 1, 2)", "Instructions": { @@ -3907,124 +2420,41 @@ { "comment": "Single table with unique vindex match and NOT IN (null, 1, 2) predicates inverted", "query": "select id from music where id 
NOT IN (null, 1, 2) and user_id = 4", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where id NOT IN (null, 1, 2) and user_id = 4", - "Instructions": { - "OperatorType": "Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where id not in (null, 1, 2) and user_id = 4", - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from music where id NOT IN (null, 1, 2) and user_id = 4", "Instructions": { "OperatorType": "Route", "Variant": "None", "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where id not in (null, 1, 2) and user_id = 4", - "Table": "music" - }, - "TablesUsed": [ - "user.music" - ] - } - }, - { - "comment": "pullout sq after pullout sq", - "query": "select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutNotIn", - "PulloutVars": [ - "__sq_has_values2", - "__sq2" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra where user_extra.user_id = 42", - "Table": "user_extra", - "Values": [ - "INT64(42)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", 
- "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra where user_extra.user_id = 411", - "Table": "user_extra", - "Values": [ - "INT64(411)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals and (:__sq_has_values2 = 0 or id not in ::__sq2)", - "Table": "`user`", - "Values": [ - "::__sq1" - ], - "Vindex": "user_index" - } - ] - } - ] - } - }, - "gen4-plan": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id from music where 1 != 1", + "Query": "select id from music where id not in (null, 1, 2) and user_id = 4", + "Table": "music" + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "pullout sq after pullout sq", + "query": "select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)", + "plan": { "QueryType": "SELECT", "Original": "select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values2", + "__sq_has_values1", "__sq2" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -4040,14 +2470,16 @@ "Vindex": "user_index" }, { - "OperatorType": "Subquery", + "InputName": "Outer", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutNotIn", 
"PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -4063,6 +2495,7 @@ "Vindex": "user_index" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -4070,7 +2503,7 @@ "Sharded": true }, "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (:__sq_has_values1 = 0 or id not in ::__sq1) and (:__sq_has_values2 = 1 and id in ::__vals)", + "Query": "select id from `user` where not :__sq_has_values and id not in ::__sq1 and :__sq_has_values1 and id in ::__vals", "Table": "`user`", "Values": [ "::__sq2" @@ -4090,26 +2523,7 @@ { "comment": "solving LIKE query with a CFC prefix vindex", "query": "select c2 from cfc_vindex_col where c1 like 'A%'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select c2 from cfc_vindex_col where c1 like 'A%'", - "Instructions": { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select c2 from cfc_vindex_col where 1 != 1", - "Query": "select c2 from cfc_vindex_col where c1 like 'A%'", - "Table": "cfc_vindex_col", - "Values": [ - "VARCHAR(\"A%\")" - ], - "Vindex": "cfc" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select c2 from cfc_vindex_col where c1 like 'A%'", "Instructions": { @@ -4135,26 +2549,7 @@ { "comment": "select * from samecolvin where col = :col", "query": "select * from samecolvin where col = :col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from samecolvin where col = :col", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from samecolvin where 1 != 1", - "Query": "select col from samecolvin where col = :col", - "Table": "samecolvin", - "Values": [ - ":col" - ], - "Vindex": "vindex1" - } - }, - 
"gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from samecolvin where col = :col", "Instructions": { @@ -4180,22 +2575,7 @@ { "comment": "non unique predicate on vindex", "query": "select id from user where user.id > 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.id > 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.id > 5", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.id > 5", "Instructions": { @@ -4217,22 +2597,7 @@ { "comment": "select from unsharded keyspace with uncorrelated subquery which should be merged to a single route", "query": "select unsharded.id from unsharded where unsharded.name in (select name from unsharded_a)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select unsharded.id from unsharded where unsharded.name in (select name from unsharded_a)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.id from unsharded where 1 != 1", - "Query": "select unsharded.id from unsharded where unsharded.`name` in (select `name` from unsharded_a)", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select unsharded.id from unsharded where unsharded.name in (select name from unsharded_a)", "Instructions": { @@ -4255,58 +2620,19 @@ { "comment": "in subquery the id will be scoped to local table as there is no qualifier associated with it.", "query": "select id from user where id in (select col from unsharded where col = id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id in (select col from unsharded where col = id)", - 
"Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select col from unsharded where 1 != 1", - "Query": "select col from unsharded where col = id", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals", - "Table": "`user`", - "Values": [ - "::__sq1" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id in (select col from unsharded where col = id)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { @@ -4318,6 +2644,7 @@ "Table": "unsharded" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -4325,7 +2652,7 @@ "Sharded": true }, "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals", + "Query": "select id from `user` where :__sq_has_values and id in ::__vals", "Table": "`user`", "Values": [ "::__sq1" @@ -4340,30 +2667,10 @@ ] } }, - { - "comment": "correlated subquery with different keyspace tables involved", - "query": "select id from user where id in (select col from unsharded where col = user.id)", - "plan": "VT12001: unsupported: cross-shard correlated subquery" - }, { "comment": "correlated subquery with same keyspace", "query": "select u.id from user as u where u.col in (select ue.user_id from 
user_extra as ue where ue.user_id = u.id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id from `user` as u where 1 != 1", - "Query": "select u.id from `user` as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)", "Instructions": { @@ -4405,22 +2712,7 @@ { "comment": "SelectReference with uncorrelated subqueries", "query": "select ref.col from ref where ref.col in (select ref.col from ref)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select ref.col from ref where ref.col in (select ref.col from ref)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select ref.col from ref where 1 != 1", - "Query": "select ref.col from ref where ref.col in (select ref.col from ref)", - "Table": "ref" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select ref.col from ref where ref.col in (select ref.col from ref)", "Instructions": { @@ -4442,26 +2734,7 @@ { "comment": "SelectEqualUnique with uncorrelated subqueries", "query": "select u1.col from user as u1 where u1.id = 5 and u1.name in (select u2.name from user u2 where u2.id = 5)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.col from user as u1 where u1.id = 5 and u1.name in (select u2.name from user u2 where u2.id = 5)", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": 
"select u1.col from `user` as u1 where 1 != 1", - "Query": "select u1.col from `user` as u1 where u1.id = 5 and u1.`name` in (select u2.`name` from `user` as u2 where u2.id = 5)", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.col from user as u1 where u1.id = 5 and u1.name in (select u2.name from user u2 where u2.id = 5)", "Instructions": { @@ -4487,26 +2760,7 @@ { "comment": "SelectEqualUnique with EXISTS uncorrelated subquery", "query": "select u1.col from user as u1 where u1.id = 5 and exists (select u2.name from user u2 where u2.id = 5)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.col from user as u1 where u1.id = 5 and exists (select u2.name from user u2 where u2.id = 5)", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.col from `user` as u1 where 1 != 1", - "Query": "select u1.col from `user` as u1 where u1.id = 5 and exists (select 1 from `user` as u2 where u2.id = 5 limit 1)", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.col from user as u1 where u1.id = 5 and exists (select u2.name from user u2 where u2.id = 5)", "Instructions": { @@ -4517,7 +2771,7 @@ "Sharded": true }, "FieldQuery": "select u1.col from `user` as u1 where 1 != 1", - "Query": "select u1.col from `user` as u1 where u1.id = 5 and exists (select 1 from `user` as u2 where u2.id = 5 limit 1)", + "Query": "select u1.col from `user` as u1 where u1.id = 5 and exists (select 1 from `user` as u2 where u2.id = 5)", "Table": "`user`", "Values": [ "INT64(5)" @@ -4532,102 +2786,44 @@ { "comment": "SelectEqualUnique with NOT EXISTS uncorrelated subquery", "query": "select u1.col from user as u1 where u1.id = 5 and not exists (select u2.name from 
user u2 where u2.id = 5)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.col from user as u1 where u1.id = 5 and not exists (select u2.name from user u2 where u2.id = 5)", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.col from `user` as u1 where 1 != 1", - "Query": "select u1.col from `user` as u1 where u1.id = 5 and not exists (select 1 from `user` as u2 where u2.id = 5 limit 1)", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.col from user as u1 where u1.id = 5 and not exists (select u2.name from user u2 where u2.id = 5)", "Instructions": { "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.col from `user` as u1 where 1 != 1", - "Query": "select u1.col from `user` as u1 where u1.id = 5 and not exists (select 1 from `user` as u2 where u2.id = 5 limit 1)", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "SelectScatter with NOT EXISTS uncorrelated subquery", - "query": "select u1.col from user as u1 where not exists (select u2.name from user u2 where u2.id = 5)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.col from user as u1 where not exists (select u2.name from user u2 where u2.id = 5)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutExists", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u2 where 1 != 1", - "Query": "select 1 from `user` as u2 where u2.id = 5 limit 1", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], 
- "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.col from `user` as u1 where 1 != 1", - "Query": "select u1.col from `user` as u1 where not :__sq_has_values1", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u1.col from `user` as u1 where 1 != 1", + "Query": "select u1.col from `user` as u1 where u1.id = 5 and not exists (select 1 from `user` as u2 where u2.id = 5)", + "Table": "`user`", + "Values": [ + "INT64(5)" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "SelectScatter with NOT EXISTS uncorrelated subquery", + "query": "select u1.col from user as u1 where not exists (select u2.name from user u2 where u2.id = 5)", + "plan": { "QueryType": "SELECT", "Original": "select u1.col from user as u1 where not exists (select u2.name from user u2 where u2.id = 5)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutExists", "PulloutVars": [ - "__sq_has_values1" + "__sq_has_values" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -4635,7 +2831,7 @@ "Sharded": true }, "FieldQuery": "select 1 from `user` as u2 where 1 != 1", - "Query": "select 1 from `user` as u2 where u2.id = 5 limit 1", + "Query": "select 1 from `user` as u2 where u2.id = 5", "Table": "`user`", "Values": [ "INT64(5)" @@ -4643,6 +2839,7 @@ "Vindex": "user_index" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -4650,7 +2847,7 @@ "Sharded": true }, "FieldQuery": "select u1.col from `user` as u1 where 1 != 1", - "Query": "select u1.col from `user` as u1 where not :__sq_has_values1", + "Query": "select u1.col from `user` as u1 where not :__sq_has_values", "Table": 
"`user`" } ] @@ -4663,62 +2860,19 @@ { "comment": "The outer and first inner are SelectEqualUnique with same Vindex value, the second inner has different Vindex value", "query": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 5) and id in (select user_extra.col from user_extra where user_extra.user_id = 4)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 5) and id in (select user_extra.col from user_extra where user_extra.user_id = 4)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra where user_extra.user_id = 4", - "Table": "user_extra", - "Values": [ - "INT64(4)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 5 and :__sq_has_values1 = 1 and id in ::__sq1 and id not in (select user_extra.col from user_extra where user_extra.user_id = 5)", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 5) and id in (select user_extra.col from user_extra where user_extra.user_id = 4)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values2", + 
"__sq_has_values", "__sq2" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -4734,6 +2888,7 @@ "Vindex": "user_index" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -4741,7 +2896,7 @@ "Sharded": true }, "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 5 and id not in (select user_extra.col from user_extra where user_extra.user_id = 5) and (:__sq_has_values2 = 1 and id in ::__sq2)", + "Query": "select id from `user` where id = 5 and id not in (select user_extra.col from user_extra where user_extra.user_id = 5) and :__sq_has_values and id in ::__sq2", "Table": "`user`", "Values": [ "INT64(5)" @@ -4759,62 +2914,19 @@ { "comment": "The outer and second inner are SelectEqualUnique with same Vindex value, the first inner has different Vindex value", "query": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 4) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 4) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutNotIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra where user_extra.user_id = 4", - "Table": "user_extra", - "Values": [ - "INT64(4)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - 
"FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 5 and id in (select user_extra.col from user_extra where user_extra.user_id = 5) and (:__sq_has_values1 = 0 or id not in ::__sq1)", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 4) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutNotIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -4830,6 +2942,7 @@ "Vindex": "user_index" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -4837,7 +2950,7 @@ "Sharded": true }, "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 5 and (:__sq_has_values1 = 0 or id not in ::__sq1) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)", + "Query": "select id from `user` where id = 5 and id in (select user_extra.col from user_extra where user_extra.user_id = 5) and not :__sq_has_values and id not in ::__sq1", "Table": "`user`", "Values": [ "INT64(5)" @@ -4855,22 +2968,7 @@ { "comment": "two correlated subqueries that can be merge in a single route", "query": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select 
ue.user_id from user_extra as ue where ue.user_id = u.id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id from `user` as u where 1 != 1", - "Query": "select u.id from `user` as u where u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)", "Instructions": { @@ -4893,22 +2991,7 @@ { "comment": "transitive closures for the win", "query": "select id from user where user.id = user.col and user.col = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.id = user.col and user.col = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.id = `user`.col and `user`.col = 5", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.id = user.col and user.col = 5", "Instructions": { @@ -4934,44 +3017,7 @@ { "comment": "join with transitive closures", "query": "select id from user, user_extra where user.id = user_extra.col and user_extra.col = user_extra.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user, user_extra where user.id = user_extra.col and user_extra.col = user_extra.user_id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - 
"OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.col = :user_id and user_extra.col = user_extra.user_id", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user, user_extra where user.id = user_extra.col and user_extra.col = user_extra.user_id", "Instructions": { @@ -4994,44 +3040,7 @@ { "comment": "not supported transitive closures with equality inside of an OR", "query": "select id from user, user_extra where user.id = user_extra.col and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.name)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user, user_extra where user.id = user_extra.col and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.name)", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.col = :user_id and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.`name`)", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": 
"SELECT", "Original": "select id from user, user_extra where user.id = user_extra.col and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.name)", "Instructions": { @@ -5080,22 +3089,7 @@ { "comment": "routing rules subquery merge with alias", "query": "select col from user where id = (select id from route1 as a where a.id = user.id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where id = (select id from route1 as a where a.id = user.id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where id = (select id from `user` as a where a.id = `user`.id)", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where id = (select id from route1 as a where a.id = user.id)", "Instructions": { @@ -5171,74 +3165,18 @@ "QueryType": "SELECT", "Original": "select user.id from user left join user_extra on user.col = user_extra.col where user_extra.id is null", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Filter", - "Predicate": "user_extra.id is null", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "LeftJoin", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "user_col": 1 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select user_extra.id from user_extra where 
user_extra.col = :user_col", - "Table": "user_extra" - } - ] - } - ] - } - ] - }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "subquery on other table", - "query": "select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')", - "Instructions": { - "OperatorType": "Distinct", + "OperatorType": "Filter", + "Predicate": "user_extra.id is null", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_col": 1 + }, + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -5247,9 +3185,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where col2 = 'a'", - "Table": "music" + "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1", + "Query": "select `user`.id, `user`.col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -5258,33 +3196,44 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1", - "Table": "`user`" + "FieldQuery": "select user_extra.id from user_extra where 1 != 1", + "Query": "select user_extra.id from user_extra where user_extra.col = :user_col", + "Table": "user_extra" } ] } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "subquery on other table", + "query": "select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')", + 
"plan": { "QueryType": "SELECT", "Original": "select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')", "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "(0|2), 1", + "OperatorType": "Distinct", + "Collations": [ + "(0:2)", + "1" + ], "ResultColumns": 2, "Inputs": [ { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -5296,6 +3245,7 @@ "Table": "music" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -5303,8 +3253,7 @@ "Sharded": true }, "FieldQuery": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where 1 != 1", - "OrderBy": "(0|2) ASC, 1 ASC", - "Query": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1 order by `user`.id asc, `user`.col asc", + "Query": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where :__sq_has_values and `user`.col in ::__sq1", "Table": "`user`" } ] @@ -5320,26 +3269,7 @@ { "comment": "should use colb_colc_map as first column of the vindex is present in predicate", "query": "select * from multicolvin where column_b = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicolvin where column_b = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicolvin where 1 != 1", - "Query": "select * from multicolvin where column_b = 1", - "Table": "multicolvin", - "Values": [ - "INT64(1)" - ], - "Vindex": "colb_colc_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicolvin where column_b = 1", "Instructions": { 
@@ -5365,26 +3295,7 @@ { "comment": "should only use first column of the vindex colb_colc_map", "query": "select * from multicolvin where column_b = 1 and column_c = 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicolvin where column_b = 1 and column_c = 2", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicolvin where 1 != 1", - "Query": "select * from multicolvin where column_b = 1 and column_c = 2", - "Table": "multicolvin", - "Values": [ - "INT64(1)" - ], - "Vindex": "colb_colc_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicolvin where column_b = 1 and column_c = 2", "Instructions": { @@ -5410,26 +3321,7 @@ { "comment": "uses vindex colb_colc_map", "query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicolvin where 1 != 1", - "Query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3", - "Table": "multicolvin", - "Values": [ - "INT64(1)" - ], - "Vindex": "colb_colc_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3", "Instructions": { @@ -5453,28 +3345,9 @@ } }, { - "comment": "v3 takes cola_map, gen4 takes colb_colc_map, may be based on map key ordering", + "comment": "colb_colc_map vindex for routing", "query": "select * from multicolvin where column_a = 3 and column_b = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicolvin where column_a = 3 and column_b = 1", - 
"Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicolvin where 1 != 1", - "Query": "select * from multicolvin where column_a = 3 and column_b = 1", - "Table": "multicolvin", - "Values": [ - "INT64(3)" - ], - "Vindex": "cola_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicolvin where column_a = 3 and column_b = 1", "Instructions": { @@ -5498,24 +3371,9 @@ } }, { - "comment": "multi column vindex produces Equal plan in gen4 and Scatter in v3", + "comment": "multi column vindex produces Equal plan", "query": "select * from multicol_tbl where cola = 1 and colb = 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where cola = 1 and colb = 2", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where cola = 1 and colb = 2", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where cola = 1 and colb = 2", "Instructions": { @@ -5542,22 +3400,7 @@ { "comment": "multi column vindex with different order places the vindex keys in correct order", "query": "select * from multicol_tbl where colb = 2 and cola = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where colb = 2 and cola = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where colb = 2 and cola = 1", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where colb = 2 and cola = 1", 
"Instructions": { @@ -5582,24 +3425,9 @@ } }, { - "comment": "multi column vindex produces IN plan in gen4 and Scatter in v3", + "comment": "multi column vindex produces IN plan", "query": "select * from multicol_tbl where cola in (1,2) and colb in (3,4)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where cola in (1,2) and colb in (3,4)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where cola in (1, 2) and colb in (3, 4)", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where cola in (1,2) and colb in (3,4)", "Instructions": { @@ -5624,24 +3452,9 @@ } }, { - "comment": "multi column vindex with different order places the vindex keys in correct order in IN plan in gen4", + "comment": "multi column vindex with different order places the vindex keys in correct order in IN plan", "query": "select * from multicol_tbl where colb in (3,4) and cola in (1,2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where colb in (3,4) and cola in (1,2)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where colb in (3, 4) and cola in (1, 2)", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where colb in (3,4) and cola in (1,2)", "Instructions": { @@ -5668,22 +3481,7 @@ { "comment": "multi column vindex with different order with one IN predicate and one equality", "query": "select * from multicol_tbl where colb = 1 and cola in (3,4)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from 
multicol_tbl where colb = 1 and cola in (3,4)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where colb = 1 and cola in (3, 4)", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where colb = 1 and cola in (3,4)", "Instructions": { @@ -5710,22 +3508,7 @@ { "comment": "deconstruct tuple equality comparisons", "query": "select id from user where (id, name) = (34, 'apa')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (id, name) = (34, 'apa')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (id, `name`) = (34, 'apa')", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (id, name) = (34, 'apa')", "Instructions": { @@ -5751,22 +3534,7 @@ { "comment": "multi column vindex with both IN predicate and equality predicate", "query": "select * from multicol_tbl where cola in (1,10) and cola = 4 and colb in (5,6) and colb = 7", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where cola in (1,10) and cola = 4 and colb in (5,6) and colb = 7", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where cola in (1, 10) and cola = 4 and colb in (5, 6) and colb = 7", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where cola in (1,10) and cola = 4 and colb in (5,6) and colb = 
7", "Instructions": { @@ -5793,22 +3561,7 @@ { "comment": "multi column vindex with one column with equal followed by IN predicate, ordering matters for now", "query": "select * from multicol_tbl where colb = 4 and colb in (1,10) and cola in (5,6)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where colb = 4 and colb in (1,10) and cola in (5,6)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where colb = 4 and colb in (1, 10) and cola in (5, 6)", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where colb = 4 and colb in (1,10) and cola in (5,6)", "Instructions": { @@ -5835,22 +3588,7 @@ { "comment": "multi column vindex with one column with IN followed by equal predicate, ordering matters for now", "query": "select * from multicol_tbl where colb in (1,10) and colb = 4 and cola in (5,6)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where colb in (1,10) and colb = 4 and cola in (5,6)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where colb in (1, 10) and colb = 4 and cola in (5, 6)", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where colb in (1,10) and colb = 4 and cola in (5,6)", "Instructions": { @@ -5877,22 +3615,7 @@ { "comment": "multi column vindex with better plan selection", "query": "select * from multicol_tbl where colb in (1,2) and cola IN (3,4) and cola = 5 and colb = 6", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where colb in 
(1,2) and cola IN (3,4) and cola = 5 and colb = 6", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where colb in (1, 2) and cola in (3, 4) and cola = 5 and colb = 6", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where colb in (1,2) and cola IN (3,4) and cola = 5 and colb = 6", "Instructions": { @@ -5919,22 +3642,7 @@ { "comment": "multi column vindex as tuple", "query": "select * from multicol_tbl where (cola,colb) in ((1,2),(3,4))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where (cola,colb) in ((1,2),(3,4))", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where (cola, colb) in ((1, 2), (3, 4))", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where (cola,colb) in ((1,2),(3,4))", "Instructions": { @@ -5961,22 +3669,7 @@ { "comment": "multi column vindex, partial vindex with SelectEqual", "query": "select * from multicol_tbl where cola = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where cola = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where cola = 1", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where cola = 1", "Instructions": { @@ -5993,31 +3686,16 @@ "INT64(1)" ], "Vindex": "multicolIdx" - }, - "TablesUsed": 
[ - "user.multicol_tbl" - ] - } - }, - { - "comment": "multi column vindex, partial vindex with SelectEqual over full vindex with SelectIN", - "query": "select * from multicol_tbl where cola = 1 and colb in (2,3)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where cola = 1 and colb in (2,3)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where cola = 1 and colb in (2, 3)", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.multicol_tbl" + ] + } + }, + { + "comment": "multi column vindex, partial vindex with SelectEqual over full vindex with SelectIN", + "query": "select * from multicol_tbl where cola = 1 and colb in (2,3)", + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where cola = 1 and colb in (2,3)", "Instructions": { @@ -6042,7 +3720,7 @@ } }, { - "comment": "left join with where clause - should be handled by gen4 but still isn't", + "comment": "left join with where clause", "query": "select 0 from unsharded_a left join unsharded_b on unsharded_a.col = unsharded_b.col where coalesce(unsharded_b.col, 4) = 5", "plan": { "QueryType": "SELECT", @@ -6225,22 +3903,7 @@ { "comment": "optimize ORs to IN route op codes #1", "query": "select col from user where id = 1 or id = 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where id = 1 or id = 2", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where id = 1 or id = 2", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where id = 1 or id = 2", "Instructions": { @@ -6266,22 +3929,7 @@ { 
"comment": "optimize ORs to IN route op codes #2", "query": "select col from user where id = 1 or id = 2 or id = 3", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where id = 1 or id = 2 or id = 3", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where id = 1 or id = 2 or id = 3", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where id = 1 or id = 2 or id = 3", "Instructions": { @@ -6307,22 +3955,7 @@ { "comment": "optimize ORs to IN route op codes #3", "query": "select col from user where (id = 1 or id = 2) or (id = 3 or id = 4)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where (id = 1 or id = 2) or (id = 3 or id = 4)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where id = 1 or id = 2 or (id = 3 or id = 4)", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where (id = 1 or id = 2) or (id = 3 or id = 4)", "Instructions": { @@ -6348,26 +3981,7 @@ { "comment": "Don't pick a vindex for an IS NULL predicate if it's a lookup vindex", "query": "select id from music where id is null and user_id in (1,2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where id is null and user_id in (1,2)", - "Instructions": { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where id is null and user_id in ::__vals", - "Table": "music", - "Values": [ - "(INT64(1), INT64(2))" - ], - "Vindex": 
"user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from music where id is null and user_id in (1,2)", "Instructions": { @@ -6393,22 +4007,7 @@ { "comment": "Self referencing columns in HAVING should work", "query": "select a+2 as a from user having a = 42", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a+2 as a from user having a = 42", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a + 2 as a from `user` where 1 != 1", - "Query": "select a + 2 as a from `user` having a = 42", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a+2 as a from user having a = 42", "Instructions": { @@ -6430,22 +4029,7 @@ { "comment": "HAVING predicates that use table columns are safe to rewrite if we can move them to the WHERE clause", "query": "select user.col + 2 as a from user having a = 42", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col + 2 as a from user having a = 42", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col + 2 as a from `user` where 1 != 1", - "Query": "select `user`.col + 2 as a from `user` having a = 42", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col + 2 as a from user having a = 42", "Instructions": { @@ -6467,22 +4051,7 @@ { "comment": "HAVING predicates that use table columns should not get rewritten on unsharded keyspaces", "query": "select col + 2 as a from unsharded having a = 42", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col + 2 as a from unsharded having a = 42", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select col + 2 as 
a from unsharded where 1 != 1", - "Query": "select col + 2 as a from unsharded having a = 42", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col + 2 as a from unsharded having a = 42", "Instructions": { @@ -6504,22 +4073,7 @@ { "comment": "Single table unique vindex route hiding behind a silly OR", "query": "select id from user where (id = 5 and name ='apa') or (id = 5 and foo = 'bar')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (id = 5 and name ='apa') or (id = 5 and foo = 'bar')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 5 and `name` = 'apa' or id = 5 and foo = 'bar'", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (id = 5 and name ='apa') or (id = 5 and foo = 'bar')", "Instructions": { @@ -6545,22 +4099,7 @@ { "comment": "Single table IN vindex route hiding behind OR", "query": "select id from user where (id = 5 and name ='foo') or (id = 12 and name = 'bar')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (id = 5 and name ='foo') or (id = 12 and name = 'bar')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 5 and `name` = 'foo' or id = 12 and `name` = 'bar'", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (id = 5 and name ='foo') or (id = 12 and name = 'bar')", "Instructions": { @@ -6586,66 +4125,58 @@ { "comment": "Like clause evaluated on the vtgate", "query": "select a.textcol1 from user a join user b where a.textcol1 = 
b.textcol2 group by a.textcol1 having repeat(a.textcol1,sum(a.id)) like \"And%res\"", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a.textcol1 from user a join user b where a.textcol1 = b.textcol2 group by a.textcol1 having repeat(a.textcol1,sum(a.id)) like \"And%res\"", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Filter", + "Predicate": "repeat(a.textcol1, sum(a.id)) like 'And%res'", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": "repeat(a.textcol1, :1) like 'And%res'", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum(1) AS sum(a.id)", + "GroupBy": "0 COLLATE latin1_swedish_ci", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum(1) AS sum(a.id)", - "GroupBy": "0 COLLATE latin1_swedish_ci", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 2] as textcol1", + "[COLUMN 0] * [COLUMN 1] as sum(a.id)" + ], "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 0] as textcol1", - "[COLUMN 1] * COALESCE([COLUMN 2], INT64(1)) as sum(a.id)" - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1", + "JoinVars": { + "a_textcol1": 1 + }, + "TableName": "`user`_`user`", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,R:1", - "JoinVars": { - "a_textcol1": 0 + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select sum(a.id), a.textcol1 from `user` as a where 1 != 1 group by a.textcol1", + "OrderBy": "1 ASC COLLATE latin1_swedish_ci", + "Query": "select sum(a.id), a.textcol1 from `user` as a group by a.textcol1 order by a.textcol1 asc", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + 
"Sharded": true }, - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a.textcol1, sum(a.id) from `user` as a where 1 != 1 group by a.textcol1", - "OrderBy": "0 ASC COLLATE latin1_swedish_ci", - "Query": "select a.textcol1, sum(a.id) from `user` as a group by a.textcol1 order by a.textcol1 asc", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1, count(*) from `user` as b where 1 != 1 group by 1", - "Query": "select 1, count(*) from `user` as b where b.textcol2 = :a_textcol1 group by 1", - "Table": "`user`" - } - ] + "FieldQuery": "select count(*) from `user` as b where 1 != 1 group by .0", + "Query": "select count(*) from `user` as b where b.textcol2 = :a_textcol1 group by .0", + "Table": "`user`" } ] } @@ -6663,22 +4194,7 @@ { "comment": "two predicates that mean the same thing", "query": "select textcol1 from user where foo = 42 and user.foo = 42", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select textcol1 from user where foo = 42 and user.foo = 42", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select textcol1 from `user` where 1 != 1", - "Query": "select textcol1 from `user` where foo = 42 and `user`.foo = 42", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select textcol1 from user where foo = 42 and user.foo = 42", "Instructions": { @@ -6700,36 +4216,35 @@ { "comment": "must merge subquery with the right side of the join", "query": "select 1 from unsharded join user u1 where exists (select 1 from unsharded u2 where u1.bar = u2.baz)", - "v3-plan": "VT12001: unsupported: cross-shard correlated subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": 
"select 1 from unsharded join user u1 where exists (select 1 from unsharded u2 where u1.bar = u2.baz)", "Instructions": { - "OperatorType": "SemiJoin", - "JoinVars": { - "u1_bar": 0 - }, - "ProjectedIndexes": "-2", + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", "TableName": "unsharded_`user`_unsharded", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0,L:0", - "TableName": "unsharded_`user`", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 from unsharded where 1 != 1", + "Query": "select 1 from unsharded", + "Table": "unsharded" + }, + { + "OperatorType": "SemiJoin", + "JoinVars": { + "u1_bar": 0 + }, + "TableName": "`user`_unsharded", "Inputs": [ { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded where 1 != 1", - "Query": "select 1 from unsharded", - "Table": "unsharded" - }, - { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -6739,19 +4254,20 @@ "FieldQuery": "select u1.bar from `user` as u1 where 1 != 1", "Query": "select u1.bar from `user` as u1", "Table": "`user`" + }, + { + "InputName": "SubQuery", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 from unsharded as u2 where 1 != 1", + "Query": "select 1 from unsharded as u2 where u2.baz = :u1_bar", + "Table": "unsharded" } ] - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded as u2 where 1 != 1", - "Query": "select 1 from unsharded as u2 where u2.baz = :u1_bar", - "Table": "unsharded" } ] }, @@ -6828,5 +4344,63 @@ "user.user_extra" ] } + }, + { + "comment": "query that would time out because planning was too slow", + 
"query": "select 1 from user where shard_key = 1 and is_removed = 1 and cmd in ('A','B','C') and not (user_id = 1 and user_id is not null and ts >= 1 and ts <= 2) and not (user_id = 1 and user_id is not null and ts >= 12 and ts <= 13) and not (user_id = 1 and user_id is not null and ts >= 14 and ts <= 15) and not (user_id = 1 and user_id is not null and ts >= 16 and ts <= 17) and not (user_id = 1 and user_id is not null and ts >= 18 and ts <= 19) and not (user_id = 1 and user_id is not null and ts >= 110 and ts <= 111) and not (user_id = 1 and user_id is not null and ts >= 112 and ts <= 113) and not (user_id = 1 and user_id is not null and ts >= 114 and ts <= 115) and not (user_id = 1 and user_id is not null and ts >= 116 and ts <= 117) and not (user_id = 1 and user_id is not null and ts >= 118 and ts <= 119) and not (user_id = 1 and user_id is not null and ts >= 120 and ts <= 121) and not (user_id = 1 and user_id is not null and ts >= 122 and ts <= 123) and not (user_id = 1 and user_id is not null and ts >= 124 and ts <= 125) and not (user_id = 1 and user_id is not null and ts >= 126 and ts <= 127) and not (user_id = 1 and user_id is not null and ts >= 128 and ts <= 129) and not (user_id = 1 and user_id is not null and ts >= 130 and ts <= 131) and not (user_id = 1 and user_id is not null and ts >= 132 and ts <= 133) and not (user_id = 1 and user_id is not null and ts >= 134 and ts <= 135) and not (user_id = 1 and user_id is not null and ts >= 136 and ts <= 137) and not (user_id = 1 and user_id is not null and ts >= 138 and ts <= 139) and not (user_id = 1 and user_id is not null and ts >= 140 and ts <= 141) and not (user_id = 1 and user_id is not null and ts >= 142 and ts <= 143) and not (user_id = 1 and user_id is not null and ts >= 144 and ts <= 145) and not (user_id = 1 and user_id is not null and ts >= 146 and ts <= 147) and not (user_id = 1 and user_id is not null and ts >= 148 and ts <= 149) and not (user_id = 1 and user_id is not null and ts >= 150 and ts <= 
151) and not (user_id = 1 and user_id is not null and ts >= 152 and ts <= 153) and not (user_id = 1 and user_id is not null and ts >= 154 and ts <= 155) and not (user_id = 1 and user_id is not null and ts >= 156 and ts <= 157) and not (user_id = 1 and user_id is not null and ts >= 158 and ts <= 159) and not (user_id = 1 and user_id is not null and ts >= 160 and ts <= 161) and not (user_id = 1 and user_id is not null and ts >= 162 and ts <= 163) and not (user_id = 1 and user_id is not null and ts >= 164 and ts <= 165) and not (user_id = 1 and user_id is not null and ts >= 166 and ts <= 167) and not (user_id = 1 and user_id is not null and ts >= 168 and ts <= 169) and not (user_id = 1 and user_id is not null and ts >= 170 and ts <= 171) and not (user_id = 1 and user_id is not null and ts >= 172 and ts <= 173) and not (user_id = 1 and user_id is not null and ts >= 174 and ts <= 175) and not (user_id = 1 and user_id is not null and ts >= 176 and ts <= 177) and not (user_id = 1 and user_id is not null and ts >= 178 and ts <= 179) and not (user_id = 1 and user_id is not null and ts >= 180 and ts <= 181) and not (user_id = 1 and user_id is not null and ts >= 182 and ts <= 183) and not (user_id = 1 and user_id is not null and ts >= 184 and ts <= 185) and not (user_id = 1 and user_id is not null and ts >= 186 and ts <= 187) and not (user_id = 1 and user_id is not null and ts >= 188 and ts <= 189) and not (user_id = 1 and user_id is not null and ts >= 190 and ts <= 191) and not (user_id = 1 and user_id is not null and ts >= 192 and ts <= 193) and not (user_id = 1 and user_id is not null and ts >= 194 and ts <= 195) and not (user_id = 1 and user_id is not null and ts >= 196 and ts <= 197) and not (user_id = 1 and user_id is not null and ts >= 198 and ts <= 199) and not (user_id = 1 and user_id is not null and ts >= 1100 and ts <= 1101) and not (user_id = 1 and user_id is not null and ts >= 1102 and ts <= 1103) and not (user_id = 1 and user_id is not null and ts >= 1104 and ts 
<= 1105) and not (user_id = 1 and user_id is not null and ts >= 1106 and ts <= 1107) and not (user_id = 1 and user_id is not null and ts >= 1108 and ts <= 1109) and not (user_id = 1 and user_id is not null and ts >= 1110 and ts <= 1111) and not (user_id = 1 and user_id is not null and ts >= 1112 and ts <= 1113) and not (user_id = 1 and user_id is not null and ts >= 1114 and ts <= 1115) and not (user_id = 1 and user_id is not null and ts >= 1116 and ts <= 1117) and not (user_id = 1 and user_id is not null and ts >= 1118 and ts <= 1119) and not (user_id = 1 and user_id is not null and ts >= 1120 and ts <= 1121) and not (user_id = 1 and user_id is not null and ts >= 1122 and ts <= 1123) and not (user_id = 1 and user_id is not null and ts >= 1124 and ts <= 1125) and not (user_id = 1 and user_id is not null and ts >= 1126 and ts <= 1127) and not (user_id = 1 and user_id is not null and ts >= 1128 and ts <= 1129) and not (user_id = 1 and user_id is not null and ts >= 1130 and ts <= 1131) and not (user_id = 1 and user_id is not null and ts >= 1132 and ts <= 1133) and not (user_id = 1 and user_id is not null and ts >= 1134 and ts <= 1135) and not (user_id = 1 and user_id is not null and ts >= 1136 and ts <= 1137) and not (user_id = 1 and user_id is not null and ts >= 1138 and ts <= 1139) and not (user_id = 1 and user_id is not null and ts >= 1140 and ts <= 1141) and not (user_id = 1 and user_id is not null and ts >= 1142 and ts <= 1143) and not (user_id = 1 and user_id is not null and ts >= 1144 and ts <= 1145) and not (user_id = 1 and user_id is not null and ts >= 1146 and ts <= 1147) and not (user_id = 1 and user_id is not null and ts >= 1148 and ts <= 1149) and not (user_id = 1 and user_id is not null and ts >= 1150 and ts <= 1151) and not (user_id = 1 and user_id is not null and ts >= 1152 and ts <= 1153) and not (user_id = 1 and user_id is not null and ts >= 1154 and ts <= 1155) and not (user_id = 1 and user_id is not null and ts >= 1156 and ts <= 1157) and not 
(user_id = 1 and user_id is not null and ts >= 1158 and ts <= 1159) and not (user_id = 1 and user_id is not null and ts >= 1160 and ts <= 1161) and not (user_id = 1 and user_id is not null and ts >= 1162 and ts <= 1163) and not (user_id = 1 and user_id is not null and ts >= 1164 and ts <= 1165) and not (user_id = 1 and user_id is not null and ts >= 1166 and ts <= 1167) and not (user_id = 1 and user_id is not null and ts >= 1168 and ts <= 1169) and not (user_id = 1 and user_id is not null and ts >= 1170 and ts <= 1171) and not (user_id = 1 and user_id is not null and ts >= 1172 and ts <= 1173) and not (user_id = 1 and user_id is not null and ts >= 1174 and ts <= 1175) and not (user_id = 1 and user_id is not null and ts >= 1176 and ts <= 1177) and not (user_id = 1 and user_id is not null and ts >= 1178 and ts <= 1179) and not (user_id = 1 and user_id is not null and ts >= 1180 and ts <= 1181) and not (user_id = 1 and user_id is not null and ts >= 1182 and ts <= 1183) and not (user_id = 1 and user_id is not null and ts >= 1184 and ts <= 1185) and not (user_id = 1 and user_id is not null and ts >= 1186 and ts <= 1187) and not (user_id = 1 and user_id is not null and ts >= 1188 and ts <= 1189) and not (user_id = 1 and user_id is not null and ts >= 1190 and ts <= 1191) and not (user_id = 1 and user_id is not null and ts >= 1192 and ts <= 1193) and not (user_id = 1 and user_id is not null and ts >= 1194 and ts <= 1195) and not (user_id = 1 and user_id is not null and ts >= 1196 and ts <= 1197) and not (user_id = 1 and user_id is not null and ts >= 1198 and ts <= 1199) and not (user_id = 1 and user_id is not null and ts >= 1200 and ts <= 1201) and not (user_id = 1 and user_id is not null and ts >= 1202 and ts <= 1203) and not (user_id = 1 and user_id is not null and ts >= 1204 and ts <= 1205) and not (user_id = 1 and user_id is not null and ts >= 1206 and ts <= 1207) and not (user_id = 1 and user_id is not null and ts >= 1208 and ts <= 1209) and not (user_id = 1 and 
user_id is not null and ts >= 1210 and ts <= 1211) and not (user_id = 1 and user_id is not null and ts >= 1212 and ts <= 1213) and not (user_id = 1 and user_id is not null and ts >= 1214 and ts <= 1215) and not (user_id = 1 and user_id is not null and ts >= 1216 and ts <= 1217) and not (user_id = 1 and user_id is not null and ts >= 1218 and ts <= 1219) and not (user_id = 1 and user_id is not null and ts >= 1220 and ts <= 1221) and not (user_id = 1 and user_id is not null and ts >= 1222 and ts <= 1223) and not (user_id = 1 and user_id is not null and ts >= 1224 and ts <= 1225) and not (user_id = 1 and user_id is not null and ts >= 1226 and ts <= 1227) and not (user_id = 1 and user_id is not null and ts >= 1228 and ts <= 1229) and not (user_id = 1 and user_id is not null and ts >= 1230 and ts <= 1231) and not (user_id = 1 and user_id is not null and ts >= 1232 and ts <= 1233) and not (user_id = 1 and user_id is not null and ts >= 1234 and ts <= 1235) and not (user_id = 1 and user_id is not null and ts >= 1236 and ts <= 1237) and not (user_id = 1 and user_id is not null and ts >= 1238 and ts <= 1239) and not (user_id = 1 and user_id is not null and ts >= 1240 and ts <= 1241) and not (user_id = 1 and user_id is not null and ts >= 1242 and ts <= 1243) and not (user_id = 1 and user_id is not null and ts >= 1244 and ts <= 1245) and not (user_id = 1 and user_id is not null and ts >= 1246 and ts <= 1247) and not (user_id = 1 and user_id is not null and ts >= 1248 and ts <= 1249) and not (user_id = 1 and user_id is not null and ts >= 1250 and ts <= 1251) and not (user_id = 1 and user_id is not null and ts >= 1252 and ts <= 1253) and not (user_id = 1 and user_id is not null and ts >= 1254 and ts <= 1255) and not (user_id = 1 and user_id is not null and ts >= 1256 and ts <= 1257) and not (user_id = 1 and user_id is not null and ts >= 1258 and ts <= 1259) and not (user_id = 1 and user_id is not null and ts >= 1260 and ts <= 1261) and not (user_id = 1 and user_id is not null and 
ts >= 1262 and ts <= 1263) and not (user_id = 1 and user_id is not null and ts >= 1264 and ts <= 1265) and not (user_id = 1 and user_id is not null and ts >= 1266 and ts <= 1267) and not (user_id = 1 and user_id is not null and ts >= 1268 and ts <= 1269) and not (user_id = 1 and user_id is not null and ts >= 1270 and ts <= 1271) and not (user_id = 1 and user_id is not null and ts >= 1272 and ts <= 1273) and not (user_id = 1 and user_id is not null and ts >= 1274 and ts <= 1275) and not (user_id = 1 and user_id is not null and ts >= 1276 and ts <= 1277) and not (user_id = 1 and user_id is not null and ts >= 1278 and ts <= 1279) and not (user_id = 1 and user_id is not null and ts >= 1280 and ts <= 1281) and not (user_id = 1 and user_id is not null and ts >= 1282 and ts <= 1283) and not (user_id = 1 and user_id is not null and ts >= 1284 and ts <= 1285) and not (user_id = 1 and user_id is not null and ts >= 1286 and ts <= 1287) and not (user_id = 1 and user_id is not null and ts >= 1288 and ts <= 1289) and not (user_id = 1 and user_id is not null and ts >= 1290 and ts <= 1291) and not (user_id = 1 and user_id is not null and ts >= 1292 and ts <= 1293) and not (user_id = 1 and user_id is not null and ts >= 1294 and ts <= 1295) and not (user_id = 1 and user_id is not null and ts >= 1296 and ts <= 1297) and not (user_id = 1 and user_id is not null and ts >= 1298 and ts <= 1299) and not (user_id = 1 and user_id is not null and ts >= 1300 and ts <= 1301) and not (user_id = 1 and user_id is not null and ts >= 1302 and ts <= 1303) and not (user_id = 1 and user_id is not null and ts >= 1304 and ts <= 1305) and not (user_id = 1 and user_id is not null and ts >= 1306 and ts <= 1307) and not (user_id = 1 and user_id is not null and ts >= 1308 and ts <= 1309) and not (user_id = 1 and user_id is not null and ts >= 1310 and ts <= 1311) and not (user_id = 1 and user_id is not null and ts >= 1312 and ts <= 1313) and not (user_id = 1 and user_id is not null and ts >= 1314 and ts <= 
1315) and not (user_id = 1 and user_id is not null and ts >= 1316 and ts <= 1317) and not (user_id = 1 and user_id is not null and ts >= 1318 and ts <= 1319) and not (user_id = 1 and user_id is not null and ts >= 1320 and ts <= 1321) and not (user_id = 1 and user_id is not null and ts >= 1322 and ts <= 1323) and not (user_id = 1 and user_id is not null and ts >= 1324 and ts <= 1325) and not (user_id = 1 and user_id is not null and ts >= 1326 and ts <= 1327) and not (user_id = 1 and user_id is not null and ts >= 1328 and ts <= 1329) and not (user_id = 1 and user_id is not null and ts >= 1330 and ts <= 1331) and not (user_id = 1 and user_id is not null and ts >= 1332 and ts <= 1333) and not (user_id = 1 and user_id is not null and ts >= 1334 and ts <= 1335) and not (user_id = 1 and user_id is not null and ts >= 1336 and ts <= 1337) and not (user_id = 1 and user_id is not null and ts >= 1338 and ts <= 1339) and not (user_id = 1 and user_id is not null and ts >= 1340 and ts <= 1341) and not (user_id = 1 and user_id is not null and ts >= 1342 and ts <= 1343) and not (user_id = 1 and user_id is not null and ts >= 1344 and ts <= 1345) and not (user_id = 1 and user_id is not null and ts >= 1346 and ts <= 1347) and not (user_id = 1 and user_id is not null and ts >= 1348 and ts <= 1349) and not (user_id = 1 and user_id is not null and ts >= 1350 and ts <= 1351) and not (user_id = 1 and user_id is not null and ts >= 1352 and ts <= 1353) and not (user_id = 1 and user_id is not null and ts >= 1354 and ts <= 1355) and not (user_id = 1 and user_id is not null and ts >= 1356 and ts <= 1357) and not (user_id = 1 and user_id is not null and ts >= 1358 and ts <= 1359) and not (user_id = 1 and user_id is not null and ts >= 1360 and ts <= 1361) and not (user_id = 1 and user_id is not null and ts >= 1362 and ts <= 1363) and not (user_id = 1 and user_id is not null and ts >= 1364 and ts <= 1365) and not (user_id = 1 and user_id is not null and ts >= 1366 and ts <= 1367) and not (user_id 
= 1 and user_id is not null and ts >= 1368 and ts <= 1369) and not (user_id = 1 and user_id is not null and ts >= 1370 and ts <= 1371) and not (user_id = 1 and user_id is not null and ts >= 1372 and ts <= 1373) and not (user_id = 1 and user_id is not null and ts >= 1374 and ts <= 1375) and not (user_id = 1 and user_id is not null and ts >= 1376 and ts <= 1377) and not (user_id = 1 and user_id is not null and ts >= 1378 and ts <= 1379) and not (user_id = 1 and user_id is not null and ts >= 1380 and ts <= 1381) and not (user_id = 1 and user_id is not null and ts >= 1382 and ts <= 1383) and not (user_id = 1 and user_id is not null and ts >= 1384 and ts <= 1385) and not (user_id = 1 and user_id is not null and ts >= 1386 and ts <= 1387) and not (user_id = 1 and user_id is not null and ts >= 1388 and ts <= 1389) and not (user_id = 1 and user_id is not null and ts >= 1390 and ts <= 1391) and not (user_id = 1 and user_id is not null and ts >= 1392 and ts <= 1393) and not (user_id = 1 and user_id is not null and ts >= 1394 and ts <= 1395) and not (user_id = 1 and user_id is not null and ts >= 1396 and ts <= 1397) and not (user_id = 1 and user_id is not null and ts >= 1398 and ts <= 1399) and not (user_id = 1 and user_id is not null and ts >= 1400 and ts <= 1401) and not (user_id = 1 and user_id is not null and ts >= 1402 and ts <= 1403) and not (user_id = 1 and user_id is not null and ts >= 1404 and ts <= 1405) and not (user_id = 1 and user_id is not null and ts >= 1406 and ts <= 1407) and not (user_id = 1 and user_id is not null and ts >= 1408 and ts <= 1409) and not (user_id = 1 and user_id is not null and ts >= 1410 and ts <= 1411) and not (user_id = 1 and user_id is not null and ts >= 1412 and ts <= 1413) and not (user_id = 1 and user_id is not null and ts >= 1414 and ts <= 1415) and not (user_id = 1 and user_id is not null and ts >= 1416 and ts <= 1417) and not (user_id = 1 and user_id is not null and ts >= 1418 and ts <= 1419) and not (user_id = 1 and user_id is not 
null and ts >= 1420 and ts <= 1421) and not (user_id = 1 and user_id is not null and ts >= 1422 and ts <= 1423) and not (user_id = 1 and user_id is not null and ts >= 1424 and ts <= 1425) and not (user_id = 1 and user_id is not null and ts >= 1426 and ts <= 1427) and not (user_id = 1 and user_id is not null and ts >= 1428 and ts <= 1429) and not (user_id = 1 and user_id is not null and ts >= 1430 and ts <= 1431) and not (user_id = 1 and user_id is not null and ts >= 1432 and ts <= 1433) and not (user_id = 1 and user_id is not null and ts >= 1434 and ts <= 1435) and not (user_id = 1 and user_id is not null and ts >= 1436 and ts <= 1437) and not (user_id = 1 and user_id is not null and ts >= 1438 and ts <= 1439) and not (user_id = 1 and user_id is not null and ts >= 1440 and ts <= 1441) and not (user_id = 1 and user_id is not null and ts >= 1442 and ts <= 1443) and not (user_id = 1 and user_id is not null and ts >= 1444 and ts <= 1445) and not (user_id = 1 and user_id is not null and ts >= 1446 and ts <= 1447) and not (user_id = 1 and user_id is not null and ts >= 1448 and ts <= 1449) and not (user_id = 1 and user_id is not null and ts >= 1450 and ts <= 1451) and not (user_id = 1 and user_id is not null and ts >= 1452 and ts <= 1453) and not (user_id = 1 and user_id is not null and ts >= 1454 and ts <= 1455) and not (user_id = 1 and user_id is not null and ts >= 1456 and ts <= 1457) and not (user_id = 1 and user_id is not null and ts >= 1458 and ts <= 1459) and not (user_id = 1 and user_id is not null and ts >= 1460 and ts <= 1461) and not (user_id = 1 and user_id is not null and ts >= 1462 and ts <= 1463) and not (user_id = 1 and user_id is not null and ts >= 1464 and ts <= 1465) and not (user_id = 1 and user_id is not null and ts >= 1466 and ts <= 1467) and not (user_id = 1 and user_id is not null and ts >= 1468 and ts <= 1469) and not (user_id = 1 and user_id is not null and ts >= 1470 and ts <= 1471) and not (user_id = 1 and user_id is not null and ts >= 1472 and 
ts <= 1473) and not (user_id = 1 and user_id is not null and ts >= 1474 and ts <= 1475) and not (user_id = 1 and user_id is not null and ts >= 1476 and ts <= 1477) and not (user_id = 1 and user_id is not null and ts >= 1478 and ts <= 1479) and not (user_id = 1 and user_id is not null and ts >= 1480 and ts <= 1481) and not (user_id = 1 and user_id is not null and ts >= 1482 and ts <= 1483) and not (user_id = 1 and user_id is not null and ts >= 1484 and ts <= 1485) and not (user_id = 1 and user_id is not null and ts >= 1486 and ts <= 1487) and not (user_id = 1 and user_id is not null and ts >= 1488 and ts <= 1489) and not (user_id = 1 and user_id is not null and ts >= 1490 and ts <= 1491) and not (user_id = 1 and user_id is not null and ts >= 1492 and ts <= 1493) and not (user_id = 1 and user_id is not null and ts >= 1494 and ts <= 1495) and not (user_id = 1 and user_id is not null and ts >= 1496 and ts <= 1497) and not (user_id = 1 and user_id is not null and ts >= 1498 and ts <= 1499) and not (user_id = 1 and user_id is not null and ts >= 1500 and ts <= 1501) and not (user_id = 1 and user_id is not null and ts >= 1502 and ts <= 1503) and not (user_id = 1 and user_id is not null and ts >= 1504 and ts <= 1505) and not (user_id = 1 and user_id is not null and ts >= 1506 and ts <= 1507) and not (user_id = 1 and user_id is not null and ts >= 1508 and ts <= 1509) and not (user_id = 1 and user_id is not null and ts >= 1510 and ts <= 1511) and not (user_id = 1 and user_id is not null and ts >= 1512 and ts <= 1513) and not (user_id = 1 and user_id is not null and ts >= 1514 and ts <= 1515) and not (user_id = 1 and user_id is not null and ts >= 1516 and ts <= 1517) and not (user_id = 1 and user_id is not null and ts >= 1518 and ts <= 1519) and not (user_id = 1 and user_id is not null and ts >= 1520 and ts <= 1521) and not (user_id = 1 and user_id is not null and ts >= 1522 and ts <= 1523) and not (user_id = 1 and user_id is not null and ts >= 1524 and ts <= 1525) and not 
(user_id = 1 and user_id is not null and ts >= 1526 and ts <= 1527) and not (user_id = 1 and user_id is not null and ts >= 1528 and ts <= 1529) and not (user_id = 1 and user_id is not null and ts >= 1530 and ts <= 1531) and not (user_id = 1 and user_id is not null and ts >= 1532 and ts <= 1533) and not (user_id = 1 and user_id is not null and ts >= 1534 and ts <= 1535) and not (user_id = 1 and user_id is not null and ts >= 1536 and ts <= 1537) and not (user_id = 1 and user_id is not null and ts >= 1538 and ts <= 1539) and not (user_id = 1 and user_id is not null and ts >= 1540 and ts <= 1541) and not (user_id = 1 and user_id is not null and ts >= 1542 and ts <= 1543) and not (user_id = 1 and user_id is not null and ts >= 1544 and ts <= 1545) and not (user_id = 1 and user_id is not null and ts >= 1546 and ts <= 1547) and not (user_id = 1 and user_id is not null and ts >= 1548 and ts <= 1549) and not (user_id = 1 and user_id is not null and ts >= 1550 and ts <= 1551) and not (user_id = 1 and user_id is not null and ts >= 1552 and ts <= 1553) and not (user_id = 1 and user_id is not null and ts >= 1554 and ts <= 1555) and not (user_id = 1 and user_id is not null and ts >= 1556 and ts <= 1557) and not (user_id = 1 and user_id is not null and ts >= 1558 and ts <= 1559) and not (user_id = 1 and user_id is not null and ts >= 1560 and ts <= 1561) and not (user_id = 1 and user_id is not null and ts >= 1562 and ts <= 1563) and not (user_id = 1 and user_id is not null and ts >= 1564 and ts <= 1565) and not (user_id = 1 and user_id is not null and ts >= 1566 and ts <= 1567) and not (user_id = 1 and user_id is not null and ts >= 1568 and ts <= 1569) and not (user_id = 1 and user_id is not null and ts >= 1570 and ts <= 1571) and not (user_id = 1 and user_id is not null and ts >= 1572 and ts <= 1573) and not (user_id = 1 and user_id is not null and ts >= 1574 and ts <= 1575) and not (user_id = 1 and user_id is not null and ts >= 1576 and ts <= 1577) and not (user_id = 1 and 
user_id is not null and ts >= 1578 and ts <= 1579) and not (user_id = 1 and user_id is not null and ts >= 1580 and ts <= 1581) and not (user_id = 1 and user_id is not null and ts >= 1582 and ts <= 1583) and not (user_id = 1 and user_id is not null and ts >= 1584 and ts <= 1585) and not (user_id = 1 and user_id is not null and ts >= 1586 and ts <= 1587) and not (user_id = 1 and user_id is not null and ts >= 1588 and ts <= 1589) and not (user_id = 1 and user_id is not null and ts >= 1590 and ts <= 1591) and not (user_id = 1 and user_id is not null and ts >= 1592 and ts <= 1593) and not (user_id = 1 and user_id is not null and ts >= 1594 and ts <= 1595) and not (user_id = 1 and user_id is not null and ts >= 1596 and ts <= 1597) and not (user_id = 1 and user_id is not null and ts >= 1598 and ts <= 1599) and not (user_id = 1 and user_id is not null and ts >= 1600 and ts <= 1601) and not (user_id = 1 and user_id is not null and ts >= 1602 and ts <= 1603) and not (user_id = 1 and user_id is not null and ts >= 1604 and ts <= 1605) and not (user_id = 1 and user_id is not null and ts >= 1606 and ts <= 1607) and not (user_id = 1 and user_id is not null and ts >= 1608 and ts <= 1609) and not (user_id = 1 and user_id is not null and ts >= 1610 and ts <= 1611) and not (user_id = 1 and user_id is not null and ts >= 1612 and ts <= 1613) and not (user_id = 1 and user_id is not null and ts >= 1614 and ts <= 1615) and not (user_id = 1 and user_id is not null and ts >= 1616 and ts <= 1617) and not (user_id = 1 and user_id is not null and ts >= 1618 and ts <= 1619) and not (user_id = 1 and user_id is not null and ts >= 1620 and ts <= 1621) and not (user_id = 1 and user_id is not null and ts >= 1622 and ts <= 1623) and not (user_id = 1 and user_id is not null and ts >= 1624 and ts <= 1625) and not (user_id = 1 and user_id is not null and ts >= 1626 and ts <= 1627) and not (user_id = 1 and user_id is not null and ts >= 1628 and ts <= 1629) and not (user_id = 1 and user_id is not null and 
ts >= 1630 and ts <= 1631) and not (user_id = 1 and user_id is not null and ts >= 1632 and ts <= 1633) and not (user_id = 1 and user_id is not null and ts >= 1634 and ts <= 1635) and not (user_id = 1 and user_id is not null and ts >= 1636 and ts <= 1637) and not (user_id = 1 and user_id is not null and ts >= 1638 and ts <= 1639) and not (user_id = 1 and user_id is not null and ts >= 1640 and ts <= 1641) and not (user_id = 1 and user_id is not null and ts >= 1642 and ts <= 1643) and not (user_id = 1 and user_id is not null and ts >= 1644 and ts <= 1645) and not (user_id = 1 and user_id is not null and ts >= 1646 and ts <= 1647) and not (user_id = 1 and user_id is not null and ts >= 1648 and ts <= 1649) and not (user_id = 1 and user_id is not null and ts >= 1650 and ts <= 1651) and not (user_id = 1 and user_id is not null and ts >= 1652 and ts <= 1653) and not (user_id = 1 and user_id is not null and ts >= 1654 and ts <= 1655) and not (user_id = 1 and user_id is not null and ts >= 1656 and ts <= 1657) and not (user_id = 1 and user_id is not null and ts >= 1658 and ts <= 1659) and not (user_id = 1 and user_id is not null and ts >= 1660 and ts <= 1661) and not (user_id = 1 and user_id is not null and ts >= 1662 and ts <= 1663) and not (user_id = 1 and user_id is not null and ts >= 1664 and ts <= 1665) and not (user_id = 1 and user_id is not null and ts >= 1666 and ts <= 1667) and not (user_id = 1 and user_id is not null and ts >= 1668 and ts <= 1669) and not (user_id = 1 and user_id is not null and ts >= 1670 and ts <= 1671) and not (user_id = 1 and user_id is not null and ts >= 1672 and ts <= 1673) and not (user_id = 1 and user_id is not null and ts >= 1674 and ts <= 1675) and not (user_id = 1 and user_id is not null and ts >= 1676 and ts <= 1677) and not (user_id = 1 and user_id is not null and ts >= 1678 and ts <= 1679) and not (user_id = 1 and user_id is not null and ts >= 1680 and ts <= 1681) and not (user_id = 1 and user_id is not null and ts >= 1682 and ts <= 
1683) and not (user_id = 1 and user_id is not null and ts >= 1684 and ts <= 1685) and not (user_id = 1 and user_id is not null and ts >= 1686 and ts <= 1687) and not (user_id = 1 and user_id is not null and ts >= 1688 and ts <= 1689) and not (user_id = 1 and user_id is not null and ts >= 1690 and ts <= 1691) and not (user_id = 1 and user_id is not null and ts >= 1692 and ts <= 1693) and not (user_id = 1 and user_id is not null and ts >= 1694 and ts <= 1695) and not (user_id = 1 and user_id is not null and ts >= 1696 and ts <= 1697) and not (user_id = 1 and user_id is not null and ts >= 1698 and ts <= 1699) and not (user_id = 1 and user_id is not null and ts >= 1700 and ts <= 1701) and not (user_id = 1 and user_id is not null and ts >= 1702 and ts <= 1703) and not (user_id = 1 and user_id is not null and ts >= 1704 and ts <= 1705) and not (user_id = 1 and user_id is not null and ts >= 1706 and ts <= 1707) and not (user_id = 1 and user_id is not null and ts >= 1708 and ts <= 1709) and not (user_id = 1 and user_id is not null and ts >= 1710 and ts <= 1711) and not (user_id = 1 and user_id is not null and ts >= 1712 and ts <= 1713) and not (user_id = 1 and user_id is not null and ts >= 1714 and ts <= 1715) and not (user_id = 1 and user_id is not null and ts >= 1716 and ts <= 1717) and not (user_id = 1 and user_id is not null and ts >= 1718 and ts <= 1719) and not (user_id = 1 and user_id is not null and ts >= 1720 and ts <= 1721) and not (user_id = 1 and user_id is not null and ts >= 1722 and ts <= 1723) and not (user_id = 1 and user_id is not null and ts >= 1724 and ts <= 1725) and not (user_id = 1 and user_id is not null and ts >= 1726 and ts <= 1727) and not (user_id = 1 and user_id is not null and ts >= 1728 and ts <= 1729) and not (user_id = 1 and user_id is not null and ts >= 1730 and ts <= 1731) and not (user_id = 1 and user_id is not null and ts >= 1732 and ts <= 1733) and not (user_id = 1 and user_id is not null and ts >= 1734 and ts <= 1735) and not (user_id 
= 1 and user_id is not null and ts >= 1736 and ts <= 1737) and not (user_id = 1 and user_id is not null and ts >= 1738 and ts <= 1739) and not (user_id = 1 and user_id is not null and ts >= 1740 and ts <= 1741) and not (user_id = 1 and user_id is not null and ts >= 1742 and ts <= 1743) and not (user_id = 1 and user_id is not null and ts >= 1744 and ts <= 1745) and not (user_id = 1 and user_id is not null and ts >= 1746 and ts <= 1747) and not (user_id = 1 and user_id is not null and ts >= 1748 and ts <= 1749) and not (user_id = 1 and user_id is not null and ts >= 1750 and ts <= 1751) and not (user_id = 1 and user_id is not null and ts >= 1752 and ts <= 1753) and not (user_id = 1 and user_id is not null and ts >= 1754 and ts <= 1755) and not (user_id = 1 and user_id is not null and ts >= 1756 and ts <= 1757) and not (user_id = 1 and user_id is not null and ts >= 1758 and ts <= 1759) and not (user_id = 1 and user_id is not null and ts >= 1760 and ts <= 1761) and not (user_id = 1 and user_id is not null and ts >= 1762 and ts <= 1763) and not (user_id = 1 and user_id is not null and ts >= 1764 and ts <= 1765) and not (user_id = 1 and user_id is not null and ts >= 1766 and ts <= 1767) and not (user_id = 1 and user_id is not null and ts >= 1768 and ts <= 1769) and not (user_id = 1 and user_id is not null and ts >= 1770 and ts <= 1771) and not (user_id = 1 and user_id is not null and ts >= 1772 and ts <= 1773) and not (user_id = 1 and user_id is not null and ts >= 1774 and ts <= 1775) and not (user_id = 1 and user_id is not null and ts >= 1776 and ts <= 1777) and not (user_id = 1 and user_id is not null and ts >= 1778 and ts <= 1779) and not (user_id = 1 and user_id is not null and ts >= 1780 and ts <= 1781) and not (user_id = 1 and user_id is not null and ts >= 1782 and ts <= 1783) and not (user_id = 1 and user_id is not null and ts >= 1784 and ts <= 1785) and not (user_id = 1 and user_id is not null and ts >= 1786 and ts <= 1787) and not (user_id = 1 and user_id is not 
null and ts >= 1788 and ts <= 1789) and not (user_id = 1 and user_id is not null and ts >= 1790 and ts <= 1791) and not (user_id = 1 and user_id is not null and ts >= 1792 and ts <= 1793) and not (user_id = 1 and user_id is not null and ts >= 1794 and ts <= 1795) and not (user_id = 1 and user_id is not null and ts >= 1796 and ts <= 1797) and not (user_id = 1 and user_id is not null and ts >= 1798 and ts <= 1799) and not (user_id = 1 and user_id is not null and ts >= 1800 and ts <= 1801) and not (user_id = 1 and user_id is not null and ts >= 1802 and ts <= 1803) and not (user_id = 1 and user_id is not null and ts >= 1804 and ts <= 1805) and not (user_id = 1 and user_id is not null and ts >= 1806 and ts <= 1807) and not (user_id = 1 and user_id is not null and ts >= 1808 and ts <= 1809) and not (user_id = 1 and user_id is not null and ts >= 1810 and ts <= 1811) and not (user_id = 1 and user_id is not null and ts >= 1812 and ts <= 1813) and not (user_id = 1 and user_id is not null and ts >= 1814 and ts <= 1815) and not (user_id = 1 and user_id is not null and ts >= 1816 and ts <= 1817) and not (user_id = 1 and user_id is not null and ts >= 1818 and ts <= 1819) and not (user_id = 1 and user_id is not null and ts >= 1820 and ts <= 1821) and not (user_id = 1 and user_id is not null and ts >= 1822 and ts <= 1823) and not (user_id = 1 and user_id is not null and ts >= 1824 and ts <= 1825) and not (user_id = 1 and user_id is not null and ts >= 1826 and ts <= 1827) and not (user_id = 1 and user_id is not null and ts >= 1828 and ts <= 1829) and not (user_id = 1 and user_id is not null and ts >= 1830 and ts <= 1831) and not (user_id = 1 and user_id is not null and ts >= 1832 and ts <= 1833) and not (user_id = 1 and user_id is not null and ts >= 1834 and ts <= 1835) and not (user_id = 1 and user_id is not null and ts >= 1836 and ts <= 1837) and not (user_id = 1 and user_id is not null and ts >= 1838 and ts <= 1839) and not (user_id = 1 and user_id is not null and ts >= 1840 and 
ts <= 1841) and not (user_id = 1 and user_id is not null and ts >= 1842 and ts <= 1843) and not (user_id = 1 and user_id is not null and ts >= 1844 and ts <= 1845) and not (user_id = 1 and user_id is not null and ts >= 1846 and ts <= 1847) and not (user_id = 1 and user_id is not null and ts >= 1848 and ts <= 1849) and not (user_id = 1 and user_id is not null and ts >= 1850 and ts <= 1851) and not (user_id = 1 and user_id is not null and ts >= 1852 and ts <= 1853) and not (user_id = 1 and user_id is not null and ts >= 1854 and ts <= 1855) and not (user_id = 1 and user_id is not null and ts >= 1856 and ts <= 1857) and not (user_id = 1 and user_id is not null and ts >= 1858 and ts <= 1859) and not (user_id = 1 and user_id is not null and ts >= 1860 and ts <= 1861) and not (user_id = 1 and user_id is not null and ts >= 1862 and ts <= 1863) and not (user_id = 1 and user_id is not null and ts >= 1864 and ts <= 1865) and not (user_id = 1 and user_id is not null and ts >= 1866 and ts <= 1867) and not (user_id = 1 and user_id is not null and ts >= 1868 and ts <= 1869) and not (user_id = 1 and user_id is not null and ts >= 1870 and ts <= 1871) and not (user_id = 1 and user_id is not null and ts >= 1872 and ts <= 1873) and not (user_id = 1 and user_id is not null and ts >= 1874 and ts <= 1875) and not (user_id = 1 and user_id is not null and ts >= 1876 and ts <= 1877) and not (user_id = 1 and user_id is not null and ts >= 1878 and ts <= 1879) and not (user_id = 1 and user_id is not null and ts >= 1880 and ts <= 1881) and not (user_id = 1 and user_id is not null and ts >= 1882 and ts <= 1883) and not (user_id = 1 and user_id is not null and ts >= 1884 and ts <= 1885) and not (user_id = 1 and user_id is not null and ts >= 1886 and ts <= 1887) and not (user_id = 1 and user_id is not null and ts >= 1888 and ts <= 1889) and not (user_id = 1 and user_id is not null and ts >= 1890 and ts <= 1891) and not (user_id = 1 and user_id is not null and ts >= 1892 and ts <= 1893) and not 
(user_id = 1 and user_id is not null and ts >= 1894 and ts <= 1895) and not (user_id = 1 and user_id is not null and ts >= 1896 and ts <= 1897) and not (user_id = 1 and user_id is not null and ts >= 1898 and ts <= 1899) and not (user_id = 1 and user_id is not null and ts >= 1900 and ts <= 1901) and not (user_id = 1 and user_id is not null and ts >= 1902 and ts <= 1903) and not (user_id = 1 and user_id is not null and ts >= 1904 and ts <= 1905) and not (user_id = 1 and user_id is not null and ts >= 1906 and ts <= 1907) and not (user_id = 1 and user_id is not null and ts >= 1908 and ts <= 1909) and not (user_id = 1 and user_id is not null and ts >= 1910 and ts <= 1911) and not (user_id = 1 and user_id is not null and ts >= 1912 and ts <= 1913) and not (user_id = 1 and user_id is not null and ts >= 1914 and ts <= 1915) and not (user_id = 1 and user_id is not null and ts >= 1916 and ts <= 1917) and not (user_id = 1 and user_id is not null and ts >= 1918 and ts <= 1919) and not (user_id = 1 and user_id is not null and ts >= 1920 and ts <= 1921) and not (user_id = 1 and user_id is not null and ts >= 1922 and ts <= 1923) and not (user_id = 1 and user_id is not null and ts >= 1924 and ts <= 1925) and not (user_id = 1 and user_id is not null and ts >= 1926 and ts <= 1927) and not (user_id = 1 and user_id is not null and ts >= 1928 and ts <= 1929) and not (user_id = 1 and user_id is not null and ts >= 1930 and ts <= 1931) and not (user_id = 1 and user_id is not null and ts >= 1932 and ts <= 1933) and not (user_id = 1 and user_id is not null and ts >= 1934 and ts <= 1935) and not (user_id = 1 and user_id is not null and ts >= 1936 and ts <= 1937) and not (user_id = 1 and user_id is not null and ts >= 1938 and ts <= 1939) and not (user_id = 1 and user_id is not null and ts >= 1940 and ts <= 1941) and not (user_id = 1 and user_id is not null and ts >= 1942 and ts <= 1943) and not (user_id = 1 and user_id is not null and ts >= 1944 and ts <= 1945) and not (user_id = 1 and 
user_id is not null and ts >= 1946 and ts <= 1947) and not (user_id = 1 and user_id is not null and ts >= 1948 and ts <= 1949) and not (user_id = 1 and user_id is not null and ts >= 1950 and ts <= 1951) and not (user_id = 1 and user_id is not null and ts >= 1952 and ts <= 1953) and not (user_id = 1 and user_id is not null and ts >= 1954 and ts <= 1955) and not (user_id = 1 and user_id is not null and ts >= 1956 and ts <= 1957) and not (user_id = 1 and user_id is not null and ts >= 1958 and ts <= 1959) and not (user_id = 1 and user_id is not null and ts >= 1960 and ts <= 1961) and not (user_id = 1 and user_id is not null and ts >= 1962 and ts <= 1963) and not (user_id = 1 and user_id is not null and ts >= 1964 and ts <= 1965) and not (user_id = 1 and user_id is not null and ts >= 1966 and ts <= 1967) and not (user_id = 1 and user_id is not null and ts >= 1968 and ts <= 1969) and not (user_id = 1 and user_id is not null and ts >= 1970 and ts <= 1971) and not (user_id = 1 and user_id is not null and ts >= 1972 and ts <= 1973) and not (user_id = 1 and user_id is not null and ts >= 1974 and ts <= 1975) and not (user_id = 1 and user_id is not null and ts >= 1976 and ts <= 1977) and not (user_id = 1 and user_id is not null and ts >= 1978 and ts <= 1979) and not (user_id = 1 and user_id is not null and ts >= 1980 and ts <= 1981) and not (user_id = 1 and user_id is not null and ts >= 1982 and ts <= 1983) and not (user_id = 1 and user_id is not null and ts >= 1984 and ts <= 1985) and not (user_id = 1 and user_id is not null and ts >= 1986 and ts <= 1987) and not (user_id = 1 and user_id is not null and ts >= 1988 and ts <= 1989) and not (user_id = 1 and user_id is not null and ts >= 1990 and ts <= 1991) and not (user_id = 1 and user_id is not null and ts >= 1992 and ts <= 1993) and not (user_id = 1 and user_id is not null and ts >= 1994 and ts <= 1995) and not (user_id = 1 and user_id is not null and ts >= 1996 and ts <= 1997) and not (user_id = 1 and user_id is not null and 
ts >= 1998 and ts <= 1999) and not (user_id = 1 and user_id is not null and ts >= 11000 and ts <= 11001) and not (user_id = 1 and user_id is not null and ts >= 11002 and ts <= 11003) and not (user_id = 1 and user_id is not null and ts >= 11004 and ts <= 11005) and not (user_id = 1 and user_id is not null and ts >= 11006 and ts <= 11007) and not (user_id = 1 and user_id is not null and ts >= 11008 and ts <= 11009) and not (user_id = 1 and user_id is not null and ts >= 11010 and ts <= 11011) and not (user_id = 1 and user_id is not null and ts >= 11012 and ts <= 11013) and not (user_id = 1 and user_id is not null and ts >= 11014 and ts <= 11015) and not (user_id = 1 and user_id is not null and ts >= 11016 and ts <= 11017) and not (user_id = 1 and user_id is not null and ts >= 11018 and ts <= 11019) and not (user_id = 1 and user_id is not null and ts >= 11020 and ts <= 11021) and not (user_id = 1 and user_id is not null and ts >= 11022 and ts <= 11023) and not (user_id = 1 and user_id is not null and ts >= 11024 and ts <= 11025) and not (user_id = 1 and user_id is not null and ts >= 11026 and ts <= 11027) and not (user_id = 1 and user_id is not null and ts >= 11028 and ts <= 11029) and not (user_id = 1 and user_id is not null and ts >= 11030 and ts <= 11031) and not (user_id = 1 and user_id is not null and ts >= 11032 and ts <= 11033) and not (user_id = 1 and user_id is not null and ts >= 11034 and ts <= 11035) and not (user_id = 1 and user_id is not null and ts >= 11036 and ts <= 11037) and not (user_id = 1 and user_id is not null and ts >= 11038 and ts <= 11039) and not (user_id = 1 and user_id is not null and ts >= 11040 and ts <= 11041) and not (user_id = 1 and user_id is not null and ts >= 11042 and ts <= 11043) and not (user_id = 1 and user_id is not null and ts >= 11044 and ts <= 11045) and not (user_id = 1 and user_id is not null and ts >= 11046 and ts <= 11047) and not (user_id = 1 and user_id is not null and ts >= 11048 and ts <= 11049) and not (user_id = 1 
and user_id is not null and ts >= 11050 and ts <= 11051) and not (user_id = 1 and user_id is not null and ts >= 11052 and ts <= 11053) and not (user_id = 1 and user_id is not null and ts >= 11054 and ts <= 11055) and not (user_id = 1 and user_id is not null and ts >= 11056 and ts <= 11057) and not (user_id = 1 and user_id is not null and ts >= 11058 and ts <= 11059) and not (user_id = 1 and user_id is not null and ts >= 11060 and ts <= 11061) and not (user_id = 1 and user_id is not null and ts >= 11062 and ts <= 11063) and not (user_id = 1 and user_id is not null and ts >= 11064 and ts <= 11065) and not (user_id = 1 and user_id is not null and ts >= 11066 and ts <= 11067) and not (user_id = 1 and user_id is not null and ts >= 11068 and ts <= 11069) and not (user_id = 1 and user_id is not null and ts >= 11070 and ts <= 11071) and not (user_id = 1 and user_id is not null and ts >= 11072 and ts <= 11073) and not (user_id = 1 and user_id is not null and ts >= 11074 and ts <= 11075) and not (user_id = 1 and user_id is not null and ts >= 11076 and ts <= 11077) and not (user_id = 1 and user_id is not null and ts >= 11078 and ts <= 11079) and not (user_id = 1 and user_id is not null and ts >= 11080 and ts <= 11081) and not (user_id = 1 and user_id is not null and ts >= 11082 and ts <= 11083) and not (user_id = 1 and user_id is not null and ts >= 11084 and ts <= 11085) and not (user_id = 1 and user_id is not null and ts >= 11086 and ts <= 11087) and not (user_id = 1 and user_id is not null and ts >= 11088 and ts <= 11089) and not (user_id = 1 and user_id is not null and ts >= 11090 and ts <= 11091) and not (user_id = 1 and user_id is not null and ts >= 11092 and ts <= 11093) and not (user_id = 1 and user_id is not null and ts >= 11094 and ts <= 11095) and not (user_id = 1 and user_id is not null and ts >= 11096 and ts <= 11097) and not (user_id = 1 and user_id is not null and ts >= 11098 and ts <= 11099) and not (user_id = 1 and user_id is not null and ts >= 11100 and ts <= 
11101) and not (user_id = 1 and user_id is not null and ts >= 11102 and ts <= 11103) and not (user_id = 1 and user_id is not null and ts >= 11104 and ts <= 11105) and not (user_id = 1 and user_id is not null and ts >= 11106 and ts <= 11107) and not (user_id = 1 and user_id is not null and ts >= 11108 and ts <= 11109) and not (user_id = 1 and user_id is not null and ts >= 11110 and ts <= 11111) and not (user_id = 1 and user_id is not null and ts >= 11112 and ts <= 11113) and not (user_id = 1 and user_id is not null and ts >= 11114 and ts <= 11115) and not (user_id = 1 and user_id is not null and ts >= 11116 and ts <= 11117) and not (user_id = 1 and user_id is not null and ts >= 11118 and ts <= 11119) and not (user_id = 1 and user_id is not null and ts >= 11120 and ts <= 11121) and not (user_id = 1 and user_id is not null and ts >= 11122 and ts <= 11123) and not (user_id = 1 and user_id is not null and ts >= 11124 and ts <= 11125) and not (user_id = 1 and user_id is not null and ts >= 11126 and ts <= 11127) and not (user_id = 1 and user_id is not null and ts >= 11128 and ts <= 11129) and not (user_id = 1 and user_id is not null and ts >= 11130 and ts <= 11131) and not (user_id = 1 and user_id is not null and ts >= 11132 and ts <= 11133) and not (user_id = 1 and user_id is not null and ts >= 11134 and ts <= 11135) and not (user_id = 1 and user_id is not null and ts >= 11136 and ts <= 11137) and not (user_id = 1 and user_id is not null and ts >= 11138 and ts <= 11139) and not (user_id = 1 and user_id is not null and ts >= 11140 and ts <= 11141) and not (user_id = 1 and user_id is not null and ts >= 11142 and ts <= 11143) and not (user_id = 1 and user_id is not null and ts >= 11144 and ts <= 11145) and not (user_id = 1 and user_id is not null and ts >= 11146 and ts <= 11147) and not (user_id = 1 and user_id is not null and ts >= 11148 and ts <= 11149) and not (user_id = 1 and user_id is not null and ts >= 11150 and ts <= 11151) and not (user_id = 1 and user_id is not 
null and ts >= 11152 and ts <= 11153) and not (user_id = 1 and user_id is not null and ts >= 11154 and ts <= 11155) and not (user_id = 1 and user_id is not null and ts >= 11156 and ts <= 11157) and not (user_id = 1 and user_id is not null and ts >= 11158 and ts <= 11159) and not (user_id = 1 and user_id is not null and ts >= 11160 and ts <= 11161) and not (user_id = 1 and user_id is not null and ts >= 11162 and ts <= 11163) and not (user_id = 1 and user_id is not null and ts >= 11164 and ts <= 11165) and not (user_id = 1 and user_id is not null and ts >= 11166 and ts <= 11167) and not (user_id = 1 and user_id is not null and ts >= 11168 and ts <= 11169) and not (user_id = 1 and user_id is not null and ts >= 11170 and ts <= 11171) and not (user_id = 1 and user_id is not null and ts >= 11172 and ts <= 11173) and not (user_id = 1 and user_id is not null and ts >= 11174 and ts <= 11175) and not (user_id = 1 and user_id is not null and ts >= 11176 and ts <= 11177) and not (user_id = 1 and user_id is not null and ts >= 11178 and ts <= 11179) and not (user_id = 1 and user_id is not null and ts >= 11180 and ts <= 11181) and not (user_id = 1 and user_id is not null and ts >= 11182 and ts <= 11183) and not (user_id = 1 and user_id is not null and ts >= 11184 and ts <= 11185) and not (user_id = 1 and user_id is not null and ts >= 11186 and ts <= 11187) and not (user_id = 1 and user_id is not null and ts >= 11188 and ts <= 11189) and not (user_id = 1 and user_id is not null and ts >= 11190 and ts <= 11191) and not (user_id = 1 and user_id is not null and ts >= 11192 and ts <= 11193) and not (user_id = 1 and user_id is not null and ts >= 11194 and ts <= 11195) and not (user_id = 1 and user_id is not null and ts >= 11196 and ts <= 11197) and not (user_id = 1 and user_id is not null and ts >= 11198 and ts <= 11199) and not (user_id = 1 and user_id is not null and ts >= 11200 and ts <= 11201) and not (user_id = 1 and user_id is not null and ts >= 11202 and ts <= 11203) and not 
(user_id = 1 and user_id is not null and ts >= 11204 and ts <= 11205) and not (user_id = 1 and user_id is not null and ts >= 11206 and ts <= 11207) and not (user_id = 1 and user_id is not null and ts >= 11208 and ts <= 11209) and not (user_id = 1 and user_id is not null and ts >= 11210 and ts <= 11211) and not (user_id = 1 and user_id is not null and ts >= 11212 and ts <= 11213) and not (user_id = 1 and user_id is not null and ts >= 11214 and ts <= 11215) and not (user_id = 1 and user_id is not null and ts >= 11216 and ts <= 11217) and not (user_id = 1 and user_id is not null and ts >= 11218 and ts <= 11219) and not (user_id = 1 and user_id is not null and ts >= 11220 and ts <= 11221) and not (user_id = 1 and user_id is not null and ts >= 11222 and ts <= 11223) and not (user_id = 1 and user_id is not null and ts >= 11224 and ts <= 11225) and not (user_id = 1 and user_id is not null and ts >= 11226 and ts <= 11227) and not (user_id = 1 and user_id is not null and ts >= 11228 and ts <= 11229) and not (user_id = 1 and user_id is not null and ts >= 11230 and ts <= 11231) and not (user_id = 1 and user_id is not null and ts >= 11232 and ts <= 11233) and not (user_id = 1 and user_id is not null and ts >= 11234 and ts <= 11235) and not (user_id = 1 and user_id is not null and ts >= 11236 and ts <= 11237) and not (user_id = 1 and user_id is not null and ts >= 11238 and ts <= 11239) and not (user_id = 1 and user_id is not null and ts >= 11240 and ts <= 11241) and not (user_id = 1 and user_id is not null and ts >= 11242 and ts <= 11243) and not (user_id = 1 and user_id is not null and ts >= 11244 and ts <= 11245) and not (user_id = 1 and user_id is not null and ts >= 11246 and ts <= 11247) and not (user_id = 1 and user_id is not null and ts >= 11248 and ts <= 11249) and not (user_id = 1 and user_id is not null and ts >= 11250 and ts <= 11251) and not (user_id = 1 and user_id is not null and ts >= 11252 and ts <= 11253) and not (user_id = 1 and user_id is not null and ts >= 
11254 and ts <= 11255) and not (user_id = 1 and user_id is not null and ts >= 11256 and ts <= 11257) and not (user_id = 1 and user_id is not null and ts >= 11258 and ts <= 11259) and not (user_id = 1 and user_id is not null and ts >= 11260 and ts <= 11261) and not (user_id = 1 and user_id is not null and ts >= 11262 and ts <= 11263) and not (user_id = 1 and user_id is not null and ts >= 11264 and ts <= 11265) and not (user_id = 1 and user_id is not null and ts >= 11266 and ts <= 11267) and not (user_id = 1 and user_id is not null and ts >= 11268 and ts <= 11269) and not (user_id = 1 and user_id is not null and ts >= 11270 and ts <= 11271) and not (user_id = 1 and user_id is not null and ts >= 11272 and ts <= 11273) and not (user_id = 1 and user_id is not null and ts >= 11274 and ts <= 11275) and not (user_id = 1 and user_id is not null and ts >= 11276 and ts <= 11277) and not (user_id = 1 and user_id is not null and ts >= 11278 and ts <= 11279) and not (user_id = 1 and user_id is not null and ts >= 11280 and ts <= 11281) and not (user_id = 1 and user_id is not null and ts >= 11282 and ts <= 11283) and not (user_id = 1 and user_id is not null and ts >= 11284 and ts <= 11285) and not (user_id = 1 and user_id is not null and ts >= 11286 and ts <= 11287) and not (user_id = 1 and user_id is not null and ts >= 11288 and ts <= 11289) and not (user_id = 1 and user_id is not null and ts >= 11290 and ts <= 11291) and not (user_id = 1 and user_id is not null and ts >= 11292 and ts <= 11293) and not (user_id = 1 and user_id is not null and ts >= 11294 and ts <= 11295) and not (user_id = 1 and user_id is not null and ts >= 11296 and ts <= 11297) and not (user_id = 1 and user_id is not null and ts >= 11298 and ts <= 11299) and not (user_id = 1 and user_id is not null and ts >= 11300 and ts <= 11301) and not (user_id = 1 and user_id is not null and ts >= 11302 and ts <= 11303) and not (user_id = 1 and user_id is not null and ts >= 11304 and ts <= 11305) and not (user_id = 1 and 
user_id is not null and ts >= 11306 and ts <= 11307) and not (user_id = 1 and user_id is not null and ts >= 11308 and ts <= 11309) and not (user_id = 1 and user_id is not null and ts >= 11310 and ts <= 11311) and not (user_id = 1 and user_id is not null and ts >= 11312 and ts <= 11313) and not (user_id = 1 and user_id is not null and ts >= 11314 and ts <= 11315) and not (user_id = 1 and user_id is not null and ts >= 11316 and ts <= 11317) and not (user_id = 1 and user_id is not null and ts >= 11318 and ts <= 11319) and not (user_id = 1 and user_id is not null and ts >= 11320 and ts <= 11321) and not (user_id = 1 and user_id is not null and ts >= 11322 and ts <= 11323) and not (user_id = 1 and user_id is not null and ts >= 11324 and ts <= 11325) and not (user_id = 1 and user_id is not null and ts >= 11326 and ts <= 11327) and not (user_id = 1 and user_id is not null and ts >= 11328 and ts <= 11329) and not (user_id = 1 and user_id is not null and ts >= 11330 and ts <= 11331) and not (user_id = 1 and user_id is not null and ts >= 11332 and ts <= 11333) and not (user_id = 1 and user_id is not null and ts >= 11334 and ts <= 11335) and not (user_id = 1 and user_id is not null and ts >= 11336 and ts <= 11337) and not (user_id = 1 and user_id is not null and ts >= 11338 and ts <= 11339) and not (user_id = 1 and user_id is not null and ts >= 11340 and ts <= 11341) and not (user_id = 1 and user_id is not null and ts >= 11342 and ts <= 11343) and not (user_id = 1 and user_id is not null and ts >= 11344 and ts <= 11345) and not (user_id = 1 and user_id is not null and ts >= 11346 and ts <= 11347) and not (user_id = 1 and user_id is not null and ts >= 11348 and ts <= 11349) and not (user_id = 1 and user_id is not null and ts >= 11350 and ts <= 11351) and not (user_id = 1 and user_id is not null and ts >= 11352 and ts <= 11353) and not (user_id = 1 and user_id is not null and ts >= 11354 and ts <= 11355) and not (user_id = 1 and user_id is not null and ts >= 11356 and ts <= 
11357) and not (user_id = 1 and user_id is not null and ts >= 11358 and ts <= 11359) and not (user_id = 1 and user_id is not null and ts >= 11360 and ts <= 11361) and not (user_id = 1 and user_id is not null and ts >= 11362 and ts <= 11363) and not (user_id = 1 and user_id is not null and ts >= 11364 and ts <= 11365) and not (user_id = 1 and user_id is not null and ts >= 11366 and ts <= 11367) and not (user_id = 1 and user_id is not null and ts >= 11368 and ts <= 11369) and not (user_id = 1 and user_id is not null and ts >= 11370 and ts <= 11371) and not (user_id = 1 and user_id is not null and ts >= 11372 and ts <= 11373) and not (user_id = 1 and user_id is not null and ts >= 11374 and ts <= 11375) and not (user_id = 1 and user_id is not null and ts >= 11376 and ts <= 11377) and not (user_id = 1 and user_id is not null and ts >= 11378 and ts <= 11379) and not (user_id = 1 and user_id is not null and ts >= 11380 and ts <= 11381) and not (user_id = 1 and user_id is not null and ts >= 11382 and ts <= 11383) and not (user_id = 1 and user_id is not null and ts >= 11384 and ts <= 11385) and not (user_id = 1 and user_id is not null and ts >= 11386 and ts <= 11387) and not (user_id = 1 and user_id is not null and ts >= 11388 and ts <= 11389) and not (user_id = 1 and user_id is not null and ts >= 11390 and ts <= 11391) and not (user_id = 1 and user_id is not null and ts >= 11392 and ts <= 11393) and not (user_id = 1 and user_id is not null and ts >= 11394 and ts <= 11395) and not (user_id = 1 and user_id is not null and ts >= 11396 and ts <= 11397) and not (user_id = 1 and user_id is not null and ts >= 11398 and ts <= 11399) and not (user_id = 1 and user_id is not null and ts >= 11400 and ts <= 11401) and not (user_id = 1 and user_id is not null and ts >= 11402 and ts <= 11403) and not (user_id = 1 and user_id is not null and ts >= 11404 and ts <= 11405) and not (user_id = 1 and user_id is not null and ts >= 11406 and ts <= 11407) and not (user_id = 1 and user_id is not 
null and ts >= 11408 and ts <= 11409) and not (user_id = 1 and user_id is not null and ts >= 11410 and ts <= 11411) and not (user_id = 1 and user_id is not null and ts >= 11412 and ts <= 11413) and not (user_id = 1 and user_id is not null and ts >= 11414 and ts <= 11415) and not (user_id = 1 and user_id is not null and ts >= 11416 and ts <= 11417) and not (user_id = 1 and user_id is not null and ts >= 11418 and ts <= 11419) and not (user_id = 1 and user_id is not null and ts >= 11420 and ts <= 11421) and not (user_id = 1 and user_id is not null and ts >= 11422 and ts <= 11423) and not (user_id = 1 and user_id is not null and ts >= 11424 and ts <= 11425) and not (user_id = 1 and user_id is not null and ts >= 11426 and ts <= 11427) and not (user_id = 1 and user_id is not null and ts >= 11428 and ts <= 11429) and not (user_id = 1 and user_id is not null and ts >= 11430 and ts <= 11431) and not (user_id = 1 and user_id is not null and ts >= 11432 and ts <= 11433) and not (user_id = 1 and user_id is not null and ts >= 11434 and ts <= 11435) and not (user_id = 1 and user_id is not null and ts >= 11436 and ts <= 11437) and not (user_id = 1 and user_id is not null and ts >= 11438 and ts <= 11439) and not (user_id = 1 and user_id is not null and ts >= 11440 and ts <= 11441) and not (user_id = 1 and user_id is not null and ts >= 11442 and ts <= 11443) and not (user_id = 1 and user_id is not null and ts >= 11444 and ts <= 11445) and not (user_id = 1 and user_id is not null and ts >= 11446 and ts <= 11447) and not (user_id = 1 and user_id is not null and ts >= 11448 and ts <= 11449) and not (user_id = 1 and user_id is not null and ts >= 11450 and ts <= 11451) and not (user_id = 1 and user_id is not null and ts >= 11452 and ts <= 11453) and not (user_id = 1 and user_id is not null and ts >= 11454 and ts <= 11455) and not (user_id = 1 and user_id is not null and ts >= 11456 and ts <= 11457) and not (user_id = 1 and user_id is not null and ts >= 11458 and ts <= 11459) and not 
(user_id = 1 and user_id is not null and ts >= 11460 and ts <= 11461) and not (user_id = 1 and user_id is not null and ts >= 11462 and ts <= 11463) and not (user_id = 1 and user_id is not null and ts >= 11464 and ts <= 11465) and not (user_id = 1 and user_id is not null and ts >= 11466 and ts <= 11467) and not (user_id = 1 and user_id is not null and ts >= 11468 and ts <= 11469) and not (user_id = 1 and user_id is not null and ts >= 11470 and ts <= 11471) and not (user_id = 1 and user_id is not null and ts >= 11472 and ts <= 11473) and not (user_id = 1 and user_id is not null and ts >= 11474 and ts <= 11475) and not (user_id = 1 and user_id is not null and ts >= 11476 and ts <= 11477) and not (user_id = 1 and user_id is not null and ts >= 11478 and ts <= 11479) and not (user_id = 1 and user_id is not null and ts >= 11480 and ts <= 11481) and not (user_id = 1 and user_id is not null and ts >= 11482 and ts <= 11483) and not (user_id = 1 and user_id is not null and ts >= 11484 and ts <= 11485) and not (user_id = 1 and user_id is not null and ts >= 11486 and ts <= 11487) and not (user_id = 1 and user_id is not null and ts >= 11488 and ts <= 11489) and not (user_id = 1 and user_id is not null and ts >= 11490 and ts <= 11491) and not (user_id = 1 and user_id is not null and ts >= 11492 and ts <= 11493) and not (user_id = 1 and user_id is not null and ts >= 11494 and ts <= 11495) and not (user_id = 1 and user_id is not null and ts >= 11496 and ts <= 11497) and not (user_id = 1 and user_id is not null and ts >= 11498 and ts <= 11499) and not (user_id = 1 and user_id is not null and ts >= 11500 and ts <= 11501) and not (user_id = 1 and user_id is not null and ts >= 11502 and ts <= 11503) and not (user_id = 1 and user_id is not null and ts >= 11504 and ts <= 11505) and not (user_id = 1 and user_id is not null and ts >= 11506 and ts <= 11507) and not (user_id = 1 and user_id is not null and ts >= 11508 and ts <= 11509) and not (user_id = 1 and user_id is not null and ts >= 
11510 and ts <= 11511) and not (user_id = 1 and user_id is not null and ts >= 11512 and ts <= 11513) and not (user_id = 1 and user_id is not null and ts >= 11514 and ts <= 11515) and not (user_id = 1 and user_id is not null and ts >= 11516 and ts <= 11517) and not (user_id = 1 and user_id is not null and ts >= 11518 and ts <= 11519) and not (user_id = 1 and user_id is not null and ts >= 11520 and ts <= 11521) and not (user_id = 1 and user_id is not null and ts >= 11522 and ts <= 11523) and not (user_id = 1 and user_id is not null and ts >= 11524 and ts <= 11525) and not (user_id = 1 and user_id is not null and ts >= 11526 and ts <= 11527) and not (user_id = 1 and user_id is not null and ts >= 11528 and ts <= 11529) and not (user_id = 1 and user_id is not null and ts >= 11530 and ts <= 11531) and not (user_id = 1 and user_id is not null and ts >= 11532 and ts <= 11533) and not (user_id = 1 and user_id is not null and ts >= 11534 and ts <= 11535) and not (user_id = 1 and user_id is not null and ts >= 11536 and ts <= 11537) and not (user_id = 1 and user_id is not null and ts >= 11538 and ts <= 11539) and not (user_id = 1 and user_id is not null and ts >= 11540 and ts <= 11541) and not (user_id = 1 and user_id is not null and ts >= 11542 and ts <= 11543) and not (user_id = 1 and user_id is not null and ts >= 11544 and ts <= 11545) and not (user_id = 1 and user_id is not null and ts >= 11546 and ts <= 11547) and not (user_id = 1 and user_id is not null and ts >= 11548 and ts <= 11549) and not (user_id = 1 and user_id is not null and ts >= 11550 and ts <= 11551) and not (user_id = 1 and user_id is not null and ts >= 11552 and ts <= 11553) and not (user_id = 1 and user_id is not null and ts >= 11554 and ts <= 11555) and not (user_id = 1 and user_id is not null and ts >= 11556 and ts <= 11557) and not (user_id = 1 and user_id is not null and ts >= 11558 and ts <= 11559) and not (user_id = 1 and user_id is not null and ts >= 11560 and ts <= 11561) and not (user_id = 1 and 
user_id is not null and ts >= 11562 and ts <= 11563) and not (user_id = 1 and user_id is not null and ts >= 11564 and ts <= 11565) and not (user_id = 1 and user_id is not null and ts >= 11566 and ts <= 11567) and not (user_id = 1 and user_id is not null and ts >= 11568 and ts <= 11569) and not (user_id = 1 and user_id is not null and ts >= 11570 and ts <= 11571) and not (user_id = 1 and user_id is not null and ts >= 11572 and ts <= 11573) and not (user_id = 1 and user_id is not null and ts >= 11574 and ts <= 11575) and not (user_id = 1 and user_id is not null and ts >= 11576 and ts <= 11577) and not (user_id = 1 and user_id is not null and ts >= 11578 and ts <= 11579) and not (user_id = 1 and user_id is not null and ts >= 11580 and ts <= 11581) and not (user_id = 1 and user_id is not null and ts >= 11582 and ts <= 11583) and not (user_id = 1 and user_id is not null and ts >= 11584 and ts <= 11585) and not (user_id = 1 and user_id is not null and ts >= 11586 and ts <= 11587) and not (user_id = 1 and user_id is not null and ts >= 11588 and ts <= 11589) and not (user_id = 1 and user_id is not null and ts >= 11590 and ts <= 11591) and not (user_id = 1 and user_id is not null and ts >= 11592 and ts <= 11593) and not (user_id = 1 and user_id is not null and ts >= 11594 and ts <= 11595) and not (user_id = 1 and user_id is not null and ts >= 11596 and ts <= 11597) and not (user_id = 1 and user_id is not null and ts >= 11598 and ts <= 11599) and not (user_id = 1 and user_id is not null and ts >= 11600 and ts <= 11601) and not (user_id = 1 and user_id is not null and ts >= 11602 and ts <= 11603) and not (user_id = 1 and user_id is not null and ts >= 11604 and ts <= 11605) and not (user_id = 1 and user_id is not null and ts >= 11606 and ts <= 11607) and not (user_id = 1 and user_id is not null and ts >= 11608 and ts <= 11609) and not (user_id = 1 and user_id is not null and ts >= 11610 and ts <= 11611) and not (user_id = 1 and user_id is not null and ts >= 11612 and ts <= 
11613) and not (user_id = 1 and user_id is not null and ts >= 11614 and ts <= 11615) and not (user_id = 1 and user_id is not null and ts >= 11616 and ts <= 11617) and not (user_id = 1 and user_id is not null and ts >= 11618 and ts <= 11619) and not (user_id = 1 and user_id is not null and ts >= 11620 and ts <= 11621) and not (user_id = 1 and user_id is not null and ts >= 11622 and ts <= 11623) and not (user_id = 1 and user_id is not null and ts >= 11624 and ts <= 11625) and not (user_id = 1 and user_id is not null and ts >= 11626 and ts <= 11627) and not (user_id = 1 and user_id is not null and ts >= 11628 and ts <= 11629) and not (user_id = 1 and user_id is not null and ts >= 11630 and ts <= 11631) and not (user_id = 1 and user_id is not null and ts >= 11632 and ts <= 11633) and not (user_id = 1 and user_id is not null and ts >= 11634 and ts <= 11635) and not (user_id = 1 and user_id is not null and ts >= 11636 and ts <= 11637) and not (user_id = 1 and user_id is not null and ts >= 11638 and ts <= 11639) and not (user_id = 1 and user_id is not null and ts >= 11640 and ts <= 11641) and not (user_id = 1 and user_id is not null and ts >= 11642 and ts <= 11643) and not (user_id = 1 and user_id is not null and ts >= 11644 and ts <= 11645) and not (user_id = 1 and user_id is not null and ts >= 11646 and ts <= 11647) and not (user_id = 1 and user_id is not null and ts >= 11648 and ts <= 11649) and not (user_id = 1 and user_id is not null and ts >= 11650 and ts <= 11651) and not (user_id = 1 and user_id is not null and ts >= 11652 and ts <= 11653) and not (user_id = 1 and user_id is not null and ts >= 11654 and ts <= 11655) and not (user_id = 1 and user_id is not null and ts >= 11656 and ts <= 11657) and not (user_id = 1 and user_id is not null and ts >= 11658 and ts <= 11659) and not (user_id = 1 and user_id is not null and ts >= 11660 and ts <= 11661) and not (user_id = 1 and user_id is not null and ts >= 11662 and ts <= 11663) and not (user_id = 1 and user_id is not 
null and ts >= 11664 and ts <= 11665) and not (user_id = 1 and user_id is not null and ts >= 11666 and ts <= 11667) and not (user_id = 1 and user_id is not null and ts >= 11668 and ts <= 11669) and not (user_id = 1 and user_id is not null and ts >= 11670 and ts <= 11671) and not (user_id = 1 and user_id is not null and ts >= 11672 and ts <= 11673) and not (user_id = 1 and user_id is not null and ts >= 11674 and ts <= 11675) and not (user_id = 1 and user_id is not null and ts >= 11676 and ts <= 11677) and not (user_id = 1 and user_id is not null and ts >= 11678 and ts <= 11679) and not (user_id = 1 and user_id is not null and ts >= 11680 and ts <= 11681) and not (user_id = 1 and user_id is not null and ts >= 11682 and ts <= 11683) and not (user_id = 1 and user_id is not null and ts >= 11684 and ts <= 11685) and not (user_id = 1 and user_id is not null and ts >= 11686 and ts <= 11687) and not (user_id = 1 and user_id is not null and ts >= 11688 and ts <= 11689) and not (user_id = 1 and user_id is not null and ts >= 11690 and ts <= 11691) and not (user_id = 1 and user_id is not null and ts >= 11692 and ts <= 11693) and not (user_id = 1 and user_id is not null and ts >= 11694 and ts <= 11695) and not (user_id = 1 and user_id is not null and ts >= 11696 and ts <= 11697) and not (user_id = 1 and user_id is not null and ts >= 11698 and ts <= 11699) and not (user_id = 1 and user_id is not null and ts >= 11700 and ts <= 11701) and not (user_id = 1 and user_id is not null and ts >= 11702 and ts <= 11703) and not (user_id = 1 and user_id is not null and ts >= 11704 and ts <= 11705) and not (user_id = 1 and user_id is not null and ts >= 11706 and ts <= 11707) and not (user_id = 1 and user_id is not null and ts >= 11708 and ts <= 11709) and not (user_id = 1 and user_id is not null and ts >= 11710 and ts <= 11711) and not (user_id = 1 and user_id is not null and ts >= 11712 and ts <= 11713) and not (user_id = 1 and user_id is not null and ts >= 11714 and ts <= 11715) and not 
(user_id = 1 and user_id is not null and ts >= 11716 and ts <= 11717) and not (user_id = 1 and user_id is not null and ts >= 11718 and ts <= 11719) and not (user_id = 1 and user_id is not null and ts >= 11720 and ts <= 11721) and not (user_id = 1 and user_id is not null and ts >= 11722 and ts <= 11723) and not (user_id = 1 and user_id is not null and ts >= 11724 and ts <= 11725) and not (user_id = 1 and user_id is not null and ts >= 11726 and ts <= 11727) and not (user_id = 1 and user_id is not null and ts >= 11728 and ts <= 11729) and not (user_id = 1 and user_id is not null and ts >= 11730 and ts <= 11731) and not (user_id = 1 and user_id is not null and ts >= 11732 and ts <= 11733) and not (user_id = 1 and user_id is not null and ts >= 11734 and ts <= 11735) and not (user_id = 1 and user_id is not null and ts >= 11736 and ts <= 11737) and not (user_id = 1 and user_id is not null and ts >= 11738 and ts <= 11739) and not (user_id = 1 and user_id is not null and ts >= 11740 and ts <= 11741) and not (user_id = 1 and user_id is not null and ts >= 11742 and ts <= 11743) and not (user_id = 1 and user_id is not null and ts >= 11744 and ts <= 11745) and not (user_id = 1 and user_id is not null and ts >= 11746 and ts <= 11747) and not (user_id = 1 and user_id is not null and ts >= 11748 and ts <= 11749) and not (user_id = 1 and user_id is not null and ts >= 11750 and ts <= 11751) and not (user_id = 1 and user_id is not null and ts >= 11752 and ts <= 11753) and not (user_id = 1 and user_id is not null and ts >= 11754 and ts <= 11755) and not (user_id = 1 and user_id is not null and ts >= 11756 and ts <= 11757) and not (user_id = 1 and user_id is not null and ts >= 11758 and ts <= 11759) and not (user_id = 1 and user_id is not null and ts >= 11760 and ts <= 11761) and not (user_id = 1 and user_id is not null and ts >= 11762 and ts <= 11763) and not (user_id = 1 and user_id is not null and ts >= 11764 and ts <= 11765) and not (user_id = 1 and user_id is not null and ts >= 
11766 and ts <= 11767) and not (user_id = 1 and user_id is not null and ts >= 11768 and ts <= 11769) and not (user_id = 1 and user_id is not null and ts >= 11770 and ts <= 11771) and not (user_id = 1 and user_id is not null and ts >= 11772 and ts <= 11773) and not (user_id = 1 and user_id is not null and ts >= 11774 and ts <= 11775) and not (user_id = 1 and user_id is not null and ts >= 11776 and ts <= 11777) and not (user_id = 1 and user_id is not null and ts >= 11778 and ts <= 11779) and not (user_id = 1 and user_id is not null and ts >= 11780 and ts <= 11781) and not (user_id = 1 and user_id is not null and ts >= 11782 and ts <= 11783) and not (user_id = 1 and user_id is not null and ts >= 11784 and ts <= 11785) and not (user_id = 1 and user_id is not null and ts >= 11786 and ts <= 11787) and not (user_id = 1 and user_id is not null and ts >= 11788 and ts <= 11789) and not (user_id = 1 and user_id is not null and ts >= 11790 and ts <= 11791) and not (user_id = 1 and user_id is not null and ts >= 11792 and ts <= 11793) and not (user_id = 1 and user_id is not null and ts >= 11794 and ts <= 11795) and not (user_id = 1 and user_id is not null and ts >= 11796 and ts <= 11797) and not (user_id = 1 and user_id is not null and ts >= 11798 and ts <= 11799) and not (user_id = 1 and user_id is not null and ts >= 11800 and ts <= 11801) and not (user_id = 1 and user_id is not null and ts >= 11802 and ts <= 11803) and not (user_id = 1 and user_id is not null and ts >= 11804 and ts <= 11805) and not (user_id = 1 and user_id is not null and ts >= 11806 and ts <= 11807) and not (user_id = 1 and user_id is not null and ts >= 11808 and ts <= 11809) and not (user_id = 1 and user_id is not null and ts >= 11810 and ts <= 11811) and not (user_id = 1 and user_id is not null and ts >= 11812 and ts <= 11813) and not (user_id = 1 and user_id is not null and ts >= 11814 and ts <= 11815) and not (user_id = 1 and user_id is not null and ts >= 11816 and ts <= 11817) and not (user_id = 1 and 
user_id is not null and ts >= 11818 and ts <= 11819) and not (user_id = 1 and user_id is not null and ts >= 11820 and ts <= 11821) and not (user_id = 1 and user_id is not null and ts >= 11822 and ts <= 11823) and not (user_id = 1 and user_id is not null and ts >= 11824 and ts <= 11825) and not (user_id = 1 and user_id is not null and ts >= 11826 and ts <= 11827) and not (user_id = 1 and user_id is not null and ts >= 11828 and ts <= 11829) and not (user_id = 1 and user_id is not null and ts >= 11830 and ts <= 11831) and not (user_id = 1 and user_id is not null and ts >= 11832 and ts <= 11833) and not (user_id = 1 and user_id is not null and ts >= 11834 and ts <= 11835) and not (user_id = 1 and user_id is not null and ts >= 11836 and ts <= 11837) and not (user_id = 1 and user_id is not null and ts >= 11838 and ts <= 11839) and not (user_id = 1 and user_id is not null and ts >= 11840 and ts <= 11841) and not (user_id = 1 and user_id is not null and ts >= 11842 and ts <= 11843) and not (user_id = 1 and user_id is not null and ts >= 11844 and ts <= 11845) and not (user_id = 1 and user_id is not null and ts >= 11846 and ts <= 11847) and not (user_id = 1 and user_id is not null and ts >= 11848 and ts <= 11849) and not (user_id = 1 and user_id is not null and ts >= 11850 and ts <= 11851) and not (user_id = 1 and user_id is not null and ts >= 11852 and ts <= 11853) and not (user_id = 1 and user_id is not null and ts >= 11854 and ts <= 11855) and not (user_id = 1 and user_id is not null and ts >= 11856 and ts <= 11857) and not (user_id = 1 and user_id is not null and ts >= 11858 and ts <= 11859) and not (user_id = 1 and user_id is not null and ts >= 11860 and ts <= 11861) and not (user_id = 1 and user_id is not null and ts >= 11862 and ts <= 11863) and not (user_id = 1 and user_id is not null and ts >= 11864 and ts <= 11865) and not (user_id = 1 and user_id is not null and ts >= 11866 and ts <= 11867) and not (user_id = 1 and user_id is not null and ts >= 11868 and ts <= 
11869) and not (user_id = 1 and user_id is not null and ts >= 11870 and ts <= 11871) and not (user_id = 1 and user_id is not null and ts >= 11872 and ts <= 11873) and not (user_id = 1 and user_id is not null and ts >= 11874 and ts <= 11875) and not (user_id = 1 and user_id is not null and ts >= 11876 and ts <= 11877) and not (user_id = 1 and user_id is not null and ts >= 11878 and ts <= 11879) and not (user_id = 1 and user_id is not null and ts >= 11880 and ts <= 11881) and not (user_id = 1 and user_id is not null and ts >= 11882 and ts <= 11883) and not (user_id = 1 and user_id is not null and ts >= 11884 and ts <= 11885) and not (user_id = 1 and user_id is not null and ts >= 11886 and ts <= 11887) and not (user_id = 1 and user_id is not null and ts >= 11888 and ts <= 11889) and not (user_id = 1 and user_id is not null and ts >= 11890 and ts <= 11891) and not (user_id = 1 and user_id is not null and ts >= 11892 and ts <= 11893) and not (user_id = 1 and user_id is not null and ts >= 11894 and ts <= 11895) and not (user_id = 1 and user_id is not null and ts >= 11896 and ts <= 11897) and not (user_id = 1 and user_id is not null and ts >= 11898 and ts <= 11899) and not (user_id = 1 and user_id is not null and ts >= 11900 and ts <= 11901) and not (user_id = 1 and user_id is not null and ts >= 11902 and ts <= 11903) and not (user_id = 1 and user_id is not null and ts >= 11904 and ts <= 11905) and not (user_id = 1 and user_id is not null and ts >= 11906 and ts <= 11907) and not (user_id = 1 and user_id is not null and ts >= 11908 and ts <= 11909) and not (user_id = 1 and user_id is not null and ts >= 11910 and ts <= 11911) and not (user_id = 1 and user_id is not null and ts >= 11912 and ts <= 11913) and not (user_id = 1 and user_id is not null and ts >= 11914 and ts <= 11915) and not (user_id = 1 and user_id is not null and ts >= 11916 and ts <= 11917) and not (user_id = 1 and user_id is not null and ts >= 11918 and ts <= 11919) and not (user_id = 1 and user_id is not 
null and ts >= 11920 and ts <= 11921) and not (user_id = 1 and user_id is not null and ts >= 11922 and ts <= 11923) and not (user_id = 1 and user_id is not null and ts >= 11924 and ts <= 11925) and not (user_id = 1 and user_id is not null and ts >= 11926 and ts <= 11927) and not (user_id = 1 and user_id is not null and ts >= 11928 and ts <= 11929) and not (user_id = 1 and user_id is not null and ts >= 11930 and ts <= 11931) and not (user_id = 1 and user_id is not null and ts >= 11932 and ts <= 11933) and not (user_id = 1 and user_id is not null and ts >= 11934 and ts <= 11935) and not (user_id = 1 and user_id is not null and ts >= 11936 and ts <= 11937) and not (user_id = 1 and user_id is not null and ts >= 11938 and ts <= 11939) and not (user_id = 1 and user_id is not null and ts >= 11940 and ts <= 11941) and not (user_id = 1 and user_id is not null and ts >= 11942 and ts <= 11943) and not (user_id = 1 and user_id is not null and ts >= 11944 and ts <= 11945) and not (user_id = 1 and user_id is not null and ts >= 11946 and ts <= 11947) and not (user_id = 1 and user_id is not null and ts >= 11948 and ts <= 11949) and not (user_id = 1 and user_id is not null and ts >= 11950 and ts <= 11951) and not (user_id = 1 and user_id is not null and ts >= 11952 and ts <= 11953) and not (user_id = 1 and user_id is not null and ts >= 11954 and ts <= 11955) and not (user_id = 1 and user_id is not null and ts >= 11956 and ts <= 11957) and not (user_id = 1 and user_id is not null and ts >= 11958 and ts <= 11959) and not (user_id = 1 and user_id is not null and ts >= 11960 and ts <= 11961) and not (user_id = 1 and user_id is not null and ts >= 11962 and ts <= 11963) and not (user_id = 1 and user_id is not null and ts >= 11964 and ts <= 11965) and not (user_id = 1 and user_id is not null and ts >= 11966 and ts <= 11967) and not (user_id = 1 and user_id is not null and ts >= 11968 and ts <= 11969) and not (user_id = 1 and user_id is not null and ts >= 11970 and ts <= 11971) and not 
(user_id = 1 and user_id is not null and ts >= 11972 and ts <= 11973) and not (user_id = 1 and user_id is not null and ts >= 11974 and ts <= 11975) and not (user_id = 1 and user_id is not null and ts >= 11976 and ts <= 11977) and not (user_id = 1 and user_id is not null and ts >= 11978 and ts <= 11979) and not (user_id = 1 and user_id is not null and ts >= 11980 and ts <= 11981) and not (user_id = 1 and user_id is not null and ts >= 11982 and ts <= 11983) and not (user_id = 1 and user_id is not null and ts >= 11984 and ts <= 11985) and not (user_id = 1 and user_id is not null and ts >= 11986 and ts <= 11987) and not (user_id = 1 and user_id is not null and ts >= 11988 and ts <= 11989) and not (user_id = 1 and user_id is not null and ts >= 11990 and ts <= 11991) and not (user_id = 1 and user_id is not null and ts >= 11992 and ts <= 11993) and ts >= 113898 and parent_id = 1 order by ts asc limit 100", + "plan": { + "QueryType": "SELECT", + "Original": "select 1 from user where shard_key = 1 and is_removed = 1 and cmd in ('A','B','C') and not (user_id = 1 and user_id is not null and ts >= 1 and ts <= 2) and not (user_id = 1 and user_id is not null and ts >= 12 and ts <= 13) and not (user_id = 1 and user_id is not null and ts >= 14 and ts <= 15) and not (user_id = 1 and user_id is not null and ts >= 16 and ts <= 17) and not (user_id = 1 and user_id is not null and ts >= 18 and ts <= 19) and not (user_id = 1 and user_id is not null and ts >= 110 and ts <= 111) and not (user_id = 1 and user_id is not null and ts >= 112 and ts <= 113) and not (user_id = 1 and user_id is not null and ts >= 114 and ts <= 115) and not (user_id = 1 and user_id is not null and ts >= 116 and ts <= 117) and not (user_id = 1 and user_id is not null and ts >= 118 and ts <= 119) and not (user_id = 1 and user_id is not null and ts >= 120 and ts <= 121) and not (user_id = 1 and user_id is not null and ts >= 122 and ts <= 123) and not (user_id = 1 and user_id is not null and ts >= 124 and ts <= 125) 
and not (user_id = 1 and user_id is not null and ts >= 126 and ts <= 127) and not (user_id = 1 and user_id is not null and ts >= 128 and ts <= 129) and not (user_id = 1 and user_id is not null and ts >= 130 and ts <= 131) and not (user_id = 1 and user_id is not null and ts >= 132 and ts <= 133) and not (user_id = 1 and user_id is not null and ts >= 134 and ts <= 135) and not (user_id = 1 and user_id is not null and ts >= 136 and ts <= 137) and not (user_id = 1 and user_id is not null and ts >= 138 and ts <= 139) and not (user_id = 1 and user_id is not null and ts >= 140 and ts <= 141) and not (user_id = 1 and user_id is not null and ts >= 142 and ts <= 143) and not (user_id = 1 and user_id is not null and ts >= 144 and ts <= 145) and not (user_id = 1 and user_id is not null and ts >= 146 and ts <= 147) and not (user_id = 1 and user_id is not null and ts >= 148 and ts <= 149) and not (user_id = 1 and user_id is not null and ts >= 150 and ts <= 151) and not (user_id = 1 and user_id is not null and ts >= 152 and ts <= 153) and not (user_id = 1 and user_id is not null and ts >= 154 and ts <= 155) and not (user_id = 1 and user_id is not null and ts >= 156 and ts <= 157) and not (user_id = 1 and user_id is not null and ts >= 158 and ts <= 159) and not (user_id = 1 and user_id is not null and ts >= 160 and ts <= 161) and not (user_id = 1 and user_id is not null and ts >= 162 and ts <= 163) and not (user_id = 1 and user_id is not null and ts >= 164 and ts <= 165) and not (user_id = 1 and user_id is not null and ts >= 166 and ts <= 167) and not (user_id = 1 and user_id is not null and ts >= 168 and ts <= 169) and not (user_id = 1 and user_id is not null and ts >= 170 and ts <= 171) and not (user_id = 1 and user_id is not null and ts >= 172 and ts <= 173) and not (user_id = 1 and user_id is not null and ts >= 174 and ts <= 175) and not (user_id = 1 and user_id is not null and ts >= 176 and ts <= 177) and not (user_id = 1 and user_id is not null and ts >= 178 and ts <= 179) 
and not (user_id = 1 and user_id is not null and ts >= 180 and ts <= 181) and not (user_id = 1 and user_id is not null and ts >= 182 and ts <= 183) and not (user_id = 1 and user_id is not null and ts >= 184 and ts <= 185) and not (user_id = 1 and user_id is not null and ts >= 186 and ts <= 187) and not (user_id = 1 and user_id is not null and ts >= 188 and ts <= 189) and not (user_id = 1 and user_id is not null and ts >= 190 and ts <= 191) and not (user_id = 1 and user_id is not null and ts >= 192 and ts <= 193) and not (user_id = 1 and user_id is not null and ts >= 194 and ts <= 195) and not (user_id = 1 and user_id is not null and ts >= 196 and ts <= 197) and not (user_id = 1 and user_id is not null and ts >= 198 and ts <= 199) and not (user_id = 1 and user_id is not null and ts >= 1100 and ts <= 1101) and not (user_id = 1 and user_id is not null and ts >= 1102 and ts <= 1103) and not (user_id = 1 and user_id is not null and ts >= 1104 and ts <= 1105) and not (user_id = 1 and user_id is not null and ts >= 1106 and ts <= 1107) and not (user_id = 1 and user_id is not null and ts >= 1108 and ts <= 1109) and not (user_id = 1 and user_id is not null and ts >= 1110 and ts <= 1111) and not (user_id = 1 and user_id is not null and ts >= 1112 and ts <= 1113) and not (user_id = 1 and user_id is not null and ts >= 1114 and ts <= 1115) and not (user_id = 1 and user_id is not null and ts >= 1116 and ts <= 1117) and not (user_id = 1 and user_id is not null and ts >= 1118 and ts <= 1119) and not (user_id = 1 and user_id is not null and ts >= 1120 and ts <= 1121) and not (user_id = 1 and user_id is not null and ts >= 1122 and ts <= 1123) and not (user_id = 1 and user_id is not null and ts >= 1124 and ts <= 1125) and not (user_id = 1 and user_id is not null and ts >= 1126 and ts <= 1127) and not (user_id = 1 and user_id is not null and ts >= 1128 and ts <= 1129) and not (user_id = 1 and user_id is not null and ts >= 1130 and ts <= 1131) and not (user_id = 1 and user_id is not 
null and ts >= 1132 and ts <= 1133) and not (user_id = 1 and user_id is not null and ts >= 1134 and ts <= 1135) and not (user_id = 1 and user_id is not null and ts >= 1136 and ts <= 1137) and not (user_id = 1 and user_id is not null and ts >= 1138 and ts <= 1139) and not (user_id = 1 and user_id is not null and ts >= 1140 and ts <= 1141) and not (user_id = 1 and user_id is not null and ts >= 1142 and ts <= 1143) and not (user_id = 1 and user_id is not null and ts >= 1144 and ts <= 1145) and not (user_id = 1 and user_id is not null and ts >= 1146 and ts <= 1147) and not (user_id = 1 and user_id is not null and ts >= 1148 and ts <= 1149) and not (user_id = 1 and user_id is not null and ts >= 1150 and ts <= 1151) and not (user_id = 1 and user_id is not null and ts >= 1152 and ts <= 1153) and not (user_id = 1 and user_id is not null and ts >= 1154 and ts <= 1155) and not (user_id = 1 and user_id is not null and ts >= 1156 and ts <= 1157) and not (user_id = 1 and user_id is not null and ts >= 1158 and ts <= 1159) and not (user_id = 1 and user_id is not null and ts >= 1160 and ts <= 1161) and not (user_id = 1 and user_id is not null and ts >= 1162 and ts <= 1163) and not (user_id = 1 and user_id is not null and ts >= 1164 and ts <= 1165) and not (user_id = 1 and user_id is not null and ts >= 1166 and ts <= 1167) and not (user_id = 1 and user_id is not null and ts >= 1168 and ts <= 1169) and not (user_id = 1 and user_id is not null and ts >= 1170 and ts <= 1171) and not (user_id = 1 and user_id is not null and ts >= 1172 and ts <= 1173) and not (user_id = 1 and user_id is not null and ts >= 1174 and ts <= 1175) and not (user_id = 1 and user_id is not null and ts >= 1176 and ts <= 1177) and not (user_id = 1 and user_id is not null and ts >= 1178 and ts <= 1179) and not (user_id = 1 and user_id is not null and ts >= 1180 and ts <= 1181) and not (user_id = 1 and user_id is not null and ts >= 1182 and ts <= 1183) and not (user_id = 1 and user_id is not null and ts >= 1184 and 
ts <= 1185) and not (user_id = 1 and user_id is not null and ts >= 1186 and ts <= 1187) and not (user_id = 1 and user_id is not null and ts >= 1188 and ts <= 1189) and not (user_id = 1 and user_id is not null and ts >= 1190 and ts <= 1191) and not (user_id = 1 and user_id is not null and ts >= 1192 and ts <= 1193) and not (user_id = 1 and user_id is not null and ts >= 1194 and ts <= 1195) and not (user_id = 1 and user_id is not null and ts >= 1196 and ts <= 1197) and not (user_id = 1 and user_id is not null and ts >= 1198 and ts <= 1199) and not (user_id = 1 and user_id is not null and ts >= 1200 and ts <= 1201) and not (user_id = 1 and user_id is not null and ts >= 1202 and ts <= 1203) and not (user_id = 1 and user_id is not null and ts >= 1204 and ts <= 1205) and not (user_id = 1 and user_id is not null and ts >= 1206 and ts <= 1207) and not (user_id = 1 and user_id is not null and ts >= 1208 and ts <= 1209) and not (user_id = 1 and user_id is not null and ts >= 1210 and ts <= 1211) and not (user_id = 1 and user_id is not null and ts >= 1212 and ts <= 1213) and not (user_id = 1 and user_id is not null and ts >= 1214 and ts <= 1215) and not (user_id = 1 and user_id is not null and ts >= 1216 and ts <= 1217) and not (user_id = 1 and user_id is not null and ts >= 1218 and ts <= 1219) and not (user_id = 1 and user_id is not null and ts >= 1220 and ts <= 1221) and not (user_id = 1 and user_id is not null and ts >= 1222 and ts <= 1223) and not (user_id = 1 and user_id is not null and ts >= 1224 and ts <= 1225) and not (user_id = 1 and user_id is not null and ts >= 1226 and ts <= 1227) and not (user_id = 1 and user_id is not null and ts >= 1228 and ts <= 1229) and not (user_id = 1 and user_id is not null and ts >= 1230 and ts <= 1231) and not (user_id = 1 and user_id is not null and ts >= 1232 and ts <= 1233) and not (user_id = 1 and user_id is not null and ts >= 1234 and ts <= 1235) and not (user_id = 1 and user_id is not null and ts >= 1236 and ts <= 1237) and not 
(user_id = 1 and user_id is not null and ts >= 1238 and ts <= 1239) and not (user_id = 1 and user_id is not null and ts >= 1240 and ts <= 1241) and not (user_id = 1 and user_id is not null and ts >= 1242 and ts <= 1243) and not (user_id = 1 and user_id is not null and ts >= 1244 and ts <= 1245) and not (user_id = 1 and user_id is not null and ts >= 1246 and ts <= 1247) and not (user_id = 1 and user_id is not null and ts >= 1248 and ts <= 1249) and not (user_id = 1 and user_id is not null and ts >= 1250 and ts <= 1251) and not (user_id = 1 and user_id is not null and ts >= 1252 and ts <= 1253) and not (user_id = 1 and user_id is not null and ts >= 1254 and ts <= 1255) and not (user_id = 1 and user_id is not null and ts >= 1256 and ts <= 1257) and not (user_id = 1 and user_id is not null and ts >= 1258 and ts <= 1259) and not (user_id = 1 and user_id is not null and ts >= 1260 and ts <= 1261) and not (user_id = 1 and user_id is not null and ts >= 1262 and ts <= 1263) and not (user_id = 1 and user_id is not null and ts >= 1264 and ts <= 1265) and not (user_id = 1 and user_id is not null and ts >= 1266 and ts <= 1267) and not (user_id = 1 and user_id is not null and ts >= 1268 and ts <= 1269) and not (user_id = 1 and user_id is not null and ts >= 1270 and ts <= 1271) and not (user_id = 1 and user_id is not null and ts >= 1272 and ts <= 1273) and not (user_id = 1 and user_id is not null and ts >= 1274 and ts <= 1275) and not (user_id = 1 and user_id is not null and ts >= 1276 and ts <= 1277) and not (user_id = 1 and user_id is not null and ts >= 1278 and ts <= 1279) and not (user_id = 1 and user_id is not null and ts >= 1280 and ts <= 1281) and not (user_id = 1 and user_id is not null and ts >= 1282 and ts <= 1283) and not (user_id = 1 and user_id is not null and ts >= 1284 and ts <= 1285) and not (user_id = 1 and user_id is not null and ts >= 1286 and ts <= 1287) and not (user_id = 1 and user_id is not null and ts >= 1288 and ts <= 1289) and not (user_id = 1 and 
user_id is not null and ts >= 1290 and ts <= 1291) and not (user_id = 1 and user_id is not null and ts >= 1292 and ts <= 1293) and not (user_id = 1 and user_id is not null and ts >= 1294 and ts <= 1295) and not (user_id = 1 and user_id is not null and ts >= 1296 and ts <= 1297) and not (user_id = 1 and user_id is not null and ts >= 1298 and ts <= 1299) and not (user_id = 1 and user_id is not null and ts >= 1300 and ts <= 1301) and not (user_id = 1 and user_id is not null and ts >= 1302 and ts <= 1303) and not (user_id = 1 and user_id is not null and ts >= 1304 and ts <= 1305) and not (user_id = 1 and user_id is not null and ts >= 1306 and ts <= 1307) and not (user_id = 1 and user_id is not null and ts >= 1308 and ts <= 1309) and not (user_id = 1 and user_id is not null and ts >= 1310 and ts <= 1311) and not (user_id = 1 and user_id is not null and ts >= 1312 and ts <= 1313) and not (user_id = 1 and user_id is not null and ts >= 1314 and ts <= 1315) and not (user_id = 1 and user_id is not null and ts >= 1316 and ts <= 1317) and not (user_id = 1 and user_id is not null and ts >= 1318 and ts <= 1319) and not (user_id = 1 and user_id is not null and ts >= 1320 and ts <= 1321) and not (user_id = 1 and user_id is not null and ts >= 1322 and ts <= 1323) and not (user_id = 1 and user_id is not null and ts >= 1324 and ts <= 1325) and not (user_id = 1 and user_id is not null and ts >= 1326 and ts <= 1327) and not (user_id = 1 and user_id is not null and ts >= 1328 and ts <= 1329) and not (user_id = 1 and user_id is not null and ts >= 1330 and ts <= 1331) and not (user_id = 1 and user_id is not null and ts >= 1332 and ts <= 1333) and not (user_id = 1 and user_id is not null and ts >= 1334 and ts <= 1335) and not (user_id = 1 and user_id is not null and ts >= 1336 and ts <= 1337) and not (user_id = 1 and user_id is not null and ts >= 1338 and ts <= 1339) and not (user_id = 1 and user_id is not null and ts >= 1340 and ts <= 1341) and not (user_id = 1 and user_id is not null and 
ts >= 1342 and ts <= 1343) and not (user_id = 1 and user_id is not null and ts >= 1344 and ts <= 1345) and not (user_id = 1 and user_id is not null and ts >= 1346 and ts <= 1347) and not (user_id = 1 and user_id is not null and ts >= 1348 and ts <= 1349) and not (user_id = 1 and user_id is not null and ts >= 1350 and ts <= 1351) and not (user_id = 1 and user_id is not null and ts >= 1352 and ts <= 1353) and not (user_id = 1 and user_id is not null and ts >= 1354 and ts <= 1355) and not (user_id = 1 and user_id is not null and ts >= 1356 and ts <= 1357) and not (user_id = 1 and user_id is not null and ts >= 1358 and ts <= 1359) and not (user_id = 1 and user_id is not null and ts >= 1360 and ts <= 1361) and not (user_id = 1 and user_id is not null and ts >= 1362 and ts <= 1363) and not (user_id = 1 and user_id is not null and ts >= 1364 and ts <= 1365) and not (user_id = 1 and user_id is not null and ts >= 1366 and ts <= 1367) and not (user_id = 1 and user_id is not null and ts >= 1368 and ts <= 1369) and not (user_id = 1 and user_id is not null and ts >= 1370 and ts <= 1371) and not (user_id = 1 and user_id is not null and ts >= 1372 and ts <= 1373) and not (user_id = 1 and user_id is not null and ts >= 1374 and ts <= 1375) and not (user_id = 1 and user_id is not null and ts >= 1376 and ts <= 1377) and not (user_id = 1 and user_id is not null and ts >= 1378 and ts <= 1379) and not (user_id = 1 and user_id is not null and ts >= 1380 and ts <= 1381) and not (user_id = 1 and user_id is not null and ts >= 1382 and ts <= 1383) and not (user_id = 1 and user_id is not null and ts >= 1384 and ts <= 1385) and not (user_id = 1 and user_id is not null and ts >= 1386 and ts <= 1387) and not (user_id = 1 and user_id is not null and ts >= 1388 and ts <= 1389) and not (user_id = 1 and user_id is not null and ts >= 1390 and ts <= 1391) and not (user_id = 1 and user_id is not null and ts >= 1392 and ts <= 1393) and not (user_id = 1 and user_id is not null and ts >= 1394 and ts <= 
1395) and not (user_id = 1 and user_id is not null and ts >= 1396 and ts <= 1397) and not (user_id = 1 and user_id is not null and ts >= 1398 and ts <= 1399) and not (user_id = 1 and user_id is not null and ts >= 1400 and ts <= 1401) and not (user_id = 1 and user_id is not null and ts >= 1402 and ts <= 1403) and not (user_id = 1 and user_id is not null and ts >= 1404 and ts <= 1405) and not (user_id = 1 and user_id is not null and ts >= 1406 and ts <= 1407) and not (user_id = 1 and user_id is not null and ts >= 1408 and ts <= 1409) and not (user_id = 1 and user_id is not null and ts >= 1410 and ts <= 1411) and not (user_id = 1 and user_id is not null and ts >= 1412 and ts <= 1413) and not (user_id = 1 and user_id is not null and ts >= 1414 and ts <= 1415) and not (user_id = 1 and user_id is not null and ts >= 1416 and ts <= 1417) and not (user_id = 1 and user_id is not null and ts >= 1418 and ts <= 1419) and not (user_id = 1 and user_id is not null and ts >= 1420 and ts <= 1421) and not (user_id = 1 and user_id is not null and ts >= 1422 and ts <= 1423) and not (user_id = 1 and user_id is not null and ts >= 1424 and ts <= 1425) and not (user_id = 1 and user_id is not null and ts >= 1426 and ts <= 1427) and not (user_id = 1 and user_id is not null and ts >= 1428 and ts <= 1429) and not (user_id = 1 and user_id is not null and ts >= 1430 and ts <= 1431) and not (user_id = 1 and user_id is not null and ts >= 1432 and ts <= 1433) and not (user_id = 1 and user_id is not null and ts >= 1434 and ts <= 1435) and not (user_id = 1 and user_id is not null and ts >= 1436 and ts <= 1437) and not (user_id = 1 and user_id is not null and ts >= 1438 and ts <= 1439) and not (user_id = 1 and user_id is not null and ts >= 1440 and ts <= 1441) and not (user_id = 1 and user_id is not null and ts >= 1442 and ts <= 1443) and not (user_id = 1 and user_id is not null and ts >= 1444 and ts <= 1445) and not (user_id = 1 and user_id is not null and ts >= 1446 and ts <= 1447) and not (user_id 
= 1 and user_id is not null and ts >= 1448 and ts <= 1449) and not (user_id = 1 and user_id is not null and ts >= 1450 and ts <= 1451) and not (user_id = 1 and user_id is not null and ts >= 1452 and ts <= 1453) and not (user_id = 1 and user_id is not null and ts >= 1454 and ts <= 1455) and not (user_id = 1 and user_id is not null and ts >= 1456 and ts <= 1457) and not (user_id = 1 and user_id is not null and ts >= 1458 and ts <= 1459) and not (user_id = 1 and user_id is not null and ts >= 1460 and ts <= 1461) and not (user_id = 1 and user_id is not null and ts >= 1462 and ts <= 1463) and not (user_id = 1 and user_id is not null and ts >= 1464 and ts <= 1465) and not (user_id = 1 and user_id is not null and ts >= 1466 and ts <= 1467) and not (user_id = 1 and user_id is not null and ts >= 1468 and ts <= 1469) and not (user_id = 1 and user_id is not null and ts >= 1470 and ts <= 1471) and not (user_id = 1 and user_id is not null and ts >= 1472 and ts <= 1473) and not (user_id = 1 and user_id is not null and ts >= 1474 and ts <= 1475) and not (user_id = 1 and user_id is not null and ts >= 1476 and ts <= 1477) and not (user_id = 1 and user_id is not null and ts >= 1478 and ts <= 1479) and not (user_id = 1 and user_id is not null and ts >= 1480 and ts <= 1481) and not (user_id = 1 and user_id is not null and ts >= 1482 and ts <= 1483) and not (user_id = 1 and user_id is not null and ts >= 1484 and ts <= 1485) and not (user_id = 1 and user_id is not null and ts >= 1486 and ts <= 1487) and not (user_id = 1 and user_id is not null and ts >= 1488 and ts <= 1489) and not (user_id = 1 and user_id is not null and ts >= 1490 and ts <= 1491) and not (user_id = 1 and user_id is not null and ts >= 1492 and ts <= 1493) and not (user_id = 1 and user_id is not null and ts >= 1494 and ts <= 1495) and not (user_id = 1 and user_id is not null and ts >= 1496 and ts <= 1497) and not (user_id = 1 and user_id is not null and ts >= 1498 and ts <= 1499) and not (user_id = 1 and user_id is not 
null and ts >= 1500 and ts <= 1501) and not (user_id = 1 and user_id is not null and ts >= 1502 and ts <= 1503) and not (user_id = 1 and user_id is not null and ts >= 1504 and ts <= 1505) and not (user_id = 1 and user_id is not null and ts >= 1506 and ts <= 1507) and not (user_id = 1 and user_id is not null and ts >= 1508 and ts <= 1509) and not (user_id = 1 and user_id is not null and ts >= 1510 and ts <= 1511) and not (user_id = 1 and user_id is not null and ts >= 1512 and ts <= 1513) and not (user_id = 1 and user_id is not null and ts >= 1514 and ts <= 1515) and not (user_id = 1 and user_id is not null and ts >= 1516 and ts <= 1517) and not (user_id = 1 and user_id is not null and ts >= 1518 and ts <= 1519) and not (user_id = 1 and user_id is not null and ts >= 1520 and ts <= 1521) and not (user_id = 1 and user_id is not null and ts >= 1522 and ts <= 1523) and not (user_id = 1 and user_id is not null and ts >= 1524 and ts <= 1525) and not (user_id = 1 and user_id is not null and ts >= 1526 and ts <= 1527) and not (user_id = 1 and user_id is not null and ts >= 1528 and ts <= 1529) and not (user_id = 1 and user_id is not null and ts >= 1530 and ts <= 1531) and not (user_id = 1 and user_id is not null and ts >= 1532 and ts <= 1533) and not (user_id = 1 and user_id is not null and ts >= 1534 and ts <= 1535) and not (user_id = 1 and user_id is not null and ts >= 1536 and ts <= 1537) and not (user_id = 1 and user_id is not null and ts >= 1538 and ts <= 1539) and not (user_id = 1 and user_id is not null and ts >= 1540 and ts <= 1541) and not (user_id = 1 and user_id is not null and ts >= 1542 and ts <= 1543) and not (user_id = 1 and user_id is not null and ts >= 1544 and ts <= 1545) and not (user_id = 1 and user_id is not null and ts >= 1546 and ts <= 1547) and not (user_id = 1 and user_id is not null and ts >= 1548 and ts <= 1549) and not (user_id = 1 and user_id is not null and ts >= 1550 and ts <= 1551) and not (user_id = 1 and user_id is not null and ts >= 1552 and 
ts <= 1553) and not (user_id = 1 and user_id is not null and ts >= 1554 and ts <= 1555) and not (user_id = 1 and user_id is not null and ts >= 1556 and ts <= 1557) and not (user_id = 1 and user_id is not null and ts >= 1558 and ts <= 1559) and not (user_id = 1 and user_id is not null and ts >= 1560 and ts <= 1561) and not (user_id = 1 and user_id is not null and ts >= 1562 and ts <= 1563) and not (user_id = 1 and user_id is not null and ts >= 1564 and ts <= 1565) and not (user_id = 1 and user_id is not null and ts >= 1566 and ts <= 1567) and not (user_id = 1 and user_id is not null and ts >= 1568 and ts <= 1569) and not (user_id = 1 and user_id is not null and ts >= 1570 and ts <= 1571) and not (user_id = 1 and user_id is not null and ts >= 1572 and ts <= 1573) and not (user_id = 1 and user_id is not null and ts >= 1574 and ts <= 1575) and not (user_id = 1 and user_id is not null and ts >= 1576 and ts <= 1577) and not (user_id = 1 and user_id is not null and ts >= 1578 and ts <= 1579) and not (user_id = 1 and user_id is not null and ts >= 1580 and ts <= 1581) and not (user_id = 1 and user_id is not null and ts >= 1582 and ts <= 1583) and not (user_id = 1 and user_id is not null and ts >= 1584 and ts <= 1585) and not (user_id = 1 and user_id is not null and ts >= 1586 and ts <= 1587) and not (user_id = 1 and user_id is not null and ts >= 1588 and ts <= 1589) and not (user_id = 1 and user_id is not null and ts >= 1590 and ts <= 1591) and not (user_id = 1 and user_id is not null and ts >= 1592 and ts <= 1593) and not (user_id = 1 and user_id is not null and ts >= 1594 and ts <= 1595) and not (user_id = 1 and user_id is not null and ts >= 1596 and ts <= 1597) and not (user_id = 1 and user_id is not null and ts >= 1598 and ts <= 1599) and not (user_id = 1 and user_id is not null and ts >= 1600 and ts <= 1601) and not (user_id = 1 and user_id is not null and ts >= 1602 and ts <= 1603) and not (user_id = 1 and user_id is not null and ts >= 1604 and ts <= 1605) and not 
(user_id = 1 and user_id is not null and ts >= 1606 and ts <= 1607) and not (user_id = 1 and user_id is not null and ts >= 1608 and ts <= 1609) and not (user_id = 1 and user_id is not null and ts >= 1610 and ts <= 1611) and not (user_id = 1 and user_id is not null and ts >= 1612 and ts <= 1613) and not (user_id = 1 and user_id is not null and ts >= 1614 and ts <= 1615) and not (user_id = 1 and user_id is not null and ts >= 1616 and ts <= 1617) and not (user_id = 1 and user_id is not null and ts >= 1618 and ts <= 1619) and not (user_id = 1 and user_id is not null and ts >= 1620 and ts <= 1621) and not (user_id = 1 and user_id is not null and ts >= 1622 and ts <= 1623) and not (user_id = 1 and user_id is not null and ts >= 1624 and ts <= 1625) and not (user_id = 1 and user_id is not null and ts >= 1626 and ts <= 1627) and not (user_id = 1 and user_id is not null and ts >= 1628 and ts <= 1629) and not (user_id = 1 and user_id is not null and ts >= 1630 and ts <= 1631) and not (user_id = 1 and user_id is not null and ts >= 1632 and ts <= 1633) and not (user_id = 1 and user_id is not null and ts >= 1634 and ts <= 1635) and not (user_id = 1 and user_id is not null and ts >= 1636 and ts <= 1637) and not (user_id = 1 and user_id is not null and ts >= 1638 and ts <= 1639) and not (user_id = 1 and user_id is not null and ts >= 1640 and ts <= 1641) and not (user_id = 1 and user_id is not null and ts >= 1642 and ts <= 1643) and not (user_id = 1 and user_id is not null and ts >= 1644 and ts <= 1645) and not (user_id = 1 and user_id is not null and ts >= 1646 and ts <= 1647) and not (user_id = 1 and user_id is not null and ts >= 1648 and ts <= 1649) and not (user_id = 1 and user_id is not null and ts >= 1650 and ts <= 1651) and not (user_id = 1 and user_id is not null and ts >= 1652 and ts <= 1653) and not (user_id = 1 and user_id is not null and ts >= 1654 and ts <= 1655) and not (user_id = 1 and user_id is not null and ts >= 1656 and ts <= 1657) and not (user_id = 1 and 
user_id is not null and ts >= 1658 and ts <= 1659) and not (user_id = 1 and user_id is not null and ts >= 1660 and ts <= 1661) and not (user_id = 1 and user_id is not null and ts >= 1662 and ts <= 1663) and not (user_id = 1 and user_id is not null and ts >= 1664 and ts <= 1665) and not (user_id = 1 and user_id is not null and ts >= 1666 and ts <= 1667) and not (user_id = 1 and user_id is not null and ts >= 1668 and ts <= 1669) and not (user_id = 1 and user_id is not null and ts >= 1670 and ts <= 1671) and not (user_id = 1 and user_id is not null and ts >= 1672 and ts <= 1673) and not (user_id = 1 and user_id is not null and ts >= 1674 and ts <= 1675) and not (user_id = 1 and user_id is not null and ts >= 1676 and ts <= 1677) and not (user_id = 1 and user_id is not null and ts >= 1678 and ts <= 1679) and not (user_id = 1 and user_id is not null and ts >= 1680 and ts <= 1681) and not (user_id = 1 and user_id is not null and ts >= 1682 and ts <= 1683) and not (user_id = 1 and user_id is not null and ts >= 1684 and ts <= 1685) and not (user_id = 1 and user_id is not null and ts >= 1686 and ts <= 1687) and not (user_id = 1 and user_id is not null and ts >= 1688 and ts <= 1689) and not (user_id = 1 and user_id is not null and ts >= 1690 and ts <= 1691) and not (user_id = 1 and user_id is not null and ts >= 1692 and ts <= 1693) and not (user_id = 1 and user_id is not null and ts >= 1694 and ts <= 1695) and not (user_id = 1 and user_id is not null and ts >= 1696 and ts <= 1697) and not (user_id = 1 and user_id is not null and ts >= 1698 and ts <= 1699) and not (user_id = 1 and user_id is not null and ts >= 1700 and ts <= 1701) and not (user_id = 1 and user_id is not null and ts >= 1702 and ts <= 1703) and not (user_id = 1 and user_id is not null and ts >= 1704 and ts <= 1705) and not (user_id = 1 and user_id is not null and ts >= 1706 and ts <= 1707) and not (user_id = 1 and user_id is not null and ts >= 1708 and ts <= 1709) and not (user_id = 1 and user_id is not null and 
ts >= 1710 and ts <= 1711) and not (user_id = 1 and user_id is not null and ts >= 1712 and ts <= 1713) and not (user_id = 1 and user_id is not null and ts >= 1714 and ts <= 1715) and not (user_id = 1 and user_id is not null and ts >= 1716 and ts <= 1717) and not (user_id = 1 and user_id is not null and ts >= 1718 and ts <= 1719) and not (user_id = 1 and user_id is not null and ts >= 1720 and ts <= 1721) and not (user_id = 1 and user_id is not null and ts >= 1722 and ts <= 1723) and not (user_id = 1 and user_id is not null and ts >= 1724 and ts <= 1725) and not (user_id = 1 and user_id is not null and ts >= 1726 and ts <= 1727) and not (user_id = 1 and user_id is not null and ts >= 1728 and ts <= 1729) and not (user_id = 1 and user_id is not null and ts >= 1730 and ts <= 1731) and not (user_id = 1 and user_id is not null and ts >= 1732 and ts <= 1733) and not (user_id = 1 and user_id is not null and ts >= 1734 and ts <= 1735) and not (user_id = 1 and user_id is not null and ts >= 1736 and ts <= 1737) and not (user_id = 1 and user_id is not null and ts >= 1738 and ts <= 1739) and not (user_id = 1 and user_id is not null and ts >= 1740 and ts <= 1741) and not (user_id = 1 and user_id is not null and ts >= 1742 and ts <= 1743) and not (user_id = 1 and user_id is not null and ts >= 1744 and ts <= 1745) and not (user_id = 1 and user_id is not null and ts >= 1746 and ts <= 1747) and not (user_id = 1 and user_id is not null and ts >= 1748 and ts <= 1749) and not (user_id = 1 and user_id is not null and ts >= 1750 and ts <= 1751) and not (user_id = 1 and user_id is not null and ts >= 1752 and ts <= 1753) and not (user_id = 1 and user_id is not null and ts >= 1754 and ts <= 1755) and not (user_id = 1 and user_id is not null and ts >= 1756 and ts <= 1757) and not (user_id = 1 and user_id is not null and ts >= 1758 and ts <= 1759) and not (user_id = 1 and user_id is not null and ts >= 1760 and ts <= 1761) and not (user_id = 1 and user_id is not null and ts >= 1762 and ts <= 
1763) and not (user_id = 1 and user_id is not null and ts >= 1764 and ts <= 1765) and not (user_id = 1 and user_id is not null and ts >= 1766 and ts <= 1767) and not (user_id = 1 and user_id is not null and ts >= 1768 and ts <= 1769) and not (user_id = 1 and user_id is not null and ts >= 1770 and ts <= 1771) and not (user_id = 1 and user_id is not null and ts >= 1772 and ts <= 1773) and not (user_id = 1 and user_id is not null and ts >= 1774 and ts <= 1775) and not (user_id = 1 and user_id is not null and ts >= 1776 and ts <= 1777) and not (user_id = 1 and user_id is not null and ts >= 1778 and ts <= 1779) and not (user_id = 1 and user_id is not null and ts >= 1780 and ts <= 1781) and not (user_id = 1 and user_id is not null and ts >= 1782 and ts <= 1783) and not (user_id = 1 and user_id is not null and ts >= 1784 and ts <= 1785) and not (user_id = 1 and user_id is not null and ts >= 1786 and ts <= 1787) and not (user_id = 1 and user_id is not null and ts >= 1788 and ts <= 1789) and not (user_id = 1 and user_id is not null and ts >= 1790 and ts <= 1791) and not (user_id = 1 and user_id is not null and ts >= 1792 and ts <= 1793) and not (user_id = 1 and user_id is not null and ts >= 1794 and ts <= 1795) and not (user_id = 1 and user_id is not null and ts >= 1796 and ts <= 1797) and not (user_id = 1 and user_id is not null and ts >= 1798 and ts <= 1799) and not (user_id = 1 and user_id is not null and ts >= 1800 and ts <= 1801) and not (user_id = 1 and user_id is not null and ts >= 1802 and ts <= 1803) and not (user_id = 1 and user_id is not null and ts >= 1804 and ts <= 1805) and not (user_id = 1 and user_id is not null and ts >= 1806 and ts <= 1807) and not (user_id = 1 and user_id is not null and ts >= 1808 and ts <= 1809) and not (user_id = 1 and user_id is not null and ts >= 1810 and ts <= 1811) and not (user_id = 1 and user_id is not null and ts >= 1812 and ts <= 1813) and not (user_id = 1 and user_id is not null and ts >= 1814 and ts <= 1815) and not (user_id 
= 1 and user_id is not null and ts >= 1816 and ts <= 1817) and not (user_id = 1 and user_id is not null and ts >= 1818 and ts <= 1819) and not (user_id = 1 and user_id is not null and ts >= 1820 and ts <= 1821) and not (user_id = 1 and user_id is not null and ts >= 1822 and ts <= 1823) and not (user_id = 1 and user_id is not null and ts >= 1824 and ts <= 1825) and not (user_id = 1 and user_id is not null and ts >= 1826 and ts <= 1827) and not (user_id = 1 and user_id is not null and ts >= 1828 and ts <= 1829) and not (user_id = 1 and user_id is not null and ts >= 1830 and ts <= 1831) and not (user_id = 1 and user_id is not null and ts >= 1832 and ts <= 1833) and not (user_id = 1 and user_id is not null and ts >= 1834 and ts <= 1835) and not (user_id = 1 and user_id is not null and ts >= 1836 and ts <= 1837) and not (user_id = 1 and user_id is not null and ts >= 1838 and ts <= 1839) and not (user_id = 1 and user_id is not null and ts >= 1840 and ts <= 1841) and not (user_id = 1 and user_id is not null and ts >= 1842 and ts <= 1843) and not (user_id = 1 and user_id is not null and ts >= 1844 and ts <= 1845) and not (user_id = 1 and user_id is not null and ts >= 1846 and ts <= 1847) and not (user_id = 1 and user_id is not null and ts >= 1848 and ts <= 1849) and not (user_id = 1 and user_id is not null and ts >= 1850 and ts <= 1851) and not (user_id = 1 and user_id is not null and ts >= 1852 and ts <= 1853) and not (user_id = 1 and user_id is not null and ts >= 1854 and ts <= 1855) and not (user_id = 1 and user_id is not null and ts >= 1856 and ts <= 1857) and not (user_id = 1 and user_id is not null and ts >= 1858 and ts <= 1859) and not (user_id = 1 and user_id is not null and ts >= 1860 and ts <= 1861) and not (user_id = 1 and user_id is not null and ts >= 1862 and ts <= 1863) and not (user_id = 1 and user_id is not null and ts >= 1864 and ts <= 1865) and not (user_id = 1 and user_id is not null and ts >= 1866 and ts <= 1867) and not (user_id = 1 and user_id is not 
null and ts >= 1868 and ts <= 1869) and not (user_id = 1 and user_id is not null and ts >= 1870 and ts <= 1871) and not (user_id = 1 and user_id is not null and ts >= 1872 and ts <= 1873) and not (user_id = 1 and user_id is not null and ts >= 1874 and ts <= 1875) and not (user_id = 1 and user_id is not null and ts >= 1876 and ts <= 1877) and not (user_id = 1 and user_id is not null and ts >= 1878 and ts <= 1879) and not (user_id = 1 and user_id is not null and ts >= 1880 and ts <= 1881) and not (user_id = 1 and user_id is not null and ts >= 1882 and ts <= 1883) and not (user_id = 1 and user_id is not null and ts >= 1884 and ts <= 1885) and not (user_id = 1 and user_id is not null and ts >= 1886 and ts <= 1887) and not (user_id = 1 and user_id is not null and ts >= 1888 and ts <= 1889) and not (user_id = 1 and user_id is not null and ts >= 1890 and ts <= 1891) and not (user_id = 1 and user_id is not null and ts >= 1892 and ts <= 1893) and not (user_id = 1 and user_id is not null and ts >= 1894 and ts <= 1895) and not (user_id = 1 and user_id is not null and ts >= 1896 and ts <= 1897) and not (user_id = 1 and user_id is not null and ts >= 1898 and ts <= 1899) and not (user_id = 1 and user_id is not null and ts >= 1900 and ts <= 1901) and not (user_id = 1 and user_id is not null and ts >= 1902 and ts <= 1903) and not (user_id = 1 and user_id is not null and ts >= 1904 and ts <= 1905) and not (user_id = 1 and user_id is not null and ts >= 1906 and ts <= 1907) and not (user_id = 1 and user_id is not null and ts >= 1908 and ts <= 1909) and not (user_id = 1 and user_id is not null and ts >= 1910 and ts <= 1911) and not (user_id = 1 and user_id is not null and ts >= 1912 and ts <= 1913) and not (user_id = 1 and user_id is not null and ts >= 1914 and ts <= 1915) and not (user_id = 1 and user_id is not null and ts >= 1916 and ts <= 1917) and not (user_id = 1 and user_id is not null and ts >= 1918 and ts <= 1919) and not (user_id = 1 and user_id is not null and ts >= 1920 and 
ts <= 1921) and not (user_id = 1 and user_id is not null and ts >= 1922 and ts <= 1923) and not (user_id = 1 and user_id is not null and ts >= 1924 and ts <= 1925) and not (user_id = 1 and user_id is not null and ts >= 1926 and ts <= 1927) and not (user_id = 1 and user_id is not null and ts >= 1928 and ts <= 1929) and not (user_id = 1 and user_id is not null and ts >= 1930 and ts <= 1931) and not (user_id = 1 and user_id is not null and ts >= 1932 and ts <= 1933) and not (user_id = 1 and user_id is not null and ts >= 1934 and ts <= 1935) and not (user_id = 1 and user_id is not null and ts >= 1936 and ts <= 1937) and not (user_id = 1 and user_id is not null and ts >= 1938 and ts <= 1939) and not (user_id = 1 and user_id is not null and ts >= 1940 and ts <= 1941) and not (user_id = 1 and user_id is not null and ts >= 1942 and ts <= 1943) and not (user_id = 1 and user_id is not null and ts >= 1944 and ts <= 1945) and not (user_id = 1 and user_id is not null and ts >= 1946 and ts <= 1947) and not (user_id = 1 and user_id is not null and ts >= 1948 and ts <= 1949) and not (user_id = 1 and user_id is not null and ts >= 1950 and ts <= 1951) and not (user_id = 1 and user_id is not null and ts >= 1952 and ts <= 1953) and not (user_id = 1 and user_id is not null and ts >= 1954 and ts <= 1955) and not (user_id = 1 and user_id is not null and ts >= 1956 and ts <= 1957) and not (user_id = 1 and user_id is not null and ts >= 1958 and ts <= 1959) and not (user_id = 1 and user_id is not null and ts >= 1960 and ts <= 1961) and not (user_id = 1 and user_id is not null and ts >= 1962 and ts <= 1963) and not (user_id = 1 and user_id is not null and ts >= 1964 and ts <= 1965) and not (user_id = 1 and user_id is not null and ts >= 1966 and ts <= 1967) and not (user_id = 1 and user_id is not null and ts >= 1968 and ts <= 1969) and not (user_id = 1 and user_id is not null and ts >= 1970 and ts <= 1971) and not (user_id = 1 and user_id is not null and ts >= 1972 and ts <= 1973) and not 
(user_id = 1 and user_id is not null and ts >= 1974 and ts <= 1975) and not (user_id = 1 and user_id is not null and ts >= 1976 and ts <= 1977) and not (user_id = 1 and user_id is not null and ts >= 1978 and ts <= 1979) and not (user_id = 1 and user_id is not null and ts >= 1980 and ts <= 1981) and not (user_id = 1 and user_id is not null and ts >= 1982 and ts <= 1983) and not (user_id = 1 and user_id is not null and ts >= 1984 and ts <= 1985) and not (user_id = 1 and user_id is not null and ts >= 1986 and ts <= 1987) and not (user_id = 1 and user_id is not null and ts >= 1988 and ts <= 1989) and not (user_id = 1 and user_id is not null and ts >= 1990 and ts <= 1991) and not (user_id = 1 and user_id is not null and ts >= 1992 and ts <= 1993) and not (user_id = 1 and user_id is not null and ts >= 1994 and ts <= 1995) and not (user_id = 1 and user_id is not null and ts >= 1996 and ts <= 1997) and not (user_id = 1 and user_id is not null and ts >= 1998 and ts <= 1999) and not (user_id = 1 and user_id is not null and ts >= 11000 and ts <= 11001) and not (user_id = 1 and user_id is not null and ts >= 11002 and ts <= 11003) and not (user_id = 1 and user_id is not null and ts >= 11004 and ts <= 11005) and not (user_id = 1 and user_id is not null and ts >= 11006 and ts <= 11007) and not (user_id = 1 and user_id is not null and ts >= 11008 and ts <= 11009) and not (user_id = 1 and user_id is not null and ts >= 11010 and ts <= 11011) and not (user_id = 1 and user_id is not null and ts >= 11012 and ts <= 11013) and not (user_id = 1 and user_id is not null and ts >= 11014 and ts <= 11015) and not (user_id = 1 and user_id is not null and ts >= 11016 and ts <= 11017) and not (user_id = 1 and user_id is not null and ts >= 11018 and ts <= 11019) and not (user_id = 1 and user_id is not null and ts >= 11020 and ts <= 11021) and not (user_id = 1 and user_id is not null and ts >= 11022 and ts <= 11023) and not (user_id = 1 and user_id is not null and ts >= 11024 and ts <= 11025) and 
not (user_id = 1 and user_id is not null and ts >= 11026 and ts <= 11027) and not (user_id = 1 and user_id is not null and ts >= 11028 and ts <= 11029) and not (user_id = 1 and user_id is not null and ts >= 11030 and ts <= 11031) and not (user_id = 1 and user_id is not null and ts >= 11032 and ts <= 11033) and not (user_id = 1 and user_id is not null and ts >= 11034 and ts <= 11035) and not (user_id = 1 and user_id is not null and ts >= 11036 and ts <= 11037) and not (user_id = 1 and user_id is not null and ts >= 11038 and ts <= 11039) and not (user_id = 1 and user_id is not null and ts >= 11040 and ts <= 11041) and not (user_id = 1 and user_id is not null and ts >= 11042 and ts <= 11043) and not (user_id = 1 and user_id is not null and ts >= 11044 and ts <= 11045) and not (user_id = 1 and user_id is not null and ts >= 11046 and ts <= 11047) and not (user_id = 1 and user_id is not null and ts >= 11048 and ts <= 11049) and not (user_id = 1 and user_id is not null and ts >= 11050 and ts <= 11051) and not (user_id = 1 and user_id is not null and ts >= 11052 and ts <= 11053) and not (user_id = 1 and user_id is not null and ts >= 11054 and ts <= 11055) and not (user_id = 1 and user_id is not null and ts >= 11056 and ts <= 11057) and not (user_id = 1 and user_id is not null and ts >= 11058 and ts <= 11059) and not (user_id = 1 and user_id is not null and ts >= 11060 and ts <= 11061) and not (user_id = 1 and user_id is not null and ts >= 11062 and ts <= 11063) and not (user_id = 1 and user_id is not null and ts >= 11064 and ts <= 11065) and not (user_id = 1 and user_id is not null and ts >= 11066 and ts <= 11067) and not (user_id = 1 and user_id is not null and ts >= 11068 and ts <= 11069) and not (user_id = 1 and user_id is not null and ts >= 11070 and ts <= 11071) and not (user_id = 1 and user_id is not null and ts >= 11072 and ts <= 11073) and not (user_id = 1 and user_id is not null and ts >= 11074 and ts <= 11075) and not (user_id = 1 and user_id is not null and ts 
>= 11076 and ts <= 11077) and not (user_id = 1 and user_id is not null and ts >= 11078 and ts <= 11079) and not (user_id = 1 and user_id is not null and ts >= 11080 and ts <= 11081) and not (user_id = 1 and user_id is not null and ts >= 11082 and ts <= 11083) and not (user_id = 1 and user_id is not null and ts >= 11084 and ts <= 11085) and not (user_id = 1 and user_id is not null and ts >= 11086 and ts <= 11087) and not (user_id = 1 and user_id is not null and ts >= 11088 and ts <= 11089) and not (user_id = 1 and user_id is not null and ts >= 11090 and ts <= 11091) and not (user_id = 1 and user_id is not null and ts >= 11092 and ts <= 11093) and not (user_id = 1 and user_id is not null and ts >= 11094 and ts <= 11095) and not (user_id = 1 and user_id is not null and ts >= 11096 and ts <= 11097) and not (user_id = 1 and user_id is not null and ts >= 11098 and ts <= 11099) and not (user_id = 1 and user_id is not null and ts >= 11100 and ts <= 11101) and not (user_id = 1 and user_id is not null and ts >= 11102 and ts <= 11103) and not (user_id = 1 and user_id is not null and ts >= 11104 and ts <= 11105) and not (user_id = 1 and user_id is not null and ts >= 11106 and ts <= 11107) and not (user_id = 1 and user_id is not null and ts >= 11108 and ts <= 11109) and not (user_id = 1 and user_id is not null and ts >= 11110 and ts <= 11111) and not (user_id = 1 and user_id is not null and ts >= 11112 and ts <= 11113) and not (user_id = 1 and user_id is not null and ts >= 11114 and ts <= 11115) and not (user_id = 1 and user_id is not null and ts >= 11116 and ts <= 11117) and not (user_id = 1 and user_id is not null and ts >= 11118 and ts <= 11119) and not (user_id = 1 and user_id is not null and ts >= 11120 and ts <= 11121) and not (user_id = 1 and user_id is not null and ts >= 11122 and ts <= 11123) and not (user_id = 1 and user_id is not null and ts >= 11124 and ts <= 11125) and not (user_id = 1 and user_id is not null and ts >= 11126 and ts <= 11127) and not (user_id = 1 
and user_id is not null and ts >= 11128 and ts <= 11129) and not (user_id = 1 and user_id is not null and ts >= 11130 and ts <= 11131) and not (user_id = 1 and user_id is not null and ts >= 11132 and ts <= 11133) and not (user_id = 1 and user_id is not null and ts >= 11134 and ts <= 11135) and not (user_id = 1 and user_id is not null and ts >= 11136 and ts <= 11137) and not (user_id = 1 and user_id is not null and ts >= 11138 and ts <= 11139) and not (user_id = 1 and user_id is not null and ts >= 11140 and ts <= 11141) and not (user_id = 1 and user_id is not null and ts >= 11142 and ts <= 11143) and not (user_id = 1 and user_id is not null and ts >= 11144 and ts <= 11145) and not (user_id = 1 and user_id is not null and ts >= 11146 and ts <= 11147) and not (user_id = 1 and user_id is not null and ts >= 11148 and ts <= 11149) and not (user_id = 1 and user_id is not null and ts >= 11150 and ts <= 11151) and not (user_id = 1 and user_id is not null and ts >= 11152 and ts <= 11153) and not (user_id = 1 and user_id is not null and ts >= 11154 and ts <= 11155) and not (user_id = 1 and user_id is not null and ts >= 11156 and ts <= 11157) and not (user_id = 1 and user_id is not null and ts >= 11158 and ts <= 11159) and not (user_id = 1 and user_id is not null and ts >= 11160 and ts <= 11161) and not (user_id = 1 and user_id is not null and ts >= 11162 and ts <= 11163) and not (user_id = 1 and user_id is not null and ts >= 11164 and ts <= 11165) and not (user_id = 1 and user_id is not null and ts >= 11166 and ts <= 11167) and not (user_id = 1 and user_id is not null and ts >= 11168 and ts <= 11169) and not (user_id = 1 and user_id is not null and ts >= 11170 and ts <= 11171) and not (user_id = 1 and user_id is not null and ts >= 11172 and ts <= 11173) and not (user_id = 1 and user_id is not null and ts >= 11174 and ts <= 11175) and not (user_id = 1 and user_id is not null and ts >= 11176 and ts <= 11177) and not (user_id = 1 and user_id is not null and ts >= 11178 and ts <= 
11179) and not (user_id = 1 and user_id is not null and ts >= 11180 and ts <= 11181) and not (user_id = 1 and user_id is not null and ts >= 11182 and ts <= 11183) and not (user_id = 1 and user_id is not null and ts >= 11184 and ts <= 11185) and not (user_id = 1 and user_id is not null and ts >= 11186 and ts <= 11187) and not (user_id = 1 and user_id is not null and ts >= 11188 and ts <= 11189) and not (user_id = 1 and user_id is not null and ts >= 11190 and ts <= 11191) and not (user_id = 1 and user_id is not null and ts >= 11192 and ts <= 11193) and not (user_id = 1 and user_id is not null and ts >= 11194 and ts <= 11195) and not (user_id = 1 and user_id is not null and ts >= 11196 and ts <= 11197) and not (user_id = 1 and user_id is not null and ts >= 11198 and ts <= 11199) and not (user_id = 1 and user_id is not null and ts >= 11200 and ts <= 11201) and not (user_id = 1 and user_id is not null and ts >= 11202 and ts <= 11203) and not (user_id = 1 and user_id is not null and ts >= 11204 and ts <= 11205) and not (user_id = 1 and user_id is not null and ts >= 11206 and ts <= 11207) and not (user_id = 1 and user_id is not null and ts >= 11208 and ts <= 11209) and not (user_id = 1 and user_id is not null and ts >= 11210 and ts <= 11211) and not (user_id = 1 and user_id is not null and ts >= 11212 and ts <= 11213) and not (user_id = 1 and user_id is not null and ts >= 11214 and ts <= 11215) and not (user_id = 1 and user_id is not null and ts >= 11216 and ts <= 11217) and not (user_id = 1 and user_id is not null and ts >= 11218 and ts <= 11219) and not (user_id = 1 and user_id is not null and ts >= 11220 and ts <= 11221) and not (user_id = 1 and user_id is not null and ts >= 11222 and ts <= 11223) and not (user_id = 1 and user_id is not null and ts >= 11224 and ts <= 11225) and not (user_id = 1 and user_id is not null and ts >= 11226 and ts <= 11227) and not (user_id = 1 and user_id is not null and ts >= 11228 and ts <= 11229) and not (user_id = 1 and user_id is not 
null and ts >= 11230 and ts <= 11231) and not (user_id = 1 and user_id is not null and ts >= 11232 and ts <= 11233) and not (user_id = 1 and user_id is not null and ts >= 11234 and ts <= 11235) and not (user_id = 1 and user_id is not null and ts >= 11236 and ts <= 11237) and not (user_id = 1 and user_id is not null and ts >= 11238 and ts <= 11239) and not (user_id = 1 and user_id is not null and ts >= 11240 and ts <= 11241) and not (user_id = 1 and user_id is not null and ts >= 11242 and ts <= 11243) and not (user_id = 1 and user_id is not null and ts >= 11244 and ts <= 11245) and not (user_id = 1 and user_id is not null and ts >= 11246 and ts <= 11247) and not (user_id = 1 and user_id is not null and ts >= 11248 and ts <= 11249) and not (user_id = 1 and user_id is not null and ts >= 11250 and ts <= 11251) and not (user_id = 1 and user_id is not null and ts >= 11252 and ts <= 11253) and not (user_id = 1 and user_id is not null and ts >= 11254 and ts <= 11255) and not (user_id = 1 and user_id is not null and ts >= 11256 and ts <= 11257) and not (user_id = 1 and user_id is not null and ts >= 11258 and ts <= 11259) and not (user_id = 1 and user_id is not null and ts >= 11260 and ts <= 11261) and not (user_id = 1 and user_id is not null and ts >= 11262 and ts <= 11263) and not (user_id = 1 and user_id is not null and ts >= 11264 and ts <= 11265) and not (user_id = 1 and user_id is not null and ts >= 11266 and ts <= 11267) and not (user_id = 1 and user_id is not null and ts >= 11268 and ts <= 11269) and not (user_id = 1 and user_id is not null and ts >= 11270 and ts <= 11271) and not (user_id = 1 and user_id is not null and ts >= 11272 and ts <= 11273) and not (user_id = 1 and user_id is not null and ts >= 11274 and ts <= 11275) and not (user_id = 1 and user_id is not null and ts >= 11276 and ts <= 11277) and not (user_id = 1 and user_id is not null and ts >= 11278 and ts <= 11279) and not (user_id = 1 and user_id is not null and ts >= 11280 and ts <= 11281) and not 
(user_id = 1 and user_id is not null and ts >= 11282 and ts <= 11283) and not (user_id = 1 and user_id is not null and ts >= 11284 and ts <= 11285) and not (user_id = 1 and user_id is not null and ts >= 11286 and ts <= 11287) and not (user_id = 1 and user_id is not null and ts >= 11288 and ts <= 11289) and not (user_id = 1 and user_id is not null and ts >= 11290 and ts <= 11291) and not (user_id = 1 and user_id is not null and ts >= 11292 and ts <= 11293) and not (user_id = 1 and user_id is not null and ts >= 11294 and ts <= 11295) and not (user_id = 1 and user_id is not null and ts >= 11296 and ts <= 11297) and not (user_id = 1 and user_id is not null and ts >= 11298 and ts <= 11299) and not (user_id = 1 and user_id is not null and ts >= 11300 and ts <= 11301) and not (user_id = 1 and user_id is not null and ts >= 11302 and ts <= 11303) and not (user_id = 1 and user_id is not null and ts >= 11304 and ts <= 11305) and not (user_id = 1 and user_id is not null and ts >= 11306 and ts <= 11307) and not (user_id = 1 and user_id is not null and ts >= 11308 and ts <= 11309) and not (user_id = 1 and user_id is not null and ts >= 11310 and ts <= 11311) and not (user_id = 1 and user_id is not null and ts >= 11312 and ts <= 11313) and not (user_id = 1 and user_id is not null and ts >= 11314 and ts <= 11315) and not (user_id = 1 and user_id is not null and ts >= 11316 and ts <= 11317) and not (user_id = 1 and user_id is not null and ts >= 11318 and ts <= 11319) and not (user_id = 1 and user_id is not null and ts >= 11320 and ts <= 11321) and not (user_id = 1 and user_id is not null and ts >= 11322 and ts <= 11323) and not (user_id = 1 and user_id is not null and ts >= 11324 and ts <= 11325) and not (user_id = 1 and user_id is not null and ts >= 11326 and ts <= 11327) and not (user_id = 1 and user_id is not null and ts >= 11328 and ts <= 11329) and not (user_id = 1 and user_id is not null and ts >= 11330 and ts <= 11331) and not (user_id = 1 and user_id is not null and ts >= 
11332 and ts <= 11333) and not (user_id = 1 and user_id is not null and ts >= 11334 and ts <= 11335) and not (user_id = 1 and user_id is not null and ts >= 11336 and ts <= 11337) and not (user_id = 1 and user_id is not null and ts >= 11338 and ts <= 11339) and not (user_id = 1 and user_id is not null and ts >= 11340 and ts <= 11341) and not (user_id = 1 and user_id is not null and ts >= 11342 and ts <= 11343) and not (user_id = 1 and user_id is not null and ts >= 11344 and ts <= 11345) and not (user_id = 1 and user_id is not null and ts >= 11346 and ts <= 11347) and not (user_id = 1 and user_id is not null and ts >= 11348 and ts <= 11349) and not (user_id = 1 and user_id is not null and ts >= 11350 and ts <= 11351) and not (user_id = 1 and user_id is not null and ts >= 11352 and ts <= 11353) and not (user_id = 1 and user_id is not null and ts >= 11354 and ts <= 11355) and not (user_id = 1 and user_id is not null and ts >= 11356 and ts <= 11357) and not (user_id = 1 and user_id is not null and ts >= 11358 and ts <= 11359) and not (user_id = 1 and user_id is not null and ts >= 11360 and ts <= 11361) and not (user_id = 1 and user_id is not null and ts >= 11362 and ts <= 11363) and not (user_id = 1 and user_id is not null and ts >= 11364 and ts <= 11365) and not (user_id = 1 and user_id is not null and ts >= 11366 and ts <= 11367) and not (user_id = 1 and user_id is not null and ts >= 11368 and ts <= 11369) and not (user_id = 1 and user_id is not null and ts >= 11370 and ts <= 11371) and not (user_id = 1 and user_id is not null and ts >= 11372 and ts <= 11373) and not (user_id = 1 and user_id is not null and ts >= 11374 and ts <= 11375) and not (user_id = 1 and user_id is not null and ts >= 11376 and ts <= 11377) and not (user_id = 1 and user_id is not null and ts >= 11378 and ts <= 11379) and not (user_id = 1 and user_id is not null and ts >= 11380 and ts <= 11381) and not (user_id = 1 and user_id is not null and ts >= 11382 and ts <= 11383) and not (user_id = 1 and 
user_id is not null and ts >= 11384 and ts <= 11385) and not (user_id = 1 and user_id is not null and ts >= 11386 and ts <= 11387) and not (user_id = 1 and user_id is not null and ts >= 11388 and ts <= 11389) and not (user_id = 1 and user_id is not null and ts >= 11390 and ts <= 11391) and not (user_id = 1 and user_id is not null and ts >= 11392 and ts <= 11393) and not (user_id = 1 and user_id is not null and ts >= 11394 and ts <= 11395) and not (user_id = 1 and user_id is not null and ts >= 11396 and ts <= 11397) and not (user_id = 1 and user_id is not null and ts >= 11398 and ts <= 11399) and not (user_id = 1 and user_id is not null and ts >= 11400 and ts <= 11401) and not (user_id = 1 and user_id is not null and ts >= 11402 and ts <= 11403) and not (user_id = 1 and user_id is not null and ts >= 11404 and ts <= 11405) and not (user_id = 1 and user_id is not null and ts >= 11406 and ts <= 11407) and not (user_id = 1 and user_id is not null and ts >= 11408 and ts <= 11409) and not (user_id = 1 and user_id is not null and ts >= 11410 and ts <= 11411) and not (user_id = 1 and user_id is not null and ts >= 11412 and ts <= 11413) and not (user_id = 1 and user_id is not null and ts >= 11414 and ts <= 11415) and not (user_id = 1 and user_id is not null and ts >= 11416 and ts <= 11417) and not (user_id = 1 and user_id is not null and ts >= 11418 and ts <= 11419) and not (user_id = 1 and user_id is not null and ts >= 11420 and ts <= 11421) and not (user_id = 1 and user_id is not null and ts >= 11422 and ts <= 11423) and not (user_id = 1 and user_id is not null and ts >= 11424 and ts <= 11425) and not (user_id = 1 and user_id is not null and ts >= 11426 and ts <= 11427) and not (user_id = 1 and user_id is not null and ts >= 11428 and ts <= 11429) and not (user_id = 1 and user_id is not null and ts >= 11430 and ts <= 11431) and not (user_id = 1 and user_id is not null and ts >= 11432 and ts <= 11433) and not (user_id = 1 and user_id is not null and ts >= 11434 and ts <= 
11435) and not (user_id = 1 and user_id is not null and ts >= 11436 and ts <= 11437) and not (user_id = 1 and user_id is not null and ts >= 11438 and ts <= 11439) and not (user_id = 1 and user_id is not null and ts >= 11440 and ts <= 11441) and not (user_id = 1 and user_id is not null and ts >= 11442 and ts <= 11443) and not (user_id = 1 and user_id is not null and ts >= 11444 and ts <= 11445) and not (user_id = 1 and user_id is not null and ts >= 11446 and ts <= 11447) and not (user_id = 1 and user_id is not null and ts >= 11448 and ts <= 11449) and not (user_id = 1 and user_id is not null and ts >= 11450 and ts <= 11451) and not (user_id = 1 and user_id is not null and ts >= 11452 and ts <= 11453) and not (user_id = 1 and user_id is not null and ts >= 11454 and ts <= 11455) and not (user_id = 1 and user_id is not null and ts >= 11456 and ts <= 11457) and not (user_id = 1 and user_id is not null and ts >= 11458 and ts <= 11459) and not (user_id = 1 and user_id is not null and ts >= 11460 and ts <= 11461) and not (user_id = 1 and user_id is not null and ts >= 11462 and ts <= 11463) and not (user_id = 1 and user_id is not null and ts >= 11464 and ts <= 11465) and not (user_id = 1 and user_id is not null and ts >= 11466 and ts <= 11467) and not (user_id = 1 and user_id is not null and ts >= 11468 and ts <= 11469) and not (user_id = 1 and user_id is not null and ts >= 11470 and ts <= 11471) and not (user_id = 1 and user_id is not null and ts >= 11472 and ts <= 11473) and not (user_id = 1 and user_id is not null and ts >= 11474 and ts <= 11475) and not (user_id = 1 and user_id is not null and ts >= 11476 and ts <= 11477) and not (user_id = 1 and user_id is not null and ts >= 11478 and ts <= 11479) and not (user_id = 1 and user_id is not null and ts >= 11480 and ts <= 11481) and not (user_id = 1 and user_id is not null and ts >= 11482 and ts <= 11483) and not (user_id = 1 and user_id is not null and ts >= 11484 and ts <= 11485) and not (user_id = 1 and user_id is not 
null and ts >= 11486 and ts <= 11487) and not (user_id = 1 and user_id is not null and ts >= 11488 and ts <= 11489) and not (user_id = 1 and user_id is not null and ts >= 11490 and ts <= 11491) and not (user_id = 1 and user_id is not null and ts >= 11492 and ts <= 11493) and not (user_id = 1 and user_id is not null and ts >= 11494 and ts <= 11495) and not (user_id = 1 and user_id is not null and ts >= 11496 and ts <= 11497) and not (user_id = 1 and user_id is not null and ts >= 11498 and ts <= 11499) and not (user_id = 1 and user_id is not null and ts >= 11500 and ts <= 11501) and not (user_id = 1 and user_id is not null and ts >= 11502 and ts <= 11503) and not (user_id = 1 and user_id is not null and ts >= 11504 and ts <= 11505) and not (user_id = 1 and user_id is not null and ts >= 11506 and ts <= 11507) and not (user_id = 1 and user_id is not null and ts >= 11508 and ts <= 11509) and not (user_id = 1 and user_id is not null and ts >= 11510 and ts <= 11511) and not (user_id = 1 and user_id is not null and ts >= 11512 and ts <= 11513) and not (user_id = 1 and user_id is not null and ts >= 11514 and ts <= 11515) and not (user_id = 1 and user_id is not null and ts >= 11516 and ts <= 11517) and not (user_id = 1 and user_id is not null and ts >= 11518 and ts <= 11519) and not (user_id = 1 and user_id is not null and ts >= 11520 and ts <= 11521) and not (user_id = 1 and user_id is not null and ts >= 11522 and ts <= 11523) and not (user_id = 1 and user_id is not null and ts >= 11524 and ts <= 11525) and not (user_id = 1 and user_id is not null and ts >= 11526 and ts <= 11527) and not (user_id = 1 and user_id is not null and ts >= 11528 and ts <= 11529) and not (user_id = 1 and user_id is not null and ts >= 11530 and ts <= 11531) and not (user_id = 1 and user_id is not null and ts >= 11532 and ts <= 11533) and not (user_id = 1 and user_id is not null and ts >= 11534 and ts <= 11535) and not (user_id = 1 and user_id is not null and ts >= 11536 and ts <= 11537) and not 
(user_id = 1 and user_id is not null and ts >= 11538 and ts <= 11539) and not (user_id = 1 and user_id is not null and ts >= 11540 and ts <= 11541) and not (user_id = 1 and user_id is not null and ts >= 11542 and ts <= 11543) and not (user_id = 1 and user_id is not null and ts >= 11544 and ts <= 11545) and not (user_id = 1 and user_id is not null and ts >= 11546 and ts <= 11547) and not (user_id = 1 and user_id is not null and ts >= 11548 and ts <= 11549) and not (user_id = 1 and user_id is not null and ts >= 11550 and ts <= 11551) and not (user_id = 1 and user_id is not null and ts >= 11552 and ts <= 11553) and not (user_id = 1 and user_id is not null and ts >= 11554 and ts <= 11555) and not (user_id = 1 and user_id is not null and ts >= 11556 and ts <= 11557) and not (user_id = 1 and user_id is not null and ts >= 11558 and ts <= 11559) and not (user_id = 1 and user_id is not null and ts >= 11560 and ts <= 11561) and not (user_id = 1 and user_id is not null and ts >= 11562 and ts <= 11563) and not (user_id = 1 and user_id is not null and ts >= 11564 and ts <= 11565) and not (user_id = 1 and user_id is not null and ts >= 11566 and ts <= 11567) and not (user_id = 1 and user_id is not null and ts >= 11568 and ts <= 11569) and not (user_id = 1 and user_id is not null and ts >= 11570 and ts <= 11571) and not (user_id = 1 and user_id is not null and ts >= 11572 and ts <= 11573) and not (user_id = 1 and user_id is not null and ts >= 11574 and ts <= 11575) and not (user_id = 1 and user_id is not null and ts >= 11576 and ts <= 11577) and not (user_id = 1 and user_id is not null and ts >= 11578 and ts <= 11579) and not (user_id = 1 and user_id is not null and ts >= 11580 and ts <= 11581) and not (user_id = 1 and user_id is not null and ts >= 11582 and ts <= 11583) and not (user_id = 1 and user_id is not null and ts >= 11584 and ts <= 11585) and not (user_id = 1 and user_id is not null and ts >= 11586 and ts <= 11587) and not (user_id = 1 and user_id is not null and ts >= 
11588 and ts <= 11589) and not (user_id = 1 and user_id is not null and ts >= 11590 and ts <= 11591) and not (user_id = 1 and user_id is not null and ts >= 11592 and ts <= 11593) and not (user_id = 1 and user_id is not null and ts >= 11594 and ts <= 11595) and not (user_id = 1 and user_id is not null and ts >= 11596 and ts <= 11597) and not (user_id = 1 and user_id is not null and ts >= 11598 and ts <= 11599) and not (user_id = 1 and user_id is not null and ts >= 11600 and ts <= 11601) and not (user_id = 1 and user_id is not null and ts >= 11602 and ts <= 11603) and not (user_id = 1 and user_id is not null and ts >= 11604 and ts <= 11605) and not (user_id = 1 and user_id is not null and ts >= 11606 and ts <= 11607) and not (user_id = 1 and user_id is not null and ts >= 11608 and ts <= 11609) and not (user_id = 1 and user_id is not null and ts >= 11610 and ts <= 11611) and not (user_id = 1 and user_id is not null and ts >= 11612 and ts <= 11613) and not (user_id = 1 and user_id is not null and ts >= 11614 and ts <= 11615) and not (user_id = 1 and user_id is not null and ts >= 11616 and ts <= 11617) and not (user_id = 1 and user_id is not null and ts >= 11618 and ts <= 11619) and not (user_id = 1 and user_id is not null and ts >= 11620 and ts <= 11621) and not (user_id = 1 and user_id is not null and ts >= 11622 and ts <= 11623) and not (user_id = 1 and user_id is not null and ts >= 11624 and ts <= 11625) and not (user_id = 1 and user_id is not null and ts >= 11626 and ts <= 11627) and not (user_id = 1 and user_id is not null and ts >= 11628 and ts <= 11629) and not (user_id = 1 and user_id is not null and ts >= 11630 and ts <= 11631) and not (user_id = 1 and user_id is not null and ts >= 11632 and ts <= 11633) and not (user_id = 1 and user_id is not null and ts >= 11634 and ts <= 11635) and not (user_id = 1 and user_id is not null and ts >= 11636 and ts <= 11637) and not (user_id = 1 and user_id is not null and ts >= 11638 and ts <= 11639) and not (user_id = 1 and 
user_id is not null and ts >= 11640 and ts <= 11641) and not (user_id = 1 and user_id is not null and ts >= 11642 and ts <= 11643) and not (user_id = 1 and user_id is not null and ts >= 11644 and ts <= 11645) and not (user_id = 1 and user_id is not null and ts >= 11646 and ts <= 11647) and not (user_id = 1 and user_id is not null and ts >= 11648 and ts <= 11649) and not (user_id = 1 and user_id is not null and ts >= 11650 and ts <= 11651) and not (user_id = 1 and user_id is not null and ts >= 11652 and ts <= 11653) and not (user_id = 1 and user_id is not null and ts >= 11654 and ts <= 11655) and not (user_id = 1 and user_id is not null and ts >= 11656 and ts <= 11657) and not (user_id = 1 and user_id is not null and ts >= 11658 and ts <= 11659) and not (user_id = 1 and user_id is not null and ts >= 11660 and ts <= 11661) and not (user_id = 1 and user_id is not null and ts >= 11662 and ts <= 11663) and not (user_id = 1 and user_id is not null and ts >= 11664 and ts <= 11665) and not (user_id = 1 and user_id is not null and ts >= 11666 and ts <= 11667) and not (user_id = 1 and user_id is not null and ts >= 11668 and ts <= 11669) and not (user_id = 1 and user_id is not null and ts >= 11670 and ts <= 11671) and not (user_id = 1 and user_id is not null and ts >= 11672 and ts <= 11673) and not (user_id = 1 and user_id is not null and ts >= 11674 and ts <= 11675) and not (user_id = 1 and user_id is not null and ts >= 11676 and ts <= 11677) and not (user_id = 1 and user_id is not null and ts >= 11678 and ts <= 11679) and not (user_id = 1 and user_id is not null and ts >= 11680 and ts <= 11681) and not (user_id = 1 and user_id is not null and ts >= 11682 and ts <= 11683) and not (user_id = 1 and user_id is not null and ts >= 11684 and ts <= 11685) and not (user_id = 1 and user_id is not null and ts >= 11686 and ts <= 11687) and not (user_id = 1 and user_id is not null and ts >= 11688 and ts <= 11689) and not (user_id = 1 and user_id is not null and ts >= 11690 and ts <= 
11691) and not (user_id = 1 and user_id is not null and ts >= 11692 and ts <= 11693) and not (user_id = 1 and user_id is not null and ts >= 11694 and ts <= 11695) and not (user_id = 1 and user_id is not null and ts >= 11696 and ts <= 11697) and not (user_id = 1 and user_id is not null and ts >= 11698 and ts <= 11699) and not (user_id = 1 and user_id is not null and ts >= 11700 and ts <= 11701) and not (user_id = 1 and user_id is not null and ts >= 11702 and ts <= 11703) and not (user_id = 1 and user_id is not null and ts >= 11704 and ts <= 11705) and not (user_id = 1 and user_id is not null and ts >= 11706 and ts <= 11707) and not (user_id = 1 and user_id is not null and ts >= 11708 and ts <= 11709) and not (user_id = 1 and user_id is not null and ts >= 11710 and ts <= 11711) and not (user_id = 1 and user_id is not null and ts >= 11712 and ts <= 11713) and not (user_id = 1 and user_id is not null and ts >= 11714 and ts <= 11715) and not (user_id = 1 and user_id is not null and ts >= 11716 and ts <= 11717) and not (user_id = 1 and user_id is not null and ts >= 11718 and ts <= 11719) and not (user_id = 1 and user_id is not null and ts >= 11720 and ts <= 11721) and not (user_id = 1 and user_id is not null and ts >= 11722 and ts <= 11723) and not (user_id = 1 and user_id is not null and ts >= 11724 and ts <= 11725) and not (user_id = 1 and user_id is not null and ts >= 11726 and ts <= 11727) and not (user_id = 1 and user_id is not null and ts >= 11728 and ts <= 11729) and not (user_id = 1 and user_id is not null and ts >= 11730 and ts <= 11731) and not (user_id = 1 and user_id is not null and ts >= 11732 and ts <= 11733) and not (user_id = 1 and user_id is not null and ts >= 11734 and ts <= 11735) and not (user_id = 1 and user_id is not null and ts >= 11736 and ts <= 11737) and not (user_id = 1 and user_id is not null and ts >= 11738 and ts <= 11739) and not (user_id = 1 and user_id is not null and ts >= 11740 and ts <= 11741) and not (user_id = 1 and user_id is not 
null and ts >= 11742 and ts <= 11743) and not (user_id = 1 and user_id is not null and ts >= 11744 and ts <= 11745) and not (user_id = 1 and user_id is not null and ts >= 11746 and ts <= 11747) and not (user_id = 1 and user_id is not null and ts >= 11748 and ts <= 11749) and not (user_id = 1 and user_id is not null and ts >= 11750 and ts <= 11751) and not (user_id = 1 and user_id is not null and ts >= 11752 and ts <= 11753) and not (user_id = 1 and user_id is not null and ts >= 11754 and ts <= 11755) and not (user_id = 1 and user_id is not null and ts >= 11756 and ts <= 11757) and not (user_id = 1 and user_id is not null and ts >= 11758 and ts <= 11759) and not (user_id = 1 and user_id is not null and ts >= 11760 and ts <= 11761) and not (user_id = 1 and user_id is not null and ts >= 11762 and ts <= 11763) and not (user_id = 1 and user_id is not null and ts >= 11764 and ts <= 11765) and not (user_id = 1 and user_id is not null and ts >= 11766 and ts <= 11767) and not (user_id = 1 and user_id is not null and ts >= 11768 and ts <= 11769) and not (user_id = 1 and user_id is not null and ts >= 11770 and ts <= 11771) and not (user_id = 1 and user_id is not null and ts >= 11772 and ts <= 11773) and not (user_id = 1 and user_id is not null and ts >= 11774 and ts <= 11775) and not (user_id = 1 and user_id is not null and ts >= 11776 and ts <= 11777) and not (user_id = 1 and user_id is not null and ts >= 11778 and ts <= 11779) and not (user_id = 1 and user_id is not null and ts >= 11780 and ts <= 11781) and not (user_id = 1 and user_id is not null and ts >= 11782 and ts <= 11783) and not (user_id = 1 and user_id is not null and ts >= 11784 and ts <= 11785) and not (user_id = 1 and user_id is not null and ts >= 11786 and ts <= 11787) and not (user_id = 1 and user_id is not null and ts >= 11788 and ts <= 11789) and not (user_id = 1 and user_id is not null and ts >= 11790 and ts <= 11791) and not (user_id = 1 and user_id is not null and ts >= 11792 and ts <= 11793) and not 
(user_id = 1 and user_id is not null and ts >= 11794 and ts <= 11795) and not (user_id = 1 and user_id is not null and ts >= 11796 and ts <= 11797) and not (user_id = 1 and user_id is not null and ts >= 11798 and ts <= 11799) and not (user_id = 1 and user_id is not null and ts >= 11800 and ts <= 11801) and not (user_id = 1 and user_id is not null and ts >= 11802 and ts <= 11803) and not (user_id = 1 and user_id is not null and ts >= 11804 and ts <= 11805) and not (user_id = 1 and user_id is not null and ts >= 11806 and ts <= 11807) and not (user_id = 1 and user_id is not null and ts >= 11808 and ts <= 11809) and not (user_id = 1 and user_id is not null and ts >= 11810 and ts <= 11811) and not (user_id = 1 and user_id is not null and ts >= 11812 and ts <= 11813) and not (user_id = 1 and user_id is not null and ts >= 11814 and ts <= 11815) and not (user_id = 1 and user_id is not null and ts >= 11816 and ts <= 11817) and not (user_id = 1 and user_id is not null and ts >= 11818 and ts <= 11819) and not (user_id = 1 and user_id is not null and ts >= 11820 and ts <= 11821) and not (user_id = 1 and user_id is not null and ts >= 11822 and ts <= 11823) and not (user_id = 1 and user_id is not null and ts >= 11824 and ts <= 11825) and not (user_id = 1 and user_id is not null and ts >= 11826 and ts <= 11827) and not (user_id = 1 and user_id is not null and ts >= 11828 and ts <= 11829) and not (user_id = 1 and user_id is not null and ts >= 11830 and ts <= 11831) and not (user_id = 1 and user_id is not null and ts >= 11832 and ts <= 11833) and not (user_id = 1 and user_id is not null and ts >= 11834 and ts <= 11835) and not (user_id = 1 and user_id is not null and ts >= 11836 and ts <= 11837) and not (user_id = 1 and user_id is not null and ts >= 11838 and ts <= 11839) and not (user_id = 1 and user_id is not null and ts >= 11840 and ts <= 11841) and not (user_id = 1 and user_id is not null and ts >= 11842 and ts <= 11843) and not (user_id = 1 and user_id is not null and ts >= 
11844 and ts <= 11845) and not (user_id = 1 and user_id is not null and ts >= 11846 and ts <= 11847) and not (user_id = 1 and user_id is not null and ts >= 11848 and ts <= 11849) and not (user_id = 1 and user_id is not null and ts >= 11850 and ts <= 11851) and not (user_id = 1 and user_id is not null and ts >= 11852 and ts <= 11853) and not (user_id = 1 and user_id is not null and ts >= 11854 and ts <= 11855) and not (user_id = 1 and user_id is not null and ts >= 11856 and ts <= 11857) and not (user_id = 1 and user_id is not null and ts >= 11858 and ts <= 11859) and not (user_id = 1 and user_id is not null and ts >= 11860 and ts <= 11861) and not (user_id = 1 and user_id is not null and ts >= 11862 and ts <= 11863) and not (user_id = 1 and user_id is not null and ts >= 11864 and ts <= 11865) and not (user_id = 1 and user_id is not null and ts >= 11866 and ts <= 11867) and not (user_id = 1 and user_id is not null and ts >= 11868 and ts <= 11869) and not (user_id = 1 and user_id is not null and ts >= 11870 and ts <= 11871) and not (user_id = 1 and user_id is not null and ts >= 11872 and ts <= 11873) and not (user_id = 1 and user_id is not null and ts >= 11874 and ts <= 11875) and not (user_id = 1 and user_id is not null and ts >= 11876 and ts <= 11877) and not (user_id = 1 and user_id is not null and ts >= 11878 and ts <= 11879) and not (user_id = 1 and user_id is not null and ts >= 11880 and ts <= 11881) and not (user_id = 1 and user_id is not null and ts >= 11882 and ts <= 11883) and not (user_id = 1 and user_id is not null and ts >= 11884 and ts <= 11885) and not (user_id = 1 and user_id is not null and ts >= 11886 and ts <= 11887) and not (user_id = 1 and user_id is not null and ts >= 11888 and ts <= 11889) and not (user_id = 1 and user_id is not null and ts >= 11890 and ts <= 11891) and not (user_id = 1 and user_id is not null and ts >= 11892 and ts <= 11893) and not (user_id = 1 and user_id is not null and ts >= 11894 and ts <= 11895) and not (user_id = 1 and 
user_id is not null and ts >= 11896 and ts <= 11897) and not (user_id = 1 and user_id is not null and ts >= 11898 and ts <= 11899) and not (user_id = 1 and user_id is not null and ts >= 11900 and ts <= 11901) and not (user_id = 1 and user_id is not null and ts >= 11902 and ts <= 11903) and not (user_id = 1 and user_id is not null and ts >= 11904 and ts <= 11905) and not (user_id = 1 and user_id is not null and ts >= 11906 and ts <= 11907) and not (user_id = 1 and user_id is not null and ts >= 11908 and ts <= 11909) and not (user_id = 1 and user_id is not null and ts >= 11910 and ts <= 11911) and not (user_id = 1 and user_id is not null and ts >= 11912 and ts <= 11913) and not (user_id = 1 and user_id is not null and ts >= 11914 and ts <= 11915) and not (user_id = 1 and user_id is not null and ts >= 11916 and ts <= 11917) and not (user_id = 1 and user_id is not null and ts >= 11918 and ts <= 11919) and not (user_id = 1 and user_id is not null and ts >= 11920 and ts <= 11921) and not (user_id = 1 and user_id is not null and ts >= 11922 and ts <= 11923) and not (user_id = 1 and user_id is not null and ts >= 11924 and ts <= 11925) and not (user_id = 1 and user_id is not null and ts >= 11926 and ts <= 11927) and not (user_id = 1 and user_id is not null and ts >= 11928 and ts <= 11929) and not (user_id = 1 and user_id is not null and ts >= 11930 and ts <= 11931) and not (user_id = 1 and user_id is not null and ts >= 11932 and ts <= 11933) and not (user_id = 1 and user_id is not null and ts >= 11934 and ts <= 11935) and not (user_id = 1 and user_id is not null and ts >= 11936 and ts <= 11937) and not (user_id = 1 and user_id is not null and ts >= 11938 and ts <= 11939) and not (user_id = 1 and user_id is not null and ts >= 11940 and ts <= 11941) and not (user_id = 1 and user_id is not null and ts >= 11942 and ts <= 11943) and not (user_id = 1 and user_id is not null and ts >= 11944 and ts <= 11945) and not (user_id = 1 and user_id is not null and ts >= 11946 and ts <= 
11947) and not (user_id = 1 and user_id is not null and ts >= 11948 and ts <= 11949) and not (user_id = 1 and user_id is not null and ts >= 11950 and ts <= 11951) and not (user_id = 1 and user_id is not null and ts >= 11952 and ts <= 11953) and not (user_id = 1 and user_id is not null and ts >= 11954 and ts <= 11955) and not (user_id = 1 and user_id is not null and ts >= 11956 and ts <= 11957) and not (user_id = 1 and user_id is not null and ts >= 11958 and ts <= 11959) and not (user_id = 1 and user_id is not null and ts >= 11960 and ts <= 11961) and not (user_id = 1 and user_id is not null and ts >= 11962 and ts <= 11963) and not (user_id = 1 and user_id is not null and ts >= 11964 and ts <= 11965) and not (user_id = 1 and user_id is not null and ts >= 11966 and ts <= 11967) and not (user_id = 1 and user_id is not null and ts >= 11968 and ts <= 11969) and not (user_id = 1 and user_id is not null and ts >= 11970 and ts <= 11971) and not (user_id = 1 and user_id is not null and ts >= 11972 and ts <= 11973) and not (user_id = 1 and user_id is not null and ts >= 11974 and ts <= 11975) and not (user_id = 1 and user_id is not null and ts >= 11976 and ts <= 11977) and not (user_id = 1 and user_id is not null and ts >= 11978 and ts <= 11979) and not (user_id = 1 and user_id is not null and ts >= 11980 and ts <= 11981) and not (user_id = 1 and user_id is not null and ts >= 11982 and ts <= 11983) and not (user_id = 1 and user_id is not null and ts >= 11984 and ts <= 11985) and not (user_id = 1 and user_id is not null and ts >= 11986 and ts <= 11987) and not (user_id = 1 and user_id is not null and ts >= 11988 and ts <= 11989) and not (user_id = 1 and user_id is not null and ts >= 11990 and ts <= 11991) and not (user_id = 1 and user_id is not null and ts >= 11992 and ts <= 11993) and ts >= 113898 and parent_id = 1 order by ts asc limit 100", + "Instructions": { + "OperatorType": "Limit", + "Count": "INT64(100)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": 
"Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1, ts, weight_string(ts) from `user` where 1 != 1", + "OrderBy": "(1|2) ASC", + "Query": "select 1, ts, weight_string(ts) from `user` where shard_key = 1 and is_removed = 1 and cmd in ('A', 'B', 'C') and (not user_id = 1 or not user_id is not null or not ts >= 1 or not ts <= 2) and (not user_id = 1 or not user_id is not null or not ts >= 12 or not ts <= 13) and (not user_id = 1 or not user_id is not null or not ts >= 14 or not ts <= 15) and (not user_id = 1 or not user_id is not null or not ts >= 16 or not ts <= 17) and (not user_id = 1 or not user_id is not null or not ts >= 18 or not ts <= 19) and (not user_id = 1 or not user_id is not null or not ts >= 110 or not ts <= 111) and (not user_id = 1 or not user_id is not null or not ts >= 112 or not ts <= 113) and (not user_id = 1 or not user_id is not null or not ts >= 114 or not ts <= 115) and (not user_id = 1 or not user_id is not null or not ts >= 116 or not ts <= 117) and (not user_id = 1 or not user_id is not null or not ts >= 118 or not ts <= 119) and (not user_id = 1 or not user_id is not null or not ts >= 120 or not ts <= 121) and (not user_id = 1 or not user_id is not null or not ts >= 122 or not ts <= 123) and (not user_id = 1 or not user_id is not null or not ts >= 124 or not ts <= 125) and (not user_id = 1 or not user_id is not null or not ts >= 126 or not ts <= 127) and (not user_id = 1 or not user_id is not null or not ts >= 128 or not ts <= 129) and (not user_id = 1 or not user_id is not null or not ts >= 130 or not ts <= 131) and (not user_id = 1 or not user_id is not null or not ts >= 132 or not ts <= 133) and (not user_id = 1 or not user_id is not null or not ts >= 134 or not ts <= 135) and (not user_id = 1 or not user_id is not null or not ts >= 136 or not ts <= 137) and (not user_id = 1 or not user_id is not null or not ts >= 138 or not ts <= 139) and (not user_id = 1 or not user_id is not null or not ts 
>= 140 or not ts <= 141) and (not user_id = 1 or not user_id is not null or not ts >= 142 or not ts <= 143) and (not user_id = 1 or not user_id is not null or not ts >= 144 or not ts <= 145) and (not user_id = 1 or not user_id is not null or not ts >= 146 or not ts <= 147) and (not user_id = 1 or not user_id is not null or not ts >= 148 or not ts <= 149) and (not user_id = 1 or not user_id is not null or not ts >= 150 or not ts <= 151) and (not user_id = 1 or not user_id is not null or not ts >= 152 or not ts <= 153) and (not user_id = 1 or not user_id is not null or not ts >= 154 or not ts <= 155) and (not user_id = 1 or not user_id is not null or not ts >= 156 or not ts <= 157) and (not user_id = 1 or not user_id is not null or not ts >= 158 or not ts <= 159) and (not user_id = 1 or not user_id is not null or not ts >= 160 or not ts <= 161) and (not user_id = 1 or not user_id is not null or not ts >= 162 or not ts <= 163) and (not user_id = 1 or not user_id is not null or not ts >= 164 or not ts <= 165) and (not user_id = 1 or not user_id is not null or not ts >= 166 or not ts <= 167) and (not user_id = 1 or not user_id is not null or not ts >= 168 or not ts <= 169) and (not user_id = 1 or not user_id is not null or not ts >= 170 or not ts <= 171) and (not user_id = 1 or not user_id is not null or not ts >= 172 or not ts <= 173) and (not user_id = 1 or not user_id is not null or not ts >= 174 or not ts <= 175) and (not user_id = 1 or not user_id is not null or not ts >= 176 or not ts <= 177) and (not user_id = 1 or not user_id is not null or not ts >= 178 or not ts <= 179) and (not user_id = 1 or not user_id is not null or not ts >= 180 or not ts <= 181) and (not user_id = 1 or not user_id is not null or not ts >= 182 or not ts <= 183) and (not user_id = 1 or not user_id is not null or not ts >= 184 or not ts <= 185) and (not user_id = 1 or not user_id is not null or not ts >= 186 or not ts <= 187) and (not user_id = 1 or not user_id is not null or not ts >= 188 
or not ts <= 189) and (not user_id = 1 or not user_id is not null or not ts >= 190 or not ts <= 191) and (not user_id = 1 or not user_id is not null or not ts >= 192 or not ts <= 193) and (not user_id = 1 or not user_id is not null or not ts >= 194 or not ts <= 195) and (not user_id = 1 or not user_id is not null or not ts >= 196 or not ts <= 197) and (not user_id = 1 or not user_id is not null or not ts >= 198 or not ts <= 199) and (not user_id = 1 or not user_id is not null or not ts >= 1100 or not ts <= 1101) and (not user_id = 1 or not user_id is not null or not ts >= 1102 or not ts <= 1103) and (not user_id = 1 or not user_id is not null or not ts >= 1104 or not ts <= 1105) and (not user_id = 1 or not user_id is not null or not ts >= 1106 or not ts <= 1107) and (not user_id = 1 or not user_id is not null or not ts >= 1108 or not ts <= 1109) and (not user_id = 1 or not user_id is not null or not ts >= 1110 or not ts <= 1111) and (not user_id = 1 or not user_id is not null or not ts >= 1112 or not ts <= 1113) and (not user_id = 1 or not user_id is not null or not ts >= 1114 or not ts <= 1115) and (not user_id = 1 or not user_id is not null or not ts >= 1116 or not ts <= 1117) and (not user_id = 1 or not user_id is not null or not ts >= 1118 or not ts <= 1119) and (not user_id = 1 or not user_id is not null or not ts >= 1120 or not ts <= 1121) and (not user_id = 1 or not user_id is not null or not ts >= 1122 or not ts <= 1123) and (not user_id = 1 or not user_id is not null or not ts >= 1124 or not ts <= 1125) and (not user_id = 1 or not user_id is not null or not ts >= 1126 or not ts <= 1127) and (not user_id = 1 or not user_id is not null or not ts >= 1128 or not ts <= 1129) and (not user_id = 1 or not user_id is not null or not ts >= 1130 or not ts <= 1131) and (not user_id = 1 or not user_id is not null or not ts >= 1132 or not ts <= 1133) and (not user_id = 1 or not user_id is not null or not ts >= 1134 or not ts <= 1135) and (not user_id = 1 or not user_id 
is not null or not ts >= 1136 or not ts <= 1137) and (not user_id = 1 or not user_id is not null or not ts >= 1138 or not ts <= 1139) and (not user_id = 1 or not user_id is not null or not ts >= 1140 or not ts <= 1141) and (not user_id = 1 or not user_id is not null or not ts >= 1142 or not ts <= 1143) and (not user_id = 1 or not user_id is not null or not ts >= 1144 or not ts <= 1145) and (not user_id = 1 or not user_id is not null or not ts >= 1146 or not ts <= 1147) and (not user_id = 1 or not user_id is not null or not ts >= 1148 or not ts <= 1149) and (not user_id = 1 or not user_id is not null or not ts >= 1150 or not ts <= 1151) and (not user_id = 1 or not user_id is not null or not ts >= 1152 or not ts <= 1153) and (not user_id = 1 or not user_id is not null or not ts >= 1154 or not ts <= 1155) and (not user_id = 1 or not user_id is not null or not ts >= 1156 or not ts <= 1157) and (not user_id = 1 or not user_id is not null or not ts >= 1158 or not ts <= 1159) and (not user_id = 1 or not user_id is not null or not ts >= 1160 or not ts <= 1161) and (not user_id = 1 or not user_id is not null or not ts >= 1162 or not ts <= 1163) and (not user_id = 1 or not user_id is not null or not ts >= 1164 or not ts <= 1165) and (not user_id = 1 or not user_id is not null or not ts >= 1166 or not ts <= 1167) and (not user_id = 1 or not user_id is not null or not ts >= 1168 or not ts <= 1169) and (not user_id = 1 or not user_id is not null or not ts >= 1170 or not ts <= 1171) and (not user_id = 1 or not user_id is not null or not ts >= 1172 or not ts <= 1173) and (not user_id = 1 or not user_id is not null or not ts >= 1174 or not ts <= 1175) and (not user_id = 1 or not user_id is not null or not ts >= 1176 or not ts <= 1177) and (not user_id = 1 or not user_id is not null or not ts >= 1178 or not ts <= 1179) and (not user_id = 1 or not user_id is not null or not ts >= 1180 or not ts <= 1181) and (not user_id = 1 or not user_id is not null or not ts >= 1182 or not ts <= 
1183) and (not user_id = 1 or not user_id is not null or not ts >= 1184 or not ts <= 1185) and (not user_id = 1 or not user_id is not null or not ts >= 1186 or not ts <= 1187) and (not user_id = 1 or not user_id is not null or not ts >= 1188 or not ts <= 1189) and (not user_id = 1 or not user_id is not null or not ts >= 1190 or not ts <= 1191) and (not user_id = 1 or not user_id is not null or not ts >= 1192 or not ts <= 1193) and (not user_id = 1 or not user_id is not null or not ts >= 1194 or not ts <= 1195) and (not user_id = 1 or not user_id is not null or not ts >= 1196 or not ts <= 1197) and (not user_id = 1 or not user_id is not null or not ts >= 1198 or not ts <= 1199) and (not user_id = 1 or not user_id is not null or not ts >= 1200 or not ts <= 1201) and (not user_id = 1 or not user_id is not null or not ts >= 1202 or not ts <= 1203) and (not user_id = 1 or not user_id is not null or not ts >= 1204 or not ts <= 1205) and (not user_id = 1 or not user_id is not null or not ts >= 1206 or not ts <= 1207) and (not user_id = 1 or not user_id is not null or not ts >= 1208 or not ts <= 1209) and (not user_id = 1 or not user_id is not null or not ts >= 1210 or not ts <= 1211) and (not user_id = 1 or not user_id is not null or not ts >= 1212 or not ts <= 1213) and (not user_id = 1 or not user_id is not null or not ts >= 1214 or not ts <= 1215) and (not user_id = 1 or not user_id is not null or not ts >= 1216 or not ts <= 1217) and (not user_id = 1 or not user_id is not null or not ts >= 1218 or not ts <= 1219) and (not user_id = 1 or not user_id is not null or not ts >= 1220 or not ts <= 1221) and (not user_id = 1 or not user_id is not null or not ts >= 1222 or not ts <= 1223) and (not user_id = 1 or not user_id is not null or not ts >= 1224 or not ts <= 1225) and (not user_id = 1 or not user_id is not null or not ts >= 1226 or not ts <= 1227) and (not user_id = 1 or not user_id is not null or not ts >= 1228 or not ts <= 1229) and (not user_id = 1 or not user_id is 
not null or not ts >= 1230 or not ts <= 1231) and (not user_id = 1 or not user_id is not null or not ts >= 1232 or not ts <= 1233) and (not user_id = 1 or not user_id is not null or not ts >= 1234 or not ts <= 1235) and (not user_id = 1 or not user_id is not null or not ts >= 1236 or not ts <= 1237) and (not user_id = 1 or not user_id is not null or not ts >= 1238 or not ts <= 1239) and (not user_id = 1 or not user_id is not null or not ts >= 1240 or not ts <= 1241) and (not user_id = 1 or not user_id is not null or not ts >= 1242 or not ts <= 1243) and (not user_id = 1 or not user_id is not null or not ts >= 1244 or not ts <= 1245) and (not user_id = 1 or not user_id is not null or not ts >= 1246 or not ts <= 1247) and (not user_id = 1 or not user_id is not null or not ts >= 1248 or not ts <= 1249) and (not user_id = 1 or not user_id is not null or not ts >= 1250 or not ts <= 1251) and (not user_id = 1 or not user_id is not null or not ts >= 1252 or not ts <= 1253) and (not user_id = 1 or not user_id is not null or not ts >= 1254 or not ts <= 1255) and (not user_id = 1 or not user_id is not null or not ts >= 1256 or not ts <= 1257) and (not user_id = 1 or not user_id is not null or not ts >= 1258 or not ts <= 1259) and (not user_id = 1 or not user_id is not null or not ts >= 1260 or not ts <= 1261) and (not user_id = 1 or not user_id is not null or not ts >= 1262 or not ts <= 1263) and (not user_id = 1 or not user_id is not null or not ts >= 1264 or not ts <= 1265) and (not user_id = 1 or not user_id is not null or not ts >= 1266 or not ts <= 1267) and (not user_id = 1 or not user_id is not null or not ts >= 1268 or not ts <= 1269) and (not user_id = 1 or not user_id is not null or not ts >= 1270 or not ts <= 1271) and (not user_id = 1 or not user_id is not null or not ts >= 1272 or not ts <= 1273) and (not user_id = 1 or not user_id is not null or not ts >= 1274 or not ts <= 1275) and (not user_id = 1 or not user_id is not null or not ts >= 1276 or not ts <= 
1277) and (not user_id = 1 or not user_id is not null or not ts >= 1278 or not ts <= 1279) and (not user_id = 1 or not user_id is not null or not ts >= 1280 or not ts <= 1281) and (not user_id = 1 or not user_id is not null or not ts >= 1282 or not ts <= 1283) and (not user_id = 1 or not user_id is not null or not ts >= 1284 or not ts <= 1285) and (not user_id = 1 or not user_id is not null or not ts >= 1286 or not ts <= 1287) and (not user_id = 1 or not user_id is not null or not ts >= 1288 or not ts <= 1289) and (not user_id = 1 or not user_id is not null or not ts >= 1290 or not ts <= 1291) and (not user_id = 1 or not user_id is not null or not ts >= 1292 or not ts <= 1293) and (not user_id = 1 or not user_id is not null or not ts >= 1294 or not ts <= 1295) and (not user_id = 1 or not user_id is not null or not ts >= 1296 or not ts <= 1297) and (not user_id = 1 or not user_id is not null or not ts >= 1298 or not ts <= 1299) and (not user_id = 1 or not user_id is not null or not ts >= 1300 or not ts <= 1301) and (not user_id = 1 or not user_id is not null or not ts >= 1302 or not ts <= 1303) and (not user_id = 1 or not user_id is not null or not ts >= 1304 or not ts <= 1305) and (not user_id = 1 or not user_id is not null or not ts >= 1306 or not ts <= 1307) and (not user_id = 1 or not user_id is not null or not ts >= 1308 or not ts <= 1309) and (not user_id = 1 or not user_id is not null or not ts >= 1310 or not ts <= 1311) and (not user_id = 1 or not user_id is not null or not ts >= 1312 or not ts <= 1313) and (not user_id = 1 or not user_id is not null or not ts >= 1314 or not ts <= 1315) and (not user_id = 1 or not user_id is not null or not ts >= 1316 or not ts <= 1317) and (not user_id = 1 or not user_id is not null or not ts >= 1318 or not ts <= 1319) and (not user_id = 1 or not user_id is not null or not ts >= 1320 or not ts <= 1321) and (not user_id = 1 or not user_id is not null or not ts >= 1322 or not ts <= 1323) and (not user_id = 1 or not user_id is 
not null or not ts >= 1324 or not ts <= 1325) and (not user_id = 1 or not user_id is not null or not ts >= 1326 or not ts <= 1327) and (not user_id = 1 or not user_id is not null or not ts >= 1328 or not ts <= 1329) and (not user_id = 1 or not user_id is not null or not ts >= 1330 or not ts <= 1331) and (not user_id = 1 or not user_id is not null or not ts >= 1332 or not ts <= 1333) and (not user_id = 1 or not user_id is not null or not ts >= 1334 or not ts <= 1335) and (not user_id = 1 or not user_id is not null or not ts >= 1336 or not ts <= 1337) and (not user_id = 1 or not user_id is not null or not ts >= 1338 or not ts <= 1339) and (not user_id = 1 or not user_id is not null or not ts >= 1340 or not ts <= 1341) and (not user_id = 1 or not user_id is not null or not ts >= 1342 or not ts <= 1343) and (not user_id = 1 or not user_id is not null or not ts >= 1344 or not ts <= 1345) and (not user_id = 1 or not user_id is not null or not ts >= 1346 or not ts <= 1347) and (not user_id = 1 or not user_id is not null or not ts >= 1348 or not ts <= 1349) and (not user_id = 1 or not user_id is not null or not ts >= 1350 or not ts <= 1351) and (not user_id = 1 or not user_id is not null or not ts >= 1352 or not ts <= 1353) and (not user_id = 1 or not user_id is not null or not ts >= 1354 or not ts <= 1355) and (not user_id = 1 or not user_id is not null or not ts >= 1356 or not ts <= 1357) and (not user_id = 1 or not user_id is not null or not ts >= 1358 or not ts <= 1359) and (not user_id = 1 or not user_id is not null or not ts >= 1360 or not ts <= 1361) and (not user_id = 1 or not user_id is not null or not ts >= 1362 or not ts <= 1363) and (not user_id = 1 or not user_id is not null or not ts >= 1364 or not ts <= 1365) and (not user_id = 1 or not user_id is not null or not ts >= 1366 or not ts <= 1367) and (not user_id = 1 or not user_id is not null or not ts >= 1368 or not ts <= 1369) and (not user_id = 1 or not user_id is not null or not ts >= 1370 or not ts <= 
1371) and (not user_id = 1 or not user_id is not null or not ts >= 1372 or not ts <= 1373) and (not user_id = 1 or not user_id is not null or not ts >= 1374 or not ts <= 1375) and (not user_id = 1 or not user_id is not null or not ts >= 1376 or not ts <= 1377) and (not user_id = 1 or not user_id is not null or not ts >= 1378 or not ts <= 1379) and (not user_id = 1 or not user_id is not null or not ts >= 1380 or not ts <= 1381) and (not user_id = 1 or not user_id is not null or not ts >= 1382 or not ts <= 1383) and (not user_id = 1 or not user_id is not null or not ts >= 1384 or not ts <= 1385) and (not user_id = 1 or not user_id is not null or not ts >= 1386 or not ts <= 1387) and (not user_id = 1 or not user_id is not null or not ts >= 1388 or not ts <= 1389) and (not user_id = 1 or not user_id is not null or not ts >= 1390 or not ts <= 1391) and (not user_id = 1 or not user_id is not null or not ts >= 1392 or not ts <= 1393) and (not user_id = 1 or not user_id is not null or not ts >= 1394 or not ts <= 1395) and (not user_id = 1 or not user_id is not null or not ts >= 1396 or not ts <= 1397) and (not user_id = 1 or not user_id is not null or not ts >= 1398 or not ts <= 1399) and (not user_id = 1 or not user_id is not null or not ts >= 1400 or not ts <= 1401) and (not user_id = 1 or not user_id is not null or not ts >= 1402 or not ts <= 1403) and (not user_id = 1 or not user_id is not null or not ts >= 1404 or not ts <= 1405) and (not user_id = 1 or not user_id is not null or not ts >= 1406 or not ts <= 1407) and (not user_id = 1 or not user_id is not null or not ts >= 1408 or not ts <= 1409) and (not user_id = 1 or not user_id is not null or not ts >= 1410 or not ts <= 1411) and (not user_id = 1 or not user_id is not null or not ts >= 1412 or not ts <= 1413) and (not user_id = 1 or not user_id is not null or not ts >= 1414 or not ts <= 1415) and (not user_id = 1 or not user_id is not null or not ts >= 1416 or not ts <= 1417) and (not user_id = 1 or not user_id is 
not null or not ts >= 1418 or not ts <= 1419) and (not user_id = 1 or not user_id is not null or not ts >= 1420 or not ts <= 1421) and (not user_id = 1 or not user_id is not null or not ts >= 1422 or not ts <= 1423) and (not user_id = 1 or not user_id is not null or not ts >= 1424 or not ts <= 1425) and (not user_id = 1 or not user_id is not null or not ts >= 1426 or not ts <= 1427) and (not user_id = 1 or not user_id is not null or not ts >= 1428 or not ts <= 1429) and (not user_id = 1 or not user_id is not null or not ts >= 1430 or not ts <= 1431) and (not user_id = 1 or not user_id is not null or not ts >= 1432 or not ts <= 1433) and (not user_id = 1 or not user_id is not null or not ts >= 1434 or not ts <= 1435) and (not user_id = 1 or not user_id is not null or not ts >= 1436 or not ts <= 1437) and (not user_id = 1 or not user_id is not null or not ts >= 1438 or not ts <= 1439) and (not user_id = 1 or not user_id is not null or not ts >= 1440 or not ts <= 1441) and (not user_id = 1 or not user_id is not null or not ts >= 1442 or not ts <= 1443) and (not user_id = 1 or not user_id is not null or not ts >= 1444 or not ts <= 1445) and (not user_id = 1 or not user_id is not null or not ts >= 1446 or not ts <= 1447) and (not user_id = 1 or not user_id is not null or not ts >= 1448 or not ts <= 1449) and (not user_id = 1 or not user_id is not null or not ts >= 1450 or not ts <= 1451) and (not user_id = 1 or not user_id is not null or not ts >= 1452 or not ts <= 1453) and (not user_id = 1 or not user_id is not null or not ts >= 1454 or not ts <= 1455) and (not user_id = 1 or not user_id is not null or not ts >= 1456 or not ts <= 1457) and (not user_id = 1 or not user_id is not null or not ts >= 1458 or not ts <= 1459) and (not user_id = 1 or not user_id is not null or not ts >= 1460 or not ts <= 1461) and (not user_id = 1 or not user_id is not null or not ts >= 1462 or not ts <= 1463) and (not user_id = 1 or not user_id is not null or not ts >= 1464 or not ts <= 
1465) and (not user_id = 1 or not user_id is not null or not ts >= 1466 or not ts <= 1467) and (not user_id = 1 or not user_id is not null or not ts >= 1468 or not ts <= 1469) and (not user_id = 1 or not user_id is not null or not ts >= 1470 or not ts <= 1471) and (not user_id = 1 or not user_id is not null or not ts >= 1472 or not ts <= 1473) and (not user_id = 1 or not user_id is not null or not ts >= 1474 or not ts <= 1475) and (not user_id = 1 or not user_id is not null or not ts >= 1476 or not ts <= 1477) and (not user_id = 1 or not user_id is not null or not ts >= 1478 or not ts <= 1479) and (not user_id = 1 or not user_id is not null or not ts >= 1480 or not ts <= 1481) and (not user_id = 1 or not user_id is not null or not ts >= 1482 or not ts <= 1483) and (not user_id = 1 or not user_id is not null or not ts >= 1484 or not ts <= 1485) and (not user_id = 1 or not user_id is not null or not ts >= 1486 or not ts <= 1487) and (not user_id = 1 or not user_id is not null or not ts >= 1488 or not ts <= 1489) and (not user_id = 1 or not user_id is not null or not ts >= 1490 or not ts <= 1491) and (not user_id = 1 or not user_id is not null or not ts >= 1492 or not ts <= 1493) and (not user_id = 1 or not user_id is not null or not ts >= 1494 or not ts <= 1495) and (not user_id = 1 or not user_id is not null or not ts >= 1496 or not ts <= 1497) and (not user_id = 1 or not user_id is not null or not ts >= 1498 or not ts <= 1499) and (not user_id = 1 or not user_id is not null or not ts >= 1500 or not ts <= 1501) and (not user_id = 1 or not user_id is not null or not ts >= 1502 or not ts <= 1503) and (not user_id = 1 or not user_id is not null or not ts >= 1504 or not ts <= 1505) and (not user_id = 1 or not user_id is not null or not ts >= 1506 or not ts <= 1507) and (not user_id = 1 or not user_id is not null or not ts >= 1508 or not ts <= 1509) and (not user_id = 1 or not user_id is not null or not ts >= 1510 or not ts <= 1511) and (not user_id = 1 or not user_id is 
not null or not ts >= 1512 or not ts <= 1513) and (not user_id = 1 or not user_id is not null or not ts >= 1514 or not ts <= 1515) and (not user_id = 1 or not user_id is not null or not ts >= 1516 or not ts <= 1517) and (not user_id = 1 or not user_id is not null or not ts >= 1518 or not ts <= 1519) and (not user_id = 1 or not user_id is not null or not ts >= 1520 or not ts <= 1521) and (not user_id = 1 or not user_id is not null or not ts >= 1522 or not ts <= 1523) and (not user_id = 1 or not user_id is not null or not ts >= 1524 or not ts <= 1525) and (not user_id = 1 or not user_id is not null or not ts >= 1526 or not ts <= 1527) and (not user_id = 1 or not user_id is not null or not ts >= 1528 or not ts <= 1529) and (not user_id = 1 or not user_id is not null or not ts >= 1530 or not ts <= 1531) and (not user_id = 1 or not user_id is not null or not ts >= 1532 or not ts <= 1533) and (not user_id = 1 or not user_id is not null or not ts >= 1534 or not ts <= 1535) and (not user_id = 1 or not user_id is not null or not ts >= 1536 or not ts <= 1537) and (not user_id = 1 or not user_id is not null or not ts >= 1538 or not ts <= 1539) and (not user_id = 1 or not user_id is not null or not ts >= 1540 or not ts <= 1541) and (not user_id = 1 or not user_id is not null or not ts >= 1542 or not ts <= 1543) and (not user_id = 1 or not user_id is not null or not ts >= 1544 or not ts <= 1545) and (not user_id = 1 or not user_id is not null or not ts >= 1546 or not ts <= 1547) and (not user_id = 1 or not user_id is not null or not ts >= 1548 or not ts <= 1549) and (not user_id = 1 or not user_id is not null or not ts >= 1550 or not ts <= 1551) and (not user_id = 1 or not user_id is not null or not ts >= 1552 or not ts <= 1553) and (not user_id = 1 or not user_id is not null or not ts >= 1554 or not ts <= 1555) and (not user_id = 1 or not user_id is not null or not ts >= 1556 or not ts <= 1557) and (not user_id = 1 or not user_id is not null or not ts >= 1558 or not ts <= 
1559) and (not user_id = 1 or not user_id is not null or not ts >= 1560 or not ts <= 1561) and (not user_id = 1 or not user_id is not null or not ts >= 1562 or not ts <= 1563) and (not user_id = 1 or not user_id is not null or not ts >= 1564 or not ts <= 1565) and (not user_id = 1 or not user_id is not null or not ts >= 1566 or not ts <= 1567) and (not user_id = 1 or not user_id is not null or not ts >= 1568 or not ts <= 1569) and (not user_id = 1 or not user_id is not null or not ts >= 1570 or not ts <= 1571) and (not user_id = 1 or not user_id is not null or not ts >= 1572 or not ts <= 1573) and (not user_id = 1 or not user_id is not null or not ts >= 1574 or not ts <= 1575) and (not user_id = 1 or not user_id is not null or not ts >= 1576 or not ts <= 1577) and (not user_id = 1 or not user_id is not null or not ts >= 1578 or not ts <= 1579) and (not user_id = 1 or not user_id is not null or not ts >= 1580 or not ts <= 1581) and (not user_id = 1 or not user_id is not null or not ts >= 1582 or not ts <= 1583) and (not user_id = 1 or not user_id is not null or not ts >= 1584 or not ts <= 1585) and (not user_id = 1 or not user_id is not null or not ts >= 1586 or not ts <= 1587) and (not user_id = 1 or not user_id is not null or not ts >= 1588 or not ts <= 1589) and (not user_id = 1 or not user_id is not null or not ts >= 1590 or not ts <= 1591) and (not user_id = 1 or not user_id is not null or not ts >= 1592 or not ts <= 1593) and (not user_id = 1 or not user_id is not null or not ts >= 1594 or not ts <= 1595) and (not user_id = 1 or not user_id is not null or not ts >= 1596 or not ts <= 1597) and (not user_id = 1 or not user_id is not null or not ts >= 1598 or not ts <= 1599) and (not user_id = 1 or not user_id is not null or not ts >= 1600 or not ts <= 1601) and (not user_id = 1 or not user_id is not null or not ts >= 1602 or not ts <= 1603) and (not user_id = 1 or not user_id is not null or not ts >= 1604 or not ts <= 1605) and (not user_id = 1 or not user_id is 
not null or not ts >= 1606 or not ts <= 1607) and (not user_id = 1 or not user_id is not null or not ts >= 1608 or not ts <= 1609) and (not user_id = 1 or not user_id is not null or not ts >= 1610 or not ts <= 1611) and (not user_id = 1 or not user_id is not null or not ts >= 1612 or not ts <= 1613) and (not user_id = 1 or not user_id is not null or not ts >= 1614 or not ts <= 1615) and (not user_id = 1 or not user_id is not null or not ts >= 1616 or not ts <= 1617) and (not user_id = 1 or not user_id is not null or not ts >= 1618 or not ts <= 1619) and (not user_id = 1 or not user_id is not null or not ts >= 1620 or not ts <= 1621) and (not user_id = 1 or not user_id is not null or not ts >= 1622 or not ts <= 1623) and (not user_id = 1 or not user_id is not null or not ts >= 1624 or not ts <= 1625) and (not user_id = 1 or not user_id is not null or not ts >= 1626 or not ts <= 1627) and (not user_id = 1 or not user_id is not null or not ts >= 1628 or not ts <= 1629) and (not user_id = 1 or not user_id is not null or not ts >= 1630 or not ts <= 1631) and (not user_id = 1 or not user_id is not null or not ts >= 1632 or not ts <= 1633) and (not user_id = 1 or not user_id is not null or not ts >= 1634 or not ts <= 1635) and (not user_id = 1 or not user_id is not null or not ts >= 1636 or not ts <= 1637) and (not user_id = 1 or not user_id is not null or not ts >= 1638 or not ts <= 1639) and (not user_id = 1 or not user_id is not null or not ts >= 1640 or not ts <= 1641) and (not user_id = 1 or not user_id is not null or not ts >= 1642 or not ts <= 1643) and (not user_id = 1 or not user_id is not null or not ts >= 1644 or not ts <= 1645) and (not user_id = 1 or not user_id is not null or not ts >= 1646 or not ts <= 1647) and (not user_id = 1 or not user_id is not null or not ts >= 1648 or not ts <= 1649) and (not user_id = 1 or not user_id is not null or not ts >= 1650 or not ts <= 1651) and (not user_id = 1 or not user_id is not null or not ts >= 1652 or not ts <= 
1653) and (not user_id = 1 or not user_id is not null or not ts >= 1654 or not ts <= 1655) and (not user_id = 1 or not user_id is not null or not ts >= 1656 or not ts <= 1657) and (not user_id = 1 or not user_id is not null or not ts >= 1658 or not ts <= 1659) and (not user_id = 1 or not user_id is not null or not ts >= 1660 or not ts <= 1661) and (not user_id = 1 or not user_id is not null or not ts >= 1662 or not ts <= 1663) and (not user_id = 1 or not user_id is not null or not ts >= 1664 or not ts <= 1665) and (not user_id = 1 or not user_id is not null or not ts >= 1666 or not ts <= 1667) and (not user_id = 1 or not user_id is not null or not ts >= 1668 or not ts <= 1669) and (not user_id = 1 or not user_id is not null or not ts >= 1670 or not ts <= 1671) and (not user_id = 1 or not user_id is not null or not ts >= 1672 or not ts <= 1673) and (not user_id = 1 or not user_id is not null or not ts >= 1674 or not ts <= 1675) and (not user_id = 1 or not user_id is not null or not ts >= 1676 or not ts <= 1677) and (not user_id = 1 or not user_id is not null or not ts >= 1678 or not ts <= 1679) and (not user_id = 1 or not user_id is not null or not ts >= 1680 or not ts <= 1681) and (not user_id = 1 or not user_id is not null or not ts >= 1682 or not ts <= 1683) and (not user_id = 1 or not user_id is not null or not ts >= 1684 or not ts <= 1685) and (not user_id = 1 or not user_id is not null or not ts >= 1686 or not ts <= 1687) and (not user_id = 1 or not user_id is not null or not ts >= 1688 or not ts <= 1689) and (not user_id = 1 or not user_id is not null or not ts >= 1690 or not ts <= 1691) and (not user_id = 1 or not user_id is not null or not ts >= 1692 or not ts <= 1693) and (not user_id = 1 or not user_id is not null or not ts >= 1694 or not ts <= 1695) and (not user_id = 1 or not user_id is not null or not ts >= 1696 or not ts <= 1697) and (not user_id = 1 or not user_id is not null or not ts >= 1698 or not ts <= 1699) and (not user_id = 1 or not user_id is 
not null or not ts >= 1700 or not ts <= 1701) and (not user_id = 1 or not user_id is not null or not ts >= 1702 or not ts <= 1703) and (not user_id = 1 or not user_id is not null or not ts >= 1704 or not ts <= 1705) and (not user_id = 1 or not user_id is not null or not ts >= 1706 or not ts <= 1707) and (not user_id = 1 or not user_id is not null or not ts >= 1708 or not ts <= 1709) and (not user_id = 1 or not user_id is not null or not ts >= 1710 or not ts <= 1711) and (not user_id = 1 or not user_id is not null or not ts >= 1712 or not ts <= 1713) and (not user_id = 1 or not user_id is not null or not ts >= 1714 or not ts <= 1715) and (not user_id = 1 or not user_id is not null or not ts >= 1716 or not ts <= 1717) and (not user_id = 1 or not user_id is not null or not ts >= 1718 or not ts <= 1719) and (not user_id = 1 or not user_id is not null or not ts >= 1720 or not ts <= 1721) and (not user_id = 1 or not user_id is not null or not ts >= 1722 or not ts <= 1723) and (not user_id = 1 or not user_id is not null or not ts >= 1724 or not ts <= 1725) and (not user_id = 1 or not user_id is not null or not ts >= 1726 or not ts <= 1727) and (not user_id = 1 or not user_id is not null or not ts >= 1728 or not ts <= 1729) and (not user_id = 1 or not user_id is not null or not ts >= 1730 or not ts <= 1731) and (not user_id = 1 or not user_id is not null or not ts >= 1732 or not ts <= 1733) and (not user_id = 1 or not user_id is not null or not ts >= 1734 or not ts <= 1735) and (not user_id = 1 or not user_id is not null or not ts >= 1736 or not ts <= 1737) and (not user_id = 1 or not user_id is not null or not ts >= 1738 or not ts <= 1739) and (not user_id = 1 or not user_id is not null or not ts >= 1740 or not ts <= 1741) and (not user_id = 1 or not user_id is not null or not ts >= 1742 or not ts <= 1743) and (not user_id = 1 or not user_id is not null or not ts >= 1744 or not ts <= 1745) and (not user_id = 1 or not user_id is not null or not ts >= 1746 or not ts <= 
1747) and (not user_id = 1 or not user_id is not null or not ts >= 1748 or not ts <= 1749) and (not user_id = 1 or not user_id is not null or not ts >= 1750 or not ts <= 1751) and (not user_id = 1 or not user_id is not null or not ts >= 1752 or not ts <= 1753) and (not user_id = 1 or not user_id is not null or not ts >= 1754 or not ts <= 1755) and (not user_id = 1 or not user_id is not null or not ts >= 1756 or not ts <= 1757) and (not user_id = 1 or not user_id is not null or not ts >= 1758 or not ts <= 1759) and (not user_id = 1 or not user_id is not null or not ts >= 1760 or not ts <= 1761) and (not user_id = 1 or not user_id is not null or not ts >= 1762 or not ts <= 1763) and (not user_id = 1 or not user_id is not null or not ts >= 1764 or not ts <= 1765) and (not user_id = 1 or not user_id is not null or not ts >= 1766 or not ts <= 1767) and (not user_id = 1 or not user_id is not null or not ts >= 1768 or not ts <= 1769) and (not user_id = 1 or not user_id is not null or not ts >= 1770 or not ts <= 1771) and (not user_id = 1 or not user_id is not null or not ts >= 1772 or not ts <= 1773) and (not user_id = 1 or not user_id is not null or not ts >= 1774 or not ts <= 1775) and (not user_id = 1 or not user_id is not null or not ts >= 1776 or not ts <= 1777) and (not user_id = 1 or not user_id is not null or not ts >= 1778 or not ts <= 1779) and (not user_id = 1 or not user_id is not null or not ts >= 1780 or not ts <= 1781) and (not user_id = 1 or not user_id is not null or not ts >= 1782 or not ts <= 1783) and (not user_id = 1 or not user_id is not null or not ts >= 1784 or not ts <= 1785) and (not user_id = 1 or not user_id is not null or not ts >= 1786 or not ts <= 1787) and (not user_id = 1 or not user_id is not null or not ts >= 1788 or not ts <= 1789) and (not user_id = 1 or not user_id is not null or not ts >= 1790 or not ts <= 1791) and (not user_id = 1 or not user_id is not null or not ts >= 1792 or not ts <= 1793) and (not user_id = 1 or not user_id is 
not null or not ts >= 1794 or not ts <= 1795) and (not user_id = 1 or not user_id is not null or not ts >= 1796 or not ts <= 1797) and (not user_id = 1 or not user_id is not null or not ts >= 1798 or not ts <= 1799) and (not user_id = 1 or not user_id is not null or not ts >= 1800 or not ts <= 1801) and (not user_id = 1 or not user_id is not null or not ts >= 1802 or not ts <= 1803) and (not user_id = 1 or not user_id is not null or not ts >= 1804 or not ts <= 1805) and (not user_id = 1 or not user_id is not null or not ts >= 1806 or not ts <= 1807) and (not user_id = 1 or not user_id is not null or not ts >= 1808 or not ts <= 1809) and (not user_id = 1 or not user_id is not null or not ts >= 1810 or not ts <= 1811) and (not user_id = 1 or not user_id is not null or not ts >= 1812 or not ts <= 1813) and (not user_id = 1 or not user_id is not null or not ts >= 1814 or not ts <= 1815) and (not user_id = 1 or not user_id is not null or not ts >= 1816 or not ts <= 1817) and (not user_id = 1 or not user_id is not null or not ts >= 1818 or not ts <= 1819) and (not user_id = 1 or not user_id is not null or not ts >= 1820 or not ts <= 1821) and (not user_id = 1 or not user_id is not null or not ts >= 1822 or not ts <= 1823) and (not user_id = 1 or not user_id is not null or not ts >= 1824 or not ts <= 1825) and (not user_id = 1 or not user_id is not null or not ts >= 1826 or not ts <= 1827) and (not user_id = 1 or not user_id is not null or not ts >= 1828 or not ts <= 1829) and (not user_id = 1 or not user_id is not null or not ts >= 1830 or not ts <= 1831) and (not user_id = 1 or not user_id is not null or not ts >= 1832 or not ts <= 1833) and (not user_id = 1 or not user_id is not null or not ts >= 1834 or not ts <= 1835) and (not user_id = 1 or not user_id is not null or not ts >= 1836 or not ts <= 1837) and (not user_id = 1 or not user_id is not null or not ts >= 1838 or not ts <= 1839) and (not user_id = 1 or not user_id is not null or not ts >= 1840 or not ts <= 
1841) and (not user_id = 1 or not user_id is not null or not ts >= 1842 or not ts <= 1843) and (not user_id = 1 or not user_id is not null or not ts >= 1844 or not ts <= 1845) and (not user_id = 1 or not user_id is not null or not ts >= 1846 or not ts <= 1847) and (not user_id = 1 or not user_id is not null or not ts >= 1848 or not ts <= 1849) and (not user_id = 1 or not user_id is not null or not ts >= 1850 or not ts <= 1851) and (not user_id = 1 or not user_id is not null or not ts >= 1852 or not ts <= 1853) and (not user_id = 1 or not user_id is not null or not ts >= 1854 or not ts <= 1855) and (not user_id = 1 or not user_id is not null or not ts >= 1856 or not ts <= 1857) and (not user_id = 1 or not user_id is not null or not ts >= 1858 or not ts <= 1859) and (not user_id = 1 or not user_id is not null or not ts >= 1860 or not ts <= 1861) and (not user_id = 1 or not user_id is not null or not ts >= 1862 or not ts <= 1863) and (not user_id = 1 or not user_id is not null or not ts >= 1864 or not ts <= 1865) and (not user_id = 1 or not user_id is not null or not ts >= 1866 or not ts <= 1867) and (not user_id = 1 or not user_id is not null or not ts >= 1868 or not ts <= 1869) and (not user_id = 1 or not user_id is not null or not ts >= 1870 or not ts <= 1871) and (not user_id = 1 or not user_id is not null or not ts >= 1872 or not ts <= 1873) and (not user_id = 1 or not user_id is not null or not ts >= 1874 or not ts <= 1875) and (not user_id = 1 or not user_id is not null or not ts >= 1876 or not ts <= 1877) and (not user_id = 1 or not user_id is not null or not ts >= 1878 or not ts <= 1879) and (not user_id = 1 or not user_id is not null or not ts >= 1880 or not ts <= 1881) and (not user_id = 1 or not user_id is not null or not ts >= 1882 or not ts <= 1883) and (not user_id = 1 or not user_id is not null or not ts >= 1884 or not ts <= 1885) and (not user_id = 1 or not user_id is not null or not ts >= 1886 or not ts <= 1887) and (not user_id = 1 or not user_id is 
not null or not ts >= 1888 or not ts <= 1889) and (not user_id = 1 or not user_id is not null or not ts >= 1890 or not ts <= 1891) and (not user_id = 1 or not user_id is not null or not ts >= 1892 or not ts <= 1893) and (not user_id = 1 or not user_id is not null or not ts >= 1894 or not ts <= 1895) and (not user_id = 1 or not user_id is not null or not ts >= 1896 or not ts <= 1897) and (not user_id = 1 or not user_id is not null or not ts >= 1898 or not ts <= 1899) and (not user_id = 1 or not user_id is not null or not ts >= 1900 or not ts <= 1901) and (not user_id = 1 or not user_id is not null or not ts >= 1902 or not ts <= 1903) and (not user_id = 1 or not user_id is not null or not ts >= 1904 or not ts <= 1905) and (not user_id = 1 or not user_id is not null or not ts >= 1906 or not ts <= 1907) and (not user_id = 1 or not user_id is not null or not ts >= 1908 or not ts <= 1909) and (not user_id = 1 or not user_id is not null or not ts >= 1910 or not ts <= 1911) and (not user_id = 1 or not user_id is not null or not ts >= 1912 or not ts <= 1913) and (not user_id = 1 or not user_id is not null or not ts >= 1914 or not ts <= 1915) and (not user_id = 1 or not user_id is not null or not ts >= 1916 or not ts <= 1917) and (not user_id = 1 or not user_id is not null or not ts >= 1918 or not ts <= 1919) and (not user_id = 1 or not user_id is not null or not ts >= 1920 or not ts <= 1921) and (not user_id = 1 or not user_id is not null or not ts >= 1922 or not ts <= 1923) and (not user_id = 1 or not user_id is not null or not ts >= 1924 or not ts <= 1925) and (not user_id = 1 or not user_id is not null or not ts >= 1926 or not ts <= 1927) and (not user_id = 1 or not user_id is not null or not ts >= 1928 or not ts <= 1929) and (not user_id = 1 or not user_id is not null or not ts >= 1930 or not ts <= 1931) and (not user_id = 1 or not user_id is not null or not ts >= 1932 or not ts <= 1933) and (not user_id = 1 or not user_id is not null or not ts >= 1934 or not ts <= 
1935) and (not user_id = 1 or not user_id is not null or not ts >= 1936 or not ts <= 1937) and (not user_id = 1 or not user_id is not null or not ts >= 1938 or not ts <= 1939) and (not user_id = 1 or not user_id is not null or not ts >= 1940 or not ts <= 1941) and (not user_id = 1 or not user_id is not null or not ts >= 1942 or not ts <= 1943) and (not user_id = 1 or not user_id is not null or not ts >= 1944 or not ts <= 1945) and (not user_id = 1 or not user_id is not null or not ts >= 1946 or not ts <= 1947) and (not user_id = 1 or not user_id is not null or not ts >= 1948 or not ts <= 1949) and (not user_id = 1 or not user_id is not null or not ts >= 1950 or not ts <= 1951) and (not user_id = 1 or not user_id is not null or not ts >= 1952 or not ts <= 1953) and (not user_id = 1 or not user_id is not null or not ts >= 1954 or not ts <= 1955) and (not user_id = 1 or not user_id is not null or not ts >= 1956 or not ts <= 1957) and (not user_id = 1 or not user_id is not null or not ts >= 1958 or not ts <= 1959) and (not user_id = 1 or not user_id is not null or not ts >= 1960 or not ts <= 1961) and (not user_id = 1 or not user_id is not null or not ts >= 1962 or not ts <= 1963) and (not user_id = 1 or not user_id is not null or not ts >= 1964 or not ts <= 1965) and (not user_id = 1 or not user_id is not null or not ts >= 1966 or not ts <= 1967) and (not user_id = 1 or not user_id is not null or not ts >= 1968 or not ts <= 1969) and (not user_id = 1 or not user_id is not null or not ts >= 1970 or not ts <= 1971) and (not user_id = 1 or not user_id is not null or not ts >= 1972 or not ts <= 1973) and (not user_id = 1 or not user_id is not null or not ts >= 1974 or not ts <= 1975) and (not user_id = 1 or not user_id is not null or not ts >= 1976 or not ts <= 1977) and (not user_id = 1 or not user_id is not null or not ts >= 1978 or not ts <= 1979) and (not user_id = 1 or not user_id is not null or not ts >= 1980 or not ts <= 1981) and (not user_id = 1 or not user_id is 
not null or not ts >= 1982 or not ts <= 1983) and (not user_id = 1 or not user_id is not null or not ts >= 1984 or not ts <= 1985) and (not user_id = 1 or not user_id is not null or not ts >= 1986 or not ts <= 1987) and (not user_id = 1 or not user_id is not null or not ts >= 1988 or not ts <= 1989) and (not user_id = 1 or not user_id is not null or not ts >= 1990 or not ts <= 1991) and (not user_id = 1 or not user_id is not null or not ts >= 1992 or not ts <= 1993) and (not user_id = 1 or not user_id is not null or not ts >= 1994 or not ts <= 1995) and (not user_id = 1 or not user_id is not null or not ts >= 1996 or not ts <= 1997) and (not user_id = 1 or not user_id is not null or not ts >= 1998 or not ts <= 1999) and (not user_id = 1 or not user_id is not null or not ts >= 11000 or not ts <= 11001) and (not user_id = 1 or not user_id is not null or not ts >= 11002 or not ts <= 11003) and (not user_id = 1 or not user_id is not null or not ts >= 11004 or not ts <= 11005) and (not user_id = 1 or not user_id is not null or not ts >= 11006 or not ts <= 11007) and (not user_id = 1 or not user_id is not null or not ts >= 11008 or not ts <= 11009) and (not user_id = 1 or not user_id is not null or not ts >= 11010 or not ts <= 11011) and (not user_id = 1 or not user_id is not null or not ts >= 11012 or not ts <= 11013) and (not user_id = 1 or not user_id is not null or not ts >= 11014 or not ts <= 11015) and (not user_id = 1 or not user_id is not null or not ts >= 11016 or not ts <= 11017) and (not user_id = 1 or not user_id is not null or not ts >= 11018 or not ts <= 11019) and (not user_id = 1 or not user_id is not null or not ts >= 11020 or not ts <= 11021) and (not user_id = 1 or not user_id is not null or not ts >= 11022 or not ts <= 11023) and (not user_id = 1 or not user_id is not null or not ts >= 11024 or not ts <= 11025) and (not user_id = 1 or not user_id is not null or not ts >= 11026 or not ts <= 11027) and (not user_id = 1 or not user_id is not null or not 
ts >= 11028 or not ts <= 11029) and (not user_id = 1 or not user_id is not null or not ts >= 11030 or not ts <= 11031) and (not user_id = 1 or not user_id is not null or not ts >= 11032 or not ts <= 11033) and (not user_id = 1 or not user_id is not null or not ts >= 11034 or not ts <= 11035) and (not user_id = 1 or not user_id is not null or not ts >= 11036 or not ts <= 11037) and (not user_id = 1 or not user_id is not null or not ts >= 11038 or not ts <= 11039) and (not user_id = 1 or not user_id is not null or not ts >= 11040 or not ts <= 11041) and (not user_id = 1 or not user_id is not null or not ts >= 11042 or not ts <= 11043) and (not user_id = 1 or not user_id is not null or not ts >= 11044 or not ts <= 11045) and (not user_id = 1 or not user_id is not null or not ts >= 11046 or not ts <= 11047) and (not user_id = 1 or not user_id is not null or not ts >= 11048 or not ts <= 11049) and (not user_id = 1 or not user_id is not null or not ts >= 11050 or not ts <= 11051) and (not user_id = 1 or not user_id is not null or not ts >= 11052 or not ts <= 11053) and (not user_id = 1 or not user_id is not null or not ts >= 11054 or not ts <= 11055) and (not user_id = 1 or not user_id is not null or not ts >= 11056 or not ts <= 11057) and (not user_id = 1 or not user_id is not null or not ts >= 11058 or not ts <= 11059) and (not user_id = 1 or not user_id is not null or not ts >= 11060 or not ts <= 11061) and (not user_id = 1 or not user_id is not null or not ts >= 11062 or not ts <= 11063) and (not user_id = 1 or not user_id is not null or not ts >= 11064 or not ts <= 11065) and (not user_id = 1 or not user_id is not null or not ts >= 11066 or not ts <= 11067) and (not user_id = 1 or not user_id is not null or not ts >= 11068 or not ts <= 11069) and (not user_id = 1 or not user_id is not null or not ts >= 11070 or not ts <= 11071) and (not user_id = 1 or not user_id is not null or not ts >= 11072 or not ts <= 11073) and (not user_id = 1 or not user_id is not null or 
not ts >= 11074 or not ts <= 11075) and (not user_id = 1 or not user_id is not null or not ts >= 11076 or not ts <= 11077) and (not user_id = 1 or not user_id is not null or not ts >= 11078 or not ts <= 11079) and (not user_id = 1 or not user_id is not null or not ts >= 11080 or not ts <= 11081) and (not user_id = 1 or not user_id is not null or not ts >= 11082 or not ts <= 11083) and (not user_id = 1 or not user_id is not null or not ts >= 11084 or not ts <= 11085) and (not user_id = 1 or not user_id is not null or not ts >= 11086 or not ts <= 11087) and (not user_id = 1 or not user_id is not null or not ts >= 11088 or not ts <= 11089) and (not user_id = 1 or not user_id is not null or not ts >= 11090 or not ts <= 11091) and (not user_id = 1 or not user_id is not null or not ts >= 11092 or not ts <= 11093) and (not user_id = 1 or not user_id is not null or not ts >= 11094 or not ts <= 11095) and (not user_id = 1 or not user_id is not null or not ts >= 11096 or not ts <= 11097) and (not user_id = 1 or not user_id is not null or not ts >= 11098 or not ts <= 11099) and (not user_id = 1 or not user_id is not null or not ts >= 11100 or not ts <= 11101) and (not user_id = 1 or not user_id is not null or not ts >= 11102 or not ts <= 11103) and (not user_id = 1 or not user_id is not null or not ts >= 11104 or not ts <= 11105) and (not user_id = 1 or not user_id is not null or not ts >= 11106 or not ts <= 11107) and (not user_id = 1 or not user_id is not null or not ts >= 11108 or not ts <= 11109) and (not user_id = 1 or not user_id is not null or not ts >= 11110 or not ts <= 11111) and (not user_id = 1 or not user_id is not null or not ts >= 11112 or not ts <= 11113) and (not user_id = 1 or not user_id is not null or not ts >= 11114 or not ts <= 11115) and (not user_id = 1 or not user_id is not null or not ts >= 11116 or not ts <= 11117) and (not user_id = 1 or not user_id is not null or not ts >= 11118 or not ts <= 11119) and (not user_id = 1 or not user_id is not null 
or not ts >= 11120 or not ts <= 11121) and (not user_id = 1 or not user_id is not null or not ts >= 11122 or not ts <= 11123) and (not user_id = 1 or not user_id is not null or not ts >= 11124 or not ts <= 11125) and (not user_id = 1 or not user_id is not null or not ts >= 11126 or not ts <= 11127) and (not user_id = 1 or not user_id is not null or not ts >= 11128 or not ts <= 11129) and (not user_id = 1 or not user_id is not null or not ts >= 11130 or not ts <= 11131) and (not user_id = 1 or not user_id is not null or not ts >= 11132 or not ts <= 11133) and (not user_id = 1 or not user_id is not null or not ts >= 11134 or not ts <= 11135) and (not user_id = 1 or not user_id is not null or not ts >= 11136 or not ts <= 11137) and (not user_id = 1 or not user_id is not null or not ts >= 11138 or not ts <= 11139) and (not user_id = 1 or not user_id is not null or not ts >= 11140 or not ts <= 11141) and (not user_id = 1 or not user_id is not null or not ts >= 11142 or not ts <= 11143) and (not user_id = 1 or not user_id is not null or not ts >= 11144 or not ts <= 11145) and (not user_id = 1 or not user_id is not null or not ts >= 11146 or not ts <= 11147) and (not user_id = 1 or not user_id is not null or not ts >= 11148 or not ts <= 11149) and (not user_id = 1 or not user_id is not null or not ts >= 11150 or not ts <= 11151) and (not user_id = 1 or not user_id is not null or not ts >= 11152 or not ts <= 11153) and (not user_id = 1 or not user_id is not null or not ts >= 11154 or not ts <= 11155) and (not user_id = 1 or not user_id is not null or not ts >= 11156 or not ts <= 11157) and (not user_id = 1 or not user_id is not null or not ts >= 11158 or not ts <= 11159) and (not user_id = 1 or not user_id is not null or not ts >= 11160 or not ts <= 11161) and (not user_id = 1 or not user_id is not null or not ts >= 11162 or not ts <= 11163) and (not user_id = 1 or not user_id is not null or not ts >= 11164 or not ts <= 11165) and (not user_id = 1 or not user_id is not 
null or not ts >= 11166 or not ts <= 11167) and (not user_id = 1 or not user_id is not null or not ts >= 11168 or not ts <= 11169) and (not user_id = 1 or not user_id is not null or not ts >= 11170 or not ts <= 11171) and (not user_id = 1 or not user_id is not null or not ts >= 11172 or not ts <= 11173) and (not user_id = 1 or not user_id is not null or not ts >= 11174 or not ts <= 11175) and (not user_id = 1 or not user_id is not null or not ts >= 11176 or not ts <= 11177) and (not user_id = 1 or not user_id is not null or not ts >= 11178 or not ts <= 11179) and (not user_id = 1 or not user_id is not null or not ts >= 11180 or not ts <= 11181) and (not user_id = 1 or not user_id is not null or not ts >= 11182 or not ts <= 11183) and (not user_id = 1 or not user_id is not null or not ts >= 11184 or not ts <= 11185) and (not user_id = 1 or not user_id is not null or not ts >= 11186 or not ts <= 11187) and (not user_id = 1 or not user_id is not null or not ts >= 11188 or not ts <= 11189) and (not user_id = 1 or not user_id is not null or not ts >= 11190 or not ts <= 11191) and (not user_id = 1 or not user_id is not null or not ts >= 11192 or not ts <= 11193) and (not user_id = 1 or not user_id is not null or not ts >= 11194 or not ts <= 11195) and (not user_id = 1 or not user_id is not null or not ts >= 11196 or not ts <= 11197) and (not user_id = 1 or not user_id is not null or not ts >= 11198 or not ts <= 11199) and (not user_id = 1 or not user_id is not null or not ts >= 11200 or not ts <= 11201) and (not user_id = 1 or not user_id is not null or not ts >= 11202 or not ts <= 11203) and (not user_id = 1 or not user_id is not null or not ts >= 11204 or not ts <= 11205) and (not user_id = 1 or not user_id is not null or not ts >= 11206 or not ts <= 11207) and (not user_id = 1 or not user_id is not null or not ts >= 11208 or not ts <= 11209) and (not user_id = 1 or not user_id is not null or not ts >= 11210 or not ts <= 11211) and (not user_id = 1 or not user_id is 
not null or not ts >= 11212 or not ts <= 11213) and (not user_id = 1 or not user_id is not null or not ts >= 11214 or not ts <= 11215) and (not user_id = 1 or not user_id is not null or not ts >= 11216 or not ts <= 11217) and (not user_id = 1 or not user_id is not null or not ts >= 11218 or not ts <= 11219) and (not user_id = 1 or not user_id is not null or not ts >= 11220 or not ts <= 11221) and (not user_id = 1 or not user_id is not null or not ts >= 11222 or not ts <= 11223) and (not user_id = 1 or not user_id is not null or not ts >= 11224 or not ts <= 11225) and (not user_id = 1 or not user_id is not null or not ts >= 11226 or not ts <= 11227) and (not user_id = 1 or not user_id is not null or not ts >= 11228 or not ts <= 11229) and (not user_id = 1 or not user_id is not null or not ts >= 11230 or not ts <= 11231) and (not user_id = 1 or not user_id is not null or not ts >= 11232 or not ts <= 11233) and (not user_id = 1 or not user_id is not null or not ts >= 11234 or not ts <= 11235) and (not user_id = 1 or not user_id is not null or not ts >= 11236 or not ts <= 11237) and (not user_id = 1 or not user_id is not null or not ts >= 11238 or not ts <= 11239) and (not user_id = 1 or not user_id is not null or not ts >= 11240 or not ts <= 11241) and (not user_id = 1 or not user_id is not null or not ts >= 11242 or not ts <= 11243) and (not user_id = 1 or not user_id is not null or not ts >= 11244 or not ts <= 11245) and (not user_id = 1 or not user_id is not null or not ts >= 11246 or not ts <= 11247) and (not user_id = 1 or not user_id is not null or not ts >= 11248 or not ts <= 11249) and (not user_id = 1 or not user_id is not null or not ts >= 11250 or not ts <= 11251) and (not user_id = 1 or not user_id is not null or not ts >= 11252 or not ts <= 11253) and (not user_id = 1 or not user_id is not null or not ts >= 11254 or not ts <= 11255) and (not user_id = 1 or not user_id is not null or not ts >= 11256 or not ts <= 11257) and (not user_id = 1 or not user_id 
is not null or not ts >= 11258 or not ts <= 11259) and (not user_id = 1 or not user_id is not null or not ts >= 11260 or not ts <= 11261) and (not user_id = 1 or not user_id is not null or not ts >= 11262 or not ts <= 11263) and (not user_id = 1 or not user_id is not null or not ts >= 11264 or not ts <= 11265) and (not user_id = 1 or not user_id is not null or not ts >= 11266 or not ts <= 11267) and (not user_id = 1 or not user_id is not null or not ts >= 11268 or not ts <= 11269) and (not user_id = 1 or not user_id is not null or not ts >= 11270 or not ts <= 11271) and (not user_id = 1 or not user_id is not null or not ts >= 11272 or not ts <= 11273) and (not user_id = 1 or not user_id is not null or not ts >= 11274 or not ts <= 11275) and (not user_id = 1 or not user_id is not null or not ts >= 11276 or not ts <= 11277) and (not user_id = 1 or not user_id is not null or not ts >= 11278 or not ts <= 11279) and (not user_id = 1 or not user_id is not null or not ts >= 11280 or not ts <= 11281) and (not user_id = 1 or not user_id is not null or not ts >= 11282 or not ts <= 11283) and (not user_id = 1 or not user_id is not null or not ts >= 11284 or not ts <= 11285) and (not user_id = 1 or not user_id is not null or not ts >= 11286 or not ts <= 11287) and (not user_id = 1 or not user_id is not null or not ts >= 11288 or not ts <= 11289) and (not user_id = 1 or not user_id is not null or not ts >= 11290 or not ts <= 11291) and (not user_id = 1 or not user_id is not null or not ts >= 11292 or not ts <= 11293) and (not user_id = 1 or not user_id is not null or not ts >= 11294 or not ts <= 11295) and (not user_id = 1 or not user_id is not null or not ts >= 11296 or not ts <= 11297) and (not user_id = 1 or not user_id is not null or not ts >= 11298 or not ts <= 11299) and (not user_id = 1 or not user_id is not null or not ts >= 11300 or not ts <= 11301) and (not user_id = 1 or not user_id is not null or not ts >= 11302 or not ts <= 11303) and (not user_id = 1 or not 
user_id is not null or not ts >= 11304 or not ts <= 11305) and (not user_id = 1 or not user_id is not null or not ts >= 11306 or not ts <= 11307) and (not user_id = 1 or not user_id is not null or not ts >= 11308 or not ts <= 11309) and (not user_id = 1 or not user_id is not null or not ts >= 11310 or not ts <= 11311) and (not user_id = 1 or not user_id is not null or not ts >= 11312 or not ts <= 11313) and (not user_id = 1 or not user_id is not null or not ts >= 11314 or not ts <= 11315) and (not user_id = 1 or not user_id is not null or not ts >= 11316 or not ts <= 11317) and (not user_id = 1 or not user_id is not null or not ts >= 11318 or not ts <= 11319) and (not user_id = 1 or not user_id is not null or not ts >= 11320 or not ts <= 11321) and (not user_id = 1 or not user_id is not null or not ts >= 11322 or not ts <= 11323) and (not user_id = 1 or not user_id is not null or not ts >= 11324 or not ts <= 11325) and (not user_id = 1 or not user_id is not null or not ts >= 11326 or not ts <= 11327) and (not user_id = 1 or not user_id is not null or not ts >= 11328 or not ts <= 11329) and (not user_id = 1 or not user_id is not null or not ts >= 11330 or not ts <= 11331) and (not user_id = 1 or not user_id is not null or not ts >= 11332 or not ts <= 11333) and (not user_id = 1 or not user_id is not null or not ts >= 11334 or not ts <= 11335) and (not user_id = 1 or not user_id is not null or not ts >= 11336 or not ts <= 11337) and (not user_id = 1 or not user_id is not null or not ts >= 11338 or not ts <= 11339) and (not user_id = 1 or not user_id is not null or not ts >= 11340 or not ts <= 11341) and (not user_id = 1 or not user_id is not null or not ts >= 11342 or not ts <= 11343) and (not user_id = 1 or not user_id is not null or not ts >= 11344 or not ts <= 11345) and (not user_id = 1 or not user_id is not null or not ts >= 11346 or not ts <= 11347) and (not user_id = 1 or not user_id is not null or not ts >= 11348 or not ts <= 11349) and (not user_id = 1 or 
not user_id is not null or not ts >= 11350 or not ts <= 11351) and (not user_id = 1 or not user_id is not null or not ts >= 11352 or not ts <= 11353) and (not user_id = 1 or not user_id is not null or not ts >= 11354 or not ts <= 11355) and (not user_id = 1 or not user_id is not null or not ts >= 11356 or not ts <= 11357) and (not user_id = 1 or not user_id is not null or not ts >= 11358 or not ts <= 11359) and (not user_id = 1 or not user_id is not null or not ts >= 11360 or not ts <= 11361) and (not user_id = 1 or not user_id is not null or not ts >= 11362 or not ts <= 11363) and (not user_id = 1 or not user_id is not null or not ts >= 11364 or not ts <= 11365) and (not user_id = 1 or not user_id is not null or not ts >= 11366 or not ts <= 11367) and (not user_id = 1 or not user_id is not null or not ts >= 11368 or not ts <= 11369) and (not user_id = 1 or not user_id is not null or not ts >= 11370 or not ts <= 11371) and (not user_id = 1 or not user_id is not null or not ts >= 11372 or not ts <= 11373) and (not user_id = 1 or not user_id is not null or not ts >= 11374 or not ts <= 11375) and (not user_id = 1 or not user_id is not null or not ts >= 11376 or not ts <= 11377) and (not user_id = 1 or not user_id is not null or not ts >= 11378 or not ts <= 11379) and (not user_id = 1 or not user_id is not null or not ts >= 11380 or not ts <= 11381) and (not user_id = 1 or not user_id is not null or not ts >= 11382 or not ts <= 11383) and (not user_id = 1 or not user_id is not null or not ts >= 11384 or not ts <= 11385) and (not user_id = 1 or not user_id is not null or not ts >= 11386 or not ts <= 11387) and (not user_id = 1 or not user_id is not null or not ts >= 11388 or not ts <= 11389) and (not user_id = 1 or not user_id is not null or not ts >= 11390 or not ts <= 11391) and (not user_id = 1 or not user_id is not null or not ts >= 11392 or not ts <= 11393) and (not user_id = 1 or not user_id is not null or not ts >= 11394 or not ts <= 11395) and (not user_id = 1 
or not user_id is not null or not ts >= 11396 or not ts <= 11397) and (not user_id = 1 or not user_id is not null or not ts >= 11398 or not ts <= 11399) and (not user_id = 1 or not user_id is not null or not ts >= 11400 or not ts <= 11401) and (not user_id = 1 or not user_id is not null or not ts >= 11402 or not ts <= 11403) and (not user_id = 1 or not user_id is not null or not ts >= 11404 or not ts <= 11405) and (not user_id = 1 or not user_id is not null or not ts >= 11406 or not ts <= 11407) and (not user_id = 1 or not user_id is not null or not ts >= 11408 or not ts <= 11409) and (not user_id = 1 or not user_id is not null or not ts >= 11410 or not ts <= 11411) and (not user_id = 1 or not user_id is not null or not ts >= 11412 or not ts <= 11413) and (not user_id = 1 or not user_id is not null or not ts >= 11414 or not ts <= 11415) and (not user_id = 1 or not user_id is not null or not ts >= 11416 or not ts <= 11417) and (not user_id = 1 or not user_id is not null or not ts >= 11418 or not ts <= 11419) and (not user_id = 1 or not user_id is not null or not ts >= 11420 or not ts <= 11421) and (not user_id = 1 or not user_id is not null or not ts >= 11422 or not ts <= 11423) and (not user_id = 1 or not user_id is not null or not ts >= 11424 or not ts <= 11425) and (not user_id = 1 or not user_id is not null or not ts >= 11426 or not ts <= 11427) and (not user_id = 1 or not user_id is not null or not ts >= 11428 or not ts <= 11429) and (not user_id = 1 or not user_id is not null or not ts >= 11430 or not ts <= 11431) and (not user_id = 1 or not user_id is not null or not ts >= 11432 or not ts <= 11433) and (not user_id = 1 or not user_id is not null or not ts >= 11434 or not ts <= 11435) and (not user_id = 1 or not user_id is not null or not ts >= 11436 or not ts <= 11437) and (not user_id = 1 or not user_id is not null or not ts >= 11438 or not ts <= 11439) and (not user_id = 1 or not user_id is not null or not ts >= 11440 or not ts <= 11441) and (not user_id = 
1 or not user_id is not null or not ts >= 11442 or not ts <= 11443) and (not user_id = 1 or not user_id is not null or not ts >= 11444 or not ts <= 11445) and (not user_id = 1 or not user_id is not null or not ts >= 11446 or not ts <= 11447) and (not user_id = 1 or not user_id is not null or not ts >= 11448 or not ts <= 11449) and (not user_id = 1 or not user_id is not null or not ts >= 11450 or not ts <= 11451) and (not user_id = 1 or not user_id is not null or not ts >= 11452 or not ts <= 11453) and (not user_id = 1 or not user_id is not null or not ts >= 11454 or not ts <= 11455) and (not user_id = 1 or not user_id is not null or not ts >= 11456 or not ts <= 11457) and (not user_id = 1 or not user_id is not null or not ts >= 11458 or not ts <= 11459) and (not user_id = 1 or not user_id is not null or not ts >= 11460 or not ts <= 11461) and (not user_id = 1 or not user_id is not null or not ts >= 11462 or not ts <= 11463) and (not user_id = 1 or not user_id is not null or not ts >= 11464 or not ts <= 11465) and (not user_id = 1 or not user_id is not null or not ts >= 11466 or not ts <= 11467) and (not user_id = 1 or not user_id is not null or not ts >= 11468 or not ts <= 11469) and (not user_id = 1 or not user_id is not null or not ts >= 11470 or not ts <= 11471) and (not user_id = 1 or not user_id is not null or not ts >= 11472 or not ts <= 11473) and (not user_id = 1 or not user_id is not null or not ts >= 11474 or not ts <= 11475) and (not user_id = 1 or not user_id is not null or not ts >= 11476 or not ts <= 11477) and (not user_id = 1 or not user_id is not null or not ts >= 11478 or not ts <= 11479) and (not user_id = 1 or not user_id is not null or not ts >= 11480 or not ts <= 11481) and (not user_id = 1 or not user_id is not null or not ts >= 11482 or not ts <= 11483) and (not user_id = 1 or not user_id is not null or not ts >= 11484 or not ts <= 11485) and (not user_id = 1 or not user_id is not null or not ts >= 11486 or not ts <= 11487) and (not user_id 
= 1 or not user_id is not null or not ts >= 11488 or not ts <= 11489) and (not user_id = 1 or not user_id is not null or not ts >= 11490 or not ts <= 11491) and (not user_id = 1 or not user_id is not null or not ts >= 11492 or not ts <= 11493) and (not user_id = 1 or not user_id is not null or not ts >= 11494 or not ts <= 11495) and (not user_id = 1 or not user_id is not null or not ts >= 11496 or not ts <= 11497) and (not user_id = 1 or not user_id is not null or not ts >= 11498 or not ts <= 11499) and (not user_id = 1 or not user_id is not null or not ts >= 11500 or not ts <= 11501) and (not user_id = 1 or not user_id is not null or not ts >= 11502 or not ts <= 11503) and (not user_id = 1 or not user_id is not null or not ts >= 11504 or not ts <= 11505) and (not user_id = 1 or not user_id is not null or not ts >= 11506 or not ts <= 11507) and (not user_id = 1 or not user_id is not null or not ts >= 11508 or not ts <= 11509) and (not user_id = 1 or not user_id is not null or not ts >= 11510 or not ts <= 11511) and (not user_id = 1 or not user_id is not null or not ts >= 11512 or not ts <= 11513) and (not user_id = 1 or not user_id is not null or not ts >= 11514 or not ts <= 11515) and (not user_id = 1 or not user_id is not null or not ts >= 11516 or not ts <= 11517) and (not user_id = 1 or not user_id is not null or not ts >= 11518 or not ts <= 11519) and (not user_id = 1 or not user_id is not null or not ts >= 11520 or not ts <= 11521) and (not user_id = 1 or not user_id is not null or not ts >= 11522 or not ts <= 11523) and (not user_id = 1 or not user_id is not null or not ts >= 11524 or not ts <= 11525) and (not user_id = 1 or not user_id is not null or not ts >= 11526 or not ts <= 11527) and (not user_id = 1 or not user_id is not null or not ts >= 11528 or not ts <= 11529) and (not user_id = 1 or not user_id is not null or not ts >= 11530 or not ts <= 11531) and (not user_id = 1 or not user_id is not null or not ts >= 11532 or not ts <= 11533) and (not 
user_id = 1 or not user_id is not null or not ts >= 11534 or not ts <= 11535) and (not user_id = 1 or not user_id is not null or not ts >= 11536 or not ts <= 11537) and (not user_id = 1 or not user_id is not null or not ts >= 11538 or not ts <= 11539) and (not user_id = 1 or not user_id is not null or not ts >= 11540 or not ts <= 11541) and (not user_id = 1 or not user_id is not null or not ts >= 11542 or not ts <= 11543) and (not user_id = 1 or not user_id is not null or not ts >= 11544 or not ts <= 11545) and (not user_id = 1 or not user_id is not null or not ts >= 11546 or not ts <= 11547) and (not user_id = 1 or not user_id is not null or not ts >= 11548 or not ts <= 11549) and (not user_id = 1 or not user_id is not null or not ts >= 11550 or not ts <= 11551) and (not user_id = 1 or not user_id is not null or not ts >= 11552 or not ts <= 11553) and (not user_id = 1 or not user_id is not null or not ts >= 11554 or not ts <= 11555) and (not user_id = 1 or not user_id is not null or not ts >= 11556 or not ts <= 11557) and (not user_id = 1 or not user_id is not null or not ts >= 11558 or not ts <= 11559) and (not user_id = 1 or not user_id is not null or not ts >= 11560 or not ts <= 11561) and (not user_id = 1 or not user_id is not null or not ts >= 11562 or not ts <= 11563) and (not user_id = 1 or not user_id is not null or not ts >= 11564 or not ts <= 11565) and (not user_id = 1 or not user_id is not null or not ts >= 11566 or not ts <= 11567) and (not user_id = 1 or not user_id is not null or not ts >= 11568 or not ts <= 11569) and (not user_id = 1 or not user_id is not null or not ts >= 11570 or not ts <= 11571) and (not user_id = 1 or not user_id is not null or not ts >= 11572 or not ts <= 11573) and (not user_id = 1 or not user_id is not null or not ts >= 11574 or not ts <= 11575) and (not user_id = 1 or not user_id is not null or not ts >= 11576 or not ts <= 11577) and (not user_id = 1 or not user_id is not null or not ts >= 11578 or not ts <= 11579) and 
(not user_id = 1 or not user_id is not null or not ts >= 11580 or not ts <= 11581) and (not user_id = 1 or not user_id is not null or not ts >= 11582 or not ts <= 11583) and (not user_id = 1 or not user_id is not null or not ts >= 11584 or not ts <= 11585) and (not user_id = 1 or not user_id is not null or not ts >= 11586 or not ts <= 11587) and (not user_id = 1 or not user_id is not null or not ts >= 11588 or not ts <= 11589) and (not user_id = 1 or not user_id is not null or not ts >= 11590 or not ts <= 11591) and (not user_id = 1 or not user_id is not null or not ts >= 11592 or not ts <= 11593) and (not user_id = 1 or not user_id is not null or not ts >= 11594 or not ts <= 11595) and (not user_id = 1 or not user_id is not null or not ts >= 11596 or not ts <= 11597) and (not user_id = 1 or not user_id is not null or not ts >= 11598 or not ts <= 11599) and (not user_id = 1 or not user_id is not null or not ts >= 11600 or not ts <= 11601) and (not user_id = 1 or not user_id is not null or not ts >= 11602 or not ts <= 11603) and (not user_id = 1 or not user_id is not null or not ts >= 11604 or not ts <= 11605) and (not user_id = 1 or not user_id is not null or not ts >= 11606 or not ts <= 11607) and (not user_id = 1 or not user_id is not null or not ts >= 11608 or not ts <= 11609) and (not user_id = 1 or not user_id is not null or not ts >= 11610 or not ts <= 11611) and (not user_id = 1 or not user_id is not null or not ts >= 11612 or not ts <= 11613) and (not user_id = 1 or not user_id is not null or not ts >= 11614 or not ts <= 11615) and (not user_id = 1 or not user_id is not null or not ts >= 11616 or not ts <= 11617) and (not user_id = 1 or not user_id is not null or not ts >= 11618 or not ts <= 11619) and (not user_id = 1 or not user_id is not null or not ts >= 11620 or not ts <= 11621) and (not user_id = 1 or not user_id is not null or not ts >= 11622 or not ts <= 11623) and (not user_id = 1 or not user_id is not null or not ts >= 11624 or not ts <= 11625) 
and (not user_id = 1 or not user_id is not null or not ts >= 11626 or not ts <= 11627) and (not user_id = 1 or not user_id is not null or not ts >= 11628 or not ts <= 11629) and (not user_id = 1 or not user_id is not null or not ts >= 11630 or not ts <= 11631) and (not user_id = 1 or not user_id is not null or not ts >= 11632 or not ts <= 11633) and (not user_id = 1 or not user_id is not null or not ts >= 11634 or not ts <= 11635) and (not user_id = 1 or not user_id is not null or not ts >= 11636 or not ts <= 11637) and (not user_id = 1 or not user_id is not null or not ts >= 11638 or not ts <= 11639) and (not user_id = 1 or not user_id is not null or not ts >= 11640 or not ts <= 11641) and (not user_id = 1 or not user_id is not null or not ts >= 11642 or not ts <= 11643) and (not user_id = 1 or not user_id is not null or not ts >= 11644 or not ts <= 11645) and (not user_id = 1 or not user_id is not null or not ts >= 11646 or not ts <= 11647) and (not user_id = 1 or not user_id is not null or not ts >= 11648 or not ts <= 11649) and (not user_id = 1 or not user_id is not null or not ts >= 11650 or not ts <= 11651) and (not user_id = 1 or not user_id is not null or not ts >= 11652 or not ts <= 11653) and (not user_id = 1 or not user_id is not null or not ts >= 11654 or not ts <= 11655) and (not user_id = 1 or not user_id is not null or not ts >= 11656 or not ts <= 11657) and (not user_id = 1 or not user_id is not null or not ts >= 11658 or not ts <= 11659) and (not user_id = 1 or not user_id is not null or not ts >= 11660 or not ts <= 11661) and (not user_id = 1 or not user_id is not null or not ts >= 11662 or not ts <= 11663) and (not user_id = 1 or not user_id is not null or not ts >= 11664 or not ts <= 11665) and (not user_id = 1 or not user_id is not null or not ts >= 11666 or not ts <= 11667) and (not user_id = 1 or not user_id is not null or not ts >= 11668 or not ts <= 11669) and (not user_id = 1 or not user_id is not null or not ts >= 11670 or not ts <= 
11671) and (not user_id = 1 or not user_id is not null or not ts >= 11672 or not ts <= 11673) and (not user_id = 1 or not user_id is not null or not ts >= 11674 or not ts <= 11675) and (not user_id = 1 or not user_id is not null or not ts >= 11676 or not ts <= 11677) and (not user_id = 1 or not user_id is not null or not ts >= 11678 or not ts <= 11679) and (not user_id = 1 or not user_id is not null or not ts >= 11680 or not ts <= 11681) and (not user_id = 1 or not user_id is not null or not ts >= 11682 or not ts <= 11683) and (not user_id = 1 or not user_id is not null or not ts >= 11684 or not ts <= 11685) and (not user_id = 1 or not user_id is not null or not ts >= 11686 or not ts <= 11687) and (not user_id = 1 or not user_id is not null or not ts >= 11688 or not ts <= 11689) and (not user_id = 1 or not user_id is not null or not ts >= 11690 or not ts <= 11691) and (not user_id = 1 or not user_id is not null or not ts >= 11692 or not ts <= 11693) and (not user_id = 1 or not user_id is not null or not ts >= 11694 or not ts <= 11695) and (not user_id = 1 or not user_id is not null or not ts >= 11696 or not ts <= 11697) and (not user_id = 1 or not user_id is not null or not ts >= 11698 or not ts <= 11699) and (not user_id = 1 or not user_id is not null or not ts >= 11700 or not ts <= 11701) and (not user_id = 1 or not user_id is not null or not ts >= 11702 or not ts <= 11703) and (not user_id = 1 or not user_id is not null or not ts >= 11704 or not ts <= 11705) and (not user_id = 1 or not user_id is not null or not ts >= 11706 or not ts <= 11707) and (not user_id = 1 or not user_id is not null or not ts >= 11708 or not ts <= 11709) and (not user_id = 1 or not user_id is not null or not ts >= 11710 or not ts <= 11711) and (not user_id = 1 or not user_id is not null or not ts >= 11712 or not ts <= 11713) and (not user_id = 1 or not user_id is not null or not ts >= 11714 or not ts <= 11715) and (not user_id = 1 or not user_id is not null or not ts >= 11716 or not ts 
<= 11717) and (not user_id = 1 or not user_id is not null or not ts >= 11718 or not ts <= 11719) and (not user_id = 1 or not user_id is not null or not ts >= 11720 or not ts <= 11721) and (not user_id = 1 or not user_id is not null or not ts >= 11722 or not ts <= 11723) and (not user_id = 1 or not user_id is not null or not ts >= 11724 or not ts <= 11725) and (not user_id = 1 or not user_id is not null or not ts >= 11726 or not ts <= 11727) and (not user_id = 1 or not user_id is not null or not ts >= 11728 or not ts <= 11729) and (not user_id = 1 or not user_id is not null or not ts >= 11730 or not ts <= 11731) and (not user_id = 1 or not user_id is not null or not ts >= 11732 or not ts <= 11733) and (not user_id = 1 or not user_id is not null or not ts >= 11734 or not ts <= 11735) and (not user_id = 1 or not user_id is not null or not ts >= 11736 or not ts <= 11737) and (not user_id = 1 or not user_id is not null or not ts >= 11738 or not ts <= 11739) and (not user_id = 1 or not user_id is not null or not ts >= 11740 or not ts <= 11741) and (not user_id = 1 or not user_id is not null or not ts >= 11742 or not ts <= 11743) and (not user_id = 1 or not user_id is not null or not ts >= 11744 or not ts <= 11745) and (not user_id = 1 or not user_id is not null or not ts >= 11746 or not ts <= 11747) and (not user_id = 1 or not user_id is not null or not ts >= 11748 or not ts <= 11749) and (not user_id = 1 or not user_id is not null or not ts >= 11750 or not ts <= 11751) and (not user_id = 1 or not user_id is not null or not ts >= 11752 or not ts <= 11753) and (not user_id = 1 or not user_id is not null or not ts >= 11754 or not ts <= 11755) and (not user_id = 1 or not user_id is not null or not ts >= 11756 or not ts <= 11757) and (not user_id = 1 or not user_id is not null or not ts >= 11758 or not ts <= 11759) and (not user_id = 1 or not user_id is not null or not ts >= 11760 or not ts <= 11761) and (not user_id = 1 or not user_id is not null or not ts >= 11762 or not 
ts <= 11763) and (not user_id = 1 or not user_id is not null or not ts >= 11764 or not ts <= 11765) and (not user_id = 1 or not user_id is not null or not ts >= 11766 or not ts <= 11767) and (not user_id = 1 or not user_id is not null or not ts >= 11768 or not ts <= 11769) and (not user_id = 1 or not user_id is not null or not ts >= 11770 or not ts <= 11771) and (not user_id = 1 or not user_id is not null or not ts >= 11772 or not ts <= 11773) and (not user_id = 1 or not user_id is not null or not ts >= 11774 or not ts <= 11775) and (not user_id = 1 or not user_id is not null or not ts >= 11776 or not ts <= 11777) and (not user_id = 1 or not user_id is not null or not ts >= 11778 or not ts <= 11779) and (not user_id = 1 or not user_id is not null or not ts >= 11780 or not ts <= 11781) and (not user_id = 1 or not user_id is not null or not ts >= 11782 or not ts <= 11783) and (not user_id = 1 or not user_id is not null or not ts >= 11784 or not ts <= 11785) and (not user_id = 1 or not user_id is not null or not ts >= 11786 or not ts <= 11787) and (not user_id = 1 or not user_id is not null or not ts >= 11788 or not ts <= 11789) and (not user_id = 1 or not user_id is not null or not ts >= 11790 or not ts <= 11791) and (not user_id = 1 or not user_id is not null or not ts >= 11792 or not ts <= 11793) and (not user_id = 1 or not user_id is not null or not ts >= 11794 or not ts <= 11795) and (not user_id = 1 or not user_id is not null or not ts >= 11796 or not ts <= 11797) and (not user_id = 1 or not user_id is not null or not ts >= 11798 or not ts <= 11799) and (not user_id = 1 or not user_id is not null or not ts >= 11800 or not ts <= 11801) and (not user_id = 1 or not user_id is not null or not ts >= 11802 or not ts <= 11803) and (not user_id = 1 or not user_id is not null or not ts >= 11804 or not ts <= 11805) and (not user_id = 1 or not user_id is not null or not ts >= 11806 or not ts <= 11807) and (not user_id = 1 or not user_id is not null or not ts >= 11808 or 
not ts <= 11809) and (not user_id = 1 or not user_id is not null or not ts >= 11810 or not ts <= 11811) and (not user_id = 1 or not user_id is not null or not ts >= 11812 or not ts <= 11813) and (not user_id = 1 or not user_id is not null or not ts >= 11814 or not ts <= 11815) and (not user_id = 1 or not user_id is not null or not ts >= 11816 or not ts <= 11817) and (not user_id = 1 or not user_id is not null or not ts >= 11818 or not ts <= 11819) and (not user_id = 1 or not user_id is not null or not ts >= 11820 or not ts <= 11821) and (not user_id = 1 or not user_id is not null or not ts >= 11822 or not ts <= 11823) and (not user_id = 1 or not user_id is not null or not ts >= 11824 or not ts <= 11825) and (not user_id = 1 or not user_id is not null or not ts >= 11826 or not ts <= 11827) and (not user_id = 1 or not user_id is not null or not ts >= 11828 or not ts <= 11829) and (not user_id = 1 or not user_id is not null or not ts >= 11830 or not ts <= 11831) and (not user_id = 1 or not user_id is not null or not ts >= 11832 or not ts <= 11833) and (not user_id = 1 or not user_id is not null or not ts >= 11834 or not ts <= 11835) and (not user_id = 1 or not user_id is not null or not ts >= 11836 or not ts <= 11837) and (not user_id = 1 or not user_id is not null or not ts >= 11838 or not ts <= 11839) and (not user_id = 1 or not user_id is not null or not ts >= 11840 or not ts <= 11841) and (not user_id = 1 or not user_id is not null or not ts >= 11842 or not ts <= 11843) and (not user_id = 1 or not user_id is not null or not ts >= 11844 or not ts <= 11845) and (not user_id = 1 or not user_id is not null or not ts >= 11846 or not ts <= 11847) and (not user_id = 1 or not user_id is not null or not ts >= 11848 or not ts <= 11849) and (not user_id = 1 or not user_id is not null or not ts >= 11850 or not ts <= 11851) and (not user_id = 1 or not user_id is not null or not ts >= 11852 or not ts <= 11853) and (not user_id = 1 or not user_id is not null or not ts >= 11854 
or not ts <= 11855) and (not user_id = 1 or not user_id is not null or not ts >= 11856 or not ts <= 11857) and (not user_id = 1 or not user_id is not null or not ts >= 11858 or not ts <= 11859) and (not user_id = 1 or not user_id is not null or not ts >= 11860 or not ts <= 11861) and (not user_id = 1 or not user_id is not null or not ts >= 11862 or not ts <= 11863) and (not user_id = 1 or not user_id is not null or not ts >= 11864 or not ts <= 11865) and (not user_id = 1 or not user_id is not null or not ts >= 11866 or not ts <= 11867) and (not user_id = 1 or not user_id is not null or not ts >= 11868 or not ts <= 11869) and (not user_id = 1 or not user_id is not null or not ts >= 11870 or not ts <= 11871) and (not user_id = 1 or not user_id is not null or not ts >= 11872 or not ts <= 11873) and (not user_id = 1 or not user_id is not null or not ts >= 11874 or not ts <= 11875) and (not user_id = 1 or not user_id is not null or not ts >= 11876 or not ts <= 11877) and (not user_id = 1 or not user_id is not null or not ts >= 11878 or not ts <= 11879) and (not user_id = 1 or not user_id is not null or not ts >= 11880 or not ts <= 11881) and (not user_id = 1 or not user_id is not null or not ts >= 11882 or not ts <= 11883) and (not user_id = 1 or not user_id is not null or not ts >= 11884 or not ts <= 11885) and (not user_id = 1 or not user_id is not null or not ts >= 11886 or not ts <= 11887) and (not user_id = 1 or not user_id is not null or not ts >= 11888 or not ts <= 11889) and (not user_id = 1 or not user_id is not null or not ts >= 11890 or not ts <= 11891) and (not user_id = 1 or not user_id is not null or not ts >= 11892 or not ts <= 11893) and (not user_id = 1 or not user_id is not null or not ts >= 11894 or not ts <= 11895) and (not user_id = 1 or not user_id is not null or not ts >= 11896 or not ts <= 11897) and (not user_id = 1 or not user_id is not null or not ts >= 11898 or not ts <= 11899) and (not user_id = 1 or not user_id is not null or not ts >= 
11900 or not ts <= 11901) and (not user_id = 1 or not user_id is not null or not ts >= 11902 or not ts <= 11903) and (not user_id = 1 or not user_id is not null or not ts >= 11904 or not ts <= 11905) and (not user_id = 1 or not user_id is not null or not ts >= 11906 or not ts <= 11907) and (not user_id = 1 or not user_id is not null or not ts >= 11908 or not ts <= 11909) and (not user_id = 1 or not user_id is not null or not ts >= 11910 or not ts <= 11911) and (not user_id = 1 or not user_id is not null or not ts >= 11912 or not ts <= 11913) and (not user_id = 1 or not user_id is not null or not ts >= 11914 or not ts <= 11915) and (not user_id = 1 or not user_id is not null or not ts >= 11916 or not ts <= 11917) and (not user_id = 1 or not user_id is not null or not ts >= 11918 or not ts <= 11919) and (not user_id = 1 or not user_id is not null or not ts >= 11920 or not ts <= 11921) and (not user_id = 1 or not user_id is not null or not ts >= 11922 or not ts <= 11923) and (not user_id = 1 or not user_id is not null or not ts >= 11924 or not ts <= 11925) and (not user_id = 1 or not user_id is not null or not ts >= 11926 or not ts <= 11927) and (not user_id = 1 or not user_id is not null or not ts >= 11928 or not ts <= 11929) and (not user_id = 1 or not user_id is not null or not ts >= 11930 or not ts <= 11931) and (not user_id = 1 or not user_id is not null or not ts >= 11932 or not ts <= 11933) and (not user_id = 1 or not user_id is not null or not ts >= 11934 or not ts <= 11935) and (not user_id = 1 or not user_id is not null or not ts >= 11936 or not ts <= 11937) and (not user_id = 1 or not user_id is not null or not ts >= 11938 or not ts <= 11939) and (not user_id = 1 or not user_id is not null or not ts >= 11940 or not ts <= 11941) and (not user_id = 1 or not user_id is not null or not ts >= 11942 or not ts <= 11943) and (not user_id = 1 or not user_id is not null or not ts >= 11944 or not ts <= 11945) and (not user_id = 1 or not user_id is not null or not ts 
>= 11946 or not ts <= 11947) and (not user_id = 1 or not user_id is not null or not ts >= 11948 or not ts <= 11949) and (not user_id = 1 or not user_id is not null or not ts >= 11950 or not ts <= 11951) and (not user_id = 1 or not user_id is not null or not ts >= 11952 or not ts <= 11953) and (not user_id = 1 or not user_id is not null or not ts >= 11954 or not ts <= 11955) and (not user_id = 1 or not user_id is not null or not ts >= 11956 or not ts <= 11957) and (not user_id = 1 or not user_id is not null or not ts >= 11958 or not ts <= 11959) and (not user_id = 1 or not user_id is not null or not ts >= 11960 or not ts <= 11961) and (not user_id = 1 or not user_id is not null or not ts >= 11962 or not ts <= 11963) and (not user_id = 1 or not user_id is not null or not ts >= 11964 or not ts <= 11965) and (not user_id = 1 or not user_id is not null or not ts >= 11966 or not ts <= 11967) and (not user_id = 1 or not user_id is not null or not ts >= 11968 or not ts <= 11969) and (not user_id = 1 or not user_id is not null or not ts >= 11970 or not ts <= 11971) and (not user_id = 1 or not user_id is not null or not ts >= 11972 or not ts <= 11973) and (not user_id = 1 or not user_id is not null or not ts >= 11974 or not ts <= 11975) and (not user_id = 1 or not user_id is not null or not ts >= 11976 or not ts <= 11977) and (not user_id = 1 or not user_id is not null or not ts >= 11978 or not ts <= 11979) and (not user_id = 1 or not user_id is not null or not ts >= 11980 or not ts <= 11981) and (not user_id = 1 or not user_id is not null or not ts >= 11982 or not ts <= 11983) and (not user_id = 1 or not user_id is not null or not ts >= 11984 or not ts <= 11985) and (not user_id = 1 or not user_id is not null or not ts >= 11986 or not ts <= 11987) and (not user_id = 1 or not user_id is not null or not ts >= 11988 or not ts <= 11989) and (not user_id = 1 or not user_id is not null or not ts >= 11990 or not ts <= 11991) and (not user_id = 1 or not user_id is not null or not 
ts >= 11992 or not ts <= 11993) and ts >= 113898 and parent_id = 1 order by ts asc limit :__upper_limit", + "ResultColumns": 1, + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "union inside subquery. all routes can be merged by literal value", + "query": "select 1 from user where id = 12 and exists(select 1 from music where user_id = 12 union select 1 from user_extra where user_id = 12)", + "plan": { + "QueryType": "SELECT", + "Original": "select 1 from user where id = 12 and exists(select 1 from music where user_id = 12 union select 1 from user_extra where user_id = 12)", + "Instructions": { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from `user` where 1 != 1", + "Query": "select 1 from `user` where id = 12 and exists (select 1 from music where user_id = 12 union select 1 from user_extra where user_id = 12)", + "Table": "`user`", + "Values": [ + "INT64(12)" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.music", + "user.user", + "user.user_extra" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/foreignkey_cases.json b/go/vt/vtgate/planbuilder/testdata/foreignkey_cases.json new file mode 100644 index 00000000000..0d3c5e4745a --- /dev/null +++ b/go/vt/vtgate/planbuilder/testdata/foreignkey_cases.json @@ -0,0 +1,1657 @@ +[ + { + "comment": "Insertion in a table with cross-shard foreign keys disallowed", + "query": "insert into tbl3 (col3, coly) values (1, 3)", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "Insertion in a table with shard-scoped foreign keys is allowed", + "query": "insert into tbl2 (col2, coly) values (1, 3)", + "plan": { + "QueryType": "INSERT", + "Original": "insert into tbl2 (col2, coly) values (1, 3)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Sharded", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + 
"TargetTabletType": "PRIMARY", + "Query": "insert into tbl2(col2, coly) values (:_col2_0, 3)", + "TableName": "tbl2", + "VindexValues": { + "hash_vin": "INT64(1)" + } + }, + "TablesUsed": [ + "sharded_fk_allow.tbl2" + ] + } + }, + { + "comment": "Insertion in a table with shard-scoped multiple column foreign key is allowed", + "query": "insert into multicol_tbl2 (cola, colb, colc) values (1, 2, 3)", + "plan": { + "QueryType": "INSERT", + "Original": "insert into multicol_tbl2 (cola, colb, colc) values (1, 2, 3)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Sharded", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "insert into multicol_tbl2(cola, colb, colc) values (:_cola_0, :_colb_0, :_colc_0)", + "TableName": "multicol_tbl2", + "VindexValues": { + "multicolIdx": "INT64(1), INT64(2), INT64(3)" + } + }, + "TablesUsed": [ + "sharded_fk_allow.multicol_tbl2" + ] + } + }, + { + "comment": "Delete in a table with cross-shard foreign keys disallowed", + "query": "delete from tbl1", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "Delete in a table with not all column shard-scoped foreign keys - disallowed", + "query": "delete from tbl7", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "Delete in a table with shard-scoped multiple column foreign key with cascade", + "query": "delete from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "plan": { + "QueryType": "DELETE", + "Original": "delete from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select colb, cola, y, colc, x from multicol_tbl1 where 1 != 1", + "Query": "select colb, cola, y, colc, x from multicol_tbl1 where 
cola = 1 and colb = 2 and colc = 3 for update", + "Table": "multicol_tbl1", + "Values": [ + "INT64(1)", + "INT64(2)", + "INT64(3)" + ], + "Vindex": "multicolIdx" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0, + 1, + 2, + 3, + 4 + ], + "Query": "delete from multicol_tbl2 where (colb, cola, x, colc, y) in ::fkc_vals", + "Table": "multicol_tbl2" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "Table": "multicol_tbl1", + "Values": [ + "INT64(1)", + "INT64(2)", + "INT64(3)" + ], + "Vindex": "multicolIdx" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.multicol_tbl1", + "sharded_fk_allow.multicol_tbl2" + ] + } + }, + { + "comment": "Delete in a table with shard-scoped foreign keys with cascade", + "query": "delete from tbl5", + "plan": { + "QueryType": "DELETE", + "Original": "delete from tbl5", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select col5, t5col5 from tbl5 where 1 != 1", + "Query": "select col5, t5col5 from tbl5 for update", + "Table": "tbl5" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "delete from tbl4 where (col4) in ::fkc_vals", + "Table": "tbl4" + }, + { + "InputName": "CascadeChild-2", + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { 
+ "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 1 + ], + "Query": "delete from tbl4 where (t4col4) in ::fkc_vals1", + "Table": "tbl4" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from tbl5", + "Table": "tbl5" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl4", + "sharded_fk_allow.tbl5" + ] + } + }, + { + "comment": "Delete in a table with shard-scoped foreign keys with SET NULL", + "query": "delete from tbl8 where col8 = 1", + "plan": "VT12001: unsupported: you cannot UPDATE primary vindex columns; invalid update on vindex: hash_vin" + }, + { + "comment": "Delete in a table with unsharded foreign key with SET NULL", + "query": "delete from u_tbl9 where col9 = 5", + "plan": { + "QueryType": "DELETE", + "Original": "delete from u_tbl9 where col9 = 5", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col9 from u_tbl9 where 1 != 1", + "Query": "select col9 from u_tbl9 where col9 = 5 for update", + "Table": "u_tbl9" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update u_tbl8 set col8 = null where (col8) in ::fkc_vals", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from u_tbl9 where col9 = 5", + "Table": "u_tbl9" + } + ] + }, + "TablesUsed": [ + 
"unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "update in unsharded table with restrict", + "query": "update u_tbl5 set col5 = 'foo' where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl5 set col5 = 'foo' where id = 1", + "Instructions": { + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl5 set col5 = 'foo' where id = 1", + "Table": "u_tbl5" + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl5" + ] + } + }, + { + "comment": "update in unsharded table with cascade", + "query": "update u_tbl2 set col2 = 'bar' where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl2 set col2 = 'bar' where id = 1", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col2 from u_tbl2 where 1 != 1", + "Query": "select col2 from u_tbl2 where id = 1 for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals and (u_tbl3.col3) not in (('bar'))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl2 set col2 = 'bar' where id = 1", + "Table": "u_tbl2" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3" + ] + } + }, + { + "comment": "update in unsharded table with cascade - on 
non-referenced column", + "query": "update u_tbl2 set col_no_ref = 'baz' where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl2 set col_no_ref = 'baz' where id = 1", + "Instructions": { + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl2 set col_no_ref = 'baz' where id = 1", + "Table": "u_tbl2" + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl2" + ] + } + }, + { + "comment": "Update in a table with cross-shard foreign keys disallowed", + "query": "update tbl1 set t1col1 = 'foo' where col1 = 1", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "Update in a table with cross-shard foreign keys, column not in update expression - allowed", + "query": "update tbl1 set not_ref_col = 'foo' where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update tbl1 set not_ref_col = 'foo' where id = 1", + "Instructions": { + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update tbl1 set not_ref_col = 'foo' where id = 1", + "Table": "tbl1" + }, + "TablesUsed": [ + "sharded_fk_allow.tbl1" + ] + } + }, + { + "comment": "Update in a table with column modified not shard-scoped foreign key whereas other column referencing same table is - disallowed", + "query": "update tbl7 set t7col7 = 'foo', t7col72 = 42", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "Update in a table with shard-scoped foreign keys with cascade", + "query": "update tbl5 set t5col5 = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update tbl5 set t5col5 = 'foo'", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + 
"Sharded": true + }, + "FieldQuery": "select t5col5 from tbl5 where 1 != 1", + "Query": "select t5col5 from tbl5 for update", + "Table": "tbl5" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update tbl4 set t4col4 = null where (t4col4) in ::fkc_vals and (tbl4.t4col4) not in (('foo'))", + "Table": "tbl4" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update tbl5 set t5col5 = 'foo'", + "Table": "tbl5" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl4", + "sharded_fk_allow.tbl5" + ] + } + }, + { + "comment": "Insertion in a table with 2 foreign keys constraint with same table on different columns - both are not shard scoped - disallowed", + "query": "insert into tbl6 (col6, t6col6) values (100, 'foo')", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "Update a table with parent and child foreign keys - shard scoped", + "query": "update tbl2 set col = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update tbl2 set col = 'foo'", + "Instructions": { + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update tbl2 set col = 'foo'", + "Table": "tbl2" + }, + "TablesUsed": [ + "sharded_fk_allow.tbl2" + ] + } + }, + { + "comment": "update table with column's parent foreign key cross shard", + "query": "update tbl10 set col = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update tbl10 set col = 'foo'", + "Instructions": { + "OperatorType": "FKVerify", + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Limit", + "Count": "INT64(1)", 
+ "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "INT64(1) as 1" + ], + "Inputs": [ + { + "OperatorType": "Filter", + "Predicate": "tbl3.col is null", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "R:0,R:0", + "TableName": "tbl10_tbl3", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select 1 from tbl10 where 1 != 1", + "Query": "select 1 from tbl10 lock in share mode", + "Table": "tbl10" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select tbl3.col from tbl3 where 1 != 1", + "Query": "select tbl3.col from tbl3 where tbl3.col = 'foo' lock in share mode", + "Table": "tbl3" + } + ] + } + ] + } + ] + } + ] + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update tbl10 set col = 'foo'", + "Table": "tbl10" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl10", + "sharded_fk_allow.tbl3" + ] + } + }, + { + "comment": "delete table with shard scoped foreign key set default - disallowed", + "query": "delete from tbl20 where col = 'bar'", + "plan": "VT09016: Cannot delete or update a parent row: a foreign key constraint fails" + }, + { + "comment": "Delete table with cross-shard foreign key with set null - should be eventually allowed", + "query": "delete from tbl9 where col9 = 34", + "plan": { + "QueryType": "DELETE", + "Original": "delete from tbl9 where col9 = 34", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select col9 from tbl9 where 1 != 1", + "Query": 
"select col9 from tbl9 where col9 = 34 for update", + "Table": "tbl9", + "Values": [ + "INT64(34)" + ], + "Vindex": "hash_vin" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update tbl4 set col_ref = null where (col_ref) in ::fkc_vals", + "Table": "tbl4" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from tbl9 where col9 = 34", + "Table": "tbl9", + "Values": [ + "INT64(34)" + ], + "Vindex": "hash_vin" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl4", + "sharded_fk_allow.tbl9" + ] + } + }, + { + "comment": "update table with same column having reference to different tables, one with on update cascade other with on update set null - child table have further reference", + "query": "update u_tbl1 set col1 = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl1 set col1 = 'foo'", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col1, col1 from u_tbl1 where 1 != 1", + "Query": "select col1, col1 from u_tbl1 for update", + "Table": "u_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col2 from u_tbl2 where 1 != 1", + "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update", + "Table": "u_tbl2" + }, + { + 
"InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0 + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals1 and (u_tbl3.col3) not in (('foo'))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl2 set col2 = 'foo' where (col2) in ::fkc_vals", + "Table": "u_tbl2" + } + ] + }, + { + "InputName": "CascadeChild-2", + "OperatorType": "FkCascade", + "BvName": "fkc_vals2", + "Cols": [ + 1 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col9 from u_tbl9 where 1 != 1", + "Query": "select col9 from u_tbl9 where (col9) in ::fkc_vals2 and (u_tbl9.col9) not in (('foo')) for update", + "Table": "u_tbl9" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals3", + "Cols": [ + 0 + ], + "Query": "update u_tbl8 set col8 = null where (col8) in ::fkc_vals3", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl9 set col9 = null where (col9) in ::fkc_vals2 and (u_tbl9.col9) not in (('foo'))", + "Table": "u_tbl9" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, 
+ "TargetTabletType": "PRIMARY", + "Query": "update u_tbl1 set col1 = 'foo'", + "Table": "u_tbl1" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl1", + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "update in a table with limit - disallowed", + "query": "update u_tbl2 set col2 = 'bar' limit 2", + "plan": "VT12001: unsupported: update with limit with foreign key constraints" + }, + { + "comment": "update in a table with non-literal value - set null fail due to child update where condition", + "query": "update u_tbl2 set m = 2, col2 = col1 + 'bar' where id = 1", + "plan": "VT12001: unsupported: update expression with non-literal values with foreign key constraints" + }, + { + "comment": "update in a table with non-literal value - with cascade fail as the cascade value is not known", + "query": "update u_tbl1 set m = 2, col1 = x + 'bar' where id = 1", + "plan": "VT12001: unsupported: update expression with non-literal values with foreign key constraints" + }, + { + "comment": "update in a table with set null, non-literal value on non-foreign key column - allowed", + "query": "update u_tbl2 set m = col1 + 'bar', col2 = 2 where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl2 set m = col1 + 'bar', col2 = 2 where id = 1", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col2 from u_tbl2 where 1 != 1", + "Query": "select col2 from u_tbl2 where id = 1 for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": 
"update u_tbl3 set col3 = null where (col3) in ::fkc_vals and (u_tbl3.col3) not in ((2))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl2 set m = col1 + 'bar', col2 = 2 where id = 1", + "Table": "u_tbl2" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3" + ] + } + }, + { + "comment": "update in a table with cascade, non-literal value on non-foreign key column - allowed", + "query": "update u_tbl1 set m = x + 'bar', col1 = 2 where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl1 set m = x + 'bar', col1 = 2 where id = 1", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col1, col1 from u_tbl1 where 1 != 1", + "Query": "select col1, col1 from u_tbl1 where id = 1 for update", + "Table": "u_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col2 from u_tbl2 where 1 != 1", + "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0 + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals1 and (u_tbl3.col3) not in ((2))", + "Table": "u_tbl3" + }, + { + "InputName": 
"Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl2 set col2 = 2 where (col2) in ::fkc_vals", + "Table": "u_tbl2" + } + ] + }, + { + "InputName": "CascadeChild-2", + "OperatorType": "FkCascade", + "BvName": "fkc_vals2", + "Cols": [ + 1 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col9 from u_tbl9 where 1 != 1", + "Query": "select col9 from u_tbl9 where (col9) in ::fkc_vals2 and (u_tbl9.col9) not in ((2)) for update", + "Table": "u_tbl9" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals3", + "Cols": [ + 0 + ], + "Query": "update u_tbl8 set col8 = null where (col8) in ::fkc_vals3", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl9 set col9 = null where (col9) in ::fkc_vals2 and (u_tbl9.col9) not in ((2))", + "Table": "u_tbl9" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl1 set m = x + 'bar', col1 = 2 where id = 1", + "Table": "u_tbl1" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl1", + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "update in a table with a child table having SET DEFAULT 
constraint - disallowed", + "query": "update tbl20 set col2 = 'bar'", + "plan": "VT09016: Cannot delete or update a parent row: a foreign key constraint fails" + }, + { + "comment": "delete in a table with limit - disallowed", + "query": "delete from u_tbl2 limit 2", + "plan": "VT12001: unsupported: foreign keys management at vitess with limit" + }, + { + "comment": "update with fk on cross-shard with a where condition on non-literal value - disallowed", + "query": "update tbl3 set coly = colx + 10 where coly = 10", + "plan": "VT12001: unsupported: update expression with non-literal values with foreign key constraints" + }, + { + "comment": "update with fk on cross-shard with a where condition", + "query": "update tbl3 set coly = 20 where coly = 10", + "plan": { + "QueryType": "UPDATE", + "Original": "update tbl3 set coly = 20 where coly = 10", + "Instructions": { + "OperatorType": "FKVerify", + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Limit", + "Count": "INT64(1)", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "INT64(1) as 1" + ], + "Inputs": [ + { + "OperatorType": "Filter", + "Predicate": "tbl1.t1col1 is null", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "R:0,R:0", + "TableName": "tbl3_tbl1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select 1 from tbl3 where 1 != 1", + "Query": "select 1 from tbl3 where tbl3.coly = 10 lock in share mode", + "Table": "tbl3" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select tbl1.t1col1 from tbl1 where 1 != 1", + "Query": "select tbl1.t1col1 from tbl1 where tbl1.t1col1 = 20 lock in share mode", + "Table": "tbl1" + } + ] + } + ] + } + ] + } + ] + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": 
"Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update tbl3 set coly = 20 where tbl3.coly = 10", + "Table": "tbl3" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl1", + "sharded_fk_allow.tbl3" + ] + } + }, + { + "comment": "Update in a table with shard-scoped foreign keys with cascade that requires a validation of a different parent foreign key", + "query": "update u_tbl6 set col6 = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl6 set col6 = 'foo'", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col6 from u_tbl6 where 1 != 1", + "Query": "select col6 from u_tbl6 for update", + "Table": "u_tbl6" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FKVerify", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl8 left join u_tbl9 on u_tbl9.col9 = 'foo' where 1 != 1", + "Query": "select 1 from u_tbl8 left join u_tbl9 on u_tbl9.col9 = 'foo' where (u_tbl8.col8) in ::fkc_vals and u_tbl9.col9 is null limit 1 lock in share mode", + "Table": "u_tbl8, u_tbl9" + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl8 set col8 = 'foo' where (u_tbl8.col8) in ::fkc_vals", + "Table": "u_tbl8" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + 
"TargetTabletType": "PRIMARY", + "Query": "update u_tbl6 set col6 = 'foo'", + "Table": "u_tbl6" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl6", + "unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "Update that cascades and requires parent fk and restrict child fk verification", + "query": "update u_tbl7 set col7 = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl7 set col7 = 'foo'", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col7 from u_tbl7 where 1 != 1", + "Query": "select col7 from u_tbl7 for update", + "Table": "u_tbl7" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FKVerify", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = 'foo' where 1 != 1", + "Query": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = 'foo' where (u_tbl4.col4) in ::fkc_vals and u_tbl3.col3 is null limit 1 lock in share mode", + "Table": "u_tbl3, u_tbl4" + }, + { + "InputName": "VerifyChild-2", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl4, u_tbl9 where 1 != 1", + "Query": "select 1 from u_tbl4, u_tbl9 where (u_tbl4.col4) in ::fkc_vals and (u_tbl9.col9) not in (('foo')) and u_tbl4.col4 = u_tbl9.col9 limit 1 lock in share mode", + "Table": "u_tbl4, u_tbl9" + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": 
"PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl4 set col4 = 'foo' where (u_tbl4.col4) in ::fkc_vals", + "Table": "u_tbl4" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl7 set col7 = 'foo'", + "Table": "u_tbl7" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl4", + "unsharded_fk_allow.u_tbl7", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "Update that cascades and requires parent fk and restrict child fk verification - bindVariable", + "query": "update u_tbl7 set col7 = :v1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl7 set col7 = :v1", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col7 from u_tbl7 where 1 != 1", + "Query": "select col7 from u_tbl7 for update", + "Table": "u_tbl7" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FKVerify", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = :v1 where 1 != 1", + "Query": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = :v1 where (u_tbl4.col4) in ::fkc_vals and u_tbl3.col3 is null limit 1 lock in share mode", + "Table": "u_tbl3, u_tbl4" + }, + { + "InputName": "VerifyChild-2", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl4, u_tbl9 where 1 != 1", + "Query": "select 1 from u_tbl4, 
u_tbl9 where (u_tbl4.col4) in ::fkc_vals and (:v1 is null or (u_tbl9.col9) not in ((:v1))) and u_tbl4.col4 = u_tbl9.col9 limit 1 lock in share mode", + "Table": "u_tbl4, u_tbl9" + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl4 set col4 = :v1 where (u_tbl4.col4) in ::fkc_vals", + "Table": "u_tbl4" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl7 set col7 = :v1", + "Table": "u_tbl7" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl4", + "unsharded_fk_allow.u_tbl7", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "Insert with on duplicate key update - foreign keys disallowed", + "query": "insert into u_tbl1 (id, col1) values (1, 3) on duplicate key update col1 = 5", + "plan": "VT12001: unsupported: ON DUPLICATE KEY UPDATE with foreign keys" + }, + { + "comment": "Insert with on duplicate key update - foreign keys not on update column - allowed", + "query": "insert into u_tbl1 (id, col1, foo) values (1, 3, 'bar') on duplicate key update foo = 'baz'", + "plan": { + "QueryType": "INSERT", + "Original": "insert into u_tbl1 (id, col1, foo) values (1, 3, 'bar') on duplicate key update foo = 'baz'", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "insert into u_tbl1(id, col1, foo) values (1, 3, 'bar') on duplicate key update foo = 'baz'", + "TableName": "u_tbl1" + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl1" + ] + } + }, + { + "comment": "Insert with unsharded table having fk reference in 
sharded table", + "query": "insert into u_tbl (id, col) values (1, 2)", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "replace with fk reference unsupported", + "query": "replace into u_tbl1 (id, col1) values (1, 2)", + "plan": "VT12001: unsupported: REPLACE INTO with foreign keys" + }, + { + "comment": "update on a multicol foreign key that set nulls and then cascades", + "query": "update u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select cola, colb from u_multicol_tbl1 where 1 != 1", + "Query": "select cola, colb from u_multicol_tbl1 where id = 3 for update", + "Table": "u_multicol_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0, + 1 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select cola, colb from u_multicol_tbl2 where 1 != 1", + "Query": "select cola, colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (u_multicol_tbl2.cola, u_multicol_tbl2.colb) not in ((1, 2)) for update", + "Table": "u_multicol_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0, + 1 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_multicol_tbl3 set cola = null, colb = null where (cola, colb) in ::fkc_vals1", + "Table": "u_multicol_tbl3" + }, + { + "InputName": 
"Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_multicol_tbl2 set cola = null, colb = null where (cola, colb) in ::fkc_vals and (u_multicol_tbl2.cola, u_multicol_tbl2.colb) not in ((1, 2))", + "Table": "u_multicol_tbl2" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "Table": "u_multicol_tbl1" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_multicol_tbl1", + "unsharded_fk_allow.u_multicol_tbl2", + "unsharded_fk_allow.u_multicol_tbl3" + ] + } + }, + { + "comment": "update on a multicol foreign key that set nulls and then cascades - bindVariables", + "query": "update u_multicol_tbl1 set cola = :v1, colb = :v2 where id = :v3", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_multicol_tbl1 set cola = :v1, colb = :v2 where id = :v3", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select cola, colb from u_multicol_tbl1 where 1 != 1", + "Query": "select cola, colb from u_multicol_tbl1 where id = :v3 for update", + "Table": "u_multicol_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0, + 1 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select cola, colb from u_multicol_tbl2 where 1 != 1", + "Query": "select cola, colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (:v2 is null or 
(:v1 is null or (u_multicol_tbl2.cola, u_multicol_tbl2.colb) not in ((:v1, :v2)))) for update", + "Table": "u_multicol_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0, + 1 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_multicol_tbl3 set cola = null, colb = null where (cola, colb) in ::fkc_vals1", + "Table": "u_multicol_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_multicol_tbl2 set cola = null, colb = null where (cola, colb) in ::fkc_vals and (:v2 is null or (:v1 is null or (u_multicol_tbl2.cola, u_multicol_tbl2.colb) not in ((:v1, :v2))))", + "Table": "u_multicol_tbl2" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_multicol_tbl1 set cola = :v1, colb = :v2 where id = :v3", + "Table": "u_multicol_tbl1" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_multicol_tbl1", + "unsharded_fk_allow.u_multicol_tbl2", + "unsharded_fk_allow.u_multicol_tbl3" + ] + } + }, + { + "comment": "Cascaded delete run from prepared statement", + "query": "execute prep_delete using @foo", + "plan": { + "QueryType": "EXECUTE", + "Original": "execute prep_delete using @foo", + "Instructions": { + "OperatorType": "EXECUTE", + "Parameters": [ + "foo" + ], + "Inputs": [ + { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select col5, t5col5 from tbl5 where 1 != 1", + 
"Query": "select col5, t5col5 from tbl5 where id = :v1 for update", + "Table": "tbl5" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "delete from tbl4 where (col4) in ::fkc_vals", + "Table": "tbl4" + }, + { + "InputName": "CascadeChild-2", + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 1 + ], + "Query": "delete from tbl4 where (t4col4) in ::fkc_vals1", + "Table": "tbl4" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from tbl5 where id = :v1", + "Table": "tbl5" + } + ] + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl4", + "sharded_fk_allow.tbl5" + ] + } + } +] diff --git a/go/vt/vtgate/planbuilder/testdata/from_cases.json b/go/vt/vtgate/planbuilder/testdata/from_cases.json index c406ac98e82..94a37427d69 100644 --- a/go/vt/vtgate/planbuilder/testdata/from_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/from_cases.json @@ -2,22 +2,7 @@ { "comment": "Single table sharded scatter", "query": "select col from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user", "Instructions": { @@ -39,22 +24,7 @@ { "comment": "Single table unsharded", "query": "select col from unsharded", - "v3-plan": { - 
"QueryType": "SELECT", - "Original": "select col from unsharded", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select col from unsharded where 1 != 1", - "Query": "select col from unsharded", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from unsharded", "Instructions": { @@ -76,22 +46,7 @@ { "comment": "Select from sequence", "query": "select next 2 values from seq", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select next 2 values from seq", - "Instructions": { - "OperatorType": "Route", - "Variant": "Next", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select next 2 values from seq where 1 != 1", - "Query": "select next 2 values from seq", - "Table": "seq" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select next 2 values from seq", "Instructions": { @@ -113,154 +68,32 @@ { "comment": "select next from non-sequence table", "query": "select next value from user", - "v3-plan": "VT03018: NEXT used on a non-sequence table", - "gen4-plan": "NEXT used on a non-sequence table `user`" + "plan": "NEXT used on a non-sequence table `user`" }, { "comment": "select next in derived table", "query": "select 1 from (select next value from seq) t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from (select next value from seq) t", - "Instructions": { - "OperatorType": "Route", - "Variant": "Next", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from (select next 1 values from seq where 1 != 1) as t where 1 != 1", - "Query": "select 1 from (select next 1 values from seq) as t", - "Table": "seq" - } - }, - "gen4-plan": "Incorrect usage/placement of 'NEXT'" + "plan": "Incorrect usage/placement of 'NEXT'" }, { "comment": "select next in derived table", "query": "select * from (select next value from 
seq) t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from (select next value from seq) t", - "Instructions": { - "OperatorType": "Route", - "Variant": "Next", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from (select next 1 values from seq where 1 != 1) as t where 1 != 1", - "Query": "select * from (select next 1 values from seq) as t", - "Table": "seq" - } - }, - "gen4-plan": "Incorrect usage/placement of 'NEXT'" + "plan": "Incorrect usage/placement of 'NEXT'" }, { "comment": "select next in subquery", "query": "select 1 from user where id in (select next value from seq)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from user where id in (select next value from seq)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Next", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select next 1 values from seq where 1 != 1", - "Query": "select next 1 values from seq", - "Table": "seq" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where :__sq_has_values1 = 1 and id in ::__vals", - "Table": "`user`", - "Values": [ - "::__sq1" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": "Incorrect usage/placement of 'NEXT'" + "plan": "Incorrect usage/placement of 'NEXT'" }, { "comment": "select next in projection", "query": "select (select next value from seq) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select (select next value from seq) from user", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Next", - "Keyspace": 
{ - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select next 1 values from seq where 1 != 1", - "Query": "select next 1 values from seq", - "Table": "seq" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :__sq1 from `user` where 1 != 1", - "Query": "select :__sq1 from `user`", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": "Incorrect usage/placement of 'NEXT'" + "plan": "Incorrect usage/placement of 'NEXT'" }, { "comment": "Select from reference", "query": "select * from ref", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from ref", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from ref where 1 != 1", - "Query": "select * from ref", - "Table": "ref" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from ref", "Instructions": { @@ -282,22 +115,7 @@ { "comment": "Multi-table unsharded", "query": "select m1.col from unsharded as m1 join unsharded as m2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select m1.col from unsharded as m1 join unsharded as m2", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select m1.col from unsharded as m1 join unsharded as m2 where 1 != 1", - "Query": "select m1.col from unsharded as m1 join unsharded as m2", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select m1.col from unsharded as m1 join unsharded as m2", "Instructions": { @@ -319,41 +137,7 @@ { "comment": "Multi-table, multi-chunk", "query": "select music.col from user join music", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select music.col from user join music", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - 
"JoinColumnIndexes": "R:0", - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.col from music where 1 != 1", - "Query": "select music.col from music", - "Table": "music" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select music.col from user join music", "Instructions": { @@ -395,22 +179,7 @@ { "comment": "routing rules where table name matches, and there's no alias.", "query": "select * from second_user.user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from second_user.user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from second_user.user", "Instructions": { @@ -432,22 +201,7 @@ { "comment": "routing rules where table name matches, and there's an alias.", "query": "select * from second_user.user as a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from second_user.user as a", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` as a where 1 != 1", - "Query": "select * from `user` as a", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from second_user.user as a", "Instructions": { @@ -469,22 +223,7 @@ { "comment": "routing rules where table name does not match, and there's no alias.", "query": 
"select * from route1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from route1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` as route1 where 1 != 1", - "Query": "select * from `user` as route1", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from route1", "Instructions": { @@ -506,22 +245,7 @@ { "comment": "routing rules where table name does not match, and there's an alias.", "query": "select * from route1 as a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from route1 as a", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` as a where 1 != 1", - "Query": "select * from `user` as a", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from route1 as a", "Instructions": { @@ -543,22 +267,7 @@ { "comment": "routing rules with primary targeting", "query": "select * from primary_redirect", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from primary_redirect", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` as primary_redirect where 1 != 1", - "Query": "select * from `user` as primary_redirect", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from primary_redirect", "Instructions": { @@ -590,22 +299,7 @@ { "comment": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42", "query": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42", - "v3-plan": { - "QueryType": 
"SELECT", - "Original": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select foo.col from `user` as foo join `user` on foo.id = `user`.id where 1 != 1", - "Query": "select foo.col from `user` as foo join `user` on foo.id = `user`.id where foo.col = 42", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42", "Instructions": { @@ -627,7 +321,7 @@ { "comment": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42", "query": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42", "Instructions": { @@ -666,92 +360,17 @@ "Vindex": "user_index" } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "music_id": 1 - }, - "TableName": "music_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.foo, music.id from music where 1 != 1", - "Query": "select music.foo, music.id from music where music.col = 42", - "Table": "music" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - 
"Query": "select 1 from `user` where `user`.id = :music_id", - "Table": "`user`", - "Values": [ - ":music_id" - ], - "Vindex": "user_index" - } - ] - }, - "TablesUsed": [ - "user.music", - "user.user" - ] - } - }, - { - "comment": "',' join", - "query": "select music.col from user, music", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select music.col from user, music", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.col from music where 1 != 1", - "Query": "select music.col from music", - "Table": "music" - } - ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "',' join", + "query": "select music.col from user, music", + "plan": { "QueryType": "SELECT", "Original": "select music.col from user, music", "Instructions": { @@ -793,22 +412,7 @@ { "comment": "',' join unsharded", "query": "select u1.a, u2.a from unsharded u1, unsharded u2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.a, u2.a from unsharded u1, unsharded u2", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2 where 1 != 1", - "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.a, u2.a from unsharded u1, unsharded u2", "Instructions": { @@ -830,22 +434,7 @@ { "comment": "',' 3-way join 
unsharded", "query": "select u1.a, u2.a from unsharded u1, unsharded u2, unsharded u3", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.a, u2.a from unsharded u1, unsharded u2, unsharded u3", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3 where 1 != 1", - "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.a, u2.a from unsharded u1, unsharded u2, unsharded u3", "Instructions": { @@ -1110,24 +699,9 @@ } }, { - "comment": "Straight-join (Gen4 ignores the straight_join hint)", + "comment": "Straight-join (ignores the straight_join hint)", "query": "select m1.col from unsharded as m1 straight_join unsharded as m2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select m1.col from unsharded as m1 straight_join unsharded as m2", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select m1.col from unsharded as m1 straight_join unsharded as m2 where 1 != 1", - "Query": "select m1.col from unsharded as m1 straight_join unsharded as m2", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select m1.col from unsharded as m1 straight_join unsharded as m2", "Instructions": { @@ -1149,60 +723,7 @@ { "comment": "Three-way join", "query": "select user.col from user join unsharded as m1 join unsharded as m2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join unsharded as m1 join unsharded as m2", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_unsharded_unsharded", - "Inputs": [ - { - "OperatorType": 
"Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded as m1 where 1 != 1", - "Query": "select 1 from unsharded as m1", - "Table": "unsharded" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded as m2 where 1 != 1", - "Query": "select 1 from unsharded as m2", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join unsharded as m1 join unsharded as m2", "Instructions": { @@ -1244,41 +765,7 @@ { "comment": "Parenthesized, single chunk", "query": "select user.col from user join (unsharded as m1 join unsharded as m2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join (unsharded as m1 join unsharded as m2)", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from (unsharded as m1 join unsharded as m2) where 1 != 1", - "Query": "select 1 from (unsharded as m1 join unsharded as m2)", - "Table": "unsharded" - } - ] - } - }, - 
"gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join (unsharded as m1 join unsharded as m2)", "Instructions": { @@ -1320,59 +807,7 @@ { "comment": "Parenthesized, multi-chunk", "query": "select user.col from user join (user as u1 join unsharded)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join (user as u1 join unsharded)", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u1 where 1 != 1", - "Query": "select 1 from `user` as u1", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded where 1 != 1", - "Query": "select 1 from unsharded", - "Table": "unsharded" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join (user as u1 join unsharded)", "Instructions": { @@ -1433,22 +868,7 @@ { "comment": "index hints, make sure they are not stripped.", "query": "select user.col from user use index(a)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user use index(a)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` use index (a) where 1 != 1", - "Query": "select 
`user`.col from `user` use index (a)", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user use index(a)", "Instructions": { @@ -1470,22 +890,7 @@ { "comment": "multiple index hints, make sure they are not stripped.", "query": "select user.col from user use index(a) use index for group by (b)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user use index(a) use index for group by (b)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` use index (a) use index for group by (b) where 1 != 1", - "Query": "select `user`.col from `user` use index (a) use index for group by (b)", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user use index(a) use index for group by (b)", "Instructions": { @@ -1507,22 +912,7 @@ { "comment": "mergeable sharded join on unique vindex", "query": "select user.col from user join user_extra on user.id = user_extra.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on user.id = user_extra.user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1", - "Query": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on user.id = user_extra.user_id", "Instructions": { @@ -1545,22 +935,7 @@ { "comment": "mergeable sharded join on unique vindex (parenthesized ON clause)", "query": "select user.col from user join user_extra on (user.id = 
user_extra.user_id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on (user.id = user_extra.user_id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1", - "Query": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on (user.id = user_extra.user_id)", "Instructions": { @@ -1583,22 +958,7 @@ { "comment": "mergeable sharded join on unique vindex, with a stray condition", "query": "select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` join user_extra on `user`.col between 1 and 2 and `user`.id = user_extra.user_id where 1 != 1", - "Query": "select `user`.col from `user` join user_extra on `user`.col between 1 and 2 and `user`.id = user_extra.user_id", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id", "Instructions": { @@ -1621,22 +981,7 @@ { "comment": "mergeable sharded join on unique vindex, swapped operands", "query": "select user.col from user join user_extra on user_extra.user_id = user.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on user_extra.user_id = 
user.id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` join user_extra on user_extra.user_id = `user`.id where 1 != 1", - "Query": "select `user`.col from `user` join user_extra on user_extra.user_id = `user`.id", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on user_extra.user_id = user.id", "Instructions": { @@ -1659,26 +1004,7 @@ { "comment": "mergeable sharded join on unique vindex, and condition", "query": "select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = 5 and `user`.id = user_extra.user_id where 1 != 1", - "Query": "select `user`.col from `user` join user_extra on `user`.id = 5 and `user`.id = user_extra.user_id", - "Table": "`user`, user_extra", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id", "Instructions": { @@ -1705,44 +1031,7 @@ { "comment": "sharded join on unique vindex, inequality", "query": "select user.col from user join user_extra on user.id < user_extra.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on user.id < user_extra.user_id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "user_id": 1 - }, - "TableName": 
"`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1", - "Query": "select `user`.col, `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where :user_id < user_extra.user_id", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on user.id < user_extra.user_id", "Instructions": { @@ -1787,45 +1076,7 @@ { "comment": "sharded join, non-col reference RHS", "query": "select user.col from user join user_extra on user.id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on user.id = 5", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` where `user`.id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on user.id = 5", "Instructions": { @@ -1871,45 +1122,7 @@ { "comment": "sharded join, non-col reference LHS", "query": "select user.col from user join user_extra on 5 = 
user.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on 5 = user.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` where `user`.id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on 5 = user.id", "Instructions": { @@ -1955,44 +1168,7 @@ { "comment": "sharded join, non-vindex col", "query": "select user.col from user join user_extra on user.id = user_extra.col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on user.id = user_extra.col", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "user_id": 1 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1", - "Query": "select `user`.col, `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.col = :user_id", - "Table": "user_extra" - } - ] - } - }, - 
"gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on user.id = user_extra.col", "Instructions": { @@ -2041,48 +1217,7 @@ { "comment": "sharded join, non-unique vindex", "query": "select user.col from user_extra join user on user_extra.user_id = user.name", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user_extra join user on user_extra.user_id = user.name", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_extra_user_id": 0 - }, - "TableName": "user_extra_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.user_id from user_extra where 1 != 1", - "Query": "select user_extra.user_id from user_extra", - "Table": "user_extra" - }, - { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` where `user`.`name` = :user_extra_user_id", - "Table": "`user`", - "Values": [ - ":user_extra_user_id" - ], - "Vindex": "name_user_map" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user_extra join user on user_extra.user_id = user.name", "Instructions": { @@ -2131,22 +1266,7 @@ { "comment": "join with reference table", "query": "select user.col from user join ref", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join ref", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` join ref where 1 != 1", - "Query": "select `user`.col from `user` join ref", - "Table": "`user`, ref" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 
user.col from user join ref", "Instructions": { @@ -2169,22 +1289,7 @@ { "comment": "reference table self-join", "query": "select r1.col from ref r1 join ref", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select r1.col from ref r1 join ref", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select r1.col from ref as r1 join ref where 1 != 1", - "Query": "select r1.col from ref as r1 join ref", - "Table": "ref" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select r1.col from ref r1 join ref", "Instructions": { @@ -2206,22 +1311,7 @@ { "comment": "reference table can merge with other opcodes left to right.", "query": "select ref.col from ref join user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select ref.col from ref join user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select ref.col from ref join `user` where 1 != 1", - "Query": "select ref.col from ref join `user`", - "Table": "`user`, ref" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select ref.col from ref join user", "Instructions": { @@ -2244,26 +1334,7 @@ { "comment": "reference table can merge with other opcodes left to right and vindex value is in the plan.\n# This tests that route.Merge also copies the condition to the LHS.", "query": "select ref.col from ref join (select aa from user where user.id=1) user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select ref.col from ref join (select aa from user where user.id=1) user", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select ref.col from ref join (select aa from `user` where 1 != 1) as `user` where 1 != 1", - "Query": "select ref.col from ref join (select aa from 
`user` where `user`.id = 1) as `user`", - "Table": "`user`, ref", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select ref.col from ref join (select aa from user where user.id=1) user", "Instructions": { @@ -2273,8 +1344,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select ref.col from ref, (select aa from `user` where 1 != 1) as `user` where 1 != 1", - "Query": "select ref.col from ref, (select aa from `user` where `user`.id = 1) as `user`", + "FieldQuery": "select ref.col from (select aa from `user` where 1 != 1) as `user`, ref where 1 != 1", + "Query": "select ref.col from (select aa from `user` where `user`.id = 1) as `user`, ref", "Table": "`user`, ref", "Values": [ "INT64(1)" @@ -2290,41 +1361,7 @@ { "comment": "routing rules for join, unsharded route wins if we can't find a merged route", "query": "select route2.col from route2 join user_extra", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select route2.col from route2 join user_extra", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "unsharded_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select route2.col from unsharded as route2 where 1 != 1", - "Query": "select route2.col from unsharded as route2", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select route2.col from route2 join user_extra", "Instructions": { @@ -2366,26 +1403,7 @@ { "comment": "derived table", "query": "select id from (select id, col from user where id = 5) as t", - 
"v3-plan": { - "QueryType": "SELECT", - "Original": "select id from (select id, col from user where id = 5) as t", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from (select id, col from `user` where 1 != 1) as t where 1 != 1", - "Query": "select id from (select id, col from `user` where id = 5) as t", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from (select id, col from user where id = 5) as t", "Instructions": { @@ -2411,26 +1429,7 @@ { "comment": "derived table with join", "query": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1", - "Query": "select t.id from (select id from `user` where id = 5) as t join user_extra on t.id = user_extra.user_id", - "Table": "`user`, user_extra", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.user_id", "Instructions": { @@ -2457,26 +1456,7 @@ { "comment": "derived table with join, and aliased references", "query": "select t.id from (select user.id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.id from (select user.id from user where user.id = 5) as t join 
user_extra on t.id = user_extra.user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select t.id from (select `user`.id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1", - "Query": "select t.id from (select `user`.id from `user` where `user`.id = 5) as t join user_extra on t.id = user_extra.user_id", - "Table": "`user`, user_extra", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.id from (select user.id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id", "Instructions": { @@ -2503,28 +1483,12 @@ { "comment": "derived table with join, duplicate columns", "query": "select t.id from (select user.id, id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id", - "v3-plan": "VT12001: unsupported: duplicate column aliases: id", - "gen4-plan": "Duplicate column name 'id'" + "plan": "Duplicate column name 'id'" }, { "comment": "derived table in RHS of join", "query": "select t.id from user_extra join (select id from user where id = 5) as t on t.id = user_extra.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.id from user_extra join (select id from user where id = 5) as t on t.id = user_extra.user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select t.id from user_extra join (select id from `user` where 1 != 1) as t on t.id = user_extra.user_id where 1 != 1", - "Query": "select t.id from user_extra join (select id from `user` where id = 5) as t on t.id = user_extra.user_id", - "Table": "user_extra, `user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.id from user_extra join (select id from user where id = 5) as t on t.id = 
user_extra.user_id", "Instructions": { @@ -2534,8 +1498,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select t.id from user_extra, (select id from `user` where 1 != 1) as t where 1 != 1", - "Query": "select t.id from user_extra, (select id from `user` where id = 5) as t where t.id = user_extra.user_id", + "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t, user_extra where 1 != 1", + "Query": "select t.id from (select id from `user` where id = 5) as t, user_extra where t.id = user_extra.user_id", "Table": "`user`, user_extra", "Values": [ "INT64(5)" @@ -2551,48 +1515,7 @@ { "comment": "derived table in FROM with cross-shard join", "query": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.col", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "t_id": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t where 1 != 1", - "Query": "select t.id from (select id from `user` where id = 5) as t", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.col = :t_id", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.col", "Instructions": { @@ -2641,26 +1564,7 @@ { "comment": "routing rules for 
derived table", "query": "select id from (select id, col from route1 where id = 5) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from (select id, col from route1 where id = 5) as t", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1", - "Query": "select id from (select id, col from `user` as route1 where id = 5) as t", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from (select id, col from route1 where id = 5) as t", "Instructions": { @@ -2686,46 +1590,12 @@ { "comment": "derived table missing columns", "query": "select t.id from (select id from user) as t join user_extra on t.id = user_extra.user_id where t.col = 42", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.id from (select id from user) as t join user_extra on t.id = user_extra.user_id where t.col = 42", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1", - "Query": "select t.id from (select id from `user`) as t join user_extra on t.id = user_extra.user_id where t.col = 42", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": "column 't.col' not found" + "plan": "column 't.col' not found" }, { "comment": "routing rules for derived table where the constraint is in the outer query", "query": "select id from (select id, col from route1) as t where id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from (select id, col from route1) as t where id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { 
- "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1", - "Query": "select id from (select id, col from `user` as route1) as t where id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from (select id, col from route1) as t where id = 5", "Instructions": { @@ -2751,42 +1621,12 @@ { "comment": "routing rules for derived table where the constraint is in the outer query", "query": "select id from (select id+col as foo from route1) as t where foo = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from (select id+col as foo from route1) as t where foo = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from (select id + col as foo from `user` as route1 where 1 != 1) as t where 1 != 1", - "Query": "select id from (select id + col as foo from `user` as route1) as t where foo = 5", - "Table": "`user`" - } - }, - "gen4-plan": "column 'id' not found in table 't'" + "plan": "column 'id' not found in table 't'" }, { "comment": "push predicate on joined derived tables", "query": "select t.id from (select id, textcol1 as baz from route1) as t join (select id, textcol1+textcol1 as baz from user) as s ON t.id = s.id WHERE t.baz = '3' AND s.baz = '3'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.id from (select id, textcol1 as baz from route1) as t join (select id, textcol1+textcol1 as baz from user) as s ON t.id = s.id WHERE t.baz = '3' AND s.baz = '3'", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select t.id from (select id, textcol1 as baz from `user` as route1 where 1 != 1) as t join (select id, textcol1 + textcol1 as baz from 
`user` where 1 != 1) as s on t.id = s.id where 1 != 1", - "Query": "select t.id from (select id, textcol1 as baz from `user` as route1) as t join (select id, textcol1 + textcol1 as baz from `user`) as s on t.id = s.id where t.baz = '3' and s.baz = '3'", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.id from (select id, textcol1 as baz from route1) as t join (select id, textcol1+textcol1 as baz from user) as s ON t.id = s.id WHERE t.baz = '3' AND s.baz = '3'", "Instructions": { @@ -2808,22 +1648,7 @@ { "comment": "recursive derived table predicate push down", "query": "select bar from (select foo+4 as bar from (select colA+colB as foo from user) as u) as t where bar = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select bar from (select foo+4 as bar from (select colA+colB as foo from user) as u) as t where bar = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1", - "Query": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user`) as u) as t where bar = 5", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select bar from (select foo+4 as bar from (select colA+colB as foo from user) as u) as t where bar = 5", "Instructions": { @@ -2845,26 +1670,7 @@ { "comment": "recursive derived table lookups", "query": "select id from (select id from (select id from user) as u) as t where id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from (select id from (select id from user) as u) as t where id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from (select id from (select 
id from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1", - "Query": "select id from (select id from (select id from `user`) as u) as t where id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from (select id from (select id from user) as u) as t where id = 5", "Instructions": { @@ -2890,26 +1696,7 @@ { "comment": "merge derived tables with single-shard routes", "query": "select u.col, e.col from (select col from user where id = 5) as u join (select col from user_extra where user_id = 5) as e", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.col, e.col from (select col from user where id = 5) as u join (select col from user_extra where user_id = 5) as e", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.col, e.col from (select col from `user` where 1 != 1) as u join (select col from user_extra where 1 != 1) as e where 1 != 1", - "Query": "select u.col, e.col from (select col from `user` where id = 5) as u join (select col from user_extra where user_id = 5) as e", - "Table": "`user`, user_extra", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.col, e.col from (select col from user where id = 5) as u join (select col from user_extra where user_id = 5) as e", "Instructions": { @@ -2936,41 +1723,7 @@ { "comment": "join of information_schema with normal table", "query": "select unsharded.foo from information_schema.CHARACTER_SETS join unsharded", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select unsharded.foo from information_schema.CHARACTER_SETS join unsharded", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "TableName": 
"information_schema.CHARACTER_SETS_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from information_schema.CHARACTER_SETS where 1 != 1", - "Query": "select 1 from information_schema.CHARACTER_SETS", - "Table": "information_schema.CHARACTER_SETS" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.foo from unsharded where 1 != 1", - "Query": "select unsharded.foo from unsharded", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select unsharded.foo from information_schema.CHARACTER_SETS join unsharded", "Instructions": { @@ -3011,41 +1764,7 @@ { "comment": "join of normal table with information_schema", "query": "select unsharded.foo from unsharded join information_schema.CHARACTER_SETS", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select unsharded.foo from unsharded join information_schema.CHARACTER_SETS", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "unsharded_information_schema.CHARACTER_SETS", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.foo from unsharded where 1 != 1", - "Query": "select unsharded.foo from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from information_schema.CHARACTER_SETS where 1 != 1", - "Query": "select 1 from information_schema.CHARACTER_SETS", - "Table": "information_schema.CHARACTER_SETS" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select unsharded.foo from unsharded join information_schema.CHARACTER_SETS", 
"Instructions": { @@ -3086,73 +1805,7 @@ { "comment": "wire-up on join with cross-shard derived table", "query": "select t.col1 from (select user.id, user.col1 from user join user_extra) as t join unsharded on unsharded.col1 = t.col1 and unsharded.id = t.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.col1 from (select user.id, user.col1 from user join user_extra) as t join unsharded on unsharded.col1 = t.col1 and unsharded.id = t.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "t_col1": 0, - "t_id": 1 - }, - "TableName": "`user`_user_extra_unsharded", - "Inputs": [ - { - "OperatorType": "SimpleProjection", - "Columns": [ - 1, - 0 - ], - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded where 1 != 1", - "Query": "select 1 from unsharded where unsharded.col1 = :t_col1 and unsharded.id = :t_id", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.col1 from (select user.id, user.col1 from user join user_extra) as t join unsharded on unsharded.col1 = t.col1 and unsharded.id = t.id", "Instructions": { @@ -3218,52 +1871,7 @@ { "comment": "wire-up on within 
cross-shard derived table", "query": "select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "JoinVars": { - "user_col": 2 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col1, `user`.col from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col1, `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.col = :user_col", - "Table": "user_extra" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t", "Instructions": { @@ -3308,68 +1916,7 @@ { "comment": "Join with cross-shard derived table on rhs", "query": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "TableName": "unsharded_a_`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from 
unsharded_a as ua where 1 != 1", - "Query": "select 1 from unsharded_a as ua", - "Table": "unsharded_a" - }, - { - "OperatorType": "SimpleProjection", - "Columns": [ - 1 - ], - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t", "Instructions": { @@ -3431,8 +1978,7 @@ { "comment": "Join with cross-shard derived table on rhs - push down join predicate to derived table", "query": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t on t.id = ua.id", - "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t on t.id = ua.id", "Instructions": { @@ -3501,18 +2047,18 @@ { "comment": "subquery in ON clause, single route", "query": "select unsharded_a.col from unsharded_a join unsharded_b on (select col from user)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select unsharded_a.col from unsharded_a join unsharded_b on (select col from user)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": 
"PulloutValue", "PulloutVars": [ - "__sq_has_values1", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3524,30 +2070,41 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { "Name": "main", "Sharded": false }, - "FieldQuery": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq1 where 1 != 1", - "Query": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq1", + "FieldQuery": "select unsharded_a.col from unsharded_a, unsharded_b where 1 != 1", + "Query": "select unsharded_a.col from unsharded_a, unsharded_b where :__sq1", "Table": "unsharded_a, unsharded_b" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "main.unsharded_a", + "main.unsharded_b", + "user.user" + ] + } + }, + { + "comment": "subquery in ON clause as sub-expression", + "query": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)", + "plan": { "QueryType": "SELECT", - "Original": "select unsharded_a.col from unsharded_a join unsharded_b on (select col from user)", + "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutValue", "PulloutVars": [ "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3559,6 +2116,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { @@ -3566,7 +2124,7 @@ "Sharded": false }, "FieldQuery": "select unsharded_a.col from unsharded_a, unsharded_b where 1 != 1", - "Query": "select unsharded_a.col from unsharded_a, unsharded_b where :__sq1", + "Query": "select unsharded_a.col from unsharded_a, unsharded_b where unsharded_a.col + :__sq1", "Table": "unsharded_a, unsharded_b" } ] @@ 
-3579,20 +2137,21 @@ } }, { - "comment": "subquery in ON clause as sub-expression", - "query": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)", - "v3-plan": { + "comment": "IN subquery in ON clause, single route", + "query": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)", + "plan": { "QueryType": "SELECT", - "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)", + "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)", "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", + "OperatorType": "UncorrelatedSubquery", + "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3604,130 +2163,15 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { "Name": "main", "Sharded": false }, - "FieldQuery": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col + :__sq1 where 1 != 1", - "Query": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col + :__sq1", - "Table": "unsharded_a, unsharded_b" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { 
- "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded_a.col from unsharded_a, unsharded_b where 1 != 1", - "Query": "select unsharded_a.col from unsharded_a, unsharded_b where unsharded_a.col + :__sq1", - "Table": "unsharded_a, unsharded_b" - } - ] - }, - "TablesUsed": [ - "main.unsharded_a", - "main.unsharded_b", - "user.user" - ] - } - }, - { - "comment": "IN subquery in ON clause, single route", - "query": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq_has_values1 = 1 and unsharded_a.col in ::__sq1 where 1 != 1", - "Query": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq_has_values1 = 1 and unsharded_a.col in ::__sq1", - "Table": "unsharded_a, unsharded_b" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - 
"Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded_a.col from unsharded_a, unsharded_b where 1 != 1", - "Query": "select unsharded_a.col from unsharded_a, unsharded_b where :__sq_has_values1 = 1 and unsharded_a.col in ::__sq1", + "FieldQuery": "select unsharded_a.col from unsharded_a, unsharded_b where 1 != 1", + "Query": "select unsharded_a.col from unsharded_a, unsharded_b where :__sq_has_values and unsharded_a.col in ::__sq1", "Table": "unsharded_a, unsharded_b" } ] @@ -3742,136 +2186,19 @@ { "comment": "subquery in ON clause, with join primitives", "query": "select unsharded.col from unsharded join user on user.col in (select col from user)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select unsharded.col from unsharded join user on user.col in (select col from user)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "unsharded_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.col from unsharded where 1 != 1", - "Query": "select unsharded.col from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1", - "Table": "`user`" - } - 
] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select unsharded.col from unsharded join user on user.col in (select col from user)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "unsharded_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.col from unsharded where 1 != 1", - "Query": "select unsharded.col from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1", - "Table": "`user`" - } - ] - } - ] - }, - "TablesUsed": [ - "main.unsharded", - "user.user" - ] - } - }, - { - "comment": "subquery in ON clause, with left join primitives\n# The subquery is not pulled all the way out.", - "query": "select unsharded.col from unsharded left join user on user.col in (select col from user)", "plan": { "QueryType": "SELECT", - "Original": "select unsharded.col from unsharded left join user on user.col in (select col from user)", + "Original": "select unsharded.col from unsharded join user on user.col in (select col from user)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", 
"Variant": "Scatter", "Keyspace": { @@ -3883,32 +2210,40 @@ "Table": "`user`" }, { - "OperatorType": "Join", - "Variant": "LeftJoin", - "JoinColumnIndexes": "L:0", - "TableName": "unsharded_`user`", + "InputName": "Outer", + "OperatorType": "Filter", + "Predicate": ":__sq_has_values and `user`.col in ::__sq1", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.col from unsharded where 1 != 1", - "Query": "select unsharded.col from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1", - "Table": "`user`" + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0", + "TableName": "unsharded_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select unsharded.col from unsharded where 1 != 1", + "Query": "select unsharded.col from unsharded", + "Table": "unsharded" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user`", + "Table": "`user`" + } + ] } ] } @@ -3923,7 +2258,7 @@ { "comment": "subquery in ON clause, with join primitives, and join on top\n# The subquery is not pulled all the way out.", "query": "select unsharded.col from unsharded join user on user.col in (select col from user) join unsharded_a", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select unsharded.col from unsharded join user on user.col in (select col from user) join unsharded_a", "Instructions": { @@ -3933,14 +2268,15 @@ 
"TableName": "unsharded_`user`_unsharded_a", "Inputs": [ { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3952,32 +2288,39 @@ "Table": "`user`" }, { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "unsharded_`user`", + "InputName": "Outer", + "OperatorType": "Filter", + "Predicate": ":__sq_has_values and `user`.col in ::__sq1", "Inputs": [ { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.col from unsharded where 1 != 1", - "Query": "select unsharded.col from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1", - "Table": "`user`" + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0", + "TableName": "unsharded_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select unsharded.col from unsharded where 1 != 1", + "Query": "select unsharded.col from unsharded", + "Table": "unsharded" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user`", + "Table": "`user`" + } + ] } ] } @@ -3995,61 +2338,6 @@ "Table": "unsharded_a" } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select unsharded.col from unsharded join user on user.col in (select col from user) join 
unsharded_a", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "TableName": "`user`_unsharded, unsharded_a", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.col from unsharded, unsharded_a where 1 != 1", - "Query": "select unsharded.col from unsharded, unsharded_a", - "Table": "unsharded, unsharded_a" - } - ] - } - ] }, "TablesUsed": [ "main.unsharded", @@ -4061,44 +2349,7 @@ { "comment": "keyspace-qualified queries", "query": "select user.user.col1, main.unsharded.col1 from user.user join main.unsharded where main.unsharded.col2 = user.user.col2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.user.col1, main.unsharded.col1 from user.user join main.unsharded where main.unsharded.col2 = user.user.col2", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "user_col2": 1 - }, - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col1, `user`.col2 from `user` where 1 != 1", - "Query": "select `user`.col1, `user`.col2 from `user`", - "Table": "`user`" - }, - { 
- "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.col1 from unsharded where 1 != 1", - "Query": "select unsharded.col1 from unsharded where unsharded.col2 = :user_col2", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.user.col1, main.unsharded.col1 from user.user join main.unsharded where main.unsharded.col2 = user.user.col2", "Instructions": { @@ -4143,22 +2394,7 @@ { "comment": "implicit table reference for unsharded keyspace", "query": "select main.foo.col from main.foo", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select main.foo.col from main.foo", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select foo.col from foo where 1 != 1", - "Query": "select foo.col from foo", - "Table": "foo" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select main.foo.col from main.foo", "Instructions": { @@ -4180,22 +2416,7 @@ { "comment": "col refs should be case-insensitive", "query": "select user.col from user join user_extra on user.ID = user_extra.User_Id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on user.ID = user_extra.User_Id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` join user_extra on `user`.ID = user_extra.User_Id where 1 != 1", - "Query": "select `user`.col from `user` join user_extra on `user`.ID = user_extra.User_Id", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on user.ID = user_extra.User_Id", "Instructions": { @@ -4218,7 +2439,7 @@ { "comment": "derived table with join 
primitive (FROM)", "query": "select id, t.id from (select user.id from user join user_extra) as t", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, t.id from (select user.id from user join user_extra) as t", "Instructions": { @@ -4241,8 +2462,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", + "FieldQuery": "select id from (select `user`.id from `user` where 1 != 1) as t where 1 != 1", + "Query": "select id from (select `user`.id from `user`) as t", "Table": "`user`" }, { @@ -4259,40 +2480,6 @@ ] } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select id, t.id from (select user.id from user join user_extra) as t", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from (select `user`.id from `user` where 1 != 1) as t where 1 != 1", - "Query": "select id from (select `user`.id from `user`) as t", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] }, "TablesUsed": [ "user.user", @@ -4303,22 +2490,7 @@ { "comment": "database call in ON clause.\n# The on clause is weird because the substitution must even for root expressions.", "query": "select u1.a from unsharded u1 join unsharded u2 on database()", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.a from unsharded u1 join unsharded u2 on database()", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select u1.a from 
unsharded as u1 join unsharded as u2 on database() where 1 != 1", - "Query": "select u1.a from unsharded as u1 join unsharded as u2 on database()", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.a from unsharded u1 join unsharded u2 on database()", "Instructions": { @@ -4340,22 +2512,7 @@ { "comment": "last_insert_id for dual", "query": "select last_insert_id()", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select last_insert_id()", - "Instructions": { - "OperatorType": "Projection", - "Expressions": [ - ":__lastInsertId as last_insert_id()" - ], - "Inputs": [ - { - "OperatorType": "SingleRow" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select last_insert_id()", "Instructions": { @@ -4377,22 +2534,7 @@ { "comment": "last_insert_id for sharded keyspace", "query": "select last_insert_id() from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select last_insert_id() from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :__lastInsertId as `last_insert_id()` from `user` where 1 != 1", - "Query": "select :__lastInsertId as `last_insert_id()` from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select last_insert_id() from user", "Instructions": { @@ -4414,22 +2556,7 @@ { "comment": "last_insert_id for unsharded route", "query": "select last_insert_id() from main.unsharded", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select last_insert_id() from main.unsharded", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select :__lastInsertId as `last_insert_id()` from unsharded where 1 != 1", - "Query": "select :__lastInsertId as `last_insert_id()` from unsharded", - "Table": "unsharded" - } 
- }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select last_insert_id() from main.unsharded", "Instructions": { @@ -4451,48 +2578,7 @@ { "comment": "join with bindvariables", "query": "SELECT `user`.`id` FROM `user` INNER JOIN `user_extra` ON `user`.`id` = `user_extra`.`assembly_id` WHERE `user_extra`.`user_id` = 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT `user`.`id` FROM `user` INNER JOIN `user_extra` ON `user`.`id` = `user_extra`.`assembly_id` WHERE `user_extra`.`user_id` = 2", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.assembly_id = :user_id and user_extra.user_id = 2", - "Table": "user_extra", - "Values": [ - "INT64(2)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT `user`.`id` FROM `user` INNER JOIN `user_extra` ON `user`.`id` = `user_extra`.`assembly_id` WHERE `user_extra`.`user_id` = 2", "Instructions": { @@ -4545,8 +2631,7 @@ { "comment": "verify ',' vs JOIN precedence", "query": "select u1.a from unsharded u1, unsharded u2 join unsharded u3 on u1.a = u2.a", - "v3-plan": "VT03019: column u1.a not found", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.a from unsharded u1, unsharded u2 join unsharded u3 on u1.a = u2.a", "Instructions": { @@ -4573,8 +2658,7 @@ { "comment": "table names should be case-sensitive", "query": 
"select unsharded.id from unsharded where Unsharded.val = 1", - "v3-plan": "VT03019: column Unsharded.val not found", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select unsharded.id from unsharded where Unsharded.val = 1", "Instructions": { @@ -4626,22 +2710,7 @@ { "comment": "query with parens is planned correctly", "query": "select m1.col from (unsharded as m1, unsharded as m2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select m1.col from (unsharded as m1, unsharded as m2)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select m1.col from (unsharded as m1, unsharded as m2) where 1 != 1", - "Query": "select m1.col from (unsharded as m1, unsharded as m2)", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select m1.col from (unsharded as m1, unsharded as m2)", "Instructions": { @@ -4661,72 +2730,9 @@ } }, { - "comment": "gen4 - optimise plan by merging user_extra and music first, and then querying for user info", + "comment": "optimise plan by merging user_extra and music first, and then querying for user info", "query": "select 1 from user u join user_extra ue on ue.id = u.id join music m on m.user_id = ue.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from user u join user_extra ue on ue.id = u.id join music m on m.user_id = ue.user_id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "ue_user_id": 1 - }, - "TableName": "`user`_user_extra_music", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "u_id": 1 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1, u.id from `user` as u where 1 
!= 1", - "Query": "select 1, u.id from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select ue.user_id from user_extra as ue where 1 != 1", - "Query": "select ue.user_id from user_extra as ue where ue.id = :u_id", - "Table": "user_extra" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music as m where 1 != 1", - "Query": "select 1 from music as m where m.user_id = :ue_user_id", - "Table": "music", - "Values": [ - ":ue_user_id" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 from user u join user_extra ue on ue.id = u.id join music m on m.user_id = ue.user_id", "Instructions": { @@ -4776,44 +2782,7 @@ { "comment": "join column selected as alias", "query": "SELECT u.id as uid, ue.id as ueid FROM user u join user_extra ue where u.id = ue.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT u.id as uid, ue.id as ueid FROM user u join user_extra ue where u.id = ue.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "u_id": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id as uid from `user` as u where 1 != 1", - "Query": "select u.id as uid from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select ue.id as ueid from user_extra as ue where 1 != 1", - "Query": "select ue.id as ueid from user_extra as ue where ue.id = :u_id", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", 
"Original": "SELECT u.id as uid, ue.id as ueid FROM user u join user_extra ue where u.id = ue.id", "Instructions": { @@ -4862,62 +2831,24 @@ { "comment": "alias on column from derived table. TODO: to support alias in SimpleProjection engine primitive.", "query": "select a as k from (select count(*) as a from user) t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a as k from (select count(*) as a from user) t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) as a from `user` where 1 != 1", - "Query": "select count(*) as a from `user`", - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a as k from (select count(*) as a from user) t", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count_star(0) AS a", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) as a from `user` where 1 != 1", - "Query": "select count(*) as a from `user`", - "Table": "`user`" - } - ] + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count_star(0) AS a", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) as a from `user` where 1 != 1", + "Query": "select count(*) as a from `user`", + "Table": "`user`" } ] }, @@ -4929,22 +2860,7 @@ { "comment": "select star from derived table on expandable and unsharded table", "query": "select u.* from (select * from 
unsharded) u", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.* from (select * from unsharded) u", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select u.* from (select * from unsharded where 1 != 1) as u where 1 != 1", - "Query": "select u.* from (select * from unsharded) as u", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.* from (select * from unsharded) u", "Instructions": { @@ -4966,8 +2882,7 @@ { "comment": "filtering on a cross-shard derived table", "query": "select id from (select user.id, user.col from user join user_extra) as t where id=5", - "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from (select user.id, user.col from user join user_extra) as t where id=5", "Instructions": { @@ -5013,8 +2928,7 @@ { "comment": "expression on a cross-shard derived table", "query": "select id+1 from (select user.id, user.col from user join user_extra) as t", - "v3-plan": "VT12001: unsupported: expression on results of a cross-shard subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id+1 from (select user.id, user.col from user join user_extra) as t", "Instructions": { @@ -5056,8 +2970,7 @@ { "comment": "derived table with aliased columns and outer predicate pushed in derived table", "query": "select u.a from (select id as b, name from user) u(a, n) where u.n = 1", - "v3-plan": "VT12001: unsupported: column aliases in derived table", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.a from (select id as b, name from user) u(a, n) where u.n = 1", "Instructions": { @@ -5108,8 +3021,7 @@ { "comment": "derived table with aliased columns predicate in both the outer and inner", "query": "select u.a from (select id as b, name from user 
where b = 1) u(a, n) where u.n = 1", - "v3-plan": "VT12001: unsupported: column aliases in derived table", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.a from (select id as b, name from user where b = 1) u(a, n) where u.n = 1", "Instructions": { @@ -5160,45 +3072,36 @@ { "comment": "derived table with aliased columns and a join that requires pushProjection", "query": "select i+1 from (select user.id from user join user_extra) t(i)", - "v3-plan": "VT12001: unsupported: column aliases in derived table", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select i+1 from (select user.id from user join user_extra) t(i)", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 1 - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.id + 1 from `user` where 1 != 1", - "Query": "select `user`.id, `user`.id + 1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select i + 1 from (select `user`.id from `user` where 1 != 1) as t(i) where 1 != 1", + "Query": "select i + 1 from (select `user`.id from `user`) as t(i)", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1", + 
"Query": "select 1 from user_extra", + "Table": "user_extra" } ] }, @@ -5211,85 +3114,18 @@ { "comment": "two subqueries with different Select and OpCode", "query": "select id from user where id in (select id from user_extra) and col = (select user_id from user_extra limit 1)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id in (select id from user_extra) and col = (select user_id from user_extra limit 1)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values2", - "__sq2" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from user_extra where 1 != 1", - "Query": "select id from user_extra", - "Table": "user_extra" - }, - { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_id from user_extra where 1 != 1", - "Query": "select user_id from user_extra limit :__upper_limit", - "Table": "user_extra" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where col = :__sq1 and :__sq_has_values2 = 1 and id in ::__vals", - "Table": "`user`", - "Values": [ - "::__sq2" - ], - "Vindex": "user_index" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id in (select id from user_extra) and col = (select user_id from user_extra limit 1)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutValue", "PulloutVars": [ - 
"__sq_has_values2", "__sq2" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Limit", "Count": "INT64(1)", "Inputs": [ @@ -5307,14 +3143,16 @@ ] }, { - "OperatorType": "Subquery", + "InputName": "Outer", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -5326,6 +3164,7 @@ "Table": "user_extra" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -5333,7 +3172,7 @@ "Sharded": true }, "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals and col = :__sq2", + "Query": "select id from `user` where :__sq_has_values and id in ::__vals and col = :__sq2", "Table": "`user`", "Values": [ "::__sq1" @@ -5353,44 +3192,7 @@ { "comment": "join on int columns", "query": "select u.id from user as u join user as uu on u.intcol = uu.intcol", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.id from user as u join user as uu on u.intcol = uu.intcol", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "u_intcol": 1 - }, - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id, u.intcol from `user` as u where 1 != 1", - "Query": "select u.id, u.intcol from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as uu where 1 != 1", - "Query": "select 1 from `user` as uu where uu.intcol = :u_intcol", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id from user as u join user as uu 
on u.intcol = uu.intcol", "Instructions": { @@ -5434,8 +3236,7 @@ { "comment": "Duplicate output column from derived table having a join", "query": "select 0 from (select `user`.col1 from `user` join unsharded) as t join unsharded on unsharded.col1 = t.col1 and unsharded.a = t.col1", - "v3-plan": "VT12001: unsupported: expression on results of a cross-shard subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 0 from (select `user`.col1 from `user` join unsharded) as t join unsharded on unsharded.col1 = t.col1 and unsharded.a = t.col1", "Instructions": { @@ -5503,47 +3304,40 @@ "QueryType": "SELECT", "Original": "select user.id from user left join user_extra on user.col = user_extra.col where coalesce(user_extra.col, 4) = 5", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Filter", + "Predicate": "coalesce(user_extra.col, 4) = 5", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": "coalesce(user_extra.col, 4) = 5", + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_col": 1 + }, + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Join", - "Variant": "LeftJoin", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "user_col": 1 + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra where user_extra.col 
= :user_col", - "Table": "user_extra" - } - ] + "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1", + "Query": "select `user`.id, `user`.col from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.col from user_extra where 1 != 1", + "Query": "select user_extra.col from user_extra where user_extra.col = :user_col", + "Table": "user_extra" } ] } @@ -5558,41 +3352,7 @@ { "comment": "dont merge unsharded tables from different keyspaces", "query": "select 1 from main.unsharded join main_2.unsharded_tab", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from main.unsharded join main_2.unsharded_tab", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "unsharded_unsharded_tab", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded where 1 != 1", - "Query": "select 1 from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main_2", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded_tab where 1 != 1", - "Query": "select 1 from unsharded_tab", - "Table": "unsharded_tab" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 from main.unsharded join main_2.unsharded_tab", "Instructions": { @@ -5634,22 +3394,7 @@ { "comment": "Unsharded join with using", "query": "select * from unsharded_a join unsharded_b using (propertyId);", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from unsharded_a join unsharded_b using (propertyId);", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from unsharded_a join 
unsharded_b using (propertyId) where 1 != 1", - "Query": "select * from unsharded_a join unsharded_b using (propertyId)", - "Table": "unsharded_a, unsharded_b" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from unsharded_a join unsharded_b using (propertyId);", "Instructions": { @@ -5672,8 +3417,7 @@ { "comment": "Column aliases in Derived Table", "query": "select id2 from (select id from user) as x (id2)", - "v3-plan": "VT12001: unsupported: column aliases in derived table", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id2 from (select id from user) as x (id2)", "Instructions": { @@ -5695,22 +3439,7 @@ { "comment": "single unsharded keyspace with derived table", "query": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a ua limit 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a ua limit 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select col from (select col from unsharded join unsharded_b where 1 != 1) as u join unsharded_a as ua where 1 != 1", - "Query": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a as ua limit 1", - "Table": "unsharded, unsharded_b, unsharded_a" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a ua limit 1", "Instructions": { @@ -5734,74 +3463,7 @@ { "comment": "query builder with derived table having join inside it", "query": "select u.col from (select user.col from user join user_extra) as u join user_extra ue limit 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.col from (select user.col from user join user_extra) as u join user_extra ue limit 1", - "Instructions": { - "OperatorType": 
"Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra_user_extra", - "Inputs": [ - { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra as ue where 1 != 1", - "Query": "select 1 from user_extra as ue", - "Table": "user_extra" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.col from (select user.col from user join user_extra) as u join user_extra ue limit 1", "Instructions": { @@ -6047,32 +3709,7 @@ { "comment": "Do not rewrite derived expressions when the derived table is merged with the outer", "query": "select col1, count(*) from (select colC+colD as col1 from user) as tbl group by col1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col1, count(*) from (select colC+colD as col1 from user) as tbl group by col1", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, 
count(*), weight_string(col1) from (select colC + colD as col1 from `user` where 1 != 1) as tbl where 1 != 1 group by col1, weight_string(col1)", - "OrderBy": "(0|2) ASC", - "Query": "select col1, count(*), weight_string(col1) from (select colC + colD as col1 from `user`) as tbl group by col1, weight_string(col1) order by col1 asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1, count(*) from (select colC+colD as col1 from user) as tbl group by col1", "Instructions": { @@ -6104,8 +3741,7 @@ { "comment": "join with USING construct", "query": "select * from authoritative join unsharded_authoritative using(col1)", - "v3-plan": "VT12001: unsupported: JOIN with USING(column_list) clause for complex queries", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from authoritative join unsharded_authoritative using(col1)", "Instructions": { @@ -6150,26 +3786,7 @@ { "comment": "derived table inside derived table with a where clause depending on columns from the derived table", "query": "select * from (select bar as push_it from (select foo as bar from (select id as foo from user) as t1) as t2) as t3 where push_it = 12", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from (select bar as push_it from (select foo as bar from (select id as foo from user) as t1) as t2) as t3 where push_it = 12", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from (select bar as push_it from (select foo as bar from (select id as foo from `user` where 1 != 1) as t1 where 1 != 1) as t2 where 1 != 1) as t3 where 1 != 1", - "Query": "select * from (select bar as push_it from (select foo as bar from (select id as foo from `user`) as t1) as t2) as t3 where push_it = 12", - "Table": "`user`", - "Values": [ - "INT64(12)" - ], - "Vindex": "user_index" - } - }, - 
"gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from (select bar as push_it from (select foo as bar from (select id as foo from user) as t1) as t2) as t3 where push_it = 12", "Instructions": { @@ -6195,22 +3812,7 @@ { "comment": "use a view", "query": "select * from user.user_details_view", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user.user_details_view", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1) as user_details_view where 1 != 1", - "Query": "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id) as user_details_view", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user.user_details_view", "Instructions": { @@ -6233,22 +3835,7 @@ { "comment": "use a view without qualifying the keyspace", "query": "select * from user_details_view", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user_details_view", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1) as user_details_view where 1 != 1", - "Query": "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id) as user_details_view", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user_details_view", "Instructions": { @@ -6275,47 +3862,40 @@ "QueryType": "SELECT", "Original": "select user.id from user left join user_extra on user.col = user_extra.col where 
user_extra.col between 10 and 20", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Filter", + "Predicate": "user_extra.col between 10 and 20", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": "user_extra.col between 10 and 20", + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_col": 1 + }, + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Join", - "Variant": "LeftJoin", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "user_col": 1 + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra where user_extra.col = :user_col", - "Table": "user_extra" - } - ] + "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1", + "Query": "select `user`.id, `user`.col from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.col from user_extra where 1 != 1", + "Query": "select user_extra.col from user_extra where user_extra.col = :user_col", + "Table": "user_extra" } ] } @@ -6330,22 +3910,7 @@ { "comment": "missing and ambiguous column info is OK as long as we can send the query to a single unsharded keyspace", "query": "select missing_column from unsharded, unsharded_a", - "v3-plan": { - 
"QueryType": "SELECT", - "Original": "select missing_column from unsharded, unsharded_a", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select missing_column from unsharded, unsharded_a where 1 != 1", - "Query": "select missing_column from unsharded, unsharded_a", - "Table": "unsharded, unsharded_a" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select missing_column from unsharded, unsharded_a", "Instructions": { @@ -6368,13 +3933,12 @@ { "comment": "missing and ambiguous column info is not valid when we have two different unsharded keyspaces in the query", "query": "select missing_column from unsharded, unsharded_tab", - "v3-plan": "VT03019: column missing_column not found", - "gen4-plan": "Column 'missing_column' in field list is ambiguous" + "plan": "Column 'missing_column' in field list is ambiguous" }, { "comment": "join predicate only depending on the RHS should not turn outer join into inner join", "query": "select t1.id1, t2.id1 from t1 left join t1 as t2 on t2.id1 = t2.id2", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t1.id1, t2.id1 from t1 left join t1 as t2 on t2.id1 = t2.id2", "Instructions": { @@ -6410,43 +3974,76 @@ "TablesUsed": [ "zlookup_unique.t1" ] - }, - "gen4-plan": { + } + }, + { + "comment": "left join with using has to be transformed into inner join with on condition", + "query": "SELECT * FROM unsharded_authoritative as A LEFT JOIN unsharded_authoritative as B USING(col1)", + "plan": { "QueryType": "SELECT", - "Original": "select t1.id1, t2.id1 from t1 left join t1 as t2 on t2.id1 = t2.id2", + "Original": "SELECT * FROM unsharded_authoritative as A LEFT JOIN unsharded_authoritative as B USING(col1)", + "Instructions": { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select A.col1 as col1, A.col2 as col2, 
B.col2 as col2 from unsharded_authoritative as A left join unsharded_authoritative as B on A.col1 = B.col1 where 1 != 1", + "Query": "select A.col1 as col1, A.col2 as col2, B.col2 as col2 from unsharded_authoritative as A left join unsharded_authoritative as B on A.col1 = B.col1", + "Table": "unsharded_authoritative" + }, + "TablesUsed": [ + "main.unsharded_authoritative" + ] + } + }, + { + "comment": "join query using table with muticolumn vindex", + "query": "select 1 from multicol_tbl m1 join multicol_tbl m2 on m1.cola = m2.cola", + "plan": { + "QueryType": "SELECT", + "Original": "select 1 from multicol_tbl m1 join multicol_tbl m2 on m1.cola = m2.cola", "Instructions": { "OperatorType": "Join", - "Variant": "LeftJoin", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "t1_t1", + "Variant": "Join", + "JoinColumnIndexes": "L:0", + "JoinVars": { + "m1_cola": 1 + }, + "TableName": "multicol_tbl_multicol_tbl", "Inputs": [ { "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { - "Name": "zlookup_unique", + "Name": "user", "Sharded": true }, - "FieldQuery": "select t1.id1 from t1 where 1 != 1", - "Query": "select t1.id1 from t1", - "Table": "t1" + "FieldQuery": "select 1, m1.cola from multicol_tbl as m1 where 1 != 1", + "Query": "select 1, m1.cola from multicol_tbl as m1", + "Table": "multicol_tbl" }, { "OperatorType": "Route", - "Variant": "Scatter", + "Variant": "SubShard", "Keyspace": { - "Name": "zlookup_unique", + "Name": "user", "Sharded": true }, - "FieldQuery": "select t2.id1 from t1 as t2 where 1 != 1", - "Query": "select t2.id1 from t1 as t2 where t2.id1 = t2.id2", - "Table": "t1" + "FieldQuery": "select 1 from multicol_tbl as m2 where 1 != 1", + "Query": "select 1 from multicol_tbl as m2 where m2.cola = :m1_cola", + "Table": "multicol_tbl", + "Values": [ + ":m1_cola" + ], + "Vindex": "multicolIdx" } ] }, "TablesUsed": [ - "zlookup_unique.t1" + "user.multicol_tbl" ] } } -] +] \ No newline at end of file diff --git 
a/go/vt/vtgate/planbuilder/testdata/info_schema57_cases.json b/go/vt/vtgate/planbuilder/testdata/info_schema57_cases.json index 3723cc0834c..aec230c8b3a 100644 --- a/go/vt/vtgate/planbuilder/testdata/info_schema57_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/info_schema57_cases.json @@ -21,22 +21,7 @@ { "comment": "',' join information_schema", "query": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b where 1 != 1", - "Query": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b", - "Table": "information_schema.`TABLES`, information_schema.`COLUMNS`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b", "Instructions": { @@ -74,8 +59,7 @@ { "comment": "information schema join", "query": "select tables.TABLE_SCHEMA, files.`STATUS` from information_schema.tables join information_schema.files", - "v3-plan": "VT03019: column `tables`.TABLE_SCHEMA not found", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select tables.TABLE_SCHEMA, files.`STATUS` from information_schema.tables join information_schema.files", "Instructions": { @@ -94,22 +78,7 @@ { "comment": "access to qualified column names in information_schema", "query": "select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from information_schema.COLUMNS where 
information_schema.COLUMNS.COLUMN_NAME='toto'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from information_schema.`COLUMNS` where 1 != 1", - "Query": "select * from information_schema.`COLUMNS` where information_schema.`COLUMNS`.COLUMN_NAME = 'toto'", - "Table": "information_schema.`COLUMNS`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'", "Instructions": { @@ -131,27 +100,11 @@ "plan": { "QueryType": "SELECT", "Original": "select TABLE_NAME from information_schema.columns union select table_schema from information_schema.tables", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1 union select table_schema from information_schema.`tables` where 1 != 1", - "Query": "select TABLE_NAME from information_schema.`columns` union select table_schema from information_schema.`tables`", - "Table": "information_schema.`columns`" - } - } - }, - { - "comment": "union between information_schema tables that should not be merged", - "query": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'", "Instructions": { "OperatorType": "Distinct", + "Collations": [ + "0: utf8mb4_0900_ai_ci" + ], "Inputs": [ { "OperatorType": "Concatenate", @@ -163,10 +116,9 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select * from information_schema.`tables` where 1 != 1", - "Query": "select * from information_schema.`tables` where 
table_schema = :__vtschemaname /* VARCHAR */", - "SysTableTableSchema": "[VARCHAR(\"user\")]", - "Table": "information_schema.`tables`" + "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1", + "Query": "select distinct TABLE_NAME from information_schema.`columns`", + "Table": "information_schema.`columns`" }, { "OperatorType": "Route", @@ -175,17 +127,20 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select * from information_schema.`tables` where 1 != 1", - "Query": "select * from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */", - "SysTableTableSchema": "[VARCHAR(\"main\")]", + "FieldQuery": "select table_schema from information_schema.`tables` where 1 != 1", + "Query": "select distinct table_schema from information_schema.`tables`", "Table": "information_schema.`tables`" } ] } ] } - }, - "gen4-plan": { + } + }, + { + "comment": "union between information_schema tables that should not be merged", + "query": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'", + "plan": { "QueryType": "SELECT", "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'", "Instructions": { @@ -250,24 +205,7 @@ { "comment": "Select from information schema query with two tables that route should be merged", "query": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT RC.CONSTRAINT_NAME, 
ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1", - "Query": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and KCU.TABLE_NAME = :KCU_TABLE_NAME /* VARCHAR */ and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc", - "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]", - "SysTableTableSchema": "[VARCHAR(\"test\"), VARCHAR(\"test\")]", - "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, 
KCU.COLUMN_NAME", "Instructions": { @@ -288,45 +226,7 @@ { "comment": "Select from information schema query with three tables such that route for 2 should be merged but not for the last.", "query": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS_INFORMATION_SCHEMA.`TABLES`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select KCU.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1", - "Query": "select KCU.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and KCU.TABLE_NAME = :KCU_TABLE_NAME /* VARCHAR */ and KCU.TABLE_NAME = :KCU_TABLE_NAME1 
/* VARCHAR */ order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc", - "SysTableTableName": "[KCU_TABLE_NAME1:VARCHAR(\"data_type_table\"), KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]", - "SysTableTableSchema": "[VARCHAR(\"test\")]", - "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS" - }, - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select S.TABLE_NAME from INFORMATION_SCHEMA.`TABLES` as S where 1 != 1", - "Query": "select S.TABLE_NAME from INFORMATION_SCHEMA.`TABLES` as S where S.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and S.TABLE_NAME = :S_TABLE_NAME /* VARCHAR */", - "SysTableTableName": "[S_TABLE_NAME:VARCHAR(\"sc\")]", - "SysTableTableSchema": "[VARCHAR(\"test\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", "Instructions": { @@ -387,23 +287,7 @@ { "comment": "information_schema referential contraints", "query": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? 
AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where 1 != 1", - "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where kcu.table_schema = :__vtschemaname /* VARCHAR */ and rc.constraint_schema = :__vtschemaname /* VARCHAR */ and kcu.referenced_column_name is not null order by ordinal_position asc", - 
"SysTableTableSchema": "[:v1, :v2]", - "Table": "information_schema.key_column_usage, information_schema.referential_constraints" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position", "Instructions": { @@ -446,23 +330,7 @@ { "comment": "rails query", "query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'", - "Instructions": { - "OperatorType": "Route", - 
"Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1", - "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name /* VARCHAR */ and rc.constraint_schema = database() and rc.table_name = :rc_table_name /* VARCHAR */", - "SysTableTableName": "[fk_table_name:VARCHAR(\":vtg1\"), rc_table_name:VARCHAR(\":vtg1\")]", - "Table": "information_schema.referential_constraints, information_schema.key_column_usage" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'", "Instructions": { @@ -482,23 +350,7 @@ { "comment": "rails_query 2", "query": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'", - "v3-plan": { - "QueryType": "SELECT", - 
"Original": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from information_schema.schemata where 1 != 1", - "Query": "select * from information_schema.schemata where schema_name = :__vtschemaname /* VARCHAR */", - "SysTableTableSchema": "[VARCHAR(\"user\")]", - "Table": "information_schema.schemata" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'", "Instructions": { @@ -539,24 +391,7 @@ { "comment": "rails_query 4", "query": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 
fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1", - "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname /* VARCHAR */ and fk.table_name = :fk_table_name /* VARCHAR */ and rc.constraint_schema = :__vtschemaname /* VARCHAR */ and rc.table_name = :rc_table_name /* VARCHAR */", - "SysTableTableName": "[fk_table_name:VARCHAR(\"table_name\"), rc_table_name:VARCHAR(\"table_name\")]", - "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"table_schema\")]", - "Table": "information_schema.referential_constraints, information_schema.key_column_usage" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'", "Instructions": { @@ -638,23 +473,7 @@ { "comment": "rails_query 9", "query": "SELECT table_name FROM (SELECT * FROM information_schema.tables 
WHERE table_schema = 'table_schema') _subquery", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1", - "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */) as _subquery", - "SysTableTableSchema": "[VARCHAR(\"table_schema\")]", - "Table": "information_schema.`tables`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery", "Instructions": { @@ -674,24 +493,7 @@ { "comment": "rails_query 10", "query": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1", - "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */) as _subquery where _subquery.table_type = 'table_type' and _subquery.table_name = :_subquery_table_name /* VARCHAR */", - "SysTableTableName": "[_subquery_table_name:VARCHAR(\"table_name\")]", - 
"SysTableTableSchema": "[VARCHAR(\"table_schema\")]", - "Table": "information_schema.`tables`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'", "Instructions": { @@ -732,8 +534,7 @@ { "comment": "subquery of information_schema with itself", "query": "select TABLES.CHECKSUM from information_schema.`TABLES` where `TABLE_NAME` in (select `TABLE_NAME` from information_schema.`COLUMNS`)", - "v3-plan": "VT03019: column `TABLES`.`CHECKSUM` not found", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select TABLES.CHECKSUM from information_schema.`TABLES` where `TABLE_NAME` in (select `TABLE_NAME` from information_schema.`COLUMNS`)", "Instructions": { @@ -752,23 +553,7 @@ { "comment": "query trying to query two different keyspaces at the same time", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", - "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"main\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'", "Instructions": { @@ -779,7 +564,7 @@ "Sharded": false }, "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, 
TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", + "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"main\")]", "Table": "INFORMATION_SCHEMA.`TABLES`" } @@ -788,22 +573,7 @@ { "comment": "information_schema query using database() func", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = database()", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()", "Instructions": { @@ -822,23 +592,7 @@ { 
"comment": "table_schema predicate the wrong way around", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", - "SysTableTableSchema": "[VARCHAR(\"ks\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA", "Instructions": { @@ -858,24 +612,7 @@ { "comment": "table_name predicate against a routed table", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and TABLE_NAME = :TABLE_NAME /* VARCHAR */", - "SysTableTableName": "[TABLE_NAME:VARCHAR(\"route1\")]", - "SysTableTableSchema": "[VARCHAR(\"ks\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'", "Instructions": { @@ -916,23 +653,7 @@ { "comment": "able to isolate table_schema value even when hidden inside of ORs", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA 
= 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and (DATA_FREE = 42 or `CHECKSUM` = 'value')", - "SysTableTableSchema": "[VARCHAR(\"ks\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')", "Instructions": { @@ -952,22 +673,7 @@ { "comment": "expand star with information schema", "query": "select x.table_name from (select a.* from information_schema.key_column_usage a) x", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select x.table_name from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1", - "Query": "select x.table_name from (select a.* from information_schema.key_column_usage as a) as x", - "Table": "information_schema.key_column_usage" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x", "Instructions": { @@ -986,48 +692,7 @@ { "comment": "expand star with information schema in a derived table", "query": "select x.table_name from (select a.* from 
information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "x_COLUMN_NAME": 1 - }, - "TableName": "information_schema.key_column_usage_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select x.table_name, x.COLUMN_NAME from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1", - "Query": "select x.table_name, x.COLUMN_NAME from (select a.* from information_schema.key_column_usage as a) as x", - "Table": "information_schema.key_column_usage" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where `user`.id = :x_COLUMN_NAME", - "Table": "`user`", - "Values": [ - ":x_COLUMN_NAME" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id", "Instructions": { @@ -1075,22 +740,7 @@ { "comment": "join of information_schema queries with select stars exprs", "query": "select a.*, b.* from information_schema.GLOBAL_STATUS a, information_schema.CHARACTER_SETS b", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a.*, b.* from information_schema.GLOBAL_STATUS a, information_schema.CHARACTER_SETS b", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select a.*, b.* from information_schema.GLOBAL_STATUS 
as a, information_schema.CHARACTER_SETS as b where 1 != 1", - "Query": "select a.*, b.* from information_schema.GLOBAL_STATUS as a, information_schema.CHARACTER_SETS as b", - "Table": "information_schema.GLOBAL_STATUS, information_schema.CHARACTER_SETS" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a.*, b.* from information_schema.GLOBAL_STATUS a, information_schema.CHARACTER_SETS b", "Instructions": { @@ -1109,23 +759,7 @@ { "comment": "join two routes with SysTableTableName entries in LHS and RHS", "query": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select a.table_name from (select * from information_schema.key_column_usage as a where 1 != 1) as a join (select * from information_schema.referential_constraints where 1 != 1) as b where 1 != 1", - "Query": "select a.table_name from (select * from information_schema.key_column_usage as a where a.table_name = :a_table_name /* VARCHAR */) as a join (select * from information_schema.referential_constraints where table_name = :table_name /* VARCHAR */) as b", - "SysTableTableName": "[a_table_name:VARCHAR(\"users\"), table_name:VARCHAR(\"users\")]", - "Table": "information_schema.key_column_usage, information_schema.referential_constraints" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select 
* from information_schema.referential_constraints where table_name = 'users') b", "Instructions": { @@ -1145,35 +779,13 @@ { "comment": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", "query": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select sum(found) from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1", - "Query": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ limit 1)) as t", - "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]", - "Table": "information_schema.`tables`" - } - } - }, - { - "comment": "union as a derived table", - "query": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select found from (select 1 as found from 
information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum(0) AS sum(found)", "Inputs": [ { "OperatorType": "Concatenate", @@ -1206,28 +818,49 @@ } ] } - }, - "gen4-plan": { + } + }, + { + "comment": "union as a derived table", + "query": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", + "plan": { "QueryType": "SELECT", "Original": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select found from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1", - "Query": "select found from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ limit 1)) as t", - "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]", - "Table": "information_schema.`tables`" + "OperatorType": "Concatenate", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", + "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */", + 
"SysTableTableSchema": "[VARCHAR(\"music\")]", + "Table": "information_schema.`tables`" + }, + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 as found from information_schema.views where 1 != 1", + "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ limit 1", + "SysTableTableSchema": "[VARCHAR(\"music\")]", + "Table": "information_schema.views" + } + ] } } }, { "comment": "merge system schema queries as long as they have any same table_schema", "query": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", "Instructions": { @@ -1241,7 +874,7 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */", + "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ and table_schema = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]", "Table": "information_schema.`tables`" }, @@ -1253,34 +886,18 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.views where 1 != 1", - "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ limit 1", + "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname 
/* VARCHAR */ and table_schema = :__vtschemaname /* VARCHAR */ limit 1", "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]", "Table": "information_schema.views" } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)", - "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ limit 1)", - "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]", - "Table": "information_schema.`tables`" - } } }, { "comment": "merge system schema queries as long as they have any same table_name", "query": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", "Instructions": { @@ -1294,7 +911,7 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_schema = 
:__vtschemaname /* VARCHAR */", + "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ and table_schema = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]", "Table": "information_schema.`tables`" }, @@ -1306,45 +923,29 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.views where 1 != 1", - "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ limit 1", + "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ and table_schema = :__vtschemaname /* VARCHAR */ limit 1", "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]", "Table": "information_schema.views" } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)", - "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ limit 1)", - "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]", - "Table": "information_schema.`tables`" - } } }, { "comment": "merge union subquery with outer query referencing the same system schemas", "query": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' 
and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutExists", "PulloutVars": [ - "__sq_has_values1", - "__sq1" + "__sq_has_values" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Concatenate", "Inputs": [ { @@ -1355,8 +956,8 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name2 /* VARCHAR */ and table_name = :table_name3 /* VARCHAR */", - "SysTableTableName": "[table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\")]", + "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name1 /* VARCHAR */ and table_name = :table_name1 /* VARCHAR */", + "SysTableTableName": "[table_name1:VARCHAR(\"Music\")]", "Table": "information_schema.`tables`" }, { @@ -1367,13 +968,14 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.views where 1 != 1", - "Query": "select 1 as found from information_schema.views where table_name = :table_name4 /* VARCHAR */ and table_name = :table_name5 /* VARCHAR */ limit 1", - "SysTableTableName": "[table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\")]", + "Query": "select 1 as found from information_schema.views where table_name = 
:table_name2 /* VARCHAR */ and table_name = :table_name2 /* VARCHAR */ limit 1", + "SysTableTableName": "[table_name2:VARCHAR(\"user\")]", "Table": "information_schema.views" } ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "DBA", "Keyspace": { @@ -1381,111 +983,78 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name /* VARCHAR */ and table_name = :table_name1 /* VARCHAR */ and :__sq_has_values1", - "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name:VARCHAR(\"music\")]", + "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name /* VARCHAR */ and table_name = :table_name /* VARCHAR */ and :__sq_has_values", + "SysTableTableName": "[table_name:VARCHAR(\"Music\")]", "Table": "information_schema.`tables`" } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name /* VARCHAR */ and table_name = :table_name1 /* VARCHAR */ and exists (select 1 as found from information_schema.`tables` where table_name = :table_name2 /* VARCHAR */ and table_name = :table_name3 /* VARCHAR */ union all (select 1 as found from information_schema.views where table_name = :table_name4 /* VARCHAR */ and table_name = :table_name5 /* VARCHAR */ 
limit 1))", - "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\"), table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\"), table_name:VARCHAR(\"music\")]", - "Table": "information_schema.`tables`" - } } }, { "comment": "merge even one side have schema name in derived table", "query": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 + "OperatorType": "Distinct", + "Collations": [ + "0: utf8mb4_0900_ai_ci" ], "Inputs": [ { - "OperatorType": "Distinct", + "OperatorType": "Concatenate", "Inputs": [ { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select TABLE_NAME from information_schema.`tables` as t where 1 != 1", - "Query": "select TABLE_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", - "SysTableTableSchema": "[VARCHAR(\"a\")]", - "Table": "information_schema.`tables`" - }, - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1", - "Query": "select TABLE_NAME from information_schema.`columns`", - "Table": "information_schema.`columns`" - } - ] + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select TABLE_NAME from information_schema.`tables` as t where 1 != 1", + "Query": "select distinct TABLE_NAME from 
information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", + "SysTableTableSchema": "[VARCHAR(\"a\")]", + "Table": "information_schema.`tables`" + }, + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1", + "Query": "select distinct TABLE_NAME from information_schema.`columns`", + "Table": "information_schema.`columns`" } ] } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select dt.TABLE_NAME from (select TABLE_NAME from information_schema.`tables` as t where 1 != 1 union select TABLE_NAME from information_schema.`columns` where 1 != 1) as dt where 1 != 1", - "Query": "select dt.TABLE_NAME from (select TABLE_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ union select TABLE_NAME from information_schema.`columns`) as dt", - "SysTableTableSchema": "[VARCHAR(\"a\")]", - "Table": "information_schema.`tables`" - } } }, { "comment": "merge even one side have schema name in subquery", "query": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)", "Instructions": { - "OperatorType": "Subquery", + 
"OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Distinct", + "Collations": [ + "0: utf8mb4_0900_ai_ci" + ], "Inputs": [ { "OperatorType": "Concatenate", @@ -1497,8 +1066,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select COLUMN_NAME from information_schema.`tables` as t where 1 != 1", - "Query": "select COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", + "FieldQuery": "select :COLUMN_NAME from information_schema.`tables` as t where 1 != 1", + "Query": "select distinct :COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"a\")]", "Table": "information_schema.`tables`" }, @@ -1510,7 +1079,7 @@ "Sharded": false }, "FieldQuery": "select COLUMN_NAME from information_schema.`columns` where 1 != 1", - "Query": "select COLUMN_NAME from information_schema.`columns`", + "Query": "select distinct COLUMN_NAME from information_schema.`columns`", "Table": "information_schema.`columns`" } ] @@ -1518,6 +1087,7 @@ ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "DBA", "Keyspace": { @@ -1525,48 +1095,17 @@ "Sharded": false }, "FieldQuery": "select COLLATION_NAME from information_schema.`COLUMNS` as t where 1 != 1", - "Query": "select COLLATION_NAME from information_schema.`COLUMNS` as t where :__sq_has_values1 = 1 and COLUMN_NAME in ::__sq1", + "Query": "select COLLATION_NAME from information_schema.`COLUMNS` as t where :__sq_has_values and COLUMN_NAME in ::__sq1", "Table": "information_schema.`COLUMNS`" } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from 
information_schema.columns)", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select COLLATION_NAME from information_schema.`COLUMNS` as t where 1 != 1", - "Query": "select COLLATION_NAME from information_schema.`COLUMNS` as t where COLUMN_NAME in (select COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ union select COLUMN_NAME from information_schema.`columns`)", - "SysTableTableSchema": "[VARCHAR(\"a\")]", - "Table": "information_schema.`COLUMNS`" - } } }, { "comment": "table_schema OR predicate\n# It is unsupported because we do not route queries to multiple keyspaces right now", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = 'ks' or TABLE_SCHEMA = 'main'", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'", "Instructions": { diff --git a/go/vt/vtgate/planbuilder/testdata/info_schema80_cases.json b/go/vt/vtgate/planbuilder/testdata/info_schema80_cases.json index 51767530a3c..13a503a4eb8 100644 --- a/go/vt/vtgate/planbuilder/testdata/info_schema80_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/info_schema80_cases.json @@ -21,22 +21,7 @@ { "comment": "',' join information_schema", "query": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b", - 
"v3-plan": { - "QueryType": "SELECT", - "Original": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b where 1 != 1", - "Query": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b", - "Table": "information_schema.`TABLES`, information_schema.`COLUMNS`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b", "Instructions": { @@ -74,8 +59,7 @@ { "comment": "information schema join", "query": "select tables.TABLE_SCHEMA, files.`STATUS` from information_schema.tables join information_schema.files", - "v3-plan": "VT03019: column `tables`.TABLE_SCHEMA not found", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select tables.TABLE_SCHEMA, files.`STATUS` from information_schema.tables join information_schema.files", "Instructions": { @@ -94,22 +78,7 @@ { "comment": "access to qualified column names in information_schema", "query": "select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from information_schema.`COLUMNS` where 1 != 1", - "Query": "select * from information_schema.`COLUMNS` where information_schema.`COLUMNS`.COLUMN_NAME = 'toto'", - "Table": "information_schema.`COLUMNS`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": 
"select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'", "Instructions": { @@ -131,27 +100,11 @@ "plan": { "QueryType": "SELECT", "Original": "select TABLE_NAME from information_schema.columns union select table_schema from information_schema.tables", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1 union select table_schema from information_schema.`tables` where 1 != 1", - "Query": "select TABLE_NAME from information_schema.`columns` union select table_schema from information_schema.`tables`", - "Table": "information_schema.`columns`" - } - } - }, - { - "comment": "union between information_schema tables that should not be merged", - "query": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'", "Instructions": { "OperatorType": "Distinct", + "Collations": [ + "0: utf8mb4_0900_ai_ci" + ], "Inputs": [ { "OperatorType": "Concatenate", @@ -163,10 +116,9 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select * from information_schema.`tables` where 1 != 1", - "Query": "select * from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */", - "SysTableTableSchema": "[VARCHAR(\"user\")]", - "Table": "information_schema.`tables`" + "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1", + "Query": "select distinct TABLE_NAME from information_schema.`columns`", + "Table": "information_schema.`columns`" }, { "OperatorType": "Route", @@ -175,17 +127,20 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select * from 
information_schema.`tables` where 1 != 1", - "Query": "select * from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */", - "SysTableTableSchema": "[VARCHAR(\"main\")]", + "FieldQuery": "select table_schema from information_schema.`tables` where 1 != 1", + "Query": "select distinct table_schema from information_schema.`tables`", "Table": "information_schema.`tables`" } ] } ] } - }, - "gen4-plan": { + } + }, + { + "comment": "union between information_schema tables that should not be merged", + "query": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'", + "plan": { "QueryType": "SELECT", "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'", "Instructions": { @@ -250,24 +205,7 @@ { "comment": "Select from information schema query with two tables that route should be merged", "query": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", - "Instructions": { - "OperatorType": "Route", - 
"Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1", - "Query": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and KCU.TABLE_NAME = :KCU_TABLE_NAME /* VARCHAR */ and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc", - "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]", - "SysTableTableSchema": "[VARCHAR(\"test\"), VARCHAR(\"test\")]", - "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", "Instructions": { @@ -288,45 +226,7 @@ { "comment": "Select from information schema query with three tables such that route for 2 should be merged but not for the last.", "query": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND 
KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS_INFORMATION_SCHEMA.`TABLES`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select KCU.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1", - "Query": "select KCU.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and KCU.TABLE_NAME = :KCU_TABLE_NAME /* VARCHAR */ and KCU.TABLE_NAME = :KCU_TABLE_NAME1 /* VARCHAR */ order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc", - "SysTableTableName": "[KCU_TABLE_NAME1:VARCHAR(\"data_type_table\"), KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]", - "SysTableTableSchema": "[VARCHAR(\"test\")]", - "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS" - }, - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": 
"select S.TABLE_NAME from INFORMATION_SCHEMA.`TABLES` as S where 1 != 1", - "Query": "select S.TABLE_NAME from INFORMATION_SCHEMA.`TABLES` as S where S.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and S.TABLE_NAME = :S_TABLE_NAME /* VARCHAR */", - "SysTableTableName": "[S_TABLE_NAME:VARCHAR(\"sc\")]", - "SysTableTableSchema": "[VARCHAR(\"test\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", "Instructions": { @@ -387,23 +287,7 @@ { "comment": "information_schema referential contraints - cant merge without knowing values", "query": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? 
AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where 1 != 1", - "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where kcu.table_schema = :__vtschemaname /* VARCHAR */ and rc.constraint_schema = :__vtschemaname /* VARCHAR */ and kcu.referenced_column_name is not null order by ordinal_position asc", - 
"SysTableTableSchema": "[:v1, :v2]", - "Table": "information_schema.key_column_usage, information_schema.referential_constraints" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position", "Instructions": { @@ -446,23 +330,7 @@ { "comment": "rails query", "query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'", - "Instructions": { - "OperatorType": "Route", - 
"Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1", - "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name /* VARCHAR */ and rc.constraint_schema = database() and rc.table_name = :rc_table_name /* VARCHAR */", - "SysTableTableName": "[fk_table_name:VARCHAR(\":vtg1\"), rc_table_name:VARCHAR(\":vtg1\")]", - "Table": "information_schema.referential_constraints, information_schema.key_column_usage" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'", "Instructions": { @@ -482,23 +350,7 @@ { "comment": "rails_query 2", "query": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'", - "v3-plan": { - "QueryType": "SELECT", - 
"Original": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from information_schema.schemata where 1 != 1", - "Query": "select * from information_schema.schemata where schema_name = :__vtschemaname /* VARCHAR */", - "SysTableTableSchema": "[VARCHAR(\"user\")]", - "Table": "information_schema.schemata" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'", "Instructions": { @@ -539,24 +391,7 @@ { "comment": "rails_query 4", "query": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 
fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1", - "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname /* VARCHAR */ and fk.table_name = :fk_table_name /* VARCHAR */ and rc.constraint_schema = :__vtschemaname /* VARCHAR */ and rc.table_name = :rc_table_name /* VARCHAR */", - "SysTableTableName": "[fk_table_name:VARCHAR(\"table_name\"), rc_table_name:VARCHAR(\"table_name\")]", - "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"table_schema\")]", - "Table": "information_schema.referential_constraints, information_schema.key_column_usage" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'", "Instructions": { @@ -577,24 +412,7 @@ { "comment": "rails_query 5", "query": "SELECT cc.constraint_name AS 'name', cc.check_clause AS 
'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc join information_schema.table_constraints as tc using (constraint_schema, constraint_name) where 1 != 1", - "Query": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc join information_schema.table_constraints as tc using (constraint_schema, constraint_name) where tc.table_schema = :__vtschemaname /* VARCHAR */ and tc.table_name = :tc_table_name /* VARCHAR */ and cc.constraint_schema = :__vtschemaname /* VARCHAR */", - "SysTableTableName": "[tc_table_name:VARCHAR(\"table_name\")]", - "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"constraint_schema\")]", - "Table": "information_schema.check_constraints, information_schema.table_constraints" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND 
cc.constraint_schema = 'constraint_schema'", "Instructions": { @@ -700,23 +518,7 @@ { "comment": "rails_query 9", "query": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1", - "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */) as _subquery", - "SysTableTableSchema": "[VARCHAR(\"table_schema\")]", - "Table": "information_schema.`tables`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery", "Instructions": { @@ -736,24 +538,7 @@ { "comment": "rails_query 10", "query": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1", - "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */) as 
_subquery where _subquery.table_type = 'table_type' and _subquery.table_name = :_subquery_table_name /* VARCHAR */", - "SysTableTableName": "[_subquery_table_name:VARCHAR(\"table_name\")]", - "SysTableTableSchema": "[VARCHAR(\"table_schema\")]", - "Table": "information_schema.`tables`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'", "Instructions": { @@ -814,8 +599,7 @@ { "comment": "subquery of information_schema with itself", "query": "select TABLES.CHECKSUM from information_schema.`TABLES` where `TABLE_NAME` in (select `TABLE_NAME` from information_schema.`COLUMNS`)", - "v3-plan": "VT03019: column `TABLES`.`CHECKSUM` not found", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select TABLES.CHECKSUM from information_schema.`TABLES` where `TABLE_NAME` in (select `TABLE_NAME` from information_schema.`COLUMNS`)", "Instructions": { @@ -834,23 +618,7 @@ { "comment": "query trying to query two different keyspaces at the same time", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", - "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"main\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND 
TABLE_SCHEMA = 'main'", "Instructions": { @@ -861,7 +629,7 @@ "Sharded": false }, "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", + "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"main\")]", "Table": "INFORMATION_SCHEMA.`TABLES`" } @@ -870,22 +638,7 @@ { "comment": "information_schema query using database() func", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = database()", - "Table": "INFORMATION_SCHEMA.`TABLES`" 
- } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()", "Instructions": { @@ -904,23 +657,7 @@ { "comment": "table_schema predicate the wrong way around", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", - "SysTableTableSchema": "[VARCHAR(\"ks\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA", "Instructions": { @@ -940,24 +677,7 @@ { "comment": "table_name predicate against a routed table", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and TABLE_NAME = :TABLE_NAME /* VARCHAR */", - "SysTableTableName": "[TABLE_NAME:VARCHAR(\"route1\")]", - "SysTableTableSchema": "[VARCHAR(\"ks\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'", 
"Instructions": { @@ -998,23 +718,7 @@ { "comment": "able to isolate table_schema value even when hidden inside of ORs", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and (DATA_FREE = 42 or `CHECKSUM` = 'value')", - "SysTableTableSchema": "[VARCHAR(\"ks\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')", "Instructions": { @@ -1034,22 +738,7 @@ { "comment": "expand star with information schema", "query": "select x.table_name from (select a.* from information_schema.key_column_usage a) x", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select x.table_name from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1", - "Query": "select x.table_name from (select a.* from information_schema.key_column_usage as a) as x", - "Table": "information_schema.key_column_usage" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select x.table_name from (select a.* from information_schema.key_column_usage 
a) x", "Instructions": { @@ -1068,48 +757,7 @@ { "comment": "expand star with information schema in a derived table", "query": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "x_COLUMN_NAME": 1 - }, - "TableName": "information_schema.key_column_usage_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select x.table_name, x.COLUMN_NAME from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1", - "Query": "select x.table_name, x.COLUMN_NAME from (select a.* from information_schema.key_column_usage as a) as x", - "Table": "information_schema.key_column_usage" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where `user`.id = :x_COLUMN_NAME", - "Table": "`user`", - "Values": [ - ":x_COLUMN_NAME" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id", "Instructions": { @@ -1157,22 +805,7 @@ { "comment": "join of information_schema queries with select stars exprs", "query": "select a.*, b.* from information_schema.CHECK_CONSTRAINTS a, information_schema.CHARACTER_SETS b", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a.*, b.* from information_schema.CHECK_CONSTRAINTS a, information_schema.CHARACTER_SETS b", - "Instructions": { 
- "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select a.*, b.* from information_schema.CHECK_CONSTRAINTS as a, information_schema.CHARACTER_SETS as b where 1 != 1", - "Query": "select a.*, b.* from information_schema.CHECK_CONSTRAINTS as a, information_schema.CHARACTER_SETS as b", - "Table": "information_schema.CHECK_CONSTRAINTS, information_schema.CHARACTER_SETS" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a.*, b.* from information_schema.CHECK_CONSTRAINTS a, information_schema.CHARACTER_SETS b", "Instructions": { @@ -1191,23 +824,7 @@ { "comment": "join two routes with SysTableTableName entries in LHS and RHS", "query": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select a.table_name from (select * from information_schema.key_column_usage as a where 1 != 1) as a join (select * from information_schema.referential_constraints where 1 != 1) as b where 1 != 1", - "Query": "select a.table_name from (select * from information_schema.key_column_usage as a where a.table_name = :a_table_name /* VARCHAR */) as a join (select * from information_schema.referential_constraints where table_name = :table_name /* VARCHAR */) as b", - "SysTableTableName": "[a_table_name:VARCHAR(\"users\"), table_name:VARCHAR(\"users\")]", - "Table": "information_schema.key_column_usage, information_schema.referential_constraints" - } 
- }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b", "Instructions": { @@ -1227,35 +844,13 @@ { "comment": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", "query": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select sum(found) from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1", - "Query": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ limit 1)) as t", - "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]", - "Table": "information_schema.`tables`" - } - } - }, - { - "comment": "union as a derived table", - "query": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found 
from information_schema.views where table_schema = 'music' limit 1)) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum(0) AS sum(found)", "Inputs": [ { "OperatorType": "Concatenate", @@ -1288,28 +883,49 @@ } ] } - }, - "gen4-plan": { + } + }, + { + "comment": "union as a derived table", + "query": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", + "plan": { "QueryType": "SELECT", "Original": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select found from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1", - "Query": "select found from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ limit 1)) as t", - "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]", - "Table": "information_schema.`tables`" + "OperatorType": "Concatenate", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 
as found from information_schema.`tables` where 1 != 1", + "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */", + "SysTableTableSchema": "[VARCHAR(\"music\")]", + "Table": "information_schema.`tables`" + }, + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 as found from information_schema.views where 1 != 1", + "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ limit 1", + "SysTableTableSchema": "[VARCHAR(\"music\")]", + "Table": "information_schema.views" + } + ] } } }, { "comment": "merge system schema queries as long as they have any same table_schema", "query": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", "Instructions": { @@ -1323,7 +939,7 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */", + "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ and table_schema = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]", "Table": "information_schema.`tables`" }, @@ -1335,34 +951,18 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.views where 1 != 1", - "Query": "select 1 as found from 
information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ limit 1", + "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ and table_schema = :__vtschemaname /* VARCHAR */ limit 1", "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]", "Table": "information_schema.views" } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)", - "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ limit 1)", - "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]", - "Table": "information_schema.`tables`" - } } }, { "comment": "merge system schema queries as long as they have any same table_name", "query": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", "Instructions": { @@ -1376,7 +976,7 @@ 
"Sharded": false }, "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */", + "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ and table_schema = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]", "Table": "information_schema.`tables`" }, @@ -1388,45 +988,29 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.views where 1 != 1", - "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ limit 1", + "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ and table_schema = :__vtschemaname /* VARCHAR */ limit 1", "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]", "Table": "information_schema.views" } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)", - "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ limit 1)", - "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]", - "Table": "information_schema.`tables`" - } } }, { "comment": "merge 
union subquery with outer query referencing the same system schemas", "query": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutExists", "PulloutVars": [ - "__sq_has_values1", - "__sq1" + "__sq_has_values" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Concatenate", "Inputs": [ { @@ -1437,8 +1021,8 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name2 /* VARCHAR */ and table_name = :table_name3 /* VARCHAR */", - "SysTableTableName": "[table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\")]", + "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name1 /* VARCHAR */ and table_name = :table_name1 /* VARCHAR */", + "SysTableTableName": "[table_name1:VARCHAR(\"Music\")]", "Table": "information_schema.`tables`" }, { @@ -1449,13 +1033,14 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.views where 1 != 1", - "Query": "select 1 as found from information_schema.views where table_name = :table_name4 /* VARCHAR */ and table_name = :table_name5 /* VARCHAR */ 
limit 1", - "SysTableTableName": "[table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\")]", + "Query": "select 1 as found from information_schema.views where table_name = :table_name2 /* VARCHAR */ and table_name = :table_name2 /* VARCHAR */ limit 1", + "SysTableTableName": "[table_name2:VARCHAR(\"user\")]", "Table": "information_schema.views" } ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "DBA", "Keyspace": { @@ -1463,111 +1048,78 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name /* VARCHAR */ and table_name = :table_name1 /* VARCHAR */ and :__sq_has_values1", - "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name:VARCHAR(\"music\")]", + "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name /* VARCHAR */ and table_name = :table_name /* VARCHAR */ and :__sq_has_values", + "SysTableTableName": "[table_name:VARCHAR(\"Music\")]", "Table": "information_schema.`tables`" } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name /* VARCHAR */ and table_name = :table_name1 /* VARCHAR */ and exists (select 1 as found from information_schema.`tables` where table_name = :table_name2 /* VARCHAR */ and table_name 
= :table_name3 /* VARCHAR */ union all (select 1 as found from information_schema.views where table_name = :table_name4 /* VARCHAR */ and table_name = :table_name5 /* VARCHAR */ limit 1))", - "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\"), table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\"), table_name:VARCHAR(\"music\")]", - "Table": "information_schema.`tables`" - } } }, { "comment": "merge even one side have schema name in derived table", "query": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 + "OperatorType": "Distinct", + "Collations": [ + "0: utf8mb4_0900_ai_ci" ], "Inputs": [ { - "OperatorType": "Distinct", + "OperatorType": "Concatenate", "Inputs": [ { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select TABLE_NAME from information_schema.`tables` as t where 1 != 1", - "Query": "select TABLE_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", - "SysTableTableSchema": "[VARCHAR(\"a\")]", - "Table": "information_schema.`tables`" - }, - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1", - "Query": "select TABLE_NAME from information_schema.`columns`", - "Table": "information_schema.`columns`" - } - ] + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": 
"main", + "Sharded": false + }, + "FieldQuery": "select TABLE_NAME from information_schema.`tables` as t where 1 != 1", + "Query": "select distinct TABLE_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", + "SysTableTableSchema": "[VARCHAR(\"a\")]", + "Table": "information_schema.`tables`" + }, + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1", + "Query": "select distinct TABLE_NAME from information_schema.`columns`", + "Table": "information_schema.`columns`" } ] } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select dt.TABLE_NAME from (select TABLE_NAME from information_schema.`tables` as t where 1 != 1 union select TABLE_NAME from information_schema.`columns` where 1 != 1) as dt where 1 != 1", - "Query": "select dt.TABLE_NAME from (select TABLE_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ union select TABLE_NAME from information_schema.`columns`) as dt", - "SysTableTableSchema": "[VARCHAR(\"a\")]", - "Table": "information_schema.`tables`" - } } }, { "comment": "merge even one side have schema name in subquery", "query": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from 
information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Distinct", + "Collations": [ + "0: utf8mb4_0900_ai_ci" + ], "Inputs": [ { "OperatorType": "Concatenate", @@ -1579,8 +1131,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select COLUMN_NAME from information_schema.`tables` as t where 1 != 1", - "Query": "select COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", + "FieldQuery": "select :COLUMN_NAME from information_schema.`tables` as t where 1 != 1", + "Query": "select distinct :COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"a\")]", "Table": "information_schema.`tables`" }, @@ -1592,7 +1144,7 @@ "Sharded": false }, "FieldQuery": "select COLUMN_NAME from information_schema.`columns` where 1 != 1", - "Query": "select COLUMN_NAME from information_schema.`columns`", + "Query": "select distinct COLUMN_NAME from information_schema.`columns`", "Table": "information_schema.`columns`" } ] @@ -1600,6 +1152,7 @@ ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "DBA", "Keyspace": { @@ -1607,48 +1160,17 @@ "Sharded": false }, "FieldQuery": "select COLLATION_NAME from information_schema.`COLUMNS` as t where 1 != 1", - "Query": "select COLLATION_NAME from information_schema.`COLUMNS` as t where :__sq_has_values1 = 1 and COLUMN_NAME in ::__sq1", + "Query": "select COLLATION_NAME from information_schema.`COLUMNS` as t where :__sq_has_values and COLUMN_NAME in ::__sq1", "Table": "information_schema.`COLUMNS`" } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select `COLLATION_NAME` from 
information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select COLLATION_NAME from information_schema.`COLUMNS` as t where 1 != 1", - "Query": "select COLLATION_NAME from information_schema.`COLUMNS` as t where COLUMN_NAME in (select COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ union select COLUMN_NAME from information_schema.`columns`)", - "SysTableTableSchema": "[VARCHAR(\"a\")]", - "Table": "information_schema.`COLUMNS`" - } } }, { "comment": "table_schema OR predicate\n# It is unsupported because we do not route queries to multiple keyspaces right now", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = 'ks' or TABLE_SCHEMA = 'main'", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'", "Instructions": { diff --git a/go/vt/vtgate/planbuilder/testdata/large_cases.json b/go/vt/vtgate/planbuilder/testdata/large_cases.json index 4b2fae633ab..43adc1f5343 100644 --- a/go/vt/vtgate/planbuilder/testdata/large_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/large_cases.json @@ -2,191 +2,7 @@ { "comment": "select 
user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y", "query": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_user_extra_user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinVars": { - "user_extra_user_id": 0 - }, - "TableName": "user_extra_user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.user_id from user_extra where 1 != 1", - "Query": "select user_extra.user_id from user_extra where user_extra.user_id = :user_id", - "Table": "user_extra", - "Values": [ - ":user_id" - ], - "Vindex": "user_index" - }, - { - 
"OperatorType": "Join", - "Variant": "Join", - "TableName": "user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_metadata where 1 != 1", - "Query": "select 1 from user_metadata where user_metadata.user_id = :user_extra_user_id", - "Table": "user_metadata", - "Values": [ - ":user_extra_user_id" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinVars": { - "music_id": 0 - }, - "TableName": "music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music", - "Table": "music" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinVars": { - "unsharded_x": 0 - }, - "TableName": "unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.x from unsharded where 1 != 1", - "Query": "select unsharded.x from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "TableName": "unsharded_a_unsharded_b_unsharded_auto_music_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded_a where 1 != 1", - "Query": "select 1 from unsharded_a where unsharded_a.y = :unsharded_x", - "Table": "unsharded_a" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "TableName": "unsharded_b_unsharded_auto_music_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - 
"Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded_b where 1 != 1", - "Query": "select 1 from unsharded_b", - "Table": "unsharded_b" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "TableName": "unsharded_auto_music_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded_auto where 1 != 1", - "Query": "select 1 from unsharded_auto", - "Table": "unsharded_auto" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music_extra where 1 != 1", - "Query": "select 1 from music_extra where music_extra.music_id = :music_id", - "Table": "music_extra", - "Values": [ - ":music_id" - ], - "Vindex": "music_user_map" - } - ] - } - ] - } - ] - } - ] - } - ] - } - ] - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y", "Instructions": { diff --git a/go/vt/vtgate/planbuilder/testdata/large_union_cases.json b/go/vt/vtgate/planbuilder/testdata/large_union_cases.json index 9120e39bfd6..1ad7b33d589 100644 --- a/go/vt/vtgate/planbuilder/testdata/large_union_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/large_union_cases.json @@ -2,1626 +2,7 @@ { "comment": "this testcase breaks goland, so it lives on its own file", "query": "(SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 
1270703806 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270714657 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270721330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270812079 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271011532 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034164 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034177 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271066849 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271098740 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271355000 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271924504 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272086055 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM 
`music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270650576 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270652906 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270660650 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT 
`content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270649256 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270653671 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270717223 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270720898 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271346411 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271352121 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271354908 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271367516 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271472522 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271821733 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC 
LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272068709 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270703806 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` 
FROM `music` WHERE `user_id` = 1270714657 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270721330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270812079 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271011532 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034164 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034177 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271066849 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271098740 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271355000 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271924504 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272086055 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION 
(SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270650576 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270652906 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270660650 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270649256 ORDER BY created_at ASC, 
id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270653671 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270717223 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270720898 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271346411 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271352121 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271354908 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271367516 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271472522 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271821733 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272068709 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 
ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11)", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - 
"OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": 
"Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - 
"Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270698330 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270698330)" - ], - "Vindex": "user_index" - }, - { - 
"OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270699497 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270699497)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270703806 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270703806)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270707364 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270707364)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270714657 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270714657)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270721330 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270721330)" - ], - "Vindex": 
"user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270812079 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270812079)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271011532 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271011532)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271034164 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271034164)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271034177 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271034177)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271066849 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - 
"INT64(1271066849)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271098740 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271098740)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271355000 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271355000)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271639345 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271639345)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271914117 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271914117)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271924504 order by created_at asc, id asc limit 11", 
- "Table": "music", - "Values": [ - "INT64(1271924504)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272086055 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1272086055)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272127855 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1272127855)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272191137 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1272191137)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272468271 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1272468271)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270637436 order 
by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270637436)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270644941 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270644941)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270650576 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270650576)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270652906 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270652906)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270660650 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270660650)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from 
music where user_id = 1270670201 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270670201)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270698330 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270698330)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270699497 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270699497)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270707364 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270707364)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271365691)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - 
"Query": "select content, user_id from music where user_id = 1271799956 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271799956)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271914117 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271914117)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270637436 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270637436)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271799956 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271799956)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270637436 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270637436)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, 
user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271639345 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271639345)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270644941 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270644941)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270649256 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270649256)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270653671 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270653671)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270670201 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270670201)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - 
}, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270717223 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270717223)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270720898 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270720898)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270982590 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270982590)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271346411 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271346411)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271352121 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271352121)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - 
"Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271354908 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271354908)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271365691)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271367516 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271367516)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271472522 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271472522)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271607757 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271607757)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - 
"Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271639345 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271639345)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271821733 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271821733)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271914117 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271914117)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272068709 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1272068709)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272127855 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1272127855)" - ], - "Vindex": "user_index" - } - ] - } - ] - 
}, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272191137 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1272191137)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272244005 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1272244005)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272468271 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1272468271)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270982590 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270982590)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271365691)" - ], - 
"Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271607757 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271607757)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270982590 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270982590)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271365691)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271607757 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271607757)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272244005 order by created_at asc, id asc limit 11", - "Table": "music", - 
"Values": [ - "INT64(1272244005)" - ], - "Vindex": "user_index" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270703806 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270714657 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270721330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270812079 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271011532 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034164 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034177 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271066849 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271098740 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271355000 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 
1271924504 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272086055 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270650576 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270652906 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270660650 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM 
`music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270649256 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270653671 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270717223 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270720898 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271346411 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271352121 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271354908 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271367516 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271472522 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT 
`content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271821733 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272068709 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11)", "Instructions": { @@ -1642,8 +23,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "(select content, 
user_id, weight_string(content), weight_string(user_id) from music where 1 != 1) union (select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1)", - "Query": "(select content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270698330 order by created_at asc, id asc limit 11) union (select content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270698330 order by created_at asc, id asc limit 11)", + "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from ((select content, user_id from music where 1 != 1) union (select content, user_id from music where 1 != 1)) as dt where 1 != 1", + "Query": "select content, user_id, weight_string(content), weight_string(user_id) from ((select content, user_id from music where user_id = 1270698330 order by created_at asc, id asc limit 11) union (select content, user_id from music where user_id = 1270698330 order by created_at asc, id asc limit 11)) as dt", "Table": "music", "Values": [ "INT64(1270698330)" @@ -1657,8 +38,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "(select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1) union (select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1)", - "Query": "(select content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270699497 order by created_at asc, id asc limit 11) union (select content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270699497 order by created_at asc, id asc limit 11)", + "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from ((select content, user_id from music where 1 != 1) union (select content, user_id from music where 1 != 1)) as dt where 1 != 1", + "Query": "select content, user_id, weight_string(content), 
weight_string(user_id) from ((select content, user_id from music where user_id = 1270699497 order by created_at asc, id asc limit 11) union (select content, user_id from music where user_id = 1270699497 order by created_at asc, id asc limit 11)) as dt", "Table": "music", "Values": [ "INT64(1270699497)" diff --git a/go/vt/vtgate/planbuilder/testdata/lock_cases.json b/go/vt/vtgate/planbuilder/testdata/lock_cases.json index 98ffa9d1bb9..c14ba026869 100644 --- a/go/vt/vtgate/planbuilder/testdata/lock_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/lock_cases.json @@ -2,23 +2,7 @@ { "comment": "get_lock from dual", "query": "select get_lock('xyz', 10) from dual", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select get_lock('xyz', 10) from dual", - "Instructions": { - "OperatorType": "Lock", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetDestination": "KeyspaceID(00)", - "FieldQuery": "select get_lock('xyz', 10) from dual where 1 != 1", - "lock_func": [ - "get_lock('xyz', 10)" - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select get_lock('xyz', 10) from dual", "Instructions": { @@ -41,23 +25,7 @@ { "comment": "is_free_lock from dual", "query": "select is_free_lock('xyz') from dual", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select is_free_lock('xyz') from dual", - "Instructions": { - "OperatorType": "Lock", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetDestination": "KeyspaceID(00)", - "FieldQuery": "select is_free_lock('xyz') from dual where 1 != 1", - "lock_func": [ - "is_free_lock('xyz')" - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select is_free_lock('xyz') from dual", "Instructions": { @@ -80,23 +48,7 @@ { "comment": "get_lock from dual prepare query", "query": "select get_lock(?, ?)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select get_lock(?, ?)", - "Instructions": { - "OperatorType": "Lock", - "Keyspace": { - "Name": 
"main", - "Sharded": false - }, - "TargetDestination": "KeyspaceID(00)", - "FieldQuery": "select get_lock(:v1, :v2) from dual where 1 != 1", - "lock_func": [ - "get_lock(:v1, :v2)" - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select get_lock(?, ?)", "Instructions": { @@ -152,24 +104,7 @@ { "comment": "multiple lock functions", "query": "select get_lock('xyz', 10), is_free_lock('abc') from dual", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select get_lock('xyz', 10), is_free_lock('abc') from dual", - "Instructions": { - "OperatorType": "Lock", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetDestination": "KeyspaceID(00)", - "FieldQuery": "select get_lock('xyz', 10), is_free_lock('abc') from dual where 1 != 1", - "lock_func": [ - "get_lock('xyz', 10)", - "is_free_lock('abc')" - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select get_lock('xyz', 10), is_free_lock('abc') from dual", "Instructions": { diff --git a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json index 52f5ce2dd2a..58e6744f1a6 100644 --- a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json @@ -2,54 +2,20 @@ { "comment": "Test cases in this file follow the code in memory_sort.go.\n# scatter aggregate order by references ungrouped column", "query": "select a, b, count(*) from user group by a order by b", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, count(*) from user group by a order by b", "Instructions": { "OperatorType": "Sort", "Variant": "Memory", - "OrderBy": "(1|3) ASC", + "OrderBy": "(1|4) ASC", "ResultColumns": 3, "Inputs": [ { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "sum_count(2) AS count", - "GroupBy": "0", - "ResultColumns": 4, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { 
- "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)", - "OrderBy": "(0|4) ASC", - "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by a, weight_string(a) order by a asc", - "ResultColumns": 4, - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select a, b, count(*) from user group by a order by b", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(1|3) ASC", - "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "random(1) AS b, sum_count_star(2) AS count(*), random(3)", - "GroupBy": "(0|4)", + "Aggregates": "any_value(1) AS b, sum_count_star(2) AS count(*), any_value(4)", + "GroupBy": "(0|3)", "Inputs": [ { "OperatorType": "Route", @@ -58,9 +24,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)", - "OrderBy": "(0|4) ASC", - "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by a, weight_string(a) order by a asc", + "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a)", + "OrderBy": "(0|3) ASC", + "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a) order by a asc", "Table": "`user`" } ] @@ -75,39 +41,7 @@ { "comment": "scatter aggregate order by references aggregate expression", "query": "select a, b, count(*) k from user group by a order by k", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, count(*) k from user group by a order by k", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "2 ASC", - "Inputs": [ - { - "OperatorType": "Aggregate", - 
"Variant": "Ordered", - "Aggregates": "sum_count(2) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)", - "OrderBy": "(0|3) ASC", - "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc", - "ResultColumns": 3, - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, count(*) k from user group by a order by k", "Instructions": { @@ -119,7 +53,7 @@ { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "random(1) AS b, sum_count_star(2) AS k", + "Aggregates": "any_value(1) AS b, sum_count_star(2) AS k", "GroupBy": "(0|3)", "Inputs": [ { @@ -146,54 +80,20 @@ { "comment": "select a, b, count(*) k from user group by a order by b, a, k", "query": "select a, b, count(*) k from user group by a order by b, a, k", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, count(*) k from user group by a order by b, a, k", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(1|3) ASC, (0|4) ASC, 2 ASC", - "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(2) AS count", - "GroupBy": "0", - "ResultColumns": 5, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, count(*) as k, weight_string(b), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)", - "OrderBy": "(0|4) ASC", - "Query": "select a, b, count(*) as k, weight_string(b), weight_string(a) from `user` group by a, weight_string(a) order by a asc", - "ResultColumns": 5, - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { 
"QueryType": "SELECT", "Original": "select a, b, count(*) k from user group by a order by b, a, k", "Instructions": { "OperatorType": "Sort", "Variant": "Memory", - "OrderBy": "(1|3) ASC, (0|4) ASC, 2 ASC", + "OrderBy": "(1|4) ASC, (0|3) ASC, 2 ASC", "ResultColumns": 3, "Inputs": [ { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "random(1) AS b, sum_count_star(2) AS k, random(3)", - "GroupBy": "(0|4)", + "Aggregates": "any_value(1) AS b, sum_count_star(2) AS k, any_value(4)", + "GroupBy": "(0|3)", "Inputs": [ { "OperatorType": "Route", @@ -202,9 +102,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, count(*) as k, weight_string(b), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)", - "OrderBy": "(0|4) ASC", - "Query": "select a, b, count(*) as k, weight_string(b), weight_string(a) from `user` group by a, weight_string(a) order by a asc", + "FieldQuery": "select a, b, count(*) as k, weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a)", + "OrderBy": "(0|3) ASC", + "Query": "select a, b, count(*) as k, weight_string(a), weight_string(b) from `user` group by a, weight_string(a) order by a asc", "Table": "`user`" } ] @@ -219,45 +119,7 @@ { "comment": "scatter aggregate with memory sort and limit", "query": "select a, b, count(*) k from user group by a order by k desc limit 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, count(*) k from user group by a order by k desc limit 10", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "2 DESC", - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(2) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, count(*) as k, 
weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)", - "OrderBy": "(0|3) ASC", - "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc", - "ResultColumns": 3, - "Table": "`user`" - } - ] - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, count(*) k from user group by a order by k desc limit 10", "Instructions": { @@ -273,7 +135,7 @@ { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "random(1) AS b, sum_count_star(2) AS k", + "Aggregates": "any_value(1) AS b, sum_count_star(2) AS k", "GroupBy": "(0|3)", "Inputs": [ { @@ -302,41 +164,7 @@ { "comment": "scatter aggregate with memory sort and order by number", "query": "select a, b, count(*) k from user group by a order by 1,3", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, count(*) k from user group by a order by 1,3", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(0|3) ASC, 2 ASC", - "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(2) AS count", - "GroupBy": "0", - "ResultColumns": 4, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)", - "OrderBy": "(0|3) ASC", - "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by 1 asc", - "ResultColumns": 4, - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, count(*) k from user group by a order by 1,3", "Instructions": { @@ -348,7 +176,7 @@ { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "random(1) AS b, sum_count_star(2) AS k", + "Aggregates": "any_value(1) AS b, 
sum_count_star(2) AS k", "GroupBy": "(0|3)", "Inputs": [ { @@ -375,41 +203,7 @@ { "comment": "scatter aggregate with memory sort and order by number, reuse weight_string\n# we have to use a meaningless construct to test this", "query": "select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(0|2) ASC, 1 ASC, (0|2) ASC", - "ResultColumns": 2, - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "2", - "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select textcol1 as t, count(*) as k, weight_string(textcol1) from `user` where 1 != 1 group by textcol1, weight_string(textcol1)", - "OrderBy": "(0|2) ASC, (0|2) ASC", - "Query": "select textcol1 as t, count(*) as k, weight_string(textcol1) from `user` group by textcol1, weight_string(textcol1) order by textcol1 asc, textcol1 asc", - "ResultColumns": 3, - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1", "Instructions": { @@ -447,94 +241,37 @@ { "comment": "order by on a cross-shard derived table", "query": "select id from (select user.id, user.col from user join user_extra) as t order by id", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from (select user.id, user.col from user join user_extra) as t order by id", "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(0|2) ASC", - "ResultColumns": 1, + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": 
"L:0", + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,L:2", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col, weight_string(`user`.id) from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select id from (select user.id, user.col from user join user_extra) as t order by id", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(0|1) ASC", - "ResultColumns": 1, - "Inputs": [ + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, weight_string(id) from (select `user`.id, `user`.col from `user` where 1 != 1) as t where 1 != 1", + "OrderBy": "(0|1) ASC", + "Query": "select id, weight_string(id) from (select `user`.id, `user`.col from `user`) as t order by id asc", + "Table": "`user`" + }, { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from (select `user`.id, `user`.col from `user` where 1 != 1) as t where 1 != 1", - "Query": "select id, weight_string(id) from (select `user`.id, `user`.col from `user`) as t", - 
"Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra", + "Table": "user_extra" } ] }, @@ -547,60 +284,7 @@ { "comment": "order by on a cross-shard query. Note: this happens only when an order by column is from the second table", "query": "select user.col1 as a, user.col2 b, music.col3 c from user, music where user.id = music.id and user.id = 1 order by c", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 as a, user.col2 b, music.col3 c from user, music where user.id = music.id and user.id = 1 order by c", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(2|3) ASC", - "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,R:0,R:1", - "JoinVars": { - "user_id": 2 - }, - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col1 as a, `user`.col2 as b, `user`.id from `user` where 1 != 1", - "Query": "select `user`.col1 as a, `user`.col2 as b, `user`.id from `user` where `user`.id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.col3 as c, weight_string(music.col3) from music where 1 != 1", - "Query": "select music.col3 as c, weight_string(music.col3) from music where music.id = :user_id", - "Table": "music", - "Values": [ - 
":user_id" - ], - "Vindex": "music_user_map" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col1 as a, user.col2 b, music.col3 c from user, music where user.id = music.id and user.id = 1 order by c", "Instructions": { @@ -661,60 +345,7 @@ { "comment": "Order by for join, with mixed cross-shard ordering", "query": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by 1 asc, 3 desc, 2 asc", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by 1 asc, 3 desc, 2 asc", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(0|3) ASC, (2|4) DESC, (1|5) ASC", - "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,R:0,L:2,R:1,L:3", - "JoinVars": { - "user_id": 4 - }, - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2), `user`.id from `user` where 1 != 1", - "Query": "select `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2), `user`.id from `user` where `user`.id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.col3, weight_string(music.col3) from music where 1 != 1", - "Query": "select music.col3, weight_string(music.col3) from music where music.id = :user_id", - "Table": "music", - "Values": [ - ":user_id" - ], - "Vindex": "music_user_map" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", 
"Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by 1 asc, 3 desc, 2 asc", "Instructions": { @@ -775,49 +406,7 @@ { "comment": "Order by for join, on text column in LHS.", "query": "select u.a, u.textcol1, un.col2 from user u join unsharded un order by u.textcol1, un.col2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.a, u.textcol1, un.col2 from user u join unsharded un order by u.textcol1, un.col2", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(1|3) ASC, (2|4) ASC", - "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,R:0,L:2,R:1", - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1", - "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1", - "Query": "select un.col2, weight_string(un.col2) from unsharded as un", - "Table": "unsharded" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.a, u.textcol1, un.col2 from user u join unsharded un order by u.textcol1, un.col2", "Instructions": { @@ -867,49 +456,7 @@ { "comment": "Order by for join, on text column in RHS.", "query": "select u.a, u.textcol1, un.col2 from unsharded un join user u order by u.textcol1, un.col2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.a, u.textcol1, un.col2 from unsharded un join user u order by u.textcol1, un.col2", - "Instructions": { - "OperatorType": "Sort", - "Variant": 
"Memory", - "OrderBy": "(1|3) ASC, (2|4) ASC", - "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0,R:1,L:0,R:2,L:1", - "TableName": "unsharded_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1", - "Query": "select un.col2, weight_string(un.col2) from unsharded as un", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1", - "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u", - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.a, u.textcol1, un.col2 from unsharded un join user u order by u.textcol1, un.col2", "Instructions": { @@ -959,36 +506,7 @@ { "comment": "order by for vindex func", "query": "select id, keyspace_id, range_start, range_end from user_index where id = :id order by range_start", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, keyspace_id, range_start, range_end from user_index where id = :id order by range_start", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "2 ASC", - "Inputs": [ - { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 0, - 1, - 2, - 3 - ], - "Fields": { - "id": "VARBINARY", - "keyspace_id": "VARBINARY", - "range_end": "VARBINARY", - "range_start": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, keyspace_id, range_start, range_end from user_index where id = :id order by range_start", "Instructions": { @@ -1024,8 +542,7 @@ { "comment": "unary 
expression", "query": "select a from user order by binary a desc", - "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: convert(a, binary)", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a from user order by binary a desc", "Instructions": { @@ -1049,8 +566,7 @@ { "comment": "unary expression in join query", "query": "select u.a from user u join music m on u.a = m.a order by binary a desc", - "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: convert(a, binary)", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.a from user u join music m on u.a = m.a order by binary a desc", "Instructions": { @@ -1096,23 +612,7 @@ { "comment": "intcol order by", "query": "select id, intcol from user order by intcol", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, intcol from user order by intcol", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, intcol from `user` where 1 != 1", - "OrderBy": "1 ASC", - "Query": "select id, intcol from `user` order by intcol asc", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, intcol from user order by intcol", "Instructions": { @@ -1135,8 +635,7 @@ { "comment": "scatter order by with order by column not present", "query": "select col from user order by id", - "v3-plan": "VT12001: unsupported: in scatter query: ORDER BY must reference a column in the SELECT list: id asc", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user order by id", "Instructions": { diff --git a/go/vt/vtgate/planbuilder/testdata/misc_cases.json b/go/vt/vtgate/planbuilder/testdata/misc_cases.json index 38ea0377773..399cebe8939 100644 --- a/go/vt/vtgate/planbuilder/testdata/misc_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/misc_cases.json @@ -2,14 
+2,7 @@ { "comment": "prepare statement with select", "query": "prepare prep from 'select * from user where id = ?'", - "v3-plan": { - "QueryType": "PREPARE", - "Original": "prepare prep from 'select * from user where id = ?'", - "Instructions": { - "OperatorType": "Rows" - } - }, - "gen4-plan": { + "plan": { "QueryType": "PREPARE", "Original": "prepare prep from 'select * from user where id = ?'", "Instructions": { @@ -51,14 +44,7 @@ { "comment": "prepare statement with user defined variable", "query": "prepare prep from @prep_stmt", - "v3-plan": { - "QueryType": "PREPARE", - "Original": "prepare prep from @prep_stmt", - "Instructions": { - "OperatorType": "Rows" - } - }, - "gen4-plan": { + "plan": { "QueryType": "PREPARE", "Original": "prepare prep from @prep_stmt", "Instructions": { @@ -82,34 +68,7 @@ { "comment": "execute one param statement", "query": "execute prep_one_param using @foo", - "v3-plan": { - "QueryType": "EXECUTE", - "Original": "execute prep_one_param using @foo", - "Instructions": { - "OperatorType": "EXECUTE", - "Parameters": [ - "foo" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where id = :v1", - "Table": "`user`", - "Values": [ - ":v1" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "EXECUTE", "Original": "execute prep_one_param using @foo", "Instructions": { @@ -143,35 +102,7 @@ { "comment": "execute in param statement", "query": "execute prep_in_param using @x, @y", - "v3-plan": { - "QueryType": "EXECUTE", - "Original": "execute prep_in_param using @x, @y", - "Instructions": { - "OperatorType": "EXECUTE", - "Parameters": [ - "x", - "y" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": 
"select 1 from `user` where id in ::__vals", - "Table": "`user`", - "Values": [ - "(:v1, :v2)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "EXECUTE", "Original": "execute prep_in_param using @x, @y", "Instructions": { @@ -206,28 +137,7 @@ { "comment": "execute no param statement", "query": "execute prep_no_param", - "v3-plan": { - "QueryType": "EXECUTE", - "Original": "execute prep_no_param", - "Instructions": { - "OperatorType": "EXECUTE", - "Parameters": null, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user`", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "EXECUTE", "Original": "execute prep_no_param", "Instructions": { @@ -270,14 +180,7 @@ { "comment": "prepare a dual query", "query": "prepare prep_dual from 'select 1+?, 10/?'", - "v3-plan": { - "QueryType": "PREPARE", - "Original": "prepare prep_dual from 'select 1+?, 10/?'", - "Instructions": { - "OperatorType": "Rows" - } - }, - "gen4-plan": { + "plan": { "QueryType": "PREPARE", "Original": "prepare prep_dual from 'select 1+?, 10/?'", "Instructions": { diff --git a/go/vt/vtgate/planbuilder/testdata/oltp_cases.json b/go/vt/vtgate/planbuilder/testdata/oltp_cases.json index 9fdf352aee7..d470250531e 100644 --- a/go/vt/vtgate/planbuilder/testdata/oltp_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/oltp_cases.json @@ -2,26 +2,7 @@ { "comment": "OLTP simple select", "query": "SELECT c FROM sbtest34 WHERE id=15", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT c FROM sbtest34 WHERE id=15", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c from sbtest34 where 1 != 1", - "Query": "select c from sbtest34 where id = 15", - "Table": "sbtest34", - "Values": [ - 
"INT64(15)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT c FROM sbtest34 WHERE id=15", "Instructions": { @@ -47,22 +28,7 @@ { "comment": "OLTP simple range select", "query": "SELECT c FROM sbtest12 WHERE id BETWEEN 1 AND 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT c FROM sbtest12 WHERE id BETWEEN 1 AND 10", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c from sbtest12 where 1 != 1", - "Query": "select c from sbtest12 where id between 1 and 10", - "Table": "sbtest12" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT c FROM sbtest12 WHERE id BETWEEN 1 AND 10", "Instructions": { @@ -84,29 +50,7 @@ { "comment": "OLTP sum range select", "query": "SELECT SUM(k) FROM sbtest43 WHERE id BETWEEN 90 AND 990", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT SUM(k) FROM sbtest43 WHERE id BETWEEN 90 AND 990", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum(0)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select sum(k) from sbtest43 where 1 != 1", - "Query": "select sum(k) from sbtest43 where id between 90 and 990", - "Table": "sbtest43" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT SUM(k) FROM sbtest43 WHERE id BETWEEN 90 AND 990", "Instructions": { @@ -135,24 +79,7 @@ { "comment": "OLTP order range select", "query": "SELECT c FROM sbtest1 WHERE id BETWEEN 50 AND 235 ORDER BY c", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT c FROM sbtest1 WHERE id BETWEEN 50 AND 235 ORDER BY c", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c, weight_string(c) 
from sbtest1 where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select c, weight_string(c) from sbtest1 where id between 50 and 235 order by c asc", - "ResultColumns": 1, - "Table": "sbtest1" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT c FROM sbtest1 WHERE id BETWEEN 50 AND 235 ORDER BY c", "Instructions": { @@ -175,32 +102,7 @@ { "comment": "OLTP distinct range select", "query": "SELECT DISTINCT c FROM sbtest30 WHERE id BETWEEN 1 AND 10 ORDER BY c", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT DISTINCT c FROM sbtest30 WHERE id BETWEEN 1 AND 10 ORDER BY c", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "1", - "ResultColumns": 1, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c, weight_string(c) from sbtest30 where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select distinct c, weight_string(c) from sbtest30 where id between 1 and 10 order by c asc", - "ResultColumns": 2, - "Table": "sbtest30" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT DISTINCT c FROM sbtest30 WHERE id BETWEEN 1 AND 10 ORDER BY c", "Instructions": { @@ -230,29 +132,7 @@ { "comment": "OLTP index udpate", "query": "UPDATE sbtest6 SET k=k+1 WHERE id=5", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE sbtest6 SET k=k+1 WHERE id=5", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update sbtest6 set k = k + 1 where id = 5", - "Table": "sbtest6", - "Values": [ - "INT64(5)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.sbtest6" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE sbtest6 SET k=k+1 WHERE id=5", "Instructions": { @@ -278,29 +158,7 @@ { "comment": "OLTP non index update", 
"query": "UPDATE sbtest9 SET c=7 WHERE id=8", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE sbtest9 SET c=7 WHERE id=8", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update sbtest9 set c = 7 where id = 8", - "Table": "sbtest9", - "Values": [ - "INT64(8)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.sbtest9" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE sbtest9 SET c=7 WHERE id=8", "Instructions": { @@ -326,29 +184,7 @@ { "comment": "OLTP delete", "query": "DELETE FROM sbtest15 WHERE id=7525", - "v3-plan": { - "QueryType": "DELETE", - "Original": "DELETE FROM sbtest15 WHERE id=7525", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "delete from sbtest15 where id = 7525", - "Table": "sbtest15", - "Values": [ - "INT64(7525)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.sbtest15" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "DELETE FROM sbtest15 WHERE id=7525", "Instructions": { @@ -396,4 +232,4 @@ ] } } -] +] \ No newline at end of file diff --git a/go/vt/vtgate/planbuilder/testdata/other_read_cases.json b/go/vt/vtgate/planbuilder/testdata/other_read_cases.json index 795e4855fb5..92c8d132eda 100644 --- a/go/vt/vtgate/planbuilder/testdata/other_read_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/other_read_cases.json @@ -36,7 +36,7 @@ "comment": "Analyze statement", "query": "analyze table t1", "plan": { - "QueryType": "OTHER", + "QueryType": "ANALYZE", "Original": "analyze table t1", "Instructions": { "OperatorType": "Send", @@ -44,10 +44,12 @@ "Name": "main", "Sharded": false }, - "TargetDestination": "AnyShard()", - "Query": "analyze table t1", - "SingleShardOnly": true - } + "TargetDestination": "AllShards()", + 
"Query": "analyze table t1" + }, + "TablesUsed": [ + "t1" + ] } }, { diff --git a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json index 6b80ba8862b..28503204b9f 100644 --- a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json @@ -2,22 +2,7 @@ { "comment": "HAVING implicitly references table col", "query": "select user.col1 from user having col2 = 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 from user having col2 = 2", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col1 from `user` where 1 != 1", - "Query": "select `user`.col1 from `user` having col2 = 2", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col1 from user having col2 = 2", "Instructions": { @@ -39,47 +24,12 @@ { "comment": "ambiguous symbol reference", "query": "select user.col1, user_extra.col1 from user join user_extra having col1 = 2", - "v3-plan": "VT03021: ambiguous column reference: col1", - "gen4-plan": "Column 'col1' in field list is ambiguous" + "plan": "Column 'col1' in field list is ambiguous" }, { "comment": "TODO: this should be 'Column 'col1' in having clause is ambiguous'\n# non-ambiguous symbol reference", "query": "select user.col1, user_extra.col1 from user join user_extra having user_extra.col1 = 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col1, user_extra.col1 from user join user_extra having user_extra.col1 = 2", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col1 from `user` where 1 
!= 1", - "Query": "select `user`.col1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col1 from user_extra where 1 != 1", - "Query": "select user_extra.col1 from user_extra having user_extra.col1 = 2", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col1, user_extra.col1 from user join user_extra having user_extra.col1 = 2", "Instructions": { @@ -121,41 +71,7 @@ { "comment": "HAVING multi-route", "query": "select user.col1 as a, user.col2, user_extra.col3 from user join user_extra having 1 = 1 and a = 1 and a = user.col2 and user_extra.col3 = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 as a, user.col2, user_extra.col3 from user join user_extra having 1 = 1 and a = 1 and a = user.col2 and user_extra.col3 = 1", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,R:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col1 as a, `user`.col2 from `user` where 1 != 1", - "Query": "select `user`.col1 as a, `user`.col2 from `user` having 1 = 1 and a = 1 and a = `user`.col2", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col3 from user_extra where 1 != 1", - "Query": "select user_extra.col3 from user_extra having user_extra.col3 = 1", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col1 as a, user.col2, user_extra.col3 from user join user_extra having 1 = 1 and a = 1 and a = user.col2 and user_extra.col3 = 1", "Instructions": { @@ -197,58 +113,19 @@ { 
"comment": "HAVING uses subquery", "query": "select id from user having id in (select col from user)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user having id in (select col from user)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` having :__sq_has_values1 = 1 and id in ::__vals", - "Table": "`user`", - "Values": [ - "::__sq1" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user having id in (select col from user)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -260,6 +137,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -267,7 +145,7 @@ "Sharded": true }, "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals", + "Query": "select id from `user` where :__sq_has_values and id in ::__vals", "Table": "`user`", "Values": [ "::__sq1" @@ -284,26 +162,7 @@ { "comment": "ORDER BY, reference col from local table.", "query": "select col from user where id = 5 order by aa", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where id = 5 order by aa", - 
"Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where id = 5 order by aa asc", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where id = 5 order by aa", "Instructions": { @@ -329,26 +188,7 @@ { "comment": "ORDER BY uses column numbers", "query": "select col from user where id = 1 order by 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where id = 1 order by 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where id = 1 order by 1 asc", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where id = 1 order by 1", "Instructions": { @@ -374,23 +214,7 @@ { "comment": "ORDER BY on scatter", "query": "select col from user order by col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user order by col", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "OrderBy": "0 ASC", - "Query": "select col from `user` order by col asc", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user order by col", "Instructions": { @@ -410,51 +234,10 @@ ] } }, - { - "comment": "ORDER BY on select t.*", - "query": "select t.*, t.col from user t order by t.col", - "v3-plan": "VT12001: unsupported: in scatter query, cannot ORDER BY a column that comes after `*` 
expressions in the SELECT list", - "gen4-plan": "VT12001: unsupported: '*' expression in cross-shard query" - }, - { - "comment": "ORDER BY on select *", - "query": "select *, col from user order by col", - "v3-plan": "VT12001: unsupported: in scatter query, cannot ORDER BY a column that comes after `*` expressions in the SELECT list", - "gen4-plan": "VT12001: unsupported: '*' expression in cross-shard query" - }, - { - "comment": "ORDER BY on select multi t.*", - "query": "select t.*, t.name, t.*, t.col from user t order by t.col", - "v3-plan": "VT12001: unsupported: in scatter query, cannot ORDER BY a column that comes after `*` expressions in the SELECT list", - "gen4-plan": "VT12001: unsupported: '*' expression in cross-shard query" - }, - { - "comment": "ORDER BY on select multi *", - "query": "select *, name, *, col from user order by col", - "v3-plan": "VT12001: unsupported: in scatter query, cannot ORDER BY a column that comes after `*` expressions in the SELECT list", - "gen4-plan": "VT12001: unsupported: '*' expression in cross-shard query" - }, { "comment": "ORDER BY works for select * from authoritative table", "query": "select * from authoritative order by user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from authoritative order by user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_id, col1, col2, weight_string(user_id) from authoritative where 1 != 1", - "OrderBy": "(0|3) ASC", - "Query": "select user_id, col1, col2, weight_string(user_id) from authoritative order by user_id asc", - "ResultColumns": 3, - "Table": "authoritative" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from authoritative order by user_id", "Instructions": { @@ -478,24 +261,7 @@ { "comment": "ORDER BY works for select * from authoritative table", "query": "select * from authoritative order by col1", - 
"v3-plan": { - "QueryType": "SELECT", - "Original": "select * from authoritative order by col1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_id, col1, col2, weight_string(col1) from authoritative where 1 != 1", - "OrderBy": "(1|3) ASC", - "Query": "select user_id, col1, col2, weight_string(col1) from authoritative order by col1 asc", - "ResultColumns": 3, - "Table": "authoritative" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from authoritative order by col1", "Instructions": { @@ -518,24 +284,7 @@ { "comment": "ORDER BY on scatter with text column", "query": "select a, textcol1, b from user order by a, textcol1, b", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, textcol1, b from user order by a, textcol1, b", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, textcol1, b, weight_string(a), weight_string(textcol1), weight_string(b) from `user` where 1 != 1", - "OrderBy": "(0|3) ASC, (1|4) ASC, (2|5) ASC", - "Query": "select a, textcol1, b, weight_string(a), weight_string(textcol1), weight_string(b) from `user` order by a asc, textcol1 asc, b asc", - "ResultColumns": 3, - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, textcol1, b from user order by a, textcol1, b", "Instructions": { @@ -559,24 +308,7 @@ { "comment": "ORDER BY on scatter with text column, qualified name TODO: can plan better", "query": "select a, user.textcol1, b from user order by a, textcol1, b", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, user.textcol1, b from user order by a, textcol1, b", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, 
`user`.textcol1, b, weight_string(a), weight_string(`user`.textcol1), weight_string(b) from `user` where 1 != 1", - "OrderBy": "(0|3) ASC, (1|4) ASC, (2|5) ASC", - "Query": "select a, `user`.textcol1, b, weight_string(a), weight_string(`user`.textcol1), weight_string(b) from `user` order by a asc, textcol1 asc, b asc", - "ResultColumns": 3, - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, user.textcol1, b from user order by a, textcol1, b", "Instructions": { @@ -600,24 +332,7 @@ { "comment": "ORDER BY on scatter with multiple text columns", "query": "select a, textcol1, b, textcol2 from user order by a, textcol1, b, textcol2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, textcol1, b, textcol2 from user order by a, textcol1, b, textcol2", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, textcol1, b, textcol2, weight_string(a), weight_string(textcol1), weight_string(b), weight_string(textcol2) from `user` where 1 != 1", - "OrderBy": "(0|4) ASC, (1|5) ASC, (2|6) ASC, (3|7) ASC", - "Query": "select a, textcol1, b, textcol2, weight_string(a), weight_string(textcol1), weight_string(b), weight_string(textcol2) from `user` order by a asc, textcol1 asc, b asc, textcol2 asc", - "ResultColumns": 4, - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, textcol1, b, textcol2 from user order by a, textcol1, b, textcol2", "Instructions": { @@ -641,30 +356,12 @@ { "comment": "ORDER BY invalid col number on scatter", "query": "select col from user order by 2", - "v3-plan": "VT03014: unknown column '2' in 'order clause'", - "gen4-plan": "Unknown column '2' in 'order clause'" + "plan": "Unknown column '2' in 'order clause'" }, { "comment": "ORDER BY column offset", "query": "select id as foo from music order by 1", - "v3-plan": { - "QueryType": "SELECT", - 
"Original": "select id as foo from music order by 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select id as foo, weight_string(id) from music order by 1 asc", - "ResultColumns": 1, - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id as foo from music order by 1", "Instructions": { @@ -688,22 +385,7 @@ { "comment": "ORDER BY NULL", "query": "select col from user order by null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user order by null", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` order by null", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user order by null", "Instructions": { @@ -725,18 +407,19 @@ { "comment": "ORDER BY after pull-out subquery", "query": "select col from user where col in (select col2 from user) order by col", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where col in (select col2 from user) order by col", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -748,6 +431,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -756,104 +440,22 @@ }, "FieldQuery": "select col from `user` where 1 != 1", "OrderBy": "0 ASC", - "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 
order by col asc", + "Query": "select col from `user` where :__sq_has_values and col in ::__sq1 order by col asc", "Table": "`user`" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "ORDER BY NULL for join", + "query": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null", + "plan": { "QueryType": "SELECT", - "Original": "select col from user where col in (select col2 from user) order by col", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col2 from `user` where 1 != 1", - "Query": "select col2 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "OrderBy": "0 ASC", - "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by col asc", - "Table": "`user`" - } - ] - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "ORDER BY NULL for join", - "query": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,R:0", - "JoinVars": { - "user_id": 2 - }, - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 
!= 1", - "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by null", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.col3 from music where 1 != 1", - "Query": "select music.col3 from music where music.id = :user_id order by null", - "Table": "music", - "Values": [ - ":user_id" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null", + "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null", "Instructions": { "OperatorType": "Join", "Variant": "Join", @@ -904,52 +506,7 @@ { "comment": "ORDER BY non-key column for join", "query": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by a", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,R:0", - "JoinVars": { - "user_id": 2 - }, - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1", - "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by a asc", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": 
true - }, - "FieldQuery": "select music.col3 from music where 1 != 1", - "Query": "select music.col3 from music where music.id = :user_id", - "Table": "music", - "Values": [ - ":user_id" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by a", "Instructions": { @@ -1002,52 +559,7 @@ { "comment": "ORDER BY non-key column for implicit join", "query": "select user.col1 as a, user.col2, music.col3 from user, music where user.id = music.id and user.id = 1 order by a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 as a, user.col2, music.col3 from user, music where user.id = music.id and user.id = 1 order by a", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,R:0", - "JoinVars": { - "user_id": 2 - }, - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1", - "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by a asc", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.col3 from music where 1 != 1", - "Query": "select music.col3 from music where music.id = :user_id", - "Table": "music", - "Values": [ - ":user_id" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col1 as a, user.col2, music.col3 from user, music where user.id = music.id and user.id = 1 order by a", "Instructions": { @@ -1100,54 +612,19 @@ { "comment": "ORDER BY NULL 
after pull-out subquery", "query": "select col from user where col in (select col2 from user) order by null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where col in (select col2 from user) order by null", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col2 from `user` where 1 != 1", - "Query": "select col2 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by null", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where col in (select col2 from user) order by null", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -1159,6 +636,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -1166,7 +644,7 @@ "Sharded": true }, "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1", + "Query": "select col from `user` where :__sq_has_values and col in ::__sq1", "Table": "`user`" } ] @@ -1179,22 +657,7 @@ { "comment": "ORDER BY RAND()", "query": "select col from user order by RAND()", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user order by RAND()", - "Instructions": { - "OperatorType": "Route", - 
"Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` order by RAND()", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user order by RAND()", "Instructions": { @@ -1216,52 +679,7 @@ { "comment": "ORDER BY RAND() for join", "query": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by RAND()", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by RAND()", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,R:0", - "JoinVars": { - "user_id": 2 - }, - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1", - "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by RAND()", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.col3 from music where 1 != 1", - "Query": "select music.col3 from music where music.id = :user_id order by RAND()", - "Table": "music", - "Values": [ - ":user_id" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by RAND()", "Instructions": { @@ -1314,54 +732,19 @@ { "comment": "ORDER BY RAND() after pull-out subquery", "query": "select col from user where col in (select 
col2 from user) order by rand()", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where col in (select col2 from user) order by rand()", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col2 from `user` where 1 != 1", - "Query": "select col2 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by rand()", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where col in (select col2 from user) order by rand()", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -1373,6 +756,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -1380,7 +764,7 @@ "Sharded": true }, "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by rand()", + "Query": "select col from `user` where :__sq_has_values and col in ::__sq1 order by rand()", "Table": "`user`" } ] @@ -1393,26 +777,7 @@ { "comment": "Order by, '*' expression", "query": "select * from user where id = 5 order by col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where id = 5 order by col", - "Instructions": { - "OperatorType": "Route", - "Variant": 
"EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 5 order by col asc", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user where id = 5 order by col", "Instructions": { @@ -1438,26 +803,7 @@ { "comment": "Order by, qualified '*' expression", "query": "select user.* from user where id = 5 order by user.col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.* from user where id = 5 order by user.col", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.* from `user` where 1 != 1", - "Query": "select `user`.* from `user` where id = 5 order by `user`.col asc", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.* from user where id = 5 order by user.col", "Instructions": { @@ -1483,26 +829,7 @@ { "comment": "Order by, '*' expression with qualified reference", "query": "select * from user where id = 5 order by user.col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where id = 5 order by user.col", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 5 order by `user`.col asc", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user where id = 5 order by user.col", "Instructions": { @@ -1528,7 +855,7 @@ { "comment": "Order by, '*' expression in a subquery", "query": "select u.id, e.id 
from user u join user_extra e where u.col = e.col and u.col in (select * from user where user.id = u.id order by col)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id, e.id from user u join user_extra e where u.col = e.col and u.col in (select * from user where user.id = u.id order by col)", "Instructions": { @@ -1563,43 +890,6 @@ "Table": "user_extra" } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select u.id, e.id from user u join user_extra e where u.col = e.col and u.col in (select * from user where user.id = u.id order by col)", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1,R:0", - "JoinVars": { - "u_col": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.col, u.id from `user` as u where 1 != 1", - "Query": "select u.col, u.id from `user` as u where u.col in (select * from `user` where `user`.id = u.id order by col asc)", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select e.id from user_extra as e where 1 != 1", - "Query": "select e.id from user_extra as e where e.col = :u_col", - "Table": "user_extra" - } - ] }, "TablesUsed": [ "user.user", @@ -1610,8 +900,7 @@ { "comment": "Order by, verify outer symtab is searched according to its own context.", "query": "select u.id from user u having u.id in (select col2 from user where user.id = u.id order by u.col)", - "v3-plan": "VT03020: column u.col not found in subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id from user u having u.id in (select col2 from user where user.id = u.id order by u.col)", "Instructions": { @@ -1633,44 +922,22 @@ { "comment": "Order by, qualified '*' expression, name mismatched.", "query": "select user.* 
from user where id = 5 order by e.col", - "v3-plan": "VT03019: column e.col not found", - "gen4-plan": "column 'e.col' not found" + "plan": "column 'e.col' not found" }, { "comment": "Order by, invalid column number", "query": "select col from user order by 18446744073709551616", - "v3-plan": "VT13001: [BUG] error parsing column number: 18446744073709551616", - "gen4-plan": "error parsing column number: 18446744073709551616" + "plan": "error parsing column number: 18446744073709551616" }, { "comment": "Order by, out of range column number", "query": "select col from user order by 2", - "v3-plan": "VT03014: unknown column '2' in 'order clause'", - "gen4-plan": "Unknown column '2' in 'order clause'" + "plan": "Unknown column '2' in 'order clause'" }, { "comment": "Order by, '*' expression with qualified reference and using collate", "query": "select * from user where id = 5 order by user.col collate utf8_general_ci", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where id = 5 order by user.col collate utf8_general_ci", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 5 order by `user`.col collate utf8_general_ci asc", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user where id = 5 order by user.col collate utf8_general_ci", "Instructions": { @@ -1696,26 +963,7 @@ { "comment": "Order by with math functions", "query": "select * from user where id = 5 order by -col1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where id = 5 order by -col1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 
1", - "Query": "select * from `user` where id = 5 order by -col1 asc", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user where id = 5 order by -col1", "Instructions": { @@ -1741,26 +989,7 @@ { "comment": "Order by with string operations", "query": "select * from user where id = 5 order by concat(col,col1) collate utf8_general_ci desc", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where id = 5 order by concat(col,col1) collate utf8_general_ci desc", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 5 order by concat(col, col1) collate utf8_general_ci desc", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user where id = 5 order by concat(col,col1) collate utf8_general_ci desc", "Instructions": { @@ -1786,26 +1015,7 @@ { "comment": "Order by with math operations", "query": "select * from user where id = 5 order by id+col collate utf8_general_ci desc", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where id = 5 order by id+col collate utf8_general_ci desc", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 5 order by id + col collate utf8_general_ci desc", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user where id = 5 order by id+col collate utf8_general_ci desc", "Instructions": { @@ -1831,26 +1041,7 @@ { 
"comment": "Order by derived table column", "query": "select * from user u join (select user_id from user_extra where user_id = 5) eu on u.id = eu.user_id where u.id = 5 order by eu.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user u join (select user_id from user_extra where user_id = 5) eu on u.id = eu.user_id where u.id = 5 order by eu.user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` as u join (select user_id from user_extra where 1 != 1) as eu on u.id = eu.user_id where 1 != 1", - "Query": "select * from `user` as u join (select user_id from user_extra where user_id = 5) as eu on u.id = eu.user_id where u.id = 5 order by eu.user_id asc", - "Table": "`user`, user_extra", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user u join (select user_id from user_extra where user_id = 5) eu on u.id = eu.user_id where u.id = 5 order by eu.user_id", "Instructions": { @@ -1860,8 +1051,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from `user` as u, (select user_id from user_extra where 1 != 1) as eu where 1 != 1", - "Query": "select * from `user` as u, (select user_id from user_extra where user_id = 5) as eu where u.id = 5 and u.id = eu.user_id order by eu.user_id asc", + "FieldQuery": "select * from (select user_id from user_extra where 1 != 1) as eu, `user` as u where 1 != 1", + "Query": "select * from (select user_id from user_extra where user_id = 5) as eu, `user` as u where u.id = 5 and u.id = eu.user_id order by eu.user_id asc", "Table": "`user`, user_extra", "Values": [ "INT64(5)" @@ -1877,26 +1068,7 @@ { "comment": "routing rules: order by gets pushed for routes", "query": "select col from route1 where id = 1 order by col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col 
from route1 where id = 1 order by col", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` as route1 where 1 != 1", - "Query": "select col from `user` as route1 where id = 1 order by col asc", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from route1 where id = 1 order by col", "Instructions": { @@ -1922,26 +1094,7 @@ { "comment": "LIMIT", "query": "select col1 from user where id = 1 limit 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col1 from user where id = 1 limit 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1 from `user` where 1 != 1", - "Query": "select col1 from `user` where id = 1 limit 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1 from user where id = 1 limit 1", "Instructions": { @@ -1951,63 +1104,23 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1 from `user` where 1 != 1", - "Query": "select col1 from `user` where id = 1 limit 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "limit for joins. 
Can't push down the limit because result\n# counts get multiplied by join operations.", - "query": "select user.col from user join user_extra limit 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra limit 1", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - ] - } - }, - "gen4-plan": { + "FieldQuery": "select col1 from `user` where 1 != 1", + "Query": "select col1 from `user` where id = 1 limit 1", + "Table": "`user`", + "Values": [ + "INT64(1)" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "limit for joins. 
Can't push down the limit because result\n# counts get multiplied by join operations.", + "query": "select user.col from user join user_extra limit 1", + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra limit 1", "Instructions": { @@ -2055,28 +1168,7 @@ { "comment": "limit for scatter", "query": "select col from user limit 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user limit 1", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` limit :__upper_limit", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user limit 1", "Instructions": { @@ -2104,28 +1196,7 @@ { "comment": "limit for scatter with bind var", "query": "select col from user limit :a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user limit :a", - "Instructions": { - "OperatorType": "Limit", - "Count": ":a", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` limit :__upper_limit", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user limit :a", "Instructions": { @@ -2153,28 +1224,7 @@ { "comment": "cross-shard expression in parenthesis with limit", "query": "select * from user where (id1 = 4 AND name1 ='abc') limit 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where (id1 = 4 AND name1 ='abc') limit 5", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(5)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": 
{ - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id1 = 4 and name1 = 'abc' limit :__upper_limit", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user where (id1 = 4 AND name1 ='abc') limit 5", "Instructions": { @@ -2202,49 +1252,7 @@ { "comment": "scatter limit after pullout subquery", "query": "select col from user where col in (select col1 from user) limit 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where col in (select col1 from user) limit 1", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1 from `user` where 1 != 1", - "Query": "select col1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 limit :__upper_limit", - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where col in (select col1 from user) limit 1", "Instructions": { @@ -2252,14 +1260,15 @@ "Count": "INT64(1)", "Inputs": [ { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -2271,6 +1280,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", 
"Keyspace": { @@ -2278,7 +1288,7 @@ "Sharded": true }, "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 limit :__upper_limit", + "Query": "select col from `user` where :__sq_has_values and col in ::__sq1", "Table": "`user`" } ] @@ -2293,22 +1303,7 @@ { "comment": "limit on reference table", "query": "select col from ref limit 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from ref limit 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from ref where 1 != 1", - "Query": "select col from ref limit 1", - "Table": "ref" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from ref limit 1", "Instructions": { @@ -2330,28 +1325,7 @@ { "comment": "arithmetic limit", "query": "select id from user limit 1+1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user limit 1+1", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(2)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` limit :__upper_limit", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user limit 1+1", "Instructions": { @@ -2379,24 +1353,7 @@ { "comment": "order by column alias", "query": "select id as foo from music order by foo", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id as foo from music order by foo", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select id as foo, 
weight_string(id) from music order by foo asc", - "ResultColumns": 1, - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id as foo from music order by foo", "Instructions": { @@ -2420,24 +1377,7 @@ { "comment": "column alias for a table column in order by", "query": "select id as foo, id2 as id from music order by id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id as foo, id2 as id from music order by id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id as foo, id2 as id, weight_string(id2) from music where 1 != 1", - "OrderBy": "(1|2) ASC", - "Query": "select id as foo, id2 as id, weight_string(id2) from music order by id asc", - "ResultColumns": 2, - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id as foo, id2 as id from music order by id", "Instructions": { @@ -2461,43 +1401,7 @@ { "comment": "ordering on the left side of the join", "query": "select name from user, music order by name", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select name from user, music order by name", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc", - "ResultColumns": 1, - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music where 1 != 1", - "Query": "select 1 from music", - "Table": "music" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": 
"SELECT", "Original": "select name from user, music order by name", "Instructions": { @@ -2540,89 +1444,36 @@ { "comment": "aggregation and non-aggregations column without group by", "query": "select count(id), num from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select count(id), num from user", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(id), num from `user` where 1 != 1", - "Query": "select count(id), num from `user`", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(id), num from user", "Instructions": { "OperatorType": "Aggregate", "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count(id), random(1) AS num", + "Aggregates": "sum_count(0) AS count(id), any_value(1) AS num", "Inputs": [ { "OperatorType": "Route", "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(id), num from `user` where 1 != 1", - "Query": "select count(id), num from `user`", - "Table": "`user`" - } - ] - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "aggregation and non-aggregations column with order by", - "query": "select count(id), num from user order by 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select count(id), num from user order by 2", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(1|2) ASC", - "ResultColumns": 2, - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(id), num, weight_string(num) from `user` where 
1 != 1", - "Query": "select count(id), num, weight_string(num) from `user`", - "Table": "`user`" - } - ] + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(id), num from `user` where 1 != 1", + "Query": "select count(id), num from `user`", + "Table": "`user`" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "aggregation and non-aggregations column with order by", + "query": "select count(id), num from user order by 2", + "plan": { "QueryType": "SELECT", "Original": "select count(id), num from user order by 2", "Instructions": { @@ -2634,7 +1485,7 @@ { "OperatorType": "Aggregate", "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count(id), random(1) AS num, random(2)", + "Aggregates": "sum_count(0) AS count(id), any_value(1) AS num, any_value(2)", "Inputs": [ { "OperatorType": "Route", @@ -2659,32 +1510,7 @@ { "comment": "aggregation and non-aggregations column with group by", "query": "select count(id), num from user group by 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select count(id), num from user group by 2", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(0) AS count", - "GroupBy": "1", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by 2, weight_string(num)", - "OrderBy": "(1|2) ASC", - "Query": "select count(id), num, weight_string(num) from `user` group by 2, weight_string(num) order by num asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(id), num from user group by 2", "Instructions": { @@ -2716,39 +1542,7 @@ { "comment": "aggregation and non-aggregations column with group by and order by", "query": "select count(id), num from user group by 2 
order by 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select count(id), num from user group by 2 order by 1", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "0 ASC", - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(0) AS count", - "GroupBy": "1", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by 2, weight_string(num)", - "OrderBy": "(1|2) ASC", - "Query": "select count(id), num, weight_string(num) from `user` group by 2, weight_string(num) order by num asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(id), num from user group by 2 order by 1", "Instructions": { @@ -2787,38 +1581,46 @@ { "comment": "join order by with ambiguous column reference ; valid in MySQL", "query": "select name, name from user, music order by name", - "v3-plan": "VT03021: ambiguous column reference: `name`", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select name, name from user, music order by name", "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:0", - "TableName": "`user`_music", + "OperatorType": "SimpleProjection", + "Columns": [ + 0, + 0 + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music where 1 != 1", - "Query": "select 1 
from music", - "Table": "music" + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1", + "OrderBy": "(0|1) ASC", + "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music where 1 != 1", + "Query": "select 1 from music", + "Table": "music" + } + ] } ] }, @@ -2831,8 +1633,7 @@ { "comment": "order by with ambiguous column reference ; valid in MySQL", "query": "select id, id from user order by id", - "v3-plan": "VT03021: ambiguous column reference: id", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, id from user order by id", "Instructions": { @@ -2856,8 +1657,7 @@ { "comment": "Scatter order by and aggregation: order by column must reference column from select list", "query": "select col, count(*) from user group by col order by c1", - "v3-plan": "VT12001: unsupported: memory sort: ORDER BY must reference a column in the SELECT list: c1 asc", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, count(*) from user group by col order by c1", "Instructions": { @@ -2869,7 +1669,7 @@ { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "sum_count_star(1) AS count(*), random(2) AS c1, random(3)", + "Aggregates": "sum_count_star(1) AS count(*), any_value(2) AS c1, any_value(3)", "GroupBy": "0", "Inputs": [ { @@ -2896,52 +1696,14 @@ { "comment": "Distinct with cross shard query", "query": "select distinct user.a from user join user_extra", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select distinct user.a from user join user_extra", 
"Instructions": { "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.a from `user` where 1 != 1", - "Query": "select `user`.a from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select distinct user.a from user join user_extra", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "(0|1)", + "Collations": [ + "(0:1)" + ], "ResultColumns": 1, "Inputs": [ { @@ -2957,9 +1719,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.a, weight_string(`user`.a) from `user` where 1 != 1 group by `user`.a, weight_string(`user`.a)", - "OrderBy": "(0|1) ASC", - "Query": "select `user`.a, weight_string(`user`.a) from `user` group by `user`.a, weight_string(`user`.a) order by `user`.a asc", + "FieldQuery": "select `user`.a, weight_string(`user`.a) from `user` where 1 != 1", + "Query": "select distinct `user`.a, weight_string(`user`.a) from `user`", "Table": "`user`" }, { @@ -2969,8 +1730,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from user_extra where 1 != 1 group by .0", - "Query": "select 1 from user_extra group by .0", + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select distinct 1 from user_extra", "Table": "user_extra" } ] @@ -2986,37 +1747,15 @@ { "comment": "Distinct with column alias", "query": "select distinct a as c, a from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select distinct a as c, a from 
user", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "0, 1", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a as c, a, weight_string(a) from `user` where 1 != 1", - "OrderBy": "(0|2) ASC, (0|2) ASC", - "Query": "select distinct a as c, a, weight_string(a) from `user` order by c asc, a asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select distinct a as c, a from user", "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "(0|2), (1|2)", + "OperatorType": "Distinct", + "Collations": [ + "(0:2)", + "(1:2)" + ], "ResultColumns": 2, "Inputs": [ { @@ -3026,9 +1765,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a as c, a, weight_string(a) from `user` where 1 != 1 group by a, a, weight_string(a)", - "OrderBy": "(0|2) ASC, (0|2) ASC", - "Query": "select a as c, a, weight_string(a) from `user` group by a, a, weight_string(a) order by a asc, a asc", + "FieldQuery": "select a as c, a, weight_string(a) from `user` where 1 != 1", + "Query": "select distinct a as c, a, weight_string(a) from `user`", "Table": "`user`" } ] @@ -3041,14 +1779,15 @@ { "comment": "Distinct with same column", "query": "select distinct a, a from user", - "v3-plan": "generating ORDER BY clause: VT03021: ambiguous column reference: a", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select distinct a, a from user", "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "(0|2), (1|2)", + "OperatorType": "Distinct", + "Collations": [ + "(0:2)", + "(1:2)" + ], "ResultColumns": 2, "Inputs": [ { @@ -3058,9 +1797,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, a, weight_string(a) from `user` where 1 != 1 group by a, a, weight_string(a)", - "OrderBy": "(0|2) ASC, (0|2) ASC", 
- "Query": "select a, a, weight_string(a) from `user` group by a, a, weight_string(a) order by a asc, a asc", + "FieldQuery": "select a, a, weight_string(a) from `user` where 1 != 1", + "Query": "select distinct a, a, weight_string(a) from `user`", "Table": "`user`" } ] @@ -3073,8 +1811,7 @@ { "comment": "Order by has subqueries", "query": "select id from unsharded order by (select id from unsharded)", - "v3-plan": "VT12001: unsupported: subqueries disallowed in sqlparser.OrderBy", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from unsharded order by (select id from unsharded)", "Instructions": { @@ -3096,13 +1833,12 @@ { "comment": "Equal filter with hexadecimal value", "query": "select count(*) a from user having a = 0x01", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) a from user having a = 0x01", "Instructions": { "OperatorType": "Filter", - "Predicate": ":0 = 0x01", + "Predicate": "count(*) = 0x01", "Inputs": [ { "OperatorType": "Aggregate", @@ -3132,8 +1868,7 @@ { "comment": "Order by uses cross-shard expression", "query": "select id from user order by id+1", - "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: id + 1", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user order by id+1", "Instructions": { @@ -3157,8 +1892,7 @@ { "comment": "Order by column number with collate", "query": "select user.col1 as a from user order by 1 collate utf8_general_ci", - "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: 1 collate utf8_general_ci", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col1 as a from user order by 1 collate utf8_general_ci", "Instructions": { @@ -3182,8 +1916,7 @@ { "comment": "Order by uses cross-shard expression", "query": "select id from user order by id+1", - "v3-plan": "VT12001: 
unsupported: in scatter query: complex ORDER BY expression: id + 1", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user order by id+1", "Instructions": { @@ -3207,8 +1940,7 @@ { "comment": "Order by column number with collate", "query": "select user.col1 as a from user order by 1 collate utf8_general_ci", - "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: 1 collate utf8_general_ci", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col1 as a from user order by 1 collate utf8_general_ci", "Instructions": { @@ -3232,8 +1964,7 @@ { "comment": "Order by column number with coalesce with columns from both sides", "query": "select id from user, user_extra order by coalesce(user.col, user_extra.col)", - "v3-plan": "VT12001: unsupported: memory sort: complex ORDER BY expression: coalesce(`user`.col, user_extra.col)", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user, user_extra order by coalesce(user.col, user_extra.col)", "Instructions": { @@ -3286,67 +2017,56 @@ { "comment": "having filter with %", "query": "select a.tcol1 from user a join music b where a.tcol1 = b.tcol2 group by a.tcol1 having repeat(a.tcol1,min(a.id)) like \"A\\%B\" order by a.tcol1", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a.tcol1 from user a join music b where a.tcol1 = b.tcol2 group by a.tcol1 having repeat(a.tcol1,min(a.id)) like \"A\\%B\" order by a.tcol1", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(0|2) ASC", + "ResultColumns": 1, "Inputs": [ { "OperatorType": "Filter", - "Predicate": "repeat(a.tcol1, :1) like 'A\\%B'", + "Predicate": "repeat(a.tcol1, min(a.id)) like 'A\\%B'", "Inputs": [ { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "min(1) AS 
min(a.id)", + "Aggregates": "min(1|3) AS min(a.id)", "GroupBy": "(0|2)", "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 0] as tcol1", - "[COLUMN 2] as min(a.id)", - "[COLUMN 1]" - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:1,L:0,L:2,L:3", + "JoinVars": { + "a_tcol1": 1 + }, + "TableName": "`user`_music", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:2,L:1", - "JoinVars": { - "a_tcol1": 0 + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a.tcol1, min(a.id), weight_string(a.tcol1) from `user` as a where 1 != 1 group by a.tcol1, weight_string(a.tcol1)", - "OrderBy": "(0|2) ASC", - "Query": "select a.tcol1, min(a.id), weight_string(a.tcol1) from `user` as a group by a.tcol1, weight_string(a.tcol1) order by a.tcol1 asc", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music as b where 1 != 1 group by 1", - "Query": "select 1 from music as b where b.tcol2 = :a_tcol1 group by 1", - "Table": "music" - } - ] + "FieldQuery": "select min(a.id), a.tcol1, weight_string(a.tcol1), weight_string(a.id) from `user` as a where 1 != 1 group by a.tcol1, weight_string(a.tcol1), weight_string(a.id)", + "OrderBy": "(1|2) ASC", + "Query": "select min(a.id), a.tcol1, weight_string(a.tcol1), weight_string(a.id) from `user` as a group by a.tcol1, weight_string(a.tcol1), weight_string(a.id) order by a.tcol1 asc", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music as b where 1 != 1 group by .0", + "Query": "select 1 
from music as b where b.tcol2 = :a_tcol1 group by .0", + "Table": "music" } ] } @@ -3361,5 +2081,105 @@ "user.user" ] } + }, + { + "comment": "distinct with order by using aggregation engine", + "query": "select distinct col from user where id between :vtg1 and :vtg2 order by col asc", + "plan": { + "QueryType": "SELECT", + "Original": "select distinct col from user where id between :vtg1 and :vtg2 order by col asc", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "GroupBy": "0", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select col from `user` where 1 != 1 group by col", + "OrderBy": "0 ASC", + "Query": "select col from `user` where id between :vtg1 and :vtg2 group by col order by col asc", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "distinct with order by having additional non-order by columns in the selection using aggregation engine", + "query": "select distinct foo, col from user where id between :vtg1 and :vtg2 order by col asc", + "plan": { + "QueryType": "SELECT", + "Original": "select distinct foo, col from user where id between :vtg1 and :vtg2 order by col asc", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "GroupBy": "1, (0|2)", + "ResultColumns": 2, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, col, weight_string(foo) from `user` where 1 != 1 group by col, foo, weight_string(foo)", + "OrderBy": "1 ASC, (0|2) ASC", + "Query": "select foo, col, weight_string(foo) from `user` where id between :vtg1 and :vtg2 group by col, foo, weight_string(foo) order by col asc, foo asc", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "distinct with order by having no overalap with the selection columns - 
using distinct engine", + "query": "select distinct foo from user where id between :vtg1 and :vtg2 order by col asc", + "plan": { + "QueryType": "SELECT", + "Original": "select distinct foo from user where id between :vtg1 and :vtg2 order by col asc", + "Instructions": { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "1 ASC", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Distinct", + "Collations": [ + "(0:2)", + "1" + ], + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, col, weight_string(foo) from `user` where 1 != 1", + "Query": "select distinct foo, col, weight_string(foo) from `user` where id between :vtg1 and :vtg2", + "Table": "`user`" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } } -] +] \ No newline at end of file diff --git a/go/vt/vtgate/planbuilder/testdata/rails_cases.json b/go/vt/vtgate/planbuilder/testdata/rails_cases.json index 94ed9961b87..ef36b79c855 100644 --- a/go/vt/vtgate/planbuilder/testdata/rails_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/rails_cases.json @@ -2,122 +2,7 @@ { "comment": "Author5.joins(books: [{orders: :customer}, :supplier])", "query": "select author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - 
"JoinColumnIndexes": "L:0,L:1,L:2,L:3", - "JoinVars": { - "book6s_supplier5_id": 4 - }, - "TableName": "author5s, book6s_book6s_order2s_order2s_customer2s_supplier5s", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4", - "JoinVars": { - "order2s_customer2_id": 5 - }, - "TableName": "author5s, book6s_book6s_order2s_order2s_customer2s", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4,R:0", - "JoinVars": { - "book6s_order2s_order2_id": 5 - }, - "TableName": "author5s, book6s_book6s_order2s_order2s", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4,R:0", - "JoinVars": { - "book6s_id": 5 - }, - "TableName": "author5s, book6s_book6s_order2s", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select author5s.id, author5s.`name`, author5s.created_at, author5s.updated_at, book6s.supplier5_id, book6s.id from author5s join book6s on book6s.author5_id = author5s.id where 1 != 1", - "Query": "select author5s.id, author5s.`name`, author5s.created_at, author5s.updated_at, book6s.supplier5_id, book6s.id from author5s join book6s on book6s.author5_id = author5s.id", - "Table": "author5s, book6s" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select book6s_order2s.order2_id from book6s_order2s where 1 != 1", - "Query": "select book6s_order2s.order2_id from book6s_order2s where book6s_order2s.book6_id = :book6s_id", - "Table": "book6s_order2s", - "Values": [ - ":book6s_id" - ], - "Vindex": "binary_md5" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select order2s.customer2_id from order2s where 1 != 1", - "Query": "select 
order2s.customer2_id from order2s where order2s.id = :book6s_order2s_order2_id", - "Table": "order2s" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from customer2s where 1 != 1", - "Query": "select 1 from customer2s where customer2s.id = :order2s_customer2_id", - "Table": "customer2s", - "Values": [ - ":order2s_customer2_id" - ], - "Vindex": "binary_md5" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from supplier5s where 1 != 1", - "Query": "select 1 from supplier5s where supplier5s.id = :book6s_supplier5_id", - "Table": "supplier5s", - "Values": [ - ":book6s_supplier5_id" - ], - "Vindex": "binary_md5" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id", "Instructions": { diff --git a/go/vt/vtgate/planbuilder/testdata/reference_cases.json b/go/vt/vtgate/planbuilder/testdata/reference_cases.json index ac5338ecd3a..6b04b7790d8 100644 --- a/go/vt/vtgate/planbuilder/testdata/reference_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/reference_cases.json @@ -2,22 +2,7 @@ { "comment": "select from unqualified ambiguous reference routes to reference source", "query": "select * from ambiguous_ref_with_source", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from ambiguous_ref_with_source", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from ambiguous_ref_with_source where 1 != 1", - "Query": "select * from 
ambiguous_ref_with_source", - "Table": "ambiguous_ref_with_source" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from ambiguous_ref_with_source", "Instructions": { @@ -39,41 +24,7 @@ { "comment": "join with unqualified ambiguous reference table routes to optimal keyspace", "query": "select user.col from user join ambiguous_ref_with_source", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join ambiguous_ref_with_source", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_ambiguous_ref_with_source", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from ambiguous_ref_with_source where 1 != 1", - "Query": "select 1 from ambiguous_ref_with_source", - "Table": "ambiguous_ref_with_source" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join ambiguous_ref_with_source", "Instructions": { @@ -96,22 +47,7 @@ { "comment": "ambiguous unqualified reference table self-join routes to reference source", "query": "select r1.col from ambiguous_ref_with_source r1 join ambiguous_ref_with_source", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select r1.col from ambiguous_ref_with_source r1 join ambiguous_ref_with_source", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select r1.col from ambiguous_ref_with_source as r1 join ambiguous_ref_with_source where 1 != 1", - "Query": "select r1.col from ambiguous_ref_with_source as r1 join 
ambiguous_ref_with_source", - "Table": "ambiguous_ref_with_source" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select r1.col from ambiguous_ref_with_source r1 join ambiguous_ref_with_source", "Instructions": { @@ -133,41 +69,7 @@ { "comment": "ambiguous unqualified reference table can merge with other opcodes left to right.", "query": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join user", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "ambiguous_ref_with_source_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source where 1 != 1", - "Query": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source", - "Table": "ambiguous_ref_with_source" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user`", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join user", "Instructions": { @@ -190,45 +92,7 @@ { "comment": "ambiguous unqualified reference table can merge with other opcodes left to right and vindex value is in the plan", "query": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join (select aa from user where user.id=1) user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join (select aa from user where user.id=1) user", - "Instructions": { - "OperatorType": "Join", - "Variant": 
"Join", - "JoinColumnIndexes": "L:0", - "TableName": "ambiguous_ref_with_source_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source where 1 != 1", - "Query": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source", - "Table": "ambiguous_ref_with_source" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from (select aa from `user` where 1 != 1) as `user` where 1 != 1", - "Query": "select 1 from (select aa from `user` where `user`.id = 1) as `user`", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join (select aa from user where user.id=1) user", "Instructions": { @@ -238,8 +102,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source, (select aa from `user` where 1 != 1) as `user` where 1 != 1", - "Query": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source, (select aa from `user` where `user`.id = 1) as `user`", + "FieldQuery": "select ambiguous_ref_with_source.col from (select aa from `user` where 1 != 1) as `user`, ambiguous_ref_with_source where 1 != 1", + "Query": "select ambiguous_ref_with_source.col from (select aa from `user` where `user`.id = 1) as `user`, ambiguous_ref_with_source", "Table": "`user`, ambiguous_ref_with_source", "Values": [ "INT64(1)" @@ -255,41 +119,7 @@ { "comment": "qualified join to reference table routes to optimal keyspace", "query": "select user.col from user join main.ambiguous_ref_with_source", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join 
main.ambiguous_ref_with_source", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_ambiguous_ref_with_source", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from ambiguous_ref_with_source where 1 != 1", - "Query": "select 1 from ambiguous_ref_with_source", - "Table": "ambiguous_ref_with_source" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join main.ambiguous_ref_with_source", "Instructions": { @@ -332,27 +162,9 @@ } }, { - "comment": "insert into qualified ambiguous reference table routes v3 to requested keyspace gen4 to source", + "comment": "insert into qualified ambiguous reference table routes to source", "query": "insert into user.ambiguous_ref_with_source(col) values(1)", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into user.ambiguous_ref_with_source(col) values(1)", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Sharded", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "insert into ambiguous_ref_with_source(col) values (1)", - "TableName": "ambiguous_ref_with_source" - }, - "TablesUsed": [ - "user.ambiguous_ref_with_source" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into user.ambiguous_ref_with_source(col) values(1)", "Instructions": { @@ -394,10 +206,9 @@ } }, { - "comment": "update qualified ambiguous reference table v3 error no primary vindex v4 route to source", + "comment": "update qualified ambiguous reference table route to source", "query": "update 
user.ambiguous_ref_with_source set col = 1", - "v3-plan": "VT09001: table 'ambiguous_ref_with_source' does not have a primary vindex", - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user.ambiguous_ref_with_source set col = 1", "Instructions": { @@ -439,10 +250,9 @@ } }, { - "comment": "delete from qualified ambiguous reference table v3 error no primary vindex v4 route to source", + "comment": "delete from qualified ambiguous reference table route to source", "query": "delete from user.ambiguous_ref_with_source where col = 1", - "v3-plan": "VT09001: table 'ambiguous_ref_with_source' does not have a primary vindex", - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from user.ambiguous_ref_with_source where col = 1", "Instructions": { @@ -464,22 +274,7 @@ { "comment": "join with unqualified unambiguous ref with source routes to requested table", "query": "select user.col from user join ref_with_source", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join ref_with_source", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` join ref_with_source where 1 != 1", - "Query": "select `user`.col from `user` join ref_with_source", - "Table": "`user`, ref_with_source" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join ref_with_source", "Instructions": { @@ -502,41 +297,7 @@ { "comment": "join with unqualified reference optimize routes when source & reference have different names", "query": "select user.col from user join source_of_ref", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join source_of_ref", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_source_of_ref", - "Inputs": [ - { - "OperatorType": "Route", - 
"Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from source_of_ref where 1 != 1", - "Query": "select 1 from source_of_ref", - "Table": "source_of_ref" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join source_of_ref", "Instructions": { @@ -559,41 +320,7 @@ { "comment": "join with unqualified reference respects routing rules", "query": "select user.col from user join rerouted_ref", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join rerouted_ref", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_rerouted_ref", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from rerouted_ref where 1 != 1", - "Query": "select 1 from rerouted_ref", - "Table": "rerouted_ref" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join rerouted_ref", "Instructions": { @@ -616,41 +343,7 @@ { "comment": "join with reference to unqualified source routes to optimal keyspace", "query": "select user.col from user join global_ref", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join global_ref", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": 
"`user`_global_ref", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from global_ref where 1 != 1", - "Query": "select 1 from global_ref", - "Table": "global_ref" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join global_ref", "Instructions": { @@ -671,27 +364,9 @@ } }, { - "comment": "insert into qualified reference with unqualified source routes v3 to requested keyspace gen4 to source", + "comment": "insert into qualified reference with unqualified source routes to source", "query": "insert into user.global_ref(col) values(1)", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into user.global_ref(col) values(1)", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Sharded", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "insert into global_ref(col) values (1)", - "TableName": "global_ref" - }, - "TablesUsed": [ - "user.global_ref" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into user.global_ref(col) values(1)", "Instructions": { @@ -710,4 +385,4 @@ ] } } -] +] \ No newline at end of file diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases.json b/go/vt/vtgate/planbuilder/testdata/select_cases.json index 1934414b395..99d23b108b4 100644 --- a/go/vt/vtgate/planbuilder/testdata/select_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/select_cases.json @@ -2,22 +2,7 @@ { "comment": "No column referenced", "query": "select 1 from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from user", - "Instructions": { - 
"OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 from user", "Instructions": { @@ -39,22 +24,7 @@ { "comment": "'*' expression for simple route", "query": "select user.* from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.* from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.* from `user` where 1 != 1", - "Query": "select `user`.* from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.* from user", "Instructions": { @@ -76,22 +46,7 @@ { "comment": "unqualified '*' expression for simple route", "query": "select * from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user", "Instructions": { @@ -113,23 +68,7 @@ { "comment": "select with timeout directive sets QueryTimeout in the route", "query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user`", - "QueryTimeout": 1000, - "Table": "`user`" - } - }, - "gen4-plan": { + 
"plan": { "QueryType": "SELECT", "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user", "Instructions": { @@ -152,30 +91,7 @@ { "comment": "select aggregation with timeout directive sets QueryTimeout in the route", "query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from `user` where 1 != 1", - "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from `user`", - "QueryTimeout": 1000, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user", "Instructions": { @@ -205,29 +121,7 @@ { "comment": "select limit with timeout directive sets QueryTimeout in the route", "query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user limit 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user limit 10", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user` limit :__upper_limit", - "QueryTimeout": 1000, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user limit 10", "Instructions": { @@ -256,23 +150,7 @@ { "comment": "select limit with timeout directive sets QueryTimeout in the route", "query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from main.unsharded limit 
10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from main.unsharded limit 10", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from unsharded where 1 != 1", - "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from unsharded limit 10", - "QueryTimeout": 1000, - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from main.unsharded limit 10", "Instructions": { @@ -295,23 +173,7 @@ { "comment": "select with partial scatter directive", "query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from `user`", - "ScatterErrorsAsWarnings": true, - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from user", "Instructions": { @@ -334,30 +196,7 @@ { "comment": "select aggregation with partial scatter directive", "query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from `user` where 1 != 1", - "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`", - 
"ScatterErrorsAsWarnings": true, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user", "Instructions": { @@ -387,30 +226,7 @@ { "comment": "select aggregation with partial scatter directive - added comments to try to confuse the hint extraction", "query": "/*VT_SPAN_CONTEXT=123*/select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "/*VT_SPAN_CONTEXT=123*/select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from `user` where 1 != 1", - "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`", - "ScatterErrorsAsWarnings": true, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "/*VT_SPAN_CONTEXT=123*/select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user", "Instructions": { @@ -440,29 +256,7 @@ { "comment": "select limit with partial scatter directive", "query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user limit 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user limit 10", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from `user` limit :__upper_limit", - "ScatterErrorsAsWarnings": true, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 
/*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user limit 10", "Instructions": { @@ -491,22 +285,7 @@ { "comment": "qualified '*' expression for simple route", "query": "select user.* from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.* from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.* from `user` where 1 != 1", - "Query": "select `user`.* from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.* from user", "Instructions": { @@ -528,22 +307,7 @@ { "comment": "fully qualified '*' expression for simple route", "query": "select user.user.* from user.user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.user.* from user.user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.* from `user` where 1 != 1", - "Query": "select `user`.* from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.user.* from user.user", "Instructions": { @@ -565,22 +329,7 @@ { "comment": "select * from authoritative table", "query": "select * from authoritative", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from authoritative", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_id, col1, col2 from authoritative where 1 != 1", - "Query": "select user_id, col1, col2 from authoritative", - "Table": "authoritative" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from authoritative", "Instructions": { @@ -602,22 +351,7 @@ { "comment": "select * from join of authoritative tables", "query": "select * from authoritative a join authoritative b on 
a.user_id=b.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from authoritative a join authoritative b on a.user_id=b.user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a join authoritative as b on a.user_id = b.user_id where 1 != 1", - "Query": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a join authoritative as b on a.user_id = b.user_id", - "Table": "authoritative" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from authoritative a join authoritative b on a.user_id=b.user_id", "Instructions": { @@ -639,28 +373,12 @@ { "comment": "test table lookup failure for authoritative code path", "query": "select a.* from authoritative", - "v3-plan": "VT05004: table 'a' does not exist", - "gen4-plan": "Unknown table 'a'" + "plan": "Unknown table 'a'" }, { "comment": "select * from qualified authoritative table", "query": "select a.* from authoritative a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a.* from authoritative a", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a.user_id, a.col1, a.col2 from authoritative as a where 1 != 1", - "Query": "select a.user_id, a.col1, a.col2 from authoritative as a", - "Table": "authoritative" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a.* from authoritative a", "Instructions": { @@ -682,22 +400,7 @@ { "comment": "select * from intermixing of authoritative table with non-authoritative results in no expansion", "query": "select * from authoritative join user on authoritative.user_id=user.id", - 
"v3-plan": { - "QueryType": "SELECT", - "Original": "select * from authoritative join user on authoritative.user_id=user.id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from authoritative join `user` on authoritative.user_id = `user`.id where 1 != 1", - "Query": "select * from authoritative join `user` on authoritative.user_id = `user`.id", - "Table": "authoritative, `user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from authoritative join user on authoritative.user_id=user.id", "Instructions": { @@ -720,22 +423,7 @@ { "comment": "select authoritative.* with intermixing still expands", "query": "select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, a.user_id, a.col1, a.col2, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id where 1 != 1", - "Query": "select `user`.id, a.user_id, a.col1, a.col2, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id", - "Table": "authoritative, `user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id", "Instructions": { @@ -758,22 +446,7 @@ { "comment": "auto-resolve anonymous columns for simple route", "query": "select anon_col from user join user_extra on user.id = user_extra.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select anon_col from user join user_extra on user.id = user_extra.user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - 
"Name": "user", - "Sharded": true - }, - "FieldQuery": "select anon_col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1", - "Query": "select anon_col from `user` join user_extra on `user`.id = user_extra.user_id", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select anon_col from user join user_extra on user.id = user_extra.user_id", "Instructions": { @@ -796,47 +469,12 @@ { "comment": "Cannot auto-resolve for cross-shard joins", "query": "select col from user join user_extra", - "v3-plan": "VT03019: column col not found", - "gen4-plan": "Column 'col' in field list is ambiguous" + "plan": "Column 'col' in field list is ambiguous" }, { "comment": "Auto-resolve should work if unique vindex columns are referenced", "query": "select id, user_id from user join user_extra", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, user_id from user join user_extra", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_id from user_extra where 1 != 1", - "Query": "select user_id from user_extra", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, user_id from user join user_extra", "Instructions": { @@ -878,22 +516,7 @@ { "comment": "database calls should be substituted", "query": "select database() from dual", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select database() from dual", - "Instructions": { - "OperatorType": "Projection", - 
"Expressions": [ - ":__vtdbname as database()" - ], - "Inputs": [ - { - "OperatorType": "SingleRow" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select database() from dual", "Instructions": { @@ -915,22 +538,7 @@ { "comment": "last_insert_id for unsharded route", "query": "select last_insert_id() as x from main.unsharded", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select last_insert_id() as x from main.unsharded", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select :__lastInsertId as x from unsharded where 1 != 1", - "Query": "select :__lastInsertId as x from unsharded", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select last_insert_id() as x from main.unsharded", "Instructions": { @@ -952,22 +560,7 @@ { "comment": "select from dual on unqualified keyspace", "query": "select @@session.auto_increment_increment from dual", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select @@session.auto_increment_increment from dual", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select @@auto_increment_increment from dual where 1 != 1", - "Query": "select @@auto_increment_increment from dual", - "Table": "dual" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select @@session.auto_increment_increment from dual", "Instructions": { @@ -989,26 +582,7 @@ { "comment": "select from pinned table", "query": "select * from pin_test", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from pin_test", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from pin_test where 1 != 1", - "Query": "select * from pin_test", - "Table": 
"pin_test", - "Values": [ - "VARCHAR(\"\\x80\")" - ], - "Vindex": "binary" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from pin_test", "Instructions": { @@ -1039,7 +613,7 @@ { "comment": "RHS route referenced", "query": "select user_extra.id from user join user_extra", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_extra.id from user join user_extra", "Instructions": { @@ -1071,15 +645,23 @@ "Table": "user_extra" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "Both routes referenced", + "query": "select user.col, user_extra.id from user join user_extra", + "plan": { "QueryType": "SELECT", - "Original": "select user_extra.id from user join user_extra", + "Original": "select user.col, user_extra.id from user join user_extra", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "R:0", + "JoinColumnIndexes": "L:0,R:0", "TableName": "`user`_user_extra", "Inputs": [ { @@ -1089,8 +671,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user`", + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user`", "Table": "`user`" }, { @@ -1113,11 +695,11 @@ } }, { - "comment": "Both routes referenced", - "query": "select user.col, user_extra.id from user join user_extra", - "v3-plan": { + "comment": "Expression with single-route reference", + "query": "select user.col, user_extra.id + user_extra.col from user join user_extra", + "plan": { "QueryType": "SELECT", - "Original": "select user.col, user_extra.id from user join user_extra", + "Original": "select user.col, user_extra.id + user_extra.col from user join user_extra", "Instructions": { "OperatorType": "Join", "Variant": "Join", @@ -1142,20 +724,28 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user_extra.id from user_extra where 1 != 
1", - "Query": "select user_extra.id from user_extra", + "FieldQuery": "select user_extra.id + user_extra.col from user_extra where 1 != 1", + "Query": "select user_extra.id + user_extra.col from user_extra", "Table": "user_extra" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "Jumbled references", + "query": "select user.col, user_extra.id, user.col2 from user join user_extra", + "plan": { "QueryType": "SELECT", - "Original": "select user.col, user_extra.id from user join user_extra", + "Original": "select user.col, user_extra.id, user.col2 from user join user_extra", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", + "JoinColumnIndexes": "L:0,R:0,L:1", "TableName": "`user`_user_extra", "Inputs": [ { @@ -1165,8 +755,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", + "FieldQuery": "select `user`.col, `user`.col2 from `user` where 1 != 1", + "Query": "select `user`.col, `user`.col2 from `user`", "Table": "`user`" }, { @@ -1189,15 +779,15 @@ } }, { - "comment": "Expression with single-route reference", - "query": "select user.col, user_extra.id + user_extra.col from user join user_extra", - "v3-plan": { + "comment": "Comments", + "query": "select /* comment */ user.col from user join user_extra", + "plan": { "QueryType": "SELECT", - "Original": "select user.col, user_extra.id + user_extra.col from user join user_extra", + "Original": "select /* comment */ user.col from user join user_extra", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", + "JoinColumnIndexes": "L:0", "TableName": "`user`_user_extra", "Inputs": [ { @@ -1208,7 +798,7 @@ "Sharded": true }, "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", + "Query": "select /* comment */ `user`.col from 
`user`", "Table": "`user`" }, { @@ -1218,20 +808,28 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user_extra.id + user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.id + user_extra.col from user_extra", + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select /* comment */ 1 from user_extra", "Table": "user_extra" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "for update", + "query": "select user.col from user join user_extra for update", + "plan": { "QueryType": "SELECT", - "Original": "select user.col, user_extra.id + user_extra.col from user join user_extra", + "Original": "select user.col from user join user_extra for update", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", + "JoinColumnIndexes": "L:0", "TableName": "`user`_user_extra", "Inputs": [ { @@ -1242,7 +840,7 @@ "Sharded": true }, "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", + "Query": "select `user`.col from `user` for update", "Table": "`user`" }, { @@ -1252,8 +850,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user_extra.id + user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.id + user_extra.col from user_extra", + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra for update", "Table": "user_extra" } ] @@ -1265,16 +863,19 @@ } }, { - "comment": "Jumbled references", - "query": "select user.col, user_extra.id, user.col2 from user join user_extra", - "v3-plan": { + "comment": "Field query should work for joins select bind vars", + "query": "select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm", + "plan": { "QueryType": "SELECT", - "Original": "select user.col, user_extra.id, user.col2 from user join user_extra", + "Original": "select user.id, 
(select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0,L:1", - "TableName": "`user`_user_extra", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_id": 0 + }, + "TableName": "`user`_unsharded", "Inputs": [ { "OperatorType": "Route", @@ -1283,31 +884,39 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col, `user`.col2 from `user` where 1 != 1", - "Query": "select `user`.col, `user`.col2 from `user`", + "FieldQuery": "select `user`.id, `user`.id from `user` where 1 != 1", + "Query": "select `user`.id, `user`.id from `user`", "Table": "`user`" }, { "OperatorType": "Route", - "Variant": "Scatter", + "Variant": "Unsharded", "Keyspace": { - "Name": "user", - "Sharded": true + "Name": "main", + "Sharded": false }, - "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select user_extra.id from user_extra", - "Table": "user_extra" + "FieldQuery": "select (select :user_id + outm.m + unsharded.m from unsharded where 1 != 1) as `(select ``user``.id + outm.m + unsharded.m from unsharded)` from unsharded as outm where 1 != 1", + "Query": "select (select :user_id + outm.m + unsharded.m from unsharded) as `(select ``user``.id + outm.m + unsharded.m from unsharded)` from unsharded as outm", + "Table": "unsharded" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "main.unsharded", + "user.user" + ] + } + }, + { + "comment": "Case preservation", + "query": "select user.Col, user_extra.Id from user join user_extra", + "plan": { "QueryType": "SELECT", - "Original": "select user.col, user_extra.id, user.col2 from user join user_extra", + "Original": "select user.Col, user_extra.Id from user join user_extra", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0,L:1", + "JoinColumnIndexes": "L:0,R:0", "TableName": "`user`_user_extra", "Inputs": [ { @@ -1317,8 +926,8 @@ 
"Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col, `user`.col2 from `user` where 1 != 1", - "Query": "select `user`.col, `user`.col2 from `user`", + "FieldQuery": "select `user`.Col from `user` where 1 != 1", + "Query": "select `user`.Col from `user`", "Table": "`user`" }, { @@ -1328,8 +937,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select user_extra.id from user_extra", + "FieldQuery": "select user_extra.Id from user_extra where 1 != 1", + "Query": "select user_extra.Id from user_extra", "Table": "user_extra" } ] @@ -1341,126 +950,46 @@ } }, { - "comment": "Comments", - "query": "select /* comment */ user.col from user join user_extra", - "v3-plan": { + "comment": "syntax error", + "query": "the quick brown fox", + "plan": "syntax error at position 4 near 'the'" + }, + { + "comment": "Hex number is not treated as a simple value", + "query": "select * from user where id = 0x04", + "plan": { "QueryType": "SELECT", - "Original": "select /* comment */ user.col from user join user_extra", + "Original": "select * from user where id = 0x04", "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select /* comment */ `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select /* comment */ 1 from user_extra", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select /* comment */ user.col from user join user_extra", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - 
"JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select /* comment */ `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select /* comment */ 1 from user_extra", - "Table": "user_extra" - } - ] + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 0x04", + "Table": "`user`", + "Values": [ + "VARBINARY(\"\\x04\")" + ], + "Vindex": "user_index" }, "TablesUsed": [ - "user.user", - "user.user_extra" + "user.user" ] } }, { - "comment": "for update", - "query": "select user.col from user join user_extra for update", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra for update", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` for update", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra for update", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "comment": "sharded limit offset", + "query": "select user_id from music order by user_id limit 10, 20", + "plan": { "QueryType": "SELECT", - 
"Original": "select user.col from user join user_extra for update", + "Original": "select user_id from music order by user_id limit 10, 20", "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", + "OperatorType": "Limit", + "Count": "INT64(20)", + "Offset": "INT64(10)", "Inputs": [ { "OperatorType": "Route", @@ -1469,213 +998,77 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` for update", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra for update", - "Table": "user_extra" + "FieldQuery": "select user_id, weight_string(user_id) from music where 1 != 1", + "OrderBy": "(0|1) ASC", + "Query": "select user_id, weight_string(user_id) from music order by user_id asc limit :__upper_limit", + "ResultColumns": 1, + "Table": "music" } ] }, "TablesUsed": [ - "user.user", - "user.user_extra" + "user.music" ] } }, { - "comment": "Field query should work for joins select bind vars", - "query": "select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm", - "v3-plan": { + "comment": "Sharding Key Condition in Parenthesis", + "query": "select * from user where name ='abc' AND (id = 4) limit 5", + "plan": { "QueryType": "SELECT", - "Original": "select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm", + "Original": "select * from user where name ='abc' AND (id = 4) limit 5", "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "user_id": 0 + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "`user`_unsharded", - "Inputs": [ 
- { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select (select :user_id + outm.m + unsharded.m from unsharded where 1 != 1) from unsharded as outm where 1 != 1", - "Query": "select (select :user_id + outm.m + unsharded.m from unsharded) from unsharded as outm", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where `name` = 'abc' and id = 4 limit 5", + "Table": "`user`", + "Values": [ + "INT64(4)" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Multiple parenthesized expressions", + "query": "select * from user where (id = 4) AND (name ='abc') limit 5", + "plan": { "QueryType": "SELECT", - "Original": "select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm", + "Original": "select * from user where (id = 4) AND (name ='abc') limit 5", "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "user_id": 0 + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select (select :user_id + outm.m + unsharded.m from unsharded where 1 != 1) from 
unsharded as outm where 1 != 1", - "Query": "select (select :user_id + outm.m + unsharded.m from unsharded) from unsharded as outm", - "Table": "unsharded" - } - ] + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5", + "Table": "`user`", + "Values": [ + "INT64(4)" + ], + "Vindex": "user_index" }, "TablesUsed": [ - "main.unsharded", "user.user" ] } }, { - "comment": "Case preservation", - "query": "select user.Col, user_extra.Id from user join user_extra", - "v3-plan": { + "comment": "Multiple parenthesized expressions", + "query": "select * from user where (id = 4 and name ='abc') limit 5", + "plan": { "QueryType": "SELECT", - "Original": "select user.Col, user_extra.Id from user join user_extra", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.Col from `user` where 1 != 1", - "Query": "select `user`.Col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.Id from user_extra where 1 != 1", - "Query": "select user_extra.Id from user_extra", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user.Col, user_extra.Id from user join user_extra", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.Col from `user` where 1 != 1", - "Query": "select `user`.Col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - 
"Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.Id from user_extra where 1 != 1", - "Query": "select user_extra.Id from user_extra", - "Table": "user_extra" - } - ] - }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "syntax error", - "query": "the quick brown fox", - "plan": "syntax error at position 4 near 'the'" - }, - { - "comment": "Hex number is not treated as a simple value", - "query": "select * from user where id = 0x04", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where id = 0x04", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 0x04", - "Table": "`user`" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select * from user where id = 0x04", + "Original": "select * from user where (id = 4 and name ='abc') limit 5", "Instructions": { "OperatorType": "Route", "Variant": "EqualUnique", @@ -1684,10 +1077,10 @@ "Sharded": true }, "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 0x04", + "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5", "Table": "`user`", "Values": [ - "VARBINARY(\"\\x04\")" + "INT64(4)" ], "Vindex": "user_index" }, @@ -1697,85 +1090,11 @@ } }, { - "comment": "sharded limit offset", - "query": "select user_id from music order by user_id limit 10, 20", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_id from music order by user_id limit 10, 20", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(20)", - "Offset": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_id, weight_string(user_id) from music where 1 != 
1", - "OrderBy": "(0|1) ASC", - "Query": "select user_id, weight_string(user_id) from music order by user_id asc limit :__upper_limit", - "ResultColumns": 1, - "Table": "music" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user_id from music order by user_id limit 10, 20", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(20)", - "Offset": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_id, weight_string(user_id) from music where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select user_id, weight_string(user_id) from music order by user_id asc limit :__upper_limit", - "ResultColumns": 1, - "Table": "music" - } - ] - }, - "TablesUsed": [ - "user.music" - ] - } - }, - { - "comment": "Sharding Key Condition in Parenthesis", - "query": "select * from user where name ='abc' AND (id = 4) limit 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where name ='abc' AND (id = 4) limit 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where `name` = 'abc' and id = 4 limit 5", - "Table": "`user`", - "Values": [ - "INT64(4)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "comment": "Column Aliasing with Table.Column", + "query": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2", + "plan": { "QueryType": "SELECT", - "Original": "select * from user where name ='abc' AND (id = 4) limit 5", + "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2", "Instructions": { "OperatorType": "Route", "Variant": "EqualUnique", @@ -1783,11 +1102,11 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from `user` where 1 
!= 1", - "Query": "select * from `user` where `name` = 'abc' and id = 4 limit 5", + "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1", + "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col desc limit 2", "Table": "`user`", "Values": [ - "INT64(4)" + "INT64(1)" ], "Vindex": "user_index" }, @@ -1797,30 +1116,11 @@ } }, { - "comment": "Multiple parenthesized expressions", - "query": "select * from user where (id = 4) AND (name ='abc') limit 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where (id = 4) AND (name ='abc') limit 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5", - "Table": "`user`", - "Values": [ - "INT64(4)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "comment": "Column Aliasing with Column", + "query": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3", + "plan": { "QueryType": "SELECT", - "Original": "select * from user where (id = 4) AND (name ='abc') limit 5", + "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3", "Instructions": { "OperatorType": "Route", "Variant": "EqualUnique", @@ -1828,11 +1128,11 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5", + "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1", + "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc limit 3", "Table": "`user`", "Values": [ - "INT64(4)" + "INT64(1)" ], "Vindex": "user_index" }, @@ -1842,30 +1142,11 @@ } }, { - "comment": "Multiple parenthesized expressions", - "query": "select * from user where 
(id = 4 and name ='abc') limit 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where (id = 4 and name ='abc') limit 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5", - "Table": "`user`", - "Values": [ - "INT64(4)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "comment": "Booleans and parenthesis", + "query": "select * from user where (id = 1) AND name = true limit 5", + "plan": { "QueryType": "SELECT", - "Original": "select * from user where (id = 4 and name ='abc') limit 5", + "Original": "select * from user where (id = 1) AND name = true limit 5", "Instructions": { "OperatorType": "Route", "Variant": "EqualUnique", @@ -1874,10 +1155,10 @@ "Sharded": true }, "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5", + "Query": "select * from `user` where id = 1 and `name` = true limit 5", "Table": "`user`", "Values": [ - "INT64(4)" + "INT64(1)" ], "Vindex": "user_index" }, @@ -1887,11 +1168,11 @@ } }, { - "comment": "Column Aliasing with Table.Column", - "query": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2", - "v3-plan": { + "comment": "Column as boolean-ish", + "query": "select * from user where (id = 1) AND name limit 5", + "plan": { "QueryType": "SELECT", - "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2", + "Original": "select * from user where (id = 1) AND name limit 5", "Instructions": { "OperatorType": "Route", "Variant": "EqualUnique", @@ -1899,18 +1180,25 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1", - "Query": "select user0_.col as col0_ from `user` as user0_ where 
id = 1 order by user0_.col desc limit 2", + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 1 and `name` limit 5", "Table": "`user`", "Values": [ "INT64(1)" ], "Vindex": "user_index" - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "PK as fake boolean, and column as boolean-ish", + "query": "select * from user where (id = 5) AND name = true limit 5", + "plan": { "QueryType": "SELECT", - "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2", + "Original": "select * from user where (id = 5) AND name = true limit 5", "Instructions": { "OperatorType": "Route", "Variant": "EqualUnique", @@ -1918,11 +1206,11 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1", - "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col desc limit 2", + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 5 and `name` = true limit 5", "Table": "`user`", "Values": [ - "INT64(1)" + "INT64(5)" ], "Vindex": "user_index" }, @@ -1932,235 +1220,20 @@ } }, { - "comment": "Column Aliasing with Column", - "query": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3", - "v3-plan": { + "comment": "top level subquery in select", + "query": "select a, (select col from user) from unsharded", + "plan": { "QueryType": "SELECT", - "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3", + "Original": "select a, (select col from user) from unsharded", "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1", - "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc 
limit 3", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1", - "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc limit 3", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "Booleans and parenthesis", - "query": "select * from user where (id = 1) AND name = true limit 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where (id = 1) AND name = true limit 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 1 and `name` = true limit 5", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select * from user where (id = 1) AND name = true limit 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 1 and `name` = true limit 5", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "Column as boolean-ish", - "query": "select * from user where (id = 1) AND name limit 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where (id = 1) AND name limit 5", - 
"Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 1 and `name` limit 5", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select * from user where (id = 1) AND name limit 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 1 and `name` limit 5", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "PK as fake boolean, and column as boolean-ish", - "query": "select * from user where (id = 5) AND name = true limit 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where (id = 5) AND name = true limit 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 5 and `name` = true limit 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select * from user where (id = 5) AND name = true limit 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 5 and `name` = true limit 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "top level subquery in select", - "query": "select 
a, (select col from user) from unsharded", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, (select col from user) from unsharded", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select a, :__sq1 from unsharded where 1 != 1", - "Query": "select a, :__sq1 from unsharded", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select a, (select col from user) from unsharded", - "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutValue", "PulloutVars": [ "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -2172,14 +1245,15 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { "Name": "main", "Sharded": false }, - "FieldQuery": "select a, :__sq1 from unsharded where 1 != 1", - "Query": "select a, :__sq1 from unsharded", + "FieldQuery": "select a, :__sq1 as `(select col from ``user``)` from unsharded where 1 != 1", + "Query": "select a, :__sq1 as `(select col from ``user``)` from unsharded", "Table": "unsharded" } ] @@ -2193,53 +1267,18 @@ { "comment": "sub-expression subquery in select", "query": "select a, 1+(select col from user) from unsharded", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, 1+(select col from user) from unsharded", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" 
- ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select a, 1 + :__sq1 from unsharded where 1 != 1", - "Query": "select a, 1 + :__sq1 from unsharded", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, 1+(select col from user) from unsharded", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutValue", "PulloutVars": [ "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -2251,14 +1290,15 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { "Name": "main", "Sharded": false }, - "FieldQuery": "select a, 1 + :__sq1 from unsharded where 1 != 1", - "Query": "select a, 1 + :__sq1 from unsharded", + "FieldQuery": "select a, 1 + :__sq1 as `1 + (select col from ``user``)` from unsharded where 1 != 1", + "Query": "select a, 1 + :__sq1 as `1 + (select col from ``user``)` from unsharded", "Table": "unsharded" } ] @@ -2272,50 +1312,7 @@ { "comment": "select * from derived table expands specific columns", "query": "select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0, - 1 - ], - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": 
"Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id as id1 from `user` where 1 != 1", - "Query": "select `user`.id as id1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id as id2 from user_extra where 1 != 1", - "Query": "select user_extra.id as id2 from user_extra", - "Table": "user_extra" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t", "Instructions": { @@ -2357,19 +1354,17 @@ { "comment": "duplicate columns not allowed in derived table", "query": "select * from (select user.id, user_extra.id from user join user_extra) as t", - "v3-plan": "VT12001: unsupported: duplicate column names in subquery: id", - "gen4-plan": "Duplicate column name 'id'" + "plan": "Duplicate column name 'id'" }, { "comment": "non-existent symbol in cross-shard derived table", "query": "select t.col from (select user.id from user join user_extra) as t", - "v3-plan": "VT03019: column t.col not found", - "gen4-plan": "column 't.col' not found" + "plan": "column 't.col' not found" }, { "comment": "union with the same target shard", "query": "select * from music where user_id = 1 union select * from user where id = 1", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from music where user_id = 1 union select * from user where id = 1", "Instructions": { @@ -2381,26 +1376,7 @@ }, "FieldQuery": "select * from music where 1 != 1 union select * from `user` where 1 != 1", "Query": "select * from music where user_id = 1 union select * from `user` where id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select * from music where user_id = 1 union select * from user 
where id = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from music where 1 != 1 union select * from `user` where 1 != 1", - "Query": "select * from music where user_id = 1 union select * from `user` where id = 1", - "Table": "music", + "Table": "`user`, music", "Values": [ "INT64(1)" ], @@ -2415,26 +1391,7 @@ { "comment": "union with the same target shard last_insert_id", "query": "select *, last_insert_id() from music where user_id = 1 union select * from user where id = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select *, last_insert_id() from music where user_id = 1 union select * from user where id = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select *, :__lastInsertId as `last_insert_id()` from music where 1 != 1 union select * from `user` where 1 != 1", - "Query": "select *, :__lastInsertId as `last_insert_id()` from music where user_id = 1 union select * from `user` where id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select *, last_insert_id() from music where user_id = 1 union select * from user where id = 1", "Instructions": { @@ -2446,7 +1403,7 @@ }, "FieldQuery": "select *, :__lastInsertId as `last_insert_id()` from music where 1 != 1 union select * from `user` where 1 != 1", "Query": "select *, :__lastInsertId as `last_insert_id()` from music where user_id = 1 union select * from `user` where id = 1", - "Table": "music", + "Table": "`user`, music", "Values": [ "INT64(1)" ], @@ -2461,22 +1418,7 @@ { "comment": "unsharded union in derived table", "query": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a", - "v3-plan": { - "QueryType": 
"SELECT", - "Original": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from (select col1, col2 from unsharded where 1 != 1 union select col1, col2 from unsharded where 1 != 1) as a where 1 != 1", - "Query": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a", "Instructions": { @@ -2498,22 +1440,7 @@ { "comment": "unsharded union in subquery", "query": "select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select id, `name` from unsharded where 1 != 1", - "Query": "select id, `name` from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)", "Instructions": { @@ -2535,22 +1462,7 @@ { "comment": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5", "query": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5", - "v3-plan": { 
- "QueryType": "SELECT", - "Original": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1", - "Query": "select id from unsharded union select id from unsharded_auto order by id asc limit 5", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5", "Instructions": { @@ -2573,22 +1485,7 @@ { "comment": "unsharded union", "query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1", - "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)", "Instructions": { @@ -2611,22 +1508,7 @@ { "comment": "unsharded nested union", "query": "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(select id from unsharded union select id from unsharded_auto) union 
(select id from unsharded_auto union select name from unsharded)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1 union select `name` from unsharded where 1 != 1", - "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto union select `name` from unsharded", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)", "Instructions": { @@ -2649,22 +1531,7 @@ { "comment": "unsharded nested union with limit", "query": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "(select id from unsharded where 1 != 1) union (select id from unsharded where 1 != 1)", - "Query": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1", "Instructions": { @@ -2686,23 +1553,7 @@ { "comment": "routing rules: ensure directives are not lost", "query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from route2", - 
"v3-plan": { - "QueryType": "SELECT", - "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from route2", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from unsharded as route2 where 1 != 1", - "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from unsharded as route2", - "QueryTimeout": 1000, - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from route2", "Instructions": { @@ -2725,22 +1576,7 @@ { "comment": "testing SingleRow Projection", "query": "select 42", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 42", - "Instructions": { - "OperatorType": "Projection", - "Expressions": [ - "INT64(42) as 42" - ], - "Inputs": [ - { - "OperatorType": "SingleRow" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 42", "Instructions": { @@ -2762,22 +1598,7 @@ { "comment": "don't filter on the vtgate", "query": "select 42 from dual where false", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 42 from dual where false", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 42 from dual where 1 != 1", - "Query": "select 42 from dual where false", - "Table": "dual" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 42 from dual where false", "Instructions": { @@ -2799,22 +1620,7 @@ { "comment": "testing SingleRow Projection with arithmetics", "query": "select 42+2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 42+2", - "Instructions": { - "OperatorType": "Projection", - "Expressions": [ - "INT64(44) as 42 + 2" - ], - "Inputs": [ - { - "OperatorType": "SingleRow" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 42+2", "Instructions": 
{ @@ -2836,26 +1642,7 @@ { "comment": "sql_calc_found_rows without limit", "query": "select sql_calc_found_rows * from music where user_id = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select sql_calc_found_rows * from music where user_id = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from music where 1 != 1", - "Query": "select * from music where user_id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select sql_calc_found_rows * from music where user_id = 1", "Instructions": { @@ -2881,51 +1668,7 @@ { "comment": "sql_calc_found_rows with limit", "query": "select sql_calc_found_rows * from music limit 100", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select sql_calc_found_rows * from music limit 100", - "Instructions": { - "OperatorType": "SQL_CALC_FOUND_ROWS", - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(100)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from music where 1 != 1", - "Query": "select * from music limit :__upper_limit", - "Table": "music" - } - ] - }, - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from music where 1 != 1", - "Query": "select count(*) from music", - "Table": "music" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select sql_calc_found_rows * from music limit 100", "Instructions": { @@ -2976,7 +1719,7 @@ { "comment": "sql_calc_found_rows with SelectEqualUnique plans", "query": "select sql_calc_found_rows * from 
music where user_id = 1 limit 2", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select sql_calc_found_rows * from music where user_id = 1 limit 2", "Instructions": { @@ -3013,103 +1756,18 @@ "Vindex": "user_index" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "sql_calc_found_rows with group by and having", + "query": "select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2", + "plan": { "QueryType": "SELECT", - "Original": "select sql_calc_found_rows * from music where user_id = 1 limit 2", - "Instructions": { - "OperatorType": "SQL_CALC_FOUND_ROWS", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from music where 1 != 1", - "Query": "select * from music where user_id = 1 limit 2", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from music where 1 != 1", - "Query": "select count(*) from music where user_id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - ] - }, - "TablesUsed": [ - "user.music" - ] - } - }, - { - "comment": "sql_calc_found_rows with group by and having", - "query": "select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2", - "Instructions": { - "OperatorType": "SQL_CALC_FOUND_ROWS", - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(2)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": 
"user", - "Sharded": true - }, - "FieldQuery": "select user_id, count(id), weight_string(user_id) from music where 1 != 1 group by user_id", - "OrderBy": "(0|2) ASC", - "Query": "select user_id, count(id), weight_string(user_id) from music group by user_id having count(user_id) = 1 order by user_id asc limit :__upper_limit", - "ResultColumns": 2, - "Table": "music" - } - ] - }, - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from (select user_id, count(id) from music where 1 != 1 group by user_id) as t where 1 != 1", - "Query": "select count(*) from (select user_id, count(id) from music group by user_id having count(user_id) = 1) as t", - "Table": "music" - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2", + "Original": "select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2", "Instructions": { "OperatorType": "SQL_CALC_FOUND_ROWS", "Inputs": [ @@ -3160,34 +1818,17 @@ { "comment": "sql_calc_found_rows in sub queries", "query": "select * from music where user_id IN (select sql_calc_found_rows * from music limit 10)", - "v3-plan": "VT03008: incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'", - "gen4-plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'" + "plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'" }, { "comment": "sql_calc_found_rows in derived table", "query": "select sql_calc_found_rows * from (select sql_calc_found_rows * from music limit 10) t limit 1", - "v3-plan": "VT03008: incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'", - "gen4-plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'" + "plan": 
"Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'" }, { "comment": "select from unsharded keyspace into dumpfile", "query": "select * from main.unsharded into Dumpfile 'x.txt'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from main.unsharded into Dumpfile 'x.txt'", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from unsharded where 1 != 1", - "Query": "select * from unsharded into dumpfile 'x.txt'", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from main.unsharded into Dumpfile 'x.txt'", "Instructions": { @@ -3209,22 +1850,7 @@ { "comment": "select from unsharded keyspace into outfile", "query": "select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from unsharded where 1 != 1", - "Query": "select * from unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n'", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'", "Instructions": { @@ -3246,22 +1872,7 @@ { "comment": "select from unsharded keyspace into outfile s3", "query": "select 
* from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from unsharded where 1 != 1", - "Query": "select * from unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n' manifest on overwrite off", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off", "Instructions": { @@ -3298,26 +1909,7 @@ { "comment": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1", "query": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select (select u.id from `user` as u where 1 != 1), a.id from `user` as a where 1 != 1", - "Query": "select (select u.id from `user` as u where u.id = 1), a.id from 
`user` as a where a.id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1", "Instructions": { @@ -3343,41 +1935,7 @@ { "comment": "Add two tables with the same column in a join", "query": "select t.id, s.id from user t join user_extra s on t.id = s.user_id join unsharded", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.id, s.id from user t join user_extra s on t.id = s.user_id join unsharded", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`, user_extra_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select t.id, s.id from `user` as t join user_extra as s on t.id = s.user_id where 1 != 1", - "Query": "select t.id, s.id from `user` as t join user_extra as s on t.id = s.user_id", - "Table": "`user`, user_extra" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded where 1 != 1", - "Query": "select 1 from unsharded", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.id, s.id from user t join user_extra s on t.id = s.user_id join unsharded", "Instructions": { @@ -3420,22 +1978,7 @@ { "comment": "((((select 1))))", "query": "((((select 1))))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "((((select 1))))", - "Instructions": { - "OperatorType": "Projection", - "Expressions": [ - "INT64(1) as 1" - ], - "Inputs": [ - { - "OperatorType": "SingleRow" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "((((select 1))))", "Instructions": { @@ -3457,41 +2000,7 @@ { "comment": "Merging 
dual with user", "query": "select 42, id from dual, user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 42, id from dual, user", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "dual_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 42 from dual where 1 != 1", - "Query": "select 42 from dual", - "Table": "dual" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 42, id from dual, user", "Instructions": { @@ -3514,41 +2023,42 @@ { "comment": "select (select col from user limit 1) as a from user join user_extra order by a", "query": "select (select col from user limit 1) as a from user join user_extra order by a", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select (select col from user limit 1) as a from user join user_extra order by a", "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Limit", - "Count": "INT64(1)", + "OperatorType": "UncorrelatedSubquery", + "Variant": "PulloutValue", + "PulloutVars": [ + "__sq1" + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` limit :__upper_limit", - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - 
"TableName": "`user`_user_extra", - "Inputs": [ + "InputName": "SubQuery", + "OperatorType": "Limit", + "Count": "INT64(1)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user` limit :__upper_limit", + "Table": "`user`" + } + ] + }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3558,82 +2068,20 @@ "FieldQuery": "select :__sq1 as a, weight_string(:__sq1) from `user` where 1 != 1", "OrderBy": "(0|1) ASC", "Query": "select :__sq1 as a, weight_string(:__sq1) from `user` order by a asc", - "ResultColumns": 1, - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select (select col from user limit 1) as a from user join user_extra order by a", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` limit :__upper_limit", "Table": "`user`" } ] }, { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :__sq1 as a, weight_string(:__sq1) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select :__sq1 as a, weight_string(:__sq1) from 
`user` order by a asc", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra", + "Table": "user_extra" } ] }, @@ -3646,24 +2094,24 @@ { "comment": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t", "query": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutValue", "PulloutVars": [ - "__sq_has_values1", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Limit", "Count": "INT64(1)", "Inputs": [ @@ -3681,104 +2129,29 @@ ] }, { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :__sq1 as a from `user` where 1 != 1", - "Query": "select :__sq1 as a from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" 
- } - ] + "InputName": "Outer", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select t.a from (select :__sq1 as a from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.a from (select :__sq1 as a from `user`) as t", + "Table": "`user`" } ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ + }, { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` limit :__upper_limit", - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :__sq1 as a from `user` where 1 != 1", - "Query": "select :__sq1 as a from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra", + "Table": "user_extra" } ] }, @@ -3788,30 +2161,10 @@ ] } }, - { - "comment": "select (select col from user where user_extra.id = 4 limit 
1) as a from user join user_extra", - "query": "select (select col from user where user_extra.id = 4 limit 1) as a from user join user_extra", - "plan": "VT12001: unsupported: cross-shard correlated subquery" - }, { "comment": "plan test for a natural character set string", "query": "select N'string' from dual", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select N'string' from dual", - "Instructions": { - "OperatorType": "Projection", - "Expressions": [ - "VARCHAR(\"string\") as N'string'" - ], - "Inputs": [ - { - "OperatorType": "SingleRow" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select N'string' from dual", "Instructions": { @@ -3833,44 +2186,7 @@ { "comment": "select expression having dependencies on both sides of a join", "query": "select user.id * user_id as amount from user, user_extra", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.id * user_id as amount from user, user_extra", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :user_id * user_id as amount from user_extra where 1 != 1", - "Query": "select :user_id * user_id as amount from user_extra", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.id * user_id as amount from user, user_extra", "Instructions": { @@ -3915,43 +2231,51 @@ { "comment": "correlated subquery in exists clause", "query": "select col from user where exists(select user_id from user_extra where 
user_id = 3 and user_id < user.id)", - "v3-plan": "VT12001: unsupported: cross-shard correlated subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id)", "Instructions": { - "OperatorType": "SemiJoin", - "JoinVars": { - "user_id": 0 - }, - "ProjectedIndexes": "-2", - "TableName": "`user`_user_extra", + "OperatorType": "SimpleProjection", + "Columns": [ + 0 + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, col from `user` where 1 != 1", - "Query": "select `user`.id, col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true + "OperatorType": "SemiJoin", + "JoinVars": { + "user_id": 1 }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id", - "Table": "user_extra", - "Values": [ - "INT64(3)" - ], - "Vindex": "user_index" + "TableName": "`user`_user_extra", + "Inputs": [ + { + "InputName": "Outer", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select col, `user`.id from `user` where 1 != 1", + "Query": "select col, `user`.id from `user`", + "Table": "`user`" + }, + { + "InputName": "SubQuery", + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id", + "Table": "user_extra", + "Values": [ + "INT64(3)" + ], + "Vindex": "user_index" + } + ] } ] }, @@ -3964,109 +2288,52 @@ { "comment": "correlated subquery in exists clause with an order by", "query": "select col from user where exists(select 
user_id from user_extra where user_id = 3 and user_id < user.id) order by col", - "v3-plan": "VT12001: unsupported: cross-shard correlated subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id) order by col", "Instructions": { - "OperatorType": "SemiJoin", - "JoinVars": { - "user_id": 0 - }, - "ProjectedIndexes": "-2", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, col from `user` where 1 != 1", - "OrderBy": "1 ASC", - "Query": "select `user`.id, col from `user` order by col asc", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id", - "Table": "user_extra", - "Values": [ - "INT64(3)" - ], - "Vindex": "user_index" - } - ] - }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "correlated subquery having dependencies on two tables", - "query": "select 1 from user u1, user u2 where exists (select 1 from user_extra ue where ue.col = u1.col and ue.col = u2.col)", - "v3-plan": "VT12001: unsupported: cross-shard correlated subquery", - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select 1 from user u1, user u2 where exists (select 1 from user_extra ue where ue.col = u1.col and ue.col = u2.col)", - "Instructions": { - "OperatorType": "SemiJoin", - "JoinVars": { - "u1_col": 0, - "u2_col": 1 - }, - "ProjectedIndexes": "-3", - "TableName": "`user`_`user`_user_extra", + "OperatorType": "SimpleProjection", + "Columns": [ + 0 + ], "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0,L:1", - "TableName": 
"`user`_`user`", + "OperatorType": "SemiJoin", + "JoinVars": { + "user_id": 1 + }, + "TableName": "`user`_user_extra", "Inputs": [ { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select u1.col, 1 from `user` as u1 where 1 != 1", - "Query": "select u1.col, 1 from `user` as u1", + "FieldQuery": "select col, `user`.id from `user` where 1 != 1", + "OrderBy": "0 ASC", + "Query": "select col, `user`.id from `user` order by col asc", "Table": "`user`" }, { + "InputName": "SubQuery", "OperatorType": "Route", - "Variant": "Scatter", + "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select u2.col from `user` as u2 where 1 != 1", - "Query": "select u2.col from `user` as u2", - "Table": "`user`" + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id", + "Table": "user_extra", + "Values": [ + "INT64(3)" + ], + "Vindex": "user_index" } ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra as ue where 1 != 1", - "Query": "select 1 from user_extra as ue where ue.col = :u1_col /* INT16 */ and ue.col = :u2_col /* INT16 */", - "Table": "user_extra" } ] }, @@ -4077,19 +2344,19 @@ } }, { - "comment": "correlated subquery using a column twice", - "query": "select 1 from user u where exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)", - "v3-plan": "VT12001: unsupported: cross-shard correlated subquery", - "gen4-plan": { + "comment": "correlated subquery having dependencies on two tables", + "query": "select 1 from user u1, user u2 where exists (select 1 from user_extra ue where ue.col = u1.col and ue.col = u2.col)", + "plan": { "QueryType": "SELECT", - "Original": "select 1 from user u where exists (select 1 from user_extra ue where ue.col = 
u.col and u.col = ue.col2)", + "Original": "select 1 from user u1, user u2 where exists (select 1 from user_extra ue where ue.col = u1.col and ue.col = u2.col)", "Instructions": { - "OperatorType": "SemiJoin", + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", "JoinVars": { - "u_col": 0 + "u1_col": 1 }, - "ProjectedIndexes": "-2", - "TableName": "`user`_user_extra", + "TableName": "`user`_`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -4098,126 +2365,57 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.col, 1 from `user` as u where 1 != 1", - "Query": "select u.col, 1 from `user` as u", + "FieldQuery": "select 1, u1.col from `user` as u1 where 1 != 1", + "Query": "select 1, u1.col from `user` as u1", "Table": "`user`" }, { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true + "OperatorType": "SemiJoin", + "JoinVars": { + "u2_col": 0 }, - "FieldQuery": "select 1 from user_extra as ue where 1 != 1", - "Query": "select 1 from user_extra as ue where ue.col = :u_col /* INT16 */ and ue.col2 = :u_col", - "Table": "user_extra" - } - ] - }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "correlated subquery part of an OR clause", - "query": "select 1 from user u where u.col = 6 or exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)", - "v3-plan": "VT12001: unsupported: cross-shard correlated subquery", - "gen4-plan": "VT12001: unsupported: EXISTS sub-queries are only supported with AND clause" - }, - { - "comment": "correlated subquery that is dependent on one side of a join, fully mergeable", - "query": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE 
music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music join `user` on music.user_id = `user`.id where 1 != 1", - "Query": "select music.id from music join `user` on music.user_id = `user`.id where music.user_id = 5 and music.id = (select max(m2.id) from music as m2 where m2.user_id = `user`.id)", - "Table": "music, `user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music, `user` where 1 != 1", - "Query": "select music.id from music, `user` where music.user_id = 5 and music.id = (select max(m2.id) from music as m2 where m2.user_id = `user`.id) and music.user_id = `user`.id", - "Table": "`user`, music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.music", - "user.user" - ] - } - }, - { - "comment": "union as a derived table", - "query": "select found from (select id as found from user union all (select id from unsharded)) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select found from (select id as found from user union all (select id from unsharded)) as t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Concatenate", + "TableName": "`user`_user_extra", "Inputs": [ { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select id 
as found from `user` where 1 != 1", - "Query": "select id as found from `user`", + "FieldQuery": "select u2.col from `user` as u2 where 1 != 1", + "Query": "select u2.col from `user` as u2", "Table": "`user`" }, { + "InputName": "SubQuery", "OperatorType": "Route", - "Variant": "Unsharded", + "Variant": "Scatter", "Keyspace": { - "Name": "main", - "Sharded": false + "Name": "user", + "Sharded": true }, - "FieldQuery": "select id from unsharded where 1 != 1", - "Query": "select id from unsharded", - "Table": "unsharded" + "FieldQuery": "select 1 from user_extra as ue where 1 != 1", + "Query": "select 1 from user_extra as ue where ue.col = :u1_col and ue.col = :u2_col", + "Table": "user_extra" } ] } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "correlated subquery using a column twice", + "query": "select 1 from user u where exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)", + "plan": { "QueryType": "SELECT", - "Original": "select found from (select id as found from user union all (select id from unsharded)) as t", + "Original": "select 1 from user u where exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)", "Instructions": { "OperatorType": "SimpleProjection", "Columns": [ @@ -4225,55 +2423,81 @@ ], "Inputs": [ { - "OperatorType": "Concatenate", + "OperatorType": "SemiJoin", + "JoinVars": { + "u_col": 1 + }, + "TableName": "`user`_user_extra", "Inputs": [ { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select id as found from `user` where 1 != 1", - "Query": "select id as found from `user`", + "FieldQuery": "select 1, u.col from `user` as u where 1 != 1", + "Query": "select 1, u.col from `user` as u", "Table": "`user`" }, { + "InputName": "SubQuery", "OperatorType": "Route", - "Variant": "Unsharded", + "Variant": "Scatter", "Keyspace": { - "Name": 
"main", - "Sharded": false + "Name": "user", + "Sharded": true }, - "FieldQuery": "select id from unsharded where 1 != 1", - "Query": "select id from unsharded", - "Table": "unsharded" + "FieldQuery": "select 1 from user_extra as ue where 1 != 1", + "Query": "select 1 from user_extra as ue where ue.col = :u_col and ue.col2 = :u_col", + "Table": "user_extra" } ] } ] }, "TablesUsed": [ - "main.unsharded", - "user.user" + "user.user", + "user.user_extra" ] } }, { - "comment": "use output column containing data from both sides of the join", - "query": "select user_extra.col + user.col from user join user_extra on user.id = user_extra.id", - "v3-plan": { + "comment": "correlated subquery that is dependent on one side of a join, fully mergeable", + "query": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)", + "plan": { "QueryType": "SELECT", - "Original": "select user_extra.col + user.col from user join user_extra on user.id = user_extra.id", + "Original": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)", "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_col": 0, - "user_id": 1 + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "`user`_user_extra", + "FieldQuery": "select music.id from music, `user` where 1 != 1", + "Query": "select music.id from music, `user` where music.user_id = 5 and music.user_id = `user`.id and music.id = (select max(m2.id) from music as m2 where m2.user_id = `user`.id)", + "Table": "`user`, music", + "Values": [ + "INT64(5)" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "union as a derived table", + 
"query": "select found from (select id as found from user union all (select id from unsharded)) as t", + "plan": { + "QueryType": "SELECT", + "Original": "select found from (select id as found from user union all (select id from unsharded)) as t", + "Instructions": { + "OperatorType": "Concatenate", "Inputs": [ { "OperatorType": "Route", @@ -4282,25 +2506,33 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1", - "Query": "select `user`.col, `user`.id from `user`", + "FieldQuery": "select id as found from `user` where 1 != 1", + "Query": "select id as found from `user`", "Table": "`user`" }, { "OperatorType": "Route", - "Variant": "Scatter", + "Variant": "Unsharded", "Keyspace": { - "Name": "user", - "Sharded": true + "Name": "main", + "Sharded": false }, - "FieldQuery": "select user_extra.col + :user_col from user_extra where 1 != 1", - "Query": "select user_extra.col + :user_col from user_extra where user_extra.id = :user_id", - "Table": "user_extra" + "FieldQuery": "select id from unsharded where 1 != 1", + "Query": "select id from unsharded", + "Table": "unsharded" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "main.unsharded", + "user.user" + ] + } + }, + { + "comment": "use output column containing data from both sides of the join", + "query": "select user_extra.col + user.col from user join user_extra on user.id = user_extra.id", + "plan": { "QueryType": "SELECT", "Original": "select user_extra.col + user.col from user join user_extra on user.id = user_extra.id", "Instructions": { @@ -4331,8 +2563,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select :user_extra_col + `user`.col from `user` where 1 != 1", - "Query": "select :user_extra_col + `user`.col from `user` where `user`.id = :user_extra_id", + "FieldQuery": "select :user_extra_col + `user`.col as `user_extra.col + ``user``.col` from `user` where 1 != 1", + "Query": "select :user_extra_col + `user`.col as `user_extra.col + 
``user``.col` from `user` where `user`.id = :user_extra_id", "Table": "`user`", "Values": [ ":user_extra_id" @@ -4394,22 +2626,7 @@ { "comment": "select user.id, trim(leading 'x' from user.name) from user", "query": "select user.id, trim(leading 'x' from user.name) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.id, trim(leading 'x' from user.name) from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user` where 1 != 1", - "Query": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.id, trim(leading 'x' from user.name) from user", "Instructions": { @@ -4431,22 +2648,7 @@ { "comment": "json utility functions", "query": "select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user` where 1 != 1", - "Query": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user", "Instructions": { @@ -4468,7 +2670,7 @@ { "comment": "dual query with exists clause", "query": "select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA 
= 'mysql')", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA = 'mysql')", "Instructions": { @@ -4479,24 +2681,7 @@ "Sharded": false }, "FieldQuery": "select 1 from dual where 1 != 1", - "Query": "select 1 from dual where exists (select 1 from information_schema.`TABLES` where information_schema.`TABLES`.TABLE_NAME = :TABLES_TABLE_NAME /* VARCHAR */ and information_schema.`TABLES`.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ limit 1)", - "SysTableTableName": "[TABLES_TABLE_NAME:VARCHAR(\"proc\")]", - "SysTableTableSchema": "[VARCHAR(\"mysql\")]", - "Table": "dual" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA = 'mysql')", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from dual where 1 != 1", - "Query": "select 1 from dual where exists (select 1 from information_schema.`TABLES` where `TABLES`.TABLE_NAME = :TABLES_TABLE_NAME /* VARCHAR */ and `TABLES`.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ limit 1)", + "Query": "select 1 from dual where exists (select 1 from information_schema.`TABLES` where `TABLES`.TABLE_NAME = :TABLES_TABLE_NAME /* VARCHAR */ and `TABLES`.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */)", "SysTableTableName": "[TABLES_TABLE_NAME:VARCHAR(\"proc\")]", "SysTableTableSchema": "[VARCHAR(\"mysql\")]", "Table": "dual" @@ -4509,22 +2694,7 @@ { "comment": "json_quote, json_object and json_array", "query": "SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())", - "v3-plan": { - "QueryType": "SELECT", - 
"Original": "SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, curtime()) from dual where 1 != 1", - "Query": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, curtime()) from dual", - "Table": "dual" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())", "Instructions": { @@ -4546,61 +2716,18 @@ { "comment": "select (select id from user order by id limit 1) from user_extra", "query": "select (select id from user order by id limit 1) from user_extra", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select (select id from user order by id limit 1) from user_extra", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", - "ResultColumns": 1, - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :__sq1 from user_extra where 1 != 1", - "Query": "select :__sq1 from user_extra", - "Table": "user_extra" - } 
- ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select (select id from user order by id limit 1) from user_extra", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutValue", "PulloutVars": [ "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Limit", "Count": "INT64(1)", "Inputs": [ @@ -4614,20 +2741,20 @@ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", "OrderBy": "(0|1) ASC", "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", - "ResultColumns": 1, "Table": "`user`" } ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select :__sq1 from user_extra where 1 != 1", - "Query": "select :__sq1 from user_extra", + "FieldQuery": "select :__sq1 as `(select id from ``user`` order by id asc limit 1)` from user_extra where 1 != 1", + "Query": "select :__sq1 as `(select id from ``user`` order by id asc limit 1)` from user_extra", "Table": "user_extra" } ] @@ -4641,47 +2768,7 @@ { "comment": "yeah, it does not make sense, but it's valid", "query": "select exists(select 1) from user where id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select exists(select 1) from user where id = 5", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutExists", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from dual where 1 != 1", - "Query": "select 1 from dual limit 1", - "Table": "dual" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :__sq_has_values1 from `user` where 1 != 1", - "Query": "select :__sq_has_values1 from `user` where id = 5", - 
"Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select exists(select 1) from user where id = 5", "Instructions": { @@ -4692,7 +2779,7 @@ "Sharded": true }, "FieldQuery": "select exists (select 1 from dual where 1 != 1) from `user` where 1 != 1", - "Query": "select exists (select 1 from dual limit 1) from `user` where id = 5", + "Query": "select exists (select 1 from dual) from `user` where id = 5", "Table": "`user`", "Values": [ "INT64(5)" @@ -4708,22 +2795,7 @@ { "comment": "json schema validation functions", "query": "SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual where 1 != 1", - "Query": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual", - "Table": "dual" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')", "Instructions": { @@ -4745,22 +2817,7 @@ { "comment": "json search functions", "query": "SELECT 
JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual where 1 != 1", - "Query": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 
40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual", - "Table": "dual" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')", "Instructions": { @@ -4782,22 +2839,7 @@ { "comment": "Json extract and json unquote shorthands", "query": "SELECT a->\"$[4]\", a->>\"$[3]\" FROM user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT a->\"$[4]\", a->>\"$[3]\" FROM user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a -> '$[4]', a ->> '$[3]' from `user` where 1 != 1", - "Query": "select a -> '$[4]', a ->> '$[3]' from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT a->\"$[4]\", a->>\"$[3]\" FROM user", "Instructions": { @@ -4819,22 +2861,7 @@ { "comment": "groupe by with non aggregated columns and table alias", "query": "select u.id, u.age from user u group by u.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.id, u.age from user u group by u.id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, 
- "FieldQuery": "select u.id, u.age from `user` as u where 1 != 1 group by u.id", - "Query": "select u.id, u.age from `user` as u group by u.id", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id, u.age from user u group by u.id", "Instructions": { @@ -4856,22 +2883,7 @@ { "comment": "Functions that return JSON value attributes", "query": "select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual where 1 != 1", - "Query": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual", - "Table": "dual" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')", "Instructions": { @@ -4893,22 +2905,7 @@ { "comment": "Json array functions", "query": "select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 
'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual where 1 != 1", - "Query": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual", - "Table": "dual" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))", "Instructions": { @@ -4930,22 +2927,7 @@ { "comment": "Json merge functions", "query": "select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual where 1 != 1", - "Query": "select 
json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual", - "Table": "dual" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')", "Instructions": { @@ -4967,22 +2949,7 @@ { "comment": "JSON modifier functions", "query": "select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_UNQUOTE('\"abc\"')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_UNQUOTE('\"abc\"')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual where 1 != 1", - "Query": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual", - "Table": "dual" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), 
JSON_UNQUOTE('\"abc\"')", "Instructions": { @@ -5004,47 +2971,7 @@ { "comment": "Reference with a subquery which can be merged", "query": "select exists(select id from user where id = 4)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select exists(select id from user where id = 4)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutExists", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where id = 4 limit 1", - "Table": "`user`", - "Values": [ - "INT64(4)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1", - "Query": "select :__sq_has_values1 from dual", - "Table": "dual" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select exists(select id from user where id = 4)", "Instructions": { @@ -5055,7 +2982,7 @@ "Sharded": true }, "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1", - "Query": "select exists (select 1 from `user` where id = 4 limit 1) from dual", + "Query": "select exists (select 1 from `user` where id = 4) from dual", "Table": "dual", "Values": [ "INT64(4)" @@ -5071,84 +2998,39 @@ { "comment": "Reference with a subquery which cannot be merged", "query": "select exists(select * from user)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select exists(select * from user)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutExists", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values2", "__sq1" ], "Inputs": [ { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - 
"OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` limit :__upper_limit", - "Table": "`user`" - } - ] + "InputName": "SubQuery", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from `user` where 1 != 1", + "Query": "select 1 from `user`", + "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Reference", "Keyspace": { "Name": "main", "Sharded": false }, - "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1", - "Query": "select :__sq_has_values1 from dual", - "Table": "dual" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select exists(select * from user)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutExists", - "PulloutVars": [ - "__sq_has_values1" - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` limit :__upper_limit", - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1", - "Query": "select :__sq_has_values1 from dual", + "FieldQuery": "select :__sq_has_values2 as `exists (select 1 from ``user``)` from dual where 1 != 1", + "Query": "select :__sq_has_values2 as `exists (select 1 from ``user``)` from dual", "Table": "dual" } ] @@ -5162,22 +3044,7 @@ { "comment": "insert function not requiring any table", "query": "select insert('Quadratic', 3, 4, 'What')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select insert('Quadratic', 3, 4, 'What')", - 
"Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select insert('Quadratic', 3, 4, 'What') from dual where 1 != 1", - "Query": "select insert('Quadratic', 3, 4, 'What') from dual", - "Table": "dual" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select insert('Quadratic', 3, 4, 'What')", "Instructions": { @@ -5199,22 +3066,7 @@ { "comment": "insert function using column names as arguments", "query": "select insert(tcol1, id, 3, tcol2) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select insert(tcol1, id, 3, tcol2) from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select insert(tcol1, id, 3, tcol2) from `user` where 1 != 1", - "Query": "select insert(tcol1, id, 3, tcol2) from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select insert(tcol1, id, 3, tcol2) from user", "Instructions": { @@ -5236,22 +3088,7 @@ { "comment": "gtid functions", "query": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', 
'3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual where 1 != 1", - "Query": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual", - "Table": "dual" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')", "Instructions": { @@ -5273,70 +3110,7 @@ { "comment": "Predicate in apply join which is merged", "query": "select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "user_extra_user_id": 1 - }, - "TableName": "`user`_user_extra_user_metadata", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "user_col": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` where `user`.textcol1 = 'alice@gmail.com'", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.user_id 
from user_extra where 1 != 1", - "Query": "select user_extra.user_id from user_extra where user_extra.col = :user_col", - "Table": "user_extra" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_metadata.user_id from user_metadata where 1 != 1", - "Query": "select user_metadata.user_id from user_metadata where user_metadata.user_id = :user_extra_user_id", - "Table": "user_metadata", - "Values": [ - ":user_extra_user_id" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'", "Instructions": { @@ -5382,26 +3156,7 @@ { "comment": "Join across multiple tables, with conditions on different vindexes, but mergeable through join predicates", "query": "SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id from `user` join music_extra on `user`.id = music_extra.user_id join music on music_extra.user_id = music.user_id where 1 != 1", - "Query": "select `user`.id from `user` join music_extra on `user`.id = music_extra.user_id join music on music_extra.user_id = music.user_id where `user`.id = 123 and music.id = 456", - "Table": "`user`, music_extra, music", - "Values": [ - "INT64(123)" - ], - "Vindex": 
"user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456", "Instructions": { @@ -5429,61 +3184,7 @@ { "comment": "SQL_CALC_FOUND_ROWS with vindex lookup", "query": "select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2", - "Instructions": { - "OperatorType": "SQL_CALC_FOUND_ROWS", - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(2)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, `name`, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|2) ASC", - "Query": "select id, `name`, weight_string(id) from `user` where `name` = 'aa' order by id asc limit :__upper_limit", - "ResultColumns": 2, - "Table": "`user`", - "Values": [ - "VARCHAR(\"aa\")" - ], - "Vindex": "name_user_map" - } - ] - }, - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from `user` where 1 != 1", - "Query": "select count(*) from `user` where `name` = 'aa'", - "Table": "`user`", - "Values": [ - "VARCHAR(\"aa\")" - ], - "Vindex": "name_user_map" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2", "Instructions": { @@ -5594,22 +3295,7 @@ { "comment": "`None` route being merged with another route via join predicate on Vindex columns", "query": "SELECT `music`.id FROM `music` INNER 
JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT `music`.id FROM `music` INNER JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music join `user` on music.user_id = `user`.id where 1 != 1", - "Query": "select music.id from music join `user` on music.user_id = `user`.id where music.user_id in (null) and `user`.id = 5", - "Table": "music, `user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT `music`.id FROM `music` INNER JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5", "Instructions": { @@ -5632,51 +3318,7 @@ { "comment": "Treating single value tuples as `EqualUnique` routes", "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in ::__vals", - "Table": "music", - "Values": [ - "(INT64(5))" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 
= 1 and music.id in ::__sq1", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5", "Instructions": { @@ -5687,7 +3329,7 @@ "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (5)) and music.user_id = 5", + "Query": "select music.id from music where music.user_id = 5 and music.id in (select music.id from music where music.user_id in (5))", "Table": "music", "Values": [ "INT64(5)" @@ -5702,65 +3344,21 @@ { "comment": "Subquery with `IN` condition using columns with matching lookup vindexes", "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))", "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in ::__vals", - "Table": "music", - "Values": [ - "(INT64(1), INT64(2), INT64(3))" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - 
"gen4-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))", - "Instructions": { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in ::__vals)", - "Table": "music", - "Values": [ - "(INT64(1), INT64(2), INT64(3))" + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (1, 2, 3))", + "Table": "music", + "Values": [ + "(INT64(1), INT64(2), INT64(3))" ], "Vindex": "user_index" }, @@ -5772,51 +3370,7 @@ { "comment": "Subquery with `IN` condition using columns with matching lookup vindexes, with derived table", "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from (select music.id from music where 1 != 1) as _inner where 1 != 1", - "Query": "select * from (select music.id from music where music.user_id in ::__vals) as _inner", - "Table": "music", - "Values": [ - "(INT64(1), INT64(2), INT64(3))" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - 
"Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)", "Instructions": { @@ -5827,7 +3381,7 @@ "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select _inner.id from (select music.id from music where music.user_id in ::__vals) as _inner)", + "Query": "select music.id from music where music.id in (select _inner.id from (select music.id from music where music.user_id in (1, 2, 3)) as _inner)", "Table": "music", "Values": [ "(INT64(1), INT64(2), INT64(3))" @@ -5842,47 +3396,7 @@ { "comment": "Subquery with `IN` condition using columns with matching lookup vindexes, with inner scatter query", "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.foo = 'bar'", - "Table": "music" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music 
where 1 != 1", - "Query": "select music.id from music where music.user_id in ::__vals and :__sq_has_values1 = 1 and music.id in ::__sq1", - "Table": "music", - "Values": [ - "(INT64(3), INT64(4), INT64(5))" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)", "Instructions": { @@ -5893,7 +3407,7 @@ "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where music.foo = 'bar') and music.user_id in ::__vals", + "Query": "select music.id from music where music.user_id in ::__vals and music.id in (select music.id from music where music.foo = 'bar')", "Table": "music", "Values": [ "(INT64(3), INT64(4), INT64(5))" @@ -5908,51 +3422,7 @@ { "comment": "Subquery with `IN` condition using columns with matching lookup vindexes", "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in ::__vals", - "Table": "music", - "Values": [ - "(INT64(1), INT64(2), INT64(3))" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from 
music where 1 != 1", - "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5", "Instructions": { @@ -5963,7 +3433,7 @@ "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (1, 2, 3)) and music.user_id = 5", + "Query": "select music.id from music where music.user_id = 5 and music.id in (select music.id from music where music.user_id in (1, 2, 3))", "Table": "music", "Values": [ "INT64(5)" @@ -5978,47 +3448,7 @@ { "comment": "Subquery with `IN` condition using columns with matching lookup vindexes, but not a top level predicate", "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in ::__vals", - "Table": "music", - "Values": [ - "(INT64(1), INT64(2), INT64(3))" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - 
"Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__sq1 or music.user_id = 5", - "Table": "music" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5", "Instructions": { @@ -6040,47 +3470,7 @@ { "comment": "`IN` comparison on Vindex with `None` subquery, as routing predicate", "query": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in (null)", - "Table": "music" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5", "Instructions": { @@ -6091,7 +3481,7 @@ "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where 
music.user_id in (null)) and music.user_id = 5", + "Query": "select music.id from music where music.user_id = 5 and music.id in (select music.id from music where music.user_id in (null))", "Table": "music" }, "TablesUsed": [ @@ -6102,705 +3492,19 @@ { "comment": "`IN` comparison on Vindex with `None` subquery, as non-routing predicate", "query": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in (null)", - "Table": "music" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__sq1 or music.user_id = 5", - "Table": "music" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5", "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) or music.user_id = 5", - "Table": "music" - }, - "TablesUsed": [ - "user.music" - ] - } - }, - { - 
"comment": "Mergeable scatter subquery", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.genre = 'pop'", - "Table": "music" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where music.genre = 'pop')", - "Table": "music" - }, - "TablesUsed": [ - "user.music" - ] - } - }, - { - "comment": "Mergeable scatter subquery with `GROUP BY` on unique vindex column", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY 
music.id)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1 group by music.id", - "Query": "select music.id from music where music.genre = 'pop' group by music.id", - "Table": "music" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where music.genre = 'pop' group by music.id)", - "Table": "music" - }, - "TablesUsed": [ - "user.music" - ] - } - }, - { - "comment": "Unmergeable scatter subquery with `GROUP BY` on-non vindex column", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.genre)", - "v3-plan": "VT12001: unsupported: in scatter query: GROUP BY column must reference column in SELECT list", - "gen4-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.genre)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - 
"__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "random(0) AS id", - "GroupBy": "(1|2)", - "ResultColumns": 1, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id, music.genre, weight_string(music.genre) from music where 1 != 1 group by music.genre, weight_string(music.genre)", - "OrderBy": "(1|2) ASC", - "Query": "select music.id, music.genre, weight_string(music.genre) from music where music.genre = 'pop' group by music.genre, weight_string(music.genre) order by music.genre asc", - "Table": "music" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - }, - "TablesUsed": [ - "user.music" - ] - } - }, - { - "comment": "Unmergeable scatter subquery with LIMIT", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.genre = 'pop' limit :__upper_limit", - "Table": "music" - } - ] - }, - { - "OperatorType": "Route", - "Variant": 
"IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.genre = 'pop' limit :__upper_limit", - "Table": "music" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - }, - "TablesUsed": [ - "user.music" - ] - } - }, - { - "comment": "Mergeable subquery with `MAX` aggregate and grouped by unique vindex", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": 
"IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select max(music.id) from music where 1 != 1 group by music.user_id", - "Query": "select max(music.id) from music where music.user_id in ::__vals group by music.user_id", - "Table": "music", - "Values": [ - "(INT64(5), INT64(6))" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select max(music.id) from music where music.user_id in ::__vals group by music.user_id)", - "Table": "music", - "Values": [ - "(INT64(5), INT64(6))" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.music" - ] - } - }, - { - "comment": "Unmergeable subquery with `MAX` aggregate", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "max(0)", - "Inputs": [ - { - "OperatorType": "Route", - 
"Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select max(music.id) from music where 1 != 1", - "Query": "select max(music.id) from music where music.user_id in ::__vals", - "Table": "music", - "Values": [ - "(INT64(5), INT64(6))" - ], - "Vindex": "user_index" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "max(0) AS max(music.id)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select max(music.id) from music where 1 != 1", - "Query": "select max(music.id) from music where music.user_id in ::__vals", - "Table": "music", - "Values": [ - "(INT64(5), INT64(6))" - ], - "Vindex": "user_index" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - }, - "TablesUsed": [ - "user.music" - ] - } - }, - { - "comment": "Mergeable subquery with `MAX` aggregate with `EqualUnique` route operator", - "query": "SELECT music.id FROM music WHERE 
music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select max(music.id) from music where 1 != 1", - "Query": "select max(music.id) from music where music.user_id = 5", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select max(music.id) from music where 1 != 1", - "Query": "select max(music.id) from music where music.user_id = 5", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": 
[ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - }, - "TablesUsed": [ - "user.music" - ] - } - }, - { - "comment": "Mergeable subquery with `LIMIT` due to `EqualUnique` route", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select max(music.id) from music where 1 != 1", - "Query": "select max(music.id) from music where music.user_id = 5 limit 10", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select max(music.id) from music where 1 != 1", - "Query": "select max(music.id) from music where music.user_id = 5 limit 10", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - 
"OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) or music.user_id = 5", + "Table": "music" }, "TablesUsed": [ "user.music" @@ -6808,69 +3512,21 @@ } }, { - "comment": "Mergeable subquery with multiple levels of derived statements", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from (select * from (select music.id from music where 1 != 1) as subquery_for_limit where 1 != 1) as subquery_for_limit where 1 != 1", - "Query": "select * from (select * from (select music.id from music where music.user_id = 5 limit 10) as subquery_for_limit) as subquery_for_limit", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - 
"FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { + "comment": "Mergeable scatter subquery", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')", "Instructions": { "OperatorType": "Route", - "Variant": "EqualUnique", + "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select subquery_for_limit.id from (select subquery_for_limit.id from (select music.id from music where music.user_id = 5 limit 10) as subquery_for_limit) as subquery_for_limit)", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" + "Query": "select music.id from music where music.id in (select music.id from music where music.genre = 'pop')", + "Table": "music" }, "TablesUsed": [ "user.music" @@ -6878,91 +3534,21 @@ } }, { - "comment": "Mergeable subquery with multiple levels of derived statements, using a single value `IN` predicate", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) subquery_for_limit) subquery_for_limit)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) 
subquery_for_limit) subquery_for_limit)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in ::__vals limit :__upper_limit", - "Table": "music", - "Values": [ - "(INT64(5))" - ], - "Vindex": "user_index" - } - ] - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { + "comment": "Mergeable scatter subquery with `GROUP BY` on unique vindex column", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) subquery_for_limit) subquery_for_limit)", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)", "Instructions": { "OperatorType": "Route", - "Variant": "EqualUnique", + "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in 
(select subquery_for_limit.id from (select subquery_for_limit.id from (select music.id from music where music.user_id in (5) limit 10) as subquery_for_limit) as subquery_for_limit)", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" + "Query": "select music.id from music where music.id in (select music.id from music where music.genre = 'pop' group by music.id)", + "Table": "music" }, "TablesUsed": [ "user.music" @@ -6970,57 +3556,42 @@ } }, { - "comment": "Unmergeable subquery with multiple levels of derived statements, using a multi value `IN` predicate", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) subquery_for_limit)", - "v3-plan": { + "comment": "Unmergeable scatter subquery with `GROUP BY` on-non vindex column", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.genre)", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) subquery_for_limit)", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.genre)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "InputName": "SubQuery", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "any_value(0) AS id", + "GroupBy": "(1|2)", "Inputs": [ { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": 
"IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in ::__vals limit :__upper_limit", - "Table": "music", - "Values": [ - "(INT64(5), INT64(6))" - ], - "Vindex": "user_index" - } - ] - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id, music.genre, weight_string(music.genre) from music where 1 != 1 group by music.genre, weight_string(music.genre)", + "OrderBy": "(1|2) ASC", + "Query": "select music.id, music.genre, weight_string(music.genre) from music where music.genre = 'pop' group by music.genre, weight_string(music.genre) order by music.genre asc", + "Table": "music" } ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -7028,7 +3599,7 @@ "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", + "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", "Table": "music", "Values": [ "::__sq1" @@ -7036,49 +3607,46 @@ "Vindex": "music_user_map" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "Unmergeable scatter subquery with LIMIT", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) subquery_for_limit)", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": 
"PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "InputName": "SubQuery", + "OperatorType": "Limit", + "Count": "INT64(10)", "Inputs": [ { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in ::__vals limit :__upper_limit", - "Table": "music", - "Values": [ - "(INT64(5), INT64(6))" - ], - "Vindex": "user_index" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where music.genre = 'pop' limit :__upper_limit", + "Table": "music" } ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -7086,7 +3654,7 @@ "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", + "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", "Table": "music", "Values": [ "::__sq1" @@ -7101,53 +3669,37 @@ } }, { - "comment": "Unmergeable subquery with multiple levels of derived statements", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)", - "v3-plan": { + "comment": "Mergeable subquery with `MAX` aggregate and grouped by unique vindex", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT 
* FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 + "InputName": "SubQuery", + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select max(music.id) from music where 1 != 1 group by music.user_id", + "Query": "select max(music.id) from music where music.user_id in ::__vals group by music.user_id", + "Table": "music", + "Values": [ + "(INT64(5), INT64(6))" ], - "Inputs": [ - { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music limit :__upper_limit", - "Table": "music" - } - ] - } - ] - } - ] + "Vindex": "user_index" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -7155,7 +3707,7 @@ "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", + "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", "Table": "music", "Values": [ "::__sq1" @@ -7163,45 +3715,51 @@ "Vindex": "music_user_map" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "Unmergeable subquery with `MAX` aggregate", + "query": "SELECT music.id FROM music WHERE music.id IN 
(SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "InputName": "SubQuery", + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "max(0|1) AS max(music.id)", "Inputs": [ { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music limit :__upper_limit", - "Table": "music" - } - ] + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select max(music.id), weight_string(music.id) from music where 1 != 1 group by weight_string(music.id)", + "Query": "select max(music.id), weight_string(music.id) from music where music.user_id in ::__vals group by weight_string(music.id)", + "Table": "music", + "Values": [ + "(INT64(5), INT64(6))" + ], + "Vindex": "user_index" } ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -7209,7 +3767,7 @@ "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", + "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", "Table": "music", "Values": [ "::__sq1" @@ -7224,31 
+3782,37 @@ } }, { - "comment": "`None` subquery as top level predicate - outer query changes from `Scatter` to `None` on merge", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))", - "v3-plan": { + "comment": "Mergeable subquery with `MAX` aggregate with `EqualUnique` route operator", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", - "Variant": "None", + "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in (null)", - "Table": "music" + "FieldQuery": "select max(music.id) from music where 1 != 1", + "Query": "select max(music.id) from music where music.user_id = 5", + "Table": "music", + "Values": [ + "INT64(5)" + ], + "Vindex": "user_index" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -7256,7 +3820,7 @@ "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", + "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", "Table": "music", "Values": [ "::__sq1" @@ -7264,21 +3828,6 @@ "Vindex": "music_user_map" } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id 
FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))", - "Instructions": { - "OperatorType": "Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null))", - "Table": "music" }, "TablesUsed": [ "user.music" @@ -7286,61 +3835,78 @@ } }, { - "comment": "`None` subquery as top level predicate - outer query changes from `EqualUnique` to `None` on merge", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5", - "v3-plan": { + "comment": "Mergeable subquery with `LIMIT` due to `EqualUnique` route", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", - "Variant": "None", + "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in (null)", - "Table": "music" + "FieldQuery": "select max(music.id) from music where 1 != 1", + "Query": "select max(music.id) from music where music.user_id = 5 limit 10", + "Table": "music", + "Values": [ + "INT64(5)" + ], + "Vindex": "user_index" }, { + "InputName": "Outer", 
"OperatorType": "Route", - "Variant": "EqualUnique", + "Variant": "IN", "Keyspace": { "Name": "user", "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1", + "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", "Table": "music", "Values": [ - "INT64(5)" + "::__sq1" ], - "Vindex": "user_index" + "Vindex": "music_user_map" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "Mergeable subquery with multiple levels of derived statements", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)", "Instructions": { "OperatorType": "Route", - "Variant": "None", + "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) and music.user_id = 5", - "Table": "music" + "Query": "select music.id from music where music.id in (select subquery_for_limit.id from (select subquery_for_limit.id from (select music.id from music where music.user_id = 5 limit 10) as subquery_for_limit) as subquery_for_limit)", + "Table": "music", + "Values": [ + "INT64(5)" + ], + "Vindex": "user_index" }, "TablesUsed": [ "user.music" @@ -7348,56 +3914,160 @@ } }, { - "comment": "`None` subquery nested inside 
`OR` expression - outer query keeps routing information", - "query": "SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)", - "v3-plan": { + "comment": "Mergeable subquery with multiple levels of derived statements, using a single value `IN` predicate", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) subquery_for_limit) subquery_for_limit)", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) subquery_for_limit) subquery_for_limit)", + "Instructions": { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where music.id in (select subquery_for_limit.id from (select subquery_for_limit.id from (select music.id from music where music.user_id in (5) limit 10) as subquery_for_limit) as subquery_for_limit)", + "Table": "music", + "Values": [ + "INT64(5)" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "Unmergeable subquery with multiple levels of derived statements, using a multi value `IN` predicate", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) subquery_for_limit)", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) 
subquery_for_limit)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", + "OperatorType": "Limit", + "Count": "INT64(10)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select subquery_for_limit.id from (select subquery_for_limit.id from (select music.id from music where 1 != 1) as subquery_for_limit where 1 != 1) as subquery_for_limit where 1 != 1", + "Query": "select subquery_for_limit.id from (select subquery_for_limit.id from (select music.id from music where music.user_id in ::__vals) as subquery_for_limit limit :__upper_limit) as subquery_for_limit limit :__upper_limit", + "Table": "music", + "Values": [ + "(INT64(5), INT64(6))" + ], + "Vindex": "user_index" + } + ] + }, + { + "InputName": "Outer", "OperatorType": "Route", - "Variant": "None", + "Variant": "IN", "Keyspace": { "Name": "user", "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in (null)", - "Table": "music" + "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", + "Table": "music", + "Values": [ + "::__sq1" + ], + "Vindex": "music_user_map" + } + ] + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "Unmergeable subquery with multiple levels of derived statements", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)", + "Instructions": { + "OperatorType": "UncorrelatedSubquery", + 
"Variant": "PulloutIn", + "PulloutVars": [ + "__sq_has_values", + "__sq1" + ], + "Inputs": [ + { + "InputName": "SubQuery", + "OperatorType": "Limit", + "Count": "INT64(10)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select subquery_for_limit.id from (select subquery_for_limit.id from (select music.id from music where 1 != 1) as subquery_for_limit where 1 != 1) as subquery_for_limit where 1 != 1", + "Query": "select subquery_for_limit.id from (select subquery_for_limit.id from (select music.id from music) as subquery_for_limit limit :__upper_limit) as subquery_for_limit limit :__upper_limit", + "Table": "music" + } + ] }, { + "InputName": "Outer", "OperatorType": "Route", - "Variant": "Scatter", + "Variant": "IN", "Keyspace": { "Name": "user", "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__sq1 or music.user_id = 5", - "Table": "music" + "Query": "select music.id from music where :__sq_has_values and music.id in ::__vals", + "Table": "music", + "Values": [ + "::__sq1" + ], + "Vindex": "music_user_map" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "`None` subquery as top level predicate - outer query changes from `Scatter` to `None` on merge", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))", "Instructions": { "OperatorType": "Route", - "Variant": "Scatter", + "Variant": "None", "Keyspace": { "Name": "user", "Sharded": true }, "FieldQuery": 
"select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) or music.user_id = 5", + "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null))", "Table": "music" }, "TablesUsed": [ @@ -7406,50 +4076,53 @@ } }, { - "comment": "Joining with a subquery that uses an aggregate column and an `EqualUnique` route can be merged together", - "query": "SELECT music.id FROM music INNER JOIN (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other ON other.maxt = music.id", - "v3-plan": { + "comment": "`None` subquery as top level predicate - outer query changes from `EqualUnique` to `None` on merge", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music INNER JOIN (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other ON other.maxt = music.id", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5", "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "music_id": 0 + "OperatorType": "Route", + "Variant": "None", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "music_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music", - "Table": "music" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from (select max(id) as maxt from music where 1 != 1) as other where 1 != 1", - "Query": "select 1 from (select max(id) as 
maxt from music where music.user_id = 5) as other where other.maxt = :music_id", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where music.user_id = 5 and music.id in (select music.id from music where music.user_id in (null))", + "Table": "music" + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "`None` subquery nested inside `OR` expression - outer query keeps routing information", + "query": "SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) or music.user_id = 5", + "Table": "music" + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "Joining with a subquery that uses an aggregate column and an `EqualUnique` route can be merged together", + "query": "SELECT music.id FROM music INNER JOIN (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other ON other.maxt = music.id", + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM music INNER JOIN (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other ON other.maxt = music.id", "Instructions": { @@ -7459,8 +4132,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music, (select max(id) as maxt from music where 1 != 1) as other where 1 != 1", - "Query": "select music.id from music, (select max(id) as maxt from music 
where music.user_id = 5) as other where other.maxt = music.id", + "FieldQuery": "select music.id from (select max(id) as maxt from music where 1 != 1) as other, music where 1 != 1", + "Query": "select music.id from (select max(id) as maxt from music where music.user_id = 5) as other, music where other.maxt = music.id", "Table": "music", "Values": [ "INT64(5)" @@ -7475,22 +4148,7 @@ { "comment": "Joining with a subquery that uses an `EqualUnique` route can be merged", "query": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id = 5) other ON other.id = music.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id = 5) other ON other.id = music.id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music join (select id from music where 1 != 1) as other on other.id = music.id where 1 != 1", - "Query": "select music.id from music join (select id from music where music.user_id = 5) as other on other.id = music.id", - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id = 5) other ON other.id = music.id", "Instructions": { @@ -7500,8 +4158,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music, (select id from music where 1 != 1) as other where 1 != 1", - "Query": "select music.id from music, (select id from music where music.user_id = 5) as other where other.id = music.id", + "FieldQuery": "select music.id from (select id from music where 1 != 1) as other, music where 1 != 1", + "Query": "select music.id from (select id from music where music.user_id = 5) as other, music where other.id = music.id", "Table": "music", "Values": [ "INT64(5)" @@ -7516,22 +4174,7 @@ { "comment": "Joining with a subquery 
that has an `IN` route can be merged", "query": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id IN (5, 6, 7)) other ON other.id = music.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id IN (5, 6, 7)) other ON other.id = music.id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music join (select id from music where 1 != 1) as other on other.id = music.id where 1 != 1", - "Query": "select music.id from music join (select id from music where music.user_id in (5, 6, 7)) as other on other.id = music.id", - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id IN (5, 6, 7)) other ON other.id = music.id", "Instructions": { @@ -7541,8 +4184,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music, (select id from music where 1 != 1) as other where 1 != 1", - "Query": "select music.id from music, (select id from music where music.user_id in ::__vals) as other where other.id = music.id", + "FieldQuery": "select music.id from (select id from music where 1 != 1) as other, music where 1 != 1", + "Query": "select music.id from (select id from music where music.user_id in ::__vals) as other, music where other.id = music.id", "Table": "music", "Values": [ "(INT64(5), INT64(6), INT64(7))" @@ -7557,8 +4200,7 @@ { "comment": "limit on the vtgate has to be executed on the LHS of a join", "query": "select id from user join (select user_id from user_extra limit 10) ue on user.id = ue.user_id", - "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user join (select user_id from user_extra limit 10) ue 
on user.id = ue.user_id", "Instructions": { @@ -7581,8 +4223,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select ue.user_id from (select user_id from user_extra where 1 != 1) as ue where 1 != 1", - "Query": "select ue.user_id from (select user_id from user_extra) as ue limit :__upper_limit", + "FieldQuery": "select user_id from (select user_id from user_extra where 1 != 1) as ue where 1 != 1", + "Query": "select user_id from (select user_id from user_extra) as ue limit :__upper_limit", "Table": "user_extra" } ] @@ -7613,31 +4255,30 @@ { "comment": "select user.a, t.b from user join (select id, count(*) b, req from user_extra group by req, id) as t on user.id = t.id", "query": "select user.a, t.b from user join (select id, count(*) b, req from user_extra group by req, id) as t on user.id = t.id", - "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.a, t.b from user join (select id, count(*) b, req from user_extra group by req, id) as t on user.id = t.id", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "R:0,L:1", + "JoinColumnIndexes": "R:0,L:0", "JoinVars": { - "t_id": 0 + "t_id": 1 }, "TableName": "user_extra_`user`", "Inputs": [ { "OperatorType": "SimpleProjection", "Columns": [ - 0, - 1 + 1, + 0 ], "Inputs": [ { "OperatorType": "Aggregate", "Variant": "Ordered", "Aggregates": "sum_count_star(1) AS b", - "GroupBy": "(0|3), (2|4)", + "GroupBy": "(2|3), (0|4)", "Inputs": [ { "OperatorType": "Route", @@ -7646,9 +4287,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, count(*) as b, req, weight_string(id), weight_string(req) from user_extra where 1 != 1 group by id, weight_string(id), req, weight_string(req)", - "OrderBy": "(0|3) ASC, (2|4) ASC", - "Query": "select id, count(*) as b, req, weight_string(id), weight_string(req) from user_extra group by id, weight_string(id), req, weight_string(req) 
order by id asc, req asc", + "FieldQuery": "select id, count(*) as b, req, weight_string(req), weight_string(id) from user_extra where 1 != 1 group by req, id, weight_string(req), weight_string(id)", + "OrderBy": "(2|3) ASC, (0|4) ASC", + "Query": "select id, count(*) as b, req, weight_string(req), weight_string(id) from user_extra group by req, id, weight_string(req), weight_string(id) order by req asc, id asc", "Table": "user_extra" } ] @@ -7678,66 +4319,10 @@ ] } }, - { - "comment": "cant switch sides for outer joins", - "query": "select id from user left join (select user_id from user_extra limit 10) ue on user.id = ue.user_id", - "plan": "VT12001: unsupported: LEFT JOIN with derived tables" - }, - { - "comment": "limit on both sides means that we can't evaluate this at all", - "query": "select id from (select id from user limit 10) u join (select user_id from user_extra limit 10) ue on u.id = ue.user_id", - "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery", - "gen4-plan": "VT12001: unsupported: JOIN between derived tables" - }, { "comment": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id", "query": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "other_maxt": 0 - }, - "TableName": "music_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select other.maxt from (select max(id) as maxt from music where 1 != 1) as other where 1 != 1", - "Query": "select other.maxt from (select max(id) as 
maxt from music where music.user_id = 5) as other", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id = :other_maxt", - "Table": "music", - "Values": [ - ":other_maxt" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id", "Instructions": { @@ -7763,22 +4348,7 @@ { "comment": "Earlier columns are in scope in subqueries https://github.com/vitessio/vitess/issues/11246", "query": "SELECT 1 as x, (SELECT x)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT 1 as x, (SELECT x)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 as x, (select x from dual where 1 != 1) from dual where 1 != 1", - "Query": "select 1 as x, (select x from dual) from dual", - "Table": "dual" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT 1 as x, (SELECT x)", "Instructions": { @@ -7800,22 +4370,7 @@ { "comment": "(OR 1 = 0) doesn't cause unnecessary scatter", "query": "select * from user where id = 1 or 1 = 0", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where id = 1 or 1 = 0", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 1 or 1 = 0", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user where id = 1 or 1 = 0", "Instructions": { @@ -7841,22 
+4396,7 @@ { "comment": "(OR 2 < 1) doesn't cause unnecessary scatter", "query": "select * from user where id = 1 or 2 < 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where id = 1 or 2 < 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 1 or 2 < 1", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user where id = 1 or 2 < 1", "Instructions": { @@ -7882,22 +4422,7 @@ { "comment": "query with a derived table and dual table in unsharded keyspace", "query": "SELECT * FROM unsharded_a AS t1 JOIN (SELECT trim((SELECT MAX(name) FROM unsharded_a)) AS name) AS t2 WHERE t1.name >= t2.name ORDER BY t1.name ASC LIMIT 1;", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM unsharded_a AS t1 JOIN (SELECT trim((SELECT MAX(name) FROM unsharded_a)) AS name) AS t2 WHERE t1.name >= t2.name ORDER BY t1.name ASC LIMIT 1;", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from unsharded_a as t1 join (select trim((select max(`name`) from unsharded_a where 1 != 1)) as `name` from dual where 1 != 1) as t2 where 1 != 1", - "Query": "select * from unsharded_a as t1 join (select trim((select max(`name`) from unsharded_a)) as `name` from dual) as t2 where t1.`name` >= t2.`name` order by t1.`name` asc limit 1", - "Table": "unsharded_a, dual" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM unsharded_a AS t1 JOIN (SELECT trim((SELECT MAX(name) FROM unsharded_a)) AS name) AS t2 WHERE t1.name >= t2.name ORDER BY t1.name ASC LIMIT 1;", "Instructions": { @@ -7920,8 +4445,7 @@ { "comment": "subquery having join table on clause, using column reference of outer select table", 
"query": "select (select 1 from user u1 join user u2 on u1.id = u2.id and u1.id = u3.id) subquery from user u3 where u3.id = 1", - "v3-plan": "VT03019: column u3.id not found", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select (select 1 from user u1 join user u2 on u1.id = u2.id and u1.id = u3.id) subquery from user u3 where u3.id = 1", "Instructions": { @@ -7947,22 +4471,7 @@ { "comment": "allow last_insert_id with argument", "query": "select last_insert_id(id) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select last_insert_id(id) from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select last_insert_id(id) from `user` where 1 != 1", - "Query": "select last_insert_id(id) from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select last_insert_id(id) from user", "Instructions": { @@ -7984,22 +4493,7 @@ { "comment": "merge subquery using MAX and join into single route", "query": "select 1 from user join music_extra on user.id = music_extra.user_id where music_extra.music_id = (select max(music_id) from music_extra where user_id = user.id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from user join music_extra on user.id = music_extra.user_id where music_extra.music_id = (select max(music_id) from music_extra where user_id = user.id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` join music_extra on `user`.id = music_extra.user_id where 1 != 1", - "Query": "select 1 from `user` join music_extra on `user`.id = music_extra.user_id where music_extra.music_id = (select max(music_id) from music_extra where user_id = `user`.id)", - "Table": "`user`, music_extra" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": 
"select 1 from user join music_extra on user.id = music_extra.user_id where music_extra.music_id = (select max(music_id) from music_extra where user_id = user.id)", "Instructions": { @@ -8010,7 +4504,7 @@ "Sharded": true }, "FieldQuery": "select 1 from `user`, music_extra where 1 != 1", - "Query": "select 1 from `user`, music_extra where music_extra.music_id = (select max(music_id) from music_extra where user_id = `user`.id) and `user`.id = music_extra.user_id", + "Query": "select 1 from `user`, music_extra where `user`.id = music_extra.user_id and music_extra.music_id = (select max(music_id) from music_extra where user_id = `user`.id)", "Table": "`user`, music_extra" }, "TablesUsed": [ @@ -8022,26 +4516,7 @@ { "comment": "Query with non-plannable lookup vindex", "query": "SELECT * FROM user_metadata WHERE user_metadata.non_planable = 'foo'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM user_metadata WHERE user_metadata.non_planable = 'foo'", - "Instructions": { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from user_metadata where 1 != 1", - "Query": "select * from user_metadata where user_metadata.non_planable = 'foo'", - "Table": "user_metadata", - "Values": [ - "VARCHAR(\"foo\")" - ], - "Vindex": "non_planable_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM user_metadata WHERE user_metadata.non_planable = 'foo'", "Instructions": { @@ -8067,52 +4542,7 @@ { "comment": "join query with lookup and join on different vindex column", "query": "select u.id from user u, user_metadata um where u.name = 'foo' and u.id = um.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.id from user u, user_metadata um where u.name = 'foo' and u.id = um.user_id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "u_id": 0 - }, - "TableName": 
"`user`_user_metadata", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id from `user` as u where 1 != 1", - "Query": "select u.id from `user` as u where u.`name` = 'foo'", - "Table": "`user`", - "Values": [ - "VARCHAR(\"foo\")" - ], - "Vindex": "name_user_map" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_metadata as um where 1 != 1", - "Query": "select 1 from user_metadata as um where um.user_id = :u_id", - "Table": "user_metadata", - "Values": [ - ":u_id" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id from user u, user_metadata um where u.name = 'foo' and u.id = um.user_id", "Instructions": { @@ -8164,26 +4594,7 @@ { "comment": "pick email as vindex lookup", "query": "select * from customer where email = 'a@mail.com'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from customer where email = 'a@mail.com'", - "Instructions": { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from customer where 1 != 1", - "Query": "select * from customer where email = 'a@mail.com'", - "Table": "customer", - "Values": [ - "VARCHAR(\"a@mail.com\")" - ], - "Vindex": "unq_lkp_vdx" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from customer where email = 'a@mail.com'", "Instructions": { @@ -8234,26 +4645,7 @@ { "comment": "phone is in backfill vindex - not selected for vindex lookup", "query": "select * from customer where phone = 123456", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from customer where phone = 123456", - "Instructions": { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - 
"FieldQuery": "select * from customer where 1 != 1", - "Query": "select * from customer where phone = 123456", - "Table": "customer", - "Values": [ - "INT64(123456)" - ], - "Vindex": "unq_lkp_bf_vdx" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from customer where phone = 123456", "Instructions": { @@ -8275,26 +4667,7 @@ { "comment": "email vindex is costly than phone vindex - but phone vindex is backfiling hence ignored", "query": "select * from customer where email = 'a@mail.com' and phone = 123456", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from customer where email = 'a@mail.com' and phone = 123456", - "Instructions": { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from customer where 1 != 1", - "Query": "select * from customer where email = 'a@mail.com' and phone = 123456", - "Table": "customer", - "Values": [ - "INT64(123456)" - ], - "Vindex": "unq_lkp_bf_vdx" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from customer where email = 'a@mail.com' and phone = 123456", "Instructions": { @@ -8345,26 +4718,7 @@ { "comment": "predicate order changed: email vindex is costly than phone vindex - but phone vindex is backfiling hence ignored", "query": "select * from customer where phone = 123456 and email = 'a@mail.com'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from customer where phone = 123456 and email = 'a@mail.com'", - "Instructions": { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from customer where 1 != 1", - "Query": "select * from customer where phone = 123456 and email = 'a@mail.com'", - "Table": "customer", - "Values": [ - "INT64(123456)" - ], - "Vindex": "unq_lkp_bf_vdx" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from customer 
where phone = 123456 and email = 'a@mail.com'", "Instructions": { @@ -8412,4 +4766,4 @@ ] } } -] +] \ No newline at end of file diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.json b/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.json index 5817157752b..11a8d8c0b5b 100644 --- a/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.json +++ b/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.json @@ -2,47 +2,7 @@ { "comment": "EXISTS subquery when the default ks is different than the inner query", "query": "select exists(select * from user where id = 5)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select exists(select * from user where id = 5)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutExists", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where id = 5 limit 1", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "second_user", - "Sharded": true - }, - "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1", - "Query": "select :__sq_has_values1 from dual", - "Table": "dual" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select exists(select * from user where id = 5)", "Instructions": { @@ -53,7 +13,7 @@ "Sharded": true }, "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1", - "Query": "select exists (select 1 from `user` where id = 5 limit 1) from dual", + "Query": "select exists (select 1 from `user` where id = 5) from dual", "Table": "dual", "Values": [ "INT64(5)" @@ -66,4 +26,4 @@ ] } } -] \ No newline at end of file +] diff --git 
a/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.json b/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.json index 822ed6c2307..9cd549c11d6 100644 --- a/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.json +++ b/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.json @@ -2,7 +2,7 @@ { "comment": "EXISTS subquery", "query": "select exists(select * from user where id = 5)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select exists(select * from user where id = 5)", "Instructions": { @@ -13,26 +13,7 @@ "Sharded": true }, "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1", - "Query": "select exists (select 1 from `user` where id = 5 limit 1) from dual", - "Table": "dual", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select exists(select * from user where id = 5)", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1", - "Query": "select exists (select 1 from `user` where id = 5 limit 1) from dual", + "Query": "select exists (select 1 from `user` where id = 5) from dual", "Table": "dual", "Values": [ "INT64(5)" @@ -45,4 +26,4 @@ ] } } -] \ No newline at end of file +] diff --git a/go/vt/vtgate/planbuilder/testdata/show_cases.json b/go/vt/vtgate/planbuilder/testdata/show_cases.json index a70827f9df2..f13a8f38e55 100644 --- a/go/vt/vtgate/planbuilder/testdata/show_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/show_cases.json @@ -165,9 +165,9 @@ "Charset": "VARCHAR", "Default collation": "VARCHAR", "Description": "VARCHAR", - "Maxlen": "INT32" + "Maxlen": "UINT32" }, - "RowCount": 2 + "RowCount": 37 } } }, @@ -525,7 +525,7 @@ "Sharded": true }, "TargetDestination": "AllShards()", - "Query": 
"SELECT * FROM _vt.schema_migrations where migration_uuid LIKE '%format' OR migration_context LIKE '%format' OR migration_status LIKE '%format'" + "Query": "show vitess_migrations from `user` like '%format'" } } }, @@ -542,7 +542,7 @@ "Sharded": true }, "TargetDestination": "AllShards()", - "Query": "SELECT * FROM _vt.schema_migrations where id = 5" + "Query": "show vitess_migrations from `user` where id = 5" } } }, diff --git a/go/vt/vtgate/planbuilder/testdata/symtab_cases.json b/go/vt/vtgate/planbuilder/testdata/symtab_cases.json index 9dc83122ce0..db9fe66d41e 100644 --- a/go/vt/vtgate/planbuilder/testdata/symtab_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/symtab_cases.json @@ -2,44 +2,7 @@ { "comment": "Tests in this file are for testing symtab functionality\n#\n# Column names need not be qualified if they are predefined in vschema and unambiguous.", "query": "select predef2, predef3 from user join unsharded on predef2 = predef3", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select predef2, predef3 from user join unsharded on predef2 = predef3", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "predef2": 0 - }, - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select predef2 from `user` where 1 != 1", - "Query": "select predef2 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select predef3 from unsharded where 1 != 1", - "Query": "select predef3 from unsharded where predef3 = :predef2", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select predef2, predef3 from user join unsharded on predef2 = predef3", "Instructions": { @@ -84,7 +47,6 @@ { "comment": "predef1 is in both 
user and unsharded. So, it's ambiguous.", "query": "select predef1, predef3 from user join unsharded on predef1 = predef3", - "v3-plan": "VT03019: column predef1 not found", - "gen4-plan": "Column 'predef1' in field list is ambiguous" + "plan": "Column 'predef1' in field list is ambiguous" } ] diff --git a/go/vt/vtgate/planbuilder/testdata/sysschema_default.json b/go/vt/vtgate/planbuilder/testdata/sysschema_default.json index eec3c4709b9..1d25f0f60af 100644 --- a/go/vt/vtgate/planbuilder/testdata/sysschema_default.json +++ b/go/vt/vtgate/planbuilder/testdata/sysschema_default.json @@ -2,22 +2,7 @@ { "comment": "max_allowed_packet", "query": "select @@max_allowed_packet from dual", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select @@max_allowed_packet from dual", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select @@max_allowed_packet from dual where 1 != 1", - "Query": "select @@max_allowed_packet from dual", - "Table": "dual" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select @@max_allowed_packet from dual", "Instructions": { @@ -39,23 +24,7 @@ { "comment": "unqualified table name", "query": "select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'user' and c.table_schema = 'user' order by t.table_schema,t.table_name,c.column_name", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'user' and c.table_schema = 'user' order by t.table_schema,t.table_name,c.column_name", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 
t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t join information_schema.`columns` as c on c.table_schema = t.table_schema and c.table_name = t.table_name where 1 != 1", - "Query": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t join information_schema.`columns` as c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = :__vtschemaname /* VARCHAR */ and c.table_schema = :__vtschemaname /* VARCHAR */ order by t.table_schema asc, t.table_name asc, c.column_name asc", - "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"user\")]", - "Table": "information_schema.`tables`, information_schema.`columns`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'user' and c.table_schema = 'user' order by t.table_schema,t.table_name,c.column_name", "Instructions": { @@ -75,7 +44,7 @@ { "comment": "system schema query as a subquery", "query": "SELECT (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1);", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1);", "Instructions": { @@ -85,24 +54,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select (select 1 from information_schema.schemata where 1 != 1) from dual where 1 != 1", - "Query": "select (select 1 from information_schema.schemata where schema_name = :__vtschemaname /* VARCHAR */ limit 1) from dual", - "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]", - "Table": "dual" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "SELECT (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1);", - "Instructions": 
{ - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select (select 1 from information_schema.schemata where 1 != 1) from dual where 1 != 1", - "Query": "select (select 1 from information_schema.schemata where schema_name = :__vtschemaname /* VARCHAR */ limit 1) from dual", + "FieldQuery": "select (select 1 from information_schema.schemata where 1 != 1) as `(select 1 from information_schema.schemata where schema_name = 'MyDatabase' limit 1)` from dual where 1 != 1", + "Query": "select (select 1 from information_schema.schemata where schema_name = :__vtschemaname /* VARCHAR */ limit 1) as `(select 1 from information_schema.schemata where schema_name = 'MyDatabase' limit 1)` from dual", "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]", "Table": "dual" }, @@ -114,23 +67,7 @@ { "comment": "system schema query as a derived table", "query": "SELECT * from (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1) x", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * from (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1) x", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from (select 1 from information_schema.schemata where 1 != 1) as x where 1 != 1", - "Query": "select * from (select 1 from information_schema.schemata where schema_name = :__vtschemaname /* VARCHAR */ limit 1) as x", - "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]", - "Table": "information_schema.schemata" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * from (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1) x", "Instructions": { diff --git a/go/vt/vtgate/planbuilder/testdata/tpcc_cases.json b/go/vt/vtgate/planbuilder/testdata/tpcc_cases.json index a36319cb322..feaae4ec013 100644 --- 
a/go/vt/vtgate/planbuilder/testdata/tpcc_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/tpcc_cases.json @@ -2,26 +2,7 @@ { "comment": "TPC-C select join customer1 and warehouse1", "query": "SELECT c_discount, c_last, c_credit, w_tax FROM customer1 AS c JOIN warehouse1 AS w ON c_w_id=w_id WHERE w_id = 1 AND c_d_id = 15 AND c_id = 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT c_discount, c_last, c_credit, w_tax FROM customer1 AS c JOIN warehouse1 AS w ON c_w_id=w_id WHERE w_id = 1 AND c_d_id = 15 AND c_id = 10", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c_discount, c_last, c_credit, w_tax from customer1 as c join warehouse1 as w on c_w_id = w_id where 1 != 1", - "Query": "select c_discount, c_last, c_credit, w_tax from customer1 as c join warehouse1 as w on c_w_id = w_id where w_id = 1 and c_d_id = 15 and c_id = 10", - "Table": "customer1, warehouse1", - "Values": [ - "INT64(1)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT c_discount, c_last, c_credit, w_tax FROM customer1 AS c JOIN warehouse1 AS w ON c_w_id=w_id WHERE w_id = 1 AND c_d_id = 15 AND c_id = 10", "Instructions": { @@ -48,26 +29,7 @@ { "comment": "TPC-C select district1 for update", "query": "SELECT d_next_o_id, d_tax FROM district1 WHERE d_w_id = 15 AND d_id = 95 FOR UPDATE", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT d_next_o_id, d_tax FROM district1 WHERE d_w_id = 15 AND d_id = 95 FOR UPDATE", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select d_next_o_id, d_tax from district1 where 1 != 1", - "Query": "select d_next_o_id, d_tax from district1 where d_w_id = 15 and d_id = 95 for update", - "Table": "district1", - "Values": [ - "INT64(15)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": 
{ + "plan": { "QueryType": "SELECT", "Original": "SELECT d_next_o_id, d_tax FROM district1 WHERE d_w_id = 15 AND d_id = 95 FOR UPDATE", "Instructions": { @@ -93,29 +55,7 @@ { "comment": "TPC-C update district1 unique", "query": "UPDATE district1 SET d_next_o_id = 56 WHERE d_id = 9842 AND d_w_id= 8546", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE district1 SET d_next_o_id = 56 WHERE d_id = 9842 AND d_w_id= 8546", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update district1 set d_next_o_id = 56 where d_id = 9842 and d_w_id = 8546", - "Table": "district1", - "Values": [ - "INT64(8546)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.district1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE district1 SET d_next_o_id = 56 WHERE d_id = 9842 AND d_w_id= 8546", "Instructions": { @@ -191,26 +131,7 @@ { "comment": "TPC-C select unique item1", "query": "SELECT i_price, i_name, i_data FROM item1 WHERE i_id = 9654", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT i_price, i_name, i_data FROM item1 WHERE i_id = 9654", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select i_price, i_name, i_data from item1 where 1 != 1", - "Query": "select i_price, i_name, i_data from item1 where i_id = 9654", - "Table": "item1", - "Values": [ - "INT64(9654)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT i_price, i_name, i_data FROM item1 WHERE i_id = 9654", "Instructions": { @@ -236,26 +157,7 @@ { "comment": "TPC-C select stock1 for update", "query": "SELECT s_quantity, s_data, s_dist_01 s_dist FROM stock1 WHERE s_i_id = 2198 AND s_w_id = 89 FOR UPDATE", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT s_quantity, s_data, 
s_dist_01 s_dist FROM stock1 WHERE s_i_id = 2198 AND s_w_id = 89 FOR UPDATE", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where 1 != 1", - "Query": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where s_i_id = 2198 and s_w_id = 89 for update", - "Table": "stock1", - "Values": [ - "INT64(89)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT s_quantity, s_data, s_dist_01 s_dist FROM stock1 WHERE s_i_id = 2198 AND s_w_id = 89 FOR UPDATE", "Instructions": { @@ -281,29 +183,7 @@ { "comment": "TPC-C update stock1", "query": "UPDATE stock1 SET s_quantity = 894 WHERE s_i_id = 156 AND s_w_id= 6", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE stock1 SET s_quantity = 894 WHERE s_i_id = 156 AND s_w_id= 6", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update stock1 set s_quantity = 894 where s_i_id = 156 and s_w_id = 6", - "Table": "stock1", - "Values": [ - "INT64(6)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.stock1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE stock1 SET s_quantity = 894 WHERE s_i_id = 156 AND s_w_id= 6", "Instructions": { @@ -354,29 +234,7 @@ { "comment": "TPC-C update warehouse1 unique", "query": "UPDATE warehouse1 SET w_ytd = w_ytd + 946879 WHERE w_id = 3", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE warehouse1 SET w_ytd = w_ytd + 946879 WHERE w_id = 3", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update warehouse1 set w_ytd = w_ytd + 946879 where w_id = 3", - "Table": "warehouse1", - "Values": [ 
- "INT64(3)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.warehouse1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE warehouse1 SET w_ytd = w_ytd + 946879 WHERE w_id = 3", "Instructions": { @@ -402,26 +260,7 @@ { "comment": "TPC-C select warehouse1 unique", "query": "SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse1 WHERE w_id = 998", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse1 WHERE w_id = 998", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where 1 != 1", - "Query": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where w_id = 998", - "Table": "warehouse1", - "Values": [ - "INT64(998)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse1 WHERE w_id = 998", "Instructions": { @@ -447,29 +286,7 @@ { "comment": "TPC-C update district1 unique", "query": "UPDATE district1 SET d_ytd = d_ytd + 2 WHERE d_w_id = 89 AND d_id= 9", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE district1 SET d_ytd = d_ytd + 2 WHERE d_w_id = 89 AND d_id= 9", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update district1 set d_ytd = d_ytd + 2 where d_w_id = 89 and d_id = 9", - "Table": "district1", - "Values": [ - "INT64(89)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.district1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE district1 SET d_ytd = d_ytd + 2 WHERE d_w_id = 89 AND d_id= 9", "Instructions": { @@ -495,26 +312,7 @@ 
{ "comment": "TPC-C select district1 unique", "query": "SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district1 WHERE d_w_id = 896 AND d_id = 9", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district1 WHERE d_w_id = 896 AND d_id = 9", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where 1 != 1", - "Query": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where d_w_id = 896 and d_id = 9", - "Table": "district1", - "Values": [ - "INT64(896)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district1 WHERE d_w_id = 896 AND d_id = 9", "Instructions": { @@ -540,26 +338,7 @@ { "comment": "TPC-C select aggr from customer1", "query": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 5 AND c_d_id= 1 AND c_last='last'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 5 AND c_d_id= 1 AND c_last='last'", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1", - "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 5 and c_d_id = 1 and c_last = 'last'", - "Table": "customer1", - "Values": [ - "INT64(5)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 5 AND c_d_id= 1 AND c_last='last'", "Instructions": { @@ -585,26 +364,7 @@ { "comment": "TPC-C select customer1 order by", "query": "SELECT c_id FROM customer1 WHERE c_w_id = 
8 AND c_d_id = 5 AND c_last='item_last' ORDER BY c_first", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT c_id FROM customer1 WHERE c_w_id = 8 AND c_d_id = 5 AND c_last='item_last' ORDER BY c_first", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c_id from customer1 where 1 != 1", - "Query": "select c_id from customer1 where c_w_id = 8 and c_d_id = 5 and c_last = 'item_last' order by c_first asc", - "Table": "customer1", - "Values": [ - "INT64(8)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT c_id FROM customer1 WHERE c_w_id = 8 AND c_d_id = 5 AND c_last='item_last' ORDER BY c_first", "Instructions": { @@ -630,26 +390,7 @@ { "comment": "TPC-C select for update customer1 unique", "query": "SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since FROM customer1 WHERE c_w_id = 8965 AND c_d_id = 1 AND c_id = 9 FOR UPDATE", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since FROM customer1 WHERE c_w_id = 8965 AND c_d_id = 1 AND c_id = 9 FOR UPDATE", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where 1 != 1", - "Query": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where c_w_id = 8965 and c_d_id = 1 and c_id = 9 for update", 
- "Table": "customer1", - "Values": [ - "INT64(8965)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since FROM customer1 WHERE c_w_id = 8965 AND c_d_id = 1 AND c_id = 9 FOR UPDATE", "Instructions": { @@ -675,26 +416,7 @@ { "comment": "TPC-C select customer1 unique", "query": "SELECT c_data FROM customer1 WHERE c_w_id = 32 AND c_d_id=68 AND c_id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT c_data FROM customer1 WHERE c_w_id = 32 AND c_d_id=68 AND c_id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c_data from customer1 where 1 != 1", - "Query": "select c_data from customer1 where c_w_id = 32 and c_d_id = 68 and c_id = 5", - "Table": "customer1", - "Values": [ - "INT64(32)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT c_data FROM customer1 WHERE c_w_id = 32 AND c_d_id=68 AND c_id = 5", "Instructions": { @@ -720,12 +442,12 @@ { "comment": "TPC-C update customer1 unique and float value", "query": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301, c_data='i am data' WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98", - "v3-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301, c_data='i am data' WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98", "Instructions": { "OperatorType": "Update", - "Variant": "Equal", + "Variant": "EqualUnique", "Keyspace": { "Name": "main", "Sharded": true @@ -741,56 +463,12 @@ "TablesUsed": [ "main.customer1" ] - }, - "gen4-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301, c_data='i am data' WHERE c_w_id 
= 20 AND c_d_id=387 AND c_id=98", - "Instructions": { - "OperatorType": "Update", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301, c_data = 'i am data' where c_w_id = 20 and c_d_id = 387 and c_id = 98", - "Table": "customer1", - "Values": [ - "INT64(20)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.customer1" - ] - } - }, - { - "comment": "TPC-C update customer1 unique and float value", - "query": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301 where c_w_id = 20 and c_d_id = 387 and c_id = 98", - "Table": "customer1", - "Values": [ - "INT64(20)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.customer1" - ] - }, - "gen4-plan": { + } + }, + { + "comment": "TPC-C update customer1 unique and float value", + "query": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98", + "plan": { "QueryType": "UPDATE", "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98", "Instructions": { @@ -841,26 +519,7 @@ { "comment": "TPC-C select aggr customer1", "query": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 870 AND c_d_id= 780 AND c_last='last'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 870 AND c_d_id= 780 AND c_last='last'", - 
"Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1", - "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 870 and c_d_id = 780 and c_last = 'last'", - "Table": "customer1", - "Values": [ - "INT64(870)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 870 AND c_d_id= 780 AND c_last='last'", "Instructions": { @@ -886,26 +545,7 @@ { "comment": "TPC-C select order by customer1", "query": "SELECT c_balance, c_first, c_middle, c_id FROM customer1 WHERE c_w_id = 840 AND c_d_id= 1 AND c_last='test' ORDER BY c_first", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT c_balance, c_first, c_middle, c_id FROM customer1 WHERE c_w_id = 840 AND c_d_id= 1 AND c_last='test' ORDER BY c_first", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c_balance, c_first, c_middle, c_id from customer1 where 1 != 1", - "Query": "select c_balance, c_first, c_middle, c_id from customer1 where c_w_id = 840 and c_d_id = 1 and c_last = 'test' order by c_first asc", - "Table": "customer1", - "Values": [ - "INT64(840)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT c_balance, c_first, c_middle, c_id FROM customer1 WHERE c_w_id = 840 AND c_d_id= 1 AND c_last='test' ORDER BY c_first", "Instructions": { @@ -931,26 +571,7 @@ { "comment": "TPC-C select unique customer1", "query": "SELECT c_balance, c_first, c_middle, c_last FROM customer1 WHERE c_w_id = 15 AND c_d_id=5169 AND c_id=1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT c_balance, c_first, c_middle, c_last FROM customer1 WHERE c_w_id = 15 AND c_d_id=5169 AND c_id=1", - 
"Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c_balance, c_first, c_middle, c_last from customer1 where 1 != 1", - "Query": "select c_balance, c_first, c_middle, c_last from customer1 where c_w_id = 15 and c_d_id = 5169 and c_id = 1", - "Table": "customer1", - "Values": [ - "INT64(15)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT c_balance, c_first, c_middle, c_last FROM customer1 WHERE c_w_id = 15 AND c_d_id=5169 AND c_id=1", "Instructions": { @@ -976,26 +597,7 @@ { "comment": "TPC-C select order by orders1", "query": "SELECT o_id, o_carrier_id, o_entry_d FROM orders1 WHERE o_w_id = 9894 AND o_d_id = 3 AND o_c_id = 159 ORDER BY o_id DESC", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT o_id, o_carrier_id, o_entry_d FROM orders1 WHERE o_w_id = 9894 AND o_d_id = 3 AND o_c_id = 159 ORDER BY o_id DESC", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select o_id, o_carrier_id, o_entry_d from orders1 where 1 != 1", - "Query": "select o_id, o_carrier_id, o_entry_d from orders1 where o_w_id = 9894 and o_d_id = 3 and o_c_id = 159 order by o_id desc", - "Table": "orders1", - "Values": [ - "INT64(9894)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT o_id, o_carrier_id, o_entry_d FROM orders1 WHERE o_w_id = 9894 AND o_d_id = 3 AND o_c_id = 159 ORDER BY o_id DESC", "Instructions": { @@ -1021,26 +623,7 @@ { "comment": "TPC-C select order_line1", "query": "SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line1 WHERE ol_w_id = 92 AND ol_d_id = 5 AND ol_o_id = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line1 WHERE 
ol_w_id = 92 AND ol_d_id = 5 AND ol_o_id = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where 1 != 1", - "Query": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where ol_w_id = 92 and ol_d_id = 5 and ol_o_id = 1", - "Table": "order_line1", - "Values": [ - "INT64(92)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line1 WHERE ol_w_id = 92 AND ol_d_id = 5 AND ol_o_id = 1", "Instructions": { @@ -1066,26 +649,7 @@ { "comment": "TPC-C select for update new_orders1", "query": "SELECT no_o_id FROM new_orders1 WHERE no_d_id = 689 AND no_w_id = 15 ORDER BY no_o_id ASC LIMIT 1 FOR UPDATE", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT no_o_id FROM new_orders1 WHERE no_d_id = 689 AND no_w_id = 15 ORDER BY no_o_id ASC LIMIT 1 FOR UPDATE", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select no_o_id from new_orders1 where 1 != 1", - "Query": "select no_o_id from new_orders1 where no_d_id = 689 and no_w_id = 15 order by no_o_id asc limit 1 for update", - "Table": "new_orders1", - "Values": [ - "INT64(15)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT no_o_id FROM new_orders1 WHERE no_d_id = 689 AND no_w_id = 15 ORDER BY no_o_id ASC LIMIT 1 FOR UPDATE", "Instructions": { @@ -1111,29 +675,7 @@ { "comment": "TPC-C delete new_orders1", "query": "DELETE FROM new_orders1 WHERE no_o_id = 2218 AND no_d_id = 358 AND no_w_id = 98465", - "v3-plan": { - "QueryType": "DELETE", - "Original": "DELETE FROM new_orders1 WHERE no_o_id = 2218 AND no_d_id = 358 
AND no_w_id = 98465", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "delete from new_orders1 where no_o_id = 2218 and no_d_id = 358 and no_w_id = 98465", - "Table": "new_orders1", - "Values": [ - "INT64(98465)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.new_orders1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "DELETE FROM new_orders1 WHERE no_o_id = 2218 AND no_d_id = 358 AND no_w_id = 98465", "Instructions": { @@ -1159,26 +701,7 @@ { "comment": "TPC-C select unique orders1", "query": "SELECT o_c_id FROM orders1 WHERE o_id = 6 AND o_d_id = 1983 AND o_w_id = 894605", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT o_c_id FROM orders1 WHERE o_id = 6 AND o_d_id = 1983 AND o_w_id = 894605", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select o_c_id from orders1 where 1 != 1", - "Query": "select o_c_id from orders1 where o_id = 6 and o_d_id = 1983 and o_w_id = 894605", - "Table": "orders1", - "Values": [ - "INT64(894605)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT o_c_id FROM orders1 WHERE o_id = 6 AND o_d_id = 1983 AND o_w_id = 894605", "Instructions": { @@ -1204,29 +727,7 @@ { "comment": "TPC-C update orders1 unique", "query": "UPDATE orders1 SET o_carrier_id = 9 WHERE o_id = 56 AND o_d_id = 98 AND o_w_id = 897", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE orders1 SET o_carrier_id = 9 WHERE o_id = 56 AND o_d_id = 98 AND o_w_id = 897", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update orders1 set o_carrier_id = 9 where o_id = 56 and o_d_id = 98 and o_w_id = 897", - "Table": "orders1", 
- "Values": [ - "INT64(897)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.orders1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE orders1 SET o_carrier_id = 9 WHERE o_id = 56 AND o_d_id = 98 AND o_w_id = 897", "Instructions": { @@ -1252,29 +753,7 @@ { "comment": "TPC-C update order_line1", "query": "UPDATE order_line1 SET ol_delivery_d = NOW() WHERE ol_o_id = 235 AND ol_d_id = 315 AND ol_w_id = 8", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE order_line1 SET ol_delivery_d = NOW() WHERE ol_o_id = 235 AND ol_d_id = 315 AND ol_w_id = 8", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update order_line1 set ol_delivery_d = now() where ol_o_id = 235 and ol_d_id = 315 and ol_w_id = 8", - "Table": "order_line1", - "Values": [ - "INT64(8)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.order_line1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE order_line1 SET ol_delivery_d = NOW() WHERE ol_o_id = 235 AND ol_d_id = 315 AND ol_w_id = 8", "Instructions": { @@ -1300,26 +779,7 @@ { "comment": "TPC-C select sum order_line1", "query": "SELECT SUM(ol_amount) sm FROM order_line1 WHERE ol_o_id = 680 AND ol_d_id = 201 AND ol_w_id = 87", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT SUM(ol_amount) sm FROM order_line1 WHERE ol_o_id = 680 AND ol_d_id = 201 AND ol_w_id = 87", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select sum(ol_amount) as sm from order_line1 where 1 != 1", - "Query": "select sum(ol_amount) as sm from order_line1 where ol_o_id = 680 and ol_d_id = 201 and ol_w_id = 87", - "Table": "order_line1", - "Values": [ - "INT64(87)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": 
"SELECT SUM(ol_amount) sm FROM order_line1 WHERE ol_o_id = 680 AND ol_d_id = 201 AND ol_w_id = 87", "Instructions": { @@ -1345,29 +805,7 @@ { "comment": "TPC-C update customer1", "query": "UPDATE customer1 SET c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 WHERE c_id = 6 AND c_d_id = 5 AND c_w_id = 160", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE customer1 SET c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 WHERE c_id = 6 AND c_d_id = 5 AND c_w_id = 160", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update customer1 set c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 where c_id = 6 and c_d_id = 5 and c_w_id = 160", - "Table": "customer1", - "Values": [ - "INT64(160)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.customer1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE customer1 SET c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 WHERE c_id = 6 AND c_d_id = 5 AND c_w_id = 160", "Instructions": { @@ -1393,26 +831,7 @@ { "comment": "TPC-C select unique district1", "query": "SELECT d_next_o_id FROM district1 WHERE d_id = 6 AND d_w_id= 21", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT d_next_o_id FROM district1 WHERE d_id = 6 AND d_w_id= 21", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select d_next_o_id from district1 where 1 != 1", - "Query": "select d_next_o_id from district1 where d_id = 6 and d_w_id = 21", - "Table": "district1", - "Values": [ - "INT64(21)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT d_next_o_id FROM district1 WHERE d_id = 6 AND d_w_id= 21", "Instructions": { @@ -1438,26 +857,7 @@ { "comment": "TPC-C 
select count distinct stock1 join order_line1", "query": "SELECT COUNT(DISTINCT(s.s_i_id)) FROM stock1 AS s JOIN order_line1 AS ol ON ol.ol_w_id=s.s_w_id AND ol.ol_i_id=s.s_i_id WHERE ol.ol_w_id = 12 AND ol.ol_d_id = 1908 AND ol.ol_o_id < 30 AND ol.ol_o_id >= 15 AND s.s_w_id= 12 AND s.s_quantity < 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT COUNT(DISTINCT(s.s_i_id)) FROM stock1 AS s JOIN order_line1 AS ol ON ol.ol_w_id=s.s_w_id AND ol.ol_i_id=s.s_i_id WHERE ol.ol_w_id = 12 AND ol.ol_d_id = 1908 AND ol.ol_o_id < 30 AND ol.ol_o_id >= 15 AND s.s_w_id= 12 AND s.s_quantity < 10", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select count(distinct s.s_i_id) from stock1 as s join order_line1 as ol on ol.ol_w_id = s.s_w_id and ol.ol_i_id = s.s_i_id where 1 != 1", - "Query": "select count(distinct s.s_i_id) from stock1 as s join order_line1 as ol on ol.ol_w_id = s.s_w_id and ol.ol_i_id = s.s_i_id where ol.ol_w_id = 12 and ol.ol_d_id = 1908 and ol.ol_o_id < 30 and ol.ol_o_id >= 15 and s.s_w_id = 12 and s.s_quantity < 10", - "Table": "stock1, order_line1", - "Values": [ - "INT64(12)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT COUNT(DISTINCT(s.s_i_id)) FROM stock1 AS s JOIN order_line1 AS ol ON ol.ol_w_id=s.s_w_id AND ol.ol_i_id=s.s_i_id WHERE ol.ol_w_id = 12 AND ol.ol_d_id = 1908 AND ol.ol_o_id < 30 AND ol.ol_o_id >= 15 AND s.s_w_id= 12 AND s.s_quantity < 10", "Instructions": { @@ -1484,26 +884,7 @@ { "comment": "TPC-C select distinct order_line1", "query": "SELECT DISTINCT ol_i_id FROM order_line1 WHERE ol_w_id = 1 AND ol_d_id = 156 AND ol_o_id < 500 AND ol_o_id >= 56", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT DISTINCT ol_i_id FROM order_line1 WHERE ol_w_id = 1 AND ol_d_id = 156 AND ol_o_id < 500 AND ol_o_id >= 56", - "Instructions": { - "OperatorType": "Route", - 
"Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select ol_i_id from order_line1 where 1 != 1", - "Query": "select distinct ol_i_id from order_line1 where ol_w_id = 1 and ol_d_id = 156 and ol_o_id < 500 and ol_o_id >= 56", - "Table": "order_line1", - "Values": [ - "INT64(1)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT DISTINCT ol_i_id FROM order_line1 WHERE ol_w_id = 1 AND ol_d_id = 156 AND ol_o_id < 500 AND ol_o_id >= 56", "Instructions": { @@ -1529,26 +910,7 @@ { "comment": "TPC-C", "query": "SELECT count(*) FROM stock1 WHERE s_w_id = 1 AND s_i_id = 8 AND s_quantity < 1000", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT count(*) FROM stock1 WHERE s_w_id = 1 AND s_i_id = 8 AND s_quantity < 1000", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select count(*) from stock1 where 1 != 1", - "Query": "select count(*) from stock1 where s_w_id = 1 and s_i_id = 8 and s_quantity < 1000", - "Table": "stock1", - "Values": [ - "INT64(1)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT count(*) FROM stock1 WHERE s_w_id = 1 AND s_i_id = 8 AND s_quantity < 1000", "Instructions": { @@ -1574,56 +936,7 @@ { "comment": "TPC-C select with subquery,aggr,distinct,having,limit", "query": "select o.o_id,o.o_d_id from orders1 o, (select o_c_id,o_w_id,o_d_id,count(distinct o_w_id),o_id from orders1 where o_w_id=1 and o_id > 2100 and o_id < 11153 group by o_c_id,o_d_id,o_w_id having count( distinct o_id) > 1 limit 1) t where t.o_w_id=o.o_w_id and t.o_d_id=o.o_d_id and t.o_c_id=o.o_c_id limit 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select o.o_id,o.o_d_id from orders1 o, (select o_c_id,o_w_id,o_d_id,count(distinct o_w_id),o_id from orders1 where o_w_id=1 and o_id > 2100 and o_id < 11153 group 
by o_c_id,o_d_id,o_w_id having count( distinct o_id) > 1 limit 1) t where t.o_w_id=o.o_w_id and t.o_d_id=o.o_d_id and t.o_c_id=o.o_c_id limit 1", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "JoinVars": { - "o_o_c_id": 3, - "o_o_d_id": 1, - "o_o_w_id": 2 - }, - "TableName": "orders1_orders1", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select o.o_id, o.o_d_id, o.o_w_id, o.o_c_id from orders1 as o where 1 != 1", - "Query": "select o.o_id, o.o_d_id, o.o_w_id, o.o_c_id from orders1 as o", - "Table": "orders1" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select 1 from (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where 1 != 1 group by o_c_id, o_d_id, o_w_id) as t where 1 != 1", - "Query": "select 1 from (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where o_w_id = 1 and o_id > 2100 and o_id < 11153 group by o_c_id, o_d_id, o_w_id having count(distinct o_id) > 1 limit 1) as t where t.o_w_id = :o_o_w_id and t.o_d_id = :o_o_d_id and t.o_c_id = :o_o_c_id", - "Table": "orders1", - "Values": [ - "INT64(1)" - ], - "Vindex": "hash" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select o.o_id,o.o_d_id from orders1 o, (select o_c_id,o_w_id,o_d_id,count(distinct o_w_id),o_id from orders1 where o_w_id=1 and o_id > 2100 and o_id < 11153 group by o_c_id,o_d_id,o_w_id having count( distinct o_id) > 1 limit 1) t where t.o_w_id=o.o_w_id and t.o_d_id=o.o_d_id and t.o_c_id=o.o_c_id limit 1", "Instructions": { @@ -1633,8 +946,8 @@ "Name": "main", "Sharded": true }, - "FieldQuery": "select o.o_id, o.o_d_id from orders1 as o, (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), 
o_id from orders1 where 1 != 1 group by o_c_id, o_d_id, o_w_id) as t where 1 != 1", - "Query": "select o.o_id, o.o_d_id from orders1 as o, (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where o_w_id = 1 and o_id > 2100 and o_id < 11153 group by o_c_id, o_d_id, o_w_id having count(distinct o_id) > 1 limit 1) as t where t.o_w_id = o.o_w_id and t.o_d_id = o.o_d_id and t.o_c_id = o.o_c_id limit 1", + "FieldQuery": "select o.o_id, o.o_d_id from (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where 1 != 1 group by o_c_id, o_d_id, o_w_id) as t, orders1 as o where 1 != 1", + "Query": "select o.o_id, o.o_d_id from (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where o_w_id = 1 and o_id > 2100 and o_id < 11153 group by o_c_id, o_d_id, o_w_id having count(distinct o_id) > 1 limit 1) as t, orders1 as o where t.o_w_id = o.o_w_id and t.o_d_id = o.o_d_id and t.o_c_id = o.o_c_id limit 1", "Table": "orders1", "Values": [ "INT64(1)" @@ -1649,29 +962,7 @@ { "comment": "TPC-C delete order_line1", "query": "DELETE FROM order_line1 where ol_w_id=178 AND ol_d_id=1 AND ol_o_id=84", - "v3-plan": { - "QueryType": "DELETE", - "Original": "DELETE FROM order_line1 where ol_w_id=178 AND ol_d_id=1 AND ol_o_id=84", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "delete from order_line1 where ol_w_id = 178 and ol_d_id = 1 and ol_o_id = 84", - "Table": "order_line1", - "Values": [ - "INT64(178)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.order_line1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "DELETE FROM order_line1 where ol_w_id=178 AND ol_d_id=1 AND ol_o_id=84", "Instructions": { @@ -1697,29 +988,7 @@ { "comment": "TPC-C delete orders1", "query": "DELETE FROM orders1 where o_w_id=1 AND o_d_id=3 and o_id=384", - "v3-plan": { - "QueryType": "DELETE", - 
"Original": "DELETE FROM orders1 where o_w_id=1 AND o_d_id=3 and o_id=384", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "delete from orders1 where o_w_id = 1 and o_d_id = 3 and o_id = 384", - "Table": "orders1", - "Values": [ - "INT64(1)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.orders1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "DELETE FROM orders1 where o_w_id=1 AND o_d_id=3 and o_id=384", "Instructions": { @@ -1745,29 +1014,7 @@ { "comment": "TPC-C delete history1", "query": "DELETE FROM history1 where h_w_id=75 AND h_d_id=102 LIMIT 10", - "v3-plan": { - "QueryType": "DELETE", - "Original": "DELETE FROM history1 where h_w_id=75 AND h_d_id=102 LIMIT 10", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "delete from history1 where h_w_id = 75 and h_d_id = 102 limit 10", - "Table": "history1", - "Values": [ - "INT64(75)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.history1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "DELETE FROM history1 where h_w_id=75 AND h_d_id=102 LIMIT 10", "Instructions": { @@ -1790,4 +1037,4 @@ ] } } -] +] \ No newline at end of file diff --git a/go/vt/vtgate/planbuilder/testdata/tpch_cases.json b/go/vt/vtgate/planbuilder/testdata/tpch_cases.json index 33f3f73d9fb..98c39a2d596 100644 --- a/go/vt/vtgate/planbuilder/testdata/tpch_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/tpch_cases.json @@ -2,20 +2,17 @@ { "comment": "TPC-H query 1", "query": "select l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, 
avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order from lineitem where l_shipdate <= '1998-12-01' - interval '108' day group by l_returnflag, l_linestatus order by l_returnflag, l_linestatus", - "v3-plan": "VT12001: unsupported: in scatter query: complex aggregate expression", - "gen4-plan": "VT12001: unsupported: in scatter query: aggregation function 'avg(l_quantity) as avg_qty'" + "plan": "VT12001: unsupported: in scatter query: aggregation function 'avg(l_quantity) as avg_qty'" }, { "comment": "TPC-H query 2", "query": "select s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment from part, supplier, partsupp, nation, region where p_partkey = ps_partkey and s_suppkey = ps_suppkey and p_size = 15 and p_type like '%BRASS' and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'EUROPE' and ps_supplycost = ( select min(ps_supplycost) from partsupp, supplier, nation, region where p_partkey = ps_partkey and s_suppkey = ps_suppkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'EUROPE' ) order by s_acctbal desc, n_name, s_name, p_partkey limit 10", - "v3-plan": "VT03019: column p_partkey not found", - "gen4-plan": "VT12001: unsupported: cross-shard correlated subquery" + "plan": "VT12001: unsupported: correlated subquery is only supported for EXISTS" }, { "comment": "TPC-H query 3", "query": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority from customer, orders, lineitem where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate < date('1995-03-15') and l_shipdate > date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority order by revenue desc, o_orderdate limit 10", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as 
revenue, o_orderdate, o_shippriority from customer, orders, lineitem where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate < date('1995-03-15') and l_shipdate > date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority order by revenue desc, o_orderdate limit 10", "Instructions": { @@ -25,14 +22,14 @@ { "OperatorType": "Sort", "Variant": "Memory", - "OrderBy": "1 DESC, (2|4) ASC", + "OrderBy": "1 DESC, (2|5) ASC", "ResultColumns": 4, "Inputs": [ { "OperatorType": "Aggregate", "Variant": "Ordered", "Aggregates": "sum(1) AS revenue", - "GroupBy": "(0|5), (2|4), (3|6)", + "GroupBy": "(0|4), (2|5), (3|6)", "Inputs": [ { "OperatorType": "Projection", @@ -41,20 +38,20 @@ "[COLUMN 0] * [COLUMN 1] as revenue", "[COLUMN 3] as o_orderdate", "[COLUMN 4] as o_shippriority", - "[COLUMN 5] as weight_string(o_orderdate)", - "[COLUMN 6] as weight_string(l_orderkey)", + "[COLUMN 5] as weight_string(l_orderkey)", + "[COLUMN 6] as weight_string(o_orderdate)", "[COLUMN 7] as weight_string(o_shippriority)" ], "Inputs": [ { "OperatorType": "Sort", "Variant": "Memory", - "OrderBy": "(2|6) ASC, (3|5) ASC, (4|7) ASC", + "OrderBy": "(2|5) ASC, (3|6) ASC, (4|7) ASC", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0,L:1,R:1,R:2,R:3,L:2,R:4", + "JoinColumnIndexes": "L:0,R:0,L:1,R:1,R:2,L:2,R:3,R:4", "JoinVars": { "l_orderkey": 1 }, @@ -146,83 +143,74 @@ { "comment": "TPC-H query 4", "query": "select o_orderpriority, count(*) as order_count from orders where o_orderdate >= date('1993-07-01') and o_orderdate < date('1993-07-01') + interval '3' month and exists ( select * from lineitem where l_orderkey = o_orderkey and l_commitdate < l_receiptdate ) group by o_orderpriority order by o_orderpriority", - "v3-plan": "VT03019: column o_orderkey not found", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select o_orderpriority, count(*) as order_count from orders where o_orderdate >= 
date('1993-07-01') and o_orderdate < date('1993-07-01') + interval '3' month and exists ( select * from lineitem where l_orderkey = o_orderkey and l_commitdate < l_receiptdate ) group by o_orderpriority order by o_orderpriority", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", "Aggregates": "sum_count_star(1) AS order_count", - "GroupBy": "(0|2)", + "GroupBy": "(0|3)", "ResultColumns": 2, "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 1] as o_orderpriority", - "[COLUMN 2] as order_count", - "[COLUMN 3]" - ], + "OperatorType": "SemiJoin", + "JoinVars": { + "o_orderkey": 2 + }, + "TableName": "orders_lineitem", "Inputs": [ { - "OperatorType": "SemiJoin", - "JoinVars": { - "o_orderkey": 0 + "InputName": "Outer", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true }, - "TableName": "orders_lineitem", + "FieldQuery": "select o_orderpriority, count(*) as order_count, o_orderkey, weight_string(o_orderpriority) from orders where 1 != 1 group by o_orderpriority, o_orderkey, weight_string(o_orderpriority)", + "OrderBy": "(0|3) ASC", + "Query": "select o_orderpriority, count(*) as order_count, o_orderkey, weight_string(o_orderpriority) from orders where o_orderdate >= date('1993-07-01') and o_orderdate < date('1993-07-01') + interval '3' month group by o_orderpriority, o_orderkey, weight_string(o_orderpriority) order by o_orderpriority asc", + "Table": "orders" + }, + { + "InputName": "SubQuery", + "OperatorType": "VindexLookup", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "Values": [ + ":o_orderkey" + ], + "Vindex": "lineitem_map", "Inputs": [ { "OperatorType": "Route", - "Variant": "Scatter", + "Variant": "IN", "Keyspace": { "Name": "main", "Sharded": true }, - "FieldQuery": "select o_orderkey, o_orderpriority, count(*) as order_count, weight_string(o_orderpriority), weight_string(o_orderkey) from orders where 1 != 1 group by 
o_orderpriority, weight_string(o_orderpriority), o_orderkey, weight_string(o_orderkey)", - "OrderBy": "(1|3) ASC", - "Query": "select o_orderkey, o_orderpriority, count(*) as order_count, weight_string(o_orderpriority), weight_string(o_orderkey) from orders where o_orderdate >= date('1993-07-01') and o_orderdate < date('1993-07-01') + interval '3' month group by o_orderpriority, weight_string(o_orderpriority), o_orderkey, weight_string(o_orderkey) order by o_orderpriority asc", - "Table": "orders" + "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1", + "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals", + "Table": "lineitem_map", + "Values": [ + "::l_orderkey" + ], + "Vindex": "md5" }, { - "OperatorType": "VindexLookup", - "Variant": "EqualUnique", + "OperatorType": "Route", + "Variant": "ByDestination", "Keyspace": { "Name": "main", "Sharded": true }, - "Values": [ - ":o_orderkey" - ], - "Vindex": "lineitem_map", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1", - "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals", - "Table": "lineitem_map", - "Values": [ - "::l_orderkey" - ], - "Vindex": "md5" - }, - { - "OperatorType": "Route", - "Variant": "ByDestination", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select 1 from lineitem where 1 != 1", - "Query": "select 1 from lineitem where l_commitdate < l_receiptdate and l_orderkey = :o_orderkey", - "Table": "lineitem" - } - ] + "FieldQuery": "select 1 from lineitem where 1 != 1", + "Query": "select 1 from lineitem where l_orderkey = :o_orderkey and l_commitdate < l_receiptdate", + "Table": "lineitem" } ] } @@ -237,10 +225,9 @@ } }, { - "comment": "TPC-H query 5 - Gen4 produces plan but the plan output is flaky", + "comment": "TPC-H query 
5", "query": "select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue from customer, orders, lineitem, supplier, nation, region where c_custkey = o_custkey and l_orderkey = o_orderkey and l_suppkey = s_suppkey and c_nationkey = s_nationkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'ASIA' and o_orderdate >= date('1994-01-01') and o_orderdate < date('1994-01-01') + interval '1' year group by n_name order by revenue desc", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue from customer, orders, lineitem, supplier, nation, region where c_custkey = o_custkey and l_orderkey = o_orderkey and l_suppkey = s_suppkey and c_nationkey = s_nationkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'ASIA' and o_orderdate >= date('1994-01-01') and o_orderdate < date('1994-01-01') + interval '1' year group by n_name order by revenue desc", "Instructions": { @@ -494,29 +481,7 @@ { "comment": "TPC-H query 6", "query": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum(0)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select sum(l_extendedprice * l_discount) as revenue from lineitem where 1 != 1", - "Query": "select 
sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24", - "Table": "lineitem" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24", "Instructions": { @@ -545,139 +510,179 @@ { "comment": "TPC-H query 7", "query": "select supp_nation, cust_nation, l_year, sum(volume) as revenue from (select n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume from supplier, lineitem, orders, customer, nation n1, nation n2 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n2.n_nationkey and ((n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY') or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE')) and l_shipdate between date('1995-01-01') and date('1996-12-31')) as shipping group by supp_nation, cust_nation, l_year order by supp_nation, cust_nation, l_year", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select supp_nation, cust_nation, l_year, sum(volume) as revenue from (select n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume from supplier, lineitem, orders, customer, nation n1, nation n2 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n2.n_nationkey and ((n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY') or (n1.n_name = 'GERMANY' and 
n2.n_name = 'FRANCE')) and l_shipdate between date('1995-01-01') and date('1996-12-31')) as shipping group by supp_nation, cust_nation, l_year order by supp_nation, cust_nation, l_year", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", "Aggregates": "sum(3) AS revenue", - "GroupBy": "(0|6), (1|5), (2|4)", + "GroupBy": "(0|4), (1|5), (2|6)", "ResultColumns": 4, "Inputs": [ { "OperatorType": "Projection", "Expressions": [ - "[COLUMN 4] as supp_nation", - "[COLUMN 5] as cust_nation", - "[COLUMN 6] as l_year", - "(((([COLUMN 10] * COALESCE([COLUMN 11], INT64(1))) * COALESCE([COLUMN 12], INT64(1))) * COALESCE([COLUMN 13], INT64(1))) * COALESCE([COLUMN 14], INT64(1))) * COALESCE([COLUMN 15], INT64(1)) as revenue", - "[COLUMN 9]", - "[COLUMN 8]", - "[COLUMN 7]" + "[COLUMN 2] as supp_nation", + "[COLUMN 3] as cust_nation", + "[COLUMN 4] as l_year", + "[COLUMN 0] * [COLUMN 1] as revenue", + "[COLUMN 5] as weight_string(supp_nation)", + "[COLUMN 6] as weight_string(cust_nation)", + "[COLUMN 7] as weight_string(l_year)" ], "Inputs": [ { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(0|16) ASC, (1|17) ASC, (2|18) ASC", + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1,R:1,L:2,L:5,R:2,L:6", + "JoinVars": { + "n1_n_name": 4, + "o_custkey": 3 + }, + "TableName": "lineitem_orders_supplier_nation_customer_nation", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:2,R:0,L:3,L:4,L:8,R:1,L:9,L:13,R:2,L:14,L:15,L:16,L:17,L:18,R:3,R:4,L:19,R:5,L:20", - "JoinVars": { - "n1_n_name": 1, - "o_custkey": 0 - }, - "TableName": "lineitem_orders_supplier_nation_customer_nation", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as revenue", + "[COLUMN 2] as supp_nation", + "[COLUMN 3] as l_year", + "[COLUMN 4] as orders.o_custkey", + "[COLUMN 5] as n1.n_name", + "[COLUMN 6] as weight_string(supp_nation)", + "[COLUMN 7] as weight_string(l_year)" + ], "Inputs": [ { 
"OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0,R:1,L:2,L:3,L:5,R:2,R:3,R:4,L:6,L:8,R:5,R:6,R:7,L:9,L:10,L:11,R:8,R:9,R:10,L:12", + "JoinColumnIndexes": "L:0,R:0,R:1,L:1,L:2,L:3,R:2,L:5", "JoinVars": { - "l_suppkey": 1 + "l_suppkey": 4 }, "TableName": "lineitem_orders_supplier_nation", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0,L:0,L:2,L:3,L:0,R:0,L:2,L:6,R:2,L:7,L:4,R:1,L:8", - "JoinVars": { - "l_orderkey": 1 - }, - "TableName": "lineitem_orders", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as revenue", + "[COLUMN 2] as l_year", + "[COLUMN 3] as orders.o_custkey", + "[COLUMN 4] as n1.n_name", + "[COLUMN 5] as lineitem.l_suppkey", + "[COLUMN 6] as weight_string(l_year)" + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select l_suppkey, l_orderkey, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, sum(volume) as revenue, weight_string(l_orderkey), weight_string(l_suppkey), weight_string(extract(year from l_shipdate)), weight_string(extract(year from l_shipdate)) from lineitem where 1 != 1 group by l_orderkey, weight_string(l_orderkey), l_suppkey, weight_string(l_suppkey), l_year, weight_string(l_year)", - "Query": "select l_suppkey, l_orderkey, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, sum(volume) as revenue, weight_string(l_orderkey), weight_string(l_suppkey), weight_string(extract(year from l_shipdate)), weight_string(extract(year from l_shipdate)) from lineitem where l_shipdate between date('1995-01-01') and date('1996-12-31') group by l_orderkey, weight_string(l_orderkey), l_suppkey, weight_string(l_suppkey), l_year, weight_string(l_year)", - "Table": "lineitem" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true + 
"OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1,L:2,L:3,L:4,L:6", + "JoinVars": { + "l_orderkey": 5 }, - "FieldQuery": "select o_custkey, count(*), weight_string(o_custkey) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey)", - "Query": "select o_custkey, count(*), weight_string(o_custkey) from orders where o_orderkey = :l_orderkey group by o_custkey, weight_string(o_custkey)", - "Table": "orders", - "Values": [ - ":l_orderkey" - ], - "Vindex": "hash" + "TableName": "lineitem_orders", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select sum(volume) as revenue, l_year, shipping.`orders.o_custkey`, shipping.`n1.n_name`, shipping.`lineitem.l_suppkey`, shipping.`lineitem.l_orderkey`, weight_string(l_year), supp_nation, weight_string(supp_nation), cust_nation, weight_string(cust_nation) from (select extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, orders.o_custkey as `orders.o_custkey`, lineitem.l_suppkey as `lineitem.l_suppkey`, lineitem.l_orderkey as `lineitem.l_orderkey` from lineitem where 1 != 1) as shipping where 1 != 1 group by l_year, shipping.`orders.o_custkey`, shipping.`n1.n_name`, shipping.`lineitem.l_suppkey`, shipping.`lineitem.l_orderkey`, weight_string(l_year)", + "OrderBy": "(7|8) ASC, (9|10) ASC, (1|6) ASC", + "Query": "select sum(volume) as revenue, l_year, shipping.`orders.o_custkey`, shipping.`n1.n_name`, shipping.`lineitem.l_suppkey`, shipping.`lineitem.l_orderkey`, weight_string(l_year), supp_nation, weight_string(supp_nation), cust_nation, weight_string(cust_nation) from (select extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, orders.o_custkey as `orders.o_custkey`, lineitem.l_suppkey as `lineitem.l_suppkey`, lineitem.l_orderkey as `lineitem.l_orderkey` from lineitem where l_shipdate between date('1995-01-01') and 
date('1996-12-31')) as shipping group by l_year, shipping.`orders.o_custkey`, shipping.`n1.n_name`, shipping.`lineitem.l_suppkey`, shipping.`lineitem.l_orderkey`, weight_string(l_year) order by supp_nation asc, cust_nation asc, l_year asc", + "Table": "lineitem" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*) from orders where 1 != 1 group by .0", + "Query": "select count(*) from orders where o_orderkey = :l_orderkey group by .0", + "Table": "orders", + "Values": [ + ":l_orderkey" + ], + "Vindex": "hash" + } + ] } ] }, { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0,R:1,R:0,R:0,R:1,R:3,R:3,R:4,L:1,R:2,R:5", - "JoinVars": { - "s_nationkey": 0 - }, - "TableName": "supplier_nation", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 2] as supp_nation", + "[COLUMN 3] as weight_string(supp_nation)" + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where 1 != 1 group by s_nationkey, weight_string(s_nationkey)", - "Query": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where s_suppkey = :l_suppkey group by s_nationkey, weight_string(s_nationkey)", - "Table": "supplier", - "Values": [ - ":l_suppkey" - ], - "Vindex": "hash" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,R:1,R:2", + "JoinVars": { + "s_nationkey": 1 }, - "FieldQuery": "select n1.n_name, n1.n_name as supp_nation, count(*), weight_string(n1.n_name), weight_string(n1.n_name), weight_string(n1.n_name) from nation as n1 where 1 != 1 group by n1.n_name, weight_string(n1.n_name), supp_nation, 
weight_string(supp_nation)", - "Query": "select n1.n_name, n1.n_name as supp_nation, count(*), weight_string(n1.n_name), weight_string(n1.n_name), weight_string(n1.n_name) from nation as n1 where n1.n_nationkey = :s_nationkey group by n1.n_name, weight_string(n1.n_name), supp_nation, weight_string(supp_nation)", - "Table": "nation", - "Values": [ - ":s_nationkey" - ], - "Vindex": "hash" + "TableName": "supplier_nation", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*), shipping.`supplier.s_nationkey` from (select supplier.s_nationkey as `supplier.s_nationkey` from supplier where 1 != 1) as shipping where 1 != 1 group by shipping.`supplier.s_nationkey`", + "Query": "select count(*), shipping.`supplier.s_nationkey` from (select supplier.s_nationkey as `supplier.s_nationkey` from supplier where s_suppkey = :l_suppkey) as shipping group by shipping.`supplier.s_nationkey`", + "Table": "supplier", + "Values": [ + ":l_suppkey" + ], + "Vindex": "hash" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*), supp_nation, weight_string(supp_nation) from (select n1.n_name as supp_nation from nation as n1 where 1 != 1) as shipping where 1 != 1 group by supp_nation, weight_string(supp_nation)", + "Query": "select count(*), supp_nation, weight_string(supp_nation) from (select n1.n_name as supp_nation from nation as n1 where n1.n_nationkey = :s_nationkey) as shipping group by supp_nation, weight_string(supp_nation)", + "Table": "nation", + "Values": [ + ":s_nationkey" + ], + "Vindex": "hash" + } + ] } ] } ] - }, + } + ] + }, + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 2] as cust_nation", + "[COLUMN 3] as weight_string(cust_nation)" + ], + "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - 
"JoinColumnIndexes": "R:0,R:0,R:2,L:1,R:1,R:3", + "JoinColumnIndexes": "L:0,R:0,R:1,R:2", "JoinVars": { - "c_nationkey": 0 + "c_nationkey": 1 }, "TableName": "customer_nation", "Inputs": [ @@ -688,8 +693,8 @@ "Name": "main", "Sharded": true }, - "FieldQuery": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where 1 != 1 group by c_nationkey, weight_string(c_nationkey)", - "Query": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where c_custkey = :o_custkey group by c_nationkey, weight_string(c_nationkey)", + "FieldQuery": "select count(*), shipping.`customer.c_nationkey` from (select customer.c_nationkey as `customer.c_nationkey` from customer where 1 != 1) as shipping where 1 != 1 group by shipping.`customer.c_nationkey`", + "Query": "select count(*), shipping.`customer.c_nationkey` from (select customer.c_nationkey as `customer.c_nationkey` from customer where c_custkey = :o_custkey) as shipping group by shipping.`customer.c_nationkey`", "Table": "customer", "Values": [ ":o_custkey" @@ -703,8 +708,8 @@ "Name": "main", "Sharded": true }, - "FieldQuery": "select n2.n_name as cust_nation, count(*), weight_string(n2.n_name), weight_string(n2.n_name) from nation as n2 where 1 != 1 group by cust_nation, weight_string(cust_nation)", - "Query": "select n2.n_name as cust_nation, count(*), weight_string(n2.n_name), weight_string(n2.n_name) from nation as n2 where (:n1_n_name = 'FRANCE' and n2.n_name = 'GERMANY' or :n1_n_name = 'GERMANY' and n2.n_name = 'FRANCE') and n2.n_nationkey = :c_nationkey group by cust_nation, weight_string(cust_nation)", + "FieldQuery": "select count(*), cust_nation, weight_string(cust_nation) from (select n2.n_name as cust_nation from nation as n2 where 1 != 1) as shipping where 1 != 1 group by cust_nation, weight_string(cust_nation)", + "Query": "select count(*), cust_nation, weight_string(cust_nation) from (select n2.n_name as cust_nation from nation as n2 where (:n1_n_name = 'FRANCE' and n2.n_name 
= 'GERMANY' or :n1_n_name = 'GERMANY' and n2.n_name = 'FRANCE') and n2.n_nationkey = :c_nationkey) as shipping group by cust_nation, weight_string(cust_nation)", "Table": "nation", "Values": [ ":c_nationkey" @@ -733,20 +738,17 @@ { "comment": "TPC-H query 8", "query": "select o_year, sum(case when nation = 'BRAZIL' then volume else 0 end) / sum(volume) as mkt_share from ( select extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) as volume, n2.n_name as nation from part, supplier, lineitem, orders, customer, nation n1, nation n2, region where p_partkey = l_partkey and s_suppkey = l_suppkey and l_orderkey = o_orderkey and o_custkey = c_custkey and c_nationkey = n1.n_nationkey and n1.n_regionkey = r_regionkey and r_name = 'AMERICA' and s_nationkey = n2.n_nationkey and o_orderdate between date '1995-01-01' and date('1996-12-31') and p_type = 'ECONOMY ANODIZED STEEL' ) as all_nations group by o_year order by o_year", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": "VT12001: unsupported: in scatter query: complex aggregate expression" + "plan": "VT13002: unexpected AST struct for query: o_year" }, { "comment": "TPC-H query 9", "query": "select nation, o_year, sum(amount) as sum_profit from ( select n_name as nation, extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount from part, supplier, lineitem, partsupp, orders, nation where s_suppkey = l_suppkey and ps_suppkey = l_suppkey and ps_partkey = l_partkey and p_partkey = l_partkey and o_orderkey = l_orderkey and s_nationkey = n_nationkey and p_name like '%green%' ) as profit group by nation, o_year order by nation, o_year desc", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": "VT12001: unsupported: aggregation on columns from different sources" + "plan": "VT13002: unexpected AST struct for query: nation" }, { "comment": "TPC-H query 10", "query": "select 
c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment from customer, orders, lineitem, nation where c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate >= date('1993-10-01') and o_orderdate < date('1993-10-01') + interval '3' month and l_returnflag = 'R' and c_nationkey = n_nationkey group by c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment order by revenue desc limit 20", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment from customer, orders, lineitem, nation where c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate >= date('1993-10-01') and o_orderdate < date('1993-10-01') + interval '3' month and l_returnflag = 'R' and c_nationkey = n_nationkey group by c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment order by revenue desc limit 20", "Instructions": { @@ -956,94 +958,105 @@ { "comment": "TPC-H query 11", "query": "select ps_partkey, sum(ps_supplycost * ps_availqty) as value from partsupp, supplier, nation where ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'GERMANY' group by ps_partkey having sum(ps_supplycost * ps_availqty) > ( select sum(ps_supplycost * ps_availqty) * 0.00001000000 from partsupp, supplier, nation where ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'GERMANY' ) order by value desc", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": "VT12001: unsupported: in scatter query: complex aggregate expression" - }, - { - "comment": "TPC-H query 12", - "query": "select l_shipmode, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority <> '1-URGENT' and 
o_orderpriority <> '2-HIGH' then 1 else 0 end) as low_line_count from orders, lineitem where o_orderkey = l_orderkey and l_shipmode in ('MAIL', 'SHIP') and l_commitdate < l_receiptdate and l_shipdate < l_commitdate and l_receiptdate >= date('1994-01-01') and l_receiptdate < date('1994-01-01') + interval '1' year group by l_shipmode order by l_shipmode", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", - "Original": "select l_shipmode, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority <> '1-URGENT' and o_orderpriority <> '2-HIGH' then 1 else 0 end) as low_line_count from orders, lineitem where o_orderkey = l_orderkey and l_shipmode in ('MAIL', 'SHIP') and l_commitdate < l_receiptdate and l_shipdate < l_commitdate and l_receiptdate >= date('1994-01-01') and l_receiptdate < date('1994-01-01') + interval '1' year group by l_shipmode order by l_shipmode", + "Original": "select ps_partkey, sum(ps_supplycost * ps_availqty) as value from partsupp, supplier, nation where ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'GERMANY' group by ps_partkey having sum(ps_supplycost * ps_availqty) > ( select sum(ps_supplycost * ps_availqty) * 0.00001000000 from partsupp, supplier, nation where ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'GERMANY' ) order by value desc", "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum(1) AS high_line_count, sum(2) AS low_line_count", - "GroupBy": "(0|3)", - "ResultColumns": 3, + "OperatorType": "UncorrelatedSubquery", + "Variant": "PulloutValue", + "PulloutVars": [ + "__sq1" + ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Projection", "Expressions": [ - "[COLUMN 3] as l_shipmode", - "[COLUMN 0] * [COLUMN 1] as high_line_count", - "[COLUMN 2] * [COLUMN 1] as low_line_count", - "[COLUMN 4] 
as weight_string(l_shipmode)" + "[COLUMN 0] * [COLUMN 1] as sum(ps_supplycost * ps_availqty) * 0.00001000000" ], "Inputs": [ { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(3|4) ASC", + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum(0) AS sum(ps_supplycost * ps_availqty), any_value(1)", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0,L:1,R:1,R:2", - "JoinVars": { - "o_orderkey": 2 - }, - "TableName": "orders_lineitem", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as sum(ps_supplycost * ps_availqty)", + "[COLUMN 2] as 0.00001000000" + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority != '1-URGENT' and o_orderpriority != '2-HIGH' then 1 else 0 end) as low_line_count, o_orderkey from orders where 1 != 1 group by o_orderkey", - "Query": "select sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority != '1-URGENT' and o_orderpriority != '2-HIGH' then 1 else 0 end) as low_line_count, o_orderkey from orders group by o_orderkey", - "Table": "orders" - }, - { - "OperatorType": "VindexLookup", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1", + "JoinVars": { + "s_nationkey1": 2 }, - "Values": [ - ":o_orderkey" - ], - "Vindex": "lineitem_map", + "TableName": "partsupp_supplier_nation", "Inputs": [ { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1", - "Query": "select l_orderkey, 
l_linenumber from lineitem_map where l_orderkey in ::__vals", - "Table": "lineitem_map", - "Values": [ - "::l_orderkey" + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as sum(ps_supplycost * ps_availqty)", + "[COLUMN 2] as 0.00001000000", + "[COLUMN 3] as s_nationkey" ], - "Vindex": "md5" + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1,R:1", + "JoinVars": { + "ps_suppkey1": 2 + }, + "TableName": "partsupp_supplier", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select sum(ps_supplycost * ps_availqty), 0.00001000000, ps_suppkey from partsupp where 1 != 1 group by ps_suppkey", + "Query": "select sum(ps_supplycost * ps_availqty), 0.00001000000, ps_suppkey from partsupp group by ps_suppkey", + "Table": "partsupp" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*), s_nationkey from supplier where 1 != 1 group by s_nationkey", + "Query": "select count(*), s_nationkey from supplier where s_suppkey = :ps_suppkey1 group by s_nationkey", + "Table": "supplier", + "Values": [ + ":ps_suppkey1" + ], + "Vindex": "hash" + } + ] + } + ] }, { "OperatorType": "Route", - "Variant": "ByDestination", + "Variant": "EqualUnique", "Keyspace": { "Name": "main", "Sharded": true }, - "FieldQuery": "select count(*), l_shipmode, weight_string(l_shipmode) from lineitem where 1 != 1 group by l_shipmode, weight_string(l_shipmode)", - "Query": "select count(*), l_shipmode, weight_string(l_shipmode) from lineitem where l_shipmode in ('MAIL', 'SHIP') and l_commitdate < l_receiptdate and l_shipdate < l_commitdate and l_receiptdate >= date('1994-01-01') and l_receiptdate < date('1994-01-01') + interval '1' year and l_orderkey = :o_orderkey group by l_shipmode, weight_string(l_shipmode)", - "Table": "lineitem" + "FieldQuery": 
"select count(*) from nation where 1 != 1 group by .0", + "Query": "select count(*) from nation where n_name = 'GERMANY' and n_nationkey = :s_nationkey1 group by .0", + "Table": "nation", + "Values": [ + ":s_nationkey1" + ], + "Vindex": "hash" } ] } @@ -1052,6 +1065,205 @@ ] } ] + }, + { + "InputName": "Outer", + "OperatorType": "Filter", + "Predicate": "sum(ps_supplycost * ps_availqty) > :__sq1", + "ResultColumns": 2, + "Inputs": [ + { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "1 DESC", + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum(1) AS value", + "GroupBy": "(0|2)", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 2] as ps_partkey", + "[COLUMN 0] * [COLUMN 1] as value", + "[COLUMN 3] as weight_string(ps_partkey)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1,L:3", + "JoinVars": { + "s_nationkey": 2 + }, + "TableName": "partsupp_supplier_nation", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as value", + "[COLUMN 2] as ps_partkey", + "[COLUMN 3] as s_nationkey", + "[COLUMN 4] as weight_string(ps_partkey)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1,R:1,L:3", + "JoinVars": { + "ps_suppkey": 2 + }, + "TableName": "partsupp_supplier", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select sum(ps_supplycost * ps_availqty) as value, ps_partkey, ps_suppkey, weight_string(ps_partkey) from partsupp where 1 != 1 group by ps_partkey, ps_suppkey, weight_string(ps_partkey)", + "OrderBy": "(1|3) ASC", + "Query": "select sum(ps_supplycost * ps_availqty) as value, ps_partkey, ps_suppkey, weight_string(ps_partkey) from partsupp group by ps_partkey, ps_suppkey, weight_string(ps_partkey) order by ps_partkey asc", + 
"Table": "partsupp" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*), s_nationkey from supplier where 1 != 1 group by s_nationkey", + "Query": "select count(*), s_nationkey from supplier where s_suppkey = :ps_suppkey group by s_nationkey", + "Table": "supplier", + "Values": [ + ":ps_suppkey" + ], + "Vindex": "hash" + } + ] + } + ] + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*) from nation where 1 != 1 group by .0", + "Query": "select count(*) from nation where n_name = 'GERMANY' and n_nationkey = :s_nationkey group by .0", + "Table": "nation", + "Values": [ + ":s_nationkey" + ], + "Vindex": "hash" + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "main.nation", + "main.partsupp", + "main.supplier" + ] + } + }, + { + "comment": "TPC-H query 12", + "query": "select l_shipmode, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority <> '1-URGENT' and o_orderpriority <> '2-HIGH' then 1 else 0 end) as low_line_count from orders, lineitem where o_orderkey = l_orderkey and l_shipmode in ('MAIL', 'SHIP') and l_commitdate < l_receiptdate and l_shipdate < l_commitdate and l_receiptdate >= date('1994-01-01') and l_receiptdate < date('1994-01-01') + interval '1' year group by l_shipmode order by l_shipmode", + "plan": { + "QueryType": "SELECT", + "Original": "select l_shipmode, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority <> '1-URGENT' and o_orderpriority <> '2-HIGH' then 1 else 0 end) as low_line_count from orders, lineitem where o_orderkey = l_orderkey and l_shipmode in ('MAIL', 'SHIP') and l_commitdate < l_receiptdate and l_shipdate < l_commitdate and 
l_receiptdate >= date('1994-01-01') and l_receiptdate < date('1994-01-01') + interval '1' year group by l_shipmode order by l_shipmode", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum(1) AS high_line_count, sum(2) AS low_line_count", + "GroupBy": "(0|3)", + "ResultColumns": 3, + "Inputs": [ + { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(0|3) ASC", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,L:0,L:1,R:1", + "JoinVars": { + "o_orderkey": 2 + }, + "TableName": "orders_lineitem", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end, case when o_orderpriority != '1-URGENT' and o_orderpriority != '2-HIGH' then 1 else 0 end, o_orderkey from orders where 1 != 1", + "Query": "select case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end, case when o_orderpriority != '1-URGENT' and o_orderpriority != '2-HIGH' then 1 else 0 end, o_orderkey from orders", + "Table": "orders" + }, + { + "OperatorType": "VindexLookup", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "Values": [ + ":o_orderkey" + ], + "Vindex": "lineitem_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1", + "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals", + "Table": "lineitem_map", + "Values": [ + "::l_orderkey" + ], + "Vindex": "md5" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select l_shipmode, weight_string(l_shipmode) from lineitem where 1 != 1", + 
"Query": "select l_shipmode, weight_string(l_shipmode) from lineitem where l_shipmode in ('MAIL', 'SHIP') and l_commitdate < l_receiptdate and l_shipdate < l_commitdate and l_receiptdate >= date('1994-01-01') and l_receiptdate < date('1994-01-01') + interval '1' year and l_orderkey = :o_orderkey", + "Table": "lineitem" + } + ] + } + ] + } + ] } ] }, @@ -1064,32 +1276,183 @@ { "comment": "TPC-H query 13", "query": "select c_count, count(*) as custdist from ( select c_custkey, count(o_orderkey) from customer left outer join orders on c_custkey = o_custkey and o_comment not like '%special%requests%' group by c_custkey ) as c_orders(c_custkey, c_count) group by c_count order by custdist desc, c_count desc", - "plan": "VT12001: unsupported: using aggregation on top of a *planbuilder.orderedAggregate plan" + "plan": { + "QueryType": "SELECT", + "Original": "select c_count, count(*) as custdist from ( select c_custkey, count(o_orderkey) from customer left outer join orders on c_custkey = o_custkey and o_comment not like '%special%requests%' group by c_custkey ) as c_orders(c_custkey, c_count) group by c_count order by custdist desc, c_count desc", + "Instructions": { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "1 DESC, 0 DESC", + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "count_star(1) AS custdist", + "GroupBy": "0", + "Inputs": [ + { + "OperatorType": "SimpleProjection", + "Columns": [ + 1, + 3 + ], + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count(1) AS count(o_orderkey), any_value(3)", + "GroupBy": "(0|2)", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 2] as c_custkey", + "[COLUMN 1] * [COLUMN 0] as count(o_orderkey)", + "[COLUMN 3] as weight_string(c_custkey)", + "[COLUMN 4] as 1" + ], + "Inputs": [ + { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "0 ASC, (2|3) ASC", + "Inputs": [ + { + "OperatorType": 
"Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "R:0,L:0,L:1,L:2,L:3", + "JoinVars": { + "c_custkey": 1 + }, + "TableName": "customer_orders", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*), c_custkey, weight_string(c_custkey), 1 from customer where 1 != 1 group by c_custkey, weight_string(c_custkey)", + "OrderBy": "(1|2) ASC", + "Query": "select count(*), c_custkey, weight_string(c_custkey), 1 from customer group by c_custkey, weight_string(c_custkey) order by c_custkey asc", + "Table": "customer" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(o_orderkey) from orders where 1 != 1 group by .0", + "Query": "select count(o_orderkey) from orders where o_comment not like '%special%requests%' and o_custkey = :c_custkey group by .0", + "Table": "orders" + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "main.customer", + "main.orders" + ] + } }, { "comment": "TPC-H query 14", "query": "select 100.00 * sum(case when p_type like 'PROMO%' then l_extendedprice * (1 - l_discount) else 0 end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue from lineitem, part where l_partkey = p_partkey and l_shipdate >= date('1995-09-01') and l_shipdate < date('1995-09-01') + interval '1' month", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": "VT12001: unsupported: in scatter query: complex aggregate expression" + "plan": { + "QueryType": "SELECT", + "Original": "select 100.00 * sum(case when p_type like 'PROMO%' then l_extendedprice * (1 - l_discount) else 0 end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue from lineitem, part where l_partkey = p_partkey and l_shipdate >= date('1995-09-01') and l_shipdate < date('1995-09-01') + interval '1' month", + "Instructions": { + 
"OperatorType": "Projection", + "Expressions": [ + "([COLUMN 0] * [COLUMN 1]) / [COLUMN 2] as promo_revenue" + ], + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "any_value(0), sum(1) AS sum(case when p_type like 'PROMO%' then l_extendedprice * (1 - l_discount) else 0 end), sum(2) AS sum(l_extendedprice * (1 - l_discount))", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:3", + "JoinVars": { + "l_discount": 2, + "l_extendedprice": 1, + "l_partkey": 4 + }, + "TableName": "lineitem_part", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select 100.00, l_extendedprice, l_discount, l_extendedprice * (1 - l_discount), l_partkey from lineitem where 1 != 1", + "Query": "select 100.00, l_extendedprice, l_discount, l_extendedprice * (1 - l_discount), l_partkey from lineitem where l_shipdate >= date('1995-09-01') and l_shipdate < date('1995-09-01') + interval '1' month", + "Table": "lineitem" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select case when p_type like 'PROMO%' then :l_extendedprice * (1 - :l_discount) else 0 end from part where 1 != 1", + "Query": "select case when p_type like 'PROMO%' then :l_extendedprice * (1 - :l_discount) else 0 end from part where p_partkey = :l_partkey", + "Table": "part", + "Values": [ + ":l_partkey" + ], + "Vindex": "hash" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "main.lineitem", + "main.part" + ] + } }, { - "comment": "TPC-H query 15 view\n#\"with revenue0(supplier_no, total_revenue) as (select l_suppkey, sum(l_extendedprice * (1 - l_discount)) from lineitem where l_shipdate >= date('1996-01-01') and l_shipdate < date('1996-01-01') + interval '3' month group by l_suppkey )\"\n#\"syntax error at position 236\"\n#Gen4 plan same as above\n# TPC-H query 15", 
+ "comment": "TPC-H query 15", "query": "select s_suppkey, s_name, s_address, s_phone, total_revenue from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = ( select max(total_revenue) from revenue0 ) order by s_suppkey", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select s_suppkey, s_name, s_address, s_phone, total_revenue from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = ( select max(total_revenue) from revenue0 ) order by s_suppkey", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutValue", "PulloutVars": [ - "__sq_has_values1", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Aggregate", "Variant": "Scalar", - "Aggregates": "max(0)", + "Aggregates": "max(0|1) AS max(total_revenue)", "Inputs": [ { "OperatorType": "Route", @@ -1098,121 +1461,140 @@ "Name": "main", "Sharded": true }, - "FieldQuery": "select max(total_revenue) from revenue0 where 1 != 1", - "Query": "select max(total_revenue) from revenue0", + "FieldQuery": "select max(total_revenue), weight_string(total_revenue) from revenue0 where 1 != 1 group by weight_string(total_revenue)", + "Query": "select max(total_revenue), weight_string(total_revenue) from revenue0 group by weight_string(total_revenue)", "Table": "revenue0" } ] }, { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,L:2,L:3,R:0", - "JoinVars": { - "s_suppkey": 0 + "InputName": "Outer", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true }, - "TableName": "supplier_revenue0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select s_suppkey, s_name, s_address, s_phone, weight_string(s_suppkey) from supplier where 1 != 1", - "OrderBy": "(0|4) ASC", - "Query": "select s_suppkey, s_name, s_address, s_phone, weight_string(s_suppkey) 
from supplier order by s_suppkey asc", - "ResultColumns": 4, - "Table": "supplier" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select total_revenue from revenue0 where 1 != 1", - "Query": "select total_revenue from revenue0 where supplier_no = :s_suppkey and total_revenue = :__sq1", - "Table": "revenue0", - "Values": [ - ":s_suppkey" - ], - "Vindex": "hash" - } - ] + "FieldQuery": "select s_suppkey, s_name, s_address, s_phone, total_revenue, weight_string(s_suppkey) from supplier, revenue0 where 1 != 1", + "OrderBy": "(0|5) ASC", + "Query": "select s_suppkey, s_name, s_address, s_phone, total_revenue, weight_string(s_suppkey) from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = :__sq1 order by s_suppkey asc", + "ResultColumns": 5, + "Table": "revenue0, supplier" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "main.revenue0", + "main.supplier" + ] + } + }, + { + "comment": "TPC-H query 16", + "query": "select p_brand, p_type, p_size, count(distinct ps_suppkey) as supplier_cnt from partsupp, part where p_partkey = ps_partkey and p_brand <> 'Brand#45' and p_type not like 'MEDIUM POLISHED%' and p_size in (49, 14, 23, 45, 19, 3, 36, 9) and ps_suppkey not in ( select s_suppkey from supplier where s_comment like '%Customer%Complaints%' ) group by p_brand, p_type, p_size order by supplier_cnt desc, p_brand, p_type, p_size", + "plan": { "QueryType": "SELECT", - "Original": "select s_suppkey, s_name, s_address, s_phone, total_revenue from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = ( select max(total_revenue) from revenue0 ) order by s_suppkey", + "Original": "select p_brand, p_type, p_size, count(distinct ps_suppkey) as supplier_cnt from partsupp, part where p_partkey = ps_partkey and p_brand <> 'Brand#45' and p_type not like 'MEDIUM POLISHED%' and p_size in (49, 14, 23, 45, 19, 3, 36, 9) and ps_suppkey not in ( select s_suppkey 
from supplier where s_comment like '%Customer%Complaints%' ) group by p_brand, p_type, p_size order by supplier_cnt desc, p_brand, p_type, p_size", "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "3 DESC, (0|4) ASC, (1|5) ASC, (2|6) ASC", + "ResultColumns": 4, "Inputs": [ { "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "max(0) AS max(total_revenue)", + "Variant": "Ordered", + "Aggregates": "count_distinct(3|7) AS supplier_cnt", + "GroupBy": "(0|4), (1|5), (2|6)", "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select max(total_revenue) from revenue0 where 1 != 1", - "Query": "select max(total_revenue) from revenue0", - "Table": "revenue0" + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(0|4) ASC, (1|5) ASC, (2|6) ASC, (3|7) ASC", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,R:1,R:2,L:0,R:3,R:4,R:5,L:1", + "JoinVars": { + "ps_partkey": 2, + "ps_suppkey": 0 + }, + "TableName": "partsupp_part", + "Inputs": [ + { + "OperatorType": "UncorrelatedSubquery", + "Variant": "PulloutNotIn", + "PulloutVars": [ + "__sq_has_values", + "__sq1" + ], + "Inputs": [ + { + "InputName": "SubQuery", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select s_suppkey from supplier where 1 != 1", + "Query": "select s_suppkey from supplier where s_comment like '%Customer%Complaints%'", + "Table": "supplier" + }, + { + "InputName": "Outer", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select ps_suppkey, weight_string(ps_suppkey), ps_partkey from partsupp where 1 != 1", + "Query": "select ps_suppkey, 
weight_string(ps_suppkey), ps_partkey from partsupp where not :__sq_has_values and ps_suppkey not in ::__sq1", + "Table": "partsupp" + } + ] + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select p_brand, p_type, p_size, weight_string(p_brand), weight_string(p_type), weight_string(p_size) from part where 1 != 1", + "Query": "select p_brand, p_type, p_size, weight_string(p_brand), weight_string(p_type), weight_string(p_size) from part where p_brand != 'Brand#45' and p_type not like 'MEDIUM POLISHED%' and p_size in (49, 14, 23, 45, 19, 3, 36, 9) and p_partkey = :ps_partkey", + "Table": "part", + "Values": [ + ":ps_partkey" + ], + "Vindex": "hash" + } + ] + } + ] } ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select s_suppkey, s_name, s_address, s_phone, total_revenue, weight_string(s_suppkey) from supplier, revenue0 where 1 != 1", - "OrderBy": "(0|5) ASC", - "Query": "select s_suppkey, s_name, s_address, s_phone, total_revenue, weight_string(s_suppkey) from supplier, revenue0 where total_revenue = :__sq1 and s_suppkey = supplier_no order by s_suppkey asc", - "ResultColumns": 5, - "Table": "revenue0, supplier" } ] }, "TablesUsed": [ - "main.revenue0", + "main.part", + "main.partsupp", "main.supplier" ] } }, - { - "comment": "TPC-H query 16", - "query": "select p_brand, p_type, p_size, count(distinct ps_suppkey) as supplier_cnt from partsupp, part where p_partkey = ps_partkey and p_brand <> 'Brand#45' and p_type not like 'MEDIUM POLISHED%' and p_size in (49, 14, 23, 45, 19, 3, 36, 9) and ps_suppkey not in ( select s_suppkey from supplier where s_comment like '%Customer%Complaints%' ) group by p_brand, p_type, p_size order by supplier_cnt desc, p_brand, p_type, p_size", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": "VT12001: unsupported: using 
aggregation on top of a *planbuilder.pulloutSubquery plan" - }, { "comment": "TPC-H query 17", "query": "select sum(l_extendedprice) / 7.0 as avg_yearly from lineitem, part where p_partkey = l_partkey and p_brand = 'Brand#23' and p_container = 'MED BOX' and l_quantity < ( select 0.2 * avg(l_quantity) from lineitem where l_partkey = p_partkey )", - "v3-plan": "VT03019: column p_partkey not found", - "gen4-plan": "VT12001: unsupported: cross-shard correlated subquery" + "plan": "VT12001: unsupported: correlated subquery is only supported for EXISTS" }, { "comment": "TPC-H query 18", "query": "select c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice, sum(l_quantity) from customer, orders, lineitem where o_orderkey in ( select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) > 300 ) and c_custkey = o_custkey and o_orderkey = l_orderkey group by c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice order by o_totalprice desc, o_orderdate limit 100", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice, sum(l_quantity) from customer, orders, lineitem where o_orderkey in ( select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) > 300 ) and c_custkey = o_custkey and o_orderkey = l_orderkey group by c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice order by o_totalprice desc, o_orderdate limit 100", "Instructions": { @@ -1223,121 +1605,111 @@ "OperatorType": "Aggregate", "Variant": "Ordered", "Aggregates": "sum(5) AS sum(l_quantity)", - "GroupBy": "(4|10), (3|9), (0|8), (1|7), (2|6)", + "GroupBy": "(4|6), (3|7), (0|8), (1|9), (2|10)", "ResultColumns": 6, "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 2] as c_name", - "[COLUMN 3] as c_custkey", - "[COLUMN 4] as o_orderkey", - "[COLUMN 1] as o_orderdate", - "[COLUMN 0] as o_totalprice", - "([COLUMN 
10] * COALESCE([COLUMN 11], INT64(1))) * COALESCE([COLUMN 12], INT64(1)) as sum(l_quantity)", - "[COLUMN 9]", - "[COLUMN 8]", - "[COLUMN 7]", - "[COLUMN 6]", - "[COLUMN 5]" + "OperatorType": "UncorrelatedSubquery", + "Variant": "PulloutIn", + "PulloutVars": [ + "__sq_has_values", + "__sq1" ], "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:2,L:3,L:4,L:5,L:6,L:8,L:9,L:10,L:11,L:12,L:13,L:14,R:1", - "JoinVars": { - "o_orderkey": 0 + "InputName": "SubQuery", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true }, - "TableName": "orders_customer_lineitem", + "FieldQuery": "select l_orderkey from lineitem where 1 != 1 group by l_orderkey", + "Query": "select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) > 300", + "Table": "lineitem" + }, + { + "InputName": "Outer", + "OperatorType": "Filter", + "Predicate": ":__sq_has_values and o_orderkey in ::__sq1", "Inputs": [ { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(2|8) DESC, (3|9) ASC, (4|10) ASC, (5|11) ASC, (0|7) ASC", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum(5) AS sum(l_quantity)", + "GroupBy": "(4|6), (3|7), (0|8), (1|9), (2|10)", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:0,L:5,L:7,R:1,R:3,L:0,L:4,L:6,L:8,R:2,R:4,L:4,L:2,R:0", - "JoinVars": { - "o_custkey": 1 - }, - "TableName": "orders_customer", + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(4|6) DESC, (3|7) ASC, (0|8) ASC, (1|9) ASC, (2|10) ASC", "Inputs": [ { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select o_orderkey, o_custkey, count(*), weight_string(o_custkey), weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey), o_orderkey, 
weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate)", - "Query": "select o_orderkey, o_custkey, count(*), weight_string(o_custkey), weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate) from orders where :o_orderkey in (select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) > 300) group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate)", - "Table": "orders", - "Values": [ - "::__sq1" - ], - "Vindex": "hash" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,R:1,R:2,R:3,R:4,L:0,R:5,R:6,R:7,R:8,R:9", + "JoinVars": { + "l_orderkey": 1 }, - "FieldQuery": "select count(*), c_name, weight_string(c_name), c_custkey, weight_string(c_custkey) from customer where 1 != 1 group by c_name, weight_string(c_name), c_custkey, weight_string(c_custkey)", - "Query": "select count(*), c_name, weight_string(c_name), c_custkey, weight_string(c_custkey) from customer where c_custkey = :o_custkey group by c_name, weight_string(c_name), c_custkey, weight_string(c_custkey)", - "Table": "customer", - "Values": [ - ":o_custkey" - ], - "Vindex": "hash" + "TableName": "lineitem_orders_customer", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select l_quantity, l_orderkey from lineitem where 1 != 1", + "Query": "select l_quantity, l_orderkey from lineitem", + "Table": "lineitem" + }, + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,R:1,L:0,L:1,L:2,L:3,L:4,R:2,R:3,L:5", + "JoinVars": { + "o_custkey": 6 + }, + "TableName": "orders_customer", + "Inputs": [ + { + "OperatorType": "Route", + 
"Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select o_orderkey, o_orderdate, o_totalprice, weight_string(o_totalprice), weight_string(o_orderdate), weight_string(o_orderkey), o_custkey from orders where 1 != 1", + "Query": "select o_orderkey, o_orderdate, o_totalprice, weight_string(o_totalprice), weight_string(o_orderdate), weight_string(o_orderkey), o_custkey from orders where o_orderkey = :l_orderkey", + "Table": "orders", + "Values": [ + ":l_orderkey" + ], + "Vindex": "hash" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select c_name, c_custkey, weight_string(c_name), weight_string(c_custkey) from customer where 1 != 1", + "Query": "select c_name, c_custkey, weight_string(c_name), weight_string(c_custkey) from customer where c_custkey = :o_custkey", + "Table": "customer", + "Values": [ + ":o_custkey" + ], + "Vindex": "hash" + } + ] + } + ] } ] } ] - }, - { - "OperatorType": "VindexLookup", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "Values": [ - ":o_orderkey" - ], - "Vindex": "lineitem_map", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1", - "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals", - "Table": "lineitem_map", - "Values": [ - "::l_orderkey" - ], - "Vindex": "md5" - }, - { - "OperatorType": "Route", - "Variant": "ByDestination", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select 1, sum(l_quantity) from lineitem where 1 != 1 group by 1", - "Query": "select 1, sum(l_quantity) from lineitem where l_orderkey = :o_orderkey group by 1", - "Table": "lineitem" - } - ] } ] } @@ -1357,8 +1729,7 @@ { "comment": "TPC-H query 19", "query": "select 
sum(l_extendedprice* (1 - l_discount)) as revenue from lineitem, part where ( p_partkey = l_partkey and p_brand = 'Brand#12' and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') and l_quantity >= 1 and l_quantity <= 1 + 10 and p_size between 1 and 5 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#23' and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') and l_quantity >= 10 and l_quantity <= 10 + 10 and p_size between 1 and 10 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#34' and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') and l_quantity >= 20 and l_quantity <= 20 + 10 and p_size between 1 and 15 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' )", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select sum(l_extendedprice* (1 - l_discount)) as revenue from lineitem, part where ( p_partkey = l_partkey and p_brand = 'Brand#12' and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') and l_quantity >= 1 and l_quantity <= 1 + 10 and p_size between 1 and 5 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#23' and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') and l_quantity >= 10 and l_quantity <= 10 + 10 and p_size between 1 and 10 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#34' and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') and l_quantity >= 20 and l_quantity <= 20 + 10 and p_size between 1 and 15 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' )", "Instructions": { @@ -1421,14 +1792,12 @@ { "comment": "TPC-H query 20", "query": "select 
s_name, s_address from supplier, nation where s_suppkey in ( select ps_suppkey from partsupp where ps_partkey in ( select p_partkey from part where p_name like 'forest%' ) and ps_availqty > ( select 0.5 * sum(l_quantity) from lineitem where l_partkey = ps_partkey and l_suppkey = ps_suppkey and l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year ) ) and s_nationkey = n_nationkey and n_name = 'CANADA' order by s_name", - "v3-plan": "VT03019: column ps_partkey not found", - "gen4-plan": "VT12001: unsupported: cross-shard correlated subquery" + "plan": "VT12001: unsupported: correlated subquery is only supported for EXISTS" }, { "comment": "TPC-H query 21", "query": "select s_name, count(*) as numwait from supplier, lineitem l1, orders, nation where s_suppkey = l1.l_suppkey and o_orderkey = l1.l_orderkey and o_orderstatus = 'F' and l1.l_receiptdate > l1.l_commitdate and exists ( select * from lineitem l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey <> l1.l_suppkey ) and not exists ( select * from lineitem l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey <> l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate ) and s_nationkey = n_nationkey and n_name = 'SAUDI ARABIA' group by s_name order by numwait desc, s_name limit 100", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select s_name, count(*) as numwait from supplier, lineitem l1, orders, nation where s_suppkey = l1.l_suppkey and o_orderkey = l1.l_orderkey and o_orderstatus = 'F' and l1.l_receiptdate > l1.l_commitdate and exists ( select * from lineitem l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey <> l1.l_suppkey ) and not exists ( select * from lineitem l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey <> l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate ) and s_nationkey = n_nationkey and n_name = 'SAUDI ARABIA' group by s_name order by numwait desc, s_name 
limit 100", "Instructions": { @@ -1450,100 +1819,120 @@ { "OperatorType": "Projection", "Expressions": [ - "[COLUMN 0] as s_name", - "(([COLUMN 2] * COALESCE([COLUMN 3], INT64(1))) * COALESCE([COLUMN 4], INT64(1))) * COALESCE([COLUMN 5], INT64(1)) as numwait", - "[COLUMN 1]" + "[COLUMN 2] as s_name", + "[COLUMN 0] * [COLUMN 1] as numwait", + "[COLUMN 3] as weight_string(s_name)" ], "Inputs": [ { "OperatorType": "Sort", "Variant": "Memory", - "OrderBy": "(0|1) ASC", + "OrderBy": "(2|3) ASC", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "R:0,R:1,L:3,L:4,R:2,R:3", + "JoinColumnIndexes": "L:0,R:0,R:1,R:2", "JoinVars": { - "l1_l_suppkey": 0 + "l1_l_suppkey": 1 }, "TableName": "lineitem_orders_supplier_nation", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:0,L:4,L:2,R:1", - "JoinVars": { - "l1_l_orderkey": 1 - }, - "TableName": "lineitem_orders", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 2] as l_suppkey" + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select l1.l_suppkey, l1.l_orderkey, count(*) as numwait, weight_string(l1.l_orderkey), weight_string(l1.l_suppkey) from lineitem as l1 where 1 != 1 group by l1.l_orderkey, weight_string(l1.l_orderkey), l1.l_suppkey, weight_string(l1.l_suppkey)", - "Query": "select l1.l_suppkey, l1.l_orderkey, count(*) as numwait, weight_string(l1.l_orderkey), weight_string(l1.l_suppkey) from lineitem as l1 where l1.l_receiptdate > l1.l_commitdate and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate limit 1) group by l1.l_orderkey, weight_string(l1.l_orderkey), l1.l_suppkey, 
weight_string(l1.l_suppkey)", - "Table": "lineitem" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1", + "JoinVars": { + "l1_l_orderkey": 2, + "l1_l_suppkey": 1 }, - "FieldQuery": "select 1, count(*) as numwait from orders where 1 != 1 group by 1", - "Query": "select 1, count(*) as numwait from orders where o_orderstatus = 'F' and o_orderkey = :l1_l_orderkey and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate limit 1) group by 1", - "Table": "orders", - "Values": [ - ":l1_l_orderkey" - ], - "Vindex": "hash" + "TableName": "lineitem_orders", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*), l1.l_suppkey, l1.l_orderkey from lineitem as l1 where 1 != 1 group by l1.l_suppkey, l1.l_orderkey", + "Query": "select count(*), l1.l_suppkey, l1.l_orderkey from lineitem as l1 where l1.l_receiptdate > l1.l_commitdate and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate) group by l1.l_suppkey, l1.l_orderkey", + "Table": "lineitem" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*) from orders where 1 != 1 group by .0", + "Query": "select count(*) from orders where o_orderstatus = 'F' and o_orderkey = :l1_l_orderkey group by .0", + "Table": "orders", + "Values": [ + ":l1_l_orderkey" + ], + "Vindex": 
"hash" + } + ] } ] }, { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:3,L:4,L:1,R:1", - "JoinVars": { - "s_nationkey": 0 - }, - "TableName": "supplier_nation", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 2] as s_name", + "[COLUMN 3] as weight_string(s_name)" + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select s_nationkey, count(*) as numwait, weight_string(s_nationkey), s_name, weight_string(s_name) from supplier where 1 != 1 group by s_nationkey, weight_string(s_nationkey), s_name, weight_string(s_name)", - "Query": "select s_nationkey, count(*) as numwait, weight_string(s_nationkey), s_name, weight_string(s_name) from supplier where s_suppkey = :l1_l_suppkey and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate limit 1) group by s_nationkey, weight_string(s_nationkey), s_name, weight_string(s_name)", - "Table": "supplier", - "Values": [ - ":l1_l_suppkey" - ], - "Vindex": "hash" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1,L:3", + "JoinVars": { + "s_nationkey": 2 }, - "FieldQuery": "select 1, count(*) as numwait from nation where 1 != 1 group by 1", - "Query": "select 1, count(*) as numwait from nation where n_name = 'SAUDI ARABIA' and n_nationkey = :s_nationkey and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > 
l3.l_commitdate limit 1) group by 1", - "Table": "nation", - "Values": [ - ":s_nationkey" - ], - "Vindex": "hash" + "TableName": "supplier_nation", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*), s_name, s_nationkey, weight_string(s_name) from supplier where 1 != 1 group by s_name, s_nationkey, weight_string(s_name)", + "Query": "select count(*), s_name, s_nationkey, weight_string(s_name) from supplier where s_suppkey = :l1_l_suppkey group by s_name, s_nationkey, weight_string(s_name)", + "Table": "supplier", + "Values": [ + ":l1_l_suppkey" + ], + "Vindex": "hash" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*) from nation where 1 != 1 group by .0", + "Query": "select count(*) from nation where n_name = 'SAUDI ARABIA' and n_nationkey = :s_nationkey group by .0", + "Table": "nation", + "Values": [ + ":s_nationkey" + ], + "Vindex": "hash" + } + ] } ] } @@ -1570,7 +1959,6 @@ { "comment": "TPC-H query 22", "query": "select cntrycode, count(*) as numcust, sum(c_acctbal) as totacctbal from ( select substring(c_phone from 1 for 2) as cntrycode, c_acctbal from customer where substring(c_phone from 1 for 2) in ('13', '31', '23', '29', '30', '18', '17') and c_acctbal > ( select avg(c_acctbal) from customer where c_acctbal > 0.00 and substring(c_phone from 1 for 2) in ('13', '31', '23', '29', '30', '18', '17') ) and not exists ( select * from orders where o_custkey = c_custkey ) ) as custsale group by cntrycode order by cntrycode", - "v3-plan": "VT03019: column c_custkey not found", - "gen4-plan": "VT12001: unsupported: EXISTS sub-queries are only supported with AND clause" + "plan": "VT12001: unsupported: correlated subquery is only supported for EXISTS" } -] +] \ No newline at end of file diff --git a/go/vt/vtgate/planbuilder/testdata/union_cases.json 
b/go/vt/vtgate/planbuilder/testdata/union_cases.json index 753542af337..19b58865546 100644 --- a/go/vt/vtgate/planbuilder/testdata/union_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/union_cases.json @@ -2,7 +2,7 @@ { "comment": "union all between two scatter selects", "query": "select id from user union all select id from music", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user union all select id from music", "Instructions": { @@ -14,22 +14,7 @@ }, "FieldQuery": "select id from `user` where 1 != 1 union all select id from music where 1 != 1", "Query": "select id from `user` union all select id from music", - "Table": "`user`" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select id from user union all select id from music", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1 union all select id from music where 1 != 1", - "Query": "select id from `user` union all select id from music", - "Table": "`user`" + "Table": "`user`, music" }, "TablesUsed": [ "user.music", @@ -40,43 +25,7 @@ { "comment": "union distinct between two scatter selects", "query": "select id from user union select id from music", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user union select id from music", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music", - "Table": "music" - } - ] - } - ] - } - }, - 
"gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user union select id from music", "Instructions": { @@ -93,9 +42,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select id, weight_string(id) from music where 1 != 1", - "Query": "select id, weight_string(id) from `user` union select id, weight_string(id) from music", - "Table": "`user`" + "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select id from music where 1 != 1) as dt where 1 != 1", + "Query": "select id, weight_string(id) from (select id from `user` union select id from music) as dt", + "Table": "`user`, music" } ] }, @@ -108,46 +57,7 @@ { "comment": "union all between two SelectEqualUnique", "query": "select id from user where id = 1 union all select id from user where id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id = 1 union all select id from user where id = 5", - "Instructions": { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id = 1 union all select id from user where id = 5", "Instructions": { @@ -193,94 +103,53 @@ { "comment": "almost dereks query - two queries with order by and limit being scattered to two different sets of 
tablets", "query": "(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)", - "Instructions": { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit", - "ResultColumns": 1, - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from music where 1 != 1", - "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit", - "ResultColumns": 1, - "Table": "music" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)", "Instructions": { - "OperatorType": "Concatenate", + "OperatorType": "SimpleProjection", + "Columns": [ + 0 + ], "Inputs": [ { - "OperatorType": "Limit", - "Count": "INT64(1)", + "OperatorType": "Concatenate", "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit", - "ResultColumns": 1, - "Table": "`user`" - } - ] - }, - { - "OperatorType": 
"Limit", - "Count": "INT64(1)", - "Inputs": [ + "OperatorType": "Limit", + "Count": "INT64(1)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", + "OrderBy": "(0|1) DESC", + "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit", + "Table": "`user`" + } + ] + }, { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from music where 1 != 1", - "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit", - "ResultColumns": 1, - "Table": "music" + "OperatorType": "Limit", + "Count": "INT64(1)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, weight_string(id) from music where 1 != 1", + "OrderBy": "(0|1) DESC", + "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit", + "Table": "music" + } + ] } ] } @@ -295,22 +164,7 @@ { "comment": "Union all", "query": "select col1, col2 from user union all select col1, col2 from user_extra", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col1, col2 from user union all select col1, col2 from user_extra", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1", - "Query": "select col1, col2 from `user` union all select col1, col2 from user_extra", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1, col2 from user union all select col1, col2 from user_extra", "Instructions": { @@ -322,7 +176,7 @@ }, 
"FieldQuery": "select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1", "Query": "select col1, col2 from `user` union all select col1, col2 from user_extra", - "Table": "`user`" + "Table": "`user`, user_extra" }, "TablesUsed": [ "user.user", @@ -333,22 +187,7 @@ { "comment": "union operations in subqueries (FROM)", "query": "select * from (select * from user union all select * from user_extra) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from (select * from user union all select * from user_extra) as t", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from (select * from `user` where 1 != 1 union all select * from user_extra where 1 != 1) as t where 1 != 1", - "Query": "select * from (select * from `user` union all select * from user_extra) as t", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from (select * from user union all select * from user_extra) as t", "Instructions": { @@ -360,7 +199,7 @@ }, "FieldQuery": "select * from (select * from `user` where 1 != 1 union all select * from user_extra where 1 != 1) as t where 1 != 1", "Query": "select * from (select * from `user` union all select * from user_extra) as t", - "Table": "`user`" + "Table": "`user`, user_extra" }, "TablesUsed": [ "user.user", @@ -371,22 +210,7 @@ { "comment": "union operations in derived table, without star expression (FROM)¡", "query": "select col1,col2 from (select col1, col2 from user union all select col1, col2 from user_extra) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col1,col2 from (select col1, col2 from user union all select col1, col2 from user_extra) as t", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, col2 from 
(select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1) as t where 1 != 1", - "Query": "select col1, col2 from (select col1, col2 from `user` union all select col1, col2 from user_extra) as t", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1,col2 from (select col1, col2 from user union all select col1, col2 from user_extra) as t", "Instructions": { @@ -398,7 +222,7 @@ }, "FieldQuery": "select col1, col2 from (select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1) as t where 1 != 1", "Query": "select col1, col2 from (select col1, col2 from `user` union all select col1, col2 from user_extra) as t", - "Table": "`user`" + "Table": "`user`, user_extra" }, "TablesUsed": [ "user.user", @@ -409,94 +233,53 @@ { "comment": "union all between two scatter selects, with order by", "query": "(select id from user order by id limit 5) union all (select id from music order by id desc limit 5)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select id from user order by id limit 5) union all (select id from music order by id desc limit 5)", "Instructions": { - "OperatorType": "Concatenate", + "OperatorType": "SimpleProjection", + "Columns": [ + 0 + ], "Inputs": [ { - "OperatorType": "Limit", - "Count": "INT64(5)", + "OperatorType": "Concatenate", "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", - "ResultColumns": 1, - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Limit", - "Count": "INT64(5)", - "Inputs": [ + "OperatorType": "Limit", + "Count": "INT64(5)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": 
"user", + "Sharded": true + }, + "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", + "OrderBy": "(0|1) ASC", + "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", + "Table": "`user`" + } + ] + }, { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from music where 1 != 1", - "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit", - "ResultColumns": 1, - "Table": "music" - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "(select id from user order by id limit 5) union all (select id from music order by id desc limit 5)", - "Instructions": { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(5)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", - "ResultColumns": 1, - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Limit", - "Count": "INT64(5)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from music where 1 != 1", - "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit", - "ResultColumns": 1, - "Table": "music" + "OperatorType": "Limit", + "Count": "INT64(5)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, weight_string(id) from music where 1 != 1", + "OrderBy": "(0|1) DESC", + "Query": "select id, 
weight_string(id) from music order by id desc limit :__upper_limit", + "Table": "music" + } + ] } ] } @@ -511,74 +294,19 @@ { "comment": "union all on scatter and single route", "query": "select id from user where id = 1 union select id from user where id = 1 union all select id from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id = 1 union select id from user where id = 1 union all select id from user", - "Instructions": { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1 union select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 1 union select id from `user` where id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id = 1 union select id from user where id = 1 union all select id from user", "Instructions": { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1 union select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 1 union select id from `user` where id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - } - ] + 
"OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id from `user` where 1 != 1 union select id from `user` where 1 != 1 union all select id from `user` where 1 != 1", + "Query": "select id from `user` where id = 1 union select id from `user` where id = 1 union all select id from `user`", + "Table": "`user`" }, "TablesUsed": [ "user.user" @@ -588,43 +316,7 @@ { "comment": "union of information_schema with normal table", "query": "select CHARACTER_SET_NAME from information_schema.CHARACTER_SETS union select user_name from unsharded", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select CHARACTER_SET_NAME from information_schema.CHARACTER_SETS union select user_name from unsharded", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select CHARACTER_SET_NAME from information_schema.CHARACTER_SETS where 1 != 1", - "Query": "select CHARACTER_SET_NAME from information_schema.CHARACTER_SETS", - "Table": "information_schema.CHARACTER_SETS" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select user_name from unsharded where 1 != 1", - "Query": "select user_name from unsharded", - "Table": "unsharded" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select CHARACTER_SET_NAME from information_schema.CHARACTER_SETS union select user_name from unsharded", "Instructions": { @@ -670,143 +362,12 @@ { "comment": "union of information_schema with normal table", "query": "select * from unsharded union select * from information_schema.CHARACTER_SETS", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from unsharded union select * from information_schema.CHARACTER_SETS", 
- "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from unsharded where 1 != 1", - "Query": "select * from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from information_schema.CHARACTER_SETS where 1 != 1", - "Query": "select * from information_schema.CHARACTER_SETS", - "Table": "information_schema.CHARACTER_SETS" - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select * from unsharded union select * from information_schema.CHARACTER_SETS", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from unsharded where 1 != 1", - "Query": "select distinct * from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select CHARACTER_SET_NAME, DEFAULT_COLLATE_NAME, DESCRIPTION, MAXLEN from information_schema.CHARACTER_SETS where 1 != 1", - "Query": "select distinct CHARACTER_SET_NAME, DEFAULT_COLLATE_NAME, DESCRIPTION, MAXLEN from information_schema.CHARACTER_SETS", - "Table": "information_schema.CHARACTER_SETS" - } - ] - } - ] - }, - "TablesUsed": [ - "main.unsharded" - ] - } + "plan": "VT09015: schema tracking required" }, { "comment": "multi-shard union", "query": "(select id from user union select id from music) union select 1 from dual", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(select id from user union select id from music) union select 1 from dual", - "Instructions": { - 
"OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music", - "Table": "music" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from dual where 1 != 1", - "Query": "select 1 from dual", - "Table": "dual" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select id from user union select id from music) union select 1 from dual", "Instructions": { @@ -817,31 +378,15 @@ "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select id, weight_string(id) from music where 1 != 1", - "Query": "select id, weight_string(id) from `user` union select id, weight_string(id) from music", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1, weight_string(1) from dual where 1 != 1", - "Query": "select distinct 1, weight_string(1) from dual", - "Table": "dual" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 
union select id from music where 1 != 1 union select 1 from dual where 1 != 1) as dt where 1 != 1", + "Query": "select id, weight_string(id) from (select id from `user` union select id from music union select 1 from dual) as dt", + "Table": "`user`, dual, music" } ] }, @@ -852,269 +397,42 @@ ] } }, - { - "comment": "multi-shard union", - "query": "select 1 from music union (select id from user union all select name from unsharded)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from music union (select id from user union all select name from unsharded)", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music where 1 != 1", - "Query": "select 1 from music", - "Table": "music" - }, - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select `name` from unsharded where 1 != 1", - "Query": "select `name` from unsharded", - "Table": "unsharded" - } - ] - } - ] - } - ] - } - }, - "gen4-plan": "VT12001: unsupported: nesting of UNIONs on the right-hand side" - }, - { - "comment": "multi-shard union", - "query": "select 1 from music union (select id from user union select name from unsharded)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from music union (select id from user union select name from unsharded)", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - 
"Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music where 1 != 1", - "Query": "select 1 from music", - "Table": "music" - }, - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select `name` from unsharded where 1 != 1", - "Query": "select `name` from unsharded", - "Table": "unsharded" - } - ] - } - ] - } - ] - } - ] - } - }, - "gen4-plan": "VT12001: unsupported: nesting of UNIONs on the right-hand side" - }, { "comment": "union with the same target shard because of vindex", - "query": "select * from music where id = 1 union select * from user where id = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from music where id = 1 union select * from user where id = 1", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from music where 1 != 1", - "Query": "select * from music where id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "music_user_map" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - ] - } - ] - } - }, - "gen4-plan": { + "query": "select * from music where user_id = 1 union select * from user where id = 1", + "plan": { 
"QueryType": "SELECT", - "Original": "select * from music where id = 1 union select * from user where id = 1", + "Original": "select * from music where user_id = 1 union select * from user where id = 1", "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from music where 1 != 1", - "Query": "select distinct * from music where id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "music_user_map" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select distinct * from `user` where id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - "TablesUsed": [ - "user.music", - "user.user" - ] - } - }, - { - "comment": "union with different target shards", - "query": "select 1 from music where id = 1 union select 1 from music where id = 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from music where id = 1 union select 1 from music where id = 2", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music where 1 != 1", - "Query": "select 1 from music where id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "music_user_map" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music where 1 != 1", - "Query": "select 1 from music where id = 2", - "Table": "music", - "Values": [ - "INT64(2)" - ], - "Vindex": "music_user_map" - } 
- ] - } - ] - } - }, - "gen4-plan": { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select * from music where 1 != 1 union select * from `user` where 1 != 1", + "Query": "select * from music where user_id = 1 union select * from `user` where id = 1", + "Table": "`user`, music", + "Values": [ + "INT64(1)" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "union with the different target shard because of vindex (music -> lookup vindex, user -> hash vindex)", + "query": "select * from music where id = 1 union select * from user where id = 1", + "plan": "VT09015: schema tracking required" + }, + { + "comment": "union with different target shards", + "query": "select 1 from music where id = 1 union select 1 from music where id = 2", + "plan": { "QueryType": "SELECT", "Original": "select 1 from music where id = 1 union select 1 from music where id = 2", "Instructions": { @@ -1168,47 +486,7 @@ { "comment": "multiple select statement have inner order by with union - TODO (systay) no need to send down ORDER BY if we are going to loose it with UNION DISTINCT", "query": "(select id from user order by 1 desc) union (select id from user order by 1 asc)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(select id from user order by 1 desc) union (select id from user order by 1 asc)", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from `user` order by 1 desc", - "ResultColumns": 1, - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - 
}, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from `user` order by 1 asc", - "ResultColumns": 1, - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select id from user order by 1 desc) union (select id from user order by 1 asc)", "Instructions": { @@ -1225,10 +503,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "(select id, weight_string(id) from `user` where 1 != 1) union (select id, weight_string(id) from `user` where 1 != 1)", - "OrderBy": "(0|1) DESC", - "Query": "(select id, weight_string(id) from `user` order by id desc) union (select id, weight_string(id) from `user` order by id asc)", - "ResultColumns": 1, + "FieldQuery": "select id, weight_string(id) from ((select id from `user` where 1 != 1) union (select id from `user` where 1 != 1)) as dt where 1 != 1", + "Query": "select id, weight_string(id) from ((select id from `user` order by id desc) union (select id from `user` order by id asc)) as dt", "Table": "`user`" } ] @@ -1241,77 +517,26 @@ { "comment": "multiple unions", "query": "select 1 union select null union select 1.0 union select '1' union select 2 union select 2.0 from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 union select null union select 1.0 union select '1' union select 2 union select 2.0 from user", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from dual where 1 != 1 union select null from dual where 1 != 1 union select 1.0 from dual where 1 != 1 union select '1' from dual where 1 != 1 union select 2 from dual where 1 != 1", - "Query": "select 1 from dual union select null from dual union select 1.0 from dual union select '1' from dual union 
select 2 from dual", - "Table": "dual" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 2.0 from `user` where 1 != 1", - "Query": "select 2.0 from `user`", - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 union select null union select 1.0 union select '1' union select 2 union select 2.0 from user", "Instructions": { "OperatorType": "Distinct", "Collations": [ - "0" + "(0:1)" ], + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from dual where 1 != 1 union all select null from dual where 1 != 1 union all select 1.0 from dual where 1 != 1 union all select '1' from dual where 1 != 1 union select 2 from dual where 1 != 1", - "Query": "select 1 from dual union all select null from dual union all select 1.0 from dual union all select '1' from dual union select 2 from dual", - "Table": "dual" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 2.0 from `user` where 1 != 1", - "Query": "select distinct 2.0 from `user`", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `1`, weight_string(`1`) from (select 1 from dual where 1 != 1 union select null from dual where 1 != 1 union select 1.0 from dual where 1 != 1 union select '1' from dual where 1 != 1 union select 2 from dual where 1 != 1 union select 2.0 from `user` where 1 != 1) as dt where 1 != 1", + "Query": "select `1`, weight_string(`1`) from (select 1 from dual union select null from dual union select 1.0 from dual union select '1' from dual union select 2 from dual union select 2.0 from 
`user`) as dt", + "Table": "`user`, dual" } ] }, @@ -1324,62 +549,7 @@ { "comment": "union distinct between a scatter query and a join (other side)", "query": "(select user.id, user.name from user join user_extra where user_extra.extra = 'asdf') union select 'b','c' from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(select user.id, user.name from user join user_extra where user_extra.extra = 'asdf') union select 'b','c' from user", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.`name` from `user` where 1 != 1", - "Query": "select `user`.id, `user`.`name` from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.extra = 'asdf'", - "Table": "user_extra" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 'b', 'c' from `user` where 1 != 1", - "Query": "select 'b', 'c' from `user`", - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select user.id, user.name from user join user_extra where user_extra.extra = 'asdf') union select 'b','c' from user", "Instructions": { @@ -1407,7 +577,7 @@ "Sharded": true }, "FieldQuery": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user` where 1 != 1", - "Query": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user`", + 
"Query": "select distinct `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user`", "Table": "`user`" }, { @@ -1418,7 +588,7 @@ "Sharded": true }, "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.extra = 'asdf'", + "Query": "select distinct 1 from user_extra where user_extra.extra = 'asdf'", "Table": "user_extra" } ] @@ -1447,62 +617,7 @@ { "comment": "union distinct between a scatter query and a join (other side)", "query": "select 'b','c' from user union (select user.id, user.name from user join user_extra where user_extra.extra = 'asdf')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 'b','c' from user union (select user.id, user.name from user join user_extra where user_extra.extra = 'asdf')", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 'b', 'c' from `user` where 1 != 1", - "Query": "select 'b', 'c' from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.`name` from `user` where 1 != 1", - "Query": "select `user`.id, `user`.`name` from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.extra = 'asdf'", - "Table": "user_extra" - } - ] - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 'b','c' from user union (select user.id, user.name 
from user join user_extra where user_extra.extra = 'asdf')", "Instructions": { @@ -1540,7 +655,7 @@ "Sharded": true }, "FieldQuery": "select `user`.id, `user`.`name` from `user` where 1 != 1", - "Query": "select `user`.id, `user`.`name` from `user`", + "Query": "select distinct `user`.id, `user`.`name` from `user`", "Table": "`user`" }, { @@ -1551,75 +666,25 @@ "Sharded": true }, "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.extra = 'asdf'", - "Table": "user_extra" - } - ] - } - ] - } - ] - }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "unmergable because we are using aggregation", - "query": "select count(*) as s from user union select count(*) as s from music", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select count(*) as s from user union select count(*) as s from music", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) as s from `user` where 1 != 1", - "Query": "select count(*) as s from `user`", - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) as s from music where 1 != 1", - "Query": "select count(*) as s from music", - "Table": "music" + "Query": "select distinct 1 from user_extra where user_extra.extra = 'asdf'", + "Table": "user_extra" } ] } ] } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "unmergable because we are 
using aggregation", + "query": "select count(*) as s from user union select count(*) as s from music", + "plan": { "QueryType": "SELECT", "Original": "select count(*) as s from user union select count(*) as s from music", "Instructions": { @@ -1680,72 +745,7 @@ { "comment": "Union in derived table with first SELECT being an UNION", "query": "select * from ((select id from user union select id+1 from user) union select user_id from user_extra) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from ((select id from user union select id+1 from user) union select user_id from user_extra) as t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id + 1 from `user` where 1 != 1", - "Query": "select id + 1 from `user`", - "Table": "`user`" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_id from user_extra where 1 != 1", - "Query": "select user_id from user_extra", - "Table": "user_extra" - } - ] - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from ((select id from user union select id+1 from user) union select user_id from user_extra) as t", "Instructions": { @@ -1767,9 +767,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union all 
select id + 1, weight_string(id + 1) from `user` where 1 != 1 union select user_id, weight_string(user_id) from user_extra where 1 != 1", - "Query": "select id, weight_string(id) from `user` union all select id + 1, weight_string(id + 1) from `user` union select user_id, weight_string(user_id) from user_extra", - "Table": "`user`" + "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select id + 1 from `user` where 1 != 1 union select user_id from user_extra where 1 != 1) as dt where 1 != 1", + "Query": "select id, weight_string(id) from (select id from `user` union select id + 1 from `user` union select user_id from user_extra) as dt", + "Table": "`user`, user_extra" } ] } @@ -1784,8 +784,7 @@ { "comment": "gen4 optimises away ORDER BY when it's safe to do", "query": "(select id from user union select id from music order by id) union select 1 from unsharded", - "v3-plan": "VT12001: unsupported: ORDER BY on top of UNION", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select id from user union select id from music order by id) union select 1 from unsharded", "Instructions": { @@ -1805,9 +804,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select id, weight_string(id) from music where 1 != 1", - "Query": "select id, weight_string(id) from `user` union select id, weight_string(id) from music", - "Table": "`user`" + "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select id from music where 1 != 1) as dt where 1 != 1", + "Query": "select id, weight_string(id) from (select id from `user` union select id from music) as dt", + "Table": "`user`, music" }, { "OperatorType": "Route", @@ -1834,49 +833,7 @@ { "comment": "push down the ::upper_limit to the sources, since we are doing DISTINCT on them, it's safe", "query": "select id from user union select 3 limit 10", - "v3-plan": { - "QueryType": "SELECT", - 
"Original": "select id from user union select 3 limit 10", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 3 from dual where 1 != 1", - "Query": "select 3 from dual", - "Table": "dual" - } - ] - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user union select 3 limit 10", "Instructions": { @@ -1891,31 +848,15 @@ "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "Query": "select distinct id, weight_string(id) from `user` limit :__upper_limit", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 3, weight_string(3) from dual where 1 != 1", - "Query": "select distinct 3, weight_string(3) from dual limit :__upper_limit", - "Table": "dual" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select 3 from dual where 1 != 1) as dt where 1 != 1", + "Query": "select id, weight_string(id) from (select id from `user` union select 3 from dual limit :__upper_limit) as dt", + "Table": "`user`, dual" } ] } @@ -1930,22 +871,7 @@ { "comment": "silly query 
that should be collapsed into a single unsharded UNION route", "query": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1) union select 1 from unsharded union all select 1 from unsharded order by 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1) union select 1 from unsharded union all select 1 from unsharded order by 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "(select 1 from unsharded where 1 != 1 union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1) union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1", - "Query": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1 asc) union select 1 from unsharded union all select 1 from unsharded order by 1 asc", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1) union select 1 from unsharded union all select 1 from unsharded order by 1", "Instructions": { @@ -1967,85 +893,7 @@ { "comment": "UNION that needs to be reordered to be merged more aggressively. 
Gen4 is able to get it down to 2 routes", "query": "select col from unsharded union select id from user union select col2 from unsharded union select col from user_extra", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from unsharded union select id from user union select col2 from unsharded union select col from user_extra", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select col from unsharded where 1 != 1", - "Query": "select col from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select col2 from unsharded where 1 != 1", - "Query": "select col2 from unsharded", - "Table": "unsharded" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from user_extra where 1 != 1", - "Query": "select col from user_extra", - "Table": "user_extra" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from unsharded union select id from user union select col2 from unsharded union select col from user_extra", "Instructions": { @@ -2065,8 +913,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select col, weight_string(col) from unsharded where 1 != 1 
union select col2, weight_string(col2) from unsharded where 1 != 1", - "Query": "select col, weight_string(col) from unsharded union select col2, weight_string(col2) from unsharded", + "FieldQuery": "select col, weight_string(col) from (select col from unsharded where 1 != 1 union select col2 from unsharded where 1 != 1) as dt where 1 != 1", + "Query": "select col, weight_string(col) from (select col from unsharded union select col2 from unsharded) as dt", "Table": "unsharded" }, { @@ -2076,9 +924,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select col, weight_string(col) from user_extra where 1 != 1", - "Query": "select id, weight_string(id) from `user` union select col, weight_string(col) from user_extra", - "Table": "`user`" + "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select col from user_extra where 1 != 1) as dt where 1 != 1", + "Query": "select id, weight_string(id) from (select id from `user` union select col from user_extra) as dt", + "Table": "`user`, user_extra" } ] } @@ -2094,7 +942,7 @@ { "comment": "derived table with union", "query": "select tbl2.id FROM ((select id from user order by id limit 5) union all (select id from user order by id desc limit 5)) as tbl1 INNER JOIN user as tbl2 ON tbl1.id = tbl2.id", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select tbl2.id FROM ((select id from user order by id limit 5) union all (select id from user order by id desc limit 5)) as tbl1 INNER JOIN user as tbl2 ON tbl1.id = tbl2.id", "Instructions": { @@ -2129,7 +977,6 @@ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", "OrderBy": "(0|1) ASC", "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", - "ResultColumns": 1, "Table": "`user`" } ] @@ -2148,7 +995,6 @@ "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", "OrderBy": "(0|1) DESC", "Query": 
"select id, weight_string(id) from `user` order by id desc limit :__upper_limit", - "ResultColumns": 1, "Table": "`user`" } ] @@ -2173,32 +1019,96 @@ "Vindex": "user_index" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "ambiguous LIMIT", + "query": "select id from user limit 1 union all select id from music limit 1", + "plan": "syntax error at position 34 near 'union'" + }, + { + "comment": "ambiguous ORDER BY", + "query": "select id from user order by id union all select id from music order by id desc", + "plan": "syntax error at position 38 near 'union'" + }, + { + "comment": "different number of columns", + "query": "select id, 42 from user where id = 1 union all select id from user where id = 5", + "plan": "The used SELECT statements have a different number of columns: 2, 1" + }, + { + "comment": "union with invalid order by clause with table qualifier", + "query": "select id from user union select 3 order by user.id", + "plan": "Table `user` from one of the SELECTs cannot be used in global ORDER clause" + }, + { + "comment": "union with invalid order by clause with table qualifier", + "query": "select id from user union select 3 order by id", + "plan": { "QueryType": "SELECT", - "Original": "select tbl2.id FROM ((select id from user order by id limit 5) union all (select id from user order by id desc limit 5)) as tbl1 INNER JOIN user as tbl2 ON tbl1.id = tbl2.id", + "Original": "select id from user union select 3 order by id", "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "tbl1_id": 0 - }, - "TableName": "`user`_`user`", + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(0|1) ASC", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 + "OperatorType": "Distinct", + "Collations": [ + "(0:1)", + "1" ], "Inputs": [ { - "OperatorType": "Concatenate", + "OperatorType": "Route", + "Variant": 
"Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select 3 from dual where 1 != 1) as dt where 1 != 1", + "Query": "select id, weight_string(id) from (select id from `user` union select 3 from dual) as dt", + "Table": "`user`, dual" + } + ] + } + ] + }, + "TablesUsed": [ + "main.dual", + "user.user" + ] + } + }, + { + "comment": "select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t", + "query": "select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t", + "plan": { + "QueryType": "SELECT", + "Original": "select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t", + "Instructions": { + "OperatorType": "Projection", + "Expressions": [ + "INT64(1) as 1" + ], + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": null, + "Inputs": [ + { + "OperatorType": "Distinct", + "Collations": [ + "(0:1)" + ], "Inputs": [ { - "OperatorType": "Limit", - "Count": "INT64(5)", + "OperatorType": "Concatenate", "Inputs": [ { "OperatorType": "Route", @@ -2207,104 +1117,49 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", - "ResultColumns": 1, + "FieldQuery": "select id + 42 as foo, weight_string(id + 42) from `user` where 1 != 1", + "Query": "select distinct id + 42 as foo, weight_string(id + 42) from `user`", "Table": "`user`" - } - ] - }, - { - "OperatorType": "Limit", - "Count": "INT64(5)", - "Inputs": [ + }, { "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) 
DESC", - "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit", - "ResultColumns": 1, - "Table": "`user`" + "FieldQuery": "select 1 + id as foo, weight_string(1 + id) from unsharded where 1 != 1", + "Query": "select distinct 1 + id as foo, weight_string(1 + id) from unsharded", + "Table": "unsharded" } ] } ] } ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select tbl2.id from `user` as tbl2 where 1 != 1", - "Query": "select tbl2.id from `user` as tbl2 where tbl2.id = :tbl1_id", - "Table": "`user`", - "Values": [ - ":tbl1_id" - ], - "Vindex": "user_index" } ] }, "TablesUsed": [ + "main.unsharded", "user.user" ] } }, { - "comment": "ambiguous LIMIT", - "query": "select id from user limit 1 union all select id from music limit 1", - "plan": "syntax error at position 34 near 'union'" - }, - { - "comment": "ambiguous ORDER BY", - "query": "select id from user order by id union all select id from music order by id desc", - "plan": "syntax error at position 38 near 'union'" - }, - { - "comment": "different number of columns", - "query": "select id, 42 from user where id = 1 union all select id from user where id = 5", - "v3-plan": "The used SELECT statements have a different number of columns (errno 1222) (sqlstate 21000) during query: select id, 42 from `user` where id = 1 union all select id from `user` where id = 5", - "gen4-plan": "The used SELECT statements have a different number of columns: 2, 1" - }, - { - "comment": "union with invalid order by clause with table qualifier", - "query": "select id from user union select 3 order by user.id", - "v3-plan": "VT12001: unsupported: ORDER BY on top of UNION", - "gen4-plan": "Table `user` from one of the SELECTs cannot be used in global ORDER clause" - }, - { - "comment": "union with invalid order by clause with table qualifier", - "query": "select id from user union select 3 order by id", - 
"v3-plan": "VT12001: unsupported: ORDER BY on top of UNION", - "gen4-plan": "VT13001: [BUG] ORDER BY in complex query *planbuilder.distinct" - }, - { - "comment": "select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t", - "query": "select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t", - "v3-plan": "VT12001: unsupported: expression on results of a cross-shard subquery", - "gen4-plan": { + "comment": "systable union query in derived table with constraint on outside (without star projection)", + "query": "select * from (select kcu.`COLUMN_NAME` from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select kcu.`COLUMN_NAME` from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `COLUMN_NAME` = 'primary'", + "plan": { "QueryType": "SELECT", - "Original": "select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t", + "Original": "select * from (select kcu.`COLUMN_NAME` from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select kcu.`COLUMN_NAME` from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `COLUMN_NAME` = 'primary'", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 2 - ], + "OperatorType": "Filter", + "Predicate": "COLUMN_NAME = 'primary'", "Inputs": [ { "OperatorType": "Distinct", "Collations": [ - "(0:1)" + "0: utf8mb4_0900_ai_ci" ], "Inputs": [ { @@ -2312,65 +1167,42 @@ "Inputs": [ { "OperatorType": "Route", - "Variant": "Scatter", + "Variant": "DBA", "Keyspace": { - "Name": "user", - "Sharded": true + "Name": "main", + "Sharded": false }, - "FieldQuery": "select id + 42 as foo, weight_string(id + 42), 1 from `user` where 1 
!= 1", - "Query": "select distinct id + 42 as foo, weight_string(id + 42), 1 from `user`", - "Table": "`user`" + "FieldQuery": "select kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where 1 != 1", + "Query": "select distinct kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname /* VARCHAR */ and kcu.table_name = :kcu_table_name /* VARCHAR */", + "SysTableTableName": "[kcu_table_name:VARCHAR(\"user_extra\")]", + "SysTableTableSchema": "[VARCHAR(\"user\")]", + "Table": "information_schema.key_column_usage" }, { "OperatorType": "Route", - "Variant": "Unsharded", + "Variant": "DBA", "Keyspace": { "Name": "main", "Sharded": false }, - "FieldQuery": "select 1 + id as foo, weight_string(1 + id), 1 from unsharded where 1 != 1", - "Query": "select distinct 1 + id as foo, weight_string(1 + id), 1 from unsharded", - "Table": "unsharded" + "FieldQuery": "select kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where 1 != 1", + "Query": "select distinct kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname /* VARCHAR */ and kcu.table_name = :kcu_table_name1 /* VARCHAR */", + "SysTableTableName": "[kcu_table_name1:VARCHAR(\"music\")]", + "SysTableTableSchema": "[VARCHAR(\"user\")]", + "Table": "information_schema.key_column_usage" } ] } ] } ] - }, - "TablesUsed": [ - "main.unsharded", - "user.user" - ] - } - }, - { - "comment": "systable union query in derived table with constraint on outside (without star projection)", - "query": "select * from (select kcu.`COLUMN_NAME` from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select kcu.`COLUMN_NAME` from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `COLUMN_NAME` = 'primary'", - "v3-plan": "VT12001: unsupported: filtering on results of 
cross-shard subquery", - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select * from (select kcu.`COLUMN_NAME` from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select kcu.`COLUMN_NAME` from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `COLUMN_NAME` = 'primary'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select kcu.COLUMN_NAME from (select kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where 1 != 1 union select kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where 1 != 1) as kcu where 1 != 1", - "Query": "select kcu.COLUMN_NAME from (select kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname /* VARCHAR */ and kcu.table_name = :kcu_table_name /* VARCHAR */ and kcu.COLUMN_NAME = 'primary' union select kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname /* VARCHAR */ and kcu.table_name = :kcu_table_name1 /* VARCHAR */ and kcu.COLUMN_NAME = 'primary') as kcu", - "SysTableTableName": "[kcu_table_name1:VARCHAR(\"music\"), kcu_table_name:VARCHAR(\"user_extra\")]", - "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"user\")]", - "Table": "information_schema.key_column_usage" } } }, { "comment": "pushes predicate on both sides of UNION", "query": "select * from (select name, id as foo from user union select 'extra', user_id from user_extra) X where X.foo = 3", - "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from (select name, id as foo from user union select 'extra', user_id from user_extra) X where X.foo = 3", "Instructions": { @@ -2382,7 +1214,7 @@ }, 
"FieldQuery": "select X.`name`, X.foo from (select `name`, id as foo from `user` where 1 != 1 union select 'extra', user_id from user_extra where 1 != 1) as X where 1 != 1", "Query": "select X.`name`, X.foo from (select `name`, id as foo from `user` where id = 3 union select 'extra', user_id from user_extra where user_id = 3) as X", - "Table": "`user`", + "Table": "`user`, user_extra", "Values": [ "INT64(3)" ], @@ -2397,44 +1229,71 @@ { "comment": "systable union query in derived table with constraint on outside (star projection)", "query": "select * from (select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `constraint_name` = 'primary'", - "v3-plan": "VT03019: column constraint_name not found", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from (select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `constraint_name` = 'primary'", "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from (select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, 
kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as kcu where 1 != 1 union select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as kcu where 1 != 1) as kcu where 1 != 1", - "Query": "select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from (select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname /* VARCHAR */ and kcu.table_name = :kcu_table_name /* VARCHAR */ and kcu.CONSTRAINT_NAME = 'primary' union select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname /* VARCHAR */ and kcu.table_name = :kcu_table_name1 /* VARCHAR */ and kcu.CONSTRAINT_NAME = 'primary') as kcu", - "SysTableTableName": "[kcu_table_name1:VARCHAR(\"music\"), kcu_table_name:VARCHAR(\"user_extra\")]", - "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"user\")]", - "Table": "information_schema.key_column_usage" + "OperatorType": 
"Filter", + "Predicate": "constraint_name = 'primary'", + "Inputs": [ + { + "OperatorType": "Distinct", + "Collations": [ + "0: utf8mb4_0900_ai_ci", + "1: utf8mb4_0900_ai_ci", + "2: utf8mb4_0900_ai_ci", + "3: utf8mb4_0900_ai_ci", + "4: utf8mb4_0900_ai_ci", + "5: utf8mb4_0900_ai_ci", + "6: utf8mb4_0900_ai_ci", + "7", + "8", + "9: utf8mb4_0900_ai_ci", + "10: utf8mb4_0900_ai_ci", + "11: utf8mb4_0900_ai_ci" + ], + "Inputs": [ + { + "OperatorType": "Concatenate", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as kcu where 1 != 1", + "Query": "select distinct kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname /* VARCHAR */ and kcu.table_name = :kcu_table_name /* VARCHAR */", + "SysTableTableName": "[kcu_table_name:VARCHAR(\"user_extra\")]", + "SysTableTableSchema": "[VARCHAR(\"user\")]", + "Table": "information_schema.key_column_usage" + }, + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from 
information_schema.key_column_usage as kcu where 1 != 1", + "Query": "select distinct kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname /* VARCHAR */ and kcu.table_name = :kcu_table_name1 /* VARCHAR */", + "SysTableTableName": "[kcu_table_name1:VARCHAR(\"music\")]", + "SysTableTableSchema": "[VARCHAR(\"user\")]", + "Table": "information_schema.key_column_usage" + } + ] + } + ] + } + ] } } }, { "comment": "unknown columns are OK as long as the whole query is unsharded", "query": "(SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'FAILED' ORDER BY buildNumber DESC LIMIT 1) AS last_failed) UNION ALL (SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'SUCCEEDED' ORDER BY buildNumber DESC LIMIT 1) AS last_succeeded) ORDER BY buildNumber DESC LIMIT 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'FAILED' ORDER BY buildNumber DESC LIMIT 1) AS last_failed) UNION ALL (SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'SUCCEEDED' ORDER BY buildNumber DESC LIMIT 1) AS last_succeeded) ORDER BY buildNumber DESC LIMIT 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from (select * from unsharded where 1 != 1) as last_failed where 1 != 1 union all select * from (select * from unsharded where 1 != 1) as last_succeeded where 1 != 1", - "Query": "select * from (select * from unsharded where branchId 
= 203622 and buildNumber <= 113893 and state = 'FAILED' order by buildNumber desc limit 1) as last_failed union all select * from (select * from unsharded where branchId = 203622 and buildNumber <= 113893 and state = 'SUCCEEDED' order by buildNumber desc limit 1) as last_succeeded order by buildNumber desc limit 1", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'FAILED' ORDER BY buildNumber DESC LIMIT 1) AS last_failed) UNION ALL (SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'SUCCEEDED' ORDER BY buildNumber DESC LIMIT 1) AS last_succeeded) ORDER BY buildNumber DESC LIMIT 1", "Instructions": { @@ -2456,62 +1315,7 @@ { "comment": "union of unsharded route with sharded join with involvement of weight string", "query": "select id, foo, bar from unsharded union select user.intcol, user.textcol2, authoritative.col2 from user join authoritative", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, foo, bar from unsharded union select user.intcol, user.textcol2, authoritative.col2 from user join authoritative", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select id, foo, bar from unsharded where 1 != 1", - "Query": "select id, foo, bar from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,R:0", - "TableName": "`user`_authoritative", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.intcol, `user`.textcol2 from `user` where 1 != 1", - "Query": "select `user`.intcol, 
`user`.textcol2 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select authoritative.col2 from authoritative where 1 != 1", - "Query": "select authoritative.col2 from authoritative", - "Table": "authoritative" - } - ] - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, foo, bar from unsharded union select user.intcol, user.textcol2, authoritative.col2 from user join authoritative", "Instructions": { @@ -2551,7 +1355,7 @@ "Sharded": true }, "FieldQuery": "select `user`.intcol, `user`.textcol2, weight_string(`user`.intcol), weight_string(`user`.textcol2) from `user` where 1 != 1", - "Query": "select `user`.intcol, `user`.textcol2, weight_string(`user`.intcol), weight_string(`user`.textcol2) from `user`", + "Query": "select distinct `user`.intcol, `user`.textcol2, weight_string(`user`.intcol), weight_string(`user`.textcol2) from `user`", "Table": "`user`" }, { @@ -2562,7 +1366,7 @@ "Sharded": true }, "FieldQuery": "select authoritative.col2, weight_string(authoritative.col2) from authoritative where 1 != 1", - "Query": "select authoritative.col2, weight_string(authoritative.col2) from authoritative", + "Query": "select distinct authoritative.col2, weight_string(authoritative.col2) from authoritative", "Table": "authoritative" } ] @@ -2577,5 +1381,119 @@ "user.user" ] } + }, + { + "comment": "UNION ALL with repeating column on the LHS", + "query": "select foo, foo, foo from user union all select bar, baz, toto from music", + "plan": { + "QueryType": "SELECT", + "Original": "select foo, foo, foo from user union all select bar, baz, toto from music", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, foo, foo from `user` where 1 != 1 union all select bar, baz, toto from music where 1 != 1", + "Query": 
"select foo, foo, foo from `user` union all select bar, baz, toto from music", + "Table": "`user`, music" + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "UNION ALL with repeating column on the RHS", + "query": "select bar, baz, toto from music union all select foo, foo, foo from user", + "plan": { + "QueryType": "SELECT", + "Original": "select bar, baz, toto from music union all select foo, foo, foo from user", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select bar, baz, toto from music where 1 != 1 union all select foo, foo, foo from `user` where 1 != 1", + "Query": "select bar, baz, toto from music union all select foo, foo, foo from `user`", + "Table": "`user`, music" + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "UNION with repeating column on the RHS", + "query": "select bar, baz, toto from music union select foo, foo, foo from user", + "plan": { + "QueryType": "SELECT", + "Original": "select bar, baz, toto from music union select foo, foo, foo from user", + "Instructions": { + "OperatorType": "Distinct", + "Collations": [ + "(0:3)", + "(1:4)", + "(2:5)" + ], + "ResultColumns": 3, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select bar, baz, toto, weight_string(bar), weight_string(baz), weight_string(toto) from (select bar, baz, toto from music where 1 != 1 union select foo, foo, foo from `user` where 1 != 1) as dt where 1 != 1", + "Query": "select bar, baz, toto, weight_string(bar), weight_string(baz), weight_string(toto) from (select bar, baz, toto from music union select foo, foo, foo from `user`) as dt", + "Table": "`user`, music" + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "UNION with repeating column on the LHS", + "query": "select foo, foo, 
foo from user union select bar, baz, toto from music", + "plan": { + "QueryType": "SELECT", + "Original": "select foo, foo, foo from user union select bar, baz, toto from music", + "Instructions": { + "OperatorType": "Distinct", + "Collations": [ + "(0:3)", + "(1:3)", + "(2:3)" + ], + "ResultColumns": 3, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, foo, foo, weight_string(foo) from (select foo, foo, foo from `user` where 1 != 1 union select bar, baz, toto from music where 1 != 1) as dt where 1 != 1", + "Query": "select foo, foo, foo, weight_string(foo) from (select foo, foo, foo from `user` union select bar, baz, toto from music) as dt", + "Table": "`user`, music" + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } } -] +] \ No newline at end of file diff --git a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json index 3676c09ead9..342f2cc7c83 100644 --- a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json @@ -1,8 +1,13 @@ [ { - "comment": "union operations in subqueries (expressions)", + "comment": "unexpanded expressions invalid also inside subqueries", "query": "select * from user where id in (select * from user union select * from user_extra)", - "plan": "VT12001: unsupported: '*' expression in cross-shard query" + "plan": "VT09015: schema tracking required" + }, + { + "comment": "union operations in subqueries (expressions)", + "query": "select col from user u where id in (select bar from user where user.x = u.z union select * from user_extra)", + "plan": "VT09015: schema tracking required" }, { "comment": "TODO: Implement support for select with a target destination", @@ -27,8 +32,7 @@ { "comment": "scatter order by with * expression", "query": "select * from user order by id", - "v3-plan": "VT12001: 
unsupported: in scatter query: ORDER BY must reference a column in the SELECT list: id asc", - "gen4-plan": "VT12001: unsupported: '*' expression in cross-shard query" + "plan": "VT09015: schema tracking required" }, { "comment": "natural join", @@ -38,14 +42,12 @@ { "comment": "join with USING construct", "query": "select * from user join user_extra using(id)", - "v3-plan": "VT12001: unsupported: JOIN with USING(column_list) clause for complex queries", - "gen4-plan": "can't handle JOIN USING without authoritative tables" + "plan": "VT09015: schema tracking required" }, { "comment": "join with USING construct with 3 tables", "query": "select user.id from user join user_extra using(id) join music using(id2)", - "v3-plan": "VT12001: unsupported: JOIN with USING(column_list) clause for complex queries", - "gen4-plan": "can't handle JOIN USING without authoritative tables" + "plan": "VT09015: schema tracking required" }, { "comment": "natural left join", @@ -60,72 +62,52 @@ { "comment": "* expresson not allowed for cross-shard joins", "query": "select * from user join user_extra", - "plan": "VT12001: unsupported: '*' expression in cross-shard query" + "plan": "VT09015: schema tracking required" }, { "comment": "Group by column number, used with non-aliased expression (duplicated code)", "query": "select * from user group by 1", - "v3-plan": "VT12001: unsupported: '*' expression in cross-shard query", - "gen4-plan": "cannot use column offsets in group statement when using `*`" - }, - { - "comment": "Complex aggregate expression on scatter", - "query": "select 1+count(*) from user", - "plan": "VT12001: unsupported: in scatter query: complex aggregate expression" + "plan": "cannot use column offsets in group statement when using `*`" }, { "comment": "Multi-value aggregates not supported", "query": "select count(a,b) from user", - "v3-plan": "VT12001: unsupported: only one expression is allowed inside aggregates: count(a, b)", - "gen4-plan": "VT03001: aggregate functions 
take a single argument 'count(a, b)'" - }, - { - "comment": "Aggregate detection (group_concat)", - "query": "select group_concat(user.a) from user join user_extra", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": "VT12001: unsupported: in scatter query: aggregation function 'group_concat(`user`.a)'" + "plan": "VT03001: aggregate functions take a single argument 'count(a, b)'" }, { "comment": "subqueries not supported in group by", "query": "select id from user group by id, (select id from user_extra)", - "v3-plan": "VT12001: unsupported: subqueries disallowed in sqlparser.GroupBy", - "gen4-plan": "VT12001: unsupported: subqueries in GROUP BY" + "plan": "VT12001: unsupported: subqueries in GROUP BY" }, { "comment": "subqueries in delete", "query": "delete from user where col = (select id from unsharded)", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": "VT12001: unsupported: subqueries in DML" + "plan": "VT12001: unsupported: subqueries in DML" }, { "comment": "sharded subqueries in unsharded delete", "query": "delete from unsharded where col = (select id from user)", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": "VT12001: unsupported: subqueries in DML" + "plan": "VT12001: unsupported: subqueries in DML" }, { "comment": "sharded delete with limit clasue", "query": "delete from user_extra limit 10", - "v3-plan": "VT12001: unsupported: multi-shard delete with LIMIT", - "gen4-plan": "VT12001: unsupported: multi shard DELETE with LIMIT" + "plan": "VT12001: unsupported: multi shard DELETE with LIMIT" }, { "comment": "sharded subquery in unsharded subquery in unsharded delete", "query": "delete from unsharded where col = (select id from unsharded where id = (select id from user))", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": "VT12001: unsupported: subqueries in DML" + "plan": "VT12001: unsupported: subqueries in DML" }, { "comment": 
"sharded join unsharded subqueries in unsharded delete", "query": "delete from unsharded where col = (select id from unsharded join user on unsharded.id = user.id)", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": "VT12001: unsupported: subqueries in DML" + "plan": "VT12001: unsupported: subqueries in DML" }, { "comment": "scatter update with limit clause", "query": "update user_extra set val = 1 where (name = 'foo' or id = 1) limit 1", - "v3-plan": "VT12001: unsupported: multi-shard update with LIMIT", - "gen4-plan": "VT12001: unsupported: multi shard UPDATE with LIMIT" + "plan": "VT12001: unsupported: multi shard UPDATE with LIMIT" }, { "comment": "multi delete multi table", @@ -135,68 +117,57 @@ { "comment": "update changes primary vindex column", "query": "update user set id = 1 where id = 1", - "v3-plan": "VT12001: unsupported: you cannot update primary vindex columns; invalid update on vindex: user_index", - "gen4-plan": "VT12001: unsupported: you cannot UPDATE primary vindex columns; invalid update on vindex: user_index" + "plan": "VT12001: unsupported: you cannot UPDATE primary vindex columns; invalid update on vindex: user_index" }, { "comment": "update change in multicol vindex column", "query": "update multicol_tbl set colc = 5, colb = 4 where cola = 1 and colb = 2", - "v3-plan": "VT12001: unsupported: you cannot update primary vindex columns; invalid update on vindex: multicolIdx", - "gen4-plan": "VT12001: unsupported: you cannot UPDATE primary vindex columns; invalid update on vindex: multicolIdx" + "plan": "VT12001: unsupported: you cannot UPDATE primary vindex columns; invalid update on vindex: multicolIdx" }, { "comment": "update changes non lookup vindex column", "query": "update user_metadata set md5 = 1 where user_id = 1", - "v3-plan": "VT12001: unsupported: you can only update lookup vindexes; invalid update on vindex: user_md5_index", - "gen4-plan": "VT12001: unsupported: you can only UPDATE lookup vindexes; 
invalid update on vindex: user_md5_index" + "plan": "VT12001: unsupported: you can only UPDATE lookup vindexes; invalid update on vindex: user_md5_index" }, { "comment": "update with complex set clause", "query": "update music set id = id + 1 where id = 1", - "v3-plan": "VT12001: unsupported: only values are supported: invalid update on column: `id` with expr: [id + 1]", - "gen4-plan": "VT12001: unsupported: only values are supported; invalid update on column: `id` with expr: [id + 1]" + "plan": "VT12001: unsupported: only values are supported; invalid update on column: `id` with expr: [id + 1]" }, { "comment": "update by primary keyspace id, changing one vindex column, limit without order clause", "query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 limit 10", - "v3-plan": "VT12001: unsupported: need to provide ORDER BY clause when using LIMIT; invalid update on vindex: email_user_map", - "gen4-plan": "VT12001: unsupported: you need to provide the ORDER BY clause when using LIMIT; invalid update on vindex: email_user_map" + "plan": "VT12001: unsupported: you need to provide the ORDER BY clause when using LIMIT; invalid update on vindex: email_user_map" }, { "comment": "update with derived table", "query": "update (select id from user) as u set id = 4", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": "The target table u of the UPDATE is not updatable" + "plan": "The target table u of the UPDATE is not updatable" }, { "comment": "join in update tables", "query": "update user join user_extra on user.id = user_extra.id set user.name = 'foo'", - "v3-plan": "VT12001: unsupported: multi-shard or vindex write statement", - "gen4-plan": "VT12001: unsupported: unaliased multiple tables in update" + "plan": "VT12001: unsupported: unaliased multiple tables in update" }, { "comment": "multiple tables in update", "query": "update user as u, user_extra as ue set u.name = 'foo' where u.id = ue.id", - "v3-plan": "VT12001: 
unsupported: multi-shard or vindex write statement", - "gen4-plan": "VT12001: unsupported: multiple (2) tables in update" + "plan": "VT12001: unsupported: multiple (2) tables in update" }, { "comment": "unsharded insert, unqualified names and auto-inc combined", "query": "insert into unsharded_auto select col from unsharded", - "v3-plan": "VT12001: unsupported: auto-increment and SELECT in INSERT", - "gen4-plan": "VT09004: INSERT should contain column list or the table should have authoritative columns in vschema" + "plan": "VT09004: INSERT should contain column list or the table should have authoritative columns in vschema" }, { "comment": "unsharded insert, no col list with auto-inc", "query": "insert into unsharded_auto values(1,1)", - "v3-plan": "VT13001: [BUG] column list required for tables with auto-inc columns", - "gen4-plan": "VT09004: INSERT should contain column list or the table should have authoritative columns in vschema" + "plan": "VT09004: INSERT should contain column list or the table should have authoritative columns in vschema" }, { "comment": "unsharded insert, col list does not match values", "query": "insert into unsharded_auto(id, val) values(1)", - "v3-plan": "VT13001: [BUG] column list does not match values", - "gen4-plan": "VT03006: column count does not match value count at row 1" + "plan": "VT03006: column count does not match value count at row 1" }, { "comment": "sharded upsert can't change vindex", @@ -296,7 +267,7 @@ { "comment": "select func(keyspace_id) from user_index where id = :id", "query": "select func(keyspace_id) from user_index where id = :id", - "plan": "VT12001: unsupported: expression on results of a vindex function" + "plan": "VT12001: unsupported: cannot add 'func(keyspace_id)' expression to a table/vindex" }, { "comment": "delete with multi-table targets", @@ -306,20 +277,17 @@ { "comment": "select get_lock with non-dual table", "query": "select get_lock('xyz', 10) from user", - "v3-plan": "VT12001: unsupported: 
get_lock('xyz', 10) is allowed only with dual", - "gen4-plan": "get_lock('xyz', 10) allowed only with dual" + "plan": "get_lock('xyz', 10) allowed only with dual" }, { "comment": "select is_free_lock with non-dual table", "query": "select is_free_lock('xyz') from user", - "v3-plan": "VT12001: unsupported: is_free_lock('xyz') is allowed only with dual", - "gen4-plan": "is_free_lock('xyz') allowed only with dual" + "plan": "is_free_lock('xyz') allowed only with dual" }, { "comment": "union with SQL_CALC_FOUND_ROWS", "query": "(select sql_calc_found_rows id from user where id = 1 limit 1) union select id from user where id = 1", - "v3-plan": "VT12001: unsupported: SQL_CALC_FOUND_ROWS not supported with UNION", - "gen4-plan": "VT12001: unsupported: SQL_CALC_FOUND_ROWS not supported with union" + "plan": "VT12001: unsupported: SQL_CALC_FOUND_ROWS not supported with union" }, { "comment": "set with DEFAULT - vitess aware", @@ -339,8 +307,7 @@ { "comment": "create view with Cannot auto-resolve for cross-shard joins", "query": "create view user.view_a as select col from user join user_extra", - "v3-plan": "VT03019: column col not found", - "gen4-plan": "Column 'col' in field list is ambiguous" + "plan": "Column 'col' in field list is ambiguous" }, { "comment": "create view with join that cannot be served in each shard separately", @@ -375,24 +342,22 @@ { "comment": "avg function on scatter query", "query": "select avg(id) from user", - "v3-plan": "VT12001: unsupported: in scatter query: complex aggregate expression", - "gen4-plan": "VT12001: unsupported: in scatter query: aggregation function 'avg(id)'" + "plan": "VT12001: unsupported: in scatter query: aggregation function 'avg(id)'" }, { "comment": "outer and inner subquery route reference the same \"uu.id\" name\n# but they refer to different things. 
The first reference is to the outermost query,\n# and the second reference is to the innermost 'from' subquery.\n# This query will never work as the inner derived table is only selecting one of the column", "query": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select col from (select id from user_extra where user_id = 5) uu where uu.user_id = uu.id))", - "plan": "VT12001: unsupported: cross-shard correlated subquery" + "plan": "VT12001: unsupported: correlated subquery is only supported for EXISTS" }, { "comment": "outer and inner subquery route reference the same \"uu.id\" name\n# but they refer to different things. The first reference is to the outermost query,\n# and the second reference is to the innermost 'from' subquery.\n# changed to project all the columns from the derived tables.", "query": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select col from (select col, id, user_id from user_extra where user_id = 5) uu where uu.user_id = uu.id))", - "plan": "VT12001: unsupported: cross-shard correlated subquery" + "plan": "VT12001: unsupported: correlated subquery is only supported for EXISTS" }, { - "comment": "Gen4 does a rewrite of 'order by 2' that becomes 'order by id', leading to ambiguous binding.", + "comment": "rewrite of 'order by 2' that becomes 'order by id', leading to ambiguous binding.", "query": "select a.id, b.id from user as a, user_extra as b union select 1, 2 order by 2", - "v3-plan": "VT12001: unsupported: ORDER BY on top of UNION", - "gen4-plan": "Column 'id' in field list is ambiguous" + "plan": "Column 'id' in field list is ambiguous" }, { "comment": "unsupported with clause in delete statement", @@ -414,12 +379,6 @@ "query": "with x as (select * from user) select * from x union select * from x", "plan": "VT12001: unsupported: WITH expression in UNION statement" }, - { - "comment": "aggregation on union", - "query": "select sum(col) from (select 
col from user union all select col from unsharded) t", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": "VT12001: unsupported: using aggregation on top of a *planbuilder.concatenateGen4 plan" - }, { "comment": "insert having subquery in row values", "query": "insert into user(id, name) values ((select 1 from user where id = 1), 'A')", @@ -433,8 +392,7 @@ { "comment": "json_table expressions", "query": "SELECT * FROM JSON_TABLE('[ {\"c1\": null} ]','$[*]' COLUMNS( c1 INT PATH '$.c1' ERROR ON ERROR )) as jt", - "v3-plan": "VT12001: unsupported: JSON_TABLE expressions", - "gen4-plan": "VT12001: unsupported: json_table expressions" + "plan": "VT12001: unsupported: json_table expressions" }, { "comment": "mix lock with other expr", @@ -467,32 +425,98 @@ "plan": "VT12001: unsupported: Assignment expression" }, { - "comment": "grouping column could be coming from multiple sides", - "query": "select count(*) from user, user_extra group by user.id+user_extra.id", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": "VT12001: unsupported: grouping on columns from different sources" + "comment": "extremum on input from both sides", + "query": "insert into music(user_id, id) select foo, bar from music on duplicate key update id = id+1", + "plan": "VT12001: unsupported: DML cannot update vindex column" }, { - "comment": "aggregate on input from both sides", - "query": "select sum(user.foo+user_extra.bar) from user, user_extra", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": "VT12001: unsupported: aggregation on columns from different sources" + "comment": "drop table with incompatible tables", + "query": "drop table user, unsharded_a", + "plan": "VT12001: unsupported: Tables or Views specified in the query do not belong to the same destination" }, { - "comment": "combine the output of two aggregations in the final result", - "query": "select greatest(sum(user.foo), 
sum(user_extra.bar)) from user join user_extra on user.col = user_extra.col", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": "VT12001: unsupported: in scatter query: complex aggregate expression" + "comment": "drop view with incompatible views", + "query": "drop view user, unsharded_a", + "plan": "VT12001: unsupported: Tables or Views specified in the query do not belong to the same destination" }, { - "comment": "extremum on input from both sides", - "query": "select max(u.foo*ue.bar) from user u join user_extra ue", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": "VT12001: unsupported: aggregation on columns from different sources: max(u.foo * ue.bar)" + "comment": "Rename table with different keyspace tables", + "query": "rename table user_extra to b, main.a to b", + "plan": "VT12001: unsupported: Tables or Views specified in the query do not belong to the same destination" }, { - "comment": "extremum on input from both sides", - "query": "insert into music(user_id, id) select foo, bar from music on duplicate key update id = id+1", - "plan": "VT12001: unsupported: DML cannot update vindex column" + "comment": "correlated subquery with different keyspace tables involved", + "query": "select id from user where id in (select col from unsharded where col = user.id)", + "plan": "VT12001: unsupported: correlated subquery is only supported for EXISTS" + }, + { + "comment": "ORDER BY on select t.*", + "query": "select t.*, t.col from user t order by t.col", + "plan": "VT09015: schema tracking required" + }, + { + "comment": "ORDER BY on select *", + "query": "select *, col from user order by col", + "plan": "VT09015: schema tracking required" + }, + { + "comment": "ORDER BY on select multi t.*", + "query": "select t.*, t.name, t.*, t.col from user t order by t.col", + "plan": "VT09015: schema tracking required" + }, + { + "comment": "ORDER BY on select multi *", + "query": "select *, name, *, 
col from user order by col", + "plan": "VT09015: schema tracking required" + }, + { + "comment": "select (select col from user where user_extra.id = 4 limit 1) as a from user join user_extra", + "query": "select (select col from user where user_extra.id = 4 limit 1) as a from user join user_extra", + "plan": "VT12001: unsupported: correlated subquery is only supported for EXISTS" + }, + { + "comment": "correlated subquery part of an OR clause", + "query": "select 1 from user u where u.col = 6 or exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)", + "plan": "VT12001: unsupported: unmergable subquery can not be inside complex expression" + }, + { + "comment": "cant switch sides for outer joins", + "query": "select id from user left join (select user_id from user_extra limit 10) ue on user.id = ue.user_id", + "plan": "VT12001: unsupported: LEFT JOIN with derived tables" + }, + { + "comment": "limit on both sides means that we can't evaluate this at all", + "query": "select id from (select id from user limit 10) u join (select user_id from user_extra limit 10) ue on u.id = ue.user_id", + "plan": "VT12001: unsupported: JOIN between derived tables" + }, + { + "comment": "multi-shard union", + "query": "select 1 from music union (select id from user union all select name from unsharded)", + "plan": "VT12001: unsupported: nesting of UNIONs on the right-hand side" + }, + { + "comment": "multi-shard union", + "query": "select 1 from music union (select id from user union select name from unsharded)", + "plan": "VT12001: unsupported: nesting of UNIONs on the right-hand side" + }, + { + "comment": "Cannot have more than one aggr(distinct...", + "query": "select count(distinct a), count(distinct b) from user", + "plan": "VT12001: unsupported: only one DISTINCT aggregation is allowed in a SELECT: count(distinct b)" + }, + { + "comment": "subqueries not supported in the join condition of outer joins", + "query": "select unsharded_a.col from unsharded_a 
left join unsharded_b on unsharded_a.col IN (select col from user)", + "plan": "VT12001: unsupported: subquery in outer join predicate" + }, + { + "comment": "subquery in ON clause, with left join primitives", + "query": "select unsharded.col from unsharded left join user on user.col in (select col from user)", + "plan": "VT12001: unsupported: subquery in outer join predicate" + }, + { + "comment": "select (select 1 from user u having count(ue.col) > 10) from user_extra ue", + "query": "select (select 1 from user u having count(ue.col) > 10) from user_extra ue", + "plan": "VT12001: unsupported: correlated subquery is only supported for EXISTS" } -] +] \ No newline at end of file diff --git a/go/vt/vtgate/planbuilder/testdata/vexplain_cases.json b/go/vt/vtgate/planbuilder/testdata/vexplain_cases.json index bd828fe2dbf..630e59f3526 100644 --- a/go/vt/vtgate/planbuilder/testdata/vexplain_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/vexplain_cases.json @@ -17,28 +17,7 @@ { "comment": "vexplain queries", "query": "vexplain QUERIES select * from user", - "v3-plan": { - "QueryType": "EXPLAIN", - "Original": "vexplain QUERIES select * from user", - "Instructions": { - "OperatorType": "VEXPLAIN", - "Type": "queries", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user`", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "EXPLAIN", "Original": "vexplain QUERIES select * from user", "Instructions": { @@ -66,28 +45,7 @@ { "comment": "vexplain table", "query": "vexplain ALL select * from user", - "v3-plan": { - "QueryType": "EXPLAIN", - "Original": "vexplain ALL select * from user", - "Instructions": { - "OperatorType": "VEXPLAIN", - "Type": "all", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": 
"select * from `user` where 1 != 1", - "Query": "select * from `user`", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "EXPLAIN", "Original": "vexplain ALL select * from user", "Instructions": { diff --git a/go/vt/vtgate/planbuilder/testdata/view_cases.json b/go/vt/vtgate/planbuilder/testdata/view_cases.json index 5b5e76fe9ed..decc6a117cf 100644 --- a/go/vt/vtgate/planbuilder/testdata/view_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/view_cases.json @@ -26,22 +26,7 @@ { "comment": "create view with authoritative columns", "query": "create view user.view_ac as select * from authoritative", - "v3-plan": { - "QueryType": "DDL", - "Original": "create view user.view_ac as select * from authoritative", - "Instructions": { - "OperatorType": "DDL", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "Query": "create view view_ac as select * from authoritative" - }, - "TablesUsed": [ - "user.view_ac" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DDL", "Original": "create view user.view_ac as select * from authoritative", "Instructions": { @@ -107,4 +92,4 @@ "query": "drop view main.a, main.b, main.a", "plan": "VT03013: not unique table/alias: 'a'" } -] \ No newline at end of file +] diff --git a/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.json b/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.json index fa5198db7a9..320b5ae7bac 100644 --- a/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.json @@ -2,33 +2,7 @@ { "comment": "vindex func read all cols", "query": "select id, keyspace_id, range_start, range_end, hex_keyspace_id, shard from user_index where id = :id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, keyspace_id, range_start, range_end, hex_keyspace_id, shard from user_index where id = :id", - "Instructions": { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 0, - 1, - 2, - 3, - 4, - 5 - ], - 
"Fields": { - "hex_keyspace_id": "VARBINARY", - "id": "VARBINARY", - "keyspace_id": "VARBINARY", - "range_end": "VARBINARY", - "range_start": "VARBINARY", - "shard": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, keyspace_id, range_start, range_end, hex_keyspace_id, shard from user_index where id = :id", "Instructions": { @@ -61,33 +35,7 @@ { "comment": "vindex func select *", "query": "select * from user_index where id = :id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user_index where id = :id", - "Instructions": { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 0, - 1, - 2, - 3, - 4, - 5 - ], - "Fields": { - "hex_keyspace_id": "VARBINARY", - "id": "VARBINARY", - "keyspace_id": "VARBINARY", - "range_end": "VARBINARY", - "range_start": "VARBINARY", - "shard": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user_index where id = :id", "Instructions": { @@ -120,26 +68,7 @@ { "comment": "vindex func read with id repeated", "query": "select id, keyspace_id, id from user_index where id = :id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, keyspace_id, id from user_index where id = :id", - "Instructions": { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 0, - 1, - 0 - ], - "Fields": { - "id": "VARBINARY", - "keyspace_id": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, keyspace_id, id from user_index where id = :id", "Instructions": { @@ -179,26 +108,7 @@ { "comment": "disambiguated vindex reference", "query": "select id, keyspace_id, id from second_user.hash_dup where id = :id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, keyspace_id, id from 
second_user.hash_dup where id = :id", - "Instructions": { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 0, - 1, - 0 - ], - "Fields": { - "id": "VARBINARY", - "keyspace_id": "VARBINARY" - }, - "Value": ":id", - "Vindex": "hash_dup" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, keyspace_id, id from second_user.hash_dup where id = :id", "Instructions": { @@ -233,42 +143,7 @@ { "comment": "You can even join with a vindexFunc primitive", "query": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "_unsharded", - "Inputs": [ - { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 1 - ], - "Fields": { - "keyspace_id": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.id from unsharded where 1 != 1", - "Query": "select unsharded.id from unsharded", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id", "Instructions": { @@ -311,42 +186,7 @@ { "comment": "Join vindexFunc on RHS", "query": "select user_index.keyspace_id, unsharded.id from unsharded join user_index where user_index.id = :id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_index.keyspace_id, unsharded.id from unsharded join user_index where user_index.id = :id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0,L:0", - 
"TableName": "unsharded_", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.id from unsharded where 1 != 1", - "Query": "select unsharded.id from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 1 - ], - "Fields": { - "keyspace_id": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_index.keyspace_id, unsharded.id from unsharded join user_index where user_index.id = :id", "Instructions": { @@ -389,47 +229,7 @@ { "comment": "Join with vindexFunc on a column of it, already present in select list", "query": "select user_index.id, user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_index.id, user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,R:0", - "JoinVars": { - "user_index_id": 0 - }, - "TableName": "_unsharded", - "Inputs": [ - { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 0, - 1 - ], - "Fields": { - "id": "VARBINARY", - "keyspace_id": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.id from unsharded where 1 != 1", - "Query": "select unsharded.id from unsharded where unsharded.id = :user_index_id", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_index.id, user_index.keyspace_id, unsharded.id from user_index 
join unsharded where user_index.id = :id and unsharded.id = user_index.id", "Instructions": { @@ -477,47 +277,7 @@ { "comment": "Join with vindexFunc on a column of it, already present at the end of the select list", "query": "select user_index.keyspace_id, user_index.id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_index.keyspace_id, user_index.id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,R:0", - "JoinVars": { - "user_index_id": 1 - }, - "TableName": "_unsharded", - "Inputs": [ - { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 1, - 0 - ], - "Fields": { - "id": "VARBINARY", - "keyspace_id": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.id from unsharded where 1 != 1", - "Query": "select unsharded.id from unsharded where unsharded.id = :user_index_id", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_index.keyspace_id, user_index.id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id", "Instructions": { @@ -565,47 +325,7 @@ { "comment": "Join with vindexFunc on a column of it, not present in select list", "query": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id", - "Instructions": { - 
"OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "user_index_id": 1 - }, - "TableName": "_unsharded", - "Inputs": [ - { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 1, - 0 - ], - "Fields": { - "id": "VARBINARY", - "keyspace_id": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.id from unsharded where 1 != 1", - "Query": "select unsharded.id from unsharded where unsharded.id = :user_index_id", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id", "Instructions": { @@ -653,47 +373,7 @@ { "comment": "Join with aliased table name", "query": "select ui.keyspace_id, unsharded.id from user_index ui join unsharded where ui.id = :id and unsharded.id = ui.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select ui.keyspace_id, unsharded.id from user_index ui join unsharded where ui.id = :id and unsharded.id = ui.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "ui_id": 1 - }, - "TableName": "_unsharded", - "Inputs": [ - { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 1, - 0 - ], - "Fields": { - "id": "VARBINARY", - "keyspace_id": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.id from unsharded where 1 != 1", - "Query": "select unsharded.id from unsharded where unsharded.id = :ui_id", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", 
"Original": "select ui.keyspace_id, unsharded.id from user_index ui join unsharded where ui.id = :id and unsharded.id = ui.id", "Instructions": { @@ -741,7 +421,6 @@ { "comment": "select none from user_index where id = :id", "query": "select none from user_index where id = :id", - "v3-plan": "VT03019: column `none` not found", - "gen4-plan": "column '`none`' not found in table 'user_index'" + "plan": "column '`none`' not found in table 'user_index'" } ] diff --git a/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json b/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json index 9f9ea97200f..ac88fea498d 100644 --- a/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json +++ b/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json @@ -602,6 +602,166 @@ ] } } + }, + "sharded_fk_allow": { + "sharded": true, + "foreignKeyMode": "managed", + "vindexes": { + "hash_vin": { + "type": "hash_test" + }, + "multicolIdx": { + "type": "multiCol_test" + } + }, + "tables": { + "multicol_tbl1": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb", + "colc" + ], + "name": "multicolIdx" + } + ] + }, + "multicol_tbl2": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb", + "colc" + ], + "name": "multicolIdx" + } + ] + }, + "tbl1": { + "column_vindexes": [ + { + "column": "col1", + "name": "hash_vin" + } + ] + }, + "tbl2": { + "column_vindexes": [ + { + "column": "col2", + "name": "hash_vin" + } + ] + }, + "tbl3": { + "column_vindexes": [ + { + "column": "col3", + "name": "hash_vin" + } + ] + }, + "tbl4": { + "column_vindexes": [ + { + "column": "col4", + "name": "hash_vin" + } + ] + }, + "tbl5": { + "column_vindexes": [ + { + "column": "col5", + "name": "hash_vin" + } + ] + }, + "tbl6": { + "column_vindexes": [ + { + "column": "col6", + "name": "hash_vin" + } + ] + }, + "tbl7": { + "column_vindexes": [ + { + "column": "col7", + "name": "hash_vin" + } + ] + }, + "tbl8": { + "column_vindexes": [ + { + "column": "col8", + "name": "hash_vin" + } + ] + }, + "tbl9": 
{ + "column_vindexes": [ + { + "column": "col9", + "name": "hash_vin" + } + ] + }, + "tbl10": { + "column_vindexes": [ + { + "column": "sk", + "name": "hash_vin" + } + ] + }, + "tblrefDef": { + "column_vindexes": [ + { + "column": "ref", + "name": "hash_vin" + } + ] + }, + "tbl20": { + "column_vindexes": [ + { + "column": "col", + "name": "hash_vin" + } + ] + }, + "s_tbl": { + "column_vindexes": [ + { + "column": "col", + "name": "hash_vin" + } + ] + } + } + }, + "unsharded_fk_allow": { + "foreignKeyMode": "managed", + "tables": { + "u_tbl1": {}, + "u_tbl2": {}, + "u_tbl3": {}, + "u_tbl4": {}, + "u_tbl5": {}, + "u_tbl6": {}, + "u_tbl7": {}, + "u_tbl8": {}, + "u_tbl9": {}, + "u_tbl": {}, + "u_multicol_tbl1": {}, + "u_multicol_tbl2": {}, + "u_multicol_tbl3": {} + } } } } diff --git a/go/vt/vtgate/planbuilder/testdata/wireup_cases.json b/go/vt/vtgate/planbuilder/testdata/wireup_cases.json index b9984923292..7f749278aaa 100644 --- a/go/vt/vtgate/planbuilder/testdata/wireup_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/wireup_cases.json @@ -2,44 +2,7 @@ { "comment": "join on having clause", "query": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0,L:0,R:1", - "JoinVars": { - "uid": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id as uid from `user` as u where 1 != 1", - "Query": "select u.id as uid from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select e.col, e.id as eid from user_extra as e where 1 != 1", - "Query": "select e.col, 
e.id as eid from user_extra as e having eid = :uid", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid", "Instructions": { @@ -88,44 +51,7 @@ { "comment": "bind var already in use", "query": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid and e.col = :uid", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid and e.col = :uid", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0,L:0,R:1", - "JoinVars": { - "uid1": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id as uid from `user` as u where 1 != 1", - "Query": "select u.id as uid from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select e.col, e.id as eid from user_extra as e where 1 != 1", - "Query": "select e.col, e.id as eid from user_extra as e having eid = :uid1 and e.col = :uid", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid and e.col = :uid", "Instructions": { @@ -174,63 +100,7 @@ { "comment": "wire-up join with join, going left", "query": "select u1.id from user u1 join user u2 join user u3 where u3.col = u1.col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.id from user u1 join user u2 join user u3 where u3.col = u1.col", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "u1_col": 1 - }, - "TableName": "`user`_`user`_`user`", - 
"Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1", - "Query": "select u1.id, u1.col from `user` as u1", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u2 where 1 != 1", - "Query": "select 1 from `user` as u2", - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u3 where 1 != 1", - "Query": "select 1 from `user` as u3 where u3.col = :u1_col", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.id from user u1 join user u2 join user u3 where u3.col = u1.col", "Instructions": { @@ -293,63 +163,7 @@ { "comment": "wire-up join with join, going left, then right", "query": "select u1.id from user u1 join user u2 join user u3 where u3.col = u2.col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.id from user u1 join user u2 join user u3 where u3.col = u2.col", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "u2_col": 1 - }, - "TableName": "`user`_`user`_`user`", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.id from `user` as u1 where 1 != 1", - "Query": "select u1.id from `user` as u1", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - 
"Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u2.col from `user` as u2 where 1 != 1", - "Query": "select u2.col from `user` as u2", - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u3 where 1 != 1", - "Query": "select 1 from `user` as u3 where u3.col = :u2_col", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.id from user u1 join user u2 join user u3 where u3.col = u2.col", "Instructions": { @@ -411,66 +225,7 @@ { "comment": "wire-up join with join, reuse existing result from a lower join", "query": "select u1.id from user u1 join user u2 on u2.col = u1.col join user u3 where u3.col = u1.col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.id from user u1 join user u2 on u2.col = u1.col join user u3 where u3.col = u1.col", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "u1_col": 1 - }, - "TableName": "`user`_`user`_`user`", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "JoinVars": { - "u1_col": 1 - }, - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1", - "Query": "select u1.id, u1.col from `user` as u1", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u2 where 1 != 1", - "Query": "select 1 from `user` as u2 where u2.col = :u1_col", - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": 
"select 1 from `user` as u3 where 1 != 1", - "Query": "select 1 from `user` as u3 where u3.col = :u1_col", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.id from user u1 join user u2 on u2.col = u1.col join user u3 where u3.col = u1.col", "Instructions": { @@ -536,89 +291,7 @@ { "comment": "wire-up join with join, reuse existing result from a lower join.\n# You need two levels of join nesting to test this: when u3 requests\n# col from u1, the u1-u2 joins exports the column to u2-u3. When\n# u4 requests it, it should be reused from the u1-u2 join.", "query": "select u1.id from user u1 join user u2 join user u3 on u3.id = u1.col join user u4 where u4.col = u1.col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.id from user u1 join user u2 join user u3 on u3.id = u1.col join user u4 where u4.col = u1.col", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "u1_col": 1 - }, - "TableName": "`user`_`user`_`user`_`user`", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "JoinVars": { - "u1_col": 1 - }, - "TableName": "`user`_`user`_`user`", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1", - "Query": "select u1.id, u1.col from `user` as u1", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u2 where 1 != 1", - "Query": "select 1 from `user` as u2", - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - 
"Sharded": true - }, - "FieldQuery": "select 1 from `user` as u3 where 1 != 1", - "Query": "select 1 from `user` as u3 where u3.id = :u1_col", - "Table": "`user`", - "Values": [ - ":u1_col" - ], - "Vindex": "user_index" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u4 where 1 != 1", - "Query": "select 1 from `user` as u4 where u4.col = :u1_col", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.id from user u1 join user u2 join user u3 on u3.id = u1.col join user u4 where u4.col = u1.col", "Instructions": { @@ -707,70 +380,7 @@ { "comment": "Test reuse of join var already being supplied to the right of a node.", "query": "select u1.id from user u1 join (user u2 join user u3) where u2.id = u1.col and u3.id = u1.col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.id from user u1 join (user u2 join user u3) where u2.id = u1.col and u3.id = u1.col", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "u1_col": 1 - }, - "TableName": "`user`_`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1", - "Query": "select u1.id, u1.col from `user` as u1", - "Table": "`user`" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u2 where 1 != 1", - "Query": "select 1 from `user` as u2 where u2.id = :u1_col", - "Table": "`user`", - "Values": [ - ":u1_col" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - 
"Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u3 where 1 != 1", - "Query": "select 1 from `user` as u3 where u3.id = :u1_col", - "Table": "`user`", - "Values": [ - ":u1_col" - ], - "Vindex": "user_index" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.id from user u1 join (user u2 join user u3) where u2.id = u1.col and u3.id = u1.col", "Instructions": { @@ -844,44 +454,7 @@ { "comment": "Join on weird columns.", "query": "select `weird``name`.a, unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select `weird``name`.a, unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "weird_name_a_b_c": 1 - }, - "TableName": "`weird``name`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `weird``name`.a, `weird``name`.`a``b*c` from `weird``name` where 1 != 1", - "Query": "select `weird``name`.a, `weird``name`.`a``b*c` from `weird``name`", - "Table": "`weird``name`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.b from unsharded where 1 != 1", - "Query": "select unsharded.b from unsharded where unsharded.id = :weird_name_a_b_c", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select `weird``name`.a, unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id", "Instructions": { @@ -930,44 +503,7 @@ { "comment": "Join on weird column (col is not in select)", "query": "select unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = 
unsharded.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "weird_name_a_b_c": 0 - }, - "TableName": "`weird``name`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `weird``name`.`a``b*c` from `weird``name` where 1 != 1", - "Query": "select `weird``name`.`a``b*c` from `weird``name`", - "Table": "`weird``name`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.b from unsharded where 1 != 1", - "Query": "select unsharded.b from unsharded where unsharded.id = :weird_name_a_b_c", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id", "Instructions": { @@ -975,91 +511,48 @@ "Variant": "Join", "JoinColumnIndexes": "L:0", "JoinVars": { - "unsharded_id": 1 - }, - "TableName": "unsharded_`weird``name`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.b, unsharded.id from unsharded where 1 != 1", - "Query": "select unsharded.b, unsharded.id from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `weird``name` where 1 != 1", - "Query": "select 1 from `weird``name` where `weird``name`.`a``b*c` = :unsharded_id", - "Table": "`weird``name`", - "Values": [ - ":unsharded_id" - ], - "Vindex": "user_index" - } - ] - }, - "TablesUsed": [ - "main.unsharded", - 
"user.weird`name" - ] - } - }, - { - "comment": "wire-up with limit primitive", - "query": "select u.id, e.id from user u join user_extra e where e.id = u.col limit 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.id, e.id from user u join user_extra e where e.id = u.col limit 10", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "u_col": 1 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id, u.col from `user` as u where 1 != 1", - "Query": "select u.id, u.col from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select e.id from user_extra as e where 1 != 1", - "Query": "select e.id from user_extra as e where e.id = :u_col", - "Table": "user_extra" - } - ] + "unsharded_id": 1 + }, + "TableName": "unsharded_`weird``name`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select unsharded.b, unsharded.id from unsharded where 1 != 1", + "Query": "select unsharded.b, unsharded.id from unsharded", + "Table": "unsharded" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from `weird``name` where 1 != 1", + "Query": "select 1 from `weird``name` where `weird``name`.`a``b*c` = :unsharded_id", + "Table": "`weird``name`", + "Values": [ + ":unsharded_id" + ], + "Vindex": "user_index" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "main.unsharded", + "user.weird`name" + ] + } + }, + { + "comment": "wire-up with limit primitive", + "query": "select u.id, e.id from 
user u join user_extra e where e.id = u.col limit 10", + "plan": { "QueryType": "SELECT", "Original": "select u.id, e.id from user u join user_extra e where e.id = u.col limit 10", "Instructions": { @@ -1109,28 +602,30 @@ }, { "comment": "Wire-up in subquery", - "query": "select 1 from user where id in (select u.id, e.id from user u join user_extra e where e.id = u.col limit 10)", - "v3-plan": { + "query": "select 1 from user where id in (select u.id+e.id from user u join user_extra e where e.id = u.col limit 10)", + "plan": { "QueryType": "SELECT", - "Original": "select 1 from user where id in (select u.id, e.id from user u join user_extra e where e.id = u.col limit 10)", + "Original": "select 1 from user where id in (select u.id+e.id from user u join user_extra e where e.id = u.col limit 10)", "Instructions": { - "OperatorType": "Subquery", + "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", "PulloutVars": [ - "__sq_has_values1", + "__sq_has_values", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Limit", "Count": "INT64(10)", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", + "JoinColumnIndexes": "R:0", "JoinVars": { - "u_col": 1 + "u_col": 1, + "u_id": 0 }, "TableName": "`user`_user_extra", "Inputs": [ @@ -1152,76 +647,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select e.id from user_extra as e where 1 != 1", - "Query": "select e.id from user_extra as e where e.id = :u_col", - "Table": "user_extra" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where :__sq_has_values1 = 1 and id in ::__vals", - "Table": "`user`", - "Values": [ - "::__sq1" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select 1 from user where id in (select u.id, e.id from user u join 
user_extra e where e.id = u.col limit 10)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1,R:0", - "JoinVars": { - "u_col": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.col, u.id from `user` as u where 1 != 1", - "Query": "select u.col, u.id from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select e.id from user_extra as e where 1 != 1", - "Query": "select e.id from user_extra as e where e.id = :u_col", + "FieldQuery": "select :u_id + e.id as `u.id + e.id` from user_extra as e where 1 != 1", + "Query": "select :u_id + e.id as `u.id + e.id` from user_extra as e where e.id = :u_col", "Table": "user_extra" } ] @@ -1229,6 +656,7 @@ ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -1236,7 +664,7 @@ "Sharded": true }, "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where :__sq_has_values1 = 1 and id in ::__vals", + "Query": "select 1 from `user` where :__sq_has_values and id in ::__vals", "Table": "`user`", "Values": [ "::__sq1" @@ -1254,7 +682,7 @@ { "comment": "Wire-up in underlying primitive after pullout", "query": "select u.id, e.id, (select col from user) from user u join user_extra e where e.id = u.col limit 10", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id, e.id, (select col from user) from user u join user_extra e where e.id = u.col limit 10", "Instructions": { @@ -1262,76 +690,47 @@ "Count": "INT64(10)", "Inputs": [ { - "OperatorType": 
"Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1", + "JoinVars": { + "u_col": 2 + }, + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0,L:1", - "JoinVars": { - "u_col": 2 - }, - "TableName": "`user`_user_extra", + "OperatorType": "UncorrelatedSubquery", + "Variant": "PulloutValue", + "PulloutVars": [ + "__sq1" + ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select u.id, :__sq1, u.col from `user` as u where 1 != 1", - "Query": "select u.id, :__sq1, u.col from `user` as u", + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user`", "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select e.id from user_extra as e where 1 != 1", - "Query": "select e.id from user_extra as e where e.id = :u_col", - "Table": "user_extra" + "FieldQuery": "select u.id, :__sq1 as `(select col from ``user``)`, u.col from `user` as u where 1 != 1", + "Query": "select u.id, :__sq1 as `(select col from ``user``)`, u.col from `user` as u", + "Table": "`user`" } ] - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select u.id, e.id, (select col from user) from user u join user_extra e where e.id = u.col limit 10", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq1" 
- ], - "Inputs": [ + }, { "OperatorType": "Route", "Variant": "Scatter", @@ -1339,42 +738,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1,R:0,L:2", - "JoinVars": { - "u_col": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.col, u.id, :__sq1 from `user` as u where 1 != 1", - "Query": "select u.col, u.id, :__sq1 from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select e.id from user_extra as e where 1 != 1", - "Query": "select e.id from user_extra as e where e.id = :u_col", - "Table": "user_extra" - } - ] + "FieldQuery": "select e.id from user_extra as e where 1 != 1", + "Query": "select e.id from user_extra as e where e.id = :u_col", + "Table": "user_extra" } ] } @@ -1389,26 +755,7 @@ { "comment": "Invalid value in IN clause", "query": "select id from user where id in (18446744073709551616, 1)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id in (18446744073709551616, 1)", - "Instructions": { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id in ::__vals", - "Table": "`user`", - "Values": [ - "(DECIMAL(18446744073709551616), INT64(1))" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id in (18446744073709551616, 1)", "Instructions": { @@ -1434,45 +781,7 @@ { "comment": "Invalid value in IN clause from LHS of join", "query": "select u1.id from user u1 join 
user u2 where u1.id = 18446744073709551616", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.id from user u1 join user u2 where u1.id = 18446744073709551616", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.id from `user` as u1 where 1 != 1", - "Query": "select u1.id from `user` as u1 where u1.id = 18446744073709551616", - "Table": "`user`", - "Values": [ - "DECIMAL(18446744073709551616)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u2 where 1 != 1", - "Query": "select 1 from `user` as u2", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.id from user u1 join user u2 where u1.id = 18446744073709551616", "Instructions": { @@ -1517,45 +826,7 @@ { "comment": "Invalid value in IN clause from RHS of join", "query": "select u1.id from user u1 join user u2 where u2.id = 18446744073709551616", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.id from user u1 join user u2 where u2.id = 18446744073709551616", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.id from `user` as u1 where 1 != 1", - "Query": "select u1.id from `user` as u1", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u2 where 1 != 1", - "Query": "select 1 from `user` as u2 where 
u2.id = 18446744073709551616", - "Table": "`user`", - "Values": [ - "DECIMAL(18446744073709551616)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.id from user u1 join user u2 where u2.id = 18446744073709551616", "Instructions": { @@ -1598,11 +869,11 @@ } }, { - "comment": "derived table with column aliases not supported by v3, but planner is overridden with hint", - "query": "select /*vt+ PLANNER=gen4 */ u.a from (select id as b, name from user) u(a, n) where u.n = 1", + "comment": "derived table with column aliases", + "query": "select u.a from (select id as b, name from user) u(a, n) where u.n = 1", "plan": { "QueryType": "SELECT", - "Original": "select /*vt+ PLANNER=gen4 */ u.a from (select id as b, name from user) u(a, n) where u.n = 1", + "Original": "select u.a from (select id as b, name from user) u(a, n) where u.n = 1", "Instructions": { "OperatorType": "VindexLookup", "Variant": "Equal", @@ -1638,7 +909,7 @@ "Sharded": true }, "FieldQuery": "select u.a from (select id as b, `name` from `user` where 1 != 1) as u(a, n) where 1 != 1", - "Query": "select /*vt+ PLANNER=gen4 */ u.a from (select id as b, `name` from `user` where `name` = 1) as u(a, n)", + "Query": "select u.a from (select id as b, `name` from `user` where `name` = 1) as u(a, n)", "Table": "`user`" } ] @@ -1648,11 +919,6 @@ ] } }, - { - "comment": "derived table with column aliases not supported by v3, but planner is overridden with hint", - "query": "select /*vt+ PLANNER=v3 */ u.a from (select id as b, name from user) u(a, n) where u.n = 1", - "plan": "VT12001: unsupported: column aliases in derived table" - }, { "comment": "Three-way join using the left2right. 
The normal gen4 planner would merge m1 and m2 first, but the left to right doesnt", "query": "select /*vt+ PLANNER=left2right */ user.col from user join unsharded as m1 join unsharded as m2", diff --git a/go/vt/vtgate/planbuilder/uncorrelated_subquery.go b/go/vt/vtgate/planbuilder/uncorrelated_subquery.go new file mode 100644 index 00000000000..451b89fc780 --- /dev/null +++ b/go/vt/vtgate/planbuilder/uncorrelated_subquery.go @@ -0,0 +1,90 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package planbuilder + +import ( + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" + popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +var _ logicalPlan = (*uncorrelatedSubquery)(nil) + +// uncorrelatedSubquery is the logicalPlan for engine.UncorrelatedSubquery. +// This gets built if a subquery is not correlated and can +// therefore can be pulled out and executed upfront. +type uncorrelatedSubquery struct { + subquery logicalPlan + outer logicalPlan + eSubquery *engine.UncorrelatedSubquery +} + +// newUncorrelatedSubquery builds a new uncorrelatedSubquery. 
+func newUncorrelatedSubquery(opcode popcode.PulloutOpcode, sqName, hasValues string, subquery, outer logicalPlan) *uncorrelatedSubquery { + return &uncorrelatedSubquery{ + subquery: subquery, + outer: outer, + eSubquery: &engine.UncorrelatedSubquery{ + Opcode: opcode, + SubqueryResult: sqName, + HasValues: hasValues, + }, + } +} + +// Primitive implements the logicalPlan interface +func (ps *uncorrelatedSubquery) Primitive() engine.Primitive { + ps.eSubquery.Subquery = ps.subquery.Primitive() + ps.eSubquery.Outer = ps.outer.Primitive() + return ps.eSubquery +} + +// Wireup implements the logicalPlan interface +func (ps *uncorrelatedSubquery) Wireup(ctx *plancontext.PlanningContext) error { + if err := ps.outer.Wireup(ctx); err != nil { + return err + } + return ps.subquery.Wireup(ctx) +} + +// Rewrite implements the logicalPlan interface +func (ps *uncorrelatedSubquery) Rewrite(inputs ...logicalPlan) error { + if len(inputs) != 2 { + return vterrors.VT13001("uncorrelatedSubquery: wrong number of inputs") + } + ps.outer = inputs[0] + ps.subquery = inputs[1] + return nil +} + +// ContainsTables implements the logicalPlan interface +func (ps *uncorrelatedSubquery) ContainsTables() semantics.TableSet { + return ps.outer.ContainsTables().Merge(ps.subquery.ContainsTables()) +} + +// Inputs implements the logicalPlan interface +func (ps *uncorrelatedSubquery) Inputs() []logicalPlan { + return []logicalPlan{ps.outer, ps.subquery} +} + +// OutputColumns implements the logicalPlan interface +func (ps *uncorrelatedSubquery) OutputColumns() []sqlparser.SelectExpr { + return ps.outer.OutputColumns() +} diff --git a/go/vt/vtgate/planbuilder/union.go b/go/vt/vtgate/planbuilder/union.go deleted file mode 100644 index 22dba1aa236..00000000000 --- a/go/vt/vtgate/planbuilder/union.go +++ /dev/null @@ -1,146 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - "vitess.io/vitess/go/vt/vterrors" - - "vitess.io/vitess/go/mysql" - - "vitess.io/vitess/go/vt/sqlparser" -) - -func buildUnionPlan(string) stmtPlanner { - return func(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { - union := stmt.(*sqlparser.Union) - if union.With != nil { - return nil, vterrors.VT12001("WITH expression in UNION statement") - } - err := checkUnsupportedExpressions(union) - if err != nil { - return nil, err - } - // For unions, create a pb with anonymous scope. 
- pb := newPrimitiveBuilder(vschema, newJointab(reservedVars)) - if err := pb.processUnion(union, reservedVars, nil); err != nil { - return nil, err - } - if err := pb.plan.Wireup(pb.plan, pb.jt); err != nil { - return nil, err - } - return newPlanResult(pb.plan.Primitive()), nil - } -} - -func (pb *primitiveBuilder) processUnion(union *sqlparser.Union, reservedVars *sqlparser.ReservedVars, outer *symtab) error { - if err := pb.processPart(union.Left, reservedVars, outer); err != nil { - return err - } - - rpb := newPrimitiveBuilder(pb.vschema, pb.jt) - if err := rpb.processPart(union.Right, reservedVars, outer); err != nil { - return err - } - err := unionRouteMerge(pb.plan, rpb.plan, union) - if err != nil { - // we are merging between two routes - let's check if we can see so that we have the same amount of columns on both sides of the union - lhsCols := len(pb.plan.ResultColumns()) - rhsCols := len(rpb.plan.ResultColumns()) - if lhsCols != rhsCols { - return &mysql.SQLError{ - Num: mysql.ERWrongNumberOfColumnsInSelect, - State: "21000", - Message: "The used SELECT statements have a different number of columns", - Query: sqlparser.String(union), - } - } - - pb.plan = &concatenate{ - lhs: pb.plan, - rhs: rpb.plan, - } - - if union.Distinct { - pb.plan = newDistinctV3(pb.plan) - } - } - pb.st.Outer = outer - - if err := setLock(pb.plan, union.Lock); err != nil { - return err - } - - if err := pb.pushOrderBy(union.OrderBy); err != nil { - return err - } - return pb.pushLimit(union.Limit) -} - -func (pb *primitiveBuilder) processPart(part sqlparser.SelectStatement, reservedVars *sqlparser.ReservedVars, outer *symtab) error { - switch part := part.(type) { - case *sqlparser.Union: - return pb.processUnion(part, reservedVars, outer) - case *sqlparser.Select: - if part.SQLCalcFoundRows { - return vterrors.VT12001("SQL_CALC_FOUND_ROWS not supported with UNION") - } - return pb.processSelect(part, reservedVars, outer, "") - } - return 
vterrors.VT13001(fmt.Sprintf("unexpected SELECT type: %T", part)) -} - -// TODO (systay) we never use this as an actual error. we should rethink the return type -func unionRouteMerge(left, right logicalPlan, us *sqlparser.Union) error { - lroute, ok := left.(*route) - if !ok { - return vterrors.VT12001("SELECT of UNION is non-trivial") - } - rroute, ok := right.(*route) - if !ok { - return vterrors.VT12001("SELECT of UNION is non-trivial") - } - mergeSuccess := lroute.MergeUnion(rroute, us.Distinct) - if !mergeSuccess { - return vterrors.VT12001("execute UNION as a single route") - } - - lroute.Select = &sqlparser.Union{Left: lroute.Select, Right: us.Right, Distinct: us.Distinct} - - return nil -} - -// planLock pushes "FOR UPDATE", "LOCK IN SHARE MODE" down to all routes -func setLock(in logicalPlan, lock sqlparser.Lock) error { - _, err := visit(in, func(plan logicalPlan) (bool, logicalPlan, error) { - switch node := in.(type) { - case *route: - node.Select.SetLock(lock) - return false, node, nil - case *sqlCalcFoundRows, *vindexFunc: - return false, nil, vterrors.VT13001(fmt.Sprintf("unreachable %T.locking", in)) - } - return true, plan, nil - }) - if err != nil { - return err - } - return nil -} diff --git a/go/vt/vtgate/planbuilder/update.go b/go/vt/vtgate/planbuilder/update.go new file mode 100644 index 00000000000..aed62f2b1d5 --- /dev/null +++ b/go/vt/vtgate/planbuilder/update.go @@ -0,0 +1,150 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package planbuilder + +import ( + querypb "vitess.io/vitess/go/vt/proto/query" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +func gen4UpdateStmtPlanner( + version querypb.ExecuteOptions_PlannerVersion, + updStmt *sqlparser.Update, + reservedVars *sqlparser.ReservedVars, + vschema plancontext.VSchema, +) (*planResult, error) { + if updStmt.With != nil { + return nil, vterrors.VT12001("WITH expression in UPDATE statement") + } + + ctx, err := plancontext.CreatePlanningContext(updStmt, reservedVars, vschema, version) + if err != nil { + return nil, err + } + + err = rewriteRoutedTables(updStmt, vschema) + if err != nil { + return nil, err + } + + if ks, tables := ctx.SemTable.SingleUnshardedKeyspace(); ks != nil { + if fkManagementNotRequiredForUpdate(ctx, tables, updStmt.Exprs) { + plan := updateUnshardedShortcut(updStmt, ks, tables) + plan = pushCommentDirectivesOnPlan(plan, updStmt) + return newPlanResult(plan.Primitive(), operators.QualifiedTables(ks, tables)...), nil + } + } + + if ctx.SemTable.NotUnshardedErr != nil { + return nil, ctx.SemTable.NotUnshardedErr + } + + err = queryRewrite(ctx.SemTable, reservedVars, updStmt) + if err != nil { + return nil, err + } + + op, err := operators.PlanQuery(ctx, updStmt) + if err != nil { + return nil, err + } + + plan, err := transformToLogicalPlan(ctx, op) + if err != nil { + return nil, err + } + + plan = pushCommentDirectivesOnPlan(plan, updStmt) + + setLockOnAllSelect(plan) + + if err := plan.Wireup(ctx); err != nil { + return nil, err + } + + return newPlanResult(plan.Primitive(), operators.TablesUsed(op)...), nil +} + +// TODO: 
Handle all this in semantic analysis. +func fkManagementNotRequiredForUpdate(ctx *plancontext.PlanningContext, vTables []*vindexes.Table, updateExprs sqlparser.UpdateExprs) bool { + childFkMap := make(map[string][]vindexes.ChildFKInfo) + + // Find the foreign key mode and check for any managed child foreign keys. + for _, vTable := range vTables { + ksMode, err := ctx.VSchema.ForeignKeyMode(vTable.Keyspace.Name) + if err != nil { + return false + } + if ksMode != vschemapb.Keyspace_managed { + continue + } + childFks := vTable.ChildFKsNeedsHandling(ctx.VerifyAllFKs, vindexes.UpdateAction) + if len(childFks) > 0 { + childFkMap[vTable.String()] = childFks + } + } + + getFKInfo := func(expr *sqlparser.UpdateExpr) ([]vindexes.ParentFKInfo, []vindexes.ChildFKInfo) { + tblInfo, err := ctx.SemTable.TableInfoForExpr(expr.Name) + if err != nil { + return nil, nil + } + vTable := tblInfo.GetVindexTable() + return vTable.ParentForeignKeys, childFkMap[vTable.String()] + } + + // Check if any column in the parent table is being updated which has a child foreign key. + return !columnModified(updateExprs, getFKInfo) +} + +// columnModified checks if any column in the parent table is being updated which has a child foreign key. 
+func columnModified(exprs sqlparser.UpdateExprs, getFks func(expr *sqlparser.UpdateExpr) ([]vindexes.ParentFKInfo, []vindexes.ChildFKInfo)) bool { + for _, updateExpr := range exprs { + parentFKs, childFks := getFks(updateExpr) + for _, childFk := range childFks { + if childFk.ParentColumns.FindColumn(updateExpr.Name.Name) >= 0 { + return true + } + } + if sqlparser.IsNull(updateExpr.Expr) { + continue + } + for _, parentFk := range parentFKs { + if parentFk.ChildColumns.FindColumn(updateExpr.Name.Name) >= 0 { + return true + } + } + } + return false +} + +func updateUnshardedShortcut(stmt *sqlparser.Update, ks *vindexes.Keyspace, tables []*vindexes.Table) logicalPlan { + edml := engine.NewDML() + edml.Keyspace = ks + edml.Opcode = engine.Unsharded + edml.Query = generateQuery(stmt) + for _, tbl := range tables { + edml.TableNames = append(edml.TableNames, tbl.Name.String()) + } + return &primitiveWrapper{prim: &engine.Update{DML: edml}} +} diff --git a/go/vt/vtgate/planbuilder/update_planner.go b/go/vt/vtgate/planbuilder/update_planner.go deleted file mode 100644 index cf9e8288745..00000000000 --- a/go/vt/vtgate/planbuilder/update_planner.go +++ /dev/null @@ -1,165 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - "vitess.io/vitess/go/vt/vtgate/vindexes" -) - -// buildUpdatePlan returns a stmtPlanner that builds the instructions for an UPDATE statement. -func buildUpdatePlan(string) stmtPlanner { - return func(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { - upd := stmt.(*sqlparser.Update) - if upd.With != nil { - return nil, vterrors.VT12001("WITH expression in UPDATE statement") - } - err := checkUnsupportedExpressions(upd) - if err != nil { - return nil, err - } - dml, tables, ksidVindex, err := buildDMLPlan(vschema, "update", stmt, reservedVars, upd.TableExprs, upd.Where, upd.OrderBy, upd.Limit, upd.Comments, upd.Exprs) - if err != nil { - return nil, err - } - eupd := &engine.Update{DML: dml} - - if dml.Opcode == engine.Unsharded { - return newPlanResult(eupd, tables...), nil - } - eupdTable, err := eupd.GetSingleTable() - if err != nil { - return nil, err - } - cvv, ovq, err := buildChangedVindexesValues(upd, eupdTable, ksidVindex.Columns) - if err != nil { - return nil, err - } - eupd.ChangedVindexValues = cvv - eupd.OwnedVindexQuery = ovq - if len(eupd.ChangedVindexValues) != 0 { - eupd.KsidVindex = ksidVindex.Vindex - eupd.KsidLength = len(ksidVindex.Columns) - } - return newPlanResult(eupd, tables...), nil - } -} - -// buildChangedVindexesValues adds to the plan all the lookup vindexes that are changing. -// Updates can only be performed to secondary lookup vindexes with no complex expressions -// in the set clause. 
-func buildChangedVindexesValues(update *sqlparser.Update, table *vindexes.Table, ksidCols []sqlparser.IdentifierCI) (map[string]*engine.VindexValues, string, error) { - changedVindexes := make(map[string]*engine.VindexValues) - buf, offset := initialQuery(ksidCols, table) - for i, vindex := range table.ColumnVindexes { - vindexValueMap := make(map[string]evalengine.Expr) - first := true - for _, vcol := range vindex.Columns { - // Searching in order of columns in colvindex. - found := false - for _, assignment := range update.Exprs { - if !vcol.Equal(assignment.Name.Name) { - continue - } - if found { - return nil, "", vterrors.VT03015(assignment.Name.Name) - } - found = true - pv, err := extractValueFromUpdate(assignment) - if err != nil { - return nil, "", err - } - vindexValueMap[vcol.String()] = pv - if first { - buf.Myprintf(", %v", assignment) - first = false - } else { - buf.Myprintf(" and %v", assignment) - } - } - } - if len(vindexValueMap) == 0 { - // Vindex not changing, continue - continue - } - - if update.Limit != nil && len(update.OrderBy) == 0 { - return nil, "", vterrors.VT12001(fmt.Sprintf("need to provide ORDER BY clause when using LIMIT; invalid update on vindex: %v", vindex.Name)) - } - if i == 0 { - return nil, "", vterrors.VT12001(fmt.Sprintf("you cannot update primary vindex columns; invalid update on vindex: %v", vindex.Name)) - } - if _, ok := vindex.Vindex.(vindexes.Lookup); !ok { - return nil, "", vterrors.VT12001(fmt.Sprintf("you can only update lookup vindexes; invalid update on vindex: %v", vindex.Name)) - } - changedVindexes[vindex.Name] = &engine.VindexValues{ - PvMap: vindexValueMap, - Offset: offset, - } - offset++ - } - if len(changedVindexes) == 0 { - return nil, "", nil - } - // generate rest of the owned vindex query. 
- aTblExpr, ok := update.TableExprs[0].(*sqlparser.AliasedTableExpr) - if !ok { - return nil, "", vterrors.VT12001("UPDATE on complex table expression") - } - tblExpr := &sqlparser.AliasedTableExpr{Expr: sqlparser.TableName{Name: table.Name}, As: aTblExpr.As} - buf.Myprintf(" from %v%v%v%v for update", tblExpr, update.Where, update.OrderBy, update.Limit) - return changedVindexes, buf.String(), nil -} - -func initialQuery(ksidCols []sqlparser.IdentifierCI, table *vindexes.Table) (*sqlparser.TrackedBuffer, int) { - buf := sqlparser.NewTrackedBuffer(nil) - offset := 0 - for _, col := range ksidCols { - if offset == 0 { - buf.Myprintf("select %v", col) - } else { - buf.Myprintf(", %v", col) - } - offset++ - } - for _, cv := range table.Owned { - for _, column := range cv.Columns { - buf.Myprintf(", %v", column) - offset++ - } - } - return buf, offset -} - -// extractValueFromUpdate given an UpdateExpr attempts to extracts the Value -// it's holding. At the moment it only supports: StrVal, HexVal, IntVal, ValArg. -// If a complex expression is provided (e.g set name = name + 1), the update will be rejected. 
-func extractValueFromUpdate(upd *sqlparser.UpdateExpr) (evalengine.Expr, error) { - pv, err := evalengine.Translate(upd.Expr, nil) - if err != nil || sqlparser.IsSimpleTuple(upd.Expr) { - err := vterrors.VT12001(fmt.Sprintf("only values are supported: invalid update on column: `%s` with expr: [%s]", upd.Name.Name.String(), sqlparser.String(upd.Expr))) - return nil, err - } - return pv, nil -} diff --git a/go/vt/vtgate/planbuilder/vindex_func.go b/go/vt/vtgate/planbuilder/vindex_func.go index 33538ad9d14..2708fb465cd 100644 --- a/go/vt/vtgate/planbuilder/vindex_func.go +++ b/go/vt/vtgate/planbuilder/vindex_func.go @@ -26,11 +26,9 @@ import ( "vitess.io/vitess/go/vt/vterrors" + querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/vindexes" - - querypb "vitess.io/vitess/go/vt/proto/query" ) var _ logicalPlan = (*vindexFunc)(nil) @@ -42,9 +40,6 @@ type vindexFunc struct { // the tableID field is only used by the gen4 planner tableID semantics.TableSet - // resultColumns represent the columns returned by this route. - resultColumns []*resultColumn - // eVindexFunc is the primitive being built. eVindexFunc *engine.VindexFunc } @@ -58,89 +53,16 @@ var colnames = []string{ "shard", } -func newVindexFunc(alias sqlparser.TableName, vindex vindexes.SingleColumn) (*vindexFunc, *symtab) { - vf := &vindexFunc{ - order: 1, - eVindexFunc: &engine.VindexFunc{ - Vindex: vindex, - }, - } - - // Create a 'table' that represents the vindex. - t := &table{ - alias: alias, - origin: vf, - } - - for _, colName := range colnames { - t.addColumn(sqlparser.NewIdentifierCI(colName), &column{origin: vf}) - } - t.isAuthoritative = true - - st := newSymtab() - // AddTable will not fail because symtab is empty. 
- _ = st.AddTable(t) - return vf, st -} - -// Order implements the logicalPlan interface -func (vf *vindexFunc) Order() int { - return vf.order -} - -// Reorder implements the logicalPlan interface -func (vf *vindexFunc) Reorder(order int) { - vf.order = order + 1 -} - // Primitive implements the logicalPlan interface func (vf *vindexFunc) Primitive() engine.Primitive { return vf.eVindexFunc } -// ResultColumns implements the logicalPlan interface -func (vf *vindexFunc) ResultColumns() []*resultColumn { - return vf.resultColumns -} - // Wireup implements the logicalPlan interface -func (vf *vindexFunc) Wireup(logicalPlan, *jointab) error { - return nil -} - -// WireupGen4 implements the logicalPlan interface -func (vf *vindexFunc) WireupGen4(*plancontext.PlanningContext) error { +func (vf *vindexFunc) Wireup(*plancontext.PlanningContext) error { return nil } -// SupplyVar implements the logicalPlan interface -func (vf *vindexFunc) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { - // vindexFunc is an atomic primitive. So, SupplyVar cannot be - // called on it. - panic("BUG: vindexFunc is an atomic node.") -} - -// SupplyCol implements the logicalPlan interface -func (vf *vindexFunc) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) { - c := col.Metadata.(*column) - for i, rc := range vf.resultColumns { - if rc.column == c { - return rc, i - } - } - - vf.resultColumns = append(vf.resultColumns, &resultColumn{column: c}) - vf.eVindexFunc.Fields = append(vf.eVindexFunc.Fields, &querypb.Field{ - Name: col.Name.String(), - Type: querypb.Type_VARBINARY, - }) - - // columns that reference vindexFunc will have their colNumber set. - // Let's use it here. - vf.eVindexFunc.Cols = append(vf.eVindexFunc.Cols, c.colNumber) - return rc, len(vf.resultColumns) - 1 -} - // SupplyProjection pushes the given aliased expression into the fields and cols slices of the // vindexFunc engine primitive. 
The method returns the offset of the new expression in the columns // list. @@ -183,11 +105,6 @@ func (err UnsupportedSupplyWeightString) Error() string { return fmt.Sprintf("cannot do collation on %s", err.Type) } -// SupplyWeightString implements the logicalPlan interface -func (vf *vindexFunc) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weightcolNumber int, err error) { - return 0, UnsupportedSupplyWeightString{Type: "vindex function"} -} - // Rewrite implements the logicalPlan interface func (vf *vindexFunc) Rewrite(inputs ...logicalPlan) error { if len(inputs) != 0 { diff --git a/go/vt/vtgate/planbuilder/vindex_op.go b/go/vt/vtgate/planbuilder/vindex_op.go index 4918a326076..c439dec1701 100644 --- a/go/vt/vtgate/planbuilder/vindex_op.go +++ b/go/vt/vtgate/planbuilder/vindex_op.go @@ -40,9 +40,8 @@ func transformVindexPlan(ctx *plancontext.PlanningContext, op *operators.Vindex) return nil, err } plan := &vindexFunc{ - order: 1, - tableID: op.Solved, - resultColumns: nil, + order: 1, + tableID: op.Solved, eVindexFunc: &engine.VindexFunc{ Opcode: op.OpCode, Vindex: single, diff --git a/go/vt/vtgate/plugin_mysql_server.go b/go/vt/vtgate/plugin_mysql_server.go index 9ada4aa6050..68d665729e8 100644 --- a/go/vt/vtgate/plugin_mysql_server.go +++ b/go/vt/vtgate/plugin_mysql_server.go @@ -35,6 +35,9 @@ import ( "github.com/google/uuid" "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/trace" @@ -64,6 +67,7 @@ var ( mysqlSslServerCA string mysqlTLSMinVersion string + mysqlKeepAlivePeriod time.Duration mysqlConnReadTimeout time.Duration mysqlConnWriteTimeout time.Duration mysqlQueryTimeout time.Duration @@ -72,8 +76,6 @@ var ( mysqlDefaultWorkloadName = "OLTP" mysqlDefaultWorkload int32 - - busyConnections int32 ) func registerPluginFlags(fs *pflag.FlagSet) { @@ -95,6 +97,7 @@ func registerPluginFlags(fs *pflag.FlagSet) { 
fs.DurationVar(&mysqlConnWriteTimeout, "mysql_server_write_timeout", mysqlConnWriteTimeout, "connection write timeout") fs.DurationVar(&mysqlQueryTimeout, "mysql_server_query_timeout", mysqlQueryTimeout, "mysql query timeout") fs.BoolVar(&mysqlConnBufferPooling, "mysql-server-pool-conn-read-buffers", mysqlConnBufferPooling, "If set, the server will pool incoming connection read buffers") + fs.DurationVar(&mysqlKeepAlivePeriod, "mysql-server-keepalive-period", mysqlKeepAlivePeriod, "TCP period between keep-alives") fs.StringVar(&mysqlDefaultWorkloadName, "mysql_default_workload", mysqlDefaultWorkloadName, "Default session workload (OLTP, OLAP, DBA)") } @@ -105,20 +108,22 @@ type vtgateHandler struct { mu sync.Mutex vtg *VTGate - connections map[*mysql.Conn]bool + connections map[uint32]*mysql.Conn + + busyConnections atomic.Int32 } func newVtgateHandler(vtg *VTGate) *vtgateHandler { return &vtgateHandler{ vtg: vtg, - connections: make(map[*mysql.Conn]bool), + connections: make(map[uint32]*mysql.Conn), } } func (vh *vtgateHandler) NewConnection(c *mysql.Conn) { vh.mu.Lock() defer vh.mu.Unlock() - vh.connections[c] = true + vh.connections[c.ConnectionID] = c } func (vh *vtgateHandler) numConnections() int { @@ -131,7 +136,7 @@ func (vh *vtgateHandler) ComResetConnection(c *mysql.Conn) { ctx := context.Background() session := vh.session(c) if session.InTransaction { - defer atomic.AddInt32(&busyConnections, -1) + defer vh.busyConnections.Add(-1) } err := vh.vtg.CloseSession(ctx, session) if err != nil { @@ -143,8 +148,8 @@ func (vh *vtgateHandler) ConnectionClosed(c *mysql.Conn) { // Rollback if there is an ongoing transaction. Ignore error. 
defer func() { vh.mu.Lock() - defer vh.mu.Unlock() - delete(vh.connections, c) + delete(vh.connections, c.ConnectionID) + vh.mu.Unlock() }() var ctx context.Context @@ -157,7 +162,7 @@ func (vh *vtgateHandler) ConnectionClosed(c *mysql.Conn) { } session := vh.session(c) if session.InTransaction { - defer atomic.AddInt32(&busyConnections, -1) + defer vh.busyConnections.Add(-1) } _ = vh.vtg.CloseSession(ctx, session) } @@ -197,8 +202,9 @@ func startSpan(ctx context.Context, query, label string) (trace.Span, context.Co } func (vh *vtgateHandler) ComQuery(c *mysql.Conn, query string, callback func(*sqltypes.Result) error) error { - ctx := context.Background() - var cancel context.CancelFunc + ctx, cancel := context.WithCancel(context.Background()) + c.UpdateCancelCtx(cancel) + if mysqlQueryTimeout != 0 { ctx, cancel = context.WithTimeout(ctx, mysqlQueryTimeout) defer cancel() @@ -226,11 +232,11 @@ func (vh *vtgateHandler) ComQuery(c *mysql.Conn, query string, callback func(*sq session := vh.session(c) if !session.InTransaction { - atomic.AddInt32(&busyConnections, 1) + vh.busyConnections.Add(1) } defer func() { if !session.InTransaction { - atomic.AddInt32(&busyConnections, -1) + vh.busyConnections.Add(-1) } }() @@ -246,16 +252,16 @@ func (vh *vtgateHandler) ComQuery(c *mysql.Conn, query string, callback func(*sq session.TargetString = strings.Split(session.TargetString, "@")[0] + tabletType if session.Options.Workload == querypb.ExecuteOptions_OLAP { - session, err := vh.vtg.StreamExecute(ctx, c, session, query, make(map[string]*querypb.BindVariable), callback) + session, err := vh.vtg.StreamExecute(ctx, vh, c, session, query, make(map[string]*querypb.BindVariable), callback) if err != nil { - return mysql.NewSQLErrorFromError(err) + return sqlerror.NewSQLErrorFromError(err) } fillInTxStatusFlags(c, session) return nil } - session, result, err := vh.vtg.Execute(ctx, c, session, query, make(map[string]*querypb.BindVariable)) + session, result, err := 
vh.vtg.Execute(ctx, vh, c, session, query, make(map[string]*querypb.BindVariable)) - if err := mysql.NewSQLErrorFromError(err); err != nil { + if err := sqlerror.NewSQLErrorFromError(err); err != nil { return err } fillInTxStatusFlags(c, session) @@ -302,16 +308,16 @@ func (vh *vtgateHandler) ComPrepare(c *mysql.Conn, query string, bindVars map[st session := vh.session(c) if !session.InTransaction { - atomic.AddInt32(&busyConnections, 1) + vh.busyConnections.Add(1) } defer func() { if !session.InTransaction { - atomic.AddInt32(&busyConnections, -1) + vh.busyConnections.Add(-1) } }() session, fld, err := vh.vtg.Prepare(ctx, session, query, bindVars) - err = mysql.NewSQLErrorFromError(err) + err = sqlerror.NewSQLErrorFromError(err) if err != nil { return nil, err } @@ -319,13 +325,12 @@ func (vh *vtgateHandler) ComPrepare(c *mysql.Conn, query string, bindVars map[st } func (vh *vtgateHandler) ComStmtExecute(c *mysql.Conn, prepare *mysql.PrepareData, callback func(*sqltypes.Result) error) error { - var ctx context.Context - var cancel context.CancelFunc + ctx, cancel := context.WithCancel(context.Background()) + c.UpdateCancelCtx(cancel) + if mysqlQueryTimeout != 0 { - ctx, cancel = context.WithTimeout(context.Background(), mysqlQueryTimeout) + ctx, cancel = context.WithTimeout(ctx, mysqlQueryTimeout) defer cancel() - } else { - ctx = context.Background() } ctx = callinfo.MysqlCallInfo(ctx, c) @@ -344,25 +349,25 @@ func (vh *vtgateHandler) ComStmtExecute(c *mysql.Conn, prepare *mysql.PrepareDat session := vh.session(c) if !session.InTransaction { - atomic.AddInt32(&busyConnections, 1) + vh.busyConnections.Add(1) } defer func() { if !session.InTransaction { - atomic.AddInt32(&busyConnections, -1) + vh.busyConnections.Add(-1) } }() if session.Options.Workload == querypb.ExecuteOptions_OLAP { - _, err := vh.vtg.StreamExecute(ctx, c, session, prepare.PrepareStmt, prepare.BindVars, callback) + _, err := vh.vtg.StreamExecute(ctx, vh, c, session, prepare.PrepareStmt, 
prepare.BindVars, callback) if err != nil { - return mysql.NewSQLErrorFromError(err) + return sqlerror.NewSQLErrorFromError(err) } fillInTxStatusFlags(c, session) return nil } - _, qr, err := vh.vtg.Execute(ctx, c, session, prepare.PrepareStmt, prepare.BindVars) + _, qr, err := vh.vtg.Execute(ctx, vh, c, session, prepare.PrepareStmt, prepare.BindVars) if err != nil { - return mysql.NewSQLErrorFromError(err) + return sqlerror.NewSQLErrorFromError(err) } fillInTxStatusFlags(c, session) @@ -384,10 +389,41 @@ func (vh *vtgateHandler) ComBinlogDump(c *mysql.Conn, logFile string, binlogPos } // ComBinlogDumpGTID is part of the mysql.Handler interface. -func (vh *vtgateHandler) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet mysql.GTIDSet) error { +func (vh *vtgateHandler) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error { return vterrors.VT12001("ComBinlogDumpGTID for the VTGate handler") } +// KillConnection closes an open connection by connection ID. +func (vh *vtgateHandler) KillConnection(ctx context.Context, connectionID uint32) error { + vh.mu.Lock() + defer vh.mu.Unlock() + + c, exists := vh.connections[connectionID] + if !exists { + return sqlerror.NewSQLError(sqlerror.ERNoSuchThread, sqlerror.SSUnknownSQLState, "Unknown thread id: %d", connectionID) + } + + // First, we mark the connection for close, so that even when the context is cancelled, while returning the response back to client, + // the connection can get closed, + // Closing the connection will trigger ConnectionClosed method which rollback any open transaction. + c.MarkForClose() + c.CancelCtx() + + return nil +} + +// KillQuery cancels any execution query on the provided connection ID. 
+func (vh *vtgateHandler) KillQuery(connectionID uint32) error { + vh.mu.Lock() + defer vh.mu.Unlock() + c, exists := vh.connections[connectionID] + if !exists { + return sqlerror.NewSQLError(sqlerror.ERNoSuchThread, sqlerror.SSUnknownSQLState, "Unknown thread id: %d", connectionID) + } + c.CancelCtx() + return nil +} + func (vh *vtgateHandler) session(c *mysql.Conn) *vtgatepb.Session { session, _ := c.ClientData.(*vtgatepb.Session) if session == nil { @@ -401,6 +437,7 @@ func (vh *vtgateHandler) session(c *mysql.Conn) *vtgatepb.Session { }, Autocommit: true, DDLStrategy: defaultDDLStrategy, + MigrationContext: "", SessionUUID: u.String(), EnableSystemSettings: sysVarSetEnabled, } @@ -416,30 +453,37 @@ func (vh *vtgateHandler) session(c *mysql.Conn) *vtgatepb.Session { return session } -var mysqlListener *mysql.Listener -var mysqlUnixListener *mysql.Listener -var sigChan chan os.Signal -var vtgateHandle *vtgateHandler +type mysqlServer struct { + tcpListener *mysql.Listener + unixListener *mysql.Listener + sigChan chan os.Signal + vtgateHandle *vtgateHandler +} // initTLSConfig inits tls config for the given mysql listener -func initTLSConfig(mysqlListener *mysql.Listener, mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslCrl, mysqlSslServerCA string, mysqlServerRequireSecureTransport bool, mysqlMinTLSVersion uint16) error { +func initTLSConfig(ctx context.Context, srv *mysqlServer, mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslCrl, mysqlSslServerCA string, mysqlServerRequireSecureTransport bool, mysqlMinTLSVersion uint16) error { serverConfig, err := vttls.ServerConfig(mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslCrl, mysqlSslServerCA, mysqlMinTLSVersion) if err != nil { log.Exitf("grpcutils.TLSServerConfig failed: %v", err) return err } - mysqlListener.TLSConfig.Store(serverConfig) - mysqlListener.RequireSecureTransport = mysqlServerRequireSecureTransport - sigChan = make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGHUP) + 
srv.tcpListener.TLSConfig.Store(serverConfig) + srv.tcpListener.RequireSecureTransport = mysqlServerRequireSecureTransport + srv.sigChan = make(chan os.Signal, 1) + signal.Notify(srv.sigChan, syscall.SIGHUP) go func() { - for range sigChan { - serverConfig, err := vttls.ServerConfig(mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslCrl, mysqlSslServerCA, mysqlMinTLSVersion) - if err != nil { - log.Errorf("grpcutils.TLSServerConfig failed: %v", err) - } else { - log.Info("grpcutils.TLSServerConfig updated") - mysqlListener.TLSConfig.Store(serverConfig) + for { + select { + case <-ctx.Done(): + return + case <-srv.sigChan: + serverConfig, err := vttls.ServerConfig(mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslCrl, mysqlSslServerCA, mysqlMinTLSVersion) + if err != nil { + log.Errorf("grpcutils.TLSServerConfig failed: %v", err) + } else { + log.Info("grpcutils.TLSServerConfig updated") + srv.tcpListener.TLSConfig.Store(serverConfig) + } } } }() @@ -448,15 +492,15 @@ func initTLSConfig(mysqlListener *mysql.Listener, mysqlSslCert, mysqlSslKey, mys // initiMySQLProtocol starts the mysql protocol. // It should be called only once in a process. -func initMySQLProtocol() { +func initMySQLProtocol(vtgate *VTGate) *mysqlServer { // Flag is not set, just return. if mysqlServerPort < 0 && mysqlServerSocketPath == "" { - return + return nil } // If no VTGate was created, just return. - if rpcVTGate == nil { - return + if vtgate == nil { + return nil } // Initialize registered AuthServer implementations (or other plugins) @@ -480,53 +524,56 @@ func initMySQLProtocol() { // Create a Listener. 
var err error - vtgateHandle = newVtgateHandler(rpcVTGate) + srv := &mysqlServer{} + srv.vtgateHandle = newVtgateHandler(vtgate) if mysqlServerPort >= 0 { - mysqlListener, err = mysql.NewListener( + srv.tcpListener, err = mysql.NewListener( mysqlTCPVersion, net.JoinHostPort(mysqlServerBindAddress, fmt.Sprintf("%v", mysqlServerPort)), authServer, - vtgateHandle, + srv.vtgateHandle, mysqlConnReadTimeout, mysqlConnWriteTimeout, mysqlProxyProtocol, mysqlConnBufferPooling, + mysqlKeepAlivePeriod, ) if err != nil { log.Exitf("mysql.NewListener failed: %v", err) } - mysqlListener.ServerVersion = servenv.MySQLServerVersion() + srv.tcpListener.ServerVersion = servenv.MySQLServerVersion() if mysqlSslCert != "" && mysqlSslKey != "" { tlsVersion, err := vttls.TLSVersionToNumber(mysqlTLSMinVersion) if err != nil { log.Exitf("mysql.NewListener failed: %v", err) } - _ = initTLSConfig(mysqlListener, mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslCrl, mysqlSslServerCA, mysqlServerRequireSecureTransport, tlsVersion) + _ = initTLSConfig(context.Background(), srv, mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslCrl, mysqlSslServerCA, mysqlServerRequireSecureTransport, tlsVersion) } - mysqlListener.AllowClearTextWithoutTLS.Store(mysqlAllowClearTextWithoutTLS) + srv.tcpListener.AllowClearTextWithoutTLS.Store(mysqlAllowClearTextWithoutTLS) // Check for the connection threshold if mysqlSlowConnectWarnThreshold != 0 { log.Infof("setting mysql slow connection threshold to %v", mysqlSlowConnectWarnThreshold) - mysqlListener.SlowConnectWarnThreshold.Store(mysqlSlowConnectWarnThreshold.Nanoseconds()) + srv.tcpListener.SlowConnectWarnThreshold.Store(mysqlSlowConnectWarnThreshold.Nanoseconds()) } // Start listening for tcp - go mysqlListener.Accept() + go srv.tcpListener.Accept() } if mysqlServerSocketPath != "" { // Let's create this unix socket with permissions to all users. 
In this way, // clients can connect to vtgate mysql server without being vtgate user oldMask := syscall.Umask(000) - mysqlUnixListener, err = newMysqlUnixSocket(mysqlServerSocketPath, authServer, vtgateHandle) + srv.unixListener, err = newMysqlUnixSocket(mysqlServerSocketPath, authServer, srv.vtgateHandle) _ = syscall.Umask(oldMask) if err != nil { log.Exitf("mysql.NewListener failed: %v", err) - return + return nil } // Listen for unix socket - go mysqlUnixListener.Accept() + go srv.unixListener.Accept() } + return srv } // newMysqlUnixSocket creates a new unix socket mysql listener. If a socket file already exists, attempts @@ -541,6 +588,7 @@ func newMysqlUnixSocket(address string, authServer mysql.AuthServer, handler mys mysqlConnWriteTimeout, false, mysqlConnBufferPooling, + mysqlKeepAlivePeriod, ) switch err := err.(type) { @@ -572,6 +620,7 @@ func newMysqlUnixSocket(address string, authServer mysql.AuthServer, handler mys mysqlConnWriteTimeout, false, mysqlConnBufferPooling, + mysqlKeepAlivePeriod, ) return listener, listenerErr default: @@ -579,37 +628,38 @@ func newMysqlUnixSocket(address string, authServer mysql.AuthServer, handler mys } } -func shutdownMysqlProtocolAndDrain() { - if mysqlListener != nil { - mysqlListener.Close() - mysqlListener = nil +func (srv *mysqlServer) shutdownMysqlProtocolAndDrain() { + if srv.tcpListener != nil { + srv.tcpListener.Close() + srv.tcpListener = nil } - if mysqlUnixListener != nil { - mysqlUnixListener.Close() - mysqlUnixListener = nil + if srv.unixListener != nil { + srv.unixListener.Close() + srv.unixListener = nil } - if sigChan != nil { - signal.Stop(sigChan) + if srv.sigChan != nil { + signal.Stop(srv.sigChan) } - if atomic.LoadInt32(&busyConnections) > 0 { - log.Infof("Waiting for all client connections to be idle (%d active)...", atomic.LoadInt32(&busyConnections)) + if busy := srv.vtgateHandle.busyConnections.Load(); busy > 0 { + log.Infof("Waiting for all client connections to be idle (%d active)...", busy) 
start := time.Now() reported := start - for atomic.LoadInt32(&busyConnections) != 0 { + for busy > 0 { if time.Since(reported) > 2*time.Second { - log.Infof("Still waiting for client connections to be idle (%d active)...", atomic.LoadInt32(&busyConnections)) + log.Infof("Still waiting for client connections to be idle (%d active)...", busy) reported = time.Now() } time.Sleep(1 * time.Millisecond) + busy = srv.vtgateHandle.busyConnections.Load() } } } -func rollbackAtShutdown() { +func (srv *mysqlServer) rollbackAtShutdown() { defer log.Flush() - if vtgateHandle == nil { + if srv.vtgateHandle == nil { // we still haven't been able to initialise the vtgateHandler, so we don't need to rollback anything return } @@ -617,12 +667,12 @@ func rollbackAtShutdown() { // Close all open connections. If they're waiting for reads, this will cause // them to error out, which will automatically rollback open transactions. func() { - if vtgateHandle != nil { - vtgateHandle.mu.Lock() - defer vtgateHandle.mu.Unlock() - for c := range vtgateHandle.connections { + if srv.vtgateHandle != nil { + srv.vtgateHandle.mu.Lock() + defer srv.vtgateHandle.mu.Unlock() + for id, c := range srv.vtgateHandle.connections { if c != nil { - log.Infof("Rolling back transactions associated with connection ID: %v", c.ConnectionID) + log.Infof("Rolling back transactions associated with connection ID: %v", id) c.Close() } } @@ -632,7 +682,7 @@ func rollbackAtShutdown() { // If vtgate is instead busy executing a query, the number of open conns // will be non-zero. Give another second for those queries to finish. 
for i := 0; i < 100; i++ { - if vtgateHandle.numConnections() == 0 { + if srv.vtgateHandle.numConnections() == 0 { log.Infof("All connections have been rolled back.") return } @@ -651,10 +701,6 @@ func mysqlSocketPath() string { func init() { servenv.OnParseFor("vtgate", registerPluginFlags) servenv.OnParseFor("vtcombo", registerPluginFlags) - - servenv.OnRun(initMySQLProtocol) - servenv.OnTermSync(shutdownMysqlProtocolAndDrain) - servenv.OnClose(rollbackAtShutdown) } var pluginInitializers []func() @@ -850,11 +896,11 @@ func (vh *vtgateHandler) ComFieldList(c *mysql.Conn, tableName string, callback } if !session.InTransaction { - atomic.AddInt32(&busyConnections, 1) + vh.busyConnections.Add(1) } defer func() { if !session.InTransaction { - atomic.AddInt32(&busyConnections, -1) + vh.busyConnections.Add(-1) } }() @@ -885,7 +931,7 @@ func (vh *vtgateHandler) ComFieldList(c *mysql.Conn, tableName string, callback query := fmt.Sprintf("SELECT * FROM %s", tableName) session, result, err := vh.vtg.Prepare(ctx, session, query, make(map[string]*querypb.BindVariable)) c.ClientData = session - err = mysql.NewSQLErrorFromError(err) + err = sqlerror.NewSQLErrorFromError(err) if err != nil { return err } @@ -921,7 +967,7 @@ func (vh *vtgateHandler) ValidUseDB(c *mysql.Conn, usedb string, authServer mysq } } err = fmt.Errorf("keyspace %s not found in vschema", usedb) - return mysql.NewSQLErrorFromError(err) + return sqlerror.NewSQLErrorFromError(err) } func (vh *vtgateHandler) getTabletType(accountType int8, query, user string) (string, error) { @@ -949,19 +995,19 @@ func (vh *vtgateHandler) getTabletType(accountType int8, query, user string) (st } privileges, err := vh.vtg.executor.authServer.GetPrivilege(user) if err != nil { - return "", mysql.NewSQLErrorFromError(err) + return "", sqlerror.NewSQLErrorFromError(err) } // In this privileges not set to 0, the following operations need to be interception: // create/drop/alter/truncate/rename if privileges == 0 { err := 
fmt.Errorf("user %s has no permission to run query, sql: %s", user, query) - return "", mysql.NewSQLErrorFromError(err) + return "", sqlerror.NewSQLErrorFromError(err) } if !hasPrivilege(stmtType, privileges) { err := fmt.Errorf("user %s has no permission to run query, sql: %s", user, query) - return "", mysql.NewSQLErrorFromError(err) + return "", sqlerror.NewSQLErrorFromError(err) } return tabletType, nil diff --git a/go/vt/vtgate/plugin_mysql_server_test.go b/go/vt/vtgate/plugin_mysql_server_test.go index 950aa93053a..ff5fdd1c051 100644 --- a/go/vt/vtgate/plugin_mysql_server_test.go +++ b/go/vt/vtgate/plugin_mysql_server_test.go @@ -28,11 +28,15 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" - "vitess.io/vitess/go/trace" + "vitess.io/vitess/go/test/utils" + + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/trace" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/tlstest" ) @@ -86,7 +90,7 @@ func (th *testHandler) ComBinlogDump(c *mysql.Conn, logFile string, binlogPos ui return nil } -func (th *testHandler) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet mysql.GTIDSet) error { +func (th *testHandler) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error { return nil } @@ -259,6 +263,7 @@ func newTestAuthServerStatic() *mysql.AuthServerStatic { func testDefaultWorkloadEmpty(t *testing.T) { vh := &vtgateHandler{} + mysqlDefaultWorkload = int32(querypb.ExecuteOptions_OLTP) sess := vh.session(&mysql.Conn{}) if sess.Options.Workload != querypb.ExecuteOptions_OLTP { t.Fatalf("Expected default workload OLTP") @@ -284,6 +289,8 @@ func TestInitTLSConfigWithServerCA(t *testing.T) { func testInitTLSConfig(t *testing.T, serverCA bool) { // Create the certs. 
+ ctx := utils.LeakCheckContext(t) + root := t.TempDir() tlstest.CreateCA(root) tlstest.CreateCRL(root, tlstest.CA) @@ -294,20 +301,20 @@ func testInitTLSConfig(t *testing.T, serverCA bool) { serverCACert = path.Join(root, "ca-cert.pem") } - listener := &mysql.Listener{} - if err := initTLSConfig(listener, path.Join(root, "server-cert.pem"), path.Join(root, "server-key.pem"), path.Join(root, "ca-cert.pem"), path.Join(root, "ca-crl.pem"), serverCACert, true, tls.VersionTLS12); err != nil { + srv := &mysqlServer{tcpListener: &mysql.Listener{}} + if err := initTLSConfig(ctx, srv, path.Join(root, "server-cert.pem"), path.Join(root, "server-key.pem"), path.Join(root, "ca-cert.pem"), path.Join(root, "ca-crl.pem"), serverCACert, true, tls.VersionTLS12); err != nil { t.Fatalf("init tls config failure due to: +%v", err) } - serverConfig := listener.TLSConfig.Load() + serverConfig := srv.tcpListener.TLSConfig.Load() if serverConfig == nil { t.Fatalf("init tls config shouldn't create nil server config") } - sigChan <- syscall.SIGHUP + srv.sigChan <- syscall.SIGHUP time.Sleep(100 * time.Millisecond) // wait for signal handler - if listener.TLSConfig.Load() == serverConfig { + if srv.tcpListener.TLSConfig.Load() == serverConfig { t.Fatalf("init tls config should have been recreated after SIGHUP") } } @@ -315,3 +322,42 @@ func testInitTLSConfig(t *testing.T, serverCA bool) { func (th *testHandler) ComFieldList(c *mysql.Conn, tableName string, callback func(*sqltypes.Result) error) error { return nil } + +// TestKillMethods test the mysql plugin for kill method calls. 
+func TestKillMethods(t *testing.T) { + executor, _, _, _, _ := createExecutorEnv(t) + vh := newVtgateHandler(&VTGate{executor: executor}) + + // connection does not exist + err := vh.KillQuery(12345) + assert.ErrorContains(t, err, "Unknown thread id: 12345 (errno 1094) (sqlstate HY000)") + + err = vh.KillConnection(context.Background(), 12345) + assert.ErrorContains(t, err, "Unknown thread id: 12345 (errno 1094) (sqlstate HY000)") + + // add a connection + mysqlConn := mysql.GetTestConn() + mysqlConn.ConnectionID = 1 + vh.connections[1] = mysqlConn + + // connection exists + + // updating context. + cancelCtx, cancelFunc := context.WithCancel(context.Background()) + mysqlConn.UpdateCancelCtx(cancelFunc) + + // kill query + err = vh.KillQuery(1) + assert.NoError(t, err) + require.EqualError(t, cancelCtx.Err(), "context canceled") + + // updating context. + cancelCtx, cancelFunc = context.WithCancel(context.Background()) + mysqlConn.UpdateCancelCtx(cancelFunc) + + // kill connection + err = vh.KillConnection(context.Background(), 1) + assert.NoError(t, err) + require.EqualError(t, cancelCtx.Err(), "context canceled") + require.True(t, mysqlConn.IsMarkedForClose()) +} diff --git a/go/vt/vtgate/querylog.go b/go/vt/vtgate/querylog.go index c501c5af2a4..7425f2feba9 100644 --- a/go/vt/vtgate/querylog.go +++ b/go/vt/vtgate/querylog.go @@ -18,7 +18,6 @@ package vtgate import ( "net/http" - "sync" "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/vt/servenv" @@ -34,38 +33,33 @@ var ( // QueryzHandler is the debug UI path for exposing query plan stats QueryzHandler = "/debug/queryz" - - // QueryLogger enables streaming logging of queries - QueryLogger *streamlog.StreamLogger[*logstats.LogStats] - queryLoggerMu sync.Mutex ) -func SetQueryLogger(logger *streamlog.StreamLogger[*logstats.LogStats]) { - queryLoggerMu.Lock() - defer queryLoggerMu.Unlock() - QueryLogger = logger -} - -func initQueryLogger(vtg *VTGate) error { - 
SetQueryLogger(streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize)) - QueryLogger.ServeLogs(QueryLogHandler, streamlog.GetFormatter(QueryLogger)) +func (e *Executor) defaultQueryLogger() error { + queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) + queryLogger.ServeLogs(QueryLogHandler, streamlog.GetFormatter(queryLogger)) servenv.HTTPHandleFunc(QueryLogzHandler, func(w http.ResponseWriter, r *http.Request) { - ch := QueryLogger.Subscribe("querylogz") - defer QueryLogger.Unsubscribe(ch) + ch := queryLogger.Subscribe("querylogz") + defer queryLogger.Unsubscribe(ch) querylogzHandler(ch, w, r) }) servenv.HTTPHandleFunc(QueryzHandler, func(w http.ResponseWriter, r *http.Request) { - queryzHandler(vtg.executor, w, r) + queryzHandler(e, w, r) }) if queryLogToFile != "" { - _, err := QueryLogger.LogToFile(queryLogToFile, streamlog.GetFormatter(QueryLogger)) + _, err := queryLogger.LogToFile(queryLogToFile, streamlog.GetFormatter(queryLogger)) if err != nil { return err } } + e.queryLogger = queryLogger return nil } + +func (e *Executor) SetQueryLogger(ql *streamlog.StreamLogger[*logstats.LogStats]) { + e.queryLogger = ql +} diff --git a/go/vt/vtgate/queryz.go b/go/vt/vtgate/queryz.go index 6f660127ad4..e546fc68c6f 100644 --- a/go/vt/vtgate/queryz.go +++ b/go/vt/vtgate/queryz.go @@ -143,8 +143,7 @@ func queryzHandler(e *Executor, w http.ResponseWriter, r *http.Request) { }, } - e.plans.ForEach(func(value any) bool { - plan := value.(*engine.Plan) + e.ForEachPlan(func(plan *engine.Plan) bool { Value := &queryzRow{ Query: logz.Wrappable(sqlparser.TruncateForUI(plan.Original)), } diff --git a/go/vt/vtgate/queryz_test.go b/go/vt/vtgate/queryz_test.go index cb03bafcbab..826cb8641d8 100644 --- a/go/vt/vtgate/queryz_test.go +++ b/go/vt/vtgate/queryz_test.go @@ -27,6 +27,8 @@ import ( "github.com/stretchr/testify/require" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/vtgate/engine" 
@@ -34,34 +36,35 @@ import ( ) func TestQueryzHandler(t *testing.T) { + executor, _, _, _, ctx := createExecutorEnv(t) + resp := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/schemaz", nil) - executor, _, _, _ := createExecutorEnv() - + session := &vtgatepb.Session{TargetString: "@primary"} // single shard query sql := "select id from user where id = 1" - _, err := executorExec(executor, sql, nil) + _, err := executorExec(ctx, executor, session, sql, nil) require.NoError(t, err) - executor.plans.Wait() + time.Sleep(100 * time.Millisecond) plan1 := assertCacheContains(t, executor, nil, "select id from `user` where id = 1") plan1.ExecTime = uint64(1 * time.Millisecond) // scatter sql = "select id from user" - _, err = executorExec(executor, sql, nil) + _, err = executorExec(ctx, executor, session, sql, nil) require.NoError(t, err) - executor.plans.Wait() + time.Sleep(100 * time.Millisecond) plan2 := assertCacheContains(t, executor, nil, "select id from `user`") plan2.ExecTime = uint64(1 * time.Second) sql = "insert into user (id, name) values (:id, :name)" - _, err = executorExec(executor, sql, map[string]*querypb.BindVariable{ + _, err = executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{ "id": sqltypes.Uint64BindVariable(1), "name": sqltypes.BytesBindVariable([]byte("myname")), }) require.NoError(t, err) - executor.plans.Wait() + time.Sleep(100 * time.Millisecond) plan3 := assertCacheContains(t, executor, nil, "insert into `user`(id, `name`) values (:id, :name)") // vindex insert from above execution @@ -69,7 +72,7 @@ func TestQueryzHandler(t *testing.T) { // same query again should add query counts to existing plans sql = "insert into user (id, name) values (:id, :name)" - _, err = executorExec(executor, sql, map[string]*querypb.BindVariable{ + _, err = executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{ "id": sqltypes.Uint64BindVariable(1), "name": sqltypes.BytesBindVariable([]byte("myname")), }) diff 
--git a/go/vt/vtgate/safe_session.go b/go/vt/vtgate/safe_session.go index 6b7da1dc920..e2f3c235c94 100644 --- a/go/vt/vtgate/safe_session.go +++ b/go/vt/vtgate/safe_session.go @@ -135,7 +135,7 @@ func NewSafeSession(sessn *vtgatepb.Session) *SafeSession { // NewAutocommitSession returns a SafeSession based on the original // session, but with autocommit enabled. func NewAutocommitSession(sessn *vtgatepb.Session) *SafeSession { - newSession := proto.Clone(sessn).(*vtgatepb.Session) + newSession := sessn.CloneVT() newSession.InTransaction = false newSession.ShardSessions = nil newSession.PreSessions = nil @@ -716,6 +716,20 @@ func (session *SafeSession) GetDDLStrategy() string { return session.DDLStrategy } +// SetMigrationContext set the migration_context setting. +func (session *SafeSession) SetMigrationContext(migrationContext string) { + session.mu.Lock() + defer session.mu.Unlock() + session.MigrationContext = migrationContext +} + +// GetMigrationContext returns the migration_context value. +func (session *SafeSession) GetMigrationContext() string { + session.mu.Lock() + defer session.mu.Unlock() + return session.MigrationContext +} + // GetSessionUUID returns the SessionUUID value. func (session *SafeSession) GetSessionUUID() string { session.mu.Lock() diff --git a/go/vt/vtgate/sandbox_test.go b/go/vt/vtgate/sandbox_test.go index 4197e6ef231..1629e9a4faa 100644 --- a/go/vt/vtgate/sandbox_test.go +++ b/go/vt/vtgate/sandbox_test.go @@ -19,6 +19,8 @@ package vtgate import ( "context" "fmt" + "hash/fnv" + "strconv" "sync" "vitess.io/vitess/go/json2" @@ -218,9 +220,9 @@ type sandboxTopo struct { // the given cells. 
// // when this version is used, WatchSrvVSchema can properly simulate watches -func newSandboxForCells(cells []string) *sandboxTopo { +func newSandboxForCells(ctx context.Context, cells []string) *sandboxTopo { return &sandboxTopo{ - topoServer: memorytopo.NewServer(cells...), + topoServer: memorytopo.NewServer(ctx, cells...), } } @@ -284,6 +286,16 @@ func (sct *sandboxTopo) WatchSrvKeyspace(ctx context.Context, cell, keyspace str // panic("not supported: WatchSrvKeyspace") } +func hash(s string) uint32 { + h := fnv.New32a() + h.Write([]byte(s)) + return h.Sum32() +} + +func GetSrvVSchemaHash(vs *vschemapb.SrvVSchema) string { + return strconv.Itoa(int(hash(vs.String()))) +} + // WatchSrvVSchema is part of the srvtopo.Server interface. // // If the sandbox was created with a backing topo service, piggy back on it @@ -302,11 +314,31 @@ func (sct *sandboxTopo) WatchSrvVSchema(ctx context.Context, cell string, callba if !callback(current.Value, nil) { panic("sandboxTopo callback returned false") } + if updateChan == nil { + panic("sandboxTopo updateChan is nil") + } + currentHash := GetSrvVSchemaHash(current.Value) go func() { for { - update := <-updateChan - if !callback(update.Value, update.Err) { - panic("sandboxTopo callback returned false") + select { + case <-ctx.Done(): + return + case update := <-updateChan: + // If the channel was closed, we're done. + if update == nil { + return + } + newHash := GetSrvVSchemaHash(update.Value) + if newHash == currentHash { + // sometimes we get the same update multiple times. This results in the plan cache to be cleared + // causing tests to fail. So we just ignore the duplicate updates. 
+ continue + } + currentHash = newHash + if !callback(update.Value, update.Err) { + panic("sandboxTopo callback returned false") + } + } } }() diff --git a/go/vt/vtgate/scatter_conn.go b/go/vt/vtgate/scatter_conn.go index 2680d613d89..8200d258523 100644 --- a/go/vt/vtgate/scatter_conn.go +++ b/go/vt/vtgate/scatter_conn.go @@ -22,11 +22,11 @@ import ( "sync" "time" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/sqlparser" "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/concurrency" @@ -755,13 +755,13 @@ func (stc *ScatterConn) ExecuteLock(ctx context.Context, rs *srvtopo.ResolvedSha } func wasConnectionClosed(err error) bool { - sqlErr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) + sqlErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) message := sqlErr.Error() switch sqlErr.Number() { - case mysql.CRServerGone, mysql.CRServerLost: + case sqlerror.CRServerGone, sqlerror.CRServerLost: return true - case mysql.ERQueryInterrupted: + case sqlerror.ERQueryInterrupted: return vterrors.TxClosed.MatchString(message) default: return false diff --git a/go/vt/vtgate/scatter_conn_test.go b/go/vt/vtgate/scatter_conn_test.go index 7fe751c9a00..6e57c10bbbd 100644 --- a/go/vt/vtgate/scatter_conn_test.go +++ b/go/vt/vtgate/scatter_conn_test.go @@ -19,11 +19,11 @@ package vtgate import ( "testing" + "vitess.io/vitess/go/mysql/sqlerror" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "github.com/stretchr/testify/assert" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/test/utils" @@ -42,10 +42,11 @@ import ( // This file uses the sandbox_test framework. 
func TestExecuteFailOnAutocommit(t *testing.T) { + ctx := utils.LeakCheckContext(t) createSandbox("TestExecuteFailOnAutocommit") hc := discovery.NewFakeHealthCheck(nil) - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 := hc.AddTestTablet("aa", "0", 1, "TestExecuteFailOnAutocommit", "0", topodatapb.TabletType_PRIMARY, true, 1, nil) sbc1 := hc.AddTestTablet("aa", "1", 1, "TestExecuteFailOnAutocommit", "1", topodatapb.TabletType_PRIMARY, true, 1, nil) @@ -105,10 +106,12 @@ func TestExecuteFailOnAutocommit(t *testing.T) { } func TestReservedOnMultiReplica(t *testing.T) { + ctx := utils.LeakCheckContext(t) + keyspace := "keyspace" createSandbox(keyspace) hc := discovery.NewFakeHealthCheck(nil) - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0_1 := hc.AddTestTablet("aa", "0", 1, keyspace, "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc0_2 := hc.AddTestTablet("aa", "2", 1, keyspace, "0", topodatapb.TabletType_REPLICA, true, 1, nil) // sbc1 := hc.AddTestTablet("aa", "1", 1, keyspace, "1", topodatapb.TabletType_REPLICA, true, 1, nil) @@ -117,18 +120,20 @@ func TestReservedOnMultiReplica(t *testing.T) { sbc0_1.SetResults([]*sqltypes.Result{{}}) sbc0_2.SetResults([]*sqltypes.Result{{}}) - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") session := NewSafeSession(&vtgatepb.Session{InTransaction: false, InReservedConn: true}) destinations := []key.Destination{key.DestinationShard("0")} for i := 0; i < 10; i++ { - executeOnShards(t, res, keyspace, sc, session, destinations) + executeOnShards(t, ctx, res, keyspace, sc, session, destinations) assert.EqualValues(t, 1, sbc0_1.ReserveCount.Load()+sbc0_2.ReserveCount.Load(), "sbc0 reserve 
count") assert.EqualValues(t, 0, sbc0_1.BeginCount.Load()+sbc0_2.BeginCount.Load(), "sbc0 begin count") } } func TestReservedBeginTableDriven(t *testing.T) { + ctx := utils.LeakCheckContext(t) + type testAction struct { transaction, reserved bool shards []string @@ -253,7 +258,7 @@ func TestReservedBeginTableDriven(t *testing.T) { keyspace := "keyspace" createSandbox(keyspace) hc := discovery.NewFakeHealthCheck(nil) - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 := hc.AddTestTablet("aa", "0", 1, keyspace, "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc1 := hc.AddTestTablet("aa", "1", 1, keyspace, "1", topodatapb.TabletType_REPLICA, true, 1, nil) @@ -261,7 +266,7 @@ func TestReservedBeginTableDriven(t *testing.T) { sbc0.SetResults([]*sqltypes.Result{{}}) sbc1.SetResults([]*sqltypes.Result{{}}) - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") t.Run(test.name, func(t *testing.T) { session := NewSafeSession(&vtgatepb.Session{}) @@ -272,7 +277,7 @@ func TestReservedBeginTableDriven(t *testing.T) { for _, shard := range action.shards { destinations = append(destinations, key.DestinationShard(shard)) } - executeOnShards(t, res, keyspace, sc, session, destinations) + executeOnShards(t, ctx, res, keyspace, sc, session, destinations) assert.EqualValues(t, action.sbc0Reserve, sbc0.ReserveCount.Load(), "sbc0 reserve count") assert.EqualValues(t, action.sbc0Begin, sbc0.BeginCount.Load(), "sbc0 begin count") assert.EqualValues(t, action.sbc1Reserve, sbc1.ReserveCount.Load(), "sbc1 reserve count") @@ -287,47 +292,49 @@ func TestReservedBeginTableDriven(t *testing.T) { } func TestReservedConnFail(t *testing.T) { + ctx := utils.LeakCheckContext(t) + keyspace := "keyspace" createSandbox(keyspace) hc := discovery.NewFakeHealthCheck(nil) - sc := 
newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 := hc.AddTestTablet("aa", "0", 1, keyspace, "0", topodatapb.TabletType_REPLICA, true, 1, nil) _ = hc.AddTestTablet("aa", "1", 1, keyspace, "1", topodatapb.TabletType_REPLICA, true, 1, nil) - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") session := NewSafeSession(&vtgatepb.Session{InTransaction: false, InReservedConn: true}) destinations := []key.Destination{key.DestinationShard("0")} - executeOnShards(t, res, keyspace, sc, session, destinations) + executeOnShards(t, ctx, res, keyspace, sc, session, destinations) assert.Equal(t, 1, len(session.ShardSessions)) oldRId := session.Session.ShardSessions[0].ReservedId - sbc0.EphemeralShardErr = mysql.NewSQLError(mysql.CRServerGone, mysql.SSUnknownSQLState, "lost connection") - _ = executeOnShardsReturnsErr(t, res, keyspace, sc, session, destinations) + sbc0.EphemeralShardErr = sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "lost connection") + _ = executeOnShardsReturnsErr(t, ctx, res, keyspace, sc, session, destinations) assert.Equal(t, 3, len(sbc0.Queries), "1 for the successful run, one for the failed attempt, and one for the retry") require.Equal(t, 1, len(session.ShardSessions)) assert.NotEqual(t, oldRId, session.Session.ShardSessions[0].ReservedId, "should have recreated a reserved connection since the last connection was lost") oldRId = session.Session.ShardSessions[0].ReservedId sbc0.Queries = nil - sbc0.EphemeralShardErr = mysql.NewSQLError(mysql.ERQueryInterrupted, mysql.SSUnknownSQLState, "transaction 123 not found") - _ = executeOnShardsReturnsErr(t, res, keyspace, sc, session, destinations) + sbc0.EphemeralShardErr = sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, sqlerror.SSUnknownSQLState, "transaction 123 not 
found") + _ = executeOnShardsReturnsErr(t, ctx, res, keyspace, sc, session, destinations) assert.Equal(t, 2, len(sbc0.Queries), "one for the failed attempt, and one for the retry") require.Equal(t, 1, len(session.ShardSessions)) assert.NotEqual(t, oldRId, session.Session.ShardSessions[0].ReservedId, "should have recreated a reserved connection since the last connection was lost") oldRId = session.Session.ShardSessions[0].ReservedId sbc0.Queries = nil - sbc0.EphemeralShardErr = mysql.NewSQLError(mysql.ERQueryInterrupted, mysql.SSUnknownSQLState, "transaction 123 ended at 2020-01-20") - _ = executeOnShardsReturnsErr(t, res, keyspace, sc, session, destinations) + sbc0.EphemeralShardErr = sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, sqlerror.SSUnknownSQLState, "transaction 123 ended at 2020-01-20") + _ = executeOnShardsReturnsErr(t, ctx, res, keyspace, sc, session, destinations) assert.Equal(t, 2, len(sbc0.Queries), "one for the failed attempt, and one for the retry") require.Equal(t, 1, len(session.ShardSessions)) assert.NotEqual(t, oldRId, session.Session.ShardSessions[0].ReservedId, "should have recreated a reserved connection since the last connection was lost") oldRId = session.Session.ShardSessions[0].ReservedId sbc0.Queries = nil - sbc0.EphemeralShardErr = mysql.NewSQLError(mysql.ERQueryInterrupted, mysql.SSUnknownSQLState, "transaction 123 in use: for tx killer rollback") - _ = executeOnShardsReturnsErr(t, res, keyspace, sc, session, destinations) + sbc0.EphemeralShardErr = sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, sqlerror.SSUnknownSQLState, "transaction 123 in use: for tx killer rollback") + _ = executeOnShardsReturnsErr(t, ctx, res, keyspace, sc, session, destinations) assert.Equal(t, 2, len(sbc0.Queries), "one for the failed attempt, and one for the retry") require.Equal(t, 1, len(session.ShardSessions)) assert.NotEqual(t, oldRId, session.Session.ShardSessions[0].ReservedId, "should have recreated a reserved connection since the last connection 
was lost") @@ -335,7 +342,7 @@ func TestReservedConnFail(t *testing.T) { sbc0.Queries = nil sbc0.EphemeralShardErr = vterrors.New(vtrpcpb.Code_CLUSTER_EVENT, "operation not allowed in state NOT_SERVING during query: query1") - _ = executeOnShardsReturnsErr(t, res, keyspace, sc, session, destinations) + _ = executeOnShardsReturnsErr(t, ctx, res, keyspace, sc, session, destinations) assert.Equal(t, 2, len(sbc0.Queries), "one for the failed attempt, and one for the retry") require.Equal(t, 1, len(session.ShardSessions)) assert.NotEqual(t, oldRId, session.Session.ShardSessions[0].ReservedId, "should have recreated a reserved connection since the last connection was lost") @@ -343,7 +350,7 @@ func TestReservedConnFail(t *testing.T) { sbc0.Queries = nil sbc0.EphemeralShardErr = vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "invalid tablet type: REPLICA, want: PRIMARY") - _ = executeOnShardsReturnsErr(t, res, keyspace, sc, session, destinations) + _ = executeOnShardsReturnsErr(t, ctx, res, keyspace, sc, session, destinations) assert.Equal(t, 2, len(sbc0.Queries), "one for the failed attempt, and one for the retry") require.Equal(t, 1, len(session.ShardSessions)) assert.NotEqual(t, oldRId, session.Session.ShardSessions[0].ReservedId, "should have recreated a reserved connection since the last connection was lost") @@ -364,7 +371,7 @@ func TestReservedConnFail(t *testing.T) { sbc0.Queries = nil sbc0.ExecCount.Store(0) - _ = executeOnShardsReturnsErr(t, res, keyspace, sc, session, destinations) + _ = executeOnShardsReturnsErr(t, ctx, res, keyspace, sc, session, destinations) assert.EqualValues(t, 1, sbc0.ExecCount.Load(), "first attempt should be made on original tablet") assert.EqualValues(t, 0, len(sbc0.Queries), "no query should be executed on it") assert.Equal(t, 1, len(sbc0Rep.Queries), "this attempt on new healthy tablet should pass") @@ -394,7 +401,7 @@ func TestReservedConnFail(t *testing.T) { sbc0Rep.Queries = nil sbc0Rep.ExecCount.Store(0) - _ = 
executeOnShardsReturnsErr(t, res, keyspace, sc, session, destinations) + _ = executeOnShardsReturnsErr(t, ctx, res, keyspace, sc, session, destinations) assert.EqualValues(t, 1, sbc0Rep.ExecCount.Load(), "first attempt should be made on the changed tablet type") assert.EqualValues(t, 0, len(sbc0Rep.Queries), "no query should be executed on it") assert.Equal(t, 1, len(sbc0.Queries), "this attempt should pass as it is on new healthy tablet and matches the target") @@ -410,27 +417,27 @@ func TestIsConnClosed(t *testing.T) { conClosed bool }{{ "server gone", - mysql.NewSQLError(mysql.CRServerGone, mysql.SSNetError, ""), + sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSNetError, ""), true, }, { "connection lost", - mysql.NewSQLError(mysql.CRServerLost, mysql.SSNetError, ""), + sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSNetError, ""), true, }, { "tx ended", - mysql.NewSQLError(mysql.ERQueryInterrupted, mysql.SSUnknownSQLState, "transaction 111 ended at ..."), + sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, sqlerror.SSUnknownSQLState, "transaction 111 ended at ..."), true, }, { "tx not found", - mysql.NewSQLError(mysql.ERQueryInterrupted, mysql.SSUnknownSQLState, "transaction 111 not found ..."), + sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, sqlerror.SSUnknownSQLState, "transaction 111 not found ..."), true, }, { "tx not found missing tx id", - mysql.NewSQLError(mysql.ERQueryInterrupted, mysql.SSUnknownSQLState, "transaction not found"), + sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, sqlerror.SSUnknownSQLState, "transaction not found"), false, }, { "tx getting killed by tx killer", - mysql.NewSQLError(mysql.ERQueryInterrupted, mysql.SSUnknownSQLState, "transaction 111 in use: for tx killer rollback"), + sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, sqlerror.SSUnknownSQLState, "transaction 111 in use: for tx killer rollback"), true, }} diff --git a/go/vt/vtgate/schema/tracker.go b/go/vt/vtgate/schema/tracker.go index 
24fd3d77f57..3a03ea83a84 100644 --- a/go/vt/vtgate/schema/tracker.go +++ b/go/vt/vtgate/schema/tracker.go @@ -18,24 +18,17 @@ package schema import ( "context" + "maps" + "strings" "sync" "time" - "golang.org/x/exp/maps" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/sidecardb" + querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/queryservice" - - querypb "vitess.io/vitess/go/vt/proto/query" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) type ( @@ -63,22 +56,12 @@ type ( // defaultConsumeDelay is the default time, the updateController will wait before checking the schema fetch request queue. const defaultConsumeDelay = 1 * time.Second -// aclErrorMessageLog is for logging a warning when an acl error message is received for querying schema tracking table. -const aclErrorMessageLog = "Table ACL might be enabled, --schema_change_signal_user needs to be passed to VTGate for schema tracking to work. Check 'schema tracking' docs on vitess.io" - // NewTracker creates the tracker object. -func NewTracker(ch chan *discovery.TabletHealth, user string, enableViews bool) *Tracker { - ctx := context.Background() - // Set the caller on the context if the user is provided. - // This user that will be sent down to vttablet calls. 
- if user != "" { - ctx = callerid.NewContext(ctx, nil, callerid.NewImmediateCallerID(user)) - } - +func NewTracker(ch chan *discovery.TabletHealth, enableViews bool) *Tracker { t := &Tracker{ - ctx: ctx, + ctx: context.Background(), ch: ch, - tables: &tableMap{m: map[keyspaceStr]map[tableNameStr][]vindexes.Column{}}, + tables: &tableMap{m: make(map[keyspaceStr]map[tableNameStr]*vindexes.TableInfo)}, tracked: map[keyspaceStr]*updateController{}, consumeDelay: defaultConsumeDelay, } @@ -110,17 +93,6 @@ func (t *Tracker) loadTables(conn queryservice.QueryService, target *querypb.Tar return nil } - sidecarDBID, err := sidecardb.GetIdentifierForKeyspace(target.Keyspace) - if err != nil { - return vterrors.VT14005(target.Keyspace) - } - - ftRes, err := conn.Execute(t.ctx, target, - sqlparser.BuildParsedQuery(mysql.FetchTables, sidecarDBID).Query, - nil, 0, 0, nil) - if err != nil { - return err - } t.mu.Lock() defer t.mu.Unlock() @@ -129,8 +101,17 @@ func (t *Tracker) loadTables(conn queryservice.QueryService, target *querypb.Tar // clearing out the previous schema we can end up with duplicate entries when the // tablet is simply restarted or potentially when we elect a new primary. t.clearKeyspaceTables(target.Keyspace) - t.updateTables(target.Keyspace, ftRes) - log.Infof("finished loading schema for keyspace %s. Found %d columns in total across the tables", target.Keyspace, len(ftRes.Rows)) + + var numTables int + err := conn.GetSchema(t.ctx, target, querypb.SchemaTableType_TABLES, nil, func(schemaRes *querypb.GetSchemaResponse) error { + t.updateTables(target.Keyspace, schemaRes.TableDefinition) + numTables += len(schemaRes.TableDefinition) + return nil + }) + if err != nil { + return err + } + log.Infof("finished loading tables for keyspace %s. 
Found %d tables", target.Keyspace, numTables) return nil } @@ -170,6 +151,10 @@ func (t *Tracker) Start() { for { select { case th := <-t.ch: + if th == nil { + // channel closed + return + } ksUpdater := t.getKeyspaceUpdateController(th) ksUpdater.add(th) case <-ctx.Done(): @@ -202,10 +187,6 @@ func (t *Tracker) initKeyspace(th *discovery.TabletHealth) error { err := t.LoadKeyspace(th.Conn, th.Target) if err != nil { log.Warningf("Unable to add the %s keyspace to the schema tracker: %v", th.Target.Keyspace, err) - code := vterrors.Code(err) - if code == vtrpcpb.Code_UNAUTHENTICATED || code == vtrpcpb.Code_PERMISSION_DENIED { - log.Warning(aclErrorMessageLog) - } return err } return nil @@ -222,17 +203,27 @@ func (t *Tracker) GetColumns(ks string, tbl string) []vindexes.Column { t.mu.Lock() defer t.mu.Unlock() - return t.tables.get(ks, tbl) + tblInfo := t.tables.get(ks, tbl) + return tblInfo.Columns +} + +// GetForeignKeys returns the foreign keys for table in the given keyspace. +func (t *Tracker) GetForeignKeys(ks string, tbl string) []*sqlparser.ForeignKeyDefinition { + t.mu.Lock() + defer t.mu.Unlock() + + tblInfo := t.tables.get(ks, tbl) + return tblInfo.ForeignKeys } // Tables returns a map with the columns for all known tables in the keyspace -func (t *Tracker) Tables(ks string) map[string][]vindexes.Column { +func (t *Tracker) Tables(ks string) map[string]*vindexes.TableInfo { t.mu.Lock() defer t.mu.Unlock() m := t.tables.m[ks] if m == nil { - return map[string][]vindexes.Column{} // we know nothing about this KS, so that is the info we can give out + return map[string]*vindexes.TableInfo{} // we know nothing about this KS, so that is the info we can give out } return maps.Clone(m) @@ -264,60 +255,96 @@ func (t *Tracker) updateSchema(th *discovery.TabletHealth) bool { } func (t *Tracker) updatedTableSchema(th *discovery.TabletHealth) bool { + t.mu.Lock() + defer t.mu.Unlock() + tablesUpdated := th.Stats.TableSchemaChanged - tables, err := 
sqltypes.BuildBindVariable(tablesUpdated) - if err != nil { - log.Errorf("Failed to read updated tables from TabletHealth: %v", err) - return false - } - sidecarDBID, err := sidecardb.GetIdentifierForKeyspace(th.Target.Keyspace) - if err != nil { - log.Errorf("Failed to read sidecar database identifier for keyspace %q from the cache: %v", - th.Target.Keyspace, err) - return false + // first we empty all prior schema. deleted tables will not show up in the result, + // so this is the only chance to delete + for _, tbl := range tablesUpdated { + t.tables.delete(th.Target.Keyspace, tbl) } - - bv := map[string]*querypb.BindVariable{"tableNames": tables} - res, err := th.Conn.Execute(t.ctx, th.Target, - sqlparser.BuildParsedQuery(mysql.FetchUpdatedTables, sidecarDBID).Query, - bv, 0, 0, nil) + err := th.Conn.GetSchema(t.ctx, th.Target, querypb.SchemaTableType_TABLES, tablesUpdated, func(schemaRes *querypb.GetSchemaResponse) error { + t.updateTables(th.Target.Keyspace, schemaRes.TableDefinition) + return nil + }) if err != nil { t.tracked[th.Target.Keyspace].setLoaded(false) // TODO: optimize for the tables that got errored out. log.Warningf("error fetching new schema for %v, making them non-authoritative: %v", tablesUpdated, err) - code := vterrors.Code(err) - if code == vtrpcpb.Code_UNAUTHENTICATED || code == vtrpcpb.Code_PERMISSION_DENIED { - log.Warning(aclErrorMessageLog) - } return false } + return true +} - t.mu.Lock() - defer t.mu.Unlock() +func (t *Tracker) updateTables(keyspace string, res map[string]string) { + for tableName, tableDef := range res { + stmt, err := sqlparser.Parse(tableDef) + if err != nil { + log.Warningf("error parsing table definition for %s: %v", tableName, err) + continue + } + ddl, ok := stmt.(*sqlparser.CreateTable) + if !ok { + log.Warningf("parsed table definition for '%s' is not a create table definition", tableName) + continue + } - // first we empty all prior schema. 
deleted tables will not show up in the result, - // so this is the only chance to delete - for _, tbl := range tablesUpdated { - t.tables.delete(th.Target.Keyspace, tbl) + cols := getColumns(ddl.TableSpec) + fks := getForeignKeys(ddl.TableSpec) + t.tables.set(keyspace, tableName, cols, fks) } - t.updateTables(th.Target.Keyspace, res) - return true } -func (t *Tracker) updateTables(keyspace string, res *sqltypes.Result) { - for _, row := range res.Rows { - tbl := row[0].ToString() - colName := row[1].ToString() - colType := row[2].ToString() - collation := row[3].ToString() +func getColumns(tblSpec *sqlparser.TableSpec) []vindexes.Column { + tblCollation := getTableCollation(tblSpec) + cols := make([]vindexes.Column, 0, len(tblSpec.Columns)) + for _, column := range tblSpec.Columns { + colCollation := getColumnCollation(tblCollation, column) + cols = append(cols, + vindexes.Column{ + Name: column.Name, + Type: column.Type.SQLType(), + CollationName: colCollation, + }) + } + return cols +} + +func getForeignKeys(tblSpec *sqlparser.TableSpec) []*sqlparser.ForeignKeyDefinition { + if tblSpec.Constraints == nil { + return nil + } + var fks []*sqlparser.ForeignKeyDefinition + for _, constraint := range tblSpec.Constraints { + fkDef, ok := constraint.Details.(*sqlparser.ForeignKeyDefinition) + if !ok { + continue + } + fks = append(fks, fkDef) + } + return fks +} - cType := sqlparser.ColumnType{Type: colType} - col := vindexes.Column{Name: sqlparser.NewIdentifierCI(colName), Type: cType.SQLType(), CollationName: collation} - cols := t.tables.get(keyspace, tbl) +func getTableCollation(tblSpec *sqlparser.TableSpec) string { + if tblSpec.Options == nil { + return "" + } + collate := sqlparser.KeywordString(sqlparser.COLLATE) + for _, option := range tblSpec.Options { + if strings.EqualFold(option.Name, collate) { + return option.String + } + } + return "" +} - t.tables.set(keyspace, tbl, append(cols, col)) +func getColumnCollation(defaultCollation string, column 
*sqlparser.ColumnDefinition) string { + if column.Type.Options == nil || column.Type.Options.Collate == "" { + return defaultCollation } + return column.Type.Options.Collate } func (t *Tracker) updatedViewSchema(th *discovery.TabletHealth) bool { @@ -372,22 +399,22 @@ func (t *Tracker) AddNewKeyspace(conn queryservice.QueryService, target *querypb } type tableMap struct { - m map[keyspaceStr]map[tableNameStr][]vindexes.Column + m map[keyspaceStr]map[tableNameStr]*vindexes.TableInfo } -func (tm *tableMap) set(ks, tbl string, cols []vindexes.Column) { +func (tm *tableMap) set(ks, tbl string, cols []vindexes.Column, fks []*sqlparser.ForeignKeyDefinition) { m := tm.m[ks] if m == nil { - m = make(map[tableNameStr][]vindexes.Column) + m = make(map[tableNameStr]*vindexes.TableInfo) tm.m[ks] = m } - m[tbl] = cols + m[tbl] = &vindexes.TableInfo{Columns: cols, ForeignKeys: fks} } -func (tm *tableMap) get(ks, tbl string) []vindexes.Column { +func (tm *tableMap) get(ks, tbl string) *vindexes.TableInfo { m := tm.m[ks] if m == nil { - return nil + return &vindexes.TableInfo{} } return m[tbl] } diff --git a/go/vt/vtgate/schema/tracker_test.go b/go/vt/vtgate/schema/tracker_test.go index 185371f009f..4f514fec101 100644 --- a/go/vt/vtgate/schema/tracker_test.go +++ b/go/vt/vtgate/schema/tracker_test.go @@ -18,19 +18,14 @@ package schema import ( "context" - "fmt" "os" "sync" "testing" "time" - "vitess.io/vitess/go/mysql" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/log" @@ -50,8 +45,10 @@ var ( func TestMain(m *testing.M) { exitCode := func() int { - ts := memorytopo.NewServer(cell) - ts.CreateKeyspace(context.Background(), keyspace, &topodatapb.Keyspace{}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + ts.CreateKeyspace(ctx, keyspace, 
&topodatapb.Keyspace{}) _, created := sidecardb.NewIdentifierCache(func(ctx context.Context, keyspace string) (string, error) { ki, err := ts.GetKeyspace(ctx, keyspace) if err != nil { @@ -68,163 +65,7 @@ func TestMain(m *testing.M) { os.Exit(exitCode) } -func TestTracking(t *testing.T) { - target := &querypb.Target{ - Keyspace: keyspace, - Shard: "-80", - TabletType: topodatapb.TabletType_PRIMARY, - Cell: cell, - } - tablet := &topodatapb.Tablet{ - Keyspace: target.Keyspace, - Shard: target.Shard, - Type: target.TabletType, - } - fields := sqltypes.MakeTestFields( - "table_name|col_name|col_type|collation_name", - "varchar|varchar|varchar|varchar", - ) - - type delta struct { - result *sqltypes.Result - updTbl []string - } - var ( - d0 = delta{ - result: sqltypes.MakeTestResult( - fields, - "prior|id|int|", - ), - updTbl: []string{"prior"}, - } - - d1 = delta{ - result: sqltypes.MakeTestResult( - fields, - "t1|id|int|", - "t1|name|varchar|utf8_bin", - "t2|id|varchar|utf8_bin", - ), - updTbl: []string{"t1", "t2"}, - } - - d2 = delta{ - result: sqltypes.MakeTestResult( - fields, - "t2|id|varchar|utf8_bin", - "t2|name|varchar|utf8_bin", - "t3|id|datetime|", - ), - updTbl: []string{"prior", "t1", "t2", "t3"}, - } - - d3 = delta{ - result: sqltypes.MakeTestResult( - fields, - "t4|name|varchar|utf8_bin", - ), - updTbl: []string{"t4"}, - } - ) - - testcases := []struct { - tName string - deltas []delta - exp map[string][]vindexes.Column - }{{ - tName: "new tables", - deltas: []delta{d0, d1}, - exp: map[string][]vindexes.Column{ - "t1": { - {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT32}, - {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, CollationName: "utf8_bin"}}, - "t2": { - {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARCHAR, CollationName: "utf8_bin"}}, - "prior": { - {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT32}}, - }, - }, { - tName: "delete t1 and prior, updated t2 and new t3", - deltas: 
[]delta{d0, d1, d2}, - exp: map[string][]vindexes.Column{ - "t2": { - {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARCHAR, CollationName: "utf8_bin"}, - {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, CollationName: "utf8_bin"}}, - "t3": { - {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_DATETIME}}, - }, - }, { - tName: "new t4", - deltas: []delta{d0, d1, d2, d3}, - exp: map[string][]vindexes.Column{ - "t2": { - {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARCHAR, CollationName: "utf8_bin"}, - {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, CollationName: "utf8_bin"}}, - "t3": { - {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_DATETIME}}, - "t4": { - {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, CollationName: "utf8_bin"}}, - }, - }, - } - for i, tcase := range testcases { - t.Run(fmt.Sprintf("%d - %s", i, tcase.tName), func(t *testing.T) { - sbc := sandboxconn.NewSandboxConn(tablet) - ch := make(chan *discovery.TabletHealth) - tracker := NewTracker(ch, "", false) - tracker.consumeDelay = 1 * time.Millisecond - tracker.Start() - defer tracker.Stop() - - results := []*sqltypes.Result{{}} - for _, d := range tcase.deltas { - for _, deltaRow := range d.result.Rows { - same := false - for _, row := range results[0].Rows { - if row[0].String() == deltaRow[0].String() && row[1].String() == deltaRow[1].String() { - same = true - break - } - } - if same == false { - results[0].Rows = append(results[0].Rows, deltaRow) - } - } - } - - sbc.SetResults(results) - sbc.Queries = nil - - wg := sync.WaitGroup{} - wg.Add(1) - tracker.RegisterSignalReceiver(func() { - wg.Done() - }) - - for _, d := range tcase.deltas { - ch <- &discovery.TabletHealth{ - Conn: sbc, - Tablet: tablet, - Target: target, - Serving: true, - Stats: &querypb.RealtimeStats{TableSchemaChanged: d.updTbl}, - } - } - - require.False(t, waitTimeout(&wg, time.Second), "schema was updated but 
received no signal") - - require.Equal(t, 1, len(sbc.StringQueries())) - - _, keyspacePresent := tracker.tracked[target.Keyspace] - require.Equal(t, true, keyspacePresent) - - for k, v := range tcase.exp { - utils.MustMatch(t, v, tracker.GetColumns("ks", k), "mismatch for table: ", k) - } - }) - } -} - +// TestTrackingUnHealthyTablet tests that the tracker is sending GetSchema calls only when the tablet is healthy. func TestTrackingUnHealthyTablet(t *testing.T) { target := &querypb.Target{ Keyspace: keyspace, @@ -240,7 +81,7 @@ func TestTrackingUnHealthyTablet(t *testing.T) { sbc := sandboxconn.NewSandboxConn(tablet) ch := make(chan *discovery.TabletHealth) - tracker := NewTracker(ch, "", false) + tracker := NewTracker(ch, false) tracker.consumeDelay = 1 * time.Millisecond tracker.Start() defer tracker.Stop() @@ -277,7 +118,6 @@ func TestTrackingUnHealthyTablet(t *testing.T) { }, } - sbc.SetResults([]*sqltypes.Result{{}, {}, {}}) for _, tcase := range tcases { ch <- &discovery.TabletHealth{ Conn: sbc, @@ -290,25 +130,10 @@ func TestTrackingUnHealthyTablet(t *testing.T) { } require.False(t, waitTimeout(&wg, 5*time.Second), "schema was updated but received no signal") - require.Equal(t, []string{sqlparser.BuildParsedQuery(mysql.FetchTables, sidecardb.DefaultName).Query, - sqlparser.BuildParsedQuery(mysql.FetchUpdatedTables, sidecardb.DefaultName).Query, - sqlparser.BuildParsedQuery(mysql.FetchTables, sidecardb.DefaultName).Query}, sbc.StringQueries()) -} - -func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool { - c := make(chan struct{}) - go func() { - defer close(c) - wg.Wait() - }() - select { - case <-c: - return false // completed normally - case <-time.After(timeout): - return true // timed out - } + require.EqualValues(t, 3, sbc.GetSchemaCount.Load()) } +// TestTrackerGetKeyspaceUpdateController tests table update controller initialization. 
func TestTrackerGetKeyspaceUpdateController(t *testing.T) { ks3 := &updateController{} tracker := Tracker{ @@ -341,15 +166,66 @@ func TestTrackerGetKeyspaceUpdateController(t *testing.T) { assert.Nil(t, ks3.reloadKeyspace, "ks3 already initialized") } +// TestTableTracking tests that the tracker is able to track table schema changes. +func TestTableTracking(t *testing.T) { + schemaDefResult := []map[string]string{{ + "prior": "create table prior(id int primary key)", + }, { + // initial load of view - kept empty + }, { + "t1": "create table t1(id bigint primary key, name varchar(50))", + "t2": "create table t2(id varchar(50) primary key)", + }, { + "t2": "create table t2(id varchar(50) primary key, name varchar(50))", + "t3": "create table t3(id datetime primary key)", + }, { + "t4": "create table t4(name varchar(50) primary key)", + }} + + testcases := []testCases{{ + testName: "initial table load", + expTbl: map[string][]vindexes.Column{ + "prior": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT32}}, + }, + }, { + testName: "new tables", + updTbl: []string{"t1", "t2"}, + expTbl: map[string][]vindexes.Column{ + "prior": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT32}}, + "t1": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR}}, + "t2": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARCHAR}}, + }, + }, { + testName: "delete prior, updated t2 and new t3", + updTbl: []string{"prior", "t2", "t3"}, + expTbl: map[string][]vindexes.Column{ + "t1": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR}}, + "t2": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARCHAR}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR}}, + "t3": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_DATETIME}}, + }, + }, { + testName: 
"new t4", + updTbl: []string{"t4"}, + expTbl: map[string][]vindexes.Column{ + "t1": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR}}, + "t2": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARCHAR}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR}}, + "t3": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_DATETIME}}, + "t4": {{Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR}}, + }, + }} + + testTracker(t, schemaDefResult, testcases) +} + // TestViewsTracking tests that the tracker is able to track views. func TestViewsTracking(t *testing.T) { - target := &querypb.Target{Cell: cell, Keyspace: keyspace, Shard: "-80", TabletType: topodatapb.TabletType_PRIMARY} - tablet := &topodatapb.Tablet{Keyspace: target.Keyspace, Shard: target.Shard, Type: target.TabletType} - schemaDefResult := []map[string]string{{ + // initial load of table - kept empty + }, { "prior": "create view prior as select 1 from tbl", - "t1": "create view t1 as select 1 from tbl1", - "t2": "create view t2 as select 1 from tbl2", + }, { + "t1": "create view t1 as select 1 from tbl1", + "t2": "create view t2 as select 1 from tbl2", }, { "t2": "create view t2 as select 1,2 from tbl2", "t3": "create view t3 as select 1 from tbl3", @@ -357,37 +233,109 @@ func TestViewsTracking(t *testing.T) { "t4": "create view t4 as select 1 from tbl4", }} - testcases := []struct { - testName string - updView []string - exp map[string]string - }{{ - testName: "new views", - updView: []string{"prior", "t1", "t2"}, - exp: map[string]string{ + testcases := []testCases{{ + testName: "initial view load", + expView: map[string]string{ + "prior": "select 1 from tbl"}, + }, { + testName: "new view t1, t2", + updView: []string{"t1", "t2"}, + expView: map[string]string{ "t1": "select 1 from tbl1", "t2": "select 1 from tbl2", "prior": "select 1 from tbl"}, }, { testName: "delete 
prior, updated t2 and new t3", updView: []string{"prior", "t2", "t3"}, - exp: map[string]string{ + expView: map[string]string{ "t1": "select 1 from tbl1", "t2": "select 1, 2 from tbl2", "t3": "select 1 from tbl3"}, }, { testName: "new t4", updView: []string{"t4"}, - exp: map[string]string{ + expView: map[string]string{ "t1": "select 1 from tbl1", "t2": "select 1, 2 from tbl2", "t3": "select 1 from tbl3", "t4": "select 1 from tbl4"}, }} + testTracker(t, schemaDefResult, testcases) +} + +// TestTableInfoRetrieval tests that the tracker is able to retrieve required information from ddl statement. +func TestTableInfoRetrieval(t *testing.T) { + schemaDefResult := []map[string]string{{ + "my_tbl": "CREATE TABLE `my_tbl` (" + + "`id` bigint NOT NULL AUTO_INCREMENT," + + "`name` varchar(50) CHARACTER SET latin1 COLLATE latin1_swedish_ci DEFAULT NULL," + + "`email` varbinary(100) DEFAULT NULL," + + "PRIMARY KEY (`id`)," + + "KEY `id` (`id`,`name`)) " + + "ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", + }, { + // initial load of view - kept empty + }, { + "my_child_tbl": "CREATE TABLE `my_child_tbl` (" + + "`id` bigint NOT NULL AUTO_INCREMENT," + + "`name` varchar(50) CHARACTER SET latin1 COLLATE latin1_swedish_ci DEFAULT NULL," + + "`code` varchar(6) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT NULL," + + "`my_id` bigint DEFAULT NULL," + + "PRIMARY KEY (`id`)," + + "KEY `my_id` (`my_id`,`name`)," + + "CONSTRAINT `my_child_tbl_ibfk_1` FOREIGN KEY (`my_id`, `name`) REFERENCES `my_tbl` (`id`, `name`) ON DELETE CASCADE) " + + "ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", + }} + + testcases := []testCases{{ + testName: "initial table load", + expTbl: map[string][]vindexes.Column{ + "my_tbl": { + {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64, CollationName: "utf8mb4_0900_ai_ci"}, + {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, CollationName: "latin1_swedish_ci"}, + {Name: 
sqlparser.NewIdentifierCI("email"), Type: querypb.Type_VARBINARY, CollationName: "utf8mb4_0900_ai_ci"}, + }, + }, + }, { + testName: "new tables", + updTbl: []string{"my_child_tbl"}, + expTbl: map[string][]vindexes.Column{ + "my_tbl": { + {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64, CollationName: "utf8mb4_0900_ai_ci"}, + {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, CollationName: "latin1_swedish_ci"}, + {Name: sqlparser.NewIdentifierCI("email"), Type: querypb.Type_VARBINARY, CollationName: "utf8mb4_0900_ai_ci"}, + }, + "my_child_tbl": { + {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64, CollationName: "utf8mb4_0900_ai_ci"}, + {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, CollationName: "latin1_swedish_ci"}, + {Name: sqlparser.NewIdentifierCI("code"), Type: querypb.Type_VARCHAR, CollationName: "utf8mb4_0900_ai_ci"}, + {Name: sqlparser.NewIdentifierCI("my_id"), Type: querypb.Type_INT64, CollationName: "utf8mb4_0900_ai_ci"}, + }, + }, + expFk: map[string]string{ + "my_tbl": "", + "my_child_tbl": "foreign key (my_id, `name`) references my_tbl (id, `name`) on delete cascade", + }, + }} + + testTracker(t, schemaDefResult, testcases) +} + +type testCases struct { + testName string + + updTbl []string + expTbl map[string][]vindexes.Column + expFk map[string]string + + updView []string + expView map[string]string +} + +func testTracker(t *testing.T, schemaDefResult []map[string]string, tcases []testCases) { ch := make(chan *discovery.TabletHealth) - tracker := NewTracker(ch, "", true) - tracker.tables = nil // making tables map nil - so load keyspace does not try to load the tables information. 
+ tracker := NewTracker(ch, true) tracker.consumeDelay = 1 * time.Millisecond tracker.Start() defer tracker.Stop() @@ -397,10 +345,13 @@ func TestViewsTracking(t *testing.T) { wg.Done() }) + target := &querypb.Target{Cell: cell, Keyspace: keyspace, Shard: "-80", TabletType: topodatapb.TabletType_PRIMARY} + tablet := &topodatapb.Tablet{Keyspace: target.Keyspace, Shard: target.Shard, Type: target.TabletType} + sbc := sandboxconn.NewSandboxConn(tablet) sbc.SetSchemaResult(schemaDefResult) - for count, tcase := range testcases { + for count, tcase := range tcases { t.Run(tcase.testName, func(t *testing.T) { wg.Add(1) ch <- &discovery.TabletHealth{ @@ -408,18 +359,42 @@ func TestViewsTracking(t *testing.T) { Tablet: tablet, Target: target, Serving: true, - Stats: &querypb.RealtimeStats{ViewSchemaChanged: tcase.updView}, + Stats: &querypb.RealtimeStats{TableSchemaChanged: tcase.updTbl, ViewSchemaChanged: tcase.updView}, } require.False(t, waitTimeout(&wg, time.Second), "schema was updated but received no signal") - require.EqualValues(t, count+1, sbc.GetSchemaCount.Load()) + require.EqualValues(t, count+2, sbc.GetSchemaCount.Load()) _, keyspacePresent := tracker.tracked[target.Keyspace] require.Equal(t, true, keyspacePresent) - for k, v := range tcase.exp { - utils.MustMatch(t, v, sqlparser.String(tracker.GetViews(keyspace, k)), "mismatch for table: ", k) + for k, v := range tcase.expTbl { + utils.MustMatch(t, v, tracker.GetColumns(keyspace, k), "mismatch columns for table: ", k) + if len(tcase.expFk[k]) > 0 { + fks := tracker.GetForeignKeys(keyspace, k) + for _, fk := range fks { + utils.MustMatch(t, tcase.expFk[k], sqlparser.String(fk), "mismatch foreign keys for table: ", k) + } + } + } + + for k, v := range tcase.expView { + utils.MustMatch(t, v, sqlparser.String(tracker.GetViews(keyspace, k)), "mismatch for view: ", k) } }) } } + +func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool { + c := make(chan struct{}) + go func() { + defer close(c) + wg.Wait() 
+ }() + select { + case <-c: + return false // completed normally + case <-time.After(timeout): + return true // timed out + } +} diff --git a/go/vt/vtgate/schema/update_controller.go b/go/vt/vtgate/schema/update_controller.go index 0d595a0897d..f68a9448d55 100644 --- a/go/vt/vtgate/schema/update_controller.go +++ b/go/vt/vtgate/schema/update_controller.go @@ -20,7 +20,7 @@ import ( "sync" "time" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -84,8 +84,8 @@ func (u *updateController) consume() { // checkIfWeShouldIgnoreKeyspace inspects an error and // will mark a keyspace as failed and won't try to load more information from it func checkIfWeShouldIgnoreKeyspace(err error) bool { - sqlErr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) - if sqlErr.Num == mysql.ERBadDb || sqlErr.Num == mysql.ERNoSuchTable { + sqlErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) + if sqlErr.Num == sqlerror.ERBadDb || sqlErr.Num == sqlerror.ERNoSuchTable { // if we are missing the db or table, no point in retrying return true } diff --git a/go/vt/vtgate/semantics/analyzer.go b/go/vt/vtgate/semantics/analyzer.go index d08212d0ad0..1cb457f6882 100644 --- a/go/vt/vtgate/semantics/analyzer.go +++ b/go/vt/vtgate/semantics/analyzer.go @@ -30,6 +30,7 @@ type analyzer struct { binder *binder typer *typer rewriter *earlyRewriter + sig QuerySignature err error inProjection int @@ -101,6 +102,10 @@ func (a *analyzer) newSemTable(statement sqlparser.Statement, coll collations.ID if isCommented { comments = commentedStmt.GetParsedComments() } + columns := map[*sqlparser.Union]sqlparser.SelectExprs{} + for union, info := range a.tables.unionInfo { + columns[union] = info.exprs + } return &SemTable{ Recursive: a.binder.recursive, @@ -111,11 +116,12 @@ func (a *analyzer) newSemTable(statement sqlparser.Statement, coll collations.ID NotUnshardedErr: a.unshardedErr, Warning: a.warning, Comments: comments, - 
SubqueryMap: a.binder.subqueryMap, - SubqueryRef: a.binder.subqueryRef, ColumnEqualities: map[columnName][]sqlparser.Expr{}, Collation: coll, ExpandedColumns: a.rewriter.expandedColumns, + columns: columns, + StatementIDs: a.scoper.statementIDs, + QuerySignature: a.sig, } } @@ -156,6 +162,8 @@ func (a *analyzer) analyzeDown(cursor *sqlparser.Cursor) bool { // log any warn in rewriting. a.warning = a.rewriter.warning + a.noteQuerySignature(cursor.Node()) + a.enterProjection(cursor) // this is the visitor going down the tree. Returning false here would just not visit the children // to the current node, but that is not what we want if we have encountered an error. @@ -168,11 +176,6 @@ func (a *analyzer) analyzeUp(cursor *sqlparser.Cursor) bool { return false } - if err := a.binder.up(cursor); err != nil { - a.setError(err) - return true - } - if err := a.scoper.up(cursor); err != nil { a.setError(err) return false @@ -181,11 +184,22 @@ func (a *analyzer) analyzeUp(cursor *sqlparser.Cursor) bool { a.setError(err) return false } + + if err := a.binder.up(cursor); err != nil { + a.setError(err) + return true + } + if err := a.typer.up(cursor); err != nil { a.setError(err) return false } + if err := a.rewriter.up(cursor); err != nil { + a.setError(err) + return true + } + a.leaveProjection(cursor) return a.shouldContinue() } @@ -277,6 +291,27 @@ func (a *analyzer) tableSetFor(t *sqlparser.AliasedTableExpr) TableSet { return a.tables.tableSetFor(t) } +func (a *analyzer) noteQuerySignature(node sqlparser.SQLNode) { + switch node := node.(type) { + case *sqlparser.Union: + a.sig.Union = true + if node.Distinct { + a.sig.Distinct = true + } + case *sqlparser.Subquery: + a.sig.SubQueries = true + case *sqlparser.Select: + if node.Distinct { + a.sig.Distinct = true + } + if node.GroupBy != nil { + a.sig.Aggregation = true + } + case sqlparser.AggrFunc: + a.sig.Aggregation = true + } +} + // ProjError is used to mark an error as something that should only be returned // if the 
planner fails to merge everything down to a single route type ProjError struct { diff --git a/go/vt/vtgate/semantics/analyzer_test.go b/go/vt/vtgate/semantics/analyzer_test.go index 6e20496246d..21222da2263 100644 --- a/go/vt/vtgate/semantics/analyzer_test.go +++ b/go/vt/vtgate/semantics/analyzer_test.go @@ -17,7 +17,6 @@ limitations under the License. package semantics import ( - "fmt" "testing" "github.com/stretchr/testify/assert" @@ -26,7 +25,6 @@ import ( "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -35,11 +33,11 @@ var T0 TableSet var ( // Just here to make outputs more readable None = EmptyTableSet() - T1 = SingleTableSet(0) - T2 = SingleTableSet(1) - T3 = SingleTableSet(2) - T4 = SingleTableSet(3) - T5 = SingleTableSet(4) + TS0 = SingleTableSet(0) + TS1 = SingleTableSet(1) + TS2 = SingleTableSet(2) + TS3 = SingleTableSet(3) + TS4 = SingleTableSet(4) ) func extract(in *sqlparser.Select, idx int) sqlparser.Expr { @@ -68,8 +66,8 @@ func TestBindingSingleTablePositive(t *testing.T) { assert.Equal(t, SingleTableSet(0), ts) recursiveDeps := semTable.RecursiveDeps(extract(sel, 0)) - assert.Equal(t, T1, recursiveDeps, query) - assert.Equal(t, T1, semTable.DirectDeps(extract(sel, 0)), query) + assert.Equal(t, TS0, recursiveDeps, query) + assert.Equal(t, TS0, semTable.DirectDeps(extract(sel, 0)), query) assert.Equal(t, 1, recursiveDeps.NumberOfTables(), "number of tables is wrong") }) } @@ -105,7 +103,7 @@ func TestBindingSingleAliasedTablePositive(t *testing.T) { assert.Equal(t, SingleTableSet(0), ts) recursiveDeps := semTable.RecursiveDeps(extract(sel, 0)) - require.Equal(t, T1, recursiveDeps, query) + require.Equal(t, TS0, recursiveDeps, query) assert.Equal(t, 1, recursiveDeps.NumberOfTables(), "number of tables is wrong") }) } @@ -163,35 +161,35 @@ func TestBindingMultiTablePositive(t *testing.T) { } 
queries := []testCase{{ query: "select t.col from t, s", - deps: T1, + deps: TS0, numberOfTables: 1, }, { query: "select s.col from t join s", - deps: T2, + deps: TS1, numberOfTables: 1, }, { query: "select max(t.col+s.col) from t, s", - deps: MergeTableSets(T1, T2), + deps: MergeTableSets(TS0, TS1), numberOfTables: 2, }, { query: "select max(t.col+s.col) from t join s", - deps: MergeTableSets(T1, T2), + deps: MergeTableSets(TS0, TS1), numberOfTables: 2, }, { query: "select case t.col when s.col then r.col else u.col end from t, s, r, w, u", - deps: MergeTableSets(T1, T2, T3, T5), + deps: MergeTableSets(TS0, TS1, TS2, TS4), numberOfTables: 4, // }, { // TODO: move to subquery // make sure that we don't let sub-query dependencies leak out by mistake // query: "select t.col + (select 42 from s) from t", - // deps: T1, + // deps: TS0, // }, { // query: "select (select 42 from s where r.id = s.id) from r", - // deps: T1 | T2, + // deps: TS0 | TS1, }, { query: "select u1.a + u2.a from u1, u2", - deps: MergeTableSets(T1, T2), + deps: MergeTableSets(TS0, TS1), numberOfTables: 2, }} for _, query := range queries { @@ -213,19 +211,19 @@ func TestBindingMultiAliasedTablePositive(t *testing.T) { } queries := []testCase{{ query: "select X.col from t as X, s as S", - deps: T1, + deps: TS0, numberOfTables: 1, }, { query: "select X.col+S.col from t as X, s as S", - deps: MergeTableSets(T1, T2), + deps: MergeTableSets(TS0, TS1), numberOfTables: 2, }, { query: "select max(X.col+S.col) from t as X, s as S", - deps: MergeTableSets(T1, T2), + deps: MergeTableSets(TS0, TS1), numberOfTables: 2, }, { query: "select max(X.col+s.col) from t as X, s", - deps: MergeTableSets(T1, T2), + deps: MergeTableSets(TS0, TS1), numberOfTables: 2, }} for _, query := range queries { @@ -487,13 +485,13 @@ func TestScopeForSubqueries(t *testing.T) { }{ { sql: `select t.col1, (select t.col2 from z as t) from x as t`, - deps: T2, + deps: TS1, }, { sql: `select t.col1, (select t.col2 from z) from x as t`, - 
deps: T1, + deps: TS0, }, { sql: `select t.col1, (select (select z.col2 from y) from z) from x as t`, - deps: T2, + deps: TS1, }, { sql: `select t.col1, (select (select y.col2 from y) from z) from x as t`, deps: None, @@ -502,7 +500,7 @@ func TestScopeForSubqueries(t *testing.T) { deps: None, }, { sql: `select t.col1, (select id from t) from x as t`, - deps: T2, + deps: TS1, }, } for _, tc := range tcases { @@ -521,116 +519,28 @@ func TestScopeForSubqueries(t *testing.T) { } } -func TestSubqueriesMappingWhereClause(t *testing.T) { - tcs := []struct { - sql string - opCode opcode.PulloutOpcode - otherSideName string - }{ - { - sql: "select id from t1 where id in (select uid from t2)", - opCode: opcode.PulloutIn, - otherSideName: "id", - }, - { - sql: "select id from t1 where id not in (select uid from t2)", - opCode: opcode.PulloutNotIn, - otherSideName: "id", - }, - { - sql: "select id from t where col1 = (select uid from t2 order by uid desc limit 1)", - opCode: opcode.PulloutValue, - otherSideName: "col1", - }, - { - sql: "select id from t where exists (select uid from t2 where uid = 42)", - opCode: opcode.PulloutExists, - otherSideName: "", - }, - { - sql: "select id from t where col1 >= (select uid from t2 where uid = 42)", - opCode: opcode.PulloutValue, - otherSideName: "col1", - }, - } - - for i, tc := range tcs { - t.Run(fmt.Sprintf("%d_%s", i+1, tc.sql), func(t *testing.T) { - stmt, semTable := parseAndAnalyze(t, tc.sql, "d") - sel, _ := stmt.(*sqlparser.Select) - - var subq *sqlparser.Subquery - switch whereExpr := sel.Where.Expr.(type) { - case *sqlparser.ComparisonExpr: - subq = whereExpr.Right.(*sqlparser.Subquery) - case *sqlparser.ExistsExpr: - subq = whereExpr.Subquery - } - - extractedSubq := semTable.SubqueryRef[subq] - assert.True(t, sqlparser.Equals.Expr(extractedSubq.Subquery, subq)) - assert.True(t, sqlparser.Equals.Expr(extractedSubq.Original, sel.Where.Expr)) - assert.EqualValues(t, tc.opCode, extractedSubq.OpCode) - if tc.otherSideName == "" 
{ - assert.Nil(t, extractedSubq.OtherSide) - } else { - assert.True(t, sqlparser.Equals.Expr(extractedSubq.OtherSide, sqlparser.NewColName(tc.otherSideName))) - } - }) - } -} - -func TestSubqueriesMappingSelectExprs(t *testing.T) { - tcs := []struct { - sql string - selExprIdx int - }{ - { - sql: "select (select id from t1)", - selExprIdx: 0, - }, - { - sql: "select id, (select id from t1) from t1", - selExprIdx: 1, - }, - } - - for i, tc := range tcs { - t.Run(fmt.Sprintf("%d_%s", i+1, tc.sql), func(t *testing.T) { - stmt, semTable := parseAndAnalyze(t, tc.sql, "d") - sel, _ := stmt.(*sqlparser.Select) - - subq := sel.SelectExprs[tc.selExprIdx].(*sqlparser.AliasedExpr).Expr.(*sqlparser.Subquery) - extractedSubq := semTable.SubqueryRef[subq] - assert.True(t, sqlparser.Equals.Expr(extractedSubq.Subquery, subq)) - assert.True(t, sqlparser.Equals.Expr(extractedSubq.Original, subq)) - assert.EqualValues(t, opcode.PulloutValue, extractedSubq.OpCode) - }) - } -} - func TestSubqueryOrderByBinding(t *testing.T) { queries := []struct { query string expected TableSet }{{ query: "select * from user u where exists (select * from user order by col)", - expected: T2, + expected: TS1, }, { query: "select * from user u where exists (select * from user order by user.col)", - expected: T2, + expected: TS1, }, { query: "select * from user u where exists (select * from user order by u.col)", - expected: T1, + expected: TS0, }, { query: "select * from dbName.user as u where exists (select * from dbName.user order by u.col)", - expected: T1, + expected: TS0, }, { query: "select * from dbName.user where exists (select * from otherDb.user order by dbName.user.col)", - expected: T1, + expected: TS0, }, { query: "select id from dbName.t1 where exists (select * from dbName.t2 order by dbName.t1.id)", - expected: T1, + expected: TS0, }} for _, tc := range queries { @@ -655,52 +565,52 @@ func TestOrderByBindingTable(t *testing.T) { deps TableSet }{{ "select col from tabl order by col", - T1, + 
TS0, }, { "select tabl.col from d.tabl order by col", - T1, + TS0, }, { "select d.tabl.col from d.tabl order by col", - T1, + TS0, }, { "select col from tabl order by tabl.col", - T1, + TS0, }, { "select col from tabl order by d.tabl.col", - T1, + TS0, }, { "select col from tabl order by 1", - T1, + TS0, }, { "select col as c from tabl order by c", - T1, + TS0, }, { "select 1 as c from tabl order by c", T0, }, { "select name, name from t1, t2 order by name", - T2, + TS1, }, { "(select id from t1) union (select uid from t2) order by id", - MergeTableSets(T1, T2), + MergeTableSets(TS0, TS1), }, { "select id from t1 union (select uid from t2) order by 1", - MergeTableSets(T1, T2), + MergeTableSets(TS0, TS1), }, { "select id from t1 union select uid from t2 union (select name from t) order by 1", - MergeTableSets(T1, T2, T3), + MergeTableSets(TS0, TS1, TS2), }, { "select a.id from t1 as a union (select uid from t2) order by 1", - MergeTableSets(T1, T2), + MergeTableSets(TS0, TS1), }, { "select b.id as a from t1 as b union (select uid as c from t2) order by 1", - MergeTableSets(T1, T2), + MergeTableSets(TS0, TS1), }, { "select a.id from t1 as a union (select uid from t2, t union (select name from t) order by 1) order by 1", - MergeTableSets(T1, T2, T4), + MergeTableSets(TS0, TS1, TS3), }, { "select a.id from t1 as a union (select uid from t2, t union (select name from t) order by 1) order by id", - MergeTableSets(T1, T2, T4), + MergeTableSets(TS0, TS1, TS3), }} for _, tc := range tcases { t.Run(tc.sql, func(t *testing.T) { @@ -727,49 +637,49 @@ func TestGroupByBinding(t *testing.T) { deps TableSet }{{ "select col from tabl group by col", - T1, + TS0, }, { "select col from tabl group by tabl.col", - T1, + TS0, }, { "select col from tabl group by d.tabl.col", - T1, + TS0, }, { "select tabl.col as x from tabl group by x", - T1, + TS0, }, { "select tabl.col as x from tabl group by col", - T1, + TS0, }, { "select d.tabl.col as x from tabl group by x", - T1, + TS0, }, { 
"select d.tabl.col as x from tabl group by col", - T1, + TS0, }, { "select col from tabl group by 1", - T1, + TS0, }, { "select col as c from tabl group by c", - T1, + TS0, }, { "select 1 as c from tabl group by c", T0, }, { "select t1.id from t1, t2 group by id", - T1, + TS0, }, { "select id from t, t1 group by id", - T2, + TS1, }, { "select id from t, t1 group by id", - T2, + TS1, }, { "select a.id from t as a, t1 group by id", - T1, + TS0, }, { "select a.id from t, t1 as a group by id", - T2, + TS1, }} for _, tc := range tcases { t.Run(tc.sql, func(t *testing.T) { @@ -788,43 +698,43 @@ func TestHavingBinding(t *testing.T) { deps TableSet }{{ "select col from tabl having col = 1", - T1, + TS0, }, { "select col from tabl having tabl.col = 1", - T1, + TS0, }, { "select col from tabl having d.tabl.col = 1", - T1, + TS0, }, { "select tabl.col as x from tabl having x = 1", - T1, + TS0, }, { "select tabl.col as x from tabl having col", - T1, + TS0, }, { "select col from tabl having 1 = 1", T0, }, { "select col as c from tabl having c = 1", - T1, + TS0, }, { "select 1 as c from tabl having c = 1", T0, }, { "select t1.id from t1, t2 having id = 1", - T1, + TS0, }, { "select t.id from t, t1 having id = 1", - T1, + TS0, }, { "select t.id, count(*) as a from t, t1 group by t.id having a = 1", - MergeTableSets(T1, T2), + MergeTableSets(TS0, TS1), }, { "select t.id, sum(t2.name) as a from t, t2 group by t.id having a = 1", - T2, + TS1, }, { sql: "select u2.a, u1.a from u1, u2 having u2.a = 2", - deps: T2, + deps: TS1, }} for _, tc := range tcases { t.Run(tc.sql, func(t *testing.T) { @@ -854,8 +764,8 @@ func TestUnionCheckFirstAndLastSelectsDeps(t *testing.T) { d1 := semTable.RecursiveDeps(extract(sel1, 0)) d2 := semTable.RecursiveDeps(extract(sel2, 0)) - assert.Equal(t, T1, d1) - assert.Equal(t, T2, d2) + assert.Equal(t, TS0, d1) + assert.Equal(t, TS1, d2) } func TestUnionOrderByRewrite(t *testing.T) { @@ -918,6 +828,12 @@ func TestInvalidQueries(t *testing.T) { }, { sql: 
"select t1.does_not_exist from t1, t2", notUnshardedErr: "column 't1.does_not_exist' not found", + }, { + sql: "select 1 from t1 where id = (select 1, 2)", + serr: "Operand should contain 1 column(s)", + }, { + sql: "select 1 from t1 where (id, id) in (select 1, 2, 3)", + serr: "Operand should contain 2 column(s)", }} for _, tc := range tcases { @@ -957,8 +873,8 @@ func TestUnionWithOrderBy(t *testing.T) { d1 := semTable.RecursiveDeps(extract(sel1, 0)) d2 := semTable.RecursiveDeps(extract(sel2, 0)) - assert.Equal(t, T1, d1) - assert.Equal(t, T2, d2) + assert.Equal(t, TS0, d1) + assert.Equal(t, TS1, d2) } func TestScopingWDerivedTables(t *testing.T) { @@ -970,31 +886,31 @@ func TestScopingWDerivedTables(t *testing.T) { }{ { query: "select id from (select x as id from user) as t", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select id from (select foo as id from user) as t", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select id from (select foo as id from (select x as foo from user) as c) as t", - recursiveExpectation: T1, - expectation: T3, + recursiveExpectation: TS0, + expectation: TS2, }, { query: "select t.id from (select foo as id from user) as t", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select t.id2 from (select foo as id from user) as t", errorMessage: "column 't.id2' not found", }, { query: "select id from (select 42 as id) as t", recursiveExpectation: T0, - expectation: T2, + expectation: TS1, }, { query: "select t.id from (select 42 as id) as t", recursiveExpectation: T0, - expectation: T2, + expectation: TS1, }, { query: "select ks.t.id from (select 42 as id) as t", errorMessage: "column 'ks.t.id' not found", @@ -1003,24 +919,24 @@ func TestScopingWDerivedTables(t *testing.T) { errorMessage: "Duplicate column name 'id'", }, { query: "select t.baz = 1 from 
(select id as baz from user) as t", - expectation: T2, - recursiveExpectation: T1, + expectation: TS1, + recursiveExpectation: TS0, }, { query: "select t.id from (select * from user, music) as t", - expectation: T3, - recursiveExpectation: MergeTableSets(T1, T2), + expectation: TS2, + recursiveExpectation: MergeTableSets(TS0, TS1), }, { query: "select t.id from (select * from user, music) as t order by t.id", - expectation: T3, - recursiveExpectation: MergeTableSets(T1, T2), + expectation: TS2, + recursiveExpectation: MergeTableSets(TS0, TS1), }, { query: "select t.id from (select * from user) as t join user as u on t.id = u.id", - expectation: T2, - recursiveExpectation: T1, + expectation: TS1, + recursiveExpectation: TS0, }, { query: "select t.col1 from t3 ua join (select t1.id, t1.col1 from t1 join t2) as t", - expectation: T4, - recursiveExpectation: T2, + expectation: TS3, + recursiveExpectation: TS1, }, { query: "select uu.test from (select id from t1) uu", errorMessage: "column 'uu.test' not found", @@ -1032,8 +948,8 @@ func TestScopingWDerivedTables(t *testing.T) { errorMessage: "column 'uu.id' not found", }, { query: "select uu.id from (select id from t1) as uu where exists (select * from t2 as uu where uu.id = uu.uid)", - expectation: T2, - recursiveExpectation: T1, + expectation: TS1, + recursiveExpectation: TS0, }, { query: "select 1 from user uu where exists (select 1 from user where exists (select 1 from (select 1 from t1) uu where uu.user_id = uu.id))", expectation: T0, @@ -1064,6 +980,48 @@ func TestScopingWDerivedTables(t *testing.T) { } } +func TestJoinPredicateDependencies(t *testing.T) { + // create table t() + // create table t1(id bigint) + // create table t2(uid bigint, name varchar(255)) + + queries := []struct { + query string + recursiveExpect TableSet + directExpect TableSet + }{{ + query: "select 1 from t1 join t2 on t1.id = t2.uid", + recursiveExpect: MergeTableSets(TS0, TS1), + directExpect: MergeTableSets(TS0, TS1), + }, { + query: 
"select 1 from (select * from t1) x join t2 on x.id = t2.uid", + recursiveExpect: MergeTableSets(TS0, TS2), + directExpect: MergeTableSets(TS1, TS2), + }, { + query: "select 1 from (select id from t1) x join t2 on x.id = t2.uid", + recursiveExpect: MergeTableSets(TS0, TS2), + directExpect: MergeTableSets(TS1, TS2), + }, { + query: "select 1 from (select id from t1 union select id from t) x join t2 on x.id = t2.uid", + recursiveExpect: MergeTableSets(TS0, TS1, TS3), + directExpect: MergeTableSets(TS2, TS3), + }} + for _, query := range queries { + t.Run(query.query, func(t *testing.T) { + parse, err := sqlparser.Parse(query.query) + require.NoError(t, err) + + st, err := Analyze(parse, "user", fakeSchemaInfo()) + require.NoError(t, err) + + sel := parse.(*sqlparser.Select) + expr := sel.From[0].(*sqlparser.JoinTableExpr).Condition.On + assert.Equal(t, query.recursiveExpect, st.RecursiveDeps(expr), "RecursiveDeps") + assert.Equal(t, query.directExpect, st.DirectDeps(expr), "DirectDeps") + }) + } +} + func TestDerivedTablesOrderClause(t *testing.T) { queries := []struct { query string @@ -1071,40 +1029,40 @@ func TestDerivedTablesOrderClause(t *testing.T) { expectation TableSet }{{ query: "select 1 from (select id from user) as t order by id", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select id from (select id from user) as t order by id", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select id from (select id from user) as t order by t.id", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select id as foo from (select id from user) as t order by foo", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select bar from (select id as bar from user) as t order by bar", - recursiveExpectation: T1, - expectation: T2, + 
recursiveExpectation: TS0, + expectation: TS1, }, { query: "select bar as foo from (select id as bar from user) as t order by bar", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select bar as foo from (select id as bar from user) as t order by foo", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select bar as foo from (select id as bar, oo from user) as t order by oo", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select bar as foo from (select id, oo from user) as t(bar,oo) order by bar", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }} si := &FakeSI{Tables: map[string]*vindexes.Table{"t": {Name: sqlparser.NewIdentifierCS("t")}}} for _, query := range queries { @@ -1132,13 +1090,13 @@ func TestScopingWComplexDerivedTables(t *testing.T) { }{ { query: "select 1 from user uu where exists (select 1 from user where exists (select 1 from (select 1 from t1) uu where uu.user_id = uu.id))", - rightExpectation: T1, - leftExpectation: T1, + rightExpectation: TS0, + leftExpectation: TS0, }, { query: "select 1 from user.user uu where exists (select 1 from user.user as uu where exists (select 1 from (select 1 from user.t1) uu where uu.user_id = uu.id))", - rightExpectation: T2, - leftExpectation: T2, + rightExpectation: TS1, + leftExpectation: TS1, }, } for _, query := range queries { @@ -1174,19 +1132,19 @@ func TestScopingWVindexTables(t *testing.T) { }{ { query: "select id from user_index where id = 1", - recursiveExpectation: T1, - expectation: T1, + recursiveExpectation: TS0, + expectation: TS0, }, { query: "select u.id + t.id from t as t join user_index as u where u.id = 1 and u.id = t.id", - recursiveExpectation: MergeTableSets(T1, T2), - expectation: MergeTableSets(T1, T2), + recursiveExpectation: MergeTableSets(TS0, TS1), + 
expectation: MergeTableSets(TS0, TS1), }, } for _, query := range queries { t.Run(query.query, func(t *testing.T) { parse, err := sqlparser.Parse(query.query) require.NoError(t, err) - hash, _ := vindexes.NewHash("user_index", nil) + hash, _ := vindexes.CreateVindex("hash", "user_index", nil) st, err := Analyze(parse, "user", &FakeSI{ Tables: map[string]*vindexes.Table{ "t": {Name: sqlparser.NewIdentifierCS("t")}, @@ -1536,6 +1494,9 @@ var ks3 = &vindexes.Keyspace{ Sharded: true, } +// create table t() +// create table t1(id bigint) +// create table t2(uid bigint, name varchar(255)) func fakeSchemaInfo() *FakeSI { cols1 := []vindexes.Column{{ Name: sqlparser.NewIdentifierCI("id"), diff --git a/go/vt/vtgate/semantics/binder.go b/go/vt/vtgate/semantics/binder.go index b9239fae69f..e3fed7e5a68 100644 --- a/go/vt/vtgate/semantics/binder.go +++ b/go/vt/vtgate/semantics/binder.go @@ -20,7 +20,6 @@ import ( "strings" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/engine/opcode" ) // binder is responsible for finding all the column references in @@ -28,14 +27,12 @@ import ( // While doing this, it will also find the types for columns and // store these in the typer:s expression map type binder struct { - recursive ExprDependencies - direct ExprDependencies - scoper *scoper - tc *tableCollector - org originable - typer *typer - subqueryMap map[sqlparser.Statement][]*sqlparser.ExtractedSubquery - subqueryRef map[*sqlparser.Subquery]*sqlparser.ExtractedSubquery + recursive ExprDependencies + direct ExprDependencies + scoper *scoper + tc *tableCollector + org originable + typer *typer // every table will have an entry in the outer map. it will point to a map with all the columns // that this map is joined with using USING. 
@@ -51,8 +48,6 @@ func newBinder(scoper *scoper, org originable, tc *tableCollector, typer *typer) org: org, tc: tc, typer: typer, - subqueryMap: map[sqlparser.Statement][]*sqlparser.ExtractedSubquery{}, - subqueryRef: map[*sqlparser.Subquery]*sqlparser.ExtractedSubquery{}, usingJoinInfo: map[TableSet]map[string]TableSet{}, } } @@ -61,18 +56,6 @@ func (b *binder) up(cursor *sqlparser.Cursor) error { switch node := cursor.Node().(type) { case *sqlparser.Subquery: currScope := b.scoper.currentScope() - // do not extract subquery in insert statement. - if _, isInsert := currScope.stmt.(*sqlparser.Insert); isInsert { - return nil - } - sq, err := b.createExtractedSubquery(cursor, currScope, node) - if err != nil { - return err - } - - b.subqueryMap[currScope.stmt] = append(b.subqueryMap[currScope.stmt], sq) - b.subqueryRef[node] = sq - b.setSubQueryDependencies(node, currScope) case *sqlparser.JoinCondition: currScope := b.scoper.currentScope() @@ -84,13 +67,6 @@ func (b *binder) up(cursor *sqlparser.Cursor) error { } currScope.joinUsing[ident.Lowered()] = deps.direct } - if len(node.Using) > 0 { - err := rewriteJoinUsing(currScope, node.Using, b.org) - if err != nil { - return err - } - node.Using = nil - } case *sqlparser.ColName: currentScope := b.scoper.currentScope() deps, err := b.resolveColumn(node, currentScope, false) @@ -116,6 +92,20 @@ func (b *binder) up(cursor *sqlparser.Cursor) error { } case *sqlparser.CountStar: b.bindCountStar(node) + case *sqlparser.Union: + info := b.tc.unionInfo[node] + // TODO: this check can be removed and available type information should be used. 
+ if !info.isAuthoritative { + return nil + } + + for i, expr := range info.exprs { + ae := expr.(*sqlparser.AliasedExpr) + b.recursive[ae.Expr] = info.recursive[i] + if t := info.types[i]; t != nil { + b.typer.exprTypes[ae.Expr] = *t + } + } } return nil } @@ -206,39 +196,6 @@ func (b *binder) setSubQueryDependencies(subq *sqlparser.Subquery, currScope *sc b.direct[subq] = subqDirectDeps.KeepOnly(tablesToKeep) } -func (b *binder) createExtractedSubquery(cursor *sqlparser.Cursor, currScope *scope, subq *sqlparser.Subquery) (*sqlparser.ExtractedSubquery, error) { - if currScope.stmt == nil { - return nil, &BuggyError{Msg: "unable to bind subquery to select statement"} - } - - sq := &sqlparser.ExtractedSubquery{ - Subquery: subq, - Original: subq, - OpCode: int(opcode.PulloutValue), - } - - switch par := cursor.Parent().(type) { - case *sqlparser.ComparisonExpr: - switch par.Operator { - case sqlparser.InOp: - sq.OpCode = int(opcode.PulloutIn) - case sqlparser.NotInOp: - sq.OpCode = int(opcode.PulloutNotIn) - } - subq, exp := GetSubqueryAndOtherSide(par) - sq.Original = &sqlparser.ComparisonExpr{ - Left: exp, - Operator: par.Operator, - Right: subq, - } - sq.OtherSide = exp - case *sqlparser.ExistsExpr: - sq.OpCode = int(opcode.PulloutExists) - sq.Original = par - } - return sq, nil -} - func (b *binder) resolveColumn(colName *sqlparser.ColName, current *scope, allowMulti bool) (dependency, error) { var thisDeps dependencies first := true diff --git a/go/vt/vtgate/semantics/bitset/bitset.go b/go/vt/vtgate/semantics/bitset/bitset.go index 6bb1e2785aa..898d55e1d95 100644 --- a/go/vt/vtgate/semantics/bitset/bitset.go +++ b/go/vt/vtgate/semantics/bitset/bitset.go @@ -50,16 +50,9 @@ func toBitset(words []byte) Bitset { return *(*Bitset)(unsafe.Pointer(&words)) } -func minlen(a, b Bitset) int { - if len(a) < len(b) { - return len(a) - } - return len(b) -} - // Overlaps returns whether this Bitset and the input have any bits in common func (bs Bitset) Overlaps(b2 Bitset) 
bool { - min := minlen(bs, b2) + min := min(len(bs), len(b2)) for i := 0; i < min; i++ { if bs[i]&b2[i] != 0 { return true @@ -126,7 +119,7 @@ func (bs Bitset) And(b2 Bitset) Bitset { return "" } - merged := make([]byte, minlen(bs, b2)) + merged := make([]byte, min(len(bs), len(b2))) m := 0 for m = 0; m < len(merged); m++ { diff --git a/go/vt/vtgate/semantics/check_invalid.go b/go/vt/vtgate/semantics/check_invalid.go index 84b5a8a0bbc..0cc7f9c15b2 100644 --- a/go/vt/vtgate/semantics/check_invalid.go +++ b/go/vt/vtgate/semantics/check_invalid.go @@ -42,6 +42,8 @@ func (a *analyzer) checkForInvalidConstructs(cursor *sqlparser.Cursor) error { return checkDerived(node) case *sqlparser.AssignmentExpr: return vterrors.VT12001("Assignment expression") + case *sqlparser.Subquery: + return a.checkSubqueryColumns(cursor.Parent(), node) case *sqlparser.Insert: if node.Action == sqlparser.ReplaceAct { return ShardedError{Inner: &UnsupportedConstruct{errString: "REPLACE INTO with sharded keyspace"}} @@ -51,6 +53,39 @@ func (a *analyzer) checkForInvalidConstructs(cursor *sqlparser.Cursor) error { return nil } +// checkSubqueryColumns checks that subqueries used in comparisons have the correct number of columns +func (a *analyzer) checkSubqueryColumns(parent sqlparser.SQLNode, subq *sqlparser.Subquery) error { + cmp, ok := parent.(*sqlparser.ComparisonExpr) + if !ok { + return nil + } + var otherSide sqlparser.Expr + if cmp.Left == subq { + otherSide = cmp.Right + } else { + otherSide = cmp.Left + } + + cols := 1 + if tuple, ok := otherSide.(sqlparser.ValTuple); ok { + cols = len(tuple) + } + columns := subq.Select.GetColumns() + for _, expr := range columns { + _, ok := expr.(*sqlparser.StarExpr) + if ok { + // we can't check these queries properly. if we are able to push it down to mysql, + // it will be checked there. 
if not, we'll fail because we are missing the column + // information when we get to offset planning + return nil + } + } + if len(columns) != cols { + return &SubqueryColumnCountError{Expected: cols} + } + return nil +} + func checkDerived(node *sqlparser.DerivedTable) error { if node.Lateral { return vterrors.VT12001("lateral derived tables") @@ -132,6 +167,9 @@ func (a *analyzer) checkSelect(cursor *sqlparser.Cursor, node *sqlparser.Select) if a.scoper.currentScope().parent != nil { return &CantUseOptionHereError{Msg: errMsg} } + if node.Into != nil { + return ShardedError{Inner: &UnsupportedConstruct{errString: "INTO on sharded keyspace"}} + } return nil } diff --git a/go/vt/vtgate/semantics/derived_table.go b/go/vt/vtgate/semantics/derived_table.go index e0e4ca528a9..a88f39cf8af 100644 --- a/go/vt/vtgate/semantics/derived_table.go +++ b/go/vt/vtgate/semantics/derived_table.go @@ -33,41 +33,72 @@ type DerivedTable struct { cols []sqlparser.Expr tables TableSet isAuthoritative bool + + recursive []TableSet + types []*Type +} + +type unionInfo struct { + isAuthoritative bool + recursive []TableSet + types []*Type + exprs sqlparser.SelectExprs } var _ TableInfo = (*DerivedTable)(nil) -func createDerivedTableForExpressions(expressions sqlparser.SelectExprs, cols sqlparser.Columns, tables []TableInfo, org originable) *DerivedTable { - vTbl := &DerivedTable{isAuthoritative: true} +func createDerivedTableForExpressions( + expressions sqlparser.SelectExprs, + cols sqlparser.Columns, + tables []TableInfo, + org originable, + expanded bool, + recursiveDeps []TableSet, + types []*Type, +) *DerivedTable { + vTbl := &DerivedTable{isAuthoritative: expanded, recursive: recursiveDeps, types: types} for i, selectExpr := range expressions { switch expr := selectExpr.(type) { case *sqlparser.AliasedExpr: - vTbl.cols = append(vTbl.cols, expr.Expr) - if len(cols) > 0 { - vTbl.columnNames = append(vTbl.columnNames, cols[i].String()) - } else if expr.As.IsEmpty() { - switch expr := 
expr.Expr.(type) { - case *sqlparser.ColName: - // for projections, we strip out the qualifier and keep only the column name - vTbl.columnNames = append(vTbl.columnNames, expr.Name.String()) - default: - vTbl.columnNames = append(vTbl.columnNames, sqlparser.String(expr)) - } - } else { - vTbl.columnNames = append(vTbl.columnNames, expr.As.String()) - } + handleAliasedExpr(vTbl, expr, cols, i) case *sqlparser.StarExpr: - for _, table := range tables { - vTbl.tables = vTbl.tables.Merge(table.getTableSet(org)) - if !table.authoritative() { - vTbl.isAuthoritative = false - } - } + handleUnexpandedStarExpression(tables, vTbl, org) } } return vTbl } +func handleAliasedExpr(vTbl *DerivedTable, expr *sqlparser.AliasedExpr, cols sqlparser.Columns, i int) { + vTbl.cols = append(vTbl.cols, expr.Expr) + + if len(cols) > 0 { + vTbl.columnNames = append(vTbl.columnNames, cols[i].String()) + return + } + + if !expr.As.IsEmpty() { + vTbl.columnNames = append(vTbl.columnNames, expr.As.String()) + return + } + + switch expr := expr.Expr.(type) { + case *sqlparser.ColName: + // for projections, we strip out the qualifier and keep only the column name + vTbl.columnNames = append(vTbl.columnNames, expr.Name.String()) + default: + vTbl.columnNames = append(vTbl.columnNames, sqlparser.String(expr)) + } +} + +func handleUnexpandedStarExpression(tables []TableInfo, vTbl *DerivedTable, org originable) { + for _, table := range tables { + vTbl.tables = vTbl.tables.Merge(table.getTableSet(org)) + if !table.authoritative() { + vTbl.isAuthoritative = false + } + } +} + // dependencies implements the TableInfo interface func (dt *DerivedTable) dependencies(colName string, org originable) (dependencies, error) { directDeps := org.tableSetFor(dt.ASTNode) @@ -75,7 +106,7 @@ func (dt *DerivedTable) dependencies(colName string, org originable) (dependenci if !strings.EqualFold(name, colName) { continue } - _, recursiveDeps, qt := org.depsForExpr(dt.cols[i]) + recursiveDeps, qt := dt.recursive[i], 
dt.types[i] return createCertain(directDeps, recursiveDeps, qt), nil } @@ -135,6 +166,9 @@ func (dt *DerivedTable) getTableSet(_ originable) TableSet { // GetExprFor implements the TableInfo interface func (dt *DerivedTable) getExprFor(s string) (sqlparser.Expr, error) { + if !dt.isAuthoritative { + return nil, vterrors.VT09015() + } for i, colName := range dt.columnNames { if colName == s { return dt.cols[i], nil diff --git a/go/vt/vtgate/semantics/early_rewriter.go b/go/vt/vtgate/semantics/early_rewriter.go index b3553a2de73..d11d12023c4 100644 --- a/go/vt/vtgate/semantics/early_rewriter.go +++ b/go/vt/vtgate/semantics/early_rewriter.go @@ -17,15 +17,14 @@ limitations under the License. package semantics import ( + "fmt" "strconv" - "strings" "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/vtgate/evalengine" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/evalengine" ) type earlyRewriter struct { @@ -48,6 +47,8 @@ func (r *earlyRewriter) down(cursor *sqlparser.Cursor) error { handleOrderBy(r, cursor, node) case *sqlparser.OrExpr: rewriteOrExpr(cursor, node) + case *sqlparser.NotExpr: + rewriteNotExpr(cursor, node) case sqlparser.GroupBy: r.clause = "group statement" case *sqlparser.Literal: @@ -60,6 +61,43 @@ func (r *earlyRewriter) down(cursor *sqlparser.Cursor) error { return nil } +func rewriteNotExpr(cursor *sqlparser.Cursor, node *sqlparser.NotExpr) { + cmp, ok := node.Expr.(*sqlparser.ComparisonExpr) + if !ok { + return + } + + cmp.Operator = sqlparser.Inverse(cmp.Operator) + cursor.Replace(cmp) +} + +func (r *earlyRewriter) up(cursor *sqlparser.Cursor) error { + // this rewriting is done in the `up` phase, because we need the scope to have been + // filled in with the available tables + node, ok := cursor.Node().(*sqlparser.JoinTableExpr) + if !ok || len(node.Condition.Using) == 0 { + return nil + } + + err := rewriteJoinUsing(r.binder, 
node) + if err != nil { + return err + } + + // since the binder has already been over the join, we need to invoke it again so it + // can bind columns to the right tables + sqlparser.Rewrite(node.Condition.On, nil, func(cursor *sqlparser.Cursor) bool { + innerErr := r.binder.up(cursor) + if innerErr == nil { + return true + } + + err = innerErr + return false + }) + return err +} + // handleWhereClause processes WHERE clauses, specifically the HAVING clause. func handleWhereClause(node *sqlparser.Where, parent sqlparser.SQLNode) { if node.Type != sqlparser.HavingClause { @@ -96,7 +134,7 @@ func handleOrderBy(r *earlyRewriter, cursor *sqlparser.Cursor, node sqlparser.Or func rewriteOrExpr(cursor *sqlparser.Cursor, node *sqlparser.OrExpr) { newNode := rewriteOrFalse(*node) if newNode != nil { - cursor.Replace(newNode) + cursor.ReplaceAndRevisit(newNode) } } @@ -344,44 +382,25 @@ func rewriteOrFalse(orExpr sqlparser.OrExpr) sqlparser.Expr { // // This function returns an error if it encounters a non-authoritative table or // if it cannot find a SELECT statement to add the WHERE predicate to. -func rewriteJoinUsing( - current *scope, - using sqlparser.Columns, - org originable, -) error { - predicates, err := buildJoinPredicates(current, using, org) +func rewriteJoinUsing(b *binder, join *sqlparser.JoinTableExpr) error { + predicates, err := buildJoinPredicates(b, join) if err != nil { return err } - // now, we go up the scope until we find a SELECT - // with a where clause we can add this predicate to - for current != nil { - sel, found := current.stmt.(*sqlparser.Select) - if !found { - current = current.parent - continue - } - if sel.Where != nil { - predicates = append(predicates, sel.Where.Expr) - sel.Where = nil - } - sel.Where = &sqlparser.Where{ - Type: sqlparser.WhereClause, - Expr: sqlparser.AndExpressions(predicates...), - } - return nil + if len(predicates) > 0 { + join.Condition.On = sqlparser.AndExpressions(predicates...) 
+ join.Condition.Using = nil } - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "did not find WHERE clause") + return nil } // buildJoinPredicates constructs the join predicates for a given set of USING columns. // It returns a slice of sqlparser.Expr, each representing a join predicate for the given columns. -func buildJoinPredicates(current *scope, using sqlparser.Columns, org originable) ([]sqlparser.Expr, error) { - joinUsing := current.prepareUsingMap() +func buildJoinPredicates(b *binder, join *sqlparser.JoinTableExpr) ([]sqlparser.Expr, error) { var predicates []sqlparser.Expr - for _, column := range using { - foundTables, err := findTablesWithColumn(current, joinUsing, org, column) + for _, column := range join.Condition.Using { + foundTables, err := findTablesWithColumn(b, join, column) if err != nil { return nil, err } @@ -392,42 +411,79 @@ func buildJoinPredicates(current *scope, using sqlparser.Columns, org originable return predicates, nil } -// findTablesWithColumn finds the tables with the specified column in the current scope. 
-func findTablesWithColumn(current *scope, joinUsing map[TableSet]map[string]TableSet, org originable, column sqlparser.IdentifierCI) ([]sqlparser.TableName, error) { - var foundTables []sqlparser.TableName - - for _, tbl := range current.tables { - if !tbl.authoritative() { - return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "can't handle JOIN USING without authoritative tables") +func findOnlyOneTableInfoThatHasColumn(b *binder, tbl sqlparser.TableExpr, column sqlparser.IdentifierCI) ([]TableInfo, error) { + switch tbl := tbl.(type) { + case *sqlparser.AliasedTableExpr: + ts := b.tc.tableSetFor(tbl) + tblInfo := b.tc.Tables[ts.TableOffset()] + for _, info := range tblInfo.getColumns() { + if column.EqualString(info.Name) { + return []TableInfo{tblInfo}, nil + } } - - currTable := tbl.getTableSet(org) - usingCols := joinUsing[currTable] - if usingCols == nil { - usingCols = map[string]TableSet{} + return nil, nil + case *sqlparser.JoinTableExpr: + tblInfoR, err := findOnlyOneTableInfoThatHasColumn(b, tbl.RightExpr, column) + if err != nil { + return nil, err + } + tblInfoL, err := findOnlyOneTableInfoThatHasColumn(b, tbl.LeftExpr, column) + if err != nil { + return nil, err } - if hasColumnInTable(tbl, usingCols) { - tblName, err := tbl.Name() + return append(tblInfoL, tblInfoR...), nil + case *sqlparser.ParenTableExpr: + var tblInfo []TableInfo + for _, parenTable := range tbl.Exprs { + newTblInfo, err := findOnlyOneTableInfoThatHasColumn(b, parenTable, column) if err != nil { return nil, err } - foundTables = append(foundTables, tblName) + if tblInfo != nil && newTblInfo != nil { + return nil, vterrors.VT03021(column.String()) + } + if newTblInfo != nil { + tblInfo = newTblInfo + } } + return tblInfo, nil + default: + panic(fmt.Sprintf("unsupported TableExpr type in JOIN: %T", tbl)) } - - return foundTables, nil } -// hasColumnInTable checks if the specified table has the given column. 
-func hasColumnInTable(tbl TableInfo, usingCols map[string]TableSet) bool { - for _, col := range tbl.getColumns() { - _, found := usingCols[strings.ToLower(col.Name)] - if found { - return true +// findTablesWithColumn finds the tables with the specified column in the current scope. +func findTablesWithColumn(b *binder, join *sqlparser.JoinTableExpr, column sqlparser.IdentifierCI) ([]sqlparser.TableName, error) { + leftTableInfo, err := findOnlyOneTableInfoThatHasColumn(b, join.LeftExpr, column) + if err != nil { + return nil, err + } + + rightTableInfo, err := findOnlyOneTableInfoThatHasColumn(b, join.RightExpr, column) + if err != nil { + return nil, err + } + + if leftTableInfo == nil || rightTableInfo == nil { + return nil, ShardedError{Inner: vterrors.VT09015()} + } + var tableNames []sqlparser.TableName + for _, info := range leftTableInfo { + nm, err := info.Name() + if err != nil { + return nil, err } + tableNames = append(tableNames, nm) } - return false + for _, info := range rightTableInfo { + nm, err := info.Name() + if err != nil { + return nil, err + } + tableNames = append(tableNames, nm) + } + return tableNames, nil } // createComparisonPredicates creates a list of comparison predicates between the given column and foundTables. @@ -586,27 +642,3 @@ func (e *expanderState) storeExpandInfo(tbl TableInfo, tblName sqlparser.TableNa } e.expandedColumns[tblName] = append(e.expandedColumns[tblName], colName) } - -// createAliasedExpr creates an AliasedExpr with a ColName and an optional alias based on the given ColumnInfo. -// We need table qualifiers if there are more than one table in the FROM clause. -// If we are adding qualifiers, we also add an alias so the qualifiers do not show -// up in the result. For example, SELECT * FROM t1, t2 -> SELECT t1.col AS col, t2.col AS col ... 
-func (e *expanderState) createAliasedExpr( - col ColumnInfo, - tbl TableInfo, - tblName sqlparser.TableName, -) *sqlparser.AliasedExpr { - tableAliased := !tbl.GetExpr().As.IsEmpty() - withQualifier := e.needsQualifier || tableAliased - var colName *sqlparser.ColName - var alias sqlparser.IdentifierCI - if withQualifier { - colName = sqlparser.NewColNameWithQualifier(col.Name, tblName) - } else { - colName = sqlparser.NewColName(col.Name) - } - if e.needsQualifier { - alias = sqlparser.NewIdentifierCI(col.Name) - } - return &sqlparser.AliasedExpr{Expr: colName, As: alias} -} diff --git a/go/vt/vtgate/semantics/early_rewriter_test.go b/go/vt/vtgate/semantics/early_rewriter_test.go index f1b16853cfc..bd919fe9201 100644 --- a/go/vt/vtgate/semantics/early_rewriter_test.go +++ b/go/vt/vtgate/semantics/early_rewriter_test.go @@ -144,26 +144,32 @@ func TestExpandStar(t *testing.T) { }, { sql: "select * from t1 join t2 on t1.a = t2.c1", expSQL: "select t1.a as a, t1.b as b, t1.c as c, t2.c1 as c1, t2.c2 as c2 from t1 join t2 on t1.a = t2.c1", + }, { + sql: "select * from t1 left join t2 on t1.a = t2.c1", + expSQL: "select t1.a as a, t1.b as b, t1.c as c, t2.c1 as c1, t2.c2 as c2 from t1 left join t2 on t1.a = t2.c1", + }, { + sql: "select * from t1 right join t2 on t1.a = t2.c1", + expSQL: "select t1.a as a, t1.b as b, t1.c as c, t2.c1 as c1, t2.c2 as c2 from t1 right join t2 on t1.a = t2.c1", }, { sql: "select * from t2 join t4 using (c1)", - expSQL: "select t2.c1 as c1, t2.c2 as c2, t4.c4 as c4 from t2 join t4 where t2.c1 = t4.c1", + expSQL: "select t2.c1 as c1, t2.c2 as c2, t4.c4 as c4 from t2 join t4 on t2.c1 = t4.c1", expanded: "main.t2.c1, main.t2.c2, main.t4.c4", }, { sql: "select * from t2 join t4 using (c1) join t2 as X using (c1)", - expSQL: "select t2.c1 as c1, t2.c2 as c2, t4.c4 as c4, X.c2 as c2 from t2 join t4 join t2 as X where t2.c1 = t4.c1 and t2.c1 = X.c1 and t4.c1 = X.c1", + expSQL: "select t2.c1 as c1, t2.c2 as c2, t4.c4 as c4, X.c2 as c2 from t2 join 
t4 on t2.c1 = t4.c1 join t2 as X on t2.c1 = t4.c1 and t2.c1 = X.c1 and t4.c1 = X.c1", }, { sql: "select * from t2 join t4 using (c1), t2 as t2b join t4 as t4b using (c1)", - expSQL: "select t2.c1 as c1, t2.c2 as c2, t4.c4 as c4, t2b.c1 as c1, t2b.c2 as c2, t4b.c4 as c4 from t2 join t4, t2 as t2b join t4 as t4b where t2b.c1 = t4b.c1 and t2.c1 = t4.c1", + expSQL: "select t2.c1 as c1, t2.c2 as c2, t4.c4 as c4, t2b.c1 as c1, t2b.c2 as c2, t4b.c4 as c4 from t2 join t4 on t2.c1 = t4.c1, t2 as t2b join t4 as t4b on t2b.c1 = t4b.c1", }, { sql: "select * from t1 join t5 using (b)", - expSQL: "select t1.b as b, t1.a as a, t1.c as c, t5.a as a from t1 join t5 where t1.b = t5.b", + expSQL: "select t1.b as b, t1.a as a, t1.c as c, t5.a as a from t1 join t5 on t1.b = t5.b", expanded: "main.t1.a, main.t1.b, main.t1.c, main.t5.a", }, { sql: "select * from t1 join t5 using (b) having b = 12", - expSQL: "select t1.b as b, t1.a as a, t1.c as c, t5.a as a from t1 join t5 where t1.b = t5.b having b = 12", + expSQL: "select t1.b as b, t1.a as a, t1.c as c, t5.a as a from t1 join t5 on t1.b = t5.b having b = 12", }, { sql: "select 1 from t1 join t5 using (b) having b = 12", - expSQL: "select 1 from t1 join t5 where t1.b = t5.b having t1.b = 12", + expSQL: "select 1 from t1 join t5 on t1.b = t5.b having t1.b = 12", }, { sql: "select * from (select 12) as t", expSQL: "select t.`12` from (select 12 from dual) as t", @@ -265,13 +271,16 @@ func TestRewriteJoinUsingColumns(t *testing.T) { expErr string }{{ sql: "select 1 from t1 join t2 using (a) where a = 42", - expSQL: "select 1 from t1 join t2 where t1.a = t2.a and t1.a = 42", + expSQL: "select 1 from t1 join t2 on t1.a = t2.a where t1.a = 42", }, { sql: "select 1 from t1 join t2 using (a), t3 where a = 42", expErr: "Column 'a' in field list is ambiguous", }, { sql: "select 1 from t1 join t2 using (a), t1 as b join t3 on (a) where a = 42", expErr: "Column 'a' in field list is ambiguous", + }, { + sql: "select 1 from t1 left join t2 using 
(a) where a = 42", + expSQL: "select 1 from t1 left join t2 on t1.a = t2.a where t1.a = 42", }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { @@ -435,3 +444,101 @@ func TestSemTableDependenciesAfterExpandStar(t *testing.T) { }) } } + +func TestRewriteNot(t *testing.T) { + ks := &vindexes.Keyspace{ + Name: "main", + Sharded: false, + } + schemaInfo := &FakeSI{ + Tables: map[string]*vindexes.Table{ + "t1": { + Keyspace: ks, + Name: sqlparser.NewIdentifierCS("t1"), + Columns: []vindexes.Column{{ + Name: sqlparser.NewIdentifierCI("a"), + Type: sqltypes.VarChar, + }, { + Name: sqlparser.NewIdentifierCI("b"), + Type: sqltypes.VarChar, + }, { + Name: sqlparser.NewIdentifierCI("c"), + Type: sqltypes.VarChar, + }}, + ColumnListAuthoritative: true, + }, + }, + } + cDB := "db" + tcases := []struct { + sql string + expected string + }{{ + sql: "select a,b,c from t1 where not a = 12", + expected: "select a, b, c from t1 where a != 12", + }, { + sql: "select a from t1 where not a > 12", + expected: "select a from t1 where a <= 12", + }} + for _, tcase := range tcases { + t.Run(tcase.sql, func(t *testing.T) { + ast, err := sqlparser.Parse(tcase.sql) + require.NoError(t, err) + selectStatement, isSelectStatement := ast.(*sqlparser.Select) + require.True(t, isSelectStatement, "analyzer expects a select statement") + st, err := Analyze(selectStatement, cDB, schemaInfo) + + require.NoError(t, err) + require.NoError(t, st.NotUnshardedErr) + require.NoError(t, st.NotSingleRouteErr) + assert.Equal(t, tcase.expected, sqlparser.String(selectStatement)) + }) + } +} + +// TestConstantFolding tests that the rewriter is able to do various constant foldings properly. 
+func TestConstantFolding(t *testing.T) { + ks := &vindexes.Keyspace{ + Name: "main", + Sharded: false, + } + schemaInfo := &FakeSI{ + Tables: map[string]*vindexes.Table{ + "t1": { + Keyspace: ks, + Name: sqlparser.NewIdentifierCS("t1"), + Columns: []vindexes.Column{{ + Name: sqlparser.NewIdentifierCI("a"), + Type: sqltypes.VarChar, + }, { + Name: sqlparser.NewIdentifierCI("b"), + Type: sqltypes.VarChar, + }, { + Name: sqlparser.NewIdentifierCI("c"), + Type: sqltypes.VarChar, + }}, + ColumnListAuthoritative: true, + }, + }, + } + cDB := "db" + tcases := []struct { + sql string + expSQL string + }{{ + sql: "select 1 from t1 where (a, b) in ::fkc_vals and (2 is null or (1 is null or a in (1)))", + expSQL: "select 1 from t1 where (a, b) in ::fkc_vals and a in (1)", + }, { + sql: "select 1 from t1 where (false or (false or a in (1)))", + expSQL: "select 1 from t1 where a in (1)", + }} + for _, tcase := range tcases { + t.Run(tcase.sql, func(t *testing.T) { + ast, err := sqlparser.Parse(tcase.sql) + require.NoError(t, err) + _, err = Analyze(ast, cDB, schemaInfo) + require.NoError(t, err) + require.Equal(t, tcase.expSQL, sqlparser.String(ast)) + }) + } +} diff --git a/go/vt/vtgate/semantics/errors.go b/go/vt/vtgate/semantics/errors.go index 520dda98c42..8d0b23d7f82 100644 --- a/go/vt/vtgate/semantics/errors.go +++ b/go/vt/vtgate/semantics/errors.go @@ -34,6 +34,36 @@ type ( error bug() } + + SQLCalcFoundRowsUsageError struct{} + UnionWithSQLCalcFoundRowsError struct{} + MissingInVSchemaError struct{ Table TableInfo } + CantUseOptionHereError struct{ Msg string } + TableNotUpdatableError struct{ Table string } + UnsupportedNaturalJoinError struct{ JoinExpr *sqlparser.JoinTableExpr } + NotSequenceTableError struct{ Table string } + NextWithMultipleTablesError struct{ CountTables int } + LockOnlyWithDualError struct{ Node *sqlparser.LockingFunc } + JSONTablesError struct{ Table string } + QualifiedOrderInUnionError struct{ Table string } + BuggyError struct{ Msg string } + 
UnsupportedConstruct struct{ errString string } + AmbiguousColumnError struct{ Column string } + SubqueryColumnCountError struct{ Expected int } + ColumnsMissingInSchemaError struct{} + + UnsupportedMultiTablesInUpdateError struct { + ExprCount int + NotAlias bool + } + UnionColumnsDoNotMatchError struct { + FirstProj int + SecondProj int + } + ColumnNotFoundError struct { + Column *sqlparser.ColName + Table *sqlparser.TableName + } ) func eprintf(e error, format string, args ...any) string { @@ -49,11 +79,6 @@ func eprintf(e error, format string, args ...any) string { // Specific error implementations follow // UnionColumnsDoNotMatchError -type UnionColumnsDoNotMatchError struct { - FirstProj int - SecondProj int -} - func (e *UnionColumnsDoNotMatchError) ErrorState() vterrors.State { return vterrors.WrongNumberOfColumnsInSelect } @@ -67,11 +92,6 @@ func (e *UnionColumnsDoNotMatchError) Error() string { } // UnsupportedMultiTablesInUpdateError -type UnsupportedMultiTablesInUpdateError struct { - ExprCount int - NotAlias bool -} - func (e *UnsupportedMultiTablesInUpdateError) Error() string { switch { case e.NotAlias: @@ -84,10 +104,6 @@ func (e *UnsupportedMultiTablesInUpdateError) Error() string { func (e *UnsupportedMultiTablesInUpdateError) unsupported() {} // UnsupportedNaturalJoinError -type UnsupportedNaturalJoinError struct { - JoinExpr *sqlparser.JoinTableExpr -} - func (e *UnsupportedNaturalJoinError) Error() string { return eprintf(e, "%s", e.JoinExpr.Join.ToString()) } @@ -95,9 +111,6 @@ func (e *UnsupportedNaturalJoinError) Error() string { func (e *UnsupportedNaturalJoinError) unsupported() {} // UnionWithSQLCalcFoundRowsError -type UnionWithSQLCalcFoundRowsError struct { -} - func (e *UnionWithSQLCalcFoundRowsError) Error() string { return eprintf(e, "SQL_CALC_FOUND_ROWS not supported with union") } @@ -105,10 +118,6 @@ func (e *UnionWithSQLCalcFoundRowsError) Error() string { func (e *UnionWithSQLCalcFoundRowsError) unsupported() {} // 
TableNotUpdatableError -type TableNotUpdatableError struct { - Table string -} - func (e *TableNotUpdatableError) Error() string { return eprintf(e, "The target table %s of the UPDATE is not updatable", e.Table) } @@ -122,9 +131,6 @@ func (e *TableNotUpdatableError) ErrorCode() vtrpcpb.Code { } // SQLCalcFoundRowsUsageError -type SQLCalcFoundRowsUsageError struct { -} - func (e *SQLCalcFoundRowsUsageError) Error() string { return eprintf(e, "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'") } @@ -134,10 +140,6 @@ func (e *SQLCalcFoundRowsUsageError) ErrorCode() vtrpcpb.Code { } // CantUseOptionHereError -type CantUseOptionHereError struct { - Msg string -} - func (e *CantUseOptionHereError) Error() string { return eprintf(e, "Incorrect usage/placement of '%s'", e.Msg) } @@ -151,10 +153,6 @@ func (e *CantUseOptionHereError) ErrorCode() vtrpcpb.Code { } // MissingInVSchemaError -type MissingInVSchemaError struct { - Table TableInfo -} - func (e *MissingInVSchemaError) Error() string { tableName, _ := e.Table.Name() return eprintf(e, "Table information is not provided in vschema for table `%s`", sqlparser.String(tableName)) @@ -165,10 +163,6 @@ func (e *MissingInVSchemaError) ErrorCode() vtrpcpb.Code { } // NotSequenceTableError -type NotSequenceTableError struct { - Table string -} - func (e *NotSequenceTableError) Error() string { return eprintf(e, "NEXT used on a non-sequence table `%s`", e.Table) } @@ -178,10 +172,6 @@ func (e *NotSequenceTableError) ErrorCode() vtrpcpb.Code { } // NextWithMultipleTablesError -type NextWithMultipleTablesError struct { - CountTables int -} - func (e *NextWithMultipleTablesError) Error() string { return eprintf(e, "Next statement should not contain multiple tables: found %d tables", e.CountTables) } @@ -189,10 +179,6 @@ func (e *NextWithMultipleTablesError) Error() string { func (e *NextWithMultipleTablesError) bug() {} // LockOnlyWithDualError -type LockOnlyWithDualError struct { - Node *sqlparser.LockingFunc -} - func (e 
*LockOnlyWithDualError) Error() string { return eprintf(e, "%v allowed only with dual", sqlparser.String(e.Node)) } @@ -202,19 +188,11 @@ func (e *LockOnlyWithDualError) ErrorCode() vtrpcpb.Code { } // QualifiedOrderInUnionError -type QualifiedOrderInUnionError struct { - Table string -} - func (e *QualifiedOrderInUnionError) Error() string { return eprintf(e, "Table `%s` from one of the SELECTs cannot be used in global ORDER clause", e.Table) } // JSONTablesError -type JSONTablesError struct { - Table string -} - func (e *JSONTablesError) Error() string { return eprintf(e, "json_table expressions") } @@ -222,10 +200,6 @@ func (e *JSONTablesError) Error() string { func (e *JSONTablesError) unsupported() {} // BuggyError is used for checking conditions that should never occur -type BuggyError struct { - Msg string -} - func (e *BuggyError) Error() string { return eprintf(e, e.Msg) } @@ -233,11 +207,6 @@ func (e *BuggyError) Error() string { func (e *BuggyError) bug() {} // ColumnNotFoundError -type ColumnNotFoundError struct { - Column *sqlparser.ColName - Table *sqlparser.TableName -} - func (e *ColumnNotFoundError) Error() string { if e.Table == nil { return eprintf(e, "column '%s' not found", sqlparser.String(e.Column)) @@ -254,10 +223,6 @@ func (e *ColumnNotFoundError) ErrorState() vterrors.State { } // AmbiguousColumnError -type AmbiguousColumnError struct { - Column string -} - func (e *AmbiguousColumnError) Error() string { return eprintf(e, "Column '%s' in field list is ambiguous", e.Column) } @@ -270,10 +235,6 @@ func (e *AmbiguousColumnError) ErrorCode() vtrpcpb.Code { return vtrpcpb.Code_INVALID_ARGUMENT } -type UnsupportedConstruct struct { - errString string -} - func (e *UnsupportedConstruct) unsupported() {} func (e *UnsupportedConstruct) ErrorCode() vtrpcpb.Code { @@ -283,3 +244,20 @@ func (e *UnsupportedConstruct) ErrorCode() vtrpcpb.Code { func (e *UnsupportedConstruct) Error() string { return eprintf(e, e.errString) } + +func (e 
*SubqueryColumnCountError) ErrorCode() vtrpcpb.Code { + return vtrpcpb.Code_INVALID_ARGUMENT +} + +func (e *SubqueryColumnCountError) Error() string { + return fmt.Sprintf("Operand should contain %d column(s)", e.Expected) +} + +// MissingInVSchemaError +func (e *ColumnsMissingInSchemaError) Error() string { + return "VT09015: schema tracking required" +} + +func (e *ColumnsMissingInSchemaError) ErrorCode() vtrpcpb.Code { + return vtrpcpb.Code_INVALID_ARGUMENT +} diff --git a/go/vt/vtgate/semantics/info_schema.go b/go/vt/vtgate/semantics/info_schema.go index b27e197c16f..af050d5ff1b 100644 --- a/go/vt/vtgate/semantics/info_schema.go +++ b/go/vt/vtgate/semantics/info_schema.go @@ -1671,16 +1671,22 @@ type infoSchemaWithColumns struct { infoSchemaData map[string][]vindexes.Column } +// We cache this information, since these are maps that are not changed +var infoSchema57 = getInfoSchema57() +var infoSchema80 = getInfoSchema80() + // newSchemaInfo returns a SchemaInformation that has the column information for all info_schema tables func newSchemaInfo(inner SchemaInformation) SchemaInformation { + return &infoSchemaWithColumns{inner: inner, infoSchemaData: loadSchemaInfo()} +} + +func loadSchemaInfo() map[string][]vindexes.Column { version := servenv.MySQLServerVersion() - var infoSchema map[string][]vindexes.Column if strings.HasPrefix(version, "5.7") { - infoSchema = getInfoSchema57() - } else { - infoSchema = getInfoSchema80() + return infoSchema57 } - return &infoSchemaWithColumns{inner: inner, infoSchemaData: infoSchema} + + return infoSchema80 } // FindTableOrVindex implements the SchemaInformation interface diff --git a/go/vt/vtgate/semantics/info_schema_gen_test.go b/go/vt/vtgate/semantics/info_schema_gen_test.go index c5fe0123852..61241d96653 100644 --- a/go/vt/vtgate/semantics/info_schema_gen_test.go +++ b/go/vt/vtgate/semantics/info_schema_gen_test.go @@ -70,69 +70,6 @@ func TestGenerateInfoSchemaMap(t *testing.T) { } var ( - informationSchemaTables57 = 
[]string{ - "CHARACTER_SETS", - "COLLATION_CHARACTER_SET_APPLICABILITY", - "COLLATIONS", - "COLUMN_PRIVILEGES", - "COLUMNS", - "ENGINES", - "EVENTS", - "FILES", - "GLOBAL_STATUS", - "GLOBAL_VARIABLES", - "INNODB_BUFFER_PAGE", - "INNODB_BUFFER_PAGE_LRU", - "INNODB_BUFFER_POOL_STATS", - "INNODB_CMP", - "INNODB_CMP_PER_INDEX", - "INNODB_CMP_PER_INDEX_RESET", - "INNODB_CMP_RESET", - "INNODB_CMPMEM", - "INNODB_CMPMEM_RESET", - "INNODB_FT_BEING_DELETED", - "INNODB_FT_CONFIG", - "INNODB_FT_DEFAULT_STOPWORD", - "INNODB_FT_DELETED", - "INNODB_FT_INDEX_CACHE", - "INNODB_FT_INDEX_TABLE", - "INNODB_LOCK_WAITS", - "INNODB_LOCKS", - "INNODB_METRICS", - "INNODB_SYS_COLUMNS", - "INNODB_SYS_DATAFILES", - "INNODB_SYS_FIELDS", - "INNODB_SYS_FOREIGN", - "INNODB_SYS_FOREIGN_COLS", - "INNODB_SYS_INDEXES", - "INNODB_SYS_TABLES", - "INNODB_SYS_TABLESPACES", - "INNODB_SYS_TABLESTATS", - "INNODB_SYS_VIRTUAL", - "INNODB_TEMP_TABLE_INFO", - "INNODB_TRX", - "KEY_COLUMN_USAGE", - "OPTIMIZER_TRACE", - "PARAMETERS", - "PARTITIONS", - "PLUGINS", - "PROCESSLIST", - "PROFILING", - "REFERENTIAL_CONSTRAINTS", - "ROUTINES", - "SCHEMA_PRIVILEGES", - "SCHEMATA", - "SESSION_STATUS", - "SESSION_VARIABLES", - "STATISTICS", - "TABLE_CONSTRAINTS", - "TABLE_PRIVILEGES", - "TABLES", - "TABLESPACES", - "TRIGGERS", - "USER_PRIVILEGES", - "VIEWS", - } informationSchemaTables80 = []string{ "ADMINISTRABLE_ROLE_AUTHORIZATIONS", "APPLICABLE_ROLES", diff --git a/go/vt/vtgate/semantics/scoper.go b/go/vt/vtgate/semantics/scoper.go index 4df6fb06685..5d27b31b84e 100644 --- a/go/vt/vtgate/semantics/scoper.go +++ b/go/vt/vtgate/semantics/scoper.go @@ -20,9 +20,8 @@ import ( "reflect" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" ) type ( @@ -37,6 +36,7 @@ type ( // These scopes are only used for rewriting ORDER BY 1 and GROUP BY 1 specialExprScopes map[*sqlparser.Literal]*scope + statementIDs 
map[sqlparser.Statement]TableSet } scope struct { @@ -54,6 +54,7 @@ func newScoper() *scoper { rScope: map[*sqlparser.Select]*scope{}, wScope: map[*sqlparser.Select]*scope{}, specialExprScopes: map[*sqlparser.Literal]*scope{}, + statementIDs: map[sqlparser.Statement]TableSet{}, } } @@ -181,6 +182,12 @@ func (s *scoper) up(cursor *sqlparser.Cursor) error { s.popScope() } case *sqlparser.Select, sqlparser.GroupBy, *sqlparser.Update, *sqlparser.Delete, *sqlparser.Insert: + id := EmptyTableSet() + for _, tableInfo := range s.currentScope().tables { + set := tableInfo.getTableSet(s.org) + id = id.Merge(set) + } + s.statementIDs[s.currentScope().stmt] = id s.popScope() case *sqlparser.Where: if node.Type != sqlparser.HavingClause { diff --git a/go/vt/vtgate/semantics/semantic_state.go b/go/vt/vtgate/semantics/semantic_state.go index 3058f0608cc..0af935918f9 100644 --- a/go/vt/vtgate/semantics/semantic_state.go +++ b/go/vt/vtgate/semantics/semantic_state.go @@ -17,6 +17,8 @@ limitations under the License. package semantics import ( + "fmt" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" @@ -65,6 +67,14 @@ type ( // ExprDependencies stores the tables that an expression depends on as a map ExprDependencies map[sqlparser.Expr]TableSet + // QuerySignature is used to identify shortcuts in the planning process + QuerySignature struct { + Union bool + Aggregation bool + Distinct bool + SubQueries bool + } + // SemTable contains semantic analysis information about the query. SemTable struct { // Tables stores information about the tables in the query, including derived tables @@ -99,11 +109,6 @@ type ( // It doesn't recurse inside derived tables to find the original dependencies. Direct ExprDependencies - // SubqueryMap holds extracted subqueries for each statement. - SubqueryMap map[sqlparser.Statement][]*sqlparser.ExtractedSubquery - // SubqueryRef maps subquery pointers to their extracted subquery. 
- SubqueryRef map[*sqlparser.Subquery]*sqlparser.ExtractedSubquery - // ColumnEqualities is used for transitive closures (e.g., if a == b and b == c, then a == c). ColumnEqualities map[columnName][]sqlparser.Expr @@ -111,7 +116,15 @@ type ( // The columns were added because of the use of `*` in the query ExpandedColumns map[sqlparser.TableName][]*sqlparser.ColName + columns map[*sqlparser.Union]sqlparser.SelectExprs + comparator *sqlparser.Comparator + + // StatementIDs is a map of statements and all the table IDs that are contained within + StatementIDs map[sqlparser.Statement]TableSet + + // QuerySignature is used to identify shortcuts in the planning process + QuerySignature QuerySignature } columnName struct { @@ -133,21 +146,73 @@ var ( // CopyDependencies copies the dependencies from one expression into the other func (st *SemTable) CopyDependencies(from, to sqlparser.Expr) { - st.Recursive[to] = st.RecursiveDeps(from) - st.Direct[to] = st.DirectDeps(from) + if ValidAsMapKey(to) { + st.Recursive[to] = st.RecursiveDeps(from) + st.Direct[to] = st.DirectDeps(from) + st.ExprTypes[to] = st.ExprTypes[from] + } } -// CopyDependenciesOnSQLNodes copies the dependencies from one expression into the other -func (st *SemTable) CopyDependenciesOnSQLNodes(from, to sqlparser.SQLNode) { - f, ok := from.(sqlparser.Expr) - if !ok { - return +func (st *SemTable) SelectExprs(sel sqlparser.SelectStatement) sqlparser.SelectExprs { + switch sel := sel.(type) { + case *sqlparser.Select: + return sel.SelectExprs + case *sqlparser.Union: + exprs, found := st.columns[sel] + if found { + return exprs + } + for stmt, exprs := range st.columns { + if sqlparser.Equals.SelectStatement(stmt, sel) { + return exprs + } + } + panic("BUG: union not found in semantic table for select expressions") + } + panic(fmt.Sprintf("BUG: unexpected select statement type %T", sel)) +} + +func getColumnNames(exprs sqlparser.SelectExprs) (expanded bool, selectExprs sqlparser.SelectExprs) { + expanded = true + 
for _, col := range exprs { + switch col := col.(type) { + case *sqlparser.AliasedExpr: + expr := sqlparser.NewColName(col.ColumnName()) + selectExprs = append(selectExprs, &sqlparser.AliasedExpr{Expr: expr}) + default: + selectExprs = append(selectExprs, col) + expanded = false + } + } + return +} + +// CopySemanticInfo copies all semantic information we have about this SQLNode so that it also applies to the `to` node +func (st *SemTable) CopySemanticInfo(from, to sqlparser.SQLNode) { + if f, ok := from.(sqlparser.Statement); ok { + t, ok := to.(sqlparser.Statement) + if ok { + st.StatementIDs[t] = st.StatementIDs[f] + } } - t, ok := to.(sqlparser.Expr) - if !ok { + + switch f := from.(type) { + case sqlparser.Expr: + t, ok := to.(sqlparser.Expr) + if !ok { + return + } + st.CopyDependencies(f, t) + case *sqlparser.Union: + t, ok := to.(*sqlparser.Union) + if !ok { + return + } + exprs := st.columns[f] + st.columns[t] = exprs + default: return } - st.CopyDependencies(f, t) } // Cloned copies the dependencies from one expression into the other @@ -166,6 +231,7 @@ func EmptySemTable() *SemTable { Recursive: map[sqlparser.Expr]TableSet{}, Direct: map[sqlparser.Expr]TableSet{}, ColumnEqualities: map[columnName][]sqlparser.Expr{}, + columns: map[*sqlparser.Union]sqlparser.SelectExprs{}, } } @@ -181,6 +247,9 @@ func (st *SemTable) TableSetFor(t *sqlparser.AliasedTableExpr) TableSet { // ReplaceTableSetFor replaces the given single TabletSet with the new *sqlparser.AliasedTableExpr func (st *SemTable) ReplaceTableSetFor(id TableSet, t *sqlparser.AliasedTableExpr) { + if st == nil { + return + } if id.NumberOfTables() != 1 { // This is probably a derived table return @@ -260,16 +329,30 @@ func (st *SemTable) TypeForExpr(e sqlparser.Expr) (sqltypes.Type, collations.ID, if typ, found := st.ExprTypes[e]; found { return typ.Type, typ.Collation, true } + + // We add a lot of WeightString() expressions to queries at late stages of the planning, + // which means that they don't 
have any type information. We can safely assume that they + // are VarBinary, since that's the only type that WeightString() can return. + _, isWS := e.(*sqlparser.WeightStringFuncExpr) + if isWS { + return sqltypes.VarBinary, collations.CollationBinaryID, true + } + return sqltypes.Unknown, collations.Unknown, false } // NeedsWeightString returns true if the given expression needs weight_string to do safe comparisons func (st *SemTable) NeedsWeightString(e sqlparser.Expr) bool { - typ, found := st.ExprTypes[e] - if !found { - return true + switch e := e.(type) { + case *sqlparser.WeightStringFuncExpr, *sqlparser.Literal: + return false + default: + typ, found := st.ExprTypes[e] + if !found { + return true + } + return typ.Collation == collations.Unknown && !sqltypes.IsNumber(typ.Type) } - return typ.Collation == collations.Unknown && !sqltypes.IsNumber(typ.Type) } func (st *SemTable) DefaultCollation() collations.ID { @@ -301,13 +384,6 @@ func (d ExprDependencies) dependencies(expr sqlparser.Expr) (deps TableSet) { return true, nil } - if extracted, ok := expr.(*sqlparser.ExtractedSubquery); ok { - if extracted.OtherSide != nil { - set := d.dependencies(extracted.OtherSide) - deps = deps.Merge(set) - } - return false, nil - } set, found := d[expr] deps = deps.Merge(set) @@ -342,27 +418,6 @@ func RewriteDerivedTableExpression(expr sqlparser.Expr, vt TableInfo) sqlparser. 
}, nil).(sqlparser.Expr) } -// FindSubqueryReference goes over the sub queries and searches for it by value equality instead of reference equality -func (st *SemTable) FindSubqueryReference(subquery *sqlparser.Subquery) *sqlparser.ExtractedSubquery { - for foundSubq, extractedSubquery := range st.SubqueryRef { - if sqlparser.Equals.RefOfSubquery(subquery, foundSubq) { - return extractedSubquery - } - } - return nil -} - -// GetSubqueryNeedingRewrite returns a list of sub-queries that need to be rewritten -func (st *SemTable) GetSubqueryNeedingRewrite() []*sqlparser.ExtractedSubquery { - var res []*sqlparser.ExtractedSubquery - for _, extractedSubquery := range st.SubqueryRef { - if extractedSubquery.Merged { - res = append(res, extractedSubquery) - } - } - return res -} - // CopyExprInfo lookups src in the ExprTypes map and, if a key is found, assign // the corresponding Type value of src to dest. func (st *SemTable) CopyExprInfo(src, dest sqlparser.Expr) { @@ -434,6 +489,10 @@ func (st *SemTable) SingleUnshardedKeyspace() (*vindexes.Keyspace, []*vindexes.T // The expression in the select list is not equal to the one in the ORDER BY, // but they point to the same column and would be considered equal by this method func (st *SemTable) EqualsExpr(a, b sqlparser.Expr) bool { + // If there is no SemTable, then we cannot compare the expressions. 
+ if st == nil { + return false + } return st.ASTEquals().Expr(a, b) } @@ -447,8 +506,8 @@ func (st *SemTable) EqualsExprWithDeps(a, b sqlparser.Expr) bool { if !eq { return false } - adeps := st.DirectDeps(a) - bdeps := st.DirectDeps(b) + adeps := st.RecursiveDeps(a) + bdeps := st.RecursiveDeps(b) if adeps.IsEmpty() || bdeps.IsEmpty() || adeps == bdeps { return true } @@ -464,6 +523,23 @@ func (st *SemTable) ContainsExpr(e sqlparser.Expr, expres []sqlparser.Expr) bool return false } +// Uniquify takes a slice of expressions and removes any duplicates +func (st *SemTable) Uniquify(in []sqlparser.Expr) []sqlparser.Expr { + result := make([]sqlparser.Expr, 0, len(in)) + idx := 0 +outer: + for _, expr := range in { + for i := 0; i < idx; i++ { + if st.EqualsExprWithDeps(result[i], expr) { + continue outer + } + result = append(result, expr) + idx++ + } + } + return result +} + // AndExpressions ands together two or more expressions, minimising the expr when possible func (st *SemTable) AndExpressions(exprs ...sqlparser.Expr) sqlparser.Expr { switch len(exprs) { @@ -498,6 +574,9 @@ func (st *SemTable) AndExpressions(exprs ...sqlparser.Expr) sqlparser.Expr { // ASTEquals returns a sqlparser.Comparator that uses the semantic information in this SemTable to // explicitly compare column names for equality. func (st *SemTable) ASTEquals() *sqlparser.Comparator { + if st == nil { + return sqlparser.Equals + } if st.comparator == nil { st.comparator = &sqlparser.Comparator{ RefOfColName_: func(a, b *sqlparser.ColName) bool { diff --git a/go/vt/vtgate/semantics/table_collector.go b/go/vt/vtgate/semantics/table_collector.go index bf7f5ae74f4..d6fd4c6efd6 100644 --- a/go/vt/vtgate/semantics/table_collector.go +++ b/go/vt/vtgate/semantics/table_collector.go @@ -17,6 +17,7 @@ limitations under the License. 
package semantics import ( + querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" @@ -31,6 +32,7 @@ type tableCollector struct { si SchemaInformation currentDb string org originable + unionInfo map[*sqlparser.Union]unionInfo } func newTableCollector(scoper *scoper, si SchemaInformation, currentDb string) *tableCollector { @@ -38,44 +40,60 @@ func newTableCollector(scoper *scoper, si SchemaInformation, currentDb string) * scoper: scoper, si: si, currentDb: currentDb, + unionInfo: map[*sqlparser.Union]unionInfo{}, } } func (tc *tableCollector) up(cursor *sqlparser.Cursor) error { - node, ok := cursor.Node().(*sqlparser.AliasedTableExpr) - if !ok { - return nil + switch node := cursor.Node().(type) { + case *sqlparser.AliasedTableExpr: + return tc.visitAliasedTableExpr(node) + case *sqlparser.Union: + firstSelect := sqlparser.GetFirstSelect(node) + expanded, selectExprs := getColumnNames(firstSelect.SelectExprs) + info := unionInfo{ + isAuthoritative: expanded, + exprs: selectExprs, + } + tc.unionInfo[node] = info + if !expanded { + return nil + } + + size := len(firstSelect.SelectExprs) + info.recursive = make([]TableSet, size) + info.types = make([]*Type, size) + + _ = sqlparser.VisitAllSelects(node, func(s *sqlparser.Select, idx int) error { + for i, expr := range s.SelectExprs { + ae, ok := expr.(*sqlparser.AliasedExpr) + if !ok { + continue + } + _, recursiveDeps, qt := tc.org.depsForExpr(ae.Expr) + info.recursive[i] = info.recursive[i].Merge(recursiveDeps) + if idx == 0 { + // TODO: we probably should coerce these types together somehow, but I'm not sure how + info.types[i] = qt + } + } + return nil + }) + tc.unionInfo[node] = info } + + return nil +} + +func (tc *tableCollector) visitAliasedTableExpr(node *sqlparser.AliasedTableExpr) error { switch t := node.Expr.(type) { case *sqlparser.DerivedTable: switch sel := t.Select.(type) { case *sqlparser.Select: 
- tables := tc.scoper.wScope[sel] - tableInfo := createDerivedTableForExpressions(sqlparser.GetFirstSelect(sel).SelectExprs, node.Columns, tables.tables, tc.org) - if err := tableInfo.checkForDuplicates(); err != nil { - return err - } - - tableInfo.ASTNode = node - tableInfo.tableName = node.As.String() - - tc.Tables = append(tc.Tables, tableInfo) - scope := tc.scoper.currentScope() - return scope.addTable(tableInfo) + return tc.addSelectDerivedTable(sel, node) case *sqlparser.Union: - firstSelect := sqlparser.GetFirstSelect(sel) - tables := tc.scoper.wScope[firstSelect] - tableInfo := createDerivedTableForExpressions(firstSelect.SelectExprs, node.Columns, tables.tables, tc.org) - if err := tableInfo.checkForDuplicates(); err != nil { - return err - } - tableInfo.ASTNode = node - tableInfo.tableName = node.As.String() - - tc.Tables = append(tc.Tables, tableInfo) - scope := tc.scoper.currentScope() - return scope.addTable(tableInfo) + return tc.addUnionDerivedTable(sel, node) default: return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] %T in a derived table", sel) @@ -104,14 +122,62 @@ func (tc *tableCollector) up(cursor *sqlparser.Cursor) error { return nil } +func (tc *tableCollector) addSelectDerivedTable(sel *sqlparser.Select, node *sqlparser.AliasedTableExpr) error { + tables := tc.scoper.wScope[sel] + size := len(sel.SelectExprs) + deps := make([]TableSet, size) + types := make([]*Type, size) + expanded := true + for i, expr := range sel.SelectExprs { + ae, ok := expr.(*sqlparser.AliasedExpr) + if !ok { + expanded = false + continue + } + _, deps[i], types[i] = tc.org.depsForExpr(ae.Expr) + } + + tableInfo := createDerivedTableForExpressions(sel.SelectExprs, node.Columns, tables.tables, tc.org, expanded, deps, types) + if err := tableInfo.checkForDuplicates(); err != nil { + return err + } + + tableInfo.ASTNode = node + tableInfo.tableName = node.As.String() + + tc.Tables = append(tc.Tables, tableInfo) + scope := tc.scoper.currentScope() + return 
scope.addTable(tableInfo) +} + +func (tc *tableCollector) addUnionDerivedTable(union *sqlparser.Union, node *sqlparser.AliasedTableExpr) error { + firstSelect := sqlparser.GetFirstSelect(union) + tables := tc.scoper.wScope[firstSelect] + info, found := tc.unionInfo[union] + if !found { + return vterrors.VT13001("information about union is not available") + } + + tableInfo := createDerivedTableForExpressions(info.exprs, node.Columns, tables.tables, tc.org, info.isAuthoritative, info.recursive, info.types) + if err := tableInfo.checkForDuplicates(); err != nil { + return err + } + tableInfo.ASTNode = node + tableInfo.tableName = node.As.String() + + tc.Tables = append(tc.Tables, tableInfo) + scope := tc.scoper.currentScope() + return scope.addTable(tableInfo) +} + func newVindexTable(t sqlparser.IdentifierCS) *vindexes.Table { vindexCols := []vindexes.Column{ - {Name: sqlparser.NewIdentifierCI("id")}, - {Name: sqlparser.NewIdentifierCI("keyspace_id")}, - {Name: sqlparser.NewIdentifierCI("range_start")}, - {Name: sqlparser.NewIdentifierCI("range_end")}, - {Name: sqlparser.NewIdentifierCI("hex_keyspace_id")}, - {Name: sqlparser.NewIdentifierCI("shard")}, + {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARBINARY}, + {Name: sqlparser.NewIdentifierCI("keyspace_id"), Type: querypb.Type_VARBINARY}, + {Name: sqlparser.NewIdentifierCI("range_start"), Type: querypb.Type_VARBINARY}, + {Name: sqlparser.NewIdentifierCI("range_end"), Type: querypb.Type_VARBINARY}, + {Name: sqlparser.NewIdentifierCI("hex_keyspace_id"), Type: querypb.Type_VARBINARY}, + {Name: sqlparser.NewIdentifierCI("shard"), Type: querypb.Type_VARBINARY}, } return &vindexes.Table{ diff --git a/go/vt/vtgate/semantics/typer.go b/go/vt/vtgate/semantics/typer.go index 12914693968..6652f1a476b 100644 --- a/go/vt/vtgate/semantics/typer.go +++ b/go/vt/vtgate/semantics/typer.go @@ -17,9 +17,8 @@ limitations under the License. 
package semantics import ( - "strings" - "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine/opcode" @@ -52,13 +51,19 @@ func (t *typer) up(cursor *sqlparser.Cursor) error { t.exprTypes[node] = Type{Type: node.Type, Collation: collations.DefaultCollationForType(node.Type)} } case sqlparser.AggrFunc: - code, ok := opcode.SupportedAggregates[strings.ToLower(node.AggrName())] - if ok { - typ, ok := opcode.OpcodeType[code] + code, ok := opcode.SupportedAggregates[node.AggrName()] + if !ok { + return nil + } + var inputType sqltypes.Type + if arg := node.GetArg(); arg != nil { + t, ok := t.exprTypes[arg] if ok { - t.exprTypes[node] = Type{Type: typ, Collation: collations.DefaultCollationForType(typ)} + inputType = t.Type } } + type_ := code.Type(inputType) + t.exprTypes[node] = Type{Type: type_, Collation: collations.DefaultCollationForType(type_)} } return nil } diff --git a/go/vt/vtgate/simplifier/expression_simplifier.go b/go/vt/vtgate/simplifier/expression_simplifier.go index 279cb1ac7dd..4537a137e76 100644 --- a/go/vt/vtgate/simplifier/expression_simplifier.go +++ b/go/vt/vtgate/simplifier/expression_simplifier.go @@ -21,104 +21,59 @@ import ( "strconv" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" ) // CheckF is used to see if the given expression exhibits the sought after issue type CheckF = func(sqlparser.Expr) bool -func SimplifyExpr(in sqlparser.Expr, test CheckF) (smallestKnown sqlparser.Expr) { - var maxDepth, level int - resetTo := func(e sqlparser.Expr) { - smallestKnown = e - maxDepth = depth(e) - level = 0 +func SimplifyExpr(in sqlparser.Expr, test CheckF) sqlparser.Expr { + // since we can't rewrite the top level, wrap the expr in an Exprs object + smallestKnown := sqlparser.Exprs{sqlparser.CloneExpr(in)} + + alwaysVisit := func(node, parent sqlparser.SQLNode) bool { + return true } - resetTo(in) - 
for level <= maxDepth { - current := sqlparser.CloneExpr(smallestKnown) - nodes, replaceF := getNodesAtLevel(current, level) - replace := func(e sqlparser.Expr, idx int) { - // if we are at the first level, we are replacing the root, - // not rewriting something deep in the tree - if level == 0 { - current = e + + up := func(cursor *sqlparser.Cursor) bool { + node := sqlparser.CloneSQLNode(cursor.Node()) + s := &shrinker{orig: node} + expr := s.Next() + for expr != nil { + cursor.Replace(expr) + + valid := test(smallestKnown[0]) + log.Errorf("test: %t: simplified %s to %s, full expr: %s", valid, sqlparser.String(node), sqlparser.String(expr), sqlparser.String(smallestKnown)) + if valid { + break // we will still continue trying to simplify other expressions at this level } else { - // replace `node` in current with the simplified expression - replaceF[idx](e) + // undo the change + cursor.Replace(node) } + expr = s.Next() } - simplified := false - for idx, node := range nodes { - // simplify each element and create a new expression with the node replaced by the simplification - // this means that we not only need the node, but also a way to replace the node - s := &shrinker{orig: node} - expr := s.Next() - for expr != nil { - replace(expr, idx) - - valid := test(current) - log.Errorf("test: %t - %s", valid, sqlparser.String(current)) - if valid { - simplified = true - break // we will still continue trying to simplify other expressions at this level - } else { - // undo the change - replace(node, idx) - } - expr = s.Next() - } - } - if simplified { - resetTo(current) - } else { - level++ - } - } - return smallestKnown -} - -func getNodesAtLevel(e sqlparser.Expr, level int) (result []sqlparser.Expr, replaceF []func(node sqlparser.SQLNode)) { - lvl := 0 - pre := func(cursor *sqlparser.Cursor) bool { - if expr, isExpr := cursor.Node().(sqlparser.Expr); level == lvl && isExpr { - result = append(result, expr) - replaceF = append(replaceF, cursor.ReplacerF()) - } - 
lvl++ - return true - } - post := func(cursor *sqlparser.Cursor) bool { - lvl-- return true } - sqlparser.Rewrite(e, pre, post) - return -} -func depth(e sqlparser.Expr) (depth int) { - lvl := 0 - pre := func(cursor *sqlparser.Cursor) bool { - lvl++ - if lvl > depth { - depth = lvl + // loop until rewriting introduces no more changes + for { + prevSmallest := sqlparser.CloneExprs(smallestKnown) + sqlparser.SafeRewrite(smallestKnown, alwaysVisit, up) + if sqlparser.Equals.Exprs(prevSmallest, smallestKnown) { + break } - return true } - post := func(cursor *sqlparser.Cursor) bool { - lvl-- - return true - } - sqlparser.Rewrite(e, pre, post) - return + + return smallestKnown[0] } type shrinker struct { - orig sqlparser.Expr - queue []sqlparser.Expr + orig sqlparser.SQLNode + queue []sqlparser.SQLNode } -func (s *shrinker) Next() sqlparser.Expr { +func (s *shrinker) Next() sqlparser.SQLNode { for { // first we check if there is already something in the queue. // note that we are doing a nil check and not a length check here. 
@@ -142,6 +97,10 @@ func (s *shrinker) Next() sqlparser.Expr { func (s *shrinker) fillQueue() bool { before := len(s.queue) switch e := s.orig.(type) { + case *sqlparser.AndExpr: + s.queue = append(s.queue, e.Left, e.Right) + case *sqlparser.OrExpr: + s.queue = append(s.queue, e.Left, e.Right) case *sqlparser.ComparisonExpr: s.queue = append(s.queue, e.Left, e.Right) case *sqlparser.BinaryExpr: @@ -228,9 +187,39 @@ func (s *shrinker) fillQueue() bool { for _, ae := range e.GetArgs() { s.queue = append(s.queue, ae) } + + clone := sqlparser.CloneAggrFunc(e) + if da, ok := clone.(sqlparser.DistinctableAggr); ok { + if da.IsDistinct() { + da.SetDistinct(false) + s.queue = append(s.queue, clone) + } + } case *sqlparser.ColName: // we can try to replace the column with a literal value - s.queue = []sqlparser.Expr{sqlparser.NewIntLiteral("0")} + s.queue = append(s.queue, sqlparser.NewIntLiteral("0")) + case *sqlparser.CaseExpr: + s.queue = append(s.queue, e.Expr, e.Else) + for _, when := range e.Whens { + s.queue = append(s.queue, when.Cond, when.Val) + } + + if len(e.Whens) > 1 { + for i := range e.Whens { + whensCopy := sqlparser.CloneSliceOfRefOfWhen(e.Whens) + // replace ith element with last element, then truncate last element + whensCopy[i] = whensCopy[len(whensCopy)-1] + whensCopy = whensCopy[:len(whensCopy)-1] + s.queue = append(s.queue, sqlparser.NewCaseExpr(e.Expr, whensCopy, e.Else)) + } + } + + if e.Else != nil { + s.queue = append(s.queue, sqlparser.NewCaseExpr(e.Expr, e.Whens, nil)) + } + if e.Expr != nil { + s.queue = append(s.queue, sqlparser.NewCaseExpr(nil, e.Whens, e.Else)) + } default: return false } diff --git a/go/vt/vtgate/simplifier/simplifier.go b/go/vt/vtgate/simplifier/simplifier.go index ef7be4e30e5..0e19935caba 100644 --- a/go/vt/vtgate/simplifier/simplifier.go +++ b/go/vt/vtgate/simplifier/simplifier.go @@ -40,12 +40,12 @@ func SimplifyStatement( return testF(sqlparser.CloneSelectStatement(s)) } + // first we try to simplify the query by 
removing any unions if success := trySimplifyUnions(sqlparser.CloneSelectStatement(in), test); success != nil { return SimplifyStatement(success, currentDB, si, testF) } - // first we try to simplify the query by removing any table. - // If we can remove a table and all uses of it, that's a good start + // then we try to remove a table and all uses of it if success := tryRemoveTable(tables, sqlparser.CloneSelectStatement(in), currentDB, si, testF); success != nil { return SimplifyStatement(success, currentDB, si, testF) } @@ -55,54 +55,93 @@ func SimplifyStatement( return SimplifyStatement(success, currentDB, si, testF) } - // we try to remove select expressions next + // we try to remove/replace any expressions next if success := trySimplifyExpressions(sqlparser.CloneSelectStatement(in), test); success != nil { return SimplifyStatement(success, currentDB, si, testF) } + + // we try to remove distinct last + if success := trySimplifyDistinct(sqlparser.CloneSelectStatement(in), test); success != nil { + return SimplifyStatement(success, currentDB, si, testF) + } + return in } +func trySimplifyDistinct(in sqlparser.SelectStatement, test func(statement sqlparser.SelectStatement) bool) sqlparser.SelectStatement { + simplified := false + alwaysVisitChildren := func(node, parent sqlparser.SQLNode) bool { + return true + } + + up := func(cursor *sqlparser.Cursor) bool { + if sel, ok := cursor.Node().(*sqlparser.Select); ok { + if sel.Distinct { + sel.Distinct = false + if test(sel) { + log.Errorf("removed distinct to yield: %s", sqlparser.String(sel)) + simplified = true + } else { + sel.Distinct = true + } + } + } + + return true + } + + sqlparser.SafeRewrite(in, alwaysVisitChildren, up) + + if simplified { + + return in + } + // we found no simplifications + return nil +} + func trySimplifyExpressions(in sqlparser.SelectStatement, test func(sqlparser.SelectStatement) bool) sqlparser.SelectStatement { simplified := false - visitAllExpressionsInAST(in, func(cursor 
expressionCursor) bool { + visit := func(cursor expressionCursor) bool { // first - let's try to remove the expression if cursor.remove() { if test(in) { log.Errorf("removed expression: %s", sqlparser.String(cursor.expr)) simplified = true - return false + // initially return false, but that made the rewriter prematurely abort, if it was the last selectExpr + return true } cursor.restore() } // ok, we seem to need this expression. let's see if we can find a simpler version - s := &shrinker{orig: cursor.expr} - newExpr := s.Next() - for newExpr != nil { - cursor.replace(newExpr) + newExpr := SimplifyExpr(cursor.expr, func(expr sqlparser.Expr) bool { + cursor.replace(expr) if test(in) { - log.Errorf("simplified expression: %s -> %s", sqlparser.String(cursor.expr), sqlparser.String(newExpr)) + log.Errorf("simplified expression: %s -> %s", sqlparser.String(cursor.expr), sqlparser.String(expr)) + cursor.restore() simplified = true - return false + return true } - newExpr = s.Next() - } - // if we get here, we failed to simplify this expression, - // so we put back in the original expression - cursor.restore() + cursor.restore() + return false + }) + + cursor.replace(newExpr) return true - }) + } + + visitAllExpressionsInAST(in, visit) if simplified { return in } - + // we found no simplifications return nil } func trySimplifyUnions(in sqlparser.SelectStatement, test func(sqlparser.SelectStatement) bool) (res sqlparser.SelectStatement) { - if union, ok := in.(*sqlparser.Union); ok { // the root object is an UNION if test(sqlparser.CloneSelectStatement(union.Left)) { @@ -113,9 +152,12 @@ func trySimplifyUnions(in sqlparser.SelectStatement, test func(sqlparser.SelectS } } - abort := false + simplified := false + alwaysVisitChildren := func(node, parent sqlparser.SQLNode) bool { + return true + } - sqlparser.Rewrite(in, func(cursor *sqlparser.Cursor) bool { + up := func(cursor *sqlparser.Cursor) bool { switch node := cursor.Node().(type) { case *sqlparser.Union: if _, ok := 
cursor.Parent().(*sqlparser.RootNode); ok { @@ -125,29 +167,30 @@ func trySimplifyUnions(in sqlparser.SelectStatement, test func(sqlparser.SelectS cursor.Replace(node.Left) clone := sqlparser.CloneSelectStatement(in) if test(clone) { - log.Errorf("replaced UNION with one of its children") - abort = true + log.Errorf("replaced UNION with its left child: %s -> %s", sqlparser.String(node), sqlparser.String(node.Left)) + simplified = true return true } cursor.Replace(node.Right) clone = sqlparser.CloneSelectStatement(in) if test(clone) { - log.Errorf("replaced UNION with one of its children") - abort = true + log.Errorf("replaced UNION with its right child: %s -> %s", sqlparser.String(node), sqlparser.String(node.Right)) + simplified = true return true } cursor.Replace(node) } return true - }, func(*sqlparser.Cursor) bool { - return !abort - }) + } - if !abort { - // we found no simplifications - return nil + sqlparser.SafeRewrite(in, alwaysVisitChildren, up) + + if simplified { + + return in } - return in + // we found no simplifications + return nil } func tryRemoveTable(tables []semantics.TableInfo, in sqlparser.SelectStatement, currentDB string, si semantics.SchemaInformation, test func(sqlparser.SelectStatement) bool) sqlparser.SelectStatement { @@ -158,11 +201,11 @@ func tryRemoveTable(tables []semantics.TableInfo, in sqlparser.SelectStatement, simplified := removeTable(clone, searchedTS, currentDB, si) name, _ := tbl.Name() if simplified && test(clone) { - log.Errorf("removed table %s", sqlparser.String(name)) + log.Errorf("removed table %s: %s -> %s", sqlparser.String(name), sqlparser.String(in), sqlparser.String(clone)) return clone } } - + // we found no simplifications return nil } @@ -178,7 +221,11 @@ func getTables(in sqlparser.SelectStatement, currentDB string, si semantics.Sche func simplifyStarExpr(in sqlparser.SelectStatement, test func(sqlparser.SelectStatement) bool) sqlparser.SelectStatement { simplified := false - sqlparser.Rewrite(in, func(cursor 
*sqlparser.Cursor) bool { + alwaysVisitChildren := func(node, parent sqlparser.SQLNode) bool { + return true + } + + up := func(cursor *sqlparser.Cursor) bool { se, ok := cursor.Node().(*sqlparser.StarExpr) if !ok { return true @@ -189,15 +236,19 @@ func simplifyStarExpr(in sqlparser.SelectStatement, test func(sqlparser.SelectSt if test(in) { log.Errorf("replaced star with literal") simplified = true - return false + return true } cursor.Replace(se) return true - }, nil) + } + + sqlparser.SafeRewrite(in, alwaysVisitChildren, up) + if simplified { return in } + // we found no simplifications return nil } @@ -209,92 +260,148 @@ func removeTable(clone sqlparser.SelectStatement, searchedTS semantics.TableSet, panic(err) } - simplified := true + simplified, kontinue := false, true shouldKeepExpr := func(expr sqlparser.Expr) bool { + // why do we keep if the expr contains an aggregation? return !semTable.RecursiveDeps(expr).IsOverlapping(searchedTS) || sqlparser.ContainsAggregation(expr) } - sqlparser.Rewrite(clone, func(cursor *sqlparser.Cursor) bool { + checkSelect := func(node, parent sqlparser.SQLNode) bool { + if sel, ok := node.(*sqlparser.Select); ok { + // remove the table from the from clause on the way down + // so that it happens before removing it anywhere else + kontinue, simplified = removeTableinSelect(sel, searchedTS, semTable, simplified) + } + + return kontinue + } + + up := func(cursor *sqlparser.Cursor) bool { switch node := cursor.Node().(type) { case *sqlparser.JoinTableExpr: - lft, ok := node.LeftExpr.(*sqlparser.AliasedTableExpr) - if ok { - ts := semTable.TableSetFor(lft) - if searchedTS == ts { - cursor.Replace(node.RightExpr) - } - } - rgt, ok := node.RightExpr.(*sqlparser.AliasedTableExpr) - if ok { - ts := semTable.TableSetFor(rgt) - if searchedTS == ts { - cursor.Replace(node.LeftExpr) - } - } - case *sqlparser.Select: - if len(node.From) == 1 { - _, notJoin := node.From[0].(*sqlparser.AliasedTableExpr) - if notJoin { - simplified = false - 
return false - } - } - for i, tbl := range node.From { - lft, ok := tbl.(*sqlparser.AliasedTableExpr) - if ok { - ts := semTable.TableSetFor(lft) - if searchedTS == ts { - node.From = append(node.From[:i], node.From[i+1:]...) - return true - } - } - } + simplified = removeTableinJoinTableExpr(node, searchedTS, semTable, cursor, simplified) case *sqlparser.Where: - exprs := sqlparser.SplitAndExpression(nil, node.Expr) - var newPredicate sqlparser.Expr - for _, expr := range exprs { - if !semTable.RecursiveDeps(expr).IsOverlapping(searchedTS) { - newPredicate = sqlparser.AndExpressions(newPredicate, expr) - } - } - node.Expr = newPredicate + simplified = removeTableinWhere(node, shouldKeepExpr, simplified) case sqlparser.SelectExprs: - _, isSel := cursor.Parent().(*sqlparser.Select) - if !isSel { - return true - } - - var newExprs sqlparser.SelectExprs - for _, ae := range node { - expr, ok := ae.(*sqlparser.AliasedExpr) - if !ok { - newExprs = append(newExprs, ae) - continue - } - if shouldKeepExpr(expr.Expr) { - newExprs = append(newExprs, ae) - } - } - cursor.Replace(newExprs) + simplified = removeTableinSelectExprs(node, cursor, shouldKeepExpr, simplified) case sqlparser.GroupBy: - var newExprs sqlparser.GroupBy - for _, expr := range node { - if shouldKeepExpr(expr) { - newExprs = append(newExprs, expr) - } - } - cursor.Replace(newExprs) + simplified = removeTableinGroupBy(node, cursor, shouldKeepExpr, simplified) case sqlparser.OrderBy: - var newExprs sqlparser.OrderBy - for _, expr := range node { - if shouldKeepExpr(expr.Expr) { - newExprs = append(newExprs, expr) - } + simplified = removeTableinOrderBy(node, cursor, shouldKeepExpr, simplified) + } + return true + } + + sqlparser.SafeRewrite(clone, checkSelect, up) + return simplified +} + +func removeTableinJoinTableExpr(node *sqlparser.JoinTableExpr, searchedTS semantics.TableSet, semTable *semantics.SemTable, cursor *sqlparser.Cursor, simplified bool) bool { + lft, ok := 
node.LeftExpr.(*sqlparser.AliasedTableExpr) + if ok { + ts := semTable.TableSetFor(lft) + if searchedTS == ts { + cursor.Replace(node.RightExpr) + simplified = true + } + } + rgt, ok := node.RightExpr.(*sqlparser.AliasedTableExpr) + if ok { + ts := semTable.TableSetFor(rgt) + if searchedTS == ts { + cursor.Replace(node.LeftExpr) + simplified = true + } + } + + return simplified +} + +func removeTableinSelect(node *sqlparser.Select, searchedTS semantics.TableSet, semTable *semantics.SemTable, simplified bool) (bool, bool) { + if len(node.From) == 1 { + _, notJoin := node.From[0].(*sqlparser.AliasedTableExpr) + if notJoin { + return false, simplified + } + } + for i, tbl := range node.From { + lft, ok := tbl.(*sqlparser.AliasedTableExpr) + if ok { + ts := semTable.TableSetFor(lft) + if searchedTS == ts { + node.From = append(node.From[:i], node.From[i+1:]...) + simplified = true } + } + } + + return true, simplified +} - cursor.Replace(newExprs) +func removeTableinWhere(node *sqlparser.Where, shouldKeepExpr func(sqlparser.Expr) bool, simplified bool) bool { + exprs := sqlparser.SplitAndExpression(nil, node.Expr) + var newPredicate sqlparser.Expr + for _, expr := range exprs { + if shouldKeepExpr(expr) { + newPredicate = sqlparser.AndExpressions(newPredicate, expr) + } else { + simplified = true } - return true - }, nil) + } + node.Expr = newPredicate + + return simplified +} + +func removeTableinSelectExprs(node sqlparser.SelectExprs, cursor *sqlparser.Cursor, shouldKeepExpr func(sqlparser.Expr) bool, simplified bool) bool { + _, isSel := cursor.Parent().(*sqlparser.Select) + if !isSel { + return simplified + } + + var newExprs sqlparser.SelectExprs + for _, ae := range node { + expr, ok := ae.(*sqlparser.AliasedExpr) + if !ok { + newExprs = append(newExprs, ae) + continue + } + if shouldKeepExpr(expr.Expr) { + newExprs = append(newExprs, ae) + } else { + simplified = true + } + } + cursor.Replace(newExprs) + + return simplified +} + +func removeTableinGroupBy(node 
sqlparser.GroupBy, cursor *sqlparser.Cursor, shouldKeepExpr func(sqlparser.Expr) bool, simplified bool) bool { + var newExprs sqlparser.GroupBy + for _, expr := range node { + if shouldKeepExpr(expr) { + newExprs = append(newExprs, expr) + } else { + simplified = true + } + } + cursor.Replace(newExprs) + + return simplified +} + +func removeTableinOrderBy(node sqlparser.OrderBy, cursor *sqlparser.Cursor, shouldKeepExpr func(sqlparser.Expr) bool, simplified bool) bool { + var newExprs sqlparser.OrderBy + for _, expr := range node { + if shouldKeepExpr(expr.Expr) { + newExprs = append(newExprs, expr) + } else { + simplified = true + } + } + + cursor.Replace(newExprs) + return simplified } @@ -315,180 +422,223 @@ func newExprCursor(expr sqlparser.Expr, replace func(replaceWith sqlparser.Expr) } // visitAllExpressionsInAST will walk the AST and visit all expressions -// This cursor has a few extra capabilities that the normal sqlparser.Rewrite does not have, +// This cursor has a few extra capabilities that the normal sqlparser.SafeRewrite does not have, // such as visiting and being able to change individual expressions in a AND tree +// if visit returns true, then traversal continues, otherwise traversal stops func visitAllExpressionsInAST(clone sqlparser.SelectStatement, visit func(expressionCursor) bool) { - abort := false - post := func(*sqlparser.Cursor) bool { - return !abort + alwaysVisitChildren := func(node, parent sqlparser.SQLNode) bool { + return true } - pre := func(cursor *sqlparser.Cursor) bool { - if abort { - return true - } + up := func(cursor *sqlparser.Cursor) bool { switch node := cursor.Node().(type) { case sqlparser.SelectExprs: - _, isSel := cursor.Parent().(*sqlparser.Select) - if !isSel { - return true - } - for idx := 0; idx < len(node); idx++ { - ae := node[idx] - expr, ok := ae.(*sqlparser.AliasedExpr) - if !ok { - continue - } - removed := false - original := sqlparser.CloneExpr(expr.Expr) - item := newExprCursor( - expr.Expr, - 
/*replace*/ func(replaceWith sqlparser.Expr) { - if removed { - panic("cant replace after remove without restore") - } - expr.Expr = replaceWith - }, - /*remove*/ func() bool { - if removed { - panic("can't remove twice, silly") - } - if len(node) == 1 { - // can't remove the last expressions - we'd end up with an empty SELECT clause - return false - } - withoutElement := append(node[:idx], node[idx+1:]...) - cursor.Replace(withoutElement) - node = withoutElement - removed = true - return true - }, - /*restore*/ func() { - if removed { - front := make(sqlparser.SelectExprs, idx) - copy(front, node[:idx]) - back := make(sqlparser.SelectExprs, len(node)-idx) - copy(back, node[idx:]) - frontWithRestoredExpr := append(front, ae) - node = append(frontWithRestoredExpr, back...) - cursor.Replace(node) - removed = false - return - } - expr.Expr = original - }, - ) - abort = !visit(item) - } + return visitSelectExprs(node, cursor, visit) case *sqlparser.Where: - exprs := sqlparser.SplitAndExpression(nil, node.Expr) - set := func(input []sqlparser.Expr) { - node.Expr = sqlparser.AndExpressions(input...) - exprs = input - } - abort = !visitExpressions(exprs, set, visit) + return visitWhere(node, visit) case *sqlparser.JoinCondition: - join, ok := cursor.Parent().(*sqlparser.JoinTableExpr) - if !ok { - return true - } - if join.Join != sqlparser.NormalJoinType || node.Using != nil { - return false - } - exprs := sqlparser.SplitAndExpression(nil, node.On) - set := func(input []sqlparser.Expr) { - node.On = sqlparser.AndExpressions(input...) 
- exprs = input - } - abort = !visitExpressions(exprs, set, visit) + return visitJoinCondition(node, cursor, visit) case sqlparser.GroupBy: - set := func(input []sqlparser.Expr) { - node = input - cursor.Replace(node) - } - abort = !visitExpressions(node, set, visit) + return visitGroupBy(node, cursor, visit) case sqlparser.OrderBy: - for idx := 0; idx < len(node); idx++ { - order := node[idx] - removed := false - original := sqlparser.CloneExpr(order.Expr) - item := newExprCursor( - order.Expr, - /*replace*/ func(replaceWith sqlparser.Expr) { - if removed { - panic("cant replace after remove without restore") - } - order.Expr = replaceWith - }, - /*remove*/ func() bool { - if removed { - panic("can't remove twice, silly") - } - withoutElement := append(node[:idx], node[idx+1:]...) - if len(withoutElement) == 0 { - var nilVal sqlparser.OrderBy // this is used to create a typed nil value - cursor.Replace(nilVal) - } else { - cursor.Replace(withoutElement) - } - node = withoutElement - removed = true - return true - }, - /*restore*/ func() { - if removed { - front := make(sqlparser.OrderBy, idx) - copy(front, node[:idx]) - back := make(sqlparser.OrderBy, len(node)-idx) - copy(back, node[idx:]) - frontWithRestoredExpr := append(front, order) - node = append(frontWithRestoredExpr, back...) 
- cursor.Replace(node) - removed = false - return - } - order.Expr = original - }, - ) - abort = visit(item) - if abort { - break - } - } + return visitOrderBy(node, cursor, visit) case *sqlparser.Limit: - if node.Offset != nil { - original := node.Offset - cursor := newExprCursor(node.Offset, - /*replace*/ func(replaceWith sqlparser.Expr) { - node.Offset = replaceWith - }, - /*remove*/ func() bool { - node.Offset = nil - return true - }, - /*restore*/ func() { - node.Offset = original - }) - abort = visit(cursor) - } - if !abort && node.Rowcount != nil { - original := node.Rowcount - cursor := newExprCursor(node.Rowcount, - /*replace*/ func(replaceWith sqlparser.Expr) { - node.Rowcount = replaceWith - }, - /*remove*/ func() bool { - // removing Rowcount is an invalid op - return false - }, - /*restore*/ func() { - node.Rowcount = original - }) - abort = visit(cursor) - } + return visitLimit(node, cursor, visit) } return true } - sqlparser.Rewrite(clone, pre, post) + sqlparser.SafeRewrite(clone, alwaysVisitChildren, up) +} + +func visitSelectExprs(node sqlparser.SelectExprs, cursor *sqlparser.Cursor, visit func(expressionCursor) bool) bool { + _, isSel := cursor.Parent().(*sqlparser.Select) + if !isSel { + return true + } + for idx := 0; idx < len(node); idx++ { + ae := node[idx] + expr, ok := ae.(*sqlparser.AliasedExpr) + if !ok { + continue + } + removed := false + original := sqlparser.CloneExpr(expr.Expr) + item := newExprCursor( + expr.Expr, + /*replace*/ func(replaceWith sqlparser.Expr) { + if removed { + panic("cant replace after remove without restore") + } + expr.Expr = replaceWith + }, + /*remove*/ func() bool { + if removed { + panic("can't remove twice, silly") + } + if len(node) == 1 { + // can't remove the last expressions - we'd end up with an empty SELECT clause + return false + } + withoutElement := append(node[:idx], node[idx+1:]...) 
+ cursor.Replace(withoutElement) + node = withoutElement + removed = true + return true + }, + /*restore*/ func() { + if removed { + front := make(sqlparser.SelectExprs, idx) + copy(front, node[:idx]) + back := make(sqlparser.SelectExprs, len(node)-idx) + copy(back, node[idx:]) + frontWithRestoredExpr := append(front, ae) + node = append(frontWithRestoredExpr, back...) + cursor.Replace(node) + removed = false + return + } + expr.Expr = original + }, + ) + if !visit(item) { + return false + } + } + + return true +} + +func visitWhere(node *sqlparser.Where, visit func(expressionCursor) bool) bool { + exprs := sqlparser.SplitAndExpression(nil, node.Expr) + set := func(input []sqlparser.Expr) { + node.Expr = sqlparser.AndExpressions(input...) + exprs = input + } + return visitExpressions(exprs, set, visit, 0) +} + +func visitJoinCondition(node *sqlparser.JoinCondition, cursor *sqlparser.Cursor, visit func(expressionCursor) bool) bool { + join, ok := cursor.Parent().(*sqlparser.JoinTableExpr) + if !ok { + return true + } + + if node.Using != nil { + return true + } + + // for only left and right joins must the join condition be nonempty + minExprs := 0 + if join.Join == sqlparser.LeftJoinType || join.Join == sqlparser.RightJoinType { + minExprs = 1 + } + + exprs := sqlparser.SplitAndExpression(nil, node.On) + set := func(input []sqlparser.Expr) { + node.On = sqlparser.AndExpressions(input...) 
+ exprs = input + } + return visitExpressions(exprs, set, visit, minExprs) +} + +func visitGroupBy(node sqlparser.GroupBy, cursor *sqlparser.Cursor, visit func(expressionCursor) bool) bool { + set := func(input []sqlparser.Expr) { + node = input + cursor.Replace(node) + } + return visitExpressions(node, set, visit, 0) +} + +func visitOrderBy(node sqlparser.OrderBy, cursor *sqlparser.Cursor, visit func(expressionCursor) bool) bool { + for idx := 0; idx < len(node); idx++ { + order := node[idx] + removed := false + original := sqlparser.CloneExpr(order.Expr) + item := newExprCursor( + order.Expr, + /*replace*/ func(replaceWith sqlparser.Expr) { + if removed { + panic("cant replace after remove without restore") + } + order.Expr = replaceWith + }, + /*remove*/ func() bool { + if removed { + panic("can't remove twice, silly") + } + withoutElement := append(node[:idx], node[idx+1:]...) + if len(withoutElement) == 0 { + var nilVal sqlparser.OrderBy // this is used to create a typed nil value + cursor.Replace(nilVal) + } else { + cursor.Replace(withoutElement) + } + node = withoutElement + removed = true + return true + }, + /*restore*/ func() { + if removed { + front := make(sqlparser.OrderBy, idx) + copy(front, node[:idx]) + back := make(sqlparser.OrderBy, len(node)-idx) + copy(back, node[idx:]) + frontWithRestoredExpr := append(front, order) + node = append(frontWithRestoredExpr, back...) 
+ cursor.Replace(node) + removed = false + return + } + order.Expr = original + }, + ) + if !visit(item) { + return false + } + } + + return true +} + +func visitLimit(node *sqlparser.Limit, cursor *sqlparser.Cursor, visit func(expressionCursor) bool) bool { + if node.Offset != nil { + original := node.Offset + item := newExprCursor(node.Offset, + /*replace*/ func(replaceWith sqlparser.Expr) { + node.Offset = replaceWith + }, + /*remove*/ func() bool { + node.Offset = nil + return true + }, + /*restore*/ func() { + node.Offset = original + }) + if !visit(item) { + return false + } + } + if node.Rowcount != nil { + original := node.Rowcount + item := newExprCursor(node.Rowcount, + /*replace*/ func(replaceWith sqlparser.Expr) { + node.Rowcount = replaceWith + }, + // this removes the whole limit clause + /*remove*/ + func() bool { + var nilVal *sqlparser.Limit // this is used to create a typed nil value + cursor.Replace(nilVal) + return true + }, + /*restore*/ func() { + node.Rowcount = original + }) + if !visit(item) { + return false + } + } + + return true } // visitExpressions allows the cursor to visit all expressions in a slice, @@ -497,6 +647,7 @@ func visitExpressions( exprs []sqlparser.Expr, set func(input []sqlparser.Expr), visit func(expressionCursor) bool, + minExprs int, ) bool { for idx := 0; idx < len(exprs); idx++ { expr := exprs[idx] @@ -513,6 +664,10 @@ func visitExpressions( if removed { panic("can't remove twice, silly") } + // need to keep at least minExprs + if len(exprs) <= minExprs { + return false + } exprs = append(exprs[:idx], exprs[idx+1:]...) 
set(exprs) removed = true diff --git a/go/vt/vtgate/simplifier/simplifier_test.go b/go/vt/vtgate/simplifier/simplifier_test.go index 63f43a7febb..6c2c09b4366 100644 --- a/go/vt/vtgate/simplifier/simplifier_test.go +++ b/go/vt/vtgate/simplifier/simplifier_test.go @@ -22,6 +22,8 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/log" + + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/vtgate/evalengine" "github.com/stretchr/testify/require" @@ -54,11 +56,11 @@ limit 123 offset 456 require.NoError(t, err) visitAllExpressionsInAST(ast.(sqlparser.SelectStatement), func(cursor expressionCursor) bool { fmt.Printf(">> found expression: %s\n", sqlparser.String(cursor.expr)) - cursor.replace(sqlparser.NewIntLiteral("1")) + cursor.remove() fmt.Printf("remove: %s\n", sqlparser.String(ast)) cursor.restore() fmt.Printf("restore: %s\n", sqlparser.String(ast)) - cursor.remove() + cursor.replace(sqlparser.NewIntLiteral("1")) fmt.Printf("replace it with literal: %s\n", sqlparser.String(ast)) cursor.restore() fmt.Printf("restore: %s\n", sqlparser.String(ast)) @@ -82,26 +84,35 @@ func TestAbortExpressionCursor(t *testing.T) { func TestSimplifyEvalEngineExpr(t *testing.T) { // ast struct for L0 + - // L1 + + - // L2 + + + + - // L3 1 2 3 4 5 6 7 8 + // L1 + + + // L2 + + + + + // L3 1 2 3 4 5 6 + + + // L4 7 8 9 10 + + // L4 + i7, i8, i9, i10 := + sqlparser.NewIntLiteral("7"), + sqlparser.NewIntLiteral("8"), + sqlparser.NewIntLiteral("9"), + sqlparser.NewIntLiteral("10") // L3 - i1, i2, i3, i4, i5, i6, i7, i8 := + i1, i2, i3, i4, i5, i6, p31, p32 := sqlparser.NewIntLiteral("1"), sqlparser.NewIntLiteral("2"), sqlparser.NewIntLiteral("3"), sqlparser.NewIntLiteral("4"), sqlparser.NewIntLiteral("5"), sqlparser.NewIntLiteral("6"), - sqlparser.NewIntLiteral("7"), - sqlparser.NewIntLiteral("8") + plus(i7, i8), + plus(i9, i10) + // L2 p21, p22, p23, p24 := plus(i1, i2), plus(i3, i4), plus(i5, i6), - plus(i7, i8) + plus(p31, p32) // L1 p11, p12 := @@ 
-126,7 +137,7 @@ func TestSimplifyEvalEngineExpr(t *testing.T) { } return toInt64 >= 8 }) - log.Infof("simplest expr to evaluate to >= 8: [%s], started from: [%s]", sqlparser.String(expr), sqlparser.String(p0)) + log.Errorf("simplest expr to evaluate to >= 8: [%s], started from: [%s]", sqlparser.String(expr), sqlparser.String(p0)) } func plus(a, b sqlparser.Expr) sqlparser.Expr { diff --git a/go/vt/vtgate/tabletgateway.go b/go/vt/vtgate/tabletgateway.go index a14ed8c7293..5f3627bb848 100644 --- a/go/vt/vtgate/tabletgateway.go +++ b/go/vt/vtgate/tabletgateway.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "math/rand" + "runtime/debug" "sort" "sync" "sync/atomic" @@ -58,6 +59,7 @@ func init() { servenv.OnParseFor("vtgate", func(fs *pflag.FlagSet) { fs.StringVar(&CellsToWatch, "cells_to_watch", "", "comma-separated list of cells for watching tablets") fs.StringVar(&bufferImplementation, "buffer_implementation", "keyspace_events", "Allowed values: healthcheck (legacy implementation), keyspace_events (default)") + fs.MarkDeprecated("buffer_implementation", "The 'healthcheck' buffer implementation has been removed in v18 and this option will be removed in v19") fs.DurationVar(&initialTabletTimeout, "gateway_initial_tablet_timeout", 30*time.Second, "At startup, the tabletGateway will wait up to this duration to get at least one tablet per keyspace/shard/tablet type") fs.IntVar(&retryCount, "retry-count", 2, "retry count") }) @@ -116,57 +118,31 @@ func NewTabletGateway(ctx context.Context, hc discovery.HealthCheck, serv srvtop func (gw *TabletGateway) setupBuffering(ctx context.Context) { cfg := buffer.NewConfigFromFlags() + if !cfg.Enabled { + log.Info("Query buffering is disabled") + return + } gw.buffer = buffer.New(cfg) - switch bufferImplementation { - case "healthcheck": - // subscribe to healthcheck updates so that buffer can be notified if needed - // we run this in a separate goroutine so that normal processing doesn't need to block - hcChan := gw.hc.Subscribe() - 
bufferCtx, bufferCancel := context.WithCancel(ctx) + gw.kev = discovery.NewKeyspaceEventWatcher(ctx, gw.srvTopoServer, gw.hc, gw.localCell) + ksChan := gw.kev.Subscribe() + bufferCtx, bufferCancel := context.WithCancel(ctx) - go func(ctx context.Context, c chan *discovery.TabletHealth, buffer *buffer.Buffer) { - defer bufferCancel() + go func(ctx context.Context, c chan *discovery.KeyspaceEvent, buffer *buffer.Buffer) { + defer bufferCancel() - for { - select { - case <-ctx.Done(): + for { + select { + case <-ctx.Done(): + return + case result := <-ksChan: + if result == nil { return - case result := <-hcChan: - if result == nil { - return - } - if result.Target.TabletType == topodatapb.TabletType_PRIMARY { - buffer.ProcessPrimaryHealth(result) - } } + buffer.HandleKeyspaceEvent(result) } - }(bufferCtx, hcChan, gw.buffer) - - case "keyspace_events": - gw.kev = discovery.NewKeyspaceEventWatcher(ctx, gw.srvTopoServer, gw.hc, gw.localCell) - ksChan := gw.kev.Subscribe() - bufferCtx, bufferCancel := context.WithCancel(ctx) - - go func(ctx context.Context, c chan *discovery.KeyspaceEvent, buffer *buffer.Buffer) { - defer bufferCancel() - - for { - select { - case <-ctx.Done(): - return - case result := <-ksChan: - if result == nil { - return - } - buffer.HandleKeyspaceEvent(result) - } - } - }(bufferCtx, ksChan, gw.buffer) - - default: - log.Exitf("unknown buffering implementation for TabletGateway: %q", bufferImplementation) - } + } + }(bufferCtx, ksChan, gw.buffer) } // QueryServiceByAlias satisfies the Gateway interface @@ -175,6 +151,14 @@ func (gw *TabletGateway) QueryServiceByAlias(alias *topodatapb.TabletAlias, targ return queryservice.Wrap(qs, gw.withShardError), NewShardError(err, target) } +// GetServingKeyspaces returns list of serving keyspaces. 
+func (gw *TabletGateway) GetServingKeyspaces() []string { + if gw.kev == nil { + return nil + } + return gw.kev.GetServingKeyspaces() +} + // RegisterStats registers the stats to export the lag since the last refresh // and the checksum of the topology func (gw *TabletGateway) RegisterStats() { @@ -182,9 +166,9 @@ func (gw *TabletGateway) RegisterStats() { } // WaitForTablets is part of the Gateway interface. -func (gw *TabletGateway) WaitForTablets(tabletTypesToWait []topodatapb.TabletType) (err error) { +func (gw *TabletGateway) WaitForTablets(ctx context.Context, tabletTypesToWait []topodatapb.TabletType) (err error) { log.Infof("Gateway waiting for serving tablets of types %v ...", tabletTypesToWait) - ctx, cancel := context.WithTimeout(context.Background(), initialTabletTimeout) + ctx, cancel := context.WithTimeout(ctx, initialTabletTimeout) defer cancel() defer func() { @@ -217,7 +201,9 @@ func (gw *TabletGateway) WaitForTablets(tabletTypesToWait []topodatapb.TabletTyp // Close shuts down underlying connections. // This function hides the inner implementation. func (gw *TabletGateway) Close(_ context.Context) error { - gw.buffer.Shutdown() + if gw.buffer != nil { + gw.buffer.Shutdown() + } return gw.hc.Close() } @@ -244,6 +230,7 @@ func (gw *TabletGateway) CacheStatus() TabletCacheStatusList { // withShardError should not be combined with withRetry. 
func (gw *TabletGateway) withRetry(ctx context.Context, target *querypb.Target, _ queryservice.QueryService, _ string, inTransaction bool, inner func(ctx context.Context, target *querypb.Target, conn queryservice.QueryService) (bool, error)) error { + // for transactions, we connect to a specific tablet instead of letting gateway choose one if inTransaction && target.TabletType != topodatapb.TabletType_PRIMARY { return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "tabletGateway's query service can only be used for non-transactional queries on replicas") @@ -267,12 +254,11 @@ func (gw *TabletGateway) withRetry(ctx context.Context, target *querypb.Target, bufferedOnce := false for i := 0; i < gw.retryCount+1; i++ { - // Check if we should buffer PRIMARY queries which failed due to an ongoing - // failover. + // Check if we should buffer PRIMARY queries which failed due to an ongoing failover. // Note: We only buffer once and only "!inTransaction" queries i.e. // a) no transaction is necessary (e.g. critical reads) or // b) no transaction was created yet. - if !bufferedOnce && !inTransaction && target.TabletType == topodatapb.TabletType_PRIMARY { + if gw.buffer != nil && !bufferedOnce && !inTransaction && target.TabletType == topodatapb.TabletType_PRIMARY { // The next call blocks if we should buffer during a failover. retryDone, bufferErr := gw.buffer.WaitForFailoverEnd(ctx, target.Keyspace, target.Shard, err) @@ -297,13 +283,14 @@ func (gw *TabletGateway) withRetry(ctx context.Context, target *querypb.Target, // if we have a keyspace event watcher, check if the reason why our primary is not available is that it's currently being resharded // or if a reparent operation is in progress. 
if kev := gw.kev; kev != nil { - if kev.TargetIsBeingResharded(target) { - err = vterrors.Errorf(vtrpcpb.Code_CLUSTER_EVENT, "current keyspace is being resharded") + if kev.TargetIsBeingResharded(ctx, target) { + log.V(2).Infof("current keyspace is being resharded, retrying: %s: %s", target.Keyspace, debug.Stack()) + err = vterrors.Errorf(vtrpcpb.Code_CLUSTER_EVENT, buffer.ClusterEventReshardingInProgress) continue } - primary, notServing := kev.PrimaryIsNotServing(target) + primary, notServing := kev.PrimaryIsNotServing(ctx, target) if notServing { - err = vterrors.Errorf(vtrpcpb.Code_CLUSTER_EVENT, "primary is not serving, there may be a reparent operation in progress") + err = vterrors.Errorf(vtrpcpb.Code_CLUSTER_EVENT, buffer.ClusterEventReparentInProgress) continue } // if primary is serving, but we initially found no tablet, we're in an inconsistent state @@ -329,6 +316,7 @@ func (gw *TabletGateway) withRetry(ctx context.Context, target *querypb.Target, break } } + gw.shuffleTablets(gw.localCell, tablets) var th *discovery.TabletHealth diff --git a/go/vt/vtgate/tabletgateway_flaky_test.go b/go/vt/vtgate/tabletgateway_flaky_test.go index 80a93808486..f625b5599cd 100644 --- a/go/vt/vtgate/tabletgateway_flaky_test.go +++ b/go/vt/vtgate/tabletgateway_flaky_test.go @@ -17,12 +17,13 @@ limitations under the License. 
package vtgate import ( - "context" "testing" "time" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" @@ -35,11 +36,11 @@ import ( // TestGatewayBufferingWhenPrimarySwitchesServingState is used to test that the buffering mechanism buffers the queries when a primary goes to a non serving state and // stops buffering when the primary is healthy again func TestGatewayBufferingWhenPrimarySwitchesServingState(t *testing.T) { - bufferImplementation = "keyspace_events" + ctx := utils.LeakCheckContext(t) + buffer.SetBufferingModeInTestingEnv(true) defer func() { buffer.SetBufferingModeInTestingEnv(false) - bufferImplementation = "healthcheck" }() keyspace := "ks1" @@ -57,7 +58,8 @@ func TestGatewayBufferingWhenPrimarySwitchesServingState(t *testing.T) { // create a new fake health check. We want to check the buffering code which uses Subscribe, so we must also pass a channel hc := discovery.NewFakeHealthCheck(make(chan *discovery.TabletHealth)) // create a new tablet gateway - tg := NewTabletGateway(context.Background(), hc, ts, "cell") + tg := NewTabletGateway(ctx, hc, ts, "cell") + defer tg.Close(ctx) // add a primary tabelt which is serving sbc := hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, nil) @@ -77,7 +79,7 @@ func TestGatewayBufferingWhenPrimarySwitchesServingState(t *testing.T) { sbc.SetResults([]*sqltypes.Result{sqlResult1}) // run a query that we indeed get the result added to the sandbox connection back - res, err := tg.Execute(context.Background(), target, "query", nil, 0, 0, nil) + res, err := tg.Execute(ctx, target, "query", nil, 0, 0, nil) require.NoError(t, err) require.Equal(t, res, sqlResult1) @@ -95,7 +97,7 @@ func TestGatewayBufferingWhenPrimarySwitchesServingState(t *testing.T) { // execute the query in a go routine since it should be buffered, and check that it eventually succeed queryChan := make(chan struct{}) go func() { - 
res, err = tg.Execute(context.Background(), target, "query", nil, 0, 0, nil) + res, err = tg.Execute(ctx, target, "query", nil, 0, 0, nil) queryChan <- struct{}{} }() @@ -119,11 +121,11 @@ func TestGatewayBufferingWhenPrimarySwitchesServingState(t *testing.T) { // TestGatewayBufferingWhileReparenting is used to test that the buffering mechanism buffers the queries when a PRS happens // the healthchecks that happen during a PRS are simulated in this test func TestGatewayBufferingWhileReparenting(t *testing.T) { - bufferImplementation = "keyspace_events" + ctx := utils.LeakCheckContext(t) + buffer.SetBufferingModeInTestingEnv(true) defer func() { buffer.SetBufferingModeInTestingEnv(false) - bufferImplementation = "healthcheck" }() keyspace := "ks1" @@ -143,7 +145,8 @@ func TestGatewayBufferingWhileReparenting(t *testing.T) { // create a new fake health check. We want to check the buffering code which uses Subscribe, so we must also pass a channel hc := discovery.NewFakeHealthCheck(make(chan *discovery.TabletHealth)) // create a new tablet gateway - tg := NewTabletGateway(context.Background(), hc, ts, "cell") + tg := NewTabletGateway(ctx, hc, ts, "cell") + defer tg.Close(ctx) // add a primary tabelt which is serving sbc := hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, nil) @@ -166,7 +169,7 @@ func TestGatewayBufferingWhileReparenting(t *testing.T) { // run a query that we indeed get the result added to the sandbox connection back // this also checks that the query reaches the primary tablet and not the replica - res, err := tg.Execute(context.Background(), target, "query", nil, 0, 0, nil) + res, err := tg.Execute(ctx, target, "query", nil, 0, 0, nil) require.NoError(t, err) require.Equal(t, res, sqlResult1) @@ -195,7 +198,7 @@ func TestGatewayBufferingWhileReparenting(t *testing.T) { hc.Broadcast(primaryTablet) require.Len(t, tg.hc.GetHealthyTabletStats(target), 0, "GetHealthyTabletStats has tablets even though it shouldn't") - _, 
isNotServing := tg.kev.PrimaryIsNotServing(target) + _, isNotServing := tg.kev.PrimaryIsNotServing(ctx, target) require.True(t, isNotServing) // add a result to the sandbox connection of the new primary @@ -204,7 +207,7 @@ func TestGatewayBufferingWhileReparenting(t *testing.T) { // execute the query in a go routine since it should be buffered, and check that it eventually succeed queryChan := make(chan struct{}) go func() { - res, err = tg.Execute(context.Background(), target, "query", nil, 0, 0, nil) + res, err = tg.Execute(ctx, target, "query", nil, 0, 0, nil) queryChan <- struct{}{} }() @@ -226,7 +229,7 @@ outer: case <-timeout: require.Fail(t, "timed out - could not verify the new primary") case <-time.After(10 * time.Millisecond): - newPrimary, notServing := tg.kev.PrimaryIsNotServing(target) + newPrimary, notServing := tg.kev.PrimaryIsNotServing(ctx, target) if newPrimary != nil && newPrimary.Uid == 1 && !notServing { break outer } @@ -249,11 +252,11 @@ outer: // This is inconsistent and we want to fail properly. This scenario used to panic since no error and no results were // returned. func TestInconsistentStateDetectedBuffering(t *testing.T) { - bufferImplementation = "keyspace_events" + ctx := utils.LeakCheckContext(t) + buffer.SetBufferingModeInTestingEnv(true) defer func() { buffer.SetBufferingModeInTestingEnv(false) - bufferImplementation = "healthcheck" }() keyspace := "ks1" @@ -271,7 +274,8 @@ func TestInconsistentStateDetectedBuffering(t *testing.T) { // create a new fake health check. 
We want to check the buffering code which uses Subscribe, so we must also pass a channel hc := discovery.NewFakeHealthCheck(make(chan *discovery.TabletHealth)) // create a new tablet gateway - tg := NewTabletGateway(context.Background(), hc, ts, "cell") + tg := NewTabletGateway(ctx, hc, ts, "cell") + defer tg.Close(ctx) tg.retryCount = 0 @@ -310,7 +314,7 @@ func TestInconsistentStateDetectedBuffering(t *testing.T) { var err error queryChan := make(chan struct{}) go func() { - res, err = tg.Execute(context.Background(), target, "query", nil, 0, 0, nil) + res, err = tg.Execute(ctx, target, "query", nil, 0, 0, nil) queryChan <- struct{}{} }() diff --git a/go/vt/vtgate/tabletgateway_test.go b/go/vt/vtgate/tabletgateway_test.go index 99388551ebf..32d18dcc9ab 100644 --- a/go/vt/vtgate/tabletgateway_test.go +++ b/go/vt/vtgate/tabletgateway_test.go @@ -25,6 +25,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/discovery" querypb "vitess.io/vitess/go/vt/proto/query" @@ -35,19 +37,21 @@ import ( ) func TestTabletGatewayExecute(t *testing.T) { - testTabletGatewayGeneric(t, func(tg *TabletGateway, target *querypb.Target) error { - _, err := tg.Execute(context.Background(), target, "query", nil, 0, 0, nil) + ctx := utils.LeakCheckContext(t) + testTabletGatewayGeneric(t, ctx, func(ctx context.Context, tg *TabletGateway, target *querypb.Target) error { + _, err := tg.Execute(ctx, target, "query", nil, 0, 0, nil) return err }) - testTabletGatewayTransact(t, func(tg *TabletGateway, target *querypb.Target) error { - _, err := tg.Execute(context.Background(), target, "query", nil, 1, 0, nil) + testTabletGatewayTransact(t, ctx, func(ctx context.Context, tg *TabletGateway, target *querypb.Target) error { + _, err := tg.Execute(ctx, target, "query", nil, 1, 0, nil) return err }) } func TestTabletGatewayExecuteStream(t *testing.T) { - 
testTabletGatewayGeneric(t, func(tg *TabletGateway, target *querypb.Target) error { - err := tg.StreamExecute(context.Background(), target, "query", nil, 0, 0, nil, func(qr *sqltypes.Result) error { + ctx := utils.LeakCheckContext(t) + testTabletGatewayGeneric(t, ctx, func(ctx context.Context, tg *TabletGateway, target *querypb.Target) error { + err := tg.StreamExecute(ctx, target, "query", nil, 0, 0, nil, func(qr *sqltypes.Result) error { return nil }) return err @@ -55,36 +59,44 @@ func TestTabletGatewayExecuteStream(t *testing.T) { } func TestTabletGatewayBegin(t *testing.T) { - testTabletGatewayGeneric(t, func(tg *TabletGateway, target *querypb.Target) error { - _, err := tg.Begin(context.Background(), target, nil) + ctx := utils.LeakCheckContext(t) + testTabletGatewayGeneric(t, ctx, func(ctx context.Context, tg *TabletGateway, target *querypb.Target) error { + _, err := tg.Begin(ctx, target, nil) return err }) } func TestTabletGatewayCommit(t *testing.T) { - testTabletGatewayTransact(t, func(tg *TabletGateway, target *querypb.Target) error { - _, err := tg.Commit(context.Background(), target, 1) + ctx := utils.LeakCheckContext(t) + testTabletGatewayTransact(t, ctx, func(ctx context.Context, tg *TabletGateway, target *querypb.Target) error { + _, err := tg.Commit(ctx, target, 1) return err }) } func TestTabletGatewayRollback(t *testing.T) { - testTabletGatewayTransact(t, func(tg *TabletGateway, target *querypb.Target) error { - _, err := tg.Rollback(context.Background(), target, 1) + ctx := utils.LeakCheckContext(t) + testTabletGatewayTransact(t, ctx, func(ctx context.Context, tg *TabletGateway, target *querypb.Target) error { + _, err := tg.Rollback(ctx, target, 1) return err }) } func TestTabletGatewayBeginExecute(t *testing.T) { - testTabletGatewayGeneric(t, func(tg *TabletGateway, target *querypb.Target) error { - _, _, err := tg.BeginExecute(context.Background(), target, nil, "query", nil, 0, nil) + ctx := utils.LeakCheckContext(t) + 
testTabletGatewayGeneric(t, ctx, func(ctx context.Context, tg *TabletGateway, target *querypb.Target) error { + _, _, err := tg.BeginExecute(ctx, target, nil, "query", nil, 0, nil) return err }) } func TestTabletGatewayShuffleTablets(t *testing.T) { + ctx := utils.LeakCheckContext(t) + hc := discovery.NewFakeHealthCheck(nil) - tg := NewTabletGateway(context.Background(), hc, nil, "local") + ts := &fakeTopoServer{} + tg := NewTabletGateway(ctx, hc, ts, "local") + defer tg.Close(ctx) ts1 := &discovery.TabletHealth{ Tablet: topo.NewTablet(1, "cell1", "host1"), @@ -141,6 +153,8 @@ func TestTabletGatewayShuffleTablets(t *testing.T) { } func TestTabletGatewayReplicaTransactionError(t *testing.T) { + ctx := utils.LeakCheckContext(t) + keyspace := "ks" shard := "0" // transactions on REPLICA are not allowed from tabletgateway @@ -154,14 +168,16 @@ func TestTabletGatewayReplicaTransactionError(t *testing.T) { TabletType: tabletType, } hc := discovery.NewFakeHealthCheck(nil) - tg := NewTabletGateway(context.Background(), hc, nil, "cell") + ts := &fakeTopoServer{} + tg := NewTabletGateway(ctx, hc, ts, "cell") + defer tg.Close(ctx) _ = hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, nil) - _, err := tg.Execute(context.Background(), target, "query", nil, 1, 0, nil) + _, err := tg.Execute(ctx, target, "query", nil, 1, 0, nil) verifyContainsError(t, err, "query service can only be used for non-transactional queries on replicas", vtrpcpb.Code_INTERNAL) } -func testTabletGatewayGeneric(t *testing.T, f func(tg *TabletGateway, target *querypb.Target) error) { +func testTabletGatewayGeneric(t *testing.T, ctx context.Context, f func(ctx context.Context, tg *TabletGateway, target *querypb.Target) error) { t.Helper() keyspace := "ks" shard := "0" @@ -174,23 +190,25 @@ func testTabletGatewayGeneric(t *testing.T, f func(tg *TabletGateway, target *qu TabletType: tabletType, } hc := discovery.NewFakeHealthCheck(nil) - tg := NewTabletGateway(context.Background(), 
hc, nil, "cell") + ts := &fakeTopoServer{} + tg := NewTabletGateway(ctx, hc, ts, "cell") + defer tg.Close(ctx) // no tablet want := []string{"target: ks.0.replica", `no healthy tablet available for 'keyspace:"ks" shard:"0" tablet_type:REPLICA`} - err := f(tg, target) + err := f(ctx, tg, target) verifyShardErrors(t, err, want, vtrpcpb.Code_UNAVAILABLE) // tablet with error hc.Reset() hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, false, 10, fmt.Errorf("no connection")) - err = f(tg, target) + err = f(ctx, tg, target) verifyShardErrors(t, err, want, vtrpcpb.Code_UNAVAILABLE) // tablet without connection hc.Reset() _ = hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, false, 10, nil).Tablet() - err = f(tg, target) + err = f(ctx, tg, target) verifyShardErrors(t, err, want, vtrpcpb.Code_UNAVAILABLE) // retry error @@ -200,7 +218,7 @@ func testTabletGatewayGeneric(t *testing.T, f func(tg *TabletGateway, target *qu sc1.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 sc2.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 - err = f(tg, target) + err = f(ctx, tg, target) verifyContainsError(t, err, "target: ks.0.replica", vtrpcpb.Code_FAILED_PRECONDITION) // fatal error @@ -209,25 +227,26 @@ func testTabletGatewayGeneric(t *testing.T, f func(tg *TabletGateway, target *qu sc2 = hc.AddTestTablet("cell", host, port+1, keyspace, shard, tabletType, true, 10, nil) sc1.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 sc2.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 - err = f(tg, target) + err = f(ctx, tg, target) verifyContainsError(t, err, "target: ks.0.replica", vtrpcpb.Code_FAILED_PRECONDITION) // server error - no retry hc.Reset() sc1 = hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, nil) sc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 - err = f(tg, target) + err = f(ctx, tg, target) assert.Equal(t, vtrpcpb.Code_INVALID_ARGUMENT, vterrors.Code(err)) // no failure hc.Reset() 
hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, nil) - err = f(tg, target) + err = f(ctx, tg, target) assert.NoError(t, err) } -func testTabletGatewayTransact(t *testing.T, f func(tg *TabletGateway, target *querypb.Target) error) { +func testTabletGatewayTransact(t *testing.T, ctx context.Context, f func(ctx context.Context, tg *TabletGateway, target *querypb.Target) error) { t.Helper() + keyspace := "ks" shard := "0" // test with PRIMARY because replica transactions don't use gateway's queryservice @@ -241,7 +260,9 @@ func testTabletGatewayTransact(t *testing.T, f func(tg *TabletGateway, target *q TabletType: tabletType, } hc := discovery.NewFakeHealthCheck(nil) - tg := NewTabletGateway(context.Background(), hc, nil, "cell") + ts := &fakeTopoServer{} + tg := NewTabletGateway(ctx, hc, ts, "cell") + defer tg.Close(ctx) // retry error - no retry sc1 := hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, nil) @@ -249,14 +270,14 @@ func testTabletGatewayTransact(t *testing.T, f func(tg *TabletGateway, target *q sc1.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 sc2.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 - err := f(tg, target) + err := f(ctx, tg, target) verifyContainsError(t, err, "target: ks.0.primary", vtrpcpb.Code_FAILED_PRECONDITION) // server error - no retry hc.Reset() sc1 = hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, nil) sc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 - err = f(tg, target) + err = f(ctx, tg, target) verifyContainsError(t, err, "target: ks.0.primary", vtrpcpb.Code_INVALID_ARGUMENT) } diff --git a/go/vt/vtgate/tx_conn_test.go b/go/vt/vtgate/tx_conn_test.go index f7dff51accd..3fc141c64ac 100644 --- a/go/vt/vtgate/tx_conn_test.go +++ b/go/vt/vtgate/tx_conn_test.go @@ -43,7 +43,9 @@ var queries = []*querypb.BoundQuery{{Sql: "query1"}} var twoQueries = []*querypb.BoundQuery{{Sql: "query1"}, {Sql: "query1"}} func TestTxConnBegin(t *testing.T) { - 
sc, sbc0, _, rss0, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, _, rss0, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") session := &vtgatepb.Session{} // begin @@ -63,7 +65,9 @@ func TestTxConnBegin(t *testing.T) { } func TestTxConnCommitFailure(t *testing.T) { - sc, sbc0, sbc1, rss0, rss1, rss01 := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, rss1, rss01 := newTestTxConnEnv(t, ctx, "TestTxConn") sc.txConn.mode = vtgatepb.TransactionMode_MULTI // Sequence the executes to ensure commit order @@ -120,7 +124,9 @@ func TestTxConnCommitFailure(t *testing.T) { } func TestTxConnCommitSuccess(t *testing.T) { - sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConn") sc.txConn.mode = vtgatepb.TransactionMode_MULTI // Sequence the executes to ensure commit order @@ -171,7 +177,9 @@ func TestTxConnCommitSuccess(t *testing.T) { } func TestTxConnReservedCommitSuccess(t *testing.T) { - sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConn") sc.txConn.mode = vtgatepb.TransactionMode_MULTI // Sequence the executes to ensure commit order @@ -253,8 +261,10 @@ func TestTxConnReservedCommitSuccess(t *testing.T) { } func TestTxConnReservedOn2ShardTxOn1ShardAndCommit(t *testing.T) { + ctx := utils.LeakCheckContext(t) + keyspace := "TestTxConn" - sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, keyspace) + sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, keyspace) sc.txConn.mode = vtgatepb.TransactionMode_MULTI // Sequence the executes to ensure shard session order @@ -346,8 +356,10 @@ func TestTxConnReservedOn2ShardTxOn1ShardAndCommit(t *testing.T) { } func TestTxConnReservedOn2ShardTxOn1ShardAndRollback(t *testing.T) { + ctx := 
utils.LeakCheckContext(t) + keyspace := "TestTxConn" - sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, keyspace) + sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, keyspace) sc.txConn.mode = vtgatepb.TransactionMode_MULTI // Sequence the executes to ensure shard session order @@ -439,7 +451,9 @@ func TestTxConnReservedOn2ShardTxOn1ShardAndRollback(t *testing.T) { } func TestTxConnCommitOrderFailure1(t *testing.T) { - sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn") sc.txConn.mode = vtgatepb.TransactionMode_MULTI queries := []*querypb.BoundQuery{{Sql: "query1"}} @@ -470,7 +484,9 @@ func TestTxConnCommitOrderFailure1(t *testing.T) { } func TestTxConnCommitOrderFailure2(t *testing.T) { - sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn") sc.txConn.mode = vtgatepb.TransactionMode_MULTI queries := []*querypb.BoundQuery{{ @@ -502,7 +518,9 @@ func TestTxConnCommitOrderFailure2(t *testing.T) { } func TestTxConnCommitOrderFailure3(t *testing.T) { - sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn") sc.txConn.mode = vtgatepb.TransactionMode_MULTI queries := []*querypb.BoundQuery{{ @@ -542,7 +560,9 @@ func TestTxConnCommitOrderFailure3(t *testing.T) { } func TestTxConnCommitOrderSuccess(t *testing.T) { - sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn") sc.txConn.mode = vtgatepb.TransactionMode_MULTI queries := []*querypb.BoundQuery{{ @@ -638,7 +658,9 @@ func TestTxConnCommitOrderSuccess(t *testing.T) { } func TestTxConnReservedCommitOrderSuccess(t *testing.T) 
{ - sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn") sc.txConn.mode = vtgatepb.TransactionMode_MULTI queries := []*querypb.BoundQuery{{ @@ -779,7 +801,9 @@ func TestTxConnReservedCommitOrderSuccess(t *testing.T) { } func TestTxConnCommit2PC(t *testing.T) { - sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, "TestTxConnCommit2PC") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PC") session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) @@ -795,7 +819,9 @@ func TestTxConnCommit2PC(t *testing.T) { } func TestTxConnCommit2PCOneParticipant(t *testing.T) { - sc, sbc0, _, rss0, _, _ := newTestTxConnEnv(t, "TestTxConnCommit2PCOneParticipant") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, _, rss0, _, _ := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PCOneParticipant") session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) session.TransactionMode = vtgatepb.TransactionMode_TWOPC @@ -805,7 +831,9 @@ func TestTxConnCommit2PCOneParticipant(t *testing.T) { } func TestTxConnCommit2PCCreateTransactionFail(t *testing.T) { - sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, "TestTxConnCommit2PCCreateTransactionFail") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PCCreateTransactionFail") session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) @@ -827,7 +855,9 @@ func TestTxConnCommit2PCCreateTransactionFail(t *testing.T) { } func TestTxConnCommit2PCPrepareFail(t *testing.T) { - sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, "TestTxConnCommit2PCPrepareFail") + ctx := 
utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PCPrepareFail") session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) @@ -847,7 +877,9 @@ func TestTxConnCommit2PCPrepareFail(t *testing.T) { } func TestTxConnCommit2PCStartCommitFail(t *testing.T) { - sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, "TestTxConnCommit2PCStartCommitFail") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PCStartCommitFail") session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) @@ -867,7 +899,9 @@ func TestTxConnCommit2PCStartCommitFail(t *testing.T) { } func TestTxConnCommit2PCCommitPreparedFail(t *testing.T) { - sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, "TestTxConnCommit2PCCommitPreparedFail") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PCCommitPreparedFail") session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) @@ -887,7 +921,9 @@ func TestTxConnCommit2PCCommitPreparedFail(t *testing.T) { } func TestTxConnCommit2PCConcludeTransactionFail(t *testing.T) { - sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, "TestTxConnCommit2PCConcludeTransactionFail") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PCConcludeTransactionFail") session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) @@ -907,7 +943,9 @@ func TestTxConnCommit2PCConcludeTransactionFail(t *testing.T) { } func TestTxConnRollback(t *testing.T) { - sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, "TxConnRollback") + ctx := 
utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TxConnRollback") session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) @@ -921,7 +959,9 @@ func TestTxConnRollback(t *testing.T) { } func TestTxConnReservedRollback(t *testing.T) { - sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, "TxConnReservedRollback") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TxConnReservedRollback") session := NewSafeSession(&vtgatepb.Session{InTransaction: true, InReservedConn: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) @@ -956,7 +996,9 @@ func TestTxConnReservedRollback(t *testing.T) { } func TestTxConnReservedRollbackFailure(t *testing.T) { - sc, sbc0, sbc1, rss0, rss1, rss01 := newTestTxConnEnv(t, "TxConnReservedRollback") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, rss1, rss01 := newTestTxConnEnv(t, ctx, "TxConnReservedRollback") session := NewSafeSession(&vtgatepb.Session{InTransaction: true, InReservedConn: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) @@ -985,7 +1027,9 @@ func TestTxConnReservedRollbackFailure(t *testing.T) { } func TestTxConnResolveOnPrepare(t *testing.T) { - sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") dtid := "TestTxConn:0:1234" sbc0.ReadTransactionResults = []*querypb.TransactionMetadata{{ @@ -1006,7 +1050,9 @@ func TestTxConnResolveOnPrepare(t *testing.T) { } func TestTxConnResolveOnRollback(t *testing.T) { - sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") dtid := "TestTxConn:0:1234" sbc0.ReadTransactionResults = []*querypb.TransactionMetadata{{ @@ -1027,7 +1073,9 @@ 
func TestTxConnResolveOnRollback(t *testing.T) { } func TestTxConnResolveOnCommit(t *testing.T) { - sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") dtid := "TestTxConn:0:1234" sbc0.ReadTransactionResults = []*querypb.TransactionMetadata{{ @@ -1048,7 +1096,9 @@ func TestTxConnResolveOnCommit(t *testing.T) { } func TestTxConnResolveInvalidDTID(t *testing.T) { - sc, _, _, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, _, _, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") err := sc.txConn.Resolve(ctx, "abcd") want := "invalid parts in dtid: abcd" @@ -1056,7 +1106,9 @@ func TestTxConnResolveInvalidDTID(t *testing.T) { } func TestTxConnResolveReadTransactionFail(t *testing.T) { - sc, sbc0, _, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, _, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") dtid := "TestTxConn:0:1234" sbc0.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 @@ -1067,7 +1119,9 @@ func TestTxConnResolveReadTransactionFail(t *testing.T) { } func TestTxConnResolveInternalError(t *testing.T) { - sc, sbc0, _, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, _, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") dtid := "TestTxConn:0:1234" sbc0.ReadTransactionResults = []*querypb.TransactionMetadata{{ @@ -1086,7 +1140,9 @@ func TestTxConnResolveInternalError(t *testing.T) { } func TestTxConnResolveSetRollbackFail(t *testing.T) { - sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") dtid := "TestTxConn:0:1234" sbc0.ReadTransactionResults = []*querypb.TransactionMetadata{{ @@ -1110,7 +1166,9 @@ func TestTxConnResolveSetRollbackFail(t *testing.T) { } func TestTxConnResolveRollbackPreparedFail(t *testing.T) { 
- sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") dtid := "TestTxConn:0:1234" sbc0.ReadTransactionResults = []*querypb.TransactionMetadata{{ @@ -1134,7 +1192,9 @@ func TestTxConnResolveRollbackPreparedFail(t *testing.T) { } func TestTxConnResolveCommitPreparedFail(t *testing.T) { - sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") dtid := "TestTxConn:0:1234" sbc0.ReadTransactionResults = []*querypb.TransactionMetadata{{ @@ -1158,7 +1218,9 @@ func TestTxConnResolveCommitPreparedFail(t *testing.T) { } func TestTxConnResolveConcludeTransactionFail(t *testing.T) { - sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") dtid := "TestTxConn:0:1234" sbc0.ReadTransactionResults = []*querypb.TransactionMetadata{{ @@ -1182,6 +1244,8 @@ func TestTxConnResolveConcludeTransactionFail(t *testing.T) { } func TestTxConnMultiGoSessions(t *testing.T) { + ctx := utils.LeakCheckContext(t) + txc := &TxConn{} input := []*vtgatepb.Session_ShardSession{{ @@ -1249,7 +1313,9 @@ func TestTxConnMultiGoTargets(t *testing.T) { } func TestTxConnAccessModeReset(t *testing.T) { - sc, _, _, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, _, _, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") tcases := []struct { name string @@ -1290,14 +1356,14 @@ func TestTxConnAccessModeReset(t *testing.T) { } } -func newTestTxConnEnv(t *testing.T, name string) (sc *ScatterConn, sbc0, sbc1 *sandboxconn.SandboxConn, rss0, rss1, rss01 []*srvtopo.ResolvedShard) { +func newTestTxConnEnv(t *testing.T, ctx context.Context, name string) (sc *ScatterConn, sbc0, sbc1 *sandboxconn.SandboxConn, rss0, rss1, rss01 []*srvtopo.ResolvedShard) { 
t.Helper() createSandbox(name) hc := discovery.NewFakeHealthCheck(nil) - sc = newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc = newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 = hc.AddTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) sbc1 = hc.AddTestTablet("aa", "1", 1, name, "1", topodatapb.TabletType_PRIMARY, true, 1, nil) - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") var err error rss0, err = res.ResolveDestination(ctx, name, topodatapb.TabletType_PRIMARY, key.DestinationShard("0")) require.NoError(t, err) diff --git a/go/vt/vtgate/vcursor_impl.go b/go/vt/vtgate/vcursor_impl.go index e42ae3e6421..28ae3176357 100644 --- a/go/vt/vtgate/vcursor_impl.go +++ b/go/vt/vtgate/vcursor_impl.go @@ -25,13 +25,12 @@ import ( "sync/atomic" "time" - "vitess.io/vitess/go/vt/vtgate/logstats" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/mysql" "github.com/google/uuid" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" @@ -41,6 +40,7 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" @@ -51,9 +51,12 @@ import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/buffer" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/logstats" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" 
"vitess.io/vitess/go/vt/vtgate/vschemaacl" + "vitess.io/vitess/go/vt/vtgate/vtgateservice" ) var _ engine.VCursor = (*vcursorImpl)(nil) @@ -63,7 +66,7 @@ var _ vindexes.VCursor = (*vcursorImpl)(nil) // vcursor_impl needs these facilities to be able to be able to execute queries for vindexes type iExecute interface { - Execute(ctx context.Context, c *mysql.Conn, method string, session *SafeSession, s string, vars map[string]*querypb.BindVariable) (*sqltypes.Result, error) + Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, c *mysql.Conn, method string, session *SafeSession, s string, vars map[string]*querypb.BindVariable) (*sqltypes.Result, error) ExecuteMultiShard(ctx context.Context, primitive engine.Primitive, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, session *SafeSession, autocommit bool, ignoreMaxMemoryRows bool) (qr *sqltypes.Result, errs []error) StreamExecuteMulti(ctx context.Context, primitive engine.Primitive, query string, rss []*srvtopo.ResolvedShard, vars []map[string]*querypb.BindVariable, session *SafeSession, autocommit bool, callback func(reply *sqltypes.Result) error) []error ExecuteLock(ctx context.Context, rs *srvtopo.ResolvedShard, query *querypb.BoundQuery, session *SafeSession, lockFuncType sqlparser.LockingFuncType) (*sqltypes.Result, error) @@ -343,13 +346,7 @@ func (vc *vcursorImpl) AnyKeyspace() (*vindexes.Keyspace, error) { return nil, errNoDbAvailable } - var keyspaces = make([]*vindexes.Keyspace, 0, len(vc.vschema.Keyspaces)) - for _, ks := range vc.vschema.Keyspaces { - keyspaces = append(keyspaces, ks.Keyspace) - } - sort.Slice(keyspaces, func(i, j int) bool { - return keyspaces[i].Name < keyspaces[j].Name - }) + keyspaces := vc.getSortedServingKeyspaces() // Look for any sharded keyspace if present, otherwise take the first keyspace, // sorted alphabetically @@ -361,18 +358,38 @@ func (vc *vcursorImpl) AnyKeyspace() (*vindexes.Keyspace, error) { return keyspaces[0], nil } +// 
getSortedServingKeyspaces gets the sorted serving keyspaces +func (vc *vcursorImpl) getSortedServingKeyspaces() []*vindexes.Keyspace { + var keyspaces []*vindexes.Keyspace + + if vc.resolver != nil && vc.resolver.GetGateway() != nil { + keyspaceNames := vc.resolver.GetGateway().GetServingKeyspaces() + for _, ksName := range keyspaceNames { + ks, exists := vc.vschema.Keyspaces[ksName] + if exists { + keyspaces = append(keyspaces, ks.Keyspace) + } + } + } + + if len(keyspaces) == 0 { + for _, ks := range vc.vschema.Keyspaces { + keyspaces = append(keyspaces, ks.Keyspace) + } + } + sort.Slice(keyspaces, func(i, j int) bool { + return keyspaces[i].Name < keyspaces[j].Name + }) + return keyspaces +} + func (vc *vcursorImpl) FirstSortedKeyspace() (*vindexes.Keyspace, error) { if len(vc.vschema.Keyspaces) == 0 { return nil, errNoDbAvailable } - kss := vc.vschema.Keyspaces - keys := make([]string, 0, len(kss)) - for ks := range kss { - keys = append(keys, ks) - } - sort.Strings(keys) + keyspaces := vc.getSortedServingKeyspaces() - return kss[keys[0]].Keyspace, nil + return keyspaces[0], nil } // SysVarSetEnabled implements the ContextVSchema interface @@ -498,7 +515,7 @@ func (vc *vcursorImpl) Execute(ctx context.Context, method string, query string, return nil, err } - qr, err := vc.executor.Execute(ctx, nil, method, session, vc.marginComments.Leading+query+vc.marginComments.Trailing, bindVars) + qr, err := vc.executor.Execute(ctx, nil, nil, method, session, vc.marginComments.Leading+query+vc.marginComments.Trailing, bindVars) vc.setRollbackOnPartialExecIfRequired(err != nil, rollbackOnError) return qr, err @@ -513,7 +530,7 @@ func (vc *vcursorImpl) markSavepoint(ctx context.Context, needsRollbackOnParialE } uID := fmt.Sprintf("_vt%s", strings.ReplaceAll(uuid.NewString(), "-", "_")) spQuery := fmt.Sprintf("%ssavepoint %s%s", vc.marginComments.Leading, uID, vc.marginComments.Trailing) - _, err := vc.executor.Execute(ctx, nil, "MarkSavepoint", vc.safeSession, spQuery, 
bindVars) + _, err := vc.executor.Execute(ctx, nil, nil, "MarkSavepoint", vc.safeSession, spQuery, bindVars) if err != nil { return err } @@ -889,6 +906,16 @@ func (vc *vcursorImpl) GetDDLStrategy() string { return vc.safeSession.GetDDLStrategy() } +// SetMigrationContext implements the SessionActions interface +func (vc *vcursorImpl) SetMigrationContext(migrationContext string) { + vc.safeSession.SetMigrationContext(migrationContext) +} + +// GetMigrationContext implements the SessionActions interface +func (vc *vcursorImpl) GetMigrationContext() string { + return vc.safeSession.GetMigrationContext() +} + // GetSessionUUID implements the SessionActions interface func (vc *vcursorImpl) GetSessionUUID() string { return vc.safeSession.GetSessionUUID() @@ -953,6 +980,10 @@ func (vc *vcursorImpl) InTransaction() bool { return vc.safeSession.InTransaction() } +func (vc *vcursorImpl) Commit(ctx context.Context) error { + return vc.executor.Commit(ctx, vc.safeSession) +} + // GetDBDDLPluginName implements the VCursor interface func (vc *vcursorImpl) GetDBDDLPluginName() string { return dbDDLPlugin @@ -978,7 +1009,7 @@ func (vc *vcursorImpl) ErrorIfShardedF(ks *vindexes.Keyspace, warn, errFormat st func (vc *vcursorImpl) WarnUnshardedOnly(format string, params ...any) { if vc.warnShardedOnly { vc.warnings = append(vc.warnings, &querypb.QueryWarning{ - Code: uint32(mysql.ERNotSupportedYet), + Code: uint32(sqlerror.ERNotSupportedYet), Message: fmt.Sprintf(format, params...), }) } @@ -990,14 +1021,21 @@ func (vc *vcursorImpl) PlannerWarning(message string) { return } vc.warnings = append(vc.warnings, &querypb.QueryWarning{ - Code: uint32(mysql.ERNotSupportedYet), + Code: uint32(sqlerror.ERNotSupportedYet), Message: message, }) } // ForeignKeyMode implements the VCursor interface -func (vc *vcursorImpl) ForeignKeyMode() string { - return strings.ToLower(foreignKeyMode) +func (vc *vcursorImpl) ForeignKeyMode(keyspace string) (vschemapb.Keyspace_ForeignKeyMode, error) { + if 
strings.ToLower(foreignKeyMode) == "disallow" { + return vschemapb.Keyspace_disallow, nil + } + ks := vc.vschema.Keyspaces[keyspace] + if ks == nil { + return 0, vterrors.VT14004(keyspace) + } + return ks.ForeignKeyMode, nil } // ParseDestinationTarget parses destination target string and sets default keyspace if possible. @@ -1016,7 +1054,7 @@ func (vc *vcursorImpl) keyForPlan(ctx context.Context, query string, buf io.Stri _, _ = buf.WriteString(vc.keyspace) _, _ = buf.WriteString(vindexes.TabletTypeSuffix[vc.tabletType]) _, _ = buf.WriteString("+Collate:") - _, _ = buf.WriteString(vc.collation.Get().Name()) + _, _ = buf.WriteString(collations.Local().LookupName(vc.collation)) if vc.destination != nil { switch vc.destination.(type) { @@ -1124,6 +1162,55 @@ func (vc *vcursorImpl) SetExec(ctx context.Context, name string, value string) e return vc.executor.setVitessMetadata(ctx, name, value) } +func (vc *vcursorImpl) ThrottleApp(ctx context.Context, throttledAppRule *topodatapb.ThrottledAppRule) (err error) { + if throttledAppRule == nil { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "ThrottleApp: nil rule") + } + if throttledAppRule.Name == "" { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "ThrottleApp: app name is empty") + } + // We don't strictly have to construct a UpdateThrottlerConfigRequest here, because we only populate it + // with a couple variables; we could do without it. 
However, constructing the request makes the remaining code + // consistent with vtctldclient/command/throttler.go and we prefer this consistency + req := &vtctldatapb.UpdateThrottlerConfigRequest{ + Keyspace: vc.keyspace, + ThrottledApp: throttledAppRule, + } + + update := func(throttlerConfig *topodatapb.ThrottlerConfig) *topodatapb.ThrottlerConfig { + if throttlerConfig == nil { + throttlerConfig = &topodatapb.ThrottlerConfig{} + } + if throttlerConfig.ThrottledApps == nil { + throttlerConfig.ThrottledApps = make(map[string]*topodatapb.ThrottledAppRule) + } + throttlerConfig.ThrottledApps[req.ThrottledApp.Name] = req.ThrottledApp + return throttlerConfig + } + + ctx, unlock, lockErr := vc.topoServer.LockKeyspace(ctx, req.Keyspace, "UpdateThrottlerConfig") + if lockErr != nil { + return lockErr + } + defer unlock(&err) + + ki, err := vc.topoServer.GetKeyspace(ctx, req.Keyspace) + if err != nil { + return err + } + + ki.ThrottlerConfig = update(ki.ThrottlerConfig) + + err = vc.topoServer.UpdateKeyspace(ctx, ki) + if err != nil { + return err + } + + _, err = vc.topoServer.UpdateSrvKeyspaceThrottlerConfig(ctx, req.Keyspace, []string{}, update) + + return err +} + func (vc *vcursorImpl) CanUseSetVar() bool { return sqlparser.IsMySQL80AndAbove() && setVarEnabled } diff --git a/go/vt/vtgate/vcursor_impl_test.go b/go/vt/vtgate/vcursor_impl_test.go index 011a509c286..3160b8a9b1a 100644 --- a/go/vt/vtgate/vcursor_impl_test.go +++ b/go/vt/vtgate/vcursor_impl_test.go @@ -8,21 +8,18 @@ import ( "strings" "testing" - querypb "vitess.io/vitess/go/vt/proto/query" + "github.com/stretchr/testify/require" - "vitess.io/vitess/go/vt/proto/vschema" - vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo" - - "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/vtgate/vindexes" - "github.com/stretchr/testify/require" - + querypb 
"vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - "vitess.io/vitess/go/vt/sqlparser" ) var _ VSchemaOperator = (*fakeVSchemaOperator)(nil) @@ -31,11 +28,11 @@ type fakeVSchemaOperator struct { vschema *vindexes.VSchema } -func (f fakeVSchemaOperator) GetCurrentSrvVschema() *vschema.SrvVSchema { +func (f fakeVSchemaOperator) GetCurrentSrvVschema() *vschemapb.SrvVSchema { panic("implement me") } -func (f fakeVSchemaOperator) UpdateVSchema(ctx context.Context, ksName string, vschema *vschema.SrvVSchema) error { +func (f fakeVSchemaOperator) UpdateVSchema(ctx context.Context, ksName string, vschema *vschemapb.SrvVSchema) error { panic("implement me") } diff --git a/go/vt/vtgate/vindexes/binary.go b/go/vt/vtgate/vindexes/binary.go index d4487ee84ab..b78451ca1fb 100644 --- a/go/vt/vtgate/vindexes/binary.go +++ b/go/vt/vtgate/vindexes/binary.go @@ -26,19 +26,24 @@ import ( ) var ( - _ SingleColumn = (*Binary)(nil) - _ Reversible = (*Binary)(nil) - _ Hashing = (*Binary)(nil) + _ SingleColumn = (*Binary)(nil) + _ Reversible = (*Binary)(nil) + _ Hashing = (*Binary)(nil) + _ ParamValidating = (*Binary)(nil) ) // Binary is a vindex that converts binary bits to a keyspace id. type Binary struct { - name string + name string + unknownParams []string } -// NewBinary creates a new Binary. -func NewBinary(name string, _ map[string]string) (Vindex, error) { - return &Binary{name: name}, nil +// newBinary creates a new Binary. +func newBinary(name string, params map[string]string) (Vindex, error) { + return &Binary{ + name: name, + unknownParams: FindUnknownParams(params, nil), + }, nil } // String returns the name of the vindex. @@ -103,6 +108,11 @@ func (*Binary) ReverseMap(_ VCursor, ksids [][]byte) ([]sqltypes.Value, error) { return reverseIds, nil } +// UnknownParams implements the ParamValidating interface. 
+func (vind *Binary) UnknownParams() []string { + return vind.unknownParams +} + func init() { - Register("binary", NewBinary) + Register("binary", newBinary) } diff --git a/go/vt/vtgate/vindexes/binary_test.go b/go/vt/vtgate/vindexes/binary_test.go index a1675b4b44d..27ae6ceca11 100644 --- a/go/vt/vtgate/vindexes/binary_test.go +++ b/go/vt/vtgate/vindexes/binary_test.go @@ -24,7 +24,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -34,15 +33,58 @@ import ( var binOnlyVindex SingleColumn func init() { - vindex, _ := CreateVindex("binary", "binary_varchar", nil) + vindex, err := CreateVindex("binary", "binary_varchar", nil) + if err != nil { + panic(err) + } binOnlyVindex = vindex.(SingleColumn) } -func TestBinaryInfo(t *testing.T) { - assert.Equal(t, 0, binOnlyVindex.Cost()) - assert.Equal(t, "binary_varchar", binOnlyVindex.String()) - assert.True(t, binOnlyVindex.IsUnique()) - assert.False(t, binOnlyVindex.NeedsVCursor()) +func binaryCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "binary", + vindexName: "binary", + vindexParams: vindexParams, + + expectCost: 0, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "binary", + expectUnknownParams: expectUnknownParams, + } +} + +func TestBinaryCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + binaryCreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + binaryCreateVindexTestCase( + "empty params", + map[string]string{}, + nil, + nil, + ), + binaryCreateVindexTestCase( + "unknown params", + map[string]string{"hello": "world"}, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestBinaryMap(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/binaryhash.go 
b/go/vt/vtgate/vindexes/binaryhash.go index 8b6e90265e5..b7249c1181f 100644 --- a/go/vt/vtgate/vindexes/binaryhash.go +++ b/go/vt/vtgate/vindexes/binaryhash.go @@ -24,8 +24,6 @@ import ( "strconv" "strings" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" ) @@ -81,7 +79,7 @@ func (vind *BinaryHash) Map(ctx context.Context, cursor VCursor, ids []sqltypes. ival, err = strconv.ParseInt(str, 10, 64) num = uint64(ival) } else { - num, err = evalengine.ToUint64(id) + num, err = id.ToUint64() } if err != nil { @@ -103,7 +101,7 @@ func (vind *BinaryHash) Map(ctx context.Context, cursor VCursor, ids []sqltypes. func (vind *BinaryHash) Verify(_ context.Context, _ VCursor, ids []sqltypes.Value, ksids [][]byte) ([]bool, error) { out := make([]bool, len(ids)) for i := range ids { - num, err := evalengine.ToUint64(ids[i]) + num, err := ids[i].ToUint64() if err != nil { return nil, fmt.Errorf("hash.Verify: %v", err) } diff --git a/go/vt/vtgate/vindexes/binarymd5.go b/go/vt/vtgate/vindexes/binarymd5.go index 49d823a7ed7..d3495e28deb 100644 --- a/go/vt/vtgate/vindexes/binarymd5.go +++ b/go/vt/vtgate/vindexes/binarymd5.go @@ -26,18 +26,23 @@ import ( ) var ( - _ SingleColumn = (*BinaryMD5)(nil) - _ Hashing = (*BinaryMD5)(nil) + _ SingleColumn = (*BinaryMD5)(nil) + _ Hashing = (*BinaryMD5)(nil) + _ ParamValidating = (*BinaryMD5)(nil) ) // BinaryMD5 is a vindex that hashes binary bits to a keyspace id. type BinaryMD5 struct { - name string + name string + unknownParams []string } -// NewBinaryMD5 creates a new BinaryMD5. -func NewBinaryMD5(name string, _ map[string]string) (Vindex, error) { - return &BinaryMD5{name: name}, nil +// newBinaryMD5 creates a new BinaryMD5. +func newBinaryMD5(name string, params map[string]string) (Vindex, error) { + return &BinaryMD5{ + name: name, + unknownParams: FindUnknownParams(params, nil), + }, nil } // String returns the name of the vindex. 
@@ -94,11 +99,16 @@ func (vind *BinaryMD5) Hash(id sqltypes.Value) ([]byte, error) { return vMD5Hash(idBytes), nil } +// UnknownParams implements the ParamValidating interface. +func (vind *BinaryMD5) UnknownParams() []string { + return vind.unknownParams +} + func vMD5Hash(source []byte) []byte { sum := md5.Sum(source) return sum[:] } func init() { - Register("binary_md5", NewBinaryMD5) + Register("binary_md5", newBinaryMD5) } diff --git a/go/vt/vtgate/vindexes/binarymd5_test.go b/go/vt/vtgate/vindexes/binarymd5_test.go index c3c5dccb0aa..2c25bc794d4 100644 --- a/go/vt/vtgate/vindexes/binarymd5_test.go +++ b/go/vt/vtgate/vindexes/binarymd5_test.go @@ -23,7 +23,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -33,15 +32,58 @@ import ( var binVindex SingleColumn func init() { - vindex, _ := CreateVindex("binary_md5", "binary_md5_varchar", nil) + vindex, err := CreateVindex("binary_md5", "binary_md5_varchar", nil) + if err != nil { + panic(err) + } binVindex = vindex.(SingleColumn) } -func TestBinaryMD5Info(t *testing.T) { - assert.Equal(t, 1, binVindex.Cost()) - assert.Equal(t, "binary_md5_varchar", binVindex.String()) - assert.True(t, binVindex.IsUnique()) - assert.False(t, binVindex.NeedsVCursor()) +func binaryMD5CreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "binary_md5", + vindexName: "binary_md5", + vindexParams: vindexParams, + + expectCost: 1, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "binary_md5", + expectUnknownParams: expectUnknownParams, + } +} + +func TestBinaryMD5CreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + binaryMD5CreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + binaryMD5CreateVindexTestCase( + 
"empty params", + map[string]string{}, + nil, + nil, + ), + binaryMD5CreateVindexTestCase( + "unknown params", + map[string]string{"hello": "world"}, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestBinaryMD5Map(t *testing.T) { @@ -134,3 +176,17 @@ func benchmarkMD5HashBytes(b *testing.B, input []byte) { sinkMD5 = vMD5Hash(input) } } + +func TestCreateVindexBinaryMD5Params(t *testing.T) { + vindex, err := CreateVindex("binary_md5", "binary_md5", nil) + require.NotNil(t, vindex) + unknownParams := vindex.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) + require.NoError(t, err) + + vindex, err = CreateVindex("binary_md5", "binary_md5", map[string]string{"hello": "world"}) + require.NotNil(t, vindex) + unknownParams = vindex.(ParamValidating).UnknownParams() + require.Len(t, unknownParams, 1) + require.NoError(t, err) +} diff --git a/go/vt/vtgate/vindexes/cached_size.go b/go/vt/vtgate/vindexes/cached_size.go index 55bbd44ea2d..a97411a6ac8 100644 --- a/go/vt/vtgate/vindexes/cached_size.go +++ b/go/vt/vtgate/vindexes/cached_size.go @@ -29,30 +29,23 @@ type cachedObject interface { CachedSize(alloc bool) int64 } -func (cached *AutoIncrement) CachedSize(alloc bool) int64 { - if cached == nil { - return int64(0) - } - size := int64(0) - if alloc { - size += int64(48) - } - // field Column vitess.io/vitess/go/vt/sqlparser.IdentifierCI - size += cached.Column.CachedSize(false) - // field Sequence *vitess.io/vitess/go/vt/vtgate/vindexes.Table - size += cached.Sequence.CachedSize(true) - return size -} func (cached *Binary) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += 
hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *BinaryMD5) CachedSize(alloc bool) int64 { @@ -61,10 +54,17 @@ func (cached *BinaryMD5) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *CFC) CachedSize(alloc bool) int64 { @@ -81,20 +81,6 @@ func (cached *CFC) CachedSize(alloc bool) int64 { size += cached.prefixCFC.CachedSize(true) return size } -func (cached *Column) CachedSize(alloc bool) int64 { - if cached == nil { - return int64(0) - } - size := int64(0) - if alloc { - size += int64(64) - } - // field Name vitess.io/vitess/go/vt/sqlparser.IdentifierCI - size += cached.Name.CachedSize(false) - // field CollationName string - size += hack.RuntimeAllocSize(int64(len(cached.CollationName))) - return size -} func (cached *ColumnVindex) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -126,10 +112,17 @@ func (cached *ConsistentLookup) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(8) + size += int64(32) } // field clCommon *vitess.io/vitess/go/vt/vtgate/vindexes.clCommon size += cached.clCommon.CachedSize(true) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *ConsistentLookupUnique) CachedSize(alloc bool) int64 { @@ -138,10 +131,17 @@ func (cached *ConsistentLookupUnique) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(8) + size += int64(32) } // field clCommon 
*vitess.io/vitess/go/vt/vtgate/vindexes.clCommon size += cached.clCommon.CachedSize(true) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *Hash) CachedSize(alloc bool) int64 { @@ -150,10 +150,17 @@ func (cached *Hash) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *Keyspace) CachedSize(alloc bool) int64 { @@ -174,12 +181,19 @@ func (cached *LookupHash) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(176) + size += int64(192) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) // field lkp vitess.io/vitess/go/vt/vtgate/vindexes.lookupInternal size += cached.lkp.CachedSize(false) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *LookupHashUnique) CachedSize(alloc bool) int64 { @@ -188,12 +202,19 @@ func (cached *LookupHashUnique) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(176) + size += int64(192) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) // field lkp vitess.io/vitess/go/vt/vtgate/vindexes.lookupInternal size += cached.lkp.CachedSize(false) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range 
cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *LookupNonUnique) CachedSize(alloc bool) int64 { @@ -202,12 +223,19 @@ func (cached *LookupNonUnique) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(176) + size += int64(192) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) // field lkp vitess.io/vitess/go/vt/vtgate/vindexes.lookupInternal size += cached.lkp.CachedSize(false) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *LookupUnicodeLooseMD5Hash) CachedSize(alloc bool) int64 { @@ -216,12 +244,19 @@ func (cached *LookupUnicodeLooseMD5Hash) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(176) + size += int64(192) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) // field lkp vitess.io/vitess/go/vt/vtgate/vindexes.lookupInternal size += cached.lkp.CachedSize(false) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *LookupUnicodeLooseMD5HashUnique) CachedSize(alloc bool) int64 { @@ -230,12 +265,19 @@ func (cached *LookupUnicodeLooseMD5HashUnique) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(176) + size += int64(192) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) // field lkp vitess.io/vitess/go/vt/vtgate/vindexes.lookupInternal size += cached.lkp.CachedSize(false) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += 
hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *LookupUnique) CachedSize(alloc bool) int64 { @@ -244,12 +286,19 @@ func (cached *LookupUnique) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(176) + size += int64(192) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) // field lkp vitess.io/vitess/go/vt/vtgate/vindexes.lookupInternal size += cached.lkp.CachedSize(false) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } @@ -299,10 +348,17 @@ func (cached *Null) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *Numeric) CachedSize(alloc bool) int64 { @@ -311,10 +367,17 @@ func (cached *Numeric) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } @@ -325,7 +388,7 @@ func (cached *NumericStaticMap) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(48) + size += int64(64) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) @@ -344,6 +407,13 @@ func (cached *NumericStaticMap) CachedSize(alloc bool) int64 { size += 
hack.RuntimeAllocSize(int64(numBuckets * 144)) } } + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *RegionExperimental) CachedSize(alloc bool) int64 { @@ -352,10 +422,17 @@ func (cached *RegionExperimental) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(24) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } @@ -366,7 +443,7 @@ func (cached *RegionJSON) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(32) + size += int64(64) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) @@ -384,6 +461,13 @@ func (cached *RegionJSON) CachedSize(alloc bool) int64 { size += hack.RuntimeAllocSize(int64(len(k))) } } + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *ReverseBits) CachedSize(alloc bool) int64 { @@ -392,91 +476,17 @@ func (cached *ReverseBits) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) - return size -} -func (cached *Source) CachedSize(alloc bool) int64 { - if cached == nil { - return int64(0) - } - size := int64(0) - if alloc { - size += int64(32) - } - // field TableName vitess.io/vitess/go/vt/sqlparser.TableName - size += cached.TableName.CachedSize(false) - return size -} - 
-//go:nocheckptr -func (cached *Table) CachedSize(alloc bool) int64 { - if cached == nil { - return int64(0) - } - size := int64(0) - if alloc { - size += int64(192) - } - // field Type string - size += hack.RuntimeAllocSize(int64(len(cached.Type))) - // field Name vitess.io/vitess/go/vt/sqlparser.IdentifierCS - size += cached.Name.CachedSize(false) - // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace - size += cached.Keyspace.CachedSize(true) - // field ColumnVindexes []*vitess.io/vitess/go/vt/vtgate/vindexes.ColumnVindex + // field unknownParams []string { - size += hack.RuntimeAllocSize(int64(cap(cached.ColumnVindexes)) * int64(8)) - for _, elem := range cached.ColumnVindexes { - size += elem.CachedSize(true) - } - } - // field Ordered []*vitess.io/vitess/go/vt/vtgate/vindexes.ColumnVindex - { - size += hack.RuntimeAllocSize(int64(cap(cached.Ordered)) * int64(8)) - for _, elem := range cached.Ordered { - size += elem.CachedSize(true) - } - } - // field Owned []*vitess.io/vitess/go/vt/vtgate/vindexes.ColumnVindex - { - size += hack.RuntimeAllocSize(int64(cap(cached.Owned)) * int64(8)) - for _, elem := range cached.Owned { - size += elem.CachedSize(true) - } - } - // field AutoIncrement *vitess.io/vitess/go/vt/vtgate/vindexes.AutoIncrement - size += cached.AutoIncrement.CachedSize(true) - // field Columns []vitess.io/vitess/go/vt/vtgate/vindexes.Column - { - size += hack.RuntimeAllocSize(int64(cap(cached.Columns)) * int64(56)) - for _, elem := range cached.Columns { - size += elem.CachedSize(false) - } - } - // field Pinned []byte - { - size += hack.RuntimeAllocSize(int64(cap(cached.Pinned))) - } - // field ReferencedBy map[string]*vitess.io/vitess/go/vt/vtgate/vindexes.Table - if cached.ReferencedBy != nil { - size += int64(48) - hmap := reflect.ValueOf(cached.ReferencedBy) - numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) - numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + 
uintptr(10)))) - size += hack.RuntimeAllocSize(int64(numOldBuckets * 208)) - if len(cached.ReferencedBy) > 0 || numBuckets > 1 { - size += hack.RuntimeAllocSize(int64(numBuckets * 208)) - } - for k, v := range cached.ReferencedBy { - size += hack.RuntimeAllocSize(int64(len(k))) - size += v.CachedSize(true) + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) } } - // field Source *vitess.io/vitess/go/vt/vtgate/vindexes.Source - size += cached.Source.CachedSize(true) return size } func (cached *UnicodeLooseMD5) CachedSize(alloc bool) int64 { @@ -485,10 +495,17 @@ func (cached *UnicodeLooseMD5) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *UnicodeLooseXXHash) CachedSize(alloc bool) int64 { @@ -497,10 +514,17 @@ func (cached *UnicodeLooseXXHash) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *XXHash) CachedSize(alloc bool) int64 { @@ -509,10 +533,17 @@ func (cached *XXHash) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field unknownParams []string + { + size += 
hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *cfcCommon) CachedSize(alloc bool) int64 { @@ -521,7 +552,7 @@ func (cached *cfcCommon) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(48) + size += int64(80) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) @@ -529,6 +560,13 @@ func (cached *cfcCommon) CachedSize(alloc bool) int64 { { size += hack.RuntimeAllocSize(int64(cap(cached.offsets)) * int64(8)) } + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *clCommon) CachedSize(alloc bool) int64 { diff --git a/go/vt/vtgate/vindexes/cfc.go b/go/vt/vtgate/vindexes/cfc.go index 0be28f96bc9..af269b1a0d9 100644 --- a/go/vt/vtgate/vindexes/cfc.go +++ b/go/vt/vtgate/vindexes/cfc.go @@ -28,6 +28,20 @@ import ( "vitess.io/vitess/go/vt/vterrors" ) +const ( + cfcParamHash = "hash" + cfcParamOffsets = "offsets" +) + +var ( + _ ParamValidating = (*CFC)(nil) + + cfcParams = []string{ + cfcParamHash, + cfcParamOffsets, + } +) + // CFC is Concatenated Fixed-width Composite Vindex. 
// // The purpose of this vindex is to shard the rows based on the prefix of @@ -94,15 +108,17 @@ type CFC struct { } type cfcCommon struct { - name string - hash func([]byte) []byte - offsets []int + name string + hash func([]byte) []byte + offsets []int + unknownParams []string } -// NewCFC creates a new CFC vindex -func NewCFC(name string, params map[string]string) (Vindex, error) { +// newCFC creates a new CFC vindex +func newCFC(name string, params map[string]string) (Vindex, error) { ss := &cfcCommon{ - name: name, + name: name, + unknownParams: FindUnknownParams(params, cfcParams), } cfc := &CFC{ cfcCommon: ss, @@ -113,7 +129,7 @@ func NewCFC(name string, params map[string]string) (Vindex, error) { return cfc, nil } - switch h := params["hash"]; h { + switch h := params[cfcParamHash]; h { case "": return cfc, nil case "md5": @@ -125,7 +141,7 @@ func NewCFC(name string, params map[string]string) (Vindex, error) { } var offsets []int - if p := params["offsets"]; p == "" { + if p := params[cfcParamOffsets]; p == "" { return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "CFC vindex requires offsets when hash is defined") } else if err := json.Unmarshal([]byte(p), &offsets); err != nil || !validOffsets(offsets) { return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets %s to CFC vindex %s. expected sorted positive ints in brackets", p, name) @@ -231,6 +247,11 @@ func (vind *cfcCommon) verify(ids []sqltypes.Value, ksids [][]byte) ([]bool, err return out, nil } +// UnknownParams implements the ParamValidating interface. +func (vind *cfcCommon) UnknownParams() []string { + return vind.unknownParams +} + // Verify returns true if ids maps to ksids. 
func (vind *CFC) Verify(_ context.Context, _ VCursor, ids []sqltypes.Value, ksids [][]byte) ([]bool, error) { return vind.verify(ids, ksids) @@ -406,5 +427,5 @@ func xxhash64(in []byte) []byte { } func init() { - Register("cfc", NewCFC) + Register("cfc", newCFC) } diff --git a/go/vt/vtgate/vindexes/cfc_test.go b/go/vt/vtgate/vindexes/cfc_test.go index 2e4ff7e6d00..553d36de6c6 100644 --- a/go/vt/vtgate/vindexes/cfc_test.go +++ b/go/vt/vtgate/vindexes/cfc_test.go @@ -30,94 +30,119 @@ import ( "vitess.io/vitess/go/vt/vterrors" ) -func assertEqualVtError(t *testing.T, expected, actual error) { - // vterrors.Errorf returns a struct containing a stacktrace, which fails - // assert.EqualError since the stacktrace would be guaranteed to be different. - // so just check the error message - if expected == nil { - assert.NoError(t, actual) - } else { - assert.EqualError(t, actual, expected.Error()) +func cfcCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "cfc", + vindexName: "cfc", + vindexParams: vindexParams, + + expectCost: 1, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "cfc", + expectUnknownParams: expectUnknownParams, } } -func TestCFCBuildCFC(t *testing.T) { - cases := []struct { - testName string - params map[string]string - err error - offsets []int - }{ - { - testName: "no params", - }, - { - testName: "no hash", - params: map[string]string{}, - }, - { - testName: "no hash", - params: map[string]string{"offsets": "[1,2]"}, - }, - { - testName: "no offsets", - params: map[string]string{"hash": "md5"}, - err: vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "CFC vindex requires offsets when hash is defined"), - }, - { - testName: "invalid offset", - params: map[string]string{"hash": "md5", "offsets": "10,12"}, - err: 
vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets 10,12 to CFC vindex cfc. expected sorted positive ints in brackets"), - }, - { - testName: "invalid offset", - params: map[string]string{"hash": "md5", "offsets": "xxx"}, - err: vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets xxx to CFC vindex cfc. expected sorted positive ints in brackets"), - }, - { - testName: "empty offsets", - params: map[string]string{"hash": "md5", "offsets": "[]"}, - err: vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets [] to CFC vindex cfc. expected sorted positive ints in brackets"), - }, - { - testName: "unsorted offsets", - params: map[string]string{"hash": "md5", "offsets": "[10,3]"}, - err: vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets [10,3] to CFC vindex cfc. expected sorted positive ints in brackets"), - }, - { - testName: "negative offsets", - params: map[string]string{"hash": "md5", "offsets": "[-1,3]"}, - err: vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets [-1,3] to CFC vindex cfc. expected sorted positive ints in brackets"), - }, - { - testName: "normal", - params: map[string]string{"hash": "md5", "offsets": "[3, 7]"}, - offsets: []int{3, 7}, - }, - { - testName: "duplicated offsets", - params: map[string]string{"hash": "md5", "offsets": "[4,4,6]"}, - err: vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets [4,4,6] to CFC vindex cfc. 
expected sorted positive ints in brackets"), - }, +func TestCFCCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + cfcCreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + cfcCreateVindexTestCase( + "no hash", + map[string]string{}, + nil, + nil, + ), + cfcCreateVindexTestCase( + "no hash with offsets", + map[string]string{"offsets": "[1,2]"}, + nil, + nil, + ), + cfcCreateVindexTestCase( + "hash with no offsets", + map[string]string{"hash": "md5"}, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "CFC vindex requires offsets when hash is defined"), + nil, + ), + cfcCreateVindexTestCase( + "invalid offsets 10,12", + map[string]string{"hash": "md5", "offsets": "10,12"}, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets 10,12 to CFC vindex cfc. expected sorted positive ints in brackets"), + nil, + ), + cfcCreateVindexTestCase( + "invalid offsets xxx", + map[string]string{"hash": "md5", "offsets": "xxx"}, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets xxx to CFC vindex cfc. expected sorted positive ints in brackets"), + nil, + ), + cfcCreateVindexTestCase( + "empty offsets", + map[string]string{"hash": "md5", "offsets": "[]"}, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets [] to CFC vindex cfc. expected sorted positive ints in brackets"), + nil, + ), + cfcCreateVindexTestCase( + "unsorted offsets", + map[string]string{"hash": "md5", "offsets": "[10,3]"}, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets [10,3] to CFC vindex cfc. expected sorted positive ints in brackets"), + nil, + ), + cfcCreateVindexTestCase( + "negative offsets", + map[string]string{"hash": "md5", "offsets": "[-1,3]"}, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets [-1,3] to CFC vindex cfc. 
expected sorted positive ints in brackets"), + nil, + ), + cfcCreateVindexTestCase( + "duplicated offsets", + map[string]string{"hash": "md5", "offsets": "[4,4,6]"}, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets [4,4,6] to CFC vindex cfc. expected sorted positive ints in brackets"), + nil, + ), + cfcCreateVindexTestCase( + "unknown params", + map[string]string{"hash": "md5", "offsets": "[3, 7]", "hello": "world"}, + nil, + []string{"hello"}, + ), } - for _, tc := range cases { - t.Run(tc.testName, func(t *testing.T) { - cfc, err := NewCFC("cfc", tc.params) - assertEqualVtError(t, tc.err, err) - if cfc != nil { - assert.EqualValues(t, tc.offsets, cfc.(*CFC).offsets) - assert.Equal(t, "cfc", cfc.String()) - assert.Equal(t, 1, cfc.Cost()) - assert.Equal(t, true, cfc.IsUnique()) - assert.Equal(t, false, cfc.NeedsVCursor()) - } - }) - } + testCreateVindexes(t, cases) +} + +func TestCFCCreateVindexOptions(t *testing.T) { + vdx, err := CreateVindex( + "cfc", + "normal", + map[string]string{ + "hash": "md5", + "offsets": "[3, 7]", + }, + ) + require.NotNil(t, vdx) + require.Nil(t, err) + unknownParams := vdx.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) + require.EqualValues(t, vdx.(*CFC).offsets, []int{3, 7}) } func makeCFC(t *testing.T, params map[string]string) *CFC { - vind, err := NewCFC("cfc", params) + vind, err := newCFC("cfc", params) require.NoError(t, err) cfc, ok := vind.(*CFC) require.True(t, ok) @@ -225,7 +250,6 @@ func TestCFCComputeKsid(t *testing.T) { } }) } - } func TestCFCComputeKsidXxhash(t *testing.T) { @@ -406,7 +430,6 @@ func TestCFCPrefixMap(t *testing.T) { assert.EqualValues(t, tc.dest, dests[0]) }) } - } func TestCFCPrefixQueryMapNoHash(t *testing.T) { @@ -512,7 +535,6 @@ func TestCFCFindPrefixEscape(t *testing.T) { for _, tc := range cases { assert.EqualValues(t, tc.prefix, string(findPrefix([]byte(tc.str)))) } - } func TestDestinationKeyRangeFromPrefix(t *testing.T) { diff --git 
a/go/vt/vtgate/vindexes/consistent_lookup.go b/go/vt/vtgate/vindexes/consistent_lookup.go index 3c2166c0aaf..d73631cc6ca 100644 --- a/go/vt/vtgate/vindexes/consistent_lookup.go +++ b/go/vt/vtgate/vindexes/consistent_lookup.go @@ -22,53 +22,66 @@ import ( "encoding/json" "fmt" - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/vtgate/evalengine" - + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/evalengine" + querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/proto/vtgate" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - "vitess.io/vitess/go/vt/sqlparser" +) + +const ( + consistentLookupParamWriteOnly = "write_only" ) var ( - _ SingleColumn = (*ConsistentLookupUnique)(nil) - _ Lookup = (*ConsistentLookupUnique)(nil) - _ WantOwnerInfo = (*ConsistentLookupUnique)(nil) - _ LookupPlanable = (*ConsistentLookupUnique)(nil) - _ SingleColumn = (*ConsistentLookup)(nil) - _ Lookup = (*ConsistentLookup)(nil) - _ WantOwnerInfo = (*ConsistentLookup)(nil) - _ LookupPlanable = (*ConsistentLookup)(nil) + _ SingleColumn = (*ConsistentLookupUnique)(nil) + _ Lookup = (*ConsistentLookupUnique)(nil) + _ WantOwnerInfo = (*ConsistentLookupUnique)(nil) + _ LookupPlanable = (*ConsistentLookupUnique)(nil) + _ ParamValidating = (*ConsistentLookupUnique)(nil) + _ SingleColumn = (*ConsistentLookup)(nil) + _ Lookup = (*ConsistentLookup)(nil) + _ WantOwnerInfo = (*ConsistentLookup)(nil) + _ LookupPlanable = (*ConsistentLookup)(nil) + _ ParamValidating = (*ConsistentLookup)(nil) + + consistentLookupParams = append( + append(make([]string, 0), lookupInternalParams...), + consistentLookupParamWriteOnly, + ) ) func init() { - Register("consistent_lookup", NewConsistentLookup) - Register("consistent_lookup_unique", NewConsistentLookupUnique) + 
Register("consistent_lookup", newConsistentLookup) + Register("consistent_lookup_unique", newConsistentLookupUnique) } // ConsistentLookup is a non-unique lookup vindex that can stay // consistent with respect to its owner table. type ConsistentLookup struct { *clCommon + unknownParams []string } -// NewConsistentLookup creates a ConsistentLookup vindex. +// newConsistentLookup creates a ConsistentLookup vindex. // The supplied map has the following required fields: // // table: name of the backing table. It can be qualified by the keyspace. // from: list of columns in the table that have the 'from' values of the lookup vindex. // to: The 'to' column name of the table. -func NewConsistentLookup(name string, m map[string]string) (Vindex, error) { +func newConsistentLookup(name string, m map[string]string) (Vindex, error) { clc, err := newCLCommon(name, m) if err != nil { return nil, err } - return &ConsistentLookup{clCommon: clc}, nil + return &ConsistentLookup{ + clCommon: clc, + unknownParams: FindUnknownParams(m, consistentLookupParams), + }, nil } // Cost returns the cost of this vindex as 20. @@ -152,6 +165,11 @@ func (lu *ConsistentLookup) AutoCommitEnabled() bool { return lu.lkp.Autocommit } +// UnknownParams implements the ParamValidating interface. +func (lu *ConsistentLookup) UnknownParams() []string { + return lu.unknownParams +} + //==================================================================== // ConsistentLookupUnique defines a vindex that uses a lookup table. @@ -159,20 +177,24 @@ func (lu *ConsistentLookup) AutoCommitEnabled() bool { // Unique and a Lookup. type ConsistentLookupUnique struct { *clCommon + unknownParams []string } -// NewConsistentLookupUnique creates a ConsistentLookupUnique vindex. +// newConsistentLookupUnique creates a ConsistentLookupUnique vindex. // The supplied map has the following required fields: // // table: name of the backing table. It can be qualified by the keyspace. 
// from: list of columns in the table that have the 'from' values of the lookup vindex. // to: The 'to' column name of the table. -func NewConsistentLookupUnique(name string, m map[string]string) (Vindex, error) { +func newConsistentLookupUnique(name string, m map[string]string) (Vindex, error) { clc, err := newCLCommon(name, m) if err != nil { return nil, err } - return &ConsistentLookupUnique{clCommon: clc}, nil + return &ConsistentLookupUnique{ + clCommon: clc, + unknownParams: FindUnknownParams(m, consistentLookupParams), + }, nil } // Cost returns the cost of this vindex as 10. @@ -271,7 +293,7 @@ type clCommon struct { func newCLCommon(name string, m map[string]string) (*clCommon, error) { lu := &clCommon{name: name} var err error - lu.writeOnly, err = boolFromMap(m, "write_only") + lu.writeOnly, err = boolFromMap(m, consistentLookupParamWriteOnly) if err != nil { return nil, err } @@ -313,7 +335,7 @@ func (lu *clCommon) Verify(ctx context.Context, vcursor VCursor, ids []sqltypes. } return out, nil } - return lu.lkp.VerifyCustom(ctx, vcursor, ids, ksidsToValues(ksids), vtgate.CommitOrder_PRE) + return lu.lkp.VerifyCustom(ctx, vcursor, ids, ksidsToValues(ksids), vtgatepb.CommitOrder_PRE) } // Create reserves the id by inserting it into the vindex table. 
@@ -323,10 +345,10 @@ func (lu *clCommon) Create(ctx context.Context, vcursor VCursor, rowsColValues [ return nil } // Try and convert the error to a MySQL error - sqlErr, isSQLErr := mysql.NewSQLErrorFromError(origErr).(*mysql.SQLError) + sqlErr, isSQLErr := sqlerror.NewSQLErrorFromError(origErr).(*sqlerror.SQLError) // If it is a MySQL error and its code is of duplicate entry, then we would like to continue // Otherwise, we return the error - if !(isSQLErr && sqlErr != nil && sqlErr.Number() == mysql.ERDupEntry) { + if !(isSQLErr && sqlErr != nil && sqlErr.Number() == sqlerror.ERDupEntry) { return origErr } for i, row := range rowsColValues { @@ -389,8 +411,7 @@ func (lu *clCommon) Delete(ctx context.Context, vcursor VCursor, rowsColValues [ func (lu *clCommon) Update(ctx context.Context, vcursor VCursor, oldValues []sqltypes.Value, ksid []byte, newValues []sqltypes.Value) error { equal := true for i := range oldValues { - // TODO(king-11) make collation aware - result, err := evalengine.NullsafeCompare(oldValues[i], newValues[i], collations.Unknown) + result, err := evalengine.NullsafeCompare(oldValues[i], newValues[i], vcursor.ConnCollation()) // errors from NullsafeCompare can be ignored. if they are real problems, we'll see them in the Create/Update if err != nil || result != 0 { equal = false @@ -470,3 +491,8 @@ func (lu *clCommon) GetCommitOrder() vtgatepb.CommitOrder { func (lu *ConsistentLookupUnique) IsBackfilling() bool { return lu.writeOnly } + +// UnknownParams implements the ParamValidating interface. 
+func (lu *ConsistentLookupUnique) UnknownParams() []string { + return lu.unknownParams +} diff --git a/go/vt/vtgate/vindexes/consistent_lookup_test.go b/go/vt/vtgate/vindexes/consistent_lookup_test.go index 7510a701992..deecc23ebdd 100644 --- a/go/vt/vtgate/vindexes/consistent_lookup_test.go +++ b/go/vt/vtgate/vindexes/consistent_lookup_test.go @@ -29,7 +29,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" querypb "vitess.io/vitess/go/vt/proto/query" @@ -40,6 +43,60 @@ import ( "vitess.io/vitess/go/vt/vterrors" ) +func consistentLookupCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "consistent_lookup", + vindexName: "consistent_lookup", + vindexParams: vindexParams, + + expectCost: 20, + expectErr: expectErr, + expectIsUnique: false, + expectNeedsVCursor: true, + expectString: "consistent_lookup", + expectUnknownParams: expectUnknownParams, + } +} + +func consistentLookupUniqueCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "consistent_lookup_unique", + vindexName: "consistent_lookup_unique", + vindexParams: vindexParams, + + expectCost: 10, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: true, + expectString: "consistent_lookup_unique", + expectUnknownParams: expectUnknownParams, + } +} + +func TestConsistentLookupCreateVindex(t *testing.T) { + testCaseFs := []func(string, map[string]string, error, []string) createVindexTestCase{ + consistentLookupCreateVindexTestCase, + 
consistentLookupUniqueCreateVindexTestCase, + } + for _, testCaseF := range testCaseFs { + testLookupCreateVindexInternalCases(t, testCaseF) + } +} + func TestConsistentLookupInit(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup", true) cols := []sqlparser.IdentifierCI{ @@ -55,22 +112,6 @@ func TestConsistentLookupInit(t *testing.T) { } } -func TestConsistentLookupInfo(t *testing.T) { - lookup := createConsistentLookup(t, "consistent_lookup", false) - assert.Equal(t, 20, lookup.Cost()) - assert.Equal(t, "consistent_lookup", lookup.String()) - assert.False(t, lookup.IsUnique()) - assert.True(t, lookup.NeedsVCursor()) -} - -func TestConsistentLookupUniqueInfo(t *testing.T) { - lookup := createConsistentLookup(t, "consistent_lookup_unique", false) - assert.Equal(t, 10, lookup.Cost()) - assert.Equal(t, "consistent_lookup_unique", lookup.String()) - assert.True(t, lookup.IsUnique()) - assert.True(t, lookup.NeedsVCursor()) -} - func TestConsistentLookupMap(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup", false) vc := &loggingVCursor{} @@ -239,7 +280,7 @@ func TestConsistentLookupCreateSimple(t *testing.T) { func TestConsistentLookupCreateThenRecreate(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup", false) vc := &loggingVCursor{} - vc.AddResult(nil, mysql.NewSQLError(mysql.ERDupEntry, mysql.SSConstraintViolation, "Duplicate entry")) + vc.AddResult(nil, sqlerror.NewSQLError(sqlerror.ERDupEntry, sqlerror.SSConstraintViolation, "Duplicate entry")) vc.AddResult(&sqltypes.Result{}, nil) vc.AddResult(&sqltypes.Result{}, nil) @@ -413,7 +454,7 @@ func TestConsistentLookupNoUpdate(t *testing.T) { vc.verifyLog(t, []string{}) } -func TestConsistentLookupUpdateBecauseUncomparableTypes(t *testing.T) { +func TestConsistentLookupUpdateBecauseComparableTypes(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup", false) vc := &loggingVCursor{} @@ -437,7 +478,7 @@ func 
TestConsistentLookupUpdateBecauseUncomparableTypes(t *testing.T) { err = lookup.(Lookup).Update(context.Background(), vc, []sqltypes.Value{literal, literal}, []byte("test"), []sqltypes.Value{literal, literal}) require.NoError(t, err) - require.NotEmpty(t, vc.log) + vc.verifyLog(t, []string{}) vc.log = nil }) } @@ -458,6 +499,7 @@ func createConsistentLookup(t *testing.T, name string, writeOnly bool) SingleCol if err != nil { t.Fatal(err) } + require.Empty(t, l.(ParamValidating).UnknownParams()) cols := []sqlparser.IdentifierCI{ sqlparser.NewIdentifierCI("fc1"), sqlparser.NewIdentifierCI("fc2"), @@ -485,6 +527,10 @@ func (vc *loggingVCursor) InTransactionAndIsDML() bool { return false } +func (vc *loggingVCursor) ConnCollation() collations.ID { + return collations.Default() +} + type bv struct { Name string Bv string diff --git a/go/vt/vtgate/vindexes/foreign_keys.go b/go/vt/vtgate/vindexes/foreign_keys.go new file mode 100644 index 00000000000..3fcbc719624 --- /dev/null +++ b/go/vt/vtgate/vindexes/foreign_keys.go @@ -0,0 +1,252 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vindexes + +import ( + "encoding/json" + "fmt" + "strings" + + "vitess.io/vitess/go/vt/sqlparser" +) + +// ParentFKInfo contains the parent foreign key info for the table. +type ParentFKInfo struct { + Table *Table + ParentColumns sqlparser.Columns + ChildColumns sqlparser.Columns +} + +// MarshalJSON returns a JSON representation of ParentFKInfo. 
+func (fk *ParentFKInfo) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Name string `json:"parent_table"` + ParentColumns sqlparser.Columns `json:"parent_columns"` + ChildColumns sqlparser.Columns `json:"child_columns"` + }{ + Name: fk.Table.Name.String(), + ChildColumns: fk.ChildColumns, + ParentColumns: fk.ParentColumns, + }) +} + +func (fk *ParentFKInfo) String(childTable *Table) string { + var str strings.Builder + str.WriteString(childTable.String()) + for _, column := range fk.ChildColumns { + str.WriteString(column.String()) + } + str.WriteString(fk.Table.String()) + for _, column := range fk.ParentColumns { + str.WriteString(column.String()) + } + return str.String() +} + +// NewParentFkInfo creates a new ParentFKInfo. +func NewParentFkInfo(parentTbl *Table, fkDef *sqlparser.ForeignKeyDefinition) ParentFKInfo { + return ParentFKInfo{ + Table: parentTbl, + ChildColumns: fkDef.Source, + ParentColumns: fkDef.ReferenceDefinition.ReferencedColumns, + } +} + +// ChildFKInfo contains the child foreign key info for the table. +type ChildFKInfo struct { + Table *Table + ChildColumns sqlparser.Columns + ParentColumns sqlparser.Columns + Match sqlparser.MatchAction + OnDelete sqlparser.ReferenceAction + OnUpdate sqlparser.ReferenceAction +} + +// MarshalJSON returns a JSON representation of ChildFKInfo. 
+func (fk *ChildFKInfo) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Name string `json:"child_table"` + ChildColumns sqlparser.Columns `json:"child_columns"` + ParentColumns sqlparser.Columns `json:"parent_columns"` + }{ + Name: fk.Table.Name.String(), + ChildColumns: fk.ChildColumns, + ParentColumns: fk.ParentColumns, + }) +} + +func (fk *ChildFKInfo) String(parentTable *Table) string { + var str strings.Builder + str.WriteString(fk.Table.String()) + for _, column := range fk.ChildColumns { + str.WriteString(column.String()) + } + str.WriteString(parentTable.String()) + for _, column := range fk.ParentColumns { + str.WriteString(column.String()) + } + return str.String() +} + +// NewChildFkInfo creates a new ChildFKInfo. +func NewChildFkInfo(childTbl *Table, fkDef *sqlparser.ForeignKeyDefinition) ChildFKInfo { + return ChildFKInfo{ + Table: childTbl, + ChildColumns: fkDef.Source, + ParentColumns: fkDef.ReferenceDefinition.ReferencedColumns, + Match: fkDef.ReferenceDefinition.Match, + OnDelete: fkDef.ReferenceDefinition.OnDelete, + OnUpdate: fkDef.ReferenceDefinition.OnUpdate, + } +} + +// ParentFKsNeedsHandling returns all the parent fk constraints on this table that are not shard scoped. +func (t *Table) ParentFKsNeedsHandling(verifyAllFKs bool, fkToIgnore string) (fks []ParentFKInfo) { + for _, fk := range t.ParentForeignKeys { + // Check if we need to specifically ignore this foreign key + if fkToIgnore != "" && fk.String(t) == fkToIgnore { + continue + } + + // If we require all the foreign keys, add them all. + if verifyAllFKs { + fks = append(fks, fk) + continue + } + + // If the keyspaces are different, then the fk definition + // is going to go across shards. + if fk.Table.Keyspace.Name != t.Keyspace.Name { + fks = append(fks, fk) + continue + } + // If the keyspaces match and they are unsharded, then the fk defintion + // is shard-scoped. 
+ if !t.Keyspace.Sharded { + continue + } + + if !isShardScoped(fk.Table, t, fk.ParentColumns, fk.ChildColumns) { + fks = append(fks, fk) + } + } + return +} + +// ChildFKsNeedsHandling returns the child foreign keys that need to be handled by the vtgate. +// This can be either the foreign key is not shard scoped or the child tables need cascading. +func (t *Table) ChildFKsNeedsHandling(verifyAllFKs bool, getAction func(fk ChildFKInfo) sqlparser.ReferenceAction) (fks []ChildFKInfo) { + // If we require all the foreign keys, return the entire list. + if verifyAllFKs { + return t.ChildForeignKeys + } + for _, fk := range t.ChildForeignKeys { + // If the keyspaces are different, then the fk definition + // is going to go across shards. + if fk.Table.Keyspace.Name != t.Keyspace.Name { + fks = append(fks, fk) + continue + } + // If the action is not Restrict, then it needs a cascade. + switch getAction(fk) { + case sqlparser.Cascade, sqlparser.SetNull, sqlparser.SetDefault: + fks = append(fks, fk) + continue + } + // sqlparser.Restrict, sqlparser.NoAction, sqlparser.DefaultAction + // all the actions mean the same thing i.e. Restrict + // do not allow modification if there is a child row. + // Check if the restrict is shard scoped. + if !isShardScoped(t, fk.Table, fk.ParentColumns, fk.ChildColumns) { + fks = append(fks, fk) + } + } + return +} + +func UpdateAction(fk ChildFKInfo) sqlparser.ReferenceAction { return fk.OnUpdate } +func DeleteAction(fk ChildFKInfo) sqlparser.ReferenceAction { return fk.OnDelete } + +func isShardScoped(pTable *Table, cTable *Table, pCols sqlparser.Columns, cCols sqlparser.Columns) bool { + if !pTable.Keyspace.Sharded { + return true + } + + pPrimaryVdx := pTable.ColumnVindexes[0] + cPrimaryVdx := cTable.ColumnVindexes[0] + + // If the primary vindexes don't match between the parent and child table, + // we cannot infer that the fk constraint is shard scoped. 
+ if cPrimaryVdx.Vindex != pPrimaryVdx.Vindex { + return false + } + + childFkContatined, childFkIndexes := cCols.Indexes(cPrimaryVdx.Columns) + if !childFkContatined { + // PrimaryVindex is not part of the foreign key constraint on the children side. + // So it is a cross-shard foreign key. + return false + } + + // We need to run the same check for the parent columns. + parentFkContatined, parentFkIndexes := pCols.Indexes(pPrimaryVdx.Columns) + if !parentFkContatined { + return false + } + + // Both the child and parent table contain the foreign key and that the vindexes are the same, + // now we need to make sure, that the indexes of both match. + // For example, consider the following tables, + // t1 (primary vindex (x,y)) + // t2 (primary vindex (a,b)) + // If we have a foreign key constraint from t1(x,y) to t2(b,a), then they are not shard scoped. + // Let's say in t1, (1,3) will be in -80 and (3,1) will be in 80-, then in t2 (1,3) will end up in 80-. + for i := range parentFkIndexes { + if parentFkIndexes[i] != childFkIndexes[i] { + return false + } + } + return true +} + +// AddForeignKey is for testing only. 
+func (vschema *VSchema) AddForeignKey(ksname, childTableName string, fkConstraint *sqlparser.ForeignKeyDefinition) error { + ks, ok := vschema.Keyspaces[ksname] + if !ok { + return fmt.Errorf("keyspace %s not found in vschema", ksname) + } + cTbl, ok := ks.Tables[childTableName] + if !ok { + return fmt.Errorf("child table %s not found in keyspace %s", childTableName, ksname) + } + pKsName := fkConstraint.ReferenceDefinition.ReferencedTable.Qualifier.String() + if pKsName != "" { + ks, ok = vschema.Keyspaces[pKsName] + if !ok { + return fmt.Errorf("keyspace %s not found in vschema", pKsName) + } + ksname = pKsName + } + parentTableName := fkConstraint.ReferenceDefinition.ReferencedTable.Name.String() + pTbl, ok := ks.Tables[parentTableName] + if !ok { + return fmt.Errorf("parent table %s not found in keyspace %s", parentTableName, ksname) + } + pTbl.ChildForeignKeys = append(pTbl.ChildForeignKeys, NewChildFkInfo(cTbl, fkConstraint)) + cTbl.ParentForeignKeys = append(cTbl.ParentForeignKeys, NewParentFkInfo(pTbl, fkConstraint)) + return nil +} diff --git a/go/vt/vtgate/vindexes/foreign_keys_test.go b/go/vt/vtgate/vindexes/foreign_keys_test.go new file mode 100644 index 00000000000..147614edcbf --- /dev/null +++ b/go/vt/vtgate/vindexes/foreign_keys_test.go @@ -0,0 +1,314 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vindexes + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" +) + +var ( + uks = &Keyspace{Name: "uks"} + uks2 = &Keyspace{Name: "uks2"} + sks = &Keyspace{Name: "sks", Sharded: true} +) + +// TestTable_CrossShardParentFKs tests the functionality of the method CrossShardParentFKs. +func TestTable_CrossShardParentFKs(t *testing.T) { + col1Vindex := &ColumnVindex{ + Name: "v1", + Vindex: binVindex, + Columns: sqlparser.MakeColumns("col1"), + } + col4DiffVindex := &ColumnVindex{ + Name: "v2", + Vindex: binOnlyVindex, + Columns: sqlparser.MakeColumns("col4"), + } + col123Vindex := &ColumnVindex{ + Name: "v2", + Vindex: binVindex, + Columns: sqlparser.MakeColumns("col1", "col2", "col3"), + } + col456Vindex := &ColumnVindex{ + Name: "v2", + Vindex: binVindex, + Columns: sqlparser.MakeColumns("col4", "col5", "col6"), + } + + unshardedTbl := &Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: uks2, + } + shardedSingleColTblWithDiffVindex := &Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col4DiffVindex}, + } + shardedMultiColTbl := &Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col456Vindex}, + } + + tests := []struct { + name string + table *Table + wantCrossShardFKTables []string + verifyAllFKs bool + fkToIgnore string + }{{ + name: "No Parent FKs", + table: &Table{ + ColumnVindexes: []*ColumnVindex{col1Vindex}, + Keyspace: sks, + }, + wantCrossShardFKTables: []string{}, + }, { + name: "Unsharded keyspace", + table: &Table{ + ColumnVindexes: []*ColumnVindex{col1Vindex}, + Keyspace: uks2, + ParentForeignKeys: []ParentFKInfo{pkInfo(unshardedTbl, []string{"col4"}, []string{"col1"})}, + }, + wantCrossShardFKTables: []string{}, + }, { + name: "Unsharded keyspace with verify all FKs", + verifyAllFKs: true, + table: &Table{ + ColumnVindexes: []*ColumnVindex{col1Vindex}, + Keyspace: uks2, + 
ParentForeignKeys: []ParentFKInfo{pkInfo(unshardedTbl, []string{"col4"}, []string{"col1"})}, + }, + wantCrossShardFKTables: []string{"t1"}, + }, { + name: "Keyspaces don't match", // parent table is on uks2 + table: &Table{ + Keyspace: uks, + ParentForeignKeys: []ParentFKInfo{pkInfo(unshardedTbl, []string{"col4"}, []string{"col1"})}, + }, + wantCrossShardFKTables: []string{"t1"}, + }, { + name: "Keyspaces don't match with ignore fk", // parent table is on uks2 + fkToIgnore: "uks.col1uks2.t1col4", + table: &Table{ + Keyspace: uks, + ParentForeignKeys: []ParentFKInfo{pkInfo(unshardedTbl, []string{"col4"}, []string{"col1"})}, + }, + wantCrossShardFKTables: []string{}, + }, { + name: "Unsharded keyspace with verify all FKs and fk to ignore", + verifyAllFKs: true, + fkToIgnore: "uks2.col1uks2.t1col4", + table: &Table{ + ColumnVindexes: []*ColumnVindex{col1Vindex}, + Keyspace: uks2, + ParentForeignKeys: []ParentFKInfo{pkInfo(unshardedTbl, []string{"col4"}, []string{"col1"})}, + }, + wantCrossShardFKTables: []string{}, + }, { + name: "Column Vindexes don't match", // primary vindexes on different vindex type + table: &Table{ + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col1Vindex}, + ParentForeignKeys: []ParentFKInfo{pkInfo(shardedSingleColTblWithDiffVindex, []string{"col4"}, []string{"col1"})}, + }, + wantCrossShardFKTables: []string{"t1"}, + }, { + name: "child table foreign key does not contain primary vindex columns", + table: &Table{ + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col123Vindex}, + ParentForeignKeys: []ParentFKInfo{pkInfo(shardedMultiColTbl, []string{"col4", "col5", "col6"}, []string{"col3", "col9", "col1"})}, + }, + wantCrossShardFKTables: []string{"t1"}, + }, { + name: "Parent FK doesn't contain primary vindex", + table: &Table{ + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col123Vindex}, + ParentForeignKeys: []ParentFKInfo{pkInfo(shardedMultiColTbl, []string{"col4", "col9", "col6"}, []string{"col1", "col2", "col3"})}, + }, + 
wantCrossShardFKTables: []string{"t1"}, + }, { + name: "Indexes of the two FKs with column vindexes don't line up", + table: &Table{ + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col123Vindex}, + ParentForeignKeys: []ParentFKInfo{pkInfo(shardedMultiColTbl, []string{"col4", "col9", "col5", "col6"}, []string{"col1", "col2", "col3", "col9"})}, + }, + wantCrossShardFKTables: []string{"t1"}, + }, { + name: "Shard scoped foreign key constraint", + table: &Table{ + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col123Vindex}, + ParentForeignKeys: []ParentFKInfo{pkInfo(shardedMultiColTbl, []string{"col4", "col9", "col5", "col6", "colc"}, []string{"col1", "cola", "col2", "col3", "colb"})}, + }, + wantCrossShardFKTables: []string{}, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + crossShardFks := tt.table.ParentFKsNeedsHandling(tt.verifyAllFKs, tt.fkToIgnore) + var crossShardFkTables []string + for _, fk := range crossShardFks { + crossShardFkTables = append(crossShardFkTables, fk.Table.Name.String()) + } + require.ElementsMatch(t, tt.wantCrossShardFKTables, crossShardFkTables) + }) + } +} + +func pkInfo(parentTable *Table, pCols []string, cCols []string) ParentFKInfo { + return ParentFKInfo{ + Table: parentTable, + ParentColumns: sqlparser.MakeColumns(pCols...), + ChildColumns: sqlparser.MakeColumns(cCols...), + } +} + +// TestChildFKs tests the ChildFKsNeedsHandling method is provides the child foreign key table whose +// rows needs to be managed by vitess. 
+func TestChildFKs(t *testing.T) { + col1Vindex := &ColumnVindex{ + Name: "v1", + Vindex: binVindex, + Columns: sqlparser.MakeColumns("col1"), + } + col4DiffVindex := &ColumnVindex{ + Name: "v2", + Vindex: binOnlyVindex, + Columns: sqlparser.MakeColumns("col4"), + } + + unshardedTbl := &Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: uks2, + } + shardedSingleColTbl := &Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col1Vindex}, + } + shardedSingleColTblWithDiffVindex := &Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col4DiffVindex}, + } + + tests := []struct { + verifyAllFKs bool + name string + table *Table + expChildTbls []string + }{{ + name: "No Parent FKs", + table: &Table{ + ColumnVindexes: []*ColumnVindex{col1Vindex}, + Keyspace: sks, + }, + expChildTbls: []string{}, + }, { + name: "restrict unsharded", + table: &Table{ + ColumnVindexes: []*ColumnVindex{col1Vindex}, + Keyspace: uks2, + ChildForeignKeys: []ChildFKInfo{ckInfo(unshardedTbl, []string{"col4"}, []string{"col1"}, sqlparser.Restrict)}, + }, + expChildTbls: []string{}, + }, { + name: "restrict unsharded with verify all fks", + verifyAllFKs: true, + table: &Table{ + ColumnVindexes: []*ColumnVindex{col1Vindex}, + Keyspace: uks2, + ChildForeignKeys: []ChildFKInfo{ckInfo(unshardedTbl, []string{"col4"}, []string{"col1"}, sqlparser.Restrict)}, + }, + expChildTbls: []string{"t1"}, + }, { + name: "restrict shard scoped", + table: &Table{ + ColumnVindexes: []*ColumnVindex{col1Vindex}, + Keyspace: sks, + ChildForeignKeys: []ChildFKInfo{ckInfo(shardedSingleColTbl, []string{"col1"}, []string{"col1"}, sqlparser.Restrict)}, + }, + expChildTbls: []string{}, + }, { + name: "restrict shard scoped with verify all fks", + verifyAllFKs: true, + table: &Table{ + ColumnVindexes: []*ColumnVindex{col1Vindex}, + Keyspace: sks, + ChildForeignKeys: []ChildFKInfo{ckInfo(shardedSingleColTbl, []string{"col1"}, 
[]string{"col1"}, sqlparser.Restrict)}, + }, + expChildTbls: []string{"t1"}, + }, { + name: "restrict Keyspaces don't match", + table: &Table{ + Keyspace: uks, + ChildForeignKeys: []ChildFKInfo{ckInfo(shardedSingleColTbl, []string{"col1"}, []string{"col1"}, sqlparser.Restrict)}, + }, + expChildTbls: []string{"t1"}, + }, { + name: "restrict cross shard", + table: &Table{ + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col1Vindex}, + ChildForeignKeys: []ChildFKInfo{ckInfo(shardedSingleColTblWithDiffVindex, []string{"col4"}, []string{"col1"}, sqlparser.Restrict)}, + }, + expChildTbls: []string{"t1"}, + }, { + name: "cascade unsharded", + table: &Table{ + Keyspace: uks2, + ColumnVindexes: []*ColumnVindex{col1Vindex}, + ChildForeignKeys: []ChildFKInfo{ckInfo(unshardedTbl, []string{"col4"}, []string{"col1"}, sqlparser.Cascade)}, + }, + expChildTbls: []string{"t1"}, + }, { + name: "cascade cross shard", + table: &Table{ + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col1Vindex}, + ChildForeignKeys: []ChildFKInfo{ckInfo(shardedSingleColTblWithDiffVindex, []string{"col4"}, []string{"col1"}, sqlparser.Cascade)}, + }, + expChildTbls: []string{"t1"}, + }} + deleteAction := func(fk ChildFKInfo) sqlparser.ReferenceAction { return fk.OnDelete } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + childFks := tt.table.ChildFKsNeedsHandling(tt.verifyAllFKs, deleteAction) + var actualChildTbls []string + for _, fk := range childFks { + actualChildTbls = append(actualChildTbls, fk.Table.Name.String()) + } + require.ElementsMatch(t, tt.expChildTbls, actualChildTbls) + }) + } +} + +func ckInfo(cTable *Table, pCols []string, cCols []string, refAction sqlparser.ReferenceAction) ChildFKInfo { + return ChildFKInfo{ + Table: cTable, + ParentColumns: sqlparser.MakeColumns(pCols...), + ChildColumns: sqlparser.MakeColumns(cCols...), + OnDelete: refAction, + } +} diff --git a/go/vt/vtgate/vindexes/hash.go b/go/vt/vtgate/vindexes/hash.go index a488809aeb8..d30895be48a 
100644 --- a/go/vt/vtgate/vindexes/hash.go +++ b/go/vt/vtgate/vindexes/hash.go @@ -26,16 +26,15 @@ import ( "fmt" "strconv" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" ) var ( - _ SingleColumn = (*Hash)(nil) - _ Reversible = (*Hash)(nil) - _ Hashing = (*Hash)(nil) + _ SingleColumn = (*Hash)(nil) + _ Reversible = (*Hash)(nil) + _ Hashing = (*Hash)(nil) + _ ParamValidating = (*Hash)(nil) ) // Hash defines vindex that hashes an int64 to a KeyspaceId @@ -44,12 +43,16 @@ var ( // Note that at once stage we used a 3DES-based hash here, // but for a null key as in our case, they are completely equivalent. type Hash struct { - name string + name string + unknownParams []string } -// NewHash creates a new Hash. -func NewHash(name string, _ map[string]string) (Vindex, error) { - return &Hash{name: name}, nil +// newHash creates a new Hash. +func newHash(name string, params map[string]string) (Vindex, error) { + return &Hash{ + name: name, + unknownParams: FindUnknownParams(params, nil), + }, nil } // String returns the name of the vindex. @@ -90,7 +93,7 @@ func (vind *Hash) Map(ctx context.Context, vcursor VCursor, ids []sqltypes.Value func (vind *Hash) Verify(ctx context.Context, vcursor VCursor, ids []sqltypes.Value, ksids [][]byte) ([]bool, error) { out := make([]bool, len(ids)) for i := range ids { - num, err := evalengine.ToUint64(ids[i]) + num, err := ids[i].ToCastUint64() if err != nil { return nil, err } @@ -123,7 +126,7 @@ func (vind *Hash) Hash(id sqltypes.Value) ([]byte, error) { ival, err = strconv.ParseInt(str, 10, 64) num = uint64(ival) } else { - num, err = evalengine.ToUint64(id) + num, err = id.ToCastUint64() } if err != nil { @@ -132,6 +135,11 @@ func (vind *Hash) Hash(id sqltypes.Value) ([]byte, error) { return vhash(num), nil } +// UnknownParams implements the ParamValidating interface. 
+func (vind *Hash) UnknownParams() []string { + return vind.unknownParams +} + var blockDES cipher.Block func init() { @@ -140,7 +148,7 @@ func init() { if err != nil { panic(err) } - Register("hash", NewHash) + Register("hash", newHash) } func vhash(shardKey uint64) []byte { diff --git a/go/vt/vtgate/vindexes/hash_test.go b/go/vt/vtgate/vindexes/hash_test.go index 24b8f748793..82fb33d82b5 100644 --- a/go/vt/vtgate/vindexes/hash_test.go +++ b/go/vt/vtgate/vindexes/hash_test.go @@ -18,10 +18,10 @@ package vindexes import ( "context" + "hash" "reflect" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -31,18 +31,62 @@ import ( var hashSingleColumn SingleColumn func init() { - hv, err := CreateVindex("hash", "nn", map[string]string{"Table": "t", "Column": "c"}) + hv, err := CreateVindex("hash", "nn", map[string]string{}) + unknownParams := hv.(ParamValidating).UnknownParams() + if len(unknownParams) > 0 { + panic("hash test init: expected 0 unknown params") + } if err != nil { panic(err) } hashSingleColumn = hv.(SingleColumn) } -func TestHashInfo(t *testing.T) { - assert.Equal(t, 1, hashSingleColumn.Cost()) - assert.Equal(t, "nn", hashSingleColumn.String()) - assert.True(t, hashSingleColumn.IsUnique()) - assert.False(t, hashSingleColumn.NeedsVCursor()) +func hashCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "hash", + vindexName: "hash", + vindexParams: vindexParams, + + expectCost: 1, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "hash", + expectUnknownParams: expectUnknownParams, + } +} + +func TestHashCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + hashCreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + hashCreateVindexTestCase( + "empty params", + 
map[string]string{}, + nil, + nil, + ), + hashCreateVindexTestCase( + "unknown params", + map[string]string{"hello": "world"}, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestHashMap(t *testing.T) { @@ -99,8 +143,8 @@ func TestHashVerify(t *testing.T) { } // Failure test - _, err = hashSingleColumn.Verify(context.Background(), nil, []sqltypes.Value{sqltypes.NewVarBinary("aa")}, [][]byte{nil}) - require.EqualError(t, err, "could not parse value: 'aa'") + _, err = hash.Verify(context.Background(), nil, []sqltypes.Value{sqltypes.NewVarBinary("aa")}, [][]byte{nil}) + require.EqualError(t, err, "cannot parse uint64 from \"aa\"") } func TestHashReverseMap(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/lookup.go b/go/vt/vtgate/vindexes/lookup.go index 9ac514175df..b3e14fa01f6 100644 --- a/go/vt/vtgate/vindexes/lookup.go +++ b/go/vt/vtgate/vindexes/lookup.go @@ -27,27 +27,41 @@ import ( vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" ) +const ( + lookupParamNoVerify = "no_verify" + lookupParamWriteOnly = "write_only" +) + var ( - _ SingleColumn = (*LookupUnique)(nil) - _ Lookup = (*LookupUnique)(nil) - _ LookupPlanable = (*LookupUnique)(nil) - _ SingleColumn = (*LookupNonUnique)(nil) - _ Lookup = (*LookupNonUnique)(nil) - _ LookupPlanable = (*LookupNonUnique)(nil) + _ SingleColumn = (*LookupUnique)(nil) + _ Lookup = (*LookupUnique)(nil) + _ LookupPlanable = (*LookupUnique)(nil) + _ ParamValidating = (*LookupUnique)(nil) + _ SingleColumn = (*LookupNonUnique)(nil) + _ Lookup = (*LookupNonUnique)(nil) + _ LookupPlanable = (*LookupNonUnique)(nil) + _ ParamValidating = (*LookupNonUnique)(nil) + + lookupParams = append( + append(make([]string, 0), lookupCommonParams...), + lookupParamNoVerify, + lookupParamWriteOnly, + ) ) func init() { - Register("lookup", NewLookup) - Register("lookup_unique", NewLookupUnique) + Register("lookup", newLookup) + Register("lookup_unique", newLookupUnique) } // LookupNonUnique defines a vindex that uses a lookup 
table and create a mapping between from ids and KeyspaceId. // It's NonUnique and a Lookup. type LookupNonUnique struct { - name string - writeOnly bool - noVerify bool - lkp lookupInternal + name string + writeOnly bool + noVerify bool + lkp lookupInternal + unknownParams []string } func (ln *LookupNonUnique) GetCommitOrder() vtgatepb.CommitOrder { @@ -172,7 +186,12 @@ func (ln *LookupNonUnique) Query() (selQuery string, arguments []string) { return ln.lkp.query() } -// NewLookup creates a LookupNonUnique vindex. +// UnknownParams implements the ParamValidating interface. +func (ln *LookupNonUnique) UnknownParams() []string { + return ln.unknownParams +} + +// newLookup creates a LookupNonUnique vindex. // The supplied map has the following required fields: // // table: name of the backing table. It can be qualified by the keyspace. @@ -184,19 +203,22 @@ func (ln *LookupNonUnique) Query() (selQuery string, arguments []string) { // autocommit: setting this to "true" will cause inserts to upsert and deletes to be ignored. // write_only: in this mode, Map functions return the full keyrange causing a full scatter. // no_verify: in this mode, Verify will always succeed. -func NewLookup(name string, m map[string]string) (Vindex, error) { - lookup := &LookupNonUnique{name: name} +func newLookup(name string, m map[string]string) (Vindex, error) { + lookup := &LookupNonUnique{ + name: name, + unknownParams: FindUnknownParams(m, lookupParams), + } cc, err := parseCommonConfig(m) if err != nil { return nil, err } - lookup.writeOnly, err = boolFromMap(m, "write_only") + lookup.writeOnly, err = boolFromMap(m, lookupParamWriteOnly) if err != nil { return nil, err } - lookup.noVerify, err = boolFromMap(m, "no_verify") + lookup.noVerify, err = boolFromMap(m, lookupParamNoVerify) if err != nil { return nil, err } @@ -223,10 +245,11 @@ func ksidsToValues(ksids [][]byte) []sqltypes.Value { // The table is expected to define the id column as unique. It's // Unique and a Lookup. 
type LookupUnique struct { - name string - writeOnly bool - noVerify bool - lkp lookupInternal + name string + writeOnly bool + noVerify bool + lkp lookupInternal + unknownParams []string } func (lu *LookupUnique) GetCommitOrder() vtgatepb.CommitOrder { @@ -241,7 +264,7 @@ func (lu *LookupUnique) AutoCommitEnabled() bool { return lu.lkp.Autocommit } -// NewLookupUnique creates a LookupUnique vindex. +// newLookupUnique creates a LookupUnique vindex. // The supplied map has the following required fields: // // table: name of the backing table. It can be qualified by the keyspace. @@ -252,19 +275,22 @@ func (lu *LookupUnique) AutoCommitEnabled() bool { // // autocommit: setting this to "true" will cause deletes to be ignored. // write_only: in this mode, Map functions return the full keyrange causing a full scatter. -func NewLookupUnique(name string, m map[string]string) (Vindex, error) { - lu := &LookupUnique{name: name} +func newLookupUnique(name string, m map[string]string) (Vindex, error) { + lu := &LookupUnique{ + name: name, + unknownParams: FindUnknownParams(m, lookupParams), + } cc, err := parseCommonConfig(m) if err != nil { return nil, err } - lu.writeOnly, err = boolFromMap(m, "write_only") + lu.writeOnly, err = boolFromMap(m, lookupParamWriteOnly) if err != nil { return nil, err } - lu.noVerify, err = boolFromMap(m, "no_verify") + lu.noVerify, err = boolFromMap(m, lookupParamNoVerify) if err != nil { return nil, err } @@ -375,3 +401,8 @@ func (lu *LookupUnique) LookupQuery() (string, error) { func (lu *LookupUnique) Query() (string, []string) { return lu.lkp.query() } + +// UnknownParams implements the ParamValidating interface. 
+func (ln *LookupUnique) UnknownParams() []string { + return ln.unknownParams +} diff --git a/go/vt/vtgate/vindexes/lookup_hash.go b/go/vt/vtgate/vindexes/lookup_hash.go index 993b9655660..de3d078f556 100644 --- a/go/vt/vtgate/vindexes/lookup_hash.go +++ b/go/vt/vtgate/vindexes/lookup_hash.go @@ -21,26 +21,35 @@ import ( "encoding/json" "fmt" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" ) +const ( + lookupHashParamWriteOnly = "write_only" +) + var ( - _ SingleColumn = (*LookupHash)(nil) - _ Lookup = (*LookupHash)(nil) - _ LookupPlanable = (*LookupHash)(nil) - _ SingleColumn = (*LookupHashUnique)(nil) - _ Lookup = (*LookupHashUnique)(nil) - _ LookupPlanable = (*LookupHashUnique)(nil) + _ SingleColumn = (*LookupHash)(nil) + _ Lookup = (*LookupHash)(nil) + _ LookupPlanable = (*LookupHash)(nil) + _ ParamValidating = (*LookupHash)(nil) + _ SingleColumn = (*LookupHashUnique)(nil) + _ Lookup = (*LookupHashUnique)(nil) + _ LookupPlanable = (*LookupHashUnique)(nil) + _ ParamValidating = (*LookupHashUnique)(nil) + + lookupHashParams = append( + append(make([]string, 0), lookupCommonParams...), + lookupHashParamWriteOnly, + ) ) func init() { - Register("lookup_hash", NewLookupHash) - Register("lookup_hash_unique", NewLookupHashUnique) + Register("lookup_hash", newLookupHash) + Register("lookup_hash_unique", newLookupHashUnique) } //==================================================================== @@ -50,12 +59,13 @@ func init() { // NonUnique and a Lookup. // Warning: This Vindex is being deprecated in favor of Lookup type LookupHash struct { - name string - writeOnly bool - lkp lookupInternal + name string + writeOnly bool + lkp lookupInternal + unknownParams []string } -// NewLookupHash creates a LookupHash vindex. +// newLookupHash creates a LookupHash vindex. 
// The supplied map has the following required fields: // // table: name of the backing table. It can be qualified by the keyspace. @@ -66,14 +76,17 @@ type LookupHash struct { // // autocommit: setting this to "true" will cause inserts to upsert and deletes to be ignored. // write_only: in this mode, Map functions return the full keyrange causing a full scatter. -func NewLookupHash(name string, m map[string]string) (Vindex, error) { - lh := &LookupHash{name: name} +func newLookupHash(name string, m map[string]string) (Vindex, error) { + lh := &LookupHash{ + name: name, + unknownParams: FindUnknownParams(m, lookupHashParams), + } cc, err := parseCommonConfig(m) if err != nil { return nil, err } - lh.writeOnly, err = boolFromMap(m, "write_only") + lh.writeOnly, err = boolFromMap(m, lookupHashParamWriteOnly) if err != nil { return nil, err } @@ -148,7 +161,7 @@ func (lh *LookupHash) MapResult(ids []sqltypes.Value, results []*sqltypes.Result } ksids := make([][]byte, 0, len(result.Rows)) for _, row := range result.Rows { - num, err := evalengine.ToUint64(row[0]) + num, err := row[0].ToCastUint64() if err != nil { // A failure to convert is equivalent to not being // able to map. @@ -229,6 +242,11 @@ func (lh *LookupHash) MarshalJSON() ([]byte, error) { return json.Marshal(lh.lkp) } +// UnknownParams satisfies the ParamValidating interface. +func (lh *LookupHash) UnknownParams() []string { + return lh.unknownParams +} + // unhashList unhashes a list of keyspace ids into []sqltypes.Value. func unhashList(ksids [][]byte) ([]sqltypes.Value, error) { values := make([]sqltypes.Value, 0, len(ksids)) @@ -249,14 +267,15 @@ func unhashList(ksids [][]byte) ([]sqltypes.Value, error) { // Unique and a Lookup. 
// Warning: This Vindex is being depcreated in favor of LookupUnique type LookupHashUnique struct { - name string - writeOnly bool - lkp lookupInternal + name string + writeOnly bool + lkp lookupInternal + unknownParams []string } var _ LookupPlanable = (*LookupHashUnique)(nil) -// NewLookupHashUnique creates a LookupHashUnique vindex. +// newLookupHashUnique creates a LookupHashUnique vindex. // The supplied map has the following required fields: // // table: name of the backing table. It can be qualified by the keyspace. @@ -267,14 +286,17 @@ var _ LookupPlanable = (*LookupHashUnique)(nil) // // autocommit: setting this to "true" will cause deletes to be ignored. // write_only: in this mode, Map functions return the full keyrange causing a full scatter. -func NewLookupHashUnique(name string, m map[string]string) (Vindex, error) { - lhu := &LookupHashUnique{name: name} +func newLookupHashUnique(name string, m map[string]string) (Vindex, error) { + lhu := &LookupHashUnique{ + name: name, + unknownParams: FindUnknownParams(m, lookupHashParams), + } cc, err := parseCommonConfig(m) if err != nil { return nil, err } - lhu.writeOnly, err = boolFromMap(m, "write_only") + lhu.writeOnly, err = boolFromMap(m, lookupHashParamWriteOnly) if err != nil { return nil, err } @@ -336,7 +358,7 @@ func (lhu *LookupHashUnique) MapResult(ids []sqltypes.Value, results []*sqltypes case 0: out = append(out, key.DestinationNone{}) case 1: - num, err := evalengine.ToUint64(result.Rows[0][0]) + num, err := result.Rows[0][0].ToCastUint64() if err != nil { out = append(out, key.DestinationNone{}) continue @@ -419,3 +441,8 @@ func (lhu *LookupHashUnique) Query() (selQuery string, arguments []string) { func (lhu *LookupHashUnique) GetCommitOrder() vtgatepb.CommitOrder { return vtgatepb.CommitOrder_NORMAL } + +// UnknownParams implements the ParamValidating interface. 
+func (lhu *LookupHashUnique) UnknownParams() []string { + return lhu.unknownParams +} diff --git a/go/vt/vtgate/vindexes/lookup_hash_test.go b/go/vt/vtgate/vindexes/lookup_hash_test.go index 7703bd4d344..69bff9f6f34 100644 --- a/go/vt/vtgate/vindexes/lookup_hash_test.go +++ b/go/vt/vtgate/vindexes/lookup_hash_test.go @@ -21,7 +21,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -29,6 +28,32 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) +func lookupHashCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "lookup_hash", + vindexName: "lookup_hash", + vindexParams: vindexParams, + + expectCost: 20, + expectErr: expectErr, + expectIsUnique: false, + expectNeedsVCursor: true, + expectString: "lookup_hash", + expectUnknownParams: expectUnknownParams, + } +} + +func TestLookupHashCreateVindex(t *testing.T) { + testLookupCreateVindexCommonCases(t, lookupHashCreateVindexTestCase) +} + func TestLookupHashNew(t *testing.T) { l := createLookup(t, "lookup_hash", false /* writeOnly */) if want, got := l.(*LookupHash).writeOnly, false; got != want { @@ -40,7 +65,7 @@ func TestLookupHashNew(t *testing.T) { t.Errorf("Create(lookup, false): %v, want %v", got, want) } - _, err := CreateVindex("lookup_hash", "lookup_hash", map[string]string{ + vdx, err := CreateVindex("lookup_hash", "lookup_hash", map[string]string{ "table": "t", "from": "fromc", "to": "toc", @@ -50,20 +75,10 @@ func TestLookupHashNew(t *testing.T) { if err == nil || err.Error() != want { t.Errorf("Create(bad_scatter): %v, want %s", err, want) } -} - -func TestLookupHashInfo(t *testing.T) { - lookuphash := createLookup(t, "lookup_hash", false /* writeOnly */) - assert.Equal(t, 20, lookuphash.Cost()) - assert.Equal(t, "lookup_hash", 
lookuphash.String()) - assert.False(t, lookuphash.IsUnique()) - assert.True(t, lookuphash.NeedsVCursor()) - - lookuphashunique := createLookup(t, "lookup_hash_unique", false /* writeOnly */) - assert.Equal(t, 10, lookuphashunique.Cost()) - assert.Equal(t, "lookup_hash_unique", lookuphashunique.String()) - assert.True(t, lookuphashunique.IsUnique()) - assert.True(t, lookuphashunique.NeedsVCursor()) + if err == nil { + unknownParams := vdx.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) + } } func TestLookupHashMap(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/lookup_hash_unique_test.go b/go/vt/vtgate/vindexes/lookup_hash_unique_test.go index 9158f99dc04..67697fb5eac 100644 --- a/go/vt/vtgate/vindexes/lookup_hash_unique_test.go +++ b/go/vt/vtgate/vindexes/lookup_hash_unique_test.go @@ -21,7 +21,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -29,24 +28,54 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) +func lookupHashUniqueCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "lookup_hash_unique", + vindexName: "lookup_hash_unique", + vindexParams: vindexParams, + + expectCost: 10, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: true, + expectString: "lookup_hash_unique", + expectUnknownParams: expectUnknownParams, + } +} + +func TestLookupHashUniqueCreateVindex(t *testing.T) { + testLookupCreateVindexCommonCases(t, lookupHashUniqueCreateVindexTestCase) +} + func TestLookupHashUniqueNew(t *testing.T) { l := createLookup(t, "lookup_hash_unique", false /* writeOnly */) if want, got := l.(*LookupHashUnique).writeOnly, false; got != want { t.Errorf("Create(lookup, false): %v, want %v", got, want) } - vindex, _ := CreateVindex("lookup_hash_unique", 
"lookup_hash_unique", map[string]string{ + vindex, err := CreateVindex("lookup_hash_unique", "lookup_hash_unique", map[string]string{ "table": "t", "from": "fromc", "to": "toc", "write_only": "true", }) + require.NoError(t, err) + unknownParams := vindex.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) + l = vindex.(SingleColumn) if want, got := l.(*LookupHashUnique).writeOnly, true; got != want { t.Errorf("Create(lookup, false): %v, want %v", got, want) } - _, err := CreateVindex("lookup_hash_unique", "lookup_hash_unique", map[string]string{ + vdx, err := CreateVindex("lookup_hash_unique", "lookup_hash_unique", map[string]string{ "table": "t", "from": "fromc", "to": "toc", @@ -56,14 +85,10 @@ func TestLookupHashUniqueNew(t *testing.T) { if err == nil || err.Error() != want { t.Errorf("Create(bad_scatter): %v, want %s", err, want) } -} - -func TestLookupHashUniqueInfo(t *testing.T) { - lhu := createLookup(t, "lookup_hash_unique", false /* writeOnly */) - assert.Equal(t, 10, lhu.Cost()) - assert.Equal(t, "lookup_hash_unique", lhu.String()) - assert.True(t, lhu.IsUnique()) - assert.True(t, lhu.NeedsVCursor()) + if err == nil { + unknownParams = vdx.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) + } }
"ignore_nulls" + lookupInternalParamBatchLookup = "batch_lookup" + lookupInternalParamReadLock = "read_lock" +) + +var ( readLockExprs = map[string]string{ readLockExclusive: "for update", readLockShared: "lock in share mode", readLockNone: "", } + + // lookupCommonParams are used only by lookup_* vindexes. + lookupCommonParams = append( + append(make([]string, 0), lookupInternalParams...), + lookupCommonParamAutocommit, + lookupCommonParamMultiShardAutocommit, + ) + + // lookupInternalParams are used by both lookup_* vindexes and the newer + // consistent_lookup_* vindexes. + lookupInternalParams = []string{ + lookupInternalParamTable, + lookupInternalParamFrom, + lookupInternalParamTo, + lookupInternalParamIgnoreNulls, + lookupInternalParamBatchLookup, + lookupInternalParamReadLock, + } ) // lookupInternal implements the functions for the Lookup vindexes. @@ -61,26 +91,26 @@ type lookupInternal struct { } func (lkp *lookupInternal) Init(lookupQueryParams map[string]string, autocommit, upsert, multiShardAutocommit bool) error { - lkp.Table = lookupQueryParams["table"] - lkp.To = lookupQueryParams["to"] + lkp.Table = lookupQueryParams[lookupInternalParamTable] + lkp.To = lookupQueryParams[lookupInternalParamTo] var fromColumns []string - for _, from := range strings.Split(lookupQueryParams["from"], ",") { + for _, from := range strings.Split(lookupQueryParams[lookupInternalParamFrom], ",") { fromColumns = append(fromColumns, strings.TrimSpace(from)) } lkp.FromColumns = fromColumns var err error - lkp.IgnoreNulls, err = boolFromMap(lookupQueryParams, "ignore_nulls") + lkp.IgnoreNulls, err = boolFromMap(lookupQueryParams, lookupInternalParamIgnoreNulls) if err != nil { return err } - lkp.BatchLookup, err = boolFromMap(lookupQueryParams, "batch_lookup") + lkp.BatchLookup, err = boolFromMap(lookupQueryParams, lookupInternalParamBatchLookup) if err != nil { return err } - if readLock, ok := lookupQueryParams["read_lock"]; ok { + if readLock, ok := 
lookupQueryParams[lookupInternalParamReadLock]; ok { if _, valid := readLockExprs[readLock]; !valid { - return fmt.Errorf("invalid read_lock value: %s", readLock) + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid %s value: %s", lookupInternalParamReadLock, readLock) } lkp.ReadLock = readLock } @@ -399,10 +429,10 @@ type commonConfig struct { func parseCommonConfig(m map[string]string) (*commonConfig, error) { var c commonConfig var err error - if c.autocommit, err = boolFromMap(m, "autocommit"); err != nil { + if c.autocommit, err = boolFromMap(m, lookupCommonParamAutocommit); err != nil { return nil, err } - if c.multiShardAutocommit, err = boolFromMap(m, "multi_shard_autocommit"); err != nil { + if c.multiShardAutocommit, err = boolFromMap(m, lookupCommonParamMultiShardAutocommit); err != nil { return nil, err } return &c, nil diff --git a/go/vt/vtgate/vindexes/lookup_test.go b/go/vt/vtgate/vindexes/lookup_test.go index df21f07c83d..a59fcbf1da9 100644 --- a/go/vt/vtgate/vindexes/lookup_test.go +++ b/go/vt/vtgate/vindexes/lookup_test.go @@ -19,12 +19,12 @@ package vindexes import ( "context" "errors" + "strings" "testing" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/test/utils" - "strings" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -34,6 +34,8 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" ) // LookupNonUnique tests are more comprehensive than others. 
@@ -112,6 +114,242 @@ func (vc *vcursor) execute(query string, bindvars map[string]*querypb.BindVariab panic("unexpected") } +func (vc *vcursor) ConnCollation() collations.ID { + return collations.Default() +} + +func lookupCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "lookup", + vindexName: "lookup", + vindexParams: vindexParams, + + expectCost: 20, + expectErr: expectErr, + expectIsUnique: false, + expectNeedsVCursor: true, + expectString: "lookup", + expectUnknownParams: expectUnknownParams, + } +} + +func lookupUniqueCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "lookup_unique", + vindexName: "lookup_unique", + vindexParams: vindexParams, + + expectCost: 10, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: true, + expectString: "lookup_unique", + expectUnknownParams: expectUnknownParams, + } +} + +func testLookupCreateVindexCommonCases(t *testing.T, testCaseF func(string, map[string]string, error, []string) createVindexTestCase) { + testLookupCreateVindexInternalCases(t, testCaseF) + + cases := []createVindexTestCase{ + testCaseF( + "autocommit true", + map[string]string{"autocommit": "true"}, + nil, + nil, + ), + testCaseF( + "autocommit false", + map[string]string{"autocommit": "false"}, + nil, + nil, + ), + testCaseF( + "autocommit reject not bool", + map[string]string{"autocommit": "hello"}, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "autocommit value must be 'true' or 'false': 'hello'"), + nil, + ), + testCaseF( + "multi_shard_autocommit true", + map[string]string{"multi_shard_autocommit": "true"}, + nil, + nil, + ), + testCaseF( + "multi_shard_autocommit false", + 
map[string]string{"multi_shard_autocommit": "false"}, + nil, + nil, + ), + testCaseF( + "multi_shard_autocommit reject not bool", + map[string]string{"multi_shard_autocommit": "hello"}, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "multi_shard_autocommit value must be 'true' or 'false': 'hello'"), + nil, + ), + } + + testCreateVindexes(t, cases) +} + +func testLookupCreateVindexInternalCases(t *testing.T, testCaseF func(string, map[string]string, error, []string) createVindexTestCase) { + cases := []createVindexTestCase{ + // TODO(maxeng): make table, to, from required params. + testCaseF( + "no params", + nil, + nil, + nil, + ), + testCaseF( + "empty params", + map[string]string{}, + nil, + nil, + ), + testCaseF( + "batch_lookup true", + map[string]string{"batch_lookup": "true"}, + nil, + nil, + ), + testCaseF( + "batch_lookup false", + map[string]string{"batch_lookup": "false"}, + nil, + nil, + ), + testCaseF( + "batch_lookup reject not bool", + map[string]string{"batch_lookup": "hello"}, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "batch_lookup value must be 'true' or 'false': 'hello'"), + nil, + ), + testCaseF( + "ignore_nulls true", + map[string]string{"ignore_nulls": "true"}, + nil, + nil, + ), + testCaseF( + "ignore_nulls false", + map[string]string{"ignore_nulls": "false"}, + nil, + nil, + ), + testCaseF( + "ignore_nulls reject not bool", + map[string]string{"ignore_nulls": "hello"}, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "ignore_nulls value must be 'true' or 'false': 'hello'"), + nil, + ), + testCaseF( + "read_lock exclusive", + map[string]string{"read_lock": "exclusive"}, + nil, + nil, + ), + testCaseF( + "read_lock shared", + map[string]string{"read_lock": "shared"}, + nil, + nil, + ), + testCaseF( + "read_lock none", + map[string]string{"read_lock": "none"}, + nil, + nil, + ), + testCaseF( + "read_lock reject unknown values", + map[string]string{"read_lock": "unknown"}, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid 
read_lock value: unknown"), + nil, + ), + testCaseF( + "ignore_nulls reject not bool", + map[string]string{"ignore_nulls": "hello"}, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "ignore_nulls value must be 'true' or 'false': 'hello'"), + nil, + ), + testCaseF( + "write_only true", + map[string]string{"write_only": "true"}, + nil, + nil, + ), + testCaseF( + "write_only false", + map[string]string{"write_only": "false"}, + nil, + nil, + ), + testCaseF( + "write_only reject not bool", + map[string]string{"write_only": "hello"}, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "write_only value must be 'true' or 'false': 'hello'"), + nil, + ), + testCaseF( + "unknown params", + map[string]string{"hello": "world"}, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) +} + +func TestLookupCreateVindex(t *testing.T) { + testCaseFs := []func(string, map[string]string, error, []string) createVindexTestCase{ + lookupCreateVindexTestCase, + lookupUniqueCreateVindexTestCase, + } + for _, testCaseF := range testCaseFs { + testLookupCreateVindexCommonCases(t, testCaseF) + + cases := []createVindexTestCase{ + testCaseF( + "no_verify true", + map[string]string{"no_verify": "true"}, + nil, + nil, + ), + testCaseF( + "no_verify false", + map[string]string{"no_verify": "false"}, + nil, + nil, + ), + testCaseF( + "no_verify reject not bool", + map[string]string{"no_verify": "hello"}, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "no_verify value must be 'true' or 'false': 'hello'"), + nil, + ), + } + + testCreateVindexes(t, cases) + } +} + func TestLookupNonUniqueNew(t *testing.T) { l := createLookup(t, "lookup", false /* writeOnly */) assert.False(t, l.(*LookupNonUnique).writeOnly, "Create(lookup, false)") @@ -128,25 +366,17 @@ func TestLookupNonUniqueNew(t *testing.T) { require.EqualError(t, err, "write_only value must be 'true' or 'false': 'invalid'") } -func TestLookupNonUniqueInfo(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup", false /* 
writeOnly */) - assert.Equal(t, 20, lookupNonUnique.Cost()) - assert.Equal(t, "lookup", lookupNonUnique.String()) - assert.False(t, lookupNonUnique.IsUnique()) - assert.True(t, lookupNonUnique.NeedsVCursor()) -} - func TestLookupNilVCursor(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup", false /* writeOnly */) - _, err := lookupNonUnique.Map(context.Background(), nil, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + lnu := createLookup(t, "lookup", false /* writeOnly */) + _, err := lnu.Map(context.Background(), nil, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) require.EqualError(t, err, "cannot perform lookup: no vcursor provided") } func TestLookupNonUniqueMap(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup", false /* writeOnly */) + lnu := createLookup(t, "lookup", false /* writeOnly */) vc := &vcursor{numRows: 2} - got, err := lookupNonUnique.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + got, err := lnu.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) require.NoError(t, err) want := []key.Destination{ key.DestinationKeyspaceIDs([][]byte{ @@ -172,7 +402,7 @@ func TestLookupNonUniqueMap(t *testing.T) { // Test query fail. 
vc.mustFail = true - _, err = lookupNonUnique.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}) + _, err = lnu.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}) require.EqualError(t, err, "lookup.Map: execute failed") } @@ -184,10 +414,11 @@ func TestLookupNonUniqueMapAutocommit(t *testing.T) { "autocommit": "true", }) require.NoError(t, err) - lookupNonUnique := vindex.(SingleColumn) + require.Empty(t, vindex.(ParamValidating).UnknownParams()) + lnu := vindex.(SingleColumn) vc := &vcursor{numRows: 2} - got, err := lookupNonUnique.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + got, err := lnu.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) require.NoError(t, err) want := []key.Destination{ key.DestinationKeyspaceIDs([][]byte{ @@ -214,10 +445,10 @@ func TestLookupNonUniqueMapAutocommit(t *testing.T) { } func TestLookupNonUniqueMapWriteOnly(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup", true) + lnu := createLookup(t, "lookup", true) vc := &vcursor{numRows: 0} - got, err := lookupNonUnique.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + got, err := lnu.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) require.NoError(t, err) want := []key.Destination{ key.DestinationKeyRange{ @@ -231,10 +462,10 @@ func TestLookupNonUniqueMapWriteOnly(t *testing.T) { } func TestLookupNonUniqueMapAbsent(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup", false /* writeOnly */) + lnu := createLookup(t, "lookup", false /* writeOnly */) vc := &vcursor{numRows: 0} - got, err := lookupNonUnique.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + got, err := lnu.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) require.NoError(t, err) want := []key.Destination{ 
key.DestinationNone{}, @@ -244,10 +475,10 @@ func TestLookupNonUniqueMapAbsent(t *testing.T) { } func TestLookupNonUniqueVerify(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup", false /* writeOnly */) + lnu := createLookup(t, "lookup", false /* writeOnly */) vc := &vcursor{numRows: 1} - _, err := lookupNonUnique.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte("test1"), []byte("test2")}) + _, err := lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte("test1"), []byte("test2")}) require.NoError(t, err) wantqueries := []*querypb.BoundQuery{{ @@ -267,15 +498,15 @@ func TestLookupNonUniqueVerify(t *testing.T) { // Test query fail. vc.mustFail = true - _, err = lookupNonUnique.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}) + _, err = lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}) require.EqualError(t, err, "lookup.Verify: execute failed") vc.mustFail = false // writeOnly true should always yield true. 
- lookupNonUnique = createLookup(t, "lookup", true) + lnu = createLookup(t, "lookup", true) vc.queries = nil - got, err := lookupNonUnique.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte(""), []byte("")}) + got, err := lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte(""), []byte("")}) require.NoError(t, err) assert.Empty(t, vc.queries, "lookup verify queries") utils.MustMatch(t, []bool{true, true}, got) @@ -289,10 +520,11 @@ func TestLookupNonUniqueNoVerify(t *testing.T) { "no_verify": "true", }) require.NoError(t, err) - lookupNonUnique := vindex.(SingleColumn) + require.Empty(t, vindex.(ParamValidating).UnknownParams()) + lnu := vindex.(SingleColumn) vc := &vcursor{numRows: 1} - _, err = lookupNonUnique.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte("test1"), []byte("test2")}) + _, err = lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte("test1"), []byte("test2")}) require.NoError(t, err) var wantqueries []*querypb.BoundQuery @@ -300,7 +532,7 @@ func TestLookupNonUniqueNoVerify(t *testing.T) { // Test query fail. 
vc.mustFail = true - _, err = lookupNonUnique.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}) + _, err = lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}) require.NoError(t, err) } @@ -312,6 +544,7 @@ func TestLookupUniqueNoVerify(t *testing.T) { "no_verify": "true", }) require.NoError(t, err) + require.Empty(t, vindex.(ParamValidating).UnknownParams()) lookupUnique := vindex.(SingleColumn) vc := &vcursor{numRows: 1} @@ -335,10 +568,11 @@ func TestLookupNonUniqueVerifyAutocommit(t *testing.T) { "autocommit": "true", }) require.NoError(t, err) - lookupNonUnique := vindex.(SingleColumn) + require.Empty(t, vindex.(ParamValidating).UnknownParams()) + lnu := vindex.(SingleColumn) vc := &vcursor{numRows: 1} - _, err = lookupNonUnique.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte("test1"), []byte("test2")}) + _, err = lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte("test1"), []byte("test2")}) require.NoError(t, err) wantqueries := []*querypb.BoundQuery{{ @@ -360,10 +594,10 @@ func TestLookupNonUniqueVerifyAutocommit(t *testing.T) { } func TestLookupNonUniqueCreate(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup", false /* writeOnly */) + lnu := createLookup(t, "lookup", false /* writeOnly */) vc := &vcursor{} - err := lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}, {sqltypes.NewInt64(2)}}, [][]byte{[]byte("test1"), []byte("test2")}, false /* ignoreMode */) + err := lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}, {sqltypes.NewInt64(2)}}, [][]byte{[]byte("test1"), []byte("test2")}, false /* ignoreMode */) require.NoError(t, err) wantqueries := []*querypb.BoundQuery{{ @@ -379,19 +613,19 @@ func 
TestLookupNonUniqueCreate(t *testing.T) { // With ignore. vc.queries = nil - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(2)}, {sqltypes.NewInt64(1)}}, [][]byte{[]byte("test2"), []byte("test1")}, true /* ignoreMode */) + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(2)}, {sqltypes.NewInt64(1)}}, [][]byte{[]byte("test2"), []byte("test1")}, true /* ignoreMode */) require.NoError(t, err) wantqueries[0].Sql = "insert ignore into t(fromc, toc) values(:fromc_0, :toc_0), (:fromc_1, :toc_1)" utils.MustMatch(t, wantqueries, vc.queries) // With ignore_nulls off - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(2)}, {sqltypes.NULL}}, [][]byte{[]byte("test2"), []byte("test1")}, true /* ignoreMode */) + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(2)}, {sqltypes.NULL}}, [][]byte{[]byte("test2"), []byte("test1")}, true /* ignoreMode */) assert.EqualError(t, err, "lookup.Create: input has null values: row: 1, col: 0") // With ignore_nulls on vc.queries = nil - lookupNonUnique.(*LookupNonUnique).lkp.IgnoreNulls = true - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(2)}, {sqltypes.NULL}}, [][]byte{[]byte("test2"), []byte("test1")}, true /* ignoreMode */) + lnu.(*LookupNonUnique).lkp.IgnoreNulls = true + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(2)}, {sqltypes.NULL}}, [][]byte{[]byte("test2"), []byte("test1")}, true /* ignoreMode */) require.NoError(t, err) wantqueries = []*querypb.BoundQuery{{ Sql: "insert ignore into t(fromc, toc) values(:fromc_0, :toc_0)", @@ -404,26 +638,27 @@ func TestLookupNonUniqueCreate(t *testing.T) { // Test query fail. 
vc.mustFail = true - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false /* ignoreMode */) + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false /* ignoreMode */) assert.EqualError(t, err, "lookup.Create: execute failed") vc.mustFail = false // Test column mismatch. - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false /* ignoreMode */) + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false /* ignoreMode */) assert.EqualError(t, err, "lookup.Create: column vindex count does not match the columns in the lookup: 2 vs [fromc]") } func TestLookupNonUniqueCreateAutocommit(t *testing.T) { - lookupNonUnique, err := CreateVindex("lookup", "lookup", map[string]string{ + lnu, err := CreateVindex("lookup", "lookup", map[string]string{ "table": "t", "from": "from1,from2", "to": "toc", "autocommit": "true", }) require.NoError(t, err) + require.Empty(t, lnu.(ParamValidating).UnknownParams()) vc := &vcursor{} - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }, { sqltypes.NewInt64(3), sqltypes.NewInt64(4), @@ -446,10 +681,10 @@ func TestLookupNonUniqueCreateAutocommit(t *testing.T) { } func TestLookupNonUniqueDelete(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup", false /* writeOnly */) + lnu := createLookup(t, "lookup", false /* writeOnly */) vc := &vcursor{} - err := lookupNonUnique.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}, 
{sqltypes.NewInt64(2)}}, []byte("test")) + err := lnu.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}, {sqltypes.NewInt64(2)}}, []byte("test")) require.NoError(t, err) wantqueries := []*querypb.BoundQuery{{ @@ -469,35 +704,37 @@ func TestLookupNonUniqueDelete(t *testing.T) { // Test query fail. vc.mustFail = true - err = lookupNonUnique.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, []byte("\x16k@\xb4J\xbaK\xd6")) + err = lnu.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, []byte("\x16k@\xb4J\xbaK\xd6")) assert.EqualError(t, err, "lookup.Delete: execute failed") vc.mustFail = false // Test column count fail. - err = lookupNonUnique.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}}, []byte("\x16k@\xb4J\xbaK\xd6")) + err = lnu.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}}, []byte("\x16k@\xb4J\xbaK\xd6")) assert.EqualError(t, err, "lookup.Delete: column vindex count does not match the columns in the lookup: 2 vs [fromc]") } func TestLookupNonUniqueDeleteAutocommit(t *testing.T) { - lookupNonUnique, _ := CreateVindex("lookup", "lookup", map[string]string{ + lnu, err := CreateVindex("lookup", "lookup", map[string]string{ "table": "t", "from": "fromc", "to": "toc", "autocommit": "true", }) + require.NoError(t, err) + require.Empty(t, lnu.(ParamValidating).UnknownParams()) vc := &vcursor{} - err := lookupNonUnique.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}, {sqltypes.NewInt64(2)}}, []byte("test")) + err = lnu.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}, {sqltypes.NewInt64(2)}}, []byte("test")) require.NoError(t, err) utils.MustMatch(t, []*querypb.BoundQuery(nil), vc.queries) } func TestLookupNonUniqueUpdate(t *testing.T) { - lookupNonUnique := createLookup(t, 
"lookup", false /* writeOnly */) + lnu := createLookup(t, "lookup", false /* writeOnly */) vc := &vcursor{} - err := lookupNonUnique.(Lookup).Update(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}, []byte("test"), []sqltypes.Value{sqltypes.NewInt64(2)}) + err := lnu.(Lookup).Update(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}, []byte("test"), []sqltypes.Value{sqltypes.NewInt64(2)}) require.NoError(t, err) wantqueries := []*querypb.BoundQuery{{ @@ -567,16 +804,17 @@ func TestLookupUniqueMapResult(t *testing.T) { } func TestLookupNonUniqueCreateMultiShardAutocommit(t *testing.T) { - lookupNonUnique, err := CreateVindex("lookup", "lookup", map[string]string{ + lnu, err := CreateVindex("lookup", "lookup", map[string]string{ "table": "t", "from": "from1,from2", "to": "toc", "multi_shard_autocommit": "true", }) require.NoError(t, err) + require.Empty(t, lnu.(ParamValidating).UnknownParams()) vc := &vcursor{} - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }, { sqltypes.NewInt64(3), sqltypes.NewInt64(4), @@ -611,5 +849,6 @@ func createLookup(t *testing.T, name string, writeOnly bool) SingleColumn { "write_only": write, }) require.NoError(t, err) + require.Empty(t, l.(ParamValidating).UnknownParams()) return l.(SingleColumn) } diff --git a/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash.go b/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash.go index 725511614ae..74cbe1423a0 100644 --- a/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash.go +++ b/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash.go @@ -22,24 +22,33 @@ import ( "encoding/json" "fmt" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" ) +const ( + 
lookupUnicodeLooseMD5HashParamWriteOnly = "write_only" +) + var ( - _ SingleColumn = (*LookupUnicodeLooseMD5Hash)(nil) - _ Lookup = (*LookupUnicodeLooseMD5Hash)(nil) - _ SingleColumn = (*LookupUnicodeLooseMD5HashUnique)(nil) - _ Lookup = (*LookupUnicodeLooseMD5HashUnique)(nil) + _ SingleColumn = (*LookupUnicodeLooseMD5Hash)(nil) + _ Lookup = (*LookupUnicodeLooseMD5Hash)(nil) + _ ParamValidating = (*LookupUnicodeLooseMD5Hash)(nil) + _ SingleColumn = (*LookupUnicodeLooseMD5HashUnique)(nil) + _ Lookup = (*LookupUnicodeLooseMD5HashUnique)(nil) + _ ParamValidating = (*LookupUnicodeLooseMD5HashUnique)(nil) + + lookupUnicodeLooseMD5HashParams = append( + append(make([]string, 0), lookupCommonParams...), + lookupUnicodeLooseMD5HashParamWriteOnly, + ) ) func init() { - Register("lookup_unicodeloosemd5_hash", NewLookupUnicodeLooseMD5Hash) - Register("lookup_unicodeloosemd5_hash_unique", NewLookupUnicodeLooseMD5HashUnique) + Register("lookup_unicodeloosemd5_hash", newLookupUnicodeLooseMD5Hash) + Register("lookup_unicodeloosemd5_hash_unique", newLookupUnicodeLooseMD5HashUnique) } //==================================================================== @@ -49,12 +58,13 @@ func init() { // NonUnique and a Lookup and stores the from value in a hashed form. // Warning: This Vindex is being depcreated in favor of Lookup type LookupUnicodeLooseMD5Hash struct { - name string - writeOnly bool - lkp lookupInternal + name string + writeOnly bool + lkp lookupInternal + unknownParams []string } -// NewLookupUnicodeLooseMD5Hash creates a LookupUnicodeLooseMD5Hash vindex. +// newLookupUnicodeLooseMD5Hash creates a LookupUnicodeLooseMD5Hash vindex. // The supplied map has the following required fields: // // table: name of the backing table. It can be qualified by the keyspace. @@ -65,14 +75,17 @@ type LookupUnicodeLooseMD5Hash struct { // // autocommit: setting this to "true" will cause inserts to upsert and deletes to be ignored. 
// write_only: in this mode, Map functions return the full keyrange causing a full scatter. -func NewLookupUnicodeLooseMD5Hash(name string, m map[string]string) (Vindex, error) { - lh := &LookupUnicodeLooseMD5Hash{name: name} +func newLookupUnicodeLooseMD5Hash(name string, m map[string]string) (Vindex, error) { + lh := &LookupUnicodeLooseMD5Hash{ + name: name, + unknownParams: FindUnknownParams(m, lookupUnicodeLooseMD5HashParams), + } cc, err := parseCommonConfig(m) if err != nil { return nil, err } - lh.writeOnly, err = boolFromMap(m, "write_only") + lh.writeOnly, err = boolFromMap(m, lookupUnicodeLooseMD5HashParamWriteOnly) if err != nil { return nil, err } @@ -137,7 +150,7 @@ func (lh *LookupUnicodeLooseMD5Hash) Map(ctx context.Context, vcursor VCursor, i } ksids := make([][]byte, 0, len(result.Rows)) for _, row := range result.Rows { - num, err := evalengine.ToUint64(row[0]) + num, err := row[0].ToCastUint64() if err != nil { // A failure to convert is equivalent to not being // able to map. @@ -223,6 +236,11 @@ func (lh *LookupUnicodeLooseMD5Hash) MarshalJSON() ([]byte, error) { return json.Marshal(lh.lkp) } +// UnknownParams implements the ParamValidating interface. +func (lh *LookupUnicodeLooseMD5Hash) UnknownParams() []string { + return lh.unknownParams +} + //==================================================================== // LookupUnicodeLooseMD5HashUnique defines a vindex that uses a lookup table. @@ -230,12 +248,13 @@ func (lh *LookupUnicodeLooseMD5Hash) MarshalJSON() ([]byte, error) { // Unique and a Lookup and will store the from value in a hashed format. // Warning: This Vindex is being depcreated in favor of LookupUnique type LookupUnicodeLooseMD5HashUnique struct { - name string - writeOnly bool - lkp lookupInternal + name string + writeOnly bool + lkp lookupInternal + unknownParams []string } -// NewLookupUnicodeLooseMD5HashUnique creates a LookupUnicodeLooseMD5HashUnique vindex. 
+// newLookupUnicodeLooseMD5HashUnique creates a LookupUnicodeLooseMD5HashUnique vindex. // The supplied map has the following required fields: // // table: name of the backing table. It can be qualified by the keyspace. @@ -246,20 +265,23 @@ type LookupUnicodeLooseMD5HashUnique struct { // // autocommit: setting this to "true" will cause deletes to be ignored. // write_only: in this mode, Map functions return the full keyrange causing a full scatter. -func NewLookupUnicodeLooseMD5HashUnique(name string, m map[string]string) (Vindex, error) { - lhu := &LookupUnicodeLooseMD5HashUnique{name: name} +func newLookupUnicodeLooseMD5HashUnique(name string, m map[string]string) (Vindex, error) { + lhu := &LookupUnicodeLooseMD5HashUnique{ + name: name, + unknownParams: FindUnknownParams(m, lookupUnicodeLooseMD5HashParams), + } cc, err := parseCommonConfig(m) if err != nil { return nil, err } - lhu.writeOnly, err = boolFromMap(m, "write_only") + lhu.writeOnly, err = boolFromMap(m, lookupUnicodeLooseMD5HashParamWriteOnly) if err != nil { return nil, err } // Don't allow upserts for unique vindexes. - if err := lhu.lkp.Init(m, cc.autocommit, false, cc.multiShardAutocommit); err != nil { + if err := lhu.lkp.Init(m, cc.autocommit, false /* upsert */, cc.multiShardAutocommit); err != nil { return nil, err } return lhu, nil @@ -308,7 +330,7 @@ func (lhu *LookupUnicodeLooseMD5HashUnique) Map(ctx context.Context, vcursor VCu case 0: out = append(out, key.DestinationNone{}) case 1: - num, err := evalengine.ToUint64(result.Rows[0][0]) + num, err := result.Rows[0][0].ToCastUint64() if err != nil { out = append(out, key.DestinationNone{}) continue @@ -399,6 +421,11 @@ func (lhu *LookupUnicodeLooseMD5HashUnique) IsBackfilling() bool { return lhu.writeOnly } +// UnknownParams implements the ParamValidating interface. 
+func (lhu *LookupUnicodeLooseMD5HashUnique) UnknownParams() []string { + return lhu.unknownParams +} + func unicodeHashValue(value sqltypes.Value) (sqltypes.Value, error) { hash, err := unicodeHash(vMD5Hash, value) if err != nil { diff --git a/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash_test.go b/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash_test.go index 373bc374074..989458ccc13 100644 --- a/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash_test.go +++ b/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash_test.go @@ -37,6 +37,32 @@ const ( hashed40 uint64 = 16576388050845489136 ) +func lookupUnicodeLooseMD5HashCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "lookup_unicodeloosemd5_hash", + vindexName: "lookup_unicodeloosemd5_hash", + vindexParams: vindexParams, + + expectCost: 20, + expectErr: expectErr, + expectIsUnique: false, + expectNeedsVCursor: true, + expectString: "lookup_unicodeloosemd5_hash", + expectUnknownParams: expectUnknownParams, + } +} + +func TestLookupUnicodeLooseMD5HashCreateVindex(t *testing.T) { + testLookupCreateVindexCommonCases(t, lookupUnicodeLooseMD5HashCreateVindexTestCase) +} + func TestLookupUnicodeLooseMD5HashMap(t *testing.T) { lookup := createLookup(t, "lookup_unicodeloosemd5_hash", false) vc := &vcursor{numRows: 2, keys: []sqltypes.Value{sqltypes.NewUint64(hashed10), sqltypes.NewUint64(hashed20)}} @@ -84,16 +110,17 @@ func TestLookupUnicodeLooseMD5HashMapAutocommit(t *testing.T) { "table": "t", "from": "fromc", "to": "toc", - "hash_from": "true", "autocommit": "true", }) if err != nil { t.Fatal(err) } - lookupNonUnique := vindex.(SingleColumn) + unknownParams := vindex.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) + lnu := vindex.(SingleColumn) vc := &vcursor{numRows: 2, keys: []sqltypes.Value{sqltypes.NewUint64(hashed10), 
sqltypes.NewUint64(hashed20)}} - got, err := lookupNonUnique.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}) + got, err := lnu.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}) require.NoError(t, err) want := []key.Destination{ key.DestinationKeyspaceIDs([][]byte{ @@ -127,10 +154,10 @@ func TestLookupUnicodeLooseMD5HashMapAutocommit(t *testing.T) { } func TestLookupUnicodeLooseMD5HashMapWriteOnly(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup_unicodeloosemd5_hash", true) + lnu := createLookup(t, "lookup_unicodeloosemd5_hash", true) vc := &vcursor{numRows: 0} - got, err := lookupNonUnique.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}) + got, err := lnu.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}) require.NoError(t, err) want := []key.Destination{ key.DestinationKeyRange{ @@ -146,10 +173,10 @@ func TestLookupUnicodeLooseMD5HashMapWriteOnly(t *testing.T) { } func TestLookupUnicodeLooseMD5HashMapAbsent(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup_unicodeloosemd5_hash", false) + lnu := createLookup(t, "lookup_unicodeloosemd5_hash", false) vc := &vcursor{numRows: 0} - got, err := lookupNonUnique.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}) + got, err := lnu.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}) require.NoError(t, err) want := []key.Destination{ key.DestinationNone{}, @@ -161,10 +188,10 @@ func TestLookupUnicodeLooseMD5HashMapAbsent(t *testing.T) { } func TestLookupUnicodeLooseMD5HashVerify(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup_unicodeloosemd5_hash", false) + lnu := createLookup(t, "lookup_unicodeloosemd5_hash", false) vc := &vcursor{numRows: 1} - got, err := lookupNonUnique.Verify(context.Background(), vc, 
[]sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6"), []byte("\x06\xe7\xea\"Βp\x8f")}) + got, err := lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6"), []byte("\x06\xe7\xea\"Βp\x8f")}) require.NoError(t, err) wantResult := []bool{true, true} if !reflect.DeepEqual(got, wantResult) { @@ -190,7 +217,7 @@ func TestLookupUnicodeLooseMD5HashVerify(t *testing.T) { // Test query fail. vc.mustFail = true - _, err = lookupNonUnique.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}) + _, err = lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}) want := "lookup.Verify: execute failed" if err == nil || err.Error() != want { t.Errorf("lookupNonUnique(query fail) err: %v, want %s", err, want) @@ -198,10 +225,10 @@ func TestLookupUnicodeLooseMD5HashVerify(t *testing.T) { vc.mustFail = false // writeOnly true should always yield true. 
- lookupNonUnique = createLookup(t, "lookup_unicodeloosemd5_hash", true) + lnu = createLookup(t, "lookup_unicodeloosemd5_hash", true) vc.queries = nil - got, err = lookupNonUnique.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}, [][]byte{[]byte(""), []byte("")}) + got, err = lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}, [][]byte{[]byte(""), []byte("")}) require.NoError(t, err) if vc.queries != nil { t.Errorf("lookup.Verify(writeOnly), queries: %v, want nil", vc.queries) @@ -222,10 +249,12 @@ func TestLookupUnicodeLooseMD5HashVerifyAutocommit(t *testing.T) { if err != nil { t.Fatal(err) } - lookupNonUnique := vindex.(SingleColumn) + unknownParams := vindex.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) + lnu := vindex.(SingleColumn) vc := &vcursor{numRows: 1} - _, err = lookupNonUnique.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6"), []byte("\x06\xe7\xea\"Βp\x8f")}) + _, err = lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6"), []byte("\x06\xe7\xea\"Βp\x8f")}) require.NoError(t, err) wantqueries := []*querypb.BoundQuery{{ @@ -251,10 +280,10 @@ func TestLookupUnicodeLooseMD5HashVerifyAutocommit(t *testing.T) { } func TestLookupUnicodeLooseMD5HashCreate(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup_unicodeloosemd5_hash", false) + lnu := createLookup(t, "lookup_unicodeloosemd5_hash", false) vc := &vcursor{} - err := lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}, {sqltypes.NewInt64(20)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6"), []byte("\x06\xe7\xea\"Βp\x8f")}, false) + err := lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}, {sqltypes.NewInt64(20)}}, 
[][]byte{[]byte("\x16k@\xb4J\xbaK\xd6"), []byte("\x06\xe7\xea\"Βp\x8f")}, false) require.NoError(t, err) wantqueries := []*querypb.BoundQuery{{ @@ -272,7 +301,7 @@ func TestLookupUnicodeLooseMD5HashCreate(t *testing.T) { // With ignore. vc.queries = nil - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}, {sqltypes.NewInt64(20)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6"), []byte("\x06\xe7\xea\"Βp\x8f")}, true) + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}, {sqltypes.NewInt64(20)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6"), []byte("\x06\xe7\xea\"Βp\x8f")}, true) require.NoError(t, err) wantqueries[0].Sql = "insert ignore into t(fromc, toc) values(:fromc_0, :toc_0), (:fromc_1, :toc_1)" @@ -282,7 +311,7 @@ func TestLookupUnicodeLooseMD5HashCreate(t *testing.T) { // Test query fail. vc.mustFail = true - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false) + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false) want := "lookup.Create: execute failed" if err == nil || err.Error() != want { t.Errorf("lookupNonUnique(query fail) err: %v, want %s", err, want) @@ -290,7 +319,7 @@ func TestLookupUnicodeLooseMD5HashCreate(t *testing.T) { vc.mustFail = false // Test column mismatch. 
- err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false) + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false) want = "lookup.Create: column vindex count does not match the columns in the lookup: 2 vs [fromc]" if err == nil || err.Error() != want { t.Errorf("lookupNonUnique(query fail) err: %v, want %s", err, want) @@ -298,7 +327,7 @@ func TestLookupUnicodeLooseMD5HashCreate(t *testing.T) { } func TestLookupUnicodeLooseMD5HashCreateAutocommit(t *testing.T) { - lookupNonUnique, err := CreateVindex("lookup_unicodeloosemd5_hash", "lookup", map[string]string{ + lnu, err := CreateVindex("lookup_unicodeloosemd5_hash", "lookup", map[string]string{ "table": "t", "from": "from1,from2", "to": "toc", @@ -307,9 +336,11 @@ func TestLookupUnicodeLooseMD5HashCreateAutocommit(t *testing.T) { if err != nil { t.Fatal(err) } + unknownParams := lnu.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) vc := &vcursor{} - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ sqltypes.NewInt64(10), sqltypes.NewInt64(20), }, { sqltypes.NewInt64(30), sqltypes.NewInt64(40), @@ -337,7 +368,7 @@ func TestLookupUnicodeLooseMD5HashCreateAutocommit(t *testing.T) { } func TestLookupUnicodeLooseMD5HashCreateMultiShardAutocommit(t *testing.T) { - lookupNonUnique, err := CreateVindex("lookup_unicodeloosemd5_hash", "lookup", map[string]string{ + lnu, err := CreateVindex("lookup_unicodeloosemd5_hash", "lookup", map[string]string{ "table": "t", "from": "from1,from2", "to": "toc", @@ -346,9 +377,11 @@ func TestLookupUnicodeLooseMD5HashCreateMultiShardAutocommit(t *testing.T) { if err != nil { t.Fatal(err) } + unknownParams := 
lnu.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) vc := &vcursor{} - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ sqltypes.NewInt64(10), sqltypes.NewInt64(20), }, { sqltypes.NewInt64(30), sqltypes.NewInt64(40), @@ -376,10 +409,10 @@ func TestLookupUnicodeLooseMD5HashCreateMultiShardAutocommit(t *testing.T) { } func TestLookupUnicodeLooseMD5HashDelete(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup_unicodeloosemd5_hash", false) + lnu := createLookup(t, "lookup_unicodeloosemd5_hash", false) vc := &vcursor{} - err := lookupNonUnique.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}, {sqltypes.NewInt64(20)}}, []byte("\x16k@\xb4J\xbaK\xd6")) + err := lnu.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}, {sqltypes.NewInt64(20)}}, []byte("\x16k@\xb4J\xbaK\xd6")) require.NoError(t, err) wantqueries := []*querypb.BoundQuery{{ @@ -401,7 +434,7 @@ func TestLookupUnicodeLooseMD5HashDelete(t *testing.T) { // Test query fail. vc.mustFail = true - err = lookupNonUnique.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, []byte("\x16k@\xb4J\xbaK\xd6")) + err = lnu.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, []byte("\x16k@\xb4J\xbaK\xd6")) want := "lookup.Delete: execute failed" if err == nil || err.Error() != want { t.Errorf("lookupNonUnique(query fail) err: %v, want %s", err, want) @@ -409,7 +442,7 @@ func TestLookupUnicodeLooseMD5HashDelete(t *testing.T) { vc.mustFail = false // Test column count fail. 
- err = lookupNonUnique.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}}, []byte("\x16k@\xb4J\xbaK\xd6")) + err = lnu.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}}, []byte("\x16k@\xb4J\xbaK\xd6")) want = "lookup.Delete: column vindex count does not match the columns in the lookup: 2 vs [fromc]" if err == nil || err.Error() != want { t.Errorf("lookupNonUnique(query fail) err: %v, want %s", err, want) @@ -417,15 +450,19 @@ func TestLookupUnicodeLooseMD5HashDelete(t *testing.T) { } func TestLookupUnicodeLooseMD5HashDeleteAutocommit(t *testing.T) { - lookupNonUnique, _ := CreateVindex("lookup_unicodeloosemd5_hash", "lookup", map[string]string{ + lnu, err := CreateVindex("lookup_unicodeloosemd5_hash", "lookup", map[string]string{ "table": "t", "from": "fromc", "to": "toc", "autocommit": "true", }) + unknownParams := lnu.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) + require.NoError(t, err) + vc := &vcursor{} - err := lookupNonUnique.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}, {sqltypes.NewInt64(20)}}, []byte("\x16k@\xb4J\xbaK\xd6")) + err = lnu.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}, {sqltypes.NewInt64(20)}}, []byte("\x16k@\xb4J\xbaK\xd6")) require.NoError(t, err) wantqueries := []*querypb.BoundQuery(nil) @@ -435,10 +472,10 @@ func TestLookupUnicodeLooseMD5HashDeleteAutocommit(t *testing.T) { } func TestLookupUnicodeLooseMD5HashUpdate(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup_unicodeloosemd5_hash", false) + lnu := createLookup(t, "lookup_unicodeloosemd5_hash", false) vc := &vcursor{} - err := lookupNonUnique.(Lookup).Update(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10)}, []byte("\x16k@\xb4J\xbaK\xd6"), []sqltypes.Value{sqltypes.NewInt64(20)}) + err := lnu.(Lookup).Update(context.Background(), vc, 
[]sqltypes.Value{sqltypes.NewInt64(10)}, []byte("\x16k@\xb4J\xbaK\xd6"), []sqltypes.Value{sqltypes.NewInt64(20)}) require.NoError(t, err) wantqueries := []*querypb.BoundQuery{{ diff --git a/go/vt/vtgate/vindexes/lookup_unique_test.go b/go/vt/vtgate/vindexes/lookup_unique_test.go index cc04d5340c3..fd2a62c4d21 100644 --- a/go/vt/vtgate/vindexes/lookup_unique_test.go +++ b/go/vt/vtgate/vindexes/lookup_unique_test.go @@ -21,7 +21,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -36,18 +35,21 @@ func TestLookupUniqueNew(t *testing.T) { t.Errorf("Create(lookup, false): %v, want %v", got, want) } - vindex, _ := CreateVindex("lookup_unique", "lookup_unique", map[string]string{ + vindex, err := CreateVindex("lookup_unique", "lookup_unique", map[string]string{ "table": "t", "from": "fromc", "to": "toc", "write_only": "true", }) + require.NoError(t, err) + require.Empty(t, vindex.(ParamValidating).UnknownParams()) + l = vindex.(SingleColumn) if want, got := l.(*LookupUnique).writeOnly, true; got != want { t.Errorf("Create(lookup, false): %v, want %v", got, want) } - _, err := CreateVindex("lookup_unique", "lookup_unique", map[string]string{ + _, err = CreateVindex("lookup_unique", "lookup_unique", map[string]string{ "table": "t", "from": "fromc", "to": "toc", @@ -59,13 +61,6 @@ func TestLookupUniqueNew(t *testing.T) { } } -func TestLookupUniqueInfo(t *testing.T) { - lookupUnique := createLookup(t, "lookup_unique", false) - assert.Equal(t, 10, lookupUnique.Cost()) - assert.Equal(t, "lookup_unique", lookupUnique.String()) - assert.True(t, lookupUnique.IsUnique()) -} - func TestLookupUniqueMap(t *testing.T) { lookupUnique := createLookup(t, "lookup_unique", false) vc := &vcursor{numRows: 1} @@ -163,6 +158,7 @@ func TestLookupUniqueCreate(t *testing.T) { if err != nil { t.Fatal(err) } + require.Empty(t, lookupUnique.(ParamValidating).UnknownParams()) vc := &vcursor{} err = 
lookupUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, [][]byte{[]byte("test")}, false /* ignoreMode */) diff --git a/go/vt/vtgate/vindexes/main_test.go b/go/vt/vtgate/vindexes/main_test.go new file mode 100644 index 00000000000..226ecfff431 --- /dev/null +++ b/go/vt/vtgate/vindexes/main_test.go @@ -0,0 +1,94 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vindexes + +import ( + "sort" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type createVindexTestCase struct { + testName string + + vindexType string + vindexName string + vindexParams map[string]string + + expectCost int + expectErr error + expectIsUnique bool + expectNeedsVCursor bool + expectString string + expectUnknownParams []string +} + +func assertEqualVtError(t *testing.T, expected, actual error) { + // vterrors.Errorf returns a struct containing a stacktrace, which fails + // assert.EqualError since the stacktrace would be guaranteed to be different. 
+ // so just check the error message + if expected == nil { + assert.NoError(t, actual) + } else { + assert.EqualError(t, actual, expected.Error()) + } +} + +func testCreateVindex( + t *testing.T, + tc createVindexTestCase, + fns ...func(createVindexTestCase, Vindex, []error, error), +) { + t.Run(tc.testName, func(t *testing.T) { + vdx, err := CreateVindex( + tc.vindexType, + tc.vindexName, + tc.vindexParams, + ) + assertEqualVtError(t, tc.expectErr, err) + if err == nil { + assert.NotNil(t, vdx) + } + paramValidating, ok := vdx.(ParamValidating) + var unknownParams []string + if ok { + unknownParams = paramValidating.UnknownParams() + } + require.Equal(t, len(tc.expectUnknownParams), len(unknownParams)) + sort.Strings(tc.expectUnknownParams) + sort.Strings(unknownParams) + require.Equal(t, tc.expectUnknownParams, unknownParams) + if vdx != nil { + assert.Equal(t, tc.expectString, vdx.String()) + assert.Equal(t, tc.expectCost, vdx.Cost()) + assert.Equal(t, tc.expectIsUnique, vdx.IsUnique()) + assert.Equal(t, tc.expectNeedsVCursor, vdx.NeedsVCursor()) + } + }) +} + +func testCreateVindexes( + t *testing.T, + tcs []createVindexTestCase, + fns ...func(createVindexTestCase, Vindex, []error, error), +) { + for _, tc := range tcs { + testCreateVindex(t, tc, fns...) + } +} diff --git a/go/vt/vtgate/vindexes/multicol.go b/go/vt/vtgate/vindexes/multicol.go index 04d84fadb3a..ee53ea5bb60 100644 --- a/go/vt/vtgate/vindexes/multicol.go +++ b/go/vt/vtgate/vindexes/multicol.go @@ -29,7 +29,9 @@ import ( "vitess.io/vitess/go/vt/vterrors" ) -var _ MultiColumn = (*MultiCol)(nil) +var ( + _ MultiColumn = (*MultiCol)(nil) +) type MultiCol struct { name string @@ -46,8 +48,8 @@ const ( defaultVindex = "hash" ) -// NewMultiCol creates a new MultiCol. -func NewMultiCol(name string, m map[string]string) (Vindex, error) { +// newMultiCol creates a new MultiCol. 
+func newMultiCol(name string, m map[string]string) (Vindex, error) { colCount, err := getColumnCount(m) if err != nil { return nil, err @@ -150,7 +152,7 @@ func (m *MultiCol) mapKsid(colValues []sqltypes.Value) (bool, []byte, error) { } func init() { - Register("multicol", NewMultiCol) + Register("multicol", newMultiCol) } func getColumnVindex(m map[string]string, colCount int) (map[int]Hashing, int, error) { @@ -164,6 +166,15 @@ func getColumnVindex(m map[string]string, colCount int) (map[int]Hashing, int, e } columnVdx := make(map[int]Hashing, colCount) vindexCost := 0 + subParams := make(map[string]string) + for k, v := range m { + if k == paramColumnCount || + k == paramColumnBytes || + k == paramColumnVindex { + continue + } + subParams[k] = v + } for i := 0; i < colCount; i++ { selVdx := defaultVindex if len(colVdxs) > i { @@ -173,7 +184,7 @@ func getColumnVindex(m map[string]string, colCount int) (map[int]Hashing, int, e } } // TODO: reuse vindex. avoid creating same vindex. - vdx, err := CreateVindex(selVdx, selVdx, m) + vdx, err := CreateVindex(selVdx, selVdx, subParams) if err != nil { return nil, 0, err } diff --git a/go/vt/vtgate/vindexes/multicol_test.go b/go/vt/vtgate/vindexes/multicol_test.go index ce2e57dcb0e..e4e2098dd1b 100644 --- a/go/vt/vtgate/vindexes/multicol_test.go +++ b/go/vt/vtgate/vindexes/multicol_test.go @@ -23,29 +23,199 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func multicolCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectCost int, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "multicol", + vindexName: "multicol", + vindexParams: vindexParams, + + expectCost: 
expectCost, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "multicol", + expectUnknownParams: expectUnknownParams, + } +} + +func TestMulticolCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + multicolCreateVindexTestCase( + "column count 0 invalid", + map[string]string{ + "column_count": "0", + }, + 0, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "number of columns should be between 1 and 8 in the parameter 'column_count'"), + nil, + ), + multicolCreateVindexTestCase( + "column count 3 ok", + map[string]string{ + "column_count": "3", + }, + 3, + nil, + nil, + ), + multicolCreateVindexTestCase( + "column count 9 invalid", + map[string]string{ + "column_count": "9", + }, + 0, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "number of columns should be between 1 and 8 in the parameter 'column_count'"), + nil, + ), + multicolCreateVindexTestCase( + "column bytes ok", + map[string]string{ + "column_count": "3", + "column_bytes": "1,2,3", + }, + 3, + nil, + nil, + ), + multicolCreateVindexTestCase( + "column bytes more than column count invalid", + map[string]string{ + "column_count": "3", + "column_bytes": "1,2,3,4", + }, + 0, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "number of column bytes provided are more than column count in the parameter 'column_bytes'"), + nil, + ), + multicolCreateVindexTestCase( + "column bytes exceeds keyspace id length", + map[string]string{ + "column_count": "3", + "column_bytes": "100,200,300", + }, + 3, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "column bytes count exceeds the keyspace id length (total bytes count cannot exceed 8 bytes) in the parameter 'column_bytes'"), + nil, + ), + multicolCreateVindexTestCase( + "column vindex ok", + map[string]string{ + "column_count": "3", + "column_bytes": "1,2,3", + "column_vindex": "binary,binary,binary", + }, + 0, + nil, + nil, + ), + multicolCreateVindexTestCase( + "column vindex more than column count", + 
map[string]string{ + "column_count": "3", + "column_bytes": "1,2,3", + "column_vindex": "binary,binary,binary,binary", + }, + 0, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "number of vindex function provided are more than column count in the parameter 'column_vindex'"), + nil, + ), + multicolCreateVindexTestCase( + "column vindex non-hashing invalid", + map[string]string{ + "column_count": "3", + "column_bytes": "1,2,3", + "column_vindex": "binary,binary,null", + }, + 0, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "multicol vindex supports vindexes that exports hashing function, are unique and are non-lookup vindex, passed vindex 'null' is invalid"), + nil, + ), + multicolCreateVindexTestCase( + "column vindex non-unique invalid", + map[string]string{ + "column_count": "3", + "column_bytes": "1,2,3", + "column_vindex": "binary,binary,cfc", + }, + 0, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "multicol vindex supports vindexes that exports hashing function, are unique and are non-lookup vindex, passed vindex 'cfc' is invalid"), + nil, + ), + multicolCreateVindexTestCase( + "column vindex lookup or needs vcursor invalid", + map[string]string{ + "column_count": "3", + "column_bytes": "1,2,3", + "column_vindex": "binary,binary,lookup", + }, + 0, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "multicol vindex supports vindexes that exports hashing function, are unique and are non-lookup vindex, passed vindex 'lookup' is invalid"), + nil, + ), + multicolCreateVindexTestCase( + "no params", + nil, + 0, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "number of columns not provided in the parameter 'column_count'"), + nil, + ), + multicolCreateVindexTestCase( + "empty params", + map[string]string{}, + 0, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "number of columns not provided in the parameter 'column_count'"), + nil, + ), + multicolCreateVindexTestCase( + "allow unknown params", + map[string]string{ + "column_count": "1", + "hello": "world", + 
}, + 1, + nil, + nil, + ), + } + + testCreateVindexes(t, cases) +} + func TestMultiColMisc(t *testing.T) { - vindex, err := CreateVindex("multicol", "multicol", map[string]string{ + vindex, err := CreateVindex("multicol", "multicol_misc", map[string]string{ "column_count": "3", }) require.NoError(t, err) + _, ok := vindex.(ParamValidating) + require.False(t, ok) multiColVdx, isMultiColVdx := vindex.(*MultiCol) assert.True(t, isMultiColVdx) assert.Equal(t, 3, multiColVdx.Cost()) - assert.Equal(t, "multicol", multiColVdx.String()) + assert.Equal(t, "multicol_misc", multiColVdx.String()) assert.True(t, multiColVdx.IsUnique()) assert.False(t, multiColVdx.NeedsVCursor()) assert.True(t, multiColVdx.PartialVindex()) } func TestMultiColMap(t *testing.T) { - vindex, err := CreateVindex("multicol", "multicol", map[string]string{ + vindex, err := CreateVindex("multicol", "multicol_map", map[string]string{ "column_count": "3", }) require.NoError(t, err) diff --git a/go/vt/vtgate/vindexes/null.go b/go/vt/vtgate/vindexes/null.go index 3e8085b7501..58435643ea7 100644 --- a/go/vt/vtgate/vindexes/null.go +++ b/go/vt/vtgate/vindexes/null.go @@ -25,8 +25,10 @@ import ( ) var ( - _ Vindex = (*Null)(nil) - nullksid = []byte{0} + _ Vindex = (*Null)(nil) + _ ParamValidating = (*Null)(nil) + + nullksid = []byte{0} ) // Null defines a vindex that always return 0. It's Unique and @@ -36,12 +38,16 @@ var ( // Unlike other vindexes, this one will work even for NULL input values. This // will allow you to keep MySQL auto-inc columns unchanged. type Null struct { - name string + name string + unknownParams []string } -// NewNull creates a new Null. -func NewNull(name string, m map[string]string) (Vindex, error) { - return &Null{name: name}, nil +// newNull creates a new Null. +func newNull(name string, m map[string]string) (Vindex, error) { + return &Null{ + name: name, + unknownParams: FindUnknownParams(m, nil), + }, nil } // String returns the name of the vindex. 
@@ -82,6 +88,11 @@ func (vind *Null) Verify(ctx context.Context, vcursor VCursor, ids []sqltypes.Va return out, nil } +// UnknownParams implements the ParamValidating interface. +func (vind *Null) UnknownParams() []string { + return vind.unknownParams +} + func init() { - Register("null", NewNull) + Register("null", newNull) } diff --git a/go/vt/vtgate/vindexes/null_test.go b/go/vt/vtgate/vindexes/null_test.go index 87baea46ee3..03b97fe651b 100644 --- a/go/vt/vtgate/vindexes/null_test.go +++ b/go/vt/vtgate/vindexes/null_test.go @@ -21,7 +21,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -31,18 +30,62 @@ import ( var null SingleColumn func init() { - hv, err := CreateVindex("null", "nn", map[string]string{"Table": "t", "Column": "c"}) + hv, err := CreateVindex("null", "nn", map[string]string{}) if err != nil { panic(err) } + unknownParams := hv.(ParamValidating).UnknownParams() + if len(unknownParams) > 0 { + panic("null test init: expected 0 unknown params") + } null = hv.(SingleColumn) } -func TestNullInfo(t *testing.T) { - assert.Equal(t, 100, null.Cost()) - assert.Equal(t, "nn", null.String()) - assert.True(t, null.IsUnique()) - assert.False(t, null.NeedsVCursor()) +func nullCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "null", + vindexName: "null", + vindexParams: vindexParams, + + expectCost: 100, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "null", + expectUnknownParams: expectUnknownParams, + } +} + +func TestNullCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + nullCreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + nullCreateVindexTestCase( + "empty params", + map[string]string{}, + nil, + nil, + ), + 
nullCreateVindexTestCase( + "unknown params", + map[string]string{"hello": "world"}, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestNullMap(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/numeric.go b/go/vt/vtgate/vindexes/numeric.go index e2f8b512fb9..091807ec2cc 100644 --- a/go/vt/vtgate/vindexes/numeric.go +++ b/go/vt/vtgate/vindexes/numeric.go @@ -22,27 +22,30 @@ import ( "encoding/binary" "fmt" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" ) var ( - _ SingleColumn = (*Numeric)(nil) - _ Reversible = (*Numeric)(nil) - _ Hashing = (*Numeric)(nil) + _ SingleColumn = (*Numeric)(nil) + _ Reversible = (*Numeric)(nil) + _ Hashing = (*Numeric)(nil) + _ ParamValidating = (*Numeric)(nil) ) // Numeric defines a bit-pattern mapping of a uint64 to the KeyspaceId. // It's Unique and Reversible. type Numeric struct { - name string + name string + unknownParams []string } -// NewNumeric creates a Numeric vindex. -func NewNumeric(name string, _ map[string]string) (Vindex, error) { - return &Numeric{name: name}, nil +// newNumeric creates a Numeric vindex. +func newNumeric(name string, m map[string]string) (Vindex, error) { + return &Numeric{ + name: name, + unknownParams: FindUnknownParams(m, nil), + }, nil } // String returns the name of the vindex. @@ -105,8 +108,13 @@ func (*Numeric) ReverseMap(_ VCursor, ksids [][]byte) ([]sqltypes.Value, error) return reverseIds, nil } +// UnknownParams implements the ParamValidating interface. 
+func (vind *Numeric) UnknownParams() []string { + return vind.unknownParams +} + func (*Numeric) Hash(id sqltypes.Value) ([]byte, error) { - num, err := evalengine.ToUint64(id) + num, err := id.ToCastUint64() if err != nil { return nil, err } @@ -116,5 +124,5 @@ func (*Numeric) Hash(id sqltypes.Value) ([]byte, error) { } func init() { - Register("numeric", NewNumeric) + Register("numeric", newNumeric) } diff --git a/go/vt/vtgate/vindexes/numeric_static_map.go b/go/vt/vtgate/vindexes/numeric_static_map.go index 11a64e98ce0..f97016d915f 100644 --- a/go/vt/vtgate/vindexes/numeric_static_map.go +++ b/go/vt/vtgate/vindexes/numeric_static_map.go @@ -21,19 +21,31 @@ import ( "context" "encoding/binary" "encoding/json" - "errors" "os" "strconv" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +const ( + numericStaticMapParamJSON = "json" + numericStaticMapParamJSONPath = "json_path" + numericStaticMapParamFallbackType = "fallback_type" ) var ( - _ SingleColumn = (*NumericStaticMap)(nil) - _ Hashing = (*NumericStaticMap)(nil) + _ SingleColumn = (*NumericStaticMap)(nil) + _ Hashing = (*NumericStaticMap)(nil) + _ ParamValidating = (*NumericStaticMap)(nil) + + numericStaticMapParams = []string{ + numericStaticMapParamJSON, + numericStaticMapParamJSONPath, + numericStaticMapParamFallbackType, + } ) // NumericLookupTable stores the mapping of keys. @@ -42,26 +54,27 @@ type NumericLookupTable map[uint64]uint64 // NumericStaticMap is similar to vindex Numeric but first attempts a lookup via // a JSON file. 
type NumericStaticMap struct { - name string - hashVdx Hashing - lookup NumericLookupTable + name string + hashVdx Hashing + lookup NumericLookupTable + unknownParams []string } func init() { - Register("numeric_static_map", NewNumericStaticMap) + Register("numeric_static_map", newNumericStaticMap) } -// NewNumericStaticMap creates a NumericStaticMap vindex. -func NewNumericStaticMap(name string, params map[string]string) (Vindex, error) { - jsonStr, jsok := params["json"] - jsonPath, jpok := params["json_path"] +// newNumericStaticMap creates a NumericStaticMap vindex. +func newNumericStaticMap(name string, params map[string]string) (Vindex, error) { + jsonStr, jsok := params[numericStaticMapParamJSON] + jsonPath, jpok := params[numericStaticMapParamJSONPath] if !jsok && !jpok { - return nil, errors.New("NumericStaticMap: Could not find either `json_path` params in vschema") + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "NumericStaticMap: Could not find either `json_path` or `json` params in vschema") } if jsok && jpok { - return nil, errors.New("NumericStaticMap: Found both `json` and `json_path` params in vschema") + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "NumericStaticMap: Found both `json` and `json_path` params in vschema") } var err error @@ -83,19 +96,19 @@ func NewNumericStaticMap(name string, params map[string]string) (Vindex, error) var hashVdx Hashing - if s, ok := params["fallback_type"]; ok { + if s, ok := params[numericStaticMapParamFallbackType]; ok { vindex, err := CreateVindex(s, name+"_hash", map[string]string{}) if err != nil { return nil, err } hashVdx, _ = vindex.(Hashing) // We know this will not fail - } return &NumericStaticMap{ - hashVdx: hashVdx, - lookup: lt, - name: name, + hashVdx: hashVdx, + lookup: lt, + name: name, + unknownParams: FindUnknownParams(params, numericStaticMapParams), }, nil } @@ -147,7 +160,7 @@ func (vind *NumericStaticMap) Map(ctx context.Context, vcursor VCursor, ids []sq } func (vind 
*NumericStaticMap) Hash(id sqltypes.Value) ([]byte, error) { - num, err := evalengine.ToUint64(id) + num, err := id.ToCastUint64() if err != nil { return nil, err } @@ -166,6 +179,11 @@ func (vind *NumericStaticMap) Hash(id sqltypes.Value) ([]byte, error) { return keybytes[:], nil } +// UnknownParams implements the ParamValidating interface. +func (vind *NumericStaticMap) UnknownParams() []string { + return vind.unknownParams +} + func loadNumericLookupTable(path string) (NumericLookupTable, error) { data, err := os.ReadFile(path) if err != nil { diff --git a/go/vt/vtgate/vindexes/numeric_static_map_test.go b/go/vt/vtgate/vindexes/numeric_static_map_test.go index 05815bd73b1..7a373060f16 100644 --- a/go/vt/vtgate/vindexes/numeric_static_map_test.go +++ b/go/vt/vtgate/vindexes/numeric_static_map_test.go @@ -18,6 +18,7 @@ package vindexes import ( "context" + "errors" "reflect" "testing" @@ -26,6 +27,8 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" ) // createVindex creates the "numeric_static_map" vindex object which is used by @@ -40,23 +43,105 @@ func createVindex() (SingleColumn, error) { return vindex.(SingleColumn), nil } -// createVindexWithParams creates the "numeric_static_map" vindex object with the -// provided params. 
-func createVindexWithParams(params map[string]string) (SingleColumn, error) { - vindex, err := CreateVindex("numeric_static_map", "numericStaticMapWithParams", params) - if err != nil { - return nil, err +func numericStaticMapCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "numeric_static_map", + vindexName: "numeric_static_map", + vindexParams: vindexParams, + + expectCost: 1, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "numeric_static_map", + expectUnknownParams: expectUnknownParams, } - return vindex.(SingleColumn), nil } -func TestNumericStaticMapInfo(t *testing.T) { - numericStaticMap, err := createVindex() - require.NoError(t, err) - assert.Equal(t, 1, numericStaticMap.Cost()) - assert.Equal(t, "numericStaticMap", numericStaticMap.String()) - assert.True(t, numericStaticMap.IsUnique()) - assert.False(t, numericStaticMap.NeedsVCursor()) +func TestNumericStaticMapCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + numericStaticMapCreateVindexTestCase( + "no params invalid, require either json_path or json", + nil, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "NumericStaticMap: Could not find either `json_path` or `json` params in vschema"), + nil, + ), + numericStaticMapCreateVindexTestCase( + "empty params invalid, require either json_path or json", + map[string]string{}, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "NumericStaticMap: Could not find either `json_path` or `json` params in vschema"), + nil, + ), + numericStaticMapCreateVindexTestCase( + "json_path and json mutually exclusive", + map[string]string{ + "json": "{}", + "json_path": "/path/to/map.json", + }, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "NumericStaticMap: Found both `json` and `json_path` params in vschema"), + nil, + ), + 
numericStaticMapCreateVindexTestCase( + "json_path must exist", + map[string]string{ + "json_path": "/path/to/map.json", + }, + errors.New("open /path/to/map.json: no such file or directory"), + nil, + ), + numericStaticMapCreateVindexTestCase( + "json ok", + map[string]string{ + "json": "{}", + }, + nil, + nil, + ), + numericStaticMapCreateVindexTestCase( + "json must be valid syntax", + map[string]string{ + "json": "{]", + }, + errors.New("invalid character ']' looking for beginning of object key string"), + nil, + ), + numericStaticMapCreateVindexTestCase( + "fallback_type ok", + map[string]string{ + "json": "{}", + "fallback_type": "binary", + }, + nil, + nil, + ), + numericStaticMapCreateVindexTestCase( + "fallback_type must be valid vindex type", + map[string]string{ + "json": "{}", + "fallback_type": "not_found", + }, + vterrors.Errorf(vtrpc.Code_NOT_FOUND, "vindexType %q not found", "not_found"), + nil, + ), + numericStaticMapCreateVindexTestCase( + "unknown params", + map[string]string{ + "json": "{}", + "hello": "world", + }, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestNumericStaticMapMap(t *testing.T) { @@ -111,43 +196,66 @@ func TestNumericStaticMapVerify(t *testing.T) { // Failure test _, err = numericStaticMap.Verify(context.Background(), nil, []sqltypes.Value{sqltypes.NewVarBinary("aa")}, [][]byte{nil}) - require.EqualError(t, err, "could not parse value: 'aa'") + require.EqualError(t, err, "cannot parse uint64 from \"aa\"") } func TestNumericStaticMapWithJsonVdx(t *testing.T) { - withFallbackVdx, err := createVindexWithParams(map[string]string{ - "json": "{\"1\":2,\"3\":4,\"5\":6}", - }) + withFallbackVdx, err := CreateVindex( + "numeric_static_map", + t.Name(), + map[string]string{ + "json": "{\"1\":2,\"3\":4,\"5\":6}", + }, + ) require.NoError(t, err) + require.Empty(t, withFallbackVdx.(ParamValidating).UnknownParams()) assert.Equal(t, 1, withFallbackVdx.Cost()) - assert.Equal(t, "numericStaticMapWithParams", 
withFallbackVdx.String()) + assert.Equal(t, t.Name(), withFallbackVdx.String()) assert.True(t, withFallbackVdx.IsUnique()) assert.False(t, withFallbackVdx.NeedsVCursor()) // Bad format tests - _, err = createVindexWithParams(map[string]string{ - "json": "{\"1\":2,\"3\":4,\"5\":6:8,\"10\":11}", - }) + _, err = CreateVindex( + "numeric_static_map", + t.Name(), + map[string]string{ + "json": "{\"1\":2,\"3\":4,\"5\":6:8,\"10\":11}", + }, + ) require.EqualError(t, err, "invalid character ':' after object key:value pair") // Letters in key or value not allowed - _, err = createVindexWithParams(map[string]string{"json": "{\"1\":a}"}) + _, err = CreateVindex( + "numeric_static_map", + t.Name(), + map[string]string{"json": "{\"1\":a}"}, + ) require.EqualError(t, err, "invalid character 'a' looking for beginning of value") - _, err = createVindexWithParams(map[string]string{"json": "{\"a\":1}"}) + _, err = CreateVindex( + "numeric_static_map", + t.Name(), + map[string]string{"json": "{\"a\":1}"}, + ) require.EqualError(t, err, "strconv.ParseUint: parsing \"a\": invalid syntax") } // Test mapping of vindex, both for specified map keys and underlying xxhash func TestNumericStaticMapWithFallback(t *testing.T) { - mapWithFallbackVdx, err := createVindexWithParams(map[string]string{ - "json": "{\"1\":2,\"3\":4,\"4\":5,\"5\":6,\"6\":7,\"7\":8,\"8\":9,\"10\":18446744073709551615}", - "fallback_type": "xxhash", - }) + mapWithFallbackVdx, err := CreateVindex( + "numeric_static_map", + t.Name(), + map[string]string{ + "json": "{\"1\":2,\"3\":4,\"4\":5,\"5\":6,\"6\":7,\"7\":8,\"8\":9,\"10\":18446744073709551615}", + "fallback_type": "xxhash", + }, + ) if err != nil { t.Fatalf("failed to create vindex: %v", err) } - got, err := mapWithFallbackVdx.Map(context.Background(), nil, []sqltypes.Value{ + require.Empty(t, mapWithFallbackVdx.(ParamValidating).UnknownParams()) + singleCol := mapWithFallbackVdx.(SingleColumn) + got, err := singleCol.Map(context.Background(), nil, []sqltypes.Value{ 
sqltypes.NewInt64(1), sqltypes.NewInt64(2), sqltypes.NewInt64(3), @@ -183,14 +291,20 @@ func TestNumericStaticMapWithFallback(t *testing.T) { } func TestNumericStaticMapWithFallbackVerify(t *testing.T) { - mapWithFallbackVdx, err := createVindexWithParams(map[string]string{ - "json": "{\"1\":2,\"3\":4,\"4\":5,\"5\":6,\"6\":7,\"7\":8,\"8\":9,\"10\":18446744073709551615}", - "fallback_type": "xxhash", - }) + mapWithFallbackVdx, err := CreateVindex( + "numeric_static_map", + t.Name(), + map[string]string{ + "json": "{\"1\":2,\"3\":4,\"4\":5,\"5\":6,\"6\":7,\"7\":8,\"8\":9,\"10\":18446744073709551615}", + "fallback_type": "xxhash", + }, + ) if err != nil { t.Fatalf("failed to create vindex: %v", err) } - got, err := mapWithFallbackVdx.Verify(context.Background(), nil, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2), sqltypes.NewInt64(11), sqltypes.NewInt64(10)}, [][]byte{[]byte("\x00\x00\x00\x00\x00\x00\x00\x02"), []byte("\x8b\x59\x80\x16\x62\xb5\x21\x60"), []byte("\xff\xff\xff\xff\xff\xff\xff\xff"), []byte("\xff\xff\xff\xff\xff\xff\xff\xff")}) + require.Empty(t, mapWithFallbackVdx.(ParamValidating).UnknownParams()) + singleCol := mapWithFallbackVdx.(SingleColumn) + got, err := singleCol.Verify(context.Background(), nil, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2), sqltypes.NewInt64(11), sqltypes.NewInt64(10)}, [][]byte{[]byte("\x00\x00\x00\x00\x00\x00\x00\x02"), []byte("\x8b\x59\x80\x16\x62\xb5\x21\x60"), []byte("\xff\xff\xff\xff\xff\xff\xff\xff"), []byte("\xff\xff\xff\xff\xff\xff\xff\xff")}) require.NoError(t, err) want := []bool{true, true, false, true} if !reflect.DeepEqual(got, want) { @@ -198,6 +312,6 @@ func TestNumericStaticMapWithFallbackVerify(t *testing.T) { } // Failure test - _, err = mapWithFallbackVdx.Verify(context.Background(), nil, []sqltypes.Value{sqltypes.NewVarBinary("aa")}, [][]byte{nil}) - require.EqualError(t, err, "could not parse value: 'aa'") + _, err = singleCol.Verify(context.Background(), nil, 
[]sqltypes.Value{sqltypes.NewVarBinary("aa")}, [][]byte{nil}) + require.EqualError(t, err, "cannot parse uint64 from \"aa\"") } diff --git a/go/vt/vtgate/vindexes/numeric_test.go b/go/vt/vtgate/vindexes/numeric_test.go index 5d317d3a161..612c0f3c5e7 100644 --- a/go/vt/vtgate/vindexes/numeric_test.go +++ b/go/vt/vtgate/vindexes/numeric_test.go @@ -21,7 +21,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -31,15 +30,58 @@ import ( var numeric SingleColumn func init() { - vindex, _ := CreateVindex("numeric", "num", nil) + vindex, err := CreateVindex("numeric", "num", nil) + if err != nil { + panic(err) + } numeric = vindex.(SingleColumn) } -func TestNumericInfo(t *testing.T) { - assert.Equal(t, 0, numeric.Cost()) - assert.Equal(t, "num", numeric.String()) - assert.True(t, numeric.IsUnique()) - assert.False(t, numeric.NeedsVCursor()) +func numericCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "numeric", + vindexName: "numeric", + vindexParams: vindexParams, + + expectCost: 0, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "numeric", + expectUnknownParams: expectUnknownParams, + } +} + +func TestNumericCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + numericCreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + numericCreateVindexTestCase( + "empty params", + map[string]string{}, + nil, + nil, + ), + numericCreateVindexTestCase( + "unknown params", + map[string]string{"hello": "world"}, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestNumericMap(t *testing.T) { @@ -85,7 +127,7 @@ func TestNumericVerify(t *testing.T) { // Failure test _, err = numeric.Verify(context.Background(), nil, 
[]sqltypes.Value{sqltypes.NewVarBinary("aa")}, [][]byte{nil}) - require.EqualError(t, err, "could not parse value: 'aa'") + require.EqualError(t, err, "cannot parse uint64 from \"aa\"") } func TestNumericReverseMap(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/region_experimental.go b/go/vt/vtgate/vindexes/region_experimental.go index 14347e9d9a4..c116e9bd84d 100644 --- a/go/vt/vtgate/vindexes/region_experimental.go +++ b/go/vt/vtgate/vindexes/region_experimental.go @@ -22,18 +22,27 @@ import ( "encoding/binary" "fmt" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +const ( + regionExperimentalParamRegionBytes = "region_bytes" ) var ( - _ MultiColumn = (*RegionExperimental)(nil) + _ MultiColumn = (*RegionExperimental)(nil) + _ ParamValidating = (*RegionExperimental)(nil) + + regionExperimentalParams = []string{ + regionExperimentalParamRegionBytes, + } ) func init() { - Register("region_experimental", NewRegionExperimental) + Register("region_experimental", newRegionExperimental) } // RegionExperimental is a multi-column unique vindex. The first column is prefixed @@ -41,17 +50,18 @@ func init() { // RegionExperimental can be used for geo-partitioning because the first column can denote a region, // and its value will dictate the shard for that region. type RegionExperimental struct { - name string - regionBytes int + name string + regionBytes int + unknownParams []string } -// NewRegionExperimental creates a RegionExperimental vindex. +// newRegionExperimental creates a RegionExperimental vindex. // The supplied map requires all the fields of "consistent_lookup_unique". // Additionally, it requires a region_bytes argument whose value can be "1", or "2". 
-func NewRegionExperimental(name string, m map[string]string) (Vindex, error) { - rbs, ok := m["region_bytes"] +func newRegionExperimental(name string, m map[string]string) (Vindex, error) { + rbs, ok := m[regionExperimentalParamRegionBytes] if !ok { - return nil, fmt.Errorf("region_experimental missing region_bytes param") + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, fmt.Sprintf("region_experimental missing %s param", regionExperimentalParamRegionBytes)) } var rb int switch rbs { @@ -60,11 +70,12 @@ func NewRegionExperimental(name string, m map[string]string) (Vindex, error) { case "2": rb = 2 default: - return nil, fmt.Errorf("region_bits must be 1 or 2: %v", rbs) + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "region_bytes must be 1 or 2: %v", rbs) } return &RegionExperimental{ - name: name, - regionBytes: rb, + name: name, + regionBytes: rb, + unknownParams: FindUnknownParams(m, regionExperimentalParams), }, nil } @@ -97,7 +108,7 @@ func (ge *RegionExperimental) Map(ctx context.Context, vcursor VCursor, rowsColV continue } // Compute region prefix. - rn, err := evalengine.ToUint64(row[0]) + rn, err := row[0].ToCastUint64() if err != nil { destinations = append(destinations, key.DestinationNone{}) continue @@ -113,7 +124,7 @@ func (ge *RegionExperimental) Map(ctx context.Context, vcursor VCursor, rowsColV dest := r if len(row) == 2 { // Compute hash. - hn, err := evalengine.ToUint64(row[1]) + hn, err := row[1].ToCastUint64() if err != nil { destinations = append(destinations, key.DestinationNone{}) continue @@ -145,3 +156,8 @@ func (ge *RegionExperimental) Verify(ctx context.Context, vcursor VCursor, rowsC func (ge *RegionExperimental) PartialVindex() bool { return true } + +// UnknownParams implements the ParamValidating interface. 
+func (ge *RegionExperimental) UnknownParams() []string { + return ge.unknownParams +} diff --git a/go/vt/vtgate/vindexes/region_experimental_test.go b/go/vt/vtgate/vindexes/region_experimental_test.go index dde9a2f6ea9..56b16b8f3ee 100644 --- a/go/vt/vtgate/vindexes/region_experimental_test.go +++ b/go/vt/vtgate/vindexes/region_experimental_test.go @@ -22,6 +22,8 @@ import ( "testing" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -30,6 +32,88 @@ import ( "vitess.io/vitess/go/vt/key" ) +func regionExperimentalCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "region_experimental", + vindexName: "region_experimental", + vindexParams: vindexParams, + + expectCost: 1, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "region_experimental", + expectUnknownParams: expectUnknownParams, + } +} + +func TestRegionExperimentalCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + regionExperimentalCreateVindexTestCase( + "no params invalid: region_bytes required", + nil, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "region_experimental missing region_bytes param"), + nil, + ), + regionExperimentalCreateVindexTestCase( + "empty params invalid: region_bytes required", + map[string]string{}, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "region_experimental missing region_bytes param"), + nil, + ), + regionExperimentalCreateVindexTestCase( + "region_bytes may not be 0", + map[string]string{ + "region_bytes": "0", + }, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "region_bytes must be 1 or 2: 0"), + nil, + ), + regionExperimentalCreateVindexTestCase( + "region_bytes may be 1", + map[string]string{ 
+ "region_bytes": "1", + }, + nil, + nil, + ), + regionExperimentalCreateVindexTestCase( + "region_bytes may be 2", + map[string]string{ + "region_bytes": "2", + }, + nil, + nil, + ), + regionExperimentalCreateVindexTestCase( + "region_bytes may not be 3", + map[string]string{ + "region_bytes": "3", + }, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "region_bytes must be 1 or 2: 3"), + nil, + ), + regionExperimentalCreateVindexTestCase( + "unknown params", + map[string]string{ + "region_bytes": "1", + "hello": "world", + }, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) +} + func TestRegionExperimentalMisc(t *testing.T) { ge, err := createRegionVindex(t, "region_experimental", "f1,f2", 1) require.NoError(t, err) @@ -122,18 +206,8 @@ func TestRegionExperimentalVerifyMulti(t *testing.T) { assert.Equal(t, want, got) } -func TestRegionExperimentalCreateErrors(t *testing.T) { - _, err := createRegionVindex(t, "region_experimental", "f1,f2", 3) - assert.EqualError(t, err, "region_bits must be 1 or 2: 3") - _, err = CreateVindex("region_experimental", "region_experimental", nil) - assert.EqualError(t, err, "region_experimental missing region_bytes param") -} - func createRegionVindex(t *testing.T, name, from string, rb int) (Vindex, error) { return CreateVindex(name, name, map[string]string{ "region_bytes": strconv.Itoa(rb), - "table": "t", - "from": from, - "to": "toc", }) } diff --git a/go/vt/vtgate/vindexes/region_json.go b/go/vt/vtgate/vindexes/region_json.go index 093ccd9090b..f0ac2ef18fa 100644 --- a/go/vt/vtgate/vindexes/region_json.go +++ b/go/vt/vtgate/vindexes/region_json.go @@ -25,19 +25,27 @@ import ( "os" "strconv" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" ) +const ( + regionJSONParamRegionBytes = "region_bytes" + regionJSONParamRegionMap = "region_map" +) + var ( _ MultiColumn = (*RegionJSON)(nil) + + regionJSONParams = []string{ + 
regionJSONParamRegionBytes, + regionJSONParamRegionMap, + } ) func init() { - Register("region_json", NewRegionJSON) + Register("region_json", newRegionJSON) } // RegionMap is used to store mapping of country to region @@ -49,17 +57,18 @@ type RegionMap map[string]uint64 // RegionJson can be used for geo-partitioning because the first column can denote a region, // and it will dictate the shard range for that region. type RegionJSON struct { - name string - regionMap RegionMap - regionBytes int + name string + regionMap RegionMap + regionBytes int + unknownParams []string } -// NewRegionJSON creates a RegionJson vindex. +// newRegionJSON creates a RegionJson vindex. // The supplied map requires all the fields of "RegionExperimental". // Additionally, it requires a region_map argument representing the path to a json file // containing a map of country to region. -func NewRegionJSON(name string, m map[string]string) (Vindex, error) { - rmPath := m["region_map"] +func newRegionJSON(name string, m map[string]string) (Vindex, error) { + rmPath := m[regionJSONParamRegionMap] rmap := make(map[string]uint64) data, err := os.ReadFile(rmPath) if err != nil { @@ -70,7 +79,7 @@ func NewRegionJSON(name string, m map[string]string) (Vindex, error) { if err != nil { return nil, err } - rb, err := strconv.Atoi(m["region_bytes"]) + rb, err := strconv.Atoi(m[regionJSONParamRegionBytes]) if err != nil { return nil, err } @@ -81,9 +90,10 @@ func NewRegionJSON(name string, m map[string]string) (Vindex, error) { } return &RegionJSON{ - name: name, - regionMap: rmap, - regionBytes: rb, + name: name, + regionMap: rmap, + regionBytes: rb, + unknownParams: FindUnknownParams(m, regionJSONParams), }, nil } @@ -116,7 +126,7 @@ func (rv *RegionJSON) Map(ctx context.Context, vcursor VCursor, rowsColValues [] continue } // Compute hash. 
- hn, err := evalengine.ToUint64(row[0]) + hn, err := row[0].ToCastUint64() if err != nil { destinations = append(destinations, key.DestinationNone{}) continue @@ -158,3 +168,8 @@ func (rv *RegionJSON) Verify(ctx context.Context, vcursor VCursor, rowsColValues func (rv *RegionJSON) PartialVindex() bool { return false } + +// UnknownParams implements the ParamValidating interface. +func (rv *RegionJSON) UnknownParams() []string { + return rv.unknownParams +} diff --git a/go/vt/vtgate/vindexes/reverse_bits.go b/go/vt/vtgate/vindexes/reverse_bits.go index 332cae5dfce..80c72ca6924 100644 --- a/go/vt/vtgate/vindexes/reverse_bits.go +++ b/go/vt/vtgate/vindexes/reverse_bits.go @@ -24,8 +24,6 @@ import ( "fmt" "math/bits" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" ) @@ -39,12 +37,16 @@ var ( // ReverseBits defines vindex that reverses the bits of a number. // It's Unique, Reversible and Functional. type ReverseBits struct { - name string + name string + unknownParams []string } -// NewReverseBits creates a new ReverseBits. -func NewReverseBits(name string, m map[string]string) (Vindex, error) { - return &ReverseBits{name: name}, nil +// newReverseBits creates a new ReverseBits. +func newReverseBits(name string, m map[string]string) (Vindex, error) { + return &ReverseBits{ + name: name, + unknownParams: FindUnknownParams(m, nil), + }, nil } // String returns the name of the vindex. @@ -107,8 +109,13 @@ func (vind *ReverseBits) ReverseMap(_ VCursor, ksids [][]byte) ([]sqltypes.Value return reverseIds, nil } +// UnknownParams implements the ParamValidating interface. 
+func (vind *ReverseBits) UnknownParams() []string { + return vind.unknownParams +} + func (vind *ReverseBits) Hash(id sqltypes.Value) ([]byte, error) { - num, err := evalengine.ToUint64(id) + num, err := id.ToCastUint64() if err != nil { return nil, err } @@ -116,7 +123,7 @@ func (vind *ReverseBits) Hash(id sqltypes.Value) ([]byte, error) { } func init() { - Register("reverse_bits", NewReverseBits) + Register("reverse_bits", newReverseBits) } func reverse(shardKey uint64) []byte { diff --git a/go/vt/vtgate/vindexes/reverse_bits_test.go b/go/vt/vtgate/vindexes/reverse_bits_test.go index 14f3d59820a..dbc2d207919 100644 --- a/go/vt/vtgate/vindexes/reverse_bits_test.go +++ b/go/vt/vtgate/vindexes/reverse_bits_test.go @@ -21,7 +21,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -31,18 +30,64 @@ import ( var reverseBits SingleColumn func init() { - hv, err := CreateVindex("reverse_bits", "rr", map[string]string{"Table": "t", "Column": "c"}) + hv, err := CreateVindex("reverse_bits", "rr", map[string]string{}) if err != nil { panic(err) } + unknownParams := hv.(ParamValidating).UnknownParams() + if len(unknownParams) > 0 { + panic("reverse_bits test init: expected 0 unknown params") + } reverseBits = hv.(SingleColumn) } -func TestReverseBitsInfo(t *testing.T) { - assert.Equal(t, 1, reverseBits.Cost()) - assert.Equal(t, "rr", reverseBits.String()) - assert.True(t, reverseBits.IsUnique()) - assert.False(t, reverseBits.NeedsVCursor()) +func reverseBitsCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "reverse_bits", + vindexName: "reverse_bits", + vindexParams: vindexParams, + + expectCost: 1, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "reverse_bits", + 
expectUnknownParams: expectUnknownParams, + } +} + +func TestReverseBitsCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + reverseBitsCreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + reverseBitsCreateVindexTestCase( + "empty params", + map[string]string{}, + nil, + nil, + ), + reverseBitsCreateVindexTestCase( + "unknown params", + map[string]string{ + "hello": "world", + }, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestReverseBitsMap(t *testing.T) { @@ -84,7 +129,7 @@ func TestReverseBitsVerify(t *testing.T) { // Failure test _, err = reverseBits.Verify(context.Background(), nil, []sqltypes.Value{sqltypes.NewVarBinary("aa")}, [][]byte{nil}) - require.EqualError(t, err, "could not parse value: 'aa'") + require.EqualError(t, err, "cannot parse uint64 from \"aa\"") } func TestReverseBitsReverseMap(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/unicodeloosemd5.go b/go/vt/vtgate/vindexes/unicodeloosemd5.go index dfe7d59f737..8fa6ac33bef 100644 --- a/go/vt/vtgate/vindexes/unicodeloosemd5.go +++ b/go/vt/vtgate/vindexes/unicodeloosemd5.go @@ -26,8 +26,9 @@ import ( ) var ( - _ SingleColumn = (*UnicodeLooseMD5)(nil) - _ Hashing = (*UnicodeLooseMD5)(nil) + _ SingleColumn = (*UnicodeLooseMD5)(nil) + _ Hashing = (*UnicodeLooseMD5)(nil) + _ ParamValidating = (*UnicodeLooseMD5)(nil) ) // UnicodeLooseMD5 is a vindex that normalizes and hashes unicode strings @@ -36,12 +37,16 @@ var ( // Ref: http://www.unicode.org/reports/tr10/#Multi_Level_Comparison. // This is compatible with MySQL's utf8_unicode_ci collation. type UnicodeLooseMD5 struct { - name string + name string + unknownParams []string } -// NewUnicodeLooseMD5 creates a new UnicodeLooseMD5. -func NewUnicodeLooseMD5(name string, _ map[string]string) (Vindex, error) { - return &UnicodeLooseMD5{name: name}, nil +// newUnicodeLooseMD5 creates a new UnicodeLooseMD5. 
+func newUnicodeLooseMD5(name string, m map[string]string) (Vindex, error) { + return &UnicodeLooseMD5{ + name: name, + unknownParams: FindUnknownParams(m, nil), + }, nil } // String returns the name of the vindex. @@ -94,6 +99,11 @@ func (vind *UnicodeLooseMD5) Hash(id sqltypes.Value) ([]byte, error) { return unicodeHash(vMD5Hash, id) } +// UnknownParams implements the ParamValidating interface. +func (vind *UnicodeLooseMD5) UnknownParams() []string { + return vind.unknownParams +} + func init() { - Register("unicode_loose_md5", NewUnicodeLooseMD5) + Register("unicode_loose_md5", newUnicodeLooseMD5) } diff --git a/go/vt/vtgate/vindexes/unicodeloosemd5_test.go b/go/vt/vtgate/vindexes/unicodeloosemd5_test.go index dea4a048783..879414c5be9 100644 --- a/go/vt/vtgate/vindexes/unicodeloosemd5_test.go +++ b/go/vt/vtgate/vindexes/unicodeloosemd5_test.go @@ -21,8 +21,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" ) @@ -30,15 +28,60 @@ import ( var charVindexMD5 SingleColumn func init() { - vindex, _ := CreateVindex("unicode_loose_md5", "utf8ch", nil) + vindex, err := CreateVindex("unicode_loose_md5", "utf8ch", nil) + if err != nil { + panic(err) + } charVindexMD5 = vindex.(SingleColumn) } -func TestUnicodeLooseMD5Info(t *testing.T) { - assert.Equal(t, 1, charVindexMD5.Cost()) - assert.Equal(t, "utf8ch", charVindexMD5.String()) - assert.True(t, charVindexMD5.IsUnique()) - assert.False(t, charVindexMD5.NeedsVCursor()) +func unicodeLooseMD5CreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "unicode_loose_md5", + vindexName: "unicode_loose_md5", + vindexParams: vindexParams, + + expectCost: 1, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "unicode_loose_md5", + 
expectUnknownParams: expectUnknownParams, + } +} + +func TestUnicodeLooseMD5CreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + unicodeLooseMD5CreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + unicodeLooseMD5CreateVindexTestCase( + "empty params", + map[string]string{}, + nil, + nil, + ), + unicodeLooseMD5CreateVindexTestCase( + "unknown params", + map[string]string{ + "hello": "world", + }, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestUnicodeLooseMD5Map(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/unicodeloosexxhash.go b/go/vt/vtgate/vindexes/unicodeloosexxhash.go index dcd924131aa..5e04bff1866 100644 --- a/go/vt/vtgate/vindexes/unicodeloosexxhash.go +++ b/go/vt/vtgate/vindexes/unicodeloosexxhash.go @@ -26,8 +26,9 @@ import ( ) var ( - _ SingleColumn = (*UnicodeLooseXXHash)(nil) - _ Hashing = (*UnicodeLooseXXHash)(nil) + _ SingleColumn = (*UnicodeLooseXXHash)(nil) + _ Hashing = (*UnicodeLooseXXHash)(nil) + _ ParamValidating = (*UnicodeLooseXXHash)(nil) ) // UnicodeLooseXXHash is a vindex that normalizes and hashes unicode strings @@ -36,12 +37,16 @@ var ( // Ref: http://www.unicode.org/reports/tr10/#Multi_Level_Comparison. // This is compatible with MySQL's utf8_unicode_ci collation. type UnicodeLooseXXHash struct { - name string + name string + unknownParams []string } -// NewUnicodeLooseXXHash creates a new UnicodeLooseXXHash struct. -func NewUnicodeLooseXXHash(name string, _ map[string]string) (Vindex, error) { - return &UnicodeLooseXXHash{name: name}, nil +// newUnicodeLooseXXHash creates a new UnicodeLooseXXHash struct. +func newUnicodeLooseXXHash(name string, m map[string]string) (Vindex, error) { + return &UnicodeLooseXXHash{ + name: name, + unknownParams: FindUnknownParams(m, nil), + }, nil } // String returns the name of the vindex. 
@@ -94,6 +99,11 @@ func (vind *UnicodeLooseXXHash) Hash(id sqltypes.Value) ([]byte, error) { return unicodeHash(vXXHash, id) } +// UnknownParams implements the ParamValidating interface. +func (vind *UnicodeLooseXXHash) UnknownParams() []string { + return vind.unknownParams +} + func init() { - Register("unicode_loose_xxhash", NewUnicodeLooseXXHash) + Register("unicode_loose_xxhash", newUnicodeLooseXXHash) } diff --git a/go/vt/vtgate/vindexes/unicodeloosexxhash_test.go b/go/vt/vtgate/vindexes/unicodeloosexxhash_test.go index e5ae98cf87f..6836bfd4ffa 100644 --- a/go/vt/vtgate/vindexes/unicodeloosexxhash_test.go +++ b/go/vt/vtgate/vindexes/unicodeloosexxhash_test.go @@ -21,8 +21,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" ) @@ -30,15 +28,64 @@ import ( var charVindexXXHash SingleColumn func init() { - vindex, _ := CreateVindex("unicode_loose_xxhash", "utf8ch", nil) + vindex, err := CreateVindex("unicode_loose_xxhash", "utf8ch", nil) + if err != nil { + panic(err) + } + unknownParams := vindex.(ParamValidating).UnknownParams() + if len(unknownParams) > 0 { + panic("unicode_loose_xxhash test init: expected 0 unknown params") + } charVindexXXHash = vindex.(SingleColumn) } -func TestUnicodeLooseXXHashInfo(t *testing.T) { - assert.Equal(t, 1, charVindexXXHash.Cost()) - assert.Equal(t, "utf8ch", charVindexXXHash.String()) - assert.True(t, charVindexXXHash.IsUnique()) - assert.False(t, charVindexXXHash.NeedsVCursor()) +func unicodeLooseXXHashCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "unicode_loose_xxhash", + vindexName: "unicode_loose_xxhash", + vindexParams: vindexParams, + + expectCost: 1, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "unicode_loose_xxhash", + 
expectUnknownParams: expectUnknownParams, + } +} + +func TestUnicodeLooseXXHashCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + unicodeLooseXXHashCreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + unicodeLooseXXHashCreateVindexTestCase( + "empty params", + map[string]string{}, + nil, + nil, + ), + unicodeLooseXXHashCreateVindexTestCase( + "unknown params", + map[string]string{ + "hello": "world", + }, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestUnicodeLooseXXHashMap(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/vindex.go b/go/vt/vtgate/vindexes/vindex.go index 700b8e6175c..a5295681248 100644 --- a/go/vt/vtgate/vindexes/vindex.go +++ b/go/vt/vtgate/vindexes/vindex.go @@ -19,7 +19,9 @@ package vindexes import ( "context" "fmt" + "sort" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/sqlparser" @@ -41,6 +43,7 @@ type ( ExecuteKeyspaceID(ctx context.Context, keyspace string, ksid []byte, query string, bindVars map[string]*querypb.BindVariable, rollbackOnError, autocommit bool) (*sqltypes.Result, error) InTransactionAndIsDML() bool LookupRowLockShardSession() vtgatepb.CommitOrder + ConnCollation() collations.ID } // Vindex defines the interface required to register a vindex. @@ -71,6 +74,22 @@ type ( NeedsVCursor() bool } + // ParamValidating is an optional interface that Vindexes may implement to + // report errors about unknown params encountered during Vindex creation. + ParamValidating interface { + // UnknownParams returns a slice of param names that were provided + // during Vindex creation, but were not known and therefore ignored by + // the Vindex. + UnknownParams() []string + } + + // ParamValidationOpts may be used by Vindexes that accept params to + // validate params with ValidateParams(params, opts). + ParamValidationOpts struct { + // Params contains param names known by the vindex. 
+ Params []string + } + // SingleColumn defines the interface for a single column vindex. SingleColumn interface { Vindex @@ -166,7 +185,7 @@ type ( var registry = make(map[string]NewVindexFunc) -// Register registers a vindex under the specified vindexType. +// Register registers a vindex factory under the specified vindexType. // A duplicate vindexType will generate a panic. // New vindexes will be created using these functions at the // time of vschema loading. @@ -179,7 +198,7 @@ func Register(vindexType string, newVindexFunc NewVindexFunc) { // CreateVindex creates a vindex of the specified type using the // supplied params. The type must have been previously registered. -func CreateVindex(vindexType, name string, params map[string]string) (Vindex, error) { +func CreateVindex(vindexType, name string, params map[string]string) (vindex Vindex, err error) { f, ok := registry[vindexType] if !ok { return nil, fmt.Errorf("vindexType %q not found", vindexType) @@ -216,3 +235,19 @@ func firstColsOnly(rowsColValues [][]sqltypes.Value) []sqltypes.Value { } return firstCols } + +// FindUnknownParams a sorted slice of keys in params that are not present in knownParams. 
+func FindUnknownParams(params map[string]string, knownParams []string) []string { + var unknownParams []string + knownParamsByName := make(map[string]struct{}) + for _, knownParam := range knownParams { + knownParamsByName[knownParam] = struct{}{} + } + for name := range params { + if _, ok := knownParamsByName[name]; !ok { + unknownParams = append(unknownParams, name) + } + } + sort.Strings(unknownParams) + return unknownParams +} diff --git a/go/vt/vtgate/vindexes/vindex_test.go b/go/vt/vtgate/vindexes/vindex_test.go index 6e2952e6bca..97b17da2a4b 100644 --- a/go/vt/vtgate/vindexes/vindex_test.go +++ b/go/vt/vtgate/vindexes/vindex_test.go @@ -18,14 +18,68 @@ package vindexes import ( "context" + "sort" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" ) +type testVindex struct { + allowUnknownParams bool + knownParams []string + params map[string]string +} + +func (v *testVindex) Cost() int { + return 0 +} + +func (v *testVindex) String() string { + return "" +} + +func (v *testVindex) IsUnique() bool { + return false +} + +func (v *testVindex) NeedsVCursor() bool { + return false +} + +func (v *testVindex) UnknownParams() []string { + if v.allowUnknownParams { + return nil + } + return FindUnknownParams(v.params, v.knownParams) +} + +func init() { + Register("allow_unknown_params", func(_ string, params map[string]string) (Vindex, error) { + return &testVindex{ + allowUnknownParams: true, + knownParams: []string{ + "option1", + "option2", + }, + params: params, + }, nil + }) + Register("warn_unknown_params", func(_ string, params map[string]string) (Vindex, error) { + return &testVindex{ + allowUnknownParams: false, + knownParams: []string{ + "option1", + "option2", + }, + params: params, + }, nil + }) +} + func TestVindexMap(t *testing.T) { ge, err := createRegionVindex(t, "region_experimental", "f1,f2", 1) assert.NoError(t, err) @@ -42,6 +96,7 @@ func 
TestVindexMap(t *testing.T) { hash, err := CreateVindex("hash", "hash", nil) assert.NoError(t, err) + require.Empty(t, hash.(ParamValidating).UnknownParams()) got, err = Map(context.Background(), hash, nil, [][]sqltypes.Value{{ sqltypes.NewInt64(1), }}) @@ -55,6 +110,7 @@ func TestVindexMap(t *testing.T) { func TestVindexVerify(t *testing.T) { ge, err := createRegionVindex(t, "region_experimental", "f1,f2", 1) assert.NoError(t, err) + require.Empty(t, ge.(ParamValidating).UnknownParams()) got, err := Verify(context.Background(), ge, nil, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(1), @@ -67,6 +123,7 @@ func TestVindexVerify(t *testing.T) { assert.Equal(t, want, got) hash, err := CreateVindex("hash", "hash", nil) + require.Empty(t, hash.(ParamValidating).UnknownParams()) assert.NoError(t, err) got, err = Verify(context.Background(), hash, nil, [][]sqltypes.Value{{ sqltypes.NewInt64(1), @@ -76,3 +133,40 @@ func TestVindexVerify(t *testing.T) { assert.NoError(t, err) assert.Equal(t, want, got) } + +func TestCreateVindexAllowUnknownParams(t *testing.T) { + vindex, err := CreateVindex( + "allow_unknown_params", + "allow_unknown_params", + map[string]string{ + "option1": "value1", + "option2": "value2", + "option3": "value3", + "option4": "value4", + }, + ) + + require.NotNil(t, vindex) + require.NoError(t, err) +} + +func TestCreateVindexWarnUnknownParams(t *testing.T) { + vindex, err := CreateVindex( + "warn_unknown_params", + "warn_unknown_params", + map[string]string{ + "option1": "value1", + "option2": "value2", + "option3": "value3", + "option4": "value4", + }, + ) + + require.NotNil(t, vindex) + require.NoError(t, err) + + unknownParams := vindex.(ParamValidating).UnknownParams() + sort.Strings(unknownParams) + require.Len(t, unknownParams, 2) + require.Equal(t, []string{"option3", "option4"}, unknownParams) +} diff --git a/go/vt/vtgate/vindexes/vschema.go b/go/vt/vtgate/vindexes/vschema.go index 1afb383980c..b9a559cf7c9 100644 --- 
a/go/vt/vtgate/vindexes/vschema.go +++ b/go/vt/vtgate/vindexes/vschema.go @@ -23,6 +23,7 @@ import ( "os" "sort" "strings" + "time" "vitess.io/vitess/go/sqlescape" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -69,6 +70,9 @@ type VSchema struct { uniqueVindexes map[string]Vindex Keyspaces map[string]*KeyspaceSchema `json:"keyspaces"` ShardRoutingRules map[string]string `json:"shard_routing_rules"` + // created is the time when the VSchema object was created. Used to detect if a cached + // copy of the vschema is stale. + created time.Time } // RoutingRule represents one routing rule. @@ -111,6 +115,14 @@ type Table struct { // Source is a keyspace-qualified table name that points to the source of a // reference table. Only applicable for tables with Type set to "reference". Source *Source `json:"source,omitempty"` + + ChildForeignKeys []ChildFKInfo `json:"child_foreign_keys,omitempty"` + ParentForeignKeys []ParentFKInfo `json:"parent_foreign_keys,omitempty"` +} + +// GetTableName gets the sqlparser.TableName for the vindex Table. +func (t *Table) GetTableName() sqlparser.TableName { + return sqlparser.NewTableNameWithQualifier(t.Name.String(), t.Keyspace.Name) } // Keyspace contains the keyspcae info for each Table. @@ -135,6 +147,12 @@ type ColumnVindex struct { backfill bool } +// TableInfo contains column and foreign key info for a table. +type TableInfo struct { + Columns []Column + ForeignKeys []*sqlparser.ForeignKeyDefinition +} + // IsUnique is used to tell whether the ColumnVindex // will return a unique shard value or not when queried with // the given column list @@ -178,19 +196,21 @@ func (col *Column) MarshalJSON() ([]byte, error) { // KeyspaceSchema contains the schema(table) for a keyspace. 
type KeyspaceSchema struct { - Keyspace *Keyspace - Tables map[string]*Table - Vindexes map[string]Vindex - Views map[string]sqlparser.SelectStatement - Error error + Keyspace *Keyspace + ForeignKeyMode vschemapb.Keyspace_ForeignKeyMode + Tables map[string]*Table + Vindexes map[string]Vindex + Views map[string]sqlparser.SelectStatement + Error error } type ksJSON struct { - Sharded bool `json:"sharded,omitempty"` - Tables map[string]*Table `json:"tables,omitempty"` - Vindexes map[string]Vindex `json:"vindexes,omitempty"` - Views map[string]string `json:"views,omitempty"` - Error string `json:"error,omitempty"` + Sharded bool `json:"sharded,omitempty"` + ForeignKeyMode string `json:"foreignKeyMode,omitempty"` + Tables map[string]*Table `json:"tables,omitempty"` + Vindexes map[string]Vindex `json:"vindexes,omitempty"` + Views map[string]string `json:"views,omitempty"` + Error string `json:"error,omitempty"` } // findTable looks for the table with the requested tablename in the keyspace. @@ -219,9 +239,10 @@ func (ks *KeyspaceSchema) findTable( // MarshalJSON returns a JSON representation of KeyspaceSchema. func (ks *KeyspaceSchema) MarshalJSON() ([]byte, error) { ksJ := ksJSON{ - Sharded: ks.Keyspace.Sharded, - Tables: ks.Tables, - Vindexes: ks.Vindexes, + Sharded: ks.Keyspace.Sharded, + Tables: ks.Tables, + ForeignKeyMode: ks.ForeignKeyMode.String(), + Vindexes: ks.Vindexes, } if ks.Error != nil { ksJ.Error = ks.Error.Error() @@ -259,15 +280,17 @@ func BuildVSchema(source *vschemapb.SrvVSchema) (vschema *VSchema) { globalTables: make(map[string]*Table), uniqueVindexes: make(map[string]Vindex), Keyspaces: make(map[string]*KeyspaceSchema), + created: time.Now(), } buildKeyspaces(source, vschema) // buildGlobalTables before buildReferences so that buildReferences can // resolve sources which reference global tables. 
buildGlobalTables(source, vschema) buildReferences(source, vschema) - resolveAutoIncrement(source, vschema) buildRoutingRule(source, vschema) buildShardRoutingRule(source, vschema) + // Resolve auto-increments after routing rules are built since sequence tables also obey routing rules. + resolveAutoIncrement(source, vschema) return vschema } @@ -293,11 +316,10 @@ func BuildKeyspaceSchema(input *vschemapb.Keyspace, keyspace string) (*KeyspaceS return vschema.Keyspaces[keyspace], err } -// ValidateKeyspace ensures that the keyspace vschema is valid. +// BuildKeyspace ensures that the keyspace vschema is valid. // External references (like sequence) are not validated. -func ValidateKeyspace(input *vschemapb.Keyspace) error { - _, err := BuildKeyspaceSchema(input, "") - return err +func BuildKeyspace(input *vschemapb.Keyspace) (*KeyspaceSchema, error) { + return BuildKeyspaceSchema(input, "") } func buildKeyspaces(source *vschemapb.SrvVSchema, vschema *VSchema) { @@ -310,14 +332,23 @@ func buildKeyspaces(source *vschemapb.SrvVSchema, vschema *VSchema) { AttachEnable: ks.AttachEnable, AttachTo: ks.AttachTo, }, - Tables: make(map[string]*Table), - Vindexes: make(map[string]Vindex), + ForeignKeyMode: replaceUnspecifiedForeignKeyMode(ks.ForeignKeyMode), + Tables: make(map[string]*Table), + Vindexes: make(map[string]Vindex), } vschema.Keyspaces[ksname] = ksvschema ksvschema.Error = buildTables(ks, vschema, ksvschema) } } +// replaceUnspecifiedForeignKeyMode replaces the default value of the foreign key mode enum with the default we want to keep. 
+func replaceUnspecifiedForeignKeyMode(fkMode vschemapb.Keyspace_ForeignKeyMode) vschemapb.Keyspace_ForeignKeyMode { + if fkMode == vschemapb.Keyspace_unspecified { + return vschemapb.Keyspace_unmanaged + } + return fkMode +} + func (vschema *VSchema) AddView(ksname string, viewName, query string) error { ks, ok := vschema.Keyspaces[ksname] if !ok { @@ -734,7 +765,11 @@ func resolveAutoIncrement(source *vschemapb.SrvVSchema, vschema *VSchema) { seqks, seqtab, err := sqlparser.ParseTable(table.AutoIncrement.Sequence) var seq *Table if err == nil { - seq, err = vschema.FindTable(seqks, seqtab) + // Ensure that sequence tables also obey routing rules. + seq, err = vschema.FindRoutedTable(seqks, seqtab, topodatapb.TabletType_PRIMARY) + if seq == nil && err == nil { + err = vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "table %s not found", seqtab) + } } if err != nil { // Better to remove the table than to leave it partially initialized. @@ -1137,6 +1172,17 @@ func (vschema *VSchema) FindRoutedShard(keyspace, shard string) (string, error) return keyspace, nil } +// GetCreated returns the time when the VSchema was created. +func (vschema *VSchema) GetCreated() time.Time { + return vschema.created +} + +// ResetCreated resets the created time to zero value. +// Used only in tests where vschema protos are compared. +func (vschema *VSchema) ResetCreated() { + vschema.created = time.Time{} +} + // ByCost provides the interface needed for ColumnVindexes to // be sorted by cost order. type ByCost []*ColumnVindex @@ -1186,11 +1232,12 @@ func LoadFormalKeyspace(filename string) (*vschemapb.Keyspace, error) { return formal, nil } -// ChooseVindexForType chooses the most appropriate vindex for the give type. +// ChooseVindexForType chooses the most appropriate vindex type for +// the given SQL data type. 
func ChooseVindexForType(typ querypb.Type) (string, error) { switch { case sqltypes.IsIntegral(typ): - return "hash", nil + return "xxhash", nil case sqltypes.IsText(typ): return "unicode_loose_md5", nil case sqltypes.IsBinary(typ): diff --git a/go/vt/vtgate/vindexes/vschema_test.go b/go/vt/vtgate/vindexes/vschema_test.go index c4684cc8945..a59ec78139d 100644 --- a/go/vt/vtgate/vindexes/vschema_test.go +++ b/go/vt/vtgate/vindexes/vschema_test.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "errors" + "fmt" "reflect" "strings" "testing" @@ -41,8 +42,7 @@ import ( // cheapVindex is a Functional, Unique Vindex. type cheapVindex struct { - name string - Params map[string]string + name string } func (v *cheapVindex) String() string { return v.name } @@ -56,16 +56,15 @@ func (*cheapVindex) Map(ctx context.Context, vcursor VCursor, ids []sqltypes.Val return nil, nil } -func NewCheapVindex(name string, params map[string]string) (Vindex, error) { - return &cheapVindex{name: name, Params: params}, nil +func newCheapVindex(name string, _ map[string]string) (Vindex, error) { + return &cheapVindex{name: name}, nil } var _ SingleColumn = (*stFU)(nil) // stFU is a Functional, Unique Vindex. type stFU struct { - name string - Params map[string]string + name string } func (v *stFU) String() string { return v.name } @@ -79,16 +78,15 @@ func (*stFU) Map(ctx context.Context, vcursor VCursor, ids []sqltypes.Value) ([] return nil, nil } -func NewSTFU(name string, params map[string]string) (Vindex, error) { - return &stFU{name: name, Params: params}, nil +func newSTFU(name string, _ map[string]string) (Vindex, error) { + return &stFU{name: name}, nil } var _ SingleColumn = (*stFU)(nil) // stFN is a Functional, NonUnique Vindex. 
type stFN struct { - name string - Params map[string]string + name string } func (v *stFN) String() string { return v.name } @@ -102,16 +100,15 @@ func (*stFN) Map(ctx context.Context, vcursor VCursor, ids []sqltypes.Value) ([] return nil, nil } -func NewSTFN(name string, params map[string]string) (Vindex, error) { - return &stFN{name: name, Params: params}, nil +func newSTFN(name string, _ map[string]string) (Vindex, error) { + return &stFN{name: name}, nil } var _ SingleColumn = (*stFN)(nil) // stLN is a Lookup, NonUnique Vindex. type stLN struct { - name string - Params map[string]string + name string } func (v *stLN) String() string { return v.name } @@ -130,8 +127,8 @@ func (*stLN) Update(context.Context, VCursor, []sqltypes.Value, []byte, []sqltyp return nil } -func NewSTLN(name string, params map[string]string) (Vindex, error) { - return &stLN{name: name, Params: params}, nil +func newSTLN(name string, _ map[string]string) (Vindex, error) { + return &stLN{name: name}, nil } var _ SingleColumn = (*stLN)(nil) @@ -139,8 +136,7 @@ var _ Lookup = (*stLN)(nil) // stLU is a Lookup, Unique Vindex. type stLU struct { - name string - Params map[string]string + name string } func (v *stLU) String() string { return v.name } @@ -159,8 +155,8 @@ func (*stLU) Update(context.Context, VCursor, []sqltypes.Value, []byte, []sqltyp return nil } -func NewSTLU(name string, params map[string]string) (Vindex, error) { - return &stLU{name: name, Params: params}, nil +func newSTLU(name string, _ map[string]string) (Vindex, error) { + return &stLU{name: name}, nil } var _ SingleColumn = (*stLO)(nil) @@ -197,7 +193,7 @@ func (v *stLO) SetOwnerInfo(keyspace, table string, cols []sqlparser.IdentifierC return nil } -func NewSTLO(name string, _ map[string]string) (Vindex, error) { +func newSTLO(name string, _ map[string]string) (Vindex, error) { return &stLO{name: name}, nil } @@ -206,8 +202,7 @@ var _ Lookup = (*stLO)(nil) // mcFU is a multi-column Functional, Unique Vindex. 
type mcFU struct { - name string - Params map[string]string + name string } func (v *mcFU) String() string { return v.name } @@ -222,25 +217,33 @@ func (*mcFU) Map(ctx context.Context, vcursor VCursor, rowsColValues [][]sqltype } func (*mcFU) PartialVindex() bool { return false } -func NewMCFU(name string, params map[string]string) (Vindex, error) { - return &mcFU{name: name, Params: params}, nil +func newMCFU(name string, _ map[string]string) (Vindex, error) { + return &mcFU{name: name}, nil } var _ MultiColumn = (*mcFU)(nil) func init() { - Register("cheap", NewCheapVindex) - Register("stfu", NewSTFU) - Register("stfn", NewSTFN) - Register("stln", NewSTLN) - Register("stlu", NewSTLU) - Register("stlo", NewSTLO) - Register("region_experimental_test", NewRegionExperimental) - Register("mcfu", NewMCFU) + Register("cheap", newCheapVindex) + Register("stfu", newSTFU) + Register("stfn", newSTFN) + Register("stln", newSTLN) + Register("stlu", newSTLU) + Register("stlo", newSTLO) + Register("region_experimental_test", newRegionExperimental) + Register("mcfu", newMCFU) +} + +func buildVSchema(source *vschemapb.SrvVSchema) (vschema *VSchema) { + vs := BuildVSchema(source) + if vs != nil { + vs.ResetCreated() + } + return vs } func TestUnshardedVSchemaValid(t *testing.T) { - err := ValidateKeyspace(&vschemapb.Keyspace{ + _, err := BuildKeyspace(&vschemapb.Keyspace{ Sharded: false, Vindexes: make(map[string]*vschemapb.Vindex), Tables: make(map[string]*vschemapb.Table), @@ -248,6 +251,45 @@ func TestUnshardedVSchemaValid(t *testing.T) { require.NoError(t, err) } +func TestForeignKeyMode(t *testing.T) { + tests := []struct { + name string + fkMode vschemapb.Keyspace_ForeignKeyMode + wantedFkMode vschemapb.Keyspace_ForeignKeyMode + }{ + { + name: "Default Value", + wantedFkMode: vschemapb.Keyspace_unmanaged, + }, { + name: "Managed Value", + fkMode: vschemapb.Keyspace_managed, + wantedFkMode: vschemapb.Keyspace_managed, + }, { + name: "Unmanaged Value", + fkMode: 
vschemapb.Keyspace_unmanaged, + wantedFkMode: vschemapb.Keyspace_unmanaged, + }, { + name: "Disallow Value", + fkMode: vschemapb.Keyspace_disallow, + wantedFkMode: vschemapb.Keyspace_disallow, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ksSchema, err := BuildKeyspace(&vschemapb.Keyspace{ + Sharded: false, + ForeignKeyMode: test.fkMode, + Vindexes: make(map[string]*vschemapb.Vindex), + Tables: make(map[string]*vschemapb.Table), + }) + require.NoError(t, err) + require.Equal(t, test.wantedFkMode, ksSchema.ForeignKeyMode) + }) + + } +} + func TestUnshardedVSchema(t *testing.T) { good := vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ @@ -323,6 +365,7 @@ func TestVSchemaViews(t *testing.T) { got := string(out) want := ` { + "foreignKeyMode":"unmanaged", "tables": { "t1": { "name": "t1", @@ -345,6 +388,76 @@ func TestVSchemaViews(t *testing.T) { require.JSONEq(t, want, got) } +func TestVSchemaForeignKeys(t *testing.T) { + good := vschemapb.SrvVSchema{ + Keyspaces: map[string]*vschemapb.Keyspace{ + "unsharded": { + Tables: map[string]*vschemapb.Table{ + "t1": { + Columns: []*vschemapb.Column{{ + Name: "c1", + }, { + Name: "c2", + Type: sqltypes.VarChar}}}}}, + "main": { + Tables: map[string]*vschemapb.Table{ + "t1": { + Columns: []*vschemapb.Column{{ + Name: "c1", + }, { + Name: "c2", + Type: sqltypes.VarChar}}}}}}} + vschema := BuildVSchema(&good) + require.NoError(t, vschema.Keyspaces["main"].Error) + + // add fk containst a keyspace. 
+ vschema.AddForeignKey("main", "t1", &sqlparser.ForeignKeyDefinition{ + Source: sqlparser.Columns{sqlparser.NewIdentifierCI("c2")}, + ReferenceDefinition: &sqlparser.ReferenceDefinition{ + ReferencedTable: sqlparser.NewTableName("t1"), + ReferencedColumns: sqlparser.Columns{sqlparser.NewIdentifierCI("c1")}, + }, + }) + + out, err := json.MarshalIndent(vschema.Keyspaces["main"], "", " ") + require.NoError(t, err) + want := ` +{ + "foreignKeyMode": "unmanaged", + "tables": { + "t1": { + "name": "t1", + "columns": [ + { + "name": "c1", + "type": "NULL_TYPE" + }, + { + "name": "c2", + "type": "VARCHAR" + } + ], + "parent_foreign_keys": [ + { + "parent_table": "t1", + "parent_columns": ["c1"], + "child_columns": ["c2"] + } + ], + "child_foreign_keys": [ + { + "child_table": "t1", + "child_columns": ["c2"], + "parent_columns": ["c1"] + } + ] + } + } +}` + got := string(out) + require.JSONEq(t, want, got) +} + func TestVSchemaColumnListAuthoritative(t *testing.T) { good := vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ @@ -406,10 +519,9 @@ func TestShardedVSchemaOwned(t *testing.T) { Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ "stfu1": { - Type: "stfu", - Params: map[string]string{ - "stfu1": "1"}, - Owner: "t1"}, + Type: "stfu", + Params: map[string]string{}, + Owner: "t1"}, "stln1": { Type: "stln", Owner: "t1"}}, @@ -429,10 +541,7 @@ func TestShardedVSchemaOwned(t *testing.T) { t1, err := got.FindTable("sharded", "t1") require.NoError(t, err) - vindex1 := &stFU{ - name: "stfu1", - Params: map[string]string{ - "stfu1": "1"}} + vindex1 := &stFU{name: "stfu1"} assertVindexMatches(t, t1.ColumnVindexes[0], vindex1, "stfu1", false) vindex2 := &stLN{name: "stln1"} @@ -571,7 +680,8 @@ func TestVSchemaRoutingRules(t *testing.T) { }, Keyspaces: map[string]*vschemapb.Keyspace{ "ks1": { - Sharded: true, + Sharded: true, + ForeignKeyMode: vschemapb.Keyspace_unmanaged, Vindexes: map[string]*vschemapb.Vindex{ "stfu1": { Type: "stfu", @@ -589,6 +699,7 @@ 
func TestVSchemaRoutingRules(t *testing.T) { }, }, "ks2": { + ForeignKeyMode: vschemapb.Keyspace_managed, Tables: map[string]*vschemapb.Table{ "t2": {}, }, @@ -661,7 +772,8 @@ func TestVSchemaRoutingRules(t *testing.T) { }, Keyspaces: map[string]*KeyspaceSchema{ "ks1": { - Keyspace: ks1, + Keyspace: ks1, + ForeignKeyMode: vschemapb.Keyspace_unmanaged, Tables: map[string]*Table{ "t1": t1, }, @@ -670,7 +782,8 @@ func TestVSchemaRoutingRules(t *testing.T) { }, }, "ks2": { - Keyspace: ks2, + ForeignKeyMode: vschemapb.Keyspace_managed, + Keyspace: ks2, Tables: map[string]*Table{ "t2": t2, }, @@ -692,37 +805,37 @@ func TestChooseVindexForType(t *testing.T) { out: "", }, { in: sqltypes.Int8, - out: "hash", + out: "xxhash", }, { in: sqltypes.Uint8, - out: "hash", + out: "xxhash", }, { in: sqltypes.Int16, - out: "hash", + out: "xxhash", }, { in: sqltypes.Uint16, - out: "hash", + out: "xxhash", }, { in: sqltypes.Int24, - out: "hash", + out: "xxhash", }, { in: sqltypes.Uint24, - out: "hash", + out: "xxhash", }, { in: sqltypes.Int32, - out: "hash", + out: "xxhash", }, { in: sqltypes.Uint32, - out: "hash", + out: "xxhash", }, { in: sqltypes.Int64, - out: "hash", + out: "xxhash", }, { in: sqltypes.Uint64, - out: "hash", + out: "xxhash", }, { in: sqltypes.Float32, - out: "hash", + out: "", }, { in: sqltypes.Float64, out: "", @@ -740,7 +853,7 @@ func TestChooseVindexForType(t *testing.T) { out: "", }, { in: sqltypes.Year, - out: "hash", + out: "xxhash", }, { in: sqltypes.Decimal, out: "", @@ -784,11 +897,16 @@ func TestChooseVindexForType(t *testing.T) { for _, tcase := range testcases { out, err := ChooseVindexForType(tcase.in) - if out == "" { - assert.Error(t, err, tcase.in) + // If no type is returned then we do not recommend the column be + // used for a vindex. If the test case provides an empty output + // value then we expect an error. 
+ if tcase.out == "" { + assert.Error(t, err, "unexpectedly got a recommended vindex type of %s for input column type %v", + out, tcase.in) continue } - assert.Equal(t, out, tcase.out, tcase.in) + assert.Equal(t, out, tcase.out, "expected a recommended vindex type of %s for input column type %v but got %s", + tcase.out, tcase.in, out) } } @@ -876,12 +994,7 @@ func TestFindVindexForSharding(t *testing.T) { Name: "sharded", Sharded: true, } - vindex1 := &stFU{ - name: "stfu1", - Params: map[string]string{ - "stfu1": "1", - }, - } + vindex1 := &stFU{name: "stfu1"} vindex2 := &stLN{name: "stln1"} t1 := &Table{ Name: sqlparser.NewIdentifierCS("t1"), @@ -959,12 +1072,7 @@ func TestFindVindexForSharding2(t *testing.T) { Sharded: true, } vindex1 := &stLU{name: "stlu1"} - vindex2 := &stFU{ - name: "stfu1", - Params: map[string]string{ - "stfu1": "1", - }, - } + vindex2 := &stFU{name: "stfu1"} t1 := &Table{ Name: sqlparser.NewIdentifierCS("t1"), Keyspace: ks, @@ -999,32 +1107,27 @@ func TestShardedVSchemaMultiColumnVindex(t *testing.T) { good := vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ "sharded": { - Sharded: true, + Sharded: true, + ForeignKeyMode: vschemapb.Keyspace_disallow, Vindexes: map[string]*vschemapb.Vindex{ "stfu1": { - Type: "stfu", - Params: map[string]string{ - "stfu1": "1"}, - Owner: "t1"}}, + Type: "stfu", + Params: map[string]string{}, + Owner: "t1"}}, Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ Columns: []string{"c1", "c2"}, Name: "stfu1"}}}}}}} - got := BuildVSchema(&good) + got := buildVSchema(&good) err := got.Keyspaces["sharded"].Error require.NoError(t, err) ks := &Keyspace{ Name: "sharded", Sharded: true, } - vindex1 := &stFU{ - name: "stfu1", - Params: map[string]string{ - "stfu1": "1", - }, - } + vindex1 := &stFU{name: "stfu1"} t1 := &Table{ Name: sqlparser.NewIdentifierCS("t1"), Keyspace: ks, @@ -1052,7 +1155,8 @@ func TestShardedVSchemaMultiColumnVindex(t *testing.T) { }, 
Keyspaces: map[string]*KeyspaceSchema{ "sharded": { - Keyspace: ks, + ForeignKeyMode: vschemapb.Keyspace_disallow, + Keyspace: ks, Tables: map[string]*Table{ "t1": t1}, Vindexes: map[string]Vindex{ @@ -1069,7 +1173,8 @@ func TestShardedVSchemaNotOwned(t *testing.T) { good := vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ "sharded": { - Sharded: true, + ForeignKeyMode: vschemapb.Keyspace_managed, + Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ "stlu1": { Type: "stlu", @@ -1084,7 +1189,7 @@ func TestShardedVSchemaNotOwned(t *testing.T) { Name: "stlu1"}, { Column: "c2", Name: "stfu1"}}}}}}} - got := BuildVSchema(&good) + got := buildVSchema(&good) err := got.Keyspaces["sharded"].Error require.NoError(t, err) ks := &Keyspace{ @@ -1126,7 +1231,8 @@ func TestShardedVSchemaNotOwned(t *testing.T) { "stfu1": vindex2}, Keyspaces: map[string]*KeyspaceSchema{ "sharded": { - Keyspace: ks, + ForeignKeyMode: vschemapb.Keyspace_managed, + Keyspace: ks, Tables: map[string]*Table{ "t1": t1, }, @@ -1200,10 +1306,12 @@ func TestBuildVSchemaDupSeq(t *testing.T) { good := vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ "ksa": { + ForeignKeyMode: vschemapb.Keyspace_managed, Tables: map[string]*vschemapb.Table{ "t1": { Type: "sequence"}}}, "ksb": { + ForeignKeyMode: vschemapb.Keyspace_managed, Tables: map[string]*vschemapb.Table{ "t1": { Type: "sequence"}}}}} @@ -1211,7 +1319,7 @@ func TestBuildVSchemaDupSeq(t *testing.T) { Name: "ksa"} ksb := &Keyspace{ Name: "ksb"} - got := BuildVSchema(&good) + got := buildVSchema(&good) t1a := &Table{ Name: sqlparser.NewIdentifierCS("t1"), Keyspace: ksa, @@ -1228,14 +1336,16 @@ func TestBuildVSchemaDupSeq(t *testing.T) { uniqueVindexes: map[string]Vindex{}, Keyspaces: map[string]*KeyspaceSchema{ "ksa": { - Keyspace: ksa, + ForeignKeyMode: vschemapb.Keyspace_managed, + Keyspace: ksa, Tables: map[string]*Table{ "t1": t1a, }, Vindexes: map[string]Vindex{}, }, "ksb": { - Keyspace: ksb, + ForeignKeyMode: 
vschemapb.Keyspace_managed, + Keyspace: ksb, Tables: map[string]*Table{ "t1": t1b, }, @@ -1251,18 +1361,20 @@ func TestBuildVSchemaDupTable(t *testing.T) { good := vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ "ksa": { + ForeignKeyMode: vschemapb.Keyspace_unmanaged, Tables: map[string]*vschemapb.Table{ "t1": {}, }, }, "ksb": { + ForeignKeyMode: vschemapb.Keyspace_unmanaged, Tables: map[string]*vschemapb.Table{ "t1": {}, }, }, }, } - got := BuildVSchema(&good) + got := buildVSchema(&good) ksa := &Keyspace{ Name: "ksa", } @@ -1285,14 +1397,16 @@ func TestBuildVSchemaDupTable(t *testing.T) { uniqueVindexes: map[string]Vindex{}, Keyspaces: map[string]*KeyspaceSchema{ "ksa": { - Keyspace: ksa, + ForeignKeyMode: vschemapb.Keyspace_unmanaged, + Keyspace: ksa, Tables: map[string]*Table{ "t1": t1a, }, Vindexes: map[string]Vindex{}, }, "ksb": { - Keyspace: ksb, + ForeignKeyMode: vschemapb.Keyspace_unmanaged, + Keyspace: ksb, Tables: map[string]*Table{ "t1": t1b, }, @@ -1311,7 +1425,8 @@ func TestBuildVSchemaDupVindex(t *testing.T) { good := vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ "ksa": { - Sharded: true, + ForeignKeyMode: vschemapb.Keyspace_unmanaged, + Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ "stlu1": { Type: "stlu", @@ -1330,7 +1445,8 @@ func TestBuildVSchemaDupVindex(t *testing.T) { }, }, "ksb": { - Sharded: true, + ForeignKeyMode: vschemapb.Keyspace_unmanaged, + Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ "stlu1": { Type: "stlu", @@ -1350,7 +1466,7 @@ func TestBuildVSchemaDupVindex(t *testing.T) { }, }, } - got := BuildVSchema(&good) + got := buildVSchema(&good) err := got.Keyspaces["ksa"].Error err1 := got.Keyspaces["ksb"].Error require.NoError(t, err) @@ -1412,7 +1528,8 @@ func TestBuildVSchemaDupVindex(t *testing.T) { }, Keyspaces: map[string]*KeyspaceSchema{ "ksa": { - Keyspace: ksa, + ForeignKeyMode: vschemapb.Keyspace_unmanaged, + Keyspace: ksa, Tables: map[string]*Table{ "t1": t1, }, @@ -1421,7 
+1538,8 @@ func TestBuildVSchemaDupVindex(t *testing.T) { }, }, "ksb": { - Keyspace: ksb, + ForeignKeyMode: vschemapb.Keyspace_unmanaged, + Keyspace: ksb, Tables: map[string]*Table{ "t1": t2, }, @@ -1879,6 +1997,7 @@ func TestSequence(t *testing.T) { good := vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ "unsharded": { + ForeignKeyMode: vschemapb.Keyspace_disallow, Tables: map[string]*vschemapb.Table{ "seq": { Type: "sequence", @@ -1886,13 +2005,12 @@ func TestSequence(t *testing.T) { }, }, "sharded": { - Sharded: true, + Sharded: true, + ForeignKeyMode: vschemapb.Keyspace_unmanaged, Vindexes: map[string]*vschemapb.Vindex{ "stfu1": { - Type: "stfu", - Params: map[string]string{ - "stfu1": "1", - }, + Type: "stfu", + Params: map[string]string{}, }, }, Tables: map[string]*vschemapb.Table{ @@ -1924,7 +2042,7 @@ func TestSequence(t *testing.T) { }, }, } - got := BuildVSchema(&good) + got := buildVSchema(&good) err := got.Keyspaces["sharded"].Error require.NoError(t, err) err1 := got.Keyspaces["unsharded"].Error @@ -1943,12 +2061,7 @@ func TestSequence(t *testing.T) { Keyspace: ksu, Type: "sequence", } - vindex1 := &stFU{ - name: "stfu1", - Params: map[string]string{ - "stfu1": "1", - }, - } + vindex1 := &stFU{name: "stfu1"} t1 := &Table{ Name: sqlparser.NewIdentifierCS("t1"), Keyspace: kss, @@ -2003,14 +2116,16 @@ func TestSequence(t *testing.T) { }, Keyspaces: map[string]*KeyspaceSchema{ "unsharded": { - Keyspace: ksu, + ForeignKeyMode: vschemapb.Keyspace_disallow, + Keyspace: ksu, Tables: map[string]*Table{ "seq": seq, }, Vindexes: map[string]Vindex{}, }, "sharded": { - Keyspace: kss, + ForeignKeyMode: vschemapb.Keyspace_unmanaged, + Keyspace: kss, Tables: map[string]*Table{ "t1": t1, "t2": t2, @@ -2168,10 +2283,8 @@ func TestFindTable(t *testing.T) { Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ "stfu1": { - Type: "stfu", - Params: map[string]string{ - "stfu1": "1", - }, + Type: "stfu", + Params: map[string]string{}, }, }, Tables: 
map[string]*vschemapb.Table{ @@ -2441,7 +2554,8 @@ func TestBuildKeyspaceSchema(t *testing.T) { Keyspace: ks, } want := &KeyspaceSchema{ - Keyspace: ks, + Keyspace: ks, + ForeignKeyMode: vschemapb.Keyspace_unmanaged, Tables: map[string]*Table{ "t1": t1, "t2": t2, @@ -2467,7 +2581,7 @@ func TestValidate(t *testing.T) { "t2": {}, }, } - err := ValidateKeyspace(good) + _, err := BuildKeyspace(good) require.NoError(t, err) bad := &vschemapb.Keyspace{ Sharded: true, @@ -2480,7 +2594,7 @@ func TestValidate(t *testing.T) { "t2": {}, }, } - err = ValidateKeyspace(bad) + _, err = BuildKeyspace(bad) want := `vindexType "absent" not found` if err == nil || !strings.HasPrefix(err.Error(), want) { t.Errorf("Validate: %v, must start with %s", err, want) @@ -2552,13 +2666,18 @@ func TestVSchemaPBJSON(t *testing.T) { } func TestVSchemaJSON(t *testing.T) { - lkp, _ := NewLookupHash("n2", map[string]string{ + lkp, err := newLookupHash("n2", map[string]string{ "from": "f", "table": "t", "to": "2", }) + unknownParams := lkp.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) + require.NoError(t, err) + in := map[string]*KeyspaceSchema{ "unsharded": { + ForeignKeyMode: vschemapb.Keyspace_managed, Keyspace: &Keyspace{ Name: "k1", }, @@ -2579,6 +2698,7 @@ func TestVSchemaJSON(t *testing.T) { }, }, "sharded": { + ForeignKeyMode: vschemapb.Keyspace_disallow, Keyspace: &Keyspace{ Name: "k2", Sharded: true, @@ -2605,6 +2725,7 @@ func TestVSchemaJSON(t *testing.T) { want := `{ "sharded": { "sharded": true, + "foreignKeyMode": "disallow", "tables": { "t3": { "name": "n3", @@ -2629,6 +2750,7 @@ func TestVSchemaJSON(t *testing.T) { } }, "unsharded": { + "foreignKeyMode": "managed", "tables": { "t1": { "name": "n1", @@ -2896,6 +3018,135 @@ func TestOtherTablesMakeReferenceTableAndSourceAmbiguous(t *testing.T) { require.Error(t, err) } +// TestFindTableWithSequences tests tables with an autoincrement column that are associated with a sequence. 
+// It validates that sequences obey routing rules, which might be set, for example, during a MoveTables +// when sequence tables are being migrated to a new cluster. +func TestFindTableWithSequences(t *testing.T) { + input := vschemapb.SrvVSchema{ + RoutingRules: &vschemapb.RoutingRules{ + Rules: []*vschemapb.RoutingRule{{ + FromTable: "seq3", + ToTables: []string{"ksb.seq3"}, + }, + { + FromTable: "seq4", + ToTables: []string{"ksb.seq4"}, + }}, + }, + Keyspaces: map[string]*vschemapb.Keyspace{ + "ksa": { + Vindexes: map[string]*vschemapb.Vindex{ + "stfu1": { + Type: "stfu", + }}, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{ + { + Column: "c1", + Name: "stfu1", + }, + }, + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "seq1", + }, + }, + "t2": { + ColumnVindexes: []*vschemapb.ColumnVindex{ + { + Column: "c2", + Name: "stfu1", + }, + }, + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c2", + Sequence: "seq2", + }, + }, + "t3": { + ColumnVindexes: []*vschemapb.ColumnVindex{ + { + Column: "c3", + Name: "stfu1", + }, + }, + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c3", + Sequence: "seq3", + }, + }, + "t4": { + ColumnVindexes: []*vschemapb.ColumnVindex{ + { + Column: "c4", + Name: "stfu1", + }, + }, + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c4", + Sequence: "ksa.seq4", + }, + }, + "seq1": { + Type: "sequence", + }, + "seq2": { + Type: "sequence", + }, + "seq3": { + Type: "sequence", + }, + "seq4": { + Type: "sequence", + }, + }, + }, + "ksb": { + Tables: map[string]*vschemapb.Table{ + "seq2": { + Type: "sequence", + }, + "seq3": { + Type: "sequence", + }, + }, + }, + }, + } + vschema := BuildVSchema(&input) + + notFoundError := func(table string) string { + return fmt.Sprintf("table %s not found", table) + } + + type testCase struct { + name string + keyspace string + table string + mustError bool + errorContains string + } + testCases := []testCase{ + {"unambiguous", 
"", "t1", false, ""}, + {"ambiguous", "", "t2", true, notFoundError("t2")}, + {"routed unambiguous", "", "t3", false, ""}, + {"routed qualified unambiguous", "", "t4", false, ""}, + {"keyspace specified", "ksa", "t2", false, ""}, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, _, err := vschema.FindTableOrVindex(tc.keyspace, tc.table, topodatapb.TabletType_PRIMARY) + if tc.mustError { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errorContains) + } else { + require.NoError(t, err) + } + }) + } +} + func vindexNames(vindexes []*ColumnVindex) (result []string) { for _, vindex := range vindexes { result = append(result, vindex.Name) diff --git a/go/vt/vtgate/vindexes/xxhash.go b/go/vt/vtgate/vindexes/xxhash.go index 471ad996757..3362cd0aab1 100644 --- a/go/vt/vtgate/vindexes/xxhash.go +++ b/go/vt/vtgate/vindexes/xxhash.go @@ -28,19 +28,24 @@ import ( ) var ( - _ SingleColumn = (*XXHash)(nil) - _ Hashing = (*XXHash)(nil) + _ SingleColumn = (*XXHash)(nil) + _ Hashing = (*XXHash)(nil) + _ ParamValidating = (*XXHash)(nil) ) // XXHash defines vindex that hashes any sql types to a KeyspaceId // by using xxhash64. It's Unique and works on any platform giving identical result. type XXHash struct { - name string + name string + unknownParams []string } -// NewXXHash creates a new XXHash. -func NewXXHash(name string, _ map[string]string) (Vindex, error) { - return &XXHash{name: name}, nil +// newXXHash creates a new XXHash. +func newXXHash(name string, m map[string]string) (Vindex, error) { + return &XXHash{ + name: name, + unknownParams: FindUnknownParams(m, nil), + }, nil } // String returns the name of the vindex. @@ -97,8 +102,13 @@ func (vind *XXHash) Hash(id sqltypes.Value) ([]byte, error) { return vXXHash(idBytes), nil } +// UnknownParams implements the ParamValidating interface. 
+func (vind *XXHash) UnknownParams() []string { + return vind.unknownParams +} + func init() { - Register("xxhash", NewXXHash) + Register("xxhash", newXXHash) } func vXXHash(shardKey []byte) []byte { diff --git a/go/vt/vtgate/vindexes/xxhash_test.go b/go/vt/vtgate/vindexes/xxhash_test.go index 148a2e7e7f8..b7bd77a1142 100644 --- a/go/vt/vtgate/vindexes/xxhash_test.go +++ b/go/vt/vtgate/vindexes/xxhash_test.go @@ -24,8 +24,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" ) @@ -33,18 +31,60 @@ import ( var xxHash SingleColumn func init() { - hv, err := CreateVindex("xxhash", "xxhash_name", map[string]string{"Table": "t", "Column": "c"}) + hv, err := CreateVindex("xxhash", "xxhash_name", map[string]string{}) if err != nil { panic(err) } xxHash = hv.(SingleColumn) } -func TestXXHashInfo(t *testing.T) { - assert.Equal(t, 1, xxHash.Cost()) - assert.Equal(t, "xxhash_name", xxHash.String()) - assert.True(t, xxHash.IsUnique()) - assert.False(t, xxHash.NeedsVCursor()) +func xxhashCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "xxhash", + vindexName: "xxhash", + vindexParams: vindexParams, + + expectCost: 1, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "xxhash", + expectUnknownParams: expectUnknownParams, + } +} + +func TestXXHashCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + xxhashCreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + xxhashCreateVindexTestCase( + "empty params", + map[string]string{}, + nil, + nil, + ), + xxhashCreateVindexTestCase( + "unknown params", + map[string]string{ + "hello": "world", + }, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestXXHashMap(t *testing.T) { diff --git 
a/go/vt/vtgate/vschema_manager.go b/go/vt/vtgate/vschema_manager.go index baa232a87d8..3b99be052b0 100644 --- a/go/vt/vtgate/vschema_manager.go +++ b/go/vt/vtgate/vschema_manager.go @@ -20,12 +20,10 @@ import ( "context" "sync" + "vitess.io/vitess/go/vt/log" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" - - "google.golang.org/protobuf/proto" - - "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -48,7 +46,7 @@ type VSchemaManager struct { // SchemaInfo is an interface to schema tracker. type SchemaInfo interface { - Tables(ks string) map[string][]vindexes.Column + Tables(ks string) map[string]*vindexes.TableInfo Views(ks string) map[string]sqlparser.SelectStatement } @@ -57,7 +55,7 @@ type SchemaInfo interface { func (vm *VSchemaManager) GetCurrentSrvVschema() *vschemapb.SrvVSchema { vm.mu.Lock() defer vm.mu.Unlock() - return proto.Clone(vm.currentSrvVschema).(*vschemapb.SrvVSchema) + return vm.currentSrvVschema.CloneVT() } // UpdateVSchema propagates the updated vschema to the topo. The entry for @@ -70,6 +68,12 @@ func (vm *VSchemaManager) UpdateVSchema(ctx context.Context, ksName string, vsch } ks := vschema.Keyspaces[ksName] + + _, err = vindexes.BuildKeyspace(ks) + if err != nil { + return err + } + err = topoServer.SaveVSchema(ctx, ksName, ks) if err != nil { return err @@ -191,23 +195,29 @@ func (vm *VSchemaManager) buildAndEnhanceVSchema(v *vschemapb.SrvVSchema) *vinde func (vm *VSchemaManager) updateFromSchema(vschema *vindexes.VSchema) { for ksName, ks := range vschema.Keyspaces { m := vm.schema.Tables(ksName) + // Before we add the foreign key definitions in the tables, we need to make sure that all the tables + // are created in the Vschema, so that later when we try to find the routed tables, we don't end up + // getting dummy tables. 
+ for tblName, tblInfo := range m { + setColumns(ks, tblName, tblInfo.Columns) + } - for tblName, columns := range m { - vTbl := ks.Tables[tblName] - if vTbl == nil { - // a table that is unknown by the vschema. we add it as a normal table - ks.Tables[tblName] = &vindexes.Table{ - Name: sqlparser.NewIdentifierCS(tblName), - Keyspace: ks.Keyspace, - Columns: columns, - ColumnListAuthoritative: true, + // Now that we have ensured that all the tables are created, we can start populating the foreign keys + // in the tables. + for tblName, tblInfo := range m { + for _, fkDef := range tblInfo.ForeignKeys { + parentTbl, err := vschema.FindRoutedTable(ksName, fkDef.ReferenceDefinition.ReferencedTable.Name.String(), topodatapb.TabletType_PRIMARY) + if err != nil { + log.Errorf("error finding parent table %s: %v", fkDef.ReferenceDefinition.ReferencedTable.Name.String(), err) + continue } - continue - } - if !vTbl.ColumnListAuthoritative { - // if we found the matching table and the vschema view of it is not authoritative, then we just update the columns of the table - vTbl.Columns = columns - vTbl.ColumnListAuthoritative = true + childTbl, err := vschema.FindRoutedTable(ksName, tblName, topodatapb.TabletType_PRIMARY) + if err != nil { + log.Errorf("error finding child table %s: %v", tblName, err) + continue + } + childTbl.ParentForeignKeys = append(childTbl.ParentForeignKeys, vindexes.NewParentFkInfo(parentTbl, fkDef)) + parentTbl.ChildForeignKeys = append(parentTbl.ChildForeignKeys, vindexes.NewChildFkInfo(childTbl, fkDef)) } } @@ -220,3 +230,23 @@ func (vm *VSchemaManager) updateFromSchema(vschema *vindexes.VSchema) { } } } + +func setColumns(ks *vindexes.KeyspaceSchema, tblName string, columns []vindexes.Column) *vindexes.Table { + vTbl := ks.Tables[tblName] + if vTbl == nil { + // a table that is unknown by the vschema. 
we add it as a normal table + ks.Tables[tblName] = &vindexes.Table{ + Name: sqlparser.NewIdentifierCS(tblName), + Keyspace: ks.Keyspace, + Columns: columns, + ColumnListAuthoritative: true, + } + return ks.Tables[tblName] + } + // if we found the matching table and the vschema view of it is not authoritative, then we just update the columns of the table + if !vTbl.ColumnListAuthoritative { + vTbl.Columns = columns + vTbl.ColumnListAuthoritative = true + } + return ks.Tables[tblName] +} diff --git a/go/vt/vtgate/vschema_manager_test.go b/go/vt/vtgate/vschema_manager_test.go index 7e9a9224371..6e7a9a9a2d1 100644 --- a/go/vt/vtgate/vschema_manager_test.go +++ b/go/vt/vtgate/vschema_manager_test.go @@ -29,11 +29,63 @@ func TestVSchemaUpdate(t *testing.T) { tblCol2 := &vindexes.Table{Name: sqlparser.NewIdentifierCS("tbl"), Keyspace: ks, Columns: cols2, ColumnListAuthoritative: true} tblCol2NA := &vindexes.Table{Name: sqlparser.NewIdentifierCS("tbl"), Keyspace: ks, Columns: cols2} + vindexTable_multicol_t1 := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("multicol_t1"), + Keyspace: ks, + Columns: cols2, + ColumnListAuthoritative: true, + } + vindexTable_multicol_t2 := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("multicol_t2"), + Keyspace: ks, + Columns: cols2, + ColumnListAuthoritative: true, + } + vindexTable_t1 := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: ks, + Columns: cols1, + ColumnListAuthoritative: true, + } + vindexTable_t2 := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t2"), + Keyspace: ks, + Columns: cols1, + ColumnListAuthoritative: true, + } + sqlparserCols1 := sqlparser.MakeColumns("id") + sqlparserCols2 := sqlparser.MakeColumns("uid", "name") + + vindexTable_multicol_t1.ChildForeignKeys = append(vindexTable_multicol_t1.ChildForeignKeys, vindexes.ChildFKInfo{ + Table: vindexTable_multicol_t2, + ChildColumns: sqlparserCols2, + ParentColumns: sqlparserCols2, + OnDelete: sqlparser.NoAction, + OnUpdate: 
sqlparser.Restrict, + }) + vindexTable_multicol_t2.ParentForeignKeys = append(vindexTable_multicol_t2.ParentForeignKeys, vindexes.ParentFKInfo{ + Table: vindexTable_multicol_t1, + ChildColumns: sqlparserCols2, + ParentColumns: sqlparserCols2, + }) + vindexTable_t1.ChildForeignKeys = append(vindexTable_t1.ChildForeignKeys, vindexes.ChildFKInfo{ + Table: vindexTable_t2, + ChildColumns: sqlparserCols1, + ParentColumns: sqlparserCols1, + OnDelete: sqlparser.SetNull, + OnUpdate: sqlparser.Cascade, + }) + vindexTable_t2.ParentForeignKeys = append(vindexTable_t2.ParentForeignKeys, vindexes.ParentFKInfo{ + Table: vindexTable_t1, + ChildColumns: sqlparserCols1, + ParentColumns: sqlparserCols1, + }) + tcases := []struct { name string srvVschema *vschemapb.SrvVSchema currentVSchema *vindexes.VSchema - schema map[string][]vindexes.Column + schema map[string]*vindexes.TableInfo expected *vindexes.VSchema }{{ name: "0 Schematracking- 1 srvVSchema", @@ -47,12 +99,12 @@ func TestVSchemaUpdate(t *testing.T) { }, { name: "1 Schematracking- 0 srvVSchema", srvVschema: makeTestSrvVSchema("ks", false, nil), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblCol1}), }, { name: "1 Schematracking - 1 srvVSchema (no columns) not authoritative", srvVschema: makeTestSrvVSchema("ks", false, map[string]*vschemapb.Table{"tbl": {}}), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, // schema will override what srvSchema has. expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblCol1}), }, { @@ -63,7 +115,7 @@ func TestVSchemaUpdate(t *testing.T) { ColumnListAuthoritative: false, }, }), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, // schema will override what srvSchema has. 
expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblCol1}), }, { @@ -71,7 +123,7 @@ func TestVSchemaUpdate(t *testing.T) { srvVschema: makeTestSrvVSchema("ks", false, map[string]*vschemapb.Table{"tbl": { ColumnListAuthoritative: true, }}), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, // schema will override what srvSchema has. expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblNoCol}), }, { @@ -82,24 +134,127 @@ func TestVSchemaUpdate(t *testing.T) { ColumnListAuthoritative: true, }, }), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, // schema tracker will be ignored for authoritative tables. expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblCol2}), }, { name: "srvVschema received as nil", - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, expected: makeTestEmptyVSchema(), }, { name: "srvVschema received as nil - have existing vschema", currentVSchema: &vindexes.VSchema{}, - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, expected: &vindexes.VSchema{}, + }, { + name: "foreign keys in schema", + currentVSchema: &vindexes.VSchema{}, + schema: map[string]*vindexes.TableInfo{ + "t1": { + Columns: cols1, + }, + "t2": { + Columns: cols1, + ForeignKeys: []*sqlparser.ForeignKeyDefinition{ + { + Source: sqlparser.MakeColumns("id"), + ReferenceDefinition: &sqlparser.ReferenceDefinition{ + ReferencedTable: sqlparser.NewTableName("t1"), + ReferencedColumns: sqlparserCols1, + OnUpdate: sqlparser.Cascade, + OnDelete: sqlparser.SetNull, + }, + }, + }, + }, + "multicol_t1": { + Columns: cols2, + }, + "multicol_t2": { + Columns: cols2, + ForeignKeys: []*sqlparser.ForeignKeyDefinition{ + { + Source: 
sqlparser.MakeColumns("uid", "name"), + ReferenceDefinition: &sqlparser.ReferenceDefinition{ + ReferencedTable: sqlparser.NewTableName("multicol_t1"), + ReferencedColumns: sqlparserCols2, + OnUpdate: sqlparser.Restrict, + OnDelete: sqlparser.NoAction, + }, + }, + }, + }, + }, + srvVschema: &vschemapb.SrvVSchema{ + Keyspaces: map[string]*vschemapb.Keyspace{ + "ks": { + Sharded: false, + ForeignKeyMode: vschemapb.Keyspace_managed, + Tables: map[string]*vschemapb.Table{ + "t1": { + Columns: []*vschemapb.Column{ + { + Name: "id", + Type: querypb.Type_INT64, + }, + }, + }, + "t2": { + Columns: []*vschemapb.Column{ + { + Name: "id", + Type: querypb.Type_INT64, + }, + }, + }, + "multicol_t1": { + Columns: []*vschemapb.Column{ + { + Name: "uid", + Type: querypb.Type_INT64, + }, { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + }, "multicol_t2": { + Columns: []*vschemapb.Column{ + { + Name: "uid", + Type: querypb.Type_INT64, + }, { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + }, + }, + }, + }, + }, + expected: &vindexes.VSchema{ + RoutingRules: map[string]*vindexes.RoutingRule{}, + Keyspaces: map[string]*vindexes.KeyspaceSchema{ + "ks": { + Keyspace: ks, + ForeignKeyMode: vschemapb.Keyspace_managed, + Vindexes: map[string]vindexes.Vindex{}, + Tables: map[string]*vindexes.Table{ + "t1": vindexTable_t1, + "t2": vindexTable_t2, + "multicol_t1": vindexTable_multicol_t1, + "multicol_t2": vindexTable_multicol_t2, + }, + }, + }, + }, }} vm := &VSchemaManager{} var vs *vindexes.VSchema vm.subscriber = func(vschema *vindexes.VSchema, _ *VSchemaStats) { vs = vschema + vs.ResetCreated() } for _, tcase := range tcases { t.Run(tcase.name, func(t *testing.T) { @@ -138,7 +293,7 @@ func TestRebuildVSchema(t *testing.T) { tcases := []struct { name string srvVschema *vschemapb.SrvVSchema - schema map[string][]vindexes.Column + schema map[string]*vindexes.TableInfo expected *vindexes.VSchema }{{ name: "0 Schematracking- 1 srvVSchema", @@ -152,12 +307,12 @@ func 
TestRebuildVSchema(t *testing.T) { }, { name: "1 Schematracking- 0 srvVSchema", srvVschema: makeTestSrvVSchema("ks", false, nil), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblCol1}), }, { name: "1 Schematracking - 1 srvVSchema (no columns) not authoritative", srvVschema: makeTestSrvVSchema("ks", false, map[string]*vschemapb.Table{"tbl": {}}), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, // schema will override what srvSchema has. expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblCol1}), }, { @@ -168,7 +323,7 @@ func TestRebuildVSchema(t *testing.T) { ColumnListAuthoritative: false, }, }), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, // schema will override what srvSchema has. expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblCol1}), }, { @@ -176,7 +331,7 @@ func TestRebuildVSchema(t *testing.T) { srvVschema: makeTestSrvVSchema("ks", false, map[string]*vschemapb.Table{"tbl": { ColumnListAuthoritative: true, }}), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, // schema will override what srvSchema has. expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblNoCol}), }, { @@ -187,18 +342,19 @@ func TestRebuildVSchema(t *testing.T) { ColumnListAuthoritative: true, }, }), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, // schema tracker will be ignored for authoritative tables. 
expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblCol2}), }, { name: "srvVschema received as nil", - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, }} vm := &VSchemaManager{} var vs *vindexes.VSchema vm.subscriber = func(vschema *vindexes.VSchema, _ *VSchemaStats) { vs = vschema + vs.ResetCreated() } for _, tcase := range tcases { t.Run(tcase.name, func(t *testing.T) { @@ -222,11 +378,14 @@ func makeTestVSchema(ks string, sharded bool, tbls map[string]*vindexes.Table) * Name: ks, Sharded: sharded, }, - Tables: tbls, - Vindexes: map[string]vindexes.Vindex{}, + // Default foreign key mode + ForeignKeyMode: vschemapb.Keyspace_unmanaged, + Tables: tbls, + Vindexes: map[string]vindexes.Vindex{}, } vs := makeTestEmptyVSchema() vs.Keyspaces[ks] = keyspaceSchema + vs.ResetCreated() return vs } @@ -241,6 +400,8 @@ func makeTestSrvVSchema(ks string, sharded bool, tbls map[string]*vschemapb.Tabl keyspaceSchema := &vschemapb.Keyspace{ Sharded: sharded, Tables: tbls, + // Default foreign key mode + ForeignKeyMode: vschemapb.Keyspace_unmanaged, } return &vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ks: keyspaceSchema}, @@ -248,14 +409,14 @@ func makeTestSrvVSchema(ks string, sharded bool, tbls map[string]*vschemapb.Tabl } type fakeSchema struct { - t map[string][]vindexes.Column + t map[string]*vindexes.TableInfo } -func (f *fakeSchema) Tables(string) map[string][]vindexes.Column { +func (f *fakeSchema) Tables(string) map[string]*vindexes.TableInfo { return f.t } -func (f *fakeSchema) Views(ks string) map[string]sqlparser.SelectStatement { +func (f *fakeSchema) Views(string) map[string]sqlparser.SelectStatement { return nil } diff --git a/go/vt/vtgate/vschema_stats.go b/go/vt/vtgate/vschema_stats.go index ce234fdba9a..d4920d7486f 100644 --- a/go/vt/vtgate/vschema_stats.go +++ b/go/vt/vtgate/vschema_stats.go @@ -31,11 +31,12 @@ type VSchemaStats struct { // 
VSchemaKeyspaceStats contains a rollup of the VSchema stats for a keyspace. // It is used to display a table with the information in the status page. type VSchemaKeyspaceStats struct { - Keyspace string - Sharded bool - TableCount int - VindexCount int - Error string + Keyspace string + Sharded bool + TableCount int + VindexCount int + VindexUnknownParamsCount int + Error string } // NewVSchemaStats returns a new VSchemaStats from a VSchema. @@ -54,6 +55,11 @@ func NewVSchemaStats(vschema *vindexes.VSchema, errorMessage string) *VSchemaSta for _, t := range k.Tables { s.VindexCount += len(t.ColumnVindexes) + len(t.Ordered) + len(t.Owned) } + for _, vdx := range k.Vindexes { + if pv, ok := vdx.(vindexes.ParamValidating); ok { + s.VindexUnknownParamsCount += len(pv.UnknownParams()) + } + } } if k.Error != nil { s.Error = k.Error.Error() @@ -95,6 +101,7 @@ const ( + {{range $i, $ks := .Keyspaces}} @@ -102,6 +109,7 @@ const ( + {{end}}
Sharded Table Count Vindex CountVindex Unknown Parameters Count Error
{{if $ks.Sharded}}Yes{{else}}No{{end}} {{$ks.TableCount}} {{$ks.VindexCount}}{{$ks.VindexUnknownParamsCount}} {{$ks.Error}}
diff --git a/go/vt/vtgate/vstream_manager.go b/go/vt/vtgate/vstream_manager.go index 154f23f3941..38706a8fbee 100644 --- a/go/vt/vtgate/vstream_manager.go +++ b/go/vt/vtgate/vstream_manager.go @@ -31,13 +31,10 @@ import ( "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo" - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/vt/log" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/vterrors" @@ -580,12 +577,12 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha // Update table names and send. // If we're streaming from multiple keyspaces, this will disambiguate // duplicate table names. - ev := proto.Clone(event).(*binlogdatapb.VEvent) + ev := event.CloneVT() ev.FieldEvent.TableName = sgtid.Keyspace + "." + ev.FieldEvent.TableName sendevents = append(sendevents, ev) case binlogdatapb.VEventType_ROW: // Update table names and send. - ev := proto.Clone(event).(*binlogdatapb.VEvent) + ev := event.CloneVT() ev.RowEvent.TableName = sgtid.Keyspace + "." 
+ ev.RowEvent.TableName sendevents = append(sendevents, ev) case binlogdatapb.VEventType_COMMIT, binlogdatapb.VEventType_DDL, binlogdatapb.VEventType_OTHER: @@ -703,7 +700,7 @@ func (vs *vstream) sendAll(ctx context.Context, sgtid *binlogdatapb.ShardGtid, e sgtid.Gtid = event.Gtid events[j] = &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_VGTID, - Vgtid: proto.Clone(vs.vgtid).(*binlogdatapb.VGtid), + Vgtid: vs.vgtid.CloneVT(), Keyspace: event.Keyspace, Shard: event.Shard, } @@ -732,7 +729,7 @@ func (vs *vstream) sendAll(ctx context.Context, sgtid *binlogdatapb.ShardGtid, e } events[j] = &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_VGTID, - Vgtid: proto.Clone(vs.vgtid).(*binlogdatapb.VGtid), + Vgtid: vs.vgtid.CloneVT(), Keyspace: event.Keyspace, Shard: event.Shard, } diff --git a/go/vt/vtgate/vstream_manager_test.go b/go/vt/vtgate/vstream_manager_test.go index 926af63e9ac..3018791964f 100644 --- a/go/vt/vtgate/vstream_manager_test.go +++ b/go/vt/vtgate/vstream_manager_test.go @@ -25,24 +25,24 @@ import ( "testing" "time" - "vitess.io/vitess/go/vt/topo" - - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" - - "vitess.io/vitess/go/stats" - "vitess.io/vitess/go/vt/vttablet/sandboxconn" + "google.golang.org/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/discovery" - "vitess.io/vitess/go/vt/proto/binlogdata" + "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/sandboxconn" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/srvtopo" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + + "vitess.io/vitess/go/test/utils" ) var mu sync.Mutex 
@@ -88,13 +88,13 @@ func TestVStreamSkew(t *testing.T) { _ = createSandbox(ks) hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20", "20-40"}) - vsm := newTestVStreamManager(hc, st, cell) + vsm := newTestVStreamManager(ctx, hc, st, cell) vgtid := &binlogdatapb.VGtid{ShardGtids: []*binlogdatapb.ShardGtid{}} want := int64(0) var sbc0, sbc1 *sandboxconn.SandboxConn if tcase.shard0idx != 0 { sbc0 = hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) sbc0.VStreamCh = make(chan *binlogdatapb.VEvent) want += 2 * tcase.numEventsPerShard vgtid.ShardGtids = append(vgtid.ShardGtids, &binlogdatapb.ShardGtid{Keyspace: ks, Gtid: "pos", Shard: "-20"}) @@ -102,7 +102,7 @@ func TestVStreamSkew(t *testing.T) { } if tcase.shard1idx != 0 { sbc1 = hc.AddTestTablet(cell, "1.1.1.1", 1002, ks, "20-40", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "20-40", sbc1.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "20-40", sbc1.Tablet()) sbc1.VStreamCh = make(chan *binlogdatapb.VEvent) want += 2 * tcase.numEventsPerShard vgtid.ShardGtids = append(vgtid.ShardGtids, &binlogdatapb.ShardGtid{Keyspace: ks, Gtid: "pos", Shard: "20-40"}) @@ -134,9 +134,9 @@ func TestVStreamEvents(t *testing.T) { hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20"}) - vsm := newTestVStreamManager(hc, st, cell) + vsm := newTestVStreamManager(ctx, hc, st, cell) sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) send1 := []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_GTID, Gtid: "gtid01"}, @@ -211,11 +211,11 @@ func TestVStreamChunks(t *testing.T) { _ = 
createSandbox(ks) hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20", "20-40"}) - vsm := newTestVStreamManager(hc, st, cell) + vsm := newTestVStreamManager(ctx, hc, st, cell) sbc0 := hc.AddTestTablet("aa", "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) sbc1 := hc.AddTestTablet("aa", "1.1.1.1", 1002, ks, "20-40", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "20-40", sbc1.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "20-40", sbc1.Tablet()) for i := 0; i < 100; i++ { sbc0.AddVStreamEvents([]*binlogdatapb.VEvent{{Type: binlogdatapb.VEventType_DDL}}, nil) @@ -279,11 +279,11 @@ func TestVStreamMulti(t *testing.T) { _ = createSandbox(ks) hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20", "20-40"}) - vsm := newTestVStreamManager(hc, st, "aa") + vsm := newTestVStreamManager(ctx, hc, st, "aa") sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) sbc1 := hc.AddTestTablet(cell, "1.1.1.1", 1002, ks, "20-40", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "20-40", sbc1.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "20-40", sbc1.Tablet()) send0 := []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_GTID, Gtid: "gtid01"}, @@ -341,13 +341,13 @@ func TestVStreamsCreatedAndLagMetrics(t *testing.T) { _ = createSandbox(ks) hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20", "20-40"}) - vsm := newTestVStreamManager(hc, st, cell) + vsm := newTestVStreamManager(ctx, hc, st, cell) vsm.vstreamsCreated.ResetAll() vsm.vstreamsLag.ResetAll() sbc0 := hc.AddTestTablet(cell, 
"1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) sbc1 := hc.AddTestTablet(cell, "1.1.1.1", 1002, ks, "20-40", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "20-40", sbc1.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "20-40", sbc1.Tablet()) send0 := []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_GTID, Gtid: "gtid01"}, @@ -396,9 +396,9 @@ func TestVStreamRetry(t *testing.T) { hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20"}) - vsm := newTestVStreamManager(hc, st, "aa") + vsm := newTestVStreamManager(ctx, hc, st, "aa") sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) commit := []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_COMMIT}, } @@ -436,9 +436,9 @@ func TestVStreamShouldNotSendSourceHeartbeats(t *testing.T) { _ = createSandbox(ks) hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20"}) - vsm := newTestVStreamManager(hc, st, cell) + vsm := newTestVStreamManager(ctx, hc, st, cell) sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) send0 := []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_HEARTBEAT}, @@ -486,13 +486,13 @@ func TestVStreamJournalOneToMany(t *testing.T) { _ = createSandbox(ks) hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20", "-10", "10-20"}) - vsm := newTestVStreamManager(hc, st, "aa") + vsm := newTestVStreamManager(ctx, hc, st, "aa") sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, 
ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) sbc1 := hc.AddTestTablet(cell, "1.1.1.1", 1002, ks, "-10", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-10", sbc1.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-10", sbc1.Tablet()) sbc2 := hc.AddTestTablet(cell, "1.1.1.1", 1003, ks, "10-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "10-20", sbc2.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "10-20", sbc2.Tablet()) send1 := []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_GTID, Gtid: "gtid01"}, @@ -599,13 +599,13 @@ func TestVStreamJournalManyToOne(t *testing.T) { _ = createSandbox(ks) hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20", "-10", "10-20"}) - vsm := newTestVStreamManager(hc, st, cell) + vsm := newTestVStreamManager(ctx, hc, st, cell) sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) sbc1 := hc.AddTestTablet(cell, "1.1.1.1", 1002, ks, "-10", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-10", sbc1.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-10", sbc1.Tablet()) sbc2 := hc.AddTestTablet(cell, "1.1.1.1", 1003, ks, "10-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "10-20", sbc2.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "10-20", sbc2.Tablet()) send3 := []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_GTID, Gtid: "gtid03"}, @@ -716,9 +716,9 @@ func TestVStreamJournalNoMatch(t *testing.T) { _ = createSandbox(ks) hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20"}) - vsm := 
newTestVStreamManager(hc, st, "aa") + vsm := newTestVStreamManager(ctx, hc, st, "aa") sbc0 := hc.AddTestTablet("aa", "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) send1 := []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_GTID, Gtid: "gtid01"}, @@ -748,7 +748,7 @@ func TestVStreamJournalNoMatch(t *testing.T) { {Type: binlogdatapb.VEventType_GTID, Gtid: "jn1"}, {Type: binlogdatapb.VEventType_COMMIT}, } - wantjn1 := &binlogdata.VStreamResponse{Events: []*binlogdatapb.VEvent{ + wantjn1 := &binlogdatapb.VStreamResponse{Events: []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_VGTID, Vgtid: &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ Keyspace: ks, @@ -796,7 +796,7 @@ func TestVStreamJournalNoMatch(t *testing.T) { {Type: binlogdatapb.VEventType_GTID, Gtid: "jn2"}, {Type: binlogdatapb.VEventType_COMMIT}, } - wantjn2 := &binlogdata.VStreamResponse{Events: []*binlogdatapb.VEvent{ + wantjn2 := &binlogdatapb.VStreamResponse{Events: []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_VGTID, Vgtid: &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ Keyspace: ks, @@ -845,11 +845,11 @@ func TestVStreamJournalPartialMatch(t *testing.T) { _ = createSandbox(ks) hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20", "-10", "10-20"}) - vsm := newTestVStreamManager(hc, st, "aa") + vsm := newTestVStreamManager(ctx, hc, st, "aa") sbc1 := hc.AddTestTablet("aa", "1.1.1.1", 1002, ks, "-10", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-10", sbc1.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-10", sbc1.Tablet()) sbc2 := hc.AddTestTablet("aa", "1.1.1.1", 1003, ks, "10-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "10-20", sbc2.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "10-20", 
sbc2.Tablet()) send := []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_JOURNAL, Journal: &binlogdatapb.Journal{ @@ -922,10 +922,13 @@ func TestVStreamJournalPartialMatch(t *testing.T) { } func TestResolveVStreamParams(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + name := "TestVStream" _ = createSandbox(name) hc := discovery.NewFakeHealthCheck(nil) - vsm := newTestVStreamManager(hc, newSandboxForCells([]string{"aa"}), "aa") + vsm := newTestVStreamManager(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") testcases := []struct { input *binlogdatapb.VGtid output *binlogdatapb.VGtid @@ -1133,14 +1136,16 @@ func TestResolveVStreamParams(t *testing.T) { } func TestVStreamIdleHeartbeat(t *testing.T) { + ctx := utils.LeakCheckContext(t) + cell := "aa" ks := "TestVStream" _ = createSandbox(ks) hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20"}) - vsm := newTestVStreamManager(hc, st, cell) + vsm := newTestVStreamManager(ctx, hc, st, cell) sbc0 := hc.AddTestTablet("aa", "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) vgtid := &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ Keyspace: ks, @@ -1164,7 +1169,7 @@ func TestVStreamIdleHeartbeat(t *testing.T) { t.Run(tcase.name, func(t *testing.T) { var mu sync.Mutex var heartbeatCount int - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) go func() { vsm.VStream(ctx, topodatapb.TabletType_PRIMARY, vgtid, nil, &vtgatepb.VStreamFlags{HeartbeatInterval: tcase.heartbeatInterval}, func(events []*binlogdatapb.VEvent) error { @@ -1187,13 +1192,14 @@ func TestVStreamIdleHeartbeat(t *testing.T) { } } -func newTestVStreamManager(hc discovery.HealthCheck, serv srvtopo.Server, cell string) *vstreamManager { - gw := 
NewTabletGateway(context.Background(), hc, serv, cell) +func newTestVStreamManager(ctx context.Context, hc discovery.HealthCheck, serv srvtopo.Server, cell string) *vstreamManager { + gw := NewTabletGateway(ctx, hc, serv, cell) srvResolver := srvtopo.NewResolver(serv, gw, cell) return newVStreamManager(srvResolver, serv, cell) } func startVStream(ctx context.Context, t *testing.T, vsm *vstreamManager, vgtid *binlogdatapb.VGtid, flags *vtgatepb.VStreamFlags) <-chan *binlogdatapb.VStreamResponse { + t.Helper() if flags == nil { flags = &vtgatepb.VStreamFlags{} } @@ -1211,7 +1217,7 @@ func verifyEvents(t *testing.T, ch <-chan *binlogdatapb.VStreamResponse, wants . t.Helper() for i, want := range wants { val := <-ch - got := proto.Clone(val).(*binlogdatapb.VStreamResponse) + got := val.CloneVT() require.NotNil(t, got) for _, event := range got.Events { event.Timestamp = 0 @@ -1250,7 +1256,7 @@ func getVEvents(keyspace, shard string, count, idx int64) []*binlogdatapb.VEvent } func getSandboxTopo(ctx context.Context, cell string, keyspace string, shards []string) *sandboxTopo { - st := newSandboxForCells([]string{cell}) + st := newSandboxForCells(ctx, []string{cell}) ts := st.topoServer ts.CreateCellInfo(ctx, cell, &topodatapb.CellInfo{}) ts.CreateKeyspace(ctx, keyspace, &topodatapb.Keyspace{}) @@ -1260,7 +1266,7 @@ func getSandboxTopo(ctx context.Context, cell string, keyspace string, shards [] return st } -func addTabletToSandboxTopo(t *testing.T, st *sandboxTopo, ks, shard string, tablet *topodatapb.Tablet) { +func addTabletToSandboxTopo(t *testing.T, ctx context.Context, st *sandboxTopo, ks, shard string, tablet *topodatapb.Tablet) { _, err := st.topoServer.UpdateShardFields(ctx, ks, shard, func(si *topo.ShardInfo) error { si.PrimaryAlias = tablet.Alias return nil diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go index f19368013ec..72b60110ba8 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -33,7 +33,6 @@ import ( 
"github.com/spf13/pflag" "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/cache" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/tb" @@ -41,6 +40,11 @@ import ( "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sidecardb" @@ -49,14 +53,8 @@ import ( "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/vtgateservice" - - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" vtschema "vitess.io/vitess/go/vt/vtgate/schema" + "vitess.io/vitess/go/vt/vtgate/vtgateservice" ) var ( @@ -68,9 +66,7 @@ var ( truncateErrorLen int // plan cache related flag - queryPlanCacheSize = cache.DefaultConfig.MaxEntries - queryPlanCacheMemory = cache.DefaultConfig.MaxMemoryUsage - queryPlanCacheLFU bool + queryPlanCacheMemory int64 = 32 * 1024 * 1024 // 32mb maxMemoryRows = 300000 warnMemoryRows = 30000 @@ -102,8 +98,8 @@ var ( // vtgate schema tracking flags enableSchemaChangeSignal = true - schemaChangeUser string - queryTimeout int + + queryTimeout int // vtgate views flags enableViews bool @@ -114,6 +110,9 @@ var ( queryLogBufferSize = 10 messageStreamGracePeriod = 30 * time.Second + + // allowKillStmt to allow execution of kill statement. 
+ allowKillStmt bool ) func registerFlags(fs *pflag.FlagSet) { @@ -122,9 +121,7 @@ func registerFlags(fs *pflag.FlagSet) { fs.BoolVar(&terseErrors, "vtgate-config-terse-errors", terseErrors, "prevent bind vars from escaping in returned errors") fs.IntVar(&truncateErrorLen, "truncate-error-len", truncateErrorLen, "truncate errors sent to client if they are longer than this value (0 means do not truncate)") fs.IntVar(&streamBufferSize, "stream_buffer_size", streamBufferSize, "the number of bytes sent from vtgate for each stream call. It's recommended to keep this value in sync with vttablet's query-server-config-stream-buffer-size.") - fs.Int64Var(&queryPlanCacheSize, "gate_query_cache_size", queryPlanCacheSize, "gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a cache. This config controls the expected amount of unique entries in the cache.") fs.Int64Var(&queryPlanCacheMemory, "gate_query_cache_memory", queryPlanCacheMemory, "gate server query cache size in bytes, maximum amount of memory to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache.") - fs.BoolVar(&queryPlanCacheLFU, "gate_query_cache_lfu", cache.DefaultConfig.LFU, "gate server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries") fs.IntVar(&maxMemoryRows, "max_memory_rows", maxMemoryRows, "Maximum number of rows that will be held in memory for intermediate results as well as the final result.") fs.IntVar(&warnMemoryRows, "warn_memory_rows", warnMemoryRows, "Warning threshold for in-memory results. 
A row count higher than this amount will cause the VtGateWarnings.ResultsExceeded counter to be incremented.") fs.StringVar(&defaultDDLStrategy, "ddl_strategy", defaultDDLStrategy, "Set default strategy for DDL statements. Override with @@ddl_strategy session variable") @@ -143,12 +140,21 @@ func registerFlags(fs *pflag.FlagSet) { fs.BoolVar(&enableOnlineDDL, "enable_online_ddl", enableOnlineDDL, "Allow users to submit, review and control Online DDL") fs.BoolVar(&enableDirectDDL, "enable_direct_ddl", enableDirectDDL, "Allow users to submit direct DDL statements") fs.BoolVar(&enableSchemaChangeSignal, "schema_change_signal", enableSchemaChangeSignal, "Enable the schema tracker; requires queryserver-config-schema-change-signal to be enabled on the underlying vttablets for this to work") - fs.StringVar(&schemaChangeUser, "schema_change_signal_user", schemaChangeUser, "User to be used to send down query to vttablet to retrieve schema changes") fs.IntVar(&queryTimeout, "query-timeout", queryTimeout, "Sets the default query timeout (in ms). 
Can be overridden by session variable (query_timeout) or comment directive (QUERY_TIMEOUT_MS)") fs.StringVar(&queryLogToFile, "log_queries_to_file", queryLogToFile, "Enable query logging to the specified file") fs.IntVar(&queryLogBufferSize, "querylog-buffer-size", queryLogBufferSize, "Maximum number of buffered query logs before throttling log output") fs.DurationVar(&messageStreamGracePeriod, "message_stream_grace_period", messageStreamGracePeriod, "the amount of time to give for a vttablet to resume if it ends a message stream, usually because of a reparent.") fs.BoolVar(&enableViews, "enable-views", enableViews, "Enable views support in vtgate.") + fs.BoolVar(&allowKillStmt, "allow-kill-statement", allowKillStmt, "Allows the execution of kill statement") + + _ = fs.String("schema_change_signal_user", "", "User to be used to send down query to vttablet to retrieve schema changes") + _ = fs.MarkDeprecated("schema_change_signal_user", "schema tracking uses an internal api and does not require a user to be specified") + + fs.Int64("gate_query_cache_size", 0, "gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a cache. This config controls the expected amount of unique entries in the cache.") + _ = fs.MarkDeprecated("gate_query_cache_size", "`--gate_query_cache_size` is deprecated and will be removed in `v19.0`. This option only applied to LRU caches, which are now unsupported.") + + fs.Bool("gate_query_cache_lfu", false, "gate server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries") + _ = fs.MarkDeprecated("gate_query_cache_lfu", "`--gate_query_cache_lfu` is deprecated and will be removed in `v19.0`. 
The query cache always uses a LFU implementation now.") } func init() { servenv.OnParseFor("vtgate", registerFlags) @@ -175,8 +181,6 @@ func getTxMode() vtgatepb.TransactionMode { } var ( - rpcVTGate *VTGate - // vschemaCounters needs to be initialized before planner to // catch the initial load stats. vschemaCounters = stats.NewCountersWithSingleLabel("VtgateVSchemaCounts", "Vtgate vschema counts", "changes") @@ -188,6 +192,23 @@ var ( vstreamSkewDelayCount = stats.NewCounter("VStreamEventsDelayedBySkewAlignment", "Number of events that had to wait because the skew across shards was too high") + + vindexUnknownParams = stats.NewGauge("VindexUnknownParameters", "Number of parameterss unrecognized by Vindexes") + + timings = stats.NewMultiTimings( + "VtgateApi", + "VtgateApi timings", + []string{"Operation", "Keyspace", "DbType"}) + + rowsReturned = stats.NewCountersWithMultiLabels( + "VtgateApiRowsReturned", + "Rows returned through the VTgate API", + []string{"Operation", "Keyspace", "DbType"}) + + rowsAffected = stats.NewCountersWithMultiLabels( + "VtgateApiRowsAffected", + "Rows affected by a write (DML) operation through the VTgate API", + []string{"Operation", "Keyspace", "DbType"}) ) // VTGate is the rpc interface to vtgate. Only one instance @@ -229,17 +250,13 @@ func Init( tabletTypesToWait []topodatapb.TabletType, pv plancontext.PlannerVersion, ) *VTGate { - if rpcVTGate != nil { - log.Fatalf("VTGate already initialized") - } - // Build objects from low to high level. // Start with the gateway. If we can't reach the topology service, // we can't go on much further, so we log.Fatal out. 
// TabletGateway can create it's own healthcheck gw := NewTabletGateway(ctx, hc, serv, cell) gw.RegisterStats() - if err := gw.WaitForTablets(tabletTypesToWait); err != nil { + if err := gw.WaitForTablets(ctx, tabletTypesToWait); err != nil { log.Fatalf("tabletGateway.WaitForTablets failed: %v", err) } @@ -285,16 +302,12 @@ func Init( var si SchemaInfo // default nil var st *vtschema.Tracker if enableSchemaChangeSignal { - st = vtschema.NewTracker(gw.hc.Subscribe(), schemaChangeUser, enableViews) + st = vtschema.NewTracker(gw.hc.Subscribe(), enableViews) addKeyspacesToTracker(ctx, srvResolver, st, gw) si = st } - cacheCfg := &cache.Config{ - MaxEntries: queryPlanCacheSize, - MaxMemoryUsage: queryPlanCacheMemory, - LFU: queryPlanCacheLFU, - } + plans := DefaultPlanCache() executor := NewExecutor( ctx, @@ -304,12 +317,16 @@ func Init( normalizeQueries, warnShardedOnly, streamBufferSize, - cacheCfg, + plans, si, noScatter, pv, ) + if err := executor.defaultQueryLogger(); err != nil { + log.Fatalf("error initializing query logger: %v", err) + } + // connect the schema tracker with the vschema manager if enableSchemaChangeSignal { st.RegisterSignalReceiver(executor.vm.Rebuild) @@ -317,33 +334,10 @@ func Init( // TODO: call serv.WatchSrvVSchema here - rpcVTGate = &VTGate{ - executor: executor, - resolver: resolver, - vsm: vsm, - txConn: tc, - gw: gw, - timings: stats.NewMultiTimings( - "VtgateApi", - "VtgateApi timings", - []string{"Operation", "Keyspace", "DbType"}), - rowsReturned: stats.NewCountersWithMultiLabels( - "VtgateApiRowsReturned", - "Rows returned through the VTgate API", - []string{"Operation", "Keyspace", "DbType"}), - rowsAffected: stats.NewCountersWithMultiLabels( - "VtgateApiRowsAffected", - "Rows affected by a write (DML) operation through the VTgate API", - []string{"Operation", "Keyspace", "DbType"}), - - logExecute: logutil.NewThrottledLogger("Execute", 5*time.Second), - logPrepare: logutil.NewThrottledLogger("Prepare", 5*time.Second), - 
logStreamExecute: logutil.NewThrottledLogger("StreamExecute", 5*time.Second), - } - - _ = stats.NewRates("QPSByOperation", stats.CounterForDimension(rpcVTGate.timings, "Operation"), 15, 1*time.Minute) - _ = stats.NewRates("QPSByKeyspace", stats.CounterForDimension(rpcVTGate.timings, "Keyspace"), 15, 1*time.Minute) - _ = stats.NewRates("QPSByDbType", stats.CounterForDimension(rpcVTGate.timings, "DbType"), 15*60/5, 5*time.Second) + vtgateInst := newVTGate(executor, resolver, vsm, tc, gw) + _ = stats.NewRates("QPSByOperation", stats.CounterForDimension(vtgateInst.timings, "Operation"), 15, 1*time.Minute) + _ = stats.NewRates("QPSByKeyspace", stats.CounterForDimension(vtgateInst.timings, "Keyspace"), 15, 1*time.Minute) + _ = stats.NewRates("QPSByDbType", stats.CounterForDimension(vtgateInst.timings, "DbType"), 15*60/5, 5*time.Second) _ = stats.NewRates("ErrorsByOperation", stats.CounterForDimension(errorCounts, "Operation"), 15, 1*time.Minute) _ = stats.NewRates("ErrorsByKeyspace", stats.CounterForDimension(errorCounts, "Keyspace"), 15, 1*time.Minute) @@ -352,26 +346,25 @@ func Init( servenv.OnRun(func() { for _, f := range RegisterVTGates { - f(rpcVTGate) + f(vtgateInst) } if st != nil && enableSchemaChangeSignal { st.Start() } + srv := initMySQLProtocol(vtgateInst) + servenv.OnTermSync(srv.shutdownMysqlProtocolAndDrain) + servenv.OnClose(srv.rollbackAtShutdown) }) servenv.OnTerm(func() { if st != nil && enableSchemaChangeSignal { st.Stop() } }) - rpcVTGate.registerDebugHealthHandler() - rpcVTGate.registerDebugEnvHandler() - err = initQueryLogger(rpcVTGate) - if err != nil { - log.Fatalf("error initializing query logger: %v", err) - } + vtgateInst.registerDebugHealthHandler() + vtgateInst.registerDebugEnvHandler() initAPI(gw.hc) - return rpcVTGate + return vtgateInst } func addKeyspacesToTracker(ctx context.Context, srvResolver *srvtopo.Resolver, st *vtschema.Tracker, gw *TabletGateway) { @@ -444,8 +437,8 @@ func (vtg *VTGate) Gateway() *TabletGateway { return vtg.gw 
} -// Execute executes a non-streaming query. This is a V3 function. -func (vtg *VTGate) Execute(ctx context.Context, c *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (newSession *vtgatepb.Session, qr *sqltypes.Result, err error) { +// Execute executes a non-streaming query. +func (vtg *VTGate) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, c *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (newSession *vtgatepb.Session, qr *sqltypes.Result, err error) { // In this context, we don't care if we can't fully parse destination destKeyspace, destTabletType, _, _ := vtg.executor.ParseDestinationTarget(session.TargetString) statsKey := []string{"Execute", destKeyspace, topoproto.TabletTypeLString(destTabletType)} @@ -455,7 +448,7 @@ func (vtg *VTGate) Execute(ctx context.Context, c *mysql.Conn, session *vtgatepb err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", bvErr) } else { safeSession := NewSafeSession(session) - qr, err = vtg.executor.Execute(ctx, c, "Execute", safeSession, sql, bindVariables) + qr, err = vtg.executor.Execute(ctx, mysqlCtx, c, "Execute", safeSession, sql, bindVariables) safeSession.RemoveInternalSavepoint() } if err == nil { @@ -473,7 +466,7 @@ func (vtg *VTGate) Execute(ctx context.Context, c *mysql.Conn, session *vtgatepb return session, nil, err } -// ExecuteBatch executes a batch of queries. This is a V3 function. +// ExecuteBatch executes a batch of queries. 
func (vtg *VTGate) ExecuteBatch(ctx context.Context, c *mysql.Conn, session *vtgatepb.Session, sqlList []string, bindVariablesList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) { // In this context, we don't care if we can't fully parse destination destKeyspace, destTabletType, _, _ := vtg.executor.ParseDestinationTarget(session.TargetString) @@ -492,7 +485,7 @@ func (vtg *VTGate) ExecuteBatch(ctx context.Context, c *mysql.Conn, session *vtg if len(bindVariablesList) != 0 { bv = bindVariablesList[i] } - session, qrl[i].QueryResult, qrl[i].QueryError = vtg.Execute(ctx, c, session, sql, bv) + session, qrl[i].QueryResult, qrl[i].QueryError = vtg.Execute(ctx, nil, c, session, sql, bv) if qr := qrl[i].QueryResult; qr != nil { vtg.rowsReturned.Add(statsKey, int64(len(qr.Rows))) vtg.rowsAffected.Add(statsKey, int64(qr.RowsAffected)) @@ -501,10 +494,9 @@ func (vtg *VTGate) ExecuteBatch(ctx context.Context, c *mysql.Conn, session *vtg return session, qrl, nil } -// StreamExecute executes a streaming query. This is a V3 function. -// Note we guarantee the callback will not be called concurrently -// by multiple go routines. -func (vtg *VTGate) StreamExecute(ctx context.Context, c *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { +// StreamExecute executes a streaming query. +// Note we guarantee the callback will not be called concurrently by multiple go routines. 
+func (vtg *VTGate) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, c *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { // In this context, we don't care if we can't fully parse destination destKeyspace, destTabletType, _, _ := vtg.executor.ParseDestinationTarget(session.TargetString) statsKey := []string{"StreamExecute", destKeyspace, topoproto.TabletTypeLString(destTabletType)} @@ -518,6 +510,7 @@ func (vtg *VTGate) StreamExecute(ctx context.Context, c *mysql.Conn, session *vt } else { err = vtg.executor.StreamExecute( ctx, + mysqlCtx, c, "StreamExecute", safeSession, @@ -669,3 +662,20 @@ func (vtg *VTGate) HandlePanic(err *error) { errorCounts.Add([]string{"Panic", "Unknown", "Unknown", vtrpcpb.Code_INTERNAL.String()}, 1) } } + +func newVTGate(executor *Executor, resolver *Resolver, vsm *vstreamManager, tc *TxConn, gw *TabletGateway) *VTGate { + return &VTGate{ + executor: executor, + resolver: resolver, + vsm: vsm, + txConn: tc, + gw: gw, + timings: timings, + rowsReturned: rowsReturned, + rowsAffected: rowsAffected, + + logExecute: logutil.NewThrottledLogger("Execute", 5*time.Second), + logPrepare: logutil.NewThrottledLogger("Prepare", 5*time.Second), + logStreamExecute: logutil.NewThrottledLogger("StreamExecute", 5*time.Second), + } +} diff --git a/go/vt/vtgate/vtgate_test.go b/go/vt/vtgate/vtgate_test.go index ba745e6f8ca..76a9be79d31 100644 --- a/go/vt/vtgate/vtgate_test.go +++ b/go/vt/vtgate/vtgate_test.go @@ -21,15 +21,12 @@ import ( "strings" "testing" - "vitess.io/vitess/go/mysql" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/require" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/vterrors" @@ -43,57 +40,21 @@ import ( // This file uses 
the sandbox_test framework. -var hcVTGateTest *discovery.FakeHealthCheck - var executeOptions = &querypb.ExecuteOptions{ IncludedFields: querypb.ExecuteOptions_TYPE_ONLY, } -var primarySession *vtgatepb.Session - -func init() { - createSandbox(KsTestUnsharded).VSchema = ` -{ - "sharded": false, - "tables": { - "t1": {} - } -} -` - createSandbox(KsTestBadVSchema).VSchema = ` -{ - "sharded": true, - "tables": { - "t2": { - "auto_increment": { - "column": "id", - "sequence": "id_seq" - } - } - } -} -` - hcVTGateTest = discovery.NewFakeHealthCheck(nil) - transactionMode = "MULTI" - Init(context.Background(), hcVTGateTest, newSandboxForCells([]string{"aa"}), "aa", nil, querypb.ExecuteOptions_Gen4) - - mysqlServerPort = 0 - mysql.SetAuthServerImpl("none") - initMySQLProtocol() -} - func TestVTGateExecute(t *testing.T) { - counts := rpcVTGate.timings.Timings.Counts() + vtg, sbc, ctx := createVtgateEnv(t) + counts := vtg.timings.Timings.Counts() - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - sbc := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - _, qr, err := rpcVTGate.Execute( - context.Background(), + _, qr, err := vtg.Execute( + ctx, + nil, nil, &vtgatepb.Session{ Autocommit: true, - TargetString: "@primary", + TargetString: KsTestUnsharded + "@primary", Options: executeOptions, }, "select id from t1", @@ -110,11 +71,11 @@ func TestVTGateExecute(t *testing.T) { t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc.Options[0], executeOptions) } - newCounts := rpcVTGate.timings.Timings.Counts() + newCounts := vtg.timings.Timings.Counts() require.Contains(t, newCounts, "All") require.Equal(t, counts["All"]+1, newCounts["All"]) - require.Contains(t, newCounts, "Execute..primary") - require.Equal(t, counts["Execute..primary"]+1, newCounts["Execute..primary"]) + require.Contains(t, newCounts, "Execute.TestUnsharded.primary") + require.Equal(t, counts["Execute.TestUnsharded.primary"]+1, 
newCounts["Execute.TestUnsharded.primary"]) for k, v := range newCounts { if strings.HasPrefix(k, "Prepare") { @@ -124,17 +85,17 @@ func TestVTGateExecute(t *testing.T) { } func TestVTGateExecuteError(t *testing.T) { - counts := errorCounts.Counts() + vtg, _, ctx := createVtgateEnv(t) - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - _, qr, err := rpcVTGate.Execute( - context.Background(), + counts := vtg.timings.Timings.Counts() + + _, qr, err := vtg.Execute( + ctx, + nil, nil, &vtgatepb.Session{ Autocommit: true, - TargetString: "@primary", + TargetString: KsTestUnsharded + "@primary", Options: executeOptions, }, "bad select id from t1", @@ -144,8 +105,8 @@ func TestVTGateExecuteError(t *testing.T) { require.Nil(t, qr) newCounts := errorCounts.Counts() - require.Contains(t, newCounts, "Execute..primary.INVALID_ARGUMENT") - require.Equal(t, counts["Execute..primary.INVALID_ARGUMENT"]+1, newCounts["Execute..primary.INVALID_ARGUMENT"]) + require.Contains(t, newCounts, "Execute.TestUnsharded.primary.INVALID_ARGUMENT") + require.Equal(t, counts["Execute.TestUnsharded.primary.INVALID_ARGUMENT"]+1, newCounts["Execute.TestUnsharded.primary.INVALID_ARGUMENT"]) for k, v := range newCounts { if strings.HasPrefix(k, "Prepare") { @@ -155,16 +116,14 @@ func TestVTGateExecuteError(t *testing.T) { } func TestVTGatePrepare(t *testing.T) { - counts := rpcVTGate.timings.Timings.Counts() + vtg, sbc, ctx := createVtgateEnv(t) - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - sbc := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - _, qr, err := rpcVTGate.Prepare( - context.Background(), + counts := vtg.timings.Timings.Counts() + _, qr, err := vtg.Prepare( + ctx, &vtgatepb.Session{ Autocommit: true, - TargetString: "@primary", + TargetString: KsTestUnsharded + "@primary", Options: 
executeOptions, }, "select id from t1", @@ -180,11 +139,11 @@ func TestVTGatePrepare(t *testing.T) { t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc.Options[0], executeOptions) } - newCounts := rpcVTGate.timings.Timings.Counts() + newCounts := vtg.timings.Timings.Counts() require.Contains(t, newCounts, "All") require.Equal(t, counts["All"]+1, newCounts["All"]) - require.Contains(t, newCounts, "Prepare..primary") - require.Equal(t, counts["Prepare..primary"]+1, newCounts["Prepare..primary"]) + require.Contains(t, newCounts, "Prepare.TestUnsharded.primary") + require.Equal(t, counts["Prepare.TestUnsharded.primary"]+1, newCounts["Prepare.TestUnsharded.primary"]) for k, v := range newCounts { if strings.HasPrefix(k, "Execute") { @@ -194,16 +153,15 @@ func TestVTGatePrepare(t *testing.T) { } func TestVTGatePrepareError(t *testing.T) { + vtg, _, ctx := createVtgateEnv(t) + counts := errorCounts.Counts() - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - _, qr, err := rpcVTGate.Prepare( - context.Background(), + _, qr, err := vtg.Prepare( + ctx, &vtgatepb.Session{ Autocommit: true, - TargetString: "@primary", + TargetString: KsTestUnsharded + "@primary", Options: executeOptions, }, "bad select id from t1", @@ -213,8 +171,8 @@ func TestVTGatePrepareError(t *testing.T) { require.Nil(t, qr) newCounts := errorCounts.Counts() - require.Contains(t, newCounts, "Prepare..primary.INTERNAL") - require.Equal(t, counts["Prepare..primary.INTERNAL"]+1, newCounts["Prepare..primary.INTERNAL"]) + require.Contains(t, newCounts, "Prepare.TestUnsharded.primary.INTERNAL") + require.Equal(t, counts["Prepare.TestUnsharded.primary.INTERNAL"]+1, newCounts["Prepare.TestUnsharded.primary.INTERNAL"]) for k, v := range newCounts { if strings.HasPrefix(k, "Execute") { @@ -224,16 +182,15 @@ func TestVTGatePrepareError(t *testing.T) { } func 
TestVTGateExecuteWithKeyspaceShard(t *testing.T) { - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) + vtg, _, ctx := createVtgateEnv(t) // Valid keyspace. - _, qr, err := rpcVTGate.Execute( - context.Background(), + _, qr, err := vtg.Execute( + ctx, + nil, nil, &vtgatepb.Session{ - TargetString: KsTestUnsharded, + TargetString: KsTestSharded + ":-20@primary", }, "select id from none", nil, @@ -246,8 +203,9 @@ func TestVTGateExecuteWithKeyspaceShard(t *testing.T) { utils.MustMatch(t, &wantQr, qr) // Invalid keyspace. - _, _, err = rpcVTGate.Execute( - context.Background(), + _, _, err = vtg.Execute( + ctx, + nil, nil, &vtgatepb.Session{ TargetString: "invalid_keyspace", @@ -259,11 +217,12 @@ func TestVTGateExecuteWithKeyspaceShard(t *testing.T) { assert.EqualError(t, err, want) // Valid keyspace/shard. - _, qr, err = rpcVTGate.Execute( - context.Background(), + _, qr, err = vtg.Execute( + ctx, + nil, nil, &vtgatepb.Session{ - TargetString: KsTestUnsharded + ":0@primary", + TargetString: KsTestSharded + ":-20@primary", }, "select id from none", nil, @@ -274,31 +233,30 @@ func TestVTGateExecuteWithKeyspaceShard(t *testing.T) { utils.MustMatch(t, &wantQr, qr) // Invalid keyspace/shard. 
- _, _, err = rpcVTGate.Execute( - context.Background(), + _, _, err = vtg.Execute( + ctx, + nil, nil, &vtgatepb.Session{ - TargetString: KsTestUnsharded + ":noshard@primary", + TargetString: KsTestSharded + ":noshard@primary", }, "select id from none", nil, ) require.Error(t, err) - require.Contains(t, err.Error(), `no healthy tablet available for 'keyspace:"TestUnsharded" shard:"noshard" tablet_type:PRIMARY`) + require.Contains(t, err.Error(), `no healthy tablet available for 'keyspace:"TestExecutor" shard:"noshard" tablet_type:PRIMARY`) } func TestVTGateStreamExecute(t *testing.T) { - ks := KsTestUnsharded - shard := "0" - createSandbox(ks) - hcVTGateTest.Reset() - sbc := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, ks, shard, topodatapb.TabletType_PRIMARY, true, 1, nil) + vtg, sbc, ctx := createVtgateEnv(t) + var qrs []*sqltypes.Result - _, err := rpcVTGate.StreamExecute( - context.Background(), + _, err := vtg.StreamExecute( + ctx, + nil, nil, &vtgatepb.Session{ - TargetString: "@primary", + TargetString: KsTestUnsharded + "@primary", Options: executeOptions, }, "select id from t1", @@ -321,10 +279,8 @@ func TestVTGateStreamExecute(t *testing.T) { } func TestVTGateBindVarError(t *testing.T) { - ks := KsTestUnsharded - createSandbox(ks) - hcVTGateTest.Reset() - ctx := context.Background() + vtg, _, ctx := createVtgateEnv(t) + session := &vtgatepb.Session{} bindVars := map[string]*querypb.BindVariable{ "v": { @@ -340,19 +296,19 @@ func TestVTGateBindVarError(t *testing.T) { }{{ name: "Execute", f: func() error { - _, _, err := rpcVTGate.Execute(ctx, nil, session, "", bindVars) + _, _, err := vtg.Execute(ctx, nil, nil, session, "", bindVars) return err }, }, { name: "ExecuteBatch", f: func() error { - _, _, err := rpcVTGate.ExecuteBatch(ctx, nil, session, []string{""}, []map[string]*querypb.BindVariable{bindVars}) + _, _, err := vtg.ExecuteBatch(ctx, nil, session, []string{""}, []map[string]*querypb.BindVariable{bindVars}) return err }, }, { name: 
"StreamExecute", f: func() error { - _, err := rpcVTGate.StreamExecute(ctx, nil, session, "", bindVars, func(_ *sqltypes.Result) error { return nil }) + _, err := vtg.StreamExecute(ctx, nil, nil, session, "", bindVars, func(_ *sqltypes.Result) error { return nil }) return err }, }} @@ -363,16 +319,22 @@ func TestVTGateBindVarError(t *testing.T) { } } -func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before func(sbc *sandboxconn.SandboxConn), after func(sbc *sandboxconn.SandboxConn), expected vtrpcpb.Code) { +func testErrorPropagation(t *testing.T, ctx context.Context, vtg *VTGate, sbcs []*sandboxconn.SandboxConn, before func(sbc *sandboxconn.SandboxConn), after func(sbc *sandboxconn.SandboxConn), expected vtrpcpb.Code) { // Execute for _, sbc := range sbcs { before(sbc) } - _, _, err := rpcVTGate.Execute( - context.Background(), + + session := &vtgatepb.Session{ + TargetString: KsTestUnsharded + "@primary", + } + + _, _, err := vtg.Execute( + ctx, + nil, nil, - primarySession, + session, "select id from t1", nil, ) @@ -392,10 +354,12 @@ func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before for _, sbc := range sbcs { before(sbc) } - _, err = rpcVTGate.StreamExecute( - context.Background(), + + _, err = vtg.StreamExecute( + ctx, nil, - primarySession, + nil, + session, "select id from t1", nil, func(r *sqltypes.Result) error { @@ -420,87 +384,79 @@ func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before // tablet and a rdonly tablet because we don't control the routing of // Commit. 
func TestErrorPropagation(t *testing.T) { - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - // create a new session each time so that ShardSessions don't get re-used across tests - primarySession = &vtgatepb.Session{ - TargetString: "@primary", - } + vtg, sbc, ctx := createVtgateEnv(t) - sbcm := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - sbcrdonly := hcVTGateTest.AddTestTablet("aa", "1.1.1.2", 1001, KsTestUnsharded, "0", topodatapb.TabletType_RDONLY, true, 1, nil) sbcs := []*sandboxconn.SandboxConn{ - sbcm, - sbcrdonly, + sbc, } - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_CANCELED] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_CANCELED] = 0 }, vtrpcpb.Code_CANCELED) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_UNKNOWN] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_UNKNOWN] = 0 }, vtrpcpb.Code_UNKNOWN) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 0 }, vtrpcpb.Code_INVALID_ARGUMENT) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_DEADLINE_EXCEEDED] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_DEADLINE_EXCEEDED] = 0 }, vtrpcpb.Code_DEADLINE_EXCEEDED) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, 
func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_ALREADY_EXISTS] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_ALREADY_EXISTS] = 0 }, vtrpcpb.Code_ALREADY_EXISTS) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_PERMISSION_DENIED] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_PERMISSION_DENIED] = 0 }, vtrpcpb.Code_PERMISSION_DENIED) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 0 }, vtrpcpb.Code_RESOURCE_EXHAUSTED) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 0 }, vtrpcpb.Code_FAILED_PRECONDITION) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_ABORTED] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_ABORTED] = 0 }, vtrpcpb.Code_ABORTED) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_INTERNAL] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_INTERNAL] = 0 }, vtrpcpb.Code_INTERNAL) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { 
sbc.MustFailCodes[vtrpcpb.Code_UNAVAILABLE] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_UNAVAILABLE] = 0 }, vtrpcpb.Code_UNAVAILABLE) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_UNAUTHENTICATED] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_UNAUTHENTICATED] = 0 @@ -510,30 +466,16 @@ func TestErrorPropagation(t *testing.T) { // This test makes sure that if we start a transaction and hit a critical // error, a rollback is issued. func TestErrorIssuesRollback(t *testing.T) { - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - sbc := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) + vtg, sbc, ctx := createVtgateEnv(t) // Start a transaction, send one statement. // Simulate an error that should trigger a rollback: // vtrpcpb.Code_ABORTED case. 
- session, _, err := rpcVTGate.Execute( - context.Background(), - nil, - &vtgatepb.Session{}, - "begin", - nil, - ) + session, _, err := vtg.Execute(ctx, nil, nil, &vtgatepb.Session{TargetString: KsTestUnsharded + "@primary"}, "begin", nil) if err != nil { t.Fatalf("cannot start a transaction: %v", err) } - session, _, err = rpcVTGate.Execute( - context.Background(), - nil, - session, - "select id from t1", - nil, - ) + session, _, err = vtg.Execute(ctx, nil, nil, session, "select id from t1", nil) if err != nil { t.Fatalf("want nil, got %v", err) } @@ -541,13 +483,8 @@ func TestErrorIssuesRollback(t *testing.T) { t.Errorf("want 0, got %d", sbc.RollbackCount.Load()) } sbc.MustFailCodes[vtrpcpb.Code_ABORTED] = 20 - _, _, err = rpcVTGate.Execute( - context.Background(), - nil, - session, - "select id from t1", - nil, - ) + + _, _, err = vtg.Execute(ctx, nil, nil, session, "select id from t1", nil) if err == nil { t.Fatalf("want error but got nil") } @@ -560,23 +497,11 @@ func TestErrorIssuesRollback(t *testing.T) { // Start a transaction, send one statement. // Simulate an error that should trigger a rollback: // vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED case. 
- session, _, err = rpcVTGate.Execute( - context.Background(), - nil, - &vtgatepb.Session{}, - "begin", - nil, - ) + session, _, err = vtg.Execute(ctx, nil, nil, &vtgatepb.Session{TargetString: KsTestUnsharded + "@primary"}, "begin", nil) if err != nil { t.Fatalf("cannot start a transaction: %v", err) } - session, _, err = rpcVTGate.Execute( - context.Background(), - nil, - session, - "select id from t1", - nil, - ) + session, _, err = vtg.Execute(ctx, nil, nil, session, "select id from t1", nil) if err != nil { t.Fatalf("want nil, got %v", err) } @@ -584,13 +509,8 @@ func TestErrorIssuesRollback(t *testing.T) { t.Errorf("want 0, got %d", sbc.RollbackCount.Load()) } sbc.MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 20 - _, _, err = rpcVTGate.Execute( - context.Background(), - nil, - session, - "select id from t1", - nil, - ) + + _, _, err = vtg.Execute(ctx, nil, nil, session, "select id from t1", nil) if err == nil { t.Fatalf("want error but got nil") } @@ -603,23 +523,11 @@ func TestErrorIssuesRollback(t *testing.T) { // Start a transaction, send one statement. // Simulate an error that should *not* trigger a rollback: // vtrpcpb.Code_ALREADY_EXISTS case. 
- session, _, err = rpcVTGate.Execute( - context.Background(), - nil, - &vtgatepb.Session{}, - "begin", - nil, - ) + session, _, err = vtg.Execute(ctx, nil, nil, &vtgatepb.Session{TargetString: KsTestUnsharded + "@primary"}, "begin", nil) if err != nil { t.Fatalf("cannot start a transaction: %v", err) } - session, _, err = rpcVTGate.Execute( - context.Background(), - nil, - session, - "select id from t1", - nil, - ) + session, _, err = vtg.Execute(ctx, nil, nil, session, "select id from t1", nil) if err != nil { t.Fatalf("want nil, got %v", err) } @@ -627,13 +535,7 @@ func TestErrorIssuesRollback(t *testing.T) { t.Errorf("want 0, got %d", sbc.RollbackCount.Load()) } sbc.MustFailCodes[vtrpcpb.Code_ALREADY_EXISTS] = 20 - _, _, err = rpcVTGate.Execute( - context.Background(), - nil, - session, - "select id from t1", - nil, - ) + _, _, err = vtg.Execute(ctx, nil, nil, session, "select id from t1", nil) if err == nil { t.Fatalf("want error but got nil") } @@ -664,33 +566,68 @@ var shardedVSchema = ` } ` +var shardedVSchemaUnknownParams = ` +{ + "sharded": true, + "vindexes": { + "hash_index": { + "type": "hash", + "params": { + "hello": "world", + "goodbye": "world" + } + }, + "binary_index": { + "type": "binary", + "params": { + "foo": "bar" + } + } + }, + "tables": { + "sp_tbl": { + "column_vindexes": [ + { + "column": "user_id", + "name": "hash_index" + } + ] + } + } +} +` + func TestMultiInternalSavepointVtGate(t *testing.T) { - s := createSandbox(KsTestSharded) + vtg, _, ctx := createVtgateEnv(t) + + const customKeyspace = "CustomSharding" + s := createSandbox(customKeyspace) s.ShardSpec = "-40-80-" s.VSchema = shardedVSchema srvSchema := getSandboxSrvVSchema() - rpcVTGate.executor.vm.VSchemaUpdate(srvSchema, nil) - hcVTGateTest.Reset() + vtg.executor.vm.VSchemaUpdate(srvSchema, nil) + + hc := vtg.resolver.scatterConn.gateway.hc.(*discovery.FakeHealthCheck) - sbc1 := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1, KsTestSharded, "-40", topodatapb.TabletType_PRIMARY, 
true, 1, nil) - sbc2 := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 2, KsTestSharded, "40-80", topodatapb.TabletType_PRIMARY, true, 1, nil) - sbc3 := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 3, KsTestSharded, "80-", topodatapb.TabletType_PRIMARY, true, 1, nil) + sbc1 := hc.AddTestTablet("aa", "-40", 1, customKeyspace, "-40", topodatapb.TabletType_PRIMARY, true, 1, nil) + sbc2 := hc.AddTestTablet("aa", "40-80", 1, customKeyspace, "40-80", topodatapb.TabletType_PRIMARY, true, 1, nil) + sbc3 := hc.AddTestTablet("aa", "80-", 1, customKeyspace, "80-", topodatapb.TabletType_PRIMARY, true, 1, nil) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := vtg.executor.queryLogger.Subscribe("Test") + defer vtg.executor.queryLogger.Unsubscribe(logChan) - session := &vtgatepb.Session{Autocommit: true} + session := &vtgatepb.Session{Autocommit: true, TargetString: customKeyspace + "@primary"} require.True(t, session.GetAutocommit()) require.False(t, session.InTransaction) var err error - session, _, err = rpcVTGate.Execute(context.Background(), nil, session, "begin", nil) + session, _, err = vtg.Execute(ctx, nil, nil, session, "begin", nil) require.NoError(t, err) require.True(t, session.GetAutocommit()) require.True(t, session.InTransaction) // this query goes to multiple shards so internal savepoint will be created. 
- session, _, err = rpcVTGate.Execute(context.Background(), nil, session, "insert into sp_tbl(user_id) values (1), (3)", nil) + session, _, err = vtg.Execute(ctx, nil, nil, session, "insert into sp_tbl(user_id) values (1), (3)", nil) require.NoError(t, err) require.True(t, session.GetAutocommit()) require.True(t, session.InTransaction) @@ -702,13 +639,14 @@ func TestMultiInternalSavepointVtGate(t *testing.T) { Sql: "insert into sp_tbl(user_id) values (:_user_id_0)", BindVariables: map[string]*querypb.BindVariable{ "_user_id_0": sqltypes.Int64BindVariable(1), - "_user_id_1": sqltypes.Int64BindVariable(3), - "vtg1": sqltypes.Int64BindVariable(1), - "vtg2": sqltypes.Int64BindVariable(3), }, }} + assertQueriesWithSavepoint(t, sbc1, wantQ) wantQ[1].Sql = "insert into sp_tbl(user_id) values (:_user_id_1)" + wantQ[1].BindVariables = map[string]*querypb.BindVariable{ + "_user_id_1": sqltypes.Int64BindVariable(3), + } assertQueriesWithSavepoint(t, sbc2, wantQ) assert.Len(t, sbc3.Queries, 0) // internal savepoint should be removed. @@ -717,7 +655,7 @@ func TestMultiInternalSavepointVtGate(t *testing.T) { sbc2.Queries = nil // multi shard so new savepoint will be created. 
- session, _, err = rpcVTGate.Execute(context.Background(), nil, session, "insert into sp_tbl(user_id) values (2), (4)", nil) + session, _, err = vtg.Execute(ctx, nil, nil, session, "insert into sp_tbl(user_id) values (2), (4)", nil) require.NoError(t, err) wantQ = []*querypb.BoundQuery{{ Sql: "savepoint x", @@ -725,10 +663,7 @@ func TestMultiInternalSavepointVtGate(t *testing.T) { }, { Sql: "insert into sp_tbl(user_id) values (:_user_id_1)", BindVariables: map[string]*querypb.BindVariable{ - "_user_id_0": sqltypes.Int64BindVariable(2), "_user_id_1": sqltypes.Int64BindVariable(4), - "vtg1": sqltypes.Int64BindVariable(2), - "vtg2": sqltypes.Int64BindVariable(4), }, }} assertQueriesWithSavepoint(t, sbc3, wantQ) @@ -738,21 +673,66 @@ func TestMultiInternalSavepointVtGate(t *testing.T) { sbc3.Queries = nil // single shard so no savepoint will be created and neither any old savepoint will be executed - _, _, err = rpcVTGate.Execute(context.Background(), nil, session, "insert into sp_tbl(user_id) values (5)", nil) + _, _, err = vtg.Execute(ctx, nil, nil, session, "insert into sp_tbl(user_id) values (5)", nil) require.NoError(t, err) wantQ = []*querypb.BoundQuery{{ Sql: "insert into sp_tbl(user_id) values (:_user_id_0)", BindVariables: map[string]*querypb.BindVariable{ "_user_id_0": sqltypes.Int64BindVariable(5), - "vtg1": sqltypes.Int64BindVariable(5), }, }} assertQueriesWithSavepoint(t, sbc2, wantQ) - testQueryLog(t, logChan, "Execute", "BEGIN", "begin", 0) - testQueryLog(t, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 0) - testQueryLog(t, logChan, "Execute", "INSERT", "insert into sp_tbl(user_id) values (:vtg1 /* INT64 */), (:vtg2 /* INT64 */)", 2) - testQueryLog(t, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint y", 2) - testQueryLog(t, logChan, "Execute", "INSERT", "insert into sp_tbl(user_id) values (:vtg1 /* INT64 */), (:vtg2 /* INT64 */)", 2) - testQueryLog(t, logChan, "Execute", "INSERT", "insert into sp_tbl(user_id) values (:vtg1 /* INT64 */)", 1) + 
testQueryLog(t, vtg.executor, logChan, "Execute", "BEGIN", "begin", 0) + testQueryLog(t, vtg.executor, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 0) + testQueryLog(t, vtg.executor, logChan, "Execute", "INSERT", "insert into sp_tbl(user_id) values (:vtg1 /* INT64 */), (:vtg2 /* INT64 */)", 2) + testQueryLog(t, vtg.executor, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint y", 2) + testQueryLog(t, vtg.executor, logChan, "Execute", "INSERT", "insert into sp_tbl(user_id) values (:vtg1 /* INT64 */), (:vtg2 /* INT64 */)", 2) + testQueryLog(t, vtg.executor, logChan, "Execute", "INSERT", "insert into sp_tbl(user_id) values (:vtg1 /* INT64 */)", 1) +} + +func TestVSchemaVindexUnknownParams(t *testing.T) { + vtg, _, _ := createVtgateEnv(t) + + const customKeyspace = "CustomSharding" + s := createSandbox(customKeyspace) + s.ShardSpec = "-40-80-" + s.VSchema = shardedVSchema + srvSchema := getSandboxSrvVSchema() + vtg.executor.vm.VSchemaUpdate(srvSchema, nil) + + hc := vtg.resolver.scatterConn.gateway.hc.(*discovery.FakeHealthCheck) + _ = hc.AddTestTablet("aa", "-40", 1, customKeyspace, "-40", topodatapb.TabletType_PRIMARY, true, 1, nil) + _ = hc.AddTestTablet("aa", "40-80", 1, customKeyspace, "40-80", topodatapb.TabletType_PRIMARY, true, 1, nil) + _ = hc.AddTestTablet("aa", "80-", 1, customKeyspace, "80-", topodatapb.TabletType_PRIMARY, true, 1, nil) + + unknownParams := vindexUnknownParams.Get() + require.Equal(t, int64(0), unknownParams) + + s.VSchema = shardedVSchemaUnknownParams + srvSchema = getSandboxSrvVSchema() + vtg.executor.vm.VSchemaUpdate(srvSchema, nil) + + unknownParams = vindexUnknownParams.Get() + require.Equal(t, int64(3), unknownParams) + + s.VSchema = shardedVSchema + srvSchema = getSandboxSrvVSchema() + vtg.executor.vm.VSchemaUpdate(srvSchema, nil) + + unknownParams = vindexUnknownParams.Get() + require.Equal(t, int64(0), unknownParams) +} + +func createVtgateEnv(t testing.TB) (*VTGate, *sandboxconn.SandboxConn, context.Context) { + cell := 
"aa" + sb := createSandbox(KsTestSharded) + sb.ShardSpec = "-" + executor, _, _, sbc, ctx := createExecutorEnv(t) + executor.normalize = normalizeQueries + + vsm := newVStreamManager(executor.resolver.resolver, executor.serv, cell) + vtg := newVTGate(executor, executor.resolver, vsm, nil, executor.scatterConn.gateway) + + return vtg, sbc, ctx } diff --git a/go/vt/vtgate/vtgateconn/vtgateconn.go b/go/vt/vtgate/vtgateconn/vtgateconn.go index 5a6c5ae6b94..ae0da3fdf43 100644 --- a/go/vt/vtgate/vtgateconn/vtgateconn.go +++ b/go/vt/vtgate/vtgateconn/vtgateconn.go @@ -62,7 +62,7 @@ type VTGateConn struct { impl Impl } -// Session returns a VTGateSession that can be used to access V3 functions. +// Session returns a VTGateSession that can be used to access execution functions. func (conn *VTGateConn) Session(targetString string, options *querypb.ExecuteOptions) *VTGateSession { return &VTGateSession{ session: &vtgatepb.Session{ @@ -111,7 +111,7 @@ func (conn *VTGateConn) VStream(ctx context.Context, tabletType topodatapb.Table return conn.impl.VStream(ctx, tabletType, vgtid, filter, flags) } -// VTGateSession exposes the V3 API to the clients. +// VTGateSession exposes the Vitess Execution API to the clients. // The object maintains client-side state and is comparable to a native MySQL connection. // For example, if you enable autocommit on a Session object, all subsequent calls will respect this. // Functions within an object must not be called concurrently. @@ -163,13 +163,13 @@ func (sn *VTGateSession) Prepare(ctx context.Context, query string, bindVars map // Impl defines the interface for a vtgate client protocol // implementation. It can be used concurrently across goroutines. type Impl interface { - // Execute executes a non-streaming query on vtgate. This is a V3 function. + // Execute executes a non-streaming query on vtgate. 
Execute(ctx context.Context, session *vtgatepb.Session, query string, bindVars map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) - // ExecuteBatch executes a non-streaming queries on vtgate. This is a V3 function. + // ExecuteBatch executes a non-streaming queries on vtgate. ExecuteBatch(ctx context.Context, session *vtgatepb.Session, queryList []string, bindVarsList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) - // StreamExecute executes a streaming query on vtgate. This is a V3 function. + // StreamExecute executes a streaming query on vtgate. StreamExecute(ctx context.Context, session *vtgatepb.Session, query string, bindVars map[string]*querypb.BindVariable, processResponse func(*vtgatepb.StreamExecuteResponse)) (sqltypes.ResultStream, error) // Prepare returns the fields information for the query as part of supporting prepare statements. diff --git a/go/vt/vtgate/vtgateconn/vtgateconn_test.go b/go/vt/vtgate/vtgateconn/vtgateconn_test.go index 8bada5b406c..523492328e9 100644 --- a/go/vt/vtgate/vtgateconn/vtgateconn_test.go +++ b/go/vt/vtgate/vtgateconn/vtgateconn_test.go @@ -17,9 +17,8 @@ limitations under the License. package vtgateconn import ( - "testing" - "context" + "testing" ) func TestRegisterDialer(t *testing.T) { diff --git a/go/vt/vtgate/vtgateservice/interface.go b/go/vt/vtgate/vtgateservice/interface.go index bf5ead0ccd8..48b9c6dbc81 100644 --- a/go/vt/vtgate/vtgateservice/interface.go +++ b/go/vt/vtgate/vtgateservice/interface.go @@ -24,7 +24,6 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -34,10 +33,9 @@ import ( // VTGateService is the interface implemented by the VTGate service, // that RPC server implementations will call. 
type VTGateService interface { - // V3 API - Execute(ctx context.Context, c *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) + Execute(ctx context.Context, mysqlCtx MySQLConnection, c *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) ExecuteBatch(ctx context.Context, c *mysql.Conn, session *vtgatepb.Session, sqlList []string, bindVariablesList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) - StreamExecute(ctx context.Context, c *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) + StreamExecute(ctx context.Context, mysqlCtx MySQLConnection, c *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) // Prepare statement support Prepare(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, []*querypb.Field, error) @@ -56,3 +54,12 @@ type VTGateService interface { // RPC implementation method, before calling any of the previous methods HandlePanic(err *error) } + +// MySQLConnection is an interface that allows to execute operations on the provided connection id. +// This is used by vtgate executor to execute kill queries. +type MySQLConnection interface { + // KillQuery stops the an executing query on the connection. + KillQuery(uint32) error + // KillConnection closes the connection and also stops any executing query on it. 
+ KillConnection(context.Context, uint32) error +} diff --git a/go/vt/vtgr/config/vtgr_config.go b/go/vt/vtgr/config/vtgr_config.go deleted file mode 100644 index 0386bd42541..00000000000 --- a/go/vt/vtgr/config/vtgr_config.go +++ /dev/null @@ -1,604 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "encoding/json" - "fmt" - "net/url" - "os" - "regexp" - "strings" - - "gopkg.in/gcfg.v1" - - "vitess.io/vitess/go/vt/vttls" - - "vitess.io/vitess/go/vt/log" -) - -// VTGRConfig is the config for VTGR -type VTGRConfig struct { - DisableReadOnlyProtection bool - BootstrapGroupSize int - MinNumReplica int - BackoffErrorWaitTimeSeconds int - BootstrapWaitTimeSeconds int -} - -var vtgrCfg = newVTGRConfig() - -func newVTGRConfig() *VTGRConfig { - config := &VTGRConfig{ - DisableReadOnlyProtection: false, - BootstrapGroupSize: 5, - MinNumReplica: 3, - BackoffErrorWaitTimeSeconds: 10, - BootstrapWaitTimeSeconds: 10 * 60, - } - return config -} - -// ReadVTGRConfig reads config for VTGR -func ReadVTGRConfig(file string) (*VTGRConfig, error) { - vtgrFile, err := os.Open(file) - if err != nil { - return nil, err - } - decoder := json.NewDecoder(vtgrFile) - err = decoder.Decode(vtgrCfg) - if err != nil { - return nil, err - } - return vtgrCfg, nil -} - -/* - Everything below has been copied over from the VTOrc package -*/ - -var ( - envVariableRegexp = regexp.MustCompile("[$][{](.*)[}]") -) - -const ( - 
DefaultStatusAPIEndpoint = "/api/status" -) - -const ( - MySQLTopologyMaxPoolConnections = 3 -) - -// Configuration makes for orchestrator configuration input, which can be provided by user via JSON formatted file. -// Some of the parameteres have reasonable default values, and some (like database credentials) are -// strictly expected from user. -// TODO(sougou): change this to yaml parsing, and possible merge with tabletenv. -type Configuration struct { - Debug bool // set debug mode (similar to --debug option) - EnableSyslog bool // Should logs be directed (in addition) to syslog daemon? - ListenAddress string // Where orchestrator HTTP should listen for TCP - ListenSocket string // Where orchestrator HTTP should listen for unix socket (default: empty; when given, TCP is disabled) - HTTPAdvertise string // optional, for raft setups, what is the HTTP address this node will advertise to its peers (potentially use where behind NAT or when rerouting ports; example: "http://11.22.33.44:3030") - AgentsServerPort string // port orchestrator agents talk back to - MySQLTopologyUser string // The user VTOrc will use to connect to MySQL instances - MySQLTopologyPassword string // The password VTOrc will use to connect to MySQL instances - MySQLReplicaUser string // User to set on replica MySQL instances while configuring replication settings on them. If set, use this credential instead of discovering from mysql. TODO(sougou): deprecate this in favor of fetching from vttablet - MySQLReplicaPassword string // Password to set on replica MySQL instances while configuring replication settings on them. - MySQLTopologyCredentialsConfigFile string // my.cnf style configuration file from where to pick credentials. 
Expecting `user`, `password` under `[client]` section - MySQLTopologySSLPrivateKeyFile string // Private key file used to authenticate with a Topology mysql instance with TLS - MySQLTopologySSLCertFile string // Certificate PEM file used to authenticate with a Topology mysql instance with TLS - MySQLTopologySSLCAFile string // Certificate Authority PEM file used to authenticate with a Topology mysql instance with TLS - MySQLTopologySSLSkipVerify bool // If true, do not strictly validate mutual TLS certs for Topology mysql instances - MySQLTopologyUseMutualTLS bool // Turn on TLS authentication with the Topology MySQL instances - MySQLTopologyUseMixedTLS bool // Mixed TLS and non-TLS authentication with the Topology MySQL instances - MySQLTopologyTLSMinVersion string // Configures the minimal required TLS version for a topology MySQL instance with TLS. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3. - TLSCacheTTLFactor uint // Factor of InstancePollSeconds that we set as TLS info cache expiry - BackendDB string // EXPERIMENTAL: type of backend db; either "mysql" or "sqlite" - SQLite3DataFile string // when BackendDB == "sqlite", full path to sqlite3 datafile - SkipOrchestratorDatabaseUpdate bool // When true, do not check backend database schema nor attempt to update it. Useful when you may be running multiple versions of orchestrator, and you only wish certain boxes to dictate the db structure (or else any time a different orchestrator version runs it will rebuild database schema) - PanicIfDifferentDatabaseDeploy bool // When true, and this process finds the orchestrator backend DB was provisioned by a different version, panic - RaftEnabled bool // When true, setup orchestrator in a raft consensus layout. 
When false (default) all Raft* variables are ignored - RaftBind string - RaftAdvertise string - RaftDataDir string - DefaultRaftPort int // if a RaftNodes entry does not specify port, use this one - RaftNodes []string // Raft nodes to make initial connection with - ExpectFailureAnalysisConcensus bool - MySQLOrchestratorHost string - MySQLOrchestratorMaxPoolConnections int // The maximum size of the connection pool to the Orchestrator backend. - MySQLOrchestratorPort uint - MySQLOrchestratorDatabase string - MySQLOrchestratorUser string - MySQLOrchestratorPassword string - MySQLOrchestratorCredentialsConfigFile string // my.cnf style configuration file from where to pick credentials. Expecting `user`, `password` under `[client]` section - MySQLOrchestratorSSLPrivateKeyFile string // Private key file used to authenticate with the Orchestrator mysql instance with TLS - MySQLOrchestratorSSLCertFile string // Certificate PEM file used to authenticate with the Orchestrator mysql instance with TLS - MySQLOrchestratorSSLCAFile string // Certificate Authority PEM file used to authenticate with the Orchestrator mysql instance with TLS - MySQLOrchestratorSSLSkipVerify bool // If true, do not strictly validate mutual TLS certs for the Orchestrator mysql instances - MySQLOrchestratorUseMutualTLS bool // Turn on TLS authentication with the Orchestrator MySQL instance - MySQLOrchestratorTLSMinVersion string // Configures the minimal required TLS version for the Orchestrator MySQL instance with TLS. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3. 
- MySQLOrchestratorReadTimeoutSeconds int // Number of seconds before backend mysql read operation is aborted (driver-side) - MySQLOrchestratorRejectReadOnly bool // Reject read only connections https://github.com/go-sql-driver/mysql#rejectreadonly - MySQLConnectTimeoutSeconds int // Number of seconds before connection is aborted (driver-side) - MySQLDiscoveryReadTimeoutSeconds int // Number of seconds before topology mysql read operation is aborted (driver-side). Used for discovery queries. - MySQLTopologyReadTimeoutSeconds int // Number of seconds before topology mysql read operation is aborted (driver-side). Used for all but discovery queries. - MySQLConnectionLifetimeSeconds int // Number of seconds the mysql driver will keep database connection alive before recycling it - DefaultInstancePort int // In case port was not specified on command line - ReplicationLagQuery string // custom query to check on replica lg (e.g. heartbeat table). Must return a single row with a single numeric column, which is the lag. - ReplicationCredentialsQuery string // custom query to get replication credentials. Must return a single row, with two text columns: 1st is username, 2nd is password. This is optional, and can be used by orchestrator to configure replication after primary takeover or setup of co-primary. 
You need to ensure the orchestrator user has the privileges to run this query - DiscoverByShowSlaveHosts bool // Attempt SHOW SLAVE HOSTS before PROCESSLIST - UseSuperReadOnly bool // Should orchestrator super_read_only any time it sets read_only - InstancePollSeconds uint // Number of seconds between instance reads - InstanceWriteBufferSize int // Instance write buffer size (max number of instances to flush in one INSERT ODKU) - BufferInstanceWrites bool // Set to 'true' for write-optimization on backend table (compromise: writes can be stale and overwrite non stale data) - InstanceFlushIntervalMilliseconds int // Max interval between instance write buffer flushes - UnseenInstanceForgetHours uint // Number of hours after which an unseen instance is forgotten - SnapshotTopologiesIntervalHours uint // Interval in hour between snapshot-topologies invocation. Default: 0 (disabled) - DiscoveryMaxConcurrency uint // Number of goroutines doing hosts discovery - DiscoveryQueueCapacity uint // Buffer size of the discovery queue. Should be greater than the number of DB instances being discovered - DiscoveryQueueMaxStatisticsSize int // The maximum number of individual secondly statistics taken of the discovery queue - DiscoveryCollectionRetentionSeconds uint // Number of seconds to retain the discovery collection information - DiscoverySeeds []string // Hard coded array of hostname:port, ensuring orchestrator discovers these hosts upon startup, assuming not already known to orchestrator - InstanceBulkOperationsWaitTimeoutSeconds uint // Time to wait on a single instance when doing bulk (many instances) operation - HostnameResolveMethod string // Method by which to "normalize" hostname ("none"/"default"/"cname") - MySQLHostnameResolveMethod string // Method by which to "normalize" hostname via MySQL server. 
("none"/"@@hostname"/"@@report_host"; default "@@hostname") - SkipBinlogServerUnresolveCheck bool // Skip the double-check that an unresolved hostname resolves back to same hostname for binlog servers - ExpiryHostnameResolvesMinutes int // Number of minutes after which to expire hostname-resolves - RejectHostnameResolvePattern string // Regexp pattern for resolved hostname that will not be accepted (not cached, not written to db). This is done to avoid storing wrong resolves due to network glitches. - ReasonableReplicationLagSeconds int // Above this value is considered a problem - ProblemIgnoreHostnameFilters []string // Will minimize problem visualization for hostnames matching given regexp filters - VerifyReplicationFilters bool // Include replication filters check before approving topology refactoring - ReasonableMaintenanceReplicationLagSeconds int // Above this value move-up and move-below are blocked - CandidateInstanceExpireMinutes uint // Minutes after which a suggestion to use an instance as a candidate replica (to be preferably promoted on primary failover) is expired. - AuditLogFile string // Name of log file for audit operations. Disabled when empty. - AuditToSyslog bool // If true, audit messages are written to syslog - AuditToBackendDB bool // If true, audit messages are written to the backend DB's `audit` table (default: true) - AuditPurgeDays uint // Days after which audit entries are purged from the database - RemoveTextFromHostnameDisplay string // Text to strip off the hostname on cluster/clusters pages - ReadOnly bool - AuthenticationMethod string // Type of autherntication to use, if any. 
"" for none, "basic" for BasicAuth, "multi" for advanced BasicAuth, "proxy" for forwarded credentials via reverse proxy, "token" for token based access - OAuthClientID string - OAuthClientSecret string - OAuthScopes []string - HTTPAuthUser string // Username for HTTP Basic authentication (blank disables authentication) - HTTPAuthPassword string // Password for HTTP Basic authentication - AuthUserHeader string // HTTP header indicating auth user, when AuthenticationMethod is "proxy" - PowerAuthUsers []string // On AuthenticationMethod == "proxy", list of users that can make changes. All others are read-only. - PowerAuthGroups []string // list of unix groups the authenticated user must be a member of to make changes. - AccessTokenUseExpirySeconds uint // Time by which an issued token must be used - AccessTokenExpiryMinutes uint // Time after which HTTP access token expires - ClusterNameToAlias map[string]string // map between regex matching cluster name to a human friendly alias - DetectClusterAliasQuery string // Optional query (executed on topology instance) that returns the alias of a cluster. Query will only be executed on cluster primary (though until the topology's primary is resovled it may execute on other/all replicas). If provided, must return one row, one column - DetectClusterDomainQuery string // Optional query (executed on topology instance) that returns the VIP/CNAME/Alias/whatever domain name for the primary of this cluster. Query will only be executed on cluster primary (though until the topology's primary is resovled it may execute on other/all replicas). If provided, must return one row, one column - DetectInstanceAliasQuery string // Optional query (executed on topology instance) that returns the alias of an instance. If provided, must return one row, one column - DetectPromotionRuleQuery string // Optional query (executed on topology instance) that returns the promotion rule of an instance. If provided, must return one row, one column. 
- DataCenterPattern string // Regexp pattern with one group, extracting the datacenter name from the hostname - RegionPattern string // Regexp pattern with one group, extracting the region name from the hostname - PhysicalEnvironmentPattern string // Regexp pattern with one group, extracting physical environment info from hostname (e.g. combination of datacenter & prod/dev env) - DetectDataCenterQuery string // Optional query (executed on topology instance) that returns the data center of an instance. If provided, must return one row, one column. Overrides DataCenterPattern and useful for installments where DC cannot be inferred by hostname - DetectRegionQuery string // Optional query (executed on topology instance) that returns the region of an instance. If provided, must return one row, one column. Overrides RegionPattern and useful for installments where Region cannot be inferred by hostname - DetectPhysicalEnvironmentQuery string // Optional query (executed on topology instance) that returns the physical environment of an instance. If provided, must return one row, one column. Overrides PhysicalEnvironmentPattern and useful for installments where env cannot be inferred by hostname - DetectSemiSyncEnforcedQuery string // Optional query (executed on topology instance) to determine whether semi-sync is fully enforced for primary writes (async fallback is not allowed under any circumstance). If provided, must return one row, one column, value 0 or 1. - SupportFuzzyPoolHostnames bool // Should "submit-pool-instances" command be able to pass list of fuzzy instances (fuzzy means non-fqdn, but unique enough to recognize). 
Defaults 'true', implies more queries on backend db - InstancePoolExpiryMinutes uint // Time after which entries in database_instance_pool are expired (resubmit via `submit-pool-instances`) - PromotionIgnoreHostnameFilters []string // Orchestrator will not promote replicas with hostname matching pattern (via -c recovery; for example, avoid promoting dev-dedicated machines) - ServeAgentsHTTP bool // Spawn another HTTP interface dedicated for orchestrator-agent - AgentsUseSSL bool // When "true" orchestrator will listen on agents port with SSL as well as connect to agents via SSL - AgentsUseMutualTLS bool // When "true" Use mutual TLS for the server to agent communication - AgentSSLSkipVerify bool // When using SSL for the Agent, should we ignore SSL certification error - AgentSSLPrivateKeyFile string // Name of Agent SSL private key file, applies only when AgentsUseSSL = true - AgentSSLCertFile string // Name of Agent SSL certification file, applies only when AgentsUseSSL = true - AgentSSLCAFile string // Name of the Agent Certificate Authority file, applies only when AgentsUseSSL = true - AgentSSLValidOUs []string // Valid organizational units when using mutual TLS to communicate with the agents - UseSSL bool // Use SSL on the server web port - UseMutualTLS bool // When "true" Use mutual TLS for the server's web and API connections - SSLSkipVerify bool // When using SSL, should we ignore SSL certification error - SSLPrivateKeyFile string // Name of SSL private key file, applies only when UseSSL = true - SSLCertFile string // Name of SSL certification file, applies only when UseSSL = true - SSLCAFile string // Name of the Certificate Authority file, applies only when UseSSL = true - SSLValidOUs []string // Valid organizational units when using mutual TLS - StatusEndpoint string // Override the status endpoint. Defaults to '/api/status' - StatusOUVerify bool // If true, try to verify OUs when Mutual TLS is on. 
Defaults to false - AgentPollMinutes uint // Minutes between agent polling - UnseenAgentForgetHours uint // Number of hours after which an unseen agent is forgotten - StaleSeedFailMinutes uint // Number of minutes after which a stale (no progress) seed is considered failed. - SeedAcceptableBytesDiff int64 // Difference in bytes between seed source & target data size that is still considered as successful copy - SeedWaitSecondsBeforeSend int64 // Number of seconds for waiting before start send data command on agent - BinlogEventsChunkSize int // Chunk size (X) for SHOW BINLOG|RELAYLOG EVENTS LIMIT ?,X statements. Smaller means less locking and mroe work to be done - ReduceReplicationAnalysisCount bool // When true, replication analysis will only report instances where possibility of handled problems is possible in the first place (e.g. will not report most leaf nodes, that are mostly uninteresting). When false, provides an entry for every known instance - FailureDetectionPeriodBlockMinutes int // The time for which an instance's failure discovery is kept "active", so as to avoid concurrent "discoveries" of the instance's failure; this preceeds any recovery process, if any. 
- RecoveryPeriodBlockMinutes int // (supported for backwards compatibility but please use newer `RecoveryPeriodBlockSeconds` instead) The time for which an instance's recovery is kept "active", so as to avoid concurrent recoveries on smae instance as well as flapping - RecoveryPeriodBlockSeconds int // (overrides `RecoveryPeriodBlockMinutes`) The time for which an instance's recovery is kept "active", so as to avoid concurrent recoveries on smae instance as well as flapping - RecoveryIgnoreHostnameFilters []string // Recovery analysis will completely ignore hosts matching given patterns - RecoverPrimaryClusterFilters []string // Only do primary recovery on clusters matching these regexp patterns (of course the ".*" pattern matches everything) - RecoverIntermediatePrimaryClusterFilters []string // Only do IM recovery on clusters matching these regexp patterns (of course the ".*" pattern matches everything) - ProcessesShellCommand string // Shell that executes command scripts - OnFailureDetectionProcesses []string // Processes to execute when detecting a failover scenario (before making a decision whether to failover or not). May and should use some of these placeholders: {failureType}, {instanceType}, {isPrimary}, {isCoPrimary}, {failureDescription}, {command}, {failedHost}, {failureCluster}, {failureClusterDomain}, {failedPort}, {successorHost}, {successorPort}, {successorAlias}, {countReplicas}, {replicaHosts}, {isDowntimed}, {autoPrimaryRecovery}, {autoIntermediatePrimaryRecovery} - PreFailoverProcesses []string // Processes to execute before doing a failover (aborting operation should any once of them exits with non-zero code; order of execution undefined). 
May and should use some of these placeholders: {failureType}, {instanceType}, {isPrimary}, {isCoPrimary}, {failureDescription}, {command}, {failedHost}, {failureCluster}, {failureClusterDomain}, {failedPort}, {countReplicas}, {replicaHosts}, {isDowntimed} - PostFailoverProcesses []string // Processes to execute after doing a failover (order of execution undefined). May and should use some of these placeholders: {failureType}, {instanceType}, {isPrimary}, {isCoPrimary}, {failureDescription}, {command}, {failedHost}, {failureCluster}, {failureClusterDomain}, {failedPort}, {successorHost}, {successorPort}, {successorAlias}, {countReplicas}, {replicaHosts}, {isDowntimed}, {isSuccessful}, {lostReplicas}, {countLostReplicas} - PostUnsuccessfulFailoverProcesses []string // Processes to execute after a not-completely-successful failover (order of execution undefined). May and should use some of these placeholders: {failureType}, {instanceType}, {isPrimary}, {isCoPrimary}, {failureDescription}, {command}, {failedHost}, {failureCluster}, {failureClusterDomain}, {failedPort}, {successorHost}, {successorPort}, {successorAlias}, {countReplicas}, {replicaHosts}, {isDowntimed}, {isSuccessful}, {lostReplicas}, {countLostReplicas} - PostPrimaryFailoverProcesses []string // Processes to execute after doing a primary failover (order of execution undefined). Uses same placeholders as PostFailoverProcesses - PostIntermediatePrimaryFailoverProcesses []string // Processes to execute after doing a primary failover (order of execution undefined). Uses same placeholders as PostFailoverProcesses - PostTakePrimaryProcesses []string // Processes to execute after a successful Take-Primary event has taken place - CoPrimaryRecoveryMustPromoteOtherCoPrimary bool // When 'false', anything can get promoted (and candidates are prefered over others). 
When 'true', orchestrator will promote the other co-primary or else fail - DetachLostReplicasAfterPrimaryFailover bool // Should replicas that are not to be lost in primary recovery (i.e. were more up-to-date than promoted replica) be forcibly detached - ApplyMySQLPromotionAfterPrimaryFailover bool // Should orchestrator take upon itself to apply MySQL primary promotion: set read_only=0, detach replication, etc. - PreventCrossDataCenterPrimaryFailover bool // When true (default: false), cross-DC primary failover are not allowed, orchestrator will do all it can to only fail over within same DC, or else not fail over at all. - PreventCrossRegionPrimaryFailover bool // When true (default: false), cross-region primary failover are not allowed, orchestrator will do all it can to only fail over within same region, or else not fail over at all. - PrimaryFailoverLostInstancesDowntimeMinutes uint // Number of minutes to downtime any server that was lost after a primary failover (including failed primary & lost replicas). 0 to disable - PrimaryFailoverDetachReplicaPrimaryHost bool // Should orchestrator issue a detach-replica-primary-host on newly promoted primary (this makes sure the new primary will not attempt to replicate old primary if that comes back to life). Defaults 'false'. Meaningless if ApplyMySQLPromotionAfterPrimaryFailover is 'true'. - FailPrimaryPromotionOnLagMinutes uint // when > 0, fail a primary promotion if the candidate replica is lagging >= configured number of minutes. 
- FailPrimaryPromotionIfSQLThreadNotUpToDate bool // when true, and a primary failover takes place, if candidate primary has not consumed all relay logs, promotion is aborted with error - DelayPrimaryPromotionIfSQLThreadNotUpToDate bool // when true, and a primary failover takes place, if candidate primary has not consumed all relay logs, delay promotion until the sql thread has caught up - PostponeReplicaRecoveryOnLagMinutes uint // On crash recovery, replicas that are lagging more than given minutes are only resurrected late in the recovery process, after primary/IM has been elected and processes executed. Value of 0 disables this feature - OSCIgnoreHostnameFilters []string // OSC replicas recommendation will ignore replica hostnames matching given patterns - URLPrefix string // URL prefix to run orchestrator on non-root web path, e.g. /orchestrator to put it behind nginx. - DiscoveryIgnoreReplicaHostnameFilters []string // Regexp filters to apply to prevent auto-discovering new replicas. Usage: unreachable servers due to firewalls, applications which trigger binlog dumps - DiscoveryIgnorePrimaryHostnameFilters []string // Regexp filters to apply to prevent auto-discovering a primary. Usage: pointing your primary temporarily to replicate seom data from external host - DiscoveryIgnoreHostnameFilters []string // Regexp filters to apply to prevent discovering instances of any kind - WebMessage string // If provided, will be shown on all web pages below the title bar - MaxConcurrentReplicaOperations int // Maximum number of concurrent operations on replicas - InstanceDBExecContextTimeoutSeconds int // Timeout on context used while calling ExecContext on instance database - LockShardTimeoutSeconds int // Timeout on context used to lock shard. Should be a small value because we should fail-fast - WaitReplicasTimeoutSeconds int // Timeout on amount of time to wait for the replicas in case of ERS. Should be a small value because we should fail-fast. 
Should not be larger than LockShardTimeoutSeconds since that is the total time we use for an ERS. -} - -// ToJSONString will marshal this configuration as JSON -func (config *Configuration) ToJSONString() string { - b, _ := json.Marshal(config) - return string(b) -} - -// Config is *the* configuration instance, used globally to get configuration data -var Config = newConfiguration() - -func newConfiguration() *Configuration { - return &Configuration{ - Debug: false, - EnableSyslog: false, - ListenAddress: ":3000", - ListenSocket: "", - HTTPAdvertise: "", - AgentsServerPort: ":3001", - StatusEndpoint: DefaultStatusAPIEndpoint, - StatusOUVerify: false, - BackendDB: "sqlite", - SQLite3DataFile: "file::memory:?mode=memory&cache=shared", - SkipOrchestratorDatabaseUpdate: false, - PanicIfDifferentDatabaseDeploy: false, - RaftBind: "127.0.0.1:10008", - RaftAdvertise: "", - RaftDataDir: "", - DefaultRaftPort: 10008, - RaftNodes: []string{}, - ExpectFailureAnalysisConcensus: true, - MySQLOrchestratorMaxPoolConnections: 128, // limit concurrent conns to backend DB - MySQLOrchestratorPort: 3306, - MySQLTopologyUseMutualTLS: false, - MySQLTopologyUseMixedTLS: true, - MySQLOrchestratorUseMutualTLS: false, - MySQLConnectTimeoutSeconds: 2, - MySQLOrchestratorReadTimeoutSeconds: 30, - MySQLOrchestratorRejectReadOnly: false, - MySQLDiscoveryReadTimeoutSeconds: 10, - MySQLTopologyReadTimeoutSeconds: 600, - MySQLConnectionLifetimeSeconds: 0, - DefaultInstancePort: 3306, - TLSCacheTTLFactor: 100, - InstancePollSeconds: 5, - InstanceWriteBufferSize: 100, - BufferInstanceWrites: false, - InstanceFlushIntervalMilliseconds: 100, - UnseenInstanceForgetHours: 240, - SnapshotTopologiesIntervalHours: 0, - DiscoverByShowSlaveHosts: false, - UseSuperReadOnly: false, - DiscoveryMaxConcurrency: 300, - DiscoveryQueueCapacity: 100000, - DiscoveryQueueMaxStatisticsSize: 120, - DiscoveryCollectionRetentionSeconds: 120, - DiscoverySeeds: []string{}, - InstanceBulkOperationsWaitTimeoutSeconds: 10, - 
HostnameResolveMethod: "default", - MySQLHostnameResolveMethod: "none", - SkipBinlogServerUnresolveCheck: true, - ExpiryHostnameResolvesMinutes: 60, - RejectHostnameResolvePattern: "", - ReasonableReplicationLagSeconds: 10, - ProblemIgnoreHostnameFilters: []string{}, - VerifyReplicationFilters: false, - ReasonableMaintenanceReplicationLagSeconds: 20, - CandidateInstanceExpireMinutes: 60, - AuditLogFile: "", - AuditToSyslog: false, - AuditToBackendDB: false, - AuditPurgeDays: 7, - RemoveTextFromHostnameDisplay: "", - ReadOnly: false, - AuthenticationMethod: "", - HTTPAuthUser: "", - HTTPAuthPassword: "", - AuthUserHeader: "X-Forwarded-User", - PowerAuthUsers: []string{"*"}, - PowerAuthGroups: []string{}, - AccessTokenUseExpirySeconds: 60, - AccessTokenExpiryMinutes: 1440, - ClusterNameToAlias: make(map[string]string), - DetectClusterAliasQuery: "", - DetectClusterDomainQuery: "", - DetectInstanceAliasQuery: "", - DetectPromotionRuleQuery: "", - DataCenterPattern: "", - PhysicalEnvironmentPattern: "", - DetectDataCenterQuery: "", - DetectPhysicalEnvironmentQuery: "", - DetectSemiSyncEnforcedQuery: "", - SupportFuzzyPoolHostnames: true, - InstancePoolExpiryMinutes: 60, - PromotionIgnoreHostnameFilters: []string{}, - ServeAgentsHTTP: false, - AgentsUseSSL: false, - AgentsUseMutualTLS: false, - AgentSSLValidOUs: []string{}, - AgentSSLSkipVerify: false, - AgentSSLPrivateKeyFile: "", - AgentSSLCertFile: "", - AgentSSLCAFile: "", - UseSSL: false, - UseMutualTLS: false, - SSLValidOUs: []string{}, - SSLSkipVerify: false, - SSLPrivateKeyFile: "", - SSLCertFile: "", - SSLCAFile: "", - AgentPollMinutes: 60, - UnseenAgentForgetHours: 6, - StaleSeedFailMinutes: 60, - SeedAcceptableBytesDiff: 8192, - SeedWaitSecondsBeforeSend: 2, - BinlogEventsChunkSize: 10000, - ReduceReplicationAnalysisCount: true, - FailureDetectionPeriodBlockMinutes: 60, - RecoveryPeriodBlockMinutes: 60, - RecoveryPeriodBlockSeconds: 3600, - RecoveryIgnoreHostnameFilters: []string{}, - 
RecoverPrimaryClusterFilters: []string{"*"}, - RecoverIntermediatePrimaryClusterFilters: []string{}, - ProcessesShellCommand: "bash", - OnFailureDetectionProcesses: []string{}, - PreFailoverProcesses: []string{}, - PostPrimaryFailoverProcesses: []string{}, - PostIntermediatePrimaryFailoverProcesses: []string{}, - PostFailoverProcesses: []string{}, - PostUnsuccessfulFailoverProcesses: []string{}, - PostTakePrimaryProcesses: []string{}, - CoPrimaryRecoveryMustPromoteOtherCoPrimary: true, - DetachLostReplicasAfterPrimaryFailover: true, - ApplyMySQLPromotionAfterPrimaryFailover: true, - PreventCrossDataCenterPrimaryFailover: false, - PreventCrossRegionPrimaryFailover: false, - PrimaryFailoverLostInstancesDowntimeMinutes: 0, - PrimaryFailoverDetachReplicaPrimaryHost: false, - FailPrimaryPromotionOnLagMinutes: 0, - FailPrimaryPromotionIfSQLThreadNotUpToDate: false, - DelayPrimaryPromotionIfSQLThreadNotUpToDate: true, - PostponeReplicaRecoveryOnLagMinutes: 0, - OSCIgnoreHostnameFilters: []string{}, - URLPrefix: "", - DiscoveryIgnoreReplicaHostnameFilters: []string{}, - WebMessage: "", - MaxConcurrentReplicaOperations: 5, - InstanceDBExecContextTimeoutSeconds: 30, - LockShardTimeoutSeconds: 30, - WaitReplicasTimeoutSeconds: 30, - } -} - -func (config *Configuration) MySQLOrchestratorTLSMinVersionNumber() uint16 { - // We can ignore the error here, we already checked for valid options if it's set. - // If it's not set, we get a safe default back here. - minVersion, _ := vttls.TLSVersionToNumber(config.MySQLOrchestratorTLSMinVersion) - return minVersion -} - -func (config *Configuration) MySQLTopologyTLSMinVersionNumber() uint16 { - // We can ignore the error here, we already checked for valid options if it's set. - // If it's not set, we get a safe default back here. 
- minVersion, _ := vttls.TLSVersionToNumber(config.MySQLTopologyTLSMinVersion) - return minVersion -} - -func (config *Configuration) postReadAdjustments() error { - if config.MySQLOrchestratorCredentialsConfigFile != "" { - mySQLConfig := struct { - Client struct { - User string - Password string - } - }{} - err := gcfg.ReadFileInto(&mySQLConfig, config.MySQLOrchestratorCredentialsConfigFile) - if err != nil { - log.Fatalf("Failed to parse gcfg data from file: %+v", err) - } else { - log.Infof("Parsed orchestrator credentials from %s", config.MySQLOrchestratorCredentialsConfigFile) - config.MySQLOrchestratorUser = mySQLConfig.Client.User - config.MySQLOrchestratorPassword = mySQLConfig.Client.Password - } - } - { - // We accept password in the form "${SOME_ENV_VARIABLE}" in which case we pull - // the given variable from os env - submatch := envVariableRegexp.FindStringSubmatch(config.MySQLOrchestratorPassword) - if len(submatch) > 1 { - config.MySQLOrchestratorPassword = os.Getenv(submatch[1]) - } - } - if config.MySQLTopologyCredentialsConfigFile != "" { - mySQLConfig := struct { - Client struct { - User string - Password string - } - }{} - err := gcfg.ReadFileInto(&mySQLConfig, config.MySQLTopologyCredentialsConfigFile) - if err != nil { - log.Fatalf("Failed to parse gcfg data from file: %+v", err) - } else { - log.Infof("Parsed topology credentials from %s", config.MySQLTopologyCredentialsConfigFile) - config.MySQLTopologyUser = mySQLConfig.Client.User - config.MySQLTopologyPassword = mySQLConfig.Client.Password - } - } - { - // We accept password in the form "${SOME_ENV_VARIABLE}" in which case we pull - // the given variable from os env - submatch := envVariableRegexp.FindStringSubmatch(config.MySQLTopologyPassword) - if len(submatch) > 1 { - config.MySQLTopologyPassword = os.Getenv(submatch[1]) - } - } - - if config.RecoveryPeriodBlockSeconds == 0 && config.RecoveryPeriodBlockMinutes > 0 { - // RecoveryPeriodBlockSeconds is a newer addition that overrides 
RecoveryPeriodBlockMinutes - // The code does not consider RecoveryPeriodBlockMinutes anymore, but RecoveryPeriodBlockMinutes - // still supported in config file for backwards compatibility - config.RecoveryPeriodBlockSeconds = config.RecoveryPeriodBlockMinutes * 60 - } - - if config.FailPrimaryPromotionIfSQLThreadNotUpToDate && config.DelayPrimaryPromotionIfSQLThreadNotUpToDate { - return fmt.Errorf("Cannot have both FailPrimaryPromotionIfSQLThreadNotUpToDate and DelayPrimaryPromotionIfSQLThreadNotUpToDate enabled") - } - if config.FailPrimaryPromotionOnLagMinutes > 0 && config.ReplicationLagQuery == "" { - return fmt.Errorf("nonzero FailPrimaryPromotionOnLagMinutes requires ReplicationLagQuery to be set") - } - - if config.URLPrefix != "" { - // Ensure the prefix starts with "/" and has no trailing one. - config.URLPrefix = strings.TrimLeft(config.URLPrefix, "/") - config.URLPrefix = strings.TrimRight(config.URLPrefix, "/") - config.URLPrefix = "/" + config.URLPrefix - } - - if config.IsSQLite() && config.SQLite3DataFile == "" { - return fmt.Errorf("SQLite3DataFile must be set when BackendDB is sqlite") - } - if config.RaftEnabled && config.RaftDataDir == "" { - return fmt.Errorf("RaftDataDir must be defined since raft is enabled (RaftEnabled)") - } - if config.RaftEnabled && config.RaftBind == "" { - return fmt.Errorf("RaftBind must be defined since raft is enabled (RaftEnabled)") - } - if config.RaftAdvertise == "" { - config.RaftAdvertise = config.RaftBind - } - if config.HTTPAdvertise != "" { - u, err := url.Parse(config.HTTPAdvertise) - if err != nil { - return fmt.Errorf("Failed parsing HTTPAdvertise %s: %s", config.HTTPAdvertise, err.Error()) - } - if u.Scheme == "" { - return fmt.Errorf("If specified, HTTPAdvertise must include scheme (http:// or https://)") - } - if u.Hostname() == "" { - return fmt.Errorf("If specified, HTTPAdvertise must include host name") - } - if u.Port() == "" { - return fmt.Errorf("If specified, HTTPAdvertise must include port 
number") - } - if u.Path != "" { - return fmt.Errorf("If specified, HTTPAdvertise must not specify a path") - } - if config.InstanceWriteBufferSize <= 0 { - config.BufferInstanceWrites = false - } - } - - if config.MySQLOrchestratorTLSMinVersion != "" { - _, err := vttls.TLSVersionToNumber(config.MySQLOrchestratorTLSMinVersion) - if err != nil { - return fmt.Errorf("If specified, MySQLOrchestratorTLSMinVersion must be one of TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3") - } - } - - if config.MySQLTopologyTLSMinVersion != "" { - _, err := vttls.TLSVersionToNumber(config.MySQLTopologyTLSMinVersion) - if err != nil { - return fmt.Errorf("If specified, MySQLTopologyTLSMinVersion must be one of TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3") - } - } - - return nil -} - -func (config *Configuration) IsSQLite() bool { - return strings.Contains(config.BackendDB, "sqlite") -} - -func (config *Configuration) IsMySQL() bool { - return config.BackendDB == "mysql" || config.BackendDB == "" -} - -// read reads configuration from given file, or silently skips if the file does not exist. -// If the file does exist, then it is expected to be in valid JSON format or the function bails out. 
-func read(fileName string) (*Configuration, error) { - if fileName == "" { - return Config, fmt.Errorf("Empty file name") - } - file, err := os.Open(fileName) - if err != nil { - return Config, err - } - decoder := json.NewDecoder(file) - err = decoder.Decode(Config) - if err == nil { - log.Infof("Read config: %s", fileName) - } else { - log.Fatal("Cannot read config file:", fileName, err) - } - if err := Config.postReadAdjustments(); err != nil { - log.Fatal(err) - } - return Config, err -} - -// ForceRead reads configuration from given file name or bails out if it fails -func ForceRead(fileName string) *Configuration { - _, err := read(fileName) - if err != nil { - log.Fatal("Cannot read config file:", fileName, err) - } - return Config -} - -// CLIFlags stores some command line flags that are globally available in the process' lifetime -type CLIFlags struct { - Noop *bool - SkipUnresolve *bool - SkipUnresolveCheck *bool - BinlogFile *string - GrabElection *bool - Version *bool - Statement *string - PromotionRule *string - ConfiguredVersion string - SkipContinuousRegistration *bool - EnableDatabaseUpdate *bool - IgnoreRaftSetup *bool - Tag *string -} - -var RuntimeCLIFlags CLIFlags diff --git a/go/vt/vtgr/config/vtgr_config.json b/go/vt/vtgr/config/vtgr_config.json deleted file mode 100644 index 1c1ecae562a..00000000000 --- a/go/vt/vtgr/config/vtgr_config.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "BackoffErrorWaitTimeSeconds": 5, - "BootstrapGroupSize": 3 -} \ No newline at end of file diff --git a/go/vt/vtgr/config/vtgr_config_test.go b/go/vt/vtgr/config/vtgr_config_test.go deleted file mode 100644 index ec4312096a9..00000000000 --- a/go/vt/vtgr/config/vtgr_config_test.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestReadConfig(t *testing.T) { - path, _ := os.Getwd() - config, err := ReadVTGRConfig(filepath.Join(path, "vtgr_config.json")) - assert.NoError(t, err) - // Make sure VTGR config honors the default setting - assert.Equal(t, false, config.DisableReadOnlyProtection) - assert.Equal(t, 600, config.BootstrapWaitTimeSeconds) - // Make sure the config is load correctly - assert.Equal(t, 3, config.BootstrapGroupSize) - assert.Equal(t, 5, config.BackoffErrorWaitTimeSeconds) -} diff --git a/go/vt/vtgr/controller/diagnose.go b/go/vt/vtgr/controller/diagnose.go deleted file mode 100644 index b0896f4555a..00000000000 --- a/go/vt/vtgr/controller/diagnose.go +++ /dev/null @@ -1,586 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "context" - "errors" - "fmt" - "math/rand" - "os" - "sort" - "strings" - "sync" - "time" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/concurrency" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgr/db" -) - -var pingTabletTimeout = 2 * time.Second - -func init() { - servenv.OnParseFor("vtgr", func(fs *pflag.FlagSet) { - fs.DurationVar(&pingTabletTimeout, "ping_tablet_timeout", 2*time.Second, "time to wait when we ping a tablet") - }) -} - -// DiagnoseType is the types of Diagnose result -type DiagnoseType string - -type instanceGTIDSet struct { - gtids mysql.GTIDSet - instance *grInstance -} - -// groupGTIDRecorder is used to help us query all the instance in parallel and record the result -// it helps us to take care of the consistency / synchronization among go routines -type groupGTIDRecorder struct { - name string - gtidWithInstances []*instanceGTIDSet - hasActive bool - sync.Mutex -} - -const ( - // DiagnoseTypeError represents an DiagnoseTypeError status - DiagnoseTypeError DiagnoseType = "error" - // DiagnoseTypeHealthy represents everything is DiagnoseTypeHealthy - DiagnoseTypeHealthy = "Healthy" - // DiagnoseTypeShardHasNoGroup represents the cluster has not init yet - DiagnoseTypeShardHasNoGroup = "ShardHasNoGroup" - // DiagnoseTypeShardHasInactiveGroup represents the status where we have a group name but no member in it - DiagnoseTypeShardHasInactiveGroup = "ShardHasInactiveGroup" - // DiagnoseTypeInsufficientGroupSize represents the cluster has insufficient group members - DiagnoseTypeInsufficientGroupSize = "InsufficientGroupSize" - // DiagnoseTypeReadOnlyShard represents the cluster who has a read only node - DiagnoseTypeReadOnlyShard = "ReadOnlyShard" - // DiagnoseTypeUnreachablePrimary represents the primary tablet is unreachable - DiagnoseTypeUnreachablePrimary = "UnreachablePrimary" - 
// DiagnoseTypeWrongPrimaryTablet represents the primary tablet is incorrect based on mysql group - DiagnoseTypeWrongPrimaryTablet = "WrongPrimaryTablet" - // DiagnoseTypeUnconnectedReplica represents cluster with primary tablet, but a node is not connected to it - DiagnoseTypeUnconnectedReplica = "UnconnectedReplica" - // DiagnoseTypeBackoffError represents a transient error e.g., the primary is unreachable - DiagnoseTypeBackoffError = "BackoffError" - // DiagnoseTypeBootstrapBackoff represents an ongoing bootstrap - DiagnoseTypeBootstrapBackoff = "BootstrapBackoff" - - // diagnoseTypeUnknown represents a unclear intermediate diagnose state - diagnoseTypeUnknown = "Unknown" -) - -// ScanAndRepairShard scans a particular shard by first Diagnose the shard with info from grShard -// and then repair the probelm if the shard is unhealthy -func (shard *GRShard) ScanAndRepairShard(ctx context.Context) { - status, err := shard.Diagnose(ctx) - if err != nil { - shard.logger.Errorf("fail to scanAndRepairShard %v/%v because of Diagnose error: %v", shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard, err) - return - } - // We are able to get Diagnose without error - // - // Note: all the recovery function should first try to grab a shard level lock - // and check the trigger conditions before doing anything. This is to avoid - // other VTGR instance try to do the same thing - shard.logger.Infof("%v status is %v", formatKeyspaceShard(shard.KeyspaceShard), status) - if _, err := shard.Repair(ctx, status); err != nil { - shard.logger.Errorf("failed to repair %v: %v", status, err) - } -} - -// Diagnose the shard in the following order: -// TODO: use FSM to make sure the status transition is correct -// 1. if the shard has a group that every node agreed on -// 2. if the group has any active (online / recovering) member -// 3. if the shard has initialized a Vitess primary -// 4. if primary tablet is reachable -// 5. if Vitess primary and mysql primary reconciled -// 6. 
if we have enough group members -// 7. if the primary node has read_only=OFF -// 8. if there is a node that is not in Mysql group -func (shard *GRShard) Diagnose(ctx context.Context) (DiagnoseType, error) { - shard.Lock() - defer shard.Unlock() - diagnoseResult, err := shard.diagnoseLocked(ctx) - shard.shardStatusCollector.recordDiagnoseResult(diagnoseResult) - shard.populateVTGRStatusLocked() - if diagnoseResult != DiagnoseTypeHealthy { - shard.logger.Warningf(`VTGR diagnose shard as unhealthy for %s/%s: result=%v, last_result=%v, instances=%v, primary=%v, primary_tablet=%v, problematics=%v, unreachables=%v,\n%v`, - shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard, - shard.shardStatusCollector.status.DiagnoseResult, - shard.lastDiagnoseResult, - shard.shardStatusCollector.status.Instances, - shard.shardStatusCollector.status.Primary, - shard.primaryTabletAlias(), - shard.shardStatusCollector.status.Problematics, - shard.shardStatusCollector.status.Unreachables, - shard.sqlGroup.ToString()) - } - if diagnoseResult != shard.lastDiagnoseResult { - shard.lastDiagnoseResult = diagnoseResult - shard.lastDiagnoseSince = time.Now() - } - return diagnoseResult, err -} - -func (shard *GRShard) diagnoseLocked(ctx context.Context) (DiagnoseType, error) { - // fast path only diagnose problem Vitess primary - // which does not needed if the shard is inactive - if shard.localDbPort != 0 && shard.isActive.Load() { - localView := shard.getLocalView() - if localView != nil { - fastDiagnose := shard.fastPathDiagnose(ctx, localView) - if fastDiagnose != diagnoseTypeUnknown { - // If we can use local sql group info to diagnose - // we should record the view as well. 
This view is all we need - // later VTGR needs to find group name, primary etc from - // SQLGroup for repairing instead of getting nil - shard.sqlGroup.overrideView([]*db.GroupView{localView}) - shard.logger.Infof("Diagnose %v from fast path", fastDiagnose) - return fastDiagnose, nil - } - } - } - // fast path is disabled or cannot diagnose the shard - // fall back to the normal strategy where we fetch info from all the nodes - err := shard.refreshSQLGroup() - if err != nil { - if errors.Is(err, db.ErrGroupBackoffError) { - return DiagnoseTypeBackoffError, nil - } - if errors.Is(err, db.ErrGroupOngoingBootstrap) { - return DiagnoseTypeBootstrapBackoff, nil - } - return DiagnoseTypeError, vterrors.Wrap(err, "fail to refreshSQLGroup") - } - // First, we check if there is any group in the shard - // if no, we should bootstrap one - mysqlGroup := shard.shardAgreedGroupName() - if mysqlGroup == "" { - if len(shard.sqlGroup.views) != shard.sqlGroup.expectedBootstrapSize { - return DiagnoseTypeError, fmt.Errorf("fail to diagnose ShardHasNoGroup with %v nodes", len(shard.sqlGroup.views)) - } - return DiagnoseTypeShardHasNoGroup, nil - } - // We handle the case where the shard has an agreed group name but all nodes are offline - // In this situation, instead of bootstrap a group, we should re-build the - // old group for the shard - if shard.isAllOfflineOrError() { - shard.logger.Info("Found all members are OFFLINE or ERROR") - // On rebootstrap, we always want to make sure _all_ the nodes in topo are reachable - // unless we override the rebootstrap size - desiredRebootstrapSize := len(shard.instances) - if shard.sqlGroup.rebootstrapSize != 0 { - desiredRebootstrapSize = shard.sqlGroup.rebootstrapSize - } - if len(shard.sqlGroup.views) != desiredRebootstrapSize { - return DiagnoseTypeError, fmt.Errorf("fail to diagnose ShardHasInactiveGroup with %v nodes expecting %v", len(shard.sqlGroup.views), desiredRebootstrapSize) - } - return DiagnoseTypeShardHasInactiveGroup, nil - 
} - - // We only check Vitess primary iff shard is active. - // Otherwise VTGR will only make sure there is a mysql group in the shard. - if shard.isActive.Load() { - // Secondly, we check if there is a primary tablet. - // If there is a group but we cannot find a primary tablet - // we should set it based on mysql group - hasWrongPrimary, err := shard.hasWrongPrimaryTablet(ctx) - if err != nil { - // errMissingGroup means we cannot find a mysql group for the shard - // we are in DiagnoseTypeShardHasNoGroup state - if err == errMissingGroup { - shard.logger.Warning("Missing mysql group") - return DiagnoseTypeShardHasNoGroup, nil - } - // errMissingPrimaryTablet means we cannot find a tablet based on mysql primary - // which means the tablet disconnected from topo server and we cannot find it - if err == errMissingPrimaryTablet { - return DiagnoseTypeUnreachablePrimary, nil - } - return DiagnoseTypeError, vterrors.Wrap(err, "fail to diagnose shardNeedsInitialized") - } - if hasWrongPrimary { - return DiagnoseTypeWrongPrimaryTablet, nil - } - - // Thirdly, we check if primary tablet is reachable - isPrimaryReachable, err := shard.isPrimaryReachable(ctx) - if err != nil { - return DiagnoseTypeError, vterrors.Wrap(err, "fail to diagnose isPrimaryReachable") - } - if !isPrimaryReachable { - return DiagnoseTypeUnreachablePrimary, nil - } - } - - // At this point, the primary tablet should be consistent with mysql primary - // so the view from priamry tablet should be accurate - onlineMembers, isReadOnly := shard.getOnlineGroupInfo() - // If we found a writable shard in the inactive shard - // we should consider the shard as InsufficientGroupSize to set read only - if !isReadOnly && !shard.isActive.Load() { - return DiagnoseTypeInsufficientGroupSize, nil - } - // Then we check if we satisfy the minimum replica requirement - if shard.minNumReplicas > 0 { - if onlineMembers >= shard.minNumReplicas && isReadOnly && shard.isActive.Load() { - return DiagnoseTypeReadOnlyShard, 
nil - } - // If we disable readonly protection and still found we have a read only shard, - // we should return DiagnoseTypeReadOnlyShard so that VTGR can turn off read only - if shard.disableReadOnlyProtection && isReadOnly && shard.isActive.Load() { - return DiagnoseTypeReadOnlyShard, nil - } - // We don't check isActive here since if it is inactive, VTGR should already return InsufficientGroupSize - if !shard.disableReadOnlyProtection && onlineMembers < shard.minNumReplicas && !isReadOnly { - return DiagnoseTypeInsufficientGroupSize, nil - } - } - - // Lastly, we check if there is a replica that is not connected to primary node - disconnectedInstance, err := shard.disconnectedInstance() - if err != nil { - return DiagnoseTypeError, vterrors.Wrap(err, "fail to diagnose disconnectedInstance") - } - if disconnectedInstance != nil { - return DiagnoseTypeUnconnectedReplica, nil - } - - // If we get here, shard is DiagnoseTypeHealthy - return DiagnoseTypeHealthy, nil -} - -func (shard *GRShard) getLocalView() *db.GroupView { - localHostname, _ := os.Hostname() - localInst := shard.findTabletByHostAndPort(localHostname, shard.localDbPort) - if localInst == nil { - return nil - } - // TODO: consider using -db_socket to read local info - view, err := shard.dbAgent.FetchGroupView(localInst.alias, localInst.instanceKey) - // We still have the fallback logic if this failed, therefore we don't raise error - // but try to get local view with best effort - if err != nil { - shard.logger.Errorf("failed to fetch local group view: %v", err) - } - return view -} - -func (shard *GRShard) fastPathDiagnose(ctx context.Context, view *db.GroupView) DiagnoseType { - pHost, pPort, isOnline := view.GetPrimaryView() - primaryTablet := shard.findShardPrimaryTablet() - if !isOnline || pHost == "" || pPort == 0 || primaryTablet == nil { - return diagnoseTypeUnknown - } - // VTGR will only bootstrap a group when it observes same number of views as group_size - // it means if we can find an 
ONLINE primary, we should be able to trust the view reported locally - // together with the primary tablet from topo server, we can determine: - // - if we need to failover vitess - // - if we need to failover mysql - if primaryTablet.instanceKey.Hostname != pHost || primaryTablet.instanceKey.Port != pPort { - // we find a mismatch but if the reported mysql primary is not in - // topology we should consider it as unreachable. - if shard.findTabletByHostAndPort(pHost, pPort) == nil { - return DiagnoseTypeUnreachablePrimary - } - return DiagnoseTypeWrongPrimaryTablet - } - if !shard.instanceReachable(ctx, primaryTablet) { - return DiagnoseTypeUnreachablePrimary - } - return diagnoseTypeUnknown -} - -func (shard *GRShard) shardAgreedGroupName() string { - if len(shard.instances) == 0 { - return "" - } - return shard.sqlGroup.GetGroupName() -} - -func (shard *GRShard) isAllOfflineOrError() bool { - return shard.sqlGroup.IsAllOfflineOrError() -} - -func (shard *GRShard) getOnlineGroupInfo() (int, bool) { - return shard.sqlGroup.GetOnlineGroupInfo() -} - -func (shard *GRShard) hasWrongPrimaryTablet(ctx context.Context) (bool, error) { - // Find out the hostname and port of the primary in mysql group - // we try to use local instance and then fallback to a random instance to check mysqld - // in case the primary is unreachable - host, port, _ := shard.sqlGroup.GetPrimary() - if !isHostPortValid(host, port) { - shard.logger.Warningf("Invalid address for primary %v:%v", host, port) - return false, errMissingGroup - } - // Make sure we have a tablet available - // findTabletByHostAndPort returns nil when we cannot find a tablet - // that is running on host:port, which means the tablet get stuck - // or when the tablet is not reachable - // we retrun errMissingPrimaryTablet so that VTGR will trigger a failover - tablet := shard.findTabletByHostAndPort(host, port) - if tablet == nil || !shard.instanceReachable(ctx, tablet) { - shard.logger.Errorf("Failed to find tablet that is 
running with mysql on %v:%v", host, port) - return false, errMissingPrimaryTablet - } - // Now we know we have a valid mysql primary in the group - // we should make sure tablets are aligned with it - primary := shard.findShardPrimaryTablet() - // If we failed to find primary for shard, it mostly means we are initializing the shard - // return true directly so that VTGR will set primary tablet according to MySQL group - if primary == nil { - shard.logger.Infof("unable to find primary tablet for %v", formatKeyspaceShard(shard.KeyspaceShard)) - return true, nil - } - return (host != primary.instanceKey.Hostname) || (port != primary.instanceKey.Port), nil -} - -func (shard *GRShard) isPrimaryReachable(ctx context.Context) (bool, error) { - primaryTablet := shard.findShardPrimaryTablet() - if primaryTablet == nil { - return false, fmt.Errorf("unable to find primary for %v", formatKeyspaceShard(shard.KeyspaceShard)) - } - return shard.instanceReachable(ctx, primaryTablet), nil -} - -func (shard *GRShard) instanceReachable(ctx context.Context, instance *grInstance) bool { - pingCtx, cancel := context.WithTimeout(context.Background(), pingTabletTimeout) - defer cancel() - c := make(chan error, 1) - // tmc.Ping create grpc client connection first without timeout via dial - // then call the grpc endpoint using the context with timeout - // this is problematic if the host is really unreachable, we have to wait the - // all the retries inside grpc.dial with exponential backoff - go func() { c <- shard.tmc.Ping(pingCtx, instance.tablet) }() - select { - case <-pingCtx.Done(): - shard.logger.Errorf("Ping abort timeout %v", pingTabletTimeout) - return false - case err := <-c: - if err != nil { - shard.logger.Errorf("Ping error host=%v: %v", instance.instanceKey.Hostname, err) - } - return err == nil - } -} - -// findShardPrimaryTablet returns the primary for the shard -// it is either based on shard info from global topo or based on tablet types -// from local topo -func (shard 
*GRShard) findShardPrimaryTablet() *grInstance { - var primaryInstance *grInstance - for _, instance := range shard.instances { - if shard.primaryAlias == instance.alias { - return instance - } - } - return primaryInstance -} - -func (shard *GRShard) primaryTabletAlias() string { - primary := shard.findShardPrimaryTablet() - if primary == nil { - return "UNKNOWN" - } - return primary.alias -} - -// disconnectedInstance iterates all known the replica records -// and checks mysql to see if the group replication is setup on it -func (shard *GRShard) disconnectedInstance() (*grInstance, error) { - primaryInstance := shard.findShardPrimaryTablet() - // if there is no primary, we should recover from DiagnoseTypeWrongPrimaryTablet - if primaryInstance == nil { - return nil, fmt.Errorf("%v does not have primary", formatKeyspaceShard(shard.KeyspaceShard)) - } - // Up to this check, we know: - // - shard has an agreed group - // - shard has a primary tablet - // - shard primary tablet is running on the same node as mysql - rand.Shuffle(len(shard.instances), func(i, j int) { - shard.instances[i], shard.instances[j] = shard.instances[j], shard.instances[i] - }) - for _, instance := range shard.instances { - // Skip instance without hostname because they are not up and running - // also skip instances that raised unrecoverable errors - if shard.shardStatusCollector.isUnreachable(instance) { - shard.logger.Infof("Skip %v to check disconnectedInstance because it is unhealthy", instance.alias) - continue - } - isUnconnected := shard.sqlGroup.IsUnconnectedReplica(instance.instanceKey) - if isUnconnected { - return instance, nil - } - } - return nil, nil -} - -func (recorder *groupGTIDRecorder) recordGroupStatus(name string, isActive bool) error { - recorder.Lock() - defer recorder.Unlock() - if recorder.name != "" && recorder.name != name { - return fmt.Errorf("group has more than one group name") - } - recorder.name = name - // hasActive records true if any node finds an active 
member - if isActive { - recorder.hasActive = true - } - return nil -} - -func (recorder *groupGTIDRecorder) recordGroupGTIDs(gtids mysql.GTIDSet, instance *grInstance) { - recorder.Lock() - defer recorder.Unlock() - recorder.gtidWithInstances = append(recorder.gtidWithInstances, &instanceGTIDSet{gtids: gtids, instance: instance}) -} - -func (recorder *groupGTIDRecorder) sort() { - sort.SliceStable(recorder.gtidWithInstances, func(i, j int) bool { - return recorder.gtidWithInstances[i].instance.alias < recorder.gtidWithInstances[j].instance.alias - }) -} - -func (collector *shardStatusCollector) recordDiagnoseResult(result DiagnoseType) { - collector.Lock() - defer collector.Unlock() - collector.status.DiagnoseResult = result -} - -func (collector *shardStatusCollector) recordUnreachables(instance *grInstance) { - collector.Lock() - defer collector.Unlock() - // dedup - // the list size is at most same as number instances in a shard so iterate to dedup is not terrible - for _, alias := range collector.status.Unreachables { - if alias == instance.alias { - return - } - } - collector.status.Unreachables = append(collector.status.Unreachables, instance.alias) -} - -func (collector *shardStatusCollector) clear() { - collector.Lock() - defer collector.Unlock() - collector.status.Unreachables = nil - collector.status.Problematics = nil -} - -func (collector *shardStatusCollector) recordProblematics(instance *grInstance) { - collector.Lock() - defer collector.Unlock() - // dedup - // the list size is at most same as number instances in a shard so iterate to dedup is not terrible - for _, alias := range collector.status.Problematics { - if alias == instance.alias { - return - } - } - collector.status.Problematics = append(collector.status.Problematics, instance.alias) -} - -func formatKeyspaceShard(keyspaceShard *topo.KeyspaceShard) string { - return fmt.Sprintf("%v/%v", keyspaceShard.Keyspace, keyspaceShard.Shard) -} - -func isHostPortValid(host string, port int) bool { - 
return host != "" && port != 0 -} - -// We use forAllInstances in two cases: -// 1. FetchGroupView GTIDs to find a candidate for failover. -// If a node is not healthy it should not be considered as a failover candidate -// -// 2. FetchGroupView group member status to see if we need to bootstrap a group, -// either for the first time or rebuild a group after all the nodes are died. -// -// caller will be responsible to decide if they want to tolerate errors from the forAllInstances call -func (shard *GRShard) forAllInstances(task func(instance *grInstance, wg *sync.WaitGroup, er concurrency.ErrorRecorder)) *concurrency.AllErrorRecorder { - errorRecord := concurrency.AllErrorRecorder{} - shard.shardStatusCollector.clear() - var wg sync.WaitGroup - for _, instance := range shard.instances { - wg.Add(1) - go task(instance, &wg, &errorRecord) - } - wg.Wait() - if len(errorRecord.Errors) > 0 { - shard.logger.Errorf("get errors in forAllInstances call: %v", errorRecord.Error()) - } - return &errorRecord -} - -func unreachableError(err error) bool { - contains := []string{ - // "no such host"/"no route to host" is the error when a host is not reachalbe - "no such host", - "no route to host", - // "connect: connection refused" is the error when a mysqld refused the connection - "connect: connection refused", - // "invalid mysql instance key" is the error when a tablet does not populate mysql hostname or port - // this can happen if the tablet crashed. We keep them in the grShard.instances list to compute - // quorum but consider it as an unreachable host. 
- "invalid mysql instance key", - } - for _, k := range contains { - if strings.Contains(err.Error(), k) { - return true - } - } - return false -} - -// refreshSQLGroup hits all instances and renders a SQL group locally for later diagnoses -// the SQL group contains a list of "views" for the group from all the available nodes -func (shard *GRShard) refreshSQLGroup() error { - // reset views in sql group - shard.sqlGroup.clear() - er := shard.forAllInstances(func(instance *grInstance, wg *sync.WaitGroup, er concurrency.ErrorRecorder) { - defer wg.Done() - view, err := shard.dbAgent.FetchGroupView(instance.alias, instance.instanceKey) - // We just log error here because we rely on mysql tells us if it is happy or not - // If the node is unreachable - if err != nil { - er.RecordError(err) - shard.shardStatusCollector.recordProblematics(instance) - if unreachableError(err) { - shard.shardStatusCollector.recordUnreachables(instance) - } - shard.logger.Errorf("%v get error while fetch group info: %v", instance.alias, err) - return - } - shard.sqlGroup.recordView(view) - }) - // Only raise error if we failed to get any data from mysql - // otherwise, we will use what we get from mysql directly - if len(er.Errors) == len(shard.instances) { - shard.logger.Errorf("fail to fetch any data for mysql") - return db.ErrGroupBackoffError - } - return shard.sqlGroup.Resolve() -} diff --git a/go/vt/vtgr/controller/diagnose_test.go b/go/vt/vtgr/controller/diagnose_test.go deleted file mode 100644 index c8b81bb70da..00000000000 --- a/go/vt/vtgr/controller/diagnose_test.go +++ /dev/null @@ -1,900 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "errors" - "math" - "os" - "strconv" - "strings" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" - "vitess.io/vitess/go/vt/vtgr/config" - "vitess.io/vitess/go/vt/vtgr/db" - "vitess.io/vitess/go/vt/vtgr/inst" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -const diagnoseGroupSize = 3 - -var ( - testHost, _ = os.Hostname() - alias0 = "test_cell-0000000000" - alias1 = "test_cell-0000000001" - alias2 = "test_cell-0000000002" - testPort0 = 17000 - testPort1 = 17001 - testPort2 = 17002 -) - -type testGroupInput struct { - groupName string - readOnly bool - checkResult int - groupState []db.TestGroupState - gtid mysql.GTIDSet -} - -func TestShardIsHealthy(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - tablet1 := buildTabletInfo(uint32(testPort0), testHost, testPort0, topodatapb.TabletType_PRIMARY, time.Now()) - tablet2 := buildTabletInfo(uint32(testPort1), testHost, testPort1, topodatapb.TabletType_SPARE, time.Time{}) - tablet3 := buildTabletInfo(uint32(testPort2), testHost, testPort2, topodatapb.TabletType_REPLICA, time.Time{}) - 
testutil.AddTablet(ctx, t, ts, tablet1.Tablet, nil) - testutil.AddTablet(ctx, t, ts, tablet2.Tablet, nil) - testutil.AddTablet(ctx, t, ts, tablet3.Tablet, nil) - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet1.Alias - return nil - }) - dbAgent. - EXPECT(). - FetchGroupView(gomock.Any(), gomock.Any()). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - return db.BuildGroupView(alias, "group", testHost, testPort0, false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }), nil - }). - AnyTimes() - tmc.EXPECT().Ping(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - cfg := &config.VTGRConfig{BootstrapGroupSize: 3, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - shard.refreshTabletsInShardLocked(ctx) - diagnose, _ := shard.Diagnose(ctx) - assert.Equal(t, DiagnoseTypeHealthy, string(diagnose)) -} - -func TestTabletIssueDiagnoses(t *testing.T) { - type data struct { - pingable bool - ttype topodatapb.TabletType - } - var tablettests = []struct { - name string - expected DiagnoseType - errMessage string - primaryAlias string - inputs []data - }{ - {name: "healthy shard", expected: DiagnoseTypeHealthy, errMessage: "", primaryAlias: "test_cell-0000017000", inputs: []data{ - {true, topodatapb.TabletType_PRIMARY}, - {true, topodatapb.TabletType_REPLICA}, - {true, topodatapb.TabletType_REPLICA}, - }}, - {name: "non primary tablet is not pingable", expected: DiagnoseTypeHealthy, errMessage: "", primaryAlias: "test_cell-0000017000", inputs: []data{ // vtgr should do nothing - {true, 
topodatapb.TabletType_PRIMARY}, - {false, topodatapb.TabletType_REPLICA}, - {false, topodatapb.TabletType_REPLICA}, - }}, - {name: "primary tablet is not pingable", expected: DiagnoseTypeUnreachablePrimary, errMessage: "", primaryAlias: "test_cell-0000017000", inputs: []data{ // vtgr should trigger a failover - {false, topodatapb.TabletType_PRIMARY}, - {true, topodatapb.TabletType_REPLICA}, - {true, topodatapb.TabletType_REPLICA}, - }}, - {name: "no primary tablet", expected: DiagnoseTypeWrongPrimaryTablet, errMessage: "", primaryAlias: "", inputs: []data{ // vtgr should create one based on mysql - {true, topodatapb.TabletType_REPLICA}, - {true, topodatapb.TabletType_REPLICA}, - {true, topodatapb.TabletType_REPLICA}, - }}, - {name: "wrong primary in tablet types", expected: DiagnoseTypeWrongPrimaryTablet, errMessage: "", primaryAlias: "test_cell-0000017001", inputs: []data{ // shard info returns differently comparing with tablet type - {true, topodatapb.TabletType_PRIMARY}, - {true, topodatapb.TabletType_REPLICA}, - {true, topodatapb.TabletType_REPLICA}, - }}, - {name: "mysql and vttablet has different primary", expected: DiagnoseTypeWrongPrimaryTablet, errMessage: "", primaryAlias: "test_cell-0000017001", inputs: []data{ // vtgr should fix vttablet - {true, topodatapb.TabletType_REPLICA}, - {true, topodatapb.TabletType_PRIMARY}, - {true, topodatapb.TabletType_REPLICA}, - }}, - {name: "unreachable wrong vttablet primary", expected: DiagnoseTypeWrongPrimaryTablet, errMessage: "", primaryAlias: "test_cell-0000017001", inputs: []data{ // vtgr should fix vttablet - {true, topodatapb.TabletType_REPLICA}, - {false, topodatapb.TabletType_PRIMARY}, - {true, topodatapb.TabletType_REPLICA}, - }}, - {name: "unreachable uninitialized primary vttablet", expected: DiagnoseTypeUnreachablePrimary, errMessage: "", inputs: []data{ // vtgr should failover - {false, topodatapb.TabletType_REPLICA}, - {true, topodatapb.TabletType_REPLICA}, - {true, topodatapb.TabletType_REPLICA}, - }}, 
- } - for _, tt := range tablettests { - t.Run(tt.name, func(t *testing.T) { - expected := tt.expected - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ts := NewMockGRTopo(ctrl) - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - tablets := make(map[string]*topo.TabletInfo) - if tt.primaryAlias == "" { - ts. - EXPECT(). - GetShard(gomock.Any(), gomock.Eq("ks"), gomock.Eq("0")). - Return(&topo.ShardInfo{Shard: &topodatapb.Shard{}}, nil) - } - for i, input := range tt.inputs { - id := uint32(testPort0 + i) - tablet := buildTabletInfo(id, testHost, testPort0+i, input.ttype, time.Now()) - tablets[tablet.AliasString()] = tablet - var response = struct { - pingable bool - }{input.pingable} - if tt.primaryAlias == tablet.AliasString() { - si := &topo.ShardInfo{ - Shard: &topodatapb.Shard{ - PrimaryAlias: tablet.Alias, - }, - } - ts. - EXPECT(). - GetShard(gomock.Any(), gomock.Eq("ks"), gomock.Eq("0")). - Return(si, nil) - } - dbAgent. - EXPECT(). - FetchGroupView(gomock.Any(), gomock.Any()). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - return db.BuildGroupView(alias, "group", testHost, testPort0, false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }), nil - }). - AnyTimes() - tmc. - EXPECT(). 
- Ping(gomock.Any(), &topodatapb.Tablet{ - Alias: tablet.Alias, - Hostname: tablet.Hostname, - Keyspace: tablet.Keyspace, - Shard: tablet.Shard, - Type: tablet.Type, - Tags: tablet.Tags, - MysqlHostname: tablet.MysqlHostname, - MysqlPort: tablet.MysqlPort, - PrimaryTermStartTime: tablet.PrimaryTermStartTime, - }). - DoAndReturn(func(_ context.Context, t *topodatapb.Tablet) error { - if !response.pingable { - return errors.New("unreachable") - } - return nil - }). - AnyTimes() - } - ts. - EXPECT(). - GetTabletMapForShardByCell(gomock.Any(), gomock.Eq("ks"), gomock.Eq("0"), gomock.Any()). - Return(tablets, nil) - - ctx := context.Background() - cfg := &config.VTGRConfig{BootstrapGroupSize: diagnoseGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - shard.refreshTabletsInShardLocked(ctx) - diagnose, err := shard.Diagnose(ctx) - assert.Equal(t, expected, diagnose) - if tt.errMessage == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err) - assert.True(t, strings.Contains(err.Error(), tt.errMessage), err.Error()) - } - }) - } -} - -func TestMysqlIssueDiagnoses(t *testing.T) { - cfg := &config.VTGRConfig{BootstrapGroupSize: diagnoseGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - disableProtectionCfg := &config.VTGRConfig{BootstrapGroupSize: diagnoseGroupSize, MinNumReplica: 2, DisableReadOnlyProtection: true, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - heartbeatThreshold = 10 - defer func() { - heartbeatThreshold = math.MaxInt64 - }() - type data struct { - alias string - groupName string - readOnly bool - checkResult int - groupInput []db.TestGroupState - ttype topodatapb.TabletType - } - var sqltests = []struct { - name string - expected DiagnoseType - errMessage string - config *config.VTGRConfig - inputs []data - removeTablets []string // to simulate missing tablet in topology - 
}{ - {name: "healthy shard", expected: DiagnoseTypeHealthy, errMessage: "", inputs: []data{ - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "recovering primary shard", expected: DiagnoseTypeBackoffError, errMessage: "", inputs: []data{ - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "RECOVERING", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: 
strconv.Itoa(testPort0), MemberState: "RECOVERING", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "RECOVERING", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "no group in shard", expected: DiagnoseTypeShardHasNoGroup, errMessage: "", inputs: []data{ - {alias0, "", true, 0, []db.TestGroupState{ - {MemberHost: "", MemberPort: "", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {alias1, "", true, 0, []db.TestGroupState{ - {MemberHost: "", MemberPort: "", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "", true, 0, []db.TestGroupState{ - {MemberHost: "", MemberPort: "", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "fail to bootstrap with incorrect number of nodes", expected: DiagnoseTypeError, errMessage: "fail to diagnose ShardHasNoGroup with 3 nodes", inputs: []data{ - {alias0, "", true, 0, []db.TestGroupState{ - {MemberHost: "", MemberPort: "", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {alias1, "", true, 0, []db.TestGroupState{ - {MemberHost: "", MemberPort: "", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "", true, 0, []db.TestGroupState{ - {MemberHost: "", MemberPort: "", MemberState: "OFFLINE", MemberRole: ""}, - }, 
topodatapb.TabletType_REPLICA}, - }, config: &config.VTGRConfig{BootstrapGroupSize: 2, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1}}, - {name: "unreachable node", expected: DiagnoseTypeBackoffError, errMessage: "", inputs: []data{ - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "mysql and tablet has different primary", expected: DiagnoseTypeWrongPrimaryTablet, errMessage: "", inputs: []data{ // vtgr should failover vttablet - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: 
strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "mysql primary out of topology", expected: DiagnoseTypeUnreachablePrimary, errMessage: "", inputs: []data{ // vtgr should failover mysql - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: 
"SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }, removeTablets: []string{alias0}}, - {name: "one error node", expected: DiagnoseTypeUnconnectedReplica, errMessage: "", inputs: []data{ - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ERROR", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ERROR", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "inactive group with divergent state", expected: DiagnoseTypeShardHasInactiveGroup, errMessage: "", inputs: []data{ - {alias0, "group", true, 11, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "OFFLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 11, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 11, 
[]db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "two error node", expected: DiagnoseTypeInsufficientGroupSize, errMessage: "", inputs: []data{ - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ERROR", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "insufficient group member", expected: DiagnoseTypeInsufficientGroupSize, errMessage: "", inputs: []data{ - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{}, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "unconnected node", expected: DiagnoseTypeBackoffError, errMessage: "", 
inputs: []data{ - {alias0, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "unreachable primary", expected: DiagnoseTypeBackoffError, errMessage: "", inputs: []data{ - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "UNREACHABLE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "UNREACHABLE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, 
topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "UNREACHABLE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "more than one group name", expected: DiagnoseTypeError, errMessage: "fail to refreshSQLGroup: group has split brain", inputs: []data{ // vtgr should raise error - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group_xxx", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "different primary", expected: DiagnoseTypeError, errMessage: "fail to refreshSQLGroup: group has split brain", inputs: []data{ // vtgr should raise error - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", false, 0, []db.TestGroupState{ - {MemberHost: 
testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "insufficient members in group", expected: DiagnoseTypeInsufficientGroupSize, errMessage: "", inputs: []data{ - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - // the shard has insufficient member, but the primary is already read_only - // we should try to connect the replica node - {name: "insufficient members in read only shard", expected: DiagnoseTypeUnconnectedReplica, errMessage: "", inputs: []data{ - {alias0, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", 
MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "insufficient members in group with disable read only protection", expected: DiagnoseTypeUnconnectedReplica, errMessage: "", config: disableProtectionCfg, inputs: []data{ - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "read only with disable read only protection", expected: DiagnoseTypeReadOnlyShard, errMessage: "", config: disableProtectionCfg, inputs: []data{ - {alias0, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: 
"SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "read only healthy shard", expected: DiagnoseTypeReadOnlyShard, errMessage: "", inputs: []data{ - {alias0, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "inconsistent member state", expected: 
DiagnoseTypeBackoffError, errMessage: "", inputs: []data{ - {alias0, "group", true, 11, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {alias1, "group", true, 12, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias2, "group", true, math.MaxInt64, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "network partition", expected: DiagnoseTypeBackoffError, errMessage: "", inputs: []data{ - {alias0, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "OFFLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "OFFLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "start bootstrap in progress", expected: DiagnoseTypeBootstrapBackoff, errMessage: "", inputs: []data{ - {alias0, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, 
MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias1, "", true, 0, []db.TestGroupState{}, topodatapb.TabletType_REPLICA}, - {alias2, "", true, 0, []db.TestGroupState{ - {MemberHost: "", MemberPort: "", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - } - for _, tt := range sqltests { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ts := NewMockGRTopo(ctrl) - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - tablets := make(map[string]*topo.TabletInfo) - expected := tt.expected - inputMap := make(map[string]testGroupInput) - if tt.config == nil { - tt.config = cfg - } - conf := tt.config - hasPrimary := false - for i, input := range tt.inputs { - id := uint32(i) - //id := uint32(testPort0 + i) - tablet := buildTabletInfo(id, testHost, testPort0+i, input.ttype, time.Now()) - tablets[tablet.AliasString()] = tablet - inputMap[input.alias] = testGroupInput{ - input.groupName, - input.readOnly, - input.checkResult, - input.groupInput, - nil, - } - if tablet.Type == topodatapb.TabletType_PRIMARY { - si := &topo.ShardInfo{ - Shard: &topodatapb.Shard{ - PrimaryAlias: tablet.Alias, - }, - } - ts. - EXPECT(). - GetShard(gomock.Any(), gomock.Eq("ks"), gomock.Eq("0")). - Return(si, nil) - hasPrimary = true - } - dbAgent. - EXPECT(). - FetchGroupView(gomock.Any(), gomock.Any()). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - s := inputMap[alias] - view := db.BuildGroupView(alias, s.groupName, target.Hostname, target.Port, s.readOnly, s.checkResult, s.groupState) - return view, nil - }). - AnyTimes() - } - if !hasPrimary { - ts. - EXPECT(). - GetShard(gomock.Any(), gomock.Eq("ks"), gomock.Eq("0")). 
- Return(&topo.ShardInfo{Shard: &topodatapb.Shard{}}, nil) - } - for _, tid := range tt.removeTablets { - delete(tablets, tid) - } - ts. - EXPECT(). - GetTabletMapForShardByCell(gomock.Any(), gomock.Eq("ks"), gomock.Eq("0"), gomock.Any()). - Return(tablets, nil) - tmc.EXPECT().Ping(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - - ctx := context.Background() - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, conf, testPort0, true) - shard.refreshTabletsInShardLocked(ctx) - diagnose, err := shard.Diagnose(ctx) - assert.Equal(t, expected, diagnose) - if tt.errMessage == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err) - assert.True(t, strings.Contains(err.Error(), tt.errMessage), err.Error()) - } - }) - } -} - -func TestDiagnoseWithInactive(t *testing.T) { - cfg := &config.VTGRConfig{BootstrapGroupSize: diagnoseGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - type data struct { - alias string - groupName string - readOnly bool - pingable bool - groupInput []db.TestGroupState - ttype topodatapb.TabletType - } - var sqltests = []struct { - name string - expected DiagnoseType - errMessage string - config *config.VTGRConfig - inputs []data - rebootstrapGroupSize int - removeTablets []string // to simulate missing tablet in topology - }{ - // although mysql and vitess has different primary, but since this is an active shard, VTGR won't fix that - {name: "mysql and tablet has different primary", expected: DiagnoseTypeHealthy, errMessage: "", inputs: []data{ - {alias0, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias1, "group", true, true, 
[]db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias2, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "different primary with unconnected node", expected: DiagnoseTypeUnconnectedReplica, errMessage: "", inputs: []data{ - {alias0, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias1, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias2, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "primary tablet is not pingable", expected: DiagnoseTypeHealthy, errMessage: "", inputs: []data{ - {alias0, "group", true, false, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: 
"ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - // This is a read only shard, but since it's an inactive shard we will diagnose it as healthy - {name: "read only healthy shard", expected: DiagnoseTypeHealthy, errMessage: "", inputs: []data{ - {alias0, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: 
"ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "writable shard", expected: DiagnoseTypeInsufficientGroupSize, errMessage: "", inputs: []data{ - {alias0, "group", false, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: 
"error when there are only two nodes", expected: DiagnoseTypeError, errMessage: "fail to diagnose ShardHasInactiveGroup with 3 nodes expecting 2", inputs: []data{ - {alias0, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {alias1, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }, rebootstrapGroupSize: 2}, - } - for _, tt := range sqltests { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - expected := tt.expected - inputMap := make(map[string]testGroupInput) - pingable := make(map[string]bool) - if tt.config == nil { - tt.config = cfg - } - conf := tt.config - for i, input := range tt.inputs { - tablet := buildTabletInfo(uint32(i), testHost, testPort0+i, input.ttype, time.Now()) - testutil.AddTablet(ctx, t, ts, tablet.Tablet, nil) - inputMap[input.alias] = testGroupInput{ - input.groupName, - input.readOnly, - 0, - input.groupInput, - nil, - } - pingable[input.alias] = input.pingable - if tablet.Type == topodatapb.TabletType_PRIMARY { - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet.Alias - return nil - }) - } - dbAgent. - EXPECT(). - FetchGroupView(gomock.Any(), gomock.Any()). 
- DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - s := inputMap[alias] - view := db.BuildGroupView(alias, s.groupName, target.Hostname, target.Port, s.readOnly, s.checkResult, s.groupState) - return view, nil - }). - AnyTimes() - tmc. - EXPECT(). - Ping(gomock.Any(), &topodatapb.Tablet{ - Alias: tablet.Alias, - Hostname: tablet.Hostname, - Keyspace: tablet.Keyspace, - Shard: tablet.Shard, - Type: tablet.Type, - Tags: tablet.Tags, - MysqlHostname: tablet.MysqlHostname, - MysqlPort: tablet.MysqlPort, - PrimaryTermStartTime: tablet.PrimaryTermStartTime, - }). - DoAndReturn(func(_ context.Context, t *topodatapb.Tablet) error { - if !pingable[tablet.Alias.String()] { - return errors.New("unreachable") - } - return nil - }). - AnyTimes() - } - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, conf, testPort0, false) - if tt.rebootstrapGroupSize != 0 { - shard.OverrideRebootstrapGroupSize(tt.rebootstrapGroupSize) - } - shard.refreshTabletsInShardLocked(ctx) - diagnose, err := shard.Diagnose(ctx) - assert.Equal(t, expected, diagnose) - if tt.errMessage == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err) - assert.True(t, strings.Contains(err.Error(), tt.errMessage), err.Error()) - } - }) - } -} - -func TestGroupStatusRecorder(t *testing.T) { - r := &groupGTIDRecorder{} - - err := r.recordGroupStatus("group1", true) - assert.NoError(t, err) - assert.Equal(t, r.name, "group1") - assert.Equal(t, r.hasActive, true) - - err = r.recordGroupStatus("group2", false) - assert.Error(t, err, "group has more than one group name") - assert.Equal(t, r.name, "group1") - - err = r.recordGroupStatus("group1", false) - assert.NoError(t, err) - assert.Equal(t, r.name, "group1") - assert.Equal(t, r.hasActive, true) - - pos1, err := mysql.ParsePosition(mysql.Mysql56FlavorID, "264a8230-67d2-11eb-acdd-0a8d91f24125:1-22:1000019-1000021") 
- assert.NoError(t, err) - inst1 := &grInstance{alias: "alias1"} - r.recordGroupGTIDs(pos1.GTIDSet, inst1) - pos2, err := mysql.ParsePosition(mysql.Mysql56FlavorID, "264a8230-67d2-11eb-acdd-0a8d91f24125:1-1000021") - assert.NoError(t, err) - inst2 := &grInstance{alias: "alias2"} - r.recordGroupGTIDs(pos2.GTIDSet, inst2) - assert.Equal(t, len(r.gtidWithInstances), 2) - assert.Equal(t, r.gtidWithInstances[0].instance, inst1) - assert.Equal(t, pos1.GTIDSet.Equal(r.gtidWithInstances[0].gtids), true) - assert.Equal(t, r.gtidWithInstances[1].instance, inst2) - assert.Equal(t, pos2.GTIDSet.Equal(r.gtidWithInstances[1].gtids), true) -} diff --git a/go/vt/vtgr/controller/group.go b/go/vt/vtgr/controller/group.go deleted file mode 100644 index 3469d63acbb..00000000000 --- a/go/vt/vtgr/controller/group.go +++ /dev/null @@ -1,443 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "fmt" - "math" - "sort" - "strings" - "sync" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/stats" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vtgr/db" - "vitess.io/vitess/go/vt/vtgr/inst" - "vitess.io/vitess/go/vt/vtgr/log" -) - -var ( - groupOnlineSize = stats.NewGaugesWithMultiLabels("MysqlGroupOnlineSize", "Online MySQL server in the group", []string{"Keyspace", "Shard"}) - isLostQuorum = stats.NewGaugesWithMultiLabels("MysqlGroupLostQuorum", "If MySQL group lost quorum", []string{"Keyspace", "Shard"}) - - heartbeatThreshold int -) - -func init() { - servenv.OnParseFor("vtgr", func(fs *pflag.FlagSet) { - fs.IntVar(&heartbeatThreshold, "group_heartbeat_threshold", 0, "VTGR will trigger backoff on inconsistent state if the group heartbeat staleness exceeds this threshold (in seconds). Should be used along with --enable_heartbeat_check.") - }) -} - -// SQLGroup contains views from all the nodes within the shard -type SQLGroup struct { - views []*db.GroupView - resolvedView *ResolvedView - logger *log.Logger - expectedBootstrapSize int - // rebootstrapSize is init to 0 - // when it is not 0, we allow some nodes to be unhealthy during a rebootstrap - rebootstrapSize int - singlePrimary bool - heartbeatThreshold int - statsTags []string - sync.Mutex -} - -// NewSQLGroup creates a new SQLGroup -func NewSQLGroup(size int, singlePrimary bool, keyspace, shard string) *SQLGroup { - return &SQLGroup{ - expectedBootstrapSize: size, - rebootstrapSize: 0, - singlePrimary: singlePrimary, - statsTags: []string{keyspace, shard}, - logger: log.NewVTGRLogger(keyspace, shard), - heartbeatThreshold: heartbeatThreshold, - } -} - -// ResolvedView is the resolved view -type ResolvedView struct { - groupName string - view map[inst.InstanceKey]db.GroupMember - logger *log.Logger -} - -// recordView adds a view to the group -func (group *SQLGroup) recordView(view *db.GroupView) { - group.Lock() - defer group.Unlock() - 
group.views = append(group.views, view) -} - -// overrideView overrides a view to the group -func (group *SQLGroup) overrideView(views []*db.GroupView) { - group.Lock() - defer group.Unlock() - group.views = views - group.resolveLocked() -} - -// clear reset the views -func (group *SQLGroup) clear() { - group.Lock() - defer group.Unlock() - group.views = nil - group.resolvedView = nil -} - -// GetViews returns views from everyone in the group -func (group *SQLGroup) GetViews() []*db.GroupView { - group.Lock() - defer group.Unlock() - return group.views -} - -// GetGroupName returns the group name -func (group *SQLGroup) GetGroupName() string { - group.Lock() - defer group.Unlock() - rv := group.resolvedView - return rv.groupName -} - -// GetOnlineGroupInfo returns number of online members in the group and also if the primary is read only -func (group *SQLGroup) GetOnlineGroupInfo() (int, bool) { - group.Lock() - defer group.Unlock() - rv := group.resolvedView - view := rv.view - onlineSize := 0 - isPrimaryReadOnly := false - for _, status := range view { - if status.State == db.ONLINE { - onlineSize++ - } - if status.Role == db.PRIMARY { - isPrimaryReadOnly = isPrimaryReadOnly || status.ReadOnly - } - } - return onlineSize, isPrimaryReadOnly -} - -// IsUnconnectedReplica checks if the node is connected to a group -func (group *SQLGroup) IsUnconnectedReplica(instanceKey *inst.InstanceKey) bool { - if instanceKey == nil { - return false - } - group.Lock() - defer group.Unlock() - rv := group.resolvedView - view := rv.view - status, ok := view[*instanceKey] - if !ok { - return true - } - return status.State != db.ONLINE && status.State != db.RECOVERING -} - -// IsAllOfflineOrError returns true if all the nodes are in offline mode -func (group *SQLGroup) IsAllOfflineOrError() bool { - group.Lock() - defer group.Unlock() - rv := group.resolvedView - view := rv.view - for _, status := range view { - if status.State != db.OFFLINE && status.State != db.ERROR { - return 
false - } - } - return true -} - -// GetStatus returns GroupMember status for given a host -func (group *SQLGroup) GetStatus(instanceKey *inst.InstanceKey) *db.GroupMember { - if instanceKey == nil { - return nil - } - group.Lock() - defer group.Unlock() - rv := group.resolvedView - view := rv.view - status, ok := view[*instanceKey] - if !ok { - return nil - } - return &status -} - -// IsSafeToBootstrap checks if it is safe to bootstrap a mysql group -func (group *SQLGroup) IsSafeToBootstrap() bool { - group.Lock() - defer group.Unlock() - // for bootstrap we require group at least has quorum number of views - // this is to make sure we don't bootstrap a group improperly - if len(group.views) < group.expectedBootstrapSize { - group.logger.Errorf("[sql_group] cannot bootstrap because we only have %v views | expected %v", len(group.views), group.expectedBootstrapSize) - return false - } - return group.isSafeToRebootstrapLocked() -} - -// IsSafeToRebootstrap checks if it is safe to rebootstrap a group -// It does not check group size as IsSafeToBootstrap, since when we -// reach here it means VTGR already checked there were group expectedBootstrapSize -// number of nodes in topo server, therefore we just rebootstrap -// as long as we can reach all the nodes in topo server -func (group *SQLGroup) IsSafeToRebootstrap() bool { - group.Lock() - defer group.Unlock() - return group.isSafeToRebootstrapLocked() -} - -func (group *SQLGroup) isSafeToRebootstrapLocked() bool { - // we think it is safe to bootstrap a group if all the views don't have a primary host - host, port, _ := group.getPrimaryLocked() - if host != "" || port != 0 { - group.logger.Warningf("not safe to bootstrap sql group because %v/%v might already be primary", host, port) - } - return host == "" && port == 0 -} - -// GetPrimary returns the hostname, port of the primary that everyone agreed on -// isActive bool indicates if there is any node in the group whose primary is "ONLINE" -func (group *SQLGroup) 
GetPrimary() (string, int, bool) { - group.Lock() - defer group.Unlock() - return group.getPrimaryLocked() -} - -func (group *SQLGroup) getPrimaryLocked() (string, int, bool) { - rv := group.resolvedView - view := rv.view - for instance, status := range view { - if status.Role == db.PRIMARY { - return instance.Hostname, instance.Port, status.State == db.ONLINE - } - } - return "", 0, false -} - -// Resolve merges the views into a map -func (group *SQLGroup) Resolve() error { - group.Lock() - defer group.Unlock() - return group.resolveLocked() -} -func (group *SQLGroup) resolveLocked() error { - rv := &ResolvedView{logger: group.logger} - group.resolvedView = rv - // a node that is not in the group might be outlier with big lag - // iterate over all views to get global minStalenessResult first - minStalenessResult := math.MaxInt32 - for _, view := range group.views { - if view.HeartbeatStaleness < minStalenessResult { - minStalenessResult = view.HeartbeatStaleness - } - } - m := make(map[inst.InstanceKey]db.GroupMember) - for _, view := range group.views { - if rv.groupName == "" && view.GroupName != "" { - rv.groupName = view.GroupName - } - if view.GroupName != "" && rv.groupName != view.GroupName { - group.logger.Errorf("previous group name %v found %v", rv.groupName, view.GroupName) - return db.ErrGroupSplitBrain - } - for _, member := range view.UnresolvedMembers { - instance := view.CreateInstanceKey(member) - memberState := member.State - memberRole := member.Role - isReadOnly := member.ReadOnly - st, ok := m[instance] - if !ok { - m[instance] = db.GroupMember{ - HostName: instance.Hostname, - Port: instance.Port, - State: memberState, - Role: memberRole, - ReadOnly: isReadOnly, - } - continue - } - if st.State == memberState && st.Role == memberRole && st.ReadOnly == isReadOnly { - continue - } - // Members in a group should eventually converge on a state - // if there is a partition, then a node should be removed from - // a group. 
If a node is reported as ONLINE together with - // some other state, we back off if we see a node with diverged state - if memberState != db.UNKNOWNSTATE && - st.State != db.UNKNOWNSTATE && - st.State != memberState && - (st.State == db.ONLINE || memberState == db.ONLINE) { - group.logger.Warningf("found inconsistent member state for %v: %v vs %v", instance.Hostname, st.State, memberState) - if group.heartbeatThreshold != 0 && - // Check minStalenessResult among the group is not math.MaxInt32 - // which means at least one node returns the lag from _vt.heartbeat table - // otherwise we don't trigger backoff on inconsistent state - minStalenessResult != math.MaxInt32 && - minStalenessResult >= group.heartbeatThreshold { - group.logger.Warningf("ErrGroupBackoffError by staled heartbeat check %v", minStalenessResult) - var sb strings.Builder - for _, view := range group.views { - sb.WriteString(fmt.Sprintf("%v staleness=%v\n", view.MySQLHost, view.HeartbeatStaleness)) - } - group.logger.Warningf("%v", sb.String()) - return db.ErrGroupBackoffError - } - } - m[instance] = db.GroupMember{ - HostName: instance.Hostname, - Port: instance.Port, - State: group.mergeState(st.State, memberState), - Role: group.mergeRole(st.Role, memberRole), - ReadOnly: st.ReadOnly || isReadOnly, - } - } - } - rv.view = m - return group.resolvedView.validate(group.singlePrimary, group.statsTags) -} - -func (rv *ResolvedView) validate(singlePrimary bool, statsTags []string) error { - if !rv.hasGroup() { - rv.logger.Info("Resolved view does not have a group") - return nil - } - hasPrimary := false - primaryState := db.UNKNOWNSTATE - var onlineCount, recoveringCount, unreachableCount, offlineCount, errorCount int - for _, status := range rv.view { - if status.Role == db.PRIMARY { - if singlePrimary && hasPrimary { - rv.logger.Errorf("Found more than one primary in the group") - return db.ErrGroupSplitBrain - } - hasPrimary = true - primaryState = status.State - if status.State != db.ONLINE { - 
rv.logger.Warningf("Found a PRIMARY not ONLINE (%v)", status.State) - } - } - switch status.State { - case db.ONLINE: - onlineCount++ - case db.UNREACHABLE: - unreachableCount++ - case db.OFFLINE: - offlineCount++ - case db.ERROR: - errorCount++ - case db.RECOVERING: - recoveringCount++ - } - } - groupOnlineSize.Set(statsTags, int64(onlineCount)) - if unreachableCount > 0 || errorCount > 0 || offlineCount > 0 { - rv.logger.Warningf("Some of nodes are unconnected in the group. hasPrimary=%v (%v), online_count=%v, recovering_count=%v, unreachable_count=%v, offline_count=%v, error_count=%v", hasPrimary, primaryState, onlineCount, recoveringCount, unreachableCount, offlineCount, errorCount) - } - if unreachableCount >= len(rv.view)/2+1 { - rv.logger.Errorf("Backoff error by quorum unreachable: found %v number of UNREACHABLE nodes while quorum is %v", unreachableCount, len(rv.view)/2+1) - isLostQuorum.Set(statsTags, 1) - } else { - isLostQuorum.Set(statsTags, 0) - } - // In theory there should be no UNREACHABLE nodes - // raise ErrGroupBackoffError to backoff and wait - // If we lost quorum, then the group is not writable - // If we still have a functioning group, we can backoff and wait - // the unreachable node should either be expelled or we have a frozen view - // Note: this means we should set group_replication_unreachable_majority_timeout - // greater than 0. Otherwise VTGR can see all nodes are ONLINE when a single node - // is partitioned and end up doing nothing. 
- if unreachableCount > 0 { - return db.ErrGroupBackoffError - } - // Ongoing bootstrap, we should backoff and wait - if recoveringCount == 1 && (offlineCount+recoveringCount == len(rv.view)) { - rv.logger.Warningf("Group has one recovery node with all others in offline mode") - return db.ErrGroupOngoingBootstrap - } - // We don't have quorum number of unreachable, but the primary is not online - // This most likely means there is a failover in the group we should back off and wait - if hasPrimary && primaryState != db.ONLINE { - rv.logger.Warningf("Found a PRIMARY that is not ONLINE (%v)", primaryState) - return db.ErrGroupBackoffError - } - // If all the node in view are OFFLINE or ERROR, it is an inactive group - // It is expected to have no primary in this case - if !hasPrimary && (offlineCount+errorCount != len(rv.view)) { - rv.logger.Warningf("Group is NOT all offline or error without a primary node") - return db.ErrGroupBackoffError - } - return nil -} - -func (rv *ResolvedView) hasGroup() bool { - return rv.groupName != "" -} - -func (group *SQLGroup) mergeState(s1, s2 db.MemberState) db.MemberState { - return db.MemberState(group.maxStatus(int(s1), int(s2))) -} - -func (group *SQLGroup) mergeRole(r1, r2 db.MemberRole) db.MemberRole { - return db.MemberRole(group.maxStatus(int(r1), int(r2))) -} - -func (group *SQLGroup) maxStatus(a, b int) int { - if a > b { - return a - } - return b -} - -// ToString returns a string representatino of the sql group -func (group *SQLGroup) ToString() string { - group.Lock() - defer group.Unlock() - var sb strings.Builder - views := group.views - for _, view := range views { - sb.WriteString(fmt.Sprintf("[%s] SQLGroup group=%s", view.TabletAlias, view.GroupName)) - for _, member := range view.UnresolvedMembers { - sb.WriteString(fmt.Sprintf(" | %s %s %s readonly=%v", member.HostName, member.Role, member.State, member.ReadOnly)) - } - sb.WriteString("\n") - } - rv := group.resolvedView - if rv != nil { - 
sb.WriteString("[resolved_view]\n") - sb.WriteString(fmt.Sprintf("group_name=%v\n", rv.groupName)) - keys := make([]inst.InstanceKey, 0, len(rv.view)) - for k := range rv.view { - keys = append(keys, k) - } - sort.Slice(keys, func(i, j int) bool { - return keys[i].Hostname < keys[j].Hostname - }) - for _, instance := range keys { - status := rv.view[instance] - sb.WriteString(fmt.Sprintf("[%s] state=%v role=%v readonly=%v\n", instance.Hostname, status.State, status.Role, status.ReadOnly)) - - } - } - return sb.String() -} diff --git a/go/vt/vtgr/controller/group_test.go b/go/vt/vtgr/controller/group_test.go deleted file mode 100644 index edfeca14500..00000000000 --- a/go/vt/vtgr/controller/group_test.go +++ /dev/null @@ -1,454 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "math" - "testing" - - "vitess.io/vitess/go/vt/vtgr/log" - - "vitess.io/vitess/go/vt/vtgr/db" - "vitess.io/vitess/go/vt/vtgr/inst" - - "github.com/stretchr/testify/assert" -) - -func TestSQLGroupToString(t *testing.T) { - group := NewSQLGroup(2, true, "ks", "0") - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group_name" - var l1 []*db.GroupMember - var l2 []*db.GroupMember - m1 := db.NewGroupMember("ONLINE", "PRIMARY", "host1", 10, false) - m2 := db.NewGroupMember("ONLINE", "SECONDARY", "host2", 10, true) - m3 := db.NewGroupMember("OFFLINE", "SECONDARY", "host3", 10, true) - l1 = append(l1, m1) - l1 = append(l1, m2) - v1.UnresolvedMembers = l1 - l2 = append(l2, m3) - v2 := db.NewGroupView("v2", "host2", 10) - v2.GroupName = "group_name" - v2.UnresolvedMembers = l2 - group.recordView(v2) - group.recordView(v1) - assert.Equal(t, `[v2] SQLGroup group=group_name | host3 SECONDARY OFFLINE readonly=true -[v1] SQLGroup group=group_name | host1 PRIMARY ONLINE readonly=false | host2 SECONDARY ONLINE readonly=true -`, group.ToString()) - group.Resolve() - assert.Equal(t, `[v2] SQLGroup group=group_name | host3 SECONDARY OFFLINE readonly=true -[v1] SQLGroup group=group_name | host1 PRIMARY ONLINE readonly=false | host2 SECONDARY ONLINE readonly=true -[resolved_view] -group_name=group_name -[host1] state=ONLINE role=PRIMARY readonly=false -[host2] state=ONLINE role=SECONDARY readonly=true -[host3] state=OFFLINE role=SECONDARY readonly=true -`, group.ToString()) -} - -func TestGetGroupName(t *testing.T) { - group := NewSQLGroup(3, true, "ks", "0") - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "host1", 10, true), - } - group.recordView(v1) - v2 := db.NewGroupView("v2", "host2", 10) - v2.GroupName = "group" - v2.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "", 0, true), - } - group.recordView(v2) - 
err := group.Resolve() - assert.NoError(t, err) - name := group.GetGroupName() - assert.Equal(t, "group", name) - v3 := db.NewGroupView("v3", "host3", 10) - v3.GroupName = "group_foo" - group.recordView(v3) - err = group.Resolve() - assert.Errorf(t, err, "group has split brain") - name = group.GetGroupName() - // group keeps the group name before finding a divergent group name - assert.Equal(t, "group", name) -} - -func TestIsActiveWithMultiplePrimary(t *testing.T) { - group := NewSQLGroup(2, true, "ks", "0") - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "PRIMARY", "host1", 10, false), - db.NewGroupMember("ONLINE", "SECONDARY", "host2", 10, true), - } - group.recordView(v1) - v2 := db.NewGroupView("v2", "host2", 10) - v2.GroupName = "group" - v2.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "SECONDARY", "host1", 10, true), - db.NewGroupMember("ONLINE", "PRIMARY", "host2", 10, false), - } - group.recordView(v2) - err := group.Resolve() - assert.Errorf(t, err, "group network partition") -} - -func TestIsSafeToBootstrap(t *testing.T) { - group := NewSQLGroup(1, true, "ks", "0") - isSafe := group.IsSafeToBootstrap() - assert.False(t, isSafe) - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "", 0, true), - db.NewGroupMember("OFFLINE", "", "", 0, true), - } - group.recordView(v1) - group.Resolve() - isSafe = group.IsSafeToBootstrap() - assert.True(t, isSafe) -} - -func TestIsSafeToBootstrapWithPrimary(t *testing.T) { - group := NewSQLGroup(1, true, "ks", "0") - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - // it is not safe to bootstrap if we see a primary node in group - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "PRIMARY", "host1", 0, false), - db.NewGroupMember("OFFLINE", "", "", 0, true), - } - 
group.recordView(v1) - group.Resolve() - isSafe := group.IsSafeToBootstrap() - assert.False(t, isSafe) -} - -func TestIsUnconnectedReplica(t *testing.T) { - group := NewSQLGroup(1, true, "ks", "0") - isSafe := group.IsSafeToBootstrap() - assert.False(t, isSafe) - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "PRIMARY", "host1", 10, false), - db.NewGroupMember("ONLINE", "SECONDARY", "host2", 10, true), - } - group.recordView(v1) - group.Resolve() - isUnconnected := group.IsUnconnectedReplica(&inst.InstanceKey{Hostname: "host2", Port: 10}) - assert.False(t, isUnconnected) -} - -func TestGetOnlineGroupSizeFromPrimary(t *testing.T) { - group := NewSQLGroup(1, true, "ks", "0") - isSafe := group.IsSafeToBootstrap() - assert.False(t, isSafe) - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "PRIMARY", "host1", 10, false), - db.NewGroupMember("ONLINE", "SECONDARY", "host2", 10, true), - db.NewGroupMember("RECOVERING", "SECONDARY", "host3", 10, true), - } - v2 := db.NewGroupView("v2", "host2", 10) - v2.GroupName = "group" - v2.UnresolvedMembers = []*db.GroupMember{} - group.recordView(v1) - group.recordView(v2) - group.Resolve() - size, readOnly := group.GetOnlineGroupInfo() - assert.Equal(t, 2, size) - assert.False(t, readOnly) -} - -func TestNetworkPartition(t *testing.T) { - group := NewSQLGroup(3, true, "ks", "0") - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "PRIMARY", "host1", 10, false), - db.NewGroupMember("UNREACHABLE", "SECONDARY", "host2", 10, true), - db.NewGroupMember("UNREACHABLE", "SECONDARY", "host3", 10, true), - } - v2 := db.NewGroupView("v2", "host2", 10) - v2.GroupName = "group" - v2.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "host2", 10, true), - 
} - v3 := db.NewGroupView("v3", "host3", 10) - v3.GroupName = "group" - v3.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "host3", 10, true), - } - group.recordView(v1) - group.recordView(v2) - group.recordView(v3) - err := group.Resolve() - assert.EqualErrorf(t, err, "group backoff error", err.Error()) - rv := group.resolvedView - assert.Equal(t, "group", rv.groupName) - assert.Equal(t, map[inst.InstanceKey]db.GroupMember{ - {Hostname: "host1", Port: 10}: {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {Hostname: "host2", Port: 10}: {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.UNREACHABLE, ReadOnly: true}, - {Hostname: "host3", Port: 10}: {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.UNREACHABLE, ReadOnly: true}, - }, rv.view) -} - -func TestInconsistentState(t *testing.T) { - group := NewSQLGroup(3, true, "ks", "0") - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.HeartbeatStaleness = 11 - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "PRIMARY", "host1", 10, false), - db.NewGroupMember("ONLINE", "SECONDARY", "host2", 10, true), - db.NewGroupMember("ONLINE", "SECONDARY", "host3", 10, true), - } - v2 := db.NewGroupView("v2", "host2", 10) - v2.GroupName = "group" - v2.HeartbeatStaleness = 11 - v2.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "host2", 10, true), - } - v3 := db.NewGroupView("v3", "host3", 10) - v3.GroupName = "group" - v3.HeartbeatStaleness = 13 - v3.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "host3", 10, true), - } - group.recordView(v1) - group.recordView(v2) - group.recordView(v3) - group.heartbeatThreshold = 10 - err := group.Resolve() - assert.EqualErrorf(t, err, "group backoff error", err.Error()) - rv := group.resolvedView - assert.Equal(t, "group", rv.groupName) - assert.Nil(t, rv.view) -} - -func TestInconsistentStateWithInvalidStaleResult(t 
*testing.T) { - group := NewSQLGroup(3, true, "ks", "0") - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.HeartbeatStaleness = math.MaxInt32 - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "PRIMARY", "host1", 10, false), - db.NewGroupMember("ONLINE", "SECONDARY", "host2", 10, true), - db.NewGroupMember("ONLINE", "SECONDARY", "host3", 10, true), - } - v2 := db.NewGroupView("v2", "host2", 10) - v2.GroupName = "group" - v2.HeartbeatStaleness = math.MaxInt32 - v2.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "host2", 10, true), - } - v3 := db.NewGroupView("v3", "host3", 10) - v3.GroupName = "group" - v3.HeartbeatStaleness = math.MaxInt32 - v3.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "host3", 10, true), - } - group.recordView(v1) - group.recordView(v2) - group.recordView(v3) - group.heartbeatThreshold = 10 - err := group.Resolve() - // Same setup as TestInconsistentState but because HeartbeatStaleness are all MaxInt32 - // the backoff is not triggered - assert.NoError(t, err) - rv := group.resolvedView - assert.Equal(t, "group", rv.groupName) -} - -func TestInconsistentUnknownState(t *testing.T) { - group := NewSQLGroup(3, true, "ks", "0") - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "PRIMARY", "host1", 10, false), - db.NewGroupMember("RECOVERING", "SECONDARY", "host2", 10, true), - db.NewGroupMember("ONLINE", "SECONDARY", "host3", 10, true), - } - v2 := db.NewGroupView("v2", "host2", 10) - v2.GroupName = "group" - v2.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("", "", "host2", 10, true), - } - v3 := db.NewGroupView("v3", "host3", 10) - v3.GroupName = "group" - v3.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "SECONDARY", "host3", 10, true), - } - group.recordView(v1) - group.recordView(v2) - group.recordView(v3) - err := 
group.Resolve() - // host 2 reports itself with empty state - // therefore we shouldn't raise error even with inconsistent state - assert.NoError(t, err) - rv := group.resolvedView - assert.Equal(t, "group", rv.groupName) - assert.Equal(t, map[inst.InstanceKey]db.GroupMember{ - {Hostname: "host1", Port: 10}: {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {Hostname: "host2", Port: 10}: {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.RECOVERING, ReadOnly: true}, - {Hostname: "host3", Port: 10}: {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - }, rv.view) -} - -func TestIsBootstrapInProcess(t *testing.T) { - group := NewSQLGroup(3, true, "ks", "0") - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("RECOVERING", "SECONDARY", "host1", 10, false), - } - v2 := db.NewGroupView("v2", "host2", 10) - v2.GroupName = "group" - v2.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "host2", 10, false), - } - v3 := db.NewGroupView("v3", "host", 10) - v3.GroupName = "group" - v3.UnresolvedMembers = []*db.GroupMember{} - group.recordView(v1) - group.recordView(v2) - group.recordView(v3) - err := group.Resolve() - assert.Errorf(t, err, "group transient error") -} - -func TestResolve(t *testing.T) { - healthyView := []*db.GroupMember{ - {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - } - var testCases = []struct { - testName string - views []*db.GroupView - expected *ResolvedView - errorMsg string - }{ - {"test healthy shard", []*db.GroupView{ - {MySQLHost: "host1", MySQLPort: 10, GroupName: "group", UnresolvedMembers: healthyView}, - {MySQLHost: "host2", MySQLPort: 10, GroupName: 
"group", UnresolvedMembers: healthyView}, - {MySQLHost: "host3", MySQLPort: 10, GroupName: "group", UnresolvedMembers: healthyView}, - }, &ResolvedView{"group", map[inst.InstanceKey]db.GroupMember{ - {Hostname: "host1", Port: 10}: {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {Hostname: "host2", Port: 10}: {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - {Hostname: "host3", Port: 10}: {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - }, nil}, ""}, - {"test readonly with unreachable primary", []*db.GroupView{ // host1 is unreachable - {MySQLHost: "host2", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: false}, - }}, - {MySQLHost: "host3", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: false}, - {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - }}, - }, &ResolvedView{"group", map[inst.InstanceKey]db.GroupMember{ - {Hostname: "host1", Port: 10}: {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {Hostname: "host2", Port: 10}: {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - {Hostname: "host3", Port: 10}: {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - }, nil}, ""}, - {"test split brain by group name", []*db.GroupView{ - {MySQLHost: "host1", MySQLPort: 10, GroupName: "group", UnresolvedMembers: healthyView}, - {MySQLHost: "host2", MySQLPort: 10, 
GroupName: "group1", UnresolvedMembers: healthyView}, - {MySQLHost: "host3", MySQLPort: 10, GroupName: "group", UnresolvedMembers: healthyView}, - }, nil, "group has split brain"}, - {"test empty hostname", []*db.GroupView{ - {MySQLHost: "host1", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "", Port: 0, Role: db.UNKNOWNROLE, State: db.OFFLINE, ReadOnly: true}, - }}, - {MySQLHost: "host2", MySQLPort: 10, GroupName: "", UnresolvedMembers: []*db.GroupMember{ - {HostName: "host2", Port: 10, Role: db.UNKNOWNROLE, State: db.OFFLINE, ReadOnly: true}, - }}, - {MySQLHost: "host3", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "host3", Port: 10, Role: db.UNKNOWNROLE, State: db.OFFLINE, ReadOnly: true}, - }}, - }, &ResolvedView{"group", map[inst.InstanceKey]db.GroupMember{ - {Hostname: "host1", Port: 10}: {HostName: "host1", Port: 10, Role: db.UNKNOWNROLE, State: db.OFFLINE, ReadOnly: true}, - {Hostname: "host2", Port: 10}: {HostName: "host2", Port: 10, Role: db.UNKNOWNROLE, State: db.OFFLINE, ReadOnly: true}, - {Hostname: "host3", Port: 10}: {HostName: "host3", Port: 10, Role: db.UNKNOWNROLE, State: db.OFFLINE, ReadOnly: true}, - }, nil}, ""}, - {"test network partition by majority unreachable", []*db.GroupView{ - {MySQLHost: "host1", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.UNREACHABLE, ReadOnly: false}, - {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.UNREACHABLE, ReadOnly: true}, - }}, - }, nil, "group backoff error"}, - {"test no network partition with less then majority unreachable", []*db.GroupView{ - {MySQLHost: "host1", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {HostName: "host2", 
Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.UNREACHABLE, ReadOnly: false}, - }}, - {MySQLHost: "host2", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.UNREACHABLE, ReadOnly: false}, - }}, - }, &ResolvedView{"group", map[inst.InstanceKey]db.GroupMember{ - {Hostname: "host1", Port: 10}: {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {Hostname: "host2", Port: 10}: {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - {Hostname: "host3", Port: 10}: {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.UNREACHABLE, ReadOnly: false}, - }, nil}, "group backoff error"}, - {"test network partition by unreachable primary", []*db.GroupView{ - {MySQLHost: "host2", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.UNREACHABLE}, - {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE}, - {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.ONLINE}, - }}, - {MySQLHost: "host3", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.UNREACHABLE}, - {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE}, - {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.ONLINE}, - }}, - }, nil, "group backoff error"}, - {"test bootstrap ongoing", []*db.GroupView{ - {MySQLHost: "host1", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "", Port: 0, Role: db.SECONDARY, State: db.RECOVERING, ReadOnly: true}, - }}, - {MySQLHost: "host2", MySQLPort: 
10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{}}, - {MySQLHost: "host3", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{}}, - }, nil, "group ongoing bootstrap"}, - } - for _, testCase := range testCases { - t.Run(testCase.testName, func(t *testing.T) { - group := SQLGroup{views: testCase.views, statsTags: []string{"ks", "0"}, logger: log.NewVTGRLogger("ks", "0")} - err := group.Resolve() - if testCase.errorMsg != "" { - assert.EqualError(t, err, testCase.errorMsg) - } else { - assert.NoError(t, err) - } - if testCase.expected != nil { - rv := group.resolvedView - expected := testCase.expected - assert.Equal(t, expected.view, rv.view) - assert.Equal(t, expected.groupName, rv.groupName) - } - }) - } -} diff --git a/go/vt/vtgr/controller/mock_refresh.go b/go/vt/vtgr/controller/mock_refresh.go deleted file mode 100644 index 30ed5a187e7..00000000000 --- a/go/vt/vtgr/controller/mock_refresh.go +++ /dev/null @@ -1,148 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: go/vt/vtgr/controller/refresh.go -package controller - -import ( - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - context "golang.org/x/net/context" - - topodata "vitess.io/vitess/go/vt/proto/topodata" - topo "vitess.io/vitess/go/vt/topo" -) - -// MockGRTopo is a mock of GRTopo interface. -type MockGRTopo struct { - ctrl *gomock.Controller - recorder *MockGRTopoMockRecorder -} - -// MockGRTopoMockRecorder is the mock recorder for MockGRTopo. -type MockGRTopoMockRecorder struct { - mock *MockGRTopo -} - -// NewMockGRTopo creates a new mock instance. -func NewMockGRTopo(ctrl *gomock.Controller) *MockGRTopo { - mock := &MockGRTopo{ctrl: ctrl} - mock.recorder = &MockGRTopoMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockGRTopo) EXPECT() *MockGRTopoMockRecorder { - return m.recorder -} - -// GetShard mocks base method. 
-func (m *MockGRTopo) GetShard(ctx context.Context, keyspace, shard string) (*topo.ShardInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetShard", ctx, keyspace, shard) - ret0, _ := ret[0].(*topo.ShardInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetShard indicates an expected call of GetShard. -func (mr *MockGRTopoMockRecorder) GetShard(ctx, keyspace, shard any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetShard", reflect.TypeOf((*MockGRTopo)(nil).GetShard), ctx, keyspace, shard) -} - -// GetShardNames mocks base method. -func (m *MockGRTopo) GetShardNames(ctx context.Context, keyspace string) ([]string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetShardNames", ctx, keyspace) - ret0, _ := ret[0].([]string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetShardNames indicates an expected call of GetShardNames. -func (mr *MockGRTopoMockRecorder) GetShardNames(ctx, keyspace any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetShardNames", reflect.TypeOf((*MockGRTopo)(nil).GetShardNames), ctx, keyspace) -} - -// GetTabletMapForShardByCell mocks base method. -func (m *MockGRTopo) GetTabletMapForShardByCell(ctx context.Context, keyspace, shard string, cells []string) (map[string]*topo.TabletInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTabletMapForShardByCell", ctx, keyspace, shard, cells) - ret0, _ := ret[0].(map[string]*topo.TabletInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetTabletMapForShardByCell indicates an expected call of GetTabletMapForShardByCell. 
-func (mr *MockGRTopoMockRecorder) GetTabletMapForShardByCell(ctx, keyspace, shard, cells any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTabletMapForShardByCell", reflect.TypeOf((*MockGRTopo)(nil).GetTabletMapForShardByCell), ctx, keyspace, shard, cells) -} - -// LockShard mocks base method. -func (m *MockGRTopo) LockShard(ctx context.Context, keyspace, shard, action string) (context.Context, func(*error), error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LockShard", ctx, keyspace, shard, action) - ret0, _ := ret[0].(context.Context) - ret1, _ := ret[1].(func(*error)) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// LockShard indicates an expected call of LockShard. -func (mr *MockGRTopoMockRecorder) LockShard(ctx, keyspace, shard, action any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LockShard", reflect.TypeOf((*MockGRTopo)(nil).LockShard), ctx, keyspace, shard, action) -} - -// MockGRTmcClient is a mock of GRTmcClient interface. -type MockGRTmcClient struct { - ctrl *gomock.Controller - recorder *MockGRTmcClientMockRecorder -} - -// MockGRTmcClientMockRecorder is the mock recorder for MockGRTmcClient. -type MockGRTmcClientMockRecorder struct { - mock *MockGRTmcClient -} - -// NewMockGRTmcClient creates a new mock instance. -func NewMockGRTmcClient(ctrl *gomock.Controller) *MockGRTmcClient { - mock := &MockGRTmcClient{ctrl: ctrl} - mock.recorder = &MockGRTmcClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockGRTmcClient) EXPECT() *MockGRTmcClientMockRecorder { - return m.recorder -} - -// ChangeType mocks base method. 
-func (m *MockGRTmcClient) ChangeType(ctx context.Context, tablet *topodata.Tablet, dbType topodata.TabletType, semiSync bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ChangeType", ctx, tablet, dbType) - ret0, _ := ret[0].(error) - return ret0 -} - -// ChangeType indicates an expected call of ChangeType. -func (mr *MockGRTmcClientMockRecorder) ChangeType(ctx, tablet, dbType any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChangeType", reflect.TypeOf((*MockGRTmcClient)(nil).ChangeType), ctx, tablet, dbType) -} - -// Ping mocks base method. -func (m *MockGRTmcClient) Ping(ctx context.Context, tablet *topodata.Tablet) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Ping", ctx, tablet) - ret0, _ := ret[0].(error) - return ret0 -} - -// Ping indicates an expected call of Ping. -func (mr *MockGRTmcClientMockRecorder) Ping(ctx, tablet any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockGRTmcClient)(nil).Ping), ctx, tablet) -} diff --git a/go/vt/vtgr/controller/refresh.go b/go/vt/vtgr/controller/refresh.go deleted file mode 100644 index 25e56ad21e6..00000000000 --- a/go/vt/vtgr/controller/refresh.go +++ /dev/null @@ -1,360 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "fmt" - "strconv" - "sync" - "sync/atomic" - "time" - - "vitess.io/vitess/go/vt/topo/topoproto" - - "golang.org/x/net/context" - - "vitess.io/vitess/go/stats" - "vitess.io/vitess/go/vt/logutil" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vtgr/config" - "vitess.io/vitess/go/vt/vtgr/db" - "vitess.io/vitess/go/vt/vtgr/inst" - "vitess.io/vitess/go/vt/vtgr/log" -) - -var ( - lockShardTimingsMs = stats.NewMultiTimings("lockShard", "time vtgr takes to lock the shard", []string{"operation", "success"}) -) - -// grInstance represents an instance that's running MySQL GR -// it wraps a InstanceKey plus some tablet related information -type grInstance struct { - instanceKey *inst.InstanceKey - tablet *topodatapb.Tablet - primaryTimeStamp time.Time - alias string -} - -// GRTopo is VTGR wrapper for topo server -type GRTopo interface { - GetShardNames(ctx context.Context, keyspace string) ([]string, error) - GetShard(ctx context.Context, keyspace, shard string) (*topo.ShardInfo, error) - GetTabletMapForShardByCell(ctx context.Context, keyspace, shard string, cells []string) (map[string]*topo.TabletInfo, error) - LockShard(ctx context.Context, keyspace, shard, action string) (context.Context, func(*error), error) -} - -// GRTmcClient is VTGR wrapper for tmc client -type GRTmcClient interface { - ChangeType(ctx context.Context, tablet *topodatapb.Tablet, dbType topodatapb.TabletType, semiSync bool) error - Ping(ctx context.Context, tablet *topodatapb.Tablet) error -} - -// GRShard stores the information about a Vitess shard that's running MySQL GR -type GRShard struct { - KeyspaceShard *topo.KeyspaceShard - cells []string - instances []*grInstance - primaryAlias string - shardStatusCollector *shardStatusCollector - sqlGroup *SQLGroup - ts GRTopo - tmc GRTmcClient - dbAgent db.Agent - - // Every GRShard tracks a unlock function after it grab a topo lock for the shard - // VTGR 
needs to release the topo lock before gracefully shutdown - unlock func(*error) - // mutex to protect unlock function access - unlockMu sync.Mutex - - // configuration - minNumReplicas int - localDbPort int - disableReadOnlyProtection bool - - transientErrorWaitTime time.Duration - bootstrapWaitTime time.Duration - - lastDiagnoseResult DiagnoseType - lastDiagnoseSince time.Time - - isActive atomic.Bool - - logger *log.Logger - - // lock prevents multiple go routine fights with each other - sync.Mutex -} - -// shardStatusCollector is used for collecting shard status -type shardStatusCollector struct { - status *ShardStatus - sync.Mutex -} - -// ShardStatus is used for debugging purpose to get current status of a shard -type ShardStatus struct { - Keyspace string - Shard string - Instances []string - Unreachables []string - Problematics []string - Primary string - DiagnoseResult DiagnoseType -} - -func newShardStatusCollector(keyspace, shard string) *shardStatusCollector { - return &shardStatusCollector{ - status: &ShardStatus{Keyspace: keyspace, Shard: shard}, - } -} - -// NewGRShard creates a new GRShard -func NewGRShard( - keyspace, shard string, - cells []string, - tmc GRTmcClient, - ts GRTopo, - dbAgent db.Agent, - config *config.VTGRConfig, - localDbPort int, - isActive bool) *GRShard { - grShard := &GRShard{ - KeyspaceShard: &topo.KeyspaceShard{Keyspace: keyspace, Shard: shard}, - cells: cells, - shardStatusCollector: newShardStatusCollector(keyspace, shard), - tmc: tmc, - ts: ts, - dbAgent: dbAgent, - unlock: nil, - sqlGroup: NewSQLGroup(config.BootstrapGroupSize, true, keyspace, shard), - minNumReplicas: config.MinNumReplica, - disableReadOnlyProtection: config.DisableReadOnlyProtection, - localDbPort: localDbPort, - logger: log.NewVTGRLogger(keyspace, shard), - transientErrorWaitTime: time.Duration(config.BackoffErrorWaitTimeSeconds) * time.Second, - bootstrapWaitTime: time.Duration(config.BootstrapWaitTimeSeconds) * time.Second, - } - 
grShard.isActive.Store(isActive) - return grShard -} - -// refreshTabletsInShardLocked is called by repair to get a fresh view of the shard -// The caller is responsible to make sure the lock on GRShard -func (shard *GRShard) refreshTabletsInShardLocked(ctx context.Context) { - instances, err := shard.refreshTabletsInShardInternal(ctx) - if err == nil { - shard.instances = instances - } - primary, err := shard.refreshPrimaryShard(ctx) - if err == nil { - shard.primaryAlias = primary - return - } - // If we failed to refreshPrimaryShard, use primary from local tablets - shard.primaryAlias = shard.findPrimaryFromLocalCell() -} - -// UpdateTabletsInShardWithLock updates the shard instances with a lock -func (shard *GRShard) UpdateTabletsInShardWithLock(ctx context.Context) { - instances, err := shard.refreshTabletsInShardInternal(ctx) - if err == nil { - // Take a per shard lock here when we actually refresh the data to avoid - // race conditions bewteen controller and repair tasks - shard.Lock() - shard.instances = instances - shard.Unlock() - } - primary, err := shard.refreshPrimaryShard(ctx) - // We set primary separately from instances so that if global topo is not available - // VTGR can still discover the new tablets from local cell - shard.Lock() - defer shard.Unlock() - if err == nil { - shard.primaryAlias = primary - return - } - shard.primaryAlias = shard.findPrimaryFromLocalCell() -} - -func (shard *GRShard) refreshTabletsInShardInternal(ctx context.Context) ([]*grInstance, error) { - keyspace, shardName := shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard - tablets, err := shard.ts.GetTabletMapForShardByCell(ctx, keyspace, shardName, shard.cells) - if err != nil { - shard.logger.Errorf("Error fetching tablets for keyspace/shardName %v/%v: %v", keyspace, shardName, err) - return nil, err - } - return parseTabletInfos(tablets), nil -} - -func (shard *GRShard) refreshPrimaryShard(ctx context.Context) (string, error) { - keyspace, shardName := 
shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard - si, err := shard.ts.GetShard(ctx, keyspace, shardName) - if err != nil { - shard.logger.Errorf("Error calling GetShard: %v", err) - return "", err - } - return topoproto.TabletAliasString(si.PrimaryAlias), nil -} - -// findPrimaryFromLocalCell iterates through the replicas stored in grShard and returns -// the one that's marked as primary -func (shard *GRShard) findPrimaryFromLocalCell() string { - var latestPrimaryTimestamp time.Time - var primaryInstance *grInstance - for _, instance := range shard.instances { - if instance.tablet.Type == topodatapb.TabletType_PRIMARY { - // It is possible that there are more than one master in topo server - // we should compare timestamp to pick the latest one - if latestPrimaryTimestamp.Before(instance.primaryTimeStamp) { - latestPrimaryTimestamp = instance.primaryTimeStamp - primaryInstance = instance - } - } - } - if primaryInstance != nil { - return primaryInstance.alias - } - return "" -} - -// parseTabletInfos replaces the replica reports for the shard key -// Note: this is not thread-safe -func parseTabletInfos(tablets map[string]*topo.TabletInfo) []*grInstance { - // collect all replicas - var newReplicas []*grInstance - for alias, tabletInfo := range tablets { - tablet := tabletInfo.Tablet - // Only monitor primary, replica and ronly tablet types - switch tablet.Type { - case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY: - // mysql hostname and port might be empty here if tablet is not running - // we will treat them as unreachable - instanceKey := inst.InstanceKey{ - Hostname: tablet.MysqlHostname, - Port: int(tablet.MysqlPort), - } - grInstance := grInstance{ - instanceKey: &instanceKey, - tablet: tablet, - primaryTimeStamp: logutil.ProtoToTime(tablet.PrimaryTermStartTime), - alias: alias, - } - newReplicas = append(newReplicas, &grInstance) - } - } - return newReplicas -} - -// LockShard locks the keyspace-shard on 
topo server to prevent others from executing conflicting actions. -func (shard *GRShard) LockShard(ctx context.Context, action string) (context.Context, error) { - if shard.KeyspaceShard.Keyspace == "" || shard.KeyspaceShard.Shard == "" { - return nil, fmt.Errorf("try to grab lock with incomplete information: %v/%v", shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard) - } - shard.unlockMu.Lock() - defer shard.unlockMu.Unlock() - if shard.unlock != nil { - return nil, fmt.Errorf("try to grab lock for %s/%s while the shard holds an unlock function", shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard) - } - start := time.Now() - ctx, unlock, err := shard.ts.LockShard(ctx, shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard, fmt.Sprintf("VTGR repairing %s", action)) - lockShardTimingsMs.Record([]string{action, strconv.FormatBool(err == nil)}, start) - if err != nil { - return nil, err - } - shard.unlock = unlock - return ctx, nil -} - -// UnlockShard unlocks the keyspace-shard on topo server -// and set the unlock function to nil in the container -func (shard *GRShard) UnlockShard() { - shard.unlockMu.Lock() - defer shard.unlockMu.Unlock() - if shard.unlock == nil { - shard.logger.Warningf("Shard %s/%s does not hold a lock", shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard) - return - } - var err error - shard.unlock(&err) - shard.unlock = nil -} - -func (shard *GRShard) findTabletByHostAndPort(host string, port int) *grInstance { - for _, instance := range shard.instances { - if instance.instanceKey.Hostname == host && instance.instanceKey.Port == port { - return instance - } - } - return nil -} - -func (shard *GRShard) populateVTGRStatusLocked() { - var instanceList []string - for _, instance := range shard.instances { - instanceList = append(instanceList, instance.alias) - } - shard.shardStatusCollector.status.Instances = instanceList - if primary := shard.findShardPrimaryTablet(); primary != nil { - shard.shardStatusCollector.status.Primary = 
primary.alias - } -} - -// GetCurrentShardStatuses returns the status collector has -func (shard *GRShard) GetCurrentShardStatuses() ShardStatus { - shard.Lock() - collector := shard.shardStatusCollector - // dereference status so that we return a copy of the struct - status := *collector.status - shard.Unlock() - return status -} - -// OverrideRebootstrapGroupSize force override the group expectedBootstrapSize used in safety check for rebootstrap -func (shard *GRShard) OverrideRebootstrapGroupSize(groupSize int) error { - shard.Lock() - defer shard.Unlock() - shard.logger.Infof("Override rebootstrap group size=%v", groupSize) - shard.sqlGroup.rebootstrapSize = groupSize - return nil -} - -// GetUnlock returns the unlock function for the shard for testing -func (shard *GRShard) GetUnlock() func(*error) { - shard.unlockMu.Lock() - defer shard.unlockMu.Unlock() - return shard.unlock -} - -// SetIsActive sets isActive for the shard -func (shard *GRShard) SetIsActive(isActive bool) { - shard.logger.Infof("Setting is active to %v", isActive) - shard.isActive.Store(isActive) -} - -func (collector *shardStatusCollector) isUnreachable(instance *grInstance) bool { - if instance.instanceKey == nil || instance.instanceKey.Hostname == "" { - return true - } - for _, alias := range collector.status.Unreachables { - if instance.alias == alias { - return true - } - } - return false -} diff --git a/go/vt/vtgr/controller/refresh_test.go b/go/vt/vtgr/controller/refresh_test.go deleted file mode 100644 index a1bbef74fc7..00000000000 --- a/go/vt/vtgr/controller/refresh_test.go +++ /dev/null @@ -1,159 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "fmt" - "sort" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/vt/logutil" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" - "vitess.io/vitess/go/vt/vtgr/config" -) - -func TestRefreshTabletsInShard(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tablet1 := buildTabletInfo(uint32(0), testHost, testPort0, topodatapb.TabletType_PRIMARY, time.Time{}) - tablet2 := buildTabletInfo(uint32(1), testHost, testPort1, topodatapb.TabletType_SPARE, time.Time{}) - tablet3 := buildTabletInfo(uint32(2), testHost, 0, topodatapb.TabletType_REPLICA, time.Time{}) - testutil.AddTablet(ctx, t, ts, tablet1.Tablet, nil) - testutil.AddTablet(ctx, t, ts, tablet2.Tablet, nil) - testutil.AddTablet(ctx, t, ts, tablet3.Tablet, nil) - cfg := &config.VTGRConfig{BootstrapGroupSize: 3, MinNumReplica: 0, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, nil, ts, nil, cfg, testPort0, true) - assert.Equal(t, "ks", shard.shardStatusCollector.status.Keyspace) - assert.Equal(t, "0", shard.shardStatusCollector.status.Shard) - shard.refreshTabletsInShardLocked(context.Background()) - instances := 
shard.instances - // only have 2 instances here because we filter out the spare tablet - assert.Equal(t, 2, len(instances)) - sort.Slice(instances[:], func(i, j int) bool { - return instances[i].alias < instances[j].alias - }) - assert.Equal(t, testHost, instances[0].tablet.Hostname) - assert.Equal(t, int32(testPort0), instances[0].tablet.MysqlPort) - assert.Equal(t, topodatapb.TabletType_PRIMARY, instances[0].tablet.Type) - // host 3 is missing mysql host but we still put it in the instances list here - assert.Equal(t, testHost, instances[1].instanceKey.Hostname) - assert.Equal(t, int32(0), instances[1].tablet.MysqlPort) - assert.Equal(t, topodatapb.TabletType_REPLICA, instances[1].tablet.Type) -} - -func TestRefreshWithCells(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2", "cell3") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tablet1 := buildTabletInfoWithCell(uint32(0), testHost, "cell1", testPort0, topodatapb.TabletType_REPLICA, time.Time{}) - tablet2 := buildTabletInfoWithCell(uint32(1), testHost, "cell2", testPort1, topodatapb.TabletType_REPLICA, time.Time{}) - tablet3 := buildTabletInfoWithCell(uint32(2), testHost, "cell3", testPort2, topodatapb.TabletType_REPLICA, time.Time{}) - testutil.AddTablet(ctx, t, ts, tablet1.Tablet, nil) - testutil.AddTablet(ctx, t, ts, tablet2.Tablet, nil) - testutil.AddTablet(ctx, t, ts, tablet3.Tablet, nil) - cfg := &config.VTGRConfig{BootstrapGroupSize: 3, MinNumReplica: 0, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", []string{"cell1", "cell3"}, nil, ts, nil, cfg, testPort0, true) - shard.refreshTabletsInShardLocked(context.Background()) - instances := shard.instances - // only have 2 instances here because we are not watching cell2 - assert.Equal(t, 2, len(instances)) - sort.Slice(instances[:], func(i, j int) bool { - return instances[i].alias < instances[j].alias - }) - 
assert.Equal(t, "cell1-0000000000", instances[0].alias) - assert.Equal(t, "cell3-0000000002", instances[1].alias) -} - -func TestRefreshWithEmptyCells(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2", "cell3") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tablet1 := buildTabletInfoWithCell(uint32(0), testHost, "cell1", testPort0, topodatapb.TabletType_REPLICA, time.Time{}) - tablet2 := buildTabletInfoWithCell(uint32(1), testHost, "cell2", testPort1, topodatapb.TabletType_REPLICA, time.Time{}) - tablet3 := buildTabletInfoWithCell(uint32(2), testHost, "cell3", testPort2, topodatapb.TabletType_REPLICA, time.Time{}) - testutil.AddTablet(ctx, t, ts, tablet1.Tablet, nil) - testutil.AddTablet(ctx, t, ts, tablet2.Tablet, nil) - testutil.AddTablet(ctx, t, ts, tablet3.Tablet, nil) - cfg := &config.VTGRConfig{BootstrapGroupSize: 3, MinNumReplica: 0, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, nil, ts, nil, cfg, testPort0, true) - shard.refreshTabletsInShardLocked(context.Background()) - instances := shard.instances - // nil cell will return everything - assert.Equal(t, 3, len(instances)) - sort.Slice(instances[:], func(i, j int) bool { - return instances[i].alias < instances[j].alias - }) - assert.Equal(t, "cell1-0000000000", instances[0].alias) - assert.Equal(t, "cell2-0000000001", instances[1].alias) - assert.Equal(t, "cell3-0000000002", instances[2].alias) -} - -func TestLockRelease(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2", "cell3") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - cfg := &config.VTGRConfig{BootstrapGroupSize: 3, MinNumReplica: 0, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, nil, ts, nil, cfg, testPort0, true) - ctx, err := 
shard.LockShard(ctx, "") - assert.NoError(t, err) - // make sure we get the lock - err = shard.checkShardLocked(ctx) - assert.NoError(t, err) - assert.NotNil(t, shard.unlock) - shard.UnlockShard() - assert.Nil(t, shard.unlock) - err = shard.checkShardLocked(ctx) - assert.EqualError(t, err, "lost topology lock; aborting: shard ks/0 is not locked (no lockInfo in map)") -} - -func buildTabletInfo(id uint32, host string, mysqlPort int, ttype topodatapb.TabletType, primaryTermTime time.Time) *topo.TabletInfo { - return buildTabletInfoWithCell(id, host, "test_cell", mysqlPort, ttype, primaryTermTime) -} - -func buildTabletInfoWithCell(id uint32, host, cell string, mysqlPort int, ttype topodatapb.TabletType, primaryTermTime time.Time) *topo.TabletInfo { - alias := &topodatapb.TabletAlias{Cell: cell, Uid: id} - return &topo.TabletInfo{Tablet: &topodatapb.Tablet{ - Alias: alias, - Hostname: host, - MysqlHostname: host, - MysqlPort: int32(mysqlPort), - Keyspace: "ks", - Shard: "0", - Type: ttype, - PrimaryTermStartTime: logutil.TimeToProto(primaryTermTime), - Tags: map[string]string{"hostname": fmt.Sprintf("host_%d", id)}, - }} -} diff --git a/go/vt/vtgr/controller/repair.go b/go/vt/vtgr/controller/repair.go deleted file mode 100644 index a7fa64d7c97..00000000000 --- a/go/vt/vtgr/controller/repair.go +++ /dev/null @@ -1,767 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "context" - "errors" - "fmt" - "sort" - "strconv" - "sync" - "time" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/stats" - "vitess.io/vitess/go/vt/concurrency" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgr/db" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -var ( - repairTimingsMs = stats.NewMultiTimings("repairTimingsMs", "time vtgr takes to repair", []string{"status", "success"}) - unexpectedLockLost = stats.NewCountersWithMultiLabels("unexpectedLockLost", "unexpected lost of the lock", []string{"Keyspace", "Shard"}) - - abortRebootstrap bool -) - -func init() { - servenv.OnParseFor("vtgr", func(fs *pflag.FlagSet) { - fs.BoolVar(&abortRebootstrap, "abort_rebootstrap", false, "Don't allow vtgr to rebootstrap an existing group.") - }) -} - -// RepairResultCode is the code for repair -type RepairResultCode string - -const ( - // Success means successfully repaired - Success RepairResultCode = "Success" - // Fail means failed to repaire - Fail RepairResultCode = "Fail" - // Noop means do nothing - Noop RepairResultCode = "Noop" -) - -// Repair tries to fix shard based on the diagnose type -func (shard *GRShard) Repair(ctx context.Context, status DiagnoseType) (RepairResultCode, error) { - shard.Lock() - defer shard.Unlock() - var err error - code := Noop - switch status { - case DiagnoseTypeShardHasNoGroup: - code, err = shard.repairShardHasNoGroup(ctx) - case DiagnoseTypeShardHasInactiveGroup: - code, err = shard.repairShardHasInactiveGroup(ctx) - case DiagnoseTypeWrongPrimaryTablet: - code, err = shard.repairWrongPrimaryTablet(ctx) - case DiagnoseTypeUnconnectedReplica: - code, err = shard.repairUnconnectedReplica(ctx) - case DiagnoseTypeUnreachablePrimary: - code, err = shard.repairUnreachablePrimary(ctx) - case DiagnoseTypeInsufficientGroupSize: - code, err = 
shard.repairInsufficientGroupSize(ctx) - case DiagnoseTypeReadOnlyShard: - code, err = shard.repairReadOnlyShard(ctx) - case DiagnoseTypeBootstrapBackoff, DiagnoseTypeBackoffError: - code, err = shard.repairBackoffError(ctx, status) - case DiagnoseTypeError: - shard.logger.Errorf("%v is %v", formatKeyspaceShard(shard.KeyspaceShard), status) - case DiagnoseTypeHealthy: - start := time.Now() - repairTimingsMs.Record([]string{string(status), "true"}, start) - } - if status != DiagnoseTypeHealthy { - shard.logger.Infof("VTGR repaired %v status=%v | code=%v", formatKeyspaceShard(shard.KeyspaceShard), status, code) - } - return code, vterrors.Wrap(err, "vtgr repair") -} - -func (shard *GRShard) repairShardHasNoGroup(ctx context.Context) (RepairResultCode, error) { - ctx, err := shard.LockShard(ctx, "repairShardHasNoGroup") - if err != nil { - shard.logger.Warningf("repairShardHasNoPrimaryTablet fails to grab lock for the shard %v: %v", shard.KeyspaceShard, err) - return Noop, err - } - defer shard.UnlockShard() - shard.refreshTabletsInShardLocked(ctx) - // Diagnose() will call shardAgreedGroup as the first thing - // which will update mysqlGroup stored in the shard - status, err := shard.diagnoseLocked(ctx) - if err != nil { - shard.logger.Errorf("Failed to diagnose: %v", err) - return Fail, err - } - if status != DiagnoseTypeShardHasNoGroup { - shard.logger.Infof("Shard %v is no longer in DiagnoseTypeShardHasNoGroup: %v", formatKeyspaceShard(shard.KeyspaceShard), status) - return Noop, nil - } - start := time.Now() - err = shard.repairShardHasNoGroupAction(ctx) - repairTimingsMs.Record([]string{DiagnoseTypeShardHasNoGroup, strconv.FormatBool(err == nil)}, start) - if err != nil { - return Fail, err - } - return Success, nil -} - -func (shard *GRShard) repairShardHasNoGroupAction(ctx context.Context) error { - // If group is not empty AND there is at least one active group member - // we don't need to bootstrap. 
Instead we should try to join the group - mysqlGroup := shard.shardAgreedGroupName() - isAllOffline := shard.isAllOfflineOrError() - if mysqlGroup != "" { - shard.logger.Infof("Shard %v already have a group %v", formatKeyspaceShard(shard.KeyspaceShard), mysqlGroup) - return nil - } - // This should not really happen in reality - if mysqlGroup == "" && !isAllOffline { - return fmt.Errorf("shard %v has empty group name but some node is not OFFLINE", formatKeyspaceShard(shard.KeyspaceShard)) - } - - // Now we know group is null and there is no active node - // we should bootstrap the group - replicas := shard.instances - // Sanity check to make sure there is at least one instance - if len(replicas) == 0 { - shard.logger.Warningf("Cannot find any instance for the shard %v", formatKeyspaceShard(shard.KeyspaceShard)) - return nil - } - if !shard.sqlGroup.IsSafeToBootstrap() { - return errors.New("unsafe to bootstrap group") - } - var candidate *grInstance - sort.SliceStable(replicas, func(i, j int) bool { - return replicas[i].alias < replicas[j].alias - }) - for _, replica := range replicas { - if !shard.shardStatusCollector.isUnreachable(replica) { - candidate = replica - break - } - } - if candidate == nil { - return errors.New("fail to find any candidate to bootstrap") - } - // Bootstrap the group - shard.logger.Infof("Bootstrapping the group for %v on host=%v", formatKeyspaceShard(shard.KeyspaceShard), candidate.instanceKey.Hostname) - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return err - } - if err := shard.dbAgent.BootstrapGroupLocked(candidate.instanceKey); err != nil { - // if bootstrap failed, the next one that gets the lock will try to do it again - shard.logger.Errorf("Failed to bootstrap mysql group on %v: %v", candidate.instanceKey.Hostname, err) - return err - } - shard.logger.Infof("Bootstrapped the group for %v", formatKeyspaceShard(shard.KeyspaceShard)) - return nil -} - 
-func (shard *GRShard) repairShardHasInactiveGroup(ctx context.Context) (RepairResultCode, error) { - ctx, err := shard.LockShard(ctx, "repairShardHasInactiveGroup") - if err != nil { - shard.logger.Warningf("repairShardHasInactiveGroup fails to grab lock for the shard %v: %v", shard.KeyspaceShard, err) - return Noop, err - } - defer shard.UnlockShard() - shard.refreshTabletsInShardLocked(ctx) - // Diagnose() will call shardAgreedGroup as the first thing - // which will update mysqlGroup stored in the shard - status, err := shard.diagnoseLocked(ctx) - if err != nil { - shard.logger.Errorf("Failed to diagnose: %v", err) - return Fail, err - } - if status != DiagnoseTypeShardHasInactiveGroup { - shard.logger.Infof("Shard %v is no longer in DiagnoseTypeShardHasInactiveGroup: %v", formatKeyspaceShard(shard.KeyspaceShard), status) - return Noop, nil - } - // Now we know the shard has an agreed group but no member in it - // We should find one with the largest GTID set as the - // new mysql primary to bootstrap the group - start := time.Now() - err = shard.stopAndRebootstrap(ctx) - repairTimingsMs.Record([]string{DiagnoseTypeShardHasInactiveGroup, strconv.FormatBool(err == nil)}, start) - if err != nil { - return Fail, err - } - return Success, nil -} - -func (shard *GRShard) repairBackoffError(ctx context.Context, diagnose DiagnoseType) (RepairResultCode, error) { - ctx, err := shard.LockShard(ctx, "repairBackoffError") - if err != nil { - shard.logger.Warningf("repairBackoffError fails to grab lock for the shard %v: %v", shard.KeyspaceShard, err) - return Noop, err - } - defer shard.UnlockShard() - shard.refreshTabletsInShardLocked(ctx) - status, err := shard.diagnoseLocked(ctx) - if err != nil { - shard.logger.Errorf("Failed to diagnose: %v", err) - return Fail, err - } - if status != diagnose { - shard.logger.Infof("Shard %v is no longer in %v: %v", formatKeyspaceShard(shard.KeyspaceShard), diagnose, status) - return Noop, nil - } - if shard.lastDiagnoseResult != 
diagnose { - shard.logger.Infof("diagnose shard as %v but last diagnose result was %v", diagnose, shard.lastDiagnoseResult) - return Noop, nil - } - now := time.Now() - var waitTime time.Duration - switch diagnose { - case DiagnoseTypeBackoffError: - waitTime = shard.transientErrorWaitTime - case DiagnoseTypeBootstrapBackoff: - waitTime = shard.bootstrapWaitTime - default: - return Fail, fmt.Errorf("unsupported diagnose for repairBackoffError: %v", diagnose) - } - if now.Sub(shard.lastDiagnoseSince) < waitTime { - shard.logger.Infof("Detected %v at %v. In wait time for network partition", diagnose, shard.lastDiagnoseSince) - return Noop, nil - } - shard.logger.Infof("Detected %v at %v. Start repairing after %v", diagnose, shard.lastDiagnoseSince, shard.transientErrorWaitTime) - err = shard.stopAndRebootstrap(ctx) - repairTimingsMs.Record([]string{DiagnoseTypeBackoffError, strconv.FormatBool(err == nil)}, now) - if err != nil { - return Fail, err - } - return Success, nil -} - -func (shard *GRShard) stopAndRebootstrap(ctx context.Context) error { - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return err - } - // Before bootstrap the group, we need to stop group first - // abort aggressively here as soon as we encounter an error - // StopGroupLocked will check if instance is NOT in "ONLINE"/"RECOVERING" state (i.e., UNREACHABLE, ERROR or OFFLINE) - errorRecorder := shard.forAllInstances(func(instance *grInstance, wg *sync.WaitGroup, er concurrency.ErrorRecorder) { - defer wg.Done() - status := shard.sqlGroup.GetStatus(instance.instanceKey) - if status != nil && status.State == db.OFFLINE { - shard.logger.Infof("stop group replication on %v skipped because it is already OFFLINE", instance.alias) - return - } - shard.logger.Infof("stop group replication on %v", instance.alias) - err := shard.dbAgent.StopGroupLocked(instance.instanceKey) - if err != nil { - if !unreachableError(err) { - 
er.RecordError(err) - } - shard.logger.Warningf("Error during stop group replication on %v: %v", instance.instanceKey.Hostname, err) - } - }) - // We don't check allowPartialUnhealthyNodes here because we don't record unreachableError here - // hence if errorRecorder has error, it indicates the mysqld is still reachable but there is nothing - // else went wrong. - if errorRecorder.HasErrors() { - shard.logger.Errorf("Failed to stop group replication %v", errorRecorder.Error()) - return errorRecorder.Error() - } - shard.logger.Infof("Stop the group for %v", formatKeyspaceShard(shard.KeyspaceShard)) - shard.logger.Info("Start find candidate to rebootstrap") - candidate, err := shard.findRebootstrapCandidate(ctx) - if err != nil { - shard.logger.Errorf("Failed to find rebootstrap candidate: %v", err) - return err - } - shard.refreshSQLGroup() - if !shard.sqlGroup.IsSafeToRebootstrap() { - return errors.New("unsafe to bootstrap group") - } - if abortRebootstrap { - shard.logger.Warningf("Abort stopAndRebootstrap because rebootstrap hook override") - return errForceAbortBootstrap - } - shard.logger.Infof("Rebootstrap %v on %v", formatKeyspaceShard(shard.KeyspaceShard), candidate.instanceKey.Hostname) - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return err - } - uuid := shard.sqlGroup.GetGroupName() - if uuid == "" { - return errors.New("trying to rebootstrap without uuid") - } - return shard.dbAgent.RebootstrapGroupLocked(candidate.instanceKey, uuid) -} - -// allowPartialUnhealthyNodes returns true if rebootstrapSize is set to non-zero -// and the error we get is less than (total_num_tablet - rebootstrapSize) -func (shard *GRShard) allowPartialUnhealthyNodes(errorRecorder *concurrency.AllErrorRecorder) bool { - if shard.sqlGroup.rebootstrapSize != 0 && len(shard.instances)-shard.sqlGroup.rebootstrapSize >= len(errorRecorder.GetErrors()) { - shard.logger.Warningf("Allow unhealthy nodes during 
the reboot group_size=%v, rebootstrap_config=%v, error=%v", shard.sqlGroup.expectedBootstrapSize, shard.sqlGroup.rebootstrapSize, len(errorRecorder.GetErrors())) - return true - } - return false -} - -func (shard *GRShard) getGTIDSetFromAll(skipPrimary bool) (*groupGTIDRecorder, *concurrency.AllErrorRecorder, error) { - if len(shard.instances) == 0 { - return nil, nil, fmt.Errorf("%v has 0 instance", formatKeyspaceShard(shard.KeyspaceShard)) - } - // Before we do failover, we first verify if there is no one agreed group name. - // If not, VTGR is not smart enough to figure out how to failover - // Note: the caller should make sure the mysqlGroup is refreshed after we grab a shard level lock - mysqlGroup := shard.shardAgreedGroupName() - if mysqlGroup == "" { - return nil, nil, fmt.Errorf("unable to find an agreed group name in %v", formatKeyspaceShard(shard.KeyspaceShard)) - } - primary := shard.findShardPrimaryTablet() - var mysqlPrimaryHost string - var mysqlPrimaryPort int - // skipPrimary is true when we manual failover or if there is a unreachalbe primary tablet - // in both case, there should be a reconciled primary tablet - if skipPrimary && primary != nil { - status := shard.sqlGroup.GetStatus(primary.instanceKey) - mysqlPrimaryHost, mysqlPrimaryPort = status.HostName, status.Port - shard.logger.Infof("Found primary instance from MySQL on %v", mysqlPrimaryHost) - } - gtidRecorder := &groupGTIDRecorder{} - // Iterate through all the instances in the shard and find the one with largest GTID set with best effort - // We wrap it with forAllInstances so that the failover can continue if there is a host - // that is unreachable - errorRecorder := shard.forAllInstances(func(instance *grInstance, wg *sync.WaitGroup, er concurrency.ErrorRecorder) { - defer wg.Done() - if skipPrimary && instance.instanceKey.Hostname == mysqlPrimaryHost && instance.instanceKey.Port == mysqlPrimaryPort { - shard.logger.Infof("Skip %v to failover to a non-primary node", 
mysqlPrimaryHost) - return - } - gtids, err := shard.dbAgent.FetchApplierGTIDSet(instance.instanceKey) - if err != nil { - er.RecordError(err) - shard.logger.Errorf("%v get error while fetch applier GTIDs: %v", instance.alias, err) - shard.shardStatusCollector.recordProblematics(instance) - if unreachableError(err) { - shard.shardStatusCollector.recordUnreachables(instance) - } - return - } - if gtids == nil { - shard.logger.Warningf("[failover candidate] skip %s with empty gtid", instance.alias) - return - } - gtidRecorder.recordGroupGTIDs(gtids, instance) - }) - return gtidRecorder, errorRecorder, nil -} - -func (shard *GRShard) findRebootstrapCandidate(ctx context.Context) (*grInstance, error) { - gtidRecorder, errorRecorder, err := shard.getGTIDSetFromAll(false) - if err != nil { - shard.logger.Errorf("Failed to get gtid from all: %v", err) - return nil, err - } - err = errorRecorder.Error() - // We cannot tolerate any error from mysql during a rebootstrap. - if err != nil && !shard.allowPartialUnhealthyNodes(errorRecorder) { - shard.logger.Errorf("Failed to fetch all GTID with forAllInstances for rebootstrap: %v", err) - return nil, err - } - candidate, err := shard.findFailoverCandidateFromRecorder(ctx, gtidRecorder, nil) - if err != nil { - shard.logger.Errorf("Failed to find rebootstrap candidate by GTID after forAllInstances: %v", err) - return nil, err - } - if candidate == nil { - return nil, fmt.Errorf("failed to find rebootstrap candidate for %v", formatKeyspaceShard(shard.KeyspaceShard)) - } - if !shard.instanceReachable(ctx, candidate) { - shard.logger.Errorf("rebootstrap candidate %v (%v) is not reachable via ping", candidate.alias, candidate.instanceKey.Hostname) - return nil, fmt.Errorf("%v is unreachable", candidate.alias) - } - shard.logger.Infof("%v is the rebootstrap candidate", candidate.alias) - return candidate, nil -} - -// Caller of this function should make sure it gets the shard lock and it has the -// latest view of a shard. 
Otherwise, we might skip the wrong node when we locate the candidate -func (shard *GRShard) findFailoverCandidate(ctx context.Context) (*grInstance, error) { - gtidRecorder, errorRecorder, err := shard.getGTIDSetFromAll(true) - if err != nil { - shard.logger.Errorf("Failed to get gtid from all: %v", err) - return nil, err - } - err = errorRecorder.Error() - // During the repair for unreachable primary we still have a mysql group. - // Failover within the group is safe, finding the largest GTID is an optimization. - // therefore we don't check error from errorRecorder just log it - if err != nil { - shard.logger.Warningf("Errors when fetch all GTID with forAllInstances for failover: %v", err) - } - shard.forAllInstances(func(instance *grInstance, wg *sync.WaitGroup, er concurrency.ErrorRecorder) { - defer wg.Done() - if !shard.instanceReachable(ctx, instance) { - shard.logger.Errorf("%v is not reachable via ping", instance.alias) - shard.shardStatusCollector.recordProblematics(instance) - shard.shardStatusCollector.recordUnreachables(instance) - } - }) - var candidate *grInstance - candidate, err = shard.findFailoverCandidateFromRecorder(ctx, gtidRecorder, func(c context.Context, instance *grInstance) bool { - return !shard.shardStatusCollector.isUnreachable(instance) - }) - if err != nil { - shard.logger.Errorf("Failed to find failover candidate by GTID after forAllInstances: %v", err) - return nil, err - } - if candidate == nil { - return nil, fmt.Errorf("failed to find failover candidate for %v", formatKeyspaceShard(shard.KeyspaceShard)) - } - shard.logger.Infof("%v is the failover candidate", candidate.alias) - return candidate, nil -} - -func (shard *GRShard) repairWrongPrimaryTablet(ctx context.Context) (RepairResultCode, error) { - ctx, err := shard.LockShard(ctx, "repairWrongPrimaryTablet") - if err != nil { - shard.logger.Warningf("repairWrongPrimaryTablet fails to grab lock for the shard %v: %v", shard.KeyspaceShard, err) - return Noop, err - } - defer 
shard.UnlockShard() - // We grab shard level lock and check again if there is no primary - // to avoid race conditions - shard.refreshTabletsInShardLocked(ctx) - status, err := shard.diagnoseLocked(ctx) - if err != nil { - shard.logger.Errorf("Failed to diagnose: %v", err) - return Fail, err - } - if status != DiagnoseTypeWrongPrimaryTablet { - shard.logger.Infof("Shard %v is no longer in DiagnoseTypeWrongPrimaryTablet: %v", formatKeyspaceShard(shard.KeyspaceShard), status) - return Noop, nil - } - start := time.Now() - err = shard.fixPrimaryTabletLocked(ctx) - repairTimingsMs.Record([]string{DiagnoseTypeWrongPrimaryTablet, strconv.FormatBool(err == nil)}, start) - if err != nil { - return Fail, err - } - return Success, nil -} - -// fixPrimaryTabletLocked changes Vitess primary tablet based on mysql group -func (shard *GRShard) fixPrimaryTabletLocked(ctx context.Context) error { - host, port, isActive := shard.sqlGroup.GetPrimary() - if !isActive { - return db.ErrGroupInactive - } - // Primary tablet does not run mysql primary, we need to change it accordingly - candidate := shard.findTabletByHostAndPort(host, port) - if candidate == nil { - return errMissingPrimaryTablet - } - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return err - } - err := shard.tmc.ChangeType(ctx, candidate.tablet, topodatapb.TabletType_PRIMARY, false) - if err != nil { - return fmt.Errorf("failed to change type to primary on %v: %v", candidate.alias, err) - } - shard.logger.Infof("Successfully make %v the primary tablet", candidate.alias) - return nil -} - -// repairUnconnectedReplica usually handle the case when there is a DiagnoseTypeHealthy tablet and -// it is not connected to mysql primary node -func (shard *GRShard) repairUnconnectedReplica(ctx context.Context) (RepairResultCode, error) { - ctx, err := shard.LockShard(ctx, "repairUnconnectedReplica") - if err != nil { - 
shard.logger.Warningf("repairUnconnectedReplica fails to grab lock for the shard %v: %v", formatKeyspaceShard(shard.KeyspaceShard), err) - return Noop, err - } - defer shard.UnlockShard() - shard.refreshTabletsInShardLocked(ctx) - status, err := shard.diagnoseLocked(ctx) - if err != nil { - shard.logger.Errorf("Failed to diagnose: %v", err) - return Fail, err - } - if status != DiagnoseTypeUnconnectedReplica { - shard.logger.Infof("Shard %v is no longer in DiagnoseTypeUnconnectedReplica: %v", formatKeyspaceShard(shard.KeyspaceShard), status) - return Noop, nil - } - start := time.Now() - err = shard.repairUnconnectedReplicaAction(ctx) - repairTimingsMs.Record([]string{DiagnoseTypeUnconnectedReplica, strconv.FormatBool(err == nil)}, start) - if err != nil { - return Fail, err - } - return Success, nil -} - -func (shard *GRShard) repairUnconnectedReplicaAction(ctx context.Context) error { - primaryInstance := shard.findShardPrimaryTablet() - target, err := shard.disconnectedInstance() - if err != nil { - return err - } - if target == nil { - shard.logger.Infof("there is no instance without group for %v", formatKeyspaceShard(shard.KeyspaceShard)) - return nil - } - shard.logger.Infof("Connecting replica %v to %v", target.instanceKey.Hostname, primaryInstance.instanceKey.Hostname) - status := shard.sqlGroup.GetStatus(target.instanceKey) - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return err - } - if status != nil && status.State != db.OFFLINE { - shard.logger.Infof("stop group replication on %v (%v) before join the group", target.alias, status.State) - err := shard.dbAgent.StopGroupLocked(target.instanceKey) - if err != nil { - shard.logger.Errorf("Failed to stop group replication on %v: %v", target.instanceKey.Hostname, err) - return err - } - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return err - } - } - return 
shard.dbAgent.JoinGroupLocked(target.instanceKey, primaryInstance.instanceKey) -} - -func (shard *GRShard) repairUnreachablePrimary(ctx context.Context) (RepairResultCode, error) { - ctx, err := shard.LockShard(ctx, "repairUnreachablePrimary") - if err != nil { - shard.logger.Warningf("repairUnreachablePrimary fails to grab lock for the shard %v: %v", formatKeyspaceShard(shard.KeyspaceShard), err) - return Noop, err - } - defer shard.UnlockShard() - shard.refreshTabletsInShardLocked(ctx) - status, err := shard.diagnoseLocked(ctx) - if err != nil { - shard.logger.Errorf("Failed to diagnose: %v", err) - return Fail, err - } - if status != DiagnoseTypeUnreachablePrimary { - shard.logger.Infof("Shard %v is no longer in DiagnoseTypeUnreachablePrimary: %v", formatKeyspaceShard(shard.KeyspaceShard), status) - return Noop, nil - } - // We are here because either: - // 1. we have a primary tablet, but it's not reachable - // 2. we cannot find primary tablet but we do have a mysql group - // we need to failover mysql manually - // - // other case will be handled by different testGroupInput, e.g., - // has reachable primary tablet, but run on different node than mysql -> DiagnoseTypeWrongPrimaryTablet - start := time.Now() - err = shard.failoverLocked(ctx) - repairTimingsMs.Record([]string{DiagnoseTypeUnreachablePrimary, strconv.FormatBool(err == nil)}, start) - if err != nil { - return Fail, err - } - return Success, nil -} - -func (shard *GRShard) repairInsufficientGroupSize(ctx context.Context) (RepairResultCode, error) { - ctx, err := shard.LockShard(ctx, "repairInsufficientGroupSize") - if err != nil { - shard.logger.Warningf("repairInsufficientGroupSize fails to grab lock for the shard %v: %v", formatKeyspaceShard(shard.KeyspaceShard), err) - return Noop, err - } - defer shard.UnlockShard() - shard.refreshTabletsInShardLocked(ctx) - status, err := shard.diagnoseLocked(ctx) - if err != nil { - shard.logger.Errorf("Failed to diagnose: %v", err) - return Fail, err - } - if 
status != DiagnoseTypeInsufficientGroupSize { - shard.logger.Infof("Shard %v is no longer in DiagnoseTypeInsufficientGroupSize: %v", formatKeyspaceShard(shard.KeyspaceShard), status) - return Noop, nil - } - // We check primary tablet is consistent with sql primary before InsufficientGroupSize - // therefore primary we found here is correct and healthy - primary := shard.findShardPrimaryTablet() - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return Fail, err - } - // mysql group will set super_read_only properly automatically - // https://mysqlhighavailability.com/protecting-your-data-fail-safe-enhancements-to-group-replication/ - // since Vitess only knows one writable node (primary tablet) if we want to make sure there is no write - // after there is insufficient members, we can just set primary mysql node to be read only - err = shard.dbAgent.SetReadOnly(primary.instanceKey, true) - if err != nil { - return Fail, err - } - return Success, nil -} - -func (shard *GRShard) repairReadOnlyShard(ctx context.Context) (RepairResultCode, error) { - ctx, err := shard.LockShard(ctx, "repairReadOnlyShard") - if err != nil { - shard.logger.Warningf("repairReadOnlyShard fails to grab lock for the shard %v: %v", formatKeyspaceShard(shard.KeyspaceShard), err) - return Noop, err - } - defer shard.UnlockShard() - shard.refreshTabletsInShardLocked(ctx) - status, err := shard.diagnoseLocked(ctx) - if err != nil { - shard.logger.Errorf("Failed to diagnose: %v", err) - return Fail, err - } - if status != DiagnoseTypeReadOnlyShard { - shard.logger.Infof("Shard %v is no longer in DiagnoseTypeReadOnlyShard: %v", formatKeyspaceShard(shard.KeyspaceShard), status) - return Noop, nil - } - primary := shard.findShardPrimaryTablet() - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return Fail, err - } - // undo what we did 
repairInsufficientGroupSize - err = shard.dbAgent.SetReadOnly(primary.instanceKey, false) - if err != nil { - return Fail, err - } - return Success, nil -} - -// Failover takes a shard and find an node with largest GTID as the mysql primary of the group -func (shard *GRShard) Failover(ctx context.Context) error { - ctx, err := shard.LockShard(ctx, "Failover") - if err != nil { - shard.logger.Warningf("Failover fails to grab lock for the shard %v: %v", formatKeyspaceShard(shard.KeyspaceShard), err) - return err - } - defer shard.UnlockShard() - shard.refreshTabletsInShardLocked(ctx) - return shard.failoverLocked(ctx) -} - -func (shard *GRShard) failoverLocked(ctx context.Context) error { - candidate, err := shard.findFailoverCandidate(ctx) - if err != nil { - shard.logger.Errorf("Failed to find failover candidate: %v", err) - return err - } - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return err - } - err = shard.dbAgent.Failover(candidate.instanceKey) - if err != nil { - shard.logger.Errorf("Failed to failover mysql to %v", candidate.alias) - return err - } - shard.logger.Infof("Successfully failover MySQL to %v for %v", candidate.instanceKey.Hostname, formatKeyspaceShard(shard.KeyspaceShard)) - if !shard.isActive.Load() { - shard.logger.Infof("Skip vttablet failover on an inactive shard %v", formatKeyspaceShard(shard.KeyspaceShard)) - return nil - } - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return err - } - err = shard.tmc.ChangeType(ctx, candidate.tablet, topodatapb.TabletType_PRIMARY, false) - if err != nil { - shard.logger.Errorf("Failed to failover Vitess %v", candidate.alias) - return err - } - shard.logger.Infof("Successfully failover Vitess to %v for %v", candidate.alias, formatKeyspaceShard(shard.KeyspaceShard)) - return nil -} - -func (shard *GRShard) findFailoverCandidateFromRecorder(ctx 
context.Context, recorder *groupGTIDRecorder, check func(context.Context, *grInstance) bool) (*grInstance, error) { - if len(recorder.gtidWithInstances) == 0 { - return nil, fmt.Errorf("empty failover candidate list for %v", formatKeyspaceShard(shard.KeyspaceShard)) - } - // Sort the gtidWithInstances slice so that we have consistent candidate - // in case they have same gtid set - recorder.sort() - for _, gtidInst := range recorder.gtidWithInstances { - shard.logger.Infof("[failover candidates] %s gtid %s", gtidInst.instance.alias, gtidInst.gtids.String()) - } - var largestGTIDs mysql.GTIDSet - var candidate *grInstance - var divergentCandidates []string - // All the instances in the recorder have a reachable mysqld - // hence anyone is a valid failover candidate - for _, elem := range recorder.gtidWithInstances { - gtids := elem.gtids - inst := elem.instance - if check != nil && !check(ctx, inst) { - shard.logger.Warningf("Skip %v as candidate with gtid %v because it failed the check", inst.alias, gtids.String()) - continue - } - if largestGTIDs == nil { - largestGTIDs = gtids - candidate = inst - continue - } - // If largestGTIDs is subset of current gtids, it means instance has larger GTID than candidate - // we need to swap them out - isSubset, isSuperset := compareGTIDSet(largestGTIDs, gtids) - if isSubset { - largestGTIDs = gtids - candidate = inst - continue - } - // largestGTIDs is neither subset nor super set of gtids - // we log and append to candidates so that we know there is a problem in the group - // after the iteration - if !isSuperset { - shard.logger.Errorf("FetchGroupView divergent GITD set from host=%v GTIDSet=%v", inst.instanceKey.Hostname, gtids) - divergentCandidates = append(divergentCandidates, inst.alias) - } - } - // unless GTID set diverged, the candidates should be empty - if len(divergentCandidates) > 0 { - divergentCandidates = append(divergentCandidates, candidate.alias) - return nil, fmt.Errorf("found more than one failover 
candidates by GTID set for %v: %v", formatKeyspaceShard(shard.KeyspaceShard), divergentCandidates) - } - return candidate, nil -} - -func compareGTIDSet(set1, set2 mysql.GTIDSet) (bool, bool) { - isSubset := set2.Contains(set1) - // If set1 is subset of set2 we find a GTID super set and just need to record it - if isSubset { - return true, false - } - // If set1 is not a subset of set2 we need to see if set1 is actually a super set of set2 - // this is to controller GTID set divergence - isSubset = set1.Contains(set2) - // We know set1 is not subset of set2 if set2 is also not subset of set1, it means - // there is a divergent in GTID sets - return false, isSubset -} - -func (shard *GRShard) checkShardLocked(ctx context.Context) error { - if err := topo.CheckShardLocked(ctx, shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard); err != nil { - labels := []string{shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard} - unexpectedLockLost.Add(labels, 1) - shard.logger.Errorf("lost topology lock; aborting") - return vterrors.Wrap(err, "lost topology lock; aborting") - } - return nil -} diff --git a/go/vt/vtgr/controller/repair_test.go b/go/vt/vtgr/controller/repair_test.go deleted file mode 100644 index ada1def2cff..00000000000 --- a/go/vt/vtgr/controller/repair_test.go +++ /dev/null @@ -1,1355 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "context" - "errors" - "fmt" - "math/rand" - "strconv" - "strings" - "sync" - "testing" - "time" - - "vitess.io/vitess/go/mysql" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" - "vitess.io/vitess/go/vt/vtgr/config" - "vitess.io/vitess/go/vt/vtgr/db" - "vitess.io/vitess/go/vt/vtgr/inst" - - gomock "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" -) - -const repairGroupSize = 3 - -func TestRepairShardHasNoGroup(t *testing.T) { - type data struct { - mysqlhost string - mysqlport int - groupName string - readOnly bool - groupInput []db.TestGroupState - ttype topodatapb.TabletType - } - var testcases = []struct { - name string - expectedCalls int - errorMsg string - inputs []data - }{ - {"shard without group", 1, "", []data{ - {testHost, testPort0, "", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"healthy shard", 0, "", []data{ - {testHost, testPort0, "group", false, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {testHost, testPort1, "group", true, []db.TestGroupState{ - {MemberHost: 
testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"no active member for group", 0, "", []data{ // this should rebootstrap a group by DiagnoseTypeShardHasInactiveGroup - {testHost, testPort0, "group", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "", false, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"raise error for unreachable primary", 0, "", []data{ // shoud be ShardHasInactiveGroup - {testHost, testPort0, "group", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: 
strconv.Itoa(testPort0), MemberState: "UNREACHABLE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"raise error without bootstrap with only one reachable node", 0, "vtgr repair: fail to diagnose ShardHasNoGroup with 1 nodes", []data{ - {"", 0, "group", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {"", testPort2, "", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"raise error when there are not enough members", 0, "vtgr repair: fail to diagnose ShardHasNoGroup with 1 nodes", []data{ - {testHost, testPort0, "", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - } - tablets := make(map[string]*topo.TabletInfo) - for _, tt := range testcases { - t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - inputMap := make(map[int]testGroupInput) - dbAgent. - EXPECT(). - // RepairShardHasNoGroup is fixed by calling BootstrapGroupLocked - BootstrapGroupLocked(gomock.Any()). 
- DoAndReturn(func(target *inst.InstanceKey) error { - if target.Hostname == "" || target.Port == 0 { - return errors.New("invalid mysql instance key") - } - input := inputMap[target.Port] - groupState := input.groupState - if len(groupState) == 1 && groupState[0].MemberState == "OFFLINE" { - groupState[0].MemberState = "ONLINE" - groupState[0].MemberRole = "PRIMARY" - groupState[0].MemberHost = target.Hostname - groupState[0].MemberPort = strconv.Itoa(target.Port) - input.groupState = groupState - } else { - for i, s := range groupState { - if s.MemberHost == target.Hostname { - s.MemberState = "ONLINE" - s.MemberRole = "PRIMARY" - groupState[i] = s - } - input.groupState = groupState - } - } - inputMap[target.Port] = input - return nil - }). - Times(tt.expectedCalls) - for i, input := range tt.inputs { - tablet := buildTabletInfo(uint32(testPort0+i), input.mysqlhost, testPort0+i, input.ttype, time.Now()) - testutil.AddTablet(ctx, t, ts, tablet.Tablet, nil) - if tablet.Type == topodatapb.TabletType_PRIMARY { - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet.Alias - return nil - }) - } - tablets[tablet.AliasString()] = tablet - inputMap[input.mysqlport] = testGroupInput{ - input.groupName, - input.readOnly, - 0, - input.groupInput, - nil, - } - dbAgent. - EXPECT(). - FetchGroupView(gomock.Eq(tablet.AliasString()), gomock.Any()). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - s := inputMap[target.Port] - view := db.BuildGroupView(alias, s.groupName, target.Hostname, target.Port, s.readOnly, s.checkResult, s.groupState) - return view, nil - }). - AnyTimes() - } - tmc. - EXPECT(). - Ping(gomock.Any(), gomock.Any()). - Return(nil). 
- AnyTimes() - cfg := &config.VTGRConfig{BootstrapGroupSize: repairGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - shard.UpdateTabletsInShardWithLock(ctx) - _, err := shard.Repair(ctx, DiagnoseTypeShardHasNoGroup) - if tt.errorMsg == "" { - assert.NoError(t, err) - } else { - assert.EqualError(t, err, tt.errorMsg) - } - }) - } -} - -func TestRepairShardHasInactiveGroup(t *testing.T) { - type data struct { - mysqlhost string - mysqlport int - groupName string - groupInput []db.TestGroupState - pingable bool - gtid mysql.GTIDSet - ttype topodatapb.TabletType - } - sid1 := "3e11fa47-71ca-11e1-9e33-c80aa9429562" - var testcases = []struct { - name string - errorMsg string - expectedCandidatePort int - rebootstrapSize int - inputs []data - }{ - {"shard has inactive group", "", testPort0, 0, []data{ - {testHost, testPort0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_PRIMARY}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"shard has inactive group and partial group name", "", testPort0, 0, []data{ - {testHost, testPort0, "", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, 
getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_PRIMARY}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"unreachable rebootstrap candidate", "vtgr repair: test_cell-0000017000 is unreachable", 0, 0, []data{ - {testHost, testPort0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, false, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_PRIMARY}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"inactive shard with empty gtid", "", testPort0, 0, []data{ - {testHost, testPort0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet("", ""), topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet("", ""), topodatapb.TabletType_REPLICA}, - }}, - {"shard has more than one group", "vtgr repair: fail to refreshSQLGroup: group has split brain", 0, 0, []data{ // vtgr raises error - {testHost, testPort0, "group1", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, 
getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "group2", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group1", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"shard has inconsistent gtids", "vtgr repair: found more than one failover candidates by GTID set for ks/0", 0, 0, []data{ // vtgr raises error - {testHost, testPort0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet("264a8230-67d2-11eb-acdd-0a8d91f24125", "1-9"), topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"error on one unreachable mysql", "vtgr repair: fail to diagnose ShardHasInactiveGroup with 2 nodes expecting 3", 0, 0, []data{ - {"", 0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-11"), topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, 
getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"error on one unreachable tablet", "vtgr repair: test_cell-0000017000 is unreachable", 0, 0, []data{ - {testHost, testPort0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, false, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"shard has active member", "", 0, 0, []data{ // vtgr sees an active node it should not try to bootstrap - {testHost, testPort0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "group", []db.TestGroupState{ - {MemberHost: "host_2", MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"shard has active member but more than one group", "vtgr repair: fail to refreshSQLGroup: group has split brain", 0, 0, []data{ // split brain should overweight active member diagnose - {testHost, testPort0, "group1", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, 
testPort1, "group1", []db.TestGroupState{ - {MemberHost: "host_2", MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group2", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"error on two unreachable mysql", "vtgr repair: fail to diagnose ShardHasInactiveGroup with 1 nodes expecting 3", 0, 0, []data{ - {"", 0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-11"), topodatapb.TabletType_REPLICA}, - {"", 0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"no error on two unreachable mysqls with allowUnhealthyNodeOnReboot", "", testPort2, 1, []data{ - {"", 0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-11"), topodatapb.TabletType_REPLICA}, - {"", 0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"shard with fewer than configured members can still rebootstrap", "", testPort0, 0, []data{ - {testHost, 
testPort0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - }}, - } - tablets := make(map[string]*topo.TabletInfo) - for _, tt := range testcases { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - expectedCalls := 0 - if tt.expectedCandidatePort != 0 { - expectedCalls = 1 - } - inputMap := make(map[int]testGroupInput) - pingable := make(map[string]bool) - var lock sync.Mutex - dbAgent. - EXPECT(). - // RepairShardHasNoGroup is fixed by calling RebootstrapGroupLocked - RebootstrapGroupLocked(&inst.InstanceKey{Hostname: testHost, Port: tt.expectedCandidatePort}, gomock.Any()). - DoAndReturn(func(target *inst.InstanceKey, name string) error { - if target.Hostname == "" || target.Port == 0 { - return errors.New("invalid mysql instance key") - } - input := inputMap[target.Port] - groupState := input.groupState - if len(groupState) == 1 && groupState[0].MemberState == "OFFLINE" { - groupState[0].MemberState = "ONLINE" - groupState[0].MemberRole = "PRIMARY" - groupState[0].MemberHost = target.Hostname - groupState[0].MemberPort = strconv.Itoa(target.Port) - input.groupState = groupState - } else { - for i, s := range groupState { - if s.MemberHost == target.Hostname { - s.MemberState = "ONLINE" - s.MemberRole = "PRIMARY" - groupState[i] = s - } - input.groupState = groupState - } - } - inputMap[target.Port] = input - if name != "group" { - return errors.New("unexpected group name") - } - return nil - }). 
- Times(expectedCalls) - for i, input := range tt.inputs { - tablet := buildTabletInfo(uint32(testPort0+i), input.mysqlhost, input.mysqlport, input.ttype, time.Now()) - testutil.AddTablet(ctx, t, ts, tablet.Tablet, nil) - if tablet.Type == topodatapb.TabletType_PRIMARY { - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet.Alias - return nil - }) - } - tablets[tablet.AliasString()] = tablet - inputMap[input.mysqlport] = testGroupInput{ - input.groupName, - false, - 0, - input.groupInput, - input.gtid, - } - pingable[tablet.Alias.String()] = input.pingable - dbAgent. - EXPECT(). - FetchGroupView(gomock.Eq(tablet.AliasString()), gomock.Any()). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - s := inputMap[target.Port] - view := db.BuildGroupView(alias, s.groupName, target.Hostname, target.Port, s.readOnly, s.checkResult, s.groupState) - return view, nil - }). - AnyTimes() - dbAgent. - EXPECT(). - FetchApplierGTIDSet(gomock.Any()). - DoAndReturn(func(target *inst.InstanceKey) (mysql.GTIDSet, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - return inputMap[target.Port].gtid, nil - }). - AnyTimes() - dbAgent. - EXPECT(). - StopGroupLocked(gomock.Any()). - DoAndReturn(func(target *inst.InstanceKey) error { - if target.Hostname == "" || target.Port == 0 { - return errors.New("invalid mysql instance key") - } - lock.Lock() - view := inputMap[target.Port] - view.groupState = []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(target.Port), MemberState: "OFFLINE", MemberRole: ""}, - } - inputMap[target.Port] = view - lock.Unlock() - return nil - }). - AnyTimes() - tmc. - EXPECT(). - Ping(gomock.Any(), gomock.Any()). 
- DoAndReturn(func(_ context.Context, t *topodatapb.Tablet) error { - if !pingable[t.Alias.String()] { - return errors.New("unreachable") - } - return nil - }). - AnyTimes() - } - cfg := &config.VTGRConfig{BootstrapGroupSize: repairGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - if tt.rebootstrapSize != 0 { - shard.OverrideRebootstrapGroupSize(tt.rebootstrapSize) - } - _, err := shard.Repair(ctx, DiagnoseTypeShardHasInactiveGroup) - if tt.errorMsg == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err, tt.errorMsg) - assert.True(t, strings.Contains(err.Error(), tt.errorMsg), err.Error()) - } - }) - } -} - -func TestRepairWrongPrimaryTablet(t *testing.T) { - type data struct { - mysqlport int - groupName string - groupInput []db.TestGroupState - ttype topodatapb.TabletType - } - - var testcases = []struct { - name string - errorMsg string - expectedCandidatePort int - shardPrimary string - inputs []data - }{ - {"fix no primary tablet in shard", "", testPort0, "", []data{ - {testPort0, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {testPort1, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {testPort2, "group", []db.TestGroupState{ - 
{MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"fix wrong primary tablet", "", testPort0, "test_cell-0000017001", []data{ - {testPort0, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {testPort1, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {testPort2, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"fix wrong primary tablet based on shard info", "", testPort0, "test_cell-0000017001", []data{ - {testPort0, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: 
strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {testPort1, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {testPort2, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"fix shard if there is an unreachable secondary", "", testPort0, "test_cell-0000017001", []data{ - {testPort0, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {testPort1, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {testPort2, 
"group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"diagnose as ShardHasInactiveGroup if quorum number of not online", "", 0, "test_cell-0000017001", []data{ - {testPort0, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "UNREACHABLE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {testPort1, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {testPort2, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "UNREACHABLE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"tolerate failed nodes", "", testPort0, "test_cell-0000017001", []data{ - {testPort0, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {0, "group", []db.TestGroupState{}, topodatapb.TabletType_PRIMARY}, - {0, "group", []db.TestGroupState{}, topodatapb.TabletType_REPLICA}, - }}, - {"raise error if all 
nodes failed", "", 0, "", []data{ // diagnose as DiagnoseTypeShardNetworkPartition - {0, "group", []db.TestGroupState{}, topodatapb.TabletType_REPLICA}, - {0, "group", []db.TestGroupState{}, topodatapb.TabletType_PRIMARY}, - {0, "group", []db.TestGroupState{}, topodatapb.TabletType_REPLICA}, - }}, - } - for _, tt := range testcases { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - tablets := make(map[string]*topo.TabletInfo) - tmc. - EXPECT(). - Ping(gomock.Any(), gomock.Any()). - Return(nil). - AnyTimes() - expectedCalls := 0 - if tt.expectedCandidatePort != 0 { - expectedCalls = 1 - } - var candidate *topo.TabletInfo - inputMap := make(map[string]testGroupInput) - for i, input := range tt.inputs { - tablet := buildTabletInfo(uint32(testPort0+i), testHost, input.mysqlport, input.ttype, time.Now()) - testutil.AddTablet(ctx, t, ts, tablet.Tablet, nil) - if tablet.AliasString() == tt.shardPrimary { - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet.Alias - return nil - }) - } - tablets[tablet.AliasString()] = tablet - inputMap[tablet.AliasString()] = testGroupInput{ - input.groupName, - false, - 0, - input.groupInput, - nil, - } - if expectedCalls > 0 && input.mysqlport == tt.expectedCandidatePort { - candidate = tablet - } - dbAgent. - EXPECT(). - FetchGroupView(gomock.Eq(tablet.AliasString()), gomock.Eq(&inst.InstanceKey{Hostname: testHost, Port: input.mysqlport})). 
- DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - s := inputMap[alias] - view := db.BuildGroupView(alias, s.groupName, target.Hostname, target.Port, s.readOnly, s.checkResult, s.groupState) - return view, nil - }). - AnyTimes() - } - if candidate != nil { - tmc. - EXPECT(). - ChangeType(gomock.Any(), gomock.Any(), topodatapb.TabletType_PRIMARY). - Return(nil). - Times(expectedCalls) - } - cfg := &config.VTGRConfig{BootstrapGroupSize: repairGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - _, err := shard.Repair(ctx, DiagnoseTypeWrongPrimaryTablet) - if tt.errorMsg == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err) - assert.True(t, strings.Contains(err.Error(), tt.errorMsg), err.Error()) - } - }) - } -} - -func TestRepairUnconnectedReplica(t *testing.T) { - type data struct { - alias string - port int - groupName string - readOnly bool - groupInput []db.TestGroupState - ttype topodatapb.TabletType - } - var testcases = []struct { - name string - errorMsg string - expectedCandidatePort int - inputs []data - }{ - {"fix unconnected replica tablet", "", testPort2, []data{ - {alias0, testPort0, "group", false, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, testPort1, "group", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - 
{alias2, testPort2, "", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"do nothing if shard has wrong primary tablet", "", 0, []data{ // this should be diagnosed as DiagnoseTypeWrongPrimaryTablet instead - {alias0, testPort0, "group", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, testPort1, "group", false, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, testPort2, "", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"fix replica in ERROR state", "", testPort2, []data{ - {alias0, testPort0, "group", false, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, testPort1, "group", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, testPort2, "group", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ERROR", 
MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"fix replica with two nodes in ERROR state", "", 0, []data{ // InsufficientGroupSize - {alias0, testPort0, "group", false, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, testPort1, "group", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, testPort2, "group", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - } - for _, tt := range testcases { - t.Run(tt.name, func(t *testing.T) { - rand.Seed(1) - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - tablets := make(map[string]*topo.TabletInfo) - tmc. - EXPECT(). - Ping(gomock.Any(), gomock.Any()). - Return(nil). - AnyTimes() - if tt.expectedCandidatePort != 0 { - dbAgent. - EXPECT(). - StopGroupLocked(gomock.Eq(&inst.InstanceKey{Hostname: testHost, Port: tt.expectedCandidatePort})). - Return(nil). - AnyTimes() - dbAgent. - EXPECT(). - JoinGroupLocked(gomock.Eq(&inst.InstanceKey{Hostname: testHost, Port: tt.expectedCandidatePort}), gomock.Any()). - Return(nil). 
- Times(1) - } - inputMap := make(map[string]testGroupInput) - for i, input := range tt.inputs { - tablet := buildTabletInfo(uint32(i), testHost, input.port, input.ttype, time.Now()) - testutil.AddTablet(ctx, t, ts, tablet.Tablet, nil) - if tablet.Type == topodatapb.TabletType_PRIMARY { - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet.Alias - return nil - }) - } - tablets[tablet.AliasString()] = tablet - inputMap[input.alias] = testGroupInput{ - input.groupName, - input.readOnly, - 0, - input.groupInput, - nil, - } - dbAgent. - EXPECT(). - FetchGroupView(gomock.Eq(tablet.AliasString()), gomock.Eq(&inst.InstanceKey{Hostname: testHost, Port: input.port})). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - s := inputMap[alias] - view := db.BuildGroupView(alias, s.groupName, target.Hostname, target.Port, s.readOnly, s.checkResult, s.groupState) - return view, nil - }). 
- AnyTimes() - } - cfg := &config.VTGRConfig{BootstrapGroupSize: repairGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - _, err := shard.Repair(ctx, DiagnoseTypeUnconnectedReplica) - if tt.errorMsg == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err) - assert.True(t, strings.Contains(err.Error(), tt.errorMsg), err.Error()) - } - }) - } -} - -func TestRepairUnreachablePrimary(t *testing.T) { - type data struct { - port int - pingalbe bool - gtid mysql.GTIDSet - ttype topodatapb.TabletType - } - sid := "3e11fa47-71ca-11e1-9e33-c80aa9429562" - var testcases = []struct { - name string - errorMsg string - expectedCandidatePort int - inputs []data - }{ - {"primary is unreachable", "", testPort1, []data{ - {testPort0, false, getMysql56GTIDSet(sid, "1-11"), topodatapb.TabletType_PRIMARY}, - {testPort1, true, getMysql56GTIDSet(sid, "1-10"), topodatapb.TabletType_REPLICA}, - {testPort2, true, getMysql56GTIDSet(sid, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"failover to reachable node when primary is unreachable", "", testPort2, []data{ - {testPort0, false, getMysql56GTIDSet(sid, "1-11"), topodatapb.TabletType_PRIMARY}, - {testPort1, false, getMysql56GTIDSet(sid, "1-10"), topodatapb.TabletType_REPLICA}, - {testPort2, true, getMysql56GTIDSet(sid, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"do nothing if replica is unreachable", "", 0, []data{ - {testPort0, true, getMysql56GTIDSet(sid, "1-10"), topodatapb.TabletType_PRIMARY}, - {testPort1, false, getMysql56GTIDSet(sid, "1-10"), topodatapb.TabletType_REPLICA}, - {testPort2, false, getMysql56GTIDSet(sid, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"raise error if gtid divergence", "vtgr repair: found more than one failover candidates by GTID set for ks/0", 0, []data{ - {testPort0, false, getMysql56GTIDSet(sid, "1-10"), topodatapb.TabletType_PRIMARY}, - {testPort1, true, 
getMysql56GTIDSet("264a8230-67d2-11eb-acdd-0a8d91f24125", "1-10"), topodatapb.TabletType_REPLICA}, - {testPort2, true, getMysql56GTIDSet(sid, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - } - for _, tt := range testcases { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - dbAgent. - EXPECT(). - FetchGroupView(gomock.Any(), gomock.Any()). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - return db.BuildGroupView(alias, "group", target.Hostname, target.Port, false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }), nil - }). - AnyTimes() - expectedCalls := 0 - if tt.expectedCandidatePort != 0 { - expectedCalls = 1 - } - dbAgent. - EXPECT(). - Failover(&inst.InstanceKey{Hostname: testHost, Port: tt.expectedCandidatePort}). - Return(nil). - Times(expectedCalls) - tmc. - EXPECT(). - ChangeType(gomock.Any(), gomock.Any(), topodatapb.TabletType_PRIMARY). - Return(nil). 
- Times(expectedCalls) - status := make(map[int32]struct { - pingalbe bool - gtid mysql.GTIDSet - }) - for i, input := range tt.inputs { - tablet := buildTabletInfo(uint32(i), testHost, input.port, input.ttype, time.Now()) - testutil.AddTablet(ctx, t, ts, tablet.Tablet, nil) - if tablet.Type == topodatapb.TabletType_PRIMARY { - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet.Alias - return nil - }) - } - status[tablet.MysqlPort] = struct { - pingalbe bool - gtid mysql.GTIDSet - }{ - input.pingalbe, - input.gtid, - } - dbAgent. - EXPECT(). - FetchApplierGTIDSet(gomock.Eq(&inst.InstanceKey{Hostname: testHost, Port: input.port})). - DoAndReturn(func(target *inst.InstanceKey) (mysql.GTIDSet, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - return status[int32(target.Port)].gtid, nil - }). - AnyTimes() - tmc. - EXPECT(). - Ping(gomock.Any(), gomock.Any()). - DoAndReturn(func(_ context.Context, t *topodatapb.Tablet) error { - if !status[t.MysqlPort].pingalbe { - return errors.New("unreachable") - } - return nil - }). 
- AnyTimes() - } - cfg := &config.VTGRConfig{BootstrapGroupSize: repairGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - _, err := shard.Repair(ctx, DiagnoseTypeUnreachablePrimary) - if tt.errorMsg == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err, tt.errorMsg) - assert.True(t, strings.Contains(err.Error(), tt.errorMsg)) - } - }) - } -} - -func TestRepairInsufficientGroupSize(t *testing.T) { - type data struct { - alias string - readOnly bool - groupInput []db.TestGroupState - ttype topodatapb.TabletType - } - var testcases = []struct { - name string - errorMsg string - expectedCandidatePort int - inputs []data - }{ - {"fix insufficient group expectedBootstrapSize", "", testPort0, []data{ - {alias0, false, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: 
"RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - } - for _, tt := range testcases { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - tablets := make(map[string]*topo.TabletInfo) - tmc. - EXPECT(). - Ping(gomock.Any(), gomock.Any()). - Return(nil). - AnyTimes() - if tt.expectedCandidatePort != 0 { - dbAgent. - EXPECT(). - SetReadOnly(gomock.Eq(&inst.InstanceKey{Hostname: testHost, Port: tt.expectedCandidatePort}), true). - Return(nil). - Times(1) - } - inputMap := make(map[string]testGroupInput) - for i, input := range tt.inputs { - tablet := buildTabletInfo(uint32(i), testHost, testPort0+i, input.ttype, time.Now()) - testutil.AddTablet(ctx, t, ts, tablet.Tablet, nil) - if tablet.Type == topodatapb.TabletType_PRIMARY { - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet.Alias - return nil - }) - } - tablets[tablet.AliasString()] = tablet - inputMap[input.alias] = testGroupInput{ - "group", - input.readOnly, - 0, - input.groupInput, - nil, - } - dbAgent. - EXPECT(). - FetchGroupView(gomock.Any(), gomock.Any()). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - s := inputMap[alias] - view := db.BuildGroupView(alias, s.groupName, target.Hostname, target.Port, s.readOnly, s.checkResult, s.groupState) - return view, nil - }). 
- AnyTimes() - } - cfg := &config.VTGRConfig{BootstrapGroupSize: repairGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - _, err := shard.Repair(ctx, DiagnoseTypeInsufficientGroupSize) - if tt.errorMsg == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err) - assert.True(t, strings.Contains(err.Error(), tt.errorMsg), err.Error()) - } - }) - } -} - -func TestRepairReadOnlyShard(t *testing.T) { - type data struct { - alias string - port int - readOnly bool - groupInput []db.TestGroupState - ttype topodatapb.TabletType - } - var testcases = []struct { - name string - errorMsg string - expectedCandidatePort int - inputs []data - }{ - {"fix readonly shard", "", testPort0, []data{ - {alias0, testPort0, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, testPort1, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, testPort2, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: 
"RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"do nothing if primary is not read only", "", 0, []data{ - {alias0, testPort0, false, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, testPort1, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, testPort2, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - } - for _, tt := range testcases { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - tablets := make(map[string]*topo.TabletInfo) - tmc. - EXPECT(). - Ping(gomock.Any(), gomock.Any()). - Return(nil). - AnyTimes() - if tt.expectedCandidatePort != 0 { - dbAgent. - EXPECT(). 
- SetReadOnly(gomock.Eq(&inst.InstanceKey{Hostname: testHost, Port: tt.expectedCandidatePort}), false). - Return(nil). - Times(1) - } - inputMap := make(map[string]testGroupInput) - for i, input := range tt.inputs { - tablet := buildTabletInfo(uint32(i), testHost, input.port, input.ttype, time.Now()) - testutil.AddTablet(ctx, t, ts, tablet.Tablet, nil) - if tablet.Type == topodatapb.TabletType_PRIMARY { - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet.Alias - return nil - }) - } - tablets[tablet.AliasString()] = tablet - inputMap[input.alias] = testGroupInput{ - "group", - input.readOnly, - 0, - input.groupInput, - nil, - } - dbAgent. - EXPECT(). - FetchGroupView(gomock.Eq(tablet.AliasString()), gomock.Any()). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - s := inputMap[alias] - view := db.BuildGroupView(alias, s.groupName, target.Hostname, target.Port, s.readOnly, s.checkResult, s.groupState) - return view, nil - }). 
- AnyTimes() - } - cfg := &config.VTGRConfig{BootstrapGroupSize: repairGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - _, err := shard.Repair(ctx, DiagnoseTypeReadOnlyShard) - if tt.errorMsg == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err) - assert.True(t, strings.Contains(err.Error(), tt.errorMsg), err.Error()) - } - }) - } -} - -func TestRepairBackoffError(t *testing.T) { - type data struct { - alias string - mysqlhost string - mysqlport int - groupName string - groupInput []db.TestGroupState - pingable bool - gtid mysql.GTIDSet - ttype topodatapb.TabletType - } - sid := "3e11fa47-71ca-11e1-9e33-c80aa9429562" - var testcases = []struct { - name string - errorMsg string - expectedCandidatePort int - diagnose DiagnoseType - inputs []data - }{ - {"shard has network partition", "", testPort0, DiagnoseTypeBackoffError, []data{ - {alias0, testHost, testPort0, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "UNREACHABLE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - }, true, getMysql56GTIDSet(sid, "1-10"), topodatapb.TabletType_REPLICA}, - {alias1, testHost, testPort1, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid, "1-9"), topodatapb.TabletType_REPLICA}, - {alias2, testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"shard bootstrap in progress", "", testPort0, 
DiagnoseTypeBootstrapBackoff, []data{ - {alias0, testHost, testPort0, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, true, getMysql56GTIDSet(sid, "1-10"), topodatapb.TabletType_REPLICA}, - {alias1, testHost, testPort1, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid, "1-9"), topodatapb.TabletType_REPLICA}, - {alias2, testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - } - tablets := make(map[string]*topo.TabletInfo) - for _, tt := range testcases { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - expectedCalls := 0 - if tt.expectedCandidatePort != 0 { - expectedCalls = 1 - } - inputMap := make(map[int]testGroupInput) - pingable := make(map[string]bool) - var lock sync.Mutex - dbAgent. - EXPECT(). - RebootstrapGroupLocked(&inst.InstanceKey{Hostname: testHost, Port: tt.expectedCandidatePort}, "group"). 
- DoAndReturn(func(target *inst.InstanceKey, name string) error { - if target.Hostname == "" || target.Port == 0 { - return errors.New("invalid mysql instance key") - } - input := inputMap[target.Port] - groupState := input.groupState - if len(groupState) == 1 && groupState[0].MemberState == "OFFLINE" { - groupState[0].MemberState = "ONLINE" - groupState[0].MemberRole = "PRIMARY" - groupState[0].MemberHost = target.Hostname - groupState[0].MemberPort = strconv.Itoa(target.Port) - input.groupState = groupState - } else { - for i, s := range groupState { - if s.MemberHost == target.Hostname { - s.MemberState = "ONLINE" - s.MemberRole = "PRIMARY" - groupState[i] = s - } - input.groupState = groupState - } - } - inputMap[target.Port] = input - return nil - }). - Times(expectedCalls) - for i, input := range tt.inputs { - tablet := buildTabletInfo(uint32(i), input.mysqlhost, input.mysqlport, input.ttype, time.Now()) - testutil.AddTablet(ctx, t, ts, tablet.Tablet, nil) - if tablet.Type == topodatapb.TabletType_PRIMARY { - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet.Alias - return nil - }) - } - tablets[tablet.AliasString()] = tablet - inputMap[input.mysqlport] = testGroupInput{ - input.groupName, - false, - 0, - input.groupInput, - input.gtid, - } - pingable[input.alias] = input.pingable - dbAgent. - EXPECT(). - FetchGroupView(gomock.Eq(tablet.AliasString()), gomock.Any()). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - s := inputMap[target.Port] - view := db.BuildGroupView(alias, s.groupName, target.Hostname, target.Port, s.readOnly, s.checkResult, s.groupState) - return view, nil - }). - AnyTimes() - dbAgent. - EXPECT(). - FetchApplierGTIDSet(gomock.Any()). 
- DoAndReturn(func(target *inst.InstanceKey) (mysql.GTIDSet, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - return inputMap[target.Port].gtid, nil - }). - AnyTimes() - dbAgent. - EXPECT(). - StopGroupLocked(gomock.Any()). - DoAndReturn(func(target *inst.InstanceKey) error { - lock.Lock() - view := inputMap[target.Port] - view.groupState = []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(target.Port), MemberState: "OFFLINE", MemberRole: ""}, - } - inputMap[target.Port] = view - lock.Unlock() - return nil - }). - AnyTimes() - tmc. - EXPECT(). - Ping(gomock.Any(), gomock.Any()). - DoAndReturn(func(_ context.Context, t *topodatapb.Tablet) error { - if !pingable[input.alias] { - return errors.New("unreachable") - } - return nil - }). - AnyTimes() - } - cfg := &config.VTGRConfig{BootstrapGroupSize: repairGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - shard.lastDiagnoseResult = tt.diagnose - _, err := shard.Repair(ctx, tt.diagnose) - if tt.errorMsg == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err, tt.errorMsg) - assert.True(t, strings.Contains(err.Error(), tt.errorMsg), err.Error()) - } - }) - } -} - -func getMysql56GTIDSet(sid, interval string) mysql.GTIDSet { - input := fmt.Sprintf("%s:%s", sid, interval) - pos, _ := mysql.ParsePosition(mysql.Mysql56FlavorID, input) - return pos.GTIDSet -} diff --git a/go/vt/vtgr/db/db.go b/go/vt/vtgr/db/db.go deleted file mode 100644 index f9a0ab2b478..00000000000 --- a/go/vt/vtgr/db/db.go +++ /dev/null @@ -1,381 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -/* - This file has been copied over from VTOrc package -*/ - -package db - -import ( - "database/sql" - "fmt" - "strings" - "sync" - "time" - - "vitess.io/vitess/go/vt/external/golib/sqlutils" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/vtgr/config" -) - -var ( - EmptyArgs []any - Db DB = (*vtorcDB)(nil) -) - -var mysqlURI string -var dbMutex sync.Mutex - -type DB interface { - QueryOrchestrator(query string, argsArray []any, onRow func(sqlutils.RowMap) error) error -} - -type vtorcDB struct { -} - -var _ DB = (*vtorcDB)(nil) - -func (m *vtorcDB) QueryOrchestrator(query string, argsArray []any, onRow func(sqlutils.RowMap) error) error { - return QueryOrchestrator(query, argsArray, onRow) -} - -type DummySQLResult struct { -} - -func (dummyRes DummySQLResult) LastInsertId() (int64, error) { - return 0, nil -} - -func (dummyRes DummySQLResult) RowsAffected() (int64, error) { - return 1, nil -} - -func getMySQLURI() string { - dbMutex.Lock() - defer dbMutex.Unlock() - if mysqlURI != "" { - return mysqlURI - } - mysqlURI := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?timeout=%ds&readTimeout=%ds&rejectReadOnly=%t&interpolateParams=true", - config.Config.MySQLOrchestratorUser, - config.Config.MySQLOrchestratorPassword, - config.Config.MySQLOrchestratorHost, - config.Config.MySQLOrchestratorPort, - config.Config.MySQLOrchestratorDatabase, - config.Config.MySQLConnectTimeoutSeconds, - config.Config.MySQLOrchestratorReadTimeoutSeconds, - config.Config.MySQLOrchestratorRejectReadOnly, - ) - if config.Config.MySQLOrchestratorUseMutualTLS { - mysqlURI, _ = 
SetupMySQLOrchestratorTLS(mysqlURI) - } - return mysqlURI -} - -// OpenDiscovery returns a DB instance to access a topology instance. -// It has lower read timeout than OpenTopology and is intended to -// be used with low-latency discovery queries. -func OpenDiscovery(host string, port int) (*sql.DB, error) { - return openTopology(host, port, config.Config.MySQLDiscoveryReadTimeoutSeconds) -} - -// OpenTopology returns a DB instance to access a topology instance. -func OpenTopology(host string, port int) (*sql.DB, error) { - return openTopology(host, port, config.Config.MySQLTopologyReadTimeoutSeconds) -} - -func openTopology(host string, port int, readTimeout int) (db *sql.DB, err error) { - uri := fmt.Sprintf("%s:%s@tcp(%s:%d)/?timeout=%ds&readTimeout=%ds&interpolateParams=true", - config.Config.MySQLTopologyUser, - config.Config.MySQLTopologyPassword, - host, port, - config.Config.MySQLConnectTimeoutSeconds, - readTimeout, - ) - - if config.Config.MySQLTopologyUseMutualTLS || - (config.Config.MySQLTopologyUseMixedTLS && requiresTLS(host, port, uri)) { - if uri, err = SetupMySQLTopologyTLS(uri); err != nil { - return nil, err - } - } - if db, _, err = sqlutils.GetDB(uri); err != nil { - return nil, err - } - if config.Config.MySQLConnectionLifetimeSeconds > 0 { - db.SetConnMaxLifetime(time.Duration(config.Config.MySQLConnectionLifetimeSeconds) * time.Second) - } - db.SetMaxOpenConns(config.MySQLTopologyMaxPoolConnections) - db.SetMaxIdleConns(config.MySQLTopologyMaxPoolConnections) - return db, err -} - -func openOrchestratorMySQLGeneric() (db *sql.DB, fromCache bool, err error) { - uri := fmt.Sprintf("%s:%s@tcp(%s:%d)/?timeout=%ds&readTimeout=%ds&interpolateParams=true", - config.Config.MySQLOrchestratorUser, - config.Config.MySQLOrchestratorPassword, - config.Config.MySQLOrchestratorHost, - config.Config.MySQLOrchestratorPort, - config.Config.MySQLConnectTimeoutSeconds, - config.Config.MySQLOrchestratorReadTimeoutSeconds, - ) - if 
config.Config.MySQLOrchestratorUseMutualTLS { - uri, _ = SetupMySQLOrchestratorTLS(uri) - } - return sqlutils.GetDB(uri) -} - -func IsSQLite() bool { - return config.Config.IsSQLite() -} - -// OpenTopology returns the DB instance for the orchestrator backed database -func OpenOrchestrator() (db *sql.DB, err error) { - var fromCache bool - if IsSQLite() { - db, fromCache, err = sqlutils.GetSQLiteDB(config.Config.SQLite3DataFile) - if err == nil && !fromCache { - log.Infof("Connected to orchestrator backend: sqlite on %v", config.Config.SQLite3DataFile) - } - if db != nil { - db.SetMaxOpenConns(1) - db.SetMaxIdleConns(1) - } - } else { - if db, fromCache, err := openOrchestratorMySQLGeneric(); err != nil { - log.Errorf(err.Error()) - return db, err - } else if !fromCache { - // first time ever we talk to MySQL - query := fmt.Sprintf("create database if not exists %s", config.Config.MySQLOrchestratorDatabase) - if _, err := db.Exec(query); err != nil { - log.Errorf(err.Error()) - return db, err - } - } - db, fromCache, err = sqlutils.GetDB(getMySQLURI()) - if err == nil && !fromCache { - // do not show the password but do show what we connect to. 
- safeMySQLURI := fmt.Sprintf("%s:?@tcp(%s:%d)/%s?timeout=%ds", config.Config.MySQLOrchestratorUser, - config.Config.MySQLOrchestratorHost, config.Config.MySQLOrchestratorPort, config.Config.MySQLOrchestratorDatabase, config.Config.MySQLConnectTimeoutSeconds) - log.Infof("Connected to orchestrator backend: %v", safeMySQLURI) - if config.Config.MySQLOrchestratorMaxPoolConnections > 0 { - log.Infof("Orchestrator pool SetMaxOpenConns: %d", config.Config.MySQLOrchestratorMaxPoolConnections) - db.SetMaxOpenConns(config.Config.MySQLOrchestratorMaxPoolConnections) - } - if config.Config.MySQLConnectionLifetimeSeconds > 0 { - db.SetConnMaxLifetime(time.Duration(config.Config.MySQLConnectionLifetimeSeconds) * time.Second) - } - } - } - if err == nil && !fromCache { - if !config.Config.SkipOrchestratorDatabaseUpdate { - initOrchestratorDB(db) - } - // A low value here will trigger reconnects which could - // make the number of backend connections hit the tcp - // limit. That's bad. I could make this setting dynamic - // but then people need to know which value to use. For now - // allow up to 25% of MySQLOrchestratorMaxPoolConnections - // to be idle. That should provide a good number which - // does not keep the maximum number of connections open but - // at the same time does not trigger disconnections and - // reconnections too frequently. 
- maxIdleConns := int(config.Config.MySQLOrchestratorMaxPoolConnections * 25 / 100) - if maxIdleConns < 10 { - maxIdleConns = 10 - } - log.Infof("Connecting to backend %s:%d: maxConnections: %d, maxIdleConns: %d", - config.Config.MySQLOrchestratorHost, - config.Config.MySQLOrchestratorPort, - config.Config.MySQLOrchestratorMaxPoolConnections, - maxIdleConns) - db.SetMaxIdleConns(maxIdleConns) - } - return db, err -} - -func translateStatement(statement string) (string, error) { - if IsSQLite() { - statement = sqlutils.ToSqlite3Dialect(statement) - } - return statement, nil -} - -// versionIsDeployed checks if given version has already been deployed -func versionIsDeployed(db *sql.DB) (result bool, err error) { - query := ` - select - count(*) as is_deployed - from - orchestrator_db_deployments - where - deployed_version = ? - ` - err = db.QueryRow(query, config.RuntimeCLIFlags.ConfiguredVersion).Scan(&result) - // err means the table 'orchestrator_db_deployments' does not even exist, in which case we proceed - // to deploy. - // If there's another error to this, like DB gone bad, then we're about to find out anyway. - return result, err -} - -// registerOrchestratorDeployment updates the orchestrator_metadata table upon successful deployment -func registerOrchestratorDeployment(db *sql.DB) error { - query := ` - replace into orchestrator_db_deployments ( - deployed_version, deployed_timestamp - ) values ( - ?, NOW() - ) - ` - if _, err := execInternal(db, query, config.RuntimeCLIFlags.ConfiguredVersion); err != nil { - log.Fatalf("Unable to write to orchestrator_metadata: %+v", err) - } - log.Infof("Migrated database schema to version [%+v]", config.RuntimeCLIFlags.ConfiguredVersion) - return nil -} - -// deployStatements will issue given sql queries that are not already known to be deployed. -// This iterates both lists (to-run and already-deployed) and also verifies no contraditions. 
-func deployStatements(db *sql.DB, queries []string) error { - tx, err := db.Begin() - if err != nil { - log.Fatal(err.Error()) - } - // Ugly workaround ahead. - // Origin of this workaround is the existence of some "timestamp NOT NULL," column definitions, - // where in NO_ZERO_IN_DATE,NO_ZERO_DATE sql_mode are invalid (since default is implicitly "0") - // This means installation of orchestrator fails on such configured servers, and in particular on 5.7 - // where this setting is the dfault. - // For purpose of backwards compatability, what we do is force sql_mode to be more relaxed, create the schemas - // along with the "invalid" definition, and then go ahead and fix those definitions via following ALTER statements. - // My bad. - originalSQLMode := "" - if config.Config.IsMySQL() { - _ = tx.QueryRow(`select @@session.sql_mode`).Scan(&originalSQLMode) - if _, err := tx.Exec(`set @@session.sql_mode=REPLACE(@@session.sql_mode, 'NO_ZERO_DATE', '')`); err != nil { - log.Fatal(err.Error()) - } - if _, err := tx.Exec(`set @@session.sql_mode=REPLACE(@@session.sql_mode, 'NO_ZERO_IN_DATE', '')`); err != nil { - log.Fatal(err.Error()) - } - } - for _, query := range queries { - query, err := translateStatement(query) - if err != nil { - log.Fatalf("Cannot initiate orchestrator: %+v; query=%+v", err, query) - return err - } - if _, err := tx.Exec(query); err != nil { - if strings.Contains(err.Error(), "syntax error") { - log.Fatalf("Cannot initiate orchestrator: %+v; query=%+v", err, query) - return err - } - if !sqlutils.IsAlterTable(query) && !sqlutils.IsCreateIndex(query) && !sqlutils.IsDropIndex(query) { - log.Fatalf("Cannot initiate orchestrator: %+v; query=%+v", err, query) - return err - } - if !strings.Contains(err.Error(), "duplicate column name") && - !strings.Contains(err.Error(), "Duplicate column name") && - !strings.Contains(err.Error(), "check that column/key exists") && - !strings.Contains(err.Error(), "already exists") && - !strings.Contains(err.Error(), 
"Duplicate key name") { - log.Errorf("Error initiating orchestrator: %+v; query=%+v", err, query) - } - } - } - if config.Config.IsMySQL() { - if _, err := tx.Exec(`set session sql_mode=?`, originalSQLMode); err != nil { - log.Fatal(err.Error()) - } - } - if err := tx.Commit(); err != nil { - log.Fatal(err.Error()) - } - return nil -} - -// initOrchestratorDB attempts to create/upgrade the orchestrator backend database. It is created once in the -// application's lifetime. -func initOrchestratorDB(db *sql.DB) error { - log.Info("Initializing orchestrator") - - versionAlreadyDeployed, err := versionIsDeployed(db) - if versionAlreadyDeployed && config.RuntimeCLIFlags.ConfiguredVersion != "" && err == nil { - // Already deployed with this version - return nil - } - if config.Config.PanicIfDifferentDatabaseDeploy && config.RuntimeCLIFlags.ConfiguredVersion != "" && !versionAlreadyDeployed { - log.Fatalf("PanicIfDifferentDatabaseDeploy is set. Configured version %s is not the version found in the database", config.RuntimeCLIFlags.ConfiguredVersion) - } - log.Info("Migrating database schema") - deployStatements(db, generateSQLBase) - deployStatements(db, generateSQLPatches) - registerOrchestratorDeployment(db) - - if IsSQLite() { - ExecOrchestrator(`PRAGMA journal_mode = WAL`) - ExecOrchestrator(`PRAGMA synchronous = NORMAL`) - } - - return nil -} - -// execInternal -func execInternal(db *sql.DB, query string, args ...any) (sql.Result, error) { - var err error - query, err = translateStatement(query) - if err != nil { - return nil, err - } - res, err := sqlutils.ExecNoPrepare(db, query, args...) - return res, err -} - -// ExecOrchestrator will execute given query on the orchestrator backend database. 
-func ExecOrchestrator(query string, args ...any) (sql.Result, error) { - var err error - query, err = translateStatement(query) - if err != nil { - return nil, err - } - db, err := OpenOrchestrator() - if err != nil { - return nil, err - } - res, err := sqlutils.ExecNoPrepare(db, query, args...) - return res, err -} - -// QueryOrchestrator -func QueryOrchestrator(query string, argsArray []any, onRow func(sqlutils.RowMap) error) error { - query, err := translateStatement(query) - if err != nil { - log.Fatalf("Cannot query orchestrator: %+v; query=%+v", err, query) - return err - } - db, err := OpenOrchestrator() - if err != nil { - return err - } - - if err = sqlutils.QueryRowsMap(db, query, onRow, argsArray...); err != nil { - log.Warning(err.Error()) - } - - return err -} diff --git a/go/vt/vtgr/db/generate_base.go b/go/vt/vtgr/db/generate_base.go deleted file mode 100644 index d1923223e5d..00000000000 --- a/go/vt/vtgr/db/generate_base.go +++ /dev/null @@ -1,862 +0,0 @@ -/* - Copyright 2017 Shlomi Noach, GitHub Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -/* - This file has been copied over from VTOrc package -*/ - -package db - -// generateSQLBase & generateSQLPatches are lists of SQL statements required to build the orchestrator backend -var generateSQLBase = []string{ - ` - CREATE TABLE IF NOT EXISTS database_instance ( - hostname varchar(128) CHARACTER SET ascii NOT NULL, - port smallint(5) unsigned NOT NULL, - last_checked timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - last_seen timestamp NULL DEFAULT NULL, - server_id int(10) unsigned NOT NULL, - version varchar(128) CHARACTER SET ascii NOT NULL, - binlog_format varchar(16) CHARACTER SET ascii NOT NULL, - log_bin tinyint(3) unsigned NOT NULL, - log_replica_updates tinyint(3) unsigned NOT NULL, - binary_log_file varchar(128) CHARACTER SET ascii NOT NULL, - binary_log_pos bigint(20) unsigned NOT NULL, - source_host varchar(128) CHARACTER SET ascii NOT NULL, - source_port smallint(5) unsigned NOT NULL, - replica_sql_running tinyint(3) unsigned NOT NULL, - replica_io_running tinyint(3) unsigned NOT NULL, - source_log_file varchar(128) CHARACTER SET ascii NOT NULL, - read_source_log_pos bigint(20) unsigned NOT NULL, - relay_source_log_file varchar(128) CHARACTER SET ascii NOT NULL, - exec_source_log_pos bigint(20) unsigned NOT NULL, - replication_lag_seconds bigint(20) unsigned DEFAULT NULL, - replica_lag_seconds bigint(20) unsigned DEFAULT NULL, - num_replica_hosts int(10) unsigned NOT NULL, - replica_hosts text CHARACTER SET ascii NOT NULL, - cluster_name varchar(128) CHARACTER SET ascii NOT NULL, - PRIMARY KEY (hostname,port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX cluster_name_idx ON database_instance - `, - ` - CREATE INDEX cluster_name_idx_database_instance ON database_instance(cluster_name) - `, - ` - DROP INDEX last_checked_idx ON database_instance - `, - ` - CREATE INDEX last_checked_idx_database_instance ON database_instance(last_checked) - `, - ` - DROP INDEX last_seen_idx ON database_instance - `, - ` - CREATE INDEX 
last_seen_idx_database_instance ON database_instance(last_seen) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_maintenance ( - database_instance_maintenance_id int(10) unsigned NOT NULL AUTO_INCREMENT, - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - maintenance_active tinyint(4) DEFAULT NULL, - begin_timestamp timestamp NULL DEFAULT NULL, - end_timestamp timestamp NULL DEFAULT NULL, - owner varchar(128) CHARACTER SET utf8 NOT NULL, - reason text CHARACTER SET utf8 NOT NULL, - PRIMARY KEY (database_instance_maintenance_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX maintenance_uidx ON database_instance_maintenance - `, - ` - CREATE UNIQUE INDEX maintenance_uidx_database_instance_maintenance ON database_instance_maintenance (maintenance_active, hostname, port) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_long_running_queries ( - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - process_id bigint(20) NOT NULL, - process_started_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - process_user varchar(16) CHARACTER SET utf8 NOT NULL, - process_host varchar(128) CHARACTER SET utf8 NOT NULL, - process_db varchar(128) CHARACTER SET utf8 NOT NULL, - process_command varchar(16) CHARACTER SET utf8 NOT NULL, - process_time_seconds int(11) NOT NULL, - process_state varchar(128) CHARACTER SET utf8 NOT NULL, - process_info varchar(1024) CHARACTER SET utf8 NOT NULL, - PRIMARY KEY (hostname,port,process_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX process_started_at_idx ON database_instance_long_running_queries - `, - ` - CREATE INDEX process_started_at_idx_database_instance_long_running_queries ON database_instance_long_running_queries (process_started_at) - `, - ` - CREATE TABLE IF NOT EXISTS audit ( - audit_id bigint(20) unsigned NOT NULL AUTO_INCREMENT, - audit_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - audit_type varchar(128) CHARACTER SET ascii NOT NULL, - 
hostname varchar(128) CHARACTER SET ascii NOT NULL DEFAULT '', - port smallint(5) unsigned NOT NULL, - message text CHARACTER SET utf8 NOT NULL, - PRIMARY KEY (audit_id) - ) ENGINE=InnoDB DEFAULT CHARSET=latin1 - `, - ` - DROP INDEX audit_timestamp_idx ON audit - `, - ` - CREATE INDEX audit_timestamp_idx_audit ON audit (audit_timestamp) - `, - ` - DROP INDEX host_port_idx ON audit - `, - ` - CREATE INDEX host_port_idx_audit ON audit (hostname, port, audit_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS host_agent ( - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - token varchar(128) NOT NULL, - last_submitted timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - last_checked timestamp NULL DEFAULT NULL, - last_seen timestamp NULL DEFAULT NULL, - mysql_port smallint(5) unsigned DEFAULT NULL, - count_mysql_snapshots smallint(5) unsigned NOT NULL, - PRIMARY KEY (hostname) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX token_idx ON host_agent - `, - ` - CREATE INDEX token_idx_host_agent ON host_agent (token) - `, - ` - DROP INDEX last_submitted_idx ON host_agent - `, - ` - CREATE INDEX last_submitted_idx_host_agent ON host_agent (last_submitted) - `, - ` - DROP INDEX last_checked_idx ON host_agent - `, - ` - CREATE INDEX last_checked_idx_host_agent ON host_agent (last_checked) - `, - ` - DROP INDEX last_seen_idx ON host_agent - `, - ` - CREATE INDEX last_seen_idx_host_agent ON host_agent (last_seen) - `, - ` - CREATE TABLE IF NOT EXISTS agent_seed ( - agent_seed_id int(10) unsigned NOT NULL AUTO_INCREMENT, - target_hostname varchar(128) NOT NULL, - source_hostname varchar(128) NOT NULL, - start_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - end_timestamp timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - is_complete tinyint(3) unsigned NOT NULL DEFAULT '0', - is_successful tinyint(3) unsigned NOT NULL DEFAULT '0', - PRIMARY KEY (agent_seed_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX target_hostname_idx 
ON agent_seed - `, - ` - CREATE INDEX target_hostname_idx_agent_seed ON agent_seed (target_hostname,is_complete) - `, - ` - DROP INDEX source_hostname_idx ON agent_seed - `, - ` - CREATE INDEX source_hostname_idx_agent_seed ON agent_seed (source_hostname,is_complete) - `, - ` - DROP INDEX start_timestamp_idx ON agent_seed - `, - ` - CREATE INDEX start_timestamp_idx_agent_seed ON agent_seed (start_timestamp) - `, - ` - DROP INDEX is_complete_idx ON agent_seed - `, - ` - CREATE INDEX is_complete_idx_agent_seed ON agent_seed (is_complete,start_timestamp) - `, - ` - DROP INDEX is_successful_idx ON agent_seed - `, - ` - CREATE INDEX is_successful_idx_agent_seed ON agent_seed (is_successful, start_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS agent_seed_state ( - agent_seed_state_id int(10) unsigned NOT NULL AUTO_INCREMENT, - agent_seed_id int(10) unsigned NOT NULL, - state_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - state_action varchar(127) NOT NULL, - error_message varchar(255) NOT NULL, - PRIMARY KEY (agent_seed_state_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX agent_seed_idx ON agent_seed_state - `, - ` - CREATE INDEX agent_seed_idx_agent_seed_state ON agent_seed_state (agent_seed_id, state_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS host_attributes ( - hostname varchar(128) NOT NULL, - attribute_name varchar(128) NOT NULL, - attribute_value varchar(128) NOT NULL, - submit_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - expire_timestamp timestamp NULL DEFAULT NULL, - PRIMARY KEY (hostname,attribute_name) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX attribute_name_idx ON host_attributes - `, - ` - CREATE INDEX attribute_name_idx_host_attributes ON host_attributes (attribute_name) - `, - ` - DROP INDEX attribute_value_idx ON host_attributes - `, - ` - CREATE INDEX attribute_value_idx_host_attributes ON host_attributes (attribute_value) - `, - ` - DROP INDEX submit_timestamp_idx ON host_attributes - 
`, - ` - CREATE INDEX submit_timestamp_idx_host_attributes ON host_attributes (submit_timestamp) - `, - ` - DROP INDEX expire_timestamp_idx ON host_attributes - `, - ` - CREATE INDEX expire_timestamp_idx_host_attributes ON host_attributes (expire_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS hostname_resolve ( - hostname varchar(128) NOT NULL, - resolved_hostname varchar(128) NOT NULL, - resolved_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (hostname) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX resolved_timestamp_idx ON hostname_resolve - `, - ` - CREATE INDEX resolved_timestamp_idx_hostname_resolve ON hostname_resolve (resolved_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS active_node ( - anchor tinyint unsigned NOT NULL, - hostname varchar(128) CHARACTER SET ascii NOT NULL, - token varchar(128) NOT NULL, - last_seen_active timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (anchor) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - INSERT IGNORE INTO active_node (anchor, hostname, token, last_seen_active) - VALUES (1, '', '', NOW()) - `, - ` - CREATE TABLE IF NOT EXISTS node_health ( - hostname varchar(128) CHARACTER SET ascii NOT NULL, - token varchar(128) NOT NULL, - last_seen_active timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (hostname, token) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP VIEW IF EXISTS _whats_wrong - `, - ` - DROP VIEW IF EXISTS whats_wrong - `, - ` - DROP VIEW IF EXISTS whats_wrong_summary - `, - ` - CREATE TABLE IF NOT EXISTS topology_recovery ( - recovery_id bigint unsigned not null auto_increment, - hostname varchar(128) NOT NULL, - port smallint unsigned NOT NULL, - in_active_period tinyint unsigned NOT NULL DEFAULT 0, - start_active_period timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - end_active_period_unixtime int unsigned, - end_recovery timestamp NULL DEFAULT NULL, - processing_node_hostname varchar(128) CHARACTER SET ascii NOT NULL, - 
processcing_node_token varchar(128) NOT NULL, - successor_hostname varchar(128) DEFAULT NULL, - successor_port smallint unsigned DEFAULT NULL, - PRIMARY KEY (recovery_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX in_active_start_period_idx ON topology_recovery - `, - ` - CREATE INDEX in_active_start_period_idx_topology_recovery ON topology_recovery (in_active_period, start_active_period) - `, - ` - DROP INDEX start_active_period_idx ON topology_recovery - `, - ` - CREATE INDEX start_active_period_idx_topology_recovery ON topology_recovery (start_active_period) - `, - ` - DROP INDEX hostname_port_active_period_uidx ON topology_recovery - `, - ` - CREATE UNIQUE INDEX hostname_port_active_period_uidx_topology_recovery ON topology_recovery (hostname, port, in_active_period, end_active_period_unixtime) - `, - ` - CREATE TABLE IF NOT EXISTS hostname_unresolve ( - hostname varchar(128) NOT NULL, - unresolved_hostname varchar(128) NOT NULL, - PRIMARY KEY (hostname) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX unresolved_hostname_idx ON hostname_unresolve - `, - ` - CREATE INDEX unresolved_hostname_idx_hostname_unresolve ON hostname_unresolve (unresolved_hostname) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_pool ( - hostname varchar(128) CHARACTER SET ascii NOT NULL, - port smallint(5) unsigned NOT NULL, - pool varchar(128) NOT NULL, - PRIMARY KEY (hostname, port, pool) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX pool_idx ON database_instance_pool - `, - ` - CREATE INDEX pool_idx_database_instance_pool ON database_instance_pool (pool) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_topology_history ( - snapshot_unix_timestamp INT UNSIGNED NOT NULL, - hostname varchar(128) CHARACTER SET ascii NOT NULL, - port smallint(5) unsigned NOT NULL, - source_host varchar(128) CHARACTER SET ascii NOT NULL, - source_port smallint(5) unsigned NOT NULL, - cluster_name tinytext CHARACTER SET ascii NOT NULL, - 
PRIMARY KEY (snapshot_unix_timestamp, hostname, port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX cluster_name_idx ON database_instance_topology_history - `, - ` - CREATE INDEX cluster_name_idx_database_instance_topology_history ON database_instance_topology_history (snapshot_unix_timestamp, cluster_name(128)) - `, - ` - CREATE TABLE IF NOT EXISTS candidate_database_instance ( - hostname varchar(128) CHARACTER SET ascii NOT NULL, - port smallint(5) unsigned NOT NULL, - last_suggested TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (hostname, port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX last_suggested_idx ON candidate_database_instance - `, - ` - CREATE INDEX last_suggested_idx_candidate_database_instance ON candidate_database_instance (last_suggested) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_downtime ( - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - downtime_active tinyint(4) DEFAULT NULL, - begin_timestamp timestamp DEFAULT CURRENT_TIMESTAMP, - end_timestamp timestamp NULL DEFAULT NULL, - owner varchar(128) CHARACTER SET utf8 NOT NULL, - reason text CHARACTER SET utf8 NOT NULL, - PRIMARY KEY (hostname, port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS topology_failure_detection ( - detection_id bigint(20) unsigned NOT NULL AUTO_INCREMENT, - hostname varchar(128) NOT NULL, - port smallint unsigned NOT NULL, - in_active_period tinyint unsigned NOT NULL DEFAULT '0', - start_active_period timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - end_active_period_unixtime int unsigned NOT NULL, - processing_node_hostname varchar(128) NOT NULL, - processcing_node_token varchar(128) NOT NULL, - analysis varchar(128) NOT NULL, - cluster_name varchar(128) NOT NULL, - count_affected_replicas int unsigned NOT NULL, - replica_hosts text NOT NULL, - PRIMARY KEY (detection_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX 
hostname_port_active_period_uidx ON topology_failure_detection - `, - ` - DROP INDEX in_active_start_period_idx ON topology_failure_detection - `, - ` - CREATE INDEX in_active_start_period_idx_topology_failure_detection ON topology_failure_detection (in_active_period, start_active_period) - `, - ` - CREATE TABLE IF NOT EXISTS hostname_resolve_history ( - resolved_hostname varchar(128) NOT NULL, - hostname varchar(128) NOT NULL, - resolved_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (resolved_hostname) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX hostname ON hostname_resolve_history - `, - ` - CREATE INDEX hostname_idx_hostname_resolve_history ON hostname_resolve_history (hostname) - `, - ` - DROP INDEX resolved_timestamp_idx ON hostname_resolve_history - `, - ` - CREATE INDEX resolved_timestamp_idx_hostname_resolve_history ON hostname_resolve_history (resolved_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS hostname_unresolve_history ( - unresolved_hostname varchar(128) NOT NULL, - hostname varchar(128) NOT NULL, - last_registered TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (unresolved_hostname) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX hostname ON hostname_unresolve_history - `, - ` - CREATE INDEX hostname_idx_hostname_unresolve_history ON hostname_unresolve_history (hostname) - `, - ` - DROP INDEX last_registered_idx ON hostname_unresolve_history - `, - ` - CREATE INDEX last_registered_idx_hostname_unresolve_history ON hostname_unresolve_history (last_registered) - `, - ` - CREATE TABLE IF NOT EXISTS cluster_domain_name ( - cluster_name varchar(128) CHARACTER SET ascii NOT NULL, - domain_name varchar(128) NOT NULL, - PRIMARY KEY (cluster_name) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX domain_name_idx ON cluster_domain_name - `, - ` - CREATE INDEX domain_name_idx_cluster_domain_name ON cluster_domain_name (domain_name(32)) - `, - ` - CREATE TABLE IF NOT EXISTS 
primary_position_equivalence ( - equivalence_id bigint unsigned not null auto_increment, - primary1_hostname varchar(128) CHARACTER SET ascii NOT NULL, - primary1_port smallint(5) unsigned NOT NULL, - primary1_binary_log_file varchar(128) CHARACTER SET ascii NOT NULL, - primary1_binary_log_pos bigint(20) unsigned NOT NULL, - primary2_hostname varchar(128) CHARACTER SET ascii NOT NULL, - primary2_port smallint(5) unsigned NOT NULL, - primary2_binary_log_file varchar(128) CHARACTER SET ascii NOT NULL, - primary2_binary_log_pos bigint(20) unsigned NOT NULL, - last_suggested TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (equivalence_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX equivalence_uidx ON primary_position_equivalence - `, - ` - CREATE UNIQUE INDEX equivalence_uidx_primary_position_equivalence ON primary_position_equivalence (primary1_hostname, primary1_port, primary1_binary_log_file, primary1_binary_log_pos, primary2_hostname, primary2_port) - `, - ` - DROP INDEX primary2_idx ON primary_position_equivalence - `, - ` - CREATE INDEX primary2_idx_primary_position_equivalence ON primary_position_equivalence (primary2_hostname, primary2_port, primary2_binary_log_file, primary2_binary_log_pos) - `, - ` - DROP INDEX last_suggested_idx ON primary_position_equivalence - `, - ` - CREATE INDEX last_suggested_idx_primary_position_equivalence ON primary_position_equivalence (last_suggested) - `, - ` - CREATE TABLE IF NOT EXISTS async_request ( - request_id bigint unsigned NOT NULL AUTO_INCREMENT, - command varchar(128) charset ascii not null, - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - destination_hostname varchar(128) NOT NULL, - destination_port smallint(5) unsigned NOT NULL, - pattern text CHARACTER SET utf8 NOT NULL, - gtid_hint varchar(32) charset ascii not null, - begin_timestamp timestamp NULL DEFAULT NULL, - end_timestamp timestamp NULL DEFAULT NULL, - story text CHARACTER SET utf8 NOT NULL, - PRIMARY 
KEY (request_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX begin_timestamp_idx ON async_request - `, - ` - CREATE INDEX begin_timestamp_idx_async_request ON async_request (begin_timestamp) - `, - ` - DROP INDEX end_timestamp_idx ON async_request - `, - ` - CREATE INDEX end_timestamp_idx_async_request ON async_request (end_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS blocked_topology_recovery ( - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - cluster_name varchar(128) NOT NULL, - analysis varchar(128) NOT NULL, - last_blocked_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - blocking_recovery_id bigint unsigned, - PRIMARY KEY (hostname, port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX cluster_blocked_idx ON blocked_topology_recovery - `, - ` - CREATE INDEX cluster_blocked_idx_blocked_topology_recovery ON blocked_topology_recovery (cluster_name, last_blocked_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_last_analysis ( - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - analysis_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - analysis varchar(128) NOT NULL, - PRIMARY KEY (hostname, port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX analysis_timestamp_idx ON database_instance_last_analysis - `, - ` - CREATE INDEX analysis_timestamp_idx_database_instance_last_analysis ON database_instance_last_analysis (analysis_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_analysis_changelog ( - changelog_id bigint unsigned not null auto_increment, - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - analysis_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - analysis varchar(128) NOT NULL, - PRIMARY KEY (changelog_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX analysis_timestamp_idx ON database_instance_analysis_changelog - `, - ` - CREATE INDEX 
analysis_timestamp_idx_database_instance_analysis_changelog ON database_instance_analysis_changelog (analysis_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS node_health_history ( - history_id bigint unsigned not null auto_increment, - hostname varchar(128) CHARACTER SET ascii NOT NULL, - token varchar(128) NOT NULL, - first_seen_active timestamp NOT NULL, - extra_info varchar(128) CHARACTER SET utf8 NOT NULL, - PRIMARY KEY (history_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX first_seen_active_idx ON node_health_history - `, - ` - CREATE INDEX first_seen_active_idx_node_health_history ON node_health_history (first_seen_active) - `, - ` - DROP INDEX hostname_token_idx ON node_health_history - `, - ` - CREATE UNIQUE INDEX hostname_token_idx_node_health_history ON node_health_history (hostname, token) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_coordinates_history ( - history_id bigint unsigned not null auto_increment, - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - recorded_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - binary_log_file varchar(128) NOT NULL, - binary_log_pos bigint(20) unsigned NOT NULL, - relay_log_file varchar(128) NOT NULL, - relay_log_pos bigint(20) unsigned NOT NULL, - PRIMARY KEY (history_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX hostname_port_recorded_timestmp_idx ON database_instance_coordinates_history - `, - ` - CREATE INDEX hostname_port_recorded_idx_database_instance_coordinates_history ON database_instance_coordinates_history (hostname, port, recorded_timestamp) - `, - ` - DROP INDEX recorded_timestmp_idx ON database_instance_coordinates_history - `, - ` - CREATE INDEX recorded_timestmp_idx_database_instance_coordinates_history ON database_instance_coordinates_history (recorded_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_binlog_files_history ( - history_id bigint unsigned not null auto_increment, - hostname varchar(128) 
NOT NULL, - port smallint(5) unsigned NOT NULL, - binary_log_file varchar(128) NOT NULL, - binary_log_pos bigint(20) unsigned NOT NULL, - first_seen timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - last_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - PRIMARY KEY (history_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX hostname_port_file_idx ON database_instance_binlog_files_history - `, - ` - CREATE UNIQUE INDEX hostname_port_file_idx_database_instance_binlog_files_history ON database_instance_binlog_files_history (hostname, port, binary_log_file) - `, - ` - DROP INDEX last_seen_idx ON database_instance_binlog_files_history - `, - ` - CREATE INDEX last_seen_idx_database_instance_binlog_files_history ON database_instance_binlog_files_history (last_seen) - `, - ` - CREATE TABLE IF NOT EXISTS access_token ( - access_token_id bigint unsigned not null auto_increment, - public_token varchar(128) NOT NULL, - secret_token varchar(128) NOT NULL, - generated_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - generated_by varchar(128) CHARACTER SET utf8 NOT NULL, - is_acquired tinyint unsigned NOT NULL DEFAULT '0', - PRIMARY KEY (access_token_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX public_token_idx ON access_token - `, - ` - CREATE UNIQUE INDEX public_token_uidx_access_token ON access_token (public_token) - `, - ` - DROP INDEX generated_at_idx ON access_token - `, - ` - CREATE INDEX generated_at_idx_access_token ON access_token (generated_at) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_recent_relaylog_history ( - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - current_relay_log_file varchar(128) NOT NULL, - current_relay_log_pos bigint(20) unsigned NOT NULL, - current_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - prev_relay_log_file varchar(128) NOT NULL, - prev_relay_log_pos bigint(20) unsigned NOT NULL, - prev_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - PRIMARY 
KEY (hostname, port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX current_seen_idx ON database_instance_recent_relaylog_history - `, - ` - CREATE INDEX current_seen_idx_database_instance_recent_relaylog_history ON database_instance_recent_relaylog_history (current_seen) - `, - ` - CREATE TABLE IF NOT EXISTS orchestrator_metadata ( - anchor tinyint unsigned NOT NULL, - last_deployed_version varchar(128) CHARACTER SET ascii NOT NULL, - last_deployed_timestamp timestamp NOT NULL, - PRIMARY KEY (anchor) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS orchestrator_db_deployments ( - deployed_version varchar(128) CHARACTER SET ascii NOT NULL, - deployed_timestamp timestamp NOT NULL, - PRIMARY KEY (deployed_version) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS global_recovery_disable ( - disable_recovery tinyint unsigned NOT NULL COMMENT 'Insert 1 to disable recovery globally', - PRIMARY KEY (disable_recovery) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS topology_recovery_steps ( - recovery_step_id bigint unsigned not null auto_increment, - recovery_uid varchar(128) CHARACTER SET ascii NOT NULL, - audit_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - message text CHARACTER SET utf8 NOT NULL, - PRIMARY KEY (recovery_step_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS raft_store ( - store_id bigint unsigned not null auto_increment, - store_key varbinary(512) not null, - store_value blob not null, - PRIMARY KEY (store_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE INDEX store_key_idx_raft_store ON raft_store (store_key) - `, - ` - CREATE TABLE IF NOT EXISTS raft_log ( - log_index bigint unsigned not null auto_increment, - term bigint not null, - log_type int not null, - data blob not null, - PRIMARY KEY (log_index) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS raft_snapshot ( - 
snapshot_id bigint unsigned not null auto_increment, - snapshot_name varchar(128) CHARACTER SET utf8 NOT NULL, - snapshot_meta varchar(4096) CHARACTER SET utf8 NOT NULL, - PRIMARY KEY (snapshot_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE UNIQUE INDEX snapshot_name_uidx_raft_snapshot ON raft_snapshot (snapshot_name) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_peer_analysis ( - peer varchar(128) NOT NULL, - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - analysis_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - analysis varchar(128) NOT NULL, - PRIMARY KEY (peer, hostname, port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_tls ( - hostname varchar(128) CHARACTER SET ascii NOT NULL, - port smallint(5) unsigned NOT NULL, - required tinyint unsigned NOT NULL DEFAULT 0, - PRIMARY KEY (hostname,port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS cluster_injected_pseudo_gtid ( - cluster_name varchar(128) NOT NULL, - time_injected timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (cluster_name) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS hostname_ips ( - hostname varchar(128) CHARACTER SET ascii NOT NULL, - ipv4 varchar(128) CHARACTER SET ascii NOT NULL, - ipv6 varchar(128) CHARACTER SET ascii NOT NULL, - last_updated timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (hostname) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_tags ( - hostname varchar(128) CHARACTER SET ascii NOT NULL, - port smallint(5) unsigned NOT NULL, - tag_name varchar(128) CHARACTER SET utf8 NOT NULL, - tag_value varchar(128) CHARACTER SET utf8 NOT NULL, - last_updated timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (hostname, port, tag_name) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE INDEX tag_name_idx_database_instance_tags ON 
database_instance_tags (tag_name) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_stale_binlog_coordinates ( - hostname varchar(128) CHARACTER SET ascii NOT NULL, - port smallint(5) unsigned NOT NULL, - binary_log_file varchar(128) NOT NULL, - binary_log_pos bigint(20) unsigned NOT NULL, - first_seen timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (hostname, port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE INDEX first_seen_idx_database_instance_stale_binlog_coordinates ON database_instance_stale_binlog_coordinates (first_seen) - `, - ` - CREATE TABLE IF NOT EXISTS vitess_tablet ( - hostname varchar(128) CHARACTER SET ascii NOT NULL, - port smallint(5) unsigned NOT NULL, - keyspace varchar(128) CHARACTER SET ascii NOT NULL, - shard varchar(128) CHARACTER SET ascii NOT NULL, - cell varchar(128) CHARACTER SET ascii NOT NULL, - tablet_type smallint(5) NOT NULL, - primary_timestamp timestamp NOT NULL, - info varchar(512) CHARACTER SET ascii NOT NULL, - PRIMARY KEY (hostname, port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE INDEX cell_idx_vitess_tablet ON vitess_tablet (cell) - `, - ` - CREATE INDEX ks_idx_vitess_tablet ON vitess_tablet (keyspace, shard) - `, - ` - CREATE TABLE IF NOT EXISTS vitess_keyspace ( - keyspace varchar(128) CHARACTER SET ascii NOT NULL, - keyspace_type smallint(5) NOT NULL, - durability_policy varchar(512) CHARACTER SET ascii NOT NULL, - PRIMARY KEY (keyspace) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, -} diff --git a/go/vt/vtgr/db/generate_patches.go b/go/vt/vtgr/db/generate_patches.go deleted file mode 100644 index 3760b3e694a..00000000000 --- a/go/vt/vtgr/db/generate_patches.go +++ /dev/null @@ -1,583 +0,0 @@ -/* - Copyright 2017 Shlomi Noach, GitHub Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -/* - This file has been copied over from VTOrc package -*/ - -package db - -// generateSQLPatches contains DDLs for patching schema to the latest version. -// Add new statements at the end of the list so they form a changelog. -var generateSQLPatches = []string{ - ` - ALTER TABLE - database_instance - ADD COLUMN read_only TINYINT UNSIGNED NOT NULL AFTER version - `, - ` - ALTER TABLE - database_instance - ADD COLUMN last_sql_error TEXT NOT NULL AFTER exec_source_log_pos - `, - ` - ALTER TABLE - database_instance - ADD COLUMN last_io_error TEXT NOT NULL AFTER last_sql_error - `, - ` - ALTER TABLE - database_instance - ADD COLUMN oracle_gtid TINYINT UNSIGNED NOT NULL AFTER replica_io_running - `, - ` - ALTER TABLE - database_instance - ADD COLUMN mariadb_gtid TINYINT UNSIGNED NOT NULL AFTER oracle_gtid - `, - ` - ALTER TABLE - database_instance - ADD COLUMN relay_log_file varchar(128) CHARACTER SET ascii NOT NULL AFTER exec_source_log_pos - `, - ` - ALTER TABLE - database_instance - ADD COLUMN relay_log_pos bigint unsigned NOT NULL AFTER relay_log_file - `, - ` - DROP INDEX source_host_port_idx ON database_instance - `, - ` - ALTER TABLE - database_instance - ADD INDEX source_host_port_idx_database_instance (source_host, source_port) - `, - ` - ALTER TABLE - database_instance - ADD COLUMN pseudo_gtid TINYINT UNSIGNED NOT NULL AFTER mariadb_gtid - `, - ` - ALTER TABLE - database_instance - ADD COLUMN replication_depth TINYINT UNSIGNED NOT NULL AFTER cluster_name - `, - ` - ALTER TABLE - database_instance - ADD COLUMN has_replication_filters TINYINT UNSIGNED NOT 
NULL AFTER replica_io_running - `, - ` - ALTER TABLE - database_instance - ADD COLUMN data_center varchar(32) CHARACTER SET ascii NOT NULL AFTER cluster_name - `, - ` - ALTER TABLE - database_instance - ADD COLUMN physical_environment varchar(32) CHARACTER SET ascii NOT NULL AFTER data_center - `, - ` - ALTER TABLE - database_instance_maintenance - ADD KEY active_timestamp_idx (maintenance_active, begin_timestamp) - `, - ` - ALTER TABLE - database_instance - ADD COLUMN is_co_primary TINYINT UNSIGNED NOT NULL AFTER replication_depth - `, - ` - ALTER TABLE - database_instance_maintenance - ADD KEY active_end_timestamp_idx (maintenance_active, end_timestamp) - `, - ` - ALTER TABLE - database_instance - ADD COLUMN sql_delay INT UNSIGNED NOT NULL AFTER replica_lag_seconds - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN analysis varchar(128) CHARACTER SET ascii NOT NULL - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN cluster_name varchar(128) CHARACTER SET ascii NOT NULL - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN count_affected_replicas int unsigned NOT NULL - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN replica_hosts text CHARACTER SET ascii NOT NULL - `, - ` - ALTER TABLE hostname_unresolve - ADD COLUMN last_registered TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP - `, - ` - ALTER TABLE hostname_unresolve - ADD KEY last_registered_idx (last_registered) - `, - ` - ALTER TABLE topology_recovery - ADD KEY cluster_name_in_active_idx (cluster_name, in_active_period) - `, - ` - ALTER TABLE topology_recovery - ADD KEY end_recovery_idx (end_recovery) - `, - ` - ALTER TABLE - database_instance - ADD COLUMN binlog_server TINYINT UNSIGNED NOT NULL AFTER version - `, - ` - ALTER TABLE cluster_domain_name - ADD COLUMN last_registered TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP - `, - ` - ALTER TABLE cluster_domain_name - ADD KEY last_registered_idx (last_registered) - `, - ` - ALTER TABLE - database_instance - ADD COLUMN supports_oracle_gtid 
TINYINT UNSIGNED NOT NULL AFTER oracle_gtid - `, - ` - ALTER TABLE - database_instance - ADD COLUMN executed_gtid_set text CHARACTER SET ascii NOT NULL AFTER oracle_gtid - `, - ` - ALTER TABLE - database_instance - ADD COLUMN server_uuid varchar(64) CHARACTER SET ascii NOT NULL AFTER server_id - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN is_successful TINYINT UNSIGNED NOT NULL DEFAULT 0 AFTER processcing_node_token - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN acknowledged TINYINT UNSIGNED NOT NULL DEFAULT 0 - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN acknowledged_by varchar(128) CHARACTER SET utf8 NOT NULL - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN acknowledge_comment text CHARACTER SET utf8 NOT NULL - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN participating_instances text CHARACTER SET ascii NOT NULL after replica_hosts - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN lost_replicas text CHARACTER SET ascii NOT NULL after participating_instances - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN all_errors text CHARACTER SET ascii NOT NULL after lost_replicas - `, - ` - ALTER TABLE audit - ADD COLUMN cluster_name varchar(128) CHARACTER SET ascii NOT NULL DEFAULT '' AFTER port - `, - ` - ALTER TABLE candidate_database_instance - ADD COLUMN priority TINYINT SIGNED NOT NULL DEFAULT 1 comment 'positive promote, nagative unpromotes' - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN acknowledged_at TIMESTAMP NULL after acknowledged - `, - ` - ALTER TABLE - topology_recovery - ADD KEY acknowledged_idx (acknowledged, acknowledged_at) - `, - ` - ALTER TABLE - blocked_topology_recovery - ADD KEY last_blocked_idx (last_blocked_timestamp) - `, - ` - ALTER TABLE candidate_database_instance - ADD COLUMN promotion_rule enum('must', 'prefer', 'neutral', 'prefer_not', 'must_not') NOT NULL DEFAULT 'neutral' - `, - ` - ALTER TABLE node_health /* sqlite3-skip */ - DROP PRIMARY KEY, - ADD PRIMARY KEY (hostname, 
token) - `, - ` - ALTER TABLE node_health - ADD COLUMN extra_info varchar(128) CHARACTER SET utf8 NOT NULL - `, - ` - ALTER TABLE agent_seed /* sqlite3-skip */ - MODIFY end_timestamp timestamp NOT NULL DEFAULT '1971-01-01 00:00:00' - `, - ` - ALTER TABLE active_node /* sqlite3-skip */ - MODIFY last_seen_active timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP - `, - - ` - ALTER TABLE node_health /* sqlite3-skip */ - MODIFY last_seen_active timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP - `, - ` - ALTER TABLE candidate_database_instance /* sqlite3-skip */ - MODIFY last_suggested timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP - `, - ` - ALTER TABLE primary_position_equivalence /* sqlite3-skip */ - MODIFY last_suggested timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP - `, - ` - ALTER TABLE - database_instance - ADD COLUMN last_attempted_check TIMESTAMP NOT NULL DEFAULT '1971-01-01 00:00:00' AFTER last_checked - `, - ` - ALTER TABLE - database_instance /* sqlite3-skip */ - MODIFY last_attempted_check TIMESTAMP NOT NULL DEFAULT '1971-01-01 00:00:00' - `, - ` - ALTER TABLE - database_instance_analysis_changelog - ADD KEY instance_timestamp_idx (hostname, port, analysis_timestamp) - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN last_detection_id bigint unsigned NOT NULL - `, - ` - ALTER TABLE - topology_recovery - ADD KEY last_detection_idx (last_detection_id) - `, - ` - ALTER TABLE node_health_history - ADD COLUMN command varchar(128) CHARACTER SET utf8 NOT NULL - `, - ` - ALTER TABLE node_health - ADD COLUMN command varchar(128) CHARACTER SET utf8 NOT NULL - `, - ` - ALTER TABLE database_instance_topology_history - ADD COLUMN version varchar(128) CHARACTER SET ascii NOT NULL - `, - ` - ALTER TABLE - database_instance - ADD COLUMN gtid_purged text CHARACTER SET ascii NOT NULL AFTER executed_gtid_set - `, - ` - ALTER TABLE - database_instance_coordinates_history - ADD COLUMN last_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00' AFTER recorded_timestamp - `, - ` - ALTER 
TABLE - access_token - ADD COLUMN is_reentrant TINYINT UNSIGNED NOT NULL default 0 - `, - ` - ALTER TABLE - access_token - ADD COLUMN acquired_at timestamp NOT NULL DEFAULT '1971-01-01 00:00:00' - `, - ` - ALTER TABLE - database_instance_pool - ADD COLUMN registered_at timestamp NOT NULL DEFAULT '1971-01-01 00:00:00' - `, - ` - ALTER TABLE - database_instance - ADD COLUMN has_replication_credentials TINYINT UNSIGNED NOT NULL - `, - ` - ALTER TABLE - database_instance - ADD COLUMN allow_tls TINYINT UNSIGNED NOT NULL AFTER sql_delay - `, - ` - ALTER TABLE - database_instance - ADD COLUMN semi_sync_enforced TINYINT UNSIGNED NOT NULL AFTER physical_environment - `, - ` - ALTER TABLE - database_instance - ADD COLUMN instance_alias varchar(128) CHARACTER SET ascii NOT NULL AFTER physical_environment - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN successor_alias varchar(128) DEFAULT NULL - `, - ` - ALTER TABLE - database_instance /* sqlite3-skip */ - MODIFY cluster_name varchar(128) NOT NULL - `, - ` - ALTER TABLE - node_health - ADD INDEX last_seen_active_idx (last_seen_active) - `, - ` - ALTER TABLE - database_instance_maintenance - ADD COLUMN processing_node_hostname varchar(128) CHARACTER SET ascii NOT NULL - `, - ` - ALTER TABLE - database_instance_maintenance - ADD COLUMN processing_node_token varchar(128) NOT NULL - `, - ` - ALTER TABLE - database_instance_maintenance - ADD COLUMN explicitly_bounded TINYINT UNSIGNED NOT NULL - `, - ` - ALTER TABLE node_health_history - ADD COLUMN app_version varchar(64) CHARACTER SET ascii NOT NULL DEFAULT "" - `, - ` - ALTER TABLE node_health - ADD COLUMN app_version varchar(64) CHARACTER SET ascii NOT NULL DEFAULT "" - `, - ` - ALTER TABLE node_health_history /* sqlite3-skip */ - MODIFY app_version varchar(64) CHARACTER SET ascii NOT NULL DEFAULT "" - `, - ` - ALTER TABLE node_health /* sqlite3-skip */ - MODIFY app_version varchar(64) CHARACTER SET ascii NOT NULL DEFAULT "" - `, - ` - ALTER TABLE - database_instance - 
ADD COLUMN version_comment varchar(128) NOT NULL DEFAULT '' - `, - ` - ALTER TABLE active_node - ADD COLUMN first_seen_active timestamp NOT NULL DEFAULT '1971-01-01 00:00:00' - `, - ` - ALTER TABLE node_health - ADD COLUMN first_seen_active timestamp NOT NULL DEFAULT '1971-01-01 00:00:00' - `, - ` - ALTER TABLE database_instance - ADD COLUMN major_version varchar(16) CHARACTER SET ascii NOT NULL - `, - ` - ALTER TABLE - database_instance - ADD COLUMN binlog_row_image varchar(16) CHARACTER SET ascii NOT NULL - `, - ` - ALTER TABLE topology_recovery - ADD COLUMN uid varchar(128) CHARACTER SET ascii NOT NULL - `, - ` - CREATE INDEX uid_idx_topology_recovery ON topology_recovery(uid) - `, - ` - CREATE INDEX recovery_uid_idx_topology_recovery_steps ON topology_recovery_steps(recovery_uid) - `, - ` - ALTER TABLE - database_instance - ADD COLUMN last_discovery_latency bigint not null - `, - ` - CREATE INDEX end_timestamp_idx_database_instance_downtime ON database_instance_downtime(end_timestamp) - `, - ` - ALTER TABLE - topology_failure_detection - ADD COLUMN is_actionable tinyint not null default 0 - `, - ` - DROP INDEX hostname_port_active_period_uidx_topology_failure_detection ON topology_failure_detection - `, - ` - CREATE UNIQUE INDEX host_port_active_recoverable_uidx_topology_failure_detection ON topology_failure_detection (hostname, port, in_active_period, end_active_period_unixtime, is_actionable) - `, - ` - ALTER TABLE raft_snapshot - ADD COLUMN created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP - `, - ` - ALTER TABLE node_health - ADD COLUMN db_backend varchar(255) CHARACTER SET ascii NOT NULL DEFAULT "" - `, - ` - ALTER TABLE node_health - ADD COLUMN incrementing_indicator bigint not null default 0 - `, - ` - ALTER TABLE - database_instance - ADD COLUMN semi_sync_primary_enabled TINYINT UNSIGNED NOT NULL - `, - ` - ALTER TABLE - database_instance - ADD COLUMN semi_sync_replica_enabled TINYINT UNSIGNED NOT NULL - `, - ` - ALTER TABLE - database_instance - 
ADD COLUMN gtid_mode varchar(32) CHARACTER SET ascii NOT NULL - `, - ` - ALTER TABLE - database_instance - ADD COLUMN last_check_partial_success tinyint unsigned NOT NULL after last_attempted_check - `, - ` - ALTER TABLE - database_instance - ADD COLUMN source_uuid varchar(64) CHARACTER SET ascii NOT NULL AFTER oracle_gtid - `, - ` - ALTER TABLE - database_instance - ADD COLUMN gtid_errant text CHARACTER SET ascii NOT NULL AFTER gtid_purged - `, - ` - ALTER TABLE - database_instance - ADD COLUMN ancestry_uuid text CHARACTER SET ascii NOT NULL AFTER source_uuid - `, - ` - ALTER TABLE - database_instance - ADD COLUMN replication_sql_thread_state tinyint signed not null default 0 AFTER replica_io_running - `, - ` - ALTER TABLE - database_instance - ADD COLUMN replication_io_thread_state tinyint signed not null default 0 AFTER replication_sql_thread_state - `, - ` - ALTER TABLE - database_instance_tags /* sqlite3-skip */ - DROP PRIMARY KEY, - ADD PRIMARY KEY (hostname, port, tag_name) - `, - ` - ALTER TABLE - database_instance - ADD COLUMN region varchar(32) CHARACTER SET ascii NOT NULL AFTER data_center - `, - ` - ALTER TABLE - database_instance - ADD COLUMN semi_sync_primary_timeout INT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_enabled - `, - ` - ALTER TABLE - database_instance - ADD COLUMN semi_sync_primary_wait_for_replica_count INT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_timeout - `, - ` - ALTER TABLE - database_instance - ADD COLUMN semi_sync_primary_status TINYINT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_wait_for_replica_count - `, - ` - ALTER TABLE - database_instance - ADD COLUMN semi_sync_replica_status TINYINT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_status - `, - ` - ALTER TABLE - database_instance - ADD COLUMN semi_sync_primary_clients INT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_status - `, - ` - ALTER TABLE /* sqlite3-skip */ - database_instance - MODIFY semi_sync_primary_timeout BIGINT UNSIGNED NOT 
NULL DEFAULT 0 - `, - // Fields related to Replication Group the instance belongs to - ` - ALTER TABLE - database_instance - ADD COLUMN replication_group_name VARCHAR(64) CHARACTER SET ascii NOT NULL DEFAULT '' AFTER gtid_mode - `, - ` - ALTER TABLE - database_instance - ADD COLUMN replication_group_is_single_primary_mode TINYINT UNSIGNED NOT NULL DEFAULT 1 AFTER replication_group_name - `, - ` - ALTER TABLE - database_instance - ADD COLUMN replication_group_member_state VARCHAR(16) CHARACTER SET ascii NOT NULL DEFAULT '' AFTER replication_group_is_single_primary_mode - `, - ` - ALTER TABLE - database_instance - ADD COLUMN replication_group_member_role VARCHAR(16) CHARACTER SET ascii NOT NULL DEFAULT '' AFTER replication_group_member_state - `, - ` - ALTER TABLE - database_instance - ADD COLUMN replication_group_members text CHARACTER SET ascii NOT NULL AFTER replication_group_member_role - `, - ` - ALTER TABLE - database_instance - ADD COLUMN replication_group_primary_host varchar(128) CHARACTER SET ascii NOT NULL DEFAULT '' AFTER replication_group_members - `, - ` - ALTER TABLE - database_instance - ADD COLUMN replication_group_primary_port smallint(5) unsigned NOT NULL DEFAULT 0 AFTER replication_group_primary_host - `, -} diff --git a/go/vt/vtgr/db/mock_mysql.go b/go/vt/vtgr/db/mock_mysql.go deleted file mode 100644 index a74d8359099..00000000000 --- a/go/vt/vtgr/db/mock_mysql.go +++ /dev/null @@ -1,191 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package db - -import ( - reflect "reflect" - "strconv" - - gomock "github.com/golang/mock/gomock" - - mysql "vitess.io/vitess/go/mysql" - inst "vitess.io/vitess/go/vt/vtgr/inst" -) - -// MockAgent is a mock of Agent interface -type MockAgent struct { - ctrl *gomock.Controller - recorder *MockAgentMockRecorder -} - -// MockAgentMockRecorder is the mock recorder for MockAgent -type MockAgentMockRecorder struct { - mock *MockAgent -} - -// NewMockAgent creates a new mock instance -func NewMockAgent(ctrl *gomock.Controller) *MockAgent { - mock := &MockAgent{ctrl: ctrl} - mock.recorder = &MockAgentMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockAgent) EXPECT() *MockAgentMockRecorder { - return m.recorder -} - -// BootstrapGroupLocked mocks base method -func (m *MockAgent) BootstrapGroupLocked(instanceKey *inst.InstanceKey) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BootstrapGroupLocked", instanceKey) - ret0, _ := ret[0].(error) - return ret0 -} - -// BootstrapGroupLocked indicates an expected call of BootstrapGroupLocked -func (mr *MockAgentMockRecorder) BootstrapGroupLocked(instanceKey any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BootstrapGroupLocked", reflect.TypeOf((*MockAgent)(nil).BootstrapGroupLocked), instanceKey) -} - -// RebootstrapGroupLocked mocks base method -func (m *MockAgent) RebootstrapGroupLocked(instanceKey *inst.InstanceKey, name string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RebootstrapGroupLocked", instanceKey, name) - ret0, _ := ret[0].(error) - return ret0 -} - -// RebootstrapGroupLocked indicates an expected call of RebootstrapGroupLocked -func (mr *MockAgentMockRecorder) RebootstrapGroupLocked(instanceKey, name any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RebootstrapGroupLocked", reflect.TypeOf((*MockAgent)(nil).RebootstrapGroupLocked), instanceKey, name) -} - -// StopGroupLocked mocks base method -func (m *MockAgent) StopGroupLocked(instanceKey *inst.InstanceKey) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StopGroupLocked", instanceKey) - ret0, _ := ret[0].(error) - return ret0 -} - -// StopGroupLocked indicates an expected call of StopGroupLocked -func (mr *MockAgentMockRecorder) StopGroupLocked(instanceKey any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopGroupLocked", reflect.TypeOf((*MockAgent)(nil).StopGroupLocked), instanceKey) -} - -// JoinGroupLocked mocks base method -func (m *MockAgent) JoinGroupLocked(instanceKey, primaryKey *inst.InstanceKey) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "JoinGroupLocked", instanceKey, primaryKey) - ret0, _ := ret[0].(error) - return ret0 -} - -// JoinGroupLocked indicates an expected call of JoinGroupLocked -func (mr *MockAgentMockRecorder) JoinGroupLocked(instanceKey, primaryKey any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "JoinGroupLocked", reflect.TypeOf((*MockAgent)(nil).JoinGroupLocked), instanceKey, primaryKey) -} - -// SetReadOnly mocks base method -func (m *MockAgent) SetReadOnly(instanceKey *inst.InstanceKey, readOnly bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetReadOnly", instanceKey, readOnly) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetReadOnly indicates an expected call of SetReadOnly -func (mr *MockAgentMockRecorder) SetReadOnly(instanceKey, readOnly any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetReadOnly", reflect.TypeOf((*MockAgent)(nil).SetReadOnly), instanceKey, readOnly) -} - -// FetchApplierGTIDSet mocks base method -func (m *MockAgent) FetchApplierGTIDSet(instanceKey *inst.InstanceKey) 
(mysql.GTIDSet, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchApplierGTIDSet", instanceKey) - ret0, _ := ret[0].(mysql.GTIDSet) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchApplierGTIDSet indicates an expected call of FetchApplierGTIDSet -func (mr *MockAgentMockRecorder) FetchApplierGTIDSet(instanceKey any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchApplierGTIDSet", reflect.TypeOf((*MockAgent)(nil).FetchApplierGTIDSet), instanceKey) -} - -// Failover mocks base method -func (m *MockAgent) Failover(instance *inst.InstanceKey) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Failover", instance) - ret0, _ := ret[0].(error) - return ret0 -} - -// Failover indicates an expected call of Failover -func (mr *MockAgentMockRecorder) Failover(instance any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Failover", reflect.TypeOf((*MockAgent)(nil).Failover), instance) -} - -// FetchGroupView mocks base method -func (m *MockAgent) FetchGroupView(alias string, instanceKey *inst.InstanceKey) (*GroupView, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchGroupView", alias, instanceKey) - ret0, _ := ret[0].(*GroupView) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchGroupView indicates an expected call of FetchGroupView -func (mr *MockAgentMockRecorder) FetchGroupView(alias, instanceKey any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchGroupView", reflect.TypeOf((*MockAgent)(nil).FetchGroupView), alias, instanceKey) -} - -// TestGroupState mocks a row from mysql -type TestGroupState struct { - MemberHost, MemberPort, MemberState, MemberRole string -} - -// BuildGroupView builds gruop view from input -func BuildGroupView(alias, groupName, host string, port int, readOnly bool, stalenessResult int, inputs []TestGroupState) *GroupView { - view := 
NewGroupView(alias, host, port) - view.GroupName = groupName - // group_name, member_host, member_port, member_state, member_role, is_local - for _, row := range inputs { - memberPort, _ := strconv.Atoi(row.MemberPort) - member := NewGroupMember( - row.MemberState, - row.MemberRole, - row.MemberHost, - memberPort, - false) - if host == row.MemberHost && port == memberPort { - member.ReadOnly = readOnly - } - view.UnresolvedMembers = append(view.UnresolvedMembers, member) - view.HeartbeatStaleness = stalenessResult - } - return view -} diff --git a/go/vt/vtgr/db/mysql.go b/go/vt/vtgr/db/mysql.go deleted file mode 100644 index 8c3787c9187..00000000000 --- a/go/vt/vtgr/db/mysql.go +++ /dev/null @@ -1,590 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package db - -import ( - "errors" - "fmt" - "math" - "strconv" - "strings" - - gouuid "github.com/google/uuid" - "github.com/spf13/pflag" - - "vitess.io/vitess/go/vt/external/golib/sqlutils" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vtgr/config" - "vitess.io/vitess/go/vt/vtgr/inst" -) - -var ( - configFilePath string - dbFlavor = "MySQL56" - mysqlGroupPort = 33061 - enableHeartbeatCheck bool - - // ErrGroupSplitBrain is the error when mysql group is split-brain - ErrGroupSplitBrain = errors.New("group has split brain") - // ErrGroupBackoffError is either the transient error or network partition from the group - ErrGroupBackoffError = errors.New("group backoff error") - // ErrGroupOngoingBootstrap is the error when a bootstrap is in progress - ErrGroupOngoingBootstrap = errors.New("group ongoing bootstrap") - // ErrGroupInactive is the error when mysql group is inactive unexpectedly - ErrGroupInactive = errors.New("group is inactive") - // ErrInvalidInstance is the error when the instance key has empty hostname - ErrInvalidInstance = errors.New("invalid mysql instance key") -) - -func init() { - servenv.OnParseFor("vtgr", func(fs *pflag.FlagSet) { - fs.StringVar(&configFilePath, "db_config", "", "Full path to db config file that will be used by VTGR.") - fs.StringVar(&dbFlavor, "db_flavor", "MySQL56", "MySQL flavor override.") - fs.IntVar(&mysqlGroupPort, "gr_port", 33061, "Port to bootstrap a MySQL group.") - fs.BoolVar(&enableHeartbeatCheck, "enable_heartbeat_check", false, "Enable heartbeat checking, set together with --group_heartbeat_threshold.") - }) -} - -// Agent is used by vtgr to interact with Mysql -type Agent interface { - // BootstrapGroupLocked bootstraps a mysql group - // the caller should grab a lock before - BootstrapGroupLocked(instanceKey *inst.InstanceKey) error - - // RebootstrapGroupLocked rebootstrap a group with an existing name - 
RebootstrapGroupLocked(instanceKey *inst.InstanceKey, name string) error - - // StopGroupLocked stops a mysql group - StopGroupLocked(instanceKey *inst.InstanceKey) error - - // JoinGroupLocked puts an instance into a mysql group based on primary instance - // the caller should grab a lock before - JoinGroupLocked(instanceKey *inst.InstanceKey, primaryKey *inst.InstanceKey) error - - // SetReadOnly set super_read_only variable - // https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_super_read_only - SetReadOnly(instanceKey *inst.InstanceKey, readOnly bool) error - - // FetchApplierGTIDSet fetches the GTID set from group_replication_applier channel - FetchApplierGTIDSet(instanceKey *inst.InstanceKey) (mysql.GTIDSet, error) - - // Failover move the mysql primary to the node defined by memberUUID - Failover(instance *inst.InstanceKey) error - - // FetchGroupView fetches group related information - FetchGroupView(alias string, instanceKey *inst.InstanceKey) (*GroupView, error) -} - -// MemberState is member state -type MemberState int - -// MemberRole is member role -type MemberRole int - -const ( - UNKNOWNSTATE MemberState = iota - OFFLINE - UNREACHABLE - RECOVERING - ONLINE - ERROR -) - -const ( - UNKNOWNROLE MemberRole = iota - SECONDARY - PRIMARY -) - -// GroupMember represents a ROW we get from performance_schema -type GroupMember struct { - HostName string - Port int - Role MemberRole - State MemberState - ReadOnly bool -} - -// GroupView is an instance's view for the group -type GroupView struct { - TabletAlias string - MySQLHost string - MySQLPort int - GroupName string - HeartbeatStaleness int - UnresolvedMembers []*GroupMember -} - -// SQLAgentImpl implements Agent -type SQLAgentImpl struct { - config *config.Configuration - dbFlavor string - enableHeartbeat bool -} - -// NewGroupView creates a new GroupView -func NewGroupView(alias, host string, port int) *GroupView { - return &GroupView{TabletAlias: alias, MySQLHost: host, 
MySQLPort: port} -} - -// NewGroupMember creates a new GroupMember -func NewGroupMember(state, role, host string, port int, readonly bool) *GroupMember { - return &GroupMember{ - State: toMemberState(state), - Role: toMemberRole(role), - HostName: host, - Port: port, - ReadOnly: readonly, - } -} - -// NewVTGRSqlAgent creates a SQLAgentImpl -func NewVTGRSqlAgent() *SQLAgentImpl { - var conf *config.Configuration - if (configFilePath) != "" { - log.Infof("use config from %v", configFilePath) - conf = config.ForceRead(configFilePath) - } else { - log.Warningf("use default config") - conf = config.Config - } - agent := &SQLAgentImpl{ - config: conf, - dbFlavor: dbFlavor, - enableHeartbeat: enableHeartbeatCheck, - } - return agent -} - -// BootstrapGroupLocked implements Agent interface -func (agent *SQLAgentImpl) BootstrapGroupLocked(instanceKey *inst.InstanceKey) error { - if instanceKey == nil { - return errors.New("nil instance key for bootstrap") - } - // Before bootstrap a group, double check locally there is really nothing running locally - uuid, state, err := agent.getGroupNameAndMemberState(instanceKey) - if err != nil { - return err - } - if state != "" && state != inst.GroupReplicationMemberStateOffline { - return fmt.Errorf("%v not OFFLINE mode %v [group_name=%v]", instanceKey.Hostname, state, uuid) - } - // If there is a group name stored locally, we should try to reuse it - // for port, we will override with a new one - if uuid == "" { - uuid = gouuid.New().String() - log.Infof("Try to bootstrap with a new uuid") - } - log.Infof("Bootstrap group on %v with %v", instanceKey.Hostname, uuid) - return agent.bootstrapInternal(instanceKey, uuid) -} - -func (agent *SQLAgentImpl) RebootstrapGroupLocked(instanceKey *inst.InstanceKey, name string) error { - log.Infof("Rebootstrapping group on %v with %v", instanceKey.Hostname, name) - return agent.bootstrapInternal(instanceKey, name) -} - -func (agent *SQLAgentImpl) bootstrapInternal(instanceKey *inst.InstanceKey, 
uuid string) error { - // Use persist to set group_replication_group_name - // so that the instance will persist the name after restart - cmds := []string{ - "set global offline_mode=0", - fmt.Sprintf("set @@persist.group_replication_group_name=\"%s\"", uuid), - fmt.Sprintf("set global group_replication_local_address=\"%s:%d\"", instanceKey.Hostname, mysqlGroupPort), - fmt.Sprintf("set global group_replication_group_seeds=\"%s:%d\"", instanceKey.Hostname, mysqlGroupPort), - "set global group_replication_bootstrap_group=ON", - fmt.Sprintf("start group_replication user='%s', password='%s'", agent.config.MySQLReplicaUser, agent.config.MySQLReplicaPassword), - "set global group_replication_bootstrap_group=OFF", - } - for _, cmd := range cmds { - if err := execInstanceWithTopo(instanceKey, cmd); err != nil { - log.Errorf("Failed to execute: %v: %v", cmd, err) - return err - } - } - return nil -} - -// StopGroupLocked implements Agent interface -func (agent *SQLAgentImpl) StopGroupLocked(instanceKey *inst.InstanceKey) error { - cmd := "stop group_replication" - return execInstanceWithTopo(instanceKey, cmd) -} - -// SetReadOnly implements Agent interface -func (agent *SQLAgentImpl) SetReadOnly(instanceKey *inst.InstanceKey, readOnly bool) error { - // Setting super_read_only ON implicitly forces read_only ON - // Setting read_only OFF implicitly forces super_read_only OFF - // https://www.perconaicom/blog/2016/09/27/using-the-super_read_only-system-variable/ - if readOnly { - return execInstance(instanceKey, "set @@global.super_read_only=1") - } - return execInstance(instanceKey, "set @@global.read_only=0") -} - -// JoinGroupLocked implements Agent interface -// Note: caller should grab the lock before calling this -func (agent *SQLAgentImpl) JoinGroupLocked(instanceKey *inst.InstanceKey, primaryInstanceKey *inst.InstanceKey) error { - var numExistingMembers int - var uuid string - query := `select count(*) as count, @@group_replication_group_name as group_name - from 
performance_schema.replication_group_members where member_state='ONLINE'` - err := fetchInstance(primaryInstanceKey, query, func(m sqlutils.RowMap) error { - numExistingMembers = m.GetInt("count") - uuid = m.GetString("group_name") - return nil - }) - if err != nil { - return err - } - if numExistingMembers == 0 { - return fmt.Errorf("there is no group members found on %v:%v", primaryInstanceKey.Hostname, primaryInstanceKey.Port) - } - // The queries above are executed on the primary instance - // now let's do one more check with local information to make sure it's OK to join the primary - localGroup, state, err := agent.getGroupNameAndMemberState(instanceKey) - if err != nil { - return err - } - if localGroup != "" && localGroup != uuid { - return fmt.Errorf("%v has a different group name (%v) than primary %v (%v)", instanceKey.Hostname, localGroup, primaryInstanceKey.Hostname, uuid) - } - if state == inst.GroupReplicationMemberStateOnline || state == inst.GroupReplicationMemberStateRecovering { - return fmt.Errorf("%v [%v] is alredy in a group %v", instanceKey.Hostname, state, localGroup) - } - var primaryGrPort int - query = `select @@group_replication_local_address as address` - err = fetchInstance(primaryInstanceKey, query, func(m sqlutils.RowMap) error { - address := m.GetString("address") - arr := strings.Split(address, ":") - primaryGrPort, err = strconv.Atoi(arr[1]) - if err != nil { - log.Errorf("Failed to parse primary GR port: %v", err) - return err - } - return nil - }) - if primaryGrPort == 0 { - return fmt.Errorf("cannot find group replication port on %v", primaryInstanceKey.Hostname) - } - // Now it's safe to join the group - cmds := []string{ - "set global offline_mode=0", - fmt.Sprintf("set @@persist.group_replication_group_name=\"%s\"", uuid), - fmt.Sprintf("set global group_replication_group_seeds=\"%s:%d\"", primaryInstanceKey.Hostname, primaryGrPort), - fmt.Sprintf("set global group_replication_local_address=\"%s:%d\"", instanceKey.Hostname, 
mysqlGroupPort), - fmt.Sprintf("start group_replication user='%s', password='%s'", agent.config.MySQLReplicaUser, agent.config.MySQLReplicaPassword), - } - for _, cmd := range cmds { - if err := execInstanceWithTopo(instanceKey, cmd); err != nil { - return err - } - } - return nil -} - -// Failover implements Agent interface -func (agent *SQLAgentImpl) Failover(instance *inst.InstanceKey) error { - var memberUUID string - query := `select member_id - from performance_schema.replication_group_members - where member_host=convert(@@hostname using ascii) and member_port=@@port and member_state='ONLINE'` - err := fetchInstance(instance, query, func(m sqlutils.RowMap) error { - memberUUID = m.GetString("member_id") - if memberUUID == "" { - return fmt.Errorf("unable to find member_id on %v", instance.Hostname) - } - return nil - }) - if err != nil { - return err - } - cmd := fmt.Sprintf(`select group_replication_set_as_primary('%s')`, memberUUID) - if err := execInstance(instance, cmd); err != nil { - return err - } - return nil -} - -// heartbeatCheck returns heartbeat check freshness result -func (agent *SQLAgentImpl) heartbeatCheck(instanceKey *inst.InstanceKey) (int, error) { - query := `select timestampdiff(SECOND, from_unixtime(truncate(ts * 0.000000001, 0)), NOW()) as diff from _vt.heartbeat;` - var result int - err := fetchInstance(instanceKey, query, func(m sqlutils.RowMap) error { - result = m.GetInt("diff") - return nil - }) - return result, err -} - -// FetchGroupView implements Agent interface -func (agent *SQLAgentImpl) FetchGroupView(alias string, instanceKey *inst.InstanceKey) (*GroupView, error) { - view := NewGroupView(alias, instanceKey.Hostname, instanceKey.Port) - var groupName string - var isReadOnly bool - query := `select - @@group_replication_group_name as group_name, - @@super_read_only as read_only, - member_host, member_port, member_state, member_role - from performance_schema.replication_group_members` - err := fetchInstance(instanceKey, 
query, func(m sqlutils.RowMap) error { - if groupName == "" { - groupName = m.GetString("group_name") - } - host := m.GetString("member_host") - port := m.GetInt("member_port") - isReadOnly = m.GetBool("read_only") - unresolvedMember := NewGroupMember( - m.GetString("member_state"), - m.GetString("member_role"), - host, - port, - false) - // readOnly is used to re-enable write after we set primary to read_only to protect the shard when there is - // less than desired number of nodes - // the default value is false because if the node is reachable and read_only, it will get override by the OR op - // if the host is unreachable, we don't need to trigger the protection for it therefore assume the it's writable - if host == instanceKey.Hostname && port == instanceKey.Port && isReadOnly { - unresolvedMember.ReadOnly = true - } - view.UnresolvedMembers = append(view.UnresolvedMembers, unresolvedMember) - return nil - }) - view.GroupName = groupName - if err != nil { - return nil, err - } - view.HeartbeatStaleness = math.MaxInt32 - if agent.enableHeartbeat { - heartbeatStaleness, err := agent.heartbeatCheck(instanceKey) - if err != nil { - // We can run into Error 1146: Table '_vt.heartbeat' doesn't exist on new provisioned shard: - // vtgr is checking heartbeat table - // -> heartbeat table is waiting primary tablet - // -> primary tablet needs vtgr. 
- // - // Therefore if we run into error, HeartbeatStaleness will - // remain to be max int32, which is 2147483647 sec - log.Errorf("Failed to check heartbeatCheck: %v", err) - } else { - view.HeartbeatStaleness = heartbeatStaleness - } - } - return view, nil -} - -// GetPrimaryView returns the view of primary member -func (view *GroupView) GetPrimaryView() (string, int, bool) { - for _, member := range view.UnresolvedMembers { - if member.Role == PRIMARY { - return member.HostName, member.Port, member.State == ONLINE - } - } - return "", 0, false -} - -func (agent *SQLAgentImpl) getGroupNameAndMemberState(instanceKey *inst.InstanceKey) (string, string, error) { - // If there is an instance that is unreachable but we still have quorum, GR will remove it from - // the replication_group_members and Failover if it is the primary node - // If the state becomes UNREACHABLE it indicates there is a network partition inside the group - // https://dev.mysql.com/doc/refman/8.0/en/group-replication-network-partitioning.html - // And then eventually if the node does not recover, the group will transit into ERROR state - // VTGR cannot handle this case, therefore we raise error here - var name, state string - query := `select @@group_replication_group_name as group_name` - err := fetchInstance(instanceKey, query, func(m sqlutils.RowMap) error { - name = m.GetString("group_name") - return nil - }) - if err != nil { - return "", "", err - } - query = `select member_state - from performance_schema.replication_group_members - where member_host=convert(@@hostname using ascii) and member_port=@@port` - err = fetchInstance(instanceKey, query, func(m sqlutils.RowMap) error { - state = m.GetString("member_state") - if state == "" { - state = inst.GroupReplicationMemberStateOffline - } - return nil - }) - if err != nil { - return "", "", err - } - return name, state, nil -} - -// FetchApplierGTIDSet implements Agent interface -func (agent *SQLAgentImpl) FetchApplierGTIDSet(instanceKey 
*inst.InstanceKey) (mysql.GTIDSet, error) { - var gtidSet string - // TODO: should we also take group_replication_recovery as well? - query := `select gtid_subtract(concat(received_transaction_set, ',', @@global.gtid_executed), '') as gtid_set - from performance_schema.replication_connection_status - where channel_name='group_replication_applier'` - err := fetchInstance(instanceKey, query, func(m sqlutils.RowMap) error { - // If the instance has no committed transaction, gtidSet will be empty string - gtidSet = m.GetString("gtid_set") - return nil - }) - if err != nil { - return nil, err - } - pos, err := mysql.ParsePosition(agent.dbFlavor, gtidSet) - if err != nil { - return nil, err - } - return pos.GTIDSet, nil -} - -// execInstance executes a given query on the given MySQL discovery instance -func execInstance(instanceKey *inst.InstanceKey, query string, args ...any) error { - if err := verifyInstance(instanceKey); err != nil { - return err - } - sqlDb, err := OpenDiscovery(instanceKey.Hostname, instanceKey.Port) - if err != nil { - log.Errorf("error exec %v: %v", query, err) - return err - } - _, err = sqlutils.ExecNoPrepare(sqlDb, query, args...) - return err -} - -// execInstanceWithTopo executes a given query on the given MySQL topology instance -func execInstanceWithTopo(instanceKey *inst.InstanceKey, query string, args ...any) error { - if err := verifyInstance(instanceKey); err != nil { - return err - } - sqlDb, err := OpenTopology(instanceKey.Hostname, instanceKey.Port) - if err != nil { - log.Errorf("error exec %v: %v", query, err) - return err - } - _, err = sqlutils.ExecNoPrepare(sqlDb, query, args...) 
- return err -} - -// fetchInstance fetches result from mysql -func fetchInstance(instanceKey *inst.InstanceKey, query string, onRow func(sqlutils.RowMap) error) error { - if err := verifyInstance(instanceKey); err != nil { - return err - } - sqlDb, err := OpenDiscovery(instanceKey.Hostname, instanceKey.Port) - if err != nil { - return err - } - return sqlutils.QueryRowsMap(sqlDb, query, onRow) -} - -// The hostname and port can be empty if a tablet crashed and did not populate them in -// the topo server. We treat them as if the host is unreachable when we calculate the -// quorum for the shard. -func verifyInstance(instanceKey *inst.InstanceKey) error { - if instanceKey.Hostname == "" || instanceKey.Port == 0 { - return ErrInvalidInstance - } - return nil -} - -// CreateInstanceKey returns an InstanceKey based on group member input -// When the group is init for the first time, the hostname and port are not set, e.g., -// +---------------------------+-----------+-------------+-------------+--------------+-------------+ -// | CHANNEL_NAME | MEMBER_ID | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | -// +---------------------------+-----------+-------------+-------------+--------------+-------------+ -// | group_replication_applier | | | NULL | OFFLINE | | -// +---------------------------+-----------+-------------+-------------+--------------+-------------+ -// therefore we substitute with view's local hostname and port -func (view *GroupView) CreateInstanceKey(member *GroupMember) inst.InstanceKey { - if member.HostName == "" && member.Port == 0 { - return inst.InstanceKey{ - Hostname: view.MySQLHost, - Port: view.MySQLPort, - } - } - return inst.InstanceKey{ - Hostname: member.HostName, - Port: member.Port, - } -} - -// ToString make string for group view -func (view *GroupView) ToString() string { - var sb strings.Builder - sb.WriteString(fmt.Sprintf("group_name:%v\n", view.GroupName)) - for _, m := range view.UnresolvedMembers { - 
sb.WriteString(fmt.Sprintf("host:%v:%v | role:%v | state:%v\n", m.HostName, m.Port, m.Role, m.State)) - } - return sb.String() -} - -func (state MemberState) String() string { - switch state { - case ONLINE: - return inst.GroupReplicationMemberStateOnline - case ERROR: - return inst.GroupReplicationMemberStateError - case RECOVERING: - return inst.GroupReplicationMemberStateRecovering - case OFFLINE: - return inst.GroupReplicationMemberStateOffline - case UNREACHABLE: - return inst.GroupReplicationMemberStateUnreachable - } - return "UNKNOWN" -} - -func toMemberState(state string) MemberState { - switch state { - case inst.GroupReplicationMemberStateOnline: - return ONLINE - case inst.GroupReplicationMemberStateError: - return ERROR - case inst.GroupReplicationMemberStateRecovering: - return RECOVERING - case inst.GroupReplicationMemberStateOffline: - return OFFLINE - case inst.GroupReplicationMemberStateUnreachable: - return UNREACHABLE - default: - return UNKNOWNSTATE - } -} - -func (role MemberRole) String() string { - switch role { - case PRIMARY: - return inst.GroupReplicationMemberRolePrimary - case SECONDARY: - return inst.GroupReplicationMemberRoleSecondary - } - return "UNKNOWN" -} - -func toMemberRole(role string) MemberRole { - switch role { - case inst.GroupReplicationMemberRolePrimary: - return PRIMARY - case inst.GroupReplicationMemberRoleSecondary: - return SECONDARY - default: - return UNKNOWNROLE - } -} diff --git a/go/vt/vtgr/db/tls.go b/go/vt/vtgr/db/tls.go deleted file mode 100644 index 514e3d49df3..00000000000 --- a/go/vt/vtgr/db/tls.go +++ /dev/null @@ -1,152 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -/* - This file has been copied over from VTOrc package -*/ - -package db - -import ( - "fmt" - "strings" - "time" - - "vitess.io/vitess/go/vt/external/golib/sqlutils" - "vitess.io/vitess/go/vt/log" - - "github.com/go-sql-driver/mysql" - "github.com/patrickmn/go-cache" - "github.com/rcrowley/go-metrics" - - "vitess.io/vitess/go/vt/vtgr/config" - "vitess.io/vitess/go/vt/vtgr/ssl" -) - -const Error3159 = "Error 3159:" -const Error1045 = "Access denied for user" - -// Track if a TLS has already been configured for topology -var topologyTLSConfigured = false - -// Track if a TLS has already been configured for Orchestrator -var orchestratorTLSConfigured = false - -var requireTLSCache *cache.Cache = cache.New(time.Duration(config.Config.TLSCacheTTLFactor*config.Config.InstancePollSeconds)*time.Second, time.Second) - -var readInstanceTLSCounter = metrics.NewCounter() -var writeInstanceTLSCounter = metrics.NewCounter() -var readInstanceTLSCacheCounter = metrics.NewCounter() -var writeInstanceTLSCacheCounter = metrics.NewCounter() - -func init() { - metrics.Register("instance_tls.read", readInstanceTLSCounter) - metrics.Register("instance_tls.write", writeInstanceTLSCounter) - metrics.Register("instance_tls.read_cache", readInstanceTLSCacheCounter) - metrics.Register("instance_tls.write_cache", writeInstanceTLSCacheCounter) -} - -func requiresTLS(host string, port int, uri string) bool { - cacheKey := fmt.Sprintf("%s:%d", host, port) - - if value, found := requireTLSCache.Get(cacheKey); found { - readInstanceTLSCacheCounter.Inc(1) - return value.(bool) - } - - 
required := false - db, _, _ := sqlutils.GetDB(uri) - if err := db.Ping(); err != nil && (strings.Contains(err.Error(), Error3159) || strings.Contains(err.Error(), Error1045)) { - required = true - } - - query := ` - insert into - database_instance_tls ( - hostname, port, required - ) values ( - ?, ?, ? - ) - on duplicate key update - required=values(required) - ` - if _, err := ExecOrchestrator(query, host, port, required); err != nil { - log.Error(err) - } - writeInstanceTLSCounter.Inc(1) - - requireTLSCache.Set(cacheKey, required, cache.DefaultExpiration) - writeInstanceTLSCacheCounter.Inc(1) - - return required -} - -// SetupMySQLTopologyTLS creates a TLS configuration from the config supplied CA, Certificate, and Private key. -// Register the TLS config with the mysql drivers as the "topology" config -// Modify the supplied URI to call the TLS config -func SetupMySQLTopologyTLS(uri string) (string, error) { - if !topologyTLSConfigured { - tlsConfig, err := ssl.NewTLSConfig(config.Config.MySQLTopologySSLCAFile, !config.Config.MySQLTopologySSLSkipVerify, config.Config.MySQLTopologyTLSMinVersionNumber()) - if err != nil { - log.Errorf("Can't create TLS configuration for Topology connection %s: %s", uri, err) - return "", err - } - tlsConfig.InsecureSkipVerify = config.Config.MySQLTopologySSLSkipVerify - - if (config.Config.MySQLTopologyUseMutualTLS && !config.Config.MySQLTopologySSLSkipVerify) && - config.Config.MySQLTopologySSLCertFile != "" && - config.Config.MySQLTopologySSLPrivateKeyFile != "" { - if err = ssl.AppendKeyPair(tlsConfig, config.Config.MySQLTopologySSLCertFile, config.Config.MySQLTopologySSLPrivateKeyFile); err != nil { - log.Errorf("Can't setup TLS key pairs for %s: %s", uri, err) - return "", err - } - } - if err = mysql.RegisterTLSConfig("topology", tlsConfig); err != nil { - log.Errorf("Can't register mysql TLS config for topology: %s", err) - return "", err - } - topologyTLSConfigured = true - } - return fmt.Sprintf("%s&tls=topology", uri), 
nil -} - -// SetupMySQLOrchestratorTLS creates a TLS configuration from the config supplied CA, Certificate, and Private key. -// Register the TLS config with the mysql drivers as the "orchestrator" config -// Modify the supplied URI to call the TLS config -func SetupMySQLOrchestratorTLS(uri string) (string, error) { - if !orchestratorTLSConfigured { - tlsConfig, err := ssl.NewTLSConfig(config.Config.MySQLOrchestratorSSLCAFile, !config.Config.MySQLOrchestratorSSLSkipVerify, config.Config.MySQLOrchestratorTLSMinVersionNumber()) - if err != nil { - log.Fatalf("Can't create TLS configuration for Orchestrator connection %s: %s", uri, err) - return "", err - } - tlsConfig.InsecureSkipVerify = config.Config.MySQLOrchestratorSSLSkipVerify - if (!config.Config.MySQLOrchestratorSSLSkipVerify) && - config.Config.MySQLOrchestratorSSLCertFile != "" && - config.Config.MySQLOrchestratorSSLPrivateKeyFile != "" { - if err = ssl.AppendKeyPair(tlsConfig, config.Config.MySQLOrchestratorSSLCertFile, config.Config.MySQLOrchestratorSSLPrivateKeyFile); err != nil { - log.Fatalf("Can't setup TLS key pairs for %s: %s", uri, err) - return "", err - } - } - if err = mysql.RegisterTLSConfig("orchestrator", tlsConfig); err != nil { - log.Fatalf("Can't register mysql TLS config for orchestrator: %s", err) - return "", err - } - orchestratorTLSConfigured = true - } - return fmt.Sprintf("%s&tls=orchestrator", uri), nil -} diff --git a/go/vt/vtgr/inst/instance_key.go b/go/vt/vtgr/inst/instance_key.go deleted file mode 100644 index cd3039537b3..00000000000 --- a/go/vt/vtgr/inst/instance_key.go +++ /dev/null @@ -1,125 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -/* - This file has been copied over from VTOrc package -*/ - -package inst - -import ( - "fmt" - "regexp" - "strings" -) - -// InstanceKey is an instance indicator, identifued by hostname and port -type InstanceKey struct { - Hostname string - Port int -} - -var ( - ipv4Regexp = regexp.MustCompile(`^([0-9]+)[.]([0-9]+)[.]([0-9]+)[.]([0-9]+)$`) -) - -const detachHint = "//" - -// Constant strings for Group Replication information -// See https://dev.mysql.com/doc/refman/8.0/en/replication-group-members-table.html for additional information. -const ( - // Group member roles - GroupReplicationMemberRolePrimary = "PRIMARY" - GroupReplicationMemberRoleSecondary = "SECONDARY" - // Group member states - GroupReplicationMemberStateOnline = "ONLINE" - GroupReplicationMemberStateRecovering = "RECOVERING" - GroupReplicationMemberStateUnreachable = "UNREACHABLE" - GroupReplicationMemberStateOffline = "OFFLINE" - GroupReplicationMemberStateError = "ERROR" -) - -// Equals tests equality between this key and another key -func (instanceKey *InstanceKey) Equals(other *InstanceKey) bool { - if other == nil { - return false - } - return instanceKey.Hostname == other.Hostname && instanceKey.Port == other.Port -} - -// SmallerThan returns true if this key is dictionary-smaller than another. -// This is used for consistent sorting/ordering; there's nothing magical about it. 
-func (instanceKey *InstanceKey) SmallerThan(other *InstanceKey) bool { - if instanceKey.Hostname < other.Hostname { - return true - } - if instanceKey.Hostname == other.Hostname && instanceKey.Port < other.Port { - return true - } - return false -} - -// IsDetached returns 'true' when this hostname is logically "detached" -func (instanceKey *InstanceKey) IsDetached() bool { - return strings.HasPrefix(instanceKey.Hostname, detachHint) -} - -// IsValid uses simple heuristics to see whether this key represents an actual instance -func (instanceKey *InstanceKey) IsValid() bool { - if instanceKey.Hostname == "_" { - return false - } - if instanceKey.IsDetached() { - return false - } - return len(instanceKey.Hostname) > 0 && instanceKey.Port > 0 -} - -// DetachedKey returns an instance key whose hostname is detahced: invalid, but recoverable -func (instanceKey *InstanceKey) DetachedKey() *InstanceKey { - if instanceKey.IsDetached() { - return instanceKey - } - return &InstanceKey{Hostname: fmt.Sprintf("%s%s", detachHint, instanceKey.Hostname), Port: instanceKey.Port} -} - -// ReattachedKey returns an instance key whose hostname is detahced: invalid, but recoverable -func (instanceKey *InstanceKey) ReattachedKey() *InstanceKey { - if !instanceKey.IsDetached() { - return instanceKey - } - return &InstanceKey{Hostname: instanceKey.Hostname[len(detachHint):], Port: instanceKey.Port} -} - -// StringCode returns an official string representation of this key -func (instanceKey *InstanceKey) StringCode() string { - return fmt.Sprintf("%s:%d", instanceKey.Hostname, instanceKey.Port) -} - -// DisplayString returns a user-friendly string representation of this key -func (instanceKey *InstanceKey) DisplayString() string { - return instanceKey.StringCode() -} - -// String returns a user-friendly string representation of this key -func (instanceKey InstanceKey) String() string { - return instanceKey.StringCode() -} - -// IsValid uses simple heuristics to see whether this key 
represents an actual instance -func (instanceKey *InstanceKey) IsIPv4() bool { - return ipv4Regexp.MatchString(instanceKey.Hostname) -} diff --git a/go/vt/vtgr/inst/instance_key_test.go b/go/vt/vtgr/inst/instance_key_test.go deleted file mode 100644 index e3e016e474c..00000000000 --- a/go/vt/vtgr/inst/instance_key_test.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -/* - This file has been copied over from VTOrc package -*/ - -package inst - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/vt/vtgr/config" -) - -func init() { - config.Config.HostnameResolveMethod = "none" -} - -var key1 = InstanceKey{Hostname: "host1", Port: 3306} - -func TestInstanceKeyEquals(t *testing.T) { - i1 := InstanceKey{ - Hostname: "sql00.db", - Port: 3306, - } - i2 := InstanceKey{ - Hostname: "sql00.db", - Port: 3306, - } - - require.Equal(t, i1, i2) - - i2.Port = 3307 - require.NotEqual(t, i1, i2) -} - -func TestInstanceKeyDetach(t *testing.T) { - require.False(t, key1.IsDetached()) - detached1 := key1.DetachedKey() - require.True(t, detached1.IsDetached()) - detached2 := key1.DetachedKey() - require.True(t, detached2.IsDetached()) - require.True(t, detached1.Equals(detached2)) - - reattached1 := detached1.ReattachedKey() - require.False(t, reattached1.IsDetached()) - require.True(t, reattached1.Equals(&key1)) - reattached2 := reattached1.ReattachedKey() - require.False(t, 
reattached2.IsDetached()) - require.True(t, reattached1.Equals(reattached2)) -} diff --git a/go/vt/vtgr/log/log.go b/go/vt/vtgr/log/log.go deleted file mode 100644 index 4133bbb39a1..00000000000 --- a/go/vt/vtgr/log/log.go +++ /dev/null @@ -1,53 +0,0 @@ -package log - -import ( - "fmt" - - "vitess.io/vitess/go/vt/log" -) - -// Logger is a wrapper that prefix loglines with keyspace/shard -type Logger struct { - prefix string -} - -// NewVTGRLogger creates a new logger -func NewVTGRLogger(keyspace, shard string) *Logger { - return &Logger{ - prefix: fmt.Sprintf("%s/%s", keyspace, shard), - } -} - -// Info formats arguments like fmt.Print -func (logger *Logger) Info(msg string) { - log.InfoDepth(1, logger.annotate(msg)) -} - -// Infof formats arguments like fmt.Printf. -func (logger *Logger) Infof(format string, args ...any) { - log.InfoDepth(1, logger.annotate(fmt.Sprintf(format, args...))) -} - -// Warning formats arguments like fmt.Print -func (logger *Logger) Warning(msg string) { - log.WarningDepth(1, logger.annotate(msg)) -} - -// Warningf formats arguments like fmt.Printf. -func (logger *Logger) Warningf(format string, args ...any) { - log.WarningDepth(1, logger.annotate(fmt.Sprintf(format, args...))) -} - -// Error formats arguments like fmt.Print -func (logger *Logger) Error(msg string) { - log.ErrorDepth(1, logger.annotate(msg)) -} - -// Errorf formats arguments like fmt.Printf. 
-func (logger *Logger) Errorf(format string, args ...any) { - log.ErrorDepth(1, logger.annotate(fmt.Sprintf(format, args...))) -} - -func (logger *Logger) annotate(input string) string { - return fmt.Sprintf("shard=%s %s", logger.prefix, input) -} diff --git a/go/vt/vtgr/log/log_test.go b/go/vt/vtgr/log/log_test.go deleted file mode 100644 index fd4ede386e9..00000000000 --- a/go/vt/vtgr/log/log_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package log - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestVTGRLogger(t *testing.T) { - logger := NewVTGRLogger("ks", "0") - s1 := logger.annotate("abc") - assert.Equal(t, "shard=ks/0 abc", s1) - s2 := fmt.Sprintf(logger.annotate("abc %s"), "def") - assert.Equal(t, "shard=ks/0 abc def", s2) -} diff --git a/go/vt/vtgr/plugin_consultopo.go b/go/vt/vtgr/plugin_consultopo.go deleted file mode 100644 index 3786fd59c26..00000000000 --- a/go/vt/vtgr/plugin_consultopo.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vtgr - -// This plugin imports consultopo to register the consul implementation of TopoServer. - -import ( - _ "vitess.io/vitess/go/vt/topo/consultopo" -) diff --git a/go/vt/vtgr/plugin_grpctmclient.go b/go/vt/vtgr/plugin_grpctmclient.go deleted file mode 100644 index 529c560c207..00000000000 --- a/go/vt/vtgr/plugin_grpctmclient.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vtgr - -// Imports and register the gRPC tabletmanager client - -import ( - _ "vitess.io/vitess/go/vt/vttablet/grpctmclient" -) diff --git a/go/vt/vtgr/plugin_zk2topo.go b/go/vt/vtgr/plugin_zk2topo.go deleted file mode 100644 index f524fd0e21a..00000000000 --- a/go/vt/vtgr/plugin_zk2topo.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package vtgr - -// Imports and register the zk2 TopologyServer - -import ( - _ "vitess.io/vitess/go/vt/topo/zk2topo" -) diff --git a/go/vt/vtgr/ssl/ssl.go b/go/vt/vtgr/ssl/ssl.go deleted file mode 100644 index 9ca18c4c807..00000000000 --- a/go/vt/vtgr/ssl/ssl.go +++ /dev/null @@ -1,62 +0,0 @@ -package ssl - -import ( - "crypto/tls" - "crypto/x509" - "errors" - "os" - - "vitess.io/vitess/go/vt/log" -) - -/* - This file has been copied over from VTOrc package -*/ - -// NewTLSConfig returns an initialized TLS configuration suitable for client -// authentication. If caFile is non-empty, it will be loaded. -func NewTLSConfig(caFile string, verifyCert bool, minVersion uint16) (*tls.Config, error) { - var c tls.Config - - // Set to TLS 1.2 as a minimum. This is overridden for mysql communication - c.MinVersion = minVersion - - if verifyCert { - log.Info("verifyCert requested, client certificates will be verified") - c.ClientAuth = tls.VerifyClientCertIfGiven - } - caPool, err := ReadCAFile(caFile) - if err != nil { - return &c, err - } - c.ClientCAs = caPool - return &c, nil -} - -// Returns CA certificate. If caFile is non-empty, it will be loaded. -func ReadCAFile(caFile string) (*x509.CertPool, error) { - var caCertPool *x509.CertPool - if caFile != "" { - data, err := os.ReadFile(caFile) - if err != nil { - return nil, err - } - caCertPool = x509.NewCertPool() - if !caCertPool.AppendCertsFromPEM(data) { - return nil, errors.New("No certificates parsed") - } - log.Infof("Read in CA file: %v", caFile) - } - return caCertPool, nil -} - -// AppendKeyPair loads the given TLS key pair and appends it to -// tlsConfig.Certificates. 
-func AppendKeyPair(tlsConfig *tls.Config, certFile string, keyFile string) error { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return err - } - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - return nil -} diff --git a/go/vt/vtgr/ssl/ssl_test.go b/go/vt/vtgr/ssl/ssl_test.go deleted file mode 100644 index e9ab4a84d74..00000000000 --- a/go/vt/vtgr/ssl/ssl_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package ssl_test - -import ( - "crypto/tls" - "os" - "syscall" - "testing" - - "vitess.io/vitess/go/vt/vtgr/ssl" -) - -/* - This file has been copied over from VTOrc package -*/ - -// TODO: Build a fake CA and make sure it loads up -func TestNewTLSConfig(t *testing.T) { - fakeCA := writeFakeFile(pemCertificate) - defer syscall.Unlink(fakeCA) - - conf, err := ssl.NewTLSConfig(fakeCA, true, tls.VersionTLS13) - if err != nil { - t.Errorf("Could not create new TLS config: %s", err) - } - if conf.ClientAuth != tls.VerifyClientCertIfGiven { - t.Errorf("Client certificate verification was not enabled") - } - if conf.ClientCAs == nil { - t.Errorf("ClientCA empty even though cert provided") - } - if conf.MinVersion != tls.VersionTLS13 { - t.Errorf("incorrect tls min version set") - } - - conf, err = ssl.NewTLSConfig("", false, tls.VersionTLS12) - if err != nil { - t.Errorf("Could not create new TLS config: %s", err) - } - if conf.ClientAuth == tls.VerifyClientCertIfGiven { - t.Errorf("Client certificate verification was enabled unexpectedly") - } - if conf.ClientCAs != nil { - t.Errorf("Filling in ClientCA somehow without a cert") - } - if conf.MinVersion != tls.VersionTLS12 { - t.Errorf("incorrect tls min version set") - } -} - -func TestAppendKeyPair(t *testing.T) { - c, err := ssl.NewTLSConfig("", false, tls.VersionTLS12) - if err != nil { - t.Fatal(err) - } - pemCertFile := writeFakeFile(pemCertificate) - defer syscall.Unlink(pemCertFile) - pemPKFile := writeFakeFile(pemPrivateKey) - defer syscall.Unlink(pemPKFile) - - if err := 
ssl.AppendKeyPair(c, pemCertFile, pemPKFile); err != nil { - t.Errorf("Failed to append certificate and key to tls config: %s", err) - } -} - -func writeFakeFile(content string) string { - f, err := os.CreateTemp("", "ssl_test") - if err != nil { - return "" - } - os.WriteFile(f.Name(), []byte(content), 0644) - return f.Name() -} - -const pemCertificate = `-----BEGIN CERTIFICATE----- -MIIDtTCCAp2gAwIBAgIJAOxKC7FsJelrMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV -BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX -aWRnaXRzIFB0eSBMdGQwHhcNMTcwODEwMTQ0MjM3WhcNMTgwODEwMTQ0MjM3WjBF -MQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50 -ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB -CgKCAQEA12vHV3gYy5zd1lujA7prEhCSkAszE6E37mViWhLQ63CuedZfyYaTAHQK -HYDZi4K1MNAySUfZRMcICSSsxlRIz6mzXrFsowaJgwx4cbMDIvXE03KstuXoTYJh -+xmXB+5yEVEtIyP2DvPqfCmwCZb3k94Y/VY1nAQDxIxciXrAxT9zT1oYd0YWr2yp -J2mgsfnY4c3zg7W5WgvOTmYz7Ey7GJjpUjGdayx+P1CilKzSWH1xZuVQFNLSHvcH -WXkEoCMVc0tW5mO5eEO1aNHo9MSjPF386l1rq+pz5OwjqCEZq2b1YxesyLnbF+8+ -iYGfYmFaDLFwG7zVDwialuI4TzIIOQIDAQABo4GnMIGkMB0GA1UdDgQWBBQ1ubGx -Yvn3wN5VXyoR0lOD7ARzVTB1BgNVHSMEbjBsgBQ1ubGxYvn3wN5VXyoR0lOD7ARz -VaFJpEcwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNV -BAoTGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZIIJAOxKC7FsJelrMAwGA1UdEwQF -MAMBAf8wDQYJKoZIhvcNAQEFBQADggEBALmm4Zw/4jLKDJciUGUYOcr5Xe9TP/Cs -afH7IWvaFUDfV3W6yAm9jgNfIy9aDLpuu2CdEb+0qL2hdmGLV7IM3y62Ve0UTdGV -BGsm1zMmIguew2wGbAwGr5LmIcUseatVUKAAAfDrBNwotEAdM8kmGekUZfOM+J9D -FoNQ62C0buRHGugtu6zWAcZNOe6CI7HdhaAdxZlgn8y7dfJQMacoK0NcWeUVQwii -6D4mgaqUGM2O+WcquD1vEMuBPYVcKhi43019E0+6LI5QB6w80bARY8K7tkTdRD7U -y1/C7iIqyuBVL45OdSabb37TfGlHZIPIwLaGw3i4Mr0+F0jQT8rZtTQ= ------END CERTIFICATE-----` - -const pemPrivateKey = `-----BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA12vHV3gYy5zd1lujA7prEhCSkAszE6E37mViWhLQ63CuedZf -yYaTAHQKHYDZi4K1MNAySUfZRMcICSSsxlRIz6mzXrFsowaJgwx4cbMDIvXE03Ks -tuXoTYJh+xmXB+5yEVEtIyP2DvPqfCmwCZb3k94Y/VY1nAQDxIxciXrAxT9zT1oY 
-d0YWr2ypJ2mgsfnY4c3zg7W5WgvOTmYz7Ey7GJjpUjGdayx+P1CilKzSWH1xZuVQ -FNLSHvcHWXkEoCMVc0tW5mO5eEO1aNHo9MSjPF386l1rq+pz5OwjqCEZq2b1Yxes -yLnbF+8+iYGfYmFaDLFwG7zVDwialuI4TzIIOQIDAQABAoIBAHLf4pleTbqmmBWr -IC7oxhgIBmAR2Nbq7eyO2/e0ePxURnZqPwI0ZUekmZBKGbgvp3e0TlyNl+r5R+u4 -RvosD/fNQv2IF6qH3eSoTcIz98Q40xD+4eNWjp5mnOFOMB/mo6VgaHWIw7oNkElN -4bX7b2LG2QSfaE8eRPQW9XHKp+mGhYFbxgPYxUmlIXuYZF61hVwxysDA6DP3LOi8 -yUL6E64x6NqN9xtg/VoN+f6N0MOvsr4yb5+uvni1LVRFI7tNqIN4Y6P6trgKfnRR -EpZeAUu8scqyxE4NeqnnjK/wBuXxaeh3e9mN1V2SzT629c1InmmQasZ5slcCJQB+ -38cswgECgYEA+esaLKwHXT4+sOqMYemi7TrhxtNC2f5OAGUiSRVmTnum2gl4wOB+ -h5oLZAuG5nBEIoqbMEbI35vfuHqIe390IJtPdQlz4TGDsPufYj/gnnBBFy/c8f+n -f/CdRDRYrpnpKGwvUntLRB2pFbe2hlqqq+4YUqiHauJMOCJnPbOo1lECgYEA3KnF -VOXyY0fKD45G7ttfAcpw8ZI2gY99sCRwtBQGsbO61bvw5sl/3j7AmYosz+n6f7hb -uHmitIuPv4z3r1yfVysh80tTGIM3wDkpr3fLYRxpVOZU4hgxMQV9yyaSA/Hfqn48 -vIK/NC4bERqpofNNdrIqNaGWkd87ZycvpRfa0WkCgYBztbVVr4RtWG9gLAg5IRot -KhD0pEWUdpiYuDpqifznI3r6Al6lNot+rwTNGkUoFhyFvZTigjNozFuFpz3fqAAV -RLNCJdFAF1O4spd1vst5r9GDMcbjSJG9u6KkvHO+y0XXUFeMoccUT4NEqd1ZUUsp -9T/PrXWdOA9AAjW4rKDkMQKBgQC9R4NVR8mbD8Frhoeh69qbFqO7E8hdalBN/3QN -hAAZ/imNnSEPVliwsvNSwQufbPzLAcDrhKrkY7JyhOERM0oa44zDvSESLbxszpvL -P97c9hoEEW9OYaIQgr1cvUES0S8ieBZxPVX11HazPUO0/5a68ijyyCD4D5xM53gf -DU9NwQKBgQCmVthQi65xcc4mgCIwXtBZWXeaPv5x0dLEXIC5EoN6eXLK9iW//7cE -hhawtJtl+J6laB+TkEGQsyhc4v85WcywdisyR7LR7CUqFYJMKeE/VtTVKnYbfq54 -rHoQS9YotByBwPtRx0V93gkc+KWBOGmSBBxKj7lrBkYkcWAiRfpJjg== ------END RSA PRIVATE KEY-----` diff --git a/go/vt/vtgr/vtgr.go b/go/vt/vtgr/vtgr.go deleted file mode 100644 index 80a5f99fad9..00000000000 --- a/go/vt/vtgr/vtgr.go +++ /dev/null @@ -1,233 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vtgr - -import ( - "context" - "errors" - "os" - "os/signal" - "strings" - "sync" - "sync/atomic" - "syscall" - "time" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/vt/concurrency" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vtgr/config" - "vitess.io/vitess/go/vt/vtgr/controller" - "vitess.io/vitess/go/vt/vtgr/db" - "vitess.io/vitess/go/vt/vttablet/tmclient" -) - -var ( - refreshInterval = 10 * time.Second - scanInterval = 3 * time.Second - scanAndRepairTimeout = 3 * time.Second - vtgrConfigFile string - - localDbPort int -) - -func init() { - servenv.OnParseFor("vtgr", func(fs *pflag.FlagSet) { - fs.DurationVar(&refreshInterval, "refresh_interval", 10*time.Second, "Refresh interval to load tablets.") - fs.DurationVar(&scanInterval, "scan_interval", 3*time.Second, "Scan interval to diagnose and repair.") - fs.DurationVar(&scanAndRepairTimeout, "scan_repair_timeout", 3*time.Second, "Time to wait for a Diagnose and repair operation.") - fs.StringVar(&vtgrConfigFile, "vtgr_config", "", "Config file for vtgr.") - fs.IntVar(&localDbPort, "db_port", 0, "Local mysql port, set this to enable local fast check.") - }) -} - -// VTGR is the interface to manage the component to set up group replication with Vitess. -// The main goal of it is to reconcile MySQL group and the Vitess topology. -// Caller should use OpenTabletDiscovery to create the VTGR instance. -type VTGR struct { - // Shards are all the shards that a VTGR is monitoring. 
- // Caller can choose to iterate the shards to scan and repair for more granular control (e.g., stats report) - // instead of calling ScanAndRepair() directly. - Shards []*controller.GRShard - topo controller.GRTopo - tmc tmclient.TabletManagerClient - ctx context.Context - - stopped atomic.Bool -} - -func newVTGR(ctx context.Context, ts controller.GRTopo, tmc tmclient.TabletManagerClient) *VTGR { - return &VTGR{ - topo: ts, - tmc: tmc, - ctx: ctx, - } -} - -// OpenTabletDiscovery calls OpenTabletDiscoveryWithAcitve and set the shard to be active -// it opens connection with topo server -// and triggers the first round of controller based on specified cells and keyspace/shards. -func OpenTabletDiscovery(ctx context.Context, cellsToWatch, clustersToWatch []string) *VTGR { - return OpenTabletDiscoveryWithAcitve(ctx, cellsToWatch, clustersToWatch, true) -} - -// OpenTabletDiscoveryWithAcitve opens connection with topo server -// and triggers the first round of controller based on parameter -func OpenTabletDiscoveryWithAcitve(ctx context.Context, cellsToWatch, clustersToWatch []string, active bool) *VTGR { - if vtgrConfigFile == "" { - log.Fatal("vtgr_config is required") - } - config, err := config.ReadVTGRConfig(vtgrConfigFile) - if err != nil { - log.Fatalf("Cannot load vtgr config file: %v", err) - } - vtgr := newVTGR( - ctx, - topo.Open(), - tmclient.NewTabletManagerClient(), - ) - var shards []*controller.GRShard - ctx, cancel := context.WithTimeout(vtgr.ctx, topo.RemoteOperationTimeout) - defer cancel() - for _, ks := range clustersToWatch { - if strings.Contains(ks, "/") { - // This is a keyspace/shard specification - input := strings.Split(ks, "/") - shards = append(shards, controller.NewGRShard(input[0], input[1], cellsToWatch, vtgr.tmc, vtgr.topo, db.NewVTGRSqlAgent(), config, localDbPort, active)) - } else { - // Assume this is a keyspace and find all shards in keyspace - shardNames, err := vtgr.topo.GetShardNames(ctx, ks) - if err != nil { - // Log the 
error and continue - log.Errorf("Error fetching shards for keyspace %v: %v", ks, err) - continue - } - if len(shardNames) == 0 { - log.Errorf("Topo has no shards for ks: %v", ks) - continue - } - for _, s := range shardNames { - shards = append(shards, controller.NewGRShard(ks, s, cellsToWatch, vtgr.tmc, vtgr.topo, db.NewVTGRSqlAgent(), config, localDbPort, active)) - } - } - } - vtgr.handleSignal(os.Exit) - vtgr.Shards = shards - log.Infof("Monitoring shards size %v", len(vtgr.Shards)) - // Force refresh all tablet here to populate data for vtgr - var wg sync.WaitGroup - for _, shard := range vtgr.Shards { - wg.Add(1) - go func(shard *controller.GRShard) { - defer wg.Done() - shard.UpdateTabletsInShardWithLock(ctx) - }(shard) - } - wg.Wait() - log.Info("Ready to start VTGR") - return vtgr -} - -// RefreshCluster get the latest tablets from topo server -func (vtgr *VTGR) RefreshCluster() { - for _, shard := range vtgr.Shards { - go func(shard *controller.GRShard) { - ticker := time.Tick(refreshInterval) - for range ticker { - ctx, cancel := context.WithTimeout(vtgr.ctx, refreshInterval) - shard.UpdateTabletsInShardWithLock(ctx) - cancel() - } - }(shard) - } -} - -// ScanAndRepair starts the scanAndFix routine -func (vtgr *VTGR) ScanAndRepair() { - for _, shard := range vtgr.Shards { - go func(shard *controller.GRShard) { - ticker := time.Tick(scanInterval) - for range ticker { - func() { - ctx, cancel := context.WithTimeout(vtgr.ctx, scanAndRepairTimeout) - defer cancel() - if !vtgr.stopped.Load() { - log.Infof("Start scan and repair %v/%v", shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard) - shard.ScanAndRepairShard(ctx) - log.Infof("Finished scan and repair %v/%v", shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard) - } - }() - } - }(shard) - } -} - -// Diagnose exposes the endpoint to diagnose a particular shard -func (vtgr *VTGR) Diagnose(ctx context.Context, shard *controller.GRShard) (controller.DiagnoseType, error) { - return shard.Diagnose(ctx) 
-} - -// Repair exposes the endpoint to repair a particular shard -func (vtgr *VTGR) Repair(ctx context.Context, shard *controller.GRShard, diagnose controller.DiagnoseType) (controller.RepairResultCode, error) { - if vtgr.stopped.Load() { - return controller.Fail, errors.New("VTGR is stopped") - } - return shard.Repair(ctx, diagnose) -} - -// GetCurrentShardStatuses is used when we want to know what VTGR observes -// it contains information about a list of instances and primary tablet -func (vtgr *VTGR) GetCurrentShardStatuses() []controller.ShardStatus { - var result []controller.ShardStatus - for _, shard := range vtgr.Shards { - status := shard.GetCurrentShardStatuses() - result = append(result, status) - } - return result -} - -// OverrideRebootstrapGroupSize forces an override the group size used in safety check for rebootstrap -func (vtgr *VTGR) OverrideRebootstrapGroupSize(groupSize int) error { - errorRecord := concurrency.AllErrorRecorder{} - for _, shard := range vtgr.Shards { - err := shard.OverrideRebootstrapGroupSize(groupSize) - if err != nil { - errorRecord.RecordError(err) - } - } - return errorRecord.Error() -} - -func (vtgr *VTGR) handleSignal(action func(int)) { - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGHUP) - go func() { - // block until the signal is received - <-sigChan - log.Infof("Handling SIGHUP") - // Set stopped to true so that following repair call won't do anything - // For the ongoing repairs, checkShardLocked will abort if needed - vtgr.stopped.Store(true) - for _, shard := range vtgr.Shards { - shard.UnlockShard() - } - action(1) - }() -} diff --git a/go/vt/vtgr/vtgr_test.go b/go/vt/vtgr/vtgr_test.go deleted file mode 100644 index 3632e88427c..00000000000 --- a/go/vt/vtgr/vtgr_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package vtgr - -import ( - "context" - "sync/atomic" - "syscall" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/vt/topo/memorytopo" - 
"vitess.io/vitess/go/vt/vtgr/config" - "vitess.io/vitess/go/vt/vtgr/controller" - "vitess.io/vitess/go/vt/vtgr/db" - "vitess.io/vitess/go/vt/vttablet/tmclient" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -func TestSighupHandle(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - vtgr := newVTGR( - ctx, - ts, - tmclient.NewTabletManagerClient(), - ) - var shards []*controller.GRShard - config := &config.VTGRConfig{ - DisableReadOnlyProtection: false, - BootstrapGroupSize: 5, - MinNumReplica: 3, - BackoffErrorWaitTimeSeconds: 10, - BootstrapWaitTimeSeconds: 10 * 60, - } - shards = append(shards, controller.NewGRShard("ks", "0", nil, vtgr.tmc, vtgr.topo, db.NewVTGRSqlAgent(), config, localDbPort, true)) - vtgr.Shards = shards - shard := vtgr.Shards[0] - shard.LockShard(ctx, "test") - var res atomic.Bool - vtgr.handleSignal(func(i int) { - res.Store(true) - }) - assert.NotNil(t, shard.GetUnlock()) - assert.False(t, vtgr.stopped.Load()) - syscall.Kill(syscall.Getpid(), syscall.SIGHUP) - time.Sleep(100 * time.Millisecond) - assert.True(t, res.Load()) - assert.Nil(t, shard.GetUnlock()) - assert.True(t, vtgr.stopped.Load()) -} diff --git a/go/vt/vthash/hash.go b/go/vt/vthash/hash.go index 7b6a130dc08..3dbd85af6a3 100644 --- a/go/vt/vthash/hash.go +++ b/go/vt/vthash/hash.go @@ -17,6 +17,7 @@ limitations under the License. 
package vthash import ( + "vitess.io/vitess/go/vt/vthash/highway" "vitess.io/vitess/go/vt/vthash/metro" ) @@ -28,3 +29,12 @@ func New() Hasher { h.Reset() return h } + +type Hasher256 = highway.Digest +type Hash256 = [32]byte + +var defaultHash256Key = [32]byte{} + +func New256() *Hasher256 { + return highway.New(defaultHash256Key) +} diff --git a/go/vt/vthash/highway/LICENSE b/go/vt/vthash/highway/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/go/vt/vthash/highway/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/go/vt/vthash/highway/highwayhash.go b/go/vt/vthash/highway/highwayhash.go new file mode 100644 index 00000000000..a922b435d9d --- /dev/null +++ b/go/vt/vthash/highway/highwayhash.go @@ -0,0 +1,184 @@ +/* +Copyright 2023 The Vitess Authors. +Copyright (c) 2017 Minio Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package highwayhash implements the pseudo-random-function (PRF) HighwayHash. +// HighwayHash is a fast hash function designed to defend hash-flooding attacks +// or to authenticate short-lived messages. +// +// HighwayHash is not a general purpose cryptographic hash function and does not +// provide (strong) collision resistance. +package highway + +import ( + "encoding/binary" + "errors" + "unsafe" +) + +const ( + // Size is the size of HighwayHash-256 checksum in bytes. + Size = 32 + // Size128 is the size of HighwayHash-128 checksum in bytes. 
+ Size128 = 16 +) + +var errKeySize = errors.New("highwayhash: invalid key size") + +// New returns a hash.Hash computing the HighwayHash-256 checksum. +// It returns a non-nil error if the key is not 32 bytes long. +func New(key [Size]byte) *Digest { + h := &Digest{size: Size, key: key} + h.Reset() + return h +} + +// New128 returns a hash.Hash computing the HighwayHash-128 checksum. +// It returns a non-nil error if the key is not 32 bytes long. +func New128(key [Size]byte) *Digest { + h := &Digest{size: Size128, key: key} + h.Reset() + return h +} + +// Sum computes the HighwayHash-256 checksum of data. +// It panics if the key is not 32 bytes long. +func Sum(data, key []byte) [Size]byte { + if len(key) != Size { + panic(errKeySize) + } + var state [16]uint64 + initialize(&state, key) + if n := len(data) & (^(Size - 1)); n > 0 { + update(&state, data[:n]) + data = data[n:] + } + if len(data) > 0 { + var block [Size]byte + offset := copy(block[:], data) + hashBuffer(&state, &block, offset) + } + var hash [Size]byte + finalize(hash[:], &state) + return hash +} + +// Sum128 computes the HighwayHash-128 checksum of data. +// It panics if the key is not 32 bytes long. 
+func Sum128(data, key []byte) [Size128]byte { + if len(key) != Size { + panic(errKeySize) + } + var state [16]uint64 + initialize(&state, key) + if n := len(data) & (^(Size - 1)); n > 0 { + update(&state, data[:n]) + data = data[n:] + } + if len(data) > 0 { + var block [Size]byte + offset := copy(block[:], data) + hashBuffer(&state, &block, offset) + } + var hash [Size128]byte + finalize(hash[:], &state) + return hash +} + +type Digest struct { + state [16]uint64 // v0 | v1 | mul0 | mul1 + + key, buffer [Size]byte + offset int + size int +} + +func (d *Digest) Size() int { return d.size } + +func (d *Digest) BlockSize() int { return Size } + +func (d *Digest) Reset() { + initialize(&d.state, d.key[:]) + d.offset = 0 +} + +func (d *Digest) WriteString(str string) (int, error) { + return d.Write(unsafe.Slice(unsafe.StringData(str), len(str))) +} + +func (d *Digest) Write(p []byte) (n int, err error) { + n = len(p) + if d.offset > 0 { + remaining := Size - d.offset + if n < remaining { + d.offset += copy(d.buffer[d.offset:], p) + return + } + copy(d.buffer[d.offset:], p[:remaining]) + update(&d.state, d.buffer[:]) + p = p[remaining:] + d.offset = 0 + } + if nn := len(p) & (^(Size - 1)); nn > 0 { + update(&d.state, p[:nn]) + p = p[nn:] + } + if len(p) > 0 { + d.offset = copy(d.buffer[d.offset:], p) + } + return +} + +func (d *Digest) Sum(b []byte) []byte { + state := d.state + if d.offset > 0 { + hashBuffer(&state, &d.buffer, d.offset) + } + var hash [Size]byte + finalize(hash[:d.size], &state) + return append(b, hash[:d.size]...) 
+} + +func hashBuffer(state *[16]uint64, buffer *[32]byte, offset int) { + var block [Size]byte + mod32 := (uint64(offset) << 32) + uint64(offset) + for i := range state[:4] { + state[i] += mod32 + } + for i := range state[4:8] { + t0 := uint32(state[i+4]) + t0 = (t0 << uint(offset)) | (t0 >> uint(32-offset)) + + t1 := uint32(state[i+4] >> 32) + t1 = (t1 << uint(offset)) | (t1 >> uint(32-offset)) + + state[i+4] = (uint64(t1) << 32) | uint64(t0) + } + + mod4 := offset & 3 + remain := offset - mod4 + + copy(block[:], buffer[:remain]) + if offset >= 16 { + copy(block[28:], buffer[offset-4:]) + } else if mod4 != 0 { + last := uint32(buffer[remain]) + last += uint32(buffer[remain+mod4>>1]) << 8 + last += uint32(buffer[offset-1]) << 16 + binary.LittleEndian.PutUint32(block[16:], last) + } + update(state, block[:]) +} diff --git a/go/vt/vthash/highway/highwayhashAVX2_amd64.s b/go/vt/vthash/highway/highwayhashAVX2_amd64.s new file mode 100644 index 00000000000..761eac33dfe --- /dev/null +++ b/go/vt/vthash/highway/highwayhashAVX2_amd64.s @@ -0,0 +1,258 @@ +// Copyright (c) 2017 Minio Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build amd64,!gccgo,!appengine,!nacl,!noasm + +#include "textflag.h" + +DATA ·consAVX2<>+0x00(SB)/8, $0xdbe6d5d5fe4cce2f +DATA ·consAVX2<>+0x08(SB)/8, $0xa4093822299f31d0 +DATA ·consAVX2<>+0x10(SB)/8, $0x13198a2e03707344 +DATA ·consAVX2<>+0x18(SB)/8, $0x243f6a8885a308d3 +DATA ·consAVX2<>+0x20(SB)/8, $0x3bd39e10cb0ef593 +DATA ·consAVX2<>+0x28(SB)/8, $0xc0acf169b5f18a8c +DATA ·consAVX2<>+0x30(SB)/8, $0xbe5466cf34e90c6c +DATA ·consAVX2<>+0x38(SB)/8, $0x452821e638d01377 +GLOBL ·consAVX2<>(SB), (NOPTR+RODATA), $64 + +DATA ·zipperMergeAVX2<>+0x00(SB)/8, $0xf010e05020c03 +DATA ·zipperMergeAVX2<>+0x08(SB)/8, $0x70806090d0a040b +DATA ·zipperMergeAVX2<>+0x10(SB)/8, $0xf010e05020c03 +DATA ·zipperMergeAVX2<>+0x18(SB)/8, $0x70806090d0a040b +GLOBL ·zipperMergeAVX2<>(SB), (NOPTR+RODATA), $32 + +#define REDUCE_MOD(x0, x1, x2, x3, tmp0, tmp1, y0, y1) \ + MOVQ $0x3FFFFFFFFFFFFFFF, tmp0 \ + ANDQ tmp0, x3 \ + MOVQ x2, y0 \ + MOVQ x3, y1 \ + \ + MOVQ x2, tmp0 \ + MOVQ x3, tmp1 \ + SHLQ $1, tmp1 \ + SHRQ $63, tmp0 \ + MOVQ tmp1, x3 \ + ORQ tmp0, x3 \ + \ + SHLQ $1, x2 \ + \ + MOVQ y0, tmp0 \ + MOVQ y1, tmp1 \ + SHLQ $2, tmp1 \ + SHRQ $62, tmp0 \ + MOVQ tmp1, y1 \ + ORQ tmp0, y1 \ + \ + SHLQ $2, y0 \ + \ + XORQ x0, y0 \ + XORQ x2, y0 \ + XORQ x1, y1 \ + XORQ x3, y1 + +#define UPDATE(msg) \ + VPADDQ msg, Y2, Y2 \ + VPADDQ Y3, Y2, Y2 \ + \ + VPSRLQ $32, Y1, Y0 \ + BYTE $0xC5; BYTE $0xFD; BYTE $0xF4; BYTE $0xC2 \ // VPMULUDQ Y2, Y0, Y0 + VPXOR Y0, Y3, Y3 \ + \ + VPADDQ Y4, Y1, Y1 \ + \ + VPSRLQ $32, Y2, Y0 \ + BYTE $0xC5; BYTE $0xFD; BYTE $0xF4; BYTE $0xC1 \ // VPMULUDQ Y1, Y0, Y0 + VPXOR Y0, Y4, Y4 \ + \ + VPSHUFB Y5, Y2, Y0 \ + VPADDQ Y0, Y1, Y1 \ + \ + VPSHUFB Y5, Y1, Y0 \ + VPADDQ Y0, Y2, Y2 + +// func initializeAVX2(state *[16]uint64, key []byte) +TEXT ·initializeAVX2(SB), 4, $0-32 + MOVQ state+0(FP), AX + MOVQ key_base+8(FP), BX + MOVQ $·consAVX2<>(SB), CX + + VMOVDQU 0(BX), Y1 + VPSHUFD $177, Y1, Y2 + + VMOVDQU 0(CX), Y3 + VMOVDQU 32(CX), Y4 + + VPXOR Y3, Y1, Y1 + VPXOR Y4, Y2, 
Y2 + + VMOVDQU Y1, 0(AX) + VMOVDQU Y2, 32(AX) + VMOVDQU Y3, 64(AX) + VMOVDQU Y4, 96(AX) + VZEROUPPER + RET + +// func updateAVX2(state *[16]uint64, msg []byte) +TEXT ·updateAVX2(SB), 4, $0-32 + MOVQ state+0(FP), AX + MOVQ msg_base+8(FP), BX + MOVQ msg_len+16(FP), CX + + CMPQ CX, $32 + JB DONE + + VMOVDQU 0(AX), Y1 + VMOVDQU 32(AX), Y2 + VMOVDQU 64(AX), Y3 + VMOVDQU 96(AX), Y4 + + VMOVDQU ·zipperMergeAVX2<>(SB), Y5 + +LOOP: + VMOVDQU 0(BX), Y0 + UPDATE(Y0) + + ADDQ $32, BX + SUBQ $32, CX + JA LOOP + + VMOVDQU Y1, 0(AX) + VMOVDQU Y2, 32(AX) + VMOVDQU Y3, 64(AX) + VMOVDQU Y4, 96(AX) + VZEROUPPER + +DONE: + RET + +// func finalizeAVX2(out []byte, state *[16]uint64) +TEXT ·finalizeAVX2(SB), 4, $0-32 + MOVQ state+24(FP), AX + MOVQ out_base+0(FP), BX + MOVQ out_len+8(FP), CX + + VMOVDQU 0(AX), Y1 + VMOVDQU 32(AX), Y2 + VMOVDQU 64(AX), Y3 + VMOVDQU 96(AX), Y4 + + VMOVDQU ·zipperMergeAVX2<>(SB), Y5 + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + + CMPQ CX, $8 + JE skipUpdate // Just 4 rounds for 64-bit checksum + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + + CMPQ CX, $16 + JE skipUpdate // 6 rounds for 128-bit checksum + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + +skipUpdate: + VMOVDQU Y1, 0(AX) + VMOVDQU Y2, 32(AX) + VMOVDQU Y3, 64(AX) + VMOVDQU Y4, 96(AX) + VZEROUPPER + + CMPQ CX, $8 + JE hash64 + CMPQ CX, $16 + JE hash128 + + // 256-bit checksum + MOVQ 0*8(AX), R8 + MOVQ 1*8(AX), R9 + MOVQ 4*8(AX), R10 + MOVQ 5*8(AX), R11 + ADDQ 8*8(AX), R8 + ADDQ 
9*8(AX), R9 + ADDQ 12*8(AX), R10 + ADDQ 13*8(AX), R11 + + REDUCE_MOD(R8, R9, R10, R11, R12, R13, R14, R15) + MOVQ R14, 0(BX) + MOVQ R15, 8(BX) + + MOVQ 2*8(AX), R8 + MOVQ 3*8(AX), R9 + MOVQ 6*8(AX), R10 + MOVQ 7*8(AX), R11 + ADDQ 10*8(AX), R8 + ADDQ 11*8(AX), R9 + ADDQ 14*8(AX), R10 + ADDQ 15*8(AX), R11 + + REDUCE_MOD(R8, R9, R10, R11, R12, R13, R14, R15) + MOVQ R14, 16(BX) + MOVQ R15, 24(BX) + RET + +hash128: + MOVQ 0*8(AX), R8 + MOVQ 1*8(AX), R9 + ADDQ 6*8(AX), R8 + ADDQ 7*8(AX), R9 + ADDQ 8*8(AX), R8 + ADDQ 9*8(AX), R9 + ADDQ 14*8(AX), R8 + ADDQ 15*8(AX), R9 + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + RET + +hash64: + MOVQ 0*8(AX), DX + ADDQ 4*8(AX), DX + ADDQ 8*8(AX), DX + ADDQ 12*8(AX), DX + MOVQ DX, 0(BX) + RET + diff --git a/go/vt/vthash/highway/highwayhash_amd64.go b/go/vt/vthash/highway/highwayhash_amd64.go new file mode 100644 index 00000000000..f47a47fb1d3 --- /dev/null +++ b/go/vt/vthash/highway/highwayhash_amd64.go @@ -0,0 +1,80 @@ +//go:build amd64 && !gccgo && !appengine && !nacl && !noasm +// +build amd64,!gccgo,!appengine,!nacl,!noasm + +/* +Copyright (c) 2017 Minio Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package highway + +import "golang.org/x/sys/cpu" + +var ( + useSSE4 = cpu.X86.HasSSE41 + useAVX2 = cpu.X86.HasAVX2 + useNEON = false + useVMX = false +) + +//go:noescape +func initializeSSE4(state *[16]uint64, key []byte) + +//go:noescape +func initializeAVX2(state *[16]uint64, key []byte) + +//go:noescape +func updateSSE4(state *[16]uint64, msg []byte) + +//go:noescape +func updateAVX2(state *[16]uint64, msg []byte) + +//go:noescape +func finalizeSSE4(out []byte, state *[16]uint64) + +//go:noescape +func finalizeAVX2(out []byte, state *[16]uint64) + +func initialize(state *[16]uint64, key []byte) { + switch { + case useAVX2: + initializeAVX2(state, key) + case useSSE4: + initializeSSE4(state, key) + default: + initializeGeneric(state, key) + } +} + +func update(state *[16]uint64, msg []byte) { + switch { + case useAVX2: + updateAVX2(state, msg) + case useSSE4: + updateSSE4(state, msg) + default: + updateGeneric(state, msg) + } +} + +func finalize(out []byte, state *[16]uint64) { + switch { + case useAVX2: + finalizeAVX2(out, state) + case useSSE4: + finalizeSSE4(out, state) + default: + finalizeGeneric(out, state) + } +} diff --git a/go/vt/vthash/highway/highwayhash_amd64.s b/go/vt/vthash/highway/highwayhash_amd64.s new file mode 100644 index 00000000000..5c0f87256f6 --- /dev/null +++ b/go/vt/vthash/highway/highwayhash_amd64.s @@ -0,0 +1,304 @@ +// Copyright (c) 2017 Minio Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build amd64 !gccgo !appengine !nacl + +#include "textflag.h" + +DATA ·asmConstants<>+0x00(SB)/8, $0xdbe6d5d5fe4cce2f +DATA ·asmConstants<>+0x08(SB)/8, $0xa4093822299f31d0 +DATA ·asmConstants<>+0x10(SB)/8, $0x13198a2e03707344 +DATA ·asmConstants<>+0x18(SB)/8, $0x243f6a8885a308d3 +DATA ·asmConstants<>+0x20(SB)/8, $0x3bd39e10cb0ef593 +DATA ·asmConstants<>+0x28(SB)/8, $0xc0acf169b5f18a8c +DATA ·asmConstants<>+0x30(SB)/8, $0xbe5466cf34e90c6c +DATA ·asmConstants<>+0x38(SB)/8, $0x452821e638d01377 +GLOBL ·asmConstants<>(SB), (NOPTR+RODATA), $64 + +DATA ·asmZipperMerge<>+0x00(SB)/8, $0xf010e05020c03 +DATA ·asmZipperMerge<>+0x08(SB)/8, $0x70806090d0a040b +GLOBL ·asmZipperMerge<>(SB), (NOPTR+RODATA), $16 + +#define v00 X0 +#define v01 X1 +#define v10 X2 +#define v11 X3 +#define m00 X4 +#define m01 X5 +#define m10 X6 +#define m11 X7 + +#define t0 X8 +#define t1 X9 +#define t2 X10 + +#define REDUCE_MOD(x0, x1, x2, x3, tmp0, tmp1, y0, y1) \ + MOVQ $0x3FFFFFFFFFFFFFFF, tmp0 \ + ANDQ tmp0, x3 \ + MOVQ x2, y0 \ + MOVQ x3, y1 \ + \ + MOVQ x2, tmp0 \ + MOVQ x3, tmp1 \ + SHLQ $1, tmp1 \ + SHRQ $63, tmp0 \ + MOVQ tmp1, x3 \ + ORQ tmp0, x3 \ + \ + SHLQ $1, x2 \ + \ + MOVQ y0, tmp0 \ + MOVQ y1, tmp1 \ + SHLQ $2, tmp1 \ + SHRQ $62, tmp0 \ + MOVQ tmp1, y1 \ + ORQ tmp0, y1 \ + \ + SHLQ $2, y0 \ + \ + XORQ x0, y0 \ + XORQ x2, y0 \ + XORQ x1, y1 \ + XORQ x3, y1 + +#define UPDATE(msg0, msg1) \ + PADDQ msg0, v10 \ + PADDQ m00, v10 \ + PADDQ msg1, v11 \ + PADDQ m01, v11 \ + \ + MOVO v00, t0 \ + MOVO v01, t1 \ + PSRLQ $32, t0 \ + PSRLQ $32, t1 \ + PMULULQ v10, t0 \ + PMULULQ v11, t1 \ + PXOR t0, m00 \ + PXOR t1, m01 \ + \ + PADDQ m10, v00 \ + PADDQ m11, v01 \ + \ + MOVO v10, t0 \ + MOVO v11, t1 \ + PSRLQ $32, t0 \ + PSRLQ $32, t1 \ + PMULULQ v00, t0 \ + PMULULQ v01, t1 \ + PXOR t0, m10 \ + PXOR t1, m11 \ + \ + MOVO v10, t0 \ + PSHUFB t2, t0 \ + MOVO v11, t1 \ + PSHUFB t2, t1 \ + PADDQ t0, v00 \ + PADDQ t1, v01 \ + \ + MOVO v00, t0 \ + PSHUFB t2, t0 \ + MOVO v01, t1 \ + PSHUFB t2, t1 \ + 
PADDQ t0, v10 \ + PADDQ t1, v11 + +// func initializeSSE4(state *[16]uint64, key []byte) +TEXT ·initializeSSE4(SB), NOSPLIT, $0-32 + MOVQ state+0(FP), AX + MOVQ key_base+8(FP), BX + MOVQ $·asmConstants<>(SB), CX + + MOVOU 0(BX), v00 + MOVOU 16(BX), v01 + + PSHUFD $177, v00, v10 + PSHUFD $177, v01, v11 + + MOVOU 0(CX), m00 + MOVOU 16(CX), m01 + MOVOU 32(CX), m10 + MOVOU 48(CX), m11 + + PXOR m00, v00 + PXOR m01, v01 + PXOR m10, v10 + PXOR m11, v11 + + MOVOU v00, 0(AX) + MOVOU v01, 16(AX) + MOVOU v10, 32(AX) + MOVOU v11, 48(AX) + MOVOU m00, 64(AX) + MOVOU m01, 80(AX) + MOVOU m10, 96(AX) + MOVOU m11, 112(AX) + RET + +// func updateSSE4(state *[16]uint64, msg []byte) +TEXT ·updateSSE4(SB), NOSPLIT, $0-32 + MOVQ state+0(FP), AX + MOVQ msg_base+8(FP), BX + MOVQ msg_len+16(FP), CX + + CMPQ CX, $32 + JB DONE + + MOVOU 0(AX), v00 + MOVOU 16(AX), v01 + MOVOU 32(AX), v10 + MOVOU 48(AX), v11 + MOVOU 64(AX), m00 + MOVOU 80(AX), m01 + MOVOU 96(AX), m10 + MOVOU 112(AX), m11 + + MOVOU ·asmZipperMerge<>(SB), t2 + +LOOP: + MOVOU 0(BX), t0 + MOVOU 16(BX), t1 + + UPDATE(t0, t1) + + ADDQ $32, BX + SUBQ $32, CX + JA LOOP + + MOVOU v00, 0(AX) + MOVOU v01, 16(AX) + MOVOU v10, 32(AX) + MOVOU v11, 48(AX) + MOVOU m00, 64(AX) + MOVOU m01, 80(AX) + MOVOU m10, 96(AX) + MOVOU m11, 112(AX) + +DONE: + RET + +// func finalizeSSE4(out []byte, state *[16]uint64) +TEXT ·finalizeSSE4(SB), NOSPLIT, $0-32 + MOVQ state+24(FP), AX + MOVQ out_base+0(FP), BX + MOVQ out_len+8(FP), CX + + MOVOU 0(AX), v00 + MOVOU 16(AX), v01 + MOVOU 32(AX), v10 + MOVOU 48(AX), v11 + MOVOU 64(AX), m00 + MOVOU 80(AX), m01 + MOVOU 96(AX), m10 + MOVOU 112(AX), m11 + + MOVOU ·asmZipperMerge<>(SB), t2 + + PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + + PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + + PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + + PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + + CMPQ CX, $8 + JE skipUpdate // Just 4 rounds for 64-bit checksum + + 
PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + + PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + + CMPQ CX, $16 + JE skipUpdate // 6 rounds for 128-bit checksum + + PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + + PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + + PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + + PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + +skipUpdate: + MOVOU v00, 0(AX) + MOVOU v01, 16(AX) + MOVOU v10, 32(AX) + MOVOU v11, 48(AX) + MOVOU m00, 64(AX) + MOVOU m01, 80(AX) + MOVOU m10, 96(AX) + MOVOU m11, 112(AX) + + CMPQ CX, $8 + JE hash64 + CMPQ CX, $16 + JE hash128 + + // 256-bit checksum + PADDQ v00, m00 + PADDQ v10, m10 + PADDQ v01, m01 + PADDQ v11, m11 + + MOVQ m00, R8 + PEXTRQ $1, m00, R9 + MOVQ m10, R10 + PEXTRQ $1, m10, R11 + REDUCE_MOD(R8, R9, R10, R11, R12, R13, R14, R15) + MOVQ R14, 0(BX) + MOVQ R15, 8(BX) + + MOVQ m01, R8 + PEXTRQ $1, m01, R9 + MOVQ m11, R10 + PEXTRQ $1, m11, R11 + REDUCE_MOD(R8, R9, R10, R11, R12, R13, R14, R15) + MOVQ R14, 16(BX) + MOVQ R15, 24(BX) + RET + +hash128: + PADDQ v00, v11 + PADDQ m00, m11 + PADDQ v11, m11 + MOVOU m11, 0(BX) + RET + +hash64: + PADDQ v00, v10 + PADDQ m00, m10 + PADDQ v10, m10 + MOVQ m10, DX + MOVQ DX, 0(BX) + RET diff --git a/go/vt/vthash/highway/highwayhash_arm64.go b/go/vt/vthash/highway/highwayhash_arm64.go new file mode 100644 index 00000000000..2b22db7ff56 --- /dev/null +++ b/go/vt/vthash/highway/highwayhash_arm64.go @@ -0,0 +1,64 @@ +//go:build !noasm && !appengine +// +build !noasm,!appengine + +/* +Copyright (c) 2017 Minio Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Copyright (c) 2017 Minio Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package highway + +var ( + useSSE4 = false + useAVX2 = false + useNEON = true + useVMX = false +) + +//go:noescape +func initializeArm64(state *[16]uint64, key []byte) + +//go:noescape +func updateArm64(state *[16]uint64, msg []byte) + +//go:noescape +func finalizeArm64(out []byte, state *[16]uint64) + +func initialize(state *[16]uint64, key []byte) { + if useNEON { + initializeArm64(state, key) + } else { + initializeGeneric(state, key) + } +} + +func update(state *[16]uint64, msg []byte) { + if useNEON { + updateArm64(state, msg) + } else { + updateGeneric(state, msg) + } +} + +func finalize(out []byte, state *[16]uint64) { + if useNEON { + finalizeArm64(out, state) + } else { + finalizeGeneric(out, state) + } +} diff --git a/go/vt/vthash/highway/highwayhash_arm64.s b/go/vt/vthash/highway/highwayhash_arm64.s new file mode 100644 index 00000000000..bbf2f9822bd --- /dev/null +++ b/go/vt/vthash/highway/highwayhash_arm64.s @@ -0,0 +1,322 @@ +// Copyright (c) 2017 Minio Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//+build !noasm,!appengine + +// Use github.com/minio/asm2plan9s on this file to assemble ARM instructions to +// the opcodes of their Plan9 equivalents + +#include "textflag.h" + +#define REDUCE_MOD(x0, x1, x2, x3, tmp0, tmp1, y0, y1) \ + MOVD $0x3FFFFFFFFFFFFFFF, tmp0 \ + AND tmp0, x3 \ + MOVD x2, y0 \ + MOVD x3, y1 \ + \ + MOVD x2, tmp0 \ + MOVD x3, tmp1 \ + LSL $1, tmp1 \ + LSR $63, tmp0 \ + MOVD tmp1, x3 \ + ORR tmp0, x3 \ + \ + LSL $1, x2 \ + \ + MOVD y0, tmp0 \ + MOVD y1, tmp1 \ + LSL $2, tmp1 \ + LSR $62, tmp0 \ + MOVD tmp1, y1 \ + ORR tmp0, y1 \ + \ + LSL $2, y0 \ + \ + EOR x0, y0 \ + EOR x2, y0 \ + EOR x1, y1 \ + EOR x3, y1 + +#define UPDATE(MSG1, MSG2) \ + \ // Add message + VADD MSG1.D2, V2.D2, V2.D2 \ + VADD MSG2.D2, V3.D2, V3.D2 \ + \ + \ // v1 += mul0 + VADD V4.D2, V2.D2, V2.D2 \ + VADD V5.D2, V3.D2, V3.D2 \ + \ + \ // First pair of multiplies + VTBL V29.B16, [V0.B16, V1.B16], V10.B16 \ + VTBL V30.B16, [V2.B16, V3.B16], V11.B16 \ + \ + \ // VUMULL V10.S2, V11.S2, V12.D2 /* assembler support missing */ + \ // VUMULL2 V10.S4, V11.S4, V13.D2 /* assembler support missing */ + WORD $0x2eaac16c \ // umull v12.2d, v11.2s, v10.2s + WORD $0x6eaac16d \ // umull2 v13.2d, v11.4s, v10.4s + \ + \ // v0 += mul1 + VADD V6.D2, V0.D2, V0.D2 \ + VADD V7.D2, V1.D2, V1.D2 \ + \ + \ // Second pair of multiplies + VTBL V29.B16, [V2.B16, V3.B16], V15.B16 \ + VTBL V30.B16, [V0.B16, V1.B16], V14.B16 \ + \ + \ // EOR multiplication result in + VEOR V12.B16, V4.B16, V4.B16 \ + VEOR V13.B16, V5.B16, V5.B16 \ + \ + \ // VUMULL V14.S2, V15.S2, V16.D2 /* 
assembler support missing */ + \ // VUMULL2 V14.S4, V15.S4, V17.D2 /* assembler support missing */ + WORD $0x2eaec1f0 \ // umull v16.2d, v15.2s, v14.2s + WORD $0x6eaec1f1 \ // umull2 v17.2d, v15.4s, v14.4s + \ + \ // First pair of zipper-merges + VTBL V28.B16, [V2.B16], V18.B16 \ + VADD V18.D2, V0.D2, V0.D2 \ + VTBL V28.B16, [V3.B16], V19.B16 \ + VADD V19.D2, V1.D2, V1.D2 \ + \ + \ // Second pair of zipper-merges + VTBL V28.B16, [V0.B16], V20.B16 \ + VADD V20.D2, V2.D2, V2.D2 \ + VTBL V28.B16, [V1.B16], V21.B16 \ + VADD V21.D2, V3.D2, V3.D2 \ + \ + \ // EOR multiplication result in + VEOR V16.B16, V6.B16, V6.B16 \ + VEOR V17.B16, V7.B16, V7.B16 + +// func initializeArm64(state *[16]uint64, key []byte) +TEXT ·initializeArm64(SB), NOSPLIT, $0 + MOVD state+0(FP), R0 + MOVD key_base+8(FP), R1 + + VLD1 (R1), [V1.S4, V2.S4] + + VREV64 V1.S4, V3.S4 + VREV64 V2.S4, V4.S4 + + MOVD $·asmConstants(SB), R3 + VLD1 (R3), [V5.S4, V6.S4, V7.S4, V8.S4] + VEOR V5.B16, V1.B16, V1.B16 + VEOR V6.B16, V2.B16, V2.B16 + VEOR V7.B16, V3.B16, V3.B16 + VEOR V8.B16, V4.B16, V4.B16 + + VST1.P [V1.D2, V2.D2, V3.D2, V4.D2], 64(R0) + VST1 [V5.D2, V6.D2, V7.D2, V8.D2], (R0) + RET + +TEXT ·updateArm64(SB), NOSPLIT, $0 + MOVD state+0(FP), R0 + MOVD msg_base+8(FP), R1 + MOVD msg_len+16(FP), R2 // length of message + SUBS $32, R2 + BMI complete + + // Definition of registers + // v0 = v0.lo + // v1 = v0.hi + // v2 = v1.lo + // v3 = v1.hi + // v4 = mul0.lo + // v5 = mul0.hi + // v6 = mul1.lo + // v7 = mul1.hi + + // Load zipper merge constants table pointer + MOVD $·asmZipperMerge(SB), R3 + + // and load zipper merge constants into v28, v29, and v30 + VLD1 (R3), [V28.B16, V29.B16, V30.B16] + + VLD1.P 64(R0), [V0.D2, V1.D2, V2.D2, V3.D2] + VLD1 (R0), [V4.D2, V5.D2, V6.D2, V7.D2] + SUBS $64, R0 + +loop: + // Main loop + VLD1.P 32(R1), [V26.S4, V27.S4] + + UPDATE(V26, V27) + + SUBS $32, R2 + BPL loop + + // Store result + VST1.P [V0.D2, V1.D2, V2.D2, V3.D2], 64(R0) + VST1 [V4.D2, V5.D2, V6.D2, V7.D2], 
(R0) + +complete: + RET + +// func finalizeArm64(out []byte, state *[16]uint64) +TEXT ·finalizeArm64(SB), NOSPLIT, $0-32 + MOVD state+24(FP), R0 + MOVD out_base+0(FP), R1 + MOVD out_len+8(FP), R2 + + // Load zipper merge constants table pointer + MOVD $·asmZipperMerge(SB), R3 + + // and load zipper merge constants into v28, v29, and v30 + VLD1 (R3), [V28.B16, V29.B16, V30.B16] + + VLD1.P 64(R0), [V0.D2, V1.D2, V2.D2, V3.D2] + VLD1 (R0), [V4.D2, V5.D2, V6.D2, V7.D2] + SUB $64, R0 + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + + CMP $8, R2 + BEQ skipUpdate // Just 4 rounds for 64-bit checksum + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + + CMP $16, R2 + BEQ skipUpdate // 6 rounds for 128-bit checksum + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + +skipUpdate: + // Store result + VST1.P [V0.D2, V1.D2, V2.D2, V3.D2], 64(R0) + VST1 [V4.D2, V5.D2, V6.D2, V7.D2], (R0) + SUB $64, R0 + + CMP $8, R2 + BEQ hash64 + CMP $16, R2 + BEQ hash128 + + // 256-bit checksum + MOVD 0*8(R0), R8 + MOVD 1*8(R0), R9 + MOVD 4*8(R0), R10 + MOVD 5*8(R0), R11 + MOVD 8*8(R0), R4 + MOVD 9*8(R0), R5 + MOVD 12*8(R0), R6 + MOVD 13*8(R0), R7 + ADD R4, R8 + ADD R5, R9 + ADD R6, R10 + ADD R7, R11 + + REDUCE_MOD(R8, R9, R10, R11, R4, R5, R6, R7) + MOVD R6, 0(R1) + MOVD R7, 8(R1) + + MOVD 2*8(R0), R8 + MOVD 3*8(R0), R9 + MOVD 6*8(R0), R10 + MOVD 7*8(R0), R11 + MOVD 10*8(R0), R4 + MOVD 11*8(R0), R5 + MOVD 14*8(R0), R6 + MOVD 15*8(R0), R7 + ADD R4, R8 + ADD R5, R9 + ADD R6, R10 + 
ADD R7, R11 + + REDUCE_MOD(R8, R9, R10, R11, R4, R5, R6, R7) + MOVD R6, 16(R1) + MOVD R7, 24(R1) + RET + +hash128: + MOVD 0*8(R0), R8 + MOVD 1*8(R0), R9 + MOVD 6*8(R0), R10 + MOVD 7*8(R0), R11 + ADD R10, R8 + ADD R11, R9 + MOVD 8*8(R0), R10 + MOVD 9*8(R0), R11 + ADD R10, R8 + ADD R11, R9 + MOVD 14*8(R0), R10 + MOVD 15*8(R0), R11 + ADD R10, R8 + ADD R11, R9 + MOVD R8, 0(R1) + MOVD R9, 8(R1) + RET + +hash64: + MOVD 0*8(R0), R4 + MOVD 4*8(R0), R5 + MOVD 8*8(R0), R6 + MOVD 12*8(R0), R7 + ADD R5, R4 + ADD R7, R6 + ADD R6, R4 + MOVD R4, (R1) + RET + +DATA ·asmConstants+0x00(SB)/8, $0xdbe6d5d5fe4cce2f +DATA ·asmConstants+0x08(SB)/8, $0xa4093822299f31d0 +DATA ·asmConstants+0x10(SB)/8, $0x13198a2e03707344 +DATA ·asmConstants+0x18(SB)/8, $0x243f6a8885a308d3 +DATA ·asmConstants+0x20(SB)/8, $0x3bd39e10cb0ef593 +DATA ·asmConstants+0x28(SB)/8, $0xc0acf169b5f18a8c +DATA ·asmConstants+0x30(SB)/8, $0xbe5466cf34e90c6c +DATA ·asmConstants+0x38(SB)/8, $0x452821e638d01377 +GLOBL ·asmConstants(SB), 8, $64 + +// Constants for TBL instructions +DATA ·asmZipperMerge+0x0(SB)/8, $0x000f010e05020c03 // zipper merge constant +DATA ·asmZipperMerge+0x8(SB)/8, $0x070806090d0a040b +DATA ·asmZipperMerge+0x10(SB)/8, $0x0f0e0d0c07060504 // setup first register for multiply +DATA ·asmZipperMerge+0x18(SB)/8, $0x1f1e1d1c17161514 +DATA ·asmZipperMerge+0x20(SB)/8, $0x0b0a090803020100 // setup second register for multiply +DATA ·asmZipperMerge+0x28(SB)/8, $0x1b1a191813121110 +GLOBL ·asmZipperMerge(SB), 8, $48 diff --git a/go/vt/vthash/highway/highwayhash_generic.go b/go/vt/vthash/highway/highwayhash_generic.go new file mode 100644 index 00000000000..9ea17094843 --- /dev/null +++ b/go/vt/vthash/highway/highwayhash_generic.go @@ -0,0 +1,350 @@ +/* +Copyright (c) 2017 Minio Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package highway + +import ( + "encoding/binary" +) + +const ( + v0 = 0 + v1 = 4 + mul0 = 8 + mul1 = 12 +) + +var ( + init0 = [4]uint64{0xdbe6d5d5fe4cce2f, 0xa4093822299f31d0, 0x13198a2e03707344, 0x243f6a8885a308d3} + init1 = [4]uint64{0x3bd39e10cb0ef593, 0xc0acf169b5f18a8c, 0xbe5466cf34e90c6c, 0x452821e638d01377} +) + +func initializeGeneric(state *[16]uint64, k []byte) { + var key [4]uint64 + + key[0] = binary.LittleEndian.Uint64(k[0:]) + key[1] = binary.LittleEndian.Uint64(k[8:]) + key[2] = binary.LittleEndian.Uint64(k[16:]) + key[3] = binary.LittleEndian.Uint64(k[24:]) + + copy(state[mul0:], init0[:]) + copy(state[mul1:], init1[:]) + + for i, k := range key { + state[v0+i] = init0[i] ^ k + } + + key[0] = key[0]>>32 | key[0]<<32 + key[1] = key[1]>>32 | key[1]<<32 + key[2] = key[2]>>32 | key[2]<<32 + key[3] = key[3]>>32 | key[3]<<32 + + for i, k := range key { + state[v1+i] = init1[i] ^ k + } +} + +func updateGeneric(state *[16]uint64, msg []byte) { + for len(msg) >= 32 { + m := msg[:32] + + // add message + mul0 + // Interleave operations to hide multiplication + state[v1+0] += binary.LittleEndian.Uint64(m) + state[mul0+0] + state[mul0+0] ^= uint64(uint32(state[v1+0])) * (state[v0+0] >> 32) + state[v0+0] += state[mul1+0] + state[mul1+0] ^= uint64(uint32(state[v0+0])) * (state[v1+0] >> 32) + + state[v1+1] += binary.LittleEndian.Uint64(m[8:]) + state[mul0+1] + state[mul0+1] ^= uint64(uint32(state[v1+1])) * (state[v0+1] >> 32) + state[v0+1] += state[mul1+1] + state[mul1+1] ^= uint64(uint32(state[v0+1])) * (state[v1+1] >> 32) + + state[v1+2] += 
binary.LittleEndian.Uint64(m[16:]) + state[mul0+2] + state[mul0+2] ^= uint64(uint32(state[v1+2])) * (state[v0+2] >> 32) + state[v0+2] += state[mul1+2] + state[mul1+2] ^= uint64(uint32(state[v0+2])) * (state[v1+2] >> 32) + + state[v1+3] += binary.LittleEndian.Uint64(m[24:]) + state[mul0+3] + state[mul0+3] ^= uint64(uint32(state[v1+3])) * (state[v0+3] >> 32) + state[v0+3] += state[mul1+3] + state[mul1+3] ^= uint64(uint32(state[v0+3])) * (state[v1+3] >> 32) + + // inlined: zipperMerge(state[v1+0], state[v1+1], &state[v0+0], &state[v0+1]) + { + val0 := state[v1+0] + val1 := state[v1+1] + res := val0 & (0xff << (2 * 8)) + res2 := (val0 & (0xff << (7 * 8))) + (val1 & (0xff << (2 * 8))) + res += (val1 & (0xff << (7 * 8))) >> 8 + res2 += (val0 & (0xff << (6 * 8))) >> 8 + res += ((val0 & (0xff << (5 * 8))) + (val1 & (0xff << (6 * 8)))) >> 16 + res2 += (val1 & (0xff << (5 * 8))) >> 16 + res += ((val0 & (0xff << (3 * 8))) + (val1 & (0xff << (4 * 8)))) >> 24 + res2 += ((val1 & (0xff << (3 * 8))) + (val0 & (0xff << (4 * 8)))) >> 24 + res += (val0 & (0xff << (1 * 8))) << 32 + res2 += (val1 & 0xff) << 48 + res += val0 << 56 + res2 += (val1 & (0xff << (1 * 8))) << 24 + + state[v0+0] += res + state[v0+1] += res2 + } + // zipperMerge(state[v1+2], state[v1+3], &state[v0+2], &state[v0+3]) + { + val0 := state[v1+2] + val1 := state[v1+3] + res := val0 & (0xff << (2 * 8)) + res2 := (val0 & (0xff << (7 * 8))) + (val1 & (0xff << (2 * 8))) + res += (val1 & (0xff << (7 * 8))) >> 8 + res2 += (val0 & (0xff << (6 * 8))) >> 8 + res += ((val0 & (0xff << (5 * 8))) + (val1 & (0xff << (6 * 8)))) >> 16 + res2 += (val1 & (0xff << (5 * 8))) >> 16 + res += ((val0 & (0xff << (3 * 8))) + (val1 & (0xff << (4 * 8)))) >> 24 + res2 += ((val1 & (0xff << (3 * 8))) + (val0 & (0xff << (4 * 8)))) >> 24 + res += (val0 & (0xff << (1 * 8))) << 32 + res2 += (val1 & 0xff) << 48 + res += val0 << 56 + res2 += (val1 & (0xff << (1 * 8))) << 24 + + state[v0+2] += res + state[v0+3] += res2 + } + + // inlined: 
zipperMerge(state[v0+0], state[v0+1], &state[v1+0], &state[v1+1]) + { + val0 := state[v0+0] + val1 := state[v0+1] + res := val0 & (0xff << (2 * 8)) + res2 := (val0 & (0xff << (7 * 8))) + (val1 & (0xff << (2 * 8))) + res += (val1 & (0xff << (7 * 8))) >> 8 + res2 += (val0 & (0xff << (6 * 8))) >> 8 + res += ((val0 & (0xff << (5 * 8))) + (val1 & (0xff << (6 * 8)))) >> 16 + res2 += (val1 & (0xff << (5 * 8))) >> 16 + res += ((val0 & (0xff << (3 * 8))) + (val1 & (0xff << (4 * 8)))) >> 24 + res2 += ((val1 & (0xff << (3 * 8))) + (val0 & (0xff << (4 * 8)))) >> 24 + res += (val0 & (0xff << (1 * 8))) << 32 + res2 += (val1 & 0xff) << 48 + res += val0 << 56 + res2 += (val1 & (0xff << (1 * 8))) << 24 + + state[v1+0] += res + state[v1+1] += res2 + } + + //inlined: zipperMerge(state[v0+2], state[v0+3], &state[v1+2], &state[v1+3]) + { + val0 := state[v0+2] + val1 := state[v0+3] + res := val0 & (0xff << (2 * 8)) + res2 := (val0 & (0xff << (7 * 8))) + (val1 & (0xff << (2 * 8))) + res += (val1 & (0xff << (7 * 8))) >> 8 + res2 += (val0 & (0xff << (6 * 8))) >> 8 + res += ((val0 & (0xff << (5 * 8))) + (val1 & (0xff << (6 * 8)))) >> 16 + res2 += (val1 & (0xff << (5 * 8))) >> 16 + res += ((val0 & (0xff << (3 * 8))) + (val1 & (0xff << (4 * 8)))) >> 24 + res2 += ((val1 & (0xff << (3 * 8))) + (val0 & (0xff << (4 * 8)))) >> 24 + res += (val0 & (0xff << (1 * 8))) << 32 + res2 += (val1 & 0xff) << 48 + res += val0 << 56 + res2 += (val1 & (0xff << (1 * 8))) << 24 + + state[v1+2] += res + state[v1+3] += res2 + } + msg = msg[32:] + } +} + +func finalizeGeneric(out []byte, state *[16]uint64) { + var perm [4]uint64 + var tmp [32]byte + runs := 4 + if len(out) == 16 { + runs = 6 + } else if len(out) == 32 { + runs = 10 + } + for i := 0; i < runs; i++ { + perm[0] = state[v0+2]>>32 | state[v0+2]<<32 + perm[1] = state[v0+3]>>32 | state[v0+3]<<32 + perm[2] = state[v0+0]>>32 | state[v0+0]<<32 + perm[3] = state[v0+1]>>32 | state[v0+1]<<32 + + binary.LittleEndian.PutUint64(tmp[0:], perm[0]) + 
binary.LittleEndian.PutUint64(tmp[8:], perm[1]) + binary.LittleEndian.PutUint64(tmp[16:], perm[2]) + binary.LittleEndian.PutUint64(tmp[24:], perm[3]) + + update(state, tmp[:]) + } + + switch len(out) { + case 8: + binary.LittleEndian.PutUint64(out, state[v0+0]+state[v1+0]+state[mul0+0]+state[mul1+0]) + case 16: + binary.LittleEndian.PutUint64(out, state[v0+0]+state[v1+2]+state[mul0+0]+state[mul1+2]) + binary.LittleEndian.PutUint64(out[8:], state[v0+1]+state[v1+3]+state[mul0+1]+state[mul1+3]) + case 32: + h0, h1 := reduceMod(state[v0+0]+state[mul0+0], state[v0+1]+state[mul0+1], state[v1+0]+state[mul1+0], state[v1+1]+state[mul1+1]) + binary.LittleEndian.PutUint64(out[0:], h0) + binary.LittleEndian.PutUint64(out[8:], h1) + + h0, h1 = reduceMod(state[v0+2]+state[mul0+2], state[v0+3]+state[mul0+3], state[v1+2]+state[mul1+2], state[v1+3]+state[mul1+3]) + binary.LittleEndian.PutUint64(out[16:], h0) + binary.LittleEndian.PutUint64(out[24:], h1) + } +} + +// Experiments on variations left for future reference... +/* +func zipperMerge(v0, v1 uint64, d0, d1 *uint64) { + if true { + // fastest. original interleaved... 
+ res := v0 & (0xff << (2 * 8)) + res2 := (v0 & (0xff << (7 * 8))) + (v1 & (0xff << (2 * 8))) + res += (v1 & (0xff << (7 * 8))) >> 8 + res2 += (v0 & (0xff << (6 * 8))) >> 8 + res += ((v0 & (0xff << (5 * 8))) + (v1 & (0xff << (6 * 8)))) >> 16 + res2 += (v1 & (0xff << (5 * 8))) >> 16 + res += ((v0 & (0xff << (3 * 8))) + (v1 & (0xff << (4 * 8)))) >> 24 + res2 += ((v1 & (0xff << (3 * 8))) + (v0 & (0xff << (4 * 8)))) >> 24 + res += (v0 & (0xff << (1 * 8))) << 32 + res2 += (v1 & 0xff) << 48 + res += v0 << 56 + res2 += (v1 & (0xff << (1 * 8))) << 24 + + *d0 += res + *d1 += res2 + } else if false { + // Reading bytes and combining into uint64 + var v0b [8]byte + binary.LittleEndian.PutUint64(v0b[:], v0) + var v1b [8]byte + binary.LittleEndian.PutUint64(v1b[:], v1) + var res, res2 uint64 + + res = uint64(v0b[0]) << (7 * 8) + res2 = uint64(v1b[0]) << (6 * 8) + res |= uint64(v0b[1]) << (5 * 8) + res2 |= uint64(v1b[1]) << (4 * 8) + res |= uint64(v0b[2]) << (2 * 8) + res2 |= uint64(v1b[2]) << (2 * 8) + res |= uint64(v0b[3]) + res2 |= uint64(v0b[4]) << (1 * 8) + res |= uint64(v0b[5]) << (3 * 8) + res2 |= uint64(v0b[6]) << (5 * 8) + res |= uint64(v1b[4]) << (1 * 8) + res2 |= uint64(v0b[7]) << (7 * 8) + res |= uint64(v1b[6]) << (4 * 8) + res2 |= uint64(v1b[3]) + res |= uint64(v1b[7]) << (6 * 8) + res2 |= uint64(v1b[5]) << (3 * 8) + + *d0 += res + *d1 += res2 + + } else if false { + // bytes to bytes shuffle + var v0b [8]byte + binary.LittleEndian.PutUint64(v0b[:], v0) + var v1b [8]byte + binary.LittleEndian.PutUint64(v1b[:], v1) + var res [8]byte + + //res += ((v0 & (0xff << (3 * 8))) + (v1 & (0xff << (4 * 8)))) >> 24 + res[0] = v0b[3] + res[1] = v1b[4] + + // res := v0 & (0xff << (2 * 8)) + res[2] = v0b[2] + + //res += ((v0 & (0xff << (5 * 8))) + (v1 & (0xff << (6 * 8)))) >> 16 + res[3] = v0b[5] + res[4] = v1b[6] + + //res += (v0 & (0xff << (1 * 8))) << 32 + res[5] = v0b[1] + + //res += (v1 & (0xff << (7 * 8))) >> 8 + res[6] += v1b[7] + + //res += v0 << 56 + res[7] = v0b[0] + v0 
= binary.LittleEndian.Uint64(res[:]) + *d0 += v0 + + //res += ((v1 & (0xff << (3 * 8))) + (v0 & (0xff << (4 * 8)))) >> 24 + res[0] = v1b[3] + res[1] = v0b[4] + + res[2] = v1b[2] + + // res += (v1 & (0xff << (5 * 8))) >> 16 + res[3] = v1b[5] + + //res += (v1 & (0xff << (1 * 8))) << 24 + res[4] = v1b[1] + + // res += (v0 & (0xff << (6 * 8))) >> 8 + res[5] = v0b[6] + + //res := (v0 & (0xff << (7 * 8))) + (v1 & (0xff << (2 * 8))) + res[7] = v0b[7] + + //res += (v1 & 0xff) << 48 + res[6] = v1b[0] + + v0 = binary.LittleEndian.Uint64(res[:]) + *d1 += v0 + } else { + // original. + res := v0 & (0xff << (2 * 8)) + res += (v1 & (0xff << (7 * 8))) >> 8 + res += ((v0 & (0xff << (5 * 8))) + (v1 & (0xff << (6 * 8)))) >> 16 + res += ((v0 & (0xff << (3 * 8))) + (v1 & (0xff << (4 * 8)))) >> 24 + res += (v0 & (0xff << (1 * 8))) << 32 + res += v0 << 56 + + *d0 += res + + res = (v0 & (0xff << (7 * 8))) + (v1 & (0xff << (2 * 8))) + res += (v0 & (0xff << (6 * 8))) >> 8 + res += (v1 & (0xff << (5 * 8))) >> 16 + res += ((v1 & (0xff << (3 * 8))) + (v0 & (0xff << (4 * 8)))) >> 24 + res += (v1 & 0xff) << 48 + res += (v1 & (0xff << (1 * 8))) << 24 + + *d1 += res + } +} +*/ + +// reduce v = [v0, v1, v2, v3] mod the irreducible polynomial x^128 + x^2 + x +func reduceMod(v0, v1, v2, v3 uint64) (r0, r1 uint64) { + v3 &= 0x3FFFFFFFFFFFFFFF + + r0, r1 = v2, v3 + + v3 = (v3 << 1) | (v2 >> (64 - 1)) + v2 <<= 1 + r1 = (r1 << 2) | (r0 >> (64 - 2)) + r0 <<= 2 + + r0 ^= v0 ^ v2 + r1 ^= v1 ^ v3 + return +} diff --git a/go/vt/vthash/highway/highwayhash_ppc64le.go b/go/vt/vthash/highway/highwayhash_ppc64le.go new file mode 100644 index 00000000000..f70e2a41473 --- /dev/null +++ b/go/vt/vthash/highway/highwayhash_ppc64le.go @@ -0,0 +1,49 @@ +//go:build !noasm && !appengine +// +build !noasm,!appengine + +/* +Copyright (c) 2017 Minio Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Copyright (c) 2017 Minio Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package highway + +var ( + useSSE4 = false + useAVX2 = false + useNEON = false + useVMX = true +) + +//go:noescape +func updatePpc64Le(state *[16]uint64, msg []byte) + +func initialize(state *[16]uint64, key []byte) { + initializeGeneric(state, key) +} + +func update(state *[16]uint64, msg []byte) { + if useVMX { + updatePpc64Le(state, msg) + } else { + updateGeneric(state, msg) + } +} + +func finalize(out []byte, state *[16]uint64) { + finalizeGeneric(out, state) +} diff --git a/go/vt/vthash/highway/highwayhash_ppc64le.s b/go/vt/vthash/highway/highwayhash_ppc64le.s new file mode 100644 index 00000000000..957cebc4ddc --- /dev/null +++ b/go/vt/vthash/highway/highwayhash_ppc64le.s @@ -0,0 +1,180 @@ +// Copyright (c) 2017 Minio Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//+build !noasm,!appengine + +#include "textflag.h" + +// Definition of registers +#define V0_LO VS32 +#define V0_LO_ V0 +#define V0_HI VS33 +#define V0_HI_ V1 +#define V1_LO VS34 +#define V1_LO_ V2 +#define V1_HI VS35 +#define V1_HI_ V3 +#define MUL0_LO VS36 +#define MUL0_LO_ V4 +#define MUL0_HI VS37 +#define MUL0_HI_ V5 +#define MUL1_LO VS38 +#define MUL1_LO_ V6 +#define MUL1_HI VS39 +#define MUL1_HI_ V7 + +// Message +#define MSG_LO VS40 +#define MSG_LO_ V8 +#define MSG_HI VS41 + +// Constants +#define ROTATE VS42 +#define ROTATE_ V10 +#define MASK VS43 +#define MASK_ V11 + +// Temps +#define TEMP1 VS44 +#define TEMP1_ V12 +#define TEMP2 VS45 +#define TEMP2_ V13 +#define TEMP3 VS46 +#define TEMP3_ V14 +#define TEMP4_ V15 +#define TEMP5_ V16 +#define TEMP6_ V17 +#define TEMP7_ V18 + +// Regular registers +#define STATE R3 +#define MSG_BASE R4 +#define MSG_LEN R5 +#define CONSTANTS R6 +#define P1 R7 +#define P2 R8 +#define P3 R9 +#define P4 R10 +#define P5 R11 +#define P6 R12 +#define P7 R14 // avoid using R13 + +TEXT ·updatePpc64Le(SB), NOFRAME|NOSPLIT, $0-32 + MOVD state+0(FP), STATE + MOVD msg_base+8(FP), MSG_BASE + MOVD msg_len+16(FP), MSG_LEN // length of message + + // Sanity check for length + CMPU MSG_LEN, $31 + BLE complete + + // Setup offsets + MOVD $16, P1 + MOVD $32, P2 + MOVD $48, P3 + MOVD $64, P4 + MOVD $80, P5 + MOVD $96, P6 + MOVD $112, P7 + + // Load state + LXVD2X (STATE)(R0), V0_LO + LXVD2X (STATE)(P1), V0_HI + LXVD2X (STATE)(P2), V1_LO + LXVD2X (STATE)(P3), V1_HI + LXVD2X (STATE)(P4), MUL0_LO + LXVD2X (STATE)(P5), MUL0_HI + LXVD2X (STATE)(P6), MUL1_LO + LXVD2X (STATE)(P7), MUL1_HI + XXPERMDI V0_LO, V0_LO, $2, V0_LO + XXPERMDI V0_HI, V0_HI, $2, V0_HI + XXPERMDI V1_LO, V1_LO, $2, V1_LO + XXPERMDI V1_HI, V1_HI, $2, V1_HI + XXPERMDI MUL0_LO, MUL0_LO, $2, MUL0_LO + XXPERMDI MUL0_HI, MUL0_HI, $2, MUL0_HI + XXPERMDI MUL1_LO, MUL1_LO, $2, MUL1_LO + XXPERMDI MUL1_HI, MUL1_HI, $2, MUL1_HI + + // Load asmConstants table pointer + MOVD 
$·asmConstants(SB), CONSTANTS + LXVD2X (CONSTANTS)(R0), ROTATE + LXVD2X (CONSTANTS)(P1), MASK + XXLNAND MASK, MASK, MASK + +loop: + // Main highwayhash update loop + LXVD2X (MSG_BASE)(R0), MSG_LO + VADDUDM V0_LO_, MUL1_LO_, TEMP1_ + VRLD V0_LO_, ROTATE_, TEMP2_ + VADDUDM MUL1_HI_, V0_HI_, TEMP3_ + LXVD2X (MSG_BASE)(P1), MSG_HI + ADD $32, MSG_BASE, MSG_BASE + XXPERMDI MSG_LO, MSG_LO, $2, MSG_LO + XXPERMDI MSG_HI, MSG_HI, $2, V0_LO + VADDUDM MSG_LO_, MUL0_LO_, MSG_LO_ + VADDUDM V0_LO_, MUL0_HI_, V0_LO_ + VADDUDM MSG_LO_, V1_LO_, V1_LO_ + VSRD V0_HI_, ROTATE_, MSG_LO_ + VADDUDM V0_LO_, V1_HI_, V1_HI_ + VPERM V1_LO_, V1_LO_, MASK_, V0_LO_ + VMULOUW V1_LO_, TEMP2_, TEMP2_ + VPERM V1_HI_, V1_HI_, MASK_, TEMP7_ + VADDUDM V0_LO_, TEMP1_, V0_LO_ + VMULOUW V1_HI_, MSG_LO_, MSG_LO_ + VADDUDM TEMP7_, TEMP3_, V0_HI_ + VPERM V0_LO_, V0_LO_, MASK_, TEMP6_ + VRLD V1_LO_, ROTATE_, TEMP4_ + VSRD V1_HI_, ROTATE_, TEMP5_ + VPERM V0_HI_, V0_HI_, MASK_, TEMP7_ + XXLXOR MUL0_LO, TEMP2, MUL0_LO + VMULOUW TEMP1_, TEMP4_, TEMP1_ + VMULOUW TEMP3_, TEMP5_, TEMP3_ + XXLXOR MUL0_HI, MSG_LO, MUL0_HI + XXLXOR MUL1_LO, TEMP1, MUL1_LO + XXLXOR MUL1_HI, TEMP3, MUL1_HI + VADDUDM TEMP6_, V1_LO_, V1_LO_ + VADDUDM TEMP7_, V1_HI_, V1_HI_ + + SUB $32, MSG_LEN, MSG_LEN + CMPU MSG_LEN, $32 + BGE loop + + // Save state + XXPERMDI V0_LO, V0_LO, $2, V0_LO + XXPERMDI V0_HI, V0_HI, $2, V0_HI + XXPERMDI V1_LO, V1_LO, $2, V1_LO + XXPERMDI V1_HI, V1_HI, $2, V1_HI + XXPERMDI MUL0_LO, MUL0_LO, $2, MUL0_LO + XXPERMDI MUL0_HI, MUL0_HI, $2, MUL0_HI + XXPERMDI MUL1_LO, MUL1_LO, $2, MUL1_LO + XXPERMDI MUL1_HI, MUL1_HI, $2, MUL1_HI + STXVD2X V0_LO, (STATE)(R0) + STXVD2X V0_HI, (STATE)(P1) + STXVD2X V1_LO, (STATE)(P2) + STXVD2X V1_HI, (STATE)(P3) + STXVD2X MUL0_LO, (STATE)(P4) + STXVD2X MUL0_HI, (STATE)(P5) + STXVD2X MUL1_LO, (STATE)(P6) + STXVD2X MUL1_HI, (STATE)(P7) + +complete: + RET + +// Constants table +DATA ·asmConstants+0x0(SB)/8, $0x0000000000000020 +DATA ·asmConstants+0x8(SB)/8, $0x0000000000000020 +DATA 
·asmConstants+0x10(SB)/8, $0x070806090d0a040b // zipper merge constant +DATA ·asmConstants+0x18(SB)/8, $0x000f010e05020c03 // zipper merge constant + +GLOBL ·asmConstants(SB), 8, $32 diff --git a/go/vt/vthash/highway/highwayhash_ref.go b/go/vt/vthash/highway/highwayhash_ref.go new file mode 100644 index 00000000000..3ecb0e2f6ea --- /dev/null +++ b/go/vt/vthash/highway/highwayhash_ref.go @@ -0,0 +1,39 @@ +//go:build noasm || (!amd64 && !arm64 && !ppc64le) +// +build noasm !amd64,!arm64,!ppc64le + +/* +Copyright (c) 2017 Minio Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package highway + +var ( + useSSE4 = false + useAVX2 = false + useNEON = false + useVMX = false +) + +func initialize(state *[16]uint64, k []byte) { + initializeGeneric(state, k) +} + +func update(state *[16]uint64, msg []byte) { + updateGeneric(state, msg) +} + +func finalize(out []byte, state *[16]uint64) { + finalizeGeneric(out, state) +} diff --git a/go/vt/vthash/highway/highwayhash_test.go b/go/vt/vthash/highway/highwayhash_test.go new file mode 100644 index 00000000000..896b6d13763 --- /dev/null +++ b/go/vt/vthash/highway/highwayhash_test.go @@ -0,0 +1,228 @@ +/* +Copyright (c) 2017 Minio Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Copyright (c) 2017 Minio Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package highway + +import ( + "bytes" + "encoding/hex" + "math/rand" + "runtime" + "sync/atomic" + "testing" +) + +func TestVectors(t *testing.T) { + defer func(sse4, avx2, neon, vmx bool) { + useSSE4, useAVX2, useNEON, useVMX = sse4, avx2, neon, vmx + }(useSSE4, useAVX2, useNEON, useVMX) + + if useAVX2 { + t.Run("AVX2 version", func(t *testing.T) { + testVectors(New128, testVectors128, t) + testVectors(New, testVectors256, t) + useAVX2 = false + }) + } + if useSSE4 { + t.Run("SSE4 version", func(t *testing.T) { + testVectors(New128, testVectors128, t) + testVectors(New, testVectors256, t) + useSSE4 = false + }) + } + if useNEON { + t.Run("NEON version", func(t *testing.T) { + testVectors(New128, testVectors128, t) + testVectors(New, testVectors256, t) + useNEON = false + }) + } + if useVMX { + t.Run("VMX version", func(t *testing.T) { + testVectors(New128, testVectors128, t) + testVectors(New, testVectors256, t) + useVMX = false + }) + } + t.Run("Generic version", func(t *testing.T) { + testVectors(New128, testVectors128, t) + testVectors(New, testVectors256, t) + }) +} + +func testVectors(NewFunc func([32]byte) *Digest, vectors []string, t *testing.T) { + key, err := hex.DecodeString("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f") + if err != nil { + t.Fatalf("Failed to decode key: %v", err) + } + input := make([]byte, len(vectors)) + + h := NewFunc([32]byte(key)) + for i, v := range vectors { + 
input[i] = byte(i) + + expected, err := hex.DecodeString(v) + if err != nil { + t.Fatalf("Failed to decode test vector: %v error: %v", v, err) + } + + _, _ = h.Write(input[:i]) + if sum := h.Sum(nil); !bytes.Equal(sum, expected[:]) { + t.Errorf("Test %d: hash mismatch: got: %v want: %v", i, hex.EncodeToString(sum), hex.EncodeToString(expected)) + } + h.Reset() + + switch h.Size() { + case Size: + if sum := Sum(input[:i], key); !bytes.Equal(sum[:], expected) { + t.Errorf("Test %d: Sum mismatch: got: %v want: %v", i, hex.EncodeToString(sum[:]), hex.EncodeToString(expected)) + } + case Size128: + if sum := Sum128(input[:i], key); !bytes.Equal(sum[:], expected) { + t.Errorf("Test %d: Sum mismatch: got: %v want: %v", i, hex.EncodeToString(sum[:]), hex.EncodeToString(expected)) + } + } + } +} + +var testVectors128 = []string{ + "c7fe8f9d8f26ed0f6f3e097f765e5633", "a8e7813689a8b0d6b4dc9cebf91d29dc", "04da165a26ad153d68e832dc38560878", "eb0b5f291b62070679ddced90f9ae6bf", + "9ee4ac6db49e392608923139d02a922e", "d82ed186c3bd50323ac2636c90103819", "476589cbb36a476f1910ed376f57de7c", "b4717169ca1f402a6c79029fff031fbe", + "e8520528846de9a1c20aec3bc6f15c69", "b2631ef302212a14cc00505b8cb9851a", "5bbcb6260eb7a1515955a42d3b1f9e92", "5b419a0562039988137d7bc4221fd2be", + "6695af1c5f1f1fcdd4c8f9e08cba18a8", "5761fe12415625a248b8ddb8784ce9b2", "1909ccd1eb2f49bda2415602bc1dcdce", "54afc42ba5372214d7bc266e0b6c79e0", + "ad01a4d5ff604441c8189f01d5a39e02", "62991cc5964b2ac5a05e9b16b178b8ec", "ceeafb118fca40d931d5f816d6463af9", "f5cbc0e50a9dc48a937c1df58dbffd3f", + "a8002d859b276dac46aaeba56b3acd7d", "568af093bd2116f1d5d93d1698c37331", "9ff88cf650e24c0ced981841da3c12b3", "ce519a3ded97ab150e0869914774e27c", + "b845488d191e00cd772daad88bd9d9d0", "793d49a017d6f334167e7f39f604d37d", "b6c6f4a99068b55c4f30676516290813", "c0d15b248b6fda308c74d93f7e8b826f", + "c0124c20490358e01c445fac0cdaf693", "453007a51b7348f67659b64f1197b85f", "06528a7354834f0291097eeb18499a50", "297ca5e865b4e70646d4f5073a5e4152", 
+ "aa4a43c166df8419b9e4b3f95819fc16", "6cc3c6e0af7816119d84a2e59db558f9", "9004fb4084bc3f7736856543d2d56ec9", "41c9b60b71dce391e9aceec10b6a33ea", + "d4d97a5d81e3cf259ec58f828c4fe9f2", "f288c23cb838fbb904ec50f8c8c47974", "8c2b9825c5d5851df4db486fc1b1266e", "e7bd6060bd554e8ad03f8b0599d53421", + "368f7794f98f952a23641de61a2d05e8", "333245bee63a2389b9c0e8d7879ccf3a", "d5c8a97ee2f5584440512aca9bb48f41", "682ad17e83010309e661c83396f61710", + "9095d40447d80d33e4a64b3aadf19d33", "76c5f263a6639356f65ec9e3953d3b36", "3707b98685d0c8ace9284e7d08e8a02b", "20956dc8277ac2392e936051a420b68d", + "2d071a67eb4a6a8ee67ee4101a56d36e", "4ac7beb165d711002e84de6e656e0ed8", "4cc66a932bd615257d8a08d7948708ce", "af236ec152156291efcc23eb94004f26", + "803426970d88211e8610a3d3074865d8", "2d437f09af6ad7393947079de0e117a5", "145ac637f3a4170fd476f9695f21512f", "445e8912da5cfba0d13cf1d1c43d8c56", + "ce469cd800fcc893690e337e94dad5ba", "94561a1d50077c812bacbf2ce76e4d58", "bf53f073af68d691ede0c18376648ef9", "8bcf3c6befe18152d8836016dfc34cbc", + "b9eeaabe6d1bd6aa7b78160c009d96ff", "795847c04fd825432d1c5f90bd19b914", "d1a66baad176a179862b3aa5c520f7f1", "f03e2f021870bd74cb4b5fada894ea3a", + "f2c4d498711fbb98c88f91de7105bce0", +} + +var testVectors256 = []string{ + "f574c8c22a4844dd1f35c713730146d9ff1487b9ccbeaeb3f41d75453123da41", "54825fe4bc41b9ed0fc6ca3def440de2474a32cb9b1b657284e475b24c627320", + "54e4af24dff9df3f73e80a1b1abfc4117a592269cc6951112cb4330d59f60812", "5cd9d10dd7a00a48d0d111697c5e22895a86bb8b6b42a88e22c7e190c3fb3de2", + "dce42b2197c4cfc99b92d2aff69d5fa89e10f41d219fda1f9b4f4d377a27e407", "b385dca466f5b4b44201465eba634bbfe31ddccd688ef415c68580387d58740f", + "b4b9ad860ac74564b6ceb48427fb9ca913dbb2a0409de2da70119d9af26d52b6", "81ad8709a0b166d6376d8ceb38f8f1a430e063d4076e22e96c522c067dd65457", + "c08b76edb005b9f1453afffcf36f97e67897d0d98d51be4f330d1e37ebafa0d9", "81293c0dd7e4d880a1f12464d1bb0ff1d10c3f9dbe2d5ccff273b601f7e8bfc0", + 
"be62a2e5508ce4ade038fefdb192948e38b8e92f4bb78407cd6d65db74d5410e", "cf071853b977bea138971a6adea797ba1f268e9cef4c27afe8e84cc735b9393e", + "575840e30238ad15a053e839dccb119d25b2313c993eea232e21f4cae3e9d96c", "367cd7b15e6fc901a6951f53c1f967a3b8dcda7c42a3941fd3d53bbf0a00f197", + "418effee1ee915085ddf216efa280c0e745309ed628ead4ee6739d1cda01fd3f", "2e604278700519c146b1018501dbc362c10634fa17adf58547c3fed47bf884c8", + "1fcdb6a189d91af5d97b622ad675f0f7068af279f5d5017e9f4d176ac115d41a", "8e06a42ca8cff419b975923abd4a9d3bc610c0e9ddb000801356214909d58488", + "5d9fab817f6c6d12ee167709c5a3da4e493edda7731512af2dc380aa85ac0190", "fa559114f9beaa063d1ce744414f86dfda64bc60e8bcbafdb61c499247a52bde", + "db9f0735406bfcad656e488e32b787a0ea23465a93a9d14644ee3c0d445c89e3", "dfb3a3ee1dd3f9b533e1060ae224308f20e18f28c8384cf24997d69bcf1d3f70", + "e3ef9447850b3c2ba0ceda9b963f5d1c2eac63a5af6af1817530d0795a1c4423", "6237fd93c7f88a4124f9d761948e6bbc789e1a2a6af26f776eca17d4bfb7a03a", + "c1a355d22aea03cd2a1b9cb5e5fe8501e473974fd438f4d1e4763bf867dd69be", "fba0873887a851f9aee048a5d2317b2cfa6e18b638388044729f21bec78ec7a3", + "088c0dea51f18f958834f6b497897e4b6d38c55143078ec7faee206f557755d9", "0654b07f8017a9298c571f3584f81833faa7f6f66eea24ddffae975e469343e7", + "cb6c5e9380082498da979fb071d2d01f83b100274786e7561778749ff9491629", "56c554704f95d41beb6c597cff2edbff5b6bab1b9ac66a7c53c17f537076030f", + "9874599788e32588c13263afebf67c6417c928dc03d92b55abc5bf002c63d772", "4d641a6076e28068dab70fb1208b72b36ed110060612bdd0f22e4533ef14ef8a", + "fec3a139908ce3bc8912c1a32663d542a9aefc64f79555e3995a47c96b3cb0c9", "e5a634f0cb1501f6d046cebf75ea366c90597282d3c8173b357a0011eda2da7e", + "a2def9ed59e926130c729f73016877c42ff662d70f506951ab29250ad9d00d8a", "d442d403d549519344d1da0213b46bffec369dcd12b09c333022cc9e61531de6", + "96b650aa88c88b52fce18460a3ecaeb8763424c01e1558a144ec7c09ad4ac102", "27c31722a788d6be3f8760f71451e61ea602307db3265c3fb997156395e8f2dd", + "ad510b2bcf21dbe76cabb0f42463fcfa5b9c2dc2447285b09c84051e8d88adf0", 
"00cb4dcd93975105eb7d0663314a593c349e11cf1a0875ac94b05c809762c85a", + "9e77b5228c8d2209847e6b51b24d6419a04131f8abc8922b9193e125d75a787f", "4ba7d0465d2ec459646003ca653ca55eb4ae35b66b91a948d4e9543f14dfe6ba", + "e3d0036d6923b65e92a01db4bc783dd50db1f652dc4823fe118c2c6357248064", "8154b8c4b21bb643a1807e71258c31c67d689c6f4d7f4a8c7c1d4035e01702bd", + "374c824357ca517f3a701db15e4d4cb069f3f6cb1e1e514de2565421ea7567d6", "cc457ef8ee09b439b379fc59c4e8b852248c85d1180992444901ee5e647bf080", + "14d59abed19486cee73668522690a1bf7d2a90e4f6fda41efee196d658440c38", "a4a023f88be189d1d7a701e53b353b1f84282ee0b4774fa20c18f9746f64947e", + "48ec25d335c6f8af0b8d0314a40a2e2c6774441a617fd34e8914503be338ec39", "97f1835fadfd2b2acc74f2be6e3e3d0155617277043c56e17e0332e95d8a5af1", + "326312c81ef9d1d511ffb1f99b0b111032601c5426ab75a15215702857dcba87", "842808d82ca9b5c7fbee2e1bb62aa6dd2f73aefeec82988ffb4f1fc05cbd386b", + "f0323d7375f26ecf8b7dbfa22d82f0a36a4012f535744e302d17b3ebefe3280b", "dbe9b20107f898e628888a9a812aae66c9f2b8c92490ea14a4b53e52706141a7", + "b7ed07e3877e913ac15244e3dadeb41770cc11e762f189f60edd9c78fe6bce29", "8e5d15cbd83aff0ea244084cad9ecd47eb21fee60ee4c846510a34f05dc2f3de", + "4dd0822be686fd036d131707600dab32897a852b830e2b68b1393744f1e38c13", "02f9d7c454c7772feabfadd9a9e053100ae74a546863e658ca83dd729c828ac4", + "9fa066e419eb00f914d3c7a8019ebe3171f408cab8c6fe3afbe7ff870febc0b8", "fb8e3cbe8f7d27db7ba51ae17768ce537d7e9a0dd2949c71c93c459263b545b3", + "c9f2a4db3b9c6337c86d4636b3e795608ab8651e7949803ad57c92e5cd88c982", "e44a2314a7b11f6b7e46a65b252e562075d6f3402d892b3e68d71ee4fbe30cf4", + "2ac987b2b11ce18e6d263df6efaac28f039febe6873464667368d5e81da98a57", "67eb3a6a26f8b1f5dd1aec4dbe40b083aefb265b63c8e17f9fd7fede47a4a3f4", + "7524c16affe6d890f2c1da6e192a421a02b08e1ffe65379ebecf51c3c4d7bdc1", +} + +func benchmarkWrite(size int64, b *testing.B) { + var key [32]byte + data := make([]byte, size) + + h := New128(key) + b.SetBytes(size) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = 
h.Write(data) + } +} + +func BenchmarkWrite_8(b *testing.B) { benchmarkWrite(8, b) } +func BenchmarkWrite_16(b *testing.B) { benchmarkWrite(16, b) } +func BenchmarkWrite_64(b *testing.B) { benchmarkWrite(64, b) } +func BenchmarkWrite_1K(b *testing.B) { benchmarkWrite(1024, b) } +func BenchmarkWrite_8K(b *testing.B) { benchmarkWrite(8*1024, b) } + +func benchmarkSum256(size int64, b *testing.B) { + var key [32]byte + data := make([]byte, size) + + b.SetBytes(size) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Sum(data, key[:]) + } +} + +func BenchmarkSum256_8(b *testing.B) { benchmarkSum256(8, b) } +func BenchmarkSum256_16(b *testing.B) { benchmarkSum256(16, b) } +func BenchmarkSum256_64(b *testing.B) { benchmarkSum256(64, b) } +func BenchmarkSum256_1K(b *testing.B) { benchmarkSum256(1024, b) } +func BenchmarkSum256_8K(b *testing.B) { benchmarkSum256(8*1024, b) } +func BenchmarkSum256_1M(b *testing.B) { benchmarkSum256(1024*1024, b) } +func BenchmarkSum256_5M(b *testing.B) { benchmarkSum256(5*1024*1024, b) } +func BenchmarkSum256_10M(b *testing.B) { benchmarkSum256(10*1024*1024, b) } +func BenchmarkSum256_25M(b *testing.B) { benchmarkSum256(25*1024*1024, b) } + +func benchmarkParallel(b *testing.B, size int) { + + c := runtime.GOMAXPROCS(0) + + var key [32]byte + + rng := rand.New(rand.NewSource(0xabadc0cac01a)) + data := make([][]byte, c) + for i := range data { + data[i] = make([]byte, size) + rng.Read(data[i]) + } + + b.SetBytes(int64(size)) + b.ResetTimer() + + counter := uint64(0) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + index := atomic.AddUint64(&counter, 1) + Sum(data[int(index)%len(data)], key[:]) + } + }) +} + +func BenchmarkParallel_1M(b *testing.B) { benchmarkParallel(b, 1024*1024) } +func BenchmarkParallel_5M(b *testing.B) { benchmarkParallel(b, 5*1024*1024) } +func BenchmarkParallel_10M(b *testing.B) { benchmarkParallel(b, 10*1024*1024) } +func BenchmarkParallel_25M(b *testing.B) { benchmarkParallel(b, 25*1024*1024) } diff --git 
a/go/vt/vthash/metro/metro.go b/go/vt/vthash/metro/metro.go index 76482408fef..66214713604 100644 --- a/go/vt/vthash/metro/metro.go +++ b/go/vt/vthash/metro/metro.go @@ -21,6 +21,7 @@ package metro import ( "encoding/binary" "math/bits" + "unsafe" ) const k0 = 0xC83A91E1 @@ -69,6 +70,10 @@ func (m *Metro128) Write64(u uint64) { _, _ = m.Write(scratch[:8]) } +func (m *Metro128) WriteString(str string) (int, error) { + return m.Write(unsafe.Slice(unsafe.StringData(str), len(str))) +} + func (m *Metro128) Write(buffer []byte) (int, error) { ptr := buffer diff --git a/go/vt/vtorc/collection/collection.go b/go/vt/vtorc/collection/collection.go index cfc8116c9c5..0ef9a71b9a3 100644 --- a/go/vt/vtorc/collection/collection.go +++ b/go/vt/vtorc/collection/collection.go @@ -97,13 +97,6 @@ func init() { namedCollection = make(map[string](*Collection)) } -// StopMonitoring stops monitoring all the collections -func StopMonitoring() { - for _, q := range namedCollection { - q.StopAutoExpiration() - } -} - // CreateOrReturnCollection allows for creation of a new collection or // returning a pointer to an existing one given the name. This allows access // to the data structure from the api interface (http/api.go) and also when writing (inst). 
diff --git a/go/vt/vtorc/config/config.go b/go/vt/vtorc/config/config.go index 3d3dde96034..83a39303acb 100644 --- a/go/vt/vtorc/config/config.go +++ b/go/vt/vtorc/config/config.go @@ -36,20 +36,14 @@ var configurationLoaded = make(chan bool) const ( HealthPollSeconds = 1 ActiveNodeExpireSeconds = 5 - MaintenanceOwner = "vtorc" AuditPageSize = 20 - MaintenancePurgeDays = 7 - MaintenanceExpireMinutes = 10 DebugMetricsIntervalSeconds = 10 StaleInstanceCoordinatesExpireSeconds = 60 DiscoveryMaxConcurrency = 300 // Number of goroutines doing hosts discovery DiscoveryQueueCapacity = 100000 DiscoveryQueueMaxStatisticsSize = 120 DiscoveryCollectionRetentionSeconds = 120 - HostnameResolveMethod = "default" UnseenInstanceForgetHours = 240 // Number of hours after which an unseen instance is forgotten - ExpiryHostnameResolvesMinutes = 60 // Number of minutes after which to expire hostname-resolves - CandidateInstanceExpireMinutes = 60 // Minutes after which a suggestion to use an instance as a candidate replica (to be preferably promoted on primary failover) is expired. FailureDetectionPeriodBlockMinutes = 60 // The time for which an instance's failure discovery is kept "active", so as to avoid concurrent "discoveries" of the instance's failure; this preceeds any recovery process, if any. ) @@ -67,6 +61,8 @@ var ( waitReplicasTimeout = 30 * time.Second topoInformationRefreshDuration = 15 * time.Second recoveryPollDuration = 1 * time.Second + ersEnabled = true + convertTabletsWithErrantGTIDs = false ) // RegisterFlags registers the flags required by VTOrc @@ -81,11 +77,11 @@ func RegisterFlags(fs *pflag.FlagSet) { fs.DurationVar(&auditPurgeDuration, "audit-purge-duration", auditPurgeDuration, "Duration for which audit logs are held before being purged. 
Should be in multiples of days") fs.DurationVar(&recoveryPeriodBlockDuration, "recovery-period-block-duration", recoveryPeriodBlockDuration, "Duration for which a new recovery is blocked on an instance after running a recovery") fs.BoolVar(&preventCrossCellFailover, "prevent-cross-cell-failover", preventCrossCellFailover, "Prevent VTOrc from promoting a primary in a different cell than the current primary in case of a failover") - fs.Duration("lock-shard-timeout", 30*time.Second, "Duration for which a shard lock is held when running a recovery") - _ = fs.MarkDeprecated("lock-shard-timeout", "Please use lock-timeout instead.") fs.DurationVar(&waitReplicasTimeout, "wait-replicas-timeout", waitReplicasTimeout, "Duration for which to wait for replica's to respond when issuing RPCs") fs.DurationVar(&topoInformationRefreshDuration, "topo-information-refresh-duration", topoInformationRefreshDuration, "Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topology server") fs.DurationVar(&recoveryPollDuration, "recovery-poll-duration", recoveryPollDuration, "Timer duration on which VTOrc polls its database to run a recovery") + fs.BoolVar(&ersEnabled, "allow-emergency-reparent", ersEnabled, "Whether VTOrc should be allowed to run emergency reparent operation when it detects a dead primary") + fs.BoolVar(&convertTabletsWithErrantGTIDs, "change-tablets-with-errant-gtid-to-drained", convertTabletsWithErrantGTIDs, "Whether VTOrc should be changing the type of tablets with errant GTIDs to DRAINED") } // Configuration makes for vtorc configuration input, which can be provided by user via JSON formatted file. @@ -137,6 +133,26 @@ func UpdateConfigValuesFromFlags() { Config.RecoveryPollSeconds = int(recoveryPollDuration / time.Second) } +// ERSEnabled reports whether VTOrc is allowed to run ERS or not. +func ERSEnabled() bool { + return ersEnabled +} + +// SetERSEnabled sets the value for the ersEnabled variable. This should only be used from tests. 
+func SetERSEnabled(val bool) { + ersEnabled = val +} + +// ConvertTabletWithErrantGTIDs reports whether VTOrc is allowed to change the tablet type of tablets with errant GTIDs to DRAINED. +func ConvertTabletWithErrantGTIDs() bool { + return convertTabletsWithErrantGTIDs +} + +// SetConvertTabletWithErrantGTIDs sets the value for the convertTabletWithErrantGTIDs variable. This should only be used from tests. +func SetConvertTabletWithErrantGTIDs(val bool) { + convertTabletsWithErrantGTIDs = val +} + // LogConfigValues is used to log the config values. func LogConfigValues() { b, _ := json.MarshalIndent(Config, "", "\t") diff --git a/go/vt/vtorc/db/db.go b/go/vt/vtorc/db/db.go index 04150339c5c..d565c9bbdc4 100644 --- a/go/vt/vtorc/db/db.go +++ b/go/vt/vtorc/db/db.go @@ -72,7 +72,7 @@ func translateStatement(statement string) string { return sqlutils.ToSqlite3Dialect(statement) } -// registerVTOrcDeployment updates the vtorc_metadata table upon successful deployment +// registerVTOrcDeployment updates the vtorc_db_deployments table upon successful deployment func registerVTOrcDeployment(db *sql.DB) error { query := ` replace into vtorc_db_deployments ( @@ -82,7 +82,7 @@ func registerVTOrcDeployment(db *sql.DB) error { ) ` if _, err := execInternal(db, query, ""); err != nil { - log.Fatalf("Unable to write to vtorc_metadata: %+v", err) + log.Fatalf("Unable to write to vtorc_db_deployments: %+v", err) } return nil } @@ -153,14 +153,11 @@ func execInternal(db *sql.DB, query string, args ...any) (sql.Result, error) { // ExecVTOrc will execute given query on the vtorc backend database. func ExecVTOrc(query string, args ...any) (sql.Result, error) { - var err error - query = translateStatement(query) db, err := OpenVTOrc() if err != nil { return nil, err } - res, err := sqlutils.ExecNoPrepare(db, query, args...) - return res, err + return execInternal(db, query, args...) 
} // QueryVTOrcRowsMap @@ -188,15 +185,3 @@ func QueryVTOrc(query string, argsArray []any, onRow func(sqlutils.RowMap) error return err } - -// ReadTimeNow reads and returns the current timestamp as string. This is an unfortunate workaround -// to support both MySQL and SQLite in all possible timezones. SQLite only speaks UTC where MySQL has -// timezone support. By reading the time as string we get the database's de-facto notion of the time, -// which we can then feed back to it. -func ReadTimeNow() (timeNow string, err error) { - err = QueryVTOrc(`select now() as time_now`, nil, func(m sqlutils.RowMap) error { - timeNow = m.GetString("time_now") - return nil - }) - return timeNow, err -} diff --git a/go/vt/vtorc/db/generate_base.go b/go/vt/vtorc/db/generate_base.go index 88b26ed2c27..73238802920 100644 --- a/go/vt/vtorc/db/generate_base.go +++ b/go/vt/vtorc/db/generate_base.go @@ -23,6 +23,7 @@ DROP TABLE IF EXISTS database_instance `, ` CREATE TABLE database_instance ( + alias varchar(256) NOT NULL, hostname varchar(128) NOT NULL, port smallint NOT NULL, last_checked timestamp not null default (''), @@ -67,7 +68,6 @@ CREATE TABLE database_instance ( has_replication_credentials TINYint not null default 0, allow_tls TINYint not null default 0, semi_sync_enforced TINYint not null default 0, - instance_alias varchar(128) not null default '', version_comment varchar(128) NOT NULL DEFAULT '', major_version varchar(16) not null default '', binlog_row_image varchar(16) not null default '', @@ -87,14 +87,7 @@ CREATE TABLE database_instance ( semi_sync_primary_status TINYint NOT NULL DEFAULT 0, semi_sync_replica_status TINYint NOT NULL DEFAULT 0, semi_sync_primary_clients int NOT NULL DEFAULT 0, - replication_group_name VARCHAR(64) NOT NULL DEFAULT '', - replication_group_is_single_primary_mode TINYint NOT NULL DEFAULT 1, - replication_group_member_state VARCHAR(16) NOT NULL DEFAULT '', - replication_group_member_role VARCHAR(16) NOT NULL DEFAULT '', - 
replication_group_members text not null default '', - replication_group_primary_host varchar(128) NOT NULL DEFAULT '', - replication_group_primary_port smallint NOT NULL DEFAULT 0, - PRIMARY KEY (hostname,port) + PRIMARY KEY (alias) )`, ` CREATE INDEX last_checked_idx_database_instance ON database_instance(last_checked) @@ -103,48 +96,6 @@ CREATE INDEX last_checked_idx_database_instance ON database_instance(last_checke CREATE INDEX last_seen_idx_database_instance ON database_instance(last_seen) `, ` -DROP TABLE IF EXISTS database_instance_maintenance -`, - ` -CREATE TABLE database_instance_maintenance ( - database_instance_maintenance_id integer, - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - maintenance_active tinyint(4) DEFAULT NULL, - begin_timestamp timestamp NULL DEFAULT NULL, - end_timestamp timestamp NULL DEFAULT NULL, - owner varchar(128) NOT NULL, - reason text NOT NULL, - processing_node_hostname varchar(128) not null default '', - processing_node_token varchar(128) not null default '', - explicitly_bounded TINYint not null default 0, - PRIMARY KEY (database_instance_maintenance_id) -)`, - ` -CREATE UNIQUE INDEX maintenance_uidx_database_instance_maintenance ON database_instance_maintenance (maintenance_active, hostname, port) - `, - ` -DROP TABLE IF EXISTS database_instance_long_running_queries -`, - ` -CREATE TABLE database_instance_long_running_queries ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - process_id bigint(20) NOT NULL, - process_started_at timestamp not null default (''), - process_user varchar(16) NOT NULL, - process_host varchar(128) NOT NULL, - process_db varchar(128) NOT NULL, - process_command varchar(16) NOT NULL, - process_time_seconds int(11) NOT NULL, - process_state varchar(128) NOT NULL, - process_info varchar(1024) NOT NULL, - PRIMARY KEY (hostname,port,process_id) -)`, - ` -CREATE INDEX process_started_at_idx_database_instance_long_running_queries ON database_instance_long_running_queries 
(process_started_at) - `, - ` DROP TABLE IF EXISTS audit `, ` @@ -152,8 +103,7 @@ CREATE TABLE audit ( audit_id integer, audit_timestamp timestamp not null default (''), audit_type varchar(128) NOT NULL, - hostname varchar(128) NOT NULL DEFAULT '', - port smallint NOT NULL, + alias varchar(256) NOT NULL, message text NOT NULL, keyspace varchar(128) NOT NULL, shard varchar(128) NOT NULL, @@ -163,91 +113,7 @@ CREATE TABLE audit ( CREATE INDEX audit_timestamp_idx_audit ON audit (audit_timestamp) `, ` -CREATE INDEX host_port_idx_audit ON audit (hostname, port, audit_timestamp) - `, - ` -DROP TABLE IF EXISTS host_agent -`, - ` -CREATE TABLE host_agent ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - token varchar(128) NOT NULL, - last_submitted timestamp not null default (''), - last_checked timestamp NULL DEFAULT NULL, - last_seen timestamp NULL DEFAULT NULL, - mysql_port smallint DEFAULT NULL, - count_mysql_snapshots smallint NOT NULL, - PRIMARY KEY (hostname) -)`, - ` -CREATE INDEX token_idx_host_agent ON host_agent (token) - `, - ` -CREATE INDEX last_submitted_idx_host_agent ON host_agent (last_submitted) - `, - ` -CREATE INDEX last_checked_idx_host_agent ON host_agent (last_checked) - `, - ` -CREATE INDEX last_seen_idx_host_agent ON host_agent (last_seen) - `, - ` -DROP TABLE IF EXISTS agent_seed -`, - ` -CREATE TABLE agent_seed ( - agent_seed_id integer, - target_hostname varchar(128) NOT NULL, - source_hostname varchar(128) NOT NULL, - start_timestamp timestamp not null default (''), - end_timestamp timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - is_complete tinyint NOT NULL DEFAULT '0', - is_successful tinyint NOT NULL DEFAULT '0', - PRIMARY KEY (agent_seed_id) -)`, - ` -CREATE INDEX target_hostname_idx_agent_seed ON agent_seed (target_hostname,is_complete) - `, - ` -CREATE INDEX source_hostname_idx_agent_seed ON agent_seed (source_hostname,is_complete) - `, - ` -CREATE INDEX start_timestamp_idx_agent_seed ON agent_seed (start_timestamp) - `, 
- ` -CREATE INDEX is_complete_idx_agent_seed ON agent_seed (is_complete,start_timestamp) - `, - ` -CREATE INDEX is_successful_idx_agent_seed ON agent_seed (is_successful, start_timestamp) - `, - ` -DROP TABLE IF EXISTS agent_seed_state -`, - ` -CREATE TABLE agent_seed_state ( - agent_seed_state_id integer, - agent_seed_id int NOT NULL, - state_timestamp timestamp not null default (''), - state_action varchar(127) NOT NULL, - error_message varchar(255) NOT NULL, - PRIMARY KEY (agent_seed_state_id) -)`, - ` -CREATE INDEX agent_seed_idx_agent_seed_state ON agent_seed_state (agent_seed_id, state_timestamp) - `, - ` -DROP TABLE IF EXISTS hostname_resolve -`, - ` -CREATE TABLE hostname_resolve ( - hostname varchar(128) NOT NULL, - resolved_hostname varchar(128) NOT NULL, - resolved_timestamp timestamp not null default (''), - PRIMARY KEY (hostname) -)`, - ` -CREATE INDEX resolved_timestamp_idx_hostname_resolve ON hostname_resolve (resolved_timestamp) +CREATE INDEX alias_idx_audit ON audit (alias, audit_timestamp) `, ` DROP TABLE IF EXISTS active_node @@ -283,16 +149,14 @@ DROP TABLE IF EXISTS topology_recovery ` CREATE TABLE topology_recovery ( recovery_id integer, - hostname varchar(128) NOT NULL, - port smallint NOT NULL, + alias varchar(256) NOT NULL, in_active_period tinyint NOT NULL DEFAULT 0, start_active_period timestamp not null default (''), end_active_period_unixtime int, end_recovery timestamp NULL DEFAULT NULL, processing_node_hostname varchar(128) NOT NULL, processcing_node_token varchar(128) NOT NULL, - successor_hostname varchar(128) DEFAULT NULL, - successor_port smallint DEFAULT NULL, + successor_alias varchar(256) DEFAULT NULL, analysis varchar(128) not null default '', keyspace varchar(128) NOT NULL, shard varchar(128) NOT NULL, @@ -301,12 +165,9 @@ CREATE TABLE topology_recovery ( acknowledged TINYint NOT NULL DEFAULT 0, acknowledged_by varchar(128) not null default '', acknowledge_comment text not null default '', - participating_instances text not 
null default '', - lost_replicas text not null default '', all_errors text not null default '', acknowledged_at TIMESTAMP NULL, last_detection_id bigint not null default 0, - successor_alias varchar(128) DEFAULT NULL, uid varchar(128) not null default '', PRIMARY KEY (recovery_id) )`, @@ -317,20 +178,7 @@ CREATE INDEX in_active_start_period_idx_topology_recovery ON topology_recovery ( CREATE INDEX start_active_period_idx_topology_recovery ON topology_recovery (start_active_period) `, ` -CREATE UNIQUE INDEX hostname_port_active_period_uidx_topology_recovery ON topology_recovery (hostname, port, in_active_period, end_active_period_unixtime) - `, - ` -DROP TABLE IF EXISTS hostname_unresolve -`, - ` -CREATE TABLE hostname_unresolve ( - hostname varchar(128) NOT NULL, - unresolved_hostname varchar(128) NOT NULL, - last_registered timestamp not null default (''), - PRIMARY KEY (hostname) -)`, - ` -CREATE INDEX unresolved_hostname_idx_hostname_unresolve ON hostname_unresolve (unresolved_hostname) +CREATE UNIQUE INDEX alias_active_period_uidx_topology_recovery ON topology_recovery (alias, in_active_period, end_active_period_unixtime) `, ` DROP TABLE IF EXISTS database_instance_topology_history @@ -338,6 +186,7 @@ DROP TABLE IF EXISTS database_instance_topology_history ` CREATE TABLE database_instance_topology_history ( snapshot_unix_timestamp int NOT NULL, + alias varchar(256) NOT NULL, hostname varchar(128) NOT NULL, port smallint NOT NULL, source_host varchar(128) NOT NULL, @@ -345,7 +194,7 @@ CREATE TABLE database_instance_topology_history ( keyspace varchar(128) NOT NULL, shard varchar(128) NOT NULL, version varchar(128) not null default '', - PRIMARY KEY (snapshot_unix_timestamp, hostname, port) + PRIMARY KEY (snapshot_unix_timestamp, alias) )`, ` CREATE INDEX keyspace_shard_idx_database_instance_topology_history ON database_instance_topology_history (snapshot_unix_timestamp, keyspace, shard) @@ -355,38 +204,22 @@ DROP TABLE IF EXISTS candidate_database_instance `, ` 
CREATE TABLE candidate_database_instance ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, + alias varchar(256) NOT NULL, last_suggested timestamp not null default (''), priority TINYINT SIGNED NOT NULL DEFAULT 1, promotion_rule text check(promotion_rule in ('must', 'prefer', 'neutral', 'prefer_not', 'must_not')) NOT NULL DEFAULT 'neutral', - PRIMARY KEY (hostname, port) + PRIMARY KEY (alias) )`, ` CREATE INDEX last_suggested_idx_candidate_database_instance ON candidate_database_instance (last_suggested) `, ` -DROP TABLE IF EXISTS database_instance_downtime -`, - ` -CREATE TABLE database_instance_downtime ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - downtime_active tinyint(4) DEFAULT NULL, - begin_timestamp timestamp default (''), - end_timestamp timestamp NULL DEFAULT NULL, - owner varchar(128) NOT NULL, - reason text NOT NULL, - PRIMARY KEY (hostname, port) -)`, - ` DROP TABLE IF EXISTS topology_failure_detection `, ` CREATE TABLE topology_failure_detection ( detection_id integer, - hostname varchar(128) NOT NULL, - port smallint NOT NULL, + alias varchar(256) NOT NULL, in_active_period tinyint NOT NULL DEFAULT '0', start_active_period timestamp not null default (''), end_active_period_unixtime int NOT NULL, @@ -403,100 +236,17 @@ CREATE TABLE topology_failure_detection ( CREATE INDEX in_active_start_period_idx_topology_failure_detection ON topology_failure_detection (in_active_period, start_active_period) `, ` -DROP TABLE IF EXISTS hostname_resolve_history -`, - ` -CREATE TABLE hostname_resolve_history ( - resolved_hostname varchar(128) NOT NULL, - hostname varchar(128) NOT NULL, - resolved_timestamp timestamp not null default (''), - PRIMARY KEY (resolved_hostname) -)`, - ` -CREATE INDEX hostname_idx_hostname_resolve_history ON hostname_resolve_history (hostname) - `, - ` -CREATE INDEX resolved_timestamp_idx_hostname_resolve_history ON hostname_resolve_history (resolved_timestamp) - `, - ` -DROP TABLE IF EXISTS 
hostname_unresolve_history -`, - ` -CREATE TABLE hostname_unresolve_history ( - unresolved_hostname varchar(128) NOT NULL, - hostname varchar(128) NOT NULL, - last_registered timestamp not null default (''), - PRIMARY KEY (unresolved_hostname) -)`, - ` -CREATE INDEX hostname_idx_hostname_unresolve_history ON hostname_unresolve_history (hostname) - `, - ` -CREATE INDEX last_registered_idx_hostname_unresolve_history ON hostname_unresolve_history (last_registered) - `, - ` -DROP TABLE IF EXISTS primary_position_equivalence -`, - ` -CREATE TABLE primary_position_equivalence ( - equivalence_id integer, - primary1_hostname varchar(128) NOT NULL, - primary1_port smallint NOT NULL, - primary1_binary_log_file varchar(128) NOT NULL, - primary1_binary_log_pos bigint NOT NULL, - primary2_hostname varchar(128) NOT NULL, - primary2_port smallint NOT NULL, - primary2_binary_log_file varchar(128) NOT NULL, - primary2_binary_log_pos bigint NOT NULL, - last_suggested timestamp not null default (''), - PRIMARY KEY (equivalence_id) -)`, - ` -CREATE UNIQUE INDEX equivalence_uidx_primary_position_equivalence ON primary_position_equivalence (primary1_hostname, primary1_port, primary1_binary_log_file, primary1_binary_log_pos, primary2_hostname, primary2_port) - `, - ` -CREATE INDEX primary2_idx_primary_position_equivalence ON primary_position_equivalence (primary2_hostname, primary2_port, primary2_binary_log_file, primary2_binary_log_pos) - `, - ` -CREATE INDEX last_suggested_idx_primary_position_equivalence ON primary_position_equivalence (last_suggested) - `, - ` -DROP TABLE IF EXISTS async_request -`, - ` -CREATE TABLE async_request ( - request_id integer, - command varchar(128) not null, - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - destination_hostname varchar(128) NOT NULL, - destination_port smallint NOT NULL, - pattern text NOT NULL, - gtid_hint varchar(32) not null, - begin_timestamp timestamp NULL DEFAULT NULL, - end_timestamp timestamp NULL DEFAULT NULL, - story 
text NOT NULL, - PRIMARY KEY (request_id) -)`, - ` -CREATE INDEX begin_timestamp_idx_async_request ON async_request (begin_timestamp) - `, - ` -CREATE INDEX end_timestamp_idx_async_request ON async_request (end_timestamp) - `, - ` DROP TABLE IF EXISTS blocked_topology_recovery `, ` CREATE TABLE blocked_topology_recovery ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, + alias varchar(256) NOT NULL, keyspace varchar(128) NOT NULL, shard varchar(128) NOT NULL, analysis varchar(128) NOT NULL, last_blocked_timestamp timestamp not null default (''), blocking_recovery_id bigint, - PRIMARY KEY (hostname, port) + PRIMARY KEY (alias) )`, ` CREATE INDEX keyspace_shard_blocked_idx_blocked_topology_recovery ON blocked_topology_recovery (keyspace, shard, last_blocked_timestamp) @@ -506,11 +256,10 @@ DROP TABLE IF EXISTS database_instance_last_analysis `, ` CREATE TABLE database_instance_last_analysis ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, + alias varchar(256) NOT NULL, analysis_timestamp timestamp not null default (''), analysis varchar(128) NOT NULL, - PRIMARY KEY (hostname, port) + PRIMARY KEY (alias) )`, ` CREATE INDEX analysis_timestamp_idx_database_instance_last_analysis ON database_instance_last_analysis (analysis_timestamp) @@ -521,8 +270,7 @@ DROP TABLE IF EXISTS database_instance_analysis_changelog ` CREATE TABLE database_instance_analysis_changelog ( changelog_id integer, - hostname varchar(128) NOT NULL, - port smallint NOT NULL, + alias varchar(256) NOT NULL, analysis_timestamp timestamp not null default (''), analysis varchar(128) NOT NULL, PRIMARY KEY (changelog_id) @@ -551,76 +299,6 @@ CREATE INDEX first_seen_active_idx_node_health_history ON node_health_history (f CREATE UNIQUE INDEX hostname_token_idx_node_health_history ON node_health_history (hostname, token) `, ` -DROP TABLE IF EXISTS database_instance_coordinates_history -`, - ` -CREATE TABLE database_instance_coordinates_history ( - history_id integer, - hostname 
varchar(128) NOT NULL, - port smallint NOT NULL, - recorded_timestamp timestamp not null default (''), - binary_log_file varchar(128) NOT NULL, - binary_log_pos bigint NOT NULL, - relay_log_file varchar(128) NOT NULL, - relay_log_pos bigint NOT NULL, - last_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - PRIMARY KEY (history_id) -)`, - ` -CREATE INDEX hostname_port_recorded_idx_database_instance_coordinates_history ON database_instance_coordinates_history (hostname, port, recorded_timestamp) - `, - ` -CREATE INDEX recorded_timestmp_idx_database_instance_coordinates_history ON database_instance_coordinates_history (recorded_timestamp) - `, - ` -DROP TABLE IF EXISTS database_instance_binlog_files_history -`, - ` -CREATE TABLE database_instance_binlog_files_history ( - history_id integer, - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - binary_log_file varchar(128) NOT NULL, - binary_log_pos bigint NOT NULL, - first_seen timestamp not null default (''), - last_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - PRIMARY KEY (history_id) -)`, - ` -CREATE UNIQUE INDEX hostname_port_file_idx_database_instance_binlog_files_history ON database_instance_binlog_files_history (hostname, port, binary_log_file) - `, - ` -CREATE INDEX last_seen_idx_database_instance_binlog_files_history ON database_instance_binlog_files_history (last_seen) - `, - ` -DROP TABLE IF EXISTS database_instance_recent_relaylog_history -`, - ` -CREATE TABLE database_instance_recent_relaylog_history ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - current_relay_log_file varchar(128) NOT NULL, - current_relay_log_pos bigint NOT NULL, - current_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - prev_relay_log_file varchar(128) NOT NULL, - prev_relay_log_pos bigint NOT NULL, - prev_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - PRIMARY KEY (hostname, port) -)`, - ` -CREATE INDEX current_seen_idx_database_instance_recent_relaylog_history ON 
database_instance_recent_relaylog_history (current_seen) - `, - ` -DROP TABLE IF EXISTS vtorc_metadata -`, - ` -CREATE TABLE vtorc_metadata ( - anchor tinyint NOT NULL, - last_deployed_version varchar(128) NOT NULL, - last_deployed_timestamp timestamp NOT NULL, - PRIMARY KEY (anchor) -)`, - ` DROP TABLE IF EXISTS vtorc_db_deployments `, ` @@ -649,102 +327,15 @@ CREATE TABLE topology_recovery_steps ( PRIMARY KEY (recovery_step_id) )`, ` -DROP TABLE IF EXISTS raft_store -`, - ` -CREATE TABLE raft_store ( - store_id integer, - store_key varbinary(512) not null, - store_value blob not null, - PRIMARY KEY (store_id) -)`, - ` -CREATE INDEX store_key_idx_raft_store ON raft_store (store_key) - `, - ` -DROP TABLE IF EXISTS raft_log -`, - ` -CREATE TABLE raft_log ( - log_index integer, - term bigint not null, - log_type int not null, - data blob not null, - PRIMARY KEY (log_index) -)`, - ` -DROP TABLE IF EXISTS raft_snapshot -`, - ` -CREATE TABLE raft_snapshot ( - snapshot_id integer, - snapshot_name varchar(128) NOT NULL, - snapshot_meta varchar(4096) NOT NULL, - created_at timestamp not null default (''), - PRIMARY KEY (snapshot_id) -)`, - ` -CREATE UNIQUE INDEX snapshot_name_uidx_raft_snapshot ON raft_snapshot (snapshot_name) - `, - ` -DROP TABLE IF EXISTS database_instance_peer_analysis -`, - ` -CREATE TABLE database_instance_peer_analysis ( - peer varchar(128) NOT NULL, - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - analysis_timestamp timestamp not null default (''), - analysis varchar(128) NOT NULL, - PRIMARY KEY (peer, hostname, port) -)`, - ` -DROP TABLE IF EXISTS database_instance_tls -`, - ` -CREATE TABLE database_instance_tls ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - required tinyint NOT NULL DEFAULT 0, - PRIMARY KEY (hostname,port) -)`, - ` -DROP TABLE IF EXISTS hostname_ips -`, - ` -CREATE TABLE hostname_ips ( - hostname varchar(128) NOT NULL, - ipv4 varchar(128) NOT NULL, - ipv6 varchar(128) NOT NULL, - last_updated 
timestamp not null default (''), - PRIMARY KEY (hostname) -)`, - ` -DROP TABLE IF EXISTS database_instance_tags -`, - ` -CREATE TABLE database_instance_tags ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - tag_name varchar(128) NOT NULL, - tag_value varchar(128) NOT NULL, - last_updated timestamp not null default (''), - PRIMARY KEY (hostname, port, tag_name) -)`, - ` -CREATE INDEX tag_name_idx_database_instance_tags ON database_instance_tags (tag_name) - `, - ` DROP TABLE IF EXISTS database_instance_stale_binlog_coordinates `, ` CREATE TABLE database_instance_stale_binlog_coordinates ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, + alias varchar(256) NOT NULL, binary_log_file varchar(128) NOT NULL, binary_log_pos bigint NOT NULL, first_seen timestamp not null default (''), - PRIMARY KEY (hostname, port) + PRIMARY KEY (alias) )`, ` CREATE INDEX first_seen_idx_database_instance_stale_binlog_coordinates ON database_instance_stale_binlog_coordinates (first_seen) @@ -763,8 +354,7 @@ CREATE TABLE vitess_tablet ( tablet_type smallint(5) NOT NULL, primary_timestamp timestamp NOT NULL, info varchar(512) NOT NULL, - UNIQUE (alias), - PRIMARY KEY (hostname, port) + PRIMARY KEY (alias) )`, ` CREATE INDEX cell_idx_vitess_tablet ON vitess_tablet (cell) @@ -783,16 +373,18 @@ CREATE TABLE vitess_keyspace ( PRIMARY KEY (keyspace) )`, ` -CREATE INDEX source_host_port_idx_database_instance_database_instance on database_instance (source_host, source_port) - `, - ` -CREATE INDEX active_timestamp_idx_database_instance_maintenance on database_instance_maintenance (maintenance_active, begin_timestamp) - `, +DROP TABLE IF EXISTS vitess_shard +`, ` -CREATE INDEX active_end_timestamp_idx_database_instance_maintenance on database_instance_maintenance (maintenance_active, end_timestamp) - `, +CREATE TABLE vitess_shard ( + keyspace varchar(128) NOT NULL, + shard varchar(128) NOT NULL, + primary_alias varchar(512) NOT NULL, + primary_timestamp varchar(512) NOT 
NULL, + PRIMARY KEY (keyspace, shard) +)`, ` -CREATE INDEX last_registered_idx_hostname_unresolve on hostname_unresolve (last_registered) +CREATE INDEX source_host_port_idx_database_instance_database_instance on database_instance (source_host, source_port) `, ` CREATE INDEX keyspace_shard_in_active_idx_topology_recovery on topology_recovery (keyspace, shard, in_active_period) @@ -807,7 +399,7 @@ CREATE INDEX acknowledged_idx_topology_recovery on topology_recovery (acknowledg CREATE INDEX last_blocked_idx_blocked_topology_recovery on blocked_topology_recovery (last_blocked_timestamp) `, ` -CREATE INDEX instance_timestamp_idx_database_instance_analysis_changelog on database_instance_analysis_changelog (hostname, port, analysis_timestamp) +CREATE INDEX instance_timestamp_idx_database_instance_analysis_changelog on database_instance_analysis_changelog (alias, analysis_timestamp) `, ` CREATE INDEX last_detection_idx_topology_recovery on topology_recovery (last_detection_id) @@ -822,9 +414,6 @@ CREATE INDEX uid_idx_topology_recovery ON topology_recovery(uid) CREATE INDEX recovery_uid_idx_topology_recovery_steps ON topology_recovery_steps(recovery_uid) `, ` -CREATE INDEX end_timestamp_idx_database_instance_downtime ON database_instance_downtime(end_timestamp) - `, - ` -CREATE UNIQUE INDEX host_port_active_recoverable_uidx_topology_failure_detection ON topology_failure_detection (hostname, port, in_active_period, end_active_period_unixtime, is_actionable) +CREATE UNIQUE INDEX alias_active_recoverable_uidx_topology_failure_detection ON topology_failure_detection (alias, in_active_period, end_active_period_unixtime, is_actionable) `, } diff --git a/go/vt/vtorc/discovery/aggregated.go b/go/vt/vtorc/discovery/aggregated.go index 67b3ff336b2..37d965fa51c 100644 --- a/go/vt/vtorc/discovery/aggregated.go +++ b/go/vt/vtorc/discovery/aggregated.go @@ -121,18 +121,18 @@ func aggregate(results []collection.Metric) AggregatedDiscoveryMetrics { // different names x := 
names[InstanceKeys] - x[v.InstanceKey.String()] = 1 // Value doesn't matter + x[v.TabletAlias] = 1 // Value doesn't matter names[InstanceKeys] = x if v.Err == nil { // ok names x := names[OkInstanceKeys] - x[v.InstanceKey.String()] = 1 // Value doesn't matter + x[v.TabletAlias] = 1 // Value doesn't matter names[OkInstanceKeys] = x } else { // failed names x := names[FailedInstanceKeys] - x[v.InstanceKey.String()] = 1 // Value doesn't matter + x[v.TabletAlias] = 1 // Value doesn't matter names[FailedInstanceKeys] = x } diff --git a/go/vt/vtorc/discovery/metric.go b/go/vt/vtorc/discovery/metric.go index 58afaa781ef..c322739502d 100644 --- a/go/vt/vtorc/discovery/metric.go +++ b/go/vt/vtorc/discovery/metric.go @@ -20,19 +20,17 @@ package discovery import ( "time" - - "vitess.io/vitess/go/vt/vtorc/inst" ) // Metric holds a set of information of instance discovery metrics type Metric struct { - Timestamp time.Time // time the collection was taken - InstanceKey inst.InstanceKey // instance being monitored - BackendLatency time.Duration // time taken talking to the backend - InstanceLatency time.Duration // time taken talking to the instance - TotalLatency time.Duration // total time taken doing the discovery - Err error // error (if applicable) doing the discovery process - InstancePollSecondsDurationCount uint64 // total numbers of times discoverInstance exceeded InstancePollSeconds + Timestamp time.Time // time the collection was taken + TabletAlias string // instance being monitored + BackendLatency time.Duration // time taken talking to the backend + InstanceLatency time.Duration // time taken talking to the instance + TotalLatency time.Duration // total time taken doing the discovery + Err error // error (if applicable) doing the discovery process + InstancePollSecondsDurationCount uint64 // total numbers of times discoverInstance exceeded InstancePollSeconds } // When did the metric happen diff --git a/go/vt/vtorc/discovery/metric_json.go 
b/go/vt/vtorc/discovery/metric_json.go deleted file mode 100644 index eb204f28043..00000000000 --- a/go/vt/vtorc/discovery/metric_json.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - Copyright 2017 Simon J Mudd - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package discovery - -// Collect discovery metrics and manage their storage and retrieval for monitoring purposes. - -import ( - "errors" - "fmt" - "time" - - "vitess.io/vitess/go/vt/vtorc/collection" -) - -// formattedFloat is to force the JSON output to show 3 decimal places -type formattedFloat float64 - -func (m formattedFloat) String() string { - return fmt.Sprintf("%.3f", m) -} - -// MetricJSON holds a structure which represents some discovery latency information -type MetricJSON struct { - Timestamp time.Time - Hostname string - Port int - BackendLatencySeconds formattedFloat - InstanceLatencySeconds formattedFloat - TotalLatencySeconds formattedFloat - Err error -} - -// JSONSince returns an API response of discovery metric collection information -// in a printable JSON format. 
-func JSONSince(c *collection.Collection, t time.Time) ([](MetricJSON), error) { - if c == nil { - return nil, errors.New("MetricCollection.JSONSince: c == nil") - } - raw, err := c.Since(t) - if err != nil { - return nil, err - } - - // build up JSON response for each Metric we received - var s []MetricJSON - for i := range raw { - m := raw[i].(*Metric) // convert back to a real Metric rather than collection.Metric interface - mj := MetricJSON{ - Timestamp: m.Timestamp, - Hostname: m.InstanceKey.Hostname, - Port: m.InstanceKey.Port, - BackendLatencySeconds: formattedFloat(m.BackendLatency.Seconds()), - InstanceLatencySeconds: formattedFloat(m.InstanceLatency.Seconds()), - TotalLatencySeconds: formattedFloat(m.TotalLatency.Seconds()), - Err: m.Err, - } - s = append(s, mj) - } - return s, nil -} diff --git a/go/vt/vtorc/discovery/queue.go b/go/vt/vtorc/discovery/queue.go index 50d5c276e4e..95751c6ae25 100644 --- a/go/vt/vtorc/discovery/queue.go +++ b/go/vt/vtorc/discovery/queue.go @@ -31,7 +31,6 @@ import ( "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vtorc/config" - "vitess.io/vitess/go/vt/vtorc/inst" ) // QueueMetric contains the queue's active and queued sizes @@ -46,9 +45,9 @@ type Queue struct { name string done chan struct{} - queue chan inst.InstanceKey - queuedKeys map[inst.InstanceKey]time.Time - consumedKeys map[inst.InstanceKey]time.Time + queue chan string + queuedKeys map[string]time.Time + consumedKeys map[string]time.Time metrics []QueueMetric } @@ -62,13 +61,6 @@ func init() { discoveryQueue = make(map[string](*Queue)) } -// StopMonitoring stops monitoring all the queues -func StopMonitoring() { - for _, q := range discoveryQueue { - q.stopMonitoring() - } -} - // CreateOrReturnQueue allows for creation of a new discovery queue or // returning a pointer to an existing one given the name. 
func CreateOrReturnQueue(name string) *Queue { @@ -80,9 +72,9 @@ func CreateOrReturnQueue(name string) *Queue { q := &Queue{ name: name, - queuedKeys: make(map[inst.InstanceKey]time.Time), - consumedKeys: make(map[inst.InstanceKey]time.Time), - queue: make(chan inst.InstanceKey, config.DiscoveryQueueCapacity), + queuedKeys: make(map[string]time.Time), + consumedKeys: make(map[string]time.Time), + queue: make(chan string, config.DiscoveryQueueCapacity), } go q.startMonitoring() @@ -106,11 +98,6 @@ func (q *Queue) startMonitoring() { } } -// Stop monitoring the queue -func (q *Queue) stopMonitoring() { - q.done <- struct{}{} -} - // do a check of the entries in the queue, both those active and queued func (q *Queue) collectStatistics() { q.Lock() @@ -134,7 +121,7 @@ func (q *Queue) QueueLen() int { // Push enqueues a key if it is not on a queue and is not being // processed; silently returns otherwise. -func (q *Queue) Push(key inst.InstanceKey) { +func (q *Queue) Push(key string) { q.Lock() defer q.Unlock() @@ -154,7 +141,7 @@ func (q *Queue) Push(key inst.InstanceKey) { // Consume fetches a key to process; blocks if queue is empty. // Release must be called once after Consume. -func (q *Queue) Consume() inst.InstanceKey { +func (q *Queue) Consume() string { q.Lock() queue := q.queue q.Unlock() @@ -179,7 +166,7 @@ func (q *Queue) Consume() inst.InstanceKey { // Release removes a key from a list of being processed keys // which allows that key to be pushed into the queue again. 
-func (q *Queue) Release(key inst.InstanceKey) { +func (q *Queue) Release(key string) { q.Lock() defer q.Unlock() diff --git a/go/vt/vtorc/inst/analysis.go b/go/vt/vtorc/inst/analysis.go index 07291578e0f..54500621cb9 100644 --- a/go/vt/vtorc/inst/analysis.go +++ b/go/vt/vtorc/inst/analysis.go @@ -18,8 +18,6 @@ package inst import ( "encoding/json" - "fmt" - "strings" "time" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -27,11 +25,13 @@ import ( ) type AnalysisCode string -type StructureAnalysisCode string const ( NoProblem AnalysisCode = "NoProblem" ClusterHasNoPrimary AnalysisCode = "ClusterHasNoPrimary" + PrimaryTabletDeleted AnalysisCode = "PrimaryTabletDeleted" + InvalidPrimary AnalysisCode = "InvalidPrimary" + InvalidReplica AnalysisCode = "InvalidReplica" DeadPrimaryWithoutReplicas AnalysisCode = "DeadPrimaryWithoutReplicas" DeadPrimary AnalysisCode = "DeadPrimary" DeadPrimaryAndReplicas AnalysisCode = "DeadPrimaryAndReplicas" @@ -57,8 +57,11 @@ const ( PrimaryWithoutReplicas AnalysisCode = "PrimaryWithoutReplicas" BinlogServerFailingToConnectToPrimary AnalysisCode = "BinlogServerFailingToConnectToPrimary" GraceFulPrimaryTakeover AnalysisCode = "GracefulPrimaryTakeover" + ErrantGTIDDetected AnalysisCode = "ErrantGTIDDetected" ) +type StructureAnalysisCode string + const ( StatementAndMixedLoggingReplicasStructureWarning StructureAnalysisCode = "StatementAndMixedLoggingReplicasStructureWarning" StatementAndRowLoggingReplicasStructureWarning StructureAnalysisCode = "StatementAndRowLoggingReplicasStructureWarning" @@ -72,38 +75,14 @@ const ( NotEnoughValidSemiSyncReplicasStructureWarning StructureAnalysisCode = "NotEnoughValidSemiSyncReplicasStructureWarning" ) -type InstanceAnalysis struct { - key *InstanceKey - analysis AnalysisCode -} - -func NewInstanceAnalysis(instanceKey *InstanceKey, analysis AnalysisCode) *InstanceAnalysis { - return &InstanceAnalysis{ - key: instanceKey, - analysis: analysis, - } -} - -func (instanceAnalysis *InstanceAnalysis) 
String() string { - return fmt.Sprintf("%s/%s", instanceAnalysis.key.StringCode(), string(instanceAnalysis.analysis)) -} - // PeerAnalysisMap indicates the number of peers agreeing on an analysis. // Key of this map is a InstanceAnalysis.String() type PeerAnalysisMap map[string]int type ReplicationAnalysisHints struct { - IncludeDowntimed bool - IncludeNoProblem bool - AuditAnalysis bool + AuditAnalysis bool } -const ( - ForcePrimaryFailoverCommandHint string = "force-primary-failover" - ForcePrimaryTakeoverCommandHint string = "force-primary-takeover" - GracefulPrimaryTakeoverCommandHint string = "graceful-primary-takeover" -) - type AnalysisInstanceType string const ( @@ -114,16 +93,19 @@ const ( // ReplicationAnalysis notes analysis on replication chain status, per instance type ReplicationAnalysis struct { - AnalyzedInstanceKey InstanceKey - AnalyzedInstanceAlias *topodatapb.TabletAlias - AnalyzedInstancePrimaryKey InstanceKey - TabletType topodatapb.TabletType - PrimaryTimeStamp time.Time - ClusterDetails ClusterInfo - AnalyzedInstanceDataCenter string - AnalyzedInstanceRegion string - AnalyzedKeyspace string - AnalyzedShard string + AnalyzedInstanceHostname string + AnalyzedInstancePort int + AnalyzedInstanceAlias string + AnalyzedInstancePrimaryAlias string + TabletType topodatapb.TabletType + PrimaryTimeStamp time.Time + ClusterDetails ClusterInfo + AnalyzedInstanceDataCenter string + AnalyzedInstanceRegion string + AnalyzedKeyspace string + AnalyzedShard string + // ShardPrimaryTermTimestamp is the primary term start time stored in the shard record. 
+ ShardPrimaryTermTimestamp string AnalyzedInstancePhysicalEnvironment string AnalyzedInstanceBinlogCoordinates BinlogCoordinates IsPrimary bool @@ -135,17 +117,13 @@ type ReplicationAnalysis struct { CountValidReplicas uint CountValidReplicatingReplicas uint CountReplicasFailingToConnectToPrimary uint - CountDowntimedReplicas uint ReplicationDepth uint IsFailingToConnectToPrimary bool ReplicationStopped bool + ErrantGTID string Analysis AnalysisCode Description string StructureAnalysis []StructureAnalysisCode - IsDowntimed bool - IsReplicasDowntimed bool // as good as downtimed because all replicas are downtimed AND analysis is all about the replicas (e.e. AllPrimaryReplicasNotReplicating) - DowntimeEndTimestamp string - DowntimeRemainingSeconds int IsBinlogServer bool OracleGTIDImmediateTopology bool MariaDBGTIDImmediateTopology bool @@ -166,24 +144,14 @@ type ReplicationAnalysis struct { IsActionableRecovery bool ProcessingNodeHostname string ProcessingNodeToken string - CountAdditionalAgreeingNodes int StartActivePeriod string - SkippableDueToDowntime bool GTIDMode string MinReplicaGTIDMode string MaxReplicaGTIDMode string MaxReplicaGTIDErrant string - CommandHint string IsReadOnly bool } -type AnalysisMap map[string](*ReplicationAnalysis) - -type ReplicationAnalysisChangelog struct { - AnalyzedInstanceKey InstanceKey - Changelog []string -} - func (replicationAnalysis *ReplicationAnalysis) MarshalJSON() ([]byte, error) { i := struct { ReplicationAnalysis @@ -193,18 +161,6 @@ func (replicationAnalysis *ReplicationAnalysis) MarshalJSON() ([]byte, error) { return json.Marshal(i) } -// AnalysisString returns a human friendly description of all analysis issues -func (replicationAnalysis *ReplicationAnalysis) AnalysisString() string { - result := []string{} - if replicationAnalysis.Analysis != NoProblem { - result = append(result, string(replicationAnalysis.Analysis)) - } - for _, structureAnalysis := range replicationAnalysis.StructureAnalysis { - result = 
append(result, string(structureAnalysis)) - } - return strings.Join(result, ", ") -} - // Get a string description of the analyzed instance type (primary? co-primary? intermediate-primary?) func (replicationAnalysis *ReplicationAnalysis) GetAnalysisInstanceType() AnalysisInstanceType { if replicationAnalysis.IsCoPrimary { diff --git a/go/vt/vtorc/inst/analysis_dao.go b/go/vt/vtorc/inst/analysis_dao.go index c2252287b1c..25082f133da 100644 --- a/go/vt/vtorc/inst/analysis_dao.go +++ b/go/vt/vtorc/inst/analysis_dao.go @@ -22,6 +22,7 @@ import ( "vitess.io/vitess/go/vt/external/golib/sqlutils" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/topo/topoproto" "google.golang.org/protobuf/encoding/prototext" @@ -37,13 +38,11 @@ import ( "github.com/rcrowley/go-metrics" ) -var analysisChangeWriteAttemptCounter = metrics.NewCounter() var analysisChangeWriteCounter = metrics.NewCounter() var recentInstantAnalysis *cache.Cache func init() { - _ = metrics.Register("analysis.change.write.attempt", analysisChangeWriteAttemptCounter) _ = metrics.Register("analysis.change.write", analysisChangeWriteCounter) go initializeAnalysisDaoPostConfiguration() @@ -57,13 +56,20 @@ func initializeAnalysisDaoPostConfiguration() { type clusterAnalysis struct { hasClusterwideAction bool - primaryKey *InstanceKey + totalTablets int + primaryAlias string durability reparentutil.Durabler } // GetReplicationAnalysis will check for replication problems (dead primary; unreachable primary; etc) -func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAnalysisHints) ([]ReplicationAnalysis, error) { - result := []ReplicationAnalysis{} +func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAnalysisHints) ([]*ReplicationAnalysis, error) { + var result []*ReplicationAnalysis + appendAnalysis := func(analysis *ReplicationAnalysis) { + if analysis.Analysis == NoProblem && len(analysis.StructureAnalysis) == 0 { + return + } + result = append(result, analysis) + 
} // TODO(sougou); deprecate ReduceReplicationAnalysisCount args := sqlutils.Args(config.Config.ReasonableReplicationLagSeconds, ValidSecondsFromSeenToLastAttemptedCheck(), config.Config.ReasonableReplicationLagSeconds, keyspace, shard) @@ -78,13 +84,13 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna vitess_keyspace.keyspace AS keyspace, vitess_keyspace.keyspace_type AS keyspace_type, vitess_keyspace.durability_policy AS durability_policy, + vitess_shard.primary_timestamp AS shard_primary_term_timestamp, primary_instance.read_only AS read_only, - MIN(primary_instance.hostname) IS NULL AS is_invalid, + MIN(primary_instance.gtid_errant) AS gtid_errant, + MIN(primary_instance.alias) IS NULL AS is_invalid, MIN(primary_instance.data_center) AS data_center, MIN(primary_instance.region) AS region, MIN(primary_instance.physical_environment) AS physical_environment, - MIN(primary_instance.source_host) AS source_host, - MIN(primary_instance.source_port) AS source_port, MIN(primary_instance.binary_log_file) AS binary_log_file, MIN(primary_instance.binary_log_pos) AS binary_log_pos, MIN(primary_tablet.info) AS primary_tablet_info, @@ -109,10 +115,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna OR primary_instance.source_port = 0 OR substr(primary_instance.source_host, 1, 2) = '//' ) - AND ( - primary_instance.replication_group_name = '' - OR primary_instance.replication_group_member_role = 'PRIMARY' - ) ) AS is_primary, MIN(primary_instance.is_co_primary) AS is_co_primary, MIN(primary_instance.gtid_mode) AS gtid_mode, @@ -150,19 +152,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna primary_instance.replica_sql_running = 0 OR primary_instance.replica_io_running = 0 ) AS replication_stopped, - MIN( - primary_downtime.downtime_active is not null - and ifnull(primary_downtime.end_timestamp, now()) > now() - ) AS is_downtimed, - MIN( - 
IFNULL(primary_downtime.end_timestamp, '') - ) AS downtime_end_timestamp, - MIN( - IFNULL( - unix_timestamp() - unix_timestamp(primary_downtime.end_timestamp), - 0 - ) - ) AS downtime_remaining_seconds, MIN( primary_instance.binlog_server ) AS is_binlog_server, @@ -267,18 +256,10 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna IFNULL(MAX(replica_instance.gtid_mode), '') AS max_replica_gtid_mode, IFNULL( MAX( - case when replica_downtime.downtime_active is not null - and ifnull(replica_downtime.end_timestamp, now()) > now() then '' else replica_instance.gtid_errant end + replica_instance.gtid_errant ), '' ) AS max_replica_gtid_errant, - IFNULL( - SUM( - replica_downtime.downtime_active is not null - and ifnull(replica_downtime.end_timestamp, now()) > now() - ), - 0 - ) AS count_downtimed_replicas, COUNT( DISTINCT case when replica_instance.log_bin AND replica_instance.log_replica_updates then replica_instance.major_version else NULL end @@ -288,50 +269,31 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna JOIN vitess_keyspace ON ( vitess_tablet.keyspace = vitess_keyspace.keyspace ) + JOIN vitess_shard ON ( + vitess_tablet.keyspace = vitess_shard.keyspace + AND vitess_tablet.shard = vitess_shard.shard + ) LEFT JOIN database_instance primary_instance ON ( - vitess_tablet.hostname = primary_instance.hostname + vitess_tablet.alias = primary_instance.alias + AND vitess_tablet.hostname = primary_instance.hostname AND vitess_tablet.port = primary_instance.port ) LEFT JOIN vitess_tablet primary_tablet ON ( primary_tablet.hostname = primary_instance.source_host AND primary_tablet.port = primary_instance.source_port ) - LEFT JOIN hostname_resolve ON ( - primary_instance.hostname = hostname_resolve.hostname - ) LEFT JOIN database_instance replica_instance ON ( - COALESCE( - hostname_resolve.resolved_hostname, - primary_instance.hostname - ) = replica_instance.source_host + primary_instance.hostname = 
replica_instance.source_host AND primary_instance.port = replica_instance.source_port ) - LEFT JOIN database_instance_maintenance ON ( - primary_instance.hostname = database_instance_maintenance.hostname - AND primary_instance.port = database_instance_maintenance.port - AND database_instance_maintenance.maintenance_active = 1 - ) LEFT JOIN database_instance_stale_binlog_coordinates ON ( - primary_instance.hostname = database_instance_stale_binlog_coordinates.hostname - AND primary_instance.port = database_instance_stale_binlog_coordinates.port - ) - LEFT JOIN database_instance_downtime as primary_downtime ON ( - primary_instance.hostname = primary_downtime.hostname - AND primary_instance.port = primary_downtime.port - AND primary_downtime.downtime_active = 1 - ) - LEFT JOIN database_instance_downtime as replica_downtime ON ( - replica_instance.hostname = replica_downtime.hostname - AND replica_instance.port = replica_downtime.port - AND replica_downtime.downtime_active = 1 + vitess_tablet.alias = database_instance_stale_binlog_coordinates.alias ) WHERE - database_instance_maintenance.database_instance_maintenance_id IS NULL - AND ? IN ('', vitess_keyspace.keyspace) + ? IN ('', vitess_keyspace.keyspace) AND ? 
IN ('', vitess_tablet.shard) GROUP BY - vitess_tablet.hostname, - vitess_tablet.port + vitess_tablet.alias ORDER BY vitess_tablet.tablet_type ASC, vitess_tablet.primary_timestamp DESC @@ -339,7 +301,7 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna clusters := make(map[string]*clusterAnalysis) err := db.Db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { - a := ReplicationAnalysis{ + a := &ReplicationAnalysis{ Analysis: NoProblem, ProcessingNodeHostname: process.ThisHostname, ProcessingNodeToken: util.ProcessToken.Hash, @@ -370,12 +332,14 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna return nil } + a.ShardPrimaryTermTimestamp = m.GetString("shard_primary_term_timestamp") a.IsPrimary = m.GetBool("is_primary") countCoPrimaryReplicas := m.GetUint("count_co_primary_replicas") a.IsCoPrimary = m.GetBool("is_co_primary") || (countCoPrimaryReplicas > 0) - a.AnalyzedInstanceKey = InstanceKey{Hostname: m.GetString("hostname"), Port: m.GetInt("port")} - a.AnalyzedInstanceAlias = tablet.Alias - a.AnalyzedInstancePrimaryKey = InstanceKey{Hostname: m.GetString("source_host"), Port: m.GetInt("source_port")} + a.AnalyzedInstanceHostname = m.GetString("hostname") + a.AnalyzedInstancePort = m.GetInt("port") + a.AnalyzedInstanceAlias = topoproto.TabletAliasString(tablet.Alias) + a.AnalyzedInstancePrimaryAlias = topoproto.TabletAliasString(primaryTablet.Alias) a.AnalyzedInstanceDataCenter = m.GetString("data_center") a.AnalyzedInstanceRegion = m.GetString("region") a.AnalyzedInstancePhysicalEnvironment = m.GetString("physical_environment") @@ -394,15 +358,12 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.CountValidReplicas = m.GetUint("count_valid_replicas") a.CountValidReplicatingReplicas = m.GetUint("count_valid_replicating_replicas") a.CountReplicasFailingToConnectToPrimary = m.GetUint("count_replicas_failing_to_connect_to_primary") - a.CountDowntimedReplicas = 
m.GetUint("count_downtimed_replicas") a.ReplicationDepth = m.GetUint("replication_depth") a.IsFailingToConnectToPrimary = m.GetBool("is_failing_to_connect_to_primary") a.ReplicationStopped = m.GetBool("replication_stopped") - a.IsDowntimed = m.GetBool("is_downtimed") - a.DowntimeEndTimestamp = m.GetString("downtime_end_timestamp") - a.DowntimeRemainingSeconds = m.GetInt("downtime_remaining_seconds") a.IsBinlogServer = m.GetBool("is_binlog_server") a.ClusterDetails.ReadRecoveryInfo() + a.ErrantGTID = m.GetString("gtid_errant") countValidOracleGTIDReplicas := m.GetUint("count_valid_oracle_gtid_replicas") a.OracleGTIDImmediateTopology = countValidOracleGTIDReplicas == a.CountValidReplicas && a.CountValidReplicas > 0 @@ -434,8 +395,8 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.IsReadOnly = m.GetUint("read_only") == 1 if !a.LastCheckValid { - analysisMessage := fmt.Sprintf("analysis: Keyspace: %+v, Shard: %+v, IsPrimary: %+v, LastCheckValid: %+v, LastCheckPartialSuccess: %+v, CountReplicas: %+v, CountValidReplicas: %+v, CountValidReplicatingReplicas: %+v, CountLaggingReplicas: %+v, CountDelayedReplicas: %+v, CountReplicasFailingToConnectToPrimary: %+v", - a.ClusterDetails.Keyspace, a.ClusterDetails.Shard, a.IsPrimary, a.LastCheckValid, a.LastCheckPartialSuccess, a.CountReplicas, a.CountValidReplicas, a.CountValidReplicatingReplicas, a.CountLaggingReplicas, a.CountDelayedReplicas, a.CountReplicasFailingToConnectToPrimary, + analysisMessage := fmt.Sprintf("analysis: Alias: %+v, Keyspace: %+v, Shard: %+v, IsPrimary: %+v, LastCheckValid: %+v, LastCheckPartialSuccess: %+v, CountReplicas: %+v, CountValidReplicas: %+v, CountValidReplicatingReplicas: %+v, CountLaggingReplicas: %+v, CountDelayedReplicas: %+v, CountReplicasFailingToConnectToPrimary: %+v", + a.AnalyzedInstanceAlias, a.ClusterDetails.Keyspace, a.ClusterDetails.Shard, a.IsPrimary, a.LastCheckValid, a.LastCheckPartialSuccess, a.CountReplicas, a.CountValidReplicas, 
a.CountValidReplicatingReplicas, a.CountLaggingReplicas, a.CountDelayedReplicas, a.CountReplicasFailingToConnectToPrimary, ) if util.ClearToLog("analysis_dao", analysisMessage) { log.Infof(analysisMessage) @@ -446,7 +407,7 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna clusters[keyspaceShard] = &clusterAnalysis{} if a.TabletType == topodatapb.TabletType_PRIMARY { a.IsClusterPrimary = true - clusters[keyspaceShard].primaryKey = &a.AnalyzedInstanceKey + clusters[keyspaceShard].primaryAlias = a.AnalyzedInstanceAlias } durabilityPolicy := m.GetString("durability_policy") if durabilityPolicy == "" { @@ -462,6 +423,8 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna } // ca has clusterwide info ca := clusters[keyspaceShard] + // Increment the total number of tablets. + ca.totalTablets += 1 if ca.hasClusterwideAction { // We can only take one cluster level action at a time. return nil @@ -471,10 +434,13 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna return nil } isInvalid := m.GetBool("is_invalid") - if isInvalid { - return nil - } - if a.IsClusterPrimary && !a.LastCheckValid && a.CountReplicas == 0 { + if a.IsClusterPrimary && isInvalid { + a.Analysis = InvalidPrimary + a.Description = "VTOrc hasn't been able to reach the primary even once since restart/shutdown" + } else if isInvalid { + a.Analysis = InvalidReplica + a.Description = "VTOrc hasn't been able to reach the replica even once since restart/shutdown" + } else if a.IsClusterPrimary && !a.LastCheckValid && a.CountReplicas == 0 { a.Analysis = DeadPrimaryWithoutReplicas a.Description = "Primary cannot be reached by vtorc and has no replica" ca.hasClusterwideAction = true @@ -503,18 +469,28 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.Analysis = PrimaryIsReadOnly a.Description = "Primary is read-only" // - } else if a.IsClusterPrimary && SemiSyncAckers(ca.durability, 
tablet) != 0 && !a.SemiSyncPrimaryEnabled { + } else if a.IsClusterPrimary && reparentutil.SemiSyncAckers(ca.durability, tablet) != 0 && !a.SemiSyncPrimaryEnabled { a.Analysis = PrimarySemiSyncMustBeSet a.Description = "Primary semi-sync must be set" // - } else if a.IsClusterPrimary && SemiSyncAckers(ca.durability, tablet) == 0 && a.SemiSyncPrimaryEnabled { + } else if a.IsClusterPrimary && reparentutil.SemiSyncAckers(ca.durability, tablet) == 0 && a.SemiSyncPrimaryEnabled { a.Analysis = PrimarySemiSyncMustNotBeSet a.Description = "Primary semi-sync must not be set" // - } else if topo.IsReplicaType(a.TabletType) && ca.primaryKey == nil { + } else if topo.IsReplicaType(a.TabletType) && a.ErrantGTID != "" { + a.Analysis = ErrantGTIDDetected + a.Description = "Tablet has errant GTIDs" + } else if topo.IsReplicaType(a.TabletType) && ca.primaryAlias == "" && a.ShardPrimaryTermTimestamp == "" { + // ClusterHasNoPrimary should only be detected when the shard record doesn't have any primary term start time specified either. a.Analysis = ClusterHasNoPrimary a.Description = "Cluster has no primary" ca.hasClusterwideAction = true + } else if topo.IsReplicaType(a.TabletType) && ca.primaryAlias == "" && a.ShardPrimaryTermTimestamp != "" { + // If there are no primary tablets, but the shard primary start time isn't empty, then we know + // the primary tablet was deleted. 
+ a.Analysis = PrimaryTabletDeleted + a.Description = "Primary tablet has been deleted" + ca.hasClusterwideAction = true } else if topo.IsReplicaType(a.TabletType) && !a.IsReadOnly { a.Analysis = ReplicaIsWritable a.Description = "Replica is writable" @@ -523,7 +499,7 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.Analysis = NotConnectedToPrimary a.Description = "Not connected to the primary" // - } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && ca.primaryKey != nil && a.AnalyzedInstancePrimaryKey != *ca.primaryKey { + } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && ca.primaryAlias != "" && a.AnalyzedInstancePrimaryAlias != ca.primaryAlias { a.Analysis = ConnectedToWrongPrimary a.Description = "Connected to wrong primary" // @@ -531,11 +507,11 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.Analysis = ReplicationStopped a.Description = "Replication is stopped" // - } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && IsReplicaSemiSync(ca.durability, primaryTablet, tablet) && !a.SemiSyncReplicaEnabled { + } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && reparentutil.IsReplicaSemiSync(ca.durability, primaryTablet, tablet) && !a.SemiSyncReplicaEnabled { a.Analysis = ReplicaSemiSyncMustBeSet a.Description = "Replica semi-sync must be set" // - } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && !IsReplicaSemiSync(ca.durability, primaryTablet, tablet) && a.SemiSyncReplicaEnabled { + } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && !reparentutil.IsReplicaSemiSync(ca.durability, primaryTablet, tablet) && a.SemiSyncReplicaEnabled { a.Analysis = ReplicaSemiSyncMustNotBeSet a.Description = "Replica semi-sync must not be set" // @@ -588,28 +564,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna // a.Description = "Primary has no replicas" // } - appendAnalysis := func(analysis 
*ReplicationAnalysis) { - if a.Analysis == NoProblem && len(a.StructureAnalysis) == 0 && !hints.IncludeNoProblem { - return - } - if a.IsDowntimed { - a.SkippableDueToDowntime = true - } - if a.CountReplicas == a.CountDowntimedReplicas { - switch a.Analysis { - case AllPrimaryReplicasNotReplicating, - AllPrimaryReplicasNotReplicatingOrDead, - PrimarySingleReplicaDead: - a.IsReplicasDowntimed = true - a.SkippableDueToDowntime = true - } - } - if a.SkippableDueToDowntime && !hints.IncludeDowntimed { - return - } - result = append(result, a) - } - { // Moving on to structure analysis // We also do structural checks. See if there's potential danger in promotions @@ -650,17 +604,19 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.StructureAnalysis = append(a.StructureAnalysis, NotEnoughValidSemiSyncReplicasStructureWarning) } } - appendAnalysis(&a) + appendAnalysis(a) if a.CountReplicas > 0 && hints.AuditAnalysis { // Interesting enough for analysis go func() { - _ = auditInstanceAnalysisInChangelog(&a.AnalyzedInstanceKey, a.Analysis) + _ = auditInstanceAnalysisInChangelog(a.AnalyzedInstanceAlias, a.Analysis) }() } return nil }) + result = postProcessAnalyses(result, clusters) + if err != nil { log.Error(err) } @@ -668,23 +624,65 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna return result, err } +// postProcessAnalyses is used to update different analyses based on the information gleaned from looking at all the analyses together instead of individual data. +func postProcessAnalyses(result []*ReplicationAnalysis, clusters map[string]*clusterAnalysis) []*ReplicationAnalysis { + for { + // Store whether we have changed the result of replication analysis or not. + resultChanged := false + + // Go over all the analyses. + for _, analysis := range result { + // If one of them is an InvalidPrimary, then we see if all the other tablets in this keyspace shard are + // unable to replicate or not. 
+ if analysis.Analysis == InvalidPrimary { + keyspaceName := analysis.ClusterDetails.Keyspace + shardName := analysis.ClusterDetails.Shard + keyspaceShard := getKeyspaceShardName(keyspaceName, shardName) + totalReplicas := clusters[keyspaceShard].totalTablets - 1 + var notReplicatingReplicas []int + for idx, replicaAnalysis := range result { + if replicaAnalysis.ClusterDetails.Keyspace == keyspaceName && + replicaAnalysis.ClusterDetails.Shard == shardName && topo.IsReplicaType(replicaAnalysis.TabletType) { + // If the replica's last check is invalid or its replication is stopped, then we consider as not replicating. + if !replicaAnalysis.LastCheckValid || replicaAnalysis.ReplicationStopped { + notReplicatingReplicas = append(notReplicatingReplicas, idx) + } + } + } + // If none of the other tablets are able to replicate, then we conclude that this primary is not just Invalid, but also Dead. + // In this case, we update the analysis for the primary tablet and remove all the analyses of the replicas. + if totalReplicas > 0 && len(notReplicatingReplicas) == totalReplicas { + resultChanged = true + analysis.Analysis = DeadPrimary + for i := len(notReplicatingReplicas) - 1; i >= 0; i-- { + idxToRemove := notReplicatingReplicas[i] + result = append(result[0:idxToRemove], result[idxToRemove+1:]...) + } + break + } + } + } + if !resultChanged { + break + } + } + return result +} + // auditInstanceAnalysisInChangelog will write down an instance's analysis in the database_instance_analysis_changelog table. // To not repeat recurring analysis code, the database_instance_last_analysis table is used, so that only changes to // analysis codes are written. 
-func auditInstanceAnalysisInChangelog(instanceKey *InstanceKey, analysisCode AnalysisCode) error { - if lastWrittenAnalysis, found := recentInstantAnalysis.Get(instanceKey.DisplayString()); found { +func auditInstanceAnalysisInChangelog(tabletAlias string, analysisCode AnalysisCode) error { + if lastWrittenAnalysis, found := recentInstantAnalysis.Get(tabletAlias); found { if lastWrittenAnalysis == analysisCode { // Surely nothing new. // And let's expand the timeout - recentInstantAnalysis.Set(instanceKey.DisplayString(), analysisCode, cache.DefaultExpiration) + recentInstantAnalysis.Set(tabletAlias, analysisCode, cache.DefaultExpiration) return nil } } - // Passed the cache; but does database agree that there's a change? Here's a persistent cache; this comes here - // to verify no two vtorc services are doing this without coordinating (namely, one dies, the other taking its place - // and has no familiarity of the former's cache) - analysisChangeWriteAttemptCounter.Inc(1) + // Find if the lastAnalysisHasChanged or not while updating the row if it has. lastAnalysisChanged := false { sqlResult, err := db.ExecVTOrc(` @@ -692,11 +690,10 @@ func auditInstanceAnalysisInChangelog(instanceKey *InstanceKey, analysisCode Ana analysis = ?, analysis_timestamp = now() where - hostname = ? - and port = ? + alias = ? and analysis != ? `, - string(analysisCode), instanceKey.Hostname, instanceKey.Port, string(analysisCode), + string(analysisCode), tabletAlias, string(analysisCode), ) if err != nil { log.Error(err) @@ -707,36 +704,48 @@ func auditInstanceAnalysisInChangelog(instanceKey *InstanceKey, analysisCode Ana log.Error(err) return err } - lastAnalysisChanged = (rows > 0) + lastAnalysisChanged = rows > 0 } + + // If the last analysis has not changed, then there is a chance that this is the first insertion. + // We need to find that out too when we insert into the database. 
+ firstInsertion := false if !lastAnalysisChanged { - _, err := db.ExecVTOrc(` + // The insert only returns more than 1 row changed if this is the first insertion. + sqlResult, err := db.ExecVTOrc(` insert ignore into database_instance_last_analysis ( - hostname, port, analysis_timestamp, analysis + alias, analysis_timestamp, analysis ) values ( - ?, ?, now(), ? + ?, now(), ? ) `, - instanceKey.Hostname, instanceKey.Port, string(analysisCode), + tabletAlias, string(analysisCode), ) if err != nil { log.Error(err) return err } + rows, err := sqlResult.RowsAffected() + if err != nil { + log.Error(err) + return err + } + firstInsertion = rows > 0 } - recentInstantAnalysis.Set(instanceKey.DisplayString(), analysisCode, cache.DefaultExpiration) - if !lastAnalysisChanged { + recentInstantAnalysis.Set(tabletAlias, analysisCode, cache.DefaultExpiration) + // If the analysis has changed or if it is the first insertion, we need to make sure we write this change to the database. + if !lastAnalysisChanged && !firstInsertion { return nil } _, err := db.ExecVTOrc(` insert into database_instance_analysis_changelog ( - hostname, port, analysis_timestamp, analysis + alias, analysis_timestamp, analysis ) values ( - ?, ?, now(), ? + ?, now(), ? 
) `, - instanceKey.Hostname, instanceKey.Port, string(analysisCode), + tabletAlias, string(analysisCode), ) if err == nil { analysisChangeWriteCounter.Inc(1) diff --git a/go/vt/vtorc/inst/analysis_dao_test.go b/go/vt/vtorc/inst/analysis_dao_test.go index 480986e34ba..c1926fca089 100644 --- a/go/vt/vtorc/inst/analysis_dao_test.go +++ b/go/vt/vtorc/inst/analysis_dao_test.go @@ -18,16 +18,35 @@ package inst import ( "testing" + "time" + "github.com/patrickmn/go-cache" + "github.com/rcrowley/go-metrics" "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/external/golib/sqlutils" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/test" ) +var ( + // The initialSQL is a set of insert commands copied from a dump of an actual running VTOrc instances. The relevant insert commands are here. + // This is a dump taken from a test running 4 tablets, zone1-101 is the primary, zone1-100 is a replica, zone1-112 is a rdonly and zone2-200 is a cross-cell replica. 
+ initialSQL = []string{ + `INSERT INTO database_instance VALUES('zone1-0000000112','localhost',6747,'2022-12-28 07:26:04','2022-12-28 07:26:04',213696377,'8.0.31','ROW',1,1,'vt-0000000112-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000112-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-9240-92a06c3be3c2','2022-12-28 07:26:04','',1,0,0,'Homebrew','8.0','FULL',10816929,0,0,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-9240-92a06c3be3c2',1,1,'',1000000000000000000,1,0,0,0);`, + `INSERT INTO database_instance VALUES('zone1-0000000100','localhost',6711,'2022-12-28 07:26:04','2022-12-28 07:26:04',1094500338,'8.0.31','ROW',1,1,'vt-0000000100-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000100-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-acf8-d6b0ef9f4eaa','2022-12-28 07:26:04','',1,0,0,'Homebrew','8.0','FULL',10103920,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-acf8-d6b0ef9f4eaa',1,1,'',1000000000000000000,1,0,1,0);`, + `INSERT INTO database_instance VALUES('zone1-0000000101','localhost',6714,'2022-12-28 07:26:04','2022-12-28 07:26:04',390954723,'8.0.31','ROW',1,1,'vt-0000000101-bin.000001',15583,'',0,0,0,'',0,'',0,NULL,NULL,0,'','',0,0,'',0,0,0,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a4cc4-8680-11ed-a104-47706090afbd','2022-12-28 07:26:04','',0,0,0,'Homebrew','8.0','FULL',11366095,1,1,'ON',1,'','','729a4cc4-8680-11ed-a104-47706090afbd',-1,-1,'',1000000000000000000,1,1,0,2);`, + `INSERT INTO database_instance VALUES('zone2-0000000200','localhost',6756,'2022-12-28 07:26:05','2022-12-28 
07:26:05',444286571,'8.0.31','ROW',1,1,'vt-0000000200-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000200-relay-bin.000002',15815,0,1,0,'zone2','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a497c-8680-11ed-8ad4-3f51d747db75','2022-12-28 07:26:05','',1,0,0,'Homebrew','8.0','FULL',10443112,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a497c-8680-11ed-8ad4-3f51d747db75',1,1,'',1000000000000000000,1,0,1,0);`, + `INSERT INTO vitess_tablet VALUES('zone1-0000000100','localhost',6711,'ks','0','zone1',2,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3130307d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363731307d20706f72745f6d61703a7b6b65793a227674222076616c75653a363730397d206b657973706163653a226b73222073686172643a22302220747970653a5245504c494341206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363731312064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, + `INSERT INTO vitess_tablet VALUES('zone1-0000000101','localhost',6714,'ks','0','zone1',1,'2022-12-28 07:23:25.129898+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3130317d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363731337d20706f72745f6d61703a7b6b65793a227674222076616c75653a363731327d206b657973706163653a226b73222073686172643a22302220747970653a5052494d415259206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a36373134207072696d6172795f7465726d5f73746172745f74696d653a7b7365636f6e64733a31363732323132323035206e616e6f7365636f6e64733a3132393839383030307d2064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, + `INSERT INTO vitess_tablet 
VALUES('zone1-0000000112','localhost',6747,'ks','0','zone1',3,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3131327d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363734367d20706f72745f6d61703a7b6b65793a227674222076616c75653a363734357d206b657973706163653a226b73222073686172643a22302220747970653a52444f4e4c59206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363734372064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, + `INSERT INTO vitess_tablet VALUES('zone2-0000000200','localhost',6756,'ks','0','zone2',2,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653222207569643a3230307d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363735357d20706f72745f6d61703a7b6b65793a227674222076616c75653a363735347d206b657973706163653a226b73222073686172643a22302220747970653a5245504c494341206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363735362064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, + `INSERT INTO vitess_shard VALUES('ks','0','zone1-0000000101','2022-12-28 07:23:25.129898+00:00');`, + `INSERT INTO vitess_keyspace VALUES('ks',0,'semi_sync');`, + } +) + // TestGetReplicationAnalysisDecision tests the code of GetReplicationAnalysis decision-making. It doesn't check the SQL query // run by it. It only checks the analysis part after the rows have been read. This tests fakes the db and explicitly returns the // rows that are specified in the test. 
@@ -58,6 +77,25 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { keyspaceWanted: "ks", shardWanted: "0", codeWanted: ClusterHasNoPrimary, + }, { + name: "PrimaryTabletDeleted", + info: []*test.InfoForRecoveryAnalysis{{ + TabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 100}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_REPLICA, + MysqlHostname: "localhost", + MysqlPort: 6709, + }, + ShardPrimaryTermTimestamp: "2022-12-28 07:23:25.129898+00:00", + DurabilityPolicy: "none", + LastCheckValid: 1, + }}, + keyspaceWanted: "ks", + shardWanted: "0", + codeWanted: PrimaryTabletDeleted, }, { name: "DeadPrimary", info: []*test.InfoForRecoveryAnalysis{{ @@ -297,10 +335,11 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlPort: 6709, }, DurabilityPolicy: "none", - SourceHost: "localhost", - SourcePort: 6708, - LastCheckValid: 1, - ReadOnly: 0, + PrimaryTabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, + }, + LastCheckValid: 1, + ReadOnly: 0, }}, keyspaceWanted: "ks", shardWanted: "0", @@ -336,10 +375,11 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlPort: 6709, }, DurabilityPolicy: "none", - SourceHost: "localhost", - SourcePort: 6706, - LastCheckValid: 1, - ReadOnly: 1, + PrimaryTabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 102}, + }, + LastCheckValid: 1, + ReadOnly: 1, }}, keyspaceWanted: "ks", shardWanted: "0", @@ -374,9 +414,10 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", - SourceHost: "localhost", - SourcePort: 6708, + DurabilityPolicy: "none", + PrimaryTabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, + }, LastCheckValid: 1, ReadOnly: 1, ReplicationStopped: 1, @@ -417,17 +458,9 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlPort: 6709, 
}, PrimaryTabletInfo: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, - Hostname: "localhost", - Keyspace: "ks", - Shard: "0", - Type: topodatapb.TabletType_PRIMARY, - MysqlHostname: "localhost", - MysqlPort: 6708, + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, DurabilityPolicy: "semi_sync", - SourceHost: "localhost", - SourcePort: 6708, LastCheckValid: 1, ReadOnly: 1, SemiSyncReplicaEnabled: 0, @@ -466,17 +499,9 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlPort: 6709, }, PrimaryTabletInfo: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, - Hostname: "localhost", - Keyspace: "ks", - Shard: "0", - Type: topodatapb.TabletType_PRIMARY, - MysqlHostname: "localhost", - MysqlPort: 6708, + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, DurabilityPolicy: "none", - SourceHost: "localhost", - SourcePort: 6708, LastCheckValid: 1, ReadOnly: 1, SemiSyncReplicaEnabled: 1, @@ -560,6 +585,148 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { }}, keyspaceWanted: "ks", shardWanted: "0", + codeWanted: InvalidReplica, + }, { + name: "DeadPrimary when VTOrc is starting up", + info: []*test.InfoForRecoveryAnalysis{{ + TabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_PRIMARY, + MysqlHostname: "localhost", + MysqlPort: 6708, + }, + DurabilityPolicy: "none", + IsInvalid: 1, + }, { + TabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 100}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_REPLICA, + MysqlHostname: "localhost", + MysqlPort: 6709, + }, + LastCheckValid: 1, + ReplicationStopped: 1, + }, { + TabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 103}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: 
topodatapb.TabletType_REPLICA, + MysqlHostname: "localhost", + MysqlPort: 6710, + }, + LastCheckValid: 1, + ReplicationStopped: 1, + }}, + keyspaceWanted: "ks", + shardWanted: "0", + codeWanted: DeadPrimary, + }, { + name: "Invalid Primary", + info: []*test.InfoForRecoveryAnalysis{{ + TabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_PRIMARY, + MysqlHostname: "localhost", + MysqlPort: 6708, + }, + DurabilityPolicy: "none", + IsInvalid: 1, + }}, + keyspaceWanted: "ks", + shardWanted: "0", + codeWanted: InvalidPrimary, + }, { + name: "ErrantGTID", + info: []*test.InfoForRecoveryAnalysis{{ + TabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_PRIMARY, + MysqlHostname: "localhost", + MysqlPort: 6708, + }, + DurabilityPolicy: "none", + LastCheckValid: 1, + CountReplicas: 4, + CountValidReplicas: 4, + CountValidReplicatingReplicas: 3, + CountValidOracleGTIDReplicas: 4, + CountLoggingReplicas: 2, + IsPrimary: 1, + }, { + TabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 100}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_REPLICA, + MysqlHostname: "localhost", + MysqlPort: 6709, + }, + DurabilityPolicy: "none", + ErrantGTID: "some errant GTID", + PrimaryTabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, + }, + LastCheckValid: 1, + ReadOnly: 1, + }}, + keyspaceWanted: "ks", + shardWanted: "0", + codeWanted: ErrantGTIDDetected, + }, { + name: "ErrantGTID on a non-replica", + info: []*test.InfoForRecoveryAnalysis{{ + TabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_PRIMARY, + MysqlHostname: 
"localhost", + MysqlPort: 6708, + }, + DurabilityPolicy: "none", + LastCheckValid: 1, + CountReplicas: 4, + CountValidReplicas: 4, + CountValidReplicatingReplicas: 3, + CountValidOracleGTIDReplicas: 4, + CountLoggingReplicas: 2, + IsPrimary: 1, + }, { + TabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 100}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_DRAINED, + MysqlHostname: "localhost", + MysqlPort: 6709, + }, + DurabilityPolicy: "none", + ErrantGTID: "some errant GTID", + PrimaryTabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, + }, + LastCheckValid: 1, + ReadOnly: 1, + }}, + keyspaceWanted: "ks", + shardWanted: "0", codeWanted: NoProblem, }, } @@ -600,20 +767,6 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { // This test is somewhere between a unit test, and an end-to-end test. It is specifically useful for testing situations which are hard to come by in end-to-end test, but require // real-world data to test specifically. func TestGetReplicationAnalysis(t *testing.T) { - // The initialSQL is a set of insert commands copied from a dump of an actual running VTOrc instances. The relevant insert commands are here. - // This is a dump taken from a test running 4 tablets, zone1-101 is the primary, zone1-100 is a replica, zone1-112 is a rdonly and zone2-200 is a cross-cell replica. 
- initialSQL := []string{ - `INSERT INTO database_instance VALUES('localhost',6747,'2022-12-28 07:26:04','2022-12-28 07:26:04',213696377,'8.0.31','ROW',1,1,'vt-0000000112-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000112-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-9240-92a06c3be3c2','2022-12-28 07:26:04','',1,0,0,'zone1-0000000112','Homebrew','8.0','FULL',10816929,0,0,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-9240-92a06c3be3c2',1,1,'',1000000000000000000,1,0,0,0,'',0,'','','[]','',0);`, - `INSERT INTO database_instance VALUES('localhost',6711,'2022-12-28 07:26:04','2022-12-28 07:26:04',1094500338,'8.0.31','ROW',1,1,'vt-0000000100-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000100-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-acf8-d6b0ef9f4eaa','2022-12-28 07:26:04','',1,0,0,'zone1-0000000100','Homebrew','8.0','FULL',10103920,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-acf8-d6b0ef9f4eaa',1,1,'',1000000000000000000,1,0,1,0,'',0,'','','[]','',0);`, - `INSERT INTO database_instance VALUES('localhost',6714,'2022-12-28 07:26:04','2022-12-28 07:26:04',390954723,'8.0.31','ROW',1,1,'vt-0000000101-bin.000001',15583,'',0,0,0,'',0,'',0,NULL,NULL,0,'','',0,0,'',0,0,0,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a4cc4-8680-11ed-a104-47706090afbd','2022-12-28 07:26:04','',0,0,0,'zone1-0000000101','Homebrew','8.0','FULL',11366095,1,1,'ON',1,'','','729a4cc4-8680-11ed-a104-47706090afbd',-1,-1,'',1000000000000000000,1,1,0,2,'',0,'','','[]','',0);`, - `INSERT INTO database_instance VALUES('localhost',6756,'2022-12-28 
07:26:05','2022-12-28 07:26:05',444286571,'8.0.31','ROW',1,1,'vt-0000000200-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000200-relay-bin.000002',15815,0,1,0,'zone2','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a497c-8680-11ed-8ad4-3f51d747db75','2022-12-28 07:26:05','',1,0,0,'zone2-0000000200','Homebrew','8.0','FULL',10443112,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a497c-8680-11ed-8ad4-3f51d747db75',1,1,'',1000000000000000000,1,0,1,0,'',0,'','','[]','',0);`, - `INSERT INTO vitess_tablet VALUES('zone1-0000000100','localhost',6711,'ks','0','zone1',2,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3130307d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363731307d20706f72745f6d61703a7b6b65793a227674222076616c75653a363730397d206b657973706163653a226b73222073686172643a22302220747970653a5245504c494341206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363731312064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, - `INSERT INTO vitess_tablet VALUES('zone1-0000000101','localhost',6714,'ks','0','zone1',1,'2022-12-28 07:23:25.129898+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3130317d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363731337d20706f72745f6d61703a7b6b65793a227674222076616c75653a363731327d206b657973706163653a226b73222073686172643a22302220747970653a5052494d415259206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a36373134207072696d6172795f7465726d5f73746172745f74696d653a7b7365636f6e64733a31363732323132323035206e616e6f7365636f6e64733a3132393839383030307d2064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, - 
`INSERT INTO vitess_tablet VALUES('zone1-0000000112','localhost',6747,'ks','0','zone1',3,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3131327d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363734367d20706f72745f6d61703a7b6b65793a227674222076616c75653a363734357d206b657973706163653a226b73222073686172643a22302220747970653a52444f4e4c59206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363734372064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, - `INSERT INTO vitess_tablet VALUES('zone2-0000000200','localhost',6756,'ks','0','zone2',2,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653222207569643a3230307d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363735357d20706f72745f6d61703a7b6b65793a227674222076616c75653a363735347d206b657973706163653a226b73222073686172643a22302220747970653a5245504c494341206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363735362064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, - `INSERT INTO vitess_keyspace VALUES('ks',0,'semi_sync');`, - } - // The test is intended to be used as follows. The initial data is stored into the database. Following this, some specific queries are run that each individual test specifies to get the desired state. 
tests := []struct { name string @@ -632,7 +785,7 @@ func TestGetReplicationAnalysis(t *testing.T) { // This query removes the primary tablet's vitess_tablet record `delete from vitess_tablet where port = 6714`, }, - codeWanted: ClusterHasNoPrimary, + codeWanted: PrimaryTabletDeleted, keyspaceWanted: "ks", shardWanted: "0", }, { @@ -643,9 +796,10 @@ func TestGetReplicationAnalysis(t *testing.T) { }, // As long as we have the vitess record stating that this tablet is the primary // It would be incorrect to run a PRS. - // This situation only happens when we haven't been able to read the MySQL information even once for this tablet. - // So it is likely a new tablet. - codeWanted: NoProblem, + // We should still flag this tablet as Invalid. + codeWanted: InvalidPrimary, + keyspaceWanted: "ks", + shardWanted: "0", }, { name: "Removing Replica Tablet's MySQL record", sql: []string{ @@ -656,13 +810,15 @@ func TestGetReplicationAnalysis(t *testing.T) { // We should wait for the MySQL information to be refreshed once. // This situation only happens when we haven't been able to read the MySQL information even once for this tablet. // So it is likely a new tablet. - codeWanted: NoProblem, + codeWanted: InvalidReplica, + keyspaceWanted: "ks", + shardWanted: "0", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - // Each test should clear the database. The easiest way to do that is to run all the initialization commands again + // Each test should clear the database. The easiest way to do that is to run all the initialization commands again. defer func() { db.ClearVTOrcDatabase() }() @@ -685,3 +841,232 @@ func TestGetReplicationAnalysis(t *testing.T) { }) } } + +// TestAuditInstanceAnalysisInChangelog tests the functionality of the auditInstanceAnalysisInChangelog function +// and verifies that we write the correct number of times to the database. 
+func TestAuditInstanceAnalysisInChangelog(t *testing.T) { + tests := []struct { + name string + cacheExpiration time.Duration + }{ + { + name: "Long expiration", + cacheExpiration: 2 * time.Minute, + }, { + name: "Very short expiration", + cacheExpiration: 100 * time.Millisecond, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create the cache for the test to use. + oldRecentInstantAnalysisCache := recentInstantAnalysis + oldAnalysisChangeWriteCounter := analysisChangeWriteCounter + + recentInstantAnalysis = cache.New(tt.cacheExpiration, 100*time.Millisecond) + analysisChangeWriteCounter = metrics.NewCounter() + + defer func() { + // Set the old values back. + recentInstantAnalysis = oldRecentInstantAnalysisCache + analysisChangeWriteCounter = oldAnalysisChangeWriteCounter + // Each test should clear the database. The easiest way to do that is to run all the initialization commands again. + db.ClearVTOrcDatabase() + }() + + updates := []struct { + tabletAlias string + analysisCode AnalysisCode + writeCounterExpectation int + wantErr string + }{ + { + // Store a new analysis for the zone1-100 tablet. + tabletAlias: "zone1-100", + analysisCode: ReplicationStopped, + writeCounterExpectation: 1, + }, { + // Write the same analysis, no new write should happen. + tabletAlias: "zone1-100", + analysisCode: ReplicationStopped, + writeCounterExpectation: 1, + }, { + // Change the analysis. This should trigger an update. + tabletAlias: "zone1-100", + analysisCode: ReplicaSemiSyncMustBeSet, + writeCounterExpectation: 2, + }, + } + + for _, upd := range updates { + // We sleep 200 milliseconds to make sure that the cache has had time to update. + // It should be able to delete entries if the expiration is less than 200 milliseconds. 
+ time.Sleep(200 * time.Millisecond) + err := auditInstanceAnalysisInChangelog(upd.tabletAlias, upd.analysisCode) + if upd.wantErr != "" { + require.EqualError(t, err, upd.wantErr) + continue + } + require.NoError(t, err) + require.EqualValues(t, upd.writeCounterExpectation, analysisChangeWriteCounter.Count()) + } + }) + } +} + +// TestPostProcessAnalyses tests the functionality of the postProcessAnalyses function. +func TestPostProcessAnalyses(t *testing.T) { + ks0 := ClusterInfo{ + Keyspace: "ks", + Shard: "0", + CountInstances: 4, + } + ks80 := ClusterInfo{ + Keyspace: "ks", + Shard: "80-", + CountInstances: 3, + } + clusters := map[string]*clusterAnalysis{ + getKeyspaceShardName(ks0.Keyspace, ks0.Shard): { + totalTablets: int(ks0.CountInstances), + }, + getKeyspaceShardName(ks80.Keyspace, ks80.Shard): { + totalTablets: int(ks80.CountInstances), + }, + } + + tests := []struct { + name string + analyses []*ReplicationAnalysis + want []*ReplicationAnalysis + }{ + { + name: "No processing needed", + analyses: []*ReplicationAnalysis{ + { + Analysis: ReplicationStopped, + TabletType: topodatapb.TabletType_REPLICA, + LastCheckValid: true, + ClusterDetails: ks0, + }, { + Analysis: ReplicaSemiSyncMustBeSet, + LastCheckValid: true, + TabletType: topodatapb.TabletType_REPLICA, + ClusterDetails: ks0, + }, { + Analysis: PrimaryHasPrimary, + LastCheckValid: true, + TabletType: topodatapb.TabletType_REPLICA, + ClusterDetails: ks0, + }, + }, + }, { + name: "Conversion of InvalidPrimary to DeadPrimary", + analyses: []*ReplicationAnalysis{ + { + Analysis: InvalidPrimary, + AnalyzedInstanceAlias: "zone1-100", + TabletType: topodatapb.TabletType_PRIMARY, + ClusterDetails: ks0, + }, { + Analysis: NoProblem, + LastCheckValid: true, + AnalyzedInstanceAlias: "zone1-202", + TabletType: topodatapb.TabletType_RDONLY, + ClusterDetails: ks80, + }, { + Analysis: ConnectedToWrongPrimary, + LastCheckValid: true, + AnalyzedInstanceAlias: "zone1-101", + TabletType: 
topodatapb.TabletType_REPLICA, + ReplicationStopped: true, + ClusterDetails: ks0, + }, { + Analysis: ReplicationStopped, + LastCheckValid: true, + AnalyzedInstanceAlias: "zone1-102", + TabletType: topodatapb.TabletType_RDONLY, + ReplicationStopped: true, + ClusterDetails: ks0, + }, { + Analysis: InvalidReplica, + AnalyzedInstanceAlias: "zone1-108", + TabletType: topodatapb.TabletType_REPLICA, + LastCheckValid: false, + ClusterDetails: ks0, + }, { + Analysis: NoProblem, + AnalyzedInstanceAlias: "zone1-302", + LastCheckValid: true, + TabletType: topodatapb.TabletType_REPLICA, + ClusterDetails: ks80, + }, + }, + want: []*ReplicationAnalysis{ + { + Analysis: DeadPrimary, + AnalyzedInstanceAlias: "zone1-100", + TabletType: topodatapb.TabletType_PRIMARY, + ClusterDetails: ks0, + }, { + Analysis: NoProblem, + LastCheckValid: true, + AnalyzedInstanceAlias: "zone1-202", + TabletType: topodatapb.TabletType_RDONLY, + ClusterDetails: ks80, + }, { + Analysis: NoProblem, + LastCheckValid: true, + AnalyzedInstanceAlias: "zone1-302", + TabletType: topodatapb.TabletType_REPLICA, + ClusterDetails: ks80, + }, + }, + }, + { + name: "Unable to convert InvalidPrimary to DeadPrimary", + analyses: []*ReplicationAnalysis{ + { + Analysis: InvalidPrimary, + AnalyzedInstanceAlias: "zone1-100", + TabletType: topodatapb.TabletType_PRIMARY, + ClusterDetails: ks0, + }, { + Analysis: NoProblem, + AnalyzedInstanceAlias: "zone1-202", + LastCheckValid: true, + TabletType: topodatapb.TabletType_RDONLY, + ClusterDetails: ks80, + }, { + Analysis: NoProblem, + LastCheckValid: true, + AnalyzedInstanceAlias: "zone1-101", + TabletType: topodatapb.TabletType_REPLICA, + ClusterDetails: ks0, + }, { + Analysis: ReplicationStopped, + LastCheckValid: true, + AnalyzedInstanceAlias: "zone1-102", + TabletType: topodatapb.TabletType_RDONLY, + ReplicationStopped: true, + ClusterDetails: ks0, + }, { + Analysis: NoProblem, + LastCheckValid: true, + AnalyzedInstanceAlias: "zone1-302", + TabletType: 
topodatapb.TabletType_REPLICA, + ClusterDetails: ks80, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.want == nil { + tt.want = tt.analyses + } + result := postProcessAnalyses(tt.analyses, clusters) + require.ElementsMatch(t, tt.want, result) + }) + } +} diff --git a/go/vt/vtorc/inst/audit.go b/go/vt/vtorc/inst/audit.go deleted file mode 100644 index 6650b01ac18..00000000000 --- a/go/vt/vtorc/inst/audit.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package inst - -// Audit presents a single audit entry (namely in the database) -type Audit struct { - AuditID int64 - AuditTimestamp string - AuditType string - AuditInstanceKey InstanceKey - Message string -} diff --git a/go/vt/vtorc/inst/audit_dao.go b/go/vt/vtorc/inst/audit_dao.go index 7882449c655..96db7f32ccf 100644 --- a/go/vt/vtorc/inst/audit_dao.go +++ b/go/vt/vtorc/inst/audit_dao.go @@ -22,7 +22,6 @@ import ( "os" "time" - "vitess.io/vitess/go/vt/external/golib/sqlutils" "vitess.io/vitess/go/vt/log" "github.com/rcrowley/go-metrics" @@ -50,14 +49,11 @@ func EnableAuditSyslog() (err error) { } // AuditOperation creates and writes a new audit entry by given params -func AuditOperation(auditType string, instanceKey *InstanceKey, message string) error { - if instanceKey == nil { - instanceKey = &InstanceKey{} - } +func AuditOperation(auditType string, tabletAlias string, message string) error { keyspace := "" shard := "" - if instanceKey.Hostname != "" { - keyspace, shard, _ = GetKeyspaceShardName(instanceKey) + if tabletAlias != "" { + keyspace, shard, _ = GetKeyspaceShardName(tabletAlias) } auditWrittenToFile := false @@ -71,7 +67,7 @@ func AuditOperation(auditType string, instanceKey *InstanceKey, message string) } defer f.Close() - text := fmt.Sprintf("%s\t%s\t%s\t%d\t[%s:%s]\t%s\t\n", time.Now().Format("2006-01-02 15:04:05"), auditType, instanceKey.Hostname, instanceKey.Port, keyspace, shard, message) + text := fmt.Sprintf("%s\t%s\t%s\t[%s:%s]\t%s\t\n", time.Now().Format("2006-01-02 15:04:05"), auditType, tabletAlias, keyspace, shard, message) if _, err = f.WriteString(text); err != nil { log.Error(err) } @@ -81,14 +77,13 @@ func AuditOperation(auditType string, instanceKey *InstanceKey, message string) _, err := db.ExecVTOrc(` insert into audit ( - audit_timestamp, audit_type, hostname, port, keyspace, shard, message + audit_timestamp, audit_type, alias, keyspace, shard, message ) VALUES ( - NOW(), ?, ?, ?, ?, ?, ? + NOW(), ?, ?, ?, ?, ? 
) `, auditType, - instanceKey.Hostname, - instanceKey.Port, + tabletAlias, keyspace, shard, message, @@ -98,7 +93,7 @@ func AuditOperation(auditType string, instanceKey *InstanceKey, message string) return err } } - logMessage := fmt.Sprintf("auditType:%s instance:%s keyspace:%s shard:%s message:%s", auditType, instanceKey.DisplayString(), keyspace, shard, message) + logMessage := fmt.Sprintf("auditType:%s alias:%s keyspace:%s shard:%s message:%s", auditType, tabletAlias, keyspace, shard, message) if syslogWriter != nil { auditWrittenToFile = true go func() { @@ -113,52 +108,6 @@ func AuditOperation(auditType string, instanceKey *InstanceKey, message string) return nil } -// ReadRecentAudit returns a list of audit entries order chronologically descending, using page number. -func ReadRecentAudit(instanceKey *InstanceKey, page int) ([]Audit, error) { - res := []Audit{} - args := sqlutils.Args() - whereCondition := `` - if instanceKey != nil { - whereCondition = `where hostname=? and port=?` - args = append(args, instanceKey.Hostname, instanceKey.Port) - } - query := fmt.Sprintf(` - select - audit_id, - audit_timestamp, - audit_type, - hostname, - port, - message - from - audit - %s - order by - audit_timestamp desc - limit ? - offset ? 
- `, whereCondition) - args = append(args, config.AuditPageSize, page*config.AuditPageSize) - err := db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { - audit := Audit{} - audit.AuditID = m.GetInt64("audit_id") - audit.AuditTimestamp = m.GetString("audit_timestamp") - audit.AuditType = m.GetString("audit_type") - audit.AuditInstanceKey.Hostname = m.GetString("hostname") - audit.AuditInstanceKey.Port = m.GetInt("port") - audit.Message = m.GetString("message") - - res = append(res, audit) - return nil - }) - - if err != nil { - log.Error(err) - } - return res, err - -} - // ExpireAudit removes old rows from the audit table func ExpireAudit() error { return ExpireTableData("audit", "audit_timestamp") diff --git a/go/vt/vtorc/inst/audit_dao_test.go b/go/vt/vtorc/inst/audit_dao_test.go index 4a6533077c2..1d50de4c146 100644 --- a/go/vt/vtorc/inst/audit_dao_test.go +++ b/go/vt/vtorc/inst/audit_dao_test.go @@ -17,18 +17,22 @@ limitations under the License. package inst import ( + "fmt" "os" "testing" "time" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/external/golib/sqlutils" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/db" ) // TestAuditOperation tests that auditing a operation works as intended based on the configurations. +// This test also verifies that we are able to read the recent audits that are written to the databaes. 
func TestAuditOperation(t *testing.T) { // Restore original configurations originalAuditSysLog := config.Config.AuditToSyslog @@ -69,32 +73,39 @@ func TestAuditOperation(t *testing.T) { err = SaveTablet(tab100) require.NoError(t, err) - instance100 := &InstanceKey{ - Hostname: hostname, - Port: int(port), - } + tab100Alias := topoproto.TabletAliasString(tab100.Alias) auditType := "test-audit-operation" message := "test-message" - t.Run("Audit to backend", func(t *testing.T) { + t.Run("audit to backend", func(t *testing.T) { config.Config.AuditLogFile = "" config.Config.AuditToSyslog = false config.Config.AuditToBackendDB = true // Auditing should succeed as expected - err = AuditOperation(auditType, instance100, message) + err = AuditOperation(auditType, tab100Alias, message) + require.NoError(t, err) + + // Check that we can read the recent audits + audits, err := readRecentAudit(tab100Alias, 0) require.NoError(t, err) + require.Len(t, audits, 1) + require.EqualValues(t, 1, audits[0].AuditID) + require.EqualValues(t, auditType, audits[0].AuditType) + require.EqualValues(t, message, audits[0].Message) + require.EqualValues(t, tab100Alias, audits[0].AuditTabletAlias) - audits, err := ReadRecentAudit(instance100, 0) + // Check the same for no-filtering + audits, err = readRecentAudit("", 0) require.NoError(t, err) require.Len(t, audits, 1) require.EqualValues(t, 1, audits[0].AuditID) require.EqualValues(t, auditType, audits[0].AuditType) require.EqualValues(t, message, audits[0].Message) - require.EqualValues(t, *instance100, audits[0].AuditInstanceKey) + require.EqualValues(t, tab100Alias, audits[0].AuditTabletAlias) }) - t.Run("Audit to File", func(t *testing.T) { + t.Run("audit to File", func(t *testing.T) { config.Config.AuditToBackendDB = false config.Config.AuditToSyslog = false @@ -103,7 +114,7 @@ func TestAuditOperation(t *testing.T) { defer os.Remove(file.Name()) config.Config.AuditLogFile = file.Name() - err = AuditOperation(auditType, instance100, message) 
+ err = AuditOperation(auditType, tab100Alias, message) require.NoError(t, err) // Give a little time for the write to succeed since it happens in a separate go-routine @@ -112,6 +123,54 @@ func TestAuditOperation(t *testing.T) { time.Sleep(100 * time.Millisecond) fileContent, err := os.ReadFile(file.Name()) require.NoError(t, err) - require.Contains(t, string(fileContent), "\ttest-audit-operation\tlocalhost\t100\t[ks:0]\ttest-message") + require.Contains(t, string(fileContent), "\ttest-audit-operation\tzone-1-0000000100\t[ks:0]\ttest-message") + }) +} + +// audit presents a single audit entry (namely in the database) +type audit struct { + AuditID int64 + AuditTimestamp string + AuditType string + AuditTabletAlias string + Message string +} + +// readRecentAudit returns a list of audit entries order chronologically descending, using page number. +func readRecentAudit(tabletAlias string, page int) ([]audit, error) { + res := []audit{} + var args []any + whereCondition := `` + if tabletAlias != "" { + whereCondition = `where alias=?` + args = append(args, tabletAlias) + } + query := fmt.Sprintf(` + select + audit_id, + audit_timestamp, + audit_type, + alias, + message + from + audit + %s + order by + audit_timestamp desc + limit ? + offset ? 
+ `, whereCondition) + args = append(args, config.AuditPageSize, page*config.AuditPageSize) + err := db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { + a := audit{} + a.AuditID = m.GetInt64("audit_id") + a.AuditTimestamp = m.GetString("audit_timestamp") + a.AuditType = m.GetString("audit_type") + a.AuditTabletAlias = m.GetString("alias") + a.Message = m.GetString("message") + + res = append(res, a) + return nil }) + return res, err } diff --git a/go/vt/vtorc/inst/candidate_database_instance.go b/go/vt/vtorc/inst/candidate_database_instance.go deleted file mode 100644 index 5cd4b5c6a0b..00000000000 --- a/go/vt/vtorc/inst/candidate_database_instance.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - Copyright 2016 Simon J Mudd - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package inst - -import ( - "fmt" - - "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" - "vitess.io/vitess/go/vt/vtorc/db" -) - -// CandidateDatabaseInstance contains information about explicit promotion rules for an instance -type CandidateDatabaseInstance struct { - Hostname string - Port int - PromotionRule promotionrule.CandidatePromotionRule - LastSuggestedString string - PromotionRuleExpiry string // generated when retrieved from database for consistency reasons -} - -func NewCandidateDatabaseInstance(instanceKey *InstanceKey, promotionRule promotionrule.CandidatePromotionRule) *CandidateDatabaseInstance { - return &CandidateDatabaseInstance{ - Hostname: instanceKey.Hostname, - Port: instanceKey.Port, - PromotionRule: promotionRule, - } -} - -func (cdi *CandidateDatabaseInstance) WithCurrentTime() *CandidateDatabaseInstance { - cdi.LastSuggestedString, _ = db.ReadTimeNow() - return cdi -} - -// String returns a string representation of the CandidateDatabaseInstance struct -func (cdi *CandidateDatabaseInstance) String() string { - return fmt.Sprintf("%s:%d %s", cdi.Hostname, cdi.Port, cdi.PromotionRule) -} - -// Key returns an instance key representing this candidate -func (cdi *CandidateDatabaseInstance) Key() *InstanceKey { - return &InstanceKey{Hostname: cdi.Hostname, Port: cdi.Port} -} diff --git a/go/vt/vtorc/inst/candidate_database_instance_dao.go b/go/vt/vtorc/inst/candidate_database_instance_dao.go deleted file mode 100644 index 95bbb53f617..00000000000 --- a/go/vt/vtorc/inst/candidate_database_instance_dao.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - Copyright 2016 Simon J Mudd - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "vitess.io/vitess/go/vt/external/golib/sqlutils" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/vtorc/config" - "vitess.io/vitess/go/vt/vtorc/db" -) - -// RegisterCandidateInstance markes a given instance as suggested for succeeding a primary in the event of failover. -func RegisterCandidateInstance(candidate *CandidateDatabaseInstance) error { - if candidate.LastSuggestedString == "" { - candidate = candidate.WithCurrentTime() - } - args := sqlutils.Args(candidate.Hostname, candidate.Port, string(candidate.PromotionRule), candidate.LastSuggestedString) - - query := ` - insert into candidate_database_instance ( - hostname, - port, - promotion_rule, - last_suggested - ) values ( - ?, ?, ?, ? - ) on duplicate key update - last_suggested=values(last_suggested), - promotion_rule=values(promotion_rule) - ` - writeFunc := func() error { - _, err := db.ExecVTOrc(query, args...) - if err != nil { - log.Error(err) - } - return err - } - return ExecDBWriteFunc(writeFunc) -} - -// ExpireCandidateInstances removes stale primary candidate suggestions. -func ExpireCandidateInstances() error { - writeFunc := func() error { - _, err := db.ExecVTOrc(` - delete from candidate_database_instance - where last_suggested < NOW() - INTERVAL ? 
MINUTE - `, config.CandidateInstanceExpireMinutes, - ) - if err != nil { - log.Error(err) - } - return err - } - return ExecDBWriteFunc(writeFunc) -} diff --git a/go/vt/vtorc/inst/downtime.go b/go/vt/vtorc/inst/downtime.go deleted file mode 100644 index 7110df1e60b..00000000000 --- a/go/vt/vtorc/inst/downtime.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - Copyright 2017 Shlomi Noach, GitHub Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "time" -) - -type Downtime struct { - Key *InstanceKey - Owner string - Reason string - Duration time.Duration - BeginsAt time.Time - EndsAt time.Time - BeginsAtString string - EndsAtString string -} - -func NewDowntime(instanceKey *InstanceKey, owner string, reason string, duration time.Duration) *Downtime { - downtime := &Downtime{ - Key: instanceKey, - Owner: owner, - Reason: reason, - Duration: duration, - BeginsAt: time.Now(), - } - downtime.EndsAt = downtime.BeginsAt.Add(downtime.Duration) - return downtime -} - -func (downtime *Downtime) Ended() bool { - return downtime.EndsAt.Before(time.Now()) -} - -func (downtime *Downtime) EndsIn() time.Duration { - return time.Until(downtime.EndsAt) -} diff --git a/go/vt/vtorc/inst/downtime_dao.go b/go/vt/vtorc/inst/downtime_dao.go deleted file mode 100644 index 53b12e325e8..00000000000 --- a/go/vt/vtorc/inst/downtime_dao.go +++ /dev/null @@ -1,193 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the 
"License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "fmt" - "time" - - "vitess.io/vitess/go/vt/log" - - "vitess.io/vitess/go/vt/vtorc/config" - "vitess.io/vitess/go/vt/vtorc/db" -) - -// BeginDowntime will make mark an instance as downtimed (or override existing downtime period) -func BeginDowntime(downtime *Downtime) (err error) { - if downtime.Duration == 0 { - downtime.Duration = config.MaintenanceExpireMinutes * time.Minute - } - if downtime.EndsAtString != "" { - _, err = db.ExecVTOrc(` - insert - into database_instance_downtime ( - hostname, port, downtime_active, begin_timestamp, end_timestamp, owner, reason - ) VALUES ( - ?, ?, 1, ?, ?, ?, ? - ) - on duplicate key update - downtime_active=values(downtime_active), - begin_timestamp=values(begin_timestamp), - end_timestamp=values(end_timestamp), - owner=values(owner), - reason=values(reason) - `, - downtime.Key.Hostname, - downtime.Key.Port, - downtime.BeginsAtString, - downtime.EndsAtString, - downtime.Owner, - downtime.Reason, - ) - } else { - if downtime.Ended() { - // No point in writing it down; it's expired - return nil - } - - _, err = db.ExecVTOrc(` - insert - into database_instance_downtime ( - hostname, port, downtime_active, begin_timestamp, end_timestamp, owner, reason - ) VALUES ( - ?, ?, 1, NOW(), NOW() + INTERVAL ? SECOND, ?, ? 
- ) - on duplicate key update - downtime_active=values(downtime_active), - begin_timestamp=values(begin_timestamp), - end_timestamp=values(end_timestamp), - owner=values(owner), - reason=values(reason) - `, - downtime.Key.Hostname, - downtime.Key.Port, - int(downtime.EndsIn().Seconds()), - downtime.Owner, - downtime.Reason, - ) - } - if err != nil { - log.Error(err) - return err - } - _ = AuditOperation("begin-downtime", downtime.Key, fmt.Sprintf("owner: %s, reason: %s", downtime.Owner, downtime.Reason)) - - return nil -} - -// EndDowntime will remove downtime flag from an instance -func EndDowntime(instanceKey *InstanceKey) (wasDowntimed bool, err error) { - res, err := db.ExecVTOrc(` - delete from - database_instance_downtime - where - hostname = ? - and port = ? - `, - instanceKey.Hostname, - instanceKey.Port, - ) - if err != nil { - log.Error(err) - return wasDowntimed, err - } - - if affected, _ := res.RowsAffected(); affected > 0 { - wasDowntimed = true - _ = AuditOperation("end-downtime", instanceKey, "") - } - return wasDowntimed, err -} - -// renewLostInRecoveryDowntime renews hosts who are downtimed due to being lost in recovery, such that -// their downtime never expires. -func renewLostInRecoveryDowntime() error { - _, err := db.ExecVTOrc(` - update - database_instance_downtime - set - end_timestamp = NOW() + INTERVAL ? SECOND - where - end_timestamp > NOW() - and reason = ? - `, - config.LostInRecoveryDowntimeSeconds, - DowntimeLostInRecoveryMessage, - ) - - return err -} - -// expireLostInRecoveryDowntime expires downtime for servers who have been lost in recovery in the last, -// but are now replicating. 
-func expireLostInRecoveryDowntime() error { - instances, err := ReadLostInRecoveryInstances("", "") - if err != nil { - return err - } - if len(instances) == 0 { - return nil - } - for _, instance := range instances { - // We _may_ expire this downtime, but only after a minute - // This is a graceful period, during which other servers can claim ownership of the alias, - // or can update their own cluster name to match a new primary's name - if instance.ElapsedDowntime < time.Minute { - continue - } - if !instance.IsLastCheckValid { - continue - } - if instance.ReplicaRunning() { - // back, alive, replicating in some topology - if _, err := EndDowntime(&instance.Key); err != nil { - return err - } - } - } - return nil -} - -// ExpireDowntime will remove the maintenance flag on old downtimes -func ExpireDowntime() error { - if err := renewLostInRecoveryDowntime(); err != nil { - log.Error(err) - return err - } - if err := expireLostInRecoveryDowntime(); err != nil { - log.Error(err) - return err - } - { - res, err := db.ExecVTOrc(` - delete from - database_instance_downtime - where - end_timestamp < NOW() - `, - ) - if err != nil { - log.Error(err) - return err - } - if rowsAffected, _ := res.RowsAffected(); rowsAffected > 0 { - _ = AuditOperation("expire-downtime", nil, fmt.Sprintf("Expired %d entries", rowsAffected)) - } - } - - return nil -} diff --git a/go/vt/vtorc/inst/durability.go b/go/vt/vtorc/inst/durability.go deleted file mode 100644 index 272fa838af8..00000000000 --- a/go/vt/vtorc/inst/durability.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package inst - -import ( - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/vtctl/reparentutil" - "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" -) - -// IsReplicaSemiSync returns the replica semi-sync setting for the instance. -func IsReplicaSemiSync[V InstanceKey | *topodatapb.Tablet](durabilityPolicy reparentutil.Durabler, primaryInstance V, replicaInstance V) bool { - primary, err := getTablet(primaryInstance) - if err != nil { - return false - } - replica, err := getTablet(replicaInstance) - if err != nil { - return false - } - return reparentutil.IsReplicaSemiSync(durabilityPolicy, primary, replica) -} - -// SemiSyncAckers returns the primary semi-sync setting for the instance. -// 0 means none. Non-zero specifies the number of required ackers. -func SemiSyncAckers[V InstanceKey | *topodatapb.Tablet](durabilityPolicy reparentutil.Durabler, instance V) int { - primary, err := getTablet(instance) - if err != nil { - return 0 - } - return reparentutil.SemiSyncAckers(durabilityPolicy, primary) -} - -// PromotionRule returns the promotion rule for the instance. 
-func PromotionRule[V InstanceKey | *topodatapb.Tablet](durabilityPolicy reparentutil.Durabler, instance V) promotionrule.CandidatePromotionRule { - tablet, err := getTablet(instance) - if err != nil { - return promotionrule.MustNot - } - return reparentutil.PromotionRule(durabilityPolicy, tablet) -} - -func getTablet[V InstanceKey | *topodatapb.Tablet](instance V) (*topodatapb.Tablet, error) { - var instanceTablet *topodatapb.Tablet - var err error - switch node := any(instance).(type) { - case InstanceKey: - instanceTablet, err = ReadTablet(node) - if err != nil { - return nil, err - } - case *topodatapb.Tablet: - instanceTablet = node - } - return instanceTablet, nil -} - -// GetDurabilityPolicy gets the durability policy for the keyspace of the given instance -func GetDurabilityPolicy[V InstanceKey | *topodatapb.Tablet](instance V) (reparentutil.Durabler, error) { - tablet, err := getTablet(instance) - if err != nil { - return nil, err - } - ki, err := ReadKeyspace(tablet.Keyspace) - if err != nil { - return nil, err - } - return reparentutil.GetDurabilityPolicy(ki.DurabilityPolicy) -} diff --git a/go/vt/vtorc/inst/instance.go b/go/vt/vtorc/inst/instance.go index dd1526ff090..1216d4c24ae 100644 --- a/go/vt/vtorc/inst/instance.go +++ b/go/vt/vtorc/inst/instance.go @@ -21,16 +21,13 @@ import ( "encoding/json" "strings" "time" - - "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" ) -const ReasonableDiscoveryLatency = 500 * time.Millisecond - // Instance represents a database instance, including its current configuration & status. // It presents important replication configuration and detailed replication status. 
type Instance struct { - Key InstanceKey + Hostname string + Port int InstanceAlias string ServerID uint ServerUUID string @@ -43,10 +40,10 @@ type Instance struct { LogBinEnabled bool LogReplicationUpdatesEnabled bool SelfBinlogCoordinates BinlogCoordinates - SourceKey InstanceKey + SourceHost string + SourcePort int SourceUUID string AncestryUUID string - IsDetachedPrimary bool ReplicationSQLThreadRuning bool ReplicationIOThreadRuning bool @@ -95,50 +92,18 @@ type Instance struct { IsRecentlyChecked bool SecondsSinceLastSeen sql.NullInt64 - // Careful. IsCandidate and PromotionRule are used together - // and probably need to be merged. IsCandidate's value may - // be picked up from daabase_candidate_instance's value when - // reading an instance from the db. - IsCandidate bool - PromotionRule promotionrule.CandidatePromotionRule - IsDowntimed bool - DowntimeReason string - DowntimeOwner string - DowntimeEndTimestamp string - ElapsedDowntime time.Duration - UnresolvedHostname string - AllowTLS bool + AllowTLS bool Problems []string LastDiscoveryLatency time.Duration - - seed bool // Means we force this instance to be written to backend, even if it's invalid, empty or forgotten - - /* All things Group Replication below */ - - // Group replication global variables - ReplicationGroupName string - ReplicationGroupIsSinglePrimary bool - - // Replication group members information. See - // https://dev.mysql.com/doc/refman/8.0/en/replication-group-members-table.html for details. 
- ReplicationGroupMemberState string - ReplicationGroupMemberRole string - - // List of all known members of the same group - ReplicationGroupMembers InstanceKeyMap - - // Primary of the replication group - ReplicationGroupPrimaryInstanceKey InstanceKey } // NewInstance creates a new, empty instance func NewInstance() *Instance { return &Instance{ - ReplicationGroupMembers: make(map[InstanceKey]bool), - Problems: []string{}, + Problems: []string{}, } } @@ -154,7 +119,7 @@ func (instance *Instance) MarshalJSON() ([]byte, error) { // Equals tests that this instance is the same instance as other. The function does not test // configuration or status. func (instance *Instance) Equals(other *Instance) bool { - return instance.Key == other.Key + return instance.InstanceAlias == other.InstanceAlias } // MajorVersion returns this instance's major version number (e.g. for 5.5.36 it returns "5.5") @@ -162,48 +127,11 @@ func (instance *Instance) MajorVersion() []string { return MajorVersion(instance.Version) } -// MajorVersion returns this instance's major version number (e.g. for 5.5.36 it returns "5.5") +// MajorVersionString returns this instance's major version number (e.g. for 5.5.36 it returns "5.5") func (instance *Instance) MajorVersionString() string { return strings.Join(instance.MajorVersion(), ".") } -func (instance *Instance) IsMySQL51() bool { - return instance.MajorVersionString() == "5.1" -} - -func (instance *Instance) IsMySQL55() bool { - return instance.MajorVersionString() == "5.5" -} - -func (instance *Instance) IsMySQL56() bool { - return instance.MajorVersionString() == "5.6" -} - -func (instance *Instance) IsMySQL57() bool { - return instance.MajorVersionString() == "5.7" -} - -func (instance *Instance) IsMySQL80() bool { - return instance.MajorVersionString() == "8.0" -} - -// IsSmallerBinlogFormat returns true when this instance's binlgo format is -// "smaller" than the other's, i.e. 
binary logs cannot flow from the other instance to this one -func (instance *Instance) IsSmallerBinlogFormat(other *Instance) bool { - return IsSmallerBinlogFormat(instance.BinlogFormat, other.BinlogFormat) -} - -// IsSmallerMajorVersion tests this instance against another and returns true if this instance is of a smaller "major" varsion. -// e.g. 5.5.36 is NOT a smaller major version as comapred to 5.5.36, but IS as compared to 5.6.9 -func (instance *Instance) IsSmallerMajorVersion(other *Instance) bool { - return IsSmallerMajorVersion(instance.Version, other.Version) -} - -// IsSmallerMajorVersionByString checks if this instance has a smaller major version number than given one -func (instance *Instance) IsSmallerMajorVersionByString(otherVersion string) bool { - return IsSmallerMajorVersion(instance.Version, otherVersion) -} - // IsMariaDB checks whether this is any version of MariaDB func (instance *Instance) IsMariaDB() bool { return strings.Contains(instance.Version, "MariaDB") @@ -214,26 +142,6 @@ func (instance *Instance) IsPercona() bool { return strings.Contains(instance.VersionComment, "Percona") } -// isNDB check whether this is NDB Cluster (aka MySQL Cluster) -func (instance *Instance) IsNDB() bool { - return strings.Contains(instance.Version, "-ndb-") -} - -// IsReplicationGroup checks whether the host thinks it is part of a known replication group. 
Notice that this might -// return True even if the group has decided to expel the member represented by this instance, as the instance might not -// know that under certain circumstances -func (instance *Instance) IsReplicationGroupMember() bool { - return instance.ReplicationGroupName != "" -} - -func (instance *Instance) IsReplicationGroupPrimary() bool { - return instance.IsReplicationGroupMember() && instance.ReplicationGroupPrimaryInstanceKey.Equals(&instance.Key) -} - -func (instance *Instance) IsReplicationGroupSecondary() bool { - return instance.IsReplicationGroupMember() && !instance.ReplicationGroupPrimaryInstanceKey.Equals(&instance.Key) -} - // IsBinlogServer checks whether this is any type of a binlog server func (instance *Instance) IsBinlogServer() bool { return false @@ -253,13 +161,6 @@ func (instance *Instance) IsOracleMySQL() bool { return true } -func (instance *Instance) SetSeed() { - instance.seed = true -} -func (instance *Instance) IsSeed() bool { - return instance.seed -} - // applyFlavorName func (instance *Instance) applyFlavorName() { if instance == nil { @@ -288,27 +189,13 @@ func (instance *Instance) FlavorNameAndMajorVersion() string { // IsReplica makes simple heuristics to decide whether this instance is a replica of another instance func (instance *Instance) IsReplica() bool { - return instance.SourceKey.Hostname != "" && instance.SourceKey.Hostname != "_" && instance.SourceKey.Port != 0 && (instance.ReadBinlogCoordinates.LogFile != "" || instance.UsingGTID()) + return instance.SourceHost != "" && instance.SourceHost != "_" && instance.SourcePort != 0 && (instance.ReadBinlogCoordinates.LogFile != "" || instance.UsingGTID()) } // IsPrimary makes simple heuristics to decide whether this instance is a primary (not replicating from any other server), // either via traditional async/semisync replication or group replication func (instance *Instance) IsPrimary() bool { - // If traditional replication is configured, it is for sure not a 
primary - if instance.IsReplica() { - return false - } - // If traditional replication is not configured, and it is also not part of a replication group, this host is - // a primary - if !instance.IsReplicationGroupMember() { - return true - } - // If traditional replication is not configured, and this host is part of a group, it is only considered a - // primary if it has the role of group Primary. Otherwise it is not a primary. - if instance.ReplicationGroupMemberRole == GroupReplicationMemberRolePrimary { - return true - } - return false + return !instance.IsReplica() } // ReplicaRunning returns true when this instance's status is of a replicating replica. @@ -335,8 +222,3 @@ func (instance *Instance) SQLThreadUpToDate() bool { func (instance *Instance) UsingGTID() bool { return instance.UsingOracleGTID || instance.UsingMariaDBGTID } - -// AddGroupMemberKey adds a group member to the list of this instance's group members. -func (instance *Instance) AddGroupMemberKey(groupMemberKey *InstanceKey) { - instance.ReplicationGroupMembers.AddKey(*groupMemberKey) -} diff --git a/go/vt/vtorc/inst/instance_binlog.go b/go/vt/vtorc/inst/instance_binlog.go deleted file mode 100644 index 629eb11a3e4..00000000000 --- a/go/vt/vtorc/inst/instance_binlog.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package inst - -import ( - "regexp" -) - -// Event entries may contains table IDs (can be different for same tables on different servers) -// and also COMMIT transaction IDs (different values on different servers). -// So these need to be removed from the event entry if we're to compare and validate matching -// entries. -var eventInfoTransformations = map[*regexp.Regexp]string{ - regexp.MustCompile(`(.*) [/][*].*?[*][/](.*$)`): "$1 $2", // strip comments - regexp.MustCompile(`(COMMIT) .*$`): "$1", // commit number varies cross servers - regexp.MustCompile(`(table_id:) [0-9]+ (.*$)`): "$1 ### $2", // table ids change cross servers - regexp.MustCompile(`(table_id:) [0-9]+$`): "$1 ###", // table ids change cross servers - regexp.MustCompile(` X'([0-9a-fA-F]+)' COLLATE`): " 0x$1 COLLATE", // different ways to represent collate - regexp.MustCompile(`(BEGIN GTID [^ ]+) cid=.*`): "$1", // MariaDB GTID someimtes gets addition of "cid=...". Stripping -} - -type BinlogEvent struct { - Coordinates BinlogCoordinates - NextEventPos uint32 - EventType string - Info string -} - -func (binlogEvent *BinlogEvent) NextBinlogCoordinates() BinlogCoordinates { - return BinlogCoordinates{LogFile: binlogEvent.Coordinates.LogFile, LogPos: binlogEvent.NextEventPos, Type: binlogEvent.Coordinates.Type} -} - -func (binlogEvent *BinlogEvent) NormalizeInfo() { - for reg, replace := range eventInfoTransformations { - binlogEvent.Info = reg.ReplaceAllString(binlogEvent.Info, replace) - } -} - -func (binlogEvent *BinlogEvent) Equals(other *BinlogEvent) bool { - return binlogEvent.Coordinates.Equals(&other.Coordinates) && - binlogEvent.NextEventPos == other.NextEventPos && - binlogEvent.EventType == other.EventType && binlogEvent.Info == other.Info -} - -func (binlogEvent *BinlogEvent) EqualsIgnoreCoordinates(other *BinlogEvent) bool { - return binlogEvent.NextEventPos == other.NextEventPos && - binlogEvent.EventType == other.EventType && binlogEvent.Info == other.Info -} diff --git 
a/go/vt/vtorc/inst/instance_dao.go b/go/vt/vtorc/inst/instance_dao.go index 26c1eb1bfa6..211ddce69b1 100644 --- a/go/vt/vtorc/inst/instance_dao.go +++ b/go/vt/vtorc/inst/instance_dao.go @@ -25,28 +25,27 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" "github.com/patrickmn/go-cache" "github.com/rcrowley/go-metrics" "github.com/sjmudd/stopwatch" - "vitess.io/vitess/go/vt/external/golib/sqlutils" - - vitessmysql "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/tb" + "vitess.io/vitess/go/vt/external/golib/sqlutils" "vitess.io/vitess/go/vt/log" - replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/vtctl/reparentutil" - "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" "vitess.io/vitess/go/vt/vtorc/collection" "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/metrics/query" "vitess.io/vitess/go/vt/vtorc/util" - math "vitess.io/vitess/go/vt/vtorc/util" + + replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) const ( @@ -56,21 +55,7 @@ const ( var instanceReadChan = make(chan bool, backendDBConcurrency) var instanceWriteChan = make(chan bool, backendDBConcurrency) -// Constant strings for Group Replication information -// See https://dev.mysql.com/doc/refman/8.0/en/replication-group-members-table.html for additional information. 
-const ( - // Group member roles - GroupReplicationMemberRolePrimary = "PRIMARY" - GroupReplicationMemberRoleSecondary = "SECONDARY" - // Group member states - GroupReplicationMemberStateOnline = "ONLINE" - GroupReplicationMemberStateRecovering = "RECOVERING" - GroupReplicationMemberStateUnreachable = "UNREACHABLE" - GroupReplicationMemberStateOffline = "OFFLINE" - GroupReplicationMemberStateError = "ERROR" -) - -var forgetInstanceKeys *cache.Cache +var forgetAliases *cache.Cache var accessDeniedCounter = metrics.NewCounter() var readTopologyInstanceCounter = metrics.NewCounter() @@ -80,6 +65,7 @@ var backendWrites = collection.CreateOrReturnCollection("BACKEND_WRITES") var writeBufferLatency = stopwatch.NewNamedStopwatch() var emptyQuotesRegexp = regexp.MustCompile(`^""$`) +var cacheInitializationCompleted atomic.Bool func init() { _ = metrics.Register("instance.access_denied", accessDeniedCounter) @@ -94,7 +80,8 @@ func init() { func initializeInstanceDao() { config.WaitForConfigurationToBeLoaded() - forgetInstanceKeys = cache.New(time.Duration(config.Config.InstancePollSeconds*3)*time.Second, time.Second) + forgetAliases = cache.New(time.Duration(config.Config.InstancePollSeconds*3)*time.Second, time.Second) + cacheInitializationCompleted.Store(true) } // ExecDBWriteFunc chooses how to execute a write onto the database: whether synchronuously or not @@ -137,19 +124,19 @@ func ExpireTableData(tableName string, timestampColumn string) error { // logReadTopologyInstanceError logs an error, if applicable, for a ReadTopologyInstance operation, // providing context and hint as for the source of the error. If there's no hint just provide the // original error. 
-func logReadTopologyInstanceError(instanceKey *InstanceKey, hint string, err error) error { +func logReadTopologyInstanceError(tabletAlias string, hint string, err error) error { if err == nil { return nil } - if !util.ClearToLog("ReadTopologyInstance", instanceKey.StringCode()) { + if !util.ClearToLog("ReadTopologyInstance", tabletAlias) { return err } var msg string if hint == "" { - msg = fmt.Sprintf("ReadTopologyInstance(%+v): %+v", *instanceKey, err) + msg = fmt.Sprintf("ReadTopologyInstance(%+v): %+v", tabletAlias, err) } else { msg = fmt.Sprintf("ReadTopologyInstance(%+v) %+v: %+v", - *instanceKey, + tabletAlias, strings.Replace(hint, "%", "%%", -1), // escape % err) } @@ -157,11 +144,19 @@ func logReadTopologyInstanceError(instanceKey *InstanceKey, hint string, err err return fmt.Errorf(msg) } +// RegisterStats registers stats from the inst package +func RegisterStats() { + stats.NewGaugeFunc("ErrantGtidTabletCount", "Number of tablets with errant GTIDs", func() int64 { + instances, _ := ReadInstancesWithErrantGTIds("", "") + return int64(len(instances)) + }) +} + // ReadTopologyInstance collects information on the state of a MySQL // server and writes the result synchronously to the vtorc // backend. -func ReadTopologyInstance(instanceKey *InstanceKey) (*Instance, error) { - return ReadTopologyInstanceBufferable(instanceKey, nil) +func ReadTopologyInstance(tabletAlias string) (*Instance, error) { + return ReadTopologyInstanceBufferable(tabletAlias, nil) } // ReadTopologyInstanceBufferable connects to a topology MySQL instance @@ -169,43 +164,35 @@ func ReadTopologyInstance(instanceKey *InstanceKey) (*Instance, error) { // It writes the information retrieved into vtorc's backend. // - writes are optionally buffered. // - timing information can be collected for the stages performed. 
-func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, latency *stopwatch.NamedStopwatch) (inst *Instance, err error) { +func ReadTopologyInstanceBufferable(tabletAlias string, latency *stopwatch.NamedStopwatch) (inst *Instance, err error) { defer func() { if r := recover(); r != nil { - err = logReadTopologyInstanceError(instanceKey, "Unexpected, aborting", tb.Errorf("%+v", r)) + err = logReadTopologyInstanceError(tabletAlias, "Unexpected, aborting", tb.Errorf("%+v", r)) } }() var waitGroup sync.WaitGroup var tablet *topodatapb.Tablet - var durability reparentutil.Durabler var fullStatus *replicationdatapb.FullStatus readingStartTime := time.Now() instance := NewInstance() instanceFound := false partialSuccess := false - resolvedHostname := "" errorChan := make(chan error, 32) - var resolveErr error - if !instanceKey.IsValid() { - latency.Start("backend") - if err := UpdateInstanceLastAttemptedCheck(instanceKey); err != nil { - log.Errorf("ReadTopologyInstanceBufferable: %+v: %v", instanceKey, err) - } - latency.Stop("backend") - return instance, fmt.Errorf("ReadTopologyInstance will not act on invalid instance key: %+v", *instanceKey) + if tabletAlias == "" { + return instance, fmt.Errorf("ReadTopologyInstance will not act on empty tablet alias") } lastAttemptedCheckTimer := time.AfterFunc(time.Second, func() { go func() { - _ = UpdateInstanceLastAttemptedCheck(instanceKey) + _ = UpdateInstanceLastAttemptedCheck(tabletAlias) }() }) latency.Start("instance") - tablet, err = ReadTablet(*instanceKey) + tablet, err = ReadTablet(tabletAlias) if err != nil { goto Cleanup } @@ -216,18 +203,14 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, latency *stopwatch goto Cleanup } - durability, err = GetDurabilityPolicy(tablet) - if err != nil { - goto Cleanup - } - - fullStatus, err = FullStatus(*instanceKey) + fullStatus, err = FullStatus(tabletAlias) if err != nil { goto Cleanup } partialSuccess = true // We at least managed to read something from 
the server. - instance.Key = *instanceKey + instance.Hostname = tablet.MysqlHostname + instance.Port = int(tablet.MysqlPort) { // We begin with a few operations we can run concurrently, and which do not depend on anything instance.ServerID = uint(fullStatus.ServerId) @@ -237,7 +220,6 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, latency *stopwatch instance.BinlogFormat = fullStatus.BinlogFormat instance.LogReplicationUpdatesEnabled = fullStatus.LogReplicaUpdates instance.VersionComment = fullStatus.VersionComment - resolvedHostname = instance.Key.Hostname if instance.LogBinEnabled && fullStatus.PrimaryStatus != nil { binlogPos, err := getBinlogCoordinatesFromPositionString(fullStatus.PrimaryStatus.FilePosition) @@ -254,20 +236,20 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, latency *stopwatch instance.SemiSyncPrimaryStatus = fullStatus.SemiSyncPrimaryStatus instance.SemiSyncReplicaStatus = fullStatus.SemiSyncReplicaStatus - if (instance.IsOracleMySQL() || instance.IsPercona()) && !instance.IsSmallerMajorVersionByString("5.6") { - // Stuff only supported on Oracle MySQL >= 5.6 + if instance.IsOracleMySQL() || instance.IsPercona() { + // Stuff only supported on Oracle / Percona MySQL // ... 
- // @@gtid_mode only available in Orcale MySQL >= 5.6 + // @@gtid_mode only available in Oracle / Percona MySQL >= 5.6 instance.GTIDMode = fullStatus.GtidMode instance.ServerUUID = fullStatus.ServerUuid if fullStatus.PrimaryStatus != nil { - GtidExecutedPos, err := vitessmysql.DecodePosition(fullStatus.PrimaryStatus.Position) + GtidExecutedPos, err := replication.DecodePosition(fullStatus.PrimaryStatus.Position) errorChan <- err if err == nil && GtidExecutedPos.GTIDSet != nil { instance.ExecutedGtidSet = GtidExecutedPos.GTIDSet.String() } } - GtidPurgedPos, err := vitessmysql.DecodePosition(fullStatus.GtidPurged) + GtidPurgedPos, err := replication.DecodePosition(fullStatus.GtidPurged) errorChan <- err if err == nil && GtidPurgedPos.GTIDSet != nil { instance.GtidPurged = GtidPurgedPos.GTIDSet.String() @@ -279,27 +261,14 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, latency *stopwatch } } } - if resolvedHostname != instance.Key.Hostname { - latency.Start("backend") - UpdateResolvedHostname(instance.Key.Hostname, resolvedHostname) - latency.Stop("backend") - instance.Key.Hostname = resolvedHostname - } - if instance.Key.Hostname == "" { - err = fmt.Errorf("ReadTopologyInstance: empty hostname (%+v). 
Bailing out", *instanceKey) - goto Cleanup - } - go func() { - _ = ResolveHostnameIPs(instance.Key.Hostname) - }() instance.ReplicationIOThreadState = ReplicationThreadStateNoThread instance.ReplicationSQLThreadState = ReplicationThreadStateNoThread if fullStatus.ReplicationStatus != nil { instance.HasReplicationCredentials = fullStatus.ReplicationStatus.SourceUser != "" - instance.ReplicationIOThreadState = ReplicationThreadStateFromReplicationState(vitessmysql.ReplicationState(fullStatus.ReplicationStatus.IoState)) - instance.ReplicationSQLThreadState = ReplicationThreadStateFromReplicationState(vitessmysql.ReplicationState(fullStatus.ReplicationStatus.SqlState)) + instance.ReplicationIOThreadState = ReplicationThreadStateFromReplicationState(replication.ReplicationState(fullStatus.ReplicationStatus.IoState)) + instance.ReplicationSQLThreadState = ReplicationThreadStateFromReplicationState(replication.ReplicationState(fullStatus.ReplicationStatus.SqlState)) instance.ReplicationIOThreadRuning = instance.ReplicationIOThreadState.IsRunning() instance.ReplicationSQLThreadRuning = instance.ReplicationSQLThreadState.IsRunning() @@ -326,17 +295,8 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, latency *stopwatch instance.SourceUUID = fullStatus.ReplicationStatus.SourceUuid instance.HasReplicationFilters = fullStatus.ReplicationStatus.HasReplicationFilters - primaryHostname := fullStatus.ReplicationStatus.SourceHost - primaryKey, err := NewResolveInstanceKey(primaryHostname, int(fullStatus.ReplicationStatus.SourcePort)) - if err != nil { - _ = logReadTopologyInstanceError(instanceKey, "NewResolveInstanceKey", err) - } - primaryKey.Hostname, resolveErr = ResolveHostname(primaryKey.Hostname) - if resolveErr != nil { - _ = logReadTopologyInstanceError(instanceKey, fmt.Sprintf("ResolveHostname(%q)", primaryKey.Hostname), resolveErr) - } - instance.SourceKey = *primaryKey - instance.IsDetachedPrimary = instance.SourceKey.IsDetached() + instance.SourceHost = 
fullStatus.ReplicationStatus.SourceHost + instance.SourcePort = int(fullStatus.ReplicationStatus.SourcePort) if fullStatus.ReplicationStatus.ReplicationLagUnknown { instance.SecondsBehindPrimary.Valid = false @@ -345,7 +305,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, latency *stopwatch instance.SecondsBehindPrimary.Int64 = int64(fullStatus.ReplicationStatus.ReplicationLagSeconds) } if instance.SecondsBehindPrimary.Valid && instance.SecondsBehindPrimary.Int64 < 0 { - log.Warningf("Host: %+v, instance.ReplicationLagSeconds < 0 [%+v], correcting to 0", instanceKey, instance.SecondsBehindPrimary.Int64) + log.Warningf("Alias: %+v, instance.SecondsBehindPrimary < 0 [%+v], correcting to 0", tabletAlias, instance.SecondsBehindPrimary.Int64) instance.SecondsBehindPrimary.Int64 = 0 } // And until told otherwise: @@ -368,16 +328,9 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, latency *stopwatch latency.Start("backend") err = ReadInstanceClusterAttributes(instance) latency.Stop("backend") - _ = logReadTopologyInstanceError(instanceKey, "ReadInstanceClusterAttributes", err) + _ = logReadTopologyInstanceError(tabletAlias, "ReadInstanceClusterAttributes", err) } - // We need to update candidate_database_instance. - // We register the rule even if it hasn't changed, - // to bump the last_suggested time. - instance.PromotionRule = PromotionRule(durability, tablet) - err = RegisterCandidateInstance(NewCandidateDatabaseInstance(instanceKey, instance.PromotionRule).WithCurrentTime()) - _ = logReadTopologyInstanceError(instanceKey, "RegisterCandidateInstance", err) - Cleanup: waitGroup.Wait() close(errorChan) @@ -403,7 +356,6 @@ Cleanup: } // Add replication group ancestry UUID as well. Otherwise, VTOrc thinks there are errant GTIDs in group // members and its replicas, even though they are not. 
- instance.AncestryUUID = fmt.Sprintf("%s,%s", instance.AncestryUUID, instance.ReplicationGroupName) instance.AncestryUUID = strings.Trim(instance.AncestryUUID, ",") if instance.ExecutedGtidSet != "" && instance.primaryExecutedGtidSet != "" { // Compare primary & replica GTID sets, but ignore the sets that present the primary's UUID. @@ -426,7 +378,7 @@ Cleanup: redactedPrimaryExecutedGtidSet, _ := NewOracleGtidSet(instance.primaryExecutedGtidSet) redactedPrimaryExecutedGtidSet.RemoveUUID(instance.SourceUUID) - instance.GtidErrant, err = vitessmysql.Subtract(redactedExecutedGtidSet.String(), redactedPrimaryExecutedGtidSet.String()) + instance.GtidErrant, err = replication.Subtract(redactedExecutedGtidSet.String(), redactedPrimaryExecutedGtidSet.String()) } } } @@ -450,7 +402,7 @@ Cleanup: // tried to check the instance. last_attempted_check is also // updated on success by writeInstance. latency.Start("backend") - _ = UpdateInstanceLastChecked(instanceKey, partialSuccess) + _ = UpdateInstanceLastChecked(tabletAlias, partialSuccess) latency.Stop("backend") return nil, err } @@ -461,7 +413,7 @@ func getKeyspaceShardName(keyspace, shard string) string { } func getBinlogCoordinatesFromPositionString(position string) (BinlogCoordinates, error) { - pos, err := vitessmysql.DecodePosition(position) + pos, err := replication.DecodePosition(position) if err != nil || pos.GTIDSet == nil { return BinlogCoordinates{}, err } @@ -472,41 +424,15 @@ func getBinlogCoordinatesFromPositionString(position string) (BinlogCoordinates, return *binLogCoordinates, nil } -func ReadReplicationGroupPrimary(instance *Instance) (err error) { - query := ` - SELECT - replication_group_primary_host, - replication_group_primary_port - FROM - database_instance - WHERE - replication_group_name = ? 
- AND replication_group_member_role = 'PRIMARY' -` - queryArgs := sqlutils.Args(instance.ReplicationGroupName) - err = db.QueryVTOrc(query, queryArgs, func(row sqlutils.RowMap) error { - groupPrimaryHost := row.GetString("replication_group_primary_host") - groupPrimaryPort := row.GetInt("replication_group_primary_port") - resolvedGroupPrimary, err := NewResolveInstanceKey(groupPrimaryHost, groupPrimaryPort) - if err != nil { - return err - } - instance.ReplicationGroupPrimaryInstanceKey = *resolvedGroupPrimary - return nil - }) - return err -} - // ReadInstanceClusterAttributes will return the cluster name for a given instance by looking at its primary // and getting it from there. // It is a non-recursive function and so-called-recursion is performed upon periodic reading of // instances. func ReadInstanceClusterAttributes(instance *Instance) (err error) { - var primaryOrGroupPrimaryInstanceKey InstanceKey - var primaryOrGroupPrimaryReplicationDepth uint + var primaryReplicationDepth uint var ancestryUUID string - var primaryOrGroupPrimaryExecutedGtidSet string - primaryOrGroupPrimaryDataFound := false + var primaryExecutedGtidSet string + primaryDataFound := false query := ` select @@ -518,22 +444,16 @@ func ReadInstanceClusterAttributes(instance *Instance) (err error) { from database_instance where hostname=? and port=? ` - // For instances that are part of a replication group, if the host is not the group's primary, we use the - // information from the group primary. If it is the group primary, we use the information of its primary - // (if it has any). If it is not a group member, we use the information from the host's primary. 
- if instance.IsReplicationGroupSecondary() { - primaryOrGroupPrimaryInstanceKey = instance.ReplicationGroupPrimaryInstanceKey - } else { - primaryOrGroupPrimaryInstanceKey = instance.SourceKey - } - args := sqlutils.Args(primaryOrGroupPrimaryInstanceKey.Hostname, primaryOrGroupPrimaryInstanceKey.Port) + primaryHostname := instance.SourceHost + primaryPort := instance.SourcePort + args := sqlutils.Args(primaryHostname, primaryPort) err = db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { - primaryOrGroupPrimaryReplicationDepth = m.GetUint("replication_depth") - primaryOrGroupPrimaryInstanceKey.Hostname = m.GetString("source_host") - primaryOrGroupPrimaryInstanceKey.Port = m.GetInt("source_port") + primaryReplicationDepth = m.GetUint("replication_depth") + primaryHostname = m.GetString("source_host") + primaryPort = m.GetInt("source_port") ancestryUUID = m.GetString("ancestry_uuid") - primaryOrGroupPrimaryExecutedGtidSet = m.GetString("executed_gtid_set") - primaryOrGroupPrimaryDataFound = true + primaryExecutedGtidSet = m.GetString("executed_gtid_set") + primaryDataFound = true return nil }) if err != nil { @@ -542,18 +462,18 @@ func ReadInstanceClusterAttributes(instance *Instance) (err error) { } var replicationDepth uint - if primaryOrGroupPrimaryDataFound { - replicationDepth = primaryOrGroupPrimaryReplicationDepth + 1 + if primaryDataFound { + replicationDepth = primaryReplicationDepth + 1 } isCoPrimary := false - if primaryOrGroupPrimaryInstanceKey.Equals(&instance.Key) { + if primaryHostname == instance.Hostname && primaryPort == instance.Port { // co-primary calls for special case, in fear of the infinite loop isCoPrimary = true } instance.ReplicationDepth = replicationDepth instance.IsCoPrimary = isCoPrimary instance.AncestryUUID = ancestryUUID - instance.primaryExecutedGtidSet = primaryOrGroupPrimaryExecutedGtidSet + instance.primaryExecutedGtidSet = primaryExecutedGtidSet return nil } @@ -561,8 +481,8 @@ func 
ReadInstanceClusterAttributes(instance *Instance) (err error) { func readInstanceRow(m sqlutils.RowMap) *Instance { instance := NewInstance() - instance.Key.Hostname = m.GetString("hostname") - instance.Key.Port = m.GetInt("port") + instance.Hostname = m.GetString("hostname") + instance.Port = m.GetInt("port") instance.ServerID = m.GetUint("server_id") instance.ServerUUID = m.GetString("server_uuid") instance.Version = m.GetString("version") @@ -572,9 +492,8 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.BinlogRowImage = m.GetString("binlog_row_image") instance.LogBinEnabled = m.GetBool("log_bin") instance.LogReplicationUpdatesEnabled = m.GetBool("log_replica_updates") - instance.SourceKey.Hostname = m.GetString("source_host") - instance.SourceKey.Port = m.GetInt("source_port") - instance.IsDetachedPrimary = instance.SourceKey.IsDetached() + instance.SourceHost = m.GetString("source_host") + instance.SourcePort = m.GetInt("source_port") instance.ReplicationSQLThreadRuning = m.GetBool("replica_sql_running") instance.ReplicationIOThreadRuning = m.GetBool("replica_io_running") instance.ReplicationSQLThreadState = ReplicationThreadState(m.GetInt("replication_sql_thread_state")) @@ -623,30 +542,12 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.LastSeenTimestamp = m.GetString("last_seen") instance.IsLastCheckValid = m.GetBool("is_last_check_valid") instance.SecondsSinceLastSeen = m.GetNullInt64("seconds_since_last_seen") - instance.IsCandidate = m.GetBool("is_candidate") - instance.PromotionRule = promotionrule.CandidatePromotionRule(m.GetString("promotion_rule")) - instance.IsDowntimed = m.GetBool("is_downtimed") - instance.DowntimeReason = m.GetString("downtime_reason") - instance.DowntimeOwner = m.GetString("downtime_owner") - instance.DowntimeEndTimestamp = m.GetString("downtime_end_timestamp") - instance.ElapsedDowntime = time.Second * time.Duration(m.GetInt("elapsed_downtime_seconds")) - instance.UnresolvedHostname = 
m.GetString("unresolved_hostname") instance.AllowTLS = m.GetBool("allow_tls") - instance.InstanceAlias = m.GetString("instance_alias") + instance.InstanceAlias = m.GetString("alias") instance.LastDiscoveryLatency = time.Duration(m.GetInt64("last_discovery_latency")) * time.Nanosecond instance.applyFlavorName() - /* Read Group Replication variables below */ - instance.ReplicationGroupName = m.GetString("replication_group_name") - instance.ReplicationGroupIsSinglePrimary = m.GetBool("replication_group_is_single_primary_mode") - instance.ReplicationGroupMemberState = m.GetString("replication_group_member_state") - instance.ReplicationGroupMemberRole = m.GetString("replication_group_member_role") - instance.ReplicationGroupPrimaryInstanceKey = InstanceKey{Hostname: m.GetString("replication_group_primary_host"), - Port: m.GetInt("replication_group_primary_port")} - _ = instance.ReplicationGroupMembers.ReadJSON(m.GetString("replication_group_members")) - //instance.ReplicationGroup = m.GetString("replication_group_") - // problems if !instance.IsLastCheckValid { instance.Problems = append(instance.Problems, "last_check_invalid") @@ -654,49 +555,33 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.Problems = append(instance.Problems, "not_recently_checked") } else if instance.ReplicationThreadsExist() && !instance.ReplicaRunning() { instance.Problems = append(instance.Problems, "not_replicating") - } else if instance.ReplicationLagSeconds.Valid && math.AbsInt64(instance.ReplicationLagSeconds.Int64-int64(instance.SQLDelay)) > int64(config.Config.ReasonableReplicationLagSeconds) { + } else if instance.ReplicationLagSeconds.Valid && util.AbsInt64(instance.ReplicationLagSeconds.Int64-int64(instance.SQLDelay)) > int64(config.Config.ReasonableReplicationLagSeconds) { instance.Problems = append(instance.Problems, "replication_lag") } if instance.GtidErrant != "" { instance.Problems = append(instance.Problems, "errant_gtid") } - // Group replication problems - if 
instance.ReplicationGroupName != "" && instance.ReplicationGroupMemberState != GroupReplicationMemberStateOnline { - instance.Problems = append(instance.Problems, "group_replication_member_not_online") - } return instance } // readInstancesByCondition is a generic function to read instances from the backend database func readInstancesByCondition(condition string, args []any, sort string) ([](*Instance), error) { - readFunc := func() ([](*Instance), error) { - instances := [](*Instance){} + readFunc := func() ([]*Instance, error) { + var instances []*Instance if sort == "" { - sort = `hostname, port` + sort = `alias` } query := fmt.Sprintf(` select *, unix_timestamp() - unix_timestamp(last_checked) as seconds_since_last_checked, ifnull(last_checked <= last_seen, 0) as is_last_check_valid, - unix_timestamp() - unix_timestamp(last_seen) as seconds_since_last_seen, - candidate_database_instance.last_suggested is not null - and candidate_database_instance.promotion_rule in ('must', 'prefer') as is_candidate, - ifnull(nullif(candidate_database_instance.promotion_rule, ''), 'neutral') as promotion_rule, - ifnull(unresolved_hostname, '') as unresolved_hostname, - (database_instance_downtime.downtime_active is not null and ifnull(database_instance_downtime.end_timestamp, now()) > now()) as is_downtimed, - ifnull(database_instance_downtime.reason, '') as downtime_reason, - ifnull(database_instance_downtime.owner, '') as downtime_owner, - ifnull(unix_timestamp() - unix_timestamp(begin_timestamp), 0) as elapsed_downtime_seconds, - ifnull(database_instance_downtime.end_timestamp, '') as downtime_end_timestamp + unix_timestamp() - unix_timestamp(last_seen) as seconds_since_last_seen from - database_instance - left join vitess_tablet using (hostname, port) - left join candidate_database_instance using (hostname, port) - left join hostname_unresolve using (hostname) - left join database_instance_downtime using (hostname, port) + vitess_tablet + left join database_instance using 
(alias, hostname, port) where %s order by @@ -720,19 +605,14 @@ func readInstancesByCondition(condition string, args []any, sort string) ([](*In return instances, err } -func readInstancesByExactKey(instanceKey *InstanceKey) ([](*Instance), error) { +// ReadInstance reads an instance from the vtorc backend database +func ReadInstance(tabletAlias string) (*Instance, bool, error) { condition := ` - hostname = ? - and port = ? + alias = ? ` - return readInstancesByCondition(condition, sqlutils.Args(instanceKey.Hostname, instanceKey.Port), "") -} - -// ReadInstance reads an instance from the vtorc backend database -func ReadInstance(instanceKey *InstanceKey) (*Instance, bool, error) { - instances, err := readInstancesByExactKey(instanceKey) - // We know there will be at most one (hostname & port are PK) - // And we expect to find one + instances, err := readInstancesByCondition(condition, sqlutils.Args(tabletAlias), "") + // We know there will be at most one (alias is the PK). + // And we expect to find one. readInstanceCounter.Inc(1) if len(instances) == 0 { return nil, false, err @@ -744,25 +624,25 @@ func ReadInstance(instanceKey *InstanceKey) (*Instance, bool, error) { } // ReadReplicaInstances reads replicas of a given primary -func ReadReplicaInstances(primaryKey *InstanceKey) ([](*Instance), error) { +func ReadReplicaInstances(primaryHost string, primaryPort int) ([](*Instance), error) { condition := ` source_host = ? and source_port = ? 
` - return readInstancesByCondition(condition, sqlutils.Args(primaryKey.Hostname, primaryKey.Port), "") + return readInstancesByCondition(condition, sqlutils.Args(primaryHost, primaryPort), "") } // ReadReplicaInstancesIncludingBinlogServerSubReplicas returns a list of direct slves including any replicas // of a binlog server replica -func ReadReplicaInstancesIncludingBinlogServerSubReplicas(primaryKey *InstanceKey) ([](*Instance), error) { - replicas, err := ReadReplicaInstances(primaryKey) +func ReadReplicaInstancesIncludingBinlogServerSubReplicas(primaryHost string, primaryPort int) ([](*Instance), error) { + replicas, err := ReadReplicaInstances(primaryHost, primaryPort) if err != nil { return replicas, err } for _, replica := range replicas { replica := replica if replica.IsBinlogServer() { - binlogServerReplicas, err := ReadReplicaInstancesIncludingBinlogServerSubReplicas(&replica.Key) + binlogServerReplicas, err := ReadReplicaInstancesIncludingBinlogServerSubReplicas(replica.Hostname, replica.Port) if err != nil { return replicas, err } @@ -785,140 +665,27 @@ func ReadProblemInstances(keyspace string, shard string) ([](*Instance), error) or (abs(cast(replication_lag_seconds as signed) - cast(sql_delay as signed)) > ?) or (abs(cast(replica_lag_seconds as signed) - cast(sql_delay as signed)) > ?) 
or (gtid_errant != '') - or (replication_group_name != '' and replication_group_member_state != 'ONLINE') ) ` args := sqlutils.Args(keyspace, keyspace, shard, shard, config.Config.InstancePollSeconds*5, config.Config.ReasonableReplicationLagSeconds, config.Config.ReasonableReplicationLagSeconds) - instances, err := readInstancesByCondition(condition, args, "") - if err != nil { - return instances, err - } - var reportedInstances [](*Instance) - for _, instance := range instances { - skip := false - if instance.IsDowntimed { - skip = true - } - if !skip { - reportedInstances = append(reportedInstances, instance) - } - } - return reportedInstances, nil + return readInstancesByCondition(condition, args, "") } -// ReadLostInRecoveryInstances returns all instances (potentially filtered by cluster) -// which are currently indicated as downtimed due to being lost during a topology recovery. -func ReadLostInRecoveryInstances(keyspace string, shard string) ([](*Instance), error) { +// ReadInstancesWithErrantGTIds reads all instances with errant GTIDs +func ReadInstancesWithErrantGTIds(keyspace string, shard string) ([]*Instance, error) { condition := ` - ifnull( - database_instance_downtime.downtime_active = 1 - and database_instance_downtime.end_timestamp > now() - and database_instance_downtime.reason = ?, 0) - and ? IN ('', keyspace) - and ? IN ('', shard) - ` - return readInstancesByCondition(condition, sqlutils.Args(DowntimeLostInRecoveryMessage, keyspace, shard), "keyspace asc, shard asc, replication_depth asc") -} - -// ForgetUnseenInstancesDifferentlyResolved will purge instances which are invalid, and whose hostname -// appears on the hostname_resolved table; this means some time in the past their hostname was unresovled, and now -// resovled to a different value; the old hostname is never accessed anymore and the old entry should be removed. 
-func ForgetUnseenInstancesDifferentlyResolved() error { - query := ` - select - database_instance.hostname, database_instance.port - from - hostname_resolve - JOIN database_instance ON (hostname_resolve.hostname = database_instance.hostname) - where - hostname_resolve.hostname != hostname_resolve.resolved_hostname - AND ifnull(last_checked <= last_seen, 0) = 0 - ` - keys := NewInstanceKeyMap() - err := db.QueryVTOrc(query, nil, func(m sqlutils.RowMap) error { - key := InstanceKey{ - Hostname: m.GetString("hostname"), - Port: m.GetInt("port"), - } - keys.AddKey(key) - return nil - }) - var rowsAffected int64 - for _, key := range keys.GetInstanceKeys() { - sqlResult, err := db.ExecVTOrc(` - delete from - database_instance - where - hostname = ? and port = ? - `, key.Hostname, key.Port, - ) - if err != nil { - log.Error(err) - return err - } - rows, err := sqlResult.RowsAffected() - if err != nil { - log.Error(err) - return err - } - rowsAffected = rowsAffected + rows - } - _ = AuditOperation("forget-unseen-differently-resolved", nil, fmt.Sprintf("Forgotten instances: %d", rowsAffected)) - return err -} - -// readUnknownPrimaryHostnameResolves will figure out the resolved hostnames of primary-hosts which cannot be found. 
-// It uses the hostname_resolve_history table to heuristically guess the correct hostname (based on "this was the -// last time we saw this hostname and it resolves into THAT") -func readUnknownPrimaryHostnameResolves() (map[string]string, error) { - res := make(map[string]string) - err := db.QueryVTOrcRowsMap(` - SELECT DISTINCT - replica_instance.source_host, hostname_resolve_history.resolved_hostname - FROM - database_instance replica_instance - LEFT JOIN hostname_resolve ON (replica_instance.source_host = hostname_resolve.hostname) - LEFT JOIN database_instance primary_instance ON ( - COALESCE(hostname_resolve.resolved_hostname, replica_instance.source_host) = primary_instance.hostname - and replica_instance.source_port = primary_instance.port - ) LEFT JOIN hostname_resolve_history ON (replica_instance.source_host = hostname_resolve_history.hostname) - WHERE - primary_instance.last_checked IS NULL - and replica_instance.source_host != '' - and replica_instance.source_host != '_' - and replica_instance.source_port > 0 - `, func(m sqlutils.RowMap) error { - res[m.GetString("source_host")] = m.GetString("resolved_hostname") - return nil - }) - if err != nil { - log.Error(err) - return res, err - } - - return res, nil -} - -// ResolveUnknownPrimaryHostnameResolves fixes missing hostname resolves based on hostname_resolve_history -// The use case is replicas replicating from some unknown-hostname which cannot be otherwise found. This could -// happen due to an expire unresolve together with clearing up of hostname cache. -func ResolveUnknownPrimaryHostnameResolves() error { - - hostnameResolves, err := readUnknownPrimaryHostnameResolves() - if err != nil { - return err - } - for hostname, resolvedHostname := range hostnameResolves { - UpdateResolvedHostname(hostname, resolvedHostname) - } + keyspace LIKE (CASE WHEN ? = '' THEN '%' ELSE ? END) + and shard LIKE (CASE WHEN ? = '' THEN '%' ELSE ? 
END) + and gtid_errant != '' + ` - _ = AuditOperation("resolve-unknown-primaries", nil, fmt.Sprintf("Num resolved hostnames: %d", len(hostnameResolves))) - return err + args := sqlutils.Args(keyspace, keyspace, shard, shard) + return readInstancesByCondition(condition, args, "") } // GetKeyspaceShardName gets the keyspace shard name for the given instance key -func GetKeyspaceShardName(instanceKey *InstanceKey) (keyspace string, shard string, err error) { +func GetKeyspaceShardName(tabletAlias string) (keyspace string, shard string, err error) { query := ` select keyspace, @@ -926,10 +693,9 @@ func GetKeyspaceShardName(instanceKey *InstanceKey) (keyspace string, shard stri from vitess_tablet where - hostname = ? - and port = ? + alias = ? ` - err = db.QueryVTOrc(query, sqlutils.Args(instanceKey.Hostname, instanceKey.Port), func(m sqlutils.RowMap) error { + err = db.QueryVTOrc(query, sqlutils.Args(tabletAlias), func(m sqlutils.RowMap) error { keyspace = m.GetString("keyspace") shard = m.GetString("shard") return nil @@ -949,11 +715,11 @@ func GetKeyspaceShardName(instanceKey *InstanceKey) (keyspace string, shard stri // resulted in an actual check! This can happen when TCP/IP connections are hung, in which case the "check" // never returns. In such case we multiply interval by a factor, so as not to open too many connections on // the instance. 
-func ReadOutdatedInstanceKeys() ([]InstanceKey, error) { - res := []InstanceKey{} +func ReadOutdatedInstanceKeys() ([]string, error) { + var res []string query := ` SELECT - hostname, port + alias FROM database_instance WHERE @@ -964,24 +730,21 @@ func ReadOutdatedInstanceKeys() ([]InstanceKey, error) { END UNION SELECT - vitess_tablet.hostname, vitess_tablet.port + vitess_tablet.alias FROM vitess_tablet LEFT JOIN database_instance ON ( - vitess_tablet.hostname = database_instance.hostname - AND vitess_tablet.port = database_instance.port + vitess_tablet.alias = database_instance.alias ) WHERE - database_instance.hostname IS NULL + database_instance.alias IS NULL ` args := sqlutils.Args(config.Config.InstancePollSeconds, 2*config.Config.InstancePollSeconds) err := db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { - instanceKey, merr := NewResolveInstanceKey(m.GetString("hostname"), m.GetInt("port")) - if merr != nil { - log.Error(merr) - } else if !InstanceIsForgotten(instanceKey) { + tabletAlias := m.GetString("alias") + if !InstanceIsForgotten(tabletAlias) { // only if not in "forget" cache - res = append(res, *instanceKey) + res = append(res, tabletAlias) } // We don;t return an error because we want to keep filling the outdated instances list. 
return nil @@ -1048,6 +811,7 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo insertIgnore = true } var columns = []string{ + "alias", "hostname", "port", "last_checked", @@ -1109,24 +873,16 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo "semi_sync_primary_status", "semi_sync_primary_clients", "semi_sync_replica_status", - "instance_alias", "last_discovery_latency", - "replication_group_name", - "replication_group_is_single_primary_mode", - "replication_group_member_state", - "replication_group_member_role", - "replication_group_members", - "replication_group_primary_host", - "replication_group_primary_port", } var values = make([]string, len(columns)) for i := range columns { values[i] = "?" } - values[2] = "NOW()" // last_checked - values[3] = "NOW()" // last_attempted_check - values[4] = "1" // last_check_partial_success + values[3] = "NOW()" // last_checked + values[4] = "NOW()" // last_attempted_check + values[5] = "1" // last_check_partial_success if updateLastSeen { columns = append(columns, "last_seen") @@ -1137,8 +893,9 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo for _, instance := range instances { // number of columns minus 2 as last_checked and last_attempted_check // updated with NOW() - args = append(args, instance.Key.Hostname) - args = append(args, instance.Key.Port) + args = append(args, instance.InstanceAlias) + args = append(args, instance.Hostname) + args = append(args, instance.Port) args = append(args, instance.ServerID) args = append(args, instance.ServerUUID) args = append(args, instance.Version) @@ -1152,8 +909,8 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo args = append(args, instance.LogReplicationUpdatesEnabled) args = append(args, instance.SelfBinlogCoordinates.LogFile) args = append(args, instance.SelfBinlogCoordinates.LogPos) - args = append(args, instance.SourceKey.Hostname) - args = 
append(args, instance.SourceKey.Port) + args = append(args, instance.SourceHost) + args = append(args, instance.SourcePort) args = append(args, instance.ReplicationSQLThreadRuning) args = append(args, instance.ReplicationIOThreadRuning) args = append(args, instance.ReplicationSQLThreadState) @@ -1195,15 +952,7 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo args = append(args, instance.SemiSyncPrimaryStatus) args = append(args, instance.SemiSyncPrimaryClients) args = append(args, instance.SemiSyncReplicaStatus) - args = append(args, instance.InstanceAlias) args = append(args, instance.LastDiscoveryLatency.Nanoseconds()) - args = append(args, instance.ReplicationGroupName) - args = append(args, instance.ReplicationGroupIsSinglePrimary) - args = append(args, instance.ReplicationGroupMemberState) - args = append(args, instance.ReplicationGroupMemberRole) - args = append(args, instance.ReplicationGroupMembers.ToJSONString()) - args = append(args, instance.ReplicationGroupPrimaryInstanceKey.Hostname) - args = append(args, instance.ReplicationGroupPrimaryInstanceKey.Port) } sql, err := mkInsertOdku("database_instance", columns, values, len(instances), insertIgnore) @@ -1220,7 +969,7 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo func writeManyInstances(instances []*Instance, instanceWasActuallyFound bool, updateLastSeen bool) error { writeInstances := [](*Instance){} for _, instance := range instances { - if InstanceIsForgotten(&instance.Key) && !instance.IsSeed() { + if InstanceIsForgotten(instance.InstanceAlias) { continue } writeInstances = append(writeInstances, instance) @@ -1249,7 +998,7 @@ func WriteInstance(instance *Instance, instanceWasActuallyFound bool, lastError // UpdateInstanceLastChecked updates the last_check timestamp in the vtorc backed database // for a given instance -func UpdateInstanceLastChecked(instanceKey *InstanceKey, partialSuccess bool) error { +func 
UpdateInstanceLastChecked(tabletAlias string, partialSuccess bool) error { writeFunc := func() error { _, err := db.ExecVTOrc(` update @@ -1258,11 +1007,9 @@ func UpdateInstanceLastChecked(instanceKey *InstanceKey, partialSuccess bool) er last_checked = NOW(), last_check_partial_success = ? where - hostname = ? - and port = ?`, + alias = ?`, partialSuccess, - instanceKey.Hostname, - instanceKey.Port, + tabletAlias, ) if err != nil { log.Error(err) @@ -1280,7 +1027,7 @@ func UpdateInstanceLastChecked(instanceKey *InstanceKey, partialSuccess bool) er // And so we make sure to note down *before* we even attempt to access the instance; and this raises a red flag when we // wish to access the instance again: if last_attempted_check is *newer* than last_checked, that's bad news and means // we have a "hanging" issue. -func UpdateInstanceLastAttemptedCheck(instanceKey *InstanceKey) error { +func UpdateInstanceLastAttemptedCheck(tabletAlias string) error { writeFunc := func() error { _, err := db.ExecVTOrc(` update @@ -1288,10 +1035,8 @@ func UpdateInstanceLastAttemptedCheck(instanceKey *InstanceKey) error { set last_attempted_check = NOW() where - hostname = ? - and port = ?`, - instanceKey.Hostname, - instanceKey.Port, + alias = ?`, + tabletAlias, ) if err != nil { log.Error(err) @@ -1301,43 +1046,59 @@ func UpdateInstanceLastAttemptedCheck(instanceKey *InstanceKey) error { return ExecDBWriteFunc(writeFunc) } -func InstanceIsForgotten(instanceKey *InstanceKey) bool { - _, found := forgetInstanceKeys.Get(instanceKey.StringCode()) +func InstanceIsForgotten(tabletAlias string) bool { + _, found := forgetAliases.Get(tabletAlias) return found } // ForgetInstance removes an instance entry from the vtorc backed database. // It may be auto-rediscovered through topology or requested for discovery by multiple means. 
-func ForgetInstance(instanceKey *InstanceKey) error { - if instanceKey == nil { - errMsg := "ForgetInstance(): nil instanceKey" +func ForgetInstance(tabletAlias string) error { + if tabletAlias == "" { + errMsg := "ForgetInstance(): empty tabletAlias" log.Errorf(errMsg) return fmt.Errorf(errMsg) } - forgetInstanceKeys.Set(instanceKey.StringCode(), true, cache.DefaultExpiration) + forgetAliases.Set(tabletAlias, true, cache.DefaultExpiration) + log.Infof("Forgetting: %v", tabletAlias) + + // Delete from the 'vitess_tablet' table. + _, err := db.ExecVTOrc(` + delete + from vitess_tablet + where + alias = ?`, + tabletAlias, + ) + if err != nil { + log.Error(err) + return err + } + + // Also delete from the 'database_instance' table. sqlResult, err := db.ExecVTOrc(` delete from database_instance where - hostname = ? and port = ?`, - instanceKey.Hostname, - instanceKey.Port, + alias = ?`, + tabletAlias, ) if err != nil { log.Error(err) return err } + // Get the number of rows affected. If they are zero, then we tried to forget an instance that doesn't exist. 
rows, err := sqlResult.RowsAffected() if err != nil { log.Error(err) return err } if rows == 0 { - errMsg := fmt.Sprintf("ForgetInstance(): instance %+v not found", *instanceKey) + errMsg := fmt.Sprintf("ForgetInstance(): tablet %+v not found", tabletAlias) log.Errorf(errMsg) return fmt.Errorf(errMsg) } - _ = AuditOperation("forget", instanceKey, "") + _ = AuditOperation("forget", tabletAlias, "") return nil } @@ -1359,7 +1120,9 @@ func ForgetLongUnseenInstances() error { log.Error(err) return err } - _ = AuditOperation("forget-unseen", nil, fmt.Sprintf("Forgotten instances: %d", rows)) + if rows > 0 { + _ = AuditOperation("forget-unseen", "", fmt.Sprintf("Forgotten instances: %d", rows)) + } return err } @@ -1369,12 +1132,14 @@ func SnapshotTopologies() error { _, err := db.ExecVTOrc(` insert ignore into database_instance_topology_history (snapshot_unix_timestamp, - hostname, port, source_host, source_port, version) + alias, hostname, port, source_host, source_port, keyspace, shard, version) select UNIX_TIMESTAMP(NOW()), - hostname, port, source_host, source_port, version + vitess_tablet.alias, vitess_tablet.hostname, vitess_tablet.port, + database_instance.source_host, database_instance.source_port, + vitess_tablet.keyspace, vitess_tablet.shard, database_instance.version from - database_instance + vitess_tablet left join database_instance using (alias, hostname, port) `, ) if err != nil { @@ -1388,16 +1153,16 @@ func SnapshotTopologies() error { } // RecordStaleInstanceBinlogCoordinates snapshots the binlog coordinates of instances -func RecordStaleInstanceBinlogCoordinates(instanceKey *InstanceKey, binlogCoordinates *BinlogCoordinates) error { +func RecordStaleInstanceBinlogCoordinates(tabletAlias string, binlogCoordinates *BinlogCoordinates) error { args := sqlutils.Args( - instanceKey.Hostname, instanceKey.Port, + tabletAlias, binlogCoordinates.LogFile, binlogCoordinates.LogPos, ) _, err := db.ExecVTOrc(` delete from database_instance_stale_binlog_coordinates 
where - hostname=? and port=? + alias = ? and ( binary_log_file != ? or binary_log_pos != ? @@ -1412,10 +1177,10 @@ func RecordStaleInstanceBinlogCoordinates(instanceKey *InstanceKey, binlogCoordi _, err = db.ExecVTOrc(` insert ignore into database_instance_stale_binlog_coordinates ( - hostname, port, binary_log_file, binary_log_pos, first_seen + alias, binary_log_file, binary_log_pos, first_seen ) values ( - ?, ?, ?, ?, NOW() + ?, ?, ?, NOW() )`, args...) if err != nil { diff --git a/go/vt/vtorc/inst/instance_dao_test.go b/go/vt/vtorc/inst/instance_dao_test.go index 71d0ed94ff9..549389f91fe 100644 --- a/go/vt/vtorc/inst/instance_dao_test.go +++ b/go/vt/vtorc/inst/instance_dao_test.go @@ -6,19 +6,19 @@ import ( "regexp" "strings" "testing" + "time" + "github.com/patrickmn/go-cache" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/external/golib/sqlutils" + "vitess.io/vitess/go/vt/log" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/db" ) -var ( - i710k = InstanceKey{Hostname: "i710", Port: 3306} - i720k = InstanceKey{Hostname: "i720", Port: 3306} - i730k = InstanceKey{Hostname: "i730", Port: 3306} -) - var ( spacesRegexp = regexp.MustCompile(`[ \t\n\r]+`) ) @@ -36,9 +36,9 @@ func stripSpaces(s string) string { } func mkTestInstances() []*Instance { - i710 := Instance{Key: i710k, ServerID: 710, ExecBinlogCoordinates: BinlogCoordinates{LogFile: "mysql.000007", LogPos: 10}} - i720 := Instance{Key: i720k, ServerID: 720, ExecBinlogCoordinates: BinlogCoordinates{LogFile: "mysql.000007", LogPos: 20}} - i730 := Instance{Key: i730k, ServerID: 730, ExecBinlogCoordinates: BinlogCoordinates{LogFile: "mysql.000007", LogPos: 30}} + i710 := Instance{InstanceAlias: "zone1-i710", Hostname: "i710", Port: 3306, ServerID: 710, ExecBinlogCoordinates: BinlogCoordinates{LogFile: "mysql.000007", LogPos: 10}} + i720 := Instance{InstanceAlias: "zone1-i720", 
Hostname: "i720", Port: 3306, ServerID: 720, ExecBinlogCoordinates: BinlogCoordinates{LogFile: "mysql.000007", LogPos: 20}} + i730 := Instance{InstanceAlias: "zone1-i730", Hostname: "i730", Port: 3306, ServerID: 730, ExecBinlogCoordinates: BinlogCoordinates{LogFile: "mysql.000007", LogPos: 30}} instances := []*Instance{&i710, &i720, &i730} for _, instance := range instances { instance.Version = "5.6.7" @@ -59,21 +59,21 @@ func TestMkInsertOdkuSingle(t *testing.T) { // one instance s1 := `INSERT ignore INTO database_instance - (hostname, port, last_checked, last_attempted_check, last_check_partial_success, server_id, server_uuid, - version, major_version, version_comment, binlog_server, read_only, binlog_format, - binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, - replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid, - source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, instance_alias, last_discovery_latency, replication_group_name, replication_group_is_single_primary_mode, replication_group_member_state, replication_group_member_role, replication_group_members, replication_group_primary_host, replication_group_primary_port, last_seen) - VALUES - (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()) - ON DUPLICATE KEY UPDATE - hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), 
replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), data_center=VALUES(data_center), region=VALUES(region), physical_environment=VALUES(physical_environment), replication_depth=VALUES(replication_depth), is_co_primary=VALUES(is_co_primary), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls), - semi_sync_enforced=VALUES(semi_sync_enforced), semi_sync_primary_enabled=VALUES(semi_sync_primary_enabled), semi_sync_primary_timeout=VALUES(semi_sync_primary_timeout), semi_sync_primary_wait_for_replica_count=VALUES(semi_sync_primary_wait_for_replica_count), semi_sync_replica_enabled=VALUES(semi_sync_replica_enabled), semi_sync_primary_status=VALUES(semi_sync_primary_status), semi_sync_primary_clients=VALUES(semi_sync_primary_clients), semi_sync_replica_status=VALUES(semi_sync_replica_status), - instance_alias=VALUES(instance_alias), last_discovery_latency=VALUES(last_discovery_latency), replication_group_name=VALUES(replication_group_name), replication_group_is_single_primary_mode=VALUES(replication_group_is_single_primary_mode), replication_group_member_state=VALUES(replication_group_member_state), replication_group_member_role=VALUES(replication_group_member_role), replication_group_members=VALUES(replication_group_members), replication_group_primary_host=VALUES(replication_group_primary_host), replication_group_primary_port=VALUES(replication_group_primary_port), last_seen=VALUES(last_seen) - ` - a1 := `i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, + (alias, hostname, port, last_checked, last_attempted_check, last_check_partial_success, server_id, server_uuid, + version, major_version, version_comment, binlog_server, read_only, binlog_format, + binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, + replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, 
supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid, + source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, last_discovery_latency, last_seen) + VALUES + (?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()) + ON DUPLICATE KEY UPDATE + alias=VALUES(alias), hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), 
supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), data_center=VALUES(data_center), region=VALUES(region), physical_environment=VALUES(physical_environment), replication_depth=VALUES(replication_depth), is_co_primary=VALUES(is_co_primary), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls), + semi_sync_enforced=VALUES(semi_sync_enforced), semi_sync_primary_enabled=VALUES(semi_sync_primary_enabled), semi_sync_primary_timeout=VALUES(semi_sync_primary_timeout), semi_sync_primary_wait_for_replica_count=VALUES(semi_sync_primary_wait_for_replica_count), semi_sync_replica_enabled=VALUES(semi_sync_replica_enabled), semi_sync_primary_status=VALUES(semi_sync_primary_status), semi_sync_primary_clients=VALUES(semi_sync_primary_clients), semi_sync_replica_status=VALUES(semi_sync_replica_status), + last_discovery_latency=VALUES(last_discovery_latency), last_seen=VALUES(last_seen) + ` + a1 := `zone1-i710, i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, - false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 
0, 0, false, false, 0, false, , 0, , false, , , [], , 0, ` + false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0,` sql1, args1, err := mkInsertOdkuForInstances(instances[:1], false, true) require.NoError(t, err) @@ -86,22 +86,25 @@ func TestMkInsertOdkuThree(t *testing.T) { // three instances s3 := `INSERT INTO database_instance - (hostname, port, last_checked, last_attempted_check, last_check_partial_success, server_id, server_uuid, version, major_version, version_comment, binlog_server, read_only, binlog_format, binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid, source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, - semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, instance_alias, last_discovery_latency, replication_group_name, replication_group_is_single_primary_mode, replication_group_member_state, replication_group_member_role, replication_group_members, replication_group_primary_host, replication_group_primary_port, last_seen) - VALUES - (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()), - (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()), - (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()) - ON DUPLICATE KEY UPDATE - hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), 
read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), data_center=VALUES(data_center), region=VALUES(region), - physical_environment=VALUES(physical_environment), replication_depth=VALUES(replication_depth), is_co_primary=VALUES(is_co_primary), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls), semi_sync_enforced=VALUES(semi_sync_enforced), - semi_sync_primary_enabled=VALUES(semi_sync_primary_enabled), semi_sync_primary_timeout=VALUES(semi_sync_primary_timeout), semi_sync_primary_wait_for_replica_count=VALUES(semi_sync_primary_wait_for_replica_count), semi_sync_replica_enabled=VALUES(semi_sync_replica_enabled), semi_sync_primary_status=VALUES(semi_sync_primary_status), semi_sync_primary_clients=VALUES(semi_sync_primary_clients), semi_sync_replica_status=VALUES(semi_sync_replica_status), - instance_alias=VALUES(instance_alias), last_discovery_latency=VALUES(last_discovery_latency), replication_group_name=VALUES(replication_group_name), replication_group_is_single_primary_mode=VALUES(replication_group_is_single_primary_mode), replication_group_member_state=VALUES(replication_group_member_state), replication_group_member_role=VALUES(replication_group_member_role), replication_group_members=VALUES(replication_group_members), replication_group_primary_host=VALUES(replication_group_primary_host), replication_group_primary_port=VALUES(replication_group_primary_port), last_seen=VALUES(last_seen) - ` + (alias, hostname, port, last_checked, last_attempted_check, last_check_partial_success, server_id, server_uuid, + version, major_version, version_comment, 
binlog_server, read_only, binlog_format, + binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, + replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid, + source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, last_discovery_latency, last_seen) + VALUES + (?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()), + (?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()), + (?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()) + ON DUPLICATE KEY UPDATE + alias=VALUES(alias), hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), 
version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), data_center=VALUES(data_center), region=VALUES(region), + physical_environment=VALUES(physical_environment), replication_depth=VALUES(replication_depth), is_co_primary=VALUES(is_co_primary), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls), semi_sync_enforced=VALUES(semi_sync_enforced), + semi_sync_primary_enabled=VALUES(semi_sync_primary_enabled), semi_sync_primary_timeout=VALUES(semi_sync_primary_timeout), 
semi_sync_primary_wait_for_replica_count=VALUES(semi_sync_primary_wait_for_replica_count), semi_sync_replica_enabled=VALUES(semi_sync_replica_enabled), semi_sync_primary_status=VALUES(semi_sync_primary_status), semi_sync_primary_clients=VALUES(semi_sync_primary_clients), semi_sync_replica_status=VALUES(semi_sync_replica_status), + last_discovery_latency=VALUES(last_discovery_latency), last_seen=VALUES(last_seen) + ` a3 := ` - i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, , 0, , false, , , [], , 0, - i720, 3306, 720, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 20, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, , 0, , false, , , [], , 0, - i730, 3306, 730, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 30, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, , 0, , false, , , [], , 0, + zone1-i710, i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0, + zone1-i720, i720, 3306, 720, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 20, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, 
false, 0, + zone1-i730, i730, 3306, 730, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 30, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0, ` sql3, args3, err := mkInsertOdkuForInstances(instances[:3], true, true) @@ -147,11 +150,599 @@ func TestGetKeyspaceShardName(t *testing.T) { err = SaveTablet(tab100) require.NoError(t, err) - keyspaceRead, shardRead, err := GetKeyspaceShardName(&InstanceKey{ - Hostname: hostname, - Port: int(port), - }) + keyspaceRead, shardRead, err := GetKeyspaceShardName(topoproto.TabletAliasString(tab100.Alias)) require.NoError(t, err) require.Equal(t, ks, keyspaceRead) require.Equal(t, shard, shardRead) } + +// TestReadInstance is used to test the functionality of ReadInstance and verify its failure modes and successes. +func TestReadInstance(t *testing.T) { + tests := []struct { + name string + tabletAliasToRead string + instanceFound bool + }{ + { + name: "Read success", + tabletAliasToRead: "zone1-0000000100", + instanceFound: true, + }, { + name: "Unknown tablet", + tabletAliasToRead: "unknown-tablet", + instanceFound: false, + }, + } + + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + db.ClearVTOrcDatabase() + }() + for _, query := range initialSQL { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + got, found, err := ReadInstance(tt.tabletAliasToRead) + require.NoError(t, err) + require.Equal(t, tt.instanceFound, found) + if tt.instanceFound { + require.EqualValues(t, tt.tabletAliasToRead, got.InstanceAlias) + } + }) + } +} + +// TestReadReplicaInstances is used to test the functionality of ReadReplicaInstances and verify its failure modes and successes. 
+func TestReadReplicaInstances(t *testing.T) { + tests := []struct { + name string + tabletPort int + replicasLen int + }{ + { + name: "Read success - Multiple replicas", + // This tabletPort corresponds to zone1-0000000101. That is the primary for the data inserted. + // Check initialSQL for more details. + tabletPort: 6714, + replicasLen: 3, + }, { + name: "Unknown tablet", + // This tabletPort corresponds to none of the tablets. + // Check initialSQL for more details. + tabletPort: 343, + replicasLen: 0, + }, { + name: "Read success - No replicas", + // This tabletPort corresponds to zone1-0000000100. That is a replica tablet, with no replicas of its own. + // Check initialSQL for more details. + tabletPort: 6711, + replicasLen: 0, + }, + } + + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + db.ClearVTOrcDatabase() + }() + for _, query := range initialSQL { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + instances, err := ReadReplicaInstances("localhost", tt.tabletPort) + require.NoError(t, err) + require.EqualValues(t, tt.replicasLen, len(instances)) + }) + } +} + +// TestReadProblemInstances is used to test the functionality of ReadProblemInstances and verify its failure modes and successes. +func TestReadProblemInstances(t *testing.T) { + // The test is intended to be used as follows. The initial data is stored into the database. Following this, some specific queries are run that each individual test specifies to get the desired state. 
+ tests := []struct { + name string + sql []string + instancesRequired []string + }{ + { + name: "No problems", + sql: nil, + instancesRequired: nil, + }, { + name: "Replication stopped on a replica", + sql: []string{ + "update database_instance set replication_sql_thread_state = 0 where alias = 'zone1-0000000112'", + }, + instancesRequired: []string{"zone1-0000000112"}, + }, { + name: "IO thread stopped on a replica", + sql: []string{ + "update database_instance set replication_io_thread_state = 0 where alias = 'zone1-0000000112'", + }, + instancesRequired: []string{"zone1-0000000112"}, + }, { + name: "High replication lag", + sql: []string{ + "update database_instance set replication_lag_seconds = 1000 where alias = 'zone1-0000000112'", + }, + instancesRequired: []string{"zone1-0000000112"}, + }, { + name: "High replication lag - replica_lag", + sql: []string{ + "update database_instance set replica_lag_seconds = 1000 where alias = 'zone1-0000000112'", + }, + instancesRequired: []string{"zone1-0000000112"}, + }, { + name: "errant GTID", + sql: []string{ + "update database_instance set gtid_errant = '729a4cc4-8680-11ed-a104-47706090afbd:1' where alias = 'zone1-0000000112'", + }, + instancesRequired: []string{"zone1-0000000112"}, + }, { + name: "Many failures", + sql: []string{ + "update database_instance set gtid_errant = '729a4cc4-8680-11ed-a104-47706090afbd:1' where alias = 'zone1-0000000112'", + "update database_instance set replication_sql_thread_state = 0 where alias = 'zone1-0000000100'", + }, + instancesRequired: []string{"zone1-0000000112", "zone1-0000000100"}, + }, + } + + // We need to set InstancePollSeconds to a large value otherwise all the instances are reported as having problems since their last_checked is very old. + // Setting this value to a hundred years, we ensure that this test doesn't fail with this issue for the next hundred years. 
+ oldVal := config.Config.InstancePollSeconds + defer func() { + config.Config.InstancePollSeconds = oldVal + }() + config.Config.InstancePollSeconds = 60 * 60 * 24 * 365 * 100 + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Each test should clear the database. The easiest way to do that is to run all the initialization commands again + defer func() { + db.ClearVTOrcDatabase() + }() + + for _, query := range append(initialSQL, tt.sql...) { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + instances, err := ReadProblemInstances("ks", "0") + require.NoError(t, err) + var tabletAliases []string + for _, instance := range instances { + tabletAliases = append(tabletAliases, instance.InstanceAlias) + } + require.ElementsMatch(t, tabletAliases, tt.instancesRequired) + }) + } +} + +// TestReadInstancesWithErrantGTIds is used to test the functionality of ReadInstancesWithErrantGTIds and verify its failure modes and successes. +func TestReadInstancesWithErrantGTIds(t *testing.T) { + // The test is intended to be used as follows. The initial data is stored into the database. Following this, some specific queries are run that each individual test specifies to get the desired state. 
+ tests := []struct { + name string + keyspace string + shard string + sql []string + instancesRequired []string + }{ + { + name: "No instances with errant GTID", + sql: nil, + instancesRequired: nil, + }, { + name: "errant GTID", + sql: []string{ + "update database_instance set gtid_errant = '729a4cc4-8680-11ed-a104-47706090afbd:1' where alias = 'zone1-0000000112'", + }, + instancesRequired: []string{"zone1-0000000112"}, + }, { + name: "keyspace filtering - success", + keyspace: "ks", + sql: []string{ + "update database_instance set gtid_errant = '729a4cc4-8680-11ed-a104-47706090afbd:1' where alias = 'zone1-0000000112'", + }, + instancesRequired: []string{"zone1-0000000112"}, + }, { + name: "keyspace filtering - failure", + keyspace: "unknown", + sql: []string{ + "update database_instance set gtid_errant = '729a4cc4-8680-11ed-a104-47706090afbd:1' where alias = 'zone1-0000000112'", + }, + instancesRequired: nil, + }, { + name: "shard filtering - success", + keyspace: "ks", + shard: "0", + sql: []string{ + "update database_instance set gtid_errant = '729a4cc4-8680-11ed-a104-47706090afbd:1' where alias = 'zone1-0000000112'", + }, + instancesRequired: []string{"zone1-0000000112"}, + }, { + name: "shard filtering - failure", + keyspace: "ks", + shard: "unknown", + sql: []string{ + "update database_instance set gtid_errant = '729a4cc4-8680-11ed-a104-47706090afbd:1' where alias = 'zone1-0000000112'", + }, + instancesRequired: nil, + }, + } + + // We need to set InstancePollSeconds to a large value otherwise all the instances are reported as having problems since their last_checked is very old. + // Setting this value to a hundred years, we ensure that this test doesn't fail with this issue for the next hundred years. 
+ oldVal := config.Config.InstancePollSeconds + defer func() { + config.Config.InstancePollSeconds = oldVal + }() + config.Config.InstancePollSeconds = 60 * 60 * 24 * 365 * 100 + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Each test should clear the database. The easiest way to do that is to run all the initialization commands again + defer func() { + db.ClearVTOrcDatabase() + }() + + for _, query := range append(initialSQL, tt.sql...) { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + instances, err := ReadInstancesWithErrantGTIds(tt.keyspace, tt.shard) + require.NoError(t, err) + var tabletAliases []string + for _, instance := range instances { + tabletAliases = append(tabletAliases, instance.InstanceAlias) + } + require.ElementsMatch(t, tabletAliases, tt.instancesRequired) + }) + } +} + +// TestReadInstancesByCondition is used to test the functionality of readInstancesByCondition and verify its failure modes and successes. +func TestReadInstancesByCondition(t *testing.T) { + tests := []struct { + name string + condition string + args []any + sort string + instancesRequired []string + }{ + { + name: "All instances with no sort", + condition: "1=1", + instancesRequired: []string{"zone1-0000000100", "zone1-0000000101", "zone1-0000000112", "zone2-0000000200"}, + }, { + name: "All instances sort by data_center descending and then alias ascending", + condition: "1=1", + sort: "data_center desc, alias asc", + instancesRequired: []string{"zone2-0000000200", "zone1-0000000100", "zone1-0000000101", "zone1-0000000112"}, + }, { + name: "Filtering by replication_depth", + condition: "replication_depth=1", + instancesRequired: []string{"zone1-0000000100", "zone1-0000000112", "zone2-0000000200"}, + }, { + name: "Filtering by exact alias", + condition: "alias='zone1-0000000100'", + instancesRequired: []string{"zone1-0000000100"}, + }, { + name: "No qualifying tablets", + condition: "replication_depth=15", + }, + } + + // Clear the 
database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + db.ClearVTOrcDatabase() + }() + for _, query := range initialSQL { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + instances, err := readInstancesByCondition(tt.condition, tt.args, tt.sort) + require.NoError(t, err) + var tabletAliases []string + for _, instance := range instances { + tabletAliases = append(tabletAliases, instance.InstanceAlias) + } + require.EqualValues(t, tt.instancesRequired, tabletAliases) + }) + } +} + +// TestReadOutdatedInstanceKeys is used to test the functionality of ReadOutdatedInstanceKeys and verify its failure modes and successes. +func TestReadOutdatedInstanceKeys(t *testing.T) { + // The test is intended to be used as follows. The initial data is stored into the database. Following this, some specific queries are run that each individual test specifies to get the desired state. 
+ tests := []struct { + name string + sql []string + instancesRequired []string + }{ + { + name: "No problems", + sql: []string{"update database_instance set last_checked = now()"}, + instancesRequired: nil, + }, { + name: "One instance is outdated", + sql: []string{ + "update database_instance set last_checked = now()", + "update database_instance set last_checked = datetime(now(), '-1 hour') where alias = 'zone1-0000000100'", + }, + instancesRequired: []string{"zone1-0000000100"}, + }, { + name: "One instance doesn't have myql data", + sql: []string{ + "update database_instance set last_checked = now()", + `INSERT INTO vitess_tablet VALUES('zone1-0000000103','localhost',7706,'ks','0','zone1',2,'0001-01-01 00:00:00+00:00','');`, + }, + instancesRequired: []string{"zone1-0000000103"}, + }, { + name: "One instance doesn't have myql data and one is outdated", + sql: []string{ + "update database_instance set last_checked = now()", + "update database_instance set last_checked = datetime(now(), '-1 hour') where alias = 'zone1-0000000100'", + `INSERT INTO vitess_tablet VALUES('zone1-0000000103','localhost',7706,'ks','0','zone1',2,'0001-01-01 00:00:00+00:00','');`, + }, + instancesRequired: []string{"zone1-0000000103", "zone1-0000000100"}, + }, + } + + // wait for the forgetAliases cache to be initialized to prevent data race. + waitForCacheInitialization() + + // We are setting InstancePollSeconds to 59 minutes, just for the test. + oldVal := config.Config.InstancePollSeconds + oldCache := forgetAliases + defer func() { + forgetAliases = oldCache + config.Config.InstancePollSeconds = oldVal + }() + config.Config.InstancePollSeconds = 60 * 25 + forgetAliases = cache.New(time.Minute, time.Minute) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Each test should clear the database. 
The easiest way to do that is to run all the initialization commands again + defer func() { + db.ClearVTOrcDatabase() + }() + + for _, query := range append(initialSQL, tt.sql...) { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + tabletAliases, err := ReadOutdatedInstanceKeys() + + errInDataCollection := db.QueryVTOrcRowsMap(`select alias, +last_checked, +last_attempted_check, +ROUND((JULIANDAY(now()) - JULIANDAY(last_checked)) * 86400) AS difference, +last_attempted_check <= last_checked as use1, +last_checked < now() - interval 1500 second as is_outdated1, +last_checked < now() - interval 3000 second as is_outdated2 +from database_instance`, func(rowMap sqlutils.RowMap) error { + log.Errorf("Row in database_instance - %+v", rowMap) + return nil + }) + require.NoError(t, errInDataCollection) + require.NoError(t, err) + require.ElementsMatch(t, tabletAliases, tt.instancesRequired) + }) + } +} + +// TestUpdateInstanceLastChecked is used to test the functionality of UpdateInstanceLastChecked and verify its failure modes and successes. +func TestUpdateInstanceLastChecked(t *testing.T) { + tests := []struct { + name string + tabletAlias string + partialSuccess bool + conditionToCheck string + }{ + { + name: "Verify updated last checked", + tabletAlias: "zone1-0000000100", + partialSuccess: false, + conditionToCheck: "last_checked >= now() - interval 30 second and last_check_partial_success = false", + }, { + name: "Verify partial success", + tabletAlias: "zone1-0000000100", + partialSuccess: true, + conditionToCheck: "last_checked >= now() - interval 30 second and last_check_partial_success = true", + }, { + name: "Verify no error on unknown tablet", + tabletAlias: "unknown tablet", + partialSuccess: true, + }, + } + + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. 
+ defer func() { + db.ClearVTOrcDatabase() + }() + for _, query := range initialSQL { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := UpdateInstanceLastChecked(tt.tabletAlias, tt.partialSuccess) + require.NoError(t, err) + + if tt.conditionToCheck != "" { + // Verify the instance we just updated satisfies the condition specified. + instances, err := readInstancesByCondition(tt.conditionToCheck, nil, "") + require.NoError(t, err) + var tabletAliases []string + for _, instance := range instances { + tabletAliases = append(tabletAliases, instance.InstanceAlias) + } + require.Contains(t, tabletAliases, tt.tabletAlias) + } + }) + } +} + +// UpdateInstanceLastAttemptedCheck is used to test the functionality of UpdateInstanceLastAttemptedCheck and verify its failure modes and successes. +func TestUpdateInstanceLastAttemptedCheck(t *testing.T) { + tests := []struct { + name string + tabletAlias string + conditionToCheck string + }{ + { + name: "Verify updated last checked", + tabletAlias: "zone1-0000000100", + conditionToCheck: "last_attempted_check >= now() - interval 30 second", + }, { + name: "Verify no error on unknown tablet", + tabletAlias: "unknown tablet", + }, + } + + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + db.ClearVTOrcDatabase() + }() + for _, query := range initialSQL { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := UpdateInstanceLastAttemptedCheck(tt.tabletAlias) + require.NoError(t, err) + + if tt.conditionToCheck != "" { + // Verify the instance we just updated satisfies the condition specified. 
+ instances, err := readInstancesByCondition(tt.conditionToCheck, nil, "") + require.NoError(t, err) + var tabletAliases []string + for _, instance := range instances { + tabletAliases = append(tabletAliases, instance.InstanceAlias) + } + require.Contains(t, tabletAliases, tt.tabletAlias) + } + }) + } +} + +// TestForgetInstanceAndInstanceIsForgotten tests the functionality of ForgetInstance and InstanceIsForgotten together. +func TestForgetInstanceAndInstanceIsForgotten(t *testing.T) { + tests := []struct { + name string + tabletAlias string + errExpected string + instanceForgotten bool + tabletsExpected []string + }{ + { + name: "Unknown tablet", + tabletAlias: "unknown-tablet", + errExpected: "ForgetInstance(): tablet unknown-tablet not found", + instanceForgotten: true, + tabletsExpected: []string{"zone1-0000000100", "zone1-0000000101", "zone1-0000000112", "zone2-0000000200"}, + }, { + name: "Empty tabletAlias", + tabletAlias: "", + errExpected: "ForgetInstance(): empty tabletAlias", + instanceForgotten: false, + tabletsExpected: []string{"zone1-0000000100", "zone1-0000000101", "zone1-0000000112", "zone2-0000000200"}, + }, { + name: "Success", + tabletAlias: "zone1-0000000112", + instanceForgotten: true, + tabletsExpected: []string{"zone1-0000000100", "zone1-0000000101", "zone2-0000000200"}, + }, + } + + // wait for the forgetAliases cache to be initialized to prevent data race. + waitForCacheInitialization() + + oldCache := forgetAliases + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. 
+ defer func() { + forgetAliases = oldCache + db.ClearVTOrcDatabase() + }() + forgetAliases = cache.New(time.Minute, time.Minute) + + for _, query := range initialSQL { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := ForgetInstance(tt.tabletAlias) + if tt.errExpected != "" { + require.EqualError(t, err, tt.errExpected) + } else { + require.NoError(t, err) + } + isForgotten := InstanceIsForgotten(tt.tabletAlias) + require.Equal(t, tt.instanceForgotten, isForgotten) + + instances, err := readInstancesByCondition("1=1", nil, "") + require.NoError(t, err) + var tabletAliases []string + for _, instance := range instances { + tabletAliases = append(tabletAliases, instance.InstanceAlias) + } + require.EqualValues(t, tt.tabletsExpected, tabletAliases) + }) + } +} + +func TestSnapshotTopologies(t *testing.T) { + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + db.ClearVTOrcDatabase() + }() + + for _, query := range initialSQL { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + err := SnapshotTopologies() + require.NoError(t, err) + + query := "select alias from database_instance_topology_history" + var tabletAliases []string + err = db.QueryVTOrc(query, nil, func(rowMap sqlutils.RowMap) error { + tabletAliases = append(tabletAliases, rowMap.GetString("alias")) + return nil + }) + require.NoError(t, err) + + require.Equal(t, []string{"zone1-0000000100", "zone1-0000000101", "zone1-0000000112", "zone2-0000000200"}, tabletAliases) +} + +// waitForCacheInitialization waits for the cache to be initialized to prevent data race in tests +// that alter the cache or depend on its behaviour. 
+func waitForCacheInitialization() { + for { + if cacheInitializationCompleted.Load() { + return + } + time.Sleep(100 * time.Millisecond) + } +} diff --git a/go/vt/vtorc/inst/instance_key.go b/go/vt/vtorc/inst/instance_key.go deleted file mode 100644 index 2a3124aeb57..00000000000 --- a/go/vt/vtorc/inst/instance_key.go +++ /dev/null @@ -1,189 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -// InstanceKey is an instance indicator, identifued by hostname and port -type InstanceKey struct { - Hostname string - Port int -} - -var ( - ipv4Regexp = regexp.MustCompile(`^([0-9]+)[.]([0-9]+)[.]([0-9]+)[.]([0-9]+)$`) - ipv4HostPortRegexp = regexp.MustCompile(`^([^:]+):([0-9]+)$`) - ipv4HostRegexp = regexp.MustCompile(`^([^:]+)$`) - ipv6HostPortRegexp = regexp.MustCompile(`^\[([:0-9a-fA-F]+)\]:([0-9]+)$`) // e.g. [2001:db8:1f70::999:de8:7648:6e8]:3308 - ipv6HostRegexp = regexp.MustCompile(`^([:0-9a-fA-F]+)$`) // e.g. 
2001:db8:1f70::999:de8:7648:6e8 -) - -const detachHint = "//" - -func newInstanceKey(hostname string, port int, resolve bool) (instanceKey *InstanceKey, err error) { - if hostname == "" { - return instanceKey, fmt.Errorf("NewResolveInstanceKey: Empty hostname") - } - - instanceKey = &InstanceKey{Hostname: hostname, Port: port} - if resolve { - instanceKey, err = instanceKey.ResolveHostname() - } - return instanceKey, err -} - -// newInstanceKeyStrings -func newInstanceKeyStrings(hostname string, port string, resolve bool) (*InstanceKey, error) { - portInt, err := strconv.Atoi(port) - if err != nil { - return nil, fmt.Errorf("Invalid port: %s", port) - } - return newInstanceKey(hostname, portInt, resolve) -} - -func parseRawInstanceKey(hostPort string, resolve bool) (instanceKey *InstanceKey, err error) { - hostname := "" - port := "" - if submatch := ipv4HostPortRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 { - hostname = submatch[1] - port = submatch[2] - } else if submatch := ipv4HostRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 { - hostname = submatch[1] - } else if submatch := ipv6HostPortRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 { - hostname = submatch[1] - port = submatch[2] - } else if submatch := ipv6HostRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 { - hostname = submatch[1] - } else { - return nil, fmt.Errorf("Cannot parse address: %s", hostPort) - } - if port == "" { - port = "3306" - } - return newInstanceKeyStrings(hostname, port, resolve) -} - -func NewResolveInstanceKey(hostname string, port int) (instanceKey *InstanceKey, err error) { - return newInstanceKey(hostname, port, true) -} - -// NewResolveInstanceKeyStrings creates and resolves a new instance key based on string params -func NewResolveInstanceKeyStrings(hostname string, port string) (*InstanceKey, error) { - return newInstanceKeyStrings(hostname, port, true) -} - -func ParseResolveInstanceKey(hostPort string) (instanceKey *InstanceKey, err error) { - 
return parseRawInstanceKey(hostPort, true) -} - -func ParseRawInstanceKey(hostPort string) (instanceKey *InstanceKey, err error) { - return parseRawInstanceKey(hostPort, false) -} - -// NewResolveInstanceKeyStrings creates and resolves a new instance key based on string params -func NewRawInstanceKeyStrings(hostname string, port string) (*InstanceKey, error) { - return newInstanceKeyStrings(hostname, port, false) -} - -func (instanceKey *InstanceKey) ResolveHostname() (*InstanceKey, error) { - if !instanceKey.IsValid() { - return instanceKey, nil - } - - hostname, err := ResolveHostname(instanceKey.Hostname) - if err == nil { - instanceKey.Hostname = hostname - } - return instanceKey, err -} - -// Equals tests equality between this key and another key -func (instanceKey *InstanceKey) Equals(other *InstanceKey) bool { - if other == nil { - return false - } - return instanceKey.Hostname == other.Hostname && instanceKey.Port == other.Port -} - -// SmallerThan returns true if this key is dictionary-smaller than another. -// This is used for consistent sorting/ordering; there's nothing magical about it. 
-func (instanceKey *InstanceKey) SmallerThan(other *InstanceKey) bool { - if instanceKey.Hostname < other.Hostname { - return true - } - if instanceKey.Hostname == other.Hostname && instanceKey.Port < other.Port { - return true - } - return false -} - -// IsDetached returns 'true' when this hostname is logically "detached" -func (instanceKey *InstanceKey) IsDetached() bool { - return strings.HasPrefix(instanceKey.Hostname, detachHint) -} - -// IsValid uses simple heuristics to see whether this key represents an actual instance -func (instanceKey *InstanceKey) IsValid() bool { - if instanceKey.Hostname == "_" { - return false - } - if instanceKey.IsDetached() { - return false - } - return len(instanceKey.Hostname) > 0 && instanceKey.Port > 0 -} - -// DetachedKey returns an instance key whose hostname is detahced: invalid, but recoverable -func (instanceKey *InstanceKey) DetachedKey() *InstanceKey { - if instanceKey.IsDetached() { - return instanceKey - } - return &InstanceKey{Hostname: fmt.Sprintf("%s%s", detachHint, instanceKey.Hostname), Port: instanceKey.Port} -} - -// ReattachedKey returns an instance key whose hostname is detahced: invalid, but recoverable -func (instanceKey *InstanceKey) ReattachedKey() *InstanceKey { - if !instanceKey.IsDetached() { - return instanceKey - } - return &InstanceKey{Hostname: instanceKey.Hostname[len(detachHint):], Port: instanceKey.Port} -} - -// StringCode returns an official string representation of this key -func (instanceKey *InstanceKey) StringCode() string { - return fmt.Sprintf("%s:%d", instanceKey.Hostname, instanceKey.Port) -} - -// DisplayString returns a user-friendly string representation of this key -func (instanceKey *InstanceKey) DisplayString() string { - return instanceKey.StringCode() -} - -// String returns a user-friendly string representation of this key -func (instanceKey InstanceKey) String() string { - return instanceKey.StringCode() -} - -// IsValid uses simple heuristics to see whether this key 
represents an actual instance -func (instanceKey *InstanceKey) IsIPv4() bool { - return ipv4Regexp.MatchString(instanceKey.Hostname) -} diff --git a/go/vt/vtorc/inst/instance_key_map.go b/go/vt/vtorc/inst/instance_key_map.go deleted file mode 100644 index 15d21151f12..00000000000 --- a/go/vt/vtorc/inst/instance_key_map.go +++ /dev/null @@ -1,141 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "encoding/json" - "sort" - "strings" -) - -// InstanceKeyMap is a convenience struct for listing InstanceKey-s -type InstanceKeyMap map[InstanceKey]bool - -func NewInstanceKeyMap() *InstanceKeyMap { - return &InstanceKeyMap{} -} - -// AddKey adds a single key to this map -func (instanceKeyMap *InstanceKeyMap) AddKey(key InstanceKey) { - (*instanceKeyMap)[key] = true -} - -// AddKeys adds all given keys to this map -func (instanceKeyMap *InstanceKeyMap) AddKeys(keys []InstanceKey) { - for _, key := range keys { - instanceKeyMap.AddKey(key) - } -} - -// AddInstances adds keys of all given instances to this map -func (instanceKeyMap *InstanceKeyMap) AddInstances(instances [](*Instance)) { - for _, instance := range instances { - instanceKeyMap.AddKey(instance.Key) - } -} - -// HasKey checks if given key is within the map -func (instanceKeyMap *InstanceKeyMap) HasKey(key InstanceKey) bool { - _, ok := (*instanceKeyMap)[key] - return ok -} - -// GetInstanceKeys returns keys in this map in the form of 
an array -func (instanceKeyMap *InstanceKeyMap) GetInstanceKeys() []InstanceKey { - res := []InstanceKey{} - for key := range *instanceKeyMap { - res = append(res, key) - } - sort.Slice(res, func(i, j int) bool { - return res[i].Hostname < res[j].Hostname || res[i].Hostname == res[j].Hostname && res[i].Port < res[j].Port - }) - return res -} - -// Intersect returns a keymap which is the intersection of this and another map -func (instanceKeyMap *InstanceKeyMap) Intersect(other *InstanceKeyMap) *InstanceKeyMap { - intersected := NewInstanceKeyMap() - for key := range *other { - if instanceKeyMap.HasKey(key) { - intersected.AddKey(key) - } - } - return intersected -} - -// MarshalJSON will marshal this map as JSON -func (instanceKeyMap InstanceKeyMap) MarshalJSON() ([]byte, error) { - return json.Marshal(instanceKeyMap.GetInstanceKeys()) -} - -// UnmarshalJSON reds this object from JSON -func (instanceKeyMap *InstanceKeyMap) UnmarshalJSON(b []byte) error { - var keys []InstanceKey - if err := json.Unmarshal(b, &keys); err != nil { - return err - } - *instanceKeyMap = make(InstanceKeyMap) - for _, key := range keys { - instanceKeyMap.AddKey(key) - } - return nil -} - -// ToJSON will marshal this map as JSON -func (instanceKeyMap *InstanceKeyMap) ToJSON() (string, error) { - bytes, err := instanceKeyMap.MarshalJSON() - return string(bytes), err -} - -// ToJSONString will marshal this map as JSON -func (instanceKeyMap *InstanceKeyMap) ToJSONString() string { - s, _ := instanceKeyMap.ToJSON() - return s -} - -// ToCommaDelimitedList will export this map in comma delimited format -func (instanceKeyMap *InstanceKeyMap) ToCommaDelimitedList() string { - keyDisplays := []string{} - for key := range *instanceKeyMap { - keyDisplays = append(keyDisplays, key.DisplayString()) - } - return strings.Join(keyDisplays, ",") -} - -// ReadJSON unmarshalls a json into this map -func (instanceKeyMap *InstanceKeyMap) ReadJSON(jsonString string) error { - var keys []InstanceKey - err := 
json.Unmarshal([]byte(jsonString), &keys) - if err != nil { - return err - } - instanceKeyMap.AddKeys(keys) - return err -} - -// ReadJSON unmarshalls a json into this map -func (instanceKeyMap *InstanceKeyMap) ReadCommaDelimitedList(list string) error { - tokens := strings.Split(list, ",") - for _, token := range tokens { - key, err := ParseResolveInstanceKey(token) - if err != nil { - return err - } - instanceKeyMap.AddKey(*key) - } - return nil -} diff --git a/go/vt/vtorc/inst/instance_key_map_test.go b/go/vt/vtorc/inst/instance_key_map_test.go deleted file mode 100644 index a390ef99532..00000000000 --- a/go/vt/vtorc/inst/instance_key_map_test.go +++ /dev/null @@ -1,125 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "math/rand" - "testing" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/vt/vtorc/config" -) - -func init() { - config.MarkConfigurationLoaded() -} - -func TestGetInstanceKeys(t *testing.T) { - for range rand.Perm(10) { // Just running many iterations to cover multiple possible map iteration ordering. Perm() is just used as an array generator here. - m := *NewInstanceKeyMap() - m.AddKey(key1) - m.AddKey(key2) - keys := m.GetInstanceKeys() - require.Equal(t, keys[0], key1) - require.Equal(t, keys[1], key2) - } - for range rand.Perm(10) { // Just running many iterations to cover multiple possible map iteration ordering. Perm() is just used as an array generator here. 
- m := *NewInstanceKeyMap() - m.AddKey(key2) - m.AddKey(key1) - keys := m.GetInstanceKeys() - require.Equal(t, keys[0], key1) - require.Equal(t, keys[1], key2) - } -} - -func TestInstanceKeyMapToJSON(t *testing.T) { - m := *NewInstanceKeyMap() - m.AddKey(key1) - m.AddKey(key2) - json, err := m.ToJSON() - require.NoError(t, err) - ok := (json == `[{"Hostname":"host1","Port":3306},{"Hostname":"host2","Port":3306}]`) || (json == `[{"Hostname":"host2","Port":3306},{"Hostname":"host1","Port":3306}]`) - require.True(t, ok) -} - -func TestInstanceKeyMapReadJSON(t *testing.T) { - json := `[{"Hostname":"host1","Port":3306},{"Hostname":"host2","Port":3306}]` - m := *NewInstanceKeyMap() - _ = m.ReadJSON(json) - require.Equal(t, len(m), 2) - require.True(t, m[key1]) - require.True(t, m[key2]) -} - -func TestEmptyInstanceKeyMapToCommaDelimitedList(t *testing.T) { - m := *NewInstanceKeyMap() - res := m.ToCommaDelimitedList() - - require.Equal(t, res, "") -} - -func TestInstanceKeyMapToCommaDelimitedList(t *testing.T) { - m := *NewInstanceKeyMap() - m.AddKey(key1) - m.AddKey(key2) - res := m.ToCommaDelimitedList() - - ok := (res == `host1:3306,host2:3306`) || (res == `host2:3306,host1:3306`) - require.True(t, ok) -} - -func TestIntersect(t *testing.T) { - { - m := NewInstanceKeyMap() - m.AddKey(key1) - m.AddKey(key2) - - other := NewInstanceKeyMap() - other.AddKey(key3) - other.AddKey(key2) - - intersected := m.Intersect(other) - require.Equal(t, len(*intersected), 1) - } - { - m := NewInstanceKeyMap() - m.AddKey(key1) - - other := NewInstanceKeyMap() - other.AddKey(key3) - other.AddKey(key2) - - intersected := m.Intersect(other) - require.Equal(t, len(*intersected), 0) - } - { - m := NewInstanceKeyMap() - m.AddKey(key1) - m.AddKey(key2) - - other := NewInstanceKeyMap() - other.AddKey(key1) - other.AddKey(key3) - other.AddKey(key2) - - intersected := m.Intersect(other) - require.Equal(t, len(*intersected), 2) - } - -} diff --git a/go/vt/vtorc/inst/instance_key_test.go 
b/go/vt/vtorc/inst/instance_key_test.go deleted file mode 100644 index 1374aad570e..00000000000 --- a/go/vt/vtorc/inst/instance_key_test.go +++ /dev/null @@ -1,209 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/vt/vtorc/config" -) - -func init() { - config.MarkConfigurationLoaded() -} - -var key1 = InstanceKey{Hostname: "host1", Port: 3306} -var key2 = InstanceKey{Hostname: "host2", Port: 3306} -var key3 = InstanceKey{Hostname: "host3", Port: 3306} - -func TestInstanceKeyEquals(t *testing.T) { - i1 := Instance{ - Key: InstanceKey{ - Hostname: "sql00.db", - Port: 3306, - }, - Version: "5.6", - } - i2 := Instance{ - Key: InstanceKey{ - Hostname: "sql00.db", - Port: 3306, - }, - Version: "5.5", - } - - require.Equal(t, i1.Key, i2.Key) - - i2.Key.Port = 3307 - require.NotEqual(t, i1.Key, i2.Key) -} - -func TestNewResolveInstanceKey(t *testing.T) { - { - i, err := NewResolveInstanceKey("127.0.0.1", 3308) - require.NoError(t, err) - require.Equal(t, i.Hostname, "127.0.0.1") - require.Equal(t, i.Port, 3308) - } - { - _, err := NewResolveInstanceKey("", 3309) - require.Error(t, err) - } - { - i, err := NewResolveInstanceKey("127.0.0.1", 0) - require.NoError(t, err) - require.False(t, i.IsValid()) - } -} - -func TestParseResolveInstanceKey(t *testing.T) { - { - key, err := ParseResolveInstanceKey("myhost:1234") - require.NoError(t, err) 
- require.Equal(t, key.Hostname, "myhost") - require.Equal(t, key.Port, 1234) - } - { - key, err := ParseResolveInstanceKey("myhost") - require.NoError(t, err) - require.Equal(t, key.Hostname, "myhost") - require.Equal(t, key.Port, 3306) - } - { - key, err := ParseResolveInstanceKey("10.0.0.3:3307") - require.NoError(t, err) - require.Equal(t, key.Hostname, "10.0.0.3") - require.Equal(t, key.Port, 3307) - } - { - key, err := ParseResolveInstanceKey("10.0.0.3") - require.NoError(t, err) - require.Equal(t, key.Hostname, "10.0.0.3") - require.Equal(t, key.Port, 3306) - } - { - key, err := ParseResolveInstanceKey("[2001:db8:1f70::999:de8:7648:6e8]:3308") - require.NoError(t, err) - require.Equal(t, key.Hostname, "2001:db8:1f70::999:de8:7648:6e8") - require.Equal(t, key.Port, 3308) - } - { - key, err := ParseResolveInstanceKey("::1") - require.NoError(t, err) - require.Equal(t, key.Hostname, "::1") - require.Equal(t, key.Port, 3306) - } - { - key, err := ParseResolveInstanceKey("0:0:0:0:0:0:0:0") - require.NoError(t, err) - require.Equal(t, key.Hostname, "0:0:0:0:0:0:0:0") - require.Equal(t, key.Port, 3306) - } - { - _, err := ParseResolveInstanceKey("[2001:xxxx:1f70::999:de8:7648:6e8]:3308") - require.Error(t, err) - } - { - _, err := ParseResolveInstanceKey("10.0.0.4:") - require.Error(t, err) - } - { - _, err := ParseResolveInstanceKey("10.0.0.4:5.6.7") - require.Error(t, err) - } -} - -func TestNewResolveInstanceKeyStrings(t *testing.T) { - { - i, err := NewResolveInstanceKeyStrings("127.0.0.1", "3306") - require.NoError(t, err) - require.Equal(t, i.Hostname, "127.0.0.1") - require.Equal(t, i.Port, 3306) - } - { - _, err := NewResolveInstanceKeyStrings("127.0.0.1", "") - require.Error(t, err) - } - { - _, err := NewResolveInstanceKeyStrings("127.0.0.1", "3306x") - require.Error(t, err) - } -} - -func TestInstanceKeyValid(t *testing.T) { - require.True(t, key1.IsValid()) - i, err := ParseResolveInstanceKey("_:3306") - require.NoError(t, err) - require.False(t, 
i.IsValid()) - i, err = ParseResolveInstanceKey("//myhost:3306") - require.NoError(t, err) - require.False(t, i.IsValid()) -} - -func TestInstanceKeyDetach(t *testing.T) { - require.False(t, key1.IsDetached()) - detached1 := key1.DetachedKey() - require.True(t, detached1.IsDetached()) - detached2 := key1.DetachedKey() - require.True(t, detached2.IsDetached()) - require.True(t, detached1.Equals(detached2)) - - reattached1 := detached1.ReattachedKey() - require.False(t, reattached1.IsDetached()) - require.True(t, reattached1.Equals(&key1)) - reattached2 := reattached1.ReattachedKey() - require.False(t, reattached2.IsDetached()) - require.True(t, reattached1.Equals(reattached2)) -} - -func TestIsIPv4(t *testing.T) { - require.False(t, key1.IsIPv4()) - { - k, _ := ParseRawInstanceKey("mysql-server-1:3306") - require.False(t, k.IsIPv4()) - } - { - k, _ := ParseRawInstanceKey("mysql-server-1") - require.False(t, k.IsIPv4()) - } - { - k, _ := ParseRawInstanceKey("my.sql.server.1") - require.False(t, k.IsIPv4()) - } - { - k, _ := ParseRawInstanceKey("mysql-server-1:3306") - require.False(t, k.IsIPv4()) - } - { - k, _ := ParseRawInstanceKey("127.0.0:3306") - require.False(t, k.IsIPv4()) - } - { - k, _ := ParseRawInstanceKey("127::0::0::1:3306") - require.False(t, k.IsIPv4()) - } - { - k, _ := ParseRawInstanceKey("127.0.0.1:3306") - require.True(t, k.IsIPv4()) - } - { - k, _ := ParseRawInstanceKey("127.0.0.1") - require.True(t, k.IsIPv4()) - } -} diff --git a/go/vt/vtorc/inst/instance_test.go b/go/vt/vtorc/inst/instance_test.go index ebc2d9d0c89..9ca2f243999 100644 --- a/go/vt/vtorc/inst/instance_test.go +++ b/go/vt/vtorc/inst/instance_test.go @@ -28,45 +28,7 @@ func init() { config.MarkConfigurationLoaded() } -var instance1 = Instance{Key: key1} - -func TestIsSmallerMajorVersion(t *testing.T) { - i55 := Instance{Version: "5.5"} - i5517 := Instance{Version: "5.5.17"} - i56 := Instance{Version: "5.6"} - - require.False(t, i55.IsSmallerMajorVersion(&i5517)) - require.False(t, 
i56.IsSmallerMajorVersion(&i5517)) - require.True(t, i55.IsSmallerMajorVersion(&i56)) -} - -func TestIsVersion(t *testing.T) { - i51 := Instance{Version: "5.1.19"} - i55 := Instance{Version: "5.5.17-debug"} - i56 := Instance{Version: "5.6.20"} - i57 := Instance{Version: "5.7.8-log"} - - require.True(t, i51.IsMySQL51()) - require.True(t, i55.IsMySQL55()) - require.True(t, i56.IsMySQL56()) - require.False(t, i55.IsMySQL56()) - require.True(t, i57.IsMySQL57()) - require.False(t, i56.IsMySQL57()) -} - -func TestIsSmallerBinlogFormat(t *testing.T) { - iStatement := &Instance{Key: key1, BinlogFormat: "STATEMENT"} - iRow := &Instance{Key: key2, BinlogFormat: "ROW"} - iMixed := &Instance{Key: key3, BinlogFormat: "MIXED"} - require.True(t, iStatement.IsSmallerBinlogFormat(iRow)) - require.False(t, iStatement.IsSmallerBinlogFormat(iStatement)) - require.False(t, iRow.IsSmallerBinlogFormat(iStatement)) - - require.True(t, iStatement.IsSmallerBinlogFormat(iMixed)) - require.True(t, iMixed.IsSmallerBinlogFormat(iRow)) - require.False(t, iMixed.IsSmallerBinlogFormat(iStatement)) - require.False(t, iRow.IsSmallerBinlogFormat(iMixed)) -} +var instance1 = Instance{InstanceAlias: "zone1-100"} func TestReplicationThreads(t *testing.T) { { @@ -79,7 +41,7 @@ func TestReplicationThreads(t *testing.T) { require.True(t, instance1.ReplicationThreadsStopped()) } { - i := Instance{Key: key1, ReplicationIOThreadState: ReplicationThreadStateNoThread, ReplicationSQLThreadState: ReplicationThreadStateNoThread} + i := Instance{InstanceAlias: "zone1-100", ReplicationIOThreadState: ReplicationThreadStateNoThread, ReplicationSQLThreadState: ReplicationThreadStateNoThread} require.False(t, i.ReplicationThreadsExist()) } } diff --git a/go/vt/vtorc/inst/instance_utils.go b/go/vt/vtorc/inst/instance_utils.go index b14a0794c31..f6bde729822 100644 --- a/go/vt/vtorc/inst/instance_utils.go +++ b/go/vt/vtorc/inst/instance_utils.go @@ -18,14 +18,9 @@ package inst import ( "regexp" - "strconv" "strings" ) -var 
( - DowntimeLostInRecoveryMessage = "lost-in-recovery" -) - // MajorVersion returns a MySQL major version number (e.g. given "5.5.36" it returns "5.5") func MajorVersion(version string) []string { tokens := strings.Split(version, ".") @@ -35,37 +30,6 @@ func MajorVersion(version string) []string { return tokens[:2] } -// IsSmallerMajorVersion tests two versions against another and returns true if -// the former is a smaller "major" varsion than the latter. -// e.g. 5.5.36 is NOT a smaller major version as comapred to 5.5.40, but IS as compared to 5.6.9 -func IsSmallerMajorVersion(version string, otherVersion string) bool { - thisMajorVersion := MajorVersion(version) - otherMajorVersion := MajorVersion(otherVersion) - for i := 0; i < len(thisMajorVersion); i++ { - thisToken, _ := strconv.Atoi(thisMajorVersion[i]) - otherToken, _ := strconv.Atoi(otherMajorVersion[i]) - if thisToken < otherToken { - return true - } - if thisToken > otherToken { - return false - } - } - return false -} - -// IsSmallerBinlogFormat tests two binlog formats and sees if one is "smaller" than the other. -// "smaller" binlog format means you can replicate from the smaller to the larger. 
-func IsSmallerBinlogFormat(binlogFormat string, otherBinlogFormat string) bool { - if binlogFormat == "STATEMENT" { - return (otherBinlogFormat == "ROW" || otherBinlogFormat == "MIXED") - } - if binlogFormat == "MIXED" { - return otherBinlogFormat == "ROW" - } - return false -} - // RegexpMatchPatterns returns true if s matches any of the provided regexpPatterns func RegexpMatchPatterns(s string, regexpPatterns []string) bool { for _, filter := range regexpPatterns { diff --git a/go/vt/vtorc/inst/keyspace_dao.go b/go/vt/vtorc/inst/keyspace_dao.go index a06d9baa11e..d764e3fc56a 100644 --- a/go/vt/vtorc/inst/keyspace_dao.go +++ b/go/vt/vtorc/inst/keyspace_dao.go @@ -22,6 +22,7 @@ import ( "vitess.io/vitess/go/vt/external/golib/sqlutils" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/vtorc/db" ) @@ -77,3 +78,12 @@ func SaveKeyspace(keyspace *topo.KeyspaceInfo) error { ) return err } + +// GetDurabilityPolicy gets the durability policy for the given keyspace. +func GetDurabilityPolicy(keyspace string) (reparentutil.Durabler, error) { + ki, err := ReadKeyspace(keyspace) + if err != nil { + return nil, err + } + return reparentutil.GetDurabilityPolicy(ki.DurabilityPolicy) +} diff --git a/go/vt/vtorc/inst/keyspace_dao_test.go b/go/vt/vtorc/inst/keyspace_dao_test.go index 56ad06ec9e5..015d3e75256 100644 --- a/go/vt/vtorc/inst/keyspace_dao_test.go +++ b/go/vt/vtorc/inst/keyspace_dao_test.go @@ -25,23 +25,24 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/vtorc/db" ) func TestSaveAndReadKeyspace(t *testing.T) { - orcDb, err := db.OpenVTOrc() - require.NoError(t, err) + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. 
defer func() { - _, err = orcDb.Exec("delete from vitess_keyspace") - require.NoError(t, err) + db.ClearVTOrcDatabase() }() tests := []struct { - name string - keyspaceName string - keyspace *topodatapb.Keyspace - keyspaceWanted *topodatapb.Keyspace - err string + name string + keyspaceName string + keyspace *topodatapb.Keyspace + keyspaceWanted *topodatapb.Keyspace + err string + errInDurabilityPolicy string + semiSyncAckersWanted int }{ { name: "Success with keyspaceType and durability", @@ -50,16 +51,16 @@ func TestSaveAndReadKeyspace(t *testing.T) { KeyspaceType: topodatapb.KeyspaceType_NORMAL, DurabilityPolicy: "semi_sync", }, - keyspaceWanted: nil, - err: "", + keyspaceWanted: nil, + semiSyncAckersWanted: 1, }, { name: "Success with keyspaceType and no durability", keyspaceName: "ks2", keyspace: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, }, - keyspaceWanted: nil, - err: "", + keyspaceWanted: nil, + errInDurabilityPolicy: "durability policy not found", }, { name: "Success with snapshot keyspaceType", keyspaceName: "ks3", @@ -67,7 +68,6 @@ func TestSaveAndReadKeyspace(t *testing.T) { KeyspaceType: topodatapb.KeyspaceType_SNAPSHOT, }, keyspaceWanted: nil, - err: "", }, { name: "Success with fields that are not stored", keyspaceName: "ks4", @@ -80,7 +80,7 @@ func TestSaveAndReadKeyspace(t *testing.T) { KeyspaceType: topodatapb.KeyspaceType_NORMAL, DurabilityPolicy: "none", }, - err: "", + semiSyncAckersWanted: 0, }, { name: "No keyspace found", keyspaceName: "ks5", @@ -107,11 +107,21 @@ func TestSaveAndReadKeyspace(t *testing.T) { readKeyspaceInfo, err := ReadKeyspace(tt.keyspaceName) if tt.err != "" { require.EqualError(t, err, tt.err) - } else { - require.NoError(t, err) - require.True(t, topotools.KeyspaceEquality(tt.keyspaceWanted, readKeyspaceInfo.Keyspace)) - require.Equal(t, tt.keyspaceName, readKeyspaceInfo.KeyspaceName()) + return + } + require.NoError(t, err) + require.True(t, topotools.KeyspaceEquality(tt.keyspaceWanted, 
readKeyspaceInfo.Keyspace)) + require.Equal(t, tt.keyspaceName, readKeyspaceInfo.KeyspaceName()) + if tt.keyspace.KeyspaceType == topodatapb.KeyspaceType_SNAPSHOT { + return + } + durabilityPolicy, err := GetDurabilityPolicy(tt.keyspaceName) + if tt.errInDurabilityPolicy != "" { + require.EqualError(t, err, tt.errInDurabilityPolicy) + return } + require.NoError(t, err) + require.EqualValues(t, tt.semiSyncAckersWanted, reparentutil.SemiSyncAckers(durabilityPolicy, nil)) }) } } diff --git a/go/vt/vtorc/inst/maintenance.go b/go/vt/vtorc/inst/maintenance.go deleted file mode 100644 index 08fa3554d1e..00000000000 --- a/go/vt/vtorc/inst/maintenance.go +++ /dev/null @@ -1,45 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package inst - -import ( - "vitess.io/vitess/go/vt/vtorc/config" -) - -// Maintenance indicates a maintenance entry (also in the database) -type Maintenance struct { - MaintenanceID uint - Key InstanceKey - BeginTimestamp string - SecondsElapsed uint - IsActive bool - Owner string - Reason string -} - -var maintenanceOwner string - -func GetMaintenanceOwner() string { - if maintenanceOwner != "" { - return maintenanceOwner - } - return config.MaintenanceOwner -} - -func SetMaintenanceOwner(owner string) { - maintenanceOwner = owner -} diff --git a/go/vt/vtorc/inst/maintenance_dao.go b/go/vt/vtorc/inst/maintenance_dao.go deleted file mode 100644 index b2ac833b353..00000000000 --- a/go/vt/vtorc/inst/maintenance_dao.go +++ /dev/null @@ -1,86 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "fmt" - - "vitess.io/vitess/go/vt/log" - - "vitess.io/vitess/go/vt/vtorc/config" - "vitess.io/vitess/go/vt/vtorc/db" -) - -// ExpireMaintenance will remove the maintenance flag on old maintenances and on bounded maintenances -func ExpireMaintenance() error { - { - res, err := db.ExecVTOrc(` - delete from - database_instance_maintenance - where - maintenance_active is null - and end_timestamp < NOW() - INTERVAL ? 
DAY - `, - config.MaintenancePurgeDays, - ) - if err != nil { - log.Error(err) - return err - } - if rowsAffected, _ := res.RowsAffected(); rowsAffected > 0 { - _ = AuditOperation("expire-maintenance", nil, fmt.Sprintf("Purged historical entries: %d", rowsAffected)) - } - } - { - res, err := db.ExecVTOrc(` - delete from - database_instance_maintenance - where - maintenance_active = 1 - and end_timestamp < NOW() - `, - ) - if err != nil { - log.Error(err) - return err - } - if rowsAffected, _ := res.RowsAffected(); rowsAffected > 0 { - _ = AuditOperation("expire-maintenance", nil, fmt.Sprintf("Expired bounded: %d", rowsAffected)) - } - } - { - res, err := db.ExecVTOrc(` - delete from - database_instance_maintenance - where - explicitly_bounded = 0 - and concat(processing_node_hostname, ':', processing_node_token) not in ( - select concat(hostname, ':', token) from node_health - ) - `, - ) - if err != nil { - log.Error(err) - return err - } - if rowsAffected, _ := res.RowsAffected(); rowsAffected > 0 { - _ = AuditOperation("expire-maintenance", nil, fmt.Sprintf("Expired dead: %d", rowsAffected)) - } - } - - return nil -} diff --git a/go/vt/vtorc/inst/oracle_gtid_set.go b/go/vt/vtorc/inst/oracle_gtid_set.go index c4e88fccbd3..0ddab05ef55 100644 --- a/go/vt/vtorc/inst/oracle_gtid_set.go +++ b/go/vt/vtorc/inst/oracle_gtid_set.go @@ -22,9 +22,10 @@ import ( // OracleGtidSet represents a set of GTID ranges as depicted by Retrieved_Gtid_Set, Executed_Gtid_Set or @@gtid_purged. type OracleGtidSet struct { - GtidEntries [](*OracleGtidSetEntry) + GtidEntries []*OracleGtidSetEntry } +// NewOracleGtidSet creates a new GTID set. // Example input: `230ea8ea-81e3-11e4-972a-e25ec4bd140a:1-10539, // 316d193c-70e5-11e5-adb2-ecf4bb2262ff:1-8935:8984-6124596, // 321f5c0d-70e5-11e5-adb2-ecf4bb2262ff:1-56` @@ -54,7 +55,7 @@ func NewOracleGtidSet(gtidSet string) (res *OracleGtidSet, err error) { // By way of how this works there can only be one entry matching our UUID, but we generalize. 
// We keep order of entries. func (oracleGTIDSet *OracleGtidSet) RemoveUUID(uuid string) (removed bool) { - filteredEntries := [](*OracleGtidSetEntry){} + var filteredEntries []*OracleGtidSetEntry for _, entry := range oracleGTIDSet.GtidEntries { if entry.UUID == uuid { removed = true @@ -79,7 +80,7 @@ func (oracleGTIDSet *OracleGtidSet) RetainUUIDs(uuids []string) (anythingRemoved for _, uuid := range uuids { retainUUIDs[uuid] = true } - filteredEntries := [](*OracleGtidSetEntry){} + var filteredEntries []*OracleGtidSetEntry for _, entry := range oracleGTIDSet.GtidEntries { if retainUUIDs[entry.UUID] { filteredEntries = append(filteredEntries, entry) @@ -107,8 +108,8 @@ func (oracleGTIDSet *OracleGtidSet) SharedUUIDs(other *OracleGtidSet) (shared [] return shared } -// String returns a user-friendly string representation of this entry -func (oracleGTIDSet *OracleGtidSet) Explode() (result [](*OracleGtidSetEntry)) { +// Explode returns a user-friendly string representation of this entry +func (oracleGTIDSet *OracleGtidSet) Explode() (result []*OracleGtidSetEntry) { for _, entries := range oracleGTIDSet.GtidEntries { result = append(result, entries.Explode()...) } @@ -116,7 +117,7 @@ func (oracleGTIDSet *OracleGtidSet) Explode() (result [](*OracleGtidSetEntry)) { } func (oracleGTIDSet *OracleGtidSet) String() string { - tokens := []string{} + var tokens []string for _, entry := range oracleGTIDSet.GtidEntries { tokens = append(tokens, entry.String()) } diff --git a/go/vt/vtorc/inst/postponed_functions.go b/go/vt/vtorc/inst/postponed_functions.go deleted file mode 100644 index 1ce750964a5..00000000000 --- a/go/vt/vtorc/inst/postponed_functions.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "sync" - - "vitess.io/vitess/go/vt/log" -) - -type PostponedFunctionsContainer struct { - waitGroup sync.WaitGroup - mutex sync.Mutex - descriptions []string -} - -func NewPostponedFunctionsContainer() *PostponedFunctionsContainer { - postponedFunctionsContainer := &PostponedFunctionsContainer{ - descriptions: []string{}, - } - return postponedFunctionsContainer -} - -func (postponedFuncsContainer *PostponedFunctionsContainer) AddPostponedFunction(postponedFunction func() error, description string) { - postponedFuncsContainer.mutex.Lock() - defer postponedFuncsContainer.mutex.Unlock() - - postponedFuncsContainer.descriptions = append(postponedFuncsContainer.descriptions, description) - - postponedFuncsContainer.waitGroup.Add(1) - go func() { - defer postponedFuncsContainer.waitGroup.Done() - _ = postponedFunction() - }() -} - -func (postponedFuncsContainer *PostponedFunctionsContainer) Wait() { - log.Infof("PostponedFunctionsContainer: waiting on %+v postponed functions", postponedFuncsContainer.Len()) - postponedFuncsContainer.waitGroup.Wait() - log.Infof("PostponedFunctionsContainer: done waiting") -} - -func (postponedFuncsContainer *PostponedFunctionsContainer) Len() int { - postponedFuncsContainer.mutex.Lock() - defer postponedFuncsContainer.mutex.Unlock() - - return len(postponedFuncsContainer.descriptions) -} - -func (postponedFuncsContainer *PostponedFunctionsContainer) Descriptions() []string { - postponedFuncsContainer.mutex.Lock() - defer postponedFuncsContainer.mutex.Unlock() - - return 
postponedFuncsContainer.descriptions -} diff --git a/go/vt/vtorc/inst/process.go b/go/vt/vtorc/inst/process.go deleted file mode 100644 index 99985045b56..00000000000 --- a/go/vt/vtorc/inst/process.go +++ /dev/null @@ -1,32 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -// Process presents a MySQL executing thread (as observed by PROCESSLIST) -type Process struct { - InstanceHostname string - InstancePort int - ID int64 - User string - Host string - Db string - Command string - Time int64 - State string - Info string - StartedAt string -} diff --git a/go/vt/vtorc/inst/replication_thread_state.go b/go/vt/vtorc/inst/replication_thread_state.go index e885625aa3f..a95e65ca8ec 100644 --- a/go/vt/vtorc/inst/replication_thread_state.go +++ b/go/vt/vtorc/inst/replication_thread_state.go @@ -16,7 +16,9 @@ package inst -import "vitess.io/vitess/go/mysql" +import ( + "vitess.io/vitess/go/mysql/replication" +) type ReplicationThreadState int @@ -27,25 +29,15 @@ const ( ReplicationThreadStateOther ReplicationThreadState = 2 ) -func ReplicationThreadStateFromStatus(status string) ReplicationThreadState { - switch status { - case "No": - return ReplicationThreadStateStopped - case "Yes": - return ReplicationThreadStateRunning - } - return ReplicationThreadStateOther -} - // ReplicationThreadStateFromReplicationState gets the replication thread state from replication state // TODO: Merge these two into one -func 
ReplicationThreadStateFromReplicationState(state mysql.ReplicationState) ReplicationThreadState { +func ReplicationThreadStateFromReplicationState(state replication.ReplicationState) ReplicationThreadState { switch state { - case mysql.ReplicationStateStopped: + case replication.ReplicationStateStopped: return ReplicationThreadStateStopped - case mysql.ReplicationStateRunning: + case replication.ReplicationStateRunning: return ReplicationThreadStateRunning - case mysql.ReplicationStateConnecting: + case replication.ReplicationStateConnecting: return ReplicationThreadStateOther default: return ReplicationThreadStateNoThread diff --git a/go/vt/vtorc/inst/resolve.go b/go/vt/vtorc/inst/resolve.go deleted file mode 100644 index ac3d3f6dc88..00000000000 --- a/go/vt/vtorc/inst/resolve.go +++ /dev/null @@ -1,265 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package inst - -import ( - "errors" - "fmt" - "net" - "strings" - "sync" - "time" - - "github.com/patrickmn/go-cache" - - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/vtorc/config" -) - -type HostnameResolve struct { - hostname string - resolvedHostname string -} - -func (hostnameResolve HostnameResolve) String() string { - return fmt.Sprintf("%s %s", hostnameResolve.hostname, hostnameResolve.resolvedHostname) -} - -type HostnameUnresolve struct { - hostname string - unresolvedHostname string -} - -func (hostnameUnresolve HostnameUnresolve) String() string { - return fmt.Sprintf("%s %s", hostnameUnresolve.hostname, hostnameUnresolve.unresolvedHostname) -} - -type HostnameRegistration struct { - CreatedAt time.Time - Key InstanceKey - Hostname string -} - -func NewHostnameRegistration(instanceKey *InstanceKey, hostname string) *HostnameRegistration { - return &HostnameRegistration{ - CreatedAt: time.Now(), - Key: *instanceKey, - Hostname: hostname, - } -} - -func NewHostnameDeregistration(instanceKey *InstanceKey) *HostnameRegistration { - return &HostnameRegistration{ - CreatedAt: time.Now(), - Key: *instanceKey, - Hostname: "", - } -} - -var hostnameResolvesLightweightCache *cache.Cache -var hostnameResolvesLightweightCacheInit = &sync.Mutex{} -var hostnameResolvesLightweightCacheLoadedOnceFromDB = false -var hostnameIPsCache = cache.New(10*time.Minute, time.Minute) - -func getHostnameResolvesLightweightCache() *cache.Cache { - hostnameResolvesLightweightCacheInit.Lock() - defer hostnameResolvesLightweightCacheInit.Unlock() - if hostnameResolvesLightweightCache == nil { - hostnameResolvesLightweightCache = cache.New(time.Duration(config.ExpiryHostnameResolvesMinutes)*time.Minute, time.Minute) - } - return hostnameResolvesLightweightCache -} - -func HostnameResolveMethodIsNone() bool { - return strings.ToLower(config.HostnameResolveMethod) == "none" -} - -// GetCNAME resolves an IP or hostname into a normalized valid CNAME -func GetCNAME(hostname 
string) (string, error) { - res, err := net.LookupCNAME(hostname) - if err != nil { - return hostname, err - } - res = strings.TrimRight(res, ".") - return res, nil -} - -func resolveHostname(hostname string) (string, error) { - switch strings.ToLower(config.HostnameResolveMethod) { - case "none": - return hostname, nil - case "default": - return hostname, nil - case "cname": - return GetCNAME(hostname) - case "ip": - return getHostnameIP(hostname) - } - return hostname, nil -} - -// Attempt to resolve a hostname. This may return a database cached hostname or otherwise -// it may resolve the hostname via CNAME -func ResolveHostname(hostname string) (string, error) { - hostname = strings.TrimSpace(hostname) - if hostname == "" { - return hostname, errors.New("Will not resolve empty hostname") - } - if strings.Contains(hostname, ",") { - return hostname, fmt.Errorf("Will not resolve multi-hostname: %+v", hostname) - } - if (&InstanceKey{Hostname: hostname}).IsDetached() { - // quietly abort. Nothing to do. The hostname is detached for a reason: it - // will not be resolved, for sure. - return hostname, nil - } - - // First go to lightweight cache - if resolvedHostname, found := getHostnameResolvesLightweightCache().Get(hostname); found { - return resolvedHostname.(string), nil - } - - if !hostnameResolvesLightweightCacheLoadedOnceFromDB { - // A continuous-discovery will first make sure to load all resolves from DB. - // However cli does not do so. - // Anyway, it seems like the cache was not loaded from DB. Before doing real resolves, - // let's try and get the resolved hostname from database. - if !HostnameResolveMethodIsNone() { - go func() { - if resolvedHostname, err := ReadResolvedHostname(hostname); err == nil && resolvedHostname != "" { - getHostnameResolvesLightweightCache().Set(hostname, resolvedHostname, 0) - } - }() - } - } - - // Unfound: resolve! 
- log.Infof("Hostname unresolved yet: %s", hostname) - resolvedHostname, err := resolveHostname(hostname) - if err != nil { - // Problem. What we'll do is cache the hostname for just one minute, so as to avoid flooding requests - // on one hand, yet make it refresh shortly on the other hand. Anyway do not write to database. - getHostnameResolvesLightweightCache().Set(hostname, resolvedHostname, time.Minute) - return hostname, err - } - // Good result! Cache it, also to DB - log.Infof("Cache hostname resolve %s as %s", hostname, resolvedHostname) - go UpdateResolvedHostname(hostname, resolvedHostname) - return resolvedHostname, nil -} - -// UpdateResolvedHostname will store the given resolved hostname in cache -// Returns false when the key already existed with same resolved value (similar -// to AFFECTED_ROWS() in mysql) -func UpdateResolvedHostname(hostname string, resolvedHostname string) bool { - if resolvedHostname == "" { - return false - } - if existingResolvedHostname, found := getHostnameResolvesLightweightCache().Get(hostname); found && (existingResolvedHostname == resolvedHostname) { - return false - } - getHostnameResolvesLightweightCache().Set(hostname, resolvedHostname, 0) - if !HostnameResolveMethodIsNone() { - _ = WriteResolvedHostname(hostname, resolvedHostname) - } - return true -} - -func LoadHostnameResolveCache() error { - if !HostnameResolveMethodIsNone() { - return loadHostnameResolveCacheFromDatabase() - } - return nil -} - -func loadHostnameResolveCacheFromDatabase() error { - allHostnamesResolves, err := ReadAllHostnameResolves() - if err != nil { - return err - } - for _, hostnameResolve := range allHostnamesResolves { - getHostnameResolvesLightweightCache().Set(hostnameResolve.hostname, hostnameResolve.resolvedHostname, 0) - } - hostnameResolvesLightweightCacheLoadedOnceFromDB = true - return nil -} - -func FlushNontrivialResolveCacheToDatabase() error { - if HostnameResolveMethodIsNone() { - return nil - } - items, _ := 
HostnameResolveCache() - for hostname := range items { - resolvedHostname, found := getHostnameResolvesLightweightCache().Get(hostname) - if found && (resolvedHostname.(string) != hostname) { - _ = WriteResolvedHostname(hostname, resolvedHostname.(string)) - } - } - return nil -} - -func HostnameResolveCache() (map[string]cache.Item, error) { - return getHostnameResolvesLightweightCache().Items(), nil -} - -func extractIPs(ips []net.IP) (ipv4String string, ipv6String string) { - for _, ip := range ips { - if ip4 := ip.To4(); ip4 != nil { - ipv4String = ip.String() - } else { - ipv6String = ip.String() - } - } - return ipv4String, ipv6String -} - -func getHostnameIPs(hostname string) (ips []net.IP, fromCache bool, err error) { - if ips, found := hostnameIPsCache.Get(hostname); found { - return ips.([]net.IP), true, nil - } - ips, err = net.LookupIP(hostname) - if err != nil { - log.Error(err) - return ips, false, err - } - hostnameIPsCache.Set(hostname, ips, cache.DefaultExpiration) - return ips, false, nil -} - -func getHostnameIP(hostname string) (ipString string, err error) { - ips, _, err := getHostnameIPs(hostname) - if err != nil { - return ipString, err - } - ipv4String, ipv6String := extractIPs(ips) - if ipv4String != "" { - return ipv4String, nil - } - return ipv6String, nil -} - -func ResolveHostnameIPs(hostname string) error { - ips, fromCache, err := getHostnameIPs(hostname) - if err != nil { - return err - } - if fromCache { - return nil - } - ipv4String, ipv6String := extractIPs(ips) - return writeHostnameIPs(hostname, ipv4String, ipv6String) -} diff --git a/go/vt/vtorc/inst/resolve_dao.go b/go/vt/vtorc/inst/resolve_dao.go deleted file mode 100644 index d38146469d2..00000000000 --- a/go/vt/vtorc/inst/resolve_dao.go +++ /dev/null @@ -1,219 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "github.com/rcrowley/go-metrics" - - "vitess.io/vitess/go/vt/external/golib/sqlutils" - - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/vtorc/config" - "vitess.io/vitess/go/vt/vtorc/db" -) - -var writeResolvedHostnameCounter = metrics.NewCounter() -var writeUnresolvedHostnameCounter = metrics.NewCounter() -var readResolvedHostnameCounter = metrics.NewCounter() -var readUnresolvedHostnameCounter = metrics.NewCounter() -var readAllResolvedHostnamesCounter = metrics.NewCounter() - -func init() { - _ = metrics.Register("resolve.write_resolved", writeResolvedHostnameCounter) - _ = metrics.Register("resolve.write_unresolved", writeUnresolvedHostnameCounter) - _ = metrics.Register("resolve.read_resolved", readResolvedHostnameCounter) - _ = metrics.Register("resolve.read_unresolved", readUnresolvedHostnameCounter) - _ = metrics.Register("resolve.read_resolved_all", readAllResolvedHostnamesCounter) -} - -// WriteResolvedHostname stores a hostname and the resolved hostname to backend database -func WriteResolvedHostname(hostname string, resolvedHostname string) error { - writeFunc := func() error { - _, err := db.ExecVTOrc(` - insert into - hostname_resolve (hostname, resolved_hostname, resolved_timestamp) - values - (?, ?, NOW()) - on duplicate key update - resolved_hostname = VALUES(resolved_hostname), - resolved_timestamp = VALUES(resolved_timestamp) - `, - hostname, - resolvedHostname) - if err != nil { - log.Error(err) - return err - } - if hostname != resolvedHostname { - // history is only interesting when there's actually 
something to resolve... - _, _ = db.ExecVTOrc(` - insert into - hostname_resolve_history (hostname, resolved_hostname, resolved_timestamp) - values - (?, ?, NOW()) - on duplicate key update - hostname=values(hostname), - resolved_timestamp=values(resolved_timestamp) - `, - hostname, - resolvedHostname) - } - writeResolvedHostnameCounter.Inc(1) - return nil - } - return ExecDBWriteFunc(writeFunc) -} - -// ReadResolvedHostname returns the resolved hostname given a hostname, or empty if not exists -func ReadResolvedHostname(hostname string) (string, error) { - var resolvedHostname string - - query := ` - select - resolved_hostname - from - hostname_resolve - where - hostname = ? - ` - - err := db.QueryVTOrc(query, sqlutils.Args(hostname), func(m sqlutils.RowMap) error { - resolvedHostname = m.GetString("resolved_hostname") - return nil - }) - readResolvedHostnameCounter.Inc(1) - - if err != nil { - log.Error(err) - } - return resolvedHostname, err -} - -func ReadAllHostnameResolves() ([]HostnameResolve, error) { - res := []HostnameResolve{} - query := ` - select - hostname, - resolved_hostname - from - hostname_resolve - ` - err := db.QueryVTOrcRowsMap(query, func(m sqlutils.RowMap) error { - hostnameResolve := HostnameResolve{hostname: m.GetString("hostname"), resolvedHostname: m.GetString("resolved_hostname")} - - res = append(res, hostnameResolve) - return nil - }) - readAllResolvedHostnamesCounter.Inc(1) - - if err != nil { - log.Error(err) - } - return res, err -} - -// ExpireHostnameUnresolve expires hostname_unresolve entries that haven't been updated recently. -func ExpireHostnameUnresolve() error { - writeFunc := func() error { - _, err := db.ExecVTOrc(` - delete from hostname_unresolve - where last_registered < NOW() - INTERVAL ? 
MINUTE - `, config.ExpiryHostnameResolvesMinutes, - ) - if err != nil { - log.Error(err) - } - return err - } - return ExecDBWriteFunc(writeFunc) -} - -// ForgetExpiredHostnameResolves -func ForgetExpiredHostnameResolves() error { - _, err := db.ExecVTOrc(` - delete - from hostname_resolve - where - resolved_timestamp < NOW() - interval ? minute`, - 2*config.ExpiryHostnameResolvesMinutes, - ) - return err -} - -// DeleteInvalidHostnameResolves removes invalid resolves. At this time these are: -// - infinite loop resolves (A->B and B->A), remove earlier mapping -func DeleteInvalidHostnameResolves() error { - var invalidHostnames []string - - query := ` - select - early.hostname - from - hostname_resolve as latest - join hostname_resolve early on (latest.resolved_hostname = early.hostname and latest.hostname = early.resolved_hostname) - where - latest.hostname != latest.resolved_hostname - and latest.resolved_timestamp > early.resolved_timestamp - ` - - err := db.QueryVTOrcRowsMap(query, func(m sqlutils.RowMap) error { - invalidHostnames = append(invalidHostnames, m.GetString("hostname")) - return nil - }) - if err != nil { - return err - } - - for _, invalidHostname := range invalidHostnames { - _, err = db.ExecVTOrc(` - delete - from hostname_resolve - where - hostname = ?`, - invalidHostname, - ) - if err != nil { - log.Error(err) - } - } - return err -} - -// writeHostnameIPs stroes an ipv4 and ipv6 associated witha hostname, if available -func writeHostnameIPs(hostname string, ipv4String string, ipv6String string) error { - writeFunc := func() error { - _, err := db.ExecVTOrc(` - insert into - hostname_ips (hostname, ipv4, ipv6, last_updated) - values - (?, ?, ?, NOW()) - on duplicate key update - ipv4 = VALUES(ipv4), - ipv6 = VALUES(ipv6), - last_updated = VALUES(last_updated) - `, - hostname, - ipv4String, - ipv6String, - ) - if err != nil { - log.Error(err) - } - return err - } - return ExecDBWriteFunc(writeFunc) -} diff --git a/go/vt/vtorc/inst/shard_dao.go 
b/go/vt/vtorc/inst/shard_dao.go new file mode 100644 index 00000000000..a90eed0f509 --- /dev/null +++ b/go/vt/vtorc/inst/shard_dao.go @@ -0,0 +1,97 @@ +/* +Copyright 2022 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package inst + +import ( + "errors" + + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/vt/external/golib/sqlutils" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtorc/db" +) + +// ErrShardNotFound is a fixed error message used when a shard is not found in the database. +var ErrShardNotFound = errors.New("shard not found") + +// ReadShardPrimaryInformation reads the vitess shard record and gets the shard primary alias and timestamp. +func ReadShardPrimaryInformation(keyspaceName, shardName string) (primaryAlias string, primaryTimestamp string, err error) { + if err = topo.ValidateKeyspaceName(keyspaceName); err != nil { + return + } + if _, _, err = topo.ValidateShardName(shardName); err != nil { + return + } + + query := ` + select + primary_alias, primary_timestamp + from + vitess_shard + where keyspace=? and shard=? 
+ ` + args := sqlutils.Args(keyspaceName, shardName) + shardFound := false + err = db.QueryVTOrc(query, args, func(row sqlutils.RowMap) error { + shardFound = true + primaryAlias = row.GetString("primary_alias") + primaryTimestamp = row.GetString("primary_timestamp") + return nil + }) + if err != nil { + return + } + if !shardFound { + return "", "", ErrShardNotFound + } + return primaryAlias, primaryTimestamp, nil +} + +// SaveShard saves the shard record against the shard name. +func SaveShard(shard *topo.ShardInfo) error { + _, err := db.ExecVTOrc(` + replace + into vitess_shard ( + keyspace, shard, primary_alias, primary_timestamp + ) values ( + ?, ?, ?, ? + ) + `, + shard.Keyspace(), + shard.ShardName(), + getShardPrimaryAliasString(shard), + getShardPrimaryTermStartTimeString(shard), + ) + return err +} + +// getShardPrimaryAliasString gets the shard primary alias to be stored as a string in the database. +func getShardPrimaryAliasString(shard *topo.ShardInfo) string { + if shard.PrimaryAlias == nil { + return "" + } + return topoproto.TabletAliasString(shard.PrimaryAlias) +} + +// getShardPrimaryAliasString gets the shard primary term start time to be stored as a string in the database. +func getShardPrimaryTermStartTimeString(shard *topo.ShardInfo) string { + if shard.PrimaryTermStartTime == nil { + return "" + } + return protoutil.TimeFromProto(shard.PrimaryTermStartTime).UTC().String() +} diff --git a/go/vt/vtorc/inst/shard_dao_test.go b/go/vt/vtorc/inst/shard_dao_test.go new file mode 100644 index 00000000000..3357bd2ee36 --- /dev/null +++ b/go/vt/vtorc/inst/shard_dao_test.go @@ -0,0 +1,107 @@ +/* +Copyright 2022 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package inst + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + _ "modernc.org/sqlite" + + "vitess.io/vitess/go/protoutil" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtorc/db" +) + +func TestSaveAndReadShard(t *testing.T) { + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + db.ClearVTOrcDatabase() + }() + timeToUse := time.Date(2023, 7, 24, 5, 0, 5, 1000, time.UTC) + tests := []struct { + name string + keyspaceName string + shardName string + shard *topodatapb.Shard + primaryAliasWanted string + primaryTimestampWanted string + err string + }{ + { + name: "Success", + keyspaceName: "ks1", + shardName: "80-", + shard: &topodatapb.Shard{ + PrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 301, + }, + PrimaryTermStartTime: protoutil.TimeToProto(timeToUse.Add(1 * time.Hour)), + }, + primaryTimestampWanted: "2023-07-24 06:00:05.000001 +0000 UTC", + primaryAliasWanted: "zone1-0000000301", + }, { + name: "Success with empty primary alias", + keyspaceName: "ks1", + shardName: "-", + shard: &topodatapb.Shard{ + PrimaryTermStartTime: protoutil.TimeToProto(timeToUse), + }, + primaryTimestampWanted: "2023-07-24 05:00:05.000001 +0000 UTC", + primaryAliasWanted: "", + }, { + name: "Success with empty primary term start time", + keyspaceName: "ks1", + shardName: "80-", + shard: &topodatapb.Shard{ + PrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 301, + }, + }, + 
primaryTimestampWanted: "", + primaryAliasWanted: "zone1-0000000301", + }, + { + name: "No shard found", + keyspaceName: "ks1", + shardName: "-80", + err: ErrShardNotFound.Error(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.shard != nil { + shardInfo := topo.NewShardInfo(tt.keyspaceName, tt.shardName, tt.shard, nil) + err := SaveShard(shardInfo) + require.NoError(t, err) + } + + shardPrimaryAlias, primaryTimestamp, err := ReadShardPrimaryInformation(tt.keyspaceName, tt.shardName) + if tt.err != "" { + require.EqualError(t, err, tt.err) + return + } + require.NoError(t, err) + require.EqualValues(t, tt.primaryAliasWanted, shardPrimaryAlias) + require.EqualValues(t, tt.primaryTimestampWanted, primaryTimestamp) + }) + } +} diff --git a/go/vt/vtorc/inst/tablet_dao.go b/go/vt/vtorc/inst/tablet_dao.go index 9ef8c1fde80..3ee49a75781 100644 --- a/go/vt/vtorc/inst/tablet_dao.go +++ b/go/vt/vtorc/inst/tablet_dao.go @@ -20,14 +20,11 @@ import ( "context" "errors" - "vitess.io/vitess/go/vt/external/golib/sqlutils" - "vitess.io/vitess/go/vt/log" - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/vt/external/golib/sqlutils" - "vitess.io/vitess/go/vt/logutil" replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" @@ -36,94 +33,12 @@ import ( "vitess.io/vitess/go/vt/vttablet/tmclient" ) -// TopoServ is the connection to the topo server. -var TopoServ *topo.Server - // ErrTabletAliasNil is a fixed error message. var ErrTabletAliasNil = errors.New("tablet alias is nil") -// SwitchPrimary makes the new tablet the primary and proactively performs -// the necessary propagation to the old primary. The propagation is best -// effort. If it fails, the tablet's shard sync will eventually converge. 
-// The proactive propagation allows a competing VTOrc from discovering -// the successful action of a previous one, which reduces churn. -func SwitchPrimary(newPrimaryKey, oldPrimaryKey InstanceKey) error { - durability, err := GetDurabilityPolicy(newPrimaryKey) - if err != nil { - return err - } - newPrimaryTablet, err := ChangeTabletType(newPrimaryKey, topodatapb.TabletType_PRIMARY, SemiSyncAckers(durability, newPrimaryKey) > 0) - if err != nil { - return err - } - // The following operations are best effort. - if newPrimaryTablet.Type != topodatapb.TabletType_PRIMARY { - log.Errorf("Unexpected: tablet type did not change to primary: %v", newPrimaryTablet.Type) - return nil - } - ctx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) - defer cancel() - _, err = TopoServ.UpdateShardFields(ctx, newPrimaryTablet.Keyspace, newPrimaryTablet.Shard, func(si *topo.ShardInfo) error { - if proto.Equal(si.PrimaryAlias, newPrimaryTablet.Alias) && proto.Equal(si.PrimaryTermStartTime, newPrimaryTablet.PrimaryTermStartTime) { - return topo.NewError(topo.NoUpdateNeeded, "") - } - - // We just successfully reparented. We should check timestamps, but always overwrite. - lastTerm := si.GetPrimaryTermStartTime() - newTerm := logutil.ProtoToTime(newPrimaryTablet.PrimaryTermStartTime) - if !newTerm.After(lastTerm) { - log.Errorf("Possible clock skew. New primary start time is before previous one: %v vs %v", newTerm, lastTerm) - } - - aliasStr := topoproto.TabletAliasString(newPrimaryTablet.Alias) - log.Infof("Updating shard record: primary_alias=%v, primary_term_start_time=%v", aliasStr, newTerm) - si.PrimaryAlias = newPrimaryTablet.Alias - si.PrimaryTermStartTime = newPrimaryTablet.PrimaryTermStartTime - return nil - }) - // Don't proceed if shard record could not be updated. 
- if err != nil { - log.Error(err) - return nil - } - if _, err := ChangeTabletType(oldPrimaryKey, topodatapb.TabletType_REPLICA, IsReplicaSemiSync(durability, newPrimaryKey, oldPrimaryKey)); err != nil { - // This is best effort. - log.Error(err) - } - return nil -} - -// ChangeTabletType designates the tablet that owns an instance as the primary. -func ChangeTabletType(instanceKey InstanceKey, tabletType topodatapb.TabletType, semiSync bool) (*topodatapb.Tablet, error) { - if instanceKey.Hostname == "" { - return nil, errors.New("can't set tablet to primary: instance is unspecified") - } - tablet, err := ReadTablet(instanceKey) - if err != nil { - return nil, err - } - tmc := tmclient.NewTabletManagerClient() - tmcCtx, tmcCancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) - defer tmcCancel() - if err := tmc.ChangeType(tmcCtx, tablet, tabletType, semiSync); err != nil { - return nil, err - } - tsCtx, tsCancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) - defer tsCancel() - ti, err := TopoServ.GetTablet(tsCtx, tablet.Alias) - if err != nil { - log.Error(err) - return nil, err - } - if err := SaveTablet(ti.Tablet); err != nil { - log.Error(err) - } - return ti.Tablet, nil -} - // ResetReplicationParameters resets the replication parameters on the given tablet. -func ResetReplicationParameters(instanceKey InstanceKey) error { - tablet, err := ReadTablet(instanceKey) +func ResetReplicationParameters(tabletAlias string) error { + tablet, err := ReadTablet(tabletAlias) if err != nil { return err } @@ -137,8 +52,8 @@ func ResetReplicationParameters(instanceKey InstanceKey) error { } // FullStatus gets the full status of the MySQL running in vttablet. 
-func FullStatus(instanceKey InstanceKey) (*replicationdatapb.FullStatus, error) { - tablet, err := ReadTablet(instanceKey) +func FullStatus(tabletAlias string) (*replicationdatapb.FullStatus, error) { + tablet, err := ReadTablet(tabletAlias) if err != nil { return nil, err } @@ -149,15 +64,15 @@ func FullStatus(instanceKey InstanceKey) (*replicationdatapb.FullStatus, error) } // ReadTablet reads the vitess tablet record. -func ReadTablet(instanceKey InstanceKey) (*topodatapb.Tablet, error) { +func ReadTablet(tabletAlias string) (*topodatapb.Tablet, error) { query := ` select info from vitess_tablet - where hostname=? and port=? + where alias = ? ` - args := sqlutils.Args(instanceKey.Hostname, instanceKey.Port) + args := sqlutils.Args(tabletAlias) tablet := &topodatapb.Tablet{} opts := prototext.UnmarshalOptions{DiscardUnknown: true} err := db.QueryVTOrc(query, args, func(row sqlutils.RowMap) error { @@ -193,7 +108,7 @@ func SaveTablet(tablet *topodatapb.Tablet) error { tablet.Keyspace, tablet.Shard, int(tablet.Type), - logutil.ProtoToTime(tablet.PrimaryTermStartTime), + protoutil.TimeFromProto(tablet.PrimaryTermStartTime).UTC(), tabletp, ) return err diff --git a/go/vt/vtorc/inst/tablet_dao_test.go b/go/vt/vtorc/inst/tablet_dao_test.go new file mode 100644 index 00000000000..a876d857ace --- /dev/null +++ b/go/vt/vtorc/inst/tablet_dao_test.go @@ -0,0 +1,93 @@ +package inst + +import ( + "testing" + + "github.com/stretchr/testify/require" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vttime" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtorc/db" +) + +func TestSaveAndReadTablet(t *testing.T) { + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. 
+ defer func() { + db.ClearVTOrcDatabase() + }() + + tests := []struct { + name string + tabletAlias string + tablet *topodatapb.Tablet + tabletWanted *topodatapb.Tablet + err string + }{ + { + name: "Success with primary type", + tabletAlias: "zone1-0000000100", + tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_PRIMARY, + MysqlHostname: "localhost", + MysqlPort: 1030, + PrimaryTermStartTime: &vttime.Time{ + Seconds: 1000, + Nanoseconds: 387, + }, + }, + tabletWanted: nil, + }, { + name: "Success with replica type", + tabletAlias: "zone1-0000000100", + tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_REPLICA, + MysqlHostname: "localhost", + MysqlPort: 1030, + }, + tabletWanted: nil, + }, { + name: "No tablet found", + tabletAlias: "zone1-190734", + tablet: nil, + tabletWanted: nil, + err: ErrTabletAliasNil.Error(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.tabletWanted == nil { + tt.tabletWanted = tt.tablet + } + + if tt.tablet != nil { + err := SaveTablet(tt.tablet) + require.NoError(t, err) + } + + readTable, err := ReadTablet(tt.tabletAlias) + if tt.err != "" { + require.EqualError(t, err, tt.err) + return + } + require.NoError(t, err) + require.True(t, topotools.TabletEquality(tt.tabletWanted, readTable)) + require.Equal(t, tt.tabletAlias, topoproto.TabletAliasString(readTable.Alias)) + }) + } +} diff --git a/go/vt/vtorc/inst/tag.go b/go/vt/vtorc/inst/tag.go deleted file mode 100644 index 3b9705b7dff..00000000000 --- a/go/vt/vtorc/inst/tag.go +++ /dev/null @@ -1,121 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the 
License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "fmt" - "regexp" - "strings" -) - -type Tag struct { - TagName string - TagValue string - HasValue bool - Negate bool -} - -var ( - negateTagEqualsRegexp = regexp.MustCompile("^~([^=]+)=(.*)$") - TagEqualsRegexp = regexp.MustCompile("^([^=]+)=(.*)$") - negateTagExistsRegexp = regexp.MustCompile("^~([^=]+)$") - tagExistsRegexp = regexp.MustCompile("^([^=]+)$") -) - -func NewTag(tagName string, tagValue string) (*Tag, error) { - tagName = strings.TrimSpace(tagName) - if tagName == "" { - return nil, fmt.Errorf("NewTag: empty tag name") - } - return &Tag{TagName: tagName, TagValue: tagValue}, nil -} - -func ParseTag(tagString string) (*Tag, error) { - tagString = strings.Replace(tagString, "!", "~", -1) - tagString = strings.TrimSpace(tagString) - - if submatch := negateTagEqualsRegexp.FindStringSubmatch(tagString); len(submatch) > 0 { - return &Tag{ - TagName: submatch[1], - TagValue: submatch[2], - HasValue: true, - Negate: true, - }, nil - } else if submatch := TagEqualsRegexp.FindStringSubmatch(tagString); len(submatch) > 0 { - return &Tag{ - TagName: submatch[1], - TagValue: submatch[2], - HasValue: true, - }, nil - } else if submatch := negateTagExistsRegexp.FindStringSubmatch(tagString); len(submatch) > 0 { - return &Tag{ - TagName: submatch[1], - Negate: true, - }, nil - } else if submatch := tagExistsRegexp.FindStringSubmatch(tagString); len(submatch) > 0 { - return &Tag{ - TagName: submatch[1], - }, nil - } - return nil, fmt.Errorf("Unable to parse tag: %s", tagString) -} - -func (tag *Tag) String() string { - return 
fmt.Sprintf("%s=%s", tag.TagName, tag.TagValue) -} - -func (tag *Tag) Display() string { - if tag.TagValue == "" { - return tag.TagName - } - return fmt.Sprintf("%s=%s", tag.TagName, tag.TagValue) -} - -func ParseIntersectTags(tagsString string) (tags [](*Tag), err error) { - for _, tagString := range strings.Split(tagsString, ",") { - tag, err := ParseTag(tagString) - if err != nil { - return tags, err - } - tags = append(tags, tag) - } - return tags, nil -} - -type InstanceTag struct { - Key InstanceKey - T Tag -} - -func GetInstanceKeysByTags(tagsString string) (tagged *InstanceKeyMap, err error) { - tags, err := ParseIntersectTags(tagsString) - if err != nil { - return tagged, err - } - for i, tag := range tags { - taggedByTag, err := GetInstanceKeysByTag(tag) - if err != nil { - return tagged, err - } - if i == 0 { - tagged = taggedByTag - } else { - tagged = tagged.Intersect(taggedByTag) - } - } - return tagged, nil -} diff --git a/go/vt/vtorc/inst/tag_dao.go b/go/vt/vtorc/inst/tag_dao.go deleted file mode 100644 index 5b5962a9326..00000000000 --- a/go/vt/vtorc/inst/tag_dao.go +++ /dev/null @@ -1,206 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package inst - -import ( - "fmt" - - "vitess.io/vitess/go/vt/external/golib/sqlutils" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/vtorc/db" -) - -func PutInstanceTag(instanceKey *InstanceKey, tag *Tag) (err error) { - _, err = db.ExecVTOrc(` - insert - into database_instance_tags ( - hostname, port, tag_name, tag_value, last_updated - ) VALUES ( - ?, ?, ?, ?, NOW() - ) - on duplicate key update - tag_value=values(tag_value), - last_updated=values(last_updated) - `, - instanceKey.Hostname, - instanceKey.Port, - tag.TagName, - tag.TagValue, - ) - return err -} - -func Untag(instanceKey *InstanceKey, tag *Tag) (tagged *InstanceKeyMap, err error) { - if tag == nil { - errMsg := "untag: tag is nil" - log.Errorf(errMsg) - return nil, fmt.Errorf(errMsg) - } - if tag.Negate { - errMsg := "untag: does not support negation" - log.Errorf(errMsg) - return nil, fmt.Errorf(errMsg) - } - if instanceKey == nil && !tag.HasValue { - errMsg := "untag: either indicate an instance or a tag value. Will not delete on-valued tag across instances" - log.Errorf(errMsg) - return nil, fmt.Errorf(errMsg) - } - var clause string - args := sqlutils.Args() - if tag.HasValue { - clause = `tag_name=? and tag_value=?` - args = append(args, tag.TagName, tag.TagValue) - } else { - clause = `tag_name=?` - args = append(args, tag.TagName) - } - if instanceKey != nil { - clause = fmt.Sprintf("%s and hostname=? 
and port=?", clause) - args = append(args, instanceKey.Hostname, instanceKey.Port) - } - tagged = NewInstanceKeyMap() - query := fmt.Sprintf(` - select - hostname, - port - from - database_instance_tags - where - %s - order by hostname, port - `, clause, - ) - _ = db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { - key, _ := NewResolveInstanceKey(m.GetString("hostname"), m.GetInt("port")) - tagged.AddKey(*key) - return nil - }) - - query = fmt.Sprintf(` - delete from - database_instance_tags - where - %s - `, clause, - ) - if _, err = db.ExecVTOrc(query, args...); err != nil { - log.Error(err) - return tagged, err - } - _ = AuditOperation("delete-instance-tag", instanceKey, tag.String()) - return tagged, nil -} - -func ReadInstanceTag(instanceKey *InstanceKey, tag *Tag) (tagExists bool, err error) { - query := ` - select - tag_value - from - database_instance_tags - where - hostname = ? - and port = ? - and tag_name = ? - ` - args := sqlutils.Args(instanceKey.Hostname, instanceKey.Port, tag.TagName) - err = db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { - tag.TagValue = m.GetString("tag_value") - tagExists = true - return nil - }) - - if err != nil { - log.Error(err) - } - return tagExists, err -} - -func ReadInstanceTags(instanceKey *InstanceKey) (tags [](*Tag), err error) { - tags = [](*Tag){} - query := ` - select - tag_name, tag_value - from - database_instance_tags - where - hostname = ? - and port = ? 
- order by tag_name - ` - args := sqlutils.Args(instanceKey.Hostname, instanceKey.Port) - err = db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { - tag := &Tag{ - TagName: m.GetString("tag_name"), - TagValue: m.GetString("tag_value"), - } - tags = append(tags, tag) - return nil - }) - - if err != nil { - log.Error(err) - } - return tags, err -} - -func GetInstanceKeysByTag(tag *Tag) (tagged *InstanceKeyMap, err error) { - if tag == nil { - errMsg := "GetInstanceKeysByTag: tag is nil" - log.Errorf(errMsg) - return nil, fmt.Errorf(errMsg) - } - clause := `` - args := sqlutils.Args() - if tag.HasValue && !tag.Negate { - // exists and equals - clause = `tag_name=? and tag_value=?` - args = append(args, tag.TagName, tag.TagValue) - } else if !tag.HasValue && !tag.Negate { - // exists - clause = `tag_name=?` - args = append(args, tag.TagName) - } else if tag.HasValue && tag.Negate { - // exists and not equal - clause = `tag_name=? and tag_value!=?` - args = append(args, tag.TagName, tag.TagValue) - } else if !tag.HasValue && tag.Negate { - // does not exist - clause = `1=1 group by hostname, port having sum(tag_name=?)=0` - args = append(args, tag.TagName) - } - tagged = NewInstanceKeyMap() - query := fmt.Sprintf(` - select - hostname, - port - from - database_instance_tags - where - %s - order by hostname, port - `, clause) - err = db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { - key, _ := NewResolveInstanceKey(m.GetString("hostname"), m.GetInt("port")) - tagged.AddKey(*key) - return nil - }) - if err != nil { - log.Error(err) - } - return tagged, err -} diff --git a/go/vt/vtorc/inst/tag_test.go b/go/vt/vtorc/inst/tag_test.go deleted file mode 100644 index 0ce182b7fb2..00000000000 --- a/go/vt/vtorc/inst/tag_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package inst - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -//nolint:staticcheck -func TestParseTag(t *testing.T) { - { - tag, err := ParseTag("") - require.True(t, tag == nil) - 
require.Error(t, err) - } - { - tag, err := ParseTag("=") - require.True(t, tag == nil) - require.Error(t, err) - } - { - tag, err := ParseTag("=backup") - require.True(t, tag == nil) - require.Error(t, err) - } - { - tag, err := ParseTag(" =backup") - require.True(t, tag == nil) - require.Error(t, err) - } - { - tag, err := ParseTag("role") - require.NoError(t, err) - require.True(t, tag != nil) - require.Equal(t, tag.TagName, "role") - require.Equal(t, tag.TagValue, "") - require.False(t, tag.Negate) - require.False(t, tag.HasValue) - - require.Equal(t, tag.String(), "role=") - } - { - tag, err := ParseTag("role=") - require.NoError(t, err) - require.True(t, tag != nil) - require.Equal(t, tag.TagName, "role") - require.Equal(t, tag.TagValue, "") - require.False(t, tag.Negate) - require.True(t, tag.HasValue) - - require.Equal(t, tag.String(), "role=") - - } - { - tag, err := ParseTag("role=backup") - require.NoError(t, err) - require.True(t, tag != nil) - require.Equal(t, tag.TagName, "role") - require.Equal(t, tag.TagValue, "backup") - require.False(t, tag.Negate) - require.True(t, tag.HasValue) - - require.Equal(t, tag.String(), "role=backup") - } - { - tag, err := ParseTag("!role") - require.NoError(t, err) - require.True(t, tag != nil) - require.Equal(t, tag.TagName, "role") - require.True(t, tag.Negate) - require.False(t, tag.HasValue) - } - { - tag, err := ParseTag("~role=backup") - require.NoError(t, err) - require.True(t, tag != nil) - require.Equal(t, tag.TagName, "role") - require.Equal(t, tag.TagValue, "backup") - require.True(t, tag.Negate) - require.True(t, tag.HasValue) - } -} - -func TestParseIntersectTags(t *testing.T) { - { - _, err := ParseIntersectTags("") - require.Error(t, err) - } - { - _, err := ParseIntersectTags(",") - require.Error(t, err) - } - { - _, err := ParseIntersectTags(",,,") - require.Error(t, err) - } - { - _, err := ParseIntersectTags("role,") - require.Error(t, err) - } - { - tags, err := ParseIntersectTags("role") - 
require.NoError(t, err) - require.Equal(t, len(tags), 1) - - require.Equal(t, tags[0].TagName, "role") - require.Equal(t, tags[0].TagValue, "") - require.False(t, tags[0].Negate) - require.False(t, tags[0].HasValue) - } - { - tags, err := ParseIntersectTags("role,dc") - require.NoError(t, err) - require.Equal(t, len(tags), 2) - - require.Equal(t, tags[0].TagName, "role") - require.Equal(t, tags[0].TagValue, "") - require.False(t, tags[0].Negate) - require.False(t, tags[0].HasValue) - - require.Equal(t, tags[1].TagName, "dc") - require.Equal(t, tags[1].TagValue, "") - require.False(t, tags[1].Negate) - require.False(t, tags[1].HasValue) - } - { - tags, err := ParseIntersectTags("role=backup, !dc=ny") - require.NoError(t, err) - require.Equal(t, len(tags), 2) - - require.Equal(t, tags[0].TagName, "role") - require.Equal(t, tags[0].TagValue, "backup") - require.False(t, tags[0].Negate) - require.True(t, tags[0].HasValue) - - require.Equal(t, tags[1].TagName, "dc") - require.Equal(t, tags[1].TagValue, "ny") - require.True(t, tags[1].Negate) - require.True(t, tags[1].HasValue) - } -} diff --git a/go/vt/vtorc/logic/keyspace_discovery.go b/go/vt/vtorc/logic/keyspace_shard_discovery.go similarity index 54% rename from go/vt/vtorc/logic/keyspace_discovery.go rename to go/vt/vtorc/logic/keyspace_shard_discovery.go index 4065c3c0857..c79ace5bdc3 100644 --- a/go/vt/vtorc/logic/keyspace_discovery.go +++ b/go/vt/vtorc/logic/keyspace_shard_discovery.go @@ -28,8 +28,8 @@ import ( "vitess.io/vitess/go/vt/vtorc/inst" ) -// RefreshAllKeyspaces reloads the keyspace information for the keyspaces that vtorc is concerned with. -func RefreshAllKeyspaces() { +// RefreshAllKeyspacesAndShards reloads the keyspace and shard information for the keyspaces that vtorc is concerned with. 
+func RefreshAllKeyspacesAndShards() { var keyspaces []string if len(clustersToWatch) == 0 { // all known keyspaces ctx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) @@ -72,24 +72,44 @@ func RefreshAllKeyspaces() { if idx != 0 && keyspace == keyspaces[idx-1] { continue } - wg.Add(1) + wg.Add(2) go func(keyspace string) { defer wg.Done() - _ = refreshKeyspace(refreshCtx, keyspace) + _ = refreshKeyspaceHelper(refreshCtx, keyspace) + }(keyspace) + go func(keyspace string) { + defer wg.Done() + _ = refreshAllShards(refreshCtx, keyspace) }(keyspace) } wg.Wait() } -// RefreshKeyspace refreshes the keyspace's information for the given keyspace from the topo -func RefreshKeyspace(keyspaceName string) error { +// RefreshKeyspaceAndShard refreshes the keyspace record and shard record for the given keyspace and shard. +func RefreshKeyspaceAndShard(keyspaceName string, shardName string) error { + err := refreshKeyspace(keyspaceName) + if err != nil { + return err + } + return refreshShard(keyspaceName, shardName) +} + +// refreshKeyspace refreshes the keyspace's information for the given keyspace from the topo +func refreshKeyspace(keyspaceName string) error { refreshCtx, refreshCancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) defer refreshCancel() - return refreshKeyspace(refreshCtx, keyspaceName) + return refreshKeyspaceHelper(refreshCtx, keyspaceName) } -// refreshKeyspace is a helper function which reloads the given keyspace's information -func refreshKeyspace(ctx context.Context, keyspaceName string) error { +// refreshShard refreshes the shard's information for the given keyspace/shard from the topo +func refreshShard(keyspaceName, shardName string) error { + refreshCtx, refreshCancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) + defer refreshCancel() + return refreshSingleShardHelper(refreshCtx, keyspaceName, shardName) +} + +// refreshKeyspaceHelper is a helper function 
which reloads the given keyspace's information +func refreshKeyspaceHelper(ctx context.Context, keyspaceName string) error { keyspaceInfo, err := ts.GetKeyspace(ctx, keyspaceName) if err != nil { log.Error(err) @@ -101,3 +121,34 @@ func refreshKeyspace(ctx context.Context, keyspaceName string) error { } return err } + +// refreshAllShards refreshes all the shard records in the given keyspace. +func refreshAllShards(ctx context.Context, keyspaceName string) error { + shardInfos, err := ts.FindAllShardsInKeyspace(ctx, keyspaceName) + if err != nil { + log.Error(err) + return err + } + for _, shardInfo := range shardInfos { + err = inst.SaveShard(shardInfo) + if err != nil { + log.Error(err) + return err + } + } + return nil +} + +// refreshSingleShardHelper is a helper function that refreshes the shard record of the given keyspace/shard. +func refreshSingleShardHelper(ctx context.Context, keyspaceName string, shardName string) error { + shardInfo, err := ts.GetShard(ctx, keyspaceName, shardName) + if err != nil { + log.Error(err) + return err + } + err = inst.SaveShard(shardInfo) + if err != nil { + log.Error(err) + } + return err +} diff --git a/go/vt/vtorc/logic/keyspace_discovery_test.go b/go/vt/vtorc/logic/keyspace_shard_discovery_test.go similarity index 58% rename from go/vt/vtorc/logic/keyspace_discovery_test.go rename to go/vt/vtorc/logic/keyspace_shard_discovery_test.go index e5be1fd82f2..2911b3d29c2 100644 --- a/go/vt/vtorc/logic/keyspace_discovery_test.go +++ b/go/vt/vtorc/logic/keyspace_shard_discovery_test.go @@ -18,6 +18,7 @@ package logic import ( "context" + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -60,47 +61,65 @@ func TestRefreshAllKeyspaces(t *testing.T) { clustersToWatch = oldClustersToWatch }() - // Open the vtorc - // After the test completes delete everything from the vitess_keyspace table - orcDb, err := db.OpenVTOrc() - require.NoError(t, err) + db.ClearVTOrcDatabase() defer func() { - _, err = orcDb.Exec("delete from 
vitess_keyspace") - require.NoError(t, err) + db.ClearVTOrcDatabase() }() - ts = memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts = memorytopo.NewServer(ctx, "zone1") keyspaceNames := []string{"ks1", "ks2", "ks3", "ks4"} keyspaces := []*topodatapb.Keyspace{keyspaceDurabilityNone, keyspaceDurabilitySemiSync, keyspaceSnapshot, keyspaceDurabilityTest} // Create 4 keyspaces for i, keyspace := range keyspaces { - err := ts.CreateKeyspace(context.Background(), keyspaceNames[i], keyspace) + err := ts.CreateKeyspace(ctx, keyspaceNames[i], keyspace) require.NoError(t, err) + for idx, shardName := range []string{"-80", "80-"} { + err = ts.CreateShard(ctx, keyspaceNames[i], shardName) + require.NoError(t, err) + _, err = ts.UpdateShardFields(ctx, keyspaceNames[i], shardName, func(si *topo.ShardInfo) error { + si.PrimaryAlias = &topodatapb.TabletAlias{ + Cell: fmt.Sprintf("zone_%v", keyspaceNames[i]), + Uid: uint32(100 + idx), + } + return nil + }) + require.NoError(t, err) + } } // Set clusters to watch to only watch ks1 and ks3 - onlyKs1and3 := []string{"ks1/-", "ks3/-80", "ks3/80-"} + onlyKs1and3 := []string{"ks1/-80", "ks3/-80", "ks3/80-"} clustersToWatch = onlyKs1and3 - RefreshAllKeyspaces() + RefreshAllKeyspacesAndShards() // Verify that we only have ks1 and ks3 in vtorc's db. 
verifyKeyspaceInfo(t, "ks1", keyspaceDurabilityNone, "") + verifyPrimaryAlias(t, "ks1", "-80", "zone_ks1-0000000100", "") verifyKeyspaceInfo(t, "ks2", nil, "keyspace not found") + verifyPrimaryAlias(t, "ks2", "80-", "", "shard not found") verifyKeyspaceInfo(t, "ks3", keyspaceSnapshot, "") + verifyPrimaryAlias(t, "ks3", "80-", "zone_ks3-0000000101", "") verifyKeyspaceInfo(t, "ks4", nil, "keyspace not found") // Set clusters to watch to watch all keyspaces clustersToWatch = nil // Change the durability policy of ks1 - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "ks1", "semi_sync") - RefreshAllKeyspaces() + reparenttestutil.SetKeyspaceDurability(ctx, t, ts, "ks1", "semi_sync") + RefreshAllKeyspacesAndShards() // Verify that all the keyspaces are correctly reloaded verifyKeyspaceInfo(t, "ks1", keyspaceDurabilitySemiSync, "") + verifyPrimaryAlias(t, "ks1", "-80", "zone_ks1-0000000100", "") verifyKeyspaceInfo(t, "ks2", keyspaceDurabilitySemiSync, "") + verifyPrimaryAlias(t, "ks2", "80-", "zone_ks2-0000000101", "") verifyKeyspaceInfo(t, "ks3", keyspaceSnapshot, "") + verifyPrimaryAlias(t, "ks3", "80-", "zone_ks3-0000000101", "") verifyKeyspaceInfo(t, "ks4", keyspaceDurabilityTest, "") + verifyPrimaryAlias(t, "ks4", "80-", "zone_ks4-0000000101", "") + } func TestRefreshKeyspace(t *testing.T) { @@ -110,27 +129,20 @@ func TestRefreshKeyspace(t *testing.T) { ts = oldTs }() - // Open the vtorc - // After the test completes delete everything from the vitess_keyspace table - orcDb, err := db.OpenVTOrc() - require.NoError(t, err) defer func() { - _, err = orcDb.Exec("delete from vitess_keyspace") - require.NoError(t, err) + db.ClearVTOrcDatabase() }() tests := []struct { name string keyspaceName string keyspace *topodatapb.Keyspace - ts *topo.Server keyspaceWanted *topodatapb.Keyspace err string }{ { name: "Success with keyspaceType and durability", keyspaceName: "ks1", - ts: memorytopo.NewServer("zone1"), keyspace: &topodatapb.Keyspace{ KeyspaceType: 
topodatapb.KeyspaceType_NORMAL, DurabilityPolicy: "semi_sync", @@ -140,7 +152,6 @@ func TestRefreshKeyspace(t *testing.T) { }, { name: "Success with keyspaceType and no durability", keyspaceName: "ks2", - ts: memorytopo.NewServer("zone1"), keyspace: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, }, @@ -149,7 +160,6 @@ func TestRefreshKeyspace(t *testing.T) { }, { name: "Success with snapshot keyspaceType", keyspaceName: "ks3", - ts: memorytopo.NewServer("zone1"), keyspace: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_SNAPSHOT, }, @@ -158,7 +168,6 @@ func TestRefreshKeyspace(t *testing.T) { }, { name: "Success with fields that are not stored", keyspaceName: "ks4", - ts: memorytopo.NewServer("zone1"), keyspace: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, DurabilityPolicy: "none", @@ -172,7 +181,6 @@ func TestRefreshKeyspace(t *testing.T) { }, { name: "No keyspace found", keyspaceName: "ks5", - ts: memorytopo.NewServer("zone1"), keyspace: nil, keyspaceWanted: nil, err: "node doesn't exist: keyspaces/ks5/Keyspace", @@ -184,13 +192,16 @@ func TestRefreshKeyspace(t *testing.T) { tt.keyspaceWanted = tt.keyspace } - ts = tt.ts + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts = memorytopo.NewServer(ctx, "zone1") if tt.keyspace != nil { err := ts.CreateKeyspace(context.Background(), tt.keyspaceName, tt.keyspace) require.NoError(t, err) } - err := RefreshKeyspace(tt.keyspaceName) + err := refreshKeyspace(tt.keyspaceName) if tt.err != "" { require.EqualError(t, err, tt.err) } else { @@ -209,7 +220,91 @@ func verifyKeyspaceInfo(t *testing.T, keyspaceName string, keyspace *topodatapb. 
if errString != "" { assert.EqualError(t, err, errString) } else { + assert.NoError(t, err) assert.Equal(t, keyspaceName, ksInfo.KeyspaceName()) assert.True(t, topotools.KeyspaceEquality(keyspace, ksInfo.Keyspace)) } } + +func TestRefreshShard(t *testing.T) { + // Store the old flags and restore on test completion + oldTs := ts + defer func() { + ts = oldTs + }() + + defer func() { + db.ClearVTOrcDatabase() + }() + + tests := []struct { + name string + keyspaceName string + shardName string + shard *topodatapb.Shard + primaryAliasWanted string + err string + }{ + { + name: "Success with primaryAlias", + keyspaceName: "ks1", + shardName: "0", + shard: &topodatapb.Shard{ + PrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 302, + }, + }, + primaryAliasWanted: "zone1-0000000302", + err: "", + }, { + name: "Success with empty primaryAlias", + keyspaceName: "ks1", + shardName: "-80", + shard: &topodatapb.Shard{}, + primaryAliasWanted: "", + err: "", + }, { + name: "No shard found", + keyspaceName: "ks2", + shardName: "-", + err: "node doesn't exist: keyspaces/ks2/shards/-/Shard", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts = memorytopo.NewServer(ctx, "zone1") + if tt.shard != nil { + _, err := ts.GetOrCreateShard(context.Background(), tt.keyspaceName, tt.shardName) + require.NoError(t, err) + _, err = ts.UpdateShardFields(context.Background(), tt.keyspaceName, tt.shardName, func(info *topo.ShardInfo) error { + info.PrimaryAlias = tt.shard.PrimaryAlias + return nil + }) + require.NoError(t, err) + } + + err := refreshShard(tt.keyspaceName, tt.shardName) + if tt.err != "" { + require.EqualError(t, err, tt.err) + } else { + require.NoError(t, err) + verifyPrimaryAlias(t, tt.keyspaceName, tt.shardName, tt.primaryAliasWanted, "") + } + }) + } +} + +// verifyPrimaryAlias verifies the correct primary alias is stored in the database for the given 
keyspace shard. +func verifyPrimaryAlias(t *testing.T, keyspaceName, shardName string, primaryAliasWanted string, errString string) { + primaryAlias, _, err := inst.ReadShardPrimaryInformation(keyspaceName, shardName) + if errString != "" { + require.ErrorContains(t, err, errString) + return + } + require.NoError(t, err) + require.Equal(t, primaryAliasWanted, primaryAlias) +} diff --git a/go/vt/vtorc/logic/tablet_discovery.go b/go/vt/vtorc/logic/tablet_discovery.go index c40131a39cb..dd2e65237bf 100644 --- a/go/vt/vtorc/logic/tablet_discovery.go +++ b/go/vt/vtorc/logic/tablet_discovery.go @@ -19,6 +19,8 @@ package logic import ( "context" "errors" + "fmt" + "slices" "strings" "sync" "sync/atomic" @@ -26,8 +28,6 @@ import ( "github.com/spf13/pflag" - "k8s.io/utils/strings/slices" - "google.golang.org/protobuf/encoding/prototext" "google.golang.org/protobuf/proto" @@ -36,6 +36,7 @@ import ( "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/inst" @@ -65,8 +66,6 @@ func RegisterFlags(fs *pflag.FlagSet) { func OpenTabletDiscovery() <-chan time.Time { // TODO(sougou): If there's a shutdown signal, we have to close the topo. ts = topo.Open() - // TODO(sougou): remove ts and push some functions into inst. - inst.TopoServ = ts tmc = tmclient.NewTabletManagerClient() // Clear existing cache and perform a new refresh. 
if _, err := db.ExecVTOrc("delete from vitess_tablet"); err != nil { @@ -77,12 +76,12 @@ func OpenTabletDiscovery() <-chan time.Time { // refreshAllTablets reloads the tablets from topo and discovers the ones which haven't been refreshed in a while func refreshAllTablets() { - refreshTabletsUsing(func(instanceKey *inst.InstanceKey) { - DiscoverInstance(*instanceKey, false /* forceDiscovery */) + refreshTabletsUsing(func(tabletAlias string) { + DiscoverInstance(tabletAlias, false /* forceDiscovery */) }, false /* forceRefresh */) } -func refreshTabletsUsing(loader func(instanceKey *inst.InstanceKey), forceRefresh bool) { +func refreshTabletsUsing(loader func(tabletAlias string), forceRefresh bool) { if !IsLeaderOrActive() { return } @@ -151,13 +150,13 @@ func refreshTabletsUsing(loader func(instanceKey *inst.InstanceKey), forceRefres } } -func refreshTabletsInCell(ctx context.Context, cell string, loader func(instanceKey *inst.InstanceKey), forceRefresh bool) { +func refreshTabletsInCell(ctx context.Context, cell string, loader func(tabletAlias string), forceRefresh bool) { tablets, err := topotools.GetTabletMapForCell(ctx, ts, cell) if err != nil { log.Errorf("Error fetching topo info for cell %v: %v", cell, err) return } - query := "select hostname, port, info from vitess_tablet where cell = ?" + query := "select alias from vitess_tablet where cell = ?" 
args := sqlutils.Args(cell) refreshTablets(tablets, query, args, loader, forceRefresh, nil) } @@ -168,8 +167,8 @@ func refreshTabletsInCell(ctx context.Context, cell string, loader func(instance func forceRefreshAllTabletsInShard(ctx context.Context, keyspace, shard string, tabletsToIgnore []string) { refreshCtx, refreshCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer refreshCancel() - refreshTabletsInKeyspaceShard(refreshCtx, keyspace, shard, func(instanceKey *inst.InstanceKey) { - DiscoverInstance(*instanceKey, true) + refreshTabletsInKeyspaceShard(refreshCtx, keyspace, shard, func(tabletAlias string) { + DiscoverInstance(tabletAlias, true) }, true, tabletsToIgnore) } @@ -177,27 +176,25 @@ func forceRefreshAllTabletsInShard(ctx context.Context, keyspace, shard string, // of the given keyspace-shard. func refreshTabletInfoOfShard(ctx context.Context, keyspace, shard string) { log.Infof("refresh of tablet records of shard - %v/%v", keyspace, shard) - refreshTabletsInKeyspaceShard(ctx, keyspace, shard, func(instanceKey *inst.InstanceKey) { + refreshTabletsInKeyspaceShard(ctx, keyspace, shard, func(tabletAlias string) { // No-op // We only want to refresh the tablet information for the given shard }, false, nil) } -func refreshTabletsInKeyspaceShard(ctx context.Context, keyspace, shard string, loader func(instanceKey *inst.InstanceKey), forceRefresh bool, tabletsToIgnore []string) { +func refreshTabletsInKeyspaceShard(ctx context.Context, keyspace, shard string, loader func(tabletAlias string), forceRefresh bool, tabletsToIgnore []string) { tablets, err := ts.GetTabletMapForShard(ctx, keyspace, shard) if err != nil { log.Errorf("Error fetching tablets for keyspace/shard %v/%v: %v", keyspace, shard, err) return } - query := "select hostname, port, info from vitess_tablet where keyspace = ? and shard = ?" + query := "select alias from vitess_tablet where keyspace = ? and shard = ?" 
args := sqlutils.Args(keyspace, shard) refreshTablets(tablets, query, args, loader, forceRefresh, tabletsToIgnore) } -func refreshTablets(tablets map[string]*topo.TabletInfo, query string, args []any, loader func(instanceKey *inst.InstanceKey), forceRefresh bool, tabletsToIgnore []string) { +func refreshTablets(tablets map[string]*topo.TabletInfo, query string, args []any, loader func(tabletAlias string), forceRefresh bool, tabletsToIgnore []string) { // Discover new tablets. - // TODO(sougou): enhance this to work with multi-schema, - // where each instanceKey can have multiple tablets. latestInstances := make(map[string]bool) var wg sync.WaitGroup for _, tabletInfo := range tablets { @@ -205,15 +202,9 @@ func refreshTablets(tablets map[string]*topo.TabletInfo, query string, args []an if tablet.Type != topodatapb.TabletType_PRIMARY && !topo.IsReplicaType(tablet.Type) { continue } - latestInstances[topoproto.TabletAliasString(tablet.Alias)] = true - if tablet.MysqlHostname == "" { - continue - } - instanceKey := inst.InstanceKey{ - Hostname: tablet.MysqlHostname, - Port: int(tablet.MysqlPort), - } - old, err := inst.ReadTablet(instanceKey) + tabletAliasString := topoproto.TabletAliasString(tablet.Alias) + latestInstances[tabletAliasString] = true + old, err := inst.ReadTablet(tabletAliasString) if err != nil && err != inst.ErrTabletAliasNil { log.Error(err) continue @@ -231,69 +222,52 @@ func refreshTablets(tablets map[string]*topo.TabletInfo, query string, args []an if slices.Contains(tabletsToIgnore, topoproto.TabletAliasString(tablet.Alias)) { return } - loader(&instanceKey) + loader(tabletAliasString) }() log.Infof("Discovered: %v", tablet) } wg.Wait() // Forget tablets that were removed. 
- toForget := make(map[inst.InstanceKey]*topodatapb.Tablet) + var toForget []string err := db.QueryVTOrc(query, args, func(row sqlutils.RowMap) error { - curKey := inst.InstanceKey{ - Hostname: row.GetString("hostname"), - Port: row.GetInt("port"), - } - tablet := &topodatapb.Tablet{} - opts := prototext.UnmarshalOptions{DiscardUnknown: true} - if err := opts.Unmarshal([]byte(row.GetString("info")), tablet); err != nil { - log.Error(err) - return nil - } - if !latestInstances[topoproto.TabletAliasString(tablet.Alias)] { - toForget[curKey] = tablet + tabletAlias := row.GetString("alias") + if !latestInstances[tabletAlias] { + toForget = append(toForget, tabletAlias) } return nil }) if err != nil { log.Error(err) } - for instanceKey, tablet := range toForget { - log.Infof("Forgetting: %v", tablet) - _, err := db.ExecVTOrc(` - delete - from vitess_tablet - where - hostname=? and port=?`, - instanceKey.Hostname, - instanceKey.Port, - ) - if err != nil { - log.Error(err) - } - if err := inst.ForgetInstance(&instanceKey); err != nil { + for _, tabletAlias := range toForget { + if err := inst.ForgetInstance(tabletAlias); err != nil { log.Error(err) } } } +func getLockAction(analysedInstance string, code inst.AnalysisCode) string { + return fmt.Sprintf("VTOrc Recovery for %v on %v", code, analysedInstance) +} + // LockShard locks the keyspace-shard preventing others from performing conflicting actions. 
-func LockShard(ctx context.Context, instanceKey inst.InstanceKey) (context.Context, func(*error), error) { - if instanceKey.Hostname == "" { - return nil, nil, errors.New("Can't lock shard: instance is unspecified") +func LockShard(ctx context.Context, tabletAlias string, lockAction string) (context.Context, func(*error), error) { + if tabletAlias == "" { + return nil, nil, errors.New("can't lock shard: instance is unspecified") } val := atomic.LoadInt32(&hasReceivedSIGTERM) if val > 0 { - return nil, nil, errors.New("Can't lock shard: SIGTERM received") + return nil, nil, errors.New("can't lock shard: SIGTERM received") } - tablet, err := inst.ReadTablet(instanceKey) + tablet, err := inst.ReadTablet(tabletAlias) if err != nil { return nil, nil, err } atomic.AddInt32(&shardsLockCounter, 1) - ctx, unlock, err := ts.TryLockShard(ctx, tablet.Keyspace, tablet.Shard, "Orc Recovery") + ctx, unlock, err := ts.TryLockShard(ctx, tablet.Keyspace, tablet.Shard, lockAction) if err != nil { atomic.AddInt32(&shardsLockCounter, -1) return nil, nil, err @@ -314,6 +288,11 @@ func setReadOnly(ctx context.Context, tablet *topodatapb.Tablet) error { return tmc.SetReadOnly(ctx, tablet) } +// changeTabletType calls the said RPC for the given tablet with the given parameters. 
+func changeTabletType(ctx context.Context, tablet *topodatapb.Tablet, tabletType topodatapb.TabletType, semiSync bool) error { + return tmc.ChangeType(ctx, tablet, tabletType, semiSync) +} + // setReplicationSource calls the said RPC with the parameters provided func setReplicationSource(ctx context.Context, replica *topodatapb.Tablet, primary *topodatapb.Tablet, semiSync bool) error { return tmc.SetReplicationSource(ctx, replica, primary.Alias, 0, "", true, semiSync) @@ -322,11 +301,7 @@ func setReplicationSource(ctx context.Context, replica *topodatapb.Tablet, prima // shardPrimary finds the primary of the given keyspace-shard by reading the vtorc backend func shardPrimary(keyspace string, shard string) (primary *topodatapb.Tablet, err error) { query := `SELECT - info, - hostname, - port, - tablet_type, - primary_timestamp + info FROM vitess_tablet WHERE @@ -351,10 +326,10 @@ func shardPrimary(keyspace string, shard string) (primary *topodatapb.Tablet, er } // restartsReplication restarts the replication on the provided replicaKey. 
It also sets the correct semi-sync settings when it starts replication -func restartReplication(replicaKey *inst.InstanceKey) error { - replicaTablet, err := inst.ReadTablet(*replicaKey) +func restartReplication(replicaAlias string) error { + replicaTablet, err := inst.ReadTablet(replicaAlias) if err != nil { - log.Info("Could not read tablet - %+v", replicaKey) + log.Info("Could not read tablet - %+v", replicaAlias) return err } @@ -364,7 +339,7 @@ func restartReplication(replicaKey *inst.InstanceKey) error { return err } - durabilityPolicy, err := inst.GetDurabilityPolicy(replicaTablet) + durabilityPolicy, err := inst.GetDurabilityPolicy(replicaTablet.Keyspace) if err != nil { log.Info("Could not read the durability policy for %v/%v", replicaTablet.Keyspace, replicaTablet.Shard) return err @@ -374,12 +349,12 @@ func restartReplication(replicaKey *inst.InstanceKey) error { defer cancel() err = tmc.StopReplication(ctx, replicaTablet) if err != nil { - log.Info("Could not stop replication on %v", topoproto.TabletAliasString(replicaTablet.Alias)) + log.Info("Could not stop replication on %v", replicaAlias) return err } - err = tmc.StartReplication(ctx, replicaTablet, inst.IsReplicaSemiSync(durabilityPolicy, primaryTablet, replicaTablet)) + err = tmc.StartReplication(ctx, replicaTablet, reparentutil.IsReplicaSemiSync(durabilityPolicy, primaryTablet, replicaTablet)) if err != nil { - log.Info("Could not start replication on %v", topoproto.TabletAliasString(replicaTablet.Alias)) + log.Info("Could not start replication on %v", replicaAlias) return err } return nil diff --git a/go/vt/vtorc/logic/tablet_discovery_test.go b/go/vt/vtorc/logic/tablet_discovery_test.go index ee107056033..0e8ac72fabf 100644 --- a/go/vt/vtorc/logic/tablet_discovery_test.go +++ b/go/vt/vtorc/logic/tablet_discovery_test.go @@ -18,6 +18,7 @@ package logic import ( "context" + "fmt" "sync/atomic" "testing" @@ -27,11 +28,10 @@ import ( "google.golang.org/protobuf/proto" 
"vitess.io/vitess/go/vt/external/golib/sqlutils" - "vitess.io/vitess/go/vt/topo/topoproto" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/proto/vttime" "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/inst" ) @@ -105,18 +105,17 @@ func TestRefreshTabletsInKeyspaceShard(t *testing.T) { ts = oldTs }() - // Open the vtorc - // After the test completes delete everything from the vitess_tablet table - orcDb, err := db.OpenVTOrc() - require.NoError(t, err) + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. defer func() { - _, err = orcDb.Exec("delete from vitess_tablet") - require.NoError(t, err) + db.ClearVTOrcDatabase() }() // Create a memory topo-server and create the keyspace and shard records - ts = memorytopo.NewServer(cell1) - _, err = ts.GetOrCreateShard(context.Background(), keyspace, shard) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts = memorytopo.NewServer(ctx, cell1) + _, err := ts.GetOrCreateShard(context.Background(), keyspace, shard) require.NoError(t, err) // Add tablets to the topo-server @@ -147,22 +146,28 @@ func TestRefreshTabletsInKeyspaceShard(t *testing.T) { }) t.Run("tablet shutdown removes mysql hostname and port. We shouldn't forget the tablet", func(t *testing.T) { + startPort := tab100.MysqlPort + startHostname := tab100.MysqlHostname defer func() { + tab100.MysqlPort = startPort + tab100.MysqlHostname = startHostname _, err = ts.UpdateTabletFields(context.Background(), tab100.Alias, func(tablet *topodatapb.Tablet) error { - tablet.MysqlHostname = hostname - tablet.MysqlPort = 100 + tablet.MysqlHostname = startHostname + tablet.MysqlPort = startPort return nil }) }() - // Let's assume tab100 shutdown. This would clear its tablet hostname and port + // Let's assume tab100 shutdown. 
This would clear its tablet hostname and port. + tab100.MysqlPort = 0 + tab100.MysqlHostname = "" _, err = ts.UpdateTabletFields(context.Background(), tab100.Alias, func(tablet *topodatapb.Tablet) error { tablet.MysqlHostname = "" tablet.MysqlPort = 0 return nil }) require.NoError(t, err) - // We expect no tablets to be refreshed. Also, tab100 shouldn't be forgotten - verifyRefreshTabletsInKeyspaceShard(t, false, 0, tablets, nil) + // tab100 shouldn't be forgotten + verifyRefreshTabletsInKeyspaceShard(t, false, 1, tablets, nil) }) t.Run("change a tablet and call refreshTabletsInKeyspaceShard again", func(t *testing.T) { @@ -233,22 +238,18 @@ func TestShardPrimary(t *testing.T) { ts = oldTs }() - // Open the vtorc - // After the test completes delete everything from the vitess_tablet table - orcDb, err := db.OpenVTOrc() - require.NoError(t, err) - defer func() { - _, err = orcDb.Exec("delete from vitess_tablet") - require.NoError(t, err) - }() - for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { - _, err = orcDb.Exec("delete from vitess_tablet") + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. 
+ defer func() { + db.ClearVTOrcDatabase() + }() // Create a memory topo-server and create the keyspace and shard records - ts = memorytopo.NewServer(cell1) - _, err = ts.GetOrCreateShard(context.Background(), keyspace, shard) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts = memorytopo.NewServer(ctx, cell1) + _, err := ts.GetOrCreateShard(context.Background(), keyspace, shard) require.NoError(t, err) // Add tablets to the topo-server @@ -279,7 +280,7 @@ func verifyRefreshTabletsInKeyspaceShard(t *testing.T, forceRefresh bool, instan var instancesRefreshed atomic.Int32 instancesRefreshed.Store(0) // call refreshTabletsInKeyspaceShard while counting all the instances that are refreshed - refreshTabletsInKeyspaceShard(context.Background(), keyspace, shard, func(instanceKey *inst.InstanceKey) { + refreshTabletsInKeyspaceShard(context.Background(), keyspace, shard, func(string) { instancesRefreshed.Add(1) }, forceRefresh, tabletsToIgnore) // Verify that all the tablets are present in the database @@ -295,16 +296,13 @@ func verifyRefreshTabletsInKeyspaceShard(t *testing.T, forceRefresh bool, instan // is the same as the one provided or reading it gives the same error as expected func verifyTabletInfo(t *testing.T, tabletWanted *topodatapb.Tablet, errString string) { t.Helper() - tabletKey := inst.InstanceKey{ - Hostname: hostname, - Port: int(tabletWanted.MysqlPort), - } - tablet, err := inst.ReadTablet(tabletKey) + tabletAlias := topoproto.TabletAliasString(tabletWanted.Alias) + tablet, err := inst.ReadTablet(tabletAlias) if errString != "" { assert.EqualError(t, err, errString) } else { assert.NoError(t, err) - assert.EqualValues(t, tabletKey.Port, tablet.MysqlPort) + assert.EqualValues(t, tabletAlias, topoproto.TabletAliasString(tablet.Alias)) diff := cmp.Diff(tablet, tabletWanted, cmp.Comparer(proto.Equal)) assert.Empty(t, diff) } @@ -321,3 +319,26 @@ func verifyTabletCount(t *testing.T, countWanted int) { require.NoError(t, err) 
require.Equal(t, countWanted, totalTablets) } + +func TestGetLockAction(t *testing.T) { + tests := []struct { + analysedInstance string + code inst.AnalysisCode + want string + }{ + { + analysedInstance: "zone1-100", + code: inst.DeadPrimary, + want: "VTOrc Recovery for DeadPrimary on zone1-100", + }, { + analysedInstance: "zone1-200", + code: inst.ReplicationStopped, + want: "VTOrc Recovery for ReplicationStopped on zone1-200", + }, + } + for _, tt := range tests { + t.Run(fmt.Sprintf("%v-%v", tt.analysedInstance, tt.code), func(t *testing.T) { + require.Equal(t, tt.want, getLockAction(tt.analysedInstance, tt.code)) + }) + } +} diff --git a/go/vt/vtorc/logic/topology_recovery.go b/go/vt/vtorc/logic/topology_recovery.go index 3b2734bcda0..d3e73c00886 100644 --- a/go/vt/vtorc/logic/topology_recovery.go +++ b/go/vt/vtorc/logic/topology_recovery.go @@ -21,17 +21,15 @@ import ( "encoding/json" "fmt" "math/rand" - "strings" "time" "github.com/patrickmn/go-cache" - logutilpb "vitess.io/vitess/go/vt/proto/logutil" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" + logutilpb "vitess.io/vitess/go/vt/proto/logutil" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/vtorc/config" @@ -43,17 +41,15 @@ import ( type RecoveryType string const ( - PrimaryRecovery RecoveryType = "PrimaryRecovery" - CoPrimaryRecovery RecoveryType = "CoPrimaryRecovery" - IntermediatePrimaryRecovery RecoveryType = "IntermediatePrimaryRecovery" - CheckAndRecoverGenericProblemRecoveryName string = "CheckAndRecoverGenericProblem" RecoverDeadPrimaryRecoveryName string = "RecoverDeadPrimary" + RecoverPrimaryTabletDeletedRecoveryName string = "RecoverPrimaryTabletDeleted" RecoverPrimaryHasPrimaryRecoveryName string = "RecoverPrimaryHasPrimary" CheckAndRecoverLockedSemiSyncPrimaryRecoveryName string = 
"CheckAndRecoverLockedSemiSyncPrimary" ElectNewPrimaryRecoveryName string = "ElectNewPrimary" FixPrimaryRecoveryName string = "FixPrimary" FixReplicaRecoveryName string = "FixReplica" + RecoverErrantGTIDDetectedName string = "RecoverErrantGTIDDetected" ) var ( @@ -67,6 +63,17 @@ var ( countPendingRecoveries = stats.NewGauge("PendingRecoveries", "Count of the number of pending recoveries") + // detectedProblems is used to track the number of detected problems. + // + // When an issue is active it will be set to 1, when it is no longer active + // it will be reset back to 0. + detectedProblems = stats.NewGaugesWithMultiLabels("DetectedProblems", "Count of the different detected problems", []string{ + "Analysis", + "TabletAlias", + "Keyspace", + "Shard", + }) + // recoveriesCounter counts the number of recoveries that VTOrc has performed recoveriesCounter = stats.NewCountersWithSingleLabel("RecoveriesCount", "Count of the different recoveries performed", "RecoveryType", actionableRecoveriesNames...) 
@@ -86,69 +93,44 @@ const ( noRecoveryFunc recoveryFunction = iota recoverGenericProblemFunc recoverDeadPrimaryFunc + recoverPrimaryTabletDeletedFunc recoverPrimaryHasPrimaryFunc recoverLockedSemiSyncPrimaryFunc electNewPrimaryFunc fixPrimaryFunc fixReplicaFunc + recoverErrantGTIDDetectedFunc ) -type RecoveryAcknowledgement struct { - CreatedAt time.Time - Owner string - Comment string - - Key inst.InstanceKey - ID int64 - UID string - AllRecoveries bool -} - -// BlockedTopologyRecovery represents an entry in the blocked_topology_recovery table -type BlockedTopologyRecovery struct { - FailedInstanceKey inst.InstanceKey - Analysis inst.AnalysisCode - LastBlockedTimestamp string - BlockingRecoveryID int64 -} - // TopologyRecovery represents an entry in the topology_recovery table type TopologyRecovery struct { - inst.PostponedFunctionsContainer - - ID int64 - UID string - AnalysisEntry inst.ReplicationAnalysis - SuccessorKey *inst.InstanceKey - SuccessorAlias string - IsActive bool - IsSuccessful bool - LostReplicas inst.InstanceKeyMap - ParticipatingInstanceKeys inst.InstanceKeyMap - AllErrors []string - RecoveryStartTimestamp string - RecoveryEndTimestamp string - ProcessingNodeHostname string - ProcessingNodeToken string - Acknowledged bool - AcknowledgedAt string - AcknowledgedBy string - AcknowledgedComment string - LastDetectionID int64 - RelatedRecoveryID int64 - Type RecoveryType - RecoveryType PrimaryRecoveryType + ID int64 + UID string + AnalysisEntry inst.ReplicationAnalysis + SuccessorHostname string + SuccessorPort int + SuccessorAlias string + IsActive bool + IsSuccessful bool + AllErrors []string + RecoveryStartTimestamp string + RecoveryEndTimestamp string + ProcessingNodeHostname string + ProcessingNodeToken string + Acknowledged bool + AcknowledgedAt string + AcknowledgedBy string + AcknowledgedComment string + LastDetectionID int64 + RelatedRecoveryID int64 + Type RecoveryType } func NewTopologyRecovery(replicationAnalysis inst.ReplicationAnalysis) 
*TopologyRecovery { topologyRecovery := &TopologyRecovery{} topologyRecovery.UID = util.PrettyUniqueToken() topologyRecovery.AnalysisEntry = replicationAnalysis - topologyRecovery.SuccessorKey = nil - topologyRecovery.LostReplicas = *inst.NewInstanceKeyMap() - topologyRecovery.ParticipatingInstanceKeys = *inst.NewInstanceKeyMap() topologyRecovery.AllErrors = []string{} - topologyRecovery.RecoveryType = NotPrimaryRecovery return topologyRecovery } @@ -179,15 +161,6 @@ func NewTopologyRecoveryStep(uid string, message string) *TopologyRecoveryStep { } } -type PrimaryRecoveryType string - -const ( - NotPrimaryRecovery PrimaryRecoveryType = "NotPrimaryRecovery" - PrimaryRecoveryGTID PrimaryRecoveryType = "PrimaryRecoveryGTID" - PrimaryRecoveryBinlogServer PrimaryRecoveryType = "PrimaryRecoveryBinlogServer" - PrimaryRecoveryUnknown PrimaryRecoveryType = "PrimaryRecoveryUnknown" -) - var emergencyReadTopologyInstanceMap *cache.Cache var emergencyRestartReplicaTopologyInstanceMap *cache.Cache var emergencyOperationGracefulPeriodMap *cache.Cache @@ -217,7 +190,6 @@ func AuditTopologyRecovery(topologyRecovery *TopologyRecovery, message string) e func resolveRecovery(topologyRecovery *TopologyRecovery, successorInstance *inst.Instance) error { if successorInstance != nil { - topologyRecovery.SuccessorKey = &successorInstance.Key topologyRecovery.SuccessorAlias = successorInstance.InstanceAlias topologyRecovery.IsSuccessful = true } @@ -225,13 +197,13 @@ func resolveRecovery(topologyRecovery *TopologyRecovery, successorInstance *inst } // recoverPrimaryHasPrimary resets the replication on the primary instance -func recoverPrimaryHasPrimary(ctx context.Context, analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { - topologyRecovery, err = AttemptRecoveryRegistration(&analysisEntry, false, true) +func recoverPrimaryHasPrimary(ctx 
context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { + topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry, false, true) if topologyRecovery == nil { - _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another fixPrimaryHasPrimary.", analysisEntry.AnalyzedInstanceKey)) + _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another fixPrimaryHasPrimary.", analysisEntry.AnalyzedInstanceAlias)) return false, nil, err } - log.Infof("Analysis: %v, will fix incorrect primaryship %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey) + log.Infof("Analysis: %v, will fix incorrect primaryship on %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) // This has to be done in the end; whether successful or not, we should mark that the recovery is done. // So that after the active period passes, we are able to run other recoveries. defer func() { @@ -239,40 +211,32 @@ func recoverPrimaryHasPrimary(ctx context.Context, analysisEntry inst.Replicatio }() // Reset replication on current primary. - err = inst.ResetReplicationParameters(analysisEntry.AnalyzedInstanceKey) + err = inst.ResetReplicationParameters(analysisEntry.AnalyzedInstanceAlias) if err != nil { return false, topologyRecovery, err } return true, topologyRecovery, nil } -// recoverDeadPrimary checks a given analysis, decides whether to take action, and possibly takes action -// Returns true when action was taken. 
-func recoverDeadPrimary(ctx context.Context, analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { - if !(forceInstanceRecovery || analysisEntry.ClusterDetails.HasAutomatedPrimaryRecovery) { +// runEmergencyReparentOp runs a recovery for which we have to run ERS. Here waitForAllTablets is a boolean telling ERS whether it should wait for all the tablets +// or is it okay to skip 1. +func runEmergencyReparentOp(ctx context.Context, analysisEntry *inst.ReplicationAnalysis, recoveryName string, waitForAllTablets bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { + if !analysisEntry.ClusterDetails.HasAutomatedPrimaryRecovery { return false, nil, nil } // Read the tablet information from the database to find the shard and keyspace of the tablet - tablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceKey) + tablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceAlias) if err != nil { return false, nil, err } - var candidateTabletAlias *topodatapb.TabletAlias - if candidateInstanceKey != nil { - candidateTablet, err := inst.ReadTablet(*candidateInstanceKey) - if err != nil { - return false, nil, err - } - candidateTabletAlias = candidateTablet.Alias - } - topologyRecovery, err = AttemptRecoveryRegistration(&analysisEntry, !forceInstanceRecovery, !forceInstanceRecovery) + topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry, true, true) if topologyRecovery == nil { - _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another RecoverDeadPrimary.", analysisEntry.AnalyzedInstanceKey)) + _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. 
Will not issue another %v.", analysisEntry.AnalyzedInstanceAlias, recoveryName)) return false, nil, err } - log.Infof("Analysis: %v, deadprimary %+v with candidate %s", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey, candidateTabletAlias) + log.Infof("Analysis: %v, %v %+v", analysisEntry.Analysis, recoveryName, analysisEntry.AnalyzedInstanceAlias) var promotedReplica *inst.Instance // This has to be done in the end; whether successful or not, we should mark that the recovery is done. // So that after the active period passes, we are able to run other recoveries. @@ -297,10 +261,10 @@ func recoverDeadPrimary(ctx context.Context, analysisEntry inst.ReplicationAnaly tablet.Keyspace, tablet.Shard, reparentutil.EmergencyReparentOptions{ - NewPrimaryAlias: candidateTabletAlias, IgnoreReplicas: nil, WaitReplicasTimeout: time.Duration(config.Config.WaitReplicasTimeoutSeconds) * time.Second, PreventCrossCellPromotion: config.Config.PreventCrossDataCenterPrimaryFailover, + WaitAllTablets: waitForAllTablets, }, ) if err != nil { @@ -308,80 +272,84 @@ func recoverDeadPrimary(ctx context.Context, analysisEntry inst.ReplicationAnaly } if ev != nil && ev.NewPrimary != nil { - promotedReplica, _, _ = inst.ReadInstance(&inst.InstanceKey{ - Hostname: ev.NewPrimary.MysqlHostname, - Port: int(ev.NewPrimary.MysqlPort), - }) + promotedReplica, _, _ = inst.ReadInstance(topoproto.TabletAliasString(ev.NewPrimary.Alias)) } - postErsCompletion(topologyRecovery, analysisEntry, skipProcesses, promotedReplica) + postErsCompletion(topologyRecovery, analysisEntry, recoveryName, promotedReplica) return true, topologyRecovery, err } -func postErsCompletion(topologyRecovery *TopologyRecovery, analysisEntry inst.ReplicationAnalysis, skipProcesses bool, promotedReplica *inst.Instance) { +// recoverDeadPrimary checks a given analysis, decides whether to take action, and possibly takes action +// Returns true when action was taken. 
+func recoverDeadPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { + return runEmergencyReparentOp(ctx, analysisEntry, "RecoverDeadPrimary", false) +} + +// recoverPrimaryTabletDeleted tries to run a recovery for the case where the primary tablet has been deleted. +func recoverPrimaryTabletDeleted(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { + return runEmergencyReparentOp(ctx, analysisEntry, "PrimaryTabletDeleted", true) +} + +func postErsCompletion(topologyRecovery *TopologyRecovery, analysisEntry *inst.ReplicationAnalysis, recoveryName string, promotedReplica *inst.Instance) { if promotedReplica != nil { - message := fmt.Sprintf("promoted replica: %+v", promotedReplica.Key) + message := fmt.Sprintf("promoted replica: %+v", promotedReplica.InstanceAlias) _ = AuditTopologyRecovery(topologyRecovery, message) - _ = inst.AuditOperation("recover-dead-primary", &analysisEntry.AnalyzedInstanceKey, message) - } - // Now, see whether we are successful or not. From this point there's no going back. - if promotedReplica != nil { - // Success! 
- _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadPrimary: successfully promoted %+v", promotedReplica.Key)) + _ = inst.AuditOperation(recoveryName, analysisEntry.AnalyzedInstanceAlias, message) + _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("%v: successfully promoted %+v", recoveryName, promotedReplica.InstanceAlias)) } } // checkAndRecoverGenericProblem is a general-purpose recovery function -func checkAndRecoverLockedSemiSyncPrimary(ctx context.Context, analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { +func checkAndRecoverLockedSemiSyncPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { return false, nil, nil } // checkAndRecoverGenericProblem is a general-purpose recovery function -func checkAndRecoverGenericProblem(ctx context.Context, analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (bool, *TopologyRecovery, error) { +func checkAndRecoverGenericProblem(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (bool, *TopologyRecovery, error) { return false, nil, nil } // Force a re-read of a topology instance; this is done because we need to substantiate a suspicion // that we may have a failover scenario. we want to speed up reading the complete picture. 
-func emergentlyReadTopologyInstance(instanceKey *inst.InstanceKey, analysisCode inst.AnalysisCode) (instance *inst.Instance) { - if existsInCacheError := emergencyReadTopologyInstanceMap.Add(instanceKey.StringCode(), true, cache.DefaultExpiration); existsInCacheError != nil { +func emergentlyReadTopologyInstance(tabletAlias string, analysisCode inst.AnalysisCode) (instance *inst.Instance) { + if existsInCacheError := emergencyReadTopologyInstanceMap.Add(tabletAlias, true, cache.DefaultExpiration); existsInCacheError != nil { // Just recently attempted return nil } - instance, _ = inst.ReadTopologyInstance(instanceKey) - _ = inst.AuditOperation("emergently-read-topology-instance", instanceKey, string(analysisCode)) + instance, _ = inst.ReadTopologyInstance(tabletAlias) + _ = inst.AuditOperation("emergently-read-topology-instance", tabletAlias, string(analysisCode)) return instance } // Force reading of replicas of given instance. This is because we suspect the instance is dead, and want to speed up // detection of replication failure from its replicas. -func emergentlyReadTopologyInstanceReplicas(instanceKey *inst.InstanceKey, analysisCode inst.AnalysisCode) { - replicas, err := inst.ReadReplicaInstancesIncludingBinlogServerSubReplicas(instanceKey) +func emergentlyReadTopologyInstanceReplicas(primaryHost string, primaryPort int, analysisCode inst.AnalysisCode) { + replicas, err := inst.ReadReplicaInstancesIncludingBinlogServerSubReplicas(primaryHost, primaryPort) if err != nil { return } for _, replica := range replicas { - go emergentlyReadTopologyInstance(&replica.Key, analysisCode) + go emergentlyReadTopologyInstance(replica.InstanceAlias, analysisCode) } } // emergentlyRestartReplicationOnTopologyInstance forces a RestartReplication on a given instance. 
-func emergentlyRestartReplicationOnTopologyInstance(instanceKey *inst.InstanceKey, analysisCode inst.AnalysisCode) { - if existsInCacheError := emergencyRestartReplicaTopologyInstanceMap.Add(instanceKey.StringCode(), true, cache.DefaultExpiration); existsInCacheError != nil { +func emergentlyRestartReplicationOnTopologyInstance(tabletAlias string, analysisCode inst.AnalysisCode) { + if existsInCacheError := emergencyRestartReplicaTopologyInstanceMap.Add(tabletAlias, true, cache.DefaultExpiration); existsInCacheError != nil { // Just recently attempted on this specific replica return } go inst.ExecuteOnTopology(func() { - _ = restartReplication(instanceKey) - _ = inst.AuditOperation("emergently-restart-replication-topology-instance", instanceKey, string(analysisCode)) + _ = restartReplication(tabletAlias) + _ = inst.AuditOperation("emergently-restart-replication-topology-instance", tabletAlias, string(analysisCode)) }) } -func beginEmergencyOperationGracefulPeriod(instanceKey *inst.InstanceKey) { - emergencyOperationGracefulPeriodMap.Set(instanceKey.StringCode(), true, cache.DefaultExpiration) +func beginEmergencyOperationGracefulPeriod(tabletAlias string) { + emergencyOperationGracefulPeriodMap.Set(tabletAlias, true, cache.DefaultExpiration) } -func isInEmergencyOperationGracefulPeriod(instanceKey *inst.InstanceKey) bool { - _, found := emergencyOperationGracefulPeriodMap.Get(instanceKey.StringCode()) +func isInEmergencyOperationGracefulPeriod(tabletAlias string) bool { + _, found := emergencyOperationGracefulPeriodMap.Get(tabletAlias) return found } @@ -390,26 +358,25 @@ func isInEmergencyOperationGracefulPeriod(instanceKey *inst.InstanceKey) bool { // This can be useful in scenarios where the primary has Too Many Connections, but long-time connected // replicas are not seeing this; when they stop+start replication, they need to re-authenticate and // that's where we hope they realize the primary is bad. 
-func emergentlyRestartReplicationOnTopologyInstanceReplicas(instanceKey *inst.InstanceKey, analysisCode inst.AnalysisCode) { - if existsInCacheError := emergencyRestartReplicaTopologyInstanceMap.Add(instanceKey.StringCode(), true, cache.DefaultExpiration); existsInCacheError != nil { +func emergentlyRestartReplicationOnTopologyInstanceReplicas(primaryHost string, primaryPort int, tabletAlias string, analysisCode inst.AnalysisCode) { + if existsInCacheError := emergencyRestartReplicaTopologyInstanceMap.Add(tabletAlias, true, cache.DefaultExpiration); existsInCacheError != nil { // While each replica's RestartReplication() is throttled on its own, it's also wasteful to // iterate all replicas all the time. This is the reason why we do grand-throttle check. return } - beginEmergencyOperationGracefulPeriod(instanceKey) + beginEmergencyOperationGracefulPeriod(tabletAlias) - replicas, err := inst.ReadReplicaInstancesIncludingBinlogServerSubReplicas(instanceKey) + replicas, err := inst.ReadReplicaInstancesIncludingBinlogServerSubReplicas(primaryHost, primaryPort) if err != nil { return } for _, replica := range replicas { - replicaKey := &replica.Key - go emergentlyRestartReplicationOnTopologyInstance(replicaKey, analysisCode) + go emergentlyRestartReplicationOnTopologyInstance(replica.InstanceAlias, analysisCode) } } -func emergentlyRecordStaleBinlogCoordinates(instanceKey *inst.InstanceKey, binlogCoordinates *inst.BinlogCoordinates) { - err := inst.RecordStaleInstanceBinlogCoordinates(instanceKey, binlogCoordinates) +func emergentlyRecordStaleBinlogCoordinates(tabletAlias string, binlogCoordinates *inst.BinlogCoordinates) { + err := inst.RecordStaleInstanceBinlogCoordinates(tabletAlias, binlogCoordinates) if err != nil { log.Error(err) } @@ -417,30 +384,51 @@ func emergentlyRecordStaleBinlogCoordinates(instanceKey *inst.InstanceKey, binlo // checkAndExecuteFailureDetectionProcesses tries to register for failure detection and potentially executes // failure-detection 
processes. -func checkAndExecuteFailureDetectionProcesses(analysisEntry inst.ReplicationAnalysis, skipProcesses bool) (detectionRegistrationSuccess bool, processesExecutionAttempted bool, err error) { - if ok, _ := AttemptFailureDetectionRegistration(&analysisEntry); !ok { - if util.ClearToLog("checkAndExecuteFailureDetectionProcesses", analysisEntry.AnalyzedInstanceKey.StringCode()) { - log.Infof("checkAndExecuteFailureDetectionProcesses: could not register %+v detection on %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey) +func checkAndExecuteFailureDetectionProcesses(analysisEntry *inst.ReplicationAnalysis) (detectionRegistrationSuccess bool, processesExecutionAttempted bool, err error) { + if ok, _ := AttemptFailureDetectionRegistration(analysisEntry); !ok { + if util.ClearToLog("checkAndExecuteFailureDetectionProcesses", analysisEntry.AnalyzedInstanceAlias) { + log.Infof("checkAndExecuteFailureDetectionProcesses: could not register %+v detection on %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) } return false, false, nil } - log.Infof("topology_recovery: detected %+v failure on %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey) + log.Infof("topology_recovery: detected %+v failure on %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) return true, false, nil } // getCheckAndRecoverFunctionCode gets the recovery function code to use for the given analysis. -func getCheckAndRecoverFunctionCode(analysisCode inst.AnalysisCode, analyzedInstanceKey *inst.InstanceKey) recoveryFunction { +func getCheckAndRecoverFunctionCode(analysisCode inst.AnalysisCode, tabletAlias string) recoveryFunction { switch analysisCode { // primary case inst.DeadPrimary, inst.DeadPrimaryAndSomeReplicas: - if isInEmergencyOperationGracefulPeriod(analyzedInstanceKey) { + // If ERS is disabled, we have no way of repairing the cluster. 
+ if !config.ERSEnabled() { + log.Infof("VTOrc not configured to run ERS, skipping recovering %v", analysisCode) + return noRecoveryFunc + } + if isInEmergencyOperationGracefulPeriod(tabletAlias) { return recoverGenericProblemFunc } return recoverDeadPrimaryFunc + case inst.PrimaryTabletDeleted: + // If ERS is disabled, we have no way of repairing the cluster. + if !config.ERSEnabled() { + log.Infof("VTOrc not configured to run ERS, skipping recovering %v", analysisCode) + return noRecoveryFunc + } + if isInEmergencyOperationGracefulPeriod(tabletAlias) { + return recoverGenericProblemFunc + } + return recoverPrimaryTabletDeletedFunc + case inst.ErrantGTIDDetected: + if !config.ConvertTabletWithErrantGTIDs() { + log.Infof("VTOrc not configured to do anything on detecting errant GTIDs, skipping recovering %v", analysisCode) + return noRecoveryFunc + } + return recoverErrantGTIDDetectedFunc case inst.PrimaryHasPrimary: return recoverPrimaryHasPrimaryFunc case inst.LockedSemiSyncPrimary: - if isInEmergencyOperationGracefulPeriod(analyzedInstanceKey) { + if isInEmergencyOperationGracefulPeriod(tabletAlias) { return recoverGenericProblemFunc } return recoverLockedSemiSyncPrimaryFunc @@ -481,6 +469,8 @@ func hasActionableRecovery(recoveryFunctionCode recoveryFunction) bool { return false case recoverDeadPrimaryFunc: return true + case recoverPrimaryTabletDeletedFunc: + return true case recoverPrimaryHasPrimaryFunc: return true case recoverLockedSemiSyncPrimaryFunc: @@ -491,6 +481,8 @@ func hasActionableRecovery(recoveryFunctionCode recoveryFunction) bool { return true case fixReplicaFunc: return true + case recoverErrantGTIDDetectedFunc: + return true default: return false } @@ -498,7 +490,7 @@ func hasActionableRecovery(recoveryFunctionCode recoveryFunction) bool { // getCheckAndRecoverFunction gets the recovery function for the given code. 
func getCheckAndRecoverFunction(recoveryFunctionCode recoveryFunction) ( - checkAndRecoverFunction func(ctx context.Context, analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error), + checkAndRecoverFunction func(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error), ) { switch recoveryFunctionCode { case noRecoveryFunc: @@ -507,6 +499,8 @@ func getCheckAndRecoverFunction(recoveryFunctionCode recoveryFunction) ( return checkAndRecoverGenericProblem case recoverDeadPrimaryFunc: return recoverDeadPrimary + case recoverPrimaryTabletDeletedFunc: + return recoverPrimaryTabletDeleted case recoverPrimaryHasPrimaryFunc: return recoverPrimaryHasPrimary case recoverLockedSemiSyncPrimaryFunc: @@ -517,6 +511,8 @@ func getCheckAndRecoverFunction(recoveryFunctionCode recoveryFunction) ( return fixPrimary case fixReplicaFunc: return fixReplica + case recoverErrantGTIDDetectedFunc: + return recoverErrantGTIDDetected default: return nil } @@ -532,6 +528,8 @@ func getRecoverFunctionName(recoveryFunctionCode recoveryFunction) string { return CheckAndRecoverGenericProblemRecoveryName case recoverDeadPrimaryFunc: return RecoverDeadPrimaryRecoveryName + case recoverPrimaryTabletDeletedFunc: + return RecoverPrimaryTabletDeletedRecoveryName case recoverPrimaryHasPrimaryFunc: return RecoverPrimaryHasPrimaryRecoveryName case recoverLockedSemiSyncPrimaryFunc: @@ -542,6 +540,8 @@ func getRecoverFunctionName(recoveryFunctionCode recoveryFunction) string { return FixPrimaryRecoveryName case fixReplicaFunc: return FixReplicaRecoveryName + case recoverErrantGTIDDetectedFunc: + return RecoverErrantGTIDDetectedName default: return "" } @@ -550,7 +550,7 @@ func getRecoverFunctionName(recoveryFunctionCode recoveryFunction) string { // isClusterWideRecovery returns whether the 
given recovery is a cluster-wide recovery or not func isClusterWideRecovery(recoveryFunctionCode recoveryFunction) bool { switch recoveryFunctionCode { - case recoverDeadPrimaryFunc, electNewPrimaryFunc: + case recoverDeadPrimaryFunc, electNewPrimaryFunc, recoverPrimaryTabletDeletedFunc: return true default: return false @@ -558,65 +558,65 @@ func isClusterWideRecovery(recoveryFunctionCode recoveryFunction) bool { } // analysisEntriesHaveSameRecovery tells whether the two analysis entries have the same recovery function or not -func analysisEntriesHaveSameRecovery(prevAnalysis, newAnalysis inst.ReplicationAnalysis) bool { - prevRecoveryFunctionCode := getCheckAndRecoverFunctionCode(prevAnalysis.Analysis, &prevAnalysis.AnalyzedInstanceKey) - newRecoveryFunctionCode := getCheckAndRecoverFunctionCode(newAnalysis.Analysis, &newAnalysis.AnalyzedInstanceKey) +func analysisEntriesHaveSameRecovery(prevAnalysis, newAnalysis *inst.ReplicationAnalysis) bool { + prevRecoveryFunctionCode := getCheckAndRecoverFunctionCode(prevAnalysis.Analysis, prevAnalysis.AnalyzedInstanceAlias) + newRecoveryFunctionCode := getCheckAndRecoverFunctionCode(newAnalysis.Analysis, newAnalysis.AnalyzedInstanceAlias) return prevRecoveryFunctionCode == newRecoveryFunctionCode } func runEmergentOperations(analysisEntry *inst.ReplicationAnalysis) { switch analysisEntry.Analysis { case inst.DeadPrimaryAndReplicas: - go emergentlyReadTopologyInstance(&analysisEntry.AnalyzedInstancePrimaryKey, analysisEntry.Analysis) + go emergentlyReadTopologyInstance(analysisEntry.AnalyzedInstancePrimaryAlias, analysisEntry.Analysis) case inst.UnreachablePrimary: - go emergentlyReadTopologyInstance(&analysisEntry.AnalyzedInstanceKey, analysisEntry.Analysis) - go emergentlyReadTopologyInstanceReplicas(&analysisEntry.AnalyzedInstanceKey, analysisEntry.Analysis) + go emergentlyReadTopologyInstance(analysisEntry.AnalyzedInstanceAlias, analysisEntry.Analysis) + go 
emergentlyReadTopologyInstanceReplicas(analysisEntry.AnalyzedInstanceHostname, analysisEntry.AnalyzedInstancePort, analysisEntry.Analysis) case inst.UnreachablePrimaryWithLaggingReplicas: - go emergentlyRestartReplicationOnTopologyInstanceReplicas(&analysisEntry.AnalyzedInstanceKey, analysisEntry.Analysis) + go emergentlyRestartReplicationOnTopologyInstanceReplicas(analysisEntry.AnalyzedInstanceHostname, analysisEntry.AnalyzedInstancePort, analysisEntry.AnalyzedInstanceAlias, analysisEntry.Analysis) case inst.LockedSemiSyncPrimaryHypothesis: - go emergentlyReadTopologyInstance(&analysisEntry.AnalyzedInstanceKey, analysisEntry.Analysis) - go emergentlyRecordStaleBinlogCoordinates(&analysisEntry.AnalyzedInstanceKey, &analysisEntry.AnalyzedInstanceBinlogCoordinates) + go emergentlyReadTopologyInstance(analysisEntry.AnalyzedInstanceAlias, analysisEntry.Analysis) + go emergentlyRecordStaleBinlogCoordinates(analysisEntry.AnalyzedInstanceAlias, &analysisEntry.AnalyzedInstanceBinlogCoordinates) case inst.AllPrimaryReplicasNotReplicating: - go emergentlyReadTopologyInstance(&analysisEntry.AnalyzedInstanceKey, analysisEntry.Analysis) + go emergentlyReadTopologyInstance(analysisEntry.AnalyzedInstanceAlias, analysisEntry.Analysis) case inst.AllPrimaryReplicasNotReplicatingOrDead: - go emergentlyReadTopologyInstance(&analysisEntry.AnalyzedInstanceKey, analysisEntry.Analysis) + go emergentlyReadTopologyInstance(analysisEntry.AnalyzedInstanceAlias, analysisEntry.Analysis) } } // executeCheckAndRecoverFunction will choose the correct check & recovery function based on analysis. 
// It executes the function synchronuously -func executeCheckAndRecoverFunction(analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { +func executeCheckAndRecoverFunction(analysisEntry *inst.ReplicationAnalysis) (err error) { countPendingRecoveries.Add(1) defer countPendingRecoveries.Add(-1) - checkAndRecoverFunctionCode := getCheckAndRecoverFunctionCode(analysisEntry.Analysis, &analysisEntry.AnalyzedInstanceKey) + checkAndRecoverFunctionCode := getCheckAndRecoverFunctionCode(analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) isActionableRecovery := hasActionableRecovery(checkAndRecoverFunctionCode) analysisEntry.IsActionableRecovery = isActionableRecovery - runEmergentOperations(&analysisEntry) + runEmergentOperations(analysisEntry) if checkAndRecoverFunctionCode == noRecoveryFunc { // Unhandled problem type if analysisEntry.Analysis != inst.NoProblem { - if util.ClearToLog("executeCheckAndRecoverFunction", analysisEntry.AnalyzedInstanceKey.StringCode()) { - log.Warningf("executeCheckAndRecoverFunction: ignoring analysisEntry that has no action plan: %+v; key: %+v", - analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey) + if util.ClearToLog("executeCheckAndRecoverFunction", analysisEntry.AnalyzedInstanceAlias) { + log.Warningf("executeCheckAndRecoverFunction: ignoring analysisEntry that has no action plan: %+v; tablet: %+v", + analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) } } - return false, nil, nil + return nil } // we have a recovery function; its execution still depends on filters if not disabled. 
- if isActionableRecovery || util.ClearToLog("executeCheckAndRecoverFunction: detection", analysisEntry.AnalyzedInstanceKey.StringCode()) { - log.Infof("executeCheckAndRecoverFunction: proceeding with %+v detection on %+v; isActionable?: %+v; skipProcesses: %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey, isActionableRecovery, skipProcesses) + if isActionableRecovery || util.ClearToLog("executeCheckAndRecoverFunction: detection", analysisEntry.AnalyzedInstanceAlias) { + log.Infof("executeCheckAndRecoverFunction: proceeding with %+v detection on %+v; isActionable?: %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias, isActionableRecovery) } // At this point we have validated there's a failure scenario for which we have a recovery path. // Initiate detection: - _, _, err = checkAndExecuteFailureDetectionProcesses(analysisEntry, skipProcesses) + _, _, err = checkAndExecuteFailureDetectionProcesses(analysisEntry) if err != nil { log.Errorf("executeCheckAndRecoverFunction: error on failure detection: %+v", err) - return false, nil, err + return err } // We don't mind whether detection really executed the processes or not // (it may have been silenced due to previous detection). We only care there's no error. @@ -628,22 +628,16 @@ func executeCheckAndRecoverFunction(analysisEntry inst.ReplicationAnalysis, cand // Unexpected. 
Shouldn't get this log.Errorf("Unable to determine if recovery is disabled globally: %v", err) } else if recoveryDisabledGlobally { - if !forceInstanceRecovery { - log.Infof("CheckAndRecover: Analysis: %+v, InstanceKey: %+v, candidateInstanceKey: %+v, "+ - "skipProcesses: %v: NOT Recovering host (disabled globally)", - analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey, candidateInstanceKey, skipProcesses) + log.Infof("CheckAndRecover: Analysis: %+v, Tablet: %+v: NOT Recovering host (disabled globally)", + analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) - return false, nil, err - } - log.Infof("CheckAndRecover: Analysis: %+v, InstanceKey: %+v, candidateInstanceKey: %+v, "+ - "skipProcesses: %v: recoveries disabled globally but forcing this recovery", - analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey, candidateInstanceKey, skipProcesses) + return err } // We lock the shard here and then refresh the tablets information - ctx, unlock, err := LockShard(context.Background(), analysisEntry.AnalyzedInstanceKey) + ctx, unlock, err := LockShard(context.Background(), analysisEntry.AnalyzedInstanceAlias, getLockAction(analysisEntry.AnalyzedInstanceAlias, analysisEntry.Analysis)) if err != nil { - return false, nil, err + return err } defer unlock(&err) @@ -652,13 +646,15 @@ func executeCheckAndRecoverFunction(analysisEntry inst.ReplicationAnalysis, cand // changes, we should be checking that this failure is indeed needed to be fixed. We do this after locking the shard to be sure // that the data that we use now is up-to-date. 
if isActionableRecovery { - log.Errorf("executeCheckAndRecoverFunction: Proceeding with %v recovery validation after acquiring shard lock.", analysisEntry.Analysis) - // The first step we have to do is refresh the keyspace information + log.Errorf("executeCheckAndRecoverFunction: Proceeding with %v recovery on %v validation after acquiring shard lock.", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) + // The first step we have to do is refresh the keyspace and shard information // This is required to know if the durability policies have changed or not - // If they have, then recoveries like ReplicaSemiSyncMustNotBeSet, etc won't be valid anymore - err := RefreshKeyspace(analysisEntry.AnalyzedKeyspace) + // If they have, then recoveries like ReplicaSemiSyncMustNotBeSet, etc won't be valid anymore. + // Similarly, a new primary could have been elected in the mean-time that can cause + // a change in the recovery we run. + err = RefreshKeyspaceAndShard(analysisEntry.AnalyzedKeyspace, analysisEntry.AnalyzedShard) if err != nil { - return false, nil, err + return err } // If we are about to run a cluster-wide recovery, it is imperative to first refresh all the tablets // of a shard because a new tablet could have been promoted, and we need to have this visibility before we @@ -666,7 +662,7 @@ func executeCheckAndRecoverFunction(analysisEntry inst.ReplicationAnalysis, cand if isClusterWideRecovery(checkAndRecoverFunctionCode) { var tabletsToIgnore []string if checkAndRecoverFunctionCode == recoverDeadPrimaryFunc { - tabletsToIgnore = append(tabletsToIgnore, topoproto.TabletAliasString(analysisEntry.AnalyzedInstanceAlias)) + tabletsToIgnore = append(tabletsToIgnore, analysisEntry.AnalyzedInstanceAlias) } // We ignore the dead primary tablet because it is going to be unreachable. If all the other tablets aren't able to reach this tablet either, // we can proceed with the dead primary recovery. We don't need to refresh the information for this dead tablet. 
@@ -679,42 +675,39 @@ func executeCheckAndRecoverFunction(analysisEntry inst.ReplicationAnalysis, cand // So, we only need to refresh the tablet info records (to know if the primary tablet has changed), // and the replication data of the new primary and this tablet. refreshTabletInfoOfShard(ctx, analysisEntry.AnalyzedKeyspace, analysisEntry.AnalyzedShard) - DiscoverInstance(analysisEntry.AnalyzedInstanceKey, true) + DiscoverInstance(analysisEntry.AnalyzedInstanceAlias, true) primaryTablet, err := shardPrimary(analysisEntry.AnalyzedKeyspace, analysisEntry.AnalyzedShard) if err != nil { - log.Errorf("executeCheckAndRecoverFunction: Analysis: %+v, InstanceKey: %+v, candidateInstanceKey: %+v, "+"skipProcesses: %v: error while finding the shard primary: %v", - analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey, candidateInstanceKey, skipProcesses, err) - return false, nil, err - } - primaryInstanceKey := inst.InstanceKey{ - Hostname: primaryTablet.MysqlHostname, - Port: int(primaryTablet.MysqlPort), + log.Errorf("executeCheckAndRecoverFunction: Analysis: %+v, Tablet: %+v: error while finding the shard primary: %v", + analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias, err) + return err } + primaryTabletAlias := topoproto.TabletAliasString(primaryTablet.Alias) // We can skip the refresh if we know the tablet we are looking at is the primary tablet. // This would be the case for PrimaryHasPrimary recovery. We don't need to refresh the same tablet twice. 
- if !analysisEntry.AnalyzedInstanceKey.Equals(&primaryInstanceKey) { - DiscoverInstance(primaryInstanceKey, true) + if analysisEntry.AnalyzedInstanceAlias != primaryTabletAlias { + DiscoverInstance(primaryTabletAlias, true) } } alreadyFixed, err := checkIfAlreadyFixed(analysisEntry) if err != nil { - log.Errorf("executeCheckAndRecoverFunction: Analysis: %+v, InstanceKey: %+v, candidateInstanceKey: %+v, "+"skipProcesses: %v: error while trying to find if the problem is already fixed: %v", - analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey, candidateInstanceKey, skipProcesses, err) - return false, nil, err + log.Errorf("executeCheckAndRecoverFunction: Analysis: %+v, Tablet: %+v: error while trying to find if the problem is already fixed: %v", + analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias, err) + return err } if alreadyFixed { - log.Infof("Analysis: %v - No longer valid, some other agent must have fixed the problem.", analysisEntry.Analysis) - return false, nil, nil + log.Infof("Analysis: %v on tablet %v - No longer valid, some other agent must have fixed the problem.", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) + return nil } } // Actually attempt recovery: - if isActionableRecovery || util.ClearToLog("executeCheckAndRecoverFunction: recovery", analysisEntry.AnalyzedInstanceKey.StringCode()) { - log.Infof("executeCheckAndRecoverFunction: proceeding with %+v recovery on %+v; isRecoverable?: %+v; skipProcesses: %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey, isActionableRecovery, skipProcesses) + if isActionableRecovery || util.ClearToLog("executeCheckAndRecoverFunction: recovery", analysisEntry.AnalyzedInstanceAlias) { + log.Infof("executeCheckAndRecoverFunction: proceeding with %+v recovery on %+v; isRecoverable?: %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias, isActionableRecovery) } - recoveryAttempted, topologyRecovery, err = getCheckAndRecoverFunction(checkAndRecoverFunctionCode)(ctx, 
analysisEntry, candidateInstanceKey, forceInstanceRecovery, skipProcesses) + recoveryAttempted, topologyRecovery, err := getCheckAndRecoverFunction(checkAndRecoverFunctionCode)(ctx, analysisEntry) if !recoveryAttempted { - return recoveryAttempted, topologyRecovery, err + return err } recoveryName := getRecoverFunctionName(checkAndRecoverFunctionCode) recoveriesCounter.Add(recoveryName, 1) @@ -724,7 +717,7 @@ func executeCheckAndRecoverFunction(analysisEntry inst.ReplicationAnalysis, cand recoveriesSuccessfulCounter.Add(recoveryName, 1) } if topologyRecovery == nil { - return recoveryAttempted, topologyRecovery, err + return err } if b, err := json.Marshal(topologyRecovery); err == nil { log.Infof("Topology recovery: %+v", string(b)) @@ -741,19 +734,13 @@ func executeCheckAndRecoverFunction(analysisEntry inst.ReplicationAnalysis, cand // For all other recoveries, we would have changed the replication status of the analyzed tablet // so it doesn't hurt to re-read the information of this tablet, otherwise we'll requeue the same recovery // that we just completed because we would be using stale data. 
- DiscoverInstance(analysisEntry.AnalyzedInstanceKey, true) - } - _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("Waiting for %d postponed functions", topologyRecovery.PostponedFunctionsContainer.Len())) - topologyRecovery.Wait() - _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("Executed %d postponed functions", topologyRecovery.PostponedFunctionsContainer.Len())) - if topologyRecovery.PostponedFunctionsContainer.Len() > 0 { - _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("Executed postponed functions: %+v", strings.Join(topologyRecovery.PostponedFunctionsContainer.Descriptions(), ", "))) + DiscoverInstance(analysisEntry.AnalyzedInstanceAlias, true) } - return recoveryAttempted, topologyRecovery, err + return err } // checkIfAlreadyFixed checks whether the problem that the analysis entry represents has already been fixed by another agent or not -func checkIfAlreadyFixed(analysisEntry inst.ReplicationAnalysis) (bool, error) { +func checkIfAlreadyFixed(analysisEntry *inst.ReplicationAnalysis) (bool, error) { // Run a replication analysis again. 
We will check if the problem persisted analysisEntries, err := inst.GetReplicationAnalysis(analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard, &inst.ReplicationAnalysisHints{}) if err != nil { @@ -762,7 +749,7 @@ func checkIfAlreadyFixed(analysisEntry inst.ReplicationAnalysis) (bool, error) { for _, entry := range analysisEntries { // If there is a analysis which has the same recovery required, then we should proceed with the recovery - if entry.AnalyzedInstanceKey.Equals(&analysisEntry.AnalyzedInstanceKey) && analysisEntriesHaveSameRecovery(analysisEntry, entry) { + if entry.AnalyzedInstanceAlias == analysisEntry.AnalyzedInstanceAlias && analysisEntriesHaveSameRecovery(analysisEntry, entry) { return false, nil } } @@ -772,67 +759,66 @@ func checkIfAlreadyFixed(analysisEntry inst.ReplicationAnalysis) (bool, error) { } // CheckAndRecover is the main entry point for the recovery mechanism -func CheckAndRecover(specificInstance *inst.InstanceKey, candidateInstanceKey *inst.InstanceKey, skipProcesses bool) (recoveryAttempted bool, promotedReplicaKey *inst.InstanceKey, err error) { +func CheckAndRecover() { // Allow the analysis to run even if we don't want to recover - replicationAnalysis, err := inst.GetReplicationAnalysis("", "", &inst.ReplicationAnalysisHints{IncludeDowntimed: true, AuditAnalysis: true}) + replicationAnalysis, err := inst.GetReplicationAnalysis("", "", &inst.ReplicationAnalysisHints{AuditAnalysis: true}) if err != nil { log.Error(err) - return false, nil, err + return } - // intentionally iterating entries in random order - for _, j := range rand.Perm(len(replicationAnalysis)) { - analysisEntry := replicationAnalysis[j] - if specificInstance != nil { - // We are looking for a specific instance; if this is not the one, skip! 
- if !specificInstance.Equals(&analysisEntry.AnalyzedInstanceKey) { - continue + + // Regardless of if the problem is solved or not we want to monitor active + // issues, we use a map of labels and set a counter to `1` for each problem + // then we reset any counter that is not present in the current analysis. + active := make(map[string]struct{}) + for _, e := range replicationAnalysis { + if e.Analysis != inst.NoProblem { + names := [...]string{ + string(e.Analysis), + e.AnalyzedInstanceAlias, + e.AnalyzedKeyspace, + e.AnalyzedShard, } + + key := detectedProblems.GetLabelName(names[:]...) + active[key] = struct{}{} + detectedProblems.Set(names[:], 1) } - if analysisEntry.SkippableDueToDowntime && specificInstance == nil { - // Only recover a downtimed server if explicitly requested - continue + } + + // Reset any non-active problems. + for key := range detectedProblems.Counts() { + if _, ok := active[key]; !ok { + detectedProblems.ResetKey(key) } + } - if specificInstance != nil { - // force mode. 
Keep it synchronuous - var topologyRecovery *TopologyRecovery - recoveryAttempted, topologyRecovery, err = executeCheckAndRecoverFunction(analysisEntry, candidateInstanceKey, true, skipProcesses) - if err != nil { + // intentionally iterating entries in random order + for _, j := range rand.Perm(len(replicationAnalysis)) { + analysisEntry := replicationAnalysis[j] + + go func() { + if err := executeCheckAndRecoverFunction(analysisEntry); err != nil { log.Error(err) } - if topologyRecovery != nil { - promotedReplicaKey = topologyRecovery.SuccessorKey - } - } else { - go func() { - _, _, err := executeCheckAndRecoverFunction(analysisEntry, candidateInstanceKey, false, skipProcesses) - if err != nil { - log.Error(err) - } - }() - } + }() } - return recoveryAttempted, promotedReplicaKey, err } -func postPrsCompletion(topologyRecovery *TopologyRecovery, analysisEntry inst.ReplicationAnalysis, promotedReplica *inst.Instance) { +func postPrsCompletion(topologyRecovery *TopologyRecovery, analysisEntry *inst.ReplicationAnalysis, promotedReplica *inst.Instance) { if promotedReplica != nil { - message := fmt.Sprintf("promoted replica: %+v", promotedReplica.Key) + message := fmt.Sprintf("promoted replica: %+v", promotedReplica.InstanceAlias) _ = AuditTopologyRecovery(topologyRecovery, message) - _ = inst.AuditOperation(string(analysisEntry.Analysis), &analysisEntry.AnalyzedInstanceKey, message) - } - // Now, see whether we are successful or not. From this point there's no going back. - if promotedReplica != nil { - // Success! 
- _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("%+v: successfully promoted %+v", analysisEntry.Analysis, promotedReplica.Key)) + _ = inst.AuditOperation(string(analysisEntry.Analysis), analysisEntry.AnalyzedInstanceAlias, message) + _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("%+v: successfully promoted %+v", analysisEntry.Analysis, promotedReplica.InstanceAlias)) } } // electNewPrimary elects a new primary while none were present before. -func electNewPrimary(ctx context.Context, analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { - topologyRecovery, err = AttemptRecoveryRegistration(&analysisEntry, false /*failIfFailedInstanceInActiveRecovery*/, true /*failIfClusterInActiveRecovery*/) +func electNewPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { + topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry, false /*failIfFailedInstanceInActiveRecovery*/, true /*failIfClusterInActiveRecovery*/) if topologyRecovery == nil || err != nil { - _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another electNewPrimary.", analysisEntry.AnalyzedInstanceKey)) + _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. 
Will not issue another electNewPrimary.", analysisEntry.AnalyzedInstanceAlias)) return false, nil, err } log.Infof("Analysis: %v, will elect a new primary for %v:%v", analysisEntry.Analysis, analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard) @@ -844,7 +830,7 @@ func electNewPrimary(ctx context.Context, analysisEntry inst.ReplicationAnalysis _ = resolveRecovery(topologyRecovery, promotedReplica) }() - analyzedTablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceKey) + analyzedTablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceAlias) if err != nil { return false, topologyRecovery, err } @@ -870,61 +856,58 @@ func electNewPrimary(ctx context.Context, analysisEntry inst.ReplicationAnalysis ) if ev != nil && ev.NewPrimary != nil { - promotedReplica, _, _ = inst.ReadInstance(&inst.InstanceKey{ - Hostname: ev.NewPrimary.MysqlHostname, - Port: int(ev.NewPrimary.MysqlPort), - }) + promotedReplica, _, _ = inst.ReadInstance(topoproto.TabletAliasString(ev.NewPrimary.Alias)) } postPrsCompletion(topologyRecovery, analysisEntry, promotedReplica) return true, topologyRecovery, err } // fixPrimary sets the primary as read-write. -func fixPrimary(ctx context.Context, analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { - topologyRecovery, err = AttemptRecoveryRegistration(&analysisEntry, false, true) +func fixPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { + topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry, false, true) if topologyRecovery == nil { - _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. 
Will not issue another fixPrimary.", analysisEntry.AnalyzedInstanceKey)) + _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another fixPrimary.", analysisEntry.AnalyzedInstanceAlias)) return false, nil, err } - log.Infof("Analysis: %v, will fix primary to read-write %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey) + log.Infof("Analysis: %v, will fix primary to read-write %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) // This has to be done in the end; whether successful or not, we should mark that the recovery is done. // So that after the active period passes, we are able to run other recoveries. defer func() { _ = resolveRecovery(topologyRecovery, nil) }() - analyzedTablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceKey) + analyzedTablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceAlias) if err != nil { return false, topologyRecovery, err } - durabilityPolicy, err := inst.GetDurabilityPolicy(analyzedTablet) + durabilityPolicy, err := inst.GetDurabilityPolicy(analyzedTablet.Keyspace) if err != nil { log.Info("Could not read the durability policy for %v/%v", analyzedTablet.Keyspace, analyzedTablet.Shard) return false, topologyRecovery, err } - if err := tabletUndoDemotePrimary(ctx, analyzedTablet, inst.SemiSyncAckers(durabilityPolicy, analyzedTablet) > 0); err != nil { + if err := tabletUndoDemotePrimary(ctx, analyzedTablet, reparentutil.SemiSyncAckers(durabilityPolicy, analyzedTablet) > 0); err != nil { return true, topologyRecovery, err } return true, topologyRecovery, nil } // fixReplica sets the replica as read-only and points it at the current primary. 
-func fixReplica(ctx context.Context, analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { - topologyRecovery, err = AttemptRecoveryRegistration(&analysisEntry, false, true) +func fixReplica(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { + topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry, false, true) if topologyRecovery == nil { - _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another fixReplica.", analysisEntry.AnalyzedInstanceKey)) + _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another fixReplica.", analysisEntry.AnalyzedInstanceAlias)) return false, nil, err } - log.Infof("Analysis: %v, will fix replica %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey) + log.Infof("Analysis: %v, will fix replica %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) // This has to be done in the end; whether successful or not, we should mark that the recovery is done. // So that after the active period passes, we are able to run other recoveries. 
defer func() { _ = resolveRecovery(topologyRecovery, nil) }() - analyzedTablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceKey) + analyzedTablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceAlias) if err != nil { return false, topologyRecovery, err } @@ -935,7 +918,7 @@ func fixReplica(ctx context.Context, analysisEntry inst.ReplicationAnalysis, can return false, topologyRecovery, err } - durabilityPolicy, err := inst.GetDurabilityPolicy(analyzedTablet) + durabilityPolicy, err := inst.GetDurabilityPolicy(analyzedTablet.Keyspace) if err != nil { log.Info("Could not read the durability policy for %v/%v", analyzedTablet.Keyspace, analyzedTablet.Shard) return false, topologyRecovery, err @@ -943,10 +926,45 @@ func fixReplica(ctx context.Context, analysisEntry inst.ReplicationAnalysis, can err = setReadOnly(ctx, analyzedTablet) if err != nil { - log.Info("Could not set the tablet %v to readonly - %v", topoproto.TabletAliasString(analyzedTablet.Alias), err) + log.Info("Could not set the tablet %v to readonly - %v", analysisEntry.AnalyzedInstanceAlias, err) return true, topologyRecovery, err } - err = setReplicationSource(ctx, analyzedTablet, primaryTablet, inst.IsReplicaSemiSync(durabilityPolicy, primaryTablet, analyzedTablet)) + err = setReplicationSource(ctx, analyzedTablet, primaryTablet, reparentutil.IsReplicaSemiSync(durabilityPolicy, primaryTablet, analyzedTablet)) + return true, topologyRecovery, err +} + +// recoverErrantGTIDDetected changes the tablet type of a replica tablet that has errant GTIDs. +func recoverErrantGTIDDetected(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { + topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry, false, true) + if topologyRecovery == nil { + _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. 
Will not issue another recoverErrantGTIDDetected.", analysisEntry.AnalyzedInstanceAlias)) + return false, nil, err + } + log.Infof("Analysis: %v, will fix tablet %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) + // This has to be done in the end; whether successful or not, we should mark that the recovery is done. + // So that after the active period passes, we are able to run other recoveries. + defer func() { + _ = resolveRecovery(topologyRecovery, nil) + }() + + analyzedTablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceAlias) + if err != nil { + return false, topologyRecovery, err + } + + primaryTablet, err := shardPrimary(analyzedTablet.Keyspace, analyzedTablet.Shard) + if err != nil { + log.Info("Could not compute primary for %v/%v", analyzedTablet.Keyspace, analyzedTablet.Shard) + return false, topologyRecovery, err + } + + durabilityPolicy, err := inst.GetDurabilityPolicy(analyzedTablet.Keyspace) + if err != nil { + log.Info("Could not read the durability policy for %v/%v", analyzedTablet.Keyspace, analyzedTablet.Shard) + return false, topologyRecovery, err + } + + err = changeTabletType(ctx, analyzedTablet, topodatapb.TabletType_DRAINED, reparentutil.IsReplicaSemiSync(durabilityPolicy, primaryTablet, analyzedTablet)) return true, topologyRecovery, err } diff --git a/go/vt/vtorc/logic/topology_recovery_dao.go b/go/vt/vtorc/logic/topology_recovery_dao.go index 13ecba5762c..c835b9ecfe4 100644 --- a/go/vt/vtorc/logic/topology_recovery_dao.go +++ b/go/vt/vtorc/logic/topology_recovery_dao.go @@ -32,8 +32,7 @@ import ( // AttemptFailureDetectionRegistration tries to add a failure-detection entry; if this fails that means the problem has already been detected func AttemptFailureDetectionRegistration(analysisEntry *inst.ReplicationAnalysis) (registrationSuccessful bool, err error) { args := sqlutils.Args( - analysisEntry.AnalyzedInstanceKey.Hostname, - analysisEntry.AnalyzedInstanceKey.Port, + analysisEntry.AnalyzedInstanceAlias, 
process.ThisHostname, util.ProcessToken.Hash, string(analysisEntry.Analysis), @@ -51,8 +50,7 @@ func AttemptFailureDetectionRegistration(analysisEntry *inst.ReplicationAnalysis query := fmt.Sprintf(` insert ignore into topology_failure_detection ( - hostname, - port, + alias, in_active_period, end_active_period_unixtime, processing_node_hostname, @@ -64,7 +62,6 @@ func AttemptFailureDetectionRegistration(analysisEntry *inst.ReplicationAnalysis is_actionable, start_active_period ) values ( - ?, ?, 1, 0, @@ -118,8 +115,7 @@ func writeTopologyRecovery(topologyRecovery *TopologyRecovery) (*TopologyRecover into topology_recovery ( recovery_id, uid, - hostname, - port, + alias, in_active_period, start_active_period, end_active_period_unixtime, @@ -134,7 +130,6 @@ func writeTopologyRecovery(topologyRecovery *TopologyRecovery) (*TopologyRecover ?, ?, ?, - ?, 1, NOW(), 0, @@ -144,18 +139,18 @@ func writeTopologyRecovery(topologyRecovery *TopologyRecovery) (*TopologyRecover ?, ?, ?, - (select ifnull(max(detection_id), 0) from topology_failure_detection where hostname=? and port=?) + (select ifnull(max(detection_id), 0) from topology_failure_detection where alias = ?) ) `, sqlutils.NilIfZero(topologyRecovery.ID), topologyRecovery.UID, - analysisEntry.AnalyzedInstanceKey.Hostname, analysisEntry.AnalyzedInstanceKey.Port, + analysisEntry.AnalyzedInstanceAlias, process.ThisHostname, util.ProcessToken.Hash, string(analysisEntry.Analysis), analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard, analysisEntry.CountReplicas, - analysisEntry.AnalyzedInstanceKey.Hostname, analysisEntry.AnalyzedInstanceKey.Port, + analysisEntry.AnalyzedInstanceAlias, ) if err != nil { return nil, err @@ -180,14 +175,14 @@ func AttemptRecoveryRegistration(analysisEntry *inst.ReplicationAnalysis, failIf if failIfFailedInstanceInActiveRecovery { // Let's check if this instance has just been promoted recently and is still in active period. 
// If so, we reject recovery registration to avoid flapping. - recoveries, err := ReadInActivePeriodSuccessorInstanceRecovery(&analysisEntry.AnalyzedInstanceKey) + recoveries, err := ReadInActivePeriodSuccessorInstanceRecovery(analysisEntry.AnalyzedInstanceAlias) if err != nil { log.Error(err) return nil, err } if len(recoveries) > 0 { _ = RegisterBlockedRecoveries(analysisEntry, recoveries) - errMsg := fmt.Sprintf("AttemptRecoveryRegistration: instance %+v has recently been promoted (by failover of %+v) and is in active period. It will not be failed over. You may acknowledge the failure on %+v (-c ack-instance-recoveries) to remove this blockage", analysisEntry.AnalyzedInstanceKey, recoveries[0].AnalysisEntry.AnalyzedInstanceKey, recoveries[0].AnalysisEntry.AnalyzedInstanceKey) + errMsg := fmt.Sprintf("AttemptRecoveryRegistration: tablet %+v has recently been promoted (by failover of %+v) and is in active period. It will not be failed over. You may acknowledge the failure on %+v (-c ack-instance-recoveries) to remove this blockage", analysisEntry.AnalyzedInstanceAlias, recoveries[0].AnalysisEntry.AnalyzedInstanceAlias, recoveries[0].AnalysisEntry.AnalyzedInstanceAlias) log.Errorf(errMsg) return nil, fmt.Errorf(errMsg) } @@ -202,14 +197,14 @@ func AttemptRecoveryRegistration(analysisEntry *inst.ReplicationAnalysis, failIf } if len(recoveries) > 0 { _ = RegisterBlockedRecoveries(analysisEntry, recoveries) - errMsg := fmt.Sprintf("AttemptRecoveryRegistration: keyspace %+v shard %+v has recently experienced a failover (of %+v) and is in active period. It will not be failed over again. 
You may acknowledge the failure on this cluster (-c ack-cluster-recoveries) or on %+v (-c ack-instance-recoveries) to remove this blockage", analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard, recoveries[0].AnalysisEntry.AnalyzedInstanceKey, recoveries[0].AnalysisEntry.AnalyzedInstanceKey) + errMsg := fmt.Sprintf("AttemptRecoveryRegistration: keyspace %+v shard %+v has recently experienced a failover (of %+v) and is in active period. It will not be failed over again. You may acknowledge the failure on this cluster (-c ack-cluster-recoveries) or on %+v (-c ack-instance-recoveries) to remove this blockage", analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard, recoveries[0].AnalysisEntry.AnalyzedInstanceAlias, recoveries[0].AnalysisEntry.AnalyzedInstanceAlias) log.Errorf(errMsg) return nil, fmt.Errorf(errMsg) } } if !failIfFailedInstanceInActiveRecovery { // Implicitly acknowledge this instance's possibly existing active recovery, provided they are completed. - _, _ = AcknowledgeInstanceCompletedRecoveries(&analysisEntry.AnalyzedInstanceKey, "vtorc", fmt.Sprintf("implicit acknowledge due to user invocation of recovery on same instance: %+v", analysisEntry.AnalyzedInstanceKey)) + _, _ = AcknowledgeInstanceCompletedRecoveries(analysisEntry.AnalyzedInstanceAlias, "vtorc", fmt.Sprintf("implicit acknowledge due to user invocation of recovery on same instance: %+v", analysisEntry.AnalyzedInstanceAlias)) // The fact we only acknowledge a completed recovery solves the possible case of two DBAs simultaneously // trying to recover the same instance at the same time } @@ -250,8 +245,7 @@ func RegisterBlockedRecoveries(analysisEntry *inst.ReplicationAnalysis, blocking _, err := db.ExecVTOrc(` insert into blocked_topology_recovery ( - hostname, - port, + alias, keyspace, shard, analysis, @@ -262,7 +256,6 @@ func RegisterBlockedRecoveries(analysisEntry *inst.ReplicationAnalysis, blocking ?, ?, ?, - ?, NOW(), ? 
) @@ -272,8 +265,7 @@ func RegisterBlockedRecoveries(analysisEntry *inst.ReplicationAnalysis, blocking analysis=values(analysis), last_blocked_timestamp=values(last_blocked_timestamp), blocking_recovery_id=values(blocking_recovery_id) - `, analysisEntry.AnalyzedInstanceKey.Hostname, - analysisEntry.AnalyzedInstanceKey.Port, + `, analysisEntry.AnalyzedInstanceAlias, analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard, string(analysisEntry.Analysis), @@ -294,30 +286,27 @@ func ExpireBlockedRecoveries() error { query := ` select - blocked_topology_recovery.hostname, - blocked_topology_recovery.port + blocked_topology_recovery.alias from blocked_topology_recovery left join topology_recovery on (blocking_recovery_id = topology_recovery.recovery_id and acknowledged = 0) where acknowledged is null ` - expiredKeys := inst.NewInstanceKeyMap() + var expiredAliases []string err := db.QueryVTOrc(query, sqlutils.Args(), func(m sqlutils.RowMap) error { - key := inst.InstanceKey{Hostname: m.GetString("hostname"), Port: m.GetInt("port")} - expiredKeys.AddKey(key) + expiredAliases = append(expiredAliases, m.GetString("alias")) return nil }) - for _, expiredKey := range expiredKeys.GetInstanceKeys() { + for _, expiredAlias := range expiredAliases { _, err := db.ExecVTOrc(` delete from blocked_topology_recovery where - hostname = ? - and port = ? + alias = ? `, - expiredKey.Hostname, expiredKey.Port, + expiredAlias, ) if err != nil { log.Error(err) @@ -382,13 +371,12 @@ func acknowledgeRecoveries(owner string, comment string, markEndRecovery bool, w // AcknowledgeInstanceCompletedRecoveries marks active and COMPLETED recoveries for given instane as acknowledged. 
// This also implied clearing their active period, which in turn enables further recoveries on those topologies -func AcknowledgeInstanceCompletedRecoveries(instanceKey *inst.InstanceKey, owner string, comment string) (countAcknowledgedEntries int64, err error) { +func AcknowledgeInstanceCompletedRecoveries(tabletAlias string, owner string, comment string) (countAcknowledgedEntries int64, err error) { whereClause := ` - hostname = ? - and port = ? + alias = ? and end_recovery is not null ` - return acknowledgeRecoveries(owner, comment, false, whereClause, sqlutils.Args(instanceKey.Hostname, instanceKey.Port)) + return acknowledgeRecoveries(owner, comment, false, whereClause, sqlutils.Args(tabletAlias)) } // AcknowledgeCrashedRecoveries marks recoveries whose processing nodes has crashed as acknowledged. @@ -406,25 +394,16 @@ func AcknowledgeCrashedRecoveries() (countAcknowledgedEntries int64, err error) // ResolveRecovery is called on completion of a recovery process and updates the recovery status. // It does not clear the "active period" as this still takes place in order to avoid flapping. func writeResolveRecovery(topologyRecovery *TopologyRecovery) error { - var successorKeyToWrite inst.InstanceKey - if topologyRecovery.IsSuccessful { - successorKeyToWrite = *topologyRecovery.SuccessorKey - } _, err := db.ExecVTOrc(` update topology_recovery set is_successful = ?, - successor_hostname = ?, - successor_port = ?, successor_alias = ?, - lost_replicas = ?, - participating_instances = ?, all_errors = ?, end_recovery = NOW() where uid = ? 
- `, topologyRecovery.IsSuccessful, successorKeyToWrite.Hostname, successorKeyToWrite.Port, - topologyRecovery.SuccessorAlias, topologyRecovery.LostReplicas.ToCommaDelimitedList(), - topologyRecovery.ParticipatingInstanceKeys.ToCommaDelimitedList(), + `, topologyRecovery.IsSuccessful, + topologyRecovery.SuccessorAlias, strings.Join(topologyRecovery.AllErrors, "\n"), topologyRecovery.UID, ) @@ -439,32 +418,27 @@ func readRecoveries(whereCondition string, limit string, args []any) ([]*Topolog res := []*TopologyRecovery{} query := fmt.Sprintf(` select - recovery_id, - uid, - hostname, - port, - (IFNULL(end_active_period_unixtime, 0) = 0) as is_active, - start_active_period, - IFNULL(end_active_period_unixtime, 0) as end_active_period_unixtime, - IFNULL(end_recovery, '') AS end_recovery, - is_successful, - processing_node_hostname, - processcing_node_token, - ifnull(successor_hostname, '') as successor_hostname, - ifnull(successor_port, 0) as successor_port, - ifnull(successor_alias, '') as successor_alias, - analysis, - keyspace, - shard, - count_affected_replicas, - participating_instances, - lost_replicas, - all_errors, - acknowledged, - acknowledged_at, - acknowledged_by, - acknowledge_comment, - last_detection_id + recovery_id, + uid, + alias, + (IFNULL(end_active_period_unixtime, 0) = 0) as is_active, + start_active_period, + IFNULL(end_active_period_unixtime, 0) as end_active_period_unixtime, + IFNULL(end_recovery, '') AS end_recovery, + is_successful, + processing_node_hostname, + processcing_node_token, + ifnull(successor_alias, '') as successor_alias, + analysis, + keyspace, + shard, + count_affected_replicas, + all_errors, + acknowledged, + acknowledged_at, + acknowledged_by, + acknowledge_comment, + last_detection_id from topology_recovery %s @@ -484,23 +458,17 @@ func readRecoveries(whereCondition string, limit string, args []any) ([]*Topolog topologyRecovery.ProcessingNodeHostname = m.GetString("processing_node_hostname") 
topologyRecovery.ProcessingNodeToken = m.GetString("processcing_node_token") - topologyRecovery.AnalysisEntry.AnalyzedInstanceKey.Hostname = m.GetString("hostname") - topologyRecovery.AnalysisEntry.AnalyzedInstanceKey.Port = m.GetInt("port") + topologyRecovery.AnalysisEntry.AnalyzedInstanceAlias = m.GetString("alias") topologyRecovery.AnalysisEntry.Analysis = inst.AnalysisCode(m.GetString("analysis")) topologyRecovery.AnalysisEntry.ClusterDetails.Keyspace = m.GetString("keyspace") topologyRecovery.AnalysisEntry.ClusterDetails.Shard = m.GetString("shard") topologyRecovery.AnalysisEntry.CountReplicas = m.GetUint("count_affected_replicas") - topologyRecovery.SuccessorKey = &inst.InstanceKey{} - topologyRecovery.SuccessorKey.Hostname = m.GetString("successor_hostname") - topologyRecovery.SuccessorKey.Port = m.GetInt("successor_port") topologyRecovery.SuccessorAlias = m.GetString("successor_alias") topologyRecovery.AnalysisEntry.ClusterDetails.ReadRecoveryInfo() topologyRecovery.AllErrors = strings.Split(m.GetString("all_errors"), "\n") - _ = topologyRecovery.LostReplicas.ReadCommaDelimitedList(m.GetString("lost_replicas")) - _ = topologyRecovery.ParticipatingInstanceKeys.ReadCommaDelimitedList(m.GetString("participating_instances")) topologyRecovery.Acknowledged = m.GetBool("acknowledged") topologyRecovery.AcknowledgedAt = m.GetString("acknowledged_at") @@ -533,20 +501,20 @@ func ReadInActivePeriodClusterRecovery(keyspace string, shard, analysis string) // ReadInActivePeriodSuccessorInstanceRecovery reads completed recoveries for a given instance, where said instance // was promoted as result, still in active period (may be used to block further recoveries should this instance die) -func ReadInActivePeriodSuccessorInstanceRecovery(instanceKey *inst.InstanceKey) ([]*TopologyRecovery, error) { +func ReadInActivePeriodSuccessorInstanceRecovery(tabletAlias string) ([]*TopologyRecovery, error) { whereClause := ` where in_active_period=1 and - successor_hostname=? 
and successor_port=?` - return readRecoveries(whereClause, ``, sqlutils.Args(instanceKey.Hostname, instanceKey.Port)) + successor_alias=?` + return readRecoveries(whereClause, ``, sqlutils.Args(tabletAlias)) } // ReadRecentRecoveries reads latest recovery entries from topology_recovery func ReadRecentRecoveries(unacknowledgedOnly bool, page int) ([]*TopologyRecovery, error) { whereConditions := []string{} whereClause := "" - args := sqlutils.Args() + var args []any if unacknowledgedOnly { whereConditions = append(whereConditions, `acknowledged=0`) } diff --git a/go/vt/vtorc/logic/topology_recovery_dao_test.go b/go/vt/vtorc/logic/topology_recovery_dao_test.go index f01e16560a8..f9a9026a4a1 100644 --- a/go/vt/vtorc/logic/topology_recovery_dao_test.go +++ b/go/vt/vtorc/logic/topology_recovery_dao_test.go @@ -22,7 +22,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/external/golib/sqlutils" - "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/inst" ) @@ -39,11 +38,8 @@ func TestTopologyRecovery(t *testing.T) { }() replicationAnalysis := inst.ReplicationAnalysis{ - AnalyzedInstanceKey: inst.InstanceKey{ - Hostname: hostname, - Port: 101, - }, - TabletType: tab101.Type, + AnalyzedInstanceAlias: "zone1-0000000101", + TabletType: tab101.Type, ClusterDetails: inst.ClusterInfo{ Keyspace: keyspace, Shard: shard, @@ -81,10 +77,7 @@ func TestBlockedRecoveryInsertion(t *testing.T) { }() analysisEntry := &inst.ReplicationAnalysis{ - AnalyzedInstanceKey: inst.InstanceKey{ - Hostname: "localhost", - Port: 100, - }, + AnalyzedInstanceAlias: "zone1-0000000100", ClusterDetails: inst.ClusterInfo{ Keyspace: "ks", Shard: "0", diff --git a/go/vt/vtorc/logic/topology_recovery_status.go b/go/vt/vtorc/logic/topology_recovery_status.go index 4e85b0529de..d1195963ba1 100644 --- a/go/vt/vtorc/logic/topology_recovery_status.go +++ b/go/vt/vtorc/logic/topology_recovery_status.go @@ -35,13 +35,15 @@ const TopologyRecoveriesTemplate = ` Recovery ID Failure 
Type - Instance + Tablet Alias + Timestamp {{range $i, $recovery := .}} {{$recovery.ID}} {{$recovery.AnalysisEntry.Analysis}} - {{$recovery.AnalysisEntry.AnalyzedInstanceKey}} + {{$recovery.AnalysisEntry.AnalyzedInstanceAlias}} + {{$recovery.RecoveryStartTimestamp}} {{end}} diff --git a/go/vt/vtorc/logic/topology_recovery_test.go b/go/vt/vtorc/logic/topology_recovery_test.go index 211d8ff3467..d517649fd13 100644 --- a/go/vt/vtorc/logic/topology_recovery_test.go +++ b/go/vt/vtorc/logic/topology_recovery_test.go @@ -26,10 +26,10 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/inst" - - // import the gRPC client implementation for tablet manager _ "vitess.io/vitess/go/vt/vttablet/grpctmclient" ) @@ -44,6 +44,11 @@ func TestAnalysisEntriesHaveSameRecovery(t *testing.T) { prevAnalysisCode: inst.DeadPrimary, newAnalysisCode: inst.DeadPrimaryAndSomeReplicas, shouldBeEqual: true, + }, { + // DeadPrimary and PrimaryTabletDeleted are different recoveries. 
+ prevAnalysisCode: inst.DeadPrimary, + newAnalysisCode: inst.PrimaryTabletDeleted, + shouldBeEqual: false, }, { // same codes will always have same recovery prevAnalysisCode: inst.DeadPrimary, @@ -87,7 +92,7 @@ func TestAnalysisEntriesHaveSameRecovery(t *testing.T) { t.Parallel() for _, tt := range tests { t.Run(string(tt.prevAnalysisCode)+","+string(tt.newAnalysisCode), func(t *testing.T) { - res := analysisEntriesHaveSameRecovery(inst.ReplicationAnalysis{Analysis: tt.prevAnalysisCode}, inst.ReplicationAnalysis{Analysis: tt.newAnalysisCode}) + res := analysisEntriesHaveSameRecovery(&inst.ReplicationAnalysis{Analysis: tt.prevAnalysisCode}, &inst.ReplicationAnalysis{Analysis: tt.newAnalysisCode}) require.Equal(t, tt.shouldBeEqual, res) }) } @@ -117,14 +122,14 @@ func TestElectNewPrimaryPanic(t *testing.T) { } err = inst.SaveTablet(tablet) require.NoError(t, err) - analysisEntry := inst.ReplicationAnalysis{ - AnalyzedInstanceKey: inst.InstanceKey{ - Hostname: tablet.MysqlHostname, - Port: int(tablet.MysqlPort), - }, + analysisEntry := &inst.ReplicationAnalysis{ + AnalyzedInstanceAlias: topoproto.TabletAliasString(tablet.Alias), } - ts = memorytopo.NewServer("zone1") - recoveryAttempted, _, err := electNewPrimary(context.Background(), analysisEntry, nil, false, false) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts = memorytopo.NewServer(ctx, "zone1") + recoveryAttempted, _, err := electNewPrimary(context.Background(), analysisEntry) require.True(t, recoveryAttempted) require.Error(t, err) } @@ -168,20 +173,17 @@ func TestDifferentAnalysescHaveDifferentCooldowns(t *testing.T) { err = inst.SaveTablet(replica) require.NoError(t, err) primaryAnalysisEntry := inst.ReplicationAnalysis{ - AnalyzedInstanceKey: inst.InstanceKey{ - Hostname: primary.MysqlHostname, - Port: int(primary.MysqlPort), - }, - Analysis: inst.ReplicationStopped, + AnalyzedInstanceAlias: topoproto.TabletAliasString(primary.Alias), + Analysis: inst.ReplicationStopped, } 
replicaAnalysisEntry := inst.ReplicationAnalysis{ - AnalyzedInstanceKey: inst.InstanceKey{ - Hostname: replica.MysqlHostname, - Port: int(replica.MysqlPort), - }, - Analysis: inst.DeadPrimary, + AnalyzedInstanceAlias: topoproto.TabletAliasString(replica.Alias), + Analysis: inst.DeadPrimary, } - ts = memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts = memorytopo.NewServer(ctx, "zone1") _, err = AttemptRecoveryRegistration(&replicaAnalysisEntry, false, true) require.Nil(t, err) @@ -190,3 +192,88 @@ func TestDifferentAnalysescHaveDifferentCooldowns(t *testing.T) { _, err = AttemptRecoveryRegistration(&primaryAnalysisEntry, true, true) require.Nil(t, err) } + +func TestGetCheckAndRecoverFunctionCode(t *testing.T) { + tests := []struct { + name string + ersEnabled bool + convertTabletWithErrantGTIDs bool + analysisCode inst.AnalysisCode + wantRecoveryFunction recoveryFunction + }{ + { + name: "DeadPrimary with ERS enabled", + ersEnabled: true, + analysisCode: inst.DeadPrimary, + wantRecoveryFunction: recoverDeadPrimaryFunc, + }, { + name: "DeadPrimary with ERS disabled", + ersEnabled: false, + analysisCode: inst.DeadPrimary, + wantRecoveryFunction: noRecoveryFunc, + }, { + name: "PrimaryTabletDeleted with ERS enabled", + ersEnabled: true, + analysisCode: inst.PrimaryTabletDeleted, + wantRecoveryFunction: recoverPrimaryTabletDeletedFunc, + }, { + name: "PrimaryTabletDeleted with ERS disabled", + ersEnabled: false, + analysisCode: inst.PrimaryTabletDeleted, + wantRecoveryFunction: noRecoveryFunc, + }, { + name: "PrimaryHasPrimary", + ersEnabled: false, + analysisCode: inst.PrimaryHasPrimary, + wantRecoveryFunction: recoverPrimaryHasPrimaryFunc, + }, { + name: "ClusterHasNoPrimary", + ersEnabled: false, + analysisCode: inst.ClusterHasNoPrimary, + wantRecoveryFunction: electNewPrimaryFunc, + }, { + name: "ReplicationStopped", + ersEnabled: false, + analysisCode: inst.ReplicationStopped, + wantRecoveryFunction: 
fixReplicaFunc, + }, { + name: "PrimarySemiSyncMustBeSet", + ersEnabled: false, + analysisCode: inst.PrimarySemiSyncMustBeSet, + wantRecoveryFunction: fixPrimaryFunc, + }, { + name: "ErrantGTIDDetected", + ersEnabled: false, + convertTabletWithErrantGTIDs: true, + analysisCode: inst.ErrantGTIDDetected, + wantRecoveryFunction: recoverErrantGTIDDetectedFunc, + }, { + name: "ErrantGTIDDetected with --change-tablets-with-errant-gtid-to-drained false", + ersEnabled: false, + convertTabletWithErrantGTIDs: false, + analysisCode: inst.ErrantGTIDDetected, + wantRecoveryFunction: noRecoveryFunc, + }, + } + + // Needed for the test to work + oldMap := emergencyOperationGracefulPeriodMap + emergencyOperationGracefulPeriodMap = cache.New(time.Second*5, time.Millisecond*500) + defer func() { + emergencyOperationGracefulPeriodMap = oldMap + }() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + prevVal := config.ERSEnabled() + config.SetERSEnabled(tt.ersEnabled) + defer config.SetERSEnabled(prevVal) + + convertErrantVal := config.ConvertTabletWithErrantGTIDs() + config.SetConvertTabletWithErrantGTIDs(tt.convertTabletWithErrantGTIDs) + defer config.SetConvertTabletWithErrantGTIDs(convertErrantVal) + + gotFunc := getCheckAndRecoverFunctionCode(tt.analysisCode, "") + require.EqualValues(t, tt.wantRecoveryFunction, gotFunc) + }) + } +} diff --git a/go/vt/vtorc/logic/orchestrator.go b/go/vt/vtorc/logic/vtorc.go similarity index 82% rename from go/vt/vtorc/logic/orchestrator.go rename to go/vt/vtorc/logic/vtorc.go index c2ce4eef179..02fb41daa21 100644 --- a/go/vt/vtorc/logic/orchestrator.go +++ b/go/vt/vtorc/logic/vtorc.go @@ -47,7 +47,7 @@ const ( // that were requested for discovery. It can be continuously updated // as discovery process progresses. 
var discoveryQueue *discovery.Queue -var snapshotDiscoveryKeys chan inst.InstanceKey +var snapshotDiscoveryKeys chan string var snapshotDiscoveryKeysMutex sync.Mutex var hasReceivedSIGTERM int32 @@ -65,7 +65,7 @@ var isElectedNode int64 var recentDiscoveryOperationKeys *cache.Cache func init() { - snapshotDiscoveryKeys = make(chan inst.InstanceKey, 10) + snapshotDiscoveryKeys = make(chan string, 10) _ = metrics.Register("discoveries.attempt", discoveriesCounter) _ = metrics.Register("discoveries.fail", failedDiscoveriesCounter) @@ -113,7 +113,7 @@ func acceptSighupSignal() { go func() { for range c { log.Infof("Received SIGHUP. Reloading configuration") - _ = inst.AuditOperation("reload-configuration", nil, "Triggered via SIGHUP") + _ = inst.AuditOperation("reload-configuration", "", "Triggered via SIGHUP") config.Reload() discoveryMetrics.SetExpirePeriod(time.Duration(config.DiscoveryCollectionRetentionSeconds) * time.Second) } @@ -126,7 +126,7 @@ func closeVTOrc() { atomic.StoreInt32(&hasReceivedSIGTERM, 1) discoveryMetrics.StopAutoExpiration() // Poke other go routines to stop cleanly here ... - _ = inst.AuditOperation("shutdown", nil, "Triggered via SIGTERM") + _ = inst.AuditOperation("shutdown", "", "Triggered via SIGTERM") // wait for the locks to be released waitForLocksRelease() log.Infof("VTOrc closed") @@ -159,18 +159,18 @@ func handleDiscoveryRequests() { for i := uint(0); i < config.DiscoveryMaxConcurrency; i++ { go func() { for { - instanceKey := discoveryQueue.Consume() + tabletAlias := discoveryQueue.Consume() // Possibly this used to be the elected node, but has // been demoted, while still the queue is full. if !IsLeaderOrActive() { log.Infof("Node apparently demoted. Skipping discovery of %+v. 
"+ - "Remaining queue size: %+v", instanceKey, discoveryQueue.QueueLen()) - discoveryQueue.Release(instanceKey) + "Remaining queue size: %+v", tabletAlias, discoveryQueue.QueueLen()) + discoveryQueue.Release(tabletAlias) continue } - DiscoverInstance(instanceKey, false /* forceDiscovery */) - discoveryQueue.Release(instanceKey) + DiscoverInstance(tabletAlias, false /* forceDiscovery */) + discoveryQueue.Release(tabletAlias) } }() } @@ -179,9 +179,9 @@ func handleDiscoveryRequests() { // DiscoverInstance will attempt to discover (poll) an instance (unless // it is already up-to-date) and will also ensure that its primary and // replicas (if any) are also checked. -func DiscoverInstance(instanceKey inst.InstanceKey, forceDiscovery bool) { - if inst.InstanceIsForgotten(&instanceKey) { - log.Infof("discoverInstance: skipping discovery of %+v because it is set to be forgotten", instanceKey) +func DiscoverInstance(tabletAlias string, forceDiscovery bool) { + if inst.InstanceIsForgotten(tabletAlias) { + log.Infof("discoverInstance: skipping discovery of %+v because it is set to be forgotten", tabletAlias) return } @@ -198,28 +198,27 @@ func DiscoverInstance(instanceKey inst.InstanceKey, forceDiscovery bool) { discoveryTime := latency.Elapsed("total") if discoveryTime > instancePollSecondsDuration() { instancePollSecondsExceededCounter.Inc(1) - log.Warningf("discoverInstance exceeded InstancePollSeconds for %+v, took %.4fs", instanceKey, discoveryTime.Seconds()) + log.Warningf("discoverInstance exceeded InstancePollSeconds for %+v, took %.4fs", tabletAlias, discoveryTime.Seconds()) if metric != nil { metric.InstancePollSecondsDurationCount = 1 } } }() - _, _ = instanceKey.ResolveHostname() - if !instanceKey.IsValid() { + if tabletAlias == "" { return } // Calculate the expiry period each time as InstancePollSeconds // _may_ change during the run of the process (via SIGHUP) and // it is not possible to change the cache's default expiry.. 
- if existsInCacheError := recentDiscoveryOperationKeys.Add(instanceKey.DisplayString(), true, instancePollSecondsDuration()); existsInCacheError != nil && !forceDiscovery { + if existsInCacheError := recentDiscoveryOperationKeys.Add(tabletAlias, true, instancePollSecondsDuration()); existsInCacheError != nil && !forceDiscovery { // Just recently attempted return } latency.Start("backend") - instance, found, _ := inst.ReadInstance(&instanceKey) + instance, found, _ := inst.ReadInstance(tabletAlias) latency.Stop("backend") if !forceDiscovery && found && instance.IsUpToDate && instance.IsLastCheckValid { // we've already discovered this one. Skip! @@ -229,7 +228,7 @@ func DiscoverInstance(instanceKey inst.InstanceKey, forceDiscovery bool) { discoveriesCounter.Inc(1) // First we've ever heard of this instance. Continue investigation: - instance, err := inst.ReadTopologyInstanceBufferable(&instanceKey, latency) + instance, err := inst.ReadTopologyInstanceBufferable(tabletAlias, latency) // panic can occur (IO stuff). Therefore it may happen // that instance is nil. Check it, but first get the timing metrics. 
totalLatency := latency.Elapsed("total") @@ -244,16 +243,16 @@ func DiscoverInstance(instanceKey inst.InstanceKey, forceDiscovery bool) { failedDiscoveriesCounter.Inc(1) metric = &discovery.Metric{ Timestamp: time.Now(), - InstanceKey: instanceKey, + TabletAlias: tabletAlias, TotalLatency: totalLatency, BackendLatency: backendLatency, InstanceLatency: instanceLatency, Err: err, } _ = discoveryMetrics.Append(metric) - if util.ClearToLog("discoverInstance", instanceKey.StringCode()) { + if util.ClearToLog("discoverInstance", tabletAlias) { log.Warningf(" DiscoverInstance(%+v) instance is nil in %.3fs (Backend: %.3fs, Instance: %.3fs), error=%+v", - instanceKey, + tabletAlias, totalLatency.Seconds(), backendLatency.Seconds(), instanceLatency.Seconds(), @@ -264,7 +263,7 @@ func DiscoverInstance(instanceKey inst.InstanceKey, forceDiscovery bool) { metric = &discovery.Metric{ Timestamp: time.Now(), - InstanceKey: instanceKey, + TabletAlias: tabletAlias, TotalLatency: totalLatency, BackendLatency: backendLatency, InstanceLatency: instanceLatency, @@ -297,7 +296,7 @@ func onHealthTick() { if !IsLeaderOrActive() { return } - instanceKeys, err := inst.ReadOutdatedInstanceKeys() + tabletAliases, err := inst.ReadOutdatedInstanceKeys() if err != nil { log.Error(err) } @@ -307,9 +306,6 @@ func onHealthTick() { go func() { _, _ = process.RegisterNode(process.ThisNodeHealth) }() - go func() { - _ = inst.ExpireMaintenance() - }() } func() { @@ -320,14 +316,14 @@ func onHealthTick() { countSnapshotKeys := len(snapshotDiscoveryKeys) for i := 0; i < countSnapshotKeys; i++ { - instanceKeys = append(instanceKeys, <-snapshotDiscoveryKeys) + tabletAliases = append(tabletAliases, <-snapshotDiscoveryKeys) } }() // avoid any logging unless there's something to be done - if len(instanceKeys) > 0 { - for _, instanceKey := range instanceKeys { - if instanceKey.IsValid() { - discoveryQueue.Push(instanceKey) + if len(tabletAliases) > 0 { + for _, tabletAlias := range tabletAliases { + if 
tabletAlias != "" { + discoveryQueue.Push(tabletAlias) } } } @@ -343,11 +339,9 @@ func ContinuousDiscovery() { checkAndRecoverWaitPeriod := 3 * instancePollSecondsDuration() recentDiscoveryOperationKeys = cache.New(instancePollSecondsDuration(), time.Second) - _ = inst.LoadHostnameResolveCache() go handleDiscoveryRequests() healthTick := time.Tick(config.HealthPollSeconds * time.Second) - instancePollTick := time.Tick(instancePollSecondsDuration()) caretakingTick := time.Tick(time.Minute) recoveryTick := time.Tick(time.Duration(config.Config.RecoveryPollSeconds) * time.Second) tabletTopoTick := OpenTabletDiscovery() @@ -375,39 +369,19 @@ func ContinuousDiscovery() { go func() { onHealthTick() }() - case <-instancePollTick: - go func() { - // This tick does NOT do instance poll (these are handled by the oversampling discoveryTick) - // But rather should invoke such routinely operations that need to be as (or roughly as) frequent - // as instance poll - if IsLeaderOrActive() { - go inst.ExpireDowntime() - } - }() case <-caretakingTick: // Various periodic internal maintenance tasks go func() { if IsLeaderOrActive() { go inst.ForgetLongUnseenInstances() - go inst.ForgetUnseenInstancesDifferentlyResolved() - go inst.ForgetExpiredHostnameResolves() - go inst.DeleteInvalidHostnameResolves() - go inst.ResolveUnknownPrimaryHostnameResolves() - go inst.ExpireMaintenance() - go inst.ExpireCandidateInstances() - go inst.ExpireHostnameUnresolve() go inst.ExpireAudit() - go inst.FlushNontrivialResolveCacheToDatabase() go inst.ExpireStaleInstanceBinlogCoordinates() go process.ExpireNodesHistory() go process.ExpireAvailableNodes() go ExpireFailureDetectionHistory() go ExpireTopologyRecoveryHistory() go ExpireTopologyRecoveryStepsHistory() - } else { - // Take this opportunity to refresh yourself - go inst.LoadHostnameResolveCache() } }() case <-recoveryTick: @@ -427,7 +401,7 @@ func ContinuousDiscovery() { return } if runCheckAndRecoverOperationsTimeRipe() { - 
CheckAndRecover(nil, nil, false) + CheckAndRecover() } else { log.Infof("Waiting for %+v seconds to pass before running failure detection/recovery", checkAndRecoverWaitPeriod.Seconds()) } @@ -448,7 +422,7 @@ func ContinuousDiscovery() { wg.Add(1) go func() { defer wg.Done() - RefreshAllKeyspaces() + RefreshAllKeyspacesAndShards() }() // Refresh all tablets. diff --git a/go/vt/vtorc/logic/orchestrator_test.go b/go/vt/vtorc/logic/vtorc_test.go similarity index 100% rename from go/vt/vtorc/logic/orchestrator_test.go rename to go/vt/vtorc/logic/vtorc_test.go diff --git a/go/vt/vtorc/server/api.go b/go/vt/vtorc/server/api.go index 66032d961ce..f053336e64e 100644 --- a/go/vt/vtorc/server/api.go +++ b/go/vt/vtorc/server/api.go @@ -41,6 +41,7 @@ type vtorcAPI struct{} const ( problemsAPI = "/api/problems" + errantGTIDsAPI = "/api/errant-gtids" disableGlobalRecoveriesAPI = "/api/disable-global-recoveries" enableGlobalRecoveriesAPI = "/api/enable-global-recoveries" replicationAnalysisAPI = "/api/replication-analysis" @@ -55,6 +56,7 @@ var ( apiHandler = &vtorcAPI{} vtorcAPIPaths = []string{ problemsAPI, + errantGTIDsAPI, disableGlobalRecoveriesAPI, enableGlobalRecoveriesAPI, replicationAnalysisAPI, @@ -80,6 +82,8 @@ func (v *vtorcAPI) ServeHTTP(response http.ResponseWriter, request *http.Request healthAPIHandler(response, request) case problemsAPI: problemsAPIHandler(response, request) + case errantGTIDsAPI: + errantGTIDsAPIHandler(response, request) case replicationAnalysisAPI: replicationAnalysisAPIHandler(response, request) case AggregatedDiscoveryMetricsAPI: @@ -94,7 +98,7 @@ func (v *vtorcAPI) ServeHTTP(response http.ResponseWriter, request *http.Request // getACLPermissionLevelForAPI returns the acl permission level that is required to run a given API func getACLPermissionLevelForAPI(apiEndpoint string) string { switch apiEndpoint { - case problemsAPI: + case problemsAPI, errantGTIDsAPI: return acl.MONITORING case disableGlobalRecoveriesAPI, enableGlobalRecoveriesAPI: 
return acl.ADMIN @@ -144,6 +148,24 @@ func problemsAPIHandler(response http.ResponseWriter, request *http.Request) { returnAsJSON(response, http.StatusOK, instances) } +// errantGTIDsAPIHandler is the handler for the errantGTIDsAPI endpoint +func errantGTIDsAPIHandler(response http.ResponseWriter, request *http.Request) { + // This api also supports filtering by shard and keyspace provided. + shard := request.URL.Query().Get("shard") + keyspace := request.URL.Query().Get("keyspace") + if shard != "" && keyspace == "" { + http.Error(response, shardWithoutKeyspaceFilteringErrorStr, http.StatusBadRequest) + return + } + + instances, err := inst.ReadInstancesWithErrantGTIds(keyspace, shard) + if err != nil { + http.Error(response, err.Error(), http.StatusInternalServerError) + return + } + returnAsJSON(response, http.StatusOK, instances) +} + // AggregatedDiscoveryMetricsAPIHandler is the handler for the discovery metrics endpoint func AggregatedDiscoveryMetricsAPIHandler(response http.ResponseWriter, request *http.Request) { // return metrics for last x seconds diff --git a/go/vt/vtorc/server/api_test.go b/go/vt/vtorc/server/api_test.go index 3c9b792afae..c352d1e600f 100644 --- a/go/vt/vtorc/server/api_test.go +++ b/go/vt/vtorc/server/api_test.go @@ -16,6 +16,9 @@ func TestGetACLPermissionLevelForAPI(t *testing.T) { { apiEndpoint: problemsAPI, want: acl.MONITORING, + }, { + apiEndpoint: errantGTIDsAPI, + want: acl.MONITORING, }, { apiEndpoint: disableGlobalRecoveriesAPI, want: acl.ADMIN, diff --git a/go/vt/vtorc/server/discovery.go b/go/vt/vtorc/server/discovery.go index 1f0011cfabd..0e5cf5923c8 100644 --- a/go/vt/vtorc/server/discovery.go +++ b/go/vt/vtorc/server/discovery.go @@ -17,24 +17,14 @@ limitations under the License. 
package server import ( - "github.com/spf13/pflag" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/vtorc/inst" "vitess.io/vitess/go/vt/vtorc/logic" "vitess.io/vitess/go/vt/vtorc/process" ) -// RegisterFlags registers the flags required by VTOrc -func RegisterFlags(fs *pflag.FlagSet) { - fs.String("orc_web_dir", "", "") - fs.MarkDeprecated("orc_web_dir", "Web directory is no longer needed by VTOrc, please specify the --port flag to gain access to the debug pages and API of VTOrc") -} - // StartVTOrcDiscovery starts VTOrc discovery serving func StartVTOrcDiscovery() { process.ContinuousRegistration(string(process.VTOrcExecutionHTTPMode), "") - inst.SetMaintenanceOwner(process.ThisHostname) log.Info("Starting Discovery") go logic.ContinuousDiscovery() diff --git a/go/vt/vtorc/test/recovery_analysis.go b/go/vt/vtorc/test/recovery_analysis.go index 7585fa17441..b2ae4ce9520 100644 --- a/go/vt/vtorc/test/recovery_analysis.go +++ b/go/vt/vtorc/test/recovery_analysis.go @@ -33,6 +33,7 @@ type InfoForRecoveryAnalysis struct { PrimaryTimestamp *time.Time Keyspace string Shard string + ShardPrimaryTermTimestamp string KeyspaceType int DurabilityPolicy string IsInvalid int @@ -40,8 +41,6 @@ type InfoForRecoveryAnalysis struct { IsCoPrimary int Hostname string Port int - SourceHost string - SourcePort int DataCenter string Region string PhysicalEnvironment string @@ -49,6 +48,7 @@ type InfoForRecoveryAnalysis struct { LogPos uint32 IsStaleBinlogCoordinates int GTIDMode string + ErrantGTID string LastCheckValid int LastCheckPartialSuccess int CountReplicas uint @@ -113,6 +113,7 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap { rowMap["downtime_end_timestamp"] = sqlutils.CellData{String: info.DowntimeEndTimestamp, Valid: true} rowMap["downtime_remaining_seconds"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.DowntimeRemainingSeconds), Valid: true} rowMap["durability_policy"] = sqlutils.CellData{String: info.DurabilityPolicy, Valid: true} + 
rowMap["gtid_errant"] = sqlutils.CellData{String: info.ErrantGTID, Valid: true} rowMap["gtid_mode"] = sqlutils.CellData{String: info.GTIDMode, Valid: true} rowMap["hostname"] = sqlutils.CellData{String: info.Hostname, Valid: true} rowMap["is_binlog_server"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsBinlogServer), Valid: true} @@ -126,6 +127,7 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap { rowMap["keyspace_type"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.KeyspaceType), Valid: true} rowMap["keyspace"] = sqlutils.CellData{String: info.Keyspace, Valid: true} rowMap["shard"] = sqlutils.CellData{String: info.Shard, Valid: true} + rowMap["shard_primary_term_timestamp"] = sqlutils.CellData{String: info.ShardPrimaryTermTimestamp, Valid: true} rowMap["last_check_partial_success"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.LastCheckPartialSuccess), Valid: true} rowMap["max_replica_gtid_errant"] = sqlutils.CellData{String: info.MaxReplicaGTIDErrant, Valid: true} rowMap["max_replica_gtid_mode"] = sqlutils.CellData{String: info.MaxReplicaGTIDMode, Valid: true} @@ -148,8 +150,6 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap { rowMap["semi_sync_primary_status"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.SemiSyncPrimaryStatus), Valid: true} rowMap["semi_sync_primary_wait_for_replica_count"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.SemiSyncPrimaryWaitForReplicaCount), Valid: true} rowMap["semi_sync_replica_enabled"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.SemiSyncReplicaEnabled), Valid: true} - rowMap["source_host"] = sqlutils.CellData{String: info.SourceHost, Valid: true} - rowMap["source_port"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.SourcePort), Valid: true} res, _ := prototext.Marshal(info.TabletInfo) rowMap["tablet_info"] = sqlutils.CellData{String: string(res), Valid: true} return rowMap diff --git a/go/vt/vtorc/util/math.go b/go/vt/vtorc/util/math.go 
index 869b7ac354d..7f1e3057b07 100644 --- a/go/vt/vtorc/util/math.go +++ b/go/vt/vtorc/util/math.go @@ -16,64 +16,6 @@ package util -func MinInt(i1, i2 int) int { - if i1 < i2 { - return i1 - } - return i2 -} - -func MaxInt(i1, i2 int) int { - if i1 > i2 { - return i1 - } - return i2 -} - -func MinInt64(i1, i2 int64) int64 { - if i1 < i2 { - return i1 - } - return i2 -} - -func MaxInt64(i1, i2 int64) int64 { - if i1 > i2 { - return i1 - } - return i2 -} - -func MaxUInt64(i1, i2 uint64) uint64 { - if i1 > i2 { - return i1 - } - return i2 -} - -func MinString(i1, i2 string) string { - if i1 < i2 { - return i1 - } - return i2 -} - -// TernaryString acts like a "? :" C-style ternary operator for strings -func TernaryString(condition bool, resTrue string, resFalse string) string { - if condition { - return resTrue - } - return resFalse -} - -// TernaryInt acts like a "? :" C-style ternary operator for ints -func TernaryInt(condition bool, resTrue int, resFalse int) int { - if condition { - return resTrue - } - return resFalse -} - // AbsInt64 is an ABS function for int64 type func AbsInt64(i int64) int64 { if i >= 0 { diff --git a/go/vt/vttablet/customrule/topocustomrule/topocustomrule_test.go b/go/vt/vttablet/customrule/topocustomrule/topocustomrule_test.go index 2f7090e5450..e1d5c3fd2cd 100644 --- a/go/vt/vttablet/customrule/topocustomrule/topocustomrule_test.go +++ b/go/vt/vttablet/customrule/topocustomrule/topocustomrule_test.go @@ -78,11 +78,13 @@ func TestUpdate(t *testing.T) { cell := "cell1" filePath := "/keyspaces/ks1/configs/CustomRules" - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, cell) qsc := tabletservermock.NewController() qsc.TS = ts sleepDuringTopoFailure = time.Millisecond - ctx := context.Background() cr, err := newTopoCustomRule(qsc, cell, filePath) if err != nil { diff --git a/go/vt/vttablet/endtoend/config_test.go 
b/go/vt/vttablet/endtoend/config_test.go index 759deb87ba2..60303cf4bf5 100644 --- a/go/vt/vttablet/endtoend/config_test.go +++ b/go/vt/vttablet/endtoend/config_test.go @@ -73,14 +73,6 @@ func TestStreamPoolSize(t *testing.T) { verifyIntValue(t, vstart, "StreamConnPoolCapacity", 1) } -func TestQueryCacheCapacity(t *testing.T) { - revert := changeVar(t, "QueryCacheCapacity", "1") - defer revert() - - vstart := framework.DebugVars() - verifyIntValue(t, vstart, "QueryCacheCapacity", 1) -} - func TestDisableConsolidator(t *testing.T) { totalConsolidationsTag := "Waits/Histograms/Consolidations/Count" initial := framework.FetchInt(framework.DebugVars(), totalConsolidationsTag) @@ -182,8 +174,6 @@ func TestQueryPlanCache(t *testing.T) { //sleep to avoid race between SchemaChanged event clearing out the plans cache which breaks this test framework.Server.WaitForSchemaReset(2 * time.Second) - defer framework.Server.SetQueryPlanCacheCap(framework.Server.QueryPlanCacheCap()) - bindVars := map[string]*querypb.BindVariable{ "ival1": sqltypes.Int64BindVariable(1), "ival2": sqltypes.Int64BindVariable(1), @@ -197,21 +187,18 @@ func TestQueryPlanCache(t *testing.T) { assert.Equal(t, 1, framework.Server.QueryPlanCacheLen()) vend := framework.DebugVars() - assert.Equal(t, 1, framework.FetchInt(vend, "QueryCacheLength")) assert.GreaterOrEqual(t, framework.FetchInt(vend, "QueryCacheSize"), cachedPlanSize) _, _ = client.Execute("select * from vitess_test where intval=:ival2", bindVars) require.Equal(t, 2, framework.Server.QueryPlanCacheLen()) vend = framework.DebugVars() - assert.Equal(t, 2, framework.FetchInt(vend, "QueryCacheLength")) assert.GreaterOrEqual(t, framework.FetchInt(vend, "QueryCacheSize"), 2*cachedPlanSize) _, _ = client.Execute("select * from vitess_test where intval=1", bindVars) require.Equal(t, 3, framework.Server.QueryPlanCacheLen()) vend = framework.DebugVars() - assert.Equal(t, 3, framework.FetchInt(vend, "QueryCacheLength")) assert.GreaterOrEqual(t, 
framework.FetchInt(vend, "QueryCacheSize"), 3*cachedPlanSize) } diff --git a/go/vt/vttablet/endtoend/connkilling/main_test.go b/go/vt/vttablet/endtoend/connkilling/main_test.go index 98c4b9671d1..e7486c397eb 100644 --- a/go/vt/vttablet/endtoend/connkilling/main_test.go +++ b/go/vt/vttablet/endtoend/connkilling/main_test.go @@ -17,6 +17,7 @@ limitations under the License. package connkilling import ( + "context" "errors" "flag" "fmt" @@ -81,7 +82,9 @@ func TestMain(m *testing.M) { connAppDebugParams = cluster.MySQLAppDebugConnParams() config := tabletenv.NewDefaultConfig() _ = config.Oltp.TxTimeoutSeconds.Set("3s") - err := framework.StartCustomServer(connParams, connAppDebugParams, cluster.DbName(), config) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err := framework.StartCustomServer(ctx, connParams, connAppDebugParams, cluster.DbName(), config) if err != nil { fmt.Fprintf(os.Stderr, "%v", err) return 1 diff --git a/go/vt/vttablet/endtoend/framework/client.go b/go/vt/vttablet/endtoend/framework/client.go index 9d3abdbbf26..3c06f9b465c 100644 --- a/go/vt/vttablet/endtoend/framework/client.go +++ b/go/vt/vttablet/endtoend/framework/client.go @@ -19,10 +19,9 @@ package framework import ( "context" "errors" + "sync" "time" - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/vttablet/tabletserver" @@ -40,6 +39,7 @@ type QueryClient struct { target *querypb.Target server *tabletserver.TabletServer transactionID int64 + reservedIDMu sync.Mutex reservedID int64 sessionStateChanges string } @@ -59,7 +59,7 @@ func NewClient() *QueryClient { // NewClientWithTabletType creates a new client for Server with the provided tablet type. 
func NewClientWithTabletType(tabletType topodatapb.TabletType) *QueryClient { - targetCopy := proto.Clone(Target).(*querypb.Target) + targetCopy := Target.CloneVT() targetCopy.TabletType = tabletType return &QueryClient{ ctx: callerid.NewContext( @@ -114,6 +114,8 @@ func (client *QueryClient) Commit() error { func (client *QueryClient) Rollback() error { defer func() { client.transactionID = 0 }() rID, err := client.server.Rollback(client.ctx, client.target, client.transactionID) + client.reservedIDMu.Lock() + defer client.reservedIDMu.Unlock() client.reservedID = rID if err != nil { return err @@ -293,6 +295,8 @@ func (client *QueryClient) MessageAck(name string, ids []string) (int64, error) // ReserveExecute performs a ReserveExecute. func (client *QueryClient) ReserveExecute(query string, preQueries []string, bindvars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + client.reservedIDMu.Lock() + defer client.reservedIDMu.Unlock() if client.reservedID != 0 { return nil, errors.New("already reserved a connection") } diff --git a/go/vt/vttablet/endtoend/framework/server.go b/go/vt/vttablet/endtoend/framework/server.go index 169055faba3..4f8043fba5a 100644 --- a/go/vt/vttablet/endtoend/framework/server.go +++ b/go/vt/vttablet/endtoend/framework/server.go @@ -58,7 +58,7 @@ var ( // StartCustomServer starts the server and initializes // all the global variables. This function should only be called // once at the beginning of the test. -func StartCustomServer(connParams, connAppDebugParams mysql.ConnParams, dbName string, config *tabletenv.TabletConfig) error { +func StartCustomServer(ctx context.Context, connParams, connAppDebugParams mysql.ConnParams, dbName string, config *tabletenv.TabletConfig) error { // Setup a fake vtgate server. 
protocol := "resolveTest" vtgateconn.SetVTGateProtocol(protocol) @@ -75,9 +75,9 @@ func StartCustomServer(connParams, connAppDebugParams mysql.ConnParams, dbName s Shard: "0", TabletType: topodatapb.TabletType_PRIMARY, } - TopoServer = memorytopo.NewServer("") + TopoServer = memorytopo.NewServer(ctx, "") - Server = tabletserver.NewTabletServer("", config, TopoServer, &topodatapb.TabletAlias{}) + Server = tabletserver.NewTabletServer(ctx, "", config, TopoServer, &topodatapb.TabletAlias{}) Server.Register() err := Server.StartService(Target, dbcfgs, nil /* mysqld */) if err != nil { @@ -110,7 +110,7 @@ func StartCustomServer(connParams, connAppDebugParams mysql.ConnParams, dbName s // StartServer starts the server and initializes // all the global variables. This function should only be called // once at the beginning of the test. -func StartServer(connParams, connAppDebugParams mysql.ConnParams, dbName string) error { +func StartServer(ctx context.Context, connParams, connAppDebugParams mysql.ConnParams, dbName string) error { config := tabletenv.NewDefaultConfig() config.StrictTableACL = true config.TwoPCEnable = true @@ -124,9 +124,10 @@ func StartServer(connParams, connAppDebugParams mysql.ConnParams, dbName string) _ = config.Oltp.TxTimeoutSeconds.Set("5s") _ = config.Olap.TxTimeoutSeconds.Set("5s") config.EnableViews = true + config.QueryCacheDoorkeeper = false gotBytes, _ := yaml2.Marshal(config) log.Infof("Config:\n%s", gotBytes) - return StartCustomServer(connParams, connAppDebugParams, dbName, config) + return StartCustomServer(ctx, connParams, connAppDebugParams, dbName, config) } // StopServer must be called once all the tests are done. 
diff --git a/go/vt/vttablet/endtoend/framework/testcase.go b/go/vt/vttablet/endtoend/framework/testcase.go index 37808c5aa7a..e02227b4eb6 100644 --- a/go/vt/vttablet/endtoend/framework/testcase.go +++ b/go/vt/vttablet/endtoend/framework/testcase.go @@ -21,6 +21,7 @@ import ( "fmt" "reflect" "strings" + "time" "vitess.io/vitess/go/vt/vterrors" @@ -122,7 +123,7 @@ func (tc *TestCase) Test(name string, client *QueryClient) error { } // wait for all previous test cases to have been settled in cache - client.server.QueryPlanCacheWait() + time.Sleep(100 * time.Millisecond) catcher := NewQueryCatcher() defer catcher.Close() diff --git a/go/vt/vttablet/endtoend/healthstream_test.go b/go/vt/vttablet/endtoend/healthstream_test.go index 1afe1238913..4bc13aa9084 100644 --- a/go/vt/vttablet/endtoend/healthstream_test.go +++ b/go/vt/vttablet/endtoend/healthstream_test.go @@ -17,11 +17,11 @@ limitations under the License. package endtoend import ( + "slices" "testing" "time" "github.com/stretchr/testify/assert" - "golang.org/x/exp/slices" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/vttablet/endtoend/framework" @@ -34,35 +34,48 @@ func TestSchemaChange(t *testing.T) { tName string expectedChange string ddl string + expectTimeout bool }{ { "create table 1", "vitess_sc1", "create table vitess_sc1(id bigint primary key)", + false, }, { "create table 2", "vitess_sc2", "create table vitess_sc2(id bigint primary key)", + false, + }, { + "create internal table", + "_vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", + "create table _vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_20200915120410(id bigint primary key)", + true, }, { "add column 1", "vitess_sc1", "alter table vitess_sc1 add column newCol varchar(50)", + false, }, { "add column 2", "vitess_sc2", "alter table vitess_sc2 add column newCol varchar(50)", + false, }, { "remove column", "vitess_sc1", "alter table vitess_sc1 drop column newCol", + false, }, { "drop table 2", "vitess_sc2", "drop table 
vitess_sc2", + false, }, { "drop table 1", "vitess_sc1", "drop table vitess_sc1", + false, }, } @@ -85,9 +98,14 @@ func TestSchemaChange(t *testing.T) { select { case res := <-ch: // get the schema notification if slices.Contains(res, tc.expectedChange) { + assert.False(t, tc.expectTimeout) return } case <-timeout: + if tc.expectTimeout { + // This is what we wanted! + return + } t.Errorf("timed out waiting for a schema notification") return } diff --git a/go/vt/vttablet/endtoend/main_test.go b/go/vt/vttablet/endtoend/main_test.go index d4a6bd70899..b5256be0994 100644 --- a/go/vt/vttablet/endtoend/main_test.go +++ b/go/vt/vttablet/endtoend/main_test.go @@ -17,6 +17,7 @@ limitations under the License. package endtoend import ( + "context" "errors" "flag" "fmt" @@ -84,7 +85,9 @@ func TestMain(m *testing.M) { connParams = cluster.MySQLConnParams() connAppDebugParams = cluster.MySQLAppDebugConnParams() - err = framework.StartServer(connParams, connAppDebugParams, cluster.DbName()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err = framework.StartServer(ctx, connParams, connAppDebugParams, cluster.DbName()) if err != nil { fmt.Fprintf(os.Stderr, "%v", err) return 1 @@ -311,7 +314,7 @@ var tableACLConfig = `{ }, { "name": "vitess_healthstream", - "table_names_or_prefixes": ["vitess_sc1", "vitess_sc2"], + "table_names_or_prefixes": ["vitess_sc1", "vitess_sc2", "_vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_20200915120410"], "readers": ["dev"], "writers": ["dev"], "admins": ["dev"] diff --git a/go/vt/vttablet/endtoend/misc_test.go b/go/vt/vttablet/endtoend/misc_test.go index 45eaf93289d..5c37a5d9bb0 100644 --- a/go/vt/vttablet/endtoend/misc_test.go +++ b/go/vt/vttablet/endtoend/misc_test.go @@ -185,8 +185,7 @@ func TestIntegrityError(t *testing.T) { } func TestTrailingComment(t *testing.T) { - vstart := framework.DebugVars() - v1 := framework.FetchInt(vstart, "QueryCacheLength") + v1 := framework.Server.QueryPlanCacheLen() bindVars := 
map[string]*querypb.BindVariable{"ival": sqltypes.Int64BindVariable(1)} client := framework.NewClient() @@ -201,7 +200,7 @@ func TestTrailingComment(t *testing.T) { t.Error(err) return } - v2 := framework.FetchInt(framework.DebugVars(), "QueryCacheLength") + v2 := framework.Server.QueryPlanCacheLen() if v2 != v1+1 { t.Errorf("QueryCacheLength(%s): %d, want %d", query, v2, v1+1) } @@ -994,3 +993,48 @@ func TestShowTablesWithSizes(t *testing.T) { } assert.Equalf(t, len(expectTables), len(matchedTables), "%v", matchedTables) } + +// TestTuple tests that bind variables having tuple values work with vttablet. +func TestTuple(t *testing.T) { + client := framework.NewClient() + _, err := client.Execute(`insert into vitess_a (eid, id) values (100, 103), (193, 235)`, nil) + require.NoError(t, err) + + bv := map[string]*querypb.BindVariable{ + "__vals": { + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{ + sqltypes.TupleToProto([]sqltypes.Value{sqltypes.NewInt64(100), sqltypes.NewInt64(103)}), + sqltypes.TupleToProto([]sqltypes.Value{sqltypes.NewInt64(87), sqltypes.NewInt64(4473)}), + }, + }, + } + res, err := client.Execute("select * from vitess_a where (eid, id) in ::__vals", bv) + require.NoError(t, err) + assert.Equal(t, `[[INT64(100) INT32(103) NULL NULL]]`, fmt.Sprintf("%v", res.Rows)) + + res, err = client.Execute("update vitess_a set name = 'a' where (eid, id) in ::__vals", bv) + require.NoError(t, err) + assert.EqualValues(t, 1, res.RowsAffected) + + res, err = client.Execute("select * from vitess_a where (eid, id) in ::__vals", bv) + require.NoError(t, err) + assert.Equal(t, `[[INT64(100) INT32(103) VARCHAR("a") NULL]]`, fmt.Sprintf("%v", res.Rows)) + + bv = map[string]*querypb.BindVariable{ + "__vals": { + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{ + sqltypes.TupleToProto([]sqltypes.Value{sqltypes.NewInt64(100), sqltypes.NewInt64(103)}), + sqltypes.TupleToProto([]sqltypes.Value{sqltypes.NewInt64(193), sqltypes.NewInt64(235)}), + }, + }, + } + res, 
err = client.Execute("delete from vitess_a where (eid, id) in ::__vals", bv) + require.NoError(t, err) + assert.EqualValues(t, 2, res.RowsAffected) + + res, err = client.Execute("select * from vitess_a where (eid, id) in ::__vals", bv) + require.NoError(t, err) + require.Zero(t, len(res.Rows)) +} diff --git a/go/vt/vttablet/endtoend/streamtimeout/healthstream_test.go b/go/vt/vttablet/endtoend/streamtimeout/healthstream_test.go index d08e56b17ad..d69ce193ef9 100644 --- a/go/vt/vttablet/endtoend/streamtimeout/healthstream_test.go +++ b/go/vt/vttablet/endtoend/streamtimeout/healthstream_test.go @@ -17,23 +17,31 @@ limitations under the License. package streamtimeout import ( - "fmt" + "context" + "slices" "testing" "time" "github.com/stretchr/testify/require" - "golang.org/x/exp/slices" + "vitess.io/vitess/go/mysql" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/vttablet/endtoend/framework" ) // TestSchemaChangeTimedout ensures that the timeout functionality is working properly // to prevent queries from hanging up and causing a mutex to be locked forever. -func testSchemaChangeTimedout(t *testing.T) { +func TestSchemaChangeTimedout(t *testing.T) { + const TableName = "vitess_healthstream" + client := framework.NewClient() reloadEstimatedTime := 2 * time.Second + err := cluster.SimulateMySQLHang() + require.NoError(t, err) + + defer cluster.StopSimulateMySQLHang() + ch := make(chan []string, 100) go func(ch chan []string) { client.StreamHealth(func(response *querypb.StreamHealthResponse) error { @@ -44,39 +52,22 @@ func testSchemaChangeTimedout(t *testing.T) { }) }(ch) - // We will set up the MySQLHang simulation. - // To avoid flakiness, we will retry the setup if the health_streamer sends a notification before the MySQLHang is simulated. - attempt := 1 - var tableName string -loop: - for { - tableName = fmt.Sprintf("vitess_sc%d", attempt) - - // change the schema to trigger the health_streamer to send a notification at a later time. 
- _, err := client.Execute("create table "+tableName+"(id bigint primary key)", nil) - require.NoError(t, err) + // get a clean connection that skips toxyproxy to be able to change the schema in the underlying DB + cleanParams := cluster.MySQLCleanConnParams() + cleanConn, err := mysql.Connect(context.Background(), &cleanParams) + require.NoError(t, err) + defer cleanConn.Close() - // start simulating a mysql stall until a query issued by the health_streamer would hang. - err = cluster.SimulateMySQLHang() - require.NoError(t, err) + // change the schema to trigger the health_streamer to send a notification at a later time. + _, err = cleanConn.ExecuteFetch("create table "+TableName+"(id bigint primary key)", -1, false) + require.NoError(t, err) - select { - case <-ch: // get the schema notification - // The health_streamer can send a notification between the time the schema is changed and the mysql stall is simulated. - // In this rare case, we must retry the same setup again. - cluster.StopSimulateMySQLHang() - attempt++ - - if attempt > 5 { - t.Errorf("failed to setup MySQLHang even after several attempts") - return - } - t.Logf("retrying setup for attempt %d", attempt) - case <-time.After(reloadEstimatedTime): - break loop - } + select { + case <-ch: // get the schema notification + t.Fatalf("received an schema change event from the HealthStreamer (is toxyproxy working?)") + case <-time.After(reloadEstimatedTime): + // Good, continue } - defer cluster.StopSimulateMySQLHang() // We will wait for the health_streamer to attempt sending a notification. // It's important to keep in mind that the total wait time after the simulation should be shorter than the reload timeout. @@ -87,7 +78,7 @@ loop: time.Sleep(reloadInterval) // pause simulating the mysql stall to allow the health_streamer to resume. 
- err := cluster.PauseSimulateMySQLHang() + err = cluster.PauseSimulateMySQLHang() require.NoError(t, err) // wait for the health_streamer to complete retrying the notification. @@ -97,7 +88,7 @@ loop: for { select { case res := <-ch: // get the schema notification - if slices.Contains(res, tableName) { + if slices.Contains(res, TableName) { return } case <-timeout: diff --git a/go/vt/vttablet/endtoend/streamtimeout/main_test.go b/go/vt/vttablet/endtoend/streamtimeout/main_test.go index 16cf3815433..ab7c32fa251 100644 --- a/go/vt/vttablet/endtoend/streamtimeout/main_test.go +++ b/go/vt/vttablet/endtoend/streamtimeout/main_test.go @@ -20,6 +20,7 @@ All tests in this package come with toxiproxy in front of the MySQL server package streamtimeout import ( + "context" "flag" "fmt" "os" @@ -86,7 +87,10 @@ func testMain(m *testing.M) { _ = config.SchemaReloadIntervalSeconds.Set("2100ms") config.SchemaChangeReloadTimeout = 10 * time.Second config.SignalWhenSchemaChange = true - err = framework.StartCustomServer(connParams, connAppDebugParams, cluster.DbName(), config) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err = framework.StartCustomServer(ctx, connParams, connAppDebugParams, cluster.DbName(), config) if err != nil { fmt.Fprintf(os.Stderr, "%v", err) return 1 diff --git a/go/vt/vttablet/endtoend/transaction_test.go b/go/vt/vttablet/endtoend/transaction_test.go index 6751e60f9ad..8f6546df5f1 100644 --- a/go/vt/vttablet/endtoend/transaction_test.go +++ b/go/vt/vttablet/endtoend/transaction_test.go @@ -321,7 +321,7 @@ func TestShutdownGracePeriod(t *testing.T) { err := client.Begin(false) require.NoError(t, err) go func() { - _, err = client.Execute("select sleep(10) from dual", nil) + _, err := client.Execute("select sleep(10) from dual", nil) assert.Error(t, err) }() @@ -346,7 +346,7 @@ func TestShutdownGracePeriod(t *testing.T) { err = client.Begin(false) require.NoError(t, err) go func() { - _, err = client.Execute("select sleep(11) 
from dual", nil) + _, err := client.Execute("select sleep(11) from dual", nil) assert.Error(t, err) }() @@ -373,7 +373,7 @@ func TestShutdownGracePeriodWithStreamExecute(t *testing.T) { err := client.Begin(false) require.NoError(t, err) go func() { - _, err = client.StreamExecute("select sleep(10) from dual", nil) + _, err := client.StreamExecute("select sleep(10) from dual", nil) assert.Error(t, err) }() @@ -398,7 +398,7 @@ func TestShutdownGracePeriodWithStreamExecute(t *testing.T) { err = client.Begin(false) require.NoError(t, err) go func() { - _, err = client.StreamExecute("select sleep(11) from dual", nil) + _, err := client.StreamExecute("select sleep(11) from dual", nil) assert.Error(t, err) }() @@ -425,7 +425,7 @@ func TestShutdownGracePeriodWithReserveExecute(t *testing.T) { err := client.Begin(false) require.NoError(t, err) go func() { - _, err = client.ReserveExecute("select sleep(10) from dual", nil, nil) + _, err := client.ReserveExecute("select sleep(10) from dual", nil, nil) assert.Error(t, err) }() @@ -450,7 +450,7 @@ func TestShutdownGracePeriodWithReserveExecute(t *testing.T) { err = client.Begin(false) require.NoError(t, err) go func() { - _, err = client.ReserveExecute("select sleep(11) from dual", nil, nil) + _, err := client.ReserveExecute("select sleep(11) from dual", nil, nil) assert.Error(t, err) }() diff --git a/go/vt/vttablet/endtoend/vstreamer_test.go b/go/vt/vttablet/endtoend/vstreamer_test.go index 6ffacff95ce..312273e0c84 100644 --- a/go/vt/vttablet/endtoend/vstreamer_test.go +++ b/go/vt/vttablet/endtoend/vstreamer_test.go @@ -59,8 +59,6 @@ func TestSchemaVersioning(t *testing.T) { tsv.EnableHistorian(false) tsv.SetTracking(false) tsv.EnableHeartbeat(false) - tsv.EnableThrottler(false) - defer tsv.EnableThrottler(true) defer tsv.EnableHeartbeat(true) defer tsv.EnableHistorian(true) defer tsv.SetTracking(true) @@ -386,6 +384,10 @@ func expectLogs(ctx context.Context, t *testing.T, query string, eventCh chan [] if ev.Type == 
binlogdatapb.VEventType_HEARTBEAT { continue } + if ev.Type == binlogdatapb.VEventType_ROW { + ev.RowEvent.Flags = 0 // null Flags, so we don't have to define flags in every wanted row event. + } + if ev.Throttled { continue } diff --git a/go/vt/vttablet/faketmclient/fake_client.go b/go/vt/vttablet/faketmclient/fake_client.go index 6df153c855a..e8747b98fcc 100644 --- a/go/vt/vttablet/faketmclient/fake_client.go +++ b/go/vt/vttablet/faketmclient/fake_client.go @@ -62,7 +62,23 @@ type FakeTabletManagerClient struct { tmc tmclient.TabletManagerClient } -func (client *FakeTabletManagerClient) UpdateVRWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.UpdateVRWorkflowRequest) (*tabletmanagerdatapb.UpdateVRWorkflowResponse, error) { +func (client *FakeTabletManagerClient) CreateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) { + return nil, nil +} + +func (client *FakeTabletManagerClient) DeleteVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.DeleteVReplicationWorkflowRequest) (*tabletmanagerdatapb.DeleteVReplicationWorkflowResponse, error) { + return nil, nil +} + +func (client *FakeTabletManagerClient) ReadVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { + return nil, nil +} + +func (client *FakeTabletManagerClient) ResetSequences(ctx context.Context, tablet *topodatapb.Tablet, tables []string) error { + return nil +} + +func (client *FakeTabletManagerClient) UpdateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowResponse, error) { return nil, nil } @@ -327,6 +343,12 @@ func (client 
*FakeTabletManagerClient) RestoreFromBackup(ctx context.Context, ta return &eofEventStream{}, nil } +// Throttler related methods + +func (client *FakeTabletManagerClient) CheckThrottler(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) { + return &tabletmanagerdatapb.CheckThrottlerResponse{}, nil +} + // // Management related methods // diff --git a/go/vt/vttablet/grpcqueryservice/server.go b/go/vt/vttablet/grpcqueryservice/server.go index 7801dc93399..e9b5cd75bc5 100644 --- a/go/vt/vttablet/grpcqueryservice/server.go +++ b/go/vt/vttablet/grpcqueryservice/server.go @@ -356,6 +356,16 @@ func (q *query) VStreamRows(request *binlogdatapb.VStreamRowsRequest, stream que return vterrors.ToGRPC(err) } +func (q *query) VStreamTables(request *binlogdatapb.VStreamTablesRequest, stream queryservicepb.Query_VStreamTablesServer) (err error) { + defer q.server.HandlePanic(&err) + ctx := callerid.NewContext(callinfo.GRPCCallInfo(stream.Context()), + request.EffectiveCallerId, + request.ImmediateCallerId, + ) + err = q.server.VStreamTables(ctx, request, stream.Send) + return vterrors.ToGRPC(err) +} + // VStreamResults is part of the queryservice.QueryServer interface func (q *query) VStreamResults(request *binlogdatapb.VStreamResultsRequest, stream queryservicepb.Query_VStreamResultsServer) (err error) { defer q.server.HandlePanic(&err) diff --git a/go/vt/vttablet/grpctabletconn/conn.go b/go/vt/vttablet/grpctabletconn/conn.go index deefe3f84a2..004f10ecca9 100644 --- a/go/vt/vttablet/grpctabletconn/conn.go +++ b/go/vt/vttablet/grpctabletconn/conn.go @@ -735,6 +735,46 @@ func (conn *gRPCQueryClient) VStreamRows(ctx context.Context, request *binlogdat } } +// VStreamTables streams rows of a query from the specified starting point. 
+func (conn *gRPCQueryClient) VStreamTables(ctx context.Context, request *binlogdatapb.VStreamTablesRequest, send func(*binlogdatapb.VStreamTablesResponse) error) error { + stream, err := func() (queryservicepb.Query_VStreamTablesClient, error) { + conn.mu.RLock() + defer conn.mu.RUnlock() + if conn.cc == nil { + return nil, tabletconn.ConnClosed + } + + req := &binlogdatapb.VStreamTablesRequest{ + Target: request.Target, + EffectiveCallerId: callerid.EffectiveCallerIDFromContext(ctx), + ImmediateCallerId: callerid.ImmediateCallerIDFromContext(ctx), + } + stream, err := conn.c.VStreamTables(ctx, req) + if err != nil { + return nil, tabletconn.ErrorFromGRPC(err) + } + return stream, nil + }() + if err != nil { + return err + } + r := binlogdatapb.VStreamTablesResponseFromVTPool() + defer r.ReturnToVTPool() + for { + err := stream.RecvMsg(r) + if err != nil { + return tabletconn.ErrorFromGRPC(err) + } + if ctx.Err() != nil { + return ctx.Err() + } + if err := send(r); err != nil { + return err + } + r.ResetVT() + } +} + // VStreamResults streams rows of a query from the specified starting point. 
func (conn *gRPCQueryClient) VStreamResults(ctx context.Context, target *querypb.Target, query string, send func(*binlogdatapb.VStreamResultsResponse) error) error { stream, err := func() (queryservicepb.Query_VStreamResultsClient, error) { diff --git a/go/vt/vttablet/grpctmclient/client.go b/go/vt/vttablet/grpctmclient/client.go index 1899c82179c..0068ed74706 100644 --- a/go/vt/vttablet/grpctmclient/client.go +++ b/go/vt/vttablet/grpctmclient/client.go @@ -69,7 +69,6 @@ var _binaries = []string{ // binaries that require the flags in this package "vtctl", "vtctld", "vtctldclient", - "vtgr", "vtorc", "vttablet", "vttestserver", @@ -363,6 +362,18 @@ func (client *Client) ReloadSchema(ctx context.Context, tablet *topodatapb.Table return err } +func (client *Client) ResetSequences(ctx context.Context, tablet *topodatapb.Tablet, tables []string) error { + c, closer, err := client.dialer.dial(ctx, tablet) + if err != nil { + return err + } + defer closer.Close() + _, err = c.ResetSequences(ctx, &tabletmanagerdatapb.ResetSequencesRequest{ + Tables: tables, + }) + return err +} + // PreflightSchema is part of the tmclient.TabletManagerClient interface. 
func (client *Client) PreflightSchema(ctx context.Context, tablet *topodatapb.Tablet, changes []string) ([]*tabletmanagerdatapb.SchemaChangeResult, error) { c, closer, err := client.dialer.dial(ctx, tablet) @@ -679,6 +690,49 @@ func (client *Client) GetReplicas(ctx context.Context, tablet *topodatapb.Tablet return response.Addrs, nil } +// +// VReplication related methods +// + +func (client *Client) CreateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) { + c, closer, err := client.dialer.dial(ctx, tablet) + if err != nil { + return nil, err + } + defer closer.Close() + response, err := c.CreateVReplicationWorkflow(ctx, request) + if err != nil { + return nil, err + } + return response, nil +} + +func (client *Client) DeleteVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.DeleteVReplicationWorkflowRequest) (*tabletmanagerdatapb.DeleteVReplicationWorkflowResponse, error) { + c, closer, err := client.dialer.dial(ctx, tablet) + if err != nil { + return nil, err + } + defer closer.Close() + response, err := c.DeleteVReplicationWorkflow(ctx, request) + if err != nil { + return nil, err + } + return response, nil +} + +func (client *Client) ReadVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { + c, closer, err := client.dialer.dial(ctx, tablet) + if err != nil { + return nil, err + } + defer closer.Close() + response, err := c.ReadVReplicationWorkflow(ctx, request) + if err != nil { + return nil, err + } + return response, nil +} + // VReplicationExec is part of the tmclient.TabletManagerClient interface. 
func (client *Client) VReplicationExec(ctx context.Context, tablet *topodatapb.Tablet, query string) (*querypb.QueryResult, error) { c, closer, err := client.dialer.dial(ctx, tablet) @@ -706,13 +760,13 @@ func (client *Client) VReplicationWaitForPos(ctx context.Context, tablet *topoda return nil } -func (client *Client) UpdateVRWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.UpdateVRWorkflowRequest) (*tabletmanagerdatapb.UpdateVRWorkflowResponse, error) { +func (client *Client) UpdateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowResponse, error) { c, closer, err := client.dialer.dial(ctx, tablet) if err != nil { return nil, err } defer closer.Close() - response, err := c.UpdateVRWorkflow(ctx, request) + response, err := c.UpdateVReplicationWorkflow(ctx, request) if err != nil { return nil, err } @@ -947,6 +1001,20 @@ func (client *Client) Backup(ctx context.Context, tablet *topodatapb.Tablet, req }, nil } +// CheckThrottler is part of the tmclient.TabletManagerClient interface. 
+func (client *Client) CheckThrottler(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) { + c, closer, err := client.dialer.dial(ctx, tablet) + if err != nil { + return nil, err + } + defer closer.Close() + response, err := c.CheckThrottler(ctx, req) + if err != nil { + return nil, err + } + return response, nil +} + type restoreFromBackupStreamAdapter struct { stream tabletmanagerservicepb.TabletManager_RestoreFromBackupClient closer io.Closer diff --git a/go/vt/vttablet/grpctmserver/server.go b/go/vt/vttablet/grpctmserver/server.go index 5f12a5caf24..d0fe5a2cbe1 100644 --- a/go/vt/vttablet/grpctmserver/server.go +++ b/go/vt/vttablet/grpctmserver/server.go @@ -20,11 +20,9 @@ import ( "context" "time" - "vitess.io/vitess/go/vt/callerid" - querypb "vitess.io/vitess/go/vt/proto/query" - "google.golang.org/grpc" + "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/callinfo" "vitess.io/vitess/go/vt/hook" "vitess.io/vitess/go/vt/logutil" @@ -34,6 +32,7 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletmanager" logutilpb "vitess.io/vitess/go/vt/proto/logutil" + querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" tabletmanagerservicepb "vitess.io/vitess/go/vt/proto/tabletmanagerservice" ) @@ -176,6 +175,13 @@ func (s *server) ApplySchema(ctx context.Context, request *tabletmanagerdatapb.A return response, err } +func (s *server) ResetSequences(ctx context.Context, request *tabletmanagerdatapb.ResetSequencesRequest) (response *tabletmanagerdatapb.ResetSequencesResponse, err error) { + defer s.tm.HandleRPCPanic(ctx, "ResetSequences", request, response, true /*verbose*/, &err) + response = &tabletmanagerdatapb.ResetSequencesResponse{} + err = s.tm.ResetSequences(ctx, request.Tables) + return response, err +} + func (s *server) LockTables(ctx context.Context, req *tabletmanagerdatapb.LockTablesRequest) 
(*tabletmanagerdatapb.LockTablesResponse, error) { err := s.tm.LockTables(ctx) if err != nil { @@ -343,6 +349,31 @@ func (s *server) GetReplicas(ctx context.Context, request *tabletmanagerdatapb.G return response, err } +// +// VReplication related methods +// + +func (s *server) CreateVReplicationWorkflow(ctx context.Context, request *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (response *tabletmanagerdatapb.CreateVReplicationWorkflowResponse, err error) { + defer s.tm.HandleRPCPanic(ctx, "CreateVReplicationWorkflow", request, response, true /*verbose*/, &err) + ctx = callinfo.GRPCCallInfo(ctx) + response = &tabletmanagerdatapb.CreateVReplicationWorkflowResponse{} + return s.tm.CreateVReplicationWorkflow(ctx, request) +} + +func (s *server) DeleteVReplicationWorkflow(ctx context.Context, request *tabletmanagerdatapb.DeleteVReplicationWorkflowRequest) (response *tabletmanagerdatapb.DeleteVReplicationWorkflowResponse, err error) { + defer s.tm.HandleRPCPanic(ctx, "DeleteVReplicationWorkflow", request, response, true /*verbose*/, &err) + ctx = callinfo.GRPCCallInfo(ctx) + response = &tabletmanagerdatapb.DeleteVReplicationWorkflowResponse{} + return s.tm.DeleteVReplicationWorkflow(ctx, request) +} + +func (s *server) ReadVReplicationWorkflow(ctx context.Context, request *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (response *tabletmanagerdatapb.ReadVReplicationWorkflowResponse, err error) { + defer s.tm.HandleRPCPanic(ctx, "ReadVReplicationWorkflow", request, response, true /*verbose*/, &err) + ctx = callinfo.GRPCCallInfo(ctx) + response = &tabletmanagerdatapb.ReadVReplicationWorkflowResponse{} + return s.tm.ReadVReplicationWorkflow(ctx, request) +} + func (s *server) VReplicationExec(ctx context.Context, request *tabletmanagerdatapb.VReplicationExecRequest) (response *tabletmanagerdatapb.VReplicationExecResponse, err error) { defer s.tm.HandleRPCPanic(ctx, "VReplicationExec", request, response, true /*verbose*/, &err) ctx = 
callinfo.GRPCCallInfo(ctx) @@ -358,11 +389,11 @@ func (s *server) VReplicationWaitForPos(ctx context.Context, request *tabletmana return &tabletmanagerdatapb.VReplicationWaitForPosResponse{}, err } -func (s *server) UpdateVRWorkflow(ctx context.Context, request *tabletmanagerdatapb.UpdateVRWorkflowRequest) (response *tabletmanagerdatapb.UpdateVRWorkflowResponse, err error) { - defer s.tm.HandleRPCPanic(ctx, "UpdateVRWorkflow", request, response, true /*verbose*/, &err) +func (s *server) UpdateVReplicationWorkflow(ctx context.Context, request *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) (response *tabletmanagerdatapb.UpdateVReplicationWorkflowResponse, err error) { + defer s.tm.HandleRPCPanic(ctx, "UpdateVReplicationWorkflow", request, response, true /*verbose*/, &err) ctx = callinfo.GRPCCallInfo(ctx) - response = &tabletmanagerdatapb.UpdateVRWorkflowResponse{} - return s.tm.UpdateVRWorkflow(ctx, request) + response = &tabletmanagerdatapb.UpdateVReplicationWorkflowResponse{} + return s.tm.UpdateVReplicationWorkflow(ctx, request) } func (s *server) VDiff(ctx context.Context, request *tabletmanagerdatapb.VDiffRequest) (response *tabletmanagerdatapb.VDiffResponse, err error) { @@ -513,6 +544,13 @@ func (s *server) RestoreFromBackup(request *tabletmanagerdatapb.RestoreFromBacku return s.tm.RestoreFromBackup(ctx, logger, request) } +func (s *server) CheckThrottler(ctx context.Context, request *tabletmanagerdatapb.CheckThrottlerRequest) (response *tabletmanagerdatapb.CheckThrottlerResponse, err error) { + defer s.tm.HandleRPCPanic(ctx, "CheckThrottler", request, response, false /*verbose*/, &err) + ctx = callinfo.GRPCCallInfo(ctx) + response, err = s.tm.CheckThrottler(ctx, request) + return response, err +} + // registration glue func init() { diff --git a/go/vt/vttablet/onlineddl/analysis.go b/go/vt/vttablet/onlineddl/analysis.go index 040f79d861e..987f09124a1 100644 --- a/go/vt/vttablet/onlineddl/analysis.go +++ b/go/vt/vttablet/onlineddl/analysis.go @@ 
-217,6 +217,7 @@ func alterOptionAvailableViaInstantDDL(alterOption sqlparser.AlterOption, create strippedCol := sqlparser.CloneRefOfColumnDefinition(col) if stripDefault { strippedCol.Type.Options.Default = nil + strippedCol.Type.Options.DefaultLiteral = false } if stripEnum { strippedCol.Type.EnumValues = nil diff --git a/go/vt/vttablet/onlineddl/executor.go b/go/vt/vttablet/onlineddl/executor.go index 6c112063eb8..66e81aef949 100644 --- a/go/vt/vttablet/onlineddl/executor.go +++ b/go/vt/vttablet/onlineddl/executor.go @@ -35,12 +35,12 @@ import ( "time" "github.com/spf13/pflag" - - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/encoding/prototext" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/textutil" @@ -55,7 +55,6 @@ import ( "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/schemadiff" "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" @@ -76,8 +75,16 @@ var ( ErrMigrationNotFound = errors.New("migration not found") ) +var ( + // fixCompletedTimestampDone fixes a nil `completed_tiemstamp` columns, see + // https://github.com/vitessio/vitess/issues/13927 + // The fix is in release-18.0 + // TODO: remove in release-19.0 + fixCompletedTimestampDone bool +) + var emptyResult = &sqltypes.Result{} -var acceptableDropTableIfExistsErrorCodes = []mysql.ErrorCode{mysql.ERCantFindFile, mysql.ERNoSuchTable} +var acceptableDropTableIfExistsErrorCodes = []sqlerror.ErrorCode{sqlerror.ERCantFindFile, sqlerror.ERNoSuchTable} var copyAlgorithm = sqlparser.AlgorithmValue(sqlparser.CopyStr) var ( @@ -117,6 +124,7 @@ const ( readyToCompleteHint = "ready_to_complete" databasePoolSize = 3 qrBufferExtraTimeout = 5 * time.Second + grpcTimeout = 30 * 
time.Second vreplicationTestSuiteWaitSeconds = 5 ) @@ -125,7 +133,6 @@ var ( migrationFailureFileName = "migration-failure.log" onlineDDLUser = "vt-online-ddl-internal" onlineDDLGrant = fmt.Sprintf("'%s'@'%s'", onlineDDLUser, "%") - throttleCheckFlags = &throttle.CheckFlags{} ) type ConstraintType int @@ -251,7 +258,7 @@ func NewExecutor(env tabletenv.Env, tabletAlias *topodatapb.TabletAlias, ts *top } return &Executor{ env: env, - tabletAlias: proto.Clone(tabletAlias).(*topodatapb.TabletAlias), + tabletAlias: tabletAlias.CloneVT(), pool: connpool.NewPool(env, "OnlineDDLExecutorPool", tabletenv.ConnPoolConfig{ Size: databasePoolSize, @@ -292,7 +299,7 @@ func (e *Executor) executeQueryWithSidecarDBReplacement(ctx context.Context, que defer conn.Recycle() // Replace any provided sidecar DB qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(query, sidecardb.DefaultName, sidecardb.GetName()) + uq, err := sqlparser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } @@ -327,7 +334,7 @@ func (e *Executor) Open() error { }) e.vreplicationLastError = make(map[string]*vterrors.LastError) - if sidecardb.GetName() != sidecardb.DefaultName { + if sidecar.GetName() != sidecar.DefaultName { e.execQuery = e.executeQueryWithSidecarDBReplacement } else { e.execQuery = e.executeQuery @@ -620,7 +627,7 @@ func (e *Executor) parseAlterOptions(ctx context.Context, onlineDDL *schema.Onli } // executeDirectly runs a DDL query directly on the backend MySQL server -func (e *Executor) executeDirectly(ctx context.Context, onlineDDL *schema.OnlineDDL, acceptableMySQLErrorCodes ...mysql.ErrorCode) (acceptableErrorCodeFound bool, err error) { +func (e *Executor) executeDirectly(ctx context.Context, onlineDDL *schema.OnlineDDL, acceptableMySQLErrorCodes ...sqlerror.ErrorCode) (acceptableErrorCodeFound bool, err error) { conn, err := dbconnpool.NewDBConnection(ctx, e.env.Config().DB.DbaWithDB()) if err != nil { return 
false, err @@ -638,7 +645,7 @@ func (e *Executor) executeDirectly(ctx context.Context, onlineDDL *schema.Online if err != nil { // let's see if this error is actually acceptable - if merr, ok := err.(*mysql.SQLError); ok { + if merr, ok := err.(*sqlerror.SQLError); ok { for _, acceptableCode := range acceptableMySQLErrorCodes { if merr.Num == acceptableCode { // we don't consider this to be an error. @@ -716,7 +723,7 @@ func (e *Executor) validateTableForAlterAction(ctx context.Context, onlineDDL *s } // primaryPosition returns the MySQL/MariaDB position (typically GTID pos) on the tablet -func (e *Executor) primaryPosition(ctx context.Context) (pos mysql.Position, err error) { +func (e *Executor) primaryPosition(ctx context.Context) (pos replication.Position, err error) { conn, err := dbconnpool.NewDBConnection(ctx, e.env.Config().DB.DbaWithDB()) if err != nil { return pos, err @@ -729,9 +736,6 @@ func (e *Executor) primaryPosition(ctx context.Context) (pos mysql.Position, err // terminateVReplMigration stops vreplication, then removes the _vt.vreplication entry for the given migration func (e *Executor) terminateVReplMigration(ctx context.Context, uuid string) error { - tmClient := e.tabletManagerClient() - defer tmClient.Close() - tablet, err := e.ts.GetTablet(ctx, e.tabletAlias) if err != nil { return err @@ -787,11 +791,11 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er migrationCutOverThreshold := getMigrationCutOverThreshold(onlineDDL) - waitForPos := func(s *VReplStream, pos mysql.Position) error { + waitForPos := func(s *VReplStream, pos replication.Position) error { ctx, cancel := context.WithTimeout(ctx, migrationCutOverThreshold) defer cancel() // Wait for target to reach the up-to-date pos - if err := tmClient.VReplicationWaitForPos(ctx, tablet.Tablet, s.id, mysql.EncodePosition(pos)); err != nil { + if err := tmClient.VReplicationWaitForPos(ctx, tablet.Tablet, s.id, replication.EncodePosition(pos)); err != nil { 
return err } // Target is now in sync with source! @@ -845,7 +849,7 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er if err != nil { return err } - e.updateMigrationStage(ctx, onlineDDL.UUID, "waiting for post-sentry pos: %v", mysql.EncodePosition(postSentryPos)) + e.updateMigrationStage(ctx, onlineDDL.UUID, "waiting for post-sentry pos: %v", replication.EncodePosition(postSentryPos)) if err := waitForPos(s, postSentryPos); err != nil { return err } @@ -910,11 +914,13 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er e.toggleBufferTableFunc(bufferingCtx, onlineDDL.Table, timeout, bufferQueries) if !bufferQueries { + grpcCtx, cancel := context.WithTimeout(ctx, grpcTimeout) + defer cancel() // called after new table is in place. // unbuffer existing queries: bufferingContextCancel() // force re-read of tables - if err := tmClient.RefreshState(ctx, tablet.Tablet); err != nil { + if err := tmClient.RefreshState(grpcCtx, tablet.Tablet); err != nil { return err } } @@ -998,12 +1004,12 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er return err } - e.updateMigrationStage(ctx, onlineDDL.UUID, "waiting for post-lock pos: %v", mysql.EncodePosition(postWritesPos)) + e.updateMigrationStage(ctx, onlineDDL.UUID, "waiting for post-lock pos: %v", replication.EncodePosition(postWritesPos)) if err := waitForPos(s, postWritesPos); err != nil { e.updateMigrationStage(ctx, onlineDDL.UUID, "timeout while waiting for post-lock pos: %v", err) return err } - go log.Infof("cutOverVReplMigration %v: done waiting for position %v", s.workflow, mysql.EncodePosition(postWritesPos)) + go log.Infof("cutOverVReplMigration %v: done waiting for position %v", s.workflow, replication.EncodePosition(postWritesPos)) // Stop vreplication e.updateMigrationStage(ctx, onlineDDL.UUID, "stopping vreplication") if _, err := e.vreplicationExec(ctx, tablet.Tablet, binlogplayer.StopVReplication(s.id, "stopped 
for online DDL cutover")); err != nil { @@ -1316,7 +1322,7 @@ func (e *Executor) initVreplicationOriginalMigration(ctx context.Context, online } } } - v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, onlineDDL.SQL) + v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, onlineDDL.SQL, onlineDDL.StrategySetting().IsAnalyzeTableFlag()) return v, nil } @@ -1370,7 +1376,7 @@ func (e *Executor) initVreplicationRevertMigration(ctx context.Context, onlineDD if err := e.updateArtifacts(ctx, onlineDDL.UUID, vreplTableName); err != nil { return v, err } - v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, "") + v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, "", false) v.pos = revertStream.pos return v, nil } @@ -2115,7 +2121,7 @@ func (e *Executor) validateThrottleParams(ctx context.Context, expireString stri return duration, ratio, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid EXPIRE value: %s. Try '120s', '30m', '1h', etc. 
Allowed units are (s)ec, (m)in, (h)hour", expireString) } } - ratio = 1.0 + ratio = throttle.DefaultThrottleRatio if ratioLiteral != nil { ratio, err = strconv.ParseFloat(ratioLiteral.Val, 64) if err != nil || ratio < 0 || ratio > 1 { @@ -2134,7 +2140,7 @@ func (e *Executor) ThrottleMigration(ctx context.Context, uuid string, expireStr if err := e.lagThrottler.CheckIsOpen(); err != nil { return nil, err } - _ = e.lagThrottler.ThrottleApp(uuid, time.Now().Add(duration), ratio) + _ = e.lagThrottler.ThrottleApp(uuid, time.Now().Add(duration), ratio, false) return emptyResult, nil } @@ -2147,7 +2153,7 @@ func (e *Executor) ThrottleAllMigrations(ctx context.Context, expireString strin if err := e.lagThrottler.CheckIsOpen(); err != nil { return nil, err } - _ = e.lagThrottler.ThrottleApp(throttlerapp.OnlineDDLName.String(), time.Now().Add(duration), ratio) + _ = e.lagThrottler.ThrottleApp(throttlerapp.OnlineDDLName.String(), time.Now().Add(duration), ratio, false) return emptyResult, nil } @@ -2704,7 +2710,7 @@ func (e *Executor) executeDropDDLActionMigration(ctx context.Context, onlineDDL return err } - acceptableErrorCodes := []mysql.ErrorCode{} + acceptableErrorCodes := []sqlerror.ErrorCode{} if ddlStmt.GetIfExists() { acceptableErrorCodes = acceptableDropTableIfExistsErrorCodes } @@ -2800,32 +2806,6 @@ func (e *Executor) generateSwapTablesStatement(ctx context.Context, tableName1, return parsed.Query, swapTableName, nil } -// renameTableIfApplicable renames a table, assuming it exists and that the target does not exist. 
-func (e *Executor) renameTableIfApplicable(ctx context.Context, fromTableName, toTableName string) (attemptMade bool, err error) { - if fromTableName == "" { - return false, nil - } - exists, err := e.tableExists(ctx, fromTableName) - if err != nil { - return false, err - } - if !exists { - // can't rename from table when it does not exist - return false, nil - } - exists, err = e.tableExists(ctx, toTableName) - if err != nil { - return false, err - } - if exists { - // target table exists, abort. - return false, nil - } - parsed := sqlparser.BuildParsedQuery(sqlRenameTable, fromTableName, toTableName) - _, err = e.execQuery(ctx, parsed.Query) - return true, err -} - func (e *Executor) executeAlterViewOnline(ctx context.Context, onlineDDL *schema.OnlineDDL) (err error) { artifactViewName, err := schema.GenerateGCTableName(schema.HoldTableGCState, newGCTableRetainTime()) if err != nil { @@ -3371,7 +3351,7 @@ func (e *Executor) readVReplStream(ctx context.Context, uuid string, okIfMissing timeThrottled: row.AsInt64("time_throttled", 0), componentThrottled: row.AsString("component_throttled", ""), transactionTimestamp: row.AsInt64("transaction_timestamp", 0), - state: row.AsString("state", ""), + state: binlogdatapb.VReplicationWorkflowState(binlogdatapb.VReplicationWorkflowState_value[row.AsString("state", "")]), message: row.AsString("message", ""), rowsCopied: row.AsInt64("rows_copied", 0), bls: &binlogdatapb.BinlogSource{}, @@ -3440,27 +3420,6 @@ func (e *Executor) isVReplMigrationReadyToCutOver(ctx context.Context, onlineDDL return true, nil } -// isVReplMigrationRunning sees if there is a VReplication migration actively running -func (e *Executor) isVReplMigrationRunning(ctx context.Context, uuid string) (isRunning bool, s *VReplStream, err error) { - s, err = e.readVReplStream(ctx, uuid, true) - if err != nil { - return false, s, err - } - if s == nil { - return false, s, nil - } - switch s.state { - case binlogplayer.BlpError: - return false, s, nil - case 
binlogplayer.VReplicationInit, binlogplayer.VReplicationCopying, binlogplayer.BlpRunning: - return true, s, nil - } - if strings.Contains(strings.ToLower(s.message), "error") { - return false, s, nil - } - return false, s, nil -} - // reviewRunningMigrations iterates migrations in 'running' state. Normally there's only one running, which was // spawned by this tablet; but vreplication migrations could also resume from failure. func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning int, cancellable []*cancellableMigration, err error) { @@ -3481,7 +3440,6 @@ func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning i } } - var throttlerOnce sync.Once r, err := e.execQuery(ctx, sqlSelectRunningMigrations) if err != nil { return countRunnning, cancellable, err @@ -3582,35 +3540,15 @@ func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning i if err := e.cutOverVReplMigration(ctx, s); err != nil { _ = e.updateMigrationMessage(ctx, uuid, err.Error()) log.Errorf("cutOverVReplMigration failed: err=%v", err) - if merr, ok := err.(*mysql.SQLError); ok { + if merr, ok := err.(*sqlerror.SQLError); ok { switch merr.Num { - case mysql.ERTooLongIdent: + case sqlerror.ERTooLongIdent: go e.CancelMigration(ctx, uuid, err.Error(), false) } } return countRunnning, cancellable, err } } - go throttlerOnce.Do(func() { - if !e.lagThrottler.IsRunning() { - return - } - // Self healing: in the following scenario: - // - a vitess migration - // - with on demand heartbeats - // - the streamer running on a replica - // - the streamer was throttled for long enough - // - then vplayer and vcopier are locked, waiting for the streamer to do something - // - since they are blocked, they're not running throttler checks - // - since streamer runs on replica, it only checks that replica - // - therefore no one asking for on-demand heartbeats - // - then, if the conditions for the streamer's throttling are done, the streamer then thinks 
there's replication lag, with nothing to remediate it. - // - it's a deadlock. - // And so, once per reviewRunningMigrations(), and assuming there _are_ running migrations, we ensure to hit a throttler check. This will kick - // on-demand heartbeats, unlocking the deadlock. - e.lagThrottler.CheckByType(ctx, throttlerapp.OnlineDDLName.String(), "", throttleCheckFlags, throttle.ThrottleCheckPrimaryWrite) - }) - } } case schema.DDLStrategyPTOSC: @@ -3746,7 +3684,10 @@ func (e *Executor) vreplicationExec(ctx context.Context, tablet *topodatapb.Tabl tmClient := e.tabletManagerClient() defer tmClient.Close() - return tmClient.VReplicationExec(ctx, tablet, query) + grpcCtx, cancel := context.WithTimeout(ctx, grpcTimeout) + defer cancel() + + return tmClient.VReplicationExec(grpcCtx, tablet, query) } // reloadSchema issues a ReloadSchema on this tablet @@ -3758,7 +3699,11 @@ func (e *Executor) reloadSchema(ctx context.Context) error { if err != nil { return err } - return tmClient.ReloadSchema(ctx, tablet.Tablet, "") + + grpcCtx, cancel := context.WithTimeout(ctx, grpcTimeout) + defer cancel() + + return tmClient.ReloadSchema(grpcCtx, tablet.Tablet, "") } // deleteVReplicationEntry cleans up a _vt.vreplication entry; this function is called as part of @@ -3810,6 +3755,17 @@ func (e *Executor) gcArtifacts(ctx context.Context) error { e.migrationMutex.Lock() defer e.migrationMutex.Unlock() + // v18 fix. 
Remove in v19 + if !fixCompletedTimestampDone { + if _, err := e.execQuery(ctx, sqlFixCompletedTimestamp); err != nil { + // This query fixes a bug where stale migrations were marked as 'cancelled' or 'failed' without updating 'completed_timestamp' + // Running this query retroactively sets completed_timestamp + // This fix is created in v18 and can be removed in v19 + return err + } + fixCompletedTimestampDone = true + } + query, err := sqlparser.ParseAndBind(sqlSelectUncollectedArtifacts, sqltypes.Int64BindVariable(int64((retainOnlineDDLTables).Seconds())), ) @@ -4207,6 +4163,7 @@ func (e *Executor) updateMigrationProgress(ctx context.Context, uuid string, pro func (e *Executor) updateMigrationProgressByRowsCopied(ctx context.Context, uuid string, rowsCopied int64) error { query, err := sqlparser.ParseAndBind(sqlUpdateMigrationProgressByRowsCopied, + sqltypes.Int64BindVariable(rowsCopied), sqltypes.Int64BindVariable(rowsCopied), sqltypes.StringBindVariable(uuid), ) @@ -4684,6 +4641,11 @@ func (e *Executor) SubmitMigration( revertedUUID, _ := onlineDDL.GetRevertUUID() // Empty value if the migration is not actually a REVERT. Safe to ignore error. retainArtifactsSeconds := int64((retainOnlineDDLTables).Seconds()) + if retainArtifacts, _ := onlineDDL.StrategySetting().RetainArtifactsDuration(); retainArtifacts != 0 { + // Explicit retention indicated by `--retain-artifact` DDL strategy flag for this migration. Override! 
+ retainArtifactsSeconds = int64((retainArtifacts).Seconds()) + } + _, allowConcurrentMigration := e.allowConcurrentMigration(onlineDDL) submitQuery, err := sqlparser.ParseAndBind(sqlInsertMigration, sqltypes.StringBindVariable(onlineDDL.UUID), diff --git a/go/vt/vttablet/onlineddl/schema.go b/go/vt/vttablet/onlineddl/schema.go index 39288248cdb..4698c75a9d5 100644 --- a/go/vt/vttablet/onlineddl/schema.go +++ b/go/vt/vttablet/onlineddl/schema.go @@ -66,7 +66,8 @@ const ( migration_uuid=%a ` sqlUpdateMigrationStatusFailedOrCancelled = `UPDATE _vt.schema_migrations - SET migration_status=IF(cancelled_timestamp IS NULL, 'failed', 'cancelled') + SET migration_status=IF(cancelled_timestamp IS NULL, 'failed', 'cancelled'), + completed_timestamp=NOW(6) WHERE migration_uuid=%a ` @@ -213,6 +214,7 @@ const ( ` sqlUpdateMigrationProgressByRowsCopied = `UPDATE _vt.schema_migrations SET + table_rows=GREATEST(table_rows, %a), progress=CASE WHEN table_rows=0 THEN 100 ELSE LEAST(100, 100*%a/table_rows) @@ -344,7 +346,7 @@ const ( log_path FROM _vt.schema_migrations WHERE - migration_status IN ('complete', 'failed') + migration_status IN ('complete', 'cancelled', 'failed') AND cleanup_timestamp IS NULL AND completed_timestamp <= IF(retain_artifacts_seconds=0, NOW() - INTERVAL %a SECOND, @@ -355,7 +357,7 @@ const ( SET completed_timestamp=NOW(6) WHERE - migration_status='failed' + migration_status IN ('cancelled', 'failed') AND cleanup_timestamp IS NULL AND completed_timestamp IS NULL ` @@ -520,6 +522,7 @@ const ( sqlDropTableIfExists = "DROP TABLE IF EXISTS `%a`" sqlShowColumnsFrom = "SHOW COLUMNS FROM `%a`" sqlShowTableStatus = "SHOW TABLE STATUS LIKE '%a'" + sqlAnalyzeTable = "ANALYZE NO_WRITE_TO_BINLOG TABLE `%a`" sqlShowCreateTable = "SHOW CREATE TABLE `%a`" sqlGetAutoIncrement = ` SELECT @@ -568,13 +571,6 @@ const ( sqlFindProcess = "SELECT id, Info as info FROM information_schema.processlist WHERE id=%a AND Info LIKE %a" ) -const ( - retryMigrationHint = "retry" - 
cancelMigrationHint = "cancel" - cancelAllMigrationHint = "cancel-all" - completeMigrationHint = "complete" -) - var ( sqlCreateOnlineDDLUser = []string{ `CREATE USER IF NOT EXISTS %s IDENTIFIED BY '%s'`, diff --git a/go/vt/vttablet/onlineddl/vrepl.go b/go/vt/vttablet/onlineddl/vrepl.go index d877c4f37d3..cc669e11c11 100644 --- a/go/vt/vttablet/onlineddl/vrepl.go +++ b/go/vt/vttablet/onlineddl/vrepl.go @@ -31,18 +31,20 @@ import ( "strconv" "strings" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/textutil" - "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/dbconnpool" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/onlineddl/vrepl" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) // VReplStream represents a row in _vt.vreplication table @@ -56,7 +58,7 @@ type VReplStream struct { timeThrottled int64 componentThrottled string transactionTimestamp int64 - state string + state binlogdatapb.VReplicationWorkflowState message string rowsCopied int64 bls *binlogdatapb.BinlogSource @@ -75,7 +77,7 @@ func (v *VReplStream) livenessTimeIndicator() int64 { // isRunning() returns true when the workflow is actively running func (v *VReplStream) isRunning() bool { switch v.state { - case binlogplayer.VReplicationInit, binlogplayer.VReplicationCopying, binlogplayer.BlpRunning: + case binlogdatapb.VReplicationWorkflowState_Init, binlogdatapb.VReplicationWorkflowState_Copying, binlogdatapb.VReplicationWorkflowState_Running: return true } return false @@ -84,7 +86,7 @@ func (v 
*VReplStream) isRunning() bool { // hasError() returns true when the workflow has failed and will not retry func (v *VReplStream) hasError() (isTerminal bool, vreplError error) { switch { - case v.state == binlogplayer.BlpError: + case v.state == binlogdatapb.VReplicationWorkflowState_Error: return true, errors.New(v.message) case strings.Contains(strings.ToLower(v.message), "error"): return false, errors.New(v.message) @@ -104,6 +106,8 @@ type VRepl struct { alterQuery string tableRows int64 + analyzeTable bool + sourceSharedColumns *vrepl.ColumnList targetSharedColumns *vrepl.ColumnList droppedSourceNonGeneratedColumns *vrepl.ColumnList @@ -130,7 +134,7 @@ type VRepl struct { } // NewVRepl creates a VReplication handler for Online DDL -func NewVRepl(workflow, keyspace, shard, dbName, sourceTable, targetTable, alterQuery string) *VRepl { +func NewVRepl(workflow, keyspace, shard, dbName, sourceTable, targetTable, alterQuery string, analyzeTable bool) *VRepl { return &VRepl{ workflow: workflow, keyspace: keyspace, @@ -139,6 +143,7 @@ func NewVRepl(workflow, keyspace, shard, dbName, sourceTable, targetTable, alter sourceTable: sourceTable, targetTable: targetTable, alterQuery: alterQuery, + analyzeTable: analyzeTable, parser: vrepl.NewAlterTableParser(), enumToTextMap: map[string]string{}, intToEnumMap: map[string]bool{}, @@ -226,6 +231,13 @@ func (v *VRepl) readTableUniqueKeys(ctx context.Context, conn *dbconnpool.DBConn return uniqueKeys, nil } +// executeAnalyzeTable runs an ANALYZE TABLE command +func (v *VRepl) executeAnalyzeTable(ctx context.Context, conn *dbconnpool.DBConnection, tableName string) error { + parsed := sqlparser.BuildParsedQuery(sqlAnalyzeTable, tableName) + _, err := conn.ExecuteFetch(parsed.Query, 1, false) + return err +} + // readTableStatus reads table status information func (v *VRepl) readTableStatus(ctx context.Context, conn *dbconnpool.DBConnection, tableName string) (tableRows int64, err error) { parsed := 
sqlparser.BuildParsedQuery(sqlShowTableStatus, tableName) @@ -335,6 +347,11 @@ func (v *VRepl) analyzeAlter(ctx context.Context) error { } func (v *VRepl) analyzeTables(ctx context.Context, conn *dbconnpool.DBConnection) (err error) { + if v.analyzeTable { + if err := v.executeAnalyzeTable(ctx, conn, v.sourceTable); err != nil { + return err + } + } v.tableRows, err = v.readTableStatus(ctx, conn, v.sourceTable) if err != nil { return err @@ -482,20 +499,19 @@ func (v *VRepl) generateFilterQuery(ctx context.Context) error { case sourceCol.Type == vrepl.StringColumnType: // Check source and target charset/encoding. If needed, create // a binlogdatapb.CharsetConversion entry (later written to vreplication) - fromEncoding, ok := mysql.CharacterSetEncoding[sourceCol.Charset] - if !ok { + fromCollation := collations.Local().DefaultCollationForCharset(sourceCol.Charset) + if fromCollation == collations.Unknown { return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Character set %s not supported for column %s", sourceCol.Charset, sourceCol.Name) } - toEncoding, ok := mysql.CharacterSetEncoding[targetCol.Charset] + toCollation := collations.Local().DefaultCollationForCharset(targetCol.Charset) // Let's see if target col is at all textual - if targetCol.Type == vrepl.StringColumnType && !ok { + if targetCol.Type == vrepl.StringColumnType && toCollation == collations.Unknown { return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Character set %s not supported for column %s", targetCol.Charset, targetCol.Name) } - if fromEncoding == nil && toEncoding == nil && targetCol.Type != vrepl.JSONColumnType { - // Both source and target have trivial charsets + + if trivialCharset(fromCollation) && trivialCharset(toCollation) && targetCol.Type != vrepl.JSONColumnType { sb.WriteString(escapeName(name)) } else { - // encoding can be nil for trivial charsets, like utf8, ascii, binary, etc. 
v.convertCharset[targetName] = &binlogdatapb.CharsetConversion{ FromCharset: sourceCol.Charset, ToCharset: targetCol.Charset, @@ -518,6 +534,14 @@ func (v *VRepl) generateFilterQuery(ctx context.Context) error { return nil } +func trivialCharset(c collations.ID) bool { + if c == collations.Unknown { + return true + } + utf8mb4Charset := charset.Charset_utf8mb4{} + return utf8mb4Charset.IsSuperset(colldata.Lookup(c).Charset()) || c == collations.CollationBinaryID +} + func (v *VRepl) analyzeBinlogSource(ctx context.Context) { bls := &binlogdatapb.BinlogSource{ Keyspace: v.keyspace, @@ -566,7 +590,7 @@ func (v *VRepl) analyze(ctx context.Context, conn *dbconnpool.DBConnection) erro // generateInsertStatement generates the INSERT INTO _vt.replication stataement that creates the vreplication workflow func (v *VRepl) generateInsertStatement(ctx context.Context) (string, error) { - ig := vreplication.NewInsertGenerator(binlogplayer.BlpStopped, v.dbName) + ig := vreplication.NewInsertGenerator(binlogdatapb.VReplicationWorkflowState_Stopped, v.dbName) ig.AddRow(v.workflow, v.bls, v.pos, "", "in_order:REPLICA,PRIMARY", binlogdatapb.VReplicationWorkflowType_OnlineDDL, binlogdatapb.VReplicationWorkflowSubType_None, false) diff --git a/go/vt/vttablet/queryservice/fakes/stream_health_query_service.go b/go/vt/vttablet/queryservice/fakes/stream_health_query_service.go index 6b874f6f98f..6b430603088 100644 --- a/go/vt/vttablet/queryservice/fakes/stream_health_query_service.go +++ b/go/vt/vttablet/queryservice/fakes/stream_health_query_service.go @@ -19,8 +19,6 @@ package fakes import ( "context" - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -53,7 +51,7 @@ func NewStreamHealthQueryService(target *querypb.Target) *StreamHealthQueryServi return &StreamHealthQueryService{ QueryService: ErrorQueryService, healthResponses: make(chan *querypb.StreamHealthResponse, 
1000), - target: proto.Clone(target).(*querypb.Target), + target: target.CloneVT(), } } @@ -81,7 +79,7 @@ func (q *StreamHealthQueryService) StreamHealth(ctx context.Context, callback fu // The response will have default values typical for a healthy tablet. func (q *StreamHealthQueryService) AddDefaultHealthResponse() { q.healthResponses <- &querypb.StreamHealthResponse{ - Target: proto.Clone(q.target).(*querypb.Target), + Target: q.target.CloneVT(), Serving: true, RealtimeStats: &querypb.RealtimeStats{ ReplicationLagSeconds: DefaultReplicationLagSeconds, @@ -93,7 +91,7 @@ func (q *StreamHealthQueryService) AddDefaultHealthResponse() { // Only "qps" is different in this message. func (q *StreamHealthQueryService) AddHealthResponseWithQPS(qps float64) { q.healthResponses <- &querypb.StreamHealthResponse{ - Target: proto.Clone(q.target).(*querypb.Target), + Target: q.target.CloneVT(), Serving: true, RealtimeStats: &querypb.RealtimeStats{ Qps: qps, @@ -106,7 +104,7 @@ func (q *StreamHealthQueryService) AddHealthResponseWithQPS(qps float64) { // buffer channel. Only "replication_lag_seconds" is different in this message. func (q *StreamHealthQueryService) AddHealthResponseWithReplicationLag(replicationLag uint32) { q.healthResponses <- &querypb.StreamHealthResponse{ - Target: proto.Clone(q.target).(*querypb.Target), + Target: q.target.CloneVT(), Serving: true, RealtimeStats: &querypb.RealtimeStats{ ReplicationLagSeconds: replicationLag, @@ -118,7 +116,7 @@ func (q *StreamHealthQueryService) AddHealthResponseWithReplicationLag(replicati // buffer channel. Only "Serving" is different in this message. 
func (q *StreamHealthQueryService) AddHealthResponseWithNotServing() { q.healthResponses <- &querypb.StreamHealthResponse{ - Target: proto.Clone(q.target).(*querypb.Target), + Target: q.target.CloneVT(), Serving: false, RealtimeStats: &querypb.RealtimeStats{ ReplicationLagSeconds: DefaultReplicationLagSeconds, diff --git a/go/vt/vttablet/queryservice/queryservice.go b/go/vt/vttablet/queryservice/queryservice.go index abb68176ace..923d4140140 100644 --- a/go/vt/vttablet/queryservice/queryservice.go +++ b/go/vt/vttablet/queryservice/queryservice.go @@ -98,6 +98,10 @@ type QueryService interface { // VStreamRows streams rows of a table from the specified starting point. VStreamRows(ctx context.Context, request *binlogdatapb.VStreamRowsRequest, send func(*binlogdatapb.VStreamRowsResponse) error) error + // VStreamTables streams rows of all tables + + VStreamTables(ctx context.Context, request *binlogdatapb.VStreamTablesRequest, send func(*binlogdatapb.VStreamTablesResponse) error) error + // VStreamResults streams results along with the gtid of the snapshot. 
VStreamResults(ctx context.Context, target *querypb.Target, query string, send func(*binlogdatapb.VStreamResultsResponse) error) error diff --git a/go/vt/vttablet/queryservice/wrapped.go b/go/vt/vttablet/queryservice/wrapped.go index 70ee8a9d493..260333a51d2 100644 --- a/go/vt/vttablet/queryservice/wrapped.go +++ b/go/vt/vttablet/queryservice/wrapped.go @@ -254,6 +254,13 @@ func (ws *wrappedService) VStreamRows(ctx context.Context, request *binlogdatapb }) } +func (ws *wrappedService) VStreamTables(ctx context.Context, request *binlogdatapb.VStreamTablesRequest, send func(response *binlogdatapb.VStreamTablesResponse) error) error { + return ws.wrapper(ctx, request.Target, ws.impl, "VStreamTables", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (bool, error) { + innerErr := conn.VStreamTables(ctx, request, send) + return false, innerErr + }) +} + func (ws *wrappedService) VStreamResults(ctx context.Context, target *querypb.Target, query string, send func(*binlogdatapb.VStreamResultsResponse) error) error { return ws.wrapper(ctx, target, ws.impl, "VStreamResults", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (bool, error) { innerErr := conn.VStreamResults(ctx, target, query, send) diff --git a/go/vt/vttablet/sandboxconn/sandboxconn.go b/go/vt/vttablet/sandboxconn/sandboxconn.go index b46f5c7c2da..a0ebe5fc0b2 100644 --- a/go/vt/vttablet/sandboxconn/sandboxconn.go +++ b/go/vt/vttablet/sandboxconn/sandboxconn.go @@ -46,6 +46,9 @@ type SandboxConn struct { // These errors work for all functions. MustFailCodes map[vtrpcpb.Code]int + // ServingKeyspaces is a list of serving keyspaces + ServingKeyspaces []string + // These errors are triggered only for specific functions. // For now these are just for the 2PC functions. 
MustFailPrepare int @@ -111,7 +114,7 @@ type SandboxConn struct { // reserve id generator ReserveID atomic.Int64 - mapMu sync.Mutex //protects the map txIDToRID + mapMu sync.Mutex // protects the map txIDToRID txIDToRID map[int64]int64 sExecMu sync.Mutex @@ -415,9 +418,9 @@ func (sbc *SandboxConn) MessageAck(ctx context.Context, target *querypb.Target, // SandboxSQRowCount is the default number of fake splits returned. var SandboxSQRowCount = int64(10) -// StreamHealth is not implemented. +// StreamHealth always mocks a "healthy" result. func (sbc *SandboxConn) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error { - return fmt.Errorf("not implemented in test") + return nil } // ExpectVStreamStartPos makes the conn verify that that the next vstream request has the right startPos. @@ -500,6 +503,11 @@ func (sbc *SandboxConn) VStreamRows(ctx context.Context, request *binlogdatapb.V return fmt.Errorf("not implemented in test") } +// VStreamTables is part of the QueryService interface. +func (sbc *SandboxConn) VStreamTables(ctx context.Context, request *binlogdatapb.VStreamTablesRequest, send func(response *binlogdatapb.VStreamTablesResponse) error) error { + return fmt.Errorf("not implemented in test") +} + // VStreamResults is part of the QueryService interface. func (sbc *SandboxConn) VStreamResults(ctx context.Context, target *querypb.Target, query string, send func(*binlogdatapb.VStreamResultsResponse) error) error { return fmt.Errorf("not implemented in test") @@ -510,6 +518,11 @@ func (sbc *SandboxConn) QueryServiceByAlias(_ *topodatapb.TabletAlias, _ *queryp return sbc, nil } +// GetServingKeyspaces returns list of serving keyspaces. +func (sbc *SandboxConn) GetServingKeyspaces() []string { + return sbc.ServingKeyspaces +} + // HandlePanic is part of the QueryService interface. 
func (sbc *SandboxConn) HandlePanic(err *error) { } @@ -632,7 +645,7 @@ func (sbc *SandboxConn) getNextResult(stmt sqlparser.Statement) *sqltypes.Result *sqlparser.Union, *sqlparser.Show, sqlparser.Explain, - *sqlparser.OtherRead: + *sqlparser.Analyze: return getSingleRowResult() case *sqlparser.Set, sqlparser.DDLStatement, diff --git a/go/vt/vttablet/tabletconntest/fakequeryservice.go b/go/vt/vttablet/tabletconntest/fakequeryservice.go index b1779e9d8b5..734ee03ebed 100644 --- a/go/vt/vttablet/tabletconntest/fakequeryservice.go +++ b/go/vt/vttablet/tabletconntest/fakequeryservice.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "io" "testing" "vitess.io/vitess/go/vt/vttablet/queryservice" @@ -655,7 +656,7 @@ var TestStreamHealthStreamHealthResponse = &querypb.StreamHealthResponse{ }, Serving: true, - TabletExternallyReparentedTimestamp: 1234589, + PrimaryTermStartTimestamp: 1234589, RealtimeStats: &querypb.RealtimeStats{ CpuUsage: 1.0, @@ -681,7 +682,7 @@ func (f *FakeQueryService) StreamHealth(ctx context.Context, callback func(*quer if shr == nil { shr = TestStreamHealthStreamHealthResponse } - if err := callback(shr); err != nil { + if err := callback(shr); err != nil && err != io.EOF { f.t.Logf("StreamHealth callback failed: %v", err) } return nil @@ -697,6 +698,11 @@ func (f *FakeQueryService) VStreamRows(ctx context.Context, request *binlogdatap panic("not implemented") } +// VStreamTables is part of the QueryService interface. +func (f *FakeQueryService) VStreamTables(ctx context.Context, request *binlogdatapb.VStreamTablesRequest, send func(*binlogdatapb.VStreamTablesResponse) error) error { + panic("not implemented") +} + // VStreamResults is part of the QueryService interface. 
func (f *FakeQueryService) VStreamResults(ctx context.Context, target *querypb.Target, query string, send func(*binlogdatapb.VStreamResultsResponse) error) error { panic("not implemented") @@ -707,6 +713,11 @@ func (f *FakeQueryService) QueryServiceByAlias(_ *topodatapb.TabletAlias, _ *que panic("not implemented") } +// GetServingKeyspaces returns list of serving keyspaces. +func (f *FakeQueryService) GetServingKeyspaces() []string { + panic("not implemented") +} + // ReserveBeginExecute satisfies the Gateway interface func (f *FakeQueryService) ReserveBeginExecute(ctx context.Context, target *querypb.Target, preQueries []string, postBeginQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions) (queryservice.ReservedTransactionState, *sqltypes.Result, error) { panic("implement me") diff --git a/go/vt/vttablet/tabletconntest/tabletconntest.go b/go/vt/vttablet/tabletconntest/tabletconntest.go index 23d4a3ce2e2..b279ac53726 100644 --- a/go/vt/vttablet/tabletconntest/tabletconntest.go +++ b/go/vt/vttablet/tabletconntest/tabletconntest.go @@ -1049,7 +1049,7 @@ func SetProtocol(name string, protocol string) { tabletconn.RegisterFlags(fs) }) - servenv.ParseFlags(name) + servenv.ParseFlagsForTests(name) if err := pflag.Set(tabletProtocolFlagName, protocol); err != nil { msg := "failed to set flag %q to %q: %v" diff --git a/go/vt/vttablet/tabletmanager/framework_test.go b/go/vt/vttablet/tabletmanager/framework_test.go new file mode 100644 index 00000000000..4734ab9ee96 --- /dev/null +++ b/go/vt/vttablet/tabletmanager/framework_test.go @@ -0,0 +1,492 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tabletmanager + +import ( + "context" + "fmt" + "regexp" + "strings" + "sync" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/grpcclient" + "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vttablet/queryservice" + "vitess.io/vitess/go/vt/vttablet/tabletconn" + "vitess.io/vitess/go/vt/vttablet/tabletconntest" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + "vitess.io/vitess/go/vt/vttablet/tmclient" + "vitess.io/vitess/go/vt/vttablet/tmclienttest" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +const ( + gtidFlavor = "MySQL56" + gtidPosition = "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-220" +) + +func init() { + tabletconn.RegisterDialer("grpc", func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + return &tabletconntest.FakeQueryService{ + StreamHealthResponse: &querypb.StreamHealthResponse{ + Serving: true, + Target: &querypb.Target{ + Keyspace: tablet.Keyspace, + Shard: tablet.Shard, + TabletType: tablet.Type, + Cell: tablet.Alias.Cell, + }, + RealtimeStats: &querypb.RealtimeStats{}, + }, + }, nil + 
}) +} + +type testEnv struct { + mu sync.Mutex + ctx context.Context + ts *topo.Server + cells []string + mysqld *mysqlctl.FakeMysqlDaemon + tmc *fakeTMClient + dbName string + protoName string +} + +func newTestEnv(t *testing.T, ctx context.Context, sourceKeyspace string, sourceShards []string) *testEnv { + tenv := &testEnv{ + ctx: context.Background(), + tmc: newFakeTMClient(), + cells: []string{"zone1"}, + dbName: "tmtestdb", + protoName: t.Name(), + } + tenv.mu.Lock() + defer tenv.mu.Unlock() + tenv.ts = memorytopo.NewServer(ctx, tenv.cells...) + tenv.tmc.sourceKeyspace = sourceKeyspace + tenv.tmc.sourceShards = sourceShards + tenv.tmc.schema = defaultSchema + + tabletconn.RegisterDialer(t.Name(), func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + tenv.mu.Lock() + defer tenv.mu.Unlock() + if qs, ok := tenv.tmc.tablets[int(tablet.Alias.Uid)]; ok { + return qs, nil + } + return nil, fmt.Errorf("tablet %d not found", tablet.Alias.Uid) + }) + tabletconntest.SetProtocol(fmt.Sprintf("go.vt.vttablet.tabletmanager.framework_test_%s", t.Name()), tenv.protoName) + tmclient.RegisterTabletManagerClientFactory(t.Name(), func() tmclient.TabletManagerClient { + return tenv.tmc + }) + tmclienttest.SetProtocol(fmt.Sprintf("go.vt.vttablet.tabletmanager.framework_test_%s", t.Name()), tenv.protoName) + + tenv.mysqld = mysqlctl.NewFakeMysqlDaemon(fakesqldb.New(t)) + var err error + tenv.mysqld.CurrentPrimaryPosition, err = replication.ParsePosition(gtidFlavor, gtidPosition) + require.NoError(t, err) + + return tenv +} + +func (tenv *testEnv) close() { + tenv.mu.Lock() + defer tenv.mu.Unlock() + tenv.ts.Close() + tenv.mysqld.Close() +} + +//-------------------------------------- +// Tablets + +func (tenv *testEnv) addTablet(t *testing.T, id int, keyspace, shard string) *fakeTabletConn { + tenv.mu.Lock() + defer tenv.mu.Unlock() + tablet := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: tenv.cells[0], + Uid: uint32(id), 
+ }, + Keyspace: keyspace, + Shard: shard, + Type: topodatapb.TabletType_PRIMARY, + PortMap: map[string]int32{ + tenv.protoName: int32(id), + }, + } + if err := tenv.ts.InitTablet(tenv.ctx, tablet, false /* allowPrimaryOverride */, true /* createShardAndKeyspace */, false /* allowUpdate */); err != nil { + panic(err) + } + if _, err := tenv.ts.UpdateShardFields(tenv.ctx, keyspace, shard, func(si *topo.ShardInfo) error { + si.PrimaryAlias = tablet.Alias + si.IsPrimaryServing = true + return nil + }); err != nil { + panic(err) + } + if err := tenv.ts.EnsureVSchema(tenv.ctx, keyspace); err != nil { + panic(err) + } + + vrdbClient := binlogplayer.NewMockDBClient(t) + vrdbClient.Tag = fmt.Sprintf("tablet:%d", id) + tenv.tmc.tablets[id] = &fakeTabletConn{ + tablet: tablet, + vrdbClient: vrdbClient, + } + + dbClientFactory := func() binlogplayer.DBClient { + return tenv.tmc.tablets[id].vrdbClient + } + tenv.tmc.tablets[id].vrengine = vreplication.NewTestEngine(tenv.ts, tenv.cells[0], tenv.mysqld, dbClientFactory, dbClientFactory, tenv.dbName, nil) + tenv.tmc.tablets[id].vrdbClient.ExpectRequest(fmt.Sprintf("select * from _vt.vreplication where db_name='%s'", tenv.dbName), &sqltypes.Result{}, nil) + tenv.tmc.tablets[id].vrengine.Open(tenv.ctx) + require.True(t, tenv.tmc.tablets[id].vrengine.IsOpen(), "vreplication engine was not open") + + tenv.tmc.tablets[id].tm = &TabletManager{ + VREngine: tenv.tmc.tablets[id].vrengine, + DBConfigs: &dbconfigs.DBConfigs{ + DBName: tenv.dbName, + }, + } + + return tenv.tmc.tablets[id] +} + +func (tenv *testEnv) deleteTablet(tablet *topodatapb.Tablet) { + tenv.mu.Lock() + defer tenv.mu.Unlock() + tenv.tmc.tablets[int(tablet.Alias.Uid)].vrdbClient.Close() + tenv.tmc.tablets[int(tablet.Alias.Uid)].vrengine.Close() + tenv.ts.DeleteTablet(tenv.ctx, tablet.Alias) + // This is not automatically removed from shard replication, which results in log spam. 
+ topo.DeleteTabletReplicationData(tenv.ctx, tenv.ts, tablet) +} + +// fakeTabletConn implements the TabletConn and QueryService interfaces. +type fakeTabletConn struct { + queryservice.QueryService + tablet *topodatapb.Tablet + tm *TabletManager + vrdbClient *binlogplayer.MockDBClient + vrengine *vreplication.Engine +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) Begin(ctx context.Context, target *querypb.Target, options *querypb.ExecuteOptions) (queryservice.TransactionState, error) { + return queryservice.TransactionState{ + TransactionID: 1, + }, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) Commit(ctx context.Context, target *querypb.Target, transactionID int64) (int64, error) { + return 0, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) Rollback(ctx context.Context, target *querypb.Target, transactionID int64) (int64, error) { + return 0, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) Prepare(ctx context.Context, target *querypb.Target, transactionID int64, dtid string) (err error) { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) CommitPrepared(ctx context.Context, target *querypb.Target, dtid string) (err error) { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) RollbackPrepared(ctx context.Context, target *querypb.Target, dtid string, originalID int64) (err error) { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) CreateTransaction(ctx context.Context, target *querypb.Target, dtid string, participants []*querypb.Target) (err error) { + return nil +} + +// fakeTabletConn implements the QueryService interface. 
+func (ftc *fakeTabletConn) StartCommit(ctx context.Context, target *querypb.Target, transactionID int64, dtid string) (err error) { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) SetRollback(ctx context.Context, target *querypb.Target, dtid string, transactionID int64) (err error) { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) ConcludeTransaction(ctx context.Context, target *querypb.Target, dtid string) (err error) { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) ReadTransaction(ctx context.Context, target *querypb.Target, dtid string) (metadata *querypb.TransactionMetadata, err error) { + return nil, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) Execute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, transactionID, reservedID int64, options *querypb.ExecuteOptions) (*sqltypes.Result, error) { + return nil, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, reservedID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) error { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) BeginExecute(ctx context.Context, target *querypb.Target, preQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, reservedID int64, options *querypb.ExecuteOptions) (queryservice.TransactionState, *sqltypes.Result, error) { + return queryservice.TransactionState{ + TransactionID: 1, + }, nil, nil +} + +// fakeTabletConn implements the QueryService interface. 
+func (ftc *fakeTabletConn) BeginStreamExecute(ctx context.Context, target *querypb.Target, preQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, reservedID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) (queryservice.TransactionState, error) { + return queryservice.TransactionState{ + TransactionID: 1, + }, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) MessageStream(ctx context.Context, target *querypb.Target, name string, callback func(*sqltypes.Result) error) error { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) MessageAck(ctx context.Context, target *querypb.Target, name string, ids []*querypb.Value) (count int64, err error) { + return 0, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) VStream(ctx context.Context, request *binlogdatapb.VStreamRequest, send func([]*binlogdatapb.VEvent) error) error { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) VStreamRows(ctx context.Context, request *binlogdatapb.VStreamRowsRequest, send func(*binlogdatapb.VStreamRowsResponse) error) error { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) VStreamResults(ctx context.Context, target *querypb.Target, query string, send func(*binlogdatapb.VStreamResultsResponse) error) error { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) HandlePanic(err *error) { +} + +// fakeTabletConn implements the QueryService interface. 
+func (ftc *fakeTabletConn) ReserveBeginExecute(ctx context.Context, target *querypb.Target, preQueries []string, postBeginQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions) (queryservice.ReservedTransactionState, *sqltypes.Result, error) { + return queryservice.ReservedTransactionState{ + ReservedID: 1, + }, nil, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) ReserveBeginStreamExecute(ctx context.Context, target *querypb.Target, preQueries []string, postBeginQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) (queryservice.ReservedTransactionState, error) { + return queryservice.ReservedTransactionState{ + ReservedID: 1, + }, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) ReserveExecute(ctx context.Context, target *querypb.Target, preQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions) (queryservice.ReservedState, *sqltypes.Result, error) { + return queryservice.ReservedState{ + ReservedID: 1, + }, nil, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) ReserveStreamExecute(ctx context.Context, target *querypb.Target, preQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) (queryservice.ReservedState, error) { + return queryservice.ReservedState{ + ReservedID: 1, + }, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) Release(ctx context.Context, target *querypb.Target, transactionID, reservedID int64) error { + return nil +} + +// fakeTabletConn implements the QueryService interface. 
+func (ftc *fakeTabletConn) GetSchema(ctx context.Context, target *querypb.Target, tableType querypb.SchemaTableType, tableNames []string, callback func(schemaRes *querypb.GetSchemaResponse) error) error { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) Close(ctx context.Context) error { + return nil +} + +func (ftc *fakeTabletConn) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error { + return callback(&querypb.StreamHealthResponse{ + Serving: true, + Target: &querypb.Target{ + Keyspace: ftc.tablet.Keyspace, + Shard: ftc.tablet.Shard, + TabletType: ftc.tablet.Type, + Cell: ftc.tablet.Alias.Cell, + }, + RealtimeStats: &querypb.RealtimeStats{}, + }) +} + +//---------------------------------------------- +// fakeTMClient + +type fakeTMClient struct { + tmclient.TabletManagerClient + sourceKeyspace string + sourceShards []string + tablets map[int]*fakeTabletConn + schema *tabletmanagerdatapb.SchemaDefinition + vreQueries map[int]map[string]*querypb.QueryResult +} + +func newFakeTMClient() *fakeTMClient { + return &fakeTMClient{ + tablets: make(map[int]*fakeTabletConn), + vreQueries: make(map[int]map[string]*querypb.QueryResult), + schema: &tabletmanagerdatapb.SchemaDefinition{}, + } +} + +func (tmc *fakeTMClient) GetSchema(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.GetSchemaRequest) (*tabletmanagerdatapb.SchemaDefinition, error) { + return tmc.schema, nil +} + +func (tmc *fakeTMClient) SetSchema(schema *tabletmanagerdatapb.SchemaDefinition) { + tmc.schema = schema +} + +func (tmc *fakeTMClient) ExecuteFetchAsApp(ctx context.Context, tablet *topodatapb.Tablet, usePool bool, req *tabletmanagerdatapb.ExecuteFetchAsAppRequest) (*querypb.QueryResult, error) { + // Reuse VReplicationExec + return tmc.VReplicationExec(ctx, tablet, string(req.Query)) +} + +func (tmc *fakeTMClient) ExecuteFetchAsDba(ctx context.Context, tablet *topodatapb.Tablet, 
usePool bool, req *tabletmanagerdatapb.ExecuteFetchAsDbaRequest) (*querypb.QueryResult, error) { + // Reuse VReplicationExec + return tmc.VReplicationExec(ctx, tablet, string(req.Query)) +} + +// setVReplicationExecResults allows you to specify VReplicationExec queries +// and their results. You can specify exact strings or strings prefixed with +// a '/', in which case they will be treated as a valid regexp. +func (tmc *fakeTMClient) setVReplicationExecResults(tablet *topodatapb.Tablet, query string, result *sqltypes.Result) { + queries, ok := tmc.vreQueries[int(tablet.Alias.Uid)] + if !ok { + queries = make(map[string]*querypb.QueryResult) + tmc.vreQueries[int(tablet.Alias.Uid)] = queries + } + queries[query] = sqltypes.ResultToProto3(result) +} + +func (tmc *fakeTMClient) VReplicationExec(ctx context.Context, tablet *topodatapb.Tablet, query string) (*querypb.QueryResult, error) { + if result, ok := tmc.vreQueries[int(tablet.Alias.Uid)][query]; ok { + return result, nil + } + for qry, res := range tmc.vreQueries[int(tablet.Alias.Uid)] { + if strings.HasPrefix(qry, "/") { + re := regexp.MustCompile(qry) + if re.MatchString(query) { + return res, nil + } + } + } + return nil, fmt.Errorf("query %q not found for tablet %d", query, tablet.Alias.Uid) +} + +func (tmc *fakeTMClient) CreateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) { + return tmc.tablets[int(tablet.Alias.Uid)].tm.CreateVReplicationWorkflow(ctx, req) +} + +func (tmc *fakeTMClient) ReadVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { + resp := &tabletmanagerdatapb.ReadVReplicationWorkflowResponse{ + Workflow: req.Workflow, + WorkflowSubType: binlogdatapb.VReplicationWorkflowSubType_None, + WorkflowType:
binlogdatapb.VReplicationWorkflowType_MoveTables, + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType_PRIMARY}, + Streams: make([]*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream, len(tmc.sourceShards)), + } + rules := make([]*binlogdatapb.Rule, len(defaultSchema.TableDefinitions)) + for i, table := range defaultSchema.TableDefinitions { + rules[i] = &binlogdatapb.Rule{ + Match: table.Name, + Filter: tablet.Shard, + } + } + for i, shard := range tmc.sourceShards { + resp.Streams[i] = &tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{ + Id: int32(i + 1), + Bls: &binlogdatapb.BinlogSource{ + Keyspace: tmc.sourceKeyspace, + Shard: shard, + Filter: &binlogdatapb.Filter{ + Rules: rules, + }, + }, + } + } + + return resp, nil +} + +func (tmc *fakeTMClient) PrimaryPosition(ctx context.Context, tablet *topodatapb.Tablet) (string, error) { + return fmt.Sprintf("%s/%s", gtidFlavor, gtidPosition), nil +} + +func (tmc *fakeTMClient) VReplicationWaitForPos(ctx context.Context, tablet *topodatapb.Tablet, id int32, pos string) error { + return nil +} + +func (tmc *fakeTMClient) ExecuteFetchAsAllPrivs(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ExecuteFetchAsAllPrivsRequest) (*querypb.QueryResult, error) { + return &querypb.QueryResult{ + RowsAffected: 1, + }, nil +} + +func (tmc *fakeTMClient) VDiff(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.VDiffRequest) (*tabletmanagerdatapb.VDiffResponse, error) { + return &tabletmanagerdatapb.VDiffResponse{ + Id: 1, + VdiffUuid: req.VdiffUuid, + Output: &querypb.QueryResult{ + RowsAffected: 1, + }, + }, nil +} diff --git a/go/vt/vttablet/tabletmanager/restore.go b/go/vt/vttablet/tabletmanager/restore.go index 7822b361e62..4512b546f2c 100644 --- a/go/vt/vttablet/tabletmanager/restore.go +++ b/go/vt/vttablet/tabletmanager/restore.go @@ -24,6 +24,9 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/replication" + 
"vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/mysql" @@ -68,7 +71,18 @@ func registerRestoreFlags(fs *pflag.FlagSet) { } var ( - // Flags for PITR + // Flags for incremental restore (PITR) - new iteration + restoreToTimestampStr string + restoreToPos string +) + +func registerIncrementalRestoreFlags(fs *pflag.FlagSet) { + fs.StringVar(&restoreToTimestampStr, "restore-to-timestamp", restoreToTimestampStr, "(init incremental restore parameter) if set, run a point in time recovery that restores up to the given timestamp, if possible. Given timestamp in RFC3339 format. Example: '2006-01-02T15:04:05Z07:00'") + fs.StringVar(&restoreToPos, "restore-to-pos", restoreToPos, "(init incremental restore parameter) if set, run a point in time recovery that ends with the given position. This will attempt to use one full backup followed by zero or more incremental backups") +} + +var ( + // Flags for PITR - old iteration binlogHost string binlogPort int binlogUser string @@ -96,6 +110,9 @@ func init() { servenv.OnParseFor("vtcombo", registerRestoreFlags) servenv.OnParseFor("vttablet", registerRestoreFlags) + servenv.OnParseFor("vtcombo", registerIncrementalRestoreFlags) + servenv.OnParseFor("vttablet", registerIncrementalRestoreFlags) + servenv.OnParseFor("vtcombo", registerPointInTimeRestoreFlags) servenv.OnParseFor("vttablet", registerPointInTimeRestoreFlags) @@ -107,7 +124,14 @@ func init() { // It will either work, fail gracefully, or return // an error in case of a non-recoverable error. // It takes the action lock so no RPC interferes. 
-func (tm *TabletManager) RestoreData(ctx context.Context, logger logutil.Logger, waitForBackupInterval time.Duration, deleteBeforeRestore bool, backupTime time.Time) error { +func (tm *TabletManager) RestoreData( + ctx context.Context, + logger logutil.Logger, + waitForBackupInterval time.Duration, + deleteBeforeRestore bool, + backupTime time.Time, + restoreToTimetamp time.Time, + restoreToPos string) error { if err := tm.lock(ctx); err != nil { return err } @@ -152,7 +176,9 @@ func (tm *TabletManager) RestoreData(ctx context.Context, logger logutil.Logger, startTime = time.Now() req := &tabletmanagerdatapb.RestoreFromBackupRequest{ - BackupTime: logutil.TimeToProto(backupTime), + BackupTime: protoutil.TimeToProto(backupTime), + RestoreToPos: restoreToPos, + RestoreToTimestamp: protoutil.TimeToProto(restoreToTimetamp), } err = tm.restoreDataLocked(ctx, logger, waitForBackupInterval, deleteBeforeRestore, req) if err != nil { @@ -182,12 +208,12 @@ func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.L return vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, fmt.Sprintf("snapshot keyspace %v has no base_keyspace set", tablet.Keyspace)) } keyspace = keyspaceInfo.BaseKeyspace - log.Infof("Using base_keyspace %v to restore keyspace %v using a backup time of %v", keyspace, tablet.Keyspace, logutil.ProtoToTime(request.BackupTime)) + log.Infof("Using base_keyspace %v to restore keyspace %v using a backup time of %v", keyspace, tablet.Keyspace, protoutil.TimeFromProto(request.BackupTime).UTC()) } - startTime := logutil.ProtoToTime(request.BackupTime) + startTime := protoutil.TimeFromProto(request.BackupTime).UTC() if startTime.IsZero() { - startTime = logutil.ProtoToTime(keyspaceInfo.SnapshotTime) + startTime = protoutil.TimeFromProto(keyspaceInfo.SnapshotTime).UTC() } params := mysqlctl.RestoreParams{ @@ -204,13 +230,21 @@ func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.L DryRun: request.DryRun, Stats: 
backupstats.RestoreStats(), } + restoreToTimestamp := protoutil.TimeFromProto(request.RestoreToTimestamp).UTC() + if request.RestoreToPos != "" && !restoreToTimestamp.IsZero() { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "--restore-to-pos and --restore-to-timestamp are mutually exclusive") + } if request.RestoreToPos != "" { - pos, err := mysql.DecodePosition(request.RestoreToPos) + pos, err := replication.DecodePosition(request.RestoreToPos) if err != nil { - return vterrors.Wrapf(err, "restore failed: unable to decode --restore_to_pos: %s", request.RestoreToPos) + return vterrors.Wrapf(err, "restore failed: unable to decode --restore-to-pos: %s", request.RestoreToPos) } params.RestoreToPos = pos } + if !restoreToTimestamp.IsZero() { + // Restore to given timestamp + params.RestoreToTimestamp = restoreToTimestamp + } params.Logger.Infof("Restore: original tablet type=%v", originalType) // Check whether we're going to restore before changing to RESTORE type, @@ -236,7 +270,7 @@ func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.L for { backupManifest, err = mysqlctl.Restore(ctx, params) if backupManifest != nil { - statsRestoreBackupPosition.Set(mysql.EncodePosition(backupManifest.Position)) + statsRestoreBackupPosition.Set(replication.EncodePosition(backupManifest.Position)) statsRestoreBackupTime.Set(backupManifest.BackupTime) } params.Logger.Infof("Restore: got a restore manifest: %v, err=%v, waitForBackupInterval=%v", backupManifest, err, waitForBackupInterval) @@ -256,10 +290,10 @@ func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.L } } - var pos mysql.Position + var pos replication.Position if backupManifest != nil { pos = backupManifest.Position - params.Logger.Infof("Restore: pos=%v", mysql.EncodePosition(pos)) + params.Logger.Infof("Restore: pos=%v", replication.EncodePosition(pos)) } // If SnapshotTime is set , then apply the incremental change if keyspaceInfo.SnapshotTime != nil { @@ 
-329,7 +363,7 @@ func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.L // restoreToTimeFromBinlog restores to the snapshot time of the keyspace // currently this works with mysql based database only (as it uses mysql specific queries for restoring) -func (tm *TabletManager) restoreToTimeFromBinlog(ctx context.Context, pos mysql.Position, restoreTime *vttime.Time) error { +func (tm *TabletManager) restoreToTimeFromBinlog(ctx context.Context, pos replication.Position, restoreTime *vttime.Time) error { // validate the minimal settings necessary for connecting to binlog server if binlogHost == "" || binlogPort <= 0 || binlogUser == "" { log.Warning("invalid binlog server setting, restoring to last available backup.") @@ -369,7 +403,7 @@ func (tm *TabletManager) restoreToTimeFromBinlog(ctx context.Context, pos mysql. // beforePos is the GTID of the last event before restoreTime. This is the GTID upto which replication will be applied // afterPos can be used directly in the query `START SLAVE UNTIL SQL_BEFORE_GTIDS = ”` // beforePos will be used to check if replication was able to catch up from the binlog server -func (tm *TabletManager) getGTIDFromTimestamp(ctx context.Context, pos mysql.Position, restoreTime int64) (afterPos string, beforePos string, err error) { +func (tm *TabletManager) getGTIDFromTimestamp(ctx context.Context, pos replication.Position, restoreTime int64) (afterPos string, beforePos string, err error) { connParams := &mysql.ConnParams{ Host: binlogHost, Port: binlogPort, @@ -412,11 +446,11 @@ func (tm *TabletManager) getGTIDFromTimestamp(ctx context.Context, pos mysql.Pos gtidsChan := make(chan []string, 1) go func() { - err := vsClient.VStream(ctx, mysql.EncodePosition(pos), filter, func(events []*binlogdatapb.VEvent) error { + err := vsClient.VStream(ctx, replication.EncodePosition(pos), filter, func(events []*binlogdatapb.VEvent) error { for _, event := range events { if event.Gtid != "" { // check if we reached the 
lastPos then return - eventPos, err := mysql.DecodePosition(event.Gtid) + eventPos, err := replication.DecodePosition(event.Gtid) if err != nil { return err } @@ -459,14 +493,14 @@ func (tm *TabletManager) getGTIDFromTimestamp(ctx context.Context, pos mysql.Pos func (tm *TabletManager) catchupToGTID(ctx context.Context, afterGTIDPos string, beforeGTIDPos string) error { var afterGTIDStr string if afterGTIDPos != "" { - afterGTIDParsed, err := mysql.DecodePosition(afterGTIDPos) + afterGTIDParsed, err := replication.DecodePosition(afterGTIDPos) if err != nil { return err } afterGTIDStr = afterGTIDParsed.GTIDSet.Last() } - beforeGTIDPosParsed, err := mysql.DecodePosition(beforeGTIDPos) + beforeGTIDPosParsed, err := replication.DecodePosition(beforeGTIDPos) if err != nil { return err } @@ -565,7 +599,7 @@ func (tm *TabletManager) disableReplication(ctx context.Context) error { return nil } -func (tm *TabletManager) startReplication(ctx context.Context, pos mysql.Position, tabletType topodatapb.TabletType) error { +func (tm *TabletManager) startReplication(ctx context.Context, pos replication.Position, tabletType topodatapb.TabletType) error { cmds := []string{ "STOP SLAVE", "RESET SLAVE ALL", // "ALL" makes it forget primary host:port. 
@@ -605,7 +639,7 @@ func (tm *TabletManager) startReplication(ctx context.Context, pos mysql.Positio log.Warningf("Can't get primary replication position after restore: %v", err) return nil } - primaryPos, err := mysql.DecodePosition(posStr) + primaryPos, err := replication.DecodePosition(posStr) if err != nil { return vterrors.Wrapf(err, "can't decode primary replication position: %q", posStr) } @@ -629,17 +663,3 @@ func (tm *TabletManager) startReplication(ctx context.Context, pos mysql.Positio return nil } - -func (tm *TabletManager) getLocalMetadataValues(tabletType topodatapb.TabletType) map[string]string { - tablet := tm.Tablet() - values := map[string]string{ - "Alias": topoproto.TabletAliasString(tablet.Alias), - "ClusterAlias": fmt.Sprintf("%s.%s", tablet.Keyspace, tablet.Shard), - "DataCenter": tablet.Alias.Cell, - "PromotionRule": "must_not", - } - if isPrimaryEligible(tabletType) { - values["PromotionRule"] = "neutral" - } - return values -} diff --git a/go/vt/vttablet/tabletmanager/rpc_agent.go b/go/vt/vttablet/tabletmanager/rpc_agent.go index a3fb4ba6be7..06c0e5cda94 100644 --- a/go/vt/vttablet/tabletmanager/rpc_agent.go +++ b/go/vt/vttablet/tabletmanager/rpc_agent.go @@ -63,6 +63,8 @@ type RPCTM interface { ApplySchema(ctx context.Context, change *tmutils.SchemaChange) (*tabletmanagerdatapb.SchemaChangeResult, error) + ResetSequences(ctx context.Context, tables []string) error + LockTables(ctx context.Context) error UnlockTables(ctx context.Context) error @@ -97,9 +99,12 @@ type RPCTM interface { WaitForPosition(ctx context.Context, pos string) error // VReplication API + CreateVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) + DeleteVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.DeleteVReplicationWorkflowRequest) (*tabletmanagerdatapb.DeleteVReplicationWorkflowResponse, error) + ReadVReplicationWorkflow(ctx 
context.Context, req *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) VReplicationExec(ctx context.Context, query string) (*querypb.QueryResult, error) VReplicationWaitForPos(ctx context.Context, id int32, pos string) error - UpdateVRWorkflow(ctx context.Context, req *tabletmanagerdatapb.UpdateVRWorkflowRequest) (*tabletmanagerdatapb.UpdateVRWorkflowResponse, error) + UpdateVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowResponse, error) // VDiff API VDiff(ctx context.Context, req *tabletmanagerdatapb.VDiffRequest) (*tabletmanagerdatapb.VDiffResponse, error) @@ -139,4 +144,7 @@ type RPCTM interface { // HandleRPCPanic is to be called in a defer statement in each // RPC input point. HandleRPCPanic(ctx context.Context, name string, args, reply any, verbose bool, err *error) + + // Throttler + CheckThrottler(ctx context.Context, request *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) } diff --git a/go/vt/vttablet/tabletmanager/rpc_backup.go b/go/vt/vttablet/tabletmanager/rpc_backup.go index eb365843e65..b3d2e2794f6 100644 --- a/go/vt/vttablet/tabletmanager/rpc_backup.go +++ b/go/vt/vttablet/tabletmanager/rpc_backup.go @@ -161,6 +161,7 @@ func (tm *TabletManager) Backup(ctx context.Context, logger logutil.Logger, req TabletAlias: topoproto.TabletAliasString(tablet.Alias), BackupTime: time.Now(), Stats: backupstats.BackupStats(), + UpgradeSafe: req.UpgradeSafe, } returnErr := mysqlctl.Backup(ctx, backupParams) diff --git a/go/vt/vttablet/tabletmanager/rpc_query.go b/go/vt/vttablet/tabletmanager/rpc_query.go index 37ab3367009..0d21cee7677 100644 --- a/go/vt/vttablet/tabletmanager/rpc_query.go +++ b/go/vt/vttablet/tabletmanager/rpc_query.go @@ -19,10 +19,10 @@ package tabletmanager import ( "context" + "vitess.io/vitess/go/constants/sidecar" 
"vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" querypb "vitess.io/vitess/go/vt/proto/query" @@ -66,7 +66,7 @@ func (tm *TabletManager) ExecuteFetchAsDba(ctx context.Context, req *tabletmanag } // Replace any provided sidecar database qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecardb.DefaultName, sidecardb.GetName()) + uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } @@ -107,7 +107,7 @@ func (tm *TabletManager) ExecuteFetchAsAllPrivs(ctx context.Context, req *tablet } // Replace any provided sidecar database qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecardb.DefaultName, sidecardb.GetName()) + uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } @@ -131,7 +131,7 @@ func (tm *TabletManager) ExecuteFetchAsApp(ctx context.Context, req *tabletmanag } defer conn.Recycle() // Replace any provided sidecar database qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecardb.DefaultName, sidecardb.GetName()) + uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } @@ -145,7 +145,7 @@ func (tm *TabletManager) ExecuteQuery(ctx context.Context, req *tabletmanagerdat tablet := tm.Tablet() target := &querypb.Target{Keyspace: tablet.Keyspace, Shard: tablet.Shard, TabletType: tablet.Type} // Replace any provided sidecar database qualifiers with the correct one. 
- uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecardb.DefaultName, sidecardb.GetName()) + uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/rpc_replication.go b/go/vt/vttablet/tabletmanager/rpc_replication.go index 62cf93e6247..9981219e4a2 100644 --- a/go/vt/vttablet/tabletmanager/rpc_replication.go +++ b/go/vt/vttablet/tabletmanager/rpc_replication.go @@ -22,14 +22,14 @@ import ( "strings" "time" - "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" @@ -37,27 +37,13 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) -var disableReplicationManager bool - -func registerReplicationFlags(fs *pflag.FlagSet) { - fs.Bool("use_super_read_only", true, "Set super_read_only flag when performing planned failover.") - fs.MarkDeprecated("use_super_read_only", "From v17 onwards MySQL server will always try to start with super_read_only=ON") - fs.BoolVar(&disableReplicationManager, "disable-replication-manager", disableReplicationManager, "Disable replication manager to prevent replication repairs.") - fs.MarkDeprecated("disable-replication-manager", "Replication manager is deleted") -} - -func init() { - servenv.OnParseFor("vtcombo", registerReplicationFlags) - servenv.OnParseFor("vttablet", registerReplicationFlags) -} - // ReplicationStatus returns the replication status func (tm *TabletManager) ReplicationStatus(ctx context.Context) (*replicationdatapb.Status, error) { status, err := tm.MysqlDaemon.ReplicationStatus() if err != nil { return nil, err } - return 
mysql.ReplicationStatusToProto(status), nil + return replication.ReplicationStatusToProto(status), nil } // FullStatus returns the full status of MySQL including the replication information, semi-sync information, GTID information among others @@ -81,7 +67,7 @@ func (tm *TabletManager) FullStatus(ctx context.Context) (*replicationdatapb.Ful return nil, err } if err == nil { - replicationStatusProto = mysql.ReplicationStatusToProto(replicationStatus) + replicationStatusProto = replication.ReplicationStatusToProto(replicationStatus) } // Primary status - "SHOW MASTER STATUS" @@ -91,7 +77,7 @@ func (tm *TabletManager) FullStatus(ctx context.Context) (*replicationdatapb.Ful return nil, err } if err == nil { - primaryStatusProto = mysql.PrimaryStatusToProto(primaryStatus) + primaryStatusProto = replication.PrimaryStatusToProto(primaryStatus) } // Purged GTID set @@ -101,10 +87,21 @@ func (tm *TabletManager) FullStatus(ctx context.Context) (*replicationdatapb.Ful } // Version string "majorVersion.minorVersion.patchRelease" - version := tm.MysqlDaemon.GetVersionString(ctx) + version, err := tm.MysqlDaemon.GetVersionString(ctx) + if err != nil { + return nil, err + } + _, v, err := mysqlctl.ParseVersionString(version) + if err != nil { + return nil, err + } + version = fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch) // Version comment "select @@global.version_comment" - versionComment := tm.MysqlDaemon.GetVersionComment(ctx) + versionComment, err := tm.MysqlDaemon.GetVersionComment(ctx) + if err != nil { + return nil, err + } // Read only - "SHOW VARIABLES LIKE 'read_only'" readOnly, err := tm.MysqlDaemon.IsReadOnly() @@ -147,7 +144,7 @@ func (tm *TabletManager) FullStatus(ctx context.Context) (*replicationdatapb.Ful ServerUuid: serverUUID, ReplicationStatus: replicationStatusProto, PrimaryStatus: primaryStatusProto, - GtidPurged: mysql.EncodePosition(purgedGTIDs), + GtidPurged: replication.EncodePosition(purgedGTIDs), Version: version, VersionComment: versionComment, 
ReadOnly: readOnly, @@ -173,7 +170,7 @@ func (tm *TabletManager) PrimaryStatus(ctx context.Context) (*replicationdatapb. if err != nil { return nil, err } - return mysql.PrimaryStatusToProto(status), nil + return replication.PrimaryStatusToProto(status), nil } // PrimaryPosition returns the position of a primary database @@ -182,13 +179,13 @@ func (tm *TabletManager) PrimaryPosition(ctx context.Context) (string, error) { if err != nil { return "", err } - return mysql.EncodePosition(pos), nil + return replication.EncodePosition(pos), nil } // WaitForPosition waits until replication reaches the desired position func (tm *TabletManager) WaitForPosition(ctx context.Context, pos string) error { log.Infof("WaitForPosition: %v", pos) - mpos, err := mysql.DecodePosition(pos) + mpos, err := replication.DecodePosition(pos) if err != nil { return err } @@ -225,7 +222,7 @@ func (tm *TabletManager) StopReplicationMinimum(ctx context.Context, position st } defer tm.unlock() - pos, err := mysql.DecodePosition(position) + pos, err := replication.DecodePosition(position) if err != nil { return "", err } @@ -241,7 +238,7 @@ func (tm *TabletManager) StopReplicationMinimum(ctx context.Context, position st if err != nil { return "", err } - return mysql.EncodePosition(pos), nil + return replication.EncodePosition(pos), nil } // StartReplication will start the mysql. 
Works both when Vitess manages @@ -276,7 +273,7 @@ func (tm *TabletManager) StartReplicationUntilAfter(ctx context.Context, positio waitCtx, cancel := context.WithTimeout(ctx, waitTime) defer cancel() - pos, err := mysql.DecodePosition(position) + pos, err := replication.DecodePosition(position) if err != nil { return err } @@ -311,7 +308,7 @@ func (tm *TabletManager) InitPrimary(ctx context.Context, semiSync bool) (string // Setting super_read_only `OFF` so that we can run the DDL commands if _, err := tm.MysqlDaemon.SetSuperReadOnly(false); err != nil { - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Number() == mysql.ERUnknownSystemVariable { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERUnknownSystemVariable { log.Warningf("server does not know about super_read_only, continuing anyway...") } else { return "", err @@ -348,14 +345,14 @@ func (tm *TabletManager) InitPrimary(ctx context.Context, semiSync bool) (string return "", err } - return mysql.EncodePosition(pos), nil + return replication.EncodePosition(pos), nil } // PopulateReparentJournal adds an entry into the reparent_journal table. 
func (tm *TabletManager) PopulateReparentJournal(ctx context.Context, timeCreatedNS int64, actionName string, primaryAlias *topodatapb.TabletAlias, position string) error { log.Infof("PopulateReparentJournal: action: %v parent: %v position: %v timeCreatedNS: %d actionName: %s primaryAlias: %s", actionName, primaryAlias, position, timeCreatedNS, actionName, primaryAlias) - pos, err := mysql.DecodePosition(position) + pos, err := replication.DecodePosition(position) if err != nil { return err } @@ -388,7 +385,7 @@ func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.Tab } } - pos, err := mysql.DecodePosition(position) + pos, err := replication.DecodePosition(position) if err != nil { return err } @@ -470,12 +467,12 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure // considered successful. If we are already not serving, this will be // idempotent. log.Infof("DemotePrimary disabling query service") - if err := tm.QueryServiceControl.SetServingType(tablet.Type, logutil.ProtoToTime(tablet.PrimaryTermStartTime), false, "demotion in progress"); err != nil { + if err := tm.QueryServiceControl.SetServingType(tablet.Type, protoutil.TimeFromProto(tablet.PrimaryTermStartTime).UTC(), false, "demotion in progress"); err != nil { return nil, vterrors.Wrap(err, "SetServingType(serving=false) failed") } defer func() { if finalErr != nil && revertPartialFailure && wasServing { - if err := tm.QueryServiceControl.SetServingType(tablet.Type, logutil.ProtoToTime(tablet.PrimaryTermStartTime), true, ""); err != nil { + if err := tm.QueryServiceControl.SetServingType(tablet.Type, protoutil.TimeFromProto(tablet.PrimaryTermStartTime).UTC(), true, ""); err != nil { log.Warningf("SetServingType(serving=true) failed during revert: %v", err) } } @@ -487,7 +484,7 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure // previous demotion, or because we are not primary anyway, this should be // idempotent. 
if _, err := tm.MysqlDaemon.SetSuperReadOnly(true); err != nil { - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Number() == mysql.ERUnknownSystemVariable { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERUnknownSystemVariable { log.Warningf("server does not know about super_read_only, continuing anyway...") } else { return nil, err @@ -525,7 +522,7 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure if err != nil { return nil, err } - return mysql.PrimaryStatusToProto(status), nil + return replication.PrimaryStatusToProto(status), nil } // UndoDemotePrimary reverts a previous call to DemotePrimary @@ -556,7 +553,7 @@ func (tm *TabletManager) UndoDemotePrimary(ctx context.Context, semiSync bool) e // Update serving graph tablet := tm.Tablet() log.Infof("UndoDemotePrimary re-enabling query service") - if err := tm.QueryServiceControl.SetServingType(tablet.Type, logutil.ProtoToTime(tablet.PrimaryTermStartTime), true, ""); err != nil { + if err := tm.QueryServiceControl.SetServingType(tablet.Type, protoutil.TimeFromProto(tablet.PrimaryTermStartTime).UTC(), true, ""); err != nil { return vterrors.Wrap(err, "SetServingType(serving=true) failed") } return nil @@ -611,22 +608,6 @@ func (tm *TabletManager) SetReplicationSource(ctx context.Context, parentAlias * return tm.setReplicationSourceLocked(ctx, parentAlias, timeCreatedNS, waitPosition, forceStartReplication, semiSyncAction) } -func (tm *TabletManager) setReplicationSourceRepairReplication(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) (err error) { - parent, err := tm.TopoServer.GetTablet(ctx, parentAlias) - if err != nil { - return err - } - - ctx, unlock, lockErr := tm.TopoServer.LockShard(ctx, parent.Tablet.GetKeyspace(), parent.Tablet.GetShard(), fmt.Sprintf("repairReplication to %v as parent)", topoproto.TabletAliasString(parentAlias))) - if lockErr != nil { - 
return lockErr - } - - defer unlock(&err) - - return tm.setReplicationSourceLocked(ctx, parentAlias, timeCreatedNS, waitPosition, forceStartReplication, SemiSyncActionNone) -} - func (tm *TabletManager) setReplicationSourceSemiSyncNoAction(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) error { log.Infof("SetReplicationSource: parent: %v position: %v force: %v", parentAlias, waitPosition, forceStartReplication) if err := tm.lock(ctx); err != nil { @@ -663,7 +644,7 @@ func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentA shouldbeReplicating = true // Since we continue in the case of this error, make sure 'status' is // in a known, empty state. - status = mysql.ReplicationStatus{} + status = replication.ReplicationStatus{} } else if err != nil { // Abort on any other non-nil error. return err @@ -730,7 +711,7 @@ func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentA if shouldbeReplicating { log.Infof("Set up MySQL replication; should now be replicating from %s at %s", parentAlias, waitPosition) if waitPosition != "" { - pos, err := mysql.DecodePosition(waitPosition) + pos, err := replication.DecodePosition(waitPosition) if err != nil { return err } @@ -781,7 +762,7 @@ func (tm *TabletManager) StopReplicationAndGetStatus(ctx context.Context, stopRe if err != nil { return StopReplicationAndGetStatusResponse{}, vterrors.Wrap(err, "before status failed") } - before := mysql.ReplicationStatusToProto(rs) + before := replication.ReplicationStatusToProto(rs) if stopReplicationMode == replicationdatapb.StopReplicationMode_IOTHREADONLY { if !rs.IOHealthy() { @@ -827,7 +808,7 @@ func (tm *TabletManager) StopReplicationAndGetStatus(ctx context.Context, stopRe }, }, vterrors.Wrap(err, "acquiring replication status failed") } - after := mysql.ReplicationStatusToProto(rsAfter) + after := replication.ReplicationStatusToProto(rsAfter) rs.Position = 
rsAfter.Position rs.RelayLogPosition = rsAfter.RelayLogPosition @@ -875,7 +856,7 @@ func (tm *TabletManager) PromoteReplica(ctx context.Context, semiSync bool) (str if err := tm.changeTypeLocked(ctx, topodatapb.TabletType_PRIMARY, DBActionSetReadWrite, SemiSyncActionNone); err != nil { return "", err } - return mysql.EncodePosition(pos), nil + return replication.EncodePosition(pos), nil } func isPrimaryEligible(tabletType topodatapb.TabletType) bool { @@ -937,7 +918,7 @@ func (tm *TabletManager) fixSemiSyncAndReplication(tabletType topodatapb.TabletT return nil } - //shouldAck := semiSync == SemiSyncActionSet + // shouldAck := semiSync == SemiSyncActionSet shouldAck := isPrimaryEligible(tabletType) acking, err := tm.MysqlDaemon.SemiSyncReplicationStatus() if err != nil { @@ -977,26 +958,3 @@ func (tm *TabletManager) handleRelayLogError(err error) error { } return err } - -// repairReplication tries to connect this server to whoever is -// the current primary of the shard, and start replicating. -func (tm *TabletManager) repairReplication(ctx context.Context) error { - tablet := tm.Tablet() - - si, err := tm.TopoServer.GetShard(ctx, tablet.Keyspace, tablet.Shard) - if err != nil { - return err - } - if !si.HasPrimary() { - return fmt.Errorf("no primary tablet for shard %v/%v", tablet.Keyspace, tablet.Shard) - } - - if topoproto.TabletAliasEqual(si.PrimaryAlias, tablet.Alias) { - // The shard record says we are primary, but we disagree; we wouldn't - // reach this point unless we were told to check replication. - // Hopefully someone is working on fixing that, but in any case, - // we should not try to reparent to ourselves. 
- return fmt.Errorf("shard %v/%v record claims tablet %v is primary, but its type is %v", tablet.Keyspace, tablet.Shard, topoproto.TabletAliasString(tablet.Alias), tablet.Type) - } - return tm.setReplicationSourceRepairReplication(ctx, si.PrimaryAlias, 0, "", true) -} diff --git a/go/vt/vttablet/tabletmanager/rpc_schema.go b/go/vt/vttablet/tabletmanager/rpc_schema.go index 791ed42f994..9fe8ce27170 100644 --- a/go/vt/vttablet/tabletmanager/rpc_schema.go +++ b/go/vt/vttablet/tabletmanager/rpc_schema.go @@ -17,11 +17,11 @@ limitations under the License. package tabletmanager import ( + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/vterrors" "context" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/tmutils" "vitess.io/vitess/go/vt/topo/topoproto" @@ -44,7 +44,7 @@ func (tm *TabletManager) ReloadSchema(ctx context.Context, waitPosition string) } if waitPosition != "" { - pos, err := mysql.DecodePosition(waitPosition) + pos, err := replication.DecodePosition(waitPosition) if err != nil { return vterrors.Wrapf(err, "ReloadSchema: can't parse wait position (%q)", waitPosition) } @@ -58,6 +58,11 @@ func (tm *TabletManager) ReloadSchema(ctx context.Context, waitPosition string) return tm.QueryServiceControl.ReloadSchema(ctx) } +// ResetSequences will reset the auto-inc counters on the specified tables. +func (tm *TabletManager) ResetSequences(ctx context.Context, tables []string) error { + return tm.QueryServiceControl.SchemaEngine().ResetSequences(tables) +} + // PreflightSchema will try out the schema changes in "changes". 
func (tm *TabletManager) PreflightSchema(ctx context.Context, changes []string) ([]*tabletmanagerdatapb.SchemaChangeResult, error) { if err := tm.lock(ctx); err != nil { diff --git a/go/vt/vttablet/tabletmanager/rpc_throttler.go b/go/vt/vttablet/tabletmanager/rpc_throttler.go new file mode 100644 index 00000000000..dfdc0d230fb --- /dev/null +++ b/go/vt/vttablet/tabletmanager/rpc_throttler.go @@ -0,0 +1,53 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tabletmanager + +import ( + "context" + + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" +) + +// CheckThrottler executes a throttler check +func (tm *TabletManager) CheckThrottler(ctx context.Context, req *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) { + if req.AppName == "" { + req.AppName = throttlerapp.VitessName.String() + } + flags := &throttle.CheckFlags{ + LowPriority: false, + SkipRequestHeartbeats: true, + } + checkResult := tm.QueryServiceControl.CheckThrottler(ctx, req.AppName, flags) + if checkResult == nil { + return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "nil checkResult") + } + resp := &tabletmanagerdatapb.CheckThrottlerResponse{ + StatusCode: int32(checkResult.StatusCode), + Value: 
checkResult.Value, + Threshold: checkResult.Threshold, + Message: checkResult.Message, + RecentlyChecked: checkResult.RecentlyChecked, + } + if checkResult.Error != nil { + resp.Error = checkResult.Error.Error() + } + return resp, nil +} diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication.go b/go/vt/vttablet/tabletmanager/rpc_vreplication.go index c6f2e8c5f3c..b18caa1063f 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication.go @@ -22,56 +22,225 @@ import ( "google.golang.org/protobuf/encoding/prototext" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/textutil" + "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/vt/proto/vttime" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/workflow" + "vitess.io/vitess/go/vt/vterrors" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" - "vitess.io/vitess/go/vt/sidecardb" - "vitess.io/vitess/go/vt/sqlparser" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) const ( - // Retrieve the current configuration values for a workflow's vreplication stream. - sqlSelectVRWorkflowConfig = "select id, source, cell, tablet_types from %s.vreplication where workflow = %a" + // Create a new VReplication workflow record. + sqlCreateVReplicationWorkflow = "insert into %s.vreplication (workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys) values (%a, %a, '', 0, 0, %a, %a, now(), 0, %a, %a, %a, %a, %a)" + // Read a VReplication workflow. 
+ sqlReadVReplicationWorkflow = "select id, source, pos, stop_pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, message, db_name, rows_copied, tags, time_heartbeat, workflow_type, time_throttled, component_throttled, workflow_sub_type, defer_secondary_keys from %s.vreplication where workflow = %a and db_name = %a" + // Delete VReplication records for the given workflow. + sqlDeleteVReplicationWorkflow = "delete from %s.vreplication where workflow = %a and db_name = %a" + // Retrieve the current configuration values for a workflow's vreplication stream(s). + sqlSelectVReplicationWorkflowConfig = "select id, source, cell, tablet_types, state, message from %s.vreplication where workflow = %a" // Update the configuration values for a workflow's vreplication stream. - sqlUpdateVRWorkflowConfig = "update %s.vreplication set source = %a, cell = %a, tablet_types = %a where id = %a" + sqlUpdateVReplicationWorkflowStreamConfig = "update %s.vreplication set state = %a, source = %a, cell = %a, tablet_types = %a where id = %a" ) -// VReplicationExec executes a vreplication command. -func (tm *TabletManager) VReplicationExec(ctx context.Context, query string) (*querypb.QueryResult, error) { - // Replace any provided sidecar databsae qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(query, sidecardb.DefaultName, sidecardb.GetName()) +func (tm *TabletManager) CreateVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) { + if req == nil || len(req.BinlogSource) == 0 { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid request, no binlog source specified") + } + res := &sqltypes.Result{} + for _, bls := range req.BinlogSource { + source, err := prototext.Marshal(bls) + if err != nil { + return nil, err + } + // Use the local cell if none are specified. 
+ if len(req.Cells) == 0 || strings.TrimSpace(req.Cells[0]) == "" { + req.Cells = append(req.Cells, tm.Tablet().Alias.Cell) + } + wfState := binlogdatapb.VReplicationWorkflowState_Stopped.String() + tabletTypesStr := topoproto.MakeStringTypeCSV(req.TabletTypes) + if req.TabletSelectionPreference == tabletmanagerdatapb.TabletSelectionPreference_INORDER { + tabletTypesStr = discovery.InOrderHint + tabletTypesStr + } + bindVars := map[string]*querypb.BindVariable{ + "workflow": sqltypes.StringBindVariable(req.Workflow), + "source": sqltypes.StringBindVariable(string(source)), + "cells": sqltypes.StringBindVariable(strings.Join(req.Cells, ",")), + "tabletTypes": sqltypes.StringBindVariable(tabletTypesStr), + "state": sqltypes.StringBindVariable(wfState), + "dbname": sqltypes.StringBindVariable(tm.DBConfigs.DBName), + "workflowType": sqltypes.Int64BindVariable(int64(req.WorkflowType)), + "workflowSubType": sqltypes.Int64BindVariable(int64(req.WorkflowSubType)), + "deferSecondaryKeys": sqltypes.BoolBindVariable(req.DeferSecondaryKeys), + } + parsed := sqlparser.BuildParsedQuery(sqlCreateVReplicationWorkflow, sidecar.GetIdentifier(), + ":workflow", ":source", ":cells", ":tabletTypes", ":state", ":dbname", ":workflowType", ":workflowSubType", ":deferSecondaryKeys", + ) + stmt, err := parsed.GenerateQuery(bindVars, nil) + if err != nil { + return nil, err + } + streamres, err := tm.VREngine.Exec(stmt) + + if err != nil { + return nil, err + } + res.RowsAffected += streamres.RowsAffected + } + return &tabletmanagerdatapb.CreateVReplicationWorkflowResponse{Result: sqltypes.ResultToProto3(res)}, nil +} + +func (tm *TabletManager) DeleteVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.DeleteVReplicationWorkflowRequest) (*tabletmanagerdatapb.DeleteVReplicationWorkflowResponse, error) { + if req == nil || req.Workflow == "" { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid request, no workflow provided") + } + res := &sqltypes.Result{} + 
bindVars := map[string]*querypb.BindVariable{ + "wf": sqltypes.StringBindVariable(req.Workflow), + "db": sqltypes.StringBindVariable(tm.DBConfigs.DBName), + } + parsed := sqlparser.BuildParsedQuery(sqlDeleteVReplicationWorkflow, sidecar.GetIdentifier(), ":wf", ":db") + stmt, err := parsed.GenerateQuery(bindVars, nil) if err != nil { return nil, err } - qr, err := tm.VREngine.ExecWithDBA(uq) + streamres, err := tm.VREngine.Exec(stmt) + if err != nil { return nil, err } - return sqltypes.ResultToProto3(qr), nil + res.RowsAffected += streamres.RowsAffected + + return &tabletmanagerdatapb.DeleteVReplicationWorkflowResponse{Result: sqltypes.ResultToProto3(res)}, nil } -// VReplicationWaitForPos waits for the specified position. -func (tm *TabletManager) VReplicationWaitForPos(ctx context.Context, id int32, pos string) error { - return tm.VREngine.WaitForPos(ctx, id, pos) +func (tm *TabletManager) ReadVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { + if req == nil || req.Workflow == "" { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid request, no workflow provided") + } + bindVars := map[string]*querypb.BindVariable{ + "wf": sqltypes.StringBindVariable(req.Workflow), + "db": sqltypes.StringBindVariable(tm.DBConfigs.DBName), + } + parsed := sqlparser.BuildParsedQuery(sqlReadVReplicationWorkflow, sidecar.GetIdentifier(), ":wf", ":db") + stmt, err := parsed.GenerateQuery(bindVars, nil) + if err != nil { + return nil, err + } + res, err := tm.VREngine.Exec(stmt) + if err != nil { + return nil, err + } + if res == nil || len(res.Rows) == 0 { + return nil, nil + } + rows := res.Named().Rows + resp := &tabletmanagerdatapb.ReadVReplicationWorkflowResponse{Workflow: req.Workflow} + streams := make([]*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream, len(rows)) + + // First the things that are common to all streams. 
+ resp.Cells = rows[0]["cell"].ToString() + tabletTypes, inorder, err := discovery.ParseTabletTypesAndOrder(rows[0]["tablet_types"].ToString()) + if err != nil { + return nil, vterrors.Wrap(err, "error parsing the tablet_types field from vreplication table record") + } + resp.TabletTypes = tabletTypes + resp.TabletSelectionPreference = tabletmanagerdatapb.TabletSelectionPreference_ANY + if inorder { + resp.TabletSelectionPreference = tabletmanagerdatapb.TabletSelectionPreference_INORDER + } + resp.DbName = rows[0]["db_name"].ToString() + resp.Tags = rows[0]["tags"].ToString() + wft, err := rows[0]["workflow_type"].ToInt32() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing workflow_type field from vreplication table record") + } + resp.WorkflowType = binlogdatapb.VReplicationWorkflowType(wft) + wfst, err := rows[0]["workflow_sub_type"].ToInt32() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing workflow_sub_type field from vreplication table record") + } + resp.WorkflowSubType = binlogdatapb.VReplicationWorkflowSubType(wfst) + resp.DeferSecondaryKeys = rows[0]["defer_secondary_keys"].ToString() == "1" + + // Now the individual streams (there can be more than 1 with shard merges). 
+ for i, row := range rows { + streams[i] = &tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{} + if streams[i].Id, err = row["id"].ToInt32(); err != nil { + return nil, vterrors.Wrap(err, "error parsing id field from vreplication table record") + } + srcBytes, err := row["source"].ToBytes() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing binlog_source field from vreplication table record") + } + blspb := &binlogdatapb.BinlogSource{} + err = prototext.Unmarshal(srcBytes, blspb) + if err != nil { + return nil, vterrors.Wrap(err, "error unmarshaling binlog_source field from vreplication table record") + } + streams[i].Bls = blspb + streams[i].Pos = row["pos"].ToString() + streams[i].StopPos = row["stop_pos"].ToString() + if streams[i].MaxTps, err = row["max_tps"].ToInt64(); err != nil { + return nil, vterrors.Wrap(err, "error parsing max_tps field from vreplication table record") + } + if streams[i].MaxReplicationLag, err = row["max_replication_lag"].ToInt64(); err != nil { + return nil, vterrors.Wrap(err, "error parsing max_replication_lag field from vreplication table record") + } + timeUpdated, err := row["time_updated"].ToInt64() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing time_updated field from vreplication table record") + } + streams[i].TimeUpdated = &vttime.Time{Seconds: timeUpdated} + txTimestamp, err := row["transaction_timestamp"].ToInt64() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing transaction_timestamp field from vreplication table record") + } + streams[i].TransactionTimestamp = &vttime.Time{Seconds: txTimestamp} + streams[i].State = binlogdatapb.VReplicationWorkflowState(binlogdatapb.VReplicationWorkflowState_value[row["state"].ToString()]) + streams[i].Message = row["message"].ToString() + if streams[i].RowsCopied, err = row["rows_copied"].ToInt64(); err != nil { + return nil, vterrors.Wrap(err, "error parsing rows_copied field from vreplication table record") + } + 
timeHeartbeat, err := row["time_heartbeat"].ToInt64() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing time_heartbeat field from vreplication table record") + } + streams[i].TimeHeartbeat = &vttime.Time{Seconds: timeHeartbeat} + timeThrottled, err := row["time_throttled"].ToInt64() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing time_throttled field from vreplication table record") + } + streams[i].TimeThrottled = &vttime.Time{Seconds: timeThrottled} + streams[i].ComponentThrottled = row["component_throttled"].ToString() + } + resp.Streams = streams + + return resp, nil } -// UpdateVRWorkflow updates the sidecar databases's vreplication -// record for this tablet's vreplication workflow stream(s). If there -// is no stream for the given workflow on the tablet then a nil result +// UpdateVReplicationWorkflow updates the sidecar databases's vreplication +// record(s) for this tablet's vreplication workflow stream(s). If there +// are no streams for the given workflow on the tablet then a nil result // is returned as this is expected e.g. on source tablets of a // Reshard workflow (source and target are the same keyspace). The // caller can consider this case an error if they choose to. // Note: the VReplication engine creates a new controller for the // workflow stream when the record is updated, so we also in effect // restart the workflow stream via the update. 
-func (tm *TabletManager) UpdateVRWorkflow(ctx context.Context, req *tabletmanagerdatapb.UpdateVRWorkflowRequest) (*tabletmanagerdatapb.UpdateVRWorkflowResponse, error) { +func (tm *TabletManager) UpdateVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowResponse, error) { bindVars := map[string]*querypb.BindVariable{ "wf": sqltypes.StringBindVariable(req.Workflow), } - parsed := sqlparser.BuildParsedQuery(sqlSelectVRWorkflowConfig, sidecardb.GetIdentifier(), ":wf") + parsed := sqlparser.BuildParsedQuery(sqlSelectVReplicationWorkflowConfig, sidecar.GetIdentifier(), ":wf") stmt, err := parsed.GenerateQuery(bindVars, nil) if err != nil { return nil, err @@ -85,51 +254,93 @@ func (tm *TabletManager) UpdateVRWorkflow(ctx context.Context, req *tabletmanage // expected e.g. on source tablets for Reshard // workflows. If callers want to treat this // scenario as an error they can. - return &tabletmanagerdatapb.UpdateVRWorkflowResponse{Result: nil}, nil + return &tabletmanagerdatapb.UpdateVReplicationWorkflowResponse{Result: nil}, nil } - row := res.Named().Row() - id := row.AsInt64("id", 0) - cells := strings.Split(row.AsString("cell", ""), ",") - tabletTypes := strings.Split(row.AsString("tablet_types", ""), ",") - bls := &binlogdatapb.BinlogSource{} - source := row.AsBytes("source", []byte{}) - // For the string based values, we use NULL to differentiate - // from an empty string. The NULL value indicates that we - // should keep the existing value. - if !textutil.ValueIsSimulatedNull(req.Cells) { - cells = req.Cells - } - if !textutil.ValueIsSimulatedNull(req.TabletTypes) { - tabletTypes = req.TabletTypes - } - if err = prototext.Unmarshal(source, bls); err != nil { - return nil, err - } - // If we don't want to update the existing value then pass - // the simulated NULL value of -1. 
- if !textutil.ValueIsSimulatedNull(req.OnDdl) { - bls.OnDdl = req.OnDdl + for _, row := range res.Named().Rows { + id := row.AsInt64("id", 0) + cells := strings.Split(row.AsString("cell", ""), ",") + tabletTypes, inorder, err := discovery.ParseTabletTypesAndOrder(row.AsString("tablet_types", "")) + if err != nil { + return nil, err + } + bls := &binlogdatapb.BinlogSource{} + source := row.AsBytes("source", []byte{}) + state := row.AsString("state", "") + message := row.AsString("message", "") + if req.State == binlogdatapb.VReplicationWorkflowState_Running && strings.ToUpper(message) == workflow.Frozen { + return &tabletmanagerdatapb.UpdateVReplicationWorkflowResponse{Result: nil}, + vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "cannot start a workflow when it is frozen") + } + // For the string based values, we use NULL to differentiate + // from an empty string. The NULL value indicates that we + // should keep the existing value. + if !textutil.ValueIsSimulatedNull(req.Cells) { + cells = req.Cells + } + if !textutil.ValueIsSimulatedNull(req.TabletTypes) { + tabletTypes = req.TabletTypes + } + tabletTypesStr := topoproto.MakeStringTypeCSV(tabletTypes) + if inorder && req.TabletSelectionPreference == tabletmanagerdatapb.TabletSelectionPreference_UNKNOWN || + req.TabletSelectionPreference == tabletmanagerdatapb.TabletSelectionPreference_INORDER { + tabletTypesStr = discovery.InOrderHint + tabletTypesStr + } + if err = prototext.Unmarshal(source, bls); err != nil { + return nil, err + } + // If we don't want to update the existing value then pass + // the simulated NULL value of -1. 
+ if !textutil.ValueIsSimulatedNull(req.OnDdl) { + bls.OnDdl = req.OnDdl + } + source, err = prototext.Marshal(bls) + if err != nil { + return nil, err + } + if !textutil.ValueIsSimulatedNull(req.State) { + state = binlogdatapb.VReplicationWorkflowState_name[int32(req.State)] + } + bindVars = map[string]*querypb.BindVariable{ + "st": sqltypes.StringBindVariable(state), + "sc": sqltypes.StringBindVariable(string(source)), + "cl": sqltypes.StringBindVariable(strings.Join(cells, ",")), + "tt": sqltypes.StringBindVariable(tabletTypesStr), + "id": sqltypes.Int64BindVariable(id), + } + parsed = sqlparser.BuildParsedQuery(sqlUpdateVReplicationWorkflowStreamConfig, sidecar.GetIdentifier(), ":st", ":sc", ":cl", ":tt", ":id") + stmt, err = parsed.GenerateQuery(bindVars, nil) + if err != nil { + return nil, err + } + res, err = tm.VREngine.Exec(stmt) + if err != nil { + return nil, err + } } - source, err = prototext.Marshal(bls) + + return &tabletmanagerdatapb.UpdateVReplicationWorkflowResponse{ + Result: &querypb.QueryResult{ + RowsAffected: uint64(len(res.Rows)), + }, + }, nil +} + +// VReplicationExec executes a vreplication command. +func (tm *TabletManager) VReplicationExec(ctx context.Context, query string) (*querypb.QueryResult, error) { + // Replace any provided sidecar databsae qualifiers with the correct one. 
+ uq, err := sqlparser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } - bindVars = map[string]*querypb.BindVariable{ - "sc": sqltypes.StringBindVariable(string(source)), - "cl": sqltypes.StringBindVariable(strings.Join(cells, ",")), - "tt": sqltypes.StringBindVariable(strings.Join(tabletTypes, ",")), - "id": sqltypes.Int64BindVariable(id), - } - parsed = sqlparser.BuildParsedQuery(sqlUpdateVRWorkflowConfig, sidecardb.GetIdentifier(), ":sc", ":cl", ":tt", ":id") - stmt, err = parsed.GenerateQuery(bindVars, nil) + qr, err := tm.VREngine.ExecWithDBA(uq) if err != nil { return nil, err } - res, err = tm.VREngine.Exec(stmt) + return sqltypes.ResultToProto3(qr), nil +} - if err != nil { - return nil, err - } - return &tabletmanagerdatapb.UpdateVRWorkflowResponse{Result: sqltypes.ResultToProto3(res)}, nil +// VReplicationWaitForPos waits for the specified position. +func (tm *TabletManager) VReplicationWaitForPos(ctx context.Context, id int32, pos string) error { + return tm.VREngine.WaitForPos(ctx, id, pos) } diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go index f7f680f10ad..ef2ac591c2b 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go @@ -18,62 +18,452 @@ package tabletmanager import ( "context" + "errors" "fmt" + "math" + "runtime/debug" + "strings" "testing" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/textutil" - "vitess.io/vitess/go/vt/binlog/binlogplayer" - "vitess.io/vitess/go/vt/dbconfigs" - "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtctl/workflow" + 
"vitess.io/vitess/go/vt/vtgate/vindexes" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" - "vitess.io/vitess/go/vt/sidecardb" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" - "vitess.io/vitess/go/vt/vttablet/tabletservermock" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + "vitess.io/vitess/go/vt/proto/vttime" +) + +const ( + insertVReplicationPrefix = "insert into _vt.vreplication (workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys)" + getWorkflow = "select id from _vt.vreplication where db_name='vt_%s' and workflow='%s'" + checkForWorkflow = "select 1 from _vt.vreplication where db_name='vt_%s' and workflow='%s'" + checkForFrozenWorkflow = "select 1 from _vt.vreplication where db_name='vt_%s' and message='FROZEN' and workflow_sub_type != 1" + freezeWorkflow = "update _vt.vreplication set message = 'FROZEN' where db_name='vt_%s' and workflow='%s'" + checkForJournal = "/select val from _vt.resharding_journal where id=" + getWorkflowStatus = "select id, workflow, source, pos, stop_pos, max_replication_lag, state, db_name, time_updated, transaction_timestamp, message, tags, workflow_type, workflow_sub_type, time_heartbeat, defer_secondary_keys, component_throttled, time_throttled, rows_copied from _vt.vreplication where workflow = '%s' and db_name = 'vt_%s'" + getWorkflowState = "select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type, defer_secondary_keys from _vt.vreplication where id=1" + getCopyState = "select distinct table_name from _vt.copy_state cs, 
_vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = 1" + getNumCopyStateTable = "select count(distinct table_name) from _vt.copy_state where vrepl_id=1" + getLatestCopyState = "select table_name, lastpk from _vt.copy_state where vrepl_id = 1 and id in (select max(id) from _vt.copy_state where vrepl_id = 1 group by vrepl_id, table_name)" + getAutoIncrementStep = "select @@session.auto_increment_increment" + setSessionTZ = "set @@session.time_zone = '+00:00'" + setNames = "set names 'binary'" + getBinlogRowImage = "select @@binlog_row_image" + insertStreamsCreatedLog = "insert into _vt.vreplication_log(vrepl_id, type, state, message) values(1, 'Stream Created', '', '%s'" + getVReplicationRecord = "select * from _vt.vreplication where id = 1" + startWorkflow = "update _vt.vreplication set state='Running' where db_name='vt_%s' and workflow='%s'" + stopForCutover = "update _vt.vreplication set state='Stopped', message='stopped for cutover' where id=1" + getMaxValForSequence = "select max(`id`) as maxval from `vt_%s`.`%s`" + initSequenceTable = "insert into %a.%a (id, next_id, cache) values (0, %d, 1000) on duplicate key update next_id = if(next_id < %d, %d, next_id)" + deleteWorkflow = "delete from _vt.vreplication where db_name = 'vt_%s' and workflow = '%s'" + updatePickedSourceTablet = `update _vt.vreplication set message='Picked source tablet: cell:\"%s\" uid:%d' where id=1` + getRowsCopied = "SELECT rows_copied FROM _vt.vreplication WHERE id=1" +) + +var ( + errShortCircuit = fmt.Errorf("short circuiting test") + defaultSchema = &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t1", + Columns: []string{"id", "c2"}, + PrimaryKeyColumns: []string{"id"}, + Fields: sqltypes.MakeTestFields("id|c2", "int64|int64"), + }, + }, + } + position = fmt.Sprintf("%s/%s", gtidFlavor, gtidPosition) ) -func TestUpdateVRWorkflow(t *testing.T) { - ctx := context.Background() +// TestCreateVReplicationWorkflow tests the 
query generated +// from a VtctldServer MoveTablesCreate request to ensure +// that the VReplication stream(s) are created correctly. +func TestCreateVReplicationWorkflow(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 200 + targetKs := "targetks" + targetTabletUID := 300 + shard := "0" + wf := "testwf" + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) + defer tenv.deleteTablet(sourceTablet.tablet) + targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, shard) + defer tenv.deleteTablet(targetTablet.tablet) + + ws := workflow.NewServer(tenv.ts, tenv.tmc) + + tests := []struct { + name string + req *vtctldatapb.MoveTablesCreateRequest + schema *tabletmanagerdatapb.SchemaDefinition + query string + }{ + { + name: "defaults", + req: &vtctldatapb.MoveTablesCreateRequest{ + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + Workflow: wf, + Cells: tenv.cells, + AllTables: true, + }, + query: fmt.Sprintf(`%s values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1\"}}', '', 0, 0, '%s', '', now(), 0, 'Stopped', '%s', 1, 0, 0)`, + insertVReplicationPrefix, wf, sourceKs, shard, tenv.cells[0], tenv.dbName), + }, + { + name: "all values", + schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t1", + Columns: []string{"id", "c2"}, + PrimaryKeyColumns: []string{"id"}, + Fields: sqltypes.MakeTestFields("id|c2", "int64|int64"), + }, + { + Name: "wut", + Columns: []string{"id"}, + PrimaryKeyColumns: []string{"id"}, + Fields: sqltypes.MakeTestFields("id", "int64"), + }, + }, + }, + req: &vtctldatapb.MoveTablesCreateRequest{ + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + Workflow: wf, + Cells: tenv.cells, + IncludeTables: []string{defaultSchema.TableDefinitions[0].Name}, 
+ ExcludeTables: []string{"wut"}, + SourceTimeZone: "EDT", + OnDdl: binlogdatapb.OnDDLAction_EXEC.String(), + StopAfterCopy: true, + DropForeignKeys: true, + DeferSecondaryKeys: true, + AutoStart: true, + }, + query: fmt.Sprintf(`%s values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1\"}} on_ddl:EXEC stop_after_copy:true source_time_zone:\"EDT\" target_time_zone:\"UTC\"', '', 0, 0, '%s', '', now(), 0, 'Stopped', '%s', 1, 0, 1)`, + insertVReplicationPrefix, wf, sourceKs, shard, tenv.cells[0], tenv.dbName), + }, + } + + tenv.tmc.setVReplicationExecResults(targetTablet.tablet, fmt.Sprintf("select 1 from _vt.vreplication where db_name='vt_%s' and workflow='%s'", + targetKs, wf), &sqltypes.Result{}) + tenv.tmc.setVReplicationExecResults(targetTablet.tablet, fmt.Sprintf("select 1 from _vt.vreplication where db_name='vt_%s' and message='FROZEN' and workflow_sub_type != 1", + targetKs), &sqltypes.Result{}) + tenv.tmc.setVReplicationExecResults(sourceTablet.tablet, "select val from _vt.resharding_journal where id=7224776740563431192", &sqltypes.Result{}) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // This is needed because MockDBClient uses t.Fatal() + // which doesn't play well with subtests. + defer func() { + if err := recover(); err != nil { + t.Errorf("Recovered from panic: %v; Stack: %s", err, string(debug.Stack())) + } + }() + + require.NotNil(t, tt.req, "No MoveTablesCreate request provided") + require.NotEmpty(t, tt.query, "No expected query provided") + + if tt.schema == nil { + tt.schema = defaultSchema + } + tenv.tmc.SetSchema(tt.schema) + + tenv.tmc.tablets[targetTabletUID].vrdbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) + // This is our expected query, which will also short circuit + // the test with an error as at this point we've tested what + // we wanted to test. 
+ tenv.tmc.tablets[targetTabletUID].vrdbClient.ExpectRequest(tt.query, nil, errShortCircuit) + _, err := ws.MoveTablesCreate(ctx, tt.req) + tenv.tmc.tablets[targetTabletUID].vrdbClient.Wait() + require.ErrorIs(t, err, errShortCircuit) + }) + } +} + +// TestMoveTables tests the query generated from a VtctldServer +// MoveTablesCreate request to ensure that the VReplication +// stream(s) are created correctly. Followed by ensuring that +// SwitchTraffic and ReverseTraffic work as expected. +func TestMoveTables(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 200 + targetKs := "targetks" + targetShards := make(map[string]*fakeTabletConn) + sourceShard := "0" + globalKs := "global" + globalShard := "0" + wf := "testwf" + tabletTypes := []topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_REPLICA, + topodatapb.TabletType_RDONLY, + } + + tenv := newTestEnv(t, ctx, sourceKs, []string{sourceShard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, sourceShard) + defer tenv.deleteTablet(sourceTablet.tablet) + + targetShards["-80"] = tenv.addTablet(t, 300, targetKs, "-80") + defer tenv.deleteTablet(targetShards["-80"].tablet) + targetShards["80-"] = tenv.addTablet(t, 310, targetKs, "80-") + defer tenv.deleteTablet(targetShards["80-"].tablet) + + globalTablet := tenv.addTablet(t, 500, globalKs, globalShard) + defer tenv.deleteTablet(globalTablet.tablet) + + tenv.ts.SaveVSchema(ctx, globalKs, &vschemapb.Keyspace{ + Sharded: false, + Tables: map[string]*vschemapb.Table{ + "t1_seq": { + Type: vindexes.TypeSequence, + }, + }, + }) + tenv.ts.SaveVSchema(ctx, targetKs, &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "id", + Name: "hash", + }}, + AutoIncrement: 
&vschemapb.AutoIncrement{ + Column: "id", + Sequence: "t1_seq", + }, + }, + }, + }) + + ws := workflow.NewServer(tenv.ts, tenv.tmc) + + tenv.mysqld.Schema = defaultSchema + tenv.mysqld.Schema.DatabaseSchema = tenv.dbName + tenv.mysqld.FetchSuperQueryMap = make(map[string]*sqltypes.Result) + tenv.mysqld.FetchSuperQueryMap[`select character_set_name, collation_name, column_name, data_type, column_type, extra from information_schema.columns where .*`] = sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "character_set_name|collation_name|column_name|data_type|column_type|extra", + "varchar|varchar|varchar|varchar|varchar|varchar", + ), + "NULL|NULL|id|bigint|bigint|", + "NULL|NULL|c2|bigint|bigint|", + ) + + bls := fmt.Sprintf("keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1\"}}", sourceKs, sourceShard) + + tenv.tmc.SetSchema(defaultSchema) + + tenv.tmc.setVReplicationExecResults(sourceTablet.tablet, checkForJournal, &sqltypes.Result{}) + + for _, ftc := range targetShards { + tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(checkForWorkflow, targetKs, wf), &sqltypes.Result{}) + tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(checkForFrozenWorkflow, targetKs), &sqltypes.Result{}) + tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(getWorkflow, targetKs, wf), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id", + "int64", + ), + "1", + ), + ) + tenv.tmc.setVReplicationExecResults(ftc.tablet, getCopyState, &sqltypes.Result{}) + tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(getWorkflowStatus, wf, targetKs), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|workflow|source|pos|stop_pos|max_replication_log|state|db_name|time_updated|transaction_timestamp|message|tags|workflow_type|workflow_sub_type|time_heartbeat|defer_secondary_keys|component_throttled|time_throttled|rows_copied", + 
"int64|varchar|blob|varchar|varchar|int64|varchar|varchar|int64|int64|varchar|varchar|int64|int64|int64|int64|varchar|int64|int64", + ), + fmt.Sprintf("1|%s|%s|%s|NULL|0|running|vt_%s|1686577659|0|||1|0|0|0||0|10", wf, bls, position, targetKs), + ), + ) + tenv.tmc.setVReplicationExecResults(ftc.tablet, getLatestCopyState, &sqltypes.Result{}) + + ftc.vrdbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) + insert := fmt.Sprintf(`%s values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1 where in_keyrange(id, \'%s.hash\', \'%s\')\"}}', '', 0, 0, '%s', 'primary,replica,rdonly', now(), 0, 'Stopped', '%s', 1, 0, 0)`, + insertVReplicationPrefix, wf, sourceKs, sourceShard, targetKs, ftc.tablet.Shard, tenv.cells[0], tenv.dbName) + ftc.vrdbClient.ExpectRequest(insert, &sqltypes.Result{InsertID: 1}, nil) + ftc.vrdbClient.ExpectRequest(getAutoIncrementStep, &sqltypes.Result{}, nil) + ftc.vrdbClient.ExpectRequest(getVReplicationRecord, + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|source", + "int64|varchar", + ), + fmt.Sprintf("1|%s", bls), + ), nil) + ftc.vrdbClient.ExpectRequest(fmt.Sprintf(updatePickedSourceTablet, tenv.cells[0], sourceTabletUID), &sqltypes.Result{}, nil) + ftc.vrdbClient.ExpectRequest(setSessionTZ, &sqltypes.Result{}, nil) + ftc.vrdbClient.ExpectRequest(setNames, &sqltypes.Result{}, nil) + ftc.vrdbClient.ExpectRequest(getRowsCopied, + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "rows_copied", + "int64", + ), + "0", + ), + nil, + ) + ftc.vrdbClient.ExpectRequest(getWorkflowState, sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "pos|stop_pos|max_tps|max_replication_lag|state|workflow_type|workflow|workflow_sub_type|defer_secondary_keys", + "varchar|varchar|int64|int64|varchar|int64|varchar|int64|int64", + ), + fmt.Sprintf("||0|0|Stopped|1|%s|0|0", wf), + ), nil) + ftc.vrdbClient.ExpectRequest(getNumCopyStateTable, sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + 
"count(distinct table_name)", + "int64", + ), + "1", + ), nil) + ftc.vrdbClient.ExpectRequest(getWorkflowState, sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "pos|stop_pos|max_tps|max_replication_lag|state|workflow_type|workflow|workflow_sub_type|defer_secondary_keys", + "varchar|varchar|int64|int64|varchar|int64|varchar|int64|int64", + ), + fmt.Sprintf("||0|0|Stopped|1|%s|0|0", wf), + ), nil) + ftc.vrdbClient.ExpectRequest(getNumCopyStateTable, sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "count(distinct table_name)", + "int64", + ), + "1", + ), nil) + ftc.vrdbClient.ExpectRequest(getBinlogRowImage, sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "@@binlog_row_image", + "varchar", + ), + "FULL", + ), nil) + + ftc.vrdbClient.ExpectRequest(fmt.Sprintf(insertStreamsCreatedLog, bls), &sqltypes.Result{}, nil) + tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(getWorkflow, targetKs, wf), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id", + "int64", + ), + "1", + ), + ) + tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(startWorkflow, targetKs, wf), &sqltypes.Result{}) + ftc.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.DefaultName), &sqltypes.Result{}, nil) + + tenv.tmc.setVReplicationExecResults(ftc.tablet, stopForCutover, &sqltypes.Result{}) + tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(freezeWorkflow, targetKs, wf), &sqltypes.Result{}) + + tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(getMaxValForSequence, targetKs, "t1"), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "maxval", + "int64", + ), + fmt.Sprintf("%d", ftc.tablet.Alias.Uid), // Use the tablet's UID as the max value + ), + ) + } + + // We use the tablet's UID in the mocked results for the max value used on each target shard. 
+ nextSeqVal := int(math.Max(float64(targetShards["-80"].tablet.Alias.Uid), float64(targetShards["80-"].tablet.Alias.Uid))) + 1 + tenv.tmc.setVReplicationExecResults(globalTablet.tablet, + sqlparser.BuildParsedQuery(initSequenceTable, sqlescape.EscapeID(fmt.Sprintf("vt_%s", globalKs)), sqlescape.EscapeID("t1_seq"), nextSeqVal, nextSeqVal, nextSeqVal).Query, + &sqltypes.Result{RowsAffected: 0}, + ) + + _, err := ws.MoveTablesCreate(ctx, &vtctldatapb.MoveTablesCreateRequest{ + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + Workflow: wf, + TabletTypes: tabletTypes, + Cells: tenv.cells, + AllTables: true, + AutoStart: true, + }) + require.NoError(t, err) + + _, err = ws.WorkflowSwitchTraffic(ctx, &vtctldatapb.WorkflowSwitchTrafficRequest{ + Keyspace: targetKs, + Workflow: wf, + Cells: tenv.cells, + MaxReplicationLagAllowed: &vttime.Duration{Seconds: 922337203}, + EnableReverseReplication: true, + InitializeTargetSequences: true, + Direction: int32(workflow.DirectionForward), + }) + require.NoError(t, err) + + tenv.tmc.setVReplicationExecResults(sourceTablet.tablet, fmt.Sprintf(getWorkflowStatus, workflow.ReverseWorkflowName(wf), sourceKs), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|workflow|source|pos|stop_pos|max_replication_log|state|db_name|time_updated|transaction_timestamp|message|tags|workflow_type|workflow_sub_type|time_heartbeat|defer_secondary_keys|component_throttled|time_throttled|rows_copied", + "int64|varchar|blob|varchar|varchar|int64|varchar|varchar|int64|int64|varchar|varchar|int64|int64|int64|int64|varchar|int64|int64", + ), + fmt.Sprintf("1|%s|%s|%s|NULL|0|running|vt_%s|1686577659|0|||1|0|0|0||0|10", workflow.ReverseWorkflowName(wf), bls, position, sourceKs), + ), + ) + + _, err = ws.WorkflowSwitchTraffic(ctx, &vtctldatapb.WorkflowSwitchTrafficRequest{ + Keyspace: targetKs, + Workflow: wf, + Cells: tenv.cells, + MaxReplicationLagAllowed: &vttime.Duration{Seconds: 922337203}, + EnableReverseReplication: true, + Direction: 
int32(workflow.DirectionBackward), + }) + require.NoError(t, err) +} + +func TestUpdateVReplicationWorkflow(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() cells := []string{"zone1"} tabletTypes := []string{"replica"} workflow := "testwf" - dbName := "test" + keyspace := "testks" vreplID := 1 - shortCircuitErr := fmt.Errorf("short circuiting test") - cp := mysql.ConnParams{} - db := fakesqldb.New(t) - ts := memorytopo.NewServer(cells[0]) - mysqld := mysqlctl.NewFakeMysqlDaemon(db) - dbClient := binlogplayer.NewMockDBClient(t) - dbClientFactory := func() binlogplayer.DBClient { return dbClient } - // Intentionally using Sprintf here as the query matching is exact - // and the engine uses `db_name=` w/o any spacing and the parser - // will add spacing. - dbClient.ExpectRequest(fmt.Sprintf("select * from _vt.vreplication where db_name='%s'", dbName), - &sqltypes.Result{}, nil) - vre := vreplication.NewSimpleTestEngine(ts, cells[0], mysqld, dbClientFactory, dbClientFactory, dbName, nil) - vre.Open(context.Background()) - tm := &TabletManager{ - MysqlDaemon: mysqld, - DBConfigs: dbconfigs.NewTestDBConfigs(cp, cp, dbName), - QueryServiceControl: tabletservermock.NewController(), - VREngine: vre, - } - defer func() { - vre.Close() - dbClient.Close() - mysqld.Close() - db.Close() - }() - parsed := sqlparser.BuildParsedQuery(sqlSelectVRWorkflowConfig, sidecardb.DefaultName, ":wf") + tabletUID := 100 + + tenv := newTestEnv(t, ctx, keyspace, []string{shard}) + defer tenv.close() + + tablet := tenv.addTablet(t, tabletUID, keyspace, shard) + defer tenv.deleteTablet(tablet.tablet) + + parsed := sqlparser.BuildParsedQuery(sqlSelectVReplicationWorkflowConfig, sidecar.DefaultName, ":wf") bindVars := map[string]*querypb.BindVariable{ "wf": sqltypes.StringBindVariable(workflow), } @@ -83,10 +473,10 @@ func TestUpdateVRWorkflow(t *testing.T) { keyspace, shard) selectRes := sqltypes.MakeTestResult( sqltypes.MakeTestFields( - 
"id|source|cell|tablet_types", - "int64|varchar|varchar|varchar", + "id|source|cell|tablet_types|state|message", + "int64|varchar|varchar|varchar|varchar|varbinary", ), - fmt.Sprintf("%d|%s|%s|%s", vreplID, blsStr, cells[0], tabletTypes[0]), + fmt.Sprintf("%d|%s|%s|%s|Running|", vreplID, blsStr, cells[0], tabletTypes[0]), ) idQuery, err := sqlparser.ParseAndBind("select id from _vt.vreplication where id = %a", sqltypes.Int64BindVariable(int64(vreplID))) @@ -101,67 +491,86 @@ func TestUpdateVRWorkflow(t *testing.T) { tests := []struct { name string - request *tabletmanagerdatapb.UpdateVRWorkflowRequest + request *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest query string }{ { name: "update cells", - request: &tabletmanagerdatapb.UpdateVRWorkflowRequest{ + request: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ Workflow: workflow, + State: binlogdatapb.VReplicationWorkflowState(textutil.SimulatedNullInt), Cells: []string{"zone2"}, // TabletTypes is an empty value, so the current value should be cleared }, - query: fmt.Sprintf(`update _vt.vreplication set source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell = '%s', tablet_types = '' where id in (%d)`, + query: fmt.Sprintf(`update _vt.vreplication set state = 'Running', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell = '%s', tablet_types = '' where id in (%d)`, keyspace, shard, "zone2", vreplID), }, { name: "update cells, NULL tablet_types", - request: &tabletmanagerdatapb.UpdateVRWorkflowRequest{ + request: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ Workflow: workflow, + State: binlogdatapb.VReplicationWorkflowState(textutil.SimulatedNullInt), Cells: []string{"zone3"}, - TabletTypes: textutil.SimulatedNullStringSlice, // So keep the current value 
of replica + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)}, // So keep the current value of replica }, - query: fmt.Sprintf(`update _vt.vreplication set source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell = '%s', tablet_types = '%s' where id in (%d)`, + query: fmt.Sprintf(`update _vt.vreplication set state = 'Running', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell = '%s', tablet_types = '%s' where id in (%d)`, keyspace, shard, "zone3", tabletTypes[0], vreplID), }, { name: "update tablet_types", - request: &tabletmanagerdatapb.UpdateVRWorkflowRequest{ - Workflow: workflow, - TabletTypes: []string{"in_order:rdonly", "replica"}, + request: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ + Workflow: workflow, + State: binlogdatapb.VReplicationWorkflowState(textutil.SimulatedNullInt), + TabletSelectionPreference: tabletmanagerdatapb.TabletSelectionPreference_INORDER, + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType_RDONLY, topodatapb.TabletType_REPLICA}, }, - query: fmt.Sprintf(`update _vt.vreplication set source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell = '', tablet_types = '%s' where id in (%d)`, + query: fmt.Sprintf(`update _vt.vreplication set state = 'Running', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell = '', tablet_types = '%s' where id in (%d)`, keyspace, shard, "in_order:rdonly,replica", vreplID), }, { name: "update tablet_types, NULL cells", - request: &tabletmanagerdatapb.UpdateVRWorkflowRequest{ + request: 
&tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ Workflow: workflow, + State: binlogdatapb.VReplicationWorkflowState(textutil.SimulatedNullInt), Cells: textutil.SimulatedNullStringSlice, // So keep the current value of zone1 - TabletTypes: []string{"rdonly"}, + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType_RDONLY}, }, - query: fmt.Sprintf(`update _vt.vreplication set source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell = '%s', tablet_types = '%s' where id in (%d)`, + query: fmt.Sprintf(`update _vt.vreplication set state = 'Running', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell = '%s', tablet_types = '%s' where id in (%d)`, keyspace, shard, cells[0], "rdonly", vreplID), }, { name: "update on_ddl", - request: &tabletmanagerdatapb.UpdateVRWorkflowRequest{ + request: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ Workflow: workflow, + State: binlogdatapb.VReplicationWorkflowState(textutil.SimulatedNullInt), OnDdl: binlogdatapb.OnDDLAction_EXEC, }, - query: fmt.Sprintf(`update _vt.vreplication set source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}} on_ddl:%s', cell = '', tablet_types = '' where id in (%d)`, - keyspace, shard, binlogdatapb.OnDDLAction_name[int32(binlogdatapb.OnDDLAction_EXEC)], vreplID), + query: fmt.Sprintf(`update _vt.vreplication set state = 'Running', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}} on_ddl:%s', cell = '', tablet_types = '' where id in (%d)`, + keyspace, shard, binlogdatapb.OnDDLAction_EXEC.String(), vreplID), }, { name: "update 
cell,tablet_types,on_ddl", - request: &tabletmanagerdatapb.UpdateVRWorkflowRequest{ + request: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ Workflow: workflow, + State: binlogdatapb.VReplicationWorkflowState(textutil.SimulatedNullInt), Cells: []string{"zone1", "zone2", "zone3"}, - TabletTypes: []string{"rdonly", "replica", "primary"}, + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType_RDONLY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_PRIMARY}, OnDdl: binlogdatapb.OnDDLAction_EXEC_IGNORE, }, - query: fmt.Sprintf(`update _vt.vreplication set source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}} on_ddl:%s', cell = '%s', tablet_types = '%s' where id in (%d)`, - keyspace, shard, binlogdatapb.OnDDLAction_name[int32(binlogdatapb.OnDDLAction_EXEC_IGNORE)], "zone1,zone2,zone3", "rdonly,replica,primary", vreplID), + query: fmt.Sprintf(`update _vt.vreplication set state = 'Running', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}} on_ddl:%s', cell = '%s', tablet_types = '%s' where id in (%d)`, + keyspace, shard, binlogdatapb.OnDDLAction_EXEC_IGNORE.String(), "zone1,zone2,zone3", "rdonly,replica,primary", vreplID), + }, + { + name: "update state", + request: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ + Workflow: workflow, + State: binlogdatapb.VReplicationWorkflowState_Stopped, + Cells: textutil.SimulatedNullStringSlice, + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)}, + OnDdl: binlogdatapb.OnDDLAction(textutil.SimulatedNullInt), + }, + query: fmt.Sprintf(`update _vt.vreplication set state = '%s', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell 
= '%s', tablet_types = '%s' where id in (%d)`, + binlogdatapb.VReplicationWorkflowState_Stopped.String(), keyspace, shard, cells[0], tabletTypes[0], vreplID), }, } @@ -179,18 +588,435 @@ func TestUpdateVRWorkflow(t *testing.T) { require.NotEqual(t, "", tt.query, "No expected query provided") // These are the same for each RPC call. - dbClient.ExpectRequest(fmt.Sprintf("use %s", sidecardb.DefaultName), &sqltypes.Result{}, nil) - dbClient.ExpectRequest(selectQuery, selectRes, nil) - dbClient.ExpectRequest(fmt.Sprintf("use %s", sidecardb.DefaultName), &sqltypes.Result{}, nil) - dbClient.ExpectRequest(idQuery, idRes, nil) + tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.DefaultName), &sqltypes.Result{}, nil) + tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(selectQuery, selectRes, nil) + tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.DefaultName), &sqltypes.Result{}, nil) + tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(idQuery, idRes, nil) // This is our expected query, which will also short circuit // the test with an error as at this point we've tested what // we wanted to test. - dbClient.ExpectRequest(tt.query, &sqltypes.Result{RowsAffected: 1}, shortCircuitErr) - _, err = tm.UpdateVRWorkflow(ctx, tt.request) - dbClient.Wait() - require.ErrorIs(t, err, shortCircuitErr) + tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(tt.query, &sqltypes.Result{RowsAffected: 1}, errShortCircuit) + _, err = tenv.tmc.tablets[tabletUID].tm.UpdateVReplicationWorkflow(ctx, tt.request) + tenv.tmc.tablets[tabletUID].vrdbClient.Wait() + require.ErrorIs(t, err, errShortCircuit) + }) + } +} + +// TestSourceShardSelection tests the RPC calls made by VtctldServer to tablet +// managers include the correct set of BLS settings. +// +// errShortCircuit is intentionally injected into the MoveTables workflow to +// short-circuit the workflow after we've validated everything we wanted to in +// the test. 
+func TestSourceShardSelection(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sourceKs := "sourceks" + sourceShard0 := "-55" + sourceShard1 := "55-aa" + sourceShard2 := "aa-" + sourceTabletUID0 := 200 + sourceTabletUID1 := 201 + sourceTabletUID2 := 202 + + targetKs := "targetks" + targetShard0 := "-80" + targetShard1 := "80-" + targetTabletUID0 := 300 + targetTabletUID1 := 301 + + wf := "testwf" + + tenv := newTestEnv(t, ctx, sourceKs, []string{sourceShard0, sourceShard1, sourceShard2}) + defer tenv.close() + + sourceTablets := map[int]*fakeTabletConn{ + sourceTabletUID0: tenv.addTablet(t, sourceTabletUID0, sourceKs, sourceShard0), + sourceTabletUID1: tenv.addTablet(t, sourceTabletUID1, sourceKs, sourceShard1), + sourceTabletUID2: tenv.addTablet(t, sourceTabletUID2, sourceKs, sourceShard2), + } + for _, st := range sourceTablets { + defer tenv.deleteTablet(st.tablet) + } + + targetTablets := map[int]*fakeTabletConn{ + targetTabletUID0: tenv.addTablet(t, targetTabletUID0, targetKs, targetShard0), + targetTabletUID1: tenv.addTablet(t, targetTabletUID1, targetKs, targetShard1), + } + for _, tt := range targetTablets { + defer tenv.deleteTablet(tt.tablet) + } + + ws := workflow.NewServer(tenv.ts, tenv.tmc) + + tenv.ts.SaveVSchema(ctx, sourceKs, &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "id", + Name: "hash", + }}, + }, + }, + }) + tenv.ts.SaveVSchema(ctx, targetKs, &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "id", + Name: "hash", + }}, + }, + }, + }) + + tests := []struct { + name string + req *vtctldatapb.MoveTablesCreateRequest + schema 
*tabletmanagerdatapb.SchemaDefinition + vschema *vschemapb.Keyspace + streams map[int][]string + }{ + { + name: "same primary vindexes, use intersecting source shards", + req: &vtctldatapb.MoveTablesCreateRequest{ + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + Workflow: wf, + Cells: tenv.cells, + AllTables: true, + AutoStart: false, + }, + streams: map[int][]string{ + targetTabletUID0: { + sourceShard0, + sourceShard1, + }, + targetTabletUID1: { + sourceShard1, + sourceShard2, + }, + }, + }, + { + name: "different primary vindexes, use all source shards", + req: &vtctldatapb.MoveTablesCreateRequest{ + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + Workflow: wf, + Cells: tenv.cells, + AllTables: true, + AutoStart: false, + }, + vschema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "id", + Name: "hash", + }}, + }, + }, + }, + streams: map[int][]string{ + targetTabletUID0: { + sourceShard0, + sourceShard1, + sourceShard2, + }, + targetTabletUID1: { + sourceShard0, + sourceShard1, + sourceShard2, + }, + }, + }, + } + + for _, tt := range targetTablets { + tenv.tmc.setVReplicationExecResults(tt.tablet, fmt.Sprintf("select 1 from _vt.vreplication where db_name='vt_%s' and workflow='%s'", + targetKs, wf), &sqltypes.Result{}) + tenv.tmc.setVReplicationExecResults(tt.tablet, fmt.Sprintf("select 1 from _vt.vreplication where db_name='vt_%s' and message='FROZEN' and workflow_sub_type != 1", + targetKs), &sqltypes.Result{}) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // This is needed because MockDBClient uses t.Fatal() + // which doesn't play well with subtests. 
+ defer func() { + if err := recover(); err != nil { + t.Errorf("Recovered from panic: %v; Stack: %s", err, string(debug.Stack())) + } + }() + + require.NotNil(t, tt.req, "No MoveTablesCreate request provided") + require.NotEmpty(t, tt.streams, "No expected streams provided") + + if tt.schema == nil { + tt.schema = defaultSchema + } + tenv.tmc.SetSchema(tt.schema) + + if tt.vschema != nil { + tenv.ts.SaveVSchema(ctx, targetKs, tt.vschema) + } + + for uid, streams := range tt.streams { + tt := targetTablets[uid] + for i, sourceShard := range streams { + tt.vrdbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) + var err error + if i == len(streams)-1 { + // errShortCircuit is intentionally injected into the MoveTables + // workflow to short-circuit the workflow after we've validated + // everything we wanted to in the test. + err = errShortCircuit + } + tt.vrdbClient.ExpectRequest( + fmt.Sprintf(`%s values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1 where in_keyrange(id, \'%s.hash\', \'%s\')\"}}', '', 0, 0, '%s', '', now(), 0, 'Stopped', '%s', 1, 0, 0)`, + insertVReplicationPrefix, wf, sourceKs, sourceShard, targetKs, tt.tablet.Shard, tenv.cells[0], tenv.dbName), + &sqltypes.Result{InsertID: uint64(i + 1)}, + err, + ) + if errors.Is(err, errShortCircuit) { + break + } + tt.vrdbClient.ExpectRequest(getAutoIncrementStep, &sqltypes.Result{}, nil) + tt.vrdbClient.ExpectRequest( + fmt.Sprintf("select * from _vt.vreplication where id = %d", uint64(i+1)), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|source|state", + "int64|varchar|varchar", + ), + fmt.Sprintf("%d|%s|Stopped", uint64(i+1), fmt.Sprintf(`keyspace:"%s" shard:"%s" filter:{rules:{match:"t1" filter:"select * from t1 where in_keyrange(id, '%s.hash', '%s')"}}`, sourceKs, sourceShard, targetKs, tt.tablet.Shard)), + ), + nil, + ) + } + } + + _, err := ws.MoveTablesCreate(ctx, tt.req) + for _, tt := range targetTablets { + tt.vrdbClient.Wait() + } + 
// errShortCircuit is intentionally injected into the MoveTables + // workflow to short-circuit the workflow after we've validated + // everything we wanted to in the test. + require.ErrorContains(t, err, fmt.Sprintf("%s\n%s", errShortCircuit.Error(), errShortCircuit.Error())) }) } } + +// TestFailedMoveTablesCreateCleanup tests that the workflow +// and its artifacts are cleaned up when the workflow creation +// fails -- specifically after the point where we have created +// the workflow streams. +func TestFailedMoveTablesCreateCleanup(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 200 + shard := "0" + targetTabletUID := 300 + targetKs := "targetks" + wf := "testwf" + table := defaultSchema.TableDefinitions[0].Name + invalidTimeZone := "NOPE" + bls := fmt.Sprintf("keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"%s\" filter:\"select * from %s\"}}", + sourceKs, shard, table, table) + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + ws := workflow.NewServer(tenv.ts, tenv.tmc) + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) + defer tenv.deleteTablet(sourceTablet.tablet) + targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, shard) + defer tenv.deleteTablet(targetTablet.tablet) + + tenv.mysqld.Schema = defaultSchema + tenv.mysqld.Schema.DatabaseSchema = tenv.dbName + tenv.mysqld.FetchSuperQueryMap = make(map[string]*sqltypes.Result) + tenv.mysqld.FetchSuperQueryMap[`select character_set_name, collation_name, column_name, data_type, column_type, extra from information_schema.columns where .*`] = sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "character_set_name|collation_name|column_name|data_type|column_type|extra", + "varchar|varchar|varchar|varchar|varchar|varchar", + ), + "NULL|NULL|id|bigint|bigint|", + "NULL|NULL|c2|bigint|bigint|", + ) + + // Let's be sure that the routing rules are empty to start. 
+ err := topotools.SaveRoutingRules(ctx, tenv.ts, nil) + require.NoError(t, err, "failed to save routing rules") + + tenv.tmc.setVReplicationExecResults(targetTablet.tablet, fmt.Sprintf(checkForWorkflow, targetKs, wf), &sqltypes.Result{}) + tenv.tmc.setVReplicationExecResults(targetTablet.tablet, fmt.Sprintf(checkForFrozenWorkflow, targetKs), &sqltypes.Result{}) + tenv.tmc.setVReplicationExecResults(targetTablet.tablet, fmt.Sprintf(getWorkflow, targetKs, wf), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id", + "int64", + ), + "1", + ), + ) + targetTablet.vrdbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest( + fmt.Sprintf("%s %s", + insertVReplicationPrefix, + fmt.Sprintf(`values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"%s\" filter:\"select * from %s\"}} source_time_zone:\"%s\" target_time_zone:\"UTC\"', '', 0, 0, '%s', 'primary', now(), 0, 'Stopped', '%s', 1, 0, 0)`, + wf, sourceKs, shard, table, table, invalidTimeZone, strings.Join(tenv.cells, ","), tenv.dbName), + ), + &sqltypes.Result{ + RowsAffected: 1, + InsertID: 1, + }, + nil, + ) + targetTablet.vrdbClient.ExpectRequest(getAutoIncrementStep, &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(getVReplicationRecord, + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|source", + "int64|varchar", + ), + fmt.Sprintf("1|%s", bls), + ), + nil, + ) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(updatePickedSourceTablet, tenv.cells[0], sourceTabletUID), + &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(setSessionTZ, &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(setNames, &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(getRowsCopied, + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "rows_copied", + "int64", + ), + "0", + ), + nil, + ) + targetTablet.vrdbClient.ExpectRequest(getWorkflowState, + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + 
"pos|stop_pos|max_tps|max_replication_lag|state|workflow_type|workflow|workflow_sub_type|defer_secondary_keys", + "varchar|varchar|int64|int64|varchar|int64|varchar|int64|int64", + ), + fmt.Sprintf("||0|0|Stopped|1|%s|0|0", wf), + ), + nil, + ) + targetTablet.vrdbClient.ExpectRequest(getNumCopyStateTable, + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "count(distinct table_name)", + "int64", + ), + "1", + ), + nil, + ) + targetTablet.vrdbClient.ExpectRequest(getWorkflowState, + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "pos|stop_pos|max_tps|max_replication_lag|state|workflow_type|workflow|workflow_sub_type|defer_secondary_keys", + "varchar|varchar|int64|int64|varchar|int64|varchar|int64|int64", + ), + fmt.Sprintf("||0|0|Stopped|1|%s|0|0", wf), + ), + nil, + ) + targetTablet.vrdbClient.ExpectRequest(getNumCopyStateTable, + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "count(distinct table_name)", + "int64", + ), + "1", + ), + nil, + ) + targetTablet.vrdbClient.ExpectRequest(getBinlogRowImage, + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "@@binlog_row_image", + "varchar", + ), + "FULL", + ), + nil, + ) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(insertStreamsCreatedLog, bls), &sqltypes.Result{}, nil) + + tenv.tmc.setVReplicationExecResults(targetTablet.tablet, + fmt.Sprintf("select convert_tz('2006-01-02 15:04:05', '%s', 'UTC')", invalidTimeZone), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + fmt.Sprintf("convert_tz('2006-01-02 15:04:05', '%s', 'UTC')", invalidTimeZone), + "datetime", + ), + "NULL", + ), + ) + + // We expect the workflow creation to fail due to the invalid time + // zone and thus the workflow iteslf to be cleaned up. 
+ tenv.tmc.setVReplicationExecResults(sourceTablet.tablet, + fmt.Sprintf(deleteWorkflow, sourceKs, workflow.ReverseWorkflowName(wf)), + &sqltypes.Result{RowsAffected: 1}, + ) + tenv.tmc.setVReplicationExecResults(targetTablet.tablet, + fmt.Sprintf(deleteWorkflow, targetKs, wf), + &sqltypes.Result{RowsAffected: 1}, + ) + + // Save the current target vschema. + vs, err := tenv.ts.GetVSchema(ctx, targetKs) + require.NoError(t, err, "failed to get target vschema") + + _, err = ws.MoveTablesCreate(ctx, &vtctldatapb.MoveTablesCreateRequest{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + Cells: tenv.cells, + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType_PRIMARY}, + IncludeTables: []string{table}, + SourceTimeZone: invalidTimeZone, + }) + require.ErrorContains(t, err, fmt.Sprintf("unable to perform time_zone conversions from %s to UTC", invalidTimeZone)) + + // Check that there are no orphaned routing rules. + rules, err := topotools.GetRoutingRules(ctx, tenv.ts) + require.NoError(t, err, "failed to get routing rules") + require.Equal(t, 0, len(rules), "expected no routing rules to be present") + + // Check that our vschema changes were also rolled back. 
+ vs2, err := tenv.ts.GetVSchema(ctx, targetKs) + require.NoError(t, err, "failed to get target vschema") + require.Equal(t, vs, vs2, "expected vschema to be unchanged") +} diff --git a/go/vt/vttablet/tabletmanager/shard_sync.go b/go/vt/vttablet/tabletmanager/shard_sync.go index 7dc8710758e..ab995ec14b1 100644 --- a/go/vt/vttablet/tabletmanager/shard_sync.go +++ b/go/vt/vttablet/tabletmanager/shard_sync.go @@ -22,8 +22,8 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo" @@ -114,7 +114,7 @@ func (tm *TabletManager) shardSyncLoop(ctx context.Context, notifyChan <-chan st } // If we think we're primary, check if we need to update the shard record. // Fetch the start time from the record we just got, because the tm's tablet can change. - primaryAlias, shouldDemote, err := syncShardPrimary(ctx, tm.TopoServer, tablet, logutil.ProtoToTime(tablet.PrimaryTermStartTime)) + primaryAlias, shouldDemote, err := syncShardPrimary(ctx, tm.TopoServer, tablet, protoutil.TimeFromProto(tablet.PrimaryTermStartTime).UTC()) if err != nil { log.Errorf("Failed to sync shard record: %v", err) // Start retry timer and go back to sleep. 
@@ -191,7 +191,7 @@ func syncShardPrimary(ctx context.Context, ts *topo.Server, tablet *topodatapb.T aliasStr := topoproto.TabletAliasString(tablet.Alias) log.Infof("Updating shard record: primary_alias=%v, primary_term_start_time=%v", aliasStr, PrimaryTermStartTime) si.PrimaryAlias = tablet.Alias - si.PrimaryTermStartTime = logutil.TimeToProto(PrimaryTermStartTime) + si.PrimaryTermStartTime = protoutil.TimeToProto(PrimaryTermStartTime) return nil }) if err != nil { diff --git a/go/vt/vttablet/tabletmanager/shard_sync_test.go b/go/vt/vttablet/tabletmanager/shard_sync_test.go index 83a7cede2e1..24078efa977 100644 --- a/go/vt/vttablet/tabletmanager/shard_sync_test.go +++ b/go/vt/vttablet/tabletmanager/shard_sync_test.go @@ -44,8 +44,9 @@ const ( ) func TestShardSync(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") statsTabletTypeCount.ResetAll() tm := newTestTM(t, ts, 100, keyspace, shard) defer tm.Stop() diff --git a/go/vt/vttablet/tabletmanager/tm_init.go b/go/vt/vttablet/tabletmanager/tm_init.go index 67d50c71e78..2cd21c09a21 100644 --- a/go/vt/vttablet/tabletmanager/tm_init.go +++ b/go/vt/vttablet/tabletmanager/tm_init.go @@ -46,9 +46,11 @@ import ( "github.com/spf13/pflag" "golang.org/x/sync/semaphore" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/flagutil" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/netutil" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sets" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/binlog" @@ -61,7 +63,6 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" @@ -97,8 +98,6 @@ func registerInitFlags(fs *pflag.FlagSet) 
{ fs.StringVar(&initDbNameOverride, "init_db_name_override", initDbNameOverride, "(init parameter) override the name of the db used by vttablet. Without this flag, the db name defaults to vt_") fs.StringVar(&skipBuildInfoTags, "vttablet_skip_buildinfo_tags", skipBuildInfoTags, "comma-separated list of buildinfo tags to skip from merging with --init_tags. each tag is either an exact match or a regular expression of the form '/regexp/'.") fs.Var(&initTags, "init_tags", "(init parameter) comma separated list of key:value pairs used to tag the tablet") - fs.BoolVar(&initPopulateMetadata, "init_populate_metadata", initPopulateMetadata, "(init parameter) populate metadata tables even if restore_from_backup is disabled. If restore_from_backup is enabled, metadata tables are always populated regardless of this flag.") - fs.MarkDeprecated("init_populate_metadata", "this flag is no longer being used and will be removed in future versions") fs.DurationVar(&initTimeout, "init_timeout", initTimeout, "(init parameter) timeout to use for the init phase.") } @@ -452,6 +451,10 @@ func (tm *TabletManager) Stop() { tm.stopShardSync() tm.stopRebuildKeyspace() + if tm.QueryServiceControl != nil { + tm.QueryServiceControl.Stats().Stop() + } + if tm.UpdateStream != nil { tm.UpdateStream.Disable() } @@ -496,7 +499,7 @@ func (tm *TabletManager) createKeyspaceShard(ctx context.Context) (*topo.ShardIn // If the keyspace exists but this is the first tablet added, then // update the keyspace record to the default. 
if ks.SidecarDbName == "" { - ks.SidecarDbName = sidecardb.DefaultName + ks.SidecarDbName = sidecar.DefaultName getlockctx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) defer cancel() lockctx, unlock, lockErr := tm.TopoServer.LockKeyspace(getlockctx, tablet.Keyspace, "Setting sidecar database name") @@ -513,7 +516,7 @@ func (tm *TabletManager) createKeyspaceShard(ctx context.Context) (*topo.ShardIn } } // Have the tablet use the sidecar database that's set for the keyspace. - sidecardb.SetName(ks.SidecarDbName) + sidecar.SetName(ks.SidecarDbName) return nil } if err := tm.withRetry(ctx, "setting sidecar database name", setSidecarDBName); err != nil { @@ -629,7 +632,7 @@ func (tm *TabletManager) checkPrimaryShip(ctx context.Context, si *topo.ShardInf // Update the primary term start time (current value is 0) because we // assume that we are actually the PRIMARY and in case of a tiebreak, // vtgate should prefer us. - tablet.PrimaryTermStartTime = logutil.TimeToProto(time.Now()) + tablet.PrimaryTermStartTime = protoutil.TimeToProto(time.Now()) }) case err == nil: if oldTablet.Type == topodatapb.TabletType_PRIMARY { @@ -764,6 +767,9 @@ func (tm *TabletManager) handleRestore(ctx context.Context) (bool, error) { if tm.Cnf == nil && restoreFromBackup { return false, fmt.Errorf("you cannot enable --restore_from_backup without a my.cnf file") } + if restoreToTimestampStr != "" && restoreToPos != "" { + return false, fmt.Errorf("--restore-to-timestamp and --restore-to-pos are mutually exclusive") + } // Restore in the background if restoreFromBackup { @@ -773,7 +779,6 @@ func (tm *TabletManager) handleRestore(ctx context.Context) (bool, error) { // Zero date will cause us to use the latest, which is the default backupTime := time.Time{} - // Or if a backup timestamp was specified then we use the last backup taken at or before that time if restoreFromBackupTsStr != "" { var err error @@ -783,9 +788,17 @@ func (tm *TabletManager) 
handleRestore(ctx context.Context) (bool, error) { } } + restoreToTimestamp := time.Time{} + if restoreToTimestampStr != "" { + var err error + restoreToTimestamp, err = mysqlctl.ParseRFC3339(restoreToTimestampStr) + if err != nil { + log.Exitf(fmt.Sprintf("RestoreFromBackup failed: unable to parse the --restore-to-timestamp value provided of '%s'. Error: %v", restoreToTimestampStr, err)) + } + } // restoreFromBackup will just be a regular action // (same as if it was triggered remotely) - if err := tm.RestoreData(ctx, logutil.NewConsoleLogger(), waitForBackupInterval, false /* deleteBeforeRestore */, backupTime); err != nil { + if err := tm.RestoreData(ctx, logutil.NewConsoleLogger(), waitForBackupInterval, false /* deleteBeforeRestore */, backupTime, restoreToTimestamp, restoreToPos); err != nil { log.Exitf("RestoreFromBackup failed: %v", err) } }() diff --git a/go/vt/vttablet/tabletmanager/tm_init_test.go b/go/vt/vttablet/tabletmanager/tm_init_test.go index ccaedf0c272..148042bd6b1 100644 --- a/go/vt/vttablet/tabletmanager/tm_init_test.go +++ b/go/vt/vttablet/tabletmanager/tm_init_test.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/dbconfigs" @@ -164,10 +165,11 @@ func TestStartCreateKeyspaceShard(t *testing.T) { defer func(saved time.Duration) { rebuildKeyspaceRetryInterval = saved }(rebuildKeyspaceRetryInterval) rebuildKeyspaceRetryInterval = 10 * time.Millisecond - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() statsTabletTypeCount.ResetAll() cell := "cell1" - ts := memorytopo.NewServer(cell) + ts := memorytopo.NewServer(ctx, cell) tm := newTestTM(t, ts, 1, "ks", "0") defer tm.Stop() @@ -178,7 +180,7 @@ func TestStartCreateKeyspaceShard(t *testing.T) { _, err := ts.GetShard(ctx, "ks", "0") 
require.NoError(t, err) - ensureSrvKeyspace(t, ts, cell, "ks") + ensureSrvKeyspace(t, ctx, ts, cell, "ks") srvVSchema, err := ts.GetSrvVSchema(context.Background(), cell) require.NoError(t, err) @@ -192,7 +194,7 @@ func TestStartCreateKeyspaceShard(t *testing.T) { defer tm.Stop() _, err = ts.GetShard(ctx, "ks1", "0") require.NoError(t, err) - ensureSrvKeyspace(t, ts, cell, "ks1") + ensureSrvKeyspace(t, ctx, ts, cell, "ks1") srvVSchema, err = ts.GetSrvVSchema(context.Background(), cell) require.NoError(t, err) assert.Equal(t, wantVSchema, srvVSchema.Keyspaces["ks1"]) @@ -241,16 +243,17 @@ func TestStartCreateKeyspaceShard(t *testing.T) { tm2 := newTestTM(t, ts, 6, "ks4", "80-") defer tm2.Stop() // Now that we've started the tablet for the other shard, srvKeyspace will succeed. - ensureSrvKeyspace(t, ts, cell, "ks4") + ensureSrvKeyspace(t, ctx, ts, cell, "ks4") } func TestCheckPrimaryShip(t *testing.T) { defer func(saved time.Duration) { rebuildKeyspaceRetryInterval = saved }(rebuildKeyspaceRetryInterval) rebuildKeyspaceRetryInterval = 10 * time.Millisecond - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() cell := "cell1" - ts := memorytopo.NewServer(cell) + ts := memorytopo.NewServer(ctx, cell) alias := &topodatapb.TabletAlias{ Cell: "cell1", Uid: 1, @@ -260,7 +263,7 @@ func TestCheckPrimaryShip(t *testing.T) { // This will create the respective topology records. 
tm := newTestTM(t, ts, 1, "ks", "0") tablet := tm.Tablet() - ensureSrvKeyspace(t, ts, cell, "ks") + ensureSrvKeyspace(t, ctx, ts, cell, "ks") ti, err := ts.GetTablet(ctx, alias) require.NoError(t, err) assert.Equal(t, topodatapb.TabletType_REPLICA, ti.Type) @@ -273,7 +276,7 @@ func TestCheckPrimaryShip(t *testing.T) { now := time.Now() _, err = ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { si.PrimaryAlias = alias - si.PrimaryTermStartTime = logutil.TimeToProto(now) + si.PrimaryTermStartTime = protoutil.TimeToProto(now) // Reassign to now for easier comparison. now = si.GetPrimaryTermStartTime() return nil @@ -346,7 +349,7 @@ func TestCheckPrimaryShip(t *testing.T) { require.NoError(t, err) _, err = ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { si.PrimaryAlias = otherAlias - si.PrimaryTermStartTime = logutil.TimeToProto(ter1.Add(-10 * time.Second)) + si.PrimaryTermStartTime = protoutil.TimeToProto(ter1.Add(-10 * time.Second)) return nil }) require.NoError(t, err) @@ -363,7 +366,7 @@ func TestCheckPrimaryShip(t *testing.T) { // timestamp, we remain replica. 
_, err = ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { si.PrimaryAlias = otherAlias - si.PrimaryTermStartTime = logutil.TimeToProto(ter4.Add(10 * time.Second)) + si.PrimaryTermStartTime = protoutil.TimeToProto(ter4.Add(10 * time.Second)) return nil }) require.NoError(t, err) @@ -388,9 +391,10 @@ func TestCheckPrimaryShip(t *testing.T) { } func TestStartCheckMysql(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() cell := "cell1" - ts := memorytopo.NewServer(cell) + ts := memorytopo.NewServer(ctx, cell) tablet := newTestTablet(t, 1, "ks", "0") cp := mysql.ConnParams{ Host: "foo", @@ -418,9 +422,10 @@ func TestStartFindMysqlPort(t *testing.T) { defer func(saved time.Duration) { mysqlPortRetryInterval = saved }(mysqlPortRetryInterval) mysqlPortRetryInterval = 50 * time.Millisecond - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() cell := "cell1" - ts := memorytopo.NewServer(cell) + ts := memorytopo.NewServer(ctx, cell) tablet := newTestTablet(t, 1, "ks", "0") fmd := newTestMysqlDaemon(t, -1) tm := &TabletManager{ @@ -460,9 +465,10 @@ func TestStartFindMysqlPort(t *testing.T) { // Init tablet fixes replication data when safe func TestStartFixesReplicationData(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() cell := "cell1" - ts := memorytopo.NewServer(cell, "cell2") + ts := memorytopo.NewServer(ctx, cell, "cell2") tm := newTestTM(t, ts, 1, "ks", "0") defer tm.Stop() tabletAlias := tm.tabletAlias @@ -493,8 +499,9 @@ func TestStartFixesReplicationData(t *testing.T) { // to be created due to a NodeExists error. During this particular error we were not doing // the sanity checks that the provided tablet was the same in the topo. 
func TestStartDoesNotUpdateReplicationDataForTabletInWrongShard(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") tm := newTestTM(t, ts, 1, "ks", "0") tm.Stop() @@ -516,9 +523,10 @@ func TestCheckTabletTypeResets(t *testing.T) { defer func(saved time.Duration) { rebuildKeyspaceRetryInterval = saved }(rebuildKeyspaceRetryInterval) rebuildKeyspaceRetryInterval = 10 * time.Millisecond - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() cell := "cell1" - ts := memorytopo.NewServer(cell) + ts := memorytopo.NewServer(ctx, cell) alias := &topodatapb.TabletAlias{ Cell: "cell1", Uid: 1, @@ -528,7 +536,7 @@ func TestCheckTabletTypeResets(t *testing.T) { // This will create the respective topology records. tm := newTestTM(t, ts, 1, "ks", "0") tablet := tm.Tablet() - ensureSrvKeyspace(t, ts, cell, "ks") + ensureSrvKeyspace(t, ctx, ts, cell, "ks") ti, err := ts.GetTablet(ctx, alias) require.NoError(t, err) assert.Equal(t, topodatapb.TabletType_REPLICA, ti.Type) @@ -555,7 +563,7 @@ func TestCheckTabletTypeResets(t *testing.T) { now := time.Now() _, err = ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { si.PrimaryAlias = alias - si.PrimaryTermStartTime = logutil.TimeToProto(now) + si.PrimaryTermStartTime = protoutil.TimeToProto(now) // Reassign to now for easier comparison. 
now = si.GetPrimaryTermStartTime() return nil @@ -711,11 +719,11 @@ func newTestTablet(t *testing.T, uid int, keyspace, shard string) *topodatapb.Ta } } -func ensureSrvKeyspace(t *testing.T, ts *topo.Server, cell, keyspace string) { +func ensureSrvKeyspace(t *testing.T, ctx context.Context, ts *topo.Server, cell, keyspace string) { t.Helper() found := false for i := 0; i < 10; i++ { - _, err := ts.GetSrvKeyspace(context.Background(), cell, "ks") + _, err := ts.GetSrvKeyspace(ctx, cell, keyspace) if err == nil { found = true break diff --git a/go/vt/vttablet/tabletmanager/tm_state.go b/go/vt/vttablet/tabletmanager/tm_state.go index d7124130174..df814ba5bee 100644 --- a/go/vt/vttablet/tabletmanager/tm_state.go +++ b/go/vt/vttablet/tabletmanager/tm_state.go @@ -27,10 +27,10 @@ import ( "github.com/spf13/pflag" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo" @@ -89,7 +89,7 @@ func newTMState(tm *TabletManager, tablet *topodatapb.Tablet) *tmState { return &tmState{ tm: tm, displayState: displayState{ - tablet: proto.Clone(tablet).(*topodatapb.Tablet), + tablet: tablet.CloneVT(), }, tablet: tablet, ctx: ctx, @@ -186,7 +186,7 @@ func (ts *tmState) ChangeTabletType(ctx context.Context, tabletType topodatapb.T log.Infof("Changing Tablet Type: %v for %s", tabletType, ts.tablet.Alias.String()) if tabletType == topodatapb.TabletType_PRIMARY { - PrimaryTermStartTime := logutil.TimeToProto(time.Now()) + PrimaryTermStartTime := protoutil.TimeToProto(time.Now()) // Update the tablet record first. 
_, err := topotools.ChangeType(ctx, ts.tm.TopoServer, ts.tm.tabletAlias, tabletType, PrimaryTermStartTime) @@ -264,7 +264,7 @@ func (ts *tmState) updateLocked(ctx context.Context) error { return nil } - terTime := logutil.ProtoToTime(ts.tablet.PrimaryTermStartTime) + ptsTime := protoutil.TimeFromProto(ts.tablet.PrimaryTermStartTime).UTC() // Disable TabletServer first so the nonserving state gets advertised // before other services are shutdown. @@ -277,7 +277,7 @@ func (ts *tmState) updateLocked(ctx context.Context) error { // always return error from 'SetServingType' and 'applyDenyList' to our client. It is up to them to handle it accordingly. // UpdateLock is called from 'ChangeTabletType', 'Open' and 'RefreshFromTopoInfo'. For 'Open' and 'RefreshFromTopoInfo' we don't need // to propagate error to client hence no changes there but we will propagate error from 'ChangeTabletType' to client. - if err := ts.tm.QueryServiceControl.SetServingType(ts.tablet.Type, terTime, false, reason); err != nil { + if err := ts.tm.QueryServiceControl.SetServingType(ts.tablet.Type, ptsTime, false, reason); err != nil { errStr := fmt.Sprintf("SetServingType(serving=false) failed: %v", err) log.Errorf(errStr) // No need to short circuit. Apply all steps and return error in the end. @@ -326,7 +326,7 @@ func (ts *tmState) updateLocked(ctx context.Context) error { // Open TabletServer last so that it advertises serving after all other services are up. 
if reason == "" { - if err := ts.tm.QueryServiceControl.SetServingType(ts.tablet.Type, terTime, true, ""); err != nil { + if err := ts.tm.QueryServiceControl.SetServingType(ts.tablet.Type, ptsTime, true, ""); err != nil { errStr := fmt.Sprintf("Cannot start query service: %v", err) log.Errorf(errStr) returnErr = vterrors.Wrapf(err, errStr) @@ -459,15 +459,14 @@ type displayState struct { func (ts *tmState) publishForDisplay() { ts.displayState.mu.Lock() defer ts.displayState.mu.Unlock() - - ts.displayState.tablet = proto.Clone(ts.tablet).(*topodatapb.Tablet) + ts.displayState.tablet = ts.tablet.CloneVT() ts.displayState.deniedTables = ts.deniedTables[ts.tablet.Type] } func (ts *tmState) Tablet() *topodatapb.Tablet { ts.displayState.mu.Lock() defer ts.displayState.mu.Unlock() - return proto.Clone(ts.displayState.tablet).(*topodatapb.Tablet) + return ts.displayState.tablet.CloneVT() } func (ts *tmState) DeniedTables() []string { diff --git a/go/vt/vttablet/tabletmanager/tm_state_test.go b/go/vt/vttablet/tabletmanager/tm_state_test.go index 537580d4853..8bd98edefff 100644 --- a/go/vt/vttablet/tabletmanager/tm_state_test.go +++ b/go/vt/vttablet/tabletmanager/tm_state_test.go @@ -42,7 +42,10 @@ import ( ) func TestStateOpenClose(t *testing.T) { - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "cell1") tm := newTestTM(t, ts, 1, "ks", "0") // Re-Open should be a no-op @@ -63,8 +66,9 @@ func TestStateOpenClose(t *testing.T) { } func TestStateRefreshFromTopo(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") tm := newTestTM(t, ts, 1, "ks", "0") defer tm.Stop() @@ -73,8 +77,9 @@ func TestStateRefreshFromTopo(t *testing.T) { } func TestStateResharding(t *testing.T) { - ctx := context.Background() - ts := 
memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") tm := newTestTM(t, ts, 1, "ks", "0") defer tm.Stop() @@ -100,8 +105,9 @@ func TestStateResharding(t *testing.T) { } func TestStateDenyList(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") tm := newTestTM(t, ts, 1, "ks", "0") defer tm.Stop() @@ -131,8 +137,9 @@ func TestStateDenyList(t *testing.T) { } func TestStateTabletControls(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") tm := newTestTM(t, ts, 1, "ks", "0") defer tm.Stop() @@ -159,8 +166,9 @@ func TestStateTabletControls(t *testing.T) { } func TestStateIsShardServingisInSrvKeyspace(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") tm := newTestTM(t, ts, 1, "ks", "0") defer tm.Stop() @@ -330,8 +338,9 @@ func TestStateIsShardServingisInSrvKeyspace(t *testing.T) { } func TestStateNonServing(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") tm := newTestTM(t, ts, 1, "ks", "0") defer tm.Stop() @@ -346,8 +355,9 @@ func TestStateNonServing(t *testing.T) { } func TestStateChangeTabletType(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") statsTabletTypeCount.ResetAll() tm := newTestTM(t, ts, 2, "ks", "0") defer tm.Stop() @@ 
-387,8 +397,9 @@ func TestStateChangeTabletType(t *testing.T) { the new table type */ func TestStateChangeTabletTypeWithFailure(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") statsTabletTypeCount.ResetAll() // create TM with replica and put a hook to return error during SetServingType tm := newTestTM(t, ts, 2, "ks", "0") @@ -472,7 +483,7 @@ func TestChangeTypeErrorWhileWritingToTopo(t *testing.T) { factory := faketopo.NewFakeTopoFactory() // add cell1 to the factory. This returns a fake connection which we will use to set the get and update errors as we require. fakeConn := factory.AddCell("cell1") - ts := faketopo.NewFakeTopoServer(factory) + ts := faketopo.NewFakeTopoServer(context.TODO(), factory) statsTabletTypeCount.ResetAll() tm := newTestTM(t, ts, 2, "ks", "0") defer tm.Stop() @@ -519,8 +530,9 @@ func TestPublishStateNew(t *testing.T) { // we can't do using memorytopo, but we do test the retry // code path. 
- ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") tm := newTestTM(t, ts, 42, "ks", "0") ttablet, err := tm.TopoServer.GetTablet(ctx, tm.tabletAlias) require.NoError(t, err) @@ -565,8 +577,9 @@ func TestPublishStateNew(t *testing.T) { } func TestPublishDeleted(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") tm := newTestTM(t, ts, 2, "ks", "0") defer tm.Stop() diff --git a/go/vt/vttablet/tabletmanager/vdiff/action.go b/go/vt/vttablet/tabletmanager/vdiff/action.go index 7a18015fc24..59ee79077f7 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/action.go +++ b/go/vt/vttablet/tabletmanager/vdiff/action.go @@ -20,15 +20,19 @@ import ( "context" "encoding/json" "fmt" + "sort" + "strings" "github.com/google/uuid" - - "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/vterrors" + "google.golang.org/protobuf/encoding/protojson" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vterrors" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) @@ -43,11 +47,20 @@ const ( DeleteAction VDiffAction = "delete" AllActionArg = "all" LastActionArg = "last" + + maxVDiffsToReport = 100 ) var ( Actions = []VDiffAction{CreateAction, ShowAction, StopAction, ResumeAction, DeleteAction} ActionArgs = []string{AllActionArg, LastActionArg} + + // The real zero value has nested nil pointers. 
+ optionsZeroVal = &tabletmanagerdatapb.VDiffOptions{ + PickerOptions: &tabletmanagerdatapb.VDiffPickerOptions{}, + CoreOptions: &tabletmanagerdatapb.VDiffCoreOptions{}, + ReportOptions: &tabletmanagerdatapb.VDiffReportOptions{}, + } ) func (vde *Engine) PerformVDiffAction(ctx context.Context, req *tabletmanagerdatapb.VDiffRequest) (*tabletmanagerdatapb.VDiffResponse, error) { @@ -62,7 +75,7 @@ func (vde *Engine) PerformVDiffAction(ctx context.Context, req *tabletmanagerdat Id: 0, Output: nil, } - // We use the db_filtered user for vreplication related work + // We use the db_filtered user for vreplication related work. dbClient := vde.dbClientFactoryFiltered() if err := dbClient.Connect(); err != nil { return nil, err @@ -97,7 +110,10 @@ func (vde *Engine) getVDiffSummary(vdiffID int64, dbClient binlogplayer.DBClient var qr *sqltypes.Result var err error - query := fmt.Sprintf(sqlVDiffSummary, vdiffID) + query, err := sqlparser.ParseAndBind(sqlVDiffSummary, sqltypes.Int64BindVariable(vdiffID)) + if err != nil { + return nil, err + } if qr, err = dbClient.ExecuteFetch(query, -1); err != nil { return nil, err } @@ -108,6 +124,9 @@ func (vde *Engine) getVDiffSummary(vdiffID int64, dbClient binlogplayer.DBClient // Validate vdiff options. Also setup defaults where applicable. func (vde *Engine) fixupOptions(options *tabletmanagerdatapb.VDiffOptions) (*tabletmanagerdatapb.VDiffOptions, error) { // Assign defaults to sourceCell and targetCell if not specified. 
+ if options == nil { + options = optionsZeroVal + } sourceCell := options.PickerOptions.SourceCell targetCell := options.PickerOptions.TargetCell var defaultCell string @@ -118,10 +137,10 @@ func (vde *Engine) fixupOptions(options *tabletmanagerdatapb.VDiffOptions) (*tab return nil, err } } - if sourceCell == "" { + if sourceCell == "" { // Default is all cells sourceCell = defaultCell } - if targetCell == "" { + if targetCell == "" { // Default is all cells targetCell = defaultCell } options.PickerOptions.SourceCell = sourceCell @@ -130,6 +149,8 @@ func (vde *Engine) fixupOptions(options *tabletmanagerdatapb.VDiffOptions) (*tab return options, nil } +// getDefaultCell returns all of the cells in the topo as a comma +// separated string as the default value is all available cells. func (vde *Engine) getDefaultCell() (string, error) { cells, err := vde.ts.GetCellInfoNames(vde.ctx) if err != nil { @@ -139,15 +160,18 @@ func (vde *Engine) getDefaultCell() (string, error) { // Unreachable return "", fmt.Errorf("there are no cells in the topo") } - return cells[0], nil + sort.Strings(cells) // Ensure that the resulting value is deterministic + return strings.Join(cells, ","), nil } func (vde *Engine) handleCreateResumeAction(ctx context.Context, dbClient binlogplayer.DBClient, action VDiffAction, req *tabletmanagerdatapb.VDiffRequest, resp *tabletmanagerdatapb.VDiffResponse) error { var qr *sqltypes.Result - var err error options := req.Options - query := fmt.Sprintf(sqlGetVDiffID, encodeString(req.VdiffUuid)) + query, err := sqlparser.ParseAndBind(sqlGetVDiffID, sqltypes.StringBindVariable(req.VdiffUuid)) + if err != nil { + return err + } if qr, err = dbClient.ExecuteFetch(query, 1); err != nil { return err } @@ -165,17 +189,28 @@ func (vde *Engine) handleCreateResumeAction(ctx context.Context, dbClient binlog vde.thisTablet.Alias, err) } } - if options, err = vde.fixupOptions(options); err != nil { - return err - } - optionsJSON, err := json.Marshal(options) - if 
err != nil { - return err - } if action == CreateAction { - query := fmt.Sprintf(sqlNewVDiff, - encodeString(req.Keyspace), encodeString(req.Workflow), "pending", encodeString(string(optionsJSON)), - vde.thisTablet.Shard, topoproto.TabletDbName(vde.thisTablet), req.VdiffUuid) + // Use the options specified via the vdiff create client + // command, which we'll then store in the vdiff record. + if options, err = vde.fixupOptions(options); err != nil { + return err + } + optionsJSON, err := json.Marshal(options) + if err != nil { + return err + } + query, err := sqlparser.ParseAndBind(sqlNewVDiff, + sqltypes.StringBindVariable(req.Keyspace), + sqltypes.StringBindVariable(req.Workflow), + sqltypes.StringBindVariable("pending"), + sqltypes.StringBindVariable(string(optionsJSON)), + sqltypes.StringBindVariable(vde.thisTablet.Shard), + sqltypes.StringBindVariable(topoproto.TabletDbName(vde.thisTablet)), + sqltypes.StringBindVariable(req.VdiffUuid), + ) + if err != nil { + return err + } if qr, err = dbClient.ExecuteFetch(query, 1); err != nil { return err } @@ -185,7 +220,12 @@ func (vde *Engine) handleCreateResumeAction(ctx context.Context, dbClient binlog } resp.Id = int64(qr.InsertID) } else { - query := fmt.Sprintf(sqlResumeVDiff, encodeString(string(optionsJSON)), encodeString(req.VdiffUuid)) + query, err := sqlparser.ParseAndBind(sqlResumeVDiff, + sqltypes.StringBindVariable(req.VdiffUuid), + ) + if err != nil { + return err + } if qr, err = dbClient.ExecuteFetch(query, 1); err != nil { return err } @@ -204,9 +244,23 @@ func (vde *Engine) handleCreateResumeAction(ctx context.Context, dbClient binlog if err != nil { return err } + vdiffRecord := qr.Named().Row() + if vdiffRecord == nil { + return fmt.Errorf("unable to %s vdiff for UUID %s as it was not found on tablet %v (%w)", + action, req.VdiffUuid, vde.thisTablet.Alias, err) + } + if action == ResumeAction { + // Use the existing options from the vdiff record. 
+ options = optionsZeroVal + err = protojson.Unmarshal(vdiffRecord.AsBytes("options", []byte("{}")), options) + if err != nil { + return err + } + } + vde.mu.Lock() defer vde.mu.Unlock() - if err := vde.addController(qr.Named().Row(), options); err != nil { + if err := vde.addController(vdiffRecord, options); err != nil { return err } @@ -215,11 +269,17 @@ func (vde *Engine) handleCreateResumeAction(ctx context.Context, dbClient binlog func (vde *Engine) handleShowAction(ctx context.Context, dbClient binlogplayer.DBClient, action VDiffAction, req *tabletmanagerdatapb.VDiffRequest, resp *tabletmanagerdatapb.VDiffResponse) error { var qr *sqltypes.Result - var err error vdiffUUID := "" if req.ActionArg == LastActionArg { - query := fmt.Sprintf(sqlGetMostRecentVDiff, encodeString(req.Keyspace), encodeString(req.Workflow)) + query, err := sqlparser.ParseAndBind(sqlGetMostRecentVDiffByKeyspaceWorkflow, + sqltypes.StringBindVariable(req.Keyspace), + sqltypes.StringBindVariable(req.Workflow), + sqltypes.Int64BindVariable(1), + ) + if err != nil { + return err + } if qr, err = dbClient.ExecuteFetch(query, 1); err != nil { return err } @@ -234,7 +294,14 @@ func (vde *Engine) handleShowAction(ctx context.Context, dbClient binlogplayer.D } if vdiffUUID != "" { resp.VdiffUuid = vdiffUUID - query := fmt.Sprintf(sqlGetVDiffByKeyspaceWorkflowUUID, encodeString(req.Keyspace), encodeString(req.Workflow), encodeString(vdiffUUID)) + query, err := sqlparser.ParseAndBind(sqlGetVDiffByKeyspaceWorkflowUUID, + sqltypes.StringBindVariable(req.Keyspace), + sqltypes.StringBindVariable(req.Workflow), + sqltypes.StringBindVariable(vdiffUUID), + ) + if err != nil { + return err + } if qr, err = dbClient.ExecuteFetch(query, 1); err != nil { return err } @@ -257,7 +324,15 @@ func (vde *Engine) handleShowAction(ctx context.Context, dbClient binlogplayer.D } switch req.ActionArg { case AllActionArg: - if qr, err = dbClient.ExecuteFetch(sqlGetAllVDiffs, -1); err != nil { + query, err := 
sqlparser.ParseAndBind(sqlGetMostRecentVDiffByKeyspaceWorkflow, + sqltypes.StringBindVariable(req.Keyspace), + sqltypes.StringBindVariable(req.Workflow), + sqltypes.Int64BindVariable(maxVDiffsToReport), + ) + if err != nil { + return err + } + if qr, err = dbClient.ExecuteFetch(query, -1); err != nil { return err } resp.Output = sqltypes.ResultToProto3(qr) @@ -278,7 +353,7 @@ func (vde *Engine) handleStopAction(ctx context.Context, dbClient binlogplayer.D if controller.uuid == req.VdiffUuid { controller.Stop() if err := controller.markStoppedByRequest(); err != nil { - return err + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "encountered an error marking vdiff %s as stopped: %v", controller.uuid, err) } break } @@ -287,20 +362,74 @@ func (vde *Engine) handleStopAction(ctx context.Context, dbClient binlogplayer.D } func (vde *Engine) handleDeleteAction(ctx context.Context, dbClient binlogplayer.DBClient, action VDiffAction, req *tabletmanagerdatapb.VDiffRequest, resp *tabletmanagerdatapb.VDiffResponse) error { - var err error - query := "" + vde.mu.Lock() + defer vde.mu.Unlock() + var deleteQuery string + cleanupController := func(controller *controller) { + if controller == nil { + return + } + controller.Stop() + delete(vde.controllers, controller.id) + } switch req.ActionArg { case AllActionArg: - query = fmt.Sprintf(sqlDeleteVDiffs, encodeString(req.Keyspace), encodeString(req.Workflow)) + // We need to stop any running controllers before we delete + // the vdiff records. 
+ query, err := sqlparser.ParseAndBind(sqlGetVDiffIDsByKeyspaceWorkflow, + sqltypes.StringBindVariable(req.Keyspace), + sqltypes.StringBindVariable(req.Workflow), + ) + if err != nil { + return err + } + res, err := dbClient.ExecuteFetch(query, -1) + if err != nil { + return err + } + for _, row := range res.Named().Rows { + cleanupController(vde.controllers[row.AsInt64("id", -1)]) + } + deleteQuery, err = sqlparser.ParseAndBind(sqlDeleteVDiffs, + sqltypes.StringBindVariable(req.Keyspace), + sqltypes.StringBindVariable(req.Workflow), + ) + if err != nil { + return err + } default: uuid, err := uuid.Parse(req.ActionArg) if err != nil { return fmt.Errorf("action argument %s not supported", req.ActionArg) } - query = fmt.Sprintf(sqlDeleteVDiffByUUID, encodeString(uuid.String())) + // We need to be sure that the controller is stopped, if + // it's still running, before we delete the vdiff record. + query, err := sqlparser.ParseAndBind(sqlGetVDiffID, + sqltypes.StringBindVariable(uuid.String()), + ) + if err != nil { + return err + } + res, err := dbClient.ExecuteFetch(query, 1) + if err != nil { + return err + } + row := res.Named().Row() // Must only be one + if row == nil { + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no vdiff found for UUID %s on tablet %v", + uuid, vde.thisTablet.Alias) + } + cleanupController(vde.controllers[row.AsInt64("id", -1)]) + deleteQuery, err = sqlparser.ParseAndBind(sqlDeleteVDiffByUUID, + sqltypes.StringBindVariable(uuid.String()), + ) + if err != nil { + return err + } } - if _, err = dbClient.ExecuteFetch(query, 1); err != nil { + // Execute the query which deletes the vdiff record(s). 
+ if _, err := dbClient.ExecuteFetch(deleteQuery, 1); err != nil { return err } diff --git a/go/vt/vttablet/tabletmanager/vdiff/action_test.go b/go/vt/vttablet/tabletmanager/vdiff/action_test.go index b6ad3d65775..9bc729942dd 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/action_test.go +++ b/go/vt/vttablet/tabletmanager/vdiff/action_test.go @@ -25,10 +25,14 @@ import ( "github.com/google/uuid" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/vterrors" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" ) func TestPerformVDiffAction(t *testing.T) { @@ -39,12 +43,19 @@ func TestPerformVDiffAction(t *testing.T) { keyspace := "ks" workflow := "wf" uuid := uuid.New().String() + type queryAndResult struct { + query string + result *sqltypes.Result // Optional if you need a non-empty result + } tests := []struct { - name string - vde *Engine - req *tabletmanagerdatapb.VDiffRequest + name string + vde *Engine + req *tabletmanagerdatapb.VDiffRequest + + preFunc func() error + postFunc func() error want *tabletmanagerdatapb.VDiffResponse - expectQueries []string + expectQueries []queryAndResult wantErr error }{ { @@ -52,15 +63,91 @@ func TestPerformVDiffAction(t *testing.T) { vde: &Engine{isOpen: false}, wantErr: vterrors.New(vtrpcpb.Code_UNAVAILABLE, "vdiff engine is closed"), }, + { + name: "create with defaults", + req: &tabletmanagerdatapb.VDiffRequest{ + Action: string(CreateAction), + VdiffUuid: uuid, + Options: &tabletmanagerdatapb.VDiffOptions{ + PickerOptions: &tabletmanagerdatapb.VDiffPickerOptions{}, + }, + }, + // Add a second cell. The default for source_cell and target_cell is all + // available cells, so this additional cell should then show up in the + // created vdiff record. 
+ preFunc: func() error { + return tstenv.TopoServ.CreateCellInfo(ctx, "zone100_test", &topodatapb.CellInfo{}) + }, + expectQueries: []queryAndResult{ + { + query: fmt.Sprintf("select id as id from _vt.vdiff where vdiff_uuid = %s", encodeString(uuid)), + }, + { + query: fmt.Sprintf(`insert into _vt.vdiff(keyspace, workflow, state, options, shard, db_name, vdiff_uuid) values('', '', 'pending', '{\"picker_options\":{\"source_cell\":\"cell1,zone100_test\",\"target_cell\":\"cell1,zone100_test\"}}', '0', 'vt_vttest', %s)`, encodeString(uuid)), + }, + }, + postFunc: func() error { + return tstenv.TopoServ.DeleteCellInfo(ctx, "zone100_test", true) + }, + }, + { + name: "create with cell alias", + req: &tabletmanagerdatapb.VDiffRequest{ + Action: string(CreateAction), + VdiffUuid: uuid, + Options: &tabletmanagerdatapb.VDiffOptions{ + PickerOptions: &tabletmanagerdatapb.VDiffPickerOptions{ + SourceCell: "all", + TargetCell: "all", + }, + }, + }, + // Add a second cell and create an cell alias that contains it. 
+ preFunc: func() error { + if err := tstenv.TopoServ.CreateCellInfo(ctx, "zone100_test", &topodatapb.CellInfo{}); err != nil { + return err + } + cells := append(tstenv.Cells, "zone100_test") + return tstenv.TopoServ.CreateCellsAlias(ctx, "all", &topodatapb.CellsAlias{ + Cells: cells, + }) + }, + expectQueries: []queryAndResult{ + { + query: fmt.Sprintf("select id as id from _vt.vdiff where vdiff_uuid = %s", encodeString(uuid)), + }, + { + query: fmt.Sprintf(`insert into _vt.vdiff(keyspace, workflow, state, options, shard, db_name, vdiff_uuid) values('', '', 'pending', '{\"picker_options\":{\"source_cell\":\"all\",\"target_cell\":\"all\"}}', '0', 'vt_vttest', %s)`, encodeString(uuid)), + }, + }, + postFunc: func() error { + if err := tstenv.TopoServ.DeleteCellInfo(ctx, "zone100_test", true); err != nil { + return err + } + return tstenv.TopoServ.DeleteCellsAlias(ctx, "all") + }, + }, { name: "delete by uuid", req: &tabletmanagerdatapb.VDiffRequest{ Action: string(DeleteAction), ActionArg: uuid, }, - expectQueries: []string{ - fmt.Sprintf(`delete from vd, vdt using _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) + expectQueries: []queryAndResult{ + { + query: fmt.Sprintf("select id as id from _vt.vdiff where vdiff_uuid = %s", encodeString(uuid)), + result: sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id", + "int64", + ), + "1", + ), + }, + { + query: fmt.Sprintf(`delete from vd, vdt using _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) where vd.vdiff_uuid = %s`, encodeString(uuid)), + }, }, }, { @@ -71,22 +158,75 @@ func TestPerformVDiffAction(t *testing.T) { Keyspace: keyspace, Workflow: workflow, }, - expectQueries: []string{ - fmt.Sprintf(`delete from vd, vdt, vdl using _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) + expectQueries: []queryAndResult{ + { + query: fmt.Sprintf("select id as id from _vt.vdiff where keyspace = %s and workflow = %s", encodeString(keyspace), 
encodeString(workflow)), + result: sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id", + "int64", + ), + "1", + "2", + ), + }, + { + query: fmt.Sprintf(`delete from vd, vdt, vdl using _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) left join _vt.vdiff_log as vdl on (vd.id = vdl.vdiff_id) where vd.keyspace = %s and vd.workflow = %s`, encodeString(keyspace), encodeString(workflow)), + }, + }, + }, + { + name: "show last", + req: &tabletmanagerdatapb.VDiffRequest{ + Action: string(ShowAction), + ActionArg: "last", + Keyspace: keyspace, + Workflow: workflow, + }, + expectQueries: []queryAndResult{ + { + query: fmt.Sprintf("select * from _vt.vdiff where keyspace = %s and workflow = %s order by id desc limit %d", + encodeString(keyspace), encodeString(workflow), 1), + result: noResults, + }, + }, + }, + { + name: "show all", + req: &tabletmanagerdatapb.VDiffRequest{ + Action: string(ShowAction), + ActionArg: "all", + Keyspace: keyspace, + Workflow: workflow, + }, + expectQueries: []queryAndResult{ + { + query: fmt.Sprintf("select * from _vt.vdiff where keyspace = %s and workflow = %s order by id desc limit %d", + encodeString(keyspace), encodeString(workflow), maxVDiffsToReport), + result: noResults, + }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + if tt.preFunc != nil { + err := tt.preFunc() + require.NoError(t, err, "pre function failed: %v", err) + } if tt.vde == nil { tt.vde = vdiffenv.vde } - for _, query := range tt.expectQueries { - vdiffenv.dbClient.ExpectRequest(query, &sqltypes.Result{}, nil) + for _, queryResult := range tt.expectQueries { + if queryResult.result == nil { + queryResult.result = &sqltypes.Result{} + } + vdiffenv.dbClient.ExpectRequest(queryResult.query, queryResult.result, nil) } got, err := tt.vde.PerformVDiffAction(ctx, tt.req) + vdiffenv.dbClient.Wait() if tt.wantErr != nil && !vterrors.Equals(err, tt.wantErr) { t.Errorf("Engine.PerformVDiffAction() error = %v, wantErr %v", err, 
tt.wantErr) return @@ -94,6 +234,13 @@ func TestPerformVDiffAction(t *testing.T) { if tt.want != nil && !reflect.DeepEqual(got, tt.want) { t.Errorf("Engine.PerformVDiffAction() = %v, want %v", got, tt.want) } + if tt.postFunc != nil { + err := tt.postFunc() + require.NoError(t, err, "post function failed: %v", err) + } + // No VDiffs should be running anymore. + require.Equal(t, 0, len(vdiffenv.vde.controllers), "expected no controllers to be running, but found %d", + len(vdiffenv.vde.controllers)) }) } } diff --git a/go/vt/vttablet/tabletmanager/vdiff/controller.go b/go/vt/vttablet/tabletmanager/vdiff/controller.go index 8edc4b333e4..22b1d3f5374 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/controller.go +++ b/go/vt/vttablet/tabletmanager/vdiff/controller.go @@ -23,19 +23,20 @@ import ( "strings" "time" - "vitess.io/vitess/go/vt/proto/tabletmanagerdata" - "vitess.io/vitess/go/vt/vterrors" - "google.golang.org/protobuf/encoding/prototext" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/log" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tmclient" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) /* @@ -57,6 +58,7 @@ type controller struct { id int64 // id from row in _vt.vdiff uuid string workflow string + workflowType binlogdatapb.VReplicationWorkflowType cancel context.CancelFunc dbClientFactory func() binlogplayer.DBClient ts *topo.Server @@ -149,7 +151,7 @@ type migrationSource struct { *shardStreamer vrID int32 - position mysql.Position + position replication.Position } func (ct *controller) updateState(dbClient binlogplayer.DBClient, state 
VDiffState, err error) error { @@ -165,8 +167,13 @@ func (ct *controller) updateState(dbClient binlogplayer.DBClient, state VDiffSta // Clear out any previous error for the vdiff on this shard err = errors.New("") } - query := fmt.Sprintf(sqlUpdateVDiffState, encodeString(string(state)), encodeString(err.Error()), extraCols, ct.id) - if _, err := dbClient.ExecuteFetch(query, 1); err != nil { + query := sqlparser.BuildParsedQuery(sqlUpdateVDiffState, + encodeString(string(state)), + encodeString(err.Error()), + extraCols, + ct.id, + ) + if _, err := dbClient.ExecuteFetch(query.Query, 1); err != nil { return err } insertVDiffLog(ct.vde.ctx, dbClient, ct.id, fmt.Sprintf("State changed to: %s", state)) @@ -177,11 +184,14 @@ func (ct *controller) start(ctx context.Context, dbClient binlogplayer.DBClient) select { case <-ctx.Done(): return vterrors.Errorf(vtrpcpb.Code_CANCELED, "context has expired") + case <-ct.done: + return vterrors.Errorf(vtrpcpb.Code_CANCELED, "vdiff was stopped") default: } - ct.workflowFilter = fmt.Sprintf("where workflow = %s and db_name = %s", encodeString(ct.workflow), encodeString(ct.vde.dbName)) - query := fmt.Sprintf(sqlGetVReplicationEntry, ct.workflowFilter) - qr, err := dbClient.ExecuteFetch(query, -1) + ct.workflowFilter = fmt.Sprintf("where workflow = %s and db_name = %s", encodeString(ct.workflow), + encodeString(ct.vde.dbName)) + query := sqlparser.BuildParsedQuery(sqlGetVReplicationEntry, ct.workflowFilter) + qr, err := dbClient.ExecuteFetch(query.Query, -1) if err != nil { return err } @@ -190,6 +200,8 @@ func (ct *controller) start(ctx context.Context, dbClient binlogplayer.DBClient) select { case <-ctx.Done(): return vterrors.Errorf(vtrpcpb.Code_CANCELED, "context has expired") + case <-ct.done: + return vterrors.Errorf(vtrpcpb.Code_CANCELED, "vdiff was stopped") default: } source := newMigrationSource() @@ -216,6 +228,12 @@ func (ct *controller) start(ctx context.Context, dbClient binlogplayer.DBClient) ct.sourceKeyspace = 
bls.Keyspace ct.filter = bls.Filter } + + workflowType, err := row["workflow_type"].ToInt64() + if err != nil { + return err + } + ct.workflowType = binlogdatapb.VReplicationWorkflowType(workflowType) } if err := ct.validate(); err != nil { @@ -248,15 +266,17 @@ func (ct *controller) start(ctx context.Context, dbClient binlogplayer.DBClient) func (ct *controller) markStoppedByRequest() error { dbClient := ct.vde.dbClientFactoryFiltered() if err := dbClient.Connect(); err != nil { - return fmt.Errorf("encountered an error marking vdiff %s as stopped: %v", ct.uuid, err) + return err } defer dbClient.Close() - query := fmt.Sprintf(sqlUpdateVDiffStopped, ct.id) + query, err := sqlparser.ParseAndBind(sqlUpdateVDiffStopped, sqltypes.Int64BindVariable(ct.id)) + if err != nil { + return err + } var res *sqltypes.Result - var err error if res, err = dbClient.ExecuteFetch(query, 1); err != nil { - return fmt.Errorf("encountered an error marking vdiff %s as stopped: %v", ct.uuid, err) + return err } // We don't mark it as stopped if it's already completed if res.RowsAffected > 0 { @@ -306,9 +326,9 @@ func (ct *controller) saveErrorState(ctx context.Context, saveErr error) error { log.Warningf("Failed to persist vdiff error state: %v. 
Will retry in %s", err, retryDelay.String()) select { case <-ctx.Done(): - return fmt.Errorf("engine is shutting down") + return vterrors.Errorf(vtrpcpb.Code_CANCELED, "engine is shutting down") case <-ct.done: - return fmt.Errorf("vdiff was stopped") + return vterrors.Errorf(vtrpcpb.Code_CANCELED, "vdiff was stopped") case <-time.After(retryDelay): if retryDelay < maxRetryDelay { retryDelay = time.Duration(float64(retryDelay) * 1.5) diff --git a/go/vt/vttablet/tabletmanager/vdiff/engine.go b/go/vt/vttablet/tabletmanager/vdiff/engine.go index 92e4a2a4555..72098eb52be 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/engine.go +++ b/go/vt/vttablet/tabletmanager/vdiff/engine.go @@ -24,9 +24,10 @@ import ( "sync" "time" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/proto/tabletmanagerdata" "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" "vitess.io/vitess/go/vt/vttablet/tmclient" @@ -297,7 +298,11 @@ func (vde *Engine) getVDiffsToRetry(ctx context.Context, dbClient binlogplayer.D } func (vde *Engine) getVDiffByID(ctx context.Context, dbClient binlogplayer.DBClient, id int64) (*sqltypes.Result, error) { - qr, err := dbClient.ExecuteFetch(fmt.Sprintf(sqlGetVDiffByID, id), -1) + query, err := sqlparser.ParseAndBind(sqlGetVDiffByID, sqltypes.Int64BindVariable(id)) + if err != nil { + return nil, err + } + qr, err := dbClient.ExecuteFetch(query, -1) if err != nil { return nil, err } @@ -330,8 +335,8 @@ func (vde *Engine) retryVDiffs(ctx context.Context) error { return ctx.Err() default: } - lastError := mysql.NewSQLErrorFromError(errors.New(row.AsString("last_error", ""))) - if !mysql.IsEphemeralError(lastError) { + lastError := sqlerror.NewSQLErrorFromError(errors.New(row.AsString("last_error", ""))) + if !sqlerror.IsEphemeralError(lastError) { continue } uuid := row.AsString("vdiff_uuid", "") @@ -340,7 +345,11 @@ func (vde *Engine) 
retryVDiffs(ctx context.Context) error { return err } log.Infof("Retrying vdiff %s that had an ephemeral error of '%v'", uuid, lastError) - if _, err = dbClient.ExecuteFetch(fmt.Sprintf(sqlRetryVDiff, id), 1); err != nil { + query, err := sqlparser.ParseAndBind(sqlRetryVDiff, sqltypes.Int64BindVariable(id)) + if err != nil { + return err + } + if _, err = dbClient.ExecuteFetch(query, 1); err != nil { return err } options := &tabletmanagerdata.VDiffOptions{} diff --git a/go/vt/vttablet/tabletmanager/vdiff/engine_test.go b/go/vt/vttablet/tabletmanager/vdiff/engine_test.go index 69c323d837a..f8ba581c06b 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/engine_test.go +++ b/go/vt/vttablet/tabletmanager/vdiff/engine_test.go @@ -19,13 +19,15 @@ package vdiff import ( "context" "fmt" + "strings" "testing" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" @@ -83,7 +85,7 @@ func TestEngineOpen(t *testing.T) { ), nil) // Now let's short circuit the vdiff as we know that the open has worked as expected. 
- shortCircuitTestAfterQuery("update _vt.vdiff set state = 'started', last_error = '' , started_at = utc_timestamp() where id = 1", vdiffenv.dbClient) + shortCircuitTestAfterQuery("update _vt.vdiff set state = 'started', last_error = left('', 1024) , started_at = utc_timestamp() where id = 1", vdiffenv.dbClient) vdenv.vde.Open(context.Background(), vdiffenv.vre) defer vdenv.vde.Close() @@ -106,8 +108,8 @@ func TestVDiff(t *testing.T) { MaxRows: 100, }, PickerOptions: &tabletmanagerdatapb.VDiffPickerOptions{ - SourceCell: tstenv.Cells[0], - TargetCell: tstenv.Cells[0], + SourceCell: strings.Join(tstenv.Cells, ","), + TargetCell: strings.Join(tstenv.Cells, ","), TabletTypes: "primary", }, ReportOptions: &tabletmanagerdatapb.VDiffReportOptions{ @@ -130,7 +132,7 @@ func TestVDiff(t *testing.T) { ), fmt.Sprintf("1|%s|%s|%s||9223372036854775807|9223372036854775807||PRIMARY,REPLICA|1669511347|0|Running||%s|200||1669511347|1|0||1", vdiffenv.workflow, vreplSource, vdiffSourceGtid, vdiffDBName), ), nil) - vdenv.dbClient.ExpectRequest("update _vt.vdiff set state = 'started', last_error = '' , started_at = utc_timestamp() where id = 1", singleRowAffected, nil) + vdenv.dbClient.ExpectRequest("update _vt.vdiff set state = 'started', last_error = left('', 1024) , started_at = utc_timestamp() where id = 1", singleRowAffected, nil) vdenv.dbClient.ExpectRequest("insert into _vt.vdiff_log(vdiff_id, message) values (1, 'State changed to: started')", singleRowAffected, nil) vdenv.dbClient.ExpectRequest(`select vdt.lastpk as lastpk, vdt.mismatch as mismatch, vdt.report as report from _vt.vdiff as vd inner join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) @@ -192,7 +194,7 @@ func TestVDiff(t *testing.T) { vdenv.dbClient.ExpectRequest("update _vt.vdiff_table set state = 'completed' where vdiff_id = 1 and table_name = 't1'", singleRowAffected, nil) vdenv.dbClient.ExpectRequest(`insert into _vt.vdiff_log(vdiff_id, message) values (1, 'completed: table \'t1\'')`, singleRowAffected, nil) 
vdenv.dbClient.ExpectRequest("select table_name as table_name from _vt.vdiff_table where vdiff_id = 1 and state != 'completed'", singleRowAffected, nil) - vdenv.dbClient.ExpectRequest("update _vt.vdiff set state = 'completed', last_error = '' , completed_at = utc_timestamp() where id = 1", singleRowAffected, nil) + vdenv.dbClient.ExpectRequest("update _vt.vdiff set state = 'completed', last_error = left('', 1024) , completed_at = utc_timestamp() where id = 1", singleRowAffected, nil) vdenv.dbClient.ExpectRequest("insert into _vt.vdiff_log(vdiff_id, message) values (1, 'State changed to: completed')", singleRowAffected, nil) vdenv.vde.mu.Lock() @@ -224,7 +226,7 @@ func TestEngineRetryErroredVDiffs(t *testing.T) { vdiffTestColTypes, ), fmt.Sprintf("1|%s|%s|%s|%s|%s|error|%s|%v", UUID, vdiffenv.workflow, tstenv.KeyspaceName, tstenv.ShardName, vdiffDBName, optionsJS, - mysql.NewSQLError(mysql.ERNoSuchTable, "42S02", "Table 'foo' doesn't exist")), + sqlerror.NewSQLError(sqlerror.ERNoSuchTable, "42S02", "Table 'foo' doesn't exist")), ), }, { @@ -234,7 +236,7 @@ func TestEngineRetryErroredVDiffs(t *testing.T) { vdiffTestColTypes, ), fmt.Sprintf("1|%s|%s|%s|%s|%s|error|%s|%v", UUID, vdiffenv.workflow, tstenv.KeyspaceName, tstenv.ShardName, vdiffDBName, optionsJS, - mysql.NewSQLError(mysql.ERLockWaitTimeout, "HY000", "Lock wait timeout exceeded; try restarting transaction")), + sqlerror.NewSQLError(sqlerror.ERLockWaitTimeout, "HY000", "Lock wait timeout exceeded; try restarting transaction")), ), expectRetry: true, }, @@ -269,7 +271,7 @@ func TestEngineRetryErroredVDiffs(t *testing.T) { ), nil) // At this point we know that we kicked off the expected retry so we can short circit the vdiff. 
- shortCircuitTestAfterQuery(fmt.Sprintf("update _vt.vdiff set state = 'started', last_error = '' , started_at = utc_timestamp() where id = %s", id), vdiffenv.dbClient) + shortCircuitTestAfterQuery(fmt.Sprintf("update _vt.vdiff set state = 'started', last_error = left('', 1024) , started_at = utc_timestamp() where id = %s", id), vdiffenv.dbClient) expectedControllerCnt++ } diff --git a/go/vt/vttablet/tabletmanager/vdiff/framework_test.go b/go/vt/vttablet/tabletmanager/vdiff/framework_test.go index bbda63340de..d5e8c134814 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/framework_test.go +++ b/go/vt/vttablet/tabletmanager/vdiff/framework_test.go @@ -174,7 +174,9 @@ func init() { func TestMain(m *testing.M) { exitCode := func() int { var err error - tstenv, err = testenv.Init() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tstenv, err = testenv.Init(ctx) if err != nil { fmt.Fprintf(os.Stderr, "%v", err) return 1 @@ -195,7 +197,7 @@ func resetBinlogClient() { // has verified the necessary behavior. 
func shortCircuitTestAfterQuery(query string, dbClient *binlogplayer.MockDBClient) { dbClient.ExpectRequest(query, singleRowAffected, fmt.Errorf("Short circuiting test")) - dbClient.ExpectRequest("update _vt.vdiff set state = 'error', last_error = 'Short circuiting test' where id = 1", singleRowAffected, nil) + dbClient.ExpectRequest("update _vt.vdiff set state = 'error', last_error = left('Short circuiting test', 1024) where id = 1", singleRowAffected, nil) dbClient.ExpectRequest("insert into _vt.vdiff_log(vdiff_id, message) values (1, 'State changed to: error')", singleRowAffected, nil) dbClient.ExpectRequest("insert into _vt.vdiff_log(vdiff_id, message) values (1, 'Error: Short circuiting test')", singleRowAffected, nil) } @@ -478,6 +480,10 @@ func (tmc *fakeTMClient) PrimaryPosition(ctx context.Context, tablet *topodatapb return pos, nil } +func (tmc *fakeTMClient) CheckThrottler(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) { + return &tabletmanagerdatapb.CheckThrottlerResponse{}, nil +} + // ---------------------------------------------- // testVDiffEnv diff --git a/go/vt/vttablet/tabletmanager/vdiff/primitive_executor.go b/go/vt/vttablet/tabletmanager/vdiff/primitive_executor.go index f2be4bae995..32f93858ec1 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/primitive_executor.go +++ b/go/vt/vttablet/tabletmanager/vdiff/primitive_executor.go @@ -67,7 +67,7 @@ func newPrimitiveExecutor(ctx context.Context, prim vtgateEngine.Primitive, name select { case pe.resultch <- qr: case <-ctx.Done(): - return vterrors.Wrap(ctx.Err(), "Outer Stream") + return vterrors.Wrap(ctx.Err(), "LHS Stream") } return nil }) diff --git a/go/vt/vttablet/tabletmanager/vdiff/schema.go b/go/vt/vttablet/tabletmanager/vdiff/schema.go index f82f03106b4..a63e60d9434 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/schema.go +++ b/go/vt/vttablet/tabletmanager/vdiff/schema.go @@ -18,48 
+18,49 @@ package vdiff const ( sqlAnalyzeTable = "analyze table `%s`.`%s`" - sqlNewVDiff = "insert into _vt.vdiff(keyspace, workflow, state, options, shard, db_name, vdiff_uuid) values(%s, %s, '%s', %s, '%s', '%s', '%s')" - sqlResumeVDiff = `update _vt.vdiff as vd, _vt.vdiff_table as vdt set vd.options = %s, vd.started_at = NULL, vd.completed_at = NULL, vd.state = 'pending', - vdt.state = 'pending' where vd.vdiff_uuid = %s and vd.id = vdt.vdiff_id and vd.state in ('completed', 'stopped') + sqlNewVDiff = "insert into _vt.vdiff(keyspace, workflow, state, options, shard, db_name, vdiff_uuid) values(%a, %a, %a, %a, %a, %a, %a)" + sqlResumeVDiff = `update _vt.vdiff as vd, _vt.vdiff_table as vdt set vd.started_at = NULL, vd.completed_at = NULL, vd.state = 'pending', + vdt.state = 'pending' where vd.vdiff_uuid = %a and vd.id = vdt.vdiff_id and vd.state in ('completed', 'stopped') and vdt.state in ('completed', 'stopped')` sqlRetryVDiff = `update _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) set vd.state = 'pending', - vd.last_error = '', vdt.state = 'pending' where vd.id = %d and (vd.state = 'error' or vdt.state = 'error')` - sqlGetVDiffByKeyspaceWorkflowUUID = "select * from _vt.vdiff where keyspace = %s and workflow = %s and vdiff_uuid = %s" - sqlGetMostRecentVDiff = "select * from _vt.vdiff where keyspace = %s and workflow = %s order by id desc limit 1" - sqlGetVDiffByID = "select * from _vt.vdiff where id = %d" - sqlDeleteVDiffs = `delete from vd, vdt, vdl using _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) + vd.last_error = '', vdt.state = 'pending' where vd.id = %a and (vd.state = 'error' or vdt.state = 'error')` + sqlGetVDiffByKeyspaceWorkflowUUID = "select * from _vt.vdiff where keyspace = %a and workflow = %a and vdiff_uuid = %a" + sqlGetMostRecentVDiffByKeyspaceWorkflow = "select * from _vt.vdiff where keyspace = %a and workflow = %a order by id desc limit %a" + sqlGetVDiffByID = "select * from _vt.vdiff 
where id = %a" + sqlDeleteVDiffs = `delete from vd, vdt, vdl using _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) left join _vt.vdiff_log as vdl on (vd.id = vdl.vdiff_id) - where vd.keyspace = %s and vd.workflow = %s` + where vd.keyspace = %a and vd.workflow = %a` sqlDeleteVDiffByUUID = `delete from vd, vdt using _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) - where vd.vdiff_uuid = %s` + where vd.vdiff_uuid = %a` sqlVDiffSummary = `select vd.state as vdiff_state, vd.last_error as last_error, vdt.table_name as table_name, vd.vdiff_uuid as 'uuid', vdt.state as table_state, vdt.table_rows as table_rows, vd.started_at as started_at, vdt.rows_compared as rows_compared, vd.completed_at as completed_at, IF(vdt.mismatch = 1, 1, 0) as has_mismatch, vdt.report as report from _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) - where vd.id = %d` - // sqlUpdateVDiffState has a penultimate placeholder for any additional columns you want to update, e.g. `, foo = 1` - sqlUpdateVDiffState = "update _vt.vdiff set state = %s, last_error = %s %s where id = %d" + where vd.id = %a` + // sqlUpdateVDiffState has a penultimate placeholder for any additional columns you want to update, e.g. `, foo = 1`. + // It also truncates the error if needed to ensure that we can save the state when the error text is very long. 
+ sqlUpdateVDiffState = "update _vt.vdiff set state = %s, last_error = left(%s, 1024) %s where id = %d" sqlUpdateVDiffStopped = `update _vt.vdiff as vd, _vt.vdiff_table as vdt set vd.state = 'stopped', vdt.state = 'stopped', vd.last_error = '' - where vd.id = vdt.vdiff_id and vd.id = %d and vd.state != 'completed'` - sqlGetVReplicationEntry = "select * from _vt.vreplication %s" - sqlGetVDiffsToRun = "select * from _vt.vdiff where state in ('started','pending')" // what VDiffs have not been stopped or completed - sqlGetVDiffsToRetry = "select * from _vt.vdiff where state = 'error' and json_unquote(json_extract(options, '$.core_options.auto_retry')) = 'true'" - sqlGetVDiffID = "select id as id from _vt.vdiff where vdiff_uuid = %s" - sqlGetAllVDiffs = "select * from _vt.vdiff order by id desc" - sqlGetTableRows = "select table_rows as table_rows from INFORMATION_SCHEMA.TABLES where table_schema = %a and table_name = %a" - sqlGetAllTableRows = "select table_name as table_name, table_rows as table_rows from INFORMATION_SCHEMA.TABLES where table_schema = %s and table_name in (%s)" + where vd.id = vdt.vdiff_id and vd.id = %a and vd.state != 'completed'` + sqlGetVReplicationEntry = "select * from _vt.vreplication %s" + sqlGetVDiffsToRun = "select * from _vt.vdiff where state in ('started','pending')" // what VDiffs have not been stopped or completed + sqlGetVDiffsToRetry = "select * from _vt.vdiff where state = 'error' and json_unquote(json_extract(options, '$.core_options.auto_retry')) = 'true'" + sqlGetVDiffID = "select id as id from _vt.vdiff where vdiff_uuid = %a" + sqlGetVDiffIDsByKeyspaceWorkflow = "select id as id from _vt.vdiff where keyspace = %a and workflow = %a" + sqlGetTableRows = "select table_rows as table_rows from INFORMATION_SCHEMA.TABLES where table_schema = %a and table_name = %a" + sqlGetAllTableRows = "select table_name as table_name, table_rows as table_rows from INFORMATION_SCHEMA.TABLES where table_schema = %s and table_name in (%s)" - 
sqlNewVDiffTable = "insert into _vt.vdiff_table(vdiff_id, table_name, state, table_rows) values(%d, %s, 'pending', %d)" + sqlNewVDiffTable = "insert into _vt.vdiff_table(vdiff_id, table_name, state, table_rows) values(%a, %a, 'pending', %a)" sqlGetVDiffTable = `select vdt.lastpk as lastpk, vdt.mismatch as mismatch, vdt.report as report from _vt.vdiff as vd inner join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) - where vdt.vdiff_id = %d and vdt.table_name = %s` + where vdt.vdiff_id = %a and vdt.table_name = %a` sqlUpdateTableRows = "update _vt.vdiff_table set table_rows = %a where vdiff_id = %a and table_name = %a" - sqlUpdateTableProgress = "update _vt.vdiff_table set rows_compared = %d, lastpk = %s, report = %s where vdiff_id = %d and table_name = %s" - sqlUpdateTableNoProgress = "update _vt.vdiff_table set rows_compared = %d, report = %s where vdiff_id = %d and table_name = %s" - sqlUpdateTableState = "update _vt.vdiff_table set state = %s where vdiff_id = %d and table_name = %s" - sqlUpdateTableStateAndReport = "update _vt.vdiff_table set state = %s, rows_compared = %d, report = %s where vdiff_id = %d and table_name = %s" - sqlUpdateTableMismatch = "update _vt.vdiff_table set mismatch = true where vdiff_id = %d and table_name = %s" + sqlUpdateTableProgress = "update _vt.vdiff_table set rows_compared = %a, lastpk = %a, report = %a where vdiff_id = %a and table_name = %a" + sqlUpdateTableNoProgress = "update _vt.vdiff_table set rows_compared = %a, report = %a where vdiff_id = %a and table_name = %a" + sqlUpdateTableState = "update _vt.vdiff_table set state = %a where vdiff_id = %a and table_name = %a" + sqlUpdateTableStateAndReport = "update _vt.vdiff_table set state = %a, rows_compared = %a, report = %a where vdiff_id = %a and table_name = %a" + sqlUpdateTableMismatch = "update _vt.vdiff_table set mismatch = true where vdiff_id = %a and table_name = %a" - sqlGetIncompleteTables = "select table_name as table_name from _vt.vdiff_table where vdiff_id = %d and 
state != 'completed'" + sqlGetIncompleteTables = "select table_name as table_name from _vt.vdiff_table where vdiff_id = %a and state != 'completed'" ) diff --git a/go/vt/vttablet/tabletmanager/vdiff/table_differ.go b/go/vt/vttablet/tabletmanager/vdiff/table_differ.go index 0f650e3eaa1..c0cba599bdd 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/table_differ.go +++ b/go/vt/vttablet/tabletmanager/vdiff/table_differ.go @@ -20,32 +20,33 @@ import ( "context" "encoding/json" "fmt" + "strings" "sync" "time" - "vitess.io/vitess/go/vt/proto/topodata" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/topo" - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/log" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb "vitess.io/vitess/go/vt/proto/query" - tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vttablet/tabletconn" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) // how long to wait for background operations to complete @@ -53,9 +54,9 @@ var BackgroundOperationTimeout = topo.RemoteOperationTimeout * 4 // compareColInfo contains the metadata for a column of 
the table being diffed type compareColInfo struct { - colIndex int // index of the column in the filter's select - collation collations.Collation // is the collation of the column, if any - isPK bool // is this column part of the primary key + colIndex int // index of the column in the filter's select + collation collations.ID // is the collation of the column, if any + isPK bool // is this column part of the primary key colName string } @@ -72,6 +73,12 @@ type tableDiffer struct { sourceQuery string table *tabletmanagerdatapb.TableDefinition lastPK *querypb.QueryResult + + // wgShardStreamers is used, with a cancellable context, to wait for all shard streamers + // to finish after each diff is complete. + wgShardStreamers sync.WaitGroup + shardStreamsCtx context.Context + shardStreamsCancel context.CancelFunc } func newTableDiffer(wd *workflowDiffer, table *tabletmanagerdatapb.TableDefinition, sourceQuery string) *tableDiffer { @@ -121,19 +128,21 @@ func (td *tableDiffer) initialize(ctx context.Context) error { } }() - if err := td.selectTablets(ctx, td.wd.opts.PickerOptions.SourceCell, td.wd.opts.PickerOptions.TabletTypes); err != nil { + td.shardStreamsCtx, td.shardStreamsCancel = context.WithCancel(ctx) + + if err := td.selectTablets(ctx); err != nil { return err } if err := td.syncSourceStreams(ctx); err != nil { return err } - if err := td.startSourceDataStreams(ctx); err != nil { + if err := td.startSourceDataStreams(td.shardStreamsCtx); err != nil { return err } if err := td.syncTargetStreams(ctx); err != nil { return err } - if err := td.startTargetDataStream(ctx); err != nil { + if err := td.startTargetDataStream(td.shardStreamsCtx); err != nil { return err } td.setupRowSorters() @@ -199,30 +208,38 @@ func (td *tableDiffer) forEachSource(cb func(source *migrationSource) error) err return allErrors.AggrError(vterrors.Aggregate) } -func (td *tableDiffer) selectTablets(ctx context.Context, cell, tabletTypes string) error { - var wg sync.WaitGroup - ct := 
td.wd.ct - var err1, err2 error +func (td *tableDiffer) selectTablets(ctx context.Context) error { + var ( + wg sync.WaitGroup + sourceErr, targetErr error + targetTablet *topodatapb.Tablet + ) + + // The cells from the vdiff record are a comma separated list. + sourceCells := strings.Split(td.wd.opts.PickerOptions.SourceCell, ",") + targetCells := strings.Split(td.wd.opts.PickerOptions.TargetCell, ",") // For Mount+Migrate, the source tablets will be in a different // Vitess cluster with its own TopoServer. - sourceTopoServer := ct.ts - if ct.externalCluster != "" { - extTS, err := ct.ts.OpenExternalVitessClusterServer(ctx, ct.externalCluster) + sourceTopoServer := td.wd.ct.ts + if td.wd.ct.externalCluster != "" { + extTS, err := td.wd.ct.ts.OpenExternalVitessClusterServer(ctx, td.wd.ct.externalCluster) if err != nil { return err } sourceTopoServer = extTS } + tabletPickerOptions := discovery.TabletPickerOptions{} wg.Add(1) go func() { defer wg.Done() - err1 = td.forEachSource(func(source *migrationSource) error { - tablet, err := pickTablet(ctx, sourceTopoServer, cell, ct.vde.thisTablet.Alias.Cell, ct.sourceKeyspace, source.shard, tabletTypes) + sourceErr = td.forEachSource(func(source *migrationSource) error { + sourceTablet, err := td.pickTablet(ctx, sourceTopoServer, sourceCells, td.wd.ct.sourceKeyspace, + source.shard, td.wd.opts.PickerOptions.TabletTypes, tabletPickerOptions) if err != nil { return err } - source.tablet = tablet + source.tablet = sourceTablet return nil }) }() @@ -230,26 +247,36 @@ func (td *tableDiffer) selectTablets(ctx context.Context, cell, tabletTypes stri wg.Add(1) go func() { defer wg.Done() - tablet, err2 := pickTablet(ctx, ct.ts, td.wd.opts.PickerOptions.TargetCell, ct.vde.thisTablet.Alias.Cell, ct.vde.thisTablet.Keyspace, - ct.vde.thisTablet.Shard, td.wd.opts.PickerOptions.TabletTypes) - if err2 != nil { + if td.wd.ct.workflowType == binlogdatapb.VReplicationWorkflowType_Reshard { + // For resharding, the target shards could be 
non-serving if traffic has already been switched once. + // When shards are created their IsPrimaryServing attribute is set to true. However, when the traffic is switched + // it is set to false for the shards we are switching from. We don't have a way to know if we have + // switched or not, so we just include non-serving tablets for all reshards. + tabletPickerOptions.IncludeNonServingTablets = true + } + targetTablet, targetErr = td.pickTablet(ctx, td.wd.ct.ts, targetCells, td.wd.ct.vde.thisTablet.Keyspace, + td.wd.ct.vde.thisTablet.Shard, td.wd.opts.PickerOptions.TabletTypes, tabletPickerOptions) + if targetErr != nil { return } - ct.targetShardStreamer = &shardStreamer{ - tablet: tablet, - shard: tablet.Shard, + td.wd.ct.targetShardStreamer = &shardStreamer{ + tablet: targetTablet, + shard: targetTablet.Shard, } }() wg.Wait() - if err1 != nil { - return err1 + if sourceErr != nil { + return sourceErr } - return err2 + return targetErr } -func pickTablet(ctx context.Context, ts *topo.Server, cell, localCell, keyspace, shard, tabletTypes string) (*topodata.Tablet, error) { - tp, err := discovery.NewTabletPicker(ctx, ts, []string{cell}, localCell, keyspace, shard, tabletTypes, discovery.TabletPickerOptions{}) +func (td *tableDiffer) pickTablet(ctx context.Context, ts *topo.Server, cells []string, keyspace, + shard, tabletTypes string, options discovery.TabletPickerOptions) (*topodatapb.Tablet, error) { + + tp, err := discovery.NewTabletPicker(ctx, ts, cells, td.wd.ct.vde.thisTablet.Alias.Cell, keyspace, + shard, tabletTypes, options) if err != nil { return nil, err } @@ -264,7 +291,7 @@ func (td *tableDiffer) syncSourceStreams(ctx context.Context) error { if err := td.forEachSource(func(source *migrationSource) error { log.Flush() - if err := ct.tmc.WaitForPosition(waitCtx, source.tablet, mysql.EncodePosition(source.position)); err != nil { + if err := ct.tmc.WaitForPosition(waitCtx, source.tablet, replication.EncodePosition(source.position)); err != nil { return 
vterrors.Wrapf(err, "WaitForPosition for tablet %v", topoproto.TabletAliasString(source.tablet.Alias)) } return nil @@ -337,7 +364,7 @@ func (td *tableDiffer) restartTargetVReplicationStreams(ctx context.Context) err // Let's retry a few times if we get a retryable error. for i := 1; i <= 3; i++ { _, err := ct.tmc.VReplicationExec(ctx, ct.vde.thisTablet, query) - if err == nil || !mysql.IsEphemeralError(err) { + if err == nil || !sqlerror.IsEphemeralError(err) { break } log.Warningf("Encountered the following error while restarting the %q VReplication workflow, will retry (attempt #%d): %v", @@ -348,10 +375,12 @@ func (td *tableDiffer) restartTargetVReplicationStreams(ctx context.Context) err func (td *tableDiffer) streamOneShard(ctx context.Context, participant *shardStreamer, query string, lastPK *querypb.QueryResult, gtidch chan string) { log.Infof("streamOneShard Start on %s using query: %s", participant.tablet.Alias.String(), query) + td.wgShardStreamers.Add(1) defer func() { log.Infof("streamOneShard End on %s", participant.tablet.Alias.String()) close(participant.result) close(gtidch) + td.wgShardStreamers.Done() }() participant.err = func() error { conn, err := tabletconn.GetDialer()(participant.tablet, false) @@ -375,8 +404,7 @@ func (td *tableDiffer) streamOneShard(ctx context.Context, participant *shardStr // unbuffered channels which would present a major performance bottleneck. // This need arises from the gRPC VStreamRowsResponse pooling and re-use/recycling done for // gRPCQueryClient.VStreamRows() in vttablet/grpctabletconn/conn. - vsr := proto.Clone(vsrRaw).(*binlogdatapb.VStreamRowsResponse) - + vsr := vsrRaw.CloneVT() if len(fields) == 0 { if len(vsr.Fields) == 0 { return fmt.Errorf("did not received expected fields in response %+v on tablet %v", @@ -395,13 +423,15 @@ func (td *tableDiffer) streamOneShard(ctx context.Context, participant *shardStr result := sqltypes.Proto3ToResult(p3qr) // Fields should be received only once, and sent only once. 
- if vsr.Fields == nil { + if len(vsr.Fields) == 0 { result.Fields = nil } select { case participant.result <- result: case <-ctx.Done(): return vterrors.Wrap(ctx.Err(), "VStreamRows") + case <-td.wd.ct.done: + return vterrors.Errorf(vtrpcpb.Code_CANCELED, "vdiff was stopped") } return nil }) @@ -442,7 +472,13 @@ func (td *tableDiffer) diff(ctx context.Context, rowsToCompare int64, debug, onl // We need to continue were we left off when appropriate. This can be an // auto-retry on error, or a manual retry via the resume command. // Otherwise the existing state will be empty and we start from scratch. - query := fmt.Sprintf(sqlGetVDiffTable, td.wd.ct.id, encodeString(td.table.Name)) + query, err := sqlparser.ParseAndBind(sqlGetVDiffTable, + sqltypes.Int64BindVariable(td.wd.ct.id), + sqltypes.StringBindVariable(td.table.Name), + ) + if err != nil { + return nil, err + } cs, err := dbClient.ExecuteFetch(query, -1) if err != nil { return nil, err @@ -483,6 +519,8 @@ func (td *tableDiffer) diff(ctx context.Context, rowsToCompare int64, debug, onl select { case <-ctx.Done(): return nil, vterrors.Errorf(vtrpcpb.Code_CANCELED, "context has expired") + case <-td.wd.ct.done: + return nil, vterrors.Errorf(vtrpcpb.Code_CANCELED, "vdiff was stopped") default: } @@ -630,10 +668,9 @@ func (td *tableDiffer) compare(sourceRow, targetRow []sqltypes.Value, cols []com collationID collations.ID ) // If the collation is nil or unknown, use binary collation to compare as bytes. 
- if col.collation == nil { + collationID = col.collation + if collationID == collations.Unknown { collationID = collations.CollationBinaryID - } else { - collationID = col.collation.ID() } c, err = evalengine.NullsafeCompare(sourceRow[compareIndex], targetRow[compareIndex], collationID) if err != nil { @@ -663,9 +700,26 @@ func (td *tableDiffer) updateTableProgress(dbClient binlogplayer.DBClient, dr *D return err } - query = fmt.Sprintf(sqlUpdateTableProgress, dr.ProcessedRows, encodeString(string(lastPK)), encodeString(string(rpt)), td.wd.ct.id, encodeString(td.table.Name)) + query, err = sqlparser.ParseAndBind(sqlUpdateTableProgress, + sqltypes.Int64BindVariable(dr.ProcessedRows), + sqltypes.StringBindVariable(string(lastPK)), + sqltypes.StringBindVariable(string(rpt)), + sqltypes.Int64BindVariable(td.wd.ct.id), + sqltypes.StringBindVariable(td.table.Name), + ) + if err != nil { + return err + } } else { - query = fmt.Sprintf(sqlUpdateTableNoProgress, dr.ProcessedRows, encodeString(string(rpt)), td.wd.ct.id, encodeString(td.table.Name)) + query, err = sqlparser.ParseAndBind(sqlUpdateTableNoProgress, + sqltypes.Int64BindVariable(dr.ProcessedRows), + sqltypes.StringBindVariable(string(rpt)), + sqltypes.Int64BindVariable(td.wd.ct.id), + sqltypes.StringBindVariable(td.table.Name), + ) + if err != nil { + return err + } } if _, err := dbClient.ExecuteFetch(query, 1); err != nil { return err @@ -674,8 +728,15 @@ func (td *tableDiffer) updateTableProgress(dbClient binlogplayer.DBClient, dr *D } func (td *tableDiffer) updateTableState(ctx context.Context, dbClient binlogplayer.DBClient, state VDiffState) error { - query := fmt.Sprintf(sqlUpdateTableState, encodeString(string(state)), td.wd.ct.id, encodeString(td.table.Name)) - if _, err := dbClient.ExecuteFetch(query, 1); err != nil { + query, err := sqlparser.ParseAndBind(sqlUpdateTableState, + sqltypes.StringBindVariable(string(state)), + sqltypes.Int64BindVariable(td.wd.ct.id), + 
sqltypes.StringBindVariable(td.table.Name), + ) + if err != nil { + return err + } + if _, err = dbClient.ExecuteFetch(query, 1); err != nil { return err } insertVDiffLog(ctx, dbClient, td.wd.ct.id, fmt.Sprintf("%s: table %s", state, encodeString(td.table.Name))) @@ -694,8 +755,17 @@ func (td *tableDiffer) updateTableStateAndReport(ctx context.Context, dbClient b } else { report = "{}" } - query := fmt.Sprintf(sqlUpdateTableStateAndReport, encodeString(string(state)), dr.ProcessedRows, encodeString(report), td.wd.ct.id, encodeString(td.table.Name)) - if _, err := dbClient.ExecuteFetch(query, 1); err != nil { + query, err := sqlparser.ParseAndBind(sqlUpdateTableStateAndReport, + sqltypes.StringBindVariable(string(state)), + sqltypes.Int64BindVariable(dr.ProcessedRows), + sqltypes.StringBindVariable(report), + sqltypes.Int64BindVariable(td.wd.ct.id), + sqltypes.StringBindVariable(td.table.Name), + ) + if err != nil { + return err + } + if _, err = dbClient.ExecuteFetch(query, 1); err != nil { return err } insertVDiffLog(ctx, dbClient, td.wd.ct.id, fmt.Sprintf("%s: table %s", state, encodeString(td.table.Name))) @@ -704,8 +774,14 @@ func (td *tableDiffer) updateTableStateAndReport(ctx context.Context, dbClient b } func updateTableMismatch(dbClient binlogplayer.DBClient, vdiffID int64, table string) error { - query := fmt.Sprintf(sqlUpdateTableMismatch, vdiffID, encodeString(table)) - if _, err := dbClient.ExecuteFetch(query, 1); err != nil { + query, err := sqlparser.ParseAndBind(sqlUpdateTableMismatch, + sqltypes.Int64BindVariable(vdiffID), + sqltypes.StringBindVariable(table), + ) + if err != nil { + return err + } + if _, err = dbClient.ExecuteFetch(query, 1); err != nil { return err } return nil @@ -772,55 +848,6 @@ func (td *tableDiffer) adjustForSourceTimeZone(targetSelectExprs sqlparser.Selec return targetSelectExprs } -// updateTableStats runs ANALYZE TABLE on the table in order to update the -// statistics, then it reads those updated stats (specifically the 
number of -// rows in the table) and saves them in the vdiff_table record. -func (td *tableDiffer) updateTableStats(dbClient binlogplayer.DBClient) error { - // First update the stats. - stmt := sqlparser.BuildParsedQuery(sqlAnalyzeTable, td.wd.ct.vde.dbName, td.table.Name) - if _, err := dbClient.ExecuteFetch(stmt.Query, -1); err != nil { - return err - } - // Now read the updated stats. - query, err := sqlparser.ParseAndBind(sqlGetTableRows, - sqltypes.StringBindVariable(td.wd.ct.vde.dbName), - sqltypes.StringBindVariable(td.table.Name), - ) - if err != nil { - return err - } - isqr, err := dbClient.ExecuteFetch(query, 1) - if err != nil { - return err - } - if isqr == nil || len(isqr.Rows) != 1 { - rows := 0 - if isqr != nil { - rows = len(isqr.Rows) - } - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected number of rows returned from %s: %d", query, rows) - } - // And finally save the updated stats. - row := isqr.Named().Row() - tableRows, err := row.ToInt64("table_rows") - if err != nil { - strVal, _ := row.ToString("table_rows") - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid value (%s) returned from %s: %v", strVal, query, err) - } - query, err = sqlparser.ParseAndBind(sqlUpdateTableRows, - sqltypes.Int64BindVariable(tableRows), - sqltypes.Int64BindVariable(td.wd.ct.id), - sqltypes.StringBindVariable(td.table.Name), - ) - if err != nil { - return err - } - if _, err := dbClient.ExecuteFetch(query, 1); err != nil { - return err - } - return nil -} - func getColumnNameForSelectExpr(selectExpression sqlparser.SelectExpr) (string, error) { aliasedExpr := selectExpression.(*sqlparser.AliasedExpr) expr := aliasedExpr.Expr diff --git a/go/vt/vttablet/tabletmanager/vdiff/table_plan.go b/go/vt/vttablet/tabletmanager/vdiff/table_plan.go index 61d1e64f6ec..e669dbd9a33 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/table_plan.go +++ b/go/vt/vttablet/tabletmanager/vdiff/table_plan.go @@ -103,17 +103,16 @@ func (td *tableDiffer) buildTablePlan(dbClient 
binlogplayer.DBClient, dbName str // Check if it's an aggregate expression if expr, ok := selExpr.Expr.(sqlparser.AggrFunc); ok { - switch fname := strings.ToLower(expr.AggrName()); fname { + switch fname := expr.AggrName(); fname { case "count", "sum": // this will only work as long as aggregates can be pushed down to tablets // this won't work: "select count(*) from (select id from t limit 1)" // since vreplication only handles simple tables (no joins/derived tables) this is fine for now // but will need to be revisited when we add such support to vreplication - aggregateFuncType := "sum" - aggregates = append(aggregates, &engine.AggregateParams{ - Opcode: opcode.SupportedAggregates[aggregateFuncType], - Col: len(sourceSelect.SelectExprs) - 1, - }) + aggregates = append(aggregates, engine.NewAggregateParam( + /*opcode*/ opcode.AggregateSum, + /*offset*/ len(sourceSelect.SelectExprs)-1, + /*alias*/ "")) } } default: diff --git a/go/vt/vttablet/tabletmanager/vdiff/utils.go b/go/vt/vttablet/tabletmanager/vdiff/utils.go index 34a9b3f164b..12ea1e8a68c 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/utils.go +++ b/go/vt/vttablet/tabletmanager/vdiff/utils.go @@ -40,10 +40,10 @@ func newMergeSorter(participants map[string]*shardStreamer, comparePKs []compare for i, cpk := range comparePKs { weightStringCol := -1 // if the collation is nil or unknown, use binary collation to compare as bytes - if cpk.collation == nil { + if cpk.collation == collations.Unknown { ob[i] = engine.OrderByParams{Col: cpk.colIndex, WeightStringCol: weightStringCol, Type: sqltypes.Unknown, CollationID: collations.CollationBinaryID} } else { - ob[i] = engine.OrderByParams{Col: cpk.colIndex, WeightStringCol: weightStringCol, Type: sqltypes.Unknown, CollationID: cpk.collation.ID()} + ob[i] = engine.OrderByParams{Col: cpk.colIndex, WeightStringCol: weightStringCol, Type: sqltypes.Unknown, CollationID: cpk.collation} } } return &engine.MergeSort{ diff --git 
a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go index ed4879eeb95..d7d2583a5d3 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go +++ b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go @@ -134,9 +134,19 @@ func (wd *workflowDiffer) reconcileExtraRows(dr *DiffReport, maxExtraRowsToCompa } func (wd *workflowDiffer) diffTable(ctx context.Context, dbClient binlogplayer.DBClient, td *tableDiffer) error { + defer func() { + if td.shardStreamsCancel != nil { + td.shardStreamsCancel() + } + // Wait for all the shard streams to finish before returning. + td.wgShardStreamers.Wait() + }() + select { case <-ctx.Done(): return vterrors.Errorf(vtrpcpb.Code_CANCELED, "context has expired") + case <-wd.ct.done: + return vterrors.Errorf(vtrpcpb.Code_CANCELED, "vdiff was stopped") default: } @@ -184,6 +194,8 @@ func (wd *workflowDiffer) diff(ctx context.Context) error { select { case <-ctx.Done(): return vterrors.Errorf(vtrpcpb.Code_CANCELED, "context has expired") + case <-wd.ct.done: + return vterrors.Errorf(vtrpcpb.Code_CANCELED, "vdiff was stopped") default: } @@ -203,9 +215,17 @@ func (wd *workflowDiffer) diff(ctx context.Context) error { select { case <-ctx.Done(): return vterrors.Errorf(vtrpcpb.Code_CANCELED, "context has expired") + case <-wd.ct.done: + return vterrors.Errorf(vtrpcpb.Code_CANCELED, "vdiff was stopped") default: } - query := fmt.Sprintf(sqlGetVDiffTable, wd.ct.id, encodeString(td.table.Name)) + query, err := sqlparser.ParseAndBind(sqlGetVDiffTable, + sqltypes.Int64BindVariable(wd.ct.id), + sqltypes.StringBindVariable(td.table.Name), + ) + if err != nil { + return err + } qr, err := dbClient.ExecuteFetch(query, 1) if err != nil { return err @@ -235,7 +255,10 @@ func (wd *workflowDiffer) diff(ctx context.Context) error { } func (wd *workflowDiffer) markIfCompleted(ctx context.Context, dbClient binlogplayer.DBClient) error { - query := fmt.Sprintf(sqlGetIncompleteTables, 
wd.ct.id) + query, err := sqlparser.ParseAndBind(sqlGetIncompleteTables, sqltypes.Int64BindVariable(wd.ct.id)) + if err != nil { + return err + } qr, err := dbClient.ExecuteFetch(query, -1) if err != nil { return err @@ -305,7 +328,13 @@ func (wd *workflowDiffer) buildPlan(dbClient binlogplayer.DBClient, filter *binl // getTableLastPK gets the lastPK protobuf message for a given vdiff table. func (wd *workflowDiffer) getTableLastPK(dbClient binlogplayer.DBClient, tableName string) (*querypb.QueryResult, error) { - query := fmt.Sprintf(sqlGetVDiffTable, wd.ct.id, encodeString(tableName)) + query, err := sqlparser.ParseAndBind(sqlGetVDiffTable, + sqltypes.Int64BindVariable(wd.ct.id), + sqltypes.StringBindVariable(tableName), + ) + if err != nil { + return nil, err + } qr, err := dbClient.ExecuteFetch(query, 1) if err != nil { return nil, err @@ -332,7 +361,10 @@ func (wd *workflowDiffer) initVDiffTables(dbClient binlogplayer.DBClient) error for tableName := range wd.tableDiffers { // Update the table statistics for each table if requested. 
if wd.opts.CoreOptions.UpdateTableStats { - stmt := sqlparser.BuildParsedQuery(sqlAnalyzeTable, wd.ct.vde.dbName, tableName) + stmt := sqlparser.BuildParsedQuery(sqlAnalyzeTable, + wd.ct.vde.dbName, + tableName, + ) log.Infof("Updating the table stats for %s.%s using: %q", wd.ct.vde.dbName, tableName, stmt.Query) if _, err := dbClient.ExecuteFetch(stmt.Query, -1); err != nil { return err @@ -344,8 +376,11 @@ func (wd *workflowDiffer) initVDiffTables(dbClient binlogplayer.DBClient) error tableIn.WriteByte(',') } } - query := fmt.Sprintf(sqlGetAllTableRows, encodeString(wd.ct.vde.dbName), tableIn.String()) - isqr, err := dbClient.ExecuteFetch(query, -1) + query := sqlparser.BuildParsedQuery(sqlGetAllTableRows, + encodeString(wd.ct.vde.dbName), + tableIn.String(), + ) + isqr, err := dbClient.ExecuteFetch(query.Query, -1) if err != nil { return err } @@ -353,13 +388,26 @@ func (wd *workflowDiffer) initVDiffTables(dbClient binlogplayer.DBClient) error tableName, _ := row.ToString("table_name") tableRows, _ := row.ToInt64("table_rows") - query := fmt.Sprintf(sqlGetVDiffTable, wd.ct.id, encodeString(tableName)) + query, err := sqlparser.ParseAndBind(sqlGetVDiffTable, + sqltypes.Int64BindVariable(wd.ct.id), + sqltypes.StringBindVariable(tableName), + ) + if err != nil { + return err + } qr, err := dbClient.ExecuteFetch(query, -1) if err != nil { return err } if len(qr.Rows) == 0 { - query = fmt.Sprintf(sqlNewVDiffTable, wd.ct.id, encodeString(tableName), tableRows) + query, err = sqlparser.ParseAndBind(sqlNewVDiffTable, + sqltypes.Int64BindVariable(wd.ct.id), + sqltypes.StringBindVariable(tableName), + sqltypes.Int64BindVariable(tableRows), + ) + if err != nil { + return err + } } else if len(qr.Rows) == 1 { query, err = sqlparser.ParseAndBind(sqlUpdateTableRows, sqltypes.Int64BindVariable(tableRows), diff --git a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ_test.go b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ_test.go index 31a014a28f4..10c6406f046 100644 --- 
a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ_test.go +++ b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ_test.go @@ -437,13 +437,10 @@ func TestBuildPlanSuccess(t *testing.T) { Expr: &sqlparser.ColName{Name: sqlparser.NewIdentifierCI("c1")}, Direction: sqlparser.AscOrder, }}, - aggregates: []*engine.AggregateParams{{ - Opcode: opcode.AggregateSum, - Col: 2, - }, { - Opcode: opcode.AggregateSum, - Col: 3, - }}, + aggregates: []*engine.AggregateParams{ + engine.NewAggregateParam(opcode.AggregateSum, 2, ""), + engine.NewAggregateParam(opcode.AggregateSum, 3, ""), + }, }, }, { // date conversion on import. @@ -486,10 +483,11 @@ func TestBuildPlanSuccess(t *testing.T) { dbc.ExpectRequestRE("select vdt.lastpk as lastpk, vdt.mismatch as mismatch, vdt.report as report", noResults, nil) columnList := make([]string, len(tcase.tablePlan.comparePKs)) collationList := make([]string, len(tcase.tablePlan.comparePKs)) + env := collations.Local() for i := range tcase.tablePlan.comparePKs { columnList[i] = tcase.tablePlan.comparePKs[i].colName - if tcase.tablePlan.comparePKs[i].collation != nil { - collationList[i] = tcase.tablePlan.comparePKs[i].collation.Name() + if tcase.tablePlan.comparePKs[i].collation != collations.Unknown { + collationList[i] = env.LookupName(tcase.tablePlan.comparePKs[i].collation) } else { collationList[i] = sqltypes.NULL.String() } diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller.go b/go/vt/vttablet/tabletmanager/vreplication/controller.go index 8f9974a5424..f8f096d97d3 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller.go @@ -85,10 +85,9 @@ func newController(ctx context.Context, params map[string]string, dbClientFactor done: make(chan struct{}), source: &binlogdatapb.BinlogSource{}, } - ct.sourceTablet.Store("") + ct.sourceTablet.Store(&topodatapb.TabletAlias{}) log.Infof("creating controller with cell: %v, tabletTypes: %v, and params: %v", cell, 
tabletTypesStr, params) - // id id, err := strconv.ParseInt(params["id"], 10, 32) if err != nil { return nil, err @@ -99,21 +98,21 @@ func newController(ctx context.Context, params map[string]string, dbClientFactor state := params["state"] blpStats.State.Store(state) + if err := prototext.Unmarshal([]byte(params["source"]), ct.source); err != nil { + return nil, err + } + // Nothing to do if replication is stopped or is known to have an unrecoverable error. - if state == binlogplayer.BlpStopped || state == binlogplayer.BlpError { + if state == binlogdatapb.VReplicationWorkflowState_Stopped.String() || state == binlogdatapb.VReplicationWorkflowState_Error.String() { ct.cancel = func() {} close(ct.done) + blpStats.Stop() return ct, nil } - // source, stopPos - if err := prototext.Unmarshal([]byte(params["source"]), ct.source); err != nil { - return nil, err - } ct.stopPos = params["stop_pos"] if ct.source.GetExternalMysql() == "" { - // tabletPicker if v := params["cell"]; v != "" { cell = v } @@ -137,7 +136,6 @@ func newController(ctx context.Context, params map[string]string, dbClientFactor ct.tabletPicker = tp } - // cancel ctx, ct.cancel = context.WithCancel(ctx) go ct.run(ctx) @@ -166,7 +164,7 @@ func (ct *controller) run(ctx context.Context) { } ct.blpStats.ErrorCounts.Add([]string{"Stream Error"}, 1) - binlogplayer.LogError(fmt.Sprintf("error in stream %v, retrying after %v", ct.id, retryDelay), err) + binlogplayer.LogError(fmt.Sprintf("error in stream %v, will retry after %v", ct.id, retryDelay), err) timer := time.NewTimer(retryDelay) select { case <-ctx.Done(): @@ -180,7 +178,7 @@ func (ct *controller) run(ctx context.Context) { func (ct *controller) runBlp(ctx context.Context) (err error) { defer func() { - ct.sourceTablet.Store("") + ct.sourceTablet.Store(&topodatapb.TabletAlias{}) if x := recover(); x != nil { log.Errorf("stream %v: caught panic: %v\n%s", ct.id, x, tb.Stack(4)) err = fmt.Errorf("panic: %v", x) @@ -199,25 +197,11 @@ func (ct *controller) 
runBlp(ctx context.Context) (err error) { } defer dbClient.Close() - var tablet *topodatapb.Tablet - if ct.source.GetExternalMysql() == "" { - log.Infof("trying to find a tablet eligible for vreplication. stream id: %v", ct.id) - tpCtx, tpCancel := context.WithTimeout(ctx, discovery.GetTabletPickerRetryDelay()*tabletPickerRetries) - defer tpCancel() - tablet, err = ct.tabletPicker.PickForStreaming(tpCtx) - if err != nil { - select { - case <-ctx.Done(): - default: - ct.blpStats.ErrorCounts.Add([]string{"No Source Tablet Found"}, 1) - ct.setMessage(dbClient, fmt.Sprintf("Error picking tablet: %s", err.Error())) - } - return err - } - ct.setMessage(dbClient, fmt.Sprintf("Picked source tablet: %s", tablet.Alias.String())) - log.Infof("found a tablet eligible for vreplication. stream id: %v tablet: %s", ct.id, tablet.Alias.String()) - ct.sourceTablet.Store(tablet.Alias.String()) + tablet, err := ct.pickSourceTablet(ctx, dbClient) + if err != nil { + return err } + switch { case len(ct.source.Tables) > 0: // Table names can have search patterns. Resolve them against the schema. @@ -266,13 +250,19 @@ func (ct *controller) runBlp(ctx context.Context) (err error) { vr := newVReplicator(ct.id, ct.source, vsClient, ct.blpStats, dbClient, ct.mysqld, ct.vre) err = vr.Replicate(ctx) ct.lastWorkflowError.Record(err) + // If this is a mysql error that we know needs manual intervention OR - // we cannot identify this as non-recoverable, but it has persisted beyond the retry limit (maxTimeToRetryError) - if isUnrecoverableError(err) || !ct.lastWorkflowError.ShouldRetry() { - log.Errorf("vreplication stream %d going into error state due to %+v", ct.id, err) - if errSetState := vr.setState(binlogplayer.BlpError, err.Error()); errSetState != nil { + // we cannot identify this as non-recoverable, but it has persisted + // beyond the retry limit (maxTimeToRetryError). + // In addition, we cannot restart a workflow started with AtomicCopy which has _any_ error. 
+ if (err != nil && vr.WorkflowSubType == int32(binlogdatapb.VReplicationWorkflowSubType_AtomicCopy)) || + isUnrecoverableError(err) || !ct.lastWorkflowError.ShouldRetry() { + + if errSetState := vr.setState(binlogdatapb.VReplicationWorkflowState_Error, err.Error()); errSetState != nil { + log.Errorf("INTERNAL: unable to setState() in controller: %v. Could not set error text to: %v.", errSetState, err) return err // yes, err and not errSetState. } + log.Errorf("vreplication stream %d going into error state due to %+v", ct.id, err) return nil // this will cause vreplicate to quit the workflow } return err @@ -292,7 +282,37 @@ func (ct *controller) setMessage(dbClient binlogplayer.DBClient, message string) } return nil } + +// pickSourceTablet picks a healthy serving tablet to source for +// the vreplication stream. If the source is marked as external, it +// returns nil. +func (ct *controller) pickSourceTablet(ctx context.Context, dbClient binlogplayer.DBClient) (*topodatapb.Tablet, error) { + if ct.source.GetExternalMysql() != "" { + return nil, nil + } + log.Infof("Trying to find an eligible source tablet for vreplication stream id %d for workflow: %s", + ct.id, ct.workflow) + tpCtx, tpCancel := context.WithTimeout(ctx, discovery.GetTabletPickerRetryDelay()*tabletPickerRetries) + defer tpCancel() + tablet, err := ct.tabletPicker.PickForStreaming(tpCtx) + if err != nil { + select { + case <-ctx.Done(): + default: + ct.blpStats.ErrorCounts.Add([]string{"No Source Tablet Found"}, 1) + ct.setMessage(dbClient, fmt.Sprintf("Error picking tablet: %s", err.Error())) + } + return tablet, err + } + ct.setMessage(dbClient, fmt.Sprintf("Picked source tablet: %s", tablet.Alias.String())) + log.Infof("Found eligible source tablet %s for vreplication stream id %d for workflow %s", + tablet.Alias.String(), ct.id, ct.workflow) + ct.sourceTablet.Store(tablet.Alias) + return tablet, err +} + func (ct *controller) Stop() { ct.cancel() + ct.blpStats.Stop() <-ct.done } diff --git 
a/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go b/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go index 5d2091d335d..b168625d20a 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go @@ -19,7 +19,7 @@ package vreplication import ( "fmt" - "vitess.io/vitess/go/vt/sidecardb" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/vt/sqlparser" ) @@ -85,7 +85,7 @@ func buildInsertPlan(ins *sqlparser.Insert) (*controllerPlan, error) { if err != nil { return nil, err } - if tableName.Qualifier.String() != sidecardb.GetName() && tableName.Qualifier.String() != sidecardb.DefaultName { + if tableName.Qualifier.String() != sidecar.GetName() && tableName.Qualifier.String() != sidecar.DefaultName { return nil, fmt.Errorf("invalid database name: %s", tableName.Qualifier.String()) } switch tableName.Name.String() { @@ -154,7 +154,7 @@ func buildUpdatePlan(upd *sqlparser.Update) (*controllerPlan, error) { if err != nil { return nil, err } - if tableName.Qualifier.String() != sidecardb.GetName() && tableName.Qualifier.String() != sidecardb.DefaultName { + if tableName.Qualifier.String() != sidecar.GetName() && tableName.Qualifier.String() != sidecar.DefaultName { return nil, fmt.Errorf("invalid database name: %s", tableName.Qualifier.String()) } switch tableName.Name.String() { @@ -177,7 +177,7 @@ func buildUpdatePlan(upd *sqlparser.Update) (*controllerPlan, error) { } buf1 := sqlparser.NewTrackedBuffer(nil) - buf1.Myprintf("select id from %s.%s%v", sidecardb.GetIdentifier(), vreplicationTableName, upd.Where) + buf1.Myprintf("select id from %s.%s%v", sidecar.GetIdentifier(), vreplicationTableName, upd.Where) upd.Where = &sqlparser.Where{ Type: sqlparser.WhereClause, Expr: &sqlparser.ComparisonExpr{ @@ -211,7 +211,7 @@ func buildDeletePlan(del *sqlparser.Delete) (*controllerPlan, error) { if err != nil { return nil, err } - if tableName.Qualifier.String() != 
sidecardb.GetName() && tableName.Qualifier.String() != sidecardb.DefaultName { + if tableName.Qualifier.String() != sidecar.GetName() && tableName.Qualifier.String() != sidecar.DefaultName { return nil, fmt.Errorf("invalid database name: %s", tableName.Qualifier.String()) } switch tableName.Name.String() { @@ -235,7 +235,7 @@ func buildDeletePlan(del *sqlparser.Delete) (*controllerPlan, error) { } buf1 := sqlparser.NewTrackedBuffer(nil) - buf1.Myprintf("select id from %s.%s%v", sidecardb.GetIdentifier(), vreplicationTableName, del.Where) + buf1.Myprintf("select id from %s.%s%v", sidecar.GetIdentifier(), vreplicationTableName, del.Where) del.Where = &sqlparser.Where{ Type: sqlparser.WhereClause, Expr: &sqlparser.ComparisonExpr{ @@ -257,10 +257,10 @@ func buildDeletePlan(del *sqlparser.Delete) (*controllerPlan, error) { }, } buf3 := sqlparser.NewTrackedBuffer(nil) - buf3.Myprintf("delete from %s.%s%v", sidecardb.GetIdentifier(), copyStateTableName, copyStateWhere) + buf3.Myprintf("delete from %s.%s%v", sidecar.GetIdentifier(), copyStateTableName, copyStateWhere) buf4 := sqlparser.NewTrackedBuffer(nil) - buf4.Myprintf("delete from %s.%s%v", sidecardb.GetIdentifier(), postCopyActionTableName, copyStateWhere) + buf4.Myprintf("delete from %s.%s%v", sidecar.GetIdentifier(), postCopyActionTableName, copyStateWhere) return &controllerPlan{ opcode: deleteQuery, @@ -285,7 +285,7 @@ func buildSelectPlan(sel *sqlparser.Select) (*controllerPlan, error) { if err != nil { return nil, err } - if tableName.Qualifier.String() != sidecardb.GetName() && tableName.Qualifier.String() != sidecardb.DefaultName { + if tableName.Qualifier.String() != sidecar.GetName() && tableName.Qualifier.String() != sidecar.DefaultName { return nil, fmt.Errorf("invalid database name: %s", tableName.Qualifier.String()) } switch tableName.Name.String() { diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_test.go b/go/vt/vttablet/tabletmanager/vreplication/controller_test.go index 
94762102f8f..efab9693fa2 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_test.go @@ -23,13 +23,13 @@ import ( "testing" "time" - "vitess.io/vitess/go/vt/mysqlctl" - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/mysqlctl/tmutils" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -54,11 +54,11 @@ var ( sqltypes.NULL, // stop_pos sqltypes.NewInt64(9223372036854775807), // max_tps sqltypes.NewInt64(9223372036854775807), // max_replication_lag - sqltypes.NewVarBinary("Running"), // state - sqltypes.NewInt64(1), // workflow_type - sqltypes.NewVarChar("wf"), // workflow - sqltypes.NewInt64(0), // workflow_sub_type - sqltypes.NewInt64(0), // defer_secondary_keys + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), // state + sqltypes.NewInt64(1), // workflow_type + sqltypes.NewVarChar("wf"), // workflow + sqltypes.NewInt64(0), // workflow_sub_type + sqltypes.NewInt64(0), // defer_secondary_keys }, }, } @@ -74,7 +74,7 @@ func TestControllerKeyRange(t *testing.T) { defer deleteTablet(wantTablet) params := map[string]string{ "id": "1", - "state": binlogplayer.BlpRunning, + "state": binlogdatapb.VReplicationWorkflowState_Running.String(), "source": fmt.Sprintf(`keyspace:"%s" shard:"0" key_range:{end:"\x80"}`, env.KeyspaceName), } @@ -112,7 +112,7 @@ func TestControllerTables(t *testing.T) { params := map[string]string{ "id": "1", - "state": binlogplayer.BlpRunning, + "state": binlogdatapb.VReplicationWorkflowState_Running.String(), "source": fmt.Sprintf(`keyspace:"%s" shard:"0" tables:"table1" tables:"/funtables_/" `, env.KeyspaceName), } @@ -181,7 +181,7 
@@ func TestControllerBadID(t *testing.T) { func TestControllerStopped(t *testing.T) { params := map[string]string{ "id": "1", - "state": binlogplayer.BlpStopped, + "state": binlogdatapb.VReplicationWorkflowState_Stopped.String(), } ct, err := newController(context.Background(), params, nil, nil, nil, "", "", nil, nil) @@ -204,7 +204,7 @@ func TestControllerOverrides(t *testing.T) { params := map[string]string{ "id": "1", - "state": binlogplayer.BlpRunning, + "state": binlogdatapb.VReplicationWorkflowState_Running.String(), "source": fmt.Sprintf(`keyspace:"%s" shard:"0" key_range:{end:"\x80"}`, env.KeyspaceName), "cell": env.Cells[0], "tablet_types": "replica", @@ -243,7 +243,7 @@ func TestControllerCanceledContext(t *testing.T) { params := map[string]string{ "id": "1", - "state": binlogplayer.BlpRunning, + "state": binlogdatapb.VReplicationWorkflowState_Running.String(), "source": fmt.Sprintf(`keyspace:"%s" shard:"0" key_range:{end:"\x80"}`, env.KeyspaceName), } @@ -274,7 +274,7 @@ func TestControllerRetry(t *testing.T) { params := map[string]string{ "id": "1", - "state": binlogplayer.BlpRunning, + "state": binlogdatapb.VReplicationWorkflowState_Running.String(), "source": fmt.Sprintf(`keyspace:"%s" shard:"0" key_range:{end:"\x80"}`, env.KeyspaceName), "cell": env.Cells[0], "tablet_types": "replica", @@ -313,7 +313,7 @@ func TestControllerStopPosition(t *testing.T) { params := map[string]string{ "id": "1", - "state": binlogplayer.BlpRunning, + "state": binlogdatapb.VReplicationWorkflowState_Running.String(), "source": fmt.Sprintf(`keyspace:"%s" shard:"0" key_range:{end:"\x80"}`, env.KeyspaceName), } @@ -335,15 +335,15 @@ func TestControllerStopPosition(t *testing.T) { InsertID: 0, Rows: [][]sqltypes.Value{ { - sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos - sqltypes.NewVarBinary("MariaDB/0-1-1235"), // stop_pos - sqltypes.NewInt64(9223372036854775807), // max_tps - sqltypes.NewInt64(9223372036854775807), // max_replication_lag - 
sqltypes.NewVarBinary("Running"), // state - sqltypes.NewInt64(1), // workflow_type - sqltypes.NewVarChar("wf"), // workflow - sqltypes.NewInt64(1), // workflow_sub_type - sqltypes.NewInt64(1), // defer_secondary_keys + sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos + sqltypes.NewVarBinary("MariaDB/0-1-1235"), // stop_pos + sqltypes.NewInt64(9223372036854775807), // max_tps + sqltypes.NewInt64(9223372036854775807), // max_replication_lag + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), // state + sqltypes.NewInt64(1), // workflow_type + sqltypes.NewVarChar("wf"), // workflow + sqltypes.NewInt64(1), // workflow_sub_type + sqltypes.NewInt64(1), // defer_secondary_keys }, }, } diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine.go b/go/vt/vttablet/tabletmanager/vreplication/engine.go index 52f9c072d49..8b81dd722c6 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine.go @@ -27,9 +27,10 @@ import ( "sync/atomic" "time" - "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/constants/sidecar" + + "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/dbconfigs" @@ -38,7 +39,6 @@ import ( binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -221,7 +221,6 @@ func (vre *Engine) Open(ctx context.Context) { } func (vre *Engine) openLocked(ctx context.Context) error { - rows, err := vre.readAllRows(ctx) if err != nil { return err @@ -377,10 +376,12 @@ func (vre *Engine) exec(query string, runAsAdmin bool) (*sqltypes.Result, error) // Change the database to ensure that these events don't get // 
replicated by another vreplication. This can happen when // we reverse replication. - if _, err := dbClient.ExecuteFetch(fmt.Sprintf("use %s", sidecardb.GetIdentifier()), 1); err != nil { + if _, err := dbClient.ExecuteFetch(fmt.Sprintf("use %s", sidecar.GetIdentifier()), 1); err != nil { return nil, err } + stats := binlogplayer.NewStats() + defer stats.Stop() switch plan.opcode { case insertQuery: qr, err := dbClient.ExecuteFetch(plan.query, 1) @@ -395,7 +396,7 @@ func (vre *Engine) exec(query string, runAsAdmin bool) (*sqltypes.Result, error) return nil, fmt.Errorf("insert id %v out of range", qr.InsertID) } - vdbc := newVDBClient(dbClient, binlogplayer.NewStats()) + vdbc := newVDBClient(dbClient, stats) // If we are creating multiple streams, for example in a // merge workflow going from 2 shards to 1 shard, we @@ -454,7 +455,7 @@ func (vre *Engine) exec(query string, runAsAdmin bool) (*sqltypes.Result, error) if err != nil { return nil, err } - vdbc := newVDBClient(dbClient, binlogplayer.NewStats()) + vdbc := newVDBClient(dbClient, stats) for _, id := range ids { params, err := readRow(dbClient, id) if err != nil { @@ -481,7 +482,7 @@ func (vre *Engine) exec(query string, runAsAdmin bool) (*sqltypes.Result, error) return &sqltypes.Result{}, nil } // Stop and delete the current controllers. 
- vdbc := newVDBClient(dbClient, binlogplayer.NewStats()) + vdbc := newVDBClient(dbClient, stats) for _, id := range ids { if ct := vre.controllers[id]; ct != nil { ct.Stop() @@ -679,13 +680,13 @@ func (vre *Engine) transitionJournal(je *journalEvent) { var newids []int32 for _, shard := range shardGTIDs { sgtid := je.shardGTIDs[shard] - bls := proto.Clone(vre.controllers[refid].source).(*binlogdatapb.BinlogSource) + bls := vre.controllers[refid].source.CloneVT() bls.Keyspace, bls.Shard = sgtid.Keyspace, sgtid.Shard workflowType, _ := strconv.ParseInt(params["workflow_type"], 10, 32) workflowSubType, _ := strconv.ParseInt(params["workflow_sub_type"], 10, 32) deferSecondaryKeys, _ := strconv.ParseBool(params["defer_secondary_keys"]) - ig := NewInsertGenerator(binlogplayer.BlpRunning, vre.dbName) + ig := NewInsertGenerator(binlogdatapb.VReplicationWorkflowState_Running, vre.dbName) ig.AddRow(params["workflow"], bls, sgtid.Gtid, params["cell"], params["tablet_types"], binlogdatapb.VReplicationWorkflowType(workflowType), binlogdatapb.VReplicationWorkflowSubType(workflowSubType), deferSecondaryKeys) qr, err := dbClient.ExecuteFetch(ig.String(), maxRows) @@ -780,7 +781,7 @@ func (vre *Engine) WaitForPos(ctx context.Context, id int32, pos string) error { // The full error we get back from MySQL in that case is: // Deadlock found when trying to get lock; try restarting transaction (errno 1213) (sqlstate 40001) // Docs: https://dev.mysql.com/doc/mysql-errors/en/server-error-reference.html#error_er_lock_deadlock - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Number() == mysql.ERLockDeadlock { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERLockDeadlock { log.Infof("Deadlock detected waiting for pos %s: %v; will retry", pos, err) } else { return err @@ -791,7 +792,7 @@ func (vre *Engine) WaitForPos(ctx context.Context, id int32, pos string) error { return fmt.Errorf("unexpected result: %v", qr) } - // When err is not nil then we got a 
retryable error and will loop again + // When err is not nil then we got a retryable error and will loop again. if err == nil { current, dcerr := binlogplayer.DecodePosition(qr.Rows[0][0].ToString()) if dcerr != nil { @@ -803,7 +804,7 @@ func (vre *Engine) WaitForPos(ctx context.Context, id int32, pos string) error { return nil } - if qr.Rows[0][1].ToString() == binlogplayer.BlpStopped { + if qr.Rows[0][1].ToString() == binlogdatapb.VReplicationWorkflowState_Stopped.String() { return fmt.Errorf("replication has stopped at %v before reaching position %v, message: %s", current, mPos, qr.Rows[0][2].ToString()) } } diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine_test.go b/go/vt/vttablet/tabletmanager/vreplication/engine_test.go index d490417784f..32add04c8e0 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine_test.go @@ -31,6 +31,8 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/mysqlctl" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) func TestEngineOpen(t *testing.T) { @@ -383,12 +385,12 @@ func TestWaitForPos(t *testing.T) { dbClient.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ sqltypes.NewVarBinary("MariaDB/0-1-1083"), - sqltypes.NewVarBinary("Running"), + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), sqltypes.NewVarBinary(""), }}}, nil) dbClient.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ sqltypes.NewVarBinary("MariaDB/0-1-1084"), - sqltypes.NewVarBinary("Running"), + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), sqltypes.NewVarBinary(""), }}}, nil) start := time.Now() @@ -451,7 +453,7 @@ func TestWaitForPosCancel(t *testing.T) { dbClient.ExpectRequest("select pos, state, message from 
_vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ sqltypes.NewVarBinary("MariaDB/0-1-1083"), - sqltypes.NewVarBinary("Running"), + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), sqltypes.NewVarBinary(""), }}}, nil) ctx, cancel := context.WithCancel(context.Background()) @@ -469,7 +471,7 @@ func TestWaitForPosCancel(t *testing.T) { }() dbClient.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ sqltypes.NewVarBinary("MariaDB/0-1-1083"), - sqltypes.NewVarBinary("Running"), + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), sqltypes.NewVarBinary(""), }}}, nil) err = vre.WaitForPos(context.Background(), 1, "MariaDB/0-1-1084") diff --git a/go/vt/vttablet/tabletmanager/vreplication/external_connector.go b/go/vt/vttablet/tabletmanager/vreplication/external_connector.go index 5bec2b4f78a..1c20e2054be 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/external_connector.go +++ b/go/vt/vttablet/tabletmanager/vreplication/external_connector.go @@ -52,6 +52,9 @@ type VStreamerClient interface { // VStreamRows streams rows of a table from the specified starting point. VStreamRows(ctx context.Context, query string, lastpk *querypb.QueryResult, send func(*binlogdatapb.VStreamRowsResponse) error) error + + // VStreamTables streams rows of a table from the specified starting point. 
+ VStreamTables(ctx context.Context, send func(*binlogdatapb.VStreamTablesResponse) error) error } type externalConnector struct { @@ -142,6 +145,10 @@ func (c *mysqlConnector) VStreamRows(ctx context.Context, query string, lastpk * return c.vstreamer.StreamRows(ctx, query, row, send) } +func (c *mysqlConnector) VStreamTables(ctx context.Context, send func(response *binlogdatapb.VStreamTablesResponse) error) error { + return c.vstreamer.StreamTables(ctx, send) +} + //----------------------------------------------------------- type tabletConnector struct { @@ -180,3 +187,8 @@ func (tc *tabletConnector) VStreamRows(ctx context.Context, query string, lastpk req := &binlogdatapb.VStreamRowsRequest{Target: tc.target, Query: query, Lastpk: lastpk} return tc.qs.VStreamRows(ctx, req, send) } + +func (tc *tabletConnector) VStreamTables(ctx context.Context, send func(*binlogdatapb.VStreamTablesResponse) error) error { + req := &binlogdatapb.VStreamTablesRequest{Target: tc.target} + return tc.qs.VStreamTables(ctx, req, send) +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go index 8260d95b462..576ce4c22a8 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go @@ -28,6 +28,7 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/vttablet" "vitess.io/vitess/go/test/utils" @@ -61,17 +62,16 @@ import ( ) var ( - playerEngine *Engine - streamerEngine *vstreamer.Engine - - envMu sync.Mutex - env *testenv.Env - - globalFBC = &fakeBinlogClient{} - vrepldb = "vrepl" - globalDBQueries = make(chan string, 1000) - testForeignKeyQueries = false - doNotLogDBQueries = false + playerEngine *Engine + streamerEngine *vstreamer.Engine + env *testenv.Env + envMu sync.Mutex + globalFBC = &fakeBinlogClient{} + vrepldb = "vrepl" + globalDBQueries = make(chan string, 1000) + 
testForeignKeyQueries = false + testSetForeignKeyQueries = false + doNotLogDBQueries = false ) type LogExpectation struct { @@ -126,9 +126,9 @@ func cleanup() { envMu.Unlock() } -func setup() (func(), int) { +func setup(ctx context.Context) (func(), int) { var err error - env, err = testenv.Init() + env, err = testenv.Init(ctx) if err != nil { fmt.Fprintf(os.Stderr, "%v", err) return nil, 1 @@ -144,12 +144,12 @@ func setup() (func(), int) { streamerEngine.InitDBConfig(env.KeyspaceName, env.ShardName) streamerEngine.Open() - if err := env.Mysqld.ExecuteSuperQuery(context.Background(), fmt.Sprintf("create database %s", vrepldb)); err != nil { + if err := env.Mysqld.ExecuteSuperQuery(ctx, fmt.Sprintf("create database %s", vrepldb)); err != nil { fmt.Fprintf(os.Stderr, "%v", err) return nil, 1 } - if err := env.Mysqld.ExecuteSuperQuery(context.Background(), "set @@global.innodb_lock_wait_timeout=1"); err != nil { + if err := env.Mysqld.ExecuteSuperQuery(ctx, "set @@global.innodb_lock_wait_timeout=1"); err != nil { fmt.Fprintf(os.Stderr, "%v", err) return nil, 1 } @@ -158,7 +158,7 @@ func setup() (func(), int) { "extb": env.Dbcfgs, } playerEngine = NewTestEngine(env.TopoServ, env.Cells[0], env.Mysqld, realDBClientFactory, realDBClientFactory, vrepldb, externalConfig) - playerEngine.Open(context.Background()) + playerEngine.Open(ctx) return cleanup, 0 } @@ -173,11 +173,13 @@ func TestMain(m *testing.M) { binlogplayer.SetProtocol("vreplication_test_framework", "test") _flag.ParseFlagsForTest() exitCode := func() int { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() if err := utils.SetBinlogRowImageMode("full", tempDir); err != nil { panic(err) } defer utils.SetBinlogRowImageMode("", tempDir) - cancel, ret := setup() + cancel, ret := setup(ctx) if ret > 0 { return ret } @@ -192,7 +194,7 @@ func TestMain(m *testing.M) { panic(err) } defer utils.SetBinlogRowImageMode("", tempDir) - cancel, ret = setup() + cancel, ret = setup(ctx) if ret > 0 { 
return ret } @@ -212,7 +214,7 @@ func primaryPosition(t *testing.T) string { if err != nil { t.Fatal(err) } - return mysql.EncodePosition(pos) + return replication.EncodePosition(pos) } func execStatements(t *testing.T, queries []string) { @@ -474,6 +476,8 @@ func (dbc *realDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Resu } if !strings.HasPrefix(query, "select") && !strings.HasPrefix(query, "set") && !dbc.nolog { globalDBQueries <- query + } else if testSetForeignKeyQueries && strings.Contains(query, "set foreign_key_checks") { + globalDBQueries <- query } else if testForeignKeyQueries && strings.Contains(query, "foreign_key_checks") { //allow select/set for foreign_key_checks globalDBQueries <- query } @@ -482,6 +486,9 @@ func (dbc *realDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Resu func expectDeleteQueries(t *testing.T) { t.Helper() + if doNotLogDBQueries { + return + } expectNontxQueries(t, qh.Expect( "/delete from _vt.vreplication", "/delete from _vt.copy_state", @@ -532,6 +539,7 @@ func shouldIgnoreQuery(query string) bool { ", time_throttled=", // update of last throttle time, can happen out-of-band, so can't test for it ", component_throttled=", // update of last throttle time, can happen out-of-band, so can't test for it "context cancel", + "SELECT rows_copied FROM _vt.vreplication WHERE id=", } if sidecardb.MatchesInitQuery(query) { return true @@ -546,6 +554,9 @@ func shouldIgnoreQuery(query string) bool { func expectDBClientQueries(t *testing.T, expectations qh.ExpectationSequence, skippableOnce ...string) { t.Helper() + if doNotLogDBQueries { + return + } failed := false skippedOnce := false validator := qh.NewVerifier(expectations) @@ -606,7 +617,9 @@ func expectDBClientQueries(t *testing.T, expectations qh.ExpectationSequence, sk // It also disregards updates to _vt.vreplication. 
func expectNontxQueries(t *testing.T, expectations qh.ExpectationSequence) { t.Helper() - + if doNotLogDBQueries { + return + } failed := false validator := qh.NewVerifier(expectations) @@ -696,6 +709,7 @@ func customExpectData(t *testing.T, table string, values [][]string, exec func(c if err == nil { return } + log.Errorf("data mismatch: %v, retrying", err) time.Sleep(tick) } } @@ -718,7 +732,7 @@ func compareQueryResults(t *testing.T, query string, values [][]string, } for j, val := range row { if got := qr.Rows[i][j].ToString(); got != val { - return fmt.Errorf("mismatch at (%d, %d): %v, want %s", i, j, qr.Rows[i][j], val) + return fmt.Errorf("mismatch at (%d, %d): got '%s', want '%s'", i, j, qr.Rows[i][j].ToString(), val) } } } diff --git a/go/vt/vttablet/tabletmanager/vreplication/fuzz.go b/go/vt/vttablet/tabletmanager/vreplication/fuzz.go index ef04d479c6d..98183e726df 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/fuzz.go +++ b/go/vt/vttablet/tabletmanager/vreplication/fuzz.go @@ -22,10 +22,10 @@ import ( "sync" "testing" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/mysqlctl" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -102,7 +102,7 @@ func FuzzEngine(data []byte) int { // Fuzzer fails if this expectation is not made first: dbClient.ExpectRequest(sqlparser.BuildParsedQuery("select * from %s.vreplication where db_name='db'", - sidecardb.GetIdentifier()).Query, &sqltypes.Result{}, nil) + sidecar.GetIdentifier()).Query, &sqltypes.Result{}, nil) err = makeExpectations(dbClient, f) if err != nil { return 0 diff --git a/go/vt/vttablet/tabletmanager/vreplication/insert_generator.go b/go/vt/vttablet/tabletmanager/vreplication/insert_generator.go index 19e5933f428..da1753a8444 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/insert_generator.go +++ 
b/go/vt/vttablet/tabletmanager/vreplication/insert_generator.go @@ -36,12 +36,12 @@ type InsertGenerator struct { } // NewInsertGenerator creates a new InsertGenerator. -func NewInsertGenerator(state, dbname string) *InsertGenerator { +func NewInsertGenerator(state binlogdatapb.VReplicationWorkflowState, dbname string) *InsertGenerator { buf := &strings.Builder{} buf.WriteString("insert into _vt.vreplication(workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys) values ") return &InsertGenerator{ buf: buf, - state: state, + state: state.String(), dbname: dbname, now: time.Now().Unix(), } diff --git a/go/vt/vttablet/tabletmanager/vreplication/insert_generator_test.go b/go/vt/vttablet/tabletmanager/vreplication/insert_generator_test.go index 3f79a28a765..5ccdfe3da10 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/insert_generator_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/insert_generator_test.go @@ -21,12 +21,11 @@ import ( "github.com/stretchr/testify/assert" - "vitess.io/vitess/go/vt/binlog/binlogplayer" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) func TestInsertGenerator(t *testing.T) { - ig := NewInsertGenerator(binlogplayer.BlpStopped, "a") + ig := NewInsertGenerator(binlogdatapb.VReplicationWorkflowState_Stopped, "a") ig.now = 111 ig.AddRow("b", &binlogdatapb.BinlogSource{Keyspace: "c"}, "d", "e", "f", binlogdatapb.VReplicationWorkflowType_Materialize, binlogdatapb.VReplicationWorkflowSubType_None, false) want := `insert into _vt.vreplication(workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys) values ` + diff --git a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go index b07933519a6..39ffdef04ae 100644 
--- a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go +++ b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go @@ -22,13 +22,10 @@ import ( "sort" "strings" - "vitess.io/vitess/go/vt/vttablet" - - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/bytes2" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" vjson "vitess.io/vitess/go/mysql/json" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" @@ -38,6 +35,7 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vttablet" ) // ReplicatorPlan is the execution plan for the replicator. It contains @@ -79,7 +77,7 @@ func (rp *ReplicatorPlan) buildExecutionPlan(fieldEvent *binlogdatapb.FieldEvent // bind var names. tplanv.Fields = make([]*querypb.Field, 0, len(fieldEvent.Fields)) for _, fld := range fieldEvent.Fields { - trimmed := proto.Clone(fld).(*querypb.Field) + trimmed := fld.CloneVT() trimmed.Name = strings.Trim(trimmed.Name, "`") tplanv.Fields = append(tplanv.Fields, trimmed) } @@ -317,21 +315,15 @@ func (tp *TablePlan) isOutsidePKRange(bindvars map[string]*querypb.BindVariable, func (tp *TablePlan) bindFieldVal(field *querypb.Field, val *sqltypes.Value) (*querypb.BindVariable, error) { if conversion, ok := tp.ConvertCharset[field.Name]; ok && !val.IsNull() { // Non-null string value, for which we have a charset conversion instruction - valString := val.ToString() - fromEncoding, encodingOK := mysql.CharacterSetEncoding[conversion.FromCharset] - if !encodingOK { + fromCollation := collations.Local().DefaultCollationForCharset(conversion.FromCharset) + if fromCollation == collations.Unknown { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Character set %s not supported for column %s", conversion.FromCharset, field.Name) } - if 
fromEncoding != nil { - // As reminder, encoding can be nil for trivial charsets, like utf8 or ascii. - // encoding will be non-nil for charsets like latin1, gbk, etc. - var err error - valString, err = fromEncoding.NewDecoder().String(valString) - if err != nil { - return nil, err - } + out, err := charset.Convert(nil, charset.Charset_utf8mb4{}, val.Raw(), colldata.Lookup(fromCollation).Charset()) + if err != nil { + return nil, err } - return sqltypes.StringBindVariable(valString), nil + return sqltypes.StringBindVariable(string(out)), nil } if tp.ConvertIntToEnum[field.Name] && !val.IsNull() { // An integer converted to an enum. We must write the textual value of the int. i.e. 0 turns to '0' @@ -386,9 +378,13 @@ func (tp *TablePlan) applyChange(rowChange *binlogdatapb.RowChange, executor fun var newVal *sqltypes.Value var err error if field.Type == querypb.Type_JSON { - newVal, err = vjson.MarshalSQLValue(vals[i].Raw()) - if err != nil { - return nil, err + if vals[i].IsNull() { // An SQL NULL and not an actual JSON value + newVal = &sqltypes.NULL + } else { // A JSON value (which may be a JSON null literal value) + newVal, err = vjson.MarshalSQLValue(vals[i].Raw()) + if err != nil { + return nil, err + } } bindVar, err = tp.bindFieldVal(field, newVal) } else { diff --git a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go index 8b97f02dc1e..780b1c0d064 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go @@ -414,6 +414,55 @@ func TestBuildPlayerPlan(t *testing.T) { }, }, }, + }, { + input: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select c1, convert(c using utf8mb4) as c2 from t1", + }}, + }, + plan: &TestReplicatorPlan{ + VStreamFilter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select c1, convert(c using 
utf8mb4) as c2 from t1", + }}, + }, + TargetTables: []string{"t1"}, + TablePlans: map[string]*TestTablePlan{ + "t1": { + TargetName: "t1", + SendRule: "t1", + PKReferences: []string{"c1"}, + InsertFront: "insert into t1(c1,c2)", + InsertValues: "(:a_c1,convert(:a_c using utf8mb4))", + Insert: "insert into t1(c1,c2) values (:a_c1,convert(:a_c using utf8mb4))", + Update: "update t1 set c2=convert(:a_c using utf8mb4) where c1=:b_c1", + Delete: "delete from t1 where c1=:b_c1", + }, + }, + }, + planpk: &TestReplicatorPlan{ + VStreamFilter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select c1, convert(c using utf8mb4) as c2, pk1, pk2 from t1", + }}, + }, + TargetTables: []string{"t1"}, + TablePlans: map[string]*TestTablePlan{ + "t1": { + TargetName: "t1", + SendRule: "t1", + PKReferences: []string{"c1", "pk1", "pk2"}, + InsertFront: "insert into t1(c1,c2)", + InsertValues: "(:a_c1,convert(:a_c using utf8mb4))", + Insert: "insert into t1(c1,c2) select :a_c1, convert(:a_c using utf8mb4) from dual where (:a_pk1,:a_pk2) <= (1,'aaa')", + Update: "update t1 set c2=convert(:a_c using utf8mb4) where c1=:b_c1 and (:b_pk1,:b_pk2) <= (1,'aaa')", + Delete: "delete from t1 where c1=:b_c1 and (:b_pk1,:b_pk2) <= (1,'aaa')", + }, + }, + }, }, { // Keywords as names. 
input: &binlogdatapb.Filter{ diff --git a/go/vt/vttablet/tabletmanager/vreplication/stats.go b/go/vt/vttablet/tabletmanager/vreplication/stats.go index fbf53fa7da4..6379a9ba04f 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/stats.go +++ b/go/vt/vttablet/tabletmanager/vreplication/stats.go @@ -27,6 +27,8 @@ import ( "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/servenv" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) var ( @@ -152,7 +154,10 @@ func (st *vrStats) register() { defer st.mu.Unlock() result := make(map[string]string, len(st.controllers)) for _, ct := range st.controllers { - result[fmt.Sprintf("%v", ct.id)] = ct.sourceTablet.Load().(string) + ta := ct.sourceTablet.Load() + if ta != nil { + result[fmt.Sprintf("%v", ct.id)] = ta.(*topodatapb.TabletAlias).String() + } } return result })) @@ -468,7 +473,7 @@ func (st *vrStats) status() *EngineStatus { ReplicationLagSeconds: ct.blpStats.ReplicationLagSeconds.Load(), Counts: ct.blpStats.Timings.Counts(), Rates: ct.blpStats.Rates.Get(), - SourceTablet: ct.sourceTablet.Load().(string), + SourceTablet: ct.sourceTablet.Load().(*topodatapb.TabletAlias), Messages: ct.blpStats.MessageHistory(), QueryCounts: ct.blpStats.QueryCount.Counts(), PhaseTimings: ct.blpStats.PhaseTimings.Counts(), @@ -506,7 +511,7 @@ type ControllerStatus struct { Counts map[string]int64 Rates map[string][]float64 State string - SourceTablet string + SourceTablet *topodatapb.TabletAlias Messages []string QueryCounts map[string]int64 PhaseTimings map[string]int64 diff --git a/go/vt/vttablet/tabletmanager/vreplication/stats_test.go b/go/vt/vttablet/tabletmanager/vreplication/stats_test.go index 2a05a726b5d..d5b5eacbdf2 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/stats_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/stats_test.go @@ -25,9 +25,12 @@ import ( "github.com/google/safehtml/template" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + 
"vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/proto/binlogdata" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) var wantOut = ` @@ -72,12 +75,13 @@ VReplication state: Open
` func TestStatusHtml(t *testing.T) { - pos, err := mysql.DecodePosition("MariaDB/1-2-3") + pos, err := replication.DecodePosition("MariaDB/1-2-3") if err != nil { t.Fatal(err) } blpStats := binlogplayer.NewStats() + defer blpStats.Stop() blpStats.SetLastPosition(pos) blpStats.ReplicationLagSeconds.Store(2) blpStats.History.Add(&binlogplayer.StatsHistoryRecord{Time: time.Now(), Message: "Test Message1"}) @@ -107,8 +111,14 @@ func TestStatusHtml(t *testing.T) { done: make(chan struct{}), }, } - testStats.controllers[1].sourceTablet.Store("src1") - testStats.controllers[2].sourceTablet.Store("src2") + testStats.controllers[1].sourceTablet.Store(&topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 01, + }) + testStats.controllers[2].sourceTablet.Store(&topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 02, + }) close(testStats.controllers[2].done) tpl := template.Must(template.New("test").Parse(vreplicationTemplate)) @@ -121,7 +131,7 @@ func TestStatusHtml(t *testing.T) { func TestVReplicationStats(t *testing.T) { blpStats := binlogplayer.NewStats() - + defer blpStats.Stop() testStats := &vrStats{} testStats.isOpen = true testStats.controllers = map[int32]*controller{ @@ -135,7 +145,10 @@ func TestVReplicationStats(t *testing.T) { done: make(chan struct{}), }, } - testStats.controllers[1].sourceTablet.Store("src1") + testStats.controllers[1].sourceTablet.Store(&topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 01, + }) sleepTime := 1 * time.Millisecond record := func(phase string) { diff --git a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go index b0b8e658c27..da1b4dfc2f3 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go +++ b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go @@ -425,9 +425,28 @@ func (tpb *tablePlanBuilder) analyzeExpr(selExpr sqlparser.SelectExpr) (*colExpr references: make(map[string]bool), } if expr, ok := 
aliased.Expr.(*sqlparser.ConvertUsingExpr); ok { + // Here we find the actual column name in the convert, in case + // this is a column rename and the AS is the new column. + // For example, in convert(c1 using utf8mb4) as c2, we want to find + // c1, because c1 exists in the current table whereas c2 is the renamed column + // in the desired table. + var colName sqlparser.IdentifierCI + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + case *sqlparser.ColName: + if !node.Qualifier.IsEmpty() { + return false, fmt.Errorf("unsupported qualifier for column: %v", sqlparser.String(node)) + } + colName = node.Name + } + return true, nil + }, aliased.Expr) + if err != nil { + return nil, fmt.Errorf("failed to find column name for convert using expression: %v, %v", sqlparser.String(aliased.Expr), err) + } selExpr := &sqlparser.ConvertUsingExpr{ Type: "utf8mb4", - Expr: &sqlparser.ColName{Name: as}, + Expr: &sqlparser.ColName{Name: colName}, } cexpr.expr = expr cexpr.operation = opExpr @@ -448,10 +467,10 @@ func (tpb *tablePlanBuilder) analyzeExpr(selExpr sqlparser.SelectExpr) (*colExpr } } if expr, ok := aliased.Expr.(sqlparser.AggrFunc); ok { - if expr.IsDistinct() { + if sqlparser.IsDistinct(expr) { return nil, fmt.Errorf("unexpected: %v", sqlparser.String(expr)) } - switch fname := strings.ToLower(expr.AggrName()); fname { + switch fname := expr.AggrName(); fname { case "count": if _, ok := expr.(*sqlparser.CountStar); !ok { return nil, fmt.Errorf("only count(*) is supported: %v", sqlparser.String(expr)) diff --git a/go/vt/vttablet/tabletmanager/vreplication/utils.go b/go/vt/vttablet/tabletmanager/vreplication/utils.go index 02bcbb235be..42aa4351647 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/utils.go +++ b/go/vt/vttablet/tabletmanager/vreplication/utils.go @@ -21,12 +21,11 @@ import ( "fmt" "strconv" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/constants/sidecar" + 
"vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/evalengine" ) const ( @@ -65,7 +64,7 @@ const ( func getLastLog(dbClient *vdbClient, vreplID int32) (id int64, typ, state, message string, err error) { var qr *sqltypes.Result query := fmt.Sprintf("select id, type, state, message from %s.vreplication_log where vrepl_id = %d order by id desc limit 1", - sidecardb.GetIdentifier(), vreplID) + sidecar.GetIdentifier(), vreplID) if qr, err = dbClient.Execute(query); err != nil { return 0, "", "", "", err } @@ -73,7 +72,7 @@ func getLastLog(dbClient *vdbClient, vreplID int32) (id int64, typ, state, messa return 0, "", "", "", nil } row := qr.Rows[0] - id, _ = evalengine.ToInt64(row[0]) + id, _ = row[0].ToCastInt64() typ = row[1].ToString() state = row[2].ToString() message = row[3].ToString() @@ -93,11 +92,11 @@ func insertLog(dbClient *vdbClient, typ string, vreplID int32, state, message st } var query string if id > 0 && message == lastLogMessage { - query = fmt.Sprintf("update %s.vreplication_log set count = count + 1 where id = %d", sidecardb.GetIdentifier(), id) + query = fmt.Sprintf("update %s.vreplication_log set count = count + 1 where id = %d", sidecar.GetIdentifier(), id) } else { buf := sqlparser.NewTrackedBuffer(nil) buf.Myprintf("insert into %s.vreplication_log(vrepl_id, type, state, message) values(%s, %s, %s, %s)", - sidecardb.GetIdentifier(), strconv.Itoa(int(vreplID)), encodeString(typ), encodeString(state), encodeString(message)) + sidecar.GetIdentifier(), strconv.Itoa(int(vreplID)), encodeString(typ), encodeString(state), encodeString(message)) query = buf.ParsedQuery().Query } if _, err = dbClient.ExecuteFetch(query, 10000); err != nil { @@ -124,72 +123,92 @@ func isUnrecoverableError(err error) bool { if err == nil { return false } - sqlErr, isSQLErr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) 
+ sqlErr, isSQLErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) if !isSQLErr { return false } - if sqlErr.Num == mysql.ERUnknownError { + if sqlErr.Num == sqlerror.ERUnknownError { return false } switch sqlErr.Num { case // in case-insensitive alphabetical order - mysql.ERAccessDeniedError, - mysql.ERBadFieldError, - mysql.ERBadNullError, - mysql.ERCantDropFieldOrKey, - mysql.ERDataOutOfRange, - mysql.ERDataTooLong, - mysql.ERDBAccessDenied, - mysql.ERDupEntry, - mysql.ERDupFieldName, - mysql.ERDupKeyName, - mysql.ERDupUnique, - mysql.ERFeatureDisabled, - mysql.ERFunctionNotDefined, - mysql.ERIllegalValueForType, - mysql.ERInvalidCastToJSON, - mysql.ERInvalidJSONBinaryData, - mysql.ERInvalidJSONCharset, - mysql.ERInvalidJSONText, - mysql.ERInvalidJSONTextInParams, - mysql.ERJSONDocumentTooDeep, - mysql.ERJSONValueTooBig, - mysql.ERNoDefault, - mysql.ERNoDefaultForField, - mysql.ERNonUniq, - mysql.ERNonUpdateableTable, - mysql.ERNoSuchTable, - mysql.ERNotAllowedCommand, - mysql.ERNotSupportedYet, - mysql.EROptionPreventsStatement, - mysql.ERParseError, - mysql.ERPrimaryCantHaveNull, - mysql.ErrCantCreateGeometryObject, - mysql.ErrGISDataWrongEndianess, - mysql.ErrNonPositiveRadius, - mysql.ErrNotImplementedForCartesianSRS, - mysql.ErrNotImplementedForProjectedSRS, - mysql.ErrWrongValueForType, - mysql.ERSPDoesNotExist, - mysql.ERSpecifiedAccessDenied, - mysql.ERSyntaxError, - mysql.ERTooBigRowSize, - mysql.ERTooBigSet, - mysql.ERTruncatedWrongValue, - mysql.ERTruncatedWrongValueForField, - mysql.ERUnknownCollation, - mysql.ERUnknownProcedure, - mysql.ERUnknownTable, - mysql.ERWarnDataOutOfRange, - mysql.ERWarnDataTruncated, - mysql.ERWrongFKDef, - mysql.ERWrongFieldSpec, - mysql.ERWrongParamCountToProcedure, - mysql.ERWrongParametersToProcedure, - mysql.ERWrongUsage, - mysql.ERWrongValue, - mysql.ERWrongValueCountOnRow: + sqlerror.ERAccessDeniedError, + sqlerror.ERBadFieldError, + sqlerror.ERBadNullError, + sqlerror.ERCantDropFieldOrKey, + 
sqlerror.ERDataOutOfRange, + sqlerror.ERDataTooLong, + sqlerror.ERDBAccessDenied, + sqlerror.ERDupEntry, + sqlerror.ERDupFieldName, + sqlerror.ERDupKeyName, + sqlerror.ERDupUnique, + sqlerror.ERFeatureDisabled, + sqlerror.ERFunctionNotDefined, + sqlerror.ERIllegalValueForType, + sqlerror.ERInvalidCastToJSON, + sqlerror.ERInvalidJSONBinaryData, + sqlerror.ERInvalidJSONCharset, + sqlerror.ERInvalidJSONText, + sqlerror.ERInvalidJSONTextInParams, + sqlerror.ERJSONDocumentTooDeep, + sqlerror.ERJSONValueTooBig, + sqlerror.ERRegexpError, + sqlerror.ERRegexpStringNotTerminated, + sqlerror.ERRegexpIllegalArgument, + sqlerror.ERRegexpIndexOutOfBounds, + sqlerror.ERRegexpInternal, + sqlerror.ERRegexpRuleSyntax, + sqlerror.ERRegexpBadEscapeSequence, + sqlerror.ERRegexpUnimplemented, + sqlerror.ERRegexpMismatchParen, + sqlerror.ERRegexpBadInterval, + sqlerror.ERRRegexpMaxLtMin, + sqlerror.ERRegexpInvalidBackRef, + sqlerror.ERRegexpLookBehindLimit, + sqlerror.ERRegexpMissingCloseBracket, + sqlerror.ERRegexpInvalidRange, + sqlerror.ERRegexpStackOverflow, + sqlerror.ERRegexpTimeOut, + sqlerror.ERRegexpPatternTooBig, + sqlerror.ERRegexpInvalidCaptureGroup, + sqlerror.ERRegexpInvalidFlag, + sqlerror.ERNoDefault, + sqlerror.ERNoDefaultForField, + sqlerror.ERNonUniq, + sqlerror.ERNonUpdateableTable, + sqlerror.ERNoSuchTable, + sqlerror.ERNotAllowedCommand, + sqlerror.ERNotSupportedYet, + sqlerror.EROptionPreventsStatement, + sqlerror.ERParseError, + sqlerror.ERPrimaryCantHaveNull, + sqlerror.ErrCantCreateGeometryObject, + sqlerror.ErrGISDataWrongEndianess, + sqlerror.ErrNonPositiveRadius, + sqlerror.ErrNotImplementedForCartesianSRS, + sqlerror.ErrNotImplementedForProjectedSRS, + sqlerror.ErrWrongValueForType, + sqlerror.ERSPDoesNotExist, + sqlerror.ERSpecifiedAccessDenied, + sqlerror.ERSyntaxError, + sqlerror.ERTooBigRowSize, + sqlerror.ERTooBigSet, + sqlerror.ERTruncatedWrongValue, + sqlerror.ERTruncatedWrongValueForField, + sqlerror.ERUnknownCollation, + sqlerror.ERUnknownProcedure, 
+ sqlerror.ERUnknownTable, + sqlerror.ERWarnDataOutOfRange, + sqlerror.ERWarnDataTruncated, + sqlerror.ERWrongFKDef, + sqlerror.ERWrongFieldSpec, + sqlerror.ERWrongParamCountToProcedure, + sqlerror.ERWrongParametersToProcedure, + sqlerror.ERWrongUsage, + sqlerror.ERWrongValue, + sqlerror.ERWrongValueCountOnRow: log.Errorf("Got unrecoverable error: %v", sqlErr) return true } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go index 0f2bf9c109a..ebfe0e22343 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go @@ -26,10 +26,9 @@ import ( "time" "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/bytes2" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/pools" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" @@ -142,7 +141,7 @@ type vcopierCopyWorker struct { func newVCopier(vr *vreplicator) *vcopier { return &vcopier{ vr: vr, - throttlerAppName: vr.throttlerAppName(), + throttlerAppName: throttlerapp.VCopierName.ConcatenateString(vr.throttlerAppName()), } } @@ -237,7 +236,7 @@ func (vc *vcopier) initTablesForCopy(ctx context.Context) error { if _, err := vc.vr.dbClient.Execute(buf.String()); err != nil { return err } - if err := vc.vr.setState(binlogplayer.VReplicationCopying, ""); err != nil { + if err := vc.vr.setState(binlogdatapb.VReplicationWorkflowState_Copying, ""); err != nil { return err } if err := vc.vr.insertLog(LogCopyStart, fmt.Sprintf("Copy phase started for %d table(s)", @@ -268,7 +267,7 @@ func (vc *vcopier) initTablesForCopy(ctx context.Context) error { } } } else { - if err := vc.vr.setState(binlogplayer.BlpStopped, "There is nothing to replicate"); err != nil { + if err := vc.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, "There is nothing to replicate"); err != nil { return 
err } } @@ -344,7 +343,7 @@ func (vc *vcopier) catchup(ctx context.Context, copyState map[string]*sqltypes.R // Start vreplication. errch := make(chan error, 1) go func() { - errch <- newVPlayer(vc.vr, settings, copyState, mysql.Position{}, "catchup").play(ctx) + errch <- newVPlayer(vc.vr, settings, copyState, replication.Position{}, "catchup").play(ctx) }() // Wait for catchup. @@ -407,7 +406,7 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma copyStateGCTicker := time.NewTicker(copyStateGCInterval) defer copyStateGCTicker.Stop() - parallelism := int(math.Max(1, float64(vreplicationParallelInsertWorkers))) + parallelism := getInsertParallelism() copyWorkerFactory := vc.newCopyWorkerFactory(parallelism) copyWorkQueue := vc.newCopyWorkQueue(parallelism, copyWorkerFactory) defer copyWorkQueue.close() @@ -482,12 +481,16 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma fieldEvent := &binlogdatapb.FieldEvent{ TableName: initialPlan.SendRule.Match, } - fieldEvent.Fields = append(fieldEvent.Fields, rows.Fields...) + for _, f := range rows.Fields { + fieldEvent.Fields = append(fieldEvent.Fields, f.CloneVT()) + } tablePlan, err := plan.buildExecutionPlan(fieldEvent) if err != nil { return err } - pkfields = append(pkfields, rows.Pkfields...) + for _, f := range rows.Pkfields { + pkfields = append(pkfields, f.CloneVT()) + } buf := sqlparser.NewTrackedBuffer(nil) buf.Myprintf( "insert into _vt.copy_state (lastpk, vrepl_id, table_name) values (%a, %s, %s)", ":lastpk", @@ -503,9 +506,15 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma // Clone rows, since pointer values will change while async work is // happening. Can skip this when there's no parallelism. if parallelism > 1 { - rows = proto.Clone(rows).(*binlogdatapb.VStreamRowsResponse) + rows = rows.CloneVT() } + // Code below is copied from vcopier.go. 
It was implemented to facilitate + // parallel bulk inserts in https://github.com/vitessio/vitess/pull/10828. + // We can probably extract this into a common package and use it for both + // flavors of the vcopier. But cut/pasting it for now, so as to not change + // vcopier at the moment to avoid any regressions. + // Prepare a vcopierCopyTask for the current batch of work. // TODO(maxeng) see if using a pre-allocated pool will speed things up. currCh := make(chan *vcopierCopyTaskResult, 1) @@ -668,9 +677,21 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma return nil } +// updatePos is called after the last table is copied in an atomic copy, to set the gtid so that the replicating phase +// can start from the gtid where the snapshot with all tables was taken. It also updates the final copy row count. +func (vc *vcopier) updatePos(ctx context.Context, gtid string) error { + pos, err := replication.DecodePosition(gtid) + if err != nil { + return err + } + update := binlogplayer.GenerateUpdatePos(vc.vr.id, pos, time.Now().Unix(), 0, vc.vr.stats.CopyRowCount.Get(), vreplicationStoreCompressedGTID) + _, err = vc.vr.dbClient.Execute(update) + return err +} + func (vc *vcopier) fastForward(ctx context.Context, copyState map[string]*sqltypes.Result, gtid string) error { defer vc.vr.stats.PhaseTimings.Record("fastforward", time.Now()) - pos, err := mysql.DecodePosition(gtid) + pos, err := replication.DecodePosition(gtid) if err != nil { return err } @@ -1071,6 +1092,10 @@ func (vbc *vcopierCopyWorker) execute(ctx context.Context, task *vcopierCopyTask } case vcopierCopyTaskInsertCopyState: advanceFn = func(ctx context.Context, args *vcopierCopyTaskArgs) error { + if vbc.copyStateInsert == nil { // we don't insert copy state for atomic copy + log.Infof("Skipping copy_state insert") + return nil + } if err := vbc.insertCopyState(ctx, args.lastpk); err != nil { return vterrors.Wrapf(err, "error updating _vt.copy_state") } @@ -1197,3 +1222,9 
@@ func vcopierCopyTaskGetNextState(vts vcopierCopyTaskState) vcopierCopyTaskState } return vts } + +// getInsertParallelism returns the number of parallel workers to use for inserting batches during the copy phase. +func getInsertParallelism() int { + parallelism := int(math.Max(1, float64(vreplicationParallelInsertWorkers))) + return parallelism +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go new file mode 100644 index 00000000000..6252690a629 --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go @@ -0,0 +1,310 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "context" + "fmt" + "io" + "strconv" + "time" + + "google.golang.org/protobuf/encoding/prototext" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" +) + +/* +This file is similar to vcopier.go: it handles the copy phase for the AtomicCopy where all tables +are streamed in a single phase. 
+*/ + +type copyAllState struct { + vc *vcopier + plan *ReplicatorPlan + currentTableName string + tables map[string]bool +} + +// newCopyAllState creates the required table plans and sets up the copy state for all tables in the source. +func newCopyAllState(vc *vcopier) (*copyAllState, error) { + state := ©AllState{ + vc: vc, + } + plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats) + if err != nil { + return nil, err + } + state.plan = plan + state.tables = make(map[string]bool, len(plan.TargetTables)) + for _, table := range plan.TargetTables { + state.tables[table.TargetName] = false + } + return state, nil +} + +// copyAll copies all tables from the source to the target sequentially, finishing one table first and then moving to the next.. +func (vc *vcopier) copyAll(ctx context.Context, settings binlogplayer.VRSettings) error { + var err error + + log.Infof("Starting copyAll for %s", settings.WorkflowName) + defer log.Infof("Returning from copyAll for %s", settings.WorkflowName) + defer vc.vr.dbClient.Rollback() + + state, err := newCopyAllState(vc) + if err != nil { + return err + } + + ctx, cancel := context.WithTimeout(ctx, copyPhaseDuration) + defer cancel() + + rowsCopiedTicker := time.NewTicker(rowsCopiedUpdateInterval) + defer rowsCopiedTicker.Stop() + + parallelism := getInsertParallelism() + copyWorkerFactory := vc.newCopyWorkerFactory(parallelism) + var copyWorkQueue *vcopierCopyWorkQueue + + // Allocate a result channel to collect results from tasks. To not block fast workers, we allocate a buffer of + // MaxResultsInFlight results per worker. + const MaxResultsInFlight = 4 + resultCh := make(chan *vcopierCopyTaskResult, parallelism*MaxResultsInFlight) + defer close(resultCh) + + var lastpk *querypb.Row + var pkfields []*querypb.Field + var lastpkbv map[string]*querypb.BindVariable + // Use this for task sequencing. 
+ var prevCh <-chan *vcopierCopyTaskResult + var gtid string + + serr := vc.vr.sourceVStreamer.VStreamTables(ctx, func(resp *binlogdatapb.VStreamTablesResponse) error { + defer vc.vr.stats.PhaseTimings.Record("copy", time.Now()) + defer vc.vr.stats.CopyLoopCount.Add(1) + log.Infof("VStreamTablesResponse: received table %s, #fields %d, #rows %d, gtid %s, lastpk %+v", + resp.TableName, len(resp.Fields), len(resp.Rows), resp.Gtid, resp.Lastpk) + tableName := resp.TableName + gtid = resp.Gtid + + updateRowsCopied := func() error { + updateRowsQuery := binlogplayer.GenerateUpdateRowsCopied(vc.vr.id, vc.vr.stats.CopyRowCount.Get()) + _, err := vc.vr.dbClient.Execute(updateRowsQuery) + return err + } + + if err := updateRowsCopied(); err != nil { + return err + } + select { + case <-rowsCopiedTicker.C: + if err := updateRowsCopied(); err != nil { + return err + } + case <-ctx.Done(): + return io.EOF + default: + } + if tableName != state.currentTableName { + if copyWorkQueue != nil { + copyWorkQueue.close() + } + copyWorkQueue = vc.newCopyWorkQueue(parallelism, copyWorkerFactory) + if state.currentTableName != "" { + log.Infof("copy of table %s is done at lastpk %+v", state.currentTableName, lastpkbv) + if err := vc.deleteCopyState(state.currentTableName); err != nil { + return err + } + } else { + log.Infof("starting copy phase with table %s", tableName) + } + + state.currentTableName = tableName + } + + // A new copy queue is created for each table. The queue is closed when the table is done. + if !copyWorkQueue.isOpen { + if len(resp.Fields) == 0 { + return fmt.Errorf("expecting field event first, got: %v", resp) + } + + lastpk = nil + // pkfields are only used for logging, so that we can monitor progress. 
+ pkfields = make([]*querypb.Field, len(resp.Pkfields)) + for _, f := range resp.Pkfields { + pkfields = append(pkfields, f.CloneVT()) + } + + fieldEvent := &binlogdatapb.FieldEvent{ + TableName: tableName, + } + for _, f := range resp.Fields { + fieldEvent.Fields = append(fieldEvent.Fields, f.CloneVT()) + } + tablePlan, err := state.plan.buildExecutionPlan(fieldEvent) + if err != nil { + return err + } + + buf := sqlparser.NewTrackedBuffer(nil) + buf.Myprintf( + "insert into _vt.copy_state (lastpk, vrepl_id, table_name) values (%a, %s, %s)", ":lastpk", + strconv.Itoa(int(vc.vr.id)), + encodeString(tableName)) + addLatestCopyState := buf.ParsedQuery() + copyWorkQueue.open(addLatestCopyState, pkfields, tablePlan) + } + // When rowstreamer has finished streaming all rows, we get a callback with empty rows. + if len(resp.Rows) == 0 { + return nil + } + // Get the last committed pk into a loggable form. + lastpkbuf, merr := prototext.Marshal(&querypb.QueryResult{ + Fields: pkfields, + Rows: []*querypb.Row{lastpk}, + }) + + if merr != nil { + return fmt.Errorf("failed to marshal pk fields and value into query result: %s", merr.Error()) + } + lastpkbv = map[string]*querypb.BindVariable{ + "lastpk": { + Type: sqltypes.VarBinary, + Value: lastpkbuf, + }, + } + log.Infof("copying table %s with lastpk %v", tableName, lastpkbv) + // Prepare a vcopierCopyTask for the current batch of work. + currCh := make(chan *vcopierCopyTaskResult, 1) + currT := newVCopierCopyTask(newVCopierCopyTaskArgs(resp.Rows, resp.Lastpk)) + + // Send result to the global resultCh and currCh. resultCh is used by + // the loop to return results to VStreamRows. currCh will be used to + // sequence the start of the nextT. + currT.lifecycle.onResult().sendTo(currCh) + currT.lifecycle.onResult().sendTo(resultCh) + + // Use prevCh to Sequence the prevT with the currT so that: + // * The prevT is completed before we begin updating + // _vt.copy_state for currT. 
+ // * If prevT fails or is canceled, the current task is + // canceled. + // prevCh is nil only for the first task in the vcopier run. + if prevCh != nil { + // prevT publishes to prevCh, and currT is the only thing that can + // consume from prevCh. If prevT is already done, then prevCh will + // have a value in it. If prevT isn't yet done, then prevCh will + // have a value later. Either way, AwaitCompletion should + // eventually get a value, unless there is a context expiry. + currT.lifecycle.before(vcopierCopyTaskInsertCopyState).awaitCompletion(prevCh) + } + + // Store currCh in prevCh. The nextT will use this for sequencing. + prevCh = currCh + + // Update stats after task is done. + currT.lifecycle.onResult().do(func(_ context.Context, result *vcopierCopyTaskResult) { + if result.state == vcopierCopyTaskFail { + vc.vr.stats.ErrorCounts.Add([]string{"Copy"}, 1) + } + if result.state == vcopierCopyTaskComplete { + vc.vr.stats.CopyRowCount.Add(int64(len(result.args.rows))) + vc.vr.stats.QueryCount.Add("copy", 1) + vc.vr.stats.TableCopyRowCounts.Add(tableName, int64(len(result.args.rows))) + vc.vr.stats.TableCopyTimings.Add(tableName, time.Since(result.startedAt)) + } + }) + + if err := copyWorkQueue.enqueue(ctx, currT); err != nil { + log.Warningf("failed to enqueue task in workflow %s: %s", vc.vr.WorkflowName, err.Error()) + return err + } + + // When async execution is not enabled, a done task will be available + // in the resultCh after each Enqueue, unless there was a queue state + // error (e.g. couldn't obtain a worker from pool). + // + // When async execution is enabled, results will show up in the channel + // eventually, possibly in a subsequent VStreamRows loop. It's still + // a good idea to check this channel on every pass so that: + // + // * resultCh doesn't fill up. If it does fill up then tasks won't be + // able to add their results to the channel, and progress in this + // goroutine will be blocked. + // * We keep lastpk up-to-date. 
+ select { + case result := <-resultCh: + if result != nil { + switch result.state { + case vcopierCopyTaskCancel: + log.Warningf("task was canceled in workflow %s: %v", vc.vr.WorkflowName, result.err) + return io.EOF + case vcopierCopyTaskComplete: + // Collect lastpk. Needed for logging at the end. + lastpk = result.args.lastpk + case vcopierCopyTaskFail: + return vterrors.Wrapf(result.err, "task error") + } + } else { + return io.EOF + } + default: + } + return nil + }) + if serr != nil { + log.Infof("VStreamTables failed: %v", serr) + return serr + } + // A context expiration was probably caused by a PlannedReparentShard or an + // elapsed copy phase duration. CopyAll is not resilient to these events. + select { + case <-ctx.Done(): + log.Infof("Copy of %v stopped", state.currentTableName) + return fmt.Errorf("CopyAll was interrupted due to context expiration") + default: + if err := vc.deleteCopyState(state.currentTableName); err != nil { + return err + } + if copyWorkQueue != nil { + copyWorkQueue.close() + } + if err := vc.updatePos(ctx, gtid); err != nil { + return err + } + log.Infof("Completed copy of all tables") + } + return nil +} + +// deleteCopyState deletes the copy state entry for a table, signifying that the copy phase is complete for that table. 
+func (vc *vcopier) deleteCopyState(tableName string) error { + log.Infof("Deleting copy state for table %s", tableName) + //FIXME get sidecar db name + delQuery := fmt.Sprintf("delete from _vt.copy_state where table_name=%s and vrepl_id = %d", encodeString(tableName), vc.vr.id) + if _, err := vc.vr.dbClient.Execute(delQuery); err != nil { + return err + } + return nil +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go index e1b1686b0d7..ff9b9daf00f 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go @@ -157,7 +157,7 @@ func testPlayerCopyCharPK(t *testing.T) { OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -264,7 +264,7 @@ func testPlayerCopyVarcharPKCaseInsensitive(t *testing.T) { OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -387,7 +387,7 @@ func testPlayerCopyVarcharCompositePKCaseSensitiveCollation(t *testing.T) { OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { 
t.Fatal(err) @@ -470,26 +470,26 @@ func testPlayerCopyTablesWithFK(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) require.NoError(t, err) expectDBClientQueries(t, qh.Expect( "/insert into _vt.vreplication", "/update _vt.vreplication set message='Picked source tablet.*", - "select @@foreign_key_checks;", + "select @@foreign_key_checks", // Create the list of tables to copy and transition to Copying state. "begin", "/insert into _vt.copy_state", "/update _vt.vreplication set state='Copying'", "commit", - "set foreign_key_checks=0;", + "set @@session.foreign_key_checks=0", // The first fast-forward has no starting point. So, it just saves the current position. "/update _vt.vreplication set pos=", ).Then(func(expect qh.ExpectationSequencer) qh.ExpectationSequencer { // With parallel inserts, new db client connects are created on-the-fly. if vreplicationParallelInsertWorkers > 1 { - return expect.Then(qh.Eventually("set foreign_key_checks=0;")) + return expect.Then(qh.Eventually("set @@session.foreign_key_checks=0")) } return expect }).Then(qh.Eventually( @@ -500,18 +500,18 @@ func testPlayerCopyTablesWithFK(t *testing.T) { `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\"2\\"}'.*`, "commit", )).Then(qh.Immediately( - "set foreign_key_checks=0;", + "set @@session.foreign_key_checks=0", // copy of dst1 is done: delete from copy_state. "/delete cs, pca from _vt.copy_state as cs left join _vt.post_copy_action as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name.*dst1", // The next FF executes and updates the position before copying. 
- "set foreign_key_checks=0;", + "set @@session.foreign_key_checks=0", "begin", "/update _vt.vreplication set pos=", "commit", )).Then(func(expect qh.ExpectationSequencer) qh.ExpectationSequencer { // With parallel inserts, new db client connects are created on-the-fly. if vreplicationParallelInsertWorkers > 1 { - return expect.Then(qh.Eventually("set foreign_key_checks=0;")) + return expect.Then(qh.Eventually("set @@session.foreign_key_checks=0")) } return expect }).Then(qh.Eventually( @@ -521,11 +521,11 @@ func testPlayerCopyTablesWithFK(t *testing.T) { `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\"2\\"}'.*`, "commit", )).Then(qh.Immediately( - "set foreign_key_checks=0;", + "set @@session.foreign_key_checks=0", // copy of dst1 is done: delete from copy_state. "/delete cs, pca from _vt.copy_state as cs left join _vt.post_copy_action as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name.*dst2", // All tables copied. Final catch up followed by Running state. 
- "set foreign_key_checks=1;", + "set @@session.foreign_key_checks=1", "/update _vt.vreplication set state='Running'", ))) @@ -545,7 +545,7 @@ func testPlayerCopyTablesWithFK(t *testing.T) { t.Fatal(err) } expectDBClientQueries(t, qh.Expect( - "set foreign_key_checks=1;", + "set @@session.foreign_key_checks=1", "begin", "/delete from _vt.vreplication", "/delete from _vt.copy_state", @@ -563,7 +563,7 @@ func testPlayerCopyTables(t *testing.T) { execStatements(t, []string{ "create table src1(id int, val varbinary(128), d decimal(8,0), j json, primary key(id))", - "insert into src1 values(2, 'bbb', 1, '{\"foo\": \"bar\"}'), (1, 'aaa', 0, JSON_ARRAY(123456789012345678901234567890, \"abcd\"))", + "insert into src1 values(2, 'bbb', 1, '{\"foo\": \"bar\"}'), (1, 'aaa', 0, JSON_ARRAY(123456789012345678901234567890, \"abcd\")), (3, 'ccc', 2, 'null'), (4, 'ddd', 3, '{\"name\": \"matt\", \"size\": null}'), (5, 'eee', 4, null)", fmt.Sprintf("create table %s.dst1(id int, val varbinary(128), val2 varbinary(128), d decimal(8,0), j json, primary key(id))", vrepldb), "create table yes(id int, val varbinary(128), primary key(id))", fmt.Sprintf("create table %s.yes(id int, val varbinary(128), primary key(id))", vrepldb), @@ -593,7 +593,7 @@ func testPlayerCopyTables(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -617,8 +617,8 @@ func testPlayerCopyTables(t *testing.T) { // The first fast-forward has no starting point. So, it just saves the current position. 
"/update _vt.vreplication set pos=", "begin", - "insert into dst1(id,val,val2,d,j) values (1,'aaa','aaa',0,JSON_ARRAY(123456789012345678901234567890, _utf8mb4'abcd')), (2,'bbb','bbb',1,JSON_OBJECT(_utf8mb4'foo', _utf8mb4'bar'))", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\"2\\"}'.*`, + "insert into dst1(id,val,val2,d,j) values (1,'aaa','aaa',0,JSON_ARRAY(123456789012345678901234567890, _utf8mb4'abcd')), (2,'bbb','bbb',1,JSON_OBJECT(_utf8mb4'foo', _utf8mb4'bar')), (3,'ccc','ccc',2,CAST(_utf8mb4'null' as JSON)), (4,'ddd','ddd',3,JSON_OBJECT(_utf8mb4'name', _utf8mb4'matt', _utf8mb4'size', null)), (5,'eee','eee',4,null)", + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\"5\\"}'.*`, "commit", // copy of dst1 is done: delete from copy_state. "/delete cs, pca from _vt.copy_state as cs left join _vt.post_copy_action as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name.*dst1", @@ -634,9 +634,12 @@ func testPlayerCopyTables(t *testing.T) { expectData(t, "dst1", [][]string{ {"1", "aaa", "aaa", "0", "[123456789012345678901234567890, \"abcd\"]"}, {"2", "bbb", "bbb", "1", "{\"foo\": \"bar\"}"}, + {"3", "ccc", "ccc", "2", "null"}, + {"4", "ddd", "ddd", "3", "{\"name\": \"matt\", \"size\": null}"}, + {"5", "eee", "eee", "4", ""}, }) expectData(t, "yes", [][]string{}) - validateCopyRowCountStat(t, 2) + validateCopyRowCountStat(t, 5) ctx, cancel := context.WithCancel(context.Background()) type logTestCase struct { @@ -732,7 +735,7 @@ func testPlayerCopyBigTable(t *testing.T) { OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, 
playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -862,7 +865,7 @@ func testPlayerCopyWildcardRule(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -999,7 +1002,7 @@ func testPlayerCopyTableContinuation(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.BlpStopped, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Stopped, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -1136,7 +1139,7 @@ func testPlayerCopyWildcardTableContinuation(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.BlpStopped, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Stopped, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -1233,7 +1236,7 @@ func TestPlayerCopyWildcardTableContinuationWithOptimizeInserts(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.BlpStopped, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Stopped, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -1302,7 +1305,7 @@ func testPlayerCopyTablesNone(t 
*testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -1356,7 +1359,7 @@ func testPlayerCopyTablesStopAfterCopy(t *testing.T) { OnDdl: binlogdatapb.OnDDLAction_IGNORE, StopAfterCopy: true, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -1445,7 +1448,7 @@ func testPlayerCopyTablesGIPK(t *testing.T) { OnDdl: binlogdatapb.OnDDLAction_IGNORE, StopAfterCopy: true, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -1543,7 +1546,7 @@ func testPlayerCopyTableCancel(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -1626,7 +1629,7 @@ func testPlayerCopyTablesWithGeneratedColumn(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", 
binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -1707,7 +1710,7 @@ func testCopyTablesWithInvalidDates(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) require.NoError(t, err) @@ -1794,7 +1797,7 @@ func testCopyInvisibleColumns(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) diff --git a/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go b/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go index cc7776720ba..c3941b0f1bb 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go @@ -17,12 +17,11 @@ limitations under the License. 
package vreplication import ( + "context" "io" "time" - "context" - - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/log" @@ -100,7 +99,7 @@ func (vc *vdbClient) Execute(query string) (*sqltypes.Result, error) { func (vc *vdbClient) ExecuteWithRetry(ctx context.Context, query string) (*sqltypes.Result, error) { qr, err := vc.Execute(query) for err != nil { - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Number() == mysql.ERLockDeadlock || sqlErr.Number() == mysql.ERLockWaitTimeout { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERLockDeadlock || sqlErr.Number() == sqlerror.ERLockWaitTimeout { log.Infof("retryable error: %v, waiting for %v and retrying", sqlErr, dbLockRetryDelay) if err := vc.Rollback(); err != nil { return nil, err diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go index 084079506d9..8eee211ff9e 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go @@ -26,7 +26,7 @@ import ( "strings" "time" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" @@ -39,15 +39,15 @@ import ( // vplayer replays binlog events by pulling them from a vstreamer. type vplayer struct { vr *vreplicator - startPos mysql.Position - stopPos mysql.Position + startPos replication.Position + stopPos replication.Position saveStop bool copyState map[string]*sqltypes.Result replicatorPlan *ReplicatorPlan tablePlans map[string]*TablePlan - pos mysql.Position + pos replication.Position // unsavedEvent is set any time we skip an event without // saving, which is on an empty commit. 
// If nothing else happens for idleTimeout since timeLastSaved, @@ -68,8 +68,22 @@ type vplayer struct { phase string throttlerAppName string + + // See updateFKCheck for more details on how the two fields below are used. + + // foreignKeyChecksEnabled is the current state of the foreign key checks for the current session. + // It reflects what we have set the @@session.foreign_key_checks session variable to. + foreignKeyChecksEnabled bool + + // foreignKeyChecksStateInitialized is set to true once we have initialized the foreignKeyChecksEnabled. + // The initialization is done on the first row event that this vplayer sees. + foreignKeyChecksStateInitialized bool } +// NoForeignKeyCheckFlagBitmask is the bitmask for the 2nd bit (least significant) of the flags in a binlog row event. +// This bit is set if foreign key checks are disabled. +const NoForeignKeyCheckFlagBitmask uint32 = 1 << 1 + // newVPlayer creates a new vplayer. Parameters: // vreplicator: the outer replicator. It's used for common functions like setState. // @@ -84,7 +98,7 @@ type vplayer struct { // pausePos: if set, replication will stop at that position without updating the state to "Stopped". // // This is used by the fastForward function during copying. 
-func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map[string]*sqltypes.Result, pausePos mysql.Position, phase string) *vplayer { +func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map[string]*sqltypes.Result, pausePos replication.Position, phase string) *vplayer { saveStop := true if !pausePos.IsZero() { settings.StopPos = pausePos @@ -100,7 +114,7 @@ func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map timeLastSaved: time.Now(), tablePlans: make(map[string]*TablePlan), phase: phase, - throttlerAppName: vr.throttlerAppName(), + throttlerAppName: throttlerapp.VCopierName.ConcatenateString(vr.throttlerAppName()), } } @@ -109,7 +123,7 @@ func (vp *vplayer) play(ctx context.Context) error { if !vp.stopPos.IsZero() && vp.startPos.AtLeast(vp.stopPos) { log.Infof("Stop position %v already reached: %v", vp.startPos, vp.stopPos) if vp.saveStop { - return vp.vr.setState(binlogplayer.BlpStopped, fmt.Sprintf("Stop position %v already reached: %v", vp.startPos, vp.stopPos)) + return vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, fmt.Sprintf("Stop position %v already reached: %v", vp.startPos, vp.stopPos)) } return nil } @@ -133,6 +147,34 @@ func (vp *vplayer) play(ctx context.Context) error { return vp.fetchAndApply(ctx) } +// updateFKCheck updates the @@session.foreign_key_checks variable based on the binlog row event flags. +// The function only does it if it has changed to avoid redundant updates, using the cached vplayer.foreignKeyChecksEnabled +// The foreign_key_checks value for a transaction is determined by the 2nd bit (least significant) of the flags: +// - If set (1), foreign key checks are disabled. +// - If unset (0), foreign key checks are enabled. +// updateFKCheck also updates the state for the first row event that this vplayer and hence the connection sees. 
+func (vp *vplayer) updateFKCheck(ctx context.Context, flags2 uint32) error { + dbForeignKeyChecksEnabled := true + if flags2&NoForeignKeyCheckFlagBitmask == NoForeignKeyCheckFlagBitmask { + dbForeignKeyChecksEnabled = false + } + + if vp.foreignKeyChecksStateInitialized /* already set earlier */ && + dbForeignKeyChecksEnabled == vp.foreignKeyChecksEnabled /* no change in the state, no need to update */ { + return nil + } + log.Infof("Setting this session's foreign_key_checks to %s", strconv.FormatBool(dbForeignKeyChecksEnabled)) + if _, err := vp.vr.dbClient.ExecuteWithRetry(ctx, "set @@session.foreign_key_checks="+strconv.FormatBool(dbForeignKeyChecksEnabled)); err != nil { + return fmt.Errorf("failed to set session foreign_key_checks: %w", err) + } + vp.foreignKeyChecksEnabled = dbForeignKeyChecksEnabled + if !vp.foreignKeyChecksStateInitialized { + log.Infof("First foreign_key_checks update to: %s", strconv.FormatBool(dbForeignKeyChecksEnabled)) + vp.foreignKeyChecksStateInitialized = true + } + return nil +} + // fetchAndApply performs the fetching and application of the binlogs. // This is done by two different threads. The fetcher thread pulls // events from the vstreamer and adds them to the relayLog. 
@@ -153,7 +195,7 @@ func (vp *vplayer) fetchAndApply(ctx context.Context) (err error) { streamErr := make(chan error, 1) go func() { - streamErr <- vp.vr.sourceVStreamer.VStream(ctx, mysql.EncodePosition(vp.startPos), nil, vp.replicatorPlan.VStreamFilter, func(events []*binlogdatapb.VEvent) error { + streamErr <- vp.vr.sourceVStreamer.VStream(ctx, replication.EncodePosition(vp.startPos), nil, vp.replicatorPlan.VStreamFilter, func(events []*binlogdatapb.VEvent) error { return relay.Send(events) }) }() @@ -217,6 +259,9 @@ func (vp *vplayer) applyStmtEvent(ctx context.Context, event *binlogdatapb.VEven } func (vp *vplayer) applyRowEvent(ctx context.Context, rowEvent *binlogdatapb.RowEvent) error { + if err := vp.updateFKCheck(ctx, rowEvent.Flags); err != nil { + return err + } tplan := vp.tablePlans[rowEvent.TableName] if tplan == nil { return fmt.Errorf("unexpected event on table %s", rowEvent.TableName) @@ -251,7 +296,7 @@ func (vp *vplayer) updatePos(ts int64) (posReached bool, err error) { if posReached { log.Infof("Stopped at position: %v", vp.stopPos) if vp.saveStop { - if err := vp.vr.setState(binlogplayer.BlpStopped, fmt.Sprintf("Stopped at position %v", vp.stopPos)); err != nil { + if err := vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, fmt.Sprintf("Stopped at position %v", vp.stopPos)); err != nil { return false, err } } @@ -503,6 +548,7 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m return err } if err := vp.applyRowEvent(ctx, event.RowEvent); err != nil { + log.Infof("Error applying row event: %s", err.Error()) return err } //Row event is logged AFTER RowChanges are applied so as to calculate the total elapsed time for the Row event @@ -544,7 +590,7 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m if _, err := vp.updatePos(event.Timestamp); err != nil { return err } - if err := vp.vr.setState(binlogplayer.BlpStopped, fmt.Sprintf("Stopped at DDL %s", event.Statement)); 
err != nil { + if err := vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, fmt.Sprintf("Stopped at DDL %s", event.Statement)); err != nil { return err } if err := vp.vr.dbClient.Commit(); err != nil { @@ -608,7 +654,7 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m switch { case found && notFound: // Some were found and some were not found. We can't handle this. - if err := vp.vr.setState(binlogplayer.BlpStopped, "unable to handle journal event: tables were partially matched"); err != nil { + if err := vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, "unable to handle journal event: tables were partially matched"); err != nil { return err } return io.EOF @@ -620,7 +666,7 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m } log.Infof("Binlog event registering journal event %+v", event.Journal) if err := vp.vr.vre.registerJournal(event.Journal, vp.vr.id); err != nil { - if err := vp.vr.setState(binlogplayer.BlpStopped, err.Error()); err != nil { + if err := vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, err.Error()); err != nil { return err } return io.EOF diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go index a16ecb9e4e0..3b215d03791 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go @@ -27,6 +27,7 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer/testenv" "vitess.io/vitess/go/vt/vttablet" @@ -34,7 +35,6 @@ import ( "github.com/nsf/jsondiff" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/log" @@ -202,6 +202,7 @@ func TestHeartbeatFrequencyFlag(t *testing.T) { }() stats := 
binlogplayer.NewStats() + defer stats.Stop() vp := &vplayer{vr: &vreplicator{dbClient: newVDBClient(realDBClientFactory(), stats), stats: stats}} type testcount struct { @@ -527,6 +528,62 @@ func TestPlayerSavepoint(t *testing.T) { cancel() } +// TestPlayerForeignKeyCheck tests that we can insert a row into a child table without the corresponding foreign key +// if the foreign_key_checks is not set. +func TestPlayerForeignKeyCheck(t *testing.T) { + doNotLogDBQueries = true + defer func() { doNotLogDBQueries = false }() + + defer deleteTablet(addTablet(100)) + execStatements(t, []string{ + "create table parent(id int, name varchar(128), primary key(id))", + fmt.Sprintf("create table %s.parent(id int, name varchar(128), primary key(id))", vrepldb), + "create table child(id int, parent_id int, name varchar(128), primary key(id), foreign key(parent_id) references parent(id) on delete cascade)", + fmt.Sprintf("create table %s.child(id int, parent_id int, name varchar(128), primary key(id), foreign key(parent_id) references parent(id) on delete cascade)", vrepldb), + }) + defer execStatements(t, []string{ + "drop table child", + fmt.Sprintf("drop table %s.child", vrepldb), + "drop table parent", + fmt.Sprintf("drop table %s.parent", vrepldb), + }) + + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + }}, + } + bls := &binlogdatapb.BinlogSource{ + Keyspace: env.KeyspaceName, + Shard: env.ShardName, + Filter: filter, + OnDdl: binlogdatapb.OnDDLAction_IGNORE, + } + cancel, _ := startVReplication(t, bls, "") + + testSetForeignKeyQueries = true + defer func() { + testSetForeignKeyQueries = false + }() + + execStatements(t, []string{ + "insert into parent values(1, 'parent1')", + "insert into child values(1, 1, 'child1')", + "set foreign_key_checks=0", + "insert into child values(2, 100, 'child100')", + }) + expectData(t, "parent", [][]string{ + {"1", "parent1"}, + }) + expectData(t, "child", 
[][]string{ + {"1", "1", "child1"}, + {"2", "100", "child100"}, + }) + cancel() +} + func TestPlayerStatementModeWithFilter(t *testing.T) { defer deleteTablet(addTablet(100)) @@ -1584,17 +1641,26 @@ func TestPlayerTypes(t *testing.T) { }, }, { input: "insert into vitess_json(val1,val2,val3,val4,val5) values (null,'{}','123','{\"a\":[42,100]}','{\"foo\": \"bar\"}')", - output: "insert into vitess_json(id,val1,val2,val3,val4,val5) values (1,CAST(null as JSON),JSON_OBJECT(),CAST(123 as JSON),JSON_OBJECT(_utf8mb4'a', JSON_ARRAY(42, 100)),JSON_OBJECT(_utf8mb4'foo', _utf8mb4'bar'))", + output: "insert into vitess_json(id,val1,val2,val3,val4,val5) values (1,null,JSON_OBJECT(),CAST(123 as JSON),JSON_OBJECT(_utf8mb4'a', JSON_ARRAY(42, 100)),JSON_OBJECT(_utf8mb4'foo', _utf8mb4'bar'))", + table: "vitess_json", + data: [][]string{ + {"1", "", "{}", "123", `{"a": [42, 100]}`, `{"foo": "bar"}`}, + }, + }, { + input: "insert into vitess_json(val1,val2,val3,val4,val5) values ('null', '{\"name\":null}','123','{\"a\":[42,100]}','{\"foo\": \"bar\"}')", + output: "insert into vitess_json(id,val1,val2,val3,val4,val5) values (2,CAST(_utf8mb4'null' as JSON),JSON_OBJECT(_utf8mb4'name', null),CAST(123 as JSON),JSON_OBJECT(_utf8mb4'a', JSON_ARRAY(42, 100)),JSON_OBJECT(_utf8mb4'foo', _utf8mb4'bar'))", table: "vitess_json", data: [][]string{ {"1", "", "{}", "123", `{"a": [42, 100]}`, `{"foo": "bar"}`}, + {"2", "null", `{"name": null}`, "123", `{"a": [42, 100]}`, `{"foo": "bar"}`}, }, }, { - input: "update vitess_json set val1 = '{\"bar\": \"foo\"}', val4 = '{\"a\": [98, 123]}', val5 = convert(x'7b7d' using utf8mb4)", + input: "update vitess_json set val1 = '{\"bar\": \"foo\"}', val4 = '{\"a\": [98, 123]}', val5 = convert(x'7b7d' using utf8mb4) where id=1", output: "update vitess_json set val1=JSON_OBJECT(_utf8mb4'bar', _utf8mb4'foo'), val2=JSON_OBJECT(), val3=CAST(123 as JSON), val4=JSON_OBJECT(_utf8mb4'a', JSON_ARRAY(98, 123)), val5=JSON_OBJECT() where id=1", table: "vitess_json", data: 
[][]string{ {"1", `{"bar": "foo"}`, "{}", "123", `{"a": [98, 123]}`, `{}`}, + {"2", "null", `{"name": null}`, "123", `{"a": [42, 100]}`, `{"foo": "bar"}`}, }, }} @@ -1780,13 +1846,13 @@ func TestGTIDCompress(t *testing.T) { require.NotNil(t, qr) require.Equal(t, 1, len(qr.Rows)) gotGTID := qr.Rows[0][0].ToString() - pos, err := mysql.DecodePosition(gotGTID) + pos, err := replication.DecodePosition(gotGTID) if tCase.compress { require.True(t, pos.IsZero()) pos, err = binlogplayer.DecodePosition(gotGTID) require.NoError(t, err) require.NotNil(t, pos) - tpos, err := mysql.DecodePosition(tCase.gtid) + tpos, err := replication.DecodePosition(tCase.gtid) require.NoError(t, err) require.Equal(t, tpos.String(), pos.String()) } else { @@ -1828,7 +1894,7 @@ func TestPlayerStopPos(t *testing.T) { OnDdl: binlogdatapb.OnDDLAction_IGNORE, } startPos := primaryPosition(t) - query := binlogplayer.CreateVReplicationState("test", bls, startPos, binlogplayer.BlpStopped, vrepldb, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, startPos, binlogdatapb.VReplicationWorkflowState_Stopped, vrepldb, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -1932,7 +1998,7 @@ func TestPlayerStopAtOther(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, startPos, binlogplayer.BlpStopped, vrepldb, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, startPos, binlogdatapb.VReplicationWorkflowState_Stopped, vrepldb, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -2733,7 +2799,7 @@ func TestVReplicationLogs(t *testing.T) { for _, want := range expected { t.Run("", func(t *testing.T) { - err = insertLog(vdbc, LogMessage, 1, "Running", "message1") + err = insertLog(vdbc, LogMessage, 1, binlogdatapb.VReplicationWorkflowState_Running.String(), "message1") require.NoError(t, err) qr, err := env.Mysqld.FetchSuperQuery(context.Background(), 
query) require.NoError(t, err) diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go index fd8117a6b5f..0e63068d7a1 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go @@ -26,6 +26,8 @@ import ( "strings" "time" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" @@ -35,9 +37,6 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vtgate/evalengine" - - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/log" @@ -100,7 +99,7 @@ type vreplicator struct { // source source *binlogdatapb.BinlogSource sourceVStreamer VStreamerClient - state string + state binlogdatapb.VReplicationWorkflowState stats *binlogplayer.Stats // mysqld is used to fetch the local schema. 
mysqld mysqlctl.MysqlDaemon @@ -109,8 +108,9 @@ type vreplicator struct { originalFKCheckSetting int64 originalSQLMode string - WorkflowType int32 - WorkflowName string + WorkflowType int32 + WorkflowSubType int32 + WorkflowName string throttleUpdatesRateLimiter *timer.RateLimiter } @@ -142,7 +142,7 @@ func newVReplicator(id int32, source *binlogdatapb.BinlogSource, sourceVStreamer log.Warningf("The supplied value for vreplication_heartbeat_update_interval:%d seconds is larger than the maximum allowed:%d seconds, vreplication will fallback to %d", vreplicationHeartbeatUpdateInterval, vreplicationMinimumHeartbeatUpdateInterval, vreplicationMinimumHeartbeatUpdateInterval) } - return &vreplicator{ + vr := &vreplicator{ vre: vre, id: id, source: source, @@ -151,6 +151,8 @@ func newVReplicator(id int32, source *binlogdatapb.BinlogSource, sourceVStreamer dbClient: newVDBClient(dbClient, stats), mysqld: mysqld, } + vr.setExistingRowsCopied() + return vr } // Replicate starts a vreplication stream. It can be in one of three phases: @@ -263,7 +265,7 @@ func (vr *vreplicator) replicate(ctx context.Context) error { } // If any of the operations below changed state to Stopped or Error, we should return. 
- if settings.State == binlogplayer.BlpStopped || settings.State == binlogplayer.BlpError { + if settings.State == binlogdatapb.VReplicationWorkflowState_Stopped || settings.State == binlogdatapb.VReplicationWorkflowState_Error { return nil } switch { @@ -272,18 +274,26 @@ func (vr *vreplicator) replicate(ctx context.Context) error { log.Warningf("Unable to clear FK check %v", err) return err } - if err := newVCopier(vr).copyNext(ctx, settings); err != nil { - vr.stats.ErrorCounts.Add([]string{"Copy"}, 1) - return err - } - settings, numTablesToCopy, err = vr.loadSettings(ctx, vr.dbClient) - if err != nil { - return err - } - if numTablesToCopy == 0 { - if err := vr.insertLog(LogCopyEnd, fmt.Sprintf("Copy phase completed at gtid %s", settings.StartPos)); err != nil { + if vr.WorkflowSubType == int32(binlogdatapb.VReplicationWorkflowSubType_AtomicCopy) { + if err := newVCopier(vr).copyAll(ctx, settings); err != nil { + log.Infof("Error atomically copying all tables: %v", err) + vr.stats.ErrorCounts.Add([]string{"CopyAll"}, 1) return err } + } else { + if err := newVCopier(vr).copyNext(ctx, settings); err != nil { + vr.stats.ErrorCounts.Add([]string{"Copy"}, 1) + return err + } + settings, numTablesToCopy, err = vr.loadSettings(ctx, vr.dbClient) + if err != nil { + return err + } + if numTablesToCopy == 0 { + if err := vr.insertLog(LogCopyEnd, fmt.Sprintf("Copy phase completed at gtid %s", settings.StartPos)); err != nil { + return err + } + } } case settings.StartPos.IsZero(): if err := newVCopier(vr).initTablesForCopy(ctx); err != nil { @@ -296,13 +306,13 @@ func (vr *vreplicator) replicate(ctx context.Context) error { return err } if vr.source.StopAfterCopy { - return vr.setState(binlogplayer.BlpStopped, "Stopped after copy.") + return vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, "Stopped after copy.") } - if err := vr.setState(binlogplayer.BlpRunning, ""); err != nil { + if err := vr.setState(binlogdatapb.VReplicationWorkflowState_Running, ""); err 
!= nil { vr.stats.ErrorCounts.Add([]string{"Replicate"}, 1) return err } - return newVPlayer(vr, settings, nil, mysql.Position{}, "replicate").play(ctx) + return newVPlayer(vr, settings, nil, replication.Position{}, "replicate").play(ctx) } } } @@ -407,6 +417,7 @@ func (vr *vreplicator) loadSettings(ctx context.Context, dbClient *vdbClient) (s settings, numTablesToCopy, err = vr.readSettings(ctx, dbClient) if err == nil { vr.WorkflowType = int32(settings.WorkflowType) + vr.WorkflowSubType = int32(settings.WorkflowSubType) vr.WorkflowName = settings.WorkflowName } return settings, numTablesToCopy, err @@ -426,7 +437,7 @@ func (vr *vreplicator) readSettings(ctx context.Context, dbClient *vdbClient) (s if len(qr.Rows) == 0 || len(qr.Rows[0]) == 0 { return settings, numTablesToCopy, fmt.Errorf("unexpected result from %s: %v", query, qr) } - numTablesToCopy, err = evalengine.ToInt64(qr.Rows[0][0]) + numTablesToCopy, err = qr.Rows[0][0].ToCastInt64() if err != nil { return settings, numTablesToCopy, err } @@ -445,24 +456,24 @@ func (vr *vreplicator) setMessage(message string) error { if _, err := vr.dbClient.Execute(query); err != nil { return fmt.Errorf("could not set message: %v: %v", query, err) } - if err := insertLog(vr.dbClient, LogMessage, vr.id, vr.state, message); err != nil { + if err := insertLog(vr.dbClient, LogMessage, vr.id, vr.state.String(), message); err != nil { return err } return nil } func (vr *vreplicator) insertLog(typ, message string) error { - return insertLog(vr.dbClient, typ, vr.id, vr.state, message) + return insertLog(vr.dbClient, typ, vr.id, vr.state.String(), message) } -func (vr *vreplicator) setState(state, message string) error { +func (vr *vreplicator) setState(state binlogdatapb.VReplicationWorkflowState, message string) error { if message != "" { vr.stats.History.Add(&binlogplayer.StatsHistoryRecord{ Time: time.Now(), Message: message, }) } - vr.stats.State.Store(state) + vr.stats.State.Store(state.String()) query := 
fmt.Sprintf("update _vt.vreplication set state='%v', message=%v where id=%v", state, encodeString(binlogplayer.MessageTruncate(message)), vr.id) if _, err := vr.dbClient.ExecuteFetch(query, 1); err != nil { return fmt.Errorf("could not set state: %v: %v", query, err) @@ -470,7 +481,7 @@ func (vr *vreplicator) setState(state, message string) error { if state == vr.state { return nil } - if err := insertLog(vr.dbClient, LogStateChange, vr.id, state, message); err != nil { + if err := insertLog(vr.dbClient, LogStateChange, vr.id, state.String(), message); err != nil { return err } vr.state = state @@ -485,14 +496,14 @@ func encodeString(in string) string { } func (vr *vreplicator) getSettingFKCheck() error { - qr, err := vr.dbClient.Execute("select @@foreign_key_checks;") + qr, err := vr.dbClient.Execute("select @@foreign_key_checks") if err != nil { return err } if len(qr.Rows) != 1 || len(qr.Fields) != 1 { return fmt.Errorf("unable to select @@foreign_key_checks") } - vr.originalFKCheckSetting, err = evalengine.ToInt64(qr.Rows[0][0]) + vr.originalFKCheckSetting, err = qr.Rows[0][0].ToCastInt64() if err != nil { return err } @@ -500,7 +511,7 @@ func (vr *vreplicator) getSettingFKCheck() error { } func (vr *vreplicator) resetFKCheckAfterCopy(dbClient *vdbClient) error { - _, err := dbClient.Execute(fmt.Sprintf("set foreign_key_checks=%d;", vr.originalFKCheckSetting)) + _, err := dbClient.Execute(fmt.Sprintf("set @@session.foreign_key_checks=%d", vr.originalFKCheckSetting)) return err } @@ -558,7 +569,7 @@ func (vr *vreplicator) throttlerAppName() string { if vr.WorkflowType == int32(binlogdatapb.VReplicationWorkflowType_OnlineDDL) { names = append(names, throttlerapp.OnlineDDLName.String()) } - return strings.Join(names, ":") + return throttlerapp.Concatenate(names...) 
} func (vr *vreplicator) updateTimeThrottled(appThrottled throttlerapp.Name) error { @@ -588,7 +599,7 @@ func (vr *vreplicator) updateHeartbeatTime(tm int64) error { } func (vr *vreplicator) clearFKCheck(dbClient *vdbClient) error { - _, err := dbClient.Execute("set foreign_key_checks=0;") + _, err := dbClient.Execute("set @@session.foreign_key_checks=0") return err } @@ -691,7 +702,7 @@ func (vr *vreplicator) stashSecondaryKeys(ctx context.Context, tableName string) if _, err := dbClient.ExecuteFetch(sqlparser.String(alterDrop), 1); err != nil { // If they've already been dropped, e.g. by another controller running on the tablet // when doing a shard merge, then we can ignore the error. - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Num == mysql.ERCantDropFieldOrKey { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Num == sqlerror.ERCantDropFieldOrKey { secondaryKeys, err := vr.getTableSecondaryKeys(ctx, tableName) if err == nil && len(secondaryKeys) == 0 { return nil @@ -944,7 +955,7 @@ func (vr *vreplicator) execPostCopyActions(ctx context.Context, tableName string // index definitions that we would have added already exist in // the table schema and if so move forward and delete the // post_copy_action record. - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Number() == mysql.ERDupKeyName { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERDupKeyName { stmt, err := sqlparser.ParseStrictDDL(action.Task) if err != nil { return failedAlterErr @@ -1021,3 +1032,39 @@ func (vr *vreplicator) newClientConnection(ctx context.Context) (*vdbClient, err } return dbClient, nil } + +// setExistingRowsCopied deals with the case where another tablet started +// the workflow and a reparent occurred, and now that we manage the +// workflow, we need to read the rows_copied that already exists and add +// them to our counter, otherwise it will look like the reparent wiped all the +// rows_copied. 
So in the event that our CopyRowCount counter is zero, and +// the existing rows_copied in the vreplication table is not, copy the value of +// vreplication.rows_copied into our CopyRowCount. +func (vr *vreplicator) setExistingRowsCopied() { + if vr.stats.CopyRowCount.Get() == 0 { + rowsCopiedExisting, err := vr.readExistingRowsCopied(vr.id) + if err != nil { + log.Warningf("Failed to read existing rows copied value for %s workflow: %v", vr.WorkflowName, err) + } else if rowsCopiedExisting != 0 { + log.Infof("Resuming the %s vreplication workflow started on another tablet, setting rows copied counter to %v", vr.WorkflowName, rowsCopiedExisting) + vr.stats.CopyRowCount.Set(rowsCopiedExisting) + } + } +} + +func (vr *vreplicator) readExistingRowsCopied(id int32) (int64, error) { + query, err := sqlparser.ParseAndBind(`SELECT rows_copied FROM _vt.vreplication WHERE id=%a`, + sqltypes.Int32BindVariable(id), + ) + if err != nil { + return 0, err + } + r, err := vr.dbClient.Execute(query) + if err != nil { + return 0, err + } + if len(r.Rows) != 1 { + return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "did not get expected single row value when getting rows_copied for workflow id: %d", id) + } + return r.Rows[0][0].ToInt64() +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go index 66591bbcb81..346e6b67eb3 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go @@ -207,6 +207,7 @@ func TestDeferSecondaryKeys(t *testing.T) { id := int32(1) vsclient := newTabletConnector(tablet) stats := binlogplayer.NewStats() + defer stats.Stop() dbClient := playerEngine.dbClientFactoryFiltered() err := dbClient.Connect() require.NoError(t, err) @@ -538,6 +539,7 @@ func TestCancelledDeferSecondaryKeys(t *testing.T) { id := int32(1) vsclient := newTabletConnector(tablet) stats := binlogplayer.NewStats() + defer 
stats.Stop() dbaconn := playerEngine.dbClientFactoryDba() err = dbaconn.Connect() require.NoError(t, err) @@ -626,6 +628,58 @@ func TestCancelledDeferSecondaryKeys(t *testing.T) { require.Equal(t, 1, len(res.Rows)) } +// TestResumingFromPreviousWorkflowKeepingRowsCopied tests that when you +// resume a workflow started by another tablet (eg. a reparent occurred), +// the rows_copied does not reset to zero but continues along from where +// it left off. +func TestResumingFromPreviousWorkflowKeepingRowsCopied(t *testing.T) { + _, cancel := context.WithCancel(context.Background()) + defer cancel() + tablet := addTablet(100) + defer deleteTablet(tablet) + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + }}, + } + bls := &binlogdatapb.BinlogSource{ + Keyspace: env.KeyspaceName, + Shard: env.ShardName, + Filter: filter, + } + // The test env uses the same factory for both dba and + // filtered connections. + dbconfigs.GlobalDBConfigs.Filtered.User = "vt_dba" + id := int32(1) + + vsclient := newTabletConnector(tablet) + stats := binlogplayer.NewStats() + defer stats.Stop() + + dbaconn := playerEngine.dbClientFactoryDba() + err := dbaconn.Connect() + require.NoError(t, err) + defer dbaconn.Close() + + dbClient := playerEngine.dbClientFactoryFiltered() + err = dbClient.Connect() + require.NoError(t, err) + defer dbClient.Close() + + dbName := dbClient.DBName() + rowsCopied := int64(500000) + // Ensure there's an existing vreplication workflow + _, err = dbClient.ExecuteFetch(fmt.Sprintf("insert into _vt.vreplication (id, workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, rows_copied) values (%d, 'test', '', '', 99999, 99999, 0, 0, 'Running', '%s', %v) on duplicate key update workflow='test', source='', pos='', max_tps=99999, max_replication_lag=99999, time_updated=0, transaction_timestamp=0, state='Running', db_name='%s', rows_copied=%v", + id, dbName, rowsCopied, dbName, rowsCopied), 1) 
+ require.NoError(t, err) + defer func() { + _, err = dbClient.ExecuteFetch(fmt.Sprintf("delete from _vt.vreplication where id = %d", id), 1) + require.NoError(t, err) + }() + vr := newVReplicator(id, bls, vsclient, stats, dbClient, env.Mysqld, playerEngine) + assert.Equal(t, rowsCopied, vr.stats.CopyRowCount.Get()) +} + // stripCruft removes all whitespace unicode chars and backticks. func stripCruft(in string) string { out := strings.Builder{} diff --git a/go/vt/vttablet/tabletserver/bench_test.go b/go/vt/vttablet/tabletserver/bench_test.go index 270e98019ec..fd2d86c2812 100644 --- a/go/vt/vttablet/tabletserver/bench_test.go +++ b/go/vt/vttablet/tabletserver/bench_test.go @@ -18,11 +18,10 @@ package tabletserver import ( "bytes" + "context" "fmt" "testing" - "context" - "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" @@ -55,7 +54,9 @@ func init() { } func BenchmarkExecuteVarBinary(b *testing.B) { - db, tsv := setupTabletServerTest(nil, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(b, ctx, "") defer db.Close() defer tsv.StopService() @@ -70,14 +71,16 @@ func BenchmarkExecuteVarBinary(b *testing.B) { target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} db.SetAllowAll(true) for i := 0; i < b.N; i++ { - if _, err := tsv.Execute(context.Background(), &target, benchQuery, bv, 0, 0, nil); err != nil { + if _, err := tsv.Execute(ctx, &target, benchQuery, bv, 0, 0, nil); err != nil { panic(err) } } } func BenchmarkExecuteExpression(b *testing.B) { - db, tsv := setupTabletServerTest(nil, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(b, ctx, "") defer db.Close() defer tsv.StopService() @@ -95,7 +98,7 @@ func BenchmarkExecuteExpression(b *testing.B) { target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} db.SetAllowAll(true) for i := 0; i < b.N; i++ { - if _, err := 
tsv.Execute(context.Background(), &target, benchQuery, bv, 0, 0, nil); err != nil { + if _, err := tsv.Execute(ctx, &target, benchQuery, bv, 0, 0, nil); err != nil { panic(err) } } diff --git a/go/vt/vttablet/tabletserver/connpool/dbconn.go b/go/vt/vttablet/tabletserver/connpool/dbconn.go index b11072f5811..f857b80b101 100644 --- a/go/vt/vttablet/tabletserver/connpool/dbconn.go +++ b/go/vt/vttablet/tabletserver/connpool/dbconn.go @@ -24,12 +24,12 @@ import ( "sync/atomic" "time" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/pools" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/dbconnpool" @@ -130,10 +130,10 @@ func (dbc *DBConn) Exec(ctx context.Context, query string, maxrows int, wantfiel case err == nil: // Success. return r, nil - case mysql.IsConnLostDuringQuery(err): + case sqlerror.IsConnLostDuringQuery(err): // Query probably killed. Don't retry. return nil, err - case !mysql.IsConnErr(err): + case !sqlerror.IsConnErr(err): // Not a connection error. Don't retry. return nil, err case attempt == 2: @@ -233,10 +233,10 @@ func (dbc *DBConn) Stream(ctx context.Context, query string, callback func(*sqlt case err == nil: // Success. return nil - case mysql.IsConnLostDuringQuery(err): + case sqlerror.IsConnLostDuringQuery(err): // Query probably killed. Don't retry. return err - case !mysql.IsConnErr(err): + case !sqlerror.IsConnErr(err): // Not a connection error. Don't retry. 
return err case attempt == 2: diff --git a/go/vt/vttablet/tabletserver/connpool/dbconn_test.go b/go/vt/vttablet/tabletserver/connpool/dbconn_test.go index 62ec0b6d12e..54792e17fa5 100644 --- a/go/vt/vttablet/tabletserver/connpool/dbconn_test.go +++ b/go/vt/vttablet/tabletserver/connpool/dbconn_test.go @@ -27,7 +27,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/pools" "vitess.io/vitess/go/sqltypes" @@ -87,7 +88,7 @@ func TestDBConnExec(t *testing.T) { startCounts = mysqlTimings.Counts() // Exec fail due to client side error - db.AddRejectedQuery(sql, &mysql.SQLError{ + db.AddRejectedQuery(sql, &sqlerror.SQLError{ Num: 2012, Message: "connection fail", Query: "", @@ -159,7 +160,7 @@ func TestDBConnExecLost(t *testing.T) { // Exec fail due to server side error (e.g. query kill) startCounts = mysqlTimings.Counts() - db.AddRejectedQuery(sql, &mysql.SQLError{ + db.AddRejectedQuery(sql, &sqlerror.SQLError{ Num: 2013, Message: "Lost connection to MySQL server during query", Query: "", diff --git a/go/vt/vttablet/tabletserver/connpool/pool.go b/go/vt/vttablet/tabletserver/connpool/pool.go index 6cb9adf7387..d2f8efb7af0 100644 --- a/go/vt/vttablet/tabletserver/connpool/pool.go +++ b/go/vt/vttablet/tabletserver/connpool/pool.go @@ -55,21 +55,20 @@ const ( // Other than the connection type, ConnPool maintains an additional // pool of dba connections that are used to kill connections. 
type Pool struct { - env tabletenv.Env - name string - mu sync.Mutex - connections pools.IResourcePool - capacity int - prefillParallelism int - timeout time.Duration - idleTimeout time.Duration - maxLifetime time.Duration - waiterCap int64 - waiterCount atomic.Int64 - waiterQueueFull atomic.Int64 - dbaPool *dbconnpool.ConnectionPool - appDebugParams dbconfigs.Connector - getConnTime *servenv.TimingsWrapper + env tabletenv.Env + name string + mu sync.Mutex + connections pools.IResourcePool + capacity int + timeout time.Duration + idleTimeout time.Duration + maxLifetime time.Duration + waiterCap int64 + waiterCount atomic.Int64 + waiterQueueFull atomic.Int64 + dbaPool *dbconnpool.ConnectionPool + appDebugParams dbconfigs.Connector + getConnTime *servenv.TimingsWrapper } // NewPool creates a new Pool. The name is used @@ -78,15 +77,14 @@ func NewPool(env tabletenv.Env, name string, cfg tabletenv.ConnPoolConfig) *Pool idleTimeout := cfg.IdleTimeoutSeconds.Get() maxLifetime := cfg.MaxLifetimeSeconds.Get() cp := &Pool{ - env: env, - name: name, - capacity: cfg.Size, - prefillParallelism: cfg.PrefillParallelism, - timeout: cfg.TimeoutSeconds.Get(), - idleTimeout: idleTimeout, - maxLifetime: maxLifetime, - waiterCap: int64(cfg.MaxWaiters), - dbaPool: dbconnpool.NewConnectionPool("", 1, idleTimeout, maxLifetime, 0), + env: env, + name: name, + capacity: cfg.Size, + timeout: cfg.TimeoutSeconds.Get(), + idleTimeout: idleTimeout, + maxLifetime: maxLifetime, + waiterCap: int64(cfg.MaxWaiters), + dbaPool: dbconnpool.NewConnectionPool("", 1, idleTimeout, maxLifetime, 0), } if name == "" { return cp @@ -124,11 +122,6 @@ func (cp *Pool) Open(appParams, dbaParams, appDebugParams dbconfigs.Connector) { cp.mu.Lock() defer cp.mu.Unlock() - if cp.prefillParallelism != 0 { - log.Infof("Opening pool: '%s'", cp.name) - defer log.Infof("Done opening pool: '%s'", cp.name) - } - f := func(ctx context.Context) (pools.Resource, error) { return NewDBConn(ctx, cp, appParams) } @@ -170,6 +163,7 
@@ func (cp *Pool) Close() { log.Infof("connpool - acquiring lock") cp.mu.Lock() log.Infof("connpool - acquired lock") + cp.connections.Close() cp.connections = nil cp.mu.Unlock() log.Infof("connpool - closing dbaPool") diff --git a/go/vt/vttablet/tabletserver/controller.go b/go/vt/vttablet/tabletserver/controller.go index d612ef1109d..ca4eeb8747b 100644 --- a/go/vt/vttablet/tabletserver/controller.go +++ b/go/vt/vttablet/tabletserver/controller.go @@ -18,6 +18,7 @@ package tabletserver import ( "context" + "time" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" @@ -26,8 +27,7 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletserver/rules" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" - - "time" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -52,7 +52,7 @@ type Controller interface { // SetServingType transitions the query service to the required serving type. // Returns true if the state of QueryService or the tablet type changed. - SetServingType(tabletType topodatapb.TabletType, terTimestamp time.Time, serving bool, reason string) error + SetServingType(tabletType topodatapb.TabletType, ptsTimestamp time.Time, serving bool, reason string) error // EnterLameduck causes tabletserver to enter the lameduck state. EnterLameduck() @@ -72,7 +72,7 @@ type Controller interface { // RegisterQueryRuleSource adds a query rule source RegisterQueryRuleSource(ruleSource string) - // RegisterQueryRuleSource removes a query rule source + // UnRegisterQueryRuleSource removes a query rule source UnRegisterQueryRuleSource(ruleSource string) // SetQueryRules sets the query rules for this QueryService @@ -89,6 +89,9 @@ type Controller interface { // TopoServer returns the topo server. 
TopoServer() *topo.Server + + // CheckThrottler performs a check against the named throttler app with the given flags and returns the result. + CheckThrottler(ctx context.Context, appName string, flags *throttle.CheckFlags) *throttle.CheckResult } // Ensure TabletServer satisfies Controller interface. diff --git a/go/vt/vttablet/tabletserver/debugenv.go b/go/vt/vttablet/tabletserver/debugenv.go index d7127176d3f..e229c46cadd 100644 --- a/go/vt/vttablet/tabletserver/debugenv.go +++ b/go/vt/vttablet/tabletserver/debugenv.go @@ -116,8 +116,6 @@ func debugEnvHandler(tsv *TabletServer, w http.ResponseWriter, r *http.Request) setIntVal(tsv.SetStreamPoolSize) case "TxPoolSize": setIntVal(tsv.SetTxPoolSize) - case "QueryCacheCapacity": - setIntVal(tsv.SetQueryPlanCacheCap) case "MaxResultSize": setIntVal(tsv.SetMaxResultSize) case "WarnResultSize": diff --git a/go/vt/vttablet/tabletserver/exclude_race_test.go b/go/vt/vttablet/tabletserver/exclude_race_test.go new file mode 100644 index 00000000000..6e55671ac96 --- /dev/null +++ b/go/vt/vttablet/tabletserver/exclude_race_test.go @@ -0,0 +1,62 @@ +//go:build !race + +package tabletserver + +import ( + "context" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" +) + +// TestHandlePanicAndSendLogStatsMessageTruncation tests that when an error truncation +// length is set and a panic occurs, the code in handlePanicAndSendLogStats will +// truncate the error text in logs, but will not truncate the error text in the +// error value. 
+func TestHandlePanicAndSendLogStatsMessageTruncation(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tl := newTestLogger() + defer tl.Close() + logStats := tabletenv.NewLogStats(ctx, "TestHandlePanicAndSendLogStatsMessageTruncation") + db, tsv := setupTabletServerTest(t, ctx, "") + defer tsv.StopService() + defer db.Close() + + longSql := "select * from test_table_loooooooooooooooooooooooooooooooooooong" + longBv := map[string]*querypb.BindVariable{ + "bv1": sqltypes.Int64BindVariable(1111111111), + "bv2": sqltypes.Int64BindVariable(2222222222), + "bv3": sqltypes.Int64BindVariable(3333333333), + "bv4": sqltypes.Int64BindVariable(4444444444), + } + origTruncateErrLen := sqlparser.GetTruncateErrLen() + sqlparser.SetTruncateErrLen(32) + defer sqlparser.SetTruncateErrLen(origTruncateErrLen) + + defer func() { + err := logStats.Error + want := "Uncaught panic for Sql: \"select * from test_table_loooooooooooooooooooooooooooooooooooong\", BindVars: {bv1: \"type:INT64 value:\\\"1111111111\\\"\"bv2: \"type:INT64 value:\\\"2222222222\\\"\"bv3: \"type:INT64 value:\\\"3333333333\\\"\"bv4: \"type:INT64 value:\\\"4444444444\\\"\"}" + require.Error(t, err) + assert.Contains(t, err.Error(), want) + want = "Uncaught panic for Sql: \"select * from test_t [TRUNCATED]\", BindVars: {bv1: \"typ [TRUNCATED]" + gotWhatWeWant := false + for _, log := range tl.getLogs() { + if strings.HasPrefix(log, want) { + gotWhatWeWant = true + break + } + } + assert.True(t, gotWhatWeWant) + }() + + defer tsv.handlePanicAndSendLogStats(longSql, longBv, logStats) + panic("panic from TestHandlePanicAndSendLogStatsMessageTruncation") +} diff --git a/go/vt/vttablet/tabletserver/gc/tablegc.go b/go/vt/vttablet/tabletserver/gc/tablegc.go index 829d97039a1..cf3595c973c 100644 --- a/go/vt/vttablet/tabletserver/gc/tablegc.go +++ b/go/vt/vttablet/tabletserver/gc/tablegc.go @@ -20,7 +20,6 @@ import ( "context" "fmt" "math" - "math/rand" "sort" "sync" "sync/atomic" @@ 
-28,6 +27,8 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/dbconnpool" @@ -71,9 +72,15 @@ var ( sqlPurgeTable = `delete from %a limit 50` sqlShowVtTables = `show full tables like '\_vt\_%'` sqlDropTable = "drop table if exists `%a`" + sqlDropView = "drop view if exists `%a`" purgeReentranceFlag int64 ) +type gcTable struct { + tableName string + isBaseTable bool +} + // transitionRequest encapsulates a request to transition a table to next state type transitionRequest struct { fromTableName string @@ -82,10 +89,6 @@ type transitionRequest struct { uuid string } -func init() { - rand.Seed(time.Now().UnixNano()) -} - // TableGC is the main entity in the table garbage collection mechanism. // This service "garbage collects" tables: // - it checks for magically-named tables (e.g. _vt_EVAC_f6338b2af8af11eaa210f875a4d24e90_20200920063522) @@ -122,8 +125,7 @@ type Status struct { Keyspace string Shard string - isPrimary bool - IsOpen bool + IsOpen bool purgingTables []string } @@ -227,7 +229,7 @@ func (collector *TableGC) Close() { // operate is the main entry point for the table garbage collector operation and logic. 
func (collector *TableGC) operate(ctx context.Context) { - dropTablesChan := make(chan string) + dropTablesChan := make(chan *gcTable) purgeRequestsChan := make(chan bool) transitionRequestsChan := make(chan *transitionRequest) @@ -255,7 +257,11 @@ func (collector *TableGC) operate(ctx context.Context) { case <-tableCheckTicker.C: { log.Info("TableGC: tableCheckTicker") - _ = collector.checkTables(ctx, dropTablesChan, transitionRequestsChan) + if gcTables, err := collector.readTables(ctx); err != nil { + log.Errorf("TableGC: error while reading tables: %+v", err) + } else { + _ = collector.checkTables(ctx, gcTables, dropTablesChan, transitionRequestsChan) + } } case <-purgeReentranceTicker.C: { @@ -284,11 +290,11 @@ func (collector *TableGC) operate(ctx context.Context) { time.AfterFunc(time.Second, func() { purgeRequestsChan <- true }) }() } - case dropTableName := <-dropTablesChan: + case dropTable := <-dropTablesChan: { - log.Info("TableGC: dropTablesChan") - if err := collector.dropTable(ctx, dropTableName); err != nil { - log.Errorf("TableGC: error dropping table %s: %+v", dropTableName, err) + log.Infof("TableGC: found %v in dropTablesChan", dropTable.tableName) + if err := collector.dropTable(ctx, dropTable.tableName, dropTable.isBaseTable); err != nil { + log.Errorf("TableGC: error dropping table %s: %+v", dropTable.tableName, err) } } case transition := <-transitionRequestsChan: @@ -372,29 +378,39 @@ func (collector *TableGC) shouldTransitionTable(tableName string) (shouldTransit return true, state, uuid, nil } -// checkTables looks for potential GC tables in the MySQL server+schema. -// It lists _vt_% tables, then filters through those which are due-date. -// It then applies the necessary operation per table. 
-func (collector *TableGC) checkTables(ctx context.Context, dropTablesChan chan<- string, transitionRequestsChan chan<- *transitionRequest) error { +// readTables reads the list of _vt_% tables from the database +func (collector *TableGC) readTables(ctx context.Context) (gcTables []*gcTable, err error) { + log.Infof("TableGC: read tables") + conn, err := collector.pool.Get(ctx, nil) if err != nil { - return err + return nil, err } defer conn.Recycle() - log.Infof("TableGC: check tables") - res, err := conn.Exec(ctx, sqlShowVtTables, math.MaxInt32, true) if err != nil { - return err + return nil, err } for _, row := range res.Rows { tableName := row[0].ToString() tableType := row[1].ToString() isBaseTable := (tableType == "BASE TABLE") + gcTables = append(gcTables, &gcTable{tableName: tableName, isBaseTable: isBaseTable}) + } + return gcTables, nil +} - shouldTransition, state, uuid, err := collector.shouldTransitionTable(tableName) +// checkTables looks for potential GC tables in the MySQL server+schema. +// It lists _vt_% tables, then filters through those which are due-date. +// It then applies the necessary operation per table. +func (collector *TableGC) checkTables(ctx context.Context, gcTables []*gcTable, dropTablesChan chan<- *gcTable, transitionRequestsChan chan<- *transitionRequest) error { + log.Infof("TableGC: check tables") + + for i := range gcTables { + table := gcTables[i] // we capture as local variable as we will later use this in a goroutine + shouldTransition, state, uuid, err := collector.shouldTransitionTable(table.tableName) if err != nil { log.Errorf("TableGC: error while checking tables: %+v", err) @@ -405,30 +421,32 @@ func (collector *TableGC) checkTables(ctx context.Context, dropTablesChan chan<- continue } - log.Infof("TableGC: will operate on table %s", tableName) + log.Infof("TableGC: will operate on table %s", table.tableName) if state == schema.HoldTableGCState { // Hold period expired. 
Moving to next state - collector.submitTransitionRequest(ctx, transitionRequestsChan, state, tableName, isBaseTable, uuid) + collector.submitTransitionRequest(ctx, transitionRequestsChan, state, table.tableName, table.isBaseTable, uuid) } if state == schema.PurgeTableGCState { - if isBaseTable { + if table.isBaseTable { // This table needs to be purged. Make sure to enlist it (we may already have) - if !collector.addPurgingTable(tableName) { - collector.submitTransitionRequest(ctx, transitionRequestsChan, state, tableName, isBaseTable, uuid) + if !collector.addPurgingTable(table.tableName) { + collector.submitTransitionRequest(ctx, transitionRequestsChan, state, table.tableName, table.isBaseTable, uuid) } } else { // This is a view. We don't need to delete rows from views. Just transition into next phase - collector.submitTransitionRequest(ctx, transitionRequestsChan, state, tableName, isBaseTable, uuid) + collector.submitTransitionRequest(ctx, transitionRequestsChan, state, table.tableName, table.isBaseTable, uuid) } } if state == schema.EvacTableGCState { // This table was in EVAC state for the required period. It will transition into DROP state - collector.submitTransitionRequest(ctx, transitionRequestsChan, state, tableName, isBaseTable, uuid) + collector.submitTransitionRequest(ctx, transitionRequestsChan, state, table.tableName, table.isBaseTable, uuid) } if state == schema.DropTableGCState { // This table needs to be dropped immediately. - go func() { dropTablesChan <- tableName }() + go func() { + dropTablesChan <- table + }() } } @@ -474,8 +492,8 @@ func (collector *TableGC) purge(ctx context.Context) (tableName string, err erro if err == nil { return true, nil } - if merr, ok := err.(*mysql.SQLError); ok { - if merr.Num == mysql.ERSpecifiedAccessDenied { + if merr, ok := err.(*sqlerror.SQLError); ok { + if merr.Num == sqlerror.ERSpecifiedAccessDenied { // We do not have privileges to disable binary logging. 
That's fine, we're on best effort, // so we're going to silently ignore this error. return false, nil @@ -524,21 +542,25 @@ func (collector *TableGC) purge(ctx context.Context) (tableName string, err erro // dropTable runs an actual DROP TABLE statement, and marks the end of the line for the // tables' GC lifecycle. -func (collector *TableGC) dropTable(ctx context.Context, tableName string) error { - conn, err := collector.pool.Get(ctx, nil) +func (collector *TableGC) dropTable(ctx context.Context, tableName string, isBaseTable bool) error { + conn, err := dbconnpool.NewDBConnection(ctx, collector.env.Config().DB.DbaWithDB()) if err != nil { return err } - defer conn.Recycle() + defer conn.Close() - parsed := sqlparser.BuildParsedQuery(sqlDropTable, tableName) + sqlDrop := sqlDropTable + if !isBaseTable { + sqlDrop = sqlDropView + } + parsed := sqlparser.BuildParsedQuery(sqlDrop, tableName) log.Infof("TableGC: dropping table: %s", tableName) - _, err = conn.Exec(ctx, parsed.Query, 1, true) + _, err = conn.ExecuteFetch(parsed.Query, 1, false) if err != nil { return err } - log.Infof("TableGC: dropped table: %s", tableName) + log.Infof("TableGC: dropped table: %s, isBaseTable: %v", tableName, isBaseTable) return nil } diff --git a/go/vt/vttablet/tabletserver/gc/tablegc_test.go b/go/vt/vttablet/tabletserver/gc/tablegc_test.go index d6b383a2ce8..446f6e6ff85 100644 --- a/go/vt/vttablet/tabletserver/gc/tablegc_test.go +++ b/go/vt/vttablet/tabletserver/gc/tablegc_test.go @@ -17,11 +17,14 @@ limitations under the License. 
package gc import ( + "context" "testing" + "time" "vitess.io/vitess/go/vt/schema" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNextTableToPurge(t *testing.T) { @@ -250,3 +253,87 @@ func TestShouldTransitionTable(t *testing.T) { }) } } + +func TestCheckTables(t *testing.T) { + collector := &TableGC{ + isOpen: 0, + purgingTables: map[string]bool{}, + } + var err error + collector.lifecycleStates, err = schema.ParseGCLifecycle("hold,purge,evac,drop") + require.NoError(t, err) + + gcTables := []*gcTable{ + { + tableName: "_vt_something_that_isnt_a_gc_table", + isBaseTable: true, + }, + { + tableName: "_vt_HOLD_11111111111111111111111111111111_20990920093324", // 2099 is in the far future + isBaseTable: true, + }, + { + tableName: "_vt_HOLD_22222222222222222222222222222222_20200920093324", + isBaseTable: true, + }, + { + tableName: "_vt_DROP_33333333333333333333333333333333_20200919083451", + isBaseTable: true, + }, + { + tableName: "_vt_DROP_44444444444444444444444444444444_20200919083451", + isBaseTable: false, + }, + } + // one gcTable above is irrelevant, does not have a GC table name + // one will not transition: its date is 2099 + expectResponses := len(gcTables) - 2 + expectDropTables := []*gcTable{ + { + tableName: "_vt_DROP_33333333333333333333333333333333_20200919083451", + isBaseTable: true, + }, + { + tableName: "_vt_DROP_44444444444444444444444444444444_20200919083451", + isBaseTable: false, + }, + } + expectTransitionRequests := []*transitionRequest{ + { + fromTableName: "_vt_HOLD_22222222222222222222222222222222_20200920093324", + isBaseTable: true, + toGCState: schema.PurgeTableGCState, + uuid: "22222222222222222222222222222222", + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + dropTablesChan := make(chan *gcTable) + transitionRequestsChan := make(chan *transitionRequest) + + err = collector.checkTables(ctx, gcTables, dropTablesChan, transitionRequestsChan) 
+ assert.NoError(t, err) + + var responses int + var foundDropTables []*gcTable + var foundTransitionRequests []*transitionRequest + for { + if responses == expectResponses { + break + } + select { + case <-ctx.Done(): + assert.FailNow(t, "timeout") + return + case gcTable := <-dropTablesChan: + responses++ + foundDropTables = append(foundDropTables, gcTable) + case request := <-transitionRequestsChan: + responses++ + foundTransitionRequests = append(foundTransitionRequests, request) + } + } + assert.ElementsMatch(t, expectDropTables, foundDropTables) + assert.ElementsMatch(t, expectTransitionRequests, foundTransitionRequests) +} diff --git a/go/vt/vttablet/tabletserver/health_streamer.go b/go/vt/vttablet/tabletserver/health_streamer.go index ad9acf495d8..9ee3c6ba726 100644 --- a/go/vt/vttablet/tabletserver/health_streamer.go +++ b/go/vt/vttablet/tabletserver/health_streamer.go @@ -27,27 +27,21 @@ import ( "github.com/spf13/pflag" - "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" - - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/sidecardb" - - "vitess.io/vitess/go/vt/sqlparser" - - "vitess.io/vitess/go/vt/dbconfigs" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" - - "google.golang.org/protobuf/proto" - + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/history" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + vtschema "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" + "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -129,7 +123,7 @@ 
func newHealthStreamer(env tabletenv.Env, alias *topodatapb.TabletAlias, engine } func (hs *healthStreamer) InitDBConfig(target *querypb.Target, cp dbconfigs.Connector) { - hs.state.Target = proto.Clone(target).(*querypb.Target) + hs.state.Target = target.CloneVT() hs.dbConfig = cp } @@ -156,6 +150,10 @@ func (hs *healthStreamer) Close() { hs.cancel() hs.cancel = nil } + if hs.conns != nil { + hs.conns.Close() + hs.conns = nil + } } func (hs *healthStreamer) Stream(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error { @@ -197,7 +195,7 @@ func (hs *healthStreamer) register() (chan *querypb.StreamHealthResponse, contex hs.clients[ch] = struct{}{} // Send the current state immediately. - ch <- proto.Clone(hs.state).(*querypb.StreamHealthResponse) + ch <- hs.state.CloneVT() return ch, hs.ctx } @@ -208,15 +206,15 @@ func (hs *healthStreamer) unregister(ch chan *querypb.StreamHealthResponse) { delete(hs.clients, ch) } -func (hs *healthStreamer) ChangeState(tabletType topodatapb.TabletType, terTimestamp time.Time, lag time.Duration, err error, serving bool) { +func (hs *healthStreamer) ChangeState(tabletType topodatapb.TabletType, ptsTimestamp time.Time, lag time.Duration, err error, serving bool) { hs.mu.Lock() defer hs.mu.Unlock() hs.state.Target.TabletType = tabletType if tabletType == topodatapb.TabletType_PRIMARY { - hs.state.TabletExternallyReparentedTimestamp = terTimestamp.Unix() + hs.state.PrimaryTermStartTimestamp = ptsTimestamp.Unix() } else { - hs.state.TabletExternallyReparentedTimestamp = 0 + hs.state.PrimaryTermStartTimestamp = 0 } if err != nil { hs.state.RealtimeStats.HealthError = err.Error() @@ -228,9 +226,7 @@ func (hs *healthStreamer) ChangeState(tabletType topodatapb.TabletType, terTimes hs.state.RealtimeStats.FilteredReplicationLagSeconds, hs.state.RealtimeStats.BinlogPlayersCount = blpFunc() hs.state.RealtimeStats.Qps = hs.stats.QPSRates.TotalRate() - - shr := proto.Clone(hs.state).(*querypb.StreamHealthResponse) - + shr 
:= hs.state.CloneVT() hs.broadCastToClients(shr) hs.history.Add(&historyRecord{ Time: time.Now(), @@ -297,7 +293,7 @@ func (hs *healthStreamer) AppendDetails(details []*kv) []*kv { func (hs *healthStreamer) SetUnhealthyThreshold(v time.Duration) { hs.unhealthyThreshold.Store(v.Nanoseconds()) - shr := proto.Clone(hs.state).(*querypb.StreamHealthResponse) + shr := hs.state.CloneVT() for ch := range hs.clients { select { case ch <- shr: @@ -361,6 +357,9 @@ func (hs *healthStreamer) reload(full map[string]*schema.Table, created, altered // Range over the tables that are created/altered and split them up based on their type. for _, table := range append(append(dropped, created...), altered...) { tableName := table.Name.String() + if vtschema.IsInternalOperationTableName(tableName) { + continue + } if table.Type == schema.View && hs.viewsEnabled { views = append(views, tableName) } else { @@ -383,7 +382,7 @@ func (hs *healthStreamer) reload(full map[string]*schema.Table, created, altered hs.state.RealtimeStats.TableSchemaChanged = tables hs.state.RealtimeStats.ViewSchemaChanged = views - shr := proto.Clone(hs.state).(*querypb.StreamHealthResponse) + shr := hs.state.CloneVT() hs.broadCastToClients(shr) hs.state.RealtimeStats.TableSchemaChanged = nil hs.state.RealtimeStats.ViewSchemaChanged = nil @@ -402,8 +401,8 @@ func (hs *healthStreamer) reloadTables(ctx context.Context, conn *connpool.DBCon } tableNamePredicate := fmt.Sprintf("table_name IN (%s)", strings.Join(escapedTableNames, ", ")) - del := fmt.Sprintf("%s AND %s", sqlparser.BuildParsedQuery(mysql.ClearSchemaCopy, sidecardb.GetIdentifier()).Query, tableNamePredicate) - upd := fmt.Sprintf("%s AND %s", sqlparser.BuildParsedQuery(mysql.InsertIntoSchemaCopy, sidecardb.GetIdentifier()).Query, tableNamePredicate) + del := fmt.Sprintf("%s AND %s", sqlparser.BuildParsedQuery(mysql.ClearSchemaCopy, sidecar.GetIdentifier()).Query, tableNamePredicate) + upd := fmt.Sprintf("%s AND %s", 
sqlparser.BuildParsedQuery(mysql.InsertIntoSchemaCopy, sidecar.GetIdentifier()).Query, tableNamePredicate) // Reload the schema in a transaction. _, err := conn.Exec(ctx, "begin", 1, false) diff --git a/go/vt/vttablet/tabletserver/health_streamer_test.go b/go/vt/vttablet/tabletserver/health_streamer_test.go index 7f19d640d7f..b2fbb2db1ea 100644 --- a/go/vt/vttablet/tabletserver/health_streamer_test.go +++ b/go/vt/vttablet/tabletserver/health_streamer_test.go @@ -29,12 +29,13 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -150,9 +151,9 @@ func TestHealthStreamerBroadcast(t *testing.T) { Target: &querypb.Target{ TabletType: topodatapb.TabletType_PRIMARY, }, - TabletAlias: alias, - Serving: true, - TabletExternallyReparentedTimestamp: now.Unix(), + TabletAlias: alias, + Serving: true, + PrimaryTermStartTimestamp: now.Unix(), RealtimeStats: &querypb.RealtimeStats{ FilteredReplicationLagSeconds: 1, BinlogPlayersCount: 2, @@ -209,6 +210,8 @@ func TestReloadSchema(t *testing.T) { for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() config := newConfig(db) @@ -227,8 +230,8 @@ func TestReloadSchema(t *testing.T) { target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} configs := config.DB - db.AddQueryPattern(sqlparser.BuildParsedQuery(mysql.ClearSchemaCopy, sidecardb.GetIdentifier()).Query+".*", &sqltypes.Result{}) - 
db.AddQueryPattern(sqlparser.BuildParsedQuery(mysql.InsertIntoSchemaCopy, sidecardb.GetIdentifier()).Query+".*", &sqltypes.Result{}) + db.AddQueryPattern(sqlparser.BuildParsedQuery(mysql.ClearSchemaCopy, sidecar.GetIdentifier()).Query+".*", &sqltypes.Result{}) + db.AddQueryPattern(sqlparser.BuildParsedQuery(mysql.InsertIntoSchemaCopy, sidecar.GetIdentifier()).Query+".*", &sqltypes.Result{}) db.AddQueryPattern("SELECT UNIX_TIMESTAMP()"+".*", sqltypes.MakeTestResult( sqltypes.MakeTestFields( "UNIX_TIMESTAMP(now())", @@ -324,6 +327,8 @@ func TestReloadSchema(t *testing.T) { // TestReloadView tests that the health streamer tracks view changes correctly func TestReloadView(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() config := newConfig(db) @@ -339,8 +344,8 @@ func TestReloadView(t *testing.T) { target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} configs := config.DB - db.AddQueryPattern(sqlparser.BuildParsedQuery(mysql.ClearSchemaCopy, sidecardb.GetIdentifier()).Query+".*", &sqltypes.Result{}) - db.AddQueryPattern(sqlparser.BuildParsedQuery(mysql.InsertIntoSchemaCopy, sidecardb.GetIdentifier()).Query+".*", &sqltypes.Result{}) + db.AddQueryPattern(sqlparser.BuildParsedQuery(mysql.ClearSchemaCopy, sidecar.GetIdentifier()).Query+".*", &sqltypes.Result{}) + db.AddQueryPattern(sqlparser.BuildParsedQuery(mysql.InsertIntoSchemaCopy, sidecar.GetIdentifier()).Query+".*", &sqltypes.Result{}) db.AddQueryPattern("SELECT UNIX_TIMESTAMP()"+".*", sqltypes.MakeTestResult( sqltypes.MakeTestFields( "UNIX_TIMESTAMP(now())", diff --git a/go/vt/vttablet/tabletserver/messager/message_manager.go b/go/vt/vttablet/tabletserver/messager/message_manager.go index 5c925150322..0629b31629f 100644 --- a/go/vt/vttablet/tabletserver/messager/message_manager.go +++ b/go/vt/vttablet/tabletserver/messager/message_manager.go @@ -27,7 +27,8 @@ import ( "golang.org/x/sync/semaphore" - 
"vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/timer" @@ -35,7 +36,6 @@ import ( binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" @@ -223,7 +223,7 @@ type messageManager struct { // where a replica could have received and processed a GTID that the primary // may not have yet commited; but this is harmless because any events missed // will be picked up during the next poller run. - lastPollPosition *mysql.Position + lastPollPosition *replication.Position // wg is for ensuring all running goroutines have returned // before we can close the manager. You need to Add before @@ -703,7 +703,7 @@ func (mm *messageManager) runOneVStream(ctx context.Context) error { if curPos == "" { return true, nil } - cur, err := mysql.DecodePosition(curPos) + cur, err := replication.DecodePosition(curPos) if err != nil { return false, err } @@ -905,28 +905,28 @@ func (mm *messageManager) GeneratePurgeQuery(timeCutoff int64) (string, map[stri func BuildMessageRow(row []sqltypes.Value) (*MessageRow, error) { mr := &MessageRow{Row: row[4:]} if !row[0].IsNull() { - v, err := evalengine.ToInt64(row[0]) + v, err := row[0].ToCastInt64() if err != nil { return nil, err } mr.Priority = v } if !row[1].IsNull() { - v, err := evalengine.ToInt64(row[1]) + v, err := row[1].ToCastInt64() if err != nil { return nil, err } mr.TimeNext = v } if !row[2].IsNull() { - v, err := evalengine.ToInt64(row[2]) + v, err := row[2].ToCastInt64() if err != nil { return nil, err } mr.Epoch = v } if !row[3].IsNull() { - v, err := evalengine.ToInt64(row[3]) + v, err := row[3].ToCastInt64() if err != nil { return 
nil, err } @@ -948,7 +948,7 @@ func (mm *messageManager) readPending(ctx context.Context, bindVars map[string]* qr.Fields = response.Fields } if response.Gtid != "" { - pos, err := mysql.DecodePosition(response.Gtid) + pos, err := replication.DecodePosition(response.Gtid) if err != nil { return err } @@ -965,13 +965,7 @@ func (mm *messageManager) readPending(ctx context.Context, bindVars map[string]* return qr, err } -func (mm *messageManager) getReceiverCount() int { - mm.mu.Lock() - defer mm.mu.Unlock() - return len(mm.receivers) -} - -func (mm *messageManager) getLastPollPosition() *mysql.Position { +func (mm *messageManager) getLastPollPosition() *replication.Position { mm.cacheManagementMu.Lock() defer mm.cacheManagementMu.Unlock() return mm.lastPollPosition diff --git a/go/vt/vttablet/tabletserver/messager/message_manager_test.go b/go/vt/vttablet/tabletserver/messager/message_manager_test.go index 59286403885..b8ca47ae46d 100644 --- a/go/vt/vttablet/tabletserver/messager/message_manager_test.go +++ b/go/vt/vttablet/tabletserver/messager/message_manager_test.go @@ -34,7 +34,6 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" @@ -741,7 +740,7 @@ func TestMMGenerate(t *testing.T) { t.Errorf("GenerateAckQuery query: %s, want %s", query, wantQuery) } bvv, _ := sqltypes.BindVariableToValue(bv["time_acked"]) - gotAcked, _ := evalengine.ToInt64(bvv) + gotAcked, _ := bvv.ToCastInt64() wantAcked := time.Now().UnixNano() if wantAcked-gotAcked > 10e9 { t.Errorf("gotAcked: %d, should be with 10s of %d", gotAcked, wantAcked) diff --git a/go/vt/vttablet/tabletserver/planbuilder/permission.go b/go/vt/vttablet/tabletserver/planbuilder/permission.go index 463b8b8673c..5bc68bd186d 100644 --- 
a/go/vt/vttablet/tabletserver/planbuilder/permission.go +++ b/go/vt/vttablet/tabletserver/planbuilder/permission.go @@ -62,9 +62,10 @@ func BuildPermissions(stmt sqlparser.Statement) []Permission { for _, t := range node.TableNames { permissions = buildTableNamePermissions(t, tableacl.ADMIN, permissions) } + case *sqlparser.Analyze: + permissions = buildTableNamePermissions(node.Table, tableacl.WRITER, permissions) case *sqlparser.OtherAdmin, *sqlparser.CallProc, *sqlparser.Begin, *sqlparser.Commit, *sqlparser.Rollback, - *sqlparser.Load, *sqlparser.Savepoint, *sqlparser.Release, *sqlparser.SRollback, *sqlparser.Set, *sqlparser.Show, - *sqlparser.OtherRead, sqlparser.Explain, *sqlparser.LoadDataStmt: + *sqlparser.Load, *sqlparser.Savepoint, *sqlparser.Release, *sqlparser.SRollback, *sqlparser.Set, *sqlparser.Show, sqlparser.Explain, *sqlparser.LoadDataStmt: // no op default: panic(fmt.Errorf("BUG: unexpected statement type: %T", node)) diff --git a/go/vt/vttablet/tabletserver/planbuilder/plan.go b/go/vt/vttablet/tabletserver/planbuilder/plan.go index f4372c91cef..90712278efe 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/plan.go +++ b/go/vt/vttablet/tabletserver/planbuilder/plan.go @@ -237,7 +237,7 @@ func Build(statement sqlparser.Statement, tables map[string]*schema.Table, dbNam plan, err = &Plan{PlanID: PlanShowThrottlerStatus, FullStmt: stmt}, nil case *sqlparser.Show: plan, err = analyzeShow(stmt, dbName) - case *sqlparser.OtherRead, sqlparser.Explain: + case *sqlparser.Analyze, sqlparser.Explain: plan, err = &Plan{PlanID: PlanOtherRead}, nil case *sqlparser.OtherAdmin: plan, err = &Plan{PlanID: PlanOtherAdmin}, nil @@ -284,8 +284,9 @@ func BuildStreaming(sql string, tables map[string]*schema.Table) (*Plan, error) plan.NeedsReservedConn = true } plan.Table, plan.AllTables = lookupTables(stmt.From, tables) - case *sqlparser.OtherRead, *sqlparser.Show, *sqlparser.Union, *sqlparser.CallProc, sqlparser.Explain: - // pass + case *sqlparser.Show, 
*sqlparser.Union, *sqlparser.CallProc, sqlparser.Explain: + case *sqlparser.Analyze: + plan.PlanID = PlanOtherRead default: return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "%s not allowed for streaming", sqlparser.ASTToStatementType(statement)) } @@ -294,7 +295,7 @@ func BuildStreaming(sql string, tables map[string]*schema.Table) (*Plan, error) } // BuildStreamLoadPlan BuildStreaming builds a streaming plan based on the schema. -func BuildStreamLoadPlan(sql string, tables map[string]*schema.Table) (*Plan, error) { +func BuildStreamLoadPlan(sql string) (*Plan, error) { statement, err := sqlparser.Parse(sql) if err != nil { return nil, err diff --git a/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt b/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt index 71d39ba65a6..42e822885c2 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt +++ b/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt @@ -764,7 +764,13 @@ options:PassthroughDMLs "analyze table a" { "PlanID": "OtherRead", - "TableName": "" + "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ] } # show diff --git a/go/vt/vttablet/tabletserver/planbuilder/testdata/stream_cases.txt b/go/vt/vttablet/tabletserver/planbuilder/testdata/stream_cases.txt index 9f53e803152..98d4f354072 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/testdata/stream_cases.txt +++ b/go/vt/vttablet/tabletserver/planbuilder/testdata/stream_cases.txt @@ -84,3 +84,17 @@ # set statement unsafe with pooling "set @udv = 10" "SET not allowed for streaming" + +# analyze statement +"analyze table a" +{ + "PlanID": "OtherRead", + "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], + "FullQuery": "analyze table a" +} diff --git a/go/vt/vttablet/tabletserver/query_engine.go b/go/vt/vttablet/tabletserver/query_engine.go index ff64736cc07..0f8c4de578a 100644 --- a/go/vt/vttablet/tabletserver/query_engine.go +++ 
b/go/vt/vttablet/tabletserver/query_engine.go @@ -20,19 +20,16 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "net/http" - "strings" "sync" "sync/atomic" "time" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/cache" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/cache/theine" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/pools" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/streamlog" @@ -41,9 +38,13 @@ import ( "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/tableacl" tacl "vitess.io/vitess/go/vt/tableacl/acl" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vthash" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/planbuilder" "vitess.io/vitess/go/vt/vttablet/tabletserver/rules" @@ -122,6 +123,17 @@ func isValid(planType planbuilder.PlanType, hasReservedCon bool, hasSysSettings // _______________________________________________ +type PlanCacheKey = theine.StringKey +type PlanCache = theine.Store[PlanCacheKey, *TabletPlan] + +type SettingsCacheKey = theine.HashKey256 +type SettingsCache = theine.Store[SettingsCacheKey, *pools.Setting] + +type currentSchema struct { + tables map[string]*schema.Table + epoch uint32 +} + // QueryEngine implements the core functionality of tabletserver. // It assumes that no requests will be sent to it before Open is // called and succeeds. @@ -130,14 +142,17 @@ func isValid(planType planbuilder.PlanType, hasReservedCon bool, hasSysSettings // Close: There should be no more pending queries when this // function is called. type QueryEngine struct { - isOpen bool + isOpen atomic.Bool env tabletenv.Env se *schema.Engine // mu protects the following fields. 
- mu sync.RWMutex - tables map[string]*schema.Table - plans cache.Cache + schemaMu sync.Mutex + epoch uint32 + schema atomic.Pointer[currentSchema] + + plans *PlanCache + settings *SettingsCache queryRuleSources *rules.Map // Pools @@ -172,7 +187,7 @@ type QueryEngine struct { // stats // Note: queryErrorCountsWithCode is similar to queryErrorCounts except it contains error code as an additional dimension - queryCounts, queryTimes, queryErrorCounts, queryErrorCountsWithCode, queryRowsAffected, queryRowsReturned *stats.CountersWithMultiLabels + queryCounts, queryCountsWithTabletType, queryTimes, queryErrorCounts, queryErrorCountsWithCode, queryRowsAffected, queryRowsReturned *stats.CountersWithMultiLabels // stats flags enablePerWorkloadTableMetrics bool @@ -186,21 +201,29 @@ type QueryEngine struct { // You must call this only once. func NewQueryEngine(env tabletenv.Env, se *schema.Engine) *QueryEngine { config := env.Config() - cacheCfg := &cache.Config{ - MaxEntries: int64(config.QueryCacheSize), - MaxMemoryUsage: config.QueryCacheMemory, - LFU: config.QueryCacheLFU, - } qe := &QueryEngine{ env: env, se: se, - tables: make(map[string]*schema.Table), - plans: cache.NewDefaultCacheImpl(cacheCfg), queryRuleSources: rules.NewMap(), enablePerWorkloadTableMetrics: config.EnablePerWorkloadTableMetrics, } + // Cache for query plans: user configured size with a doorkeeper by default to prevent one-off queries + // from thrashing the cache. 
+ qe.plans = theine.NewStore[PlanCacheKey, *TabletPlan](config.QueryCacheMemory, config.QueryCacheDoorkeeper) + + // cache for connection settings: default to 1/4th of the size for the query cache and do + // not use a doorkeeper because custom connection settings are rarely one-off and we always + // want to cache them + var settingsCacheMemory = config.QueryCacheMemory / 4 + qe.settings = theine.NewStore[SettingsCacheKey, *pools.Setting](settingsCacheMemory, false) + + qe.schema.Store(¤tSchema{ + tables: make(map[string]*schema.Table), + epoch: 0, + }) + qe.conns = connpool.NewPool(env, "ConnPool", config.OltpReadPool) qe.streamConns = connpool.NewPool(env, "StreamConnPool", config.OlapReadPool) qe.consolidatorMode.Store(config.Consolidator) @@ -248,9 +271,15 @@ func NewQueryEngine(env tabletenv.Env, se *schema.Engine) *QueryEngine { env.Exporter().NewGaugeFunc("QueryCacheLength", "Query engine query cache length", func() int64 { return int64(qe.plans.Len()) }) - env.Exporter().NewGaugeFunc("QueryCacheSize", "Query engine query cache size", qe.plans.UsedCapacity) - env.Exporter().NewGaugeFunc("QueryCacheCapacity", "Query engine query cache capacity", qe.plans.MaxCapacity) - env.Exporter().NewCounterFunc("QueryCacheEvictions", "Query engine query cache evictions", qe.plans.Evictions) + env.Exporter().NewGaugeFunc("QueryCacheSize", "Query engine query cache size", func() int64 { + return int64(qe.plans.UsedCapacity()) + }) + env.Exporter().NewGaugeFunc("QueryCacheCapacity", "Query engine query cache capacity", func() int64 { + return int64(qe.plans.MaxCapacity()) + }) + env.Exporter().NewCounterFunc("QueryCacheEvictions", "Query engine query cache evictions", func() int64 { + return qe.plans.Metrics.Evicted() + }) labels := []string{"Table", "Plan"} if config.EnablePerWorkloadTableMetrics { @@ -258,6 +287,7 @@ func NewQueryEngine(env tabletenv.Env, se *schema.Engine) *QueryEngine { } qe.queryCounts = env.Exporter().NewCountersWithMultiLabels("QueryCounts", "query 
counts", labels) + qe.queryCountsWithTabletType = env.Exporter().NewCountersWithMultiLabels("QueryCountsWithTabletType", "query counts with tablet type labels", []string{"Table", "Plan", "TabletType"}) qe.queryTimes = env.Exporter().NewCountersWithMultiLabels("QueryTimesNs", "query times in ns", labels) qe.queryRowsAffected = env.Exporter().NewCountersWithMultiLabels("QueryRowsAffected", "query rows affected", labels) qe.queryRowsReturned = env.Exporter().NewCountersWithMultiLabels("QueryRowsReturned", "query rows returned", labels) @@ -276,12 +306,14 @@ func NewQueryEngine(env tabletenv.Env, se *schema.Engine) *QueryEngine { // Open must be called before sending requests to QueryEngine. func (qe *QueryEngine) Open() error { - if qe.isOpen { + if qe.isOpen.Load() { return nil } log.Info("Query Engine: opening") - qe.conns.Open(qe.env.Config().DB.AppWithDB(), qe.env.Config().DB.DbaWithDB(), qe.env.Config().DB.AppDebugWithDB()) + config := qe.env.Config() + + qe.conns.Open(config.DB.AppWithDB(), config.DB.DbaWithDB(), config.DB.AppDebugWithDB()) conn, err := qe.conns.Get(tabletenv.LocalContext(), nil) if err != nil { @@ -298,9 +330,11 @@ func (qe *QueryEngine) Open() error { return err } - qe.streamConns.Open(qe.env.Config().DB.AppWithDB(), qe.env.Config().DB.DbaWithDB(), qe.env.Config().DB.AppDebugWithDB()) + qe.streamConns.Open(config.DB.AppWithDB(), config.DB.DbaWithDB(), config.DB.AppDebugWithDB()) qe.se.RegisterNotifier("qe", qe.schemaChanged, true) - qe.isOpen = true + qe.plans.EnsureOpen() + qe.settings.EnsureOpen() + qe.isOpen.Store(true) return nil } @@ -308,63 +342,69 @@ func (qe *QueryEngine) Open() error { // You must ensure that no more queries will be sent // before calling Close. func (qe *QueryEngine) Close() { - if !qe.isOpen { + if !qe.isOpen.Swap(false) { return } // Close in reverse order of Open. 
qe.se.UnregisterNotifier("qe") - qe.plans.Clear() - qe.tables = make(map[string]*schema.Table) + + qe.plans.Close() + qe.settings.Close() + qe.streamConns.Close() qe.conns.Close() - qe.isOpen = false log.Info("Query Engine: closed") } -// GetPlan returns the TabletPlan that for the query. Plans are cached in a cache.LRUCache. -func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats, sql string, skipQueryPlanCache bool) (*TabletPlan, error) { - span, _ := trace.NewSpan(ctx, "QueryEngine.GetPlan") - defer span.Finish() - if !skipQueryPlanCache { - if plan := qe.getQuery(sql); plan != nil { - logStats.CachedPlan = true - return plan, nil - } - } - // Obtain read lock to prevent schema from changing while - // we build a plan. The read lock allows multiple identical - // queries to build the same plan. One of them will win by - // updating the query cache and prevent future races. Due to - // this, query stats reporting may not be accurate, but it's - // acceptable because those numbers are best effort. - qe.mu.RLock() - defer qe.mu.RUnlock() +var errNoCache = errors.New("plan should not be cached") + +func (qe *QueryEngine) getPlan(curSchema *currentSchema, sql string) (*TabletPlan, error) { statement, err := sqlparser.Parse(sql) if err != nil { return nil, err } - splan, err := planbuilder.Build(statement, qe.tables, qe.env.Config().DB.DBName, qe.env.Config().EnableViews) + splan, err := planbuilder.Build(statement, curSchema.tables, qe.env.Config().DB.DBName, qe.env.Config().EnableViews) if err != nil { return nil, err } plan := &TabletPlan{Plan: splan, Original: sql} plan.Rules = qe.queryRuleSources.FilterByPlan(sql, plan.PlanID, plan.TableNames()...) 
plan.buildAuthorized() - if plan.PlanID == planbuilder.PlanDDL || plan.PlanID == planbuilder.PlanSet { - return plan, nil - } - if !skipQueryPlanCache && !sqlparser.SkipQueryPlanCacheDirective(statement) { - qe.plans.Set(sql, plan) + if plan.PlanID == planbuilder.PlanDDL || plan.PlanID == planbuilder.PlanSet || sqlparser.SkipQueryPlanCacheDirective(statement) { + return plan, errNoCache } + return plan, nil } +// GetPlan returns the TabletPlan that for the query. Plans are cached in a cache.LRUCache. +func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats, sql string, skipQueryPlanCache bool) (*TabletPlan, error) { + span, _ := trace.NewSpan(ctx, "QueryEngine.GetPlan") + defer span.Finish() + + var plan *TabletPlan + var err error + + curSchema := qe.schema.Load() + + if skipQueryPlanCache { + plan, err = qe.getPlan(curSchema, sql) + } else { + plan, logStats.CachedPlan, err = qe.plans.GetOrLoad(PlanCacheKey(sql), curSchema.epoch, func() (*TabletPlan, error) { + return qe.getPlan(curSchema, sql) + }) + } + + if errors.Is(err, errNoCache) { + err = nil + } + return plan, err +} + // GetStreamPlan is similar to GetPlan, but doesn't use the cache // and doesn't enforce a limit. It just returns the parsed query. func (qe *QueryEngine) GetStreamPlan(sql string) (*TabletPlan, error) { - qe.mu.RLock() - defer qe.mu.RUnlock() - splan, err := planbuilder.BuildStreaming(sql, qe.tables) + splan, err := planbuilder.BuildStreaming(sql, qe.schema.Load().tables) if err != nil { return nil, err } @@ -376,9 +416,7 @@ func (qe *QueryEngine) GetStreamPlan(sql string) (*TabletPlan, error) { // GetMessageStreamPlan builds a plan for Message streaming. 
func (qe *QueryEngine) GetMessageStreamPlan(name string) (*TabletPlan, error) { - qe.mu.RLock() - defer qe.mu.RUnlock() - splan, err := planbuilder.BuildMessageStreaming(name, qe.tables) + splan, err := planbuilder.BuildMessageStreaming(name, qe.schema.Load().tables) if err != nil { return nil, err } @@ -393,33 +431,44 @@ func (qe *QueryEngine) GetConnSetting(ctx context.Context, settings []string) (* span, _ := trace.NewSpan(ctx, "QueryEngine.GetConnSetting") defer span.Finish() - var keyBuilder strings.Builder + hasher := vthash.New256() for _, q := range settings { - keyBuilder.WriteString(q) - } - - // try to get the connSetting from the cache - cacheKey := keyBuilder.String() - if plan := qe.getConnSetting(cacheKey); plan != nil { - return plan, nil + _, _ = hasher.WriteString(q) } - // build the setting queries - query, resetQuery, err := planbuilder.BuildSettingQuery(settings) - if err != nil { - return nil, err - } - connSetting := pools.NewSetting(query, resetQuery) + var cacheKey SettingsCacheKey + hasher.Sum(cacheKey[:0]) - // store the connSetting in the cache - qe.plans.Set(cacheKey, connSetting) - - return connSetting, nil + connSetting, _, err := qe.settings.GetOrLoad(cacheKey, 0, func() (*pools.Setting, error) { + // build the setting queries + query, resetQuery, err := planbuilder.BuildSettingQuery(settings) + if err != nil { + return nil, err + } + return pools.NewSetting(query, resetQuery), nil + }) + return connSetting, err } // ClearQueryPlanCache should be called if query plan cache is potentially obsolete func (qe *QueryEngine) ClearQueryPlanCache() { - qe.plans.Clear() + qe.schemaMu.Lock() + defer qe.schemaMu.Unlock() + + qe.epoch++ + + current := qe.schema.Load() + qe.schema.Store(¤tSchema{ + tables: current.tables, + epoch: qe.epoch, + }) +} + +func (qe *QueryEngine) ForEachPlan(each func(plan *TabletPlan) bool) { + curSchema := qe.schema.Load() + qe.plans.Range(curSchema.epoch, func(_ PlanCacheKey, plan *TabletPlan) bool { + return 
each(plan) + }) } // IsMySQLReachable returns an error if it cannot connect to MySQL. @@ -427,7 +476,7 @@ func (qe *QueryEngine) ClearQueryPlanCache() { func (qe *QueryEngine) IsMySQLReachable() error { conn, err := dbconnpool.NewDBConnection(context.TODO(), qe.env.Config().DB.AppWithDB()) if err != nil { - if mysql.IsTooManyConnectionsErr(err) { + if sqlerror.IsTooManyConnectionsErr(err) { return nil } return err @@ -437,60 +486,35 @@ func (qe *QueryEngine) IsMySQLReachable() error { } func (qe *QueryEngine) schemaChanged(tables map[string]*schema.Table, created, altered, dropped []*schema.Table) { - qe.mu.Lock() - defer qe.mu.Unlock() - qe.tables = tables - if len(altered) != 0 || len(dropped) != 0 { - qe.plans.Clear() - } -} + qe.schemaMu.Lock() + defer qe.schemaMu.Unlock() -// getQuery fetches the plan and makes it the most recent. -func (qe *QueryEngine) getQuery(sql string) *TabletPlan { - cacheResult, ok := qe.plans.Get(sql) - if !ok { - return nil - } - plan, ok := cacheResult.(*TabletPlan) - if ok { - return plan - } - return nil -} - -func (qe *QueryEngine) getConnSetting(key string) *pools.Setting { - cacheResult, ok := qe.plans.Get(key) - if !ok { - return nil - } - plan, ok := cacheResult.(*pools.Setting) - if ok { - return plan + if len(altered) != 0 || len(dropped) != 0 { + qe.epoch++ } - return nil -} -// SetQueryPlanCacheCap sets the query plan cache capacity. -func (qe *QueryEngine) SetQueryPlanCacheCap(size int) { - if size <= 0 { - size = 1 - } - qe.plans.SetCapacity(int64(size)) + qe.schema.Store(¤tSchema{ + tables: tables, + epoch: qe.epoch, + }) } // QueryPlanCacheCap returns the capacity of the query cache. 
func (qe *QueryEngine) QueryPlanCacheCap() int { - return int(qe.plans.MaxCapacity()) + return qe.plans.MaxCapacity() } // QueryPlanCacheLen returns the length (size in entries) of the query cache -func (qe *QueryEngine) QueryPlanCacheLen() int { - qe.plans.Wait() - return qe.plans.Len() +func (qe *QueryEngine) QueryPlanCacheLen() (count int) { + qe.ForEachPlan(func(plan *TabletPlan) bool { + count++ + return true + }) + return } // AddStats adds the given stats for the planName.tableName -func (qe *QueryEngine) AddStats(planType planbuilder.PlanType, tableName, workload string, queryCount int64, duration, mysqlTime time.Duration, rowsAffected, rowsReturned, errorCount int64, errorCode string) { +func (qe *QueryEngine) AddStats(planType planbuilder.PlanType, tableName, workload string, tabletType topodata.TabletType, queryCount int64, duration, mysqlTime time.Duration, rowsAffected, rowsReturned, errorCount int64, errorCode string) { // table names can contain "." characters, replace them! keys := []string{tableName, planType.String()} // Only use the workload as a label if that's enabled in the configuration. @@ -500,6 +524,9 @@ func (qe *QueryEngine) AddStats(planType planbuilder.PlanType, tableName, worklo qe.queryCounts.Add(keys, queryCount) qe.queryTimes.Add(keys, int64(duration)) qe.queryErrorCounts.Add(keys, errorCount) + + qe.queryCountsWithTabletType.Add([]string{tableName, planType.String(), tabletType.String()}, queryCount) + // queryErrorCountsWithCode is similar to queryErrorCounts except we have an additional dimension // of error code. 
if errorCount > 0 { @@ -543,8 +570,7 @@ func (qe *QueryEngine) handleHTTPQueryPlans(response http.ResponseWriter, reques } response.Header().Set("Content-Type", "text/plain") - qe.plans.ForEach(func(value any) bool { - plan := value.(*TabletPlan) + qe.ForEachPlan(func(plan *TabletPlan) bool { response.Write([]byte(fmt.Sprintf("%#v\n", sqlparser.TruncateForUI(plan.Original)))) if b, err := json.MarshalIndent(plan.Plan, "", " "); err != nil { response.Write([]byte(err.Error())) @@ -563,9 +589,7 @@ func (qe *QueryEngine) handleHTTPQueryStats(response http.ResponseWriter, reques } response.Header().Set("Content-Type", "application/json; charset=utf-8") var qstats []perQueryStats - qe.plans.ForEach(func(value any) bool { - plan := value.(*TabletPlan) - + qe.ForEachPlan(func(plan *TabletPlan) bool { var pqstats perQueryStats pqstats.Query = unicoded(sqlparser.TruncateForUI(plan.Original)) pqstats.Table = plan.TableName().String() @@ -621,10 +645,6 @@ func (qe *QueryEngine) handleHTTPAclJSON(response http.ResponseWriter, request * // ServeHTTP lists the most recent, cached queries and their count. func (qe *QueryEngine) handleHTTPConsolidations(response http.ResponseWriter, request *http.Request) { - if err := acl.CheckAccessHTTP(request, acl.DEBUGGING); err != nil { - acl.SendError(response, err) - return - } if err := acl.CheckAccessHTTP(request, acl.DEBUGGING); err != nil { acl.SendError(response, err) return @@ -660,9 +680,9 @@ func unicoded(in string) (out string) { // GetStreamLoadDataPlan is similar to GetPlan, but doesn't use the cache // and doesn't enforce a limit. It just returns the parsed query. 
func (qe *QueryEngine) GetStreamLoadDataPlan(sql string) (*TabletPlan, error) { - qe.mu.RLock() - defer qe.mu.RUnlock() - splan, err := planbuilder.BuildStreamLoadPlan(sql, qe.tables) + qe.schemaMu.Lock() + defer qe.schemaMu.Unlock() + splan, err := planbuilder.BuildStreamLoadPlan(sql) if err != nil { return nil, err diff --git a/go/vt/vttablet/tabletserver/query_engine_test.go b/go/vt/vttablet/tabletserver/query_engine_test.go index 33849c3f5e6..73ac1ca5e37 100644 --- a/go/vt/vttablet/tabletserver/query_engine_test.go +++ b/go/vt/vttablet/tabletserver/query_engine_test.go @@ -18,7 +18,6 @@ package tabletserver import ( "context" - "expvar" "fmt" "math/rand" "net/http" @@ -32,6 +31,9 @@ import ( "testing" "time" + "vitess.io/vitess/go/cache/theine" + "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/mysql" @@ -39,7 +41,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/cache" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/streamlog" @@ -146,7 +147,7 @@ func TestGetMessageStreamPlan(t *testing.T) { } wantPlan := &planbuilder.Plan{ PlanID: planbuilder.PlanMessageStream, - Table: qe.tables["msg"], + Table: qe.schema.Load().tables["msg"], Permissions: []planbuilder.Permission{{ TableName: "msg", Role: tableacl.WRITER, @@ -162,12 +163,8 @@ func TestGetMessageStreamPlan(t *testing.T) { func assertPlanCacheSize(t *testing.T, qe *QueryEngine, expected int) { t.Helper() - var size int - qe.plans.Wait() - qe.plans.ForEach(func(_ any) bool { - size++ - return true - }) + time.Sleep(100 * time.Millisecond) + size := qe.plans.Len() require.Equal(t, expected, size, "expected query plan cache to contain %d entries, found %d", expected, size) } @@ -177,7 +174,6 @@ func TestQueryPlanCache(t *testing.T) { schematest.AddDefaultQueries(db) firstQuery := "select * from test_table_01" - secondQuery := "select * from test_table_02" 
db.AddQuery("select * from test_table_01 where 1 != 1", &sqltypes.Result{}) db.AddQuery("select * from test_table_02 where 1 != 1", &sqltypes.Result{}) @@ -188,23 +184,11 @@ func TestQueryPlanCache(t *testing.T) { ctx := context.Background() logStats := tabletenv.NewLogStats(ctx, "GetPlanStats") - if cache.DefaultConfig.LFU { - // this cache capacity is in bytes - qe.SetQueryPlanCacheCap(528) - } else { - // this cache capacity is in number of elements - qe.SetQueryPlanCacheCap(1) - } + firstPlan, err := qe.GetPlan(ctx, logStats, firstQuery, false) require.NoError(t, err) require.NotNil(t, firstPlan, "plan should not be nil") - secondPlan, err := qe.GetPlan(ctx, logStats, secondQuery, false) - fmt.Println(secondPlan.CachedSize(true)) - require.NoError(t, err) - require.NotNil(t, secondPlan, "plan should not be nil") - expvar.Do(func(kv expvar.KeyValue) { - _ = kv.Value.String() - }) + assertPlanCacheSize(t, qe, 1) qe.ClearQueryPlanCache() } @@ -225,7 +209,7 @@ func TestNoQueryPlanCache(t *testing.T) { ctx := context.Background() logStats := tabletenv.NewLogStats(ctx, "GetPlanStats") - qe.SetQueryPlanCacheCap(1024) + firstPlan, err := qe.GetPlan(ctx, logStats, firstQuery, true) if err != nil { t.Fatal(err) @@ -254,7 +238,7 @@ func TestNoQueryPlanCacheDirective(t *testing.T) { ctx := context.Background() logStats := tabletenv.NewLogStats(ctx, "GetPlanStats") - qe.SetQueryPlanCacheCap(1024) + firstPlan, err := qe.GetPlan(ctx, logStats, firstQuery, false) if err != nil { t.Fatal(err) @@ -303,6 +287,8 @@ func newTestQueryEngine(idleTimeout time.Duration, strict bool, dbcfgs *dbconfig env := tabletenv.NewEnv(config, "TabletServerTest") se := schema.NewEngine(env) qe := NewQueryEngine(env, se) + // the integration tests that check cache behavior do not expect a doorkeeper; disable it + qe.plans = theine.NewStore[PlanCacheKey, *TabletPlan](4*1024*1024, false) se.InitDBConfig(dbcfgs.DbaWithDB()) return qe } @@ -391,13 +377,12 @@ func BenchmarkPlanCacheThroughput(b 
*testing.B) { } } -func benchmarkPlanCache(b *testing.B, db *fakesqldb.DB, lfu bool, par int) { +func benchmarkPlanCache(b *testing.B, db *fakesqldb.DB, par int) { b.Helper() dbcfgs := newDBConfigs(db) config := tabletenv.NewDefaultConfig() config.DB = dbcfgs - config.QueryCacheLFU = lfu env := tabletenv.NewEnv(config, "TabletServerTest") se := schema.NewEngine(env) @@ -430,12 +415,8 @@ func BenchmarkPlanCacheContention(b *testing.B) { db.AddQueryPattern(".*", &sqltypes.Result{}) for par := 1; par <= 8; par *= 2 { - b.Run(fmt.Sprintf("ContentionLRU-%d", par), func(b *testing.B) { - benchmarkPlanCache(b, db, false, par) - }) - b.Run(fmt.Sprintf("ContentionLFU-%d", par), func(b *testing.B) { - benchmarkPlanCache(b, db, true, par) + benchmarkPlanCache(b, db, par) }) } } @@ -481,16 +462,9 @@ func TestPlanCachePollution(t *testing.T) { var wg sync.WaitGroup go func() { - cacheMode := "lru" - if config.QueryCacheLFU { - cacheMode = "lfu" - } + cacheMode := "lfu" - out, err := os.Create(path.Join(plotPath, - fmt.Sprintf("cache_plot_%d_%d_%s.dat", - config.QueryCacheSize, config.QueryCacheMemory, cacheMode, - )), - ) + out, err := os.Create(path.Join(plotPath, fmt.Sprintf("cache_plot_%d_%s.dat", config.QueryCacheMemory, cacheMode))) require.NoError(t, err) defer out.Close() @@ -577,6 +551,7 @@ func TestAddQueryStats(t *testing.T) { name string planType planbuilder.PlanType tableName string + tabletType topodata.TabletType queryCount int64 duration time.Duration mysqlTime time.Duration @@ -587,6 +562,7 @@ func TestAddQueryStats(t *testing.T) { enablePerWorkloadTableMetrics bool workload string expectedQueryCounts string + expectedQueryCountsWithTableType string expectedQueryTimes string expectedQueryRowsAffected string expectedQueryRowsReturned string @@ -597,6 +573,27 @@ func TestAddQueryStats(t *testing.T) { name: "select query", planType: planbuilder.PlanSelect, tableName: "A", + tabletType: topodata.TabletType_PRIMARY, + queryCount: 1, + duration: 10, + rowsAffected: 0, 
+ rowsReturned: 15, + errorCount: 0, + errorCode: "OK", + enablePerWorkloadTableMetrics: false, + workload: "some-workload", + expectedQueryCounts: `{"A.Select": 1}`, + expectedQueryTimes: `{"A.Select": 10}`, + expectedQueryRowsAffected: `{}`, + expectedQueryRowsReturned: `{"A.Select": 15}`, + expectedQueryErrorCounts: `{"A.Select": 0}`, + expectedQueryErrorCountsWithCode: `{}`, + expectedQueryCountsWithTableType: `{"A.Select.PRIMARY": 1}`, + }, { + name: "select query against a replica", + planType: planbuilder.PlanSelect, + tableName: "A", + tabletType: topodata.TabletType_REPLICA, queryCount: 1, duration: 10, rowsAffected: 0, @@ -611,10 +608,12 @@ func TestAddQueryStats(t *testing.T) { expectedQueryRowsReturned: `{"A.Select": 15}`, expectedQueryErrorCounts: `{"A.Select": 0}`, expectedQueryErrorCountsWithCode: `{}`, + expectedQueryCountsWithTableType: `{"A.Select.REPLICA": 1}`, }, { name: "select into query", planType: planbuilder.PlanSelect, tableName: "A", + tabletType: topodata.TabletType_PRIMARY, queryCount: 1, duration: 10, rowsAffected: 15, @@ -629,10 +628,12 @@ func TestAddQueryStats(t *testing.T) { expectedQueryRowsReturned: `{"A.Select": 0}`, expectedQueryErrorCounts: `{"A.Select": 0}`, expectedQueryErrorCountsWithCode: `{}`, + expectedQueryCountsWithTableType: `{"A.Select.PRIMARY": 1}`, }, { name: "error", planType: planbuilder.PlanSelect, tableName: "A", + tabletType: topodata.TabletType_PRIMARY, queryCount: 1, duration: 10, rowsAffected: 0, @@ -647,10 +648,12 @@ func TestAddQueryStats(t *testing.T) { expectedQueryRowsReturned: `{"A.Select": 0}`, expectedQueryErrorCounts: `{"A.Select": 1}`, expectedQueryErrorCountsWithCode: `{"A.Select.RESOURCE_EXHAUSTED": 1}`, + expectedQueryCountsWithTableType: `{"A.Select.PRIMARY": 1}`, }, { name: "insert query", planType: planbuilder.PlanInsert, tableName: "A", + tabletType: topodata.TabletType_PRIMARY, queryCount: 1, duration: 10, rowsAffected: 15, @@ -665,10 +668,12 @@ func TestAddQueryStats(t *testing.T) { 
expectedQueryRowsReturned: `{}`, expectedQueryErrorCounts: `{"A.Insert": 0}`, expectedQueryErrorCountsWithCode: `{}`, + expectedQueryCountsWithTableType: `{"A.Insert.PRIMARY": 1}`, }, { name: "select query with per workload metrics", planType: planbuilder.PlanSelect, tableName: "A", + tabletType: topodata.TabletType_PRIMARY, queryCount: 1, duration: 10, rowsAffected: 0, @@ -683,10 +688,12 @@ func TestAddQueryStats(t *testing.T) { expectedQueryRowsReturned: `{"A.Select.some-workload": 15}`, expectedQueryErrorCounts: `{"A.Select.some-workload": 0}`, expectedQueryErrorCountsWithCode: `{}`, + expectedQueryCountsWithTableType: `{"A.Select.PRIMARY": 1}`, }, { name: "select into query with per workload metrics", planType: planbuilder.PlanSelect, tableName: "A", + tabletType: topodata.TabletType_PRIMARY, queryCount: 1, duration: 10, rowsAffected: 15, @@ -701,10 +708,12 @@ func TestAddQueryStats(t *testing.T) { expectedQueryRowsReturned: `{"A.Select.some-workload": 0}`, expectedQueryErrorCounts: `{"A.Select.some-workload": 0}`, expectedQueryErrorCountsWithCode: `{}`, + expectedQueryCountsWithTableType: `{"A.Select.PRIMARY": 1}`, }, { name: "error with per workload metrics", planType: planbuilder.PlanSelect, tableName: "A", + tabletType: topodata.TabletType_PRIMARY, queryCount: 1, duration: 10, rowsAffected: 0, @@ -719,10 +728,12 @@ func TestAddQueryStats(t *testing.T) { expectedQueryRowsReturned: `{"A.Select.some-workload": 0}`, expectedQueryErrorCounts: `{"A.Select.some-workload": 1}`, expectedQueryErrorCountsWithCode: `{"A.Select.RESOURCE_EXHAUSTED": 1}`, + expectedQueryCountsWithTableType: `{"A.Select.PRIMARY": 1}`, }, { name: "insert query with per workload metrics", planType: planbuilder.PlanInsert, tableName: "A", + tabletType: topodata.TabletType_PRIMARY, queryCount: 1, duration: 10, rowsAffected: 15, @@ -737,6 +748,7 @@ func TestAddQueryStats(t *testing.T) { expectedQueryRowsReturned: `{}`, expectedQueryErrorCounts: `{"A.Insert.some-workload": 0}`, 
expectedQueryErrorCountsWithCode: `{}`, + expectedQueryCountsWithTableType: `{"A.Insert.PRIMARY": 1}`, }, } @@ -749,8 +761,9 @@ func TestAddQueryStats(t *testing.T) { env := tabletenv.NewEnv(config, "TestAddQueryStats_"+testcase.name) se := schema.NewEngine(env) qe := NewQueryEngine(env, se) - qe.AddStats(testcase.planType, testcase.tableName, testcase.workload, testcase.queryCount, testcase.duration, testcase.mysqlTime, testcase.rowsAffected, testcase.rowsReturned, testcase.errorCount, testcase.errorCode) + qe.AddStats(testcase.planType, testcase.tableName, testcase.workload, testcase.tabletType, testcase.queryCount, testcase.duration, testcase.mysqlTime, testcase.rowsAffected, testcase.rowsReturned, testcase.errorCount, testcase.errorCode) assert.Equal(t, testcase.expectedQueryCounts, qe.queryCounts.String()) + assert.Equal(t, testcase.expectedQueryCountsWithTableType, qe.queryCountsWithTabletType.String()) assert.Equal(t, testcase.expectedQueryTimes, qe.queryTimes.String()) assert.Equal(t, testcase.expectedQueryRowsAffected, qe.queryRowsAffected.String()) assert.Equal(t, testcase.expectedQueryRowsReturned, qe.queryRowsReturned.String()) diff --git a/go/vt/vttablet/tabletserver/query_executor.go b/go/vt/vttablet/tabletserver/query_executor.go index 323e9cc9dbd..e2d9f1354a2 100644 --- a/go/vt/vttablet/tabletserver/query_executor.go +++ b/go/vt/vttablet/tabletserver/query_executor.go @@ -24,11 +24,11 @@ import ( "sync" "time" - "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/pools" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/trace" @@ -115,6 +115,15 @@ func (qre *QueryExecutor) shouldConsolidate() bool { } } +// addUagInfoToQuery add ip trace route info +func (qre *QueryExecutor) addUagInfoToQuery(sql string) string { + if !strings.Contains(sql, "uag::") && qre.options != nil && qre.options.UagInfo != 
"" { + sqlReset := fmt.Sprintf("%s%s", qre.options.UagInfo, sql) + return sqlReset + } + return sql +} + // Execute performs a non-streaming query execution. func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { planName := qre.plan.PlanID.String() @@ -122,6 +131,7 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { defer func(start time.Time) { duration := time.Since(start) qre.tsv.stats.QueryTimings.Add(planName, duration) + qre.tsv.stats.QueryTimingsByTabletType.Add(qre.tabletType.String(), duration) qre.recordUserQuery("Execute", int64(duration)) mysqlTime := qre.logStats.MysqlResponseTime @@ -133,13 +143,14 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { var errCode string vtErrorCode := vterrors.Code(err) errCode = vtErrorCode.String() + if reply == nil { - qre.tsv.qe.AddStats(qre.plan.PlanID, tableName, qre.options.GetWorkloadName(), 1, duration, mysqlTime, 0, 0, 1, errCode) + qre.tsv.qe.AddStats(qre.plan.PlanID, tableName, qre.options.GetWorkloadName(), qre.tabletType, 1, duration, mysqlTime, 0, 0, 1, errCode) qre.plan.AddStats(1, duration, mysqlTime, 0, 0, 1) return } - qre.tsv.qe.AddStats(qre.plan.PlanID, tableName, qre.options.GetWorkloadName(), 1, duration, mysqlTime, int64(reply.RowsAffected), int64(len(reply.Rows)), 0, errCode) + qre.tsv.qe.AddStats(qre.plan.PlanID, tableName, qre.options.GetWorkloadName(), qre.tabletType, 1, duration, mysqlTime, int64(reply.RowsAffected), int64(len(reply.Rows)), 0, errCode) qre.plan.AddStats(1, duration, mysqlTime, reply.RowsAffected, uint64(len(reply.Rows)), 0) qre.logStats.RowsAffected = int(reply.RowsAffected) qre.logStats.Rows = reply.Rows @@ -219,11 +230,11 @@ func (qre *QueryExecutor) execAutocommit(f func(conn *StatefulConnection) (*sqlt if qre.options == nil { qre.options = &querypb.ExecuteOptions{} } else { - qre.options = proto.Clone(qre.options).(*querypb.ExecuteOptions) + qre.options = qre.options.CloneVT() } 
qre.options.TransactionIsolation = querypb.ExecuteOptions_AUTOCOMMIT - if qre.tsv.txThrottler.Throttle(qre.tsv.getPriorityFromOptions(qre.options)) { + if qre.tsv.txThrottler.Throttle(qre.tsv.getPriorityFromOptions(qre.options), qre.options.GetWorkloadName()) { return nil, errTxThrottled } @@ -238,7 +249,7 @@ func (qre *QueryExecutor) execAutocommit(f func(conn *StatefulConnection) (*sqlt } func (qre *QueryExecutor) execAsTransaction(f func(conn *StatefulConnection) (*sqltypes.Result, error)) (*sqltypes.Result, error) { - if qre.tsv.txThrottler.Throttle(qre.tsv.getPriorityFromOptions(qre.options)) { + if qre.tsv.txThrottler.Throttle(qre.tsv.getPriorityFromOptions(qre.options), qre.options.GetWorkloadName()) { return nil, errTxThrottled } conn, beginSQL, _, err := qre.tsv.te.txPool.Begin(qre.ctx, qre.options, false, 0, nil, qre.setting) @@ -311,6 +322,7 @@ func (qre *QueryExecutor) Stream(callback StreamCallback) error { defer func(start time.Time) { qre.tsv.stats.QueryTimings.Record(qre.plan.PlanID.String(), start) + qre.tsv.stats.QueryTimingsByTabletType.Record(qre.tabletType.String(), start) qre.recordUserQuery("Stream", int64(time.Since(start))) }(time.Now()) @@ -400,6 +412,7 @@ func (qre *QueryExecutor) MessageStream(callback StreamCallback) error { defer func(start time.Time) { qre.tsv.stats.QueryTimings.Record(qre.plan.PlanID.String(), start) + qre.tsv.stats.QueryTimingsByTabletType.Record(qre.tabletType.String(), start) qre.recordUserQuery("MessageStream", int64(time.Since(start))) }(time.Now()) @@ -554,7 +567,7 @@ func (qre *QueryExecutor) execDDL(conn *StatefulConnection) (*sqltypes.Result, e // Instead of synchronously recalculating table size stats // after every DDL, let them be outdated until the periodic // schema reload fixes it. 
- if err := qre.tsv.se.ReloadAtEx(qre.ctx, mysql.Position{}, false); err != nil { + if err := qre.tsv.se.ReloadAtEx(qre.ctx, replication.Position{}, false); err != nil { log.Errorf("failed to reload schema %v", err) } }() @@ -632,7 +645,7 @@ func (qre *QueryExecutor) execNextval() (*sqltypes.Result, error) { if len(qr.Rows) != 1 { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected rows from reading sequence %s (possible mis-route): %d", tableName, len(qr.Rows)) } - nextID, err := evalengine.ToInt64(qr.Rows[0][0]) + nextID, err := qr.Rows[0][0].ToCastInt64() if err != nil { return nil, vterrors.Wrapf(err, "error loading sequence %s", tableName) } @@ -647,7 +660,7 @@ func (qre *QueryExecutor) execNextval() (*sqltypes.Result, error) { t.SequenceInfo.NextVal = nextID t.SequenceInfo.LastVal = nextID } - cache, err := evalengine.ToInt64(qr.Rows[0][1]) + cache, err := qr.Rows[0][1].ToCastInt64() if err != nil { return nil, vterrors.Wrapf(err, "error loading sequence %s", tableName) } @@ -849,11 +862,11 @@ func (qre *QueryExecutor) generateFinalSQL(parsedQuery *sqlparser.ParsedQuery, b } func rewriteOUTParamError(err error) error { - sqlErr, ok := err.(*mysql.SQLError) + sqlErr, ok := err.(*sqlerror.SQLError) if !ok { return err } - if sqlErr.Num == mysql.ErSPNotVarArg { + if sqlErr.Num == sqlerror.ErSPNotVarArg { return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "OUT and INOUT parameters are not supported") } return err @@ -1174,28 +1187,14 @@ func (qre *QueryExecutor) executeGetSchemaQuery(query string, callback func(sche }) } -// addUagInfoToQuery add ip trace route info -func (qre *QueryExecutor) addUagInfoToQuery(sql string) string { - if !strings.Contains(sql, "uag::") && qre.options != nil && qre.options.UagInfo != "" { - sqlReset := fmt.Sprintf("%s%s", qre.options.UagInfo, sql) - return sqlReset - } - return sql -} - // execSteamLoadData stream load data func (qre *QueryExecutor) execSteamLoadData(ctx context.Context, lines chan string) 
(*sqltypes.Result, error) { - //conn, err := qre.getConn() conn, err := qre.tsv.te.txPool.GetAndLock(qre.connID, "for steam load data") if err != nil { return nil, err } - /* for line := range lines{ - log.Infof("%v",line) - }*/ defer conn.Unlock() return conn.StreamLoadData(ctx, lines, qre.query) - //return qre.execSQL(conn, qre.query, false) } diff --git a/go/vt/vttablet/tabletserver/query_executor_test.go b/go/vt/vttablet/tabletserver/query_executor_test.go index 3ab653bf50c..2385dbc3a31 100644 --- a/go/vt/vttablet/tabletserver/query_executor_test.go +++ b/go/vt/vttablet/tabletserver/query_executor_test.go @@ -23,6 +23,7 @@ import ( "math/rand" "strings" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -319,7 +320,7 @@ func TestQueryExecutorPlans(t *testing.T) { assert.True(t, vterrors.Equals(err, tcase.errorWant)) } // Wait for the existing query to be processed by the cache - tsv.QueryPlanCacheWait() + time.Sleep(100 * time.Millisecond) // Test inside a transaction. target := tsv.sm.Target() @@ -412,7 +413,7 @@ func TestQueryExecutorQueryAnnotation(t *testing.T) { assert.Equal(t, tcase.logWant, qre.logStats.RewrittenSQL(), tcase.input) // Wait for the existing query to be processed by the cache - tsv.QueryPlanCacheWait() + time.Sleep(100 * time.Millisecond) // Test inside a transaction. 
target := tsv.sm.Target() @@ -788,6 +789,8 @@ func TestQueryExecutorPlanNextval(t *testing.T) { } func TestQueryExecutorMessageStreamACL(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() aclName := fmt.Sprintf("simpleacl-test-%d", rand.Int63()) tableacl.Register(aclName, &simpleacl.Factory{}) tableacl.SetDefaultACL(aclName) @@ -817,7 +820,7 @@ func TestQueryExecutorMessageStreamACL(t *testing.T) { callerID := &querypb.VTGateCallerID{ Username: "u1", } - ctx := callerid.NewContext(context.Background(), nil, callerID) + ctx = callerid.NewContext(ctx, nil, callerID) qre := &QueryExecutor{ ctx: ctx, query: "stream from msg", @@ -1484,7 +1487,7 @@ func newTestTabletServer(ctx context.Context, flags executorFlags, db *fakesqldb } dbconfigs := newDBConfigs(db) config.DB = dbconfigs - tsv := NewTabletServer("TabletServerTest", config, memorytopo.NewServer(""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} err := tsv.StartService(target, dbconfigs, nil /* mysqld */) if config.TwoPCEnable { @@ -1806,6 +1809,6 @@ func (m mockTxThrottler) Open() (err error) { func (m mockTxThrottler) Close() { } -func (m mockTxThrottler) Throttle(priority int) (result bool) { +func (m mockTxThrottler) Throttle(priority int, workload string) (result bool) { return m.throttle } diff --git a/go/vt/vttablet/tabletserver/queryz.go b/go/vt/vttablet/tabletserver/queryz.go index 7d976d6b6b7..151f028ca09 100644 --- a/go/vt/vttablet/tabletserver/queryz.go +++ b/go/vt/vttablet/tabletserver/queryz.go @@ -152,8 +152,7 @@ func queryzHandler(qe *QueryEngine, w http.ResponseWriter, r *http.Request) { return row1.timePQ() > row2.timePQ() }, } - qe.plans.ForEach(func(value any) bool { - plan := value.(*TabletPlan) + qe.ForEachPlan(func(plan *TabletPlan) bool { if plan == nil { return true } diff --git 
a/go/vt/vttablet/tabletserver/queryz_test.go b/go/vt/vttablet/tabletserver/queryz_test.go index a0bea742e04..8e1b7b38cfd 100644 --- a/go/vt/vttablet/tabletserver/queryz_test.go +++ b/go/vt/vttablet/tabletserver/queryz_test.go @@ -46,7 +46,7 @@ func TestQueryzHandler(t *testing.T) { }, } plan1.AddStats(10, 2*time.Second, 1*time.Second, 0, 2, 0) - qe.plans.Set(query1, plan1) + qe.plans.Set(query1, plan1, 0, 0) const query2 = "insert into test_table values 1" plan2 := &TabletPlan{ @@ -57,7 +57,7 @@ func TestQueryzHandler(t *testing.T) { }, } plan2.AddStats(1, 2*time.Millisecond, 1*time.Millisecond, 1, 0, 0) - qe.plans.Set(query2, plan2) + qe.plans.Set(query2, plan2, 0, 0) const query3 = "show tables" plan3 := &TabletPlan{ @@ -68,8 +68,8 @@ func TestQueryzHandler(t *testing.T) { }, } plan3.AddStats(1, 75*time.Millisecond, 50*time.Millisecond, 0, 1, 0) - qe.plans.Set(query3, plan3) - qe.plans.Set("", (*TabletPlan)(nil)) + qe.plans.Set(query3, plan3, 0, 0) + qe.plans.Set("", (*TabletPlan)(nil), 0, 0) hugeInsert := "insert into test_table values 0" for i := 1; i < 1000; i++ { @@ -83,11 +83,11 @@ func TestQueryzHandler(t *testing.T) { }, } plan4.AddStats(1, 1*time.Millisecond, 1*time.Millisecond, 1, 0, 0) - qe.plans.Set(hugeInsert, plan4) - qe.plans.Set("", (*TabletPlan)(nil)) + qe.plans.Set(PlanCacheKey(hugeInsert), plan4, 0, 0) + qe.plans.Set("", (*TabletPlan)(nil), 0, 0) // Wait for cache to settle - qe.plans.Wait() + time.Sleep(100 * time.Millisecond) queryzHandler(qe, resp, req) body, _ := io.ReadAll(resp.Body) diff --git a/go/vt/vttablet/tabletserver/repltracker/reader.go b/go/vt/vttablet/tabletserver/repltracker/reader.go index 5bbe62eb1bf..fc42a367989 100644 --- a/go/vt/vttablet/tabletserver/repltracker/reader.go +++ b/go/vt/vttablet/tabletserver/repltracker/reader.go @@ -17,17 +17,14 @@ limitations under the License. 
package repltracker import ( + "context" "fmt" "sync" "time" - "vitess.io/vitess/go/vt/sidecardb" - "vitess.io/vitess/go/vt/vtgate/evalengine" - + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/vt/vterrors" - "context" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/log" @@ -193,7 +190,7 @@ func (r *heartbeatReader) bindHeartbeatFetch() (string, error) { bindVars := map[string]*querypb.BindVariable{ "ks": sqltypes.StringBindVariable(r.keyspaceShard), } - parsed := sqlparser.BuildParsedQuery(sqlFetchMostRecentHeartbeat, sidecardb.GetIdentifier(), ":ks") + parsed := sqlparser.BuildParsedQuery(sqlFetchMostRecentHeartbeat, sidecar.GetIdentifier(), ":ks") bound, err := parsed.GenerateQuery(bindVars, nil) if err != nil { return "", err @@ -206,7 +203,7 @@ func parseHeartbeatResult(res *sqltypes.Result) (int64, error) { if len(res.Rows) != 1 { return 0, fmt.Errorf("failed to read heartbeat: writer query did not result in 1 row. Got %v", len(res.Rows)) } - ts, err := evalengine.ToInt64(res.Rows[0][0]) + ts, err := res.Rows[0][0].ToCastInt64() if err != nil { return 0, err } diff --git a/go/vt/vttablet/tabletserver/repltracker/repltracker.go b/go/vt/vttablet/tabletserver/repltracker/repltracker.go index 3d6359ed902..5ab44eb774e 100644 --- a/go/vt/vttablet/tabletserver/repltracker/repltracker.go +++ b/go/vt/vttablet/tabletserver/repltracker/repltracker.go @@ -66,7 +66,7 @@ type ReplTracker struct { func NewReplTracker(env tabletenv.Env, alias *topodatapb.TabletAlias) *ReplTracker { return &ReplTracker{ mode: env.Config().ReplicationTracker.Mode, - forceHeartbeat: env.Config().EnableLagThrottler || env.Config().ReplicationTracker.HeartbeatOnDemandSeconds.Get() > 0, + forceHeartbeat: env.Config().ReplicationTracker.HeartbeatOnDemandSeconds.Get() > 0, hw: newHeartbeatWriter(env, alias), hr: newHeartbeatReader(env), poller: &poller{}, diff --git a/go/vt/vttablet/tabletserver/repltracker/writer.go 
b/go/vt/vttablet/tabletserver/repltracker/writer.go index 310ee80021a..2b7dcd1ff2e 100644 --- a/go/vt/vttablet/tabletserver/repltracker/writer.go +++ b/go/vt/vttablet/tabletserver/repltracker/writer.go @@ -17,14 +17,13 @@ limitations under the License. package repltracker import ( + "context" "fmt" "sync" "sync/atomic" "time" - "google.golang.org/protobuf/proto" - - "context" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/timer" @@ -32,7 +31,6 @@ import ( "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -74,14 +72,14 @@ func newHeartbeatWriter(env tabletenv.Env, alias *topodatapb.TabletAlias) *heart config := env.Config() // config.EnableLagThrottler is a feature flag for the throttler; if throttler runs, then heartbeat must also run - if config.ReplicationTracker.Mode != tabletenv.Heartbeat && !config.EnableLagThrottler && config.ReplicationTracker.HeartbeatOnDemandSeconds.Get() == 0 { + if config.ReplicationTracker.Mode != tabletenv.Heartbeat && config.ReplicationTracker.HeartbeatOnDemandSeconds.Get() == 0 { return &heartbeatWriter{} } heartbeatInterval := config.ReplicationTracker.HeartbeatIntervalSeconds.Get() w := &heartbeatWriter{ env: env, enabled: true, - tabletAlias: proto.Clone(alias).(*topodatapb.TabletAlias), + tabletAlias: alias.CloneVT(), now: time.Now, interval: heartbeatInterval, onDemandDuration: config.ReplicationTracker.HeartbeatOnDemandSeconds.Get(), @@ -173,7 +171,7 @@ func (w *heartbeatWriter) bindHeartbeatVars(query string) (string, error) { "ts": sqltypes.Int64BindVariable(w.now().UnixNano()), "uid": sqltypes.Int64BindVariable(int64(w.tabletAlias.Uid)), } - parsed := sqlparser.BuildParsedQuery(query, sidecardb.GetIdentifier(), ":ts", ":uid", ":ks") + parsed := sqlparser.BuildParsedQuery(query, sidecar.GetIdentifier(), 
":ts", ":uid", ":ks") bound, err := parsed.GenerateQuery(bindVars, nil) if err != nil { return "", err diff --git a/go/vt/vttablet/tabletserver/rules/rules.go b/go/vt/vttablet/tabletserver/rules/rules.go index 14ac632d778..efbfcdf87e4 100644 --- a/go/vt/vttablet/tabletserver/rules/rules.go +++ b/go/vt/vttablet/tabletserver/rules/rules.go @@ -26,8 +26,6 @@ import ( "strconv" "time" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" @@ -832,7 +830,7 @@ func getuint64(val *querypb.BindVariable) (uv uint64, status int) { if err != nil { return 0, QROutOfRange } - v, err := evalengine.ToUint64(bv) + v, err := bv.ToCastUint64() if err != nil { return 0, QROutOfRange } @@ -845,7 +843,7 @@ func getint64(val *querypb.BindVariable) (iv int64, status int) { if err != nil { return 0, QROutOfRange } - v, err := evalengine.ToInt64(bv) + v, err := bv.ToCastInt64() if err != nil { return 0, QROutOfRange } diff --git a/go/vt/vttablet/tabletserver/schema/db.go b/go/vt/vttablet/tabletserver/schema/db.go index c8a33c17be0..85ebf3b1457 100644 --- a/go/vt/vttablet/tabletserver/schema/db.go +++ b/go/vt/vttablet/tabletserver/schema/db.go @@ -19,9 +19,9 @@ package schema import ( "context" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" ) @@ -164,7 +164,7 @@ func reloadTablesDataInDB(ctx context.Context, conn *connpool.DBConn, tables []* // generateFullQuery generates the full query from the query as a string. 
func generateFullQuery(query string) (*sqlparser.ParsedQuery, error) { stmt, err := sqlparser.Parse( - sqlparser.BuildParsedQuery(query, sidecardb.GetIdentifier(), sidecardb.GetIdentifier()).Query) + sqlparser.BuildParsedQuery(query, sidecar.GetIdentifier(), sidecar.GetIdentifier()).Query) if err != nil { return nil, err } @@ -304,7 +304,7 @@ func getChangedViewNames(ctx context.Context, conn *connpool.DBConn, isServingPr alloc := func() *sqltypes.Result { return &sqltypes.Result{} } bufferSize := 1000 - viewChangeQuery := sqlparser.BuildParsedQuery(detectViewChange, sidecardb.GetIdentifier()).Query + viewChangeQuery := sqlparser.BuildParsedQuery(detectViewChange, sidecar.GetIdentifier()).Query err := conn.Stream(ctx, viewChangeQuery, callback, alloc, bufferSize, 0) if err != nil { return nil, err @@ -337,7 +337,7 @@ func (se *Engine) getMismatchedTableNames(ctx context.Context, conn *connpool.DB } alloc := func() *sqltypes.Result { return &sqltypes.Result{} } bufferSize := 1000 - readTableCreateTimesQuery := sqlparser.BuildParsedQuery(readTableCreateTimes, sidecardb.GetIdentifier()).Query + readTableCreateTimesQuery := sqlparser.BuildParsedQuery(readTableCreateTimes, sidecar.GetIdentifier()).Query err := conn.Stream(ctx, readTableCreateTimesQuery, callback, alloc, bufferSize, 0) if err != nil { return nil, err diff --git a/go/vt/vttablet/tabletserver/schema/db_test.go b/go/vt/vttablet/tabletserver/schema/db_test.go index a066b50a8c9..44a3fd0c687 100644 --- a/go/vt/vttablet/tabletserver/schema/db_test.go +++ b/go/vt/vttablet/tabletserver/schema/db_test.go @@ -23,12 +23,13 @@ import ( "testing" "github.com/stretchr/testify/require" - "golang.org/x/exp/maps" + + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/maps2" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" 
"vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" ) @@ -134,7 +135,7 @@ func TestGetChangedViewNames(t *testing.T) { require.NoError(t, err) // Success - query := fmt.Sprintf(detectViewChange, sidecardb.GetIdentifier()) + query := fmt.Sprintf(detectViewChange, sidecar.GetIdentifier()) db.AddQuery(query, sqltypes.MakeTestResult( sqltypes.MakeTestFields("table_name", "varchar"), "lead", @@ -144,7 +145,7 @@ func TestGetChangedViewNames(t *testing.T) { got, err := getChangedViewNames(context.Background(), conn, true) require.NoError(t, err) require.Len(t, got, 3) - require.ElementsMatch(t, maps.Keys(got), []string{"v1", "v2", "lead"}) + require.ElementsMatch(t, maps2.Keys(got), []string{"v1", "v2", "lead"}) require.NoError(t, db.LastError()) // Not serving primary @@ -180,7 +181,7 @@ func TestGetViewDefinition(t *testing.T) { got, err := collectGetViewDefinitions(conn, bv) require.NoError(t, err) require.Len(t, got, 2) - require.ElementsMatch(t, maps.Keys(got), []string{"v1", "lead"}) + require.ElementsMatch(t, maps2.Keys(got), []string{"v1", "lead"}) require.Equal(t, "create_view_v1", got["v1"]) require.Equal(t, "create_view_lead", got["lead"]) require.NoError(t, db.LastError()) @@ -331,7 +332,7 @@ func TestGetMismatchedTableNames(t *testing.T) { }, } - query := fmt.Sprintf(readTableCreateTimes, sidecardb.GetIdentifier()) + query := fmt.Sprintf(readTableCreateTimes, sidecar.GetIdentifier()) for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { db := fakesqldb.New(t) @@ -350,7 +351,7 @@ func TestGetMismatchedTableNames(t *testing.T) { if tc.expectedError != "" { require.ErrorContains(t, err, tc.expectedError) } else { - require.ElementsMatch(t, maps.Keys(mismatchedTableNames), tc.expectedTableNames) + require.ElementsMatch(t, maps2.Keys(mismatchedTableNames), tc.expectedTableNames) require.NoError(t, db.LastError()) } }) diff --git a/go/vt/vttablet/tabletserver/schema/engine.go b/go/vt/vttablet/tabletserver/schema/engine.go index 
25e465602e5..4d7bd96bce5 100644 --- a/go/vt/vttablet/tabletserver/schema/engine.go +++ b/go/vt/vttablet/tabletserver/schema/engine.go @@ -26,8 +26,10 @@ import ( "sync" "time" - "golang.org/x/exp/maps" - + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/maps2" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/mysqlctl/tmutils" @@ -44,7 +46,6 @@ import ( "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -69,7 +70,7 @@ type Engine struct { tables map[string]*Table lastChange int64 // the position at which the schema was last loaded. it is only used in conjunction with ReloadAt - reloadAtPos mysql.Position + reloadAtPos replication.Position notifierMu sync.Mutex notifiers map[string]notifier // isServingPrimary stores if this tablet is currently the serving primary or not. 
@@ -152,7 +153,7 @@ func (se *Engine) syncSidecarDB(ctx context.Context, conn *dbconnpool.DBConnecti var exec sidecardb.Exec = func(ctx context.Context, query string, maxRows int, useDB bool) (*sqltypes.Result, error) { if useDB { - _, err := conn.ExecuteFetch(sqlparser.BuildParsedQuery("use %s", sidecardb.GetIdentifier()).Query, maxRows, false) + _, err := conn.ExecuteFetch(sqlparser.BuildParsedQuery("use %s", sidecar.GetIdentifier()).Query, maxRows, false) if err != nil { return nil, err } @@ -194,7 +195,7 @@ func (se *Engine) EnsureConnectionAndDB(tabletType topodatapb.TabletType) error if tabletType != topodatapb.TabletType_PRIMARY { return err } - if merr, isSQLErr := err.(*mysql.SQLError); !isSQLErr || merr.Num != mysql.ERBadDb { + if merr, isSQLErr := err.(*sqlerror.SQLError); !isSQLErr || merr.Num != sqlerror.ERBadDb { return err } @@ -330,10 +331,7 @@ func (se *Engine) MakeNonPrimary() { se.isServingPrimary = false for _, t := range se.tables { if t.SequenceInfo != nil { - t.SequenceInfo.Lock() - t.SequenceInfo.NextVal = 0 - t.SequenceInfo.LastVal = 0 - t.SequenceInfo.Unlock() + t.SequenceInfo.Reset() } } } @@ -357,14 +355,14 @@ func (se *Engine) EnableHistorian(enabled bool) error { // The includeStats argument controls whether table size statistics should be // emitted, as they can be expensive to calculate for a large number of tables func (se *Engine) Reload(ctx context.Context) error { - return se.ReloadAt(ctx, mysql.Position{}) + return se.ReloadAt(ctx, replication.Position{}) } // ReloadAt reloads the schema info from the db. // Any tables that have changed since the last load are updated. // It maintains the position at which the schema was reloaded and if the same position is provided // (say by multiple vstreams) it returns the cached schema. 
In case of a newer or empty pos it always reloads the schema -func (se *Engine) ReloadAt(ctx context.Context, pos mysql.Position) error { +func (se *Engine) ReloadAt(ctx context.Context, pos replication.Position) error { return se.ReloadAtEx(ctx, pos, true) } @@ -374,7 +372,7 @@ func (se *Engine) ReloadAt(ctx context.Context, pos mysql.Position) error { // (say by multiple vstreams) it returns the cached schema. In case of a newer or empty pos it always reloads the schema // The includeStats argument controls whether table size statistics should be // emitted, as they can be expensive to calculate for a large number of tables -func (se *Engine) ReloadAtEx(ctx context.Context, pos mysql.Position, includeStats bool) error { +func (se *Engine) ReloadAtEx(ctx context.Context, pos replication.Position, includeStats bool) error { se.mu.Lock() defer se.mu.Unlock() if !se.isOpen { @@ -382,7 +380,7 @@ func (se *Engine) ReloadAtEx(ctx context.Context, pos mysql.Position, includeSta return nil } if !pos.IsZero() && se.reloadAtPos.AtLeast(pos) { - log.V(2).Infof("ReloadAtEx: found cached schema at %s", mysql.EncodePosition(pos)) + log.V(2).Infof("ReloadAtEx: found cached schema at %s", replication.EncodePosition(pos)) return nil } if err := se.reload(ctx, includeStats); err != nil { @@ -457,12 +455,12 @@ func (se *Engine) reload(ctx context.Context, includeStats bool) error { for _, row := range tableData.Rows { tableName := row[0].ToString() curTables[tableName] = true - createTime, _ := evalengine.ToInt64(row[2]) + createTime, _ := row[2].ToCastInt64() var fileSize, allocatedSize uint64 if includeStats { - fileSize, _ = evalengine.ToUint64(row[4]) - allocatedSize, _ = evalengine.ToUint64(row[5]) + fileSize, _ = row[4].ToCastUint64() + allocatedSize, _ = row[5].ToCastUint64() // publish the size metrics se.tableFileSizeGauge.Set(tableName, int64(fileSize)) se.tableAllocatedSizeGauge.Set(tableName, int64(allocatedSize)) @@ -588,7 +586,7 @@ func (se *Engine) 
getDroppedTables(curTables map[string]bool, changedViews map[s } } - return maps.Values(dropped) + return maps2.Values(dropped) } func getTableData(ctx context.Context, conn *connpool.DBConn, includeStats bool) (*sqltypes.Result, error) { @@ -608,7 +606,7 @@ func (se *Engine) updateInnoDBRowsRead(ctx context.Context, conn *connpool.DBCon } if len(readRowsData.Rows) == 1 && len(readRowsData.Rows[0]) == 2 { - value, err := evalengine.ToInt64(readRowsData.Rows[0][1]) + value, err := readRowsData.Rows[0][1].ToCastInt64() if err != nil { return err } @@ -629,7 +627,7 @@ func (se *Engine) mysqlTime(ctx context.Context, conn *connpool.DBConn) (int64, if len(tm.Rows) != 1 || len(tm.Rows[0]) != 1 || tm.Rows[0][0].IsNull() { return 0, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "unexpected result for MySQL time: %+v", tm.Rows) } - t, err := evalengine.ToInt64(tm.Rows[0][0]) + t, err := tm.Rows[0][0].ToCastInt64() if err != nil { return 0, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not parse time %v: %v", tm, err) } @@ -764,6 +762,32 @@ func (se *Engine) GetSchema() map[string]*Table { return tables } +// MarshalMinimalSchema returns a protobuf encoded binlogdata.MinimalSchema +func (se *Engine) MarshalMinimalSchema() ([]byte, error) { + se.mu.Lock() + defer se.mu.Unlock() + dbSchema := &binlogdatapb.MinimalSchema{ + Tables: make([]*binlogdatapb.MinimalTable, 0, len(se.tables)), + } + for _, table := range se.tables { + dbSchema.Tables = append(dbSchema.Tables, newMinimalTable(table)) + } + return dbSchema.MarshalVT() +} + +func newMinimalTable(st *Table) *binlogdatapb.MinimalTable { + table := &binlogdatapb.MinimalTable{ + Name: st.Name.String(), + Fields: st.Fields, + } + pkc := make([]int64, len(st.PKColumns)) + for i, pk := range st.PKColumns { + pkc[i] = int64(pk) + } + table.PKColumns = pkc + return table +} + // GetConnection returns a connection from the pool func (se *Engine) GetConnection(ctx context.Context) (*connpool.DBConn, error) { return se.conns.Get(ctx, nil) @@ 
-829,3 +853,19 @@ func extractNamesFromTablesList(tables []*Table) []string { } return tableNames } + +func (se *Engine) ResetSequences(tables []string) error { + se.mu.Lock() + defer se.mu.Unlock() + for _, tableName := range tables { + if table, ok := se.tables[tableName]; ok { + if table.SequenceInfo != nil { + log.Infof("Resetting sequence info for table %v: %s", tableName, table.SequenceInfo) + table.SequenceInfo.Reset() + } + } else { + return vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "table %v not found in schema", tableName) + } + } + return nil +} diff --git a/go/vt/vttablet/tabletserver/schema/engine_test.go b/go/vt/vttablet/tabletserver/schema/engine_test.go index 950619f95de..4000795d9d0 100644 --- a/go/vt/vttablet/tabletserver/schema/engine_test.go +++ b/go/vt/vttablet/tabletserver/schema/engine_test.go @@ -31,6 +31,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/constants/sidecar" + + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/event/syslogger" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/fakesqldb" @@ -39,7 +44,6 @@ import ( "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/dbconfigs" querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema/schematest" @@ -78,7 +82,7 @@ func TestOpenAndReload(t *testing.T) { )) firstReadRowsValue := 12 AddFakeInnoDBReadRowsResult(db, firstReadRowsValue) - se := newEngine(10, 10*time.Second, 10*time.Second, 0, db) + se := newEngine(10*time.Second, 10*time.Second, 0, db) se.Open() defer se.Close() @@ -205,13 +209,13 @@ func TestOpenAndReload(t *testing.T) { assert.Equal(t, int64(0), se.tableFileSizeGauge.Counts()["msg"]) // ReloadAt tests - pos1, err := mysql.DecodePosition("MariaDB/0-41983-20") + pos1, err := 
replication.DecodePosition("MariaDB/0-41983-20") require.NoError(t, err) - pos2, err := mysql.DecodePosition("MariaDB/0-41983-40") + pos2, err := replication.DecodePosition("MariaDB/0-41983-40") require.NoError(t, err) se.UnregisterNotifier("test") - err = se.ReloadAt(context.Background(), mysql.Position{}) + err = se.ReloadAt(context.Background(), replication.Position{}) require.NoError(t, err) assert.Equal(t, want, se.GetSchema()) @@ -269,7 +273,7 @@ func TestReloadWithSwappedTables(t *testing.T) { firstReadRowsValue := 12 AddFakeInnoDBReadRowsResult(db, firstReadRowsValue) - se := newEngine(10, 10*time.Second, 10*time.Second, 0, db) + se := newEngine(10*time.Second, 10*time.Second, 0, db) se.Open() defer se.Close() want := initialSchema() @@ -419,7 +423,7 @@ func TestOpenFailedDueToExecErr(t *testing.T) { schematest.AddDefaultQueries(db) want := "injected error" db.RejectQueryPattern(baseShowTablesPattern, want) - se := newEngine(10, 1*time.Second, 1*time.Second, 0, db) + se := newEngine(1*time.Second, 1*time.Second, 0, db) err := se.Open() if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("se.Open: %v, want %s", err, want) @@ -447,10 +451,10 @@ func TestOpenFailedDueToLoadTableErr(t *testing.T) { db.AddQueryPattern(fmt.Sprintf(mysql.GetColumnNamesQueryPatternForTable, "test_view"), sqltypes.MakeTestResult(sqltypes.MakeTestFields("column_name", "varchar"), "")) // rejecting the impossible query - db.AddRejectedQuery("SELECT * FROM `fakesqldb`.`test_view` WHERE 1 != 1", mysql.NewSQLErrorFromError(errors.New("The user specified as a definer ('root'@'%') does not exist (errno 1449) (sqlstate HY000)"))) + db.AddRejectedQuery("SELECT * FROM `fakesqldb`.`test_view` WHERE 1 != 1", sqlerror.NewSQLErrorFromError(errors.New("The user specified as a definer ('root'@'%') does not exist (errno 1449) (sqlstate HY000)"))) AddFakeInnoDBReadRowsResult(db, 0) - se := newEngine(10, 1*time.Second, 1*time.Second, 0, db) + se := newEngine(1*time.Second, 1*time.Second, 
0, db) err := se.Open() // failed load should return an error because of test_table assert.ErrorContains(t, err, "Row count exceeded") @@ -482,10 +486,10 @@ func TestOpenNoErrorDueToInvalidViews(t *testing.T) { db.AddQueryPattern(fmt.Sprintf(mysql.GetColumnNamesQueryPatternForTable, "bar_view"), sqltypes.MakeTestResult(sqltypes.MakeTestFields("column_name", "varchar"), "col1", "col2")) // rejecting the impossible query - db.AddRejectedQuery("SELECT `col1`, `col2` FROM `fakesqldb`.`bar_view` WHERE 1 != 1", mysql.NewSQLError(mysql.ERWrongFieldWithGroup, mysql.SSClientError, "random error for table bar_view")) + db.AddRejectedQuery("SELECT `col1`, `col2` FROM `fakesqldb`.`bar_view` WHERE 1 != 1", sqlerror.NewSQLError(sqlerror.ERWrongFieldWithGroup, sqlerror.SSClientError, "random error for table bar_view")) AddFakeInnoDBReadRowsResult(db, 0) - se := newEngine(10, 1*time.Second, 1*time.Second, 0, db) + se := newEngine(1*time.Second, 1*time.Second, 0, db) err := se.Open() require.NoError(t, err) @@ -501,7 +505,7 @@ func TestExportVars(t *testing.T) { db := fakesqldb.New(t) defer db.Close() schematest.AddDefaultQueries(db) - se := newEngine(10, 1*time.Second, 1*time.Second, 0, db) + se := newEngine(1*time.Second, 1*time.Second, 0, db) se.Open() defer se.Close() expvar.Do(func(kv expvar.KeyValue) { @@ -513,7 +517,7 @@ func TestStatsURL(t *testing.T) { db := fakesqldb.New(t) defer db.Close() schematest.AddDefaultQueries(db) - se := newEngine(10, 1*time.Second, 1*time.Second, 0, db) + se := newEngine(1*time.Second, 1*time.Second, 0, db) se.Open() defer se.Close() @@ -543,7 +547,7 @@ func TestSchemaEngineCloseTickRace(t *testing.T) { }) AddFakeInnoDBReadRowsResult(db, 12) // Start the engine with a small reload tick - se := newEngine(10, 100*time.Millisecond, 1*time.Second, 0, db) + se := newEngine(100*time.Millisecond, 1*time.Second, 0, db) err := se.Open() require.NoError(t, err) @@ -570,9 +574,8 @@ func TestSchemaEngineCloseTickRace(t *testing.T) { } } -func 
newEngine(queryCacheSize int, reloadTime time.Duration, idleTimeout time.Duration, schemaMaxAgeSeconds int64, db *fakesqldb.DB) *Engine { +func newEngine(reloadTime time.Duration, idleTimeout time.Duration, schemaMaxAgeSeconds int64, db *fakesqldb.DB) *Engine { config := tabletenv.NewDefaultConfig() - config.QueryCacheSize = queryCacheSize _ = config.SchemaReloadIntervalSeconds.Set(reloadTime.String()) _ = config.OltpReadPool.IdleTimeoutSeconds.Set(idleTimeout.String()) _ = config.OlapReadPool.IdleTimeoutSeconds.Set(idleTimeout.String()) @@ -1110,7 +1113,7 @@ func TestEngineReload(t *testing.T) { conn, err := connpool.NewDBConnNoPool(context.Background(), db.ConnParams(), nil, nil) require.NoError(t, err) - se := newEngine(10, 10*time.Second, 10*time.Second, 0, db) + se := newEngine(10*time.Second, 10*time.Second, 0, db) se.conns.Open(se.cp, se.cp, se.cp) se.isOpen = true se.notifiers = make(map[string]notifier) @@ -1173,7 +1176,7 @@ func TestEngineReload(t *testing.T) { // Detecting view changes. // According to the database, v2, v3, v4, and v5 require updating. 
- db.AddQuery(fmt.Sprintf(detectViewChange, sidecardb.GetIdentifier()), sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name", "varchar"), + db.AddQuery(fmt.Sprintf(detectViewChange, sidecar.GetIdentifier()), sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name", "varchar"), "v2", "v3", "v4", diff --git a/go/vt/vttablet/tabletserver/schema/historian.go b/go/vt/vttablet/tabletserver/schema/historian.go index 4cd83b4d48f..e40777c6fe5 100644 --- a/go/vt/vttablet/tabletserver/schema/historian.go +++ b/go/vt/vttablet/tabletserver/schema/historian.go @@ -22,12 +22,11 @@ import ( "sync" "time" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - "vitess.io/vitess/go/vt/sidecardb" - "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -43,7 +42,7 @@ const vl = 10 // trackedSchema has the snapshot of the table at a given pos (reached by ddl) type trackedSchema struct { schema map[string]*binlogdatapb.MinimalTable - pos mysql.Position + pos replication.Position ddl string timeUpdated int64 } @@ -146,7 +145,7 @@ func (h *historian) GetTableForPos(tableName sqlparser.IdentifierCS, gtid string if gtid == "" { return nil, nil } - pos, err := mysql.DecodePosition(gtid) + pos, err := replication.DecodePosition(gtid) if err != nil { return nil, err } @@ -172,10 +171,10 @@ func (h *historian) loadFromDB(ctx context.Context) error { var tableData *sqltypes.Result if h.lastID == 0 && h.schemaMaxAgeSeconds > 0 { // only at vttablet start schemaMaxAge := time.Now().UTC().Add(time.Duration(-h.schemaMaxAgeSeconds) * time.Second) - tableData, err = conn.Exec(ctx, sqlparser.BuildParsedQuery(getInitialSchemaVersions, sidecardb.GetIdentifier(), + tableData, err = conn.Exec(ctx, 
sqlparser.BuildParsedQuery(getInitialSchemaVersions, sidecar.GetIdentifier(), schemaMaxAge.Unix()).Query, 10000, true) } else { - tableData, err = conn.Exec(ctx, sqlparser.BuildParsedQuery(getNextSchemaVersions, sidecardb.GetIdentifier(), + tableData, err = conn.Exec(ctx, sqlparser.BuildParsedQuery(getNextSchemaVersions, sidecar.GetIdentifier(), h.lastID).Query, 10000, true) } @@ -205,12 +204,12 @@ func (h *historian) loadFromDB(ctx context.Context) error { // readRow converts a row from the schema_version table to a trackedSchema func (h *historian) readRow(row []sqltypes.Value) (*trackedSchema, int64, error) { - id, _ := evalengine.ToInt64(row[0]) + id, _ := row[0].ToCastInt64() rowBytes, err := row[1].ToBytes() if err != nil { return nil, 0, err } - pos, err := mysql.DecodePosition(string(rowBytes)) + pos, err := replication.DecodePosition(string(rowBytes)) if err != nil { return nil, 0, err } @@ -219,7 +218,7 @@ func (h *historian) readRow(row []sqltypes.Value) (*trackedSchema, int64, error) return nil, 0, err } ddl := string(rowBytes) - timeUpdated, err := evalengine.ToInt64(row[3]) + timeUpdated, err := row[3].ToCastInt64() if err != nil { return nil, 0, err } @@ -232,7 +231,7 @@ func (h *historian) readRow(row []sqltypes.Value) (*trackedSchema, int64, error) return nil, 0, err } log.V(vl).Infof("Read tracked schema from db: id %d, pos %v, ddl %s, schema len %d, time_updated %d \n", - id, mysql.EncodePosition(pos), ddl, len(sch.Tables), timeUpdated) + id, replication.EncodePosition(pos), ddl, len(sch.Tables), timeUpdated) tables := map[string]*binlogdatapb.MinimalTable{} for _, t := range sch.Tables { @@ -281,7 +280,7 @@ func (h *historian) sortSchemas() { } // getTableFromHistoryForPos looks in the cache for a schema for a specific gtid -func (h *historian) getTableFromHistoryForPos(tableName sqlparser.IdentifierCS, pos mysql.Position) *binlogdatapb.MinimalTable { +func (h *historian) getTableFromHistoryForPos(tableName sqlparser.IdentifierCS, pos 
replication.Position) *binlogdatapb.MinimalTable { idx := sort.Search(len(h.schemas), func(i int) bool { return pos.Equal(h.schemas[i].pos) || !pos.AtLeast(h.schemas[i].pos) }) diff --git a/go/vt/vttablet/tabletserver/schema/load_table_test.go b/go/vt/vttablet/tabletserver/schema/load_table_test.go index 24dff88793e..eeefb688e61 100644 --- a/go/vt/vttablet/tabletserver/schema/load_table_test.go +++ b/go/vt/vttablet/tabletserver/schema/load_table_test.go @@ -19,11 +19,12 @@ package schema import ( "context" "errors" - "reflect" "strings" "testing" "time" + "vitess.io/vitess/go/test/utils" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -80,6 +81,8 @@ func TestLoadView(t *testing.T) { assert.Equal(t, want, table) } +// TestLoadTableSequence tests that sequence tables are loaded correctly. +// It also confirms that a reset of a sequence table works. func TestLoadTableSequence(t *testing.T) { db := fakesqldb.New(t) defer db.Close() @@ -93,9 +96,12 @@ func TestLoadTableSequence(t *testing.T) { } table.Fields = nil table.PKColumns = nil - if !reflect.DeepEqual(table, want) { - t.Errorf("Table:\n%#v, want\n%#v", table, want) - } + utils.MustMatch(t, want, table) + + table.SequenceInfo.NextVal = 10 + table.SequenceInfo.LastVal = 5 + table.SequenceInfo.Reset() + utils.MustMatch(t, want, table) } func TestLoadTableMessage(t *testing.T) { diff --git a/go/vt/vttablet/tabletserver/schema/main_test.go b/go/vt/vttablet/tabletserver/schema/main_test.go index 19fc66c36d1..0948c1313fc 100644 --- a/go/vt/vttablet/tabletserver/schema/main_test.go +++ b/go/vt/vttablet/tabletserver/schema/main_test.go @@ -37,7 +37,7 @@ func getTestSchemaEngine(t *testing.T, schemaMaxAgeSeconds int64) (*Engine, *fak db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{}) db.AddQuery(mysql.BaseShowPrimary, &sqltypes.Result{}) AddFakeInnoDBReadRowsResult(db, 1) - se := newEngine(10, 10*time.Second, 10*time.Second, schemaMaxAgeSeconds, db) + se := 
newEngine(10*time.Second, 10*time.Second, schemaMaxAgeSeconds, db) require.NoError(t, se.Open()) cancel := func() { defer db.Close() diff --git a/go/vt/vttablet/tabletserver/schema/schema.go b/go/vt/vttablet/tabletserver/schema/schema.go index cd23b57607a..95c191392cd 100644 --- a/go/vt/vttablet/tabletserver/schema/schema.go +++ b/go/vt/vttablet/tabletserver/schema/schema.go @@ -20,6 +20,8 @@ import ( "sync" "time" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" querypb "vitess.io/vitess/go/vt/proto/query" @@ -71,6 +73,23 @@ type SequenceInfo struct { LastVal int64 } +// Reset clears the cache for the sequence. This is called to ensure that we always start with a fresh cache, +// when a new primary is elected, and when a table is moved into a new keyspace. +// When we first need a new value from a sequence, i.e. when the schema engine sees an uninitialized sequence, it will +// get the next set of values from the backing sequence table and cache them. +func (seq *SequenceInfo) Reset() { + seq.Lock() + defer seq.Unlock() + seq.NextVal = 0 + seq.LastVal = 0 +} + +func (seq *SequenceInfo) String() { + seq.Lock() + defer seq.Unlock() + log.Infof("SequenceInfo: NextVal: %d, LastVal: %d", seq.NextVal, seq.LastVal) +} + // MessageInfo contains info specific to message tables. 
type MessageInfo struct { // Fields stores the field info to be diff --git a/go/vt/vttablet/tabletserver/schema/tracker.go b/go/vt/vttablet/tabletserver/schema/tracker.go index ec9050b5c7e..9e036bb5139 100644 --- a/go/vt/vttablet/tabletserver/schema/tracker.go +++ b/go/vt/vttablet/tabletserver/schema/tracker.go @@ -23,11 +23,9 @@ import ( "sync" "time" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/schema" - "vitess.io/vitess/go/vt/sidecardb" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/sqltypes" @@ -157,10 +155,10 @@ func (tr *Tracker) process(ctx context.Context) { } } -func (tr *Tracker) currentPosition(ctx context.Context) (mysql.Position, error) { +func (tr *Tracker) currentPosition(ctx context.Context) (replication.Position, error) { conn, err := tr.engine.cp.Connect(ctx) if err != nil { - return mysql.Position{}, err + return replication.Position{}, err } defer conn.Close() return conn.PrimaryPosition() @@ -173,7 +171,7 @@ func (tr *Tracker) isSchemaVersionTableEmpty(ctx context.Context) (bool, error) } defer conn.Recycle() result, err := conn.Exec(ctx, sqlparser.BuildParsedQuery("select id from %s.schema_version limit 1", - sidecardb.GetIdentifier()).Query, 1, false) + sidecar.GetIdentifier()).Query, 1, false) if err != nil { return false, err } @@ -204,7 +202,7 @@ func (tr *Tracker) possiblyInsertInitialSchema(ctx context.Context) error { if err != nil { return err } - gtid := mysql.EncodePosition(pos) + gtid := replication.EncodePosition(pos) log.Infof("Saving initial schema for gtid %s", gtid) return tr.saveCurrentSchemaToDb(ctx, gtid, ddl, timestamp) @@ -221,14 +219,10 @@ func (tr *Tracker) schemaUpdated(gtid string, ddl string, timestamp int64) error } func (tr *Tracker) saveCurrentSchemaToDb(ctx context.Context, gtid, ddl string, timestamp int64) error { - tables := tr.engine.GetSchema() - dbSchema := &binlogdatapb.MinimalSchema{ - Tables: 
[]*binlogdatapb.MinimalTable{}, - } - for _, table := range tables { - dbSchema.Tables = append(dbSchema.Tables, newMinimalTable(table)) + blob, err := tr.engine.MarshalMinimalSchema() + if err != nil { + return err } - blob, _ := dbSchema.MarshalVT() conn, err := tr.engine.GetConnection(ctx) if err != nil { @@ -238,7 +232,7 @@ func (tr *Tracker) saveCurrentSchemaToDb(ctx context.Context, gtid, ddl string, query := sqlparser.BuildParsedQuery("insert into %s.schema_version "+ "(pos, ddl, schemax, time_updated) "+ - "values (%s, %s, %s, %d)", sidecardb.GetIdentifier(), encodeString(gtid), + "values (%s, %s, %s, %d)", sidecar.GetIdentifier(), encodeString(gtid), encodeString(ddl), encodeString(string(blob)), timestamp).Query _, err = conn.Exec(ctx, query, 1, false) if err != nil { @@ -247,19 +241,6 @@ func (tr *Tracker) saveCurrentSchemaToDb(ctx context.Context, gtid, ddl string, return nil } -func newMinimalTable(st *Table) *binlogdatapb.MinimalTable { - table := &binlogdatapb.MinimalTable{ - Name: st.Name.String(), - Fields: st.Fields, - } - var pkc []int64 - for _, pk := range st.PKColumns { - pkc = append(pkc, int64(pk)) - } - table.PKColumns = pkc - return table -} - func encodeString(in string) string { buf := bytes.NewBuffer(nil) sqltypes.NewVarChar(in).EncodeSQL(buf) diff --git a/go/vt/vttablet/tabletserver/state_manager.go b/go/vt/vttablet/tabletserver/state_manager.go index 8453e685b5b..2115871c6bb 100644 --- a/go/vt/vttablet/tabletserver/state_manager.go +++ b/go/vt/vttablet/tabletserver/state_manager.go @@ -24,7 +24,6 @@ import ( "time" "golang.org/x/sync/semaphore" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/log" @@ -90,7 +89,7 @@ type stateManager struct { wantTabletType topodatapb.TabletType state servingState target *querypb.Target - terTimestamp time.Time + ptsTimestamp time.Time retrying bool replHealthy bool lameduck bool @@ -192,7 +191,7 @@ type ( // Init performs the second phase of initialization. 
func (sm *stateManager) Init(env tabletenv.Env, target *querypb.Target) { - sm.target = proto.Clone(target).(*querypb.Target) + sm.target = target.CloneVT() sm.transitioning = semaphore.NewWeighted(1) sm.checkMySQLThrottler = semaphore.NewWeighted(1) sm.timebombDuration = env.Config().OltpReadPool.TimeoutSeconds.Get() * 10 @@ -209,7 +208,7 @@ func (sm *stateManager) Init(env tabletenv.Env, target *querypb.Target) { // be honored. // If sm is already in the requested state, it returns stateChanged as // false. -func (sm *stateManager) SetServingType(tabletType topodatapb.TabletType, terTimestamp time.Time, state servingState, reason string) error { +func (sm *stateManager) SetServingType(tabletType topodatapb.TabletType, ptsTimestamp time.Time, state servingState, reason string) error { defer sm.ExitLameduck() sm.hs.Open() @@ -219,8 +218,8 @@ func (sm *stateManager) SetServingType(tabletType topodatapb.TabletType, terTime state = StateNotConnected } - log.Infof("Starting transition to %v %v, timestamp: %v", tabletType, state, terTimestamp) - if sm.mustTransition(tabletType, terTimestamp, state, reason) { + log.Infof("Starting transition to %v %v, primary term start timestamp: %v", tabletType, state, ptsTimestamp) + if sm.mustTransition(tabletType, ptsTimestamp, state, reason) { return sm.execTransition(tabletType, state) } return nil @@ -230,7 +229,7 @@ func (sm *stateManager) SetServingType(tabletType topodatapb.TabletType, terTime // state. If so, it acquires the semaphore and returns true. If a transition is // already in progress, it waits. If the desired state is already reached, it // returns false without acquiring the semaphore. 
-func (sm *stateManager) mustTransition(tabletType topodatapb.TabletType, terTimestamp time.Time, state servingState, reason string) bool { +func (sm *stateManager) mustTransition(tabletType topodatapb.TabletType, ptsTimestamp time.Time, state servingState, reason string) bool { if sm.transitioning.Acquire(context.Background(), 1) != nil { return false } @@ -239,7 +238,7 @@ func (sm *stateManager) mustTransition(tabletType topodatapb.TabletType, terTime sm.wantTabletType = tabletType sm.wantState = state - sm.terTimestamp = terTimestamp + sm.ptsTimestamp = ptsTimestamp sm.reason = reason if sm.target.TabletType == tabletType && sm.state == state { sm.transitioning.Release(1) @@ -639,7 +638,7 @@ func (sm *stateManager) stateStringLocked(tabletType topodatapb.TabletType, stat if tabletType != topodatapb.TabletType_PRIMARY { return fmt.Sprintf("%v: %v", tabletType, state) } - return fmt.Sprintf("%v: %v, %v", tabletType, state, sm.terTimestamp.Local().Format("Jan 2, 2006 at 15:04:05 (MST)")) + return fmt.Sprintf("%v: %v, %v", tabletType, state, sm.ptsTimestamp.Local().Format("Jan 2, 2006 at 15:04:05 (MST)")) } func (sm *stateManager) handleGracePeriod(tabletType topodatapb.TabletType) { @@ -674,7 +673,7 @@ func (sm *stateManager) Broadcast() { defer sm.mu.Unlock() lag, err := sm.refreshReplHealthLocked() - sm.hs.ChangeState(sm.target.TabletType, sm.terTimestamp, lag, err, sm.isServingLocked()) + sm.hs.ChangeState(sm.target.TabletType, sm.ptsTimestamp, lag, err, sm.isServingLocked()) } func (sm *stateManager) refreshReplHealthLocked() (time.Duration, error) { @@ -806,7 +805,7 @@ func (sm *stateManager) State() servingState { func (sm *stateManager) Target() *querypb.Target { sm.mu.Lock() defer sm.mu.Unlock() - return proto.Clone(sm.target).(*querypb.Target) + return sm.target.CloneVT() } // IsServingString returns the name of the current TabletServer state. 
diff --git a/go/vt/vttablet/tabletserver/state_manager_test.go b/go/vt/vttablet/tabletserver/state_manager_test.go index b4793915c00..23e70a66760 100644 --- a/go/vt/vttablet/tabletserver/state_manager_test.go +++ b/go/vt/vttablet/tabletserver/state_manager_test.go @@ -74,7 +74,7 @@ func TestStateManagerServePrimary(t *testing.T) { require.NoError(t, err) assert.Equal(t, false, sm.lameduck) - assert.Equal(t, testNow, sm.terTimestamp) + assert.Equal(t, testNow, sm.ptsTimestamp) verifySubcomponent(t, 1, sm.watcher, testStateClosed) @@ -517,10 +517,11 @@ func TestStateManagerCheckMySQL(t *testing.T) { } func TestStateManagerValidations(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() sm := newTestStateManager(t) target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} - sm.target = proto.Clone(target).(*querypb.Target) - + sm.target = target.CloneVT() err := sm.StartRequest(ctx, target, false) assert.Contains(t, err.Error(), "operation not allowed") @@ -579,6 +580,8 @@ func TestStateManagerValidations(t *testing.T) { } func TestStateManagerWaitForRequests(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() sm := newTestStateManager(t) defer sm.StopService() target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} diff --git a/go/vt/vttablet/tabletserver/stateful_connection.go b/go/vt/vttablet/tabletserver/stateful_connection.go index 5e14ee7d904..c851f5f0eed 100644 --- a/go/vt/vttablet/tabletserver/stateful_connection.go +++ b/go/vt/vttablet/tabletserver/stateful_connection.go @@ -21,11 +21,10 @@ import ( "fmt" "time" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/pools" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" - "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" @@ -97,7 +96,7 @@ func (sc 
*StatefulConnection) Exec(ctx context.Context, query string, maxrows in } r, err := sc.dbConn.ExecOnce(ctx, query, maxrows, wantfields) if err != nil { - if mysql.IsConnErr(err) { + if sqlerror.IsConnErr(err) { select { case <-ctx.Done(): // If the context is done, the query was killed. @@ -277,9 +276,6 @@ func (sc *StatefulConnection) LogTransaction(reason tx.ReleaseReason) { sc.Stats().UserTransactionCount.Add([]string{username, reason.Name()}, 1) sc.Stats().UserTransactionTimesNs.Add([]string{username, reason.Name()}, int64(duration)) sc.txProps.Stats.Add(reason.Name(), duration) - if sc.txProps.LogToFile { - log.Infof("Logged transaction: %s", sc.String(sc.env.Config().SanitizeLogMessages)) - } tabletenv.TxLogger.Send(sc) } @@ -337,7 +333,7 @@ func (sc *StatefulConnection) StreamLoadData(ctx context.Context, lines chan str case err == nil: // Success. return result, err - case !mysql.IsConnErr(err): + case !sqlerror.IsConnErr(err): // Not a connection error. Don't retry. return nil, err case attempt == 2: diff --git a/go/vt/vttablet/tabletserver/stateful_connection_pool_test.go b/go/vt/vttablet/tabletserver/stateful_connection_pool_test.go index 79e70dc4ffe..b9ea4dfc185 100644 --- a/go/vt/vttablet/tabletserver/stateful_connection_pool_test.go +++ b/go/vt/vttablet/tabletserver/stateful_connection_pool_test.go @@ -29,9 +29,9 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletserver/tx" ) -var ctx = context.Background() - func TestActivePoolClientRowsFound(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() db.AddQuery("begin", &sqltypes.Result{}) @@ -58,6 +58,8 @@ func TestActivePoolClientRowsFound(t *testing.T) { } func TestActivePoolForAllTxProps(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() pool := newActivePool() @@ -84,6 +86,8 @@ func TestActivePoolForAllTxProps(t *testing.T) { } func 
TestStatefulPoolShutdownNonTx(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() pool := newActivePool() @@ -122,6 +126,8 @@ func TestStatefulPoolShutdownNonTx(t *testing.T) { } func TestStatefulPoolShutdownAll(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() pool := newActivePool() @@ -157,7 +163,7 @@ func TestActivePoolGetConnNonExistentTransaction(t *testing.T) { } func TestExecWithAbortedCtx(t *testing.T) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(context.Background()) db := fakesqldb.New(t) defer db.Close() pool := newActivePool() @@ -170,6 +176,8 @@ func TestExecWithAbortedCtx(t *testing.T) { } func TestExecWithDbconnClosed(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() pool := newActivePool() @@ -183,6 +191,8 @@ func TestExecWithDbconnClosed(t *testing.T) { } func TestExecWithDbconnClosedHavingTx(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() pool := newActivePool() @@ -197,6 +207,8 @@ func TestExecWithDbconnClosedHavingTx(t *testing.T) { } func TestFailOnConnectionRegistering(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() pool := newActivePool() diff --git a/go/vt/vttablet/tabletserver/tabletenv/config.go b/go/vt/vttablet/tabletserver/tabletenv/config.go index c52754b1569..d490c97326a 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/config.go +++ b/go/vt/vttablet/tabletserver/tabletenv/config.go @@ -26,19 +26,20 @@ import ( "github.com/spf13/pflag" "google.golang.org/protobuf/encoding/prototext" - "vitess.io/vitess/go/cache" "vitess.io/vitess/go/flagutil" "vitess.io/vitess/go/streamlog" 
"vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/throttler" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" + + querypb "vitess.io/vitess/go/vt/proto/query" + throttlerdatapb "vitess.io/vitess/go/vt/proto/throttlerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) // These constants represent values for various config parameters. @@ -89,6 +90,24 @@ var ( txLogHandler = "/debug/txlog" ) +type TxThrottlerConfigFlag struct { + *throttlerdatapb.Configuration +} + +func NewTxThrottlerConfigFlag() *TxThrottlerConfigFlag { + return &TxThrottlerConfigFlag{&throttlerdatapb.Configuration{}} +} + +func (t *TxThrottlerConfigFlag) Get() *throttlerdatapb.Configuration { + return t.Configuration +} + +func (t *TxThrottlerConfigFlag) Set(arg string) error { + return prototext.Unmarshal([]byte(arg), t.Configuration) +} + +func (t *TxThrottlerConfigFlag) Type() string { return "string" } + // RegisterTabletEnvFlags is a public API to register tabletenv flags for use by test cases that expect // some flags to be set with default values func RegisterTabletEnvFlags(fs *pflag.FlagSet) { @@ -100,14 +119,8 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) { fs.StringVar(&txLogHandler, "transaction-log-stream-handler", txLogHandler, "URL handler for streaming transactions log") fs.IntVar(¤tConfig.OltpReadPool.Size, "queryserver-config-pool-size", defaultConfig.OltpReadPool.Size, "query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction)") - fs.IntVar(¤tConfig.OltpReadPool.PrefillParallelism, "queryserver-config-pool-prefill-parallelism", defaultConfig.OltpReadPool.PrefillParallelism, "Query server read pool 
prefill parallelism, a non-zero value will prefill the pool using the specified parallism.") - _ = fs.MarkDeprecated("queryserver-config-pool-prefill-parallelism", "it will be removed in a future release.") fs.IntVar(¤tConfig.OlapReadPool.Size, "queryserver-config-stream-pool-size", defaultConfig.OlapReadPool.Size, "query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion") - fs.IntVar(¤tConfig.OlapReadPool.PrefillParallelism, "queryserver-config-stream-pool-prefill-parallelism", defaultConfig.OlapReadPool.PrefillParallelism, "Query server stream pool prefill parallelism, a non-zero value will prefill the pool using the specified parallelism") - _ = fs.MarkDeprecated("queryserver-config-stream-pool-prefill-parallelism", "it will be removed in a future release.") fs.IntVar(¤tConfig.TxPool.Size, "queryserver-config-transaction-cap", defaultConfig.TxPool.Size, "query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout)") - fs.IntVar(¤tConfig.TxPool.PrefillParallelism, "queryserver-config-transaction-prefill-parallelism", defaultConfig.TxPool.PrefillParallelism, "Query server transaction prefill parallelism, a non-zero value will prefill the pool using the specified parallism.") - _ = fs.MarkDeprecated("queryserver-config-transaction-prefill-parallelism", "it will be removed in a future release.") fs.IntVar(¤tConfig.MessagePostponeParallelism, "queryserver-config-message-postpone-cap", defaultConfig.MessagePostponeParallelism, "query server message postpone cap is the maximum number of messages that can be postponed at any given time. 
Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem.") currentConfig.Oltp.TxTimeoutSeconds = defaultConfig.Oltp.TxTimeoutSeconds.Clone() fs.Var(¤tConfig.Oltp.TxTimeoutSeconds, currentConfig.Oltp.TxTimeoutSeconds.Name(), "query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value") @@ -118,15 +131,17 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) { fs.BoolVar(¤tConfig.PassthroughDML, "queryserver-config-passthrough-dmls", defaultConfig.PassthroughDML, "query server pass through all dml statements without rewriting") fs.IntVar(¤tConfig.StreamBufferSize, "queryserver-config-stream-buffer-size", defaultConfig.StreamBufferSize, "query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size.") - fs.IntVar(¤tConfig.QueryCacheSize, "queryserver-config-query-cache-size", defaultConfig.QueryCacheSize, "query server query cache size, maximum number of queries to be cached. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache.") + + fs.Int("queryserver-config-query-cache-size", 0, "query server query cache size, maximum number of queries to be cached. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache.") + _ = fs.MarkDeprecated("queryserver-config-query-cache-size", "`--queryserver-config-query-cache-size` is deprecated and will be removed in `v19.0`. 
This option only applied to LRU caches, which are now unsupported.") + fs.Int64Var(¤tConfig.QueryCacheMemory, "queryserver-config-query-cache-memory", defaultConfig.QueryCacheMemory, "query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache.") - fs.BoolVar(¤tConfig.QueryCacheLFU, "queryserver-config-query-cache-lfu", defaultConfig.QueryCacheLFU, "query server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries") + + fs.Bool("queryserver-config-query-cache-lfu", false, "query server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries") + _ = fs.MarkDeprecated("queryserver-config-query-cache-lfu", "`--queryserver-config-query-cache-lfu` is deprecated and will be removed in `v19.0`. The query cache always uses a LFU implementation now.") currentConfig.SchemaReloadIntervalSeconds = defaultConfig.SchemaReloadIntervalSeconds.Clone() fs.Var(¤tConfig.SchemaReloadIntervalSeconds, currentConfig.SchemaReloadIntervalSeconds.Name(), "query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. 
This config controls the reload time.") - currentConfig.SignalSchemaChangeReloadIntervalSeconds = defaultConfig.SignalSchemaChangeReloadIntervalSeconds.Clone() - fs.Var(¤tConfig.SignalSchemaChangeReloadIntervalSeconds, "queryserver-config-schema-change-signal-interval", "query server schema change signal interval defines at which interval the query server shall send schema updates to vtgate.") - _ = fs.MarkDeprecated("queryserver-config-schema-change-signal-interval", "We no longer poll for finding schema changes.") fs.DurationVar(¤tConfig.SchemaChangeReloadTimeout, "schema-change-reload-timeout", defaultConfig.SchemaChangeReloadTimeout, "query server schema change reload timeout, this is how long to wait for the signaled schema reload operation to complete before giving up") fs.BoolVar(¤tConfig.SignalWhenSchemaChange, "queryserver-config-schema-change-signal", defaultConfig.SignalWhenSchemaChange, "query server schema signal, will signal connected vtgates that schema has changed whenever this is detected. VTGates will need to have -schema_change_signal enabled for this to work") currentConfig.Olap.TxTimeoutSeconds = defaultConfig.Olap.TxTimeoutSeconds.Clone() @@ -161,10 +176,12 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) { SecondsVar(fs, ¤tConfig.TwoPCAbandonAge, "twopc_abandon_age", defaultConfig.TwoPCAbandonAge, "time in seconds. 
Any unresolved transaction older than this time will be sent to the coordinator to be resolved.") // Tx throttler config flagutil.DualFormatBoolVar(fs, ¤tConfig.EnableTxThrottler, "enable_tx_throttler", defaultConfig.EnableTxThrottler, "If true replication-lag-based throttling on transactions will be enabled.") - flagutil.DualFormatStringVar(fs, ¤tConfig.TxThrottlerConfig, "tx_throttler_config", defaultConfig.TxThrottlerConfig, "The configuration of the transaction throttler as a text-formatted throttlerdata.Configuration protocol buffer message.") + flagutil.DualFormatVar(fs, currentConfig.TxThrottlerConfig, "tx_throttler_config", "The configuration of the transaction throttler as a text-formatted throttlerdata.Configuration protocol buffer message.") flagutil.DualFormatStringListVar(fs, ¤tConfig.TxThrottlerHealthCheckCells, "tx_throttler_healthcheck_cells", defaultConfig.TxThrottlerHealthCheckCells, "A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler.") fs.IntVar(¤tConfig.TxThrottlerDefaultPriority, "tx-throttler-default-priority", defaultConfig.TxThrottlerDefaultPriority, "Default priority assigned to queries that lack priority information") fs.Var(currentConfig.TxThrottlerTabletTypes, "tx-throttler-tablet-types", "A comma-separated list of tablet types. Only tablets of this type are monitored for replication lag by the transaction throttler. 
Supported types are replica and/or rdonly.") + fs.BoolVar(¤tConfig.TxThrottlerDryRun, "tx-throttler-dry-run", defaultConfig.TxThrottlerDryRun, "If present, the transaction throttler only records metrics about requests received and throttled, but does not actually throttle any requests.") + fs.DurationVar(¤tConfig.TxThrottlerTopoRefreshInterval, "tx-throttler-topo-refresh-interval", time.Minute*5, "The rate that the transaction throttler will refresh the topology to find cells.") fs.BoolVar(&enableHotRowProtection, "enable_hot_row_protection", false, "If true, incoming transactions for the same row (range) will be queued and cannot consume all txpool slots.") fs.BoolVar(&enableHotRowProtectionDryRun, "enable_hot_row_protection_dry_run", false, "If true, hot row protection is not enforced but logs if transactions would have been queued.") @@ -183,7 +200,6 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) { fs.BoolVar(&enableHeartbeat, "heartbeat_enable", false, "If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the sidecar database's heartbeat table. The result is used to inform the serving state of the vttablet via healthchecks.") fs.DurationVar(&heartbeatInterval, "heartbeat_interval", 1*time.Second, "How frequently to read and write replication heartbeat.") fs.DurationVar(&heartbeatOnDemandDuration, "heartbeat_on_demand_duration", 0, "If non-zero, heartbeats are only written upon consumer request, and only run for up to given duration following the request. 
Frequent requests can keep the heartbeat running consistently; when requests are infrequent heartbeat may completely stop between requests") - flagutil.DualFormatBoolVar(fs, ¤tConfig.EnableLagThrottler, "enable_lag_throttler", defaultConfig.EnableLagThrottler, "If true, vttablet will run a throttler service, and will implicitly enable heartbeats") fs.BoolVar(¤tConfig.EnforceStrictTransTables, "enforce_strict_trans_tables", defaultConfig.EnforceStrictTransTables, "If true, vttablet requires MySQL to run with STRICT_TRANS_TABLES or STRICT_ALL_TABLES on. It is recommended to not turn this flag off. Otherwise MySQL may alter your supplied values before saving them to the database.") flagutil.DualFormatBoolVar(fs, &enableConsolidator, "enable_consolidator", true, "This option enables the query consolidator.") @@ -315,9 +331,8 @@ type TabletConfig struct { StreamBufferSize int `json:"streamBufferSize,omitempty"` ConsolidatorStreamTotalSize int64 `json:"consolidatorStreamTotalSize,omitempty"` ConsolidatorStreamQuerySize int64 `json:"consolidatorStreamQuerySize,omitempty"` - QueryCacheSize int `json:"queryCacheSize,omitempty"` QueryCacheMemory int64 `json:"queryCacheMemory,omitempty"` - QueryCacheLFU bool `json:"queryCacheLFU,omitempty"` + QueryCacheDoorkeeper bool `json:"queryCacheDoorkeeper,omitempty"` SchemaReloadIntervalSeconds flagutil.DeprecatedFloat64Seconds `json:"schemaReloadIntervalSeconds,omitempty"` SignalSchemaChangeReloadIntervalSeconds flagutil.DeprecatedFloat64Seconds `json:"signalSchemaChangeReloadIntervalSeconds,omitempty"` SchemaChangeReloadTimeout time.Duration `json:"schemaChangeReloadTimeout,omitempty"` @@ -340,14 +355,15 @@ type TabletConfig struct { TwoPCCoordinatorAddress string `json:"-"` TwoPCAbandonAge Seconds `json:"-"` - EnableTxThrottler bool `json:"-"` - TxThrottlerConfig string `json:"-"` - TxThrottlerHealthCheckCells []string `json:"-"` - TxThrottlerDefaultPriority int `json:"-"` - TxThrottlerTabletTypes *topoproto.TabletTypeListFlag 
`json:"-"` + EnableTxThrottler bool `json:"-"` + TxThrottlerConfig *TxThrottlerConfigFlag `json:"-"` + TxThrottlerHealthCheckCells []string `json:"-"` + TxThrottlerDefaultPriority int `json:"-"` + TxThrottlerTabletTypes *topoproto.TabletTypeListFlag `json:"-"` + TxThrottlerTopoRefreshInterval time.Duration `json:"-"` + TxThrottlerDryRun bool `json:"-"` - EnableLagThrottler bool `json:"-"` - EnableTableGC bool `json:"-"` // can be turned off programmatically by tests + EnableTableGC bool `json:"-"` // can be turned off programmatically by tests TransactionLimitConfig `json:"-"` @@ -659,9 +675,6 @@ func (c *TabletConfig) Verify() error { if v := c.HotRowProtection.MaxConcurrency; v <= 0 { return fmt.Errorf("--hot_row_protection_concurrent_transactions must be > 0 (specified value: %v)", v) } - if v := c.TxThrottlerDefaultPriority; v > sqlparser.MaxPriorityValue || v < 0 { - return fmt.Errorf("--tx-throttler-default-priority must be > 0 and < 100 (specified value: %d)", v) - } return nil } @@ -697,6 +710,19 @@ func (c *TabletConfig) verifyTransactionLimitConfig() error { // verifyTxThrottlerConfig checks the TxThrottler related config for sanity. 
func (c *TabletConfig) verifyTxThrottlerConfig() error { + if !c.EnableTxThrottler { + return nil + } + + err := throttler.MaxReplicationLagModuleConfig{Configuration: c.TxThrottlerConfig.Get()}.Verify() + if err != nil { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "failed to parse throttlerdatapb.Configuration config: %v", err) + } + + if v := c.TxThrottlerDefaultPriority; v > sqlparser.MaxPriorityValue || v < 0 { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "--tx-throttler-default-priority must be > 0 and < 100 (specified value: %d)", v) + } + if c.TxThrottlerTabletTypes == nil || len(*c.TxThrottlerTabletTypes) == 0 { return vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "--tx-throttler-tablet-types must be defined when transaction throttler is enabled") } @@ -708,6 +734,7 @@ func (c *TabletConfig) verifyTxThrottlerConfig() error { return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported tablet type %q", tabletType) } } + return nil } @@ -782,10 +809,11 @@ var defaultConfig = TabletConfig{ // memory copies. so with the encoding overhead, this seems to work // great (the overhead makes the final packets on the wire about twice // bigger than this). - StreamBufferSize: 32 * 1024, - QueryCacheSize: int(cache.DefaultConfig.MaxEntries), - QueryCacheMemory: cache.DefaultConfig.MaxMemoryUsage, - QueryCacheLFU: cache.DefaultConfig.LFU, + StreamBufferSize: 32 * 1024, + QueryCacheMemory: 32 * 1024 * 1024, // 32 mb for our query cache + // The doorkeeper for the plan cache is disabled by default in endtoend tests to ensure + // results are consistent between runs. + QueryCacheDoorkeeper: !servenv.TestingEndtoend, SchemaReloadIntervalSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-schema-reload-time", 30*time.Minute), // SchemaChangeReloadTimeout is used for the signal reload operation where we have to query mysqld. 
// The queries during the signal reload operation are typically expected to have low load, @@ -795,13 +823,13 @@ var defaultConfig = TabletConfig{ MessagePostponeParallelism: 4, SignalWhenSchemaChange: true, - EnableTxThrottler: false, - TxThrottlerConfig: defaultTxThrottlerConfig(), - TxThrottlerHealthCheckCells: []string{}, - TxThrottlerDefaultPriority: sqlparser.MaxPriorityValue, // This leads to all queries being candidates to throttle - TxThrottlerTabletTypes: &topoproto.TabletTypeListFlag{topodatapb.TabletType_REPLICA}, - - EnableLagThrottler: false, // Feature flag; to switch to 'true' at some stage in the future + EnableTxThrottler: false, + TxThrottlerConfig: defaultTxThrottlerConfig(), + TxThrottlerHealthCheckCells: []string{}, + TxThrottlerDefaultPriority: sqlparser.MaxPriorityValue, // This leads to all queries being candidates to throttle + TxThrottlerTabletTypes: &topoproto.TabletTypeListFlag{topodatapb.TabletType_REPLICA}, + TxThrottlerDryRun: false, + TxThrottlerTopoRefreshInterval: time.Minute * 5, TransactionLimitConfig: defaultTransactionLimitConfig(), @@ -818,17 +846,16 @@ var defaultConfig = TabletConfig{ EnableSettingsPool: true, } -// defaultTxThrottlerConfig formats the default throttlerdata.Configuration -// object in text format. It uses the object returned by -// throttler.DefaultMaxReplicationLagModuleConfig().Configuration and overrides some of its -// fields. It panics on error. -func defaultTxThrottlerConfig() string { +// defaultTxThrottlerConfig returns the default TxThrottlerConfigFlag object based on +// a throttler.DefaultMaxReplicationLagModuleConfig().Configuration and overrides some of +// its fields. It panics on error. +func defaultTxThrottlerConfig() *TxThrottlerConfigFlag { // Take throttler.DefaultMaxReplicationLagModuleConfig and override some fields. 
config := throttler.DefaultMaxReplicationLagModuleConfig().Configuration // TODO(erez): Make DefaultMaxReplicationLagModuleConfig() return a MaxReplicationLagSec of 10 // and remove this line. config.MaxReplicationLagSec = 10 - return prototext.Format(config) + return &TxThrottlerConfigFlag{config} } func defaultTransactionLimitConfig() TransactionLimitConfig { diff --git a/go/vt/vttablet/tabletserver/tabletenv/config_test.go b/go/vt/vttablet/tabletserver/tabletenv/config_test.go index 84848517adc..e472cbb4789 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/config_test.go +++ b/go/vt/vttablet/tabletserver/tabletenv/config_test.go @@ -26,11 +26,13 @@ import ( "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/dbconfigs" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/throttler" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/yaml2" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) func TestConfigParse(t *testing.T) { @@ -45,9 +47,8 @@ func TestConfigParse(t *testing.T) { }, }, OltpReadPool: ConnPoolConfig{ - Size: 16, - PrefillParallelism: 30, - MaxWaiters: 40, + Size: 16, + MaxWaiters: 40, }, RowStreamer: RowStreamerConfig{ MaxInnoDBTrxHistLen: 1000, @@ -87,7 +88,6 @@ oltpReadPool: idleTimeoutSeconds: 20s maxLifetimeSeconds: 50s maxWaiters: 40 - prefillParallelism: 30 size: 16 timeoutSeconds: 10s replicationTracker: {} @@ -110,7 +110,6 @@ txPool: {} oltpReadPool: size: 16 idleTimeoutSeconds: 20 - prefillParallelism: 30 maxWaiters: 40 maxLifetimeSeconds: 50 `) @@ -152,9 +151,8 @@ oltpReadPool: idleTimeoutSeconds: 30m0s maxWaiters: 5000 size: 16 -queryCacheLFU: true +queryCacheDoorkeeper: true queryCacheMemory: 33554432 -queryCacheSize: 5000 replicationTracker: heartbeatIntervalSeconds: 250ms mode: disable @@ -180,9 +178,8 @@ func TestClone(t *testing.T) { cfg1 := &TabletConfig{ 
OltpReadPool: ConnPoolConfig{ - Size: 16, - PrefillParallelism: 30, - MaxWaiters: 40, + Size: 16, + MaxWaiters: 40, }, RowStreamer: RowStreamerConfig{ MaxInnoDBTrxHistLen: 1000000, @@ -331,31 +328,126 @@ func TestFlags(t *testing.T) { assert.Equal(t, want, currentConfig) } -func TestVerifyTxThrottlerConfig(t *testing.T) { +func TestTxThrottlerConfigFlag(t *testing.T) { + f := NewTxThrottlerConfigFlag() + defaultMaxReplicationLagModuleConfig := throttler.DefaultMaxReplicationLagModuleConfig().Configuration + { - // default config (replica) - assert.Nil(t, currentConfig.verifyTxThrottlerConfig()) + assert.Nil(t, f.Set(defaultMaxReplicationLagModuleConfig.String())) + assert.Equal(t, defaultMaxReplicationLagModuleConfig.String(), f.String()) + assert.Equal(t, "string", f.Type()) } { - // replica + rdonly (allowed) - currentConfig.TxThrottlerTabletTypes = &topoproto.TabletTypeListFlag{ - topodatapb.TabletType_REPLICA, - topodatapb.TabletType_RDONLY, - } - assert.Nil(t, currentConfig.verifyTxThrottlerConfig()) + defaultMaxReplicationLagModuleConfig.TargetReplicationLagSec = 5 + assert.Nil(t, f.Set(defaultMaxReplicationLagModuleConfig.String())) + assert.NotNil(t, f.Get()) + assert.Equal(t, int64(5), f.Get().TargetReplicationLagSec) } { - // no tablet types - currentConfig.TxThrottlerTabletTypes = &topoproto.TabletTypeListFlag{} - err := currentConfig.verifyTxThrottlerConfig() - assert.NotNil(t, err) - assert.Equal(t, vtrpcpb.Code_FAILED_PRECONDITION, vterrors.Code(err)) + assert.NotNil(t, f.Set("should not parse")) } - { - // disallowed tablet type - currentConfig.TxThrottlerTabletTypes = &topoproto.TabletTypeListFlag{topodatapb.TabletType_DRAINED} - err := currentConfig.verifyTxThrottlerConfig() - assert.NotNil(t, err) - assert.Equal(t, vtrpcpb.Code_INVALID_ARGUMENT, vterrors.Code(err)) +} + +func TestVerifyTxThrottlerConfig(t *testing.T) { + defaultMaxReplicationLagModuleConfig := throttler.DefaultMaxReplicationLagModuleConfig().Configuration + 
invalidMaxReplicationLagModuleConfig := throttler.DefaultMaxReplicationLagModuleConfig().Configuration + invalidMaxReplicationLagModuleConfig.TargetReplicationLagSec = -1 + + type testConfig struct { + Name string + ExpectedErrorCode vtrpcpb.Code + // + EnableTxThrottler bool + TxThrottlerConfig *TxThrottlerConfigFlag + TxThrottlerHealthCheckCells []string + TxThrottlerTabletTypes *topoproto.TabletTypeListFlag + TxThrottlerDefaultPriority int + } + + tests := []testConfig{ + { + // default (disabled) + Name: "default", + EnableTxThrottler: false, + }, + { + // enabled with invalid throttler config + Name: "enabled invalid config", + ExpectedErrorCode: vtrpcpb.Code_INVALID_ARGUMENT, + EnableTxThrottler: true, + TxThrottlerConfig: &TxThrottlerConfigFlag{invalidMaxReplicationLagModuleConfig}, + }, + { + // enabled with good config (default/replica tablet type) + Name: "enabled", + EnableTxThrottler: true, + TxThrottlerConfig: &TxThrottlerConfigFlag{defaultMaxReplicationLagModuleConfig}, + TxThrottlerHealthCheckCells: []string{"cell1"}, + }, + { + // enabled + replica and rdonly tablet types + Name: "enabled plus rdonly", + EnableTxThrottler: true, + TxThrottlerConfig: &TxThrottlerConfigFlag{defaultMaxReplicationLagModuleConfig}, + TxThrottlerHealthCheckCells: []string{"cell1"}, + TxThrottlerTabletTypes: &topoproto.TabletTypeListFlag{ + topodatapb.TabletType_REPLICA, + topodatapb.TabletType_RDONLY, + }, + }, + { + // enabled without tablet types + Name: "enabled without tablet types", + ExpectedErrorCode: vtrpcpb.Code_FAILED_PRECONDITION, + EnableTxThrottler: true, + TxThrottlerConfig: &TxThrottlerConfigFlag{defaultMaxReplicationLagModuleConfig}, + TxThrottlerHealthCheckCells: []string{"cell1"}, + TxThrottlerTabletTypes: &topoproto.TabletTypeListFlag{}, + }, + { + // enabled + disallowed tablet type + Name: "enabled disallowed tablet type", + ExpectedErrorCode: vtrpcpb.Code_INVALID_ARGUMENT, + EnableTxThrottler: true, + TxThrottlerConfig: 
&TxThrottlerConfigFlag{defaultMaxReplicationLagModuleConfig}, + TxThrottlerHealthCheckCells: []string{"cell1"}, + TxThrottlerTabletTypes: &topoproto.TabletTypeListFlag{topodatapb.TabletType_DRAINED}, + }, + { + // enabled + disallowed priority + Name: "enabled disallowed priority", + ExpectedErrorCode: vtrpcpb.Code_INVALID_ARGUMENT, + EnableTxThrottler: true, + TxThrottlerConfig: &TxThrottlerConfigFlag{defaultMaxReplicationLagModuleConfig}, + TxThrottlerDefaultPriority: 12345, + TxThrottlerHealthCheckCells: []string{"cell1"}, + }, + } + + for _, test := range tests { + test := test + t.Run(test.Name, func(t *testing.T) { + t.Parallel() + + config := defaultConfig + config.EnableTxThrottler = test.EnableTxThrottler + if test.TxThrottlerConfig == nil { + test.TxThrottlerConfig = NewTxThrottlerConfigFlag() + } + config.TxThrottlerConfig = test.TxThrottlerConfig + config.TxThrottlerHealthCheckCells = test.TxThrottlerHealthCheckCells + config.TxThrottlerDefaultPriority = test.TxThrottlerDefaultPriority + if test.TxThrottlerTabletTypes != nil { + config.TxThrottlerTabletTypes = test.TxThrottlerTabletTypes + } + + err := config.verifyTxThrottlerConfig() + if test.ExpectedErrorCode == vtrpcpb.Code_OK { + assert.Nil(t, err) + } else { + assert.NotNil(t, err) + assert.Equal(t, test.ExpectedErrorCode, vterrors.Code(err)) + } + }) } } diff --git a/go/vt/vttablet/tabletserver/tabletenv/stats.go b/go/vt/vttablet/tabletserver/tabletenv/stats.go index 30510f9c32e..1ad93532719 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/stats.go +++ b/go/vt/vttablet/tabletserver/tabletenv/stats.go @@ -47,6 +47,8 @@ type Stats struct { UserActiveReservedCount *stats.CountersWithSingleLabel // Per CallerID active reserved connection counts UserReservedCount *stats.CountersWithSingleLabel // Per CallerID reserved connection counts UserReservedTimesNs *stats.CountersWithSingleLabel // Per CallerID reserved connection duration + + QueryTimingsByTabletType *servenv.TimingsWrapper // Query timings 
split by current tablet type } // NewStats instantiates a new set of stats scoped by exporter. @@ -94,7 +96,13 @@ func NewStats(exporter *servenv.Exporter) *Stats { UserActiveReservedCount: exporter.NewCountersWithSingleLabel("UserActiveReservedCount", "active reserved connection for each CallerID", "CallerID"), UserReservedCount: exporter.NewCountersWithSingleLabel("UserReservedCount", "reserved connection received for each CallerID", "CallerID"), UserReservedTimesNs: exporter.NewCountersWithSingleLabel("UserReservedTimesNs", "Total reserved connection latency for each CallerID", "CallerID"), + + QueryTimingsByTabletType: exporter.NewTimings("QueryTimingsByTabletType", "Query timings broken down by active tablet type", "TabletType"), } stats.QPSRates = exporter.NewRates("QPS", stats.QueryTimings, 15*60/5, 5*time.Second) return stats } + +func (st *Stats) Stop() { + st.QPSRates.Stop() +} diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go index 54950adf893..83b431c1e03 100644 --- a/go/vt/vttablet/tabletserver/tabletserver.go +++ b/go/vt/vttablet/tabletserver/tabletserver.go @@ -33,10 +33,9 @@ import ( "syscall" "time" - "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/pools" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" @@ -139,8 +138,8 @@ var _ queryservice.QueryService = (*TabletServer)(nil) var RegisterFunctions []func(Controller) // NewServer creates a new TabletServer based on the command line flags. 
-func NewServer(name string, topoServer *topo.Server, alias *topodatapb.TabletAlias) *TabletServer { - return NewTabletServer(name, tabletenv.NewCurrentConfig(), topoServer, alias) +func NewServer(ctx context.Context, name string, topoServer *topo.Server, alias *topodatapb.TabletAlias) *TabletServer { + return NewTabletServer(ctx, name, tabletenv.NewCurrentConfig(), topoServer, alias) } var ( @@ -150,7 +149,7 @@ var ( // NewTabletServer creates an instance of TabletServer. Only the first // instance of TabletServer will expose its state variables. -func NewTabletServer(name string, config *tabletenv.TabletConfig, topoServer *topo.Server, alias *topodatapb.TabletAlias) *TabletServer { +func NewTabletServer(ctx context.Context, name string, config *tabletenv.TabletConfig, topoServer *topo.Server, alias *topodatapb.TabletAlias) *TabletServer { exporter := servenv.NewExporter(name, "Tablet") tsv := &TabletServer{ exporter: exporter, @@ -160,11 +159,11 @@ func NewTabletServer(name string, config *tabletenv.TabletConfig, topoServer *to TruncateErrorLen: config.TruncateErrorLen, enableHotRowProtection: config.HotRowProtection.Mode != tabletenv.Disable, topoServer: topoServer, - alias: proto.Clone(alias).(*topodatapb.TabletAlias), + alias: alias.CloneVT(), } tsv.QueryTimeout.Store(config.Oltp.QueryTimeoutSeconds.Get().Nanoseconds()) - tsOnce.Do(func() { srvTopoServer = srvtopo.NewResilientServer(topoServer, "TabletSrvTopo") }) + tsOnce.Do(func() { srvTopoServer = srvtopo.NewResilientServer(ctx, topoServer, "TabletSrvTopo") }) tabletTypeFunc := func() topodatapb.TabletType { if tsv.sm == nil { @@ -265,7 +264,7 @@ func (tsv *TabletServer) InitDBConfig(target *querypb.Target, dbcfgs *dbconfigs. 
return vterrors.NewErrorf(vtrpcpb.Code_UNAVAILABLE, vterrors.ServerNotAvailable, "Server isn't available") } tsv.sm.Init(tsv, target) - tsv.sm.target = proto.Clone(target).(*querypb.Target) + tsv.sm.target = target.CloneVT() tsv.config.DB = dbcfgs tsv.se.InitDBConfig(tsv.config.DB.DbaWithDB()) @@ -371,12 +370,12 @@ func (tsv *TabletServer) InitACL(tableACLConfigFile string, enforceTableACLConfi // SetServingType changes the serving type of the tabletserver. It starts or // stops internal services as deemed necessary. // Returns true if the state of QueryService or the tablet type changed. -func (tsv *TabletServer) SetServingType(tabletType topodatapb.TabletType, terTimestamp time.Time, serving bool, reason string) error { +func (tsv *TabletServer) SetServingType(tabletType topodatapb.TabletType, ptsTimestamp time.Time, serving bool, reason string) error { state := StateNotServing if serving { state = StateServing } - return tsv.sm.SetServingType(tabletType, terTimestamp, state, reason) + return tsv.sm.SetServingType(tabletType, ptsTimestamp, state, reason) } // StartService is a convenience function for InitDBConfig->SetServingType @@ -494,7 +493,7 @@ func (tsv *TabletServer) begin(ctx context.Context, target *querypb.Target, save target, options, false, /* allowOnShutdown */ func(ctx context.Context, logStats *tabletenv.LogStats) error { startTime := time.Now() - if tsv.txThrottler.Throttle(tsv.getPriorityFromOptions(options)) { + if tsv.txThrottler.Throttle(tsv.getPriorityFromOptions(options), options.GetWorkloadName()) { return errTxThrottled } var connSetting *pools.Setting @@ -517,6 +516,7 @@ func (tsv *TabletServer) begin(ctx context.Context, target *querypb.Target, save logStats.OriginalSQL = beginSQL if beginSQL != "" { tsv.stats.QueryTimings.Record("BEGIN", startTime) + tsv.stats.QueryTimingsByTabletType.Record(target.TabletType.String(), startTime) } else { logStats.Method = "" } @@ -572,6 +572,7 @@ func (tsv *TabletServer) Commit(ctx context.Context, 
target *querypb.Target, tra // handlePanicAndSendLogStats doesn't log the no-op. if commitSQL != "" { tsv.stats.QueryTimings.Record("COMMIT", startTime) + tsv.stats.QueryTimingsByTabletType.Record(target.TabletType.String(), startTime) } else { logStats.Method = "" } @@ -589,6 +590,7 @@ func (tsv *TabletServer) Rollback(ctx context.Context, target *querypb.Target, t target, nil, true, /* allowOnShutdown */ func(ctx context.Context, logStats *tabletenv.LogStats) error { defer tsv.stats.QueryTimings.Record("ROLLBACK", time.Now()) + defer tsv.stats.QueryTimingsByTabletType.Record(target.TabletType.String(), time.Now()) logStats.TransactionID = transactionID newReservedID, err = tsv.te.Rollback(ctx, transactionID) if newReservedID > 0 { @@ -846,10 +848,7 @@ func smallerTimeout(t1, t2 time.Duration) time.Duration { if t2 == 0 { return t1 } - if t1 < t2 { - return t1 - } - return t2 + return min(t1, t2) } // StreamExecute executes the query and streams the result. @@ -1164,6 +1163,14 @@ func (tsv *TabletServer) VStreamRows(ctx context.Context, request *binlogdatapb. return tsv.vstreamer.StreamRows(ctx, request.Query, row, send) } +// VStreamTables streams all tables. +func (tsv *TabletServer) VStreamTables(ctx context.Context, request *binlogdatapb.VStreamTablesRequest, send func(*binlogdatapb.VStreamTablesResponse) error) error { + if err := tsv.sm.VerifyTarget(ctx, request.Target); err != nil { + return err + } + return tsv.vstreamer.StreamTables(ctx, send) +} + // VStreamResults streams rows from the specified starting point. 
func (tsv *TabletServer) VStreamResults(ctx context.Context, target *querypb.Target, query string, send func(*binlogdatapb.VStreamResultsResponse) error) error { if err := tsv.sm.VerifyTarget(ctx, target); err != nil { @@ -1197,6 +1204,7 @@ func (tsv *TabletServer) ReserveBeginExecute(ctx context.Context, target *queryp target, options, false, /* allowOnShutdown */ func(ctx context.Context, logStats *tabletenv.LogStats) error { defer tsv.stats.QueryTimings.Record("RESERVE", time.Now()) + defer tsv.stats.QueryTimingsByTabletType.Record(target.TabletType.String(), time.Now()) connID, sessionStateChanges, err = tsv.te.ReserveBegin(ctx, options, preQueries, postBeginQueries) if err != nil { return err @@ -1242,6 +1250,7 @@ func (tsv *TabletServer) ReserveBeginStreamExecute( target, options, false, /* allowOnShutdown */ func(ctx context.Context, logStats *tabletenv.LogStats) error { defer tsv.stats.QueryTimings.Record("RESERVE", time.Now()) + defer tsv.stats.QueryTimingsByTabletType.Record(target.TabletType.String(), time.Now()) connID, sessionStateChanges, err = tsv.te.ReserveBegin(ctx, options, preQueries, postBeginQueries) if err != nil { return err @@ -1295,6 +1304,7 @@ func (tsv *TabletServer) ReserveExecute(ctx context.Context, target *querypb.Tar target, options, allowOnShutdown, func(ctx context.Context, logStats *tabletenv.LogStats) error { defer tsv.stats.QueryTimings.Record("RESERVE", time.Now()) + defer tsv.stats.QueryTimingsByTabletType.Record(target.TabletType.String(), time.Now()) state.ReservedID, err = tsv.te.Reserve(ctx, options, transactionID, preQueries) if err != nil { return err @@ -1345,6 +1355,7 @@ func (tsv *TabletServer) ReserveStreamExecute( target, options, allowOnShutdown, func(ctx context.Context, logStats *tabletenv.LogStats) error { defer tsv.stats.QueryTimings.Record("RESERVE", time.Now()) + defer tsv.stats.QueryTimingsByTabletType.Record(target.TabletType.String(), time.Now()) state.ReservedID, err = tsv.te.Reserve(ctx, options, 
transactionID, preQueries) if err != nil { return err @@ -1374,6 +1385,7 @@ func (tsv *TabletServer) Release(ctx context.Context, target *querypb.Target, tr target, nil, true, /* allowOnShutdown */ func(ctx context.Context, logStats *tabletenv.LogStats) error { defer tsv.stats.QueryTimings.Record("RELEASE", time.Now()) + defer tsv.stats.QueryTimingsByTabletType.Record(target.TabletType.String(), time.Now()) logStats.TransactionID = transactionID logStats.ReservedID = reservedID if reservedID != 0 { @@ -1561,7 +1573,7 @@ func (tsv *TabletServer) convertAndLogError(ctx context.Context, sql string, bin // If so, we don't want to suppress the error. This will allow VTGate to // detect and perform buffering during failovers. var message string - sqlErr, ok := err.(*mysql.SQLError) + sqlErr, ok := err.(*sqlerror.SQLError) if ok { sqlState := sqlErr.SQLState() errnum := sqlErr.Number() @@ -1596,64 +1608,64 @@ func (tsv *TabletServer) convertAndLogError(ctx context.Context, sql string, bin func convertErrorCode(err error) vtrpcpb.Code { errCode := vterrors.Code(err) - sqlErr, ok := err.(*mysql.SQLError) + sqlErr, ok := err.(*sqlerror.SQLError) if !ok { return errCode } switch sqlErr.Number() { - case mysql.ERNotSupportedYet: + case sqlerror.ERNotSupportedYet: errCode = vtrpcpb.Code_UNIMPLEMENTED - case mysql.ERDiskFull, mysql.EROutOfMemory, mysql.EROutOfSortMemory, mysql.ERConCount, mysql.EROutOfResources, mysql.ERRecordFileFull, mysql.ERHostIsBlocked, - mysql.ERCantCreateThread, mysql.ERTooManyDelayedThreads, mysql.ERNetPacketTooLarge, mysql.ERTooManyUserConnections, mysql.ERLockTableFull, mysql.ERUserLimitReached: + case sqlerror.ERDiskFull, sqlerror.EROutOfMemory, sqlerror.EROutOfSortMemory, sqlerror.ERConCount, sqlerror.EROutOfResources, sqlerror.ERRecordFileFull, sqlerror.ERHostIsBlocked, + sqlerror.ERCantCreateThread, sqlerror.ERTooManyDelayedThreads, sqlerror.ERNetPacketTooLarge, sqlerror.ERTooManyUserConnections, sqlerror.ERLockTableFull, 
sqlerror.ERUserLimitReached: errCode = vtrpcpb.Code_RESOURCE_EXHAUSTED - case mysql.ERLockWaitTimeout: + case sqlerror.ERLockWaitTimeout: errCode = vtrpcpb.Code_DEADLINE_EXCEEDED - case mysql.CRServerGone, mysql.ERServerShutdown, mysql.ERServerIsntAvailable, mysql.CRConnectionError, mysql.CRConnHostError: + case sqlerror.CRServerGone, sqlerror.ERServerShutdown, sqlerror.ERServerIsntAvailable, sqlerror.CRConnectionError, sqlerror.CRConnHostError: errCode = vtrpcpb.Code_UNAVAILABLE - case mysql.ERFormNotFound, mysql.ERKeyNotFound, mysql.ERBadFieldError, mysql.ERNoSuchThread, mysql.ERUnknownTable, mysql.ERCantFindUDF, mysql.ERNonExistingGrant, - mysql.ERNoSuchTable, mysql.ERNonExistingTableGrant, mysql.ERKeyDoesNotExist: + case sqlerror.ERFormNotFound, sqlerror.ERKeyNotFound, sqlerror.ERBadFieldError, sqlerror.ERNoSuchThread, sqlerror.ERUnknownTable, sqlerror.ERCantFindUDF, sqlerror.ERNonExistingGrant, + sqlerror.ERNoSuchTable, sqlerror.ERNonExistingTableGrant, sqlerror.ERKeyDoesNotExist: errCode = vtrpcpb.Code_NOT_FOUND - case mysql.ERDBAccessDenied, mysql.ERAccessDeniedError, mysql.ERKillDenied, mysql.ERNoPermissionToCreateUsers: + case sqlerror.ERDBAccessDenied, sqlerror.ERAccessDeniedError, sqlerror.ERKillDenied, sqlerror.ERNoPermissionToCreateUsers: errCode = vtrpcpb.Code_PERMISSION_DENIED - case mysql.ERNoDb, mysql.ERNoSuchIndex, mysql.ERCantDropFieldOrKey, mysql.ERTableNotLockedForWrite, mysql.ERTableNotLocked, mysql.ERTooBigSelect, mysql.ERNotAllowedCommand, - mysql.ERTooLongString, mysql.ERDelayedInsertTableLocked, mysql.ERDupUnique, mysql.ERRequiresPrimaryKey, mysql.ERCantDoThisDuringAnTransaction, mysql.ERReadOnlyTransaction, - mysql.ERCannotAddForeign, mysql.ERNoReferencedRow, mysql.ERRowIsReferenced, mysql.ERCantUpdateWithReadLock, mysql.ERNoDefault, mysql.EROperandColumns, - mysql.ERSubqueryNo1Row, mysql.ERNonUpdateableTable, mysql.ERFeatureDisabled, mysql.ERDuplicatedValueInType, mysql.ERRowIsReferenced2, - mysql.ErNoReferencedRow2, 
mysql.ERWarnDataOutOfRange: + case sqlerror.ERNoDb, sqlerror.ERNoSuchIndex, sqlerror.ERCantDropFieldOrKey, sqlerror.ERTableNotLockedForWrite, sqlerror.ERTableNotLocked, sqlerror.ERTooBigSelect, sqlerror.ERNotAllowedCommand, + sqlerror.ERTooLongString, sqlerror.ERDelayedInsertTableLocked, sqlerror.ERDupUnique, sqlerror.ERRequiresPrimaryKey, sqlerror.ERCantDoThisDuringAnTransaction, sqlerror.ERReadOnlyTransaction, + sqlerror.ERCannotAddForeign, sqlerror.ERNoReferencedRow, sqlerror.ERRowIsReferenced, sqlerror.ERCantUpdateWithReadLock, sqlerror.ERNoDefault, sqlerror.EROperandColumns, + sqlerror.ERSubqueryNo1Row, sqlerror.ERNonUpdateableTable, sqlerror.ERFeatureDisabled, sqlerror.ERDuplicatedValueInType, sqlerror.ERRowIsReferenced2, + sqlerror.ErNoReferencedRow2, sqlerror.ERWarnDataOutOfRange: errCode = vtrpcpb.Code_FAILED_PRECONDITION - case mysql.EROptionPreventsStatement: + case sqlerror.EROptionPreventsStatement: errCode = vtrpcpb.Code_CLUSTER_EVENT - case mysql.ERTableExists, mysql.ERDupEntry, mysql.ERFileExists, mysql.ERUDFExists: + case sqlerror.ERTableExists, sqlerror.ERDupEntry, sqlerror.ERFileExists, sqlerror.ERUDFExists: errCode = vtrpcpb.Code_ALREADY_EXISTS - case mysql.ERGotSignal, mysql.ERForcingClose, mysql.ERAbortingConnection, mysql.ERLockDeadlock: + case sqlerror.ERGotSignal, sqlerror.ERForcingClose, sqlerror.ERAbortingConnection, sqlerror.ERLockDeadlock: // For ERLockDeadlock, a deadlock rolls back the transaction. 
errCode = vtrpcpb.Code_ABORTED - case mysql.ERUnknownComError, mysql.ERBadNullError, mysql.ERBadDb, mysql.ERBadTable, mysql.ERNonUniq, mysql.ERWrongFieldWithGroup, mysql.ERWrongGroupField, - mysql.ERWrongSumSelect, mysql.ERWrongValueCount, mysql.ERTooLongIdent, mysql.ERDupFieldName, mysql.ERDupKeyName, mysql.ERWrongFieldSpec, mysql.ERParseError, - mysql.EREmptyQuery, mysql.ERNonUniqTable, mysql.ERInvalidDefault, mysql.ERMultiplePriKey, mysql.ERTooManyKeys, mysql.ERTooManyKeyParts, mysql.ERTooLongKey, - mysql.ERKeyColumnDoesNotExist, mysql.ERBlobUsedAsKey, mysql.ERTooBigFieldLength, mysql.ERWrongAutoKey, mysql.ERWrongFieldTerminators, mysql.ERBlobsAndNoTerminated, - mysql.ERTextFileNotReadable, mysql.ERWrongSubKey, mysql.ERCantRemoveAllFields, mysql.ERUpdateTableUsed, mysql.ERNoTablesUsed, mysql.ERTooBigSet, - mysql.ERBlobCantHaveDefault, mysql.ERWrongDbName, mysql.ERWrongTableName, mysql.ERUnknownProcedure, mysql.ERWrongParamCountToProcedure, - mysql.ERWrongParametersToProcedure, mysql.ERFieldSpecifiedTwice, mysql.ERInvalidGroupFuncUse, mysql.ERTableMustHaveColumns, mysql.ERUnknownCharacterSet, - mysql.ERTooManyTables, mysql.ERTooManyFields, mysql.ERTooBigRowSize, mysql.ERWrongOuterJoin, mysql.ERNullColumnInIndex, mysql.ERFunctionNotDefined, - mysql.ERWrongValueCountOnRow, mysql.ERInvalidUseOfNull, mysql.ERRegexpError, mysql.ERMixOfGroupFuncAndFields, mysql.ERIllegalGrantForTable, mysql.ERSyntaxError, - mysql.ERWrongColumnName, mysql.ERWrongKeyColumn, mysql.ERBlobKeyWithoutLength, mysql.ERPrimaryCantHaveNull, mysql.ERTooManyRows, mysql.ERUnknownSystemVariable, - mysql.ERSetConstantsOnly, mysql.ERWrongArguments, mysql.ERWrongUsage, mysql.ERWrongNumberOfColumnsInSelect, mysql.ERDupArgument, mysql.ERLocalVariable, - mysql.ERGlobalVariable, mysql.ERWrongValueForVar, mysql.ERWrongTypeForVar, mysql.ERVarCantBeRead, mysql.ERCantUseOptionHere, mysql.ERIncorrectGlobalLocalVar, - mysql.ERWrongFKDef, mysql.ERKeyRefDoNotMatchTableRef, mysql.ERCyclicReference, 
mysql.ERCollationCharsetMismatch, mysql.ERCantAggregate2Collations, - mysql.ERCantAggregate3Collations, mysql.ERCantAggregateNCollations, mysql.ERVariableIsNotStruct, mysql.ERUnknownCollation, mysql.ERWrongNameForIndex, - mysql.ERWrongNameForCatalog, mysql.ERBadFTColumn, mysql.ERTruncatedWrongValue, mysql.ERTooMuchAutoTimestampCols, mysql.ERInvalidOnUpdate, mysql.ERUnknownTimeZone, - mysql.ERInvalidCharacterString, mysql.ERIllegalReference, mysql.ERDerivedMustHaveAlias, mysql.ERTableNameNotAllowedHere, mysql.ERDataTooLong, mysql.ERDataOutOfRange, - mysql.ERTruncatedWrongValueForField, mysql.ERIllegalValueForType: + case sqlerror.ERUnknownComError, sqlerror.ERBadNullError, sqlerror.ERBadDb, sqlerror.ERBadTable, sqlerror.ERNonUniq, sqlerror.ERWrongFieldWithGroup, sqlerror.ERWrongGroupField, + sqlerror.ERWrongSumSelect, sqlerror.ERWrongValueCount, sqlerror.ERTooLongIdent, sqlerror.ERDupFieldName, sqlerror.ERDupKeyName, sqlerror.ERWrongFieldSpec, sqlerror.ERParseError, + sqlerror.EREmptyQuery, sqlerror.ERNonUniqTable, sqlerror.ERInvalidDefault, sqlerror.ERMultiplePriKey, sqlerror.ERTooManyKeys, sqlerror.ERTooManyKeyParts, sqlerror.ERTooLongKey, + sqlerror.ERKeyColumnDoesNotExist, sqlerror.ERBlobUsedAsKey, sqlerror.ERTooBigFieldLength, sqlerror.ERWrongAutoKey, sqlerror.ERWrongFieldTerminators, sqlerror.ERBlobsAndNoTerminated, + sqlerror.ERTextFileNotReadable, sqlerror.ERWrongSubKey, sqlerror.ERCantRemoveAllFields, sqlerror.ERUpdateTableUsed, sqlerror.ERNoTablesUsed, sqlerror.ERTooBigSet, + sqlerror.ERBlobCantHaveDefault, sqlerror.ERWrongDbName, sqlerror.ERWrongTableName, sqlerror.ERUnknownProcedure, sqlerror.ERWrongParamCountToProcedure, + sqlerror.ERWrongParametersToProcedure, sqlerror.ERFieldSpecifiedTwice, sqlerror.ERInvalidGroupFuncUse, sqlerror.ERTableMustHaveColumns, sqlerror.ERUnknownCharacterSet, + sqlerror.ERTooManyTables, sqlerror.ERTooManyFields, sqlerror.ERTooBigRowSize, sqlerror.ERWrongOuterJoin, sqlerror.ERNullColumnInIndex, sqlerror.ERFunctionNotDefined, 
+ sqlerror.ERWrongValueCountOnRow, sqlerror.ERInvalidUseOfNull, sqlerror.ERRegexpError, sqlerror.ERMixOfGroupFuncAndFields, sqlerror.ERIllegalGrantForTable, sqlerror.ERSyntaxError, + sqlerror.ERWrongColumnName, sqlerror.ERWrongKeyColumn, sqlerror.ERBlobKeyWithoutLength, sqlerror.ERPrimaryCantHaveNull, sqlerror.ERTooManyRows, sqlerror.ERUnknownSystemVariable, + sqlerror.ERSetConstantsOnly, sqlerror.ERWrongArguments, sqlerror.ERWrongUsage, sqlerror.ERWrongNumberOfColumnsInSelect, sqlerror.ERDupArgument, sqlerror.ERLocalVariable, + sqlerror.ERGlobalVariable, sqlerror.ERWrongValueForVar, sqlerror.ERWrongTypeForVar, sqlerror.ERVarCantBeRead, sqlerror.ERCantUseOptionHere, sqlerror.ERIncorrectGlobalLocalVar, + sqlerror.ERWrongFKDef, sqlerror.ERKeyRefDoNotMatchTableRef, sqlerror.ERCyclicReference, sqlerror.ERCollationCharsetMismatch, sqlerror.ERCantAggregate2Collations, + sqlerror.ERCantAggregate3Collations, sqlerror.ERCantAggregateNCollations, sqlerror.ERVariableIsNotStruct, sqlerror.ERUnknownCollation, sqlerror.ERWrongNameForIndex, + sqlerror.ERWrongNameForCatalog, sqlerror.ERBadFTColumn, sqlerror.ERTruncatedWrongValue, sqlerror.ERTooMuchAutoTimestampCols, sqlerror.ERInvalidOnUpdate, sqlerror.ERUnknownTimeZone, + sqlerror.ERInvalidCharacterString, sqlerror.ERIllegalReference, sqlerror.ERDerivedMustHaveAlias, sqlerror.ERTableNameNotAllowedHere, sqlerror.ERDataTooLong, sqlerror.ERDataOutOfRange, + sqlerror.ERTruncatedWrongValueForField, sqlerror.ERIllegalValueForType: errCode = vtrpcpb.Code_INVALID_ARGUMENT - case mysql.ERSpecifiedAccessDenied: + case sqlerror.ERSpecifiedAccessDenied: errCode = vtrpcpb.Code_PERMISSION_DENIED // This code is also utilized for Google internal failover error code. if strings.Contains(err.Error(), "failover in progress") { errCode = vtrpcpb.Code_FAILED_PRECONDITION } - case mysql.CRServerLost: + case sqlerror.CRServerLost: // Query was killed. 
errCode = vtrpcpb.Code_CANCELED } @@ -1702,6 +1714,12 @@ func (tsv *TabletServer) TopoServer() *topo.Server { return tsv.topoServer } +// CheckThrottler issues a self check +func (tsv *TabletServer) CheckThrottler(ctx context.Context, appName string, flags *throttle.CheckFlags) *throttle.CheckResult { + r := tsv.lagThrottler.CheckByType(ctx, appName, "", flags, throttle.ThrottleCheckSelf) + return r +} + // HandlePanic is part of the queryservice.QueryService interface func (tsv *TabletServer) HandlePanic(err *error) { if x := recover(); x != nil { @@ -1709,8 +1727,10 @@ func (tsv *TabletServer) HandlePanic(err *error) { } } -// Close is a no-op. +// Close shuts down any remaining go routines func (tsv *TabletServer) Close(ctx context.Context) error { + tsv.sm.closeAll() + tsv.stats.Stop() return nil } @@ -1843,7 +1863,7 @@ func (tsv *TabletServer) registerThrottlerThrottleAppHandler() { http.Error(w, fmt.Sprintf("not ok: %v", err), http.StatusInternalServerError) return } - var ratio = 1.0 + var ratio = throttle.DefaultThrottleRatio if ratioParam := r.URL.Query().Get("ratio"); ratioParam != "" { ratio, err = strconv.ParseFloat(ratioParam, 64) if err != nil { @@ -1851,7 +1871,7 @@ func (tsv *TabletServer) registerThrottlerThrottleAppHandler() { return } } - appThrottle := tsv.lagThrottler.ThrottleApp(appName, time.Now().Add(d), ratio) + appThrottle := tsv.lagThrottler.ThrottleApp(appName, time.Now().Add(d), ratio, false) w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(appThrottle) @@ -1890,13 +1910,6 @@ func (tsv *TabletServer) EnableHeartbeat(enabled bool) { tsv.rt.EnableHeartbeat(enabled) } -// EnableThrottler forces throttler to be on or off. -// When throttler is off, it responds to all check requests with HTTP 200 OK -// Only to be used for testing. -func (tsv *TabletServer) EnableThrottler(enabled bool) { - tsv.Config().EnableLagThrottler = enabled -} - // SetTracking forces tracking to be on or off. 
// Only to be used for testing. func (tsv *TabletServer) SetTracking(enabled bool) { @@ -1947,11 +1960,6 @@ func (tsv *TabletServer) TxPoolSize() int { return tsv.te.txPool.scp.Capacity() } -// SetQueryPlanCacheCap changes the plan cache capacity to the specified value. -func (tsv *TabletServer) SetQueryPlanCacheCap(val int) { - tsv.qe.SetQueryPlanCacheCap(val) -} - // QueryPlanCacheCap returns the plan cache capacity func (tsv *TabletServer) QueryPlanCacheCap() int { return tsv.qe.QueryPlanCacheCap() @@ -1962,11 +1970,6 @@ func (tsv *TabletServer) QueryPlanCacheLen() int { return tsv.qe.QueryPlanCacheLen() } -// QueryPlanCacheWait waits until the query plan cache has processed all recent queries -func (tsv *TabletServer) QueryPlanCacheWait() { - tsv.qe.plans.Wait() -} - // SetMaxResultSize changes the max result size to the specified value. func (tsv *TabletServer) SetMaxResultSize(val int) { tsv.qe.maxResultSize.Store(int64(val)) diff --git a/go/vt/vttablet/tabletserver/tabletserver_test.go b/go/vt/vttablet/tabletserver/tabletserver_test.go index b26baa35cac..d2fb10e5a77 100644 --- a/go/vt/vttablet/tabletserver/tabletserver_test.go +++ b/go/vt/vttablet/tabletserver/tabletserver_test.go @@ -21,17 +21,16 @@ import ( "errors" "fmt" "io" - "math/rand" "net/http" "net/http/httptest" "os" - "reflect" "strings" "sync" "syscall" "testing" "time" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/callerid" @@ -59,7 +58,9 @@ import ( ) func TestTabletServerHealthz(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") defer tsv.StopService() defer db.Close() @@ -86,7 +87,9 @@ func TestTabletServerHealthz(t *testing.T) { } func TestTabletServerHealthzNotConnected(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv 
:= setupTabletServerTest(t, ctx, "") defer tsv.StopService() defer db.Close() @@ -115,7 +118,9 @@ func TestTabletServerHealthzNotConnected(t *testing.T) { } func TestBeginOnReplica(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") defer tsv.StopService() defer db.Close() @@ -142,8 +147,10 @@ func TestBeginOnReplica(t *testing.T) { } func TestTabletServerPrimaryToReplica(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // Reuse code from tx_executor_test. - _, tsv, db := newTestTxExecutor(t) + _, tsv, db := newTestTxExecutor(t, ctx) defer tsv.StopService() defer db.Close() target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} @@ -181,8 +188,10 @@ func TestTabletServerPrimaryToReplica(t *testing.T) { } func TestTabletServerRedoLogIsKeptBetweenRestarts(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // Reuse code from tx_executor_test. - _, tsv, db := newTestTxExecutor(t) + _, tsv, db := newTestTxExecutor(t, ctx) defer tsv.StopService() defer db.Close() tsv.SetServingType(topodatapb.TabletType_REPLICA, time.Time{}, true, "") @@ -256,9 +265,7 @@ func TestTabletServerRedoLogIsKeptBetweenRestarts(t *testing.T) { want = []string{"update test_table set `name` = 2 where pk = 1 limit 10001"} utils.MustMatch(t, want, got, "Prepared queries") wantFailed := map[string]error{"a:b:20": errPrepFailed} - if !reflect.DeepEqual(tsv.te.preparedPool.reserved, wantFailed) { - t.Errorf("Failed dtids: %v, want %v", tsv.te.preparedPool.reserved, wantFailed) - } + utils.MustMatch(t, tsv.te.preparedPool.reserved, wantFailed, fmt.Sprintf("Failed dtids: %v, want %v", tsv.te.preparedPool.reserved, wantFailed)) // Verify last id got adjusted. 
assert.EqualValues(t, 20, tsv.te.txPool.scp.lastID.Load(), "tsv.te.txPool.lastID.Get()") turnOffTxEngine() @@ -266,7 +273,9 @@ func TestTabletServerRedoLogIsKeptBetweenRestarts(t *testing.T) { } func TestTabletServerCreateTransaction(t *testing.T) { - _, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, tsv, db := newTestTxExecutor(t, ctx) defer tsv.StopService() defer db.Close() target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} @@ -281,43 +290,49 @@ func TestTabletServerCreateTransaction(t *testing.T) { } func TestTabletServerStartCommit(t *testing.T) { - _, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, tsv, db := newTestTxExecutor(t, ctx) defer tsv.StopService() defer db.Close() target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} commitTransition := fmt.Sprintf("update _vt.dt_state set state = %d where dtid = 'aa' and state = %d", int(querypb.TransactionState_COMMIT), int(querypb.TransactionState_PREPARE)) db.AddQuery(commitTransition, &sqltypes.Result{RowsAffected: 1}) - txid := newTxForPrep(tsv) + txid := newTxForPrep(ctx, tsv) err := tsv.StartCommit(ctx, &target, txid, "aa") require.NoError(t, err) db.AddQuery(commitTransition, &sqltypes.Result{}) - txid = newTxForPrep(tsv) + txid = newTxForPrep(ctx, tsv) err = tsv.StartCommit(ctx, &target, txid, "aa") assert.EqualError(t, err, "could not transition to COMMIT: aa", "Prepare err") } func TestTabletserverSetRollback(t *testing.T) { - _, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, tsv, db := newTestTxExecutor(t, ctx) defer tsv.StopService() defer db.Close() target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} rollbackTransition := fmt.Sprintf("update _vt.dt_state set state = %d where dtid = 'aa' and state = %d", int(querypb.TransactionState_ROLLBACK), 
int(querypb.TransactionState_PREPARE)) db.AddQuery(rollbackTransition, &sqltypes.Result{RowsAffected: 1}) - txid := newTxForPrep(tsv) + txid := newTxForPrep(ctx, tsv) err := tsv.SetRollback(ctx, &target, "aa", txid) require.NoError(t, err) db.AddQuery(rollbackTransition, &sqltypes.Result{}) - txid = newTxForPrep(tsv) + txid = newTxForPrep(ctx, tsv) err = tsv.SetRollback(ctx, &target, "aa", txid) assert.EqualError(t, err, "could not transition to ROLLBACK: aa", "Prepare err") } func TestTabletServerReadTransaction(t *testing.T) { - _, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, tsv, db := newTestTxExecutor(t, ctx) defer tsv.StopService() defer db.Close() target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} @@ -410,7 +425,9 @@ func TestTabletServerReadTransaction(t *testing.T) { } func TestTabletServerConcludeTransaction(t *testing.T) { - _, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, tsv, db := newTestTxExecutor(t, ctx) defer tsv.StopService() defer db.Close() target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} @@ -422,14 +439,16 @@ func TestTabletServerConcludeTransaction(t *testing.T) { } func TestTabletServerBeginFail(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config := tabletenv.NewDefaultConfig() config.TxPool.Size = 1 - db, tsv := setupTabletServerTestCustom(t, config, "") + db, tsv := setupTabletServerTestCustom(t, ctx, config, "") defer tsv.StopService() defer db.Close() target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Nanosecond) defer cancel() tsv.Begin(ctx, &target, nil) _, err := tsv.Begin(ctx, &target, nil) @@ -437,7 +456,9 @@ func TestTabletServerBeginFail(t *testing.T) { } func 
TestTabletServerCommitTransaction(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") defer tsv.StopService() defer db.Close() @@ -462,7 +483,9 @@ func TestTabletServerCommitTransaction(t *testing.T) { } func TestTabletServerCommiRollbacktFail(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") defer tsv.StopService() defer db.Close() @@ -475,7 +498,9 @@ func TestTabletServerCommiRollbacktFail(t *testing.T) { } func TestTabletServerRollback(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") defer tsv.StopService() defer db.Close() @@ -503,8 +528,10 @@ func TestTabletServerRollback(t *testing.T) { } func TestTabletServerPrepare(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // Reuse code from tx_executor_test. - _, tsv, db := newTestTxExecutor(t) + _, tsv, db := newTestTxExecutor(t, ctx) defer tsv.StopService() defer db.Close() target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} @@ -518,8 +545,10 @@ func TestTabletServerPrepare(t *testing.T) { } func TestTabletServerCommitPrepared(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // Reuse code from tx_executor_test. 
- _, tsv, db := newTestTxExecutor(t) + _, tsv, db := newTestTxExecutor(t, ctx) defer tsv.StopService() defer db.Close() target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} @@ -565,7 +594,9 @@ func TestSmallerTimeout(t *testing.T) { } func TestTabletServerReserveConnection(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") tsv.config.EnableSettingsPool = false defer tsv.StopService() defer db.Close() @@ -588,7 +619,9 @@ func TestTabletServerReserveConnection(t *testing.T) { } func TestTabletServerExecNonExistentConnection(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") defer tsv.StopService() defer db.Close() @@ -602,7 +635,9 @@ func TestTabletServerExecNonExistentConnection(t *testing.T) { } func TestTabletServerReleaseNonExistentConnection(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") defer tsv.StopService() defer db.Close() @@ -615,7 +650,9 @@ func TestTabletServerReleaseNonExistentConnection(t *testing.T) { } func TestMakeSureToCloseDbConnWhenBeginQueryFails(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") defer tsv.StopService() defer db.Close() @@ -629,7 +666,9 @@ func TestMakeSureToCloseDbConnWhenBeginQueryFails(t *testing.T) { } func TestTabletServerReserveAndBeginCommit(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") tsv.config.EnableSettingsPool = false defer tsv.StopService() defer db.Close() 
@@ -683,8 +722,10 @@ func TestTabletServerReserveAndBeginCommit(t *testing.T) { } func TestTabletServerRollbackPrepared(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // Reuse code from tx_executor_test. - _, tsv, db := newTestTxExecutor(t) + _, tsv, db := newTestTxExecutor(t, ctx) defer tsv.StopService() defer db.Close() target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} @@ -699,7 +740,9 @@ func TestTabletServerRollbackPrepared(t *testing.T) { } func TestTabletServerStreamExecute(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") defer tsv.StopService() defer db.Close() @@ -723,7 +766,9 @@ func TestTabletServerStreamExecute(t *testing.T) { } func TestTabletServerStreamExecuteComments(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") defer tsv.StopService() defer db.Close() @@ -761,7 +806,9 @@ func TestTabletServerStreamExecuteComments(t *testing.T) { } func TestTabletServerBeginStreamExecute(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") defer tsv.StopService() defer db.Close() @@ -789,7 +836,9 @@ func TestTabletServerBeginStreamExecute(t *testing.T) { } func TestTabletServerBeginStreamExecuteWithError(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") defer tsv.StopService() defer db.Close() @@ -816,6 +865,8 @@ func TestTabletServerBeginStreamExecuteWithError(t *testing.T) { } func TestSerializeTransactionsSameRow(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + 
defer cancel() // This test runs three transaction in parallel: // tx1 | tx2 | tx3 // However, tx1 and tx2 have the same WHERE clause (i.e. target the same row) @@ -828,7 +879,7 @@ func TestSerializeTransactionsSameRow(t *testing.T) { config.HotRowProtection.MaxConcurrency = 1 // Reduce the txpool to 2 because we should never consume more than two slots. config.TxPool.Size = 2 - db, tsv := setupTabletServerTestCustom(t, config, "") + db, tsv := setupTabletServerTestCustom(t, ctx, config, "") defer tsv.StopService() defer db.Close() @@ -929,11 +980,13 @@ func TestSerializeTransactionsSameRow(t *testing.T) { } func TestDMLQueryWithoutWhereClause(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config := tabletenv.NewDefaultConfig() config.HotRowProtection.Mode = tabletenv.Enable config.HotRowProtection.MaxConcurrency = 1 config.TxPool.Size = 2 - db, tsv := setupTabletServerTestCustom(t, config, "") + db, tsv := setupTabletServerTestCustom(t, ctx, config, "") defer tsv.StopService() defer db.Close() @@ -949,6 +1002,8 @@ func TestDMLQueryWithoutWhereClause(t *testing.T) { } func TestSerializeTransactionsSameRow_ConcurrentTransactions(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // This test runs three transaction in parallel: // tx1 | tx2 | tx3 // Out of these three, two can run in parallel because we increased the @@ -959,7 +1014,7 @@ func TestSerializeTransactionsSameRow_ConcurrentTransactions(t *testing.T) { config.HotRowProtection.MaxConcurrency = 2 // Reduce the txpool to 2 because we should never consume more than two slots. 
config.TxPool.Size = 2 - db, tsv := setupTabletServerTestCustom(t, config, "") + db, tsv := setupTabletServerTestCustom(t, ctx, config, "") defer tsv.StopService() defer db.Close() @@ -1084,6 +1139,8 @@ func waitForTxSerializationPendingQueries(tsv *TabletServer, key string, i int) } func TestSerializeTransactionsSameRow_TooManyPendingRequests(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // This test is similar to TestSerializeTransactionsSameRow, but tests only // that there must not be too many pending BeginExecute() requests which are // serialized. @@ -1093,7 +1150,7 @@ func TestSerializeTransactionsSameRow_TooManyPendingRequests(t *testing.T) { config.HotRowProtection.Mode = tabletenv.Enable config.HotRowProtection.MaxQueueSize = 1 config.HotRowProtection.MaxConcurrency = 1 - db, tsv := setupTabletServerTestCustom(t, config, "") + db, tsv := setupTabletServerTestCustom(t, ctx, config, "") defer tsv.StopService() defer db.Close() @@ -1165,6 +1222,8 @@ func TestSerializeTransactionsSameRow_TooManyPendingRequests(t *testing.T) { } func TestSerializeTransactionsSameRow_RequestCanceled(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // This test is similar to TestSerializeTransactionsSameRow, but tests only // that a queued request unblocks itself when its context is done. 
// @@ -1174,7 +1233,7 @@ func TestSerializeTransactionsSameRow_RequestCanceled(t *testing.T) { config := tabletenv.NewDefaultConfig() config.HotRowProtection.Mode = tabletenv.Enable config.HotRowProtection.MaxConcurrency = 1 - db, tsv := setupTabletServerTestCustom(t, config, "") + db, tsv := setupTabletServerTestCustom(t, ctx, config, "") defer tsv.StopService() defer db.Close() @@ -1282,7 +1341,9 @@ func TestSerializeTransactionsSameRow_RequestCanceled(t *testing.T) { } func TestMessageStream(t *testing.T) { - _, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} @@ -1308,7 +1369,9 @@ func TestMessageStream(t *testing.T) { } func TestCheckMySQLGauge(t *testing.T) { - _, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() @@ -1336,7 +1399,9 @@ func TestCheckMySQLGauge(t *testing.T) { } func TestMessageAck(t *testing.T) { - _, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} @@ -1365,7 +1430,9 @@ func TestMessageAck(t *testing.T) { } func TestRescheduleMessages(t *testing.T) { - _, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} @@ -1389,7 +1456,9 @@ func TestRescheduleMessages(t *testing.T) { } func TestPurgeMessages(t *testing.T) { - _, tsv, db := newTestTxExecutor(t) + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + _, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} @@ -1414,56 +1483,15 @@ func TestPurgeMessages(t *testing.T) { } func TestHandleExecUnknownError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() logStats := tabletenv.NewLogStats(ctx, "TestHandleExecError") config := tabletenv.NewDefaultConfig() - tsv := NewTabletServer("TabletServerTest", config, memorytopo.NewServer(""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) defer tsv.handlePanicAndSendLogStats("select * from test_table", nil, logStats) panic("unknown exec error") } -// TestHandlePanicAndSendLogStatsMessageTruncation tests that when an error truncation -// length is set and a panic occurs, the code in handlePanicAndSendLogStats will -// truncate the error text in logs, but will not truncate the error text in the -// error value. 
-func TestHandlePanicAndSendLogStatsMessageTruncation(t *testing.T) { - tl := newTestLogger() - defer tl.Close() - logStats := tabletenv.NewLogStats(ctx, "TestHandlePanicAndSendLogStatsMessageTruncation") - db, tsv := setupTabletServerTest(t, "") - defer tsv.StopService() - defer db.Close() - - longSql := "select * from test_table_loooooooooooooooooooooooooooooooooooong" - longBv := map[string]*querypb.BindVariable{ - "bv1": sqltypes.Int64BindVariable(1111111111), - "bv2": sqltypes.Int64BindVariable(2222222222), - "bv3": sqltypes.Int64BindVariable(3333333333), - "bv4": sqltypes.Int64BindVariable(4444444444), - } - origTruncateErrLen := sqlparser.GetTruncateErrLen() - sqlparser.SetTruncateErrLen(32) - defer sqlparser.SetTruncateErrLen(origTruncateErrLen) - - defer func() { - err := logStats.Error - want := "Uncaught panic for Sql: \"select * from test_table_loooooooooooooooooooooooooooooooooooong\", BindVars: {bv1: \"type:INT64 value:\\\"1111111111\\\"\"bv2: \"type:INT64 value:\\\"2222222222\\\"\"bv3: \"type:INT64 value:\\\"3333333333\\\"\"bv4: \"type:INT64 value:\\\"4444444444\\\"\"}" - require.Error(t, err) - assert.Contains(t, err.Error(), want) - want = "Uncaught panic for Sql: \"select * from test_t [TRUNCATED]\", BindVars: {bv1: \"typ [TRUNCATED]" - gotWhatWeWant := false - for _, log := range tl.getLogs() { - if strings.HasPrefix(log, want) { - gotWhatWeWant = true - break - } - } - assert.True(t, gotWhatWeWant) - }() - - defer tsv.handlePanicAndSendLogStats(longSql, longBv, logStats) - panic("panic from TestHandlePanicAndSendLogStatsMessageTruncation") -} - func TestQueryAsString(t *testing.T) { longSql := "select * from test_table_loooooooooooooooooooooooooooooooooooong" longBv := map[string]*querypb.BindVariable{ @@ -1576,8 +1604,10 @@ func (tl *testLogger) getLogs() []string { } func TestHandleExecTabletError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config := tabletenv.NewDefaultConfig() - tsv := 
NewTabletServer("TabletServerTest", config, memorytopo.NewServer(""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError( @@ -1597,15 +1627,17 @@ func TestHandleExecTabletError(t *testing.T) { } func TestTerseErrors(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config := tabletenv.NewDefaultConfig() config.TerseErrors = true config.SanitizeLogMessages = false - tsv := NewTabletServer("TabletServerTest", config, memorytopo.NewServer(""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) tl := newTestLogger() defer tl.Close() sql := "select * from test_table where xyz = :vtg1 order by abc desc" - sqlErr := mysql.NewSQLError(10, "HY000", "sensitive message") + sqlErr := sqlerror.NewSQLError(10, "HY000", "sensitive message") sqlErr.Query = "select * from test_table where xyz = 'this is kinda long eh'" err := tsv.convertAndLogError( ctx, @@ -1629,15 +1661,17 @@ func TestTerseErrors(t *testing.T) { } func TestSanitizeLogMessages(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config := tabletenv.NewDefaultConfig() config.TerseErrors = false config.SanitizeLogMessages = true - tsv := NewTabletServer("TabletServerTest", config, memorytopo.NewServer(""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) tl := newTestLogger() defer tl.Close() sql := "select * from test_table where xyz = :vtg1 order by abc desc" - sqlErr := mysql.NewSQLError(10, "HY000", "sensitive message") + sqlErr := sqlerror.NewSQLError(10, "HY000", "sensitive message") sqlErr.Query = "select * from test_table where xyz = 'this is pretty rad my doo, getting swole'" err := 
tsv.convertAndLogError( ctx, @@ -1661,9 +1695,11 @@ func TestSanitizeLogMessages(t *testing.T) { } func TestTerseErrorsNonSQLError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config := tabletenv.NewDefaultConfig() config.TerseErrors = true - tsv := NewTabletServer("TabletServerTest", config, memorytopo.NewServer(""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError( @@ -1683,10 +1719,12 @@ func TestTerseErrorsNonSQLError(t *testing.T) { } func TestSanitizeLogMessagesNonSQLError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config := tabletenv.NewDefaultConfig() config.TerseErrors = false config.SanitizeLogMessages = true - tsv := NewTabletServer("TabletServerTest", config, memorytopo.NewServer(""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError( @@ -1706,14 +1744,16 @@ func TestSanitizeLogMessagesNonSQLError(t *testing.T) { } func TestSanitizeMessagesBindVars(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config := tabletenv.NewDefaultConfig() config.TerseErrors = true config.SanitizeLogMessages = true - tsv := NewTabletServer("TabletServerTest", config, memorytopo.NewServer(""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) tl := newTestLogger() defer tl.Close() - sqlErr := mysql.NewSQLError(10, "HY000", "sensitive message") + sqlErr := sqlerror.NewSQLError(10, "HY000", "sensitive message") sqlErr.Query = "select * from test_table where a = 1" err := tsv.convertAndLogError( @@ -1735,10 +1775,12 @@ func 
TestSanitizeMessagesBindVars(t *testing.T) { } func TestSanitizeMessagesNoBindVars(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config := tabletenv.NewDefaultConfig() config.TerseErrors = true config.SanitizeLogMessages = true - tsv := NewTabletServer("TabletServerTest", config, memorytopo.NewServer(""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError(ctx, "", nil, vterrors.Errorf(vtrpcpb.Code_DEADLINE_EXCEEDED, "sensitive message"), nil) @@ -1752,9 +1794,11 @@ func TestSanitizeMessagesNoBindVars(t *testing.T) { } func TestTruncateErrorLen(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config := tabletenv.NewDefaultConfig() config.TruncateErrorLen = 32 - tsv := NewTabletServer("TabletServerTest", config, memorytopo.NewServer(""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError( @@ -1774,17 +1818,19 @@ func TestTruncateErrorLen(t *testing.T) { } func TestTruncateMessages(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config := tabletenv.NewDefaultConfig() config.TerseErrors = false // Sanitize the log messages, which means that the bind vars are omitted config.SanitizeLogMessages = true - tsv := NewTabletServer("TabletServerTest", config, memorytopo.NewServer(""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) tl := newTestLogger() defer tl.Close() sqlparser.SetTruncateErrLen(52) sql := "select * from test_table where xyz = :vtg1 order by abc desc" - sqlErr := mysql.NewSQLError(10, "HY000", "sensitive message") + 
sqlErr := sqlerror.NewSQLError(10, "HY000", "sensitive message") sqlErr.Query = "select * from test_table where xyz = 'this is kinda long eh'" err := tsv.convertAndLogError( ctx, @@ -1830,14 +1876,16 @@ func TestTruncateMessages(t *testing.T) { } func TestTerseErrorsIgnoreFailoverInProgress(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config := tabletenv.NewDefaultConfig() config.TerseErrors = true - tsv := NewTabletServer("TabletServerTest", config, memorytopo.NewServer(""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError(ctx, "select * from test_table where id = :a", map[string]*querypb.BindVariable{"a": sqltypes.Int64BindVariable(1)}, - mysql.NewSQLError(1227, mysql.SSClientError, "failover in progress"), + sqlerror.NewSQLError(1227, sqlerror.SSClientError, "failover in progress"), nil, ) if got, want := err.Error(), "failover in progress (errno 1227) (sqlstate 42000)"; !strings.HasPrefix(got, want) { @@ -1870,9 +1918,11 @@ var aclJSON2 = `{ }` func TestACLHUP(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tableacl.Register("simpleacl", &simpleacl.Factory{}) config := tabletenv.NewDefaultConfig() - tsv := NewTabletServer("TabletServerTest", config, memorytopo.NewServer(""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) f, err := os.CreateTemp("", "tableacl") require.NoError(t, err) @@ -1915,7 +1965,9 @@ func TestACLHUP(t *testing.T) { } func TestConfigChanges(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") defer tsv.StopService() defer db.Close() @@ -1954,14 +2006,6 @@ func 
TestConfigChanges(t *testing.T) { t.Errorf("tsv.te.Pool().Timeout: %v, want %v", val, newDuration) } - tsv.SetQueryPlanCacheCap(newSize) - if val := tsv.QueryPlanCacheCap(); val != newSize { - t.Errorf("QueryPlanCacheCap: %d, want %d", val, newSize) - } - if val := int(tsv.qe.QueryPlanCacheCap()); val != newSize { - t.Errorf("tsv.qe.QueryPlanCacheCap: %d, want %d", val, newSize) - } - tsv.SetMaxResultSize(newSize) if val := tsv.MaxResultSize(); val != newSize { t.Errorf("MaxResultSize: %d, want %d", val, newSize) @@ -1980,7 +2024,9 @@ func TestConfigChanges(t *testing.T) { } func TestReserveBeginExecute(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") tsv.config.EnableSettingsPool = false defer tsv.StopService() defer db.Close() @@ -2005,7 +2051,9 @@ func TestReserveBeginExecute(t *testing.T) { } func TestReserveExecute_WithoutTx(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") tsv.config.EnableSettingsPool = false defer tsv.StopService() defer db.Close() @@ -2028,7 +2076,9 @@ func TestReserveExecute_WithoutTx(t *testing.T) { } func TestReserveExecute_WithTx(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") tsv.config.EnableSettingsPool = false defer tsv.StopService() defer db.Close() @@ -2088,7 +2138,9 @@ func TestRelease(t *testing.T) { name += " reserve" } t.Run(name, func(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") tsv.config.EnableSettingsPool = false defer tsv.StopService() defer db.Close() @@ -2131,7 +2183,9 @@ func TestRelease(t 
*testing.T) { } func TestReserveStats(t *testing.T) { - db, tsv := setupTabletServerTest(t, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "") tsv.config.EnableSettingsPool = false defer tsv.StopService() defer db.Close() @@ -2141,7 +2195,7 @@ func TestReserveStats(t *testing.T) { callerID := &querypb.VTGateCallerID{ Username: "test", } - ctx := callerid.NewContext(context.Background(), nil, callerID) + ctx = callerid.NewContext(ctx, nil, callerID) // Starts reserved connection and transaction rbeState, _, err := tsv.ReserveBeginExecute(ctx, &target, nil, nil, "select 42", nil, &querypb.ExecuteOptions{}) @@ -2186,7 +2240,9 @@ func TestReserveStats(t *testing.T) { } func TestDatabaseNameReplaceByKeyspaceNameExecuteMethod(t *testing.T) { - db, tsv := setupTabletServerTest(t, "keyspaceName") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "keyspaceName") setDBName(db, tsv, "databaseInMysql") defer tsv.StopService() defer db.Close() @@ -2222,7 +2278,9 @@ func TestDatabaseNameReplaceByKeyspaceNameExecuteMethod(t *testing.T) { } func TestDatabaseNameReplaceByKeyspaceNameStreamExecuteMethod(t *testing.T) { - db, tsv := setupTabletServerTest(t, "keyspaceName") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "keyspaceName") setDBName(db, tsv, "databaseInMysql") defer tsv.StopService() defer db.Close() @@ -2259,7 +2317,9 @@ func TestDatabaseNameReplaceByKeyspaceNameStreamExecuteMethod(t *testing.T) { } func TestDatabaseNameReplaceByKeyspaceNameBeginExecuteMethod(t *testing.T) { - db, tsv := setupTabletServerTest(t, "keyspaceName") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "keyspaceName") setDBName(db, tsv, "databaseInMysql") defer tsv.StopService() defer db.Close() @@ -2298,7 +2358,9 @@ 
func setDBName(db *fakesqldb.DB, tsv *TabletServer, s string) { } func TestDatabaseNameReplaceByKeyspaceNameReserveExecuteMethod(t *testing.T) { - db, tsv := setupTabletServerTest(t, "keyspaceName") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "keyspaceName") tsv.config.EnableSettingsPool = false setDBName(db, tsv, "databaseInMysql") defer tsv.StopService() @@ -2333,7 +2395,9 @@ func TestDatabaseNameReplaceByKeyspaceNameReserveExecuteMethod(t *testing.T) { } func TestDatabaseNameReplaceByKeyspaceNameReserveBeginExecuteMethod(t *testing.T) { - db, tsv := setupTabletServerTest(t, "keyspaceName") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(t, ctx, "keyspaceName") setDBName(db, tsv, "databaseInMysql") defer tsv.StopService() defer db.Close() @@ -2366,15 +2430,15 @@ func TestDatabaseNameReplaceByKeyspaceNameReserveBeginExecuteMethod(t *testing.T require.NoError(t, err) } -func setupTabletServerTest(t *testing.T, keyspaceName string) (*fakesqldb.DB, *TabletServer) { +func setupTabletServerTest(t testing.TB, ctx context.Context, keyspaceName string) (*fakesqldb.DB, *TabletServer) { config := tabletenv.NewDefaultConfig() - return setupTabletServerTestCustom(t, config, keyspaceName) + return setupTabletServerTestCustom(t, ctx, config, keyspaceName) } -func setupTabletServerTestCustom(t *testing.T, config *tabletenv.TabletConfig, keyspaceName string) (*fakesqldb.DB, *TabletServer) { +func setupTabletServerTestCustom(t testing.TB, ctx context.Context, config *tabletenv.TabletConfig, keyspaceName string) (*fakesqldb.DB, *TabletServer) { db := setupFakeDB(t) sidecardb.AddSchemaInitQueries(db, true) - tsv := NewTabletServer("TabletServerTest", config, memorytopo.NewServer(""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) require.Equal(t, 
StateNotConnected, tsv.sm.State()) dbcfgs := newDBConfigs(db) target := &querypb.Target{ @@ -2386,7 +2450,7 @@ func setupTabletServerTestCustom(t *testing.T, config *tabletenv.TabletConfig, k return db, tsv } -func setupFakeDB(t *testing.T) *fakesqldb.DB { +func setupFakeDB(t testing.TB) *fakesqldb.DB { db := fakesqldb.New(t) addTabletServerSupportedQueries(db) db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{ @@ -2562,7 +2626,3 @@ func addTabletServerSupportedQueries(db *fakesqldb.DB) { }}, }) } - -func init() { - rand.Seed(time.Now().UnixNano()) -} diff --git a/go/vt/vttablet/tabletserver/throttle/base/app_throttle.go b/go/vt/vttablet/tabletserver/throttle/base/app_throttle.go index 0633e99c95c..36d57bdc973 100644 --- a/go/vt/vttablet/tabletserver/throttle/base/app_throttle.go +++ b/go/vt/vttablet/tabletserver/throttle/base/app_throttle.go @@ -26,14 +26,16 @@ type AppThrottle struct { AppName string ExpireAt time.Time Ratio float64 + Exempt bool } // NewAppThrottle creates an AppThrottle struct -func NewAppThrottle(appName string, expireAt time.Time, ratio float64) *AppThrottle { +func NewAppThrottle(appName string, expireAt time.Time, ratio float64, exempt bool) *AppThrottle { result := &AppThrottle{ AppName: appName, ExpireAt: expireAt, Ratio: ratio, + Exempt: exempt, } return result } diff --git a/go/vt/vttablet/tabletserver/throttle/client.go b/go/vt/vttablet/tabletserver/throttle/client.go index 30d6c79afdf..41888340b5a 100644 --- a/go/vt/vttablet/tabletserver/throttle/client.go +++ b/go/vt/vttablet/tabletserver/throttle/client.go @@ -52,7 +52,8 @@ type Client struct { checkType ThrottleCheckType flags CheckFlags - lastSuccessfulThrottle int64 + lastSuccessfulThrottleMu sync.Mutex + lastSuccessfulThrottle int64 } // NewProductionClient creates a client suitable for foreground/production jobs, which have normal priority. 
@@ -68,7 +69,7 @@ func NewProductionClient(throttler *Throttler, appName throttlerapp.Name, checkT } } -// NewBackgroundClient creates a client suitable for background jobs, which have low priority over productio ntraffic, +// NewBackgroundClient creates a client suitable for background jobs, which have low priority over production traffic, // e.g. migration, table pruning, vreplication func NewBackgroundClient(throttler *Throttler, appName throttlerapp.Name, checkType ThrottleCheckType) *Client { initThrottleTicker() @@ -96,6 +97,8 @@ func (c *Client) ThrottleCheckOK(ctx context.Context, overrideAppName throttlera // no throttler return true } + c.lastSuccessfulThrottleMu.Lock() + defer c.lastSuccessfulThrottleMu.Unlock() if c.lastSuccessfulThrottle >= atomic.LoadInt64(&throttleTicks) { // if last check was OK just very recently there is no need to check again return true diff --git a/go/vt/vttablet/tabletserver/throttle/mysql/probe.go b/go/vt/vttablet/tabletserver/throttle/mysql/probe.go index 5e0dd7fa1fa..53b835497b4 100644 --- a/go/vt/vttablet/tabletserver/throttle/mysql/probe.go +++ b/go/vt/vttablet/tabletserver/throttle/mysql/probe.go @@ -8,12 +8,15 @@ package mysql import ( "fmt" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) // Probe is the minimal configuration required to connect to a MySQL server type Probe struct { Key InstanceKey MetricQuery string + Tablet *topodatapb.Tablet TabletHost string TabletPort int CacheMillis int diff --git a/go/vt/vttablet/tabletserver/throttle/throttler.go b/go/vt/vttablet/tabletserver/throttle/throttler.go index c58422a8f91..6558b052c9a 100644 --- a/go/vt/vttablet/tabletserver/throttle/throttler.go +++ b/go/vt/vttablet/tabletserver/throttle/throttler.go @@ -24,12 +24,15 @@ import ( "github.com/patrickmn/go-cache" "github.com/spf13/pflag" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/textutil" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/log" + 
tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo" @@ -40,6 +43,7 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/config" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/mysql" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" + "vitess.io/vitess/go/vt/vttablet/tmclient" ) const ( @@ -55,9 +59,9 @@ const ( nonDeprioritizedAppMapExpiration = time.Second - dormantPeriod = time.Minute - defaultThrottleTTLMinutes = 60 - defaultThrottleRatio = 1.0 + dormantPeriod = time.Minute + DefaultAppThrottleDuration = time.Hour + DefaultThrottleRatio = 1.0 shardStoreName = "shard" selfStoreName = "self" @@ -67,12 +71,8 @@ const ( var ( // flag vars - throttleThreshold = 1 * time.Second - throttleTabletTypes = "replica" - throttleMetricQuery string - throttleMetricThreshold = math.MaxFloat64 - throttlerCheckAsCheckSelf = false - throttlerConfigViaTopo = true + defaultThrottleLagThreshold = 5 * time.Second + throttleTabletTypes = "replica" ) func init() { @@ -83,11 +83,17 @@ func init() { func registerThrottlerFlags(fs *pflag.FlagSet) { fs.StringVar(&throttleTabletTypes, "throttle_tablet_types", throttleTabletTypes, "Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' aways implicitly included") - fs.DurationVar(&throttleThreshold, "throttle_threshold", throttleThreshold, "Replication lag threshold for default lag throttling") - fs.StringVar(&throttleMetricQuery, "throttle_metrics_query", throttleMetricQuery, "Override default heartbeat/lag metric. Use either `SELECT` (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. 
Set -throttle_metrics_threshold respectively.") - fs.Float64Var(&throttleMetricThreshold, "throttle_metrics_threshold", throttleMetricThreshold, "Override default throttle threshold, respective to --throttle_metrics_query") - fs.BoolVar(&throttlerCheckAsCheckSelf, "throttle_check_as_check_self", throttlerCheckAsCheckSelf, "Should throttler/check return a throttler/check-self result (changes throttler behavior for writes)") - fs.BoolVar(&throttlerConfigViaTopo, "throttler-config-via-topo", throttlerConfigViaTopo, "When 'true', read config from topo service and ignore throttle_threshold, throttle_metrics_threshold, throttle_metrics_query, throttle_check_as_check_self") + fs.Duration("throttle_threshold", 0, "Replication lag threshold for default lag throttling") + fs.String("throttle_metrics_query", "", "Override default heartbeat/lag metric. Use either `SELECT` (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. Set -throttle_metrics_threshold respectively.") + fs.Float64("throttle_metrics_threshold", 0, "Override default throttle threshold, respective to --throttle_metrics_query") + fs.Bool("throttle_check_as_check_self", false, "Should throttler/check return a throttler/check-self result (changes throttler behavior for writes)") + fs.Bool("throttler-config-via-topo", false, "Deprecated, will be removed in v19. Assumed to be 'true'") + + fs.MarkDeprecated("throttle_threshold", "Replication lag threshold for default lag throttling") + fs.MarkDeprecated("throttle_metrics_query", "Override default heartbeat/lag metric. Use either `SELECT` (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. 
Set -throttle_metrics_threshold respectively.") + fs.MarkDeprecated("throttle_metrics_threshold", "Override default throttle threshold, respective to --throttle_metrics_query") + fs.MarkDeprecated("throttle_check_as_check_self", "Should throttler/check return a throttler/check-self result (changes throttler behavior for writes)") + fs.MarkDeprecated("throttler-config-via-topo", "Assumed to be 'true'") } var ( @@ -104,8 +110,12 @@ const ( ThrottleCheckSelf ) -func init() { - rand.Seed(time.Now().UnixNano()) +// throttlerTopoService represents the functionality we expect from a TopoServer, abstracted so that +// it can be mocked in unit tests +type throttlerTopoService interface { + GetTablet(ctx context.Context, alias *topodatapb.TabletAlias) (*topo.TabletInfo, error) + FindAllTabletAliasesInShard(ctx context.Context, keyspace, shard string) ([]*topodatapb.TabletAlias, error) + GetSrvKeyspace(ctx context.Context, cell, keyspace string) (*topodatapb.SrvKeyspace, error) } // Throttler is the main entity in the throttling mechanism. 
This service runs, probes, collects data, @@ -116,14 +126,14 @@ type Throttler struct { cell string check *ThrottlerCheck - isEnabled int64 - isLeader int64 - isOpen int64 + isEnabled atomic.Bool + isLeader atomic.Bool + isOpen atomic.Bool env tabletenv.Env pool *connpool.Pool tabletTypeFunc func() topodatapb.TabletType - ts *topo.Server + ts throttlerTopoService srvTopoServer srvtopo.Server heartbeatWriter heartbeat.HeartbeatWriter @@ -144,6 +154,7 @@ type Throttler struct { metricsQuery atomic.Value MetricsThreshold atomic.Uint64 + checkAsCheckSelf atomic.Bool mysqlClusterThresholds *cache.Cache aggregatedMetrics *cache.Cache @@ -155,6 +166,7 @@ type Throttler struct { initMutex sync.Mutex enableMutex sync.Mutex + cancelOpenContext context.CancelFunc cancelEnableContext context.CancelFunc throttledAppsMutex sync.Mutex watchSrvKeyspaceOnce sync.Once @@ -183,9 +195,6 @@ type ThrottlerStatus struct { // NewThrottler creates a Throttler func NewThrottler(env tabletenv.Env, srvTopoServer srvtopo.Server, ts *topo.Server, cell string, heartbeatWriter heartbeat.HeartbeatWriter, tabletTypeFunc func() topodatapb.TabletType) *Throttler { throttler := &Throttler{ - isLeader: 0, - isOpen: 0, - cell: cell, env: env, tabletTypeFunc: tabletTypeFunc, @@ -215,10 +224,7 @@ func NewThrottler(env tabletenv.Env, srvTopoServer srvtopo.Server, ts *topo.Serv throttler.initThrottleTabletTypes() throttler.check = NewThrottlerCheck(throttler) - throttler.StoreMetricsThreshold(throttleThreshold.Seconds()) //default - if throttleMetricThreshold != math.MaxFloat64 { - throttler.StoreMetricsThreshold(throttleMetricThreshold) // override - } + throttler.StoreMetricsThreshold(defaultThrottleLagThreshold.Seconds()) //default return throttler } @@ -291,17 +297,20 @@ func (throttler *Throttler) readThrottlerConfig(ctx context.Context) (*topodatap } // normalizeThrottlerConfig noramlizes missing throttler config information, as needed. 
-func (throttler *Throttler) normalizeThrottlerConfig(thottlerConfig *topodatapb.ThrottlerConfig) *topodatapb.ThrottlerConfig { - if thottlerConfig == nil { - thottlerConfig = &topodatapb.ThrottlerConfig{} +func (throttler *Throttler) normalizeThrottlerConfig(throttlerConfig *topodatapb.ThrottlerConfig) *topodatapb.ThrottlerConfig { + if throttlerConfig == nil { + throttlerConfig = &topodatapb.ThrottlerConfig{} + } + if throttlerConfig.ThrottledApps == nil { + throttlerConfig.ThrottledApps = make(map[string]*topodatapb.ThrottledAppRule) } - if thottlerConfig.CustomQuery == "" { + if throttlerConfig.CustomQuery == "" { // no custom query; we check replication lag - if thottlerConfig.Threshold == 0 { - thottlerConfig.Threshold = throttleThreshold.Seconds() + if throttlerConfig.Threshold == 0 { + throttlerConfig.Threshold = defaultThrottleLagThreshold.Seconds() } } - return thottlerConfig + return throttlerConfig } func (throttler *Throttler) WatchSrvKeyspaceCallback(srvks *topodatapb.SrvKeyspace, err error) bool { @@ -333,17 +342,17 @@ func (throttler *Throttler) WatchSrvKeyspaceCallback(srvks *topodatapb.SrvKeyspa // This may cause the throttler to be enabled/disabled, and of course it affects the throttling query/threshold. // Note: you should be holding the initMutex when calling this function. 
func (throttler *Throttler) applyThrottlerConfig(ctx context.Context, throttlerConfig *topodatapb.ThrottlerConfig) { - if !throttlerConfigViaTopo { - return - } log.Infof("Throttler: applying topo config: %+v", throttlerConfig) if throttlerConfig.CustomQuery == "" { - throttler.metricsQuery.Store(sqlparser.BuildParsedQuery(defaultReplicationLagQuery, sidecardb.GetIdentifier()).Query) + throttler.metricsQuery.Store(sqlparser.BuildParsedQuery(defaultReplicationLagQuery, sidecar.GetIdentifier()).Query) } else { throttler.metricsQuery.Store(throttlerConfig.CustomQuery) } throttler.StoreMetricsThreshold(throttlerConfig.Threshold) - throttlerCheckAsCheckSelf = throttlerConfig.CheckAsCheckSelf + throttler.checkAsCheckSelf.Store(throttlerConfig.CheckAsCheckSelf) + for _, appRule := range throttlerConfig.ThrottledApps { + throttler.ThrottleApp(appRule.Name, protoutil.TimeFromProto(appRule.ExpiresAt).UTC(), appRule.Ratio, appRule.Exempt) + } if throttlerConfig.Enabled { go throttler.Enable(ctx) } else { @@ -352,11 +361,11 @@ func (throttler *Throttler) applyThrottlerConfig(ctx context.Context, throttlerC } func (throttler *Throttler) IsEnabled() bool { - return atomic.LoadInt64(&throttler.isEnabled) > 0 + return throttler.isEnabled.Load() } func (throttler *Throttler) IsOpen() bool { - return atomic.LoadInt64(&throttler.isOpen) > 0 + return throttler.isOpen.Load() } // CheckIsOpen checks if this throttler is ready to serve. 
If not, it @@ -379,12 +388,12 @@ func (throttler *Throttler) Enable(ctx context.Context) bool { throttler.enableMutex.Lock() defer throttler.enableMutex.Unlock() - if throttler.IsEnabled() { + isEnabled := throttler.isEnabled.Swap(true) + if isEnabled { log.Infof("Throttler: already enabled") return false } log.Infof("Throttler: enabling") - atomic.StoreInt64(&throttler.isEnabled, 1) ctx, throttler.cancelEnableContext = context.WithCancel(ctx) throttler.check.SelfChecks(ctx) @@ -402,14 +411,13 @@ func (throttler *Throttler) Disable(ctx context.Context) bool { throttler.enableMutex.Lock() defer throttler.enableMutex.Unlock() - if !throttler.IsEnabled() { + isEnabled := throttler.isEnabled.Swap(false) + if !isEnabled { log.Infof("Throttler: already disabled") return false } log.Infof("Throttler: disabling") // _ = throttler.updateConfig(ctx, false, throttler.MetricsThreshold.Get()) // TODO(shlomi) - atomic.StoreInt64(&throttler.isEnabled, 0) - throttler.aggregatedMetrics.Flush() throttler.recentApps.Flush() throttler.nonLowPriorityAppRequestsThrottled.Flush() @@ -421,81 +429,75 @@ func (throttler *Throttler) Disable(ctx context.Context) bool { // Open opens database pool and initializes the schema func (throttler *Throttler) Open() error { - // TODO: remove `EnableLagThrottler` in v18 - if throttler.env.Config().EnableLagThrottler { - log.Warningf("The flags `--enable_lag_throttler` and `--throttle_threshold` will be removed in v18. Use 'vtctl UpdateThrottlerConfig', see https://vitess.io/docs/17.0/reference/programs/vtctldclient/vtctldclient_updatethrottlerconfig/") - } log.Infof("Throttler: started execution of Open. 
Acquiring initMutex lock") throttler.initMutex.Lock() defer throttler.initMutex.Unlock() - if throttler.IsOpen() { + + isOpen := throttler.isOpen.Swap(true) + if isOpen { // already open log.Infof("Throttler: throttler is already open") return nil } log.Infof("Throttler: opening") - ctx := context.Background() + var ctx context.Context + ctx, throttler.cancelOpenContext = context.WithCancel(context.Background()) // The query needs to be dynamically built because the sidecar database name // is not known when the TabletServer is created, which in turn creates the // Throttler. - throttler.metricsQuery.Store(sqlparser.BuildParsedQuery(defaultReplicationLagQuery, sidecardb.GetIdentifier()).Query) // default - if throttleMetricQuery != "" { - throttler.metricsQuery.Store(throttleMetricQuery) // override - } + throttler.metricsQuery.Store(sqlparser.BuildParsedQuery(defaultReplicationLagQuery, sidecar.GetIdentifier()).Query) // default throttler.initConfig() throttler.pool.Open(throttler.env.Config().DB.AppWithDB(), throttler.env.Config().DB.DbaWithDB(), throttler.env.Config().DB.AppDebugWithDB()) - atomic.StoreInt64(&throttler.isOpen, 1) - - throttler.ThrottleApp("always-throttled-app", time.Now().Add(time.Hour*24*365*10), defaultThrottleRatio) - - if throttlerConfigViaTopo { - log.Infof("Throttler: throttler-config-via-topo detected") - // We want to read throttler config from topo and apply it. - // But also, we're in an Open() function, which blocks state manager's operation, and affects - // opening of all other components. We thus read the throttler config in the background. - // However, we want to handle a situation where the read errors out. - // So we kick a loop that keeps retrying reading the config, for as long as this throttler is open. 
- retryReadAndApplyThrottlerConfig := func() { - retryInterval := 10 * time.Second - retryTicker := time.NewTicker(retryInterval) - defer retryTicker.Stop() - for { - if !throttler.IsOpen() { - // Throttler is not open so no need to keep retrying. - log.Errorf("Throttler.retryReadAndApplyThrottlerConfig(): throttler no longer seems to be open, exiting") - return - } - throttlerConfig, err := throttler.readThrottlerConfig(ctx) - if err == nil { - log.Errorf("Throttler.retryReadAndApplyThrottlerConfig(): success reading throttler config: %+v", throttlerConfig) - // It's possible that during a retry-sleep, the throttler is closed and opened again, leading - // to two (or more) instances of this goroutine. That's not a big problem; it's fine if all - // attempt to read the throttler config; but we just want to ensure they don't step on each other - // while applying the changes. - throttler.initMutex.Lock() - defer throttler.initMutex.Unlock() - throttler.applyThrottlerConfig(ctx, throttlerConfig) // may issue an Enable - go throttler.watchSrvKeyspaceOnce.Do(func() { - // We start watching SrvKeyspace only after we know it's been created. Now is that time! - throttler.srvTopoServer.WatchSrvKeyspace(context.Background(), throttler.cell, throttler.keyspace, throttler.WatchSrvKeyspaceCallback) - }) - return - } - // It's possible, especially in CI, that this throttler opened before the SrvKeyspace entry is created in topo. - // We thus retry until the entry is found. - log.Errorf("Throttler.retryReadAndApplyThrottlerConfig(): error reading throttler config. Will retry in %v. Err=%+v", retryInterval, err) - <-retryTicker.C + throttler.ThrottleApp("always-throttled-app", time.Now().Add(time.Hour*24*365*10), DefaultThrottleRatio, false) + + log.Infof("Throttler: throttler-config-via-topo detected") + // We want to read throttler config from topo and apply it. 
+ // But also, we're in an Open() function, which blocks state manager's operation, and affects + // opening of all other components. We thus read the throttler config in the background. + // However, we want to handle a situation where the read errors out. + // So we kick a loop that keeps retrying reading the config, for as long as this throttler is open. + retryReadAndApplyThrottlerConfig := func(ctx context.Context) { + retryInterval := 10 * time.Second + retryTicker := time.NewTicker(retryInterval) + defer retryTicker.Stop() + for { + if !throttler.IsOpen() { + // Throttler is not open so no need to keep retrying. + log.Errorf("Throttler.retryReadAndApplyThrottlerConfig(): throttler no longer seems to be open, exiting") + return + } + + requestCtx, requestCancel := context.WithTimeout(ctx, 5*time.Second) + defer requestCancel() + throttlerConfig, err := throttler.readThrottlerConfig(requestCtx) + if err == nil { + log.Errorf("Throttler.retryReadAndApplyThrottlerConfig(): success reading throttler config: %+v", throttlerConfig) + // It's possible that during a retry-sleep, the throttler is closed and opened again, leading + // to two (or more) instances of this goroutine. That's not a big problem; it's fine if all + // attempt to read the throttler config; but we just want to ensure they don't step on each other + // while applying the changes. + throttler.initMutex.Lock() + defer throttler.initMutex.Unlock() + throttler.applyThrottlerConfig(ctx, throttlerConfig) // may issue an Enable + go throttler.watchSrvKeyspaceOnce.Do(func() { + // We start watching SrvKeyspace only after we know it's been created. Now is that time! + throttler.srvTopoServer.WatchSrvKeyspace(context.Background(), throttler.cell, throttler.keyspace, throttler.WatchSrvKeyspaceCallback) + }) + return + } + log.Errorf("Throttler.retryReadAndApplyThrottlerConfig(): error reading throttler config. Will retry in %v. 
Err=%+v", retryInterval, err) + select { + case <-ctx.Done(): + // Throttler is not open so no need to keep retrying. + log.Errorf("Throttler.retryReadAndApplyThrottlerConfig(): throttler no longer seems to be open, exiting") + return + case <-retryTicker.C: } - } - go retryReadAndApplyThrottlerConfig() - } else { - // backwards-cmpatible: check for --enable-lag-throttler flag in vttablet - // this will be removed in a future version - if throttler.env.Config().EnableLagThrottler { - go throttler.Enable(ctx) } } + go retryReadAndApplyThrottlerConfig(ctx) + return nil } @@ -505,17 +507,18 @@ func (throttler *Throttler) Close() { throttler.initMutex.Lock() log.Infof("Throttler: acquired initMutex lock") defer throttler.initMutex.Unlock() - if !throttler.IsOpen() { + isOpen := throttler.isOpen.Swap(false) + if !isOpen { log.Infof("Throttler: throttler is not open") return } ctx := context.Background() throttler.Disable(ctx) - atomic.StoreInt64(&throttler.isLeader, 0) + throttler.isLeader.Store(false) log.Infof("Throttler: closing pool") throttler.pool.Close() - atomic.StoreInt64(&throttler.isOpen, 0) + throttler.cancelOpenContext() log.Infof("Throttler: finished execution of Close") } @@ -606,8 +609,11 @@ func (throttler *Throttler) Operate(ctx context.Context) { throttledAppsTicker := addTicker(throttledAppsSnapshotInterval) recentCheckTicker := addTicker(time.Second) + tmClient := tmclient.NewTabletManagerClient() + go func() { defer log.Infof("Throttler: Operate terminated, tickers stopped") + defer tmClient.Close() for _, t := range tickers { defer t.Stop() // since we just started the tickers now, speed up the ticks by forcing an immediate tick @@ -625,24 +631,23 @@ func (throttler *Throttler) Operate(ctx context.Context) { defer throttler.initMutex.Unlock() // sparse - shouldBeLeader := int64(0) + shouldBeLeader := false if throttler.IsOpen() { if throttler.tabletTypeFunc() == topodatapb.TabletType_PRIMARY { - shouldBeLeader = 1 + shouldBeLeader = true } } + 
isLeader := throttler.isLeader.Swap(shouldBeLeader) transitionedIntoLeader := false - if shouldBeLeader > throttler.isLeader { + if shouldBeLeader && !isLeader { log.Infof("Throttler: transition into leadership") transitionedIntoLeader = true } - if shouldBeLeader < throttler.isLeader { + if !shouldBeLeader && isLeader { log.Infof("Throttler: transition out of leadership") } - atomic.StoreInt64(&throttler.isLeader, shouldBeLeader) - if transitionedIntoLeader { // transitioned into leadership, let's speed up the next 'refresh' and 'collect' ticks go mysqlRefreshTicker.TickNow() @@ -655,7 +660,7 @@ func (throttler *Throttler) Operate(ctx context.Context) { if throttler.IsOpen() { // frequent if !throttler.isDormant() { - throttler.collectMySQLMetrics(ctx) + throttler.collectMySQLMetrics(ctx, tmClient) } } } @@ -664,7 +669,7 @@ func (throttler *Throttler) Operate(ctx context.Context) { if throttler.IsOpen() { // infrequent if throttler.isDormant() { - throttler.collectMySQLMetrics(ctx) + throttler.collectMySQLMetrics(ctx, tmClient) } } } @@ -677,7 +682,7 @@ func (throttler *Throttler) Operate(ctx context.Context) { { // sparse if throttler.IsOpen() { - go throttler.refreshMySQLInventory(ctx) + throttler.refreshMySQLInventory(ctx) } } case probes := <-throttler.mysqlClusterProbesChan: @@ -707,13 +712,41 @@ func (throttler *Throttler) Operate(ctx context.Context) { }() } -func (throttler *Throttler) generateTabletHTTPProbeFunction(ctx context.Context, clusterName string, probe *mysql.Probe) (probeFunc func() *mysql.MySQLThrottleMetric) { +func (throttler *Throttler) generateTabletHTTPProbeFunction(ctx context.Context, tmClient tmclient.TabletManagerClient, clusterName string, probe *mysql.Probe) (probeFunc func() *mysql.MySQLThrottleMetric) { return func() *mysql.MySQLThrottleMetric { + // Some reasonable timeout, to ensure we release connections even if they're hanging (otherwise grpc-go keeps polling those connections forever) + ctx, cancel := context.WithTimeout(ctx, 
4*mysqlCollectInterval) + defer cancel() + // Hit a tablet's `check-self` via HTTP, and convert its CheckResult JSON output into a MySQLThrottleMetric mySQLThrottleMetric := mysql.NewMySQLThrottleMetric() mySQLThrottleMetric.ClusterName = clusterName mySQLThrottleMetric.Key = probe.Key + { + req := &tabletmanagerdatapb.CheckThrottlerRequest{} // We leave AppName empty; it will default to VitessName anyway, and we can save some proto space + if resp, gRPCErr := tmClient.CheckThrottler(ctx, probe.Tablet, req); gRPCErr == nil { + mySQLThrottleMetric.Value = resp.Value + if resp.StatusCode == http.StatusInternalServerError { + mySQLThrottleMetric.Err = fmt.Errorf("Status code: %d", resp.StatusCode) + } + if resp.RecentlyChecked { + // We have just probed a tablet, and it reported back that someone just recently "check"ed it. + // We therefore renew the heartbeats lease. + go throttler.heartbeatWriter.RequestHeartbeats() + } + return mySQLThrottleMetric + + // } else { + // In v18 we need to be backwards compatible. If we have a gRPC error it might be because the replica is v17 and + // does not support CheckThrottler() RPC. This is why: + // 1. We fall back to HTTP + // 2. We don't log an error (it would just spam the logs) + // In v19 we will remove all HTTP code, and will *potentially* log an error. 
+ // log.Errorf("error in GRPC call to tablet %v: %v", probe.Tablet.GetAlias(), gRPCErr) + } + } + // Backwards compatibility to v17: if the underlying tablets do not support CheckThrottler gRPC, attempt a HTTP check: tabletCheckSelfURL := fmt.Sprintf("http://%s:%d/throttler/check-self?app=%s", probe.TabletHost, probe.TabletPort, throttlerapp.VitessName) resp, err := throttler.httpClient.Get(tabletCheckSelfURL) if err != nil { @@ -745,7 +778,7 @@ func (throttler *Throttler) generateTabletHTTPProbeFunction(ctx context.Context, } } -func (throttler *Throttler) collectMySQLMetrics(ctx context.Context) error { +func (throttler *Throttler) collectMySQLMetrics(ctx context.Context, tmClient tmclient.TabletManagerClient) error { // synchronously, get lists of probes for clusterName, probes := range throttler.mysqlInventory.ClustersProbes { clusterName := clusterName @@ -765,9 +798,11 @@ func (throttler *Throttler) collectMySQLMetrics(ctx context.Context) error { var throttleMetricFunc func() *mysql.MySQLThrottleMetric if clusterName == selfStoreName { + // Throttler is probing its own tablet's metrics: throttleMetricFunc = throttler.generateSelfMySQLThrottleMetricFunc(ctx, probe) } else { - throttleMetricFunc = throttler.generateTabletHTTPProbeFunction(ctx, clusterName, probe) + // Throttler probing other tablets: + throttleMetricFunc = throttler.generateTabletHTTPProbeFunction(ctx, tmClient, clusterName, probe) } throttleMetrics := mysql.ReadThrottleMetric(probe, clusterName, throttleMetricFunc) throttler.mysqlThrottleMetricChan <- throttleMetrics @@ -780,11 +815,10 @@ func (throttler *Throttler) collectMySQLMetrics(ctx context.Context) error { // refreshMySQLInventory will re-structure the inventory based on reading config settings func (throttler *Throttler) refreshMySQLInventory(ctx context.Context) error { - // distribute the query/threshold from the throttler down to the cluster settings and from there to the probes metricsQuery := throttler.GetMetricsQuery() 
metricsThreshold := throttler.MetricsThreshold.Load() - addInstanceKey := func(tabletHost string, tabletPort int, key *mysql.InstanceKey, clusterName string, clusterSettings *config.MySQLClusterConfigurationSettings, probes *mysql.Probes) { + addInstanceKey := func(tablet *topodatapb.Tablet, tabletHost string, tabletPort int, key *mysql.InstanceKey, clusterName string, clusterSettings *config.MySQLClusterConfigurationSettings, probes *mysql.Probes) { for _, ignore := range clusterSettings.IgnoreHosts { if strings.Contains(key.StringCode(), ignore) { log.Infof("Throttler: instance key ignored: %+v", key) @@ -798,6 +832,7 @@ func (throttler *Throttler) refreshMySQLInventory(ctx context.Context) error { probe := &mysql.Probe{ Key: *key, + Tablet: tablet, TabletHost: tabletHost, TabletPort: tabletPort, MetricQuery: clusterSettings.MetricQuery, @@ -822,18 +857,28 @@ func (throttler *Throttler) refreshMySQLInventory(ctx context.Context) error { } if clusterName == selfStoreName { - // special case: just looking at this tablet's MySQL server + // special case: just looking at this tablet's MySQL server. // We will probe this "cluster" (of one server) is a special way. - addInstanceKey("", 0, mysql.SelfInstanceKey, clusterName, clusterSettings, clusterProbes.InstanceProbes) + addInstanceKey(nil, "", 0, mysql.SelfInstanceKey, clusterName, clusterSettings, clusterProbes.InstanceProbes) throttler.mysqlClusterProbesChan <- clusterProbes return } - if atomic.LoadInt64(&throttler.isLeader) == 0 { + if !throttler.isLeader.Load() { + // This tablet may have used to be the primary, but it isn't now. It may have a recollection + // of previous clusters it used to probe. It may have recollection of specific probes for such clusters. + // This now ensures any existing cluster probes are overridden with an empty list of probes. + // `clusterProbes` was created above as empty, and identifiable via `clusterName`. 
This will in turn + // be used to overwrite throttler.mysqlInventory.ClustersProbes[clusterProbes.ClusterName] in + // updateMySQLClusterProbes(). + throttler.mysqlClusterProbesChan <- clusterProbes // not the leader (primary tablet)? Then no more work for us. return } // The primary tablet is also in charge of collecting the shard's metrics err := func() error { + ctx, cancel := context.WithTimeout(ctx, mysqlRefreshInterval) + defer cancel() + tabletAliases, err := throttler.ts.FindAllTabletAliasesInShard(ctx, throttler.keyspace, throttler.shard) if err != nil { return err @@ -845,7 +890,7 @@ func (throttler *Throttler) refreshMySQLInventory(ctx context.Context) error { } if throttler.throttleTabletTypesMap[tablet.Type] { key := mysql.InstanceKey{Hostname: tablet.MysqlHostname, Port: int(tablet.MysqlPort)} - addInstanceKey(tablet.Hostname, int(tablet.PortMap["vt"]), &key, clusterName, clusterSettings, clusterProbes.InstanceProbes) + addInstanceKey(tablet.Tablet, tablet.Hostname, int(tablet.PortMap["vt"]), &key, clusterName, clusterSettings, clusterProbes.InstanceProbes) } } throttler.mysqlClusterProbesChan <- clusterProbes @@ -916,7 +961,7 @@ func (throttler *Throttler) expireThrottledApps() { } // ThrottleApp instructs the throttler to begin throttling an app, to som eperiod and with some ratio. 
-func (throttler *Throttler) ThrottleApp(appName string, expireAt time.Time, ratio float64) (appThrottle *base.AppThrottle) { +func (throttler *Throttler) ThrottleApp(appName string, expireAt time.Time, ratio float64, exempt bool) (appThrottle *base.AppThrottle) { throttler.throttledAppsMutex.Lock() defer throttler.throttledAppsMutex.Unlock() @@ -929,14 +974,15 @@ func (throttler *Throttler) ThrottleApp(appName string, expireAt time.Time, rati if ratio >= 0 { appThrottle.Ratio = ratio } + appThrottle.Exempt = exempt } else { if expireAt.IsZero() { - expireAt = now.Add(defaultThrottleTTLMinutes * time.Minute) + expireAt = now.Add(DefaultAppThrottleDuration) } if ratio < 0 { - ratio = defaultThrottleRatio + ratio = DefaultThrottleRatio } - appThrottle = base.NewAppThrottle(appName, expireAt, ratio) + appThrottle = base.NewAppThrottle(appName, expireAt, ratio, exempt) } if now.Before(appThrottle.ExpireAt) { throttler.throttledApps.Set(appName, appThrottle, cache.DefaultExpiration) @@ -951,7 +997,7 @@ func (throttler *Throttler) UnthrottleApp(appName string) (appThrottle *base.App throttler.throttledApps.Delete(appName) // the app is likely to check go throttler.heartbeatWriter.RequestHeartbeats() - return base.NewAppThrottle(appName, time.Now(), 0) + return base.NewAppThrottle(appName, time.Now(), 0, false) } // IsAppThrottled tells whether some app should be throttled. 
@@ -959,16 +1005,21 @@ func (throttler *Throttler) UnthrottleApp(appName string) (appThrottle *base.App // on the throttle ratio func (throttler *Throttler) IsAppThrottled(appName string) bool { isSingleAppNameThrottled := func(singleAppName string) bool { - if object, found := throttler.throttledApps.Get(singleAppName); found { - appThrottle := object.(*base.AppThrottle) - if appThrottle.ExpireAt.Before(time.Now()) { - // throttling cleanup hasn't purged yet, but it is expired - return false - } - // handle ratio - if rand.Float64() < appThrottle.Ratio { - return true - } + object, found := throttler.throttledApps.Get(singleAppName) + if !found { + return false + } + appThrottle := object.(*base.AppThrottle) + if !appThrottle.ExpireAt.After(time.Now()) { + // throttling cleanup hasn't purged yet, but it is expired + return false + } + if appThrottle.Exempt { + return false + } + // handle ratio + if rand.Float64() < appThrottle.Ratio { + return true } return false } @@ -986,6 +1037,40 @@ func (throttler *Throttler) IsAppThrottled(appName string) bool { return false } +// IsAppExempt +func (throttler *Throttler) IsAppExempted(appName string) bool { + isSingleAppNameExempted := func(singleAppName string) bool { + if throttlerapp.ExemptFromChecks(appName) { // well known statically exempted apps + return true + } + object, found := throttler.throttledApps.Get(singleAppName) + if !found { + return false + } + appThrottle := object.(*base.AppThrottle) + if !appThrottle.ExpireAt.After(time.Now()) { + // throttling cleanup hasn't purged yet, but it is expired + return false + } + if appThrottle.Exempt { + return true + } + return false + } + if isSingleAppNameExempted(appName) { + return true + } + for _, singleAppName := range strings.Split(appName, ":") { + if singleAppName == "" { + continue + } + if isSingleAppNameExempted(singleAppName) { + return true + } + } + return false +} + // ThrottledAppsMap returns a (copy) map of currently throttled apps func (throttler 
*Throttler) ThrottledAppsMap() (result map[string](*base.AppThrottle)) { result = make(map[string](*base.AppThrottle)) @@ -1052,7 +1137,7 @@ func (throttler *Throttler) checkStore(ctx context.Context, appName string, stor if !throttler.IsRunning() { return okMetricCheckResult } - if throttlerapp.ExemptFromChecks(appName) { + if throttler.IsAppExempted(appName) { // Some apps are exempt from checks. They are always responded with OK. This is because those apps are // continuous and do not generate a substantial load. return okMetricCheckResult @@ -1093,7 +1178,7 @@ func (throttler *Throttler) CheckByType(ctx context.Context, appName string, rem case ThrottleCheckSelf: return throttler.checkSelf(ctx, appName, remoteAddr, flags) case ThrottleCheckPrimaryWrite: - if throttlerCheckAsCheckSelf { + if throttler.checkAsCheckSelf.Load() { return throttler.checkSelf(ctx, appName, remoteAddr, flags) } return throttler.checkShard(ctx, appName, remoteAddr, flags) @@ -1108,9 +1193,9 @@ func (throttler *Throttler) Status() *ThrottlerStatus { Keyspace: throttler.keyspace, Shard: throttler.shard, - IsLeader: (atomic.LoadInt64(&throttler.isLeader) > 0), - IsOpen: (atomic.LoadInt64(&throttler.isOpen) > 0), - IsEnabled: (atomic.LoadInt64(&throttler.isEnabled) > 0), + IsLeader: throttler.isLeader.Load(), + IsOpen: throttler.isOpen.Load(), + IsEnabled: throttler.isEnabled.Load(), IsDormant: throttler.isDormant(), Query: throttler.GetMetricsQuery(), diff --git a/go/vt/vttablet/tabletserver/throttle/throttler_test.go b/go/vt/vttablet/tabletserver/throttle/throttler_test.go new file mode 100644 index 00000000000..c47466df522 --- /dev/null +++ b/go/vt/vttablet/tabletserver/throttle/throttler_test.go @@ -0,0 +1,221 @@ +/* + Copyright 2017 GitHub Inc. + + Licensed under MIT License. 
See https://github.com/github/freno/blob/master/LICENSE +*/ + +package throttle + +import ( + "context" + "fmt" + "sync/atomic" + "testing" + "time" + + "github.com/patrickmn/go-cache" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/config" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/mysql" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +const ( + waitForProbesTimeout = 30 * time.Second +) + +type FakeTopoServer struct { +} + +func (ts *FakeTopoServer) GetTablet(ctx context.Context, alias *topodatapb.TabletAlias) (*topo.TabletInfo, error) { + tablet := &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: alias, + Hostname: "127.0.0.1", + MysqlHostname: "127.0.0.1", + MysqlPort: 3306, + PortMap: map[string]int32{"vt": 5000}, + Type: topodatapb.TabletType_REPLICA, + }, + } + return tablet, nil +} + +func (ts *FakeTopoServer) FindAllTabletAliasesInShard(ctx context.Context, keyspace, shard string) ([]*topodatapb.TabletAlias, error) { + aliases := []*topodatapb.TabletAlias{ + {Cell: "zone1", Uid: 100}, + {Cell: "zone2", Uid: 101}, + } + return aliases, nil +} + +func (ts *FakeTopoServer) GetSrvKeyspace(ctx context.Context, cell, keyspace string) (*topodatapb.SrvKeyspace, error) { + ks := &topodatapb.SrvKeyspace{} + return ks, nil +} + +type FakeHeartbeatWriter struct { +} + +func (w FakeHeartbeatWriter) RequestHeartbeats() { +} + +func TestIsAppThrottled(t *testing.T) { + throttler := Throttler{ + throttledApps: cache.New(cache.NoExpiration, 0), + heartbeatWriter: FakeHeartbeatWriter{}, + } + assert.False(t, throttler.IsAppThrottled("app1")) + assert.False(t, throttler.IsAppThrottled("app2")) + assert.False(t, throttler.IsAppThrottled("app3")) + assert.False(t, throttler.IsAppThrottled("app4")) + // + throttler.ThrottleApp("app1", time.Now().Add(time.Hour), DefaultThrottleRatio, true) + throttler.ThrottleApp("app2", 
time.Now(), DefaultThrottleRatio, false) + throttler.ThrottleApp("app3", time.Now().Add(time.Hour), DefaultThrottleRatio, false) + throttler.ThrottleApp("app4", time.Now().Add(time.Hour), 0, false) + assert.False(t, throttler.IsAppThrottled("app1")) // exempted + assert.False(t, throttler.IsAppThrottled("app2")) // expired + assert.True(t, throttler.IsAppThrottled("app3")) + assert.False(t, throttler.IsAppThrottled("app4")) // ratio is zero + // + throttler.UnthrottleApp("app1") + throttler.UnthrottleApp("app2") + throttler.UnthrottleApp("app3") + throttler.UnthrottleApp("app4") + assert.False(t, throttler.IsAppThrottled("app1")) + assert.False(t, throttler.IsAppThrottled("app2")) + assert.False(t, throttler.IsAppThrottled("app3")) + assert.False(t, throttler.IsAppThrottled("app4")) +} + +func TestIsAppExempted(t *testing.T) { + + throttler := Throttler{ + throttledApps: cache.New(cache.NoExpiration, 0), + heartbeatWriter: FakeHeartbeatWriter{}, + } + assert.False(t, throttler.IsAppExempted("app1")) + assert.False(t, throttler.IsAppExempted("app2")) + assert.False(t, throttler.IsAppExempted("app3")) + // + throttler.ThrottleApp("app1", time.Now().Add(time.Hour), DefaultThrottleRatio, true) + throttler.ThrottleApp("app2", time.Now(), DefaultThrottleRatio, true) // instantly expire + assert.True(t, throttler.IsAppExempted("app1")) + assert.True(t, throttler.IsAppExempted("app1:other-tag")) + assert.False(t, throttler.IsAppExempted("app2")) // expired + assert.False(t, throttler.IsAppExempted("app3")) + // + throttler.UnthrottleApp("app1") + throttler.ThrottleApp("app2", time.Now().Add(time.Hour), DefaultThrottleRatio, false) + assert.False(t, throttler.IsAppExempted("app1")) + assert.False(t, throttler.IsAppExempted("app2")) + assert.False(t, throttler.IsAppExempted("app3")) + // + assert.True(t, throttler.IsAppExempted("schema-tracker")) + throttler.UnthrottleApp("schema-tracker") // meaningless. 
App is statically exempted + assert.True(t, throttler.IsAppExempted("schema-tracker")) +} + +// TestRefreshMySQLInventory tests the behavior of the throttler's RefreshMySQLInventory() function, which +// is called periodically in actual throttler. For a given cluster name, it generates a list of probes +// the throttler will use to check metrics. +// On a "self" cluster, that list is expected to probe the tablet itself. +// On any other cluster, the list is expected to be empty if non-leader (only leader throttler, on a +// `PRIMARY` tablet, probes other tablets). On the leader, the list is expected to be non-empty. +func TestRefreshMySQLInventory(t *testing.T) { + metricsQuery := "select 1" + config.Settings().Stores.MySQL.Clusters = map[string]*config.MySQLClusterConfigurationSettings{ + selfStoreName: {}, + "ks1": {}, + "ks2": {}, + } + clusters := config.Settings().Stores.MySQL.Clusters + for _, s := range clusters { + s.MetricQuery = metricsQuery + s.ThrottleThreshold = &atomic.Uint64{} + s.ThrottleThreshold.Store(1) + } + + throttler := &Throttler{ + mysqlClusterProbesChan: make(chan *mysql.ClusterProbes), + mysqlClusterThresholds: cache.New(cache.NoExpiration, 0), + ts: &FakeTopoServer{}, + mysqlInventory: mysql.NewInventory(), + } + throttler.metricsQuery.Store(metricsQuery) + throttler.initThrottleTabletTypes() + + validateClusterProbes := func(t *testing.T, ctx context.Context) { + testName := fmt.Sprintf("leader=%t", throttler.isLeader.Load()) + t.Run(testName, func(t *testing.T) { + // validateProbesCount expects number of probes according to cluster name and throttler's leadership status + validateProbesCount := func(t *testing.T, clusterName string, probes *mysql.Probes) { + if clusterName == selfStoreName { + assert.Equal(t, 1, len(*probes)) + } else if throttler.isLeader.Load() { + assert.NotZero(t, len(*probes)) + } else { + assert.Empty(t, *probes) + } + } + t.Run("waiting for probes", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, 
waitForProbesTimeout) + defer cancel() + numClusterProbesResults := 0 + for { + select { + case probes := <-throttler.mysqlClusterProbesChan: + // Worth noting that in this unit test, the throttler is _closed_. Its own Operate() function does + // not run, and therefore there is none but us to both populate `mysqlClusterProbesChan` as well as + // read from it. We do not compete here with any other goroutine. + assert.NotNil(t, probes) + + throttler.updateMySQLClusterProbes(ctx, probes) + + numClusterProbesResults++ + validateProbesCount(t, probes.ClusterName, probes.InstanceProbes) + + if numClusterProbesResults == len(clusters) { + // Achieved our goal + return + } + case <-ctx.Done(): + assert.FailNowf(t, ctx.Err().Error(), "waiting for %d cluster probes", len(clusters)) + } + } + }) + t.Run("validating probes", func(t *testing.T) { + for clusterName := range clusters { + probes, ok := throttler.mysqlInventory.ClustersProbes[clusterName] + require.True(t, ok) + validateProbesCount(t, clusterName, probes) + } + }) + }) + } + // + ctx := context.Background() + + t.Run("initial, not leader", func(t *testing.T) { + throttler.isLeader.Store(false) + throttler.refreshMySQLInventory(ctx) + validateClusterProbes(t, ctx) + }) + + t.Run("promote", func(t *testing.T) { + throttler.isLeader.Store(true) + throttler.refreshMySQLInventory(ctx) + validateClusterProbes(t, ctx) + }) + + t.Run("demote, expect cleanup", func(t *testing.T) { + throttler.isLeader.Store(false) + throttler.refreshMySQLInventory(ctx) + validateClusterProbes(t, ctx) + }) +} diff --git a/go/vt/vttablet/tabletserver/throttle/throttlerapp/app.go b/go/vt/vttablet/tabletserver/throttle/throttlerapp/app.go index 41ac3d5a671..cc86ad0620b 100644 --- a/go/vt/vttablet/tabletserver/throttle/throttlerapp/app.go +++ b/go/vt/vttablet/tabletserver/throttle/throttlerapp/app.go @@ -16,6 +16,12 @@ limitations under the License. 
package throttlerapp +import "strings" + +func Concatenate(names ...string) string { + return strings.Join(names, ":") +} + type Name string func (n Name) String() string { @@ -26,6 +32,14 @@ func (n Name) Equals(s string) bool { return string(n) == s } +func (n Name) ConcatenateString(s string) string { + return Concatenate(n.String(), s) +} + +func (n Name) Concatenate(other Name) Name { + return Name(n.ConcatenateString(other.String())) +} + const ( // DefaultName is the app name used by vitess when app doesn't indicate its name DefaultName Name = "default" @@ -36,8 +50,8 @@ const ( GhostName Name = "gh-ost" PTOSCName Name = "pt-osc" - VStreamerName Name = "vstreamer" VReplicationName Name = "vreplication" + VStreamerName Name = "vstreamer" VPlayerName Name = "vplayer" VCopierName Name = "vcopier" ResultStreamerName Name = "resultstreamer" diff --git a/go/vt/vttablet/tabletserver/throttle/throttlerapp/app_test.go b/go/vt/vttablet/tabletserver/throttle/throttlerapp/app_test.go new file mode 100644 index 00000000000..bd14624f49b --- /dev/null +++ b/go/vt/vttablet/tabletserver/throttle/throttlerapp/app_test.go @@ -0,0 +1,43 @@ +/* + Copyright 2017 GitHub Inc. + + Licensed under MIT License. 
See https://github.com/github/freno/blob/master/LICENSE +*/ + +package throttlerapp + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestExemptFromChecks(t *testing.T) { + tcases := map[string]bool{ + "": false, + VReplicationName.String(): false, + VPlayerName.String(): false, + VCopierName.String(): false, + OnlineDDLName.String(): false, + BinlogWatcherName.String(): true, + MessagerName.String(): true, + SchemaTrackerName.String(): true, + } + for app, expectExempt := range tcases { + t.Run(app, func(t *testing.T) { + exempt := ExemptFromChecks(app) + assert.Equal(t, expectExempt, exempt) + }) + } +} + +func TestConcatenate(t *testing.T) { + n := VReplicationName + vcopierName := n.Concatenate("vcopier") + assert.Equal(t, Name("vreplication:vcopier"), vcopierName) + vplayerName := n.Concatenate("vplayer") + assert.Equal(t, Name("vreplication:vplayer"), vplayerName) + rowstreamerName := n.Concatenate(RowStreamerName) + assert.Equal(t, Name("vreplication:rowstreamer"), rowstreamerName) + assert.Equal(t, "vreplication:rowstreamer", rowstreamerName.String()) +} diff --git a/go/vt/vttablet/tabletserver/twopc.go b/go/vt/vttablet/tabletserver/twopc.go index bc6c3816121..7784f7f1702 100644 --- a/go/vt/vttablet/tabletserver/twopc.go +++ b/go/vt/vttablet/tabletserver/twopc.go @@ -17,16 +17,13 @@ limitations under the License. package tabletserver import ( + "context" "fmt" "time" - "vitess.io/vitess/go/vt/sidecardb" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/vt/vttablet/tabletserver/tx" - "vitess.io/vitess/go/vt/vtgate/evalengine" - - "context" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/dbconnpool" @@ -89,7 +86,7 @@ type TwoPC struct { // NewTwoPC creates a TwoPC variable. 
func NewTwoPC(readPool *connpool.Pool) *TwoPC { tpc := &TwoPC{readPool: readPool} - dbname := sidecardb.GetIdentifier() + dbname := sidecar.GetIdentifier() tpc.insertRedoTx = sqlparser.BuildParsedQuery( "insert into %s.redo_state(dtid, state, time_created) values (%a, %a, %a)", dbname, ":dtid", ":state", ":time_created") @@ -229,12 +226,12 @@ func (tpc *TwoPC) ReadAllRedo(ctx context.Context) (prepared, failed []*tx.Prepa // Initialize the new element. // A failure in time parsing will show up as a very old time, // which is harmless. - tm, _ := evalengine.ToInt64(row[2]) + tm, _ := row[2].ToCastInt64() curTx = &tx.PreparedTx{ Dtid: dtid, Time: time.Unix(0, tm), } - st, err := evalengine.ToInt64(row[1]) + st, err := row[1].ToCastInt64() if err != nil { log.Errorf("Error parsing state for dtid %s: %v.", dtid, err) } @@ -271,7 +268,7 @@ func (tpc *TwoPC) CountUnresolvedRedo(ctx context.Context, unresolvedTime time.T if len(qr.Rows) < 1 { return 0, nil } - v, _ := evalengine.ToInt64(qr.Rows[0][0]) + v, _ := qr.Rows[0][0].ToCastInt64() return v, nil } @@ -358,7 +355,7 @@ func (tpc *TwoPC) ReadTransaction(ctx context.Context, dtid string) (*querypb.Tr return result, nil } result.Dtid = qr.Rows[0][0].ToString() - st, err := evalengine.ToInt64(qr.Rows[0][1]) + st, err := qr.Rows[0][1].ToCastInt64() if err != nil { return nil, vterrors.Wrapf(err, "error parsing state for dtid %s", dtid) } @@ -368,7 +365,7 @@ func (tpc *TwoPC) ReadTransaction(ctx context.Context, dtid string) (*querypb.Tr } // A failure in time parsing will show up as a very old time, // which is harmless. 
- tm, _ := evalengine.ToInt64(qr.Rows[0][2]) + tm, _ := qr.Rows[0][2].ToCastInt64() result.TimeCreated = tm qr, err = tpc.read(ctx, conn, tpc.readParticipants, bindVars) @@ -405,7 +402,7 @@ func (tpc *TwoPC) ReadAbandoned(ctx context.Context, abandonTime time.Time) (map } txs := make(map[string]time.Time, len(qr.Rows)) for _, row := range qr.Rows { - t, err := evalengine.ToInt64(row[1]) + t, err := row[1].ToCastInt64() if err != nil { return nil, err } @@ -435,8 +432,8 @@ func (tpc *TwoPC) ReadAllTransactions(ctx context.Context) ([]*tx.DistributedTx, // Initialize the new element. // A failure in time parsing will show up as a very old time, // which is harmless. - tm, _ := evalengine.ToInt64(row[2]) - st, err := evalengine.ToInt64(row[1]) + tm, _ := row[2].ToCastInt64() + st, err := row[1].ToCastInt64() // Just log on error and continue. The state will show up as UNKNOWN // on the display. if err != nil { diff --git a/go/vt/vttablet/tabletserver/twopc_test.go b/go/vt/vttablet/tabletserver/twopc_test.go index fe2acd983c8..2ef5e05b7c7 100644 --- a/go/vt/vttablet/tabletserver/twopc_test.go +++ b/go/vt/vttablet/tabletserver/twopc_test.go @@ -17,6 +17,7 @@ limitations under the License. package tabletserver import ( + "context" "encoding/json" "reflect" "testing" @@ -24,20 +25,19 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletserver/tx" - "context" - "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" ) func TestReadAllRedo(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // Reuse code from tx_executor_test. 
- _, tsv, db := newTestTxExecutor(t) + _, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() tpc := tsv.te.twoPC - ctx := context.Background() conn, err := tsv.qe.conns.Get(ctx, nil) if err != nil { @@ -237,11 +237,12 @@ func TestReadAllRedo(t *testing.T) { } func TestReadAllTransactions(t *testing.T) { - _, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() tpc := tsv.te.twoPC - ctx := context.Background() conn, err := tsv.qe.conns.Get(ctx, nil) if err != nil { diff --git a/go/vt/vttablet/tabletserver/tx_engine_test.go b/go/vt/vttablet/tabletserver/tx_engine_test.go index 21ba81d84b6..6a86d044bcd 100644 --- a/go/vt/vttablet/tabletserver/tx_engine_test.go +++ b/go/vt/vttablet/tabletserver/tx_engine_test.go @@ -144,6 +144,8 @@ func TestTxEngineClose(t *testing.T) { } func TestTxEngineBegin(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := setUpQueryExecutorTest(t) defer db.Close() db.AddQueryPattern(".*", &sqltypes.Result{}) @@ -188,6 +190,8 @@ func TestTxEngineBegin(t *testing.T) { } func TestTxEngineRenewFails(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := setUpQueryExecutorTest(t) defer db.Close() db.AddQueryPattern(".*", &sqltypes.Result{}) @@ -556,6 +560,8 @@ func startTx(te *TxEngine, writeTransaction bool) error { } func TestTxEngineFailReserve(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := setUpQueryExecutorTest(t) defer db.Close() db.AddQueryPattern(".*", &sqltypes.Result{}) diff --git a/go/vt/vttablet/tabletserver/tx_executor_test.go b/go/vt/vttablet/tabletserver/tx_executor_test.go index edeb5ea5e1d..2651eb2a6cc 100644 --- a/go/vt/vttablet/tabletserver/tx_executor_test.go +++ b/go/vt/vttablet/tabletserver/tx_executor_test.go @@ -40,7 +40,9 @@ 
import ( ) func TestTxExecutorEmptyPrepare(t *testing.T) { - txe, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txe, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() txid := newTransaction(tsv, nil) @@ -51,10 +53,12 @@ func TestTxExecutorEmptyPrepare(t *testing.T) { } func TestTxExecutorPrepare(t *testing.T) { - txe, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txe, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() - txid := newTxForPrep(tsv) + txid := newTxForPrep(ctx, tsv) err := txe.Prepare(txid, "aa") require.NoError(t, err) err = txe.RollbackPrepared("aa", 1) @@ -68,7 +72,9 @@ func TestTxExecutorPrepare(t *testing.T) { } func TestTxExecutorPrepareNotInTx(t *testing.T) { - txe, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txe, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() err := txe.Prepare(0, "aa") @@ -76,11 +82,13 @@ func TestTxExecutorPrepareNotInTx(t *testing.T) { } func TestTxExecutorPreparePoolFail(t *testing.T) { - txe, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txe, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() - txid1 := newTxForPrep(tsv) - txid2 := newTxForPrep(tsv) + txid1 := newTxForPrep(ctx, tsv) + txid2 := newTxForPrep(ctx, tsv) err := txe.Prepare(txid1, "aa") require.NoError(t, err) defer txe.RollbackPrepared("aa", 0) @@ -90,10 +98,12 @@ func TestTxExecutorPreparePoolFail(t *testing.T) { } func TestTxExecutorPrepareRedoBeginFail(t *testing.T) { - txe, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txe, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() - txid := newTxForPrep(tsv) + txid 
:= newTxForPrep(ctx, tsv) db.AddRejectedQuery("begin", errors.New("begin fail")) err := txe.Prepare(txid, "aa") defer txe.RollbackPrepared("aa", 0) @@ -102,10 +112,12 @@ func TestTxExecutorPrepareRedoBeginFail(t *testing.T) { } func TestTxExecutorPrepareRedoFail(t *testing.T) { - txe, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txe, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() - txid := newTxForPrep(tsv) + txid := newTxForPrep(ctx, tsv) err := txe.Prepare(txid, "bb") defer txe.RollbackPrepared("bb", 0) require.Error(t, err) @@ -113,10 +125,12 @@ func TestTxExecutorPrepareRedoFail(t *testing.T) { } func TestTxExecutorPrepareRedoCommitFail(t *testing.T) { - txe, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txe, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() - txid := newTxForPrep(tsv) + txid := newTxForPrep(ctx, tsv) db.AddRejectedQuery("commit", errors.New("commit fail")) err := txe.Prepare(txid, "aa") defer txe.RollbackPrepared("aa", 0) @@ -125,10 +139,12 @@ func TestTxExecutorPrepareRedoCommitFail(t *testing.T) { } func TestTxExecutorCommit(t *testing.T) { - txe, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txe, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() - txid := newTxForPrep(tsv) + txid := newTxForPrep(ctx, tsv) err := txe.Prepare(txid, "aa") require.NoError(t, err) err = txe.CommitPrepared("aa") @@ -139,10 +155,12 @@ func TestTxExecutorCommit(t *testing.T) { } func TestTxExecutorCommitRedoFail(t *testing.T) { - txe, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txe, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() - txid := newTxForPrep(tsv) + txid := newTxForPrep(ctx, tsv) // Allow 
all additions to redo logs to succeed db.AddQueryPattern("insert into _vt\\.redo_state.*", &sqltypes.Result{}) err := txe.Prepare(txid, "bb") @@ -159,10 +177,12 @@ func TestTxExecutorCommitRedoFail(t *testing.T) { } func TestTxExecutorCommitRedoCommitFail(t *testing.T) { - txe, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txe, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() - txid := newTxForPrep(tsv) + txid := newTxForPrep(ctx, tsv) err := txe.Prepare(txid, "aa") require.NoError(t, err) defer txe.RollbackPrepared("aa", 0) @@ -173,10 +193,12 @@ func TestTxExecutorCommitRedoCommitFail(t *testing.T) { } func TestTxExecutorRollbackBeginFail(t *testing.T) { - txe, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txe, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() - txid := newTxForPrep(tsv) + txid := newTxForPrep(ctx, tsv) err := txe.Prepare(txid, "aa") require.NoError(t, err) db.AddRejectedQuery("begin", errors.New("begin fail")) @@ -186,10 +208,12 @@ func TestTxExecutorRollbackBeginFail(t *testing.T) { } func TestTxExecutorRollbackRedoFail(t *testing.T) { - txe, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txe, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() - txid := newTxForPrep(tsv) + txid := newTxForPrep(ctx, tsv) // Allow all additions to redo logs to succeed db.AddQueryPattern("insert into _vt\\.redo_state.*", &sqltypes.Result{}) err := txe.Prepare(txid, "bb") @@ -200,7 +224,9 @@ func TestTxExecutorRollbackRedoFail(t *testing.T) { } func TestExecutorCreateTransaction(t *testing.T) { - txe, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txe, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() @@ 
-214,43 +240,49 @@ func TestExecutorCreateTransaction(t *testing.T) { } func TestExecutorStartCommit(t *testing.T) { - txe, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txe, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() commitTransition := fmt.Sprintf("update _vt.dt_state set state = %d where dtid = 'aa' and state = %d", int(querypb.TransactionState_COMMIT), int(querypb.TransactionState_PREPARE)) db.AddQuery(commitTransition, &sqltypes.Result{RowsAffected: 1}) - txid := newTxForPrep(tsv) + txid := newTxForPrep(ctx, tsv) err := txe.StartCommit(txid, "aa") require.NoError(t, err) db.AddQuery(commitTransition, &sqltypes.Result{}) - txid = newTxForPrep(tsv) + txid = newTxForPrep(ctx, tsv) err = txe.StartCommit(txid, "aa") require.Error(t, err) require.Contains(t, err.Error(), "could not transition to COMMIT: aa") } func TestExecutorSetRollback(t *testing.T) { - txe, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txe, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() rollbackTransition := fmt.Sprintf("update _vt.dt_state set state = %d where dtid = 'aa' and state = %d", int(querypb.TransactionState_ROLLBACK), int(querypb.TransactionState_PREPARE)) db.AddQuery(rollbackTransition, &sqltypes.Result{RowsAffected: 1}) - txid := newTxForPrep(tsv) + txid := newTxForPrep(ctx, tsv) err := txe.SetRollback("aa", txid) require.NoError(t, err) db.AddQuery(rollbackTransition, &sqltypes.Result{}) - txid = newTxForPrep(tsv) + txid = newTxForPrep(ctx, tsv) err = txe.SetRollback("aa", txid) require.Error(t, err) require.Contains(t, err.Error(), "could not transition to ROLLBACK: aa") } func TestExecutorConcludeTransaction(t *testing.T) { - txe, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txe, tsv, db := newTestTxExecutor(t, ctx) defer 
db.Close() defer tsv.StopService() @@ -261,7 +293,9 @@ func TestExecutorConcludeTransaction(t *testing.T) { } func TestExecutorReadTransaction(t *testing.T) { - txe, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txe, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() @@ -361,7 +395,9 @@ func TestExecutorReadTransaction(t *testing.T) { } func TestExecutorReadAllTransactions(t *testing.T) { - txe, tsv, db := newTestTxExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txe, tsv, db := newTestTxExecutor(t, ctx) defer db.Close() defer tsv.StopService() @@ -410,6 +446,8 @@ func (conn *FakeVTGateConn) ResolveTransaction(ctx context.Context, dtid string) } func TestExecutorResolveTransaction(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() protocol := "resolveTest" oldValue := vtgateconn.GetVTGateProtocol() vtgateconn.SetVTGateProtocol(protocol) @@ -421,7 +459,7 @@ func TestExecutorResolveTransaction(t *testing.T) { FakeVTGateConn: fakerpcvtgateconn.FakeVTGateConn{}, }, nil }) - _, tsv, db := newShortAgeExecutor(t) + _, tsv, db := newShortAgeExecutor(t, ctx) defer db.Close() defer tsv.StopService() want := "aa" @@ -444,7 +482,9 @@ func TestExecutorResolveTransaction(t *testing.T) { } func TestNoTwopc(t *testing.T) { - txe, tsv, db := newNoTwopcExecutor(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txe, tsv, db := newNoTwopcExecutor(t, ctx) defer db.Close() defer tsv.StopService() @@ -493,7 +533,7 @@ func TestNoTwopc(t *testing.T) { } } -func newTestTxExecutor(t *testing.T) (txe *TxExecutor, tsv *TabletServer, db *fakesqldb.DB) { +func newTestTxExecutor(t *testing.T, ctx context.Context) (txe *TxExecutor, tsv *TabletServer, db *fakesqldb.DB) { db = setUpQueryExecutorTest(t) logStats := tabletenv.NewLogStats(ctx, "TestTxExecutor") tsv = newTestTabletServer(ctx, smallTxPool, 
db) @@ -510,7 +550,7 @@ func newTestTxExecutor(t *testing.T) (txe *TxExecutor, tsv *TabletServer, db *fa } // newShortAgeExecutor is same as newTestTxExecutor, but shorter transaction abandon age. -func newShortAgeExecutor(t *testing.T) (txe *TxExecutor, tsv *TabletServer, db *fakesqldb.DB) { +func newShortAgeExecutor(t *testing.T, ctx context.Context) (txe *TxExecutor, tsv *TabletServer, db *fakesqldb.DB) { db = setUpQueryExecutorTest(t) logStats := tabletenv.NewLogStats(ctx, "TestTxExecutor") tsv = newTestTabletServer(ctx, smallTxPool|shortTwopcAge, db) @@ -527,7 +567,7 @@ func newShortAgeExecutor(t *testing.T) (txe *TxExecutor, tsv *TabletServer, db * } // newNoTwopcExecutor is same as newTestTxExecutor, but 2pc disabled. -func newNoTwopcExecutor(t *testing.T) (txe *TxExecutor, tsv *TabletServer, db *fakesqldb.DB) { +func newNoTwopcExecutor(t *testing.T, ctx context.Context) (txe *TxExecutor, tsv *TabletServer, db *fakesqldb.DB) { db = setUpQueryExecutorTest(t) logStats := tabletenv.NewLogStats(ctx, "TestTxExecutor") tsv = newTestTabletServer(ctx, noTwopc, db) @@ -539,7 +579,7 @@ func newNoTwopcExecutor(t *testing.T) (txe *TxExecutor, tsv *TabletServer, db *f } // newTxForPrep creates a non-empty transaction. 
-func newTxForPrep(tsv *TabletServer) int64 { +func newTxForPrep(ctx context.Context, tsv *TabletServer) int64 { txid := newTransaction(tsv, nil) target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} _, err := tsv.Execute(ctx, &target, "update test_table set name = 2 where pk = 1", nil, txid, 0, nil) diff --git a/go/vt/vttablet/tabletserver/tx_pool_test.go b/go/vt/vttablet/tabletserver/tx_pool_test.go index 46b0552a0a3..3515310c481 100644 --- a/go/vt/vttablet/tabletserver/tx_pool_test.go +++ b/go/vt/vttablet/tabletserver/tx_pool_test.go @@ -39,6 +39,8 @@ import ( ) func TestTxPoolExecuteCommit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db, txPool, _, closer := setup(t) defer closer() @@ -73,6 +75,9 @@ func TestTxPoolExecuteCommit(t *testing.T) { } func TestTxPoolExecuteRollback(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, txPool, _, closer := setup(t) defer closer() @@ -91,6 +96,9 @@ func TestTxPoolExecuteRollback(t *testing.T) { } func TestTxPoolExecuteRollbackOnClosedConn(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, txPool, _, closer := setup(t) defer closer() @@ -108,6 +116,9 @@ func TestTxPoolExecuteRollbackOnClosedConn(t *testing.T) { } func TestTxPoolRollbackNonBusy(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, txPool, _, closer := setup(t) defer closer() @@ -135,6 +146,9 @@ func TestTxPoolRollbackNonBusy(t *testing.T) { } func TestTxPoolTransactionIsolation(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, txPool, _, closer := setup(t) defer closer() @@ -146,6 +160,9 @@ func TestTxPoolTransactionIsolation(t *testing.T) { } func TestTxPoolAutocommit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, txPool, _, closer := setup(t) defer closer() @@ -173,7 
+190,10 @@ func TestTxPoolAutocommit(t *testing.T) { // db connection. DBConn.Exec() is going to reconnect and retry automatically // due to this connection error and the BEGIN will succeed. func TestTxPoolBeginWithPoolConnectionError_Errno2006_Transient(t *testing.T) { - db, txPool := primeTxPoolWithConnection(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + db, txPool := primeTxPoolWithConnection(t, ctx) defer db.Close() defer txPool.Close() @@ -189,7 +209,7 @@ func TestTxPoolBeginWithPoolConnectionError_Errno2006_Transient(t *testing.T) { // primeTxPoolWithConnection is a helper function. It reconstructs the // scenario where future transactions are going to reuse an open db connection. -func primeTxPoolWithConnection(t *testing.T) (*fakesqldb.DB, *TxPool) { +func primeTxPoolWithConnection(t *testing.T, ctx context.Context) (*fakesqldb.DB, *TxPool) { t.Helper() db := fakesqldb.New(t) txPool, _ := newTxPool() @@ -209,6 +229,9 @@ func primeTxPoolWithConnection(t *testing.T) (*fakesqldb.DB, *TxPool) { } func TestTxPoolBeginWithError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, txPool, limiter, closer := setup(t) defer closer() db.AddRejectedQuery("begin", errRejected) @@ -244,6 +267,9 @@ func TestTxPoolBeginWithError(t *testing.T) { } func TestTxPoolBeginWithPreQueryError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, txPool, _, closer := setup(t) defer closer() db.AddRejectedQuery("pre_query", errRejected) @@ -257,7 +283,7 @@ func TestTxPoolCancelledContextError(t *testing.T) { // given db, txPool, _, closer := setup(t) defer closer() - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(context.Background()) cancel() // when @@ -271,6 +297,9 @@ func TestTxPoolCancelledContextError(t *testing.T) { } func TestTxPoolWaitTimeoutError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + 
defer cancel() + env := newEnv("TabletServerTest") env.Config().TxPool.Size = 1 env.Config().TxPool.MaxWaiters = 0 @@ -297,6 +326,9 @@ func TestTxPoolWaitTimeoutError(t *testing.T) { } func TestTxPoolRollbackFailIsPassedThrough(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sql := "alter table test_table add test_column int" db, txPool, _, closer := setup(t) defer closer() @@ -317,6 +349,9 @@ func TestTxPoolRollbackFailIsPassedThrough(t *testing.T) { } func TestTxPoolGetConnRecentlyRemovedTransaction(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, txPool, _, _ := setup(t) defer db.Close() conn1, _, _, _ := txPool.Begin(ctx, &querypb.ExecuteOptions{}, false, 0, nil, nil) @@ -384,6 +419,9 @@ func TestTxPoolCloseKillsStrayTransactions(t *testing.T) { } func TestTxTimeoutKillsTransactions(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newEnv("TabletServerTest") env.Config().TxPool.Size = 1 env.Config().TxPool.MaxWaiters = 0 @@ -430,6 +468,9 @@ func TestTxTimeoutKillsTransactions(t *testing.T) { } func TestTxTimeoutDoesNotKillShortLivedTransactions(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newEnv("TabletServerTest") env.Config().TxPool.Size = 1 env.Config().TxPool.MaxWaiters = 0 @@ -460,6 +501,9 @@ func TestTxTimeoutDoesNotKillShortLivedTransactions(t *testing.T) { } func TestTxTimeoutKillsOlapTransactions(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newEnv("TabletServerTest") env.Config().TxPool.Size = 1 env.Config().TxPool.MaxWaiters = 0 @@ -495,6 +539,9 @@ func TestTxTimeoutKillsOlapTransactions(t *testing.T) { } func TestTxTimeoutNotEnforcedForZeroLengthTimeouts(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newEnv("TabletServerTest") 
env.Config().TxPool.Size = 2 env.Config().TxPool.MaxWaiters = 0 @@ -535,6 +582,9 @@ func TestTxTimeoutNotEnforcedForZeroLengthTimeouts(t *testing.T) { } func TestTxTimeoutReservedConn(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newEnv("TabletServerTest") env.Config().TxPool.Size = 1 env.Config().TxPool.MaxWaiters = 0 @@ -575,6 +625,9 @@ func TestTxTimeoutReservedConn(t *testing.T) { } func TestTxTimeoutReusedReservedConn(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newEnv("TabletServerTest") env.Config().TxPool.Size = 1 env.Config().TxPool.MaxWaiters = 0 @@ -728,6 +781,9 @@ func TestTxPoolBeginStatements(t *testing.T) { for _, tc := range testCases { t.Run(fmt.Sprintf("%v:%v:readOnly:%v", tc.txIsolationLevel, tc.txAccessModes, tc.readOnly), func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + options := &querypb.ExecuteOptions{ TransactionIsolation: tc.txIsolationLevel, TransactionAccessMode: tc.txAccessModes, diff --git a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go index bc5235593ac..e14ed5c6adf 100644 --- a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go +++ b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go @@ -18,26 +18,23 @@ package txthrottler import ( "context" - "fmt" "math/rand" + "reflect" "strings" "sync" "time" - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/throttler" "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" querypb "vitess.io/vitess/go/vt/proto/query" throttlerdatapb "vitess.io/vitess/go/vt/proto/throttlerdata" + topodatapb 
"vitess.io/vitess/go/vt/proto/topodata" ) // These vars store the functions used to create the topo server, healthcheck, @@ -65,16 +62,16 @@ func resetTxThrottlerFactories() { } } +func init() { + resetTxThrottlerFactories() +} + // TxThrottler defines the interface for the transaction throttler. type TxThrottler interface { InitDBConfig(target *querypb.Target) Open() (err error) Close() - Throttle(priority int) (result bool) -} - -func init() { - resetTxThrottlerFactories() + Throttle(priority int, workload string) (result bool) } // ThrottlerInterface defines the public interface that is implemented by go/vt/throttler.Throttler @@ -103,6 +100,17 @@ type TopologyWatcherInterface interface { // go/vt/throttler.GlobalManager. const TxThrottlerName = "TransactionThrottler" +// fetchKnownCells gathers a list of known cells from the topology. On error, +// the cell of the local tablet will be used and an error is logged. +func fetchKnownCells(ctx context.Context, topoServer *topo.Server, target *querypb.Target) []string { + cells, err := topoServer.GetKnownCells(ctx) + if err != nil { + log.Errorf("txThrottler: falling back to local cell due to error fetching cells from topology: %+v", err) + cells = []string{target.Cell} + } + return cells +} + // txThrottler implements TxThrottle for throttling transactions based on replication lag. // It's a thin wrapper around the throttler found in vitess/go/vt/throttler. // It uses a discovery.HealthCheck to send replication-lag updates to the wrapped throttler. @@ -131,124 +139,85 @@ const TxThrottlerName = "TransactionThrottler" // be executing a method. The only exception is the 'Throttle' method where multiple goroutines are // allowed to execute it concurrently. type txThrottler struct { - // config stores the transaction throttler's configuration. - // It is populated in NewTxThrottler and is not modified - // since. 
- config *txThrottlerConfig + config *tabletenv.TabletConfig // state holds an open transaction throttler state. It is nil // if the TransactionThrottler is closed. - state *txThrottlerState + state txThrottlerState target *querypb.Target topoServer *topo.Server // stats - throttlerRunning *stats.Gauge - requestsTotal *stats.Counter - requestsThrottled *stats.Counter + throttlerRunning *stats.Gauge + healthChecksReadTotal *stats.CountersWithMultiLabels + healthChecksRecordedTotal *stats.CountersWithMultiLabels + requestsTotal *stats.CountersWithSingleLabel + requestsThrottled *stats.CountersWithSingleLabel } -// txThrottlerConfig holds the parameters that need to be -// passed when constructing a TxThrottler object. -type txThrottlerConfig struct { - // enabled is true if the transaction throttler is enabled. All methods - // of a disabled transaction throttler do nothing and Throttle() always - // returns false. - enabled bool - - throttlerConfig *throttlerdatapb.Configuration - // healthCheckCells stores the cell names in which running vttablets will be monitored for - // replication lag. - healthCheckCells []string - - // tabletTypes stores the tablet types for throttling - tabletTypes *topoproto.TabletTypeListFlag +type txThrottlerState interface { + deallocateResources() + StatsUpdate(tabletStats *discovery.TabletHealth) + throttle() bool } -// txThrottlerState holds the state of an open TxThrottler object. -type txThrottlerState struct { - config *txThrottlerConfig +// txThrottlerStateImpl holds the state of an open TxThrottler object. +type txThrottlerStateImpl struct { + config *tabletenv.TabletConfig + txThrottler *txThrottler // throttleMu serializes calls to throttler.Throttler.Throttle(threadId). // That method is required to be called in serial for each threadId. 
- throttleMu sync.Mutex - throttler ThrottlerInterface - stopHealthCheck context.CancelFunc + throttleMu sync.Mutex + throttler ThrottlerInterface + stopHealthCheck context.CancelFunc + topologyWatchers map[string]TopologyWatcherInterface healthCheck discovery.HealthCheck - topologyWatchers []TopologyWatcherInterface -} - -// NewTxThrottler tries to construct a txThrottler from the -// relevant fields in the tabletenv.Config object. It returns a disabled TxThrottler if -// any error occurs. -// This function calls tryCreateTxThrottler that does the actual creation work -// and returns an error if one occurred. -func NewTxThrottler(env tabletenv.Env, topoServer *topo.Server) TxThrottler { - txThrottler, err := tryCreateTxThrottler(env, topoServer) - if err != nil { - log.Errorf("Error creating transaction throttler. Transaction throttling will"+ - " be disabled. Error: %v", err) - // newTxThrottler with disabled config never returns an error - txThrottler, _ = newTxThrottler(env, topoServer, &txThrottlerConfig{enabled: false}) - } else { - log.Infof("Initialized transaction throttler with config: %+v", txThrottler.config) - } - return txThrottler -} + healthCheckChan chan *discovery.TabletHealth + healthCheckCells []string + cellsFromTopo bool -// InitDBConfig initializes the target parameters for the throttler. -func (t *txThrottler) InitDBConfig(target *querypb.Target) { - t.target = proto.Clone(target).(*querypb.Target) + // tabletTypes stores the tablet types for throttling + tabletTypes map[topodatapb.TabletType]bool } -func tryCreateTxThrottler(env tabletenv.Env, topoServer *topo.Server) (*txThrottler, error) { - if !env.Config().EnableTxThrottler { - return newTxThrottler(env, topoServer, &txThrottlerConfig{enabled: false}) +// NewTxThrottler tries to construct a txThrottler from the relevant +// fields in the tabletenv.Env and topo.Server objects. 
+func NewTxThrottler(env tabletenv.Env, topoServer *topo.Server) TxThrottler { + config := env.Config() + if config.EnableTxThrottler { + if len(config.TxThrottlerHealthCheckCells) == 0 { + defer log.Infof("Initialized transaction throttler using tabletTypes: %+v, cellsFromTopo: true, topoRefreshInterval: %s, throttlerConfig: %q", + config.TxThrottlerTabletTypes, config.TxThrottlerTopoRefreshInterval, config.TxThrottlerConfig.Get(), + ) + } else { + defer log.Infof("Initialized transaction throttler using tabletTypes: %+v, healthCheckCells: %+v, throttlerConfig: %q", + config.TxThrottlerTabletTypes, config.TxThrottlerHealthCheckCells, config.TxThrottlerConfig.Get(), + ) + } } - var throttlerConfig throttlerdatapb.Configuration - if err := prototext.Unmarshal([]byte(env.Config().TxThrottlerConfig), &throttlerConfig); err != nil { - return nil, err + return &txThrottler{ + config: config, + topoServer: topoServer, + throttlerRunning: env.Exporter().NewGauge(TxThrottlerName+"Running", "transaction throttler running state"), + healthChecksReadTotal: env.Exporter().NewCountersWithMultiLabels(TxThrottlerName+"HealthchecksRead", "transaction throttler healthchecks read", + []string{"cell", "DbType"}), + healthChecksRecordedTotal: env.Exporter().NewCountersWithMultiLabels(TxThrottlerName+"HealthchecksRecorded", "transaction throttler healthchecks recorded", + []string{"cell", "DbType"}), + requestsTotal: env.Exporter().NewCountersWithSingleLabel(TxThrottlerName+"Requests", "transaction throttler requests", "workload"), + requestsThrottled: env.Exporter().NewCountersWithSingleLabel(TxThrottlerName+"Throttled", "transaction throttler requests throttled", "workload"), } - - // Clone tsv.TxThrottlerHealthCheckCells so that we don't assume tsv.TxThrottlerHealthCheckCells - // is immutable. 
- healthCheckCells := make([]string, len(env.Config().TxThrottlerHealthCheckCells)) - copy(healthCheckCells, env.Config().TxThrottlerHealthCheckCells) - - return newTxThrottler(env, topoServer, &txThrottlerConfig{ - enabled: true, - tabletTypes: env.Config().TxThrottlerTabletTypes, - throttlerConfig: &throttlerConfig, - healthCheckCells: healthCheckCells, - }) } -func newTxThrottler(env tabletenv.Env, topoServer *topo.Server, config *txThrottlerConfig) (*txThrottler, error) { - if config.enabled { - // Verify config. - err := throttler.MaxReplicationLagModuleConfig{Configuration: config.throttlerConfig}.Verify() - if err != nil { - return nil, err - } - if len(config.healthCheckCells) == 0 { - return nil, fmt.Errorf("empty healthCheckCells given. %+v", config) - } - } - return &txThrottler{ - config: config, - topoServer: topoServer, - throttlerRunning: env.Exporter().NewGauge("TransactionThrottlerRunning", "transaction throttler running state"), - requestsTotal: env.Exporter().NewCounter("TransactionThrottlerRequests", "transaction throttler requests"), - requestsThrottled: env.Exporter().NewCounter("TransactionThrottlerThrottled", "transaction throttler requests throttled"), - }, nil -} +// InitDBConfig initializes the target parameters for the throttler. +func (t *txThrottler) InitDBConfig(target *querypb.Target) { t.target = target.CloneVT() } // Open opens the transaction throttler. It must be called prior to 'Throttle'. func (t *txThrottler) Open() (err error) { - if !t.config.enabled { + if !t.config.EnableTxThrottler { return nil } if t.state != nil { @@ -256,7 +225,7 @@ func (t *txThrottler) Open() (err error) { } log.Info("txThrottler: opening") t.throttlerRunning.Set(1) - t.state, err = newTxThrottlerState(t.topoServer, t.config, t.target) + t.state, err = newTxThrottlerState(t, t.config, t.target) return err } @@ -264,7 +233,7 @@ func (t *txThrottler) Open() (err error) { // It should be called after the throttler is no longer needed. 
// It's ok to call this method on a closed throttler--in which case the method does nothing. func (t *txThrottler) Close() { - if !t.config.enabled { + if !t.config.EnableTxThrottler { return } if t.state == nil { @@ -280,8 +249,8 @@ func (t *txThrottler) Close() { // It returns true if the transaction should not proceed (the caller // should back off). Throttle requires that Open() was previously called // successfully. -func (t *txThrottler) Throttle(priority int) (result bool) { - if !t.config.enabled { +func (t *txThrottler) Throttle(priority int, workload string) (result bool) { + if !t.config.EnableTxThrottler { return false } if t.state == nil { @@ -292,16 +261,16 @@ func (t *txThrottler) Throttle(priority int) (result bool) { // are less likely to be throttled. result = t.state.throttle() && rand.Intn(sqlparser.MaxPriorityValue) < priority - t.requestsTotal.Add(1) + t.requestsTotal.Add(workload, 1) if result { - t.requestsThrottled.Add(1) + t.requestsThrottled.Add(workload, 1) } - return result + return result && !t.config.TxThrottlerDryRun } -func newTxThrottlerState(topoServer *topo.Server, config *txThrottlerConfig, target *querypb.Target) (*txThrottlerState, error) { - maxReplicationLagModuleConfig := throttler.MaxReplicationLagModuleConfig{Configuration: config.throttlerConfig} +func newTxThrottlerState(txThrottler *txThrottler, config *tabletenv.TabletConfig, target *querypb.Target) (txThrottlerState, error) { + maxReplicationLagModuleConfig := throttler.MaxReplicationLagModuleConfig{Configuration: config.TxThrottlerConfig.Get()} t, err := throttlerFactory( TxThrottlerName, @@ -313,53 +282,106 @@ func newTxThrottlerState(topoServer *topo.Server, config *txThrottlerConfig, tar if err != nil { return nil, err } - if err := t.UpdateConfiguration(config.throttlerConfig, true /* copyZeroValues */); err != nil { + if err := t.UpdateConfiguration(config.TxThrottlerConfig.Get(), true /* copyZeroValues */); err != nil { t.Close() return nil, err } - result := 
&txThrottlerState{ - config: config, - throttler: t, + + tabletTypes := make(map[topodatapb.TabletType]bool, len(*config.TxThrottlerTabletTypes)) + for _, tabletType := range *config.TxThrottlerTabletTypes { + tabletTypes[tabletType] = true } - createTxThrottlerHealthCheck(topoServer, config, result, target.Cell) - - result.topologyWatchers = make( - []TopologyWatcherInterface, 0, len(config.healthCheckCells)) - for _, cell := range config.healthCheckCells { - result.topologyWatchers = append( - result.topologyWatchers, - topologyWatcherFactory( - topoServer, - result.healthCheck, - cell, - target.Keyspace, - target.Shard, - discovery.DefaultTopologyWatcherRefreshInterval, - discovery.DefaultTopoReadConcurrency)) + + state := &txThrottlerStateImpl{ + config: config, + healthCheckCells: config.TxThrottlerHealthCheckCells, + tabletTypes: tabletTypes, + throttler: t, + txThrottler: txThrottler, + } + + // get cells from topo if none defined in tabletenv config + if len(state.healthCheckCells) == 0 { + ctx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) + defer cancel() + state.healthCheckCells = fetchKnownCells(ctx, txThrottler.topoServer, target) + state.cellsFromTopo = true } - return result, nil -} -func createTxThrottlerHealthCheck(topoServer *topo.Server, config *txThrottlerConfig, result *txThrottlerState, cell string) { ctx, cancel := context.WithCancel(context.Background()) - result.stopHealthCheck = cancel - result.healthCheck = healthCheckFactory(topoServer, cell, config.healthCheckCells) - ch := result.healthCheck.Subscribe() - go func(ctx context.Context) { - for { - select { - case <-ctx.Done(): - return - case th := <-ch: - result.StatsUpdate(th) - } + state.stopHealthCheck = cancel + state.initHealthCheckStream(txThrottler.topoServer, target) + go state.healthChecksProcessor(ctx, txThrottler.topoServer, target) + + return state, nil +} + +func (ts *txThrottlerStateImpl) initHealthCheckStream(topoServer *topo.Server, 
target *querypb.Target) { + ts.healthCheck = healthCheckFactory(topoServer, target.Cell, ts.healthCheckCells) + ts.healthCheckChan = ts.healthCheck.Subscribe() + + ts.topologyWatchers = make( + map[string]TopologyWatcherInterface, len(ts.healthCheckCells)) + for _, cell := range ts.healthCheckCells { + ts.topologyWatchers[cell] = topologyWatcherFactory( + topoServer, + ts.healthCheck, + cell, + target.Keyspace, + target.Shard, + discovery.DefaultTopologyWatcherRefreshInterval, + discovery.DefaultTopoReadConcurrency, + ) + } +} + +func (ts *txThrottlerStateImpl) closeHealthCheckStream() { + if ts.healthCheck == nil { + return + } + for _, watcher := range ts.topologyWatchers { + watcher.Stop() + } + ts.topologyWatchers = nil + ts.stopHealthCheck() + ts.healthCheck.Close() +} + +func (ts *txThrottlerStateImpl) updateHealthCheckCells(ctx context.Context, topoServer *topo.Server, target *querypb.Target) { + fetchCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) + defer cancel() + + knownCells := fetchKnownCells(fetchCtx, topoServer, target) + if !reflect.DeepEqual(knownCells, ts.healthCheckCells) { + log.Info("txThrottler: restarting healthcheck stream due to topology cells update") + ts.healthCheckCells = knownCells + ts.closeHealthCheckStream() + ts.initHealthCheckStream(topoServer, target) + } +} + +func (ts *txThrottlerStateImpl) healthChecksProcessor(ctx context.Context, topoServer *topo.Server, target *querypb.Target) { + var cellsUpdateTicks <-chan time.Time + if ts.cellsFromTopo { + ticker := time.NewTicker(ts.config.TxThrottlerTopoRefreshInterval) + cellsUpdateTicks = ticker.C + defer ticker.Stop() + } + for { + select { + case <-ctx.Done(): + return + case <-cellsUpdateTicks: + ts.updateHealthCheckCells(ctx, topoServer, target) + case th := <-ts.healthCheckChan: + ts.StatsUpdate(th) } - }(ctx) + } } -func (ts *txThrottlerState) throttle() bool { +func (ts *txThrottlerStateImpl) throttle() bool { if ts.throttler == nil { - 
log.Error("throttle called after deallocateResources was called") + log.Error("txThrottler: throttle called after deallocateResources was called") return false } // Serialize calls to ts.throttle.Throttle() @@ -368,36 +390,31 @@ func (ts *txThrottlerState) throttle() bool { return ts.throttler.Throttle(0 /* threadId */) > 0 } -func (ts *txThrottlerState) deallocateResources() { - // We don't really need to nil out the fields here - // as deallocateResources is not expected to be called - // more than once, but it doesn't hurt to do so. - for _, watcher := range ts.topologyWatchers { - watcher.Stop() - } - ts.topologyWatchers = nil - - ts.healthCheck.Close() +func (ts *txThrottlerStateImpl) deallocateResources() { + // Close healthcheck and topo watchers + ts.closeHealthCheckStream() ts.healthCheck = nil - // After ts.healthCheck is closed txThrottlerState.StatsUpdate() is guaranteed not + // After ts.healthCheck is closed txThrottlerStateImpl.StatsUpdate() is guaranteed not // to be executing, so we can safely close the throttler. ts.throttler.Close() ts.throttler = nil } // StatsUpdate updates the health of a tablet with the given healthcheck. -func (ts *txThrottlerState) StatsUpdate(tabletStats *discovery.TabletHealth) { - if ts.config.tabletTypes == nil { +func (ts *txThrottlerStateImpl) StatsUpdate(tabletStats *discovery.TabletHealth) { + if len(ts.tabletTypes) == 0 { return } + tabletType := tabletStats.Target.TabletType + metricLabels := []string{tabletStats.Target.Cell, tabletType.String()} + ts.txThrottler.healthChecksReadTotal.Add(metricLabels, 1) + // Monitor tablets for replication lag if they have a tablet - // type specified by the --tx_throttler_tablet_types flag. - for _, expectedTabletType := range *ts.config.tabletTypes { - if tabletStats.Target.TabletType == expectedTabletType { - ts.throttler.RecordReplicationLag(time.Now(), tabletStats) - return - } + // type specified by the --tx-throttler-tablet-types flag. 
+ if ts.tabletTypes[tabletType] { + ts.throttler.RecordReplicationLag(time.Now(), tabletStats) + ts.txThrottler.healthChecksRecordedTotal.Add(metricLabels, 1) } } diff --git a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go index 4e95ebe7097..61c408cc1f9 100644 --- a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go +++ b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go @@ -22,6 +22,7 @@ package txthrottler //go:generate mockgen -destination mock_topology_watcher_test.go -package txthrottler vitess.io/vitess/go/vt/vttablet/tabletserver/txthrottler TopologyWatcherInterface import ( + "context" "testing" "time" @@ -36,7 +37,6 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" querypb "vitess.io/vitess/go/vt/proto/query" - throttlerdatapb "vitess.io/vitess/go/vt/proto/throttlerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -50,18 +50,20 @@ func TestDisabledThrottler(t *testing.T) { Shard: "shard", }) assert.Nil(t, throttler.Open()) - assert.False(t, throttler.Throttle(0)) + assert.False(t, throttler.Throttle(0, "some_workload")) throttlerImpl, _ := throttler.(*txThrottler) assert.Zero(t, throttlerImpl.throttlerRunning.Get()) throttler.Close() } func TestEnabledThrottler(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() defer resetTxThrottlerFactories() - ts := memorytopo.NewServer("cell1", "cell2") + ts := memorytopo.NewServer(ctx, "cell1", "cell2") mockHealthCheck := NewMockHealthCheck(mockCtrl) hcCall1 := mockHealthCheck.EXPECT().Subscribe() @@ -93,6 +95,7 @@ func TestEnabledThrottler(t *testing.T) { call1.Return(0 * time.Second) tabletStats := &discovery.TabletHealth{ Target: &querypb.Target{ + Cell: "cell1", TabletType: topodatapb.TabletType_REPLICA, }, } @@ -112,71 +115,116 @@ func TestEnabledThrottler(t *testing.T) { config := 
tabletenv.NewDefaultConfig() config.EnableTxThrottler = true - config.TxThrottlerHealthCheckCells = []string{"cell1", "cell2"} config.TxThrottlerTabletTypes = &topoproto.TabletTypeListFlag{topodatapb.TabletType_REPLICA} env := tabletenv.NewEnv(config, t.Name()) - throttler, err := tryCreateTxThrottler(env, ts) - assert.Nil(t, err) + throttler := NewTxThrottler(env, ts) + throttlerImpl, _ := throttler.(*txThrottler) + assert.NotNil(t, throttlerImpl) throttler.InitDBConfig(&querypb.Target{ + Cell: "cell1", Keyspace: "keyspace", Shard: "shard", }) - assert.Nil(t, throttler.Open()) - assert.Equal(t, int64(1), throttler.throttlerRunning.Get()) - assert.False(t, throttler.Throttle(100)) - assert.Equal(t, int64(1), throttler.requestsTotal.Get()) - assert.Zero(t, throttler.requestsThrottled.Get()) + assert.Nil(t, throttlerImpl.Open()) + throttlerStateImpl := throttlerImpl.state.(*txThrottlerStateImpl) + assert.Equal(t, map[topodatapb.TabletType]bool{topodatapb.TabletType_REPLICA: true}, throttlerStateImpl.tabletTypes) + assert.Equal(t, int64(1), throttlerImpl.throttlerRunning.Get()) - throttler.state.StatsUpdate(tabletStats) // This calls replication lag thing + assert.False(t, throttlerImpl.Throttle(100, "some_workload")) + assert.Equal(t, int64(1), throttlerImpl.requestsTotal.Counts()["some_workload"]) + assert.Zero(t, throttlerImpl.requestsThrottled.Counts()["some_workload"]) + + throttlerImpl.state.StatsUpdate(tabletStats) // This calls replication lag thing + assert.Equal(t, map[string]int64{"cell1.REPLICA": 1}, throttlerImpl.healthChecksReadTotal.Counts()) + assert.Equal(t, map[string]int64{"cell1.REPLICA": 1}, throttlerImpl.healthChecksRecordedTotal.Counts()) rdonlyTabletStats := &discovery.TabletHealth{ Target: &querypb.Target{ + Cell: "cell2", TabletType: topodatapb.TabletType_RDONLY, }, } - // This call should not be forwarded to the go/vt/throttler.Throttler object. 
- throttler.state.StatsUpdate(rdonlyTabletStats) + // This call should not be forwarded to the go/vt/throttlerImpl.Throttler object. + throttlerImpl.state.StatsUpdate(rdonlyTabletStats) + assert.Equal(t, map[string]int64{"cell1.REPLICA": 1, "cell2.RDONLY": 1}, throttlerImpl.healthChecksReadTotal.Counts()) + assert.Equal(t, map[string]int64{"cell1.REPLICA": 1}, throttlerImpl.healthChecksRecordedTotal.Counts()) + // The second throttle call should reject. - assert.True(t, throttler.Throttle(100)) - assert.Equal(t, int64(2), throttler.requestsTotal.Get()) - assert.Equal(t, int64(1), throttler.requestsThrottled.Get()) + assert.True(t, throttlerImpl.Throttle(100, "some_workload")) + assert.Equal(t, int64(2), throttlerImpl.requestsTotal.Counts()["some_workload"]) + assert.Equal(t, int64(1), throttlerImpl.requestsThrottled.Counts()["some_workload"]) // This call should not throttle due to priority. Check that's the case and counters agree. - assert.False(t, throttler.Throttle(0)) - assert.Equal(t, int64(3), throttler.requestsTotal.Get()) - assert.Equal(t, int64(1), throttler.requestsThrottled.Get()) - throttler.Close() - assert.Zero(t, throttler.throttlerRunning.Get()) + assert.False(t, throttlerImpl.Throttle(0, "some_workload")) + assert.Equal(t, int64(3), throttlerImpl.requestsTotal.Counts()["some_workload"]) + assert.Equal(t, int64(1), throttlerImpl.requestsThrottled.Counts()["some_workload"]) + throttlerImpl.Close() + assert.Zero(t, throttlerImpl.throttlerRunning.Get()) } -func TestNewTxThrottler(t *testing.T) { - config := tabletenv.NewDefaultConfig() - env := tabletenv.NewEnv(config, t.Name()) - +func TestFetchKnownCells(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() { - // disabled config - throttler, err := newTxThrottler(env, nil, &txThrottlerConfig{enabled: false}) - assert.Nil(t, err) - assert.NotNil(t, throttler) + ts := memorytopo.NewServer(ctx, "cell1", "cell2") + cells := fetchKnownCells(context.Background(), ts, 
&querypb.Target{Cell: "cell1"}) + assert.Equal(t, []string{"cell1", "cell2"}, cells) } { - // enabled with invalid throttler config - throttler, err := newTxThrottler(env, nil, &txThrottlerConfig{ - enabled: true, - throttlerConfig: &throttlerdatapb.Configuration{}, - }) - assert.NotNil(t, err) - assert.Nil(t, throttler) + ts := memorytopo.NewServer(ctx) + cells := fetchKnownCells(context.Background(), ts, &querypb.Target{Cell: "cell1"}) + assert.Equal(t, []string{"cell1"}, cells) } - { - // enabled - throttler, err := newTxThrottler(env, nil, &txThrottlerConfig{ - enabled: true, - healthCheckCells: []string{"cell1"}, - throttlerConfig: throttler.DefaultMaxReplicationLagModuleConfig().Configuration, +} + +func TestDryRunThrottler(t *testing.T) { + config := tabletenv.NewDefaultConfig() + env := tabletenv.NewEnv(config, t.Name()) + + testCases := []struct { + Name string + txThrottlerStateShouldThrottle bool + throttlerDryRun bool + expectedResult bool + }{ + {Name: "Real run throttles when txThrottlerStateImpl says it should", txThrottlerStateShouldThrottle: true, throttlerDryRun: false, expectedResult: true}, + {Name: "Real run does not throttle when txThrottlerStateImpl says it should not", txThrottlerStateShouldThrottle: false, throttlerDryRun: false, expectedResult: false}, + {Name: "Dry run does not throttle when txThrottlerStateImpl says it should", txThrottlerStateShouldThrottle: true, throttlerDryRun: true, expectedResult: false}, + {Name: "Dry run does not throttle when txThrottlerStateImpl says it should not", txThrottlerStateShouldThrottle: false, throttlerDryRun: true, expectedResult: false}, + } + + for _, aTestCase := range testCases { + theTestCase := aTestCase + + t.Run(theTestCase.Name, func(t *testing.T) { + aTxThrottler := &txThrottler{ + config: &tabletenv.TabletConfig{ + EnableTxThrottler: true, + TxThrottlerDryRun: theTestCase.throttlerDryRun, + }, + state: &mockTxThrottlerState{shouldThrottle: theTestCase.txThrottlerStateShouldThrottle}, + 
throttlerRunning: env.Exporter().NewGauge("TransactionThrottlerRunning", "transaction throttler running state"), + requestsTotal: env.Exporter().NewCountersWithSingleLabel("TransactionThrottlerRequests", "transaction throttler requests", "workload"), + requestsThrottled: env.Exporter().NewCountersWithSingleLabel("TransactionThrottlerThrottled", "transaction throttler requests throttled", "workload"), + } + + assert.Equal(t, theTestCase.expectedResult, aTxThrottler.Throttle(100, "some-workload")) }) - assert.Nil(t, err) - assert.NotNil(t, throttler) } } + +type mockTxThrottlerState struct { + shouldThrottle bool +} + +func (t *mockTxThrottlerState) deallocateResources() { + +} +func (t *mockTxThrottlerState) StatsUpdate(tabletStats *discovery.TabletHealth) { + +} + +func (t *mockTxThrottlerState) throttle() bool { + return t.shouldThrottle +} diff --git a/go/vt/vttablet/tabletserver/vstreamer/copy.go b/go/vt/vttablet/tabletserver/vstreamer/copy.go index 06e90688482..585be09dec3 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/copy.go +++ b/go/vt/vttablet/tabletserver/vstreamer/copy.go @@ -23,7 +23,8 @@ import ( "math" "time" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -72,7 +73,7 @@ func (uvs *uvstreamer) catchup(ctx context.Context) error { errch := make(chan error, 1) go func() { - startPos := mysql.EncodePosition(uvs.pos) + startPos := replication.EncodePosition(uvs.pos) vs := newVStreamer(ctx, uvs.cp, uvs.se, startPos, "", uvs.filter, uvs.getVSchema(), uvs.throttlerApp, uvs.send2, "catchup", uvs.vse) uvs.setVs(vs) errch <- vs.Stream() @@ -225,7 +226,7 @@ func (uvs *uvstreamer) copyTable(ctx context.Context, tableName string) error { if len(rows.Fields) == 0 { return fmt.Errorf("expecting field event first, got: %v", rows) } - pos, _ := mysql.DecodePosition(rows.Gtid) + pos, _ := 
replication.DecodePosition(rows.Gtid) if !uvs.pos.IsZero() && !uvs.pos.AtLeast(pos) { if err := uvs.fastForward(rows.Gtid); err != nil { uvs.setVs(nil) @@ -233,7 +234,7 @@ func (uvs *uvstreamer) copyTable(ctx context.Context, tableName string) error { return err } uvs.setVs(nil) - if mysql.EncodePosition(uvs.pos) != rows.Gtid { + if replication.EncodePosition(uvs.pos) != rows.Gtid { return fmt.Errorf("position after fastforward was %s but stopPos was %s", uvs.pos, rows.Gtid) } if err := uvs.setPosition(rows.Gtid, false); err != nil { @@ -243,14 +244,21 @@ func (uvs *uvstreamer) copyTable(ctx context.Context, tableName string) error { log.V(2).Infof("Not starting fastforward pos is %s, uvs.pos is %s, rows.gtid %s", pos, uvs.pos, rows.Gtid) } + // Store a copy of the fields and pkfields because the original will be cleared + // when GRPC returns our request to the pool + uvs.fields = slice.Map(rows.Fields, func(f *querypb.Field) *querypb.Field { + return f.CloneVT() + }) + uvs.pkfields = slice.Map(rows.Pkfields, func(f *querypb.Field) *querypb.Field { + return f.CloneVT() + }) + fieldEvent := &binlogdatapb.FieldEvent{ TableName: tableName, - Fields: rows.Fields, + Fields: uvs.fields, Keyspace: uvs.vse.keyspace, Shard: uvs.vse.shard, } - uvs.fields = rows.Fields - uvs.pkfields = rows.Pkfields if err := uvs.sendFieldEvent(ctx, rows.Gtid, fieldEvent); err != nil { log.Infof("sendFieldEvent returned error %v", err) return err @@ -277,7 +285,7 @@ func (uvs *uvstreamer) copyTable(ctx context.Context, tableName string) error { newLastPK = sqltypes.CustomProto3ToResult(uvs.pkfields, &querypb.QueryResult{ Fields: uvs.pkfields, - Rows: []*querypb.Row{rows.Lastpk}, + Rows: []*querypb.Row{rows.Lastpk.CloneVT()}, }) qrLastPK := sqltypes.ResultToProto3(newLastPK) log.V(2).Infof("Calling sendEventForRows with gtid %s", rows.Gtid) @@ -316,9 +324,9 @@ func (uvs *uvstreamer) fastForward(stopPos string) error { defer func() { uvs.vse.vstreamerPhaseTimings.Record("fastforward", 
time.Now()) }() - log.Infof("starting fastForward from %s upto pos %s", mysql.EncodePosition(uvs.pos), stopPos) - uvs.stopPos, _ = mysql.DecodePosition(stopPos) - vs := newVStreamer(uvs.ctx, uvs.cp, uvs.se, mysql.EncodePosition(uvs.pos), "", uvs.filter, uvs.getVSchema(), uvs.throttlerApp, uvs.send2, "fastforward", uvs.vse) + log.Infof("starting fastForward from %s upto pos %s", replication.EncodePosition(uvs.pos), stopPos) + uvs.stopPos, _ = replication.DecodePosition(stopPos) + vs := newVStreamer(uvs.ctx, uvs.cp, uvs.se, replication.EncodePosition(uvs.pos), "", uvs.filter, uvs.getVSchema(), uvs.throttlerApp, uvs.send2, "fastforward", uvs.vse) uvs.setVs(vs) return vs.Stream() } diff --git a/go/vt/vttablet/tabletserver/vstreamer/engine.go b/go/vt/vttablet/tabletserver/vstreamer/engine.go index c55c312f442..adbd117c2f2 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/engine.go +++ b/go/vt/vttablet/tabletserver/vstreamer/engine.go @@ -70,6 +70,7 @@ type Engine struct { streamIdx int streamers map[int]*uvstreamer rowStreamers map[int]*rowStreamer + tableStreamers map[int]*tableStreamer resultStreamers map[int]*resultStreamer // watcherOnce is used for initializing vschema @@ -99,6 +100,7 @@ type Engine struct { vstreamersCreated *stats.Counter vstreamersEndedWithErrors *stats.Counter vstreamerFlushedBinlogs *stats.Counter + tableStreamerNumTables *stats.Counter throttlerClient *throttle.Client } @@ -116,6 +118,7 @@ func NewEngine(env tabletenv.Env, ts srvtopo.Server, se *schema.Engine, lagThrot streamers: make(map[int]*uvstreamer), rowStreamers: make(map[int]*rowStreamer), + tableStreamers: make(map[int]*tableStreamer), resultStreamers: make(map[int]*resultStreamer), lvschema: &localVSchema{vschema: &vindexes.VSchema{}}, @@ -135,6 +138,7 @@ func NewEngine(env tabletenv.Env, ts srvtopo.Server, se *schema.Engine, lagThrot rowStreamerNumRows: env.Exporter().NewCounter("RowStreamerNumRows", "Number of rows sent in row streamer"), rowStreamerWaits: 
env.Exporter().NewTimings("RowStreamerWaits", "Total counts and time we've waited when streaming rows in the vstream copy phase", "copy-phase-waits"), vstreamersCreated: env.Exporter().NewCounter("VStreamersCreated", "Count of vstreamers created"), + tableStreamerNumTables: env.Exporter().NewCounter("TableStreamerNumTables", "Number of tables streamed by the table streamer"), vstreamersEndedWithErrors: env.Exporter().NewCounter("VStreamersEndedWithErrors", "Count of vstreamers that ended with errors"), errorCounts: env.Exporter().NewCountersWithSingleLabel("VStreamerErrors", "Tracks errors in vstreamer", "type", "Catchup", "Copy", "Send", "TablePlan"), vstreamerFlushedBinlogs: env.Exporter().NewCounter("VStreamerFlushedBinlogs", "Number of times we've successfully executed a FLUSH BINARY LOGS statement when starting a vstream"), @@ -283,7 +287,7 @@ func (vse *Engine) StreamRows(ctx context.Context, query string, lastpk []sqltyp vse.mu.Lock() defer vse.mu.Unlock() - rowStreamer := newRowStreamer(ctx, vse.env.Config().DB.FilteredWithDB(), vse.se, query, lastpk, vse.lvschema, send, vse) + rowStreamer := newRowStreamer(ctx, vse.env.Config().DB.FilteredWithDB(), vse.se, query, lastpk, vse.lvschema, send, vse, RowStreamerModeSingleTable, nil) idx := vse.streamIdx vse.rowStreamers[idx] = rowStreamer vse.streamIdx++ @@ -308,6 +312,47 @@ func (vse *Engine) StreamRows(ctx context.Context, query string, lastpk []sqltyp return rowStreamer.Stream() } +// StreamTables streams all tables. +func (vse *Engine) StreamTables(ctx context.Context, send func(*binlogdatapb.VStreamTablesResponse) error) error { + // Ensure vschema is initialized and the watcher is started. + // Starting of the watcher is delayed till the first call to StreamTables + // so that this overhead is incurred only if someone uses this feature. + vse.watcherOnce.Do(vse.setWatch) + log.Infof("Streaming all tables") + + // Create stream and add it to the map. 
+ tableStreamer, idx, err := func() (*tableStreamer, int, error) { + if atomic.LoadInt32(&vse.isOpen) == 0 { + return nil, 0, errors.New("VStreamer is not open") + } + vse.mu.Lock() + defer vse.mu.Unlock() + + tableStreamer := newTableStreamer(ctx, vse.env.Config().DB.FilteredWithDB(), vse.se, vse.lvschema, send, vse) + idx := vse.streamIdx + vse.tableStreamers[idx] = tableStreamer + vse.streamIdx++ + // Now that we've added the stream, increment wg. + // This must be done before releasing the lock. + vse.wg.Add(1) + return tableStreamer, idx, nil + }() + if err != nil { + return err + } + + // Remove stream from map and decrement wg when it ends. + defer func() { + vse.mu.Lock() + defer vse.mu.Unlock() + delete(vse.tableStreamers, idx) + vse.wg.Done() + }() + + // No lock is held while streaming, but wg is incremented. + return tableStreamer.Stream() +} + // StreamResults streams results of the query with the gtid. func (vse *Engine) StreamResults(ctx context.Context, query string, send func(*binlogdatapb.VStreamResultsResponse) error) error { // Create stream and add it to the map. 
diff --git a/go/vt/vttablet/tabletserver/vstreamer/engine_test.go b/go/vt/vttablet/tabletserver/vstreamer/engine_test.go index 7324e59c3b9..36bcc8f181a 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/engine_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/engine_test.go @@ -110,6 +110,7 @@ func TestUpdateVSchema(t *testing.T) { "keyspaces": { "vttest": { "sharded": true, + "foreignKeyMode": "unmanaged", "tables": { "t1": { "name": "t1", @@ -173,6 +174,7 @@ func TestVStreamerWaitForMySQL(t *testing.T) { tableName := "test" expectedWaits := int64(0) testDB := fakesqldb.New(t) + defer testDB.Close() hostres := sqltypes.MakeTestResult(sqltypes.MakeTestFields( "hostname|port", "varchar|int64"), diff --git a/go/vt/vttablet/tabletserver/vstreamer/main_flaky_test.go b/go/vt/vttablet/tabletserver/vstreamer/main_flaky_test.go index a62489e2fe0..f3743c6de46 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/main_flaky_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/main_flaky_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package vstreamer import ( + "context" "fmt" "os" "testing" @@ -31,11 +32,11 @@ import ( ) var ( - engine *Engine - env *testenv.Env - schemaDir string + engine *Engine + env *testenv.Env ignoreKeyspaceShardInFieldAndRowEvents bool + testRowEventFlags bool ) func TestMain(m *testing.M) { @@ -44,7 +45,9 @@ func TestMain(m *testing.M) { exitCode := func() int { var err error - env, err = testenv.Init() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env, err = testenv.Init(ctx) if err != nil { fmt.Fprintf(os.Stderr, "%v", err) return 1 @@ -63,7 +66,7 @@ func TestMain(m *testing.M) { os.Exit(exitCode) } -func newEngine(t *testing.T, binlogRowImage string) { +func newEngine(t *testing.T, ctx context.Context, binlogRowImage string) { if engine != nil { engine.Close() } @@ -71,7 +74,7 @@ func newEngine(t *testing.T, binlogRowImage string) { env.Close() } var err error - env, err = testenv.Init() + env, err = testenv.Init(ctx) require.NoError(t, err) setBinlogRowImage(t, binlogRowImage) diff --git a/go/vt/vttablet/tabletserver/vstreamer/packet_size_test.go b/go/vt/vttablet/tabletserver/vstreamer/packet_size_test.go index ae71603ea52..35542e0a879 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/packet_size_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/packet_size_test.go @@ -21,6 +21,8 @@ import ( "math/rand" "testing" "time" + + "github.com/stretchr/testify/assert" ) type polynomial []float64 @@ -33,7 +35,7 @@ func (p polynomial) fit(x float64) float64 { return y } -func simulate(t *testing.T, ps PacketSizer, base, mustSend int, interpolate func(float64) float64) (time.Duration, int) { +func simulate(t *testing.T, rand *rand.Rand, ps PacketSizer, base, mustSend int, interpolate func(float64) float64) (time.Duration, int) { t.Helper() var elapsed time.Duration @@ -90,25 +92,21 @@ func TestPacketSizeSimulation(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { seed := time.Now().UnixNano() - rand.Seed(seed) + 
rand := rand.New(rand.NewSource(seed)) // Simulate a replication using the given polynomial and the dynamic packet sizer ps1 := newDynamicPacketSizer(tc.baseSize) - elapsed1, sent1 := simulate(t, ps1, tc.baseSize, tc.baseSize*1000, tc.p.fit) + elapsed1, sent1 := simulate(t, rand, ps1, tc.baseSize, tc.baseSize*1000, tc.p.fit) // Simulate the same polynomial using a fixed packet size ps2 := newFixedPacketSize(tc.baseSize) - elapsed2, sent2 := simulate(t, ps2, tc.baseSize, tc.baseSize*1000, tc.p.fit) + elapsed2, sent2 := simulate(t, rand, ps2, tc.baseSize, tc.baseSize*1000, tc.p.fit) // the simulation for dynamic packet sizing should always be faster then the fixed packet, // and should also send fewer packets in total delta := elapsed1 - elapsed2 - if delta > tc.error { - t.Errorf("packet-adjusted simulation is %v slower than fixed approach, seed %d", delta, seed) - } - if sent1 > sent2 { - t.Errorf("packet-adjusted simulation sent more packets (%d) than fixed approach (%d), seed %d", sent1, sent2, seed) - } + assert.LessOrEqualf(t, delta, tc.error, "packet-adjusted simulation is %v slower than fixed approach", delta) + assert.LessOrEqualf(t, sent1, sent2, "packet-adjusted simulation sent more packets (%d) than fixed approach (%d)", sent1, sent2) // t.Logf("dynamic = (%v, %d), fixed = (%v, %d)", elapsed1, sent1, elapsed2, sent2) }) } diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go index d1a75e6e9ed..c9bb0121571 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go +++ b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go @@ -79,6 +79,8 @@ const ( GreaterThanEqual // NotEqual is used to filter a comparable column if != specific value NotEqual + // IsNotNull is used to filter a column if it is NULL + IsNotNull ) // Filter contains opcodes for filtering. 
@@ -135,7 +137,7 @@ func (ta *Table) FindColumn(name sqlparser.IdentifierCI) int { func (plan *Plan) fields() []*querypb.Field { fields := make([]*querypb.Field, len(plan.ColExprs)) for i, ce := range plan.ColExprs { - fields[i] = ce.Field + fields[i] = ce.Field.CloneVT() } return fields } @@ -224,6 +226,10 @@ func (plan *Plan) filter(values, result []sqltypes.Value, charsets []collations. if !key.KeyRangeContains(filter.KeyRange, ksid) { return false, nil } + case IsNotNull: + if values[filter.ColNum].IsNull() { + return false, nil + } default: match, err := compare(filter.Opcode, values[filter.ColNum], filter.Value, charsets[filter.ColNum]) if err != nil { @@ -550,6 +556,25 @@ func (plan *Plan) analyzeWhere(vschema *localVSchema, where *sqlparser.Where) er if err := plan.analyzeInKeyRange(vschema, expr.Exprs); err != nil { return err } + case *sqlparser.IsExpr: // Needed for CreateLookupVindex with ignore_nulls + if expr.Right != sqlparser.IsNotNullOp { + return fmt.Errorf("unsupported constraint: %v", sqlparser.String(expr)) + } + qualifiedName, ok := expr.Left.(*sqlparser.ColName) + if !ok { + return fmt.Errorf("unexpected: %v", sqlparser.String(expr)) + } + if !qualifiedName.Qualifier.IsEmpty() { + return fmt.Errorf("unsupported qualifier for column: %v", sqlparser.String(qualifiedName)) + } + colnum, err := findColumn(plan.Table, qualifiedName.Name) + if err != nil { + return err + } + plan.Filters = append(plan.Filters, Filter{ + Opcode: IsNotNull, + ColNum: colnum, + }) default: return fmt.Errorf("unsupported constraint: %v", sqlparser.String(expr)) } @@ -613,7 +638,7 @@ func (plan *Plan) analyzeExpr(vschema *localVSchema, selExpr sqlparser.SelectExp Field: plan.Table.Fields[colnum], }, nil case sqlparser.AggrFunc: - if strings.ToLower(inner.AggrName()) != "keyspace_id" { + if inner.AggrName() != "keyspace_id" { return ColExpr{}, fmt.Errorf("unsupported function: %v", sqlparser.String(inner)) } if len(inner.GetArgs()) != 0 { @@ -702,7 +727,26 @@ func (plan 
*Plan) analyzeExpr(vschema *localVSchema, selExpr sqlparser.SelectExp FixedValue: sqltypes.NewInt64(num), }, nil case *sqlparser.ConvertUsingExpr: - colnum, err := findColumn(plan.Table, aliased.As) + // Here we find the actual column name in the convert, in case + // this is a column rename and the AS is the new column. + // For example, in convert(c1 using utf8mb4) as c2, we want to find + // c1, because c1 exists in the current table whereas c2 is the renamed column + // in the desired table. + var colName sqlparser.IdentifierCI + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + case *sqlparser.ColName: + if !node.Qualifier.IsEmpty() { + return false, fmt.Errorf("unsupported qualifier for column: %v", sqlparser.String(node)) + } + colName = node.Name + } + return true, nil + }, aliased.Expr) + if err != nil { + return ColExpr{}, fmt.Errorf("failed to find column name for convert using expression: %v, %v", sqlparser.String(aliased.Expr), err) + } + colnum, err := findColumn(plan.Table, colName) if err != nil { return ColExpr{}, err } diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go index 8de29403548..03001362073 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go @@ -479,6 +479,29 @@ func TestPlanBuilder(t *testing.T) { KeyRange: nil, }}, }, + }, { + inTable: t1, + inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select convert(val using utf8mb4) as val2, id as id from t1"}, + outPlan: &Plan{ + ColExprs: []ColExpr{{ + ColNum: 1, + Field: &querypb.Field{ + Name: "val", + Type: sqltypes.VarBinary, + Charset: collations.CollationBinaryID, + Flags: uint32(querypb.MySqlFlag_BINARY_FLAG), + }, + }, { + ColNum: 0, + Field: &querypb.Field{ + Name: "id", + Type: sqltypes.Int64, + Charset: collations.CollationBinaryID, + Flags: 
uint32(querypb.MySqlFlag_NUM_FLAG), + }, + }}, + convertUsingUTF8Columns: map[string]bool{"val": true}, + }, }, { inTable: regional, inRule: &binlogdatapb.Rule{Match: "regional", Filter: "select id, keyspace_id() from regional"}, @@ -657,7 +680,7 @@ func TestPlanBuilderFilterComparison(t *testing.T) { Flags: uint32(querypb.MySqlFlag_BINARY_FLAG), }}, } - hashVindex, err := vindexes.NewHash("hash", nil) + hashVindex, err := vindexes.CreateVindex("hash", "hash", nil) require.NoError(t, err) testcases := []struct { name string diff --git a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go index 5824eaf6569..00194b5cc0b 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go @@ -22,8 +22,8 @@ import ( "sync" "time" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/textutil" "vitess.io/vitess/go/timer" @@ -50,10 +50,17 @@ type RowStreamer interface { } // NewRowStreamer returns a RowStreamer -func NewRowStreamer(ctx context.Context, cp dbconfigs.Connector, se *schema.Engine, query string, lastpk []sqltypes.Value, send func(*binlogdatapb.VStreamRowsResponse) error, vse *Engine) RowStreamer { - return newRowStreamer(ctx, cp, se, query, lastpk, &localVSchema{vschema: &vindexes.VSchema{}}, send, vse) +func NewRowStreamer(ctx context.Context, cp dbconfigs.Connector, se *schema.Engine, query string, lastpk []sqltypes.Value, send func(*binlogdatapb.VStreamRowsResponse) error, vse *Engine, mode RowStreamerMode) RowStreamer { + return newRowStreamer(ctx, cp, se, query, lastpk, &localVSchema{vschema: &vindexes.VSchema{}}, send, vse, mode, nil) } +type RowStreamerMode int32 + +const ( + RowStreamerModeSingleTable RowStreamerMode = iota + RowStreamerModeAllTables +) + // rowStreamer is used for copying the existing rows of a table // before 
vreplication begins streaming binlogs. The rowStreamer // responds to a request with the GTID position as of which it @@ -79,9 +86,15 @@ type rowStreamer struct { sendQuery string vse *Engine pktsize PacketSizer + + mode RowStreamerMode + conn *snapshotConn } -func newRowStreamer(ctx context.Context, cp dbconfigs.Connector, se *schema.Engine, query string, lastpk []sqltypes.Value, vschema *localVSchema, send func(*binlogdatapb.VStreamRowsResponse) error, vse *Engine) *rowStreamer { +func newRowStreamer(ctx context.Context, cp dbconfigs.Connector, se *schema.Engine, query string, + lastpk []sqltypes.Value, vschema *localVSchema, send func(*binlogdatapb.VStreamRowsResponse) error, vse *Engine, + mode RowStreamerMode, conn *snapshotConn) *rowStreamer { + ctx, cancel := context.WithCancel(ctx) return &rowStreamer{ ctx: ctx, @@ -94,6 +107,8 @@ func newRowStreamer(ctx context.Context, cp dbconfigs.Connector, se *schema.Engi vschema: vschema, vse: vse, pktsize: DefaultPacketSizer(), + mode: mode, + conn: conn, } } @@ -111,15 +126,18 @@ func (rs *rowStreamer) Stream() error { if err := rs.buildPlan(); err != nil { return err } - conn, err := snapshotConnect(rs.ctx, rs.cp) - if err != nil { - return err - } - defer conn.Close() - if _, err := conn.ExecuteFetch("set names 'binary'", 1, false); err != nil { - return err + if rs.conn == nil { + conn, err := snapshotConnect(rs.ctx, rs.cp) + if err != nil { + return err + } + rs.conn = conn + defer rs.conn.Close() + if _, err := rs.conn.ExecuteFetch("set names 'binary'", 1, false); err != nil { + return err + } } - return rs.streamQuery(conn, rs.send) + return rs.streamQuery(rs.send) } func (rs *rowStreamer) buildPlan() error { @@ -143,16 +161,21 @@ func (rs *rowStreamer) buildPlan() error { // where it in fact does exist. // For this reason we give vstreamer a "second chance" to review the up-to-date state of the schema. // In the future, we will reduce this operation to reading a single table rather than the entire schema. 
- rs.se.ReloadAt(context.Background(), mysql.Position{}) + rs.se.ReloadAt(context.Background(), replication.Position{}) st, err = rs.se.GetTableForPos(fromTable, "") } if err != nil { return err } ti := &Table{ - Name: st.Name, - Fields: st.Fields, + Name: st.Name, } + + ti.Fields, err = getFields(rs.ctx, rs.cp, st.Name, rs.cp.DBName(), st.Fields) + if err != nil { + return err + } + // The plan we build is identical to the one for vstreamer. // This is because the row format of a read is identical // to the row format of a binlog event. So, the same @@ -274,7 +297,7 @@ func (rs *rowStreamer) buildSelect() (string, error) { return buf.String(), nil } -func (rs *rowStreamer) streamQuery(conn *snapshotConn, send func(*binlogdatapb.VStreamRowsResponse) error) error { +func (rs *rowStreamer) streamQuery(send func(*binlogdatapb.VStreamRowsResponse) error) error { throttleResponseRateLimiter := timer.NewRateLimiter(rowStreamertHeartbeatInterval) defer throttleResponseRateLimiter.Stop() @@ -288,14 +311,25 @@ func (rs *rowStreamer) streamQuery(conn *snapshotConn, send func(*binlogdatapb.V if err := rs.vse.waitForMySQL(rs.ctx, rs.cp, rs.plan.Table.Name); err != nil { return err } - + var ( + gtid string + rotatedLog bool + err error + ) log.Infof("Streaming query: %v\n", rs.sendQuery) - gtid, rotatedLog, err := conn.streamWithSnapshot(rs.ctx, rs.plan.Table.Name, rs.sendQuery) - if rotatedLog { - rs.vse.vstreamerFlushedBinlogs.Add(1) - } - if err != nil { - return err + if rs.mode == RowStreamerModeSingleTable { + gtid, rotatedLog, err = rs.conn.streamWithSnapshot(rs.ctx, rs.plan.Table.Name, rs.sendQuery) + if err != nil { + return err + } + if rotatedLog { + rs.vse.vstreamerFlushedBinlogs.Add(1) + } + } else { + // Comes here when we stream all tables. The snapshot is created just once at the start. 
+ if err := rs.conn.ExecuteStreamFetch(rs.query); err != nil { + return err + } } pkfields := make([]*querypb.Field, len(rs.pkColumns)) @@ -326,15 +360,20 @@ func (rs *rowStreamer) streamQuery(conn *snapshotConn, send func(*binlogdatapb.V heartbeatTicker := time.NewTicker(rowStreamertHeartbeatInterval) defer heartbeatTicker.Stop() go func() { - for range heartbeatTicker.C { + select { + case <-rs.ctx.Done(): + return + case <-heartbeatTicker.C: safeSend(&binlogdatapb.VStreamRowsResponse{Heartbeat: true}) } }() - var response binlogdatapb.VStreamRowsResponse - var rows []*querypb.Row - var rowCount int - var mysqlrow []sqltypes.Value + var ( + response binlogdatapb.VStreamRowsResponse + rows []*querypb.Row + rowCount int + mysqlrow []sqltypes.Value + ) filtered := make([]sqltypes.Value, len(rs.plan.ColExprs)) lastpk := make([]sqltypes.Value, len(rs.pkColumns)) @@ -356,7 +395,7 @@ func (rs *rowStreamer) streamQuery(conn *snapshotConn, send func(*binlogdatapb.V if mysqlrow != nil { mysqlrow = mysqlrow[:0] } - mysqlrow, err = conn.FetchNext(mysqlrow) + mysqlrow, err = rs.conn.FetchNext(mysqlrow) if err != nil { return err } diff --git a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go index fe8fe439030..6ba5a3a5d02 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go @@ -72,7 +72,7 @@ func TestStreamRowsScan(t *testing.T) { // t1: simulates rollup, with non-pk column wantStream = []string{ - `fields:{name:"1" type:INT64 charset:63} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id" type:INT32 charset:63}`, + `fields:{name:"1" type:INT64 charset:63} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id" type:INT32 
charset:63}`, `rows:{lengths:1 lengths:3 values:"1aaa"} rows:{lengths:1 lengths:3 values:"1bbb"} lastpk:{lengths:1 values:"2"}`, } wantQuery = "select id, val from t1 order by id" @@ -80,7 +80,7 @@ func TestStreamRowsScan(t *testing.T) { // t1: simulates rollup, with pk and non-pk column wantStream = []string{ - `fields:{name:"1" type:INT64 charset:63} fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id" type:INT32 charset:63}`, + `fields:{name:"1" type:INT64 charset:63} fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id" type:INT32 charset:63}`, `rows:{lengths:1 lengths:1 lengths:3 values:"11aaa"} rows:{lengths:1 lengths:1 lengths:3 values:"12bbb"} lastpk:{lengths:1 values:"2"}`, } wantQuery = "select id, val from t1 order by id" @@ -88,7 +88,7 @@ func TestStreamRowsScan(t *testing.T) { // t1: no pk in select list wantStream = []string{ - `fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id" type:INT32 charset:63}`, + `fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id" type:INT32 charset:63}`, `rows:{lengths:3 values:"aaa"} rows:{lengths:3 values:"bbb"} lastpk:{lengths:1 values:"2"}`, } wantQuery = "select id, val from t1 order by id" @@ -96,7 +96,7 @@ func TestStreamRowsScan(t *testing.T) { // t1: all rows wantStream = []string{ - `fields:{name:"id" type:INT32 table:"t1" org_table:"t1" 
database:"vttest" org_name:"id" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id" type:INT32 charset:63}`, + `fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id" type:INT32 charset:63}`, `rows:{lengths:1 lengths:3 values:"1aaa"} rows:{lengths:1 lengths:3 values:"2bbb"} lastpk:{lengths:1 values:"2"}`, } wantQuery = "select id, val from t1 order by id" @@ -104,7 +104,7 @@ func TestStreamRowsScan(t *testing.T) { // t1: lastpk=1 wantStream = []string{ - `fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id" type:INT32 charset:63}`, + `fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id" type:INT32 charset:63}`, `rows:{lengths:1 lengths:3 values:"2bbb"} lastpk:{lengths:1 values:"2"}`, } wantQuery = "select id, val from t1 where (id > 1) order by id" @@ -112,7 +112,7 @@ func TestStreamRowsScan(t *testing.T) { // t1: different column ordering wantStream = []string{ - `fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63} pkfields:{name:"id" type:INT32 charset:63}`, 
+ `fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} pkfields:{name:"id" type:INT32 charset:63}`, `rows:{lengths:3 lengths:1 values:"aaa1"} rows:{lengths:3 lengths:1 values:"bbb2"} lastpk:{lengths:1 values:"2"}`, } wantQuery = "select id, val from t1 order by id" @@ -120,7 +120,7 @@ func TestStreamRowsScan(t *testing.T) { // t2: all rows wantStream = []string{ - `fields:{name:"id1" type:INT32 table:"t2" org_table:"t2" database:"vttest" org_name:"id1" column_length:11 charset:63} fields:{name:"id2" type:INT32 table:"t2" org_table:"t2" database:"vttest" org_name:"id2" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t2" org_table:"t2" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id1" type:INT32 charset:63} pkfields:{name:"id2" type:INT32 charset:63}`, + `fields:{name:"id1" type:INT32 table:"t2" org_table:"t2" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"id2" type:INT32 table:"t2" org_table:"t2" database:"vttest" org_name:"id2" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t2" org_table:"t2" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id1" type:INT32 charset:63} pkfields:{name:"id2" type:INT32 charset:63}`, `rows:{lengths:1 lengths:1 lengths:3 values:"12aaa"} rows:{lengths:1 lengths:1 lengths:3 values:"13bbb"} lastpk:{lengths:1 lengths:1 values:"13"}`, } wantQuery = "select id1, id2, val from t2 order by id1, id2" @@ -128,7 +128,7 @@ func TestStreamRowsScan(t *testing.T) { // t2: lastpk=1,2 wantStream = []string{ - `fields:{name:"id1" type:INT32 table:"t2" org_table:"t2" database:"vttest" org_name:"id1" 
column_length:11 charset:63} fields:{name:"id2" type:INT32 table:"t2" org_table:"t2" database:"vttest" org_name:"id2" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t2" org_table:"t2" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id1" type:INT32 charset:63} pkfields:{name:"id2" type:INT32 charset:63}`, + `fields:{name:"id1" type:INT32 table:"t2" org_table:"t2" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"id2" type:INT32 table:"t2" org_table:"t2" database:"vttest" org_name:"id2" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t2" org_table:"t2" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id1" type:INT32 charset:63} pkfields:{name:"id2" type:INT32 charset:63}`, `rows:{lengths:1 lengths:1 lengths:3 values:"13bbb"} lastpk:{lengths:1 lengths:1 values:"13"}`, } wantQuery = "select id1, id2, val from t2 where (id1 = 1 and id2 > 2) or (id1 > 1) order by id1, id2" @@ -136,7 +136,7 @@ func TestStreamRowsScan(t *testing.T) { // t3: all rows wantStream = []string{ - `fields:{name:"id" type:INT32 table:"t3" org_table:"t3" database:"vttest" org_name:"id" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t3" org_table:"t3" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id" type:INT32 charset:63} pkfields:{name:"val" type:VARBINARY charset:63}`, + `fields:{name:"id" type:INT32 table:"t3" org_table:"t3" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t3" org_table:"t3" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id" type:INT32 charset:63} pkfields:{name:"val" type:VARBINARY charset:63}`, `rows:{lengths:1 lengths:3 values:"1aaa"} rows:{lengths:1 lengths:3 values:"2bbb"} 
lastpk:{lengths:1 lengths:3 values:"2bbb"}`, } wantQuery = "select id, val from t3 order by id, val" @@ -144,7 +144,7 @@ func TestStreamRowsScan(t *testing.T) { // t3: lastpk: 1,'aaa' wantStream = []string{ - `fields:{name:"id" type:INT32 table:"t3" org_table:"t3" database:"vttest" org_name:"id" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t3" org_table:"t3" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id" type:INT32 charset:63} pkfields:{name:"val" type:VARBINARY charset:63}`, + `fields:{name:"id" type:INT32 table:"t3" org_table:"t3" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t3" org_table:"t3" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id" type:INT32 charset:63} pkfields:{name:"val" type:VARBINARY charset:63}`, `rows:{lengths:1 lengths:3 values:"2bbb"} lastpk:{lengths:1 lengths:3 values:"2bbb"}`, } wantQuery = "select id, val from t3 where (id = 1 and val > 'aaa') or (id > 1) order by id, val" @@ -152,7 +152,7 @@ func TestStreamRowsScan(t *testing.T) { // t4: all rows wantStream = []string{ - `fields:{name:"id1" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id1" column_length:11 charset:63} fields:{name:"id2" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id2" column_length:11 charset:63} fields:{name:"id3" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id3" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t4" org_table:"t4" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id1" type:INT32 charset:63} pkfields:{name:"id2" type:INT32 charset:63} pkfields:{name:"id3" type:INT32 charset:63}`, + `fields:{name:"id1" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"id2" type:INT32 
table:"t4" org_table:"t4" database:"vttest" org_name:"id2" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"id3" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id3" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t4" org_table:"t4" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id1" type:INT32 charset:63} pkfields:{name:"id2" type:INT32 charset:63} pkfields:{name:"id3" type:INT32 charset:63}`, `rows:{lengths:1 lengths:1 lengths:1 lengths:3 values:"123aaa"} rows:{lengths:1 lengths:1 lengths:1 lengths:3 values:"234bbb"} lastpk:{lengths:1 lengths:1 lengths:1 values:"234"}`, } wantQuery = "select id1, id2, id3, val from t4 order by id1, id2, id3" @@ -160,7 +160,7 @@ func TestStreamRowsScan(t *testing.T) { // t4: lastpk: 1,2,3 wantStream = []string{ - `fields:{name:"id1" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id1" column_length:11 charset:63} fields:{name:"id2" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id2" column_length:11 charset:63} fields:{name:"id3" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id3" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t4" org_table:"t4" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id1" type:INT32 charset:63} pkfields:{name:"id2" type:INT32 charset:63} pkfields:{name:"id3" type:INT32 charset:63}`, + `fields:{name:"id1" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"id2" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id2" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"id3" type:INT32 table:"t4" org_table:"t4" database:"vttest" org_name:"id3" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t4" org_table:"t4" 
database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id1" type:INT32 charset:63} pkfields:{name:"id2" type:INT32 charset:63} pkfields:{name:"id3" type:INT32 charset:63}`, `rows:{lengths:1 lengths:1 lengths:1 lengths:3 values:"234bbb"} lastpk:{lengths:1 lengths:1 lengths:1 values:"234"}`, } wantQuery = "select id1, id2, id3, val from t4 where (id1 = 1 and id2 = 2 and id3 > 3) or (id1 = 1 and id2 > 2) or (id1 > 1) order by id1, id2, id3" @@ -255,7 +255,7 @@ func TestStreamRowsKeyRange(t *testing.T) { // Only the first row should be returned, but lastpk should be 6. wantStream := []string{ - `fields:{name:"id1" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id1" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id1" type:INT32 charset:63}`, + `fields:{name:"id1" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id1" type:INT32 charset:63}`, `rows:{lengths:1 lengths:3 values:"1aaa"} lastpk:{lengths:1 values:"6"}`, } wantQuery := "select id1, val from t1 order by id1" @@ -287,7 +287,7 @@ func TestStreamRowsFilterInt(t *testing.T) { time.Sleep(1 * time.Second) wantStream := []string{ - `fields:{name:"id1" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id1" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id1" type:INT32 charset:63}`, + `fields:{name:"id1" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY 
table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id1" type:INT32 charset:63}`, `rows:{lengths:1 lengths:3 values:"1aaa"} rows:{lengths:1 lengths:3 values:"4ddd"} lastpk:{lengths:1 values:"5"}`, } wantQuery := "select id1, id2, val from t1 order by id1" @@ -320,7 +320,7 @@ func TestStreamRowsFilterVarBinary(t *testing.T) { time.Sleep(1 * time.Second) wantStream := []string{ - `fields:{name:"id1" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id1" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id1" type:INT32 charset:63}`, + `fields:{name:"id1" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id1" type:INT32 charset:63}`, `rows:{lengths:1 lengths:6 values:"2newton"} rows:{lengths:1 lengths:6 values:"3newton"} rows:{lengths:1 lengths:6 values:"5newton"} lastpk:{lengths:1 values:"6"}`, } wantQuery := "select id1, val from t1 order by id1" @@ -346,7 +346,7 @@ func TestStreamRowsMultiPacket(t *testing.T) { engine.se.Reload(context.Background()) wantStream := []string{ - `fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63} pkfields:{name:"id" type:INT32 charset:63}`, + `fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 
column_type:"varbinary(128)"} pkfields:{name:"id" type:INT32 charset:63}`, `rows:{lengths:1 lengths:3 values:"1234"} rows:{lengths:1 lengths:4 values:"26789"} rows:{lengths:1 lengths:1 values:"31"} lastpk:{lengths:1 values:"3"}`, `rows:{lengths:1 lengths:10 values:"42345678901"} lastpk:{lengths:1 values:"4"}`, `rows:{lengths:1 lengths:1 values:"52"} lastpk:{lengths:1 values:"5"}`, @@ -415,7 +415,9 @@ func checkStream(t *testing.T, query string, lastpk []sqltypes.Value, wantQuery re, _ := regexp.Compile(` flags:[\d]+`) srows = re.ReplaceAllString(srows, "") - if srows != wantStream[i] { + want := env.RemoveAnyDeprecatedDisplayWidths(wantStream[i]) + + if srows != want { ch <- fmt.Errorf("stream %d:\n%s, want\n%s", i, srows, wantStream[i]) } i++ diff --git a/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go b/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go index ce71bb48a90..b9a3a70ea98 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go +++ b/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go @@ -20,9 +20,12 @@ import ( "context" "fmt" "sync/atomic" + "time" "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" @@ -139,7 +142,7 @@ func (conn *snapshotConn) startSnapshot(ctx context.Context, table string) (gtid if _, err := conn.ExecuteFetch("set @@session.time_zone = '+00:00'", 1, false); err != nil { return "", err } - return mysql.EncodePosition(mpos), nil + return replication.EncodePosition(mpos), nil } // startSnapshotWithConsistentGTID performs the snapshotting without locking tables. 
This assumes @@ -155,14 +158,14 @@ func (conn *snapshotConn) startSnapshotWithConsistentGTID(ctx context.Context) ( } // The "session_track_gtids = START_GTID" patch is only applicable to MySQL56 GTID, which is // why we hardcode the position as mysql.Mysql56FlavorID - mpos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, result.SessionStateChanges) + mpos, err := replication.ParsePosition(replication.Mysql56FlavorID, result.SessionStateChanges) if err != nil { return "", err } if _, err := conn.ExecuteFetch("set @@session.time_zone = '+00:00'", 1, false); err != nil { return "", err } - return mysql.EncodePosition(mpos), nil + return replication.EncodePosition(mpos), nil } // Close rollsback any open transactions and closes the connection. @@ -215,3 +218,47 @@ func GetBinlogRotationThreshold() int64 { func SetBinlogRotationThreshold(threshold int64) { atomic.StoreInt64(&binlogRotationThreshold, threshold) } + +// startSnapshotAllTables starts a streaming query with a snapshot view of all tables, returning the +// GTID set from the time when the snapshot was taken. +func (conn *snapshotConn) startSnapshotAllTables(ctx context.Context) (gtid string, err error) { + const MaxLockWaitTime = 30 * time.Second + shortCtx, cancel := context.WithTimeout(ctx, MaxLockWaitTime) + defer cancel() + + lockConn, err := mysqlConnect(shortCtx, conn.cp) + if err != nil { + return "", err + } + // To be safe, always unlock tables, even if lock tables might fail. 
+ defer func() { + _, err := lockConn.ExecuteFetch("unlock tables", 0, false) + if err != nil { + log.Warning("Unlock tables failed: %v", err) + } + lockConn.Close() + }() + + log.Infof("Locking all tables") + if _, err := lockConn.ExecuteFetch("FLUSH TABLES WITH READ LOCK", 1, false); err != nil { + log.Infof("Error locking all tables") + return "", err + } + mpos, err := lockConn.PrimaryPosition() + if err != nil { + return "", err + } + + // Starting a transaction now will allow us to start the read later, + // which will happen after we release the lock on the table. + if _, err := conn.ExecuteFetch("set transaction isolation level repeatable read", 1, false); err != nil { + return "", err + } + if _, err := conn.ExecuteFetch("start transaction with consistent snapshot", 1, false); err != nil { + return "", err + } + if _, err := conn.ExecuteFetch("set @@session.time_zone = '+00:00'", 1, false); err != nil { + return "", err + } + return replication.EncodePosition(mpos), nil +} diff --git a/go/vt/vttablet/tabletserver/vstreamer/tablestreamer.go b/go/vt/vttablet/tabletserver/vstreamer/tablestreamer.go new file mode 100644 index 00000000000..1039f21b4d6 --- /dev/null +++ b/go/vt/vttablet/tabletserver/vstreamer/tablestreamer.go @@ -0,0 +1,191 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vstreamer + +import ( + "context" + "errors" + "fmt" + "strings" + "sync/atomic" + + "vitess.io/vitess/go/sqlescape" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + schema2 "vitess.io/vitess/go/vt/schema" +) + +/* + TableStreamer is a VStreamer that streams all tables in a keyspace. It iterates through all tables in a keyspace + and streams them one by one. It is not resilient: if there is any error that breaks the stream, for example, + reparenting or a network error, it will not recover and a new workflow will have to be created. +*/ + +// TableStreamer exposes an externally usable interface to tableStreamer. +type TableStreamer interface { + Stream() error + Cancel() +} + +type tableStreamer struct { + ctx context.Context + cancel func() + + cp dbconfigs.Connector + se *schema.Engine + send func(*binlogdatapb.VStreamTablesResponse) error + vschema *localVSchema + vse *Engine + + snapshotConn *snapshotConn + tables []string + gtid string +} + +func newTableStreamer(ctx context.Context, cp dbconfigs.Connector, se *schema.Engine, vschema *localVSchema, + send func(response *binlogdatapb.VStreamTablesResponse) error, vse *Engine) *tableStreamer { + ctx, cancel := context.WithCancel(ctx) + return &tableStreamer{ + ctx: ctx, + cancel: cancel, + cp: cp, + se: se, + send: send, + vschema: vschema, + vse: vse, + } +} + +func (ts *tableStreamer) Cancel() { + log.Info("TableStreamer Cancel() called") + ts.cancel() +} + +func (ts *tableStreamer) Stream() error { + // Ensure that the schema engine is Open. If vttablet came up non_serving, it may not have been initialized. 
+ var err error + if err = ts.se.Open(); err != nil { + return err + } + + conn, err := snapshotConnect(ts.ctx, ts.cp) + if err != nil { + return err + } + defer conn.Close() + ts.snapshotConn = conn + + _, err = conn.ExecuteFetch("set session session_track_gtids = START_GTID", 1, false) + if err != nil { + // session_track_gtids = START_GTID unsupported or cannot execute. Resort to LOCK-based snapshot + ts.gtid, err = conn.startSnapshotAllTables(ts.ctx) + } else { + // session_track_gtids = START_GTID supported. Get a transaction with consistent GTID without LOCKing tables. + ts.gtid, err = conn.startSnapshotWithConsistentGTID(ts.ctx) + } + if err != nil { + return err + } + + if _, err := conn.ExecuteFetch("set names 'binary'", 1, false); err != nil { + return err + } + + rs, err := conn.ExecuteFetch("show tables", -1, true) + if err != nil { + return err + } + for _, row := range rs.Rows { + tableName := row[0].ToString() + if schema2.IsInternalOperationTableName(tableName) { + log.Infof("Skipping internal table %s", tableName) + continue + } + ts.tables = append(ts.tables, tableName) + } + log.Infof("Found %d tables to stream: %s", len(ts.tables), strings.Join(ts.tables, ", ")) + for _, tableName := range ts.tables { + log.Infof("Streaming table %s", tableName) + if err := ts.streamTable(ts.ctx, tableName); err != nil { + return err + } + log.Infof("Finished streaming table %s", tableName) + } + log.Infof("Finished streaming %d tables", len(ts.tables)) + return nil +} + +func (ts *tableStreamer) newRowStreamer(ctx context.Context, query string, lastpk []sqltypes.Value, + send func(*binlogdatapb.VStreamRowsResponse) error) (*rowStreamer, func(), error) { + + vse := ts.vse + if atomic.LoadInt32(&vse.isOpen) == 0 { + return nil, nil, errors.New("VStreamer is not open") + } + vse.mu.Lock() + defer vse.mu.Unlock() + + rowStreamer := newRowStreamer(ctx, vse.env.Config().DB.FilteredWithDB(), vse.se, query, lastpk, vse.lvschema, + send, vse, RowStreamerModeAllTables, 
ts.snapshotConn) + + idx := vse.streamIdx + vse.rowStreamers[idx] = rowStreamer + vse.streamIdx++ + // Now that we've added the stream, increment wg. + // This must be done before releasing the lock. + vse.wg.Add(1) + + // Remove stream from map and decrement wg when it ends. + cancel := func() { + vse.mu.Lock() + defer vse.mu.Unlock() + delete(vse.rowStreamers, idx) + vse.wg.Done() + } + return rowStreamer, cancel, nil +} + +func (ts *tableStreamer) streamTable(ctx context.Context, tableName string) error { + query := fmt.Sprintf("select * from %s", sqlescape.EscapeID(tableName)) + + send := func(response *binlogdatapb.VStreamRowsResponse) error { + return ts.send(&binlogdatapb.VStreamTablesResponse{ + TableName: tableName, + Fields: response.GetFields(), + Pkfields: response.GetPkfields(), + Gtid: ts.gtid, + Rows: response.GetRows(), + Lastpk: response.Lastpk, + }) + } + rs, cancel, err := ts.newRowStreamer(ctx, query, nil, send) + if err != nil { + return err + } + defer cancel() + + if rs.Stream() != nil { + return err + } + rs.vse.tableStreamerNumTables.Add(int64(1)) + + return nil +} diff --git a/go/vt/vttablet/tabletserver/vstreamer/tablestreamer_test.go b/go/vt/vttablet/tabletserver/vstreamer/tablestreamer_test.go new file mode 100644 index 00000000000..bc6ba98d636 --- /dev/null +++ b/go/vt/vttablet/tabletserver/vstreamer/tablestreamer_test.go @@ -0,0 +1,78 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vstreamer + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +// TestTableStreamer streams all tables and ensures all rows are received in the correct order. +func TestTableStreamer(t *testing.T) { + ctx := context.Background() + execStatements(t, []string{ + // Single PK + "create table t1(id int, val varbinary(128), primary key(id))", + "insert into t1 values (1, 'aaa'), (2, 'bbb')", + // Composite PK + "create table t2(id1 int, id2 int, val varbinary(128), primary key(id1, id2))", + "insert into t2 values (1, 2, 'aaa'), (1, 3, 'bbb')", + // No PK + "create table t3(id int, val varbinary(128))", + "insert into t3 values (1, 'aaa'), (2, 'bbb')", + // Three-column PK + "create table t4(id1 int, id2 int, id3 int, val varbinary(128), primary key(id1, id2, id3))", + "insert into t4 values (1, 2, 3, 'aaa'), (2, 3, 4, 'bbb')", + }) + + defer execStatements(t, []string{ + "drop table t1", + "drop table t2", + "drop table t3", + "drop table t4", + }) + + engine.se.Reload(context.Background()) + + wantStream := []string{ + "table_name:\"t1\" fields:{name:\"id\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id\" column_length:11 charset:63 flags:53251} fields:{name:\"val\" type:VARBINARY table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"val\" column_length:128 charset:63 flags:128} pkfields:{name:\"id\" type:INT32 charset:63 flags:53251}", + "table_name:\"t1\" rows:{lengths:1 lengths:3 values:\"1aaa\"} rows:{lengths:1 lengths:3 values:\"2bbb\"} lastpk:{lengths:1 values:\"2\"}", + "table_name:\"t2\" fields:{name:\"id1\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id1\" column_length:11 charset:63 flags:53251} fields:{name:\"id2\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id2\" column_length:11 charset:63 flags:53251} fields:{name:\"val\" type:VARBINARY table:\"t2\" 
org_table:\"t2\" database:\"vttest\" org_name:\"val\" column_length:128 charset:63 flags:128} pkfields:{name:\"id1\" type:INT32 charset:63 flags:53251} pkfields:{name:\"id2\" type:INT32 charset:63 flags:53251}", + "table_name:\"t2\" rows:{lengths:1 lengths:1 lengths:3 values:\"12aaa\"} rows:{lengths:1 lengths:1 lengths:3 values:\"13bbb\"} lastpk:{lengths:1 lengths:1 values:\"13\"}", + "table_name:\"t3\" fields:{name:\"id\" type:INT32 table:\"t3\" org_table:\"t3\" database:\"vttest\" org_name:\"id\" column_length:11 charset:63 flags:32768} fields:{name:\"val\" type:VARBINARY table:\"t3\" org_table:\"t3\" database:\"vttest\" org_name:\"val\" column_length:128 charset:63 flags:128} pkfields:{name:\"id\" type:INT32 charset:63 flags:32768} pkfields:{name:\"val\" type:VARBINARY charset:63 flags:128}", + "table_name:\"t3\" rows:{lengths:1 lengths:3 values:\"1aaa\"} rows:{lengths:1 lengths:3 values:\"2bbb\"} lastpk:{lengths:1 lengths:3 values:\"2bbb\"}", + "table_name:\"t4\" fields:{name:\"id1\" type:INT32 table:\"t4\" org_table:\"t4\" database:\"vttest\" org_name:\"id1\" column_length:11 charset:63 flags:53251} fields:{name:\"id2\" type:INT32 table:\"t4\" org_table:\"t4\" database:\"vttest\" org_name:\"id2\" column_length:11 charset:63 flags:53251} fields:{name:\"id3\" type:INT32 table:\"t4\" org_table:\"t4\" database:\"vttest\" org_name:\"id3\" column_length:11 charset:63 flags:53251} fields:{name:\"val\" type:VARBINARY table:\"t4\" org_table:\"t4\" database:\"vttest\" org_name:\"val\" column_length:128 charset:63 flags:128} pkfields:{name:\"id1\" type:INT32 charset:63 flags:53251} pkfields:{name:\"id2\" type:INT32 charset:63 flags:53251} pkfields:{name:\"id3\" type:INT32 charset:63 flags:53251}", + "table_name:\"t4\" rows:{lengths:1 lengths:1 lengths:1 lengths:3 values:\"123aaa\"} rows:{lengths:1 lengths:1 lengths:1 lengths:3 values:\"234bbb\"} lastpk:{lengths:1 lengths:1 lengths:1 values:\"234\"}", + } + var gotStream []string + err := engine.StreamTables(ctx, 
func(response *binlogdatapb.VStreamTablesResponse) error { + response.Gtid = "" + for _, fld := range response.Fields { + fld.ColumnType = "" + } + gotStream = append(gotStream, fmt.Sprintf("%v", response)) + return nil + }) + require.NoError(t, err) + require.EqualValues(t, wantStream, gotStream) + require.Equal(t, int64(4), engine.tableStreamerNumTables.Get()) +} diff --git a/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go b/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go index 9f2138ae19f..c40e180110f 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go +++ b/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go @@ -22,7 +22,6 @@ import ( "fmt" "os" "regexp" - "strconv" "strings" "vitess.io/vitess/go/json2" @@ -61,25 +60,22 @@ type Env struct { DBPatchVersion int } -var versionRegex = regexp.MustCompile(`([0-9]+)\.([0-9]+)\.([0-9]+)`) - // Init initializes an Env. -func Init() (*Env, error) { +func Init(ctx context.Context) (*Env, error) { te := &Env{ KeyspaceName: "vttest", ShardName: "0", Cells: []string{"cell1"}, } - ctx := context.Background() - te.TopoServ = memorytopo.NewServer(te.Cells...) + te.TopoServ = memorytopo.NewServer(ctx, te.Cells...) 
if err := te.TopoServ.CreateKeyspace(ctx, te.KeyspaceName, &topodatapb.Keyspace{}); err != nil { return nil, err } if err := te.TopoServ.CreateShard(ctx, te.KeyspaceName, te.ShardName); err != nil { panic(err) } - te.SrvTopo = srvtopo.NewResilientServer(te.TopoServ, "TestTopo") + te.SrvTopo = srvtopo.NewResilientServer(ctx, te.TopoServ, "TestTopo") cfg := vttest.Config{ Topology: &vttestpb.VTTestTopology{ @@ -118,26 +114,19 @@ func Init() (*Env, error) { // MySQL and Percona are equivalent for the tests te.DBType = string(mysqlctl.FlavorMySQL) } - dbVersionStr := te.Mysqld.GetVersionString(context.Background()) - dbVersionStrParts := versionRegex.FindStringSubmatch(dbVersionStr) - if len(dbVersionStrParts) != 4 { - return nil, fmt.Errorf("could not parse server version from: %s", dbVersionStr) - } - - var err error - te.DBMajorVersion, err = strconv.Atoi(dbVersionStrParts[1]) + dbVersionStr, err := te.Mysqld.GetVersionString(context.Background()) if err != nil { - return nil, fmt.Errorf("could not parse database major version from '%s': %v", dbVersionStr, err) + return nil, fmt.Errorf("could not get server version: %w", err) } - te.DBMinorVersion, err = strconv.Atoi(dbVersionStrParts[2]) + _, version, err := mysqlctl.ParseVersionString(dbVersionStr) if err != nil { - return nil, fmt.Errorf("could not parse database minor version from '%s': %v", dbVersionStr, err) - } - te.DBPatchVersion, err = strconv.Atoi(dbVersionStrParts[3]) - if err != nil { - return nil, fmt.Errorf("could not parse database patch version from '%s': %v", dbVersionStr, err) + return nil, fmt.Errorf("could not parse server version %q: %w", dbVersionStr, err) } + te.DBMajorVersion = version.Major + te.DBMinorVersion = version.Minor + te.DBPatchVersion = version.Patch + te.SchemaEngine = schema.NewEngine(te.TabletEnv) te.SchemaEngine.InitDBConfig(te.Dbcfgs.DbaWithDB()) if err := te.SchemaEngine.Open(); err != nil { diff --git a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go 
b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go index a1ea07a92fa..2b770c1d4f4 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go @@ -26,9 +26,9 @@ import ( "sync" "time" + "vitess.io/vitess/go/mysql/replication" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" @@ -76,10 +76,10 @@ type uvstreamer struct { pkfields []*querypb.Field // current position in the binlog for this streamer - pos mysql.Position + pos replication.Position // fast forward uses this to stop replicating upto the point of the last snapshot - stopPos mysql.Position + stopPos replication.Position // lastTimestampNs is the last timestamp seen so far. lastTimestampNs int64 @@ -324,7 +324,7 @@ func (uvs *uvstreamer) send2(evs []*binlogdatapb.VEvent) error { } for _, ev := range evs2 { if ev.Type == binlogdatapb.VEventType_GTID { - uvs.pos, _ = mysql.DecodePosition(ev.Gtid) + uvs.pos, _ = replication.DecodePosition(ev.Gtid) if !uvs.stopPos.IsZero() && uvs.pos.AtLeast(uvs.stopPos) { err = io.EOF } @@ -340,7 +340,7 @@ func (uvs *uvstreamer) sendEventsForCurrentPos() error { log.Infof("sendEventsForCurrentPos") evs := []*binlogdatapb.VEvent{{ Type: binlogdatapb.VEventType_GTID, - Gtid: mysql.EncodePosition(uvs.pos), + Gtid: replication.EncodePosition(uvs.pos), }, { Type: binlogdatapb.VEventType_OTHER, }} @@ -362,7 +362,7 @@ func (uvs *uvstreamer) setStreamStartPosition() error { } return nil } - pos, err := mysql.DecodePosition(uvs.startPos) + pos, err := replication.DecodePosition(uvs.startPos) if err != nil { return vterrors.Wrap(err, "could not decode position") } @@ -370,16 +370,16 @@ func (uvs *uvstreamer) setStreamStartPosition() error { uvs.vse.errorCounts.Add("GTIDSet Mismatch", 1) return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "GTIDSet Mismatch: requested source position:%v, current target vrep 
position: %v", - mysql.EncodePosition(pos), mysql.EncodePosition(curPos)) + replication.EncodePosition(pos), replication.EncodePosition(curPos)) } uvs.pos = pos return nil } -func (uvs *uvstreamer) currentPosition() (mysql.Position, error) { +func (uvs *uvstreamer) currentPosition() (replication.Position, error) { conn, err := uvs.cp.Connect(uvs.ctx) if err != nil { - return mysql.Position{}, err + return replication.Position{}, err } defer conn.Close() return conn.PrimaryPosition() @@ -424,7 +424,7 @@ func (uvs *uvstreamer) Stream() error { return err } } - vs := newVStreamer(uvs.ctx, uvs.cp, uvs.se, mysql.EncodePosition(uvs.pos), mysql.EncodePosition(uvs.stopPos), + vs := newVStreamer(uvs.ctx, uvs.cp, uvs.se, replication.EncodePosition(uvs.pos), replication.EncodePosition(uvs.stopPos), uvs.filter, uvs.getVSchema(), uvs.throttlerApp, uvs.send, "replicate", uvs.vse) uvs.setVs(vs) @@ -519,7 +519,7 @@ func (uvs *uvstreamer) setPosition(gtid string, isInTx bool) error { if gtid == "" { return fmt.Errorf("empty gtid passed to setPosition") } - pos, err := mysql.DecodePosition(gtid) + pos, err := replication.DecodePosition(gtid) if err != nil { return err } diff --git a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_flaky_test.go b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_flaky_test.go index 91505ba14af..34f860d2120 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_flaky_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_flaky_test.go @@ -450,6 +450,9 @@ func startVStreamCopy(ctx context.Context, t *testing.T, filter *binlogdatapb.Fi if ev.Type == binlogdatapb.VEventType_HEARTBEAT { continue } + if ev.Type == binlogdatapb.VEventType_ROW { + ev.RowEvent.Flags = 0 + } if ev.Throttled { continue } diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go index d1446310a34..f210e756da1 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go +++ 
b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go @@ -23,13 +23,13 @@ import ( "io" "time" - "vitess.io/vitess/go/vt/vttablet" - "google.golang.org/protobuf/encoding/prototext" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" mysqlbinlog "vitess.io/vitess/go/mysql/binlog" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/binlog" @@ -39,9 +39,9 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" vtschema "vitess.io/vitess/go/vt/schema" - "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" ) @@ -77,7 +77,7 @@ type vstreamer struct { // format and pos are updated by parseEvent. format mysql.BinlogFormat - pos mysql.Position + pos replication.Position stopPos string phase string @@ -157,7 +157,6 @@ func (vs *vstreamer) Cancel() { // Stream streams binlog events. 
func (vs *vstreamer) Stream() error { - //defer vs.cancel() ctx := context.Background() vs.vse.vstreamerCount.Add(1) defer func() { @@ -166,7 +165,7 @@ func (vs *vstreamer) Stream() error { }() vs.vse.vstreamersCreated.Add(1) log.Infof("Starting Stream() with startPos %s", vs.startPos) - pos, err := mysql.DecodePosition(vs.startPos) + pos, err := replication.DecodePosition(vs.startPos) if err != nil { vs.vse.errorCounts.Add("StreamRows", 1) vs.vse.vstreamersEndedWithErrors.Add(1) @@ -447,11 +446,11 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e Type: binlogdatapb.VEventType_BEGIN, }) } - vs.pos = mysql.AppendGTID(vs.pos, gtid) + vs.pos = replication.AppendGTID(vs.pos, gtid) case ev.IsXID(): vevents = append(vevents, &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_GTID, - Gtid: mysql.EncodePosition(vs.pos), + Gtid: replication.EncodePosition(vs.pos), }, &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_COMMIT, }) @@ -462,7 +461,6 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e } // Insert/Delete/Update are supported only to be used in the context of external mysql streams where source databases // could be using SBR. Vitess itself will never run into cases where it needs to consume non rbr statements. - switch cat := sqlparser.Preview(q.SQL); cat { case sqlparser.StmtInsert: mustSend := mustSendStmt(q, vs.cp.DBName()) @@ -508,7 +506,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e if mustSendDDL(q, vs.cp.DBName(), vs.filter) { vevents = append(vevents, &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_GTID, - Gtid: mysql.EncodePosition(vs.pos), + Gtid: replication.EncodePosition(vs.pos), }, &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_DDL, Statement: q.SQL, @@ -517,7 +515,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e // If the DDL need not be sent, send a dummy OTHER event. 
vevents = append(vevents, &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_GTID, - Gtid: mysql.EncodePosition(vs.pos), + Gtid: replication.EncodePosition(vs.pos), }, &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_OTHER, }) @@ -535,14 +533,14 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e // // Vitess only supports row based replication, so skipping the creation of savepoints // reduces the amount of data send over to vplayer. - case sqlparser.StmtOther, sqlparser.StmtPriv, sqlparser.StmtSet, sqlparser.StmtComment, sqlparser.StmtFlush: + case sqlparser.StmtOther, sqlparser.StmtAnalyze, sqlparser.StmtPriv, sqlparser.StmtSet, sqlparser.StmtComment, sqlparser.StmtFlush: // These are either: // 1) DBA statements like REPAIR that can be ignored. // 2) Privilege-altering statements like GRANT/REVOKE // that we want to keep out of the stream for now. vevents = append(vevents, &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_GTID, - Gtid: mysql.EncodePosition(vs.pos), + Gtid: replication.EncodePosition(vs.pos), }, &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_OTHER, }) @@ -575,10 +573,10 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e log.Infof("table map changed: id %d for %s has changed to %s", id, plan.Table.Name, tm.Name) } - if tm.Database == sidecardb.GetName() && tm.Name == "resharding_journal" { + if tm.Database == sidecar.GetName() && tm.Name == "resharding_journal" { // A journal is a special case that generates a JOURNAL event. return nil, vs.buildJournalPlan(id, tm) - } else if tm.Database == sidecardb.GetName() && tm.Name == "schema_version" && !vs.se.SkipMetaCheck { + } else if tm.Database == sidecar.GetName() && tm.Name == "schema_version" && !vs.se.SkipMetaCheck { // Generates a Version event when it detects that a schema is stored in the schema_version table. 
return nil, vs.buildVersionPlan(id, tm) } @@ -634,7 +632,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e return nil, err } case ev.IsTransactionPayload(): - if !vs.pos.MatchesFlavor(mysql.Mysql56FlavorID) { + if !vs.pos.MatchesFlavor(replication.Mysql56FlavorID) { return nil, fmt.Errorf("compressed transaction payload events are not supported with database flavor %s", vs.vse.env.Config().DB.Flavor) } @@ -669,7 +667,7 @@ func (vs *vstreamer) buildJournalPlan(id uint64, tm *mysql.TableMap) error { } defer conn.Close() qr, err := conn.ExecuteFetch(sqlparser.BuildParsedQuery("select * from %s.resharding_journal where 1 != 1", - sidecardb.GetIdentifier()).Query, 1, true) + sidecar.GetIdentifier()).Query, 1, true) if err != nil { return err } @@ -678,7 +676,7 @@ func (vs *vstreamer) buildJournalPlan(id uint64, tm *mysql.TableMap) error { return fmt.Errorf("cannot determine table columns for %s: event has %v, schema has %v", tm.Name, tm.Types, fields) } table := &Table{ - Name: fmt.Sprintf("%s.resharding_journal", sidecardb.GetIdentifier()), + Name: fmt.Sprintf("%s.resharding_journal", sidecar.GetIdentifier()), Fields: fields[:len(tm.Types)], } // Build a normal table plan, which means, return all rows @@ -703,7 +701,7 @@ func (vs *vstreamer) buildVersionPlan(id uint64, tm *mysql.TableMap) error { } defer conn.Close() qr, err := conn.ExecuteFetch(sqlparser.BuildParsedQuery("select * from %s.schema_version where 1 != 1", - sidecardb.GetIdentifier()).Query, 1, true) + sidecar.GetIdentifier()).Query, 1, true) if err != nil { return err } @@ -712,7 +710,7 @@ func (vs *vstreamer) buildVersionPlan(id uint64, tm *mysql.TableMap) error { return fmt.Errorf("cannot determine table columns for %s: event has %v, schema has %v", tm.Name, tm.Types, fields) } table := &Table{ - Name: fmt.Sprintf("%s.schema_version", sidecardb.GetIdentifier()), + Name: fmt.Sprintf("%s.schema_version", sidecar.GetIdentifier()), Fields: fields[:len(tm.Types)], } // 
Build a normal table plan, which means, return all rows @@ -777,7 +775,7 @@ func (vs *vstreamer) buildTableColumns(tm *mysql.TableMap) ([]*querypb.Field, er Flags: mysql.FlagsForColumn(t, collations.DefaultCollationForType(t)), }) } - st, err := vs.se.GetTableForPos(sqlparser.NewIdentifierCS(tm.Name), mysql.EncodePosition(vs.pos)) + st, err := vs.se.GetTableForPos(sqlparser.NewIdentifierCS(tm.Name), replication.EncodePosition(vs.pos)) if err != nil { if vs.filter.FieldEventMode == binlogdatapb.Filter_ERR_ON_MISMATCH { log.Infof("No schema found for table %s", tm.Name) @@ -802,39 +800,16 @@ func (vs *vstreamer) buildTableColumns(tm *mysql.TableMap) ([]*querypb.Field, er } // Columns should be truncated to match those in tm. - fields = st.Fields[:len(tm.Types)] - extColInfos, err := vs.getExtColInfos(tm.Name, tm.Database) + fieldsCopy, err := getFields(vs.ctx, vs.cp, tm.Name, tm.Database, st.Fields[:len(tm.Types)]) if err != nil { return nil, err } - for _, field := range fields { - // we want the MySQL column type info so that we can properly handle - // ambiguous binlog events and other cases where the internal types - // don't match the MySQL column type. One example being that in binlog - // events CHAR columns with a binary collation are indistinguishable - // from BINARY columns. - if extColInfo, ok := extColInfos[field.Name]; ok { - field.ColumnType = extColInfo.columnType - } - } - return fields, nil -} - -// additional column attributes from information_schema.columns. 
Currently only column_type is used, but -// we expect to add more in the future -type extColInfo struct { - columnType string -} - -func encodeString(in string) string { - buf := bytes.NewBuffer(nil) - sqltypes.NewVarChar(in).EncodeSQL(buf) - return buf.String() + return fieldsCopy, nil } -func (vs *vstreamer) getExtColInfos(table, database string) (map[string]*extColInfo, error) { +func getExtColInfos(ctx context.Context, cp dbconfigs.Connector, table, database string) (map[string]*extColInfo, error) { extColInfos := make(map[string]*extColInfo) - conn, err := vs.cp.Connect(vs.ctx) + conn, err := cp.Connect(ctx) if err != nil { return nil, err } @@ -854,6 +829,37 @@ func (vs *vstreamer) getExtColInfos(table, database string) (map[string]*extColI return extColInfos, nil } +func getFields(ctx context.Context, cp dbconfigs.Connector, table, database string, fields []*querypb.Field) ([]*querypb.Field, error) { + // Make a deep copy of the schema.Engine fields as they are pointers and + // will be modified by adding ColumnType below + fieldsCopy := make([]*querypb.Field, len(fields)) + for i, field := range fields { + fieldsCopy[i] = field.CloneVT() + } + extColInfos, err := getExtColInfos(ctx, cp, table, database) + if err != nil { + return nil, err + } + for _, field := range fieldsCopy { + if colInfo, ok := extColInfos[field.Name]; ok { + field.ColumnType = colInfo.columnType + } + } + return fieldsCopy, nil +} + +// additional column attributes from information_schema.columns. 
Currently only column_type is used, but +// we expect to add more in the future +type extColInfo struct { + columnType string +} + +func encodeString(in string) string { + buf := bytes.NewBuffer(nil) + sqltypes.NewVarChar(in).EncodeSQL(buf) + return buf.String() +} + func (vs *vstreamer) processJournalEvent(vevents []*binlogdatapb.VEvent, plan *streamerPlan, rows mysql.Rows) ([]*binlogdatapb.VEvent, error) { // Get DbName params, err := vs.cp.MysqlParams() @@ -936,6 +942,7 @@ func (vs *vstreamer) processRowEvent(vevents []*binlogdatapb.VEvent, plan *strea RowChanges: rowChanges, Keyspace: vs.vse.keyspace, Shard: vs.vse.shard, + Flags: uint32(rows.Flags), }, }) } @@ -1007,7 +1014,7 @@ func (vs *vstreamer) extractRowAndFilter(plan *streamerPlan, data []byte, dataCo return ok, filtered, partial, err } -func wrapError(err error, stopPos mysql.Position, vse *Engine) error { +func wrapError(err error, stopPos replication.Position, vse *Engine) error { if err != nil { vse.vstreamersEndedWithErrors.Add(1) vse.errorCounts.Add("StreamEnded", 1) diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_flaky_test.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_flaky_test.go index 52b81c138a4..0eda0d6c52e 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_flaky_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_flaky_test.go @@ -26,6 +26,7 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer/testenv" @@ -85,8 +86,17 @@ func (tfe *TestFieldEvent) String() string { // TestPlayerNoBlob sets up a new environment with mysql running with binlog_row_image as noblob. 
It confirms that // the VEvents created are correct: that they don't contain the missing columns and that the DataColumns bitmap is sent func TestNoBlob(t *testing.T) { - newEngine(t, "noblob") - defer newEngine(t, "full") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + oldEngine := engine + engine = nil + oldEnv := env + env = nil + newEngine(t, ctx, "noblob") + defer func() { + engine = oldEngine + env = oldEnv + }() execStatements(t, []string{ "create table t1(id int, blb blob, val varchar(4), primary key(id))", "create table t2(id int, txt text, val varchar(4), unique key(id, val))", @@ -281,6 +291,52 @@ func TestSetStatement(t *testing.T) { runCases(t, nil, testcases, "current", nil) } +// TestSetForeignKeyCheck confirms that the binlog RowEvent flags are set correctly when foreign_key_checks are on and off. +func TestSetForeignKeyCheck(t *testing.T) { + testRowEventFlags = true + defer func() { testRowEventFlags = false }() + + execStatements(t, []string{ + "create table t1(id int, val binary(4), primary key(id))", + }) + defer execStatements(t, []string{ + "drop table t1", + }) + engine.se.Reload(context.Background()) + queries := []string{ + "begin", + "insert into t1 values (1, 'aaa')", + "set @@session.foreign_key_checks=1", + "insert into t1 values (2, 'bbb')", + "set @@session.foreign_key_checks=0", + "insert into t1 values (3, 'ccc')", + "commit", + } + + fe := &TestFieldEvent{ + table: "t1", + db: "vttest", + cols: []*TestColumn{ + {name: "id", dataType: "INT32", colType: "int(11)", len: 11, charset: 63}, + {name: "val", dataType: "BINARY", colType: "binary(4)", len: 4, charset: 63}, + }, + } + + testcases := []testcase{{ + input: queries, + output: [][]string{{ + `begin`, + fe.String(), + `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:4 values:"1aaa\x00"}} flags:1}`, + `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:4 values:"2bbb\x00"}} flags:1}`, + `type:ROW 
row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:4 values:"3ccc\x00"}} flags:3}`, + `gtid`, + `commit`, + }}, + }} + runCases(t, nil, testcases, "current", nil) +} + func TestStmtComment(t *testing.T) { if testing.Short() { @@ -487,8 +543,8 @@ func TestVStreamCopySimpleFlow(t *testing.T) { tablePKs = append(tablePKs, getTablePK("t1", 1)) tablePKs = append(tablePKs, getTablePK("t2", 2)) - t1FieldEvent := []string{"begin", "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id11\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id11\" column_length:11 charset:63} fields:{name:\"id12\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id12\" column_length:11 charset:63}}"} - t2FieldEvent := []string{"begin", "type:FIELD field_event:{table_name:\"t2\" fields:{name:\"id21\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id21\" column_length:11 charset:63} fields:{name:\"id22\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id22\" column_length:11 charset:63}}"} + t1FieldEvent := []string{"begin", "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id11\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id11\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id12\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id12\" column_length:11 charset:63 column_type:\"int(11)\"}}"} + t2FieldEvent := []string{"begin", "type:FIELD field_event:{table_name:\"t2\" fields:{name:\"id21\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id21\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id22\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id22\" column_length:11 charset:63 column_type:\"int(11)\"}}"} t1Events := []string{} t2Events := []string{} for i := 1; i <= 10; i++ { @@ -572,7 +628,7 @@ func 
TestVStreamCopyWithDifferentFilters(t *testing.T) { var expectedEvents = []string{ "type:BEGIN", - "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id1\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id1\" column_length:11 charset:63} fields:{name:\"id2\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id2\" column_length:11 charset:63}}", + "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id1\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id1\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id2\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id2\" column_length:11 charset:63 column_type:\"int(11)\"}}", "type:GTID", "type:ROW row_event:{table_name:\"t1\" row_changes:{after:{lengths:1 lengths:1 values:\"12\"}}}", "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t1\" lastpk:{fields:{name:\"id1\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\"1\"}}}}", @@ -581,7 +637,7 @@ func TestVStreamCopyWithDifferentFilters(t *testing.T) { "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t1\"} completed:true}", "type:COMMIT", "type:BEGIN", - "type:FIELD field_event:{table_name:\"t2a\" fields:{name:\"id1\" type:INT32 table:\"t2a\" org_table:\"t2a\" database:\"vttest\" org_name:\"id1\" column_length:11 charset:63} fields:{name:\"id2\" type:INT32 table:\"t2a\" org_table:\"t2a\" database:\"vttest\" org_name:\"id2\" column_length:11 charset:63}}", + "type:FIELD field_event:{table_name:\"t2a\" fields:{name:\"id1\" type:INT32 table:\"t2a\" org_table:\"t2a\" database:\"vttest\" org_name:\"id1\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id2\" type:INT32 table:\"t2a\" org_table:\"t2a\" database:\"vttest\" org_name:\"id2\" column_length:11 charset:63 column_type:\"int(11)\"}}", "type:ROW row_event:{table_name:\"t2a\" row_changes:{after:{lengths:1 lengths:1 values:\"14\"}}}", "type:LASTPK 
last_p_k_event:{table_last_p_k:{table_name:\"t2a\" lastpk:{fields:{name:\"id1\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\"1\"}}}}", "type:COMMIT", @@ -589,7 +645,7 @@ func TestVStreamCopyWithDifferentFilters(t *testing.T) { "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t2a\"} completed:true}", "type:COMMIT", "type:BEGIN", - "type:FIELD field_event:{table_name:\"t2b\" fields:{name:\"id1\" type:VARCHAR table:\"t2b\" org_table:\"t2b\" database:\"vttest\" org_name:\"id1\" column_length:80 charset:45} fields:{name:\"id2\" type:INT32 table:\"t2b\" org_table:\"t2b\" database:\"vttest\" org_name:\"id2\" column_length:11 charset:63}}", + "type:FIELD field_event:{table_name:\"t2b\" fields:{name:\"id1\" type:VARCHAR table:\"t2b\" org_table:\"t2b\" database:\"vttest\" org_name:\"id1\" column_length:80 charset:45 column_type:\"varchar(20)\"} fields:{name:\"id2\" type:INT32 table:\"t2b\" org_table:\"t2b\" database:\"vttest\" org_name:\"id2\" column_length:11 charset:63 column_type:\"int(11)\"}}", "type:ROW row_event:{table_name:\"t2b\" row_changes:{after:{lengths:1 lengths:1 values:\"a5\"}}}", "type:ROW row_event:{table_name:\"t2b\" row_changes:{after:{lengths:1 lengths:1 values:\"b6\"}}}", "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t2b\" lastpk:{fields:{name:\"id1\" type:VARCHAR charset:45 flags:20483} rows:{lengths:1 values:\"b\"}}}}", @@ -635,8 +691,11 @@ func TestVStreamCopyWithDifferentFilters(t *testing.T) { } got := ev.String() want := expectedEvents[i] + + want = env.RemoveAnyDeprecatedDisplayWidths(want) + if !strings.HasPrefix(got, want) { - errGoroutine = fmt.Errorf("Event %d did not match, want %s, got %s", i, want, got) + errGoroutine = fmt.Errorf("event %d did not match, want %s, got %s", i, want, got) return errGoroutine } } @@ -1924,8 +1983,17 @@ func TestMinimalMode(t *testing.T) { t.Skip() } - newEngine(t, "minimal") - defer newEngine(t, "full") + ctx, cancel := context.WithCancel(context.Background()) + defer 
cancel() + oldEngine := engine + engine = nil + oldEnv := env + env = nil + newEngine(t, ctx, "minimal") + defer func() { + engine = oldEngine + env = oldEnv + }() err := engine.Stream(context.Background(), "current", nil, nil, throttlerapp.VStreamerName, func(evs []*binlogdatapb.VEvent) error { return nil }) require.Error(t, err, "minimal binlog_row_image is not supported by Vitess VReplication") } @@ -2272,6 +2340,9 @@ func expectLog(ctx context.Context, t *testing.T, input any, ch <-chan []*binlog evs[i].RowEvent.Keyspace = "" evs[i].RowEvent.Shard = "" } + if !testRowEventFlags && evs[i].Type == binlogdatapb.VEventType_ROW { + evs[i].RowEvent.Flags = 0 + } want = env.RemoveAnyDeprecatedDisplayWidths(want) if got := fmt.Sprintf("%v", evs[i]); got != want { log.Errorf("%v (%d): event:\n%q, want\n%q", input, i, got, want) @@ -2358,7 +2429,7 @@ func primaryPosition(t *testing.T) string { if err != nil { t.Fatal(err) } - return mysql.EncodePosition(pos) + return replication.EncodePosition(pos) } func setVSchema(t *testing.T, vschema string) { diff --git a/go/vt/vttablet/tabletservermock/controller.go b/go/vt/vttablet/tabletservermock/controller.go index 1ae5772587a..33a6b94d327 100644 --- a/go/vt/vttablet/tabletservermock/controller.go +++ b/go/vt/vttablet/tabletservermock/controller.go @@ -22,8 +22,6 @@ import ( "sync" "time" - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/servenv" @@ -32,6 +30,7 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletserver/rules" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -126,13 +125,12 @@ func (tqsc *Controller) AddStatusPart() { func (tqsc *Controller) InitDBConfig(target *querypb.Target, dbcfgs *dbconfigs.DBConfigs, _ 
mysqlctl.MysqlDaemon) error { tqsc.mu.Lock() defer tqsc.mu.Unlock() - - tqsc.target = proto.Clone(target).(*querypb.Target) + tqsc.target = target.CloneVT() return nil } // SetServingType is part of the tabletserver.Controller interface -func (tqsc *Controller) SetServingType(tabletType topodatapb.TabletType, terTime time.Time, serving bool, reason string) error { +func (tqsc *Controller) SetServingType(tabletType topodatapb.TabletType, ptsTime time.Time, serving bool, reason string) error { tqsc.mu.Lock() defer tqsc.mu.Unlock() @@ -160,7 +158,7 @@ func (tqsc *Controller) IsServing() bool { func (tqsc *Controller) CurrentTarget() *querypb.Target { tqsc.mu.Lock() defer tqsc.mu.Unlock() - return proto.Clone(tqsc.target).(*querypb.Target) + return tqsc.target.CloneVT() } // IsHealthy is part of the tabletserver.Controller interface @@ -218,6 +216,11 @@ func (tqsc *Controller) TopoServer() *topo.Server { return tqsc.TS } +// CheckThrottler is part of the tabletserver.Controller interface +func (tqsc *Controller) CheckThrottler(ctx context.Context, appName string, flags *throttle.CheckFlags) *throttle.CheckResult { + return nil +} + // EnterLameduck implements tabletserver.Controller. 
func (tqsc *Controller) EnterLameduck() { tqsc.mu.Lock() diff --git a/go/vt/vttablet/tmclient/rpc_client_api.go b/go/vt/vttablet/tmclient/rpc_client_api.go index 710d8df64d7..2e75dbd45fc 100644 --- a/go/vt/vttablet/tmclient/rpc_client_api.go +++ b/go/vt/vttablet/tmclient/rpc_client_api.go @@ -51,7 +51,6 @@ func init() { "vtctl", "vtctld", "vtctldclient", - "vtgr", "vtorc", "vttablet", "vttestserver", @@ -79,6 +78,9 @@ type TabletManagerClient interface { // Various read-write methods // + // ResetSequences asks the remote tablet to reset the sequences for the specified tables + ResetSequences(ctx context.Context, tablet *topodatapb.Tablet, tables []string) error + // SetReadOnly makes the mysql instance read-only SetReadOnly(ctx context.Context, tablet *topodatapb.Tablet) error @@ -169,11 +171,17 @@ type TabletManagerClient interface { // WaitForPosition waits for the position to be reached WaitForPosition(ctx context.Context, tablet *topodatapb.Tablet, pos string) error + // + // VReplication related methods + // + + CreateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) + DeleteVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.DeleteVReplicationWorkflowRequest) (*tabletmanagerdatapb.DeleteVReplicationWorkflowResponse, error) + ReadVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) + UpdateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowResponse, error) // VReplicationExec executes a VReplication command VReplicationExec(ctx context.Context, tablet *topodatapb.Tablet, query string) 
(*querypb.QueryResult, error) VReplicationWaitForPos(ctx context.Context, tablet *topodatapb.Tablet, id int32, pos string) error - UpdateVRWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.UpdateVRWorkflowRequest) (*tabletmanagerdatapb.UpdateVRWorkflowResponse, error) - VDiff(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.VDiffRequest) (*tabletmanagerdatapb.VDiffResponse, error) // @@ -238,6 +246,9 @@ type TabletManagerClient interface { // RestoreFromBackup deletes local data and restores database from backup RestoreFromBackup(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.RestoreFromBackupRequest) (logutil.EventStream, error) + // Throttler + CheckThrottler(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) + // // Management methods // diff --git a/go/vt/vttablet/tmrpctest/test_tm_rpc.go b/go/vt/vttablet/tmrpctest/test_tm_rpc.go index b40061ffae3..2393a3fb2f0 100644 --- a/go/vt/vttablet/tmrpctest/test_tm_rpc.go +++ b/go/vt/vttablet/tmrpctest/test_tm_rpc.go @@ -28,7 +28,8 @@ import ( "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/hook" @@ -54,7 +55,27 @@ type fakeRPCTM struct { mu sync.Mutex } -func (fra *fakeRPCTM) UpdateVRWorkflow(ctx context.Context, req *tabletmanagerdatapb.UpdateVRWorkflowRequest) (*tabletmanagerdatapb.UpdateVRWorkflowResponse, error) { +func (fra *fakeRPCTM) CreateVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) { + //TODO implement me + panic("implement me") +} + +func (fra *fakeRPCTM) DeleteVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.DeleteVReplicationWorkflowRequest) 
(*tabletmanagerdatapb.DeleteVReplicationWorkflowResponse, error) { + //TODO implement me + panic("implement me") +} + +func (fra *fakeRPCTM) ReadVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { + //TODO implement me + panic("implement me") +} + +func (fra *fakeRPCTM) UpdateVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowResponse, error) { + //TODO implement me + panic("implement me") +} + +func (fra *fakeRPCTM) ResetSequences(ctx context.Context, tables []string) error { //TODO implement me panic("implement me") } @@ -732,8 +753,8 @@ func tmRPCTestExecuteFetchPanic(ctx context.Context, t *testing.T, client tmclie var testReplicationStatus = &replicationdatapb.Status{ Position: "MariaDB/1-345-789", - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), ReplicationLagSeconds: 654, SourceHost: "source.host", SourcePort: 3366, @@ -1266,6 +1287,15 @@ func (fra *fakeRPCTM) RestoreFromBackup(ctx context.Context, logger logutil.Logg return nil } +func (fra *fakeRPCTM) CheckThrottler(ctx context.Context, req *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) { + if fra.panics { + panic(fmt.Errorf("test-triggered panic")) + } + + //TODO implement me + panic("implement me") +} + func tmRPCTestRestoreFromBackup(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.RestoreFromBackupRequest) { stream, err := client.RestoreFromBackup(ctx, tablet, req) if err != nil { @@ -1287,6 +1317,11 @@ func tmRPCTestRestoreFromBackupPanic(ctx context.Context, t *testing.T, client t expectHandleRPCPanic(t, 
"RestoreFromBackup", true /*verbose*/, err) } +func tmRPCTestCheckThrottler(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.CheckThrottlerRequest) { + _, err := client.CheckThrottler(ctx, tablet, req) + expectHandleRPCPanic(t, "CheckThrottler", false /*verbose*/, err) +} + // // RPC helpers // @@ -1311,6 +1346,9 @@ func Run(t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.T restoreFromBackupRequest := &tabletmanagerdatapb.RestoreFromBackupRequest{ BackupTime: protoutil.TimeToProto(time.Time{}), } + checkThrottlerRequest := &tabletmanagerdatapb.CheckThrottlerRequest{ + AppName: "test", + } // Test RPC specific methods of the interface. tmRPCTestDialExpiredContext(ctx, t, client, tablet) @@ -1368,6 +1406,9 @@ func Run(t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.T tmRPCTestBackup(ctx, t, client, tablet) tmRPCTestRestoreFromBackup(ctx, t, client, tablet, restoreFromBackupRequest) + // Throttler related methods + tmRPCTestCheckThrottler(ctx, t, client, tablet, checkThrottlerRequest) + // // Tests panic handling everywhere now // diff --git a/go/vt/vttest/environment.go b/go/vt/vttest/environment.go index 864d081c284..dac9d718827 100644 --- a/go/vt/vttest/environment.go +++ b/go/vt/vttest/environment.go @@ -22,7 +22,6 @@ import ( "os" "path" "strings" - "time" "vitess.io/vitess/go/vt/proto/vttest" @@ -293,6 +292,7 @@ func NewLocalTestEnvWithDirectory(flavor string, basePort int, directory string) Env: []string{ fmt.Sprintf("VTDATAROOT=%s", directory), fmt.Sprintf("MYSQL_FLAVOR=%s", flavor), + "VTTEST=endtoend", }, }, nil } @@ -301,10 +301,6 @@ func defaultEnvFactory() (Environment, error) { return NewLocalTestEnv("", 0) } -func init() { - rand.Seed(time.Now().UnixNano()) -} - // NewDefaultEnv is an user-configurable callback that returns a new Environment // instance with the default settings. 
// This callback is only used in cases where the user hasn't explicitly set diff --git a/go/vt/vttest/local_cluster.go b/go/vt/vttest/local_cluster.go index d162cb9f64b..43493127414 100644 --- a/go/vt/vttest/local_cluster.go +++ b/go/vt/vttest/local_cluster.go @@ -32,12 +32,14 @@ import ( "time" "unicode" - "vitess.io/vitess/go/vt/sidecardb" - "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/encoding/prototext" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/constants/sidecar" + + "vitess.io/vitess/go/vt/sidecardb" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" @@ -89,7 +91,7 @@ type Config struct { Charset string // PlannerVersion is the planner version to use for the vtgate. - // Choose between V3, V3Insert, Gen4, Gen4Greedy and Gen4Fallback + // Choose between Gen4, Gen4Greedy and Gen4Left2Right PlannerVersion string // ExtraMyCnf are the extra .CNF files to be added to the MySQL config @@ -288,6 +290,18 @@ func (db *LocalCluster) MySQLAppDebugConnParams() mysql.ConnParams { return connParams } +// MySQLCleanConnParams returns connection params that can be used to connect +// directly to MySQL, even if there's a toxyproxy instance on the way. +func (db *LocalCluster) MySQLCleanConnParams() mysql.ConnParams { + mysqlctl := db.mysql + if toxiproxy, ok := mysqlctl.(*Toxiproxyctl); ok { + mysqlctl = toxiproxy.mysqlctl + } + connParams := mysqlctl.Params(db.DbName()) + connParams.Charset = db.Config.Charset + return connParams +} + // SimulateMySQLHang simulates a scenario where the backend MySQL stops all data from flowing through. // Please ensure to `defer db.StopSimulateMySQLHang()` after calling this method. 
func (db *LocalCluster) SimulateMySQLHang() error { @@ -491,28 +505,26 @@ func (db *LocalCluster) loadSchema(shouldRunDatabaseMigrations bool) error { } } - glob, _ := filepath.Glob(path.Join(schemaDir, "*.sql")) - for _, filepath := range glob { - cmds, err := LoadSQLFile(filepath, schemaDir) - if err != nil { - return err - } - - // One single vschema migration per file - if !db.OnlyMySQL && len(cmds) == 1 && strings.HasPrefix(strings.ToUpper(cmds[0]), "ALTER VSCHEMA") { - if err = db.applyVschema(keyspace, cmds[0]); err != nil { + if shouldRunDatabaseMigrations { + glob, _ := filepath.Glob(path.Join(schemaDir, "*.sql")) + for _, filepath := range glob { + cmds, err := LoadSQLFile(filepath, schemaDir) + if err != nil { return err } - continue - } - if !shouldRunDatabaseMigrations { - continue - } + // One single vschema migration per file + if !db.OnlyMySQL && len(cmds) == 1 && strings.HasPrefix(strings.ToUpper(cmds[0]), "ALTER VSCHEMA") { + if err = db.applyVschema(keyspace, cmds[0]); err != nil { + return err + } + continue + } - for _, dbname := range db.shardNames(kpb) { - if err := db.Execute(cmds, dbname); err != nil { - return err + for _, dbname := range db.shardNames(kpb) { + if err := db.Execute(cmds, dbname); err != nil { + return err + } } } } @@ -530,7 +542,7 @@ func (db *LocalCluster) loadSchema(shouldRunDatabaseMigrations bool) error { func (db *LocalCluster) createVTSchema() error { var sidecardbExec sidecardb.Exec = func(ctx context.Context, query string, maxRows int, useDB bool) (*sqltypes.Result, error) { if useDB { - if err := db.Execute([]string{fmt.Sprintf("use %s", sidecardb.GetIdentifier())}, ""); err != nil { + if err := db.Execute([]string{fmt.Sprintf("use %s", sidecar.GetIdentifier())}, ""); err != nil { return nil, err } } @@ -661,7 +673,7 @@ func (db *LocalCluster) applyVschema(keyspace string, migration string) error { func (db *LocalCluster) reloadSchemaKeyspace(keyspace string) error { server := fmt.Sprintf("localhost:%v", 
db.vt.PortGrpc) args := []string{"ReloadSchemaKeyspace", "--include_primary=true", keyspace} - fmt.Printf("Reloading keyspace schema %v", args) + log.Infof("Reloading keyspace schema %v", args) err := vtctlclient.RunCommandAndWait(context.Background(), server, args, func(e *logutil.Event) { log.Info(e) diff --git a/go/vt/vttest/mysqlctl.go b/go/vt/vttest/mysqlctl.go index 6c2cb6e10d2..19be100e339 100644 --- a/go/vt/vttest/mysqlctl.go +++ b/go/vt/vttest/mysqlctl.go @@ -66,7 +66,7 @@ func (ctl *Mysqlctl) Setup() error { "--alsologtostderr", "--tablet_uid", fmt.Sprintf("%d", ctl.UID), "--mysql_port", fmt.Sprintf("%d", ctl.Port), - "init", "--", + "init", "--init_db_sql_file", ctl.InitFile, ) diff --git a/go/vt/vttest/toxiproxyctl.go b/go/vt/vttest/toxiproxyctl.go index 1ed79547fe5..436739fcf4c 100644 --- a/go/vt/vttest/toxiproxyctl.go +++ b/go/vt/vttest/toxiproxyctl.go @@ -63,13 +63,12 @@ func NewToxiproxyctl(binary string, apiPort, mysqlPort int, mysqlctl *Mysqlctl, // The original initFile does not have any users who can access through TCP/IP connection. // Here we update the init file to create the user. - // We're using IPv6 localhost because that's what toxiproxy uses by default. initDb, _ := os.ReadFile(mysqlctl.InitFile) createUserCmd := fmt.Sprintf(` # Admin user for TCP/IP connection with all privileges. 
- CREATE USER '%s'@'::1'; - GRANT ALL ON *.* TO '%s'@'::1'; - GRANT GRANT OPTION ON *.* TO '%s'@'::1'; + CREATE USER '%s'@'127.0.0.1'; + GRANT ALL ON *.* TO '%s'@'127.0.0.1'; + GRANT GRANT OPTION ON *.* TO '%s'@'127.0.0.1'; `, dbaUser, dbaUser, dbaUser) sql, err := getInitDBSQL(string(initDb), createUserCmd) if err != nil { @@ -143,11 +142,11 @@ func (ctl *Toxiproxyctl) run() error { // Wait for toxiproxy to start time.Sleep(1 * time.Second) - toxiClient := toxiproxy.NewClient("localhost:" + fmt.Sprintf("%d", ctl.apiPort)) + toxiClient := toxiproxy.NewClient("127.0.0.1:" + fmt.Sprintf("%d", ctl.apiPort)) proxy, err := toxiClient.CreateProxy( "mysql", - "localhost:"+fmt.Sprintf("%d", ctl.port), - "localhost:"+fmt.Sprintf("%d", ctl.mysqlctl.Port), + "127.0.0.1:"+fmt.Sprintf("%d", ctl.port), + "127.0.0.1:"+fmt.Sprintf("%d", ctl.mysqlctl.Port), ) if err == nil { ctl.proxy = proxy @@ -206,7 +205,7 @@ func (ctl *Toxiproxyctl) Params(dbname string) mysql.ConnParams { params := ctl.mysqlctl.Params(dbname) params.UnixSocket = "" - params.Host = "localhost" + params.Host = "127.0.0.1" params.Port = ctl.port params.Uname = dbaUser return params diff --git a/go/vt/vttest/vtprocess.go b/go/vt/vttest/vtprocess.go index 37582542c02..2053973b766 100644 --- a/go/vt/vttest/vtprocess.go +++ b/go/vt/vttest/vtprocess.go @@ -23,8 +23,10 @@ import ( "net/http" "os" "os/exec" + "path" "strings" "syscall" + "testing" "time" "google.golang.org/protobuf/encoding/prototext" @@ -126,6 +128,7 @@ func (vtp *VtProcess) WaitStart() (err error) { vtp.proc = exec.Command( vtp.Binary, "--port", fmt.Sprintf("%d", vtp.Port), + "--bind-address", "127.0.0.1", "--log_dir", vtp.LogDirectory, "--alsologtostderr", ) @@ -139,8 +142,10 @@ func (vtp *VtProcess) WaitStart() (err error) { vtp.proc.Env = append(vtp.proc.Env, os.Environ()...) vtp.proc.Env = append(vtp.proc.Env, vtp.Env...) 
- vtp.proc.Stderr = os.Stderr - vtp.proc.Stdout = os.Stdout + if testing.Verbose() { + vtp.proc.Stderr = os.Stderr + vtp.proc.Stdout = os.Stdout + } log.Infof("%v %v", strings.Join(vtp.proc.Args, " ")) err = vtp.proc.Start() @@ -242,6 +247,9 @@ func VtcomboProcess(environment Environment, args *Config, mysql MySQLManager) ( if args.SchemaDir != "" { vt.ExtraArgs = append(vt.ExtraArgs, []string{"--schema_dir", args.SchemaDir}...) } + if args.PersistentMode && args.DataDir != "" { + vt.ExtraArgs = append(vt.ExtraArgs, []string{"--vschema-persistence-dir", path.Join(args.DataDir, "vschema_data")}...) + } if args.TransactionMode != "" { vt.ExtraArgs = append(vt.ExtraArgs, []string{"--transaction_mode", args.TransactionMode}...) } diff --git a/go/vt/vttls/crl.go b/go/vt/vttls/crl.go index 63c97ecfa81..fde4f0b2b66 100644 --- a/go/vt/vttls/crl.go +++ b/go/vt/vttls/crl.go @@ -33,7 +33,7 @@ func certIsRevoked(cert *x509.Certificate, crl *x509.RevocationList) bool { log.Warningf("The current Certificate Revocation List (CRL) is past expiry date and must be updated. 
Revoked certificates will still be rejected in this state.") } - for _, revoked := range crl.RevokedCertificates { + for _, revoked := range crl.RevokedCertificateEntries { if cert.SerialNumber.Cmp(revoked.SerialNumber) == 0 { return true } diff --git a/go/vt/wrangler/external_cluster_test.go b/go/vt/wrangler/external_cluster_test.go index 3592c4b6895..3c878411b6b 100644 --- a/go/vt/wrangler/external_cluster_test.go +++ b/go/vt/wrangler/external_cluster_test.go @@ -14,8 +14,9 @@ import ( ) func TestVitessCluster(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") tmc := newTestWranglerTMClient() wr := New(logutil.NewConsoleLogger(), ts, tmc) name, topoType, topoServer, topoRoot := "c1", "x", "y", "z" diff --git a/go/vt/wrangler/fake_dbclient_test.go b/go/vt/wrangler/fake_dbclient_test.go index 057d7bacd75..7bcc5f5bcf2 100644 --- a/go/vt/wrangler/fake_dbclient_test.go +++ b/go/vt/wrangler/fake_dbclient_test.go @@ -152,10 +152,6 @@ func (dc *fakeDBClient) Rollback() error { func (dc *fakeDBClient) Close() { } -func (dc *fakeDBClient) id() string { - return fmt.Sprintf("FakeDBClient(%s)", dc.name) -} - // ExecuteFetch is part of the DBClient interface func (dc *fakeDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Result, error) { dc.mu.Lock() diff --git a/go/vt/wrangler/fake_tablet_test.go b/go/vt/wrangler/fake_tablet_test.go index 8ffae2d7328..66d5cf474d6 100644 --- a/go/vt/wrangler/fake_tablet_test.go +++ b/go/vt/wrangler/fake_tablet_test.go @@ -23,6 +23,9 @@ import ( "testing" "time" + vdiff2 "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "github.com/stretchr/testify/require" "google.golang.org/grpc" @@ -32,6 +35,8 @@ import ( "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vttablet/grpctmserver" + 
"vitess.io/vitess/go/vt/vttablet/queryservice" + "vitess.io/vitess/go/vt/vttablet/queryservice/fakes" "vitess.io/vitess/go/vt/vttablet/tabletconntest" "vitess.io/vitess/go/vt/vttablet/tabletmanager" "vitess.io/vitess/go/vt/vttablet/tabletservermock" @@ -48,6 +53,12 @@ import ( _ "vitess.io/vitess/go/vt/vttablet/grpctabletconn" ) +func init() { + // Ensure we will use the right protocol (gRPC) in all unit tests. + tabletconntest.SetProtocol("go.vt.wrangler.fake_tablet_test", "grpc") + tmclienttest.SetProtocol("go.vt.wrangler.fake_tablet_test", "grpc") +} + // This file was copied from testlib. All tests from testlib should be moved // to the current directory. In order to move tests from there, we have to // remove the circular dependency it causes (through vtctl dependence). @@ -81,6 +92,8 @@ type fakeTablet struct { StartHTTPServer bool HTTPListener net.Listener HTTPServer *http.Server + + queryservice.QueryService } // TabletOption is an interface for changing tablet parameters. @@ -141,6 +154,7 @@ func newFakeTablet(t *testing.T, wr *Wrangler, cell string, uid uint32, tabletTy Tablet: tablet, FakeMysqlDaemon: fakeMysqlDaemon, RPCServer: grpc.NewServer(), + QueryService: fakes.ErrorQueryService, } } @@ -153,7 +167,7 @@ func (ft *fakeTablet) StartActionLoop(t *testing.T, wr *Wrangler) { // Listen on a random port for gRPC. var err error - ft.Listener, err = net.Listen("tcp", ":0") + ft.Listener, err = net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatalf("Cannot listen: %v", err) } @@ -162,7 +176,7 @@ func (ft *fakeTablet) StartActionLoop(t *testing.T, wr *Wrangler) { // If needed, listen on a random port for HTTP. 
vtPort := ft.Tablet.PortMap["vt"] if ft.StartHTTPServer { - ft.HTTPListener, err = net.Listen("tcp", ":0") + ft.HTTPListener, err = net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatalf("Cannot listen on http port: %v", err) } @@ -175,7 +189,8 @@ func (ft *fakeTablet) StartActionLoop(t *testing.T, wr *Wrangler) { } ft.Tablet.PortMap["vt"] = vtPort ft.Tablet.PortMap["grpc"] = gRPCPort - + ft.Tablet.Hostname = "127.0.0.1" + config := &tabletenv.TabletConfig{} // Create a test tm on that port, and re-read the record // (it has new ports and IP). ft.TM = &tabletmanager.TabletManager{ @@ -184,6 +199,7 @@ func (ft *fakeTablet) StartActionLoop(t *testing.T, wr *Wrangler) { MysqlDaemon: ft.FakeMysqlDaemon, DBConfigs: &dbconfigs.DBConfigs{}, QueryServiceControl: tabletservermock.NewController(), + VDiffEngine: vdiff2.NewEngine(config, wr.TopoServer(), ft.Tablet), } if err := ft.TM.Start(ft.Tablet, 0); err != nil { t.Fatal(err) @@ -222,6 +238,9 @@ func (ft *fakeTablet) StopActionLoop(t *testing.T) { if ft.StartHTTPServer { ft.HTTPListener.Close() } + if ft.RPCServer != nil { + ft.RPCServer.Stop() + } ft.Listener.Close() ft.TM.Stop() ft.TM = nil @@ -238,8 +257,14 @@ func (ft *fakeTablet) Target() querypb.Target { } } -func init() { - // enforce we will use the right protocol (gRPC) in all unit tests - tabletconntest.SetProtocol("go.vt.wrangler.fake_tablet_test", "grpc") - tmclienttest.SetProtocol("go.vt.wrangler.fake_tablet_test", "grpc") +func (ft *fakeTablet) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error { + return callback(&querypb.StreamHealthResponse{ + Serving: true, + Target: &querypb.Target{ + Keyspace: ft.Tablet.Keyspace, + Shard: ft.Tablet.Shard, + TabletType: ft.Tablet.Type, + }, + RealtimeStats: &querypb.RealtimeStats{}, + }) } diff --git a/go/vt/wrangler/keyspace.go b/go/vt/wrangler/keyspace.go index c1f253fedf3..7f3f00da4f8 100644 --- a/go/vt/wrangler/keyspace.go +++ b/go/vt/wrangler/keyspace.go @@ -20,19 +20,18 
@@ import ( "bytes" "context" "fmt" - "strings" "sync" "time" - "vitess.io/vitess/go/vt/proto/binlogdata" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/concurrency" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/vterrors" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) const ( @@ -72,7 +71,7 @@ func (wr *Wrangler) validateNewWorkflow(ctx context.Context, keyspace, workflow fmt.Sprintf("select 1 from _vt.vreplication where db_name=%s and workflow=%s", encodeString(primary.DbName()), encodeString(workflow)), fmt.Sprintf("workflow %s already exists in keyspace %s on tablet %d", workflow, keyspace, primary.Alias.Uid), }, { - fmt.Sprintf("select 1 from _vt.vreplication where db_name=%s and message='FROZEN' and workflow_sub_type != %d", encodeString(primary.DbName()), binlogdata.VReplicationWorkflowSubType_Partial), + fmt.Sprintf("select 1 from _vt.vreplication where db_name=%s and message='FROZEN' and workflow_sub_type != %d", encodeString(primary.DbName()), binlogdatapb.VReplicationWorkflowSubType_Partial), fmt.Sprintf("found previous frozen workflow on tablet %d, please review and delete it first before creating a new workflow", primary.Alias.Uid), }} @@ -93,112 +92,6 @@ func (wr *Wrangler) validateNewWorkflow(ctx context.Context, keyspace, workflow return allErrors.AggrError(vterrors.Aggregate) } -func (wr *Wrangler) printShards(ctx context.Context, si []*topo.ShardInfo) error { - for _, si := range si { - wr.Logger().Printf(" Shard: %v\n", si.ShardName()) - if len(si.SourceShards) != 0 { - wr.Logger().Printf(" Source Shards: %v\n", si.SourceShards) - } - ti, err := wr.ts.GetTablet(ctx, si.PrimaryAlias) - if err != nil { - return err - } - qr, err := wr.tmc.VReplicationExec(ctx, ti.Tablet, fmt.Sprintf("select * from _vt.vreplication where db_name=%v", 
encodeString(ti.DbName()))) - if err != nil { - return err - } - res := sqltypes.Proto3ToResult(qr) - if len(res.Rows) != 0 { - wr.Logger().Printf(" VReplication:\n") - for _, row := range res.Rows { - wr.Logger().Printf(" %v\n", row) - } - } - wr.Logger().Printf(" Is Primary Serving: %v\n", si.IsPrimaryServing) - if len(si.TabletControls) != 0 { - wr.Logger().Printf(" Tablet Controls: %v\n", si.TabletControls) - } - } - return nil -} - -func (wr *Wrangler) getPrimaryPositions(ctx context.Context, shards []*topo.ShardInfo) (map[*topo.ShardInfo]string, error) { - mu := sync.Mutex{} - result := make(map[*topo.ShardInfo]string) - - wg := sync.WaitGroup{} - rec := concurrency.AllErrorRecorder{} - for _, si := range shards { - wg.Add(1) - go func(si *topo.ShardInfo) { - defer wg.Done() - wr.Logger().Infof("Gathering primary position for %v", topoproto.TabletAliasString(si.PrimaryAlias)) - ti, err := wr.ts.GetTablet(ctx, si.PrimaryAlias) - if err != nil { - rec.RecordError(err) - return - } - - pos, err := wr.tmc.PrimaryPosition(ctx, ti.Tablet) - if err != nil { - rec.RecordError(err) - return - } - - wr.Logger().Infof("Got primary position for %v", topoproto.TabletAliasString(si.PrimaryAlias)) - mu.Lock() - result[si] = pos - mu.Unlock() - }(si) - } - wg.Wait() - return result, rec.Error() -} - -func (wr *Wrangler) waitForFilteredReplication(ctx context.Context, sourcePositions map[*topo.ShardInfo]string, destinationShards []*topo.ShardInfo, waitTime time.Duration) error { - wg := sync.WaitGroup{} - rec := concurrency.AllErrorRecorder{} - for _, si := range destinationShards { - wg.Add(1) - go func(si *topo.ShardInfo) { - defer wg.Done() - ctx, cancel := context.WithTimeout(ctx, waitTime) - defer cancel() - - var pos string - for _, sourceShard := range si.SourceShards { - // find the position it should be at - for s, sp := range sourcePositions { - if s.Keyspace() == sourceShard.Keyspace && s.ShardName() == sourceShard.Shard { - pos = sp - break - } - } - - // and wait 
for it - wr.Logger().Infof("Waiting for %v to catch up", topoproto.TabletAliasString(si.PrimaryAlias)) - ti, err := wr.ts.GetTablet(ctx, si.PrimaryAlias) - if err != nil { - rec.RecordError(err) - return - } - - if err := wr.tmc.VReplicationWaitForPos(ctx, ti.Tablet, sourceShard.Uid, pos); err != nil { - if strings.Contains(err.Error(), "not found") { - wr.Logger().Infof("%v stream %d was not found. Skipping wait.", topoproto.TabletAliasString(si.PrimaryAlias), sourceShard.Uid) - } else { - rec.RecordError(err) - } - } else { - wr.Logger().Infof("%v caught up", topoproto.TabletAliasString(si.PrimaryAlias)) - } - } - }(si) - } - wg.Wait() - return rec.Error() -} - // refreshPrimaryTablets will just RPC-ping all the primary tablets with RefreshState func (wr *Wrangler) refreshPrimaryTablets(ctx context.Context, shards []*topo.ShardInfo) error { wg := sync.WaitGroup{} @@ -230,33 +123,6 @@ func (wr *Wrangler) updateShardRecords(ctx context.Context, keyspace string, sha return topotools.UpdateShardRecords(ctx, wr.ts, wr.tmc, keyspace, shards, cells, servedType, isFrom, clearSourceShards, wr.Logger()) } -// updateFrozenFlag sets or unsets the Frozen flag for primary migration. This is performed -// for all primary tablet control records. 
-func (wr *Wrangler) updateFrozenFlag(ctx context.Context, shards []*topo.ShardInfo, value bool) (err error) { - for i, si := range shards { - updatedShard, err := wr.ts.UpdateShardFields(ctx, si.Keyspace(), si.ShardName(), func(si *topo.ShardInfo) error { - tc := si.GetTabletControl(topodatapb.TabletType_PRIMARY) - if tc != nil { - tc.Frozen = value - return nil - } - // This shard does not have a tablet control record, adding one to set frozen flag - tc = &topodatapb.Shard_TabletControl{ - TabletType: topodatapb.TabletType_PRIMARY, - Frozen: value, - } - si.TabletControls = append(si.TabletControls, tc) - return nil - }) - if err != nil { - return err - } - - shards[i] = updatedShard - } - return nil -} - func encodeString(in string) string { buf := bytes.NewBuffer(nil) sqltypes.NewVarChar(in).EncodeSQL(buf) diff --git a/go/vt/wrangler/materializer.go b/go/vt/wrangler/materializer.go index 6dea5d3b9f1..0fba424eacd 100644 --- a/go/vt/wrangler/materializer.go +++ b/go/vt/wrangler/materializer.go @@ -33,19 +33,19 @@ import ( "vitess.io/vitess/go/json2" "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/tmutils" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/vtctl/schematools" "vitess.io/vitess/go/vt/vtctl/workflow" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" @@ -54,15 +54,17 @@ import ( tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + vtrpcpb 
"vitess.io/vitess/go/vt/proto/vtrpc" ) type materializer struct { - wr *Wrangler - ms *vtctldatapb.MaterializeSettings - targetVSchema *vindexes.KeyspaceSchema - sourceShards []*topo.ShardInfo - targetShards []*topo.ShardInfo - isPartial bool + wr *Wrangler + ms *vtctldatapb.MaterializeSettings + targetVSchema *vindexes.KeyspaceSchema + sourceShards []*topo.ShardInfo + targetShards []*topo.ShardInfo + isPartial bool + primaryVindexesDiffer bool } const ( @@ -128,12 +130,12 @@ func shouldInclude(table string, excludes []string) bool { // MoveTables initiates moving table(s) over to another keyspace func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, targetKeyspace, tableSpecs, - cell, tabletTypes string, allTables bool, excludeTables string, autoStart, stopAfterCopy bool, - externalCluster string, dropForeignKeys, deferSecondaryKeys bool, sourceTimeZone, onDDL string, sourceShards []string) error { + cell, tabletTypesStr string, allTables bool, excludeTables string, autoStart, stopAfterCopy bool, + externalCluster string, dropForeignKeys, deferSecondaryKeys bool, sourceTimeZone, onDDL string, + sourceShards []string, noRoutingRules bool, atomicCopy bool) (err error) { //FIXME validate tableSpecs, allTables, excludeTables var tables []string var externalTopo *topo.Server - var err error if externalCluster != "" { // when the source is an external mysql cluster mounted using the Mount command externalTopo, err = wr.ts.OpenExternalVitessClusterServer(ctx, externalCluster) @@ -145,6 +147,7 @@ func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, ta } var vschema *vschemapb.Keyspace + var origVSchema *vschemapb.Keyspace // If we need to rollback a failed create vschema, err = wr.ts.GetVSchema(ctx, targetKeyspace) if err != nil { return err @@ -207,56 +210,36 @@ func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, ta log.Infof("Found tables to move: %s", strings.Join(tables, ",")) if 
!vschema.Sharded { + // Save the original in case we need to restore it for a late failure + // in the defer(). + origVSchema = vschema.CloneVT() if err := wr.addTablesToVSchema(ctx, sourceKeyspace, vschema, tables, externalTopo == nil); err != nil { return err } } } - if externalTopo == nil { - // Save routing rules before vschema. If we save vschema first, and routing rules - // fails to save, we may generate duplicate table errors. - rules, err := topotools.GetRoutingRules(ctx, wr.ts) - if err != nil { - return err - } - for _, table := range tables { - toSource := []string{sourceKeyspace + "." + table} - rules[table] = toSource - rules[table+"@replica"] = toSource - rules[table+"@rdonly"] = toSource - rules[targetKeyspace+"."+table] = toSource - rules[targetKeyspace+"."+table+"@replica"] = toSource - rules[targetKeyspace+"."+table+"@rdonly"] = toSource - rules[targetKeyspace+"."+table] = toSource - rules[sourceKeyspace+"."+table+"@replica"] = toSource - rules[sourceKeyspace+"."+table+"@rdonly"] = toSource - } - if err := topotools.SaveRoutingRules(ctx, wr.ts, rules); err != nil { - return err - } - - if vschema != nil { - // We added to the vschema. 
- if err := wr.ts.SaveVSchema(ctx, targetKeyspace, vschema); err != nil { - return err - } - } - } - if err := wr.ts.RebuildSrvVSchema(ctx, nil); err != nil { + tabletTypes, inorder, err := discovery.ParseTabletTypesAndOrder(tabletTypesStr) + if err != nil { return err } + tsp := tabletmanagerdatapb.TabletSelectionPreference_ANY + if inorder { + tsp = tabletmanagerdatapb.TabletSelectionPreference_INORDER + } ms := &vtctldatapb.MaterializeSettings{ - Workflow: workflow, - MaterializationIntent: vtctldatapb.MaterializationIntent_MOVETABLES, - SourceKeyspace: sourceKeyspace, - TargetKeyspace: targetKeyspace, - Cell: cell, - TabletTypes: tabletTypes, - StopAfterCopy: stopAfterCopy, - ExternalCluster: externalCluster, - SourceShards: sourceShards, - OnDdl: onDDL, - DeferSecondaryKeys: deferSecondaryKeys, + Workflow: workflow, + MaterializationIntent: vtctldatapb.MaterializationIntent_MOVETABLES, + SourceKeyspace: sourceKeyspace, + TargetKeyspace: targetKeyspace, + Cell: cell, + TabletTypes: topoproto.MakeStringTypeCSV(tabletTypes), + TabletSelectionPreference: tsp, + StopAfterCopy: stopAfterCopy, + ExternalCluster: externalCluster, + SourceShards: sourceShards, + OnDdl: onDDL, + DeferSecondaryKeys: deferSecondaryKeys, + AtomicCopy: atomicCopy, } if sourceTimeZone != "" { ms.SourceTimeZone = sourceTimeZone @@ -281,6 +264,71 @@ func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, ta return err } + // If we get an error after this point, where the vreplication streams/records + // have been created, then we clean up the workflow's artifacts. 
+ defer func() { + if err != nil { + ts, cerr := wr.buildTrafficSwitcher(ctx, ms.TargetKeyspace, ms.Workflow) + if cerr != nil { + err = vterrors.Wrapf(err, "failed to cleanup workflow artifacts: %v", cerr) + } + if cerr := wr.dropArtifacts(ctx, false, &switcher{ts: ts, wr: wr}); cerr != nil { + err = vterrors.Wrapf(err, "failed to cleanup workflow artifacts: %v", cerr) + } + if origVSchema == nil { // There's no previous version to restore + return + } + if cerr := wr.ts.SaveVSchema(ctx, targetKeyspace, origVSchema); cerr != nil { + err = vterrors.Wrapf(err, "failed to restore original target vschema: %v", cerr) + } + } + }() + + // Now that the streams have been successfully created, let's put the associated + // routing rules in place. + if externalTopo == nil { + if noRoutingRules { + log.Warningf("Found --no-routing-rules flag, not creating routing rules for workflow %s.%s", targetKeyspace, workflow) + } else { + // Save routing rules before vschema. If we save vschema first, and routing rules + // fails to save, we may generate duplicate table errors. + if mz.isPartial { + if err := wr.createDefaultShardRoutingRules(ctx, ms); err != nil { + return err + } + } + rules, err := topotools.GetRoutingRules(ctx, wr.ts) + if err != nil { + return err + } + for _, table := range tables { + toSource := []string{sourceKeyspace + "." + table} + rules[table] = toSource + rules[table+"@replica"] = toSource + rules[table+"@rdonly"] = toSource + rules[targetKeyspace+"."+table] = toSource + rules[targetKeyspace+"."+table+"@replica"] = toSource + rules[targetKeyspace+"."+table+"@rdonly"] = toSource + rules[targetKeyspace+"."+table] = toSource + rules[sourceKeyspace+"."+table+"@replica"] = toSource + rules[sourceKeyspace+"."+table+"@rdonly"] = toSource + } + if err := topotools.SaveRoutingRules(ctx, wr.ts, rules); err != nil { + return err + } + } + if vschema != nil { + // We added to the vschema. 
+ if err := wr.ts.SaveVSchema(ctx, targetKeyspace, vschema); err != nil { + return err + } + } + + } + if err := wr.ts.RebuildSrvVSchema(ctx, nil); err != nil { + return err + } + if sourceTimeZone != "" { if err := mz.checkTZConversion(ctx, sourceTimeZone); err != nil { return err @@ -314,7 +362,7 @@ func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, ta if autoStart { return mz.startStreams(ctx) } - wr.Logger().Infof("Streams will not be started since -auto_start is set to false") + wr.Logger().Infof("Streams will not be started since --auto_start is set to false") return nil } @@ -424,7 +472,7 @@ func (wr *Wrangler) checkIfPreviousJournalExists(ctx context.Context, mz *materi } // CreateLookupVindex creates a lookup vindex and sets up the backfill. -func (wr *Wrangler) CreateLookupVindex(ctx context.Context, keyspace string, specs *vschemapb.Keyspace, cell, tabletTypes string, continueAfterCopyWithOwner bool) error { +func (wr *Wrangler) CreateLookupVindex(ctx context.Context, keyspace string, specs *vschemapb.Keyspace, cell, tabletTypesStr string, continueAfterCopyWithOwner bool) error { ms, sourceVSchema, targetVSchema, err := wr.prepareCreateLookup(ctx, keyspace, specs, continueAfterCopyWithOwner) if err != nil { return err @@ -433,7 +481,17 @@ func (wr *Wrangler) CreateLookupVindex(ctx context.Context, keyspace string, spe return err } ms.Cell = cell - ms.TabletTypes = tabletTypes + + tabletTypes, inorder, err := discovery.ParseTabletTypesAndOrder(tabletTypesStr) + if err != nil { + return err + } + tsp := tabletmanagerdatapb.TabletSelectionPreference_ANY + if inorder { + tsp = tabletmanagerdatapb.TabletSelectionPreference_INORDER + } + ms.TabletTypes = topoproto.MakeStringTypeCSV(tabletTypes) + ms.TabletSelectionPreference = tsp if err := wr.Materialize(ctx, ms); err != nil { return err } @@ -449,12 +507,13 @@ func (wr *Wrangler) prepareCreateLookup(ctx context.Context, keyspace string, sp // Important variables are pulled out 
here. var ( // lookup vindex info - vindexName string - vindex *vschemapb.Vindex - targetKeyspace string - targetTableName string - vindexFromCols []string - vindexToCol string + vindexName string + vindex *vschemapb.Vindex + targetKeyspace string + targetTableName string + vindexFromCols []string + vindexToCol string + vindexIgnoreNulls bool // source table info sourceTableName string @@ -505,6 +564,18 @@ func (wr *Wrangler) prepareCreateLookup(ctx context.Context, keyspace string, sp if _, err := vindexes.CreateVindex(vindex.Type, vindexName, vindex.Params); err != nil { return nil, nil, nil, err } + if ignoreNullsStr, ok := vindex.Params["ignore_nulls"]; ok { + // This mirrors the behavior of vindexes.boolFromMap(). + switch ignoreNullsStr { + case "true": + vindexIgnoreNulls = true + case "false": + vindexIgnoreNulls = false + default: + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "ignore_nulls value must be 'true' or 'false': '%s'", + ignoreNullsStr) + } + } // Validate input table if len(specs.Tables) != 1 { @@ -641,21 +712,31 @@ func (wr *Wrangler) prepareCreateLookup(ctx context.Context, keyspace string, sp buf = sqlparser.NewTrackedBuffer(nil) buf.Myprintf("select ") for i := range vindexFromCols { - buf.Myprintf("%v as %v, ", sqlparser.NewIdentifierCI(sourceVindexColumns[i]), sqlparser.NewIdentifierCI(vindexFromCols[i])) + buf.Myprintf("%s as %s, ", sqlparser.String(sqlparser.NewIdentifierCI(sourceVindexColumns[i])), sqlparser.String(sqlparser.NewIdentifierCI(vindexFromCols[i]))) } if strings.EqualFold(vindexToCol, "keyspace_id") || strings.EqualFold(vindex.Type, "consistent_lookup_unique") || strings.EqualFold(vindex.Type, "consistent_lookup") { - buf.Myprintf("keyspace_id() as %v ", sqlparser.NewIdentifierCI(vindexToCol)) + buf.Myprintf("keyspace_id() as %s ", sqlparser.String(sqlparser.NewIdentifierCI(vindexToCol))) } else { - buf.Myprintf("%v as %v ", sqlparser.NewIdentifierCI(vindexToCol), 
sqlparser.NewIdentifierCI(vindexToCol)) + buf.Myprintf("%s as %s ", sqlparser.String(sqlparser.NewIdentifierCI(vindexToCol)), sqlparser.String(sqlparser.NewIdentifierCI(vindexToCol))) + } + buf.Myprintf("from %s", sqlparser.String(sqlparser.NewIdentifierCS(sourceTableName))) + if vindexIgnoreNulls { + buf.Myprintf(" where ") + lastValIdx := len(vindexFromCols) - 1 + for i := range vindexFromCols { + buf.Myprintf("%s is not null", sqlparser.String(sqlparser.NewIdentifierCI(vindexFromCols[i]))) + if i != lastValIdx { + buf.Myprintf(" and ") + } + } } - buf.Myprintf("from %v", sqlparser.NewIdentifierCS(sourceTableName)) if vindex.Owner != "" { // Only backfill buf.Myprintf(" group by ") for i := range vindexFromCols { - buf.Myprintf("%v, ", sqlparser.NewIdentifierCI(vindexFromCols[i])) + buf.Myprintf("%s, ", sqlparser.String(sqlparser.NewIdentifierCI(vindexFromCols[i]))) } - buf.Myprintf("%v", sqlparser.NewIdentifierCI(vindexToCol)) + buf.Myprintf("%s", sqlparser.String(sqlparser.NewIdentifierCI(vindexToCol))) } materializeQuery = buf.String() @@ -796,11 +877,11 @@ func (wr *Wrangler) ExternalizeVindex(ctx context.Context, qualifiedVindexName s } qr := sqltypes.Proto3ToResult(p3qr) for _, row := range qr.Rows { - id, err := evalengine.ToInt64(row[0]) + id, err := row[0].ToCastInt64() if err != nil { return err } - state := row[1].ToString() + state := binlogdatapb.VReplicationWorkflowState(binlogdatapb.VReplicationWorkflowState_value[row[1].ToString()]) message := row[2].ToString() var bls binlogdatapb.BinlogSource sourceBytes, err := row[3].ToBytes() @@ -813,12 +894,12 @@ func (wr *Wrangler) ExternalizeVindex(ctx context.Context, qualifiedVindexName s if sourceVindex.Owner == "" || !bls.StopAfterCopy { // If there's no owner or we've requested that the workflow NOT be stopped // after the copy phase completes, then all streams need to be running. 
- if state != binlogplayer.BlpRunning { + if state != binlogdatapb.VReplicationWorkflowState_Running { return fmt.Errorf("stream %d for %v.%v is not in Running state: %v", id, targetShard.Keyspace(), targetShard.ShardName(), state) } } else { // If there is an owner, all streams need to be stopped after copy. - if state != binlogplayer.BlpStopped || !strings.Contains(message, "Stopped after copy") { + if state != binlogdatapb.VReplicationWorkflowState_Stopped || !strings.Contains(message, "Stopped after copy") { return fmt.Errorf("stream %d for %v.%v is not in Stopped after copy state: %v, %v", id, targetShard.Keyspace(), targetShard.ShardName(), state, message) } } @@ -873,7 +954,7 @@ func (wr *Wrangler) collectTargetStreams(ctx context.Context, mz *materializer) } qr := sqltypes.Proto3ToResult(qrproto) for i := 0; i < len(qr.Rows); i++ { - id, err = evalengine.ToInt64(qr.Rows[i][0]) + id, err = qr.Rows[i][0].ToCastInt64() if err != nil { return err } @@ -942,17 +1023,13 @@ func (wr *Wrangler) prepareMaterializerStreams(ctx context.Context, ms *vtctldat if err != nil { return nil, err } - if mz.isPartial { - if err := wr.createDefaultShardRoutingRules(ctx, ms); err != nil { - return nil, err - } - } if err := mz.deploySchema(ctx); err != nil { return nil, err } insertMap := make(map[string]string, len(mz.targetShards)) for _, targetShard := range mz.targetShards { - inserts, err := mz.generateInserts(ctx, targetShard) + sourceShards := mz.filterSourceShards(targetShard) + inserts, err := mz.generateInserts(ctx, sourceShards) if err != nil { return nil, err } @@ -982,6 +1059,7 @@ func (wr *Wrangler) buildMaterializer(ctx context.Context, ms *vtctldatapb.Mater if err != nil { return nil, err } + if targetVSchema.Keyspace.Sharded { for _, ts := range ms.TableSettings { if targetVSchema.Tables[ts.TargetTable] == nil { @@ -1031,13 +1109,29 @@ func (wr *Wrangler) buildMaterializer(ctx context.Context, ms *vtctldatapb.Mater return nil, fmt.Errorf("no target shards 
specified for workflow %s ", ms.Workflow) } + sourceTs := wr.ts + if ms.ExternalCluster != "" { // when the source is an external mysql cluster mounted using the Mount command + externalTopo, err := wr.ts.OpenExternalVitessClusterServer(ctx, ms.ExternalCluster) + if err != nil { + return nil, fmt.Errorf("failed to open external topo: %v", err) + } + sourceTs = externalTopo + } + differentPVs := false + sourceVSchema, err := sourceTs.GetVSchema(ctx, ms.SourceKeyspace) + if err != nil { + return nil, fmt.Errorf("failed to get source keyspace vschema: %v", err) + } + differentPVs = primaryVindexesDiffer(ms, sourceVSchema, vschema) + return &materializer{ - wr: wr, - ms: ms, - targetVSchema: targetVSchema, - sourceShards: sourceShards, - targetShards: targetShards, - isPartial: isPartial, + wr: wr, + ms: ms, + targetVSchema: targetVSchema, + sourceShards: sourceShards, + targetShards: targetShards, + isPartial: isPartial, + primaryVindexesDiffer: differentPVs, }, nil } @@ -1225,17 +1319,10 @@ func stripTableConstraints(ddl string) (string, error) { return newDDL, nil } -func (mz *materializer) generateInserts(ctx context.Context, targetShard *topo.ShardInfo) (string, error) { - ig := vreplication.NewInsertGenerator(binlogplayer.BlpStopped, "{{.dbname}}") +func (mz *materializer) generateInserts(ctx context.Context, sourceShards []*topo.ShardInfo) (string, error) { + ig := vreplication.NewInsertGenerator(binlogdatapb.VReplicationWorkflowState_Stopped, "{{.dbname}}") - for _, sourceShard := range mz.sourceShards { - // Don't create streams from sources which won't contain data for the target shard. 
- // We only do it for MoveTables for now since this doesn't hold for materialize flows - // where the target's sharding key might differ from that of the source - if mz.ms.MaterializationIntent == vtctldatapb.MaterializationIntent_MOVETABLES && - !key.KeyRangeIntersect(sourceShard.KeyRange, targetShard.KeyRange) { - continue - } + for _, sourceShard := range sourceShards { bls := &binlogdatapb.BinlogSource{ Keyspace: mz.ms.SourceKeyspace, Shard: sourceShard.ShardName(), @@ -1312,21 +1399,20 @@ func (mz *materializer) generateInserts(ctx context.Context, targetShard *topo.S bls.Filter.Rules = append(bls.Filter.Rules, rule) } - workflowSubType := binlogdatapb.VReplicationWorkflowSubType_None - if mz.isPartial { - workflowSubType = binlogdatapb.VReplicationWorkflowSubType_Partial + var workflowSubType binlogdatapb.VReplicationWorkflowSubType + workflowSubType, s, err := mz.getWorkflowSubType() + if err != nil { + return s, err } - var workflowType binlogdatapb.VReplicationWorkflowType - switch mz.ms.MaterializationIntent { - case vtctldatapb.MaterializationIntent_CUSTOM: - workflowType = binlogdatapb.VReplicationWorkflowType_Materialize - case vtctldatapb.MaterializationIntent_MOVETABLES: - workflowType = binlogdatapb.VReplicationWorkflowType_MoveTables - case vtctldatapb.MaterializationIntent_CREATELOOKUPINDEX: - workflowType = binlogdatapb.VReplicationWorkflowType_CreateLookupIndex + + workflowType := mz.getWorkflowType() + + tabletTypeStr := mz.ms.TabletTypes + if mz.ms.TabletSelectionPreference == tabletmanagerdatapb.TabletSelectionPreference_INORDER { + tabletTypeStr = discovery.InOrderHint + tabletTypeStr } - ig.AddRow(mz.ms.Workflow, bls, "", mz.ms.Cell, mz.ms.TabletTypes, + ig.AddRow(mz.ms.Workflow, bls, "", mz.ms.Cell, tabletTypeStr, workflowType, workflowSubType, mz.ms.DeferSecondaryKeys, @@ -1335,6 +1421,34 @@ func (mz *materializer) generateInserts(ctx context.Context, targetShard *topo.S return ig.String(), nil } +func (mz *materializer) 
getWorkflowType() binlogdatapb.VReplicationWorkflowType { + var workflowType binlogdatapb.VReplicationWorkflowType + switch mz.ms.MaterializationIntent { + case vtctldatapb.MaterializationIntent_CUSTOM: + workflowType = binlogdatapb.VReplicationWorkflowType_Materialize + case vtctldatapb.MaterializationIntent_MOVETABLES: + workflowType = binlogdatapb.VReplicationWorkflowType_MoveTables + case vtctldatapb.MaterializationIntent_CREATELOOKUPINDEX: + workflowType = binlogdatapb.VReplicationWorkflowType_CreateLookupIndex + } + return workflowType +} + +func (mz *materializer) getWorkflowSubType() (binlogdatapb.VReplicationWorkflowSubType, string, error) { + workflowSubType := binlogdatapb.VReplicationWorkflowSubType_None + switch { + case mz.isPartial && mz.ms.AtomicCopy: + return workflowSubType, "", fmt.Errorf("both atomic copy and partial mode cannot be specified for the same workflow") + case mz.isPartial: + workflowSubType = binlogdatapb.VReplicationWorkflowSubType_Partial + case mz.ms.AtomicCopy: + workflowSubType = binlogdatapb.VReplicationWorkflowSubType_AtomicCopy + default: + workflowSubType = binlogdatapb.VReplicationWorkflowSubType_None + } + return workflowSubType, "", nil +} + func matchColInSelect(col sqlparser.IdentifierCI, sel *sqlparser.Select) (*sqlparser.ColName, error) { for _, selExpr := range sel.SelectExprs { switch selExpr := selExpr.(type) { @@ -1446,3 +1560,117 @@ func (mz *materializer) checkTZConversion(ctx context.Context, tz string) error }) return err } + +// filterSourceShards filters out source shards that do not overlap with the +// provided target shard. This is an optimization to avoid copying unnecessary +// data between the shards. This optimization is only applied for MoveTables +// when the source and target shard have the same primary vindexes. 
+func (mz *materializer) filterSourceShards(targetShard *topo.ShardInfo) []*topo.ShardInfo { + if mz.primaryVindexesDiffer || mz.ms.MaterializationIntent != vtctldatapb.MaterializationIntent_MOVETABLES { + // Use all source shards. + return mz.sourceShards + } + // Use intersecting source shards. + var filteredSourceShards []*topo.ShardInfo + for _, sourceShard := range mz.sourceShards { + if !key.KeyRangeIntersect(sourceShard.KeyRange, targetShard.KeyRange) { + continue + } + filteredSourceShards = append(filteredSourceShards, sourceShard) + } + return filteredSourceShards +} + +// primaryVindexesDiffer returns true if, for any tables defined in the provided +// materialize settings, the source and target vschema definitions for those +// tables have different primary vindexes. +// +// The result of this function is used to determine whether to apply a source +// shard selection optimization in MoveTables. +func primaryVindexesDiffer(ms *vtctldatapb.MaterializeSettings, source, target *vschemapb.Keyspace) bool { + // Unless both keyspaces are sharded, treat the answer to the question as + // trivially false. + if source.Sharded != target.Sharded { + return false + } + + // For source and target keyspaces that are sharded, we can optimize source + // shard selection if source and target tables' primary vindexes are equal. + // + // To determine this, iterate over all target tables, looking for primary + // vindexes that differ from the corresponding source table. + for _, ts := range ms.TableSettings { + sColumnVindexes := []*vschemapb.ColumnVindex{} + tColumnVindexes := []*vschemapb.ColumnVindex{} + if tt, ok := source.Tables[ts.TargetTable]; ok { + sColumnVindexes = tt.ColumnVindexes + } + if tt, ok := target.Tables[ts.TargetTable]; ok { + tColumnVindexes = tt.ColumnVindexes + } + + // If source does not have a primary vindex, but the target does, then + // the primary vindexes differ. 
+ if len(sColumnVindexes) == 0 && len(tColumnVindexes) > 0 { + return true + } + // If source has a primary vindex, but the target does not, then the + // primary vindexes differ. + if len(sColumnVindexes) > 0 && len(tColumnVindexes) == 0 { + return true + } + // If neither source nor target have any vindexes, treat the answer to + // the question as trivially false. + if len(sColumnVindexes) == 0 && len(tColumnVindexes) == 0 { + return true + } + + sPrimaryVindex := sColumnVindexes[0] + tPrimaryVindex := tColumnVindexes[0] + + // Compare source and target primary vindex columns. + var sColumns, tColumns []string + if sPrimaryVindex.Column != "" { + sColumns = []string{sPrimaryVindex.Column} + } else { + sColumns = sPrimaryVindex.Columns + } + if tPrimaryVindex.Column != "" { + tColumns = []string{tPrimaryVindex.Column} + } else { + tColumns = tPrimaryVindex.Columns + } + if len(sColumns) != len(tColumns) { + return true + } + for i := 0; i < len(sColumns); i++ { + if !strings.EqualFold(sColumns[i], tColumns[i]) { + return true + } + } + + // Get source and target vindex definitions. + spv := source.Vindexes[sColumnVindexes[0].Name] + tpv := target.Vindexes[tColumnVindexes[0].Name] + // If the source has vindex definition, but target does not, then the + // target vschema is invalid. Assume the primary vindexes differ. + if spv != nil && tpv == nil { + return true + } + // If the target has vindex definition, but source does not, then the + // source vschema is invalid. Assume the primary vindexes differ. + if spv == nil && tpv != nil { + return true + } + // If both target and source are missing vindex definitions, then both + // are equally invalid. + if spv == nil && tpv == nil { + continue + } + // Compare source and target vindex type. 
+ if !strings.EqualFold(spv.Type, tpv.Type) { + return true + } + } + return false +} diff --git a/go/vt/wrangler/materializer_env_test.go b/go/vt/wrangler/materializer_env_test.go index 7370abffd60..fc4b5364b03 100644 --- a/go/vt/wrangler/materializer_env_test.go +++ b/go/vt/wrangler/materializer_env_test.go @@ -21,12 +21,17 @@ import ( "fmt" "os" "regexp" + "runtime" "strconv" "strings" "sync" "testing" + "time" + + "go.uber.org/goleak" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl/tmutils" "vitess.io/vitess/go/vt/sqlparser" @@ -55,19 +60,72 @@ type testMaterializerEnv struct { //---------------------------------------------- // testMaterializerEnv -func TestMain(m *testing.M) { +// EnsureNoLeaks is a helper function to fail tests if there are goroutine leaks. +// At this moment we still have a lot of goroutine leaks in the unit tests in this package. +// So we only use this while debugging and fixing the leaks. Once fixed we will use this +// in TestMain instead of just logging the number of leaked goroutines. +func EnsureNoLeaks(t testing.TB) { + if t.Failed() { + return + } + err := ensureNoGoroutines() + if err != nil { + t.Fatal(err) + } +} + +func ensureNoGoroutines() error { + // These goroutines have been found to stay around. + // Need to investigate and fix the Vitess ones at some point, if we indeed find out that they are unintended leaks. 
+ var leaksToIgnore = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/golang/glog.(*fileSink).flushDaemon"), + goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"), + goleak.IgnoreTopFunction("vitess.io/vitess/go/vt/dbconfigs.init.0.func1"), + goleak.IgnoreTopFunction("vitess.io/vitess/go/vt/vtgate.resetAggregators"), + goleak.IgnoreTopFunction("vitess.io/vitess/go/vt/vtgate.processQueryInfo"), + goleak.IgnoreTopFunction("github.com/patrickmn/go-cache.(*janitor).Run"), + } + + const ( + // give ample time for the goroutines to exit in CI. + waitTime = 100 * time.Millisecond + numIterations = 50 // 5 seconds + ) + var err error + for i := 0; i < numIterations; i++ { + err = goleak.Find(leaksToIgnore...) + if err == nil { + return nil + } + time.Sleep(waitTime) + } + return err +} + +func testMainWrapper(m *testing.M) int { + startingNumGoRoutines := runtime.NumGoroutine() + defer func() { + numGoroutines := runtime.NumGoroutine() + if numGoroutines > startingNumGoRoutines { + log.Infof("!!!!!!!!!!!! 
Wrangler unit tests Leaked %d goroutines", numGoroutines-startingNumGoRoutines) + } + }() _flag.ParseFlagsForTest() - os.Exit(m.Run()) + return m.Run() +} + +func TestMain(m *testing.M) { + os.Exit(testMainWrapper(m)) } -func newTestMaterializerEnv(t *testing.T, ms *vtctldatapb.MaterializeSettings, sources, targets []string) *testMaterializerEnv { +func newTestMaterializerEnv(t *testing.T, ctx context.Context, ms *vtctldatapb.MaterializeSettings, sources, targets []string) *testMaterializerEnv { t.Helper() env := &testMaterializerEnv{ ms: ms, sources: sources, targets: targets, tablets: make(map[int]*topodatapb.Tablet), - topoServ: memorytopo.NewServer("cell"), + topoServ: memorytopo.NewServer(ctx, "cell"), cell: "cell", tmc: newTestMaterializerTMClient(), } diff --git a/go/vt/wrangler/materializer_test.go b/go/vt/wrangler/materializer_test.go index ac4b0c14589..26620a4e762 100644 --- a/go/vt/wrangler/materializer_test.go +++ b/go/vt/wrangler/materializer_test.go @@ -30,6 +30,10 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtgate/vindexes" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" @@ -45,7 +49,36 @@ const mzSelectIDQuery = "select id from _vt.vreplication where db_name='targetks const mzSelectFrozenQuery = "select 1 from _vt.vreplication where db_name='targetks' and message='FROZEN' and workflow_sub_type != 1" const mzCheckJournal = "/select val from _vt.resharding_journal where id=" -var defaultOnDDL = binlogdatapb.OnDDLAction_name[int32(binlogdatapb.OnDDLAction_IGNORE)] +var defaultOnDDL = binlogdatapb.OnDDLAction_IGNORE.String() + +// TestMoveTablesNoRoutingRules confirms that MoveTables does not create routing rules if --no-routing-rules is specified. 
+func TestMoveTablesNoRoutingRules(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + }}, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + + env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, insertPrefix, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzSelectIDQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) + + err := env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1", "", "", false, "", true, false, "", false, false, "", defaultOnDDL, nil, true, false) + require.NoError(t, err) + rr, err := env.wr.ts.GetRoutingRules(ctx) + require.NoError(t, err) + require.Equal(t, 0, len(rr.Rules)) +} func TestMigrateTables(t *testing.T) { ms := &vtctldatapb.MaterializeSettings{ @@ -57,7 +90,9 @@ func TestMigrateTables(t *testing.T) { SourceExpression: "select * from t1", }}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) @@ -66,8 +101,7 @@ func TestMigrateTables(t *testing.T) { env.tmc.expectVRQuery(200, mzSelectIDQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - ctx := context.Background() - err := env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1", "", "", false, "", true, false, "", false, false, "", defaultOnDDL, nil) + err := env.wr.MoveTables(ctx, "workflow", "sourceks", 
"targetks", "t1", "", "", false, "", true, false, "", false, false, "", defaultOnDDL, nil, false, false) require.NoError(t, err) vschema, err := env.wr.ts.GetSrvVSchema(ctx, env.cell) require.NoError(t, err) @@ -98,7 +132,10 @@ func TestMissingTables(t *testing.T) { SourceExpression: "select * from t3", }}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) @@ -107,12 +144,11 @@ func TestMissingTables(t *testing.T) { env.tmc.expectVRQuery(200, mzSelectIDQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - ctx := context.Background() - err := env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1,tyt", "", "", false, "", true, false, "", false, false, "", defaultOnDDL, nil) + err := env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1,tyt", "", "", false, "", true, false, "", false, false, "", defaultOnDDL, nil, false, false) require.EqualError(t, err, "table(s) not found in source keyspace sourceks: tyt") - err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1,tyt,t2,txt", "", "", false, "", true, false, "", false, false, "", defaultOnDDL, nil) + err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1,tyt,t2,txt", "", "", false, "", true, false, "", false, false, "", defaultOnDDL, nil, false, false) require.EqualError(t, err, "table(s) not found in source keyspace sourceks: tyt,txt") - err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1", "", "", false, "", true, false, "", false, false, "", defaultOnDDL, nil) + err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1", "", "", false, "", true, false, "", false, false, "", defaultOnDDL, nil, false, false) require.NoError(t, err) } @@ -133,10 +169,9 @@ func 
TestMoveTablesAllAndExclude(t *testing.T) { }}, } - ctx := context.Background() var err error - var targetTables = func(env *testMaterializerEnv) []string { + var targetTables = func(ctx context.Context, env *testMaterializerEnv) []string { vschema, err := env.wr.ts.GetSrvVSchema(ctx, env.cell) require.NoError(t, err) var targetTables []string @@ -161,16 +196,19 @@ func TestMoveTablesAllAndExclude(t *testing.T) { } for _, tcase := range testCases { t.Run("", func(t *testing.T) { - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(200, insertPrefix, &sqltypes.Result{}) env.tmc.expectVRQuery(200, mzSelectIDQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "", "", "", tcase.allTables, tcase.excludeTables, true, false, "", false, false, "", defaultOnDDL, nil) + err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "", "", "", tcase.allTables, tcase.excludeTables, true, false, "", false, false, "", defaultOnDDL, nil, false, false) require.NoError(t, err) - require.EqualValues(t, tcase.want, targetTables(env)) + require.EqualValues(t, tcase.want, targetTables(ctx, env)) }) } @@ -188,10 +226,12 @@ func TestMoveTablesStopFlags(t *testing.T) { }}, } - ctx := context.Background() var err error t.Run("StopStartedAndStopAfterCopyFlags", func(t *testing.T) { - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() env.tmc.expectVRQuery(100, 
mzCheckJournal, &sqltypes.Result{}) env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) @@ -202,7 +242,7 @@ func TestMoveTablesStopFlags(t *testing.T) { env.tmc.expectVRQuery(200, mzSelectIDQuery, &sqltypes.Result{}) // -auto_start=false is tested by NOT expecting the update query which sets state to RUNNING err = env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1", "", - "", false, "", false, true, "", false, false, "", defaultOnDDL, nil) + "", false, "", false, true, "", false, false, "", defaultOnDDL, nil, false, false) require.NoError(t, err) env.tmc.verifyQueries(t) }) @@ -218,7 +258,10 @@ func TestMigrateVSchema(t *testing.T) { SourceExpression: "select * from t1", }}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) @@ -227,8 +270,7 @@ func TestMigrateVSchema(t *testing.T) { env.tmc.expectVRQuery(200, mzSelectIDQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - ctx := context.Background() - err := env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", `{"t1":{}}`, "", "", false, "", true, false, "", false, false, "", defaultOnDDL, nil) + err := env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", `{"t1":{}}`, "", "", false, "", true, false, "", false, false, "", defaultOnDDL, nil, false, false) require.NoError(t, err) vschema, err := env.wr.ts.GetSrvVSchema(ctx, env.cell) require.NoError(t, err) @@ -249,7 +291,10 @@ func TestCreateLookupVindexFull(t *testing.T) { SourceKeyspace: "sourceks", TargetKeyspace: "targetks", } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, 
[]string{"0"}) defer env.close() specs := &vschemapb.Keyspace{ @@ -283,14 +328,14 @@ func TestCreateLookupVindexFull(t *testing.T) { sourceVSchema := &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, }, Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ - Name: "hash", + Name: "xxhash", Column: "col1", }}, }, @@ -321,15 +366,14 @@ func TestCreateLookupVindexFull(t *testing.T) { env.tmc.expectVRQuery(200, insertPrefix, &sqltypes.Result{}) env.tmc.expectVRQuery(200, "update _vt.vreplication set state='Running' where db_name='targetks' and workflow='lkp_vdx'", &sqltypes.Result{}) - ctx := context.Background() err := env.wr.CreateLookupVindex(ctx, ms.SourceKeyspace, specs, "cell", "PRIMARY", false) require.NoError(t, err) wantvschema := &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, "v": { Type: "lookup_unique", @@ -345,7 +389,7 @@ func TestCreateLookupVindexFull(t *testing.T) { Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ - Name: "hash", + Name: "xxhash", Column: "col1", }, { Name: "v", @@ -373,20 +417,23 @@ func TestCreateLookupVindexCreateDDL(t *testing.T) { SourceKeyspace: "sourceks", TargetKeyspace: "targetks", } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() vs := &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, }, Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ Column: "col1", - Name: "hash", + Name: "xxhash", }}, }, }, @@ -590,7 +637,10 @@ func TestCreateLookupVindexSourceVSchema(t 
*testing.T) { SourceKeyspace: "sourceks", TargetKeyspace: "targetks", } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() specs := &vschemapb.Keyspace{ @@ -630,14 +680,14 @@ func TestCreateLookupVindexSourceVSchema(t *testing.T) { sourceVSchema: &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, }, Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ - Name: "hash", + Name: "xxhash", Column: "col1", }}, }, @@ -646,8 +696,8 @@ func TestCreateLookupVindexSourceVSchema(t *testing.T) { out: &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, "v": { Type: "lookup_unique", @@ -663,7 +713,7 @@ func TestCreateLookupVindexSourceVSchema(t *testing.T) { Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ - Name: "hash", + Name: "xxhash", Column: "col1", }, { Name: "v", @@ -677,8 +727,8 @@ func TestCreateLookupVindexSourceVSchema(t *testing.T) { sourceVSchema: &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, "v": { Type: "lookup_unique", @@ -694,7 +744,7 @@ func TestCreateLookupVindexSourceVSchema(t *testing.T) { Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ - Name: "hash", + Name: "xxhash", Column: "col1", }}, }, @@ -703,8 +753,8 @@ func TestCreateLookupVindexSourceVSchema(t *testing.T) { out: &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, "v": { Type: "lookup_unique", @@ -720,7 +770,7 @@ func 
TestCreateLookupVindexSourceVSchema(t *testing.T) { Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ - Name: "hash", + Name: "xxhash", Column: "col1", }, { Name: "v", @@ -734,8 +784,8 @@ func TestCreateLookupVindexSourceVSchema(t *testing.T) { sourceVSchema: &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, "v": { Type: "lookup_unique", @@ -751,10 +801,10 @@ func TestCreateLookupVindexSourceVSchema(t *testing.T) { Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ - Name: "hash", + Name: "xxhash", Column: "col1", }, { - Name: "hash", + Name: "xxhash", Column: "col2", }}, }, @@ -763,8 +813,8 @@ func TestCreateLookupVindexSourceVSchema(t *testing.T) { out: &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, "v": { Type: "lookup_unique", @@ -780,10 +830,10 @@ func TestCreateLookupVindexSourceVSchema(t *testing.T) { Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ - Name: "hash", + Name: "xxhash", Column: "col1", }, { - Name: "hash", + Name: "xxhash", Column: "col2", }, { Name: "v", @@ -826,20 +876,23 @@ func TestCreateLookupVindexTargetVSchema(t *testing.T) { SourceKeyspace: "sourceks", TargetKeyspace: "targetks", } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() sourcevs := &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, }, Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ Column: "col1", - Name: "hash", + Name: "xxhash", }}, }, }, @@ -852,15 +905,15 @@ func 
TestCreateLookupVindexTargetVSchema(t *testing.T) { withTable := &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, }, Tables: map[string]*vschemapb.Table{ "t2": { ColumnVindexes: []*vschemapb.ColumnVindex{{ Column: "c1", - Name: "hash", + Name: "xxhash", }}, }, }, @@ -909,15 +962,15 @@ func TestCreateLookupVindexTargetVSchema(t *testing.T) { out: &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, }, Tables: map[string]*vschemapb.Table{ "lkp": { ColumnVindexes: []*vschemapb.ColumnVindex{{ Column: "c1", - Name: "hash", + Name: "xxhash", }}, }, }, @@ -950,24 +1003,23 @@ func TestCreateLookupVindexTargetVSchema(t *testing.T) { targetVSchema: &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - // Create a misleading vindex name. - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, }, }, out: &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, }, Tables: map[string]*vschemapb.Table{ "lkp": { ColumnVindexes: []*vschemapb.ColumnVindex{{ Column: "c1", - Name: "hash", + Name: "xxhash", }}, }, }, @@ -980,12 +1032,12 @@ func TestCreateLookupVindexTargetVSchema(t *testing.T) { Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ // Create a misleading vindex name. 
- "hash": { + "xxhash": { Type: "unicode_loose_md5", }, }, }, - err: "a conflicting vindex named hash already exists in the target vschema", + err: "a conflicting vindex named xxhash already exists in the target vschema", }, { description: "sharded, int64, good table", targetTable: "t2", @@ -994,15 +1046,15 @@ func TestCreateLookupVindexTargetVSchema(t *testing.T) { out: &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, }, Tables: map[string]*vschemapb.Table{ "t2": { ColumnVindexes: []*vschemapb.ColumnVindex{{ Column: "c1", - Name: "hash", + Name: "xxhash", }}, }, }, @@ -1063,7 +1115,10 @@ func TestCreateLookupVindexSameKeyspace(t *testing.T) { SourceKeyspace: "ks", TargetKeyspace: "ks", } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() specs := &vschemapb.Keyspace{ @@ -1097,14 +1152,14 @@ func TestCreateLookupVindexSameKeyspace(t *testing.T) { vschema := &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, }, Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ - Name: "hash", + Name: "xxhash", Column: "col1", }}, }, @@ -1113,8 +1168,8 @@ func TestCreateLookupVindexSameKeyspace(t *testing.T) { want := &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, "v": { Type: "lookup_unique", @@ -1130,7 +1185,7 @@ func TestCreateLookupVindexSameKeyspace(t *testing.T) { Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ - Name: "hash", + Name: "xxhash", Column: "col1", }, { Name: "v", @@ -1140,7 +1195,7 @@ func TestCreateLookupVindexSameKeyspace(t *testing.T) 
{ "lkp": { ColumnVindexes: []*vschemapb.ColumnVindex{{ Column: "c1", - Name: "hash", + Name: "xxhash", }}, }, }, @@ -1172,7 +1227,10 @@ func TestCreateCustomizedVindex(t *testing.T) { SourceKeyspace: "ks", TargetKeyspace: "ks", } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() specs := &vschemapb.Keyspace{ @@ -1180,10 +1238,9 @@ func TestCreateCustomizedVindex(t *testing.T) { "v": { Type: "lookup_unique", Params: map[string]string{ - "table": "ks.lkp", - "from": "c1", - "to": "col2", - "data_type": "bigint(20)", + "table": "ks.lkp", + "from": "c1", + "to": "col2", }, Owner: "t1", }, @@ -1207,14 +1264,14 @@ func TestCreateCustomizedVindex(t *testing.T) { vschema := &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, }, Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ - Name: "hash", + Name: "xxhash", Column: "col1", }}, }, @@ -1223,8 +1280,8 @@ func TestCreateCustomizedVindex(t *testing.T) { want := &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, "v": { Type: "lookup_unique", @@ -1232,7 +1289,6 @@ func TestCreateCustomizedVindex(t *testing.T) { "table": "ks.lkp", "from": "c1", "to": "col2", - "data_type": "bigint(20)", "write_only": "true", }, Owner: "t1", @@ -1241,7 +1297,7 @@ func TestCreateCustomizedVindex(t *testing.T) { Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ - Name: "hash", + Name: "xxhash", Column: "col1", }, { Name: "v", @@ -1251,7 +1307,7 @@ func TestCreateCustomizedVindex(t *testing.T) { "lkp": { ColumnVindexes: []*vschemapb.ColumnVindex{{ Column: "c1", - Name: "hash", + Name: "xxhash", }}, }, }, @@ -1279,12 
+1335,136 @@ func TestCreateCustomizedVindex(t *testing.T) { } } +func TestCreateLookupVindexIgnoreNulls(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + SourceKeyspace: "ks", + TargetKeyspace: "ks", + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + + specs := &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "v": { + Type: "consistent_lookup", + Params: map[string]string{ + "table": "ks.lkp", + "from": "col2,col1", + "to": "keyspace_id", + "ignore_nulls": "true", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "v", + Columns: []string{"col2", "col1"}, + }}, + }, + }, + } + // Dummy sourceSchema + sourceSchema := "CREATE TABLE `t1` (\n" + + " `col1` int(11) NOT NULL AUTO_INCREMENT,\n" + + " `col2` int(11) DEFAULT NULL,\n" + + " PRIMARY KEY (`id`)\n" + + ") ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=latin1" + + vschema := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col1", + }}, + }, + }, + } + + wantKs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + "v": { + Type: "consistent_lookup", + Params: map[string]string{ + "table": "ks.lkp", + "from": "col2,col1", + "to": "keyspace_id", + "write_only": "true", + "ignore_nulls": "true", + }, + Owner: "t1", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col1", + }, { + Name: "v", + Columns: []string{"col2", "col1"}, + }}, + }, + "lkp": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "col2", + Name: "xxhash", + }}, + }, + 
}, + } + wantQuery := "select col2 as col2, col1 as col1, keyspace_id() as keyspace_id from t1 where col2 is not null and col1 is not null group by col2, col1, keyspace_id" + + env.tmc.schema[ms.SourceKeyspace+".t1"] = &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{ + Fields: []*querypb.Field{{ + Name: "col1", + Type: querypb.Type_INT64, + }, { + Name: "col2", + Type: querypb.Type_INT64, + }}, + Schema: sourceSchema, + }}, + } + if err := env.topoServ.SaveVSchema(context.Background(), ms.SourceKeyspace, vschema); err != nil { + t.Fatal(err) + } + + ms, ks, _, err := env.wr.prepareCreateLookup(context.Background(), ms.SourceKeyspace, specs, false) + require.NoError(t, err) + if !proto.Equal(wantKs, ks) { + t.Errorf("unexpected keyspace value: got:\n%v, want\n%v", ks, wantKs) + } + require.NotNil(t, ms) + require.GreaterOrEqual(t, len(ms.TableSettings), 1) + require.Equal(t, wantQuery, ms.TableSettings[0].SourceExpression, "unexpected query") +} + func TestStopAfterCopyFlag(t *testing.T) { ms := &vtctldatapb.MaterializeSettings{ SourceKeyspace: "ks", TargetKeyspace: "ks", } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() specs := &vschemapb.Keyspace{ Vindexes: map[string]*vschemapb.Vindex{ @@ -1317,14 +1497,14 @@ func TestStopAfterCopyFlag(t *testing.T) { vschema := &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, }, Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ - Name: "hash", + Name: "xxhash", Column: "col1", }}, }, @@ -1356,7 +1536,10 @@ func TestStopAfterCopyFlag(t *testing.T) { } func TestCreateLookupVindexFailures(t *testing.T) { - topoServ := memorytopo.NewServer("cell") + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + + topoServ := memorytopo.NewServer(ctx, "cell") wr := New(logutil.NewConsoleLogger(), topoServ, nil) unique := map[string]*vschemapb.Vindex{ @@ -1374,7 +1557,7 @@ func TestCreateLookupVindexFailures(t *testing.T) { Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ "other": { - Type: "hash", + Type: "xxhash", }, "v": { Type: "lookup_unique", @@ -1411,10 +1594,10 @@ func TestCreateLookupVindexFailures(t *testing.T) { input: &vschemapb.Keyspace{ Vindexes: map[string]*vschemapb.Vindex{ "v1": { - Type: "hash", + Type: "xxhash", }, "v2": { - Type: "hash", + Type: "xxhash", }, }, }, @@ -1424,11 +1607,11 @@ func TestCreateLookupVindexFailures(t *testing.T) { input: &vschemapb.Keyspace{ Vindexes: map[string]*vschemapb.Vindex{ "v": { - Type: "hash", + Type: "xxhash", }, }, }, - err: "vindex hash is not a lookup type", + err: "vindex xxhash is not a lookup type", }, { description: "unqualified table", input: &vschemapb.Keyspace{ @@ -1451,6 +1634,7 @@ func TestCreateLookupVindexFailures(t *testing.T) { Params: map[string]string{ "table": "targetks.t", "from": "c1,c2", + "to": "c3", }, }, }, @@ -1465,6 +1649,7 @@ func TestCreateLookupVindexFailures(t *testing.T) { Params: map[string]string{ "table": "targetks.t", "from": "c1", + "to": "c2", }, }, }, @@ -1479,6 +1664,7 @@ func TestCreateLookupVindexFailures(t *testing.T) { Params: map[string]string{ "table": "targetks.t", "from": "c1,c2", + "to": "c2", }, }, }, @@ -1521,6 +1707,7 @@ func TestCreateLookupVindexFailures(t *testing.T) { Params: map[string]string{ "table": "targetks.t", "from": "c1", + "to": "c2", }, Owner: "otherTable", }, @@ -1570,6 +1757,7 @@ func TestCreateLookupVindexFailures(t *testing.T) { Params: map[string]string{ "table": "targetks.t", "from": "c1", + "to": "c2", }, Owner: "t1", }, @@ -1626,14 +1814,17 @@ func TestExternalizeVindex(t *testing.T) { SourceKeyspace: "sourceks", TargetKeyspace: "targetks", } - env := newTestMaterializerEnv(t, ms, 
[]string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) defer env.close() sourceVSchema := &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, "owned": { Type: "lookup_unique", @@ -1666,7 +1857,7 @@ func TestExternalizeVindex(t *testing.T) { Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ - Name: "hash", + Name: "xxhash", Column: "col1", }, { Name: "owned", @@ -1768,10 +1959,16 @@ func TestMaterializerOneToOne(t *testing.T) { CreateDdl: "t4ddl", }, }, - Cell: "zone1", - TabletTypes: "primary,rdonly", + Cell: "zone1", + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) @@ -1792,7 +1989,7 @@ func TestMaterializerOneToOne(t *testing.T) { ) env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.NoError(t, err) env.tmc.verifyQueries(t) } @@ -1812,7 +2009,10 @@ func TestMaterializerManyToOne(t *testing.T) { CreateDdl: "t2ddl", }}, } - env := newTestMaterializerEnv(t, ms, []string{"-80", "80-"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"-80", "80-"}, []string{"0"}) defer env.close() env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) @@ -1827,7 +2027,7 @@ func TestMaterializerManyToOne(t 
*testing.T) { ) env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.NoError(t, err) env.tmc.verifyQueries(t) } @@ -1843,21 +2043,24 @@ func TestMaterializerOneToMany(t *testing.T) { CreateDdl: "t1ddl", }}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) defer env.close() vs := &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, }, Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ Column: "c1", - Name: "hash", + Name: "xxhash", }}, }, }, @@ -1872,19 +2075,19 @@ func TestMaterializerOneToMany(t *testing.T) { env.tmc.expectVRQuery( 200, insertPrefix+ - `.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.hash.*-80.*`, + `.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.xxhash.*-80.*`, &sqltypes.Result{}, ) env.tmc.expectVRQuery( 210, insertPrefix+ - `.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.hash.*80-.*`, + `.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.xxhash.*80-.*`, &sqltypes.Result{}, ) env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(210, mzUpdateQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.NoError(t, err) env.tmc.verifyQueries(t) } @@ -1900,21 +2103,24 @@ func TestMaterializerManyToMany(t *testing.T) { CreateDdl: "t1ddl", }}, } - env := newTestMaterializerEnv(t, ms, []string{"-40", "40-"}, []string{"-80", "80-"}) + ctx, 
cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"-40", "40-"}, []string{"-80", "80-"}) defer env.close() vs := &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, }, Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ Column: "c1", - Name: "hash", + Name: "xxhash", }}, }, }, @@ -1929,20 +2135,20 @@ func TestMaterializerManyToMany(t *testing.T) { env.tmc.expectVRQuery( 200, insertPrefix+ - `.*shard:\\"-40\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.hash.*-80.*`+ - `.*shard:\\"40-\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.hash.*-80.*`, + `.*shard:\\"-40\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.xxhash.*-80.*`+ + `.*shard:\\"40-\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.xxhash.*-80.*`, &sqltypes.Result{}, ) env.tmc.expectVRQuery( 210, insertPrefix+ - `.*shard:\\"-40\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.hash.*80-.*`+ - `.*shard:\\"40-\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.hash.*80-.*`, + `.*shard:\\"-40\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.xxhash.*80-.*`+ + `.*shard:\\"40-\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.xxhash.*80-.*`, &sqltypes.Result{}, ) env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(210, mzUpdateQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.NoError(t, err) env.tmc.verifyQueries(t) } @@ -1958,7 +2164,10 @@ func TestMaterializerMulticolumnVindex(t *testing.T) { CreateDdl: "t1ddl", 
}}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) defer env.close() vs := &vschemapb.Keyspace{ @@ -2002,7 +2211,7 @@ func TestMaterializerMulticolumnVindex(t *testing.T) { env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(210, mzUpdateQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.NoError(t, err) env.tmc.verifyQueries(t) } @@ -2022,7 +2231,10 @@ func TestMaterializerDeploySchema(t *testing.T) { CreateDdl: "t2ddl", }}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() delete(env.tmc.schema, "targetks.t2") @@ -2038,7 +2250,7 @@ func TestMaterializerDeploySchema(t *testing.T) { ) env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.NoError(t, err) env.tmc.verifyQueries(t) require.Equal(t, env.tmc.getSchemaRequestCount(100), 1) @@ -2060,7 +2272,10 @@ func TestMaterializerCopySchema(t *testing.T) { CreateDdl: "t2ddl", }}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() delete(env.tmc.schema, "targetks.t1") @@ -2076,7 +2291,7 @@ func TestMaterializerCopySchema(t *testing.T) { ) env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.NoError(t, err) env.tmc.verifyQueries(t) 
require.Equal(t, env.tmc.getSchemaRequestCount(100), 1) @@ -2095,7 +2310,10 @@ func TestMaterializerExplicitColumns(t *testing.T) { CreateDdl: "t1ddl", }}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) defer env.close() vs := &vschemapb.Keyspace{ @@ -2139,7 +2357,7 @@ func TestMaterializerExplicitColumns(t *testing.T) { env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(210, mzUpdateQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.NoError(t, err) env.tmc.verifyQueries(t) } @@ -2155,7 +2373,10 @@ func TestMaterializerRenamedColumns(t *testing.T) { CreateDdl: "t1ddl", }}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) defer env.close() vs := &vschemapb.Keyspace{ @@ -2199,7 +2420,7 @@ func TestMaterializerRenamedColumns(t *testing.T) { env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(210, mzUpdateQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.NoError(t, err) env.tmc.verifyQueries(t) } @@ -2220,14 +2441,17 @@ func TestMaterializerStopAfterCopy(t *testing.T) { CreateDdl: "t2ddl", }}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(200, insertPrefix+`.*stop_after_copy:true`, &sqltypes.Result{}) 
env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.NoError(t, err) env.tmc.verifyQueries(t) } @@ -2243,7 +2467,10 @@ func TestMaterializerNoTargetVSchema(t *testing.T) { CreateDdl: "t1ddl", }}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) defer env.close() vs := &vschemapb.Keyspace{ @@ -2255,7 +2482,7 @@ func TestMaterializerNoTargetVSchema(t *testing.T) { } env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.EqualError(t, err, "table t1 not found in vschema for keyspace targetks") } @@ -2270,13 +2497,16 @@ func TestMaterializerNoDDL(t *testing.T) { CreateDdl: "", }}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() delete(env.tmc.schema, "targetks.t1") env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.EqualError(t, err, "target table t1 does not exist and there is no create ddl defined") require.Equal(t, env.tmc.getSchemaRequestCount(100), 0) require.Equal(t, env.tmc.getSchemaRequestCount(200), 1) @@ -2297,13 +2527,16 @@ func TestMaterializerNoSourcePrimary(t *testing.T) { sources := []string{"0"} targets := []string{"0"} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Copied from newTestMaterializerEnv env := &testMaterializerEnv{ ms: ms, sources: 
sources, targets: targets, tablets: make(map[int]*topodatapb.Tablet), - topoServ: memorytopo.NewServer("cell"), + topoServ: memorytopo.NewServer(ctx, "cell"), cell: "cell", tmc: newTestMaterializerTMClient(), } @@ -2326,7 +2559,7 @@ func TestMaterializerNoSourcePrimary(t *testing.T) { env.expectValidation() env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.EqualError(t, err, "source shard must have a primary for copying schema: 0") } @@ -2341,13 +2574,16 @@ func TestMaterializerTableMismatchNonCopy(t *testing.T) { CreateDdl: "", }}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() delete(env.tmc.schema, "targetks.t1") env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.EqualError(t, err, "target table t1 does not exist and there is no create ddl defined") } @@ -2362,13 +2598,16 @@ func TestMaterializerTableMismatchCopy(t *testing.T) { CreateDdl: "copy", }}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() delete(env.tmc.schema, "targetks.t1") env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.EqualError(t, err, "source and target table names must match for copying schema: t2 vs t1") } @@ -2383,14 +2622,17 @@ func TestMaterializerNoSourceTable(t *testing.T) { CreateDdl: "copy", }}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, 
[]string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() delete(env.tmc.schema, "targetks.t1") delete(env.tmc.schema, "sourceks.t1") env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.EqualError(t, err, "source table t1 does not exist") } @@ -2405,11 +2647,14 @@ func TestMaterializerSyntaxError(t *testing.T) { CreateDdl: "t1ddl", }}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.EqualError(t, err, "syntax error at position 4 near 'bad'") } @@ -2424,11 +2669,14 @@ func TestMaterializerNotASelect(t *testing.T) { CreateDdl: "t1ddl", }}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.EqualError(t, err, "unrecognized statement: update t1 set val=1") } @@ -2443,7 +2691,10 @@ func TestMaterializerNoGoodVindex(t *testing.T) { CreateDdl: "t1ddl", }}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) defer env.close() vs := 
&vschemapb.Keyspace{ @@ -2451,6 +2702,11 @@ func TestMaterializerNoGoodVindex(t *testing.T) { Vindexes: map[string]*vschemapb.Vindex{ "lookup_unique": { Type: "lookup_unique", + Params: map[string]string{ + "table": "t1", + "from": "c1", + "to": "c2", + }, }, }, Tables: map[string]*vschemapb.Table{ @@ -2469,7 +2725,7 @@ func TestMaterializerNoGoodVindex(t *testing.T) { env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.EqualError(t, err, "could not find a vindex to compute keyspace id for table t1") } @@ -2484,21 +2740,23 @@ func TestMaterializerComplexVindexExpression(t *testing.T) { CreateDdl: "t1ddl", }}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) defer env.close() vs := &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, }, Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ Column: "c1", - Name: "hash", + Name: "xxhash", }}, }, }, @@ -2510,7 +2768,7 @@ func TestMaterializerComplexVindexExpression(t *testing.T) { env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.EqualError(t, err, "vindex column cannot be a complex expression: a + b as c1") } @@ -2525,21 +2783,23 @@ func TestMaterializerNoVindexInExpression(t *testing.T) { CreateDdl: "t1ddl", }}, } - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + 
defer cancel() + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) defer env.close() vs := &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, }, Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ Column: "c1", - Name: "hash", + Name: "xxhash", }}, }, }, @@ -2551,7 +2811,7 @@ func TestMaterializerNoVindexInExpression(t *testing.T) { env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.EqualError(t, err, "could not find vindex column c1") } @@ -2694,7 +2954,7 @@ func TestStripConstraints(t *testing.T) { } } -func TestMaterializerManyToManySomeUnreachable(t *testing.T) { +func TestMaterializerSourceShardSelection(t *testing.T) { ms := &vtctldatapb.MaterializeSettings{ Workflow: "workflow", SourceKeyspace: "sourceks", @@ -2706,69 +2966,292 @@ func TestMaterializerManyToManySomeUnreachable(t *testing.T) { }}, } - vs := &vschemapb.Keyspace{ + getStreamInsert := func(sourceShard, sourceColumn, targetVindex, targetShard string) string { + return fmt.Sprintf(`.*shard:\\"%s\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(%s.*targetks\.%s.*%s.*`, sourceShard, sourceColumn, targetVindex, targetShard) + } + + targetVSchema := &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ - "hash": { - Type: "hash", + "xxhash": { + Type: "xxhash", }, }, Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ Column: "c1", - Name: "hash", + Name: "xxhash", }}, }, }, } + type testcase struct { - targetShards, sourceShards []string - insertMap map[string][]string + name string + targetShards, sourceShards []string + sourceColumn string + targetVindex string + insertMap 
map[string][]string + targetVSchema, sourceVSchema *vschemapb.Keyspace + getStreamInsert func(sourceShard, sourceColumn, targetVindexName, targetShard string) string } testcases := []testcase{ { - targetShards: []string{"-40", "40-80", "80-c0", "c0-"}, - sourceShards: []string{"-80", "80-"}, - insertMap: map[string][]string{"-40": {"-80"}, "40-80": {"-80"}, "80-c0": {"80-"}, "c0-": {"80-"}}, + targetShards: []string{"-40", "40-80", "80-c0", "c0-"}, + sourceShards: []string{"-80", "80-"}, + sourceColumn: "c1", + targetVindex: "xxhash", + insertMap: map[string][]string{"-40": {"-80"}, "40-80": {"-80"}, "80-c0": {"80-"}, "c0-": {"80-"}}, + targetVSchema: targetVSchema, + getStreamInsert: getStreamInsert, + }, + { + targetShards: []string{"-20", "20-40", "40-a0", "a0-f0", "f0-"}, + sourceShards: []string{"-40", "40-80", "80-c0", "c0-"}, + sourceColumn: "c1", + targetVindex: "xxhash", + insertMap: map[string][]string{"-20": {"-40"}, "20-40": {"-40"}, "40-a0": {"40-80", "80-c0"}, "a0-f0": {"80-c0", "c0-"}, "f0-": {"c0-"}}, + targetVSchema: targetVSchema, + getStreamInsert: getStreamInsert, + }, + { + targetShards: []string{"-40", "40-80", "80-"}, + sourceShards: []string{"-80", "80-"}, + sourceColumn: "c1", + targetVindex: "xxhash", + insertMap: map[string][]string{"-40": {"-80"}, "40-80": {"-80"}, "80-": {"80-"}}, + targetVSchema: targetVSchema, + getStreamInsert: getStreamInsert, + }, + { + targetShards: []string{"-80", "80-"}, + sourceShards: []string{"-40", "40-80", "80-c0", "c0-"}, + sourceColumn: "c1", + targetVindex: "xxhash", + insertMap: map[string][]string{"-80": {"-40", "40-80"}, "80-": {"80-c0", "c0-"}}, + targetVSchema: targetVSchema, + getStreamInsert: getStreamInsert, + }, + { + targetShards: []string{"0"}, + sourceShards: []string{"-80", "80-"}, + sourceColumn: "c1", + targetVindex: "xxhash", + insertMap: map[string][]string{"0": {"-80", "80-"}}, + targetVSchema: targetVSchema, + getStreamInsert: getStreamInsert, + }, + { + targetShards: []string{"-80", 
"80-"}, + sourceShards: []string{"0"}, + sourceColumn: "c1", + targetVindex: "xxhash", + insertMap: map[string][]string{"-80": {"0"}, "80-": {"0"}}, + targetVSchema: targetVSchema, + getStreamInsert: getStreamInsert, }, { - targetShards: []string{"-20", "20-40", "40-a0", "a0-f0", "f0-"}, + name: "different primary vindex type, use all source shards", + targetShards: []string{"-80", "80-"}, sourceShards: []string{"-40", "40-80", "80-c0", "c0-"}, - insertMap: map[string][]string{"-20": {"-40"}, "20-40": {"-40"}, "40-a0": {"40-80", "80-c0"}, "a0-f0": {"80-c0", "c0-"}, "f0-": {"c0-"}}, + sourceColumn: "c1", + targetVindex: "hash", + insertMap: map[string][]string{ + "-80": {"-40", "40-80", "80-c0", "c0-"}, + "80-": {"-40", "40-80", "80-c0", "c0-"}, + }, + targetVSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "hash", + }}, + }, + }, + }, + getStreamInsert: getStreamInsert, }, { - targetShards: []string{"-40", "40-80", "80-"}, - sourceShards: []string{"-80", "80-"}, - insertMap: map[string][]string{"-40": {"-80"}, "40-80": {"-80"}, "80-": {"80-"}}, + name: "different vindex type and name, use all source shards", + targetShards: []string{"-80", "80-"}, + sourceShards: []string{"-40", "40-80", "80-c0", "c0-"}, + sourceColumn: "c1", + targetVindex: "xxhash", + insertMap: map[string][]string{ + "-80": {"-40", "40-80", "80-c0", "c0-"}, + "80-": {"-40", "40-80", "80-c0", "c0-"}, + }, + targetVSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "xxhash", + }}, + }, + }, + }, + getStreamInsert: getStreamInsert, }, { + name: "same vindex type but different name, use intersecting source 
shards", targetShards: []string{"-80", "80-"}, sourceShards: []string{"-40", "40-80", "80-c0", "c0-"}, + sourceColumn: "c1", + targetVindex: "hash", insertMap: map[string][]string{"-80": {"-40", "40-80"}, "80-": {"80-c0", "c0-"}}, + targetVSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash_vdx": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "hash_vdx", + }}, + }, + }, + }, + getStreamInsert: getStreamInsert, }, { - targetShards: []string{"0"}, + name: "unsharded source, sharded target, use all source shards", + targetShards: []string{"-80", "80-"}, + sourceShards: []string{"-"}, + targetVindex: "xxhash", + insertMap: map[string][]string{ + "-80": {"-"}, + "80-": {"-"}, + }, + sourceVSchema: &vschemapb.Keyspace{ + Sharded: false, + }, + targetVSchema: targetVSchema, + getStreamInsert: getStreamInsert, + }, + { + name: "sharded source, unsharded target, use all source shards", + targetShards: []string{"-"}, sourceShards: []string{"-80", "80-"}, - insertMap: map[string][]string{"0": {"-80", "80-"}}, + insertMap: map[string][]string{ + "-": {"-80", "80-"}, + }, + targetVSchema: &vschemapb.Keyspace{ + Sharded: false, + }, + // The single target shard streams all data from each source shard + // without any keyrange filtering. 
+ getStreamInsert: func(sourceShard, _, _, targetShard string) string { + return fmt.Sprintf(`.*shard:\\"%s\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1`, sourceShard) + }, }, { + name: "target secondary vindexes, use intersecting source shards", targetShards: []string{"-80", "80-"}, - sourceShards: []string{"0"}, - insertMap: map[string][]string{"-80": {"0"}, "80-": {"0"}}, + sourceShards: []string{"-40", "40-80", "80-c0", "c0-"}, + sourceColumn: "c1", + targetVindex: "hash", + insertMap: map[string][]string{"-80": {"-40", "40-80"}, "80-": {"80-c0", "c0-"}}, + targetVSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + "lookup_vdx": { + Type: "lookup", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{ + { + Column: "c1", + Name: "hash", + }, + { + Column: "c2", + Name: "lookup_vdx", + }, + }, + }, + }, + }, + getStreamInsert: getStreamInsert, + }, + { + name: "same vindex type but different cols, use all source shards", + targetShards: []string{"-80", "80-"}, + sourceShards: []string{"-40", "40-80", "80-c0", "c0-"}, + sourceColumn: "c2", + targetVindex: "hash", + sourceVSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "hash", + }}, + }, + }, + }, + targetVSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c2", + Name: "hash", + }}, + }, + }, + }, + getStreamInsert: getStreamInsert, }, - } - - getStreamInsert := func(sourceShard, targetShard string) string { - return fmt.Sprintf(`.*shard:\\"%s\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where 
in_keyrange\(c1.*targetks\.hash.*%s.*`, sourceShard, targetShard) } for _, tcase := range testcases { - t.Run("", func(t *testing.T) { - env := newTestMaterializerEnv(t, ms, tcase.sourceShards, tcase.targetShards) - if err := env.topoServ.SaveVSchema(context.Background(), "targetks", vs); err != nil { + t.Run(tcase.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestMaterializerEnv(t, ctx, ms, tcase.sourceShards, tcase.targetShards) + if err := env.topoServ.SaveVSchema(ctx, "targetks", tcase.targetVSchema); err != nil { t.Fatal(err) } + if tcase.sourceVSchema != nil { + if err := env.topoServ.SaveVSchema(context.Background(), "sourceks", tcase.sourceVSchema); err != nil { + t.Fatal(err) + } + } defer env.close() for i, targetShard := range tcase.targetShards { tabletID := 200 + i*10 @@ -2777,7 +3260,7 @@ func TestMaterializerManyToManySomeUnreachable(t *testing.T) { streamsInsert := "" sourceShards := tcase.insertMap[targetShard] for _, sourceShard := range sourceShards { - streamsInsert += getStreamInsert(sourceShard, targetShard) + streamsInsert += tcase.getStreamInsert(sourceShard, tcase.sourceColumn, tcase.targetVindex, targetShard) } env.tmc.expectVRQuery( tabletID, @@ -2786,7 +3269,7 @@ func TestMaterializerManyToManySomeUnreachable(t *testing.T) { ) env.tmc.expectVRQuery(tabletID, mzUpdateQuery, &sqltypes.Result{}) } - err := env.wr.Materialize(context.Background(), ms) + err := env.wr.Materialize(ctx, ms) require.NoError(t, err) env.tmc.verifyQueries(t) }) @@ -2812,12 +3295,14 @@ func TestMoveTablesDDLFlag(t *testing.T) { for onDDLAction := range binlogdatapb.OnDDLAction_value { t.Run(fmt.Sprintf("OnDDL Flag:%v", onDDLAction), func(t *testing.T) { - env := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) defer env.close() env.tmc.expectVRQuery(100, 
mzCheckJournal, &sqltypes.Result{}) env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - if onDDLAction == binlogdatapb.OnDDLAction_name[int32(binlogdatapb.OnDDLAction_IGNORE)] { + if onDDLAction == binlogdatapb.OnDDLAction_IGNORE.String() { // This is the default and go does not marshal defaults // for prototext fields so we use the default insert stmt. env.tmc.expectVRQuery(200, insertPrefix, &sqltypes.Result{}) @@ -2829,8 +3314,201 @@ func TestMoveTablesDDLFlag(t *testing.T) { env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) err := env.wr.MoveTables(ctx, "workflow", "sourceks", "targetks", "t1", "", - "", false, "", false, true, "", false, false, "", onDDLAction, nil) + "", false, "", false, true, "", false, false, "", onDDLAction, nil, false, false) + require.NoError(t, err) + }) + } +} + +func TestAddTablesToVSchema(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + srcks := "source" + wr := &Wrangler{ + logger: logutil.NewMemoryLogger(), + ts: ts, + sourceTs: ts, + } + tests := []struct { + name string + sourceVSchema *vschemapb.Keyspace + inTargetVSchema *vschemapb.Keyspace + tables []string + copyVSchema bool + wantTargetVSchema *vschemapb.Keyspace + }{ + { + name: "no target vschema; copy source vschema", + sourceVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": { + Type: vindexes.TypeSequence, + }, + "t3": { + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "t2", + }, + }, + }, + }, + inTargetVSchema: &vschemapb.Keyspace{}, + tables: []string{"t1", "t2", "t3", "t4"}, + copyVSchema: true, + wantTargetVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": { + Type: vindexes.TypeSequence, + }, + "t3": { + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "t2", + }, 
+ }, + "t4": {}, + }, + }, + }, + { + name: "no target vschema; copy source vschema; sharded source", + sourceVSchema: &vschemapb.Keyspace{ + Sharded: true, + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": { + Type: vindexes.TypeSequence, + }, + "t3": { + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "t2", + }, + }, + "t4": { + ColumnVindexes: []*vschemapb.ColumnVindex{ // Should be stripped on target + { + Column: "c1", + Name: "xxhash", + }, + }, + }, + }, + }, + inTargetVSchema: &vschemapb.Keyspace{}, + tables: []string{"t1", "t2", "t3", "t4"}, + copyVSchema: true, + wantTargetVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": { + Type: vindexes.TypeSequence, + }, + "t3": { + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "t2", + }, + }, + "t4": {}, + }, + }, + }, + { + name: "target vschema; copy source vschema", + sourceVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": { + Type: vindexes.TypeSequence, + }, + "t3": { + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "t2", + }, + }, + "t4": { + ColumnVindexes: []*vschemapb.ColumnVindex{ // Should be stripped on target + { + Column: "c1", + Name: "xxhash", + }, + }, + }, + }, + }, + inTargetVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": {}, + "t3": {}, + "t4": {}, + }, + }, + tables: []string{"t1", "t2", "t3", "t4"}, + copyVSchema: true, + wantTargetVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": {}, + "t3": {}, + "t4": {}, + }, + }, + }, + { + name: "no target vschema; do not copy source vschema", + sourceVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: 
vindexes.TypeReference, + }, + "t2": { + Type: vindexes.TypeSequence, + }, + "t3": { + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "t2", + }, + }, + }, + }, + inTargetVSchema: &vschemapb.Keyspace{}, + tables: []string{"t1", "t2"}, + copyVSchema: false, + wantTargetVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": {}, + "t2": {}, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.SaveVSchema(ctx, srcks, tt.sourceVSchema) + err := wr.addTablesToVSchema(ctx, srcks, tt.inTargetVSchema, tt.tables, tt.copyVSchema) require.NoError(t, err) + require.Equal(t, tt.wantTargetVSchema, tt.inTargetVSchema) }) } } diff --git a/go/vt/wrangler/reparent.go b/go/vt/wrangler/reparent.go index 52d991f6749..fbeec55cbbc 100644 --- a/go/vt/wrangler/reparent.go +++ b/go/vt/wrangler/reparent.go @@ -93,7 +93,7 @@ func (wr *Wrangler) PlannedReparentShard(ctx context.Context, keyspace, shard st // EmergencyReparentShard will make the provided tablet the primary for // the shard, when the old primary is completely unreachable. 
-func (wr *Wrangler) EmergencyReparentShard(ctx context.Context, keyspace, shard string, primaryElectTabletAlias *topodatapb.TabletAlias, waitReplicasTimeout time.Duration, ignoredTablets sets.Set[string], preventCrossCellPromotion bool) (err error) { +func (wr *Wrangler) EmergencyReparentShard(ctx context.Context, keyspace, shard string, primaryElectTabletAlias *topodatapb.TabletAlias, waitReplicasTimeout time.Duration, ignoredTablets sets.Set[string], preventCrossCellPromotion bool, waitForAllTablets bool) (err error) { _, err = reparentutil.NewEmergencyReparenter(wr.ts, wr.tmc, wr.logger).ReparentShard( ctx, keyspace, @@ -103,6 +103,7 @@ func (wr *Wrangler) EmergencyReparentShard(ctx context.Context, keyspace, shard WaitReplicasTimeout: waitReplicasTimeout, IgnoreReplicas: ignoredTablets, PreventCrossCellPromotion: preventCrossCellPromotion, + WaitAllTablets: waitForAllTablets, }, ) diff --git a/go/vt/wrangler/resharder.go b/go/vt/wrangler/resharder.go index 4a3ae9d8bb0..a81c3e8d598 100644 --- a/go/vt/wrangler/resharder.go +++ b/go/vt/wrangler/resharder.go @@ -30,16 +30,16 @@ import ( "vitess.io/vitess/go/vt/vtctl/workflow" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/key" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" ) type resharder struct { @@ -312,7 +312,7 @@ func (rs *resharder) createStreams(ctx context.Context) error { err := rs.forAll(rs.targetShards, func(target *topo.ShardInfo) error { targetPrimary := rs.targetPrimaries[target.ShardName()] - ig := 
vreplication.NewInsertGenerator(binlogplayer.BlpStopped, targetPrimary.DbName()) + ig := vreplication.NewInsertGenerator(binlogdatapb.VReplicationWorkflowState_Stopped, targetPrimary.DbName()) // copy excludeRules to prevent data race. copyExcludeRules := append([]*binlogdatapb.Rule(nil), excludeRules...) diff --git a/go/vt/wrangler/resharder_env_test.go b/go/vt/wrangler/resharder_env_test.go index 77baae00761..6f4df0a4d52 100644 --- a/go/vt/wrangler/resharder_env_test.go +++ b/go/vt/wrangler/resharder_env_test.go @@ -82,14 +82,14 @@ func initTopo(t *testing.T, topo *topo.Server, keyspace string, sources, targets topo.ValidateSrvKeyspace(ctx, keyspace, strings.Join(cells, ",")) } -func newTestResharderEnv(t *testing.T, sources, targets []string) *testResharderEnv { +func newTestResharderEnv(t *testing.T, ctx context.Context, sources, targets []string) *testResharderEnv { env := &testResharderEnv{ keyspace: "ks", workflow: "resharderTest", sources: sources, targets: targets, tablets: make(map[int]*topodatapb.Tablet), - topoServ: memorytopo.NewServer("cell"), + topoServ: memorytopo.NewServer(ctx, "cell"), cell: "cell", tmc: newTestResharderTMClient(), } diff --git a/go/vt/wrangler/resharder_test.go b/go/vt/wrangler/resharder_test.go index 0b3f229bd14..38fff05884d 100644 --- a/go/vt/wrangler/resharder_test.go +++ b/go/vt/wrangler/resharder_test.go @@ -17,14 +17,13 @@ limitations under the License. 
package wrangler import ( + "context" "fmt" "strings" "testing" "github.com/stretchr/testify/require" - "context" - "github.com/stretchr/testify/assert" "vitess.io/vitess/go/sqltypes" @@ -39,7 +38,9 @@ const insertPrefix = `/insert into _vt.vreplication\(workflow, source, pos, max_ const eol = "$" func TestResharderOneToMany(t *testing.T) { - env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestResharderEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -73,7 +74,7 @@ func TestResharderOneToMany(t *testing.T) { testCases = append(testCases, newTestCase("", "replica,rdonly")) for _, tc := range testCases { - env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) + env := newTestResharderEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}) schm := &tabletmanagerdatapb.SchemaDefinition{ TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{ @@ -115,7 +116,9 @@ func TestResharderOneToMany(t *testing.T) { } func TestResharderManyToOne(t *testing.T) { - env := newTestResharderEnv(t, []string{"-80", "80-"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestResharderEnv(t, ctx, []string{"-80", "80-"}, []string{"0"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -148,7 +151,9 @@ func TestResharderManyToOne(t *testing.T) { } func TestResharderManyToMany(t *testing.T) { - env := newTestResharderEnv(t, []string{"-40", "40-"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestResharderEnv(t, ctx, []string{"-40", "40-"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -191,7 +196,9 @@ func TestResharderManyToMany(t *testing.T) { // TestResharderOneRefTable tests the case where there's one ref table, but no 
stream for it. // This means that the table is being updated manually. func TestResharderOneRefTable(t *testing.T) { - env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestResharderEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -243,7 +250,9 @@ func TestResharderOneRefTable(t *testing.T) { // TestReshardStopFlags tests the flags -stop_started and -stop_after_copy func TestReshardStopFlags(t *testing.T) { - env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestResharderEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -294,7 +303,9 @@ func TestReshardStopFlags(t *testing.T) { // TestResharderOneRefStream tests the case where there's one ref table and an associated stream. func TestResharderOneRefStream(t *testing.T) { - env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestResharderEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -362,7 +373,9 @@ func TestResharderOneRefStream(t *testing.T) { // TestResharderNoRefStream tests the case where there's a stream, but it's not a reference. 
func TestResharderNoRefStream(t *testing.T) { - env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestResharderEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -438,7 +451,9 @@ func TestResharderNoRefStream(t *testing.T) { } func TestResharderCopySchema(t *testing.T) { - env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestResharderEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -478,7 +493,9 @@ func TestResharderCopySchema(t *testing.T) { } func TestResharderDupWorkflow(t *testing.T) { - env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestResharderEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -509,7 +526,9 @@ func TestResharderDupWorkflow(t *testing.T) { } func TestResharderServingState(t *testing.T) { - env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestResharderEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -551,7 +570,9 @@ func TestResharderServingState(t *testing.T) { } func TestResharderTargetAlreadyResharding(t *testing.T) { - env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestResharderEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -584,7 +605,9 @@ func 
TestResharderTargetAlreadyResharding(t *testing.T) { } func TestResharderUnnamedStream(t *testing.T) { - env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestResharderEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -632,7 +655,9 @@ func TestResharderUnnamedStream(t *testing.T) { } func TestResharderMismatchedRefStreams(t *testing.T) { - env := newTestResharderEnv(t, []string{"-80", "80-"}, []string{"0"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestResharderEnv(t, ctx, []string{"-80", "80-"}, []string{"0"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -699,7 +724,9 @@ func TestResharderMismatchedRefStreams(t *testing.T) { } func TestResharderTableNotInVSchema(t *testing.T) { - env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestResharderEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -736,7 +763,9 @@ func TestResharderTableNotInVSchema(t *testing.T) { } func TestResharderMixedTablesOrder1(t *testing.T) { - env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestResharderEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -803,7 +832,9 @@ func TestResharderMixedTablesOrder1(t *testing.T) { } func TestResharderMixedTablesOrder2(t *testing.T) { - env := newTestResharderEnv(t, []string{"0"}, []string{"-80", "80-"}) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestResharderEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}) defer env.close() 
schm := &tabletmanagerdatapb.SchemaDefinition{ diff --git a/go/vt/wrangler/schema.go b/go/vt/wrangler/schema.go index f4e7ad5e2d2..84bc078f240 100644 --- a/go/vt/wrangler/schema.go +++ b/go/vt/wrangler/schema.go @@ -39,11 +39,6 @@ import ( vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) -const ( - // DefaultWaitReplicasTimeout is the default value for waitReplicasTimeout, which is used when calling method CopySchemaShardFromShard. - DefaultWaitReplicasTimeout = 10 * time.Second -) - // helper method to asynchronously diff a schema func (wr *Wrangler) diffSchema(ctx context.Context, primarySchema *tabletmanagerdatapb.SchemaDefinition, primaryTabletAlias, alias *topodatapb.TabletAlias, excludeTables []string, includeViews bool, wg *sync.WaitGroup, er concurrency.ErrorRecorder) { defer wg.Done() diff --git a/go/vt/wrangler/stream_migrater_test.go b/go/vt/wrangler/stream_migrater_test.go index 7a835590239..f912e492d7a 100644 --- a/go/vt/wrangler/stream_migrater_test.go +++ b/go/vt/wrangler/stream_migrater_test.go @@ -169,7 +169,7 @@ func TestStreamMigrateMainflow(t *testing.T) { tme.expectCreateReverseVReplication() tme.expectStartReverseVReplication() tme.expectFrozenTargetVReplication() - if _, _, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false); err != nil { + if _, _, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false, false); err != nil { t.Fatal(err) } @@ -345,7 +345,7 @@ func TestStreamMigrateTwoStreams(t *testing.T) { tme.expectStartReverseVReplication() tme.expectFrozenTargetVReplication() - if _, _, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false); err != nil { + if _, _, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false, false); err != nil { t.Fatal(err) } @@ -480,7 +480,7 @@ func TestStreamMigrateOneToMany(t *testing.T) { 
tme.expectStartReverseVReplication() tme.expectFrozenTargetVReplication() - if _, _, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false); err != nil { + if _, _, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false, false); err != nil { t.Fatal(err) } @@ -618,7 +618,7 @@ func TestStreamMigrateManyToOne(t *testing.T) { tme.expectStartReverseVReplication() tme.expectFrozenTargetVReplication() - if _, _, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false); err != nil { + if _, _, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false, false); err != nil { t.Fatal(err) } @@ -810,7 +810,7 @@ func TestStreamMigrateSyncSuccess(t *testing.T) { tme.expectStartReverseVReplication() tme.expectFrozenTargetVReplication() - if _, _, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false); err != nil { + if _, _, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false, false); err != nil { t.Fatal(err) } @@ -941,7 +941,7 @@ func TestStreamMigrateSyncFail(t *testing.T) { tme.expectCancelMigration() - _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false) + _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false, false) want := "does not match" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("SwitchWrites err: %v, want %s", err, want) @@ -1037,7 +1037,7 @@ func TestStreamMigrateCancel(t *testing.T) { } cancelMigration() - _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false) + _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false, false) want := "intentionally failed" if err == nil 
|| !strings.Contains(err.Error(), want) { t.Errorf("SwitchWrites err: %v, want %s", err, want) @@ -1107,8 +1107,8 @@ func TestStreamMigrateStoppedStreams(t *testing.T) { } stopStreams() - _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false) - want := "cannot migrate until all streams are running: 0: 10" + _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false, false) + want := "failed to migrate the workflow streams: cannot migrate until all streams are running: 0: 10" if err == nil || err.Error() != want { t.Errorf("SwitchWrites err: %v, want %v", err, want) } @@ -1175,7 +1175,7 @@ func TestStreamMigrateCancelWithStoppedStreams(t *testing.T) { tme.expectCancelMigration() - _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, true, false, false, false) + _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, true, false, false, false, false) if err != nil { t.Fatal(err) } @@ -1237,8 +1237,8 @@ func TestStreamMigrateStillCopying(t *testing.T) { } stopStreams() - _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false) - want := "cannot migrate while vreplication streams in source shards are still copying: 0" + _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false, false) + want := "failed to migrate the workflow streams: cannot migrate while vreplication streams in source shards are still copying: 0" if err == nil || err.Error() != want { t.Errorf("SwitchWrites err: %v, want %v", err, want) } @@ -1299,8 +1299,8 @@ func TestStreamMigrateEmptyWorkflow(t *testing.T) { } stopStreams() - _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false) - want := "VReplication streams must have named workflows for migration: shard: ks:0, stream: 1" + _, _, err = 
tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false, false) + want := "failed to migrate the workflow streams: VReplication streams must have named workflows for migration: shard: ks:0, stream: 1" if err == nil || err.Error() != want { t.Errorf("SwitchWrites err: %v, want %v", err, want) } @@ -1361,8 +1361,8 @@ func TestStreamMigrateDupWorkflow(t *testing.T) { } stopStreams() - _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false) - want := "VReplication stream has the same workflow name as the resharding workflow: shard: ks:0, stream: 1" + _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false, false) + want := "failed to migrate the workflow streams: VReplication stream has the same workflow name as the resharding workflow: shard: ks:0, stream: 1" if err == nil || err.Error() != want { t.Errorf("SwitchWrites err: %v, want %v", err, want) } @@ -1434,7 +1434,7 @@ func TestStreamMigrateStreamsMismatch(t *testing.T) { } stopStreams() - _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false) + _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false, false) want := "streams are mismatched across source shards" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("SwitchWrites err: %v, must contain %v", err, want) diff --git a/go/vt/wrangler/switcher.go b/go/vt/wrangler/switcher.go index e46b62c57e8..0e1f33b90ea 100644 --- a/go/vt/wrangler/switcher.go +++ b/go/vt/wrangler/switcher.go @@ -48,6 +48,10 @@ func (r *switcher) dropSourceDeniedTables(ctx context.Context) error { return r.ts.dropSourceDeniedTables(ctx) } +func (r *switcher) dropTargetDeniedTables(ctx context.Context) error { + return r.ts.dropTargetDeniedTables(ctx) +} + func (r *switcher) validateWorkflowHasCompleted(ctx context.Context) error { return 
r.ts.validateWorkflowHasCompleted(ctx) } @@ -140,3 +144,11 @@ func (r *switcher) dropTargetShards(ctx context.Context) error { func (r *switcher) logs() *[]string { return nil } + +func (r *switcher) resetSequences(ctx context.Context) error { + return r.ts.resetSequences(ctx) +} + +func (r *switcher) initializeTargetSequences(ctx context.Context, sequencesByBackingTable map[string]*sequenceMetadata) error { + return r.ts.initializeTargetSequences(ctx, sequencesByBackingTable) +} diff --git a/go/vt/wrangler/switcher_dry_run.go b/go/vt/wrangler/switcher_dry_run.go index 4ef0ea1d026..7b21ac65fe0 100644 --- a/go/vt/wrangler/switcher_dry_run.go +++ b/go/vt/wrangler/switcher_dry_run.go @@ -19,11 +19,13 @@ package wrangler import ( "context" "fmt" + "slices" "sort" "strings" "time" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/maps2" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/vtctl/workflow" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -166,7 +168,7 @@ func (dr *switcherDryRun) migrateStreams(ctx context.Context, sm *workflow.Strea dr.drLog.Log(fmt.Sprintf("Migrate streams to %s:", dr.ts.TargetKeyspaceName())) for key, streams := range sm.Streams() { for _, stream := range streams { - logs = append(logs, fmt.Sprintf("\tShard %s Id %d, Workflow %s, Pos %s, BinLogSource %v", key, stream.ID, stream.Workflow, mysql.EncodePosition(stream.Position), stream.BinlogSource)) + logs = append(logs, fmt.Sprintf("\tShard %s Id %d, Workflow %s, Pos %s, BinLogSource %v", key, stream.ID, stream.Workflow, replication.EncodePosition(stream.Position), stream.BinlogSource)) } } if len(logs) > 0 { @@ -178,7 +180,7 @@ func (dr *switcherDryRun) migrateStreams(ctx context.Context, sm *workflow.Strea tabletStreams := templates for _, vrs := range tabletStreams { logs = append(logs, fmt.Sprintf("\t Keyspace %s, Shard %s, Tablet %d, Workflow %s, Id %d, Pos %v, BinLogSource %s", - vrs.BinlogSource.Keyspace, vrs.BinlogSource.Shard, 
target.GetPrimary().Alias.Uid, vrs.Workflow, vrs.ID, mysql.EncodePosition(vrs.Position), vrs.BinlogSource)) + vrs.BinlogSource.Keyspace, vrs.BinlogSource.Shard, target.GetPrimary().Alias.Uid, vrs.Workflow, vrs.ID, replication.EncodePosition(vrs.Position), vrs.BinlogSource)) } } if len(logs) > 0 { @@ -327,6 +329,18 @@ func (dr *switcherDryRun) dropSourceDeniedTables(ctx context.Context) error { return nil } +func (dr *switcherDryRun) dropTargetDeniedTables(ctx context.Context) error { + logs := make([]string, 0) + for _, si := range dr.ts.TargetShards() { + logs = append(logs, fmt.Sprintf("\tKeyspace %s Shard %s Tablet %d", si.Keyspace(), si.ShardName(), si.PrimaryAlias.Uid)) + } + if len(logs) > 0 { + dr.drLog.Log(fmt.Sprintf("Denied tables [%s] will be removed from:", strings.Join(dr.ts.Tables(), ","))) + dr.drLog.LogSlice(logs) + } + return nil +} + func (dr *switcherDryRun) logs() *[]string { return &dr.drLog.logs } @@ -370,3 +384,24 @@ func (dr *switcherDryRun) dropTargetShards(ctx context.Context) error { return nil } + +func (dr *switcherDryRun) resetSequences(ctx context.Context) error { + var err error + mustReset := false + if mustReset, err = dr.ts.mustResetSequences(ctx); err != nil { + return err + } + if !mustReset { + return nil + } + dr.drLog.Log("The sequence caches will be reset on the source since sequence tables are being moved") + return nil +} + +func (dr *switcherDryRun) initializeTargetSequences(ctx context.Context, sequencesByBackingTable map[string]*sequenceMetadata) error { + sortedBackingTableNames := maps2.Keys(sequencesByBackingTable) + slices.Sort(sortedBackingTableNames) + dr.drLog.Log(fmt.Sprintf("The following sequence backing tables used by tables being moved will be initialized: %s", + strings.Join(sortedBackingTableNames, ","))) + return nil +} diff --git a/go/vt/wrangler/switcher_interface.go b/go/vt/wrangler/switcher_interface.go index 26bd5f53a63..bae165ec2ea 100644 --- a/go/vt/wrangler/switcher_interface.go +++ 
b/go/vt/wrangler/switcher_interface.go @@ -44,6 +44,7 @@ type iswitcher interface { removeSourceTables(ctx context.Context, removalType workflow.TableRemovalType) error dropSourceShards(ctx context.Context) error dropSourceDeniedTables(ctx context.Context) error + dropTargetDeniedTables(ctx context.Context) error freezeTargetVReplication(ctx context.Context) error dropSourceReverseVReplicationStreams(ctx context.Context) error dropTargetVReplicationStreams(ctx context.Context) error @@ -52,5 +53,7 @@ type iswitcher interface { deleteRoutingRules(ctx context.Context) error deleteShardRoutingRules(ctx context.Context) error addParticipatingTablesToKeyspace(ctx context.Context, keyspace, tableSpecs string) error + resetSequences(ctx context.Context) error + initializeTargetSequences(ctx context.Context, sequencesByBackingTable map[string]*sequenceMetadata) error logs() *[]string } diff --git a/go/vt/wrangler/tablet.go b/go/vt/wrangler/tablet.go index d52a1676e97..17c547ade58 100644 --- a/go/vt/wrangler/tablet.go +++ b/go/vt/wrangler/tablet.go @@ -21,8 +21,6 @@ import ( "fmt" "time" - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" @@ -105,7 +103,7 @@ func (wr *Wrangler) ChangeTabletType(ctx context.Context, tabletAlias *topodatap // We should clone the tablet and change its type to the expected type before checking the durability rules // Since we want to check the durability rules for the desired state and not before we make that change - expectedTablet := proto.Clone(ti.Tablet).(*topodatapb.Tablet) + expectedTablet := ti.Tablet.CloneVT() expectedTablet.Type = tabletType semiSync, err := wr.shouldSendSemiSyncAck(ctx, expectedTablet) if err != nil { diff --git a/go/vt/wrangler/tablet_test.go b/go/vt/wrangler/tablet_test.go index 8aa02864dd3..1350b6b574c 100644 --- a/go/vt/wrangler/tablet_test.go +++ b/go/vt/wrangler/tablet_test.go @@ -31,8 +31,11 @@ import ( // shard name to 
lower case when it's a keyrange, and populates // KeyRange properly. func TestInitTabletShardConversion(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cell := "cell1" - ts := memorytopo.NewServer(cell) + ts := memorytopo.NewServer(ctx, cell) wr := New(logutil.NewConsoleLogger(), ts, nil) tablet := &topodatapb.Tablet{ @@ -62,8 +65,11 @@ func TestInitTabletShardConversion(t *testing.T) { // TestDeleteTabletBasic tests delete of non-primary tablet func TestDeleteTabletBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cell := "cell1" - ts := memorytopo.NewServer(cell) + ts := memorytopo.NewServer(ctx, cell) wr := New(logutil.NewConsoleLogger(), ts, nil) tablet := &topodatapb.Tablet{ @@ -91,8 +97,11 @@ func TestDeleteTabletBasic(t *testing.T) { // TestDeleteTabletTruePrimary tests that you can delete a true primary tablet // only if allowPrimary is set to true func TestDeleteTabletTruePrimary(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cell := "cell1" - ts := memorytopo.NewServer(cell) + ts := memorytopo.NewServer(ctx, cell) wr := New(logutil.NewConsoleLogger(), ts, nil) tablet := &topodatapb.Tablet{ @@ -135,8 +144,11 @@ func TestDeleteTabletTruePrimary(t *testing.T) { // TestDeleteTabletFalsePrimary tests that you can delete a false primary tablet // with allowPrimary set to false func TestDeleteTabletFalsePrimary(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cell := "cell1" - ts := memorytopo.NewServer(cell) + ts := memorytopo.NewServer(ctx, cell) wr := New(logutil.NewConsoleLogger(), ts, nil) tablet1 := &topodatapb.Tablet{ @@ -184,8 +196,11 @@ func TestDeleteTabletFalsePrimary(t *testing.T) { // TestDeleteTabletShardNonExisting tests that you can delete a true primary // tablet if a shard does not exists anymore. 
func TestDeleteTabletShardNonExisting(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cell := "cell1" - ts := memorytopo.NewServer(cell) + ts := memorytopo.NewServer(ctx, cell) wr := New(logutil.NewConsoleLogger(), ts, nil) tablet := &topodatapb.Tablet{ diff --git a/go/vt/wrangler/testlib/apply_schema_flaky_test.go b/go/vt/wrangler/testlib/apply_schema_flaky_test.go deleted file mode 100644 index acf429a658d..00000000000 --- a/go/vt/wrangler/testlib/apply_schema_flaky_test.go +++ /dev/null @@ -1,125 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package testlib - -import ( - "context" - "strings" - "testing" - "time" - - "vitess.io/vitess/go/mysql/fakesqldb" - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/discovery" - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/mysqlctl/tmutils" - "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/vttablet/tmclient" - "vitess.io/vitess/go/vt/wrangler" - - tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -// TestApplySchema_AllowLongUnavailability is an integration test for the -// --allow_long_unavailability flag of vtctl ApplySchema. -// Only if the flag is specified, potentially long running schema changes are -// allowed. 
-func TestApplySchema_AllowLongUnavailability(t *testing.T) { - delay := discovery.GetTabletPickerRetryDelay() - defer func() { - discovery.SetTabletPickerRetryDelay(delay) - }() - discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - - cell := "cell1" - db := fakesqldb.New(t) - defer db.Close() - ts := memorytopo.NewServer(cell) - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) - defer vp.Close() - - if err := ts.CreateKeyspace(context.Background(), "ks", &topodatapb.Keyspace{}); err != nil { - t.Fatalf("CreateKeyspace failed: %v", err) - } - - beforeSchema := &tabletmanagerdatapb.SchemaDefinition{ - DatabaseSchema: "CREATE DATABASE `{{.DatabaseName}}` /*!40100 DEFAULT CHARACTER SET utf8 */", - TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ - { - Name: "table1", - Schema: "CREATE TABLE `table1` (\n `id` bigint(20) NOT NULL AUTO_INCREMENT,\n `msg` varchar(64) DEFAULT NULL,\n `keyspace_id` bigint(20) unsigned NOT NULL,\n PRIMARY KEY (`id`),\n KEY `by_msg` (`msg`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8", - Type: tmutils.TableBaseTable, - RowCount: 3000000, - }, - }, - } - afterSchema := &tabletmanagerdatapb.SchemaDefinition{ - DatabaseSchema: "CREATE DATABASE `{{.DatabaseName}}` /*!40100 DEFAULT CHARACTER SET utf8 */", - TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ - { - Name: "table1", - Schema: "CREATE TABLE `table1` (\n `id` bigint(20) NOT NULL AUTO_INCREMENT,\n `msg` varchar(64) DEFAULT NULL,\n `keyspace_id` bigint(20) unsigned NOT NULL,\n `id` bigint(20),\n PRIMARY KEY (`id`),\n KEY `by_msg` (`msg`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8", - Type: tmutils.TableBaseTable, - RowCount: 3000000, - }, - }, - } - preflightSchemaChanges := []*tabletmanagerdatapb.SchemaChangeResult{ - { - BeforeSchema: beforeSchema, - AfterSchema: afterSchema, - }, - } - - tShard1 := NewFakeTablet(t, wr, cell, 0, - topodatapb.TabletType_PRIMARY, db, TabletKeyspaceShard(t, "ks", "-80")) - 
tShard2 := NewFakeTablet(t, wr, cell, 1, - topodatapb.TabletType_PRIMARY, db, TabletKeyspaceShard(t, "ks", "80-")) - for _, ft := range []*FakeTablet{tShard1, tShard2} { - ft.StartActionLoop(t, wr) - defer ft.StopActionLoop(t) - - ft.FakeMysqlDaemon.Schema = beforeSchema - ft.FakeMysqlDaemon.PreflightSchemaChangeResult = preflightSchemaChanges - } - - changeToDb := "USE `ks`" - addColumn := "ALTER TABLE table1 ADD COLUMN new_id bigint(20)" - db.AddQuery(changeToDb, &sqltypes.Result{}) - db.AddQuery(addColumn, &sqltypes.Result{}) - - // First ApplySchema fails because the table is very big and -allow_long_unavailability is missing. - if err := vp.Run([]string{"ApplySchema", "--sql", addColumn, "ks"}); err == nil { - t.Fatal("ApplySchema should have failed but did not.") - } else if !strings.Contains(err.Error(), "big schema change detected") || - !strings.Contains(strings.ToLower(err.Error()), "alter table table1") { - t.Fatalf("ApplySchema failed with wrong error. got: %v", err) - } - - // Second ApplySchema succeeds because -allow_long_unavailability is set. - if err := vp.Run([]string{"ApplySchema", "--allow_long_unavailability", "--sql", addColumn, "ks"}); err != nil { - t.Fatalf("ApplySchema failed: %v", err) - } - if count := db.GetQueryCalledNum(changeToDb); count != 2 { - t.Fatalf("ApplySchema: unexpected call count. Query: %v got: %v want: %v", changeToDb, count, 2) - } - if count := db.GetQueryCalledNum(addColumn); count != 2 { - t.Fatalf("ApplySchema: unexpected call count. 
Query: %v got: %v want: %v", addColumn, count, 2) - } -} diff --git a/go/vt/wrangler/testlib/backup_test.go b/go/vt/wrangler/testlib/backup_test.go index 134c206df64..494fe56589b 100644 --- a/go/vt/wrangler/testlib/backup_test.go +++ b/go/vt/wrangler/testlib/backup_test.go @@ -27,6 +27,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/mysql" @@ -83,10 +85,11 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error { discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) // Initialize our environment - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() - ts := memorytopo.NewServer("cell1", "cell2") + ts := memorytopo.NewServer(ctx, "cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -145,9 +148,9 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error { primary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, db) primary.FakeMysqlDaemon.ReadOnly = false primary.FakeMysqlDaemon.Replicating = false - primary.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + primary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 457, @@ -165,9 +168,9 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error { sourceTablet.FakeMysqlDaemon.ReadOnly = true sourceTablet.FakeMysqlDaemon.Replicating = true sourceTablet.FakeMysqlDaemon.SetReplicationSourceInputs = []string{fmt.Sprintf("%s:%d", primary.Tablet.MysqlHostname, primary.Tablet.MysqlPort)} - sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition = 
mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 457, @@ -185,9 +188,9 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error { "STOP SLAVE", "START SLAVE", // These commands come from SetReplicationSource RPC called - // to set the correct primary and semi-sync after Backup has concluded + // to set the correct primary and semi-sync after Backup has concluded. + // Since the primary hasn't changed, we only restart replication after fixing semi-sync. "STOP SLAVE", - "FAKE SET MASTER", "START SLAVE", } sourceTablet.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*sqltypes.Result{ @@ -218,9 +221,9 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error { destTablet := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, db) destTablet.FakeMysqlDaemon.ReadOnly = true destTablet.FakeMysqlDaemon.Replicating = true - destTablet.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + destTablet.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 457, @@ -260,7 +263,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error { RelayLogInfoPath: path.Join(root, "relay-log.info"), } - err = destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* backupTime */) + err = destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* backupTime */, time.Time{} /* restoreToTimestamp */, "") if err != nil { return err } @@ -299,7 +302,7 @@ func testBackupRestore(t *testing.T, cDetails 
*compressionDetails) error { primary.FakeMysqlDaemon.SetReplicationPositionPos = primary.FakeMysqlDaemon.CurrentPrimaryPosition // restore primary from latest backup - require.NoError(t, primary.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */), + require.NoError(t, primary.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, ""), "RestoreData failed") // tablet was created as PRIMARY, so it's baseTabletType is PRIMARY assert.Equal(t, topodatapb.TabletType_PRIMARY, primary.Tablet.Type) @@ -315,7 +318,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error { } // Test restore with the backup timestamp - require.NoError(t, primary.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, backupTime), + require.NoError(t, primary.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, backupTime, time.Time{} /* restoreToTimestamp */, ""), "RestoreData with backup timestamp failed") assert.Equal(t, topodatapb.TabletType_PRIMARY, primary.Tablet.Type) assert.False(t, primary.FakeMysqlDaemon.Replicating) @@ -334,10 +337,11 @@ func TestBackupRestoreLagged(t *testing.T) { discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) // Initialize our environment - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() - ts := memorytopo.NewServer("cell1", "cell2") + ts := memorytopo.NewServer(ctx, "cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -382,9 +386,9 @@ func TestBackupRestoreLagged(t *testing.T) { primary := 
NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, db) primary.FakeMysqlDaemon.ReadOnly = false primary.FakeMysqlDaemon.Replicating = false - primary.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + primary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 457, @@ -401,9 +405,9 @@ func TestBackupRestoreLagged(t *testing.T) { sourceTablet := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, db) sourceTablet.FakeMysqlDaemon.ReadOnly = true sourceTablet.FakeMysqlDaemon.Replicating = true - sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 456, @@ -422,9 +426,9 @@ func TestBackupRestoreLagged(t *testing.T) { "STOP SLAVE", "START SLAVE", // These commands come from SetReplicationSource RPC called - // to set the correct primary and semi-sync after Backup has concluded + // to set the correct primary and semi-sync after Backup has concluded. + // Since the primary hasn't changed, we only restart replication after fixing semi-sync. 
"STOP SLAVE", - "FAKE SET MASTER", "START SLAVE", } sourceTablet.StartActionLoop(t, wr) @@ -443,9 +447,9 @@ func TestBackupRestoreLagged(t *testing.T) { timer := time.NewTicker(1 * time.Second) <-timer.C - sourceTablet.FakeMysqlDaemon.CurrentPrimaryPositionLocked(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + sourceTablet.FakeMysqlDaemon.CurrentPrimaryPositionLocked(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 457, @@ -471,9 +475,9 @@ func TestBackupRestoreLagged(t *testing.T) { destTablet := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, db) destTablet.FakeMysqlDaemon.ReadOnly = true destTablet.FakeMysqlDaemon.Replicating = true - destTablet.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + destTablet.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 456, @@ -515,14 +519,14 @@ func TestBackupRestoreLagged(t *testing.T) { errCh = make(chan error, 1) go func(ctx context.Context, tablet *FakeTablet) { - errCh <- tablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */) + errCh <- tablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, "") }(ctx, destTablet) timer = time.NewTicker(1 * time.Second) <-timer.C - destTablet.FakeMysqlDaemon.CurrentPrimaryPositionLocked(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + destTablet.FakeMysqlDaemon.CurrentPrimaryPositionLocked(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 457, @@ -552,10 
+556,11 @@ func TestRestoreUnreachablePrimary(t *testing.T) { discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) // Initialize our environment - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() - ts := memorytopo.NewServer("cell1") + ts := memorytopo.NewServer(ctx, "cell1") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -600,9 +605,9 @@ func TestRestoreUnreachablePrimary(t *testing.T) { primary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, db) primary.FakeMysqlDaemon.ReadOnly = false primary.FakeMysqlDaemon.Replicating = false - primary.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + primary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 457, @@ -618,9 +623,9 @@ func TestRestoreUnreachablePrimary(t *testing.T) { sourceTablet := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, db) sourceTablet.FakeMysqlDaemon.ReadOnly = true sourceTablet.FakeMysqlDaemon.Replicating = true - sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 457, @@ -639,9 +644,9 @@ func TestRestoreUnreachablePrimary(t *testing.T) { "STOP SLAVE", "START SLAVE", // These commands come from SetReplicationSource RPC called - // to set the correct primary and semi-sync after Backup has concluded + // to set the correct primary and semi-sync after Backup has concluded. 
+ // Since the primary hasn't changed, we only restart replication after fixing semi-sync. "STOP SLAVE", - "FAKE SET MASTER", "START SLAVE", } sourceTablet.StartActionLoop(t, wr) @@ -660,9 +665,9 @@ func TestRestoreUnreachablePrimary(t *testing.T) { destTablet := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, db) destTablet.FakeMysqlDaemon.ReadOnly = true destTablet.FakeMysqlDaemon.Replicating = true - destTablet.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + destTablet.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 457, @@ -708,7 +713,7 @@ func TestRestoreUnreachablePrimary(t *testing.T) { // set a short timeout so that we don't have to wait 30 seconds topo.RemoteOperationTimeout = 2 * time.Second // Restore should still succeed - require.NoError(t, destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */)) + require.NoError(t, destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, "")) // verify the full status require.NoError(t, destTablet.FakeMysqlDaemon.CheckSuperQueryList(), "destTablet.FakeMysqlDaemon.CheckSuperQueryList failed") assert.True(t, destTablet.FakeMysqlDaemon.Replicating) @@ -726,10 +731,11 @@ func TestDisableActiveReparents(t *testing.T) { discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) // Initialize our environment - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() - ts := memorytopo.NewServer("cell1", "cell2") + ts := memorytopo.NewServer(ctx, "cell1", "cell2") wr := 
wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -774,9 +780,9 @@ func TestDisableActiveReparents(t *testing.T) { primary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, db) primary.FakeMysqlDaemon.ReadOnly = false primary.FakeMysqlDaemon.Replicating = false - primary.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + primary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 457, @@ -793,9 +799,9 @@ func TestDisableActiveReparents(t *testing.T) { sourceTablet := NewFakeTablet(t, wr, "cell1", 1, topodatapb.TabletType_REPLICA, db) sourceTablet.FakeMysqlDaemon.ReadOnly = true sourceTablet.FakeMysqlDaemon.Replicating = true - sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 457, @@ -826,9 +832,9 @@ func TestDisableActiveReparents(t *testing.T) { destTablet := NewFakeTablet(t, wr, "cell1", 2, topodatapb.TabletType_REPLICA, db) destTablet.FakeMysqlDaemon.ReadOnly = true destTablet.FakeMysqlDaemon.Replicating = true - destTablet.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + destTablet.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 457, @@ -861,7 +867,7 @@ func TestDisableActiveReparents(t *testing.T) { RelayLogInfoPath: path.Join(root, "relay-log.info"), } - require.NoError(t, destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval 
*/, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */)) + require.NoError(t, destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, "")) // verify the full status require.NoError(t, destTablet.FakeMysqlDaemon.CheckSuperQueryList(), "destTablet.FakeMysqlDaemon.CheckSuperQueryList failed") assert.False(t, destTablet.FakeMysqlDaemon.Replicating) diff --git a/go/vt/wrangler/testlib/copy_schema_shard_test.go b/go/vt/wrangler/testlib/copy_schema_shard_test.go index cb2b34e3fa3..70f55f14bd0 100644 --- a/go/vt/wrangler/testlib/copy_schema_shard_test.go +++ b/go/vt/wrangler/testlib/copy_schema_shard_test.go @@ -53,7 +53,9 @@ func copySchema(t *testing.T, useShardAsSource bool) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) vp := NewVtctlPipe(t, ts) defer vp.Close() diff --git a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go index 859cd606fdb..99cc1839186 100644 --- a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go @@ -25,6 +25,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sets" "vitess.io/vitess/go/vt/discovery" @@ -45,7 +47,9 @@ func TestEmergencyReparentShard(t *testing.T) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := 
memorytopo.NewServer(ctx, "cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -58,34 +62,34 @@ func TestEmergencyReparentShard(t *testing.T) { reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "test_keyspace", "semi_sync") oldPrimary.FakeMysqlDaemon.Replicating = false - oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 456, }, }, } - currentPrimaryFilePosition, _ := mysql.ParseFilePosGTIDSet("mariadb-bin.000010:456") - oldPrimary.FakeMysqlDaemon.CurrentSourceFilePosition = mysql.Position{ + currentPrimaryFilePosition, _ := replication.ParseFilePosGTIDSet("mariadb-bin.000010:456") + oldPrimary.FakeMysqlDaemon.CurrentSourceFilePosition = replication.Position{ GTIDSet: currentPrimaryFilePosition, } // new primary newPrimary.FakeMysqlDaemon.ReadOnly = true newPrimary.FakeMysqlDaemon.Replicating = true - newPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + newPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 456, }, }, } - newPrimaryRelayLogPos, _ := mysql.ParseFilePosGTIDSet("relay-bin.000004:456") - newPrimary.FakeMysqlDaemon.CurrentSourceFilePosition = mysql.Position{ + newPrimaryRelayLogPos, _ := replication.ParseFilePosGTIDSet("relay-bin.000004:456") + newPrimary.FakeMysqlDaemon.CurrentSourceFilePosition = replication.Position{ GTIDSet: newPrimaryRelayLogPos, } newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = append(newPrimary.FakeMysqlDaemon.WaitPrimaryPositions, 
newPrimary.FakeMysqlDaemon.CurrentSourceFilePosition) @@ -93,9 +97,9 @@ func TestEmergencyReparentShard(t *testing.T) { "STOP SLAVE IO_THREAD", "SUBINSERT INTO _vt.reparent_journal (time_created_ns, action_name, primary_alias, replication_position) VALUES", } - newPrimary.FakeMysqlDaemon.PromoteResult = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + newPrimary.FakeMysqlDaemon.PromoteResult = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 456, @@ -118,17 +122,17 @@ func TestEmergencyReparentShard(t *testing.T) { // good replica 1 is replicating goodReplica1.FakeMysqlDaemon.ReadOnly = true goodReplica1.FakeMysqlDaemon.Replicating = true - goodReplica1.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + goodReplica1.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 455, }, }, } - goodReplica1RelayLogPos, _ := mysql.ParseFilePosGTIDSet("relay-bin.000004:455") - goodReplica1.FakeMysqlDaemon.CurrentSourceFilePosition = mysql.Position{ + goodReplica1RelayLogPos, _ := replication.ParseFilePosGTIDSet("relay-bin.000004:455") + goodReplica1.FakeMysqlDaemon.CurrentSourceFilePosition = replication.Position{ GTIDSet: goodReplica1RelayLogPos, } goodReplica1.FakeMysqlDaemon.WaitPrimaryPositions = append(goodReplica1.FakeMysqlDaemon.WaitPrimaryPositions, goodReplica1.FakeMysqlDaemon.CurrentSourceFilePosition) @@ -149,17 +153,17 @@ func TestEmergencyReparentShard(t *testing.T) { // good replica 2 is not replicating goodReplica2.FakeMysqlDaemon.ReadOnly = true goodReplica2.FakeMysqlDaemon.Replicating = false - goodReplica2.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + goodReplica2.FakeMysqlDaemon.CurrentPrimaryPosition = 
replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 454, }, }, } - goodReplica2RelayLogPos, _ := mysql.ParseFilePosGTIDSet("relay-bin.000004:454") - goodReplica2.FakeMysqlDaemon.CurrentSourceFilePosition = mysql.Position{ + goodReplica2RelayLogPos, _ := replication.ParseFilePosGTIDSet("relay-bin.000004:454") + goodReplica2.FakeMysqlDaemon.CurrentSourceFilePosition = replication.Position{ GTIDSet: goodReplica2RelayLogPos, } goodReplica2.FakeMysqlDaemon.WaitPrimaryPositions = append(goodReplica2.FakeMysqlDaemon.WaitPrimaryPositions, goodReplica2.FakeMysqlDaemon.CurrentSourceFilePosition) @@ -199,7 +203,7 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - ts := memorytopo.NewServer("cell1", "cell2") + ts := memorytopo.NewServer(ctx, "cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // Create a primary, a couple good replicas @@ -212,17 +216,17 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) { newPrimary.FakeMysqlDaemon.Replicating = true // It has transactions in its relay log, but not as many as // moreAdvancedReplica - newPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + newPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 456, }, }, } - newPrimaryRelayLogPos, _ := mysql.ParseFilePosGTIDSet("relay-bin.000004:456") - newPrimary.FakeMysqlDaemon.CurrentSourceFilePosition = mysql.Position{ + newPrimaryRelayLogPos, _ := replication.ParseFilePosGTIDSet("relay-bin.000004:456") + newPrimary.FakeMysqlDaemon.CurrentSourceFilePosition = replication.Position{ GTIDSet: newPrimaryRelayLogPos, } newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = 
append(newPrimary.FakeMysqlDaemon.WaitPrimaryPositions, newPrimary.FakeMysqlDaemon.CurrentSourceFilePosition) @@ -245,17 +249,17 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) { // more advanced replica moreAdvancedReplica.FakeMysqlDaemon.Replicating = true // relay log position is more advanced than desired new primary - moreAdvancedReplica.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 2: mysql.MariadbGTID{ + moreAdvancedReplica.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 2: replication.MariadbGTID{ Domain: 2, Server: 123, Sequence: 457, }, }, } - moreAdvancedReplicaLogPos, _ := mysql.ParseFilePosGTIDSet("relay-bin.000004:457") - moreAdvancedReplica.FakeMysqlDaemon.CurrentSourceFilePosition = mysql.Position{ + moreAdvancedReplicaLogPos, _ := replication.ParseFilePosGTIDSet("relay-bin.000004:457") + moreAdvancedReplica.FakeMysqlDaemon.CurrentSourceFilePosition = replication.Position{ GTIDSet: moreAdvancedReplicaLogPos, } moreAdvancedReplica.FakeMysqlDaemon.SetReplicationSourceInputs = append(moreAdvancedReplica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet)) @@ -275,7 +279,7 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) { defer moreAdvancedReplica.StopActionLoop(t) // run EmergencyReparentShard - err := wr.EmergencyReparentShard(ctx, newPrimary.Tablet.Keyspace, newPrimary.Tablet.Shard, newPrimary.Tablet.Alias, 10*time.Second, sets.New[string](), false) + err := wr.EmergencyReparentShard(ctx, newPrimary.Tablet.Keyspace, newPrimary.Tablet.Shard, newPrimary.Tablet.Alias, 10*time.Second, sets.New[string](), false, false) cancel() assert.NoError(t, err) diff --git a/go/vt/wrangler/testlib/external_reparent_test.go b/go/vt/wrangler/testlib/external_reparent_test.go index c105d1dfd94..c0152de3cf3 100644 --- 
a/go/vt/wrangler/testlib/external_reparent_test.go +++ b/go/vt/wrangler/testlib/external_reparent_test.go @@ -47,8 +47,9 @@ func TestTabletExternallyReparentedBasic(t *testing.T) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -139,8 +140,9 @@ func TestTabletExternallyReparentedToReplica(t *testing.T) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // Create an old primary, a new primary, two good replicas, one bad replica @@ -221,8 +223,9 @@ func TestTabletExternallyReparentedWithDifferentMysqlPort(t *testing.T) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // Create an old primary, a new primary, two good replicas, one bad replica @@ -313,8 +316,9 @@ func TestTabletExternallyReparentedContinueOnUnexpectedPrimary(t *testing.T) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") wr := 
wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // Create an old primary, a new primary, two good replicas, one bad replica @@ -398,8 +402,9 @@ func TestTabletExternallyReparentedRerun(t *testing.T) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // Create an old primary, a new primary, and a good replica. @@ -501,8 +506,9 @@ func TestRPCTabletExternallyReparentedDemotesPrimaryToConfiguredTabletType(t *te flag.Set("disable_active_reparents", "true") defer flag.Set("disable_active_reparents", "false") - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // Create an old primary and a new primary diff --git a/go/vt/wrangler/testlib/fake_tablet.go b/go/vt/wrangler/testlib/fake_tablet.go index 7d840e13c16..a1b30813f53 100644 --- a/go/vt/wrangler/testlib/fake_tablet.go +++ b/go/vt/wrangler/testlib/fake_tablet.go @@ -27,11 +27,9 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" "google.golang.org/grpc" "vitess.io/vitess/go/mysql/fakesqldb" - "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" @@ -132,12 +130,10 @@ func NewFakeTablet(t *testing.T, wr *wrangler.Wrangler, cell string, uid uint32, t.Fatalf("uid has to be between 0 and 99: %v", uid) } mysqlPort := int32(3300 + uid) - hostname, err := netutil.FullyQualifiedHostname() - require.NoError(t, err) tablet := &topodatapb.Tablet{ Alias: 
&topodatapb.TabletAlias{Cell: cell, Uid: uid}, - Hostname: hostname, - MysqlHostname: hostname, + Hostname: "127.0.0.1", + MysqlHostname: "127.0.0.1", PortMap: map[string]int32{ "vt": int32(8100 + uid), "grpc": int32(8200 + uid), @@ -180,7 +176,7 @@ func (ft *FakeTablet) StartActionLoop(t *testing.T, wr *wrangler.Wrangler) { // Listen on a random port for gRPC. var err error - ft.Listener, err = net.Listen("tcp", ":0") + ft.Listener, err = net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatalf("Cannot listen: %v", err) } @@ -189,7 +185,7 @@ func (ft *FakeTablet) StartActionLoop(t *testing.T, wr *wrangler.Wrangler) { // If needed, listen on a random port for HTTP. vtPort := ft.Tablet.PortMap["vt"] if ft.StartHTTPServer { - ft.HTTPListener, err = net.Listen("tcp", ":0") + ft.HTTPListener, err = net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatalf("Cannot listen on http port: %v", err) } @@ -202,6 +198,7 @@ func (ft *FakeTablet) StartActionLoop(t *testing.T, wr *wrangler.Wrangler) { } ft.Tablet.PortMap["vt"] = vtPort ft.Tablet.PortMap["grpc"] = gRPCPort + ft.Tablet.Hostname = "127.0.0.1" // Create a test tm on that port, and re-read the record // (it has new ports and IP). diff --git a/go/vt/wrangler/testlib/find_tablet_test.go b/go/vt/wrangler/testlib/find_tablet_test.go index 783e6258751..5b6f26f7056 100644 --- a/go/vt/wrangler/testlib/find_tablet_test.go +++ b/go/vt/wrangler/testlib/find_tablet_test.go @@ -17,9 +17,9 @@ limitations under the License. 
package testlib import ( - "testing" - "context" + "testing" + "time" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo" @@ -33,8 +33,9 @@ import ( ) func TestFindTablet(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // Create an old primary, two good replicas diff --git a/go/vt/wrangler/testlib/permissions_test.go b/go/vt/wrangler/testlib/permissions_test.go index 9d2a950c8e3..4a0e71512f3 100644 --- a/go/vt/wrangler/testlib/permissions_test.go +++ b/go/vt/wrangler/testlib/permissions_test.go @@ -44,8 +44,9 @@ func TestPermissions(t *testing.T) { discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) // Initialize our environment - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) vp := NewVtctlPipe(t, ts) defer vp.Close() diff --git a/go/vt/wrangler/testlib/planned_reparent_shard_test.go b/go/vt/wrangler/testlib/planned_reparent_shard_test.go index 4f626cca29f..72ce30a96fe 100644 --- a/go/vt/wrangler/testlib/planned_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/planned_reparent_shard_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/mysqlctl" "github.com/stretchr/testify/assert" @@ -47,7 +48,9 @@ func TestPlannedReparentShardNoPrimaryProvided(t *testing.T) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := 
memorytopo.NewServer(ctx, "cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -61,18 +64,18 @@ func TestPlannedReparentShardNoPrimaryProvided(t *testing.T) { // new primary newPrimary.FakeMysqlDaemon.ReadOnly = true newPrimary.FakeMysqlDaemon.Replicating = true - newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = []mysql.Position{{ - GTIDSet: mysql.MariadbGTIDSet{ - 7: mysql.MariadbGTID{ + newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = []replication.Position{{ + GTIDSet: replication.MariadbGTIDSet{ + 7: replication.MariadbGTID{ Domain: 7, Server: 123, Sequence: 990, }, }, }} - newPrimary.FakeMysqlDaemon.PromoteResult = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 7: mysql.MariadbGTID{ + newPrimary.FakeMysqlDaemon.PromoteResult = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 7: replication.MariadbGTID{ Domain: 7, Server: 456, Sequence: 991, @@ -161,7 +164,9 @@ func TestPlannedReparentShardNoError(t *testing.T) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -176,18 +181,18 @@ func TestPlannedReparentShardNoError(t *testing.T) { // new primary newPrimary.FakeMysqlDaemon.ReadOnly = true newPrimary.FakeMysqlDaemon.Replicating = true - newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = []mysql.Position{{ - GTIDSet: mysql.MariadbGTIDSet{ - 7: mysql.MariadbGTID{ + newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = []replication.Position{{ + GTIDSet: replication.MariadbGTIDSet{ + 7: replication.MariadbGTID{ Domain: 7, Server: 123, Sequence: 990, }, }, }} - newPrimary.FakeMysqlDaemon.PromoteResult = mysql.Position{ - GTIDSet: 
mysql.MariadbGTIDSet{ - 7: mysql.MariadbGTID{ + newPrimary.FakeMysqlDaemon.PromoteResult = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 7: replication.MariadbGTID{ Domain: 7, Server: 456, Sequence: 991, @@ -295,7 +300,9 @@ func TestPlannedReparentInitialization(t *testing.T) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -309,9 +316,9 @@ func TestPlannedReparentInitialization(t *testing.T) { // new primary newPrimary.FakeMysqlDaemon.ReadOnly = true newPrimary.FakeMysqlDaemon.Replicating = true - newPrimary.FakeMysqlDaemon.PromoteResult = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 7: mysql.MariadbGTID{ + newPrimary.FakeMysqlDaemon.PromoteResult = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 7: replication.MariadbGTID{ Domain: 7, Server: 456, Sequence: 991, @@ -379,7 +386,9 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -393,18 +402,18 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) { // new primary newPrimary.FakeMysqlDaemon.ReadOnly = true newPrimary.FakeMysqlDaemon.Replicating = true - newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = []mysql.Position{{ - GTIDSet: mysql.MariadbGTIDSet{ - 7: mysql.MariadbGTID{ + newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = []replication.Position{{ + GTIDSet: 
replication.MariadbGTIDSet{ + 7: replication.MariadbGTID{ Domain: 7, Server: 123, Sequence: 990, }, }, }} - newPrimary.FakeMysqlDaemon.PromoteResult = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 7: mysql.MariadbGTID{ + newPrimary.FakeMysqlDaemon.PromoteResult = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 7: replication.MariadbGTID{ Domain: 7, Server: 456, Sequence: 991, @@ -485,7 +494,9 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -500,18 +511,18 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) { newPrimary.FakeMysqlDaemon.TimeoutHook = func() error { return context.DeadlineExceeded } newPrimary.FakeMysqlDaemon.ReadOnly = true newPrimary.FakeMysqlDaemon.Replicating = true - newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = []mysql.Position{{ - GTIDSet: mysql.MariadbGTIDSet{ - 7: mysql.MariadbGTID{ + newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = []replication.Position{{ + GTIDSet: replication.MariadbGTIDSet{ + 7: replication.MariadbGTID{ Domain: 7, Server: 123, Sequence: 990, }, }, }} - newPrimary.FakeMysqlDaemon.PromoteResult = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 7: mysql.MariadbGTID{ + newPrimary.FakeMysqlDaemon.PromoteResult = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 7: replication.MariadbGTID{ Domain: 7, Server: 456, Sequence: 991, @@ -589,7 +600,9 @@ func TestPlannedReparentShardRelayLogError(t *testing.T) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := 
memorytopo.NewServer(ctx, "cell1") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -602,9 +615,9 @@ func TestPlannedReparentShardRelayLogError(t *testing.T) { primary.FakeMysqlDaemon.ReadOnly = false primary.FakeMysqlDaemon.Replicating = false primary.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica - primary.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 7: mysql.MariadbGTID{ + primary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 7: replication.MariadbGTID{ Domain: 7, Server: 123, Sequence: 990, @@ -631,9 +644,10 @@ func TestPlannedReparentShardRelayLogError(t *testing.T) { "STOP SLAVE", "RESET SLAVE", "START SLAVE", + "START SLAVE", } goodReplica1.StartActionLoop(t, wr) - goodReplica1.FakeMysqlDaemon.SetReplicationSourceError = errors.New("Slave failed to initialize relay log info structure from the repository") + goodReplica1.FakeMysqlDaemon.StopReplicationError = errors.New("Slave failed to initialize relay log info structure from the repository") defer goodReplica1.StopActionLoop(t) // run PlannedReparentShard @@ -666,7 +680,9 @@ func TestPlannedReparentShardRelayLogErrorStartReplication(t *testing.T) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -680,9 +696,9 @@ func TestPlannedReparentShardRelayLogErrorStartReplication(t *testing.T) { primary.FakeMysqlDaemon.ReadOnly = false primary.FakeMysqlDaemon.Replicating = false primary.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica - primary.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: 
mysql.MariadbGTIDSet{ - 7: mysql.MariadbGTID{ + primary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 7: replication.MariadbGTID{ Domain: 7, Server: 123, Sequence: 990, @@ -749,7 +765,9 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -765,18 +783,18 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { newPrimary.FakeMysqlDaemon.Replicating = true // make promote fail newPrimary.FakeMysqlDaemon.PromoteError = errors.New("some error") - newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = []mysql.Position{{ - GTIDSet: mysql.MariadbGTIDSet{ - 7: mysql.MariadbGTID{ + newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = []replication.Position{{ + GTIDSet: replication.MariadbGTIDSet{ + 7: replication.MariadbGTID{ Domain: 7, Server: 123, Sequence: 990, }, }, }} - newPrimary.FakeMysqlDaemon.PromoteResult = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 7: mysql.MariadbGTID{ + newPrimary.FakeMysqlDaemon.PromoteResult = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 7: replication.MariadbGTID{ Domain: 7, Server: 456, Sequence: 991, @@ -823,12 +841,12 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { "STOP SLAVE", "FAKE SET MASTER", "START SLAVE", + // extra SetReplicationSource call due to retry "STOP SLAVE", "FAKE SET MASTER", "START SLAVE", // extra SetReplicationSource call due to retry "STOP SLAVE", - "FAKE SET MASTER", "START SLAVE", } goodReplica1.StartActionLoop(t, wr) @@ -843,8 +861,6 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { "FAKE SET MASTER", "START SLAVE", 
"FAKE SET MASTER", - // extra SetReplicationSource call due to retry - "FAKE SET MASTER", } goodReplica2.StartActionLoop(t, wr) goodReplica2.FakeMysqlDaemon.Replicating = false @@ -890,7 +906,9 @@ func TestPlannedReparentShardSamePrimary(t *testing.T) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -904,9 +922,9 @@ func TestPlannedReparentShardSamePrimary(t *testing.T) { oldPrimary.FakeMysqlDaemon.ReadOnly = true oldPrimary.FakeMysqlDaemon.Replicating = false oldPrimary.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica - oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 7: mysql.MariadbGTID{ + oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 7: replication.MariadbGTID{ Domain: 7, Server: 123, Sequence: 990, @@ -930,7 +948,6 @@ func TestPlannedReparentShardSamePrimary(t *testing.T) { "FAKE SET MASTER", "START SLAVE", "STOP SLAVE", - "FAKE SET MASTER", "START SLAVE", } goodReplica1.StartActionLoop(t, wr) diff --git a/go/vt/wrangler/testlib/reparent_utils_test.go b/go/vt/wrangler/testlib/reparent_utils_test.go index 6cb67714411..f9fc8e2326e 100644 --- a/go/vt/wrangler/testlib/reparent_utils_test.go +++ b/go/vt/wrangler/testlib/reparent_utils_test.go @@ -24,12 +24,13 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/vt/vtctl/reparentutil/reparenttestutil" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/vtctl/reparentutil" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo" 
"vitess.io/vitess/go/vt/topo/memorytopo" @@ -47,8 +48,9 @@ func TestShardReplicationStatuses(t *testing.T) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // create shard and tablets @@ -67,9 +69,9 @@ func TestShardReplicationStatuses(t *testing.T) { } // primary action loop (to initialize host and port) - primary.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 5: mysql.MariadbGTID{ + primary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 5: replication.MariadbGTID{ Domain: 5, Server: 456, Sequence: 892, @@ -80,9 +82,9 @@ func TestShardReplicationStatuses(t *testing.T) { defer primary.StopActionLoop(t) // replica loop - replica.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 5: mysql.MariadbGTID{ + replica.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 5: replication.MariadbGTID{ Domain: 5, Server: 456, Sequence: 890, @@ -130,8 +132,9 @@ func TestReparentTablet(t *testing.T) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // create shard and tablets @@ -167,7 +170,6 @@ func TestReparentTablet(t *testing.T) { "FAKE SET MASTER", "START SLAVE", "STOP SLAVE", - "FAKE SET MASTER", "START SLAVE", } replica.StartActionLoop(t, wr) @@ -187,8 +189,10 @@ func 
TestReparentTablet(t *testing.T) { // TestSetReplicationSource tests that SetReplicationSource works as intended under various circumstances. func TestSetReplicationSource(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // create shard and tablets @@ -233,7 +237,7 @@ func TestSetReplicationSource(t *testing.T) { defer replica.StopActionLoop(t) // Set the correct error message that indicates we have received a relay log error. - replica.FakeMysqlDaemon.SetReplicationSourceError = errors.New("ERROR 1201 (HY000): Could not initialize master info structure; more error messages can be found in the MySQL error log") + replica.FakeMysqlDaemon.StartReplicationError = errors.New("ERROR 1201 (HY000): Could not initialize master info structure; more error messages can be found in the MySQL error log") // run ReparentTablet err = wr.SetReplicationSource(ctx, replica.Tablet) require.NoError(t, err, "SetReplicationSource failed") diff --git a/go/vt/wrangler/testlib/shard_test.go b/go/vt/wrangler/testlib/shard_test.go index 1b00ad82769..a0b1b0a3562 100644 --- a/go/vt/wrangler/testlib/shard_test.go +++ b/go/vt/wrangler/testlib/shard_test.go @@ -17,11 +17,10 @@ limitations under the License. 
package testlib import ( + "context" "strings" "testing" - "context" - "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -33,8 +32,9 @@ import ( ) func TestDeleteShardCleanup(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) vp := NewVtctlPipe(t, ts) defer vp.Close() diff --git a/go/vt/wrangler/testlib/version_test.go b/go/vt/wrangler/testlib/version_test.go index c54a0811948..102bcdfe6e5 100644 --- a/go/vt/wrangler/testlib/version_test.go +++ b/go/vt/wrangler/testlib/version_test.go @@ -17,6 +17,7 @@ limitations under the License. package testlib import ( + "context" "encoding/json" "fmt" "net/http" @@ -65,12 +66,10 @@ func TestVersion(t *testing.T) { }() discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) - // We need to run this test with the /debug/vars version of the - // plugin. - wrangler.ResetDebugVarsGetVersion() - // Initialize our environment - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) vp := NewVtctlPipe(t, ts) defer vp.Close() diff --git a/go/vt/wrangler/testlib/vtctl_topo_test.go b/go/vt/wrangler/testlib/vtctl_topo_test.go index bd3e4134d62..a13535f4111 100644 --- a/go/vt/wrangler/testlib/vtctl_topo_test.go +++ b/go/vt/wrangler/testlib/vtctl_topo_test.go @@ -52,7 +52,10 @@ func testVtctlTopoCommand(t *testing.T, vp *VtctlPipe, args []string, want strin // TestVtctlTopoCommands tests all vtctl commands from the // "Topo" group. 
func TestVtctlTopoCommands(t *testing.T) { - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "cell1", "cell2") if err := ts.CreateKeyspace(context.Background(), "ks1", &topodatapb.Keyspace{KeyspaceType: topodatapb.KeyspaceType_NORMAL}); err != nil { t.Fatalf("CreateKeyspace() failed: %v", err) } diff --git a/go/vt/wrangler/traffic_switcher.go b/go/vt/wrangler/traffic_switcher.go index c9bbb539807..654a5bd1588 100644 --- a/go/vt/wrangler/traffic_switcher.go +++ b/go/vt/wrangler/traffic_switcher.go @@ -26,12 +26,15 @@ import ( "sync" "time" - "vitess.io/vitess/go/sqlescape" - "vitess.io/vitess/go/vt/discovery" + "golang.org/x/sync/errgroup" "vitess.io/vitess/go/json2" + "vitess.io/vitess/go/maps2" + "vitess.io/vitess/go/sqlescape" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" @@ -53,22 +56,42 @@ import ( const ( errorNoStreams = "no streams found in keyspace %s for: %s" - // use pt-osc's naming convention, this format also ensures vstreamer ignores such tables + // Use pt-osc's naming convention, this format also ensures vstreamer ignores such tables. renameTableTemplate = "_%.59s_old" // limit table name to 64 characters sqlDeleteWorkflow = "delete from _vt.vreplication where db_name = %s and workflow = %s" + + sqlGetMaxSequenceVal = "select max(%a) as maxval from %a.%a" + sqlInitSequenceTable = "insert into %a.%a (id, next_id, cache) values (0, %d, 1000) on duplicate key update next_id = if(next_id < %d, %d, next_id)" ) // accessType specifies the type of access for a shard (allow/disallow writes). type accessType int +// sequenceMetadata contains all of the relevant metadata for a sequence that +// is being used by a table involved in a vreplication workflow. 
+type sequenceMetadata struct { + // The name of the sequence table. + backingTableName string + // The keyspace where the backing table lives. + backingTableKeyspace string + // The dbName in use by the keyspace where the backing table lives. + backingTableDBName string + // The name of the table using the sequence. + usingTableName string + // The dbName in use by the keyspace where the using table lives. + usingTableDBName string + // The using table definition. + usingTableDefinition *vschemapb.Table +} + const ( allowWrites = accessType(iota) disallowWrites - // number of LOCK TABLES cycles to perform on the sources during SwitchWrites + // Number of LOCK TABLES cycles to perform on the sources during SwitchWrites. lockTablesCycles = 2 - // time to wait between LOCK TABLES cycles on the sources during SwitchWrites + // Time to wait between LOCK TABLES cycles on the sources during SwitchWrites. lockTablesCycleDelay = time.Duration(100 * time.Millisecond) // How long to wait when refreshing the state of each tablet in a shard. Note that these @@ -90,7 +113,7 @@ type trafficSwitcher struct { wr *Wrangler workflow string - // if frozen is true, the rest of the fields are not set. + // If frozen is true, the rest of the fields are not set. 
frozen bool reverseWorkflow string id int64 @@ -101,8 +124,8 @@ type trafficSwitcher struct { tables []string keepRoutingRules bool sourceKSSchema *vindexes.KeyspaceSchema - optCells string //cells option passed to MoveTables/Reshard - optTabletTypes string //tabletTypes option passed to MoveTables/Reshard + optCells string // cells option passed to MoveTables/Reshard + optTabletTypes string // tabletTypes option passed to MoveTables/Reshard externalCluster string externalTopo *topo.Server sourceTimeZone string @@ -111,17 +134,6 @@ type trafficSwitcher struct { workflowSubType binlogdatapb.VReplicationWorkflowSubType } -/* -begin: implementation of workflow.ITrafficSwitcher - -(NOTE:@ajm188) Please see comments on that interface type for why this exists. -This is temporary to allow workflow.StreamMigrator to use this trafficSwitcher -code and should be removed in the very near-term when we move trafficSwitcher to -package workflow as well. -*/ - -var _ workflow.ITrafficSwitcher = (*trafficSwitcher)(nil) - func (ts *trafficSwitcher) TopoServer() *topo.Server { return ts.wr.ts } func (ts *trafficSwitcher) TabletManagerClient() tmclient.TabletManagerClient { return ts.wr.tmc } func (ts *trafficSwitcher) Logger() logutil.Logger { return ts.wr.logger } @@ -218,8 +230,8 @@ func (wr *Wrangler) getWorkflowState(ctx context.Context, targetKeyspace, workfl } var ( - reverse bool - keyspace string + reverse bool + sourceKeyspace string ) // We reverse writes by using the source_keyspace.workflowname_reverse workflow @@ -229,17 +241,19 @@ func (wr *Wrangler) getWorkflowState(ctx context.Context, targetKeyspace, workfl // source to check if writes have been switched. if strings.HasSuffix(workflowName, "_reverse") { reverse = true - keyspace = state.SourceKeyspace + // Flip the source and target keyspaces. 
+ sourceKeyspace = state.TargetKeyspace + targetKeyspace = state.SourceKeyspace workflowName = workflow.ReverseWorkflowName(workflowName) } else { - keyspace = targetKeyspace + sourceKeyspace = state.SourceKeyspace } if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { state.WorkflowType = workflow.TypeMoveTables // We assume a consistent state, so only choose routing rule for one table. if len(ts.Tables()) == 0 { - return nil, nil, fmt.Errorf("no tables in workflow %s.%s", keyspace, workflowName) + return nil, nil, fmt.Errorf("no tables in workflow %s.%s", targetKeyspace, workflowName) } table := ts.Tables()[0] @@ -252,19 +266,22 @@ func (wr *Wrangler) getWorkflowState(ctx context.Context, targetKeyspace, workfl rules := shardRoutingRules.Rules for _, rule := range rules { - if rule.ToKeyspace == ts.SourceKeyspaceName() { + switch rule.ToKeyspace { + case sourceKeyspace: state.ShardsNotYetSwitched = append(state.ShardsNotYetSwitched, rule.Shard) - } else { + case targetKeyspace: state.ShardsAlreadySwitched = append(state.ShardsAlreadySwitched, rule.Shard) + default: + // Not a relevant rule. 
} } } else { - state.RdonlyCellsSwitched, state.RdonlyCellsNotSwitched, err = ws.GetCellsWithTableReadsSwitched(ctx, keyspace, table, topodatapb.TabletType_RDONLY) + state.RdonlyCellsSwitched, state.RdonlyCellsNotSwitched, err = ws.GetCellsWithTableReadsSwitched(ctx, targetKeyspace, table, topodatapb.TabletType_RDONLY) if err != nil { return nil, nil, err } - state.ReplicaCellsSwitched, state.ReplicaCellsNotSwitched, err = ws.GetCellsWithTableReadsSwitched(ctx, keyspace, table, topodatapb.TabletType_REPLICA) + state.ReplicaCellsSwitched, state.ReplicaCellsNotSwitched, err = ws.GetCellsWithTableReadsSwitched(ctx, targetKeyspace, table, topodatapb.TabletType_REPLICA) if err != nil { return nil, nil, err } @@ -274,8 +291,9 @@ func (wr *Wrangler) getWorkflowState(ctx context.Context, targetKeyspace, workfl } for _, table := range ts.Tables() { rr := globalRules[table] - // if a rule exists for the table and points to the target keyspace, writes have been switched - if len(rr) > 0 && rr[0] == fmt.Sprintf("%s.%s", keyspace, table) { + // If a rule exists for the table and points to the target keyspace, writes + // have been switched. + if len(rr) > 0 && rr[0] == fmt.Sprintf("%s.%s", targetKeyspace, table) { state.WritesSwitched = true break } @@ -284,7 +302,7 @@ func (wr *Wrangler) getWorkflowState(ctx context.Context, targetKeyspace, workfl } else { state.WorkflowType = workflow.TypeReshard - // we assume a consistent state, so only choose one shard + // We assume a consistent state, so only choose one shard. 
var shard *topo.ShardInfo if reverse { shard = ts.TargetShards()[0] @@ -292,12 +310,12 @@ func (wr *Wrangler) getWorkflowState(ctx context.Context, targetKeyspace, workfl shard = ts.SourceShards()[0] } - state.RdonlyCellsSwitched, state.RdonlyCellsNotSwitched, err = ws.GetCellsWithShardReadsSwitched(ctx, keyspace, shard, topodatapb.TabletType_RDONLY) + state.RdonlyCellsSwitched, state.RdonlyCellsNotSwitched, err = ws.GetCellsWithShardReadsSwitched(ctx, targetKeyspace, shard, topodatapb.TabletType_RDONLY) if err != nil { return nil, nil, err } - state.ReplicaCellsSwitched, state.ReplicaCellsNotSwitched, err = ws.GetCellsWithShardReadsSwitched(ctx, keyspace, shard, topodatapb.TabletType_REPLICA) + state.ReplicaCellsSwitched, state.ReplicaCellsNotSwitched, err = ws.GetCellsWithShardReadsSwitched(ctx, targetKeyspace, shard, topodatapb.TabletType_REPLICA) if err != nil { return nil, nil, err } @@ -313,28 +331,34 @@ func (wr *Wrangler) getWorkflowState(ctx context.Context, targetKeyspace, workfl // SwitchReads is a generic way of switching read traffic for a resharding workflow. func (wr *Wrangler) SwitchReads(ctx context.Context, targetKeyspace, workflowName string, servedTypes []topodatapb.TabletType, cells []string, direction workflow.TrafficSwitchDirection, dryRun bool) (*[]string, error) { + // Consistently handle errors by logging and returning them. 
+ handleError := func(message string, err error) (*[]string, error) { + werr := vterrors.Errorf(vtrpcpb.Code_INTERNAL, fmt.Sprintf("%s: %v", message, err)) + wr.Logger().Error(werr) + return nil, werr + } ts, ws, err := wr.getWorkflowState(ctx, targetKeyspace, workflowName) if err != nil { - wr.Logger().Errorf("getWorkflowState failed: %v", err) - return nil, err + return handleError("failed to get the current state of the workflow", err) } if ts == nil { errorMsg := fmt.Sprintf("workflow %s not found in keyspace %s", workflowName, targetKeyspace) - wr.Logger().Errorf(errorMsg) - return nil, fmt.Errorf(errorMsg) + return handleError("failed to get the current state of the workflow", fmt.Errorf(errorMsg)) } log.Infof("Switching reads: %s.%s tt %+v, cells %+v, workflow state: %+v", targetKeyspace, workflowName, servedTypes, cells, ws) var switchReplicas, switchRdonly bool for _, servedType := range servedTypes { if servedType != topodatapb.TabletType_REPLICA && servedType != topodatapb.TabletType_RDONLY { - return nil, fmt.Errorf("tablet type must be REPLICA or RDONLY: %v", servedType) + return handleError("invalid tablet type", fmt.Errorf("tablet type must be REPLICA or RDONLY: %v", servedType)) } - if direction == workflow.DirectionBackward && servedType == topodatapb.TabletType_REPLICA && len(ws.ReplicaCellsSwitched) == 0 { - return nil, fmt.Errorf("requesting reversal of read traffic for REPLICAs but REPLICA reads have not been switched") - } - if direction == workflow.DirectionBackward && servedType == topodatapb.TabletType_RDONLY && len(ws.RdonlyCellsSwitched) == 0 { - return nil, fmt.Errorf("requesting reversal of SwitchReads for RDONLYs but RDONLY reads have not been switched") + if !ts.isPartialMigration { // shard level traffic switching is all or nothing + if direction == workflow.DirectionBackward && servedType == topodatapb.TabletType_REPLICA && len(ws.ReplicaCellsSwitched) == 0 { + return handleError("invalid request", fmt.Errorf("requesting reversal of 
read traffic for REPLICAs but REPLICA reads have not been switched")) + } + if direction == workflow.DirectionBackward && servedType == topodatapb.TabletType_RDONLY && len(ws.RdonlyCellsSwitched) == 0 { + return handleError("invalid request", fmt.Errorf("requesting reversal of SwitchReads for RDONLYs but RDONLY reads have not been switched")) + } } switch servedType { case topodatapb.TabletType_REPLICA: @@ -344,9 +368,10 @@ func (wr *Wrangler) SwitchReads(ctx context.Context, targetKeyspace, workflowNam } } - // if there are no rdonly tablets in the cells ask to switch rdonly tablets as well so that routing rules + // If there are no rdonly tablets in the cells ask to switch rdonly tablets as well so that routing rules // are updated for rdonly as well. Otherwise vitess will not know that the workflow has completed and will - // incorrectly report that not all reads have been switched. User currently is forced to switch non-existent rdonly tablets + // incorrectly report that not all reads have been switched. User currently is forced to switch + // non-existent rdonly tablets. if switchReplicas && !switchRdonly { var err error rdonlyTabletsExist, err := topotools.DoCellsHaveRdonlyTablets(ctx, wr.ts, cells) @@ -358,11 +383,10 @@ func (wr *Wrangler) SwitchReads(ctx context.Context, targetKeyspace, workflowNam } } - // If journals exist notify user and fail + // If journals exist notify user and fail. 
journalsExist, _, err := ts.checkJournals(ctx) if err != nil { - wr.Logger().Errorf("checkJournals failed: %v", err) - return nil, err + return handleError(fmt.Sprintf("failed to read journal in the %s keyspace", ts.SourceKeyspaceName()), err) } if journalsExist { log.Infof("Found a previous journal entry for %d", ts.id) @@ -375,15 +399,13 @@ func (wr *Wrangler) SwitchReads(ctx context.Context, targetKeyspace, workflowNam } if err := ts.validate(ctx); err != nil { - ts.Logger().Errorf("validate failed: %v", err) - return nil, err + return handleError("workflow validation failed", err) } // For reads, locking the source keyspace is sufficient. ctx, unlock, lockErr := sw.lockKeyspace(ctx, ts.SourceKeyspaceName(), "SwitchReads") if lockErr != nil { - ts.Logger().Errorf("LockKeyspace failed: %v", lockErr) - return nil, lockErr + return handleError(fmt.Sprintf("failed to lock the %s keyspace", ts.SourceKeyspaceName()), lockErr) } defer unlock(&err) @@ -391,23 +413,20 @@ func (wr *Wrangler) SwitchReads(ctx context.Context, targetKeyspace, workflowNam if ts.isPartialMigration { ts.Logger().Infof("Partial migration, skipping switchTableReads as traffic is all or nothing per shard and overridden for reads AND writes in the ShardRoutingRule created when switching writes.") } else if err := sw.switchTableReads(ctx, cells, servedTypes, direction); err != nil { - ts.Logger().Errorf("switchTableReads failed: %v", err) - return nil, err + return handleError("failed to switch read traffic for the tables", err) } return sw.logs(), nil } wr.Logger().Infof("About to switchShardReads: %+v, %+v, %+v", cells, servedTypes, direction) if err := sw.switchShardReads(ctx, cells, servedTypes, direction); err != nil { - ts.Logger().Errorf("switchShardReads failed: %v", err) - return nil, err + return handleError("failed to switch read traffic for the shards", err) } wr.Logger().Infof("switchShardReads Completed: %+v, %+v, %+v", cells, servedTypes, direction) if err := 
wr.ts.ValidateSrvKeyspace(ctx, targetKeyspace, strings.Join(cells, ",")); err != nil { err2 := vterrors.Wrapf(err, "After switching shard reads, found SrvKeyspace for %s is corrupt in cell %s", targetKeyspace, strings.Join(cells, ",")) - log.Errorf("%w", err2) - return nil, err2 + return handleError("failed to validate SrvKeyspace record", err2) } return sw.logs(), nil } @@ -457,17 +476,22 @@ func (wr *Wrangler) areTabletsAvailableToStreamFrom(ctx context.Context, ts *tra // SwitchWrites is a generic way of migrating write traffic for a resharding workflow. func (wr *Wrangler) SwitchWrites(ctx context.Context, targetKeyspace, workflowName string, timeout time.Duration, - cancel, reverse, reverseReplication bool, dryRun bool) (journalID int64, dryRunResults *[]string, err error) { + cancel, reverse, reverseReplication bool, dryRun, initializeTargetSequences bool) (journalID int64, dryRunResults *[]string, err error) { + // Consistently handle errors by logging and returning them. + handleError := func(message string, err error) (int64, *[]string, error) { + werr := vterrors.Errorf(vtrpcpb.Code_INTERNAL, fmt.Sprintf("%s: %v", message, err)) + wr.Logger().Error(werr) + return 0, nil, werr + } + ts, ws, err := wr.getWorkflowState(ctx, targetKeyspace, workflowName) _ = ws if err != nil { - wr.Logger().Errorf("getWorkflowState failed: %v", err) - return 0, nil, err + handleError("failed to get the current workflow state", err) } if ts == nil { errorMsg := fmt.Sprintf("workflow %s not found in keyspace %s", workflowName, targetKeyspace) - wr.Logger().Errorf(errorMsg) - return 0, nil, fmt.Errorf(errorMsg) + handleError("failed to get the current workflow state", fmt.Errorf(errorMsg)) } var sw iswitcher @@ -484,47 +508,57 @@ func (wr *Wrangler) SwitchWrites(ctx context.Context, targetKeyspace, workflowNa ts.Logger().Infof("Built switching metadata: %+v", ts) if err := ts.validate(ctx); err != nil { - ts.Logger().Errorf("validate failed: %v", err) - return 0, nil, err + 
handleError("workflow validation failed", err) } if reverseReplication { err := wr.areTabletsAvailableToStreamFrom(ctx, ts, ts.TargetKeyspaceName(), ts.TargetShards()) if err != nil { - return 0, nil, err + return handleError(fmt.Sprintf("no tablets were available to stream from in the %s keyspace", ts.SourceKeyspaceName()), err) } } // Need to lock both source and target keyspaces. tctx, sourceUnlock, lockErr := sw.lockKeyspace(ctx, ts.SourceKeyspaceName(), "SwitchWrites") if lockErr != nil { - ts.Logger().Errorf("LockKeyspace failed: %v", lockErr) - return 0, nil, lockErr + return handleError(fmt.Sprintf("failed to lock the %s keyspace", ts.SourceKeyspaceName()), lockErr) } ctx = tctx defer sourceUnlock(&err) if ts.TargetKeyspaceName() != ts.SourceKeyspaceName() { tctx, targetUnlock, lockErr := sw.lockKeyspace(ctx, ts.TargetKeyspaceName(), "SwitchWrites") if lockErr != nil { - ts.Logger().Errorf("LockKeyspace failed: %v", lockErr) - return 0, nil, lockErr + return handleError(fmt.Sprintf("failed to lock the %s keyspace", ts.TargetKeyspaceName()), lockErr) } ctx = tctx defer targetUnlock(&err) } + // Find out if the target is using any sequence tables for auto_increment + // value generation. If so, then we'll need to ensure that they are + // initialized properly before allowing new writes on the target. + sequenceMetadata := make(map[string]*sequenceMetadata) + // For sharded to sharded migrations the sequence must already be setup. + // For reshards the sequence usage is not changed. 
+ if initializeTargetSequences && ts.workflowType == binlogdatapb.VReplicationWorkflowType_MoveTables && + ts.SourceKeyspaceSchema() != nil && ts.SourceKeyspaceSchema().Keyspace != nil && + !ts.SourceKeyspaceSchema().Keyspace.Sharded { + sequenceMetadata, err = ts.getTargetSequenceMetadata(ctx) + if err != nil { + return handleError(fmt.Sprintf("failed to get the sequence information in the %s keyspace", ts.TargetKeyspaceName()), err) + } + } + // If no journals exist, sourceWorkflows will be initialized by sm.MigrateStreams. journalsExist, sourceWorkflows, err := ts.checkJournals(ctx) if err != nil { - ts.Logger().Errorf("checkJournals failed: %v", err) - return 0, nil, err + return handleError(fmt.Sprintf("failed to read journal in the %s keyspace", ts.SourceKeyspaceName()), err) } if !journalsExist { ts.Logger().Infof("No previous journals were found. Proceeding normally.") sm, err := workflow.BuildStreamMigrator(ctx, ts, cancel) if err != nil { - ts.Logger().Errorf("buildStreamMigrater failed: %v", err) - return 0, nil, err + return handleError("failed to migrate the workflow streams", err) } if cancel { sw.cancelMigration(ctx, sm) @@ -534,21 +568,19 @@ func (wr *Wrangler) SwitchWrites(ctx context.Context, targetKeyspace, workflowNa ts.Logger().Infof("Stopping streams") sourceWorkflows, err = sw.stopStreams(ctx, sm) if err != nil { - ts.Logger().Errorf("stopStreams failed: %v", err) for key, streams := range sm.Streams() { for _, stream := range streams { ts.Logger().Errorf("stream in stopStreams: key %s shard %s stream %+v", key, stream.BinlogSource.Shard, stream.BinlogSource) } } sw.cancelMigration(ctx, sm) - return 0, nil, err + return handleError("failed to stop the workflow streams", err) } ts.Logger().Infof("Stopping source writes") if err := sw.stopSourceWrites(ctx); err != nil { - ts.Logger().Errorf("stopSourceWrites failed: %v", err) sw.cancelMigration(ctx, sm) - return 0, nil, err + return handleError(fmt.Sprintf("failed to stop writes in the %s 
keyspace", ts.SourceKeyspaceName()), err) } if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { @@ -557,9 +589,8 @@ func (wr *Wrangler) SwitchWrites(ctx context.Context, targetKeyspace, workflowNa // the tablet's deny list check and the first mysqld side table lock. for cnt := 1; cnt <= lockTablesCycles; cnt++ { if err := ts.executeLockTablesOnSource(ctx); err != nil { - ts.Logger().Errorf("Failed to execute LOCK TABLES (attempt %d of %d) on sources: %v", cnt, lockTablesCycles, err) sw.cancelMigration(ctx, sm) - return 0, nil, err + return handleError(fmt.Sprintf("failed to execute LOCK TABLES (attempt %d of %d) on sources", cnt, lockTablesCycles), err) } // No need to UNLOCK the tables as the connection was closed once the locks were acquired // and thus the locks released. @@ -569,72 +600,78 @@ func (wr *Wrangler) SwitchWrites(ctx context.Context, targetKeyspace, workflowNa ts.Logger().Infof("Waiting for streams to catchup") if err := sw.waitForCatchup(ctx, timeout); err != nil { - ts.Logger().Errorf("waitForCatchup failed: %v", err) sw.cancelMigration(ctx, sm) - return 0, nil, err + return handleError("failed to sync up replication between the source and target", err) } ts.Logger().Infof("Migrating streams") if err := sw.migrateStreams(ctx, sm); err != nil { - ts.Logger().Errorf("migrateStreams failed: %v", err) sw.cancelMigration(ctx, sm) - return 0, nil, err + return handleError("failed to migrate the workflow streams", err) + } + + ts.Logger().Infof("Resetting sequences") + if err := sw.resetSequences(ctx); err != nil { + sw.cancelMigration(ctx, sm) + return handleError("failed to reset the sequences", err) } ts.Logger().Infof("Creating reverse streams") if err := sw.createReverseVReplication(ctx); err != nil { - ts.Logger().Errorf("createReverseVReplication failed: %v", err) sw.cancelMigration(ctx, sm) - return 0, nil, err + return handleError("failed to create the reverse vreplication streams", err) } } else { if cancel { - err := 
fmt.Errorf("traffic switching has reached the point of no return, cannot cancel") - ts.Logger().Errorf("%v", err) - return 0, nil, err + return handleError("invalid cancel", fmt.Errorf("traffic switching has reached the point of no return, cannot cancel")) } ts.Logger().Infof("Journals were found. Completing the left over steps.") // Need to gather positions in case all journals were not created. if err := ts.gatherPositions(ctx); err != nil { - ts.Logger().Errorf("gatherPositions failed: %v", err) - return 0, nil, err + return handleError("failed to gather replication positions", err) } } // This is the point of no return. Once a journal is created, // traffic can be redirected to target shards. if err := sw.createJournals(ctx, sourceWorkflows); err != nil { - ts.Logger().Errorf("createJournals failed: %v", err) - return 0, nil, err + return handleError("failed to create the journal", err) + } + // Initialize any target sequences, if there are any, before allowing new writes. + if initializeTargetSequences && len(sequenceMetadata) > 0 { + // Writes are blocked so we can safely initialize the sequence tables but + // we also want to use a shorter timeout than the parent context. + // We use up at most half of the overall timeout. 
+ initSeqCtx, cancel := context.WithTimeout(ctx, timeout/2) + defer cancel() + if err := sw.initializeTargetSequences(initSeqCtx, sequenceMetadata); err != nil { + return handleError(fmt.Sprintf("failed to initialize the sequences used in the %s keyspace", ts.TargetKeyspaceName()), err) + } } if err := sw.allowTargetWrites(ctx); err != nil { - ts.Logger().Errorf("allowTargetWrites failed: %v", err) - return 0, nil, err + return handleError(fmt.Sprintf("failed to allow writes in the %s keyspace", ts.TargetKeyspaceName()), err) } if err := sw.changeRouting(ctx); err != nil { - ts.Logger().Errorf("changeRouting failed: %v", err) - return 0, nil, err + return handleError("failed to update the routing rules", err) } if err := sw.streamMigraterfinalize(ctx, ts, sourceWorkflows); err != nil { - ts.Logger().Errorf("finalize failed: %v", err) - return 0, nil, err + handleError("failed to finalize the traffic switch", err) } if reverseReplication { if err := sw.startReverseVReplication(ctx); err != nil { - ts.Logger().Errorf("startReverseVReplication failed: %v", err) - return 0, nil, err + return handleError("failed to start the reverse workflow", err) } } if err := sw.freezeTargetVReplication(ctx); err != nil { - ts.Logger().Errorf("deleteTargetVReplication failed: %v", err) - return 0, nil, err + return handleError(fmt.Sprintf("failed to freeze the workflow in the %s keyspace", ts.TargetKeyspaceName()), err) } return ts.id, sw.logs(), nil } -// DropTargets cleans up target tables, shards and denied tables if a MoveTables/Reshard is cancelled +// DropTargets cleans up target tables, shards and denied tables if a +// MoveTables/Reshard is cancelled. 
func (wr *Wrangler) DropTargets(ctx context.Context, targetKeyspace, workflow string, keepData, keepRoutingRules, dryRun bool) (*[]string, error) { ts, err := wr.buildTrafficSwitcher(ctx, targetKeyspace, workflow) if err != nil { @@ -675,6 +712,9 @@ func (wr *Wrangler) DropTargets(ctx context.Context, targetKeyspace, workflow st if err := sw.dropSourceDeniedTables(ctx); err != nil { return nil, err } + if err := sw.dropTargetDeniedTables(ctx); err != nil { + return nil, err + } case binlogdatapb.MigrationType_SHARDS: log.Infof("Removing target shards") if err := sw.dropTargetShards(ctx); err != nil { @@ -711,7 +751,7 @@ func (wr *Wrangler) dropArtifacts(ctx context.Context, keepRoutingRules bool, sw } // finalizeMigrateWorkflow deletes the streams for the Migrate workflow. -// We only cleanup the target for external sources +// We only cleanup the target for external sources. func (wr *Wrangler) finalizeMigrateWorkflow(ctx context.Context, targetKeyspace, workflow, tableSpecs string, cancel, keepData, keepRoutingRules, dryRun bool) (*[]string, error) { ts, err := wr.buildTrafficSwitcher(ctx, targetKeyspace, workflow) @@ -751,7 +791,8 @@ func (wr *Wrangler) finalizeMigrateWorkflow(ctx context.Context, targetKeyspace, return sw.logs(), nil } -// DropSources cleans up source tables, shards and denied tables after a MoveTables/Reshard is completed +// DropSources cleans up source tables, shards and denied tables after a +// MoveTables/Reshard is completed. 
func (wr *Wrangler) DropSources(ctx context.Context, targetKeyspace, workflowName string, removalType workflow.TableRemovalType, keepData, keepRoutingRules, force, dryRun bool) (*[]string, error) { ts, err := wr.buildTrafficSwitcher(ctx, targetKeyspace, workflowName) if err != nil { @@ -797,6 +838,9 @@ func (wr *Wrangler) DropSources(ctx context.Context, targetKeyspace, workflowNam if err := sw.dropSourceDeniedTables(ctx); err != nil { return nil, err } + if err := sw.dropTargetDeniedTables(ctx); err != nil { + return nil, err + } case binlogdatapb.MigrationType_SHARDS: log.Infof("Removing shards") @@ -816,7 +860,7 @@ func (wr *Wrangler) DropSources(ctx context.Context, targetKeyspace, workflowNam } func (wr *Wrangler) buildTrafficSwitcher(ctx context.Context, targetKeyspace, workflowName string) (*trafficSwitcher, error) { - tgtInfo, err := workflow.BuildTargets(ctx, wr.ts, wr.tmc, targetKeyspace, workflowName) + tgtInfo, err := workflow.LegacyBuildTargets(ctx, wr.ts, wr.tmc, targetKeyspace, workflowName) if err != nil { log.Infof("Error building targets: %s", err) return nil, err @@ -840,7 +884,7 @@ func (wr *Wrangler) buildTrafficSwitcher(ctx context.Context, targetKeyspace, wo log.Infof("Migration ID for workflow %s: %d", workflowName, ts.id) sourceTopo := wr.ts - // Build the sources + // Build the sources. 
for _, target := range targets { for _, bls := range target.Sources { if ts.sourceKeyspace == "" { @@ -883,6 +927,10 @@ func (wr *Wrangler) buildTrafficSwitcher(ctx context.Context, targetKeyspace, wo if err != nil { return nil, err } + if sourcesi.PrimaryAlias == nil { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "source shard %s/%s currently has no primary tablet", + bls.Keyspace, bls.Shard) + } sourcePrimary, err := sourceTopo.GetTablet(ctx, sourcesi.PrimaryAlias) if err != nil { return nil, err @@ -936,8 +984,9 @@ func (ts *trafficSwitcher) getSourceAndTargetShardsNames() ([]string, []string) return sourceShards, targetShards } -// isPartialMoveTables returns true if whe workflow is MoveTables, -// has the same number of shards, is not covering the entire shard range, and has one-to-one shards in source and target +// isPartialMoveTables returns true if whe workflow is MoveTables, has the +// same number of shards, is not covering the entire shard range, and has +// one-to-one shards in source and target. func (ts *trafficSwitcher) isPartialMoveTables(sourceShards, targetShards []string) (bool, error) { if ts.MigrationType() != binlogdatapb.MigrationType_TABLES { @@ -969,11 +1018,12 @@ func getSourceAndTargetKeyRanges(sourceShards, targetShards []string) (*topodata return krs[0], nil } - // happily string sorting of shards also sorts them in the ascending order of key ranges in vitess + // Happily string sorting of shards also sorts them in the ascending order of + // key ranges in vitess. sort.Strings(sourceShards) sort.Strings(targetShards) getFullKeyRange := func(shards []string) (*topodatapb.KeyRange, error) { - // expect sorted shards + // Expect sorted shards. 
kr1, err := getKeyRange(sourceShards[0]) if err != nil { return nil, err @@ -1037,7 +1087,7 @@ func (ts *trafficSwitcher) switchTableReads(ctx context.Context, cells []string, // table -> sourceKeyspace.table // targetKeyspace.table -> sourceKeyspace.table // For forward migration, we add tablet type specific rules to redirect traffic to the target. - // For backward, we redirect to source + // For backward, we redirect to source. for _, servedType := range servedTypes { tt := strings.ToLower(servedType.String()) for _, table := range ts.Tables() { @@ -1147,9 +1197,9 @@ func (ts *trafficSwitcher) stopSourceWrites(ctx context.Context) error { } func (ts *trafficSwitcher) changeTableSourceWrites(ctx context.Context, access accessType) error { - return ts.ForAllSources(func(source *workflow.MigrationSource) error { + err := ts.ForAllSources(func(source *workflow.MigrationSource) error { if _, err := ts.TopoServer().UpdateShardFields(ctx, ts.SourceKeyspaceName(), source.GetShard().ShardName(), func(si *topo.ShardInfo) error { - return si.UpdateSourceDeniedTables(ctx, topodatapb.TabletType_PRIMARY, nil, access == allowWrites /* remove */, ts.Tables()) + return si.UpdateDeniedTables(ctx, topodatapb.TabletType_PRIMARY, nil, access == allowWrites /* remove */, ts.Tables()) }); err != nil { return err } @@ -1162,6 +1212,14 @@ func (ts *trafficSwitcher) changeTableSourceWrites(ctx context.Context, access a } return err }) + if err != nil { + log.Warningf("Error in changeTableSourceWrites: %s", err) + return err + } + // Note that the denied tables, which are being updated in this method, are not part of the SrvVSchema in the topo. + // However, we are using the notification of a SrvVSchema change in VTGate to recompute the state of a + // MoveTables workflow (which also looks up denied tables from the topo). So we need to trigger a SrvVSchema change here. 
+ return ts.TopoServer().RebuildSrvVSchema(ctx, nil) } // executeLockTablesOnSource executes a LOCK TABLES tb1 READ, tbl2 READ,... statement on each @@ -1178,7 +1236,7 @@ func (ts *trafficSwitcher) executeLockTablesOnSource(ctx context.Context) error for _, tableName := range ts.Tables() { sb.WriteString(fmt.Sprintf("%s READ,", sqlescape.EscapeID(tableName))) } - // trim extra trailing comma + // Trim extra trailing comma. lockStmt := sb.String()[:sb.Len()-1] return ts.ForAllSources(func(source *workflow.MigrationSource) error { @@ -1199,7 +1257,7 @@ func (ts *trafficSwitcher) executeLockTablesOnSource(ctx context.Context) error func (ts *trafficSwitcher) waitForCatchup(ctx context.Context, filteredReplicationWaitTime time.Duration) error { ctx, cancel := context.WithTimeout(ctx, filteredReplicationWaitTime) defer cancel() - // source writes have been stopped, wait for all streams on targets to catch up + // Source writes have been stopped, wait for all streams on targets to catch up. if err := ts.ForAllUIDs(func(target *workflow.MigrationTarget, uid int32) error { ts.Logger().Infof("Before Catchup: uid: %d, target primary %s, target position %s, shard %s", uid, target.GetPrimary().AliasString(), target.Position, target.GetShard().String()) @@ -1340,7 +1398,7 @@ func (ts *trafficSwitcher) createReverseVReplication(ctx context.Context) error source.GetPrimary().Alias, ts.ReverseWorkflowName(), target.Position) _, err := ts.VReplicationExec(ctx, source.GetPrimary().Alias, binlogplayer.CreateVReplicationState(ts.ReverseWorkflowName(), reverseBls, target.Position, - binlogplayer.BlpStopped, source.GetPrimary().DbName(), ts.workflowType, ts.workflowSubType)) + binlogdatapb.VReplicationWorkflowState_Stopped, source.GetPrimary().DbName(), ts.workflowType, ts.workflowSubType)) if err != nil { return err } @@ -1446,7 +1504,7 @@ func (ts *trafficSwitcher) allowTargetWrites(ctx context.Context) error { func (ts *trafficSwitcher) allowTableTargetWrites(ctx context.Context) 
error { return ts.ForAllTargets(func(target *workflow.MigrationTarget) error { if _, err := ts.TopoServer().UpdateShardFields(ctx, ts.TargetKeyspaceName(), target.GetShard().ShardName(), func(si *topo.ShardInfo) error { - return si.UpdateSourceDeniedTables(ctx, topodatapb.TabletType_PRIMARY, nil, true, ts.Tables()) + return si.UpdateDeniedTables(ctx, topodatapb.TabletType_PRIMARY, nil, true, ts.Tables()) }); err != nil { return err } @@ -1497,7 +1555,6 @@ func (ts *trafficSwitcher) changeWriteRoute(ctx context.Context) error { return err } } - return ts.TopoServer().RebuildSrvVSchema(ctx, nil) } @@ -1590,7 +1647,7 @@ func (ts *trafficSwitcher) TargetShards() []*topo.ShardInfo { func (ts *trafficSwitcher) dropSourceDeniedTables(ctx context.Context) error { return ts.ForAllSources(func(source *workflow.MigrationSource) error { if _, err := ts.TopoServer().UpdateShardFields(ctx, ts.SourceKeyspaceName(), source.GetShard().ShardName(), func(si *topo.ShardInfo) error { - return si.UpdateSourceDeniedTables(ctx, topodatapb.TabletType_PRIMARY, nil, true, ts.Tables()) + return si.UpdateDeniedTables(ctx, topodatapb.TabletType_PRIMARY, nil, true, ts.Tables()) }); err != nil { return err } @@ -1601,6 +1658,20 @@ func (ts *trafficSwitcher) dropSourceDeniedTables(ctx context.Context) error { }) } +func (ts *trafficSwitcher) dropTargetDeniedTables(ctx context.Context) error { + return ts.ForAllTargets(func(target *workflow.MigrationTarget) error { + if _, err := ts.TopoServer().UpdateShardFields(ctx, ts.TargetKeyspaceName(), target.GetShard().ShardName(), func(si *topo.ShardInfo) error { + return si.UpdateDeniedTables(ctx, topodatapb.TabletType_PRIMARY, nil, true, ts.Tables()) + }); err != nil { + return err + } + rtbsCtx, cancel := context.WithTimeout(ctx, shardTabletRefreshTimeout) + defer cancel() + _, _, err := topotools.RefreshTabletsByShard(rtbsCtx, ts.TopoServer(), ts.TabletManagerClient(), target.GetShard(), nil, ts.Logger()) + return err + }) +} + func (ts 
*trafficSwitcher) validateWorkflowHasCompleted(ctx context.Context) error { return doValidateWorkflowHasCompleted(ctx, ts) } @@ -1868,3 +1939,332 @@ func (ts *trafficSwitcher) addParticipatingTablesToKeyspace(ctx context.Context, } return ts.TopoServer().SaveVSchema(ctx, keyspace, vschema) } + +func (ts *trafficSwitcher) isSequenceParticipating(ctx context.Context) (bool, error) { + vschema, err := ts.TopoServer().GetVSchema(ctx, ts.targetKeyspace) + if err != nil { + return false, err + } + if vschema == nil || len(vschema.Tables) == 0 { + return false, nil + } + sequenceFound := false + for _, table := range ts.Tables() { + vs, ok := vschema.Tables[table] + if !ok || vs == nil { + continue + } + if vs.Type == vindexes.TypeSequence { + sequenceFound = true + break + } + } + return sequenceFound, nil +} + +// getTargetSequenceMetadata returns a map of sequence metadata keyed by the +// backing sequence table name. If the target keyspace has no tables +// defined that use sequences for auto_increment generation then a nil +// map will be returned. +func (ts *trafficSwitcher) getTargetSequenceMetadata(ctx context.Context) (map[string]*sequenceMetadata, error) { + vschema, err := ts.TopoServer().GetVSchema(ctx, ts.targetKeyspace) + if err != nil { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get vschema for target keyspace %s: %v", + ts.targetKeyspace, err) + } + if vschema == nil || len(vschema.Tables) == 0 { // Nothing to do + return nil, nil + } + + sequencesByBackingTable, backingTablesFound, err := ts.findSequenceUsageInKeyspace(vschema) + if err != nil { + return nil, err + } + // If all of the sequence tables were defined using qualified table + // names then we don't need to search for them in other keyspaces. 
+ if len(sequencesByBackingTable) == 0 || backingTablesFound { + return sequencesByBackingTable, nil + } + + if err := ctx.Err(); err != nil { + return nil, err + } + + // Now we need to locate the backing sequence table(s) which will + // be in another unsharded keyspace. + smMu := sync.Mutex{} + tableCount := len(sequencesByBackingTable) + tablesFound := 0 // Used to short circuit the search + // Define the function used to search each keyspace. + searchKeyspace := func(sctx context.Context, done chan struct{}, keyspace string) error { + kvs, kerr := ts.TopoServer().GetVSchema(sctx, keyspace) + if kerr != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get vschema for keyspace %s: %v", + keyspace, kerr) + } + if kvs == nil || kvs.Sharded || len(kvs.Tables) == 0 { + return nil + } + for tableName, tableDef := range kvs.Tables { + select { + case <-sctx.Done(): + return sctx.Err() + case <-done: // We've found everything we need in other goroutines + return nil + default: + } + if complete := func() bool { + smMu.Lock() // Prevent concurrent access to the map + defer smMu.Unlock() + sm := sequencesByBackingTable[tableName] + if tableDef != nil && tableDef.Type == vindexes.TypeSequence && + sm != nil && tableName == sm.backingTableName { + tablesFound++ // This is also protected by the mutex + sm.backingTableKeyspace = keyspace + // Set the default keyspace name. We will later check to + // see if the tablet we send requests to is using a dbname + // override and use that if it is. 
+ sm.backingTableDBName = "vt_" + keyspace + if tablesFound == tableCount { // Short circuit the search + select { + case <-done: // It's already been closed + return true + default: + close(done) // Mark the search as completed + return true + } + } + } + return false + }(); complete { + return nil + } + } + return nil + } + keyspaces, err := ts.TopoServer().GetKeyspaces(ctx) + if err != nil { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get keyspaces: %v", err) + } + searchGroup, gctx := errgroup.WithContext(ctx) + searchCompleted := make(chan struct{}) + for _, keyspace := range keyspaces { + keyspace := keyspace // https://golang.org/doc/faq#closures_and_goroutines + searchGroup.Go(func() error { + return searchKeyspace(gctx, searchCompleted, keyspace) + }) + } + if err := searchGroup.Wait(); err != nil { + return nil, err + } + + if tablesFound != tableCount { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to locate all of the backing sequence tables being used; sequence table metadata: %+v", + sequencesByBackingTable) + } + return sequencesByBackingTable, nil +} + +// findSequenceUsageInKeyspace searches the keyspace's vschema for usage +// of sequences. It returns a map of sequence metadata keyed by the backing +// sequence table name -- if any usage is found -- along with a boolean to +// indicate if all of the backing sequence tables were defined using +// qualified table names (so we know where they all live) along with an +// error if any is seen. 
+func (ts *trafficSwitcher) findSequenceUsageInKeyspace(vschema *vschemapb.Keyspace) (map[string]*sequenceMetadata, bool, error) { + allFullyQualified := true + targets := maps2.Values(ts.Targets()) + if len(targets) == 0 || targets[0].GetPrimary() == nil { // This should never happen + return nil, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "no primary tablet found for target keyspace %s", ts.targetKeyspace) + } + targetDBName := targets[0].GetPrimary().DbName() + sequencesByBackingTable := make(map[string]*sequenceMetadata) + + for _, table := range ts.Tables() { + vs, ok := vschema.Tables[table] + if !ok || vs == nil || vs.AutoIncrement == nil || vs.AutoIncrement.Sequence == "" { + continue + } + sm := &sequenceMetadata{ + backingTableName: vs.AutoIncrement.Sequence, + usingTableName: table, + usingTableDefinition: vs, + usingTableDBName: targetDBName, + } + // If the sequence table is fully qualified in the vschema then + // we don't need to find it later. + if strings.Contains(vs.AutoIncrement.Sequence, ".") { + keyspace, tableName, found := strings.Cut(vs.AutoIncrement.Sequence, ".") + if !found { + return nil, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid sequence table name %s defined in the %s keyspace", + vs.AutoIncrement.Sequence, ts.targetKeyspace) + } + sm.backingTableName = tableName + sm.backingTableKeyspace = keyspace + // Set the default keyspace name. We will later check to + // see if the tablet we send requests to is using a dbname + // override and use that if it is. + sm.backingTableDBName = "vt_" + keyspace + } else { + allFullyQualified = false + } + sequencesByBackingTable[sm.backingTableName] = sm + } + + return sequencesByBackingTable, allFullyQualified, nil +} + +// initializeTargetSequences initializes the backing sequence tables +// using a map keyed by the backing sequence table name. +// +// The backing tables must have already been created. 
This function will +// then ensure that the next value is set to a value greater than any +// currently stored in the using table on the target keyspace. If the +// backing table is updated to a new higher value then it will also tell +// the primary tablet serving the sequence to refresh/reset its cache to +// be sure that it does not provide a value that is less than the current max. +func (ts *trafficSwitcher) initializeTargetSequences(ctx context.Context, sequencesByBackingTable map[string]*sequenceMetadata) error { + initSequenceTable := func(ictx context.Context, sequenceTableName string, sequenceMetadata *sequenceMetadata) error { + // Now we need to run this query on the target shards in order + // to get the max value and set the next id for the sequence to + // a higher value. + shardResults := make([]int64, 0, len(ts.TargetShards())) + srMu := sync.Mutex{} + ierr := ts.ForAllTargets(func(target *workflow.MigrationTarget) error { + primary := target.GetPrimary() + if primary == nil || primary.GetAlias() == nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "no primary tablet found for target shard %s/%s", + ts.targetKeyspace, target.GetShard().ShardName()) + } + query := sqlparser.BuildParsedQuery(sqlGetMaxSequenceVal, + sqlescape.EscapeID(sequenceMetadata.usingTableDefinition.AutoIncrement.Column), + sqlescape.EscapeID(sequenceMetadata.usingTableDBName), + sqlescape.EscapeID(sequenceMetadata.usingTableName), + ) + qr, terr := ts.wr.ExecuteFetchAsApp(ictx, primary.GetAlias(), true, query.Query, 1) + if terr != nil || len(qr.Rows) != 1 { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the max used sequence value for target table %s.%s in order to initialize the backing sequence table: %v", + ts.targetKeyspace, sequenceMetadata.usingTableName, terr) + } + maxID, terr := sqltypes.Proto3ToResult(qr).Rows[0][0].ToInt64() + if terr != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the max used sequence value for target 
table %s.%s in order to initialize the backing sequence table: %v", + ts.targetKeyspace, sequenceMetadata.usingTableName, terr) + } + srMu.Lock() + defer srMu.Unlock() + shardResults = append(shardResults, maxID) + return nil + }) + if ierr != nil { + return ierr + } + select { + case <-ictx.Done(): + return ictx.Err() + default: + } + if len(shardResults) == 0 { // This should never happen + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "did not get any results for the max used sequence value for target table %s.%s in order to initialize the backing sequence table", + ts.targetKeyspace, sequenceMetadata.usingTableName) + } + // Sort the values to find the max value across all shards. + sort.Slice(shardResults, func(i, j int) bool { + return shardResults[i] < shardResults[j] + }) + nextVal := shardResults[len(shardResults)-1] + 1 + // Now we need to update the sequence table, if needed, in order to + // ensure that that the next value it provides is > the current max. + sequenceShard, ierr := ts.wr.TopoServer().GetOnlyShard(ictx, sequenceMetadata.backingTableKeyspace) + if ierr != nil || sequenceShard == nil || sequenceShard.PrimaryAlias == nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the primary tablet for keyspace %s: %v", + sequenceMetadata.backingTableKeyspace, ierr) + } + sequenceTablet, ierr := ts.wr.TopoServer().GetTablet(ictx, sequenceShard.PrimaryAlias) + if ierr != nil || sequenceTablet == nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the primary tablet for keyspace %s: %v", + sequenceMetadata.backingTableKeyspace, ierr) + } + select { + case <-ictx.Done(): + return ictx.Err() + default: + } + if sequenceTablet.DbNameOverride != "" { + sequenceMetadata.backingTableDBName = sequenceTablet.DbNameOverride + } + query := sqlparser.BuildParsedQuery(sqlInitSequenceTable, + sqlescape.EscapeID(sequenceMetadata.backingTableDBName), + sqlescape.EscapeID(sequenceMetadata.backingTableName), + nextVal, + nextVal, + 
nextVal, + ) + // Now execute this on the primary tablet of the unsharded keyspace + // housing the backing table. + qr, ierr := ts.wr.ExecuteFetchAsApp(ictx, sequenceShard.PrimaryAlias, true, query.Query, 1) + if ierr != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to initialize the backing sequence table %s.%s: %v", + sequenceMetadata.backingTableDBName, sequenceMetadata.backingTableName, ierr) + } + // If we actually updated the backing sequence table, then we need + // to tell the primary tablet managing the sequence to refresh/reset + // its cache for the table. + if qr.RowsAffected == 0 { + return nil + } + select { + case <-ictx.Done(): + return ictx.Err() + default: + } + ts.Logger().Infof("Resetting sequence cache for backing table %s on shard %s/%s using tablet %s", + sequenceMetadata.backingTableName, sequenceShard.Keyspace(), sequenceShard.ShardName(), sequenceShard.PrimaryAlias) + ti, ierr := ts.TopoServer().GetTablet(ictx, sequenceShard.PrimaryAlias) + if ierr != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get primary tablet for keyspace %s: %v", + sequenceMetadata.backingTableKeyspace, ierr) + } + ierr = ts.TabletManagerClient().ResetSequences(ictx, ti.Tablet, []string{sequenceMetadata.backingTableName}) + if ierr != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to reset the sequence cache for backing table %s on shard %s/%s using tablet %s: %v", + sequenceMetadata.backingTableName, sequenceShard.Keyspace(), sequenceShard.ShardName(), sequenceShard.PrimaryAlias, ierr) + } + return nil + } + + initGroup, gctx := errgroup.WithContext(ctx) + for sequenceTableName, sequenceMetadata := range sequencesByBackingTable { + sequenceTableName, sequenceMetadata := sequenceTableName, sequenceMetadata // https://golang.org/doc/faq#closures_and_goroutines + initGroup.Go(func() error { + return initSequenceTable(gctx, sequenceTableName, sequenceMetadata) + }) + } + return initGroup.Wait() +} + +func (ts 
*trafficSwitcher) mustResetSequences(ctx context.Context) (bool, error) { + switch ts.workflowType { + case binlogdatapb.VReplicationWorkflowType_Migrate, + binlogdatapb.VReplicationWorkflowType_MoveTables: + return ts.isSequenceParticipating(ctx) + default: + return false, nil + } +} + +func (ts *trafficSwitcher) resetSequences(ctx context.Context) error { + var err error + mustReset := false + if mustReset, err = ts.mustResetSequences(ctx); err != nil { + return err + } + if !mustReset { + return nil + } + return ts.ForAllSources(func(source *workflow.MigrationSource) error { + ts.Logger().Infof("Resetting sequences for source shard %s.%s on tablet %s", + source.GetShard().Keyspace(), source.GetShard().ShardName(), source.GetPrimary().String()) + return ts.TabletManagerClient().ResetSequences(ctx, source.GetPrimary().Tablet, ts.Tables()) + }) +} diff --git a/go/vt/wrangler/traffic_switcher_env_test.go b/go/vt/wrangler/traffic_switcher_env_test.go index 0a5f883411b..ca5be2069a7 100644 --- a/go/vt/wrangler/traffic_switcher_env_test.go +++ b/go/vt/wrangler/traffic_switcher_env_test.go @@ -19,34 +19,45 @@ package wrangler import ( "context" "fmt" + "math/rand" "sync" "testing" "time" + "github.com/stretchr/testify/require" "golang.org/x/sync/semaphore" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/proto/vschema" - vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topotools" + 
"vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/queryservice" + "vitess.io/vitess/go/vt/vttablet/tabletconn" + "vitess.io/vitess/go/vt/vttablet/tabletconntest" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" "vitess.io/vitess/go/vt/vttablet/tmclient" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" ) const ( streamInfoQuery = "select id, source, message, cell, tablet_types, workflow_type, workflow_sub_type, defer_secondary_keys from _vt.vreplication where workflow='%s' and db_name='%s'" streamExtInfoQuery = "select id, source, pos, stop_pos, max_replication_lag, state, db_name, time_updated, transaction_timestamp, time_heartbeat, time_throttled, component_throttled, message, tags, workflow_type, workflow_sub_type, defer_secondary_keys, rows_copied from _vt.vreplication where db_name = '%s' and workflow = '%s'" copyStateQuery = "select table_name, lastpk from _vt.copy_state where vrepl_id = %d and id in (select max(id) from _vt.copy_state where vrepl_id = %d group by vrepl_id, table_name)" + maxValForSequence = "select max(`id`) as maxval from `vt_%s`.`%s`" ) var ( @@ -61,20 +72,22 @@ var ( ) type testMigraterEnv struct { - ts *topo.Server - wr *Wrangler - sourcePrimaries []*fakeTablet - targetPrimaries []*fakeTablet - dbSourceClients []*fakeDBClient - dbTargetClients []*fakeDBClient - allDBClients []*fakeDBClient - targetKeyspace string - sourceShards []string - targetShards []string - sourceKeyRanges []*topodatapb.KeyRange - targetKeyRanges []*topodatapb.KeyRange - tmeDB *fakesqldb.DB - mu sync.Mutex + ts *topo.Server + wr *Wrangler + sourcePrimaries []*fakeTablet + targetPrimaries []*fakeTablet + additionalPrimaries []*fakeTablet + dbSourceClients []*fakeDBClient + dbTargetClients []*fakeDBClient + dbAdditionalClients []*fakeDBClient + 
allDBClients []*fakeDBClient + targetKeyspace string + sourceShards []string + targetShards []string + sourceKeyRanges []*topodatapb.KeyRange + targetKeyRanges []*topodatapb.KeyRange + tmeDB *fakesqldb.DB + mu sync.Mutex } // testShardMigraterEnv has some convenience functions for adding expected queries. @@ -104,12 +117,16 @@ func newTestTableMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { // The test will Sprintf a from clause and where clause as needed. func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards, targetShards []string, fmtQuery string) *testMigraterEnv { tme := &testMigraterEnv{} - tme.ts = memorytopo.NewServer("cell1", "cell2") + tme.ts = memorytopo.NewServer(ctx, "cell1", "cell2") tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient()) tme.wr.sem = semaphore.NewWeighted(1) tme.sourceShards = sourceShards tme.targetShards = targetShards tme.tmeDB = fakesqldb.New(t) + useSequences := false + if len(sourceShards) == 1 && len(targetShards) > 1 { + useSequences = true + } expectVDiffQueries(tme.tmeDB) tabletID := 10 for _, shard := range sourceShards { @@ -138,6 +155,20 @@ func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards, tme.targetKeyRanges = append(tme.targetKeyRanges, targetKeyRange) } + dialerName := fmt.Sprintf("TrafficSwitcherTest-%s-%d", t.Name(), rand.Intn(1000000000)) + tabletconn.RegisterDialer(dialerName, func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + tme.mu.Lock() + defer tme.mu.Unlock() + allPrimaries := append(tme.sourcePrimaries, tme.targetPrimaries...) + for _, ft := range append(allPrimaries, tme.additionalPrimaries...) 
{ + if ft.Tablet.Alias.Uid == tablet.Alias.Uid { + return ft, nil + } + } + return nil, nil + }) + tabletconntest.SetProtocol("go.vt.wrangler.traffic_switcher_env_test", dialerName) + vs := &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ @@ -160,6 +191,17 @@ func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards, }, }, } + schema := &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t1", + }, + { + Name: "t2", + }, + }, + } + tme.setPrimarySchemas(schema) if len(sourceShards) != 1 { if err := tme.ts.SaveVSchema(ctx, "ks1", vs); err != nil { t.Fatal(err) @@ -170,6 +212,73 @@ func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards, t.Fatal(err) } } + if useSequences { + // Add another unsharded keyspace with sequence tables in + // order to test sequence handling. + uvs := &vschemapb.Keyspace{ + Sharded: false, + Tables: map[string]*vschemapb.Table{ + "t1_seq": { + Type: vindexes.TypeSequence, + }, + "t2_seq": { + Type: vindexes.TypeSequence, + }, + }, + } + tabletID += 10 + gfdb := fakesqldb.New(t) + tme.additionalPrimaries = append(tme.additionalPrimaries, newFakeTablet(t, tme.wr, "cell1", uint32(tabletID), topodatapb.TabletType_PRIMARY, gfdb, TabletKeyspaceShard(t, "global", "0"))) + if err := tme.ts.SaveVSchema(ctx, "global", uvs); err != nil { + t.Fatal(err) + } + + // Now use these sequence tables in the target sharded keyspace. + tks := vs.CloneVT() + tks.Tables["t1"].AutoIncrement = &vschemapb.AutoIncrement{ + Column: "id", + Sequence: "t1_seq", + } + tks.Tables["t2"].AutoIncrement = &vschemapb.AutoIncrement{ + Column: "id", + Sequence: "t2_seq", + } + if err := tme.ts.SaveVSchema(ctx, "ks2", tks); err != nil { + t.Fatal(err) + } + + // Now tell the fakesqldb used by the target keyspace tablets to expect + // the sequence management related queries against the target keyspace. 
+ tme.tmeDB.AddQuery(fmt.Sprintf(maxValForSequence, "ks2", "t1"), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "maxval", + "int64", + ), + "5", + ), + ) + tme.tmeDB.AddQuery(fmt.Sprintf(maxValForSequence, "ks2", "t2"), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "maxval", + "int64", + ), + "7", + ), + ) + + // Now tell the fakesqldb used by the global keyspace tablets to expect + // the sequence management related queries against the target keyspace. + gfdb.AddQuery( + sqlparser.BuildParsedQuery(sqlInitSequenceTable, sqlescape.EscapeID("vt_global"), sqlescape.EscapeID("t1_seq"), 6, 6, 6).Query, + &sqltypes.Result{RowsAffected: 0}, + ) + gfdb.AddQuery( + sqlparser.BuildParsedQuery(sqlInitSequenceTable, sqlescape.EscapeID("vt_global"), sqlescape.EscapeID("t2_seq"), 8, 8, 8).Query, + &sqltypes.Result{RowsAffected: 0}, + ) + } if err := tme.ts.RebuildSrvVSchema(ctx, nil); err != nil { t.Fatal(err) } @@ -203,13 +312,13 @@ func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards, }}, }, } - streamInfoRows = append(streamInfoRows, fmt.Sprintf("%d|%v|||", j+1, bls)) - streamExtInfoRows = append(streamExtInfoRows, fmt.Sprintf("%d|||||Running|ks1|%d|%d|0|0||||0", j+1, now, now)) + streamInfoRows = append(streamInfoRows, fmt.Sprintf("%d|%v||||1|0|0", j+1, bls)) + streamExtInfoRows = append(streamExtInfoRows, fmt.Sprintf("%d|||||Running|vt_ks1|%d|%d|0|0||1||0", j+1, now, now)) tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, j+1, j+1), noResult) } tme.dbTargetClients[i].addInvariant(streamInfoKs2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "id|source|message|cell|tablet_types", - "int64|varchar|varchar|varchar|varchar"), + "id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys", + "int64|varchar|varchar|varchar|varchar|int64|int64|int64"), streamInfoRows...)) tme.dbTargetClients[i].addInvariant(streamExtInfoKs2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( 
"id|source|pos|stop_pos|max_replication_lag|state|db_name|time_updated|transaction_timestamp|time_heartbeat|time_throttled|component_throttled|message|tags|workflow_type|workflow_sub_type|defer_secondary_keys", @@ -237,12 +346,12 @@ func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards, }}, }, } - streamInfoRows = append(streamInfoRows, fmt.Sprintf("%d|%v|||", j+1, bls)) + streamInfoRows = append(streamInfoRows, fmt.Sprintf("%d|%v||||1|0|0", j+1, bls)) tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, j+1, j+1), noResult) } tme.dbSourceClients[i].addInvariant(reverseStreamInfoKs1, sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "id|source|message|cell|tablet_types", - "int64|varchar|varchar|varchar|varchar"), + "id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys", + "int64|varchar|varchar|varchar|varchar|int64|int64|int64"), streamInfoRows...), ) } @@ -263,9 +372,172 @@ func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards, return tme } +// newTestTablePartialMigrater creates a test tablet migrater +// specifially for partial or shard by shard migrations. +// The shards must be the same on the source and target, and we +// must be moving a subset of them. +// fmtQuery should be of the form: 'select a, b %s group by a'. +// The test will Sprintf a from clause and where clause as needed. 
+func newTestTablePartialMigrater(ctx context.Context, t *testing.T, shards, shardsToMove []string, fmtQuery string) *testMigraterEnv { + require.Greater(t, len(shards), 1, "shard by shard migrations can only be done on sharded keyspaces") + tme := &testMigraterEnv{} + tme.ts = memorytopo.NewServer(ctx, "cell1", "cell2") + tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient()) + tme.wr.sem = semaphore.NewWeighted(1) + tme.sourceShards = shards + tme.targetShards = shards + tme.tmeDB = fakesqldb.New(t) + expectVDiffQueries(tme.tmeDB) + tabletID := 10 + for _, shard := range tme.sourceShards { + tme.sourcePrimaries = append(tme.sourcePrimaries, newFakeTablet(t, tme.wr, "cell1", uint32(tabletID), topodatapb.TabletType_PRIMARY, tme.tmeDB, TabletKeyspaceShard(t, "ks1", shard))) + tabletID += 10 + + _, sourceKeyRange, err := topo.ValidateShardName(shard) + if err != nil { + t.Fatal(err) + } + tme.sourceKeyRanges = append(tme.sourceKeyRanges, sourceKeyRange) + } + tpChoiceTablet := tme.sourcePrimaries[0].Tablet + tpChoice = &testTabletPickerChoice{ + keyspace: tpChoiceTablet.Keyspace, + shard: tpChoiceTablet.Shard, + } + for _, shard := range tme.targetShards { + tme.targetPrimaries = append(tme.targetPrimaries, newFakeTablet(t, tme.wr, "cell1", uint32(tabletID), topodatapb.TabletType_PRIMARY, tme.tmeDB, TabletKeyspaceShard(t, "ks2", shard))) + tabletID += 10 + + _, targetKeyRange, err := topo.ValidateShardName(shard) + if err != nil { + t.Fatal(err) + } + tme.targetKeyRanges = append(tme.targetKeyRanges, targetKeyRange) + } + + dialerName := fmt.Sprintf("TrafficSwitcherTest-%s-%d", t.Name(), rand.Intn(1000000000)) + tabletconn.RegisterDialer(dialerName, func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + tme.mu.Lock() + defer tme.mu.Unlock() + for _, ft := range append(tme.sourcePrimaries, tme.targetPrimaries...) 
{ + if ft.Tablet.Alias.Uid == tablet.Alias.Uid { + return ft, nil + } + } + return nil, nil + }) + tabletconntest.SetProtocol("go.vt.wrangler.traffic_switcher_env_test", dialerName) + + vs := &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "hash", + }}, + }, + "t2": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "hash", + }}, + }, + }, + } + err := tme.ts.SaveVSchema(ctx, "ks1", vs) + require.NoError(t, err) + err = tme.ts.SaveVSchema(ctx, "ks2", vs) + require.NoError(t, err) + err = tme.ts.RebuildSrvVSchema(ctx, nil) + require.NoError(t, err) + err = topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), tme.ts, "ks1", []string{"cell1"}, false) + require.NoError(t, err) + err = topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), tme.ts, "ks2", []string{"cell1"}, false) + require.NoError(t, err) + + tme.startTablets(t) + tme.createDBClients(ctx, t) + tme.setPrimaryPositions() + now := time.Now().Unix() + + for i, shard := range shards { + for _, shardToMove := range shardsToMove { + var streamInfoRows []string + var streamExtInfoRows []string + if shardToMove == shard { + bls := &binlogdatapb.BinlogSource{ + Keyspace: "ks1", + Shard: shard, + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: fmt.Sprintf(fmtQuery, fmt.Sprintf("from t1 where in_keyrange('%s')", shard)), + }, { + Match: "t2", + Filter: fmt.Sprintf(fmtQuery, fmt.Sprintf("from t2 where in_keyrange('%s')", shard)), + }}, + }, + } + streamInfoRows = append(streamInfoRows, fmt.Sprintf("%d|%v||||1|0|0", i+1, bls)) + streamExtInfoRows = append(streamExtInfoRows, fmt.Sprintf("%d|||||Running|vt_ks1|%d|%d|0|0|||1||0", i+1, now, now)) + } + tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, i+1, i+1), noResult) + 
tme.dbTargetClients[i].addInvariant(streamInfoKs2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys", + "int64|varchar|varchar|varchar|varchar|int64|int64|int64"), + streamInfoRows...)) + tme.dbTargetClients[i].addInvariant(streamExtInfoKs2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source|pos|stop_pos|max_replication_lag|state|db_name|time_updated|transaction_timestamp|time_heartbeat|time_throttled|component_throttled|message|tags|workflow_type|workflow_sub_type|defer_secondary_keys", + "int64|varchar|int64|int64|int64|varchar|varchar|int64|int64|int64|int64|int64|varchar|varchar|int64|int64|int64"), + streamExtInfoRows...)) + tme.dbTargetClients[i].addInvariant(reverseStreamExtInfoKs2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source|pos|stop_pos|max_replication_lag|state|db_name|time_updated|transaction_timestamp|time_heartbeat|time_throttled|component_throttled|message|tags|workflow_type|workflow_sub_type|defer_secondary_keys", + "int64|varchar|int64|int64|int64|varchar|varchar|int64|int64|int64|int64|int64|varchar|varchar|int64|int64|int64"), + streamExtInfoRows...)) + } + } + + for i, shard := range shards { + for _, shardToMove := range shardsToMove { + var streamInfoRows []string + if shardToMove == shard { + bls := &binlogdatapb.BinlogSource{ + Keyspace: "ks2", + Shard: shard, + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: fmt.Sprintf(fmtQuery, fmt.Sprintf("from t1 where in_keyrange('%s')", shard)), + }, { + Match: "t2", + Filter: fmt.Sprintf(fmtQuery, fmt.Sprintf("from t2 where in_keyrange('%s')", shard)), + }}, + }, + } + streamInfoRows = append(streamInfoRows, fmt.Sprintf("%d|%v||||1|0|0", i+1, bls)) + tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, i+1, i+1), noResult) + } + tme.dbSourceClients[i].addInvariant(reverseStreamInfoKs1, sqltypes.MakeTestResult(sqltypes.MakeTestFields( 
+ "id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys", + "int64|varchar|varchar|varchar|varchar|int64|int64|int64"), + streamInfoRows...), + ) + } + } + + tme.targetKeyspace = "ks2" + return tme +} + func newTestShardMigrater(ctx context.Context, t *testing.T, sourceShards, targetShards []string) *testShardMigraterEnv { tme := &testShardMigraterEnv{} - tme.ts = memorytopo.NewServer("cell1", "cell2") + tme.ts = memorytopo.NewServer(ctx, "cell1", "cell2") tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient()) tme.sourceShards = sourceShards tme.targetShards = targetShards @@ -301,28 +573,41 @@ func newTestShardMigrater(ctx context.Context, t *testing.T, sourceShards, targe tme.targetKeyRanges = append(tme.targetKeyRanges, targetKeyRange) } + dialerName := fmt.Sprintf("TrafficSwitcherTest-%s-%d", t.Name(), rand.Intn(1000000000)) + tabletconn.RegisterDialer(dialerName, func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + tme.mu.Lock() + defer tme.mu.Unlock() + for _, ft := range append(tme.sourcePrimaries, tme.targetPrimaries...) 
{ + if ft.Tablet.Alias.Uid == tablet.Alias.Uid { + return ft, nil + } + } + return nil, nil + }) + tabletconntest.SetProtocol("go.vt.wrangler.traffic_switcher_env_test", dialerName) + vs := &vschemapb.Keyspace{ Sharded: true, - Vindexes: map[string]*vschema.Vindex{ + Vindexes: map[string]*vschemapb.Vindex{ "thash": { Type: "hash", }, }, - Tables: map[string]*vschema.Table{ + Tables: map[string]*vschemapb.Table{ "t1": { - ColumnVindexes: []*vschema.ColumnVindex{{ + ColumnVindexes: []*vschemapb.ColumnVindex{{ Columns: []string{"c1"}, Name: "thash", }}, }, "t2": { - ColumnVindexes: []*vschema.ColumnVindex{{ + ColumnVindexes: []*vschemapb.ColumnVindex{{ Columns: []string{"c1"}, Name: "thash", }}, }, "t3": { - ColumnVindexes: []*vschema.ColumnVindex{{ + ColumnVindexes: []*vschemapb.ColumnVindex{{ Columns: []string{"c1"}, Name: "thash", }}, @@ -361,9 +646,9 @@ func newTestShardMigrater(ctx context.Context, t *testing.T, sourceShards, targe }}, }, } - rows = append(rows, fmt.Sprintf("%d|%v||||0|0|0", j+1, bls)) - rowsRdOnly = append(rows, fmt.Sprintf("%d|%v|||RDONLY|0|0|0", j+1, bls)) - streamExtInfoRows = append(streamExtInfoRows, fmt.Sprintf("%d|||||Running|ks1|%d|%d|0|0|||", j+1, now, now)) + rows = append(rows, fmt.Sprintf("%d|%v||||1|0|0", j+1, bls)) + rowsRdOnly = append(rows, fmt.Sprintf("%d|%v|||RDONLY|1|0|0", j+1, bls)) + streamExtInfoRows = append(streamExtInfoRows, fmt.Sprintf("%d|||||Running|vt_ks1|%d|%d|0|0|||", j+1, now, now)) tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, j+1, j+1), noResult) } tme.dbTargetClients[i].addInvariant(streamInfoKs, sqltypes.MakeTestResult(sqltypes.MakeTestFields( @@ -402,6 +687,7 @@ func (tme *testMigraterEnv) startTablets(t *testing.T) { tme.mu.Lock() defer tme.mu.Unlock() allPrimarys := append(tme.sourcePrimaries, tme.targetPrimaries...) + allPrimarys = append(allPrimarys, tme.additionalPrimaries...) 
for _, primary := range allPrimarys { primary.StartActionLoop(t, tme.wr) } @@ -432,6 +718,9 @@ func (tme *testMigraterEnv) stopTablets(t *testing.T) { for _, primary := range tme.targetPrimaries { primary.StopActionLoop(t) } + for _, primary := range tme.additionalPrimaries { + primary.StopActionLoop(t) + } } func (tme *testMigraterEnv) createDBClients(ctx context.Context, t *testing.T) { @@ -454,14 +743,20 @@ func (tme *testMigraterEnv) createDBClients(ctx context.Context, t *testing.T) { primary.TM.VREngine = vreplication.NewTestEngine(tme.ts, primary.Tablet.GetAlias().GetCell(), primary.FakeMysqlDaemon, dbClientFactory, dbClientFactory, dbclient.DBName(), nil) primary.TM.VREngine.Open(ctx) } + for _, primary := range tme.additionalPrimaries { + log.Infof("Adding as additionalPrimary %s", primary.Tablet.Alias) + dbclient := newFakeDBClient(primary.Tablet.Alias.String()) + tme.dbAdditionalClients = append(tme.dbAdditionalClients, dbclient) + } tme.allDBClients = append(tme.dbSourceClients, tme.dbTargetClients...) + tme.allDBClients = append(tme.allDBClients, tme.dbAdditionalClients...) 
} func (tme *testMigraterEnv) setPrimaryPositions() { for _, primary := range tme.sourcePrimaries { - primary.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 5: mysql.MariadbGTID{ + primary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 5: replication.MariadbGTID{ Domain: 5, Server: 456, Sequence: 892, @@ -470,9 +765,9 @@ func (tme *testMigraterEnv) setPrimaryPositions() { } } for _, primary := range tme.targetPrimaries { - primary.FakeMysqlDaemon.CurrentPrimaryPosition = mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 5: mysql.MariadbGTID{ + primary.FakeMysqlDaemon.CurrentPrimaryPosition = replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 5: replication.MariadbGTID{ Domain: 5, Server: 456, Sequence: 893, @@ -482,6 +777,15 @@ func (tme *testMigraterEnv) setPrimaryPositions() { } } +func (tme *testMigraterEnv) setPrimarySchemas(schema *tabletmanagerdatapb.SchemaDefinition) { + for _, primary := range tme.sourcePrimaries { + primary.FakeMysqlDaemon.Schema = schema + } + for _, primary := range tme.targetPrimaries { + primary.FakeMysqlDaemon.Schema = schema + } +} + func (tme *testMigraterEnv) expectNoPreviousJournals() { // validate that no previous journals exist for _, dbclient := range tme.dbSourceClients { @@ -614,8 +918,12 @@ func (tme *testMigraterEnv) close(t *testing.T) { for _, dbclient := range tme.dbTargetClients { dbclient.Close() } + for _, dbclient := range tme.dbAdditionalClients { + dbclient.Close() + } tme.tmeDB.CloseAllConnections() tme.ts.Close() tme.wr.tmc.Close() tme.wr = nil + tme.tmeDB.Close() } diff --git a/go/vt/wrangler/traffic_switcher_test.go b/go/vt/wrangler/traffic_switcher_test.go index c0a9c41f98e..19ce3001797 100644 --- a/go/vt/wrangler/traffic_switcher_test.go +++ b/go/vt/wrangler/traffic_switcher_test.go @@ -29,12 +29,14 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" + 
"vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/vtctl/workflow" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" ) var ( @@ -277,7 +279,7 @@ func TestTableMigrateMainflow(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't switch primary with SwitchReads. _, err = tme.wr.SwitchReads(ctx, tme.targetKeyspace, "test", []topodatapb.TabletType{topodatapb.TabletType_PRIMARY}, nil, workflow.DirectionForward, false) - want := "tablet type must be REPLICA or RDONLY: PRIMARY" + want := "invalid tablet type: tablet type must be REPLICA or RDONLY: PRIMARY" if err == nil || err.Error() != want { t.Errorf("SwitchReads(primary) err: %v, want %v", err, want) } @@ -347,7 +349,7 @@ func TestTableMigrateMainflow(t *testing.T) { cancelMigration() switchWrites(tme) - _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 0*time.Second, false, false, true, false) + _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 0*time.Second, false, false, true, false, true) want = "DeadlineExceeded" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("SwitchWrites(0 timeout) err: %v, must contain %v", err, want) @@ -455,7 +457,7 @@ func TestTableMigrateMainflow(t *testing.T) { } deleteTargetVReplication() - journalID, _, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false) + journalID, _, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false, true) if err != nil { t.Fatal(err) } @@ -598,7 +600,7 @@ func TestShardMigrateMainflow(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't switch primary with SwitchReads. 
_, err = tme.wr.SwitchReads(ctx, tme.targetKeyspace, "test", []topodatapb.TabletType{topodatapb.TabletType_PRIMARY}, nil, workflow.DirectionForward, false) - want := "tablet type must be REPLICA or RDONLY: PRIMARY" + want := "invalid tablet type: tablet type must be REPLICA or RDONLY: PRIMARY" if err == nil || err.Error() != want { t.Errorf("SwitchReads(primary) err: %v, want %v", err, want) } @@ -662,7 +664,7 @@ func TestShardMigrateMainflow(t *testing.T) { } cancelMigration() - _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 0*time.Second, false, false, true, false) + _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 0*time.Second, false, false, true, false, true) want = "DeadlineExceeded" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("SwitchWrites(0 timeout) err: %v, must contain %v", err, want) @@ -751,7 +753,7 @@ func TestShardMigrateMainflow(t *testing.T) { } freezeTargetVReplication() - journalID, _, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false) + journalID, _, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false, true) if err != nil { t.Fatal(err) } @@ -870,7 +872,7 @@ func testTableMigrateOneToMany(t *testing.T, keepData, keepRoutingRules bool) { tme.dbSourceClients[0].addQueryRE(tsCheckJournals, &sqltypes.Result{}, nil) switchWrites(tme) - _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, false, false) + _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, false, false, true) if err != nil { t.Fatal(err) } @@ -889,7 +891,11 @@ func testTableMigrateOneToMany(t *testing.T, keepData, keepRoutingRules bool) { " Keyspace ks1 Shard 0 DbName ks1 Tablet 10 Table t1", " Keyspace ks1 Shard 0 DbName ks1 Tablet 10 Table t2", "Denied tables [t1,t2] will be removed from:", - " Keyspace ks1 Shard 0 Tablet 10") + " Keyspace ks1 Shard 0 
Tablet 10", + "Denied tables [t1,t2] will be removed from:", + " Keyspace ks2 Shard -80 Tablet 20", + " Keyspace ks2 Shard 80- Tablet 30", + ) } wantdryRunDropSources = append(wantdryRunDropSources, "Delete reverse vreplication streams on source:", " Keyspace ks1 Shard 0 Workflow test_reverse DbName ks1 Tablet 10", @@ -919,7 +925,11 @@ func testTableMigrateOneToMany(t *testing.T, keepData, keepRoutingRules bool) { "Keyspace ks1 Shard 0 DbName ks1 Tablet 10 Table t1", " Keyspace ks1 Shard 0 DbName ks1 Tablet 10 Table t2", "Denied tables [t1,t2] will be removed from:", - " Keyspace ks1 Shard 0 Tablet 10") + " Keyspace ks1 Shard 0 Tablet 10", + "Denied tables [t1,t2] will be removed from:", + " Keyspace ks2 Shard -80 Tablet 20", + " Keyspace ks2 Shard 80- Tablet 30", + ) } wantdryRunRenameSources = append(wantdryRunRenameSources, "Delete reverse vreplication streams on source:", " Keyspace ks1 Shard 0 Workflow test_reverse DbName ks1 Tablet 10", @@ -1000,6 +1010,7 @@ func TestTableMigrateOneToManyDryRun(t *testing.T) { "Wait for VReplication on stopped streams to catchup for up to 1s", "Create reverse replication workflow test_reverse", "Create journal entries on source databases", + "The following sequence backing tables used by tables being moved will be initialized: t1_seq,t2_seq", "Enable writes on keyspace ks2 tables [t1,t2]", "Switch routing from keyspace ks1 to keyspace ks2", "Routing rules for tables [t1,t2] will be updated", @@ -1092,7 +1103,7 @@ func TestTableMigrateOneToManyDryRun(t *testing.T) { deleteTargetVReplication() switchWrites(tme) - _, results, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, false, true) + _, results, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, false, true, true) require.NoError(t, err) require.Empty(t, cmp.Diff(wantdryRunWrites, *results)) } @@ -1180,7 +1191,7 @@ func TestMigrateFailJournal(t *testing.T) { 
tme.dbSourceClients[1].addQueryRE("insert into _vt.resharding_journal", nil, errors.New("journaling intentionally failed")) switchWrites(tme) - _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false) + _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false, true) want := "journaling intentionally failed" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("SwitchWrites(0 timeout) err: %v, must contain %v", err, want) @@ -1242,7 +1253,7 @@ func TestTableMigrateJournalExists(t *testing.T) { tme.dbTargetClients[1].addQuery("select * from _vt.vreplication where id = 2", stoppedResult(2), nil) switchWrites(tme) - _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false) + _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false, true) if err != nil { t.Fatal(err) } @@ -1320,7 +1331,7 @@ func TestShardMigrateJournalExists(t *testing.T) { tme.dbTargetClients[1].addQuery("select * from _vt.vreplication where id = 2", stoppedResult(2), nil) switchWrites(tme) - _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false) + _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false, true) if err != nil { t.Fatal(err) } @@ -1385,7 +1396,7 @@ func TestTableMigrateCancel(t *testing.T) { cancelMigration() switchWrites(tme) - _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, true, false, false, false) + _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, true, false, false, false, true) if err != nil { t.Fatal(err) } @@ -1447,7 +1458,7 @@ func TestTableMigrateCancelDryRun(t *testing.T) { cancelMigration() switchWrites(tme) - _, dryRunResults, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, true, 
false, false, true) + _, dryRunResults, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, true, false, false, true, true) require.NoError(t, err) require.Empty(t, cmp.Diff(want, *dryRunResults)) } @@ -1548,7 +1559,7 @@ func TestTableMigrateNoReverse(t *testing.T) { deleteTargetVReplication() switchWrites(tme) - _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, false, false) + _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, false, false, true) if err != nil { t.Fatal(err) } @@ -1590,7 +1601,7 @@ func TestMigrateFrozen(t *testing.T) { tme.dbTargetClients[1].addQuery(streamInfoKs2, &sqltypes.Result{}, nil) switchWrites(tme) - _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 0*time.Second, false, false, true, false) + _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 0*time.Second, false, false, true, false, true) if err != nil { t.Fatal(err) } @@ -1900,7 +1911,7 @@ func TestShardMigrateNoAvailableTabletsForReverseReplication(t *testing.T) { //------------------------------------------------------------------------------------------------------------------- // Can't switch primary with SwitchReads. 
_, err = tme.wr.SwitchReads(ctx, tme.targetKeyspace, "test", []topodatapb.TabletType{topodatapb.TabletType_PRIMARY}, nil, workflow.DirectionForward, false) - want := "tablet type must be REPLICA or RDONLY: PRIMARY" + want := "invalid tablet type: tablet type must be REPLICA or RDONLY: PRIMARY" if err == nil || err.Error() != want { t.Errorf("SwitchReads(primary) err: %v, want %v", err, want) } @@ -1965,7 +1976,7 @@ func TestShardMigrateNoAvailableTabletsForReverseReplication(t *testing.T) { cancelMigration() switchWrites(tme) - _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 0*time.Second, false, false, true, false) + _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 0*time.Second, false, false, true, false, true) want = "DeadlineExceeded" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("SwitchWrites(0 timeout) err: %v, must contain %v", err, want) @@ -2060,7 +2071,7 @@ func TestShardMigrateNoAvailableTabletsForReverseReplication(t *testing.T) { invariants[fmt.Sprintf("%s-%d", streamInfoKs, i)] = tme.dbTargetClients[i].getInvariant(streamInfoKs) tme.dbTargetClients[i].addInvariant(streamInfoKs, tme.dbTargetClients[i].getInvariant(streamInfoKs+"-rdonly")) } - _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false) + _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false, true) require.Error(t, err) require.True(t, strings.Contains(err.Error(), "no tablet found")) require.True(t, strings.Contains(err.Error(), "-80")) @@ -2070,7 +2081,7 @@ func TestShardMigrateNoAvailableTabletsForReverseReplication(t *testing.T) { tme.dbTargetClients[i].addInvariant(streamInfoKs, invariants[fmt.Sprintf("%s-%d", streamInfoKs, i)]) } - journalID, _, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false) + journalID, _, err := tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 
1*time.Second, false, false, true, false, true) if err != nil { t.Fatal(err) } @@ -2143,6 +2154,37 @@ func TestIsPartialMoveTables(t *testing.T) { } } +// TestNoOrphanedRoutingRulesOnFailedCreate tests that no orphaned +// routing rules are left in place when the workflow creation +// fails -- specifically at the point where we try and create the +// workflow streams. +func TestNoOrphanedRoutingRulesOnFailedCreate(t *testing.T) { + ctx := context.Background() + tme := newTestTableMigraterCustom(ctx, t, []string{"0"}, []string{"-80", "80-"}, "select * %s") + defer tme.close(t) + + // The target keyspace is sharded. Let's remove any vschema + // table definitions so that we know the workflow creation will + // fail. Let's also be sure that the routing rules are empty. + err := topotools.SaveRoutingRules(ctx, tme.wr.ts, nil) + require.NoError(t, err, "failed to save routing rules") + err = tme.ts.SaveVSchema(ctx, "ks2", &vschemapb.Keyspace{ + Sharded: true, + }) + require.NoError(t, err, "failed to save vschema") + err = tme.ts.RebuildSrvVSchema(ctx, nil) + require.NoError(t, err, "failed to rebuild serving vschema") + err = topotools.RebuildKeyspace(ctx, logutil.NewConsoleLogger(), tme.ts, "ks1", []string{"cell1"}, false) + require.NoError(t, err, "failed to rebuild keyspace") + + err = tme.wr.MoveTables(ctx, "testwf", "ks1", "ks2", "t1,t2", "cell1", "primary,replica", false, "", true, false, "", false, false, "", "", nil, false, false) + require.Error(t, err) + + // Check that there are no orphaned routing rules. 
+ emptyRules := make(map[string][]string) + checkRouting(t, tme.wr, emptyRules) +} + func checkRouting(t *testing.T, wr *Wrangler, want map[string][]string) { t.Helper() ctx := context.Background() @@ -2257,11 +2299,11 @@ func getResult(id int, state string, keyspace string, shard string) *sqltypes.Re } func stoppedResult(id int) *sqltypes.Result { - return getResult(id, "Stopped", tpChoice.keyspace, tpChoice.shard) + return getResult(id, binlogdatapb.VReplicationWorkflowState_Stopped.String(), tpChoice.keyspace, tpChoice.shard) } func runningResult(id int) *sqltypes.Result { - return getResult(id, "Running", tpChoice.keyspace, tpChoice.shard) + return getResult(id, binlogdatapb.VReplicationWorkflowState_Running.String(), tpChoice.keyspace, tpChoice.shard) } func switchWrites(tmeT any) { diff --git a/go/vt/wrangler/vdiff.go b/go/vt/wrangler/vdiff.go index 47fe99388a8..05ff4a82151 100644 --- a/go/vt/wrangler/vdiff.go +++ b/go/vt/wrangler/vdiff.go @@ -29,7 +29,9 @@ import ( "google.golang.org/protobuf/encoding/prototext" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" @@ -112,9 +114,9 @@ type vdiff struct { // compareColInfo contains the metadata for a column of the table being diffed type compareColInfo struct { - colIndex int // index of the column in the filter's select - collation collations.Collation // is the collation of the column, if any - isPK bool // is this column part of the primary key + colIndex int // index of the column in the filter's select + collation collations.ID // is the collation of the column, if any + isPK bool // is this column part of the primary key } // tableDiffer performs a diff for one table in the workflow. 
@@ -152,7 +154,7 @@ type tableDiffer struct { type shardStreamer struct { primary *topo.TabletInfo tablet *topodatapb.Tablet - position mysql.Position + position replication.Position snapshotPosition string result chan *sqltypes.Result err error @@ -528,7 +530,7 @@ func findPKs(table *tabletmanagerdatapb.TableDefinition, targetSelect *sqlparser // getColumnCollations determines the proper collation to use for each // column in the table definition leveraging MySQL's collation inheritence // rules. -func getColumnCollations(table *tabletmanagerdatapb.TableDefinition) (map[string]collations.Collation, error) { +func getColumnCollations(table *tabletmanagerdatapb.TableDefinition) (map[string]collations.ID, error) { collationEnv := collations.Local() createstmt, err := sqlparser.Parse(table.Schema) if err != nil { @@ -546,7 +548,7 @@ func getColumnCollations(table *tabletmanagerdatapb.TableDefinition) (map[string tableCollation := tableschema.GetCollation() // If no explicit collation is specified for the column then we need // to walk the inheritence tree. - getColumnCollation := func(column *sqlparser.ColumnDefinition) collations.Collation { + getColumnCollation := func(column *sqlparser.ColumnDefinition) collations.ID { // If there's an explicit collation listed then use that. if column.Type.Options.Collate != "" { return collationEnv.LookupByName(strings.ToLower(column.Type.Options.Collate)) @@ -567,14 +569,14 @@ func getColumnCollations(table *tabletmanagerdatapb.TableDefinition) (map[string } // The table is using the global default charset and collation and // we inherite that. - return collations.Default().Get() + return collations.Default() } - columnCollations := make(map[string]collations.Collation) + columnCollations := make(map[string]collations.ID) for _, column := range tableschema.TableSpec.Columns { // If it's not a character based type then no collation is used. 
if !sqltypes.IsQuoted(column.Type.SQLType()) { - columnCollations[column.Name.Lowered()] = nil + columnCollations[column.Name.Lowered()] = collations.Unknown continue } columnCollations[column.Name.Lowered()] = getColumnCollation(column) @@ -685,17 +687,16 @@ func (df *vdiff) buildTablePlan(table *tabletmanagerdatapb.TableDefinition, quer // Check if it's an aggregate expression if expr, ok := selExpr.Expr.(sqlparser.AggrFunc); ok { - switch fname := strings.ToLower(expr.AggrName()); fname { + switch fname := expr.AggrName(); fname { case "count", "sum": // this will only work as long as aggregates can be pushed down to tablets // this won't work: "select count(*) from (select id from t limit 1)" // since vreplication only handles simple tables (no joins/derived tables) this is fine for now // but will need to be revisited when we add such support to vreplication - aggregateFuncType := "sum" - aggregates = append(aggregates, &engine.AggregateParams{ - Opcode: opcode.SupportedAggregates[aggregateFuncType], - Col: len(sourceSelect.SelectExprs) - 1, - }) + aggregates = append(aggregates, engine.NewAggregateParam( + /*opcode*/ opcode.AggregateSum, + /*offset*/ len(sourceSelect.SelectExprs)-1, + /*alias*/ "")) } } default: @@ -783,10 +784,10 @@ func newMergeSorter(participants map[string]*shardStreamer, comparePKs []compare for _, cpk := range comparePKs { weightStringCol := -1 // if the collation is nil or unknown, use binary collation to compare as bytes - if cpk.collation == nil { + if cpk.collation == collations.Unknown { ob = append(ob, engine.OrderByParams{Col: cpk.colIndex, WeightStringCol: weightStringCol, Type: sqltypes.Unknown, CollationID: collations.CollationBinaryID}) } else { - ob = append(ob, engine.OrderByParams{Col: cpk.colIndex, WeightStringCol: weightStringCol, Type: sqltypes.Unknown, CollationID: cpk.collation.ID()}) + ob = append(ob, engine.OrderByParams{Col: cpk.colIndex, WeightStringCol: weightStringCol, Type: sqltypes.Unknown, CollationID: 
cpk.collation}) } } return &engine.MergeSort{ @@ -809,7 +810,8 @@ func (df *vdiff) selectTablets(ctx context.Context, ts *trafficSwitcher) error { if ts.ExternalTopo() != nil { sourceTopo = ts.ExternalTopo() } - tp, err := discovery.NewTabletPicker(ctx, sourceTopo, []string{df.sourceCell}, df.sourceCell, df.ts.SourceKeyspaceName(), shard, df.tabletTypesStr, discovery.TabletPickerOptions{}) + tp, err := discovery.NewTabletPicker(ctx, sourceTopo, []string{df.sourceCell}, df.sourceCell, + df.ts.SourceKeyspaceName(), shard, df.tabletTypesStr, discovery.TabletPickerOptions{}) if err != nil { return err } @@ -826,8 +828,18 @@ func (df *vdiff) selectTablets(ctx context.Context, ts *trafficSwitcher) error { wg.Add(1) go func() { defer wg.Done() + includeNonServingTablets := false + if df.ts.workflowType == binlogdatapb.VReplicationWorkflowType_Reshard { + // For resharding, the target shards could be non-serving if traffic has already been switched once. + // When shards are created their IsPrimaryServing attribute is set to true. However, when the traffic is switched + // it is set to false for the shards we are switching from. We don't have a way to know if we have + // switched or not, so we just include non-serving tablets for all reshards. 
+ includeNonServingTablets = true + } err2 = df.forAll(df.targets, func(shard string, target *shardStreamer) error { - tp, err := discovery.NewTabletPicker(ctx, df.ts.TopoServer(), []string{df.targetCell}, df.targetCell, df.ts.TargetKeyspaceName(), shard, df.tabletTypesStr, discovery.TabletPickerOptions{}) + tp, err := discovery.NewTabletPicker(ctx, df.ts.TopoServer(), []string{df.targetCell}, df.targetCell, + df.ts.TargetKeyspaceName(), shard, df.tabletTypesStr, + discovery.TabletPickerOptions{IncludeNonServingTablets: includeNonServingTablets}) if err != nil { return err } @@ -912,8 +924,8 @@ func (df *vdiff) startQueryStreams(ctx context.Context, keyspace string, partici if participant.position.IsZero() { return fmt.Errorf("workflow %s.%s: stream has not started on tablet %s", df.targetKeyspace, df.workflow, participant.primary.Alias.String()) } - log.Infof("WaitForPosition: tablet %s should reach position %s", participant.tablet.Alias.String(), mysql.EncodePosition(participant.position)) - if err := df.ts.TabletManagerClient().WaitForPosition(waitCtx, participant.tablet, mysql.EncodePosition(participant.position)); err != nil { + log.Infof("WaitForPosition: tablet %s should reach position %s", participant.tablet.Alias.String(), replication.EncodePosition(participant.position)) + if err := df.ts.TabletManagerClient().WaitForPosition(waitCtx, participant.tablet, replication.EncodePosition(participant.position)); err != nil { log.Errorf("WaitForPosition error: %s", err) return vterrors.Wrapf(err, "WaitForPosition for tablet %v", topoproto.TabletAliasString(participant.tablet.Alias)) } @@ -1030,7 +1042,7 @@ func (df *vdiff) restartTargets(ctx context.Context) error { // Let's retry a few times if we get a retryable error. 
for i := 1; i <= 3; i++ { _, err = df.ts.TabletManagerClient().VReplicationExec(ctx, target.primary.Tablet, query) - if err == nil || !mysql.IsEphemeralError(err) { + if err == nil || !sqlerror.IsEphemeralError(err) { break } log.Warningf("Encountered the following error while restarting the %q VReplication workflow on %q, will retry (attempt #%d): %v", @@ -1081,7 +1093,7 @@ func newPrimitiveExecutor(ctx context.Context, prim engine.Primitive) *primitive select { case pe.resultch <- qr: case <-ctx.Done(): - return vterrors.Wrap(ctx.Err(), "Outer Stream") + return vterrors.Wrap(ctx.Err(), "LHS Stream") } return nil }) @@ -1293,10 +1305,9 @@ func (td *tableDiffer) compare(sourceRow, targetRow []sqltypes.Value, cols []com var err error var collationID collations.ID // if the collation is nil or unknown, use binary collation to compare as bytes - if col.collation == nil { + collationID = col.collation + if col.collation == collations.Unknown { collationID = collations.CollationBinaryID - } else { - collationID = col.collation.ID() } c, err = evalengine.NullsafeCompare(sourceRow[compareIndex], targetRow[compareIndex], collationID) if err != nil { diff --git a/go/vt/wrangler/vdiff_env_test.go b/go/vt/wrangler/vdiff_env_test.go index e5996774249..36d4fda1171 100644 --- a/go/vt/wrangler/vdiff_env_test.go +++ b/go/vt/wrangler/vdiff_env_test.go @@ -69,11 +69,11 @@ type testVDiffEnv struct { //---------------------------------------------- // testVDiffEnv -func newTestVDiffEnv(t testing.TB, sourceShards, targetShards []string, query string, positions map[string]string) *testVDiffEnv { +func newTestVDiffEnv(t testing.TB, ctx context.Context, sourceShards, targetShards []string, query string, positions map[string]string) *testVDiffEnv { env := &testVDiffEnv{ workflow: "vdiffTest", tablets: make(map[int]*testVDiffTablet), - topoServ: memorytopo.NewServer("cell"), + topoServ: memorytopo.NewServer(ctx, "cell"), cell: "cell", tabletType: topodatapb.TabletType_REPLICA, tmc: 
newTestVDiffTMClient(), diff --git a/go/vt/wrangler/vdiff_test.go b/go/vt/wrangler/vdiff_test.go index 9dbdf5e3a9c..ac57c9bcf68 100644 --- a/go/vt/wrangler/vdiff_test.go +++ b/go/vt/wrangler/vdiff_test.go @@ -91,12 +91,12 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c1, c2 from t1 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true}, {1, collations.Collation(nil), false}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), }, }, { input: &binlogdatapb.Rule{ @@ -108,12 +108,12 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c1, c2 from t1 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true}, {1, collations.Collation(nil), false}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), + sourcePrimitive: newMergeSorter(nil, 
[]compareColInfo{{0, collations.Unknown, true}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), }, }, { input: &binlogdatapb.Rule{ @@ -125,12 +125,12 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c1, c2 from t1 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true}, {1, collations.Collation(nil), false}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), }, }, { input: &binlogdatapb.Rule{ @@ -142,12 +142,12 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c2, c1 from t1 order by c1 asc", targetExpression: "select c2, c1 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), false}, {1, collations.Collation(nil), true}}, - comparePKs: []compareColInfo{{1, collations.Collation(nil), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, false}, {1, collations.Unknown, true}}, + comparePKs: []compareColInfo{{1, collations.Unknown, true}}, pkCols: []int{1}, selectPks: []int{1}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Collation(nil), true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Collation(nil), true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, 
collations.Unknown, true}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Unknown, true}}), }, }, { input: &binlogdatapb.Rule{ @@ -159,12 +159,12 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c0 as c1, c2 from t2 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true}, {1, collations.Collation(nil), false}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), }, }, { // non-pk text column. 
@@ -177,12 +177,12 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "nonpktext", sourceExpression: "select c1, textcol from nonpktext order by c1 asc", targetExpression: "select c1, textcol from nonpktext order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true}, {1, collations.Collation(nil), false}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), }, }, { // non-pk text column, different order. 
@@ -195,12 +195,12 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "nonpktext", sourceExpression: "select textcol, c1 from nonpktext order by c1 asc", targetExpression: "select textcol, c1 from nonpktext order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), false}, {1, collations.Collation(nil), true}}, - comparePKs: []compareColInfo{{1, collations.Collation(nil), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, false}, {1, collations.Unknown, true}}, + comparePKs: []compareColInfo{{1, collations.Unknown, true}}, pkCols: []int{1}, selectPks: []int{1}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Collation(nil), true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Collation(nil), true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Unknown, true}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Unknown, true}}), }, }, { // pk text column. 
@@ -213,12 +213,12 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "pktext", sourceExpression: "select textcol, c2 from pktext order by textcol asc", targetExpression: "select textcol, c2 from pktext order by textcol asc", - compareCols: []compareColInfo{{0, collations.Default().Get(), true}, {1, collations.Collation(nil), false}}, - comparePKs: []compareColInfo{{0, collations.Default().Get(), true}}, + compareCols: []compareColInfo{{0, collations.Default(), true}, {1, collations.Unknown, false}}, + comparePKs: []compareColInfo{{0, collations.Default(), true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Default().Get(), false}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Default().Get(), false}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Default(), false}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Default(), false}}), }, }, { // pk text column, different order. 
@@ -231,12 +231,12 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "pktext", sourceExpression: "select c2, textcol from pktext order by textcol asc", targetExpression: "select c2, textcol from pktext order by textcol asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), false}, {1, collations.Default().Get(), true}}, - comparePKs: []compareColInfo{{1, collations.Default().Get(), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, false}, {1, collations.Default(), true}}, + comparePKs: []compareColInfo{{1, collations.Default(), true}}, pkCols: []int{1}, selectPks: []int{1}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Default().Get(), false}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Default().Get(), false}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Default(), false}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Default(), false}}), }, }, { // text column as expression. 
@@ -249,12 +249,12 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "pktext", sourceExpression: "select c2, a + b as textcol from pktext order by textcol asc", targetExpression: "select c2, textcol from pktext order by textcol asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), false}, {1, collations.Default().Get(), true}}, - comparePKs: []compareColInfo{{1, collations.Default().Get(), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, false}, {1, collations.Default(), true}}, + comparePKs: []compareColInfo{{1, collations.Default(), true}}, pkCols: []int{1}, selectPks: []int{1}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Default().Get(), false}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Default().Get(), false}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Default(), false}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Default(), false}}), }, }, { input: &binlogdatapb.Rule{ @@ -265,12 +265,12 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "multipk", sourceExpression: "select c1, c2 from multipk order by c1 asc, c2 asc", targetExpression: "select c1, c2 from multipk order by c1 asc, c2 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true}, {1, collations.Collation(nil), true}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true}, {1, collations.Collation(nil), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, true}}, + comparePKs: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, true}}, pkCols: []int{0, 1}, selectPks: []int{0, 1}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}, {1, collations.Collation(nil), true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}, {1, collations.Collation(nil), true}}), + 
sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, true}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, true}}), }, }, { // in_keyrange @@ -283,12 +283,12 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c1, c2 from t1 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true}, {1, collations.Collation(nil), false}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), }, }, { // in_keyrange on RHS of AND. 
@@ -302,12 +302,12 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c1, c2 from t1 where c2 = 2 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true}, {1, collations.Collation(nil), false}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), }, }, { // in_keyrange on LHS of AND. @@ -321,12 +321,12 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c1, c2 from t1 where c2 = 2 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true}, {1, collations.Collation(nil), false}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), }, }, { // in_keyrange on cascaded 
AND expression @@ -340,12 +340,12 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c1, c2 from t1 where c2 = 2 and c1 = 1 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true}, {1, collations.Collation(nil), false}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), }, }, { // in_keyrange parenthesized @@ -359,12 +359,12 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c1, c2 from t1 where c2 = 2 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true}, {1, collations.Collation(nil), false}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), }, }, { // 
group by @@ -377,12 +377,12 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c1, c2 from t1 group by c1 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true}, {1, collations.Collation(nil), false}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), }, }, { // aggregations @@ -395,22 +395,19 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "aggr", sourceExpression: "select c1, c2, count(*) as c3, sum(c4) as c4 from t1 group by c1 order by c1 asc", targetExpression: "select c1, c2, c3, c4 from aggr order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true}, {1, collations.Collation(nil), false}, {2, collations.Collation(nil), false}, {3, collations.Collation(nil), false}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}, {2, collations.Unknown, false}, {3, collations.Unknown, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, sourcePrimitive: &engine.OrderedAggregate{ - Aggregates: []*engine.AggregateParams{{ - Opcode: opcode.AggregateSum, - Col: 2, - }, { - Opcode: opcode.AggregateSum, - Col: 3, - }}, - GroupByKeys: []*engine.GroupByParams{{KeyCol: 
0, WeightStringCol: -1, Type: sqltypes.Unknown, CollationID: collations.Unknown}}, - Input: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), + Aggregates: []*engine.AggregateParams{ + engine.NewAggregateParam(opcode.AggregateSum, 2, ""), + engine.NewAggregateParam(opcode.AggregateSum, 3, ""), + }, + GroupByKeys: []*engine.GroupByParams{{KeyCol: 0, WeightStringCol: -1, Type: sqltypes.Unknown}}, + Input: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), }, - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), }, }, { input: &binlogdatapb.Rule{ @@ -422,12 +419,12 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "datze", sourceExpression: "select id, dt from datze order by id asc", targetExpression: "select id, convert_tz(dt, 'UTC', 'US/Pacific') as dt from datze order by id asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true}, {1, collations.Collation(nil), false}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Collation(nil), true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), }, }} @@ -496,7 +493,9 @@ func TestVDiffPlanFailure(t *testing.T) { } func TestVDiffUnsharded(t *testing.T) { - env := newTestVDiffEnv(t, []string{"0"}, []string{"0"}, "", nil) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := 
newTestVDiffEnv(t, ctx, []string{"0"}, []string{"0"}, "", nil) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -774,7 +773,9 @@ func TestVDiffUnsharded(t *testing.T) { func TestVDiffSharded(t *testing.T) { // Also test that highest position ""MariaDB/5-456-892" will be used // if lower positions are found. - env := newTestVDiffEnv(t, []string{"-40", "40-"}, []string{"-80", "80-"}, "", map[string]string{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestVDiffEnv(t, ctx, []string{"-40", "40-"}, []string{"-80", "80-"}, "", map[string]string{ "-40-80": "MariaDB/5-456-890", "40-80-": "MariaDB/5-456-891", }) @@ -847,7 +848,9 @@ func TestVDiffSharded(t *testing.T) { } func TestVDiffAggregates(t *testing.T) { - env := newTestVDiffEnv(t, []string{"-40", "40-"}, []string{"-80", "80-"}, "select c1, count(*) c2, sum(c3) c3 from t group by c1", nil) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestVDiffEnv(t, ctx, []string{"-40", "40-"}, []string{"-80", "80-"}, "select c1, count(*) c2, sum(c3) c3 from t group by c1", nil) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -915,7 +918,9 @@ func TestVDiffAggregates(t *testing.T) { } func TestVDiffDefaults(t *testing.T) { - env := newTestVDiffEnv(t, []string{"0"}, []string{"0"}, "", nil) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestVDiffEnv(t, ctx, []string{"0"}, []string{"0"}, "", nil) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -969,7 +974,9 @@ func TestVDiffDefaults(t *testing.T) { } func TestVDiffReplicationWait(t *testing.T) { - env := newTestVDiffEnv(t, []string{"0"}, []string{"0"}, "", nil) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestVDiffEnv(t, ctx, []string{"0"}, []string{"0"}, "", nil) defer env.close() schm := &tabletmanagerdatapb.SchemaDefinition{ @@ -1029,13 +1036,13 @@ func 
TestVDiffFindPKs(t *testing.T) { }, }, tdIn: &tableDiffer{ - compareCols: []compareColInfo{{0, collations.Collation(nil), false}, {1, collations.Collation(nil), false}}, + compareCols: []compareColInfo{{0, collations.Unknown, false}, {1, collations.Unknown, false}}, comparePKs: []compareColInfo{}, pkCols: []int{}, }, tdOut: &tableDiffer{ - compareCols: []compareColInfo{{0, collations.Collation(nil), true}, {1, collations.Collation(nil), false}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, }, @@ -1057,13 +1064,13 @@ func TestVDiffFindPKs(t *testing.T) { }, }, tdIn: &tableDiffer{ - compareCols: []compareColInfo{{0, collations.Collation(nil), false}, {1, collations.Collation(nil), false}, {2, collations.Collation(nil), false}, {3, collations.Collation(nil), false}}, + compareCols: []compareColInfo{{0, collations.Unknown, false}, {1, collations.Unknown, false}, {2, collations.Unknown, false}, {3, collations.Unknown, false}}, comparePKs: []compareColInfo{}, pkCols: []int{}, }, tdOut: &tableDiffer{ - compareCols: []compareColInfo{{0, collations.Collation(nil), true}, {1, collations.Collation(nil), false}, {2, collations.Collation(nil), false}, {3, collations.Collation(nil), true}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true}, {3, collations.Collation(nil), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}, {2, collations.Unknown, false}, {3, collations.Unknown, true}}, + comparePKs: []compareColInfo{{0, collations.Unknown, true}, {3, collations.Unknown, true}}, pkCols: []int{0, 3}, selectPks: []int{0, 3}, }, @@ -1136,7 +1143,7 @@ func TestGetColumnCollations(t *testing.T) { tests := []struct { name string table *tabletmanagerdatapb.TableDefinition - want 
map[string]collations.Collation + want map[string]collations.ID wantErr bool }{ { @@ -1151,9 +1158,9 @@ func TestGetColumnCollations(t *testing.T) { table: &tabletmanagerdatapb.TableDefinition{ Schema: "create table t1 (c1 int, name varchar(10), primary key(c1))", }, - want: map[string]collations.Collation{ - "c1": collations.Collation(nil), - "name": collations.Default().Get(), + want: map[string]collations.ID{ + "c1": collations.Unknown, + "name": collations.Default(), }, }, { @@ -1161,9 +1168,9 @@ func TestGetColumnCollations(t *testing.T) { table: &tabletmanagerdatapb.TableDefinition{ Schema: "create table t1 (c1 varchar(10), name varchar(10), primary key(c1))", }, - want: map[string]collations.Collation{ - "c1": collations.Default().Get(), - "name": collations.Default().Get(), + want: map[string]collations.ID{ + "c1": collations.Default(), + "name": collations.Default(), }, }, { @@ -1171,9 +1178,9 @@ func TestGetColumnCollations(t *testing.T) { table: &tabletmanagerdatapb.TableDefinition{ Schema: "create table t1 (c1 int, name varchar(10), primary key(c1, name))", }, - want: map[string]collations.Collation{ - "c1": collations.Collation(nil), - "name": collations.Default().Get(), + want: map[string]collations.ID{ + "c1": collations.Unknown, + "name": collations.Default(), }, }, { @@ -1181,7 +1188,7 @@ func TestGetColumnCollations(t *testing.T) { table: &tabletmanagerdatapb.TableDefinition{ Schema: "create table t1 (c1 varchar(10), name varchar(10), primary key(c1)) default character set ucs2", }, - want: map[string]collations.Collation{ + want: map[string]collations.ID{ "c1": collationEnv.DefaultCollationForCharset("ucs2"), "name": collationEnv.DefaultCollationForCharset("ucs2"), }, @@ -1191,7 +1198,7 @@ func TestGetColumnCollations(t *testing.T) { table: &tabletmanagerdatapb.TableDefinition{ Schema: "create table t1 (c1 varchar(10), name varchar(10), primary key(c1)) charset=utf32 collate=utf32_icelandic_ci", }, - want: map[string]collations.Collation{ + want: 
map[string]collations.ID{ "c1": collationEnv.LookupByName("utf32_icelandic_ci"), "name": collationEnv.LookupByName("utf32_icelandic_ci"), }, @@ -1201,7 +1208,7 @@ func TestGetColumnCollations(t *testing.T) { table: &tabletmanagerdatapb.TableDefinition{ Schema: "create table t1 (c1 varchar(10) charset sjis, name varchar(10), primary key(c1)) character set=utf8", }, - want: map[string]collations.Collation{ + want: map[string]collations.ID{ "c1": collationEnv.DefaultCollationForCharset("sjis"), "name": collationEnv.DefaultCollationForCharset("utf8mb3"), }, @@ -1211,7 +1218,7 @@ func TestGetColumnCollations(t *testing.T) { table: &tabletmanagerdatapb.TableDefinition{ Schema: "create table t1 (c1 varchar(10) collate hebrew_bin, name varchar(10), primary key(c1)) charset=hebrew", }, - want: map[string]collations.Collation{ + want: map[string]collations.ID{ "c1": collationEnv.LookupByName("hebrew_bin"), "name": collationEnv.DefaultCollationForCharset("hebrew"), }, @@ -1221,9 +1228,9 @@ func TestGetColumnCollations(t *testing.T) { table: &tabletmanagerdatapb.TableDefinition{ Schema: "create table t1 (c1 varchar(10) collate utf16_turkish_ci, c2 int, name varchar(10), primary key(c1, c2)) charset=utf16 collate=utf16_icelandic_ci", }, - want: map[string]collations.Collation{ + want: map[string]collations.ID{ "c1": collationEnv.LookupByName("utf16_turkish_ci"), - "c2": collations.Collation(nil), + "c2": collations.Unknown, "name": collationEnv.LookupByName("utf16_icelandic_ci"), }, }, diff --git a/go/vt/wrangler/version.go b/go/vt/wrangler/version.go index be0bd019331..c93b3c5705a 100644 --- a/go/vt/wrangler/version.go +++ b/go/vt/wrangler/version.go @@ -17,10 +17,7 @@ limitations under the License. 
package wrangler import ( - "encoding/json" "fmt" - "io" - "net/http" "context" @@ -31,42 +28,6 @@ import ( vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) -var getVersionFromTabletDebugVars = func(tabletAddr string) (string, error) { - resp, err := http.Get("http://" + tabletAddr + "/debug/vars") - if err != nil { - return "", err - } - defer resp.Body.Close() - body, err := io.ReadAll(resp.Body) - if err != nil { - return "", err - } - - var vars struct { - BuildHost string - BuildUser string - BuildTimestamp int64 - BuildGitRev string - } - err = json.Unmarshal(body, &vars) - if err != nil { - return "", err - } - - version := fmt.Sprintf("%v", vars) - return version, nil -} - -var getVersionFromTablet = getVersionFromTabletDebugVars - -// ResetDebugVarsGetVersion is used by tests to reset the -// getVersionFromTablet variable to the default one. That way we can -// run the unit tests in testlib/ even when another implementation of -// getVersionFromTablet is used. -func ResetDebugVarsGetVersion() { - getVersionFromTablet = getVersionFromTabletDebugVars -} - // GetVersion returns the version string from a tablet func (wr *Wrangler) GetVersion(ctx context.Context, tabletAlias *topodatapb.TabletAlias) (string, error) { resp, err := wr.VtctldServer().GetVersion(ctx, &vtctldatapb.GetVersionRequest{ diff --git a/go/vt/wrangler/vexec.go b/go/vt/wrangler/vexec.go index 75529a11044..0734fa7b593 100644 --- a/go/vt/wrangler/vexec.go +++ b/go/vt/wrangler/vexec.go @@ -26,25 +26,26 @@ import ( "sync" "time" - "vitess.io/vitess/go/textutil" - "vitess.io/vitess/go/vt/log" - workflow2 "vitess.io/vitess/go/vt/vtctl/workflow" - "google.golang.org/protobuf/encoding/prototext" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sets" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/textutil" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/log" + 
"vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vterrors" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/topo" + workflow2 "vitess.io/vitess/go/vt/vtctl/workflow" vtctldvexec "vitess.io/vitess/go/vt/vtctl/workflow/vexec" // renamed to avoid a collision with the vexec struct in this package - "vitess.io/vitess/go/vt/vterrors" ) const ( @@ -358,6 +359,9 @@ func (wr *Wrangler) WorkflowAction(ctx context.Context, workflow, keyspace, acti default: } results, err := wr.execWorkflowAction(ctx, workflow, keyspace, action, dryRun, rpcReq) + if err != nil { + return nil, err + } if len(results) == 0 && !dryRun { // Dry runs produce no actual tablet results return nil, fmt.Errorf("the %s workflow does not exist in the %s keyspace", workflow, keyspace) } @@ -369,9 +373,9 @@ func (wr *Wrangler) getWorkflowActionQuery(action string) (string, error) { updateSQL := "update _vt.vreplication set state = %s" switch action { case "stop": - query = fmt.Sprintf(updateSQL, encodeString("Stopped")) + query = fmt.Sprintf(updateSQL, encodeString(binlogdatapb.VReplicationWorkflowState_Stopped.String())) case "start": - query = fmt.Sprintf(updateSQL, encodeString("Running")) + query = fmt.Sprintf(updateSQL, encodeString(binlogdatapb.VReplicationWorkflowState_Running.String())) case "update": // We don't use the SQL interface, so there's no query // and no error. @@ -383,14 +387,43 @@ func (wr *Wrangler) getWorkflowActionQuery(action string) (string, error) { return query, nil } +// canRestartWorkflow validates that, for an atomic copy workflow, none of the streams are still in the copy phase. 
+// Since we copy all tables in a single snapshot, we cannot restart a workflow which broke before all tables were copied. +func (wr *Wrangler) canRestartWorkflow(ctx context.Context, workflow, keyspace string) error { + res, err := wr.ShowWorkflow(ctx, workflow, keyspace) + if err != nil { + return err + } + for _, shardStatus := range res.ShardStatuses { + if len(shardStatus.PrimaryReplicationStatuses) == 0 { + return fmt.Errorf("no replication streams found for workflow %s", workflow) + } + status := shardStatus.PrimaryReplicationStatuses[0] + + if status.WorkflowSubType == binlogdatapb.VReplicationWorkflowSubType_AtomicCopy.String() && + status.RowsCopied > 0 && len(status.CopyState) > 0 { + return fmt.Errorf("cannot restart an atomic copy workflow which previously stopped in the Copying phase") + } + break // We only need to check one shard + } + return nil +} + func (wr *Wrangler) execWorkflowAction(ctx context.Context, workflow, keyspace, action string, dryRun bool, rpcReq any) (map[*topo.TabletInfo]*querypb.QueryResult, error) { var callback func(context.Context, *topo.TabletInfo) (*querypb.QueryResult, error) = nil query, err := wr.getWorkflowActionQuery(action) if err != nil { return nil, err } - if action == "update" { - rpcReq, ok := rpcReq.(*tabletmanagerdatapb.UpdateVRWorkflowRequest) + + switch action { + case "start": + err = wr.canRestartWorkflow(ctx, workflow, keyspace) + if err != nil { + return nil, err + } + case "update": + rpcReq, ok := rpcReq.(*tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) if !ok { return nil, fmt.Errorf("invalid RPC request: %+v", rpcReq) } @@ -406,7 +439,7 @@ func (wr *Wrangler) execWorkflowAction(ctx context.Context, workflow, keyspace, } if !textutil.ValueIsSimulatedNull(rpcReq.TabletTypes) { changes = true - dryRunChanges.WriteString(fmt.Sprintf(" tablet_types=%q\n", strings.Join(rpcReq.TabletTypes, ","))) + dryRunChanges.WriteString(fmt.Sprintf(" tablet_types=%q\n", 
topoproto.MakeStringTypeCSV(rpcReq.TabletTypes))) } if !textutil.ValueIsSimulatedNull(rpcReq.OnDdl) { changes = true @@ -433,7 +466,7 @@ func (wr *Wrangler) execWorkflowAction(ctx context.Context, workflow, keyspace, return nil, nil } else { callback = func(ctx context.Context, tablet *topo.TabletInfo) (*querypb.QueryResult, error) { - res, err := wr.tmc.UpdateVRWorkflow(ctx, tablet.Tablet, rpcReq) + res, err := wr.tmc.UpdateVReplicationWorkflow(ctx, tablet.Tablet, rpcReq) if err != nil { return nil, err } @@ -441,6 +474,7 @@ func (wr *Wrangler) execWorkflowAction(ctx context.Context, workflow, keyspace, } } } + return wr.runVexec(ctx, workflow, keyspace, query, callback, dryRun) } @@ -552,11 +586,12 @@ func (wr *Wrangler) getReplicationStatusFromRow(ctx context.Context, row sqltype var err error var id int32 var timeUpdated, transactionTimestamp, timeHeartbeat, timeThrottled int64 - var state, dbName, pos, stopPos, message, tags, componentThrottled string + var dbName, pos, stopPos, message, tags, componentThrottled string + var state string var workflowType, workflowSubType int32 var deferSecondaryKeys bool var bls binlogdatapb.BinlogSource - var mpos mysql.Position + var mpos replication.Position var rowsCopied int64 id, err = row.ToInt32("id") if err != nil { @@ -658,7 +693,7 @@ func (wr *Wrangler) getReplicationStatusFromRow(ctx context.Context, row sqltype return nil, "", err } - status.State = updateState(message, status.State, status.CopyState, timeUpdated) + status.State = updateState(message, binlogdatapb.VReplicationWorkflowState(binlogdatapb.VReplicationWorkflowState_value[state]), status.CopyState, timeUpdated) return status, bls.Keyspace, nil } @@ -754,7 +789,7 @@ func (wr *Wrangler) getStreams(ctx context.Context, workflow, keyspace string) ( // All timestamps are in seconds since epoch lastTransactionTimestamp := status.TransactionTimestamp lastHeartbeatTime := status.TimeHeartbeat - if status.State == "Copying" { + if status.State == 
binlogdatapb.VReplicationWorkflowState_Copying.String() { rsr.MaxVReplicationTransactionLag = math.MaxInt64 } else { if lastTransactionTimestamp == 0 /* no new events after copy */ || @@ -838,15 +873,15 @@ func (wr *Wrangler) ShowWorkflow(ctx context.Context, workflow, keyspace string) return replStatus, nil } -func updateState(message, state string, cs []copyState, timeUpdated int64) string { +func updateState(message string, state binlogdatapb.VReplicationWorkflowState, cs []copyState, timeUpdated int64) string { if strings.Contains(strings.ToLower(message), "error") { - state = "Error" - } else if state == "Running" && len(cs) > 0 { - state = "Copying" - } else if state == "Running" && int64(time.Now().Second())-timeUpdated > 10 /* seconds */ { - state = "Lagging" + state = binlogdatapb.VReplicationWorkflowState_Error + } else if state == binlogdatapb.VReplicationWorkflowState_Running && len(cs) > 0 { + state = binlogdatapb.VReplicationWorkflowState_Copying + } else if state == binlogdatapb.VReplicationWorkflowState_Running && int64(time.Now().Second())-timeUpdated > 10 /* seconds */ { + state = binlogdatapb.VReplicationWorkflowState_Lagging } - return state + return state.String() } func dumpStreamListAsJSON(replStatus *ReplicationStatusResult, wr *Wrangler) error { diff --git a/go/vt/wrangler/vexec_test.go b/go/vt/wrangler/vexec_test.go index 6dde5825c3f..3aa5ed3ddb3 100644 --- a/go/vt/wrangler/vexec_test.go +++ b/go/vt/wrangler/vexec_test.go @@ -34,14 +34,16 @@ import ( "vitess.io/vitess/go/vt/logutil" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) func TestVExec(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() workflow := "wrWorkflow" keyspace := "target" query := "update _vt.vreplication set state = 'Running'" - env := newWranglerTestEnv(t, []string{"0"}, 
[]string{"-80", "80-"}, "", nil, time.Now().Unix()) + env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, "", nil, time.Now().Unix()) defer env.close() var logger = logutil.NewMemoryLogger() wr := New(logger, env.topoServ, env.tmc) @@ -174,18 +176,19 @@ func TestVExec(t *testing.T) { } func TestWorkflowStatusUpdate(t *testing.T) { - require.Equal(t, "Running", updateState("for vdiff", "Running", nil, int64(time.Now().Second()))) - require.Equal(t, "Running", updateState("", "Running", nil, int64(time.Now().Second()))) - require.Equal(t, "Lagging", updateState("", "Running", nil, int64(time.Now().Second())-100)) - require.Equal(t, "Copying", updateState("", "Running", []copyState{{Table: "t1", LastPK: "[[INT64(10)]]"}}, int64(time.Now().Second()))) - require.Equal(t, "Error", updateState("error: primary tablet not contactable", "Running", nil, 0)) + require.Equal(t, binlogdatapb.VReplicationWorkflowState_Running.String(), updateState("for vdiff", binlogdatapb.VReplicationWorkflowState_Running, nil, int64(time.Now().Second()))) + require.Equal(t, binlogdatapb.VReplicationWorkflowState_Running.String(), updateState("", binlogdatapb.VReplicationWorkflowState_Running, nil, int64(time.Now().Second()))) + require.Equal(t, binlogdatapb.VReplicationWorkflowState_Lagging.String(), updateState("", binlogdatapb.VReplicationWorkflowState_Running, nil, int64(time.Now().Second())-100)) + require.Equal(t, binlogdatapb.VReplicationWorkflowState_Copying.String(), updateState("", binlogdatapb.VReplicationWorkflowState_Running, []copyState{{Table: "t1", LastPK: "[[INT64(10)]]"}}, int64(time.Now().Second()))) + require.Equal(t, binlogdatapb.VReplicationWorkflowState_Error.String(), updateState("error: primary tablet not contactable", binlogdatapb.VReplicationWorkflowState_Running, nil, 0)) } func TestWorkflowListStreams(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() workflow := "wrWorkflow" 
keyspace := "target" - env := newWranglerTestEnv(t, []string{"0"}, []string{"-80", "80-"}, "", nil, 1234) + env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, "", nil, 1234) defer env.close() logger := logutil.NewMemoryLogger() wr := New(logger, env.topoServ, env.tmc) @@ -357,10 +360,11 @@ will be run on the following streams in keyspace target for workflow wrWorkflow: } func TestWorkflowListAll(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() keyspace := "target" workflow := "wrWorkflow" - env := newWranglerTestEnv(t, []string{"0"}, []string{"-80", "80-"}, "", nil, 0) + env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, "", nil, 0) defer env.close() logger := logutil.NewMemoryLogger() wr := New(logger, env.topoServ, env.tmc) @@ -376,11 +380,12 @@ func TestWorkflowListAll(t *testing.T) { } func TestVExecValidations(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() workflow := "wf" keyspace := "ks" query := "" - env := newWranglerTestEnv(t, []string{"0"}, []string{"-80", "80-"}, "", nil, 0) + env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, "", nil, 0) defer env.close() wr := New(logutil.NewConsoleLogger(), env.topoServ, env.tmc) @@ -427,12 +432,12 @@ func TestVExecValidations(t *testing.T) { actions := []action{ { name: "start", - want: fmt.Sprintf(updateSQL, encodeString("Running")), + want: fmt.Sprintf(updateSQL, encodeString(binlogdatapb.VReplicationWorkflowState_Running.String())), expectedError: nil, }, { name: "stop", - want: fmt.Sprintf(updateSQL, encodeString("Stopped")), + want: fmt.Sprintf(updateSQL, encodeString(binlogdatapb.VReplicationWorkflowState_Stopped.String())), expectedError: nil, }, { @@ -462,10 +467,11 @@ func TestVExecValidations(t *testing.T) { // tabletmanager and the behavior is tested // there. 
func TestWorkflowUpdate(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() workflow := "wrWorkflow" keyspace := "target" - env := newWranglerTestEnv(t, []string{"0"}, []string{"-80", "80-"}, "", nil, 1234) + env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, "", nil, 1234) defer env.close() logger := logutil.NewMemoryLogger() wr := New(logger, env.topoServ, env.tmc) @@ -474,7 +480,7 @@ func TestWorkflowUpdate(t *testing.T) { tests := []struct { name string cells []string - tabletTypes []string + tabletTypes []topodatapb.TabletType onDDL binlogdatapb.OnDDLAction output string wantErr string @@ -482,35 +488,35 @@ func TestWorkflowUpdate(t *testing.T) { { name: "no flags", cells: nullSlice, - tabletTypes: nullSlice, + tabletTypes: []topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)}, onDDL: nullOnDDL, wantErr: "no updates were provided; use --cells, --tablet-types, or --on-ddl to specify new values", }, { name: "only cells", cells: []string{"zone1"}, - tabletTypes: nullSlice, + tabletTypes: []topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)}, onDDL: nullOnDDL, output: "The following workflow fields will be updated:\n cells=\"zone1\"\nOn the following tablets in the target keyspace for workflow wrWorkflow:\n zone1-0000000200 (target/-80)\n zone1-0000000210 (target/80-)\n", }, { name: "only tablet types", cells: nullSlice, - tabletTypes: []string{"primary", "replica"}, + tabletTypes: []topodatapb.TabletType{topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA}, onDDL: nullOnDDL, output: "The following workflow fields will be updated:\n tablet_types=\"primary,replica\"\nOn the following tablets in the target keyspace for workflow wrWorkflow:\n zone1-0000000200 (target/-80)\n zone1-0000000210 (target/80-)\n", }, { name: "only on-ddl", cells: nullSlice, - tabletTypes: nullSlice, + tabletTypes: 
[]topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)}, onDDL: binlogdatapb.OnDDLAction_EXEC_IGNORE, output: "The following workflow fields will be updated:\n on_ddl=\"EXEC_IGNORE\"\nOn the following tablets in the target keyspace for workflow wrWorkflow:\n zone1-0000000200 (target/-80)\n zone1-0000000210 (target/80-)\n", }, { name: "all flags", cells: []string{"zone1", "zone2"}, - tabletTypes: []string{"rdonly", "spare"}, + tabletTypes: []topodatapb.TabletType{topodatapb.TabletType_RDONLY, topodatapb.TabletType_SPARE}, onDDL: binlogdatapb.OnDDLAction_EXEC, output: "The following workflow fields will be updated:\n cells=\"zone1,zone2\"\n tablet_types=\"rdonly,spare\"\n on_ddl=\"EXEC\"\nOn the following tablets in the target keyspace for workflow wrWorkflow:\n zone1-0000000200 (target/-80)\n zone1-0000000210 (target/80-)\n", }, @@ -518,7 +524,7 @@ func TestWorkflowUpdate(t *testing.T) { for _, tcase := range tests { t.Run(tcase.name, func(t *testing.T) { - rpcReq := &tabletmanagerdatapb.UpdateVRWorkflowRequest{ + rpcReq := &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ Cells: tcase.cells, TabletTypes: tcase.tabletTypes, OnDdl: tcase.onDDL, diff --git a/go/vt/wrangler/workflow.go b/go/vt/wrangler/workflow.go index 0cbe1dc2062..d9dbcee7291 100644 --- a/go/vt/wrangler/workflow.go +++ b/go/vt/wrangler/workflow.go @@ -8,15 +8,16 @@ import ( "sync" "time" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/vtctl/workflow" - "vitess.io/vitess/go/vt/vtgate/evalengine" + vdiff2 "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -56,10 +57,12 @@ type 
VReplicationWorkflowParams struct { OnDDL string // MoveTables/Migrate specific - SourceKeyspace, Tables string - AllTables, RenameTables bool - SourceTimeZone string - DropForeignKeys bool + SourceKeyspace, Tables string + AllTables, RenameTables bool + SourceTimeZone string + DropForeignKeys bool + InitializeTargetSequences bool + AtomicCopy bool // Reshard specific SourceShards, TargetShards []string @@ -71,6 +74,9 @@ type VReplicationWorkflowParams struct { // Migrate specific ExternalCluster string + + // MoveTables only + NoRoutingRules bool } // VReplicationWorkflow stores various internal objects for a workflow @@ -267,7 +273,7 @@ func (vrw *VReplicationWorkflow) GetStreamCount() (int64, int64, []*WorkflowErro if st.Pos == "" { continue } - if st.State == "Running" || st.State == "Copying" { + if st.State == binlogdatapb.VReplicationWorkflowState_Running.String() || st.State == binlogdatapb.VReplicationWorkflowState_Copying.String() { started++ } } @@ -433,7 +439,8 @@ func (vrw *VReplicationWorkflow) initMoveTables() error { return vrw.wr.MoveTables(vrw.ctx, vrw.params.Workflow, vrw.params.SourceKeyspace, vrw.params.TargetKeyspace, vrw.params.Tables, vrw.params.Cells, vrw.params.TabletTypes, vrw.params.AllTables, vrw.params.ExcludeTables, vrw.params.AutoStart, vrw.params.StopAfterCopy, vrw.params.ExternalCluster, vrw.params.DropForeignKeys, - vrw.params.DeferSecondaryKeys, vrw.params.SourceTimeZone, vrw.params.OnDDL, vrw.params.SourceShards) + vrw.params.DeferSecondaryKeys, vrw.params.SourceTimeZone, vrw.params.OnDDL, vrw.params.SourceShards, + vrw.params.NoRoutingRules, vrw.params.AtomicCopy) } func (vrw *VReplicationWorkflow) initReshard() error { @@ -477,7 +484,8 @@ func (vrw *VReplicationWorkflow) switchWrites() (*[]string, error) { log.Infof("In VReplicationWorkflow.switchWrites(reverse) for %+v", vrw) } journalID, dryRunResults, err = vrw.wr.SwitchWrites(vrw.ctx, vrw.params.TargetKeyspace, vrw.params.Workflow, vrw.params.Timeout, - false, 
vrw.params.Direction == workflow.DirectionBackward, vrw.params.EnableReverseReplication, vrw.params.DryRun) + false, vrw.params.Direction == workflow.DirectionBackward, vrw.params.EnableReverseReplication, vrw.params.DryRun, + vrw.params.InitializeTargetSequences) if err != nil { return nil, err } @@ -525,9 +533,9 @@ func (vrw *VReplicationWorkflow) canSwitch(keyspace, workflowName string) (reaso statuses := result.ShardStatuses[ksShard].PrimaryReplicationStatuses for _, st := range statuses { switch st.State { - case "Copying": + case binlogdatapb.VReplicationWorkflowState_Copying.String(): return cannotSwitchCopyIncomplete, nil - case "Error": + case binlogdatapb.VReplicationWorkflowState_Error.String(): return cannotSwitchError, nil } } @@ -636,11 +644,11 @@ func (vrw *VReplicationWorkflow) GetCopyProgress() (*CopyProgress, error) { qr := sqltypes.Proto3ToResult(p3qr) for i := 0; i < len(qr.Rows); i++ { table := qr.Rows[i][0].ToString() - rowCount, err := evalengine.ToInt64(qr.Rows[i][1]) + rowCount, err := qr.Rows[i][1].ToCastInt64() if err != nil { return err } - tableSize, err := evalengine.ToInt64(qr.Rows[i][2]) + tableSize, err := qr.Rows[i][2].ToCastInt64() if err != nil { return err } @@ -703,20 +711,16 @@ func (vrw *VReplicationWorkflow) GetCopyProgress() (*CopyProgress, error) { // region Workflow related utility functions -// deleteWorkflowVDiffData cleans up any potential VDiff related data associated with the workflow on the given tablet +// deleteWorkflowVDiffData cleans up any potential VDiff related data associated +// with the workflow on the given tablet. 
func (wr *Wrangler) deleteWorkflowVDiffData(ctx context.Context, tablet *topodatapb.Tablet, workflow string) { - sqlDeleteVDiffs := `delete from vd, vdt, vdl using _vt.vdiff as vd inner join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) - inner join _vt.vdiff_log as vdl on (vd.id = vdl.vdiff_id) - where vd.keyspace = %s and vd.workflow = %s` - query := fmt.Sprintf(sqlDeleteVDiffs, encodeString(tablet.Keyspace), encodeString(workflow)) - rows := -1 - if _, err := wr.tmc.ExecuteFetchAsDba(ctx, tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ - Query: []byte(query), - MaxRows: uint64(rows), + if _, err := wr.tmc.VDiff(ctx, tablet, &tabletmanagerdatapb.VDiffRequest{ + Keyspace: tablet.Keyspace, + Workflow: workflow, + Action: string(vdiff2.DeleteAction), + ActionArg: vdiff2.AllActionArg, }); err != nil { - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Num != mysql.ERNoSuchTable { // the tables may not exist if no vdiffs have been run - wr.Logger().Errorf("Error deleting vdiff data for %s.%s workflow: %v", tablet.Keyspace, workflow, err) - } + log.Errorf("Error deleting vdiff data for %s.%s workflow: %v", tablet.Keyspace, workflow, err) } } @@ -754,7 +758,7 @@ func (wr *Wrangler) optimizeCopyStateTable(tablet *topodatapb.Tablet) { Query: []byte(sqlOptimizeTable), MaxRows: uint64(100), // always produces 1+rows with notes and status }); err != nil { - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Num == mysql.ERNoSuchTable { // the table may not exist + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Num == sqlerror.ERNoSuchTable { // the table may not exist return } log.Warningf("Failed to optimize the copy_state table on %q: %v", tablet.Alias.String(), err) diff --git a/go/vt/wrangler/workflow_test.go b/go/vt/wrangler/workflow_test.go index 12c6fe3a2f7..bf068edf5ae 100644 --- a/go/vt/wrangler/workflow_test.go +++ b/go/vt/wrangler/workflow_test.go @@ -31,6 +31,7 @@ import ( "vitess.io/vitess/go/vt/discovery" 
"vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtctl/workflow" @@ -49,7 +50,7 @@ func getMoveTablesWorkflow(t *testing.T, cells, tabletTypes string) *VReplicatio Cells: cells, TabletTypes: tabletTypes, MaxAllowedTransactionLagSeconds: defaultMaxAllowedTransactionLagSeconds, - OnDDL: binlogdatapb.OnDDLAction_name[int32(binlogdatapb.OnDDLAction_EXEC)], + OnDDL: binlogdatapb.OnDDLAction_EXEC.String(), } mtwf := &VReplicationWorkflow{ workflowType: MoveTablesWorkflow, @@ -152,9 +153,9 @@ func TestCanSwitch(t *testing.T) { } testCases := []testCase{ - {"In Copy Phase", "Copying", 0, 0, regexp.MustCompile(cannotSwitchCopyIncomplete)}, - {"High Lag", "Running", 6, 5, regexp.MustCompile(strings.ReplaceAll(cannotSwitchHighLag, "%d", "(\\d+)"))}, - {"Acceptable Lag", "Running", 4, 5, nil}, + {"In Copy Phase", binlogdatapb.VReplicationWorkflowState_Copying.String(), 0, 0, regexp.MustCompile(cannotSwitchCopyIncomplete)}, + {"High Lag", binlogdatapb.VReplicationWorkflowState_Running.String(), 6, 5, regexp.MustCompile(strings.ReplaceAll(cannotSwitchHighLag, "%d", "(\\d+)"))}, + {"Acceptable Lag", binlogdatapb.VReplicationWorkflowState_Running.String(), 4, 5, nil}, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { @@ -280,7 +281,7 @@ func TestMoveTablesV2(t *testing.T) { TabletTypes: "REPLICA,RDONLY,PRIMARY", Timeout: DefaultActionTimeout, MaxAllowedTransactionLagSeconds: defaultMaxAllowedTransactionLagSeconds, - OnDDL: binlogdatapb.OnDDLAction_name[int32(binlogdatapb.OnDDLAction_STOP)], + OnDDL: binlogdatapb.OnDDLAction_STOP.String(), } tme := newTestTableMigrater(ctx, t) defer tme.stopTablets(t) @@ -300,6 +301,83 @@ func TestMoveTablesV2(t *testing.T) { require.Equal(t, WorkflowStateNotSwitched, wf.CurrentState()) } +// TestPartialMoveTables ensures that shard by shard migrations work +// as expected. 
This test moves tables from one sharded keyspace (ks1) +// to another sharded keyspace (ks2), but only for the -80 shard. +func TestPartialMoveTables(t *testing.T) { + ctx := context.Background() + shards := []string{"-80", "80-"} + shardsToMove := shards[0:1] + p := &VReplicationWorkflowParams{ + Workflow: "test", + WorkflowType: MoveTablesWorkflow, + SourceKeyspace: "ks1", + SourceShards: shardsToMove, // shard by shard + TargetShards: shardsToMove, // shard by shard + TargetKeyspace: "ks2", + Tables: "t1,t2", + Cells: "cell1,cell2", + TabletTypes: "REPLICA,RDONLY,PRIMARY", + Timeout: DefaultActionTimeout, + MaxAllowedTransactionLagSeconds: defaultMaxAllowedTransactionLagSeconds, + OnDDL: binlogdatapb.OnDDLAction_STOP.String(), + } + tme := newTestTablePartialMigrater(ctx, t, shards, shards[0:1], "select * %s") + defer tme.stopTablets(t) + + // Save some unrelated shard routing rules to be sure that + // they don't interfere in any way. + srr, err := tme.ts.GetShardRoutingRules(ctx) + require.NoError(t, err) + srr.Rules = append(srr.Rules, []*vschema.ShardRoutingRule{ + { + FromKeyspace: "wut", + Shard: "40-80", + ToKeyspace: "bloop", + }, + { + FromKeyspace: "haylo", + Shard: "-80", + ToKeyspace: "blarg", + }, + }...) + err = tme.ts.SaveShardRoutingRules(ctx, srr) + require.NoError(t, err) + + wf, err := tme.wr.NewVReplicationWorkflow(ctx, MoveTablesWorkflow, p) + require.NoError(t, err) + require.NotNil(t, wf) + require.Equal(t, WorkflowStateNotSwitched, wf.CurrentState()) + require.True(t, wf.ts.isPartialMigration, "expected partial shard migration") + + // The default shard routing rule for the keyspace's other shard would + // normally be put in place, but the unit test does not execute the + // wrangler.MoveTables function which adds all of the default shard + // routing rules in the topo for the keyspace when the first workflow + // is run against it. So we simulate it here. 
+ srr, err = tme.ts.GetShardRoutingRules(ctx) + require.NoError(t, err) + srr.Rules = append(srr.Rules, &vschema.ShardRoutingRule{ + FromKeyspace: "ks2", + Shard: "80-", + ToKeyspace: "ks1", + }) + err = tme.ts.SaveShardRoutingRules(ctx, srr) + require.NoError(t, err) + + tme.expectNoPreviousJournals() + expectMoveTablesQueries(t, tme, p) + tme.expectNoPreviousJournals() + require.NoError(t, testSwitchForward(t, wf)) + require.Equal(t, "Reads partially switched, for shards: -80. Writes partially switched, for shards: -80", wf.CurrentState()) + require.NoError(t, err) + + tme.expectNoPreviousJournals() + tme.expectNoPreviousReverseJournals() + require.NoError(t, testReverse(t, wf)) + require.Equal(t, WorkflowStateNotSwitched, wf.CurrentState()) +} + func validateRoutingRuleCount(ctx context.Context, t *testing.T, ts *topo.Server, cnt int) { rr, err := ts.GetRoutingRules(ctx) require.NoError(t, err) @@ -365,7 +443,7 @@ func testReverse(t *testing.T, wf *VReplicationWorkflow) error { return err } -func TestMoveTablesV2Partial(t *testing.T) { +func TestMoveTablesV2SwitchTraffic(t *testing.T) { ctx := context.Background() p := &VReplicationWorkflowParams{ Workflow: "test", @@ -485,7 +563,7 @@ func TestReshardV2(t *testing.T) { TabletTypes: "replica,rdonly,primary", Timeout: DefaultActionTimeout, MaxAllowedTransactionLagSeconds: defaultMaxAllowedTransactionLagSeconds, - OnDDL: binlogdatapb.OnDDLAction_name[int32(binlogdatapb.OnDDLAction_EXEC_IGNORE)], + OnDDL: binlogdatapb.OnDDLAction_EXEC_IGNORE.String(), } tme := newTestShardMigrater(ctx, t, sourceShards, targetShards) defer tme.stopTablets(t) diff --git a/go/vt/wrangler/wrangler_env_test.go b/go/vt/wrangler/wrangler_env_test.go index 5b64e4511bb..e6139e9aceb 100644 --- a/go/vt/wrangler/wrangler_env_test.go +++ b/go/vt/wrangler/wrangler_env_test.go @@ -60,10 +60,10 @@ type testWranglerEnv struct { //---------------------------------------------- // testWranglerEnv -func newWranglerTestEnv(t testing.TB, sourceShards, 
targetShards []string, query string, positions map[string]string, timeUpdated int64) *testWranglerEnv { +func newWranglerTestEnv(t testing.TB, ctx context.Context, sourceShards, targetShards []string, query string, positions map[string]string, timeUpdated int64) *testWranglerEnv { env := &testWranglerEnv{ workflow: "wrWorkflow", - topoServ: memorytopo.NewServer("zone1"), + topoServ: memorytopo.NewServer(ctx, "zone1"), cell: "zone1", tabletType: topodatapb.TabletType_REPLICA, tmc: newTestWranglerTMClient(), diff --git a/go/vt/zkctl/zkconf.go b/go/vt/zkctl/zkconf.go index 9cca90adac0..15a912231ff 100644 --- a/go/vt/zkctl/zkconf.go +++ b/go/vt/zkctl/zkconf.go @@ -49,6 +49,7 @@ type ZkConfig struct { ServerId uint32 // nolint:revive ClientPort int Servers []zkServerAddr + Extra []string Global bool } @@ -117,6 +118,11 @@ func MakeZooCfg(cnfFiles []string, cnf *ZkConfig, header string) (string, error) return "", dataErr } + myTemplateSource.WriteString("\n") // in case `data` did not end with a newline + for _, extra := range cnf.Extra { + myTemplateSource.WriteString(fmt.Sprintf("%s\n", extra)) + } + myTemplate, err := template.New("foo").Parse(myTemplateSource.String()) if err != nil { return "", err diff --git a/go/vt/zkctl/zkctl_test.go b/go/vt/zkctl/zkctl_test.go index bcf1c531301..e237c572eae 100644 --- a/go/vt/zkctl/zkctl_test.go +++ b/go/vt/zkctl/zkctl_test.go @@ -17,6 +17,8 @@ limitations under the License. 
package zkctl import ( + "fmt" + "strings" "testing" ) @@ -33,6 +35,15 @@ func TestLifeCycle(t *testing.T) { myID := 255 zkConf := MakeZkConfigFromString(config, uint32(myID)) + zkExtraConfLine := "tcpKeepAlive=true" + zkConf.Extra = []string{zkExtraConfLine} + + if zkObservedConf, err := MakeZooCfg([]string{zkConf.ConfigFile()}, zkConf, "header"); err != nil { + t.Fatalf("MakeZooCfg err: %v", err) + } else if !strings.Contains(string(zkObservedConf), fmt.Sprintf("\n%s\n", zkExtraConfLine)) { + t.Fatalf("Expected zkExtraConfLine in zkObservedConf") + } + zkd := NewZkd(zkConf) if err := zkd.Init(); err != nil { t.Fatalf("Init() err: %v", err) @@ -49,4 +60,5 @@ func TestLifeCycle(t *testing.T) { if err := zkd.Teardown(); err != nil { t.Fatalf("Teardown() err: %v", err) } + } diff --git a/java/client/pom.xml b/java/client/pom.xml index b95f4d1b1b2..7c765038c23 100644 --- a/java/client/pom.xml +++ b/java/client/pom.xml @@ -5,7 +5,7 @@ io.vitess vitess-parent - 17.0.2 + 18.0.0 vitess-client diff --git a/java/example/pom.xml b/java/example/pom.xml index 113292979e3..028987e3103 100644 --- a/java/example/pom.xml +++ b/java/example/pom.xml @@ -5,7 +5,7 @@ io.vitess vitess-parent - 17.0.2 + 18.0.0 vitess-example diff --git a/java/grpc-client/pom.xml b/java/grpc-client/pom.xml index bb7a9213f62..c305a873081 100644 --- a/java/grpc-client/pom.xml +++ b/java/grpc-client/pom.xml @@ -5,7 +5,7 @@ io.vitess vitess-parent - 17.0.2 + 18.0.0 vitess-grpc-client diff --git a/java/jdbc/pom.xml b/java/jdbc/pom.xml index 0621011b051..45b59c8abf6 100644 --- a/java/jdbc/pom.xml +++ b/java/jdbc/pom.xml @@ -5,7 +5,7 @@ io.vitess vitess-parent - 17.0.2 + 18.0.0 vitess-jdbc @@ -54,26 +54,26 @@ org.mockito mockito-core - 3.5.15 + 3.12.4 test org.powermock powermock-api-mockito2 - 2.0.7 + 2.0.9 test org.powermock powermock-core - 2.0.7 + 2.0.9 test org.powermock powermock-module-junit4 - 2.0.7 + 2.0.9 test diff --git a/java/pom.xml b/java/pom.xml index 87b03492338..f174c083f78 100644 --- 
a/java/pom.xml +++ b/java/pom.xml @@ -11,7 +11,7 @@ io.vitess vitess-parent - 17.0.2 + 18.0.0 pom Vitess Java Client libraries [Parent] @@ -66,14 +66,14 @@ UTF-8 - 1.44.0 + 1.57.1 - 4.1.72.Final - 2.0.46.Final + 4.1.93.Final + 2.0.61.Final - 3.19.6 - 3.19.4 + 3.24.3 + 3.24.3 3.0.0 2.17.1 @@ -248,6 +248,7 @@ true mysql:mysql-connector-java + io.grpc:grpc-context @@ -287,7 +288,7 @@ org.sonatype.plugins nexus-staging-maven-plugin - 1.6.8 + 1.6.13 true ossrh @@ -298,7 +299,7 @@ org.apache.maven.plugins maven-source-plugin - 2.2.1 + 2.4 attach-sources @@ -311,7 +312,7 @@ org.apache.maven.plugins maven-javadoc-plugin - 2.9.1 + 2.10.4 attach-javadocs @@ -324,7 +325,7 @@ org.apache.maven.plugins maven-gpg-plugin - 1.5 + 1.6 sign-artifacts diff --git a/proto/binlogdata.proto b/proto/binlogdata.proto index 4ebcf272952..660f8fedfce 100644 --- a/proto/binlogdata.proto +++ b/proto/binlogdata.proto @@ -222,6 +222,18 @@ enum VReplicationWorkflowType { enum VReplicationWorkflowSubType { None = 0; Partial = 1; + AtomicCopy = 2; +} + +// VReplicationWorklfowState defines the valid states that a workflow can be in. +enum VReplicationWorkflowState { + Unknown = 0; + Init = 1; + Stopped = 2; + Copying = 3; + Running = 4; + Error = 5; + Lagging = 6; } // BinlogSource specifies the source and filter parameters for @@ -328,6 +340,7 @@ message RowEvent { repeated RowChange row_changes = 2; string keyspace = 3; string shard = 4; + uint32 flags = 5; // https://dev.mysql.com/doc/dev/mysql-server/latest/classbinary__log_1_1Rows__event.html } // FieldEvent represents the field info for a table. 
@@ -485,6 +498,24 @@ message VStreamRowsResponse { bool heartbeat = 7; } + +// VStreamTablesRequest is the payload for VStreamTables +message VStreamTablesRequest { + vtrpc.CallerID effective_caller_id = 1; + query.VTGateCallerID immediate_caller_id = 2; + query.Target target = 3; +} + +// VStreamTablesResponse is the response from VStreamTables +message VStreamTablesResponse { + string table_name = 1; + repeated query.Field fields = 2; + repeated query.Field pkfields = 3; + string gtid = 4; + repeated query.Row rows = 5; + query.Row lastpk = 6; +} + message LastPKEvent { TableLastPK table_last_p_k = 1; bool completed = 2; diff --git a/proto/mysqlctl.proto b/proto/mysqlctl.proto index e6c1b40a3c7..bc67cef07c1 100644 --- a/proto/mysqlctl.proto +++ b/proto/mysqlctl.proto @@ -44,10 +44,26 @@ message RunMysqlUpgradeResponse{} message ApplyBinlogFileRequest{ string binlog_file_name = 1; string binlog_restore_position = 2; + vttime.Time binlog_restore_datetime = 3; } message ApplyBinlogFileResponse{} +message ReadBinlogFilesTimestampsRequest{ + repeated string binlog_file_names = 1; +} + +message ReadBinlogFilesTimestampsResponse{ + // FirstTimestamp is the timestamp of the first found transaction searching in order of given binlog files + vttime.Time first_timestamp = 1; + // FirstTimestampBinlog is the name of the binary log in which the first timestamp is found + string first_timestamp_binlog = 2; + // LastTimestamp is the timestamp of the last found transaction in given binlog files + vttime.Time last_timestamp = 3; + // LastTimestampBinlog is the name of the binary log in which the last timestamp is found + string last_timestamp_binlog = 4; +} + message ReinitConfigRequest{} message ReinitConfigResponse{} @@ -56,14 +72,22 @@ message RefreshConfigRequest{} message RefreshConfigResponse{} +message VersionStringRequest{} + +message VersionStringResponse{ + string version = 1; +} + // MysqlCtl is the service definition service MysqlCtl { rpc Start(StartRequest) returns 
(StartResponse) {}; rpc Shutdown(ShutdownRequest) returns (ShutdownResponse) {}; rpc RunMysqlUpgrade(RunMysqlUpgradeRequest) returns (RunMysqlUpgradeResponse) {}; rpc ApplyBinlogFile(ApplyBinlogFileRequest) returns (ApplyBinlogFileResponse) {}; + rpc ReadBinlogFilesTimestamps(ReadBinlogFilesTimestampsRequest) returns (ReadBinlogFilesTimestampsResponse) {}; rpc ReinitConfig(ReinitConfigRequest) returns (ReinitConfigResponse) {}; rpc RefreshConfig(RefreshConfigRequest) returns (RefreshConfigResponse) {}; + rpc VersionString(VersionStringRequest) returns (VersionStringResponse) {}; } // BackupInfo is the read-only attributes of a mysqlctl/backupstorage.BackupHandle. diff --git a/proto/query.proto b/proto/query.proto index 01edeff1845..fa7a6cc47d1 100644 --- a/proto/query.proto +++ b/proto/query.proto @@ -925,8 +925,8 @@ message StreamHealthResponse { // or if a replica should not be used because the keyspace is being resharded. bool serving = 2; - // tablet_externally_reparented_timestamp can be interpreted as the - // last time we knew that this tablet was the PRIMARY of this shard + // primary_term_start_timestamp can be interpreted as the + // last time we knew that this tablet was promoted to a PRIMARY of this shard // (if StreamHealthResponse describes a group of tablets, between // two vtgates, only one primary will be present in the group, and // this is this primary's value). @@ -950,8 +950,8 @@ message StreamHealthResponse { // as PRIMARY because it was recorded as the shard's current primary in the // topology (see go/vt/vttablet/tabletmanager/init_tablet.go) // OR - // d) 0 if the vttablet was never a PRIMARY. - int64 tablet_externally_reparented_timestamp = 3; + // d) 0 if the vttablet is not a PRIMARY. + int64 primary_term_start_timestamp = 3; // realtime_stats contains information about the tablet status. // It is only filled in if the information is about a tablet. 
diff --git a/proto/queryservice.proto b/proto/queryservice.proto index b2bf08f13dd..1ad3749e47b 100644 --- a/proto/queryservice.proto +++ b/proto/queryservice.proto @@ -106,6 +106,9 @@ service Query { // VStreamRows streams rows from the specified starting point. rpc VStreamRows(binlogdata.VStreamRowsRequest) returns (stream binlogdata.VStreamRowsResponse) {}; + // VStreamTables streams rows from the specified starting point. + rpc VStreamTables(binlogdata.VStreamTablesRequest) returns (stream binlogdata.VStreamTablesResponse) {}; + // VStreamResults streams results along with the gtid of the snapshot. rpc VStreamResults(binlogdata.VStreamResultsRequest) returns (stream binlogdata.VStreamResultsResponse) {}; diff --git a/proto/tabletmanagerdata.proto b/proto/tabletmanagerdata.proto index 0179b32d4d5..fc9f6fa97b9 100644 --- a/proto/tabletmanagerdata.proto +++ b/proto/tabletmanagerdata.proto @@ -34,6 +34,14 @@ import "vtrpc.proto"; // Data structures // +// This structure allows us to manage tablet selection preferences +// which are eventually passed to a TabletPicker. +enum TabletSelectionPreference { + ANY = 0; + INORDER = 1; + UNKNOWN = 3; // Don't change any existing value +} + message TableDefinition { // the table name string name = 1; @@ -211,6 +219,8 @@ message ApplySchemaRequest { SchemaDefinition before_schema = 4; SchemaDefinition after_schema = 5; string sql_mode = 6; + // BatchSize indicates how many queries to apply together + int64 batch_size = 7; } message ApplySchemaResponse { @@ -478,6 +488,9 @@ message BackupRequest { // IncrementalFromPos indicates a position of a previous backup. When this value is non-empty // then the backup becomes incremental and applies as of given position. string incremental_from_pos = 3; + // UpgradeSafe indicates if the backup should be taken with innodb_fast_shutdown=0 + // so that it's a backup that can be used for an upgrade. 
+ bool upgrade_safe = 4; } message BackupResponse { @@ -492,12 +505,83 @@ message RestoreFromBackupRequest { string restore_to_pos = 2; // Dry run does not actually performs the restore, but validates the steps and availability of backups bool dry_run = 3; + // RestoreToTimestamp, if given, requested an inremental restore up to (and excluding) the given timestamp. + // RestoreToTimestamp and RestoreToPos are mutually exclusive. + vttime.Time restore_to_timestamp = 4; } message RestoreFromBackupResponse { logutil.Event event = 1; } +// +// VReplication related messages +// + +message CreateVReplicationWorkflowRequest { + string workflow = 1; + repeated binlogdata.BinlogSource binlog_source = 2; + // Optional parameters. + repeated string cells = 3; + // TabletTypes is the list of tablet types to use when selecting source tablets. + repeated topodata.TabletType tablet_types = 4; + TabletSelectionPreference tablet_selection_preference = 5; + binlogdata.VReplicationWorkflowType workflow_type = 6; + binlogdata.VReplicationWorkflowSubType workflow_sub_type = 7; + // DeferSecondaryKeys specifies if secondary keys should be created in one shot after table + // copy finishes. + bool defer_secondary_keys = 8; + // AutoStart specifies if the workflow should be started when created. + bool auto_start = 9; + // Should the workflow stop after the copy phase. 
+ bool stop_after_copy = 10; +} + +message CreateVReplicationWorkflowResponse { + query.QueryResult result = 1; +} + +message DeleteVReplicationWorkflowRequest { + string workflow = 1; +} + +message DeleteVReplicationWorkflowResponse { + query.QueryResult result = 1; +} + +message ReadVReplicationWorkflowRequest { + string workflow = 1; +} + +message ReadVReplicationWorkflowResponse { + string workflow = 2; + string cells = 3; + repeated topodata.TabletType tablet_types = 4; + TabletSelectionPreference tablet_selection_preference = 5; + string db_name = 6; + string tags = 7; + binlogdata.VReplicationWorkflowType workflow_type = 8; + binlogdata.VReplicationWorkflowSubType workflow_sub_type = 9; + bool defer_secondary_keys = 10; + message Stream { + int32 id = 1; + binlogdata.BinlogSource bls = 2; + string pos = 3; + string stop_pos = 4; + int64 max_tps = 5; + int64 max_replication_lag = 6; + vttime.Time time_updated = 7; + vttime.Time transaction_timestamp = 8; + binlogdata.VReplicationWorkflowState state = 9; + string message = 10; + int64 rows_copied = 11; + vttime.Time time_heartbeat = 12; + vttime.Time time_throttled = 13; + string component_throttled = 14; + } + repeated Stream streams = 11; +} + message VDiffRequest { string keyspace = 1; string workflow = 2; @@ -544,13 +628,42 @@ message VDiffOptions { VDiffReportOptions report_options = 3; } -message UpdateVRWorkflowRequest { +message UpdateVReplicationWorkflowRequest { string workflow = 1; repeated string cells = 2; - repeated string tablet_types = 3; - binlogdata.OnDDLAction on_ddl = 4; + repeated topodata.TabletType tablet_types = 3; + TabletSelectionPreference tablet_selection_preference = 4; + binlogdata.OnDDLAction on_ddl = 5; + binlogdata.VReplicationWorkflowState state = 6; } -message UpdateVRWorkflowResponse { +message UpdateVReplicationWorkflowResponse { query.QueryResult result = 1; } + +message ResetSequencesRequest { + repeated string tables = 1; +} + +message ResetSequencesResponse { +} + 
+message CheckThrottlerRequest { + string app_name = 1; +} + +message CheckThrottlerResponse { + // StatusCode is HTTP compliant response code (e.g. 200 for OK) + int32 status_code = 1; + // Value is the metric value collected by the tablet + double value = 2; + // Threshold is the throttling threshold the table was comparing the value with + double threshold = 3; + // Error indicates an error retrieving the value + string error = 4; + // Message + string message = 5; + // RecentlyChecked indicates that the tablet has been hit with a user-facing check, which can then imply + // that heartbeats lease should be renwed. + bool recently_checked = 6; +} diff --git a/proto/tabletmanagerservice.proto b/proto/tabletmanagerservice.proto index 5fcb889e48a..7492bdd7cca 100644 --- a/proto/tabletmanagerservice.proto +++ b/proto/tabletmanagerservice.proto @@ -66,6 +66,8 @@ service TabletManager { rpc ApplySchema(tabletmanagerdata.ApplySchemaRequest) returns (tabletmanagerdata.ApplySchemaResponse) {}; + rpc ResetSequences(tabletmanagerdata.ResetSequencesRequest) returns (tabletmanagerdata.ResetSequencesResponse) {}; + rpc LockTables(tabletmanagerdata.LockTablesRequest) returns (tabletmanagerdata.LockTablesResponse) {}; rpc UnlockTables(tabletmanagerdata.UnlockTablesRequest) returns (tabletmanagerdata.UnlockTablesResponse) {}; @@ -112,9 +114,12 @@ service TabletManager { rpc GetReplicas(tabletmanagerdata.GetReplicasRequest) returns (tabletmanagerdata.GetReplicasResponse) {}; // VReplication API + rpc CreateVReplicationWorkflow(tabletmanagerdata.CreateVReplicationWorkflowRequest) returns (tabletmanagerdata.CreateVReplicationWorkflowResponse) {}; + rpc DeleteVReplicationWorkflow(tabletmanagerdata.DeleteVReplicationWorkflowRequest) returns(tabletmanagerdata.DeleteVReplicationWorkflowResponse) {}; + rpc ReadVReplicationWorkflow(tabletmanagerdata.ReadVReplicationWorkflowRequest) returns(tabletmanagerdata.ReadVReplicationWorkflowResponse) {}; rpc 
VReplicationExec(tabletmanagerdata.VReplicationExecRequest) returns(tabletmanagerdata.VReplicationExecResponse) {}; rpc VReplicationWaitForPos(tabletmanagerdata.VReplicationWaitForPosRequest) returns(tabletmanagerdata.VReplicationWaitForPosResponse) {}; - rpc UpdateVRWorkflow(tabletmanagerdata.UpdateVRWorkflowRequest) returns(tabletmanagerdata.UpdateVRWorkflowResponse) {}; + rpc UpdateVReplicationWorkflow(tabletmanagerdata.UpdateVReplicationWorkflowRequest) returns(tabletmanagerdata.UpdateVReplicationWorkflowResponse) {}; // VDiff API rpc VDiff(tabletmanagerdata.VDiffRequest) returns(tabletmanagerdata.VDiffResponse) {}; @@ -173,4 +178,6 @@ service TabletManager { // RestoreFromBackup deletes all local data and restores it from the latest backup. rpc RestoreFromBackup(tabletmanagerdata.RestoreFromBackupRequest) returns (stream tabletmanagerdata.RestoreFromBackupResponse) {}; + // CheckThrottler issues a 'check' on a tablet's throttler + rpc CheckThrottler(tabletmanagerdata.CheckThrottlerRequest) returns (tabletmanagerdata.CheckThrottlerResponse) {}; } diff --git a/proto/topodata.proto b/proto/topodata.proto index dc49974573a..c921f72dfa4 100644 --- a/proto/topodata.proto +++ b/proto/topodata.proto @@ -366,6 +366,19 @@ message ShardTabletControl { bool query_service_disabled = 3; } +// ThrottledAppRule defines an app-specific throttling rule, with expiration. +message ThrottledAppRule { + // Name of the app to be throttled, e.g. "vreplication" or "online-ddl" + string name = 1; + // Ratio defines how much the app should be throttled, range [0.0...1.0]. 1.0 means fully throttled. 0.0 means not throttled at all. + // Negative values are reserved for a future implementation. + double ratio = 2; + // ExpiresAt is the time at which the rule expires. + vttime.Time expires_at = 3; + // Exempt indicates the app should never be throttled, even if the throttler is, in general, throttling other apps. 
+ bool exempt = 4; +} + message ThrottlerConfig { // Enabled indicates that the throttler is actually checking state for // requests. When disabled, it automatically returns 200 OK for all @@ -383,6 +396,9 @@ message ThrottlerConfig { // CheckAsCheckSelf indicates whether a throttler /check request // should behave like a /check-self. bool check_as_check_self = 4; + + // ThrottledApps is a map of rules for app-specific throttling + map throttled_apps = 5; } // SrvKeyspace is a rollup node for the keyspace itself. diff --git a/proto/vschema.proto b/proto/vschema.proto index 3edace9f215..1ba7a64dff6 100644 --- a/proto/vschema.proto +++ b/proto/vschema.proto @@ -45,10 +45,19 @@ message Keyspace { map tables = 3; // If require_explicit_routing is true, vindexes and tables are not added to global routing bool require_explicit_routing = 4; + // foreign_key_mode dictates how Vitess should handle foreign keys for this keyspace. + ForeignKeyMode foreign_key_mode = 5; bool cross_tablet = 85; bool attach_enable = 86; string attach_to = 87; + + enum ForeignKeyMode { + unspecified = 0; + disallow = 1; + unmanaged = 2; + managed = 3; + } } // Vindex is the vindex info for a Keyspace. diff --git a/proto/vtctldata.proto b/proto/vtctldata.proto index 407a20256f9..cfa77dde335 100644 --- a/proto/vtctldata.proto +++ b/proto/vtctldata.proto @@ -86,7 +86,7 @@ message MaterializeSettings { // MaterializationIntent is used to identify the reason behind the materialization workflow: eg. 
MoveTables, CreateLookupVindex MaterializationIntent materialization_intent = 9; // SourceTimeZone is the time zone in which datetimes on the source were stored, provided as an option in MoveTable - string source_time_zone =10; + string source_time_zone = 10; // TargetTimeZone is not currently specifiable by the user, defaults to UTC for the forward workflows // and to the SourceTimeZone in reverse workflows string target_time_zone = 11; @@ -95,6 +95,8 @@ message MaterializeSettings { string on_ddl = 13; // DeferSecondaryKeys specifies if secondary keys should be created in one shot after table copy finishes. bool defer_secondary_keys = 14; + tabletmanagerdata.TabletSelectionPreference tablet_selection_preference = 15; + bool atomic_copy = 16; } /* Data types for VtctldServer */ @@ -104,6 +106,98 @@ message Keyspace { topodata.Keyspace keyspace = 2; } +enum QueryOrdering { + NONE = 0; + ASCENDING = 1; + DESCENDING = 2; +} + +// SchemaMigration represents a row in the schema_migrations sidecar table. 
+message SchemaMigration { + string uuid = 1; + string keyspace = 2; + string shard = 3; + string schema = 4; + string table = 5; + string migration_statement = 6; + Strategy strategy = 7; + string options = 8; + vttime.Time added_at = 9; + vttime.Time requested_at = 10; + vttime.Time ready_at = 11; + vttime.Time started_at = 12; + vttime.Time liveness_timestamp = 13; + vttime.Time completed_at = 14; + vttime.Time cleaned_up_at = 15; + Status status = 16; + string log_path = 17; + string artifacts = 18; + uint64 retries = 19; + topodata.TabletAlias tablet = 20; + bool tablet_failure = 21; + float progress = 22; + string migration_context = 23; + string ddl_action = 24; + string message = 25; + int64 eta_seconds = 26; + uint64 rows_copied = 27; + int64 table_rows = 28; + uint32 added_unique_keys = 29; + uint32 removed_unique_keys = 30; + string log_file = 31; + vttime.Duration artifact_retention = 32; + bool postpone_completion = 33; + string removed_unique_key_names = 34; + string dropped_no_default_column_names = 35; + string expanded_column_names = 36; + string revertible_notes = 37; + bool allow_concurrent = 38; + string reverted_uuid = 39; + bool is_view = 40; + bool ready_to_complete = 41; + int64 vitess_liveness_indicator = 42; + float user_throttle_ratio = 43; + string special_plan = 44; + vttime.Time last_throttled_at = 45; + string component_throttled = 46; + vttime.Time cancelled_at = 47; + bool postpone_launch = 48; + string stage = 49; // enum? + uint32 cutover_attempts = 50; + bool is_immediate_operation = 51; + vttime.Time reviewed_at = 52; + vttime.Time ready_to_complete_at = 53; + + enum Strategy { + option allow_alias = true; + // SchemaMigration_VITESS uses vreplication to run the schema migration. It is + // the default strategy for OnlineDDL requests. + // + // SchemaMigration_VITESS was also formerly called "ONLINE". + VITESS = 0; + ONLINE = 0; + GHOST = 1; + PTOSC = 2; + // SchemaMigration_DIRECT runs the migration directly against MySQL (e.g. 
`ALTER TABLE ...`), + // meaning it is not actually an "online" DDL migration. + DIRECT = 3; + // SchemaMigration_MYSQL is a managed migration (queued and executed by the + // scheduler) but runs through a MySQL `ALTER TABLE`. + MYSQL = 4; + } + + enum Status { + UNKNOWN = 0; + REQUESTED = 1; + CANCELLED = 2; + QUEUED = 3; + READY = 4; + RUNNING = 5; + COMPLETE = 6; + FAILED = 7; + } +} + message Shard { string keyspace = 1; string name = 2; @@ -115,10 +209,18 @@ message Workflow { string name = 1; ReplicationLocation source = 2; ReplicationLocation target = 3; + // This represents how long it's been since we processed any event in the + // stream. int64 max_v_replication_lag = 4; map shard_streams = 5; string workflow_type = 6; string workflow_sub_type = 7; + // This represents the lag across all shards, between the current time and + // the timestamp of the last transaction OR heartbeat timestamp (if there + // have been no writes to replicate from the source). + int64 max_v_replication_transaction_lag = 8; + // This specifies whether to defer the creation of secondary keys. + bool defer_secondary_keys = 9; message ReplicationLocation { string keyspace = 1; @@ -155,6 +257,8 @@ message Workflow { // ith log, we will still return logs in [0, i) + (i, N]. string log_fetch_error = 14; repeated string tags = 15; + int64 rows_copied = 16; + ThrottlerStatus throttler_status = 17; message CopyState { string table = 1; @@ -171,6 +275,11 @@ message Workflow { string message = 7; int64 count = 8; } + + message ThrottlerStatus { + string component_throttled = 1; + vttime.Time time_throttled = 2; + } } } @@ -225,8 +334,7 @@ message ApplyShardRoutingRulesResponse { message ApplySchemaRequest { string keyspace = 1; - // Allow large schema changes which incur a longer unavailability of the database. - bool allow_long_unavailability = 2; + reserved 2; // SQL commands to run. 
repeated string sql = 3; // Online DDL strategy, compatible with @@ddl_strategy session variable (examples: 'gh-ost', 'pt-osc', 'gh-ost --max-load=Threads_running=100'") @@ -240,15 +348,18 @@ message ApplySchemaRequest { // WaitReplicasTimeout is the duration of time to wait for replicas to catch // up in reparenting. vttime.Duration wait_replicas_timeout = 7; - // Skip pre-apply schema checks, and directly forward schema change query to shards - bool skip_preflight = 8; + + reserved 8; // caller_id identifies the caller. This is the effective caller ID, // set by the application to further identify the caller. vtrpc.CallerID caller_id = 9; + // BatchSize indicates how many queries to apply together + int64 batch_size = 10; } message ApplySchemaResponse { repeated string uuid_list = 1; + map rows_affected_by_shard = 2; } message ApplyVSchemaRequest { @@ -277,6 +388,9 @@ message BackupRequest { // IncrementalFromPos indicates a position of a previous backup. When this value is non-empty // then the backup becomes incremental and applies as of given position. string incremental_from_pos = 4; + // UpgradeSafe indicates if the backup should be taken with innodb_fast_shutdown=0 + // so that it's a backup that can be used for an upgrade. + bool upgrade_safe = 5; } message BackupResponse { @@ -296,6 +410,21 @@ message BackupShardRequest { // Concurrency specifies the number of compression/checksum jobs to run // simultaneously. uint64 concurrency = 4; + // UpgradeSafe indicates if the backup should be taken with innodb_fast_shutdown=0 + // so that it's a backup that can be used for an upgrade. + bool upgrade_safe = 5; + // IncrementalFromPos indicates a position of a previous backup. When this value is non-empty + // then the backup becomes incremental and applies as of given position. 
+ string incremental_from_pos = 6; +} + +message CancelSchemaMigrationRequest { + string keyspace = 1; + string uuid = 2; +} + +message CancelSchemaMigrationResponse { + map rows_affected_by_shard = 1; } message ChangeTabletTypeRequest { @@ -310,6 +439,25 @@ message ChangeTabletTypeResponse { bool was_dry_run = 3; } +message CleanupSchemaMigrationRequest { + string keyspace = 1; + string uuid = 2; +} + +message CleanupSchemaMigrationResponse { + map rows_affected_by_shard = 1; +} + + +message CompleteSchemaMigrationRequest { + string keyspace = 1; + string uuid = 2; +} + +message CompleteSchemaMigrationResponse { + map rows_affected_by_shard = 1; +} + message CreateKeyspaceRequest { // Name is the name of the keyspace. string name = 1; @@ -459,6 +607,9 @@ message EmergencyReparentShardRequest { // PreventCrossCellPromotion is used to only promote the new primary from the same cell // as the failed primary. bool prevent_cross_cell_promotion = 6; + // WaitForAllTablets makes ERS wait for a response from all the tablets before proceeding. + // Useful when all the tablets are up and reachable. + bool wait_for_all_tablets = 7; } message EmergencyReparentShardResponse { @@ -640,6 +791,41 @@ message GetSchemaResponse { tabletmanagerdata.SchemaDefinition schema = 1; } +// GetSchemaMigrationsRequest controls the behavior of the GetSchemaMigrations +// rpc. +// +// Keyspace is a required field, while all other fields are optional. +// +// If UUID is set, other optional fields will be ignored, since there will be at +// most one migration with that UUID. Furthermore, if no migration with that +// UUID exists, an empty response, not an error, is returned. +// +// MigrationContext, Status, and Recent are mutually exclusive. +message GetSchemaMigrationsRequest { + string keyspace = 1; + // Uuid, if set, will cause GetSchemaMigrations to return exactly 1 migration, + // namely the one with that UUID. If no migration exists, the response will + // be an empty slice, not an error. 
+ // + // If this field is set, other fields (status filters, limit, skip, order) are + // ignored. + string uuid = 2; + + string migration_context = 3; + SchemaMigration.Status status = 4; + // Recent, if set, returns migrations requested between now and the provided + // value. + vttime.Duration recent = 5; + + QueryOrdering order = 6; + uint64 limit = 7; + uint64 skip = 8; +} + +message GetSchemaMigrationsResponse { + repeated SchemaMigration migrations = 1; +} + message GetShardRequest { string keyspace = 1; string shard_name = 2; @@ -694,9 +880,11 @@ message UpdateThrottlerConfigRequest { // CustomQuerySet indicates that the value of CustomQuery has changed bool custom_query_set = 6; // CheckAsCheckSelf instructs the throttler to respond to /check requests by checking the tablet's own health - bool check_as_check_self=7; + bool check_as_check_self = 7; // CheckAsCheckShard instructs the throttler to respond to /check requests by checking the shard's health (this is the default behavior) - bool check_as_check_shard=8; + bool check_as_check_shard = 8; + // ThrottledApp indicates a single throttled app rule (ignored if name is empty) + topodata.ThrottledAppRule throttled_app = 9; } message UpdateThrottlerConfigResponse { @@ -793,6 +981,9 @@ message GetWorkflowsRequest { string keyspace = 1; bool active_only = 2; bool name_only = 3; + // If you only want a specific workflow then set this field. 
+ string workflow = 4; + bool include_logs = 5; } message GetWorkflowsResponse { @@ -811,6 +1002,184 @@ message InitShardPrimaryResponse { repeated logutil.Event events = 1; } +message LaunchSchemaMigrationRequest { + string keyspace = 1; + string uuid = 2; +} + +message LaunchSchemaMigrationResponse { + map rows_affected_by_shard = 1; +} + +message LookupVindexCreateRequest { + string keyspace = 1; + string workflow = 2; + repeated string cells = 3; + vschema.Keyspace vindex = 4; + bool continue_after_copy_with_owner = 5; + repeated topodata.TabletType tablet_types = 6; + tabletmanagerdata.TabletSelectionPreference tablet_selection_preference = 7; +} + +message LookupVindexCreateResponse { +} + +message LookupVindexExternalizeRequest { + // Where the lookup vindex lives. + string keyspace = 1; + // This is the name of the lookup vindex and the vreplication workflow. + string name = 2; + // Where the vreplication workflow lives. + string table_keyspace = 3; +} + +message LookupVindexExternalizeResponse { + // Was the workflow also deleted. + bool workflow_deleted = 1; +} + +message MaterializeCreateRequest { + MaterializeSettings settings = 1; +} + +message MaterializeCreateResponse { +} + +message MigrateCreateRequest { + // The necessary info gets passed on to each primary tablet involved + // in the workflow via the CreateVReplicationWorkflow tabletmanager RPC. + string workflow = 1; + string source_keyspace = 2; + string target_keyspace = 3; + string mount_name = 4; + repeated string cells = 5; + repeated topodata.TabletType tablet_types = 6; + tabletmanagerdata.TabletSelectionPreference tablet_selection_preference = 7; + bool all_tables = 8; + repeated string include_tables = 9; + repeated string exclude_tables = 10; + // SourceTimeZone is the time zone in which datetimes on the source were stored, provided as an option in MoveTables + string source_time_zone = 11; + // OnDdl specifies the action to be taken when a DDL is encountered. 
+ string on_ddl = 12; + // StopAfterCopy specifies if vreplication should be stopped after copying. + bool stop_after_copy = 13; + // DropForeignKeys specifies if foreign key constraints should be elided on the target. + bool drop_foreign_keys = 14; + // DeferSecondaryKeys specifies if secondary keys should be created in one shot after table copy finishes. + bool defer_secondary_keys = 15; + // Start the workflow after creating it. + bool auto_start = 16; + // NoRoutingRules is set to true if routing rules should not be created on the target when the workflow is created. + bool no_routing_rules = 17; +} + +message MigrateCompleteRequest { + string workflow = 1; + string target_keyspace = 3; + bool keep_data = 4; + bool keep_routing_rules = 5; + bool rename_tables = 6; + bool dry_run = 7; +} + +message MigrateCompleteResponse { + string summary = 1; + repeated string dry_run_results = 2; +} + +message MountRegisterRequest { + string topo_type = 1; + string topo_server = 2; + string topo_root = 3; + string name = 4; +} + +message MountRegisterResponse { +} + +message MountUnregisterRequest { + string name = 4; +} + +message MountUnregisterResponse { +} + +message MountShowRequest { + string name = 4; +} + +message MountShowResponse { + string topo_type = 1; + string topo_server = 2; + string topo_root = 3; + string name = 4; +} + +message MountListRequest { +} + +message MountListResponse { + repeated string names = 1; +} + +message MoveTablesCreateRequest { + // The necessary info gets passed on to each primary tablet involved + // in the workflow via the CreateVReplicationWorkflow tabletmanager RPC. 
+ string workflow = 1; + string source_keyspace = 2; + string target_keyspace = 3; + repeated string cells = 4; + repeated topodata.TabletType tablet_types = 5; + tabletmanagerdata.TabletSelectionPreference tablet_selection_preference = 6; + repeated string source_shards = 7; + bool all_tables = 8; + repeated string include_tables = 9; + repeated string exclude_tables = 10; + // The name of the external cluster mounted in topo server. + string external_cluster_name = 11; + // SourceTimeZone is the time zone in which datetimes on the source were stored, provided as an option in MoveTables + string source_time_zone = 12; + // OnDdl specifies the action to be taken when a DDL is encountered. + string on_ddl = 13; + // StopAfterCopy specifies if vreplication should be stopped after copying. + bool stop_after_copy = 14; + // DropForeignKeys specifies if foreign key constraints should be elided on the target. + bool drop_foreign_keys = 15; + // DeferSecondaryKeys specifies if secondary keys should be created in one shot after table copy finishes. + bool defer_secondary_keys = 16; + // Start the workflow after creating it. + bool auto_start = 17; + // NoRoutingRules is set to true if routing rules should not be created on the target when the workflow is created. + bool no_routing_rules = 18; + // Run a single copy phase for the entire database. + bool atomic_copy = 19; +} + +message MoveTablesCreateResponse { + message TabletInfo { + topodata.TabletAlias tablet = 1; + // Created is set if the workflow was created on this tablet or not. 
+ bool created = 2; + } + string summary = 1; + repeated TabletInfo details = 2; +} + +message MoveTablesCompleteRequest { + string workflow = 1; + string target_keyspace = 3; + bool keep_data = 4; + bool keep_routing_rules = 5; + bool rename_tables = 6; + bool dry_run = 7; +} + +message MoveTablesCompleteResponse { + string summary = 1; + repeated string dry_run_results = 2; +} + message PingTabletRequest { topodata.TabletAlias tablet_alias = 1; } @@ -988,6 +1357,27 @@ message ReparentTabletResponse { topodata.TabletAlias primary = 3; } +message ReshardCreateRequest { + string workflow = 1; + string keyspace = 2; + repeated string source_shards = 3; + repeated string target_shards = 4; + repeated string cells = 5; + repeated topodata.TabletType tablet_types = 6; + tabletmanagerdata.TabletSelectionPreference tablet_selection_preference = 7; + // SkipSchemaCopy specifies if the schema should be copied from the source shard, set false if + // schema is already created on the target shard before Reshard is invoked. + bool skip_schema_copy = 8; + // OnDdl specifies the action to be taken when a DDL is encountered. + string on_ddl = 9; + // StopAfterCopy specifies if vreplication should be stopped after copying. + bool stop_after_copy = 10; + // DeferSecondaryKeys specifies if secondary keys should be created in one shot after table copy finishes. + bool defer_secondary_keys = 11; + // Start the workflow after creating it. + bool auto_start = 12; +} + message RestoreFromBackupRequest { topodata.TabletAlias tablet_alias = 1; // BackupTime, if set, will use the backup taken most closely at or before @@ -999,6 +1389,9 @@ message RestoreFromBackupRequest { string restore_to_pos = 3; // Dry run does not actually performs the restore, but validates the steps and availability of backups bool dry_run = 4; + // RestoreToTimestamp, if given, requests an incremental restore up to (and excluding) the given timestamp. + // RestoreToTimestamp and RestoreToPos are mutually exclusive. 
+ vttime.Time restore_to_timestamp = 5; } message RestoreFromBackupResponse { @@ -1009,6 +1402,15 @@ message RestoreFromBackupResponse { logutil.Event event = 4; } +message RetrySchemaMigrationRequest { + string keyspace = 1; + string uuid = 2; +} + +message RetrySchemaMigrationResponse { + map rows_affected_by_shard = 1; +} + message RunHealthCheckRequest { topodata.TabletAlias tablet_alias = 1; } @@ -1303,19 +1705,153 @@ message ValidateVSchemaResponse { map results_by_shard = 2; } +message VDiffCreateRequest { + string workflow = 1; + string target_keyspace = 2; + string uuid = 3; + repeated string source_cells = 4; + repeated string target_cells = 5; + repeated topodata.TabletType tablet_types = 6; + tabletmanagerdata.TabletSelectionPreference tablet_selection_preference = 7; + repeated string tables = 8; + int64 limit = 9; + vttime.Duration filtered_replication_wait_time = 10; + bool debug_query = 11; + bool only_p_ks = 12; + bool update_table_stats = 13; + int64 max_extra_rows_to_compare = 14; + bool wait = 15; + vttime.Duration wait_update_interval = 16; + bool auto_retry = 17; + bool verbose = 18; +} + +message VDiffCreateResponse { + // Intentionally upper case to maintain compatibility with + // vtctlclient and other VDiff client command output. + string UUID = 1; +} + +message VDiffDeleteRequest { + string workflow = 1; + string target_keyspace = 2; + // This will be 'all' or a UUID. + string arg = 3; +} + +message VDiffDeleteResponse { +} + +message VDiffResumeRequest { + string workflow = 1; + string target_keyspace = 2; + string uuid = 3; +} + +message VDiffResumeResponse { +} + +message VDiffShowRequest { + string workflow = 1; + string target_keyspace = 2; + // This will be 'all', 'last', or a UUID. + string arg = 3; +} + +message VDiffShowResponse { + // The key is keyspace/shard. 
+ map tablet_responses = 1; +} + +message VDiffStopRequest { + string workflow = 1; + string target_keyspace = 2; + string uuid = 3; +} + +message VDiffStopResponse { +} + +message WorkflowDeleteRequest { + string keyspace = 1; + string workflow = 2; + bool keep_data = 3; + bool keep_routing_rules = 4; +} + +message WorkflowDeleteResponse { + message TabletInfo { + topodata.TabletAlias tablet = 1; + // Delete is set if the workflow was deleted on this tablet. + bool deleted = 2; + } + string summary = 1; + repeated TabletInfo details = 2; +} + +message WorkflowStatusRequest { + string keyspace = 1; + string workflow = 2; +} + +message WorkflowStatusResponse { + message TableCopyState { + int64 rows_copied = 1; + int64 rows_total = 2; + float rows_percentage = 3; + int64 bytes_copied = 4; + int64 bytes_total = 5; + float bytes_percentage = 6; + } + message ShardStreamState { + int32 id = 1; + topodata.TabletAlias tablet = 2; + string source_shard = 3; + string position = 4; + string status = 5; + string info = 6; + } + message ShardStreams { + repeated ShardStreamState streams = 2; + } + // The key is keyspace/shard. + map table_copy_state = 1; + map shard_streams = 2; + string traffic_state = 3; +} + +message WorkflowSwitchTrafficRequest { + string keyspace = 1; + string workflow = 2; + repeated string cells = 3; + repeated topodata.TabletType tablet_types = 4; + vttime.Duration max_replication_lag_allowed = 5; + bool enable_reverse_replication = 6; + int32 direction = 7; + vttime.Duration timeout = 8; + bool dry_run = 9; + bool initialize_target_sequences = 10; +} + +message WorkflowSwitchTrafficResponse { + string summary = 1; + string start_state = 2; + string current_state = 3; + repeated string dry_run_results = 4; +} + message WorkflowUpdateRequest { string keyspace = 1; // TabletRequest gets passed on to each primary tablet involved - // in the workflow via the UpdateVRWorkflow tabletmanager RPC. 
- tabletmanagerdata.UpdateVRWorkflowRequest tablet_request = 2; + // in the workflow via the UpdateVReplicationWorkflow tabletmanager RPC. + tabletmanagerdata.UpdateVReplicationWorkflowRequest tablet_request = 2; } message WorkflowUpdateResponse { message TabletInfo { - string tablet = 1; + topodata.TabletAlias tablet = 1; // Changed is true if any of the provided values were different - // than what was already stored. The value is based on the query - // result's RowsAffected being 0 or not. + // than what was already stored on this tablet. bool changed = 2; } string summary = 1; diff --git a/proto/vtctlservice.proto b/proto/vtctlservice.proto index 82a60433055..59c24dc8445 100644 --- a/proto/vtctlservice.proto +++ b/proto/vtctlservice.proto @@ -54,12 +54,18 @@ service Vtctld { rpc Backup(vtctldata.BackupRequest) returns (stream vtctldata.BackupResponse) {}; // BackupShard chooses a tablet in the shard and uses it to create a backup. rpc BackupShard(vtctldata.BackupShardRequest) returns (stream vtctldata.BackupResponse) {}; + // CancelSchemaMigration cancels one or all migrations, terminating any running ones as needed. + rpc CancelSchemaMigration(vtctldata.CancelSchemaMigrationRequest) returns (vtctldata.CancelSchemaMigrationResponse) {}; // ChangeTabletType changes the db type for the specified tablet, if possible. // This is used primarily to arrange replicas, and it will not convert a // primary. For that, use InitShardPrimary. // // NOTE: This command automatically updates the serving graph. rpc ChangeTabletType(vtctldata.ChangeTabletTypeRequest) returns (vtctldata.ChangeTabletTypeResponse) {}; + // CleanupSchemaMigration marks a schema migration as ready for artifact cleanup. + rpc CleanupSchemaMigration(vtctldata.CleanupSchemaMigrationRequest) returns (vtctldata.CleanupSchemaMigrationResponse) {}; + // CompleteSchemaMigration completes one or all migrations executed with --postpone-completion. 
+ rpc CompleteSchemaMigration(vtctldata.CompleteSchemaMigrationRequest) returns (vtctldata.CompleteSchemaMigrationResponse) {}; // CreateKeyspace creates the specified keyspace in the topology. For a // SNAPSHOT keyspace, the request must specify the name of a base keyspace, // as well as a snapshot time. @@ -120,6 +126,12 @@ service Vtctld { // GetSchema returns the schema for a tablet, or just the schema for the // specified tables in that tablet. rpc GetSchema(vtctldata.GetSchemaRequest) returns (vtctldata.GetSchemaResponse) {}; + // GetSchemaMigrations returns one or more online schema migrations for the + // specified keyspace, analogous to `SHOW VITESS_MIGRATIONS`. + // + // Different fields in the request message result in different filtering + // behaviors. See the documentation on GetSchemaMigrationsRequest for details. + rpc GetSchemaMigrations(vtctldata.GetSchemaMigrationsRequest) returns (vtctldata.GetSchemaMigrationsResponse) {}; // GetShard returns information about a shard in the topology. rpc GetShard(vtctldata.GetShardRequest) returns (vtctldata.GetShardResponse) {}; // GetShardRoutingRules returns the VSchema shard routing rules. @@ -156,6 +168,35 @@ service Vtctld { // PlannedReparentShard or EmergencyReparentShard should be used in those // cases instead. rpc InitShardPrimary(vtctldata.InitShardPrimaryRequest) returns (vtctldata.InitShardPrimaryResponse) {}; + // LaunchSchemaMigration launches one or all migrations executed with --postpone-launch. 
+ rpc LaunchSchemaMigration(vtctldata.LaunchSchemaMigrationRequest) returns (vtctldata.LaunchSchemaMigrationResponse) {}; + + rpc LookupVindexCreate(vtctldata.LookupVindexCreateRequest) returns (vtctldata.LookupVindexCreateResponse) {}; + rpc LookupVindexExternalize(vtctldata.LookupVindexExternalizeRequest) returns (vtctldata.LookupVindexExternalizeResponse) {}; + + // MaterializeCreate creates a workflow to materialize one or more tables + // from a source keyspace to a target keyspace using provided expressions. + rpc MaterializeCreate(vtctldata.MaterializeCreateRequest) returns (vtctldata.MaterializeCreateResponse) {}; + + // MigrateCreate creates a workflow which migrates one or more tables from an + // external cluster into Vitess. + rpc MigrateCreate(vtctldata.MigrateCreateRequest) returns (vtctldata.WorkflowStatusResponse) {}; + + // MountRegister registers a new external Vitess cluster. + rpc MountRegister(vtctldata.MountRegisterRequest) returns (vtctldata.MountRegisterResponse) {}; + // MountUnregister unregisters an external Vitess cluster. + rpc MountUnregister(vtctldata.MountUnregisterRequest) returns (vtctldata.MountUnregisterResponse) {}; + // MountShow returns information about an external Vitess cluster. + rpc MountShow(vtctldata.MountShowRequest) returns (vtctldata.MountShowResponse) {}; + // MountList lists all registered external Vitess clusters. + rpc MountList(vtctldata.MountListRequest) returns (vtctldata.MountListResponse) {}; + + // MoveTablesCreate creates a workflow which moves one or more tables from a + // source keyspace to a target keyspace. + rpc MoveTablesCreate(vtctldata.MoveTablesCreateRequest) returns (vtctldata.WorkflowStatusResponse) {}; + // MoveTablesComplete completes the move and cleans up the workflow and + // its related artifacts. + rpc MoveTablesComplete(vtctldata.MoveTablesCompleteRequest) returns (vtctldata.MoveTablesCompleteResponse) {}; // PingTablet checks that the specified tablet is awake and responding to RPCs. 
// This command can be blocked by other in-flight operations. rpc PingTablet(vtctldata.PingTabletRequest) returns (vtctldata.PingTabletResponse) {}; @@ -203,8 +244,12 @@ service Vtctld { // only works if the current replica position matches the last known reparent // action. rpc ReparentTablet(vtctldata.ReparentTabletRequest) returns (vtctldata.ReparentTabletResponse) {}; + // ReshardCreate creates a workflow to reshard a keyspace. + rpc ReshardCreate(vtctldata.ReshardCreateRequest) returns (vtctldata.WorkflowStatusResponse) {}; // RestoreFromBackup stops mysqld for the given tablet and restores a backup. rpc RestoreFromBackup(vtctldata.RestoreFromBackupRequest) returns (stream vtctldata.RestoreFromBackupResponse) {}; + // RetrySchemaMigration marks a given schema migration for retry. + rpc RetrySchemaMigration(vtctldata.RetrySchemaMigrationRequest) returns (vtctldata.RetrySchemaMigrationResponse) {}; // RunHealthCheck runs a healthcheck on the remote tablet. rpc RunHealthCheck(vtctldata.RunHealthCheckRequest) returns (vtctldata.RunHealthCheckResponse) {}; // SetKeyspaceDurabilityPolicy updates the DurabilityPolicy for a keyspace. @@ -293,6 +338,15 @@ service Vtctld { rpc ValidateVersionShard(vtctldata.ValidateVersionShardRequest) returns (vtctldata.ValidateVersionShardResponse) {}; // ValidateVSchema compares the schema of each primary tablet in "keyspace/shards..." to the vschema and errs if there are differences. 
rpc ValidateVSchema(vtctldata.ValidateVSchemaRequest) returns (vtctldata.ValidateVSchemaResponse) {}; + rpc VDiffCreate(vtctldata.VDiffCreateRequest) returns (vtctldata.VDiffCreateResponse) {}; + rpc VDiffDelete(vtctldata.VDiffDeleteRequest) returns (vtctldata.VDiffDeleteResponse) {}; + rpc VDiffResume(vtctldata.VDiffResumeRequest) returns (vtctldata.VDiffResumeResponse) {}; + rpc VDiffShow(vtctldata.VDiffShowRequest) returns (vtctldata.VDiffShowResponse) {}; + rpc VDiffStop(vtctldata.VDiffStopRequest) returns (vtctldata.VDiffStopResponse) {}; + // WorkflowDelete deletes a vreplication workflow. + rpc WorkflowDelete(vtctldata.WorkflowDeleteRequest) returns (vtctldata.WorkflowDeleteResponse) {}; + rpc WorkflowStatus(vtctldata.WorkflowStatusRequest) returns (vtctldata.WorkflowStatusResponse) {}; + rpc WorkflowSwitchTraffic(vtctldata.WorkflowSwitchTrafficRequest) returns (vtctldata.WorkflowSwitchTrafficResponse) {}; // WorkflowUpdate updates the configuration of a vreplication workflow // using the provided updated parameters. rpc WorkflowUpdate(vtctldata.WorkflowUpdateRequest) returns (vtctldata.WorkflowUpdateResponse) {}; diff --git a/proto/vtgate.proto b/proto/vtgate.proto index c9ce25e1bca..0d8781bcd61 100644 --- a/proto/vtgate.proto +++ b/proto/vtgate.proto @@ -154,6 +154,9 @@ message Session { int64 query_timeout = 25; map prepare_statement = 26; + + // MigrationContext + string migration_context = 27; } // PrepareData keeps the prepared statement and other information related for execution of it. diff --git a/test.go b/test.go index 2c943e9ea82..d797647db3d 100755 --- a/test.go +++ b/test.go @@ -77,7 +77,7 @@ For example: // Flags var ( flavor = flag.String("flavor", "mysql57", "comma-separated bootstrap flavor(s) to run against (when using Docker mode). 
Available flavors: all,"+flavors) - bootstrapVersion = flag.String("bootstrap-version", "18.0", "the version identifier to use for the docker images") + bootstrapVersion = flag.String("bootstrap-version", "22.1", "the version identifier to use for the docker images") runCount = flag.Int("runs", 1, "run each test this many times") retryMax = flag.Int("retry", 3, "max number of retries, to detect flaky tests") logPass = flag.Bool("log-pass", false, "log test output even if it passes") @@ -95,8 +95,9 @@ var ( skipBuild = flag.Bool("skip-build", false, "skip running 'make build'. Assumes pre-existing binaries exist") partialKeyspace = flag.Bool("partial-keyspace", false, "add a second keyspace for sharded tests and mark first shard as moved to this keyspace in the shard routing rules") // `go run test.go --dry-run --skip-build` to quickly test this file and see what tests will run - dryRun = flag.Bool("dry-run", false, "For each test to be run, it will output the test attributes, but NOT run the tests. Useful while debugging changes to test.go (this file)") - remoteStats = flag.String("remote-stats", "", "url to send remote stats") + dryRun = flag.Bool("dry-run", false, "For each test to be run, it will output the test attributes, but NOT run the tests. Useful while debugging changes to test.go (this file)") + remoteStats = flag.String("remote-stats", "", "url to send remote stats") + buildVTAdmin = flag.Bool("build-vtadmin", false, "Enable or disable VTAdmin build during 'make build'") ) var ( @@ -200,6 +201,9 @@ func (t *Test) run(dir, dataDir string) ([]byte, error) { } else { // If there is no cache, we have to call 'make build' before each test. args = []string{t.flavor, t.bootstrapVersion, "make build && " + testArgs} + if !*buildVTAdmin { + args[len(args)-1] = "NOVTADMINBUILD=1 " + args[len(args)-1] + } } cmd = exec.Command(path.Join(dir, "docker/test/run.sh"), args...) 
@@ -425,8 +429,13 @@ func main() { } else { // Since we're sharing the working dir, do the build once for all tests. log.Printf("Running make build...") - if out, err := exec.Command("make", "build").CombinedOutput(); err != nil { - log.Fatalf("make build failed: %v\n%s", err, out) + command := exec.Command("make", "build") + if !*buildVTAdmin { + command.Env = append(os.Environ(), "NOVTADMINBUILD=1") + } + if out, err := command.CombinedOutput(); err != nil { + log.Fatalf("make build failed; exit code: %d, error: %v\n%s", + command.ProcessState.ExitCode(), err, out) } } diff --git a/test/ci_workflow_gen.go b/test/ci_workflow_gen.go index 711beb23717..5a3031d7307 100644 --- a/test/ci_workflow_gen.go +++ b/test/ci_workflow_gen.go @@ -76,6 +76,7 @@ var ( "18", "xb_backup", "backup_pitr", + "backup_pitr_xtrabackup", "21", "22", "mysql_server_vault", @@ -91,9 +92,7 @@ var ( "vreplication_migrate_vdiff2_convert_tz", "onlineddl_revert", "onlineddl_scheduler", - "tabletmanager_throttler", "tabletmanager_throttler_topo", - "tabletmanager_throttler_custom_config", "tabletmanager_tablegc", "tabletmanager_consul", "vtgate_concurrentdml", @@ -112,6 +111,7 @@ var ( "vtgate_vschema", "vtgate_queries", "vtgate_schema_tracker", + "vtgate_foreignkey_stress", "vtorc", "xb_recovery", "mysql80", @@ -120,6 +120,8 @@ var ( "vreplication_cellalias", "vreplication_basic", "vreplication_v2", + "vreplication_partial_movetables_basic", + "vreplication_partial_movetables_sequences", "schemadiff_vrepl", "topo_connection_cache", "vtgate_partial_keyspace", @@ -131,7 +133,7 @@ var ( clustersRequiringXtraBackup = []string{ "xb_backup", "xb_recovery", - "backup_pitr", + "backup_pitr_xtrabackup", } clustersRequiringMakeTools = []string{ "18", @@ -139,6 +141,14 @@ var ( "vtgate_topo_consul", "tabletmanager_consul", } + clusterRequiring16CoresMachines = []string{ + "onlineddl_vrepl", + "onlineddl_vrepl_stress", + "onlineddl_vrepl_stress_suite", + "onlineddl_vrepl_suite", + "vreplication_basic", + 
"vreplication_migrate_vdiff2_convert_tz", + } ) type unitTest struct { @@ -153,6 +163,7 @@ type clusterTest struct { LimitResourceUsage bool EnableBinlogTransactionCompression bool PartialKeyspace bool + Cores16 bool } type selfHostedTest struct { @@ -170,6 +181,8 @@ func clusterMySQLVersions(clusterName string) mysqlVersions { return allMySQLVersions case clusterName == "backup_pitr": return allMySQLVersions + case clusterName == "backup_pitr_xtrabackup": + return allMySQLVersions case clusterName == "tabletmanager_tablegc": return allMySQLVersions case clusterName == "vtorc": @@ -328,6 +341,13 @@ func generateClusterWorkflows(list []string, tpl string) { Name: fmt.Sprintf("Cluster (%s)", cluster), Shard: cluster, } + cores16Clusters := canonnizeList(clusterRequiring16CoresMachines) + for _, cores16Cluster := range cores16Clusters { + if cores16Cluster == cluster { + test.Cores16 = true + break + } + } makeToolClusters := canonnizeList(clustersRequiringMakeTools) for _, makeToolCluster := range makeToolClusters { if makeToolCluster == cluster { diff --git a/test/client/client.go b/test/client/client.go index df1d13e7d57..d1a174d8d04 100644 --- a/test/client/client.go +++ b/test/client/client.go @@ -42,7 +42,6 @@ var ( func main() { pflag.Parse() - rand.Seed(time.Now().UnixNano()) // Connect to vtgate. 
db, err := vitessdriver.Open(*server, "@primary") diff --git a/test/config.json b/test/config.json index 41d9f8f01b7..feffd6551ae 100644 --- a/test/config.json +++ b/test/config.json @@ -100,6 +100,15 @@ "RetryMax": 1, "Tags": [] }, + "backup_pitr_xtrabackup": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/backup/pitr_xtrabackup", "-timeout", "30m"], + "Command": [], + "Manual": false, + "Shard": "backup_pitr_xtrabackup", + "RetryMax": 1, + "Tags": [] + }, "backup": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/backup/vtctlbackup", "-timeout", "30m"], @@ -438,17 +447,6 @@ "site_test" ] }, - "tabletmanager_throttler": { - "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/tabletmanager/throttler"], - "Command": [], - "Manual": false, - "Shard": "tabletmanager_throttler", - "RetryMax": 1, - "Tags": [ - "site_test" - ] - }, "tabletmanager_throttler_topo": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/tabletmanager/throttler_topo"], @@ -460,17 +458,6 @@ "site_test" ] }, - "tabletmanager_throttler_custom_config": { - "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/tabletmanager/throttler_custom_config"], - "Command": [], - "Manual": false, - "Shard": "tabletmanager_throttler_custom_config", - "RetryMax": 1, - "Tags": [ - "site_test" - ] - }, "tabletmanager_tablegc": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/tabletmanager/tablegc"], @@ -558,6 +545,15 @@ "RetryMax": 1, "Tags": ["vtgate"] }, + "vtgate_queries_timeout": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/queries/timeout"], + "Command": [], + "Manual": false, + "Shard": "vtgate_queries", + "RetryMax": 1, + "Tags": [] + }, "vtgate_queries_normalize": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/queries/normalize"], @@ -621,6 +617,24 @@ "RetryMax": 1, "Tags": ["vtgate"] }, + "vtgate_queries_random": { + "File": "unused.go", + 
"Args": ["vitess.io/vitess/go/test/endtoend/vtgate/queries/random"], + "Command": [], + "Manual": false, + "Shard": "vtgate_queries", + "RetryMax": 1, + "Tags": [] + }, + "vtgate_kill": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/queries/kill"], + "Command": [], + "Manual": false, + "Shard": "vtgate_queries", + "RetryMax": 1, + "Tags": [] + }, "vtgate_concurrentdml": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/concurrentdml"], @@ -675,15 +689,6 @@ "RetryMax": 1, "Tags": ["upgrade_downgrade_query_serving_schema"] }, - "vtgate_schematracker_unauthorized": { - "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/schematracker/unauthorized", "-timeout", "20m"], - "Command": [], - "Manual": false, - "Shard": "vtgate_schema_tracker", - "RetryMax": 1, - "Tags": ["upgrade_downgrade_query_serving_schema"] - }, "vtgate_schematracker_unsharded": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/schematracker/unsharded", "-timeout", "20m"], @@ -846,6 +851,24 @@ "RetryMax": 1, "Tags": ["vtgate"] }, + "vtgate_foreignkey": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/foreignkey"], + "Command": [], + "Manual": false, + "Shard": "vtgate_foreignkey_stress", + "RetryMax": 1, + "Tags": [] + }, + "vtgate_foreignkey_stress": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/foreignkey/stress"], + "Command": [], + "Manual": false, + "Shard": "vtgate_foreignkey_stress", + "RetryMax": 1, + "Tags": [] + }, "vtgate_gen4": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/gen4"], @@ -990,6 +1013,15 @@ "RetryMax": 0, "Tags": [] }, + "vreplication_vtctldclient_materialize": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMaterializeVtctldClient"], + "Command": [], + "Manual": false, + "Shard": "vreplication_multicell", + "RetryMax": 0, + "Tags": [] + }, 
"vreplication_cellalias": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "CellAlias"], @@ -999,9 +1031,27 @@ "RetryMax": 0, "Tags": [] }, - "vreplication_partialmovetables": { + "vreplication_partial_movetables_basic": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "PartialMoveTablesBasic"], + "Command": [], + "Manual": false, + "Shard": "vreplication_partial_movetables_basic", + "RetryMax": 0, + "Tags": [] + }, + "vdiff_multiple_movetables_test.go": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMultipleConcurrentVDiffs"], + "Command": [], + "Manual": false, + "Shard": "vreplication_partial_movetables_basic", + "RetryMax": 0, + "Tags": [] + }, + "vreplication_movetables_buffering": { "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "PartialMoveTables"], + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMoveTablesBuffering"], "Command": [], "Manual": true, "Shard": "vreplication_cellalias", @@ -1028,7 +1078,7 @@ }, "vreplication_basic": { "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestBasicVreplicationWorkflow"], + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestBasicVreplicationWorkflow", "-timeout", "20m"], "Command": [], "Manual": true, "Shard": "vreplication_basic", @@ -1037,13 +1087,22 @@ }, "vreplication_copy_parallel": { "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestVreplicationCopyParallel"], + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestVreplicationCopyParallel", "-timeout", "20m"], "Command": [], "Manual": false, "Shard": "vreplication_basic", "RetryMax": 1, "Tags": [] }, + "vreplication_partial_movetables_sequences": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", 
"-run", "TestPartialMoveTablesWithSequences"], + "Command": [], + "Manual": false, + "Shard": "vreplication_partial_movetables_sequences", + "RetryMax": 1, + "Tags": [] + }, "vstream_flush_binlog": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestVStreamFlushBinlog"], @@ -1143,6 +1202,15 @@ "RetryMax": 1, "Tags": [] }, + "vreplication_fk": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestFKWorkflow"], + "Command": [], + "Manual": false, + "Shard": "vreplication_cellalias", + "RetryMax": 1, + "Tags": [] + }, "vreplication_across_db_versions": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestV2WorkflowsAcrossDBVersions", "-timeout", "20m"], @@ -1152,18 +1220,27 @@ "RetryMax": 1, "Tags": [] }, - "vreplication_mariadb_to_mysql": { - "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMoveTablesMariaDBToMySQL", "-timeout", "10m"], - "Command": [], - "Manual": true, - "Shard": "vreplication_across_db_versions", - "RetryMax": 1, - "Tags": [] + "vreplication_mariadb_to_mysql": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMoveTablesMariaDBToMySQL", "-timeout", "20m"], + "Command": [], + "Manual": false, + "Shard": "vreplication_across_db_versions", + "RetryMax": 1, + "Tags": [] + }, + "vreplication_vtctl_migrate": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestVtctlMigrate", "-timeout", "30m"], + "Command": [], + "Manual": false, + "Shard": "vreplication_migrate_vdiff2_convert_tz", + "RetryMax": 1, + "Tags": [] }, - "vreplication_migrate": { + "vreplication_vtctld_migrate": { "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMigrate"], + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestVtctldMigrate", "-timeout", "30m"], 
"Command": [], "Manual": false, "Shard": "vreplication_migrate_vdiff2_convert_tz", @@ -1172,7 +1249,7 @@ }, "vdiff2": { "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestVDiff2"], + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestVDiff2", "-timeout", "20m"], "Command": [], "Manual": false, "Shard": "vreplication_migrate_vdiff2_convert_tz", diff --git a/test/local_example.sh b/test/local_example.sh index 0274a44403c..391e75a9224 100755 --- a/test/local_example.sh +++ b/test/local_example.sh @@ -56,7 +56,6 @@ for shard in "customer/0"; do done ./202_move_tables.sh -sleep 3 # required for now ./203_switch_reads.sh @@ -67,7 +66,7 @@ mysql --table < ../common/select_customer0_data.sql # We expect this to fail due to the denied tables # rules in place. # For some reason this succeeds... -# $(mysql --table < ../common/select_commerce_data.sql &>/dev/null || true) +$(mysql --table < ../common/select_commerce_data.sql &>/dev/null || true) ./205_clean_commerce.sh # We expect this to fail as the keyspace is now gone. 
diff --git a/test/templates/cluster_endtoend_test.tpl b/test/templates/cluster_endtoend_test.tpl index 8d7542696e2..b5c409d8f51 100644 --- a/test/templates/cluster_endtoend_test.tpl +++ b/test/templates/cluster_endtoend_test.tpl @@ -14,7 +14,7 @@ env: jobs: build: name: Run endtoend tests on {{.Name}} - runs-on: ubuntu-22.04 + runs-on: {{if .Cores16}}gh-hosted-runners-16cores-1{{else}}gh-hosted-runners-4cores-1{{end}} steps: - name: Skip CI @@ -34,6 +34,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "{{"Authorization: token ${{ secrets.GITHUB_TOKEN }}"}}" \ + -H "Accept: application/vnd.github.v3+json" \ + "{{"https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}"}}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -65,7 +72,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,7 +129,7 @@ jobs: {{if .InstallXtraBackup}} - sudo apt-get install percona-xtrabackup-80 lz4 + sudo apt-get install -y percona-xtrabackup-80 lz4 {{end}} @@ -136,7 +143,7 @@ jobs: {{end}} - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -145,7 +152,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -156,7 +163,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail {{if .LimitResourceUsage}} # Increase our open file descriptor limit as we could hit this @@ -187,11 +194,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker={{if .Docker}}true -flavor={{.Platform}}{{else}}false{{end}} -follow -shard {{.Shard}}{{if .PartialKeyspace}} -partial-keyspace=true {{end}} | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "{{"${{steps.skip-workflow.outputs.is_draft}}"}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/test/templates/cluster_endtoend_test_docker.tpl b/test/templates/cluster_endtoend_test_docker.tpl index a2e37e40e0d..f53c705e2c1 100644 --- a/test/templates/cluster_endtoend_test_docker.tpl +++ b/test/templates/cluster_endtoend_test_docker.tpl @@ -6,7 +6,7 @@ permissions: read-all jobs: build: name: Run endtoend tests on {{.Name}} - runs-on: ubuntu-22.04 + runs-on: {{if .Cores16}}gh-hosted-runners-16cores-1{{else}}gh-hosted-runners-4cores-1{{end}} steps: - name: Skip CI @@ -54,7 +54,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -65,4 +65,8 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' timeout-minutes: 30 run: | + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which mustn't be more than 107 characters long. 
+ export VTDATAROOT="/tmp/" + go run test.go -docker=true --follow -shard {{.Shard}} diff --git a/test/templates/cluster_endtoend_test_mysql57.tpl b/test/templates/cluster_endtoend_test_mysql57.tpl index b63081fd398..f5bc482cda9 100644 --- a/test/templates/cluster_endtoend_test_mysql57.tpl +++ b/test/templates/cluster_endtoend_test_mysql57.tpl @@ -19,7 +19,7 @@ env: jobs: build: name: Run endtoend tests on {{.Name}} - runs-on: ubuntu-22.04 + runs-on: {{if .Cores16}}gh-hosted-runners-16cores-1{{else}}gh-hosted-runners-4cores-1{{end}} steps: - name: Skip CI @@ -39,6 +39,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "{{"Authorization: token ${{ secrets.GITHUB_TOKEN }}"}}" \ + -H "Accept: application/vnd.github.v3+json" \ + "{{"https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}"}}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -70,7 +77,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -146,7 +153,7 @@ jobs: {{end}} - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. 
If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -155,7 +162,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -166,7 +173,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail {{if .LimitResourceUsage}} # Increase our local ephemeral port range as we could exhaust this @@ -192,11 +199,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker={{if .Docker}}true -flavor={{.Platform}}{{else}}false{{end}} -follow -shard {{.Shard}}{{if .PartialKeyspace}} -partial-keyspace=true {{end}} | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "{{"${{steps.skip-workflow.outputs.is_draft}}"}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . 
|| true + fi # print test output cat output.txt diff --git a/test/templates/cluster_endtoend_test_self_hosted.tpl b/test/templates/cluster_endtoend_test_self_hosted.tpl index 9c6e04680c2..d9b48f6aecf 100644 --- a/test/templates/cluster_endtoend_test_self_hosted.tpl +++ b/test/templates/cluster_endtoend_test_self_hosted.tpl @@ -61,7 +61,12 @@ jobs: - name: Run test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' timeout-minutes: 30 - run: docker run --name "{{.ImageName}}_$GITHUB_SHA" {{.ImageName}}:$GITHUB_SHA /bin/bash -c 'source build.env && go run test.go -keep-data=true -docker=false -print-log -follow -shard {{.Shard}} -- -- --keep-data=true' + run: | + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which mustn't be more than 107 characters long. + export VTDATAROOT="/tmp/" + + docker run --name "{{.ImageName}}_$GITHUB_SHA" {{.ImageName}}:$GITHUB_SHA /bin/bash -c 'source build.env && go run test.go -keep-data=true -docker=false -print-log -follow -shard {{.Shard}} -- -- --keep-data=true' - name: Print Volume Used if: always() && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/test/templates/dockerfile.tpl b/test/templates/dockerfile.tpl index 38b27674069..d1ef429db79 100644 --- a/test/templates/dockerfile.tpl +++ b/test/templates/dockerfile.tpl @@ -1,4 +1,4 @@ -ARG bootstrap_version=18.0 +ARG bootstrap_version=22.1 ARG image="vitess/bootstrap:${bootstrap_version}-{{.Platform}}" FROM "${image}" diff --git a/test/templates/unit_test.tpl b/test/templates/unit_test.tpl index a52e667c156..7dffd83267b 100644 --- a/test/templates/unit_test.tpl +++ b/test/templates/unit_test.tpl @@ -14,7 +14,7 @@ env: jobs: test: name: {{.Name}} - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +34,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + 
PR_DATA=$(curl \ + -H "{{"Authorization: token ${{ secrets.GITHUB_TOKEN }}"}}" \ + -H "Accept: application/vnd.github.v3+json" \ + "{{"https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}"}}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -62,7 +69,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' uses: actions/setup-go@v4 with: - go-version: 1.20.5 + go-version: 1.21.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' @@ -142,7 +149,7 @@ jobs: make tools - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -151,19 +158,27 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' timeout-minutes: 30 run: | + set -exo pipefail + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which mustn't be more than 107 characters long. 
+ export VTDATAROOT="/tmp/" + + export NOVTADMINBUILD=1 eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "{{"${{steps.skip-workflow.outputs.is_draft}}"}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/test/templates/unit_test_self_hosted.tpl b/test/templates/unit_test_self_hosted.tpl index c7f720dc724..45d88392b9b 100644 --- a/test/templates/unit_test_self_hosted.tpl +++ b/test/templates/unit_test_self_hosted.tpl @@ -59,7 +59,13 @@ jobs: - name: Run test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' timeout-minutes: 30 - run: docker run --name "{{.ImageName}}_$GITHUB_SHA" {{.ImageName}}:$GITHUB_SHA /bin/bash -c 'make unit_test' + run: | + set -exo pipefail + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which mustn't be more than 107 characters long. 
+ export VTDATAROOT="/tmp/" + + docker run --name "{{.ImageName}}_$GITHUB_SHA" {{.ImageName}}:$GITHUB_SHA /bin/bash -c 'NOVTADMINBUILD=1 make unit_test' - name: Print Volume Used if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' diff --git a/tools/back_to_dev_mode.sh b/tools/back_to_dev_mode.sh index 7bbc6bce25f..9f4cc25b71f 100755 --- a/tools/back_to_dev_mode.sh +++ b/tools/back_to_dev_mode.sh @@ -74,7 +74,7 @@ echo " " echo " git push upstream $current_branch" echo " " echo " " -echo "Once pushed, please execute the following gh command to create the Pull Requests. Please replace 'USER_ON_WHICH_YOU_PUSHED' with the user/org on which you pushed the two branches." +echo "Once pushed, please execute the following gh command to create the Pull Requests. Please replace 'USER_ON_WHICH_YOU_PUSHED' with the user/org on which you pushed the branch." echo " " echo " gh pr create -w --title 'Back to dev mode after v$RELEASE_VERSION' --base $BASE_BRANCH --head USER_ON_WHICH_YOU_PUSHED:$current_branch --label 'Type: Release','Component: General' --body 'Includes the changes required to go back into dev mode (v$DEV_VERSION) after the release of v$RELEASE_VERSION.'" echo " " diff --git a/tools/code_freeze.sh b/tools/code_freeze.sh index 7537ffbb13c..8e4f3ec12a6 100755 --- a/tools/code_freeze.sh +++ b/tools/code_freeze.sh @@ -34,7 +34,7 @@ freeze=$1 branch=$2 code_freeze_workflow="./.github/workflows/code_freeze.yml" -if [ "$freeze" != "freeze" && "$freeze" != "unfreeze" ]; then +if [[ "$freeze" != "freeze" && "$freeze" != "unfreeze" ]]; then echo "the first argument must be either 'freeze' or 'unfreeze'" exit 1 fi diff --git a/tools/create_release.sh b/tools/create_release.sh index 546b60d40c3..77c9cb9d423 100755 --- a/tools/create_release.sh +++ b/tools/create_release.sh @@ -88,7 +88,7 @@ echo " " echo " git push upstream $current_branch" echo " " echo " " -echo "Once pushed, please execute the following gh command to create 
the Pull Requests. Please replace 'USER_ON_WHICH_YOU_PUSHED' with the user/org on which you pushed the two branches." +echo "Once pushed, please execute the following gh command to create the Pull Requests. Please replace 'USER_ON_WHICH_YOU_PUSHED' with the user/org on which you pushed the branch." echo " " echo " gh pr create -w --title 'Release of v$RELEASE_VERSION' --base $BASE_BRANCH --head USER_ON_WHICH_YOU_PUSHED:$current_branch --label 'Type: Release','Component: General','Do Not Merge' --body 'Includes the release notes and release commit for the v$RELEASE_VERSION release. Once this PR is merged, we will be able to tag v$RELEASE_VERSION on the merge commit.'" echo " " diff --git a/tools/dependency_check.sh b/tools/dependency_check.sh index cfaa912f954..7d5179c1616 100755 --- a/tools/dependency_check.sh +++ b/tools/dependency_check.sh @@ -21,14 +21,9 @@ function fail() { exit 1 } -PLATFORM_BINARIES="" -case "$(uname -s)" in - Linux*) PLATFORM_BINARIES="k3s";; -esac - # These binaries are required to 'make test' # mysqld might be in /usr/sbin which will not be in the default PATH PATH="/usr/sbin:$PATH" -for binary in mysqld consul etcd etcdctl zksrv.sh javadoc mvn ant curl wget zip unzip $PLATFORM_BINARIES; do +for binary in mysqld consul etcd etcdctl zksrv.sh javadoc mvn ant curl wget zip unzip; do command -v "$binary" > /dev/null || fail "${binary} is not installed in PATH. See https://vitess.io/contributing/build-from-source for install instructions." done; diff --git a/tools/e2e_test_race.sh b/tools/e2e_test_race.sh index 7dad5b259a3..b072e1261e2 100755 --- a/tools/e2e_test_race.sh +++ b/tools/e2e_test_race.sh @@ -38,9 +38,11 @@ packages_with_tests=$(echo "$packages_with_tests" | grep -vE "go/test/endtoend" # endtoend tests should be in a directory called endtoend all_e2e_tests=$(echo "$packages_with_tests" | cut -d" " -f1) +set -exo pipefail + # Run all endtoend tests. 
echo "$all_e2e_tests" | xargs go test $VT_GO_PARALLEL -race 2>&1 | tee $temp_log_file -if [ ${PIPESTATUS[0]} -ne 0 ]; then +if [ ${PIPESTATUS[1]} -ne 0 ]; then if grep "WARNING: DATA RACE" -q $temp_log_file; then echo echo "ERROR: go test -race found a data race. See log above." diff --git a/tools/make-release-packages.sh b/tools/make-release-packages.sh index 21ecdcda7ee..e1a189d6507 100755 --- a/tools/make-release-packages.sh +++ b/tools/make-release-packages.sh @@ -35,7 +35,7 @@ mkdir -p releases # Copy a subset of binaries from issue #5421 mkdir -p "${RELEASE_DIR}/bin" -for binary in vttestserver mysqlctl mysqlctld query_analyzer topo2topo vtaclcheck vtadmin vtbackup vtbench vtclient vtcombo vtctl vtctldclient vtctlclient vtctld vtexplain vtgate vttablet vtorc zk zkctl zkctld; do +for binary in vttestserver mysqlctl mysqlctld topo2topo vtaclcheck vtadmin vtbackup vtbench vtclient vtcombo vtctl vtctldclient vtctlclient vtctld vtexplain vtgate vttablet vtorc zk zkctl zkctld; do cp "bin/$binary" "${RELEASE_DIR}/bin/" done; diff --git a/tools/rowlog/rowlog.go b/tools/rowlog/rowlog.go index 831998580c5..475006b2b59 100644 --- a/tools/rowlog/rowlog.go +++ b/tools/rowlog/rowlog.go @@ -15,7 +15,7 @@ import ( "github.com/spf13/pflag" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/discovery" @@ -199,7 +199,7 @@ func startStreaming(ctx context.Context, vtgate, vtctld, keyspace, tablet, table } } var err error - var currentPosition, stopPosition mysql.Position + var currentPosition, stopPosition replication.Position currentPosition, err = binlogplayer.DecodePosition(gtid) if err != nil { fmt.Printf("Error decoding position for %s:%vs\n", gtid, err.Error()) diff --git a/tools/tools.go b/tools/tools.go index 68a97b603f6..36f3e59ab93 100644 --- a/tools/tools.go +++ b/tools/tools.go @@ -25,8 +25,4 @@ import ( _ 
"github.com/planetscale/vtprotobuf/cmd/protoc-gen-go-vtproto" _ "google.golang.org/grpc/cmd/protoc-gen-go-grpc" _ "google.golang.org/protobuf/cmd/protoc-gen-go" - _ "k8s.io/code-generator/cmd/client-gen" - _ "k8s.io/code-generator/cmd/deepcopy-gen" - _ "k8s.io/code-generator/cmd/informer-gen" - _ "k8s.io/code-generator/cmd/lister-gen" ) diff --git a/vitess-mixin/e2e/package-lock.json b/vitess-mixin/e2e/package-lock.json index a2bdfd9e851..3c4da9aca0f 100644 --- a/vitess-mixin/e2e/package-lock.json +++ b/vitess-mixin/e2e/package-lock.json @@ -36,9 +36,9 @@ } }, "@cypress/request": { - "version": "2.88.10", - "resolved": "https://registry.npmjs.org/@cypress/request/-/request-2.88.10.tgz", - "integrity": "sha512-Zp7F+R93N0yZyG34GutyTNr+okam7s/Fzc1+i3kcqOP8vk6OuajuE9qZJ6Rs+10/1JFtXFYMdyarnU1rZuJesg==", + "version": "2.88.12", + "resolved": "https://registry.npmjs.org/@cypress/request/-/request-2.88.12.tgz", + "integrity": "sha512-tOn+0mDZxASFM+cuAP9szGUGPI1HwWVSvdzm7V4cCsPdFTx6qMj29CwaQmRAMIEhORIUBFBsYROYJcveK4uOjA==", "dev": true, "requires": { "aws-sign2": "~0.7.0", @@ -54,11 +54,40 @@ "json-stringify-safe": "~5.0.1", "mime-types": "~2.1.19", "performance-now": "^2.1.0", - "qs": "~6.5.2", + "qs": "~6.10.3", "safe-buffer": "^5.1.2", - "tough-cookie": "~2.5.0", + "tough-cookie": "^4.1.3", "tunnel-agent": "^0.6.0", "uuid": "^8.3.2" + }, + "dependencies": { + "qs": { + "version": "6.10.4", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.10.4.tgz", + "integrity": "sha512-OQiU+C+Ds5qiH91qh/mg0w+8nwQuLjM4F4M/PbmhDOoYehPh+Fb0bDjtR1sOvy7YKxvj28Y/M0PhP5uVX0kB+g==", + "dev": true, + "requires": { + "side-channel": "^1.0.4" + } + }, + "tough-cookie": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.3.tgz", + "integrity": "sha512-aX/y5pVRkfRnfmuX+OdbSdXvPe6ieKX/G2s7e98f4poJHnqH3281gDPm/metm6E/WRamfx7WC4HUqkWHfQHprw==", + "dev": true, + "requires": { + "psl": "^1.1.33", + "punycode": "^2.1.1", + "universalify": "^0.2.0", + 
"url-parse": "^1.5.3" + } + }, + "universalify": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", + "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==", + "dev": true + } } }, "@cypress/xvfb": { @@ -221,6 +250,16 @@ "integrity": "sha512-A+Fezp4zxnit6FanDmv9EqXNAi3vt9DWp51/71UEhXukb7QUuvtv9344h91dyAxuTLoSYJFU299qzR3tzwPAhw==", "dev": true }, + "call-bind": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", + "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "dev": true, + "requires": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.2" + } + }, "caseless": { "version": "0.12.0", "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", @@ -652,6 +691,24 @@ "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", "dev": true }, + "function-bind": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", + "dev": true + }, + "get-intrinsic": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.1.tgz", + "integrity": "sha512-2DcsyfABl+gVHEfCOaTrWgyt+tb6MSEGmKq+kI5HwLbIYgjgmMcV8KQ41uaKz1xxUcn9tJtgFbQUEVcEbd0FYw==", + "dev": true, + "requires": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3" + } + }, "get-stream": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", @@ -708,6 +765,15 @@ "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==", "dev": true }, + "has": { + "version": "1.0.3", + "resolved": 
"https://registry.npmjs.org/has/-/has-1.0.3.tgz", + "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "dev": true, + "requires": { + "function-bind": "^1.1.1" + } + }, "has-ansi": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz", @@ -723,6 +789,18 @@ "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", "dev": true }, + "has-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", + "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==", + "dev": true + }, + "has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "dev": true + }, "http-signature": { "version": "1.3.6", "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.3.6.tgz", @@ -1149,6 +1227,12 @@ "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", "dev": true }, + "object-inspect": { + "version": "1.12.3", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", + "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==", + "dev": true + }, "once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", @@ -1246,18 +1330,18 @@ "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", "dev": true }, - "qs": { - "version": "6.5.3", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.3.tgz", - "integrity": "sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==", - "dev": true - }, 
"querystring": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", "integrity": "sha512-X/xY82scca2tau62i9mDyU9K+I+djTMUsvwf7xnUX5GLvVzgJybOJf4Y6o9Zx3oJK/LSXg5tTZBjwzqVPaPO2g==", "dev": true }, + "querystringify": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", + "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==", + "dev": true + }, "ramda": { "version": "0.26.1", "resolved": "https://registry.npmjs.org/ramda/-/ramda-0.26.1.tgz", @@ -1296,6 +1380,12 @@ "throttleit": "^1.0.0" } }, + "requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", + "dev": true + }, "restore-cursor": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-1.0.1.tgz", @@ -1357,6 +1447,17 @@ "integrity": "sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ==", "dev": true }, + "side-channel": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", + "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "dev": true, + "requires": { + "call-bind": "^1.0.0", + "get-intrinsic": "^1.0.2", + "object-inspect": "^1.9.0" + } + }, "signal-exit": { "version": "3.0.7", "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", @@ -1483,16 +1584,6 @@ "rimraf": "^2.6.3" } }, - "tough-cookie": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", - "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", - "dev": true, - "requires": { - "psl": 
"^1.1.28", - "punycode": "^2.1.1" - } - }, "tslib": { "version": "1.14.1", "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", @@ -1550,6 +1641,16 @@ } } }, + "url-parse": { + "version": "1.5.10", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", + "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", + "dev": true, + "requires": { + "querystringify": "^2.1.1", + "requires-port": "^1.0.0" + } + }, "util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", diff --git a/vitess-mixin/e2e/vttablet-up.sh b/vitess-mixin/e2e/vttablet-up.sh index 4978840866c..7e58d1c24ad 100755 --- a/vitess-mixin/e2e/vttablet-up.sh +++ b/vitess-mixin/e2e/vttablet-up.sh @@ -154,7 +154,6 @@ exec $VTROOT/bin/vttablet \ --port $web_port \ --grpc_port $grpc_port \ --service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream' \ - --vtctld_addr "http://vtctld:$WEB_PORT/" \ --init_keyspace $keyspace \ --init_shard $shard \ --backup_storage_implementation file \ diff --git a/web/vtadmin/README.md b/web/vtadmin/README.md index 3b72d0eab23..843c0711de2 100644 --- a/web/vtadmin/README.md +++ b/web/vtadmin/README.md @@ -2,9 +2,8 @@ ## Prerequisites -- [node](https://nodejs.org) >= 16.19.0 LTS - - _Note_: If you are using Node >= 17.x.y, you may see errors like `Error: error:0308010C:digital envelope routines::unsupported` when running `npm run build`. This is due to node dropping support for older versions of `openssl`. A workaround was added in [nodejs/node#40455](https://github.com/nodejs/node/issues/40455), allowing you to `export NODE_OPTIONS="--openssl-legacy-provider"` before running `npm run build`. 
-- npm >= 8.1.0 (comes with node) +- [node](https://nodejs.org) >= 18.16.0 LTS +- npm >= 9.7.1 (comes with node) ## Available scripts diff --git a/web/vtadmin/build.sh b/web/vtadmin/build.sh new file mode 100755 index 00000000000..54d1a5b1926 --- /dev/null +++ b/web/vtadmin/build.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Copyright 2023 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +function output() { + echo -e "$@" +} + +script_dir="$(dirname "${BASH_SOURCE[0]:-$0}")" +source "${script_dir}/../../build.env" +web_dir="${script_dir}" + +vtadmin_api_port=14200 + +# Download nvm and node +if [[ -z ${NVM_DIR} ]]; then + export NVM_DIR="$HOME/.nvm" +fi + +if [[ -z ${NODE_VERSION} ]]; then + export NODE_VERSION="18.16.0" +fi + +output "\nInstalling nvm...\n" + +if [ -d "$NVM_DIR" ]; then + output "\033[1;32mnvm is already installed!\033[0m" +else + curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.0/install.sh | bash && output "\033[1;32mnvm is installed!\033[0m" || fail "\033[1;32mnvm failed to install!\033[0m" +fi + +source "$NVM_DIR/nvm.sh" + +output "\nConfiguring Node.js $NODE_VERSION\n" +nvm install "$NODE_VERSION" || fail "Could not install and use nvm $NODE_VERSION." 
+ +npm --prefix "$web_dir" --silent install + +export PATH=$PATH:$web_dir/node_modules/.bin/ + +VITE_VTADMIN_API_ADDRESS="http://${hostname}:${vtadmin_api_port}" \ + VITE_ENABLE_EXPERIMENTAL_TABLET_DEBUG_VARS="true" \ + npm run --prefix "$web_dir" build diff --git a/web/vtadmin/package-lock.json b/web/vtadmin/package-lock.json index bbaf86ab494..55e5cf75ce2 100644 --- a/web/vtadmin/package-lock.json +++ b/web/vtadmin/package-lock.json @@ -52,7 +52,7 @@ "jsdom": "^21.1.1", "msw": "^0.36.8", "npm": "^9.6.3", - "postcss": "^8.4.6", + "postcss": "^8.4.31", "prettier": "^2.2.1", "protobufjs-cli": "^1.1.1", "serve": "^14.2.0", @@ -67,8 +67,8 @@ "vitest": "^0.29.8" }, "engines": { - "node": ">=16.19.0", - "npm": ">=8.1.0" + "node": ">=18.16.0", + "npm": ">=9.5.1" } }, "node_modules/@adobe/css-tools": { @@ -91,16 +91,73 @@ } }, "node_modules/@babel/code-frame": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.21.4.tgz", - "integrity": "sha512-LYvhNKfwWSPpocw8GI7gpK2nq3HSDuEPC/uSYaALSJu9xjsalaaYFOq0Pwt5KmVqwEbZlDu81aLXwBOmD/Fv9g==", + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", + "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", "dependencies": { - "@babel/highlight": "^7.18.6" + "@babel/highlight": "^7.22.13", + "chalk": "^2.4.2" }, "engines": { "node": ">=6.9.0" } }, + "node_modules/@babel/code-frame/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": 
"sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/code-frame/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + }, + "node_modules/@babel/code-frame/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/@babel/compat-data": { "version": "7.21.4", "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.21.4.tgz", @@ -168,12 +225,12 @@ } }, "node_modules/@babel/generator": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.21.4.tgz", - "integrity": "sha512-NieM3pVIYW2SwGzKoqfPrQsf4xGs9M9AIG3ThppsSRmO+m7eQhmI6amajKMUeIO37wFfsvnvcxQFx6x6iqxDnA==", + "version": 
"7.23.0", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz", + "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==", "dev": true, "dependencies": { - "@babel/types": "^7.21.4", + "@babel/types": "^7.23.0", "@jridgewell/gen-mapping": "^0.3.2", "@jridgewell/trace-mapping": "^0.3.17", "jsesc": "^2.5.1" @@ -296,9 +353,9 @@ } }, "node_modules/@babel/helper-environment-visitor": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz", - "integrity": "sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", "dev": true, "engines": { "node": ">=6.9.0" @@ -317,25 +374,25 @@ } }, "node_modules/@babel/helper-function-name": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.21.0.tgz", - "integrity": "sha512-HfK1aMRanKHpxemaY2gqBmL04iAPOPRj7DxtNbiDOrJK+gdwkiNRVpCpUJYbUT+aZyemKN8brqTOxzCaG6ExRg==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", "dev": true, "dependencies": { - "@babel/template": "^7.20.7", - "@babel/types": "^7.21.0" + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-hoist-variables": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz", - "integrity": 
"sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", "dev": true, "dependencies": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" }, "engines": { "node": ">=6.9.0" @@ -465,30 +522,30 @@ } }, "node_modules/@babel/helper-split-export-declaration": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz", - "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==", + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", "dev": true, "dependencies": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-string-parser": { - "version": "7.19.4", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.19.4.tgz", - "integrity": "sha512-nHtDoQcuqFmwYNYPz3Rah5ph2p8PFeFCsZk9A/48dPc/rGocJ5J3hAAZ7pb76VWX3fZKu+uEr/FhH5jLx7umrw==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", + "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.19.1", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz", - "integrity": 
"sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", "engines": { "node": ">=6.9.0" } @@ -532,12 +589,12 @@ } }, "node_modules/@babel/highlight": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz", - "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", + "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", "dependencies": { - "@babel/helper-validator-identifier": "^7.18.6", - "chalk": "^2.0.0", + "@babel/helper-validator-identifier": "^7.22.20", + "chalk": "^2.4.2", "js-tokens": "^4.0.0" }, "engines": { @@ -601,9 +658,9 @@ } }, "node_modules/@babel/parser": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.21.4.tgz", - "integrity": "sha512-alVJj7k7zIxqBZ7BTRhz0IqJFxW1VJbm6N8JbcYhQ186df9ZBPbZBmWSqAMXwHGsCJdYks7z/voa3ibiS5bCIw==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz", + "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==", "dev": true, "bin": { "parser": "bin/babel-parser.js" @@ -1995,33 +2052,33 @@ } }, "node_modules/@babel/template": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.20.7.tgz", - "integrity": "sha512-8SegXApWe6VoNw0r9JHpSteLKTpTiLZ4rMlGIm9JQ18KiCtyQiAMEazujAHrUS5flrcqYZa75ukev3P6QmUwUw==", + "version": "7.22.15", + "resolved": 
"https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", + "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", "dev": true, "dependencies": { - "@babel/code-frame": "^7.18.6", - "@babel/parser": "^7.20.7", - "@babel/types": "^7.20.7" + "@babel/code-frame": "^7.22.13", + "@babel/parser": "^7.22.15", + "@babel/types": "^7.22.15" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.21.4.tgz", - "integrity": "sha512-eyKrRHKdyZxqDm+fV1iqL9UAHMoIg0nDaGqfIOd8rKH17m5snv7Gn4qgjBoFfLz9APvjFU/ICT00NVCv1Epp8Q==", - "dev": true, - "dependencies": { - "@babel/code-frame": "^7.21.4", - "@babel/generator": "^7.21.4", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.21.0", - "@babel/helper-hoist-variables": "^7.18.6", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/parser": "^7.21.4", - "@babel/types": "^7.21.4", + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz", + "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.0", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.0", + "@babel/types": "^7.23.0", "debug": "^4.1.0", "globals": "^11.1.0" }, @@ -2030,13 +2087,13 @@ } }, "node_modules/@babel/types": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.21.4.tgz", - "integrity": "sha512-rU2oY501qDxE8Pyo7i/Orqma4ziCOrby0/9mvbDUGEfvZjb279Nk9k19e2fiCxHbRRpY2ZyrgW1eq22mvmOIzA==", + "version": "7.23.0", + "resolved": 
"https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz", + "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==", "dev": true, "dependencies": { - "@babel/helper-string-parser": "^7.19.4", - "@babel/helper-validator-identifier": "^7.19.1", + "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20", "to-fast-properties": "^2.0.0" }, "engines": { @@ -13701,9 +13758,9 @@ } }, "node_modules/postcss": { - "version": "8.4.21", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.21.tgz", - "integrity": "sha512-tP7u/Sn/dVxK2NnruI4H9BG+x+Wxz6oeZ1cJ8P6G/PZY0IKk4k/63TDsQf2kQq3+qoJeLm2kIBUNlZe3zgb4Zg==", + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "funding": [ { "type": "opencollective", @@ -13712,10 +13769,14 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ], "dependencies": { - "nanoid": "^3.3.4", + "nanoid": "^3.3.6", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" }, @@ -14458,9 +14519,9 @@ "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" }, "node_modules/protobufjs": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.2.3.tgz", - "integrity": "sha512-TtpvOqwB5Gdz/PQmOjgsrGH1nHjAQVCN7JG4A6r1sXRWESL5rNMAiRcBQlCAdKxZcAbstExQePYG8xof/JVRgg==", + "version": "7.2.5", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.2.5.tgz", + "integrity": "sha512-gGXRSXvxQ7UiPgfw8gevrfRWcTlSbOFg+p/N+JVJEK5VhueL2miT6qTymqAmjr1Q5WbOCyJbyrk6JfWKwlFn6A==", "dev": true, "hasInstallScript": true, "peer": true, @@ -16374,9 +16435,9 @@ } }, "node_modules/tough-cookie": { - "version": "4.1.2", - 
"resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.2.tgz", - "integrity": "sha512-G9fqXWoYFZgTc2z8Q5zaHy/vJMjm+WV0AkAeHxVCQiEB1b+dGvWzFW6QV07cY5jQ5gRkeid2qIkzkxUnmoQZUQ==", + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.3.tgz", + "integrity": "sha512-aX/y5pVRkfRnfmuX+OdbSdXvPe6ieKX/G2s7e98f4poJHnqH3281gDPm/metm6E/WRamfx7WC4HUqkWHfQHprw==", "dev": true, "dependencies": { "psl": "^1.1.33", @@ -17178,9 +17239,9 @@ } }, "node_modules/word-wrap": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.4.tgz", + "integrity": "sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==", "dev": true, "engines": { "node": ">=0.10.0" @@ -17370,11 +17431,58 @@ } }, "@babel/code-frame": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.21.4.tgz", - "integrity": "sha512-LYvhNKfwWSPpocw8GI7gpK2nq3HSDuEPC/uSYaALSJu9xjsalaaYFOq0Pwt5KmVqwEbZlDu81aLXwBOmD/Fv9g==", + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", + "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", "requires": { - "@babel/highlight": "^7.18.6" + "@babel/highlight": "^7.22.13", + "chalk": "^2.4.2" + }, + "dependencies": { + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "requires": { + "color-convert": "^1.9.0" + } + }, + "chalk": { + "version": "2.4.2", + "resolved": 
"https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" + }, + "has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==" + }, + "supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "requires": { + "has-flag": "^3.0.0" + } + } } }, "@babel/compat-data": { @@ -17426,12 +17534,12 @@ } }, "@babel/generator": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.21.4.tgz", - "integrity": "sha512-NieM3pVIYW2SwGzKoqfPrQsf4xGs9M9AIG3ThppsSRmO+m7eQhmI6amajKMUeIO37wFfsvnvcxQFx6x6iqxDnA==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz", + "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==", "dev": true, "requires": { - "@babel/types": "^7.21.4", + "@babel/types": "^7.23.0", "@jridgewell/gen-mapping": "^0.3.2", 
"@jridgewell/trace-mapping": "^0.3.17", "jsesc": "^2.5.1" @@ -17523,9 +17631,9 @@ } }, "@babel/helper-environment-visitor": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.18.9.tgz", - "integrity": "sha512-3r/aACDJ3fhQ/EVgFy0hpj8oHyHpQc+LPtJoY9SzTThAsStm4Ptegq92vqKoE3vD706ZVFWITnMnxucw+S9Ipg==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", "dev": true }, "@babel/helper-explode-assignable-expression": { @@ -17538,22 +17646,22 @@ } }, "@babel/helper-function-name": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.21.0.tgz", - "integrity": "sha512-HfK1aMRanKHpxemaY2gqBmL04iAPOPRj7DxtNbiDOrJK+gdwkiNRVpCpUJYbUT+aZyemKN8brqTOxzCaG6ExRg==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", "dev": true, "requires": { - "@babel/template": "^7.20.7", - "@babel/types": "^7.21.0" + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" } }, "@babel/helper-hoist-variables": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.18.6.tgz", - "integrity": "sha512-UlJQPkFqFULIcyW5sbzgbkxn2FKRgwWiRexcuaR8RNJRy8+LLveqPjwZV/bwrLZCN0eUHD/x8D0heK1ozuoo6Q==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", "dev": true, "requires": { - "@babel/types": "^7.18.6" + 
"@babel/types": "^7.22.5" } }, "@babel/helper-member-expression-to-functions": { @@ -17650,24 +17758,24 @@ } }, "@babel/helper-split-export-declaration": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.18.6.tgz", - "integrity": "sha512-bde1etTx6ZyTmobl9LLMMQsaizFVZrquTEHOqKeQESMKo4PlObf+8+JA25ZsIpZhT/WEd39+vOdLXAFG/nELpA==", + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", "dev": true, "requires": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" } }, "@babel/helper-string-parser": { - "version": "7.19.4", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.19.4.tgz", - "integrity": "sha512-nHtDoQcuqFmwYNYPz3Rah5ph2p8PFeFCsZk9A/48dPc/rGocJ5J3hAAZ7pb76VWX3fZKu+uEr/FhH5jLx7umrw==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", + "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==", "dev": true }, "@babel/helper-validator-identifier": { - "version": "7.19.1", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.19.1.tgz", - "integrity": "sha512-awrNfaMtnHUr653GgGEs++LlAvW6w+DcPrOliSMXWCKo597CwL5Acf/wWdNkf/tfEQE3mjkeD1YOVZOUV/od1w==" + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==" }, "@babel/helper-validator-option": { "version": "7.21.0", @@ -17699,12 +17807,12 @@ } }, "@babel/highlight": { - "version": "7.18.6", - 
"resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.18.6.tgz", - "integrity": "sha512-u7stbOuYjaPezCuLj29hNW1v64M2Md2qupEKP1fHc7WdOA3DgLh37suiSrZYY7haUB7iBeQZ9P1uiRF359do3g==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", + "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", "requires": { - "@babel/helper-validator-identifier": "^7.18.6", - "chalk": "^2.0.0", + "@babel/helper-validator-identifier": "^7.22.20", + "chalk": "^2.4.2", "js-tokens": "^4.0.0" }, "dependencies": { @@ -17755,9 +17863,9 @@ } }, "@babel/parser": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.21.4.tgz", - "integrity": "sha512-alVJj7k7zIxqBZ7BTRhz0IqJFxW1VJbm6N8JbcYhQ186df9ZBPbZBmWSqAMXwHGsCJdYks7z/voa3ibiS5bCIw==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz", + "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==", "dev": true }, "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { @@ -18681,42 +18789,42 @@ } }, "@babel/template": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.20.7.tgz", - "integrity": "sha512-8SegXApWe6VoNw0r9JHpSteLKTpTiLZ4rMlGIm9JQ18KiCtyQiAMEazujAHrUS5flrcqYZa75ukev3P6QmUwUw==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", + "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", "dev": true, "requires": { - "@babel/code-frame": "^7.18.6", - "@babel/parser": "^7.20.7", - "@babel/types": "^7.20.7" + "@babel/code-frame": "^7.22.13", + "@babel/parser": "^7.22.15", + "@babel/types": "^7.22.15" } }, "@babel/traverse": { - "version": "7.21.4", - "resolved": 
"https://registry.npmjs.org/@babel/traverse/-/traverse-7.21.4.tgz", - "integrity": "sha512-eyKrRHKdyZxqDm+fV1iqL9UAHMoIg0nDaGqfIOd8rKH17m5snv7Gn4qgjBoFfLz9APvjFU/ICT00NVCv1Epp8Q==", - "dev": true, - "requires": { - "@babel/code-frame": "^7.21.4", - "@babel/generator": "^7.21.4", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.21.0", - "@babel/helper-hoist-variables": "^7.18.6", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/parser": "^7.21.4", - "@babel/types": "^7.21.4", + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz", + "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==", + "dev": true, + "requires": { + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.0", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.0", + "@babel/types": "^7.23.0", "debug": "^4.1.0", "globals": "^11.1.0" } }, "@babel/types": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.21.4.tgz", - "integrity": "sha512-rU2oY501qDxE8Pyo7i/Orqma4ziCOrby0/9mvbDUGEfvZjb279Nk9k19e2fiCxHbRRpY2ZyrgW1eq22mvmOIzA==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz", + "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==", "dev": true, "requires": { - "@babel/helper-string-parser": "^7.19.4", - "@babel/helper-validator-identifier": "^7.19.1", + "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20", "to-fast-properties": "^2.0.0" } }, @@ -26948,11 +27056,11 @@ } }, "postcss": { - "version": "8.4.21", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.21.tgz", - 
"integrity": "sha512-tP7u/Sn/dVxK2NnruI4H9BG+x+Wxz6oeZ1cJ8P6G/PZY0IKk4k/63TDsQf2kQq3+qoJeLm2kIBUNlZe3zgb4Zg==", + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "requires": { - "nanoid": "^3.3.4", + "nanoid": "^3.3.6", "picocolors": "^1.0.0", "source-map-js": "^1.0.2" } @@ -27363,9 +27471,9 @@ } }, "protobufjs": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.2.3.tgz", - "integrity": "sha512-TtpvOqwB5Gdz/PQmOjgsrGH1nHjAQVCN7JG4A6r1sXRWESL5rNMAiRcBQlCAdKxZcAbstExQePYG8xof/JVRgg==", + "version": "7.2.5", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.2.5.tgz", + "integrity": "sha512-gGXRSXvxQ7UiPgfw8gevrfRWcTlSbOFg+p/N+JVJEK5VhueL2miT6qTymqAmjr1Q5WbOCyJbyrk6JfWKwlFn6A==", "dev": true, "peer": true, "requires": { @@ -28839,9 +28947,9 @@ } }, "tough-cookie": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.2.tgz", - "integrity": "sha512-G9fqXWoYFZgTc2z8Q5zaHy/vJMjm+WV0AkAeHxVCQiEB1b+dGvWzFW6QV07cY5jQ5gRkeid2qIkzkxUnmoQZUQ==", + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.3.tgz", + "integrity": "sha512-aX/y5pVRkfRnfmuX+OdbSdXvPe6ieKX/G2s7e98f4poJHnqH3281gDPm/metm6E/WRamfx7WC4HUqkWHfQHprw==", "dev": true, "requires": { "psl": "^1.1.33", @@ -29395,9 +29503,9 @@ } }, "word-wrap": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.4.tgz", + "integrity": "sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==", "dev": true }, "wrap-ansi": { diff --git 
a/web/vtadmin/package.json b/web/vtadmin/package.json index 71af6572b87..bbfab0eab26 100644 --- a/web/vtadmin/package.json +++ b/web/vtadmin/package.json @@ -3,8 +3,8 @@ "version": "0.1.0", "private": true, "engines": { - "node": ">=16.19.0", - "npm": ">=8.1.0" + "node": ">=18.16.0", + "npm": ">=9.5.1" }, "dependencies": { "@bugsnag/js": "^7.20.0", @@ -89,7 +89,7 @@ "jsdom": "^21.1.1", "msw": "^0.36.8", "npm": "^9.6.3", - "postcss": "^8.4.6", + "postcss": "^8.4.31", "prettier": "^2.2.1", "protobufjs-cli": "^1.1.1", "serve": "^14.2.0", diff --git a/web/vtadmin/src/proto/vtadmin.d.ts b/web/vtadmin/src/proto/vtadmin.d.ts index bcd82c93234..ea35c329f66 100644 --- a/web/vtadmin/src/proto/vtadmin.d.ts +++ b/web/vtadmin/src/proto/vtadmin.d.ts @@ -13022,6 +13022,9 @@ export namespace mysqlctl { /** ApplyBinlogFileRequest binlog_restore_position */ binlog_restore_position?: (string|null); + + /** ApplyBinlogFileRequest binlog_restore_datetime */ + binlog_restore_datetime?: (vttime.ITime|null); } /** Represents an ApplyBinlogFileRequest. */ @@ -13039,6 +13042,9 @@ export namespace mysqlctl { /** ApplyBinlogFileRequest binlog_restore_position. */ public binlog_restore_position: string; + /** ApplyBinlogFileRequest binlog_restore_datetime. */ + public binlog_restore_datetime?: (vttime.ITime|null); + /** * Creates a new ApplyBinlogFileRequest instance using the specified properties. * @param [properties] Properties to set @@ -13208,6 +13214,218 @@ export namespace mysqlctl { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of a ReadBinlogFilesTimestampsRequest. */ + interface IReadBinlogFilesTimestampsRequest { + + /** ReadBinlogFilesTimestampsRequest binlog_file_names */ + binlog_file_names?: (string[]|null); + } + + /** Represents a ReadBinlogFilesTimestampsRequest. */ + class ReadBinlogFilesTimestampsRequest implements IReadBinlogFilesTimestampsRequest { + + /** + * Constructs a new ReadBinlogFilesTimestampsRequest. 
+ * @param [properties] Properties to set + */ + constructor(properties?: mysqlctl.IReadBinlogFilesTimestampsRequest); + + /** ReadBinlogFilesTimestampsRequest binlog_file_names. */ + public binlog_file_names: string[]; + + /** + * Creates a new ReadBinlogFilesTimestampsRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns ReadBinlogFilesTimestampsRequest instance + */ + public static create(properties?: mysqlctl.IReadBinlogFilesTimestampsRequest): mysqlctl.ReadBinlogFilesTimestampsRequest; + + /** + * Encodes the specified ReadBinlogFilesTimestampsRequest message. Does not implicitly {@link mysqlctl.ReadBinlogFilesTimestampsRequest.verify|verify} messages. + * @param message ReadBinlogFilesTimestampsRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: mysqlctl.IReadBinlogFilesTimestampsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ReadBinlogFilesTimestampsRequest message, length delimited. Does not implicitly {@link mysqlctl.ReadBinlogFilesTimestampsRequest.verify|verify} messages. + * @param message ReadBinlogFilesTimestampsRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: mysqlctl.IReadBinlogFilesTimestampsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ReadBinlogFilesTimestampsRequest message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ReadBinlogFilesTimestampsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): mysqlctl.ReadBinlogFilesTimestampsRequest; + + /** + * Decodes a ReadBinlogFilesTimestampsRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ReadBinlogFilesTimestampsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): mysqlctl.ReadBinlogFilesTimestampsRequest; + + /** + * Verifies a ReadBinlogFilesTimestampsRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ReadBinlogFilesTimestampsRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ReadBinlogFilesTimestampsRequest + */ + public static fromObject(object: { [k: string]: any }): mysqlctl.ReadBinlogFilesTimestampsRequest; + + /** + * Creates a plain object from a ReadBinlogFilesTimestampsRequest message. Also converts values to other types if specified. + * @param message ReadBinlogFilesTimestampsRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: mysqlctl.ReadBinlogFilesTimestampsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ReadBinlogFilesTimestampsRequest to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ReadBinlogFilesTimestampsRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a ReadBinlogFilesTimestampsResponse. */ + interface IReadBinlogFilesTimestampsResponse { + + /** ReadBinlogFilesTimestampsResponse first_timestamp */ + first_timestamp?: (vttime.ITime|null); + + /** ReadBinlogFilesTimestampsResponse first_timestamp_binlog */ + first_timestamp_binlog?: (string|null); + + /** ReadBinlogFilesTimestampsResponse last_timestamp */ + last_timestamp?: (vttime.ITime|null); + + /** ReadBinlogFilesTimestampsResponse last_timestamp_binlog */ + last_timestamp_binlog?: (string|null); + } + + /** Represents a ReadBinlogFilesTimestampsResponse. */ + class ReadBinlogFilesTimestampsResponse implements IReadBinlogFilesTimestampsResponse { + + /** + * Constructs a new ReadBinlogFilesTimestampsResponse. + * @param [properties] Properties to set + */ + constructor(properties?: mysqlctl.IReadBinlogFilesTimestampsResponse); + + /** ReadBinlogFilesTimestampsResponse first_timestamp. */ + public first_timestamp?: (vttime.ITime|null); + + /** ReadBinlogFilesTimestampsResponse first_timestamp_binlog. */ + public first_timestamp_binlog: string; + + /** ReadBinlogFilesTimestampsResponse last_timestamp. */ + public last_timestamp?: (vttime.ITime|null); + + /** ReadBinlogFilesTimestampsResponse last_timestamp_binlog. */ + public last_timestamp_binlog: string; + + /** + * Creates a new ReadBinlogFilesTimestampsResponse instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns ReadBinlogFilesTimestampsResponse instance + */ + public static create(properties?: mysqlctl.IReadBinlogFilesTimestampsResponse): mysqlctl.ReadBinlogFilesTimestampsResponse; + + /** + * Encodes the specified ReadBinlogFilesTimestampsResponse message. Does not implicitly {@link mysqlctl.ReadBinlogFilesTimestampsResponse.verify|verify} messages. + * @param message ReadBinlogFilesTimestampsResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: mysqlctl.IReadBinlogFilesTimestampsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ReadBinlogFilesTimestampsResponse message, length delimited. Does not implicitly {@link mysqlctl.ReadBinlogFilesTimestampsResponse.verify|verify} messages. + * @param message ReadBinlogFilesTimestampsResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: mysqlctl.IReadBinlogFilesTimestampsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ReadBinlogFilesTimestampsResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ReadBinlogFilesTimestampsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): mysqlctl.ReadBinlogFilesTimestampsResponse; + + /** + * Decodes a ReadBinlogFilesTimestampsResponse message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns ReadBinlogFilesTimestampsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): mysqlctl.ReadBinlogFilesTimestampsResponse; + + /** + * Verifies a ReadBinlogFilesTimestampsResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ReadBinlogFilesTimestampsResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ReadBinlogFilesTimestampsResponse + */ + public static fromObject(object: { [k: string]: any }): mysqlctl.ReadBinlogFilesTimestampsResponse; + + /** + * Creates a plain object from a ReadBinlogFilesTimestampsResponse message. Also converts values to other types if specified. + * @param message ReadBinlogFilesTimestampsResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: mysqlctl.ReadBinlogFilesTimestampsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ReadBinlogFilesTimestampsResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ReadBinlogFilesTimestampsResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of a ReinitConfigRequest. */ interface IReinitConfigRequest { } @@ -13572,6 +13790,194 @@ export namespace mysqlctl { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of a VersionStringRequest. 
*/ + interface IVersionStringRequest { + } + + /** Represents a VersionStringRequest. */ + class VersionStringRequest implements IVersionStringRequest { + + /** + * Constructs a new VersionStringRequest. + * @param [properties] Properties to set + */ + constructor(properties?: mysqlctl.IVersionStringRequest); + + /** + * Creates a new VersionStringRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns VersionStringRequest instance + */ + public static create(properties?: mysqlctl.IVersionStringRequest): mysqlctl.VersionStringRequest; + + /** + * Encodes the specified VersionStringRequest message. Does not implicitly {@link mysqlctl.VersionStringRequest.verify|verify} messages. + * @param message VersionStringRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: mysqlctl.IVersionStringRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified VersionStringRequest message, length delimited. Does not implicitly {@link mysqlctl.VersionStringRequest.verify|verify} messages. + * @param message VersionStringRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: mysqlctl.IVersionStringRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a VersionStringRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns VersionStringRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): mysqlctl.VersionStringRequest; + + /** + * Decodes a VersionStringRequest message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns VersionStringRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): mysqlctl.VersionStringRequest; + + /** + * Verifies a VersionStringRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a VersionStringRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns VersionStringRequest + */ + public static fromObject(object: { [k: string]: any }): mysqlctl.VersionStringRequest; + + /** + * Creates a plain object from a VersionStringRequest message. Also converts values to other types if specified. + * @param message VersionStringRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: mysqlctl.VersionStringRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this VersionStringRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for VersionStringRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a VersionStringResponse. */ + interface IVersionStringResponse { + + /** VersionStringResponse version */ + version?: (string|null); + } + + /** Represents a VersionStringResponse. */ + class VersionStringResponse implements IVersionStringResponse { + + /** + * Constructs a new VersionStringResponse. 
+ * @param [properties] Properties to set + */ + constructor(properties?: mysqlctl.IVersionStringResponse); + + /** VersionStringResponse version. */ + public version: string; + + /** + * Creates a new VersionStringResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns VersionStringResponse instance + */ + public static create(properties?: mysqlctl.IVersionStringResponse): mysqlctl.VersionStringResponse; + + /** + * Encodes the specified VersionStringResponse message. Does not implicitly {@link mysqlctl.VersionStringResponse.verify|verify} messages. + * @param message VersionStringResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: mysqlctl.IVersionStringResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified VersionStringResponse message, length delimited. Does not implicitly {@link mysqlctl.VersionStringResponse.verify|verify} messages. + * @param message VersionStringResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: mysqlctl.IVersionStringResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a VersionStringResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns VersionStringResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): mysqlctl.VersionStringResponse; + + /** + * Decodes a VersionStringResponse message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns VersionStringResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): mysqlctl.VersionStringResponse; + + /** + * Verifies a VersionStringResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a VersionStringResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns VersionStringResponse + */ + public static fromObject(object: { [k: string]: any }): mysqlctl.VersionStringResponse; + + /** + * Creates a plain object from a VersionStringResponse message. Also converts values to other types if specified. + * @param message VersionStringResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: mysqlctl.VersionStringResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this VersionStringResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for VersionStringResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Represents a MysqlCtl */ class MysqlCtl extends $protobuf.rpc.Service { @@ -13648,6 +14054,20 @@ export namespace mysqlctl { */ public applyBinlogFile(request: mysqlctl.IApplyBinlogFileRequest): Promise; + /** + * Calls ReadBinlogFilesTimestamps. 
+ * @param request ReadBinlogFilesTimestampsRequest message or plain object + * @param callback Node-style callback called with the error, if any, and ReadBinlogFilesTimestampsResponse + */ + public readBinlogFilesTimestamps(request: mysqlctl.IReadBinlogFilesTimestampsRequest, callback: mysqlctl.MysqlCtl.ReadBinlogFilesTimestampsCallback): void; + + /** + * Calls ReadBinlogFilesTimestamps. + * @param request ReadBinlogFilesTimestampsRequest message or plain object + * @returns Promise + */ + public readBinlogFilesTimestamps(request: mysqlctl.IReadBinlogFilesTimestampsRequest): Promise; + /** * Calls ReinitConfig. * @param request ReinitConfigRequest message or plain object @@ -13675,6 +14095,20 @@ export namespace mysqlctl { * @returns Promise */ public refreshConfig(request: mysqlctl.IRefreshConfigRequest): Promise; + + /** + * Calls VersionString. + * @param request VersionStringRequest message or plain object + * @param callback Node-style callback called with the error, if any, and VersionStringResponse + */ + public versionString(request: mysqlctl.IVersionStringRequest, callback: mysqlctl.MysqlCtl.VersionStringCallback): void; + + /** + * Calls VersionString. + * @param request VersionStringRequest message or plain object + * @returns Promise + */ + public versionString(request: mysqlctl.IVersionStringRequest): Promise; } namespace MysqlCtl { @@ -13707,6 +14141,13 @@ export namespace mysqlctl { */ type ApplyBinlogFileCallback = (error: (Error|null), response?: mysqlctl.ApplyBinlogFileResponse) => void; + /** + * Callback as used by {@link mysqlctl.MysqlCtl#readBinlogFilesTimestamps}. + * @param error Error, if any + * @param [response] ReadBinlogFilesTimestampsResponse + */ + type ReadBinlogFilesTimestampsCallback = (error: (Error|null), response?: mysqlctl.ReadBinlogFilesTimestampsResponse) => void; + /** * Callback as used by {@link mysqlctl.MysqlCtl#reinitConfig}. 
* @param error Error, if any @@ -13720,6 +14161,13 @@ export namespace mysqlctl { * @param [response] RefreshConfigResponse */ type RefreshConfigCallback = (error: (Error|null), response?: mysqlctl.RefreshConfigResponse) => void; + + /** + * Callback as used by {@link mysqlctl.MysqlCtl#versionString}. + * @param error Error, if any + * @param [response] VersionStringResponse + */ + type VersionStringCallback = (error: (Error|null), response?: mysqlctl.VersionStringResponse) => void; } /** Properties of a BackupInfo. */ @@ -15406,6 +15854,121 @@ export namespace topodata { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of a ThrottledAppRule. */ + interface IThrottledAppRule { + + /** ThrottledAppRule name */ + name?: (string|null); + + /** ThrottledAppRule ratio */ + ratio?: (number|null); + + /** ThrottledAppRule expires_at */ + expires_at?: (vttime.ITime|null); + + /** ThrottledAppRule exempt */ + exempt?: (boolean|null); + } + + /** Represents a ThrottledAppRule. */ + class ThrottledAppRule implements IThrottledAppRule { + + /** + * Constructs a new ThrottledAppRule. + * @param [properties] Properties to set + */ + constructor(properties?: topodata.IThrottledAppRule); + + /** ThrottledAppRule name. */ + public name: string; + + /** ThrottledAppRule ratio. */ + public ratio: number; + + /** ThrottledAppRule expires_at. */ + public expires_at?: (vttime.ITime|null); + + /** ThrottledAppRule exempt. */ + public exempt: boolean; + + /** + * Creates a new ThrottledAppRule instance using the specified properties. + * @param [properties] Properties to set + * @returns ThrottledAppRule instance + */ + public static create(properties?: topodata.IThrottledAppRule): topodata.ThrottledAppRule; + + /** + * Encodes the specified ThrottledAppRule message. Does not implicitly {@link topodata.ThrottledAppRule.verify|verify} messages. 
+ * @param message ThrottledAppRule message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: topodata.IThrottledAppRule, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ThrottledAppRule message, length delimited. Does not implicitly {@link topodata.ThrottledAppRule.verify|verify} messages. + * @param message ThrottledAppRule message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: topodata.IThrottledAppRule, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ThrottledAppRule message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ThrottledAppRule + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): topodata.ThrottledAppRule; + + /** + * Decodes a ThrottledAppRule message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ThrottledAppRule + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): topodata.ThrottledAppRule; + + /** + * Verifies a ThrottledAppRule message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ThrottledAppRule message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns ThrottledAppRule + */ + public static fromObject(object: { [k: string]: any }): topodata.ThrottledAppRule; + + /** + * Creates a plain object from a ThrottledAppRule message. Also converts values to other types if specified. + * @param message ThrottledAppRule + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: topodata.ThrottledAppRule, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ThrottledAppRule to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ThrottledAppRule + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of a ThrottlerConfig. */ interface IThrottlerConfig { @@ -15420,6 +15983,9 @@ export namespace topodata { /** ThrottlerConfig check_as_check_self */ check_as_check_self?: (boolean|null); + + /** ThrottlerConfig throttled_apps */ + throttled_apps?: ({ [k: string]: topodata.IThrottledAppRule }|null); } /** Represents a ThrottlerConfig. */ @@ -15443,6 +16009,9 @@ export namespace topodata { /** ThrottlerConfig check_as_check_self. */ public check_as_check_self: boolean; + /** ThrottlerConfig throttled_apps. */ + public throttled_apps: { [k: string]: topodata.IThrottledAppRule }; + /** * Creates a new ThrottlerConfig instance using the specified properties. * @param [properties] Properties to set @@ -16352,6 +16921,13 @@ export namespace topodata { /** Namespace tabletmanagerdata. */ export namespace tabletmanagerdata { + /** TabletSelectionPreference enum. */ + enum TabletSelectionPreference { + ANY = 0, + INORDER = 1, + UNKNOWN = 3 + } + /** Properties of a TableDefinition. 
*/ interface ITableDefinition { @@ -19354,6 +19930,9 @@ export namespace tabletmanagerdata { /** ApplySchemaRequest sql_mode */ sql_mode?: (string|null); + + /** ApplySchemaRequest batch_size */ + batch_size?: (number|Long|null); } /** Represents an ApplySchemaRequest. */ @@ -19383,6 +19962,9 @@ export namespace tabletmanagerdata { /** ApplySchemaRequest sql_mode. */ public sql_mode: string; + /** ApplySchemaRequest batch_size. */ + public batch_size: (number|Long); + /** * Creates a new ApplySchemaRequest instance using the specified properties. * @param [properties] Properties to set @@ -25377,6 +25959,9 @@ export namespace tabletmanagerdata { /** BackupRequest incremental_from_pos */ incremental_from_pos?: (string|null); + + /** BackupRequest upgrade_safe */ + upgrade_safe?: (boolean|null); } /** Represents a BackupRequest. */ @@ -25397,6 +25982,9 @@ export namespace tabletmanagerdata { /** BackupRequest incremental_from_pos. */ public incremental_from_pos: string; + /** BackupRequest upgrade_safe. */ + public upgrade_safe: boolean; + /** * Creates a new BackupRequest instance using the specified properties. * @param [properties] Properties to set @@ -25583,6 +26171,9 @@ export namespace tabletmanagerdata { /** RestoreFromBackupRequest dry_run */ dry_run?: (boolean|null); + + /** RestoreFromBackupRequest restore_to_timestamp */ + restore_to_timestamp?: (vttime.ITime|null); } /** Represents a RestoreFromBackupRequest. */ @@ -25603,6 +26194,9 @@ export namespace tabletmanagerdata { /** RestoreFromBackupRequest dry_run. */ public dry_run: boolean; + /** RestoreFromBackupRequest restore_to_timestamp. */ + public restore_to_timestamp?: (vttime.ITime|null); + /** * Creates a new RestoreFromBackupRequest instance using the specified properties. * @param [properties] Properties to set @@ -25778,14312 +26372,14649 @@ export namespace tabletmanagerdata { public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a VDiffRequest. 
*/ - interface IVDiffRequest { - - /** VDiffRequest keyspace */ - keyspace?: (string|null); + /** Properties of a CreateVReplicationWorkflowRequest. */ + interface ICreateVReplicationWorkflowRequest { - /** VDiffRequest workflow */ + /** CreateVReplicationWorkflowRequest workflow */ workflow?: (string|null); - /** VDiffRequest action */ - action?: (string|null); + /** CreateVReplicationWorkflowRequest binlog_source */ + binlog_source?: (binlogdata.IBinlogSource[]|null); - /** VDiffRequest action_arg */ - action_arg?: (string|null); - - /** VDiffRequest vdiff_uuid */ - vdiff_uuid?: (string|null); - - /** VDiffRequest options */ - options?: (tabletmanagerdata.IVDiffOptions|null); - } + /** CreateVReplicationWorkflowRequest cells */ + cells?: (string[]|null); - /** Represents a VDiffRequest. */ - class VDiffRequest implements IVDiffRequest { + /** CreateVReplicationWorkflowRequest tablet_types */ + tablet_types?: (topodata.TabletType[]|null); - /** - * Constructs a new VDiffRequest. - * @param [properties] Properties to set - */ - constructor(properties?: tabletmanagerdata.IVDiffRequest); + /** CreateVReplicationWorkflowRequest tablet_selection_preference */ + tablet_selection_preference?: (tabletmanagerdata.TabletSelectionPreference|null); - /** VDiffRequest keyspace. */ - public keyspace: string; + /** CreateVReplicationWorkflowRequest workflow_type */ + workflow_type?: (binlogdata.VReplicationWorkflowType|null); - /** VDiffRequest workflow. */ - public workflow: string; + /** CreateVReplicationWorkflowRequest workflow_sub_type */ + workflow_sub_type?: (binlogdata.VReplicationWorkflowSubType|null); - /** VDiffRequest action. */ - public action: string; + /** CreateVReplicationWorkflowRequest defer_secondary_keys */ + defer_secondary_keys?: (boolean|null); - /** VDiffRequest action_arg. */ - public action_arg: string; + /** CreateVReplicationWorkflowRequest auto_start */ + auto_start?: (boolean|null); - /** VDiffRequest vdiff_uuid. 
*/ - public vdiff_uuid: string; + /** CreateVReplicationWorkflowRequest stop_after_copy */ + stop_after_copy?: (boolean|null); + } - /** VDiffRequest options. */ - public options?: (tabletmanagerdata.IVDiffOptions|null); + /** Represents a CreateVReplicationWorkflowRequest. */ + class CreateVReplicationWorkflowRequest implements ICreateVReplicationWorkflowRequest { /** - * Creates a new VDiffRequest instance using the specified properties. + * Constructs a new CreateVReplicationWorkflowRequest. * @param [properties] Properties to set - * @returns VDiffRequest instance - */ - public static create(properties?: tabletmanagerdata.IVDiffRequest): tabletmanagerdata.VDiffRequest; - - /** - * Encodes the specified VDiffRequest message. Does not implicitly {@link tabletmanagerdata.VDiffRequest.verify|verify} messages. - * @param message VDiffRequest message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encode(message: tabletmanagerdata.IVDiffRequest, writer?: $protobuf.Writer): $protobuf.Writer; - - /** - * Encodes the specified VDiffRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffRequest.verify|verify} messages. - * @param message VDiffRequest message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encodeDelimited(message: tabletmanagerdata.IVDiffRequest, writer?: $protobuf.Writer): $protobuf.Writer; - - /** - * Decodes a VDiffRequest message from the specified reader or buffer. 
- * @param reader Reader or buffer to decode from - * @param [length] Message length if known beforehand - * @returns VDiffRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.VDiffRequest; - - /** - * Decodes a VDiffRequest message from the specified reader or buffer, length delimited. - * @param reader Reader or buffer to decode from - * @returns VDiffRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.VDiffRequest; - - /** - * Verifies a VDiffRequest message. - * @param message Plain object to verify - * @returns `null` if valid, otherwise the reason why it is not */ - public static verify(message: { [k: string]: any }): (string|null); + constructor(properties?: tabletmanagerdata.ICreateVReplicationWorkflowRequest); - /** - * Creates a VDiffRequest message from a plain object. Also converts values to their respective internal types. - * @param object Plain object - * @returns VDiffRequest - */ - public static fromObject(object: { [k: string]: any }): tabletmanagerdata.VDiffRequest; + /** CreateVReplicationWorkflowRequest workflow. */ + public workflow: string; - /** - * Creates a plain object from a VDiffRequest message. Also converts values to other types if specified. - * @param message VDiffRequest - * @param [options] Conversion options - * @returns Plain object - */ - public static toObject(message: tabletmanagerdata.VDiffRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + /** CreateVReplicationWorkflowRequest binlog_source. */ + public binlog_source: binlogdata.IBinlogSource[]; - /** - * Converts this VDiffRequest to JSON. 
- * @returns JSON object - */ - public toJSON(): { [k: string]: any }; + /** CreateVReplicationWorkflowRequest cells. */ + public cells: string[]; - /** - * Gets the default type url for VDiffRequest - * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns The default type url - */ - public static getTypeUrl(typeUrlPrefix?: string): string; - } + /** CreateVReplicationWorkflowRequest tablet_types. */ + public tablet_types: topodata.TabletType[]; - /** Properties of a VDiffResponse. */ - interface IVDiffResponse { + /** CreateVReplicationWorkflowRequest tablet_selection_preference. */ + public tablet_selection_preference: tabletmanagerdata.TabletSelectionPreference; - /** VDiffResponse id */ - id?: (number|Long|null); + /** CreateVReplicationWorkflowRequest workflow_type. */ + public workflow_type: binlogdata.VReplicationWorkflowType; - /** VDiffResponse output */ - output?: (query.IQueryResult|null); + /** CreateVReplicationWorkflowRequest workflow_sub_type. */ + public workflow_sub_type: binlogdata.VReplicationWorkflowSubType; - /** VDiffResponse vdiff_uuid */ - vdiff_uuid?: (string|null); - } - - /** Represents a VDiffResponse. */ - class VDiffResponse implements IVDiffResponse { - - /** - * Constructs a new VDiffResponse. - * @param [properties] Properties to set - */ - constructor(properties?: tabletmanagerdata.IVDiffResponse); - - /** VDiffResponse id. */ - public id: (number|Long); + /** CreateVReplicationWorkflowRequest defer_secondary_keys. */ + public defer_secondary_keys: boolean; - /** VDiffResponse output. */ - public output?: (query.IQueryResult|null); + /** CreateVReplicationWorkflowRequest auto_start. */ + public auto_start: boolean; - /** VDiffResponse vdiff_uuid. */ - public vdiff_uuid: string; + /** CreateVReplicationWorkflowRequest stop_after_copy. */ + public stop_after_copy: boolean; /** - * Creates a new VDiffResponse instance using the specified properties. 
+ * Creates a new CreateVReplicationWorkflowRequest instance using the specified properties. * @param [properties] Properties to set - * @returns VDiffResponse instance + * @returns CreateVReplicationWorkflowRequest instance */ - public static create(properties?: tabletmanagerdata.IVDiffResponse): tabletmanagerdata.VDiffResponse; + public static create(properties?: tabletmanagerdata.ICreateVReplicationWorkflowRequest): tabletmanagerdata.CreateVReplicationWorkflowRequest; /** - * Encodes the specified VDiffResponse message. Does not implicitly {@link tabletmanagerdata.VDiffResponse.verify|verify} messages. - * @param message VDiffResponse message or plain object to encode + * Encodes the specified CreateVReplicationWorkflowRequest message. Does not implicitly {@link tabletmanagerdata.CreateVReplicationWorkflowRequest.verify|verify} messages. + * @param message CreateVReplicationWorkflowRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: tabletmanagerdata.IVDiffResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: tabletmanagerdata.ICreateVReplicationWorkflowRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified VDiffResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffResponse.verify|verify} messages. - * @param message VDiffResponse message or plain object to encode + * Encodes the specified CreateVReplicationWorkflowRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.CreateVReplicationWorkflowRequest.verify|verify} messages. 
+ * @param message CreateVReplicationWorkflowRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: tabletmanagerdata.IVDiffResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: tabletmanagerdata.ICreateVReplicationWorkflowRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a VDiffResponse message from the specified reader or buffer. + * Decodes a CreateVReplicationWorkflowRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns VDiffResponse + * @returns CreateVReplicationWorkflowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.VDiffResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.CreateVReplicationWorkflowRequest; /** - * Decodes a VDiffResponse message from the specified reader or buffer, length delimited. + * Decodes a CreateVReplicationWorkflowRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns VDiffResponse + * @returns CreateVReplicationWorkflowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.VDiffResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.CreateVReplicationWorkflowRequest; /** - * Verifies a VDiffResponse message. + * Verifies a CreateVReplicationWorkflowRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a VDiffResponse message from a plain object. Also converts values to their respective internal types. + * Creates a CreateVReplicationWorkflowRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns VDiffResponse + * @returns CreateVReplicationWorkflowRequest */ - public static fromObject(object: { [k: string]: any }): tabletmanagerdata.VDiffResponse; + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.CreateVReplicationWorkflowRequest; /** - * Creates a plain object from a VDiffResponse message. Also converts values to other types if specified. - * @param message VDiffResponse + * Creates a plain object from a CreateVReplicationWorkflowRequest message. Also converts values to other types if specified. + * @param message CreateVReplicationWorkflowRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: tabletmanagerdata.VDiffResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: tabletmanagerdata.CreateVReplicationWorkflowRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this VDiffResponse to JSON. + * Converts this CreateVReplicationWorkflowRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for VDiffResponse + * Gets the default type url for CreateVReplicationWorkflowRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a VDiffPickerOptions. 
*/ - interface IVDiffPickerOptions { - - /** VDiffPickerOptions tablet_types */ - tablet_types?: (string|null); - - /** VDiffPickerOptions source_cell */ - source_cell?: (string|null); + /** Properties of a CreateVReplicationWorkflowResponse. */ + interface ICreateVReplicationWorkflowResponse { - /** VDiffPickerOptions target_cell */ - target_cell?: (string|null); + /** CreateVReplicationWorkflowResponse result */ + result?: (query.IQueryResult|null); } - /** Represents a VDiffPickerOptions. */ - class VDiffPickerOptions implements IVDiffPickerOptions { + /** Represents a CreateVReplicationWorkflowResponse. */ + class CreateVReplicationWorkflowResponse implements ICreateVReplicationWorkflowResponse { /** - * Constructs a new VDiffPickerOptions. + * Constructs a new CreateVReplicationWorkflowResponse. * @param [properties] Properties to set */ - constructor(properties?: tabletmanagerdata.IVDiffPickerOptions); - - /** VDiffPickerOptions tablet_types. */ - public tablet_types: string; - - /** VDiffPickerOptions source_cell. */ - public source_cell: string; + constructor(properties?: tabletmanagerdata.ICreateVReplicationWorkflowResponse); - /** VDiffPickerOptions target_cell. */ - public target_cell: string; + /** CreateVReplicationWorkflowResponse result. */ + public result?: (query.IQueryResult|null); /** - * Creates a new VDiffPickerOptions instance using the specified properties. + * Creates a new CreateVReplicationWorkflowResponse instance using the specified properties. * @param [properties] Properties to set - * @returns VDiffPickerOptions instance + * @returns CreateVReplicationWorkflowResponse instance */ - public static create(properties?: tabletmanagerdata.IVDiffPickerOptions): tabletmanagerdata.VDiffPickerOptions; + public static create(properties?: tabletmanagerdata.ICreateVReplicationWorkflowResponse): tabletmanagerdata.CreateVReplicationWorkflowResponse; /** - * Encodes the specified VDiffPickerOptions message. 
Does not implicitly {@link tabletmanagerdata.VDiffPickerOptions.verify|verify} messages. - * @param message VDiffPickerOptions message or plain object to encode + * Encodes the specified CreateVReplicationWorkflowResponse message. Does not implicitly {@link tabletmanagerdata.CreateVReplicationWorkflowResponse.verify|verify} messages. + * @param message CreateVReplicationWorkflowResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: tabletmanagerdata.IVDiffPickerOptions, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: tabletmanagerdata.ICreateVReplicationWorkflowResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified VDiffPickerOptions message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffPickerOptions.verify|verify} messages. - * @param message VDiffPickerOptions message or plain object to encode + * Encodes the specified CreateVReplicationWorkflowResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.CreateVReplicationWorkflowResponse.verify|verify} messages. + * @param message CreateVReplicationWorkflowResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: tabletmanagerdata.IVDiffPickerOptions, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: tabletmanagerdata.ICreateVReplicationWorkflowResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a VDiffPickerOptions message from the specified reader or buffer. + * Decodes a CreateVReplicationWorkflowResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns VDiffPickerOptions + * @returns CreateVReplicationWorkflowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.VDiffPickerOptions; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.CreateVReplicationWorkflowResponse; /** - * Decodes a VDiffPickerOptions message from the specified reader or buffer, length delimited. + * Decodes a CreateVReplicationWorkflowResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns VDiffPickerOptions + * @returns CreateVReplicationWorkflowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.VDiffPickerOptions; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.CreateVReplicationWorkflowResponse; /** - * Verifies a VDiffPickerOptions message. + * Verifies a CreateVReplicationWorkflowResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a VDiffPickerOptions message from a plain object. Also converts values to their respective internal types. + * Creates a CreateVReplicationWorkflowResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns VDiffPickerOptions + * @returns CreateVReplicationWorkflowResponse */ - public static fromObject(object: { [k: string]: any }): tabletmanagerdata.VDiffPickerOptions; + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.CreateVReplicationWorkflowResponse; /** - * Creates a plain object from a VDiffPickerOptions message. Also converts values to other types if specified. - * @param message VDiffPickerOptions + * Creates a plain object from a CreateVReplicationWorkflowResponse message. Also converts values to other types if specified. + * @param message CreateVReplicationWorkflowResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: tabletmanagerdata.VDiffPickerOptions, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: tabletmanagerdata.CreateVReplicationWorkflowResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this VDiffPickerOptions to JSON. + * Converts this CreateVReplicationWorkflowResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for VDiffPickerOptions + * Gets the default type url for CreateVReplicationWorkflowResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a VDiffReportOptions. */ - interface IVDiffReportOptions { + /** Properties of a DeleteVReplicationWorkflowRequest. */ + interface IDeleteVReplicationWorkflowRequest { - /** VDiffReportOptions only_pks */ - only_pks?: (boolean|null); - - /** VDiffReportOptions debug_query */ - debug_query?: (boolean|null); - - /** VDiffReportOptions format */ - format?: (string|null); + /** DeleteVReplicationWorkflowRequest workflow */ + workflow?: (string|null); } - /** Represents a VDiffReportOptions. 
*/ - class VDiffReportOptions implements IVDiffReportOptions { + /** Represents a DeleteVReplicationWorkflowRequest. */ + class DeleteVReplicationWorkflowRequest implements IDeleteVReplicationWorkflowRequest { /** - * Constructs a new VDiffReportOptions. + * Constructs a new DeleteVReplicationWorkflowRequest. * @param [properties] Properties to set */ - constructor(properties?: tabletmanagerdata.IVDiffReportOptions); - - /** VDiffReportOptions only_pks. */ - public only_pks: boolean; - - /** VDiffReportOptions debug_query. */ - public debug_query: boolean; + constructor(properties?: tabletmanagerdata.IDeleteVReplicationWorkflowRequest); - /** VDiffReportOptions format. */ - public format: string; + /** DeleteVReplicationWorkflowRequest workflow. */ + public workflow: string; /** - * Creates a new VDiffReportOptions instance using the specified properties. + * Creates a new DeleteVReplicationWorkflowRequest instance using the specified properties. * @param [properties] Properties to set - * @returns VDiffReportOptions instance + * @returns DeleteVReplicationWorkflowRequest instance */ - public static create(properties?: tabletmanagerdata.IVDiffReportOptions): tabletmanagerdata.VDiffReportOptions; + public static create(properties?: tabletmanagerdata.IDeleteVReplicationWorkflowRequest): tabletmanagerdata.DeleteVReplicationWorkflowRequest; /** - * Encodes the specified VDiffReportOptions message. Does not implicitly {@link tabletmanagerdata.VDiffReportOptions.verify|verify} messages. - * @param message VDiffReportOptions message or plain object to encode + * Encodes the specified DeleteVReplicationWorkflowRequest message. Does not implicitly {@link tabletmanagerdata.DeleteVReplicationWorkflowRequest.verify|verify} messages. 
+ * @param message DeleteVReplicationWorkflowRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: tabletmanagerdata.IVDiffReportOptions, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: tabletmanagerdata.IDeleteVReplicationWorkflowRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified VDiffReportOptions message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffReportOptions.verify|verify} messages. - * @param message VDiffReportOptions message or plain object to encode + * Encodes the specified DeleteVReplicationWorkflowRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.DeleteVReplicationWorkflowRequest.verify|verify} messages. + * @param message DeleteVReplicationWorkflowRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: tabletmanagerdata.IVDiffReportOptions, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: tabletmanagerdata.IDeleteVReplicationWorkflowRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a VDiffReportOptions message from the specified reader or buffer. + * Decodes a DeleteVReplicationWorkflowRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns VDiffReportOptions + * @returns DeleteVReplicationWorkflowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.VDiffReportOptions; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.DeleteVReplicationWorkflowRequest; /** - * Decodes a VDiffReportOptions message from the specified reader or buffer, length delimited. + * Decodes a DeleteVReplicationWorkflowRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns VDiffReportOptions + * @returns DeleteVReplicationWorkflowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.VDiffReportOptions; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.DeleteVReplicationWorkflowRequest; /** - * Verifies a VDiffReportOptions message. + * Verifies a DeleteVReplicationWorkflowRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a VDiffReportOptions message from a plain object. Also converts values to their respective internal types. + * Creates a DeleteVReplicationWorkflowRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns VDiffReportOptions + * @returns DeleteVReplicationWorkflowRequest */ - public static fromObject(object: { [k: string]: any }): tabletmanagerdata.VDiffReportOptions; + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.DeleteVReplicationWorkflowRequest; /** - * Creates a plain object from a VDiffReportOptions message. Also converts values to other types if specified. - * @param message VDiffReportOptions + * Creates a plain object from a DeleteVReplicationWorkflowRequest message. Also converts values to other types if specified. + * @param message DeleteVReplicationWorkflowRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: tabletmanagerdata.VDiffReportOptions, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: tabletmanagerdata.DeleteVReplicationWorkflowRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this VDiffReportOptions to JSON. + * Converts this DeleteVReplicationWorkflowRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for VDiffReportOptions + * Gets the default type url for DeleteVReplicationWorkflowRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a VDiffCoreOptions. 
*/ - interface IVDiffCoreOptions { - - /** VDiffCoreOptions tables */ - tables?: (string|null); - - /** VDiffCoreOptions auto_retry */ - auto_retry?: (boolean|null); - - /** VDiffCoreOptions max_rows */ - max_rows?: (number|Long|null); - - /** VDiffCoreOptions checksum */ - checksum?: (boolean|null); - - /** VDiffCoreOptions sample_pct */ - sample_pct?: (number|Long|null); - - /** VDiffCoreOptions timeout_seconds */ - timeout_seconds?: (number|Long|null); - - /** VDiffCoreOptions max_extra_rows_to_compare */ - max_extra_rows_to_compare?: (number|Long|null); + /** Properties of a DeleteVReplicationWorkflowResponse. */ + interface IDeleteVReplicationWorkflowResponse { - /** VDiffCoreOptions update_table_stats */ - update_table_stats?: (boolean|null); + /** DeleteVReplicationWorkflowResponse result */ + result?: (query.IQueryResult|null); } - /** Represents a VDiffCoreOptions. */ - class VDiffCoreOptions implements IVDiffCoreOptions { + /** Represents a DeleteVReplicationWorkflowResponse. */ + class DeleteVReplicationWorkflowResponse implements IDeleteVReplicationWorkflowResponse { /** - * Constructs a new VDiffCoreOptions. + * Constructs a new DeleteVReplicationWorkflowResponse. * @param [properties] Properties to set */ - constructor(properties?: tabletmanagerdata.IVDiffCoreOptions); - - /** VDiffCoreOptions tables. */ - public tables: string; - - /** VDiffCoreOptions auto_retry. */ - public auto_retry: boolean; - - /** VDiffCoreOptions max_rows. */ - public max_rows: (number|Long); - - /** VDiffCoreOptions checksum. */ - public checksum: boolean; - - /** VDiffCoreOptions sample_pct. */ - public sample_pct: (number|Long); - - /** VDiffCoreOptions timeout_seconds. */ - public timeout_seconds: (number|Long); - - /** VDiffCoreOptions max_extra_rows_to_compare. */ - public max_extra_rows_to_compare: (number|Long); + constructor(properties?: tabletmanagerdata.IDeleteVReplicationWorkflowResponse); - /** VDiffCoreOptions update_table_stats. 
*/ - public update_table_stats: boolean; + /** DeleteVReplicationWorkflowResponse result. */ + public result?: (query.IQueryResult|null); /** - * Creates a new VDiffCoreOptions instance using the specified properties. + * Creates a new DeleteVReplicationWorkflowResponse instance using the specified properties. * @param [properties] Properties to set - * @returns VDiffCoreOptions instance + * @returns DeleteVReplicationWorkflowResponse instance */ - public static create(properties?: tabletmanagerdata.IVDiffCoreOptions): tabletmanagerdata.VDiffCoreOptions; + public static create(properties?: tabletmanagerdata.IDeleteVReplicationWorkflowResponse): tabletmanagerdata.DeleteVReplicationWorkflowResponse; /** - * Encodes the specified VDiffCoreOptions message. Does not implicitly {@link tabletmanagerdata.VDiffCoreOptions.verify|verify} messages. - * @param message VDiffCoreOptions message or plain object to encode + * Encodes the specified DeleteVReplicationWorkflowResponse message. Does not implicitly {@link tabletmanagerdata.DeleteVReplicationWorkflowResponse.verify|verify} messages. + * @param message DeleteVReplicationWorkflowResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: tabletmanagerdata.IVDiffCoreOptions, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: tabletmanagerdata.IDeleteVReplicationWorkflowResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified VDiffCoreOptions message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffCoreOptions.verify|verify} messages. - * @param message VDiffCoreOptions message or plain object to encode + * Encodes the specified DeleteVReplicationWorkflowResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.DeleteVReplicationWorkflowResponse.verify|verify} messages. 
+ * @param message DeleteVReplicationWorkflowResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: tabletmanagerdata.IVDiffCoreOptions, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: tabletmanagerdata.IDeleteVReplicationWorkflowResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a VDiffCoreOptions message from the specified reader or buffer. + * Decodes a DeleteVReplicationWorkflowResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns VDiffCoreOptions + * @returns DeleteVReplicationWorkflowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.VDiffCoreOptions; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.DeleteVReplicationWorkflowResponse; /** - * Decodes a VDiffCoreOptions message from the specified reader or buffer, length delimited. + * Decodes a DeleteVReplicationWorkflowResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns VDiffCoreOptions + * @returns DeleteVReplicationWorkflowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.VDiffCoreOptions; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.DeleteVReplicationWorkflowResponse; /** - * Verifies a VDiffCoreOptions message. + * Verifies a DeleteVReplicationWorkflowResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a VDiffCoreOptions message from a plain object. Also converts values to their respective internal types. + * Creates a DeleteVReplicationWorkflowResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns VDiffCoreOptions + * @returns DeleteVReplicationWorkflowResponse */ - public static fromObject(object: { [k: string]: any }): tabletmanagerdata.VDiffCoreOptions; + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.DeleteVReplicationWorkflowResponse; /** - * Creates a plain object from a VDiffCoreOptions message. Also converts values to other types if specified. - * @param message VDiffCoreOptions + * Creates a plain object from a DeleteVReplicationWorkflowResponse message. Also converts values to other types if specified. + * @param message DeleteVReplicationWorkflowResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: tabletmanagerdata.VDiffCoreOptions, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: tabletmanagerdata.DeleteVReplicationWorkflowResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this VDiffCoreOptions to JSON. + * Converts this DeleteVReplicationWorkflowResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for VDiffCoreOptions + * Gets the default type url for DeleteVReplicationWorkflowResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a VDiffOptions. 
*/ - interface IVDiffOptions { - - /** VDiffOptions picker_options */ - picker_options?: (tabletmanagerdata.IVDiffPickerOptions|null); + /** Properties of a ReadVReplicationWorkflowRequest. */ + interface IReadVReplicationWorkflowRequest { - /** VDiffOptions core_options */ - core_options?: (tabletmanagerdata.IVDiffCoreOptions|null); - - /** VDiffOptions report_options */ - report_options?: (tabletmanagerdata.IVDiffReportOptions|null); + /** ReadVReplicationWorkflowRequest workflow */ + workflow?: (string|null); } - /** Represents a VDiffOptions. */ - class VDiffOptions implements IVDiffOptions { + /** Represents a ReadVReplicationWorkflowRequest. */ + class ReadVReplicationWorkflowRequest implements IReadVReplicationWorkflowRequest { /** - * Constructs a new VDiffOptions. + * Constructs a new ReadVReplicationWorkflowRequest. * @param [properties] Properties to set */ - constructor(properties?: tabletmanagerdata.IVDiffOptions); - - /** VDiffOptions picker_options. */ - public picker_options?: (tabletmanagerdata.IVDiffPickerOptions|null); - - /** VDiffOptions core_options. */ - public core_options?: (tabletmanagerdata.IVDiffCoreOptions|null); + constructor(properties?: tabletmanagerdata.IReadVReplicationWorkflowRequest); - /** VDiffOptions report_options. */ - public report_options?: (tabletmanagerdata.IVDiffReportOptions|null); + /** ReadVReplicationWorkflowRequest workflow. */ + public workflow: string; /** - * Creates a new VDiffOptions instance using the specified properties. + * Creates a new ReadVReplicationWorkflowRequest instance using the specified properties. 
* @param [properties] Properties to set - * @returns VDiffOptions instance + * @returns ReadVReplicationWorkflowRequest instance */ - public static create(properties?: tabletmanagerdata.IVDiffOptions): tabletmanagerdata.VDiffOptions; + public static create(properties?: tabletmanagerdata.IReadVReplicationWorkflowRequest): tabletmanagerdata.ReadVReplicationWorkflowRequest; /** - * Encodes the specified VDiffOptions message. Does not implicitly {@link tabletmanagerdata.VDiffOptions.verify|verify} messages. - * @param message VDiffOptions message or plain object to encode + * Encodes the specified ReadVReplicationWorkflowRequest message. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowRequest.verify|verify} messages. + * @param message ReadVReplicationWorkflowRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: tabletmanagerdata.IVDiffOptions, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: tabletmanagerdata.IReadVReplicationWorkflowRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified VDiffOptions message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffOptions.verify|verify} messages. - * @param message VDiffOptions message or plain object to encode + * Encodes the specified ReadVReplicationWorkflowRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowRequest.verify|verify} messages. 
+ * @param message ReadVReplicationWorkflowRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: tabletmanagerdata.IVDiffOptions, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: tabletmanagerdata.IReadVReplicationWorkflowRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a VDiffOptions message from the specified reader or buffer. + * Decodes a ReadVReplicationWorkflowRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns VDiffOptions + * @returns ReadVReplicationWorkflowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.VDiffOptions; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.ReadVReplicationWorkflowRequest; /** - * Decodes a VDiffOptions message from the specified reader or buffer, length delimited. + * Decodes a ReadVReplicationWorkflowRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns VDiffOptions + * @returns ReadVReplicationWorkflowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.VDiffOptions; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.ReadVReplicationWorkflowRequest; /** - * Verifies a VDiffOptions message. + * Verifies a ReadVReplicationWorkflowRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a VDiffOptions message from a plain object. Also converts values to their respective internal types. + * Creates a ReadVReplicationWorkflowRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns VDiffOptions + * @returns ReadVReplicationWorkflowRequest */ - public static fromObject(object: { [k: string]: any }): tabletmanagerdata.VDiffOptions; + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.ReadVReplicationWorkflowRequest; /** - * Creates a plain object from a VDiffOptions message. Also converts values to other types if specified. - * @param message VDiffOptions + * Creates a plain object from a ReadVReplicationWorkflowRequest message. Also converts values to other types if specified. + * @param message ReadVReplicationWorkflowRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: tabletmanagerdata.VDiffOptions, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: tabletmanagerdata.ReadVReplicationWorkflowRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this VDiffOptions to JSON. + * Converts this ReadVReplicationWorkflowRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for VDiffOptions + * Gets the default type url for ReadVReplicationWorkflowRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an UpdateVRWorkflowRequest. */ - interface IUpdateVRWorkflowRequest { + /** Properties of a ReadVReplicationWorkflowResponse. 
*/ + interface IReadVReplicationWorkflowResponse { - /** UpdateVRWorkflowRequest workflow */ + /** ReadVReplicationWorkflowResponse workflow */ workflow?: (string|null); - /** UpdateVRWorkflowRequest cells */ - cells?: (string[]|null); + /** ReadVReplicationWorkflowResponse cells */ + cells?: (string|null); - /** UpdateVRWorkflowRequest tablet_types */ - tablet_types?: (string[]|null); + /** ReadVReplicationWorkflowResponse tablet_types */ + tablet_types?: (topodata.TabletType[]|null); - /** UpdateVRWorkflowRequest on_ddl */ - on_ddl?: (binlogdata.OnDDLAction|null); + /** ReadVReplicationWorkflowResponse tablet_selection_preference */ + tablet_selection_preference?: (tabletmanagerdata.TabletSelectionPreference|null); + + /** ReadVReplicationWorkflowResponse db_name */ + db_name?: (string|null); + + /** ReadVReplicationWorkflowResponse tags */ + tags?: (string|null); + + /** ReadVReplicationWorkflowResponse workflow_type */ + workflow_type?: (binlogdata.VReplicationWorkflowType|null); + + /** ReadVReplicationWorkflowResponse workflow_sub_type */ + workflow_sub_type?: (binlogdata.VReplicationWorkflowSubType|null); + + /** ReadVReplicationWorkflowResponse defer_secondary_keys */ + defer_secondary_keys?: (boolean|null); + + /** ReadVReplicationWorkflowResponse streams */ + streams?: (tabletmanagerdata.ReadVReplicationWorkflowResponse.IStream[]|null); } - /** Represents an UpdateVRWorkflowRequest. */ - class UpdateVRWorkflowRequest implements IUpdateVRWorkflowRequest { + /** Represents a ReadVReplicationWorkflowResponse. */ + class ReadVReplicationWorkflowResponse implements IReadVReplicationWorkflowResponse { /** - * Constructs a new UpdateVRWorkflowRequest. + * Constructs a new ReadVReplicationWorkflowResponse. * @param [properties] Properties to set */ - constructor(properties?: tabletmanagerdata.IUpdateVRWorkflowRequest); + constructor(properties?: tabletmanagerdata.IReadVReplicationWorkflowResponse); - /** UpdateVRWorkflowRequest workflow. 
*/ + /** ReadVReplicationWorkflowResponse workflow. */ public workflow: string; - /** UpdateVRWorkflowRequest cells. */ - public cells: string[]; + /** ReadVReplicationWorkflowResponse cells. */ + public cells: string; - /** UpdateVRWorkflowRequest tablet_types. */ - public tablet_types: string[]; + /** ReadVReplicationWorkflowResponse tablet_types. */ + public tablet_types: topodata.TabletType[]; - /** UpdateVRWorkflowRequest on_ddl. */ - public on_ddl: binlogdata.OnDDLAction; + /** ReadVReplicationWorkflowResponse tablet_selection_preference. */ + public tablet_selection_preference: tabletmanagerdata.TabletSelectionPreference; + + /** ReadVReplicationWorkflowResponse db_name. */ + public db_name: string; + + /** ReadVReplicationWorkflowResponse tags. */ + public tags: string; + + /** ReadVReplicationWorkflowResponse workflow_type. */ + public workflow_type: binlogdata.VReplicationWorkflowType; + + /** ReadVReplicationWorkflowResponse workflow_sub_type. */ + public workflow_sub_type: binlogdata.VReplicationWorkflowSubType; + + /** ReadVReplicationWorkflowResponse defer_secondary_keys. */ + public defer_secondary_keys: boolean; + + /** ReadVReplicationWorkflowResponse streams. */ + public streams: tabletmanagerdata.ReadVReplicationWorkflowResponse.IStream[]; /** - * Creates a new UpdateVRWorkflowRequest instance using the specified properties. + * Creates a new ReadVReplicationWorkflowResponse instance using the specified properties. * @param [properties] Properties to set - * @returns UpdateVRWorkflowRequest instance + * @returns ReadVReplicationWorkflowResponse instance */ - public static create(properties?: tabletmanagerdata.IUpdateVRWorkflowRequest): tabletmanagerdata.UpdateVRWorkflowRequest; + public static create(properties?: tabletmanagerdata.IReadVReplicationWorkflowResponse): tabletmanagerdata.ReadVReplicationWorkflowResponse; /** - * Encodes the specified UpdateVRWorkflowRequest message. 
Does not implicitly {@link tabletmanagerdata.UpdateVRWorkflowRequest.verify|verify} messages. - * @param message UpdateVRWorkflowRequest message or plain object to encode + * Encodes the specified ReadVReplicationWorkflowResponse message. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowResponse.verify|verify} messages. + * @param message ReadVReplicationWorkflowResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: tabletmanagerdata.IUpdateVRWorkflowRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: tabletmanagerdata.IReadVReplicationWorkflowResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified UpdateVRWorkflowRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.UpdateVRWorkflowRequest.verify|verify} messages. - * @param message UpdateVRWorkflowRequest message or plain object to encode + * Encodes the specified ReadVReplicationWorkflowResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowResponse.verify|verify} messages. + * @param message ReadVReplicationWorkflowResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: tabletmanagerdata.IUpdateVRWorkflowRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: tabletmanagerdata.IReadVReplicationWorkflowResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an UpdateVRWorkflowRequest message from the specified reader or buffer. + * Decodes a ReadVReplicationWorkflowResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns UpdateVRWorkflowRequest + * @returns ReadVReplicationWorkflowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.UpdateVRWorkflowRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.ReadVReplicationWorkflowResponse; /** - * Decodes an UpdateVRWorkflowRequest message from the specified reader or buffer, length delimited. + * Decodes a ReadVReplicationWorkflowResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns UpdateVRWorkflowRequest + * @returns ReadVReplicationWorkflowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.UpdateVRWorkflowRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.ReadVReplicationWorkflowResponse; /** - * Verifies an UpdateVRWorkflowRequest message. + * Verifies a ReadVReplicationWorkflowResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an UpdateVRWorkflowRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ReadVReplicationWorkflowResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns UpdateVRWorkflowRequest + * @returns ReadVReplicationWorkflowResponse */ - public static fromObject(object: { [k: string]: any }): tabletmanagerdata.UpdateVRWorkflowRequest; + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.ReadVReplicationWorkflowResponse; /** - * Creates a plain object from an UpdateVRWorkflowRequest message. Also converts values to other types if specified. - * @param message UpdateVRWorkflowRequest + * Creates a plain object from a ReadVReplicationWorkflowResponse message. Also converts values to other types if specified. + * @param message ReadVReplicationWorkflowResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: tabletmanagerdata.UpdateVRWorkflowRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: tabletmanagerdata.ReadVReplicationWorkflowResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this UpdateVRWorkflowRequest to JSON. + * Converts this ReadVReplicationWorkflowResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for UpdateVRWorkflowRequest + * Gets the default type url for ReadVReplicationWorkflowResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an UpdateVRWorkflowResponse. */ - interface IUpdateVRWorkflowResponse { + namespace ReadVReplicationWorkflowResponse { - /** UpdateVRWorkflowResponse result */ - result?: (query.IQueryResult|null); + /** Properties of a Stream. 
*/ + interface IStream { + + /** Stream id */ + id?: (number|null); + + /** Stream bls */ + bls?: (binlogdata.IBinlogSource|null); + + /** Stream pos */ + pos?: (string|null); + + /** Stream stop_pos */ + stop_pos?: (string|null); + + /** Stream max_tps */ + max_tps?: (number|Long|null); + + /** Stream max_replication_lag */ + max_replication_lag?: (number|Long|null); + + /** Stream time_updated */ + time_updated?: (vttime.ITime|null); + + /** Stream transaction_timestamp */ + transaction_timestamp?: (vttime.ITime|null); + + /** Stream state */ + state?: (binlogdata.VReplicationWorkflowState|null); + + /** Stream message */ + message?: (string|null); + + /** Stream rows_copied */ + rows_copied?: (number|Long|null); + + /** Stream time_heartbeat */ + time_heartbeat?: (vttime.ITime|null); + + /** Stream time_throttled */ + time_throttled?: (vttime.ITime|null); + + /** Stream component_throttled */ + component_throttled?: (string|null); + } + + /** Represents a Stream. */ + class Stream implements IStream { + + /** + * Constructs a new Stream. + * @param [properties] Properties to set + */ + constructor(properties?: tabletmanagerdata.ReadVReplicationWorkflowResponse.IStream); + + /** Stream id. */ + public id: number; + + /** Stream bls. */ + public bls?: (binlogdata.IBinlogSource|null); + + /** Stream pos. */ + public pos: string; + + /** Stream stop_pos. */ + public stop_pos: string; + + /** Stream max_tps. */ + public max_tps: (number|Long); + + /** Stream max_replication_lag. */ + public max_replication_lag: (number|Long); + + /** Stream time_updated. */ + public time_updated?: (vttime.ITime|null); + + /** Stream transaction_timestamp. */ + public transaction_timestamp?: (vttime.ITime|null); + + /** Stream state. */ + public state: binlogdata.VReplicationWorkflowState; + + /** Stream message. */ + public message: string; + + /** Stream rows_copied. */ + public rows_copied: (number|Long); + + /** Stream time_heartbeat. 
*/ + public time_heartbeat?: (vttime.ITime|null); + + /** Stream time_throttled. */ + public time_throttled?: (vttime.ITime|null); + + /** Stream component_throttled. */ + public component_throttled: string; + + /** + * Creates a new Stream instance using the specified properties. + * @param [properties] Properties to set + * @returns Stream instance + */ + public static create(properties?: tabletmanagerdata.ReadVReplicationWorkflowResponse.IStream): tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream; + + /** + * Encodes the specified Stream message. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.verify|verify} messages. + * @param message Stream message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: tabletmanagerdata.ReadVReplicationWorkflowResponse.IStream, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified Stream message, length delimited. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.verify|verify} messages. + * @param message Stream message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: tabletmanagerdata.ReadVReplicationWorkflowResponse.IStream, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a Stream message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns Stream + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream; + + /** + * Decodes a Stream message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns Stream + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream; + + /** + * Verifies a Stream message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a Stream message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns Stream + */ + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream; + + /** + * Creates a plain object from a Stream message. Also converts values to other types if specified. + * @param message Stream + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this Stream to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for Stream + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + } + + /** Properties of a VDiffRequest. 
*/ + interface IVDiffRequest { + + /** VDiffRequest keyspace */ + keyspace?: (string|null); + + /** VDiffRequest workflow */ + workflow?: (string|null); + + /** VDiffRequest action */ + action?: (string|null); + + /** VDiffRequest action_arg */ + action_arg?: (string|null); + + /** VDiffRequest vdiff_uuid */ + vdiff_uuid?: (string|null); + + /** VDiffRequest options */ + options?: (tabletmanagerdata.IVDiffOptions|null); } - /** Represents an UpdateVRWorkflowResponse. */ - class UpdateVRWorkflowResponse implements IUpdateVRWorkflowResponse { + /** Represents a VDiffRequest. */ + class VDiffRequest implements IVDiffRequest { /** - * Constructs a new UpdateVRWorkflowResponse. + * Constructs a new VDiffRequest. * @param [properties] Properties to set */ - constructor(properties?: tabletmanagerdata.IUpdateVRWorkflowResponse); + constructor(properties?: tabletmanagerdata.IVDiffRequest); - /** UpdateVRWorkflowResponse result. */ - public result?: (query.IQueryResult|null); + /** VDiffRequest keyspace. */ + public keyspace: string; + + /** VDiffRequest workflow. */ + public workflow: string; + + /** VDiffRequest action. */ + public action: string; + + /** VDiffRequest action_arg. */ + public action_arg: string; + + /** VDiffRequest vdiff_uuid. */ + public vdiff_uuid: string; + + /** VDiffRequest options. */ + public options?: (tabletmanagerdata.IVDiffOptions|null); /** - * Creates a new UpdateVRWorkflowResponse instance using the specified properties. + * Creates a new VDiffRequest instance using the specified properties. * @param [properties] Properties to set - * @returns UpdateVRWorkflowResponse instance + * @returns VDiffRequest instance */ - public static create(properties?: tabletmanagerdata.IUpdateVRWorkflowResponse): tabletmanagerdata.UpdateVRWorkflowResponse; + public static create(properties?: tabletmanagerdata.IVDiffRequest): tabletmanagerdata.VDiffRequest; /** - * Encodes the specified UpdateVRWorkflowResponse message. 
Does not implicitly {@link tabletmanagerdata.UpdateVRWorkflowResponse.verify|verify} messages. - * @param message UpdateVRWorkflowResponse message or plain object to encode + * Encodes the specified VDiffRequest message. Does not implicitly {@link tabletmanagerdata.VDiffRequest.verify|verify} messages. + * @param message VDiffRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: tabletmanagerdata.IUpdateVRWorkflowResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: tabletmanagerdata.IVDiffRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified UpdateVRWorkflowResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.UpdateVRWorkflowResponse.verify|verify} messages. - * @param message UpdateVRWorkflowResponse message or plain object to encode + * Encodes the specified VDiffRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffRequest.verify|verify} messages. + * @param message VDiffRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: tabletmanagerdata.IUpdateVRWorkflowResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: tabletmanagerdata.IVDiffRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an UpdateVRWorkflowResponse message from the specified reader or buffer. + * Decodes a VDiffRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns UpdateVRWorkflowResponse + * @returns VDiffRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.UpdateVRWorkflowResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.VDiffRequest; /** - * Decodes an UpdateVRWorkflowResponse message from the specified reader or buffer, length delimited. + * Decodes a VDiffRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns UpdateVRWorkflowResponse + * @returns VDiffRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.UpdateVRWorkflowResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.VDiffRequest; /** - * Verifies an UpdateVRWorkflowResponse message. + * Verifies a VDiffRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an UpdateVRWorkflowResponse message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns UpdateVRWorkflowResponse + * @returns VDiffRequest */ - public static fromObject(object: { [k: string]: any }): tabletmanagerdata.UpdateVRWorkflowResponse; + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.VDiffRequest; /** - * Creates a plain object from an UpdateVRWorkflowResponse message. Also converts values to other types if specified. - * @param message UpdateVRWorkflowResponse + * Creates a plain object from a VDiffRequest message. Also converts values to other types if specified. + * @param message VDiffRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: tabletmanagerdata.UpdateVRWorkflowResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: tabletmanagerdata.VDiffRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this UpdateVRWorkflowResponse to JSON. + * Converts this VDiffRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for UpdateVRWorkflowResponse + * Gets the default type url for VDiffRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } -} - -/** Namespace binlogdata. */ -export namespace binlogdata { - /** Properties of a Charset. */ - interface ICharset { + /** Properties of a VDiffResponse. */ + interface IVDiffResponse { - /** Charset client */ - client?: (number|null); + /** VDiffResponse id */ + id?: (number|Long|null); - /** Charset conn */ - conn?: (number|null); + /** VDiffResponse output */ + output?: (query.IQueryResult|null); - /** Charset server */ - server?: (number|null); + /** VDiffResponse vdiff_uuid */ + vdiff_uuid?: (string|null); } - /** Represents a Charset. 
*/ - class Charset implements ICharset { + /** Represents a VDiffResponse. */ + class VDiffResponse implements IVDiffResponse { /** - * Constructs a new Charset. + * Constructs a new VDiffResponse. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.ICharset); + constructor(properties?: tabletmanagerdata.IVDiffResponse); - /** Charset client. */ - public client: number; + /** VDiffResponse id. */ + public id: (number|Long); - /** Charset conn. */ - public conn: number; + /** VDiffResponse output. */ + public output?: (query.IQueryResult|null); - /** Charset server. */ - public server: number; + /** VDiffResponse vdiff_uuid. */ + public vdiff_uuid: string; /** - * Creates a new Charset instance using the specified properties. + * Creates a new VDiffResponse instance using the specified properties. * @param [properties] Properties to set - * @returns Charset instance + * @returns VDiffResponse instance */ - public static create(properties?: binlogdata.ICharset): binlogdata.Charset; + public static create(properties?: tabletmanagerdata.IVDiffResponse): tabletmanagerdata.VDiffResponse; /** - * Encodes the specified Charset message. Does not implicitly {@link binlogdata.Charset.verify|verify} messages. - * @param message Charset message or plain object to encode + * Encodes the specified VDiffResponse message. Does not implicitly {@link tabletmanagerdata.VDiffResponse.verify|verify} messages. + * @param message VDiffResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.ICharset, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: tabletmanagerdata.IVDiffResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified Charset message, length delimited. Does not implicitly {@link binlogdata.Charset.verify|verify} messages. 
- * @param message Charset message or plain object to encode + * Encodes the specified VDiffResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffResponse.verify|verify} messages. + * @param message VDiffResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.ICharset, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: tabletmanagerdata.IVDiffResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a Charset message from the specified reader or buffer. + * Decodes a VDiffResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns Charset + * @returns VDiffResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.Charset; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.VDiffResponse; /** - * Decodes a Charset message from the specified reader or buffer, length delimited. + * Decodes a VDiffResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns Charset + * @returns VDiffResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.Charset; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.VDiffResponse; /** - * Verifies a Charset message. + * Verifies a VDiffResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a Charset message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns Charset + * @returns VDiffResponse */ - public static fromObject(object: { [k: string]: any }): binlogdata.Charset; + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.VDiffResponse; /** - * Creates a plain object from a Charset message. Also converts values to other types if specified. - * @param message Charset + * Creates a plain object from a VDiffResponse message. Also converts values to other types if specified. + * @param message VDiffResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.Charset, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: tabletmanagerdata.VDiffResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this Charset to JSON. + * Converts this VDiffResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for Charset + * Gets the default type url for VDiffResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a BinlogTransaction. */ - interface IBinlogTransaction { + /** Properties of a VDiffPickerOptions. 
*/ + interface IVDiffPickerOptions { - /** BinlogTransaction statements */ - statements?: (binlogdata.BinlogTransaction.IStatement[]|null); + /** VDiffPickerOptions tablet_types */ + tablet_types?: (string|null); - /** BinlogTransaction event_token */ - event_token?: (query.IEventToken|null); + /** VDiffPickerOptions source_cell */ + source_cell?: (string|null); + + /** VDiffPickerOptions target_cell */ + target_cell?: (string|null); } - /** Represents a BinlogTransaction. */ - class BinlogTransaction implements IBinlogTransaction { + /** Represents a VDiffPickerOptions. */ + class VDiffPickerOptions implements IVDiffPickerOptions { /** - * Constructs a new BinlogTransaction. + * Constructs a new VDiffPickerOptions. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.IBinlogTransaction); + constructor(properties?: tabletmanagerdata.IVDiffPickerOptions); - /** BinlogTransaction statements. */ - public statements: binlogdata.BinlogTransaction.IStatement[]; + /** VDiffPickerOptions tablet_types. */ + public tablet_types: string; - /** BinlogTransaction event_token. */ - public event_token?: (query.IEventToken|null); + /** VDiffPickerOptions source_cell. */ + public source_cell: string; + + /** VDiffPickerOptions target_cell. */ + public target_cell: string; /** - * Creates a new BinlogTransaction instance using the specified properties. + * Creates a new VDiffPickerOptions instance using the specified properties. * @param [properties] Properties to set - * @returns BinlogTransaction instance + * @returns VDiffPickerOptions instance */ - public static create(properties?: binlogdata.IBinlogTransaction): binlogdata.BinlogTransaction; + public static create(properties?: tabletmanagerdata.IVDiffPickerOptions): tabletmanagerdata.VDiffPickerOptions; /** - * Encodes the specified BinlogTransaction message. Does not implicitly {@link binlogdata.BinlogTransaction.verify|verify} messages. 
- * @param message BinlogTransaction message or plain object to encode + * Encodes the specified VDiffPickerOptions message. Does not implicitly {@link tabletmanagerdata.VDiffPickerOptions.verify|verify} messages. + * @param message VDiffPickerOptions message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IBinlogTransaction, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: tabletmanagerdata.IVDiffPickerOptions, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified BinlogTransaction message, length delimited. Does not implicitly {@link binlogdata.BinlogTransaction.verify|verify} messages. - * @param message BinlogTransaction message or plain object to encode + * Encodes the specified VDiffPickerOptions message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffPickerOptions.verify|verify} messages. + * @param message VDiffPickerOptions message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IBinlogTransaction, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: tabletmanagerdata.IVDiffPickerOptions, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a BinlogTransaction message from the specified reader or buffer. + * Decodes a VDiffPickerOptions message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns BinlogTransaction + * @returns VDiffPickerOptions * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.BinlogTransaction; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.VDiffPickerOptions; /** - * Decodes a BinlogTransaction message from the specified reader or buffer, length delimited. + * Decodes a VDiffPickerOptions message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns BinlogTransaction + * @returns VDiffPickerOptions * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.BinlogTransaction; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.VDiffPickerOptions; /** - * Verifies a BinlogTransaction message. + * Verifies a VDiffPickerOptions message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a BinlogTransaction message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffPickerOptions message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns BinlogTransaction + * @returns VDiffPickerOptions */ - public static fromObject(object: { [k: string]: any }): binlogdata.BinlogTransaction; + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.VDiffPickerOptions; /** - * Creates a plain object from a BinlogTransaction message. Also converts values to other types if specified. - * @param message BinlogTransaction + * Creates a plain object from a VDiffPickerOptions message. Also converts values to other types if specified. + * @param message VDiffPickerOptions * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.BinlogTransaction, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: tabletmanagerdata.VDiffPickerOptions, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this BinlogTransaction to JSON. + * Converts this VDiffPickerOptions to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for BinlogTransaction + * Gets the default type url for VDiffPickerOptions * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - namespace BinlogTransaction { - - /** Properties of a Statement. */ - interface IStatement { + /** Properties of a VDiffReportOptions. */ + interface IVDiffReportOptions { - /** Statement category */ - category?: (binlogdata.BinlogTransaction.Statement.Category|null); + /** VDiffReportOptions only_pks */ + only_pks?: (boolean|null); - /** Statement charset */ - charset?: (binlogdata.ICharset|null); + /** VDiffReportOptions debug_query */ + debug_query?: (boolean|null); - /** Statement sql */ - sql?: (Uint8Array|null); - } + /** VDiffReportOptions format */ + format?: (string|null); + } - /** Represents a Statement. 
*/ - class Statement implements IStatement { + /** Represents a VDiffReportOptions. */ + class VDiffReportOptions implements IVDiffReportOptions { - /** - * Constructs a new Statement. - * @param [properties] Properties to set - */ - constructor(properties?: binlogdata.BinlogTransaction.IStatement); + /** + * Constructs a new VDiffReportOptions. + * @param [properties] Properties to set + */ + constructor(properties?: tabletmanagerdata.IVDiffReportOptions); - /** Statement category. */ - public category: binlogdata.BinlogTransaction.Statement.Category; + /** VDiffReportOptions only_pks. */ + public only_pks: boolean; - /** Statement charset. */ - public charset?: (binlogdata.ICharset|null); + /** VDiffReportOptions debug_query. */ + public debug_query: boolean; - /** Statement sql. */ - public sql: Uint8Array; + /** VDiffReportOptions format. */ + public format: string; - /** - * Creates a new Statement instance using the specified properties. - * @param [properties] Properties to set - * @returns Statement instance - */ - public static create(properties?: binlogdata.BinlogTransaction.IStatement): binlogdata.BinlogTransaction.Statement; + /** + * Creates a new VDiffReportOptions instance using the specified properties. + * @param [properties] Properties to set + * @returns VDiffReportOptions instance + */ + public static create(properties?: tabletmanagerdata.IVDiffReportOptions): tabletmanagerdata.VDiffReportOptions; - /** - * Encodes the specified Statement message. Does not implicitly {@link binlogdata.BinlogTransaction.Statement.verify|verify} messages. - * @param message Statement message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encode(message: binlogdata.BinlogTransaction.IStatement, writer?: $protobuf.Writer): $protobuf.Writer; + /** + * Encodes the specified VDiffReportOptions message. Does not implicitly {@link tabletmanagerdata.VDiffReportOptions.verify|verify} messages. 
+ * @param message VDiffReportOptions message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: tabletmanagerdata.IVDiffReportOptions, writer?: $protobuf.Writer): $protobuf.Writer; - /** - * Encodes the specified Statement message, length delimited. Does not implicitly {@link binlogdata.BinlogTransaction.Statement.verify|verify} messages. - * @param message Statement message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encodeDelimited(message: binlogdata.BinlogTransaction.IStatement, writer?: $protobuf.Writer): $protobuf.Writer; + /** + * Encodes the specified VDiffReportOptions message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffReportOptions.verify|verify} messages. + * @param message VDiffReportOptions message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: tabletmanagerdata.IVDiffReportOptions, writer?: $protobuf.Writer): $protobuf.Writer; - /** - * Decodes a Statement message from the specified reader or buffer. - * @param reader Reader or buffer to decode from - * @param [length] Message length if known beforehand - * @returns Statement - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.BinlogTransaction.Statement; + /** + * Decodes a VDiffReportOptions message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns VDiffReportOptions + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.VDiffReportOptions; - /** - * Decodes a Statement message from the specified reader or buffer, length delimited. - * @param reader Reader or buffer to decode from - * @returns Statement - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.BinlogTransaction.Statement; + /** + * Decodes a VDiffReportOptions message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns VDiffReportOptions + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.VDiffReportOptions; - /** - * Verifies a Statement message. - * @param message Plain object to verify - * @returns `null` if valid, otherwise the reason why it is not - */ - public static verify(message: { [k: string]: any }): (string|null); + /** + * Verifies a VDiffReportOptions message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); - /** - * Creates a Statement message from a plain object. Also converts values to their respective internal types. 
- * @param object Plain object - * @returns Statement - */ - public static fromObject(object: { [k: string]: any }): binlogdata.BinlogTransaction.Statement; + /** + * Creates a VDiffReportOptions message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns VDiffReportOptions + */ + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.VDiffReportOptions; - /** - * Creates a plain object from a Statement message. Also converts values to other types if specified. - * @param message Statement - * @param [options] Conversion options - * @returns Plain object - */ - public static toObject(message: binlogdata.BinlogTransaction.Statement, options?: $protobuf.IConversionOptions): { [k: string]: any }; + /** + * Creates a plain object from a VDiffReportOptions message. Also converts values to other types if specified. + * @param message VDiffReportOptions + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: tabletmanagerdata.VDiffReportOptions, options?: $protobuf.IConversionOptions): { [k: string]: any }; - /** - * Converts this Statement to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; + /** + * Converts this VDiffReportOptions to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; - /** - * Gets the default type url for Statement - * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns The default type url - */ - public static getTypeUrl(typeUrlPrefix?: string): string; - } + /** + * Gets the default type url for VDiffReportOptions + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } - namespace Statement { + /** Properties of a VDiffCoreOptions. */ + interface IVDiffCoreOptions { - /** Category enum. 
*/ - enum Category { - BL_UNRECOGNIZED = 0, - BL_BEGIN = 1, - BL_COMMIT = 2, - BL_ROLLBACK = 3, - BL_DML_DEPRECATED = 4, - BL_DDL = 5, - BL_SET = 6, - BL_INSERT = 7, - BL_UPDATE = 8, - BL_DELETE = 9 - } - } - } + /** VDiffCoreOptions tables */ + tables?: (string|null); - /** Properties of a StreamKeyRangeRequest. */ - interface IStreamKeyRangeRequest { + /** VDiffCoreOptions auto_retry */ + auto_retry?: (boolean|null); - /** StreamKeyRangeRequest position */ - position?: (string|null); + /** VDiffCoreOptions max_rows */ + max_rows?: (number|Long|null); - /** StreamKeyRangeRequest key_range */ - key_range?: (topodata.IKeyRange|null); + /** VDiffCoreOptions checksum */ + checksum?: (boolean|null); - /** StreamKeyRangeRequest charset */ - charset?: (binlogdata.ICharset|null); + /** VDiffCoreOptions sample_pct */ + sample_pct?: (number|Long|null); + + /** VDiffCoreOptions timeout_seconds */ + timeout_seconds?: (number|Long|null); + + /** VDiffCoreOptions max_extra_rows_to_compare */ + max_extra_rows_to_compare?: (number|Long|null); + + /** VDiffCoreOptions update_table_stats */ + update_table_stats?: (boolean|null); } - /** Represents a StreamKeyRangeRequest. */ - class StreamKeyRangeRequest implements IStreamKeyRangeRequest { + /** Represents a VDiffCoreOptions. */ + class VDiffCoreOptions implements IVDiffCoreOptions { /** - * Constructs a new StreamKeyRangeRequest. + * Constructs a new VDiffCoreOptions. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.IStreamKeyRangeRequest); + constructor(properties?: tabletmanagerdata.IVDiffCoreOptions); - /** StreamKeyRangeRequest position. */ - public position: string; + /** VDiffCoreOptions tables. */ + public tables: string; - /** StreamKeyRangeRequest key_range. */ - public key_range?: (topodata.IKeyRange|null); + /** VDiffCoreOptions auto_retry. */ + public auto_retry: boolean; - /** StreamKeyRangeRequest charset. 
*/ - public charset?: (binlogdata.ICharset|null); + /** VDiffCoreOptions max_rows. */ + public max_rows: (number|Long); + + /** VDiffCoreOptions checksum. */ + public checksum: boolean; + + /** VDiffCoreOptions sample_pct. */ + public sample_pct: (number|Long); + + /** VDiffCoreOptions timeout_seconds. */ + public timeout_seconds: (number|Long); + + /** VDiffCoreOptions max_extra_rows_to_compare. */ + public max_extra_rows_to_compare: (number|Long); + + /** VDiffCoreOptions update_table_stats. */ + public update_table_stats: boolean; /** - * Creates a new StreamKeyRangeRequest instance using the specified properties. + * Creates a new VDiffCoreOptions instance using the specified properties. * @param [properties] Properties to set - * @returns StreamKeyRangeRequest instance + * @returns VDiffCoreOptions instance */ - public static create(properties?: binlogdata.IStreamKeyRangeRequest): binlogdata.StreamKeyRangeRequest; + public static create(properties?: tabletmanagerdata.IVDiffCoreOptions): tabletmanagerdata.VDiffCoreOptions; /** - * Encodes the specified StreamKeyRangeRequest message. Does not implicitly {@link binlogdata.StreamKeyRangeRequest.verify|verify} messages. - * @param message StreamKeyRangeRequest message or plain object to encode + * Encodes the specified VDiffCoreOptions message. Does not implicitly {@link tabletmanagerdata.VDiffCoreOptions.verify|verify} messages. + * @param message VDiffCoreOptions message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IStreamKeyRangeRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: tabletmanagerdata.IVDiffCoreOptions, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified StreamKeyRangeRequest message, length delimited. Does not implicitly {@link binlogdata.StreamKeyRangeRequest.verify|verify} messages. 
- * @param message StreamKeyRangeRequest message or plain object to encode + * Encodes the specified VDiffCoreOptions message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffCoreOptions.verify|verify} messages. + * @param message VDiffCoreOptions message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IStreamKeyRangeRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: tabletmanagerdata.IVDiffCoreOptions, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a StreamKeyRangeRequest message from the specified reader or buffer. + * Decodes a VDiffCoreOptions message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns StreamKeyRangeRequest + * @returns VDiffCoreOptions * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.StreamKeyRangeRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.VDiffCoreOptions; /** - * Decodes a StreamKeyRangeRequest message from the specified reader or buffer, length delimited. + * Decodes a VDiffCoreOptions message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns StreamKeyRangeRequest + * @returns VDiffCoreOptions * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.StreamKeyRangeRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.VDiffCoreOptions; /** - * Verifies a StreamKeyRangeRequest message. + * Verifies a VDiffCoreOptions message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a StreamKeyRangeRequest message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffCoreOptions message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns StreamKeyRangeRequest + * @returns VDiffCoreOptions */ - public static fromObject(object: { [k: string]: any }): binlogdata.StreamKeyRangeRequest; + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.VDiffCoreOptions; /** - * Creates a plain object from a StreamKeyRangeRequest message. Also converts values to other types if specified. - * @param message StreamKeyRangeRequest + * Creates a plain object from a VDiffCoreOptions message. Also converts values to other types if specified. + * @param message VDiffCoreOptions * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.StreamKeyRangeRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: tabletmanagerdata.VDiffCoreOptions, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this StreamKeyRangeRequest to JSON. + * Converts this VDiffCoreOptions to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for StreamKeyRangeRequest + * Gets the default type url for VDiffCoreOptions * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a StreamKeyRangeResponse. */ - interface IStreamKeyRangeResponse { + /** Properties of a VDiffOptions. */ + interface IVDiffOptions { - /** StreamKeyRangeResponse binlog_transaction */ - binlog_transaction?: (binlogdata.IBinlogTransaction|null); + /** VDiffOptions picker_options */ + picker_options?: (tabletmanagerdata.IVDiffPickerOptions|null); + + /** VDiffOptions core_options */ + core_options?: (tabletmanagerdata.IVDiffCoreOptions|null); + + /** VDiffOptions report_options */ + report_options?: (tabletmanagerdata.IVDiffReportOptions|null); } - /** Represents a StreamKeyRangeResponse. */ - class StreamKeyRangeResponse implements IStreamKeyRangeResponse { + /** Represents a VDiffOptions. */ + class VDiffOptions implements IVDiffOptions { /** - * Constructs a new StreamKeyRangeResponse. + * Constructs a new VDiffOptions. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.IStreamKeyRangeResponse); + constructor(properties?: tabletmanagerdata.IVDiffOptions); - /** StreamKeyRangeResponse binlog_transaction. */ - public binlog_transaction?: (binlogdata.IBinlogTransaction|null); + /** VDiffOptions picker_options. */ + public picker_options?: (tabletmanagerdata.IVDiffPickerOptions|null); + + /** VDiffOptions core_options. */ + public core_options?: (tabletmanagerdata.IVDiffCoreOptions|null); + + /** VDiffOptions report_options. */ + public report_options?: (tabletmanagerdata.IVDiffReportOptions|null); /** - * Creates a new StreamKeyRangeResponse instance using the specified properties. + * Creates a new VDiffOptions instance using the specified properties. 
* @param [properties] Properties to set - * @returns StreamKeyRangeResponse instance + * @returns VDiffOptions instance */ - public static create(properties?: binlogdata.IStreamKeyRangeResponse): binlogdata.StreamKeyRangeResponse; + public static create(properties?: tabletmanagerdata.IVDiffOptions): tabletmanagerdata.VDiffOptions; /** - * Encodes the specified StreamKeyRangeResponse message. Does not implicitly {@link binlogdata.StreamKeyRangeResponse.verify|verify} messages. - * @param message StreamKeyRangeResponse message or plain object to encode + * Encodes the specified VDiffOptions message. Does not implicitly {@link tabletmanagerdata.VDiffOptions.verify|verify} messages. + * @param message VDiffOptions message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IStreamKeyRangeResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: tabletmanagerdata.IVDiffOptions, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified StreamKeyRangeResponse message, length delimited. Does not implicitly {@link binlogdata.StreamKeyRangeResponse.verify|verify} messages. - * @param message StreamKeyRangeResponse message or plain object to encode + * Encodes the specified VDiffOptions message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffOptions.verify|verify} messages. + * @param message VDiffOptions message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IStreamKeyRangeResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: tabletmanagerdata.IVDiffOptions, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a StreamKeyRangeResponse message from the specified reader or buffer. + * Decodes a VDiffOptions message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns StreamKeyRangeResponse + * @returns VDiffOptions * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.StreamKeyRangeResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.VDiffOptions; /** - * Decodes a StreamKeyRangeResponse message from the specified reader or buffer, length delimited. + * Decodes a VDiffOptions message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns StreamKeyRangeResponse + * @returns VDiffOptions * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.StreamKeyRangeResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.VDiffOptions; /** - * Verifies a StreamKeyRangeResponse message. + * Verifies a VDiffOptions message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a StreamKeyRangeResponse message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffOptions message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns StreamKeyRangeResponse + * @returns VDiffOptions */ - public static fromObject(object: { [k: string]: any }): binlogdata.StreamKeyRangeResponse; + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.VDiffOptions; /** - * Creates a plain object from a StreamKeyRangeResponse message. Also converts values to other types if specified. - * @param message StreamKeyRangeResponse + * Creates a plain object from a VDiffOptions message. Also converts values to other types if specified. + * @param message VDiffOptions * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.StreamKeyRangeResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: tabletmanagerdata.VDiffOptions, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this StreamKeyRangeResponse to JSON. + * Converts this VDiffOptions to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for StreamKeyRangeResponse + * Gets the default type url for VDiffOptions * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a StreamTablesRequest. */ - interface IStreamTablesRequest { + /** Properties of an UpdateVReplicationWorkflowRequest. 
*/ + interface IUpdateVReplicationWorkflowRequest { - /** StreamTablesRequest position */ - position?: (string|null); + /** UpdateVReplicationWorkflowRequest workflow */ + workflow?: (string|null); - /** StreamTablesRequest tables */ - tables?: (string[]|null); + /** UpdateVReplicationWorkflowRequest cells */ + cells?: (string[]|null); - /** StreamTablesRequest charset */ - charset?: (binlogdata.ICharset|null); + /** UpdateVReplicationWorkflowRequest tablet_types */ + tablet_types?: (topodata.TabletType[]|null); + + /** UpdateVReplicationWorkflowRequest tablet_selection_preference */ + tablet_selection_preference?: (tabletmanagerdata.TabletSelectionPreference|null); + + /** UpdateVReplicationWorkflowRequest on_ddl */ + on_ddl?: (binlogdata.OnDDLAction|null); + + /** UpdateVReplicationWorkflowRequest state */ + state?: (binlogdata.VReplicationWorkflowState|null); } - /** Represents a StreamTablesRequest. */ - class StreamTablesRequest implements IStreamTablesRequest { + /** Represents an UpdateVReplicationWorkflowRequest. */ + class UpdateVReplicationWorkflowRequest implements IUpdateVReplicationWorkflowRequest { /** - * Constructs a new StreamTablesRequest. + * Constructs a new UpdateVReplicationWorkflowRequest. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.IStreamTablesRequest); + constructor(properties?: tabletmanagerdata.IUpdateVReplicationWorkflowRequest); - /** StreamTablesRequest position. */ - public position: string; + /** UpdateVReplicationWorkflowRequest workflow. */ + public workflow: string; - /** StreamTablesRequest tables. */ - public tables: string[]; + /** UpdateVReplicationWorkflowRequest cells. */ + public cells: string[]; - /** StreamTablesRequest charset. */ - public charset?: (binlogdata.ICharset|null); + /** UpdateVReplicationWorkflowRequest tablet_types. */ + public tablet_types: topodata.TabletType[]; + + /** UpdateVReplicationWorkflowRequest tablet_selection_preference. 
*/ + public tablet_selection_preference: tabletmanagerdata.TabletSelectionPreference; + + /** UpdateVReplicationWorkflowRequest on_ddl. */ + public on_ddl: binlogdata.OnDDLAction; + + /** UpdateVReplicationWorkflowRequest state. */ + public state: binlogdata.VReplicationWorkflowState; /** - * Creates a new StreamTablesRequest instance using the specified properties. + * Creates a new UpdateVReplicationWorkflowRequest instance using the specified properties. * @param [properties] Properties to set - * @returns StreamTablesRequest instance + * @returns UpdateVReplicationWorkflowRequest instance */ - public static create(properties?: binlogdata.IStreamTablesRequest): binlogdata.StreamTablesRequest; + public static create(properties?: tabletmanagerdata.IUpdateVReplicationWorkflowRequest): tabletmanagerdata.UpdateVReplicationWorkflowRequest; /** - * Encodes the specified StreamTablesRequest message. Does not implicitly {@link binlogdata.StreamTablesRequest.verify|verify} messages. - * @param message StreamTablesRequest message or plain object to encode + * Encodes the specified UpdateVReplicationWorkflowRequest message. Does not implicitly {@link tabletmanagerdata.UpdateVReplicationWorkflowRequest.verify|verify} messages. + * @param message UpdateVReplicationWorkflowRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IStreamTablesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: tabletmanagerdata.IUpdateVReplicationWorkflowRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified StreamTablesRequest message, length delimited. Does not implicitly {@link binlogdata.StreamTablesRequest.verify|verify} messages. - * @param message StreamTablesRequest message or plain object to encode + * Encodes the specified UpdateVReplicationWorkflowRequest message, length delimited. 
Does not implicitly {@link tabletmanagerdata.UpdateVReplicationWorkflowRequest.verify|verify} messages. + * @param message UpdateVReplicationWorkflowRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IStreamTablesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: tabletmanagerdata.IUpdateVReplicationWorkflowRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a StreamTablesRequest message from the specified reader or buffer. + * Decodes an UpdateVReplicationWorkflowRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns StreamTablesRequest + * @returns UpdateVReplicationWorkflowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.StreamTablesRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.UpdateVReplicationWorkflowRequest; /** - * Decodes a StreamTablesRequest message from the specified reader or buffer, length delimited. + * Decodes an UpdateVReplicationWorkflowRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns StreamTablesRequest + * @returns UpdateVReplicationWorkflowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.StreamTablesRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.UpdateVReplicationWorkflowRequest; /** - * Verifies a StreamTablesRequest message. + * Verifies an UpdateVReplicationWorkflowRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a StreamTablesRequest message from a plain object. Also converts values to their respective internal types. + * Creates an UpdateVReplicationWorkflowRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns StreamTablesRequest + * @returns UpdateVReplicationWorkflowRequest */ - public static fromObject(object: { [k: string]: any }): binlogdata.StreamTablesRequest; + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.UpdateVReplicationWorkflowRequest; /** - * Creates a plain object from a StreamTablesRequest message. Also converts values to other types if specified. - * @param message StreamTablesRequest + * Creates a plain object from an UpdateVReplicationWorkflowRequest message. Also converts values to other types if specified. 
+ * @param message UpdateVReplicationWorkflowRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.StreamTablesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: tabletmanagerdata.UpdateVReplicationWorkflowRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this StreamTablesRequest to JSON. + * Converts this UpdateVReplicationWorkflowRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for StreamTablesRequest + * Gets the default type url for UpdateVReplicationWorkflowRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a StreamTablesResponse. */ - interface IStreamTablesResponse { + /** Properties of an UpdateVReplicationWorkflowResponse. */ + interface IUpdateVReplicationWorkflowResponse { - /** StreamTablesResponse binlog_transaction */ - binlog_transaction?: (binlogdata.IBinlogTransaction|null); + /** UpdateVReplicationWorkflowResponse result */ + result?: (query.IQueryResult|null); } - /** Represents a StreamTablesResponse. */ - class StreamTablesResponse implements IStreamTablesResponse { + /** Represents an UpdateVReplicationWorkflowResponse. */ + class UpdateVReplicationWorkflowResponse implements IUpdateVReplicationWorkflowResponse { /** - * Constructs a new StreamTablesResponse. + * Constructs a new UpdateVReplicationWorkflowResponse. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.IStreamTablesResponse); + constructor(properties?: tabletmanagerdata.IUpdateVReplicationWorkflowResponse); - /** StreamTablesResponse binlog_transaction. */ - public binlog_transaction?: (binlogdata.IBinlogTransaction|null); + /** UpdateVReplicationWorkflowResponse result. 
*/ + public result?: (query.IQueryResult|null); /** - * Creates a new StreamTablesResponse instance using the specified properties. + * Creates a new UpdateVReplicationWorkflowResponse instance using the specified properties. * @param [properties] Properties to set - * @returns StreamTablesResponse instance + * @returns UpdateVReplicationWorkflowResponse instance */ - public static create(properties?: binlogdata.IStreamTablesResponse): binlogdata.StreamTablesResponse; + public static create(properties?: tabletmanagerdata.IUpdateVReplicationWorkflowResponse): tabletmanagerdata.UpdateVReplicationWorkflowResponse; /** - * Encodes the specified StreamTablesResponse message. Does not implicitly {@link binlogdata.StreamTablesResponse.verify|verify} messages. - * @param message StreamTablesResponse message or plain object to encode + * Encodes the specified UpdateVReplicationWorkflowResponse message. Does not implicitly {@link tabletmanagerdata.UpdateVReplicationWorkflowResponse.verify|verify} messages. + * @param message UpdateVReplicationWorkflowResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IStreamTablesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: tabletmanagerdata.IUpdateVReplicationWorkflowResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified StreamTablesResponse message, length delimited. Does not implicitly {@link binlogdata.StreamTablesResponse.verify|verify} messages. - * @param message StreamTablesResponse message or plain object to encode + * Encodes the specified UpdateVReplicationWorkflowResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.UpdateVReplicationWorkflowResponse.verify|verify} messages. 
+ * @param message UpdateVReplicationWorkflowResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IStreamTablesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: tabletmanagerdata.IUpdateVReplicationWorkflowResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a StreamTablesResponse message from the specified reader or buffer. + * Decodes an UpdateVReplicationWorkflowResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns StreamTablesResponse + * @returns UpdateVReplicationWorkflowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.StreamTablesResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.UpdateVReplicationWorkflowResponse; /** - * Decodes a StreamTablesResponse message from the specified reader or buffer, length delimited. + * Decodes an UpdateVReplicationWorkflowResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns StreamTablesResponse + * @returns UpdateVReplicationWorkflowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.StreamTablesResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.UpdateVReplicationWorkflowResponse; /** - * Verifies a StreamTablesResponse message. + * Verifies an UpdateVReplicationWorkflowResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a StreamTablesResponse message from a plain object. Also converts values to their respective internal types. + * Creates an UpdateVReplicationWorkflowResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns StreamTablesResponse + * @returns UpdateVReplicationWorkflowResponse */ - public static fromObject(object: { [k: string]: any }): binlogdata.StreamTablesResponse; + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.UpdateVReplicationWorkflowResponse; /** - * Creates a plain object from a StreamTablesResponse message. Also converts values to other types if specified. - * @param message StreamTablesResponse + * Creates a plain object from an UpdateVReplicationWorkflowResponse message. Also converts values to other types if specified. + * @param message UpdateVReplicationWorkflowResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.StreamTablesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: tabletmanagerdata.UpdateVReplicationWorkflowResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this StreamTablesResponse to JSON. + * Converts this UpdateVReplicationWorkflowResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for StreamTablesResponse + * Gets the default type url for UpdateVReplicationWorkflowResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a CharsetConversion. 
*/ - interface ICharsetConversion { - - /** CharsetConversion from_charset */ - from_charset?: (string|null); + /** Properties of a ResetSequencesRequest. */ + interface IResetSequencesRequest { - /** CharsetConversion to_charset */ - to_charset?: (string|null); + /** ResetSequencesRequest tables */ + tables?: (string[]|null); } - /** Represents a CharsetConversion. */ - class CharsetConversion implements ICharsetConversion { + /** Represents a ResetSequencesRequest. */ + class ResetSequencesRequest implements IResetSequencesRequest { /** - * Constructs a new CharsetConversion. + * Constructs a new ResetSequencesRequest. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.ICharsetConversion); - - /** CharsetConversion from_charset. */ - public from_charset: string; + constructor(properties?: tabletmanagerdata.IResetSequencesRequest); - /** CharsetConversion to_charset. */ - public to_charset: string; + /** ResetSequencesRequest tables. */ + public tables: string[]; /** - * Creates a new CharsetConversion instance using the specified properties. + * Creates a new ResetSequencesRequest instance using the specified properties. * @param [properties] Properties to set - * @returns CharsetConversion instance + * @returns ResetSequencesRequest instance */ - public static create(properties?: binlogdata.ICharsetConversion): binlogdata.CharsetConversion; + public static create(properties?: tabletmanagerdata.IResetSequencesRequest): tabletmanagerdata.ResetSequencesRequest; /** - * Encodes the specified CharsetConversion message. Does not implicitly {@link binlogdata.CharsetConversion.verify|verify} messages. - * @param message CharsetConversion message or plain object to encode + * Encodes the specified ResetSequencesRequest message. Does not implicitly {@link tabletmanagerdata.ResetSequencesRequest.verify|verify} messages. 
+ * @param message ResetSequencesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.ICharsetConversion, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: tabletmanagerdata.IResetSequencesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified CharsetConversion message, length delimited. Does not implicitly {@link binlogdata.CharsetConversion.verify|verify} messages. - * @param message CharsetConversion message or plain object to encode + * Encodes the specified ResetSequencesRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.ResetSequencesRequest.verify|verify} messages. + * @param message ResetSequencesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.ICharsetConversion, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: tabletmanagerdata.IResetSequencesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a CharsetConversion message from the specified reader or buffer. + * Decodes a ResetSequencesRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns CharsetConversion + * @returns ResetSequencesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.CharsetConversion; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.ResetSequencesRequest; /** - * Decodes a CharsetConversion message from the specified reader or buffer, length delimited. 
+ * Decodes a ResetSequencesRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns CharsetConversion + * @returns ResetSequencesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.CharsetConversion; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.ResetSequencesRequest; /** - * Verifies a CharsetConversion message. + * Verifies a ResetSequencesRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a CharsetConversion message from a plain object. Also converts values to their respective internal types. + * Creates a ResetSequencesRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns CharsetConversion + * @returns ResetSequencesRequest */ - public static fromObject(object: { [k: string]: any }): binlogdata.CharsetConversion; + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.ResetSequencesRequest; /** - * Creates a plain object from a CharsetConversion message. Also converts values to other types if specified. - * @param message CharsetConversion + * Creates a plain object from a ResetSequencesRequest message. Also converts values to other types if specified. 
+ * @param message ResetSequencesRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.CharsetConversion, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: tabletmanagerdata.ResetSequencesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this CharsetConversion to JSON. + * Converts this ResetSequencesRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for CharsetConversion + * Gets the default type url for ResetSequencesRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a Rule. */ - interface IRule { - - /** Rule match */ - match?: (string|null); - - /** Rule filter */ - filter?: (string|null); - - /** Rule convert_enum_to_text */ - convert_enum_to_text?: ({ [k: string]: string }|null); - - /** Rule convert_charset */ - convert_charset?: ({ [k: string]: binlogdata.ICharsetConversion }|null); - - /** Rule source_unique_key_columns */ - source_unique_key_columns?: (string|null); - - /** Rule target_unique_key_columns */ - target_unique_key_columns?: (string|null); - - /** Rule source_unique_key_target_columns */ - source_unique_key_target_columns?: (string|null); - - /** Rule convert_int_to_enum */ - convert_int_to_enum?: ({ [k: string]: boolean }|null); + /** Properties of a ResetSequencesResponse. */ + interface IResetSequencesResponse { } - /** Represents a Rule. */ - class Rule implements IRule { + /** Represents a ResetSequencesResponse. */ + class ResetSequencesResponse implements IResetSequencesResponse { /** - * Constructs a new Rule. + * Constructs a new ResetSequencesResponse. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.IRule); - - /** Rule match. 
*/ - public match: string; - - /** Rule filter. */ - public filter: string; - - /** Rule convert_enum_to_text. */ - public convert_enum_to_text: { [k: string]: string }; - - /** Rule convert_charset. */ - public convert_charset: { [k: string]: binlogdata.ICharsetConversion }; - - /** Rule source_unique_key_columns. */ - public source_unique_key_columns: string; - - /** Rule target_unique_key_columns. */ - public target_unique_key_columns: string; - - /** Rule source_unique_key_target_columns. */ - public source_unique_key_target_columns: string; - - /** Rule convert_int_to_enum. */ - public convert_int_to_enum: { [k: string]: boolean }; + constructor(properties?: tabletmanagerdata.IResetSequencesResponse); /** - * Creates a new Rule instance using the specified properties. + * Creates a new ResetSequencesResponse instance using the specified properties. * @param [properties] Properties to set - * @returns Rule instance + * @returns ResetSequencesResponse instance */ - public static create(properties?: binlogdata.IRule): binlogdata.Rule; + public static create(properties?: tabletmanagerdata.IResetSequencesResponse): tabletmanagerdata.ResetSequencesResponse; /** - * Encodes the specified Rule message. Does not implicitly {@link binlogdata.Rule.verify|verify} messages. - * @param message Rule message or plain object to encode + * Encodes the specified ResetSequencesResponse message. Does not implicitly {@link tabletmanagerdata.ResetSequencesResponse.verify|verify} messages. + * @param message ResetSequencesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IRule, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: tabletmanagerdata.IResetSequencesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified Rule message, length delimited. Does not implicitly {@link binlogdata.Rule.verify|verify} messages. 
- * @param message Rule message or plain object to encode + * Encodes the specified ResetSequencesResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.ResetSequencesResponse.verify|verify} messages. + * @param message ResetSequencesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IRule, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: tabletmanagerdata.IResetSequencesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a Rule message from the specified reader or buffer. + * Decodes a ResetSequencesResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns Rule + * @returns ResetSequencesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.Rule; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.ResetSequencesResponse; /** - * Decodes a Rule message from the specified reader or buffer, length delimited. + * Decodes a ResetSequencesResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns Rule + * @returns ResetSequencesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.Rule; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.ResetSequencesResponse; /** - * Verifies a Rule message. + * Verifies a ResetSequencesResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a Rule message from a plain object. Also converts values to their respective internal types. + * Creates a ResetSequencesResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns Rule + * @returns ResetSequencesResponse */ - public static fromObject(object: { [k: string]: any }): binlogdata.Rule; + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.ResetSequencesResponse; /** - * Creates a plain object from a Rule message. Also converts values to other types if specified. - * @param message Rule + * Creates a plain object from a ResetSequencesResponse message. Also converts values to other types if specified. + * @param message ResetSequencesResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.Rule, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: tabletmanagerdata.ResetSequencesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this Rule to JSON. + * Converts this ResetSequencesResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for Rule + * Gets the default type url for ResetSequencesResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a Filter. 
*/ - interface IFilter { - - /** Filter rules */ - rules?: (binlogdata.IRule[]|null); - - /** Filter field_event_mode */ - field_event_mode?: (binlogdata.Filter.FieldEventMode|null); - - /** Filter workflow_type */ - workflow_type?: (number|Long|null); + /** Properties of a CheckThrottlerRequest. */ + interface ICheckThrottlerRequest { - /** Filter workflow_name */ - workflow_name?: (string|null); + /** CheckThrottlerRequest app_name */ + app_name?: (string|null); } - /** Represents a Filter. */ - class Filter implements IFilter { + /** Represents a CheckThrottlerRequest. */ + class CheckThrottlerRequest implements ICheckThrottlerRequest { /** - * Constructs a new Filter. + * Constructs a new CheckThrottlerRequest. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.IFilter); - - /** Filter rules. */ - public rules: binlogdata.IRule[]; - - /** Filter field_event_mode. */ - public field_event_mode: binlogdata.Filter.FieldEventMode; - - /** Filter workflow_type. */ - public workflow_type: (number|Long); + constructor(properties?: tabletmanagerdata.ICheckThrottlerRequest); - /** Filter workflow_name. */ - public workflow_name: string; + /** CheckThrottlerRequest app_name. */ + public app_name: string; /** - * Creates a new Filter instance using the specified properties. + * Creates a new CheckThrottlerRequest instance using the specified properties. * @param [properties] Properties to set - * @returns Filter instance + * @returns CheckThrottlerRequest instance */ - public static create(properties?: binlogdata.IFilter): binlogdata.Filter; + public static create(properties?: tabletmanagerdata.ICheckThrottlerRequest): tabletmanagerdata.CheckThrottlerRequest; /** - * Encodes the specified Filter message. Does not implicitly {@link binlogdata.Filter.verify|verify} messages. - * @param message Filter message or plain object to encode + * Encodes the specified CheckThrottlerRequest message. 
Does not implicitly {@link tabletmanagerdata.CheckThrottlerRequest.verify|verify} messages. + * @param message CheckThrottlerRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IFilter, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: tabletmanagerdata.ICheckThrottlerRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified Filter message, length delimited. Does not implicitly {@link binlogdata.Filter.verify|verify} messages. - * @param message Filter message or plain object to encode + * Encodes the specified CheckThrottlerRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.CheckThrottlerRequest.verify|verify} messages. + * @param message CheckThrottlerRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IFilter, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: tabletmanagerdata.ICheckThrottlerRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a Filter message from the specified reader or buffer. + * Decodes a CheckThrottlerRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns Filter + * @returns CheckThrottlerRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.Filter; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.CheckThrottlerRequest; /** - * Decodes a Filter message from the specified reader or buffer, length delimited. 
+ * Decodes a CheckThrottlerRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns Filter + * @returns CheckThrottlerRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.Filter; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.CheckThrottlerRequest; /** - * Verifies a Filter message. + * Verifies a CheckThrottlerRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a Filter message from a plain object. Also converts values to their respective internal types. + * Creates a CheckThrottlerRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns Filter + * @returns CheckThrottlerRequest */ - public static fromObject(object: { [k: string]: any }): binlogdata.Filter; + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.CheckThrottlerRequest; /** - * Creates a plain object from a Filter message. Also converts values to other types if specified. - * @param message Filter + * Creates a plain object from a CheckThrottlerRequest message. Also converts values to other types if specified. + * @param message CheckThrottlerRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.Filter, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: tabletmanagerdata.CheckThrottlerRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this Filter to JSON. + * Converts this CheckThrottlerRequest to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for Filter + * Gets the default type url for CheckThrottlerRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - namespace Filter { + /** Properties of a CheckThrottlerResponse. */ + interface ICheckThrottlerResponse { - /** FieldEventMode enum. */ - enum FieldEventMode { - ERR_ON_MISMATCH = 0, - BEST_EFFORT = 1 - } - } + /** CheckThrottlerResponse status_code */ + status_code?: (number|null); - /** OnDDLAction enum. */ - enum OnDDLAction { - IGNORE = 0, - STOP = 1, - EXEC = 2, - EXEC_IGNORE = 3 - } + /** CheckThrottlerResponse value */ + value?: (number|null); - /** VReplicationWorkflowType enum. */ - enum VReplicationWorkflowType { - Materialize = 0, - MoveTables = 1, - CreateLookupIndex = 2, - Migrate = 3, - Reshard = 4, - OnlineDDL = 5 - } + /** CheckThrottlerResponse threshold */ + threshold?: (number|null); - /** VReplicationWorkflowSubType enum. */ - enum VReplicationWorkflowSubType { - None = 0, - Partial = 1 + /** CheckThrottlerResponse error */ + error?: (string|null); + + /** CheckThrottlerResponse message */ + message?: (string|null); + + /** CheckThrottlerResponse recently_checked */ + recently_checked?: (boolean|null); } - /** Properties of a BinlogSource. */ - interface IBinlogSource { + /** Represents a CheckThrottlerResponse. */ + class CheckThrottlerResponse implements ICheckThrottlerResponse { - /** BinlogSource keyspace */ - keyspace?: (string|null); + /** + * Constructs a new CheckThrottlerResponse. + * @param [properties] Properties to set + */ + constructor(properties?: tabletmanagerdata.ICheckThrottlerResponse); - /** BinlogSource shard */ - shard?: (string|null); + /** CheckThrottlerResponse status_code. 
*/ + public status_code: number; - /** BinlogSource tablet_type */ - tablet_type?: (topodata.TabletType|null); + /** CheckThrottlerResponse value. */ + public value: number; - /** BinlogSource key_range */ - key_range?: (topodata.IKeyRange|null); + /** CheckThrottlerResponse threshold. */ + public threshold: number; - /** BinlogSource tables */ - tables?: (string[]|null); + /** CheckThrottlerResponse error. */ + public error: string; - /** BinlogSource filter */ - filter?: (binlogdata.IFilter|null); + /** CheckThrottlerResponse message. */ + public message: string; - /** BinlogSource on_ddl */ - on_ddl?: (binlogdata.OnDDLAction|null); + /** CheckThrottlerResponse recently_checked. */ + public recently_checked: boolean; - /** BinlogSource external_mysql */ - external_mysql?: (string|null); + /** + * Creates a new CheckThrottlerResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns CheckThrottlerResponse instance + */ + public static create(properties?: tabletmanagerdata.ICheckThrottlerResponse): tabletmanagerdata.CheckThrottlerResponse; - /** BinlogSource stop_after_copy */ - stop_after_copy?: (boolean|null); + /** + * Encodes the specified CheckThrottlerResponse message. Does not implicitly {@link tabletmanagerdata.CheckThrottlerResponse.verify|verify} messages. + * @param message CheckThrottlerResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: tabletmanagerdata.ICheckThrottlerResponse, writer?: $protobuf.Writer): $protobuf.Writer; - /** BinlogSource external_cluster */ - external_cluster?: (string|null); + /** + * Encodes the specified CheckThrottlerResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.CheckThrottlerResponse.verify|verify} messages. 
+ * @param message CheckThrottlerResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: tabletmanagerdata.ICheckThrottlerResponse, writer?: $protobuf.Writer): $protobuf.Writer; - /** BinlogSource source_time_zone */ - source_time_zone?: (string|null); + /** + * Decodes a CheckThrottlerResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns CheckThrottlerResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.CheckThrottlerResponse; - /** BinlogSource target_time_zone */ - target_time_zone?: (string|null); - } + /** + * Decodes a CheckThrottlerResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns CheckThrottlerResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.CheckThrottlerResponse; - /** Represents a BinlogSource. */ - class BinlogSource implements IBinlogSource { + /** + * Verifies a CheckThrottlerResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); /** - * Constructs a new BinlogSource. - * @param [properties] Properties to set + * Creates a CheckThrottlerResponse message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns CheckThrottlerResponse */ - constructor(properties?: binlogdata.IBinlogSource); + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.CheckThrottlerResponse; - /** BinlogSource keyspace. */ - public keyspace: string; + /** + * Creates a plain object from a CheckThrottlerResponse message. Also converts values to other types if specified. + * @param message CheckThrottlerResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: tabletmanagerdata.CheckThrottlerResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; - /** BinlogSource shard. */ - public shard: string; + /** + * Converts this CheckThrottlerResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; - /** BinlogSource tablet_type. */ - public tablet_type: topodata.TabletType; + /** + * Gets the default type url for CheckThrottlerResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } +} - /** BinlogSource key_range. */ - public key_range?: (topodata.IKeyRange|null); +/** Namespace binlogdata. */ +export namespace binlogdata { - /** BinlogSource tables. */ - public tables: string[]; + /** Properties of a Charset. */ + interface ICharset { - /** BinlogSource filter. */ - public filter?: (binlogdata.IFilter|null); + /** Charset client */ + client?: (number|null); - /** BinlogSource on_ddl. */ - public on_ddl: binlogdata.OnDDLAction; + /** Charset conn */ + conn?: (number|null); - /** BinlogSource external_mysql. */ - public external_mysql: string; + /** Charset server */ + server?: (number|null); + } - /** BinlogSource stop_after_copy. */ - public stop_after_copy: boolean; + /** Represents a Charset. */ + class Charset implements ICharset { - /** BinlogSource external_cluster. 
*/ - public external_cluster: string; + /** + * Constructs a new Charset. + * @param [properties] Properties to set + */ + constructor(properties?: binlogdata.ICharset); - /** BinlogSource source_time_zone. */ - public source_time_zone: string; + /** Charset client. */ + public client: number; - /** BinlogSource target_time_zone. */ - public target_time_zone: string; + /** Charset conn. */ + public conn: number; + + /** Charset server. */ + public server: number; /** - * Creates a new BinlogSource instance using the specified properties. + * Creates a new Charset instance using the specified properties. * @param [properties] Properties to set - * @returns BinlogSource instance + * @returns Charset instance */ - public static create(properties?: binlogdata.IBinlogSource): binlogdata.BinlogSource; + public static create(properties?: binlogdata.ICharset): binlogdata.Charset; /** - * Encodes the specified BinlogSource message. Does not implicitly {@link binlogdata.BinlogSource.verify|verify} messages. - * @param message BinlogSource message or plain object to encode + * Encodes the specified Charset message. Does not implicitly {@link binlogdata.Charset.verify|verify} messages. + * @param message Charset message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IBinlogSource, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.ICharset, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified BinlogSource message, length delimited. Does not implicitly {@link binlogdata.BinlogSource.verify|verify} messages. - * @param message BinlogSource message or plain object to encode + * Encodes the specified Charset message, length delimited. Does not implicitly {@link binlogdata.Charset.verify|verify} messages. 
+ * @param message Charset message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IBinlogSource, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.ICharset, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a BinlogSource message from the specified reader or buffer. + * Decodes a Charset message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns BinlogSource + * @returns Charset * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.BinlogSource; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.Charset; /** - * Decodes a BinlogSource message from the specified reader or buffer, length delimited. + * Decodes a Charset message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns BinlogSource + * @returns Charset * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.BinlogSource; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.Charset; /** - * Verifies a BinlogSource message. + * Verifies a Charset message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a BinlogSource message from a plain object. Also converts values to their respective internal types. + * Creates a Charset message from a plain object. 
Also converts values to their respective internal types. * @param object Plain object - * @returns BinlogSource + * @returns Charset */ - public static fromObject(object: { [k: string]: any }): binlogdata.BinlogSource; + public static fromObject(object: { [k: string]: any }): binlogdata.Charset; /** - * Creates a plain object from a BinlogSource message. Also converts values to other types if specified. - * @param message BinlogSource + * Creates a plain object from a Charset message. Also converts values to other types if specified. + * @param message Charset * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.BinlogSource, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.Charset, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this BinlogSource to JSON. + * Converts this Charset to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for BinlogSource + * Gets the default type url for Charset * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** VEventType enum. */ - enum VEventType { - UNKNOWN = 0, - GTID = 1, - BEGIN = 2, - COMMIT = 3, - ROLLBACK = 4, - DDL = 5, - INSERT = 6, - REPLACE = 7, - UPDATE = 8, - DELETE = 9, - SET = 10, - OTHER = 11, - ROW = 12, - FIELD = 13, - HEARTBEAT = 14, - VGTID = 15, - JOURNAL = 16, - VERSION = 17, - LASTPK = 18, - SAVEPOINT = 19, - COPY_COMPLETED = 20 - } - - /** Properties of a RowChange. */ - interface IRowChange { - - /** RowChange before */ - before?: (query.IRow|null); + /** Properties of a BinlogTransaction. 
*/ + interface IBinlogTransaction { - /** RowChange after */ - after?: (query.IRow|null); + /** BinlogTransaction statements */ + statements?: (binlogdata.BinlogTransaction.IStatement[]|null); - /** RowChange data_columns */ - data_columns?: (binlogdata.RowChange.IBitmap|null); + /** BinlogTransaction event_token */ + event_token?: (query.IEventToken|null); } - /** Represents a RowChange. */ - class RowChange implements IRowChange { + /** Represents a BinlogTransaction. */ + class BinlogTransaction implements IBinlogTransaction { /** - * Constructs a new RowChange. + * Constructs a new BinlogTransaction. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.IRowChange); - - /** RowChange before. */ - public before?: (query.IRow|null); + constructor(properties?: binlogdata.IBinlogTransaction); - /** RowChange after. */ - public after?: (query.IRow|null); + /** BinlogTransaction statements. */ + public statements: binlogdata.BinlogTransaction.IStatement[]; - /** RowChange data_columns. */ - public data_columns?: (binlogdata.RowChange.IBitmap|null); + /** BinlogTransaction event_token. */ + public event_token?: (query.IEventToken|null); /** - * Creates a new RowChange instance using the specified properties. + * Creates a new BinlogTransaction instance using the specified properties. * @param [properties] Properties to set - * @returns RowChange instance + * @returns BinlogTransaction instance */ - public static create(properties?: binlogdata.IRowChange): binlogdata.RowChange; + public static create(properties?: binlogdata.IBinlogTransaction): binlogdata.BinlogTransaction; /** - * Encodes the specified RowChange message. Does not implicitly {@link binlogdata.RowChange.verify|verify} messages. - * @param message RowChange message or plain object to encode + * Encodes the specified BinlogTransaction message. Does not implicitly {@link binlogdata.BinlogTransaction.verify|verify} messages. 
+ * @param message BinlogTransaction message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IRowChange, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IBinlogTransaction, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RowChange message, length delimited. Does not implicitly {@link binlogdata.RowChange.verify|verify} messages. - * @param message RowChange message or plain object to encode + * Encodes the specified BinlogTransaction message, length delimited. Does not implicitly {@link binlogdata.BinlogTransaction.verify|verify} messages. + * @param message BinlogTransaction message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IRowChange, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IBinlogTransaction, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RowChange message from the specified reader or buffer. + * Decodes a BinlogTransaction message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RowChange + * @returns BinlogTransaction * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.RowChange; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.BinlogTransaction; /** - * Decodes a RowChange message from the specified reader or buffer, length delimited. + * Decodes a BinlogTransaction message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns RowChange + * @returns BinlogTransaction * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.RowChange; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.BinlogTransaction; /** - * Verifies a RowChange message. + * Verifies a BinlogTransaction message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RowChange message from a plain object. Also converts values to their respective internal types. + * Creates a BinlogTransaction message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns RowChange + * @returns BinlogTransaction */ - public static fromObject(object: { [k: string]: any }): binlogdata.RowChange; + public static fromObject(object: { [k: string]: any }): binlogdata.BinlogTransaction; /** - * Creates a plain object from a RowChange message. Also converts values to other types if specified. - * @param message RowChange + * Creates a plain object from a BinlogTransaction message. Also converts values to other types if specified. + * @param message BinlogTransaction * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.RowChange, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.BinlogTransaction, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RowChange to JSON. + * Converts this BinlogTransaction to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RowChange + * Gets the default type url for BinlogTransaction * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - namespace RowChange { + namespace BinlogTransaction { - /** Properties of a Bitmap. */ - interface IBitmap { + /** Properties of a Statement. */ + interface IStatement { - /** Bitmap count */ - count?: (number|Long|null); + /** Statement category */ + category?: (binlogdata.BinlogTransaction.Statement.Category|null); - /** Bitmap cols */ - cols?: (Uint8Array|null); + /** Statement charset */ + charset?: (binlogdata.ICharset|null); + + /** Statement sql */ + sql?: (Uint8Array|null); } - /** Represents a Bitmap. */ - class Bitmap implements IBitmap { + /** Represents a Statement. */ + class Statement implements IStatement { /** - * Constructs a new Bitmap. + * Constructs a new Statement. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.RowChange.IBitmap); + constructor(properties?: binlogdata.BinlogTransaction.IStatement); - /** Bitmap count. */ - public count: (number|Long); + /** Statement category. */ + public category: binlogdata.BinlogTransaction.Statement.Category; - /** Bitmap cols. */ - public cols: Uint8Array; + /** Statement charset. */ + public charset?: (binlogdata.ICharset|null); + + /** Statement sql. */ + public sql: Uint8Array; /** - * Creates a new Bitmap instance using the specified properties. + * Creates a new Statement instance using the specified properties. 
* @param [properties] Properties to set - * @returns Bitmap instance + * @returns Statement instance */ - public static create(properties?: binlogdata.RowChange.IBitmap): binlogdata.RowChange.Bitmap; + public static create(properties?: binlogdata.BinlogTransaction.IStatement): binlogdata.BinlogTransaction.Statement; /** - * Encodes the specified Bitmap message. Does not implicitly {@link binlogdata.RowChange.Bitmap.verify|verify} messages. - * @param message Bitmap message or plain object to encode + * Encodes the specified Statement message. Does not implicitly {@link binlogdata.BinlogTransaction.Statement.verify|verify} messages. + * @param message Statement message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.RowChange.IBitmap, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.BinlogTransaction.IStatement, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified Bitmap message, length delimited. Does not implicitly {@link binlogdata.RowChange.Bitmap.verify|verify} messages. - * @param message Bitmap message or plain object to encode + * Encodes the specified Statement message, length delimited. Does not implicitly {@link binlogdata.BinlogTransaction.Statement.verify|verify} messages. + * @param message Statement message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.RowChange.IBitmap, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.BinlogTransaction.IStatement, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a Bitmap message from the specified reader or buffer. + * Decodes a Statement message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns Bitmap + * @returns Statement * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.RowChange.Bitmap; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.BinlogTransaction.Statement; /** - * Decodes a Bitmap message from the specified reader or buffer, length delimited. + * Decodes a Statement message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns Bitmap + * @returns Statement * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.RowChange.Bitmap; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.BinlogTransaction.Statement; /** - * Verifies a Bitmap message. + * Verifies a Statement message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a Bitmap message from a plain object. Also converts values to their respective internal types. + * Creates a Statement message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns Bitmap + * @returns Statement */ - public static fromObject(object: { [k: string]: any }): binlogdata.RowChange.Bitmap; + public static fromObject(object: { [k: string]: any }): binlogdata.BinlogTransaction.Statement; /** - * Creates a plain object from a Bitmap message. Also converts values to other types if specified. 
- * @param message Bitmap + * Creates a plain object from a Statement message. Also converts values to other types if specified. + * @param message Statement * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.RowChange.Bitmap, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.BinlogTransaction.Statement, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this Bitmap to JSON. + * Converts this Statement to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for Bitmap + * Gets the default type url for Statement * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - } - /** Properties of a RowEvent. */ - interface IRowEvent { + namespace Statement { - /** RowEvent table_name */ - table_name?: (string|null); + /** Category enum. */ + enum Category { + BL_UNRECOGNIZED = 0, + BL_BEGIN = 1, + BL_COMMIT = 2, + BL_ROLLBACK = 3, + BL_DML_DEPRECATED = 4, + BL_DDL = 5, + BL_SET = 6, + BL_INSERT = 7, + BL_UPDATE = 8, + BL_DELETE = 9 + } + } + } - /** RowEvent row_changes */ - row_changes?: (binlogdata.IRowChange[]|null); + /** Properties of a StreamKeyRangeRequest. */ + interface IStreamKeyRangeRequest { - /** RowEvent keyspace */ - keyspace?: (string|null); + /** StreamKeyRangeRequest position */ + position?: (string|null); - /** RowEvent shard */ - shard?: (string|null); + /** StreamKeyRangeRequest key_range */ + key_range?: (topodata.IKeyRange|null); + + /** StreamKeyRangeRequest charset */ + charset?: (binlogdata.ICharset|null); } - /** Represents a RowEvent. */ - class RowEvent implements IRowEvent { + /** Represents a StreamKeyRangeRequest. */ + class StreamKeyRangeRequest implements IStreamKeyRangeRequest { /** - * Constructs a new RowEvent. 
+ * Constructs a new StreamKeyRangeRequest. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.IRowEvent); - - /** RowEvent table_name. */ - public table_name: string; + constructor(properties?: binlogdata.IStreamKeyRangeRequest); - /** RowEvent row_changes. */ - public row_changes: binlogdata.IRowChange[]; + /** StreamKeyRangeRequest position. */ + public position: string; - /** RowEvent keyspace. */ - public keyspace: string; + /** StreamKeyRangeRequest key_range. */ + public key_range?: (topodata.IKeyRange|null); - /** RowEvent shard. */ - public shard: string; + /** StreamKeyRangeRequest charset. */ + public charset?: (binlogdata.ICharset|null); /** - * Creates a new RowEvent instance using the specified properties. + * Creates a new StreamKeyRangeRequest instance using the specified properties. * @param [properties] Properties to set - * @returns RowEvent instance + * @returns StreamKeyRangeRequest instance */ - public static create(properties?: binlogdata.IRowEvent): binlogdata.RowEvent; + public static create(properties?: binlogdata.IStreamKeyRangeRequest): binlogdata.StreamKeyRangeRequest; /** - * Encodes the specified RowEvent message. Does not implicitly {@link binlogdata.RowEvent.verify|verify} messages. - * @param message RowEvent message or plain object to encode + * Encodes the specified StreamKeyRangeRequest message. Does not implicitly {@link binlogdata.StreamKeyRangeRequest.verify|verify} messages. + * @param message StreamKeyRangeRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IRowEvent, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IStreamKeyRangeRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RowEvent message, length delimited. Does not implicitly {@link binlogdata.RowEvent.verify|verify} messages. 
- * @param message RowEvent message or plain object to encode + * Encodes the specified StreamKeyRangeRequest message, length delimited. Does not implicitly {@link binlogdata.StreamKeyRangeRequest.verify|verify} messages. + * @param message StreamKeyRangeRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IRowEvent, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IStreamKeyRangeRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RowEvent message from the specified reader or buffer. + * Decodes a StreamKeyRangeRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RowEvent + * @returns StreamKeyRangeRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.RowEvent; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.StreamKeyRangeRequest; /** - * Decodes a RowEvent message from the specified reader or buffer, length delimited. + * Decodes a StreamKeyRangeRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RowEvent + * @returns StreamKeyRangeRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.RowEvent; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.StreamKeyRangeRequest; /** - * Verifies a RowEvent message. + * Verifies a StreamKeyRangeRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RowEvent message from a plain object. Also converts values to their respective internal types. + * Creates a StreamKeyRangeRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns RowEvent + * @returns StreamKeyRangeRequest */ - public static fromObject(object: { [k: string]: any }): binlogdata.RowEvent; + public static fromObject(object: { [k: string]: any }): binlogdata.StreamKeyRangeRequest; /** - * Creates a plain object from a RowEvent message. Also converts values to other types if specified. - * @param message RowEvent + * Creates a plain object from a StreamKeyRangeRequest message. Also converts values to other types if specified. + * @param message StreamKeyRangeRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.RowEvent, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.StreamKeyRangeRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RowEvent to JSON. + * Converts this StreamKeyRangeRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RowEvent + * Gets the default type url for StreamKeyRangeRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a FieldEvent. */ - interface IFieldEvent { - - /** FieldEvent table_name */ - table_name?: (string|null); + /** Properties of a StreamKeyRangeResponse. 
*/ + interface IStreamKeyRangeResponse { - /** FieldEvent fields */ - fields?: (query.IField[]|null); + /** StreamKeyRangeResponse binlog_transaction */ + binlog_transaction?: (binlogdata.IBinlogTransaction|null); + } - /** FieldEvent keyspace */ - keyspace?: (string|null); - - /** FieldEvent shard */ - shard?: (string|null); - } - - /** Represents a FieldEvent. */ - class FieldEvent implements IFieldEvent { + /** Represents a StreamKeyRangeResponse. */ + class StreamKeyRangeResponse implements IStreamKeyRangeResponse { /** - * Constructs a new FieldEvent. + * Constructs a new StreamKeyRangeResponse. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.IFieldEvent); - - /** FieldEvent table_name. */ - public table_name: string; - - /** FieldEvent fields. */ - public fields: query.IField[]; - - /** FieldEvent keyspace. */ - public keyspace: string; + constructor(properties?: binlogdata.IStreamKeyRangeResponse); - /** FieldEvent shard. */ - public shard: string; + /** StreamKeyRangeResponse binlog_transaction. */ + public binlog_transaction?: (binlogdata.IBinlogTransaction|null); /** - * Creates a new FieldEvent instance using the specified properties. + * Creates a new StreamKeyRangeResponse instance using the specified properties. * @param [properties] Properties to set - * @returns FieldEvent instance + * @returns StreamKeyRangeResponse instance */ - public static create(properties?: binlogdata.IFieldEvent): binlogdata.FieldEvent; + public static create(properties?: binlogdata.IStreamKeyRangeResponse): binlogdata.StreamKeyRangeResponse; /** - * Encodes the specified FieldEvent message. Does not implicitly {@link binlogdata.FieldEvent.verify|verify} messages. - * @param message FieldEvent message or plain object to encode + * Encodes the specified StreamKeyRangeResponse message. Does not implicitly {@link binlogdata.StreamKeyRangeResponse.verify|verify} messages. 
+ * @param message StreamKeyRangeResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IFieldEvent, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IStreamKeyRangeResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified FieldEvent message, length delimited. Does not implicitly {@link binlogdata.FieldEvent.verify|verify} messages. - * @param message FieldEvent message or plain object to encode + * Encodes the specified StreamKeyRangeResponse message, length delimited. Does not implicitly {@link binlogdata.StreamKeyRangeResponse.verify|verify} messages. + * @param message StreamKeyRangeResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IFieldEvent, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IStreamKeyRangeResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a FieldEvent message from the specified reader or buffer. + * Decodes a StreamKeyRangeResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns FieldEvent + * @returns StreamKeyRangeResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.FieldEvent; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.StreamKeyRangeResponse; /** - * Decodes a FieldEvent message from the specified reader or buffer, length delimited. + * Decodes a StreamKeyRangeResponse message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns FieldEvent + * @returns StreamKeyRangeResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.FieldEvent; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.StreamKeyRangeResponse; /** - * Verifies a FieldEvent message. + * Verifies a StreamKeyRangeResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a FieldEvent message from a plain object. Also converts values to their respective internal types. + * Creates a StreamKeyRangeResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns FieldEvent + * @returns StreamKeyRangeResponse */ - public static fromObject(object: { [k: string]: any }): binlogdata.FieldEvent; + public static fromObject(object: { [k: string]: any }): binlogdata.StreamKeyRangeResponse; /** - * Creates a plain object from a FieldEvent message. Also converts values to other types if specified. - * @param message FieldEvent + * Creates a plain object from a StreamKeyRangeResponse message. Also converts values to other types if specified. + * @param message StreamKeyRangeResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.FieldEvent, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.StreamKeyRangeResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this FieldEvent to JSON. + * Converts this StreamKeyRangeResponse to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for FieldEvent + * Gets the default type url for StreamKeyRangeResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ShardGtid. */ - interface IShardGtid { - - /** ShardGtid keyspace */ - keyspace?: (string|null); + /** Properties of a StreamTablesRequest. */ + interface IStreamTablesRequest { - /** ShardGtid shard */ - shard?: (string|null); + /** StreamTablesRequest position */ + position?: (string|null); - /** ShardGtid gtid */ - gtid?: (string|null); + /** StreamTablesRequest tables */ + tables?: (string[]|null); - /** ShardGtid table_p_ks */ - table_p_ks?: (binlogdata.ITableLastPK[]|null); + /** StreamTablesRequest charset */ + charset?: (binlogdata.ICharset|null); } - /** Represents a ShardGtid. */ - class ShardGtid implements IShardGtid { + /** Represents a StreamTablesRequest. */ + class StreamTablesRequest implements IStreamTablesRequest { /** - * Constructs a new ShardGtid. + * Constructs a new StreamTablesRequest. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.IShardGtid); - - /** ShardGtid keyspace. */ - public keyspace: string; + constructor(properties?: binlogdata.IStreamTablesRequest); - /** ShardGtid shard. */ - public shard: string; + /** StreamTablesRequest position. */ + public position: string; - /** ShardGtid gtid. */ - public gtid: string; + /** StreamTablesRequest tables. */ + public tables: string[]; - /** ShardGtid table_p_ks. */ - public table_p_ks: binlogdata.ITableLastPK[]; + /** StreamTablesRequest charset. */ + public charset?: (binlogdata.ICharset|null); /** - * Creates a new ShardGtid instance using the specified properties. + * Creates a new StreamTablesRequest instance using the specified properties. 
* @param [properties] Properties to set - * @returns ShardGtid instance + * @returns StreamTablesRequest instance */ - public static create(properties?: binlogdata.IShardGtid): binlogdata.ShardGtid; + public static create(properties?: binlogdata.IStreamTablesRequest): binlogdata.StreamTablesRequest; /** - * Encodes the specified ShardGtid message. Does not implicitly {@link binlogdata.ShardGtid.verify|verify} messages. - * @param message ShardGtid message or plain object to encode + * Encodes the specified StreamTablesRequest message. Does not implicitly {@link binlogdata.StreamTablesRequest.verify|verify} messages. + * @param message StreamTablesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IShardGtid, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IStreamTablesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ShardGtid message, length delimited. Does not implicitly {@link binlogdata.ShardGtid.verify|verify} messages. - * @param message ShardGtid message or plain object to encode + * Encodes the specified StreamTablesRequest message, length delimited. Does not implicitly {@link binlogdata.StreamTablesRequest.verify|verify} messages. + * @param message StreamTablesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IShardGtid, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IStreamTablesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ShardGtid message from the specified reader or buffer. + * Decodes a StreamTablesRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ShardGtid + * @returns StreamTablesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.ShardGtid; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.StreamTablesRequest; /** - * Decodes a ShardGtid message from the specified reader or buffer, length delimited. + * Decodes a StreamTablesRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ShardGtid + * @returns StreamTablesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.ShardGtid; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.StreamTablesRequest; /** - * Verifies a ShardGtid message. + * Verifies a StreamTablesRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ShardGtid message from a plain object. Also converts values to their respective internal types. + * Creates a StreamTablesRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ShardGtid + * @returns StreamTablesRequest */ - public static fromObject(object: { [k: string]: any }): binlogdata.ShardGtid; + public static fromObject(object: { [k: string]: any }): binlogdata.StreamTablesRequest; /** - * Creates a plain object from a ShardGtid message. Also converts values to other types if specified. 
- * @param message ShardGtid + * Creates a plain object from a StreamTablesRequest message. Also converts values to other types if specified. + * @param message StreamTablesRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.ShardGtid, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.StreamTablesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ShardGtid to JSON. + * Converts this StreamTablesRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ShardGtid + * Gets the default type url for StreamTablesRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a VGtid. */ - interface IVGtid { + /** Properties of a StreamTablesResponse. */ + interface IStreamTablesResponse { - /** VGtid shard_gtids */ - shard_gtids?: (binlogdata.IShardGtid[]|null); + /** StreamTablesResponse binlog_transaction */ + binlog_transaction?: (binlogdata.IBinlogTransaction|null); } - /** Represents a VGtid. */ - class VGtid implements IVGtid { + /** Represents a StreamTablesResponse. */ + class StreamTablesResponse implements IStreamTablesResponse { /** - * Constructs a new VGtid. + * Constructs a new StreamTablesResponse. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.IVGtid); + constructor(properties?: binlogdata.IStreamTablesResponse); - /** VGtid shard_gtids. */ - public shard_gtids: binlogdata.IShardGtid[]; + /** StreamTablesResponse binlog_transaction. */ + public binlog_transaction?: (binlogdata.IBinlogTransaction|null); /** - * Creates a new VGtid instance using the specified properties. + * Creates a new StreamTablesResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns VGtid instance + * @returns StreamTablesResponse instance */ - public static create(properties?: binlogdata.IVGtid): binlogdata.VGtid; + public static create(properties?: binlogdata.IStreamTablesResponse): binlogdata.StreamTablesResponse; /** - * Encodes the specified VGtid message. Does not implicitly {@link binlogdata.VGtid.verify|verify} messages. - * @param message VGtid message or plain object to encode + * Encodes the specified StreamTablesResponse message. Does not implicitly {@link binlogdata.StreamTablesResponse.verify|verify} messages. + * @param message StreamTablesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IVGtid, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IStreamTablesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified VGtid message, length delimited. Does not implicitly {@link binlogdata.VGtid.verify|verify} messages. - * @param message VGtid message or plain object to encode + * Encodes the specified StreamTablesResponse message, length delimited. Does not implicitly {@link binlogdata.StreamTablesResponse.verify|verify} messages. + * @param message StreamTablesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IVGtid, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IStreamTablesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a VGtid message from the specified reader or buffer. + * Decodes a StreamTablesResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns VGtid + * @returns StreamTablesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.VGtid; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.StreamTablesResponse; /** - * Decodes a VGtid message from the specified reader or buffer, length delimited. + * Decodes a StreamTablesResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns VGtid + * @returns StreamTablesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.VGtid; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.StreamTablesResponse; /** - * Verifies a VGtid message. + * Verifies a StreamTablesResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a VGtid message from a plain object. Also converts values to their respective internal types. + * Creates a StreamTablesResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns VGtid + * @returns StreamTablesResponse */ - public static fromObject(object: { [k: string]: any }): binlogdata.VGtid; + public static fromObject(object: { [k: string]: any }): binlogdata.StreamTablesResponse; /** - * Creates a plain object from a VGtid message. Also converts values to other types if specified. 
- * @param message VGtid + * Creates a plain object from a StreamTablesResponse message. Also converts values to other types if specified. + * @param message StreamTablesResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.VGtid, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.StreamTablesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this VGtid to JSON. + * Converts this StreamTablesResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for VGtid + * Gets the default type url for StreamTablesResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a KeyspaceShard. */ - interface IKeyspaceShard { + /** Properties of a CharsetConversion. */ + interface ICharsetConversion { - /** KeyspaceShard keyspace */ - keyspace?: (string|null); + /** CharsetConversion from_charset */ + from_charset?: (string|null); - /** KeyspaceShard shard */ - shard?: (string|null); + /** CharsetConversion to_charset */ + to_charset?: (string|null); } - /** Represents a KeyspaceShard. */ - class KeyspaceShard implements IKeyspaceShard { + /** Represents a CharsetConversion. */ + class CharsetConversion implements ICharsetConversion { /** - * Constructs a new KeyspaceShard. + * Constructs a new CharsetConversion. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.IKeyspaceShard); + constructor(properties?: binlogdata.ICharsetConversion); - /** KeyspaceShard keyspace. */ - public keyspace: string; + /** CharsetConversion from_charset. */ + public from_charset: string; - /** KeyspaceShard shard. */ - public shard: string; + /** CharsetConversion to_charset. 
*/ + public to_charset: string; /** - * Creates a new KeyspaceShard instance using the specified properties. + * Creates a new CharsetConversion instance using the specified properties. * @param [properties] Properties to set - * @returns KeyspaceShard instance + * @returns CharsetConversion instance */ - public static create(properties?: binlogdata.IKeyspaceShard): binlogdata.KeyspaceShard; + public static create(properties?: binlogdata.ICharsetConversion): binlogdata.CharsetConversion; /** - * Encodes the specified KeyspaceShard message. Does not implicitly {@link binlogdata.KeyspaceShard.verify|verify} messages. - * @param message KeyspaceShard message or plain object to encode + * Encodes the specified CharsetConversion message. Does not implicitly {@link binlogdata.CharsetConversion.verify|verify} messages. + * @param message CharsetConversion message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IKeyspaceShard, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.ICharsetConversion, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified KeyspaceShard message, length delimited. Does not implicitly {@link binlogdata.KeyspaceShard.verify|verify} messages. - * @param message KeyspaceShard message or plain object to encode + * Encodes the specified CharsetConversion message, length delimited. Does not implicitly {@link binlogdata.CharsetConversion.verify|verify} messages. + * @param message CharsetConversion message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IKeyspaceShard, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.ICharsetConversion, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a KeyspaceShard message from the specified reader or buffer. 
+ * Decodes a CharsetConversion message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns KeyspaceShard + * @returns CharsetConversion * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.KeyspaceShard; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.CharsetConversion; /** - * Decodes a KeyspaceShard message from the specified reader or buffer, length delimited. + * Decodes a CharsetConversion message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns KeyspaceShard + * @returns CharsetConversion * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.KeyspaceShard; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.CharsetConversion; /** - * Verifies a KeyspaceShard message. + * Verifies a CharsetConversion message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a KeyspaceShard message from a plain object. Also converts values to their respective internal types. + * Creates a CharsetConversion message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns KeyspaceShard + * @returns CharsetConversion */ - public static fromObject(object: { [k: string]: any }): binlogdata.KeyspaceShard; + public static fromObject(object: { [k: string]: any }): binlogdata.CharsetConversion; /** - * Creates a plain object from a KeyspaceShard message. Also converts values to other types if specified. - * @param message KeyspaceShard + * Creates a plain object from a CharsetConversion message. Also converts values to other types if specified. + * @param message CharsetConversion * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.KeyspaceShard, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.CharsetConversion, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this KeyspaceShard to JSON. + * Converts this CharsetConversion to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for KeyspaceShard + * Gets the default type url for CharsetConversion * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** MigrationType enum. */ - enum MigrationType { - TABLES = 0, - SHARDS = 1 - } + /** Properties of a Rule. */ + interface IRule { - /** Properties of a Journal. 
*/ - interface IJournal { + /** Rule match */ + match?: (string|null); - /** Journal id */ - id?: (number|Long|null); + /** Rule filter */ + filter?: (string|null); - /** Journal migration_type */ - migration_type?: (binlogdata.MigrationType|null); + /** Rule convert_enum_to_text */ + convert_enum_to_text?: ({ [k: string]: string }|null); - /** Journal tables */ - tables?: (string[]|null); + /** Rule convert_charset */ + convert_charset?: ({ [k: string]: binlogdata.ICharsetConversion }|null); - /** Journal local_position */ - local_position?: (string|null); + /** Rule source_unique_key_columns */ + source_unique_key_columns?: (string|null); - /** Journal shard_gtids */ - shard_gtids?: (binlogdata.IShardGtid[]|null); + /** Rule target_unique_key_columns */ + target_unique_key_columns?: (string|null); - /** Journal participants */ - participants?: (binlogdata.IKeyspaceShard[]|null); + /** Rule source_unique_key_target_columns */ + source_unique_key_target_columns?: (string|null); - /** Journal source_workflows */ - source_workflows?: (string[]|null); + /** Rule convert_int_to_enum */ + convert_int_to_enum?: ({ [k: string]: boolean }|null); } - /** Represents a Journal. */ - class Journal implements IJournal { + /** Represents a Rule. */ + class Rule implements IRule { /** - * Constructs a new Journal. + * Constructs a new Rule. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.IJournal); + constructor(properties?: binlogdata.IRule); - /** Journal id. */ - public id: (number|Long); + /** Rule match. */ + public match: string; - /** Journal migration_type. */ - public migration_type: binlogdata.MigrationType; + /** Rule filter. */ + public filter: string; - /** Journal tables. */ - public tables: string[]; + /** Rule convert_enum_to_text. */ + public convert_enum_to_text: { [k: string]: string }; - /** Journal local_position. */ - public local_position: string; + /** Rule convert_charset. 
*/ + public convert_charset: { [k: string]: binlogdata.ICharsetConversion }; - /** Journal shard_gtids. */ - public shard_gtids: binlogdata.IShardGtid[]; + /** Rule source_unique_key_columns. */ + public source_unique_key_columns: string; - /** Journal participants. */ - public participants: binlogdata.IKeyspaceShard[]; + /** Rule target_unique_key_columns. */ + public target_unique_key_columns: string; - /** Journal source_workflows. */ - public source_workflows: string[]; + /** Rule source_unique_key_target_columns. */ + public source_unique_key_target_columns: string; + + /** Rule convert_int_to_enum. */ + public convert_int_to_enum: { [k: string]: boolean }; /** - * Creates a new Journal instance using the specified properties. + * Creates a new Rule instance using the specified properties. * @param [properties] Properties to set - * @returns Journal instance + * @returns Rule instance */ - public static create(properties?: binlogdata.IJournal): binlogdata.Journal; + public static create(properties?: binlogdata.IRule): binlogdata.Rule; /** - * Encodes the specified Journal message. Does not implicitly {@link binlogdata.Journal.verify|verify} messages. - * @param message Journal message or plain object to encode + * Encodes the specified Rule message. Does not implicitly {@link binlogdata.Rule.verify|verify} messages. + * @param message Rule message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IJournal, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IRule, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified Journal message, length delimited. Does not implicitly {@link binlogdata.Journal.verify|verify} messages. - * @param message Journal message or plain object to encode + * Encodes the specified Rule message, length delimited. Does not implicitly {@link binlogdata.Rule.verify|verify} messages. 
+ * @param message Rule message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IJournal, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IRule, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a Journal message from the specified reader or buffer. + * Decodes a Rule message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns Journal + * @returns Rule * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.Journal; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.Rule; /** - * Decodes a Journal message from the specified reader or buffer, length delimited. + * Decodes a Rule message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns Journal + * @returns Rule * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.Journal; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.Rule; /** - * Verifies a Journal message. + * Verifies a Rule message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a Journal message from a plain object. Also converts values to their respective internal types. + * Creates a Rule message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns Journal + * @returns Rule */ - public static fromObject(object: { [k: string]: any }): binlogdata.Journal; + public static fromObject(object: { [k: string]: any }): binlogdata.Rule; /** - * Creates a plain object from a Journal message. Also converts values to other types if specified. - * @param message Journal + * Creates a plain object from a Rule message. Also converts values to other types if specified. + * @param message Rule * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.Journal, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.Rule, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this Journal to JSON. + * Converts this Rule to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for Journal + * Gets the default type url for Rule * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a VEvent. */ - interface IVEvent { - - /** VEvent type */ - type?: (binlogdata.VEventType|null); - - /** VEvent timestamp */ - timestamp?: (number|Long|null); - - /** VEvent gtid */ - gtid?: (string|null); - - /** VEvent statement */ - statement?: (string|null); - - /** VEvent row_event */ - row_event?: (binlogdata.IRowEvent|null); + /** Properties of a Filter. 
*/ + interface IFilter { - /** VEvent field_event */ - field_event?: (binlogdata.IFieldEvent|null); + /** Filter rules */ + rules?: (binlogdata.IRule[]|null); - /** VEvent vgtid */ - vgtid?: (binlogdata.IVGtid|null); + /** Filter field_event_mode */ + field_event_mode?: (binlogdata.Filter.FieldEventMode|null); - /** VEvent journal */ - journal?: (binlogdata.IJournal|null); + /** Filter workflow_type */ + workflow_type?: (number|Long|null); - /** VEvent dml */ - dml?: (string|null); + /** Filter workflow_name */ + workflow_name?: (string|null); + } - /** VEvent current_time */ - current_time?: (number|Long|null); + /** Represents a Filter. */ + class Filter implements IFilter { - /** VEvent last_p_k_event */ - last_p_k_event?: (binlogdata.ILastPKEvent|null); + /** + * Constructs a new Filter. + * @param [properties] Properties to set + */ + constructor(properties?: binlogdata.IFilter); - /** VEvent keyspace */ - keyspace?: (string|null); + /** Filter rules. */ + public rules: binlogdata.IRule[]; - /** VEvent shard */ - shard?: (string|null); + /** Filter field_event_mode. */ + public field_event_mode: binlogdata.Filter.FieldEventMode; - /** VEvent throttled */ - throttled?: (boolean|null); - } + /** Filter workflow_type. */ + public workflow_type: (number|Long); - /** Represents a VEvent. */ - class VEvent implements IVEvent { + /** Filter workflow_name. */ + public workflow_name: string; /** - * Constructs a new VEvent. + * Creates a new Filter instance using the specified properties. * @param [properties] Properties to set + * @returns Filter instance */ - constructor(properties?: binlogdata.IVEvent); + public static create(properties?: binlogdata.IFilter): binlogdata.Filter; - /** VEvent type. */ - public type: binlogdata.VEventType; - - /** VEvent timestamp. */ - public timestamp: (number|Long); - - /** VEvent gtid. */ - public gtid: string; - - /** VEvent statement. */ - public statement: string; - - /** VEvent row_event. 
*/ - public row_event?: (binlogdata.IRowEvent|null); - - /** VEvent field_event. */ - public field_event?: (binlogdata.IFieldEvent|null); - - /** VEvent vgtid. */ - public vgtid?: (binlogdata.IVGtid|null); - - /** VEvent journal. */ - public journal?: (binlogdata.IJournal|null); - - /** VEvent dml. */ - public dml: string; - - /** VEvent current_time. */ - public current_time: (number|Long); - - /** VEvent last_p_k_event. */ - public last_p_k_event?: (binlogdata.ILastPKEvent|null); - - /** VEvent keyspace. */ - public keyspace: string; - - /** VEvent shard. */ - public shard: string; - - /** VEvent throttled. */ - public throttled: boolean; - - /** - * Creates a new VEvent instance using the specified properties. - * @param [properties] Properties to set - * @returns VEvent instance - */ - public static create(properties?: binlogdata.IVEvent): binlogdata.VEvent; - - /** - * Encodes the specified VEvent message. Does not implicitly {@link binlogdata.VEvent.verify|verify} messages. - * @param message VEvent message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encode(message: binlogdata.IVEvent, writer?: $protobuf.Writer): $protobuf.Writer; + /** + * Encodes the specified Filter message. Does not implicitly {@link binlogdata.Filter.verify|verify} messages. + * @param message Filter message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: binlogdata.IFilter, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified VEvent message, length delimited. Does not implicitly {@link binlogdata.VEvent.verify|verify} messages. - * @param message VEvent message or plain object to encode + * Encodes the specified Filter message, length delimited. Does not implicitly {@link binlogdata.Filter.verify|verify} messages. 
+ * @param message Filter message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IVEvent, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IFilter, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a VEvent message from the specified reader or buffer. + * Decodes a Filter message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns VEvent + * @returns Filter * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.VEvent; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.Filter; /** - * Decodes a VEvent message from the specified reader or buffer, length delimited. + * Decodes a Filter message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns VEvent + * @returns Filter * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.VEvent; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.Filter; /** - * Verifies a VEvent message. + * Verifies a Filter message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a VEvent message from a plain object. Also converts values to their respective internal types. + * Creates a Filter message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns VEvent + * @returns Filter */ - public static fromObject(object: { [k: string]: any }): binlogdata.VEvent; + public static fromObject(object: { [k: string]: any }): binlogdata.Filter; /** - * Creates a plain object from a VEvent message. Also converts values to other types if specified. - * @param message VEvent + * Creates a plain object from a Filter message. Also converts values to other types if specified. + * @param message Filter * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.VEvent, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.Filter, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this VEvent to JSON. + * Converts this Filter to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for VEvent + * Gets the default type url for Filter * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a MinimalTable. */ - interface IMinimalTable { - - /** MinimalTable name */ - name?: (string|null); - - /** MinimalTable fields */ - fields?: (query.IField[]|null); + namespace Filter { - /** MinimalTable p_k_columns */ - p_k_columns?: ((number|Long)[]|null); + /** FieldEventMode enum. */ + enum FieldEventMode { + ERR_ON_MISMATCH = 0, + BEST_EFFORT = 1 + } } - /** Represents a MinimalTable. */ - class MinimalTable implements IMinimalTable { + /** OnDDLAction enum. */ + enum OnDDLAction { + IGNORE = 0, + STOP = 1, + EXEC = 2, + EXEC_IGNORE = 3 + } - /** - * Constructs a new MinimalTable. - * @param [properties] Properties to set - */ - constructor(properties?: binlogdata.IMinimalTable); + /** VReplicationWorkflowType enum. 
*/ + enum VReplicationWorkflowType { + Materialize = 0, + MoveTables = 1, + CreateLookupIndex = 2, + Migrate = 3, + Reshard = 4, + OnlineDDL = 5 + } - /** MinimalTable name. */ - public name: string; + /** VReplicationWorkflowSubType enum. */ + enum VReplicationWorkflowSubType { + None = 0, + Partial = 1, + AtomicCopy = 2 + } - /** MinimalTable fields. */ - public fields: query.IField[]; + /** VReplicationWorkflowState enum. */ + enum VReplicationWorkflowState { + Unknown = 0, + Init = 1, + Stopped = 2, + Copying = 3, + Running = 4, + Error = 5, + Lagging = 6 + } - /** MinimalTable p_k_columns. */ - public p_k_columns: (number|Long)[]; + /** Properties of a BinlogSource. */ + interface IBinlogSource { - /** - * Creates a new MinimalTable instance using the specified properties. - * @param [properties] Properties to set - * @returns MinimalTable instance - */ - public static create(properties?: binlogdata.IMinimalTable): binlogdata.MinimalTable; + /** BinlogSource keyspace */ + keyspace?: (string|null); - /** - * Encodes the specified MinimalTable message. Does not implicitly {@link binlogdata.MinimalTable.verify|verify} messages. - * @param message MinimalTable message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encode(message: binlogdata.IMinimalTable, writer?: $protobuf.Writer): $protobuf.Writer; + /** BinlogSource shard */ + shard?: (string|null); - /** - * Encodes the specified MinimalTable message, length delimited. Does not implicitly {@link binlogdata.MinimalTable.verify|verify} messages. - * @param message MinimalTable message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encodeDelimited(message: binlogdata.IMinimalTable, writer?: $protobuf.Writer): $protobuf.Writer; + /** BinlogSource tablet_type */ + tablet_type?: (topodata.TabletType|null); - /** - * Decodes a MinimalTable message from the specified reader or buffer. 
- * @param reader Reader or buffer to decode from - * @param [length] Message length if known beforehand - * @returns MinimalTable - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.MinimalTable; + /** BinlogSource key_range */ + key_range?: (topodata.IKeyRange|null); - /** - * Decodes a MinimalTable message from the specified reader or buffer, length delimited. - * @param reader Reader or buffer to decode from - * @returns MinimalTable - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.MinimalTable; + /** BinlogSource tables */ + tables?: (string[]|null); - /** - * Verifies a MinimalTable message. - * @param message Plain object to verify - * @returns `null` if valid, otherwise the reason why it is not - */ - public static verify(message: { [k: string]: any }): (string|null); + /** BinlogSource filter */ + filter?: (binlogdata.IFilter|null); - /** - * Creates a MinimalTable message from a plain object. Also converts values to their respective internal types. - * @param object Plain object - * @returns MinimalTable - */ - public static fromObject(object: { [k: string]: any }): binlogdata.MinimalTable; + /** BinlogSource on_ddl */ + on_ddl?: (binlogdata.OnDDLAction|null); - /** - * Creates a plain object from a MinimalTable message. Also converts values to other types if specified. 
- * @param message MinimalTable - * @param [options] Conversion options - * @returns Plain object - */ - public static toObject(message: binlogdata.MinimalTable, options?: $protobuf.IConversionOptions): { [k: string]: any }; + /** BinlogSource external_mysql */ + external_mysql?: (string|null); - /** - * Converts this MinimalTable to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; + /** BinlogSource stop_after_copy */ + stop_after_copy?: (boolean|null); - /** - * Gets the default type url for MinimalTable - * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns The default type url - */ - public static getTypeUrl(typeUrlPrefix?: string): string; - } + /** BinlogSource external_cluster */ + external_cluster?: (string|null); - /** Properties of a MinimalSchema. */ - interface IMinimalSchema { + /** BinlogSource source_time_zone */ + source_time_zone?: (string|null); - /** MinimalSchema tables */ - tables?: (binlogdata.IMinimalTable[]|null); + /** BinlogSource target_time_zone */ + target_time_zone?: (string|null); } - /** Represents a MinimalSchema. */ - class MinimalSchema implements IMinimalSchema { + /** Represents a BinlogSource. */ + class BinlogSource implements IBinlogSource { /** - * Constructs a new MinimalSchema. + * Constructs a new BinlogSource. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.IMinimalSchema); + constructor(properties?: binlogdata.IBinlogSource); - /** MinimalSchema tables. */ - public tables: binlogdata.IMinimalTable[]; + /** BinlogSource keyspace. */ + public keyspace: string; + + /** BinlogSource shard. */ + public shard: string; + + /** BinlogSource tablet_type. */ + public tablet_type: topodata.TabletType; + + /** BinlogSource key_range. */ + public key_range?: (topodata.IKeyRange|null); + + /** BinlogSource tables. */ + public tables: string[]; + + /** BinlogSource filter. 
*/ + public filter?: (binlogdata.IFilter|null); + + /** BinlogSource on_ddl. */ + public on_ddl: binlogdata.OnDDLAction; + + /** BinlogSource external_mysql. */ + public external_mysql: string; + + /** BinlogSource stop_after_copy. */ + public stop_after_copy: boolean; + + /** BinlogSource external_cluster. */ + public external_cluster: string; + + /** BinlogSource source_time_zone. */ + public source_time_zone: string; + + /** BinlogSource target_time_zone. */ + public target_time_zone: string; /** - * Creates a new MinimalSchema instance using the specified properties. + * Creates a new BinlogSource instance using the specified properties. * @param [properties] Properties to set - * @returns MinimalSchema instance + * @returns BinlogSource instance */ - public static create(properties?: binlogdata.IMinimalSchema): binlogdata.MinimalSchema; + public static create(properties?: binlogdata.IBinlogSource): binlogdata.BinlogSource; /** - * Encodes the specified MinimalSchema message. Does not implicitly {@link binlogdata.MinimalSchema.verify|verify} messages. - * @param message MinimalSchema message or plain object to encode + * Encodes the specified BinlogSource message. Does not implicitly {@link binlogdata.BinlogSource.verify|verify} messages. + * @param message BinlogSource message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IMinimalSchema, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IBinlogSource, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified MinimalSchema message, length delimited. Does not implicitly {@link binlogdata.MinimalSchema.verify|verify} messages. - * @param message MinimalSchema message or plain object to encode + * Encodes the specified BinlogSource message, length delimited. Does not implicitly {@link binlogdata.BinlogSource.verify|verify} messages. 
+ * @param message BinlogSource message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IMinimalSchema, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IBinlogSource, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a MinimalSchema message from the specified reader or buffer. + * Decodes a BinlogSource message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns MinimalSchema + * @returns BinlogSource * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.MinimalSchema; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.BinlogSource; /** - * Decodes a MinimalSchema message from the specified reader or buffer, length delimited. + * Decodes a BinlogSource message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns MinimalSchema + * @returns BinlogSource * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.MinimalSchema; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.BinlogSource; /** - * Verifies a MinimalSchema message. + * Verifies a BinlogSource message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a MinimalSchema message from a plain object. Also converts values to their respective internal types. 
+ * Creates a BinlogSource message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns MinimalSchema + * @returns BinlogSource */ - public static fromObject(object: { [k: string]: any }): binlogdata.MinimalSchema; + public static fromObject(object: { [k: string]: any }): binlogdata.BinlogSource; /** - * Creates a plain object from a MinimalSchema message. Also converts values to other types if specified. - * @param message MinimalSchema + * Creates a plain object from a BinlogSource message. Also converts values to other types if specified. + * @param message BinlogSource * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.MinimalSchema, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.BinlogSource, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this MinimalSchema to JSON. + * Converts this BinlogSource to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for MinimalSchema + * Gets the default type url for BinlogSource * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a VStreamRequest. */ - interface IVStreamRequest { - - /** VStreamRequest effective_caller_id */ - effective_caller_id?: (vtrpc.ICallerID|null); - - /** VStreamRequest immediate_caller_id */ - immediate_caller_id?: (query.IVTGateCallerID|null); + /** VEventType enum. 
*/ + enum VEventType { + UNKNOWN = 0, + GTID = 1, + BEGIN = 2, + COMMIT = 3, + ROLLBACK = 4, + DDL = 5, + INSERT = 6, + REPLACE = 7, + UPDATE = 8, + DELETE = 9, + SET = 10, + OTHER = 11, + ROW = 12, + FIELD = 13, + HEARTBEAT = 14, + VGTID = 15, + JOURNAL = 16, + VERSION = 17, + LASTPK = 18, + SAVEPOINT = 19, + COPY_COMPLETED = 20 + } - /** VStreamRequest target */ - target?: (query.ITarget|null); + /** Properties of a RowChange. */ + interface IRowChange { - /** VStreamRequest position */ - position?: (string|null); + /** RowChange before */ + before?: (query.IRow|null); - /** VStreamRequest filter */ - filter?: (binlogdata.IFilter|null); + /** RowChange after */ + after?: (query.IRow|null); - /** VStreamRequest table_last_p_ks */ - table_last_p_ks?: (binlogdata.ITableLastPK[]|null); + /** RowChange data_columns */ + data_columns?: (binlogdata.RowChange.IBitmap|null); } - /** Represents a VStreamRequest. */ - class VStreamRequest implements IVStreamRequest { + /** Represents a RowChange. */ + class RowChange implements IRowChange { /** - * Constructs a new VStreamRequest. + * Constructs a new RowChange. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.IVStreamRequest); - - /** VStreamRequest effective_caller_id. */ - public effective_caller_id?: (vtrpc.ICallerID|null); - - /** VStreamRequest immediate_caller_id. */ - public immediate_caller_id?: (query.IVTGateCallerID|null); - - /** VStreamRequest target. */ - public target?: (query.ITarget|null); + constructor(properties?: binlogdata.IRowChange); - /** VStreamRequest position. */ - public position: string; + /** RowChange before. */ + public before?: (query.IRow|null); - /** VStreamRequest filter. */ - public filter?: (binlogdata.IFilter|null); + /** RowChange after. */ + public after?: (query.IRow|null); - /** VStreamRequest table_last_p_ks. */ - public table_last_p_ks: binlogdata.ITableLastPK[]; + /** RowChange data_columns. 
*/ + public data_columns?: (binlogdata.RowChange.IBitmap|null); /** - * Creates a new VStreamRequest instance using the specified properties. + * Creates a new RowChange instance using the specified properties. * @param [properties] Properties to set - * @returns VStreamRequest instance + * @returns RowChange instance */ - public static create(properties?: binlogdata.IVStreamRequest): binlogdata.VStreamRequest; + public static create(properties?: binlogdata.IRowChange): binlogdata.RowChange; /** - * Encodes the specified VStreamRequest message. Does not implicitly {@link binlogdata.VStreamRequest.verify|verify} messages. - * @param message VStreamRequest message or plain object to encode + * Encodes the specified RowChange message. Does not implicitly {@link binlogdata.RowChange.verify|verify} messages. + * @param message RowChange message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IVStreamRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IRowChange, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified VStreamRequest message, length delimited. Does not implicitly {@link binlogdata.VStreamRequest.verify|verify} messages. - * @param message VStreamRequest message or plain object to encode + * Encodes the specified RowChange message, length delimited. Does not implicitly {@link binlogdata.RowChange.verify|verify} messages. + * @param message RowChange message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IVStreamRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IRowChange, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a VStreamRequest message from the specified reader or buffer. + * Decodes a RowChange message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns VStreamRequest + * @returns RowChange * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.VStreamRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.RowChange; /** - * Decodes a VStreamRequest message from the specified reader or buffer, length delimited. + * Decodes a RowChange message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns VStreamRequest + * @returns RowChange * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.VStreamRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.RowChange; /** - * Verifies a VStreamRequest message. + * Verifies a RowChange message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a VStreamRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RowChange message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns VStreamRequest + * @returns RowChange */ - public static fromObject(object: { [k: string]: any }): binlogdata.VStreamRequest; + public static fromObject(object: { [k: string]: any }): binlogdata.RowChange; /** - * Creates a plain object from a VStreamRequest message. Also converts values to other types if specified. 
- * @param message VStreamRequest + * Creates a plain object from a RowChange message. Also converts values to other types if specified. + * @param message RowChange * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.VStreamRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.RowChange, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this VStreamRequest to JSON. + * Converts this RowChange to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for VStreamRequest + * Gets the default type url for RowChange * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a VStreamResponse. */ - interface IVStreamResponse { + namespace RowChange { - /** VStreamResponse events */ - events?: (binlogdata.IVEvent[]|null); - } + /** Properties of a Bitmap. */ + interface IBitmap { - /** Represents a VStreamResponse. */ - class VStreamResponse implements IVStreamResponse { + /** Bitmap count */ + count?: (number|Long|null); - /** - * Constructs a new VStreamResponse. - * @param [properties] Properties to set - */ - constructor(properties?: binlogdata.IVStreamResponse); + /** Bitmap cols */ + cols?: (Uint8Array|null); + } - /** VStreamResponse events. */ - public events: binlogdata.IVEvent[]; + /** Represents a Bitmap. */ + class Bitmap implements IBitmap { - /** - * Creates a new VStreamResponse instance using the specified properties. - * @param [properties] Properties to set - * @returns VStreamResponse instance - */ - public static create(properties?: binlogdata.IVStreamResponse): binlogdata.VStreamResponse; + /** + * Constructs a new Bitmap. 
+ * @param [properties] Properties to set + */ + constructor(properties?: binlogdata.RowChange.IBitmap); - /** - * Encodes the specified VStreamResponse message. Does not implicitly {@link binlogdata.VStreamResponse.verify|verify} messages. - * @param message VStreamResponse message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encode(message: binlogdata.IVStreamResponse, writer?: $protobuf.Writer): $protobuf.Writer; + /** Bitmap count. */ + public count: (number|Long); - /** - * Encodes the specified VStreamResponse message, length delimited. Does not implicitly {@link binlogdata.VStreamResponse.verify|verify} messages. - * @param message VStreamResponse message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encodeDelimited(message: binlogdata.IVStreamResponse, writer?: $protobuf.Writer): $protobuf.Writer; + /** Bitmap cols. */ + public cols: Uint8Array; - /** - * Decodes a VStreamResponse message from the specified reader or buffer. - * @param reader Reader or buffer to decode from - * @param [length] Message length if known beforehand - * @returns VStreamResponse - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.VStreamResponse; + /** + * Creates a new Bitmap instance using the specified properties. + * @param [properties] Properties to set + * @returns Bitmap instance + */ + public static create(properties?: binlogdata.RowChange.IBitmap): binlogdata.RowChange.Bitmap; - /** - * Decodes a VStreamResponse message from the specified reader or buffer, length delimited. 
- * @param reader Reader or buffer to decode from - * @returns VStreamResponse - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.VStreamResponse; + /** + * Encodes the specified Bitmap message. Does not implicitly {@link binlogdata.RowChange.Bitmap.verify|verify} messages. + * @param message Bitmap message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: binlogdata.RowChange.IBitmap, writer?: $protobuf.Writer): $protobuf.Writer; - /** - * Verifies a VStreamResponse message. - * @param message Plain object to verify - * @returns `null` if valid, otherwise the reason why it is not - */ - public static verify(message: { [k: string]: any }): (string|null); + /** + * Encodes the specified Bitmap message, length delimited. Does not implicitly {@link binlogdata.RowChange.Bitmap.verify|verify} messages. + * @param message Bitmap message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: binlogdata.RowChange.IBitmap, writer?: $protobuf.Writer): $protobuf.Writer; - /** - * Creates a VStreamResponse message from a plain object. Also converts values to their respective internal types. - * @param object Plain object - * @returns VStreamResponse - */ - public static fromObject(object: { [k: string]: any }): binlogdata.VStreamResponse; + /** + * Decodes a Bitmap message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns Bitmap + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.RowChange.Bitmap; - /** - * Creates a plain object from a VStreamResponse message. Also converts values to other types if specified. - * @param message VStreamResponse - * @param [options] Conversion options - * @returns Plain object - */ - public static toObject(message: binlogdata.VStreamResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + /** + * Decodes a Bitmap message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns Bitmap + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.RowChange.Bitmap; - /** - * Converts this VStreamResponse to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; + /** + * Verifies a Bitmap message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); - /** - * Gets the default type url for VStreamResponse - * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns The default type url - */ - public static getTypeUrl(typeUrlPrefix?: string): string; + /** + * Creates a Bitmap message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns Bitmap + */ + public static fromObject(object: { [k: string]: any }): binlogdata.RowChange.Bitmap; + + /** + * Creates a plain object from a Bitmap message. Also converts values to other types if specified. + * @param message Bitmap + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: binlogdata.RowChange.Bitmap, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this Bitmap to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for Bitmap + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } } - /** Properties of a VStreamRowsRequest. */ - interface IVStreamRowsRequest { + /** Properties of a RowEvent. */ + interface IRowEvent { - /** VStreamRowsRequest effective_caller_id */ - effective_caller_id?: (vtrpc.ICallerID|null); + /** RowEvent table_name */ + table_name?: (string|null); - /** VStreamRowsRequest immediate_caller_id */ - immediate_caller_id?: (query.IVTGateCallerID|null); + /** RowEvent row_changes */ + row_changes?: (binlogdata.IRowChange[]|null); - /** VStreamRowsRequest target */ - target?: (query.ITarget|null); + /** RowEvent keyspace */ + keyspace?: (string|null); - /** VStreamRowsRequest query */ - query?: (string|null); + /** RowEvent shard */ + shard?: (string|null); - /** VStreamRowsRequest lastpk */ - lastpk?: (query.IQueryResult|null); + /** RowEvent flags */ + flags?: (number|null); } - /** Represents a VStreamRowsRequest. */ - class VStreamRowsRequest implements IVStreamRowsRequest { + /** Represents a RowEvent. */ + class RowEvent implements IRowEvent { /** - * Constructs a new VStreamRowsRequest. + * Constructs a new RowEvent. 
* @param [properties] Properties to set */ - constructor(properties?: binlogdata.IVStreamRowsRequest); + constructor(properties?: binlogdata.IRowEvent); - /** VStreamRowsRequest effective_caller_id. */ - public effective_caller_id?: (vtrpc.ICallerID|null); + /** RowEvent table_name. */ + public table_name: string; - /** VStreamRowsRequest immediate_caller_id. */ - public immediate_caller_id?: (query.IVTGateCallerID|null); + /** RowEvent row_changes. */ + public row_changes: binlogdata.IRowChange[]; - /** VStreamRowsRequest target. */ - public target?: (query.ITarget|null); + /** RowEvent keyspace. */ + public keyspace: string; - /** VStreamRowsRequest query. */ - public query: string; + /** RowEvent shard. */ + public shard: string; - /** VStreamRowsRequest lastpk. */ - public lastpk?: (query.IQueryResult|null); + /** RowEvent flags. */ + public flags: number; /** - * Creates a new VStreamRowsRequest instance using the specified properties. + * Creates a new RowEvent instance using the specified properties. * @param [properties] Properties to set - * @returns VStreamRowsRequest instance + * @returns RowEvent instance */ - public static create(properties?: binlogdata.IVStreamRowsRequest): binlogdata.VStreamRowsRequest; + public static create(properties?: binlogdata.IRowEvent): binlogdata.RowEvent; /** - * Encodes the specified VStreamRowsRequest message. Does not implicitly {@link binlogdata.VStreamRowsRequest.verify|verify} messages. - * @param message VStreamRowsRequest message or plain object to encode + * Encodes the specified RowEvent message. Does not implicitly {@link binlogdata.RowEvent.verify|verify} messages. 
+ * @param message RowEvent message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IVStreamRowsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IRowEvent, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified VStreamRowsRequest message, length delimited. Does not implicitly {@link binlogdata.VStreamRowsRequest.verify|verify} messages. - * @param message VStreamRowsRequest message or plain object to encode + * Encodes the specified RowEvent message, length delimited. Does not implicitly {@link binlogdata.RowEvent.verify|verify} messages. + * @param message RowEvent message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IVStreamRowsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IRowEvent, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a VStreamRowsRequest message from the specified reader or buffer. + * Decodes a RowEvent message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns VStreamRowsRequest + * @returns RowEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.VStreamRowsRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.RowEvent; /** - * Decodes a VStreamRowsRequest message from the specified reader or buffer, length delimited. + * Decodes a RowEvent message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns VStreamRowsRequest + * @returns RowEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.VStreamRowsRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.RowEvent; /** - * Verifies a VStreamRowsRequest message. + * Verifies a RowEvent message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a VStreamRowsRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RowEvent message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns VStreamRowsRequest + * @returns RowEvent */ - public static fromObject(object: { [k: string]: any }): binlogdata.VStreamRowsRequest; + public static fromObject(object: { [k: string]: any }): binlogdata.RowEvent; /** - * Creates a plain object from a VStreamRowsRequest message. Also converts values to other types if specified. - * @param message VStreamRowsRequest + * Creates a plain object from a RowEvent message. Also converts values to other types if specified. + * @param message RowEvent * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.VStreamRowsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.RowEvent, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this VStreamRowsRequest to JSON. + * Converts this RowEvent to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for VStreamRowsRequest + * Gets the default type url for RowEvent * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a VStreamRowsResponse. */ - interface IVStreamRowsResponse { - - /** VStreamRowsResponse fields */ - fields?: (query.IField[]|null); - - /** VStreamRowsResponse pkfields */ - pkfields?: (query.IField[]|null); - - /** VStreamRowsResponse gtid */ - gtid?: (string|null); + /** Properties of a FieldEvent. */ + interface IFieldEvent { - /** VStreamRowsResponse rows */ - rows?: (query.IRow[]|null); + /** FieldEvent table_name */ + table_name?: (string|null); - /** VStreamRowsResponse lastpk */ - lastpk?: (query.IRow|null); + /** FieldEvent fields */ + fields?: (query.IField[]|null); - /** VStreamRowsResponse throttled */ - throttled?: (boolean|null); + /** FieldEvent keyspace */ + keyspace?: (string|null); - /** VStreamRowsResponse heartbeat */ - heartbeat?: (boolean|null); + /** FieldEvent shard */ + shard?: (string|null); } - /** Represents a VStreamRowsResponse. */ - class VStreamRowsResponse implements IVStreamRowsResponse { + /** Represents a FieldEvent. */ + class FieldEvent implements IFieldEvent { /** - * Constructs a new VStreamRowsResponse. + * Constructs a new FieldEvent. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.IVStreamRowsResponse); - - /** VStreamRowsResponse fields. */ - public fields: query.IField[]; - - /** VStreamRowsResponse pkfields. */ - public pkfields: query.IField[]; - - /** VStreamRowsResponse gtid. */ - public gtid: string; + constructor(properties?: binlogdata.IFieldEvent); - /** VStreamRowsResponse rows. */ - public rows: query.IRow[]; + /** FieldEvent table_name. */ + public table_name: string; - /** VStreamRowsResponse lastpk. 
*/ - public lastpk?: (query.IRow|null); + /** FieldEvent fields. */ + public fields: query.IField[]; - /** VStreamRowsResponse throttled. */ - public throttled: boolean; + /** FieldEvent keyspace. */ + public keyspace: string; - /** VStreamRowsResponse heartbeat. */ - public heartbeat: boolean; + /** FieldEvent shard. */ + public shard: string; /** - * Creates a new VStreamRowsResponse instance using the specified properties. + * Creates a new FieldEvent instance using the specified properties. * @param [properties] Properties to set - * @returns VStreamRowsResponse instance + * @returns FieldEvent instance */ - public static create(properties?: binlogdata.IVStreamRowsResponse): binlogdata.VStreamRowsResponse; + public static create(properties?: binlogdata.IFieldEvent): binlogdata.FieldEvent; /** - * Encodes the specified VStreamRowsResponse message. Does not implicitly {@link binlogdata.VStreamRowsResponse.verify|verify} messages. - * @param message VStreamRowsResponse message or plain object to encode + * Encodes the specified FieldEvent message. Does not implicitly {@link binlogdata.FieldEvent.verify|verify} messages. + * @param message FieldEvent message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IVStreamRowsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IFieldEvent, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified VStreamRowsResponse message, length delimited. Does not implicitly {@link binlogdata.VStreamRowsResponse.verify|verify} messages. - * @param message VStreamRowsResponse message or plain object to encode + * Encodes the specified FieldEvent message, length delimited. Does not implicitly {@link binlogdata.FieldEvent.verify|verify} messages. 
+ * @param message FieldEvent message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IVStreamRowsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IFieldEvent, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a VStreamRowsResponse message from the specified reader or buffer. + * Decodes a FieldEvent message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns VStreamRowsResponse + * @returns FieldEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.VStreamRowsResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.FieldEvent; /** - * Decodes a VStreamRowsResponse message from the specified reader or buffer, length delimited. + * Decodes a FieldEvent message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns VStreamRowsResponse + * @returns FieldEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.VStreamRowsResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.FieldEvent; /** - * Verifies a VStreamRowsResponse message. + * Verifies a FieldEvent message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a VStreamRowsResponse message from a plain object. 
Also converts values to their respective internal types. + * Creates a FieldEvent message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns VStreamRowsResponse + * @returns FieldEvent */ - public static fromObject(object: { [k: string]: any }): binlogdata.VStreamRowsResponse; + public static fromObject(object: { [k: string]: any }): binlogdata.FieldEvent; /** - * Creates a plain object from a VStreamRowsResponse message. Also converts values to other types if specified. - * @param message VStreamRowsResponse + * Creates a plain object from a FieldEvent message. Also converts values to other types if specified. + * @param message FieldEvent * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.VStreamRowsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.FieldEvent, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this VStreamRowsResponse to JSON. + * Converts this FieldEvent to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for VStreamRowsResponse + * Gets the default type url for FieldEvent * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a LastPKEvent. */ - interface ILastPKEvent { + /** Properties of a ShardGtid. 
*/ + interface IShardGtid { - /** LastPKEvent table_last_p_k */ - table_last_p_k?: (binlogdata.ITableLastPK|null); + /** ShardGtid keyspace */ + keyspace?: (string|null); - /** LastPKEvent completed */ - completed?: (boolean|null); + /** ShardGtid shard */ + shard?: (string|null); + + /** ShardGtid gtid */ + gtid?: (string|null); + + /** ShardGtid table_p_ks */ + table_p_ks?: (binlogdata.ITableLastPK[]|null); } - /** Represents a LastPKEvent. */ - class LastPKEvent implements ILastPKEvent { + /** Represents a ShardGtid. */ + class ShardGtid implements IShardGtid { /** - * Constructs a new LastPKEvent. + * Constructs a new ShardGtid. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.ILastPKEvent); + constructor(properties?: binlogdata.IShardGtid); - /** LastPKEvent table_last_p_k. */ - public table_last_p_k?: (binlogdata.ITableLastPK|null); + /** ShardGtid keyspace. */ + public keyspace: string; - /** LastPKEvent completed. */ - public completed: boolean; + /** ShardGtid shard. */ + public shard: string; + + /** ShardGtid gtid. */ + public gtid: string; + + /** ShardGtid table_p_ks. */ + public table_p_ks: binlogdata.ITableLastPK[]; /** - * Creates a new LastPKEvent instance using the specified properties. + * Creates a new ShardGtid instance using the specified properties. * @param [properties] Properties to set - * @returns LastPKEvent instance + * @returns ShardGtid instance */ - public static create(properties?: binlogdata.ILastPKEvent): binlogdata.LastPKEvent; + public static create(properties?: binlogdata.IShardGtid): binlogdata.ShardGtid; /** - * Encodes the specified LastPKEvent message. Does not implicitly {@link binlogdata.LastPKEvent.verify|verify} messages. - * @param message LastPKEvent message or plain object to encode + * Encodes the specified ShardGtid message. Does not implicitly {@link binlogdata.ShardGtid.verify|verify} messages. 
+ * @param message ShardGtid message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.ILastPKEvent, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IShardGtid, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified LastPKEvent message, length delimited. Does not implicitly {@link binlogdata.LastPKEvent.verify|verify} messages. - * @param message LastPKEvent message or plain object to encode + * Encodes the specified ShardGtid message, length delimited. Does not implicitly {@link binlogdata.ShardGtid.verify|verify} messages. + * @param message ShardGtid message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.ILastPKEvent, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IShardGtid, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a LastPKEvent message from the specified reader or buffer. + * Decodes a ShardGtid message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns LastPKEvent + * @returns ShardGtid * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.LastPKEvent; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.ShardGtid; /** - * Decodes a LastPKEvent message from the specified reader or buffer, length delimited. + * Decodes a ShardGtid message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns LastPKEvent + * @returns ShardGtid * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.LastPKEvent; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.ShardGtid; /** - * Verifies a LastPKEvent message. + * Verifies a ShardGtid message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a LastPKEvent message from a plain object. Also converts values to their respective internal types. + * Creates a ShardGtid message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns LastPKEvent + * @returns ShardGtid */ - public static fromObject(object: { [k: string]: any }): binlogdata.LastPKEvent; + public static fromObject(object: { [k: string]: any }): binlogdata.ShardGtid; /** - * Creates a plain object from a LastPKEvent message. Also converts values to other types if specified. - * @param message LastPKEvent + * Creates a plain object from a ShardGtid message. Also converts values to other types if specified. + * @param message ShardGtid * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.LastPKEvent, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.ShardGtid, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this LastPKEvent to JSON. + * Converts this ShardGtid to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for LastPKEvent + * Gets the default type url for ShardGtid * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a TableLastPK. */ - interface ITableLastPK { - - /** TableLastPK table_name */ - table_name?: (string|null); + /** Properties of a VGtid. */ + interface IVGtid { - /** TableLastPK lastpk */ - lastpk?: (query.IQueryResult|null); + /** VGtid shard_gtids */ + shard_gtids?: (binlogdata.IShardGtid[]|null); } - /** Represents a TableLastPK. */ - class TableLastPK implements ITableLastPK { + /** Represents a VGtid. */ + class VGtid implements IVGtid { /** - * Constructs a new TableLastPK. + * Constructs a new VGtid. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.ITableLastPK); - - /** TableLastPK table_name. */ - public table_name: string; + constructor(properties?: binlogdata.IVGtid); - /** TableLastPK lastpk. */ - public lastpk?: (query.IQueryResult|null); + /** VGtid shard_gtids. */ + public shard_gtids: binlogdata.IShardGtid[]; /** - * Creates a new TableLastPK instance using the specified properties. + * Creates a new VGtid instance using the specified properties. * @param [properties] Properties to set - * @returns TableLastPK instance + * @returns VGtid instance */ - public static create(properties?: binlogdata.ITableLastPK): binlogdata.TableLastPK; + public static create(properties?: binlogdata.IVGtid): binlogdata.VGtid; /** - * Encodes the specified TableLastPK message. Does not implicitly {@link binlogdata.TableLastPK.verify|verify} messages. - * @param message TableLastPK message or plain object to encode + * Encodes the specified VGtid message. Does not implicitly {@link binlogdata.VGtid.verify|verify} messages. 
+ * @param message VGtid message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.ITableLastPK, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IVGtid, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified TableLastPK message, length delimited. Does not implicitly {@link binlogdata.TableLastPK.verify|verify} messages. - * @param message TableLastPK message or plain object to encode + * Encodes the specified VGtid message, length delimited. Does not implicitly {@link binlogdata.VGtid.verify|verify} messages. + * @param message VGtid message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.ITableLastPK, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IVGtid, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a TableLastPK message from the specified reader or buffer. + * Decodes a VGtid message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns TableLastPK + * @returns VGtid * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.TableLastPK; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.VGtid; /** - * Decodes a TableLastPK message from the specified reader or buffer, length delimited. + * Decodes a VGtid message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns TableLastPK + * @returns VGtid * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.TableLastPK; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.VGtid; /** - * Verifies a TableLastPK message. + * Verifies a VGtid message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a TableLastPK message from a plain object. Also converts values to their respective internal types. + * Creates a VGtid message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns TableLastPK + * @returns VGtid */ - public static fromObject(object: { [k: string]: any }): binlogdata.TableLastPK; + public static fromObject(object: { [k: string]: any }): binlogdata.VGtid; /** - * Creates a plain object from a TableLastPK message. Also converts values to other types if specified. - * @param message TableLastPK + * Creates a plain object from a VGtid message. Also converts values to other types if specified. + * @param message VGtid * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.TableLastPK, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.VGtid, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this TableLastPK to JSON. + * Converts this VGtid to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for TableLastPK + * Gets the default type url for VGtid * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a VStreamResultsRequest. */ - interface IVStreamResultsRequest { - - /** VStreamResultsRequest effective_caller_id */ - effective_caller_id?: (vtrpc.ICallerID|null); - - /** VStreamResultsRequest immediate_caller_id */ - immediate_caller_id?: (query.IVTGateCallerID|null); + /** Properties of a KeyspaceShard. */ + interface IKeyspaceShard { - /** VStreamResultsRequest target */ - target?: (query.ITarget|null); + /** KeyspaceShard keyspace */ + keyspace?: (string|null); - /** VStreamResultsRequest query */ - query?: (string|null); + /** KeyspaceShard shard */ + shard?: (string|null); } - /** Represents a VStreamResultsRequest. */ - class VStreamResultsRequest implements IVStreamResultsRequest { + /** Represents a KeyspaceShard. */ + class KeyspaceShard implements IKeyspaceShard { /** - * Constructs a new VStreamResultsRequest. + * Constructs a new KeyspaceShard. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.IVStreamResultsRequest); - - /** VStreamResultsRequest effective_caller_id. */ - public effective_caller_id?: (vtrpc.ICallerID|null); + constructor(properties?: binlogdata.IKeyspaceShard); - /** VStreamResultsRequest immediate_caller_id. */ - public immediate_caller_id?: (query.IVTGateCallerID|null); - - /** VStreamResultsRequest target. */ - public target?: (query.ITarget|null); + /** KeyspaceShard keyspace. */ + public keyspace: string; - /** VStreamResultsRequest query. */ - public query: string; + /** KeyspaceShard shard. */ + public shard: string; /** - * Creates a new VStreamResultsRequest instance using the specified properties. 
+ * Creates a new KeyspaceShard instance using the specified properties. * @param [properties] Properties to set - * @returns VStreamResultsRequest instance + * @returns KeyspaceShard instance */ - public static create(properties?: binlogdata.IVStreamResultsRequest): binlogdata.VStreamResultsRequest; + public static create(properties?: binlogdata.IKeyspaceShard): binlogdata.KeyspaceShard; /** - * Encodes the specified VStreamResultsRequest message. Does not implicitly {@link binlogdata.VStreamResultsRequest.verify|verify} messages. - * @param message VStreamResultsRequest message or plain object to encode + * Encodes the specified KeyspaceShard message. Does not implicitly {@link binlogdata.KeyspaceShard.verify|verify} messages. + * @param message KeyspaceShard message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IVStreamResultsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IKeyspaceShard, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified VStreamResultsRequest message, length delimited. Does not implicitly {@link binlogdata.VStreamResultsRequest.verify|verify} messages. - * @param message VStreamResultsRequest message or plain object to encode + * Encodes the specified KeyspaceShard message, length delimited. Does not implicitly {@link binlogdata.KeyspaceShard.verify|verify} messages. + * @param message KeyspaceShard message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IVStreamResultsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IKeyspaceShard, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a VStreamResultsRequest message from the specified reader or buffer. + * Decodes a KeyspaceShard message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns VStreamResultsRequest + * @returns KeyspaceShard * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.VStreamResultsRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.KeyspaceShard; /** - * Decodes a VStreamResultsRequest message from the specified reader or buffer, length delimited. + * Decodes a KeyspaceShard message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns VStreamResultsRequest + * @returns KeyspaceShard * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.VStreamResultsRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.KeyspaceShard; /** - * Verifies a VStreamResultsRequest message. + * Verifies a KeyspaceShard message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a VStreamResultsRequest message from a plain object. Also converts values to their respective internal types. + * Creates a KeyspaceShard message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns VStreamResultsRequest + * @returns KeyspaceShard */ - public static fromObject(object: { [k: string]: any }): binlogdata.VStreamResultsRequest; + public static fromObject(object: { [k: string]: any }): binlogdata.KeyspaceShard; /** - * Creates a plain object from a VStreamResultsRequest message. Also converts values to other types if specified. - * @param message VStreamResultsRequest + * Creates a plain object from a KeyspaceShard message. Also converts values to other types if specified. + * @param message KeyspaceShard * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.VStreamResultsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.KeyspaceShard, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this VStreamResultsRequest to JSON. + * Converts this KeyspaceShard to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for VStreamResultsRequest + * Gets the default type url for KeyspaceShard * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a VStreamResultsResponse. */ - interface IVStreamResultsResponse { + /** MigrationType enum. */ + enum MigrationType { + TABLES = 0, + SHARDS = 1 + } - /** VStreamResultsResponse fields */ - fields?: (query.IField[]|null); + /** Properties of a Journal. 
*/ + interface IJournal { - /** VStreamResultsResponse gtid */ - gtid?: (string|null); + /** Journal id */ + id?: (number|Long|null); - /** VStreamResultsResponse rows */ - rows?: (query.IRow[]|null); + /** Journal migration_type */ + migration_type?: (binlogdata.MigrationType|null); + + /** Journal tables */ + tables?: (string[]|null); + + /** Journal local_position */ + local_position?: (string|null); + + /** Journal shard_gtids */ + shard_gtids?: (binlogdata.IShardGtid[]|null); + + /** Journal participants */ + participants?: (binlogdata.IKeyspaceShard[]|null); + + /** Journal source_workflows */ + source_workflows?: (string[]|null); } - /** Represents a VStreamResultsResponse. */ - class VStreamResultsResponse implements IVStreamResultsResponse { + /** Represents a Journal. */ + class Journal implements IJournal { /** - * Constructs a new VStreamResultsResponse. + * Constructs a new Journal. * @param [properties] Properties to set */ - constructor(properties?: binlogdata.IVStreamResultsResponse); + constructor(properties?: binlogdata.IJournal); - /** VStreamResultsResponse fields. */ - public fields: query.IField[]; + /** Journal id. */ + public id: (number|Long); - /** VStreamResultsResponse gtid. */ - public gtid: string; + /** Journal migration_type. */ + public migration_type: binlogdata.MigrationType; - /** VStreamResultsResponse rows. */ - public rows: query.IRow[]; + /** Journal tables. */ + public tables: string[]; + + /** Journal local_position. */ + public local_position: string; + + /** Journal shard_gtids. */ + public shard_gtids: binlogdata.IShardGtid[]; + + /** Journal participants. */ + public participants: binlogdata.IKeyspaceShard[]; + + /** Journal source_workflows. */ + public source_workflows: string[]; /** - * Creates a new VStreamResultsResponse instance using the specified properties. + * Creates a new Journal instance using the specified properties. 
* @param [properties] Properties to set - * @returns VStreamResultsResponse instance + * @returns Journal instance */ - public static create(properties?: binlogdata.IVStreamResultsResponse): binlogdata.VStreamResultsResponse; + public static create(properties?: binlogdata.IJournal): binlogdata.Journal; /** - * Encodes the specified VStreamResultsResponse message. Does not implicitly {@link binlogdata.VStreamResultsResponse.verify|verify} messages. - * @param message VStreamResultsResponse message or plain object to encode + * Encodes the specified Journal message. Does not implicitly {@link binlogdata.Journal.verify|verify} messages. + * @param message Journal message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: binlogdata.IVStreamResultsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IJournal, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified VStreamResultsResponse message, length delimited. Does not implicitly {@link binlogdata.VStreamResultsResponse.verify|verify} messages. - * @param message VStreamResultsResponse message or plain object to encode + * Encodes the specified Journal message, length delimited. Does not implicitly {@link binlogdata.Journal.verify|verify} messages. + * @param message Journal message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: binlogdata.IVStreamResultsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IJournal, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a VStreamResultsResponse message from the specified reader or buffer. + * Decodes a Journal message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns VStreamResultsResponse + * @returns Journal * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.VStreamResultsResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.Journal; /** - * Decodes a VStreamResultsResponse message from the specified reader or buffer, length delimited. + * Decodes a Journal message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns VStreamResultsResponse + * @returns Journal * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.VStreamResultsResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.Journal; /** - * Verifies a VStreamResultsResponse message. + * Verifies a Journal message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a VStreamResultsResponse message from a plain object. Also converts values to their respective internal types. + * Creates a Journal message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns VStreamResultsResponse + * @returns Journal */ - public static fromObject(object: { [k: string]: any }): binlogdata.VStreamResultsResponse; + public static fromObject(object: { [k: string]: any }): binlogdata.Journal; /** - * Creates a plain object from a VStreamResultsResponse message. 
Also converts values to other types if specified. - * @param message VStreamResultsResponse + * Creates a plain object from a Journal message. Also converts values to other types if specified. + * @param message Journal * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: binlogdata.VStreamResultsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.Journal, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this VStreamResultsResponse to JSON. + * Converts this Journal to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for VStreamResultsResponse + * Gets the default type url for Journal * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } -} -/** Namespace vtrpc. */ -export namespace vtrpc { + /** Properties of a VEvent. */ + interface IVEvent { - /** Properties of a CallerID. 
*/ - interface ICallerID { + /** VEvent type */ + type?: (binlogdata.VEventType|null); - /** CallerID principal */ - principal?: (string|null); + /** VEvent timestamp */ + timestamp?: (number|Long|null); - /** CallerID component */ - component?: (string|null); + /** VEvent gtid */ + gtid?: (string|null); - /** CallerID subcomponent */ - subcomponent?: (string|null); + /** VEvent statement */ + statement?: (string|null); - /** CallerID groups */ - groups?: (string[]|null); + /** VEvent row_event */ + row_event?: (binlogdata.IRowEvent|null); + + /** VEvent field_event */ + field_event?: (binlogdata.IFieldEvent|null); + + /** VEvent vgtid */ + vgtid?: (binlogdata.IVGtid|null); + + /** VEvent journal */ + journal?: (binlogdata.IJournal|null); + + /** VEvent dml */ + dml?: (string|null); + + /** VEvent current_time */ + current_time?: (number|Long|null); + + /** VEvent last_p_k_event */ + last_p_k_event?: (binlogdata.ILastPKEvent|null); + + /** VEvent keyspace */ + keyspace?: (string|null); + + /** VEvent shard */ + shard?: (string|null); + + /** VEvent throttled */ + throttled?: (boolean|null); } - /** Represents a CallerID. */ - class CallerID implements ICallerID { + /** Represents a VEvent. */ + class VEvent implements IVEvent { /** - * Constructs a new CallerID. + * Constructs a new VEvent. * @param [properties] Properties to set */ - constructor(properties?: vtrpc.ICallerID); + constructor(properties?: binlogdata.IVEvent); - /** CallerID principal. */ - public principal: string; + /** VEvent type. */ + public type: binlogdata.VEventType; - /** CallerID component. */ - public component: string; + /** VEvent timestamp. */ + public timestamp: (number|Long); - /** CallerID subcomponent. */ - public subcomponent: string; + /** VEvent gtid. */ + public gtid: string; - /** CallerID groups. */ - public groups: string[]; + /** VEvent statement. */ + public statement: string; + + /** VEvent row_event. 
*/ + public row_event?: (binlogdata.IRowEvent|null); + + /** VEvent field_event. */ + public field_event?: (binlogdata.IFieldEvent|null); + + /** VEvent vgtid. */ + public vgtid?: (binlogdata.IVGtid|null); + + /** VEvent journal. */ + public journal?: (binlogdata.IJournal|null); + + /** VEvent dml. */ + public dml: string; + + /** VEvent current_time. */ + public current_time: (number|Long); + + /** VEvent last_p_k_event. */ + public last_p_k_event?: (binlogdata.ILastPKEvent|null); + + /** VEvent keyspace. */ + public keyspace: string; + + /** VEvent shard. */ + public shard: string; + + /** VEvent throttled. */ + public throttled: boolean; /** - * Creates a new CallerID instance using the specified properties. + * Creates a new VEvent instance using the specified properties. * @param [properties] Properties to set - * @returns CallerID instance + * @returns VEvent instance */ - public static create(properties?: vtrpc.ICallerID): vtrpc.CallerID; + public static create(properties?: binlogdata.IVEvent): binlogdata.VEvent; /** - * Encodes the specified CallerID message. Does not implicitly {@link vtrpc.CallerID.verify|verify} messages. - * @param message CallerID message or plain object to encode + * Encodes the specified VEvent message. Does not implicitly {@link binlogdata.VEvent.verify|verify} messages. + * @param message VEvent message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtrpc.ICallerID, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IVEvent, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified CallerID message, length delimited. Does not implicitly {@link vtrpc.CallerID.verify|verify} messages. - * @param message CallerID message or plain object to encode + * Encodes the specified VEvent message, length delimited. Does not implicitly {@link binlogdata.VEvent.verify|verify} messages. 
+ * @param message VEvent message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtrpc.ICallerID, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IVEvent, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a CallerID message from the specified reader or buffer. + * Decodes a VEvent message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns CallerID + * @returns VEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtrpc.CallerID; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.VEvent; /** - * Decodes a CallerID message from the specified reader or buffer, length delimited. + * Decodes a VEvent message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns CallerID + * @returns VEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtrpc.CallerID; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.VEvent; /** - * Verifies a CallerID message. + * Verifies a VEvent message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a CallerID message from a plain object. Also converts values to their respective internal types. + * Creates a VEvent message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns CallerID + * @returns VEvent */ - public static fromObject(object: { [k: string]: any }): vtrpc.CallerID; + public static fromObject(object: { [k: string]: any }): binlogdata.VEvent; /** - * Creates a plain object from a CallerID message. Also converts values to other types if specified. - * @param message CallerID + * Creates a plain object from a VEvent message. Also converts values to other types if specified. + * @param message VEvent * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtrpc.CallerID, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.VEvent, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this CallerID to JSON. + * Converts this VEvent to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for CallerID + * Gets the default type url for VEvent * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Code enum. */ - enum Code { - OK = 0, - CANCELED = 1, - UNKNOWN = 2, - INVALID_ARGUMENT = 3, - DEADLINE_EXCEEDED = 4, - NOT_FOUND = 5, - ALREADY_EXISTS = 6, - PERMISSION_DENIED = 7, - RESOURCE_EXHAUSTED = 8, - FAILED_PRECONDITION = 9, - ABORTED = 10, - OUT_OF_RANGE = 11, - UNIMPLEMENTED = 12, - INTERNAL = 13, - UNAVAILABLE = 14, - DATA_LOSS = 15, - UNAUTHENTICATED = 16, - CLUSTER_EVENT = 17, - READ_ONLY = 18 - } + /** Properties of a MinimalTable. */ + interface IMinimalTable { - /** Properties of a RPCError. 
*/ - interface IRPCError { + /** MinimalTable name */ + name?: (string|null); - /** RPCError message */ - message?: (string|null); + /** MinimalTable fields */ + fields?: (query.IField[]|null); - /** RPCError code */ - code?: (vtrpc.Code|null); + /** MinimalTable p_k_columns */ + p_k_columns?: ((number|Long)[]|null); } - /** Represents a RPCError. */ - class RPCError implements IRPCError { + /** Represents a MinimalTable. */ + class MinimalTable implements IMinimalTable { /** - * Constructs a new RPCError. + * Constructs a new MinimalTable. * @param [properties] Properties to set */ - constructor(properties?: vtrpc.IRPCError); + constructor(properties?: binlogdata.IMinimalTable); - /** RPCError message. */ - public message: string; + /** MinimalTable name. */ + public name: string; - /** RPCError code. */ - public code: vtrpc.Code; + /** MinimalTable fields. */ + public fields: query.IField[]; + + /** MinimalTable p_k_columns. */ + public p_k_columns: (number|Long)[]; /** - * Creates a new RPCError instance using the specified properties. + * Creates a new MinimalTable instance using the specified properties. * @param [properties] Properties to set - * @returns RPCError instance + * @returns MinimalTable instance */ - public static create(properties?: vtrpc.IRPCError): vtrpc.RPCError; + public static create(properties?: binlogdata.IMinimalTable): binlogdata.MinimalTable; /** - * Encodes the specified RPCError message. Does not implicitly {@link vtrpc.RPCError.verify|verify} messages. - * @param message RPCError message or plain object to encode + * Encodes the specified MinimalTable message. Does not implicitly {@link binlogdata.MinimalTable.verify|verify} messages. 
+ * @param message MinimalTable message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtrpc.IRPCError, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IMinimalTable, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RPCError message, length delimited. Does not implicitly {@link vtrpc.RPCError.verify|verify} messages. - * @param message RPCError message or plain object to encode + * Encodes the specified MinimalTable message, length delimited. Does not implicitly {@link binlogdata.MinimalTable.verify|verify} messages. + * @param message MinimalTable message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtrpc.IRPCError, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IMinimalTable, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RPCError message from the specified reader or buffer. + * Decodes a MinimalTable message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RPCError + * @returns MinimalTable * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtrpc.RPCError; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.MinimalTable; /** - * Decodes a RPCError message from the specified reader or buffer, length delimited. + * Decodes a MinimalTable message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns RPCError + * @returns MinimalTable * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtrpc.RPCError; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.MinimalTable; /** - * Verifies a RPCError message. + * Verifies a MinimalTable message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RPCError message from a plain object. Also converts values to their respective internal types. + * Creates a MinimalTable message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns RPCError + * @returns MinimalTable */ - public static fromObject(object: { [k: string]: any }): vtrpc.RPCError; + public static fromObject(object: { [k: string]: any }): binlogdata.MinimalTable; /** - * Creates a plain object from a RPCError message. Also converts values to other types if specified. - * @param message RPCError + * Creates a plain object from a MinimalTable message. Also converts values to other types if specified. + * @param message MinimalTable * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtrpc.RPCError, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.MinimalTable, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RPCError to JSON. + * Converts this MinimalTable to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RPCError + * Gets the default type url for MinimalTable * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } -} - -/** Namespace query. */ -export namespace query { - - /** Properties of a Target. */ - interface ITarget { - - /** Target keyspace */ - keyspace?: (string|null); - /** Target shard */ - shard?: (string|null); - - /** Target tablet_type */ - tablet_type?: (topodata.TabletType|null); + /** Properties of a MinimalSchema. */ + interface IMinimalSchema { - /** Target cell */ - cell?: (string|null); + /** MinimalSchema tables */ + tables?: (binlogdata.IMinimalTable[]|null); } - /** Represents a Target. */ - class Target implements ITarget { + /** Represents a MinimalSchema. */ + class MinimalSchema implements IMinimalSchema { /** - * Constructs a new Target. + * Constructs a new MinimalSchema. * @param [properties] Properties to set */ - constructor(properties?: query.ITarget); - - /** Target keyspace. */ - public keyspace: string; - - /** Target shard. */ - public shard: string; - - /** Target tablet_type. */ - public tablet_type: topodata.TabletType; + constructor(properties?: binlogdata.IMinimalSchema); - /** Target cell. */ - public cell: string; + /** MinimalSchema tables. */ + public tables: binlogdata.IMinimalTable[]; /** - * Creates a new Target instance using the specified properties. + * Creates a new MinimalSchema instance using the specified properties. * @param [properties] Properties to set - * @returns Target instance + * @returns MinimalSchema instance */ - public static create(properties?: query.ITarget): query.Target; + public static create(properties?: binlogdata.IMinimalSchema): binlogdata.MinimalSchema; /** - * Encodes the specified Target message. Does not implicitly {@link query.Target.verify|verify} messages. 
- * @param message Target message or plain object to encode + * Encodes the specified MinimalSchema message. Does not implicitly {@link binlogdata.MinimalSchema.verify|verify} messages. + * @param message MinimalSchema message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.ITarget, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IMinimalSchema, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified Target message, length delimited. Does not implicitly {@link query.Target.verify|verify} messages. - * @param message Target message or plain object to encode + * Encodes the specified MinimalSchema message, length delimited. Does not implicitly {@link binlogdata.MinimalSchema.verify|verify} messages. + * @param message MinimalSchema message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.ITarget, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IMinimalSchema, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a Target message from the specified reader or buffer. + * Decodes a MinimalSchema message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns Target + * @returns MinimalSchema * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.Target; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.MinimalSchema; /** - * Decodes a Target message from the specified reader or buffer, length delimited. + * Decodes a MinimalSchema message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns Target + * @returns MinimalSchema * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.Target; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.MinimalSchema; /** - * Verifies a Target message. + * Verifies a MinimalSchema message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a Target message from a plain object. Also converts values to their respective internal types. + * Creates a MinimalSchema message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns Target + * @returns MinimalSchema */ - public static fromObject(object: { [k: string]: any }): query.Target; + public static fromObject(object: { [k: string]: any }): binlogdata.MinimalSchema; /** - * Creates a plain object from a Target message. Also converts values to other types if specified. - * @param message Target + * Creates a plain object from a MinimalSchema message. Also converts values to other types if specified. + * @param message MinimalSchema * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.Target, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.MinimalSchema, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this Target to JSON. + * Converts this MinimalSchema to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for Target + * Gets the default type url for MinimalSchema * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a VTGateCallerID. */ - interface IVTGateCallerID { + /** Properties of a VStreamRequest. */ + interface IVStreamRequest { - /** VTGateCallerID username */ - username?: (string|null); + /** VStreamRequest effective_caller_id */ + effective_caller_id?: (vtrpc.ICallerID|null); - /** VTGateCallerID groups */ - groups?: (string[]|null); + /** VStreamRequest immediate_caller_id */ + immediate_caller_id?: (query.IVTGateCallerID|null); + + /** VStreamRequest target */ + target?: (query.ITarget|null); + + /** VStreamRequest position */ + position?: (string|null); + + /** VStreamRequest filter */ + filter?: (binlogdata.IFilter|null); + + /** VStreamRequest table_last_p_ks */ + table_last_p_ks?: (binlogdata.ITableLastPK[]|null); } - /** Represents a VTGateCallerID. */ - class VTGateCallerID implements IVTGateCallerID { + /** Represents a VStreamRequest. */ + class VStreamRequest implements IVStreamRequest { /** - * Constructs a new VTGateCallerID. + * Constructs a new VStreamRequest. * @param [properties] Properties to set */ - constructor(properties?: query.IVTGateCallerID); + constructor(properties?: binlogdata.IVStreamRequest); - /** VTGateCallerID username. */ - public username: string; + /** VStreamRequest effective_caller_id. */ + public effective_caller_id?: (vtrpc.ICallerID|null); - /** VTGateCallerID groups. */ - public groups: string[]; + /** VStreamRequest immediate_caller_id. */ + public immediate_caller_id?: (query.IVTGateCallerID|null); + + /** VStreamRequest target. */ + public target?: (query.ITarget|null); + + /** VStreamRequest position. */ + public position: string; + + /** VStreamRequest filter. 
*/ + public filter?: (binlogdata.IFilter|null); + + /** VStreamRequest table_last_p_ks. */ + public table_last_p_ks: binlogdata.ITableLastPK[]; /** - * Creates a new VTGateCallerID instance using the specified properties. + * Creates a new VStreamRequest instance using the specified properties. * @param [properties] Properties to set - * @returns VTGateCallerID instance + * @returns VStreamRequest instance */ - public static create(properties?: query.IVTGateCallerID): query.VTGateCallerID; + public static create(properties?: binlogdata.IVStreamRequest): binlogdata.VStreamRequest; /** - * Encodes the specified VTGateCallerID message. Does not implicitly {@link query.VTGateCallerID.verify|verify} messages. - * @param message VTGateCallerID message or plain object to encode + * Encodes the specified VStreamRequest message. Does not implicitly {@link binlogdata.VStreamRequest.verify|verify} messages. + * @param message VStreamRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IVTGateCallerID, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IVStreamRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified VTGateCallerID message, length delimited. Does not implicitly {@link query.VTGateCallerID.verify|verify} messages. - * @param message VTGateCallerID message or plain object to encode + * Encodes the specified VStreamRequest message, length delimited. Does not implicitly {@link binlogdata.VStreamRequest.verify|verify} messages. 
+ * @param message VStreamRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IVTGateCallerID, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IVStreamRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a VTGateCallerID message from the specified reader or buffer. + * Decodes a VStreamRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns VTGateCallerID + * @returns VStreamRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.VTGateCallerID; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.VStreamRequest; /** - * Decodes a VTGateCallerID message from the specified reader or buffer, length delimited. + * Decodes a VStreamRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns VTGateCallerID + * @returns VStreamRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.VTGateCallerID; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.VStreamRequest; /** - * Verifies a VTGateCallerID message. + * Verifies a VStreamRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a VTGateCallerID message from a plain object. 
Also converts values to their respective internal types. + * Creates a VStreamRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns VTGateCallerID + * @returns VStreamRequest */ - public static fromObject(object: { [k: string]: any }): query.VTGateCallerID; + public static fromObject(object: { [k: string]: any }): binlogdata.VStreamRequest; /** - * Creates a plain object from a VTGateCallerID message. Also converts values to other types if specified. - * @param message VTGateCallerID + * Creates a plain object from a VStreamRequest message. Also converts values to other types if specified. + * @param message VStreamRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.VTGateCallerID, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.VStreamRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this VTGateCallerID to JSON. + * Converts this VStreamRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for VTGateCallerID + * Gets the default type url for VStreamRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an EventToken. */ - interface IEventToken { - - /** EventToken timestamp */ - timestamp?: (number|Long|null); - - /** EventToken shard */ - shard?: (string|null); + /** Properties of a VStreamResponse. */ + interface IVStreamResponse { - /** EventToken position */ - position?: (string|null); + /** VStreamResponse events */ + events?: (binlogdata.IVEvent[]|null); } - /** Represents an EventToken. */ - class EventToken implements IEventToken { + /** Represents a VStreamResponse. 
*/ + class VStreamResponse implements IVStreamResponse { /** - * Constructs a new EventToken. + * Constructs a new VStreamResponse. * @param [properties] Properties to set */ - constructor(properties?: query.IEventToken); - - /** EventToken timestamp. */ - public timestamp: (number|Long); - - /** EventToken shard. */ - public shard: string; + constructor(properties?: binlogdata.IVStreamResponse); - /** EventToken position. */ - public position: string; + /** VStreamResponse events. */ + public events: binlogdata.IVEvent[]; /** - * Creates a new EventToken instance using the specified properties. + * Creates a new VStreamResponse instance using the specified properties. * @param [properties] Properties to set - * @returns EventToken instance + * @returns VStreamResponse instance */ - public static create(properties?: query.IEventToken): query.EventToken; + public static create(properties?: binlogdata.IVStreamResponse): binlogdata.VStreamResponse; /** - * Encodes the specified EventToken message. Does not implicitly {@link query.EventToken.verify|verify} messages. - * @param message EventToken message or plain object to encode + * Encodes the specified VStreamResponse message. Does not implicitly {@link binlogdata.VStreamResponse.verify|verify} messages. + * @param message VStreamResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IEventToken, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IVStreamResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified EventToken message, length delimited. Does not implicitly {@link query.EventToken.verify|verify} messages. - * @param message EventToken message or plain object to encode + * Encodes the specified VStreamResponse message, length delimited. Does not implicitly {@link binlogdata.VStreamResponse.verify|verify} messages. 
+ * @param message VStreamResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IEventToken, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IVStreamResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an EventToken message from the specified reader or buffer. + * Decodes a VStreamResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns EventToken + * @returns VStreamResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.EventToken; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.VStreamResponse; /** - * Decodes an EventToken message from the specified reader or buffer, length delimited. + * Decodes a VStreamResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns EventToken + * @returns VStreamResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.EventToken; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.VStreamResponse; /** - * Verifies an EventToken message. + * Verifies a VStreamResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an EventToken message from a plain object. Also converts values to their respective internal types. 
+ * Creates a VStreamResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns EventToken + * @returns VStreamResponse */ - public static fromObject(object: { [k: string]: any }): query.EventToken; + public static fromObject(object: { [k: string]: any }): binlogdata.VStreamResponse; /** - * Creates a plain object from an EventToken message. Also converts values to other types if specified. - * @param message EventToken + * Creates a plain object from a VStreamResponse message. Also converts values to other types if specified. + * @param message VStreamResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.EventToken, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.VStreamResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this EventToken to JSON. + * Converts this VStreamResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for EventToken + * Gets the default type url for VStreamResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** MySqlFlag enum. */ - enum MySqlFlag { - EMPTY = 0, - NOT_NULL_FLAG = 1, - PRI_KEY_FLAG = 2, - UNIQUE_KEY_FLAG = 4, - MULTIPLE_KEY_FLAG = 8, - BLOB_FLAG = 16, - UNSIGNED_FLAG = 32, - ZEROFILL_FLAG = 64, - BINARY_FLAG = 128, - ENUM_FLAG = 256, - AUTO_INCREMENT_FLAG = 512, - TIMESTAMP_FLAG = 1024, - SET_FLAG = 2048, - NO_DEFAULT_VALUE_FLAG = 4096, - ON_UPDATE_NOW_FLAG = 8192, - NUM_FLAG = 32768, - PART_KEY_FLAG = 16384, - GROUP_FLAG = 32768, - UNIQUE_FLAG = 65536, - BINCMP_FLAG = 131072 - } + /** Properties of a VStreamRowsRequest. */ + interface IVStreamRowsRequest { - /** Flag enum. 
*/ - enum Flag { - NONE = 0, - ISINTEGRAL = 256, - ISUNSIGNED = 512, - ISFLOAT = 1024, - ISQUOTED = 2048, - ISTEXT = 4096, - ISBINARY = 8192 - } + /** VStreamRowsRequest effective_caller_id */ + effective_caller_id?: (vtrpc.ICallerID|null); - /** Type enum. */ - enum Type { - NULL_TYPE = 0, - INT8 = 257, - UINT8 = 770, - INT16 = 259, - UINT16 = 772, - INT24 = 261, - UINT24 = 774, - INT32 = 263, - UINT32 = 776, - INT64 = 265, - UINT64 = 778, - FLOAT32 = 1035, - FLOAT64 = 1036, - TIMESTAMP = 2061, - DATE = 2062, - TIME = 2063, - DATETIME = 2064, - YEAR = 785, - DECIMAL = 18, - TEXT = 6163, - BLOB = 10260, - VARCHAR = 6165, - VARBINARY = 10262, - CHAR = 6167, - BINARY = 10264, - BIT = 2073, - ENUM = 2074, - SET = 2075, - TUPLE = 28, - GEOMETRY = 2077, - JSON = 2078, - EXPRESSION = 31, - HEXNUM = 4128, - HEXVAL = 4129, - BITNUM = 4130 - } + /** VStreamRowsRequest immediate_caller_id */ + immediate_caller_id?: (query.IVTGateCallerID|null); - /** Properties of a Value. */ - interface IValue { + /** VStreamRowsRequest target */ + target?: (query.ITarget|null); - /** Value type */ - type?: (query.Type|null); + /** VStreamRowsRequest query */ + query?: (string|null); - /** Value value */ - value?: (Uint8Array|null); + /** VStreamRowsRequest lastpk */ + lastpk?: (query.IQueryResult|null); } - /** Represents a Value. */ - class Value implements IValue { + /** Represents a VStreamRowsRequest. */ + class VStreamRowsRequest implements IVStreamRowsRequest { /** - * Constructs a new Value. + * Constructs a new VStreamRowsRequest. * @param [properties] Properties to set */ - constructor(properties?: query.IValue); + constructor(properties?: binlogdata.IVStreamRowsRequest); - /** Value type. */ - public type: query.Type; + /** VStreamRowsRequest effective_caller_id. */ + public effective_caller_id?: (vtrpc.ICallerID|null); - /** Value value. */ - public value: Uint8Array; + /** VStreamRowsRequest immediate_caller_id. 
*/ + public immediate_caller_id?: (query.IVTGateCallerID|null); + + /** VStreamRowsRequest target. */ + public target?: (query.ITarget|null); + + /** VStreamRowsRequest query. */ + public query: string; + + /** VStreamRowsRequest lastpk. */ + public lastpk?: (query.IQueryResult|null); /** - * Creates a new Value instance using the specified properties. + * Creates a new VStreamRowsRequest instance using the specified properties. * @param [properties] Properties to set - * @returns Value instance + * @returns VStreamRowsRequest instance */ - public static create(properties?: query.IValue): query.Value; + public static create(properties?: binlogdata.IVStreamRowsRequest): binlogdata.VStreamRowsRequest; /** - * Encodes the specified Value message. Does not implicitly {@link query.Value.verify|verify} messages. - * @param message Value message or plain object to encode + * Encodes the specified VStreamRowsRequest message. Does not implicitly {@link binlogdata.VStreamRowsRequest.verify|verify} messages. + * @param message VStreamRowsRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IValue, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IVStreamRowsRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified Value message, length delimited. Does not implicitly {@link query.Value.verify|verify} messages. - * @param message Value message or plain object to encode + * Encodes the specified VStreamRowsRequest message, length delimited. Does not implicitly {@link binlogdata.VStreamRowsRequest.verify|verify} messages. 
+ * @param message VStreamRowsRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IValue, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IVStreamRowsRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a Value message from the specified reader or buffer. + * Decodes a VStreamRowsRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns Value + * @returns VStreamRowsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.Value; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.VStreamRowsRequest; /** - * Decodes a Value message from the specified reader or buffer, length delimited. + * Decodes a VStreamRowsRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns Value + * @returns VStreamRowsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.Value; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.VStreamRowsRequest; /** - * Verifies a Value message. + * Verifies a VStreamRowsRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a Value message from a plain object. Also converts values to their respective internal types. 
+ * Creates a VStreamRowsRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns Value + * @returns VStreamRowsRequest */ - public static fromObject(object: { [k: string]: any }): query.Value; + public static fromObject(object: { [k: string]: any }): binlogdata.VStreamRowsRequest; /** - * Creates a plain object from a Value message. Also converts values to other types if specified. - * @param message Value + * Creates a plain object from a VStreamRowsRequest message. Also converts values to other types if specified. + * @param message VStreamRowsRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.Value, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.VStreamRowsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this Value to JSON. + * Converts this VStreamRowsRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for Value + * Gets the default type url for VStreamRowsRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a BindVariable. */ - interface IBindVariable { + /** Properties of a VStreamRowsResponse. 
*/ + interface IVStreamRowsResponse { - /** BindVariable type */ - type?: (query.Type|null); + /** VStreamRowsResponse fields */ + fields?: (query.IField[]|null); - /** BindVariable value */ - value?: (Uint8Array|null); + /** VStreamRowsResponse pkfields */ + pkfields?: (query.IField[]|null); - /** BindVariable values */ - values?: (query.IValue[]|null); + /** VStreamRowsResponse gtid */ + gtid?: (string|null); + + /** VStreamRowsResponse rows */ + rows?: (query.IRow[]|null); + + /** VStreamRowsResponse lastpk */ + lastpk?: (query.IRow|null); + + /** VStreamRowsResponse throttled */ + throttled?: (boolean|null); + + /** VStreamRowsResponse heartbeat */ + heartbeat?: (boolean|null); } - /** Represents a BindVariable. */ - class BindVariable implements IBindVariable { + /** Represents a VStreamRowsResponse. */ + class VStreamRowsResponse implements IVStreamRowsResponse { /** - * Constructs a new BindVariable. + * Constructs a new VStreamRowsResponse. * @param [properties] Properties to set */ - constructor(properties?: query.IBindVariable); + constructor(properties?: binlogdata.IVStreamRowsResponse); - /** BindVariable type. */ - public type: query.Type; + /** VStreamRowsResponse fields. */ + public fields: query.IField[]; - /** BindVariable value. */ - public value: Uint8Array; + /** VStreamRowsResponse pkfields. */ + public pkfields: query.IField[]; - /** BindVariable values. */ - public values: query.IValue[]; + /** VStreamRowsResponse gtid. */ + public gtid: string; + + /** VStreamRowsResponse rows. */ + public rows: query.IRow[]; + + /** VStreamRowsResponse lastpk. */ + public lastpk?: (query.IRow|null); + + /** VStreamRowsResponse throttled. */ + public throttled: boolean; + + /** VStreamRowsResponse heartbeat. */ + public heartbeat: boolean; /** - * Creates a new BindVariable instance using the specified properties. + * Creates a new VStreamRowsResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns BindVariable instance + * @returns VStreamRowsResponse instance */ - public static create(properties?: query.IBindVariable): query.BindVariable; + public static create(properties?: binlogdata.IVStreamRowsResponse): binlogdata.VStreamRowsResponse; /** - * Encodes the specified BindVariable message. Does not implicitly {@link query.BindVariable.verify|verify} messages. - * @param message BindVariable message or plain object to encode + * Encodes the specified VStreamRowsResponse message. Does not implicitly {@link binlogdata.VStreamRowsResponse.verify|verify} messages. + * @param message VStreamRowsResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IBindVariable, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IVStreamRowsResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified BindVariable message, length delimited. Does not implicitly {@link query.BindVariable.verify|verify} messages. - * @param message BindVariable message or plain object to encode + * Encodes the specified VStreamRowsResponse message, length delimited. Does not implicitly {@link binlogdata.VStreamRowsResponse.verify|verify} messages. + * @param message VStreamRowsResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IBindVariable, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IVStreamRowsResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a BindVariable message from the specified reader or buffer. + * Decodes a VStreamRowsResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns BindVariable + * @returns VStreamRowsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.BindVariable; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.VStreamRowsResponse; /** - * Decodes a BindVariable message from the specified reader or buffer, length delimited. + * Decodes a VStreamRowsResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns BindVariable + * @returns VStreamRowsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.BindVariable; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.VStreamRowsResponse; /** - * Verifies a BindVariable message. + * Verifies a VStreamRowsResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a BindVariable message from a plain object. Also converts values to their respective internal types. + * Creates a VStreamRowsResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns BindVariable + * @returns VStreamRowsResponse */ - public static fromObject(object: { [k: string]: any }): query.BindVariable; + public static fromObject(object: { [k: string]: any }): binlogdata.VStreamRowsResponse; /** - * Creates a plain object from a BindVariable message. 
Also converts values to other types if specified. - * @param message BindVariable + * Creates a plain object from a VStreamRowsResponse message. Also converts values to other types if specified. + * @param message VStreamRowsResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.BindVariable, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.VStreamRowsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this BindVariable to JSON. + * Converts this VStreamRowsResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for BindVariable + * Gets the default type url for VStreamRowsResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a BoundQuery. */ - interface IBoundQuery { + /** Properties of a VStreamTablesRequest. */ + interface IVStreamTablesRequest { - /** BoundQuery sql */ - sql?: (string|null); + /** VStreamTablesRequest effective_caller_id */ + effective_caller_id?: (vtrpc.ICallerID|null); - /** BoundQuery bind_variables */ - bind_variables?: ({ [k: string]: query.IBindVariable }|null); + /** VStreamTablesRequest immediate_caller_id */ + immediate_caller_id?: (query.IVTGateCallerID|null); + + /** VStreamTablesRequest target */ + target?: (query.ITarget|null); } - /** Represents a BoundQuery. */ - class BoundQuery implements IBoundQuery { + /** Represents a VStreamTablesRequest. */ + class VStreamTablesRequest implements IVStreamTablesRequest { /** - * Constructs a new BoundQuery. + * Constructs a new VStreamTablesRequest. * @param [properties] Properties to set */ - constructor(properties?: query.IBoundQuery); + constructor(properties?: binlogdata.IVStreamTablesRequest); - /** BoundQuery sql. 
*/ - public sql: string; + /** VStreamTablesRequest effective_caller_id. */ + public effective_caller_id?: (vtrpc.ICallerID|null); - /** BoundQuery bind_variables. */ - public bind_variables: { [k: string]: query.IBindVariable }; + /** VStreamTablesRequest immediate_caller_id. */ + public immediate_caller_id?: (query.IVTGateCallerID|null); + + /** VStreamTablesRequest target. */ + public target?: (query.ITarget|null); /** - * Creates a new BoundQuery instance using the specified properties. + * Creates a new VStreamTablesRequest instance using the specified properties. * @param [properties] Properties to set - * @returns BoundQuery instance + * @returns VStreamTablesRequest instance */ - public static create(properties?: query.IBoundQuery): query.BoundQuery; + public static create(properties?: binlogdata.IVStreamTablesRequest): binlogdata.VStreamTablesRequest; /** - * Encodes the specified BoundQuery message. Does not implicitly {@link query.BoundQuery.verify|verify} messages. - * @param message BoundQuery message or plain object to encode + * Encodes the specified VStreamTablesRequest message. Does not implicitly {@link binlogdata.VStreamTablesRequest.verify|verify} messages. + * @param message VStreamTablesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IBoundQuery, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IVStreamTablesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified BoundQuery message, length delimited. Does not implicitly {@link query.BoundQuery.verify|verify} messages. - * @param message BoundQuery message or plain object to encode + * Encodes the specified VStreamTablesRequest message, length delimited. Does not implicitly {@link binlogdata.VStreamTablesRequest.verify|verify} messages. 
+ * @param message VStreamTablesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IBoundQuery, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IVStreamTablesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a BoundQuery message from the specified reader or buffer. + * Decodes a VStreamTablesRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns BoundQuery + * @returns VStreamTablesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.BoundQuery; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.VStreamTablesRequest; /** - * Decodes a BoundQuery message from the specified reader or buffer, length delimited. + * Decodes a VStreamTablesRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns BoundQuery + * @returns VStreamTablesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.BoundQuery; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.VStreamTablesRequest; /** - * Verifies a BoundQuery message. + * Verifies a VStreamTablesRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a BoundQuery message from a plain object. 
Also converts values to their respective internal types. + * Creates a VStreamTablesRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns BoundQuery + * @returns VStreamTablesRequest */ - public static fromObject(object: { [k: string]: any }): query.BoundQuery; + public static fromObject(object: { [k: string]: any }): binlogdata.VStreamTablesRequest; /** - * Creates a plain object from a BoundQuery message. Also converts values to other types if specified. - * @param message BoundQuery + * Creates a plain object from a VStreamTablesRequest message. Also converts values to other types if specified. + * @param message VStreamTablesRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.BoundQuery, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.VStreamTablesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this BoundQuery to JSON. + * Converts this VStreamTablesRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for BoundQuery + * Gets the default type url for VStreamTablesRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an ExecuteOptions. */ - interface IExecuteOptions { + /** Properties of a VStreamTablesResponse. 
*/ + interface IVStreamTablesResponse { - /** ExecuteOptions included_fields */ - included_fields?: (query.ExecuteOptions.IncludedFields|null); + /** VStreamTablesResponse table_name */ + table_name?: (string|null); - /** ExecuteOptions client_found_rows */ - client_found_rows?: (boolean|null); + /** VStreamTablesResponse fields */ + fields?: (query.IField[]|null); - /** ExecuteOptions workload */ - workload?: (query.ExecuteOptions.Workload|null); - - /** ExecuteOptions sql_select_limit */ - sql_select_limit?: (number|Long|null); - - /** ExecuteOptions transaction_isolation */ - transaction_isolation?: (query.ExecuteOptions.TransactionIsolation|null); - - /** ExecuteOptions skip_query_plan_cache */ - skip_query_plan_cache?: (boolean|null); - - /** ExecuteOptions planner_version */ - planner_version?: (query.ExecuteOptions.PlannerVersion|null); - - /** ExecuteOptions has_created_temp_tables */ - has_created_temp_tables?: (boolean|null); - - /** ExecuteOptions consolidator */ - consolidator?: (query.ExecuteOptions.Consolidator|null); - - /** ExecuteOptions transaction_access_mode */ - transaction_access_mode?: (query.ExecuteOptions.TransactionAccessMode[]|null); + /** VStreamTablesResponse pkfields */ + pkfields?: (query.IField[]|null); - /** ExecuteOptions WorkloadName */ - WorkloadName?: (string|null); + /** VStreamTablesResponse gtid */ + gtid?: (string|null); - /** ExecuteOptions priority */ - priority?: (string|null); + /** VStreamTablesResponse rows */ + rows?: (query.IRow[]|null); - /** ExecuteOptions uag_info */ - uag_info?: (string|null); + /** VStreamTablesResponse lastpk */ + lastpk?: (query.IRow|null); } - /** Represents an ExecuteOptions. */ - class ExecuteOptions implements IExecuteOptions { + /** Represents a VStreamTablesResponse. */ + class VStreamTablesResponse implements IVStreamTablesResponse { /** - * Constructs a new ExecuteOptions. + * Constructs a new VStreamTablesResponse. 
* @param [properties] Properties to set */ - constructor(properties?: query.IExecuteOptions); - - /** ExecuteOptions included_fields. */ - public included_fields: query.ExecuteOptions.IncludedFields; - - /** ExecuteOptions client_found_rows. */ - public client_found_rows: boolean; - - /** ExecuteOptions workload. */ - public workload: query.ExecuteOptions.Workload; - - /** ExecuteOptions sql_select_limit. */ - public sql_select_limit: (number|Long); - - /** ExecuteOptions transaction_isolation. */ - public transaction_isolation: query.ExecuteOptions.TransactionIsolation; - - /** ExecuteOptions skip_query_plan_cache. */ - public skip_query_plan_cache: boolean; - - /** ExecuteOptions planner_version. */ - public planner_version: query.ExecuteOptions.PlannerVersion; + constructor(properties?: binlogdata.IVStreamTablesResponse); - /** ExecuteOptions has_created_temp_tables. */ - public has_created_temp_tables: boolean; + /** VStreamTablesResponse table_name. */ + public table_name: string; - /** ExecuteOptions consolidator. */ - public consolidator: query.ExecuteOptions.Consolidator; + /** VStreamTablesResponse fields. */ + public fields: query.IField[]; - /** ExecuteOptions transaction_access_mode. */ - public transaction_access_mode: query.ExecuteOptions.TransactionAccessMode[]; + /** VStreamTablesResponse pkfields. */ + public pkfields: query.IField[]; - /** ExecuteOptions WorkloadName. */ - public WorkloadName: string; + /** VStreamTablesResponse gtid. */ + public gtid: string; - /** ExecuteOptions priority. */ - public priority: string; + /** VStreamTablesResponse rows. */ + public rows: query.IRow[]; - /** ExecuteOptions uag_info. */ - public uag_info: string; + /** VStreamTablesResponse lastpk. */ + public lastpk?: (query.IRow|null); /** - * Creates a new ExecuteOptions instance using the specified properties. + * Creates a new VStreamTablesResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns ExecuteOptions instance + * @returns VStreamTablesResponse instance */ - public static create(properties?: query.IExecuteOptions): query.ExecuteOptions; + public static create(properties?: binlogdata.IVStreamTablesResponse): binlogdata.VStreamTablesResponse; /** - * Encodes the specified ExecuteOptions message. Does not implicitly {@link query.ExecuteOptions.verify|verify} messages. - * @param message ExecuteOptions message or plain object to encode + * Encodes the specified VStreamTablesResponse message. Does not implicitly {@link binlogdata.VStreamTablesResponse.verify|verify} messages. + * @param message VStreamTablesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IExecuteOptions, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IVStreamTablesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ExecuteOptions message, length delimited. Does not implicitly {@link query.ExecuteOptions.verify|verify} messages. - * @param message ExecuteOptions message or plain object to encode + * Encodes the specified VStreamTablesResponse message, length delimited. Does not implicitly {@link binlogdata.VStreamTablesResponse.verify|verify} messages. + * @param message VStreamTablesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IExecuteOptions, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IVStreamTablesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an ExecuteOptions message from the specified reader or buffer. + * Decodes a VStreamTablesResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ExecuteOptions + * @returns VStreamTablesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ExecuteOptions; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.VStreamTablesResponse; /** - * Decodes an ExecuteOptions message from the specified reader or buffer, length delimited. + * Decodes a VStreamTablesResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ExecuteOptions + * @returns VStreamTablesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ExecuteOptions; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.VStreamTablesResponse; /** - * Verifies an ExecuteOptions message. + * Verifies a VStreamTablesResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an ExecuteOptions message from a plain object. Also converts values to their respective internal types. + * Creates a VStreamTablesResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ExecuteOptions + * @returns VStreamTablesResponse */ - public static fromObject(object: { [k: string]: any }): query.ExecuteOptions; + public static fromObject(object: { [k: string]: any }): binlogdata.VStreamTablesResponse; /** - * Creates a plain object from an ExecuteOptions message. Also converts values to other types if specified. - * @param message ExecuteOptions + * Creates a plain object from a VStreamTablesResponse message. Also converts values to other types if specified. + * @param message VStreamTablesResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.ExecuteOptions, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.VStreamTablesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ExecuteOptions to JSON. + * Converts this VStreamTablesResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ExecuteOptions + * Gets the default type url for VStreamTablesResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - namespace ExecuteOptions { - - /** IncludedFields enum. */ - enum IncludedFields { - TYPE_AND_NAME = 0, - TYPE_ONLY = 1, - ALL = 2 - } - - /** Workload enum. */ - enum Workload { - UNSPECIFIED = 0, - OLTP = 1, - OLAP = 2, - DBA = 3 - } - - /** TransactionIsolation enum. */ - enum TransactionIsolation { - DEFAULT = 0, - REPEATABLE_READ = 1, - READ_COMMITTED = 2, - READ_UNCOMMITTED = 3, - SERIALIZABLE = 4, - CONSISTENT_SNAPSHOT_READ_ONLY = 5, - AUTOCOMMIT = 6 - } - - /** PlannerVersion enum. 
*/ - enum PlannerVersion { - DEFAULT_PLANNER = 0, - V3 = 1, - Gen4 = 2, - Gen4Greedy = 3, - Gen4Left2Right = 4, - Gen4WithFallback = 5, - Gen4CompareV3 = 6, - V3Insert = 7 - } - - /** Consolidator enum. */ - enum Consolidator { - CONSOLIDATOR_UNSPECIFIED = 0, - CONSOLIDATOR_DISABLED = 1, - CONSOLIDATOR_ENABLED = 2, - CONSOLIDATOR_ENABLED_REPLICAS = 3 - } - - /** TransactionAccessMode enum. */ - enum TransactionAccessMode { - CONSISTENT_SNAPSHOT = 0, - READ_WRITE = 1, - READ_ONLY = 2 - } - } - - /** Properties of a Field. */ - interface IField { - - /** Field name */ - name?: (string|null); - - /** Field type */ - type?: (query.Type|null); - - /** Field table */ - table?: (string|null); - - /** Field org_table */ - org_table?: (string|null); - - /** Field database */ - database?: (string|null); - - /** Field org_name */ - org_name?: (string|null); - - /** Field column_length */ - column_length?: (number|null); - - /** Field charset */ - charset?: (number|null); - - /** Field decimals */ - decimals?: (number|null); + /** Properties of a LastPKEvent. */ + interface ILastPKEvent { - /** Field flags */ - flags?: (number|null); + /** LastPKEvent table_last_p_k */ + table_last_p_k?: (binlogdata.ITableLastPK|null); - /** Field column_type */ - column_type?: (string|null); + /** LastPKEvent completed */ + completed?: (boolean|null); } - /** Represents a Field. */ - class Field implements IField { + /** Represents a LastPKEvent. */ + class LastPKEvent implements ILastPKEvent { /** - * Constructs a new Field. + * Constructs a new LastPKEvent. * @param [properties] Properties to set */ - constructor(properties?: query.IField); - - /** Field name. */ - public name: string; - - /** Field type. */ - public type: query.Type; - - /** Field table. */ - public table: string; - - /** Field org_table. */ - public org_table: string; - - /** Field database. */ - public database: string; - - /** Field org_name. */ - public org_name: string; - - /** Field column_length. 
*/ - public column_length: number; - - /** Field charset. */ - public charset: number; - - /** Field decimals. */ - public decimals: number; + constructor(properties?: binlogdata.ILastPKEvent); - /** Field flags. */ - public flags: number; + /** LastPKEvent table_last_p_k. */ + public table_last_p_k?: (binlogdata.ITableLastPK|null); - /** Field column_type. */ - public column_type: string; + /** LastPKEvent completed. */ + public completed: boolean; /** - * Creates a new Field instance using the specified properties. + * Creates a new LastPKEvent instance using the specified properties. * @param [properties] Properties to set - * @returns Field instance + * @returns LastPKEvent instance */ - public static create(properties?: query.IField): query.Field; + public static create(properties?: binlogdata.ILastPKEvent): binlogdata.LastPKEvent; /** - * Encodes the specified Field message. Does not implicitly {@link query.Field.verify|verify} messages. - * @param message Field message or plain object to encode + * Encodes the specified LastPKEvent message. Does not implicitly {@link binlogdata.LastPKEvent.verify|verify} messages. + * @param message LastPKEvent message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IField, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.ILastPKEvent, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified Field message, length delimited. Does not implicitly {@link query.Field.verify|verify} messages. - * @param message Field message or plain object to encode + * Encodes the specified LastPKEvent message, length delimited. Does not implicitly {@link binlogdata.LastPKEvent.verify|verify} messages. 
+ * @param message LastPKEvent message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IField, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.ILastPKEvent, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a Field message from the specified reader or buffer. + * Decodes a LastPKEvent message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns Field + * @returns LastPKEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.Field; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.LastPKEvent; /** - * Decodes a Field message from the specified reader or buffer, length delimited. + * Decodes a LastPKEvent message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns Field + * @returns LastPKEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.Field; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.LastPKEvent; /** - * Verifies a Field message. + * Verifies a LastPKEvent message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a Field message from a plain object. Also converts values to their respective internal types. + * Creates a LastPKEvent message from a plain object. 
Also converts values to their respective internal types. * @param object Plain object - * @returns Field + * @returns LastPKEvent */ - public static fromObject(object: { [k: string]: any }): query.Field; + public static fromObject(object: { [k: string]: any }): binlogdata.LastPKEvent; /** - * Creates a plain object from a Field message. Also converts values to other types if specified. - * @param message Field + * Creates a plain object from a LastPKEvent message. Also converts values to other types if specified. + * @param message LastPKEvent * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.Field, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.LastPKEvent, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this Field to JSON. + * Converts this LastPKEvent to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for Field + * Gets the default type url for LastPKEvent * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a Row. */ - interface IRow { + /** Properties of a TableLastPK. */ + interface ITableLastPK { - /** Row lengths */ - lengths?: ((number|Long)[]|null); + /** TableLastPK table_name */ + table_name?: (string|null); - /** Row values */ - values?: (Uint8Array|null); + /** TableLastPK lastpk */ + lastpk?: (query.IQueryResult|null); } - /** Represents a Row. */ - class Row implements IRow { + /** Represents a TableLastPK. */ + class TableLastPK implements ITableLastPK { /** - * Constructs a new Row. + * Constructs a new TableLastPK. * @param [properties] Properties to set */ - constructor(properties?: query.IRow); + constructor(properties?: binlogdata.ITableLastPK); - /** Row lengths. 
*/ - public lengths: (number|Long)[]; + /** TableLastPK table_name. */ + public table_name: string; - /** Row values. */ - public values: Uint8Array; + /** TableLastPK lastpk. */ + public lastpk?: (query.IQueryResult|null); /** - * Creates a new Row instance using the specified properties. + * Creates a new TableLastPK instance using the specified properties. * @param [properties] Properties to set - * @returns Row instance + * @returns TableLastPK instance */ - public static create(properties?: query.IRow): query.Row; + public static create(properties?: binlogdata.ITableLastPK): binlogdata.TableLastPK; /** - * Encodes the specified Row message. Does not implicitly {@link query.Row.verify|verify} messages. - * @param message Row message or plain object to encode + * Encodes the specified TableLastPK message. Does not implicitly {@link binlogdata.TableLastPK.verify|verify} messages. + * @param message TableLastPK message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IRow, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.ITableLastPK, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified Row message, length delimited. Does not implicitly {@link query.Row.verify|verify} messages. - * @param message Row message or plain object to encode + * Encodes the specified TableLastPK message, length delimited. Does not implicitly {@link binlogdata.TableLastPK.verify|verify} messages. + * @param message TableLastPK message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IRow, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.ITableLastPK, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a Row message from the specified reader or buffer. 
+ * Decodes a TableLastPK message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns Row + * @returns TableLastPK * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.Row; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.TableLastPK; /** - * Decodes a Row message from the specified reader or buffer, length delimited. + * Decodes a TableLastPK message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns Row + * @returns TableLastPK * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.Row; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.TableLastPK; /** - * Verifies a Row message. + * Verifies a TableLastPK message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a Row message from a plain object. Also converts values to their respective internal types. + * Creates a TableLastPK message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns Row + * @returns TableLastPK */ - public static fromObject(object: { [k: string]: any }): query.Row; + public static fromObject(object: { [k: string]: any }): binlogdata.TableLastPK; /** - * Creates a plain object from a Row message. Also converts values to other types if specified. - * @param message Row + * Creates a plain object from a TableLastPK message. 
Also converts values to other types if specified. + * @param message TableLastPK * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.Row, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.TableLastPK, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this Row to JSON. + * Converts this TableLastPK to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for Row + * Gets the default type url for TableLastPK * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a QueryResult. */ - interface IQueryResult { - - /** QueryResult fields */ - fields?: (query.IField[]|null); - - /** QueryResult rows_affected */ - rows_affected?: (number|Long|null); + /** Properties of a VStreamResultsRequest. */ + interface IVStreamResultsRequest { - /** QueryResult insert_id */ - insert_id?: (number|Long|null); + /** VStreamResultsRequest effective_caller_id */ + effective_caller_id?: (vtrpc.ICallerID|null); - /** QueryResult rows */ - rows?: (query.IRow[]|null); + /** VStreamResultsRequest immediate_caller_id */ + immediate_caller_id?: (query.IVTGateCallerID|null); - /** QueryResult info */ - info?: (string|null); + /** VStreamResultsRequest target */ + target?: (query.ITarget|null); - /** QueryResult session_state_changes */ - session_state_changes?: (string|null); + /** VStreamResultsRequest query */ + query?: (string|null); } - /** Represents a QueryResult. */ - class QueryResult implements IQueryResult { + /** Represents a VStreamResultsRequest. */ + class VStreamResultsRequest implements IVStreamResultsRequest { /** - * Constructs a new QueryResult. + * Constructs a new VStreamResultsRequest. 
* @param [properties] Properties to set */ - constructor(properties?: query.IQueryResult); - - /** QueryResult fields. */ - public fields: query.IField[]; - - /** QueryResult rows_affected. */ - public rows_affected: (number|Long); + constructor(properties?: binlogdata.IVStreamResultsRequest); - /** QueryResult insert_id. */ - public insert_id: (number|Long); + /** VStreamResultsRequest effective_caller_id. */ + public effective_caller_id?: (vtrpc.ICallerID|null); - /** QueryResult rows. */ - public rows: query.IRow[]; + /** VStreamResultsRequest immediate_caller_id. */ + public immediate_caller_id?: (query.IVTGateCallerID|null); - /** QueryResult info. */ - public info: string; + /** VStreamResultsRequest target. */ + public target?: (query.ITarget|null); - /** QueryResult session_state_changes. */ - public session_state_changes: string; + /** VStreamResultsRequest query. */ + public query: string; /** - * Creates a new QueryResult instance using the specified properties. + * Creates a new VStreamResultsRequest instance using the specified properties. * @param [properties] Properties to set - * @returns QueryResult instance + * @returns VStreamResultsRequest instance */ - public static create(properties?: query.IQueryResult): query.QueryResult; + public static create(properties?: binlogdata.IVStreamResultsRequest): binlogdata.VStreamResultsRequest; /** - * Encodes the specified QueryResult message. Does not implicitly {@link query.QueryResult.verify|verify} messages. - * @param message QueryResult message or plain object to encode + * Encodes the specified VStreamResultsRequest message. Does not implicitly {@link binlogdata.VStreamResultsRequest.verify|verify} messages. 
+ * @param message VStreamResultsRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IQueryResult, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IVStreamResultsRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified QueryResult message, length delimited. Does not implicitly {@link query.QueryResult.verify|verify} messages. - * @param message QueryResult message or plain object to encode + * Encodes the specified VStreamResultsRequest message, length delimited. Does not implicitly {@link binlogdata.VStreamResultsRequest.verify|verify} messages. + * @param message VStreamResultsRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IQueryResult, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IVStreamResultsRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a QueryResult message from the specified reader or buffer. + * Decodes a VStreamResultsRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns QueryResult + * @returns VStreamResultsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.QueryResult; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.VStreamResultsRequest; /** - * Decodes a QueryResult message from the specified reader or buffer, length delimited. + * Decodes a VStreamResultsRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns QueryResult + * @returns VStreamResultsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.QueryResult; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.VStreamResultsRequest; /** - * Verifies a QueryResult message. + * Verifies a VStreamResultsRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a QueryResult message from a plain object. Also converts values to their respective internal types. + * Creates a VStreamResultsRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns QueryResult + * @returns VStreamResultsRequest */ - public static fromObject(object: { [k: string]: any }): query.QueryResult; + public static fromObject(object: { [k: string]: any }): binlogdata.VStreamResultsRequest; /** - * Creates a plain object from a QueryResult message. Also converts values to other types if specified. - * @param message QueryResult + * Creates a plain object from a VStreamResultsRequest message. Also converts values to other types if specified. + * @param message VStreamResultsRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.QueryResult, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.VStreamResultsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this QueryResult to JSON. + * Converts this VStreamResultsRequest to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for QueryResult + * Gets the default type url for VStreamResultsRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a QueryWarning. */ - interface IQueryWarning { + /** Properties of a VStreamResultsResponse. */ + interface IVStreamResultsResponse { - /** QueryWarning code */ - code?: (number|null); + /** VStreamResultsResponse fields */ + fields?: (query.IField[]|null); - /** QueryWarning message */ - message?: (string|null); + /** VStreamResultsResponse gtid */ + gtid?: (string|null); + + /** VStreamResultsResponse rows */ + rows?: (query.IRow[]|null); } - /** Represents a QueryWarning. */ - class QueryWarning implements IQueryWarning { + /** Represents a VStreamResultsResponse. */ + class VStreamResultsResponse implements IVStreamResultsResponse { /** - * Constructs a new QueryWarning. + * Constructs a new VStreamResultsResponse. * @param [properties] Properties to set */ - constructor(properties?: query.IQueryWarning); + constructor(properties?: binlogdata.IVStreamResultsResponse); - /** QueryWarning code. */ - public code: number; + /** VStreamResultsResponse fields. */ + public fields: query.IField[]; - /** QueryWarning message. */ - public message: string; + /** VStreamResultsResponse gtid. */ + public gtid: string; + + /** VStreamResultsResponse rows. */ + public rows: query.IRow[]; /** - * Creates a new QueryWarning instance using the specified properties. + * Creates a new VStreamResultsResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns QueryWarning instance + * @returns VStreamResultsResponse instance */ - public static create(properties?: query.IQueryWarning): query.QueryWarning; + public static create(properties?: binlogdata.IVStreamResultsResponse): binlogdata.VStreamResultsResponse; /** - * Encodes the specified QueryWarning message. Does not implicitly {@link query.QueryWarning.verify|verify} messages. - * @param message QueryWarning message or plain object to encode + * Encodes the specified VStreamResultsResponse message. Does not implicitly {@link binlogdata.VStreamResultsResponse.verify|verify} messages. + * @param message VStreamResultsResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IQueryWarning, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: binlogdata.IVStreamResultsResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified QueryWarning message, length delimited. Does not implicitly {@link query.QueryWarning.verify|verify} messages. - * @param message QueryWarning message or plain object to encode + * Encodes the specified VStreamResultsResponse message, length delimited. Does not implicitly {@link binlogdata.VStreamResultsResponse.verify|verify} messages. + * @param message VStreamResultsResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IQueryWarning, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: binlogdata.IVStreamResultsResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a QueryWarning message from the specified reader or buffer. + * Decodes a VStreamResultsResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns QueryWarning + * @returns VStreamResultsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.QueryWarning; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): binlogdata.VStreamResultsResponse; /** - * Decodes a QueryWarning message from the specified reader or buffer, length delimited. + * Decodes a VStreamResultsResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns QueryWarning + * @returns VStreamResultsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.QueryWarning; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): binlogdata.VStreamResultsResponse; /** - * Verifies a QueryWarning message. + * Verifies a VStreamResultsResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a QueryWarning message from a plain object. Also converts values to their respective internal types. + * Creates a VStreamResultsResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns QueryWarning + * @returns VStreamResultsResponse */ - public static fromObject(object: { [k: string]: any }): query.QueryWarning; + public static fromObject(object: { [k: string]: any }): binlogdata.VStreamResultsResponse; /** - * Creates a plain object from a QueryWarning message. 
Also converts values to other types if specified. - * @param message QueryWarning + * Creates a plain object from a VStreamResultsResponse message. Also converts values to other types if specified. + * @param message VStreamResultsResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.QueryWarning, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: binlogdata.VStreamResultsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this QueryWarning to JSON. + * Converts this VStreamResultsResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for QueryWarning + * Gets the default type url for VStreamResultsResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } +} - /** Properties of a StreamEvent. */ - interface IStreamEvent { +/** Namespace vtrpc. */ +export namespace vtrpc { - /** StreamEvent statements */ - statements?: (query.StreamEvent.IStatement[]|null); + /** Properties of a CallerID. */ + interface ICallerID { - /** StreamEvent event_token */ - event_token?: (query.IEventToken|null); + /** CallerID principal */ + principal?: (string|null); + + /** CallerID component */ + component?: (string|null); + + /** CallerID subcomponent */ + subcomponent?: (string|null); + + /** CallerID groups */ + groups?: (string[]|null); } - /** Represents a StreamEvent. */ - class StreamEvent implements IStreamEvent { + /** Represents a CallerID. */ + class CallerID implements ICallerID { /** - * Constructs a new StreamEvent. + * Constructs a new CallerID. * @param [properties] Properties to set */ - constructor(properties?: query.IStreamEvent); + constructor(properties?: vtrpc.ICallerID); - /** StreamEvent statements. 
*/ - public statements: query.StreamEvent.IStatement[]; + /** CallerID principal. */ + public principal: string; - /** StreamEvent event_token. */ - public event_token?: (query.IEventToken|null); + /** CallerID component. */ + public component: string; + + /** CallerID subcomponent. */ + public subcomponent: string; + + /** CallerID groups. */ + public groups: string[]; /** - * Creates a new StreamEvent instance using the specified properties. + * Creates a new CallerID instance using the specified properties. * @param [properties] Properties to set - * @returns StreamEvent instance + * @returns CallerID instance */ - public static create(properties?: query.IStreamEvent): query.StreamEvent; + public static create(properties?: vtrpc.ICallerID): vtrpc.CallerID; /** - * Encodes the specified StreamEvent message. Does not implicitly {@link query.StreamEvent.verify|verify} messages. - * @param message StreamEvent message or plain object to encode + * Encodes the specified CallerID message. Does not implicitly {@link vtrpc.CallerID.verify|verify} messages. + * @param message CallerID message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IStreamEvent, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtrpc.ICallerID, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified StreamEvent message, length delimited. Does not implicitly {@link query.StreamEvent.verify|verify} messages. - * @param message StreamEvent message or plain object to encode + * Encodes the specified CallerID message, length delimited. Does not implicitly {@link vtrpc.CallerID.verify|verify} messages. 
+ * @param message CallerID message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IStreamEvent, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtrpc.ICallerID, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a StreamEvent message from the specified reader or buffer. + * Decodes a CallerID message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns StreamEvent + * @returns CallerID * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.StreamEvent; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtrpc.CallerID; /** - * Decodes a StreamEvent message from the specified reader or buffer, length delimited. + * Decodes a CallerID message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns StreamEvent + * @returns CallerID * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.StreamEvent; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtrpc.CallerID; /** - * Verifies a StreamEvent message. + * Verifies a CallerID message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a StreamEvent message from a plain object. Also converts values to their respective internal types. + * Creates a CallerID message from a plain object. 
Also converts values to their respective internal types. * @param object Plain object - * @returns StreamEvent + * @returns CallerID */ - public static fromObject(object: { [k: string]: any }): query.StreamEvent; + public static fromObject(object: { [k: string]: any }): vtrpc.CallerID; /** - * Creates a plain object from a StreamEvent message. Also converts values to other types if specified. - * @param message StreamEvent + * Creates a plain object from a CallerID message. Also converts values to other types if specified. + * @param message CallerID * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.StreamEvent, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtrpc.CallerID, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this StreamEvent to JSON. + * Converts this CallerID to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for StreamEvent + * Gets the default type url for CallerID * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - namespace StreamEvent { - - /** Properties of a Statement. */ - interface IStatement { - - /** Statement category */ - category?: (query.StreamEvent.Statement.Category|null); - - /** Statement table_name */ - table_name?: (string|null); - - /** Statement primary_key_fields */ - primary_key_fields?: (query.IField[]|null); - - /** Statement primary_key_values */ - primary_key_values?: (query.IRow[]|null); - - /** Statement sql */ - sql?: (Uint8Array|null); - } - - /** Represents a Statement. */ - class Statement implements IStatement { - - /** - * Constructs a new Statement. - * @param [properties] Properties to set - */ - constructor(properties?: query.StreamEvent.IStatement); - - /** Statement category. 
*/ - public category: query.StreamEvent.Statement.Category; - - /** Statement table_name. */ - public table_name: string; - - /** Statement primary_key_fields. */ - public primary_key_fields: query.IField[]; - - /** Statement primary_key_values. */ - public primary_key_values: query.IRow[]; - - /** Statement sql. */ - public sql: Uint8Array; - - /** - * Creates a new Statement instance using the specified properties. - * @param [properties] Properties to set - * @returns Statement instance - */ - public static create(properties?: query.StreamEvent.IStatement): query.StreamEvent.Statement; - - /** - * Encodes the specified Statement message. Does not implicitly {@link query.StreamEvent.Statement.verify|verify} messages. - * @param message Statement message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encode(message: query.StreamEvent.IStatement, writer?: $protobuf.Writer): $protobuf.Writer; - - /** - * Encodes the specified Statement message, length delimited. Does not implicitly {@link query.StreamEvent.Statement.verify|verify} messages. - * @param message Statement message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encodeDelimited(message: query.StreamEvent.IStatement, writer?: $protobuf.Writer): $protobuf.Writer; - - /** - * Decodes a Statement message from the specified reader or buffer. - * @param reader Reader or buffer to decode from - * @param [length] Message length if known beforehand - * @returns Statement - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.StreamEvent.Statement; - - /** - * Decodes a Statement message from the specified reader or buffer, length delimited. 
- * @param reader Reader or buffer to decode from - * @returns Statement - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.StreamEvent.Statement; - - /** - * Verifies a Statement message. - * @param message Plain object to verify - * @returns `null` if valid, otherwise the reason why it is not - */ - public static verify(message: { [k: string]: any }): (string|null); - - /** - * Creates a Statement message from a plain object. Also converts values to their respective internal types. - * @param object Plain object - * @returns Statement - */ - public static fromObject(object: { [k: string]: any }): query.StreamEvent.Statement; - - /** - * Creates a plain object from a Statement message. Also converts values to other types if specified. - * @param message Statement - * @param [options] Conversion options - * @returns Plain object - */ - public static toObject(message: query.StreamEvent.Statement, options?: $protobuf.IConversionOptions): { [k: string]: any }; - - /** - * Converts this Statement to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; - - /** - * Gets the default type url for Statement - * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns The default type url - */ - public static getTypeUrl(typeUrlPrefix?: string): string; - } - - namespace Statement { - - /** Category enum. */ - enum Category { - Error = 0, - DML = 1, - DDL = 2 - } - } + /** Code enum. 
*/ + enum Code { + OK = 0, + CANCELED = 1, + UNKNOWN = 2, + INVALID_ARGUMENT = 3, + DEADLINE_EXCEEDED = 4, + NOT_FOUND = 5, + ALREADY_EXISTS = 6, + PERMISSION_DENIED = 7, + RESOURCE_EXHAUSTED = 8, + FAILED_PRECONDITION = 9, + ABORTED = 10, + OUT_OF_RANGE = 11, + UNIMPLEMENTED = 12, + INTERNAL = 13, + UNAVAILABLE = 14, + DATA_LOSS = 15, + UNAUTHENTICATED = 16, + CLUSTER_EVENT = 17, + READ_ONLY = 18 } - /** Properties of an ExecuteRequest. */ - interface IExecuteRequest { - - /** ExecuteRequest effective_caller_id */ - effective_caller_id?: (vtrpc.ICallerID|null); - - /** ExecuteRequest immediate_caller_id */ - immediate_caller_id?: (query.IVTGateCallerID|null); - - /** ExecuteRequest target */ - target?: (query.ITarget|null); - - /** ExecuteRequest query */ - query?: (query.IBoundQuery|null); - - /** ExecuteRequest transaction_id */ - transaction_id?: (number|Long|null); + /** Properties of a RPCError. */ + interface IRPCError { - /** ExecuteRequest options */ - options?: (query.IExecuteOptions|null); + /** RPCError message */ + message?: (string|null); - /** ExecuteRequest reserved_id */ - reserved_id?: (number|Long|null); + /** RPCError code */ + code?: (vtrpc.Code|null); } - /** Represents an ExecuteRequest. */ - class ExecuteRequest implements IExecuteRequest { + /** Represents a RPCError. */ + class RPCError implements IRPCError { /** - * Constructs a new ExecuteRequest. + * Constructs a new RPCError. * @param [properties] Properties to set */ - constructor(properties?: query.IExecuteRequest); - - /** ExecuteRequest effective_caller_id. */ - public effective_caller_id?: (vtrpc.ICallerID|null); - - /** ExecuteRequest immediate_caller_id. */ - public immediate_caller_id?: (query.IVTGateCallerID|null); - - /** ExecuteRequest target. */ - public target?: (query.ITarget|null); - - /** ExecuteRequest query. */ - public query?: (query.IBoundQuery|null); - - /** ExecuteRequest transaction_id. 
*/ - public transaction_id: (number|Long); + constructor(properties?: vtrpc.IRPCError); - /** ExecuteRequest options. */ - public options?: (query.IExecuteOptions|null); + /** RPCError message. */ + public message: string; - /** ExecuteRequest reserved_id. */ - public reserved_id: (number|Long); + /** RPCError code. */ + public code: vtrpc.Code; /** - * Creates a new ExecuteRequest instance using the specified properties. + * Creates a new RPCError instance using the specified properties. * @param [properties] Properties to set - * @returns ExecuteRequest instance + * @returns RPCError instance */ - public static create(properties?: query.IExecuteRequest): query.ExecuteRequest; + public static create(properties?: vtrpc.IRPCError): vtrpc.RPCError; /** - * Encodes the specified ExecuteRequest message. Does not implicitly {@link query.ExecuteRequest.verify|verify} messages. - * @param message ExecuteRequest message or plain object to encode + * Encodes the specified RPCError message. Does not implicitly {@link vtrpc.RPCError.verify|verify} messages. + * @param message RPCError message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtrpc.IRPCError, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ExecuteRequest message, length delimited. Does not implicitly {@link query.ExecuteRequest.verify|verify} messages. - * @param message ExecuteRequest message or plain object to encode + * Encodes the specified RPCError message, length delimited. Does not implicitly {@link vtrpc.RPCError.verify|verify} messages. 
+ * @param message RPCError message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtrpc.IRPCError, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an ExecuteRequest message from the specified reader or buffer. + * Decodes a RPCError message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ExecuteRequest + * @returns RPCError * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ExecuteRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtrpc.RPCError; /** - * Decodes an ExecuteRequest message from the specified reader or buffer, length delimited. + * Decodes a RPCError message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ExecuteRequest + * @returns RPCError * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ExecuteRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtrpc.RPCError; /** - * Verifies an ExecuteRequest message. + * Verifies a RPCError message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an ExecuteRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RPCError message from a plain object. 
Also converts values to their respective internal types. * @param object Plain object - * @returns ExecuteRequest + * @returns RPCError */ - public static fromObject(object: { [k: string]: any }): query.ExecuteRequest; + public static fromObject(object: { [k: string]: any }): vtrpc.RPCError; /** - * Creates a plain object from an ExecuteRequest message. Also converts values to other types if specified. - * @param message ExecuteRequest + * Creates a plain object from a RPCError message. Also converts values to other types if specified. + * @param message RPCError * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.ExecuteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtrpc.RPCError, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ExecuteRequest to JSON. + * Converts this RPCError to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ExecuteRequest + * Gets the default type url for RPCError * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } +} - /** Properties of an ExecuteResponse. */ - interface IExecuteResponse { +/** Namespace query. */ +export namespace query { - /** ExecuteResponse result */ - result?: (query.IQueryResult|null); + /** Properties of a Target. */ + interface ITarget { + + /** Target keyspace */ + keyspace?: (string|null); + + /** Target shard */ + shard?: (string|null); + + /** Target tablet_type */ + tablet_type?: (topodata.TabletType|null); + + /** Target cell */ + cell?: (string|null); } - /** Represents an ExecuteResponse. */ - class ExecuteResponse implements IExecuteResponse { + /** Represents a Target. */ + class Target implements ITarget { /** - * Constructs a new ExecuteResponse. + * Constructs a new Target. 
* @param [properties] Properties to set */ - constructor(properties?: query.IExecuteResponse); + constructor(properties?: query.ITarget); - /** ExecuteResponse result. */ - public result?: (query.IQueryResult|null); + /** Target keyspace. */ + public keyspace: string; + + /** Target shard. */ + public shard: string; + + /** Target tablet_type. */ + public tablet_type: topodata.TabletType; + + /** Target cell. */ + public cell: string; /** - * Creates a new ExecuteResponse instance using the specified properties. + * Creates a new Target instance using the specified properties. * @param [properties] Properties to set - * @returns ExecuteResponse instance + * @returns Target instance */ - public static create(properties?: query.IExecuteResponse): query.ExecuteResponse; + public static create(properties?: query.ITarget): query.Target; /** - * Encodes the specified ExecuteResponse message. Does not implicitly {@link query.ExecuteResponse.verify|verify} messages. - * @param message ExecuteResponse message or plain object to encode + * Encodes the specified Target message. Does not implicitly {@link query.Target.verify|verify} messages. + * @param message Target message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.ITarget, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ExecuteResponse message, length delimited. Does not implicitly {@link query.ExecuteResponse.verify|verify} messages. - * @param message ExecuteResponse message or plain object to encode + * Encodes the specified Target message, length delimited. Does not implicitly {@link query.Target.verify|verify} messages. 
+ * @param message Target message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.ITarget, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an ExecuteResponse message from the specified reader or buffer. + * Decodes a Target message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ExecuteResponse + * @returns Target * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ExecuteResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.Target; /** - * Decodes an ExecuteResponse message from the specified reader or buffer, length delimited. + * Decodes a Target message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ExecuteResponse + * @returns Target * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ExecuteResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.Target; /** - * Verifies an ExecuteResponse message. + * Verifies a Target message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an ExecuteResponse message from a plain object. Also converts values to their respective internal types. + * Creates a Target message from a plain object. 
Also converts values to their respective internal types. * @param object Plain object - * @returns ExecuteResponse + * @returns Target */ - public static fromObject(object: { [k: string]: any }): query.ExecuteResponse; + public static fromObject(object: { [k: string]: any }): query.Target; /** - * Creates a plain object from an ExecuteResponse message. Also converts values to other types if specified. - * @param message ExecuteResponse + * Creates a plain object from a Target message. Also converts values to other types if specified. + * @param message Target * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.ExecuteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.Target, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ExecuteResponse to JSON. + * Converts this Target to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ExecuteResponse + * Gets the default type url for Target * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ResultWithError. */ - interface IResultWithError { + /** Properties of a VTGateCallerID. */ + interface IVTGateCallerID { - /** ResultWithError error */ - error?: (vtrpc.IRPCError|null); + /** VTGateCallerID username */ + username?: (string|null); - /** ResultWithError result */ - result?: (query.IQueryResult|null); + /** VTGateCallerID groups */ + groups?: (string[]|null); } - /** Represents a ResultWithError. */ - class ResultWithError implements IResultWithError { + /** Represents a VTGateCallerID. */ + class VTGateCallerID implements IVTGateCallerID { /** - * Constructs a new ResultWithError. + * Constructs a new VTGateCallerID. 
* @param [properties] Properties to set */ - constructor(properties?: query.IResultWithError); + constructor(properties?: query.IVTGateCallerID); - /** ResultWithError error. */ - public error?: (vtrpc.IRPCError|null); + /** VTGateCallerID username. */ + public username: string; - /** ResultWithError result. */ - public result?: (query.IQueryResult|null); + /** VTGateCallerID groups. */ + public groups: string[]; /** - * Creates a new ResultWithError instance using the specified properties. + * Creates a new VTGateCallerID instance using the specified properties. * @param [properties] Properties to set - * @returns ResultWithError instance + * @returns VTGateCallerID instance */ - public static create(properties?: query.IResultWithError): query.ResultWithError; + public static create(properties?: query.IVTGateCallerID): query.VTGateCallerID; /** - * Encodes the specified ResultWithError message. Does not implicitly {@link query.ResultWithError.verify|verify} messages. - * @param message ResultWithError message or plain object to encode + * Encodes the specified VTGateCallerID message. Does not implicitly {@link query.VTGateCallerID.verify|verify} messages. + * @param message VTGateCallerID message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IResultWithError, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IVTGateCallerID, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ResultWithError message, length delimited. Does not implicitly {@link query.ResultWithError.verify|verify} messages. - * @param message ResultWithError message or plain object to encode + * Encodes the specified VTGateCallerID message, length delimited. Does not implicitly {@link query.VTGateCallerID.verify|verify} messages. 
+ * @param message VTGateCallerID message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IResultWithError, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IVTGateCallerID, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ResultWithError message from the specified reader or buffer. + * Decodes a VTGateCallerID message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ResultWithError + * @returns VTGateCallerID * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ResultWithError; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.VTGateCallerID; /** - * Decodes a ResultWithError message from the specified reader or buffer, length delimited. + * Decodes a VTGateCallerID message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ResultWithError + * @returns VTGateCallerID * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ResultWithError; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.VTGateCallerID; /** - * Verifies a ResultWithError message. + * Verifies a VTGateCallerID message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ResultWithError message from a plain object. 
Also converts values to their respective internal types. + * Creates a VTGateCallerID message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ResultWithError + * @returns VTGateCallerID */ - public static fromObject(object: { [k: string]: any }): query.ResultWithError; + public static fromObject(object: { [k: string]: any }): query.VTGateCallerID; /** - * Creates a plain object from a ResultWithError message. Also converts values to other types if specified. - * @param message ResultWithError + * Creates a plain object from a VTGateCallerID message. Also converts values to other types if specified. + * @param message VTGateCallerID * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.ResultWithError, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.VTGateCallerID, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ResultWithError to JSON. + * Converts this VTGateCallerID to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ResultWithError + * Gets the default type url for VTGateCallerID * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a StreamExecuteRequest. */ - interface IStreamExecuteRequest { - - /** StreamExecuteRequest effective_caller_id */ - effective_caller_id?: (vtrpc.ICallerID|null); - - /** StreamExecuteRequest immediate_caller_id */ - immediate_caller_id?: (query.IVTGateCallerID|null); - - /** StreamExecuteRequest target */ - target?: (query.ITarget|null); - - /** StreamExecuteRequest query */ - query?: (query.IBoundQuery|null); + /** Properties of an EventToken. 
*/ + interface IEventToken { - /** StreamExecuteRequest options */ - options?: (query.IExecuteOptions|null); + /** EventToken timestamp */ + timestamp?: (number|Long|null); - /** StreamExecuteRequest transaction_id */ - transaction_id?: (number|Long|null); + /** EventToken shard */ + shard?: (string|null); - /** StreamExecuteRequest reserved_id */ - reserved_id?: (number|Long|null); + /** EventToken position */ + position?: (string|null); } - /** Represents a StreamExecuteRequest. */ - class StreamExecuteRequest implements IStreamExecuteRequest { + /** Represents an EventToken. */ + class EventToken implements IEventToken { /** - * Constructs a new StreamExecuteRequest. + * Constructs a new EventToken. * @param [properties] Properties to set */ - constructor(properties?: query.IStreamExecuteRequest); - - /** StreamExecuteRequest effective_caller_id. */ - public effective_caller_id?: (vtrpc.ICallerID|null); - - /** StreamExecuteRequest immediate_caller_id. */ - public immediate_caller_id?: (query.IVTGateCallerID|null); - - /** StreamExecuteRequest target. */ - public target?: (query.ITarget|null); - - /** StreamExecuteRequest query. */ - public query?: (query.IBoundQuery|null); + constructor(properties?: query.IEventToken); - /** StreamExecuteRequest options. */ - public options?: (query.IExecuteOptions|null); + /** EventToken timestamp. */ + public timestamp: (number|Long); - /** StreamExecuteRequest transaction_id. */ - public transaction_id: (number|Long); + /** EventToken shard. */ + public shard: string; - /** StreamExecuteRequest reserved_id. */ - public reserved_id: (number|Long); + /** EventToken position. */ + public position: string; /** - * Creates a new StreamExecuteRequest instance using the specified properties. + * Creates a new EventToken instance using the specified properties. 
* @param [properties] Properties to set - * @returns StreamExecuteRequest instance + * @returns EventToken instance */ - public static create(properties?: query.IStreamExecuteRequest): query.StreamExecuteRequest; + public static create(properties?: query.IEventToken): query.EventToken; /** - * Encodes the specified StreamExecuteRequest message. Does not implicitly {@link query.StreamExecuteRequest.verify|verify} messages. - * @param message StreamExecuteRequest message or plain object to encode + * Encodes the specified EventToken message. Does not implicitly {@link query.EventToken.verify|verify} messages. + * @param message EventToken message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IStreamExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IEventToken, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified StreamExecuteRequest message, length delimited. Does not implicitly {@link query.StreamExecuteRequest.verify|verify} messages. - * @param message StreamExecuteRequest message or plain object to encode + * Encodes the specified EventToken message, length delimited. Does not implicitly {@link query.EventToken.verify|verify} messages. + * @param message EventToken message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IStreamExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IEventToken, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a StreamExecuteRequest message from the specified reader or buffer. + * Decodes an EventToken message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns StreamExecuteRequest + * @returns EventToken * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.StreamExecuteRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.EventToken; /** - * Decodes a StreamExecuteRequest message from the specified reader or buffer, length delimited. + * Decodes an EventToken message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns StreamExecuteRequest + * @returns EventToken * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.StreamExecuteRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.EventToken; /** - * Verifies a StreamExecuteRequest message. + * Verifies an EventToken message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a StreamExecuteRequest message from a plain object. Also converts values to their respective internal types. + * Creates an EventToken message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns StreamExecuteRequest + * @returns EventToken */ - public static fromObject(object: { [k: string]: any }): query.StreamExecuteRequest; + public static fromObject(object: { [k: string]: any }): query.EventToken; /** - * Creates a plain object from a StreamExecuteRequest message. Also converts values to other types if specified. 
- * @param message StreamExecuteRequest + * Creates a plain object from an EventToken message. Also converts values to other types if specified. + * @param message EventToken * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.StreamExecuteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.EventToken, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this StreamExecuteRequest to JSON. + * Converts this EventToken to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for StreamExecuteRequest + * Gets the default type url for EventToken * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a StreamExecuteResponse. */ - interface IStreamExecuteResponse { + /** MySqlFlag enum. */ + enum MySqlFlag { + EMPTY = 0, + NOT_NULL_FLAG = 1, + PRI_KEY_FLAG = 2, + UNIQUE_KEY_FLAG = 4, + MULTIPLE_KEY_FLAG = 8, + BLOB_FLAG = 16, + UNSIGNED_FLAG = 32, + ZEROFILL_FLAG = 64, + BINARY_FLAG = 128, + ENUM_FLAG = 256, + AUTO_INCREMENT_FLAG = 512, + TIMESTAMP_FLAG = 1024, + SET_FLAG = 2048, + NO_DEFAULT_VALUE_FLAG = 4096, + ON_UPDATE_NOW_FLAG = 8192, + NUM_FLAG = 32768, + PART_KEY_FLAG = 16384, + GROUP_FLAG = 32768, + UNIQUE_FLAG = 65536, + BINCMP_FLAG = 131072 + } - /** StreamExecuteResponse result */ - result?: (query.IQueryResult|null); + /** Flag enum. */ + enum Flag { + NONE = 0, + ISINTEGRAL = 256, + ISUNSIGNED = 512, + ISFLOAT = 1024, + ISQUOTED = 2048, + ISTEXT = 4096, + ISBINARY = 8192 } - /** Represents a StreamExecuteResponse. */ - class StreamExecuteResponse implements IStreamExecuteResponse { + /** Type enum. 
*/ + enum Type { + NULL_TYPE = 0, + INT8 = 257, + UINT8 = 770, + INT16 = 259, + UINT16 = 772, + INT24 = 261, + UINT24 = 774, + INT32 = 263, + UINT32 = 776, + INT64 = 265, + UINT64 = 778, + FLOAT32 = 1035, + FLOAT64 = 1036, + TIMESTAMP = 2061, + DATE = 2062, + TIME = 2063, + DATETIME = 2064, + YEAR = 785, + DECIMAL = 18, + TEXT = 6163, + BLOB = 10260, + VARCHAR = 6165, + VARBINARY = 10262, + CHAR = 6167, + BINARY = 10264, + BIT = 2073, + ENUM = 2074, + SET = 2075, + TUPLE = 28, + GEOMETRY = 2077, + JSON = 2078, + EXPRESSION = 31, + HEXNUM = 4128, + HEXVAL = 4129, + BITNUM = 4130 + } + + /** Properties of a Value. */ + interface IValue { + + /** Value type */ + type?: (query.Type|null); + + /** Value value */ + value?: (Uint8Array|null); + } + + /** Represents a Value. */ + class Value implements IValue { /** - * Constructs a new StreamExecuteResponse. + * Constructs a new Value. * @param [properties] Properties to set */ - constructor(properties?: query.IStreamExecuteResponse); + constructor(properties?: query.IValue); - /** StreamExecuteResponse result. */ - public result?: (query.IQueryResult|null); + /** Value type. */ + public type: query.Type; + + /** Value value. */ + public value: Uint8Array; /** - * Creates a new StreamExecuteResponse instance using the specified properties. + * Creates a new Value instance using the specified properties. * @param [properties] Properties to set - * @returns StreamExecuteResponse instance + * @returns Value instance */ - public static create(properties?: query.IStreamExecuteResponse): query.StreamExecuteResponse; + public static create(properties?: query.IValue): query.Value; /** - * Encodes the specified StreamExecuteResponse message. Does not implicitly {@link query.StreamExecuteResponse.verify|verify} messages. - * @param message StreamExecuteResponse message or plain object to encode + * Encodes the specified Value message. Does not implicitly {@link query.Value.verify|verify} messages. 
+ * @param message Value message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IStreamExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IValue, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified StreamExecuteResponse message, length delimited. Does not implicitly {@link query.StreamExecuteResponse.verify|verify} messages. - * @param message StreamExecuteResponse message or plain object to encode + * Encodes the specified Value message, length delimited. Does not implicitly {@link query.Value.verify|verify} messages. + * @param message Value message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IStreamExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IValue, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a StreamExecuteResponse message from the specified reader or buffer. + * Decodes a Value message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns StreamExecuteResponse + * @returns Value * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.StreamExecuteResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.Value; /** - * Decodes a StreamExecuteResponse message from the specified reader or buffer, length delimited. + * Decodes a Value message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns StreamExecuteResponse + * @returns Value * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.StreamExecuteResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.Value; /** - * Verifies a StreamExecuteResponse message. + * Verifies a Value message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a StreamExecuteResponse message from a plain object. Also converts values to their respective internal types. + * Creates a Value message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns StreamExecuteResponse + * @returns Value */ - public static fromObject(object: { [k: string]: any }): query.StreamExecuteResponse; + public static fromObject(object: { [k: string]: any }): query.Value; /** - * Creates a plain object from a StreamExecuteResponse message. Also converts values to other types if specified. - * @param message StreamExecuteResponse + * Creates a plain object from a Value message. Also converts values to other types if specified. + * @param message Value * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.StreamExecuteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.Value, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this StreamExecuteResponse to JSON. + * Converts this Value to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for StreamExecuteResponse + * Gets the default type url for Value * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a BeginRequest. */ - interface IBeginRequest { - - /** BeginRequest effective_caller_id */ - effective_caller_id?: (vtrpc.ICallerID|null); + /** Properties of a BindVariable. */ + interface IBindVariable { - /** BeginRequest immediate_caller_id */ - immediate_caller_id?: (query.IVTGateCallerID|null); + /** BindVariable type */ + type?: (query.Type|null); - /** BeginRequest target */ - target?: (query.ITarget|null); + /** BindVariable value */ + value?: (Uint8Array|null); - /** BeginRequest options */ - options?: (query.IExecuteOptions|null); + /** BindVariable values */ + values?: (query.IValue[]|null); } - /** Represents a BeginRequest. */ - class BeginRequest implements IBeginRequest { + /** Represents a BindVariable. */ + class BindVariable implements IBindVariable { /** - * Constructs a new BeginRequest. + * Constructs a new BindVariable. * @param [properties] Properties to set */ - constructor(properties?: query.IBeginRequest); - - /** BeginRequest effective_caller_id. */ - public effective_caller_id?: (vtrpc.ICallerID|null); + constructor(properties?: query.IBindVariable); - /** BeginRequest immediate_caller_id. */ - public immediate_caller_id?: (query.IVTGateCallerID|null); + /** BindVariable type. */ + public type: query.Type; - /** BeginRequest target. */ - public target?: (query.ITarget|null); + /** BindVariable value. */ + public value: Uint8Array; - /** BeginRequest options. */ - public options?: (query.IExecuteOptions|null); + /** BindVariable values. */ + public values: query.IValue[]; /** - * Creates a new BeginRequest instance using the specified properties. 
+ * Creates a new BindVariable instance using the specified properties. * @param [properties] Properties to set - * @returns BeginRequest instance + * @returns BindVariable instance */ - public static create(properties?: query.IBeginRequest): query.BeginRequest; + public static create(properties?: query.IBindVariable): query.BindVariable; /** - * Encodes the specified BeginRequest message. Does not implicitly {@link query.BeginRequest.verify|verify} messages. - * @param message BeginRequest message or plain object to encode + * Encodes the specified BindVariable message. Does not implicitly {@link query.BindVariable.verify|verify} messages. + * @param message BindVariable message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IBeginRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IBindVariable, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified BeginRequest message, length delimited. Does not implicitly {@link query.BeginRequest.verify|verify} messages. - * @param message BeginRequest message or plain object to encode + * Encodes the specified BindVariable message, length delimited. Does not implicitly {@link query.BindVariable.verify|verify} messages. + * @param message BindVariable message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IBeginRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IBindVariable, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a BeginRequest message from the specified reader or buffer. + * Decodes a BindVariable message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns BeginRequest + * @returns BindVariable * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.BeginRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.BindVariable; /** - * Decodes a BeginRequest message from the specified reader or buffer, length delimited. + * Decodes a BindVariable message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns BeginRequest + * @returns BindVariable * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.BeginRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.BindVariable; /** - * Verifies a BeginRequest message. + * Verifies a BindVariable message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a BeginRequest message from a plain object. Also converts values to their respective internal types. + * Creates a BindVariable message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns BeginRequest + * @returns BindVariable */ - public static fromObject(object: { [k: string]: any }): query.BeginRequest; + public static fromObject(object: { [k: string]: any }): query.BindVariable; /** - * Creates a plain object from a BeginRequest message. Also converts values to other types if specified. 
- * @param message BeginRequest + * Creates a plain object from a BindVariable message. Also converts values to other types if specified. + * @param message BindVariable * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.BeginRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.BindVariable, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this BeginRequest to JSON. + * Converts this BindVariable to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for BeginRequest + * Gets the default type url for BindVariable * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a BeginResponse. */ - interface IBeginResponse { - - /** BeginResponse transaction_id */ - transaction_id?: (number|Long|null); + /** Properties of a BoundQuery. */ + interface IBoundQuery { - /** BeginResponse tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); + /** BoundQuery sql */ + sql?: (string|null); - /** BeginResponse session_state_changes */ - session_state_changes?: (string|null); + /** BoundQuery bind_variables */ + bind_variables?: ({ [k: string]: query.IBindVariable }|null); } - /** Represents a BeginResponse. */ - class BeginResponse implements IBeginResponse { + /** Represents a BoundQuery. */ + class BoundQuery implements IBoundQuery { /** - * Constructs a new BeginResponse. + * Constructs a new BoundQuery. * @param [properties] Properties to set */ - constructor(properties?: query.IBeginResponse); - - /** BeginResponse transaction_id. */ - public transaction_id: (number|Long); + constructor(properties?: query.IBoundQuery); - /** BeginResponse tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); + /** BoundQuery sql. 
*/ + public sql: string; - /** BeginResponse session_state_changes. */ - public session_state_changes: string; + /** BoundQuery bind_variables. */ + public bind_variables: { [k: string]: query.IBindVariable }; /** - * Creates a new BeginResponse instance using the specified properties. + * Creates a new BoundQuery instance using the specified properties. * @param [properties] Properties to set - * @returns BeginResponse instance + * @returns BoundQuery instance */ - public static create(properties?: query.IBeginResponse): query.BeginResponse; + public static create(properties?: query.IBoundQuery): query.BoundQuery; /** - * Encodes the specified BeginResponse message. Does not implicitly {@link query.BeginResponse.verify|verify} messages. - * @param message BeginResponse message or plain object to encode + * Encodes the specified BoundQuery message. Does not implicitly {@link query.BoundQuery.verify|verify} messages. + * @param message BoundQuery message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IBeginResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IBoundQuery, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified BeginResponse message, length delimited. Does not implicitly {@link query.BeginResponse.verify|verify} messages. - * @param message BeginResponse message or plain object to encode + * Encodes the specified BoundQuery message, length delimited. Does not implicitly {@link query.BoundQuery.verify|verify} messages. 
+ * @param message BoundQuery message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IBeginResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IBoundQuery, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a BeginResponse message from the specified reader or buffer. + * Decodes a BoundQuery message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns BeginResponse + * @returns BoundQuery * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.BeginResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.BoundQuery; /** - * Decodes a BeginResponse message from the specified reader or buffer, length delimited. + * Decodes a BoundQuery message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns BeginResponse + * @returns BoundQuery * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.BeginResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.BoundQuery; /** - * Verifies a BeginResponse message. + * Verifies a BoundQuery message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a BeginResponse message from a plain object. Also converts values to their respective internal types. 
+ * Creates a BoundQuery message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns BeginResponse + * @returns BoundQuery */ - public static fromObject(object: { [k: string]: any }): query.BeginResponse; + public static fromObject(object: { [k: string]: any }): query.BoundQuery; /** - * Creates a plain object from a BeginResponse message. Also converts values to other types if specified. - * @param message BeginResponse + * Creates a plain object from a BoundQuery message. Also converts values to other types if specified. + * @param message BoundQuery * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.BeginResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.BoundQuery, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this BeginResponse to JSON. + * Converts this BoundQuery to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for BeginResponse + * Gets the default type url for BoundQuery * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a CommitRequest. */ - interface ICommitRequest { - - /** CommitRequest effective_caller_id */ - effective_caller_id?: (vtrpc.ICallerID|null); + /** Properties of an ExecuteOptions. 
*/ + interface IExecuteOptions { - /** CommitRequest immediate_caller_id */ - immediate_caller_id?: (query.IVTGateCallerID|null); + /** ExecuteOptions included_fields */ + included_fields?: (query.ExecuteOptions.IncludedFields|null); - /** CommitRequest target */ - target?: (query.ITarget|null); + /** ExecuteOptions client_found_rows */ + client_found_rows?: (boolean|null); - /** CommitRequest transaction_id */ - transaction_id?: (number|Long|null); - } + /** ExecuteOptions workload */ + workload?: (query.ExecuteOptions.Workload|null); - /** Represents a CommitRequest. */ - class CommitRequest implements ICommitRequest { + /** ExecuteOptions sql_select_limit */ + sql_select_limit?: (number|Long|null); - /** - * Constructs a new CommitRequest. - * @param [properties] Properties to set - */ - constructor(properties?: query.ICommitRequest); + /** ExecuteOptions transaction_isolation */ + transaction_isolation?: (query.ExecuteOptions.TransactionIsolation|null); - /** CommitRequest effective_caller_id. */ - public effective_caller_id?: (vtrpc.ICallerID|null); + /** ExecuteOptions skip_query_plan_cache */ + skip_query_plan_cache?: (boolean|null); - /** CommitRequest immediate_caller_id. */ - public immediate_caller_id?: (query.IVTGateCallerID|null); + /** ExecuteOptions planner_version */ + planner_version?: (query.ExecuteOptions.PlannerVersion|null); - /** CommitRequest target. */ - public target?: (query.ITarget|null); + /** ExecuteOptions has_created_temp_tables */ + has_created_temp_tables?: (boolean|null); - /** CommitRequest transaction_id. 
*/ - public transaction_id: (number|Long); + /** ExecuteOptions consolidator */ + consolidator?: (query.ExecuteOptions.Consolidator|null); + + /** ExecuteOptions transaction_access_mode */ + transaction_access_mode?: (query.ExecuteOptions.TransactionAccessMode[]|null); + + /** ExecuteOptions WorkloadName */ + WorkloadName?: (string|null); + + /** ExecuteOptions priority */ + priority?: (string|null); + + /** ExecuteOptions uag_info */ + uag_info?: (string|null); + } + + /** Represents an ExecuteOptions. */ + class ExecuteOptions implements IExecuteOptions { /** - * Creates a new CommitRequest instance using the specified properties. + * Constructs a new ExecuteOptions. * @param [properties] Properties to set - * @returns CommitRequest instance */ - public static create(properties?: query.ICommitRequest): query.CommitRequest; + constructor(properties?: query.IExecuteOptions); + + /** ExecuteOptions included_fields. */ + public included_fields: query.ExecuteOptions.IncludedFields; + + /** ExecuteOptions client_found_rows. */ + public client_found_rows: boolean; + + /** ExecuteOptions workload. */ + public workload: query.ExecuteOptions.Workload; + + /** ExecuteOptions sql_select_limit. */ + public sql_select_limit: (number|Long); + + /** ExecuteOptions transaction_isolation. */ + public transaction_isolation: query.ExecuteOptions.TransactionIsolation; + + /** ExecuteOptions skip_query_plan_cache. */ + public skip_query_plan_cache: boolean; + + /** ExecuteOptions planner_version. */ + public planner_version: query.ExecuteOptions.PlannerVersion; + + /** ExecuteOptions has_created_temp_tables. */ + public has_created_temp_tables: boolean; + + /** ExecuteOptions consolidator. */ + public consolidator: query.ExecuteOptions.Consolidator; + + /** ExecuteOptions transaction_access_mode. */ + public transaction_access_mode: query.ExecuteOptions.TransactionAccessMode[]; + + /** ExecuteOptions WorkloadName. */ + public WorkloadName: string; + + /** ExecuteOptions priority. 
*/ + public priority: string; + + /** ExecuteOptions uag_info. */ + public uag_info: string; /** - * Encodes the specified CommitRequest message. Does not implicitly {@link query.CommitRequest.verify|verify} messages. - * @param message CommitRequest message or plain object to encode + * Creates a new ExecuteOptions instance using the specified properties. + * @param [properties] Properties to set + * @returns ExecuteOptions instance + */ + public static create(properties?: query.IExecuteOptions): query.ExecuteOptions; + + /** + * Encodes the specified ExecuteOptions message. Does not implicitly {@link query.ExecuteOptions.verify|verify} messages. + * @param message ExecuteOptions message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.ICommitRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IExecuteOptions, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified CommitRequest message, length delimited. Does not implicitly {@link query.CommitRequest.verify|verify} messages. - * @param message CommitRequest message or plain object to encode + * Encodes the specified ExecuteOptions message, length delimited. Does not implicitly {@link query.ExecuteOptions.verify|verify} messages. + * @param message ExecuteOptions message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.ICommitRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IExecuteOptions, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a CommitRequest message from the specified reader or buffer. + * Decodes an ExecuteOptions message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns CommitRequest + * @returns ExecuteOptions * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.CommitRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ExecuteOptions; /** - * Decodes a CommitRequest message from the specified reader or buffer, length delimited. + * Decodes an ExecuteOptions message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns CommitRequest + * @returns ExecuteOptions * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.CommitRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ExecuteOptions; /** - * Verifies a CommitRequest message. + * Verifies an ExecuteOptions message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a CommitRequest message from a plain object. Also converts values to their respective internal types. + * Creates an ExecuteOptions message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns CommitRequest + * @returns ExecuteOptions */ - public static fromObject(object: { [k: string]: any }): query.CommitRequest; + public static fromObject(object: { [k: string]: any }): query.ExecuteOptions; /** - * Creates a plain object from a CommitRequest message. Also converts values to other types if specified. 
- * @param message CommitRequest + * Creates a plain object from an ExecuteOptions message. Also converts values to other types if specified. + * @param message ExecuteOptions * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.CommitRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.ExecuteOptions, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this CommitRequest to JSON. + * Converts this ExecuteOptions to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for CommitRequest + * Gets the default type url for ExecuteOptions * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a CommitResponse. */ - interface ICommitResponse { + namespace ExecuteOptions { - /** CommitResponse reserved_id */ - reserved_id?: (number|Long|null); + /** IncludedFields enum. */ + enum IncludedFields { + TYPE_AND_NAME = 0, + TYPE_ONLY = 1, + ALL = 2 + } + + /** Workload enum. */ + enum Workload { + UNSPECIFIED = 0, + OLTP = 1, + OLAP = 2, + DBA = 3 + } + + /** TransactionIsolation enum. */ + enum TransactionIsolation { + DEFAULT = 0, + REPEATABLE_READ = 1, + READ_COMMITTED = 2, + READ_UNCOMMITTED = 3, + SERIALIZABLE = 4, + CONSISTENT_SNAPSHOT_READ_ONLY = 5, + AUTOCOMMIT = 6 + } + + /** PlannerVersion enum. */ + enum PlannerVersion { + DEFAULT_PLANNER = 0, + V3 = 1, + Gen4 = 2, + Gen4Greedy = 3, + Gen4Left2Right = 4, + Gen4WithFallback = 5, + Gen4CompareV3 = 6, + V3Insert = 7 + } + + /** Consolidator enum. */ + enum Consolidator { + CONSOLIDATOR_UNSPECIFIED = 0, + CONSOLIDATOR_DISABLED = 1, + CONSOLIDATOR_ENABLED = 2, + CONSOLIDATOR_ENABLED_REPLICAS = 3 + } + + /** TransactionAccessMode enum. 
*/ + enum TransactionAccessMode { + CONSISTENT_SNAPSHOT = 0, + READ_WRITE = 1, + READ_ONLY = 2 + } } - /** Represents a CommitResponse. */ - class CommitResponse implements ICommitResponse { + /** Properties of a Field. */ + interface IField { + + /** Field name */ + name?: (string|null); + + /** Field type */ + type?: (query.Type|null); + + /** Field table */ + table?: (string|null); + + /** Field org_table */ + org_table?: (string|null); + + /** Field database */ + database?: (string|null); + + /** Field org_name */ + org_name?: (string|null); + + /** Field column_length */ + column_length?: (number|null); + + /** Field charset */ + charset?: (number|null); + + /** Field decimals */ + decimals?: (number|null); + + /** Field flags */ + flags?: (number|null); + + /** Field column_type */ + column_type?: (string|null); + } + + /** Represents a Field. */ + class Field implements IField { /** - * Constructs a new CommitResponse. + * Constructs a new Field. * @param [properties] Properties to set */ - constructor(properties?: query.ICommitResponse); + constructor(properties?: query.IField); - /** CommitResponse reserved_id. */ - public reserved_id: (number|Long); + /** Field name. */ + public name: string; + + /** Field type. */ + public type: query.Type; + + /** Field table. */ + public table: string; + + /** Field org_table. */ + public org_table: string; + + /** Field database. */ + public database: string; + + /** Field org_name. */ + public org_name: string; + + /** Field column_length. */ + public column_length: number; + + /** Field charset. */ + public charset: number; + + /** Field decimals. */ + public decimals: number; + + /** Field flags. */ + public flags: number; + + /** Field column_type. */ + public column_type: string; /** - * Creates a new CommitResponse instance using the specified properties. + * Creates a new Field instance using the specified properties. 
* @param [properties] Properties to set - * @returns CommitResponse instance + * @returns Field instance */ - public static create(properties?: query.ICommitResponse): query.CommitResponse; + public static create(properties?: query.IField): query.Field; /** - * Encodes the specified CommitResponse message. Does not implicitly {@link query.CommitResponse.verify|verify} messages. - * @param message CommitResponse message or plain object to encode + * Encodes the specified Field message. Does not implicitly {@link query.Field.verify|verify} messages. + * @param message Field message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.ICommitResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IField, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified CommitResponse message, length delimited. Does not implicitly {@link query.CommitResponse.verify|verify} messages. - * @param message CommitResponse message or plain object to encode + * Encodes the specified Field message, length delimited. Does not implicitly {@link query.Field.verify|verify} messages. + * @param message Field message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.ICommitResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IField, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a CommitResponse message from the specified reader or buffer. + * Decodes a Field message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns CommitResponse + * @returns Field * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.CommitResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.Field; /** - * Decodes a CommitResponse message from the specified reader or buffer, length delimited. + * Decodes a Field message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns CommitResponse + * @returns Field * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.CommitResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.Field; /** - * Verifies a CommitResponse message. + * Verifies a Field message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a CommitResponse message from a plain object. Also converts values to their respective internal types. + * Creates a Field message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns CommitResponse + * @returns Field */ - public static fromObject(object: { [k: string]: any }): query.CommitResponse; + public static fromObject(object: { [k: string]: any }): query.Field; /** - * Creates a plain object from a CommitResponse message. Also converts values to other types if specified. - * @param message CommitResponse + * Creates a plain object from a Field message. 
Also converts values to other types if specified. + * @param message Field * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.CommitResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.Field, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this CommitResponse to JSON. + * Converts this Field to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for CommitResponse + * Gets the default type url for Field * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RollbackRequest. */ - interface IRollbackRequest { - - /** RollbackRequest effective_caller_id */ - effective_caller_id?: (vtrpc.ICallerID|null); - - /** RollbackRequest immediate_caller_id */ - immediate_caller_id?: (query.IVTGateCallerID|null); + /** Properties of a Row. */ + interface IRow { - /** RollbackRequest target */ - target?: (query.ITarget|null); + /** Row lengths */ + lengths?: ((number|Long)[]|null); - /** RollbackRequest transaction_id */ - transaction_id?: (number|Long|null); + /** Row values */ + values?: (Uint8Array|null); } - /** Represents a RollbackRequest. */ - class RollbackRequest implements IRollbackRequest { + /** Represents a Row. */ + class Row implements IRow { /** - * Constructs a new RollbackRequest. + * Constructs a new Row. * @param [properties] Properties to set */ - constructor(properties?: query.IRollbackRequest); - - /** RollbackRequest effective_caller_id. */ - public effective_caller_id?: (vtrpc.ICallerID|null); - - /** RollbackRequest immediate_caller_id. */ - public immediate_caller_id?: (query.IVTGateCallerID|null); + constructor(properties?: query.IRow); - /** RollbackRequest target. 
*/ - public target?: (query.ITarget|null); + /** Row lengths. */ + public lengths: (number|Long)[]; - /** RollbackRequest transaction_id. */ - public transaction_id: (number|Long); + /** Row values. */ + public values: Uint8Array; /** - * Creates a new RollbackRequest instance using the specified properties. + * Creates a new Row instance using the specified properties. * @param [properties] Properties to set - * @returns RollbackRequest instance + * @returns Row instance */ - public static create(properties?: query.IRollbackRequest): query.RollbackRequest; + public static create(properties?: query.IRow): query.Row; /** - * Encodes the specified RollbackRequest message. Does not implicitly {@link query.RollbackRequest.verify|verify} messages. - * @param message RollbackRequest message or plain object to encode + * Encodes the specified Row message. Does not implicitly {@link query.Row.verify|verify} messages. + * @param message Row message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IRollbackRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IRow, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RollbackRequest message, length delimited. Does not implicitly {@link query.RollbackRequest.verify|verify} messages. - * @param message RollbackRequest message or plain object to encode + * Encodes the specified Row message, length delimited. Does not implicitly {@link query.Row.verify|verify} messages. + * @param message Row message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IRollbackRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IRow, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RollbackRequest message from the specified reader or buffer. 
+ * Decodes a Row message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RollbackRequest + * @returns Row * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.RollbackRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.Row; /** - * Decodes a RollbackRequest message from the specified reader or buffer, length delimited. + * Decodes a Row message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RollbackRequest + * @returns Row * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.RollbackRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.Row; /** - * Verifies a RollbackRequest message. + * Verifies a Row message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RollbackRequest message from a plain object. Also converts values to their respective internal types. + * Creates a Row message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns RollbackRequest + * @returns Row */ - public static fromObject(object: { [k: string]: any }): query.RollbackRequest; + public static fromObject(object: { [k: string]: any }): query.Row; /** - * Creates a plain object from a RollbackRequest message. Also converts values to other types if specified. 
- * @param message RollbackRequest + * Creates a plain object from a Row message. Also converts values to other types if specified. + * @param message Row * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.RollbackRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.Row, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RollbackRequest to JSON. + * Converts this Row to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RollbackRequest + * Gets the default type url for Row * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RollbackResponse. */ - interface IRollbackResponse { + /** Properties of a QueryResult. */ + interface IQueryResult { - /** RollbackResponse reserved_id */ - reserved_id?: (number|Long|null); + /** QueryResult fields */ + fields?: (query.IField[]|null); + + /** QueryResult rows_affected */ + rows_affected?: (number|Long|null); + + /** QueryResult insert_id */ + insert_id?: (number|Long|null); + + /** QueryResult rows */ + rows?: (query.IRow[]|null); + + /** QueryResult info */ + info?: (string|null); + + /** QueryResult session_state_changes */ + session_state_changes?: (string|null); } - /** Represents a RollbackResponse. */ - class RollbackResponse implements IRollbackResponse { + /** Represents a QueryResult. */ + class QueryResult implements IQueryResult { /** - * Constructs a new RollbackResponse. + * Constructs a new QueryResult. * @param [properties] Properties to set */ - constructor(properties?: query.IRollbackResponse); + constructor(properties?: query.IQueryResult); - /** RollbackResponse reserved_id. */ - public reserved_id: (number|Long); + /** QueryResult fields. 
*/ + public fields: query.IField[]; + + /** QueryResult rows_affected. */ + public rows_affected: (number|Long); + + /** QueryResult insert_id. */ + public insert_id: (number|Long); + + /** QueryResult rows. */ + public rows: query.IRow[]; + + /** QueryResult info. */ + public info: string; + + /** QueryResult session_state_changes. */ + public session_state_changes: string; /** - * Creates a new RollbackResponse instance using the specified properties. + * Creates a new QueryResult instance using the specified properties. * @param [properties] Properties to set - * @returns RollbackResponse instance + * @returns QueryResult instance */ - public static create(properties?: query.IRollbackResponse): query.RollbackResponse; + public static create(properties?: query.IQueryResult): query.QueryResult; /** - * Encodes the specified RollbackResponse message. Does not implicitly {@link query.RollbackResponse.verify|verify} messages. - * @param message RollbackResponse message or plain object to encode + * Encodes the specified QueryResult message. Does not implicitly {@link query.QueryResult.verify|verify} messages. + * @param message QueryResult message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IRollbackResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IQueryResult, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RollbackResponse message, length delimited. Does not implicitly {@link query.RollbackResponse.verify|verify} messages. - * @param message RollbackResponse message or plain object to encode + * Encodes the specified QueryResult message, length delimited. Does not implicitly {@link query.QueryResult.verify|verify} messages. 
+ * @param message QueryResult message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IRollbackResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IQueryResult, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RollbackResponse message from the specified reader or buffer. + * Decodes a QueryResult message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RollbackResponse + * @returns QueryResult * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.RollbackResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.QueryResult; /** - * Decodes a RollbackResponse message from the specified reader or buffer, length delimited. + * Decodes a QueryResult message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RollbackResponse + * @returns QueryResult * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.RollbackResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.QueryResult; /** - * Verifies a RollbackResponse message. + * Verifies a QueryResult message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RollbackResponse message from a plain object. Also converts values to their respective internal types. 
+ * Creates a QueryResult message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns RollbackResponse + * @returns QueryResult */ - public static fromObject(object: { [k: string]: any }): query.RollbackResponse; + public static fromObject(object: { [k: string]: any }): query.QueryResult; /** - * Creates a plain object from a RollbackResponse message. Also converts values to other types if specified. - * @param message RollbackResponse + * Creates a plain object from a QueryResult message. Also converts values to other types if specified. + * @param message QueryResult * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.RollbackResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.QueryResult, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RollbackResponse to JSON. + * Converts this QueryResult to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RollbackResponse + * Gets the default type url for QueryResult * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a PrepareRequest. */ - interface IPrepareRequest { - - /** PrepareRequest effective_caller_id */ - effective_caller_id?: (vtrpc.ICallerID|null); - - /** PrepareRequest immediate_caller_id */ - immediate_caller_id?: (query.IVTGateCallerID|null); - - /** PrepareRequest target */ - target?: (query.ITarget|null); + /** Properties of a QueryWarning. 
*/ + interface IQueryWarning { - /** PrepareRequest transaction_id */ - transaction_id?: (number|Long|null); + /** QueryWarning code */ + code?: (number|null); - /** PrepareRequest dtid */ - dtid?: (string|null); + /** QueryWarning message */ + message?: (string|null); } - /** Represents a PrepareRequest. */ - class PrepareRequest implements IPrepareRequest { + /** Represents a QueryWarning. */ + class QueryWarning implements IQueryWarning { /** - * Constructs a new PrepareRequest. + * Constructs a new QueryWarning. * @param [properties] Properties to set */ - constructor(properties?: query.IPrepareRequest); + constructor(properties?: query.IQueryWarning); - /** PrepareRequest effective_caller_id. */ - public effective_caller_id?: (vtrpc.ICallerID|null); - - /** PrepareRequest immediate_caller_id. */ - public immediate_caller_id?: (query.IVTGateCallerID|null); - - /** PrepareRequest target. */ - public target?: (query.ITarget|null); - - /** PrepareRequest transaction_id. */ - public transaction_id: (number|Long); + /** QueryWarning code. */ + public code: number; - /** PrepareRequest dtid. */ - public dtid: string; + /** QueryWarning message. */ + public message: string; /** - * Creates a new PrepareRequest instance using the specified properties. + * Creates a new QueryWarning instance using the specified properties. * @param [properties] Properties to set - * @returns PrepareRequest instance + * @returns QueryWarning instance */ - public static create(properties?: query.IPrepareRequest): query.PrepareRequest; + public static create(properties?: query.IQueryWarning): query.QueryWarning; /** - * Encodes the specified PrepareRequest message. Does not implicitly {@link query.PrepareRequest.verify|verify} messages. - * @param message PrepareRequest message or plain object to encode + * Encodes the specified QueryWarning message. Does not implicitly {@link query.QueryWarning.verify|verify} messages. 
+ * @param message QueryWarning message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IPrepareRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IQueryWarning, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified PrepareRequest message, length delimited. Does not implicitly {@link query.PrepareRequest.verify|verify} messages. - * @param message PrepareRequest message or plain object to encode + * Encodes the specified QueryWarning message, length delimited. Does not implicitly {@link query.QueryWarning.verify|verify} messages. + * @param message QueryWarning message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IPrepareRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IQueryWarning, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a PrepareRequest message from the specified reader or buffer. + * Decodes a QueryWarning message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns PrepareRequest + * @returns QueryWarning * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.PrepareRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.QueryWarning; /** - * Decodes a PrepareRequest message from the specified reader or buffer, length delimited. + * Decodes a QueryWarning message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns PrepareRequest + * @returns QueryWarning * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.PrepareRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.QueryWarning; /** - * Verifies a PrepareRequest message. + * Verifies a QueryWarning message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a PrepareRequest message from a plain object. Also converts values to their respective internal types. + * Creates a QueryWarning message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns PrepareRequest + * @returns QueryWarning */ - public static fromObject(object: { [k: string]: any }): query.PrepareRequest; + public static fromObject(object: { [k: string]: any }): query.QueryWarning; /** - * Creates a plain object from a PrepareRequest message. Also converts values to other types if specified. - * @param message PrepareRequest + * Creates a plain object from a QueryWarning message. Also converts values to other types if specified. + * @param message QueryWarning * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.PrepareRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.QueryWarning, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this PrepareRequest to JSON. + * Converts this QueryWarning to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for PrepareRequest + * Gets the default type url for QueryWarning * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a PrepareResponse. */ - interface IPrepareResponse { + /** Properties of a StreamEvent. */ + interface IStreamEvent { + + /** StreamEvent statements */ + statements?: (query.StreamEvent.IStatement[]|null); + + /** StreamEvent event_token */ + event_token?: (query.IEventToken|null); } - /** Represents a PrepareResponse. */ - class PrepareResponse implements IPrepareResponse { + /** Represents a StreamEvent. */ + class StreamEvent implements IStreamEvent { /** - * Constructs a new PrepareResponse. + * Constructs a new StreamEvent. * @param [properties] Properties to set */ - constructor(properties?: query.IPrepareResponse); + constructor(properties?: query.IStreamEvent); + + /** StreamEvent statements. */ + public statements: query.StreamEvent.IStatement[]; + + /** StreamEvent event_token. */ + public event_token?: (query.IEventToken|null); /** - * Creates a new PrepareResponse instance using the specified properties. + * Creates a new StreamEvent instance using the specified properties. * @param [properties] Properties to set - * @returns PrepareResponse instance + * @returns StreamEvent instance */ - public static create(properties?: query.IPrepareResponse): query.PrepareResponse; + public static create(properties?: query.IStreamEvent): query.StreamEvent; /** - * Encodes the specified PrepareResponse message. Does not implicitly {@link query.PrepareResponse.verify|verify} messages. - * @param message PrepareResponse message or plain object to encode + * Encodes the specified StreamEvent message. Does not implicitly {@link query.StreamEvent.verify|verify} messages. 
+ * @param message StreamEvent message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IPrepareResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IStreamEvent, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified PrepareResponse message, length delimited. Does not implicitly {@link query.PrepareResponse.verify|verify} messages. - * @param message PrepareResponse message or plain object to encode + * Encodes the specified StreamEvent message, length delimited. Does not implicitly {@link query.StreamEvent.verify|verify} messages. + * @param message StreamEvent message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IPrepareResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IStreamEvent, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a PrepareResponse message from the specified reader or buffer. + * Decodes a StreamEvent message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns PrepareResponse + * @returns StreamEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.PrepareResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.StreamEvent; /** - * Decodes a PrepareResponse message from the specified reader or buffer, length delimited. + * Decodes a StreamEvent message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns PrepareResponse + * @returns StreamEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.PrepareResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.StreamEvent; /** - * Verifies a PrepareResponse message. + * Verifies a StreamEvent message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a PrepareResponse message from a plain object. Also converts values to their respective internal types. + * Creates a StreamEvent message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns PrepareResponse + * @returns StreamEvent */ - public static fromObject(object: { [k: string]: any }): query.PrepareResponse; + public static fromObject(object: { [k: string]: any }): query.StreamEvent; /** - * Creates a plain object from a PrepareResponse message. Also converts values to other types if specified. - * @param message PrepareResponse + * Creates a plain object from a StreamEvent message. Also converts values to other types if specified. + * @param message StreamEvent * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.PrepareResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.StreamEvent, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this PrepareResponse to JSON. + * Converts this StreamEvent to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for PrepareResponse + * Gets the default type url for StreamEvent * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a CommitPreparedRequest. */ - interface ICommitPreparedRequest { + namespace StreamEvent { - /** CommitPreparedRequest effective_caller_id */ + /** Properties of a Statement. */ + interface IStatement { + + /** Statement category */ + category?: (query.StreamEvent.Statement.Category|null); + + /** Statement table_name */ + table_name?: (string|null); + + /** Statement primary_key_fields */ + primary_key_fields?: (query.IField[]|null); + + /** Statement primary_key_values */ + primary_key_values?: (query.IRow[]|null); + + /** Statement sql */ + sql?: (Uint8Array|null); + } + + /** Represents a Statement. */ + class Statement implements IStatement { + + /** + * Constructs a new Statement. + * @param [properties] Properties to set + */ + constructor(properties?: query.StreamEvent.IStatement); + + /** Statement category. */ + public category: query.StreamEvent.Statement.Category; + + /** Statement table_name. */ + public table_name: string; + + /** Statement primary_key_fields. */ + public primary_key_fields: query.IField[]; + + /** Statement primary_key_values. */ + public primary_key_values: query.IRow[]; + + /** Statement sql. */ + public sql: Uint8Array; + + /** + * Creates a new Statement instance using the specified properties. + * @param [properties] Properties to set + * @returns Statement instance + */ + public static create(properties?: query.StreamEvent.IStatement): query.StreamEvent.Statement; + + /** + * Encodes the specified Statement message. Does not implicitly {@link query.StreamEvent.Statement.verify|verify} messages. 
+ * @param message Statement message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: query.StreamEvent.IStatement, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified Statement message, length delimited. Does not implicitly {@link query.StreamEvent.Statement.verify|verify} messages. + * @param message Statement message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: query.StreamEvent.IStatement, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a Statement message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns Statement + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.StreamEvent.Statement; + + /** + * Decodes a Statement message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns Statement + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.StreamEvent.Statement; + + /** + * Verifies a Statement message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a Statement message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns Statement + */ + public static fromObject(object: { [k: string]: any }): query.StreamEvent.Statement; + + /** + * Creates a plain object from a Statement message. Also converts values to other types if specified. + * @param message Statement + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: query.StreamEvent.Statement, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this Statement to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for Statement + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + namespace Statement { + + /** Category enum. */ + enum Category { + Error = 0, + DML = 1, + DDL = 2 + } + } + } + + /** Properties of an ExecuteRequest. */ + interface IExecuteRequest { + + /** ExecuteRequest effective_caller_id */ effective_caller_id?: (vtrpc.ICallerID|null); - /** CommitPreparedRequest immediate_caller_id */ + /** ExecuteRequest immediate_caller_id */ immediate_caller_id?: (query.IVTGateCallerID|null); - /** CommitPreparedRequest target */ + /** ExecuteRequest target */ target?: (query.ITarget|null); - /** CommitPreparedRequest dtid */ - dtid?: (string|null); + /** ExecuteRequest query */ + query?: (query.IBoundQuery|null); + + /** ExecuteRequest transaction_id */ + transaction_id?: (number|Long|null); + + /** ExecuteRequest options */ + options?: (query.IExecuteOptions|null); + + /** ExecuteRequest reserved_id */ + reserved_id?: (number|Long|null); } - /** Represents a CommitPreparedRequest. */ - class CommitPreparedRequest implements ICommitPreparedRequest { + /** Represents an ExecuteRequest. */ + class ExecuteRequest implements IExecuteRequest { /** - * Constructs a new CommitPreparedRequest. 
+ * Constructs a new ExecuteRequest. * @param [properties] Properties to set */ - constructor(properties?: query.ICommitPreparedRequest); + constructor(properties?: query.IExecuteRequest); - /** CommitPreparedRequest effective_caller_id. */ + /** ExecuteRequest effective_caller_id. */ public effective_caller_id?: (vtrpc.ICallerID|null); - /** CommitPreparedRequest immediate_caller_id. */ + /** ExecuteRequest immediate_caller_id. */ public immediate_caller_id?: (query.IVTGateCallerID|null); - /** CommitPreparedRequest target. */ + /** ExecuteRequest target. */ public target?: (query.ITarget|null); - /** CommitPreparedRequest dtid. */ - public dtid: string; + /** ExecuteRequest query. */ + public query?: (query.IBoundQuery|null); + + /** ExecuteRequest transaction_id. */ + public transaction_id: (number|Long); + + /** ExecuteRequest options. */ + public options?: (query.IExecuteOptions|null); + + /** ExecuteRequest reserved_id. */ + public reserved_id: (number|Long); /** - * Creates a new CommitPreparedRequest instance using the specified properties. + * Creates a new ExecuteRequest instance using the specified properties. * @param [properties] Properties to set - * @returns CommitPreparedRequest instance + * @returns ExecuteRequest instance */ - public static create(properties?: query.ICommitPreparedRequest): query.CommitPreparedRequest; + public static create(properties?: query.IExecuteRequest): query.ExecuteRequest; /** - * Encodes the specified CommitPreparedRequest message. Does not implicitly {@link query.CommitPreparedRequest.verify|verify} messages. - * @param message CommitPreparedRequest message or plain object to encode + * Encodes the specified ExecuteRequest message. Does not implicitly {@link query.ExecuteRequest.verify|verify} messages. 
+ * @param message ExecuteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.ICommitPreparedRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified CommitPreparedRequest message, length delimited. Does not implicitly {@link query.CommitPreparedRequest.verify|verify} messages. - * @param message CommitPreparedRequest message or plain object to encode + * Encodes the specified ExecuteRequest message, length delimited. Does not implicitly {@link query.ExecuteRequest.verify|verify} messages. + * @param message ExecuteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.ICommitPreparedRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a CommitPreparedRequest message from the specified reader or buffer. + * Decodes an ExecuteRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns CommitPreparedRequest + * @returns ExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.CommitPreparedRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ExecuteRequest; /** - * Decodes a CommitPreparedRequest message from the specified reader or buffer, length delimited. + * Decodes an ExecuteRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns CommitPreparedRequest + * @returns ExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.CommitPreparedRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ExecuteRequest; /** - * Verifies a CommitPreparedRequest message. + * Verifies an ExecuteRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a CommitPreparedRequest message from a plain object. Also converts values to their respective internal types. + * Creates an ExecuteRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns CommitPreparedRequest + * @returns ExecuteRequest */ - public static fromObject(object: { [k: string]: any }): query.CommitPreparedRequest; + public static fromObject(object: { [k: string]: any }): query.ExecuteRequest; /** - * Creates a plain object from a CommitPreparedRequest message. Also converts values to other types if specified. - * @param message CommitPreparedRequest + * Creates a plain object from an ExecuteRequest message. Also converts values to other types if specified. + * @param message ExecuteRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.CommitPreparedRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.ExecuteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this CommitPreparedRequest to JSON. + * Converts this ExecuteRequest to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for CommitPreparedRequest + * Gets the default type url for ExecuteRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a CommitPreparedResponse. */ - interface ICommitPreparedResponse { + /** Properties of an ExecuteResponse. */ + interface IExecuteResponse { + + /** ExecuteResponse result */ + result?: (query.IQueryResult|null); } - /** Represents a CommitPreparedResponse. */ - class CommitPreparedResponse implements ICommitPreparedResponse { + /** Represents an ExecuteResponse. */ + class ExecuteResponse implements IExecuteResponse { /** - * Constructs a new CommitPreparedResponse. + * Constructs a new ExecuteResponse. * @param [properties] Properties to set */ - constructor(properties?: query.ICommitPreparedResponse); + constructor(properties?: query.IExecuteResponse); + + /** ExecuteResponse result. */ + public result?: (query.IQueryResult|null); /** - * Creates a new CommitPreparedResponse instance using the specified properties. + * Creates a new ExecuteResponse instance using the specified properties. * @param [properties] Properties to set - * @returns CommitPreparedResponse instance + * @returns ExecuteResponse instance */ - public static create(properties?: query.ICommitPreparedResponse): query.CommitPreparedResponse; + public static create(properties?: query.IExecuteResponse): query.ExecuteResponse; /** - * Encodes the specified CommitPreparedResponse message. Does not implicitly {@link query.CommitPreparedResponse.verify|verify} messages. - * @param message CommitPreparedResponse message or plain object to encode + * Encodes the specified ExecuteResponse message. Does not implicitly {@link query.ExecuteResponse.verify|verify} messages. 
+ * @param message ExecuteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.ICommitPreparedResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified CommitPreparedResponse message, length delimited. Does not implicitly {@link query.CommitPreparedResponse.verify|verify} messages. - * @param message CommitPreparedResponse message or plain object to encode + * Encodes the specified ExecuteResponse message, length delimited. Does not implicitly {@link query.ExecuteResponse.verify|verify} messages. + * @param message ExecuteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.ICommitPreparedResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a CommitPreparedResponse message from the specified reader or buffer. + * Decodes an ExecuteResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns CommitPreparedResponse + * @returns ExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.CommitPreparedResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ExecuteResponse; /** - * Decodes a CommitPreparedResponse message from the specified reader or buffer, length delimited. + * Decodes an ExecuteResponse message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns CommitPreparedResponse + * @returns ExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.CommitPreparedResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ExecuteResponse; /** - * Verifies a CommitPreparedResponse message. + * Verifies an ExecuteResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a CommitPreparedResponse message from a plain object. Also converts values to their respective internal types. + * Creates an ExecuteResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns CommitPreparedResponse + * @returns ExecuteResponse */ - public static fromObject(object: { [k: string]: any }): query.CommitPreparedResponse; + public static fromObject(object: { [k: string]: any }): query.ExecuteResponse; /** - * Creates a plain object from a CommitPreparedResponse message. Also converts values to other types if specified. - * @param message CommitPreparedResponse + * Creates a plain object from an ExecuteResponse message. Also converts values to other types if specified. + * @param message ExecuteResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.CommitPreparedResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.ExecuteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this CommitPreparedResponse to JSON. + * Converts this ExecuteResponse to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for CommitPreparedResponse + * Gets the default type url for ExecuteResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RollbackPreparedRequest. */ - interface IRollbackPreparedRequest { - - /** RollbackPreparedRequest effective_caller_id */ - effective_caller_id?: (vtrpc.ICallerID|null); - - /** RollbackPreparedRequest immediate_caller_id */ - immediate_caller_id?: (query.IVTGateCallerID|null); - - /** RollbackPreparedRequest target */ - target?: (query.ITarget|null); + /** Properties of a ResultWithError. */ + interface IResultWithError { - /** RollbackPreparedRequest transaction_id */ - transaction_id?: (number|Long|null); + /** ResultWithError error */ + error?: (vtrpc.IRPCError|null); - /** RollbackPreparedRequest dtid */ - dtid?: (string|null); + /** ResultWithError result */ + result?: (query.IQueryResult|null); } - /** Represents a RollbackPreparedRequest. */ - class RollbackPreparedRequest implements IRollbackPreparedRequest { + /** Represents a ResultWithError. */ + class ResultWithError implements IResultWithError { /** - * Constructs a new RollbackPreparedRequest. + * Constructs a new ResultWithError. * @param [properties] Properties to set */ - constructor(properties?: query.IRollbackPreparedRequest); - - /** RollbackPreparedRequest effective_caller_id. */ - public effective_caller_id?: (vtrpc.ICallerID|null); - - /** RollbackPreparedRequest immediate_caller_id. */ - public immediate_caller_id?: (query.IVTGateCallerID|null); - - /** RollbackPreparedRequest target. */ - public target?: (query.ITarget|null); + constructor(properties?: query.IResultWithError); - /** RollbackPreparedRequest transaction_id. */ - public transaction_id: (number|Long); + /** ResultWithError error. 
*/ + public error?: (vtrpc.IRPCError|null); - /** RollbackPreparedRequest dtid. */ - public dtid: string; + /** ResultWithError result. */ + public result?: (query.IQueryResult|null); /** - * Creates a new RollbackPreparedRequest instance using the specified properties. + * Creates a new ResultWithError instance using the specified properties. * @param [properties] Properties to set - * @returns RollbackPreparedRequest instance + * @returns ResultWithError instance */ - public static create(properties?: query.IRollbackPreparedRequest): query.RollbackPreparedRequest; + public static create(properties?: query.IResultWithError): query.ResultWithError; /** - * Encodes the specified RollbackPreparedRequest message. Does not implicitly {@link query.RollbackPreparedRequest.verify|verify} messages. - * @param message RollbackPreparedRequest message or plain object to encode + * Encodes the specified ResultWithError message. Does not implicitly {@link query.ResultWithError.verify|verify} messages. + * @param message ResultWithError message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IRollbackPreparedRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IResultWithError, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RollbackPreparedRequest message, length delimited. Does not implicitly {@link query.RollbackPreparedRequest.verify|verify} messages. - * @param message RollbackPreparedRequest message or plain object to encode + * Encodes the specified ResultWithError message, length delimited. Does not implicitly {@link query.ResultWithError.verify|verify} messages. 
+ * @param message ResultWithError message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IRollbackPreparedRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IResultWithError, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RollbackPreparedRequest message from the specified reader or buffer. + * Decodes a ResultWithError message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RollbackPreparedRequest + * @returns ResultWithError * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.RollbackPreparedRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ResultWithError; /** - * Decodes a RollbackPreparedRequest message from the specified reader or buffer, length delimited. + * Decodes a ResultWithError message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RollbackPreparedRequest + * @returns ResultWithError * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.RollbackPreparedRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ResultWithError; /** - * Verifies a RollbackPreparedRequest message. - * @param message Plain object to verify - * @returns `null` if valid, otherwise the reason why it is not - */ - public static verify(message: { [k: string]: any }): (string|null); - - /** - * Creates a RollbackPreparedRequest message from a plain object. 
Also converts values to their respective internal types. - * @param object Plain object - * @returns RollbackPreparedRequest - */ - public static fromObject(object: { [k: string]: any }): query.RollbackPreparedRequest; - - /** - * Creates a plain object from a RollbackPreparedRequest message. Also converts values to other types if specified. - * @param message RollbackPreparedRequest - * @param [options] Conversion options - * @returns Plain object - */ - public static toObject(message: query.RollbackPreparedRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; - - /** - * Converts this RollbackPreparedRequest to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; - - /** - * Gets the default type url for RollbackPreparedRequest - * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns The default type url - */ - public static getTypeUrl(typeUrlPrefix?: string): string; - } - - /** Properties of a RollbackPreparedResponse. */ - interface IRollbackPreparedResponse { - } - - /** Represents a RollbackPreparedResponse. */ - class RollbackPreparedResponse implements IRollbackPreparedResponse { - - /** - * Constructs a new RollbackPreparedResponse. - * @param [properties] Properties to set - */ - constructor(properties?: query.IRollbackPreparedResponse); - - /** - * Creates a new RollbackPreparedResponse instance using the specified properties. - * @param [properties] Properties to set - * @returns RollbackPreparedResponse instance - */ - public static create(properties?: query.IRollbackPreparedResponse): query.RollbackPreparedResponse; - - /** - * Encodes the specified RollbackPreparedResponse message. Does not implicitly {@link query.RollbackPreparedResponse.verify|verify} messages. 
- * @param message RollbackPreparedResponse message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encode(message: query.IRollbackPreparedResponse, writer?: $protobuf.Writer): $protobuf.Writer; - - /** - * Encodes the specified RollbackPreparedResponse message, length delimited. Does not implicitly {@link query.RollbackPreparedResponse.verify|verify} messages. - * @param message RollbackPreparedResponse message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encodeDelimited(message: query.IRollbackPreparedResponse, writer?: $protobuf.Writer): $protobuf.Writer; - - /** - * Decodes a RollbackPreparedResponse message from the specified reader or buffer. - * @param reader Reader or buffer to decode from - * @param [length] Message length if known beforehand - * @returns RollbackPreparedResponse - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.RollbackPreparedResponse; - - /** - * Decodes a RollbackPreparedResponse message from the specified reader or buffer, length delimited. - * @param reader Reader or buffer to decode from - * @returns RollbackPreparedResponse - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.RollbackPreparedResponse; - - /** - * Verifies a RollbackPreparedResponse message. + * Verifies a ResultWithError message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RollbackPreparedResponse message from a plain object. 
Also converts values to their respective internal types. + * Creates a ResultWithError message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns RollbackPreparedResponse + * @returns ResultWithError */ - public static fromObject(object: { [k: string]: any }): query.RollbackPreparedResponse; + public static fromObject(object: { [k: string]: any }): query.ResultWithError; /** - * Creates a plain object from a RollbackPreparedResponse message. Also converts values to other types if specified. - * @param message RollbackPreparedResponse + * Creates a plain object from a ResultWithError message. Also converts values to other types if specified. + * @param message ResultWithError * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.RollbackPreparedResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.ResultWithError, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RollbackPreparedResponse to JSON. + * Converts this ResultWithError to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RollbackPreparedResponse + * Gets the default type url for ResultWithError * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a CreateTransactionRequest. */ - interface ICreateTransactionRequest { + /** Properties of a StreamExecuteRequest. 
*/ + interface IStreamExecuteRequest { - /** CreateTransactionRequest effective_caller_id */ + /** StreamExecuteRequest effective_caller_id */ effective_caller_id?: (vtrpc.ICallerID|null); - /** CreateTransactionRequest immediate_caller_id */ + /** StreamExecuteRequest immediate_caller_id */ immediate_caller_id?: (query.IVTGateCallerID|null); - /** CreateTransactionRequest target */ + /** StreamExecuteRequest target */ target?: (query.ITarget|null); - /** CreateTransactionRequest dtid */ - dtid?: (string|null); + /** StreamExecuteRequest query */ + query?: (query.IBoundQuery|null); - /** CreateTransactionRequest participants */ - participants?: (query.ITarget[]|null); + /** StreamExecuteRequest options */ + options?: (query.IExecuteOptions|null); + + /** StreamExecuteRequest transaction_id */ + transaction_id?: (number|Long|null); + + /** StreamExecuteRequest reserved_id */ + reserved_id?: (number|Long|null); } - /** Represents a CreateTransactionRequest. */ - class CreateTransactionRequest implements ICreateTransactionRequest { + /** Represents a StreamExecuteRequest. */ + class StreamExecuteRequest implements IStreamExecuteRequest { /** - * Constructs a new CreateTransactionRequest. + * Constructs a new StreamExecuteRequest. * @param [properties] Properties to set */ - constructor(properties?: query.ICreateTransactionRequest); + constructor(properties?: query.IStreamExecuteRequest); - /** CreateTransactionRequest effective_caller_id. */ + /** StreamExecuteRequest effective_caller_id. */ public effective_caller_id?: (vtrpc.ICallerID|null); - /** CreateTransactionRequest immediate_caller_id. */ + /** StreamExecuteRequest immediate_caller_id. */ public immediate_caller_id?: (query.IVTGateCallerID|null); - /** CreateTransactionRequest target. */ + /** StreamExecuteRequest target. */ public target?: (query.ITarget|null); - /** CreateTransactionRequest dtid. */ - public dtid: string; + /** StreamExecuteRequest query. 
*/ + public query?: (query.IBoundQuery|null); - /** CreateTransactionRequest participants. */ - public participants: query.ITarget[]; + /** StreamExecuteRequest options. */ + public options?: (query.IExecuteOptions|null); + + /** StreamExecuteRequest transaction_id. */ + public transaction_id: (number|Long); + + /** StreamExecuteRequest reserved_id. */ + public reserved_id: (number|Long); /** - * Creates a new CreateTransactionRequest instance using the specified properties. + * Creates a new StreamExecuteRequest instance using the specified properties. * @param [properties] Properties to set - * @returns CreateTransactionRequest instance + * @returns StreamExecuteRequest instance */ - public static create(properties?: query.ICreateTransactionRequest): query.CreateTransactionRequest; + public static create(properties?: query.IStreamExecuteRequest): query.StreamExecuteRequest; /** - * Encodes the specified CreateTransactionRequest message. Does not implicitly {@link query.CreateTransactionRequest.verify|verify} messages. - * @param message CreateTransactionRequest message or plain object to encode + * Encodes the specified StreamExecuteRequest message. Does not implicitly {@link query.StreamExecuteRequest.verify|verify} messages. + * @param message StreamExecuteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.ICreateTransactionRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IStreamExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified CreateTransactionRequest message, length delimited. Does not implicitly {@link query.CreateTransactionRequest.verify|verify} messages. - * @param message CreateTransactionRequest message or plain object to encode + * Encodes the specified StreamExecuteRequest message, length delimited. Does not implicitly {@link query.StreamExecuteRequest.verify|verify} messages. 
+ * @param message StreamExecuteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.ICreateTransactionRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IStreamExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a CreateTransactionRequest message from the specified reader or buffer. + * Decodes a StreamExecuteRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns CreateTransactionRequest + * @returns StreamExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.CreateTransactionRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.StreamExecuteRequest; /** - * Decodes a CreateTransactionRequest message from the specified reader or buffer, length delimited. + * Decodes a StreamExecuteRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns CreateTransactionRequest + * @returns StreamExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.CreateTransactionRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.StreamExecuteRequest; /** - * Verifies a CreateTransactionRequest message. + * Verifies a StreamExecuteRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a CreateTransactionRequest message from a plain object. Also converts values to their respective internal types. + * Creates a StreamExecuteRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns CreateTransactionRequest + * @returns StreamExecuteRequest */ - public static fromObject(object: { [k: string]: any }): query.CreateTransactionRequest; + public static fromObject(object: { [k: string]: any }): query.StreamExecuteRequest; /** - * Creates a plain object from a CreateTransactionRequest message. Also converts values to other types if specified. - * @param message CreateTransactionRequest + * Creates a plain object from a StreamExecuteRequest message. Also converts values to other types if specified. + * @param message StreamExecuteRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.CreateTransactionRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.StreamExecuteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this CreateTransactionRequest to JSON. + * Converts this StreamExecuteRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for CreateTransactionRequest + * Gets the default type url for StreamExecuteRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a CreateTransactionResponse. */ - interface ICreateTransactionResponse { + /** Properties of a StreamExecuteResponse. 
*/ + interface IStreamExecuteResponse { + + /** StreamExecuteResponse result */ + result?: (query.IQueryResult|null); } - /** Represents a CreateTransactionResponse. */ - class CreateTransactionResponse implements ICreateTransactionResponse { + /** Represents a StreamExecuteResponse. */ + class StreamExecuteResponse implements IStreamExecuteResponse { /** - * Constructs a new CreateTransactionResponse. + * Constructs a new StreamExecuteResponse. * @param [properties] Properties to set */ - constructor(properties?: query.ICreateTransactionResponse); + constructor(properties?: query.IStreamExecuteResponse); + + /** StreamExecuteResponse result. */ + public result?: (query.IQueryResult|null); /** - * Creates a new CreateTransactionResponse instance using the specified properties. + * Creates a new StreamExecuteResponse instance using the specified properties. * @param [properties] Properties to set - * @returns CreateTransactionResponse instance + * @returns StreamExecuteResponse instance */ - public static create(properties?: query.ICreateTransactionResponse): query.CreateTransactionResponse; + public static create(properties?: query.IStreamExecuteResponse): query.StreamExecuteResponse; /** - * Encodes the specified CreateTransactionResponse message. Does not implicitly {@link query.CreateTransactionResponse.verify|verify} messages. - * @param message CreateTransactionResponse message or plain object to encode + * Encodes the specified StreamExecuteResponse message. Does not implicitly {@link query.StreamExecuteResponse.verify|verify} messages. 
+ * @param message StreamExecuteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.ICreateTransactionResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IStreamExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified CreateTransactionResponse message, length delimited. Does not implicitly {@link query.CreateTransactionResponse.verify|verify} messages. - * @param message CreateTransactionResponse message or plain object to encode + * Encodes the specified StreamExecuteResponse message, length delimited. Does not implicitly {@link query.StreamExecuteResponse.verify|verify} messages. + * @param message StreamExecuteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.ICreateTransactionResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IStreamExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a CreateTransactionResponse message from the specified reader or buffer. + * Decodes a StreamExecuteResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns CreateTransactionResponse + * @returns StreamExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.CreateTransactionResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.StreamExecuteResponse; /** - * Decodes a CreateTransactionResponse message from the specified reader or buffer, length delimited. 
+ * Decodes a StreamExecuteResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns CreateTransactionResponse + * @returns StreamExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.CreateTransactionResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.StreamExecuteResponse; /** - * Verifies a CreateTransactionResponse message. + * Verifies a StreamExecuteResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a CreateTransactionResponse message from a plain object. Also converts values to their respective internal types. + * Creates a StreamExecuteResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns CreateTransactionResponse + * @returns StreamExecuteResponse */ - public static fromObject(object: { [k: string]: any }): query.CreateTransactionResponse; + public static fromObject(object: { [k: string]: any }): query.StreamExecuteResponse; /** - * Creates a plain object from a CreateTransactionResponse message. Also converts values to other types if specified. - * @param message CreateTransactionResponse + * Creates a plain object from a StreamExecuteResponse message. Also converts values to other types if specified. 
+ * @param message StreamExecuteResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.CreateTransactionResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.StreamExecuteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this CreateTransactionResponse to JSON. + * Converts this StreamExecuteResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for CreateTransactionResponse + * Gets the default type url for StreamExecuteResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a StartCommitRequest. */ - interface IStartCommitRequest { + /** Properties of a BeginRequest. */ + interface IBeginRequest { - /** StartCommitRequest effective_caller_id */ + /** BeginRequest effective_caller_id */ effective_caller_id?: (vtrpc.ICallerID|null); - /** StartCommitRequest immediate_caller_id */ + /** BeginRequest immediate_caller_id */ immediate_caller_id?: (query.IVTGateCallerID|null); - /** StartCommitRequest target */ + /** BeginRequest target */ target?: (query.ITarget|null); - /** StartCommitRequest transaction_id */ - transaction_id?: (number|Long|null); - - /** StartCommitRequest dtid */ - dtid?: (string|null); + /** BeginRequest options */ + options?: (query.IExecuteOptions|null); } - /** Represents a StartCommitRequest. */ - class StartCommitRequest implements IStartCommitRequest { + /** Represents a BeginRequest. */ + class BeginRequest implements IBeginRequest { /** - * Constructs a new StartCommitRequest. + * Constructs a new BeginRequest. 
* @param [properties] Properties to set */ - constructor(properties?: query.IStartCommitRequest); + constructor(properties?: query.IBeginRequest); - /** StartCommitRequest effective_caller_id. */ + /** BeginRequest effective_caller_id. */ public effective_caller_id?: (vtrpc.ICallerID|null); - /** StartCommitRequest immediate_caller_id. */ + /** BeginRequest immediate_caller_id. */ public immediate_caller_id?: (query.IVTGateCallerID|null); - /** StartCommitRequest target. */ + /** BeginRequest target. */ public target?: (query.ITarget|null); - /** StartCommitRequest transaction_id. */ - public transaction_id: (number|Long); - - /** StartCommitRequest dtid. */ - public dtid: string; + /** BeginRequest options. */ + public options?: (query.IExecuteOptions|null); /** - * Creates a new StartCommitRequest instance using the specified properties. + * Creates a new BeginRequest instance using the specified properties. * @param [properties] Properties to set - * @returns StartCommitRequest instance + * @returns BeginRequest instance */ - public static create(properties?: query.IStartCommitRequest): query.StartCommitRequest; + public static create(properties?: query.IBeginRequest): query.BeginRequest; /** - * Encodes the specified StartCommitRequest message. Does not implicitly {@link query.StartCommitRequest.verify|verify} messages. - * @param message StartCommitRequest message or plain object to encode + * Encodes the specified BeginRequest message. Does not implicitly {@link query.BeginRequest.verify|verify} messages. + * @param message BeginRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IStartCommitRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IBeginRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified StartCommitRequest message, length delimited. 
Does not implicitly {@link query.StartCommitRequest.verify|verify} messages. - * @param message StartCommitRequest message or plain object to encode + * Encodes the specified BeginRequest message, length delimited. Does not implicitly {@link query.BeginRequest.verify|verify} messages. + * @param message BeginRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IStartCommitRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IBeginRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a StartCommitRequest message from the specified reader or buffer. + * Decodes a BeginRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns StartCommitRequest + * @returns BeginRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.StartCommitRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.BeginRequest; /** - * Decodes a StartCommitRequest message from the specified reader or buffer, length delimited. + * Decodes a BeginRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns StartCommitRequest + * @returns BeginRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.StartCommitRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.BeginRequest; /** - * Verifies a StartCommitRequest message. 
+ * Verifies a BeginRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a StartCommitRequest message from a plain object. Also converts values to their respective internal types. + * Creates a BeginRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns StartCommitRequest + * @returns BeginRequest */ - public static fromObject(object: { [k: string]: any }): query.StartCommitRequest; + public static fromObject(object: { [k: string]: any }): query.BeginRequest; /** - * Creates a plain object from a StartCommitRequest message. Also converts values to other types if specified. - * @param message StartCommitRequest + * Creates a plain object from a BeginRequest message. Also converts values to other types if specified. + * @param message BeginRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.StartCommitRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.BeginRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this StartCommitRequest to JSON. + * Converts this BeginRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for StartCommitRequest + * Gets the default type url for BeginRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a StartCommitResponse. */ - interface IStartCommitResponse { + /** Properties of a BeginResponse. 
*/ + interface IBeginResponse { + + /** BeginResponse transaction_id */ + transaction_id?: (number|Long|null); + + /** BeginResponse tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); + + /** BeginResponse session_state_changes */ + session_state_changes?: (string|null); } - /** Represents a StartCommitResponse. */ - class StartCommitResponse implements IStartCommitResponse { + /** Represents a BeginResponse. */ + class BeginResponse implements IBeginResponse { /** - * Constructs a new StartCommitResponse. + * Constructs a new BeginResponse. * @param [properties] Properties to set */ - constructor(properties?: query.IStartCommitResponse); + constructor(properties?: query.IBeginResponse); + + /** BeginResponse transaction_id. */ + public transaction_id: (number|Long); + + /** BeginResponse tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); + + /** BeginResponse session_state_changes. */ + public session_state_changes: string; /** - * Creates a new StartCommitResponse instance using the specified properties. + * Creates a new BeginResponse instance using the specified properties. * @param [properties] Properties to set - * @returns StartCommitResponse instance + * @returns BeginResponse instance */ - public static create(properties?: query.IStartCommitResponse): query.StartCommitResponse; + public static create(properties?: query.IBeginResponse): query.BeginResponse; /** - * Encodes the specified StartCommitResponse message. Does not implicitly {@link query.StartCommitResponse.verify|verify} messages. - * @param message StartCommitResponse message or plain object to encode + * Encodes the specified BeginResponse message. Does not implicitly {@link query.BeginResponse.verify|verify} messages. 
+ * @param message BeginResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IStartCommitResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IBeginResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified StartCommitResponse message, length delimited. Does not implicitly {@link query.StartCommitResponse.verify|verify} messages. - * @param message StartCommitResponse message or plain object to encode + * Encodes the specified BeginResponse message, length delimited. Does not implicitly {@link query.BeginResponse.verify|verify} messages. + * @param message BeginResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IStartCommitResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IBeginResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a StartCommitResponse message from the specified reader or buffer. + * Decodes a BeginResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns StartCommitResponse + * @returns BeginResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.StartCommitResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.BeginResponse; /** - * Decodes a StartCommitResponse message from the specified reader or buffer, length delimited. + * Decodes a BeginResponse message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns StartCommitResponse + * @returns BeginResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.StartCommitResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.BeginResponse; /** - * Verifies a StartCommitResponse message. + * Verifies a BeginResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a StartCommitResponse message from a plain object. Also converts values to their respective internal types. + * Creates a BeginResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns StartCommitResponse + * @returns BeginResponse */ - public static fromObject(object: { [k: string]: any }): query.StartCommitResponse; + public static fromObject(object: { [k: string]: any }): query.BeginResponse; /** - * Creates a plain object from a StartCommitResponse message. Also converts values to other types if specified. - * @param message StartCommitResponse + * Creates a plain object from a BeginResponse message. Also converts values to other types if specified. + * @param message BeginResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.StartCommitResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.BeginResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this StartCommitResponse to JSON. + * Converts this BeginResponse to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for StartCommitResponse + * Gets the default type url for BeginResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SetRollbackRequest. */ - interface ISetRollbackRequest { + /** Properties of a CommitRequest. */ + interface ICommitRequest { - /** SetRollbackRequest effective_caller_id */ + /** CommitRequest effective_caller_id */ effective_caller_id?: (vtrpc.ICallerID|null); - /** SetRollbackRequest immediate_caller_id */ + /** CommitRequest immediate_caller_id */ immediate_caller_id?: (query.IVTGateCallerID|null); - /** SetRollbackRequest target */ + /** CommitRequest target */ target?: (query.ITarget|null); - /** SetRollbackRequest transaction_id */ + /** CommitRequest transaction_id */ transaction_id?: (number|Long|null); - - /** SetRollbackRequest dtid */ - dtid?: (string|null); } - /** Represents a SetRollbackRequest. */ - class SetRollbackRequest implements ISetRollbackRequest { + /** Represents a CommitRequest. */ + class CommitRequest implements ICommitRequest { /** - * Constructs a new SetRollbackRequest. + * Constructs a new CommitRequest. * @param [properties] Properties to set */ - constructor(properties?: query.ISetRollbackRequest); + constructor(properties?: query.ICommitRequest); - /** SetRollbackRequest effective_caller_id. */ + /** CommitRequest effective_caller_id. */ public effective_caller_id?: (vtrpc.ICallerID|null); - /** SetRollbackRequest immediate_caller_id. */ + /** CommitRequest immediate_caller_id. */ public immediate_caller_id?: (query.IVTGateCallerID|null); - /** SetRollbackRequest target. */ + /** CommitRequest target. */ public target?: (query.ITarget|null); - /** SetRollbackRequest transaction_id. */ + /** CommitRequest transaction_id. 
*/ public transaction_id: (number|Long); - /** SetRollbackRequest dtid. */ - public dtid: string; - /** - * Creates a new SetRollbackRequest instance using the specified properties. + * Creates a new CommitRequest instance using the specified properties. * @param [properties] Properties to set - * @returns SetRollbackRequest instance + * @returns CommitRequest instance */ - public static create(properties?: query.ISetRollbackRequest): query.SetRollbackRequest; + public static create(properties?: query.ICommitRequest): query.CommitRequest; /** - * Encodes the specified SetRollbackRequest message. Does not implicitly {@link query.SetRollbackRequest.verify|verify} messages. - * @param message SetRollbackRequest message or plain object to encode + * Encodes the specified CommitRequest message. Does not implicitly {@link query.CommitRequest.verify|verify} messages. + * @param message CommitRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.ISetRollbackRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.ICommitRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SetRollbackRequest message, length delimited. Does not implicitly {@link query.SetRollbackRequest.verify|verify} messages. - * @param message SetRollbackRequest message or plain object to encode + * Encodes the specified CommitRequest message, length delimited. Does not implicitly {@link query.CommitRequest.verify|verify} messages. + * @param message CommitRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.ISetRollbackRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.ICommitRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SetRollbackRequest message from the specified reader or buffer. 
+ * Decodes a CommitRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SetRollbackRequest + * @returns CommitRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.SetRollbackRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.CommitRequest; /** - * Decodes a SetRollbackRequest message from the specified reader or buffer, length delimited. + * Decodes a CommitRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns SetRollbackRequest + * @returns CommitRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.SetRollbackRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.CommitRequest; /** - * Verifies a SetRollbackRequest message. + * Verifies a CommitRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SetRollbackRequest message from a plain object. Also converts values to their respective internal types. + * Creates a CommitRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns SetRollbackRequest + * @returns CommitRequest */ - public static fromObject(object: { [k: string]: any }): query.SetRollbackRequest; + public static fromObject(object: { [k: string]: any }): query.CommitRequest; /** - * Creates a plain object from a SetRollbackRequest message. Also converts values to other types if specified. - * @param message SetRollbackRequest + * Creates a plain object from a CommitRequest message. Also converts values to other types if specified. + * @param message CommitRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.SetRollbackRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.CommitRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SetRollbackRequest to JSON. + * Converts this CommitRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SetRollbackRequest + * Gets the default type url for CommitRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SetRollbackResponse. */ - interface ISetRollbackResponse { + /** Properties of a CommitResponse. */ + interface ICommitResponse { + + /** CommitResponse reserved_id */ + reserved_id?: (number|Long|null); } - /** Represents a SetRollbackResponse. */ - class SetRollbackResponse implements ISetRollbackResponse { + /** Represents a CommitResponse. */ + class CommitResponse implements ICommitResponse { /** - * Constructs a new SetRollbackResponse. + * Constructs a new CommitResponse. * @param [properties] Properties to set */ - constructor(properties?: query.ISetRollbackResponse); + constructor(properties?: query.ICommitResponse); + + /** CommitResponse reserved_id. 
*/ + public reserved_id: (number|Long); /** - * Creates a new SetRollbackResponse instance using the specified properties. + * Creates a new CommitResponse instance using the specified properties. * @param [properties] Properties to set - * @returns SetRollbackResponse instance + * @returns CommitResponse instance */ - public static create(properties?: query.ISetRollbackResponse): query.SetRollbackResponse; + public static create(properties?: query.ICommitResponse): query.CommitResponse; /** - * Encodes the specified SetRollbackResponse message. Does not implicitly {@link query.SetRollbackResponse.verify|verify} messages. - * @param message SetRollbackResponse message or plain object to encode + * Encodes the specified CommitResponse message. Does not implicitly {@link query.CommitResponse.verify|verify} messages. + * @param message CommitResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.ISetRollbackResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.ICommitResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SetRollbackResponse message, length delimited. Does not implicitly {@link query.SetRollbackResponse.verify|verify} messages. - * @param message SetRollbackResponse message or plain object to encode + * Encodes the specified CommitResponse message, length delimited. Does not implicitly {@link query.CommitResponse.verify|verify} messages. + * @param message CommitResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.ISetRollbackResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.ICommitResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SetRollbackResponse message from the specified reader or buffer. 
+ * Decodes a CommitResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SetRollbackResponse + * @returns CommitResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.SetRollbackResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.CommitResponse; /** - * Decodes a SetRollbackResponse message from the specified reader or buffer, length delimited. + * Decodes a CommitResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns SetRollbackResponse + * @returns CommitResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.SetRollbackResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.CommitResponse; /** - * Verifies a SetRollbackResponse message. + * Verifies a CommitResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SetRollbackResponse message from a plain object. Also converts values to their respective internal types. + * Creates a CommitResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns SetRollbackResponse + * @returns CommitResponse */ - public static fromObject(object: { [k: string]: any }): query.SetRollbackResponse; + public static fromObject(object: { [k: string]: any }): query.CommitResponse; /** - * Creates a plain object from a SetRollbackResponse message. Also converts values to other types if specified. - * @param message SetRollbackResponse + * Creates a plain object from a CommitResponse message. Also converts values to other types if specified. + * @param message CommitResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.SetRollbackResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.CommitResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SetRollbackResponse to JSON. + * Converts this CommitResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SetRollbackResponse + * Gets the default type url for CommitResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ConcludeTransactionRequest. */ - interface IConcludeTransactionRequest { + /** Properties of a RollbackRequest. 
*/ + interface IRollbackRequest { - /** ConcludeTransactionRequest effective_caller_id */ + /** RollbackRequest effective_caller_id */ effective_caller_id?: (vtrpc.ICallerID|null); - /** ConcludeTransactionRequest immediate_caller_id */ + /** RollbackRequest immediate_caller_id */ immediate_caller_id?: (query.IVTGateCallerID|null); - /** ConcludeTransactionRequest target */ + /** RollbackRequest target */ target?: (query.ITarget|null); - /** ConcludeTransactionRequest dtid */ - dtid?: (string|null); + /** RollbackRequest transaction_id */ + transaction_id?: (number|Long|null); } - /** Represents a ConcludeTransactionRequest. */ - class ConcludeTransactionRequest implements IConcludeTransactionRequest { + /** Represents a RollbackRequest. */ + class RollbackRequest implements IRollbackRequest { /** - * Constructs a new ConcludeTransactionRequest. + * Constructs a new RollbackRequest. * @param [properties] Properties to set */ - constructor(properties?: query.IConcludeTransactionRequest); + constructor(properties?: query.IRollbackRequest); - /** ConcludeTransactionRequest effective_caller_id. */ + /** RollbackRequest effective_caller_id. */ public effective_caller_id?: (vtrpc.ICallerID|null); - /** ConcludeTransactionRequest immediate_caller_id. */ + /** RollbackRequest immediate_caller_id. */ public immediate_caller_id?: (query.IVTGateCallerID|null); - /** ConcludeTransactionRequest target. */ + /** RollbackRequest target. */ public target?: (query.ITarget|null); - /** ConcludeTransactionRequest dtid. */ - public dtid: string; + /** RollbackRequest transaction_id. */ + public transaction_id: (number|Long); /** - * Creates a new ConcludeTransactionRequest instance using the specified properties. + * Creates a new RollbackRequest instance using the specified properties. 
* @param [properties] Properties to set - * @returns ConcludeTransactionRequest instance + * @returns RollbackRequest instance */ - public static create(properties?: query.IConcludeTransactionRequest): query.ConcludeTransactionRequest; + public static create(properties?: query.IRollbackRequest): query.RollbackRequest; /** - * Encodes the specified ConcludeTransactionRequest message. Does not implicitly {@link query.ConcludeTransactionRequest.verify|verify} messages. - * @param message ConcludeTransactionRequest message or plain object to encode + * Encodes the specified RollbackRequest message. Does not implicitly {@link query.RollbackRequest.verify|verify} messages. + * @param message RollbackRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IConcludeTransactionRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IRollbackRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ConcludeTransactionRequest message, length delimited. Does not implicitly {@link query.ConcludeTransactionRequest.verify|verify} messages. - * @param message ConcludeTransactionRequest message or plain object to encode + * Encodes the specified RollbackRequest message, length delimited. Does not implicitly {@link query.RollbackRequest.verify|verify} messages. + * @param message RollbackRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IConcludeTransactionRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IRollbackRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ConcludeTransactionRequest message from the specified reader or buffer. + * Decodes a RollbackRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ConcludeTransactionRequest + * @returns RollbackRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ConcludeTransactionRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.RollbackRequest; /** - * Decodes a ConcludeTransactionRequest message from the specified reader or buffer, length delimited. + * Decodes a RollbackRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ConcludeTransactionRequest + * @returns RollbackRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ConcludeTransactionRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.RollbackRequest; /** - * Verifies a ConcludeTransactionRequest message. + * Verifies a RollbackRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ConcludeTransactionRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RollbackRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ConcludeTransactionRequest + * @returns RollbackRequest */ - public static fromObject(object: { [k: string]: any }): query.ConcludeTransactionRequest; + public static fromObject(object: { [k: string]: any }): query.RollbackRequest; /** - * Creates a plain object from a ConcludeTransactionRequest message. Also converts values to other types if specified. - * @param message ConcludeTransactionRequest + * Creates a plain object from a RollbackRequest message. Also converts values to other types if specified. + * @param message RollbackRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.ConcludeTransactionRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.RollbackRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ConcludeTransactionRequest to JSON. + * Converts this RollbackRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ConcludeTransactionRequest + * Gets the default type url for RollbackRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ConcludeTransactionResponse. */ - interface IConcludeTransactionResponse { + /** Properties of a RollbackResponse. */ + interface IRollbackResponse { + + /** RollbackResponse reserved_id */ + reserved_id?: (number|Long|null); } - /** Represents a ConcludeTransactionResponse. */ - class ConcludeTransactionResponse implements IConcludeTransactionResponse { + /** Represents a RollbackResponse. */ + class RollbackResponse implements IRollbackResponse { /** - * Constructs a new ConcludeTransactionResponse. + * Constructs a new RollbackResponse. 
* @param [properties] Properties to set */ - constructor(properties?: query.IConcludeTransactionResponse); + constructor(properties?: query.IRollbackResponse); + + /** RollbackResponse reserved_id. */ + public reserved_id: (number|Long); /** - * Creates a new ConcludeTransactionResponse instance using the specified properties. + * Creates a new RollbackResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ConcludeTransactionResponse instance + * @returns RollbackResponse instance */ - public static create(properties?: query.IConcludeTransactionResponse): query.ConcludeTransactionResponse; + public static create(properties?: query.IRollbackResponse): query.RollbackResponse; /** - * Encodes the specified ConcludeTransactionResponse message. Does not implicitly {@link query.ConcludeTransactionResponse.verify|verify} messages. - * @param message ConcludeTransactionResponse message or plain object to encode + * Encodes the specified RollbackResponse message. Does not implicitly {@link query.RollbackResponse.verify|verify} messages. + * @param message RollbackResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IConcludeTransactionResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IRollbackResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ConcludeTransactionResponse message, length delimited. Does not implicitly {@link query.ConcludeTransactionResponse.verify|verify} messages. - * @param message ConcludeTransactionResponse message or plain object to encode + * Encodes the specified RollbackResponse message, length delimited. Does not implicitly {@link query.RollbackResponse.verify|verify} messages. 
+ * @param message RollbackResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IConcludeTransactionResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IRollbackResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ConcludeTransactionResponse message from the specified reader or buffer. + * Decodes a RollbackResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ConcludeTransactionResponse + * @returns RollbackResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ConcludeTransactionResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.RollbackResponse; /** - * Decodes a ConcludeTransactionResponse message from the specified reader or buffer, length delimited. + * Decodes a RollbackResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ConcludeTransactionResponse + * @returns RollbackResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ConcludeTransactionResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.RollbackResponse; /** - * Verifies a ConcludeTransactionResponse message. + * Verifies a RollbackResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ConcludeTransactionResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RollbackResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ConcludeTransactionResponse + * @returns RollbackResponse */ - public static fromObject(object: { [k: string]: any }): query.ConcludeTransactionResponse; + public static fromObject(object: { [k: string]: any }): query.RollbackResponse; /** - * Creates a plain object from a ConcludeTransactionResponse message. Also converts values to other types if specified. - * @param message ConcludeTransactionResponse + * Creates a plain object from a RollbackResponse message. Also converts values to other types if specified. + * @param message RollbackResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.ConcludeTransactionResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.RollbackResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ConcludeTransactionResponse to JSON. + * Converts this RollbackResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ConcludeTransactionResponse + * Gets the default type url for RollbackResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReadTransactionRequest. */ - interface IReadTransactionRequest { + /** Properties of a PrepareRequest. 
*/ + interface IPrepareRequest { - /** ReadTransactionRequest effective_caller_id */ + /** PrepareRequest effective_caller_id */ effective_caller_id?: (vtrpc.ICallerID|null); - /** ReadTransactionRequest immediate_caller_id */ + /** PrepareRequest immediate_caller_id */ immediate_caller_id?: (query.IVTGateCallerID|null); - /** ReadTransactionRequest target */ + /** PrepareRequest target */ target?: (query.ITarget|null); - /** ReadTransactionRequest dtid */ + /** PrepareRequest transaction_id */ + transaction_id?: (number|Long|null); + + /** PrepareRequest dtid */ dtid?: (string|null); } - /** Represents a ReadTransactionRequest. */ - class ReadTransactionRequest implements IReadTransactionRequest { + /** Represents a PrepareRequest. */ + class PrepareRequest implements IPrepareRequest { /** - * Constructs a new ReadTransactionRequest. + * Constructs a new PrepareRequest. * @param [properties] Properties to set */ - constructor(properties?: query.IReadTransactionRequest); + constructor(properties?: query.IPrepareRequest); - /** ReadTransactionRequest effective_caller_id. */ + /** PrepareRequest effective_caller_id. */ public effective_caller_id?: (vtrpc.ICallerID|null); - /** ReadTransactionRequest immediate_caller_id. */ + /** PrepareRequest immediate_caller_id. */ public immediate_caller_id?: (query.IVTGateCallerID|null); - /** ReadTransactionRequest target. */ + /** PrepareRequest target. */ public target?: (query.ITarget|null); - /** ReadTransactionRequest dtid. */ + /** PrepareRequest transaction_id. */ + public transaction_id: (number|Long); + + /** PrepareRequest dtid. */ public dtid: string; /** - * Creates a new ReadTransactionRequest instance using the specified properties. + * Creates a new PrepareRequest instance using the specified properties. 
* @param [properties] Properties to set - * @returns ReadTransactionRequest instance + * @returns PrepareRequest instance */ - public static create(properties?: query.IReadTransactionRequest): query.ReadTransactionRequest; + public static create(properties?: query.IPrepareRequest): query.PrepareRequest; /** - * Encodes the specified ReadTransactionRequest message. Does not implicitly {@link query.ReadTransactionRequest.verify|verify} messages. - * @param message ReadTransactionRequest message or plain object to encode + * Encodes the specified PrepareRequest message. Does not implicitly {@link query.PrepareRequest.verify|verify} messages. + * @param message PrepareRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IReadTransactionRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IPrepareRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReadTransactionRequest message, length delimited. Does not implicitly {@link query.ReadTransactionRequest.verify|verify} messages. - * @param message ReadTransactionRequest message or plain object to encode + * Encodes the specified PrepareRequest message, length delimited. Does not implicitly {@link query.PrepareRequest.verify|verify} messages. + * @param message PrepareRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IReadTransactionRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IPrepareRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReadTransactionRequest message from the specified reader or buffer. + * Decodes a PrepareRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReadTransactionRequest + * @returns PrepareRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReadTransactionRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.PrepareRequest; /** - * Decodes a ReadTransactionRequest message from the specified reader or buffer, length delimited. + * Decodes a PrepareRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ReadTransactionRequest + * @returns PrepareRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReadTransactionRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.PrepareRequest; /** - * Verifies a ReadTransactionRequest message. + * Verifies a PrepareRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReadTransactionRequest message from a plain object. Also converts values to their respective internal types. + * Creates a PrepareRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ReadTransactionRequest + * @returns PrepareRequest */ - public static fromObject(object: { [k: string]: any }): query.ReadTransactionRequest; + public static fromObject(object: { [k: string]: any }): query.PrepareRequest; /** - * Creates a plain object from a ReadTransactionRequest message. Also converts values to other types if specified. - * @param message ReadTransactionRequest + * Creates a plain object from a PrepareRequest message. Also converts values to other types if specified. + * @param message PrepareRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.ReadTransactionRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.PrepareRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReadTransactionRequest to JSON. + * Converts this PrepareRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReadTransactionRequest + * Gets the default type url for PrepareRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReadTransactionResponse. */ - interface IReadTransactionResponse { - - /** ReadTransactionResponse metadata */ - metadata?: (query.ITransactionMetadata|null); + /** Properties of a PrepareResponse. */ + interface IPrepareResponse { } - /** Represents a ReadTransactionResponse. */ - class ReadTransactionResponse implements IReadTransactionResponse { + /** Represents a PrepareResponse. */ + class PrepareResponse implements IPrepareResponse { /** - * Constructs a new ReadTransactionResponse. + * Constructs a new PrepareResponse. 
* @param [properties] Properties to set */ - constructor(properties?: query.IReadTransactionResponse); - - /** ReadTransactionResponse metadata. */ - public metadata?: (query.ITransactionMetadata|null); + constructor(properties?: query.IPrepareResponse); /** - * Creates a new ReadTransactionResponse instance using the specified properties. + * Creates a new PrepareResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ReadTransactionResponse instance + * @returns PrepareResponse instance */ - public static create(properties?: query.IReadTransactionResponse): query.ReadTransactionResponse; + public static create(properties?: query.IPrepareResponse): query.PrepareResponse; /** - * Encodes the specified ReadTransactionResponse message. Does not implicitly {@link query.ReadTransactionResponse.verify|verify} messages. - * @param message ReadTransactionResponse message or plain object to encode + * Encodes the specified PrepareResponse message. Does not implicitly {@link query.PrepareResponse.verify|verify} messages. + * @param message PrepareResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IReadTransactionResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IPrepareResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReadTransactionResponse message, length delimited. Does not implicitly {@link query.ReadTransactionResponse.verify|verify} messages. - * @param message ReadTransactionResponse message or plain object to encode + * Encodes the specified PrepareResponse message, length delimited. Does not implicitly {@link query.PrepareResponse.verify|verify} messages. 
+ * @param message PrepareResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IReadTransactionResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IPrepareResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReadTransactionResponse message from the specified reader or buffer. + * Decodes a PrepareResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReadTransactionResponse + * @returns PrepareResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReadTransactionResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.PrepareResponse; /** - * Decodes a ReadTransactionResponse message from the specified reader or buffer, length delimited. + * Decodes a PrepareResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ReadTransactionResponse + * @returns PrepareResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReadTransactionResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.PrepareResponse; /** - * Verifies a ReadTransactionResponse message. + * Verifies a PrepareResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReadTransactionResponse message from a plain object. Also converts values to their respective internal types. + * Creates a PrepareResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ReadTransactionResponse + * @returns PrepareResponse */ - public static fromObject(object: { [k: string]: any }): query.ReadTransactionResponse; + public static fromObject(object: { [k: string]: any }): query.PrepareResponse; /** - * Creates a plain object from a ReadTransactionResponse message. Also converts values to other types if specified. - * @param message ReadTransactionResponse + * Creates a plain object from a PrepareResponse message. Also converts values to other types if specified. + * @param message PrepareResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.ReadTransactionResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.PrepareResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReadTransactionResponse to JSON. + * Converts this PrepareResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReadTransactionResponse + * Gets the default type url for PrepareResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a BeginExecuteRequest. */ - interface IBeginExecuteRequest { + /** Properties of a CommitPreparedRequest. 
*/ + interface ICommitPreparedRequest { - /** BeginExecuteRequest effective_caller_id */ + /** CommitPreparedRequest effective_caller_id */ effective_caller_id?: (vtrpc.ICallerID|null); - /** BeginExecuteRequest immediate_caller_id */ + /** CommitPreparedRequest immediate_caller_id */ immediate_caller_id?: (query.IVTGateCallerID|null); - /** BeginExecuteRequest target */ + /** CommitPreparedRequest target */ target?: (query.ITarget|null); - /** BeginExecuteRequest query */ - query?: (query.IBoundQuery|null); - - /** BeginExecuteRequest options */ - options?: (query.IExecuteOptions|null); - - /** BeginExecuteRequest reserved_id */ - reserved_id?: (number|Long|null); - - /** BeginExecuteRequest pre_queries */ - pre_queries?: (string[]|null); + /** CommitPreparedRequest dtid */ + dtid?: (string|null); } - /** Represents a BeginExecuteRequest. */ - class BeginExecuteRequest implements IBeginExecuteRequest { + /** Represents a CommitPreparedRequest. */ + class CommitPreparedRequest implements ICommitPreparedRequest { /** - * Constructs a new BeginExecuteRequest. + * Constructs a new CommitPreparedRequest. * @param [properties] Properties to set */ - constructor(properties?: query.IBeginExecuteRequest); + constructor(properties?: query.ICommitPreparedRequest); - /** BeginExecuteRequest effective_caller_id. */ + /** CommitPreparedRequest effective_caller_id. */ public effective_caller_id?: (vtrpc.ICallerID|null); - /** BeginExecuteRequest immediate_caller_id. */ + /** CommitPreparedRequest immediate_caller_id. */ public immediate_caller_id?: (query.IVTGateCallerID|null); - /** BeginExecuteRequest target. */ + /** CommitPreparedRequest target. */ public target?: (query.ITarget|null); - /** BeginExecuteRequest query. */ - public query?: (query.IBoundQuery|null); - - /** BeginExecuteRequest options. */ - public options?: (query.IExecuteOptions|null); - - /** BeginExecuteRequest reserved_id. */ - public reserved_id: (number|Long); - - /** BeginExecuteRequest pre_queries. 
*/ - public pre_queries: string[]; + /** CommitPreparedRequest dtid. */ + public dtid: string; /** - * Creates a new BeginExecuteRequest instance using the specified properties. + * Creates a new CommitPreparedRequest instance using the specified properties. * @param [properties] Properties to set - * @returns BeginExecuteRequest instance + * @returns CommitPreparedRequest instance */ - public static create(properties?: query.IBeginExecuteRequest): query.BeginExecuteRequest; + public static create(properties?: query.ICommitPreparedRequest): query.CommitPreparedRequest; /** - * Encodes the specified BeginExecuteRequest message. Does not implicitly {@link query.BeginExecuteRequest.verify|verify} messages. - * @param message BeginExecuteRequest message or plain object to encode + * Encodes the specified CommitPreparedRequest message. Does not implicitly {@link query.CommitPreparedRequest.verify|verify} messages. + * @param message CommitPreparedRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IBeginExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.ICommitPreparedRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified BeginExecuteRequest message, length delimited. Does not implicitly {@link query.BeginExecuteRequest.verify|verify} messages. - * @param message BeginExecuteRequest message or plain object to encode + * Encodes the specified CommitPreparedRequest message, length delimited. Does not implicitly {@link query.CommitPreparedRequest.verify|verify} messages. 
+ * @param message CommitPreparedRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IBeginExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.ICommitPreparedRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a BeginExecuteRequest message from the specified reader or buffer. + * Decodes a CommitPreparedRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns BeginExecuteRequest + * @returns CommitPreparedRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.BeginExecuteRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.CommitPreparedRequest; /** - * Decodes a BeginExecuteRequest message from the specified reader or buffer, length delimited. + * Decodes a CommitPreparedRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns BeginExecuteRequest + * @returns CommitPreparedRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.BeginExecuteRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.CommitPreparedRequest; /** - * Verifies a BeginExecuteRequest message. + * Verifies a CommitPreparedRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a BeginExecuteRequest message from a plain object. Also converts values to their respective internal types. + * Creates a CommitPreparedRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns BeginExecuteRequest + * @returns CommitPreparedRequest */ - public static fromObject(object: { [k: string]: any }): query.BeginExecuteRequest; + public static fromObject(object: { [k: string]: any }): query.CommitPreparedRequest; /** - * Creates a plain object from a BeginExecuteRequest message. Also converts values to other types if specified. - * @param message BeginExecuteRequest + * Creates a plain object from a CommitPreparedRequest message. Also converts values to other types if specified. + * @param message CommitPreparedRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.BeginExecuteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.CommitPreparedRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this BeginExecuteRequest to JSON. + * Converts this CommitPreparedRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for BeginExecuteRequest + * Gets the default type url for CommitPreparedRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a BeginExecuteResponse. 
*/ - interface IBeginExecuteResponse { - - /** BeginExecuteResponse error */ - error?: (vtrpc.IRPCError|null); - - /** BeginExecuteResponse result */ - result?: (query.IQueryResult|null); - - /** BeginExecuteResponse transaction_id */ - transaction_id?: (number|Long|null); - - /** BeginExecuteResponse tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); - - /** BeginExecuteResponse session_state_changes */ - session_state_changes?: (string|null); + /** Properties of a CommitPreparedResponse. */ + interface ICommitPreparedResponse { } - /** Represents a BeginExecuteResponse. */ - class BeginExecuteResponse implements IBeginExecuteResponse { + /** Represents a CommitPreparedResponse. */ + class CommitPreparedResponse implements ICommitPreparedResponse { /** - * Constructs a new BeginExecuteResponse. + * Constructs a new CommitPreparedResponse. * @param [properties] Properties to set */ - constructor(properties?: query.IBeginExecuteResponse); - - /** BeginExecuteResponse error. */ - public error?: (vtrpc.IRPCError|null); - - /** BeginExecuteResponse result. */ - public result?: (query.IQueryResult|null); - - /** BeginExecuteResponse transaction_id. */ - public transaction_id: (number|Long); - - /** BeginExecuteResponse tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); - - /** BeginExecuteResponse session_state_changes. */ - public session_state_changes: string; + constructor(properties?: query.ICommitPreparedResponse); /** - * Creates a new BeginExecuteResponse instance using the specified properties. + * Creates a new CommitPreparedResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns BeginExecuteResponse instance + * @returns CommitPreparedResponse instance */ - public static create(properties?: query.IBeginExecuteResponse): query.BeginExecuteResponse; + public static create(properties?: query.ICommitPreparedResponse): query.CommitPreparedResponse; /** - * Encodes the specified BeginExecuteResponse message. Does not implicitly {@link query.BeginExecuteResponse.verify|verify} messages. - * @param message BeginExecuteResponse message or plain object to encode + * Encodes the specified CommitPreparedResponse message. Does not implicitly {@link query.CommitPreparedResponse.verify|verify} messages. + * @param message CommitPreparedResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IBeginExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.ICommitPreparedResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified BeginExecuteResponse message, length delimited. Does not implicitly {@link query.BeginExecuteResponse.verify|verify} messages. - * @param message BeginExecuteResponse message or plain object to encode + * Encodes the specified CommitPreparedResponse message, length delimited. Does not implicitly {@link query.CommitPreparedResponse.verify|verify} messages. + * @param message CommitPreparedResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IBeginExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.ICommitPreparedResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a BeginExecuteResponse message from the specified reader or buffer. + * Decodes a CommitPreparedResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns BeginExecuteResponse + * @returns CommitPreparedResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.BeginExecuteResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.CommitPreparedResponse; /** - * Decodes a BeginExecuteResponse message from the specified reader or buffer, length delimited. + * Decodes a CommitPreparedResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns BeginExecuteResponse + * @returns CommitPreparedResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.BeginExecuteResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.CommitPreparedResponse; /** - * Verifies a BeginExecuteResponse message. + * Verifies a CommitPreparedResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a BeginExecuteResponse message from a plain object. Also converts values to their respective internal types. + * Creates a CommitPreparedResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns BeginExecuteResponse + * @returns CommitPreparedResponse */ - public static fromObject(object: { [k: string]: any }): query.BeginExecuteResponse; + public static fromObject(object: { [k: string]: any }): query.CommitPreparedResponse; /** - * Creates a plain object from a BeginExecuteResponse message. Also converts values to other types if specified. - * @param message BeginExecuteResponse + * Creates a plain object from a CommitPreparedResponse message. Also converts values to other types if specified. + * @param message CommitPreparedResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.BeginExecuteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.CommitPreparedResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this BeginExecuteResponse to JSON. + * Converts this CommitPreparedResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for BeginExecuteResponse + * Gets the default type url for CommitPreparedResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a BeginStreamExecuteRequest. */ - interface IBeginStreamExecuteRequest { + /** Properties of a RollbackPreparedRequest. 
*/ + interface IRollbackPreparedRequest { - /** BeginStreamExecuteRequest effective_caller_id */ + /** RollbackPreparedRequest effective_caller_id */ effective_caller_id?: (vtrpc.ICallerID|null); - /** BeginStreamExecuteRequest immediate_caller_id */ + /** RollbackPreparedRequest immediate_caller_id */ immediate_caller_id?: (query.IVTGateCallerID|null); - /** BeginStreamExecuteRequest target */ + /** RollbackPreparedRequest target */ target?: (query.ITarget|null); - /** BeginStreamExecuteRequest query */ - query?: (query.IBoundQuery|null); - - /** BeginStreamExecuteRequest options */ - options?: (query.IExecuteOptions|null); - - /** BeginStreamExecuteRequest pre_queries */ - pre_queries?: (string[]|null); + /** RollbackPreparedRequest transaction_id */ + transaction_id?: (number|Long|null); - /** BeginStreamExecuteRequest reserved_id */ - reserved_id?: (number|Long|null); + /** RollbackPreparedRequest dtid */ + dtid?: (string|null); } - /** Represents a BeginStreamExecuteRequest. */ - class BeginStreamExecuteRequest implements IBeginStreamExecuteRequest { + /** Represents a RollbackPreparedRequest. */ + class RollbackPreparedRequest implements IRollbackPreparedRequest { /** - * Constructs a new BeginStreamExecuteRequest. + * Constructs a new RollbackPreparedRequest. * @param [properties] Properties to set */ - constructor(properties?: query.IBeginStreamExecuteRequest); + constructor(properties?: query.IRollbackPreparedRequest); - /** BeginStreamExecuteRequest effective_caller_id. */ + /** RollbackPreparedRequest effective_caller_id. */ public effective_caller_id?: (vtrpc.ICallerID|null); - /** BeginStreamExecuteRequest immediate_caller_id. */ + /** RollbackPreparedRequest immediate_caller_id. */ public immediate_caller_id?: (query.IVTGateCallerID|null); - /** BeginStreamExecuteRequest target. */ + /** RollbackPreparedRequest target. */ public target?: (query.ITarget|null); - /** BeginStreamExecuteRequest query. 
*/ - public query?: (query.IBoundQuery|null); - - /** BeginStreamExecuteRequest options. */ - public options?: (query.IExecuteOptions|null); - - /** BeginStreamExecuteRequest pre_queries. */ - public pre_queries: string[]; + /** RollbackPreparedRequest transaction_id. */ + public transaction_id: (number|Long); - /** BeginStreamExecuteRequest reserved_id. */ - public reserved_id: (number|Long); + /** RollbackPreparedRequest dtid. */ + public dtid: string; /** - * Creates a new BeginStreamExecuteRequest instance using the specified properties. + * Creates a new RollbackPreparedRequest instance using the specified properties. * @param [properties] Properties to set - * @returns BeginStreamExecuteRequest instance + * @returns RollbackPreparedRequest instance */ - public static create(properties?: query.IBeginStreamExecuteRequest): query.BeginStreamExecuteRequest; + public static create(properties?: query.IRollbackPreparedRequest): query.RollbackPreparedRequest; /** - * Encodes the specified BeginStreamExecuteRequest message. Does not implicitly {@link query.BeginStreamExecuteRequest.verify|verify} messages. - * @param message BeginStreamExecuteRequest message or plain object to encode + * Encodes the specified RollbackPreparedRequest message. Does not implicitly {@link query.RollbackPreparedRequest.verify|verify} messages. + * @param message RollbackPreparedRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IBeginStreamExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IRollbackPreparedRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified BeginStreamExecuteRequest message, length delimited. Does not implicitly {@link query.BeginStreamExecuteRequest.verify|verify} messages. 
- * @param message BeginStreamExecuteRequest message or plain object to encode + * Encodes the specified RollbackPreparedRequest message, length delimited. Does not implicitly {@link query.RollbackPreparedRequest.verify|verify} messages. + * @param message RollbackPreparedRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IBeginStreamExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IRollbackPreparedRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a BeginStreamExecuteRequest message from the specified reader or buffer. + * Decodes a RollbackPreparedRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns BeginStreamExecuteRequest + * @returns RollbackPreparedRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.BeginStreamExecuteRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.RollbackPreparedRequest; /** - * Decodes a BeginStreamExecuteRequest message from the specified reader or buffer, length delimited. + * Decodes a RollbackPreparedRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns BeginStreamExecuteRequest + * @returns RollbackPreparedRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.BeginStreamExecuteRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.RollbackPreparedRequest; /** - * Verifies a BeginStreamExecuteRequest message. + * Verifies a RollbackPreparedRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a BeginStreamExecuteRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RollbackPreparedRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns BeginStreamExecuteRequest + * @returns RollbackPreparedRequest */ - public static fromObject(object: { [k: string]: any }): query.BeginStreamExecuteRequest; + public static fromObject(object: { [k: string]: any }): query.RollbackPreparedRequest; /** - * Creates a plain object from a BeginStreamExecuteRequest message. Also converts values to other types if specified. - * @param message BeginStreamExecuteRequest + * Creates a plain object from a RollbackPreparedRequest message. Also converts values to other types if specified. + * @param message RollbackPreparedRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.BeginStreamExecuteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.RollbackPreparedRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this BeginStreamExecuteRequest to JSON. 
+ * Converts this RollbackPreparedRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for BeginStreamExecuteRequest + * Gets the default type url for RollbackPreparedRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a BeginStreamExecuteResponse. */ - interface IBeginStreamExecuteResponse { - - /** BeginStreamExecuteResponse error */ - error?: (vtrpc.IRPCError|null); - - /** BeginStreamExecuteResponse result */ - result?: (query.IQueryResult|null); - - /** BeginStreamExecuteResponse transaction_id */ - transaction_id?: (number|Long|null); - - /** BeginStreamExecuteResponse tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); - - /** BeginStreamExecuteResponse session_state_changes */ - session_state_changes?: (string|null); + /** Properties of a RollbackPreparedResponse. */ + interface IRollbackPreparedResponse { } - /** Represents a BeginStreamExecuteResponse. */ - class BeginStreamExecuteResponse implements IBeginStreamExecuteResponse { + /** Represents a RollbackPreparedResponse. */ + class RollbackPreparedResponse implements IRollbackPreparedResponse { /** - * Constructs a new BeginStreamExecuteResponse. + * Constructs a new RollbackPreparedResponse. * @param [properties] Properties to set */ - constructor(properties?: query.IBeginStreamExecuteResponse); - - /** BeginStreamExecuteResponse error. */ - public error?: (vtrpc.IRPCError|null); - - /** BeginStreamExecuteResponse result. */ - public result?: (query.IQueryResult|null); - - /** BeginStreamExecuteResponse transaction_id. */ - public transaction_id: (number|Long); - - /** BeginStreamExecuteResponse tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); - - /** BeginStreamExecuteResponse session_state_changes. 
*/ - public session_state_changes: string; + constructor(properties?: query.IRollbackPreparedResponse); /** - * Creates a new BeginStreamExecuteResponse instance using the specified properties. + * Creates a new RollbackPreparedResponse instance using the specified properties. * @param [properties] Properties to set - * @returns BeginStreamExecuteResponse instance + * @returns RollbackPreparedResponse instance */ - public static create(properties?: query.IBeginStreamExecuteResponse): query.BeginStreamExecuteResponse; + public static create(properties?: query.IRollbackPreparedResponse): query.RollbackPreparedResponse; /** - * Encodes the specified BeginStreamExecuteResponse message. Does not implicitly {@link query.BeginStreamExecuteResponse.verify|verify} messages. - * @param message BeginStreamExecuteResponse message or plain object to encode + * Encodes the specified RollbackPreparedResponse message. Does not implicitly {@link query.RollbackPreparedResponse.verify|verify} messages. + * @param message RollbackPreparedResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IBeginStreamExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IRollbackPreparedResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified BeginStreamExecuteResponse message, length delimited. Does not implicitly {@link query.BeginStreamExecuteResponse.verify|verify} messages. - * @param message BeginStreamExecuteResponse message or plain object to encode + * Encodes the specified RollbackPreparedResponse message, length delimited. Does not implicitly {@link query.RollbackPreparedResponse.verify|verify} messages. 
+ * @param message RollbackPreparedResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IBeginStreamExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IRollbackPreparedResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a BeginStreamExecuteResponse message from the specified reader or buffer. + * Decodes a RollbackPreparedResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns BeginStreamExecuteResponse + * @returns RollbackPreparedResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.BeginStreamExecuteResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.RollbackPreparedResponse; /** - * Decodes a BeginStreamExecuteResponse message from the specified reader or buffer, length delimited. + * Decodes a RollbackPreparedResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns BeginStreamExecuteResponse + * @returns RollbackPreparedResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.BeginStreamExecuteResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.RollbackPreparedResponse; /** - * Verifies a BeginStreamExecuteResponse message. + * Verifies a RollbackPreparedResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a BeginStreamExecuteResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RollbackPreparedResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns BeginStreamExecuteResponse + * @returns RollbackPreparedResponse */ - public static fromObject(object: { [k: string]: any }): query.BeginStreamExecuteResponse; + public static fromObject(object: { [k: string]: any }): query.RollbackPreparedResponse; /** - * Creates a plain object from a BeginStreamExecuteResponse message. Also converts values to other types if specified. - * @param message BeginStreamExecuteResponse + * Creates a plain object from a RollbackPreparedResponse message. Also converts values to other types if specified. + * @param message RollbackPreparedResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.BeginStreamExecuteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.RollbackPreparedResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this BeginStreamExecuteResponse to JSON. + * Converts this RollbackPreparedResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for BeginStreamExecuteResponse + * Gets the default type url for RollbackPreparedResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a MessageStreamRequest. */ - interface IMessageStreamRequest { + /** Properties of a CreateTransactionRequest. 
*/ + interface ICreateTransactionRequest { - /** MessageStreamRequest effective_caller_id */ + /** CreateTransactionRequest effective_caller_id */ effective_caller_id?: (vtrpc.ICallerID|null); - /** MessageStreamRequest immediate_caller_id */ + /** CreateTransactionRequest immediate_caller_id */ immediate_caller_id?: (query.IVTGateCallerID|null); - /** MessageStreamRequest target */ + /** CreateTransactionRequest target */ target?: (query.ITarget|null); - /** MessageStreamRequest name */ - name?: (string|null); + /** CreateTransactionRequest dtid */ + dtid?: (string|null); + + /** CreateTransactionRequest participants */ + participants?: (query.ITarget[]|null); } - /** Represents a MessageStreamRequest. */ - class MessageStreamRequest implements IMessageStreamRequest { + /** Represents a CreateTransactionRequest. */ + class CreateTransactionRequest implements ICreateTransactionRequest { /** - * Constructs a new MessageStreamRequest. + * Constructs a new CreateTransactionRequest. * @param [properties] Properties to set */ - constructor(properties?: query.IMessageStreamRequest); + constructor(properties?: query.ICreateTransactionRequest); - /** MessageStreamRequest effective_caller_id. */ + /** CreateTransactionRequest effective_caller_id. */ public effective_caller_id?: (vtrpc.ICallerID|null); - /** MessageStreamRequest immediate_caller_id. */ + /** CreateTransactionRequest immediate_caller_id. */ public immediate_caller_id?: (query.IVTGateCallerID|null); - /** MessageStreamRequest target. */ + /** CreateTransactionRequest target. */ public target?: (query.ITarget|null); - /** MessageStreamRequest name. */ - public name: string; + /** CreateTransactionRequest dtid. */ + public dtid: string; + + /** CreateTransactionRequest participants. */ + public participants: query.ITarget[]; /** - * Creates a new MessageStreamRequest instance using the specified properties. + * Creates a new CreateTransactionRequest instance using the specified properties. 
* @param [properties] Properties to set - * @returns MessageStreamRequest instance + * @returns CreateTransactionRequest instance */ - public static create(properties?: query.IMessageStreamRequest): query.MessageStreamRequest; + public static create(properties?: query.ICreateTransactionRequest): query.CreateTransactionRequest; /** - * Encodes the specified MessageStreamRequest message. Does not implicitly {@link query.MessageStreamRequest.verify|verify} messages. - * @param message MessageStreamRequest message or plain object to encode + * Encodes the specified CreateTransactionRequest message. Does not implicitly {@link query.CreateTransactionRequest.verify|verify} messages. + * @param message CreateTransactionRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IMessageStreamRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.ICreateTransactionRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified MessageStreamRequest message, length delimited. Does not implicitly {@link query.MessageStreamRequest.verify|verify} messages. - * @param message MessageStreamRequest message or plain object to encode + * Encodes the specified CreateTransactionRequest message, length delimited. Does not implicitly {@link query.CreateTransactionRequest.verify|verify} messages. + * @param message CreateTransactionRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IMessageStreamRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.ICreateTransactionRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a MessageStreamRequest message from the specified reader or buffer. + * Decodes a CreateTransactionRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns MessageStreamRequest + * @returns CreateTransactionRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.MessageStreamRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.CreateTransactionRequest; /** - * Decodes a MessageStreamRequest message from the specified reader or buffer, length delimited. + * Decodes a CreateTransactionRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns MessageStreamRequest + * @returns CreateTransactionRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.MessageStreamRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.CreateTransactionRequest; /** - * Verifies a MessageStreamRequest message. + * Verifies a CreateTransactionRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a MessageStreamRequest message from a plain object. Also converts values to their respective internal types. + * Creates a CreateTransactionRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns MessageStreamRequest + * @returns CreateTransactionRequest */ - public static fromObject(object: { [k: string]: any }): query.MessageStreamRequest; + public static fromObject(object: { [k: string]: any }): query.CreateTransactionRequest; /** - * Creates a plain object from a MessageStreamRequest message. Also converts values to other types if specified. - * @param message MessageStreamRequest + * Creates a plain object from a CreateTransactionRequest message. Also converts values to other types if specified. + * @param message CreateTransactionRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.MessageStreamRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.CreateTransactionRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this MessageStreamRequest to JSON. + * Converts this CreateTransactionRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for MessageStreamRequest + * Gets the default type url for CreateTransactionRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a MessageStreamResponse. */ - interface IMessageStreamResponse { - - /** MessageStreamResponse result */ - result?: (query.IQueryResult|null); + /** Properties of a CreateTransactionResponse. */ + interface ICreateTransactionResponse { } - /** Represents a MessageStreamResponse. */ - class MessageStreamResponse implements IMessageStreamResponse { + /** Represents a CreateTransactionResponse. */ + class CreateTransactionResponse implements ICreateTransactionResponse { /** - * Constructs a new MessageStreamResponse. + * Constructs a new CreateTransactionResponse. 
* @param [properties] Properties to set */ - constructor(properties?: query.IMessageStreamResponse); - - /** MessageStreamResponse result. */ - public result?: (query.IQueryResult|null); + constructor(properties?: query.ICreateTransactionResponse); /** - * Creates a new MessageStreamResponse instance using the specified properties. + * Creates a new CreateTransactionResponse instance using the specified properties. * @param [properties] Properties to set - * @returns MessageStreamResponse instance + * @returns CreateTransactionResponse instance */ - public static create(properties?: query.IMessageStreamResponse): query.MessageStreamResponse; + public static create(properties?: query.ICreateTransactionResponse): query.CreateTransactionResponse; /** - * Encodes the specified MessageStreamResponse message. Does not implicitly {@link query.MessageStreamResponse.verify|verify} messages. - * @param message MessageStreamResponse message or plain object to encode + * Encodes the specified CreateTransactionResponse message. Does not implicitly {@link query.CreateTransactionResponse.verify|verify} messages. + * @param message CreateTransactionResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IMessageStreamResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.ICreateTransactionResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified MessageStreamResponse message, length delimited. Does not implicitly {@link query.MessageStreamResponse.verify|verify} messages. - * @param message MessageStreamResponse message or plain object to encode + * Encodes the specified CreateTransactionResponse message, length delimited. Does not implicitly {@link query.CreateTransactionResponse.verify|verify} messages. 
+ * @param message CreateTransactionResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IMessageStreamResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.ICreateTransactionResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a MessageStreamResponse message from the specified reader or buffer. + * Decodes a CreateTransactionResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns MessageStreamResponse + * @returns CreateTransactionResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.MessageStreamResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.CreateTransactionResponse; /** - * Decodes a MessageStreamResponse message from the specified reader or buffer, length delimited. + * Decodes a CreateTransactionResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns MessageStreamResponse + * @returns CreateTransactionResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.MessageStreamResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.CreateTransactionResponse; /** - * Verifies a MessageStreamResponse message. + * Verifies a CreateTransactionResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a MessageStreamResponse message from a plain object. Also converts values to their respective internal types. + * Creates a CreateTransactionResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns MessageStreamResponse + * @returns CreateTransactionResponse */ - public static fromObject(object: { [k: string]: any }): query.MessageStreamResponse; + public static fromObject(object: { [k: string]: any }): query.CreateTransactionResponse; /** - * Creates a plain object from a MessageStreamResponse message. Also converts values to other types if specified. - * @param message MessageStreamResponse + * Creates a plain object from a CreateTransactionResponse message. Also converts values to other types if specified. + * @param message CreateTransactionResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.MessageStreamResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.CreateTransactionResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this MessageStreamResponse to JSON. + * Converts this CreateTransactionResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for MessageStreamResponse + * Gets the default type url for CreateTransactionResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a MessageAckRequest. */ - interface IMessageAckRequest { + /** Properties of a StartCommitRequest. 
*/ + interface IStartCommitRequest { - /** MessageAckRequest effective_caller_id */ + /** StartCommitRequest effective_caller_id */ effective_caller_id?: (vtrpc.ICallerID|null); - /** MessageAckRequest immediate_caller_id */ + /** StartCommitRequest immediate_caller_id */ immediate_caller_id?: (query.IVTGateCallerID|null); - /** MessageAckRequest target */ + /** StartCommitRequest target */ target?: (query.ITarget|null); - /** MessageAckRequest name */ - name?: (string|null); + /** StartCommitRequest transaction_id */ + transaction_id?: (number|Long|null); - /** MessageAckRequest ids */ - ids?: (query.IValue[]|null); + /** StartCommitRequest dtid */ + dtid?: (string|null); } - /** Represents a MessageAckRequest. */ - class MessageAckRequest implements IMessageAckRequest { + /** Represents a StartCommitRequest. */ + class StartCommitRequest implements IStartCommitRequest { /** - * Constructs a new MessageAckRequest. + * Constructs a new StartCommitRequest. * @param [properties] Properties to set */ - constructor(properties?: query.IMessageAckRequest); + constructor(properties?: query.IStartCommitRequest); - /** MessageAckRequest effective_caller_id. */ + /** StartCommitRequest effective_caller_id. */ public effective_caller_id?: (vtrpc.ICallerID|null); - /** MessageAckRequest immediate_caller_id. */ + /** StartCommitRequest immediate_caller_id. */ public immediate_caller_id?: (query.IVTGateCallerID|null); - /** MessageAckRequest target. */ + /** StartCommitRequest target. */ public target?: (query.ITarget|null); - /** MessageAckRequest name. */ - public name: string; + /** StartCommitRequest transaction_id. */ + public transaction_id: (number|Long); - /** MessageAckRequest ids. */ - public ids: query.IValue[]; + /** StartCommitRequest dtid. */ + public dtid: string; /** - * Creates a new MessageAckRequest instance using the specified properties. + * Creates a new StartCommitRequest instance using the specified properties. 
* @param [properties] Properties to set - * @returns MessageAckRequest instance + * @returns StartCommitRequest instance */ - public static create(properties?: query.IMessageAckRequest): query.MessageAckRequest; + public static create(properties?: query.IStartCommitRequest): query.StartCommitRequest; /** - * Encodes the specified MessageAckRequest message. Does not implicitly {@link query.MessageAckRequest.verify|verify} messages. - * @param message MessageAckRequest message or plain object to encode + * Encodes the specified StartCommitRequest message. Does not implicitly {@link query.StartCommitRequest.verify|verify} messages. + * @param message StartCommitRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IMessageAckRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IStartCommitRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified MessageAckRequest message, length delimited. Does not implicitly {@link query.MessageAckRequest.verify|verify} messages. - * @param message MessageAckRequest message or plain object to encode + * Encodes the specified StartCommitRequest message, length delimited. Does not implicitly {@link query.StartCommitRequest.verify|verify} messages. + * @param message StartCommitRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IMessageAckRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IStartCommitRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a MessageAckRequest message from the specified reader or buffer. + * Decodes a StartCommitRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns MessageAckRequest + * @returns StartCommitRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.MessageAckRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.StartCommitRequest; /** - * Decodes a MessageAckRequest message from the specified reader or buffer, length delimited. + * Decodes a StartCommitRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns MessageAckRequest + * @returns StartCommitRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.MessageAckRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.StartCommitRequest; /** - * Verifies a MessageAckRequest message. + * Verifies a StartCommitRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a MessageAckRequest message from a plain object. Also converts values to their respective internal types. + * Creates a StartCommitRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns MessageAckRequest + * @returns StartCommitRequest */ - public static fromObject(object: { [k: string]: any }): query.MessageAckRequest; + public static fromObject(object: { [k: string]: any }): query.StartCommitRequest; /** - * Creates a plain object from a MessageAckRequest message. 
Also converts values to other types if specified. - * @param message MessageAckRequest + * Creates a plain object from a StartCommitRequest message. Also converts values to other types if specified. + * @param message StartCommitRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.MessageAckRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.StartCommitRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this MessageAckRequest to JSON. + * Converts this StartCommitRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for MessageAckRequest + * Gets the default type url for StartCommitRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a MessageAckResponse. */ - interface IMessageAckResponse { - - /** MessageAckResponse result */ - result?: (query.IQueryResult|null); + /** Properties of a StartCommitResponse. */ + interface IStartCommitResponse { } - /** Represents a MessageAckResponse. */ - class MessageAckResponse implements IMessageAckResponse { + /** Represents a StartCommitResponse. */ + class StartCommitResponse implements IStartCommitResponse { /** - * Constructs a new MessageAckResponse. + * Constructs a new StartCommitResponse. * @param [properties] Properties to set */ - constructor(properties?: query.IMessageAckResponse); - - /** MessageAckResponse result. */ - public result?: (query.IQueryResult|null); + constructor(properties?: query.IStartCommitResponse); /** - * Creates a new MessageAckResponse instance using the specified properties. + * Creates a new StartCommitResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns MessageAckResponse instance + * @returns StartCommitResponse instance */ - public static create(properties?: query.IMessageAckResponse): query.MessageAckResponse; + public static create(properties?: query.IStartCommitResponse): query.StartCommitResponse; /** - * Encodes the specified MessageAckResponse message. Does not implicitly {@link query.MessageAckResponse.verify|verify} messages. - * @param message MessageAckResponse message or plain object to encode + * Encodes the specified StartCommitResponse message. Does not implicitly {@link query.StartCommitResponse.verify|verify} messages. + * @param message StartCommitResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IMessageAckResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IStartCommitResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified MessageAckResponse message, length delimited. Does not implicitly {@link query.MessageAckResponse.verify|verify} messages. - * @param message MessageAckResponse message or plain object to encode + * Encodes the specified StartCommitResponse message, length delimited. Does not implicitly {@link query.StartCommitResponse.verify|verify} messages. + * @param message StartCommitResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IMessageAckResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IStartCommitResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a MessageAckResponse message from the specified reader or buffer. + * Decodes a StartCommitResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns MessageAckResponse + * @returns StartCommitResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.MessageAckResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.StartCommitResponse; /** - * Decodes a MessageAckResponse message from the specified reader or buffer, length delimited. + * Decodes a StartCommitResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns MessageAckResponse + * @returns StartCommitResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.MessageAckResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.StartCommitResponse; /** - * Verifies a MessageAckResponse message. + * Verifies a StartCommitResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a MessageAckResponse message from a plain object. Also converts values to their respective internal types. + * Creates a StartCommitResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns MessageAckResponse + * @returns StartCommitResponse */ - public static fromObject(object: { [k: string]: any }): query.MessageAckResponse; + public static fromObject(object: { [k: string]: any }): query.StartCommitResponse; /** - * Creates a plain object from a MessageAckResponse message. Also converts values to other types if specified. - * @param message MessageAckResponse + * Creates a plain object from a StartCommitResponse message. Also converts values to other types if specified. + * @param message StartCommitResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.MessageAckResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.StartCommitResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this MessageAckResponse to JSON. + * Converts this StartCommitResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for MessageAckResponse + * Gets the default type url for StartCommitResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReserveExecuteRequest. */ - interface IReserveExecuteRequest { + /** Properties of a SetRollbackRequest. 
*/ + interface ISetRollbackRequest { - /** ReserveExecuteRequest effective_caller_id */ + /** SetRollbackRequest effective_caller_id */ effective_caller_id?: (vtrpc.ICallerID|null); - /** ReserveExecuteRequest immediate_caller_id */ + /** SetRollbackRequest immediate_caller_id */ immediate_caller_id?: (query.IVTGateCallerID|null); - /** ReserveExecuteRequest target */ + /** SetRollbackRequest target */ target?: (query.ITarget|null); - /** ReserveExecuteRequest query */ - query?: (query.IBoundQuery|null); - - /** ReserveExecuteRequest transaction_id */ + /** SetRollbackRequest transaction_id */ transaction_id?: (number|Long|null); - /** ReserveExecuteRequest options */ - options?: (query.IExecuteOptions|null); - - /** ReserveExecuteRequest pre_queries */ - pre_queries?: (string[]|null); + /** SetRollbackRequest dtid */ + dtid?: (string|null); } - /** Represents a ReserveExecuteRequest. */ - class ReserveExecuteRequest implements IReserveExecuteRequest { + /** Represents a SetRollbackRequest. */ + class SetRollbackRequest implements ISetRollbackRequest { /** - * Constructs a new ReserveExecuteRequest. + * Constructs a new SetRollbackRequest. * @param [properties] Properties to set */ - constructor(properties?: query.IReserveExecuteRequest); + constructor(properties?: query.ISetRollbackRequest); - /** ReserveExecuteRequest effective_caller_id. */ + /** SetRollbackRequest effective_caller_id. */ public effective_caller_id?: (vtrpc.ICallerID|null); - /** ReserveExecuteRequest immediate_caller_id. */ + /** SetRollbackRequest immediate_caller_id. */ public immediate_caller_id?: (query.IVTGateCallerID|null); - /** ReserveExecuteRequest target. */ + /** SetRollbackRequest target. */ public target?: (query.ITarget|null); - /** ReserveExecuteRequest query. */ - public query?: (query.IBoundQuery|null); - - /** ReserveExecuteRequest transaction_id. */ + /** SetRollbackRequest transaction_id. */ public transaction_id: (number|Long); - /** ReserveExecuteRequest options. 
*/ - public options?: (query.IExecuteOptions|null); - - /** ReserveExecuteRequest pre_queries. */ - public pre_queries: string[]; + /** SetRollbackRequest dtid. */ + public dtid: string; /** - * Creates a new ReserveExecuteRequest instance using the specified properties. + * Creates a new SetRollbackRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ReserveExecuteRequest instance + * @returns SetRollbackRequest instance */ - public static create(properties?: query.IReserveExecuteRequest): query.ReserveExecuteRequest; + public static create(properties?: query.ISetRollbackRequest): query.SetRollbackRequest; /** - * Encodes the specified ReserveExecuteRequest message. Does not implicitly {@link query.ReserveExecuteRequest.verify|verify} messages. - * @param message ReserveExecuteRequest message or plain object to encode + * Encodes the specified SetRollbackRequest message. Does not implicitly {@link query.SetRollbackRequest.verify|verify} messages. + * @param message SetRollbackRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IReserveExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.ISetRollbackRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReserveExecuteRequest message, length delimited. Does not implicitly {@link query.ReserveExecuteRequest.verify|verify} messages. - * @param message ReserveExecuteRequest message or plain object to encode + * Encodes the specified SetRollbackRequest message, length delimited. Does not implicitly {@link query.SetRollbackRequest.verify|verify} messages. 
+ * @param message SetRollbackRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IReserveExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.ISetRollbackRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReserveExecuteRequest message from the specified reader or buffer. + * Decodes a SetRollbackRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReserveExecuteRequest + * @returns SetRollbackRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReserveExecuteRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.SetRollbackRequest; /** - * Decodes a ReserveExecuteRequest message from the specified reader or buffer, length delimited. + * Decodes a SetRollbackRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ReserveExecuteRequest + * @returns SetRollbackRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReserveExecuteRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.SetRollbackRequest; /** - * Verifies a ReserveExecuteRequest message. + * Verifies a SetRollbackRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReserveExecuteRequest message from a plain object. Also converts values to their respective internal types. + * Creates a SetRollbackRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ReserveExecuteRequest + * @returns SetRollbackRequest */ - public static fromObject(object: { [k: string]: any }): query.ReserveExecuteRequest; + public static fromObject(object: { [k: string]: any }): query.SetRollbackRequest; /** - * Creates a plain object from a ReserveExecuteRequest message. Also converts values to other types if specified. - * @param message ReserveExecuteRequest + * Creates a plain object from a SetRollbackRequest message. Also converts values to other types if specified. + * @param message SetRollbackRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.ReserveExecuteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.SetRollbackRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReserveExecuteRequest to JSON. + * Converts this SetRollbackRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReserveExecuteRequest + * Gets the default type url for SetRollbackRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReserveExecuteResponse. 
*/ - interface IReserveExecuteResponse { - - /** ReserveExecuteResponse error */ - error?: (vtrpc.IRPCError|null); - - /** ReserveExecuteResponse result */ - result?: (query.IQueryResult|null); - - /** ReserveExecuteResponse reserved_id */ - reserved_id?: (number|Long|null); - - /** ReserveExecuteResponse tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); + /** Properties of a SetRollbackResponse. */ + interface ISetRollbackResponse { } - /** Represents a ReserveExecuteResponse. */ - class ReserveExecuteResponse implements IReserveExecuteResponse { + /** Represents a SetRollbackResponse. */ + class SetRollbackResponse implements ISetRollbackResponse { /** - * Constructs a new ReserveExecuteResponse. + * Constructs a new SetRollbackResponse. * @param [properties] Properties to set */ - constructor(properties?: query.IReserveExecuteResponse); - - /** ReserveExecuteResponse error. */ - public error?: (vtrpc.IRPCError|null); - - /** ReserveExecuteResponse result. */ - public result?: (query.IQueryResult|null); - - /** ReserveExecuteResponse reserved_id. */ - public reserved_id: (number|Long); - - /** ReserveExecuteResponse tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); + constructor(properties?: query.ISetRollbackResponse); /** - * Creates a new ReserveExecuteResponse instance using the specified properties. + * Creates a new SetRollbackResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ReserveExecuteResponse instance + * @returns SetRollbackResponse instance */ - public static create(properties?: query.IReserveExecuteResponse): query.ReserveExecuteResponse; + public static create(properties?: query.ISetRollbackResponse): query.SetRollbackResponse; /** - * Encodes the specified ReserveExecuteResponse message. Does not implicitly {@link query.ReserveExecuteResponse.verify|verify} messages. 
- * @param message ReserveExecuteResponse message or plain object to encode + * Encodes the specified SetRollbackResponse message. Does not implicitly {@link query.SetRollbackResponse.verify|verify} messages. + * @param message SetRollbackResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IReserveExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.ISetRollbackResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReserveExecuteResponse message, length delimited. Does not implicitly {@link query.ReserveExecuteResponse.verify|verify} messages. - * @param message ReserveExecuteResponse message or plain object to encode + * Encodes the specified SetRollbackResponse message, length delimited. Does not implicitly {@link query.SetRollbackResponse.verify|verify} messages. + * @param message SetRollbackResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IReserveExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.ISetRollbackResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReserveExecuteResponse message from the specified reader or buffer. + * Decodes a SetRollbackResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReserveExecuteResponse + * @returns SetRollbackResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReserveExecuteResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.SetRollbackResponse; /** - * Decodes a ReserveExecuteResponse message from the specified reader or buffer, length delimited. + * Decodes a SetRollbackResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ReserveExecuteResponse + * @returns SetRollbackResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReserveExecuteResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.SetRollbackResponse; /** - * Verifies a ReserveExecuteResponse message. + * Verifies a SetRollbackResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReserveExecuteResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SetRollbackResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ReserveExecuteResponse + * @returns SetRollbackResponse */ - public static fromObject(object: { [k: string]: any }): query.ReserveExecuteResponse; + public static fromObject(object: { [k: string]: any }): query.SetRollbackResponse; /** - * Creates a plain object from a ReserveExecuteResponse message. Also converts values to other types if specified. - * @param message ReserveExecuteResponse + * Creates a plain object from a SetRollbackResponse message. Also converts values to other types if specified. + * @param message SetRollbackResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.ReserveExecuteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.SetRollbackResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReserveExecuteResponse to JSON. + * Converts this SetRollbackResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReserveExecuteResponse + * Gets the default type url for SetRollbackResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReserveStreamExecuteRequest. */ - interface IReserveStreamExecuteRequest { + /** Properties of a ConcludeTransactionRequest. 
*/ + interface IConcludeTransactionRequest { - /** ReserveStreamExecuteRequest effective_caller_id */ + /** ConcludeTransactionRequest effective_caller_id */ effective_caller_id?: (vtrpc.ICallerID|null); - /** ReserveStreamExecuteRequest immediate_caller_id */ + /** ConcludeTransactionRequest immediate_caller_id */ immediate_caller_id?: (query.IVTGateCallerID|null); - /** ReserveStreamExecuteRequest target */ + /** ConcludeTransactionRequest target */ target?: (query.ITarget|null); - /** ReserveStreamExecuteRequest query */ - query?: (query.IBoundQuery|null); - - /** ReserveStreamExecuteRequest options */ - options?: (query.IExecuteOptions|null); - - /** ReserveStreamExecuteRequest transaction_id */ - transaction_id?: (number|Long|null); - - /** ReserveStreamExecuteRequest pre_queries */ - pre_queries?: (string[]|null); + /** ConcludeTransactionRequest dtid */ + dtid?: (string|null); } - /** Represents a ReserveStreamExecuteRequest. */ - class ReserveStreamExecuteRequest implements IReserveStreamExecuteRequest { + /** Represents a ConcludeTransactionRequest. */ + class ConcludeTransactionRequest implements IConcludeTransactionRequest { /** - * Constructs a new ReserveStreamExecuteRequest. + * Constructs a new ConcludeTransactionRequest. * @param [properties] Properties to set */ - constructor(properties?: query.IReserveStreamExecuteRequest); + constructor(properties?: query.IConcludeTransactionRequest); - /** ReserveStreamExecuteRequest effective_caller_id. */ + /** ConcludeTransactionRequest effective_caller_id. */ public effective_caller_id?: (vtrpc.ICallerID|null); - /** ReserveStreamExecuteRequest immediate_caller_id. */ + /** ConcludeTransactionRequest immediate_caller_id. */ public immediate_caller_id?: (query.IVTGateCallerID|null); - /** ReserveStreamExecuteRequest target. */ + /** ConcludeTransactionRequest target. */ public target?: (query.ITarget|null); - /** ReserveStreamExecuteRequest query. 
*/ - public query?: (query.IBoundQuery|null); - - /** ReserveStreamExecuteRequest options. */ - public options?: (query.IExecuteOptions|null); - - /** ReserveStreamExecuteRequest transaction_id. */ - public transaction_id: (number|Long); - - /** ReserveStreamExecuteRequest pre_queries. */ - public pre_queries: string[]; + /** ConcludeTransactionRequest dtid. */ + public dtid: string; /** - * Creates a new ReserveStreamExecuteRequest instance using the specified properties. + * Creates a new ConcludeTransactionRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ReserveStreamExecuteRequest instance + * @returns ConcludeTransactionRequest instance */ - public static create(properties?: query.IReserveStreamExecuteRequest): query.ReserveStreamExecuteRequest; + public static create(properties?: query.IConcludeTransactionRequest): query.ConcludeTransactionRequest; /** - * Encodes the specified ReserveStreamExecuteRequest message. Does not implicitly {@link query.ReserveStreamExecuteRequest.verify|verify} messages. - * @param message ReserveStreamExecuteRequest message or plain object to encode + * Encodes the specified ConcludeTransactionRequest message. Does not implicitly {@link query.ConcludeTransactionRequest.verify|verify} messages. + * @param message ConcludeTransactionRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IReserveStreamExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IConcludeTransactionRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReserveStreamExecuteRequest message, length delimited. Does not implicitly {@link query.ReserveStreamExecuteRequest.verify|verify} messages. - * @param message ReserveStreamExecuteRequest message or plain object to encode + * Encodes the specified ConcludeTransactionRequest message, length delimited. 
Does not implicitly {@link query.ConcludeTransactionRequest.verify|verify} messages. + * @param message ConcludeTransactionRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IReserveStreamExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IConcludeTransactionRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReserveStreamExecuteRequest message from the specified reader or buffer. + * Decodes a ConcludeTransactionRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReserveStreamExecuteRequest + * @returns ConcludeTransactionRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReserveStreamExecuteRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ConcludeTransactionRequest; /** - * Decodes a ReserveStreamExecuteRequest message from the specified reader or buffer, length delimited. + * Decodes a ConcludeTransactionRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ReserveStreamExecuteRequest + * @returns ConcludeTransactionRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReserveStreamExecuteRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ConcludeTransactionRequest; /** - * Verifies a ReserveStreamExecuteRequest message. + * Verifies a ConcludeTransactionRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReserveStreamExecuteRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ConcludeTransactionRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ReserveStreamExecuteRequest + * @returns ConcludeTransactionRequest */ - public static fromObject(object: { [k: string]: any }): query.ReserveStreamExecuteRequest; + public static fromObject(object: { [k: string]: any }): query.ConcludeTransactionRequest; /** - * Creates a plain object from a ReserveStreamExecuteRequest message. Also converts values to other types if specified. - * @param message ReserveStreamExecuteRequest + * Creates a plain object from a ConcludeTransactionRequest message. Also converts values to other types if specified. + * @param message ConcludeTransactionRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.ReserveStreamExecuteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.ConcludeTransactionRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReserveStreamExecuteRequest to JSON. + * Converts this ConcludeTransactionRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReserveStreamExecuteRequest + * Gets the default type url for ConcludeTransactionRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReserveStreamExecuteResponse. 
*/ - interface IReserveStreamExecuteResponse { - - /** ReserveStreamExecuteResponse error */ - error?: (vtrpc.IRPCError|null); - - /** ReserveStreamExecuteResponse result */ - result?: (query.IQueryResult|null); - - /** ReserveStreamExecuteResponse reserved_id */ - reserved_id?: (number|Long|null); - - /** ReserveStreamExecuteResponse tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); + /** Properties of a ConcludeTransactionResponse. */ + interface IConcludeTransactionResponse { } - /** Represents a ReserveStreamExecuteResponse. */ - class ReserveStreamExecuteResponse implements IReserveStreamExecuteResponse { + /** Represents a ConcludeTransactionResponse. */ + class ConcludeTransactionResponse implements IConcludeTransactionResponse { /** - * Constructs a new ReserveStreamExecuteResponse. + * Constructs a new ConcludeTransactionResponse. * @param [properties] Properties to set */ - constructor(properties?: query.IReserveStreamExecuteResponse); - - /** ReserveStreamExecuteResponse error. */ - public error?: (vtrpc.IRPCError|null); - - /** ReserveStreamExecuteResponse result. */ - public result?: (query.IQueryResult|null); - - /** ReserveStreamExecuteResponse reserved_id. */ - public reserved_id: (number|Long); - - /** ReserveStreamExecuteResponse tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); + constructor(properties?: query.IConcludeTransactionResponse); /** - * Creates a new ReserveStreamExecuteResponse instance using the specified properties. + * Creates a new ConcludeTransactionResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns ReserveStreamExecuteResponse instance + * @returns ConcludeTransactionResponse instance */ - public static create(properties?: query.IReserveStreamExecuteResponse): query.ReserveStreamExecuteResponse; + public static create(properties?: query.IConcludeTransactionResponse): query.ConcludeTransactionResponse; /** - * Encodes the specified ReserveStreamExecuteResponse message. Does not implicitly {@link query.ReserveStreamExecuteResponse.verify|verify} messages. - * @param message ReserveStreamExecuteResponse message or plain object to encode + * Encodes the specified ConcludeTransactionResponse message. Does not implicitly {@link query.ConcludeTransactionResponse.verify|verify} messages. + * @param message ConcludeTransactionResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IReserveStreamExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IConcludeTransactionResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReserveStreamExecuteResponse message, length delimited. Does not implicitly {@link query.ReserveStreamExecuteResponse.verify|verify} messages. - * @param message ReserveStreamExecuteResponse message or plain object to encode + * Encodes the specified ConcludeTransactionResponse message, length delimited. Does not implicitly {@link query.ConcludeTransactionResponse.verify|verify} messages. 
+ * @param message ConcludeTransactionResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IReserveStreamExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IConcludeTransactionResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReserveStreamExecuteResponse message from the specified reader or buffer. + * Decodes a ConcludeTransactionResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReserveStreamExecuteResponse + * @returns ConcludeTransactionResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReserveStreamExecuteResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ConcludeTransactionResponse; /** - * Decodes a ReserveStreamExecuteResponse message from the specified reader or buffer, length delimited. + * Decodes a ConcludeTransactionResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ReserveStreamExecuteResponse + * @returns ConcludeTransactionResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReserveStreamExecuteResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ConcludeTransactionResponse; /** - * Verifies a ReserveStreamExecuteResponse message. + * Verifies a ConcludeTransactionResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReserveStreamExecuteResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ConcludeTransactionResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ReserveStreamExecuteResponse + * @returns ConcludeTransactionResponse */ - public static fromObject(object: { [k: string]: any }): query.ReserveStreamExecuteResponse; + public static fromObject(object: { [k: string]: any }): query.ConcludeTransactionResponse; /** - * Creates a plain object from a ReserveStreamExecuteResponse message. Also converts values to other types if specified. - * @param message ReserveStreamExecuteResponse + * Creates a plain object from a ConcludeTransactionResponse message. Also converts values to other types if specified. + * @param message ConcludeTransactionResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.ReserveStreamExecuteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.ConcludeTransactionResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReserveStreamExecuteResponse to JSON. + * Converts this ConcludeTransactionResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReserveStreamExecuteResponse + * Gets the default type url for ConcludeTransactionResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReserveBeginExecuteRequest. 
*/ - interface IReserveBeginExecuteRequest { + /** Properties of a ReadTransactionRequest. */ + interface IReadTransactionRequest { - /** ReserveBeginExecuteRequest effective_caller_id */ + /** ReadTransactionRequest effective_caller_id */ effective_caller_id?: (vtrpc.ICallerID|null); - /** ReserveBeginExecuteRequest immediate_caller_id */ + /** ReadTransactionRequest immediate_caller_id */ immediate_caller_id?: (query.IVTGateCallerID|null); - /** ReserveBeginExecuteRequest target */ + /** ReadTransactionRequest target */ target?: (query.ITarget|null); - /** ReserveBeginExecuteRequest query */ - query?: (query.IBoundQuery|null); - - /** ReserveBeginExecuteRequest options */ - options?: (query.IExecuteOptions|null); - - /** ReserveBeginExecuteRequest pre_queries */ - pre_queries?: (string[]|null); - - /** ReserveBeginExecuteRequest post_begin_queries */ - post_begin_queries?: (string[]|null); + /** ReadTransactionRequest dtid */ + dtid?: (string|null); } - /** Represents a ReserveBeginExecuteRequest. */ - class ReserveBeginExecuteRequest implements IReserveBeginExecuteRequest { + /** Represents a ReadTransactionRequest. */ + class ReadTransactionRequest implements IReadTransactionRequest { /** - * Constructs a new ReserveBeginExecuteRequest. + * Constructs a new ReadTransactionRequest. * @param [properties] Properties to set */ - constructor(properties?: query.IReserveBeginExecuteRequest); + constructor(properties?: query.IReadTransactionRequest); - /** ReserveBeginExecuteRequest effective_caller_id. */ + /** ReadTransactionRequest effective_caller_id. */ public effective_caller_id?: (vtrpc.ICallerID|null); - /** ReserveBeginExecuteRequest immediate_caller_id. */ + /** ReadTransactionRequest immediate_caller_id. */ public immediate_caller_id?: (query.IVTGateCallerID|null); - /** ReserveBeginExecuteRequest target. */ + /** ReadTransactionRequest target. */ public target?: (query.ITarget|null); - /** ReserveBeginExecuteRequest query. 
*/ - public query?: (query.IBoundQuery|null); - - /** ReserveBeginExecuteRequest options. */ - public options?: (query.IExecuteOptions|null); - - /** ReserveBeginExecuteRequest pre_queries. */ - public pre_queries: string[]; - - /** ReserveBeginExecuteRequest post_begin_queries. */ - public post_begin_queries: string[]; + /** ReadTransactionRequest dtid. */ + public dtid: string; /** - * Creates a new ReserveBeginExecuteRequest instance using the specified properties. + * Creates a new ReadTransactionRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ReserveBeginExecuteRequest instance + * @returns ReadTransactionRequest instance */ - public static create(properties?: query.IReserveBeginExecuteRequest): query.ReserveBeginExecuteRequest; + public static create(properties?: query.IReadTransactionRequest): query.ReadTransactionRequest; /** - * Encodes the specified ReserveBeginExecuteRequest message. Does not implicitly {@link query.ReserveBeginExecuteRequest.verify|verify} messages. - * @param message ReserveBeginExecuteRequest message or plain object to encode + * Encodes the specified ReadTransactionRequest message. Does not implicitly {@link query.ReadTransactionRequest.verify|verify} messages. + * @param message ReadTransactionRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IReserveBeginExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IReadTransactionRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReserveBeginExecuteRequest message, length delimited. Does not implicitly {@link query.ReserveBeginExecuteRequest.verify|verify} messages. - * @param message ReserveBeginExecuteRequest message or plain object to encode + * Encodes the specified ReadTransactionRequest message, length delimited. 
Does not implicitly {@link query.ReadTransactionRequest.verify|verify} messages. + * @param message ReadTransactionRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IReserveBeginExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IReadTransactionRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReserveBeginExecuteRequest message from the specified reader or buffer. + * Decodes a ReadTransactionRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReserveBeginExecuteRequest + * @returns ReadTransactionRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReserveBeginExecuteRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReadTransactionRequest; /** - * Decodes a ReserveBeginExecuteRequest message from the specified reader or buffer, length delimited. + * Decodes a ReadTransactionRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ReserveBeginExecuteRequest + * @returns ReadTransactionRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReserveBeginExecuteRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReadTransactionRequest; /** - * Verifies a ReserveBeginExecuteRequest message. + * Verifies a ReadTransactionRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReserveBeginExecuteRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ReadTransactionRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ReserveBeginExecuteRequest + * @returns ReadTransactionRequest */ - public static fromObject(object: { [k: string]: any }): query.ReserveBeginExecuteRequest; + public static fromObject(object: { [k: string]: any }): query.ReadTransactionRequest; /** - * Creates a plain object from a ReserveBeginExecuteRequest message. Also converts values to other types if specified. - * @param message ReserveBeginExecuteRequest + * Creates a plain object from a ReadTransactionRequest message. Also converts values to other types if specified. + * @param message ReadTransactionRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.ReserveBeginExecuteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.ReadTransactionRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReserveBeginExecuteRequest to JSON. + * Converts this ReadTransactionRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReserveBeginExecuteRequest + * Gets the default type url for ReadTransactionRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReserveBeginExecuteResponse. */ - interface IReserveBeginExecuteResponse { + /** Properties of a ReadTransactionResponse. 
*/ + interface IReadTransactionResponse { - /** ReserveBeginExecuteResponse error */ - error?: (vtrpc.IRPCError|null); + /** ReadTransactionResponse metadata */ + metadata?: (query.ITransactionMetadata|null); + } - /** ReserveBeginExecuteResponse result */ - result?: (query.IQueryResult|null); + /** Represents a ReadTransactionResponse. */ + class ReadTransactionResponse implements IReadTransactionResponse { - /** ReserveBeginExecuteResponse transaction_id */ - transaction_id?: (number|Long|null); + /** + * Constructs a new ReadTransactionResponse. + * @param [properties] Properties to set + */ + constructor(properties?: query.IReadTransactionResponse); - /** ReserveBeginExecuteResponse reserved_id */ - reserved_id?: (number|Long|null); - - /** ReserveBeginExecuteResponse tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); - - /** ReserveBeginExecuteResponse session_state_changes */ - session_state_changes?: (string|null); - } - - /** Represents a ReserveBeginExecuteResponse. */ - class ReserveBeginExecuteResponse implements IReserveBeginExecuteResponse { - - /** - * Constructs a new ReserveBeginExecuteResponse. - * @param [properties] Properties to set - */ - constructor(properties?: query.IReserveBeginExecuteResponse); - - /** ReserveBeginExecuteResponse error. */ - public error?: (vtrpc.IRPCError|null); - - /** ReserveBeginExecuteResponse result. */ - public result?: (query.IQueryResult|null); - - /** ReserveBeginExecuteResponse transaction_id. */ - public transaction_id: (number|Long); - - /** ReserveBeginExecuteResponse reserved_id. */ - public reserved_id: (number|Long); - - /** ReserveBeginExecuteResponse tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); - - /** ReserveBeginExecuteResponse session_state_changes. */ - public session_state_changes: string; + /** ReadTransactionResponse metadata. 
*/ + public metadata?: (query.ITransactionMetadata|null); /** - * Creates a new ReserveBeginExecuteResponse instance using the specified properties. + * Creates a new ReadTransactionResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ReserveBeginExecuteResponse instance + * @returns ReadTransactionResponse instance */ - public static create(properties?: query.IReserveBeginExecuteResponse): query.ReserveBeginExecuteResponse; + public static create(properties?: query.IReadTransactionResponse): query.ReadTransactionResponse; /** - * Encodes the specified ReserveBeginExecuteResponse message. Does not implicitly {@link query.ReserveBeginExecuteResponse.verify|verify} messages. - * @param message ReserveBeginExecuteResponse message or plain object to encode + * Encodes the specified ReadTransactionResponse message. Does not implicitly {@link query.ReadTransactionResponse.verify|verify} messages. + * @param message ReadTransactionResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IReserveBeginExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IReadTransactionResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReserveBeginExecuteResponse message, length delimited. Does not implicitly {@link query.ReserveBeginExecuteResponse.verify|verify} messages. - * @param message ReserveBeginExecuteResponse message or plain object to encode + * Encodes the specified ReadTransactionResponse message, length delimited. Does not implicitly {@link query.ReadTransactionResponse.verify|verify} messages. 
+ * @param message ReadTransactionResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IReserveBeginExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IReadTransactionResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReserveBeginExecuteResponse message from the specified reader or buffer. + * Decodes a ReadTransactionResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReserveBeginExecuteResponse + * @returns ReadTransactionResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReserveBeginExecuteResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReadTransactionResponse; /** - * Decodes a ReserveBeginExecuteResponse message from the specified reader or buffer, length delimited. + * Decodes a ReadTransactionResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ReserveBeginExecuteResponse + * @returns ReadTransactionResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReserveBeginExecuteResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReadTransactionResponse; /** - * Verifies a ReserveBeginExecuteResponse message. + * Verifies a ReadTransactionResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReserveBeginExecuteResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ReadTransactionResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ReserveBeginExecuteResponse + * @returns ReadTransactionResponse */ - public static fromObject(object: { [k: string]: any }): query.ReserveBeginExecuteResponse; + public static fromObject(object: { [k: string]: any }): query.ReadTransactionResponse; /** - * Creates a plain object from a ReserveBeginExecuteResponse message. Also converts values to other types if specified. - * @param message ReserveBeginExecuteResponse + * Creates a plain object from a ReadTransactionResponse message. Also converts values to other types if specified. + * @param message ReadTransactionResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.ReserveBeginExecuteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.ReadTransactionResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReserveBeginExecuteResponse to JSON. + * Converts this ReadTransactionResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReserveBeginExecuteResponse + * Gets the default type url for ReadTransactionResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReserveBeginStreamExecuteRequest. 
*/ - interface IReserveBeginStreamExecuteRequest { + /** Properties of a BeginExecuteRequest. */ + interface IBeginExecuteRequest { - /** ReserveBeginStreamExecuteRequest effective_caller_id */ + /** BeginExecuteRequest effective_caller_id */ effective_caller_id?: (vtrpc.ICallerID|null); - /** ReserveBeginStreamExecuteRequest immediate_caller_id */ + /** BeginExecuteRequest immediate_caller_id */ immediate_caller_id?: (query.IVTGateCallerID|null); - /** ReserveBeginStreamExecuteRequest target */ + /** BeginExecuteRequest target */ target?: (query.ITarget|null); - /** ReserveBeginStreamExecuteRequest query */ + /** BeginExecuteRequest query */ query?: (query.IBoundQuery|null); - /** ReserveBeginStreamExecuteRequest options */ + /** BeginExecuteRequest options */ options?: (query.IExecuteOptions|null); - /** ReserveBeginStreamExecuteRequest pre_queries */ - pre_queries?: (string[]|null); + /** BeginExecuteRequest reserved_id */ + reserved_id?: (number|Long|null); - /** ReserveBeginStreamExecuteRequest post_begin_queries */ - post_begin_queries?: (string[]|null); + /** BeginExecuteRequest pre_queries */ + pre_queries?: (string[]|null); } - /** Represents a ReserveBeginStreamExecuteRequest. */ - class ReserveBeginStreamExecuteRequest implements IReserveBeginStreamExecuteRequest { + /** Represents a BeginExecuteRequest. */ + class BeginExecuteRequest implements IBeginExecuteRequest { /** - * Constructs a new ReserveBeginStreamExecuteRequest. + * Constructs a new BeginExecuteRequest. * @param [properties] Properties to set */ - constructor(properties?: query.IReserveBeginStreamExecuteRequest); + constructor(properties?: query.IBeginExecuteRequest); - /** ReserveBeginStreamExecuteRequest effective_caller_id. */ + /** BeginExecuteRequest effective_caller_id. */ public effective_caller_id?: (vtrpc.ICallerID|null); - /** ReserveBeginStreamExecuteRequest immediate_caller_id. */ + /** BeginExecuteRequest immediate_caller_id. 
*/ public immediate_caller_id?: (query.IVTGateCallerID|null); - /** ReserveBeginStreamExecuteRequest target. */ + /** BeginExecuteRequest target. */ public target?: (query.ITarget|null); - /** ReserveBeginStreamExecuteRequest query. */ + /** BeginExecuteRequest query. */ public query?: (query.IBoundQuery|null); - /** ReserveBeginStreamExecuteRequest options. */ + /** BeginExecuteRequest options. */ public options?: (query.IExecuteOptions|null); - /** ReserveBeginStreamExecuteRequest pre_queries. */ - public pre_queries: string[]; + /** BeginExecuteRequest reserved_id. */ + public reserved_id: (number|Long); - /** ReserveBeginStreamExecuteRequest post_begin_queries. */ - public post_begin_queries: string[]; + /** BeginExecuteRequest pre_queries. */ + public pre_queries: string[]; /** - * Creates a new ReserveBeginStreamExecuteRequest instance using the specified properties. + * Creates a new BeginExecuteRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ReserveBeginStreamExecuteRequest instance + * @returns BeginExecuteRequest instance */ - public static create(properties?: query.IReserveBeginStreamExecuteRequest): query.ReserveBeginStreamExecuteRequest; + public static create(properties?: query.IBeginExecuteRequest): query.BeginExecuteRequest; /** - * Encodes the specified ReserveBeginStreamExecuteRequest message. Does not implicitly {@link query.ReserveBeginStreamExecuteRequest.verify|verify} messages. - * @param message ReserveBeginStreamExecuteRequest message or plain object to encode + * Encodes the specified BeginExecuteRequest message. Does not implicitly {@link query.BeginExecuteRequest.verify|verify} messages. 
+ * @param message BeginExecuteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IReserveBeginStreamExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IBeginExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReserveBeginStreamExecuteRequest message, length delimited. Does not implicitly {@link query.ReserveBeginStreamExecuteRequest.verify|verify} messages. - * @param message ReserveBeginStreamExecuteRequest message or plain object to encode + * Encodes the specified BeginExecuteRequest message, length delimited. Does not implicitly {@link query.BeginExecuteRequest.verify|verify} messages. + * @param message BeginExecuteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IReserveBeginStreamExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IBeginExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReserveBeginStreamExecuteRequest message from the specified reader or buffer. + * Decodes a BeginExecuteRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReserveBeginStreamExecuteRequest + * @returns BeginExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReserveBeginStreamExecuteRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.BeginExecuteRequest; /** - * Decodes a ReserveBeginStreamExecuteRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a BeginExecuteRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ReserveBeginStreamExecuteRequest + * @returns BeginExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReserveBeginStreamExecuteRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.BeginExecuteRequest; /** - * Verifies a ReserveBeginStreamExecuteRequest message. + * Verifies a BeginExecuteRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReserveBeginStreamExecuteRequest message from a plain object. Also converts values to their respective internal types. + * Creates a BeginExecuteRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ReserveBeginStreamExecuteRequest + * @returns BeginExecuteRequest */ - public static fromObject(object: { [k: string]: any }): query.ReserveBeginStreamExecuteRequest; + public static fromObject(object: { [k: string]: any }): query.BeginExecuteRequest; /** - * Creates a plain object from a ReserveBeginStreamExecuteRequest message. Also converts values to other types if specified. - * @param message ReserveBeginStreamExecuteRequest + * Creates a plain object from a BeginExecuteRequest message. Also converts values to other types if specified. 
+ * @param message BeginExecuteRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.ReserveBeginStreamExecuteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.BeginExecuteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReserveBeginStreamExecuteRequest to JSON. + * Converts this BeginExecuteRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReserveBeginStreamExecuteRequest + * Gets the default type url for BeginExecuteRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReserveBeginStreamExecuteResponse. */ - interface IReserveBeginStreamExecuteResponse { + /** Properties of a BeginExecuteResponse. */ + interface IBeginExecuteResponse { - /** ReserveBeginStreamExecuteResponse error */ + /** BeginExecuteResponse error */ error?: (vtrpc.IRPCError|null); - /** ReserveBeginStreamExecuteResponse result */ + /** BeginExecuteResponse result */ result?: (query.IQueryResult|null); - /** ReserveBeginStreamExecuteResponse transaction_id */ + /** BeginExecuteResponse transaction_id */ transaction_id?: (number|Long|null); - /** ReserveBeginStreamExecuteResponse reserved_id */ - reserved_id?: (number|Long|null); - - /** ReserveBeginStreamExecuteResponse tablet_alias */ + /** BeginExecuteResponse tablet_alias */ tablet_alias?: (topodata.ITabletAlias|null); - /** ReserveBeginStreamExecuteResponse session_state_changes */ + /** BeginExecuteResponse session_state_changes */ session_state_changes?: (string|null); } - /** Represents a ReserveBeginStreamExecuteResponse. */ - class ReserveBeginStreamExecuteResponse implements IReserveBeginStreamExecuteResponse { + /** Represents a BeginExecuteResponse. 
*/ + class BeginExecuteResponse implements IBeginExecuteResponse { /** - * Constructs a new ReserveBeginStreamExecuteResponse. + * Constructs a new BeginExecuteResponse. * @param [properties] Properties to set */ - constructor(properties?: query.IReserveBeginStreamExecuteResponse); + constructor(properties?: query.IBeginExecuteResponse); - /** ReserveBeginStreamExecuteResponse error. */ + /** BeginExecuteResponse error. */ public error?: (vtrpc.IRPCError|null); - /** ReserveBeginStreamExecuteResponse result. */ + /** BeginExecuteResponse result. */ public result?: (query.IQueryResult|null); - /** ReserveBeginStreamExecuteResponse transaction_id. */ + /** BeginExecuteResponse transaction_id. */ public transaction_id: (number|Long); - /** ReserveBeginStreamExecuteResponse reserved_id. */ - public reserved_id: (number|Long); - - /** ReserveBeginStreamExecuteResponse tablet_alias. */ + /** BeginExecuteResponse tablet_alias. */ public tablet_alias?: (topodata.ITabletAlias|null); - /** ReserveBeginStreamExecuteResponse session_state_changes. */ + /** BeginExecuteResponse session_state_changes. */ public session_state_changes: string; /** - * Creates a new ReserveBeginStreamExecuteResponse instance using the specified properties. + * Creates a new BeginExecuteResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ReserveBeginStreamExecuteResponse instance + * @returns BeginExecuteResponse instance */ - public static create(properties?: query.IReserveBeginStreamExecuteResponse): query.ReserveBeginStreamExecuteResponse; + public static create(properties?: query.IBeginExecuteResponse): query.BeginExecuteResponse; /** - * Encodes the specified ReserveBeginStreamExecuteResponse message. Does not implicitly {@link query.ReserveBeginStreamExecuteResponse.verify|verify} messages. - * @param message ReserveBeginStreamExecuteResponse message or plain object to encode + * Encodes the specified BeginExecuteResponse message. 
Does not implicitly {@link query.BeginExecuteResponse.verify|verify} messages. + * @param message BeginExecuteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IReserveBeginStreamExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IBeginExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReserveBeginStreamExecuteResponse message, length delimited. Does not implicitly {@link query.ReserveBeginStreamExecuteResponse.verify|verify} messages. - * @param message ReserveBeginStreamExecuteResponse message or plain object to encode + * Encodes the specified BeginExecuteResponse message, length delimited. Does not implicitly {@link query.BeginExecuteResponse.verify|verify} messages. + * @param message BeginExecuteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IReserveBeginStreamExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IBeginExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReserveBeginStreamExecuteResponse message from the specified reader or buffer. + * Decodes a BeginExecuteResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReserveBeginStreamExecuteResponse + * @returns BeginExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReserveBeginStreamExecuteResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.BeginExecuteResponse; /** - * Decodes a ReserveBeginStreamExecuteResponse message from the specified reader or buffer, length delimited. + * Decodes a BeginExecuteResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ReserveBeginStreamExecuteResponse + * @returns BeginExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReserveBeginStreamExecuteResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.BeginExecuteResponse; /** - * Verifies a ReserveBeginStreamExecuteResponse message. + * Verifies a BeginExecuteResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReserveBeginStreamExecuteResponse message from a plain object. Also converts values to their respective internal types. + * Creates a BeginExecuteResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ReserveBeginStreamExecuteResponse + * @returns BeginExecuteResponse */ - public static fromObject(object: { [k: string]: any }): query.ReserveBeginStreamExecuteResponse; + public static fromObject(object: { [k: string]: any }): query.BeginExecuteResponse; /** - * Creates a plain object from a ReserveBeginStreamExecuteResponse message. Also converts values to other types if specified. - * @param message ReserveBeginStreamExecuteResponse + * Creates a plain object from a BeginExecuteResponse message. Also converts values to other types if specified. + * @param message BeginExecuteResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.ReserveBeginStreamExecuteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.BeginExecuteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReserveBeginStreamExecuteResponse to JSON. + * Converts this BeginExecuteResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReserveBeginStreamExecuteResponse + * Gets the default type url for BeginExecuteResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReleaseRequest. */ - interface IReleaseRequest { + /** Properties of a BeginStreamExecuteRequest. 
*/ + interface IBeginStreamExecuteRequest { - /** ReleaseRequest effective_caller_id */ + /** BeginStreamExecuteRequest effective_caller_id */ effective_caller_id?: (vtrpc.ICallerID|null); - /** ReleaseRequest immediate_caller_id */ + /** BeginStreamExecuteRequest immediate_caller_id */ immediate_caller_id?: (query.IVTGateCallerID|null); - /** ReleaseRequest target */ + /** BeginStreamExecuteRequest target */ target?: (query.ITarget|null); - /** ReleaseRequest transaction_id */ - transaction_id?: (number|Long|null); + /** BeginStreamExecuteRequest query */ + query?: (query.IBoundQuery|null); - /** ReleaseRequest reserved_id */ + /** BeginStreamExecuteRequest options */ + options?: (query.IExecuteOptions|null); + + /** BeginStreamExecuteRequest pre_queries */ + pre_queries?: (string[]|null); + + /** BeginStreamExecuteRequest reserved_id */ reserved_id?: (number|Long|null); } - /** Represents a ReleaseRequest. */ - class ReleaseRequest implements IReleaseRequest { + /** Represents a BeginStreamExecuteRequest. */ + class BeginStreamExecuteRequest implements IBeginStreamExecuteRequest { /** - * Constructs a new ReleaseRequest. + * Constructs a new BeginStreamExecuteRequest. * @param [properties] Properties to set */ - constructor(properties?: query.IReleaseRequest); + constructor(properties?: query.IBeginStreamExecuteRequest); - /** ReleaseRequest effective_caller_id. */ + /** BeginStreamExecuteRequest effective_caller_id. */ public effective_caller_id?: (vtrpc.ICallerID|null); - /** ReleaseRequest immediate_caller_id. */ + /** BeginStreamExecuteRequest immediate_caller_id. */ public immediate_caller_id?: (query.IVTGateCallerID|null); - /** ReleaseRequest target. */ + /** BeginStreamExecuteRequest target. */ public target?: (query.ITarget|null); - /** ReleaseRequest transaction_id. */ - public transaction_id: (number|Long); + /** BeginStreamExecuteRequest query. */ + public query?: (query.IBoundQuery|null); - /** ReleaseRequest reserved_id. 
*/ + /** BeginStreamExecuteRequest options. */ + public options?: (query.IExecuteOptions|null); + + /** BeginStreamExecuteRequest pre_queries. */ + public pre_queries: string[]; + + /** BeginStreamExecuteRequest reserved_id. */ public reserved_id: (number|Long); /** - * Creates a new ReleaseRequest instance using the specified properties. + * Creates a new BeginStreamExecuteRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ReleaseRequest instance + * @returns BeginStreamExecuteRequest instance */ - public static create(properties?: query.IReleaseRequest): query.ReleaseRequest; + public static create(properties?: query.IBeginStreamExecuteRequest): query.BeginStreamExecuteRequest; /** - * Encodes the specified ReleaseRequest message. Does not implicitly {@link query.ReleaseRequest.verify|verify} messages. - * @param message ReleaseRequest message or plain object to encode + * Encodes the specified BeginStreamExecuteRequest message. Does not implicitly {@link query.BeginStreamExecuteRequest.verify|verify} messages. + * @param message BeginStreamExecuteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IReleaseRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IBeginStreamExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReleaseRequest message, length delimited. Does not implicitly {@link query.ReleaseRequest.verify|verify} messages. - * @param message ReleaseRequest message or plain object to encode + * Encodes the specified BeginStreamExecuteRequest message, length delimited. Does not implicitly {@link query.BeginStreamExecuteRequest.verify|verify} messages. 
+ * @param message BeginStreamExecuteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IReleaseRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IBeginStreamExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReleaseRequest message from the specified reader or buffer. + * Decodes a BeginStreamExecuteRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReleaseRequest + * @returns BeginStreamExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReleaseRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.BeginStreamExecuteRequest; /** - * Decodes a ReleaseRequest message from the specified reader or buffer, length delimited. + * Decodes a BeginStreamExecuteRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ReleaseRequest + * @returns BeginStreamExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReleaseRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.BeginStreamExecuteRequest; /** - * Verifies a ReleaseRequest message. + * Verifies a BeginStreamExecuteRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReleaseRequest message from a plain object. Also converts values to their respective internal types. + * Creates a BeginStreamExecuteRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ReleaseRequest + * @returns BeginStreamExecuteRequest */ - public static fromObject(object: { [k: string]: any }): query.ReleaseRequest; + public static fromObject(object: { [k: string]: any }): query.BeginStreamExecuteRequest; /** - * Creates a plain object from a ReleaseRequest message. Also converts values to other types if specified. - * @param message ReleaseRequest + * Creates a plain object from a BeginStreamExecuteRequest message. Also converts values to other types if specified. + * @param message BeginStreamExecuteRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.ReleaseRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.BeginStreamExecuteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReleaseRequest to JSON. + * Converts this BeginStreamExecuteRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReleaseRequest + * Gets the default type url for BeginStreamExecuteRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReleaseResponse. */ - interface IReleaseResponse { + /** Properties of a BeginStreamExecuteResponse. 
*/ + interface IBeginStreamExecuteResponse { + + /** BeginStreamExecuteResponse error */ + error?: (vtrpc.IRPCError|null); + + /** BeginStreamExecuteResponse result */ + result?: (query.IQueryResult|null); + + /** BeginStreamExecuteResponse transaction_id */ + transaction_id?: (number|Long|null); + + /** BeginStreamExecuteResponse tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); + + /** BeginStreamExecuteResponse session_state_changes */ + session_state_changes?: (string|null); } - /** Represents a ReleaseResponse. */ - class ReleaseResponse implements IReleaseResponse { + /** Represents a BeginStreamExecuteResponse. */ + class BeginStreamExecuteResponse implements IBeginStreamExecuteResponse { /** - * Constructs a new ReleaseResponse. + * Constructs a new BeginStreamExecuteResponse. * @param [properties] Properties to set */ - constructor(properties?: query.IReleaseResponse); + constructor(properties?: query.IBeginStreamExecuteResponse); + + /** BeginStreamExecuteResponse error. */ + public error?: (vtrpc.IRPCError|null); + + /** BeginStreamExecuteResponse result. */ + public result?: (query.IQueryResult|null); + + /** BeginStreamExecuteResponse transaction_id. */ + public transaction_id: (number|Long); + + /** BeginStreamExecuteResponse tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); + + /** BeginStreamExecuteResponse session_state_changes. */ + public session_state_changes: string; /** - * Creates a new ReleaseResponse instance using the specified properties. + * Creates a new BeginStreamExecuteResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ReleaseResponse instance + * @returns BeginStreamExecuteResponse instance */ - public static create(properties?: query.IReleaseResponse): query.ReleaseResponse; + public static create(properties?: query.IBeginStreamExecuteResponse): query.BeginStreamExecuteResponse; /** - * Encodes the specified ReleaseResponse message. 
Does not implicitly {@link query.ReleaseResponse.verify|verify} messages. - * @param message ReleaseResponse message or plain object to encode + * Encodes the specified BeginStreamExecuteResponse message. Does not implicitly {@link query.BeginStreamExecuteResponse.verify|verify} messages. + * @param message BeginStreamExecuteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IReleaseResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IBeginStreamExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReleaseResponse message, length delimited. Does not implicitly {@link query.ReleaseResponse.verify|verify} messages. - * @param message ReleaseResponse message or plain object to encode + * Encodes the specified BeginStreamExecuteResponse message, length delimited. Does not implicitly {@link query.BeginStreamExecuteResponse.verify|verify} messages. + * @param message BeginStreamExecuteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IReleaseResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IBeginStreamExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReleaseResponse message from the specified reader or buffer. + * Decodes a BeginStreamExecuteResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReleaseResponse + * @returns BeginStreamExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReleaseResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.BeginStreamExecuteResponse; /** - * Decodes a ReleaseResponse message from the specified reader or buffer, length delimited. + * Decodes a BeginStreamExecuteResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ReleaseResponse + * @returns BeginStreamExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReleaseResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.BeginStreamExecuteResponse; /** - * Verifies a ReleaseResponse message. + * Verifies a BeginStreamExecuteResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReleaseResponse message from a plain object. Also converts values to their respective internal types. + * Creates a BeginStreamExecuteResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ReleaseResponse + * @returns BeginStreamExecuteResponse */ - public static fromObject(object: { [k: string]: any }): query.ReleaseResponse; + public static fromObject(object: { [k: string]: any }): query.BeginStreamExecuteResponse; /** - * Creates a plain object from a ReleaseResponse message. Also converts values to other types if specified. - * @param message ReleaseResponse + * Creates a plain object from a BeginStreamExecuteResponse message. Also converts values to other types if specified. + * @param message BeginStreamExecuteResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.ReleaseResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.BeginStreamExecuteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReleaseResponse to JSON. + * Converts this BeginStreamExecuteResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReleaseResponse + * Gets the default type url for BeginStreamExecuteResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a StreamHealthRequest. */ - interface IStreamHealthRequest { + /** Properties of a MessageStreamRequest. */ + interface IMessageStreamRequest { + + /** MessageStreamRequest effective_caller_id */ + effective_caller_id?: (vtrpc.ICallerID|null); + + /** MessageStreamRequest immediate_caller_id */ + immediate_caller_id?: (query.IVTGateCallerID|null); + + /** MessageStreamRequest target */ + target?: (query.ITarget|null); + + /** MessageStreamRequest name */ + name?: (string|null); } - /** Represents a StreamHealthRequest. 
*/ - class StreamHealthRequest implements IStreamHealthRequest { + /** Represents a MessageStreamRequest. */ + class MessageStreamRequest implements IMessageStreamRequest { /** - * Constructs a new StreamHealthRequest. + * Constructs a new MessageStreamRequest. * @param [properties] Properties to set */ - constructor(properties?: query.IStreamHealthRequest); + constructor(properties?: query.IMessageStreamRequest); + + /** MessageStreamRequest effective_caller_id. */ + public effective_caller_id?: (vtrpc.ICallerID|null); + + /** MessageStreamRequest immediate_caller_id. */ + public immediate_caller_id?: (query.IVTGateCallerID|null); + + /** MessageStreamRequest target. */ + public target?: (query.ITarget|null); + + /** MessageStreamRequest name. */ + public name: string; /** - * Creates a new StreamHealthRequest instance using the specified properties. + * Creates a new MessageStreamRequest instance using the specified properties. * @param [properties] Properties to set - * @returns StreamHealthRequest instance + * @returns MessageStreamRequest instance */ - public static create(properties?: query.IStreamHealthRequest): query.StreamHealthRequest; + public static create(properties?: query.IMessageStreamRequest): query.MessageStreamRequest; /** - * Encodes the specified StreamHealthRequest message. Does not implicitly {@link query.StreamHealthRequest.verify|verify} messages. - * @param message StreamHealthRequest message or plain object to encode + * Encodes the specified MessageStreamRequest message. Does not implicitly {@link query.MessageStreamRequest.verify|verify} messages. 
+ * @param message MessageStreamRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IStreamHealthRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IMessageStreamRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified StreamHealthRequest message, length delimited. Does not implicitly {@link query.StreamHealthRequest.verify|verify} messages. - * @param message StreamHealthRequest message or plain object to encode + * Encodes the specified MessageStreamRequest message, length delimited. Does not implicitly {@link query.MessageStreamRequest.verify|verify} messages. + * @param message MessageStreamRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IStreamHealthRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IMessageStreamRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a StreamHealthRequest message from the specified reader or buffer. + * Decodes a MessageStreamRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns StreamHealthRequest + * @returns MessageStreamRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.StreamHealthRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.MessageStreamRequest; /** - * Decodes a StreamHealthRequest message from the specified reader or buffer, length delimited. + * Decodes a MessageStreamRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns StreamHealthRequest + * @returns MessageStreamRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.StreamHealthRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.MessageStreamRequest; /** - * Verifies a StreamHealthRequest message. + * Verifies a MessageStreamRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a StreamHealthRequest message from a plain object. Also converts values to their respective internal types. + * Creates a MessageStreamRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns StreamHealthRequest + * @returns MessageStreamRequest */ - public static fromObject(object: { [k: string]: any }): query.StreamHealthRequest; + public static fromObject(object: { [k: string]: any }): query.MessageStreamRequest; /** - * Creates a plain object from a StreamHealthRequest message. Also converts values to other types if specified. - * @param message StreamHealthRequest + * Creates a plain object from a MessageStreamRequest message. Also converts values to other types if specified. + * @param message MessageStreamRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.StreamHealthRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.MessageStreamRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this StreamHealthRequest to JSON. + * Converts this MessageStreamRequest to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for StreamHealthRequest + * Gets the default type url for MessageStreamRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RealtimeStats. */ - interface IRealtimeStats { - - /** RealtimeStats health_error */ - health_error?: (string|null); - - /** RealtimeStats replication_lag_seconds */ - replication_lag_seconds?: (number|null); - - /** RealtimeStats binlog_players_count */ - binlog_players_count?: (number|null); - - /** RealtimeStats filtered_replication_lag_seconds */ - filtered_replication_lag_seconds?: (number|Long|null); - - /** RealtimeStats cpu_usage */ - cpu_usage?: (number|null); - - /** RealtimeStats qps */ - qps?: (number|null); - - /** RealtimeStats table_schema_changed */ - table_schema_changed?: (string[]|null); + /** Properties of a MessageStreamResponse. */ + interface IMessageStreamResponse { - /** RealtimeStats view_schema_changed */ - view_schema_changed?: (string[]|null); + /** MessageStreamResponse result */ + result?: (query.IQueryResult|null); } - /** Represents a RealtimeStats. */ - class RealtimeStats implements IRealtimeStats { + /** Represents a MessageStreamResponse. */ + class MessageStreamResponse implements IMessageStreamResponse { /** - * Constructs a new RealtimeStats. + * Constructs a new MessageStreamResponse. * @param [properties] Properties to set */ - constructor(properties?: query.IRealtimeStats); - - /** RealtimeStats health_error. */ - public health_error: string; - - /** RealtimeStats replication_lag_seconds. */ - public replication_lag_seconds: number; - - /** RealtimeStats binlog_players_count. */ - public binlog_players_count: number; - - /** RealtimeStats filtered_replication_lag_seconds. 
*/ - public filtered_replication_lag_seconds: (number|Long); - - /** RealtimeStats cpu_usage. */ - public cpu_usage: number; - - /** RealtimeStats qps. */ - public qps: number; - - /** RealtimeStats table_schema_changed. */ - public table_schema_changed: string[]; + constructor(properties?: query.IMessageStreamResponse); - /** RealtimeStats view_schema_changed. */ - public view_schema_changed: string[]; + /** MessageStreamResponse result. */ + public result?: (query.IQueryResult|null); /** - * Creates a new RealtimeStats instance using the specified properties. + * Creates a new MessageStreamResponse instance using the specified properties. * @param [properties] Properties to set - * @returns RealtimeStats instance + * @returns MessageStreamResponse instance */ - public static create(properties?: query.IRealtimeStats): query.RealtimeStats; + public static create(properties?: query.IMessageStreamResponse): query.MessageStreamResponse; /** - * Encodes the specified RealtimeStats message. Does not implicitly {@link query.RealtimeStats.verify|verify} messages. - * @param message RealtimeStats message or plain object to encode + * Encodes the specified MessageStreamResponse message. Does not implicitly {@link query.MessageStreamResponse.verify|verify} messages. + * @param message MessageStreamResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IRealtimeStats, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IMessageStreamResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RealtimeStats message, length delimited. Does not implicitly {@link query.RealtimeStats.verify|verify} messages. - * @param message RealtimeStats message or plain object to encode + * Encodes the specified MessageStreamResponse message, length delimited. Does not implicitly {@link query.MessageStreamResponse.verify|verify} messages. 
+ * @param message MessageStreamResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IRealtimeStats, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IMessageStreamResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RealtimeStats message from the specified reader or buffer. + * Decodes a MessageStreamResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RealtimeStats + * @returns MessageStreamResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.RealtimeStats; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.MessageStreamResponse; /** - * Decodes a RealtimeStats message from the specified reader or buffer, length delimited. + * Decodes a MessageStreamResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RealtimeStats + * @returns MessageStreamResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.RealtimeStats; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.MessageStreamResponse; /** - * Verifies a RealtimeStats message. + * Verifies a MessageStreamResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RealtimeStats message from a plain object. 
Also converts values to their respective internal types. + * Creates a MessageStreamResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns RealtimeStats + * @returns MessageStreamResponse */ - public static fromObject(object: { [k: string]: any }): query.RealtimeStats; + public static fromObject(object: { [k: string]: any }): query.MessageStreamResponse; /** - * Creates a plain object from a RealtimeStats message. Also converts values to other types if specified. - * @param message RealtimeStats + * Creates a plain object from a MessageStreamResponse message. Also converts values to other types if specified. + * @param message MessageStreamResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.RealtimeStats, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.MessageStreamResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RealtimeStats to JSON. + * Converts this MessageStreamResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RealtimeStats + * Gets the default type url for MessageStreamResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an AggregateStats. */ - interface IAggregateStats { + /** Properties of a MessageAckRequest. 
*/ + interface IMessageAckRequest { - /** AggregateStats healthy_tablet_count */ - healthy_tablet_count?: (number|null); + /** MessageAckRequest effective_caller_id */ + effective_caller_id?: (vtrpc.ICallerID|null); - /** AggregateStats unhealthy_tablet_count */ - unhealthy_tablet_count?: (number|null); + /** MessageAckRequest immediate_caller_id */ + immediate_caller_id?: (query.IVTGateCallerID|null); - /** AggregateStats replication_lag_seconds_min */ - replication_lag_seconds_min?: (number|null); + /** MessageAckRequest target */ + target?: (query.ITarget|null); - /** AggregateStats replication_lag_seconds_max */ - replication_lag_seconds_max?: (number|null); + /** MessageAckRequest name */ + name?: (string|null); + + /** MessageAckRequest ids */ + ids?: (query.IValue[]|null); } - /** Represents an AggregateStats. */ - class AggregateStats implements IAggregateStats { + /** Represents a MessageAckRequest. */ + class MessageAckRequest implements IMessageAckRequest { /** - * Constructs a new AggregateStats. + * Constructs a new MessageAckRequest. * @param [properties] Properties to set */ - constructor(properties?: query.IAggregateStats); + constructor(properties?: query.IMessageAckRequest); - /** AggregateStats healthy_tablet_count. */ - public healthy_tablet_count: number; + /** MessageAckRequest effective_caller_id. */ + public effective_caller_id?: (vtrpc.ICallerID|null); - /** AggregateStats unhealthy_tablet_count. */ - public unhealthy_tablet_count: number; + /** MessageAckRequest immediate_caller_id. */ + public immediate_caller_id?: (query.IVTGateCallerID|null); - /** AggregateStats replication_lag_seconds_min. */ - public replication_lag_seconds_min: number; + /** MessageAckRequest target. */ + public target?: (query.ITarget|null); - /** AggregateStats replication_lag_seconds_max. */ - public replication_lag_seconds_max: number; + /** MessageAckRequest name. */ + public name: string; + + /** MessageAckRequest ids. 
*/ + public ids: query.IValue[]; /** - * Creates a new AggregateStats instance using the specified properties. + * Creates a new MessageAckRequest instance using the specified properties. * @param [properties] Properties to set - * @returns AggregateStats instance + * @returns MessageAckRequest instance */ - public static create(properties?: query.IAggregateStats): query.AggregateStats; + public static create(properties?: query.IMessageAckRequest): query.MessageAckRequest; /** - * Encodes the specified AggregateStats message. Does not implicitly {@link query.AggregateStats.verify|verify} messages. - * @param message AggregateStats message or plain object to encode + * Encodes the specified MessageAckRequest message. Does not implicitly {@link query.MessageAckRequest.verify|verify} messages. + * @param message MessageAckRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IAggregateStats, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IMessageAckRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified AggregateStats message, length delimited. Does not implicitly {@link query.AggregateStats.verify|verify} messages. - * @param message AggregateStats message or plain object to encode + * Encodes the specified MessageAckRequest message, length delimited. Does not implicitly {@link query.MessageAckRequest.verify|verify} messages. + * @param message MessageAckRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IAggregateStats, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IMessageAckRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an AggregateStats message from the specified reader or buffer. 
+ * Decodes a MessageAckRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns AggregateStats + * @returns MessageAckRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.AggregateStats; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.MessageAckRequest; /** - * Decodes an AggregateStats message from the specified reader or buffer, length delimited. + * Decodes a MessageAckRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns AggregateStats + * @returns MessageAckRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.AggregateStats; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.MessageAckRequest; /** - * Verifies an AggregateStats message. + * Verifies a MessageAckRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an AggregateStats message from a plain object. Also converts values to their respective internal types. + * Creates a MessageAckRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns AggregateStats + * @returns MessageAckRequest */ - public static fromObject(object: { [k: string]: any }): query.AggregateStats; + public static fromObject(object: { [k: string]: any }): query.MessageAckRequest; /** - * Creates a plain object from an AggregateStats message. Also converts values to other types if specified. - * @param message AggregateStats + * Creates a plain object from a MessageAckRequest message. Also converts values to other types if specified. + * @param message MessageAckRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.AggregateStats, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.MessageAckRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this AggregateStats to JSON. + * Converts this MessageAckRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for AggregateStats + * Gets the default type url for MessageAckRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a StreamHealthResponse. */ - interface IStreamHealthResponse { - - /** StreamHealthResponse target */ - target?: (query.ITarget|null); - - /** StreamHealthResponse serving */ - serving?: (boolean|null); - - /** StreamHealthResponse tablet_externally_reparented_timestamp */ - tablet_externally_reparented_timestamp?: (number|Long|null); - - /** StreamHealthResponse realtime_stats */ - realtime_stats?: (query.IRealtimeStats|null); + /** Properties of a MessageAckResponse. 
*/ + interface IMessageAckResponse { - /** StreamHealthResponse tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); + /** MessageAckResponse result */ + result?: (query.IQueryResult|null); } - /** Represents a StreamHealthResponse. */ - class StreamHealthResponse implements IStreamHealthResponse { + /** Represents a MessageAckResponse. */ + class MessageAckResponse implements IMessageAckResponse { /** - * Constructs a new StreamHealthResponse. + * Constructs a new MessageAckResponse. * @param [properties] Properties to set */ - constructor(properties?: query.IStreamHealthResponse); - - /** StreamHealthResponse target. */ - public target?: (query.ITarget|null); - - /** StreamHealthResponse serving. */ - public serving: boolean; - - /** StreamHealthResponse tablet_externally_reparented_timestamp. */ - public tablet_externally_reparented_timestamp: (number|Long); - - /** StreamHealthResponse realtime_stats. */ - public realtime_stats?: (query.IRealtimeStats|null); + constructor(properties?: query.IMessageAckResponse); - /** StreamHealthResponse tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); + /** MessageAckResponse result. */ + public result?: (query.IQueryResult|null); /** - * Creates a new StreamHealthResponse instance using the specified properties. + * Creates a new MessageAckResponse instance using the specified properties. * @param [properties] Properties to set - * @returns StreamHealthResponse instance + * @returns MessageAckResponse instance */ - public static create(properties?: query.IStreamHealthResponse): query.StreamHealthResponse; + public static create(properties?: query.IMessageAckResponse): query.MessageAckResponse; /** - * Encodes the specified StreamHealthResponse message. Does not implicitly {@link query.StreamHealthResponse.verify|verify} messages. - * @param message StreamHealthResponse message or plain object to encode + * Encodes the specified MessageAckResponse message. 
Does not implicitly {@link query.MessageAckResponse.verify|verify} messages. + * @param message MessageAckResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IStreamHealthResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IMessageAckResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified StreamHealthResponse message, length delimited. Does not implicitly {@link query.StreamHealthResponse.verify|verify} messages. - * @param message StreamHealthResponse message or plain object to encode + * Encodes the specified MessageAckResponse message, length delimited. Does not implicitly {@link query.MessageAckResponse.verify|verify} messages. + * @param message MessageAckResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IStreamHealthResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IMessageAckResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a StreamHealthResponse message from the specified reader or buffer. + * Decodes a MessageAckResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns StreamHealthResponse + * @returns MessageAckResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.StreamHealthResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.MessageAckResponse; /** - * Decodes a StreamHealthResponse message from the specified reader or buffer, length delimited. 
+ * Decodes a MessageAckResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns StreamHealthResponse + * @returns MessageAckResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.StreamHealthResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.MessageAckResponse; /** - * Verifies a StreamHealthResponse message. + * Verifies a MessageAckResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a StreamHealthResponse message from a plain object. Also converts values to their respective internal types. + * Creates a MessageAckResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns StreamHealthResponse + * @returns MessageAckResponse */ - public static fromObject(object: { [k: string]: any }): query.StreamHealthResponse; + public static fromObject(object: { [k: string]: any }): query.MessageAckResponse; /** - * Creates a plain object from a StreamHealthResponse message. Also converts values to other types if specified. - * @param message StreamHealthResponse + * Creates a plain object from a MessageAckResponse message. Also converts values to other types if specified. + * @param message MessageAckResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.StreamHealthResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.MessageAckResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this StreamHealthResponse to JSON. 
+ * Converts this MessageAckResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for StreamHealthResponse + * Gets the default type url for MessageAckResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** TransactionState enum. */ - enum TransactionState { - UNKNOWN = 0, - PREPARE = 1, - COMMIT = 2, - ROLLBACK = 3 - } + /** Properties of a ReserveExecuteRequest. */ + interface IReserveExecuteRequest { - /** Properties of a TransactionMetadata. */ - interface ITransactionMetadata { + /** ReserveExecuteRequest effective_caller_id */ + effective_caller_id?: (vtrpc.ICallerID|null); - /** TransactionMetadata dtid */ - dtid?: (string|null); + /** ReserveExecuteRequest immediate_caller_id */ + immediate_caller_id?: (query.IVTGateCallerID|null); - /** TransactionMetadata state */ - state?: (query.TransactionState|null); + /** ReserveExecuteRequest target */ + target?: (query.ITarget|null); - /** TransactionMetadata time_created */ - time_created?: (number|Long|null); + /** ReserveExecuteRequest query */ + query?: (query.IBoundQuery|null); - /** TransactionMetadata participants */ - participants?: (query.ITarget[]|null); + /** ReserveExecuteRequest transaction_id */ + transaction_id?: (number|Long|null); + + /** ReserveExecuteRequest options */ + options?: (query.IExecuteOptions|null); + + /** ReserveExecuteRequest pre_queries */ + pre_queries?: (string[]|null); } - /** Represents a TransactionMetadata. */ - class TransactionMetadata implements ITransactionMetadata { + /** Represents a ReserveExecuteRequest. */ + class ReserveExecuteRequest implements IReserveExecuteRequest { /** - * Constructs a new TransactionMetadata. + * Constructs a new ReserveExecuteRequest. 
* @param [properties] Properties to set */ - constructor(properties?: query.ITransactionMetadata); + constructor(properties?: query.IReserveExecuteRequest); - /** TransactionMetadata dtid. */ - public dtid: string; + /** ReserveExecuteRequest effective_caller_id. */ + public effective_caller_id?: (vtrpc.ICallerID|null); - /** TransactionMetadata state. */ - public state: query.TransactionState; + /** ReserveExecuteRequest immediate_caller_id. */ + public immediate_caller_id?: (query.IVTGateCallerID|null); - /** TransactionMetadata time_created. */ - public time_created: (number|Long); + /** ReserveExecuteRequest target. */ + public target?: (query.ITarget|null); - /** TransactionMetadata participants. */ - public participants: query.ITarget[]; + /** ReserveExecuteRequest query. */ + public query?: (query.IBoundQuery|null); + + /** ReserveExecuteRequest transaction_id. */ + public transaction_id: (number|Long); + + /** ReserveExecuteRequest options. */ + public options?: (query.IExecuteOptions|null); + + /** ReserveExecuteRequest pre_queries. */ + public pre_queries: string[]; /** - * Creates a new TransactionMetadata instance using the specified properties. + * Creates a new ReserveExecuteRequest instance using the specified properties. * @param [properties] Properties to set - * @returns TransactionMetadata instance + * @returns ReserveExecuteRequest instance */ - public static create(properties?: query.ITransactionMetadata): query.TransactionMetadata; + public static create(properties?: query.IReserveExecuteRequest): query.ReserveExecuteRequest; /** - * Encodes the specified TransactionMetadata message. Does not implicitly {@link query.TransactionMetadata.verify|verify} messages. - * @param message TransactionMetadata message or plain object to encode + * Encodes the specified ReserveExecuteRequest message. Does not implicitly {@link query.ReserveExecuteRequest.verify|verify} messages. 
+ * @param message ReserveExecuteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.ITransactionMetadata, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IReserveExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified TransactionMetadata message, length delimited. Does not implicitly {@link query.TransactionMetadata.verify|verify} messages. - * @param message TransactionMetadata message or plain object to encode + * Encodes the specified ReserveExecuteRequest message, length delimited. Does not implicitly {@link query.ReserveExecuteRequest.verify|verify} messages. + * @param message ReserveExecuteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.ITransactionMetadata, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IReserveExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a TransactionMetadata message from the specified reader or buffer. + * Decodes a ReserveExecuteRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns TransactionMetadata + * @returns ReserveExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.TransactionMetadata; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReserveExecuteRequest; /** - * Decodes a TransactionMetadata message from the specified reader or buffer, length delimited. + * Decodes a ReserveExecuteRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns TransactionMetadata + * @returns ReserveExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.TransactionMetadata; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReserveExecuteRequest; /** - * Verifies a TransactionMetadata message. + * Verifies a ReserveExecuteRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a TransactionMetadata message from a plain object. Also converts values to their respective internal types. + * Creates a ReserveExecuteRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns TransactionMetadata + * @returns ReserveExecuteRequest */ - public static fromObject(object: { [k: string]: any }): query.TransactionMetadata; + public static fromObject(object: { [k: string]: any }): query.ReserveExecuteRequest; /** - * Creates a plain object from a TransactionMetadata message. Also converts values to other types if specified. - * @param message TransactionMetadata + * Creates a plain object from a ReserveExecuteRequest message. Also converts values to other types if specified. + * @param message ReserveExecuteRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.TransactionMetadata, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.ReserveExecuteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this TransactionMetadata to JSON. + * Converts this ReserveExecuteRequest to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for TransactionMetadata + * Gets the default type url for ReserveExecuteRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** SchemaTableType enum. */ - enum SchemaTableType { - VIEWS = 0, - TABLES = 1, - ALL = 2 - } + /** Properties of a ReserveExecuteResponse. */ + interface IReserveExecuteResponse { - /** Properties of a GetSchemaRequest. */ - interface IGetSchemaRequest { + /** ReserveExecuteResponse error */ + error?: (vtrpc.IRPCError|null); - /** GetSchemaRequest target */ - target?: (query.ITarget|null); + /** ReserveExecuteResponse result */ + result?: (query.IQueryResult|null); - /** GetSchemaRequest table_type */ - table_type?: (query.SchemaTableType|null); + /** ReserveExecuteResponse reserved_id */ + reserved_id?: (number|Long|null); - /** GetSchemaRequest table_names */ - table_names?: (string[]|null); + /** ReserveExecuteResponse tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); } - /** Represents a GetSchemaRequest. */ - class GetSchemaRequest implements IGetSchemaRequest { + /** Represents a ReserveExecuteResponse. */ + class ReserveExecuteResponse implements IReserveExecuteResponse { /** - * Constructs a new GetSchemaRequest. + * Constructs a new ReserveExecuteResponse. * @param [properties] Properties to set */ - constructor(properties?: query.IGetSchemaRequest); + constructor(properties?: query.IReserveExecuteResponse); - /** GetSchemaRequest target. */ - public target?: (query.ITarget|null); + /** ReserveExecuteResponse error. */ + public error?: (vtrpc.IRPCError|null); - /** GetSchemaRequest table_type. */ - public table_type: query.SchemaTableType; + /** ReserveExecuteResponse result. */ + public result?: (query.IQueryResult|null); - /** GetSchemaRequest table_names. 
*/ - public table_names: string[]; + /** ReserveExecuteResponse reserved_id. */ + public reserved_id: (number|Long); + + /** ReserveExecuteResponse tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); /** - * Creates a new GetSchemaRequest instance using the specified properties. + * Creates a new ReserveExecuteResponse instance using the specified properties. * @param [properties] Properties to set - * @returns GetSchemaRequest instance + * @returns ReserveExecuteResponse instance */ - public static create(properties?: query.IGetSchemaRequest): query.GetSchemaRequest; + public static create(properties?: query.IReserveExecuteResponse): query.ReserveExecuteResponse; /** - * Encodes the specified GetSchemaRequest message. Does not implicitly {@link query.GetSchemaRequest.verify|verify} messages. - * @param message GetSchemaRequest message or plain object to encode + * Encodes the specified ReserveExecuteResponse message. Does not implicitly {@link query.ReserveExecuteResponse.verify|verify} messages. + * @param message ReserveExecuteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IGetSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IReserveExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetSchemaRequest message, length delimited. Does not implicitly {@link query.GetSchemaRequest.verify|verify} messages. - * @param message GetSchemaRequest message or plain object to encode + * Encodes the specified ReserveExecuteResponse message, length delimited. Does not implicitly {@link query.ReserveExecuteResponse.verify|verify} messages. 
+ * @param message ReserveExecuteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IGetSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IReserveExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetSchemaRequest message from the specified reader or buffer. + * Decodes a ReserveExecuteResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetSchemaRequest + * @returns ReserveExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.GetSchemaRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReserveExecuteResponse; /** - * Decodes a GetSchemaRequest message from the specified reader or buffer, length delimited. + * Decodes a ReserveExecuteResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetSchemaRequest + * @returns ReserveExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.GetSchemaRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReserveExecuteResponse; /** - * Verifies a GetSchemaRequest message. + * Verifies a ReserveExecuteResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetSchemaRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ReserveExecuteResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetSchemaRequest + * @returns ReserveExecuteResponse */ - public static fromObject(object: { [k: string]: any }): query.GetSchemaRequest; + public static fromObject(object: { [k: string]: any }): query.ReserveExecuteResponse; /** - * Creates a plain object from a GetSchemaRequest message. Also converts values to other types if specified. - * @param message GetSchemaRequest + * Creates a plain object from a ReserveExecuteResponse message. Also converts values to other types if specified. + * @param message ReserveExecuteResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.GetSchemaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.ReserveExecuteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetSchemaRequest to JSON. + * Converts this ReserveExecuteResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetSchemaRequest + * Gets the default type url for ReserveExecuteResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetSchemaResponse. */ - interface IGetSchemaResponse { + /** Properties of a ReserveStreamExecuteRequest. 
*/ + interface IReserveStreamExecuteRequest { - /** GetSchemaResponse table_definition */ - table_definition?: ({ [k: string]: string }|null); + /** ReserveStreamExecuteRequest effective_caller_id */ + effective_caller_id?: (vtrpc.ICallerID|null); + + /** ReserveStreamExecuteRequest immediate_caller_id */ + immediate_caller_id?: (query.IVTGateCallerID|null); + + /** ReserveStreamExecuteRequest target */ + target?: (query.ITarget|null); + + /** ReserveStreamExecuteRequest query */ + query?: (query.IBoundQuery|null); + + /** ReserveStreamExecuteRequest options */ + options?: (query.IExecuteOptions|null); + + /** ReserveStreamExecuteRequest transaction_id */ + transaction_id?: (number|Long|null); + + /** ReserveStreamExecuteRequest pre_queries */ + pre_queries?: (string[]|null); } - /** Represents a GetSchemaResponse. */ - class GetSchemaResponse implements IGetSchemaResponse { + /** Represents a ReserveStreamExecuteRequest. */ + class ReserveStreamExecuteRequest implements IReserveStreamExecuteRequest { /** - * Constructs a new GetSchemaResponse. + * Constructs a new ReserveStreamExecuteRequest. * @param [properties] Properties to set */ - constructor(properties?: query.IGetSchemaResponse); + constructor(properties?: query.IReserveStreamExecuteRequest); - /** GetSchemaResponse table_definition. */ - public table_definition: { [k: string]: string }; + /** ReserveStreamExecuteRequest effective_caller_id. */ + public effective_caller_id?: (vtrpc.ICallerID|null); + + /** ReserveStreamExecuteRequest immediate_caller_id. */ + public immediate_caller_id?: (query.IVTGateCallerID|null); + + /** ReserveStreamExecuteRequest target. */ + public target?: (query.ITarget|null); + + /** ReserveStreamExecuteRequest query. */ + public query?: (query.IBoundQuery|null); + + /** ReserveStreamExecuteRequest options. */ + public options?: (query.IExecuteOptions|null); + + /** ReserveStreamExecuteRequest transaction_id. 
*/ + public transaction_id: (number|Long); + + /** ReserveStreamExecuteRequest pre_queries. */ + public pre_queries: string[]; /** - * Creates a new GetSchemaResponse instance using the specified properties. + * Creates a new ReserveStreamExecuteRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetSchemaResponse instance + * @returns ReserveStreamExecuteRequest instance */ - public static create(properties?: query.IGetSchemaResponse): query.GetSchemaResponse; + public static create(properties?: query.IReserveStreamExecuteRequest): query.ReserveStreamExecuteRequest; /** - * Encodes the specified GetSchemaResponse message. Does not implicitly {@link query.GetSchemaResponse.verify|verify} messages. - * @param message GetSchemaResponse message or plain object to encode + * Encodes the specified ReserveStreamExecuteRequest message. Does not implicitly {@link query.ReserveStreamExecuteRequest.verify|verify} messages. + * @param message ReserveStreamExecuteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.IGetSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IReserveStreamExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetSchemaResponse message, length delimited. Does not implicitly {@link query.GetSchemaResponse.verify|verify} messages. - * @param message GetSchemaResponse message or plain object to encode + * Encodes the specified ReserveStreamExecuteRequest message, length delimited. Does not implicitly {@link query.ReserveStreamExecuteRequest.verify|verify} messages. 
+ * @param message ReserveStreamExecuteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.IGetSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IReserveStreamExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetSchemaResponse message from the specified reader or buffer. + * Decodes a ReserveStreamExecuteRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetSchemaResponse + * @returns ReserveStreamExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.GetSchemaResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReserveStreamExecuteRequest; /** - * Decodes a GetSchemaResponse message from the specified reader or buffer, length delimited. + * Decodes a ReserveStreamExecuteRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetSchemaResponse + * @returns ReserveStreamExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.GetSchemaResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReserveStreamExecuteRequest; /** - * Verifies a GetSchemaResponse message. + * Verifies a ReserveStreamExecuteRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetSchemaResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ReserveStreamExecuteRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetSchemaResponse + * @returns ReserveStreamExecuteRequest */ - public static fromObject(object: { [k: string]: any }): query.GetSchemaResponse; + public static fromObject(object: { [k: string]: any }): query.ReserveStreamExecuteRequest; /** - * Creates a plain object from a GetSchemaResponse message. Also converts values to other types if specified. - * @param message GetSchemaResponse + * Creates a plain object from a ReserveStreamExecuteRequest message. Also converts values to other types if specified. + * @param message ReserveStreamExecuteRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.GetSchemaResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.ReserveStreamExecuteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetSchemaResponse to JSON. + * Converts this ReserveStreamExecuteRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetSchemaResponse + * Gets the default type url for ReserveStreamExecuteRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a LoadDataStreamRequest. */ - interface ILoadDataStreamRequest { + /** Properties of a ReserveStreamExecuteResponse. 
*/ + interface IReserveStreamExecuteResponse { - /** LoadDataStreamRequest effective_caller_id */ + /** ReserveStreamExecuteResponse error */ + error?: (vtrpc.IRPCError|null); + + /** ReserveStreamExecuteResponse result */ + result?: (query.IQueryResult|null); + + /** ReserveStreamExecuteResponse reserved_id */ + reserved_id?: (number|Long|null); + + /** ReserveStreamExecuteResponse tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); + } + + /** Represents a ReserveStreamExecuteResponse. */ + class ReserveStreamExecuteResponse implements IReserveStreamExecuteResponse { + + /** + * Constructs a new ReserveStreamExecuteResponse. + * @param [properties] Properties to set + */ + constructor(properties?: query.IReserveStreamExecuteResponse); + + /** ReserveStreamExecuteResponse error. */ + public error?: (vtrpc.IRPCError|null); + + /** ReserveStreamExecuteResponse result. */ + public result?: (query.IQueryResult|null); + + /** ReserveStreamExecuteResponse reserved_id. */ + public reserved_id: (number|Long); + + /** ReserveStreamExecuteResponse tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); + + /** + * Creates a new ReserveStreamExecuteResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns ReserveStreamExecuteResponse instance + */ + public static create(properties?: query.IReserveStreamExecuteResponse): query.ReserveStreamExecuteResponse; + + /** + * Encodes the specified ReserveStreamExecuteResponse message. Does not implicitly {@link query.ReserveStreamExecuteResponse.verify|verify} messages. + * @param message ReserveStreamExecuteResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: query.IReserveStreamExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ReserveStreamExecuteResponse message, length delimited. 
Does not implicitly {@link query.ReserveStreamExecuteResponse.verify|verify} messages. + * @param message ReserveStreamExecuteResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: query.IReserveStreamExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ReserveStreamExecuteResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ReserveStreamExecuteResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReserveStreamExecuteResponse; + + /** + * Decodes a ReserveStreamExecuteResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ReserveStreamExecuteResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReserveStreamExecuteResponse; + + /** + * Verifies a ReserveStreamExecuteResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ReserveStreamExecuteResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ReserveStreamExecuteResponse + */ + public static fromObject(object: { [k: string]: any }): query.ReserveStreamExecuteResponse; + + /** + * Creates a plain object from a ReserveStreamExecuteResponse message. 
Also converts values to other types if specified. + * @param message ReserveStreamExecuteResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: query.ReserveStreamExecuteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ReserveStreamExecuteResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ReserveStreamExecuteResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a ReserveBeginExecuteRequest. */ + interface IReserveBeginExecuteRequest { + + /** ReserveBeginExecuteRequest effective_caller_id */ effective_caller_id?: (vtrpc.ICallerID|null); - /** LoadDataStreamRequest immediate_caller_id */ + /** ReserveBeginExecuteRequest immediate_caller_id */ immediate_caller_id?: (query.IVTGateCallerID|null); - /** LoadDataStreamRequest target */ + /** ReserveBeginExecuteRequest target */ target?: (query.ITarget|null); - /** LoadDataStreamRequest query */ + /** ReserveBeginExecuteRequest query */ query?: (query.IBoundQuery|null); - /** LoadDataStreamRequest transaction_id */ - transaction_id?: (number|Long|null); - - /** LoadDataStreamRequest options */ + /** ReserveBeginExecuteRequest options */ options?: (query.IExecuteOptions|null); - /** LoadDataStreamRequest lines */ - lines?: (string[]|null); + /** ReserveBeginExecuteRequest pre_queries */ + pre_queries?: (string[]|null); + + /** ReserveBeginExecuteRequest post_begin_queries */ + post_begin_queries?: (string[]|null); } - /** Represents a LoadDataStreamRequest. */ - class LoadDataStreamRequest implements ILoadDataStreamRequest { + /** Represents a ReserveBeginExecuteRequest. 
*/ + class ReserveBeginExecuteRequest implements IReserveBeginExecuteRequest { /** - * Constructs a new LoadDataStreamRequest. + * Constructs a new ReserveBeginExecuteRequest. * @param [properties] Properties to set */ - constructor(properties?: query.ILoadDataStreamRequest); + constructor(properties?: query.IReserveBeginExecuteRequest); - /** LoadDataStreamRequest effective_caller_id. */ + /** ReserveBeginExecuteRequest effective_caller_id. */ public effective_caller_id?: (vtrpc.ICallerID|null); - /** LoadDataStreamRequest immediate_caller_id. */ + /** ReserveBeginExecuteRequest immediate_caller_id. */ public immediate_caller_id?: (query.IVTGateCallerID|null); - /** LoadDataStreamRequest target. */ + /** ReserveBeginExecuteRequest target. */ public target?: (query.ITarget|null); - /** LoadDataStreamRequest query. */ + /** ReserveBeginExecuteRequest query. */ public query?: (query.IBoundQuery|null); - /** LoadDataStreamRequest transaction_id. */ - public transaction_id: (number|Long); - - /** LoadDataStreamRequest options. */ + /** ReserveBeginExecuteRequest options. */ public options?: (query.IExecuteOptions|null); - /** LoadDataStreamRequest lines. */ - public lines: string[]; + /** ReserveBeginExecuteRequest pre_queries. */ + public pre_queries: string[]; + + /** ReserveBeginExecuteRequest post_begin_queries. */ + public post_begin_queries: string[]; /** - * Creates a new LoadDataStreamRequest instance using the specified properties. + * Creates a new ReserveBeginExecuteRequest instance using the specified properties. * @param [properties] Properties to set - * @returns LoadDataStreamRequest instance + * @returns ReserveBeginExecuteRequest instance */ - public static create(properties?: query.ILoadDataStreamRequest): query.LoadDataStreamRequest; + public static create(properties?: query.IReserveBeginExecuteRequest): query.ReserveBeginExecuteRequest; /** - * Encodes the specified LoadDataStreamRequest message. 
Does not implicitly {@link query.LoadDataStreamRequest.verify|verify} messages. - * @param message LoadDataStreamRequest message or plain object to encode + * Encodes the specified ReserveBeginExecuteRequest message. Does not implicitly {@link query.ReserveBeginExecuteRequest.verify|verify} messages. + * @param message ReserveBeginExecuteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.ILoadDataStreamRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IReserveBeginExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified LoadDataStreamRequest message, length delimited. Does not implicitly {@link query.LoadDataStreamRequest.verify|verify} messages. - * @param message LoadDataStreamRequest message or plain object to encode + * Encodes the specified ReserveBeginExecuteRequest message, length delimited. Does not implicitly {@link query.ReserveBeginExecuteRequest.verify|verify} messages. + * @param message ReserveBeginExecuteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.ILoadDataStreamRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IReserveBeginExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a LoadDataStreamRequest message from the specified reader or buffer. + * Decodes a ReserveBeginExecuteRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns LoadDataStreamRequest + * @returns ReserveBeginExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.LoadDataStreamRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReserveBeginExecuteRequest; /** - * Decodes a LoadDataStreamRequest message from the specified reader or buffer, length delimited. + * Decodes a ReserveBeginExecuteRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns LoadDataStreamRequest + * @returns ReserveBeginExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.LoadDataStreamRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReserveBeginExecuteRequest; /** - * Verifies a LoadDataStreamRequest message. + * Verifies a ReserveBeginExecuteRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a LoadDataStreamRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ReserveBeginExecuteRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns LoadDataStreamRequest + * @returns ReserveBeginExecuteRequest */ - public static fromObject(object: { [k: string]: any }): query.LoadDataStreamRequest; + public static fromObject(object: { [k: string]: any }): query.ReserveBeginExecuteRequest; /** - * Creates a plain object from a LoadDataStreamRequest message. Also converts values to other types if specified. - * @param message LoadDataStreamRequest + * Creates a plain object from a ReserveBeginExecuteRequest message. Also converts values to other types if specified. + * @param message ReserveBeginExecuteRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.LoadDataStreamRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.ReserveBeginExecuteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this LoadDataStreamRequest to JSON. + * Converts this ReserveBeginExecuteRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for LoadDataStreamRequest + * Gets the default type url for ReserveBeginExecuteRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a LoadDataStreamResponse. */ - interface ILoadDataStreamResponse { + /** Properties of a ReserveBeginExecuteResponse. 
*/ + interface IReserveBeginExecuteResponse { - /** LoadDataStreamResponse result */ + /** ReserveBeginExecuteResponse error */ + error?: (vtrpc.IRPCError|null); + + /** ReserveBeginExecuteResponse result */ result?: (query.IQueryResult|null); + + /** ReserveBeginExecuteResponse transaction_id */ + transaction_id?: (number|Long|null); + + /** ReserveBeginExecuteResponse reserved_id */ + reserved_id?: (number|Long|null); + + /** ReserveBeginExecuteResponse tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); + + /** ReserveBeginExecuteResponse session_state_changes */ + session_state_changes?: (string|null); } - /** Represents a LoadDataStreamResponse. */ - class LoadDataStreamResponse implements ILoadDataStreamResponse { + /** Represents a ReserveBeginExecuteResponse. */ + class ReserveBeginExecuteResponse implements IReserveBeginExecuteResponse { /** - * Constructs a new LoadDataStreamResponse. + * Constructs a new ReserveBeginExecuteResponse. * @param [properties] Properties to set */ - constructor(properties?: query.ILoadDataStreamResponse); + constructor(properties?: query.IReserveBeginExecuteResponse); - /** LoadDataStreamResponse result. */ + /** ReserveBeginExecuteResponse error. */ + public error?: (vtrpc.IRPCError|null); + + /** ReserveBeginExecuteResponse result. */ public result?: (query.IQueryResult|null); + /** ReserveBeginExecuteResponse transaction_id. */ + public transaction_id: (number|Long); + + /** ReserveBeginExecuteResponse reserved_id. */ + public reserved_id: (number|Long); + + /** ReserveBeginExecuteResponse tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); + + /** ReserveBeginExecuteResponse session_state_changes. */ + public session_state_changes: string; + /** - * Creates a new LoadDataStreamResponse instance using the specified properties. + * Creates a new ReserveBeginExecuteResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns LoadDataStreamResponse instance + * @returns ReserveBeginExecuteResponse instance */ - public static create(properties?: query.ILoadDataStreamResponse): query.LoadDataStreamResponse; + public static create(properties?: query.IReserveBeginExecuteResponse): query.ReserveBeginExecuteResponse; /** - * Encodes the specified LoadDataStreamResponse message. Does not implicitly {@link query.LoadDataStreamResponse.verify|verify} messages. - * @param message LoadDataStreamResponse message or plain object to encode + * Encodes the specified ReserveBeginExecuteResponse message. Does not implicitly {@link query.ReserveBeginExecuteResponse.verify|verify} messages. + * @param message ReserveBeginExecuteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: query.ILoadDataStreamResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IReserveBeginExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified LoadDataStreamResponse message, length delimited. Does not implicitly {@link query.LoadDataStreamResponse.verify|verify} messages. - * @param message LoadDataStreamResponse message or plain object to encode + * Encodes the specified ReserveBeginExecuteResponse message, length delimited. Does not implicitly {@link query.ReserveBeginExecuteResponse.verify|verify} messages. + * @param message ReserveBeginExecuteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: query.ILoadDataStreamResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IReserveBeginExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a LoadDataStreamResponse message from the specified reader or buffer. 
+ * Decodes a ReserveBeginExecuteResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns LoadDataStreamResponse + * @returns ReserveBeginExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.LoadDataStreamResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReserveBeginExecuteResponse; /** - * Decodes a LoadDataStreamResponse message from the specified reader or buffer, length delimited. + * Decodes a ReserveBeginExecuteResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns LoadDataStreamResponse + * @returns ReserveBeginExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.LoadDataStreamResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReserveBeginExecuteResponse; /** - * Verifies a LoadDataStreamResponse message. + * Verifies a ReserveBeginExecuteResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a LoadDataStreamResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ReserveBeginExecuteResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns LoadDataStreamResponse + * @returns ReserveBeginExecuteResponse */ - public static fromObject(object: { [k: string]: any }): query.LoadDataStreamResponse; + public static fromObject(object: { [k: string]: any }): query.ReserveBeginExecuteResponse; /** - * Creates a plain object from a LoadDataStreamResponse message. Also converts values to other types if specified. - * @param message LoadDataStreamResponse + * Creates a plain object from a ReserveBeginExecuteResponse message. Also converts values to other types if specified. + * @param message ReserveBeginExecuteResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: query.LoadDataStreamResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.ReserveBeginExecuteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this LoadDataStreamResponse to JSON. + * Converts this ReserveBeginExecuteResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for LoadDataStreamResponse + * Gets the default type url for ReserveBeginExecuteResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } -} -/** Namespace replicationdata. */ -export namespace replicationdata { + /** Properties of a ReserveBeginStreamExecuteRequest. */ + interface IReserveBeginStreamExecuteRequest { - /** Properties of a Status. 
*/ - interface IStatus { + /** ReserveBeginStreamExecuteRequest effective_caller_id */ + effective_caller_id?: (vtrpc.ICallerID|null); - /** Status position */ - position?: (string|null); + /** ReserveBeginStreamExecuteRequest immediate_caller_id */ + immediate_caller_id?: (query.IVTGateCallerID|null); - /** Status replication_lag_seconds */ - replication_lag_seconds?: (number|null); + /** ReserveBeginStreamExecuteRequest target */ + target?: (query.ITarget|null); - /** Status source_host */ - source_host?: (string|null); + /** ReserveBeginStreamExecuteRequest query */ + query?: (query.IBoundQuery|null); - /** Status source_port */ - source_port?: (number|null); + /** ReserveBeginStreamExecuteRequest options */ + options?: (query.IExecuteOptions|null); - /** Status connect_retry */ - connect_retry?: (number|null); + /** ReserveBeginStreamExecuteRequest pre_queries */ + pre_queries?: (string[]|null); - /** Status relay_log_position */ - relay_log_position?: (string|null); + /** ReserveBeginStreamExecuteRequest post_begin_queries */ + post_begin_queries?: (string[]|null); + } - /** Status file_position */ - file_position?: (string|null); + /** Represents a ReserveBeginStreamExecuteRequest. */ + class ReserveBeginStreamExecuteRequest implements IReserveBeginStreamExecuteRequest { - /** Status relay_log_source_binlog_equivalent_position */ - relay_log_source_binlog_equivalent_position?: (string|null); + /** + * Constructs a new ReserveBeginStreamExecuteRequest. + * @param [properties] Properties to set + */ + constructor(properties?: query.IReserveBeginStreamExecuteRequest); - /** Status source_server_id */ - source_server_id?: (number|null); + /** ReserveBeginStreamExecuteRequest effective_caller_id. */ + public effective_caller_id?: (vtrpc.ICallerID|null); - /** Status source_uuid */ - source_uuid?: (string|null); + /** ReserveBeginStreamExecuteRequest immediate_caller_id. 
*/ + public immediate_caller_id?: (query.IVTGateCallerID|null); - /** Status io_state */ - io_state?: (number|null); + /** ReserveBeginStreamExecuteRequest target. */ + public target?: (query.ITarget|null); - /** Status last_io_error */ - last_io_error?: (string|null); + /** ReserveBeginStreamExecuteRequest query. */ + public query?: (query.IBoundQuery|null); - /** Status sql_state */ - sql_state?: (number|null); + /** ReserveBeginStreamExecuteRequest options. */ + public options?: (query.IExecuteOptions|null); - /** Status last_sql_error */ - last_sql_error?: (string|null); + /** ReserveBeginStreamExecuteRequest pre_queries. */ + public pre_queries: string[]; - /** Status relay_log_file_position */ - relay_log_file_position?: (string|null); + /** ReserveBeginStreamExecuteRequest post_begin_queries. */ + public post_begin_queries: string[]; - /** Status source_user */ - source_user?: (string|null); + /** + * Creates a new ReserveBeginStreamExecuteRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns ReserveBeginStreamExecuteRequest instance + */ + public static create(properties?: query.IReserveBeginStreamExecuteRequest): query.ReserveBeginStreamExecuteRequest; - /** Status sql_delay */ - sql_delay?: (number|null); - - /** Status auto_position */ - auto_position?: (boolean|null); - - /** Status using_gtid */ - using_gtid?: (boolean|null); - - /** Status has_replication_filters */ - has_replication_filters?: (boolean|null); - - /** Status ssl_allowed */ - ssl_allowed?: (boolean|null); - - /** Status replication_lag_unknown */ - replication_lag_unknown?: (boolean|null); - } - - /** Represents a Status. */ - class Status implements IStatus { + /** + * Encodes the specified ReserveBeginStreamExecuteRequest message. Does not implicitly {@link query.ReserveBeginStreamExecuteRequest.verify|verify} messages. 
+ * @param message ReserveBeginStreamExecuteRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: query.IReserveBeginStreamExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Constructs a new Status. - * @param [properties] Properties to set + * Encodes the specified ReserveBeginStreamExecuteRequest message, length delimited. Does not implicitly {@link query.ReserveBeginStreamExecuteRequest.verify|verify} messages. + * @param message ReserveBeginStreamExecuteRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer */ - constructor(properties?: replicationdata.IStatus); + public static encodeDelimited(message: query.IReserveBeginStreamExecuteRequest, writer?: $protobuf.Writer): $protobuf.Writer; - /** Status position. */ - public position: string; + /** + * Decodes a ReserveBeginStreamExecuteRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ReserveBeginStreamExecuteRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReserveBeginStreamExecuteRequest; - /** Status replication_lag_seconds. */ - public replication_lag_seconds: number; + /** + * Decodes a ReserveBeginStreamExecuteRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ReserveBeginStreamExecuteRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReserveBeginStreamExecuteRequest; - /** Status source_host. 
*/ - public source_host: string; + /** + * Verifies a ReserveBeginStreamExecuteRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); - /** Status source_port. */ - public source_port: number; + /** + * Creates a ReserveBeginStreamExecuteRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ReserveBeginStreamExecuteRequest + */ + public static fromObject(object: { [k: string]: any }): query.ReserveBeginStreamExecuteRequest; - /** Status connect_retry. */ - public connect_retry: number; + /** + * Creates a plain object from a ReserveBeginStreamExecuteRequest message. Also converts values to other types if specified. + * @param message ReserveBeginStreamExecuteRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: query.ReserveBeginStreamExecuteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; - /** Status relay_log_position. */ - public relay_log_position: string; + /** + * Converts this ReserveBeginStreamExecuteRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; - /** Status file_position. */ - public file_position: string; + /** + * Gets the default type url for ReserveBeginStreamExecuteRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } - /** Status relay_log_source_binlog_equivalent_position. */ - public relay_log_source_binlog_equivalent_position: string; + /** Properties of a ReserveBeginStreamExecuteResponse. */ + interface IReserveBeginStreamExecuteResponse { - /** Status source_server_id. 
*/ - public source_server_id: number; + /** ReserveBeginStreamExecuteResponse error */ + error?: (vtrpc.IRPCError|null); - /** Status source_uuid. */ - public source_uuid: string; + /** ReserveBeginStreamExecuteResponse result */ + result?: (query.IQueryResult|null); - /** Status io_state. */ - public io_state: number; + /** ReserveBeginStreamExecuteResponse transaction_id */ + transaction_id?: (number|Long|null); - /** Status last_io_error. */ - public last_io_error: string; + /** ReserveBeginStreamExecuteResponse reserved_id */ + reserved_id?: (number|Long|null); - /** Status sql_state. */ - public sql_state: number; + /** ReserveBeginStreamExecuteResponse tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); - /** Status last_sql_error. */ - public last_sql_error: string; + /** ReserveBeginStreamExecuteResponse session_state_changes */ + session_state_changes?: (string|null); + } - /** Status relay_log_file_position. */ - public relay_log_file_position: string; + /** Represents a ReserveBeginStreamExecuteResponse. */ + class ReserveBeginStreamExecuteResponse implements IReserveBeginStreamExecuteResponse { - /** Status source_user. */ - public source_user: string; + /** + * Constructs a new ReserveBeginStreamExecuteResponse. + * @param [properties] Properties to set + */ + constructor(properties?: query.IReserveBeginStreamExecuteResponse); - /** Status sql_delay. */ - public sql_delay: number; + /** ReserveBeginStreamExecuteResponse error. */ + public error?: (vtrpc.IRPCError|null); - /** Status auto_position. */ - public auto_position: boolean; + /** ReserveBeginStreamExecuteResponse result. */ + public result?: (query.IQueryResult|null); - /** Status using_gtid. */ - public using_gtid: boolean; + /** ReserveBeginStreamExecuteResponse transaction_id. */ + public transaction_id: (number|Long); - /** Status has_replication_filters. */ - public has_replication_filters: boolean; + /** ReserveBeginStreamExecuteResponse reserved_id. 
*/ + public reserved_id: (number|Long); - /** Status ssl_allowed. */ - public ssl_allowed: boolean; + /** ReserveBeginStreamExecuteResponse tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); - /** Status replication_lag_unknown. */ - public replication_lag_unknown: boolean; + /** ReserveBeginStreamExecuteResponse session_state_changes. */ + public session_state_changes: string; /** - * Creates a new Status instance using the specified properties. + * Creates a new ReserveBeginStreamExecuteResponse instance using the specified properties. * @param [properties] Properties to set - * @returns Status instance + * @returns ReserveBeginStreamExecuteResponse instance */ - public static create(properties?: replicationdata.IStatus): replicationdata.Status; + public static create(properties?: query.IReserveBeginStreamExecuteResponse): query.ReserveBeginStreamExecuteResponse; /** - * Encodes the specified Status message. Does not implicitly {@link replicationdata.Status.verify|verify} messages. - * @param message Status message or plain object to encode + * Encodes the specified ReserveBeginStreamExecuteResponse message. Does not implicitly {@link query.ReserveBeginStreamExecuteResponse.verify|verify} messages. + * @param message ReserveBeginStreamExecuteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: replicationdata.IStatus, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IReserveBeginStreamExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified Status message, length delimited. Does not implicitly {@link replicationdata.Status.verify|verify} messages. - * @param message Status message or plain object to encode + * Encodes the specified ReserveBeginStreamExecuteResponse message, length delimited. Does not implicitly {@link query.ReserveBeginStreamExecuteResponse.verify|verify} messages. 
+ * @param message ReserveBeginStreamExecuteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: replicationdata.IStatus, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IReserveBeginStreamExecuteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a Status message from the specified reader or buffer. + * Decodes a ReserveBeginStreamExecuteResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns Status + * @returns ReserveBeginStreamExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): replicationdata.Status; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReserveBeginStreamExecuteResponse; /** - * Decodes a Status message from the specified reader or buffer, length delimited. + * Decodes a ReserveBeginStreamExecuteResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns Status + * @returns ReserveBeginStreamExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): replicationdata.Status; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReserveBeginStreamExecuteResponse; /** - * Verifies a Status message. + * Verifies a ReserveBeginStreamExecuteResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a Status message from a plain object. Also converts values to their respective internal types. + * Creates a ReserveBeginStreamExecuteResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns Status + * @returns ReserveBeginStreamExecuteResponse */ - public static fromObject(object: { [k: string]: any }): replicationdata.Status; + public static fromObject(object: { [k: string]: any }): query.ReserveBeginStreamExecuteResponse; /** - * Creates a plain object from a Status message. Also converts values to other types if specified. - * @param message Status + * Creates a plain object from a ReserveBeginStreamExecuteResponse message. Also converts values to other types if specified. + * @param message ReserveBeginStreamExecuteResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: replicationdata.Status, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.ReserveBeginStreamExecuteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this Status to JSON. + * Converts this ReserveBeginStreamExecuteResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for Status + * Gets the default type url for ReserveBeginStreamExecuteResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a StopReplicationStatus. */ - interface IStopReplicationStatus { + /** Properties of a ReleaseRequest. 
*/ + interface IReleaseRequest { - /** StopReplicationStatus before */ - before?: (replicationdata.IStatus|null); + /** ReleaseRequest effective_caller_id */ + effective_caller_id?: (vtrpc.ICallerID|null); - /** StopReplicationStatus after */ - after?: (replicationdata.IStatus|null); + /** ReleaseRequest immediate_caller_id */ + immediate_caller_id?: (query.IVTGateCallerID|null); + + /** ReleaseRequest target */ + target?: (query.ITarget|null); + + /** ReleaseRequest transaction_id */ + transaction_id?: (number|Long|null); + + /** ReleaseRequest reserved_id */ + reserved_id?: (number|Long|null); } - /** Represents a StopReplicationStatus. */ - class StopReplicationStatus implements IStopReplicationStatus { + /** Represents a ReleaseRequest. */ + class ReleaseRequest implements IReleaseRequest { /** - * Constructs a new StopReplicationStatus. + * Constructs a new ReleaseRequest. * @param [properties] Properties to set */ - constructor(properties?: replicationdata.IStopReplicationStatus); + constructor(properties?: query.IReleaseRequest); - /** StopReplicationStatus before. */ - public before?: (replicationdata.IStatus|null); + /** ReleaseRequest effective_caller_id. */ + public effective_caller_id?: (vtrpc.ICallerID|null); - /** StopReplicationStatus after. */ - public after?: (replicationdata.IStatus|null); + /** ReleaseRequest immediate_caller_id. */ + public immediate_caller_id?: (query.IVTGateCallerID|null); + + /** ReleaseRequest target. */ + public target?: (query.ITarget|null); + + /** ReleaseRequest transaction_id. */ + public transaction_id: (number|Long); + + /** ReleaseRequest reserved_id. */ + public reserved_id: (number|Long); /** - * Creates a new StopReplicationStatus instance using the specified properties. + * Creates a new ReleaseRequest instance using the specified properties. 
* @param [properties] Properties to set - * @returns StopReplicationStatus instance + * @returns ReleaseRequest instance */ - public static create(properties?: replicationdata.IStopReplicationStatus): replicationdata.StopReplicationStatus; + public static create(properties?: query.IReleaseRequest): query.ReleaseRequest; /** - * Encodes the specified StopReplicationStatus message. Does not implicitly {@link replicationdata.StopReplicationStatus.verify|verify} messages. - * @param message StopReplicationStatus message or plain object to encode + * Encodes the specified ReleaseRequest message. Does not implicitly {@link query.ReleaseRequest.verify|verify} messages. + * @param message ReleaseRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: replicationdata.IStopReplicationStatus, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IReleaseRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified StopReplicationStatus message, length delimited. Does not implicitly {@link replicationdata.StopReplicationStatus.verify|verify} messages. - * @param message StopReplicationStatus message or plain object to encode + * Encodes the specified ReleaseRequest message, length delimited. Does not implicitly {@link query.ReleaseRequest.verify|verify} messages. + * @param message ReleaseRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: replicationdata.IStopReplicationStatus, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IReleaseRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a StopReplicationStatus message from the specified reader or buffer. + * Decodes a ReleaseRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns StopReplicationStatus + * @returns ReleaseRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): replicationdata.StopReplicationStatus; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReleaseRequest; /** - * Decodes a StopReplicationStatus message from the specified reader or buffer, length delimited. + * Decodes a ReleaseRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns StopReplicationStatus + * @returns ReleaseRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): replicationdata.StopReplicationStatus; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReleaseRequest; /** - * Verifies a StopReplicationStatus message. + * Verifies a ReleaseRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a StopReplicationStatus message from a plain object. Also converts values to their respective internal types. + * Creates a ReleaseRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns StopReplicationStatus + * @returns ReleaseRequest */ - public static fromObject(object: { [k: string]: any }): replicationdata.StopReplicationStatus; + public static fromObject(object: { [k: string]: any }): query.ReleaseRequest; /** - * Creates a plain object from a StopReplicationStatus message. Also converts values to other types if specified. - * @param message StopReplicationStatus + * Creates a plain object from a ReleaseRequest message. Also converts values to other types if specified. + * @param message ReleaseRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: replicationdata.StopReplicationStatus, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.ReleaseRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this StopReplicationStatus to JSON. + * Converts this ReleaseRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for StopReplicationStatus + * Gets the default type url for ReleaseRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** StopReplicationMode enum. */ - enum StopReplicationMode { - IOANDSQLTHREAD = 0, - IOTHREADONLY = 1 - } - - /** Properties of a PrimaryStatus. */ - interface IPrimaryStatus { - - /** PrimaryStatus position */ - position?: (string|null); - - /** PrimaryStatus file_position */ - file_position?: (string|null); + /** Properties of a ReleaseResponse. */ + interface IReleaseResponse { } - /** Represents a PrimaryStatus. */ - class PrimaryStatus implements IPrimaryStatus { + /** Represents a ReleaseResponse. */ + class ReleaseResponse implements IReleaseResponse { /** - * Constructs a new PrimaryStatus. + * Constructs a new ReleaseResponse. 
* @param [properties] Properties to set */ - constructor(properties?: replicationdata.IPrimaryStatus); - - /** PrimaryStatus position. */ - public position: string; - - /** PrimaryStatus file_position. */ - public file_position: string; + constructor(properties?: query.IReleaseResponse); /** - * Creates a new PrimaryStatus instance using the specified properties. + * Creates a new ReleaseResponse instance using the specified properties. * @param [properties] Properties to set - * @returns PrimaryStatus instance + * @returns ReleaseResponse instance */ - public static create(properties?: replicationdata.IPrimaryStatus): replicationdata.PrimaryStatus; + public static create(properties?: query.IReleaseResponse): query.ReleaseResponse; /** - * Encodes the specified PrimaryStatus message. Does not implicitly {@link replicationdata.PrimaryStatus.verify|verify} messages. - * @param message PrimaryStatus message or plain object to encode + * Encodes the specified ReleaseResponse message. Does not implicitly {@link query.ReleaseResponse.verify|verify} messages. + * @param message ReleaseResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: replicationdata.IPrimaryStatus, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IReleaseResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified PrimaryStatus message, length delimited. Does not implicitly {@link replicationdata.PrimaryStatus.verify|verify} messages. - * @param message PrimaryStatus message or plain object to encode + * Encodes the specified ReleaseResponse message, length delimited. Does not implicitly {@link query.ReleaseResponse.verify|verify} messages. 
+ * @param message ReleaseResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: replicationdata.IPrimaryStatus, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IReleaseResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a PrimaryStatus message from the specified reader or buffer. + * Decodes a ReleaseResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns PrimaryStatus + * @returns ReleaseResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): replicationdata.PrimaryStatus; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.ReleaseResponse; /** - * Decodes a PrimaryStatus message from the specified reader or buffer, length delimited. + * Decodes a ReleaseResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns PrimaryStatus + * @returns ReleaseResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): replicationdata.PrimaryStatus; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.ReleaseResponse; /** - * Verifies a PrimaryStatus message. + * Verifies a ReleaseResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a PrimaryStatus message from a plain object. 
Also converts values to their respective internal types. + * Creates a ReleaseResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns PrimaryStatus + * @returns ReleaseResponse */ - public static fromObject(object: { [k: string]: any }): replicationdata.PrimaryStatus; + public static fromObject(object: { [k: string]: any }): query.ReleaseResponse; /** - * Creates a plain object from a PrimaryStatus message. Also converts values to other types if specified. - * @param message PrimaryStatus + * Creates a plain object from a ReleaseResponse message. Also converts values to other types if specified. + * @param message ReleaseResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: replicationdata.PrimaryStatus, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.ReleaseResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this PrimaryStatus to JSON. + * Converts this ReleaseResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for PrimaryStatus + * Gets the default type url for ReleaseResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a FullStatus. */ - interface IFullStatus { - - /** FullStatus server_id */ - server_id?: (number|null); + /** Properties of a StreamHealthRequest. */ + interface IStreamHealthRequest { + } - /** FullStatus server_uuid */ - server_uuid?: (string|null); + /** Represents a StreamHealthRequest. */ + class StreamHealthRequest implements IStreamHealthRequest { - /** FullStatus replication_status */ - replication_status?: (replicationdata.IStatus|null); + /** + * Constructs a new StreamHealthRequest. 
+ * @param [properties] Properties to set + */ + constructor(properties?: query.IStreamHealthRequest); - /** FullStatus primary_status */ - primary_status?: (replicationdata.IPrimaryStatus|null); + /** + * Creates a new StreamHealthRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns StreamHealthRequest instance + */ + public static create(properties?: query.IStreamHealthRequest): query.StreamHealthRequest; - /** FullStatus gtid_purged */ - gtid_purged?: (string|null); + /** + * Encodes the specified StreamHealthRequest message. Does not implicitly {@link query.StreamHealthRequest.verify|verify} messages. + * @param message StreamHealthRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: query.IStreamHealthRequest, writer?: $protobuf.Writer): $protobuf.Writer; - /** FullStatus version */ - version?: (string|null); + /** + * Encodes the specified StreamHealthRequest message, length delimited. Does not implicitly {@link query.StreamHealthRequest.verify|verify} messages. + * @param message StreamHealthRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: query.IStreamHealthRequest, writer?: $protobuf.Writer): $protobuf.Writer; - /** FullStatus version_comment */ - version_comment?: (string|null); + /** + * Decodes a StreamHealthRequest message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns StreamHealthRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.StreamHealthRequest; - /** FullStatus read_only */ - read_only?: (boolean|null); + /** + * Decodes a StreamHealthRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns StreamHealthRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.StreamHealthRequest; - /** FullStatus gtid_mode */ - gtid_mode?: (string|null); + /** + * Verifies a StreamHealthRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); - /** FullStatus binlog_format */ - binlog_format?: (string|null); + /** + * Creates a StreamHealthRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns StreamHealthRequest + */ + public static fromObject(object: { [k: string]: any }): query.StreamHealthRequest; - /** FullStatus binlog_row_image */ - binlog_row_image?: (string|null); + /** + * Creates a plain object from a StreamHealthRequest message. Also converts values to other types if specified. 
+ * @param message StreamHealthRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: query.StreamHealthRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; - /** FullStatus log_bin_enabled */ - log_bin_enabled?: (boolean|null); + /** + * Converts this StreamHealthRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; - /** FullStatus log_replica_updates */ - log_replica_updates?: (boolean|null); + /** + * Gets the default type url for StreamHealthRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } - /** FullStatus semi_sync_primary_enabled */ - semi_sync_primary_enabled?: (boolean|null); + /** Properties of a RealtimeStats. */ + interface IRealtimeStats { - /** FullStatus semi_sync_replica_enabled */ - semi_sync_replica_enabled?: (boolean|null); + /** RealtimeStats health_error */ + health_error?: (string|null); - /** FullStatus semi_sync_primary_status */ - semi_sync_primary_status?: (boolean|null); + /** RealtimeStats replication_lag_seconds */ + replication_lag_seconds?: (number|null); - /** FullStatus semi_sync_replica_status */ - semi_sync_replica_status?: (boolean|null); + /** RealtimeStats binlog_players_count */ + binlog_players_count?: (number|null); - /** FullStatus semi_sync_primary_clients */ - semi_sync_primary_clients?: (number|null); + /** RealtimeStats filtered_replication_lag_seconds */ + filtered_replication_lag_seconds?: (number|Long|null); - /** FullStatus semi_sync_primary_timeout */ - semi_sync_primary_timeout?: (number|Long|null); + /** RealtimeStats cpu_usage */ + cpu_usage?: (number|null); - /** FullStatus semi_sync_wait_for_replica_count */ - semi_sync_wait_for_replica_count?: (number|null); + /** RealtimeStats qps */ + qps?: (number|null); - /** FullStatus super_read_only */ - 
super_read_only?: (boolean|null); + /** RealtimeStats table_schema_changed */ + table_schema_changed?: (string[]|null); + + /** RealtimeStats view_schema_changed */ + view_schema_changed?: (string[]|null); } - /** Represents a FullStatus. */ - class FullStatus implements IFullStatus { + /** Represents a RealtimeStats. */ + class RealtimeStats implements IRealtimeStats { /** - * Constructs a new FullStatus. + * Constructs a new RealtimeStats. * @param [properties] Properties to set */ - constructor(properties?: replicationdata.IFullStatus); - - /** FullStatus server_id. */ - public server_id: number; - - /** FullStatus server_uuid. */ - public server_uuid: string; - - /** FullStatus replication_status. */ - public replication_status?: (replicationdata.IStatus|null); - - /** FullStatus primary_status. */ - public primary_status?: (replicationdata.IPrimaryStatus|null); - - /** FullStatus gtid_purged. */ - public gtid_purged: string; - - /** FullStatus version. */ - public version: string; - - /** FullStatus version_comment. */ - public version_comment: string; - - /** FullStatus read_only. */ - public read_only: boolean; - - /** FullStatus gtid_mode. */ - public gtid_mode: string; - - /** FullStatus binlog_format. */ - public binlog_format: string; - - /** FullStatus binlog_row_image. */ - public binlog_row_image: string; - - /** FullStatus log_bin_enabled. */ - public log_bin_enabled: boolean; - - /** FullStatus log_replica_updates. */ - public log_replica_updates: boolean; + constructor(properties?: query.IRealtimeStats); - /** FullStatus semi_sync_primary_enabled. */ - public semi_sync_primary_enabled: boolean; + /** RealtimeStats health_error. */ + public health_error: string; - /** FullStatus semi_sync_replica_enabled. */ - public semi_sync_replica_enabled: boolean; + /** RealtimeStats replication_lag_seconds. */ + public replication_lag_seconds: number; - /** FullStatus semi_sync_primary_status. 
*/ - public semi_sync_primary_status: boolean; + /** RealtimeStats binlog_players_count. */ + public binlog_players_count: number; - /** FullStatus semi_sync_replica_status. */ - public semi_sync_replica_status: boolean; + /** RealtimeStats filtered_replication_lag_seconds. */ + public filtered_replication_lag_seconds: (number|Long); - /** FullStatus semi_sync_primary_clients. */ - public semi_sync_primary_clients: number; + /** RealtimeStats cpu_usage. */ + public cpu_usage: number; - /** FullStatus semi_sync_primary_timeout. */ - public semi_sync_primary_timeout: (number|Long); + /** RealtimeStats qps. */ + public qps: number; - /** FullStatus semi_sync_wait_for_replica_count. */ - public semi_sync_wait_for_replica_count: number; + /** RealtimeStats table_schema_changed. */ + public table_schema_changed: string[]; - /** FullStatus super_read_only. */ - public super_read_only: boolean; + /** RealtimeStats view_schema_changed. */ + public view_schema_changed: string[]; /** - * Creates a new FullStatus instance using the specified properties. + * Creates a new RealtimeStats instance using the specified properties. * @param [properties] Properties to set - * @returns FullStatus instance + * @returns RealtimeStats instance */ - public static create(properties?: replicationdata.IFullStatus): replicationdata.FullStatus; + public static create(properties?: query.IRealtimeStats): query.RealtimeStats; /** - * Encodes the specified FullStatus message. Does not implicitly {@link replicationdata.FullStatus.verify|verify} messages. - * @param message FullStatus message or plain object to encode + * Encodes the specified RealtimeStats message. Does not implicitly {@link query.RealtimeStats.verify|verify} messages. 
+ * @param message RealtimeStats message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: replicationdata.IFullStatus, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IRealtimeStats, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified FullStatus message, length delimited. Does not implicitly {@link replicationdata.FullStatus.verify|verify} messages. - * @param message FullStatus message or plain object to encode + * Encodes the specified RealtimeStats message, length delimited. Does not implicitly {@link query.RealtimeStats.verify|verify} messages. + * @param message RealtimeStats message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: replicationdata.IFullStatus, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IRealtimeStats, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a FullStatus message from the specified reader or buffer. + * Decodes a RealtimeStats message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns FullStatus + * @returns RealtimeStats * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): replicationdata.FullStatus; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.RealtimeStats; /** - * Decodes a FullStatus message from the specified reader or buffer, length delimited. + * Decodes a RealtimeStats message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns FullStatus + * @returns RealtimeStats * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): replicationdata.FullStatus; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.RealtimeStats; /** - * Verifies a FullStatus message. + * Verifies a RealtimeStats message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a FullStatus message from a plain object. Also converts values to their respective internal types. + * Creates a RealtimeStats message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns FullStatus + * @returns RealtimeStats */ - public static fromObject(object: { [k: string]: any }): replicationdata.FullStatus; + public static fromObject(object: { [k: string]: any }): query.RealtimeStats; /** - * Creates a plain object from a FullStatus message. Also converts values to other types if specified. - * @param message FullStatus + * Creates a plain object from a RealtimeStats message. Also converts values to other types if specified. + * @param message RealtimeStats * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: replicationdata.FullStatus, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.RealtimeStats, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this FullStatus to JSON. + * Converts this RealtimeStats to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for FullStatus + * Gets the default type url for RealtimeStats * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } -} -/** Namespace vschema. */ -export namespace vschema { + /** Properties of an AggregateStats. */ + interface IAggregateStats { - /** Properties of a RoutingRules. */ - interface IRoutingRules { + /** AggregateStats healthy_tablet_count */ + healthy_tablet_count?: (number|null); - /** RoutingRules rules */ - rules?: (vschema.IRoutingRule[]|null); + /** AggregateStats unhealthy_tablet_count */ + unhealthy_tablet_count?: (number|null); + + /** AggregateStats replication_lag_seconds_min */ + replication_lag_seconds_min?: (number|null); + + /** AggregateStats replication_lag_seconds_max */ + replication_lag_seconds_max?: (number|null); } - /** Represents a RoutingRules. */ - class RoutingRules implements IRoutingRules { + /** Represents an AggregateStats. */ + class AggregateStats implements IAggregateStats { /** - * Constructs a new RoutingRules. + * Constructs a new AggregateStats. * @param [properties] Properties to set */ - constructor(properties?: vschema.IRoutingRules); + constructor(properties?: query.IAggregateStats); - /** RoutingRules rules. */ - public rules: vschema.IRoutingRule[]; + /** AggregateStats healthy_tablet_count. */ + public healthy_tablet_count: number; + + /** AggregateStats unhealthy_tablet_count. */ + public unhealthy_tablet_count: number; + + /** AggregateStats replication_lag_seconds_min. */ + public replication_lag_seconds_min: number; + + /** AggregateStats replication_lag_seconds_max. */ + public replication_lag_seconds_max: number; /** - * Creates a new RoutingRules instance using the specified properties. + * Creates a new AggregateStats instance using the specified properties. 
* @param [properties] Properties to set - * @returns RoutingRules instance + * @returns AggregateStats instance */ - public static create(properties?: vschema.IRoutingRules): vschema.RoutingRules; + public static create(properties?: query.IAggregateStats): query.AggregateStats; /** - * Encodes the specified RoutingRules message. Does not implicitly {@link vschema.RoutingRules.verify|verify} messages. - * @param message RoutingRules message or plain object to encode + * Encodes the specified AggregateStats message. Does not implicitly {@link query.AggregateStats.verify|verify} messages. + * @param message AggregateStats message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vschema.IRoutingRules, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IAggregateStats, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RoutingRules message, length delimited. Does not implicitly {@link vschema.RoutingRules.verify|verify} messages. - * @param message RoutingRules message or plain object to encode + * Encodes the specified AggregateStats message, length delimited. Does not implicitly {@link query.AggregateStats.verify|verify} messages. + * @param message AggregateStats message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vschema.IRoutingRules, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IAggregateStats, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RoutingRules message from the specified reader or buffer. + * Decodes an AggregateStats message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RoutingRules + * @returns AggregateStats * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.RoutingRules; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.AggregateStats; /** - * Decodes a RoutingRules message from the specified reader or buffer, length delimited. + * Decodes an AggregateStats message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RoutingRules + * @returns AggregateStats * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.RoutingRules; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.AggregateStats; /** - * Verifies a RoutingRules message. + * Verifies an AggregateStats message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RoutingRules message from a plain object. Also converts values to their respective internal types. + * Creates an AggregateStats message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns RoutingRules + * @returns AggregateStats */ - public static fromObject(object: { [k: string]: any }): vschema.RoutingRules; + public static fromObject(object: { [k: string]: any }): query.AggregateStats; /** - * Creates a plain object from a RoutingRules message. Also converts values to other types if specified. 
- * @param message RoutingRules + * Creates a plain object from an AggregateStats message. Also converts values to other types if specified. + * @param message AggregateStats * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vschema.RoutingRules, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.AggregateStats, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RoutingRules to JSON. + * Converts this AggregateStats to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RoutingRules + * Gets the default type url for AggregateStats * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RoutingRule. */ - interface IRoutingRule { + /** Properties of a StreamHealthResponse. */ + interface IStreamHealthResponse { - /** RoutingRule from_table */ - from_table?: (string|null); + /** StreamHealthResponse target */ + target?: (query.ITarget|null); - /** RoutingRule to_tables */ - to_tables?: (string[]|null); + /** StreamHealthResponse serving */ + serving?: (boolean|null); + + /** StreamHealthResponse primary_term_start_timestamp */ + primary_term_start_timestamp?: (number|Long|null); + + /** StreamHealthResponse realtime_stats */ + realtime_stats?: (query.IRealtimeStats|null); + + /** StreamHealthResponse tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); } - /** Represents a RoutingRule. */ - class RoutingRule implements IRoutingRule { + /** Represents a StreamHealthResponse. */ + class StreamHealthResponse implements IStreamHealthResponse { /** - * Constructs a new RoutingRule. + * Constructs a new StreamHealthResponse. 
* @param [properties] Properties to set */ - constructor(properties?: vschema.IRoutingRule); + constructor(properties?: query.IStreamHealthResponse); - /** RoutingRule from_table. */ - public from_table: string; + /** StreamHealthResponse target. */ + public target?: (query.ITarget|null); - /** RoutingRule to_tables. */ - public to_tables: string[]; + /** StreamHealthResponse serving. */ + public serving: boolean; + + /** StreamHealthResponse primary_term_start_timestamp. */ + public primary_term_start_timestamp: (number|Long); + + /** StreamHealthResponse realtime_stats. */ + public realtime_stats?: (query.IRealtimeStats|null); + + /** StreamHealthResponse tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); /** - * Creates a new RoutingRule instance using the specified properties. + * Creates a new StreamHealthResponse instance using the specified properties. * @param [properties] Properties to set - * @returns RoutingRule instance + * @returns StreamHealthResponse instance */ - public static create(properties?: vschema.IRoutingRule): vschema.RoutingRule; + public static create(properties?: query.IStreamHealthResponse): query.StreamHealthResponse; /** - * Encodes the specified RoutingRule message. Does not implicitly {@link vschema.RoutingRule.verify|verify} messages. - * @param message RoutingRule message or plain object to encode + * Encodes the specified StreamHealthResponse message. Does not implicitly {@link query.StreamHealthResponse.verify|verify} messages. + * @param message StreamHealthResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vschema.IRoutingRule, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IStreamHealthResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RoutingRule message, length delimited. Does not implicitly {@link vschema.RoutingRule.verify|verify} messages. 
- * @param message RoutingRule message or plain object to encode + * Encodes the specified StreamHealthResponse message, length delimited. Does not implicitly {@link query.StreamHealthResponse.verify|verify} messages. + * @param message StreamHealthResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vschema.IRoutingRule, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IStreamHealthResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RoutingRule message from the specified reader or buffer. + * Decodes a StreamHealthResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RoutingRule + * @returns StreamHealthResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.RoutingRule; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.StreamHealthResponse; /** - * Decodes a RoutingRule message from the specified reader or buffer, length delimited. + * Decodes a StreamHealthResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RoutingRule + * @returns StreamHealthResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.RoutingRule; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.StreamHealthResponse; /** - * Verifies a RoutingRule message. + * Verifies a StreamHealthResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RoutingRule message from a plain object. Also converts values to their respective internal types. + * Creates a StreamHealthResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns RoutingRule + * @returns StreamHealthResponse */ - public static fromObject(object: { [k: string]: any }): vschema.RoutingRule; + public static fromObject(object: { [k: string]: any }): query.StreamHealthResponse; /** - * Creates a plain object from a RoutingRule message. Also converts values to other types if specified. - * @param message RoutingRule + * Creates a plain object from a StreamHealthResponse message. Also converts values to other types if specified. + * @param message StreamHealthResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vschema.RoutingRule, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.StreamHealthResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RoutingRule to JSON. + * Converts this StreamHealthResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RoutingRule + * Gets the default type url for StreamHealthResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a Keyspace. */ - interface IKeyspace { - - /** Keyspace sharded */ - sharded?: (boolean|null); - - /** Keyspace vindexes */ - vindexes?: ({ [k: string]: vschema.IVindex }|null); + /** TransactionState enum. 
*/ + enum TransactionState { + UNKNOWN = 0, + PREPARE = 1, + COMMIT = 2, + ROLLBACK = 3 + } - /** Keyspace tables */ - tables?: ({ [k: string]: vschema.ITable }|null); + /** Properties of a TransactionMetadata. */ + interface ITransactionMetadata { - /** Keyspace require_explicit_routing */ - require_explicit_routing?: (boolean|null); + /** TransactionMetadata dtid */ + dtid?: (string|null); - /** Keyspace cross_tablet */ - cross_tablet?: (boolean|null); + /** TransactionMetadata state */ + state?: (query.TransactionState|null); - /** Keyspace attach_enable */ - attach_enable?: (boolean|null); + /** TransactionMetadata time_created */ + time_created?: (number|Long|null); - /** Keyspace attach_to */ - attach_to?: (string|null); + /** TransactionMetadata participants */ + participants?: (query.ITarget[]|null); } - /** Represents a Keyspace. */ - class Keyspace implements IKeyspace { + /** Represents a TransactionMetadata. */ + class TransactionMetadata implements ITransactionMetadata { /** - * Constructs a new Keyspace. + * Constructs a new TransactionMetadata. * @param [properties] Properties to set */ - constructor(properties?: vschema.IKeyspace); - - /** Keyspace sharded. */ - public sharded: boolean; - - /** Keyspace vindexes. */ - public vindexes: { [k: string]: vschema.IVindex }; - - /** Keyspace tables. */ - public tables: { [k: string]: vschema.ITable }; + constructor(properties?: query.ITransactionMetadata); - /** Keyspace require_explicit_routing. */ - public require_explicit_routing: boolean; + /** TransactionMetadata dtid. */ + public dtid: string; - /** Keyspace cross_tablet. */ - public cross_tablet: boolean; + /** TransactionMetadata state. */ + public state: query.TransactionState; - /** Keyspace attach_enable. */ - public attach_enable: boolean; + /** TransactionMetadata time_created. */ + public time_created: (number|Long); - /** Keyspace attach_to. */ - public attach_to: string; + /** TransactionMetadata participants. 
*/ + public participants: query.ITarget[]; /** - * Creates a new Keyspace instance using the specified properties. + * Creates a new TransactionMetadata instance using the specified properties. * @param [properties] Properties to set - * @returns Keyspace instance + * @returns TransactionMetadata instance */ - public static create(properties?: vschema.IKeyspace): vschema.Keyspace; + public static create(properties?: query.ITransactionMetadata): query.TransactionMetadata; /** - * Encodes the specified Keyspace message. Does not implicitly {@link vschema.Keyspace.verify|verify} messages. - * @param message Keyspace message or plain object to encode + * Encodes the specified TransactionMetadata message. Does not implicitly {@link query.TransactionMetadata.verify|verify} messages. + * @param message TransactionMetadata message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vschema.IKeyspace, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.ITransactionMetadata, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified Keyspace message, length delimited. Does not implicitly {@link vschema.Keyspace.verify|verify} messages. - * @param message Keyspace message or plain object to encode + * Encodes the specified TransactionMetadata message, length delimited. Does not implicitly {@link query.TransactionMetadata.verify|verify} messages. + * @param message TransactionMetadata message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vschema.IKeyspace, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.ITransactionMetadata, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a Keyspace message from the specified reader or buffer. + * Decodes a TransactionMetadata message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns Keyspace + * @returns TransactionMetadata * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.Keyspace; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.TransactionMetadata; /** - * Decodes a Keyspace message from the specified reader or buffer, length delimited. + * Decodes a TransactionMetadata message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns Keyspace + * @returns TransactionMetadata * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.Keyspace; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.TransactionMetadata; /** - * Verifies a Keyspace message. + * Verifies a TransactionMetadata message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a Keyspace message from a plain object. Also converts values to their respective internal types. + * Creates a TransactionMetadata message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns Keyspace + * @returns TransactionMetadata */ - public static fromObject(object: { [k: string]: any }): vschema.Keyspace; + public static fromObject(object: { [k: string]: any }): query.TransactionMetadata; /** - * Creates a plain object from a Keyspace message. Also converts values to other types if specified. 
- * @param message Keyspace + * Creates a plain object from a TransactionMetadata message. Also converts values to other types if specified. + * @param message TransactionMetadata * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vschema.Keyspace, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.TransactionMetadata, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this Keyspace to JSON. + * Converts this TransactionMetadata to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for Keyspace + * Gets the default type url for TransactionMetadata * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a Vindex. */ - interface IVindex { + /** SchemaTableType enum. */ + enum SchemaTableType { + VIEWS = 0, + TABLES = 1, + ALL = 2 + } - /** Vindex type */ - type?: (string|null); + /** Properties of a GetSchemaRequest. */ + interface IGetSchemaRequest { - /** Vindex params */ - params?: ({ [k: string]: string }|null); + /** GetSchemaRequest target */ + target?: (query.ITarget|null); - /** Vindex owner */ - owner?: (string|null); + /** GetSchemaRequest table_type */ + table_type?: (query.SchemaTableType|null); + + /** GetSchemaRequest table_names */ + table_names?: (string[]|null); } - /** Represents a Vindex. */ - class Vindex implements IVindex { + /** Represents a GetSchemaRequest. */ + class GetSchemaRequest implements IGetSchemaRequest { /** - * Constructs a new Vindex. + * Constructs a new GetSchemaRequest. * @param [properties] Properties to set */ - constructor(properties?: vschema.IVindex); + constructor(properties?: query.IGetSchemaRequest); - /** Vindex type. */ - public type: string; + /** GetSchemaRequest target. 
*/ + public target?: (query.ITarget|null); - /** Vindex params. */ - public params: { [k: string]: string }; + /** GetSchemaRequest table_type. */ + public table_type: query.SchemaTableType; - /** Vindex owner. */ - public owner: string; + /** GetSchemaRequest table_names. */ + public table_names: string[]; /** - * Creates a new Vindex instance using the specified properties. + * Creates a new GetSchemaRequest instance using the specified properties. * @param [properties] Properties to set - * @returns Vindex instance + * @returns GetSchemaRequest instance */ - public static create(properties?: vschema.IVindex): vschema.Vindex; + public static create(properties?: query.IGetSchemaRequest): query.GetSchemaRequest; /** - * Encodes the specified Vindex message. Does not implicitly {@link vschema.Vindex.verify|verify} messages. - * @param message Vindex message or plain object to encode + * Encodes the specified GetSchemaRequest message. Does not implicitly {@link query.GetSchemaRequest.verify|verify} messages. + * @param message GetSchemaRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vschema.IVindex, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IGetSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified Vindex message, length delimited. Does not implicitly {@link vschema.Vindex.verify|verify} messages. - * @param message Vindex message or plain object to encode + * Encodes the specified GetSchemaRequest message, length delimited. Does not implicitly {@link query.GetSchemaRequest.verify|verify} messages. 
+ * @param message GetSchemaRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vschema.IVindex, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IGetSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a Vindex message from the specified reader or buffer. + * Decodes a GetSchemaRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns Vindex + * @returns GetSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.Vindex; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.GetSchemaRequest; /** - * Decodes a Vindex message from the specified reader or buffer, length delimited. + * Decodes a GetSchemaRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns Vindex + * @returns GetSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.Vindex; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.GetSchemaRequest; /** - * Verifies a Vindex message. + * Verifies a GetSchemaRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a Vindex message from a plain object. Also converts values to their respective internal types. 
+ * Creates a GetSchemaRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns Vindex + * @returns GetSchemaRequest */ - public static fromObject(object: { [k: string]: any }): vschema.Vindex; + public static fromObject(object: { [k: string]: any }): query.GetSchemaRequest; /** - * Creates a plain object from a Vindex message. Also converts values to other types if specified. - * @param message Vindex + * Creates a plain object from a GetSchemaRequest message. Also converts values to other types if specified. + * @param message GetSchemaRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vschema.Vindex, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.GetSchemaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this Vindex to JSON. + * Converts this GetSchemaRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for Vindex + * Gets the default type url for GetSchemaRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a Table. */ - interface ITable { - - /** Table type */ - type?: (string|null); - - /** Table column_vindexes */ - column_vindexes?: (vschema.IColumnVindex[]|null); - - /** Table auto_increment */ - auto_increment?: (vschema.IAutoIncrement|null); - - /** Table columns */ - columns?: (vschema.IColumn[]|null); - - /** Table pinned */ - pinned?: (string|null); - - /** Table column_list_authoritative */ - column_list_authoritative?: (boolean|null); + /** Properties of a GetSchemaResponse. 
*/ + interface IGetSchemaResponse { - /** Table source */ - source?: (string|null); + /** GetSchemaResponse table_definition */ + table_definition?: ({ [k: string]: string }|null); } - /** Represents a Table. */ - class Table implements ITable { + /** Represents a GetSchemaResponse. */ + class GetSchemaResponse implements IGetSchemaResponse { /** - * Constructs a new Table. + * Constructs a new GetSchemaResponse. * @param [properties] Properties to set */ - constructor(properties?: vschema.ITable); - - /** Table type. */ - public type: string; - - /** Table column_vindexes. */ - public column_vindexes: vschema.IColumnVindex[]; - - /** Table auto_increment. */ - public auto_increment?: (vschema.IAutoIncrement|null); - - /** Table columns. */ - public columns: vschema.IColumn[]; - - /** Table pinned. */ - public pinned: string; - - /** Table column_list_authoritative. */ - public column_list_authoritative: boolean; + constructor(properties?: query.IGetSchemaResponse); - /** Table source. */ - public source: string; + /** GetSchemaResponse table_definition. */ + public table_definition: { [k: string]: string }; /** - * Creates a new Table instance using the specified properties. + * Creates a new GetSchemaResponse instance using the specified properties. * @param [properties] Properties to set - * @returns Table instance + * @returns GetSchemaResponse instance */ - public static create(properties?: vschema.ITable): vschema.Table; + public static create(properties?: query.IGetSchemaResponse): query.GetSchemaResponse; /** - * Encodes the specified Table message. Does not implicitly {@link vschema.Table.verify|verify} messages. - * @param message Table message or plain object to encode + * Encodes the specified GetSchemaResponse message. Does not implicitly {@link query.GetSchemaResponse.verify|verify} messages. 
+ * @param message GetSchemaResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vschema.ITable, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.IGetSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified Table message, length delimited. Does not implicitly {@link vschema.Table.verify|verify} messages. - * @param message Table message or plain object to encode + * Encodes the specified GetSchemaResponse message, length delimited. Does not implicitly {@link query.GetSchemaResponse.verify|verify} messages. + * @param message GetSchemaResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vschema.ITable, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.IGetSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a Table message from the specified reader or buffer. + * Decodes a GetSchemaResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns Table + * @returns GetSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.Table; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.GetSchemaResponse; /** - * Decodes a Table message from the specified reader or buffer, length delimited. + * Decodes a GetSchemaResponse message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns Table + * @returns GetSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.Table; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.GetSchemaResponse; /** - * Verifies a Table message. + * Verifies a GetSchemaResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a Table message from a plain object. Also converts values to their respective internal types. + * Creates a GetSchemaResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns Table + * @returns GetSchemaResponse */ - public static fromObject(object: { [k: string]: any }): vschema.Table; + public static fromObject(object: { [k: string]: any }): query.GetSchemaResponse; /** - * Creates a plain object from a Table message. Also converts values to other types if specified. - * @param message Table + * Creates a plain object from a GetSchemaResponse message. Also converts values to other types if specified. + * @param message GetSchemaResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vschema.Table, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.GetSchemaResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this Table to JSON. + * Converts this GetSchemaResponse to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for Table + * Gets the default type url for GetSchemaResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ColumnVindex. */ - interface IColumnVindex { + /** Properties of a LoadDataStreamRequest. */ + interface ILoadDataStreamRequest { - /** ColumnVindex column */ - column?: (string|null); + /** LoadDataStreamRequest effective_caller_id */ + effective_caller_id?: (vtrpc.ICallerID|null); - /** ColumnVindex name */ - name?: (string|null); + /** LoadDataStreamRequest immediate_caller_id */ + immediate_caller_id?: (query.IVTGateCallerID|null); - /** ColumnVindex columns */ - columns?: (string[]|null); + /** LoadDataStreamRequest target */ + target?: (query.ITarget|null); + + /** LoadDataStreamRequest query */ + query?: (query.IBoundQuery|null); + + /** LoadDataStreamRequest transaction_id */ + transaction_id?: (number|Long|null); + + /** LoadDataStreamRequest options */ + options?: (query.IExecuteOptions|null); + + /** LoadDataStreamRequest lines */ + lines?: (string[]|null); } - /** Represents a ColumnVindex. */ - class ColumnVindex implements IColumnVindex { + /** Represents a LoadDataStreamRequest. */ + class LoadDataStreamRequest implements ILoadDataStreamRequest { /** - * Constructs a new ColumnVindex. + * Constructs a new LoadDataStreamRequest. * @param [properties] Properties to set */ - constructor(properties?: vschema.IColumnVindex); + constructor(properties?: query.ILoadDataStreamRequest); - /** ColumnVindex column. */ - public column: string; + /** LoadDataStreamRequest effective_caller_id. */ + public effective_caller_id?: (vtrpc.ICallerID|null); - /** ColumnVindex name. */ - public name: string; + /** LoadDataStreamRequest immediate_caller_id. 
*/ + public immediate_caller_id?: (query.IVTGateCallerID|null); - /** ColumnVindex columns. */ - public columns: string[]; + /** LoadDataStreamRequest target. */ + public target?: (query.ITarget|null); + + /** LoadDataStreamRequest query. */ + public query?: (query.IBoundQuery|null); + + /** LoadDataStreamRequest transaction_id. */ + public transaction_id: (number|Long); + + /** LoadDataStreamRequest options. */ + public options?: (query.IExecuteOptions|null); + + /** LoadDataStreamRequest lines. */ + public lines: string[]; /** - * Creates a new ColumnVindex instance using the specified properties. + * Creates a new LoadDataStreamRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ColumnVindex instance + * @returns LoadDataStreamRequest instance */ - public static create(properties?: vschema.IColumnVindex): vschema.ColumnVindex; + public static create(properties?: query.ILoadDataStreamRequest): query.LoadDataStreamRequest; /** - * Encodes the specified ColumnVindex message. Does not implicitly {@link vschema.ColumnVindex.verify|verify} messages. - * @param message ColumnVindex message or plain object to encode + * Encodes the specified LoadDataStreamRequest message. Does not implicitly {@link query.LoadDataStreamRequest.verify|verify} messages. + * @param message LoadDataStreamRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vschema.IColumnVindex, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.ILoadDataStreamRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ColumnVindex message, length delimited. Does not implicitly {@link vschema.ColumnVindex.verify|verify} messages. - * @param message ColumnVindex message or plain object to encode + * Encodes the specified LoadDataStreamRequest message, length delimited. 
Does not implicitly {@link query.LoadDataStreamRequest.verify|verify} messages. + * @param message LoadDataStreamRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vschema.IColumnVindex, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.ILoadDataStreamRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ColumnVindex message from the specified reader or buffer. + * Decodes a LoadDataStreamRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ColumnVindex + * @returns LoadDataStreamRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.ColumnVindex; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.LoadDataStreamRequest; /** - * Decodes a ColumnVindex message from the specified reader or buffer, length delimited. + * Decodes a LoadDataStreamRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ColumnVindex + * @returns LoadDataStreamRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.ColumnVindex; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.LoadDataStreamRequest; /** - * Verifies a ColumnVindex message. + * Verifies a LoadDataStreamRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ColumnVindex message from a plain object. Also converts values to their respective internal types. + * Creates a LoadDataStreamRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ColumnVindex + * @returns LoadDataStreamRequest */ - public static fromObject(object: { [k: string]: any }): vschema.ColumnVindex; + public static fromObject(object: { [k: string]: any }): query.LoadDataStreamRequest; /** - * Creates a plain object from a ColumnVindex message. Also converts values to other types if specified. - * @param message ColumnVindex + * Creates a plain object from a LoadDataStreamRequest message. Also converts values to other types if specified. + * @param message LoadDataStreamRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vschema.ColumnVindex, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.LoadDataStreamRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ColumnVindex to JSON. + * Converts this LoadDataStreamRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ColumnVindex + * Gets the default type url for LoadDataStreamRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an AutoIncrement. */ - interface IAutoIncrement { - - /** AutoIncrement column */ - column?: (string|null); + /** Properties of a LoadDataStreamResponse. 
*/ + interface ILoadDataStreamResponse { - /** AutoIncrement sequence */ - sequence?: (string|null); + /** LoadDataStreamResponse result */ + result?: (query.IQueryResult|null); } - /** Represents an AutoIncrement. */ - class AutoIncrement implements IAutoIncrement { + /** Represents a LoadDataStreamResponse. */ + class LoadDataStreamResponse implements ILoadDataStreamResponse { /** - * Constructs a new AutoIncrement. + * Constructs a new LoadDataStreamResponse. * @param [properties] Properties to set */ - constructor(properties?: vschema.IAutoIncrement); - - /** AutoIncrement column. */ - public column: string; + constructor(properties?: query.ILoadDataStreamResponse); - /** AutoIncrement sequence. */ - public sequence: string; + /** LoadDataStreamResponse result. */ + public result?: (query.IQueryResult|null); /** - * Creates a new AutoIncrement instance using the specified properties. + * Creates a new LoadDataStreamResponse instance using the specified properties. * @param [properties] Properties to set - * @returns AutoIncrement instance + * @returns LoadDataStreamResponse instance */ - public static create(properties?: vschema.IAutoIncrement): vschema.AutoIncrement; + public static create(properties?: query.ILoadDataStreamResponse): query.LoadDataStreamResponse; /** - * Encodes the specified AutoIncrement message. Does not implicitly {@link vschema.AutoIncrement.verify|verify} messages. - * @param message AutoIncrement message or plain object to encode + * Encodes the specified LoadDataStreamResponse message. Does not implicitly {@link query.LoadDataStreamResponse.verify|verify} messages. 
+ * @param message LoadDataStreamResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vschema.IAutoIncrement, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: query.ILoadDataStreamResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified AutoIncrement message, length delimited. Does not implicitly {@link vschema.AutoIncrement.verify|verify} messages. - * @param message AutoIncrement message or plain object to encode + * Encodes the specified LoadDataStreamResponse message, length delimited. Does not implicitly {@link query.LoadDataStreamResponse.verify|verify} messages. + * @param message LoadDataStreamResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vschema.IAutoIncrement, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: query.ILoadDataStreamResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an AutoIncrement message from the specified reader or buffer. + * Decodes a LoadDataStreamResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns AutoIncrement + * @returns LoadDataStreamResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.AutoIncrement; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.LoadDataStreamResponse; /** - * Decodes an AutoIncrement message from the specified reader or buffer, length delimited. + * Decodes a LoadDataStreamResponse message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns AutoIncrement + * @returns LoadDataStreamResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.AutoIncrement; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.LoadDataStreamResponse; /** - * Verifies an AutoIncrement message. + * Verifies a LoadDataStreamResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an AutoIncrement message from a plain object. Also converts values to their respective internal types. + * Creates a LoadDataStreamResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns AutoIncrement + * @returns LoadDataStreamResponse */ - public static fromObject(object: { [k: string]: any }): vschema.AutoIncrement; + public static fromObject(object: { [k: string]: any }): query.LoadDataStreamResponse; /** - * Creates a plain object from an AutoIncrement message. Also converts values to other types if specified. - * @param message AutoIncrement + * Creates a plain object from a LoadDataStreamResponse message. Also converts values to other types if specified. + * @param message LoadDataStreamResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vschema.AutoIncrement, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: query.LoadDataStreamResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this AutoIncrement to JSON. + * Converts this LoadDataStreamResponse to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for AutoIncrement + * Gets the default type url for LoadDataStreamResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } +} - /** Properties of a Column. */ - interface IColumn { +/** Namespace replicationdata. */ +export namespace replicationdata { - /** Column name */ - name?: (string|null); + /** Properties of a Status. */ + interface IStatus { - /** Column type */ - type?: (query.Type|null); + /** Status position */ + position?: (string|null); + + /** Status replication_lag_seconds */ + replication_lag_seconds?: (number|null); + + /** Status source_host */ + source_host?: (string|null); + + /** Status source_port */ + source_port?: (number|null); + + /** Status connect_retry */ + connect_retry?: (number|null); + + /** Status relay_log_position */ + relay_log_position?: (string|null); + + /** Status file_position */ + file_position?: (string|null); + + /** Status relay_log_source_binlog_equivalent_position */ + relay_log_source_binlog_equivalent_position?: (string|null); + + /** Status source_server_id */ + source_server_id?: (number|null); + + /** Status source_uuid */ + source_uuid?: (string|null); + + /** Status io_state */ + io_state?: (number|null); + + /** Status last_io_error */ + last_io_error?: (string|null); + + /** Status sql_state */ + sql_state?: (number|null); + + /** Status last_sql_error */ + last_sql_error?: (string|null); + + /** Status relay_log_file_position */ + relay_log_file_position?: (string|null); + + /** Status source_user */ + source_user?: (string|null); + + /** Status sql_delay */ + sql_delay?: (number|null); + + /** Status auto_position */ + auto_position?: (boolean|null); + + /** Status using_gtid */ + using_gtid?: (boolean|null); + + /** Status has_replication_filters */ + has_replication_filters?: 
(boolean|null); + + /** Status ssl_allowed */ + ssl_allowed?: (boolean|null); + + /** Status replication_lag_unknown */ + replication_lag_unknown?: (boolean|null); } - /** Represents a Column. */ - class Column implements IColumn { + /** Represents a Status. */ + class Status implements IStatus { /** - * Constructs a new Column. + * Constructs a new Status. * @param [properties] Properties to set */ - constructor(properties?: vschema.IColumn); + constructor(properties?: replicationdata.IStatus); - /** Column name. */ - public name: string; + /** Status position. */ + public position: string; - /** Column type. */ - public type: query.Type; + /** Status replication_lag_seconds. */ + public replication_lag_seconds: number; + + /** Status source_host. */ + public source_host: string; + + /** Status source_port. */ + public source_port: number; + + /** Status connect_retry. */ + public connect_retry: number; + + /** Status relay_log_position. */ + public relay_log_position: string; + + /** Status file_position. */ + public file_position: string; + + /** Status relay_log_source_binlog_equivalent_position. */ + public relay_log_source_binlog_equivalent_position: string; + + /** Status source_server_id. */ + public source_server_id: number; + + /** Status source_uuid. */ + public source_uuid: string; + + /** Status io_state. */ + public io_state: number; + + /** Status last_io_error. */ + public last_io_error: string; + + /** Status sql_state. */ + public sql_state: number; + + /** Status last_sql_error. */ + public last_sql_error: string; + + /** Status relay_log_file_position. */ + public relay_log_file_position: string; + + /** Status source_user. */ + public source_user: string; + + /** Status sql_delay. */ + public sql_delay: number; + + /** Status auto_position. */ + public auto_position: boolean; + + /** Status using_gtid. */ + public using_gtid: boolean; + + /** Status has_replication_filters. 
*/ + public has_replication_filters: boolean; + + /** Status ssl_allowed. */ + public ssl_allowed: boolean; + + /** Status replication_lag_unknown. */ + public replication_lag_unknown: boolean; /** - * Creates a new Column instance using the specified properties. + * Creates a new Status instance using the specified properties. * @param [properties] Properties to set - * @returns Column instance + * @returns Status instance */ - public static create(properties?: vschema.IColumn): vschema.Column; + public static create(properties?: replicationdata.IStatus): replicationdata.Status; /** - * Encodes the specified Column message. Does not implicitly {@link vschema.Column.verify|verify} messages. - * @param message Column message or plain object to encode + * Encodes the specified Status message. Does not implicitly {@link replicationdata.Status.verify|verify} messages. + * @param message Status message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vschema.IColumn, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: replicationdata.IStatus, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified Column message, length delimited. Does not implicitly {@link vschema.Column.verify|verify} messages. - * @param message Column message or plain object to encode + * Encodes the specified Status message, length delimited. Does not implicitly {@link replicationdata.Status.verify|verify} messages. + * @param message Status message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vschema.IColumn, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: replicationdata.IStatus, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a Column message from the specified reader or buffer. + * Decodes a Status message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns Column + * @returns Status * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.Column; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): replicationdata.Status; /** - * Decodes a Column message from the specified reader or buffer, length delimited. + * Decodes a Status message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns Column + * @returns Status * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.Column; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): replicationdata.Status; /** - * Verifies a Column message. + * Verifies a Status message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a Column message from a plain object. Also converts values to their respective internal types. + * Creates a Status message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns Column + * @returns Status */ - public static fromObject(object: { [k: string]: any }): vschema.Column; + public static fromObject(object: { [k: string]: any }): replicationdata.Status; /** - * Creates a plain object from a Column message. Also converts values to other types if specified. - * @param message Column + * Creates a plain object from a Status message. Also converts values to other types if specified. 
+ * @param message Status * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vschema.Column, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: replicationdata.Status, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this Column to JSON. + * Converts this Status to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for Column + * Gets the default type url for Status * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SrvVSchema. */ - interface ISrvVSchema { - - /** SrvVSchema keyspaces */ - keyspaces?: ({ [k: string]: vschema.IKeyspace }|null); + /** Properties of a StopReplicationStatus. */ + interface IStopReplicationStatus { - /** SrvVSchema routing_rules */ - routing_rules?: (vschema.IRoutingRules|null); + /** StopReplicationStatus before */ + before?: (replicationdata.IStatus|null); - /** SrvVSchema shard_routing_rules */ - shard_routing_rules?: (vschema.IShardRoutingRules|null); + /** StopReplicationStatus after */ + after?: (replicationdata.IStatus|null); } - /** Represents a SrvVSchema. */ - class SrvVSchema implements ISrvVSchema { + /** Represents a StopReplicationStatus. */ + class StopReplicationStatus implements IStopReplicationStatus { /** - * Constructs a new SrvVSchema. + * Constructs a new StopReplicationStatus. * @param [properties] Properties to set */ - constructor(properties?: vschema.ISrvVSchema); - - /** SrvVSchema keyspaces. */ - public keyspaces: { [k: string]: vschema.IKeyspace }; + constructor(properties?: replicationdata.IStopReplicationStatus); - /** SrvVSchema routing_rules. */ - public routing_rules?: (vschema.IRoutingRules|null); + /** StopReplicationStatus before. 
*/ + public before?: (replicationdata.IStatus|null); - /** SrvVSchema shard_routing_rules. */ - public shard_routing_rules?: (vschema.IShardRoutingRules|null); + /** StopReplicationStatus after. */ + public after?: (replicationdata.IStatus|null); /** - * Creates a new SrvVSchema instance using the specified properties. + * Creates a new StopReplicationStatus instance using the specified properties. * @param [properties] Properties to set - * @returns SrvVSchema instance + * @returns StopReplicationStatus instance */ - public static create(properties?: vschema.ISrvVSchema): vschema.SrvVSchema; + public static create(properties?: replicationdata.IStopReplicationStatus): replicationdata.StopReplicationStatus; /** - * Encodes the specified SrvVSchema message. Does not implicitly {@link vschema.SrvVSchema.verify|verify} messages. - * @param message SrvVSchema message or plain object to encode + * Encodes the specified StopReplicationStatus message. Does not implicitly {@link replicationdata.StopReplicationStatus.verify|verify} messages. + * @param message StopReplicationStatus message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vschema.ISrvVSchema, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: replicationdata.IStopReplicationStatus, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SrvVSchema message, length delimited. Does not implicitly {@link vschema.SrvVSchema.verify|verify} messages. - * @param message SrvVSchema message or plain object to encode + * Encodes the specified StopReplicationStatus message, length delimited. Does not implicitly {@link replicationdata.StopReplicationStatus.verify|verify} messages. 
+ * @param message StopReplicationStatus message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vschema.ISrvVSchema, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: replicationdata.IStopReplicationStatus, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SrvVSchema message from the specified reader or buffer. + * Decodes a StopReplicationStatus message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SrvVSchema + * @returns StopReplicationStatus * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.SrvVSchema; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): replicationdata.StopReplicationStatus; /** - * Decodes a SrvVSchema message from the specified reader or buffer, length delimited. + * Decodes a StopReplicationStatus message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns SrvVSchema + * @returns StopReplicationStatus * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.SrvVSchema; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): replicationdata.StopReplicationStatus; /** - * Verifies a SrvVSchema message. + * Verifies a StopReplicationStatus message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SrvVSchema message from a plain object. Also converts values to their respective internal types. + * Creates a StopReplicationStatus message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns SrvVSchema + * @returns StopReplicationStatus */ - public static fromObject(object: { [k: string]: any }): vschema.SrvVSchema; + public static fromObject(object: { [k: string]: any }): replicationdata.StopReplicationStatus; /** - * Creates a plain object from a SrvVSchema message. Also converts values to other types if specified. - * @param message SrvVSchema + * Creates a plain object from a StopReplicationStatus message. Also converts values to other types if specified. + * @param message StopReplicationStatus * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vschema.SrvVSchema, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: replicationdata.StopReplicationStatus, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SrvVSchema to JSON. + * Converts this StopReplicationStatus to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SrvVSchema + * Gets the default type url for StopReplicationStatus * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ShardRoutingRules. */ - interface IShardRoutingRules { + /** StopReplicationMode enum. 
*/ + enum StopReplicationMode { + IOANDSQLTHREAD = 0, + IOTHREADONLY = 1 + } - /** ShardRoutingRules rules */ - rules?: (vschema.IShardRoutingRule[]|null); + /** Properties of a PrimaryStatus. */ + interface IPrimaryStatus { + + /** PrimaryStatus position */ + position?: (string|null); + + /** PrimaryStatus file_position */ + file_position?: (string|null); } - /** Represents a ShardRoutingRules. */ - class ShardRoutingRules implements IShardRoutingRules { + /** Represents a PrimaryStatus. */ + class PrimaryStatus implements IPrimaryStatus { /** - * Constructs a new ShardRoutingRules. + * Constructs a new PrimaryStatus. * @param [properties] Properties to set */ - constructor(properties?: vschema.IShardRoutingRules); + constructor(properties?: replicationdata.IPrimaryStatus); - /** ShardRoutingRules rules. */ - public rules: vschema.IShardRoutingRule[]; + /** PrimaryStatus position. */ + public position: string; + + /** PrimaryStatus file_position. */ + public file_position: string; /** - * Creates a new ShardRoutingRules instance using the specified properties. + * Creates a new PrimaryStatus instance using the specified properties. * @param [properties] Properties to set - * @returns ShardRoutingRules instance + * @returns PrimaryStatus instance */ - public static create(properties?: vschema.IShardRoutingRules): vschema.ShardRoutingRules; + public static create(properties?: replicationdata.IPrimaryStatus): replicationdata.PrimaryStatus; /** - * Encodes the specified ShardRoutingRules message. Does not implicitly {@link vschema.ShardRoutingRules.verify|verify} messages. - * @param message ShardRoutingRules message or plain object to encode + * Encodes the specified PrimaryStatus message. Does not implicitly {@link replicationdata.PrimaryStatus.verify|verify} messages. 
+ * @param message PrimaryStatus message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vschema.IShardRoutingRules, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: replicationdata.IPrimaryStatus, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ShardRoutingRules message, length delimited. Does not implicitly {@link vschema.ShardRoutingRules.verify|verify} messages. - * @param message ShardRoutingRules message or plain object to encode + * Encodes the specified PrimaryStatus message, length delimited. Does not implicitly {@link replicationdata.PrimaryStatus.verify|verify} messages. + * @param message PrimaryStatus message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vschema.IShardRoutingRules, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: replicationdata.IPrimaryStatus, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ShardRoutingRules message from the specified reader or buffer. + * Decodes a PrimaryStatus message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ShardRoutingRules + * @returns PrimaryStatus * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.ShardRoutingRules; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): replicationdata.PrimaryStatus; /** - * Decodes a ShardRoutingRules message from the specified reader or buffer, length delimited. + * Decodes a PrimaryStatus message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns ShardRoutingRules + * @returns PrimaryStatus * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.ShardRoutingRules; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): replicationdata.PrimaryStatus; /** - * Verifies a ShardRoutingRules message. + * Verifies a PrimaryStatus message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ShardRoutingRules message from a plain object. Also converts values to their respective internal types. + * Creates a PrimaryStatus message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ShardRoutingRules + * @returns PrimaryStatus */ - public static fromObject(object: { [k: string]: any }): vschema.ShardRoutingRules; + public static fromObject(object: { [k: string]: any }): replicationdata.PrimaryStatus; /** - * Creates a plain object from a ShardRoutingRules message. Also converts values to other types if specified. - * @param message ShardRoutingRules + * Creates a plain object from a PrimaryStatus message. Also converts values to other types if specified. + * @param message PrimaryStatus * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vschema.ShardRoutingRules, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: replicationdata.PrimaryStatus, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ShardRoutingRules to JSON. + * Converts this PrimaryStatus to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ShardRoutingRules + * Gets the default type url for PrimaryStatus * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ShardRoutingRule. */ - interface IShardRoutingRule { + /** Properties of a FullStatus. */ + interface IFullStatus { - /** ShardRoutingRule from_keyspace */ - from_keyspace?: (string|null); + /** FullStatus server_id */ + server_id?: (number|null); - /** ShardRoutingRule to_keyspace */ - to_keyspace?: (string|null); + /** FullStatus server_uuid */ + server_uuid?: (string|null); - /** ShardRoutingRule shard */ - shard?: (string|null); + /** FullStatus replication_status */ + replication_status?: (replicationdata.IStatus|null); + + /** FullStatus primary_status */ + primary_status?: (replicationdata.IPrimaryStatus|null); + + /** FullStatus gtid_purged */ + gtid_purged?: (string|null); + + /** FullStatus version */ + version?: (string|null); + + /** FullStatus version_comment */ + version_comment?: (string|null); + + /** FullStatus read_only */ + read_only?: (boolean|null); + + /** FullStatus gtid_mode */ + gtid_mode?: (string|null); + + /** FullStatus binlog_format */ + binlog_format?: (string|null); + + /** FullStatus binlog_row_image */ + binlog_row_image?: (string|null); + + /** FullStatus log_bin_enabled */ + log_bin_enabled?: (boolean|null); + + /** FullStatus log_replica_updates */ + log_replica_updates?: (boolean|null); + + /** FullStatus semi_sync_primary_enabled */ + semi_sync_primary_enabled?: (boolean|null); + + /** FullStatus semi_sync_replica_enabled */ + semi_sync_replica_enabled?: (boolean|null); + + /** FullStatus semi_sync_primary_status */ + semi_sync_primary_status?: (boolean|null); + + /** FullStatus semi_sync_replica_status */ + semi_sync_replica_status?: (boolean|null); + + /** 
FullStatus semi_sync_primary_clients */ + semi_sync_primary_clients?: (number|null); + + /** FullStatus semi_sync_primary_timeout */ + semi_sync_primary_timeout?: (number|Long|null); + + /** FullStatus semi_sync_wait_for_replica_count */ + semi_sync_wait_for_replica_count?: (number|null); + + /** FullStatus super_read_only */ + super_read_only?: (boolean|null); } - /** Represents a ShardRoutingRule. */ - class ShardRoutingRule implements IShardRoutingRule { + /** Represents a FullStatus. */ + class FullStatus implements IFullStatus { /** - * Constructs a new ShardRoutingRule. + * Constructs a new FullStatus. * @param [properties] Properties to set */ - constructor(properties?: vschema.IShardRoutingRule); + constructor(properties?: replicationdata.IFullStatus); - /** ShardRoutingRule from_keyspace. */ - public from_keyspace: string; + /** FullStatus server_id. */ + public server_id: number; - /** ShardRoutingRule to_keyspace. */ - public to_keyspace: string; + /** FullStatus server_uuid. */ + public server_uuid: string; - /** ShardRoutingRule shard. */ - public shard: string; + /** FullStatus replication_status. */ + public replication_status?: (replicationdata.IStatus|null); + + /** FullStatus primary_status. */ + public primary_status?: (replicationdata.IPrimaryStatus|null); + + /** FullStatus gtid_purged. */ + public gtid_purged: string; + + /** FullStatus version. */ + public version: string; + + /** FullStatus version_comment. */ + public version_comment: string; + + /** FullStatus read_only. */ + public read_only: boolean; + + /** FullStatus gtid_mode. */ + public gtid_mode: string; + + /** FullStatus binlog_format. */ + public binlog_format: string; + + /** FullStatus binlog_row_image. */ + public binlog_row_image: string; + + /** FullStatus log_bin_enabled. */ + public log_bin_enabled: boolean; + + /** FullStatus log_replica_updates. */ + public log_replica_updates: boolean; + + /** FullStatus semi_sync_primary_enabled. 
*/ + public semi_sync_primary_enabled: boolean; + + /** FullStatus semi_sync_replica_enabled. */ + public semi_sync_replica_enabled: boolean; + + /** FullStatus semi_sync_primary_status. */ + public semi_sync_primary_status: boolean; + + /** FullStatus semi_sync_replica_status. */ + public semi_sync_replica_status: boolean; + + /** FullStatus semi_sync_primary_clients. */ + public semi_sync_primary_clients: number; + + /** FullStatus semi_sync_primary_timeout. */ + public semi_sync_primary_timeout: (number|Long); + + /** FullStatus semi_sync_wait_for_replica_count. */ + public semi_sync_wait_for_replica_count: number; + + /** FullStatus super_read_only. */ + public super_read_only: boolean; /** - * Creates a new ShardRoutingRule instance using the specified properties. + * Creates a new FullStatus instance using the specified properties. * @param [properties] Properties to set - * @returns ShardRoutingRule instance + * @returns FullStatus instance */ - public static create(properties?: vschema.IShardRoutingRule): vschema.ShardRoutingRule; + public static create(properties?: replicationdata.IFullStatus): replicationdata.FullStatus; /** - * Encodes the specified ShardRoutingRule message. Does not implicitly {@link vschema.ShardRoutingRule.verify|verify} messages. - * @param message ShardRoutingRule message or plain object to encode + * Encodes the specified FullStatus message. Does not implicitly {@link replicationdata.FullStatus.verify|verify} messages. + * @param message FullStatus message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vschema.IShardRoutingRule, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: replicationdata.IFullStatus, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ShardRoutingRule message, length delimited. Does not implicitly {@link vschema.ShardRoutingRule.verify|verify} messages. 
- * @param message ShardRoutingRule message or plain object to encode + * Encodes the specified FullStatus message, length delimited. Does not implicitly {@link replicationdata.FullStatus.verify|verify} messages. + * @param message FullStatus message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vschema.IShardRoutingRule, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: replicationdata.IFullStatus, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ShardRoutingRule message from the specified reader or buffer. + * Decodes a FullStatus message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ShardRoutingRule + * @returns FullStatus * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.ShardRoutingRule; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): replicationdata.FullStatus; /** - * Decodes a ShardRoutingRule message from the specified reader or buffer, length delimited. + * Decodes a FullStatus message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ShardRoutingRule + * @returns FullStatus * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.ShardRoutingRule; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): replicationdata.FullStatus; /** - * Verifies a ShardRoutingRule message. + * Verifies a FullStatus message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ShardRoutingRule message from a plain object. Also converts values to their respective internal types. + * Creates a FullStatus message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ShardRoutingRule + * @returns FullStatus */ - public static fromObject(object: { [k: string]: any }): vschema.ShardRoutingRule; + public static fromObject(object: { [k: string]: any }): replicationdata.FullStatus; /** - * Creates a plain object from a ShardRoutingRule message. Also converts values to other types if specified. - * @param message ShardRoutingRule + * Creates a plain object from a FullStatus message. Also converts values to other types if specified. + * @param message FullStatus * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vschema.ShardRoutingRule, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: replicationdata.FullStatus, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ShardRoutingRule to JSON. + * Converts this FullStatus to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ShardRoutingRule + * Gets the default type url for FullStatus * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ @@ -40091,19230 +41022,27220 @@ export namespace vschema { } } -/** Namespace vtctldata. */ -export namespace vtctldata { - - /** Properties of an ExecuteVtctlCommandRequest. */ - interface IExecuteVtctlCommandRequest { +/** Namespace vschema. 
*/ +export namespace vschema { - /** ExecuteVtctlCommandRequest args */ - args?: (string[]|null); + /** Properties of a RoutingRules. */ + interface IRoutingRules { - /** ExecuteVtctlCommandRequest action_timeout */ - action_timeout?: (number|Long|null); + /** RoutingRules rules */ + rules?: (vschema.IRoutingRule[]|null); } - /** Represents an ExecuteVtctlCommandRequest. */ - class ExecuteVtctlCommandRequest implements IExecuteVtctlCommandRequest { + /** Represents a RoutingRules. */ + class RoutingRules implements IRoutingRules { /** - * Constructs a new ExecuteVtctlCommandRequest. + * Constructs a new RoutingRules. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IExecuteVtctlCommandRequest); - - /** ExecuteVtctlCommandRequest args. */ - public args: string[]; + constructor(properties?: vschema.IRoutingRules); - /** ExecuteVtctlCommandRequest action_timeout. */ - public action_timeout: (number|Long); + /** RoutingRules rules. */ + public rules: vschema.IRoutingRule[]; /** - * Creates a new ExecuteVtctlCommandRequest instance using the specified properties. + * Creates a new RoutingRules instance using the specified properties. * @param [properties] Properties to set - * @returns ExecuteVtctlCommandRequest instance + * @returns RoutingRules instance */ - public static create(properties?: vtctldata.IExecuteVtctlCommandRequest): vtctldata.ExecuteVtctlCommandRequest; + public static create(properties?: vschema.IRoutingRules): vschema.RoutingRules; /** - * Encodes the specified ExecuteVtctlCommandRequest message. Does not implicitly {@link vtctldata.ExecuteVtctlCommandRequest.verify|verify} messages. - * @param message ExecuteVtctlCommandRequest message or plain object to encode + * Encodes the specified RoutingRules message. Does not implicitly {@link vschema.RoutingRules.verify|verify} messages. 
+ * @param message RoutingRules message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IExecuteVtctlCommandRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vschema.IRoutingRules, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ExecuteVtctlCommandRequest message, length delimited. Does not implicitly {@link vtctldata.ExecuteVtctlCommandRequest.verify|verify} messages. - * @param message ExecuteVtctlCommandRequest message or plain object to encode + * Encodes the specified RoutingRules message, length delimited. Does not implicitly {@link vschema.RoutingRules.verify|verify} messages. + * @param message RoutingRules message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IExecuteVtctlCommandRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vschema.IRoutingRules, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an ExecuteVtctlCommandRequest message from the specified reader or buffer. + * Decodes a RoutingRules message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ExecuteVtctlCommandRequest + * @returns RoutingRules * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ExecuteVtctlCommandRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.RoutingRules; /** - * Decodes an ExecuteVtctlCommandRequest message from the specified reader or buffer, length delimited. + * Decodes a RoutingRules message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns ExecuteVtctlCommandRequest + * @returns RoutingRules * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ExecuteVtctlCommandRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.RoutingRules; /** - * Verifies an ExecuteVtctlCommandRequest message. + * Verifies a RoutingRules message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an ExecuteVtctlCommandRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RoutingRules message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ExecuteVtctlCommandRequest + * @returns RoutingRules */ - public static fromObject(object: { [k: string]: any }): vtctldata.ExecuteVtctlCommandRequest; + public static fromObject(object: { [k: string]: any }): vschema.RoutingRules; /** - * Creates a plain object from an ExecuteVtctlCommandRequest message. Also converts values to other types if specified. - * @param message ExecuteVtctlCommandRequest + * Creates a plain object from a RoutingRules message. Also converts values to other types if specified. + * @param message RoutingRules * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ExecuteVtctlCommandRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vschema.RoutingRules, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ExecuteVtctlCommandRequest to JSON. + * Converts this RoutingRules to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ExecuteVtctlCommandRequest + * Gets the default type url for RoutingRules * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an ExecuteVtctlCommandResponse. */ - interface IExecuteVtctlCommandResponse { + /** Properties of a RoutingRule. */ + interface IRoutingRule { - /** ExecuteVtctlCommandResponse event */ - event?: (logutil.IEvent|null); + /** RoutingRule from_table */ + from_table?: (string|null); + + /** RoutingRule to_tables */ + to_tables?: (string[]|null); } - /** Represents an ExecuteVtctlCommandResponse. */ - class ExecuteVtctlCommandResponse implements IExecuteVtctlCommandResponse { + /** Represents a RoutingRule. */ + class RoutingRule implements IRoutingRule { /** - * Constructs a new ExecuteVtctlCommandResponse. + * Constructs a new RoutingRule. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IExecuteVtctlCommandResponse); + constructor(properties?: vschema.IRoutingRule); - /** ExecuteVtctlCommandResponse event. */ - public event?: (logutil.IEvent|null); + /** RoutingRule from_table. */ + public from_table: string; + + /** RoutingRule to_tables. */ + public to_tables: string[]; /** - * Creates a new ExecuteVtctlCommandResponse instance using the specified properties. + * Creates a new RoutingRule instance using the specified properties. * @param [properties] Properties to set - * @returns ExecuteVtctlCommandResponse instance + * @returns RoutingRule instance */ - public static create(properties?: vtctldata.IExecuteVtctlCommandResponse): vtctldata.ExecuteVtctlCommandResponse; + public static create(properties?: vschema.IRoutingRule): vschema.RoutingRule; /** - * Encodes the specified ExecuteVtctlCommandResponse message. 
Does not implicitly {@link vtctldata.ExecuteVtctlCommandResponse.verify|verify} messages. - * @param message ExecuteVtctlCommandResponse message or plain object to encode + * Encodes the specified RoutingRule message. Does not implicitly {@link vschema.RoutingRule.verify|verify} messages. + * @param message RoutingRule message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IExecuteVtctlCommandResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vschema.IRoutingRule, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ExecuteVtctlCommandResponse message, length delimited. Does not implicitly {@link vtctldata.ExecuteVtctlCommandResponse.verify|verify} messages. - * @param message ExecuteVtctlCommandResponse message or plain object to encode + * Encodes the specified RoutingRule message, length delimited. Does not implicitly {@link vschema.RoutingRule.verify|verify} messages. + * @param message RoutingRule message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IExecuteVtctlCommandResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vschema.IRoutingRule, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an ExecuteVtctlCommandResponse message from the specified reader or buffer. + * Decodes a RoutingRule message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ExecuteVtctlCommandResponse + * @returns RoutingRule * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ExecuteVtctlCommandResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.RoutingRule; /** - * Decodes an ExecuteVtctlCommandResponse message from the specified reader or buffer, length delimited. + * Decodes a RoutingRule message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ExecuteVtctlCommandResponse + * @returns RoutingRule * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ExecuteVtctlCommandResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.RoutingRule; /** - * Verifies an ExecuteVtctlCommandResponse message. + * Verifies a RoutingRule message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an ExecuteVtctlCommandResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RoutingRule message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ExecuteVtctlCommandResponse + * @returns RoutingRule */ - public static fromObject(object: { [k: string]: any }): vtctldata.ExecuteVtctlCommandResponse; + public static fromObject(object: { [k: string]: any }): vschema.RoutingRule; /** - * Creates a plain object from an ExecuteVtctlCommandResponse message. Also converts values to other types if specified. - * @param message ExecuteVtctlCommandResponse + * Creates a plain object from a RoutingRule message. Also converts values to other types if specified. + * @param message RoutingRule * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ExecuteVtctlCommandResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vschema.RoutingRule, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ExecuteVtctlCommandResponse to JSON. + * Converts this RoutingRule to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ExecuteVtctlCommandResponse + * Gets the default type url for RoutingRule * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** MaterializationIntent enum. */ - enum MaterializationIntent { - CUSTOM = 0, - MOVETABLES = 1, - CREATELOOKUPINDEX = 2 - } + /** Properties of a Keyspace. */ + interface IKeyspace { - /** Properties of a TableMaterializeSettings. 
*/ - interface ITableMaterializeSettings { + /** Keyspace sharded */ + sharded?: (boolean|null); - /** TableMaterializeSettings target_table */ - target_table?: (string|null); + /** Keyspace vindexes */ + vindexes?: ({ [k: string]: vschema.IVindex }|null); - /** TableMaterializeSettings source_expression */ - source_expression?: (string|null); + /** Keyspace tables */ + tables?: ({ [k: string]: vschema.ITable }|null); - /** TableMaterializeSettings create_ddl */ - create_ddl?: (string|null); + /** Keyspace require_explicit_routing */ + require_explicit_routing?: (boolean|null); + + /** Keyspace foreign_key_mode */ + foreign_key_mode?: (vschema.Keyspace.ForeignKeyMode|null); + + /** Keyspace cross_tablet */ + cross_tablet?: (boolean|null); + + /** Keyspace attach_enable */ + attach_enable?: (boolean|null); + + /** Keyspace attach_to */ + attach_to?: (string|null); } - /** Represents a TableMaterializeSettings. */ - class TableMaterializeSettings implements ITableMaterializeSettings { + /** Represents a Keyspace. */ + class Keyspace implements IKeyspace { /** - * Constructs a new TableMaterializeSettings. + * Constructs a new Keyspace. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ITableMaterializeSettings); + constructor(properties?: vschema.IKeyspace); - /** TableMaterializeSettings target_table. */ - public target_table: string; + /** Keyspace sharded. */ + public sharded: boolean; - /** TableMaterializeSettings source_expression. */ - public source_expression: string; + /** Keyspace vindexes. */ + public vindexes: { [k: string]: vschema.IVindex }; - /** TableMaterializeSettings create_ddl. */ - public create_ddl: string; + /** Keyspace tables. */ + public tables: { [k: string]: vschema.ITable }; + + /** Keyspace require_explicit_routing. */ + public require_explicit_routing: boolean; + + /** Keyspace foreign_key_mode. */ + public foreign_key_mode: vschema.Keyspace.ForeignKeyMode; + + /** Keyspace cross_tablet. 
*/ + public cross_tablet: boolean; + + /** Keyspace attach_enable. */ + public attach_enable: boolean; + + /** Keyspace attach_to. */ + public attach_to: string; /** - * Creates a new TableMaterializeSettings instance using the specified properties. + * Creates a new Keyspace instance using the specified properties. * @param [properties] Properties to set - * @returns TableMaterializeSettings instance + * @returns Keyspace instance */ - public static create(properties?: vtctldata.ITableMaterializeSettings): vtctldata.TableMaterializeSettings; + public static create(properties?: vschema.IKeyspace): vschema.Keyspace; /** - * Encodes the specified TableMaterializeSettings message. Does not implicitly {@link vtctldata.TableMaterializeSettings.verify|verify} messages. - * @param message TableMaterializeSettings message or plain object to encode + * Encodes the specified Keyspace message. Does not implicitly {@link vschema.Keyspace.verify|verify} messages. + * @param message Keyspace message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ITableMaterializeSettings, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vschema.IKeyspace, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified TableMaterializeSettings message, length delimited. Does not implicitly {@link vtctldata.TableMaterializeSettings.verify|verify} messages. - * @param message TableMaterializeSettings message or plain object to encode + * Encodes the specified Keyspace message, length delimited. Does not implicitly {@link vschema.Keyspace.verify|verify} messages. 
+ * @param message Keyspace message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ITableMaterializeSettings, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vschema.IKeyspace, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a TableMaterializeSettings message from the specified reader or buffer. + * Decodes a Keyspace message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns TableMaterializeSettings + * @returns Keyspace * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.TableMaterializeSettings; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.Keyspace; /** - * Decodes a TableMaterializeSettings message from the specified reader or buffer, length delimited. + * Decodes a Keyspace message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns TableMaterializeSettings + * @returns Keyspace * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.TableMaterializeSettings; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.Keyspace; /** - * Verifies a TableMaterializeSettings message. + * Verifies a Keyspace message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a TableMaterializeSettings message from a plain object. 
Also converts values to their respective internal types. + * Creates a Keyspace message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns TableMaterializeSettings + * @returns Keyspace */ - public static fromObject(object: { [k: string]: any }): vtctldata.TableMaterializeSettings; + public static fromObject(object: { [k: string]: any }): vschema.Keyspace; /** - * Creates a plain object from a TableMaterializeSettings message. Also converts values to other types if specified. - * @param message TableMaterializeSettings + * Creates a plain object from a Keyspace message. Also converts values to other types if specified. + * @param message Keyspace * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.TableMaterializeSettings, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vschema.Keyspace, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this TableMaterializeSettings to JSON. + * Converts this Keyspace to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for TableMaterializeSettings + * Gets the default type url for Keyspace * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a MaterializeSettings. 
*/ - interface IMaterializeSettings { - - /** MaterializeSettings workflow */ - workflow?: (string|null); - - /** MaterializeSettings source_keyspace */ - source_keyspace?: (string|null); - - /** MaterializeSettings target_keyspace */ - target_keyspace?: (string|null); - - /** MaterializeSettings stop_after_copy */ - stop_after_copy?: (boolean|null); - - /** MaterializeSettings table_settings */ - table_settings?: (vtctldata.ITableMaterializeSettings[]|null); - - /** MaterializeSettings cell */ - cell?: (string|null); - - /** MaterializeSettings tablet_types */ - tablet_types?: (string|null); - - /** MaterializeSettings external_cluster */ - external_cluster?: (string|null); - - /** MaterializeSettings materialization_intent */ - materialization_intent?: (vtctldata.MaterializationIntent|null); + namespace Keyspace { - /** MaterializeSettings source_time_zone */ - source_time_zone?: (string|null); + /** ForeignKeyMode enum. */ + enum ForeignKeyMode { + unspecified = 0, + disallow = 1, + unmanaged = 2, + managed = 3 + } + } - /** MaterializeSettings target_time_zone */ - target_time_zone?: (string|null); + /** Properties of a Vindex. */ + interface IVindex { - /** MaterializeSettings source_shards */ - source_shards?: (string[]|null); + /** Vindex type */ + type?: (string|null); - /** MaterializeSettings on_ddl */ - on_ddl?: (string|null); + /** Vindex params */ + params?: ({ [k: string]: string }|null); - /** MaterializeSettings defer_secondary_keys */ - defer_secondary_keys?: (boolean|null); + /** Vindex owner */ + owner?: (string|null); } - /** Represents a MaterializeSettings. */ - class MaterializeSettings implements IMaterializeSettings { + /** Represents a Vindex. */ + class Vindex implements IVindex { /** - * Constructs a new MaterializeSettings. + * Constructs a new Vindex. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IMaterializeSettings); - - /** MaterializeSettings workflow. 
*/ - public workflow: string; - - /** MaterializeSettings source_keyspace. */ - public source_keyspace: string; - - /** MaterializeSettings target_keyspace. */ - public target_keyspace: string; - - /** MaterializeSettings stop_after_copy. */ - public stop_after_copy: boolean; - - /** MaterializeSettings table_settings. */ - public table_settings: vtctldata.ITableMaterializeSettings[]; - - /** MaterializeSettings cell. */ - public cell: string; - - /** MaterializeSettings tablet_types. */ - public tablet_types: string; - - /** MaterializeSettings external_cluster. */ - public external_cluster: string; - - /** MaterializeSettings materialization_intent. */ - public materialization_intent: vtctldata.MaterializationIntent; - - /** MaterializeSettings source_time_zone. */ - public source_time_zone: string; - - /** MaterializeSettings target_time_zone. */ - public target_time_zone: string; + constructor(properties?: vschema.IVindex); - /** MaterializeSettings source_shards. */ - public source_shards: string[]; + /** Vindex type. */ + public type: string; - /** MaterializeSettings on_ddl. */ - public on_ddl: string; + /** Vindex params. */ + public params: { [k: string]: string }; - /** MaterializeSettings defer_secondary_keys. */ - public defer_secondary_keys: boolean; + /** Vindex owner. */ + public owner: string; /** - * Creates a new MaterializeSettings instance using the specified properties. + * Creates a new Vindex instance using the specified properties. * @param [properties] Properties to set - * @returns MaterializeSettings instance + * @returns Vindex instance */ - public static create(properties?: vtctldata.IMaterializeSettings): vtctldata.MaterializeSettings; + public static create(properties?: vschema.IVindex): vschema.Vindex; /** - * Encodes the specified MaterializeSettings message. Does not implicitly {@link vtctldata.MaterializeSettings.verify|verify} messages. 
- * @param message MaterializeSettings message or plain object to encode + * Encodes the specified Vindex message. Does not implicitly {@link vschema.Vindex.verify|verify} messages. + * @param message Vindex message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IMaterializeSettings, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vschema.IVindex, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified MaterializeSettings message, length delimited. Does not implicitly {@link vtctldata.MaterializeSettings.verify|verify} messages. - * @param message MaterializeSettings message or plain object to encode + * Encodes the specified Vindex message, length delimited. Does not implicitly {@link vschema.Vindex.verify|verify} messages. + * @param message Vindex message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IMaterializeSettings, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vschema.IVindex, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a MaterializeSettings message from the specified reader or buffer. + * Decodes a Vindex message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns MaterializeSettings + * @returns Vindex * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MaterializeSettings; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.Vindex; /** - * Decodes a MaterializeSettings message from the specified reader or buffer, length delimited. 
+ * Decodes a Vindex message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns MaterializeSettings + * @returns Vindex * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MaterializeSettings; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.Vindex; /** - * Verifies a MaterializeSettings message. + * Verifies a Vindex message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a MaterializeSettings message from a plain object. Also converts values to their respective internal types. + * Creates a Vindex message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns MaterializeSettings + * @returns Vindex */ - public static fromObject(object: { [k: string]: any }): vtctldata.MaterializeSettings; + public static fromObject(object: { [k: string]: any }): vschema.Vindex; /** - * Creates a plain object from a MaterializeSettings message. Also converts values to other types if specified. - * @param message MaterializeSettings + * Creates a plain object from a Vindex message. Also converts values to other types if specified. + * @param message Vindex * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.MaterializeSettings, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vschema.Vindex, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this MaterializeSettings to JSON. + * Converts this Vindex to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for MaterializeSettings + * Gets the default type url for Vindex * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a Keyspace. */ - interface IKeyspace { + /** Properties of a Table. */ + interface ITable { - /** Keyspace name */ - name?: (string|null); + /** Table type */ + type?: (string|null); - /** Keyspace keyspace */ - keyspace?: (topodata.IKeyspace|null); + /** Table column_vindexes */ + column_vindexes?: (vschema.IColumnVindex[]|null); + + /** Table auto_increment */ + auto_increment?: (vschema.IAutoIncrement|null); + + /** Table columns */ + columns?: (vschema.IColumn[]|null); + + /** Table pinned */ + pinned?: (string|null); + + /** Table column_list_authoritative */ + column_list_authoritative?: (boolean|null); + + /** Table source */ + source?: (string|null); } - /** Represents a Keyspace. */ - class Keyspace implements IKeyspace { + /** Represents a Table. */ + class Table implements ITable { /** - * Constructs a new Keyspace. + * Constructs a new Table. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IKeyspace); + constructor(properties?: vschema.ITable); - /** Keyspace name. */ - public name: string; + /** Table type. */ + public type: string; - /** Keyspace keyspace. */ - public keyspace?: (topodata.IKeyspace|null); + /** Table column_vindexes. */ + public column_vindexes: vschema.IColumnVindex[]; + + /** Table auto_increment. */ + public auto_increment?: (vschema.IAutoIncrement|null); + + /** Table columns. */ + public columns: vschema.IColumn[]; + + /** Table pinned. */ + public pinned: string; + + /** Table column_list_authoritative. */ + public column_list_authoritative: boolean; + + /** Table source. 
*/ + public source: string; /** - * Creates a new Keyspace instance using the specified properties. + * Creates a new Table instance using the specified properties. * @param [properties] Properties to set - * @returns Keyspace instance + * @returns Table instance */ - public static create(properties?: vtctldata.IKeyspace): vtctldata.Keyspace; + public static create(properties?: vschema.ITable): vschema.Table; /** - * Encodes the specified Keyspace message. Does not implicitly {@link vtctldata.Keyspace.verify|verify} messages. - * @param message Keyspace message or plain object to encode + * Encodes the specified Table message. Does not implicitly {@link vschema.Table.verify|verify} messages. + * @param message Table message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IKeyspace, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vschema.ITable, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified Keyspace message, length delimited. Does not implicitly {@link vtctldata.Keyspace.verify|verify} messages. - * @param message Keyspace message or plain object to encode + * Encodes the specified Table message, length delimited. Does not implicitly {@link vschema.Table.verify|verify} messages. + * @param message Table message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IKeyspace, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vschema.ITable, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a Keyspace message from the specified reader or buffer. + * Decodes a Table message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns Keyspace + * @returns Table * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.Keyspace; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.Table; /** - * Decodes a Keyspace message from the specified reader or buffer, length delimited. + * Decodes a Table message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns Keyspace + * @returns Table * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.Keyspace; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.Table; /** - * Verifies a Keyspace message. + * Verifies a Table message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a Keyspace message from a plain object. Also converts values to their respective internal types. + * Creates a Table message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns Keyspace + * @returns Table */ - public static fromObject(object: { [k: string]: any }): vtctldata.Keyspace; + public static fromObject(object: { [k: string]: any }): vschema.Table; /** - * Creates a plain object from a Keyspace message. Also converts values to other types if specified. - * @param message Keyspace + * Creates a plain object from a Table message. Also converts values to other types if specified. 
+ * @param message Table * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.Keyspace, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vschema.Table, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this Keyspace to JSON. + * Converts this Table to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for Keyspace + * Gets the default type url for Table * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a Shard. */ - interface IShard { + /** Properties of a ColumnVindex. */ + interface IColumnVindex { - /** Shard keyspace */ - keyspace?: (string|null); + /** ColumnVindex column */ + column?: (string|null); - /** Shard name */ + /** ColumnVindex name */ name?: (string|null); - /** Shard shard */ - shard?: (topodata.IShard|null); + /** ColumnVindex columns */ + columns?: (string[]|null); } - /** Represents a Shard. */ - class Shard implements IShard { + /** Represents a ColumnVindex. */ + class ColumnVindex implements IColumnVindex { /** - * Constructs a new Shard. + * Constructs a new ColumnVindex. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IShard); + constructor(properties?: vschema.IColumnVindex); - /** Shard keyspace. */ - public keyspace: string; + /** ColumnVindex column. */ + public column: string; - /** Shard name. */ + /** ColumnVindex name. */ public name: string; - /** Shard shard. */ - public shard?: (topodata.IShard|null); + /** ColumnVindex columns. */ + public columns: string[]; /** - * Creates a new Shard instance using the specified properties. + * Creates a new ColumnVindex instance using the specified properties. 
* @param [properties] Properties to set - * @returns Shard instance + * @returns ColumnVindex instance */ - public static create(properties?: vtctldata.IShard): vtctldata.Shard; + public static create(properties?: vschema.IColumnVindex): vschema.ColumnVindex; /** - * Encodes the specified Shard message. Does not implicitly {@link vtctldata.Shard.verify|verify} messages. - * @param message Shard message or plain object to encode + * Encodes the specified ColumnVindex message. Does not implicitly {@link vschema.ColumnVindex.verify|verify} messages. + * @param message ColumnVindex message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IShard, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vschema.IColumnVindex, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified Shard message, length delimited. Does not implicitly {@link vtctldata.Shard.verify|verify} messages. - * @param message Shard message or plain object to encode + * Encodes the specified ColumnVindex message, length delimited. Does not implicitly {@link vschema.ColumnVindex.verify|verify} messages. + * @param message ColumnVindex message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IShard, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vschema.IColumnVindex, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a Shard message from the specified reader or buffer. + * Decodes a ColumnVindex message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns Shard + * @returns ColumnVindex * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.Shard; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.ColumnVindex; /** - * Decodes a Shard message from the specified reader or buffer, length delimited. + * Decodes a ColumnVindex message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns Shard + * @returns ColumnVindex * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.Shard; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.ColumnVindex; /** - * Verifies a Shard message. + * Verifies a ColumnVindex message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a Shard message from a plain object. Also converts values to their respective internal types. + * Creates a ColumnVindex message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns Shard + * @returns ColumnVindex */ - public static fromObject(object: { [k: string]: any }): vtctldata.Shard; + public static fromObject(object: { [k: string]: any }): vschema.ColumnVindex; /** - * Creates a plain object from a Shard message. Also converts values to other types if specified. - * @param message Shard + * Creates a plain object from a ColumnVindex message. 
Also converts values to other types if specified. + * @param message ColumnVindex * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.Shard, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vschema.ColumnVindex, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this Shard to JSON. + * Converts this ColumnVindex to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for Shard + * Gets the default type url for ColumnVindex * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a Workflow. */ - interface IWorkflow { - - /** Workflow name */ - name?: (string|null); - - /** Workflow source */ - source?: (vtctldata.Workflow.IReplicationLocation|null); - - /** Workflow target */ - target?: (vtctldata.Workflow.IReplicationLocation|null); - - /** Workflow max_v_replication_lag */ - max_v_replication_lag?: (number|Long|null); - - /** Workflow shard_streams */ - shard_streams?: ({ [k: string]: vtctldata.Workflow.IShardStream }|null); + /** Properties of an AutoIncrement. */ + interface IAutoIncrement { - /** Workflow workflow_type */ - workflow_type?: (string|null); + /** AutoIncrement column */ + column?: (string|null); - /** Workflow workflow_sub_type */ - workflow_sub_type?: (string|null); + /** AutoIncrement sequence */ + sequence?: (string|null); } - /** Represents a Workflow. */ - class Workflow implements IWorkflow { + /** Represents an AutoIncrement. */ + class AutoIncrement implements IAutoIncrement { /** - * Constructs a new Workflow. + * Constructs a new AutoIncrement. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IWorkflow); - - /** Workflow name. */ - public name: string; - - /** Workflow source. 
*/ - public source?: (vtctldata.Workflow.IReplicationLocation|null); - - /** Workflow target. */ - public target?: (vtctldata.Workflow.IReplicationLocation|null); - - /** Workflow max_v_replication_lag. */ - public max_v_replication_lag: (number|Long); - - /** Workflow shard_streams. */ - public shard_streams: { [k: string]: vtctldata.Workflow.IShardStream }; + constructor(properties?: vschema.IAutoIncrement); - /** Workflow workflow_type. */ - public workflow_type: string; + /** AutoIncrement column. */ + public column: string; - /** Workflow workflow_sub_type. */ - public workflow_sub_type: string; + /** AutoIncrement sequence. */ + public sequence: string; /** - * Creates a new Workflow instance using the specified properties. + * Creates a new AutoIncrement instance using the specified properties. * @param [properties] Properties to set - * @returns Workflow instance + * @returns AutoIncrement instance */ - public static create(properties?: vtctldata.IWorkflow): vtctldata.Workflow; + public static create(properties?: vschema.IAutoIncrement): vschema.AutoIncrement; /** - * Encodes the specified Workflow message. Does not implicitly {@link vtctldata.Workflow.verify|verify} messages. - * @param message Workflow message or plain object to encode + * Encodes the specified AutoIncrement message. Does not implicitly {@link vschema.AutoIncrement.verify|verify} messages. + * @param message AutoIncrement message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IWorkflow, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vschema.IAutoIncrement, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified Workflow message, length delimited. Does not implicitly {@link vtctldata.Workflow.verify|verify} messages. - * @param message Workflow message or plain object to encode + * Encodes the specified AutoIncrement message, length delimited. 
Does not implicitly {@link vschema.AutoIncrement.verify|verify} messages. + * @param message AutoIncrement message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IWorkflow, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vschema.IAutoIncrement, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a Workflow message from the specified reader or buffer. + * Decodes an AutoIncrement message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns Workflow + * @returns AutoIncrement * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.Workflow; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.AutoIncrement; /** - * Decodes a Workflow message from the specified reader or buffer, length delimited. + * Decodes an AutoIncrement message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns Workflow + * @returns AutoIncrement * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.Workflow; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.AutoIncrement; /** - * Verifies a Workflow message. + * Verifies an AutoIncrement message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a Workflow message from a plain object. 
Also converts values to their respective internal types. + * Creates an AutoIncrement message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns Workflow + * @returns AutoIncrement */ - public static fromObject(object: { [k: string]: any }): vtctldata.Workflow; + public static fromObject(object: { [k: string]: any }): vschema.AutoIncrement; /** - * Creates a plain object from a Workflow message. Also converts values to other types if specified. - * @param message Workflow + * Creates a plain object from an AutoIncrement message. Also converts values to other types if specified. + * @param message AutoIncrement * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.Workflow, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vschema.AutoIncrement, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this Workflow to JSON. + * Converts this AutoIncrement to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for Workflow + * Gets the default type url for AutoIncrement * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - namespace Workflow { - - /** Properties of a ReplicationLocation. */ - interface IReplicationLocation { + /** Properties of a Column. */ + interface IColumn { - /** ReplicationLocation keyspace */ - keyspace?: (string|null); + /** Column name */ + name?: (string|null); - /** ReplicationLocation shards */ - shards?: (string[]|null); - } + /** Column type */ + type?: (query.Type|null); + } - /** Represents a ReplicationLocation. */ - class ReplicationLocation implements IReplicationLocation { + /** Represents a Column. 
*/ + class Column implements IColumn { - /** - * Constructs a new ReplicationLocation. - * @param [properties] Properties to set - */ - constructor(properties?: vtctldata.Workflow.IReplicationLocation); + /** + * Constructs a new Column. + * @param [properties] Properties to set + */ + constructor(properties?: vschema.IColumn); - /** ReplicationLocation keyspace. */ - public keyspace: string; + /** Column name. */ + public name: string; - /** ReplicationLocation shards. */ - public shards: string[]; + /** Column type. */ + public type: query.Type; - /** - * Creates a new ReplicationLocation instance using the specified properties. - * @param [properties] Properties to set - * @returns ReplicationLocation instance - */ - public static create(properties?: vtctldata.Workflow.IReplicationLocation): vtctldata.Workflow.ReplicationLocation; + /** + * Creates a new Column instance using the specified properties. + * @param [properties] Properties to set + * @returns Column instance + */ + public static create(properties?: vschema.IColumn): vschema.Column; - /** - * Encodes the specified ReplicationLocation message. Does not implicitly {@link vtctldata.Workflow.ReplicationLocation.verify|verify} messages. - * @param message ReplicationLocation message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encode(message: vtctldata.Workflow.IReplicationLocation, writer?: $protobuf.Writer): $protobuf.Writer; + /** + * Encodes the specified Column message. Does not implicitly {@link vschema.Column.verify|verify} messages. + * @param message Column message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vschema.IColumn, writer?: $protobuf.Writer): $protobuf.Writer; - /** - * Encodes the specified ReplicationLocation message, length delimited. Does not implicitly {@link vtctldata.Workflow.ReplicationLocation.verify|verify} messages. 
- * @param message ReplicationLocation message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encodeDelimited(message: vtctldata.Workflow.IReplicationLocation, writer?: $protobuf.Writer): $protobuf.Writer; + /** + * Encodes the specified Column message, length delimited. Does not implicitly {@link vschema.Column.verify|verify} messages. + * @param message Column message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vschema.IColumn, writer?: $protobuf.Writer): $protobuf.Writer; - /** - * Decodes a ReplicationLocation message from the specified reader or buffer. - * @param reader Reader or buffer to decode from - * @param [length] Message length if known beforehand - * @returns ReplicationLocation - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.Workflow.ReplicationLocation; + /** + * Decodes a Column message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns Column + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.Column; - /** - * Decodes a ReplicationLocation message from the specified reader or buffer, length delimited. 
- * @param reader Reader or buffer to decode from - * @returns ReplicationLocation - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.Workflow.ReplicationLocation; + /** + * Decodes a Column message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns Column + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.Column; - /** - * Verifies a ReplicationLocation message. - * @param message Plain object to verify - * @returns `null` if valid, otherwise the reason why it is not - */ - public static verify(message: { [k: string]: any }): (string|null); + /** + * Verifies a Column message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); - /** - * Creates a ReplicationLocation message from a plain object. Also converts values to their respective internal types. - * @param object Plain object - * @returns ReplicationLocation - */ - public static fromObject(object: { [k: string]: any }): vtctldata.Workflow.ReplicationLocation; + /** + * Creates a Column message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns Column + */ + public static fromObject(object: { [k: string]: any }): vschema.Column; - /** - * Creates a plain object from a ReplicationLocation message. Also converts values to other types if specified. 
- * @param message ReplicationLocation - * @param [options] Conversion options - * @returns Plain object - */ - public static toObject(message: vtctldata.Workflow.ReplicationLocation, options?: $protobuf.IConversionOptions): { [k: string]: any }; + /** + * Creates a plain object from a Column message. Also converts values to other types if specified. + * @param message Column + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vschema.Column, options?: $protobuf.IConversionOptions): { [k: string]: any }; - /** - * Converts this ReplicationLocation to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; + /** + * Converts this Column to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; - /** - * Gets the default type url for ReplicationLocation - * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns The default type url - */ - public static getTypeUrl(typeUrlPrefix?: string): string; - } + /** + * Gets the default type url for Column + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } - /** Properties of a ShardStream. */ - interface IShardStream { + /** Properties of a SrvVSchema. */ + interface ISrvVSchema { - /** ShardStream streams */ - streams?: (vtctldata.Workflow.IStream[]|null); + /** SrvVSchema keyspaces */ + keyspaces?: ({ [k: string]: vschema.IKeyspace }|null); - /** ShardStream tablet_controls */ - tablet_controls?: (topodata.Shard.ITabletControl[]|null); + /** SrvVSchema routing_rules */ + routing_rules?: (vschema.IRoutingRules|null); - /** ShardStream is_primary_serving */ - is_primary_serving?: (boolean|null); - } + /** SrvVSchema shard_routing_rules */ + shard_routing_rules?: (vschema.IShardRoutingRules|null); + } - /** Represents a ShardStream. 
*/ - class ShardStream implements IShardStream { + /** Represents a SrvVSchema. */ + class SrvVSchema implements ISrvVSchema { - /** - * Constructs a new ShardStream. - * @param [properties] Properties to set - */ - constructor(properties?: vtctldata.Workflow.IShardStream); + /** + * Constructs a new SrvVSchema. + * @param [properties] Properties to set + */ + constructor(properties?: vschema.ISrvVSchema); - /** ShardStream streams. */ - public streams: vtctldata.Workflow.IStream[]; + /** SrvVSchema keyspaces. */ + public keyspaces: { [k: string]: vschema.IKeyspace }; - /** ShardStream tablet_controls. */ - public tablet_controls: topodata.Shard.ITabletControl[]; + /** SrvVSchema routing_rules. */ + public routing_rules?: (vschema.IRoutingRules|null); - /** ShardStream is_primary_serving. */ - public is_primary_serving: boolean; + /** SrvVSchema shard_routing_rules. */ + public shard_routing_rules?: (vschema.IShardRoutingRules|null); - /** - * Creates a new ShardStream instance using the specified properties. - * @param [properties] Properties to set - * @returns ShardStream instance - */ - public static create(properties?: vtctldata.Workflow.IShardStream): vtctldata.Workflow.ShardStream; + /** + * Creates a new SrvVSchema instance using the specified properties. + * @param [properties] Properties to set + * @returns SrvVSchema instance + */ + public static create(properties?: vschema.ISrvVSchema): vschema.SrvVSchema; - /** - * Encodes the specified ShardStream message. Does not implicitly {@link vtctldata.Workflow.ShardStream.verify|verify} messages. - * @param message ShardStream message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encode(message: vtctldata.Workflow.IShardStream, writer?: $protobuf.Writer): $protobuf.Writer; + /** + * Encodes the specified SrvVSchema message. Does not implicitly {@link vschema.SrvVSchema.verify|verify} messages. 
+ * @param message SrvVSchema message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vschema.ISrvVSchema, writer?: $protobuf.Writer): $protobuf.Writer; - /** - * Encodes the specified ShardStream message, length delimited. Does not implicitly {@link vtctldata.Workflow.ShardStream.verify|verify} messages. - * @param message ShardStream message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encodeDelimited(message: vtctldata.Workflow.IShardStream, writer?: $protobuf.Writer): $protobuf.Writer; + /** + * Encodes the specified SrvVSchema message, length delimited. Does not implicitly {@link vschema.SrvVSchema.verify|verify} messages. + * @param message SrvVSchema message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vschema.ISrvVSchema, writer?: $protobuf.Writer): $protobuf.Writer; - /** - * Decodes a ShardStream message from the specified reader or buffer. - * @param reader Reader or buffer to decode from - * @param [length] Message length if known beforehand - * @returns ShardStream - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.Workflow.ShardStream; + /** + * Decodes a SrvVSchema message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns SrvVSchema + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.SrvVSchema; - /** - * Decodes a ShardStream message from the specified reader or buffer, length delimited. - * @param reader Reader or buffer to decode from - * @returns ShardStream - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.Workflow.ShardStream; + /** + * Decodes a SrvVSchema message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns SrvVSchema + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.SrvVSchema; - /** - * Verifies a ShardStream message. - * @param message Plain object to verify - * @returns `null` if valid, otherwise the reason why it is not - */ - public static verify(message: { [k: string]: any }): (string|null); + /** + * Verifies a SrvVSchema message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); - /** - * Creates a ShardStream message from a plain object. Also converts values to their respective internal types. - * @param object Plain object - * @returns ShardStream - */ - public static fromObject(object: { [k: string]: any }): vtctldata.Workflow.ShardStream; + /** + * Creates a SrvVSchema message from a plain object. 
Also converts values to their respective internal types. + * @param object Plain object + * @returns SrvVSchema + */ + public static fromObject(object: { [k: string]: any }): vschema.SrvVSchema; - /** - * Creates a plain object from a ShardStream message. Also converts values to other types if specified. - * @param message ShardStream - * @param [options] Conversion options - * @returns Plain object - */ - public static toObject(message: vtctldata.Workflow.ShardStream, options?: $protobuf.IConversionOptions): { [k: string]: any }; + /** + * Creates a plain object from a SrvVSchema message. Also converts values to other types if specified. + * @param message SrvVSchema + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vschema.SrvVSchema, options?: $protobuf.IConversionOptions): { [k: string]: any }; - /** - * Converts this ShardStream to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; + /** + * Converts this SrvVSchema to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; - /** - * Gets the default type url for ShardStream - * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns The default type url - */ - public static getTypeUrl(typeUrlPrefix?: string): string; - } + /** + * Gets the default type url for SrvVSchema + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } - /** Properties of a Stream. */ - interface IStream { + /** Properties of a ShardRoutingRules. */ + interface IShardRoutingRules { - /** Stream id */ - id?: (number|Long|null); + /** ShardRoutingRules rules */ + rules?: (vschema.IShardRoutingRule[]|null); + } - /** Stream shard */ - shard?: (string|null); + /** Represents a ShardRoutingRules. 
*/ + class ShardRoutingRules implements IShardRoutingRules { - /** Stream tablet */ - tablet?: (topodata.ITabletAlias|null); + /** + * Constructs a new ShardRoutingRules. + * @param [properties] Properties to set + */ + constructor(properties?: vschema.IShardRoutingRules); - /** Stream binlog_source */ - binlog_source?: (binlogdata.IBinlogSource|null); + /** ShardRoutingRules rules. */ + public rules: vschema.IShardRoutingRule[]; - /** Stream position */ - position?: (string|null); + /** + * Creates a new ShardRoutingRules instance using the specified properties. + * @param [properties] Properties to set + * @returns ShardRoutingRules instance + */ + public static create(properties?: vschema.IShardRoutingRules): vschema.ShardRoutingRules; - /** Stream stop_position */ - stop_position?: (string|null); + /** + * Encodes the specified ShardRoutingRules message. Does not implicitly {@link vschema.ShardRoutingRules.verify|verify} messages. + * @param message ShardRoutingRules message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vschema.IShardRoutingRules, writer?: $protobuf.Writer): $protobuf.Writer; - /** Stream state */ - state?: (string|null); + /** + * Encodes the specified ShardRoutingRules message, length delimited. Does not implicitly {@link vschema.ShardRoutingRules.verify|verify} messages. + * @param message ShardRoutingRules message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vschema.IShardRoutingRules, writer?: $protobuf.Writer): $protobuf.Writer; - /** Stream db_name */ - db_name?: (string|null); + /** + * Decodes a ShardRoutingRules message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ShardRoutingRules + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.ShardRoutingRules; - /** Stream transaction_timestamp */ - transaction_timestamp?: (vttime.ITime|null); + /** + * Decodes a ShardRoutingRules message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ShardRoutingRules + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.ShardRoutingRules; - /** Stream time_updated */ - time_updated?: (vttime.ITime|null); + /** + * Verifies a ShardRoutingRules message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); - /** Stream message */ - message?: (string|null); + /** + * Creates a ShardRoutingRules message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ShardRoutingRules + */ + public static fromObject(object: { [k: string]: any }): vschema.ShardRoutingRules; - /** Stream copy_states */ - copy_states?: (vtctldata.Workflow.Stream.ICopyState[]|null); + /** + * Creates a plain object from a ShardRoutingRules message. Also converts values to other types if specified. 
+ * @param message ShardRoutingRules + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vschema.ShardRoutingRules, options?: $protobuf.IConversionOptions): { [k: string]: any }; - /** Stream logs */ - logs?: (vtctldata.Workflow.Stream.ILog[]|null); + /** + * Converts this ShardRoutingRules to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; - /** Stream log_fetch_error */ - log_fetch_error?: (string|null); + /** + * Gets the default type url for ShardRoutingRules + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } - /** Stream tags */ - tags?: (string[]|null); - } + /** Properties of a ShardRoutingRule. */ + interface IShardRoutingRule { - /** Represents a Stream. */ - class Stream implements IStream { + /** ShardRoutingRule from_keyspace */ + from_keyspace?: (string|null); - /** - * Constructs a new Stream. - * @param [properties] Properties to set - */ - constructor(properties?: vtctldata.Workflow.IStream); + /** ShardRoutingRule to_keyspace */ + to_keyspace?: (string|null); - /** Stream id. */ - public id: (number|Long); + /** ShardRoutingRule shard */ + shard?: (string|null); + } + + /** Represents a ShardRoutingRule. */ + class ShardRoutingRule implements IShardRoutingRule { + + /** + * Constructs a new ShardRoutingRule. + * @param [properties] Properties to set + */ + constructor(properties?: vschema.IShardRoutingRule); + + /** ShardRoutingRule from_keyspace. */ + public from_keyspace: string; + + /** ShardRoutingRule to_keyspace. */ + public to_keyspace: string; + + /** ShardRoutingRule shard. */ + public shard: string; + + /** + * Creates a new ShardRoutingRule instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns ShardRoutingRule instance + */ + public static create(properties?: vschema.IShardRoutingRule): vschema.ShardRoutingRule; + + /** + * Encodes the specified ShardRoutingRule message. Does not implicitly {@link vschema.ShardRoutingRule.verify|verify} messages. + * @param message ShardRoutingRule message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vschema.IShardRoutingRule, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ShardRoutingRule message, length delimited. Does not implicitly {@link vschema.ShardRoutingRule.verify|verify} messages. + * @param message ShardRoutingRule message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vschema.IShardRoutingRule, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ShardRoutingRule message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ShardRoutingRule + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.ShardRoutingRule; + + /** + * Decodes a ShardRoutingRule message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ShardRoutingRule + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.ShardRoutingRule; + + /** + * Verifies a ShardRoutingRule message. 
+ * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ShardRoutingRule message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ShardRoutingRule + */ + public static fromObject(object: { [k: string]: any }): vschema.ShardRoutingRule; + + /** + * Creates a plain object from a ShardRoutingRule message. Also converts values to other types if specified. + * @param message ShardRoutingRule + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vschema.ShardRoutingRule, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ShardRoutingRule to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ShardRoutingRule + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } +} + +/** Namespace vtctldata. */ +export namespace vtctldata { + + /** Properties of an ExecuteVtctlCommandRequest. */ + interface IExecuteVtctlCommandRequest { + + /** ExecuteVtctlCommandRequest args */ + args?: (string[]|null); + + /** ExecuteVtctlCommandRequest action_timeout */ + action_timeout?: (number|Long|null); + } + + /** Represents an ExecuteVtctlCommandRequest. */ + class ExecuteVtctlCommandRequest implements IExecuteVtctlCommandRequest { + + /** + * Constructs a new ExecuteVtctlCommandRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IExecuteVtctlCommandRequest); + + /** ExecuteVtctlCommandRequest args. */ + public args: string[]; + + /** ExecuteVtctlCommandRequest action_timeout. 
*/ + public action_timeout: (number|Long); + + /** + * Creates a new ExecuteVtctlCommandRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns ExecuteVtctlCommandRequest instance + */ + public static create(properties?: vtctldata.IExecuteVtctlCommandRequest): vtctldata.ExecuteVtctlCommandRequest; + + /** + * Encodes the specified ExecuteVtctlCommandRequest message. Does not implicitly {@link vtctldata.ExecuteVtctlCommandRequest.verify|verify} messages. + * @param message ExecuteVtctlCommandRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IExecuteVtctlCommandRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ExecuteVtctlCommandRequest message, length delimited. Does not implicitly {@link vtctldata.ExecuteVtctlCommandRequest.verify|verify} messages. + * @param message ExecuteVtctlCommandRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IExecuteVtctlCommandRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ExecuteVtctlCommandRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ExecuteVtctlCommandRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ExecuteVtctlCommandRequest; + + /** + * Decodes an ExecuteVtctlCommandRequest message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns ExecuteVtctlCommandRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ExecuteVtctlCommandRequest; + + /** + * Verifies an ExecuteVtctlCommandRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ExecuteVtctlCommandRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ExecuteVtctlCommandRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ExecuteVtctlCommandRequest; + + /** + * Creates a plain object from an ExecuteVtctlCommandRequest message. Also converts values to other types if specified. + * @param message ExecuteVtctlCommandRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ExecuteVtctlCommandRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ExecuteVtctlCommandRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ExecuteVtctlCommandRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an ExecuteVtctlCommandResponse. */ + interface IExecuteVtctlCommandResponse { + + /** ExecuteVtctlCommandResponse event */ + event?: (logutil.IEvent|null); + } + + /** Represents an ExecuteVtctlCommandResponse. 
*/ + class ExecuteVtctlCommandResponse implements IExecuteVtctlCommandResponse { + + /** + * Constructs a new ExecuteVtctlCommandResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IExecuteVtctlCommandResponse); + + /** ExecuteVtctlCommandResponse event. */ + public event?: (logutil.IEvent|null); + + /** + * Creates a new ExecuteVtctlCommandResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns ExecuteVtctlCommandResponse instance + */ + public static create(properties?: vtctldata.IExecuteVtctlCommandResponse): vtctldata.ExecuteVtctlCommandResponse; + + /** + * Encodes the specified ExecuteVtctlCommandResponse message. Does not implicitly {@link vtctldata.ExecuteVtctlCommandResponse.verify|verify} messages. + * @param message ExecuteVtctlCommandResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IExecuteVtctlCommandResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ExecuteVtctlCommandResponse message, length delimited. Does not implicitly {@link vtctldata.ExecuteVtctlCommandResponse.verify|verify} messages. + * @param message ExecuteVtctlCommandResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IExecuteVtctlCommandResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ExecuteVtctlCommandResponse message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ExecuteVtctlCommandResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ExecuteVtctlCommandResponse; + + /** + * Decodes an ExecuteVtctlCommandResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ExecuteVtctlCommandResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ExecuteVtctlCommandResponse; + + /** + * Verifies an ExecuteVtctlCommandResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ExecuteVtctlCommandResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ExecuteVtctlCommandResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ExecuteVtctlCommandResponse; + + /** + * Creates a plain object from an ExecuteVtctlCommandResponse message. Also converts values to other types if specified. + * @param message ExecuteVtctlCommandResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ExecuteVtctlCommandResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ExecuteVtctlCommandResponse to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ExecuteVtctlCommandResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** MaterializationIntent enum. */ + enum MaterializationIntent { + CUSTOM = 0, + MOVETABLES = 1, + CREATELOOKUPINDEX = 2 + } + + /** Properties of a TableMaterializeSettings. */ + interface ITableMaterializeSettings { + + /** TableMaterializeSettings target_table */ + target_table?: (string|null); + + /** TableMaterializeSettings source_expression */ + source_expression?: (string|null); + + /** TableMaterializeSettings create_ddl */ + create_ddl?: (string|null); + } + + /** Represents a TableMaterializeSettings. */ + class TableMaterializeSettings implements ITableMaterializeSettings { + + /** + * Constructs a new TableMaterializeSettings. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.ITableMaterializeSettings); + + /** TableMaterializeSettings target_table. */ + public target_table: string; + + /** TableMaterializeSettings source_expression. */ + public source_expression: string; + + /** TableMaterializeSettings create_ddl. */ + public create_ddl: string; + + /** + * Creates a new TableMaterializeSettings instance using the specified properties. + * @param [properties] Properties to set + * @returns TableMaterializeSettings instance + */ + public static create(properties?: vtctldata.ITableMaterializeSettings): vtctldata.TableMaterializeSettings; + + /** + * Encodes the specified TableMaterializeSettings message. Does not implicitly {@link vtctldata.TableMaterializeSettings.verify|verify} messages. 
+ * @param message TableMaterializeSettings message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.ITableMaterializeSettings, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified TableMaterializeSettings message, length delimited. Does not implicitly {@link vtctldata.TableMaterializeSettings.verify|verify} messages. + * @param message TableMaterializeSettings message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.ITableMaterializeSettings, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a TableMaterializeSettings message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns TableMaterializeSettings + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.TableMaterializeSettings; + + /** + * Decodes a TableMaterializeSettings message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns TableMaterializeSettings + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.TableMaterializeSettings; + + /** + * Verifies a TableMaterializeSettings message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a TableMaterializeSettings message from a plain object. 
Also converts values to their respective internal types. + * @param object Plain object + * @returns TableMaterializeSettings + */ + public static fromObject(object: { [k: string]: any }): vtctldata.TableMaterializeSettings; + + /** + * Creates a plain object from a TableMaterializeSettings message. Also converts values to other types if specified. + * @param message TableMaterializeSettings + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.TableMaterializeSettings, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this TableMaterializeSettings to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for TableMaterializeSettings + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a MaterializeSettings. 
*/ + interface IMaterializeSettings { + + /** MaterializeSettings workflow */ + workflow?: (string|null); + + /** MaterializeSettings source_keyspace */ + source_keyspace?: (string|null); + + /** MaterializeSettings target_keyspace */ + target_keyspace?: (string|null); + + /** MaterializeSettings stop_after_copy */ + stop_after_copy?: (boolean|null); + + /** MaterializeSettings table_settings */ + table_settings?: (vtctldata.ITableMaterializeSettings[]|null); + + /** MaterializeSettings cell */ + cell?: (string|null); + + /** MaterializeSettings tablet_types */ + tablet_types?: (string|null); + + /** MaterializeSettings external_cluster */ + external_cluster?: (string|null); + + /** MaterializeSettings materialization_intent */ + materialization_intent?: (vtctldata.MaterializationIntent|null); + + /** MaterializeSettings source_time_zone */ + source_time_zone?: (string|null); + + /** MaterializeSettings target_time_zone */ + target_time_zone?: (string|null); + + /** MaterializeSettings source_shards */ + source_shards?: (string[]|null); + + /** MaterializeSettings on_ddl */ + on_ddl?: (string|null); + + /** MaterializeSettings defer_secondary_keys */ + defer_secondary_keys?: (boolean|null); + + /** MaterializeSettings tablet_selection_preference */ + tablet_selection_preference?: (tabletmanagerdata.TabletSelectionPreference|null); + + /** MaterializeSettings atomic_copy */ + atomic_copy?: (boolean|null); + } + + /** Represents a MaterializeSettings. */ + class MaterializeSettings implements IMaterializeSettings { + + /** + * Constructs a new MaterializeSettings. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IMaterializeSettings); + + /** MaterializeSettings workflow. */ + public workflow: string; + + /** MaterializeSettings source_keyspace. */ + public source_keyspace: string; + + /** MaterializeSettings target_keyspace. */ + public target_keyspace: string; + + /** MaterializeSettings stop_after_copy. 
*/ + public stop_after_copy: boolean; + + /** MaterializeSettings table_settings. */ + public table_settings: vtctldata.ITableMaterializeSettings[]; + + /** MaterializeSettings cell. */ + public cell: string; + + /** MaterializeSettings tablet_types. */ + public tablet_types: string; + + /** MaterializeSettings external_cluster. */ + public external_cluster: string; + + /** MaterializeSettings materialization_intent. */ + public materialization_intent: vtctldata.MaterializationIntent; + + /** MaterializeSettings source_time_zone. */ + public source_time_zone: string; + + /** MaterializeSettings target_time_zone. */ + public target_time_zone: string; + + /** MaterializeSettings source_shards. */ + public source_shards: string[]; + + /** MaterializeSettings on_ddl. */ + public on_ddl: string; + + /** MaterializeSettings defer_secondary_keys. */ + public defer_secondary_keys: boolean; + + /** MaterializeSettings tablet_selection_preference. */ + public tablet_selection_preference: tabletmanagerdata.TabletSelectionPreference; + + /** MaterializeSettings atomic_copy. */ + public atomic_copy: boolean; + + /** + * Creates a new MaterializeSettings instance using the specified properties. + * @param [properties] Properties to set + * @returns MaterializeSettings instance + */ + public static create(properties?: vtctldata.IMaterializeSettings): vtctldata.MaterializeSettings; + + /** + * Encodes the specified MaterializeSettings message. Does not implicitly {@link vtctldata.MaterializeSettings.verify|verify} messages. + * @param message MaterializeSettings message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IMaterializeSettings, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified MaterializeSettings message, length delimited. Does not implicitly {@link vtctldata.MaterializeSettings.verify|verify} messages. 
+ * @param message MaterializeSettings message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IMaterializeSettings, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a MaterializeSettings message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns MaterializeSettings + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MaterializeSettings; + + /** + * Decodes a MaterializeSettings message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns MaterializeSettings + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MaterializeSettings; + + /** + * Verifies a MaterializeSettings message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a MaterializeSettings message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns MaterializeSettings + */ + public static fromObject(object: { [k: string]: any }): vtctldata.MaterializeSettings; + + /** + * Creates a plain object from a MaterializeSettings message. Also converts values to other types if specified. 
+ * @param message MaterializeSettings + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.MaterializeSettings, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this MaterializeSettings to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for MaterializeSettings + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a Keyspace. */ + interface IKeyspace { + + /** Keyspace name */ + name?: (string|null); + + /** Keyspace keyspace */ + keyspace?: (topodata.IKeyspace|null); + } + + /** Represents a Keyspace. */ + class Keyspace implements IKeyspace { + + /** + * Constructs a new Keyspace. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IKeyspace); + + /** Keyspace name. */ + public name: string; + + /** Keyspace keyspace. */ + public keyspace?: (topodata.IKeyspace|null); + + /** + * Creates a new Keyspace instance using the specified properties. + * @param [properties] Properties to set + * @returns Keyspace instance + */ + public static create(properties?: vtctldata.IKeyspace): vtctldata.Keyspace; + + /** + * Encodes the specified Keyspace message. Does not implicitly {@link vtctldata.Keyspace.verify|verify} messages. + * @param message Keyspace message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IKeyspace, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified Keyspace message, length delimited. Does not implicitly {@link vtctldata.Keyspace.verify|verify} messages. 
+ * @param message Keyspace message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IKeyspace, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a Keyspace message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns Keyspace + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.Keyspace; + + /** + * Decodes a Keyspace message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns Keyspace + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.Keyspace; + + /** + * Verifies a Keyspace message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a Keyspace message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns Keyspace + */ + public static fromObject(object: { [k: string]: any }): vtctldata.Keyspace; + + /** + * Creates a plain object from a Keyspace message. Also converts values to other types if specified. + * @param message Keyspace + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.Keyspace, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this Keyspace to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for Keyspace + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** QueryOrdering enum. */ + enum QueryOrdering { + NONE = 0, + ASCENDING = 1, + DESCENDING = 2 + } + + /** Properties of a SchemaMigration. */ + interface ISchemaMigration { + + /** SchemaMigration uuid */ + uuid?: (string|null); + + /** SchemaMigration keyspace */ + keyspace?: (string|null); + + /** SchemaMigration shard */ + shard?: (string|null); + + /** SchemaMigration schema */ + schema?: (string|null); + + /** SchemaMigration table */ + table?: (string|null); + + /** SchemaMigration migration_statement */ + migration_statement?: (string|null); + + /** SchemaMigration strategy */ + strategy?: (vtctldata.SchemaMigration.Strategy|null); + + /** SchemaMigration options */ + options?: (string|null); + + /** SchemaMigration added_at */ + added_at?: (vttime.ITime|null); + + /** SchemaMigration requested_at */ + requested_at?: (vttime.ITime|null); + + /** SchemaMigration ready_at */ + ready_at?: (vttime.ITime|null); + + /** SchemaMigration started_at */ + started_at?: (vttime.ITime|null); + + /** SchemaMigration liveness_timestamp */ + liveness_timestamp?: (vttime.ITime|null); + + /** SchemaMigration completed_at */ + completed_at?: (vttime.ITime|null); + + /** SchemaMigration cleaned_up_at */ + cleaned_up_at?: (vttime.ITime|null); + + /** SchemaMigration status */ + status?: (vtctldata.SchemaMigration.Status|null); + + /** SchemaMigration log_path */ + log_path?: (string|null); + + /** SchemaMigration artifacts */ + artifacts?: (string|null); + + /** SchemaMigration retries */ + retries?: (number|Long|null); + + /** SchemaMigration tablet */ + tablet?: (topodata.ITabletAlias|null); + + /** SchemaMigration tablet_failure */ + tablet_failure?: (boolean|null); 
+ + /** SchemaMigration progress */ + progress?: (number|null); + + /** SchemaMigration migration_context */ + migration_context?: (string|null); + + /** SchemaMigration ddl_action */ + ddl_action?: (string|null); + + /** SchemaMigration message */ + message?: (string|null); + + /** SchemaMigration eta_seconds */ + eta_seconds?: (number|Long|null); + + /** SchemaMigration rows_copied */ + rows_copied?: (number|Long|null); + + /** SchemaMigration table_rows */ + table_rows?: (number|Long|null); + + /** SchemaMigration added_unique_keys */ + added_unique_keys?: (number|null); + + /** SchemaMigration removed_unique_keys */ + removed_unique_keys?: (number|null); + + /** SchemaMigration log_file */ + log_file?: (string|null); + + /** SchemaMigration artifact_retention */ + artifact_retention?: (vttime.IDuration|null); + + /** SchemaMigration postpone_completion */ + postpone_completion?: (boolean|null); + + /** SchemaMigration removed_unique_key_names */ + removed_unique_key_names?: (string|null); + + /** SchemaMigration dropped_no_default_column_names */ + dropped_no_default_column_names?: (string|null); + + /** SchemaMigration expanded_column_names */ + expanded_column_names?: (string|null); + + /** SchemaMigration revertible_notes */ + revertible_notes?: (string|null); + + /** SchemaMigration allow_concurrent */ + allow_concurrent?: (boolean|null); + + /** SchemaMigration reverted_uuid */ + reverted_uuid?: (string|null); + + /** SchemaMigration is_view */ + is_view?: (boolean|null); + + /** SchemaMigration ready_to_complete */ + ready_to_complete?: (boolean|null); + + /** SchemaMigration vitess_liveness_indicator */ + vitess_liveness_indicator?: (number|Long|null); + + /** SchemaMigration user_throttle_ratio */ + user_throttle_ratio?: (number|null); + + /** SchemaMigration special_plan */ + special_plan?: (string|null); + + /** SchemaMigration last_throttled_at */ + last_throttled_at?: (vttime.ITime|null); + + /** SchemaMigration component_throttled */ + 
component_throttled?: (string|null); + + /** SchemaMigration cancelled_at */ + cancelled_at?: (vttime.ITime|null); + + /** SchemaMigration postpone_launch */ + postpone_launch?: (boolean|null); + + /** SchemaMigration stage */ + stage?: (string|null); + + /** SchemaMigration cutover_attempts */ + cutover_attempts?: (number|null); + + /** SchemaMigration is_immediate_operation */ + is_immediate_operation?: (boolean|null); + + /** SchemaMigration reviewed_at */ + reviewed_at?: (vttime.ITime|null); + + /** SchemaMigration ready_to_complete_at */ + ready_to_complete_at?: (vttime.ITime|null); + } + + /** Represents a SchemaMigration. */ + class SchemaMigration implements ISchemaMigration { + + /** + * Constructs a new SchemaMigration. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.ISchemaMigration); + + /** SchemaMigration uuid. */ + public uuid: string; + + /** SchemaMigration keyspace. */ + public keyspace: string; + + /** SchemaMigration shard. */ + public shard: string; + + /** SchemaMigration schema. */ + public schema: string; + + /** SchemaMigration table. */ + public table: string; + + /** SchemaMigration migration_statement. */ + public migration_statement: string; + + /** SchemaMigration strategy. */ + public strategy: vtctldata.SchemaMigration.Strategy; + + /** SchemaMigration options. */ + public options: string; + + /** SchemaMigration added_at. */ + public added_at?: (vttime.ITime|null); + + /** SchemaMigration requested_at. */ + public requested_at?: (vttime.ITime|null); + + /** SchemaMigration ready_at. */ + public ready_at?: (vttime.ITime|null); + + /** SchemaMigration started_at. */ + public started_at?: (vttime.ITime|null); + + /** SchemaMigration liveness_timestamp. */ + public liveness_timestamp?: (vttime.ITime|null); + + /** SchemaMigration completed_at. */ + public completed_at?: (vttime.ITime|null); + + /** SchemaMigration cleaned_up_at. 
*/ + public cleaned_up_at?: (vttime.ITime|null); + + /** SchemaMigration status. */ + public status: vtctldata.SchemaMigration.Status; + + /** SchemaMigration log_path. */ + public log_path: string; + + /** SchemaMigration artifacts. */ + public artifacts: string; + + /** SchemaMigration retries. */ + public retries: (number|Long); + + /** SchemaMigration tablet. */ + public tablet?: (topodata.ITabletAlias|null); + + /** SchemaMigration tablet_failure. */ + public tablet_failure: boolean; + + /** SchemaMigration progress. */ + public progress: number; + + /** SchemaMigration migration_context. */ + public migration_context: string; + + /** SchemaMigration ddl_action. */ + public ddl_action: string; + + /** SchemaMigration message. */ + public message: string; + + /** SchemaMigration eta_seconds. */ + public eta_seconds: (number|Long); + + /** SchemaMigration rows_copied. */ + public rows_copied: (number|Long); + + /** SchemaMigration table_rows. */ + public table_rows: (number|Long); + + /** SchemaMigration added_unique_keys. */ + public added_unique_keys: number; + + /** SchemaMigration removed_unique_keys. */ + public removed_unique_keys: number; + + /** SchemaMigration log_file. */ + public log_file: string; + + /** SchemaMigration artifact_retention. */ + public artifact_retention?: (vttime.IDuration|null); + + /** SchemaMigration postpone_completion. */ + public postpone_completion: boolean; + + /** SchemaMigration removed_unique_key_names. */ + public removed_unique_key_names: string; + + /** SchemaMigration dropped_no_default_column_names. */ + public dropped_no_default_column_names: string; + + /** SchemaMigration expanded_column_names. */ + public expanded_column_names: string; + + /** SchemaMigration revertible_notes. */ + public revertible_notes: string; + + /** SchemaMigration allow_concurrent. */ + public allow_concurrent: boolean; + + /** SchemaMigration reverted_uuid. */ + public reverted_uuid: string; + + /** SchemaMigration is_view. 
*/ + public is_view: boolean; + + /** SchemaMigration ready_to_complete. */ + public ready_to_complete: boolean; + + /** SchemaMigration vitess_liveness_indicator. */ + public vitess_liveness_indicator: (number|Long); + + /** SchemaMigration user_throttle_ratio. */ + public user_throttle_ratio: number; + + /** SchemaMigration special_plan. */ + public special_plan: string; + + /** SchemaMigration last_throttled_at. */ + public last_throttled_at?: (vttime.ITime|null); + + /** SchemaMigration component_throttled. */ + public component_throttled: string; + + /** SchemaMigration cancelled_at. */ + public cancelled_at?: (vttime.ITime|null); + + /** SchemaMigration postpone_launch. */ + public postpone_launch: boolean; + + /** SchemaMigration stage. */ + public stage: string; + + /** SchemaMigration cutover_attempts. */ + public cutover_attempts: number; + + /** SchemaMigration is_immediate_operation. */ + public is_immediate_operation: boolean; + + /** SchemaMigration reviewed_at. */ + public reviewed_at?: (vttime.ITime|null); + + /** SchemaMigration ready_to_complete_at. */ + public ready_to_complete_at?: (vttime.ITime|null); + + /** + * Creates a new SchemaMigration instance using the specified properties. + * @param [properties] Properties to set + * @returns SchemaMigration instance + */ + public static create(properties?: vtctldata.ISchemaMigration): vtctldata.SchemaMigration; + + /** + * Encodes the specified SchemaMigration message. Does not implicitly {@link vtctldata.SchemaMigration.verify|verify} messages. + * @param message SchemaMigration message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.ISchemaMigration, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified SchemaMigration message, length delimited. Does not implicitly {@link vtctldata.SchemaMigration.verify|verify} messages. 
+ * @param message SchemaMigration message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.ISchemaMigration, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a SchemaMigration message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns SchemaMigration + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SchemaMigration; + + /** + * Decodes a SchemaMigration message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns SchemaMigration + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SchemaMigration; + + /** + * Verifies a SchemaMigration message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a SchemaMigration message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns SchemaMigration + */ + public static fromObject(object: { [k: string]: any }): vtctldata.SchemaMigration; + + /** + * Creates a plain object from a SchemaMigration message. Also converts values to other types if specified. 
+ * @param message SchemaMigration + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.SchemaMigration, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this SchemaMigration to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for SchemaMigration + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + namespace SchemaMigration { + + /** Strategy enum. */ + enum Strategy { + VITESS = 0, + ONLINE = 0, + GHOST = 1, + PTOSC = 2, + DIRECT = 3, + MYSQL = 4 + } + + /** Status enum. */ + enum Status { + UNKNOWN = 0, + REQUESTED = 1, + CANCELLED = 2, + QUEUED = 3, + READY = 4, + RUNNING = 5, + COMPLETE = 6, + FAILED = 7 + } + } + + /** Properties of a Shard. */ + interface IShard { + + /** Shard keyspace */ + keyspace?: (string|null); + + /** Shard name */ + name?: (string|null); + + /** Shard shard */ + shard?: (topodata.IShard|null); + } + + /** Represents a Shard. */ + class Shard implements IShard { + + /** + * Constructs a new Shard. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IShard); + + /** Shard keyspace. */ + public keyspace: string; + + /** Shard name. */ + public name: string; + + /** Shard shard. */ + public shard?: (topodata.IShard|null); + + /** + * Creates a new Shard instance using the specified properties. + * @param [properties] Properties to set + * @returns Shard instance + */ + public static create(properties?: vtctldata.IShard): vtctldata.Shard; + + /** + * Encodes the specified Shard message. Does not implicitly {@link vtctldata.Shard.verify|verify} messages. 
+ * @param message Shard message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IShard, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified Shard message, length delimited. Does not implicitly {@link vtctldata.Shard.verify|verify} messages. + * @param message Shard message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IShard, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a Shard message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns Shard + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.Shard; + + /** + * Decodes a Shard message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns Shard + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.Shard; + + /** + * Verifies a Shard message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a Shard message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns Shard + */ + public static fromObject(object: { [k: string]: any }): vtctldata.Shard; + + /** + * Creates a plain object from a Shard message. Also converts values to other types if specified. 
+ * @param message Shard + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.Shard, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this Shard to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for Shard + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a Workflow. */ + interface IWorkflow { + + /** Workflow name */ + name?: (string|null); + + /** Workflow source */ + source?: (vtctldata.Workflow.IReplicationLocation|null); + + /** Workflow target */ + target?: (vtctldata.Workflow.IReplicationLocation|null); + + /** Workflow max_v_replication_lag */ + max_v_replication_lag?: (number|Long|null); + + /** Workflow shard_streams */ + shard_streams?: ({ [k: string]: vtctldata.Workflow.IShardStream }|null); + + /** Workflow workflow_type */ + workflow_type?: (string|null); + + /** Workflow workflow_sub_type */ + workflow_sub_type?: (string|null); + + /** Workflow max_v_replication_transaction_lag */ + max_v_replication_transaction_lag?: (number|Long|null); + + /** Workflow defer_secondary_keys */ + defer_secondary_keys?: (boolean|null); + } + + /** Represents a Workflow. */ + class Workflow implements IWorkflow { + + /** + * Constructs a new Workflow. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IWorkflow); + + /** Workflow name. */ + public name: string; + + /** Workflow source. */ + public source?: (vtctldata.Workflow.IReplicationLocation|null); + + /** Workflow target. */ + public target?: (vtctldata.Workflow.IReplicationLocation|null); + + /** Workflow max_v_replication_lag. */ + public max_v_replication_lag: (number|Long); + + /** Workflow shard_streams. 
*/ + public shard_streams: { [k: string]: vtctldata.Workflow.IShardStream }; + + /** Workflow workflow_type. */ + public workflow_type: string; + + /** Workflow workflow_sub_type. */ + public workflow_sub_type: string; + + /** Workflow max_v_replication_transaction_lag. */ + public max_v_replication_transaction_lag: (number|Long); + + /** Workflow defer_secondary_keys. */ + public defer_secondary_keys: boolean; + + /** + * Creates a new Workflow instance using the specified properties. + * @param [properties] Properties to set + * @returns Workflow instance + */ + public static create(properties?: vtctldata.IWorkflow): vtctldata.Workflow; + + /** + * Encodes the specified Workflow message. Does not implicitly {@link vtctldata.Workflow.verify|verify} messages. + * @param message Workflow message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IWorkflow, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified Workflow message, length delimited. Does not implicitly {@link vtctldata.Workflow.verify|verify} messages. + * @param message Workflow message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IWorkflow, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a Workflow message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns Workflow + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.Workflow; + + /** + * Decodes a Workflow message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns Workflow + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.Workflow; + + /** + * Verifies a Workflow message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a Workflow message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns Workflow + */ + public static fromObject(object: { [k: string]: any }): vtctldata.Workflow; + + /** + * Creates a plain object from a Workflow message. Also converts values to other types if specified. + * @param message Workflow + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.Workflow, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this Workflow to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for Workflow + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + namespace Workflow { + + /** Properties of a ReplicationLocation. */ + interface IReplicationLocation { + + /** ReplicationLocation keyspace */ + keyspace?: (string|null); + + /** ReplicationLocation shards */ + shards?: (string[]|null); + } + + /** Represents a ReplicationLocation. */ + class ReplicationLocation implements IReplicationLocation { + + /** + * Constructs a new ReplicationLocation. 
+ * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.Workflow.IReplicationLocation); + + /** ReplicationLocation keyspace. */ + public keyspace: string; + + /** ReplicationLocation shards. */ + public shards: string[]; + + /** + * Creates a new ReplicationLocation instance using the specified properties. + * @param [properties] Properties to set + * @returns ReplicationLocation instance + */ + public static create(properties?: vtctldata.Workflow.IReplicationLocation): vtctldata.Workflow.ReplicationLocation; + + /** + * Encodes the specified ReplicationLocation message. Does not implicitly {@link vtctldata.Workflow.ReplicationLocation.verify|verify} messages. + * @param message ReplicationLocation message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.Workflow.IReplicationLocation, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ReplicationLocation message, length delimited. Does not implicitly {@link vtctldata.Workflow.ReplicationLocation.verify|verify} messages. + * @param message ReplicationLocation message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.Workflow.IReplicationLocation, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ReplicationLocation message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ReplicationLocation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.Workflow.ReplicationLocation; + + /** + * Decodes a ReplicationLocation message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns ReplicationLocation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.Workflow.ReplicationLocation; + + /** + * Verifies a ReplicationLocation message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ReplicationLocation message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ReplicationLocation + */ + public static fromObject(object: { [k: string]: any }): vtctldata.Workflow.ReplicationLocation; + + /** + * Creates a plain object from a ReplicationLocation message. Also converts values to other types if specified. + * @param message ReplicationLocation + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.Workflow.ReplicationLocation, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ReplicationLocation to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ReplicationLocation + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a ShardStream. */ + interface IShardStream { + + /** ShardStream streams */ + streams?: (vtctldata.Workflow.IStream[]|null); + + /** ShardStream tablet_controls */ + tablet_controls?: (topodata.Shard.ITabletControl[]|null); + + /** ShardStream is_primary_serving */ + is_primary_serving?: (boolean|null); + } + + /** Represents a ShardStream. 
*/ + class ShardStream implements IShardStream { + + /** + * Constructs a new ShardStream. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.Workflow.IShardStream); + + /** ShardStream streams. */ + public streams: vtctldata.Workflow.IStream[]; + + /** ShardStream tablet_controls. */ + public tablet_controls: topodata.Shard.ITabletControl[]; + + /** ShardStream is_primary_serving. */ + public is_primary_serving: boolean; + + /** + * Creates a new ShardStream instance using the specified properties. + * @param [properties] Properties to set + * @returns ShardStream instance + */ + public static create(properties?: vtctldata.Workflow.IShardStream): vtctldata.Workflow.ShardStream; + + /** + * Encodes the specified ShardStream message. Does not implicitly {@link vtctldata.Workflow.ShardStream.verify|verify} messages. + * @param message ShardStream message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.Workflow.IShardStream, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ShardStream message, length delimited. Does not implicitly {@link vtctldata.Workflow.ShardStream.verify|verify} messages. + * @param message ShardStream message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.Workflow.IShardStream, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ShardStream message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ShardStream + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.Workflow.ShardStream; + + /** + * Decodes a ShardStream message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ShardStream + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.Workflow.ShardStream; + + /** + * Verifies a ShardStream message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ShardStream message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ShardStream + */ + public static fromObject(object: { [k: string]: any }): vtctldata.Workflow.ShardStream; + + /** + * Creates a plain object from a ShardStream message. Also converts values to other types if specified. + * @param message ShardStream + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.Workflow.ShardStream, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ShardStream to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ShardStream + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a Stream. */ + interface IStream { + + /** Stream id */ + id?: (number|Long|null); + + /** Stream shard */ + shard?: (string|null); + + /** Stream tablet */ + tablet?: (topodata.ITabletAlias|null); + + /** Stream binlog_source */ + binlog_source?: (binlogdata.IBinlogSource|null); + + /** Stream position */ + position?: (string|null); + + /** Stream stop_position */ + stop_position?: (string|null); + + /** Stream state */ + state?: (string|null); + + /** Stream db_name */ + db_name?: (string|null); + + /** Stream transaction_timestamp */ + transaction_timestamp?: (vttime.ITime|null); + + /** Stream time_updated */ + time_updated?: (vttime.ITime|null); + + /** Stream message */ + message?: (string|null); + + /** Stream copy_states */ + copy_states?: (vtctldata.Workflow.Stream.ICopyState[]|null); + + /** Stream logs */ + logs?: (vtctldata.Workflow.Stream.ILog[]|null); + + /** Stream log_fetch_error */ + log_fetch_error?: (string|null); + + /** Stream tags */ + tags?: (string[]|null); + + /** Stream rows_copied */ + rows_copied?: (number|Long|null); + + /** Stream throttler_status */ + throttler_status?: (vtctldata.Workflow.Stream.IThrottlerStatus|null); + } + + /** Represents a Stream. */ + class Stream implements IStream { + + /** + * Constructs a new Stream. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.Workflow.IStream); + + /** Stream id. */ + public id: (number|Long); /** Stream shard. */ public shard: string; - /** Stream tablet. */ - public tablet?: (topodata.ITabletAlias|null); + /** Stream tablet. */ + public tablet?: (topodata.ITabletAlias|null); + + /** Stream binlog_source. 
*/ + public binlog_source?: (binlogdata.IBinlogSource|null); + + /** Stream position. */ + public position: string; + + /** Stream stop_position. */ + public stop_position: string; + + /** Stream state. */ + public state: string; + + /** Stream db_name. */ + public db_name: string; + + /** Stream transaction_timestamp. */ + public transaction_timestamp?: (vttime.ITime|null); + + /** Stream time_updated. */ + public time_updated?: (vttime.ITime|null); + + /** Stream message. */ + public message: string; + + /** Stream copy_states. */ + public copy_states: vtctldata.Workflow.Stream.ICopyState[]; + + /** Stream logs. */ + public logs: vtctldata.Workflow.Stream.ILog[]; + + /** Stream log_fetch_error. */ + public log_fetch_error: string; + + /** Stream tags. */ + public tags: string[]; + + /** Stream rows_copied. */ + public rows_copied: (number|Long); + + /** Stream throttler_status. */ + public throttler_status?: (vtctldata.Workflow.Stream.IThrottlerStatus|null); + + /** + * Creates a new Stream instance using the specified properties. + * @param [properties] Properties to set + * @returns Stream instance + */ + public static create(properties?: vtctldata.Workflow.IStream): vtctldata.Workflow.Stream; + + /** + * Encodes the specified Stream message. Does not implicitly {@link vtctldata.Workflow.Stream.verify|verify} messages. + * @param message Stream message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.Workflow.IStream, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified Stream message, length delimited. Does not implicitly {@link vtctldata.Workflow.Stream.verify|verify} messages. 
+ * @param message Stream message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.Workflow.IStream, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a Stream message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns Stream + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.Workflow.Stream; + + /** + * Decodes a Stream message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns Stream + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.Workflow.Stream; + + /** + * Verifies a Stream message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a Stream message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns Stream + */ + public static fromObject(object: { [k: string]: any }): vtctldata.Workflow.Stream; + + /** + * Creates a plain object from a Stream message. Also converts values to other types if specified. + * @param message Stream + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.Workflow.Stream, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this Stream to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for Stream + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + namespace Stream { + + /** Properties of a CopyState. */ + interface ICopyState { + + /** CopyState table */ + table?: (string|null); + + /** CopyState last_pk */ + last_pk?: (string|null); + } + + /** Represents a CopyState. */ + class CopyState implements ICopyState { + + /** + * Constructs a new CopyState. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.Workflow.Stream.ICopyState); + + /** CopyState table. */ + public table: string; + + /** CopyState last_pk. */ + public last_pk: string; + + /** + * Creates a new CopyState instance using the specified properties. + * @param [properties] Properties to set + * @returns CopyState instance + */ + public static create(properties?: vtctldata.Workflow.Stream.ICopyState): vtctldata.Workflow.Stream.CopyState; + + /** + * Encodes the specified CopyState message. Does not implicitly {@link vtctldata.Workflow.Stream.CopyState.verify|verify} messages. + * @param message CopyState message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.Workflow.Stream.ICopyState, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified CopyState message, length delimited. Does not implicitly {@link vtctldata.Workflow.Stream.CopyState.verify|verify} messages. + * @param message CopyState message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.Workflow.Stream.ICopyState, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a CopyState message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns CopyState + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.Workflow.Stream.CopyState; + + /** + * Decodes a CopyState message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns CopyState + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.Workflow.Stream.CopyState; + + /** + * Verifies a CopyState message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a CopyState message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns CopyState + */ + public static fromObject(object: { [k: string]: any }): vtctldata.Workflow.Stream.CopyState; + + /** + * Creates a plain object from a CopyState message. Also converts values to other types if specified. + * @param message CopyState + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.Workflow.Stream.CopyState, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this CopyState to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for CopyState + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a Log. */ + interface ILog { + + /** Log id */ + id?: (number|Long|null); + + /** Log stream_id */ + stream_id?: (number|Long|null); + + /** Log type */ + type?: (string|null); + + /** Log state */ + state?: (string|null); + + /** Log created_at */ + created_at?: (vttime.ITime|null); + + /** Log updated_at */ + updated_at?: (vttime.ITime|null); + + /** Log message */ + message?: (string|null); + + /** Log count */ + count?: (number|Long|null); + } + + /** Represents a Log. */ + class Log implements ILog { + + /** + * Constructs a new Log. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.Workflow.Stream.ILog); + + /** Log id. */ + public id: (number|Long); + + /** Log stream_id. */ + public stream_id: (number|Long); + + /** Log type. */ + public type: string; + + /** Log state. */ + public state: string; + + /** Log created_at. */ + public created_at?: (vttime.ITime|null); + + /** Log updated_at. */ + public updated_at?: (vttime.ITime|null); + + /** Log message. */ + public message: string; + + /** Log count. */ + public count: (number|Long); + + /** + * Creates a new Log instance using the specified properties. + * @param [properties] Properties to set + * @returns Log instance + */ + public static create(properties?: vtctldata.Workflow.Stream.ILog): vtctldata.Workflow.Stream.Log; + + /** + * Encodes the specified Log message. Does not implicitly {@link vtctldata.Workflow.Stream.Log.verify|verify} messages. 
+ * @param message Log message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.Workflow.Stream.ILog, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified Log message, length delimited. Does not implicitly {@link vtctldata.Workflow.Stream.Log.verify|verify} messages. + * @param message Log message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.Workflow.Stream.ILog, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a Log message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns Log + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.Workflow.Stream.Log; + + /** + * Decodes a Log message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns Log + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.Workflow.Stream.Log; + + /** + * Verifies a Log message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a Log message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns Log + */ + public static fromObject(object: { [k: string]: any }): vtctldata.Workflow.Stream.Log; + + /** + * Creates a plain object from a Log message. Also converts values to other types if specified. + * @param message Log + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.Workflow.Stream.Log, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this Log to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for Log + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a ThrottlerStatus. */ + interface IThrottlerStatus { + + /** ThrottlerStatus component_throttled */ + component_throttled?: (string|null); + + /** ThrottlerStatus time_throttled */ + time_throttled?: (vttime.ITime|null); + } + + /** Represents a ThrottlerStatus. */ + class ThrottlerStatus implements IThrottlerStatus { + + /** + * Constructs a new ThrottlerStatus. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.Workflow.Stream.IThrottlerStatus); + + /** ThrottlerStatus component_throttled. */ + public component_throttled: string; + + /** ThrottlerStatus time_throttled. */ + public time_throttled?: (vttime.ITime|null); + + /** + * Creates a new ThrottlerStatus instance using the specified properties. + * @param [properties] Properties to set + * @returns ThrottlerStatus instance + */ + public static create(properties?: vtctldata.Workflow.Stream.IThrottlerStatus): vtctldata.Workflow.Stream.ThrottlerStatus; + + /** + * Encodes the specified ThrottlerStatus message. Does not implicitly {@link vtctldata.Workflow.Stream.ThrottlerStatus.verify|verify} messages. 
+ * @param message ThrottlerStatus message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.Workflow.Stream.IThrottlerStatus, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ThrottlerStatus message, length delimited. Does not implicitly {@link vtctldata.Workflow.Stream.ThrottlerStatus.verify|verify} messages. + * @param message ThrottlerStatus message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.Workflow.Stream.IThrottlerStatus, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ThrottlerStatus message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ThrottlerStatus + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.Workflow.Stream.ThrottlerStatus; + + /** + * Decodes a ThrottlerStatus message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ThrottlerStatus + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.Workflow.Stream.ThrottlerStatus; + + /** + * Verifies a ThrottlerStatus message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ThrottlerStatus message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns ThrottlerStatus + */ + public static fromObject(object: { [k: string]: any }): vtctldata.Workflow.Stream.ThrottlerStatus; + + /** + * Creates a plain object from a ThrottlerStatus message. Also converts values to other types if specified. + * @param message ThrottlerStatus + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.Workflow.Stream.ThrottlerStatus, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ThrottlerStatus to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ThrottlerStatus + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + } + } + + /** Properties of an AddCellInfoRequest. */ + interface IAddCellInfoRequest { + + /** AddCellInfoRequest name */ + name?: (string|null); + + /** AddCellInfoRequest cell_info */ + cell_info?: (topodata.ICellInfo|null); + } + + /** Represents an AddCellInfoRequest. */ + class AddCellInfoRequest implements IAddCellInfoRequest { + + /** + * Constructs a new AddCellInfoRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IAddCellInfoRequest); + + /** AddCellInfoRequest name. */ + public name: string; + + /** AddCellInfoRequest cell_info. */ + public cell_info?: (topodata.ICellInfo|null); + + /** + * Creates a new AddCellInfoRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns AddCellInfoRequest instance + */ + public static create(properties?: vtctldata.IAddCellInfoRequest): vtctldata.AddCellInfoRequest; + + /** + * Encodes the specified AddCellInfoRequest message. Does not implicitly {@link vtctldata.AddCellInfoRequest.verify|verify} messages. 
+ * @param message AddCellInfoRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IAddCellInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified AddCellInfoRequest message, length delimited. Does not implicitly {@link vtctldata.AddCellInfoRequest.verify|verify} messages. + * @param message AddCellInfoRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IAddCellInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an AddCellInfoRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns AddCellInfoRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.AddCellInfoRequest; + + /** + * Decodes an AddCellInfoRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns AddCellInfoRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.AddCellInfoRequest; + + /** + * Verifies an AddCellInfoRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an AddCellInfoRequest message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns AddCellInfoRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.AddCellInfoRequest; + + /** + * Creates a plain object from an AddCellInfoRequest message. Also converts values to other types if specified. + * @param message AddCellInfoRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.AddCellInfoRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this AddCellInfoRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for AddCellInfoRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an AddCellInfoResponse. */ + interface IAddCellInfoResponse { + } + + /** Represents an AddCellInfoResponse. */ + class AddCellInfoResponse implements IAddCellInfoResponse { + + /** + * Constructs a new AddCellInfoResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IAddCellInfoResponse); + + /** + * Creates a new AddCellInfoResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns AddCellInfoResponse instance + */ + public static create(properties?: vtctldata.IAddCellInfoResponse): vtctldata.AddCellInfoResponse; + + /** + * Encodes the specified AddCellInfoResponse message. Does not implicitly {@link vtctldata.AddCellInfoResponse.verify|verify} messages. + * @param message AddCellInfoResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IAddCellInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified AddCellInfoResponse message, length delimited. 
Does not implicitly {@link vtctldata.AddCellInfoResponse.verify|verify} messages. + * @param message AddCellInfoResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IAddCellInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an AddCellInfoResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns AddCellInfoResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.AddCellInfoResponse; + + /** + * Decodes an AddCellInfoResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns AddCellInfoResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.AddCellInfoResponse; + + /** + * Verifies an AddCellInfoResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an AddCellInfoResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns AddCellInfoResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.AddCellInfoResponse; + + /** + * Creates a plain object from an AddCellInfoResponse message. Also converts values to other types if specified. 
+ * @param message AddCellInfoResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.AddCellInfoResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this AddCellInfoResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for AddCellInfoResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an AddCellsAliasRequest. */ + interface IAddCellsAliasRequest { + + /** AddCellsAliasRequest name */ + name?: (string|null); + + /** AddCellsAliasRequest cells */ + cells?: (string[]|null); + } + + /** Represents an AddCellsAliasRequest. */ + class AddCellsAliasRequest implements IAddCellsAliasRequest { + + /** + * Constructs a new AddCellsAliasRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IAddCellsAliasRequest); + + /** AddCellsAliasRequest name. */ + public name: string; + + /** AddCellsAliasRequest cells. */ + public cells: string[]; + + /** + * Creates a new AddCellsAliasRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns AddCellsAliasRequest instance + */ + public static create(properties?: vtctldata.IAddCellsAliasRequest): vtctldata.AddCellsAliasRequest; + + /** + * Encodes the specified AddCellsAliasRequest message. Does not implicitly {@link vtctldata.AddCellsAliasRequest.verify|verify} messages. + * @param message AddCellsAliasRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IAddCellsAliasRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified AddCellsAliasRequest message, length delimited. 
Does not implicitly {@link vtctldata.AddCellsAliasRequest.verify|verify} messages. + * @param message AddCellsAliasRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IAddCellsAliasRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an AddCellsAliasRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns AddCellsAliasRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.AddCellsAliasRequest; + + /** + * Decodes an AddCellsAliasRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns AddCellsAliasRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.AddCellsAliasRequest; + + /** + * Verifies an AddCellsAliasRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an AddCellsAliasRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns AddCellsAliasRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.AddCellsAliasRequest; + + /** + * Creates a plain object from an AddCellsAliasRequest message. Also converts values to other types if specified. 
+ * @param message AddCellsAliasRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.AddCellsAliasRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this AddCellsAliasRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for AddCellsAliasRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an AddCellsAliasResponse. */ + interface IAddCellsAliasResponse { + } + + /** Represents an AddCellsAliasResponse. */ + class AddCellsAliasResponse implements IAddCellsAliasResponse { + + /** + * Constructs a new AddCellsAliasResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IAddCellsAliasResponse); + + /** + * Creates a new AddCellsAliasResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns AddCellsAliasResponse instance + */ + public static create(properties?: vtctldata.IAddCellsAliasResponse): vtctldata.AddCellsAliasResponse; + + /** + * Encodes the specified AddCellsAliasResponse message. Does not implicitly {@link vtctldata.AddCellsAliasResponse.verify|verify} messages. + * @param message AddCellsAliasResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IAddCellsAliasResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified AddCellsAliasResponse message, length delimited. Does not implicitly {@link vtctldata.AddCellsAliasResponse.verify|verify} messages. 
+ * @param message AddCellsAliasResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IAddCellsAliasResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an AddCellsAliasResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns AddCellsAliasResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.AddCellsAliasResponse; + + /** + * Decodes an AddCellsAliasResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns AddCellsAliasResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.AddCellsAliasResponse; + + /** + * Verifies an AddCellsAliasResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an AddCellsAliasResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns AddCellsAliasResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.AddCellsAliasResponse; + + /** + * Creates a plain object from an AddCellsAliasResponse message. Also converts values to other types if specified. 
+ * @param message AddCellsAliasResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.AddCellsAliasResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this AddCellsAliasResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for AddCellsAliasResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an ApplyRoutingRulesRequest. */ + interface IApplyRoutingRulesRequest { + + /** ApplyRoutingRulesRequest routing_rules */ + routing_rules?: (vschema.IRoutingRules|null); + + /** ApplyRoutingRulesRequest skip_rebuild */ + skip_rebuild?: (boolean|null); + + /** ApplyRoutingRulesRequest rebuild_cells */ + rebuild_cells?: (string[]|null); + } + + /** Represents an ApplyRoutingRulesRequest. */ + class ApplyRoutingRulesRequest implements IApplyRoutingRulesRequest { + + /** + * Constructs a new ApplyRoutingRulesRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IApplyRoutingRulesRequest); + + /** ApplyRoutingRulesRequest routing_rules. */ + public routing_rules?: (vschema.IRoutingRules|null); + + /** ApplyRoutingRulesRequest skip_rebuild. */ + public skip_rebuild: boolean; + + /** ApplyRoutingRulesRequest rebuild_cells. */ + public rebuild_cells: string[]; + + /** + * Creates a new ApplyRoutingRulesRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns ApplyRoutingRulesRequest instance + */ + public static create(properties?: vtctldata.IApplyRoutingRulesRequest): vtctldata.ApplyRoutingRulesRequest; + + /** + * Encodes the specified ApplyRoutingRulesRequest message. Does not implicitly {@link vtctldata.ApplyRoutingRulesRequest.verify|verify} messages. 
+ * @param message ApplyRoutingRulesRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IApplyRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ApplyRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.ApplyRoutingRulesRequest.verify|verify} messages. + * @param message ApplyRoutingRulesRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IApplyRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ApplyRoutingRulesRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ApplyRoutingRulesRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ApplyRoutingRulesRequest; + + /** + * Decodes an ApplyRoutingRulesRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ApplyRoutingRulesRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ApplyRoutingRulesRequest; + + /** + * Verifies an ApplyRoutingRulesRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ApplyRoutingRulesRequest message from a plain object. 
Also converts values to their respective internal types. + * @param object Plain object + * @returns ApplyRoutingRulesRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ApplyRoutingRulesRequest; + + /** + * Creates a plain object from an ApplyRoutingRulesRequest message. Also converts values to other types if specified. + * @param message ApplyRoutingRulesRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ApplyRoutingRulesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ApplyRoutingRulesRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ApplyRoutingRulesRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an ApplyRoutingRulesResponse. */ + interface IApplyRoutingRulesResponse { + } + + /** Represents an ApplyRoutingRulesResponse. */ + class ApplyRoutingRulesResponse implements IApplyRoutingRulesResponse { + + /** + * Constructs a new ApplyRoutingRulesResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IApplyRoutingRulesResponse); + + /** + * Creates a new ApplyRoutingRulesResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns ApplyRoutingRulesResponse instance + */ + public static create(properties?: vtctldata.IApplyRoutingRulesResponse): vtctldata.ApplyRoutingRulesResponse; + + /** + * Encodes the specified ApplyRoutingRulesResponse message. Does not implicitly {@link vtctldata.ApplyRoutingRulesResponse.verify|verify} messages. 
+ * @param message ApplyRoutingRulesResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IApplyRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ApplyRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.ApplyRoutingRulesResponse.verify|verify} messages. + * @param message ApplyRoutingRulesResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IApplyRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ApplyRoutingRulesResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ApplyRoutingRulesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ApplyRoutingRulesResponse; + + /** + * Decodes an ApplyRoutingRulesResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ApplyRoutingRulesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ApplyRoutingRulesResponse; + + /** + * Verifies an ApplyRoutingRulesResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ApplyRoutingRulesResponse message from a plain object. 
Also converts values to their respective internal types. + * @param object Plain object + * @returns ApplyRoutingRulesResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ApplyRoutingRulesResponse; + + /** + * Creates a plain object from an ApplyRoutingRulesResponse message. Also converts values to other types if specified. + * @param message ApplyRoutingRulesResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ApplyRoutingRulesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ApplyRoutingRulesResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ApplyRoutingRulesResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an ApplyShardRoutingRulesRequest. */ + interface IApplyShardRoutingRulesRequest { + + /** ApplyShardRoutingRulesRequest shard_routing_rules */ + shard_routing_rules?: (vschema.IShardRoutingRules|null); + + /** ApplyShardRoutingRulesRequest skip_rebuild */ + skip_rebuild?: (boolean|null); + + /** ApplyShardRoutingRulesRequest rebuild_cells */ + rebuild_cells?: (string[]|null); + } + + /** Represents an ApplyShardRoutingRulesRequest. */ + class ApplyShardRoutingRulesRequest implements IApplyShardRoutingRulesRequest { + + /** + * Constructs a new ApplyShardRoutingRulesRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IApplyShardRoutingRulesRequest); + + /** ApplyShardRoutingRulesRequest shard_routing_rules. */ + public shard_routing_rules?: (vschema.IShardRoutingRules|null); + + /** ApplyShardRoutingRulesRequest skip_rebuild. */ + public skip_rebuild: boolean; + + /** ApplyShardRoutingRulesRequest rebuild_cells. 
*/ + public rebuild_cells: string[]; + + /** + * Creates a new ApplyShardRoutingRulesRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns ApplyShardRoutingRulesRequest instance + */ + public static create(properties?: vtctldata.IApplyShardRoutingRulesRequest): vtctldata.ApplyShardRoutingRulesRequest; + + /** + * Encodes the specified ApplyShardRoutingRulesRequest message. Does not implicitly {@link vtctldata.ApplyShardRoutingRulesRequest.verify|verify} messages. + * @param message ApplyShardRoutingRulesRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IApplyShardRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ApplyShardRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.ApplyShardRoutingRulesRequest.verify|verify} messages. + * @param message ApplyShardRoutingRulesRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IApplyShardRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ApplyShardRoutingRulesRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ApplyShardRoutingRulesRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ApplyShardRoutingRulesRequest; + + /** + * Decodes an ApplyShardRoutingRulesRequest message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns ApplyShardRoutingRulesRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ApplyShardRoutingRulesRequest; + + /** + * Verifies an ApplyShardRoutingRulesRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ApplyShardRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ApplyShardRoutingRulesRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ApplyShardRoutingRulesRequest; + + /** + * Creates a plain object from an ApplyShardRoutingRulesRequest message. Also converts values to other types if specified. + * @param message ApplyShardRoutingRulesRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ApplyShardRoutingRulesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ApplyShardRoutingRulesRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ApplyShardRoutingRulesRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an ApplyShardRoutingRulesResponse. */ + interface IApplyShardRoutingRulesResponse { + } + + /** Represents an ApplyShardRoutingRulesResponse. 
*/ + class ApplyShardRoutingRulesResponse implements IApplyShardRoutingRulesResponse { + + /** + * Constructs a new ApplyShardRoutingRulesResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IApplyShardRoutingRulesResponse); + + /** + * Creates a new ApplyShardRoutingRulesResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns ApplyShardRoutingRulesResponse instance + */ + public static create(properties?: vtctldata.IApplyShardRoutingRulesResponse): vtctldata.ApplyShardRoutingRulesResponse; + + /** + * Encodes the specified ApplyShardRoutingRulesResponse message. Does not implicitly {@link vtctldata.ApplyShardRoutingRulesResponse.verify|verify} messages. + * @param message ApplyShardRoutingRulesResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IApplyShardRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ApplyShardRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.ApplyShardRoutingRulesResponse.verify|verify} messages. + * @param message ApplyShardRoutingRulesResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IApplyShardRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ApplyShardRoutingRulesResponse message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ApplyShardRoutingRulesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ApplyShardRoutingRulesResponse; + + /** + * Decodes an ApplyShardRoutingRulesResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ApplyShardRoutingRulesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ApplyShardRoutingRulesResponse; + + /** + * Verifies an ApplyShardRoutingRulesResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ApplyShardRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ApplyShardRoutingRulesResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ApplyShardRoutingRulesResponse; + + /** + * Creates a plain object from an ApplyShardRoutingRulesResponse message. Also converts values to other types if specified. + * @param message ApplyShardRoutingRulesResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ApplyShardRoutingRulesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ApplyShardRoutingRulesResponse to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ApplyShardRoutingRulesResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an ApplySchemaRequest. */ + interface IApplySchemaRequest { + + /** ApplySchemaRequest keyspace */ + keyspace?: (string|null); + + /** ApplySchemaRequest sql */ + sql?: (string[]|null); + + /** ApplySchemaRequest ddl_strategy */ + ddl_strategy?: (string|null); + + /** ApplySchemaRequest uuid_list */ + uuid_list?: (string[]|null); + + /** ApplySchemaRequest migration_context */ + migration_context?: (string|null); + + /** ApplySchemaRequest wait_replicas_timeout */ + wait_replicas_timeout?: (vttime.IDuration|null); + + /** ApplySchemaRequest caller_id */ + caller_id?: (vtrpc.ICallerID|null); + + /** ApplySchemaRequest batch_size */ + batch_size?: (number|Long|null); + } + + /** Represents an ApplySchemaRequest. */ + class ApplySchemaRequest implements IApplySchemaRequest { + + /** + * Constructs a new ApplySchemaRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IApplySchemaRequest); + + /** ApplySchemaRequest keyspace. */ + public keyspace: string; + + /** ApplySchemaRequest sql. */ + public sql: string[]; + + /** ApplySchemaRequest ddl_strategy. */ + public ddl_strategy: string; + + /** ApplySchemaRequest uuid_list. */ + public uuid_list: string[]; + + /** ApplySchemaRequest migration_context. */ + public migration_context: string; + + /** ApplySchemaRequest wait_replicas_timeout. */ + public wait_replicas_timeout?: (vttime.IDuration|null); + + /** ApplySchemaRequest caller_id. */ + public caller_id?: (vtrpc.ICallerID|null); + + /** ApplySchemaRequest batch_size. 
*/ + public batch_size: (number|Long); + + /** + * Creates a new ApplySchemaRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns ApplySchemaRequest instance + */ + public static create(properties?: vtctldata.IApplySchemaRequest): vtctldata.ApplySchemaRequest; + + /** + * Encodes the specified ApplySchemaRequest message. Does not implicitly {@link vtctldata.ApplySchemaRequest.verify|verify} messages. + * @param message ApplySchemaRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IApplySchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ApplySchemaRequest message, length delimited. Does not implicitly {@link vtctldata.ApplySchemaRequest.verify|verify} messages. + * @param message ApplySchemaRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IApplySchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ApplySchemaRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ApplySchemaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ApplySchemaRequest; + + /** + * Decodes an ApplySchemaRequest message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns ApplySchemaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ApplySchemaRequest; + + /** + * Verifies an ApplySchemaRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ApplySchemaRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ApplySchemaRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ApplySchemaRequest; + + /** + * Creates a plain object from an ApplySchemaRequest message. Also converts values to other types if specified. + * @param message ApplySchemaRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ApplySchemaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ApplySchemaRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ApplySchemaRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an ApplySchemaResponse. */ + interface IApplySchemaResponse { + + /** ApplySchemaResponse uuid_list */ + uuid_list?: (string[]|null); + + /** ApplySchemaResponse rows_affected_by_shard */ + rows_affected_by_shard?: ({ [k: string]: (number|Long) }|null); + } + + /** Represents an ApplySchemaResponse. 
*/ + class ApplySchemaResponse implements IApplySchemaResponse { + + /** + * Constructs a new ApplySchemaResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IApplySchemaResponse); + + /** ApplySchemaResponse uuid_list. */ + public uuid_list: string[]; + + /** ApplySchemaResponse rows_affected_by_shard. */ + public rows_affected_by_shard: { [k: string]: (number|Long) }; + + /** + * Creates a new ApplySchemaResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns ApplySchemaResponse instance + */ + public static create(properties?: vtctldata.IApplySchemaResponse): vtctldata.ApplySchemaResponse; + + /** + * Encodes the specified ApplySchemaResponse message. Does not implicitly {@link vtctldata.ApplySchemaResponse.verify|verify} messages. + * @param message ApplySchemaResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IApplySchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ApplySchemaResponse message, length delimited. Does not implicitly {@link vtctldata.ApplySchemaResponse.verify|verify} messages. + * @param message ApplySchemaResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IApplySchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ApplySchemaResponse message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ApplySchemaResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ApplySchemaResponse; + + /** + * Decodes an ApplySchemaResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ApplySchemaResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ApplySchemaResponse; + + /** + * Verifies an ApplySchemaResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ApplySchemaResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ApplySchemaResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ApplySchemaResponse; + + /** + * Creates a plain object from an ApplySchemaResponse message. Also converts values to other types if specified. + * @param message ApplySchemaResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ApplySchemaResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ApplySchemaResponse to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ApplySchemaResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an ApplyVSchemaRequest. */ + interface IApplyVSchemaRequest { + + /** ApplyVSchemaRequest keyspace */ + keyspace?: (string|null); + + /** ApplyVSchemaRequest skip_rebuild */ + skip_rebuild?: (boolean|null); + + /** ApplyVSchemaRequest dry_run */ + dry_run?: (boolean|null); + + /** ApplyVSchemaRequest cells */ + cells?: (string[]|null); + + /** ApplyVSchemaRequest v_schema */ + v_schema?: (vschema.IKeyspace|null); + + /** ApplyVSchemaRequest sql */ + sql?: (string|null); + } + + /** Represents an ApplyVSchemaRequest. */ + class ApplyVSchemaRequest implements IApplyVSchemaRequest { + + /** + * Constructs a new ApplyVSchemaRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IApplyVSchemaRequest); + + /** ApplyVSchemaRequest keyspace. */ + public keyspace: string; + + /** ApplyVSchemaRequest skip_rebuild. */ + public skip_rebuild: boolean; + + /** ApplyVSchemaRequest dry_run. */ + public dry_run: boolean; + + /** ApplyVSchemaRequest cells. */ + public cells: string[]; + + /** ApplyVSchemaRequest v_schema. */ + public v_schema?: (vschema.IKeyspace|null); + + /** ApplyVSchemaRequest sql. */ + public sql: string; + + /** + * Creates a new ApplyVSchemaRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns ApplyVSchemaRequest instance + */ + public static create(properties?: vtctldata.IApplyVSchemaRequest): vtctldata.ApplyVSchemaRequest; + + /** + * Encodes the specified ApplyVSchemaRequest message. Does not implicitly {@link vtctldata.ApplyVSchemaRequest.verify|verify} messages. 
+ * @param message ApplyVSchemaRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IApplyVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ApplyVSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.ApplyVSchemaRequest.verify|verify} messages. + * @param message ApplyVSchemaRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IApplyVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ApplyVSchemaRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ApplyVSchemaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ApplyVSchemaRequest; + + /** + * Decodes an ApplyVSchemaRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ApplyVSchemaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ApplyVSchemaRequest; + + /** + * Verifies an ApplyVSchemaRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ApplyVSchemaRequest message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns ApplyVSchemaRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ApplyVSchemaRequest; + + /** + * Creates a plain object from an ApplyVSchemaRequest message. Also converts values to other types if specified. + * @param message ApplyVSchemaRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ApplyVSchemaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ApplyVSchemaRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ApplyVSchemaRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an ApplyVSchemaResponse. */ + interface IApplyVSchemaResponse { + + /** ApplyVSchemaResponse v_schema */ + v_schema?: (vschema.IKeyspace|null); + } + + /** Represents an ApplyVSchemaResponse. */ + class ApplyVSchemaResponse implements IApplyVSchemaResponse { + + /** + * Constructs a new ApplyVSchemaResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IApplyVSchemaResponse); + + /** ApplyVSchemaResponse v_schema. */ + public v_schema?: (vschema.IKeyspace|null); + + /** + * Creates a new ApplyVSchemaResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns ApplyVSchemaResponse instance + */ + public static create(properties?: vtctldata.IApplyVSchemaResponse): vtctldata.ApplyVSchemaResponse; + + /** + * Encodes the specified ApplyVSchemaResponse message. Does not implicitly {@link vtctldata.ApplyVSchemaResponse.verify|verify} messages. 
+ * @param message ApplyVSchemaResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IApplyVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ApplyVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.ApplyVSchemaResponse.verify|verify} messages. + * @param message ApplyVSchemaResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IApplyVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ApplyVSchemaResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ApplyVSchemaResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ApplyVSchemaResponse; + + /** + * Decodes an ApplyVSchemaResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ApplyVSchemaResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ApplyVSchemaResponse; + + /** + * Verifies an ApplyVSchemaResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ApplyVSchemaResponse message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns ApplyVSchemaResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ApplyVSchemaResponse; + + /** + * Creates a plain object from an ApplyVSchemaResponse message. Also converts values to other types if specified. + * @param message ApplyVSchemaResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ApplyVSchemaResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ApplyVSchemaResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ApplyVSchemaResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a BackupRequest. */ + interface IBackupRequest { + + /** BackupRequest tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); + + /** BackupRequest allow_primary */ + allow_primary?: (boolean|null); + + /** BackupRequest concurrency */ + concurrency?: (number|Long|null); + + /** BackupRequest incremental_from_pos */ + incremental_from_pos?: (string|null); + + /** BackupRequest upgrade_safe */ + upgrade_safe?: (boolean|null); + } + + /** Represents a BackupRequest. */ + class BackupRequest implements IBackupRequest { + + /** + * Constructs a new BackupRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IBackupRequest); + + /** BackupRequest tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); + + /** BackupRequest allow_primary. */ + public allow_primary: boolean; + + /** BackupRequest concurrency. */ + public concurrency: (number|Long); + + /** BackupRequest incremental_from_pos. */ + public incremental_from_pos: string; + + /** BackupRequest upgrade_safe. 
*/ + public upgrade_safe: boolean; + + /** + * Creates a new BackupRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns BackupRequest instance + */ + public static create(properties?: vtctldata.IBackupRequest): vtctldata.BackupRequest; + + /** + * Encodes the specified BackupRequest message. Does not implicitly {@link vtctldata.BackupRequest.verify|verify} messages. + * @param message BackupRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IBackupRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified BackupRequest message, length delimited. Does not implicitly {@link vtctldata.BackupRequest.verify|verify} messages. + * @param message BackupRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IBackupRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a BackupRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns BackupRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.BackupRequest; + + /** + * Decodes a BackupRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns BackupRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.BackupRequest; + + /** + * Verifies a BackupRequest message. 
+ * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a BackupRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns BackupRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.BackupRequest; + + /** + * Creates a plain object from a BackupRequest message. Also converts values to other types if specified. + * @param message BackupRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.BackupRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this BackupRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for BackupRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a BackupResponse. */ + interface IBackupResponse { + + /** BackupResponse tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); + + /** BackupResponse keyspace */ + keyspace?: (string|null); + + /** BackupResponse shard */ + shard?: (string|null); + + /** BackupResponse event */ + event?: (logutil.IEvent|null); + } + + /** Represents a BackupResponse. */ + class BackupResponse implements IBackupResponse { + + /** + * Constructs a new BackupResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IBackupResponse); + + /** BackupResponse tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); + + /** BackupResponse keyspace. */ + public keyspace: string; + + /** BackupResponse shard. */ + public shard: string; + + /** BackupResponse event. 
*/ + public event?: (logutil.IEvent|null); + + /** + * Creates a new BackupResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns BackupResponse instance + */ + public static create(properties?: vtctldata.IBackupResponse): vtctldata.BackupResponse; + + /** + * Encodes the specified BackupResponse message. Does not implicitly {@link vtctldata.BackupResponse.verify|verify} messages. + * @param message BackupResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IBackupResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified BackupResponse message, length delimited. Does not implicitly {@link vtctldata.BackupResponse.verify|verify} messages. + * @param message BackupResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IBackupResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a BackupResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns BackupResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.BackupResponse; + + /** + * Decodes a BackupResponse message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns BackupResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.BackupResponse; + + /** + * Verifies a BackupResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a BackupResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns BackupResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.BackupResponse; + + /** + * Creates a plain object from a BackupResponse message. Also converts values to other types if specified. + * @param message BackupResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.BackupResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this BackupResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for BackupResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a BackupShardRequest. 
*/ + interface IBackupShardRequest { + + /** BackupShardRequest keyspace */ + keyspace?: (string|null); + + /** BackupShardRequest shard */ + shard?: (string|null); + + /** BackupShardRequest allow_primary */ + allow_primary?: (boolean|null); + + /** BackupShardRequest concurrency */ + concurrency?: (number|Long|null); + + /** BackupShardRequest upgrade_safe */ + upgrade_safe?: (boolean|null); + + /** BackupShardRequest incremental_from_pos */ + incremental_from_pos?: (string|null); + } + + /** Represents a BackupShardRequest. */ + class BackupShardRequest implements IBackupShardRequest { + + /** + * Constructs a new BackupShardRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IBackupShardRequest); + + /** BackupShardRequest keyspace. */ + public keyspace: string; + + /** BackupShardRequest shard. */ + public shard: string; + + /** BackupShardRequest allow_primary. */ + public allow_primary: boolean; + + /** BackupShardRequest concurrency. */ + public concurrency: (number|Long); + + /** BackupShardRequest upgrade_safe. */ + public upgrade_safe: boolean; + + /** BackupShardRequest incremental_from_pos. */ + public incremental_from_pos: string; + + /** + * Creates a new BackupShardRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns BackupShardRequest instance + */ + public static create(properties?: vtctldata.IBackupShardRequest): vtctldata.BackupShardRequest; + + /** + * Encodes the specified BackupShardRequest message. Does not implicitly {@link vtctldata.BackupShardRequest.verify|verify} messages. + * @param message BackupShardRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IBackupShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified BackupShardRequest message, length delimited. 
Does not implicitly {@link vtctldata.BackupShardRequest.verify|verify} messages. + * @param message BackupShardRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IBackupShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a BackupShardRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns BackupShardRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.BackupShardRequest; + + /** + * Decodes a BackupShardRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns BackupShardRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.BackupShardRequest; + + /** + * Verifies a BackupShardRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a BackupShardRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns BackupShardRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.BackupShardRequest; + + /** + * Creates a plain object from a BackupShardRequest message. Also converts values to other types if specified. 
+ * @param message BackupShardRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.BackupShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this BackupShardRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for BackupShardRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a CancelSchemaMigrationRequest. */ + interface ICancelSchemaMigrationRequest { + + /** CancelSchemaMigrationRequest keyspace */ + keyspace?: (string|null); + + /** CancelSchemaMigrationRequest uuid */ + uuid?: (string|null); + } + + /** Represents a CancelSchemaMigrationRequest. */ + class CancelSchemaMigrationRequest implements ICancelSchemaMigrationRequest { + + /** + * Constructs a new CancelSchemaMigrationRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.ICancelSchemaMigrationRequest); + + /** CancelSchemaMigrationRequest keyspace. */ + public keyspace: string; + + /** CancelSchemaMigrationRequest uuid. */ + public uuid: string; + + /** + * Creates a new CancelSchemaMigrationRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns CancelSchemaMigrationRequest instance + */ + public static create(properties?: vtctldata.ICancelSchemaMigrationRequest): vtctldata.CancelSchemaMigrationRequest; + + /** + * Encodes the specified CancelSchemaMigrationRequest message. Does not implicitly {@link vtctldata.CancelSchemaMigrationRequest.verify|verify} messages. 
+ * @param message CancelSchemaMigrationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.ICancelSchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified CancelSchemaMigrationRequest message, length delimited. Does not implicitly {@link vtctldata.CancelSchemaMigrationRequest.verify|verify} messages. + * @param message CancelSchemaMigrationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.ICancelSchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a CancelSchemaMigrationRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns CancelSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.CancelSchemaMigrationRequest; + + /** + * Decodes a CancelSchemaMigrationRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns CancelSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.CancelSchemaMigrationRequest; + + /** + * Verifies a CancelSchemaMigrationRequest message. 
+ * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a CancelSchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns CancelSchemaMigrationRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.CancelSchemaMigrationRequest; + + /** + * Creates a plain object from a CancelSchemaMigrationRequest message. Also converts values to other types if specified. + * @param message CancelSchemaMigrationRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.CancelSchemaMigrationRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this CancelSchemaMigrationRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for CancelSchemaMigrationRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a CancelSchemaMigrationResponse. */ + interface ICancelSchemaMigrationResponse { + + /** CancelSchemaMigrationResponse rows_affected_by_shard */ + rows_affected_by_shard?: ({ [k: string]: (number|Long) }|null); + } + + /** Represents a CancelSchemaMigrationResponse. */ + class CancelSchemaMigrationResponse implements ICancelSchemaMigrationResponse { + + /** + * Constructs a new CancelSchemaMigrationResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.ICancelSchemaMigrationResponse); + + /** CancelSchemaMigrationResponse rows_affected_by_shard. 
*/ + public rows_affected_by_shard: { [k: string]: (number|Long) }; + + /** + * Creates a new CancelSchemaMigrationResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns CancelSchemaMigrationResponse instance + */ + public static create(properties?: vtctldata.ICancelSchemaMigrationResponse): vtctldata.CancelSchemaMigrationResponse; + + /** + * Encodes the specified CancelSchemaMigrationResponse message. Does not implicitly {@link vtctldata.CancelSchemaMigrationResponse.verify|verify} messages. + * @param message CancelSchemaMigrationResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.ICancelSchemaMigrationResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified CancelSchemaMigrationResponse message, length delimited. Does not implicitly {@link vtctldata.CancelSchemaMigrationResponse.verify|verify} messages. + * @param message CancelSchemaMigrationResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.ICancelSchemaMigrationResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a CancelSchemaMigrationResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns CancelSchemaMigrationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.CancelSchemaMigrationResponse; + + /** + * Decodes a CancelSchemaMigrationResponse message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns CancelSchemaMigrationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.CancelSchemaMigrationResponse; + + /** + * Verifies a CancelSchemaMigrationResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a CancelSchemaMigrationResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns CancelSchemaMigrationResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.CancelSchemaMigrationResponse; + + /** + * Creates a plain object from a CancelSchemaMigrationResponse message. Also converts values to other types if specified. + * @param message CancelSchemaMigrationResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.CancelSchemaMigrationResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this CancelSchemaMigrationResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for CancelSchemaMigrationResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a ChangeTabletTypeRequest. 
*/ + interface IChangeTabletTypeRequest { + + /** ChangeTabletTypeRequest tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); + + /** ChangeTabletTypeRequest db_type */ + db_type?: (topodata.TabletType|null); + + /** ChangeTabletTypeRequest dry_run */ + dry_run?: (boolean|null); + } + + /** Represents a ChangeTabletTypeRequest. */ + class ChangeTabletTypeRequest implements IChangeTabletTypeRequest { + + /** + * Constructs a new ChangeTabletTypeRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IChangeTabletTypeRequest); + + /** ChangeTabletTypeRequest tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); + + /** ChangeTabletTypeRequest db_type. */ + public db_type: topodata.TabletType; + + /** ChangeTabletTypeRequest dry_run. */ + public dry_run: boolean; + + /** + * Creates a new ChangeTabletTypeRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns ChangeTabletTypeRequest instance + */ + public static create(properties?: vtctldata.IChangeTabletTypeRequest): vtctldata.ChangeTabletTypeRequest; + + /** + * Encodes the specified ChangeTabletTypeRequest message. Does not implicitly {@link vtctldata.ChangeTabletTypeRequest.verify|verify} messages. + * @param message ChangeTabletTypeRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IChangeTabletTypeRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ChangeTabletTypeRequest message, length delimited. Does not implicitly {@link vtctldata.ChangeTabletTypeRequest.verify|verify} messages. 
+ * @param message ChangeTabletTypeRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IChangeTabletTypeRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ChangeTabletTypeRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ChangeTabletTypeRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ChangeTabletTypeRequest; + + /** + * Decodes a ChangeTabletTypeRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ChangeTabletTypeRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ChangeTabletTypeRequest; + + /** + * Verifies a ChangeTabletTypeRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ChangeTabletTypeRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ChangeTabletTypeRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ChangeTabletTypeRequest; + + /** + * Creates a plain object from a ChangeTabletTypeRequest message. Also converts values to other types if specified. 
+ * @param message ChangeTabletTypeRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ChangeTabletTypeRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ChangeTabletTypeRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ChangeTabletTypeRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a ChangeTabletTypeResponse. */ + interface IChangeTabletTypeResponse { + + /** ChangeTabletTypeResponse before_tablet */ + before_tablet?: (topodata.ITablet|null); + + /** ChangeTabletTypeResponse after_tablet */ + after_tablet?: (topodata.ITablet|null); + + /** ChangeTabletTypeResponse was_dry_run */ + was_dry_run?: (boolean|null); + } + + /** Represents a ChangeTabletTypeResponse. */ + class ChangeTabletTypeResponse implements IChangeTabletTypeResponse { + + /** + * Constructs a new ChangeTabletTypeResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IChangeTabletTypeResponse); + + /** ChangeTabletTypeResponse before_tablet. */ + public before_tablet?: (topodata.ITablet|null); + + /** ChangeTabletTypeResponse after_tablet. */ + public after_tablet?: (topodata.ITablet|null); + + /** ChangeTabletTypeResponse was_dry_run. */ + public was_dry_run: boolean; + + /** + * Creates a new ChangeTabletTypeResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns ChangeTabletTypeResponse instance + */ + public static create(properties?: vtctldata.IChangeTabletTypeResponse): vtctldata.ChangeTabletTypeResponse; + + /** + * Encodes the specified ChangeTabletTypeResponse message. 
Does not implicitly {@link vtctldata.ChangeTabletTypeResponse.verify|verify} messages. + * @param message ChangeTabletTypeResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IChangeTabletTypeResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ChangeTabletTypeResponse message, length delimited. Does not implicitly {@link vtctldata.ChangeTabletTypeResponse.verify|verify} messages. + * @param message ChangeTabletTypeResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IChangeTabletTypeResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ChangeTabletTypeResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ChangeTabletTypeResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ChangeTabletTypeResponse; + + /** + * Decodes a ChangeTabletTypeResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ChangeTabletTypeResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ChangeTabletTypeResponse; + + /** + * Verifies a ChangeTabletTypeResponse message. 
+ * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ChangeTabletTypeResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ChangeTabletTypeResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ChangeTabletTypeResponse; + + /** + * Creates a plain object from a ChangeTabletTypeResponse message. Also converts values to other types if specified. + * @param message ChangeTabletTypeResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ChangeTabletTypeResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ChangeTabletTypeResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ChangeTabletTypeResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a CleanupSchemaMigrationRequest. */ + interface ICleanupSchemaMigrationRequest { + + /** CleanupSchemaMigrationRequest keyspace */ + keyspace?: (string|null); + + /** CleanupSchemaMigrationRequest uuid */ + uuid?: (string|null); + } + + /** Represents a CleanupSchemaMigrationRequest. */ + class CleanupSchemaMigrationRequest implements ICleanupSchemaMigrationRequest { + + /** + * Constructs a new CleanupSchemaMigrationRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.ICleanupSchemaMigrationRequest); + + /** CleanupSchemaMigrationRequest keyspace. */ + public keyspace: string; + + /** CleanupSchemaMigrationRequest uuid. 
*/ + public uuid: string; + + /** + * Creates a new CleanupSchemaMigrationRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns CleanupSchemaMigrationRequest instance + */ + public static create(properties?: vtctldata.ICleanupSchemaMigrationRequest): vtctldata.CleanupSchemaMigrationRequest; + + /** + * Encodes the specified CleanupSchemaMigrationRequest message. Does not implicitly {@link vtctldata.CleanupSchemaMigrationRequest.verify|verify} messages. + * @param message CleanupSchemaMigrationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.ICleanupSchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified CleanupSchemaMigrationRequest message, length delimited. Does not implicitly {@link vtctldata.CleanupSchemaMigrationRequest.verify|verify} messages. + * @param message CleanupSchemaMigrationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.ICleanupSchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a CleanupSchemaMigrationRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns CleanupSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.CleanupSchemaMigrationRequest; + + /** + * Decodes a CleanupSchemaMigrationRequest message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns CleanupSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.CleanupSchemaMigrationRequest; + + /** + * Verifies a CleanupSchemaMigrationRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a CleanupSchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns CleanupSchemaMigrationRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.CleanupSchemaMigrationRequest; + + /** + * Creates a plain object from a CleanupSchemaMigrationRequest message. Also converts values to other types if specified. + * @param message CleanupSchemaMigrationRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.CleanupSchemaMigrationRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this CleanupSchemaMigrationRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for CleanupSchemaMigrationRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a CleanupSchemaMigrationResponse. */ + interface ICleanupSchemaMigrationResponse { + + /** CleanupSchemaMigrationResponse rows_affected_by_shard */ + rows_affected_by_shard?: ({ [k: string]: (number|Long) }|null); + } + + /** Represents a CleanupSchemaMigrationResponse. 
*/ + class CleanupSchemaMigrationResponse implements ICleanupSchemaMigrationResponse { + + /** + * Constructs a new CleanupSchemaMigrationResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.ICleanupSchemaMigrationResponse); + + /** CleanupSchemaMigrationResponse rows_affected_by_shard. */ + public rows_affected_by_shard: { [k: string]: (number|Long) }; + + /** + * Creates a new CleanupSchemaMigrationResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns CleanupSchemaMigrationResponse instance + */ + public static create(properties?: vtctldata.ICleanupSchemaMigrationResponse): vtctldata.CleanupSchemaMigrationResponse; + + /** + * Encodes the specified CleanupSchemaMigrationResponse message. Does not implicitly {@link vtctldata.CleanupSchemaMigrationResponse.verify|verify} messages. + * @param message CleanupSchemaMigrationResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.ICleanupSchemaMigrationResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified CleanupSchemaMigrationResponse message, length delimited. Does not implicitly {@link vtctldata.CleanupSchemaMigrationResponse.verify|verify} messages. + * @param message CleanupSchemaMigrationResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.ICleanupSchemaMigrationResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a CleanupSchemaMigrationResponse message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns CleanupSchemaMigrationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.CleanupSchemaMigrationResponse; + + /** + * Decodes a CleanupSchemaMigrationResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns CleanupSchemaMigrationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.CleanupSchemaMigrationResponse; + + /** + * Verifies a CleanupSchemaMigrationResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a CleanupSchemaMigrationResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns CleanupSchemaMigrationResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.CleanupSchemaMigrationResponse; + + /** + * Creates a plain object from a CleanupSchemaMigrationResponse message. Also converts values to other types if specified. + * @param message CleanupSchemaMigrationResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.CleanupSchemaMigrationResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this CleanupSchemaMigrationResponse to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for CleanupSchemaMigrationResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a CompleteSchemaMigrationRequest. */ + interface ICompleteSchemaMigrationRequest { + + /** CompleteSchemaMigrationRequest keyspace */ + keyspace?: (string|null); + + /** CompleteSchemaMigrationRequest uuid */ + uuid?: (string|null); + } + + /** Represents a CompleteSchemaMigrationRequest. */ + class CompleteSchemaMigrationRequest implements ICompleteSchemaMigrationRequest { + + /** + * Constructs a new CompleteSchemaMigrationRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.ICompleteSchemaMigrationRequest); + + /** CompleteSchemaMigrationRequest keyspace. */ + public keyspace: string; + + /** CompleteSchemaMigrationRequest uuid. */ + public uuid: string; + + /** + * Creates a new CompleteSchemaMigrationRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns CompleteSchemaMigrationRequest instance + */ + public static create(properties?: vtctldata.ICompleteSchemaMigrationRequest): vtctldata.CompleteSchemaMigrationRequest; + + /** + * Encodes the specified CompleteSchemaMigrationRequest message. Does not implicitly {@link vtctldata.CompleteSchemaMigrationRequest.verify|verify} messages. + * @param message CompleteSchemaMigrationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.ICompleteSchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified CompleteSchemaMigrationRequest message, length delimited. 
Does not implicitly {@link vtctldata.CompleteSchemaMigrationRequest.verify|verify} messages. + * @param message CompleteSchemaMigrationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.ICompleteSchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a CompleteSchemaMigrationRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns CompleteSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.CompleteSchemaMigrationRequest; + + /** + * Decodes a CompleteSchemaMigrationRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns CompleteSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.CompleteSchemaMigrationRequest; + + /** + * Verifies a CompleteSchemaMigrationRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a CompleteSchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns CompleteSchemaMigrationRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.CompleteSchemaMigrationRequest; + + /** + * Creates a plain object from a CompleteSchemaMigrationRequest message. Also converts values to other types if specified. + * @param message CompleteSchemaMigrationRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.CompleteSchemaMigrationRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this CompleteSchemaMigrationRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for CompleteSchemaMigrationRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a CompleteSchemaMigrationResponse. */ + interface ICompleteSchemaMigrationResponse { + + /** CompleteSchemaMigrationResponse rows_affected_by_shard */ + rows_affected_by_shard?: ({ [k: string]: (number|Long) }|null); + } + + /** Represents a CompleteSchemaMigrationResponse. */ + class CompleteSchemaMigrationResponse implements ICompleteSchemaMigrationResponse { + + /** + * Constructs a new CompleteSchemaMigrationResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.ICompleteSchemaMigrationResponse); + + /** CompleteSchemaMigrationResponse rows_affected_by_shard. */ + public rows_affected_by_shard: { [k: string]: (number|Long) }; + + /** + * Creates a new CompleteSchemaMigrationResponse instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns CompleteSchemaMigrationResponse instance + */ + public static create(properties?: vtctldata.ICompleteSchemaMigrationResponse): vtctldata.CompleteSchemaMigrationResponse; + + /** + * Encodes the specified CompleteSchemaMigrationResponse message. Does not implicitly {@link vtctldata.CompleteSchemaMigrationResponse.verify|verify} messages. + * @param message CompleteSchemaMigrationResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.ICompleteSchemaMigrationResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified CompleteSchemaMigrationResponse message, length delimited. Does not implicitly {@link vtctldata.CompleteSchemaMigrationResponse.verify|verify} messages. + * @param message CompleteSchemaMigrationResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.ICompleteSchemaMigrationResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a CompleteSchemaMigrationResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns CompleteSchemaMigrationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.CompleteSchemaMigrationResponse; + + /** + * Decodes a CompleteSchemaMigrationResponse message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns CompleteSchemaMigrationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.CompleteSchemaMigrationResponse; + + /** + * Verifies a CompleteSchemaMigrationResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a CompleteSchemaMigrationResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns CompleteSchemaMigrationResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.CompleteSchemaMigrationResponse; + + /** + * Creates a plain object from a CompleteSchemaMigrationResponse message. Also converts values to other types if specified. + * @param message CompleteSchemaMigrationResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.CompleteSchemaMigrationResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this CompleteSchemaMigrationResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for CompleteSchemaMigrationResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a CreateKeyspaceRequest. 
*/ + interface ICreateKeyspaceRequest { + + /** CreateKeyspaceRequest name */ + name?: (string|null); + + /** CreateKeyspaceRequest force */ + force?: (boolean|null); + + /** CreateKeyspaceRequest allow_empty_v_schema */ + allow_empty_v_schema?: (boolean|null); + + /** CreateKeyspaceRequest served_froms */ + served_froms?: (topodata.Keyspace.IServedFrom[]|null); + + /** CreateKeyspaceRequest type */ + type?: (topodata.KeyspaceType|null); + + /** CreateKeyspaceRequest base_keyspace */ + base_keyspace?: (string|null); + + /** CreateKeyspaceRequest snapshot_time */ + snapshot_time?: (vttime.ITime|null); + + /** CreateKeyspaceRequest durability_policy */ + durability_policy?: (string|null); + + /** CreateKeyspaceRequest sidecar_db_name */ + sidecar_db_name?: (string|null); + } + + /** Represents a CreateKeyspaceRequest. */ + class CreateKeyspaceRequest implements ICreateKeyspaceRequest { + + /** + * Constructs a new CreateKeyspaceRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.ICreateKeyspaceRequest); + + /** CreateKeyspaceRequest name. */ + public name: string; + + /** CreateKeyspaceRequest force. */ + public force: boolean; + + /** CreateKeyspaceRequest allow_empty_v_schema. */ + public allow_empty_v_schema: boolean; + + /** CreateKeyspaceRequest served_froms. */ + public served_froms: topodata.Keyspace.IServedFrom[]; + + /** CreateKeyspaceRequest type. */ + public type: topodata.KeyspaceType; + + /** CreateKeyspaceRequest base_keyspace. */ + public base_keyspace: string; + + /** CreateKeyspaceRequest snapshot_time. */ + public snapshot_time?: (vttime.ITime|null); + + /** CreateKeyspaceRequest durability_policy. */ + public durability_policy: string; + + /** CreateKeyspaceRequest sidecar_db_name. */ + public sidecar_db_name: string; + + /** + * Creates a new CreateKeyspaceRequest instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns CreateKeyspaceRequest instance + */ + public static create(properties?: vtctldata.ICreateKeyspaceRequest): vtctldata.CreateKeyspaceRequest; + + /** + * Encodes the specified CreateKeyspaceRequest message. Does not implicitly {@link vtctldata.CreateKeyspaceRequest.verify|verify} messages. + * @param message CreateKeyspaceRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.ICreateKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified CreateKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.CreateKeyspaceRequest.verify|verify} messages. + * @param message CreateKeyspaceRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.ICreateKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a CreateKeyspaceRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns CreateKeyspaceRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.CreateKeyspaceRequest; + + /** + * Decodes a CreateKeyspaceRequest message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns CreateKeyspaceRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.CreateKeyspaceRequest; + + /** + * Verifies a CreateKeyspaceRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a CreateKeyspaceRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns CreateKeyspaceRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.CreateKeyspaceRequest; + + /** + * Creates a plain object from a CreateKeyspaceRequest message. Also converts values to other types if specified. + * @param message CreateKeyspaceRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.CreateKeyspaceRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this CreateKeyspaceRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for CreateKeyspaceRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a CreateKeyspaceResponse. */ + interface ICreateKeyspaceResponse { + + /** CreateKeyspaceResponse keyspace */ + keyspace?: (vtctldata.IKeyspace|null); + } + + /** Represents a CreateKeyspaceResponse. */ + class CreateKeyspaceResponse implements ICreateKeyspaceResponse { + + /** + * Constructs a new CreateKeyspaceResponse. 
+ * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.ICreateKeyspaceResponse); + + /** CreateKeyspaceResponse keyspace. */ + public keyspace?: (vtctldata.IKeyspace|null); + + /** + * Creates a new CreateKeyspaceResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns CreateKeyspaceResponse instance + */ + public static create(properties?: vtctldata.ICreateKeyspaceResponse): vtctldata.CreateKeyspaceResponse; + + /** + * Encodes the specified CreateKeyspaceResponse message. Does not implicitly {@link vtctldata.CreateKeyspaceResponse.verify|verify} messages. + * @param message CreateKeyspaceResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.ICreateKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified CreateKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.CreateKeyspaceResponse.verify|verify} messages. + * @param message CreateKeyspaceResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.ICreateKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a CreateKeyspaceResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns CreateKeyspaceResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.CreateKeyspaceResponse; + + /** + * Decodes a CreateKeyspaceResponse message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns CreateKeyspaceResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.CreateKeyspaceResponse; + + /** + * Verifies a CreateKeyspaceResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a CreateKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns CreateKeyspaceResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.CreateKeyspaceResponse; + + /** + * Creates a plain object from a CreateKeyspaceResponse message. Also converts values to other types if specified. + * @param message CreateKeyspaceResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.CreateKeyspaceResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this CreateKeyspaceResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for CreateKeyspaceResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a CreateShardRequest. 
*/ + interface ICreateShardRequest { + + /** CreateShardRequest keyspace */ + keyspace?: (string|null); + + /** CreateShardRequest shard_name */ + shard_name?: (string|null); + + /** CreateShardRequest force */ + force?: (boolean|null); + + /** CreateShardRequest include_parent */ + include_parent?: (boolean|null); + } + + /** Represents a CreateShardRequest. */ + class CreateShardRequest implements ICreateShardRequest { + + /** + * Constructs a new CreateShardRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.ICreateShardRequest); + + /** CreateShardRequest keyspace. */ + public keyspace: string; + + /** CreateShardRequest shard_name. */ + public shard_name: string; + + /** CreateShardRequest force. */ + public force: boolean; + + /** CreateShardRequest include_parent. */ + public include_parent: boolean; + + /** + * Creates a new CreateShardRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns CreateShardRequest instance + */ + public static create(properties?: vtctldata.ICreateShardRequest): vtctldata.CreateShardRequest; + + /** + * Encodes the specified CreateShardRequest message. Does not implicitly {@link vtctldata.CreateShardRequest.verify|verify} messages. + * @param message CreateShardRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.ICreateShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified CreateShardRequest message, length delimited. Does not implicitly {@link vtctldata.CreateShardRequest.verify|verify} messages. 
+ * @param message CreateShardRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.ICreateShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a CreateShardRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns CreateShardRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.CreateShardRequest; + + /** + * Decodes a CreateShardRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns CreateShardRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.CreateShardRequest; + + /** + * Verifies a CreateShardRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a CreateShardRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns CreateShardRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.CreateShardRequest; + + /** + * Creates a plain object from a CreateShardRequest message. Also converts values to other types if specified. 
+ * @param message CreateShardRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.CreateShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this CreateShardRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for CreateShardRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a CreateShardResponse. */ + interface ICreateShardResponse { + + /** CreateShardResponse keyspace */ + keyspace?: (vtctldata.IKeyspace|null); + + /** CreateShardResponse shard */ + shard?: (vtctldata.IShard|null); + + /** CreateShardResponse shard_already_exists */ + shard_already_exists?: (boolean|null); + } + + /** Represents a CreateShardResponse. */ + class CreateShardResponse implements ICreateShardResponse { + + /** + * Constructs a new CreateShardResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.ICreateShardResponse); + + /** CreateShardResponse keyspace. */ + public keyspace?: (vtctldata.IKeyspace|null); + + /** CreateShardResponse shard. */ + public shard?: (vtctldata.IShard|null); + + /** CreateShardResponse shard_already_exists. */ + public shard_already_exists: boolean; + + /** + * Creates a new CreateShardResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns CreateShardResponse instance + */ + public static create(properties?: vtctldata.ICreateShardResponse): vtctldata.CreateShardResponse; + + /** + * Encodes the specified CreateShardResponse message. Does not implicitly {@link vtctldata.CreateShardResponse.verify|verify} messages. 
+ * @param message CreateShardResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.ICreateShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified CreateShardResponse message, length delimited. Does not implicitly {@link vtctldata.CreateShardResponse.verify|verify} messages. + * @param message CreateShardResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.ICreateShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a CreateShardResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns CreateShardResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.CreateShardResponse; + + /** + * Decodes a CreateShardResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns CreateShardResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.CreateShardResponse; + + /** + * Verifies a CreateShardResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a CreateShardResponse message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns CreateShardResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.CreateShardResponse; + + /** + * Creates a plain object from a CreateShardResponse message. Also converts values to other types if specified. + * @param message CreateShardResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.CreateShardResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this CreateShardResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for CreateShardResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a DeleteCellInfoRequest. */ + interface IDeleteCellInfoRequest { + + /** DeleteCellInfoRequest name */ + name?: (string|null); + + /** DeleteCellInfoRequest force */ + force?: (boolean|null); + } + + /** Represents a DeleteCellInfoRequest. */ + class DeleteCellInfoRequest implements IDeleteCellInfoRequest { + + /** + * Constructs a new DeleteCellInfoRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IDeleteCellInfoRequest); + + /** DeleteCellInfoRequest name. */ + public name: string; + + /** DeleteCellInfoRequest force. */ + public force: boolean; + + /** + * Creates a new DeleteCellInfoRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns DeleteCellInfoRequest instance + */ + public static create(properties?: vtctldata.IDeleteCellInfoRequest): vtctldata.DeleteCellInfoRequest; + + /** + * Encodes the specified DeleteCellInfoRequest message. Does not implicitly {@link vtctldata.DeleteCellInfoRequest.verify|verify} messages. 
+ * @param message DeleteCellInfoRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IDeleteCellInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified DeleteCellInfoRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteCellInfoRequest.verify|verify} messages. + * @param message DeleteCellInfoRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IDeleteCellInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a DeleteCellInfoRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns DeleteCellInfoRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteCellInfoRequest; + + /** + * Decodes a DeleteCellInfoRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns DeleteCellInfoRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteCellInfoRequest; + + /** + * Verifies a DeleteCellInfoRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a DeleteCellInfoRequest message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns DeleteCellInfoRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.DeleteCellInfoRequest; + + /** + * Creates a plain object from a DeleteCellInfoRequest message. Also converts values to other types if specified. + * @param message DeleteCellInfoRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.DeleteCellInfoRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this DeleteCellInfoRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for DeleteCellInfoRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a DeleteCellInfoResponse. */ + interface IDeleteCellInfoResponse { + } + + /** Represents a DeleteCellInfoResponse. */ + class DeleteCellInfoResponse implements IDeleteCellInfoResponse { + + /** + * Constructs a new DeleteCellInfoResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IDeleteCellInfoResponse); + + /** + * Creates a new DeleteCellInfoResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns DeleteCellInfoResponse instance + */ + public static create(properties?: vtctldata.IDeleteCellInfoResponse): vtctldata.DeleteCellInfoResponse; + + /** + * Encodes the specified DeleteCellInfoResponse message. Does not implicitly {@link vtctldata.DeleteCellInfoResponse.verify|verify} messages. 
+ * @param message DeleteCellInfoResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IDeleteCellInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified DeleteCellInfoResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteCellInfoResponse.verify|verify} messages. + * @param message DeleteCellInfoResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IDeleteCellInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a DeleteCellInfoResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns DeleteCellInfoResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteCellInfoResponse; + + /** + * Decodes a DeleteCellInfoResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns DeleteCellInfoResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteCellInfoResponse; + + /** + * Verifies a DeleteCellInfoResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a DeleteCellInfoResponse message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns DeleteCellInfoResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.DeleteCellInfoResponse; + + /** + * Creates a plain object from a DeleteCellInfoResponse message. Also converts values to other types if specified. + * @param message DeleteCellInfoResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.DeleteCellInfoResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this DeleteCellInfoResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for DeleteCellInfoResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a DeleteCellsAliasRequest. */ + interface IDeleteCellsAliasRequest { + + /** DeleteCellsAliasRequest name */ + name?: (string|null); + } + + /** Represents a DeleteCellsAliasRequest. */ + class DeleteCellsAliasRequest implements IDeleteCellsAliasRequest { + + /** + * Constructs a new DeleteCellsAliasRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IDeleteCellsAliasRequest); + + /** DeleteCellsAliasRequest name. */ + public name: string; + + /** + * Creates a new DeleteCellsAliasRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns DeleteCellsAliasRequest instance + */ + public static create(properties?: vtctldata.IDeleteCellsAliasRequest): vtctldata.DeleteCellsAliasRequest; + + /** + * Encodes the specified DeleteCellsAliasRequest message. Does not implicitly {@link vtctldata.DeleteCellsAliasRequest.verify|verify} messages. 
+ * @param message DeleteCellsAliasRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IDeleteCellsAliasRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified DeleteCellsAliasRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteCellsAliasRequest.verify|verify} messages. + * @param message DeleteCellsAliasRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IDeleteCellsAliasRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a DeleteCellsAliasRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns DeleteCellsAliasRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteCellsAliasRequest; + + /** + * Decodes a DeleteCellsAliasRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns DeleteCellsAliasRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteCellsAliasRequest; + + /** + * Verifies a DeleteCellsAliasRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a DeleteCellsAliasRequest message from a plain object. 
Also converts values to their respective internal types. + * @param object Plain object + * @returns DeleteCellsAliasRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.DeleteCellsAliasRequest; + + /** + * Creates a plain object from a DeleteCellsAliasRequest message. Also converts values to other types if specified. + * @param message DeleteCellsAliasRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.DeleteCellsAliasRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this DeleteCellsAliasRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for DeleteCellsAliasRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a DeleteCellsAliasResponse. */ + interface IDeleteCellsAliasResponse { + } + + /** Represents a DeleteCellsAliasResponse. */ + class DeleteCellsAliasResponse implements IDeleteCellsAliasResponse { + + /** + * Constructs a new DeleteCellsAliasResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IDeleteCellsAliasResponse); + + /** + * Creates a new DeleteCellsAliasResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns DeleteCellsAliasResponse instance + */ + public static create(properties?: vtctldata.IDeleteCellsAliasResponse): vtctldata.DeleteCellsAliasResponse; + + /** + * Encodes the specified DeleteCellsAliasResponse message. Does not implicitly {@link vtctldata.DeleteCellsAliasResponse.verify|verify} messages. 
+ * @param message DeleteCellsAliasResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IDeleteCellsAliasResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified DeleteCellsAliasResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteCellsAliasResponse.verify|verify} messages. + * @param message DeleteCellsAliasResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IDeleteCellsAliasResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a DeleteCellsAliasResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns DeleteCellsAliasResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteCellsAliasResponse; + + /** + * Decodes a DeleteCellsAliasResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns DeleteCellsAliasResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteCellsAliasResponse; + + /** + * Verifies a DeleteCellsAliasResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a DeleteCellsAliasResponse message from a plain object. 
Also converts values to their respective internal types. + * @param object Plain object + * @returns DeleteCellsAliasResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.DeleteCellsAliasResponse; + + /** + * Creates a plain object from a DeleteCellsAliasResponse message. Also converts values to other types if specified. + * @param message DeleteCellsAliasResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.DeleteCellsAliasResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this DeleteCellsAliasResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for DeleteCellsAliasResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a DeleteKeyspaceRequest. */ + interface IDeleteKeyspaceRequest { + + /** DeleteKeyspaceRequest keyspace */ + keyspace?: (string|null); + + /** DeleteKeyspaceRequest recursive */ + recursive?: (boolean|null); + + /** DeleteKeyspaceRequest force */ + force?: (boolean|null); + } + + /** Represents a DeleteKeyspaceRequest. */ + class DeleteKeyspaceRequest implements IDeleteKeyspaceRequest { + + /** + * Constructs a new DeleteKeyspaceRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IDeleteKeyspaceRequest); + + /** DeleteKeyspaceRequest keyspace. */ + public keyspace: string; + + /** DeleteKeyspaceRequest recursive. */ + public recursive: boolean; + + /** DeleteKeyspaceRequest force. */ + public force: boolean; + + /** + * Creates a new DeleteKeyspaceRequest instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns DeleteKeyspaceRequest instance + */ + public static create(properties?: vtctldata.IDeleteKeyspaceRequest): vtctldata.DeleteKeyspaceRequest; + + /** + * Encodes the specified DeleteKeyspaceRequest message. Does not implicitly {@link vtctldata.DeleteKeyspaceRequest.verify|verify} messages. + * @param message DeleteKeyspaceRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IDeleteKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified DeleteKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteKeyspaceRequest.verify|verify} messages. + * @param message DeleteKeyspaceRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IDeleteKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a DeleteKeyspaceRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns DeleteKeyspaceRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteKeyspaceRequest; + + /** + * Decodes a DeleteKeyspaceRequest message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns DeleteKeyspaceRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteKeyspaceRequest; + + /** + * Verifies a DeleteKeyspaceRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a DeleteKeyspaceRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns DeleteKeyspaceRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.DeleteKeyspaceRequest; + + /** + * Creates a plain object from a DeleteKeyspaceRequest message. Also converts values to other types if specified. + * @param message DeleteKeyspaceRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.DeleteKeyspaceRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this DeleteKeyspaceRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for DeleteKeyspaceRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a DeleteKeyspaceResponse. */ + interface IDeleteKeyspaceResponse { + } + + /** Represents a DeleteKeyspaceResponse. */ + class DeleteKeyspaceResponse implements IDeleteKeyspaceResponse { + + /** + * Constructs a new DeleteKeyspaceResponse. 
+ * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IDeleteKeyspaceResponse); + + /** + * Creates a new DeleteKeyspaceResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns DeleteKeyspaceResponse instance + */ + public static create(properties?: vtctldata.IDeleteKeyspaceResponse): vtctldata.DeleteKeyspaceResponse; + + /** + * Encodes the specified DeleteKeyspaceResponse message. Does not implicitly {@link vtctldata.DeleteKeyspaceResponse.verify|verify} messages. + * @param message DeleteKeyspaceResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IDeleteKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified DeleteKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteKeyspaceResponse.verify|verify} messages. + * @param message DeleteKeyspaceResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IDeleteKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a DeleteKeyspaceResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns DeleteKeyspaceResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteKeyspaceResponse; + + /** + * Decodes a DeleteKeyspaceResponse message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns DeleteKeyspaceResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteKeyspaceResponse; + + /** + * Verifies a DeleteKeyspaceResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a DeleteKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns DeleteKeyspaceResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.DeleteKeyspaceResponse; + + /** + * Creates a plain object from a DeleteKeyspaceResponse message. Also converts values to other types if specified. + * @param message DeleteKeyspaceResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.DeleteKeyspaceResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this DeleteKeyspaceResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for DeleteKeyspaceResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a DeleteShardsRequest. 
*/ + interface IDeleteShardsRequest { + + /** DeleteShardsRequest shards */ + shards?: (vtctldata.IShard[]|null); + + /** DeleteShardsRequest recursive */ + recursive?: (boolean|null); + + /** DeleteShardsRequest even_if_serving */ + even_if_serving?: (boolean|null); + + /** DeleteShardsRequest force */ + force?: (boolean|null); + } + + /** Represents a DeleteShardsRequest. */ + class DeleteShardsRequest implements IDeleteShardsRequest { + + /** + * Constructs a new DeleteShardsRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IDeleteShardsRequest); + + /** DeleteShardsRequest shards. */ + public shards: vtctldata.IShard[]; + + /** DeleteShardsRequest recursive. */ + public recursive: boolean; + + /** DeleteShardsRequest even_if_serving. */ + public even_if_serving: boolean; + + /** DeleteShardsRequest force. */ + public force: boolean; + + /** + * Creates a new DeleteShardsRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns DeleteShardsRequest instance + */ + public static create(properties?: vtctldata.IDeleteShardsRequest): vtctldata.DeleteShardsRequest; + + /** + * Encodes the specified DeleteShardsRequest message. Does not implicitly {@link vtctldata.DeleteShardsRequest.verify|verify} messages. + * @param message DeleteShardsRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IDeleteShardsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified DeleteShardsRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteShardsRequest.verify|verify} messages. 
+ * @param message DeleteShardsRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IDeleteShardsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a DeleteShardsRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns DeleteShardsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteShardsRequest; + + /** + * Decodes a DeleteShardsRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns DeleteShardsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteShardsRequest; + + /** + * Verifies a DeleteShardsRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a DeleteShardsRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns DeleteShardsRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.DeleteShardsRequest; + + /** + * Creates a plain object from a DeleteShardsRequest message. Also converts values to other types if specified. 
+ * @param message DeleteShardsRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.DeleteShardsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this DeleteShardsRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for DeleteShardsRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a DeleteShardsResponse. */ + interface IDeleteShardsResponse { + } + + /** Represents a DeleteShardsResponse. */ + class DeleteShardsResponse implements IDeleteShardsResponse { + + /** + * Constructs a new DeleteShardsResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IDeleteShardsResponse); + + /** + * Creates a new DeleteShardsResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns DeleteShardsResponse instance + */ + public static create(properties?: vtctldata.IDeleteShardsResponse): vtctldata.DeleteShardsResponse; + + /** + * Encodes the specified DeleteShardsResponse message. Does not implicitly {@link vtctldata.DeleteShardsResponse.verify|verify} messages. + * @param message DeleteShardsResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IDeleteShardsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified DeleteShardsResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteShardsResponse.verify|verify} messages. 
+ * @param message DeleteShardsResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IDeleteShardsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a DeleteShardsResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns DeleteShardsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteShardsResponse; + + /** + * Decodes a DeleteShardsResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns DeleteShardsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteShardsResponse; + + /** + * Verifies a DeleteShardsResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a DeleteShardsResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns DeleteShardsResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.DeleteShardsResponse; + + /** + * Creates a plain object from a DeleteShardsResponse message. Also converts values to other types if specified. 
+ * @param message DeleteShardsResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.DeleteShardsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this DeleteShardsResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for DeleteShardsResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a DeleteSrvVSchemaRequest. */ + interface IDeleteSrvVSchemaRequest { + + /** DeleteSrvVSchemaRequest cell */ + cell?: (string|null); + } + + /** Represents a DeleteSrvVSchemaRequest. */ + class DeleteSrvVSchemaRequest implements IDeleteSrvVSchemaRequest { + + /** + * Constructs a new DeleteSrvVSchemaRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IDeleteSrvVSchemaRequest); + + /** DeleteSrvVSchemaRequest cell. */ + public cell: string; + + /** + * Creates a new DeleteSrvVSchemaRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns DeleteSrvVSchemaRequest instance + */ + public static create(properties?: vtctldata.IDeleteSrvVSchemaRequest): vtctldata.DeleteSrvVSchemaRequest; + + /** + * Encodes the specified DeleteSrvVSchemaRequest message. Does not implicitly {@link vtctldata.DeleteSrvVSchemaRequest.verify|verify} messages. + * @param message DeleteSrvVSchemaRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IDeleteSrvVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified DeleteSrvVSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteSrvVSchemaRequest.verify|verify} messages. 
+ * @param message DeleteSrvVSchemaRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IDeleteSrvVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a DeleteSrvVSchemaRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns DeleteSrvVSchemaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteSrvVSchemaRequest; + + /** + * Decodes a DeleteSrvVSchemaRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns DeleteSrvVSchemaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteSrvVSchemaRequest; + + /** + * Verifies a DeleteSrvVSchemaRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a DeleteSrvVSchemaRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns DeleteSrvVSchemaRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.DeleteSrvVSchemaRequest; + + /** + * Creates a plain object from a DeleteSrvVSchemaRequest message. Also converts values to other types if specified. 
+ * @param message DeleteSrvVSchemaRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.DeleteSrvVSchemaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this DeleteSrvVSchemaRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for DeleteSrvVSchemaRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a DeleteSrvVSchemaResponse. */ + interface IDeleteSrvVSchemaResponse { + } + + /** Represents a DeleteSrvVSchemaResponse. */ + class DeleteSrvVSchemaResponse implements IDeleteSrvVSchemaResponse { + + /** + * Constructs a new DeleteSrvVSchemaResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IDeleteSrvVSchemaResponse); + + /** + * Creates a new DeleteSrvVSchemaResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns DeleteSrvVSchemaResponse instance + */ + public static create(properties?: vtctldata.IDeleteSrvVSchemaResponse): vtctldata.DeleteSrvVSchemaResponse; + + /** + * Encodes the specified DeleteSrvVSchemaResponse message. Does not implicitly {@link vtctldata.DeleteSrvVSchemaResponse.verify|verify} messages. + * @param message DeleteSrvVSchemaResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IDeleteSrvVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified DeleteSrvVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteSrvVSchemaResponse.verify|verify} messages. 
+ * @param message DeleteSrvVSchemaResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IDeleteSrvVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a DeleteSrvVSchemaResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns DeleteSrvVSchemaResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteSrvVSchemaResponse; + + /** + * Decodes a DeleteSrvVSchemaResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns DeleteSrvVSchemaResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteSrvVSchemaResponse; + + /** + * Verifies a DeleteSrvVSchemaResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a DeleteSrvVSchemaResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns DeleteSrvVSchemaResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.DeleteSrvVSchemaResponse; + + /** + * Creates a plain object from a DeleteSrvVSchemaResponse message. Also converts values to other types if specified. 
+ * @param message DeleteSrvVSchemaResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.DeleteSrvVSchemaResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this DeleteSrvVSchemaResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for DeleteSrvVSchemaResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a DeleteTabletsRequest. */ + interface IDeleteTabletsRequest { + + /** DeleteTabletsRequest tablet_aliases */ + tablet_aliases?: (topodata.ITabletAlias[]|null); + + /** DeleteTabletsRequest allow_primary */ + allow_primary?: (boolean|null); + } + + /** Represents a DeleteTabletsRequest. */ + class DeleteTabletsRequest implements IDeleteTabletsRequest { + + /** + * Constructs a new DeleteTabletsRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IDeleteTabletsRequest); + + /** DeleteTabletsRequest tablet_aliases. */ + public tablet_aliases: topodata.ITabletAlias[]; + + /** DeleteTabletsRequest allow_primary. */ + public allow_primary: boolean; + + /** + * Creates a new DeleteTabletsRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns DeleteTabletsRequest instance + */ + public static create(properties?: vtctldata.IDeleteTabletsRequest): vtctldata.DeleteTabletsRequest; + + /** + * Encodes the specified DeleteTabletsRequest message. Does not implicitly {@link vtctldata.DeleteTabletsRequest.verify|verify} messages. 
+ * @param message DeleteTabletsRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IDeleteTabletsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified DeleteTabletsRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteTabletsRequest.verify|verify} messages. + * @param message DeleteTabletsRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IDeleteTabletsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a DeleteTabletsRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns DeleteTabletsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteTabletsRequest; + + /** + * Decodes a DeleteTabletsRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns DeleteTabletsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteTabletsRequest; + + /** + * Verifies a DeleteTabletsRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a DeleteTabletsRequest message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns DeleteTabletsRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.DeleteTabletsRequest; + + /** + * Creates a plain object from a DeleteTabletsRequest message. Also converts values to other types if specified. + * @param message DeleteTabletsRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.DeleteTabletsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this DeleteTabletsRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for DeleteTabletsRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a DeleteTabletsResponse. */ + interface IDeleteTabletsResponse { + } + + /** Represents a DeleteTabletsResponse. */ + class DeleteTabletsResponse implements IDeleteTabletsResponse { + + /** + * Constructs a new DeleteTabletsResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IDeleteTabletsResponse); + + /** + * Creates a new DeleteTabletsResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns DeleteTabletsResponse instance + */ + public static create(properties?: vtctldata.IDeleteTabletsResponse): vtctldata.DeleteTabletsResponse; + + /** + * Encodes the specified DeleteTabletsResponse message. Does not implicitly {@link vtctldata.DeleteTabletsResponse.verify|verify} messages. 
+ * @param message DeleteTabletsResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IDeleteTabletsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified DeleteTabletsResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteTabletsResponse.verify|verify} messages. + * @param message DeleteTabletsResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IDeleteTabletsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a DeleteTabletsResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns DeleteTabletsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteTabletsResponse; + + /** + * Decodes a DeleteTabletsResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns DeleteTabletsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteTabletsResponse; + + /** + * Verifies a DeleteTabletsResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a DeleteTabletsResponse message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns DeleteTabletsResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.DeleteTabletsResponse; + + /** + * Creates a plain object from a DeleteTabletsResponse message. Also converts values to other types if specified. + * @param message DeleteTabletsResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.DeleteTabletsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this DeleteTabletsResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for DeleteTabletsResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an EmergencyReparentShardRequest. */ + interface IEmergencyReparentShardRequest { + + /** EmergencyReparentShardRequest keyspace */ + keyspace?: (string|null); + + /** EmergencyReparentShardRequest shard */ + shard?: (string|null); + + /** EmergencyReparentShardRequest new_primary */ + new_primary?: (topodata.ITabletAlias|null); + + /** EmergencyReparentShardRequest ignore_replicas */ + ignore_replicas?: (topodata.ITabletAlias[]|null); + + /** EmergencyReparentShardRequest wait_replicas_timeout */ + wait_replicas_timeout?: (vttime.IDuration|null); + + /** EmergencyReparentShardRequest prevent_cross_cell_promotion */ + prevent_cross_cell_promotion?: (boolean|null); + + /** EmergencyReparentShardRequest wait_for_all_tablets */ + wait_for_all_tablets?: (boolean|null); + } + + /** Represents an EmergencyReparentShardRequest. */ + class EmergencyReparentShardRequest implements IEmergencyReparentShardRequest { + + /** + * Constructs a new EmergencyReparentShardRequest. 
+ * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IEmergencyReparentShardRequest); + + /** EmergencyReparentShardRequest keyspace. */ + public keyspace: string; + + /** EmergencyReparentShardRequest shard. */ + public shard: string; + + /** EmergencyReparentShardRequest new_primary. */ + public new_primary?: (topodata.ITabletAlias|null); + + /** EmergencyReparentShardRequest ignore_replicas. */ + public ignore_replicas: topodata.ITabletAlias[]; + + /** EmergencyReparentShardRequest wait_replicas_timeout. */ + public wait_replicas_timeout?: (vttime.IDuration|null); + + /** EmergencyReparentShardRequest prevent_cross_cell_promotion. */ + public prevent_cross_cell_promotion: boolean; + + /** EmergencyReparentShardRequest wait_for_all_tablets. */ + public wait_for_all_tablets: boolean; + + /** + * Creates a new EmergencyReparentShardRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns EmergencyReparentShardRequest instance + */ + public static create(properties?: vtctldata.IEmergencyReparentShardRequest): vtctldata.EmergencyReparentShardRequest; + + /** + * Encodes the specified EmergencyReparentShardRequest message. Does not implicitly {@link vtctldata.EmergencyReparentShardRequest.verify|verify} messages. + * @param message EmergencyReparentShardRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IEmergencyReparentShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified EmergencyReparentShardRequest message, length delimited. Does not implicitly {@link vtctldata.EmergencyReparentShardRequest.verify|verify} messages. 
+ * @param message EmergencyReparentShardRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IEmergencyReparentShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an EmergencyReparentShardRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns EmergencyReparentShardRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.EmergencyReparentShardRequest; + + /** + * Decodes an EmergencyReparentShardRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns EmergencyReparentShardRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.EmergencyReparentShardRequest; + + /** + * Verifies an EmergencyReparentShardRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an EmergencyReparentShardRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns EmergencyReparentShardRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.EmergencyReparentShardRequest; + + /** + * Creates a plain object from an EmergencyReparentShardRequest message. Also converts values to other types if specified. 
+ * @param message EmergencyReparentShardRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.EmergencyReparentShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this EmergencyReparentShardRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for EmergencyReparentShardRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an EmergencyReparentShardResponse. */ + interface IEmergencyReparentShardResponse { + + /** EmergencyReparentShardResponse keyspace */ + keyspace?: (string|null); + + /** EmergencyReparentShardResponse shard */ + shard?: (string|null); + + /** EmergencyReparentShardResponse promoted_primary */ + promoted_primary?: (topodata.ITabletAlias|null); + + /** EmergencyReparentShardResponse events */ + events?: (logutil.IEvent[]|null); + } + + /** Represents an EmergencyReparentShardResponse. */ + class EmergencyReparentShardResponse implements IEmergencyReparentShardResponse { + + /** + * Constructs a new EmergencyReparentShardResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IEmergencyReparentShardResponse); + + /** EmergencyReparentShardResponse keyspace. */ + public keyspace: string; + + /** EmergencyReparentShardResponse shard. */ + public shard: string; + + /** EmergencyReparentShardResponse promoted_primary. */ + public promoted_primary?: (topodata.ITabletAlias|null); + + /** EmergencyReparentShardResponse events. */ + public events: logutil.IEvent[]; + + /** + * Creates a new EmergencyReparentShardResponse instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns EmergencyReparentShardResponse instance + */ + public static create(properties?: vtctldata.IEmergencyReparentShardResponse): vtctldata.EmergencyReparentShardResponse; + + /** + * Encodes the specified EmergencyReparentShardResponse message. Does not implicitly {@link vtctldata.EmergencyReparentShardResponse.verify|verify} messages. + * @param message EmergencyReparentShardResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IEmergencyReparentShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified EmergencyReparentShardResponse message, length delimited. Does not implicitly {@link vtctldata.EmergencyReparentShardResponse.verify|verify} messages. + * @param message EmergencyReparentShardResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IEmergencyReparentShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an EmergencyReparentShardResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns EmergencyReparentShardResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.EmergencyReparentShardResponse; + + /** + * Decodes an EmergencyReparentShardResponse message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns EmergencyReparentShardResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.EmergencyReparentShardResponse; + + /** + * Verifies an EmergencyReparentShardResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an EmergencyReparentShardResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns EmergencyReparentShardResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.EmergencyReparentShardResponse; + + /** + * Creates a plain object from an EmergencyReparentShardResponse message. Also converts values to other types if specified. + * @param message EmergencyReparentShardResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.EmergencyReparentShardResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this EmergencyReparentShardResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for EmergencyReparentShardResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an ExecuteFetchAsAppRequest. 
*/ + interface IExecuteFetchAsAppRequest { + + /** ExecuteFetchAsAppRequest tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); + + /** ExecuteFetchAsAppRequest query */ + query?: (string|null); + + /** ExecuteFetchAsAppRequest max_rows */ + max_rows?: (number|Long|null); + + /** ExecuteFetchAsAppRequest use_pool */ + use_pool?: (boolean|null); + } + + /** Represents an ExecuteFetchAsAppRequest. */ + class ExecuteFetchAsAppRequest implements IExecuteFetchAsAppRequest { + + /** + * Constructs a new ExecuteFetchAsAppRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IExecuteFetchAsAppRequest); + + /** ExecuteFetchAsAppRequest tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); + + /** ExecuteFetchAsAppRequest query. */ + public query: string; + + /** ExecuteFetchAsAppRequest max_rows. */ + public max_rows: (number|Long); + + /** ExecuteFetchAsAppRequest use_pool. */ + public use_pool: boolean; + + /** + * Creates a new ExecuteFetchAsAppRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns ExecuteFetchAsAppRequest instance + */ + public static create(properties?: vtctldata.IExecuteFetchAsAppRequest): vtctldata.ExecuteFetchAsAppRequest; + + /** + * Encodes the specified ExecuteFetchAsAppRequest message. Does not implicitly {@link vtctldata.ExecuteFetchAsAppRequest.verify|verify} messages. + * @param message ExecuteFetchAsAppRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IExecuteFetchAsAppRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ExecuteFetchAsAppRequest message, length delimited. Does not implicitly {@link vtctldata.ExecuteFetchAsAppRequest.verify|verify} messages. 
+ * @param message ExecuteFetchAsAppRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IExecuteFetchAsAppRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ExecuteFetchAsAppRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ExecuteFetchAsAppRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ExecuteFetchAsAppRequest; + + /** + * Decodes an ExecuteFetchAsAppRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ExecuteFetchAsAppRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ExecuteFetchAsAppRequest; + + /** + * Verifies an ExecuteFetchAsAppRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ExecuteFetchAsAppRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ExecuteFetchAsAppRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ExecuteFetchAsAppRequest; + + /** + * Creates a plain object from an ExecuteFetchAsAppRequest message. Also converts values to other types if specified. 
+ * @param message ExecuteFetchAsAppRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ExecuteFetchAsAppRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ExecuteFetchAsAppRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ExecuteFetchAsAppRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an ExecuteFetchAsAppResponse. */ + interface IExecuteFetchAsAppResponse { + + /** ExecuteFetchAsAppResponse result */ + result?: (query.IQueryResult|null); + } + + /** Represents an ExecuteFetchAsAppResponse. */ + class ExecuteFetchAsAppResponse implements IExecuteFetchAsAppResponse { + + /** + * Constructs a new ExecuteFetchAsAppResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IExecuteFetchAsAppResponse); + + /** ExecuteFetchAsAppResponse result. */ + public result?: (query.IQueryResult|null); + + /** + * Creates a new ExecuteFetchAsAppResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns ExecuteFetchAsAppResponse instance + */ + public static create(properties?: vtctldata.IExecuteFetchAsAppResponse): vtctldata.ExecuteFetchAsAppResponse; + + /** + * Encodes the specified ExecuteFetchAsAppResponse message. Does not implicitly {@link vtctldata.ExecuteFetchAsAppResponse.verify|verify} messages. 
+ * @param message ExecuteFetchAsAppResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IExecuteFetchAsAppResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ExecuteFetchAsAppResponse message, length delimited. Does not implicitly {@link vtctldata.ExecuteFetchAsAppResponse.verify|verify} messages. + * @param message ExecuteFetchAsAppResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IExecuteFetchAsAppResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ExecuteFetchAsAppResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ExecuteFetchAsAppResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ExecuteFetchAsAppResponse; + + /** + * Decodes an ExecuteFetchAsAppResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ExecuteFetchAsAppResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ExecuteFetchAsAppResponse; + + /** + * Verifies an ExecuteFetchAsAppResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ExecuteFetchAsAppResponse message from a plain object. 
Also converts values to their respective internal types. + * @param object Plain object + * @returns ExecuteFetchAsAppResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ExecuteFetchAsAppResponse; + + /** + * Creates a plain object from an ExecuteFetchAsAppResponse message. Also converts values to other types if specified. + * @param message ExecuteFetchAsAppResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ExecuteFetchAsAppResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ExecuteFetchAsAppResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ExecuteFetchAsAppResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an ExecuteFetchAsDBARequest. */ + interface IExecuteFetchAsDBARequest { + + /** ExecuteFetchAsDBARequest tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); + + /** ExecuteFetchAsDBARequest query */ + query?: (string|null); + + /** ExecuteFetchAsDBARequest max_rows */ + max_rows?: (number|Long|null); + + /** ExecuteFetchAsDBARequest disable_binlogs */ + disable_binlogs?: (boolean|null); + + /** ExecuteFetchAsDBARequest reload_schema */ + reload_schema?: (boolean|null); + } + + /** Represents an ExecuteFetchAsDBARequest. */ + class ExecuteFetchAsDBARequest implements IExecuteFetchAsDBARequest { + + /** + * Constructs a new ExecuteFetchAsDBARequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IExecuteFetchAsDBARequest); + + /** ExecuteFetchAsDBARequest tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); + + /** ExecuteFetchAsDBARequest query. 
*/ + public query: string; + + /** ExecuteFetchAsDBARequest max_rows. */ + public max_rows: (number|Long); + + /** ExecuteFetchAsDBARequest disable_binlogs. */ + public disable_binlogs: boolean; + + /** ExecuteFetchAsDBARequest reload_schema. */ + public reload_schema: boolean; + + /** + * Creates a new ExecuteFetchAsDBARequest instance using the specified properties. + * @param [properties] Properties to set + * @returns ExecuteFetchAsDBARequest instance + */ + public static create(properties?: vtctldata.IExecuteFetchAsDBARequest): vtctldata.ExecuteFetchAsDBARequest; + + /** + * Encodes the specified ExecuteFetchAsDBARequest message. Does not implicitly {@link vtctldata.ExecuteFetchAsDBARequest.verify|verify} messages. + * @param message ExecuteFetchAsDBARequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IExecuteFetchAsDBARequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ExecuteFetchAsDBARequest message, length delimited. Does not implicitly {@link vtctldata.ExecuteFetchAsDBARequest.verify|verify} messages. + * @param message ExecuteFetchAsDBARequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IExecuteFetchAsDBARequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ExecuteFetchAsDBARequest message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ExecuteFetchAsDBARequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ExecuteFetchAsDBARequest; + + /** + * Decodes an ExecuteFetchAsDBARequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ExecuteFetchAsDBARequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ExecuteFetchAsDBARequest; + + /** + * Verifies an ExecuteFetchAsDBARequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ExecuteFetchAsDBARequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ExecuteFetchAsDBARequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ExecuteFetchAsDBARequest; + + /** + * Creates a plain object from an ExecuteFetchAsDBARequest message. Also converts values to other types if specified. + * @param message ExecuteFetchAsDBARequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ExecuteFetchAsDBARequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ExecuteFetchAsDBARequest to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ExecuteFetchAsDBARequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an ExecuteFetchAsDBAResponse. */ + interface IExecuteFetchAsDBAResponse { + + /** ExecuteFetchAsDBAResponse result */ + result?: (query.IQueryResult|null); + } + + /** Represents an ExecuteFetchAsDBAResponse. */ + class ExecuteFetchAsDBAResponse implements IExecuteFetchAsDBAResponse { + + /** + * Constructs a new ExecuteFetchAsDBAResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IExecuteFetchAsDBAResponse); + + /** ExecuteFetchAsDBAResponse result. */ + public result?: (query.IQueryResult|null); + + /** + * Creates a new ExecuteFetchAsDBAResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns ExecuteFetchAsDBAResponse instance + */ + public static create(properties?: vtctldata.IExecuteFetchAsDBAResponse): vtctldata.ExecuteFetchAsDBAResponse; + + /** + * Encodes the specified ExecuteFetchAsDBAResponse message. Does not implicitly {@link vtctldata.ExecuteFetchAsDBAResponse.verify|verify} messages. + * @param message ExecuteFetchAsDBAResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IExecuteFetchAsDBAResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ExecuteFetchAsDBAResponse message, length delimited. Does not implicitly {@link vtctldata.ExecuteFetchAsDBAResponse.verify|verify} messages. 
+ * @param message ExecuteFetchAsDBAResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IExecuteFetchAsDBAResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ExecuteFetchAsDBAResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ExecuteFetchAsDBAResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ExecuteFetchAsDBAResponse; + + /** + * Decodes an ExecuteFetchAsDBAResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ExecuteFetchAsDBAResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ExecuteFetchAsDBAResponse; + + /** + * Verifies an ExecuteFetchAsDBAResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); - /** Stream binlog_source. */ - public binlog_source?: (binlogdata.IBinlogSource|null); + /** + * Creates an ExecuteFetchAsDBAResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ExecuteFetchAsDBAResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ExecuteFetchAsDBAResponse; - /** Stream position. */ - public position: string; + /** + * Creates a plain object from an ExecuteFetchAsDBAResponse message. 
Also converts values to other types if specified. + * @param message ExecuteFetchAsDBAResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ExecuteFetchAsDBAResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; - /** Stream stop_position. */ - public stop_position: string; + /** + * Converts this ExecuteFetchAsDBAResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; - /** Stream state. */ - public state: string; + /** + * Gets the default type url for ExecuteFetchAsDBAResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } - /** Stream db_name. */ - public db_name: string; + /** Properties of an ExecuteHookRequest. */ + interface IExecuteHookRequest { - /** Stream transaction_timestamp. */ - public transaction_timestamp?: (vttime.ITime|null); + /** ExecuteHookRequest tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); - /** Stream time_updated. */ - public time_updated?: (vttime.ITime|null); + /** ExecuteHookRequest tablet_hook_request */ + tablet_hook_request?: (tabletmanagerdata.IExecuteHookRequest|null); + } - /** Stream message. */ - public message: string; + /** Represents an ExecuteHookRequest. */ + class ExecuteHookRequest implements IExecuteHookRequest { - /** Stream copy_states. */ - public copy_states: vtctldata.Workflow.Stream.ICopyState[]; + /** + * Constructs a new ExecuteHookRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IExecuteHookRequest); - /** Stream logs. */ - public logs: vtctldata.Workflow.Stream.ILog[]; + /** ExecuteHookRequest tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); - /** Stream log_fetch_error. */ - public log_fetch_error: string; + /** ExecuteHookRequest tablet_hook_request. 
*/ + public tablet_hook_request?: (tabletmanagerdata.IExecuteHookRequest|null); - /** Stream tags. */ - public tags: string[]; + /** + * Creates a new ExecuteHookRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns ExecuteHookRequest instance + */ + public static create(properties?: vtctldata.IExecuteHookRequest): vtctldata.ExecuteHookRequest; - /** - * Creates a new Stream instance using the specified properties. - * @param [properties] Properties to set - * @returns Stream instance - */ - public static create(properties?: vtctldata.Workflow.IStream): vtctldata.Workflow.Stream; + /** + * Encodes the specified ExecuteHookRequest message. Does not implicitly {@link vtctldata.ExecuteHookRequest.verify|verify} messages. + * @param message ExecuteHookRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IExecuteHookRequest, writer?: $protobuf.Writer): $protobuf.Writer; - /** - * Encodes the specified Stream message. Does not implicitly {@link vtctldata.Workflow.Stream.verify|verify} messages. - * @param message Stream message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encode(message: vtctldata.Workflow.IStream, writer?: $protobuf.Writer): $protobuf.Writer; + /** + * Encodes the specified ExecuteHookRequest message, length delimited. Does not implicitly {@link vtctldata.ExecuteHookRequest.verify|verify} messages. + * @param message ExecuteHookRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IExecuteHookRequest, writer?: $protobuf.Writer): $protobuf.Writer; - /** - * Encodes the specified Stream message, length delimited. Does not implicitly {@link vtctldata.Workflow.Stream.verify|verify} messages. 
- * @param message Stream message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encodeDelimited(message: vtctldata.Workflow.IStream, writer?: $protobuf.Writer): $protobuf.Writer; + /** + * Decodes an ExecuteHookRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ExecuteHookRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ExecuteHookRequest; - /** - * Decodes a Stream message from the specified reader or buffer. - * @param reader Reader or buffer to decode from - * @param [length] Message length if known beforehand - * @returns Stream - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.Workflow.Stream; + /** + * Decodes an ExecuteHookRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ExecuteHookRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ExecuteHookRequest; - /** - * Decodes a Stream message from the specified reader or buffer, length delimited. 
- * @param reader Reader or buffer to decode from - * @returns Stream - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.Workflow.Stream; + /** + * Verifies an ExecuteHookRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); - /** - * Verifies a Stream message. - * @param message Plain object to verify - * @returns `null` if valid, otherwise the reason why it is not - */ - public static verify(message: { [k: string]: any }): (string|null); + /** + * Creates an ExecuteHookRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ExecuteHookRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ExecuteHookRequest; - /** - * Creates a Stream message from a plain object. Also converts values to their respective internal types. - * @param object Plain object - * @returns Stream - */ - public static fromObject(object: { [k: string]: any }): vtctldata.Workflow.Stream; + /** + * Creates a plain object from an ExecuteHookRequest message. Also converts values to other types if specified. + * @param message ExecuteHookRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ExecuteHookRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; - /** - * Creates a plain object from a Stream message. Also converts values to other types if specified. 
- * @param message Stream - * @param [options] Conversion options - * @returns Plain object - */ - public static toObject(message: vtctldata.Workflow.Stream, options?: $protobuf.IConversionOptions): { [k: string]: any }; + /** + * Converts this ExecuteHookRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; - /** - * Converts this Stream to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; + /** + * Gets the default type url for ExecuteHookRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } - /** - * Gets the default type url for Stream - * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns The default type url - */ - public static getTypeUrl(typeUrlPrefix?: string): string; - } + /** Properties of an ExecuteHookResponse. */ + interface IExecuteHookResponse { - namespace Stream { + /** ExecuteHookResponse hook_result */ + hook_result?: (tabletmanagerdata.IExecuteHookResponse|null); + } - /** Properties of a CopyState. */ - interface ICopyState { + /** Represents an ExecuteHookResponse. */ + class ExecuteHookResponse implements IExecuteHookResponse { - /** CopyState table */ - table?: (string|null); + /** + * Constructs a new ExecuteHookResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IExecuteHookResponse); - /** CopyState last_pk */ - last_pk?: (string|null); - } + /** ExecuteHookResponse hook_result. */ + public hook_result?: (tabletmanagerdata.IExecuteHookResponse|null); - /** Represents a CopyState. */ - class CopyState implements ICopyState { + /** + * Creates a new ExecuteHookResponse instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns ExecuteHookResponse instance + */ + public static create(properties?: vtctldata.IExecuteHookResponse): vtctldata.ExecuteHookResponse; - /** - * Constructs a new CopyState. - * @param [properties] Properties to set - */ - constructor(properties?: vtctldata.Workflow.Stream.ICopyState); + /** + * Encodes the specified ExecuteHookResponse message. Does not implicitly {@link vtctldata.ExecuteHookResponse.verify|verify} messages. + * @param message ExecuteHookResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IExecuteHookResponse, writer?: $protobuf.Writer): $protobuf.Writer; - /** CopyState table. */ - public table: string; + /** + * Encodes the specified ExecuteHookResponse message, length delimited. Does not implicitly {@link vtctldata.ExecuteHookResponse.verify|verify} messages. + * @param message ExecuteHookResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IExecuteHookResponse, writer?: $protobuf.Writer): $protobuf.Writer; - /** CopyState last_pk. */ - public last_pk: string; + /** + * Decodes an ExecuteHookResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ExecuteHookResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ExecuteHookResponse; - /** - * Creates a new CopyState instance using the specified properties. 
- * @param [properties] Properties to set - * @returns CopyState instance - */ - public static create(properties?: vtctldata.Workflow.Stream.ICopyState): vtctldata.Workflow.Stream.CopyState; + /** + * Decodes an ExecuteHookResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ExecuteHookResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ExecuteHookResponse; - /** - * Encodes the specified CopyState message. Does not implicitly {@link vtctldata.Workflow.Stream.CopyState.verify|verify} messages. - * @param message CopyState message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encode(message: vtctldata.Workflow.Stream.ICopyState, writer?: $protobuf.Writer): $protobuf.Writer; + /** + * Verifies an ExecuteHookResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); - /** - * Encodes the specified CopyState message, length delimited. Does not implicitly {@link vtctldata.Workflow.Stream.CopyState.verify|verify} messages. - * @param message CopyState message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encodeDelimited(message: vtctldata.Workflow.Stream.ICopyState, writer?: $protobuf.Writer): $protobuf.Writer; + /** + * Creates an ExecuteHookResponse message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns ExecuteHookResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ExecuteHookResponse; - /** - * Decodes a CopyState message from the specified reader or buffer. - * @param reader Reader or buffer to decode from - * @param [length] Message length if known beforehand - * @returns CopyState - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.Workflow.Stream.CopyState; + /** + * Creates a plain object from an ExecuteHookResponse message. Also converts values to other types if specified. + * @param message ExecuteHookResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ExecuteHookResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; - /** - * Decodes a CopyState message from the specified reader or buffer, length delimited. - * @param reader Reader or buffer to decode from - * @returns CopyState - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.Workflow.Stream.CopyState; + /** + * Converts this ExecuteHookResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; - /** - * Verifies a CopyState message. 
- * @param message Plain object to verify - * @returns `null` if valid, otherwise the reason why it is not - */ - public static verify(message: { [k: string]: any }): (string|null); + /** + * Gets the default type url for ExecuteHookResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } - /** - * Creates a CopyState message from a plain object. Also converts values to their respective internal types. - * @param object Plain object - * @returns CopyState - */ - public static fromObject(object: { [k: string]: any }): vtctldata.Workflow.Stream.CopyState; + /** Properties of a FindAllShardsInKeyspaceRequest. */ + interface IFindAllShardsInKeyspaceRequest { - /** - * Creates a plain object from a CopyState message. Also converts values to other types if specified. - * @param message CopyState - * @param [options] Conversion options - * @returns Plain object - */ - public static toObject(message: vtctldata.Workflow.Stream.CopyState, options?: $protobuf.IConversionOptions): { [k: string]: any }; + /** FindAllShardsInKeyspaceRequest keyspace */ + keyspace?: (string|null); + } - /** - * Converts this CopyState to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; + /** Represents a FindAllShardsInKeyspaceRequest. */ + class FindAllShardsInKeyspaceRequest implements IFindAllShardsInKeyspaceRequest { + + /** + * Constructs a new FindAllShardsInKeyspaceRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IFindAllShardsInKeyspaceRequest); + + /** FindAllShardsInKeyspaceRequest keyspace. */ + public keyspace: string; + + /** + * Creates a new FindAllShardsInKeyspaceRequest instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns FindAllShardsInKeyspaceRequest instance + */ + public static create(properties?: vtctldata.IFindAllShardsInKeyspaceRequest): vtctldata.FindAllShardsInKeyspaceRequest; + + /** + * Encodes the specified FindAllShardsInKeyspaceRequest message. Does not implicitly {@link vtctldata.FindAllShardsInKeyspaceRequest.verify|verify} messages. + * @param message FindAllShardsInKeyspaceRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IFindAllShardsInKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified FindAllShardsInKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.FindAllShardsInKeyspaceRequest.verify|verify} messages. + * @param message FindAllShardsInKeyspaceRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IFindAllShardsInKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a FindAllShardsInKeyspaceRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns FindAllShardsInKeyspaceRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.FindAllShardsInKeyspaceRequest; + + /** + * Decodes a FindAllShardsInKeyspaceRequest message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns FindAllShardsInKeyspaceRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.FindAllShardsInKeyspaceRequest; + + /** + * Verifies a FindAllShardsInKeyspaceRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a FindAllShardsInKeyspaceRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns FindAllShardsInKeyspaceRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.FindAllShardsInKeyspaceRequest; + + /** + * Creates a plain object from a FindAllShardsInKeyspaceRequest message. Also converts values to other types if specified. + * @param message FindAllShardsInKeyspaceRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.FindAllShardsInKeyspaceRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this FindAllShardsInKeyspaceRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for FindAllShardsInKeyspaceRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a FindAllShardsInKeyspaceResponse. */ + interface IFindAllShardsInKeyspaceResponse { + + /** FindAllShardsInKeyspaceResponse shards */ + shards?: ({ [k: string]: vtctldata.IShard }|null); + } + + /** Represents a FindAllShardsInKeyspaceResponse. 
*/ + class FindAllShardsInKeyspaceResponse implements IFindAllShardsInKeyspaceResponse { + + /** + * Constructs a new FindAllShardsInKeyspaceResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IFindAllShardsInKeyspaceResponse); + + /** FindAllShardsInKeyspaceResponse shards. */ + public shards: { [k: string]: vtctldata.IShard }; + + /** + * Creates a new FindAllShardsInKeyspaceResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns FindAllShardsInKeyspaceResponse instance + */ + public static create(properties?: vtctldata.IFindAllShardsInKeyspaceResponse): vtctldata.FindAllShardsInKeyspaceResponse; + + /** + * Encodes the specified FindAllShardsInKeyspaceResponse message. Does not implicitly {@link vtctldata.FindAllShardsInKeyspaceResponse.verify|verify} messages. + * @param message FindAllShardsInKeyspaceResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IFindAllShardsInKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified FindAllShardsInKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.FindAllShardsInKeyspaceResponse.verify|verify} messages. + * @param message FindAllShardsInKeyspaceResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IFindAllShardsInKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; - /** - * Gets the default type url for CopyState - * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns The default type url - */ - public static getTypeUrl(typeUrlPrefix?: string): string; - } + /** + * Decodes a FindAllShardsInKeyspaceResponse message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns FindAllShardsInKeyspaceResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.FindAllShardsInKeyspaceResponse; - /** Properties of a Log. */ - interface ILog { + /** + * Decodes a FindAllShardsInKeyspaceResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns FindAllShardsInKeyspaceResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.FindAllShardsInKeyspaceResponse; - /** Log id */ - id?: (number|Long|null); + /** + * Verifies a FindAllShardsInKeyspaceResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); - /** Log stream_id */ - stream_id?: (number|Long|null); + /** + * Creates a FindAllShardsInKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns FindAllShardsInKeyspaceResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.FindAllShardsInKeyspaceResponse; - /** Log type */ - type?: (string|null); + /** + * Creates a plain object from a FindAllShardsInKeyspaceResponse message. Also converts values to other types if specified. 
+ * @param message FindAllShardsInKeyspaceResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.FindAllShardsInKeyspaceResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; - /** Log state */ - state?: (string|null); + /** + * Converts this FindAllShardsInKeyspaceResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; - /** Log created_at */ - created_at?: (vttime.ITime|null); + /** + * Gets the default type url for FindAllShardsInKeyspaceResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } - /** Log updated_at */ - updated_at?: (vttime.ITime|null); + /** Properties of a GetBackupsRequest. */ + interface IGetBackupsRequest { - /** Log message */ - message?: (string|null); + /** GetBackupsRequest keyspace */ + keyspace?: (string|null); - /** Log count */ - count?: (number|Long|null); - } + /** GetBackupsRequest shard */ + shard?: (string|null); - /** Represents a Log. */ - class Log implements ILog { + /** GetBackupsRequest limit */ + limit?: (number|null); - /** - * Constructs a new Log. - * @param [properties] Properties to set - */ - constructor(properties?: vtctldata.Workflow.Stream.ILog); + /** GetBackupsRequest detailed */ + detailed?: (boolean|null); - /** Log id. */ - public id: (number|Long); + /** GetBackupsRequest detailed_limit */ + detailed_limit?: (number|null); + } - /** Log stream_id. */ - public stream_id: (number|Long); + /** Represents a GetBackupsRequest. */ + class GetBackupsRequest implements IGetBackupsRequest { - /** Log type. */ - public type: string; + /** + * Constructs a new GetBackupsRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IGetBackupsRequest); - /** Log state. */ - public state: string; + /** GetBackupsRequest keyspace. 
*/ + public keyspace: string; - /** Log created_at. */ - public created_at?: (vttime.ITime|null); + /** GetBackupsRequest shard. */ + public shard: string; - /** Log updated_at. */ - public updated_at?: (vttime.ITime|null); + /** GetBackupsRequest limit. */ + public limit: number; - /** Log message. */ - public message: string; + /** GetBackupsRequest detailed. */ + public detailed: boolean; - /** Log count. */ - public count: (number|Long); + /** GetBackupsRequest detailed_limit. */ + public detailed_limit: number; - /** - * Creates a new Log instance using the specified properties. - * @param [properties] Properties to set - * @returns Log instance - */ - public static create(properties?: vtctldata.Workflow.Stream.ILog): vtctldata.Workflow.Stream.Log; + /** + * Creates a new GetBackupsRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns GetBackupsRequest instance + */ + public static create(properties?: vtctldata.IGetBackupsRequest): vtctldata.GetBackupsRequest; - /** - * Encodes the specified Log message. Does not implicitly {@link vtctldata.Workflow.Stream.Log.verify|verify} messages. - * @param message Log message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encode(message: vtctldata.Workflow.Stream.ILog, writer?: $protobuf.Writer): $protobuf.Writer; + /** + * Encodes the specified GetBackupsRequest message. Does not implicitly {@link vtctldata.GetBackupsRequest.verify|verify} messages. + * @param message GetBackupsRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IGetBackupsRequest, writer?: $protobuf.Writer): $protobuf.Writer; - /** - * Encodes the specified Log message, length delimited. Does not implicitly {@link vtctldata.Workflow.Stream.Log.verify|verify} messages. 
- * @param message Log message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encodeDelimited(message: vtctldata.Workflow.Stream.ILog, writer?: $protobuf.Writer): $protobuf.Writer; + /** + * Encodes the specified GetBackupsRequest message, length delimited. Does not implicitly {@link vtctldata.GetBackupsRequest.verify|verify} messages. + * @param message GetBackupsRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IGetBackupsRequest, writer?: $protobuf.Writer): $protobuf.Writer; - /** - * Decodes a Log message from the specified reader or buffer. - * @param reader Reader or buffer to decode from - * @param [length] Message length if known beforehand - * @returns Log - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.Workflow.Stream.Log; + /** + * Decodes a GetBackupsRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns GetBackupsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetBackupsRequest; - /** - * Decodes a Log message from the specified reader or buffer, length delimited. 
- * @param reader Reader or buffer to decode from - * @returns Log - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.Workflow.Stream.Log; + /** + * Decodes a GetBackupsRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns GetBackupsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetBackupsRequest; - /** - * Verifies a Log message. - * @param message Plain object to verify - * @returns `null` if valid, otherwise the reason why it is not - */ - public static verify(message: { [k: string]: any }): (string|null); + /** + * Verifies a GetBackupsRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); - /** - * Creates a Log message from a plain object. Also converts values to their respective internal types. - * @param object Plain object - * @returns Log - */ - public static fromObject(object: { [k: string]: any }): vtctldata.Workflow.Stream.Log; + /** + * Creates a GetBackupsRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns GetBackupsRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.GetBackupsRequest; - /** - * Creates a plain object from a Log message. Also converts values to other types if specified. 
- * @param message Log - * @param [options] Conversion options - * @returns Plain object - */ - public static toObject(message: vtctldata.Workflow.Stream.Log, options?: $protobuf.IConversionOptions): { [k: string]: any }; + /** + * Creates a plain object from a GetBackupsRequest message. Also converts values to other types if specified. + * @param message GetBackupsRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.GetBackupsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; - /** - * Converts this Log to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; + /** + * Converts this GetBackupsRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; - /** - * Gets the default type url for Log - * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns The default type url - */ - public static getTypeUrl(typeUrlPrefix?: string): string; - } - } + /** + * Gets the default type url for GetBackupsRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an AddCellInfoRequest. */ - interface IAddCellInfoRequest { - - /** AddCellInfoRequest name */ - name?: (string|null); + /** Properties of a GetBackupsResponse. */ + interface IGetBackupsResponse { - /** AddCellInfoRequest cell_info */ - cell_info?: (topodata.ICellInfo|null); + /** GetBackupsResponse backups */ + backups?: (mysqlctl.IBackupInfo[]|null); } - /** Represents an AddCellInfoRequest. */ - class AddCellInfoRequest implements IAddCellInfoRequest { + /** Represents a GetBackupsResponse. */ + class GetBackupsResponse implements IGetBackupsResponse { /** - * Constructs a new AddCellInfoRequest. + * Constructs a new GetBackupsResponse. 
* @param [properties] Properties to set */ - constructor(properties?: vtctldata.IAddCellInfoRequest); - - /** AddCellInfoRequest name. */ - public name: string; + constructor(properties?: vtctldata.IGetBackupsResponse); - /** AddCellInfoRequest cell_info. */ - public cell_info?: (topodata.ICellInfo|null); + /** GetBackupsResponse backups. */ + public backups: mysqlctl.IBackupInfo[]; /** - * Creates a new AddCellInfoRequest instance using the specified properties. + * Creates a new GetBackupsResponse instance using the specified properties. * @param [properties] Properties to set - * @returns AddCellInfoRequest instance + * @returns GetBackupsResponse instance */ - public static create(properties?: vtctldata.IAddCellInfoRequest): vtctldata.AddCellInfoRequest; + public static create(properties?: vtctldata.IGetBackupsResponse): vtctldata.GetBackupsResponse; /** - * Encodes the specified AddCellInfoRequest message. Does not implicitly {@link vtctldata.AddCellInfoRequest.verify|verify} messages. - * @param message AddCellInfoRequest message or plain object to encode + * Encodes the specified GetBackupsResponse message. Does not implicitly {@link vtctldata.GetBackupsResponse.verify|verify} messages. + * @param message GetBackupsResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IAddCellInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetBackupsResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified AddCellInfoRequest message, length delimited. Does not implicitly {@link vtctldata.AddCellInfoRequest.verify|verify} messages. - * @param message AddCellInfoRequest message or plain object to encode + * Encodes the specified GetBackupsResponse message, length delimited. Does not implicitly {@link vtctldata.GetBackupsResponse.verify|verify} messages. 
+ * @param message GetBackupsResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IAddCellInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetBackupsResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an AddCellInfoRequest message from the specified reader or buffer. + * Decodes a GetBackupsResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns AddCellInfoRequest + * @returns GetBackupsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.AddCellInfoRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetBackupsResponse; /** - * Decodes an AddCellInfoRequest message from the specified reader or buffer, length delimited. + * Decodes a GetBackupsResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns AddCellInfoRequest + * @returns GetBackupsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.AddCellInfoRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetBackupsResponse; /** - * Verifies an AddCellInfoRequest message. + * Verifies a GetBackupsResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an AddCellInfoRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetBackupsResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns AddCellInfoRequest + * @returns GetBackupsResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.AddCellInfoRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetBackupsResponse; /** - * Creates a plain object from an AddCellInfoRequest message. Also converts values to other types if specified. - * @param message AddCellInfoRequest + * Creates a plain object from a GetBackupsResponse message. Also converts values to other types if specified. + * @param message GetBackupsResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.AddCellInfoRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetBackupsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this AddCellInfoRequest to JSON. + * Converts this GetBackupsResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for AddCellInfoRequest + * Gets the default type url for GetBackupsResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an AddCellInfoResponse. */ - interface IAddCellInfoResponse { + /** Properties of a GetCellInfoRequest. 
*/ + interface IGetCellInfoRequest { + + /** GetCellInfoRequest cell */ + cell?: (string|null); } - /** Represents an AddCellInfoResponse. */ - class AddCellInfoResponse implements IAddCellInfoResponse { + /** Represents a GetCellInfoRequest. */ + class GetCellInfoRequest implements IGetCellInfoRequest { /** - * Constructs a new AddCellInfoResponse. + * Constructs a new GetCellInfoRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IAddCellInfoResponse); + constructor(properties?: vtctldata.IGetCellInfoRequest); + + /** GetCellInfoRequest cell. */ + public cell: string; /** - * Creates a new AddCellInfoResponse instance using the specified properties. + * Creates a new GetCellInfoRequest instance using the specified properties. * @param [properties] Properties to set - * @returns AddCellInfoResponse instance + * @returns GetCellInfoRequest instance */ - public static create(properties?: vtctldata.IAddCellInfoResponse): vtctldata.AddCellInfoResponse; + public static create(properties?: vtctldata.IGetCellInfoRequest): vtctldata.GetCellInfoRequest; /** - * Encodes the specified AddCellInfoResponse message. Does not implicitly {@link vtctldata.AddCellInfoResponse.verify|verify} messages. - * @param message AddCellInfoResponse message or plain object to encode + * Encodes the specified GetCellInfoRequest message. Does not implicitly {@link vtctldata.GetCellInfoRequest.verify|verify} messages. + * @param message GetCellInfoRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IAddCellInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetCellInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified AddCellInfoResponse message, length delimited. Does not implicitly {@link vtctldata.AddCellInfoResponse.verify|verify} messages. 
- * @param message AddCellInfoResponse message or plain object to encode + * Encodes the specified GetCellInfoRequest message, length delimited. Does not implicitly {@link vtctldata.GetCellInfoRequest.verify|verify} messages. + * @param message GetCellInfoRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IAddCellInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetCellInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an AddCellInfoResponse message from the specified reader or buffer. + * Decodes a GetCellInfoRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns AddCellInfoResponse + * @returns GetCellInfoRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.AddCellInfoResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetCellInfoRequest; /** - * Decodes an AddCellInfoResponse message from the specified reader or buffer, length delimited. + * Decodes a GetCellInfoRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns AddCellInfoResponse + * @returns GetCellInfoRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.AddCellInfoResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetCellInfoRequest; /** - * Verifies an AddCellInfoResponse message. 
+ * Verifies a GetCellInfoRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an AddCellInfoResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetCellInfoRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns AddCellInfoResponse + * @returns GetCellInfoRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.AddCellInfoResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetCellInfoRequest; /** - * Creates a plain object from an AddCellInfoResponse message. Also converts values to other types if specified. - * @param message AddCellInfoResponse + * Creates a plain object from a GetCellInfoRequest message. Also converts values to other types if specified. + * @param message GetCellInfoRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.AddCellInfoResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetCellInfoRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this AddCellInfoResponse to JSON. + * Converts this GetCellInfoRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for AddCellInfoResponse + * Gets the default type url for GetCellInfoRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an AddCellsAliasRequest. */ - interface IAddCellsAliasRequest { - - /** AddCellsAliasRequest name */ - name?: (string|null); + /** Properties of a GetCellInfoResponse. 
*/ + interface IGetCellInfoResponse { - /** AddCellsAliasRequest cells */ - cells?: (string[]|null); + /** GetCellInfoResponse cell_info */ + cell_info?: (topodata.ICellInfo|null); } - /** Represents an AddCellsAliasRequest. */ - class AddCellsAliasRequest implements IAddCellsAliasRequest { + /** Represents a GetCellInfoResponse. */ + class GetCellInfoResponse implements IGetCellInfoResponse { /** - * Constructs a new AddCellsAliasRequest. + * Constructs a new GetCellInfoResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IAddCellsAliasRequest); - - /** AddCellsAliasRequest name. */ - public name: string; + constructor(properties?: vtctldata.IGetCellInfoResponse); - /** AddCellsAliasRequest cells. */ - public cells: string[]; + /** GetCellInfoResponse cell_info. */ + public cell_info?: (topodata.ICellInfo|null); /** - * Creates a new AddCellsAliasRequest instance using the specified properties. + * Creates a new GetCellInfoResponse instance using the specified properties. * @param [properties] Properties to set - * @returns AddCellsAliasRequest instance + * @returns GetCellInfoResponse instance */ - public static create(properties?: vtctldata.IAddCellsAliasRequest): vtctldata.AddCellsAliasRequest; + public static create(properties?: vtctldata.IGetCellInfoResponse): vtctldata.GetCellInfoResponse; /** - * Encodes the specified AddCellsAliasRequest message. Does not implicitly {@link vtctldata.AddCellsAliasRequest.verify|verify} messages. - * @param message AddCellsAliasRequest message or plain object to encode + * Encodes the specified GetCellInfoResponse message. Does not implicitly {@link vtctldata.GetCellInfoResponse.verify|verify} messages. 
+ * @param message GetCellInfoResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IAddCellsAliasRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetCellInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified AddCellsAliasRequest message, length delimited. Does not implicitly {@link vtctldata.AddCellsAliasRequest.verify|verify} messages. - * @param message AddCellsAliasRequest message or plain object to encode + * Encodes the specified GetCellInfoResponse message, length delimited. Does not implicitly {@link vtctldata.GetCellInfoResponse.verify|verify} messages. + * @param message GetCellInfoResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IAddCellsAliasRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetCellInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an AddCellsAliasRequest message from the specified reader or buffer. + * Decodes a GetCellInfoResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns AddCellsAliasRequest + * @returns GetCellInfoResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.AddCellsAliasRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetCellInfoResponse; /** - * Decodes an AddCellsAliasRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a GetCellInfoResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns AddCellsAliasRequest + * @returns GetCellInfoResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.AddCellsAliasRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetCellInfoResponse; /** - * Verifies an AddCellsAliasRequest message. + * Verifies a GetCellInfoResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an AddCellsAliasRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetCellInfoResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns AddCellsAliasRequest + * @returns GetCellInfoResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.AddCellsAliasRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetCellInfoResponse; /** - * Creates a plain object from an AddCellsAliasRequest message. Also converts values to other types if specified. - * @param message AddCellsAliasRequest + * Creates a plain object from a GetCellInfoResponse message. Also converts values to other types if specified. 
+ * @param message GetCellInfoResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.AddCellsAliasRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetCellInfoResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this AddCellsAliasRequest to JSON. + * Converts this GetCellInfoResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for AddCellsAliasRequest + * Gets the default type url for GetCellInfoResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an AddCellsAliasResponse. */ - interface IAddCellsAliasResponse { + /** Properties of a GetCellInfoNamesRequest. */ + interface IGetCellInfoNamesRequest { } - /** Represents an AddCellsAliasResponse. */ - class AddCellsAliasResponse implements IAddCellsAliasResponse { + /** Represents a GetCellInfoNamesRequest. */ + class GetCellInfoNamesRequest implements IGetCellInfoNamesRequest { /** - * Constructs a new AddCellsAliasResponse. + * Constructs a new GetCellInfoNamesRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IAddCellsAliasResponse); + constructor(properties?: vtctldata.IGetCellInfoNamesRequest); /** - * Creates a new AddCellsAliasResponse instance using the specified properties. + * Creates a new GetCellInfoNamesRequest instance using the specified properties. 
* @param [properties] Properties to set - * @returns AddCellsAliasResponse instance + * @returns GetCellInfoNamesRequest instance */ - public static create(properties?: vtctldata.IAddCellsAliasResponse): vtctldata.AddCellsAliasResponse; + public static create(properties?: vtctldata.IGetCellInfoNamesRequest): vtctldata.GetCellInfoNamesRequest; /** - * Encodes the specified AddCellsAliasResponse message. Does not implicitly {@link vtctldata.AddCellsAliasResponse.verify|verify} messages. - * @param message AddCellsAliasResponse message or plain object to encode + * Encodes the specified GetCellInfoNamesRequest message. Does not implicitly {@link vtctldata.GetCellInfoNamesRequest.verify|verify} messages. + * @param message GetCellInfoNamesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IAddCellsAliasResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetCellInfoNamesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified AddCellsAliasResponse message, length delimited. Does not implicitly {@link vtctldata.AddCellsAliasResponse.verify|verify} messages. - * @param message AddCellsAliasResponse message or plain object to encode + * Encodes the specified GetCellInfoNamesRequest message, length delimited. Does not implicitly {@link vtctldata.GetCellInfoNamesRequest.verify|verify} messages. + * @param message GetCellInfoNamesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IAddCellsAliasResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetCellInfoNamesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an AddCellsAliasResponse message from the specified reader or buffer. 
+ * Decodes a GetCellInfoNamesRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns AddCellsAliasResponse + * @returns GetCellInfoNamesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.AddCellsAliasResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetCellInfoNamesRequest; /** - * Decodes an AddCellsAliasResponse message from the specified reader or buffer, length delimited. + * Decodes a GetCellInfoNamesRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns AddCellsAliasResponse + * @returns GetCellInfoNamesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.AddCellsAliasResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetCellInfoNamesRequest; /** - * Verifies an AddCellsAliasResponse message. + * Verifies a GetCellInfoNamesRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an AddCellsAliasResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetCellInfoNamesRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns AddCellsAliasResponse + * @returns GetCellInfoNamesRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.AddCellsAliasResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetCellInfoNamesRequest; /** - * Creates a plain object from an AddCellsAliasResponse message. Also converts values to other types if specified. - * @param message AddCellsAliasResponse + * Creates a plain object from a GetCellInfoNamesRequest message. Also converts values to other types if specified. + * @param message GetCellInfoNamesRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.AddCellsAliasResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetCellInfoNamesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this AddCellsAliasResponse to JSON. + * Converts this GetCellInfoNamesRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for AddCellsAliasResponse + * Gets the default type url for GetCellInfoNamesRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an ApplyRoutingRulesRequest. */ - interface IApplyRoutingRulesRequest { - - /** ApplyRoutingRulesRequest routing_rules */ - routing_rules?: (vschema.IRoutingRules|null); - - /** ApplyRoutingRulesRequest skip_rebuild */ - skip_rebuild?: (boolean|null); + /** Properties of a GetCellInfoNamesResponse. */ + interface IGetCellInfoNamesResponse { - /** ApplyRoutingRulesRequest rebuild_cells */ - rebuild_cells?: (string[]|null); + /** GetCellInfoNamesResponse names */ + names?: (string[]|null); } - /** Represents an ApplyRoutingRulesRequest. 
*/ - class ApplyRoutingRulesRequest implements IApplyRoutingRulesRequest { + /** Represents a GetCellInfoNamesResponse. */ + class GetCellInfoNamesResponse implements IGetCellInfoNamesResponse { /** - * Constructs a new ApplyRoutingRulesRequest. + * Constructs a new GetCellInfoNamesResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IApplyRoutingRulesRequest); - - /** ApplyRoutingRulesRequest routing_rules. */ - public routing_rules?: (vschema.IRoutingRules|null); - - /** ApplyRoutingRulesRequest skip_rebuild. */ - public skip_rebuild: boolean; + constructor(properties?: vtctldata.IGetCellInfoNamesResponse); - /** ApplyRoutingRulesRequest rebuild_cells. */ - public rebuild_cells: string[]; + /** GetCellInfoNamesResponse names. */ + public names: string[]; /** - * Creates a new ApplyRoutingRulesRequest instance using the specified properties. + * Creates a new GetCellInfoNamesResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ApplyRoutingRulesRequest instance + * @returns GetCellInfoNamesResponse instance */ - public static create(properties?: vtctldata.IApplyRoutingRulesRequest): vtctldata.ApplyRoutingRulesRequest; + public static create(properties?: vtctldata.IGetCellInfoNamesResponse): vtctldata.GetCellInfoNamesResponse; /** - * Encodes the specified ApplyRoutingRulesRequest message. Does not implicitly {@link vtctldata.ApplyRoutingRulesRequest.verify|verify} messages. - * @param message ApplyRoutingRulesRequest message or plain object to encode + * Encodes the specified GetCellInfoNamesResponse message. Does not implicitly {@link vtctldata.GetCellInfoNamesResponse.verify|verify} messages. 
+ * @param message GetCellInfoNamesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IApplyRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetCellInfoNamesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ApplyRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.ApplyRoutingRulesRequest.verify|verify} messages. - * @param message ApplyRoutingRulesRequest message or plain object to encode + * Encodes the specified GetCellInfoNamesResponse message, length delimited. Does not implicitly {@link vtctldata.GetCellInfoNamesResponse.verify|verify} messages. + * @param message GetCellInfoNamesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IApplyRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetCellInfoNamesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an ApplyRoutingRulesRequest message from the specified reader or buffer. + * Decodes a GetCellInfoNamesResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ApplyRoutingRulesRequest + * @returns GetCellInfoNamesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ApplyRoutingRulesRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetCellInfoNamesResponse; /** - * Decodes an ApplyRoutingRulesRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a GetCellInfoNamesResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ApplyRoutingRulesRequest + * @returns GetCellInfoNamesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ApplyRoutingRulesRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetCellInfoNamesResponse; /** - * Verifies an ApplyRoutingRulesRequest message. + * Verifies a GetCellInfoNamesResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an ApplyRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetCellInfoNamesResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ApplyRoutingRulesRequest + * @returns GetCellInfoNamesResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ApplyRoutingRulesRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetCellInfoNamesResponse; /** - * Creates a plain object from an ApplyRoutingRulesRequest message. Also converts values to other types if specified. - * @param message ApplyRoutingRulesRequest + * Creates a plain object from a GetCellInfoNamesResponse message. Also converts values to other types if specified. 
+ * @param message GetCellInfoNamesResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ApplyRoutingRulesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetCellInfoNamesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ApplyRoutingRulesRequest to JSON. + * Converts this GetCellInfoNamesResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ApplyRoutingRulesRequest + * Gets the default type url for GetCellInfoNamesResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an ApplyRoutingRulesResponse. */ - interface IApplyRoutingRulesResponse { + /** Properties of a GetCellsAliasesRequest. */ + interface IGetCellsAliasesRequest { } - /** Represents an ApplyRoutingRulesResponse. */ - class ApplyRoutingRulesResponse implements IApplyRoutingRulesResponse { + /** Represents a GetCellsAliasesRequest. */ + class GetCellsAliasesRequest implements IGetCellsAliasesRequest { /** - * Constructs a new ApplyRoutingRulesResponse. + * Constructs a new GetCellsAliasesRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IApplyRoutingRulesResponse); + constructor(properties?: vtctldata.IGetCellsAliasesRequest); /** - * Creates a new ApplyRoutingRulesResponse instance using the specified properties. + * Creates a new GetCellsAliasesRequest instance using the specified properties. 
* @param [properties] Properties to set - * @returns ApplyRoutingRulesResponse instance + * @returns GetCellsAliasesRequest instance */ - public static create(properties?: vtctldata.IApplyRoutingRulesResponse): vtctldata.ApplyRoutingRulesResponse; + public static create(properties?: vtctldata.IGetCellsAliasesRequest): vtctldata.GetCellsAliasesRequest; /** - * Encodes the specified ApplyRoutingRulesResponse message. Does not implicitly {@link vtctldata.ApplyRoutingRulesResponse.verify|verify} messages. - * @param message ApplyRoutingRulesResponse message or plain object to encode + * Encodes the specified GetCellsAliasesRequest message. Does not implicitly {@link vtctldata.GetCellsAliasesRequest.verify|verify} messages. + * @param message GetCellsAliasesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IApplyRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetCellsAliasesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ApplyRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.ApplyRoutingRulesResponse.verify|verify} messages. - * @param message ApplyRoutingRulesResponse message or plain object to encode + * Encodes the specified GetCellsAliasesRequest message, length delimited. Does not implicitly {@link vtctldata.GetCellsAliasesRequest.verify|verify} messages. + * @param message GetCellsAliasesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IApplyRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetCellsAliasesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an ApplyRoutingRulesResponse message from the specified reader or buffer. 
+ * Decodes a GetCellsAliasesRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ApplyRoutingRulesResponse + * @returns GetCellsAliasesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ApplyRoutingRulesResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetCellsAliasesRequest; /** - * Decodes an ApplyRoutingRulesResponse message from the specified reader or buffer, length delimited. + * Decodes a GetCellsAliasesRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ApplyRoutingRulesResponse + * @returns GetCellsAliasesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ApplyRoutingRulesResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetCellsAliasesRequest; /** - * Verifies an ApplyRoutingRulesResponse message. + * Verifies a GetCellsAliasesRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an ApplyRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetCellsAliasesRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ApplyRoutingRulesResponse + * @returns GetCellsAliasesRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ApplyRoutingRulesResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetCellsAliasesRequest; /** - * Creates a plain object from an ApplyRoutingRulesResponse message. Also converts values to other types if specified. - * @param message ApplyRoutingRulesResponse + * Creates a plain object from a GetCellsAliasesRequest message. Also converts values to other types if specified. + * @param message GetCellsAliasesRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ApplyRoutingRulesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetCellsAliasesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ApplyRoutingRulesResponse to JSON. + * Converts this GetCellsAliasesRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ApplyRoutingRulesResponse + * Gets the default type url for GetCellsAliasesRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an ApplyShardRoutingRulesRequest. */ - interface IApplyShardRoutingRulesRequest { - - /** ApplyShardRoutingRulesRequest shard_routing_rules */ - shard_routing_rules?: (vschema.IShardRoutingRules|null); - - /** ApplyShardRoutingRulesRequest skip_rebuild */ - skip_rebuild?: (boolean|null); + /** Properties of a GetCellsAliasesResponse. 
*/ + interface IGetCellsAliasesResponse { - /** ApplyShardRoutingRulesRequest rebuild_cells */ - rebuild_cells?: (string[]|null); + /** GetCellsAliasesResponse aliases */ + aliases?: ({ [k: string]: topodata.ICellsAlias }|null); } - /** Represents an ApplyShardRoutingRulesRequest. */ - class ApplyShardRoutingRulesRequest implements IApplyShardRoutingRulesRequest { + /** Represents a GetCellsAliasesResponse. */ + class GetCellsAliasesResponse implements IGetCellsAliasesResponse { /** - * Constructs a new ApplyShardRoutingRulesRequest. + * Constructs a new GetCellsAliasesResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IApplyShardRoutingRulesRequest); - - /** ApplyShardRoutingRulesRequest shard_routing_rules. */ - public shard_routing_rules?: (vschema.IShardRoutingRules|null); - - /** ApplyShardRoutingRulesRequest skip_rebuild. */ - public skip_rebuild: boolean; + constructor(properties?: vtctldata.IGetCellsAliasesResponse); - /** ApplyShardRoutingRulesRequest rebuild_cells. */ - public rebuild_cells: string[]; + /** GetCellsAliasesResponse aliases. */ + public aliases: { [k: string]: topodata.ICellsAlias }; /** - * Creates a new ApplyShardRoutingRulesRequest instance using the specified properties. + * Creates a new GetCellsAliasesResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ApplyShardRoutingRulesRequest instance + * @returns GetCellsAliasesResponse instance */ - public static create(properties?: vtctldata.IApplyShardRoutingRulesRequest): vtctldata.ApplyShardRoutingRulesRequest; + public static create(properties?: vtctldata.IGetCellsAliasesResponse): vtctldata.GetCellsAliasesResponse; /** - * Encodes the specified ApplyShardRoutingRulesRequest message. Does not implicitly {@link vtctldata.ApplyShardRoutingRulesRequest.verify|verify} messages. 
- * @param message ApplyShardRoutingRulesRequest message or plain object to encode + * Encodes the specified GetCellsAliasesResponse message. Does not implicitly {@link vtctldata.GetCellsAliasesResponse.verify|verify} messages. + * @param message GetCellsAliasesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IApplyShardRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetCellsAliasesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ApplyShardRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.ApplyShardRoutingRulesRequest.verify|verify} messages. - * @param message ApplyShardRoutingRulesRequest message or plain object to encode + * Encodes the specified GetCellsAliasesResponse message, length delimited. Does not implicitly {@link vtctldata.GetCellsAliasesResponse.verify|verify} messages. + * @param message GetCellsAliasesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IApplyShardRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetCellsAliasesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an ApplyShardRoutingRulesRequest message from the specified reader or buffer. + * Decodes a GetCellsAliasesResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ApplyShardRoutingRulesRequest + * @returns GetCellsAliasesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ApplyShardRoutingRulesRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetCellsAliasesResponse; /** - * Decodes an ApplyShardRoutingRulesRequest message from the specified reader or buffer, length delimited. + * Decodes a GetCellsAliasesResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ApplyShardRoutingRulesRequest + * @returns GetCellsAliasesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ApplyShardRoutingRulesRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetCellsAliasesResponse; /** - * Verifies an ApplyShardRoutingRulesRequest message. + * Verifies a GetCellsAliasesResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an ApplyShardRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetCellsAliasesResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ApplyShardRoutingRulesRequest + * @returns GetCellsAliasesResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ApplyShardRoutingRulesRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetCellsAliasesResponse; /** - * Creates a plain object from an ApplyShardRoutingRulesRequest message. Also converts values to other types if specified. - * @param message ApplyShardRoutingRulesRequest + * Creates a plain object from a GetCellsAliasesResponse message. Also converts values to other types if specified. + * @param message GetCellsAliasesResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ApplyShardRoutingRulesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetCellsAliasesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ApplyShardRoutingRulesRequest to JSON. + * Converts this GetCellsAliasesResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ApplyShardRoutingRulesRequest + * Gets the default type url for GetCellsAliasesResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an ApplyShardRoutingRulesResponse. */ - interface IApplyShardRoutingRulesResponse { + /** Properties of a GetFullStatusRequest. */ + interface IGetFullStatusRequest { + + /** GetFullStatusRequest tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); } - /** Represents an ApplyShardRoutingRulesResponse. */ - class ApplyShardRoutingRulesResponse implements IApplyShardRoutingRulesResponse { + /** Represents a GetFullStatusRequest. 
*/ + class GetFullStatusRequest implements IGetFullStatusRequest { /** - * Constructs a new ApplyShardRoutingRulesResponse. + * Constructs a new GetFullStatusRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IApplyShardRoutingRulesResponse); + constructor(properties?: vtctldata.IGetFullStatusRequest); + + /** GetFullStatusRequest tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); /** - * Creates a new ApplyShardRoutingRulesResponse instance using the specified properties. + * Creates a new GetFullStatusRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ApplyShardRoutingRulesResponse instance + * @returns GetFullStatusRequest instance */ - public static create(properties?: vtctldata.IApplyShardRoutingRulesResponse): vtctldata.ApplyShardRoutingRulesResponse; + public static create(properties?: vtctldata.IGetFullStatusRequest): vtctldata.GetFullStatusRequest; /** - * Encodes the specified ApplyShardRoutingRulesResponse message. Does not implicitly {@link vtctldata.ApplyShardRoutingRulesResponse.verify|verify} messages. - * @param message ApplyShardRoutingRulesResponse message or plain object to encode + * Encodes the specified GetFullStatusRequest message. Does not implicitly {@link vtctldata.GetFullStatusRequest.verify|verify} messages. + * @param message GetFullStatusRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IApplyShardRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetFullStatusRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ApplyShardRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.ApplyShardRoutingRulesResponse.verify|verify} messages. 
- * @param message ApplyShardRoutingRulesResponse message or plain object to encode + * Encodes the specified GetFullStatusRequest message, length delimited. Does not implicitly {@link vtctldata.GetFullStatusRequest.verify|verify} messages. + * @param message GetFullStatusRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IApplyShardRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetFullStatusRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an ApplyShardRoutingRulesResponse message from the specified reader or buffer. + * Decodes a GetFullStatusRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ApplyShardRoutingRulesResponse + * @returns GetFullStatusRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ApplyShardRoutingRulesResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetFullStatusRequest; /** - * Decodes an ApplyShardRoutingRulesResponse message from the specified reader or buffer, length delimited. + * Decodes a GetFullStatusRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns ApplyShardRoutingRulesResponse + * @returns GetFullStatusRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ApplyShardRoutingRulesResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetFullStatusRequest; /** - * Verifies an ApplyShardRoutingRulesResponse message. + * Verifies a GetFullStatusRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an ApplyShardRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetFullStatusRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ApplyShardRoutingRulesResponse + * @returns GetFullStatusRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ApplyShardRoutingRulesResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetFullStatusRequest; /** - * Creates a plain object from an ApplyShardRoutingRulesResponse message. Also converts values to other types if specified. - * @param message ApplyShardRoutingRulesResponse + * Creates a plain object from a GetFullStatusRequest message. Also converts values to other types if specified. 
+ * @param message GetFullStatusRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ApplyShardRoutingRulesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetFullStatusRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ApplyShardRoutingRulesResponse to JSON. + * Converts this GetFullStatusRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ApplyShardRoutingRulesResponse + * Gets the default type url for GetFullStatusRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an ApplySchemaRequest. */ - interface IApplySchemaRequest { - - /** ApplySchemaRequest keyspace */ - keyspace?: (string|null); - - /** ApplySchemaRequest allow_long_unavailability */ - allow_long_unavailability?: (boolean|null); - - /** ApplySchemaRequest sql */ - sql?: (string[]|null); - - /** ApplySchemaRequest ddl_strategy */ - ddl_strategy?: (string|null); - - /** ApplySchemaRequest uuid_list */ - uuid_list?: (string[]|null); - - /** ApplySchemaRequest migration_context */ - migration_context?: (string|null); - - /** ApplySchemaRequest wait_replicas_timeout */ - wait_replicas_timeout?: (vttime.IDuration|null); - - /** ApplySchemaRequest skip_preflight */ - skip_preflight?: (boolean|null); + /** Properties of a GetFullStatusResponse. */ + interface IGetFullStatusResponse { - /** ApplySchemaRequest caller_id */ - caller_id?: (vtrpc.ICallerID|null); + /** GetFullStatusResponse status */ + status?: (replicationdata.IFullStatus|null); } - /** Represents an ApplySchemaRequest. */ - class ApplySchemaRequest implements IApplySchemaRequest { + /** Represents a GetFullStatusResponse. 
*/ + class GetFullStatusResponse implements IGetFullStatusResponse { /** - * Constructs a new ApplySchemaRequest. + * Constructs a new GetFullStatusResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IApplySchemaRequest); - - /** ApplySchemaRequest keyspace. */ - public keyspace: string; - - /** ApplySchemaRequest allow_long_unavailability. */ - public allow_long_unavailability: boolean; - - /** ApplySchemaRequest sql. */ - public sql: string[]; - - /** ApplySchemaRequest ddl_strategy. */ - public ddl_strategy: string; - - /** ApplySchemaRequest uuid_list. */ - public uuid_list: string[]; - - /** ApplySchemaRequest migration_context. */ - public migration_context: string; - - /** ApplySchemaRequest wait_replicas_timeout. */ - public wait_replicas_timeout?: (vttime.IDuration|null); - - /** ApplySchemaRequest skip_preflight. */ - public skip_preflight: boolean; + constructor(properties?: vtctldata.IGetFullStatusResponse); - /** ApplySchemaRequest caller_id. */ - public caller_id?: (vtrpc.ICallerID|null); + /** GetFullStatusResponse status. */ + public status?: (replicationdata.IFullStatus|null); /** - * Creates a new ApplySchemaRequest instance using the specified properties. + * Creates a new GetFullStatusResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ApplySchemaRequest instance + * @returns GetFullStatusResponse instance */ - public static create(properties?: vtctldata.IApplySchemaRequest): vtctldata.ApplySchemaRequest; + public static create(properties?: vtctldata.IGetFullStatusResponse): vtctldata.GetFullStatusResponse; /** - * Encodes the specified ApplySchemaRequest message. Does not implicitly {@link vtctldata.ApplySchemaRequest.verify|verify} messages. - * @param message ApplySchemaRequest message or plain object to encode + * Encodes the specified GetFullStatusResponse message. Does not implicitly {@link vtctldata.GetFullStatusResponse.verify|verify} messages. 
+ * @param message GetFullStatusResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IApplySchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetFullStatusResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ApplySchemaRequest message, length delimited. Does not implicitly {@link vtctldata.ApplySchemaRequest.verify|verify} messages. - * @param message ApplySchemaRequest message or plain object to encode + * Encodes the specified GetFullStatusResponse message, length delimited. Does not implicitly {@link vtctldata.GetFullStatusResponse.verify|verify} messages. + * @param message GetFullStatusResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IApplySchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetFullStatusResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an ApplySchemaRequest message from the specified reader or buffer. + * Decodes a GetFullStatusResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ApplySchemaRequest + * @returns GetFullStatusResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ApplySchemaRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetFullStatusResponse; /** - * Decodes an ApplySchemaRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a GetFullStatusResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ApplySchemaRequest + * @returns GetFullStatusResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ApplySchemaRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetFullStatusResponse; /** - * Verifies an ApplySchemaRequest message. + * Verifies a GetFullStatusResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an ApplySchemaRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetFullStatusResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ApplySchemaRequest + * @returns GetFullStatusResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ApplySchemaRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetFullStatusResponse; /** - * Creates a plain object from an ApplySchemaRequest message. Also converts values to other types if specified. - * @param message ApplySchemaRequest + * Creates a plain object from a GetFullStatusResponse message. Also converts values to other types if specified. 
+ * @param message GetFullStatusResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ApplySchemaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetFullStatusResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ApplySchemaRequest to JSON. + * Converts this GetFullStatusResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ApplySchemaRequest + * Gets the default type url for GetFullStatusResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an ApplySchemaResponse. */ - interface IApplySchemaResponse { - - /** ApplySchemaResponse uuid_list */ - uuid_list?: (string[]|null); + /** Properties of a GetKeyspacesRequest. */ + interface IGetKeyspacesRequest { } - /** Represents an ApplySchemaResponse. */ - class ApplySchemaResponse implements IApplySchemaResponse { + /** Represents a GetKeyspacesRequest. */ + class GetKeyspacesRequest implements IGetKeyspacesRequest { /** - * Constructs a new ApplySchemaResponse. + * Constructs a new GetKeyspacesRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IApplySchemaResponse); - - /** ApplySchemaResponse uuid_list. */ - public uuid_list: string[]; + constructor(properties?: vtctldata.IGetKeyspacesRequest); /** - * Creates a new ApplySchemaResponse instance using the specified properties. + * Creates a new GetKeyspacesRequest instance using the specified properties. 
* @param [properties] Properties to set - * @returns ApplySchemaResponse instance + * @returns GetKeyspacesRequest instance */ - public static create(properties?: vtctldata.IApplySchemaResponse): vtctldata.ApplySchemaResponse; + public static create(properties?: vtctldata.IGetKeyspacesRequest): vtctldata.GetKeyspacesRequest; /** - * Encodes the specified ApplySchemaResponse message. Does not implicitly {@link vtctldata.ApplySchemaResponse.verify|verify} messages. - * @param message ApplySchemaResponse message or plain object to encode + * Encodes the specified GetKeyspacesRequest message. Does not implicitly {@link vtctldata.GetKeyspacesRequest.verify|verify} messages. + * @param message GetKeyspacesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IApplySchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetKeyspacesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ApplySchemaResponse message, length delimited. Does not implicitly {@link vtctldata.ApplySchemaResponse.verify|verify} messages. - * @param message ApplySchemaResponse message or plain object to encode + * Encodes the specified GetKeyspacesRequest message, length delimited. Does not implicitly {@link vtctldata.GetKeyspacesRequest.verify|verify} messages. + * @param message GetKeyspacesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IApplySchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetKeyspacesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an ApplySchemaResponse message from the specified reader or buffer. + * Decodes a GetKeyspacesRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ApplySchemaResponse + * @returns GetKeyspacesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ApplySchemaResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetKeyspacesRequest; /** - * Decodes an ApplySchemaResponse message from the specified reader or buffer, length delimited. + * Decodes a GetKeyspacesRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ApplySchemaResponse + * @returns GetKeyspacesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ApplySchemaResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetKeyspacesRequest; /** - * Verifies an ApplySchemaResponse message. + * Verifies a GetKeyspacesRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an ApplySchemaResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetKeyspacesRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ApplySchemaResponse + * @returns GetKeyspacesRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ApplySchemaResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetKeyspacesRequest; /** - * Creates a plain object from an ApplySchemaResponse message. Also converts values to other types if specified. - * @param message ApplySchemaResponse + * Creates a plain object from a GetKeyspacesRequest message. Also converts values to other types if specified. + * @param message GetKeyspacesRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ApplySchemaResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetKeyspacesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ApplySchemaResponse to JSON. + * Converts this GetKeyspacesRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ApplySchemaResponse + * Gets the default type url for GetKeyspacesRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an ApplyVSchemaRequest. */ - interface IApplyVSchemaRequest { - - /** ApplyVSchemaRequest keyspace */ - keyspace?: (string|null); - - /** ApplyVSchemaRequest skip_rebuild */ - skip_rebuild?: (boolean|null); - - /** ApplyVSchemaRequest dry_run */ - dry_run?: (boolean|null); - - /** ApplyVSchemaRequest cells */ - cells?: (string[]|null); - - /** ApplyVSchemaRequest v_schema */ - v_schema?: (vschema.IKeyspace|null); + /** Properties of a GetKeyspacesResponse. 
*/ + interface IGetKeyspacesResponse { - /** ApplyVSchemaRequest sql */ - sql?: (string|null); + /** GetKeyspacesResponse keyspaces */ + keyspaces?: (vtctldata.IKeyspace[]|null); } - /** Represents an ApplyVSchemaRequest. */ - class ApplyVSchemaRequest implements IApplyVSchemaRequest { + /** Represents a GetKeyspacesResponse. */ + class GetKeyspacesResponse implements IGetKeyspacesResponse { /** - * Constructs a new ApplyVSchemaRequest. + * Constructs a new GetKeyspacesResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IApplyVSchemaRequest); - - /** ApplyVSchemaRequest keyspace. */ - public keyspace: string; - - /** ApplyVSchemaRequest skip_rebuild. */ - public skip_rebuild: boolean; - - /** ApplyVSchemaRequest dry_run. */ - public dry_run: boolean; - - /** ApplyVSchemaRequest cells. */ - public cells: string[]; - - /** ApplyVSchemaRequest v_schema. */ - public v_schema?: (vschema.IKeyspace|null); + constructor(properties?: vtctldata.IGetKeyspacesResponse); - /** ApplyVSchemaRequest sql. */ - public sql: string; + /** GetKeyspacesResponse keyspaces. */ + public keyspaces: vtctldata.IKeyspace[]; /** - * Creates a new ApplyVSchemaRequest instance using the specified properties. + * Creates a new GetKeyspacesResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ApplyVSchemaRequest instance + * @returns GetKeyspacesResponse instance */ - public static create(properties?: vtctldata.IApplyVSchemaRequest): vtctldata.ApplyVSchemaRequest; + public static create(properties?: vtctldata.IGetKeyspacesResponse): vtctldata.GetKeyspacesResponse; /** - * Encodes the specified ApplyVSchemaRequest message. Does not implicitly {@link vtctldata.ApplyVSchemaRequest.verify|verify} messages. - * @param message ApplyVSchemaRequest message or plain object to encode + * Encodes the specified GetKeyspacesResponse message. Does not implicitly {@link vtctldata.GetKeyspacesResponse.verify|verify} messages. 
+ * @param message GetKeyspacesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IApplyVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetKeyspacesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ApplyVSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.ApplyVSchemaRequest.verify|verify} messages. - * @param message ApplyVSchemaRequest message or plain object to encode + * Encodes the specified GetKeyspacesResponse message, length delimited. Does not implicitly {@link vtctldata.GetKeyspacesResponse.verify|verify} messages. + * @param message GetKeyspacesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IApplyVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetKeyspacesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an ApplyVSchemaRequest message from the specified reader or buffer. + * Decodes a GetKeyspacesResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ApplyVSchemaRequest + * @returns GetKeyspacesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ApplyVSchemaRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetKeyspacesResponse; /** - * Decodes an ApplyVSchemaRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a GetKeyspacesResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ApplyVSchemaRequest + * @returns GetKeyspacesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ApplyVSchemaRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetKeyspacesResponse; /** - * Verifies an ApplyVSchemaRequest message. + * Verifies a GetKeyspacesResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an ApplyVSchemaRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetKeyspacesResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ApplyVSchemaRequest + * @returns GetKeyspacesResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ApplyVSchemaRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetKeyspacesResponse; /** - * Creates a plain object from an ApplyVSchemaRequest message. Also converts values to other types if specified. - * @param message ApplyVSchemaRequest + * Creates a plain object from a GetKeyspacesResponse message. Also converts values to other types if specified. 
+ * @param message GetKeyspacesResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ApplyVSchemaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetKeyspacesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ApplyVSchemaRequest to JSON. + * Converts this GetKeyspacesResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ApplyVSchemaRequest + * Gets the default type url for GetKeyspacesResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an ApplyVSchemaResponse. */ - interface IApplyVSchemaResponse { + /** Properties of a GetKeyspaceRequest. */ + interface IGetKeyspaceRequest { - /** ApplyVSchemaResponse v_schema */ - v_schema?: (vschema.IKeyspace|null); + /** GetKeyspaceRequest keyspace */ + keyspace?: (string|null); } - /** Represents an ApplyVSchemaResponse. */ - class ApplyVSchemaResponse implements IApplyVSchemaResponse { + /** Represents a GetKeyspaceRequest. */ + class GetKeyspaceRequest implements IGetKeyspaceRequest { /** - * Constructs a new ApplyVSchemaResponse. + * Constructs a new GetKeyspaceRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IApplyVSchemaResponse); + constructor(properties?: vtctldata.IGetKeyspaceRequest); - /** ApplyVSchemaResponse v_schema. */ - public v_schema?: (vschema.IKeyspace|null); + /** GetKeyspaceRequest keyspace. */ + public keyspace: string; /** - * Creates a new ApplyVSchemaResponse instance using the specified properties. + * Creates a new GetKeyspaceRequest instance using the specified properties. 
* @param [properties] Properties to set - * @returns ApplyVSchemaResponse instance + * @returns GetKeyspaceRequest instance */ - public static create(properties?: vtctldata.IApplyVSchemaResponse): vtctldata.ApplyVSchemaResponse; + public static create(properties?: vtctldata.IGetKeyspaceRequest): vtctldata.GetKeyspaceRequest; /** - * Encodes the specified ApplyVSchemaResponse message. Does not implicitly {@link vtctldata.ApplyVSchemaResponse.verify|verify} messages. - * @param message ApplyVSchemaResponse message or plain object to encode + * Encodes the specified GetKeyspaceRequest message. Does not implicitly {@link vtctldata.GetKeyspaceRequest.verify|verify} messages. + * @param message GetKeyspaceRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IApplyVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ApplyVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.ApplyVSchemaResponse.verify|verify} messages. - * @param message ApplyVSchemaResponse message or plain object to encode + * Encodes the specified GetKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.GetKeyspaceRequest.verify|verify} messages. + * @param message GetKeyspaceRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IApplyVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an ApplyVSchemaResponse message from the specified reader or buffer. + * Decodes a GetKeyspaceRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ApplyVSchemaResponse + * @returns GetKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ApplyVSchemaResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetKeyspaceRequest; /** - * Decodes an ApplyVSchemaResponse message from the specified reader or buffer, length delimited. + * Decodes a GetKeyspaceRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ApplyVSchemaResponse + * @returns GetKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ApplyVSchemaResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetKeyspaceRequest; /** - * Verifies an ApplyVSchemaResponse message. + * Verifies a GetKeyspaceRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an ApplyVSchemaResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetKeyspaceRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ApplyVSchemaResponse + * @returns GetKeyspaceRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ApplyVSchemaResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetKeyspaceRequest; /** - * Creates a plain object from an ApplyVSchemaResponse message. Also converts values to other types if specified. - * @param message ApplyVSchemaResponse + * Creates a plain object from a GetKeyspaceRequest message. Also converts values to other types if specified. + * @param message GetKeyspaceRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ApplyVSchemaResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetKeyspaceRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ApplyVSchemaResponse to JSON. + * Converts this GetKeyspaceRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ApplyVSchemaResponse + * Gets the default type url for GetKeyspaceRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a BackupRequest. */ - interface IBackupRequest { - - /** BackupRequest tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); - - /** BackupRequest allow_primary */ - allow_primary?: (boolean|null); - - /** BackupRequest concurrency */ - concurrency?: (number|Long|null); + /** Properties of a GetKeyspaceResponse. */ + interface IGetKeyspaceResponse { - /** BackupRequest incremental_from_pos */ - incremental_from_pos?: (string|null); + /** GetKeyspaceResponse keyspace */ + keyspace?: (vtctldata.IKeyspace|null); } - /** Represents a BackupRequest. 
*/ - class BackupRequest implements IBackupRequest { + /** Represents a GetKeyspaceResponse. */ + class GetKeyspaceResponse implements IGetKeyspaceResponse { /** - * Constructs a new BackupRequest. + * Constructs a new GetKeyspaceResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IBackupRequest); - - /** BackupRequest tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); - - /** BackupRequest allow_primary. */ - public allow_primary: boolean; - - /** BackupRequest concurrency. */ - public concurrency: (number|Long); + constructor(properties?: vtctldata.IGetKeyspaceResponse); - /** BackupRequest incremental_from_pos. */ - public incremental_from_pos: string; + /** GetKeyspaceResponse keyspace. */ + public keyspace?: (vtctldata.IKeyspace|null); /** - * Creates a new BackupRequest instance using the specified properties. + * Creates a new GetKeyspaceResponse instance using the specified properties. * @param [properties] Properties to set - * @returns BackupRequest instance + * @returns GetKeyspaceResponse instance */ - public static create(properties?: vtctldata.IBackupRequest): vtctldata.BackupRequest; + public static create(properties?: vtctldata.IGetKeyspaceResponse): vtctldata.GetKeyspaceResponse; /** - * Encodes the specified BackupRequest message. Does not implicitly {@link vtctldata.BackupRequest.verify|verify} messages. - * @param message BackupRequest message or plain object to encode + * Encodes the specified GetKeyspaceResponse message. Does not implicitly {@link vtctldata.GetKeyspaceResponse.verify|verify} messages. 
+ * @param message GetKeyspaceResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IBackupRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified BackupRequest message, length delimited. Does not implicitly {@link vtctldata.BackupRequest.verify|verify} messages. - * @param message BackupRequest message or plain object to encode + * Encodes the specified GetKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.GetKeyspaceResponse.verify|verify} messages. + * @param message GetKeyspaceResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IBackupRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a BackupRequest message from the specified reader or buffer. + * Decodes a GetKeyspaceResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns BackupRequest + * @returns GetKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.BackupRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetKeyspaceResponse; /** - * Decodes a BackupRequest message from the specified reader or buffer, length delimited. + * Decodes a GetKeyspaceResponse message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns BackupRequest + * @returns GetKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.BackupRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetKeyspaceResponse; /** - * Verifies a BackupRequest message. + * Verifies a GetKeyspaceResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a BackupRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetKeyspaceResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns BackupRequest + * @returns GetKeyspaceResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.BackupRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetKeyspaceResponse; /** - * Creates a plain object from a BackupRequest message. Also converts values to other types if specified. - * @param message BackupRequest + * Creates a plain object from a GetKeyspaceResponse message. Also converts values to other types if specified. + * @param message GetKeyspaceResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.BackupRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetKeyspaceResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this BackupRequest to JSON. + * Converts this GetKeyspaceResponse to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for BackupRequest + * Gets the default type url for GetKeyspaceResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a BackupResponse. */ - interface IBackupResponse { + /** Properties of a GetPermissionsRequest. */ + interface IGetPermissionsRequest { - /** BackupResponse tablet_alias */ + /** GetPermissionsRequest tablet_alias */ tablet_alias?: (topodata.ITabletAlias|null); - - /** BackupResponse keyspace */ - keyspace?: (string|null); - - /** BackupResponse shard */ - shard?: (string|null); - - /** BackupResponse event */ - event?: (logutil.IEvent|null); } - /** Represents a BackupResponse. */ - class BackupResponse implements IBackupResponse { + /** Represents a GetPermissionsRequest. */ + class GetPermissionsRequest implements IGetPermissionsRequest { /** - * Constructs a new BackupResponse. + * Constructs a new GetPermissionsRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IBackupResponse); + constructor(properties?: vtctldata.IGetPermissionsRequest); - /** BackupResponse tablet_alias. */ + /** GetPermissionsRequest tablet_alias. */ public tablet_alias?: (topodata.ITabletAlias|null); - /** BackupResponse keyspace. */ - public keyspace: string; - - /** BackupResponse shard. */ - public shard: string; - - /** BackupResponse event. */ - public event?: (logutil.IEvent|null); - /** - * Creates a new BackupResponse instance using the specified properties. + * Creates a new GetPermissionsRequest instance using the specified properties. 
* @param [properties] Properties to set - * @returns BackupResponse instance + * @returns GetPermissionsRequest instance */ - public static create(properties?: vtctldata.IBackupResponse): vtctldata.BackupResponse; + public static create(properties?: vtctldata.IGetPermissionsRequest): vtctldata.GetPermissionsRequest; /** - * Encodes the specified BackupResponse message. Does not implicitly {@link vtctldata.BackupResponse.verify|verify} messages. - * @param message BackupResponse message or plain object to encode + * Encodes the specified GetPermissionsRequest message. Does not implicitly {@link vtctldata.GetPermissionsRequest.verify|verify} messages. + * @param message GetPermissionsRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IBackupResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetPermissionsRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified BackupResponse message, length delimited. Does not implicitly {@link vtctldata.BackupResponse.verify|verify} messages. - * @param message BackupResponse message or plain object to encode + * Encodes the specified GetPermissionsRequest message, length delimited. Does not implicitly {@link vtctldata.GetPermissionsRequest.verify|verify} messages. + * @param message GetPermissionsRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IBackupResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetPermissionsRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a BackupResponse message from the specified reader or buffer. + * Decodes a GetPermissionsRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns BackupResponse + * @returns GetPermissionsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.BackupResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetPermissionsRequest; /** - * Decodes a BackupResponse message from the specified reader or buffer, length delimited. + * Decodes a GetPermissionsRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns BackupResponse + * @returns GetPermissionsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.BackupResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetPermissionsRequest; /** - * Verifies a BackupResponse message. + * Verifies a GetPermissionsRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a BackupResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetPermissionsRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns BackupResponse + * @returns GetPermissionsRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.BackupResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetPermissionsRequest; /** - * Creates a plain object from a BackupResponse message. Also converts values to other types if specified. - * @param message BackupResponse + * Creates a plain object from a GetPermissionsRequest message. Also converts values to other types if specified. + * @param message GetPermissionsRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.BackupResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetPermissionsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this BackupResponse to JSON. + * Converts this GetPermissionsRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for BackupResponse + * Gets the default type url for GetPermissionsRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a BackupShardRequest. */ - interface IBackupShardRequest { - - /** BackupShardRequest keyspace */ - keyspace?: (string|null); - - /** BackupShardRequest shard */ - shard?: (string|null); - - /** BackupShardRequest allow_primary */ - allow_primary?: (boolean|null); + /** Properties of a GetPermissionsResponse. */ + interface IGetPermissionsResponse { - /** BackupShardRequest concurrency */ - concurrency?: (number|Long|null); + /** GetPermissionsResponse permissions */ + permissions?: (tabletmanagerdata.IPermissions|null); } - /** Represents a BackupShardRequest. 
*/ - class BackupShardRequest implements IBackupShardRequest { + /** Represents a GetPermissionsResponse. */ + class GetPermissionsResponse implements IGetPermissionsResponse { /** - * Constructs a new BackupShardRequest. + * Constructs a new GetPermissionsResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IBackupShardRequest); - - /** BackupShardRequest keyspace. */ - public keyspace: string; - - /** BackupShardRequest shard. */ - public shard: string; - - /** BackupShardRequest allow_primary. */ - public allow_primary: boolean; + constructor(properties?: vtctldata.IGetPermissionsResponse); - /** BackupShardRequest concurrency. */ - public concurrency: (number|Long); + /** GetPermissionsResponse permissions. */ + public permissions?: (tabletmanagerdata.IPermissions|null); /** - * Creates a new BackupShardRequest instance using the specified properties. + * Creates a new GetPermissionsResponse instance using the specified properties. * @param [properties] Properties to set - * @returns BackupShardRequest instance + * @returns GetPermissionsResponse instance */ - public static create(properties?: vtctldata.IBackupShardRequest): vtctldata.BackupShardRequest; + public static create(properties?: vtctldata.IGetPermissionsResponse): vtctldata.GetPermissionsResponse; /** - * Encodes the specified BackupShardRequest message. Does not implicitly {@link vtctldata.BackupShardRequest.verify|verify} messages. - * @param message BackupShardRequest message or plain object to encode + * Encodes the specified GetPermissionsResponse message. Does not implicitly {@link vtctldata.GetPermissionsResponse.verify|verify} messages. 
+ * @param message GetPermissionsResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IBackupShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetPermissionsResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified BackupShardRequest message, length delimited. Does not implicitly {@link vtctldata.BackupShardRequest.verify|verify} messages. - * @param message BackupShardRequest message or plain object to encode + * Encodes the specified GetPermissionsResponse message, length delimited. Does not implicitly {@link vtctldata.GetPermissionsResponse.verify|verify} messages. + * @param message GetPermissionsResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IBackupShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetPermissionsResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a BackupShardRequest message from the specified reader or buffer. + * Decodes a GetPermissionsResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns BackupShardRequest + * @returns GetPermissionsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.BackupShardRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetPermissionsResponse; /** - * Decodes a BackupShardRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a GetPermissionsResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns BackupShardRequest + * @returns GetPermissionsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.BackupShardRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetPermissionsResponse; /** - * Verifies a BackupShardRequest message. + * Verifies a GetPermissionsResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a BackupShardRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetPermissionsResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns BackupShardRequest + * @returns GetPermissionsResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.BackupShardRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetPermissionsResponse; /** - * Creates a plain object from a BackupShardRequest message. Also converts values to other types if specified. - * @param message BackupShardRequest + * Creates a plain object from a GetPermissionsResponse message. Also converts values to other types if specified. 
+ * @param message GetPermissionsResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.BackupShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetPermissionsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this BackupShardRequest to JSON. + * Converts this GetPermissionsResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for BackupShardRequest + * Gets the default type url for GetPermissionsResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ChangeTabletTypeRequest. */ - interface IChangeTabletTypeRequest { - - /** ChangeTabletTypeRequest tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); - - /** ChangeTabletTypeRequest db_type */ - db_type?: (topodata.TabletType|null); - - /** ChangeTabletTypeRequest dry_run */ - dry_run?: (boolean|null); + /** Properties of a GetRoutingRulesRequest. */ + interface IGetRoutingRulesRequest { } - /** Represents a ChangeTabletTypeRequest. */ - class ChangeTabletTypeRequest implements IChangeTabletTypeRequest { + /** Represents a GetRoutingRulesRequest. */ + class GetRoutingRulesRequest implements IGetRoutingRulesRequest { /** - * Constructs a new ChangeTabletTypeRequest. + * Constructs a new GetRoutingRulesRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IChangeTabletTypeRequest); - - /** ChangeTabletTypeRequest tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); - - /** ChangeTabletTypeRequest db_type. */ - public db_type: topodata.TabletType; - - /** ChangeTabletTypeRequest dry_run. 
*/ - public dry_run: boolean; + constructor(properties?: vtctldata.IGetRoutingRulesRequest); /** - * Creates a new ChangeTabletTypeRequest instance using the specified properties. + * Creates a new GetRoutingRulesRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ChangeTabletTypeRequest instance + * @returns GetRoutingRulesRequest instance */ - public static create(properties?: vtctldata.IChangeTabletTypeRequest): vtctldata.ChangeTabletTypeRequest; + public static create(properties?: vtctldata.IGetRoutingRulesRequest): vtctldata.GetRoutingRulesRequest; /** - * Encodes the specified ChangeTabletTypeRequest message. Does not implicitly {@link vtctldata.ChangeTabletTypeRequest.verify|verify} messages. - * @param message ChangeTabletTypeRequest message or plain object to encode + * Encodes the specified GetRoutingRulesRequest message. Does not implicitly {@link vtctldata.GetRoutingRulesRequest.verify|verify} messages. + * @param message GetRoutingRulesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IChangeTabletTypeRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ChangeTabletTypeRequest message, length delimited. Does not implicitly {@link vtctldata.ChangeTabletTypeRequest.verify|verify} messages. - * @param message ChangeTabletTypeRequest message or plain object to encode + * Encodes the specified GetRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.GetRoutingRulesRequest.verify|verify} messages. 
+ * @param message GetRoutingRulesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IChangeTabletTypeRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ChangeTabletTypeRequest message from the specified reader or buffer. + * Decodes a GetRoutingRulesRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ChangeTabletTypeRequest + * @returns GetRoutingRulesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ChangeTabletTypeRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetRoutingRulesRequest; /** - * Decodes a ChangeTabletTypeRequest message from the specified reader or buffer, length delimited. + * Decodes a GetRoutingRulesRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ChangeTabletTypeRequest + * @returns GetRoutingRulesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ChangeTabletTypeRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetRoutingRulesRequest; /** - * Verifies a ChangeTabletTypeRequest message. + * Verifies a GetRoutingRulesRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ChangeTabletTypeRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ChangeTabletTypeRequest + * @returns GetRoutingRulesRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ChangeTabletTypeRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetRoutingRulesRequest; /** - * Creates a plain object from a ChangeTabletTypeRequest message. Also converts values to other types if specified. - * @param message ChangeTabletTypeRequest + * Creates a plain object from a GetRoutingRulesRequest message. Also converts values to other types if specified. + * @param message GetRoutingRulesRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ChangeTabletTypeRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetRoutingRulesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ChangeTabletTypeRequest to JSON. + * Converts this GetRoutingRulesRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ChangeTabletTypeRequest + * Gets the default type url for GetRoutingRulesRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ChangeTabletTypeResponse. 
*/ - interface IChangeTabletTypeResponse { - - /** ChangeTabletTypeResponse before_tablet */ - before_tablet?: (topodata.ITablet|null); - - /** ChangeTabletTypeResponse after_tablet */ - after_tablet?: (topodata.ITablet|null); + /** Properties of a GetRoutingRulesResponse. */ + interface IGetRoutingRulesResponse { - /** ChangeTabletTypeResponse was_dry_run */ - was_dry_run?: (boolean|null); + /** GetRoutingRulesResponse routing_rules */ + routing_rules?: (vschema.IRoutingRules|null); } - /** Represents a ChangeTabletTypeResponse. */ - class ChangeTabletTypeResponse implements IChangeTabletTypeResponse { + /** Represents a GetRoutingRulesResponse. */ + class GetRoutingRulesResponse implements IGetRoutingRulesResponse { /** - * Constructs a new ChangeTabletTypeResponse. + * Constructs a new GetRoutingRulesResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IChangeTabletTypeResponse); - - /** ChangeTabletTypeResponse before_tablet. */ - public before_tablet?: (topodata.ITablet|null); - - /** ChangeTabletTypeResponse after_tablet. */ - public after_tablet?: (topodata.ITablet|null); + constructor(properties?: vtctldata.IGetRoutingRulesResponse); - /** ChangeTabletTypeResponse was_dry_run. */ - public was_dry_run: boolean; + /** GetRoutingRulesResponse routing_rules. */ + public routing_rules?: (vschema.IRoutingRules|null); /** - * Creates a new ChangeTabletTypeResponse instance using the specified properties. + * Creates a new GetRoutingRulesResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ChangeTabletTypeResponse instance + * @returns GetRoutingRulesResponse instance */ - public static create(properties?: vtctldata.IChangeTabletTypeResponse): vtctldata.ChangeTabletTypeResponse; + public static create(properties?: vtctldata.IGetRoutingRulesResponse): vtctldata.GetRoutingRulesResponse; - /** - * Encodes the specified ChangeTabletTypeResponse message. 
Does not implicitly {@link vtctldata.ChangeTabletTypeResponse.verify|verify} messages. - * @param message ChangeTabletTypeResponse message or plain object to encode + /** + * Encodes the specified GetRoutingRulesResponse message. Does not implicitly {@link vtctldata.GetRoutingRulesResponse.verify|verify} messages. + * @param message GetRoutingRulesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IChangeTabletTypeResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ChangeTabletTypeResponse message, length delimited. Does not implicitly {@link vtctldata.ChangeTabletTypeResponse.verify|verify} messages. - * @param message ChangeTabletTypeResponse message or plain object to encode + * Encodes the specified GetRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.GetRoutingRulesResponse.verify|verify} messages. + * @param message GetRoutingRulesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IChangeTabletTypeResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ChangeTabletTypeResponse message from the specified reader or buffer. + * Decodes a GetRoutingRulesResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ChangeTabletTypeResponse + * @returns GetRoutingRulesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ChangeTabletTypeResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetRoutingRulesResponse; /** - * Decodes a ChangeTabletTypeResponse message from the specified reader or buffer, length delimited. + * Decodes a GetRoutingRulesResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ChangeTabletTypeResponse + * @returns GetRoutingRulesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ChangeTabletTypeResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetRoutingRulesResponse; /** - * Verifies a ChangeTabletTypeResponse message. + * Verifies a GetRoutingRulesResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ChangeTabletTypeResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ChangeTabletTypeResponse + * @returns GetRoutingRulesResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ChangeTabletTypeResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetRoutingRulesResponse; /** - * Creates a plain object from a ChangeTabletTypeResponse message. Also converts values to other types if specified. - * @param message ChangeTabletTypeResponse + * Creates a plain object from a GetRoutingRulesResponse message. Also converts values to other types if specified. + * @param message GetRoutingRulesResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ChangeTabletTypeResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetRoutingRulesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ChangeTabletTypeResponse to JSON. + * Converts this GetRoutingRulesResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ChangeTabletTypeResponse + * Gets the default type url for GetRoutingRulesResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a CreateKeyspaceRequest. */ - interface ICreateKeyspaceRequest { - - /** CreateKeyspaceRequest name */ - name?: (string|null); - - /** CreateKeyspaceRequest force */ - force?: (boolean|null); + /** Properties of a GetSchemaRequest. 
*/ + interface IGetSchemaRequest { - /** CreateKeyspaceRequest allow_empty_v_schema */ - allow_empty_v_schema?: (boolean|null); + /** GetSchemaRequest tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); - /** CreateKeyspaceRequest served_froms */ - served_froms?: (topodata.Keyspace.IServedFrom[]|null); + /** GetSchemaRequest tables */ + tables?: (string[]|null); - /** CreateKeyspaceRequest type */ - type?: (topodata.KeyspaceType|null); + /** GetSchemaRequest exclude_tables */ + exclude_tables?: (string[]|null); - /** CreateKeyspaceRequest base_keyspace */ - base_keyspace?: (string|null); + /** GetSchemaRequest include_views */ + include_views?: (boolean|null); - /** CreateKeyspaceRequest snapshot_time */ - snapshot_time?: (vttime.ITime|null); + /** GetSchemaRequest table_names_only */ + table_names_only?: (boolean|null); - /** CreateKeyspaceRequest durability_policy */ - durability_policy?: (string|null); + /** GetSchemaRequest table_sizes_only */ + table_sizes_only?: (boolean|null); - /** CreateKeyspaceRequest sidecar_db_name */ - sidecar_db_name?: (string|null); + /** GetSchemaRequest table_schema_only */ + table_schema_only?: (boolean|null); } - /** Represents a CreateKeyspaceRequest. */ - class CreateKeyspaceRequest implements ICreateKeyspaceRequest { + /** Represents a GetSchemaRequest. */ + class GetSchemaRequest implements IGetSchemaRequest { /** - * Constructs a new CreateKeyspaceRequest. + * Constructs a new GetSchemaRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ICreateKeyspaceRequest); - - /** CreateKeyspaceRequest name. */ - public name: string; - - /** CreateKeyspaceRequest force. */ - public force: boolean; + constructor(properties?: vtctldata.IGetSchemaRequest); - /** CreateKeyspaceRequest allow_empty_v_schema. */ - public allow_empty_v_schema: boolean; + /** GetSchemaRequest tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); - /** CreateKeyspaceRequest served_froms. 
*/ - public served_froms: topodata.Keyspace.IServedFrom[]; + /** GetSchemaRequest tables. */ + public tables: string[]; - /** CreateKeyspaceRequest type. */ - public type: topodata.KeyspaceType; + /** GetSchemaRequest exclude_tables. */ + public exclude_tables: string[]; - /** CreateKeyspaceRequest base_keyspace. */ - public base_keyspace: string; + /** GetSchemaRequest include_views. */ + public include_views: boolean; - /** CreateKeyspaceRequest snapshot_time. */ - public snapshot_time?: (vttime.ITime|null); + /** GetSchemaRequest table_names_only. */ + public table_names_only: boolean; - /** CreateKeyspaceRequest durability_policy. */ - public durability_policy: string; + /** GetSchemaRequest table_sizes_only. */ + public table_sizes_only: boolean; - /** CreateKeyspaceRequest sidecar_db_name. */ - public sidecar_db_name: string; + /** GetSchemaRequest table_schema_only. */ + public table_schema_only: boolean; /** - * Creates a new CreateKeyspaceRequest instance using the specified properties. + * Creates a new GetSchemaRequest instance using the specified properties. * @param [properties] Properties to set - * @returns CreateKeyspaceRequest instance + * @returns GetSchemaRequest instance */ - public static create(properties?: vtctldata.ICreateKeyspaceRequest): vtctldata.CreateKeyspaceRequest; + public static create(properties?: vtctldata.IGetSchemaRequest): vtctldata.GetSchemaRequest; /** - * Encodes the specified CreateKeyspaceRequest message. Does not implicitly {@link vtctldata.CreateKeyspaceRequest.verify|verify} messages. - * @param message CreateKeyspaceRequest message or plain object to encode + * Encodes the specified GetSchemaRequest message. Does not implicitly {@link vtctldata.GetSchemaRequest.verify|verify} messages. 
+ * @param message GetSchemaRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ICreateKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified CreateKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.CreateKeyspaceRequest.verify|verify} messages. - * @param message CreateKeyspaceRequest message or plain object to encode + * Encodes the specified GetSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.GetSchemaRequest.verify|verify} messages. + * @param message GetSchemaRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ICreateKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a CreateKeyspaceRequest message from the specified reader or buffer. + * Decodes a GetSchemaRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns CreateKeyspaceRequest + * @returns GetSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.CreateKeyspaceRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSchemaRequest; /** - * Decodes a CreateKeyspaceRequest message from the specified reader or buffer, length delimited. + * Decodes a GetSchemaRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns CreateKeyspaceRequest + * @returns GetSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.CreateKeyspaceRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSchemaRequest; /** - * Verifies a CreateKeyspaceRequest message. + * Verifies a GetSchemaRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a CreateKeyspaceRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetSchemaRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns CreateKeyspaceRequest + * @returns GetSchemaRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.CreateKeyspaceRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetSchemaRequest; /** - * Creates a plain object from a CreateKeyspaceRequest message. Also converts values to other types if specified. - * @param message CreateKeyspaceRequest + * Creates a plain object from a GetSchemaRequest message. Also converts values to other types if specified. + * @param message GetSchemaRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.CreateKeyspaceRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetSchemaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this CreateKeyspaceRequest to JSON. + * Converts this GetSchemaRequest to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for CreateKeyspaceRequest + * Gets the default type url for GetSchemaRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a CreateKeyspaceResponse. */ - interface ICreateKeyspaceResponse { + /** Properties of a GetSchemaResponse. */ + interface IGetSchemaResponse { - /** CreateKeyspaceResponse keyspace */ - keyspace?: (vtctldata.IKeyspace|null); + /** GetSchemaResponse schema */ + schema?: (tabletmanagerdata.ISchemaDefinition|null); } - /** Represents a CreateKeyspaceResponse. */ - class CreateKeyspaceResponse implements ICreateKeyspaceResponse { + /** Represents a GetSchemaResponse. */ + class GetSchemaResponse implements IGetSchemaResponse { /** - * Constructs a new CreateKeyspaceResponse. + * Constructs a new GetSchemaResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ICreateKeyspaceResponse); + constructor(properties?: vtctldata.IGetSchemaResponse); - /** CreateKeyspaceResponse keyspace. */ - public keyspace?: (vtctldata.IKeyspace|null); + /** GetSchemaResponse schema. */ + public schema?: (tabletmanagerdata.ISchemaDefinition|null); /** - * Creates a new CreateKeyspaceResponse instance using the specified properties. + * Creates a new GetSchemaResponse instance using the specified properties. * @param [properties] Properties to set - * @returns CreateKeyspaceResponse instance + * @returns GetSchemaResponse instance */ - public static create(properties?: vtctldata.ICreateKeyspaceResponse): vtctldata.CreateKeyspaceResponse; + public static create(properties?: vtctldata.IGetSchemaResponse): vtctldata.GetSchemaResponse; /** - * Encodes the specified CreateKeyspaceResponse message. Does not implicitly {@link vtctldata.CreateKeyspaceResponse.verify|verify} messages. 
- * @param message CreateKeyspaceResponse message or plain object to encode + * Encodes the specified GetSchemaResponse message. Does not implicitly {@link vtctldata.GetSchemaResponse.verify|verify} messages. + * @param message GetSchemaResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ICreateKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified CreateKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.CreateKeyspaceResponse.verify|verify} messages. - * @param message CreateKeyspaceResponse message or plain object to encode + * Encodes the specified GetSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.GetSchemaResponse.verify|verify} messages. + * @param message GetSchemaResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ICreateKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a CreateKeyspaceResponse message from the specified reader or buffer. + * Decodes a GetSchemaResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns CreateKeyspaceResponse + * @returns GetSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.CreateKeyspaceResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSchemaResponse; /** - * Decodes a CreateKeyspaceResponse message from the specified reader or buffer, length delimited. + * Decodes a GetSchemaResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns CreateKeyspaceResponse + * @returns GetSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.CreateKeyspaceResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSchemaResponse; /** - * Verifies a CreateKeyspaceResponse message. + * Verifies a GetSchemaResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a CreateKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetSchemaResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns CreateKeyspaceResponse + * @returns GetSchemaResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.CreateKeyspaceResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetSchemaResponse; /** - * Creates a plain object from a CreateKeyspaceResponse message. Also converts values to other types if specified. - * @param message CreateKeyspaceResponse + * Creates a plain object from a GetSchemaResponse message. Also converts values to other types if specified. + * @param message GetSchemaResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.CreateKeyspaceResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetSchemaResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this CreateKeyspaceResponse to JSON. + * Converts this GetSchemaResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for CreateKeyspaceResponse + * Gets the default type url for GetSchemaResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a CreateShardRequest. */ - interface ICreateShardRequest { + /** Properties of a GetSchemaMigrationsRequest. 
*/ + interface IGetSchemaMigrationsRequest { - /** CreateShardRequest keyspace */ + /** GetSchemaMigrationsRequest keyspace */ keyspace?: (string|null); - /** CreateShardRequest shard_name */ - shard_name?: (string|null); + /** GetSchemaMigrationsRequest uuid */ + uuid?: (string|null); - /** CreateShardRequest force */ - force?: (boolean|null); + /** GetSchemaMigrationsRequest migration_context */ + migration_context?: (string|null); - /** CreateShardRequest include_parent */ - include_parent?: (boolean|null); + /** GetSchemaMigrationsRequest status */ + status?: (vtctldata.SchemaMigration.Status|null); + + /** GetSchemaMigrationsRequest recent */ + recent?: (vttime.IDuration|null); + + /** GetSchemaMigrationsRequest order */ + order?: (vtctldata.QueryOrdering|null); + + /** GetSchemaMigrationsRequest limit */ + limit?: (number|Long|null); + + /** GetSchemaMigrationsRequest skip */ + skip?: (number|Long|null); } - /** Represents a CreateShardRequest. */ - class CreateShardRequest implements ICreateShardRequest { + /** Represents a GetSchemaMigrationsRequest. */ + class GetSchemaMigrationsRequest implements IGetSchemaMigrationsRequest { /** - * Constructs a new CreateShardRequest. + * Constructs a new GetSchemaMigrationsRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ICreateShardRequest); + constructor(properties?: vtctldata.IGetSchemaMigrationsRequest); - /** CreateShardRequest keyspace. */ + /** GetSchemaMigrationsRequest keyspace. */ public keyspace: string; - /** CreateShardRequest shard_name. */ - public shard_name: string; + /** GetSchemaMigrationsRequest uuid. */ + public uuid: string; - /** CreateShardRequest force. */ - public force: boolean; + /** GetSchemaMigrationsRequest migration_context. */ + public migration_context: string; - /** CreateShardRequest include_parent. */ - public include_parent: boolean; + /** GetSchemaMigrationsRequest status. 
*/ + public status: vtctldata.SchemaMigration.Status; + + /** GetSchemaMigrationsRequest recent. */ + public recent?: (vttime.IDuration|null); + + /** GetSchemaMigrationsRequest order. */ + public order: vtctldata.QueryOrdering; + + /** GetSchemaMigrationsRequest limit. */ + public limit: (number|Long); + + /** GetSchemaMigrationsRequest skip. */ + public skip: (number|Long); /** - * Creates a new CreateShardRequest instance using the specified properties. + * Creates a new GetSchemaMigrationsRequest instance using the specified properties. * @param [properties] Properties to set - * @returns CreateShardRequest instance + * @returns GetSchemaMigrationsRequest instance */ - public static create(properties?: vtctldata.ICreateShardRequest): vtctldata.CreateShardRequest; + public static create(properties?: vtctldata.IGetSchemaMigrationsRequest): vtctldata.GetSchemaMigrationsRequest; /** - * Encodes the specified CreateShardRequest message. Does not implicitly {@link vtctldata.CreateShardRequest.verify|verify} messages. - * @param message CreateShardRequest message or plain object to encode + * Encodes the specified GetSchemaMigrationsRequest message. Does not implicitly {@link vtctldata.GetSchemaMigrationsRequest.verify|verify} messages. + * @param message GetSchemaMigrationsRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ICreateShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetSchemaMigrationsRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified CreateShardRequest message, length delimited. Does not implicitly {@link vtctldata.CreateShardRequest.verify|verify} messages. - * @param message CreateShardRequest message or plain object to encode + * Encodes the specified GetSchemaMigrationsRequest message, length delimited. 
Does not implicitly {@link vtctldata.GetSchemaMigrationsRequest.verify|verify} messages. + * @param message GetSchemaMigrationsRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ICreateShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetSchemaMigrationsRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a CreateShardRequest message from the specified reader or buffer. + * Decodes a GetSchemaMigrationsRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns CreateShardRequest + * @returns GetSchemaMigrationsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.CreateShardRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSchemaMigrationsRequest; /** - * Decodes a CreateShardRequest message from the specified reader or buffer, length delimited. + * Decodes a GetSchemaMigrationsRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns CreateShardRequest + * @returns GetSchemaMigrationsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.CreateShardRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSchemaMigrationsRequest; /** - * Verifies a CreateShardRequest message. + * Verifies a GetSchemaMigrationsRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a CreateShardRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetSchemaMigrationsRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns CreateShardRequest + * @returns GetSchemaMigrationsRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.CreateShardRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetSchemaMigrationsRequest; /** - * Creates a plain object from a CreateShardRequest message. Also converts values to other types if specified. - * @param message CreateShardRequest + * Creates a plain object from a GetSchemaMigrationsRequest message. Also converts values to other types if specified. + * @param message GetSchemaMigrationsRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.CreateShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetSchemaMigrationsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this CreateShardRequest to JSON. + * Converts this GetSchemaMigrationsRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for CreateShardRequest + * Gets the default type url for GetSchemaMigrationsRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a CreateShardResponse. 
*/ - interface ICreateShardResponse { - - /** CreateShardResponse keyspace */ - keyspace?: (vtctldata.IKeyspace|null); - - /** CreateShardResponse shard */ - shard?: (vtctldata.IShard|null); + /** Properties of a GetSchemaMigrationsResponse. */ + interface IGetSchemaMigrationsResponse { - /** CreateShardResponse shard_already_exists */ - shard_already_exists?: (boolean|null); + /** GetSchemaMigrationsResponse migrations */ + migrations?: (vtctldata.ISchemaMigration[]|null); } - /** Represents a CreateShardResponse. */ - class CreateShardResponse implements ICreateShardResponse { + /** Represents a GetSchemaMigrationsResponse. */ + class GetSchemaMigrationsResponse implements IGetSchemaMigrationsResponse { /** - * Constructs a new CreateShardResponse. + * Constructs a new GetSchemaMigrationsResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ICreateShardResponse); - - /** CreateShardResponse keyspace. */ - public keyspace?: (vtctldata.IKeyspace|null); + constructor(properties?: vtctldata.IGetSchemaMigrationsResponse); - /** CreateShardResponse shard. */ - public shard?: (vtctldata.IShard|null); - - /** CreateShardResponse shard_already_exists. */ - public shard_already_exists: boolean; + /** GetSchemaMigrationsResponse migrations. */ + public migrations: vtctldata.ISchemaMigration[]; /** - * Creates a new CreateShardResponse instance using the specified properties. + * Creates a new GetSchemaMigrationsResponse instance using the specified properties. * @param [properties] Properties to set - * @returns CreateShardResponse instance + * @returns GetSchemaMigrationsResponse instance */ - public static create(properties?: vtctldata.ICreateShardResponse): vtctldata.CreateShardResponse; + public static create(properties?: vtctldata.IGetSchemaMigrationsResponse): vtctldata.GetSchemaMigrationsResponse; /** - * Encodes the specified CreateShardResponse message. 
Does not implicitly {@link vtctldata.CreateShardResponse.verify|verify} messages. - * @param message CreateShardResponse message or plain object to encode + * Encodes the specified GetSchemaMigrationsResponse message. Does not implicitly {@link vtctldata.GetSchemaMigrationsResponse.verify|verify} messages. + * @param message GetSchemaMigrationsResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ICreateShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetSchemaMigrationsResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified CreateShardResponse message, length delimited. Does not implicitly {@link vtctldata.CreateShardResponse.verify|verify} messages. - * @param message CreateShardResponse message or plain object to encode + * Encodes the specified GetSchemaMigrationsResponse message, length delimited. Does not implicitly {@link vtctldata.GetSchemaMigrationsResponse.verify|verify} messages. + * @param message GetSchemaMigrationsResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ICreateShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetSchemaMigrationsResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a CreateShardResponse message from the specified reader or buffer. + * Decodes a GetSchemaMigrationsResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns CreateShardResponse + * @returns GetSchemaMigrationsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.CreateShardResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSchemaMigrationsResponse; /** - * Decodes a CreateShardResponse message from the specified reader or buffer, length delimited. + * Decodes a GetSchemaMigrationsResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns CreateShardResponse + * @returns GetSchemaMigrationsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.CreateShardResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSchemaMigrationsResponse; /** - * Verifies a CreateShardResponse message. + * Verifies a GetSchemaMigrationsResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a CreateShardResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetSchemaMigrationsResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns CreateShardResponse + * @returns GetSchemaMigrationsResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.CreateShardResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetSchemaMigrationsResponse; /** - * Creates a plain object from a CreateShardResponse message. Also converts values to other types if specified. - * @param message CreateShardResponse + * Creates a plain object from a GetSchemaMigrationsResponse message. Also converts values to other types if specified. + * @param message GetSchemaMigrationsResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.CreateShardResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetSchemaMigrationsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this CreateShardResponse to JSON. + * Converts this GetSchemaMigrationsResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for CreateShardResponse + * Gets the default type url for GetSchemaMigrationsResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a DeleteCellInfoRequest. */ - interface IDeleteCellInfoRequest { + /** Properties of a GetShardRequest. */ + interface IGetShardRequest { - /** DeleteCellInfoRequest name */ - name?: (string|null); + /** GetShardRequest keyspace */ + keyspace?: (string|null); - /** DeleteCellInfoRequest force */ - force?: (boolean|null); + /** GetShardRequest shard_name */ + shard_name?: (string|null); } - /** Represents a DeleteCellInfoRequest. */ - class DeleteCellInfoRequest implements IDeleteCellInfoRequest { + /** Represents a GetShardRequest. 
*/ + class GetShardRequest implements IGetShardRequest { /** - * Constructs a new DeleteCellInfoRequest. + * Constructs a new GetShardRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IDeleteCellInfoRequest); + constructor(properties?: vtctldata.IGetShardRequest); - /** DeleteCellInfoRequest name. */ - public name: string; + /** GetShardRequest keyspace. */ + public keyspace: string; - /** DeleteCellInfoRequest force. */ - public force: boolean; + /** GetShardRequest shard_name. */ + public shard_name: string; /** - * Creates a new DeleteCellInfoRequest instance using the specified properties. + * Creates a new GetShardRequest instance using the specified properties. * @param [properties] Properties to set - * @returns DeleteCellInfoRequest instance + * @returns GetShardRequest instance */ - public static create(properties?: vtctldata.IDeleteCellInfoRequest): vtctldata.DeleteCellInfoRequest; + public static create(properties?: vtctldata.IGetShardRequest): vtctldata.GetShardRequest; /** - * Encodes the specified DeleteCellInfoRequest message. Does not implicitly {@link vtctldata.DeleteCellInfoRequest.verify|verify} messages. - * @param message DeleteCellInfoRequest message or plain object to encode + * Encodes the specified GetShardRequest message. Does not implicitly {@link vtctldata.GetShardRequest.verify|verify} messages. + * @param message GetShardRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IDeleteCellInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified DeleteCellInfoRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteCellInfoRequest.verify|verify} messages. 
- * @param message DeleteCellInfoRequest message or plain object to encode + * Encodes the specified GetShardRequest message, length delimited. Does not implicitly {@link vtctldata.GetShardRequest.verify|verify} messages. + * @param message GetShardRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IDeleteCellInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a DeleteCellInfoRequest message from the specified reader or buffer. + * Decodes a GetShardRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns DeleteCellInfoRequest + * @returns GetShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteCellInfoRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetShardRequest; /** - * Decodes a DeleteCellInfoRequest message from the specified reader or buffer, length delimited. + * Decodes a GetShardRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns DeleteCellInfoRequest + * @returns GetShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteCellInfoRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetShardRequest; /** - * Verifies a DeleteCellInfoRequest message. 
+ * Verifies a GetShardRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a DeleteCellInfoRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetShardRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns DeleteCellInfoRequest + * @returns GetShardRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.DeleteCellInfoRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetShardRequest; /** - * Creates a plain object from a DeleteCellInfoRequest message. Also converts values to other types if specified. - * @param message DeleteCellInfoRequest + * Creates a plain object from a GetShardRequest message. Also converts values to other types if specified. + * @param message GetShardRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.DeleteCellInfoRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this DeleteCellInfoRequest to JSON. + * Converts this GetShardRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for DeleteCellInfoRequest + * Gets the default type url for GetShardRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a DeleteCellInfoResponse. */ - interface IDeleteCellInfoResponse { + /** Properties of a GetShardResponse. 
*/ + interface IGetShardResponse { + + /** GetShardResponse shard */ + shard?: (vtctldata.IShard|null); } - /** Represents a DeleteCellInfoResponse. */ - class DeleteCellInfoResponse implements IDeleteCellInfoResponse { + /** Represents a GetShardResponse. */ + class GetShardResponse implements IGetShardResponse { /** - * Constructs a new DeleteCellInfoResponse. + * Constructs a new GetShardResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IDeleteCellInfoResponse); + constructor(properties?: vtctldata.IGetShardResponse); + + /** GetShardResponse shard. */ + public shard?: (vtctldata.IShard|null); /** - * Creates a new DeleteCellInfoResponse instance using the specified properties. + * Creates a new GetShardResponse instance using the specified properties. * @param [properties] Properties to set - * @returns DeleteCellInfoResponse instance + * @returns GetShardResponse instance */ - public static create(properties?: vtctldata.IDeleteCellInfoResponse): vtctldata.DeleteCellInfoResponse; + public static create(properties?: vtctldata.IGetShardResponse): vtctldata.GetShardResponse; /** - * Encodes the specified DeleteCellInfoResponse message. Does not implicitly {@link vtctldata.DeleteCellInfoResponse.verify|verify} messages. - * @param message DeleteCellInfoResponse message or plain object to encode + * Encodes the specified GetShardResponse message. Does not implicitly {@link vtctldata.GetShardResponse.verify|verify} messages. + * @param message GetShardResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IDeleteCellInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified DeleteCellInfoResponse message, length delimited. 
Does not implicitly {@link vtctldata.DeleteCellInfoResponse.verify|verify} messages. - * @param message DeleteCellInfoResponse message or plain object to encode + * Encodes the specified GetShardResponse message, length delimited. Does not implicitly {@link vtctldata.GetShardResponse.verify|verify} messages. + * @param message GetShardResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IDeleteCellInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a DeleteCellInfoResponse message from the specified reader or buffer. + * Decodes a GetShardResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns DeleteCellInfoResponse + * @returns GetShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteCellInfoResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetShardResponse; /** - * Decodes a DeleteCellInfoResponse message from the specified reader or buffer, length delimited. + * Decodes a GetShardResponse message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns DeleteCellInfoResponse + * @returns GetShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteCellInfoResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetShardResponse; /** - * Verifies a DeleteCellInfoResponse message. + * Verifies a GetShardResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a DeleteCellInfoResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetShardResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns DeleteCellInfoResponse + * @returns GetShardResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.DeleteCellInfoResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetShardResponse; /** - * Creates a plain object from a DeleteCellInfoResponse message. Also converts values to other types if specified. - * @param message DeleteCellInfoResponse + * Creates a plain object from a GetShardResponse message. Also converts values to other types if specified. + * @param message GetShardResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.DeleteCellInfoResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetShardResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this DeleteCellInfoResponse to JSON. + * Converts this GetShardResponse to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for DeleteCellInfoResponse + * Gets the default type url for GetShardResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a DeleteCellsAliasRequest. */ - interface IDeleteCellsAliasRequest { - - /** DeleteCellsAliasRequest name */ - name?: (string|null); + /** Properties of a GetShardRoutingRulesRequest. */ + interface IGetShardRoutingRulesRequest { } - /** Represents a DeleteCellsAliasRequest. */ - class DeleteCellsAliasRequest implements IDeleteCellsAliasRequest { + /** Represents a GetShardRoutingRulesRequest. */ + class GetShardRoutingRulesRequest implements IGetShardRoutingRulesRequest { /** - * Constructs a new DeleteCellsAliasRequest. + * Constructs a new GetShardRoutingRulesRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IDeleteCellsAliasRequest); - - /** DeleteCellsAliasRequest name. */ - public name: string; + constructor(properties?: vtctldata.IGetShardRoutingRulesRequest); /** - * Creates a new DeleteCellsAliasRequest instance using the specified properties. + * Creates a new GetShardRoutingRulesRequest instance using the specified properties. * @param [properties] Properties to set - * @returns DeleteCellsAliasRequest instance + * @returns GetShardRoutingRulesRequest instance */ - public static create(properties?: vtctldata.IDeleteCellsAliasRequest): vtctldata.DeleteCellsAliasRequest; + public static create(properties?: vtctldata.IGetShardRoutingRulesRequest): vtctldata.GetShardRoutingRulesRequest; /** - * Encodes the specified DeleteCellsAliasRequest message. Does not implicitly {@link vtctldata.DeleteCellsAliasRequest.verify|verify} messages. 
- * @param message DeleteCellsAliasRequest message or plain object to encode + * Encodes the specified GetShardRoutingRulesRequest message. Does not implicitly {@link vtctldata.GetShardRoutingRulesRequest.verify|verify} messages. + * @param message GetShardRoutingRulesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IDeleteCellsAliasRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetShardRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified DeleteCellsAliasRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteCellsAliasRequest.verify|verify} messages. - * @param message DeleteCellsAliasRequest message or plain object to encode + * Encodes the specified GetShardRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.GetShardRoutingRulesRequest.verify|verify} messages. + * @param message GetShardRoutingRulesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IDeleteCellsAliasRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetShardRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a DeleteCellsAliasRequest message from the specified reader or buffer. + * Decodes a GetShardRoutingRulesRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns DeleteCellsAliasRequest + * @returns GetShardRoutingRulesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteCellsAliasRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetShardRoutingRulesRequest; /** - * Decodes a DeleteCellsAliasRequest message from the specified reader or buffer, length delimited. + * Decodes a GetShardRoutingRulesRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns DeleteCellsAliasRequest + * @returns GetShardRoutingRulesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteCellsAliasRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetShardRoutingRulesRequest; /** - * Verifies a DeleteCellsAliasRequest message. + * Verifies a GetShardRoutingRulesRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a DeleteCellsAliasRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetShardRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns DeleteCellsAliasRequest + * @returns GetShardRoutingRulesRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.DeleteCellsAliasRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetShardRoutingRulesRequest; /** - * Creates a plain object from a DeleteCellsAliasRequest message. Also converts values to other types if specified. - * @param message DeleteCellsAliasRequest + * Creates a plain object from a GetShardRoutingRulesRequest message. Also converts values to other types if specified. + * @param message GetShardRoutingRulesRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.DeleteCellsAliasRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetShardRoutingRulesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this DeleteCellsAliasRequest to JSON. + * Converts this GetShardRoutingRulesRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for DeleteCellsAliasRequest + * Gets the default type url for GetShardRoutingRulesRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a DeleteCellsAliasResponse. */ - interface IDeleteCellsAliasResponse { + /** Properties of a GetShardRoutingRulesResponse. */ + interface IGetShardRoutingRulesResponse { + + /** GetShardRoutingRulesResponse shard_routing_rules */ + shard_routing_rules?: (vschema.IShardRoutingRules|null); } - /** Represents a DeleteCellsAliasResponse. */ - class DeleteCellsAliasResponse implements IDeleteCellsAliasResponse { + /** Represents a GetShardRoutingRulesResponse. 
*/ + class GetShardRoutingRulesResponse implements IGetShardRoutingRulesResponse { /** - * Constructs a new DeleteCellsAliasResponse. + * Constructs a new GetShardRoutingRulesResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IDeleteCellsAliasResponse); + constructor(properties?: vtctldata.IGetShardRoutingRulesResponse); + + /** GetShardRoutingRulesResponse shard_routing_rules. */ + public shard_routing_rules?: (vschema.IShardRoutingRules|null); /** - * Creates a new DeleteCellsAliasResponse instance using the specified properties. + * Creates a new GetShardRoutingRulesResponse instance using the specified properties. * @param [properties] Properties to set - * @returns DeleteCellsAliasResponse instance + * @returns GetShardRoutingRulesResponse instance */ - public static create(properties?: vtctldata.IDeleteCellsAliasResponse): vtctldata.DeleteCellsAliasResponse; + public static create(properties?: vtctldata.IGetShardRoutingRulesResponse): vtctldata.GetShardRoutingRulesResponse; /** - * Encodes the specified DeleteCellsAliasResponse message. Does not implicitly {@link vtctldata.DeleteCellsAliasResponse.verify|verify} messages. - * @param message DeleteCellsAliasResponse message or plain object to encode + * Encodes the specified GetShardRoutingRulesResponse message. Does not implicitly {@link vtctldata.GetShardRoutingRulesResponse.verify|verify} messages. + * @param message GetShardRoutingRulesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IDeleteCellsAliasResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetShardRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified DeleteCellsAliasResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteCellsAliasResponse.verify|verify} messages. 
- * @param message DeleteCellsAliasResponse message or plain object to encode + * Encodes the specified GetShardRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.GetShardRoutingRulesResponse.verify|verify} messages. + * @param message GetShardRoutingRulesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IDeleteCellsAliasResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetShardRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a DeleteCellsAliasResponse message from the specified reader or buffer. + * Decodes a GetShardRoutingRulesResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns DeleteCellsAliasResponse + * @returns GetShardRoutingRulesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteCellsAliasResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetShardRoutingRulesResponse; /** - * Decodes a DeleteCellsAliasResponse message from the specified reader or buffer, length delimited. + * Decodes a GetShardRoutingRulesResponse message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns DeleteCellsAliasResponse + * @returns GetShardRoutingRulesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteCellsAliasResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetShardRoutingRulesResponse; /** - * Verifies a DeleteCellsAliasResponse message. + * Verifies a GetShardRoutingRulesResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a DeleteCellsAliasResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetShardRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns DeleteCellsAliasResponse + * @returns GetShardRoutingRulesResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.DeleteCellsAliasResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetShardRoutingRulesResponse; /** - * Creates a plain object from a DeleteCellsAliasResponse message. Also converts values to other types if specified. - * @param message DeleteCellsAliasResponse + * Creates a plain object from a GetShardRoutingRulesResponse message. Also converts values to other types if specified. 
+ * @param message GetShardRoutingRulesResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.DeleteCellsAliasResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetShardRoutingRulesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this DeleteCellsAliasResponse to JSON. + * Converts this GetShardRoutingRulesResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for DeleteCellsAliasResponse + * Gets the default type url for GetShardRoutingRulesResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a DeleteKeyspaceRequest. */ - interface IDeleteKeyspaceRequest { - - /** DeleteKeyspaceRequest keyspace */ - keyspace?: (string|null); - - /** DeleteKeyspaceRequest recursive */ - recursive?: (boolean|null); + /** Properties of a GetSrvKeyspaceNamesRequest. */ + interface IGetSrvKeyspaceNamesRequest { - /** DeleteKeyspaceRequest force */ - force?: (boolean|null); + /** GetSrvKeyspaceNamesRequest cells */ + cells?: (string[]|null); } - /** Represents a DeleteKeyspaceRequest. */ - class DeleteKeyspaceRequest implements IDeleteKeyspaceRequest { + /** Represents a GetSrvKeyspaceNamesRequest. */ + class GetSrvKeyspaceNamesRequest implements IGetSrvKeyspaceNamesRequest { /** - * Constructs a new DeleteKeyspaceRequest. + * Constructs a new GetSrvKeyspaceNamesRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IDeleteKeyspaceRequest); - - /** DeleteKeyspaceRequest keyspace. */ - public keyspace: string; - - /** DeleteKeyspaceRequest recursive. 
*/ - public recursive: boolean; + constructor(properties?: vtctldata.IGetSrvKeyspaceNamesRequest); - /** DeleteKeyspaceRequest force. */ - public force: boolean; + /** GetSrvKeyspaceNamesRequest cells. */ + public cells: string[]; /** - * Creates a new DeleteKeyspaceRequest instance using the specified properties. + * Creates a new GetSrvKeyspaceNamesRequest instance using the specified properties. * @param [properties] Properties to set - * @returns DeleteKeyspaceRequest instance + * @returns GetSrvKeyspaceNamesRequest instance */ - public static create(properties?: vtctldata.IDeleteKeyspaceRequest): vtctldata.DeleteKeyspaceRequest; + public static create(properties?: vtctldata.IGetSrvKeyspaceNamesRequest): vtctldata.GetSrvKeyspaceNamesRequest; /** - * Encodes the specified DeleteKeyspaceRequest message. Does not implicitly {@link vtctldata.DeleteKeyspaceRequest.verify|verify} messages. - * @param message DeleteKeyspaceRequest message or plain object to encode + * Encodes the specified GetSrvKeyspaceNamesRequest message. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesRequest.verify|verify} messages. + * @param message GetSrvKeyspaceNamesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IDeleteKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetSrvKeyspaceNamesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified DeleteKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteKeyspaceRequest.verify|verify} messages. - * @param message DeleteKeyspaceRequest message or plain object to encode + * Encodes the specified GetSrvKeyspaceNamesRequest message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesRequest.verify|verify} messages. 
+ * @param message GetSrvKeyspaceNamesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IDeleteKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetSrvKeyspaceNamesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a DeleteKeyspaceRequest message from the specified reader or buffer. + * Decodes a GetSrvKeyspaceNamesRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns DeleteKeyspaceRequest + * @returns GetSrvKeyspaceNamesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteKeyspaceRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSrvKeyspaceNamesRequest; /** - * Decodes a DeleteKeyspaceRequest message from the specified reader or buffer, length delimited. + * Decodes a GetSrvKeyspaceNamesRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns DeleteKeyspaceRequest + * @returns GetSrvKeyspaceNamesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteKeyspaceRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSrvKeyspaceNamesRequest; /** - * Verifies a DeleteKeyspaceRequest message. + * Verifies a GetSrvKeyspaceNamesRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a DeleteKeyspaceRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetSrvKeyspaceNamesRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns DeleteKeyspaceRequest + * @returns GetSrvKeyspaceNamesRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.DeleteKeyspaceRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetSrvKeyspaceNamesRequest; /** - * Creates a plain object from a DeleteKeyspaceRequest message. Also converts values to other types if specified. - * @param message DeleteKeyspaceRequest + * Creates a plain object from a GetSrvKeyspaceNamesRequest message. Also converts values to other types if specified. + * @param message GetSrvKeyspaceNamesRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.DeleteKeyspaceRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetSrvKeyspaceNamesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this DeleteKeyspaceRequest to JSON. + * Converts this GetSrvKeyspaceNamesRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for DeleteKeyspaceRequest + * Gets the default type url for GetSrvKeyspaceNamesRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a DeleteKeyspaceResponse. */ - interface IDeleteKeyspaceResponse { + /** Properties of a GetSrvKeyspaceNamesResponse. 
*/ + interface IGetSrvKeyspaceNamesResponse { + + /** GetSrvKeyspaceNamesResponse names */ + names?: ({ [k: string]: vtctldata.GetSrvKeyspaceNamesResponse.INameList }|null); } - /** Represents a DeleteKeyspaceResponse. */ - class DeleteKeyspaceResponse implements IDeleteKeyspaceResponse { + /** Represents a GetSrvKeyspaceNamesResponse. */ + class GetSrvKeyspaceNamesResponse implements IGetSrvKeyspaceNamesResponse { /** - * Constructs a new DeleteKeyspaceResponse. + * Constructs a new GetSrvKeyspaceNamesResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IDeleteKeyspaceResponse); + constructor(properties?: vtctldata.IGetSrvKeyspaceNamesResponse); + + /** GetSrvKeyspaceNamesResponse names. */ + public names: { [k: string]: vtctldata.GetSrvKeyspaceNamesResponse.INameList }; /** - * Creates a new DeleteKeyspaceResponse instance using the specified properties. + * Creates a new GetSrvKeyspaceNamesResponse instance using the specified properties. * @param [properties] Properties to set - * @returns DeleteKeyspaceResponse instance + * @returns GetSrvKeyspaceNamesResponse instance */ - public static create(properties?: vtctldata.IDeleteKeyspaceResponse): vtctldata.DeleteKeyspaceResponse; + public static create(properties?: vtctldata.IGetSrvKeyspaceNamesResponse): vtctldata.GetSrvKeyspaceNamesResponse; /** - * Encodes the specified DeleteKeyspaceResponse message. Does not implicitly {@link vtctldata.DeleteKeyspaceResponse.verify|verify} messages. - * @param message DeleteKeyspaceResponse message or plain object to encode + * Encodes the specified GetSrvKeyspaceNamesResponse message. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesResponse.verify|verify} messages. 
+ * @param message GetSrvKeyspaceNamesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IDeleteKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetSrvKeyspaceNamesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified DeleteKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteKeyspaceResponse.verify|verify} messages. - * @param message DeleteKeyspaceResponse message or plain object to encode + * Encodes the specified GetSrvKeyspaceNamesResponse message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesResponse.verify|verify} messages. + * @param message GetSrvKeyspaceNamesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IDeleteKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetSrvKeyspaceNamesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a DeleteKeyspaceResponse message from the specified reader or buffer. + * Decodes a GetSrvKeyspaceNamesResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns DeleteKeyspaceResponse + * @returns GetSrvKeyspaceNamesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteKeyspaceResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSrvKeyspaceNamesResponse; /** - * Decodes a DeleteKeyspaceResponse message from the specified reader or buffer, length delimited. 
+ * Decodes a GetSrvKeyspaceNamesResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns DeleteKeyspaceResponse + * @returns GetSrvKeyspaceNamesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteKeyspaceResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSrvKeyspaceNamesResponse; /** - * Verifies a DeleteKeyspaceResponse message. + * Verifies a GetSrvKeyspaceNamesResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a DeleteKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetSrvKeyspaceNamesResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns DeleteKeyspaceResponse + * @returns GetSrvKeyspaceNamesResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.DeleteKeyspaceResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetSrvKeyspaceNamesResponse; /** - * Creates a plain object from a DeleteKeyspaceResponse message. Also converts values to other types if specified. - * @param message DeleteKeyspaceResponse + * Creates a plain object from a GetSrvKeyspaceNamesResponse message. Also converts values to other types if specified. 
+ * @param message GetSrvKeyspaceNamesResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.DeleteKeyspaceResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetSrvKeyspaceNamesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this DeleteKeyspaceResponse to JSON. + * Converts this GetSrvKeyspaceNamesResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for DeleteKeyspaceResponse + * Gets the default type url for GetSrvKeyspaceNamesResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a DeleteShardsRequest. */ - interface IDeleteShardsRequest { + namespace GetSrvKeyspaceNamesResponse { - /** DeleteShardsRequest shards */ - shards?: (vtctldata.IShard[]|null); + /** Properties of a NameList. */ + interface INameList { - /** DeleteShardsRequest recursive */ - recursive?: (boolean|null); + /** NameList names */ + names?: (string[]|null); + } - /** DeleteShardsRequest even_if_serving */ - even_if_serving?: (boolean|null); + /** Represents a NameList. */ + class NameList implements INameList { - /** DeleteShardsRequest force */ - force?: (boolean|null); + /** + * Constructs a new NameList. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.GetSrvKeyspaceNamesResponse.INameList); + + /** NameList names. */ + public names: string[]; + + /** + * Creates a new NameList instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns NameList instance + */ + public static create(properties?: vtctldata.GetSrvKeyspaceNamesResponse.INameList): vtctldata.GetSrvKeyspaceNamesResponse.NameList; + + /** + * Encodes the specified NameList message. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesResponse.NameList.verify|verify} messages. + * @param message NameList message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.GetSrvKeyspaceNamesResponse.INameList, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified NameList message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesResponse.NameList.verify|verify} messages. + * @param message NameList message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.GetSrvKeyspaceNamesResponse.INameList, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a NameList message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns NameList + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSrvKeyspaceNamesResponse.NameList; + + /** + * Decodes a NameList message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns NameList + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSrvKeyspaceNamesResponse.NameList; + + /** + * Verifies a NameList message. 
+ * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a NameList message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns NameList + */ + public static fromObject(object: { [k: string]: any }): vtctldata.GetSrvKeyspaceNamesResponse.NameList; + + /** + * Creates a plain object from a NameList message. Also converts values to other types if specified. + * @param message NameList + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.GetSrvKeyspaceNamesResponse.NameList, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this NameList to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for NameList + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } } - /** Represents a DeleteShardsRequest. */ - class DeleteShardsRequest implements IDeleteShardsRequest { + /** Properties of a GetSrvKeyspacesRequest. */ + interface IGetSrvKeyspacesRequest { + + /** GetSrvKeyspacesRequest keyspace */ + keyspace?: (string|null); + + /** GetSrvKeyspacesRequest cells */ + cells?: (string[]|null); + } + + /** Represents a GetSrvKeyspacesRequest. */ + class GetSrvKeyspacesRequest implements IGetSrvKeyspacesRequest { /** - * Constructs a new DeleteShardsRequest. + * Constructs a new GetSrvKeyspacesRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IDeleteShardsRequest); - - /** DeleteShardsRequest shards. */ - public shards: vtctldata.IShard[]; - - /** DeleteShardsRequest recursive. 
*/ - public recursive: boolean; + constructor(properties?: vtctldata.IGetSrvKeyspacesRequest); - /** DeleteShardsRequest even_if_serving. */ - public even_if_serving: boolean; + /** GetSrvKeyspacesRequest keyspace. */ + public keyspace: string; - /** DeleteShardsRequest force. */ - public force: boolean; + /** GetSrvKeyspacesRequest cells. */ + public cells: string[]; /** - * Creates a new DeleteShardsRequest instance using the specified properties. + * Creates a new GetSrvKeyspacesRequest instance using the specified properties. * @param [properties] Properties to set - * @returns DeleteShardsRequest instance + * @returns GetSrvKeyspacesRequest instance */ - public static create(properties?: vtctldata.IDeleteShardsRequest): vtctldata.DeleteShardsRequest; + public static create(properties?: vtctldata.IGetSrvKeyspacesRequest): vtctldata.GetSrvKeyspacesRequest; /** - * Encodes the specified DeleteShardsRequest message. Does not implicitly {@link vtctldata.DeleteShardsRequest.verify|verify} messages. - * @param message DeleteShardsRequest message or plain object to encode + * Encodes the specified GetSrvKeyspacesRequest message. Does not implicitly {@link vtctldata.GetSrvKeyspacesRequest.verify|verify} messages. + * @param message GetSrvKeyspacesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IDeleteShardsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetSrvKeyspacesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified DeleteShardsRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteShardsRequest.verify|verify} messages. - * @param message DeleteShardsRequest message or plain object to encode + * Encodes the specified GetSrvKeyspacesRequest message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspacesRequest.verify|verify} messages. 
+ * @param message GetSrvKeyspacesRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IDeleteShardsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetSrvKeyspacesRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a DeleteShardsRequest message from the specified reader or buffer. + * Decodes a GetSrvKeyspacesRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns DeleteShardsRequest + * @returns GetSrvKeyspacesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteShardsRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSrvKeyspacesRequest; /** - * Decodes a DeleteShardsRequest message from the specified reader or buffer, length delimited. + * Decodes a GetSrvKeyspacesRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns DeleteShardsRequest + * @returns GetSrvKeyspacesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteShardsRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSrvKeyspacesRequest; /** - * Verifies a DeleteShardsRequest message. + * Verifies a GetSrvKeyspacesRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a DeleteShardsRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetSrvKeyspacesRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns DeleteShardsRequest + * @returns GetSrvKeyspacesRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.DeleteShardsRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetSrvKeyspacesRequest; /** - * Creates a plain object from a DeleteShardsRequest message. Also converts values to other types if specified. - * @param message DeleteShardsRequest + * Creates a plain object from a GetSrvKeyspacesRequest message. Also converts values to other types if specified. + * @param message GetSrvKeyspacesRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.DeleteShardsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetSrvKeyspacesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this DeleteShardsRequest to JSON. + * Converts this GetSrvKeyspacesRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for DeleteShardsRequest + * Gets the default type url for GetSrvKeyspacesRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a DeleteShardsResponse. */ - interface IDeleteShardsResponse { + /** Properties of a GetSrvKeyspacesResponse. 
*/ + interface IGetSrvKeyspacesResponse { + + /** GetSrvKeyspacesResponse srv_keyspaces */ + srv_keyspaces?: ({ [k: string]: topodata.ISrvKeyspace }|null); } - /** Represents a DeleteShardsResponse. */ - class DeleteShardsResponse implements IDeleteShardsResponse { + /** Represents a GetSrvKeyspacesResponse. */ + class GetSrvKeyspacesResponse implements IGetSrvKeyspacesResponse { /** - * Constructs a new DeleteShardsResponse. + * Constructs a new GetSrvKeyspacesResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IDeleteShardsResponse); + constructor(properties?: vtctldata.IGetSrvKeyspacesResponse); + + /** GetSrvKeyspacesResponse srv_keyspaces. */ + public srv_keyspaces: { [k: string]: topodata.ISrvKeyspace }; /** - * Creates a new DeleteShardsResponse instance using the specified properties. + * Creates a new GetSrvKeyspacesResponse instance using the specified properties. * @param [properties] Properties to set - * @returns DeleteShardsResponse instance + * @returns GetSrvKeyspacesResponse instance */ - public static create(properties?: vtctldata.IDeleteShardsResponse): vtctldata.DeleteShardsResponse; + public static create(properties?: vtctldata.IGetSrvKeyspacesResponse): vtctldata.GetSrvKeyspacesResponse; /** - * Encodes the specified DeleteShardsResponse message. Does not implicitly {@link vtctldata.DeleteShardsResponse.verify|verify} messages. - * @param message DeleteShardsResponse message or plain object to encode + * Encodes the specified GetSrvKeyspacesResponse message. Does not implicitly {@link vtctldata.GetSrvKeyspacesResponse.verify|verify} messages. 
+ * @param message GetSrvKeyspacesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IDeleteShardsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetSrvKeyspacesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified DeleteShardsResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteShardsResponse.verify|verify} messages. - * @param message DeleteShardsResponse message or plain object to encode + * Encodes the specified GetSrvKeyspacesResponse message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspacesResponse.verify|verify} messages. + * @param message GetSrvKeyspacesResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IDeleteShardsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetSrvKeyspacesResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a DeleteShardsResponse message from the specified reader or buffer. + * Decodes a GetSrvKeyspacesResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns DeleteShardsResponse + * @returns GetSrvKeyspacesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteShardsResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSrvKeyspacesResponse; /** - * Decodes a DeleteShardsResponse message from the specified reader or buffer, length delimited. 
+ * Decodes a GetSrvKeyspacesResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns DeleteShardsResponse + * @returns GetSrvKeyspacesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteShardsResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSrvKeyspacesResponse; /** - * Verifies a DeleteShardsResponse message. + * Verifies a GetSrvKeyspacesResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a DeleteShardsResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetSrvKeyspacesResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns DeleteShardsResponse + * @returns GetSrvKeyspacesResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.DeleteShardsResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetSrvKeyspacesResponse; /** - * Creates a plain object from a DeleteShardsResponse message. Also converts values to other types if specified. - * @param message DeleteShardsResponse + * Creates a plain object from a GetSrvKeyspacesResponse message. Also converts values to other types if specified. 
+ * @param message GetSrvKeyspacesResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.DeleteShardsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetSrvKeyspacesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this DeleteShardsResponse to JSON. + * Converts this GetSrvKeyspacesResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for DeleteShardsResponse + * Gets the default type url for GetSrvKeyspacesResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a DeleteSrvVSchemaRequest. */ - interface IDeleteSrvVSchemaRequest { + /** Properties of an UpdateThrottlerConfigRequest. */ + interface IUpdateThrottlerConfigRequest { + + /** UpdateThrottlerConfigRequest keyspace */ + keyspace?: (string|null); + + /** UpdateThrottlerConfigRequest enable */ + enable?: (boolean|null); + + /** UpdateThrottlerConfigRequest disable */ + disable?: (boolean|null); + + /** UpdateThrottlerConfigRequest threshold */ + threshold?: (number|null); + + /** UpdateThrottlerConfigRequest custom_query */ + custom_query?: (string|null); + + /** UpdateThrottlerConfigRequest custom_query_set */ + custom_query_set?: (boolean|null); - /** DeleteSrvVSchemaRequest cell */ - cell?: (string|null); + /** UpdateThrottlerConfigRequest check_as_check_self */ + check_as_check_self?: (boolean|null); + + /** UpdateThrottlerConfigRequest check_as_check_shard */ + check_as_check_shard?: (boolean|null); + + /** UpdateThrottlerConfigRequest throttled_app */ + throttled_app?: (topodata.IThrottledAppRule|null); } - /** Represents a DeleteSrvVSchemaRequest. 
*/ - class DeleteSrvVSchemaRequest implements IDeleteSrvVSchemaRequest { + /** Represents an UpdateThrottlerConfigRequest. */ + class UpdateThrottlerConfigRequest implements IUpdateThrottlerConfigRequest { /** - * Constructs a new DeleteSrvVSchemaRequest. + * Constructs a new UpdateThrottlerConfigRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IDeleteSrvVSchemaRequest); + constructor(properties?: vtctldata.IUpdateThrottlerConfigRequest); - /** DeleteSrvVSchemaRequest cell. */ - public cell: string; + /** UpdateThrottlerConfigRequest keyspace. */ + public keyspace: string; + + /** UpdateThrottlerConfigRequest enable. */ + public enable: boolean; + + /** UpdateThrottlerConfigRequest disable. */ + public disable: boolean; + + /** UpdateThrottlerConfigRequest threshold. */ + public threshold: number; + + /** UpdateThrottlerConfigRequest custom_query. */ + public custom_query: string; + + /** UpdateThrottlerConfigRequest custom_query_set. */ + public custom_query_set: boolean; + + /** UpdateThrottlerConfigRequest check_as_check_self. */ + public check_as_check_self: boolean; + + /** UpdateThrottlerConfigRequest check_as_check_shard. */ + public check_as_check_shard: boolean; + + /** UpdateThrottlerConfigRequest throttled_app. */ + public throttled_app?: (topodata.IThrottledAppRule|null); /** - * Creates a new DeleteSrvVSchemaRequest instance using the specified properties. + * Creates a new UpdateThrottlerConfigRequest instance using the specified properties. * @param [properties] Properties to set - * @returns DeleteSrvVSchemaRequest instance + * @returns UpdateThrottlerConfigRequest instance */ - public static create(properties?: vtctldata.IDeleteSrvVSchemaRequest): vtctldata.DeleteSrvVSchemaRequest; + public static create(properties?: vtctldata.IUpdateThrottlerConfigRequest): vtctldata.UpdateThrottlerConfigRequest; /** - * Encodes the specified DeleteSrvVSchemaRequest message. 
Does not implicitly {@link vtctldata.DeleteSrvVSchemaRequest.verify|verify} messages. - * @param message DeleteSrvVSchemaRequest message or plain object to encode + * Encodes the specified UpdateThrottlerConfigRequest message. Does not implicitly {@link vtctldata.UpdateThrottlerConfigRequest.verify|verify} messages. + * @param message UpdateThrottlerConfigRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IDeleteSrvVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IUpdateThrottlerConfigRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified DeleteSrvVSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteSrvVSchemaRequest.verify|verify} messages. - * @param message DeleteSrvVSchemaRequest message or plain object to encode + * Encodes the specified UpdateThrottlerConfigRequest message, length delimited. Does not implicitly {@link vtctldata.UpdateThrottlerConfigRequest.verify|verify} messages. + * @param message UpdateThrottlerConfigRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IDeleteSrvVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IUpdateThrottlerConfigRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a DeleteSrvVSchemaRequest message from the specified reader or buffer. + * Decodes an UpdateThrottlerConfigRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns DeleteSrvVSchemaRequest + * @returns UpdateThrottlerConfigRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteSrvVSchemaRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.UpdateThrottlerConfigRequest; /** - * Decodes a DeleteSrvVSchemaRequest message from the specified reader or buffer, length delimited. + * Decodes an UpdateThrottlerConfigRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns DeleteSrvVSchemaRequest + * @returns UpdateThrottlerConfigRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteSrvVSchemaRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.UpdateThrottlerConfigRequest; /** - * Verifies a DeleteSrvVSchemaRequest message. + * Verifies an UpdateThrottlerConfigRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a DeleteSrvVSchemaRequest message from a plain object. Also converts values to their respective internal types. + * Creates an UpdateThrottlerConfigRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns DeleteSrvVSchemaRequest + * @returns UpdateThrottlerConfigRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.DeleteSrvVSchemaRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.UpdateThrottlerConfigRequest; /** - * Creates a plain object from a DeleteSrvVSchemaRequest message. Also converts values to other types if specified. - * @param message DeleteSrvVSchemaRequest + * Creates a plain object from an UpdateThrottlerConfigRequest message. Also converts values to other types if specified. + * @param message UpdateThrottlerConfigRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.DeleteSrvVSchemaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.UpdateThrottlerConfigRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this DeleteSrvVSchemaRequest to JSON. + * Converts this UpdateThrottlerConfigRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for DeleteSrvVSchemaRequest + * Gets the default type url for UpdateThrottlerConfigRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a DeleteSrvVSchemaResponse. */ - interface IDeleteSrvVSchemaResponse { + /** Properties of an UpdateThrottlerConfigResponse. */ + interface IUpdateThrottlerConfigResponse { } - /** Represents a DeleteSrvVSchemaResponse. */ - class DeleteSrvVSchemaResponse implements IDeleteSrvVSchemaResponse { + /** Represents an UpdateThrottlerConfigResponse. */ + class UpdateThrottlerConfigResponse implements IUpdateThrottlerConfigResponse { /** - * Constructs a new DeleteSrvVSchemaResponse. 
+ * Constructs a new UpdateThrottlerConfigResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IDeleteSrvVSchemaResponse); + constructor(properties?: vtctldata.IUpdateThrottlerConfigResponse); /** - * Creates a new DeleteSrvVSchemaResponse instance using the specified properties. + * Creates a new UpdateThrottlerConfigResponse instance using the specified properties. * @param [properties] Properties to set - * @returns DeleteSrvVSchemaResponse instance + * @returns UpdateThrottlerConfigResponse instance */ - public static create(properties?: vtctldata.IDeleteSrvVSchemaResponse): vtctldata.DeleteSrvVSchemaResponse; + public static create(properties?: vtctldata.IUpdateThrottlerConfigResponse): vtctldata.UpdateThrottlerConfigResponse; /** - * Encodes the specified DeleteSrvVSchemaResponse message. Does not implicitly {@link vtctldata.DeleteSrvVSchemaResponse.verify|verify} messages. - * @param message DeleteSrvVSchemaResponse message or plain object to encode + * Encodes the specified UpdateThrottlerConfigResponse message. Does not implicitly {@link vtctldata.UpdateThrottlerConfigResponse.verify|verify} messages. + * @param message UpdateThrottlerConfigResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IDeleteSrvVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IUpdateThrottlerConfigResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified DeleteSrvVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteSrvVSchemaResponse.verify|verify} messages. - * @param message DeleteSrvVSchemaResponse message or plain object to encode + * Encodes the specified UpdateThrottlerConfigResponse message, length delimited. Does not implicitly {@link vtctldata.UpdateThrottlerConfigResponse.verify|verify} messages. 
+ * @param message UpdateThrottlerConfigResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IDeleteSrvVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IUpdateThrottlerConfigResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a DeleteSrvVSchemaResponse message from the specified reader or buffer. + * Decodes an UpdateThrottlerConfigResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns DeleteSrvVSchemaResponse + * @returns UpdateThrottlerConfigResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteSrvVSchemaResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.UpdateThrottlerConfigResponse; /** - * Decodes a DeleteSrvVSchemaResponse message from the specified reader or buffer, length delimited. + * Decodes an UpdateThrottlerConfigResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns DeleteSrvVSchemaResponse + * @returns UpdateThrottlerConfigResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteSrvVSchemaResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.UpdateThrottlerConfigResponse; /** - * Verifies a DeleteSrvVSchemaResponse message. + * Verifies an UpdateThrottlerConfigResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a DeleteSrvVSchemaResponse message from a plain object. Also converts values to their respective internal types. + * Creates an UpdateThrottlerConfigResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns DeleteSrvVSchemaResponse + * @returns UpdateThrottlerConfigResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.DeleteSrvVSchemaResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.UpdateThrottlerConfigResponse; /** - * Creates a plain object from a DeleteSrvVSchemaResponse message. Also converts values to other types if specified. - * @param message DeleteSrvVSchemaResponse + * Creates a plain object from an UpdateThrottlerConfigResponse message. Also converts values to other types if specified. + * @param message UpdateThrottlerConfigResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.DeleteSrvVSchemaResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.UpdateThrottlerConfigResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this DeleteSrvVSchemaResponse to JSON. + * Converts this UpdateThrottlerConfigResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for DeleteSrvVSchemaResponse + * Gets the default type url for UpdateThrottlerConfigResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a DeleteTabletsRequest. 
*/ - interface IDeleteTabletsRequest { - - /** DeleteTabletsRequest tablet_aliases */ - tablet_aliases?: (topodata.ITabletAlias[]|null); + /** Properties of a GetSrvVSchemaRequest. */ + interface IGetSrvVSchemaRequest { - /** DeleteTabletsRequest allow_primary */ - allow_primary?: (boolean|null); + /** GetSrvVSchemaRequest cell */ + cell?: (string|null); } - /** Represents a DeleteTabletsRequest. */ - class DeleteTabletsRequest implements IDeleteTabletsRequest { + /** Represents a GetSrvVSchemaRequest. */ + class GetSrvVSchemaRequest implements IGetSrvVSchemaRequest { /** - * Constructs a new DeleteTabletsRequest. + * Constructs a new GetSrvVSchemaRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IDeleteTabletsRequest); - - /** DeleteTabletsRequest tablet_aliases. */ - public tablet_aliases: topodata.ITabletAlias[]; + constructor(properties?: vtctldata.IGetSrvVSchemaRequest); - /** DeleteTabletsRequest allow_primary. */ - public allow_primary: boolean; + /** GetSrvVSchemaRequest cell. */ + public cell: string; /** - * Creates a new DeleteTabletsRequest instance using the specified properties. + * Creates a new GetSrvVSchemaRequest instance using the specified properties. * @param [properties] Properties to set - * @returns DeleteTabletsRequest instance + * @returns GetSrvVSchemaRequest instance */ - public static create(properties?: vtctldata.IDeleteTabletsRequest): vtctldata.DeleteTabletsRequest; + public static create(properties?: vtctldata.IGetSrvVSchemaRequest): vtctldata.GetSrvVSchemaRequest; /** - * Encodes the specified DeleteTabletsRequest message. Does not implicitly {@link vtctldata.DeleteTabletsRequest.verify|verify} messages. - * @param message DeleteTabletsRequest message or plain object to encode + * Encodes the specified GetSrvVSchemaRequest message. Does not implicitly {@link vtctldata.GetSrvVSchemaRequest.verify|verify} messages. 
+ * @param message GetSrvVSchemaRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IDeleteTabletsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetSrvVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified DeleteTabletsRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteTabletsRequest.verify|verify} messages. - * @param message DeleteTabletsRequest message or plain object to encode + * Encodes the specified GetSrvVSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.GetSrvVSchemaRequest.verify|verify} messages. + * @param message GetSrvVSchemaRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IDeleteTabletsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetSrvVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a DeleteTabletsRequest message from the specified reader or buffer. + * Decodes a GetSrvVSchemaRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns DeleteTabletsRequest + * @returns GetSrvVSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteTabletsRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSrvVSchemaRequest; /** - * Decodes a DeleteTabletsRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a GetSrvVSchemaRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns DeleteTabletsRequest + * @returns GetSrvVSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteTabletsRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSrvVSchemaRequest; /** - * Verifies a DeleteTabletsRequest message. + * Verifies a GetSrvVSchemaRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a DeleteTabletsRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetSrvVSchemaRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns DeleteTabletsRequest + * @returns GetSrvVSchemaRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.DeleteTabletsRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetSrvVSchemaRequest; /** - * Creates a plain object from a DeleteTabletsRequest message. Also converts values to other types if specified. - * @param message DeleteTabletsRequest + * Creates a plain object from a GetSrvVSchemaRequest message. Also converts values to other types if specified. 
+ * @param message GetSrvVSchemaRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.DeleteTabletsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetSrvVSchemaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this DeleteTabletsRequest to JSON. + * Converts this GetSrvVSchemaRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for DeleteTabletsRequest + * Gets the default type url for GetSrvVSchemaRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a DeleteTabletsResponse. */ - interface IDeleteTabletsResponse { + /** Properties of a GetSrvVSchemaResponse. */ + interface IGetSrvVSchemaResponse { + + /** GetSrvVSchemaResponse srv_v_schema */ + srv_v_schema?: (vschema.ISrvVSchema|null); } - /** Represents a DeleteTabletsResponse. */ - class DeleteTabletsResponse implements IDeleteTabletsResponse { + /** Represents a GetSrvVSchemaResponse. */ + class GetSrvVSchemaResponse implements IGetSrvVSchemaResponse { /** - * Constructs a new DeleteTabletsResponse. + * Constructs a new GetSrvVSchemaResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IDeleteTabletsResponse); + constructor(properties?: vtctldata.IGetSrvVSchemaResponse); + + /** GetSrvVSchemaResponse srv_v_schema. */ + public srv_v_schema?: (vschema.ISrvVSchema|null); /** - * Creates a new DeleteTabletsResponse instance using the specified properties. + * Creates a new GetSrvVSchemaResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns DeleteTabletsResponse instance + * @returns GetSrvVSchemaResponse instance */ - public static create(properties?: vtctldata.IDeleteTabletsResponse): vtctldata.DeleteTabletsResponse; + public static create(properties?: vtctldata.IGetSrvVSchemaResponse): vtctldata.GetSrvVSchemaResponse; /** - * Encodes the specified DeleteTabletsResponse message. Does not implicitly {@link vtctldata.DeleteTabletsResponse.verify|verify} messages. - * @param message DeleteTabletsResponse message or plain object to encode + * Encodes the specified GetSrvVSchemaResponse message. Does not implicitly {@link vtctldata.GetSrvVSchemaResponse.verify|verify} messages. + * @param message GetSrvVSchemaResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IDeleteTabletsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetSrvVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified DeleteTabletsResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteTabletsResponse.verify|verify} messages. - * @param message DeleteTabletsResponse message or plain object to encode + * Encodes the specified GetSrvVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.GetSrvVSchemaResponse.verify|verify} messages. + * @param message GetSrvVSchemaResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IDeleteTabletsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetSrvVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a DeleteTabletsResponse message from the specified reader or buffer. + * Decodes a GetSrvVSchemaResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns DeleteTabletsResponse + * @returns GetSrvVSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.DeleteTabletsResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSrvVSchemaResponse; /** - * Decodes a DeleteTabletsResponse message from the specified reader or buffer, length delimited. + * Decodes a GetSrvVSchemaResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns DeleteTabletsResponse + * @returns GetSrvVSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.DeleteTabletsResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSrvVSchemaResponse; /** - * Verifies a DeleteTabletsResponse message. + * Verifies a GetSrvVSchemaResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a DeleteTabletsResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetSrvVSchemaResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns DeleteTabletsResponse + * @returns GetSrvVSchemaResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.DeleteTabletsResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetSrvVSchemaResponse; /** - * Creates a plain object from a DeleteTabletsResponse message. Also converts values to other types if specified. - * @param message DeleteTabletsResponse + * Creates a plain object from a GetSrvVSchemaResponse message. Also converts values to other types if specified. + * @param message GetSrvVSchemaResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.DeleteTabletsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetSrvVSchemaResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this DeleteTabletsResponse to JSON. + * Converts this GetSrvVSchemaResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for DeleteTabletsResponse + * Gets the default type url for GetSrvVSchemaResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an EmergencyReparentShardRequest. 
*/ - interface IEmergencyReparentShardRequest { - - /** EmergencyReparentShardRequest keyspace */ - keyspace?: (string|null); - - /** EmergencyReparentShardRequest shard */ - shard?: (string|null); - - /** EmergencyReparentShardRequest new_primary */ - new_primary?: (topodata.ITabletAlias|null); - - /** EmergencyReparentShardRequest ignore_replicas */ - ignore_replicas?: (topodata.ITabletAlias[]|null); - - /** EmergencyReparentShardRequest wait_replicas_timeout */ - wait_replicas_timeout?: (vttime.IDuration|null); + /** Properties of a GetSrvVSchemasRequest. */ + interface IGetSrvVSchemasRequest { - /** EmergencyReparentShardRequest prevent_cross_cell_promotion */ - prevent_cross_cell_promotion?: (boolean|null); + /** GetSrvVSchemasRequest cells */ + cells?: (string[]|null); } - /** Represents an EmergencyReparentShardRequest. */ - class EmergencyReparentShardRequest implements IEmergencyReparentShardRequest { + /** Represents a GetSrvVSchemasRequest. */ + class GetSrvVSchemasRequest implements IGetSrvVSchemasRequest { /** - * Constructs a new EmergencyReparentShardRequest. + * Constructs a new GetSrvVSchemasRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IEmergencyReparentShardRequest); - - /** EmergencyReparentShardRequest keyspace. */ - public keyspace: string; - - /** EmergencyReparentShardRequest shard. */ - public shard: string; - - /** EmergencyReparentShardRequest new_primary. */ - public new_primary?: (topodata.ITabletAlias|null); - - /** EmergencyReparentShardRequest ignore_replicas. */ - public ignore_replicas: topodata.ITabletAlias[]; - - /** EmergencyReparentShardRequest wait_replicas_timeout. */ - public wait_replicas_timeout?: (vttime.IDuration|null); + constructor(properties?: vtctldata.IGetSrvVSchemasRequest); - /** EmergencyReparentShardRequest prevent_cross_cell_promotion. */ - public prevent_cross_cell_promotion: boolean; + /** GetSrvVSchemasRequest cells. 
*/ + public cells: string[]; /** - * Creates a new EmergencyReparentShardRequest instance using the specified properties. + * Creates a new GetSrvVSchemasRequest instance using the specified properties. * @param [properties] Properties to set - * @returns EmergencyReparentShardRequest instance + * @returns GetSrvVSchemasRequest instance */ - public static create(properties?: vtctldata.IEmergencyReparentShardRequest): vtctldata.EmergencyReparentShardRequest; + public static create(properties?: vtctldata.IGetSrvVSchemasRequest): vtctldata.GetSrvVSchemasRequest; /** - * Encodes the specified EmergencyReparentShardRequest message. Does not implicitly {@link vtctldata.EmergencyReparentShardRequest.verify|verify} messages. - * @param message EmergencyReparentShardRequest message or plain object to encode + * Encodes the specified GetSrvVSchemasRequest message. Does not implicitly {@link vtctldata.GetSrvVSchemasRequest.verify|verify} messages. + * @param message GetSrvVSchemasRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IEmergencyReparentShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetSrvVSchemasRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified EmergencyReparentShardRequest message, length delimited. Does not implicitly {@link vtctldata.EmergencyReparentShardRequest.verify|verify} messages. - * @param message EmergencyReparentShardRequest message or plain object to encode + * Encodes the specified GetSrvVSchemasRequest message, length delimited. Does not implicitly {@link vtctldata.GetSrvVSchemasRequest.verify|verify} messages. 
+ * @param message GetSrvVSchemasRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IEmergencyReparentShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetSrvVSchemasRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an EmergencyReparentShardRequest message from the specified reader or buffer. + * Decodes a GetSrvVSchemasRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns EmergencyReparentShardRequest + * @returns GetSrvVSchemasRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.EmergencyReparentShardRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSrvVSchemasRequest; /** - * Decodes an EmergencyReparentShardRequest message from the specified reader or buffer, length delimited. + * Decodes a GetSrvVSchemasRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns EmergencyReparentShardRequest + * @returns GetSrvVSchemasRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.EmergencyReparentShardRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSrvVSchemasRequest; /** - * Verifies an EmergencyReparentShardRequest message. + * Verifies a GetSrvVSchemasRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an EmergencyReparentShardRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetSrvVSchemasRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns EmergencyReparentShardRequest + * @returns GetSrvVSchemasRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.EmergencyReparentShardRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetSrvVSchemasRequest; /** - * Creates a plain object from an EmergencyReparentShardRequest message. Also converts values to other types if specified. - * @param message EmergencyReparentShardRequest + * Creates a plain object from a GetSrvVSchemasRequest message. Also converts values to other types if specified. + * @param message GetSrvVSchemasRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.EmergencyReparentShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetSrvVSchemasRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this EmergencyReparentShardRequest to JSON. + * Converts this GetSrvVSchemasRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for EmergencyReparentShardRequest + * Gets the default type url for GetSrvVSchemasRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an EmergencyReparentShardResponse. 
*/ - interface IEmergencyReparentShardResponse { - - /** EmergencyReparentShardResponse keyspace */ - keyspace?: (string|null); - - /** EmergencyReparentShardResponse shard */ - shard?: (string|null); - - /** EmergencyReparentShardResponse promoted_primary */ - promoted_primary?: (topodata.ITabletAlias|null); + /** Properties of a GetSrvVSchemasResponse. */ + interface IGetSrvVSchemasResponse { - /** EmergencyReparentShardResponse events */ - events?: (logutil.IEvent[]|null); + /** GetSrvVSchemasResponse srv_v_schemas */ + srv_v_schemas?: ({ [k: string]: vschema.ISrvVSchema }|null); } - /** Represents an EmergencyReparentShardResponse. */ - class EmergencyReparentShardResponse implements IEmergencyReparentShardResponse { + /** Represents a GetSrvVSchemasResponse. */ + class GetSrvVSchemasResponse implements IGetSrvVSchemasResponse { /** - * Constructs a new EmergencyReparentShardResponse. + * Constructs a new GetSrvVSchemasResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IEmergencyReparentShardResponse); - - /** EmergencyReparentShardResponse keyspace. */ - public keyspace: string; - - /** EmergencyReparentShardResponse shard. */ - public shard: string; - - /** EmergencyReparentShardResponse promoted_primary. */ - public promoted_primary?: (topodata.ITabletAlias|null); + constructor(properties?: vtctldata.IGetSrvVSchemasResponse); - /** EmergencyReparentShardResponse events. */ - public events: logutil.IEvent[]; + /** GetSrvVSchemasResponse srv_v_schemas. */ + public srv_v_schemas: { [k: string]: vschema.ISrvVSchema }; /** - * Creates a new EmergencyReparentShardResponse instance using the specified properties. + * Creates a new GetSrvVSchemasResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns EmergencyReparentShardResponse instance + * @returns GetSrvVSchemasResponse instance */ - public static create(properties?: vtctldata.IEmergencyReparentShardResponse): vtctldata.EmergencyReparentShardResponse; + public static create(properties?: vtctldata.IGetSrvVSchemasResponse): vtctldata.GetSrvVSchemasResponse; /** - * Encodes the specified EmergencyReparentShardResponse message. Does not implicitly {@link vtctldata.EmergencyReparentShardResponse.verify|verify} messages. - * @param message EmergencyReparentShardResponse message or plain object to encode + * Encodes the specified GetSrvVSchemasResponse message. Does not implicitly {@link vtctldata.GetSrvVSchemasResponse.verify|verify} messages. + * @param message GetSrvVSchemasResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IEmergencyReparentShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetSrvVSchemasResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified EmergencyReparentShardResponse message, length delimited. Does not implicitly {@link vtctldata.EmergencyReparentShardResponse.verify|verify} messages. - * @param message EmergencyReparentShardResponse message or plain object to encode + * Encodes the specified GetSrvVSchemasResponse message, length delimited. Does not implicitly {@link vtctldata.GetSrvVSchemasResponse.verify|verify} messages. 
+ * @param message GetSrvVSchemasResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IEmergencyReparentShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetSrvVSchemasResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an EmergencyReparentShardResponse message from the specified reader or buffer. + * Decodes a GetSrvVSchemasResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns EmergencyReparentShardResponse + * @returns GetSrvVSchemasResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.EmergencyReparentShardResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSrvVSchemasResponse; /** - * Decodes an EmergencyReparentShardResponse message from the specified reader or buffer, length delimited. + * Decodes a GetSrvVSchemasResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns EmergencyReparentShardResponse + * @returns GetSrvVSchemasResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.EmergencyReparentShardResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSrvVSchemasResponse; /** - * Verifies an EmergencyReparentShardResponse message. + * Verifies a GetSrvVSchemasResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an EmergencyReparentShardResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetSrvVSchemasResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns EmergencyReparentShardResponse + * @returns GetSrvVSchemasResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.EmergencyReparentShardResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetSrvVSchemasResponse; /** - * Creates a plain object from an EmergencyReparentShardResponse message. Also converts values to other types if specified. - * @param message EmergencyReparentShardResponse + * Creates a plain object from a GetSrvVSchemasResponse message. Also converts values to other types if specified. + * @param message GetSrvVSchemasResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.EmergencyReparentShardResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetSrvVSchemasResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this EmergencyReparentShardResponse to JSON. + * Converts this GetSrvVSchemasResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for EmergencyReparentShardResponse + * Gets the default type url for GetSrvVSchemasResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an ExecuteFetchAsAppRequest. 
*/ - interface IExecuteFetchAsAppRequest { + /** Properties of a GetTabletRequest. */ + interface IGetTabletRequest { - /** ExecuteFetchAsAppRequest tablet_alias */ + /** GetTabletRequest tablet_alias */ tablet_alias?: (topodata.ITabletAlias|null); - - /** ExecuteFetchAsAppRequest query */ - query?: (string|null); - - /** ExecuteFetchAsAppRequest max_rows */ - max_rows?: (number|Long|null); - - /** ExecuteFetchAsAppRequest use_pool */ - use_pool?: (boolean|null); } - /** Represents an ExecuteFetchAsAppRequest. */ - class ExecuteFetchAsAppRequest implements IExecuteFetchAsAppRequest { - - /** - * Constructs a new ExecuteFetchAsAppRequest. - * @param [properties] Properties to set - */ - constructor(properties?: vtctldata.IExecuteFetchAsAppRequest); - - /** ExecuteFetchAsAppRequest tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); - - /** ExecuteFetchAsAppRequest query. */ - public query: string; + /** Represents a GetTabletRequest. */ + class GetTabletRequest implements IGetTabletRequest { - /** ExecuteFetchAsAppRequest max_rows. */ - public max_rows: (number|Long); + /** + * Constructs a new GetTabletRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IGetTabletRequest); - /** ExecuteFetchAsAppRequest use_pool. */ - public use_pool: boolean; + /** GetTabletRequest tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); /** - * Creates a new ExecuteFetchAsAppRequest instance using the specified properties. + * Creates a new GetTabletRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ExecuteFetchAsAppRequest instance + * @returns GetTabletRequest instance */ - public static create(properties?: vtctldata.IExecuteFetchAsAppRequest): vtctldata.ExecuteFetchAsAppRequest; + public static create(properties?: vtctldata.IGetTabletRequest): vtctldata.GetTabletRequest; /** - * Encodes the specified ExecuteFetchAsAppRequest message. 
Does not implicitly {@link vtctldata.ExecuteFetchAsAppRequest.verify|verify} messages. - * @param message ExecuteFetchAsAppRequest message or plain object to encode + * Encodes the specified GetTabletRequest message. Does not implicitly {@link vtctldata.GetTabletRequest.verify|verify} messages. + * @param message GetTabletRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IExecuteFetchAsAppRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetTabletRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ExecuteFetchAsAppRequest message, length delimited. Does not implicitly {@link vtctldata.ExecuteFetchAsAppRequest.verify|verify} messages. - * @param message ExecuteFetchAsAppRequest message or plain object to encode + * Encodes the specified GetTabletRequest message, length delimited. Does not implicitly {@link vtctldata.GetTabletRequest.verify|verify} messages. + * @param message GetTabletRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IExecuteFetchAsAppRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetTabletRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an ExecuteFetchAsAppRequest message from the specified reader or buffer. + * Decodes a GetTabletRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ExecuteFetchAsAppRequest + * @returns GetTabletRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ExecuteFetchAsAppRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetTabletRequest; /** - * Decodes an ExecuteFetchAsAppRequest message from the specified reader or buffer, length delimited. + * Decodes a GetTabletRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ExecuteFetchAsAppRequest + * @returns GetTabletRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ExecuteFetchAsAppRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetTabletRequest; /** - * Verifies an ExecuteFetchAsAppRequest message. + * Verifies a GetTabletRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an ExecuteFetchAsAppRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetTabletRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ExecuteFetchAsAppRequest + * @returns GetTabletRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ExecuteFetchAsAppRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetTabletRequest; /** - * Creates a plain object from an ExecuteFetchAsAppRequest message. Also converts values to other types if specified. - * @param message ExecuteFetchAsAppRequest + * Creates a plain object from a GetTabletRequest message. Also converts values to other types if specified. + * @param message GetTabletRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ExecuteFetchAsAppRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetTabletRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ExecuteFetchAsAppRequest to JSON. + * Converts this GetTabletRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ExecuteFetchAsAppRequest + * Gets the default type url for GetTabletRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an ExecuteFetchAsAppResponse. */ - interface IExecuteFetchAsAppResponse { + /** Properties of a GetTabletResponse. */ + interface IGetTabletResponse { - /** ExecuteFetchAsAppResponse result */ - result?: (query.IQueryResult|null); + /** GetTabletResponse tablet */ + tablet?: (topodata.ITablet|null); } - /** Represents an ExecuteFetchAsAppResponse. */ - class ExecuteFetchAsAppResponse implements IExecuteFetchAsAppResponse { + /** Represents a GetTabletResponse. */ + class GetTabletResponse implements IGetTabletResponse { /** - * Constructs a new ExecuteFetchAsAppResponse. 
+ * Constructs a new GetTabletResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IExecuteFetchAsAppResponse); + constructor(properties?: vtctldata.IGetTabletResponse); - /** ExecuteFetchAsAppResponse result. */ - public result?: (query.IQueryResult|null); + /** GetTabletResponse tablet. */ + public tablet?: (topodata.ITablet|null); /** - * Creates a new ExecuteFetchAsAppResponse instance using the specified properties. + * Creates a new GetTabletResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ExecuteFetchAsAppResponse instance + * @returns GetTabletResponse instance */ - public static create(properties?: vtctldata.IExecuteFetchAsAppResponse): vtctldata.ExecuteFetchAsAppResponse; + public static create(properties?: vtctldata.IGetTabletResponse): vtctldata.GetTabletResponse; /** - * Encodes the specified ExecuteFetchAsAppResponse message. Does not implicitly {@link vtctldata.ExecuteFetchAsAppResponse.verify|verify} messages. - * @param message ExecuteFetchAsAppResponse message or plain object to encode + * Encodes the specified GetTabletResponse message. Does not implicitly {@link vtctldata.GetTabletResponse.verify|verify} messages. + * @param message GetTabletResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IExecuteFetchAsAppResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetTabletResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ExecuteFetchAsAppResponse message, length delimited. Does not implicitly {@link vtctldata.ExecuteFetchAsAppResponse.verify|verify} messages. - * @param message ExecuteFetchAsAppResponse message or plain object to encode + * Encodes the specified GetTabletResponse message, length delimited. Does not implicitly {@link vtctldata.GetTabletResponse.verify|verify} messages. 
+ * @param message GetTabletResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IExecuteFetchAsAppResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetTabletResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an ExecuteFetchAsAppResponse message from the specified reader or buffer. + * Decodes a GetTabletResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ExecuteFetchAsAppResponse + * @returns GetTabletResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ExecuteFetchAsAppResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetTabletResponse; /** - * Decodes an ExecuteFetchAsAppResponse message from the specified reader or buffer, length delimited. + * Decodes a GetTabletResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ExecuteFetchAsAppResponse + * @returns GetTabletResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ExecuteFetchAsAppResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetTabletResponse; /** - * Verifies an ExecuteFetchAsAppResponse message. + * Verifies a GetTabletResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an ExecuteFetchAsAppResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetTabletResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ExecuteFetchAsAppResponse + * @returns GetTabletResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ExecuteFetchAsAppResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetTabletResponse; /** - * Creates a plain object from an ExecuteFetchAsAppResponse message. Also converts values to other types if specified. - * @param message ExecuteFetchAsAppResponse + * Creates a plain object from a GetTabletResponse message. Also converts values to other types if specified. + * @param message GetTabletResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ExecuteFetchAsAppResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetTabletResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ExecuteFetchAsAppResponse to JSON. + * Converts this GetTabletResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ExecuteFetchAsAppResponse + * Gets the default type url for GetTabletResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an ExecuteFetchAsDBARequest. */ - interface IExecuteFetchAsDBARequest { + /** Properties of a GetTabletsRequest. 
*/ + interface IGetTabletsRequest { - /** ExecuteFetchAsDBARequest tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); + /** GetTabletsRequest keyspace */ + keyspace?: (string|null); - /** ExecuteFetchAsDBARequest query */ - query?: (string|null); + /** GetTabletsRequest shard */ + shard?: (string|null); - /** ExecuteFetchAsDBARequest max_rows */ - max_rows?: (number|Long|null); + /** GetTabletsRequest cells */ + cells?: (string[]|null); - /** ExecuteFetchAsDBARequest disable_binlogs */ - disable_binlogs?: (boolean|null); + /** GetTabletsRequest strict */ + strict?: (boolean|null); - /** ExecuteFetchAsDBARequest reload_schema */ - reload_schema?: (boolean|null); + /** GetTabletsRequest tablet_aliases */ + tablet_aliases?: (topodata.ITabletAlias[]|null); + + /** GetTabletsRequest tablet_type */ + tablet_type?: (topodata.TabletType|null); } - /** Represents an ExecuteFetchAsDBARequest. */ - class ExecuteFetchAsDBARequest implements IExecuteFetchAsDBARequest { + /** Represents a GetTabletsRequest. */ + class GetTabletsRequest implements IGetTabletsRequest { /** - * Constructs a new ExecuteFetchAsDBARequest. + * Constructs a new GetTabletsRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IExecuteFetchAsDBARequest); + constructor(properties?: vtctldata.IGetTabletsRequest); - /** ExecuteFetchAsDBARequest tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); + /** GetTabletsRequest keyspace. */ + public keyspace: string; - /** ExecuteFetchAsDBARequest query. */ - public query: string; + /** GetTabletsRequest shard. */ + public shard: string; - /** ExecuteFetchAsDBARequest max_rows. */ - public max_rows: (number|Long); + /** GetTabletsRequest cells. */ + public cells: string[]; - /** ExecuteFetchAsDBARequest disable_binlogs. */ - public disable_binlogs: boolean; + /** GetTabletsRequest strict. */ + public strict: boolean; - /** ExecuteFetchAsDBARequest reload_schema. 
*/ - public reload_schema: boolean; + /** GetTabletsRequest tablet_aliases. */ + public tablet_aliases: topodata.ITabletAlias[]; + + /** GetTabletsRequest tablet_type. */ + public tablet_type: topodata.TabletType; /** - * Creates a new ExecuteFetchAsDBARequest instance using the specified properties. + * Creates a new GetTabletsRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ExecuteFetchAsDBARequest instance + * @returns GetTabletsRequest instance */ - public static create(properties?: vtctldata.IExecuteFetchAsDBARequest): vtctldata.ExecuteFetchAsDBARequest; + public static create(properties?: vtctldata.IGetTabletsRequest): vtctldata.GetTabletsRequest; /** - * Encodes the specified ExecuteFetchAsDBARequest message. Does not implicitly {@link vtctldata.ExecuteFetchAsDBARequest.verify|verify} messages. - * @param message ExecuteFetchAsDBARequest message or plain object to encode + * Encodes the specified GetTabletsRequest message. Does not implicitly {@link vtctldata.GetTabletsRequest.verify|verify} messages. + * @param message GetTabletsRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IExecuteFetchAsDBARequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetTabletsRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ExecuteFetchAsDBARequest message, length delimited. Does not implicitly {@link vtctldata.ExecuteFetchAsDBARequest.verify|verify} messages. - * @param message ExecuteFetchAsDBARequest message or plain object to encode + * Encodes the specified GetTabletsRequest message, length delimited. Does not implicitly {@link vtctldata.GetTabletsRequest.verify|verify} messages. 
+ * @param message GetTabletsRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IExecuteFetchAsDBARequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetTabletsRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an ExecuteFetchAsDBARequest message from the specified reader or buffer. + * Decodes a GetTabletsRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ExecuteFetchAsDBARequest + * @returns GetTabletsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ExecuteFetchAsDBARequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetTabletsRequest; /** - * Decodes an ExecuteFetchAsDBARequest message from the specified reader or buffer, length delimited. + * Decodes a GetTabletsRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ExecuteFetchAsDBARequest + * @returns GetTabletsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ExecuteFetchAsDBARequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetTabletsRequest; /** - * Verifies an ExecuteFetchAsDBARequest message. + * Verifies a GetTabletsRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an ExecuteFetchAsDBARequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetTabletsRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ExecuteFetchAsDBARequest + * @returns GetTabletsRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ExecuteFetchAsDBARequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetTabletsRequest; /** - * Creates a plain object from an ExecuteFetchAsDBARequest message. Also converts values to other types if specified. - * @param message ExecuteFetchAsDBARequest + * Creates a plain object from a GetTabletsRequest message. Also converts values to other types if specified. + * @param message GetTabletsRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ExecuteFetchAsDBARequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetTabletsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ExecuteFetchAsDBARequest to JSON. + * Converts this GetTabletsRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ExecuteFetchAsDBARequest + * Gets the default type url for GetTabletsRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an ExecuteFetchAsDBAResponse. */ - interface IExecuteFetchAsDBAResponse { + /** Properties of a GetTabletsResponse. 
*/ + interface IGetTabletsResponse { - /** ExecuteFetchAsDBAResponse result */ - result?: (query.IQueryResult|null); + /** GetTabletsResponse tablets */ + tablets?: (topodata.ITablet[]|null); } - /** Represents an ExecuteFetchAsDBAResponse. */ - class ExecuteFetchAsDBAResponse implements IExecuteFetchAsDBAResponse { + /** Represents a GetTabletsResponse. */ + class GetTabletsResponse implements IGetTabletsResponse { /** - * Constructs a new ExecuteFetchAsDBAResponse. + * Constructs a new GetTabletsResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IExecuteFetchAsDBAResponse); + constructor(properties?: vtctldata.IGetTabletsResponse); - /** ExecuteFetchAsDBAResponse result. */ - public result?: (query.IQueryResult|null); + /** GetTabletsResponse tablets. */ + public tablets: topodata.ITablet[]; /** - * Creates a new ExecuteFetchAsDBAResponse instance using the specified properties. + * Creates a new GetTabletsResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ExecuteFetchAsDBAResponse instance + * @returns GetTabletsResponse instance */ - public static create(properties?: vtctldata.IExecuteFetchAsDBAResponse): vtctldata.ExecuteFetchAsDBAResponse; + public static create(properties?: vtctldata.IGetTabletsResponse): vtctldata.GetTabletsResponse; /** - * Encodes the specified ExecuteFetchAsDBAResponse message. Does not implicitly {@link vtctldata.ExecuteFetchAsDBAResponse.verify|verify} messages. - * @param message ExecuteFetchAsDBAResponse message or plain object to encode + * Encodes the specified GetTabletsResponse message. Does not implicitly {@link vtctldata.GetTabletsResponse.verify|verify} messages. 
+ * @param message GetTabletsResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IExecuteFetchAsDBAResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetTabletsResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ExecuteFetchAsDBAResponse message, length delimited. Does not implicitly {@link vtctldata.ExecuteFetchAsDBAResponse.verify|verify} messages. - * @param message ExecuteFetchAsDBAResponse message or plain object to encode + * Encodes the specified GetTabletsResponse message, length delimited. Does not implicitly {@link vtctldata.GetTabletsResponse.verify|verify} messages. + * @param message GetTabletsResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IExecuteFetchAsDBAResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetTabletsResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an ExecuteFetchAsDBAResponse message from the specified reader or buffer. + * Decodes a GetTabletsResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ExecuteFetchAsDBAResponse + * @returns GetTabletsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ExecuteFetchAsDBAResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetTabletsResponse; /** - * Decodes an ExecuteFetchAsDBAResponse message from the specified reader or buffer, length delimited. 
+ * Decodes a GetTabletsResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ExecuteFetchAsDBAResponse + * @returns GetTabletsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ExecuteFetchAsDBAResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetTabletsResponse; /** - * Verifies an ExecuteFetchAsDBAResponse message. + * Verifies a GetTabletsResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an ExecuteFetchAsDBAResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetTabletsResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ExecuteFetchAsDBAResponse + * @returns GetTabletsResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ExecuteFetchAsDBAResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetTabletsResponse; /** - * Creates a plain object from an ExecuteFetchAsDBAResponse message. Also converts values to other types if specified. - * @param message ExecuteFetchAsDBAResponse + * Creates a plain object from a GetTabletsResponse message. Also converts values to other types if specified. 
+ * @param message GetTabletsResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ExecuteFetchAsDBAResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetTabletsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ExecuteFetchAsDBAResponse to JSON. + * Converts this GetTabletsResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ExecuteFetchAsDBAResponse + * Gets the default type url for GetTabletsResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an ExecuteHookRequest. */ - interface IExecuteHookRequest { - - /** ExecuteHookRequest tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); + /** Properties of a GetTopologyPathRequest. */ + interface IGetTopologyPathRequest { - /** ExecuteHookRequest tablet_hook_request */ - tablet_hook_request?: (tabletmanagerdata.IExecuteHookRequest|null); + /** GetTopologyPathRequest path */ + path?: (string|null); } - /** Represents an ExecuteHookRequest. */ - class ExecuteHookRequest implements IExecuteHookRequest { + /** Represents a GetTopologyPathRequest. */ + class GetTopologyPathRequest implements IGetTopologyPathRequest { /** - * Constructs a new ExecuteHookRequest. + * Constructs a new GetTopologyPathRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IExecuteHookRequest); - - /** ExecuteHookRequest tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); + constructor(properties?: vtctldata.IGetTopologyPathRequest); - /** ExecuteHookRequest tablet_hook_request. */ - public tablet_hook_request?: (tabletmanagerdata.IExecuteHookRequest|null); + /** GetTopologyPathRequest path. 
*/ + public path: string; /** - * Creates a new ExecuteHookRequest instance using the specified properties. + * Creates a new GetTopologyPathRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ExecuteHookRequest instance + * @returns GetTopologyPathRequest instance */ - public static create(properties?: vtctldata.IExecuteHookRequest): vtctldata.ExecuteHookRequest; + public static create(properties?: vtctldata.IGetTopologyPathRequest): vtctldata.GetTopologyPathRequest; /** - * Encodes the specified ExecuteHookRequest message. Does not implicitly {@link vtctldata.ExecuteHookRequest.verify|verify} messages. - * @param message ExecuteHookRequest message or plain object to encode + * Encodes the specified GetTopologyPathRequest message. Does not implicitly {@link vtctldata.GetTopologyPathRequest.verify|verify} messages. + * @param message GetTopologyPathRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IExecuteHookRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetTopologyPathRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ExecuteHookRequest message, length delimited. Does not implicitly {@link vtctldata.ExecuteHookRequest.verify|verify} messages. - * @param message ExecuteHookRequest message or plain object to encode + * Encodes the specified GetTopologyPathRequest message, length delimited. Does not implicitly {@link vtctldata.GetTopologyPathRequest.verify|verify} messages. 
+ * @param message GetTopologyPathRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IExecuteHookRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetTopologyPathRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an ExecuteHookRequest message from the specified reader or buffer. + * Decodes a GetTopologyPathRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ExecuteHookRequest + * @returns GetTopologyPathRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ExecuteHookRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetTopologyPathRequest; /** - * Decodes an ExecuteHookRequest message from the specified reader or buffer, length delimited. + * Decodes a GetTopologyPathRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ExecuteHookRequest + * @returns GetTopologyPathRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ExecuteHookRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetTopologyPathRequest; /** - * Verifies an ExecuteHookRequest message. + * Verifies a GetTopologyPathRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an ExecuteHookRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetTopologyPathRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ExecuteHookRequest + * @returns GetTopologyPathRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ExecuteHookRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetTopologyPathRequest; /** - * Creates a plain object from an ExecuteHookRequest message. Also converts values to other types if specified. - * @param message ExecuteHookRequest + * Creates a plain object from a GetTopologyPathRequest message. Also converts values to other types if specified. + * @param message GetTopologyPathRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ExecuteHookRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetTopologyPathRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ExecuteHookRequest to JSON. + * Converts this GetTopologyPathRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ExecuteHookRequest + * Gets the default type url for GetTopologyPathRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an ExecuteHookResponse. */ - interface IExecuteHookResponse { + /** Properties of a GetTopologyPathResponse. 
*/ + interface IGetTopologyPathResponse { - /** ExecuteHookResponse hook_result */ - hook_result?: (tabletmanagerdata.IExecuteHookResponse|null); + /** GetTopologyPathResponse cell */ + cell?: (vtctldata.ITopologyCell|null); } - /** Represents an ExecuteHookResponse. */ - class ExecuteHookResponse implements IExecuteHookResponse { + /** Represents a GetTopologyPathResponse. */ + class GetTopologyPathResponse implements IGetTopologyPathResponse { /** - * Constructs a new ExecuteHookResponse. + * Constructs a new GetTopologyPathResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IExecuteHookResponse); + constructor(properties?: vtctldata.IGetTopologyPathResponse); - /** ExecuteHookResponse hook_result. */ - public hook_result?: (tabletmanagerdata.IExecuteHookResponse|null); + /** GetTopologyPathResponse cell. */ + public cell?: (vtctldata.ITopologyCell|null); /** - * Creates a new ExecuteHookResponse instance using the specified properties. + * Creates a new GetTopologyPathResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ExecuteHookResponse instance + * @returns GetTopologyPathResponse instance */ - public static create(properties?: vtctldata.IExecuteHookResponse): vtctldata.ExecuteHookResponse; + public static create(properties?: vtctldata.IGetTopologyPathResponse): vtctldata.GetTopologyPathResponse; /** - * Encodes the specified ExecuteHookResponse message. Does not implicitly {@link vtctldata.ExecuteHookResponse.verify|verify} messages. - * @param message ExecuteHookResponse message or plain object to encode + * Encodes the specified GetTopologyPathResponse message. Does not implicitly {@link vtctldata.GetTopologyPathResponse.verify|verify} messages. 
+ * @param message GetTopologyPathResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IExecuteHookResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetTopologyPathResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ExecuteHookResponse message, length delimited. Does not implicitly {@link vtctldata.ExecuteHookResponse.verify|verify} messages. - * @param message ExecuteHookResponse message or plain object to encode + * Encodes the specified GetTopologyPathResponse message, length delimited. Does not implicitly {@link vtctldata.GetTopologyPathResponse.verify|verify} messages. + * @param message GetTopologyPathResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IExecuteHookResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetTopologyPathResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an ExecuteHookResponse message from the specified reader or buffer. + * Decodes a GetTopologyPathResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ExecuteHookResponse + * @returns GetTopologyPathResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ExecuteHookResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetTopologyPathResponse; /** - * Decodes an ExecuteHookResponse message from the specified reader or buffer, length delimited. 
+ * Decodes a GetTopologyPathResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ExecuteHookResponse + * @returns GetTopologyPathResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ExecuteHookResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetTopologyPathResponse; /** - * Verifies an ExecuteHookResponse message. + * Verifies a GetTopologyPathResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an ExecuteHookResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetTopologyPathResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ExecuteHookResponse + * @returns GetTopologyPathResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ExecuteHookResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetTopologyPathResponse; /** - * Creates a plain object from an ExecuteHookResponse message. Also converts values to other types if specified. - * @param message ExecuteHookResponse + * Creates a plain object from a GetTopologyPathResponse message. Also converts values to other types if specified. 
+ * @param message GetTopologyPathResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ExecuteHookResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetTopologyPathResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ExecuteHookResponse to JSON. + * Converts this GetTopologyPathResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ExecuteHookResponse + * Gets the default type url for GetTopologyPathResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a FindAllShardsInKeyspaceRequest. */ - interface IFindAllShardsInKeyspaceRequest { + /** Properties of a TopologyCell. */ + interface ITopologyCell { - /** FindAllShardsInKeyspaceRequest keyspace */ - keyspace?: (string|null); + /** TopologyCell name */ + name?: (string|null); + + /** TopologyCell path */ + path?: (string|null); + + /** TopologyCell data */ + data?: (string|null); + + /** TopologyCell children */ + children?: (string[]|null); } - /** Represents a FindAllShardsInKeyspaceRequest. */ - class FindAllShardsInKeyspaceRequest implements IFindAllShardsInKeyspaceRequest { + /** Represents a TopologyCell. */ + class TopologyCell implements ITopologyCell { /** - * Constructs a new FindAllShardsInKeyspaceRequest. + * Constructs a new TopologyCell. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IFindAllShardsInKeyspaceRequest); + constructor(properties?: vtctldata.ITopologyCell); - /** FindAllShardsInKeyspaceRequest keyspace. */ - public keyspace: string; + /** TopologyCell name. */ + public name: string; + + /** TopologyCell path. */ + public path: string; + + /** TopologyCell data. 
*/ + public data: string; + + /** TopologyCell children. */ + public children: string[]; /** - * Creates a new FindAllShardsInKeyspaceRequest instance using the specified properties. + * Creates a new TopologyCell instance using the specified properties. * @param [properties] Properties to set - * @returns FindAllShardsInKeyspaceRequest instance + * @returns TopologyCell instance */ - public static create(properties?: vtctldata.IFindAllShardsInKeyspaceRequest): vtctldata.FindAllShardsInKeyspaceRequest; + public static create(properties?: vtctldata.ITopologyCell): vtctldata.TopologyCell; /** - * Encodes the specified FindAllShardsInKeyspaceRequest message. Does not implicitly {@link vtctldata.FindAllShardsInKeyspaceRequest.verify|verify} messages. - * @param message FindAllShardsInKeyspaceRequest message or plain object to encode + * Encodes the specified TopologyCell message. Does not implicitly {@link vtctldata.TopologyCell.verify|verify} messages. + * @param message TopologyCell message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IFindAllShardsInKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ITopologyCell, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified FindAllShardsInKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.FindAllShardsInKeyspaceRequest.verify|verify} messages. - * @param message FindAllShardsInKeyspaceRequest message or plain object to encode + * Encodes the specified TopologyCell message, length delimited. Does not implicitly {@link vtctldata.TopologyCell.verify|verify} messages. 
+ * @param message TopologyCell message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IFindAllShardsInKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ITopologyCell, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a FindAllShardsInKeyspaceRequest message from the specified reader or buffer. + * Decodes a TopologyCell message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns FindAllShardsInKeyspaceRequest + * @returns TopologyCell * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.FindAllShardsInKeyspaceRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.TopologyCell; /** - * Decodes a FindAllShardsInKeyspaceRequest message from the specified reader or buffer, length delimited. + * Decodes a TopologyCell message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns FindAllShardsInKeyspaceRequest + * @returns TopologyCell * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.FindAllShardsInKeyspaceRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.TopologyCell; /** - * Verifies a FindAllShardsInKeyspaceRequest message. + * Verifies a TopologyCell message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a FindAllShardsInKeyspaceRequest message from a plain object. Also converts values to their respective internal types. + * Creates a TopologyCell message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns FindAllShardsInKeyspaceRequest + * @returns TopologyCell */ - public static fromObject(object: { [k: string]: any }): vtctldata.FindAllShardsInKeyspaceRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.TopologyCell; /** - * Creates a plain object from a FindAllShardsInKeyspaceRequest message. Also converts values to other types if specified. - * @param message FindAllShardsInKeyspaceRequest + * Creates a plain object from a TopologyCell message. Also converts values to other types if specified. + * @param message TopologyCell * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.FindAllShardsInKeyspaceRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.TopologyCell, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this FindAllShardsInKeyspaceRequest to JSON. + * Converts this TopologyCell to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for FindAllShardsInKeyspaceRequest + * Gets the default type url for TopologyCell * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a FindAllShardsInKeyspaceResponse. */ - interface IFindAllShardsInKeyspaceResponse { + /** Properties of a GetVSchemaRequest. 
*/ + interface IGetVSchemaRequest { - /** FindAllShardsInKeyspaceResponse shards */ - shards?: ({ [k: string]: vtctldata.IShard }|null); + /** GetVSchemaRequest keyspace */ + keyspace?: (string|null); } - /** Represents a FindAllShardsInKeyspaceResponse. */ - class FindAllShardsInKeyspaceResponse implements IFindAllShardsInKeyspaceResponse { + /** Represents a GetVSchemaRequest. */ + class GetVSchemaRequest implements IGetVSchemaRequest { /** - * Constructs a new FindAllShardsInKeyspaceResponse. + * Constructs a new GetVSchemaRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IFindAllShardsInKeyspaceResponse); + constructor(properties?: vtctldata.IGetVSchemaRequest); - /** FindAllShardsInKeyspaceResponse shards. */ - public shards: { [k: string]: vtctldata.IShard }; + /** GetVSchemaRequest keyspace. */ + public keyspace: string; /** - * Creates a new FindAllShardsInKeyspaceResponse instance using the specified properties. + * Creates a new GetVSchemaRequest instance using the specified properties. * @param [properties] Properties to set - * @returns FindAllShardsInKeyspaceResponse instance + * @returns GetVSchemaRequest instance */ - public static create(properties?: vtctldata.IFindAllShardsInKeyspaceResponse): vtctldata.FindAllShardsInKeyspaceResponse; + public static create(properties?: vtctldata.IGetVSchemaRequest): vtctldata.GetVSchemaRequest; /** - * Encodes the specified FindAllShardsInKeyspaceResponse message. Does not implicitly {@link vtctldata.FindAllShardsInKeyspaceResponse.verify|verify} messages. - * @param message FindAllShardsInKeyspaceResponse message or plain object to encode + * Encodes the specified GetVSchemaRequest message. Does not implicitly {@link vtctldata.GetVSchemaRequest.verify|verify} messages. 
+ * @param message GetVSchemaRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IFindAllShardsInKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified FindAllShardsInKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.FindAllShardsInKeyspaceResponse.verify|verify} messages. - * @param message FindAllShardsInKeyspaceResponse message or plain object to encode + * Encodes the specified GetVSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.GetVSchemaRequest.verify|verify} messages. + * @param message GetVSchemaRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IFindAllShardsInKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a FindAllShardsInKeyspaceResponse message from the specified reader or buffer. + * Decodes a GetVSchemaRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns FindAllShardsInKeyspaceResponse + * @returns GetVSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.FindAllShardsInKeyspaceResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetVSchemaRequest; /** - * Decodes a FindAllShardsInKeyspaceResponse message from the specified reader or buffer, length delimited. 
+ * Decodes a GetVSchemaRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns FindAllShardsInKeyspaceResponse + * @returns GetVSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.FindAllShardsInKeyspaceResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetVSchemaRequest; /** - * Verifies a FindAllShardsInKeyspaceResponse message. + * Verifies a GetVSchemaRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a FindAllShardsInKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetVSchemaRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns FindAllShardsInKeyspaceResponse + * @returns GetVSchemaRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.FindAllShardsInKeyspaceResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetVSchemaRequest; /** - * Creates a plain object from a FindAllShardsInKeyspaceResponse message. Also converts values to other types if specified. - * @param message FindAllShardsInKeyspaceResponse + * Creates a plain object from a GetVSchemaRequest message. Also converts values to other types if specified. 
+ * @param message GetVSchemaRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.FindAllShardsInKeyspaceResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetVSchemaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this FindAllShardsInKeyspaceResponse to JSON. + * Converts this GetVSchemaRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for FindAllShardsInKeyspaceResponse + * Gets the default type url for GetVSchemaRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetBackupsRequest. */ - interface IGetBackupsRequest { - - /** GetBackupsRequest keyspace */ - keyspace?: (string|null); - - /** GetBackupsRequest shard */ - shard?: (string|null); - - /** GetBackupsRequest limit */ - limit?: (number|null); - - /** GetBackupsRequest detailed */ - detailed?: (boolean|null); + /** Properties of a GetVersionRequest. */ + interface IGetVersionRequest { - /** GetBackupsRequest detailed_limit */ - detailed_limit?: (number|null); + /** GetVersionRequest tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); } - /** Represents a GetBackupsRequest. */ - class GetBackupsRequest implements IGetBackupsRequest { + /** Represents a GetVersionRequest. */ + class GetVersionRequest implements IGetVersionRequest { /** - * Constructs a new GetBackupsRequest. + * Constructs a new GetVersionRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetBackupsRequest); - - /** GetBackupsRequest keyspace. */ - public keyspace: string; - - /** GetBackupsRequest shard. */ - public shard: string; - - /** GetBackupsRequest limit. 
*/ - public limit: number; - - /** GetBackupsRequest detailed. */ - public detailed: boolean; + constructor(properties?: vtctldata.IGetVersionRequest); - /** GetBackupsRequest detailed_limit. */ - public detailed_limit: number; + /** GetVersionRequest tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); /** - * Creates a new GetBackupsRequest instance using the specified properties. + * Creates a new GetVersionRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetBackupsRequest instance + * @returns GetVersionRequest instance */ - public static create(properties?: vtctldata.IGetBackupsRequest): vtctldata.GetBackupsRequest; + public static create(properties?: vtctldata.IGetVersionRequest): vtctldata.GetVersionRequest; /** - * Encodes the specified GetBackupsRequest message. Does not implicitly {@link vtctldata.GetBackupsRequest.verify|verify} messages. - * @param message GetBackupsRequest message or plain object to encode + * Encodes the specified GetVersionRequest message. Does not implicitly {@link vtctldata.GetVersionRequest.verify|verify} messages. + * @param message GetVersionRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetBackupsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetVersionRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetBackupsRequest message, length delimited. Does not implicitly {@link vtctldata.GetBackupsRequest.verify|verify} messages. - * @param message GetBackupsRequest message or plain object to encode + * Encodes the specified GetVersionRequest message, length delimited. Does not implicitly {@link vtctldata.GetVersionRequest.verify|verify} messages. 
+ * @param message GetVersionRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetBackupsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetVersionRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetBackupsRequest message from the specified reader or buffer. + * Decodes a GetVersionRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetBackupsRequest + * @returns GetVersionRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetBackupsRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetVersionRequest; /** - * Decodes a GetBackupsRequest message from the specified reader or buffer, length delimited. + * Decodes a GetVersionRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetBackupsRequest + * @returns GetVersionRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetBackupsRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetVersionRequest; /** - * Verifies a GetBackupsRequest message. + * Verifies a GetVersionRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetBackupsRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetVersionRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetBackupsRequest + * @returns GetVersionRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetBackupsRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetVersionRequest; /** - * Creates a plain object from a GetBackupsRequest message. Also converts values to other types if specified. - * @param message GetBackupsRequest + * Creates a plain object from a GetVersionRequest message. Also converts values to other types if specified. + * @param message GetVersionRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetBackupsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetVersionRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetBackupsRequest to JSON. + * Converts this GetVersionRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetBackupsRequest + * Gets the default type url for GetVersionRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetBackupsResponse. */ - interface IGetBackupsResponse { + /** Properties of a GetVersionResponse. 
*/ + interface IGetVersionResponse { - /** GetBackupsResponse backups */ - backups?: (mysqlctl.IBackupInfo[]|null); + /** GetVersionResponse version */ + version?: (string|null); } - /** Represents a GetBackupsResponse. */ - class GetBackupsResponse implements IGetBackupsResponse { + /** Represents a GetVersionResponse. */ + class GetVersionResponse implements IGetVersionResponse { /** - * Constructs a new GetBackupsResponse. + * Constructs a new GetVersionResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetBackupsResponse); + constructor(properties?: vtctldata.IGetVersionResponse); - /** GetBackupsResponse backups. */ - public backups: mysqlctl.IBackupInfo[]; + /** GetVersionResponse version. */ + public version: string; /** - * Creates a new GetBackupsResponse instance using the specified properties. + * Creates a new GetVersionResponse instance using the specified properties. * @param [properties] Properties to set - * @returns GetBackupsResponse instance + * @returns GetVersionResponse instance */ - public static create(properties?: vtctldata.IGetBackupsResponse): vtctldata.GetBackupsResponse; + public static create(properties?: vtctldata.IGetVersionResponse): vtctldata.GetVersionResponse; /** - * Encodes the specified GetBackupsResponse message. Does not implicitly {@link vtctldata.GetBackupsResponse.verify|verify} messages. - * @param message GetBackupsResponse message or plain object to encode + * Encodes the specified GetVersionResponse message. Does not implicitly {@link vtctldata.GetVersionResponse.verify|verify} messages. 
+ * @param message GetVersionResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetBackupsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetVersionResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetBackupsResponse message, length delimited. Does not implicitly {@link vtctldata.GetBackupsResponse.verify|verify} messages. - * @param message GetBackupsResponse message or plain object to encode + * Encodes the specified GetVersionResponse message, length delimited. Does not implicitly {@link vtctldata.GetVersionResponse.verify|verify} messages. + * @param message GetVersionResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetBackupsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetVersionResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetBackupsResponse message from the specified reader or buffer. + * Decodes a GetVersionResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetBackupsResponse + * @returns GetVersionResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetBackupsResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetVersionResponse; /** - * Decodes a GetBackupsResponse message from the specified reader or buffer, length delimited. + * Decodes a GetVersionResponse message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns GetBackupsResponse + * @returns GetVersionResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetBackupsResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetVersionResponse; /** - * Verifies a GetBackupsResponse message. + * Verifies a GetVersionResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetBackupsResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetVersionResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetBackupsResponse + * @returns GetVersionResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetBackupsResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetVersionResponse; /** - * Creates a plain object from a GetBackupsResponse message. Also converts values to other types if specified. - * @param message GetBackupsResponse + * Creates a plain object from a GetVersionResponse message. Also converts values to other types if specified. + * @param message GetVersionResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetBackupsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetVersionResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetBackupsResponse to JSON. + * Converts this GetVersionResponse to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetBackupsResponse + * Gets the default type url for GetVersionResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetCellInfoRequest. */ - interface IGetCellInfoRequest { + /** Properties of a GetVSchemaResponse. */ + interface IGetVSchemaResponse { - /** GetCellInfoRequest cell */ - cell?: (string|null); + /** GetVSchemaResponse v_schema */ + v_schema?: (vschema.IKeyspace|null); } - /** Represents a GetCellInfoRequest. */ - class GetCellInfoRequest implements IGetCellInfoRequest { + /** Represents a GetVSchemaResponse. */ + class GetVSchemaResponse implements IGetVSchemaResponse { /** - * Constructs a new GetCellInfoRequest. + * Constructs a new GetVSchemaResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetCellInfoRequest); + constructor(properties?: vtctldata.IGetVSchemaResponse); - /** GetCellInfoRequest cell. */ - public cell: string; + /** GetVSchemaResponse v_schema. */ + public v_schema?: (vschema.IKeyspace|null); /** - * Creates a new GetCellInfoRequest instance using the specified properties. + * Creates a new GetVSchemaResponse instance using the specified properties. * @param [properties] Properties to set - * @returns GetCellInfoRequest instance + * @returns GetVSchemaResponse instance */ - public static create(properties?: vtctldata.IGetCellInfoRequest): vtctldata.GetCellInfoRequest; + public static create(properties?: vtctldata.IGetVSchemaResponse): vtctldata.GetVSchemaResponse; /** - * Encodes the specified GetCellInfoRequest message. Does not implicitly {@link vtctldata.GetCellInfoRequest.verify|verify} messages. - * @param message GetCellInfoRequest message or plain object to encode + * Encodes the specified GetVSchemaResponse message. 
Does not implicitly {@link vtctldata.GetVSchemaResponse.verify|verify} messages. + * @param message GetVSchemaResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetCellInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetCellInfoRequest message, length delimited. Does not implicitly {@link vtctldata.GetCellInfoRequest.verify|verify} messages. - * @param message GetCellInfoRequest message or plain object to encode + * Encodes the specified GetVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.GetVSchemaResponse.verify|verify} messages. + * @param message GetVSchemaResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetCellInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetCellInfoRequest message from the specified reader or buffer. + * Decodes a GetVSchemaResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetCellInfoRequest + * @returns GetVSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetCellInfoRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetVSchemaResponse; /** - * Decodes a GetCellInfoRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a GetVSchemaResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetCellInfoRequest + * @returns GetVSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetCellInfoRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetVSchemaResponse; /** - * Verifies a GetCellInfoRequest message. + * Verifies a GetVSchemaResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetCellInfoRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetVSchemaResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetCellInfoRequest + * @returns GetVSchemaResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetCellInfoRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetVSchemaResponse; /** - * Creates a plain object from a GetCellInfoRequest message. Also converts values to other types if specified. - * @param message GetCellInfoRequest + * Creates a plain object from a GetVSchemaResponse message. Also converts values to other types if specified. 
+ * @param message GetVSchemaResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetCellInfoRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetVSchemaResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetCellInfoRequest to JSON. + * Converts this GetVSchemaResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetCellInfoRequest + * Gets the default type url for GetVSchemaResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetCellInfoResponse. */ - interface IGetCellInfoResponse { + /** Properties of a GetWorkflowsRequest. */ + interface IGetWorkflowsRequest { - /** GetCellInfoResponse cell_info */ - cell_info?: (topodata.ICellInfo|null); + /** GetWorkflowsRequest keyspace */ + keyspace?: (string|null); + + /** GetWorkflowsRequest active_only */ + active_only?: (boolean|null); + + /** GetWorkflowsRequest name_only */ + name_only?: (boolean|null); + + /** GetWorkflowsRequest workflow */ + workflow?: (string|null); + + /** GetWorkflowsRequest include_logs */ + include_logs?: (boolean|null); } - /** Represents a GetCellInfoResponse. */ - class GetCellInfoResponse implements IGetCellInfoResponse { + /** Represents a GetWorkflowsRequest. */ + class GetWorkflowsRequest implements IGetWorkflowsRequest { /** - * Constructs a new GetCellInfoResponse. + * Constructs a new GetWorkflowsRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetCellInfoResponse); + constructor(properties?: vtctldata.IGetWorkflowsRequest); - /** GetCellInfoResponse cell_info. */ - public cell_info?: (topodata.ICellInfo|null); + /** GetWorkflowsRequest keyspace. 
*/ + public keyspace: string; + + /** GetWorkflowsRequest active_only. */ + public active_only: boolean; + + /** GetWorkflowsRequest name_only. */ + public name_only: boolean; + + /** GetWorkflowsRequest workflow. */ + public workflow: string; + + /** GetWorkflowsRequest include_logs. */ + public include_logs: boolean; /** - * Creates a new GetCellInfoResponse instance using the specified properties. + * Creates a new GetWorkflowsRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetCellInfoResponse instance + * @returns GetWorkflowsRequest instance */ - public static create(properties?: vtctldata.IGetCellInfoResponse): vtctldata.GetCellInfoResponse; + public static create(properties?: vtctldata.IGetWorkflowsRequest): vtctldata.GetWorkflowsRequest; /** - * Encodes the specified GetCellInfoResponse message. Does not implicitly {@link vtctldata.GetCellInfoResponse.verify|verify} messages. - * @param message GetCellInfoResponse message or plain object to encode + * Encodes the specified GetWorkflowsRequest message. Does not implicitly {@link vtctldata.GetWorkflowsRequest.verify|verify} messages. + * @param message GetWorkflowsRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetCellInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetWorkflowsRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetCellInfoResponse message, length delimited. Does not implicitly {@link vtctldata.GetCellInfoResponse.verify|verify} messages. - * @param message GetCellInfoResponse message or plain object to encode + * Encodes the specified GetWorkflowsRequest message, length delimited. Does not implicitly {@link vtctldata.GetWorkflowsRequest.verify|verify} messages. 
+ * @param message GetWorkflowsRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetCellInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetWorkflowsRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetCellInfoResponse message from the specified reader or buffer. + * Decodes a GetWorkflowsRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetCellInfoResponse + * @returns GetWorkflowsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetCellInfoResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetWorkflowsRequest; /** - * Decodes a GetCellInfoResponse message from the specified reader or buffer, length delimited. + * Decodes a GetWorkflowsRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetCellInfoResponse + * @returns GetWorkflowsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetCellInfoResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetWorkflowsRequest; /** - * Verifies a GetCellInfoResponse message. + * Verifies a GetWorkflowsRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetCellInfoResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetWorkflowsRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetCellInfoResponse + * @returns GetWorkflowsRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetCellInfoResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.GetWorkflowsRequest; /** - * Creates a plain object from a GetCellInfoResponse message. Also converts values to other types if specified. - * @param message GetCellInfoResponse + * Creates a plain object from a GetWorkflowsRequest message. Also converts values to other types if specified. + * @param message GetWorkflowsRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetCellInfoResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetWorkflowsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetCellInfoResponse to JSON. + * Converts this GetWorkflowsRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetCellInfoResponse + * Gets the default type url for GetWorkflowsRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetCellInfoNamesRequest. */ - interface IGetCellInfoNamesRequest { + /** Properties of a GetWorkflowsResponse. 
*/ + interface IGetWorkflowsResponse { + + /** GetWorkflowsResponse workflows */ + workflows?: (vtctldata.IWorkflow[]|null); } - /** Represents a GetCellInfoNamesRequest. */ - class GetCellInfoNamesRequest implements IGetCellInfoNamesRequest { + /** Represents a GetWorkflowsResponse. */ + class GetWorkflowsResponse implements IGetWorkflowsResponse { /** - * Constructs a new GetCellInfoNamesRequest. + * Constructs a new GetWorkflowsResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetCellInfoNamesRequest); + constructor(properties?: vtctldata.IGetWorkflowsResponse); + + /** GetWorkflowsResponse workflows. */ + public workflows: vtctldata.IWorkflow[]; /** - * Creates a new GetCellInfoNamesRequest instance using the specified properties. + * Creates a new GetWorkflowsResponse instance using the specified properties. * @param [properties] Properties to set - * @returns GetCellInfoNamesRequest instance + * @returns GetWorkflowsResponse instance */ - public static create(properties?: vtctldata.IGetCellInfoNamesRequest): vtctldata.GetCellInfoNamesRequest; + public static create(properties?: vtctldata.IGetWorkflowsResponse): vtctldata.GetWorkflowsResponse; /** - * Encodes the specified GetCellInfoNamesRequest message. Does not implicitly {@link vtctldata.GetCellInfoNamesRequest.verify|verify} messages. - * @param message GetCellInfoNamesRequest message or plain object to encode + * Encodes the specified GetWorkflowsResponse message. Does not implicitly {@link vtctldata.GetWorkflowsResponse.verify|verify} messages. 
+ * @param message GetWorkflowsResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetCellInfoNamesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IGetWorkflowsResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetCellInfoNamesRequest message, length delimited. Does not implicitly {@link vtctldata.GetCellInfoNamesRequest.verify|verify} messages. - * @param message GetCellInfoNamesRequest message or plain object to encode + * Encodes the specified GetWorkflowsResponse message, length delimited. Does not implicitly {@link vtctldata.GetWorkflowsResponse.verify|verify} messages. + * @param message GetWorkflowsResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetCellInfoNamesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IGetWorkflowsResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetCellInfoNamesRequest message from the specified reader or buffer. + * Decodes a GetWorkflowsResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetCellInfoNamesRequest + * @returns GetWorkflowsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetCellInfoNamesRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetWorkflowsResponse; /** - * Decodes a GetCellInfoNamesRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a GetWorkflowsResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetCellInfoNamesRequest + * @returns GetWorkflowsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetCellInfoNamesRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetWorkflowsResponse; /** - * Verifies a GetCellInfoNamesRequest message. + * Verifies a GetWorkflowsResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetCellInfoNamesRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetWorkflowsResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetCellInfoNamesRequest + * @returns GetWorkflowsResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetCellInfoNamesRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.GetWorkflowsResponse; /** - * Creates a plain object from a GetCellInfoNamesRequest message. Also converts values to other types if specified. - * @param message GetCellInfoNamesRequest + * Creates a plain object from a GetWorkflowsResponse message. Also converts values to other types if specified. 
+ * @param message GetWorkflowsResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetCellInfoNamesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.GetWorkflowsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetCellInfoNamesRequest to JSON. + * Converts this GetWorkflowsResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetCellInfoNamesRequest + * Gets the default type url for GetWorkflowsResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetCellInfoNamesResponse. */ - interface IGetCellInfoNamesResponse { + /** Properties of an InitShardPrimaryRequest. */ + interface IInitShardPrimaryRequest { - /** GetCellInfoNamesResponse names */ - names?: (string[]|null); + /** InitShardPrimaryRequest keyspace */ + keyspace?: (string|null); + + /** InitShardPrimaryRequest shard */ + shard?: (string|null); + + /** InitShardPrimaryRequest primary_elect_tablet_alias */ + primary_elect_tablet_alias?: (topodata.ITabletAlias|null); + + /** InitShardPrimaryRequest force */ + force?: (boolean|null); + + /** InitShardPrimaryRequest wait_replicas_timeout */ + wait_replicas_timeout?: (vttime.IDuration|null); } - /** Represents a GetCellInfoNamesResponse. */ - class GetCellInfoNamesResponse implements IGetCellInfoNamesResponse { + /** Represents an InitShardPrimaryRequest. */ + class InitShardPrimaryRequest implements IInitShardPrimaryRequest { /** - * Constructs a new GetCellInfoNamesResponse. + * Constructs a new InitShardPrimaryRequest. 
* @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetCellInfoNamesResponse); + constructor(properties?: vtctldata.IInitShardPrimaryRequest); - /** GetCellInfoNamesResponse names. */ - public names: string[]; + /** InitShardPrimaryRequest keyspace. */ + public keyspace: string; + + /** InitShardPrimaryRequest shard. */ + public shard: string; + + /** InitShardPrimaryRequest primary_elect_tablet_alias. */ + public primary_elect_tablet_alias?: (topodata.ITabletAlias|null); + + /** InitShardPrimaryRequest force. */ + public force: boolean; + + /** InitShardPrimaryRequest wait_replicas_timeout. */ + public wait_replicas_timeout?: (vttime.IDuration|null); /** - * Creates a new GetCellInfoNamesResponse instance using the specified properties. + * Creates a new InitShardPrimaryRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetCellInfoNamesResponse instance + * @returns InitShardPrimaryRequest instance */ - public static create(properties?: vtctldata.IGetCellInfoNamesResponse): vtctldata.GetCellInfoNamesResponse; + public static create(properties?: vtctldata.IInitShardPrimaryRequest): vtctldata.InitShardPrimaryRequest; /** - * Encodes the specified GetCellInfoNamesResponse message. Does not implicitly {@link vtctldata.GetCellInfoNamesResponse.verify|verify} messages. - * @param message GetCellInfoNamesResponse message or plain object to encode + * Encodes the specified InitShardPrimaryRequest message. Does not implicitly {@link vtctldata.InitShardPrimaryRequest.verify|verify} messages. 
+ * @param message InitShardPrimaryRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetCellInfoNamesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IInitShardPrimaryRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetCellInfoNamesResponse message, length delimited. Does not implicitly {@link vtctldata.GetCellInfoNamesResponse.verify|verify} messages. - * @param message GetCellInfoNamesResponse message or plain object to encode + * Encodes the specified InitShardPrimaryRequest message, length delimited. Does not implicitly {@link vtctldata.InitShardPrimaryRequest.verify|verify} messages. + * @param message InitShardPrimaryRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetCellInfoNamesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IInitShardPrimaryRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetCellInfoNamesResponse message from the specified reader or buffer. + * Decodes an InitShardPrimaryRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetCellInfoNamesResponse + * @returns InitShardPrimaryRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetCellInfoNamesResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.InitShardPrimaryRequest; /** - * Decodes a GetCellInfoNamesResponse message from the specified reader or buffer, length delimited. 
+ * Decodes an InitShardPrimaryRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetCellInfoNamesResponse + * @returns InitShardPrimaryRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetCellInfoNamesResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.InitShardPrimaryRequest; /** - * Verifies a GetCellInfoNamesResponse message. + * Verifies an InitShardPrimaryRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetCellInfoNamesResponse message from a plain object. Also converts values to their respective internal types. + * Creates an InitShardPrimaryRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetCellInfoNamesResponse + * @returns InitShardPrimaryRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetCellInfoNamesResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.InitShardPrimaryRequest; /** - * Creates a plain object from a GetCellInfoNamesResponse message. Also converts values to other types if specified. - * @param message GetCellInfoNamesResponse + * Creates a plain object from an InitShardPrimaryRequest message. Also converts values to other types if specified. 
+ * @param message InitShardPrimaryRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetCellInfoNamesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.InitShardPrimaryRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetCellInfoNamesResponse to JSON. + * Converts this InitShardPrimaryRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetCellInfoNamesResponse + * Gets the default type url for InitShardPrimaryRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetCellsAliasesRequest. */ - interface IGetCellsAliasesRequest { + /** Properties of an InitShardPrimaryResponse. */ + interface IInitShardPrimaryResponse { + + /** InitShardPrimaryResponse events */ + events?: (logutil.IEvent[]|null); } - /** Represents a GetCellsAliasesRequest. */ - class GetCellsAliasesRequest implements IGetCellsAliasesRequest { + /** Represents an InitShardPrimaryResponse. */ + class InitShardPrimaryResponse implements IInitShardPrimaryResponse { /** - * Constructs a new GetCellsAliasesRequest. + * Constructs a new InitShardPrimaryResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetCellsAliasesRequest); + constructor(properties?: vtctldata.IInitShardPrimaryResponse); + + /** InitShardPrimaryResponse events. */ + public events: logutil.IEvent[]; /** - * Creates a new GetCellsAliasesRequest instance using the specified properties. + * Creates a new InitShardPrimaryResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns GetCellsAliasesRequest instance + * @returns InitShardPrimaryResponse instance */ - public static create(properties?: vtctldata.IGetCellsAliasesRequest): vtctldata.GetCellsAliasesRequest; + public static create(properties?: vtctldata.IInitShardPrimaryResponse): vtctldata.InitShardPrimaryResponse; /** - * Encodes the specified GetCellsAliasesRequest message. Does not implicitly {@link vtctldata.GetCellsAliasesRequest.verify|verify} messages. - * @param message GetCellsAliasesRequest message or plain object to encode + * Encodes the specified InitShardPrimaryResponse message. Does not implicitly {@link vtctldata.InitShardPrimaryResponse.verify|verify} messages. + * @param message InitShardPrimaryResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetCellsAliasesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IInitShardPrimaryResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetCellsAliasesRequest message, length delimited. Does not implicitly {@link vtctldata.GetCellsAliasesRequest.verify|verify} messages. - * @param message GetCellsAliasesRequest message or plain object to encode + * Encodes the specified InitShardPrimaryResponse message, length delimited. Does not implicitly {@link vtctldata.InitShardPrimaryResponse.verify|verify} messages. + * @param message InitShardPrimaryResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetCellsAliasesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IInitShardPrimaryResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetCellsAliasesRequest message from the specified reader or buffer. 
+ * Decodes an InitShardPrimaryResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetCellsAliasesRequest + * @returns InitShardPrimaryResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetCellsAliasesRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.InitShardPrimaryResponse; /** - * Decodes a GetCellsAliasesRequest message from the specified reader or buffer, length delimited. + * Decodes an InitShardPrimaryResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetCellsAliasesRequest + * @returns InitShardPrimaryResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetCellsAliasesRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.InitShardPrimaryResponse; /** - * Verifies a GetCellsAliasesRequest message. + * Verifies an InitShardPrimaryResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetCellsAliasesRequest message from a plain object. Also converts values to their respective internal types. + * Creates an InitShardPrimaryResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns GetCellsAliasesRequest + * @returns InitShardPrimaryResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetCellsAliasesRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.InitShardPrimaryResponse; /** - * Creates a plain object from a GetCellsAliasesRequest message. Also converts values to other types if specified. - * @param message GetCellsAliasesRequest + * Creates a plain object from an InitShardPrimaryResponse message. Also converts values to other types if specified. + * @param message InitShardPrimaryResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetCellsAliasesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.InitShardPrimaryResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetCellsAliasesRequest to JSON. + * Converts this InitShardPrimaryResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetCellsAliasesRequest + * Gets the default type url for InitShardPrimaryResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetCellsAliasesResponse. */ - interface IGetCellsAliasesResponse { + /** Properties of a LaunchSchemaMigrationRequest. */ + interface ILaunchSchemaMigrationRequest { - /** GetCellsAliasesResponse aliases */ - aliases?: ({ [k: string]: topodata.ICellsAlias }|null); + /** LaunchSchemaMigrationRequest keyspace */ + keyspace?: (string|null); + + /** LaunchSchemaMigrationRequest uuid */ + uuid?: (string|null); } - /** Represents a GetCellsAliasesResponse. 
*/ - class GetCellsAliasesResponse implements IGetCellsAliasesResponse { + /** Represents a LaunchSchemaMigrationRequest. */ + class LaunchSchemaMigrationRequest implements ILaunchSchemaMigrationRequest { /** - * Constructs a new GetCellsAliasesResponse. + * Constructs a new LaunchSchemaMigrationRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetCellsAliasesResponse); + constructor(properties?: vtctldata.ILaunchSchemaMigrationRequest); - /** GetCellsAliasesResponse aliases. */ - public aliases: { [k: string]: topodata.ICellsAlias }; + /** LaunchSchemaMigrationRequest keyspace. */ + public keyspace: string; + + /** LaunchSchemaMigrationRequest uuid. */ + public uuid: string; /** - * Creates a new GetCellsAliasesResponse instance using the specified properties. + * Creates a new LaunchSchemaMigrationRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetCellsAliasesResponse instance + * @returns LaunchSchemaMigrationRequest instance */ - public static create(properties?: vtctldata.IGetCellsAliasesResponse): vtctldata.GetCellsAliasesResponse; + public static create(properties?: vtctldata.ILaunchSchemaMigrationRequest): vtctldata.LaunchSchemaMigrationRequest; /** - * Encodes the specified GetCellsAliasesResponse message. Does not implicitly {@link vtctldata.GetCellsAliasesResponse.verify|verify} messages. - * @param message GetCellsAliasesResponse message or plain object to encode + * Encodes the specified LaunchSchemaMigrationRequest message. Does not implicitly {@link vtctldata.LaunchSchemaMigrationRequest.verify|verify} messages. 
+ * @param message LaunchSchemaMigrationRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetCellsAliasesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ILaunchSchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetCellsAliasesResponse message, length delimited. Does not implicitly {@link vtctldata.GetCellsAliasesResponse.verify|verify} messages. - * @param message GetCellsAliasesResponse message or plain object to encode + * Encodes the specified LaunchSchemaMigrationRequest message, length delimited. Does not implicitly {@link vtctldata.LaunchSchemaMigrationRequest.verify|verify} messages. + * @param message LaunchSchemaMigrationRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetCellsAliasesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ILaunchSchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetCellsAliasesResponse message from the specified reader or buffer. + * Decodes a LaunchSchemaMigrationRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetCellsAliasesResponse + * @returns LaunchSchemaMigrationRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetCellsAliasesResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.LaunchSchemaMigrationRequest; /** - * Decodes a GetCellsAliasesResponse message from the specified reader or buffer, length delimited. + * Decodes a LaunchSchemaMigrationRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetCellsAliasesResponse + * @returns LaunchSchemaMigrationRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetCellsAliasesResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.LaunchSchemaMigrationRequest; /** - * Verifies a GetCellsAliasesResponse message. + * Verifies a LaunchSchemaMigrationRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetCellsAliasesResponse message from a plain object. Also converts values to their respective internal types. + * Creates a LaunchSchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns GetCellsAliasesResponse + * @returns LaunchSchemaMigrationRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetCellsAliasesResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.LaunchSchemaMigrationRequest; /** - * Creates a plain object from a GetCellsAliasesResponse message. Also converts values to other types if specified. - * @param message GetCellsAliasesResponse + * Creates a plain object from a LaunchSchemaMigrationRequest message. Also converts values to other types if specified. + * @param message LaunchSchemaMigrationRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetCellsAliasesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.LaunchSchemaMigrationRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetCellsAliasesResponse to JSON. + * Converts this LaunchSchemaMigrationRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetCellsAliasesResponse + * Gets the default type url for LaunchSchemaMigrationRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetFullStatusRequest. */ - interface IGetFullStatusRequest { + /** Properties of a LaunchSchemaMigrationResponse. */ + interface ILaunchSchemaMigrationResponse { - /** GetFullStatusRequest tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); + /** LaunchSchemaMigrationResponse rows_affected_by_shard */ + rows_affected_by_shard?: ({ [k: string]: (number|Long) }|null); } - /** Represents a GetFullStatusRequest. 
*/ - class GetFullStatusRequest implements IGetFullStatusRequest { + /** Represents a LaunchSchemaMigrationResponse. */ + class LaunchSchemaMigrationResponse implements ILaunchSchemaMigrationResponse { /** - * Constructs a new GetFullStatusRequest. + * Constructs a new LaunchSchemaMigrationResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetFullStatusRequest); + constructor(properties?: vtctldata.ILaunchSchemaMigrationResponse); - /** GetFullStatusRequest tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); + /** LaunchSchemaMigrationResponse rows_affected_by_shard. */ + public rows_affected_by_shard: { [k: string]: (number|Long) }; /** - * Creates a new GetFullStatusRequest instance using the specified properties. + * Creates a new LaunchSchemaMigrationResponse instance using the specified properties. * @param [properties] Properties to set - * @returns GetFullStatusRequest instance + * @returns LaunchSchemaMigrationResponse instance */ - public static create(properties?: vtctldata.IGetFullStatusRequest): vtctldata.GetFullStatusRequest; + public static create(properties?: vtctldata.ILaunchSchemaMigrationResponse): vtctldata.LaunchSchemaMigrationResponse; /** - * Encodes the specified GetFullStatusRequest message. Does not implicitly {@link vtctldata.GetFullStatusRequest.verify|verify} messages. - * @param message GetFullStatusRequest message or plain object to encode + * Encodes the specified LaunchSchemaMigrationResponse message. Does not implicitly {@link vtctldata.LaunchSchemaMigrationResponse.verify|verify} messages. 
+ * @param message LaunchSchemaMigrationResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetFullStatusRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ILaunchSchemaMigrationResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetFullStatusRequest message, length delimited. Does not implicitly {@link vtctldata.GetFullStatusRequest.verify|verify} messages. - * @param message GetFullStatusRequest message or plain object to encode + * Encodes the specified LaunchSchemaMigrationResponse message, length delimited. Does not implicitly {@link vtctldata.LaunchSchemaMigrationResponse.verify|verify} messages. + * @param message LaunchSchemaMigrationResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetFullStatusRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ILaunchSchemaMigrationResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetFullStatusRequest message from the specified reader or buffer. + * Decodes a LaunchSchemaMigrationResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetFullStatusRequest + * @returns LaunchSchemaMigrationResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetFullStatusRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.LaunchSchemaMigrationResponse; /** - * Decodes a GetFullStatusRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a LaunchSchemaMigrationResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetFullStatusRequest + * @returns LaunchSchemaMigrationResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetFullStatusRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.LaunchSchemaMigrationResponse; /** - * Verifies a GetFullStatusRequest message. + * Verifies a LaunchSchemaMigrationResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetFullStatusRequest message from a plain object. Also converts values to their respective internal types. + * Creates a LaunchSchemaMigrationResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetFullStatusRequest + * @returns LaunchSchemaMigrationResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetFullStatusRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.LaunchSchemaMigrationResponse; /** - * Creates a plain object from a GetFullStatusRequest message. Also converts values to other types if specified. - * @param message GetFullStatusRequest + * Creates a plain object from a LaunchSchemaMigrationResponse message. Also converts values to other types if specified. 
+ * @param message LaunchSchemaMigrationResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetFullStatusRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.LaunchSchemaMigrationResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetFullStatusRequest to JSON. + * Converts this LaunchSchemaMigrationResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetFullStatusRequest + * Gets the default type url for LaunchSchemaMigrationResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetFullStatusResponse. */ - interface IGetFullStatusResponse { + /** Properties of a LookupVindexCreateRequest. */ + interface ILookupVindexCreateRequest { - /** GetFullStatusResponse status */ - status?: (replicationdata.IFullStatus|null); + /** LookupVindexCreateRequest keyspace */ + keyspace?: (string|null); + + /** LookupVindexCreateRequest workflow */ + workflow?: (string|null); + + /** LookupVindexCreateRequest cells */ + cells?: (string[]|null); + + /** LookupVindexCreateRequest vindex */ + vindex?: (vschema.IKeyspace|null); + + /** LookupVindexCreateRequest continue_after_copy_with_owner */ + continue_after_copy_with_owner?: (boolean|null); + + /** LookupVindexCreateRequest tablet_types */ + tablet_types?: (topodata.TabletType[]|null); + + /** LookupVindexCreateRequest tablet_selection_preference */ + tablet_selection_preference?: (tabletmanagerdata.TabletSelectionPreference|null); } - /** Represents a GetFullStatusResponse. */ - class GetFullStatusResponse implements IGetFullStatusResponse { + /** Represents a LookupVindexCreateRequest. 
*/ + class LookupVindexCreateRequest implements ILookupVindexCreateRequest { /** - * Constructs a new GetFullStatusResponse. + * Constructs a new LookupVindexCreateRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetFullStatusResponse); + constructor(properties?: vtctldata.ILookupVindexCreateRequest); - /** GetFullStatusResponse status. */ - public status?: (replicationdata.IFullStatus|null); + /** LookupVindexCreateRequest keyspace. */ + public keyspace: string; + + /** LookupVindexCreateRequest workflow. */ + public workflow: string; + + /** LookupVindexCreateRequest cells. */ + public cells: string[]; + + /** LookupVindexCreateRequest vindex. */ + public vindex?: (vschema.IKeyspace|null); + + /** LookupVindexCreateRequest continue_after_copy_with_owner. */ + public continue_after_copy_with_owner: boolean; + + /** LookupVindexCreateRequest tablet_types. */ + public tablet_types: topodata.TabletType[]; + + /** LookupVindexCreateRequest tablet_selection_preference. */ + public tablet_selection_preference: tabletmanagerdata.TabletSelectionPreference; /** - * Creates a new GetFullStatusResponse instance using the specified properties. + * Creates a new LookupVindexCreateRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetFullStatusResponse instance + * @returns LookupVindexCreateRequest instance */ - public static create(properties?: vtctldata.IGetFullStatusResponse): vtctldata.GetFullStatusResponse; + public static create(properties?: vtctldata.ILookupVindexCreateRequest): vtctldata.LookupVindexCreateRequest; /** - * Encodes the specified GetFullStatusResponse message. Does not implicitly {@link vtctldata.GetFullStatusResponse.verify|verify} messages. - * @param message GetFullStatusResponse message or plain object to encode + * Encodes the specified LookupVindexCreateRequest message. Does not implicitly {@link vtctldata.LookupVindexCreateRequest.verify|verify} messages. 
+ * @param message LookupVindexCreateRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetFullStatusResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ILookupVindexCreateRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetFullStatusResponse message, length delimited. Does not implicitly {@link vtctldata.GetFullStatusResponse.verify|verify} messages. - * @param message GetFullStatusResponse message or plain object to encode + * Encodes the specified LookupVindexCreateRequest message, length delimited. Does not implicitly {@link vtctldata.LookupVindexCreateRequest.verify|verify} messages. + * @param message LookupVindexCreateRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetFullStatusResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ILookupVindexCreateRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetFullStatusResponse message from the specified reader or buffer. + * Decodes a LookupVindexCreateRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetFullStatusResponse + * @returns LookupVindexCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetFullStatusResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.LookupVindexCreateRequest; /** - * Decodes a GetFullStatusResponse message from the specified reader or buffer, length delimited. 
+ * Decodes a LookupVindexCreateRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetFullStatusResponse + * @returns LookupVindexCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetFullStatusResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.LookupVindexCreateRequest; /** - * Verifies a GetFullStatusResponse message. + * Verifies a LookupVindexCreateRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetFullStatusResponse message from a plain object. Also converts values to their respective internal types. + * Creates a LookupVindexCreateRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetFullStatusResponse + * @returns LookupVindexCreateRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetFullStatusResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.LookupVindexCreateRequest; /** - * Creates a plain object from a GetFullStatusResponse message. Also converts values to other types if specified. - * @param message GetFullStatusResponse + * Creates a plain object from a LookupVindexCreateRequest message. Also converts values to other types if specified. 
+ * @param message LookupVindexCreateRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetFullStatusResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.LookupVindexCreateRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetFullStatusResponse to JSON. + * Converts this LookupVindexCreateRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetFullStatusResponse + * Gets the default type url for LookupVindexCreateRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetKeyspacesRequest. */ - interface IGetKeyspacesRequest { + /** Properties of a LookupVindexCreateResponse. */ + interface ILookupVindexCreateResponse { } - /** Represents a GetKeyspacesRequest. */ - class GetKeyspacesRequest implements IGetKeyspacesRequest { + /** Represents a LookupVindexCreateResponse. */ + class LookupVindexCreateResponse implements ILookupVindexCreateResponse { /** - * Constructs a new GetKeyspacesRequest. + * Constructs a new LookupVindexCreateResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetKeyspacesRequest); + constructor(properties?: vtctldata.ILookupVindexCreateResponse); /** - * Creates a new GetKeyspacesRequest instance using the specified properties. + * Creates a new LookupVindexCreateResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns GetKeyspacesRequest instance + * @returns LookupVindexCreateResponse instance */ - public static create(properties?: vtctldata.IGetKeyspacesRequest): vtctldata.GetKeyspacesRequest; + public static create(properties?: vtctldata.ILookupVindexCreateResponse): vtctldata.LookupVindexCreateResponse; /** - * Encodes the specified GetKeyspacesRequest message. Does not implicitly {@link vtctldata.GetKeyspacesRequest.verify|verify} messages. - * @param message GetKeyspacesRequest message or plain object to encode + * Encodes the specified LookupVindexCreateResponse message. Does not implicitly {@link vtctldata.LookupVindexCreateResponse.verify|verify} messages. + * @param message LookupVindexCreateResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetKeyspacesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ILookupVindexCreateResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetKeyspacesRequest message, length delimited. Does not implicitly {@link vtctldata.GetKeyspacesRequest.verify|verify} messages. - * @param message GetKeyspacesRequest message or plain object to encode + * Encodes the specified LookupVindexCreateResponse message, length delimited. Does not implicitly {@link vtctldata.LookupVindexCreateResponse.verify|verify} messages. + * @param message LookupVindexCreateResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetKeyspacesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ILookupVindexCreateResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetKeyspacesRequest message from the specified reader or buffer. 
+ * Decodes a LookupVindexCreateResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetKeyspacesRequest + * @returns LookupVindexCreateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetKeyspacesRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.LookupVindexCreateResponse; /** - * Decodes a GetKeyspacesRequest message from the specified reader or buffer, length delimited. + * Decodes a LookupVindexCreateResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetKeyspacesRequest + * @returns LookupVindexCreateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetKeyspacesRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.LookupVindexCreateResponse; /** - * Verifies a GetKeyspacesRequest message. + * Verifies a LookupVindexCreateResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetKeyspacesRequest message from a plain object. Also converts values to their respective internal types. + * Creates a LookupVindexCreateResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns GetKeyspacesRequest + * @returns LookupVindexCreateResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetKeyspacesRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.LookupVindexCreateResponse; /** - * Creates a plain object from a GetKeyspacesRequest message. Also converts values to other types if specified. - * @param message GetKeyspacesRequest + * Creates a plain object from a LookupVindexCreateResponse message. Also converts values to other types if specified. + * @param message LookupVindexCreateResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetKeyspacesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.LookupVindexCreateResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetKeyspacesRequest to JSON. + * Converts this LookupVindexCreateResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetKeyspacesRequest + * Gets the default type url for LookupVindexCreateResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetKeyspacesResponse. */ - interface IGetKeyspacesResponse { + /** Properties of a LookupVindexExternalizeRequest. */ + interface ILookupVindexExternalizeRequest { - /** GetKeyspacesResponse keyspaces */ - keyspaces?: (vtctldata.IKeyspace[]|null); + /** LookupVindexExternalizeRequest keyspace */ + keyspace?: (string|null); + + /** LookupVindexExternalizeRequest name */ + name?: (string|null); + + /** LookupVindexExternalizeRequest table_keyspace */ + table_keyspace?: (string|null); } - /** Represents a GetKeyspacesResponse. 
*/ - class GetKeyspacesResponse implements IGetKeyspacesResponse { + /** Represents a LookupVindexExternalizeRequest. */ + class LookupVindexExternalizeRequest implements ILookupVindexExternalizeRequest { /** - * Constructs a new GetKeyspacesResponse. + * Constructs a new LookupVindexExternalizeRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetKeyspacesResponse); + constructor(properties?: vtctldata.ILookupVindexExternalizeRequest); - /** GetKeyspacesResponse keyspaces. */ - public keyspaces: vtctldata.IKeyspace[]; + /** LookupVindexExternalizeRequest keyspace. */ + public keyspace: string; + + /** LookupVindexExternalizeRequest name. */ + public name: string; + + /** LookupVindexExternalizeRequest table_keyspace. */ + public table_keyspace: string; /** - * Creates a new GetKeyspacesResponse instance using the specified properties. + * Creates a new LookupVindexExternalizeRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetKeyspacesResponse instance + * @returns LookupVindexExternalizeRequest instance */ - public static create(properties?: vtctldata.IGetKeyspacesResponse): vtctldata.GetKeyspacesResponse; + public static create(properties?: vtctldata.ILookupVindexExternalizeRequest): vtctldata.LookupVindexExternalizeRequest; /** - * Encodes the specified GetKeyspacesResponse message. Does not implicitly {@link vtctldata.GetKeyspacesResponse.verify|verify} messages. - * @param message GetKeyspacesResponse message or plain object to encode + * Encodes the specified LookupVindexExternalizeRequest message. Does not implicitly {@link vtctldata.LookupVindexExternalizeRequest.verify|verify} messages. 
+ * @param message LookupVindexExternalizeRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetKeyspacesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ILookupVindexExternalizeRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetKeyspacesResponse message, length delimited. Does not implicitly {@link vtctldata.GetKeyspacesResponse.verify|verify} messages. - * @param message GetKeyspacesResponse message or plain object to encode + * Encodes the specified LookupVindexExternalizeRequest message, length delimited. Does not implicitly {@link vtctldata.LookupVindexExternalizeRequest.verify|verify} messages. + * @param message LookupVindexExternalizeRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetKeyspacesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ILookupVindexExternalizeRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetKeyspacesResponse message from the specified reader or buffer. + * Decodes a LookupVindexExternalizeRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetKeyspacesResponse + * @returns LookupVindexExternalizeRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetKeyspacesResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.LookupVindexExternalizeRequest; /** - * Decodes a GetKeyspacesResponse message from the specified reader or buffer, length delimited. + * Decodes a LookupVindexExternalizeRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetKeyspacesResponse + * @returns LookupVindexExternalizeRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetKeyspacesResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.LookupVindexExternalizeRequest; /** - * Verifies a GetKeyspacesResponse message. + * Verifies a LookupVindexExternalizeRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetKeyspacesResponse message from a plain object. Also converts values to their respective internal types. + * Creates a LookupVindexExternalizeRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns GetKeyspacesResponse + * @returns LookupVindexExternalizeRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetKeyspacesResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.LookupVindexExternalizeRequest; /** - * Creates a plain object from a GetKeyspacesResponse message. Also converts values to other types if specified. - * @param message GetKeyspacesResponse + * Creates a plain object from a LookupVindexExternalizeRequest message. Also converts values to other types if specified. + * @param message LookupVindexExternalizeRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetKeyspacesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.LookupVindexExternalizeRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetKeyspacesResponse to JSON. + * Converts this LookupVindexExternalizeRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetKeyspacesResponse + * Gets the default type url for LookupVindexExternalizeRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetKeyspaceRequest. */ - interface IGetKeyspaceRequest { + /** Properties of a LookupVindexExternalizeResponse. */ + interface ILookupVindexExternalizeResponse { - /** GetKeyspaceRequest keyspace */ - keyspace?: (string|null); + /** LookupVindexExternalizeResponse workflow_deleted */ + workflow_deleted?: (boolean|null); } - /** Represents a GetKeyspaceRequest. */ - class GetKeyspaceRequest implements IGetKeyspaceRequest { + /** Represents a LookupVindexExternalizeResponse. 
*/ + class LookupVindexExternalizeResponse implements ILookupVindexExternalizeResponse { /** - * Constructs a new GetKeyspaceRequest. + * Constructs a new LookupVindexExternalizeResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetKeyspaceRequest); + constructor(properties?: vtctldata.ILookupVindexExternalizeResponse); - /** GetKeyspaceRequest keyspace. */ - public keyspace: string; + /** LookupVindexExternalizeResponse workflow_deleted. */ + public workflow_deleted: boolean; /** - * Creates a new GetKeyspaceRequest instance using the specified properties. + * Creates a new LookupVindexExternalizeResponse instance using the specified properties. * @param [properties] Properties to set - * @returns GetKeyspaceRequest instance + * @returns LookupVindexExternalizeResponse instance */ - public static create(properties?: vtctldata.IGetKeyspaceRequest): vtctldata.GetKeyspaceRequest; + public static create(properties?: vtctldata.ILookupVindexExternalizeResponse): vtctldata.LookupVindexExternalizeResponse; /** - * Encodes the specified GetKeyspaceRequest message. Does not implicitly {@link vtctldata.GetKeyspaceRequest.verify|verify} messages. - * @param message GetKeyspaceRequest message or plain object to encode + * Encodes the specified LookupVindexExternalizeResponse message. Does not implicitly {@link vtctldata.LookupVindexExternalizeResponse.verify|verify} messages. + * @param message LookupVindexExternalizeResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ILookupVindexExternalizeResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.GetKeyspaceRequest.verify|verify} messages. 
- * @param message GetKeyspaceRequest message or plain object to encode + * Encodes the specified LookupVindexExternalizeResponse message, length delimited. Does not implicitly {@link vtctldata.LookupVindexExternalizeResponse.verify|verify} messages. + * @param message LookupVindexExternalizeResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ILookupVindexExternalizeResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetKeyspaceRequest message from the specified reader or buffer. + * Decodes a LookupVindexExternalizeResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetKeyspaceRequest + * @returns LookupVindexExternalizeResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetKeyspaceRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.LookupVindexExternalizeResponse; /** - * Decodes a GetKeyspaceRequest message from the specified reader or buffer, length delimited. + * Decodes a LookupVindexExternalizeResponse message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns GetKeyspaceRequest + * @returns LookupVindexExternalizeResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetKeyspaceRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.LookupVindexExternalizeResponse; /** - * Verifies a GetKeyspaceRequest message. + * Verifies a LookupVindexExternalizeResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetKeyspaceRequest message from a plain object. Also converts values to their respective internal types. + * Creates a LookupVindexExternalizeResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetKeyspaceRequest + * @returns LookupVindexExternalizeResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetKeyspaceRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.LookupVindexExternalizeResponse; /** - * Creates a plain object from a GetKeyspaceRequest message. Also converts values to other types if specified. - * @param message GetKeyspaceRequest + * Creates a plain object from a LookupVindexExternalizeResponse message. Also converts values to other types if specified. 
+ * @param message LookupVindexExternalizeResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetKeyspaceRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.LookupVindexExternalizeResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetKeyspaceRequest to JSON. + * Converts this LookupVindexExternalizeResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetKeyspaceRequest + * Gets the default type url for LookupVindexExternalizeResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetKeyspaceResponse. */ - interface IGetKeyspaceResponse { + /** Properties of a MaterializeCreateRequest. */ + interface IMaterializeCreateRequest { - /** GetKeyspaceResponse keyspace */ - keyspace?: (vtctldata.IKeyspace|null); + /** MaterializeCreateRequest settings */ + settings?: (vtctldata.IMaterializeSettings|null); } - /** Represents a GetKeyspaceResponse. */ - class GetKeyspaceResponse implements IGetKeyspaceResponse { + /** Represents a MaterializeCreateRequest. */ + class MaterializeCreateRequest implements IMaterializeCreateRequest { /** - * Constructs a new GetKeyspaceResponse. + * Constructs a new MaterializeCreateRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetKeyspaceResponse); + constructor(properties?: vtctldata.IMaterializeCreateRequest); - /** GetKeyspaceResponse keyspace. */ - public keyspace?: (vtctldata.IKeyspace|null); + /** MaterializeCreateRequest settings. */ + public settings?: (vtctldata.IMaterializeSettings|null); /** - * Creates a new GetKeyspaceResponse instance using the specified properties. 
+ * Creates a new MaterializeCreateRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetKeyspaceResponse instance + * @returns MaterializeCreateRequest instance */ - public static create(properties?: vtctldata.IGetKeyspaceResponse): vtctldata.GetKeyspaceResponse; + public static create(properties?: vtctldata.IMaterializeCreateRequest): vtctldata.MaterializeCreateRequest; /** - * Encodes the specified GetKeyspaceResponse message. Does not implicitly {@link vtctldata.GetKeyspaceResponse.verify|verify} messages. - * @param message GetKeyspaceResponse message or plain object to encode + * Encodes the specified MaterializeCreateRequest message. Does not implicitly {@link vtctldata.MaterializeCreateRequest.verify|verify} messages. + * @param message MaterializeCreateRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IMaterializeCreateRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.GetKeyspaceResponse.verify|verify} messages. - * @param message GetKeyspaceResponse message or plain object to encode + * Encodes the specified MaterializeCreateRequest message, length delimited. Does not implicitly {@link vtctldata.MaterializeCreateRequest.verify|verify} messages. 
+ * @param message MaterializeCreateRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IMaterializeCreateRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetKeyspaceResponse message from the specified reader or buffer. + * Decodes a MaterializeCreateRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetKeyspaceResponse + * @returns MaterializeCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetKeyspaceResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MaterializeCreateRequest; /** - * Decodes a GetKeyspaceResponse message from the specified reader or buffer, length delimited. + * Decodes a MaterializeCreateRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetKeyspaceResponse + * @returns MaterializeCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetKeyspaceResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MaterializeCreateRequest; /** - * Verifies a GetKeyspaceResponse message. + * Verifies a MaterializeCreateRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * Creates a MaterializeCreateRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetKeyspaceResponse + * @returns MaterializeCreateRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetKeyspaceResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.MaterializeCreateRequest; /** - * Creates a plain object from a GetKeyspaceResponse message. Also converts values to other types if specified. - * @param message GetKeyspaceResponse + * Creates a plain object from a MaterializeCreateRequest message. Also converts values to other types if specified. + * @param message MaterializeCreateRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetKeyspaceResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.MaterializeCreateRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetKeyspaceResponse to JSON. + * Converts this MaterializeCreateRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetKeyspaceResponse + * Gets the default type url for MaterializeCreateRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetPermissionsRequest. 
*/ - interface IGetPermissionsRequest { - - /** GetPermissionsRequest tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); + /** Properties of a MaterializeCreateResponse. */ + interface IMaterializeCreateResponse { } - /** Represents a GetPermissionsRequest. */ - class GetPermissionsRequest implements IGetPermissionsRequest { + /** Represents a MaterializeCreateResponse. */ + class MaterializeCreateResponse implements IMaterializeCreateResponse { /** - * Constructs a new GetPermissionsRequest. + * Constructs a new MaterializeCreateResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetPermissionsRequest); - - /** GetPermissionsRequest tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); + constructor(properties?: vtctldata.IMaterializeCreateResponse); /** - * Creates a new GetPermissionsRequest instance using the specified properties. + * Creates a new MaterializeCreateResponse instance using the specified properties. * @param [properties] Properties to set - * @returns GetPermissionsRequest instance + * @returns MaterializeCreateResponse instance */ - public static create(properties?: vtctldata.IGetPermissionsRequest): vtctldata.GetPermissionsRequest; + public static create(properties?: vtctldata.IMaterializeCreateResponse): vtctldata.MaterializeCreateResponse; /** - * Encodes the specified GetPermissionsRequest message. Does not implicitly {@link vtctldata.GetPermissionsRequest.verify|verify} messages. - * @param message GetPermissionsRequest message or plain object to encode + * Encodes the specified MaterializeCreateResponse message. Does not implicitly {@link vtctldata.MaterializeCreateResponse.verify|verify} messages. 
+ * @param message MaterializeCreateResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetPermissionsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IMaterializeCreateResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetPermissionsRequest message, length delimited. Does not implicitly {@link vtctldata.GetPermissionsRequest.verify|verify} messages. - * @param message GetPermissionsRequest message or plain object to encode + * Encodes the specified MaterializeCreateResponse message, length delimited. Does not implicitly {@link vtctldata.MaterializeCreateResponse.verify|verify} messages. + * @param message MaterializeCreateResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetPermissionsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IMaterializeCreateResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetPermissionsRequest message from the specified reader or buffer. + * Decodes a MaterializeCreateResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetPermissionsRequest + * @returns MaterializeCreateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetPermissionsRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MaterializeCreateResponse; /** - * Decodes a GetPermissionsRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a MaterializeCreateResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetPermissionsRequest + * @returns MaterializeCreateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetPermissionsRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MaterializeCreateResponse; /** - * Verifies a GetPermissionsRequest message. + * Verifies a MaterializeCreateResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetPermissionsRequest message from a plain object. Also converts values to their respective internal types. + * Creates a MaterializeCreateResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetPermissionsRequest + * @returns MaterializeCreateResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetPermissionsRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.MaterializeCreateResponse; /** - * Creates a plain object from a GetPermissionsRequest message. Also converts values to other types if specified. - * @param message GetPermissionsRequest + * Creates a plain object from a MaterializeCreateResponse message. Also converts values to other types if specified. 
+ * @param message MaterializeCreateResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetPermissionsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.MaterializeCreateResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetPermissionsRequest to JSON. + * Converts this MaterializeCreateResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetPermissionsRequest + * Gets the default type url for MaterializeCreateResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetPermissionsResponse. */ - interface IGetPermissionsResponse { + /** Properties of a MigrateCreateRequest. */ + interface IMigrateCreateRequest { - /** GetPermissionsResponse permissions */ - permissions?: (tabletmanagerdata.IPermissions|null); + /** MigrateCreateRequest workflow */ + workflow?: (string|null); + + /** MigrateCreateRequest source_keyspace */ + source_keyspace?: (string|null); + + /** MigrateCreateRequest target_keyspace */ + target_keyspace?: (string|null); + + /** MigrateCreateRequest mount_name */ + mount_name?: (string|null); + + /** MigrateCreateRequest cells */ + cells?: (string[]|null); + + /** MigrateCreateRequest tablet_types */ + tablet_types?: (topodata.TabletType[]|null); + + /** MigrateCreateRequest tablet_selection_preference */ + tablet_selection_preference?: (tabletmanagerdata.TabletSelectionPreference|null); + + /** MigrateCreateRequest all_tables */ + all_tables?: (boolean|null); + + /** MigrateCreateRequest include_tables */ + include_tables?: (string[]|null); + + /** MigrateCreateRequest exclude_tables */ + exclude_tables?: (string[]|null); + + /** MigrateCreateRequest 
source_time_zone */ + source_time_zone?: (string|null); + + /** MigrateCreateRequest on_ddl */ + on_ddl?: (string|null); + + /** MigrateCreateRequest stop_after_copy */ + stop_after_copy?: (boolean|null); + + /** MigrateCreateRequest drop_foreign_keys */ + drop_foreign_keys?: (boolean|null); + + /** MigrateCreateRequest defer_secondary_keys */ + defer_secondary_keys?: (boolean|null); + + /** MigrateCreateRequest auto_start */ + auto_start?: (boolean|null); + + /** MigrateCreateRequest no_routing_rules */ + no_routing_rules?: (boolean|null); } - /** Represents a GetPermissionsResponse. */ - class GetPermissionsResponse implements IGetPermissionsResponse { + /** Represents a MigrateCreateRequest. */ + class MigrateCreateRequest implements IMigrateCreateRequest { /** - * Constructs a new GetPermissionsResponse. + * Constructs a new MigrateCreateRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetPermissionsResponse); + constructor(properties?: vtctldata.IMigrateCreateRequest); - /** GetPermissionsResponse permissions. */ - public permissions?: (tabletmanagerdata.IPermissions|null); + /** MigrateCreateRequest workflow. */ + public workflow: string; + + /** MigrateCreateRequest source_keyspace. */ + public source_keyspace: string; + + /** MigrateCreateRequest target_keyspace. */ + public target_keyspace: string; + + /** MigrateCreateRequest mount_name. */ + public mount_name: string; + + /** MigrateCreateRequest cells. */ + public cells: string[]; + + /** MigrateCreateRequest tablet_types. */ + public tablet_types: topodata.TabletType[]; + + /** MigrateCreateRequest tablet_selection_preference. */ + public tablet_selection_preference: tabletmanagerdata.TabletSelectionPreference; + + /** MigrateCreateRequest all_tables. */ + public all_tables: boolean; + + /** MigrateCreateRequest include_tables. */ + public include_tables: string[]; + + /** MigrateCreateRequest exclude_tables. 
*/ + public exclude_tables: string[]; + + /** MigrateCreateRequest source_time_zone. */ + public source_time_zone: string; + + /** MigrateCreateRequest on_ddl. */ + public on_ddl: string; + + /** MigrateCreateRequest stop_after_copy. */ + public stop_after_copy: boolean; + + /** MigrateCreateRequest drop_foreign_keys. */ + public drop_foreign_keys: boolean; + + /** MigrateCreateRequest defer_secondary_keys. */ + public defer_secondary_keys: boolean; + + /** MigrateCreateRequest auto_start. */ + public auto_start: boolean; + + /** MigrateCreateRequest no_routing_rules. */ + public no_routing_rules: boolean; /** - * Creates a new GetPermissionsResponse instance using the specified properties. + * Creates a new MigrateCreateRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetPermissionsResponse instance + * @returns MigrateCreateRequest instance */ - public static create(properties?: vtctldata.IGetPermissionsResponse): vtctldata.GetPermissionsResponse; + public static create(properties?: vtctldata.IMigrateCreateRequest): vtctldata.MigrateCreateRequest; /** - * Encodes the specified GetPermissionsResponse message. Does not implicitly {@link vtctldata.GetPermissionsResponse.verify|verify} messages. - * @param message GetPermissionsResponse message or plain object to encode + * Encodes the specified MigrateCreateRequest message. Does not implicitly {@link vtctldata.MigrateCreateRequest.verify|verify} messages. + * @param message MigrateCreateRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetPermissionsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IMigrateCreateRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetPermissionsResponse message, length delimited. 
Does not implicitly {@link vtctldata.GetPermissionsResponse.verify|verify} messages. - * @param message GetPermissionsResponse message or plain object to encode + * Encodes the specified MigrateCreateRequest message, length delimited. Does not implicitly {@link vtctldata.MigrateCreateRequest.verify|verify} messages. + * @param message MigrateCreateRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetPermissionsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IMigrateCreateRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetPermissionsResponse message from the specified reader or buffer. + * Decodes a MigrateCreateRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetPermissionsResponse + * @returns MigrateCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetPermissionsResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MigrateCreateRequest; /** - * Decodes a GetPermissionsResponse message from the specified reader or buffer, length delimited. + * Decodes a MigrateCreateRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns GetPermissionsResponse + * @returns MigrateCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetPermissionsResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MigrateCreateRequest; /** - * Verifies a GetPermissionsResponse message. + * Verifies a MigrateCreateRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetPermissionsResponse message from a plain object. Also converts values to their respective internal types. + * Creates a MigrateCreateRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetPermissionsResponse + * @returns MigrateCreateRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetPermissionsResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.MigrateCreateRequest; /** - * Creates a plain object from a GetPermissionsResponse message. Also converts values to other types if specified. - * @param message GetPermissionsResponse + * Creates a plain object from a MigrateCreateRequest message. Also converts values to other types if specified. + * @param message MigrateCreateRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetPermissionsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.MigrateCreateRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetPermissionsResponse to JSON. 
+ * Converts this MigrateCreateRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetPermissionsResponse + * Gets the default type url for MigrateCreateRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetRoutingRulesRequest. */ - interface IGetRoutingRulesRequest { + /** Properties of a MigrateCompleteRequest. */ + interface IMigrateCompleteRequest { + + /** MigrateCompleteRequest workflow */ + workflow?: (string|null); + + /** MigrateCompleteRequest target_keyspace */ + target_keyspace?: (string|null); + + /** MigrateCompleteRequest keep_data */ + keep_data?: (boolean|null); + + /** MigrateCompleteRequest keep_routing_rules */ + keep_routing_rules?: (boolean|null); + + /** MigrateCompleteRequest rename_tables */ + rename_tables?: (boolean|null); + + /** MigrateCompleteRequest dry_run */ + dry_run?: (boolean|null); } - /** Represents a GetRoutingRulesRequest. */ - class GetRoutingRulesRequest implements IGetRoutingRulesRequest { + /** Represents a MigrateCompleteRequest. */ + class MigrateCompleteRequest implements IMigrateCompleteRequest { /** - * Constructs a new GetRoutingRulesRequest. + * Constructs a new MigrateCompleteRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetRoutingRulesRequest); + constructor(properties?: vtctldata.IMigrateCompleteRequest); + + /** MigrateCompleteRequest workflow. */ + public workflow: string; + + /** MigrateCompleteRequest target_keyspace. */ + public target_keyspace: string; + + /** MigrateCompleteRequest keep_data. */ + public keep_data: boolean; + + /** MigrateCompleteRequest keep_routing_rules. */ + public keep_routing_rules: boolean; + + /** MigrateCompleteRequest rename_tables. */ + public rename_tables: boolean; + + /** MigrateCompleteRequest dry_run. 
*/ + public dry_run: boolean; /** - * Creates a new GetRoutingRulesRequest instance using the specified properties. + * Creates a new MigrateCompleteRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetRoutingRulesRequest instance + * @returns MigrateCompleteRequest instance */ - public static create(properties?: vtctldata.IGetRoutingRulesRequest): vtctldata.GetRoutingRulesRequest; + public static create(properties?: vtctldata.IMigrateCompleteRequest): vtctldata.MigrateCompleteRequest; /** - * Encodes the specified GetRoutingRulesRequest message. Does not implicitly {@link vtctldata.GetRoutingRulesRequest.verify|verify} messages. - * @param message GetRoutingRulesRequest message or plain object to encode + * Encodes the specified MigrateCompleteRequest message. Does not implicitly {@link vtctldata.MigrateCompleteRequest.verify|verify} messages. + * @param message MigrateCompleteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IMigrateCompleteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.GetRoutingRulesRequest.verify|verify} messages. - * @param message GetRoutingRulesRequest message or plain object to encode + * Encodes the specified MigrateCompleteRequest message, length delimited. Does not implicitly {@link vtctldata.MigrateCompleteRequest.verify|verify} messages. 
+ * @param message MigrateCompleteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IMigrateCompleteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetRoutingRulesRequest message from the specified reader or buffer. + * Decodes a MigrateCompleteRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetRoutingRulesRequest + * @returns MigrateCompleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetRoutingRulesRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MigrateCompleteRequest; /** - * Decodes a GetRoutingRulesRequest message from the specified reader or buffer, length delimited. + * Decodes a MigrateCompleteRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetRoutingRulesRequest + * @returns MigrateCompleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetRoutingRulesRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MigrateCompleteRequest; /** - * Verifies a GetRoutingRulesRequest message. + * Verifies a MigrateCompleteRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. + * Creates a MigrateCompleteRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetRoutingRulesRequest + * @returns MigrateCompleteRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetRoutingRulesRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.MigrateCompleteRequest; /** - * Creates a plain object from a GetRoutingRulesRequest message. Also converts values to other types if specified. - * @param message GetRoutingRulesRequest + * Creates a plain object from a MigrateCompleteRequest message. Also converts values to other types if specified. + * @param message MigrateCompleteRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetRoutingRulesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.MigrateCompleteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetRoutingRulesRequest to JSON. + * Converts this MigrateCompleteRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetRoutingRulesRequest + * Gets the default type url for MigrateCompleteRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetRoutingRulesResponse. */ - interface IGetRoutingRulesResponse { + /** Properties of a MigrateCompleteResponse. 
*/ + interface IMigrateCompleteResponse { - /** GetRoutingRulesResponse routing_rules */ - routing_rules?: (vschema.IRoutingRules|null); + /** MigrateCompleteResponse summary */ + summary?: (string|null); + + /** MigrateCompleteResponse dry_run_results */ + dry_run_results?: (string[]|null); } - /** Represents a GetRoutingRulesResponse. */ - class GetRoutingRulesResponse implements IGetRoutingRulesResponse { + /** Represents a MigrateCompleteResponse. */ + class MigrateCompleteResponse implements IMigrateCompleteResponse { /** - * Constructs a new GetRoutingRulesResponse. + * Constructs a new MigrateCompleteResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetRoutingRulesResponse); + constructor(properties?: vtctldata.IMigrateCompleteResponse); - /** GetRoutingRulesResponse routing_rules. */ - public routing_rules?: (vschema.IRoutingRules|null); + /** MigrateCompleteResponse summary. */ + public summary: string; + + /** MigrateCompleteResponse dry_run_results. */ + public dry_run_results: string[]; /** - * Creates a new GetRoutingRulesResponse instance using the specified properties. + * Creates a new MigrateCompleteResponse instance using the specified properties. * @param [properties] Properties to set - * @returns GetRoutingRulesResponse instance + * @returns MigrateCompleteResponse instance */ - public static create(properties?: vtctldata.IGetRoutingRulesResponse): vtctldata.GetRoutingRulesResponse; + public static create(properties?: vtctldata.IMigrateCompleteResponse): vtctldata.MigrateCompleteResponse; /** - * Encodes the specified GetRoutingRulesResponse message. Does not implicitly {@link vtctldata.GetRoutingRulesResponse.verify|verify} messages. - * @param message GetRoutingRulesResponse message or plain object to encode + * Encodes the specified MigrateCompleteResponse message. Does not implicitly {@link vtctldata.MigrateCompleteResponse.verify|verify} messages. 
+ * @param message MigrateCompleteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IMigrateCompleteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.GetRoutingRulesResponse.verify|verify} messages. - * @param message GetRoutingRulesResponse message or plain object to encode + * Encodes the specified MigrateCompleteResponse message, length delimited. Does not implicitly {@link vtctldata.MigrateCompleteResponse.verify|verify} messages. + * @param message MigrateCompleteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IMigrateCompleteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetRoutingRulesResponse message from the specified reader or buffer. + * Decodes a MigrateCompleteResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetRoutingRulesResponse + * @returns MigrateCompleteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetRoutingRulesResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MigrateCompleteResponse; /** - * Decodes a GetRoutingRulesResponse message from the specified reader or buffer, length delimited. 
+ * Decodes a MigrateCompleteResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetRoutingRulesResponse + * @returns MigrateCompleteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetRoutingRulesResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MigrateCompleteResponse; /** - * Verifies a GetRoutingRulesResponse message. + * Verifies a MigrateCompleteResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. + * Creates a MigrateCompleteResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetRoutingRulesResponse + * @returns MigrateCompleteResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetRoutingRulesResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.MigrateCompleteResponse; /** - * Creates a plain object from a GetRoutingRulesResponse message. Also converts values to other types if specified. - * @param message GetRoutingRulesResponse + * Creates a plain object from a MigrateCompleteResponse message. Also converts values to other types if specified. 
+ * @param message MigrateCompleteResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetRoutingRulesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.MigrateCompleteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetRoutingRulesResponse to JSON. + * Converts this MigrateCompleteResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetRoutingRulesResponse + * Gets the default type url for MigrateCompleteResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetSchemaRequest. */ - interface IGetSchemaRequest { - - /** GetSchemaRequest tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); - - /** GetSchemaRequest tables */ - tables?: (string[]|null); - - /** GetSchemaRequest exclude_tables */ - exclude_tables?: (string[]|null); + /** Properties of a MountRegisterRequest. */ + interface IMountRegisterRequest { - /** GetSchemaRequest include_views */ - include_views?: (boolean|null); + /** MountRegisterRequest topo_type */ + topo_type?: (string|null); - /** GetSchemaRequest table_names_only */ - table_names_only?: (boolean|null); + /** MountRegisterRequest topo_server */ + topo_server?: (string|null); - /** GetSchemaRequest table_sizes_only */ - table_sizes_only?: (boolean|null); + /** MountRegisterRequest topo_root */ + topo_root?: (string|null); - /** GetSchemaRequest table_schema_only */ - table_schema_only?: (boolean|null); + /** MountRegisterRequest name */ + name?: (string|null); } - /** Represents a GetSchemaRequest. */ - class GetSchemaRequest implements IGetSchemaRequest { + /** Represents a MountRegisterRequest. 
*/ + class MountRegisterRequest implements IMountRegisterRequest { /** - * Constructs a new GetSchemaRequest. + * Constructs a new MountRegisterRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetSchemaRequest); - - /** GetSchemaRequest tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); - - /** GetSchemaRequest tables. */ - public tables: string[]; - - /** GetSchemaRequest exclude_tables. */ - public exclude_tables: string[]; + constructor(properties?: vtctldata.IMountRegisterRequest); - /** GetSchemaRequest include_views. */ - public include_views: boolean; + /** MountRegisterRequest topo_type. */ + public topo_type: string; - /** GetSchemaRequest table_names_only. */ - public table_names_only: boolean; + /** MountRegisterRequest topo_server. */ + public topo_server: string; - /** GetSchemaRequest table_sizes_only. */ - public table_sizes_only: boolean; + /** MountRegisterRequest topo_root. */ + public topo_root: string; - /** GetSchemaRequest table_schema_only. */ - public table_schema_only: boolean; + /** MountRegisterRequest name. */ + public name: string; /** - * Creates a new GetSchemaRequest instance using the specified properties. + * Creates a new MountRegisterRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetSchemaRequest instance + * @returns MountRegisterRequest instance */ - public static create(properties?: vtctldata.IGetSchemaRequest): vtctldata.GetSchemaRequest; + public static create(properties?: vtctldata.IMountRegisterRequest): vtctldata.MountRegisterRequest; /** - * Encodes the specified GetSchemaRequest message. Does not implicitly {@link vtctldata.GetSchemaRequest.verify|verify} messages. - * @param message GetSchemaRequest message or plain object to encode + * Encodes the specified MountRegisterRequest message. Does not implicitly {@link vtctldata.MountRegisterRequest.verify|verify} messages. 
+ * @param message MountRegisterRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IMountRegisterRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.GetSchemaRequest.verify|verify} messages. - * @param message GetSchemaRequest message or plain object to encode + * Encodes the specified MountRegisterRequest message, length delimited. Does not implicitly {@link vtctldata.MountRegisterRequest.verify|verify} messages. + * @param message MountRegisterRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IMountRegisterRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetSchemaRequest message from the specified reader or buffer. + * Decodes a MountRegisterRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetSchemaRequest + * @returns MountRegisterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSchemaRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MountRegisterRequest; /** - * Decodes a GetSchemaRequest message from the specified reader or buffer, length delimited. + * Decodes a MountRegisterRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns GetSchemaRequest + * @returns MountRegisterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSchemaRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MountRegisterRequest; /** - * Verifies a GetSchemaRequest message. + * Verifies a MountRegisterRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetSchemaRequest message from a plain object. Also converts values to their respective internal types. + * Creates a MountRegisterRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetSchemaRequest + * @returns MountRegisterRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetSchemaRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.MountRegisterRequest; /** - * Creates a plain object from a GetSchemaRequest message. Also converts values to other types if specified. - * @param message GetSchemaRequest + * Creates a plain object from a MountRegisterRequest message. Also converts values to other types if specified. + * @param message MountRegisterRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetSchemaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.MountRegisterRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetSchemaRequest to JSON. + * Converts this MountRegisterRequest to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetSchemaRequest + * Gets the default type url for MountRegisterRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetSchemaResponse. */ - interface IGetSchemaResponse { - - /** GetSchemaResponse schema */ - schema?: (tabletmanagerdata.ISchemaDefinition|null); + /** Properties of a MountRegisterResponse. */ + interface IMountRegisterResponse { } - /** Represents a GetSchemaResponse. */ - class GetSchemaResponse implements IGetSchemaResponse { + /** Represents a MountRegisterResponse. */ + class MountRegisterResponse implements IMountRegisterResponse { /** - * Constructs a new GetSchemaResponse. + * Constructs a new MountRegisterResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetSchemaResponse); - - /** GetSchemaResponse schema. */ - public schema?: (tabletmanagerdata.ISchemaDefinition|null); + constructor(properties?: vtctldata.IMountRegisterResponse); /** - * Creates a new GetSchemaResponse instance using the specified properties. + * Creates a new MountRegisterResponse instance using the specified properties. * @param [properties] Properties to set - * @returns GetSchemaResponse instance + * @returns MountRegisterResponse instance */ - public static create(properties?: vtctldata.IGetSchemaResponse): vtctldata.GetSchemaResponse; + public static create(properties?: vtctldata.IMountRegisterResponse): vtctldata.MountRegisterResponse; /** - * Encodes the specified GetSchemaResponse message. Does not implicitly {@link vtctldata.GetSchemaResponse.verify|verify} messages. - * @param message GetSchemaResponse message or plain object to encode + * Encodes the specified MountRegisterResponse message. 
Does not implicitly {@link vtctldata.MountRegisterResponse.verify|verify} messages. + * @param message MountRegisterResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IMountRegisterResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.GetSchemaResponse.verify|verify} messages. - * @param message GetSchemaResponse message or plain object to encode + * Encodes the specified MountRegisterResponse message, length delimited. Does not implicitly {@link vtctldata.MountRegisterResponse.verify|verify} messages. + * @param message MountRegisterResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IMountRegisterResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetSchemaResponse message from the specified reader or buffer. + * Decodes a MountRegisterResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetSchemaResponse + * @returns MountRegisterResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSchemaResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MountRegisterResponse; /** - * Decodes a GetSchemaResponse message from the specified reader or buffer, length delimited. 
+ * Decodes a MountRegisterResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetSchemaResponse + * @returns MountRegisterResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSchemaResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MountRegisterResponse; /** - * Verifies a GetSchemaResponse message. + * Verifies a MountRegisterResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetSchemaResponse message from a plain object. Also converts values to their respective internal types. + * Creates a MountRegisterResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetSchemaResponse + * @returns MountRegisterResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetSchemaResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.MountRegisterResponse; /** - * Creates a plain object from a GetSchemaResponse message. Also converts values to other types if specified. - * @param message GetSchemaResponse + * Creates a plain object from a MountRegisterResponse message. Also converts values to other types if specified. 
+ * @param message MountRegisterResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetSchemaResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.MountRegisterResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetSchemaResponse to JSON. + * Converts this MountRegisterResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetSchemaResponse + * Gets the default type url for MountRegisterResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetShardRequest. */ - interface IGetShardRequest { - - /** GetShardRequest keyspace */ - keyspace?: (string|null); + /** Properties of a MountUnregisterRequest. */ + interface IMountUnregisterRequest { - /** GetShardRequest shard_name */ - shard_name?: (string|null); + /** MountUnregisterRequest name */ + name?: (string|null); } - /** Represents a GetShardRequest. */ - class GetShardRequest implements IGetShardRequest { + /** Represents a MountUnregisterRequest. */ + class MountUnregisterRequest implements IMountUnregisterRequest { /** - * Constructs a new GetShardRequest. + * Constructs a new MountUnregisterRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetShardRequest); - - /** GetShardRequest keyspace. */ - public keyspace: string; + constructor(properties?: vtctldata.IMountUnregisterRequest); - /** GetShardRequest shard_name. */ - public shard_name: string; + /** MountUnregisterRequest name. */ + public name: string; /** - * Creates a new GetShardRequest instance using the specified properties. + * Creates a new MountUnregisterRequest instance using the specified properties. 
* @param [properties] Properties to set - * @returns GetShardRequest instance + * @returns MountUnregisterRequest instance */ - public static create(properties?: vtctldata.IGetShardRequest): vtctldata.GetShardRequest; + public static create(properties?: vtctldata.IMountUnregisterRequest): vtctldata.MountUnregisterRequest; /** - * Encodes the specified GetShardRequest message. Does not implicitly {@link vtctldata.GetShardRequest.verify|verify} messages. - * @param message GetShardRequest message or plain object to encode + * Encodes the specified MountUnregisterRequest message. Does not implicitly {@link vtctldata.MountUnregisterRequest.verify|verify} messages. + * @param message MountUnregisterRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IMountUnregisterRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetShardRequest message, length delimited. Does not implicitly {@link vtctldata.GetShardRequest.verify|verify} messages. - * @param message GetShardRequest message or plain object to encode + * Encodes the specified MountUnregisterRequest message, length delimited. Does not implicitly {@link vtctldata.MountUnregisterRequest.verify|verify} messages. + * @param message MountUnregisterRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IMountUnregisterRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetShardRequest message from the specified reader or buffer. + * Decodes a MountUnregisterRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetShardRequest + * @returns MountUnregisterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetShardRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MountUnregisterRequest; /** - * Decodes a GetShardRequest message from the specified reader or buffer, length delimited. + * Decodes a MountUnregisterRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetShardRequest + * @returns MountUnregisterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetShardRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MountUnregisterRequest; /** - * Verifies a GetShardRequest message. + * Verifies a MountUnregisterRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetShardRequest message from a plain object. Also converts values to their respective internal types. + * Creates a MountUnregisterRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns GetShardRequest + * @returns MountUnregisterRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetShardRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.MountUnregisterRequest; /** - * Creates a plain object from a GetShardRequest message. Also converts values to other types if specified. - * @param message GetShardRequest + * Creates a plain object from a MountUnregisterRequest message. Also converts values to other types if specified. + * @param message MountUnregisterRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.MountUnregisterRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetShardRequest to JSON. + * Converts this MountUnregisterRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetShardRequest + * Gets the default type url for MountUnregisterRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetShardResponse. */ - interface IGetShardResponse { - - /** GetShardResponse shard */ - shard?: (vtctldata.IShard|null); + /** Properties of a MountUnregisterResponse. */ + interface IMountUnregisterResponse { } - /** Represents a GetShardResponse. */ - class GetShardResponse implements IGetShardResponse { + /** Represents a MountUnregisterResponse. */ + class MountUnregisterResponse implements IMountUnregisterResponse { /** - * Constructs a new GetShardResponse. + * Constructs a new MountUnregisterResponse. 
* @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetShardResponse); - - /** GetShardResponse shard. */ - public shard?: (vtctldata.IShard|null); + constructor(properties?: vtctldata.IMountUnregisterResponse); /** - * Creates a new GetShardResponse instance using the specified properties. + * Creates a new MountUnregisterResponse instance using the specified properties. * @param [properties] Properties to set - * @returns GetShardResponse instance + * @returns MountUnregisterResponse instance */ - public static create(properties?: vtctldata.IGetShardResponse): vtctldata.GetShardResponse; + public static create(properties?: vtctldata.IMountUnregisterResponse): vtctldata.MountUnregisterResponse; /** - * Encodes the specified GetShardResponse message. Does not implicitly {@link vtctldata.GetShardResponse.verify|verify} messages. - * @param message GetShardResponse message or plain object to encode + * Encodes the specified MountUnregisterResponse message. Does not implicitly {@link vtctldata.MountUnregisterResponse.verify|verify} messages. + * @param message MountUnregisterResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IMountUnregisterResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetShardResponse message, length delimited. Does not implicitly {@link vtctldata.GetShardResponse.verify|verify} messages. - * @param message GetShardResponse message or plain object to encode + * Encodes the specified MountUnregisterResponse message, length delimited. Does not implicitly {@link vtctldata.MountUnregisterResponse.verify|verify} messages. 
+ * @param message MountUnregisterResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IMountUnregisterResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetShardResponse message from the specified reader or buffer. + * Decodes a MountUnregisterResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetShardResponse + * @returns MountUnregisterResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetShardResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MountUnregisterResponse; /** - * Decodes a GetShardResponse message from the specified reader or buffer, length delimited. + * Decodes a MountUnregisterResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetShardResponse + * @returns MountUnregisterResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetShardResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MountUnregisterResponse; /** - * Verifies a GetShardResponse message. + * Verifies a MountUnregisterResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetShardResponse message from a plain object. Also converts values to their respective internal types. + * Creates a MountUnregisterResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetShardResponse + * @returns MountUnregisterResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetShardResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.MountUnregisterResponse; /** - * Creates a plain object from a GetShardResponse message. Also converts values to other types if specified. - * @param message GetShardResponse + * Creates a plain object from a MountUnregisterResponse message. Also converts values to other types if specified. + * @param message MountUnregisterResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetShardResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.MountUnregisterResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetShardResponse to JSON. + * Converts this MountUnregisterResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetShardResponse + * Gets the default type url for MountUnregisterResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetShardRoutingRulesRequest. */ - interface IGetShardRoutingRulesRequest { + /** Properties of a MountShowRequest. 
*/ + interface IMountShowRequest { + + /** MountShowRequest name */ + name?: (string|null); } - /** Represents a GetShardRoutingRulesRequest. */ - class GetShardRoutingRulesRequest implements IGetShardRoutingRulesRequest { + /** Represents a MountShowRequest. */ + class MountShowRequest implements IMountShowRequest { /** - * Constructs a new GetShardRoutingRulesRequest. + * Constructs a new MountShowRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetShardRoutingRulesRequest); + constructor(properties?: vtctldata.IMountShowRequest); + + /** MountShowRequest name. */ + public name: string; /** - * Creates a new GetShardRoutingRulesRequest instance using the specified properties. + * Creates a new MountShowRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetShardRoutingRulesRequest instance + * @returns MountShowRequest instance */ - public static create(properties?: vtctldata.IGetShardRoutingRulesRequest): vtctldata.GetShardRoutingRulesRequest; + public static create(properties?: vtctldata.IMountShowRequest): vtctldata.MountShowRequest; /** - * Encodes the specified GetShardRoutingRulesRequest message. Does not implicitly {@link vtctldata.GetShardRoutingRulesRequest.verify|verify} messages. - * @param message GetShardRoutingRulesRequest message or plain object to encode + * Encodes the specified MountShowRequest message. Does not implicitly {@link vtctldata.MountShowRequest.verify|verify} messages. + * @param message MountShowRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetShardRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IMountShowRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetShardRoutingRulesRequest message, length delimited. 
Does not implicitly {@link vtctldata.GetShardRoutingRulesRequest.verify|verify} messages. - * @param message GetShardRoutingRulesRequest message or plain object to encode + * Encodes the specified MountShowRequest message, length delimited. Does not implicitly {@link vtctldata.MountShowRequest.verify|verify} messages. + * @param message MountShowRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetShardRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IMountShowRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetShardRoutingRulesRequest message from the specified reader or buffer. + * Decodes a MountShowRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetShardRoutingRulesRequest + * @returns MountShowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetShardRoutingRulesRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MountShowRequest; /** - * Decodes a GetShardRoutingRulesRequest message from the specified reader or buffer, length delimited. + * Decodes a MountShowRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns GetShardRoutingRulesRequest + * @returns MountShowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetShardRoutingRulesRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MountShowRequest; /** - * Verifies a GetShardRoutingRulesRequest message. + * Verifies a MountShowRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetShardRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. + * Creates a MountShowRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetShardRoutingRulesRequest + * @returns MountShowRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetShardRoutingRulesRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.MountShowRequest; /** - * Creates a plain object from a GetShardRoutingRulesRequest message. Also converts values to other types if specified. - * @param message GetShardRoutingRulesRequest + * Creates a plain object from a MountShowRequest message. Also converts values to other types if specified. + * @param message MountShowRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetShardRoutingRulesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.MountShowRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetShardRoutingRulesRequest to JSON. 
+ * Converts this MountShowRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetShardRoutingRulesRequest + * Gets the default type url for MountShowRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetShardRoutingRulesResponse. */ - interface IGetShardRoutingRulesResponse { + /** Properties of a MountShowResponse. */ + interface IMountShowResponse { - /** GetShardRoutingRulesResponse shard_routing_rules */ - shard_routing_rules?: (vschema.IShardRoutingRules|null); + /** MountShowResponse topo_type */ + topo_type?: (string|null); + + /** MountShowResponse topo_server */ + topo_server?: (string|null); + + /** MountShowResponse topo_root */ + topo_root?: (string|null); + + /** MountShowResponse name */ + name?: (string|null); } - /** Represents a GetShardRoutingRulesResponse. */ - class GetShardRoutingRulesResponse implements IGetShardRoutingRulesResponse { + /** Represents a MountShowResponse. */ + class MountShowResponse implements IMountShowResponse { /** - * Constructs a new GetShardRoutingRulesResponse. + * Constructs a new MountShowResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetShardRoutingRulesResponse); + constructor(properties?: vtctldata.IMountShowResponse); - /** GetShardRoutingRulesResponse shard_routing_rules. */ - public shard_routing_rules?: (vschema.IShardRoutingRules|null); + /** MountShowResponse topo_type. */ + public topo_type: string; + + /** MountShowResponse topo_server. */ + public topo_server: string; + + /** MountShowResponse topo_root. */ + public topo_root: string; + + /** MountShowResponse name. */ + public name: string; /** - * Creates a new GetShardRoutingRulesResponse instance using the specified properties. 
+ * Creates a new MountShowResponse instance using the specified properties. * @param [properties] Properties to set - * @returns GetShardRoutingRulesResponse instance + * @returns MountShowResponse instance */ - public static create(properties?: vtctldata.IGetShardRoutingRulesResponse): vtctldata.GetShardRoutingRulesResponse; + public static create(properties?: vtctldata.IMountShowResponse): vtctldata.MountShowResponse; /** - * Encodes the specified GetShardRoutingRulesResponse message. Does not implicitly {@link vtctldata.GetShardRoutingRulesResponse.verify|verify} messages. - * @param message GetShardRoutingRulesResponse message or plain object to encode + * Encodes the specified MountShowResponse message. Does not implicitly {@link vtctldata.MountShowResponse.verify|verify} messages. + * @param message MountShowResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetShardRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IMountShowResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetShardRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.GetShardRoutingRulesResponse.verify|verify} messages. - * @param message GetShardRoutingRulesResponse message or plain object to encode + * Encodes the specified MountShowResponse message, length delimited. Does not implicitly {@link vtctldata.MountShowResponse.verify|verify} messages. 
+ * @param message MountShowResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetShardRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IMountShowResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetShardRoutingRulesResponse message from the specified reader or buffer. + * Decodes a MountShowResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetShardRoutingRulesResponse + * @returns MountShowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetShardRoutingRulesResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MountShowResponse; /** - * Decodes a GetShardRoutingRulesResponse message from the specified reader or buffer, length delimited. + * Decodes a MountShowResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetShardRoutingRulesResponse + * @returns MountShowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetShardRoutingRulesResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MountShowResponse; /** - * Verifies a GetShardRoutingRulesResponse message. + * Verifies a MountShowResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetShardRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. + * Creates a MountShowResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetShardRoutingRulesResponse + * @returns MountShowResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetShardRoutingRulesResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.MountShowResponse; /** - * Creates a plain object from a GetShardRoutingRulesResponse message. Also converts values to other types if specified. - * @param message GetShardRoutingRulesResponse + * Creates a plain object from a MountShowResponse message. Also converts values to other types if specified. + * @param message MountShowResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetShardRoutingRulesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.MountShowResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetShardRoutingRulesResponse to JSON. + * Converts this MountShowResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetShardRoutingRulesResponse + * Gets the default type url for MountShowResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetSrvKeyspaceNamesRequest. 
*/ - interface IGetSrvKeyspaceNamesRequest { - - /** GetSrvKeyspaceNamesRequest cells */ - cells?: (string[]|null); + /** Properties of a MountListRequest. */ + interface IMountListRequest { } - /** Represents a GetSrvKeyspaceNamesRequest. */ - class GetSrvKeyspaceNamesRequest implements IGetSrvKeyspaceNamesRequest { + /** Represents a MountListRequest. */ + class MountListRequest implements IMountListRequest { /** - * Constructs a new GetSrvKeyspaceNamesRequest. + * Constructs a new MountListRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetSrvKeyspaceNamesRequest); - - /** GetSrvKeyspaceNamesRequest cells. */ - public cells: string[]; + constructor(properties?: vtctldata.IMountListRequest); /** - * Creates a new GetSrvKeyspaceNamesRequest instance using the specified properties. + * Creates a new MountListRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetSrvKeyspaceNamesRequest instance + * @returns MountListRequest instance */ - public static create(properties?: vtctldata.IGetSrvKeyspaceNamesRequest): vtctldata.GetSrvKeyspaceNamesRequest; + public static create(properties?: vtctldata.IMountListRequest): vtctldata.MountListRequest; /** - * Encodes the specified GetSrvKeyspaceNamesRequest message. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesRequest.verify|verify} messages. - * @param message GetSrvKeyspaceNamesRequest message or plain object to encode + * Encodes the specified MountListRequest message. Does not implicitly {@link vtctldata.MountListRequest.verify|verify} messages. 
+ * @param message MountListRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetSrvKeyspaceNamesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IMountListRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetSrvKeyspaceNamesRequest message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesRequest.verify|verify} messages. - * @param message GetSrvKeyspaceNamesRequest message or plain object to encode + * Encodes the specified MountListRequest message, length delimited. Does not implicitly {@link vtctldata.MountListRequest.verify|verify} messages. + * @param message MountListRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetSrvKeyspaceNamesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IMountListRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetSrvKeyspaceNamesRequest message from the specified reader or buffer. + * Decodes a MountListRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetSrvKeyspaceNamesRequest + * @returns MountListRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSrvKeyspaceNamesRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MountListRequest; /** - * Decodes a GetSrvKeyspaceNamesRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a MountListRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetSrvKeyspaceNamesRequest + * @returns MountListRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSrvKeyspaceNamesRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MountListRequest; /** - * Verifies a GetSrvKeyspaceNamesRequest message. + * Verifies a MountListRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetSrvKeyspaceNamesRequest message from a plain object. Also converts values to their respective internal types. + * Creates a MountListRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetSrvKeyspaceNamesRequest + * @returns MountListRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetSrvKeyspaceNamesRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.MountListRequest; /** - * Creates a plain object from a GetSrvKeyspaceNamesRequest message. Also converts values to other types if specified. - * @param message GetSrvKeyspaceNamesRequest + * Creates a plain object from a MountListRequest message. Also converts values to other types if specified. 
+ * @param message MountListRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetSrvKeyspaceNamesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.MountListRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetSrvKeyspaceNamesRequest to JSON. + * Converts this MountListRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetSrvKeyspaceNamesRequest + * Gets the default type url for MountListRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetSrvKeyspaceNamesResponse. */ - interface IGetSrvKeyspaceNamesResponse { + /** Properties of a MountListResponse. */ + interface IMountListResponse { - /** GetSrvKeyspaceNamesResponse names */ - names?: ({ [k: string]: vtctldata.GetSrvKeyspaceNamesResponse.INameList }|null); + /** MountListResponse names */ + names?: (string[]|null); } - /** Represents a GetSrvKeyspaceNamesResponse. */ - class GetSrvKeyspaceNamesResponse implements IGetSrvKeyspaceNamesResponse { + /** Represents a MountListResponse. */ + class MountListResponse implements IMountListResponse { /** - * Constructs a new GetSrvKeyspaceNamesResponse. + * Constructs a new MountListResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetSrvKeyspaceNamesResponse); + constructor(properties?: vtctldata.IMountListResponse); - /** GetSrvKeyspaceNamesResponse names. */ - public names: { [k: string]: vtctldata.GetSrvKeyspaceNamesResponse.INameList }; + /** MountListResponse names. */ + public names: string[]; /** - * Creates a new GetSrvKeyspaceNamesResponse instance using the specified properties. 
+ * Creates a new MountListResponse instance using the specified properties. * @param [properties] Properties to set - * @returns GetSrvKeyspaceNamesResponse instance + * @returns MountListResponse instance */ - public static create(properties?: vtctldata.IGetSrvKeyspaceNamesResponse): vtctldata.GetSrvKeyspaceNamesResponse; + public static create(properties?: vtctldata.IMountListResponse): vtctldata.MountListResponse; /** - * Encodes the specified GetSrvKeyspaceNamesResponse message. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesResponse.verify|verify} messages. - * @param message GetSrvKeyspaceNamesResponse message or plain object to encode + * Encodes the specified MountListResponse message. Does not implicitly {@link vtctldata.MountListResponse.verify|verify} messages. + * @param message MountListResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetSrvKeyspaceNamesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IMountListResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetSrvKeyspaceNamesResponse message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesResponse.verify|verify} messages. - * @param message GetSrvKeyspaceNamesResponse message or plain object to encode + * Encodes the specified MountListResponse message, length delimited. Does not implicitly {@link vtctldata.MountListResponse.verify|verify} messages. 
+ * @param message MountListResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetSrvKeyspaceNamesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IMountListResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetSrvKeyspaceNamesResponse message from the specified reader or buffer. + * Decodes a MountListResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetSrvKeyspaceNamesResponse + * @returns MountListResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSrvKeyspaceNamesResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MountListResponse; /** - * Decodes a GetSrvKeyspaceNamesResponse message from the specified reader or buffer, length delimited. + * Decodes a MountListResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetSrvKeyspaceNamesResponse + * @returns MountListResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSrvKeyspaceNamesResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MountListResponse; /** - * Verifies a GetSrvKeyspaceNamesResponse message. + * Verifies a MountListResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetSrvKeyspaceNamesResponse message from a plain object. Also converts values to their respective internal types. + * Creates a MountListResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetSrvKeyspaceNamesResponse + * @returns MountListResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetSrvKeyspaceNamesResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.MountListResponse; /** - * Creates a plain object from a GetSrvKeyspaceNamesResponse message. Also converts values to other types if specified. - * @param message GetSrvKeyspaceNamesResponse + * Creates a plain object from a MountListResponse message. Also converts values to other types if specified. + * @param message MountListResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetSrvKeyspaceNamesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.MountListResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetSrvKeyspaceNamesResponse to JSON. + * Converts this MountListResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetSrvKeyspaceNamesResponse + * Gets the default type url for MountListResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - namespace GetSrvKeyspaceNamesResponse { + /** Properties of a MoveTablesCreateRequest. */ + interface IMoveTablesCreateRequest { - /** Properties of a NameList. 
*/ - interface INameList { + /** MoveTablesCreateRequest workflow */ + workflow?: (string|null); - /** NameList names */ - names?: (string[]|null); - } + /** MoveTablesCreateRequest source_keyspace */ + source_keyspace?: (string|null); - /** Represents a NameList. */ - class NameList implements INameList { + /** MoveTablesCreateRequest target_keyspace */ + target_keyspace?: (string|null); - /** - * Constructs a new NameList. - * @param [properties] Properties to set - */ - constructor(properties?: vtctldata.GetSrvKeyspaceNamesResponse.INameList); + /** MoveTablesCreateRequest cells */ + cells?: (string[]|null); - /** NameList names. */ - public names: string[]; + /** MoveTablesCreateRequest tablet_types */ + tablet_types?: (topodata.TabletType[]|null); - /** - * Creates a new NameList instance using the specified properties. - * @param [properties] Properties to set - * @returns NameList instance - */ - public static create(properties?: vtctldata.GetSrvKeyspaceNamesResponse.INameList): vtctldata.GetSrvKeyspaceNamesResponse.NameList; + /** MoveTablesCreateRequest tablet_selection_preference */ + tablet_selection_preference?: (tabletmanagerdata.TabletSelectionPreference|null); - /** - * Encodes the specified NameList message. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesResponse.NameList.verify|verify} messages. - * @param message NameList message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encode(message: vtctldata.GetSrvKeyspaceNamesResponse.INameList, writer?: $protobuf.Writer): $protobuf.Writer; + /** MoveTablesCreateRequest source_shards */ + source_shards?: (string[]|null); - /** - * Encodes the specified NameList message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesResponse.NameList.verify|verify} messages. 
- * @param message NameList message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encodeDelimited(message: vtctldata.GetSrvKeyspaceNamesResponse.INameList, writer?: $protobuf.Writer): $protobuf.Writer; + /** MoveTablesCreateRequest all_tables */ + all_tables?: (boolean|null); - /** - * Decodes a NameList message from the specified reader or buffer. - * @param reader Reader or buffer to decode from - * @param [length] Message length if known beforehand - * @returns NameList - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSrvKeyspaceNamesResponse.NameList; + /** MoveTablesCreateRequest include_tables */ + include_tables?: (string[]|null); - /** - * Decodes a NameList message from the specified reader or buffer, length delimited. - * @param reader Reader or buffer to decode from - * @returns NameList - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSrvKeyspaceNamesResponse.NameList; + /** MoveTablesCreateRequest exclude_tables */ + exclude_tables?: (string[]|null); - /** - * Verifies a NameList message. - * @param message Plain object to verify - * @returns `null` if valid, otherwise the reason why it is not - */ - public static verify(message: { [k: string]: any }): (string|null); + /** MoveTablesCreateRequest external_cluster_name */ + external_cluster_name?: (string|null); - /** - * Creates a NameList message from a plain object. Also converts values to their respective internal types. 
- * @param object Plain object - * @returns NameList - */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetSrvKeyspaceNamesResponse.NameList; + /** MoveTablesCreateRequest source_time_zone */ + source_time_zone?: (string|null); - /** - * Creates a plain object from a NameList message. Also converts values to other types if specified. - * @param message NameList - * @param [options] Conversion options - * @returns Plain object - */ - public static toObject(message: vtctldata.GetSrvKeyspaceNamesResponse.NameList, options?: $protobuf.IConversionOptions): { [k: string]: any }; + /** MoveTablesCreateRequest on_ddl */ + on_ddl?: (string|null); - /** - * Converts this NameList to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; + /** MoveTablesCreateRequest stop_after_copy */ + stop_after_copy?: (boolean|null); - /** - * Gets the default type url for NameList - * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns The default type url - */ - public static getTypeUrl(typeUrlPrefix?: string): string; - } + /** MoveTablesCreateRequest drop_foreign_keys */ + drop_foreign_keys?: (boolean|null); + + /** MoveTablesCreateRequest defer_secondary_keys */ + defer_secondary_keys?: (boolean|null); + + /** MoveTablesCreateRequest auto_start */ + auto_start?: (boolean|null); + + /** MoveTablesCreateRequest no_routing_rules */ + no_routing_rules?: (boolean|null); + + /** MoveTablesCreateRequest atomic_copy */ + atomic_copy?: (boolean|null); } - /** Properties of a GetSrvKeyspacesRequest. */ - interface IGetSrvKeyspacesRequest { + /** Represents a MoveTablesCreateRequest. */ + class MoveTablesCreateRequest implements IMoveTablesCreateRequest { + + /** + * Constructs a new MoveTablesCreateRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IMoveTablesCreateRequest); + + /** MoveTablesCreateRequest workflow. 
*/ + public workflow: string; + + /** MoveTablesCreateRequest source_keyspace. */ + public source_keyspace: string; + + /** MoveTablesCreateRequest target_keyspace. */ + public target_keyspace: string; + + /** MoveTablesCreateRequest cells. */ + public cells: string[]; + + /** MoveTablesCreateRequest tablet_types. */ + public tablet_types: topodata.TabletType[]; + + /** MoveTablesCreateRequest tablet_selection_preference. */ + public tablet_selection_preference: tabletmanagerdata.TabletSelectionPreference; + + /** MoveTablesCreateRequest source_shards. */ + public source_shards: string[]; + + /** MoveTablesCreateRequest all_tables. */ + public all_tables: boolean; - /** GetSrvKeyspacesRequest keyspace */ - keyspace?: (string|null); + /** MoveTablesCreateRequest include_tables. */ + public include_tables: string[]; - /** GetSrvKeyspacesRequest cells */ - cells?: (string[]|null); - } + /** MoveTablesCreateRequest exclude_tables. */ + public exclude_tables: string[]; - /** Represents a GetSrvKeyspacesRequest. */ - class GetSrvKeyspacesRequest implements IGetSrvKeyspacesRequest { + /** MoveTablesCreateRequest external_cluster_name. */ + public external_cluster_name: string; - /** - * Constructs a new GetSrvKeyspacesRequest. - * @param [properties] Properties to set - */ - constructor(properties?: vtctldata.IGetSrvKeyspacesRequest); + /** MoveTablesCreateRequest source_time_zone. */ + public source_time_zone: string; - /** GetSrvKeyspacesRequest keyspace. */ - public keyspace: string; + /** MoveTablesCreateRequest on_ddl. */ + public on_ddl: string; - /** GetSrvKeyspacesRequest cells. */ - public cells: string[]; + /** MoveTablesCreateRequest stop_after_copy. */ + public stop_after_copy: boolean; + + /** MoveTablesCreateRequest drop_foreign_keys. */ + public drop_foreign_keys: boolean; + + /** MoveTablesCreateRequest defer_secondary_keys. */ + public defer_secondary_keys: boolean; + + /** MoveTablesCreateRequest auto_start. 
*/ + public auto_start: boolean; + + /** MoveTablesCreateRequest no_routing_rules. */ + public no_routing_rules: boolean; + + /** MoveTablesCreateRequest atomic_copy. */ + public atomic_copy: boolean; /** - * Creates a new GetSrvKeyspacesRequest instance using the specified properties. + * Creates a new MoveTablesCreateRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetSrvKeyspacesRequest instance + * @returns MoveTablesCreateRequest instance */ - public static create(properties?: vtctldata.IGetSrvKeyspacesRequest): vtctldata.GetSrvKeyspacesRequest; + public static create(properties?: vtctldata.IMoveTablesCreateRequest): vtctldata.MoveTablesCreateRequest; /** - * Encodes the specified GetSrvKeyspacesRequest message. Does not implicitly {@link vtctldata.GetSrvKeyspacesRequest.verify|verify} messages. - * @param message GetSrvKeyspacesRequest message or plain object to encode + * Encodes the specified MoveTablesCreateRequest message. Does not implicitly {@link vtctldata.MoveTablesCreateRequest.verify|verify} messages. + * @param message MoveTablesCreateRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetSrvKeyspacesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IMoveTablesCreateRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetSrvKeyspacesRequest message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspacesRequest.verify|verify} messages. - * @param message GetSrvKeyspacesRequest message or plain object to encode + * Encodes the specified MoveTablesCreateRequest message, length delimited. Does not implicitly {@link vtctldata.MoveTablesCreateRequest.verify|verify} messages. 
+ * @param message MoveTablesCreateRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetSrvKeyspacesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IMoveTablesCreateRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetSrvKeyspacesRequest message from the specified reader or buffer. + * Decodes a MoveTablesCreateRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetSrvKeyspacesRequest + * @returns MoveTablesCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSrvKeyspacesRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MoveTablesCreateRequest; /** - * Decodes a GetSrvKeyspacesRequest message from the specified reader or buffer, length delimited. + * Decodes a MoveTablesCreateRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetSrvKeyspacesRequest + * @returns MoveTablesCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSrvKeyspacesRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MoveTablesCreateRequest; /** - * Verifies a GetSrvKeyspacesRequest message. + * Verifies a MoveTablesCreateRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetSrvKeyspacesRequest message from a plain object. Also converts values to their respective internal types. + * Creates a MoveTablesCreateRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetSrvKeyspacesRequest + * @returns MoveTablesCreateRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetSrvKeyspacesRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.MoveTablesCreateRequest; /** - * Creates a plain object from a GetSrvKeyspacesRequest message. Also converts values to other types if specified. - * @param message GetSrvKeyspacesRequest + * Creates a plain object from a MoveTablesCreateRequest message. Also converts values to other types if specified. + * @param message MoveTablesCreateRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetSrvKeyspacesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.MoveTablesCreateRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetSrvKeyspacesRequest to JSON. + * Converts this MoveTablesCreateRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetSrvKeyspacesRequest + * Gets the default type url for MoveTablesCreateRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetSrvKeyspacesResponse. */ - interface IGetSrvKeyspacesResponse { + /** Properties of a MoveTablesCreateResponse. 
*/ + interface IMoveTablesCreateResponse { - /** GetSrvKeyspacesResponse srv_keyspaces */ - srv_keyspaces?: ({ [k: string]: topodata.ISrvKeyspace }|null); + /** MoveTablesCreateResponse summary */ + summary?: (string|null); + + /** MoveTablesCreateResponse details */ + details?: (vtctldata.MoveTablesCreateResponse.ITabletInfo[]|null); } - /** Represents a GetSrvKeyspacesResponse. */ - class GetSrvKeyspacesResponse implements IGetSrvKeyspacesResponse { + /** Represents a MoveTablesCreateResponse. */ + class MoveTablesCreateResponse implements IMoveTablesCreateResponse { /** - * Constructs a new GetSrvKeyspacesResponse. + * Constructs a new MoveTablesCreateResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetSrvKeyspacesResponse); + constructor(properties?: vtctldata.IMoveTablesCreateResponse); - /** GetSrvKeyspacesResponse srv_keyspaces. */ - public srv_keyspaces: { [k: string]: topodata.ISrvKeyspace }; + /** MoveTablesCreateResponse summary. */ + public summary: string; + + /** MoveTablesCreateResponse details. */ + public details: vtctldata.MoveTablesCreateResponse.ITabletInfo[]; /** - * Creates a new GetSrvKeyspacesResponse instance using the specified properties. + * Creates a new MoveTablesCreateResponse instance using the specified properties. * @param [properties] Properties to set - * @returns GetSrvKeyspacesResponse instance + * @returns MoveTablesCreateResponse instance */ - public static create(properties?: vtctldata.IGetSrvKeyspacesResponse): vtctldata.GetSrvKeyspacesResponse; + public static create(properties?: vtctldata.IMoveTablesCreateResponse): vtctldata.MoveTablesCreateResponse; /** - * Encodes the specified GetSrvKeyspacesResponse message. Does not implicitly {@link vtctldata.GetSrvKeyspacesResponse.verify|verify} messages. - * @param message GetSrvKeyspacesResponse message or plain object to encode + * Encodes the specified MoveTablesCreateResponse message. 
Does not implicitly {@link vtctldata.MoveTablesCreateResponse.verify|verify} messages. + * @param message MoveTablesCreateResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetSrvKeyspacesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IMoveTablesCreateResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetSrvKeyspacesResponse message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspacesResponse.verify|verify} messages. - * @param message GetSrvKeyspacesResponse message or plain object to encode + * Encodes the specified MoveTablesCreateResponse message, length delimited. Does not implicitly {@link vtctldata.MoveTablesCreateResponse.verify|verify} messages. + * @param message MoveTablesCreateResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetSrvKeyspacesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IMoveTablesCreateResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetSrvKeyspacesResponse message from the specified reader or buffer. + * Decodes a MoveTablesCreateResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetSrvKeyspacesResponse + * @returns MoveTablesCreateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSrvKeyspacesResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MoveTablesCreateResponse; /** - * Decodes a GetSrvKeyspacesResponse message from the specified reader or buffer, length delimited. + * Decodes a MoveTablesCreateResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetSrvKeyspacesResponse + * @returns MoveTablesCreateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSrvKeyspacesResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MoveTablesCreateResponse; /** - * Verifies a GetSrvKeyspacesResponse message. + * Verifies a MoveTablesCreateResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetSrvKeyspacesResponse message from a plain object. Also converts values to their respective internal types. + * Creates a MoveTablesCreateResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns GetSrvKeyspacesResponse + * @returns MoveTablesCreateResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetSrvKeyspacesResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.MoveTablesCreateResponse; /** - * Creates a plain object from a GetSrvKeyspacesResponse message. Also converts values to other types if specified. - * @param message GetSrvKeyspacesResponse + * Creates a plain object from a MoveTablesCreateResponse message. Also converts values to other types if specified. + * @param message MoveTablesCreateResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetSrvKeyspacesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.MoveTablesCreateResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetSrvKeyspacesResponse to JSON. + * Converts this MoveTablesCreateResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetSrvKeyspacesResponse + * Gets the default type url for MoveTablesCreateResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an UpdateThrottlerConfigRequest. */ - interface IUpdateThrottlerConfigRequest { + namespace MoveTablesCreateResponse { - /** UpdateThrottlerConfigRequest keyspace */ - keyspace?: (string|null); + /** Properties of a TabletInfo. 
*/ + interface ITabletInfo { - /** UpdateThrottlerConfigRequest enable */ - enable?: (boolean|null); + /** TabletInfo tablet */ + tablet?: (topodata.ITabletAlias|null); - /** UpdateThrottlerConfigRequest disable */ - disable?: (boolean|null); + /** TabletInfo created */ + created?: (boolean|null); + } - /** UpdateThrottlerConfigRequest threshold */ - threshold?: (number|null); + /** Represents a TabletInfo. */ + class TabletInfo implements ITabletInfo { - /** UpdateThrottlerConfigRequest custom_query */ - custom_query?: (string|null); + /** + * Constructs a new TabletInfo. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.MoveTablesCreateResponse.ITabletInfo); - /** UpdateThrottlerConfigRequest custom_query_set */ - custom_query_set?: (boolean|null); + /** TabletInfo tablet. */ + public tablet?: (topodata.ITabletAlias|null); - /** UpdateThrottlerConfigRequest check_as_check_self */ - check_as_check_self?: (boolean|null); + /** TabletInfo created. */ + public created: boolean; - /** UpdateThrottlerConfigRequest check_as_check_shard */ - check_as_check_shard?: (boolean|null); + /** + * Creates a new TabletInfo instance using the specified properties. + * @param [properties] Properties to set + * @returns TabletInfo instance + */ + public static create(properties?: vtctldata.MoveTablesCreateResponse.ITabletInfo): vtctldata.MoveTablesCreateResponse.TabletInfo; + + /** + * Encodes the specified TabletInfo message. Does not implicitly {@link vtctldata.MoveTablesCreateResponse.TabletInfo.verify|verify} messages. + * @param message TabletInfo message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.MoveTablesCreateResponse.ITabletInfo, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified TabletInfo message, length delimited. Does not implicitly {@link vtctldata.MoveTablesCreateResponse.TabletInfo.verify|verify} messages. 
+ * @param message TabletInfo message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.MoveTablesCreateResponse.ITabletInfo, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a TabletInfo message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns TabletInfo + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MoveTablesCreateResponse.TabletInfo; + + /** + * Decodes a TabletInfo message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns TabletInfo + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MoveTablesCreateResponse.TabletInfo; + + /** + * Verifies a TabletInfo message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a TabletInfo message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns TabletInfo + */ + public static fromObject(object: { [k: string]: any }): vtctldata.MoveTablesCreateResponse.TabletInfo; + + /** + * Creates a plain object from a TabletInfo message. Also converts values to other types if specified. 
+ * @param message TabletInfo + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.MoveTablesCreateResponse.TabletInfo, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this TabletInfo to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for TabletInfo + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } } - /** Represents an UpdateThrottlerConfigRequest. */ - class UpdateThrottlerConfigRequest implements IUpdateThrottlerConfigRequest { + /** Properties of a MoveTablesCompleteRequest. */ + interface IMoveTablesCompleteRequest { + + /** MoveTablesCompleteRequest workflow */ + workflow?: (string|null); + + /** MoveTablesCompleteRequest target_keyspace */ + target_keyspace?: (string|null); + + /** MoveTablesCompleteRequest keep_data */ + keep_data?: (boolean|null); + + /** MoveTablesCompleteRequest keep_routing_rules */ + keep_routing_rules?: (boolean|null); + + /** MoveTablesCompleteRequest rename_tables */ + rename_tables?: (boolean|null); + + /** MoveTablesCompleteRequest dry_run */ + dry_run?: (boolean|null); + } + + /** Represents a MoveTablesCompleteRequest. */ + class MoveTablesCompleteRequest implements IMoveTablesCompleteRequest { /** - * Constructs a new UpdateThrottlerConfigRequest. + * Constructs a new MoveTablesCompleteRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IUpdateThrottlerConfigRequest); - - /** UpdateThrottlerConfigRequest keyspace. */ - public keyspace: string; + constructor(properties?: vtctldata.IMoveTablesCompleteRequest); - /** UpdateThrottlerConfigRequest enable. */ - public enable: boolean; - - /** UpdateThrottlerConfigRequest disable. */ - public disable: boolean; + /** MoveTablesCompleteRequest workflow. 
*/ + public workflow: string; - /** UpdateThrottlerConfigRequest threshold. */ - public threshold: number; + /** MoveTablesCompleteRequest target_keyspace. */ + public target_keyspace: string; - /** UpdateThrottlerConfigRequest custom_query. */ - public custom_query: string; + /** MoveTablesCompleteRequest keep_data. */ + public keep_data: boolean; - /** UpdateThrottlerConfigRequest custom_query_set. */ - public custom_query_set: boolean; + /** MoveTablesCompleteRequest keep_routing_rules. */ + public keep_routing_rules: boolean; - /** UpdateThrottlerConfigRequest check_as_check_self. */ - public check_as_check_self: boolean; + /** MoveTablesCompleteRequest rename_tables. */ + public rename_tables: boolean; - /** UpdateThrottlerConfigRequest check_as_check_shard. */ - public check_as_check_shard: boolean; + /** MoveTablesCompleteRequest dry_run. */ + public dry_run: boolean; /** - * Creates a new UpdateThrottlerConfigRequest instance using the specified properties. + * Creates a new MoveTablesCompleteRequest instance using the specified properties. * @param [properties] Properties to set - * @returns UpdateThrottlerConfigRequest instance + * @returns MoveTablesCompleteRequest instance */ - public static create(properties?: vtctldata.IUpdateThrottlerConfigRequest): vtctldata.UpdateThrottlerConfigRequest; + public static create(properties?: vtctldata.IMoveTablesCompleteRequest): vtctldata.MoveTablesCompleteRequest; /** - * Encodes the specified UpdateThrottlerConfigRequest message. Does not implicitly {@link vtctldata.UpdateThrottlerConfigRequest.verify|verify} messages. - * @param message UpdateThrottlerConfigRequest message or plain object to encode + * Encodes the specified MoveTablesCompleteRequest message. Does not implicitly {@link vtctldata.MoveTablesCompleteRequest.verify|verify} messages. 
+ * @param message MoveTablesCompleteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IUpdateThrottlerConfigRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IMoveTablesCompleteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified UpdateThrottlerConfigRequest message, length delimited. Does not implicitly {@link vtctldata.UpdateThrottlerConfigRequest.verify|verify} messages. - * @param message UpdateThrottlerConfigRequest message or plain object to encode + * Encodes the specified MoveTablesCompleteRequest message, length delimited. Does not implicitly {@link vtctldata.MoveTablesCompleteRequest.verify|verify} messages. + * @param message MoveTablesCompleteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IUpdateThrottlerConfigRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IMoveTablesCompleteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an UpdateThrottlerConfigRequest message from the specified reader or buffer. + * Decodes a MoveTablesCompleteRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns UpdateThrottlerConfigRequest + * @returns MoveTablesCompleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.UpdateThrottlerConfigRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MoveTablesCompleteRequest; /** - * Decodes an UpdateThrottlerConfigRequest message from the specified reader or buffer, length delimited. + * Decodes a MoveTablesCompleteRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns UpdateThrottlerConfigRequest + * @returns MoveTablesCompleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.UpdateThrottlerConfigRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MoveTablesCompleteRequest; /** - * Verifies an UpdateThrottlerConfigRequest message. + * Verifies a MoveTablesCompleteRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an UpdateThrottlerConfigRequest message from a plain object. Also converts values to their respective internal types. + * Creates a MoveTablesCompleteRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns UpdateThrottlerConfigRequest + * @returns MoveTablesCompleteRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.UpdateThrottlerConfigRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.MoveTablesCompleteRequest; /** - * Creates a plain object from an UpdateThrottlerConfigRequest message. Also converts values to other types if specified. - * @param message UpdateThrottlerConfigRequest + * Creates a plain object from a MoveTablesCompleteRequest message. Also converts values to other types if specified. + * @param message MoveTablesCompleteRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.UpdateThrottlerConfigRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.MoveTablesCompleteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this UpdateThrottlerConfigRequest to JSON. + * Converts this MoveTablesCompleteRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for UpdateThrottlerConfigRequest + * Gets the default type url for MoveTablesCompleteRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an UpdateThrottlerConfigResponse. */ - interface IUpdateThrottlerConfigResponse { + /** Properties of a MoveTablesCompleteResponse. */ + interface IMoveTablesCompleteResponse { + + /** MoveTablesCompleteResponse summary */ + summary?: (string|null); + + /** MoveTablesCompleteResponse dry_run_results */ + dry_run_results?: (string[]|null); } - /** Represents an UpdateThrottlerConfigResponse. 
*/ - class UpdateThrottlerConfigResponse implements IUpdateThrottlerConfigResponse { + /** Represents a MoveTablesCompleteResponse. */ + class MoveTablesCompleteResponse implements IMoveTablesCompleteResponse { /** - * Constructs a new UpdateThrottlerConfigResponse. + * Constructs a new MoveTablesCompleteResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IUpdateThrottlerConfigResponse); + constructor(properties?: vtctldata.IMoveTablesCompleteResponse); + + /** MoveTablesCompleteResponse summary. */ + public summary: string; + + /** MoveTablesCompleteResponse dry_run_results. */ + public dry_run_results: string[]; /** - * Creates a new UpdateThrottlerConfigResponse instance using the specified properties. + * Creates a new MoveTablesCompleteResponse instance using the specified properties. * @param [properties] Properties to set - * @returns UpdateThrottlerConfigResponse instance + * @returns MoveTablesCompleteResponse instance */ - public static create(properties?: vtctldata.IUpdateThrottlerConfigResponse): vtctldata.UpdateThrottlerConfigResponse; + public static create(properties?: vtctldata.IMoveTablesCompleteResponse): vtctldata.MoveTablesCompleteResponse; /** - * Encodes the specified UpdateThrottlerConfigResponse message. Does not implicitly {@link vtctldata.UpdateThrottlerConfigResponse.verify|verify} messages. - * @param message UpdateThrottlerConfigResponse message or plain object to encode + * Encodes the specified MoveTablesCompleteResponse message. Does not implicitly {@link vtctldata.MoveTablesCompleteResponse.verify|verify} messages. 
+ * @param message MoveTablesCompleteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IUpdateThrottlerConfigResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IMoveTablesCompleteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified UpdateThrottlerConfigResponse message, length delimited. Does not implicitly {@link vtctldata.UpdateThrottlerConfigResponse.verify|verify} messages. - * @param message UpdateThrottlerConfigResponse message or plain object to encode + * Encodes the specified MoveTablesCompleteResponse message, length delimited. Does not implicitly {@link vtctldata.MoveTablesCompleteResponse.verify|verify} messages. + * @param message MoveTablesCompleteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IUpdateThrottlerConfigResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IMoveTablesCompleteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an UpdateThrottlerConfigResponse message from the specified reader or buffer. + * Decodes a MoveTablesCompleteResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns UpdateThrottlerConfigResponse + * @returns MoveTablesCompleteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.UpdateThrottlerConfigResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.MoveTablesCompleteResponse; /** - * Decodes an UpdateThrottlerConfigResponse message from the specified reader or buffer, length delimited. + * Decodes a MoveTablesCompleteResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns UpdateThrottlerConfigResponse + * @returns MoveTablesCompleteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.UpdateThrottlerConfigResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.MoveTablesCompleteResponse; /** - * Verifies an UpdateThrottlerConfigResponse message. + * Verifies a MoveTablesCompleteResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an UpdateThrottlerConfigResponse message from a plain object. Also converts values to their respective internal types. + * Creates a MoveTablesCompleteResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns UpdateThrottlerConfigResponse + * @returns MoveTablesCompleteResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.UpdateThrottlerConfigResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.MoveTablesCompleteResponse; /** - * Creates a plain object from an UpdateThrottlerConfigResponse message. Also converts values to other types if specified. - * @param message UpdateThrottlerConfigResponse + * Creates a plain object from a MoveTablesCompleteResponse message. Also converts values to other types if specified. + * @param message MoveTablesCompleteResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.UpdateThrottlerConfigResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.MoveTablesCompleteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this UpdateThrottlerConfigResponse to JSON. + * Converts this MoveTablesCompleteResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for UpdateThrottlerConfigResponse + * Gets the default type url for MoveTablesCompleteResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetSrvVSchemaRequest. */ - interface IGetSrvVSchemaRequest { + /** Properties of a PingTabletRequest. */ + interface IPingTabletRequest { - /** GetSrvVSchemaRequest cell */ - cell?: (string|null); + /** PingTabletRequest tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); } - /** Represents a GetSrvVSchemaRequest. */ - class GetSrvVSchemaRequest implements IGetSrvVSchemaRequest { + /** Represents a PingTabletRequest. 
*/ + class PingTabletRequest implements IPingTabletRequest { /** - * Constructs a new GetSrvVSchemaRequest. + * Constructs a new PingTabletRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetSrvVSchemaRequest); + constructor(properties?: vtctldata.IPingTabletRequest); - /** GetSrvVSchemaRequest cell. */ - public cell: string; + /** PingTabletRequest tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); /** - * Creates a new GetSrvVSchemaRequest instance using the specified properties. + * Creates a new PingTabletRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetSrvVSchemaRequest instance + * @returns PingTabletRequest instance */ - public static create(properties?: vtctldata.IGetSrvVSchemaRequest): vtctldata.GetSrvVSchemaRequest; + public static create(properties?: vtctldata.IPingTabletRequest): vtctldata.PingTabletRequest; /** - * Encodes the specified GetSrvVSchemaRequest message. Does not implicitly {@link vtctldata.GetSrvVSchemaRequest.verify|verify} messages. - * @param message GetSrvVSchemaRequest message or plain object to encode + * Encodes the specified PingTabletRequest message. Does not implicitly {@link vtctldata.PingTabletRequest.verify|verify} messages. + * @param message PingTabletRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetSrvVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IPingTabletRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetSrvVSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.GetSrvVSchemaRequest.verify|verify} messages. - * @param message GetSrvVSchemaRequest message or plain object to encode + * Encodes the specified PingTabletRequest message, length delimited. 
Does not implicitly {@link vtctldata.PingTabletRequest.verify|verify} messages. + * @param message PingTabletRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetSrvVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IPingTabletRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetSrvVSchemaRequest message from the specified reader or buffer. + * Decodes a PingTabletRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetSrvVSchemaRequest + * @returns PingTabletRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSrvVSchemaRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.PingTabletRequest; /** - * Decodes a GetSrvVSchemaRequest message from the specified reader or buffer, length delimited. + * Decodes a PingTabletRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetSrvVSchemaRequest + * @returns PingTabletRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSrvVSchemaRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.PingTabletRequest; /** - * Verifies a GetSrvVSchemaRequest message. + * Verifies a PingTabletRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetSrvVSchemaRequest message from a plain object. Also converts values to their respective internal types. + * Creates a PingTabletRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetSrvVSchemaRequest + * @returns PingTabletRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetSrvVSchemaRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.PingTabletRequest; /** - * Creates a plain object from a GetSrvVSchemaRequest message. Also converts values to other types if specified. - * @param message GetSrvVSchemaRequest + * Creates a plain object from a PingTabletRequest message. Also converts values to other types if specified. + * @param message PingTabletRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetSrvVSchemaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.PingTabletRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetSrvVSchemaRequest to JSON. + * Converts this PingTabletRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetSrvVSchemaRequest + * Gets the default type url for PingTabletRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetSrvVSchemaResponse. */ - interface IGetSrvVSchemaResponse { - - /** GetSrvVSchemaResponse srv_v_schema */ - srv_v_schema?: (vschema.ISrvVSchema|null); + /** Properties of a PingTabletResponse. 
*/ + interface IPingTabletResponse { } - /** Represents a GetSrvVSchemaResponse. */ - class GetSrvVSchemaResponse implements IGetSrvVSchemaResponse { + /** Represents a PingTabletResponse. */ + class PingTabletResponse implements IPingTabletResponse { /** - * Constructs a new GetSrvVSchemaResponse. + * Constructs a new PingTabletResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetSrvVSchemaResponse); - - /** GetSrvVSchemaResponse srv_v_schema. */ - public srv_v_schema?: (vschema.ISrvVSchema|null); + constructor(properties?: vtctldata.IPingTabletResponse); /** - * Creates a new GetSrvVSchemaResponse instance using the specified properties. + * Creates a new PingTabletResponse instance using the specified properties. * @param [properties] Properties to set - * @returns GetSrvVSchemaResponse instance + * @returns PingTabletResponse instance */ - public static create(properties?: vtctldata.IGetSrvVSchemaResponse): vtctldata.GetSrvVSchemaResponse; + public static create(properties?: vtctldata.IPingTabletResponse): vtctldata.PingTabletResponse; /** - * Encodes the specified GetSrvVSchemaResponse message. Does not implicitly {@link vtctldata.GetSrvVSchemaResponse.verify|verify} messages. - * @param message GetSrvVSchemaResponse message or plain object to encode + * Encodes the specified PingTabletResponse message. Does not implicitly {@link vtctldata.PingTabletResponse.verify|verify} messages. + * @param message PingTabletResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetSrvVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IPingTabletResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetSrvVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.GetSrvVSchemaResponse.verify|verify} messages. 
- * @param message GetSrvVSchemaResponse message or plain object to encode + * Encodes the specified PingTabletResponse message, length delimited. Does not implicitly {@link vtctldata.PingTabletResponse.verify|verify} messages. + * @param message PingTabletResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetSrvVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IPingTabletResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetSrvVSchemaResponse message from the specified reader or buffer. + * Decodes a PingTabletResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetSrvVSchemaResponse + * @returns PingTabletResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSrvVSchemaResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.PingTabletResponse; /** - * Decodes a GetSrvVSchemaResponse message from the specified reader or buffer, length delimited. + * Decodes a PingTabletResponse message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns GetSrvVSchemaResponse + * @returns PingTabletResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSrvVSchemaResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.PingTabletResponse; /** - * Verifies a GetSrvVSchemaResponse message. + * Verifies a PingTabletResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetSrvVSchemaResponse message from a plain object. Also converts values to their respective internal types. + * Creates a PingTabletResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetSrvVSchemaResponse + * @returns PingTabletResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetSrvVSchemaResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.PingTabletResponse; /** - * Creates a plain object from a GetSrvVSchemaResponse message. Also converts values to other types if specified. - * @param message GetSrvVSchemaResponse + * Creates a plain object from a PingTabletResponse message. Also converts values to other types if specified. + * @param message PingTabletResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetSrvVSchemaResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.PingTabletResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetSrvVSchemaResponse to JSON. + * Converts this PingTabletResponse to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetSrvVSchemaResponse + * Gets the default type url for PingTabletResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetSrvVSchemasRequest. */ - interface IGetSrvVSchemasRequest { + /** Properties of a PlannedReparentShardRequest. */ + interface IPlannedReparentShardRequest { - /** GetSrvVSchemasRequest cells */ - cells?: (string[]|null); + /** PlannedReparentShardRequest keyspace */ + keyspace?: (string|null); + + /** PlannedReparentShardRequest shard */ + shard?: (string|null); + + /** PlannedReparentShardRequest new_primary */ + new_primary?: (topodata.ITabletAlias|null); + + /** PlannedReparentShardRequest avoid_primary */ + avoid_primary?: (topodata.ITabletAlias|null); + + /** PlannedReparentShardRequest wait_replicas_timeout */ + wait_replicas_timeout?: (vttime.IDuration|null); } - /** Represents a GetSrvVSchemasRequest. */ - class GetSrvVSchemasRequest implements IGetSrvVSchemasRequest { + /** Represents a PlannedReparentShardRequest. */ + class PlannedReparentShardRequest implements IPlannedReparentShardRequest { /** - * Constructs a new GetSrvVSchemasRequest. + * Constructs a new PlannedReparentShardRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetSrvVSchemasRequest); + constructor(properties?: vtctldata.IPlannedReparentShardRequest); - /** GetSrvVSchemasRequest cells. */ - public cells: string[]; + /** PlannedReparentShardRequest keyspace. */ + public keyspace: string; + + /** PlannedReparentShardRequest shard. */ + public shard: string; + + /** PlannedReparentShardRequest new_primary. */ + public new_primary?: (topodata.ITabletAlias|null); + + /** PlannedReparentShardRequest avoid_primary. 
*/ + public avoid_primary?: (topodata.ITabletAlias|null); + + /** PlannedReparentShardRequest wait_replicas_timeout. */ + public wait_replicas_timeout?: (vttime.IDuration|null); /** - * Creates a new GetSrvVSchemasRequest instance using the specified properties. + * Creates a new PlannedReparentShardRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetSrvVSchemasRequest instance + * @returns PlannedReparentShardRequest instance */ - public static create(properties?: vtctldata.IGetSrvVSchemasRequest): vtctldata.GetSrvVSchemasRequest; + public static create(properties?: vtctldata.IPlannedReparentShardRequest): vtctldata.PlannedReparentShardRequest; /** - * Encodes the specified GetSrvVSchemasRequest message. Does not implicitly {@link vtctldata.GetSrvVSchemasRequest.verify|verify} messages. - * @param message GetSrvVSchemasRequest message or plain object to encode + * Encodes the specified PlannedReparentShardRequest message. Does not implicitly {@link vtctldata.PlannedReparentShardRequest.verify|verify} messages. + * @param message PlannedReparentShardRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetSrvVSchemasRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IPlannedReparentShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetSrvVSchemasRequest message, length delimited. Does not implicitly {@link vtctldata.GetSrvVSchemasRequest.verify|verify} messages. - * @param message GetSrvVSchemasRequest message or plain object to encode + * Encodes the specified PlannedReparentShardRequest message, length delimited. Does not implicitly {@link vtctldata.PlannedReparentShardRequest.verify|verify} messages. 
+ * @param message PlannedReparentShardRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetSrvVSchemasRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IPlannedReparentShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetSrvVSchemasRequest message from the specified reader or buffer. + * Decodes a PlannedReparentShardRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetSrvVSchemasRequest + * @returns PlannedReparentShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSrvVSchemasRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.PlannedReparentShardRequest; /** - * Decodes a GetSrvVSchemasRequest message from the specified reader or buffer, length delimited. + * Decodes a PlannedReparentShardRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetSrvVSchemasRequest + * @returns PlannedReparentShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSrvVSchemasRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.PlannedReparentShardRequest; /** - * Verifies a GetSrvVSchemasRequest message. + * Verifies a PlannedReparentShardRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetSrvVSchemasRequest message from a plain object. Also converts values to their respective internal types. + * Creates a PlannedReparentShardRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetSrvVSchemasRequest + * @returns PlannedReparentShardRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetSrvVSchemasRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.PlannedReparentShardRequest; /** - * Creates a plain object from a GetSrvVSchemasRequest message. Also converts values to other types if specified. - * @param message GetSrvVSchemasRequest + * Creates a plain object from a PlannedReparentShardRequest message. Also converts values to other types if specified. + * @param message PlannedReparentShardRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetSrvVSchemasRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.PlannedReparentShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetSrvVSchemasRequest to JSON. + * Converts this PlannedReparentShardRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetSrvVSchemasRequest + * Gets the default type url for PlannedReparentShardRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetSrvVSchemasResponse. */ - interface IGetSrvVSchemasResponse { + /** Properties of a PlannedReparentShardResponse. 
*/ + interface IPlannedReparentShardResponse { - /** GetSrvVSchemasResponse srv_v_schemas */ - srv_v_schemas?: ({ [k: string]: vschema.ISrvVSchema }|null); + /** PlannedReparentShardResponse keyspace */ + keyspace?: (string|null); + + /** PlannedReparentShardResponse shard */ + shard?: (string|null); + + /** PlannedReparentShardResponse promoted_primary */ + promoted_primary?: (topodata.ITabletAlias|null); + + /** PlannedReparentShardResponse events */ + events?: (logutil.IEvent[]|null); } - /** Represents a GetSrvVSchemasResponse. */ - class GetSrvVSchemasResponse implements IGetSrvVSchemasResponse { + /** Represents a PlannedReparentShardResponse. */ + class PlannedReparentShardResponse implements IPlannedReparentShardResponse { /** - * Constructs a new GetSrvVSchemasResponse. + * Constructs a new PlannedReparentShardResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetSrvVSchemasResponse); + constructor(properties?: vtctldata.IPlannedReparentShardResponse); - /** GetSrvVSchemasResponse srv_v_schemas. */ - public srv_v_schemas: { [k: string]: vschema.ISrvVSchema }; + /** PlannedReparentShardResponse keyspace. */ + public keyspace: string; + + /** PlannedReparentShardResponse shard. */ + public shard: string; + + /** PlannedReparentShardResponse promoted_primary. */ + public promoted_primary?: (topodata.ITabletAlias|null); + + /** PlannedReparentShardResponse events. */ + public events: logutil.IEvent[]; /** - * Creates a new GetSrvVSchemasResponse instance using the specified properties. + * Creates a new PlannedReparentShardResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns GetSrvVSchemasResponse instance + * @returns PlannedReparentShardResponse instance */ - public static create(properties?: vtctldata.IGetSrvVSchemasResponse): vtctldata.GetSrvVSchemasResponse; + public static create(properties?: vtctldata.IPlannedReparentShardResponse): vtctldata.PlannedReparentShardResponse; /** - * Encodes the specified GetSrvVSchemasResponse message. Does not implicitly {@link vtctldata.GetSrvVSchemasResponse.verify|verify} messages. - * @param message GetSrvVSchemasResponse message or plain object to encode + * Encodes the specified PlannedReparentShardResponse message. Does not implicitly {@link vtctldata.PlannedReparentShardResponse.verify|verify} messages. + * @param message PlannedReparentShardResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetSrvVSchemasResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IPlannedReparentShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetSrvVSchemasResponse message, length delimited. Does not implicitly {@link vtctldata.GetSrvVSchemasResponse.verify|verify} messages. - * @param message GetSrvVSchemasResponse message or plain object to encode + * Encodes the specified PlannedReparentShardResponse message, length delimited. Does not implicitly {@link vtctldata.PlannedReparentShardResponse.verify|verify} messages. 
+ * @param message PlannedReparentShardResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetSrvVSchemasResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IPlannedReparentShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetSrvVSchemasResponse message from the specified reader or buffer. + * Decodes a PlannedReparentShardResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetSrvVSchemasResponse + * @returns PlannedReparentShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetSrvVSchemasResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.PlannedReparentShardResponse; /** - * Decodes a GetSrvVSchemasResponse message from the specified reader or buffer, length delimited. + * Decodes a PlannedReparentShardResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetSrvVSchemasResponse + * @returns PlannedReparentShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetSrvVSchemasResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.PlannedReparentShardResponse; /** - * Verifies a GetSrvVSchemasResponse message. + * Verifies a PlannedReparentShardResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetSrvVSchemasResponse message from a plain object. Also converts values to their respective internal types. + * Creates a PlannedReparentShardResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetSrvVSchemasResponse + * @returns PlannedReparentShardResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetSrvVSchemasResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.PlannedReparentShardResponse; /** - * Creates a plain object from a GetSrvVSchemasResponse message. Also converts values to other types if specified. - * @param message GetSrvVSchemasResponse + * Creates a plain object from a PlannedReparentShardResponse message. Also converts values to other types if specified. + * @param message PlannedReparentShardResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetSrvVSchemasResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.PlannedReparentShardResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetSrvVSchemasResponse to JSON. + * Converts this PlannedReparentShardResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetSrvVSchemasResponse + * Gets the default type url for PlannedReparentShardResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetTabletRequest. */ - interface IGetTabletRequest { + /** Properties of a RebuildKeyspaceGraphRequest. 
*/ + interface IRebuildKeyspaceGraphRequest { - /** GetTabletRequest tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); + /** RebuildKeyspaceGraphRequest keyspace */ + keyspace?: (string|null); + + /** RebuildKeyspaceGraphRequest cells */ + cells?: (string[]|null); + + /** RebuildKeyspaceGraphRequest allow_partial */ + allow_partial?: (boolean|null); } - /** Represents a GetTabletRequest. */ - class GetTabletRequest implements IGetTabletRequest { + /** Represents a RebuildKeyspaceGraphRequest. */ + class RebuildKeyspaceGraphRequest implements IRebuildKeyspaceGraphRequest { /** - * Constructs a new GetTabletRequest. + * Constructs a new RebuildKeyspaceGraphRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetTabletRequest); + constructor(properties?: vtctldata.IRebuildKeyspaceGraphRequest); - /** GetTabletRequest tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); + /** RebuildKeyspaceGraphRequest keyspace. */ + public keyspace: string; + + /** RebuildKeyspaceGraphRequest cells. */ + public cells: string[]; + + /** RebuildKeyspaceGraphRequest allow_partial. */ + public allow_partial: boolean; /** - * Creates a new GetTabletRequest instance using the specified properties. + * Creates a new RebuildKeyspaceGraphRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetTabletRequest instance + * @returns RebuildKeyspaceGraphRequest instance */ - public static create(properties?: vtctldata.IGetTabletRequest): vtctldata.GetTabletRequest; + public static create(properties?: vtctldata.IRebuildKeyspaceGraphRequest): vtctldata.RebuildKeyspaceGraphRequest; /** - * Encodes the specified GetTabletRequest message. Does not implicitly {@link vtctldata.GetTabletRequest.verify|verify} messages. - * @param message GetTabletRequest message or plain object to encode + * Encodes the specified RebuildKeyspaceGraphRequest message. 
Does not implicitly {@link vtctldata.RebuildKeyspaceGraphRequest.verify|verify} messages. + * @param message RebuildKeyspaceGraphRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetTabletRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRebuildKeyspaceGraphRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetTabletRequest message, length delimited. Does not implicitly {@link vtctldata.GetTabletRequest.verify|verify} messages. - * @param message GetTabletRequest message or plain object to encode + * Encodes the specified RebuildKeyspaceGraphRequest message, length delimited. Does not implicitly {@link vtctldata.RebuildKeyspaceGraphRequest.verify|verify} messages. + * @param message RebuildKeyspaceGraphRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetTabletRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRebuildKeyspaceGraphRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetTabletRequest message from the specified reader or buffer. + * Decodes a RebuildKeyspaceGraphRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetTabletRequest + * @returns RebuildKeyspaceGraphRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetTabletRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RebuildKeyspaceGraphRequest; /** - * Decodes a GetTabletRequest message from the specified reader or buffer, length delimited. + * Decodes a RebuildKeyspaceGraphRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetTabletRequest + * @returns RebuildKeyspaceGraphRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetTabletRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RebuildKeyspaceGraphRequest; /** - * Verifies a GetTabletRequest message. + * Verifies a RebuildKeyspaceGraphRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetTabletRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RebuildKeyspaceGraphRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns GetTabletRequest + * @returns RebuildKeyspaceGraphRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetTabletRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.RebuildKeyspaceGraphRequest; /** - * Creates a plain object from a GetTabletRequest message. Also converts values to other types if specified. - * @param message GetTabletRequest + * Creates a plain object from a RebuildKeyspaceGraphRequest message. Also converts values to other types if specified. + * @param message RebuildKeyspaceGraphRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetTabletRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RebuildKeyspaceGraphRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetTabletRequest to JSON. + * Converts this RebuildKeyspaceGraphRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetTabletRequest + * Gets the default type url for RebuildKeyspaceGraphRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetTabletResponse. */ - interface IGetTabletResponse { - - /** GetTabletResponse tablet */ - tablet?: (topodata.ITablet|null); + /** Properties of a RebuildKeyspaceGraphResponse. */ + interface IRebuildKeyspaceGraphResponse { } - /** Represents a GetTabletResponse. */ - class GetTabletResponse implements IGetTabletResponse { + /** Represents a RebuildKeyspaceGraphResponse. */ + class RebuildKeyspaceGraphResponse implements IRebuildKeyspaceGraphResponse { /** - * Constructs a new GetTabletResponse. + * Constructs a new RebuildKeyspaceGraphResponse. 
* @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetTabletResponse); - - /** GetTabletResponse tablet. */ - public tablet?: (topodata.ITablet|null); + constructor(properties?: vtctldata.IRebuildKeyspaceGraphResponse); /** - * Creates a new GetTabletResponse instance using the specified properties. + * Creates a new RebuildKeyspaceGraphResponse instance using the specified properties. * @param [properties] Properties to set - * @returns GetTabletResponse instance + * @returns RebuildKeyspaceGraphResponse instance */ - public static create(properties?: vtctldata.IGetTabletResponse): vtctldata.GetTabletResponse; + public static create(properties?: vtctldata.IRebuildKeyspaceGraphResponse): vtctldata.RebuildKeyspaceGraphResponse; /** - * Encodes the specified GetTabletResponse message. Does not implicitly {@link vtctldata.GetTabletResponse.verify|verify} messages. - * @param message GetTabletResponse message or plain object to encode + * Encodes the specified RebuildKeyspaceGraphResponse message. Does not implicitly {@link vtctldata.RebuildKeyspaceGraphResponse.verify|verify} messages. + * @param message RebuildKeyspaceGraphResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetTabletResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRebuildKeyspaceGraphResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetTabletResponse message, length delimited. Does not implicitly {@link vtctldata.GetTabletResponse.verify|verify} messages. - * @param message GetTabletResponse message or plain object to encode + * Encodes the specified RebuildKeyspaceGraphResponse message, length delimited. Does not implicitly {@link vtctldata.RebuildKeyspaceGraphResponse.verify|verify} messages. 
+ * @param message RebuildKeyspaceGraphResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetTabletResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRebuildKeyspaceGraphResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetTabletResponse message from the specified reader or buffer. + * Decodes a RebuildKeyspaceGraphResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetTabletResponse + * @returns RebuildKeyspaceGraphResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetTabletResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RebuildKeyspaceGraphResponse; /** - * Decodes a GetTabletResponse message from the specified reader or buffer, length delimited. + * Decodes a RebuildKeyspaceGraphResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetTabletResponse + * @returns RebuildKeyspaceGraphResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetTabletResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RebuildKeyspaceGraphResponse; /** - * Verifies a GetTabletResponse message. + * Verifies a RebuildKeyspaceGraphResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetTabletResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RebuildKeyspaceGraphResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetTabletResponse + * @returns RebuildKeyspaceGraphResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetTabletResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.RebuildKeyspaceGraphResponse; /** - * Creates a plain object from a GetTabletResponse message. Also converts values to other types if specified. - * @param message GetTabletResponse + * Creates a plain object from a RebuildKeyspaceGraphResponse message. Also converts values to other types if specified. + * @param message RebuildKeyspaceGraphResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetTabletResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RebuildKeyspaceGraphResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetTabletResponse to JSON. + * Converts this RebuildKeyspaceGraphResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetTabletResponse + * Gets the default type url for RebuildKeyspaceGraphResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetTabletsRequest. 
*/ - interface IGetTabletsRequest { - - /** GetTabletsRequest keyspace */ - keyspace?: (string|null); - - /** GetTabletsRequest shard */ - shard?: (string|null); + /** Properties of a RebuildVSchemaGraphRequest. */ + interface IRebuildVSchemaGraphRequest { - /** GetTabletsRequest cells */ + /** RebuildVSchemaGraphRequest cells */ cells?: (string[]|null); - - /** GetTabletsRequest strict */ - strict?: (boolean|null); - - /** GetTabletsRequest tablet_aliases */ - tablet_aliases?: (topodata.ITabletAlias[]|null); - - /** GetTabletsRequest tablet_type */ - tablet_type?: (topodata.TabletType|null); } - /** Represents a GetTabletsRequest. */ - class GetTabletsRequest implements IGetTabletsRequest { + /** Represents a RebuildVSchemaGraphRequest. */ + class RebuildVSchemaGraphRequest implements IRebuildVSchemaGraphRequest { /** - * Constructs a new GetTabletsRequest. + * Constructs a new RebuildVSchemaGraphRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetTabletsRequest); - - /** GetTabletsRequest keyspace. */ - public keyspace: string; - - /** GetTabletsRequest shard. */ - public shard: string; + constructor(properties?: vtctldata.IRebuildVSchemaGraphRequest); - /** GetTabletsRequest cells. */ + /** RebuildVSchemaGraphRequest cells. */ public cells: string[]; - /** GetTabletsRequest strict. */ - public strict: boolean; - - /** GetTabletsRequest tablet_aliases. */ - public tablet_aliases: topodata.ITabletAlias[]; - - /** GetTabletsRequest tablet_type. */ - public tablet_type: topodata.TabletType; - /** - * Creates a new GetTabletsRequest instance using the specified properties. + * Creates a new RebuildVSchemaGraphRequest instance using the specified properties. 
* @param [properties] Properties to set - * @returns GetTabletsRequest instance + * @returns RebuildVSchemaGraphRequest instance */ - public static create(properties?: vtctldata.IGetTabletsRequest): vtctldata.GetTabletsRequest; + public static create(properties?: vtctldata.IRebuildVSchemaGraphRequest): vtctldata.RebuildVSchemaGraphRequest; /** - * Encodes the specified GetTabletsRequest message. Does not implicitly {@link vtctldata.GetTabletsRequest.verify|verify} messages. - * @param message GetTabletsRequest message or plain object to encode + * Encodes the specified RebuildVSchemaGraphRequest message. Does not implicitly {@link vtctldata.RebuildVSchemaGraphRequest.verify|verify} messages. + * @param message RebuildVSchemaGraphRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetTabletsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRebuildVSchemaGraphRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetTabletsRequest message, length delimited. Does not implicitly {@link vtctldata.GetTabletsRequest.verify|verify} messages. - * @param message GetTabletsRequest message or plain object to encode + * Encodes the specified RebuildVSchemaGraphRequest message, length delimited. Does not implicitly {@link vtctldata.RebuildVSchemaGraphRequest.verify|verify} messages. + * @param message RebuildVSchemaGraphRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetTabletsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRebuildVSchemaGraphRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetTabletsRequest message from the specified reader or buffer. 
+ * Decodes a RebuildVSchemaGraphRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetTabletsRequest + * @returns RebuildVSchemaGraphRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetTabletsRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RebuildVSchemaGraphRequest; /** - * Decodes a GetTabletsRequest message from the specified reader or buffer, length delimited. + * Decodes a RebuildVSchemaGraphRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetTabletsRequest + * @returns RebuildVSchemaGraphRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetTabletsRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RebuildVSchemaGraphRequest; /** - * Verifies a GetTabletsRequest message. + * Verifies a RebuildVSchemaGraphRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetTabletsRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RebuildVSchemaGraphRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns GetTabletsRequest + * @returns RebuildVSchemaGraphRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetTabletsRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.RebuildVSchemaGraphRequest; /** - * Creates a plain object from a GetTabletsRequest message. Also converts values to other types if specified. - * @param message GetTabletsRequest + * Creates a plain object from a RebuildVSchemaGraphRequest message. Also converts values to other types if specified. + * @param message RebuildVSchemaGraphRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetTabletsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RebuildVSchemaGraphRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetTabletsRequest to JSON. + * Converts this RebuildVSchemaGraphRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetTabletsRequest + * Gets the default type url for RebuildVSchemaGraphRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetTabletsResponse. */ - interface IGetTabletsResponse { - - /** GetTabletsResponse tablets */ - tablets?: (topodata.ITablet[]|null); + /** Properties of a RebuildVSchemaGraphResponse. */ + interface IRebuildVSchemaGraphResponse { } - /** Represents a GetTabletsResponse. */ - class GetTabletsResponse implements IGetTabletsResponse { + /** Represents a RebuildVSchemaGraphResponse. */ + class RebuildVSchemaGraphResponse implements IRebuildVSchemaGraphResponse { /** - * Constructs a new GetTabletsResponse. + * Constructs a new RebuildVSchemaGraphResponse. 
* @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetTabletsResponse); - - /** GetTabletsResponse tablets. */ - public tablets: topodata.ITablet[]; + constructor(properties?: vtctldata.IRebuildVSchemaGraphResponse); /** - * Creates a new GetTabletsResponse instance using the specified properties. + * Creates a new RebuildVSchemaGraphResponse instance using the specified properties. * @param [properties] Properties to set - * @returns GetTabletsResponse instance + * @returns RebuildVSchemaGraphResponse instance */ - public static create(properties?: vtctldata.IGetTabletsResponse): vtctldata.GetTabletsResponse; + public static create(properties?: vtctldata.IRebuildVSchemaGraphResponse): vtctldata.RebuildVSchemaGraphResponse; /** - * Encodes the specified GetTabletsResponse message. Does not implicitly {@link vtctldata.GetTabletsResponse.verify|verify} messages. - * @param message GetTabletsResponse message or plain object to encode + * Encodes the specified RebuildVSchemaGraphResponse message. Does not implicitly {@link vtctldata.RebuildVSchemaGraphResponse.verify|verify} messages. + * @param message RebuildVSchemaGraphResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetTabletsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRebuildVSchemaGraphResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetTabletsResponse message, length delimited. Does not implicitly {@link vtctldata.GetTabletsResponse.verify|verify} messages. - * @param message GetTabletsResponse message or plain object to encode + * Encodes the specified RebuildVSchemaGraphResponse message, length delimited. Does not implicitly {@link vtctldata.RebuildVSchemaGraphResponse.verify|verify} messages. 
+ * @param message RebuildVSchemaGraphResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetTabletsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRebuildVSchemaGraphResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetTabletsResponse message from the specified reader or buffer. + * Decodes a RebuildVSchemaGraphResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetTabletsResponse + * @returns RebuildVSchemaGraphResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetTabletsResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RebuildVSchemaGraphResponse; /** - * Decodes a GetTabletsResponse message from the specified reader or buffer, length delimited. + * Decodes a RebuildVSchemaGraphResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetTabletsResponse + * @returns RebuildVSchemaGraphResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetTabletsResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RebuildVSchemaGraphResponse; /** - * Verifies a GetTabletsResponse message. + * Verifies a RebuildVSchemaGraphResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetTabletsResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RebuildVSchemaGraphResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetTabletsResponse + * @returns RebuildVSchemaGraphResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetTabletsResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.RebuildVSchemaGraphResponse; /** - * Creates a plain object from a GetTabletsResponse message. Also converts values to other types if specified. - * @param message GetTabletsResponse + * Creates a plain object from a RebuildVSchemaGraphResponse message. Also converts values to other types if specified. + * @param message RebuildVSchemaGraphResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetTabletsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RebuildVSchemaGraphResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetTabletsResponse to JSON. + * Converts this RebuildVSchemaGraphResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetTabletsResponse + * Gets the default type url for RebuildVSchemaGraphResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetTopologyPathRequest. */ - interface IGetTopologyPathRequest { + /** Properties of a RefreshStateRequest. 
*/ + interface IRefreshStateRequest { - /** GetTopologyPathRequest path */ - path?: (string|null); + /** RefreshStateRequest tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); } - /** Represents a GetTopologyPathRequest. */ - class GetTopologyPathRequest implements IGetTopologyPathRequest { + /** Represents a RefreshStateRequest. */ + class RefreshStateRequest implements IRefreshStateRequest { /** - * Constructs a new GetTopologyPathRequest. + * Constructs a new RefreshStateRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetTopologyPathRequest); + constructor(properties?: vtctldata.IRefreshStateRequest); - /** GetTopologyPathRequest path. */ - public path: string; + /** RefreshStateRequest tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); /** - * Creates a new GetTopologyPathRequest instance using the specified properties. + * Creates a new RefreshStateRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetTopologyPathRequest instance + * @returns RefreshStateRequest instance */ - public static create(properties?: vtctldata.IGetTopologyPathRequest): vtctldata.GetTopologyPathRequest; + public static create(properties?: vtctldata.IRefreshStateRequest): vtctldata.RefreshStateRequest; /** - * Encodes the specified GetTopologyPathRequest message. Does not implicitly {@link vtctldata.GetTopologyPathRequest.verify|verify} messages. - * @param message GetTopologyPathRequest message or plain object to encode + * Encodes the specified RefreshStateRequest message. Does not implicitly {@link vtctldata.RefreshStateRequest.verify|verify} messages. 
+ * @param message RefreshStateRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetTopologyPathRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRefreshStateRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetTopologyPathRequest message, length delimited. Does not implicitly {@link vtctldata.GetTopologyPathRequest.verify|verify} messages. - * @param message GetTopologyPathRequest message or plain object to encode + * Encodes the specified RefreshStateRequest message, length delimited. Does not implicitly {@link vtctldata.RefreshStateRequest.verify|verify} messages. + * @param message RefreshStateRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetTopologyPathRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRefreshStateRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetTopologyPathRequest message from the specified reader or buffer. + * Decodes a RefreshStateRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetTopologyPathRequest + * @returns RefreshStateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetTopologyPathRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RefreshStateRequest; /** - * Decodes a GetTopologyPathRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a RefreshStateRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetTopologyPathRequest + * @returns RefreshStateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetTopologyPathRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RefreshStateRequest; /** - * Verifies a GetTopologyPathRequest message. + * Verifies a RefreshStateRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetTopologyPathRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RefreshStateRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetTopologyPathRequest + * @returns RefreshStateRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetTopologyPathRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.RefreshStateRequest; /** - * Creates a plain object from a GetTopologyPathRequest message. Also converts values to other types if specified. - * @param message GetTopologyPathRequest + * Creates a plain object from a RefreshStateRequest message. Also converts values to other types if specified. 
+ * @param message RefreshStateRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetTopologyPathRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RefreshStateRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetTopologyPathRequest to JSON. + * Converts this RefreshStateRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetTopologyPathRequest + * Gets the default type url for RefreshStateRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetTopologyPathResponse. */ - interface IGetTopologyPathResponse { - - /** GetTopologyPathResponse cell */ - cell?: (vtctldata.ITopologyCell|null); + /** Properties of a RefreshStateResponse. */ + interface IRefreshStateResponse { } - /** Represents a GetTopologyPathResponse. */ - class GetTopologyPathResponse implements IGetTopologyPathResponse { + /** Represents a RefreshStateResponse. */ + class RefreshStateResponse implements IRefreshStateResponse { /** - * Constructs a new GetTopologyPathResponse. + * Constructs a new RefreshStateResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetTopologyPathResponse); - - /** GetTopologyPathResponse cell. */ - public cell?: (vtctldata.ITopologyCell|null); + constructor(properties?: vtctldata.IRefreshStateResponse); /** - * Creates a new GetTopologyPathResponse instance using the specified properties. + * Creates a new RefreshStateResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns GetTopologyPathResponse instance + * @returns RefreshStateResponse instance */ - public static create(properties?: vtctldata.IGetTopologyPathResponse): vtctldata.GetTopologyPathResponse; + public static create(properties?: vtctldata.IRefreshStateResponse): vtctldata.RefreshStateResponse; /** - * Encodes the specified GetTopologyPathResponse message. Does not implicitly {@link vtctldata.GetTopologyPathResponse.verify|verify} messages. - * @param message GetTopologyPathResponse message or plain object to encode + * Encodes the specified RefreshStateResponse message. Does not implicitly {@link vtctldata.RefreshStateResponse.verify|verify} messages. + * @param message RefreshStateResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetTopologyPathResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRefreshStateResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetTopologyPathResponse message, length delimited. Does not implicitly {@link vtctldata.GetTopologyPathResponse.verify|verify} messages. - * @param message GetTopologyPathResponse message or plain object to encode + * Encodes the specified RefreshStateResponse message, length delimited. Does not implicitly {@link vtctldata.RefreshStateResponse.verify|verify} messages. + * @param message RefreshStateResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetTopologyPathResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRefreshStateResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetTopologyPathResponse message from the specified reader or buffer. 
+ * Decodes a RefreshStateResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetTopologyPathResponse + * @returns RefreshStateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetTopologyPathResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RefreshStateResponse; /** - * Decodes a GetTopologyPathResponse message from the specified reader or buffer, length delimited. + * Decodes a RefreshStateResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetTopologyPathResponse + * @returns RefreshStateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetTopologyPathResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RefreshStateResponse; /** - * Verifies a GetTopologyPathResponse message. + * Verifies a RefreshStateResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetTopologyPathResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RefreshStateResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns GetTopologyPathResponse + * @returns RefreshStateResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetTopologyPathResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.RefreshStateResponse; /** - * Creates a plain object from a GetTopologyPathResponse message. Also converts values to other types if specified. - * @param message GetTopologyPathResponse + * Creates a plain object from a RefreshStateResponse message. Also converts values to other types if specified. + * @param message RefreshStateResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetTopologyPathResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RefreshStateResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetTopologyPathResponse to JSON. + * Converts this RefreshStateResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetTopologyPathResponse + * Gets the default type url for RefreshStateResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a TopologyCell. */ - interface ITopologyCell { - - /** TopologyCell name */ - name?: (string|null); + /** Properties of a RefreshStateByShardRequest. 
*/ + interface IRefreshStateByShardRequest { - /** TopologyCell path */ - path?: (string|null); + /** RefreshStateByShardRequest keyspace */ + keyspace?: (string|null); - /** TopologyCell data */ - data?: (string|null); + /** RefreshStateByShardRequest shard */ + shard?: (string|null); - /** TopologyCell children */ - children?: (string[]|null); + /** RefreshStateByShardRequest cells */ + cells?: (string[]|null); } - /** Represents a TopologyCell. */ - class TopologyCell implements ITopologyCell { + /** Represents a RefreshStateByShardRequest. */ + class RefreshStateByShardRequest implements IRefreshStateByShardRequest { /** - * Constructs a new TopologyCell. + * Constructs a new RefreshStateByShardRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ITopologyCell); - - /** TopologyCell name. */ - public name: string; + constructor(properties?: vtctldata.IRefreshStateByShardRequest); - /** TopologyCell path. */ - public path: string; + /** RefreshStateByShardRequest keyspace. */ + public keyspace: string; - /** TopologyCell data. */ - public data: string; + /** RefreshStateByShardRequest shard. */ + public shard: string; - /** TopologyCell children. */ - public children: string[]; + /** RefreshStateByShardRequest cells. */ + public cells: string[]; /** - * Creates a new TopologyCell instance using the specified properties. + * Creates a new RefreshStateByShardRequest instance using the specified properties. * @param [properties] Properties to set - * @returns TopologyCell instance + * @returns RefreshStateByShardRequest instance */ - public static create(properties?: vtctldata.ITopologyCell): vtctldata.TopologyCell; + public static create(properties?: vtctldata.IRefreshStateByShardRequest): vtctldata.RefreshStateByShardRequest; /** - * Encodes the specified TopologyCell message. Does not implicitly {@link vtctldata.TopologyCell.verify|verify} messages. 
- * @param message TopologyCell message or plain object to encode + * Encodes the specified RefreshStateByShardRequest message. Does not implicitly {@link vtctldata.RefreshStateByShardRequest.verify|verify} messages. + * @param message RefreshStateByShardRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ITopologyCell, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRefreshStateByShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified TopologyCell message, length delimited. Does not implicitly {@link vtctldata.TopologyCell.verify|verify} messages. - * @param message TopologyCell message or plain object to encode + * Encodes the specified RefreshStateByShardRequest message, length delimited. Does not implicitly {@link vtctldata.RefreshStateByShardRequest.verify|verify} messages. + * @param message RefreshStateByShardRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ITopologyCell, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRefreshStateByShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a TopologyCell message from the specified reader or buffer. + * Decodes a RefreshStateByShardRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns TopologyCell + * @returns RefreshStateByShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.TopologyCell; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RefreshStateByShardRequest; /** - * Decodes a TopologyCell message from the specified reader or buffer, length delimited. + * Decodes a RefreshStateByShardRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns TopologyCell + * @returns RefreshStateByShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.TopologyCell; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RefreshStateByShardRequest; /** - * Verifies a TopologyCell message. + * Verifies a RefreshStateByShardRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a TopologyCell message from a plain object. Also converts values to their respective internal types. + * Creates a RefreshStateByShardRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns TopologyCell + * @returns RefreshStateByShardRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.TopologyCell; + public static fromObject(object: { [k: string]: any }): vtctldata.RefreshStateByShardRequest; /** - * Creates a plain object from a TopologyCell message. Also converts values to other types if specified. - * @param message TopologyCell + * Creates a plain object from a RefreshStateByShardRequest message. Also converts values to other types if specified. + * @param message RefreshStateByShardRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.TopologyCell, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RefreshStateByShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this TopologyCell to JSON. + * Converts this RefreshStateByShardRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for TopologyCell + * Gets the default type url for RefreshStateByShardRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetVSchemaRequest. */ - interface IGetVSchemaRequest { + /** Properties of a RefreshStateByShardResponse. */ + interface IRefreshStateByShardResponse { - /** GetVSchemaRequest keyspace */ - keyspace?: (string|null); + /** RefreshStateByShardResponse is_partial_refresh */ + is_partial_refresh?: (boolean|null); + + /** RefreshStateByShardResponse partial_refresh_details */ + partial_refresh_details?: (string|null); } - /** Represents a GetVSchemaRequest. */ - class GetVSchemaRequest implements IGetVSchemaRequest { + /** Represents a RefreshStateByShardResponse. 
*/ + class RefreshStateByShardResponse implements IRefreshStateByShardResponse { /** - * Constructs a new GetVSchemaRequest. + * Constructs a new RefreshStateByShardResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetVSchemaRequest); + constructor(properties?: vtctldata.IRefreshStateByShardResponse); - /** GetVSchemaRequest keyspace. */ - public keyspace: string; + /** RefreshStateByShardResponse is_partial_refresh. */ + public is_partial_refresh: boolean; + + /** RefreshStateByShardResponse partial_refresh_details. */ + public partial_refresh_details: string; /** - * Creates a new GetVSchemaRequest instance using the specified properties. + * Creates a new RefreshStateByShardResponse instance using the specified properties. * @param [properties] Properties to set - * @returns GetVSchemaRequest instance + * @returns RefreshStateByShardResponse instance */ - public static create(properties?: vtctldata.IGetVSchemaRequest): vtctldata.GetVSchemaRequest; + public static create(properties?: vtctldata.IRefreshStateByShardResponse): vtctldata.RefreshStateByShardResponse; /** - * Encodes the specified GetVSchemaRequest message. Does not implicitly {@link vtctldata.GetVSchemaRequest.verify|verify} messages. - * @param message GetVSchemaRequest message or plain object to encode + * Encodes the specified RefreshStateByShardResponse message. Does not implicitly {@link vtctldata.RefreshStateByShardResponse.verify|verify} messages. + * @param message RefreshStateByShardResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRefreshStateByShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetVSchemaRequest message, length delimited. 
Does not implicitly {@link vtctldata.GetVSchemaRequest.verify|verify} messages. - * @param message GetVSchemaRequest message or plain object to encode + * Encodes the specified RefreshStateByShardResponse message, length delimited. Does not implicitly {@link vtctldata.RefreshStateByShardResponse.verify|verify} messages. + * @param message RefreshStateByShardResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRefreshStateByShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetVSchemaRequest message from the specified reader or buffer. + * Decodes a RefreshStateByShardResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetVSchemaRequest + * @returns RefreshStateByShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetVSchemaRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RefreshStateByShardResponse; /** - * Decodes a GetVSchemaRequest message from the specified reader or buffer, length delimited. + * Decodes a RefreshStateByShardResponse message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns GetVSchemaRequest + * @returns RefreshStateByShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetVSchemaRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RefreshStateByShardResponse; /** - * Verifies a GetVSchemaRequest message. + * Verifies a RefreshStateByShardResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetVSchemaRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RefreshStateByShardResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetVSchemaRequest + * @returns RefreshStateByShardResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetVSchemaRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.RefreshStateByShardResponse; /** - * Creates a plain object from a GetVSchemaRequest message. Also converts values to other types if specified. - * @param message GetVSchemaRequest + * Creates a plain object from a RefreshStateByShardResponse message. Also converts values to other types if specified. + * @param message RefreshStateByShardResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetVSchemaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RefreshStateByShardResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetVSchemaRequest to JSON. 
+ * Converts this RefreshStateByShardResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetVSchemaRequest + * Gets the default type url for RefreshStateByShardResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetVersionRequest. */ - interface IGetVersionRequest { + /** Properties of a ReloadSchemaRequest. */ + interface IReloadSchemaRequest { - /** GetVersionRequest tablet_alias */ + /** ReloadSchemaRequest tablet_alias */ tablet_alias?: (topodata.ITabletAlias|null); } - /** Represents a GetVersionRequest. */ - class GetVersionRequest implements IGetVersionRequest { + /** Represents a ReloadSchemaRequest. */ + class ReloadSchemaRequest implements IReloadSchemaRequest { /** - * Constructs a new GetVersionRequest. + * Constructs a new ReloadSchemaRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetVersionRequest); + constructor(properties?: vtctldata.IReloadSchemaRequest); - /** GetVersionRequest tablet_alias. */ + /** ReloadSchemaRequest tablet_alias. */ public tablet_alias?: (topodata.ITabletAlias|null); /** - * Creates a new GetVersionRequest instance using the specified properties. + * Creates a new ReloadSchemaRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetVersionRequest instance + * @returns ReloadSchemaRequest instance */ - public static create(properties?: vtctldata.IGetVersionRequest): vtctldata.GetVersionRequest; + public static create(properties?: vtctldata.IReloadSchemaRequest): vtctldata.ReloadSchemaRequest; /** - * Encodes the specified GetVersionRequest message. Does not implicitly {@link vtctldata.GetVersionRequest.verify|verify} messages. 
- * @param message GetVersionRequest message or plain object to encode + * Encodes the specified ReloadSchemaRequest message. Does not implicitly {@link vtctldata.ReloadSchemaRequest.verify|verify} messages. + * @param message ReloadSchemaRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetVersionRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IReloadSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetVersionRequest message, length delimited. Does not implicitly {@link vtctldata.GetVersionRequest.verify|verify} messages. - * @param message GetVersionRequest message or plain object to encode + * Encodes the specified ReloadSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaRequest.verify|verify} messages. + * @param message ReloadSchemaRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetVersionRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IReloadSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetVersionRequest message from the specified reader or buffer. + * Decodes a ReloadSchemaRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetVersionRequest + * @returns ReloadSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetVersionRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ReloadSchemaRequest; /** - * Decodes a GetVersionRequest message from the specified reader or buffer, length delimited. + * Decodes a ReloadSchemaRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetVersionRequest + * @returns ReloadSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetVersionRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ReloadSchemaRequest; /** - * Verifies a GetVersionRequest message. + * Verifies a ReloadSchemaRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetVersionRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ReloadSchemaRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns GetVersionRequest + * @returns ReloadSchemaRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetVersionRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.ReloadSchemaRequest; /** - * Creates a plain object from a GetVersionRequest message. Also converts values to other types if specified. - * @param message GetVersionRequest + * Creates a plain object from a ReloadSchemaRequest message. Also converts values to other types if specified. + * @param message ReloadSchemaRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetVersionRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ReloadSchemaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetVersionRequest to JSON. + * Converts this ReloadSchemaRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetVersionRequest + * Gets the default type url for ReloadSchemaRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetVersionResponse. */ - interface IGetVersionResponse { - - /** GetVersionResponse version */ - version?: (string|null); + /** Properties of a ReloadSchemaResponse. */ + interface IReloadSchemaResponse { } - /** Represents a GetVersionResponse. */ - class GetVersionResponse implements IGetVersionResponse { + /** Represents a ReloadSchemaResponse. */ + class ReloadSchemaResponse implements IReloadSchemaResponse { /** - * Constructs a new GetVersionResponse. + * Constructs a new ReloadSchemaResponse. 
* @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetVersionResponse); - - /** GetVersionResponse version. */ - public version: string; + constructor(properties?: vtctldata.IReloadSchemaResponse); /** - * Creates a new GetVersionResponse instance using the specified properties. + * Creates a new ReloadSchemaResponse instance using the specified properties. * @param [properties] Properties to set - * @returns GetVersionResponse instance + * @returns ReloadSchemaResponse instance */ - public static create(properties?: vtctldata.IGetVersionResponse): vtctldata.GetVersionResponse; + public static create(properties?: vtctldata.IReloadSchemaResponse): vtctldata.ReloadSchemaResponse; /** - * Encodes the specified GetVersionResponse message. Does not implicitly {@link vtctldata.GetVersionResponse.verify|verify} messages. - * @param message GetVersionResponse message or plain object to encode + * Encodes the specified ReloadSchemaResponse message. Does not implicitly {@link vtctldata.ReloadSchemaResponse.verify|verify} messages. + * @param message ReloadSchemaResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetVersionResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IReloadSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetVersionResponse message, length delimited. Does not implicitly {@link vtctldata.GetVersionResponse.verify|verify} messages. - * @param message GetVersionResponse message or plain object to encode + * Encodes the specified ReloadSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaResponse.verify|verify} messages. 
+ * @param message ReloadSchemaResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetVersionResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IReloadSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetVersionResponse message from the specified reader or buffer. + * Decodes a ReloadSchemaResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetVersionResponse + * @returns ReloadSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetVersionResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ReloadSchemaResponse; /** - * Decodes a GetVersionResponse message from the specified reader or buffer, length delimited. + * Decodes a ReloadSchemaResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetVersionResponse + * @returns ReloadSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetVersionResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ReloadSchemaResponse; /** - * Verifies a GetVersionResponse message. + * Verifies a ReloadSchemaResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetVersionResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ReloadSchemaResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetVersionResponse + * @returns ReloadSchemaResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetVersionResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.ReloadSchemaResponse; /** - * Creates a plain object from a GetVersionResponse message. Also converts values to other types if specified. - * @param message GetVersionResponse + * Creates a plain object from a ReloadSchemaResponse message. Also converts values to other types if specified. + * @param message ReloadSchemaResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetVersionResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ReloadSchemaResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetVersionResponse to JSON. + * Converts this ReloadSchemaResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetVersionResponse + * Gets the default type url for ReloadSchemaResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetVSchemaResponse. */ - interface IGetVSchemaResponse { + /** Properties of a ReloadSchemaKeyspaceRequest. 
*/ + interface IReloadSchemaKeyspaceRequest { - /** GetVSchemaResponse v_schema */ - v_schema?: (vschema.IKeyspace|null); + /** ReloadSchemaKeyspaceRequest keyspace */ + keyspace?: (string|null); + + /** ReloadSchemaKeyspaceRequest wait_position */ + wait_position?: (string|null); + + /** ReloadSchemaKeyspaceRequest include_primary */ + include_primary?: (boolean|null); + + /** ReloadSchemaKeyspaceRequest concurrency */ + concurrency?: (number|null); } - /** Represents a GetVSchemaResponse. */ - class GetVSchemaResponse implements IGetVSchemaResponse { + /** Represents a ReloadSchemaKeyspaceRequest. */ + class ReloadSchemaKeyspaceRequest implements IReloadSchemaKeyspaceRequest { /** - * Constructs a new GetVSchemaResponse. + * Constructs a new ReloadSchemaKeyspaceRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetVSchemaResponse); + constructor(properties?: vtctldata.IReloadSchemaKeyspaceRequest); - /** GetVSchemaResponse v_schema. */ - public v_schema?: (vschema.IKeyspace|null); + /** ReloadSchemaKeyspaceRequest keyspace. */ + public keyspace: string; + + /** ReloadSchemaKeyspaceRequest wait_position. */ + public wait_position: string; + + /** ReloadSchemaKeyspaceRequest include_primary. */ + public include_primary: boolean; + + /** ReloadSchemaKeyspaceRequest concurrency. */ + public concurrency: number; /** - * Creates a new GetVSchemaResponse instance using the specified properties. + * Creates a new ReloadSchemaKeyspaceRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetVSchemaResponse instance + * @returns ReloadSchemaKeyspaceRequest instance */ - public static create(properties?: vtctldata.IGetVSchemaResponse): vtctldata.GetVSchemaResponse; + public static create(properties?: vtctldata.IReloadSchemaKeyspaceRequest): vtctldata.ReloadSchemaKeyspaceRequest; /** - * Encodes the specified GetVSchemaResponse message. 
Does not implicitly {@link vtctldata.GetVSchemaResponse.verify|verify} messages. - * @param message GetVSchemaResponse message or plain object to encode + * Encodes the specified ReloadSchemaKeyspaceRequest message. Does not implicitly {@link vtctldata.ReloadSchemaKeyspaceRequest.verify|verify} messages. + * @param message ReloadSchemaKeyspaceRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IReloadSchemaKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.GetVSchemaResponse.verify|verify} messages. - * @param message GetVSchemaResponse message or plain object to encode + * Encodes the specified ReloadSchemaKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaKeyspaceRequest.verify|verify} messages. + * @param message ReloadSchemaKeyspaceRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IReloadSchemaKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetVSchemaResponse message from the specified reader or buffer. + * Decodes a ReloadSchemaKeyspaceRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetVSchemaResponse + * @returns ReloadSchemaKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetVSchemaResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ReloadSchemaKeyspaceRequest; /** - * Decodes a GetVSchemaResponse message from the specified reader or buffer, length delimited. + * Decodes a ReloadSchemaKeyspaceRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetVSchemaResponse + * @returns ReloadSchemaKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetVSchemaResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ReloadSchemaKeyspaceRequest; /** - * Verifies a GetVSchemaResponse message. + * Verifies a ReloadSchemaKeyspaceRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetVSchemaResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ReloadSchemaKeyspaceRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns GetVSchemaResponse + * @returns ReloadSchemaKeyspaceRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetVSchemaResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.ReloadSchemaKeyspaceRequest; /** - * Creates a plain object from a GetVSchemaResponse message. Also converts values to other types if specified. - * @param message GetVSchemaResponse + * Creates a plain object from a ReloadSchemaKeyspaceRequest message. Also converts values to other types if specified. + * @param message ReloadSchemaKeyspaceRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetVSchemaResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ReloadSchemaKeyspaceRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetVSchemaResponse to JSON. + * Converts this ReloadSchemaKeyspaceRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetVSchemaResponse + * Gets the default type url for ReloadSchemaKeyspaceRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetWorkflowsRequest. */ - interface IGetWorkflowsRequest { - - /** GetWorkflowsRequest keyspace */ - keyspace?: (string|null); - - /** GetWorkflowsRequest active_only */ - active_only?: (boolean|null); + /** Properties of a ReloadSchemaKeyspaceResponse. */ + interface IReloadSchemaKeyspaceResponse { - /** GetWorkflowsRequest name_only */ - name_only?: (boolean|null); + /** ReloadSchemaKeyspaceResponse events */ + events?: (logutil.IEvent[]|null); } - /** Represents a GetWorkflowsRequest. 
*/ - class GetWorkflowsRequest implements IGetWorkflowsRequest { + /** Represents a ReloadSchemaKeyspaceResponse. */ + class ReloadSchemaKeyspaceResponse implements IReloadSchemaKeyspaceResponse { /** - * Constructs a new GetWorkflowsRequest. + * Constructs a new ReloadSchemaKeyspaceResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IGetWorkflowsRequest); - - /** GetWorkflowsRequest keyspace. */ - public keyspace: string; - - /** GetWorkflowsRequest active_only. */ - public active_only: boolean; + constructor(properties?: vtctldata.IReloadSchemaKeyspaceResponse); - /** GetWorkflowsRequest name_only. */ - public name_only: boolean; + /** ReloadSchemaKeyspaceResponse events. */ + public events: logutil.IEvent[]; /** - * Creates a new GetWorkflowsRequest instance using the specified properties. + * Creates a new ReloadSchemaKeyspaceResponse instance using the specified properties. * @param [properties] Properties to set - * @returns GetWorkflowsRequest instance + * @returns ReloadSchemaKeyspaceResponse instance */ - public static create(properties?: vtctldata.IGetWorkflowsRequest): vtctldata.GetWorkflowsRequest; + public static create(properties?: vtctldata.IReloadSchemaKeyspaceResponse): vtctldata.ReloadSchemaKeyspaceResponse; /** - * Encodes the specified GetWorkflowsRequest message. Does not implicitly {@link vtctldata.GetWorkflowsRequest.verify|verify} messages. - * @param message GetWorkflowsRequest message or plain object to encode + * Encodes the specified ReloadSchemaKeyspaceResponse message. Does not implicitly {@link vtctldata.ReloadSchemaKeyspaceResponse.verify|verify} messages. 
+ * @param message ReloadSchemaKeyspaceResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetWorkflowsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IReloadSchemaKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetWorkflowsRequest message, length delimited. Does not implicitly {@link vtctldata.GetWorkflowsRequest.verify|verify} messages. - * @param message GetWorkflowsRequest message or plain object to encode + * Encodes the specified ReloadSchemaKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaKeyspaceResponse.verify|verify} messages. + * @param message ReloadSchemaKeyspaceResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetWorkflowsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IReloadSchemaKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetWorkflowsRequest message from the specified reader or buffer. + * Decodes a ReloadSchemaKeyspaceResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetWorkflowsRequest + * @returns ReloadSchemaKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetWorkflowsRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ReloadSchemaKeyspaceResponse; /** - * Decodes a GetWorkflowsRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a ReloadSchemaKeyspaceResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns GetWorkflowsRequest + * @returns ReloadSchemaKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetWorkflowsRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ReloadSchemaKeyspaceResponse; /** - * Verifies a GetWorkflowsRequest message. + * Verifies a ReloadSchemaKeyspaceResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetWorkflowsRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ReloadSchemaKeyspaceResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetWorkflowsRequest + * @returns ReloadSchemaKeyspaceResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetWorkflowsRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.ReloadSchemaKeyspaceResponse; /** - * Creates a plain object from a GetWorkflowsRequest message. Also converts values to other types if specified. - * @param message GetWorkflowsRequest + * Creates a plain object from a ReloadSchemaKeyspaceResponse message. Also converts values to other types if specified. 
+ * @param message ReloadSchemaKeyspaceResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetWorkflowsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ReloadSchemaKeyspaceResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetWorkflowsRequest to JSON. + * Converts this ReloadSchemaKeyspaceResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetWorkflowsRequest + * Gets the default type url for ReloadSchemaKeyspaceResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a GetWorkflowsResponse. */ - interface IGetWorkflowsResponse { + /** Properties of a ReloadSchemaShardRequest. */ + interface IReloadSchemaShardRequest { - /** GetWorkflowsResponse workflows */ - workflows?: (vtctldata.IWorkflow[]|null); + /** ReloadSchemaShardRequest keyspace */ + keyspace?: (string|null); + + /** ReloadSchemaShardRequest shard */ + shard?: (string|null); + + /** ReloadSchemaShardRequest wait_position */ + wait_position?: (string|null); + + /** ReloadSchemaShardRequest include_primary */ + include_primary?: (boolean|null); + + /** ReloadSchemaShardRequest concurrency */ + concurrency?: (number|null); } - /** Represents a GetWorkflowsResponse. */ - class GetWorkflowsResponse implements IGetWorkflowsResponse { + /** Represents a ReloadSchemaShardRequest. */ + class ReloadSchemaShardRequest implements IReloadSchemaShardRequest { + + /** + * Constructs a new ReloadSchemaShardRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IReloadSchemaShardRequest); + + /** ReloadSchemaShardRequest keyspace. 
*/ + public keyspace: string; + + /** ReloadSchemaShardRequest shard. */ + public shard: string; + + /** ReloadSchemaShardRequest wait_position. */ + public wait_position: string; - /** - * Constructs a new GetWorkflowsResponse. - * @param [properties] Properties to set - */ - constructor(properties?: vtctldata.IGetWorkflowsResponse); + /** ReloadSchemaShardRequest include_primary. */ + public include_primary: boolean; - /** GetWorkflowsResponse workflows. */ - public workflows: vtctldata.IWorkflow[]; + /** ReloadSchemaShardRequest concurrency. */ + public concurrency: number; /** - * Creates a new GetWorkflowsResponse instance using the specified properties. + * Creates a new ReloadSchemaShardRequest instance using the specified properties. * @param [properties] Properties to set - * @returns GetWorkflowsResponse instance + * @returns ReloadSchemaShardRequest instance */ - public static create(properties?: vtctldata.IGetWorkflowsResponse): vtctldata.GetWorkflowsResponse; + public static create(properties?: vtctldata.IReloadSchemaShardRequest): vtctldata.ReloadSchemaShardRequest; /** - * Encodes the specified GetWorkflowsResponse message. Does not implicitly {@link vtctldata.GetWorkflowsResponse.verify|verify} messages. - * @param message GetWorkflowsResponse message or plain object to encode + * Encodes the specified ReloadSchemaShardRequest message. Does not implicitly {@link vtctldata.ReloadSchemaShardRequest.verify|verify} messages. + * @param message ReloadSchemaShardRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IGetWorkflowsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IReloadSchemaShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified GetWorkflowsResponse message, length delimited. Does not implicitly {@link vtctldata.GetWorkflowsResponse.verify|verify} messages. 
- * @param message GetWorkflowsResponse message or plain object to encode + * Encodes the specified ReloadSchemaShardRequest message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaShardRequest.verify|verify} messages. + * @param message ReloadSchemaShardRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IGetWorkflowsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IReloadSchemaShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a GetWorkflowsResponse message from the specified reader or buffer. + * Decodes a ReloadSchemaShardRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns GetWorkflowsResponse + * @returns ReloadSchemaShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetWorkflowsResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ReloadSchemaShardRequest; /** - * Decodes a GetWorkflowsResponse message from the specified reader or buffer, length delimited. + * Decodes a ReloadSchemaShardRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns GetWorkflowsResponse + * @returns ReloadSchemaShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetWorkflowsResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ReloadSchemaShardRequest; /** - * Verifies a GetWorkflowsResponse message. + * Verifies a ReloadSchemaShardRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a GetWorkflowsResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ReloadSchemaShardRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns GetWorkflowsResponse + * @returns ReloadSchemaShardRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.GetWorkflowsResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.ReloadSchemaShardRequest; /** - * Creates a plain object from a GetWorkflowsResponse message. Also converts values to other types if specified. - * @param message GetWorkflowsResponse + * Creates a plain object from a ReloadSchemaShardRequest message. Also converts values to other types if specified. + * @param message ReloadSchemaShardRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.GetWorkflowsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ReloadSchemaShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this GetWorkflowsResponse to JSON. 
+ * Converts this ReloadSchemaShardRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for GetWorkflowsResponse + * Gets the default type url for ReloadSchemaShardRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an InitShardPrimaryRequest. */ - interface IInitShardPrimaryRequest { - - /** InitShardPrimaryRequest keyspace */ - keyspace?: (string|null); - - /** InitShardPrimaryRequest shard */ - shard?: (string|null); - - /** InitShardPrimaryRequest primary_elect_tablet_alias */ - primary_elect_tablet_alias?: (topodata.ITabletAlias|null); - - /** InitShardPrimaryRequest force */ - force?: (boolean|null); + /** Properties of a ReloadSchemaShardResponse. */ + interface IReloadSchemaShardResponse { - /** InitShardPrimaryRequest wait_replicas_timeout */ - wait_replicas_timeout?: (vttime.IDuration|null); + /** ReloadSchemaShardResponse events */ + events?: (logutil.IEvent[]|null); } - /** Represents an InitShardPrimaryRequest. */ - class InitShardPrimaryRequest implements IInitShardPrimaryRequest { + /** Represents a ReloadSchemaShardResponse. */ + class ReloadSchemaShardResponse implements IReloadSchemaShardResponse { /** - * Constructs a new InitShardPrimaryRequest. + * Constructs a new ReloadSchemaShardResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IInitShardPrimaryRequest); - - /** InitShardPrimaryRequest keyspace. */ - public keyspace: string; - - /** InitShardPrimaryRequest shard. */ - public shard: string; - - /** InitShardPrimaryRequest primary_elect_tablet_alias. */ - public primary_elect_tablet_alias?: (topodata.ITabletAlias|null); - - /** InitShardPrimaryRequest force. 
*/ - public force: boolean; + constructor(properties?: vtctldata.IReloadSchemaShardResponse); - /** InitShardPrimaryRequest wait_replicas_timeout. */ - public wait_replicas_timeout?: (vttime.IDuration|null); + /** ReloadSchemaShardResponse events. */ + public events: logutil.IEvent[]; /** - * Creates a new InitShardPrimaryRequest instance using the specified properties. + * Creates a new ReloadSchemaShardResponse instance using the specified properties. * @param [properties] Properties to set - * @returns InitShardPrimaryRequest instance + * @returns ReloadSchemaShardResponse instance */ - public static create(properties?: vtctldata.IInitShardPrimaryRequest): vtctldata.InitShardPrimaryRequest; + public static create(properties?: vtctldata.IReloadSchemaShardResponse): vtctldata.ReloadSchemaShardResponse; /** - * Encodes the specified InitShardPrimaryRequest message. Does not implicitly {@link vtctldata.InitShardPrimaryRequest.verify|verify} messages. - * @param message InitShardPrimaryRequest message or plain object to encode + * Encodes the specified ReloadSchemaShardResponse message. Does not implicitly {@link vtctldata.ReloadSchemaShardResponse.verify|verify} messages. + * @param message ReloadSchemaShardResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IInitShardPrimaryRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IReloadSchemaShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified InitShardPrimaryRequest message, length delimited. Does not implicitly {@link vtctldata.InitShardPrimaryRequest.verify|verify} messages. - * @param message InitShardPrimaryRequest message or plain object to encode + * Encodes the specified ReloadSchemaShardResponse message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaShardResponse.verify|verify} messages. 
+ * @param message ReloadSchemaShardResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IInitShardPrimaryRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IReloadSchemaShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an InitShardPrimaryRequest message from the specified reader or buffer. + * Decodes a ReloadSchemaShardResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns InitShardPrimaryRequest + * @returns ReloadSchemaShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.InitShardPrimaryRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ReloadSchemaShardResponse; /** - * Decodes an InitShardPrimaryRequest message from the specified reader or buffer, length delimited. + * Decodes a ReloadSchemaShardResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns InitShardPrimaryRequest + * @returns ReloadSchemaShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.InitShardPrimaryRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ReloadSchemaShardResponse; /** - * Verifies an InitShardPrimaryRequest message. + * Verifies a ReloadSchemaShardResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an InitShardPrimaryRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ReloadSchemaShardResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns InitShardPrimaryRequest + * @returns ReloadSchemaShardResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.InitShardPrimaryRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.ReloadSchemaShardResponse; /** - * Creates a plain object from an InitShardPrimaryRequest message. Also converts values to other types if specified. - * @param message InitShardPrimaryRequest + * Creates a plain object from a ReloadSchemaShardResponse message. Also converts values to other types if specified. + * @param message ReloadSchemaShardResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.InitShardPrimaryRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ReloadSchemaShardResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this InitShardPrimaryRequest to JSON. + * Converts this ReloadSchemaShardResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for InitShardPrimaryRequest + * Gets the default type url for ReloadSchemaShardResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an InitShardPrimaryResponse. */ - interface IInitShardPrimaryResponse { + /** Properties of a RemoveBackupRequest. 
*/ + interface IRemoveBackupRequest { - /** InitShardPrimaryResponse events */ - events?: (logutil.IEvent[]|null); + /** RemoveBackupRequest keyspace */ + keyspace?: (string|null); + + /** RemoveBackupRequest shard */ + shard?: (string|null); + + /** RemoveBackupRequest name */ + name?: (string|null); } - /** Represents an InitShardPrimaryResponse. */ - class InitShardPrimaryResponse implements IInitShardPrimaryResponse { + /** Represents a RemoveBackupRequest. */ + class RemoveBackupRequest implements IRemoveBackupRequest { /** - * Constructs a new InitShardPrimaryResponse. + * Constructs a new RemoveBackupRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IInitShardPrimaryResponse); + constructor(properties?: vtctldata.IRemoveBackupRequest); - /** InitShardPrimaryResponse events. */ - public events: logutil.IEvent[]; + /** RemoveBackupRequest keyspace. */ + public keyspace: string; + + /** RemoveBackupRequest shard. */ + public shard: string; + + /** RemoveBackupRequest name. */ + public name: string; /** - * Creates a new InitShardPrimaryResponse instance using the specified properties. + * Creates a new RemoveBackupRequest instance using the specified properties. * @param [properties] Properties to set - * @returns InitShardPrimaryResponse instance + * @returns RemoveBackupRequest instance */ - public static create(properties?: vtctldata.IInitShardPrimaryResponse): vtctldata.InitShardPrimaryResponse; + public static create(properties?: vtctldata.IRemoveBackupRequest): vtctldata.RemoveBackupRequest; /** - * Encodes the specified InitShardPrimaryResponse message. Does not implicitly {@link vtctldata.InitShardPrimaryResponse.verify|verify} messages. - * @param message InitShardPrimaryResponse message or plain object to encode + * Encodes the specified RemoveBackupRequest message. Does not implicitly {@link vtctldata.RemoveBackupRequest.verify|verify} messages. 
+ * @param message RemoveBackupRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IInitShardPrimaryResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRemoveBackupRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified InitShardPrimaryResponse message, length delimited. Does not implicitly {@link vtctldata.InitShardPrimaryResponse.verify|verify} messages. - * @param message InitShardPrimaryResponse message or plain object to encode + * Encodes the specified RemoveBackupRequest message, length delimited. Does not implicitly {@link vtctldata.RemoveBackupRequest.verify|verify} messages. + * @param message RemoveBackupRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IInitShardPrimaryResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRemoveBackupRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an InitShardPrimaryResponse message from the specified reader or buffer. + * Decodes a RemoveBackupRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns InitShardPrimaryResponse + * @returns RemoveBackupRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.InitShardPrimaryResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RemoveBackupRequest; /** - * Decodes an InitShardPrimaryResponse message from the specified reader or buffer, length delimited. 
+ * Decodes a RemoveBackupRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns InitShardPrimaryResponse + * @returns RemoveBackupRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.InitShardPrimaryResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RemoveBackupRequest; /** - * Verifies an InitShardPrimaryResponse message. + * Verifies a RemoveBackupRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an InitShardPrimaryResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RemoveBackupRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns InitShardPrimaryResponse + * @returns RemoveBackupRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.InitShardPrimaryResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.RemoveBackupRequest; /** - * Creates a plain object from an InitShardPrimaryResponse message. Also converts values to other types if specified. - * @param message InitShardPrimaryResponse + * Creates a plain object from a RemoveBackupRequest message. Also converts values to other types if specified. 
+ * @param message RemoveBackupRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.InitShardPrimaryResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RemoveBackupRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this InitShardPrimaryResponse to JSON. + * Converts this RemoveBackupRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for InitShardPrimaryResponse + * Gets the default type url for RemoveBackupRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a PingTabletRequest. */ - interface IPingTabletRequest { - - /** PingTabletRequest tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); + /** Properties of a RemoveBackupResponse. */ + interface IRemoveBackupResponse { } - /** Represents a PingTabletRequest. */ - class PingTabletRequest implements IPingTabletRequest { + /** Represents a RemoveBackupResponse. */ + class RemoveBackupResponse implements IRemoveBackupResponse { /** - * Constructs a new PingTabletRequest. + * Constructs a new RemoveBackupResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IPingTabletRequest); - - /** PingTabletRequest tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); + constructor(properties?: vtctldata.IRemoveBackupResponse); /** - * Creates a new PingTabletRequest instance using the specified properties. + * Creates a new RemoveBackupResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns PingTabletRequest instance + * @returns RemoveBackupResponse instance */ - public static create(properties?: vtctldata.IPingTabletRequest): vtctldata.PingTabletRequest; + public static create(properties?: vtctldata.IRemoveBackupResponse): vtctldata.RemoveBackupResponse; /** - * Encodes the specified PingTabletRequest message. Does not implicitly {@link vtctldata.PingTabletRequest.verify|verify} messages. - * @param message PingTabletRequest message or plain object to encode + * Encodes the specified RemoveBackupResponse message. Does not implicitly {@link vtctldata.RemoveBackupResponse.verify|verify} messages. + * @param message RemoveBackupResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IPingTabletRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRemoveBackupResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified PingTabletRequest message, length delimited. Does not implicitly {@link vtctldata.PingTabletRequest.verify|verify} messages. - * @param message PingTabletRequest message or plain object to encode + * Encodes the specified RemoveBackupResponse message, length delimited. Does not implicitly {@link vtctldata.RemoveBackupResponse.verify|verify} messages. + * @param message RemoveBackupResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IPingTabletRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRemoveBackupResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a PingTabletRequest message from the specified reader or buffer. + * Decodes a RemoveBackupResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns PingTabletRequest + * @returns RemoveBackupResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.PingTabletRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RemoveBackupResponse; /** - * Decodes a PingTabletRequest message from the specified reader or buffer, length delimited. + * Decodes a RemoveBackupResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns PingTabletRequest + * @returns RemoveBackupResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.PingTabletRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RemoveBackupResponse; /** - * Verifies a PingTabletRequest message. + * Verifies a RemoveBackupResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a PingTabletRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RemoveBackupResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns PingTabletRequest + * @returns RemoveBackupResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.PingTabletRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.RemoveBackupResponse; /** - * Creates a plain object from a PingTabletRequest message. Also converts values to other types if specified. - * @param message PingTabletRequest + * Creates a plain object from a RemoveBackupResponse message. Also converts values to other types if specified. + * @param message RemoveBackupResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.PingTabletRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RemoveBackupResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this PingTabletRequest to JSON. + * Converts this RemoveBackupResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for PingTabletRequest + * Gets the default type url for RemoveBackupResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a PingTabletResponse. */ - interface IPingTabletResponse { + /** Properties of a RemoveKeyspaceCellRequest. */ + interface IRemoveKeyspaceCellRequest { + + /** RemoveKeyspaceCellRequest keyspace */ + keyspace?: (string|null); + + /** RemoveKeyspaceCellRequest cell */ + cell?: (string|null); + + /** RemoveKeyspaceCellRequest force */ + force?: (boolean|null); + + /** RemoveKeyspaceCellRequest recursive */ + recursive?: (boolean|null); } - /** Represents a PingTabletResponse. */ - class PingTabletResponse implements IPingTabletResponse { + /** Represents a RemoveKeyspaceCellRequest. 
*/ + class RemoveKeyspaceCellRequest implements IRemoveKeyspaceCellRequest { /** - * Constructs a new PingTabletResponse. + * Constructs a new RemoveKeyspaceCellRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IPingTabletResponse); + constructor(properties?: vtctldata.IRemoveKeyspaceCellRequest); + + /** RemoveKeyspaceCellRequest keyspace. */ + public keyspace: string; + + /** RemoveKeyspaceCellRequest cell. */ + public cell: string; + + /** RemoveKeyspaceCellRequest force. */ + public force: boolean; + + /** RemoveKeyspaceCellRequest recursive. */ + public recursive: boolean; /** - * Creates a new PingTabletResponse instance using the specified properties. + * Creates a new RemoveKeyspaceCellRequest instance using the specified properties. * @param [properties] Properties to set - * @returns PingTabletResponse instance + * @returns RemoveKeyspaceCellRequest instance */ - public static create(properties?: vtctldata.IPingTabletResponse): vtctldata.PingTabletResponse; + public static create(properties?: vtctldata.IRemoveKeyspaceCellRequest): vtctldata.RemoveKeyspaceCellRequest; /** - * Encodes the specified PingTabletResponse message. Does not implicitly {@link vtctldata.PingTabletResponse.verify|verify} messages. - * @param message PingTabletResponse message or plain object to encode + * Encodes the specified RemoveKeyspaceCellRequest message. Does not implicitly {@link vtctldata.RemoveKeyspaceCellRequest.verify|verify} messages. + * @param message RemoveKeyspaceCellRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IPingTabletResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRemoveKeyspaceCellRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified PingTabletResponse message, length delimited. 
Does not implicitly {@link vtctldata.PingTabletResponse.verify|verify} messages. - * @param message PingTabletResponse message or plain object to encode + * Encodes the specified RemoveKeyspaceCellRequest message, length delimited. Does not implicitly {@link vtctldata.RemoveKeyspaceCellRequest.verify|verify} messages. + * @param message RemoveKeyspaceCellRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IPingTabletResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRemoveKeyspaceCellRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a PingTabletResponse message from the specified reader or buffer. + * Decodes a RemoveKeyspaceCellRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns PingTabletResponse + * @returns RemoveKeyspaceCellRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.PingTabletResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RemoveKeyspaceCellRequest; /** - * Decodes a PingTabletResponse message from the specified reader or buffer, length delimited. + * Decodes a RemoveKeyspaceCellRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns PingTabletResponse + * @returns RemoveKeyspaceCellRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.PingTabletResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RemoveKeyspaceCellRequest; /** - * Verifies a PingTabletResponse message. + * Verifies a RemoveKeyspaceCellRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a PingTabletResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RemoveKeyspaceCellRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns PingTabletResponse + * @returns RemoveKeyspaceCellRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.PingTabletResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.RemoveKeyspaceCellRequest; /** - * Creates a plain object from a PingTabletResponse message. Also converts values to other types if specified. - * @param message PingTabletResponse + * Creates a plain object from a RemoveKeyspaceCellRequest message. Also converts values to other types if specified. + * @param message RemoveKeyspaceCellRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.PingTabletResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RemoveKeyspaceCellRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this PingTabletResponse to JSON. 
+ * Converts this RemoveKeyspaceCellRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for PingTabletResponse + * Gets the default type url for RemoveKeyspaceCellRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a PlannedReparentShardRequest. */ - interface IPlannedReparentShardRequest { - - /** PlannedReparentShardRequest keyspace */ - keyspace?: (string|null); - - /** PlannedReparentShardRequest shard */ - shard?: (string|null); - - /** PlannedReparentShardRequest new_primary */ - new_primary?: (topodata.ITabletAlias|null); - - /** PlannedReparentShardRequest avoid_primary */ - avoid_primary?: (topodata.ITabletAlias|null); - - /** PlannedReparentShardRequest wait_replicas_timeout */ - wait_replicas_timeout?: (vttime.IDuration|null); + /** Properties of a RemoveKeyspaceCellResponse. */ + interface IRemoveKeyspaceCellResponse { } - /** Represents a PlannedReparentShardRequest. */ - class PlannedReparentShardRequest implements IPlannedReparentShardRequest { + /** Represents a RemoveKeyspaceCellResponse. */ + class RemoveKeyspaceCellResponse implements IRemoveKeyspaceCellResponse { /** - * Constructs a new PlannedReparentShardRequest. + * Constructs a new RemoveKeyspaceCellResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IPlannedReparentShardRequest); - - /** PlannedReparentShardRequest keyspace. */ - public keyspace: string; - - /** PlannedReparentShardRequest shard. */ - public shard: string; - - /** PlannedReparentShardRequest new_primary. */ - public new_primary?: (topodata.ITabletAlias|null); - - /** PlannedReparentShardRequest avoid_primary. */ - public avoid_primary?: (topodata.ITabletAlias|null); - - /** PlannedReparentShardRequest wait_replicas_timeout. 
*/ - public wait_replicas_timeout?: (vttime.IDuration|null); + constructor(properties?: vtctldata.IRemoveKeyspaceCellResponse); /** - * Creates a new PlannedReparentShardRequest instance using the specified properties. + * Creates a new RemoveKeyspaceCellResponse instance using the specified properties. * @param [properties] Properties to set - * @returns PlannedReparentShardRequest instance + * @returns RemoveKeyspaceCellResponse instance */ - public static create(properties?: vtctldata.IPlannedReparentShardRequest): vtctldata.PlannedReparentShardRequest; + public static create(properties?: vtctldata.IRemoveKeyspaceCellResponse): vtctldata.RemoveKeyspaceCellResponse; /** - * Encodes the specified PlannedReparentShardRequest message. Does not implicitly {@link vtctldata.PlannedReparentShardRequest.verify|verify} messages. - * @param message PlannedReparentShardRequest message or plain object to encode + * Encodes the specified RemoveKeyspaceCellResponse message. Does not implicitly {@link vtctldata.RemoveKeyspaceCellResponse.verify|verify} messages. + * @param message RemoveKeyspaceCellResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IPlannedReparentShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRemoveKeyspaceCellResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified PlannedReparentShardRequest message, length delimited. Does not implicitly {@link vtctldata.PlannedReparentShardRequest.verify|verify} messages. - * @param message PlannedReparentShardRequest message or plain object to encode + * Encodes the specified RemoveKeyspaceCellResponse message, length delimited. Does not implicitly {@link vtctldata.RemoveKeyspaceCellResponse.verify|verify} messages. 
+ * @param message RemoveKeyspaceCellResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IPlannedReparentShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRemoveKeyspaceCellResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a PlannedReparentShardRequest message from the specified reader or buffer. + * Decodes a RemoveKeyspaceCellResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns PlannedReparentShardRequest + * @returns RemoveKeyspaceCellResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.PlannedReparentShardRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RemoveKeyspaceCellResponse; /** - * Decodes a PlannedReparentShardRequest message from the specified reader or buffer, length delimited. + * Decodes a RemoveKeyspaceCellResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns PlannedReparentShardRequest + * @returns RemoveKeyspaceCellResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.PlannedReparentShardRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RemoveKeyspaceCellResponse; /** - * Verifies a PlannedReparentShardRequest message. + * Verifies a RemoveKeyspaceCellResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a PlannedReparentShardRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RemoveKeyspaceCellResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns PlannedReparentShardRequest + * @returns RemoveKeyspaceCellResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.PlannedReparentShardRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.RemoveKeyspaceCellResponse; /** - * Creates a plain object from a PlannedReparentShardRequest message. Also converts values to other types if specified. - * @param message PlannedReparentShardRequest + * Creates a plain object from a RemoveKeyspaceCellResponse message. Also converts values to other types if specified. + * @param message RemoveKeyspaceCellResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.PlannedReparentShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RemoveKeyspaceCellResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this PlannedReparentShardRequest to JSON. + * Converts this RemoveKeyspaceCellResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for PlannedReparentShardRequest + * Gets the default type url for RemoveKeyspaceCellResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a PlannedReparentShardResponse. 
*/ - interface IPlannedReparentShardResponse { + /** Properties of a RemoveShardCellRequest. */ + interface IRemoveShardCellRequest { - /** PlannedReparentShardResponse keyspace */ + /** RemoveShardCellRequest keyspace */ keyspace?: (string|null); - /** PlannedReparentShardResponse shard */ - shard?: (string|null); + /** RemoveShardCellRequest shard_name */ + shard_name?: (string|null); - /** PlannedReparentShardResponse promoted_primary */ - promoted_primary?: (topodata.ITabletAlias|null); + /** RemoveShardCellRequest cell */ + cell?: (string|null); - /** PlannedReparentShardResponse events */ - events?: (logutil.IEvent[]|null); + /** RemoveShardCellRequest force */ + force?: (boolean|null); + + /** RemoveShardCellRequest recursive */ + recursive?: (boolean|null); } - /** Represents a PlannedReparentShardResponse. */ - class PlannedReparentShardResponse implements IPlannedReparentShardResponse { + /** Represents a RemoveShardCellRequest. */ + class RemoveShardCellRequest implements IRemoveShardCellRequest { /** - * Constructs a new PlannedReparentShardResponse. + * Constructs a new RemoveShardCellRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IPlannedReparentShardResponse); + constructor(properties?: vtctldata.IRemoveShardCellRequest); - /** PlannedReparentShardResponse keyspace. */ + /** RemoveShardCellRequest keyspace. */ public keyspace: string; - /** PlannedReparentShardResponse shard. */ - public shard: string; + /** RemoveShardCellRequest shard_name. */ + public shard_name: string; - /** PlannedReparentShardResponse promoted_primary. */ - public promoted_primary?: (topodata.ITabletAlias|null); + /** RemoveShardCellRequest cell. */ + public cell: string; + + /** RemoveShardCellRequest force. */ + public force: boolean; - /** PlannedReparentShardResponse events. */ - public events: logutil.IEvent[]; + /** RemoveShardCellRequest recursive. 
*/ + public recursive: boolean; /** - * Creates a new PlannedReparentShardResponse instance using the specified properties. + * Creates a new RemoveShardCellRequest instance using the specified properties. * @param [properties] Properties to set - * @returns PlannedReparentShardResponse instance + * @returns RemoveShardCellRequest instance */ - public static create(properties?: vtctldata.IPlannedReparentShardResponse): vtctldata.PlannedReparentShardResponse; + public static create(properties?: vtctldata.IRemoveShardCellRequest): vtctldata.RemoveShardCellRequest; /** - * Encodes the specified PlannedReparentShardResponse message. Does not implicitly {@link vtctldata.PlannedReparentShardResponse.verify|verify} messages. - * @param message PlannedReparentShardResponse message or plain object to encode + * Encodes the specified RemoveShardCellRequest message. Does not implicitly {@link vtctldata.RemoveShardCellRequest.verify|verify} messages. + * @param message RemoveShardCellRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IPlannedReparentShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRemoveShardCellRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified PlannedReparentShardResponse message, length delimited. Does not implicitly {@link vtctldata.PlannedReparentShardResponse.verify|verify} messages. - * @param message PlannedReparentShardResponse message or plain object to encode + * Encodes the specified RemoveShardCellRequest message, length delimited. Does not implicitly {@link vtctldata.RemoveShardCellRequest.verify|verify} messages. 
+ * @param message RemoveShardCellRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IPlannedReparentShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRemoveShardCellRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a PlannedReparentShardResponse message from the specified reader or buffer. + * Decodes a RemoveShardCellRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns PlannedReparentShardResponse + * @returns RemoveShardCellRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.PlannedReparentShardResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RemoveShardCellRequest; /** - * Decodes a PlannedReparentShardResponse message from the specified reader or buffer, length delimited. + * Decodes a RemoveShardCellRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns PlannedReparentShardResponse + * @returns RemoveShardCellRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.PlannedReparentShardResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RemoveShardCellRequest; /** - * Verifies a PlannedReparentShardResponse message. + * Verifies a RemoveShardCellRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a PlannedReparentShardResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RemoveShardCellRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns PlannedReparentShardResponse + * @returns RemoveShardCellRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.PlannedReparentShardResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.RemoveShardCellRequest; /** - * Creates a plain object from a PlannedReparentShardResponse message. Also converts values to other types if specified. - * @param message PlannedReparentShardResponse + * Creates a plain object from a RemoveShardCellRequest message. Also converts values to other types if specified. + * @param message RemoveShardCellRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.PlannedReparentShardResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RemoveShardCellRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this PlannedReparentShardResponse to JSON. + * Converts this RemoveShardCellRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for PlannedReparentShardResponse + * Gets the default type url for RemoveShardCellRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RebuildKeyspaceGraphRequest. 
*/ - interface IRebuildKeyspaceGraphRequest { - - /** RebuildKeyspaceGraphRequest keyspace */ - keyspace?: (string|null); - - /** RebuildKeyspaceGraphRequest cells */ - cells?: (string[]|null); - - /** RebuildKeyspaceGraphRequest allow_partial */ - allow_partial?: (boolean|null); + /** Properties of a RemoveShardCellResponse. */ + interface IRemoveShardCellResponse { } - /** Represents a RebuildKeyspaceGraphRequest. */ - class RebuildKeyspaceGraphRequest implements IRebuildKeyspaceGraphRequest { + /** Represents a RemoveShardCellResponse. */ + class RemoveShardCellResponse implements IRemoveShardCellResponse { /** - * Constructs a new RebuildKeyspaceGraphRequest. + * Constructs a new RemoveShardCellResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IRebuildKeyspaceGraphRequest); - - /** RebuildKeyspaceGraphRequest keyspace. */ - public keyspace: string; - - /** RebuildKeyspaceGraphRequest cells. */ - public cells: string[]; - - /** RebuildKeyspaceGraphRequest allow_partial. */ - public allow_partial: boolean; + constructor(properties?: vtctldata.IRemoveShardCellResponse); /** - * Creates a new RebuildKeyspaceGraphRequest instance using the specified properties. + * Creates a new RemoveShardCellResponse instance using the specified properties. * @param [properties] Properties to set - * @returns RebuildKeyspaceGraphRequest instance + * @returns RemoveShardCellResponse instance */ - public static create(properties?: vtctldata.IRebuildKeyspaceGraphRequest): vtctldata.RebuildKeyspaceGraphRequest; + public static create(properties?: vtctldata.IRemoveShardCellResponse): vtctldata.RemoveShardCellResponse; /** - * Encodes the specified RebuildKeyspaceGraphRequest message. Does not implicitly {@link vtctldata.RebuildKeyspaceGraphRequest.verify|verify} messages. - * @param message RebuildKeyspaceGraphRequest message or plain object to encode + * Encodes the specified RemoveShardCellResponse message. 
Does not implicitly {@link vtctldata.RemoveShardCellResponse.verify|verify} messages. + * @param message RemoveShardCellResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IRebuildKeyspaceGraphRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRemoveShardCellResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RebuildKeyspaceGraphRequest message, length delimited. Does not implicitly {@link vtctldata.RebuildKeyspaceGraphRequest.verify|verify} messages. - * @param message RebuildKeyspaceGraphRequest message or plain object to encode + * Encodes the specified RemoveShardCellResponse message, length delimited. Does not implicitly {@link vtctldata.RemoveShardCellResponse.verify|verify} messages. + * @param message RemoveShardCellResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IRebuildKeyspaceGraphRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRemoveShardCellResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RebuildKeyspaceGraphRequest message from the specified reader or buffer. + * Decodes a RemoveShardCellResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RebuildKeyspaceGraphRequest + * @returns RemoveShardCellResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RebuildKeyspaceGraphRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RemoveShardCellResponse; /** - * Decodes a RebuildKeyspaceGraphRequest message from the specified reader or buffer, length delimited. + * Decodes a RemoveShardCellResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RebuildKeyspaceGraphRequest + * @returns RemoveShardCellResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RebuildKeyspaceGraphRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RemoveShardCellResponse; /** - * Verifies a RebuildKeyspaceGraphRequest message. + * Verifies a RemoveShardCellResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RebuildKeyspaceGraphRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RemoveShardCellResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns RebuildKeyspaceGraphRequest + * @returns RemoveShardCellResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.RebuildKeyspaceGraphRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.RemoveShardCellResponse; /** - * Creates a plain object from a RebuildKeyspaceGraphRequest message. Also converts values to other types if specified. - * @param message RebuildKeyspaceGraphRequest + * Creates a plain object from a RemoveShardCellResponse message. Also converts values to other types if specified. + * @param message RemoveShardCellResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.RebuildKeyspaceGraphRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RemoveShardCellResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RebuildKeyspaceGraphRequest to JSON. + * Converts this RemoveShardCellResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RebuildKeyspaceGraphRequest + * Gets the default type url for RemoveShardCellResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RebuildKeyspaceGraphResponse. */ - interface IRebuildKeyspaceGraphResponse { + /** Properties of a ReparentTabletRequest. */ + interface IReparentTabletRequest { + + /** ReparentTabletRequest tablet */ + tablet?: (topodata.ITabletAlias|null); } - /** Represents a RebuildKeyspaceGraphResponse. */ - class RebuildKeyspaceGraphResponse implements IRebuildKeyspaceGraphResponse { + /** Represents a ReparentTabletRequest. 
*/ + class ReparentTabletRequest implements IReparentTabletRequest { /** - * Constructs a new RebuildKeyspaceGraphResponse. + * Constructs a new ReparentTabletRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IRebuildKeyspaceGraphResponse); + constructor(properties?: vtctldata.IReparentTabletRequest); + + /** ReparentTabletRequest tablet. */ + public tablet?: (topodata.ITabletAlias|null); /** - * Creates a new RebuildKeyspaceGraphResponse instance using the specified properties. + * Creates a new ReparentTabletRequest instance using the specified properties. * @param [properties] Properties to set - * @returns RebuildKeyspaceGraphResponse instance + * @returns ReparentTabletRequest instance */ - public static create(properties?: vtctldata.IRebuildKeyspaceGraphResponse): vtctldata.RebuildKeyspaceGraphResponse; + public static create(properties?: vtctldata.IReparentTabletRequest): vtctldata.ReparentTabletRequest; /** - * Encodes the specified RebuildKeyspaceGraphResponse message. Does not implicitly {@link vtctldata.RebuildKeyspaceGraphResponse.verify|verify} messages. - * @param message RebuildKeyspaceGraphResponse message or plain object to encode + * Encodes the specified ReparentTabletRequest message. Does not implicitly {@link vtctldata.ReparentTabletRequest.verify|verify} messages. + * @param message ReparentTabletRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IRebuildKeyspaceGraphResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IReparentTabletRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RebuildKeyspaceGraphResponse message, length delimited. Does not implicitly {@link vtctldata.RebuildKeyspaceGraphResponse.verify|verify} messages. 
- * @param message RebuildKeyspaceGraphResponse message or plain object to encode + * Encodes the specified ReparentTabletRequest message, length delimited. Does not implicitly {@link vtctldata.ReparentTabletRequest.verify|verify} messages. + * @param message ReparentTabletRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IRebuildKeyspaceGraphResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IReparentTabletRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RebuildKeyspaceGraphResponse message from the specified reader or buffer. + * Decodes a ReparentTabletRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RebuildKeyspaceGraphResponse + * @returns ReparentTabletRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RebuildKeyspaceGraphResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ReparentTabletRequest; /** - * Decodes a RebuildKeyspaceGraphResponse message from the specified reader or buffer, length delimited. + * Decodes a ReparentTabletRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns RebuildKeyspaceGraphResponse + * @returns ReparentTabletRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RebuildKeyspaceGraphResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ReparentTabletRequest; /** - * Verifies a RebuildKeyspaceGraphResponse message. + * Verifies a ReparentTabletRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RebuildKeyspaceGraphResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ReparentTabletRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns RebuildKeyspaceGraphResponse + * @returns ReparentTabletRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.RebuildKeyspaceGraphResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.ReparentTabletRequest; /** - * Creates a plain object from a RebuildKeyspaceGraphResponse message. Also converts values to other types if specified. - * @param message RebuildKeyspaceGraphResponse + * Creates a plain object from a ReparentTabletRequest message. Also converts values to other types if specified. 
+ * @param message ReparentTabletRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.RebuildKeyspaceGraphResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ReparentTabletRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RebuildKeyspaceGraphResponse to JSON. + * Converts this ReparentTabletRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RebuildKeyspaceGraphResponse + * Gets the default type url for ReparentTabletRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RebuildVSchemaGraphRequest. */ - interface IRebuildVSchemaGraphRequest { + /** Properties of a ReparentTabletResponse. */ + interface IReparentTabletResponse { - /** RebuildVSchemaGraphRequest cells */ - cells?: (string[]|null); + /** ReparentTabletResponse keyspace */ + keyspace?: (string|null); + + /** ReparentTabletResponse shard */ + shard?: (string|null); + + /** ReparentTabletResponse primary */ + primary?: (topodata.ITabletAlias|null); } - /** Represents a RebuildVSchemaGraphRequest. */ - class RebuildVSchemaGraphRequest implements IRebuildVSchemaGraphRequest { + /** Represents a ReparentTabletResponse. */ + class ReparentTabletResponse implements IReparentTabletResponse { /** - * Constructs a new RebuildVSchemaGraphRequest. + * Constructs a new ReparentTabletResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IRebuildVSchemaGraphRequest); + constructor(properties?: vtctldata.IReparentTabletResponse); - /** RebuildVSchemaGraphRequest cells. */ - public cells: string[]; + /** ReparentTabletResponse keyspace. 
*/ + public keyspace: string; + + /** ReparentTabletResponse shard. */ + public shard: string; + + /** ReparentTabletResponse primary. */ + public primary?: (topodata.ITabletAlias|null); /** - * Creates a new RebuildVSchemaGraphRequest instance using the specified properties. + * Creates a new ReparentTabletResponse instance using the specified properties. * @param [properties] Properties to set - * @returns RebuildVSchemaGraphRequest instance + * @returns ReparentTabletResponse instance */ - public static create(properties?: vtctldata.IRebuildVSchemaGraphRequest): vtctldata.RebuildVSchemaGraphRequest; + public static create(properties?: vtctldata.IReparentTabletResponse): vtctldata.ReparentTabletResponse; /** - * Encodes the specified RebuildVSchemaGraphRequest message. Does not implicitly {@link vtctldata.RebuildVSchemaGraphRequest.verify|verify} messages. - * @param message RebuildVSchemaGraphRequest message or plain object to encode + * Encodes the specified ReparentTabletResponse message. Does not implicitly {@link vtctldata.ReparentTabletResponse.verify|verify} messages. + * @param message ReparentTabletResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IRebuildVSchemaGraphRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IReparentTabletResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RebuildVSchemaGraphRequest message, length delimited. Does not implicitly {@link vtctldata.RebuildVSchemaGraphRequest.verify|verify} messages. - * @param message RebuildVSchemaGraphRequest message or plain object to encode + * Encodes the specified ReparentTabletResponse message, length delimited. Does not implicitly {@link vtctldata.ReparentTabletResponse.verify|verify} messages. 
+ * @param message ReparentTabletResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IRebuildVSchemaGraphRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IReparentTabletResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RebuildVSchemaGraphRequest message from the specified reader or buffer. + * Decodes a ReparentTabletResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RebuildVSchemaGraphRequest + * @returns ReparentTabletResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RebuildVSchemaGraphRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ReparentTabletResponse; /** - * Decodes a RebuildVSchemaGraphRequest message from the specified reader or buffer, length delimited. + * Decodes a ReparentTabletResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RebuildVSchemaGraphRequest + * @returns ReparentTabletResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RebuildVSchemaGraphRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ReparentTabletResponse; /** - * Verifies a RebuildVSchemaGraphRequest message. + * Verifies a ReparentTabletResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RebuildVSchemaGraphRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ReparentTabletResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns RebuildVSchemaGraphRequest + * @returns ReparentTabletResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.RebuildVSchemaGraphRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.ReparentTabletResponse; /** - * Creates a plain object from a RebuildVSchemaGraphRequest message. Also converts values to other types if specified. - * @param message RebuildVSchemaGraphRequest + * Creates a plain object from a ReparentTabletResponse message. Also converts values to other types if specified. + * @param message ReparentTabletResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.RebuildVSchemaGraphRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ReparentTabletResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RebuildVSchemaGraphRequest to JSON. + * Converts this ReparentTabletResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RebuildVSchemaGraphRequest + * Gets the default type url for ReparentTabletResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RebuildVSchemaGraphResponse. */ - interface IRebuildVSchemaGraphResponse { + /** Properties of a ReshardCreateRequest. 
*/ + interface IReshardCreateRequest { + + /** ReshardCreateRequest workflow */ + workflow?: (string|null); + + /** ReshardCreateRequest keyspace */ + keyspace?: (string|null); + + /** ReshardCreateRequest source_shards */ + source_shards?: (string[]|null); + + /** ReshardCreateRequest target_shards */ + target_shards?: (string[]|null); + + /** ReshardCreateRequest cells */ + cells?: (string[]|null); + + /** ReshardCreateRequest tablet_types */ + tablet_types?: (topodata.TabletType[]|null); + + /** ReshardCreateRequest tablet_selection_preference */ + tablet_selection_preference?: (tabletmanagerdata.TabletSelectionPreference|null); + + /** ReshardCreateRequest skip_schema_copy */ + skip_schema_copy?: (boolean|null); + + /** ReshardCreateRequest on_ddl */ + on_ddl?: (string|null); + + /** ReshardCreateRequest stop_after_copy */ + stop_after_copy?: (boolean|null); + + /** ReshardCreateRequest defer_secondary_keys */ + defer_secondary_keys?: (boolean|null); + + /** ReshardCreateRequest auto_start */ + auto_start?: (boolean|null); } - /** Represents a RebuildVSchemaGraphResponse. */ - class RebuildVSchemaGraphResponse implements IRebuildVSchemaGraphResponse { + /** Represents a ReshardCreateRequest. */ + class ReshardCreateRequest implements IReshardCreateRequest { /** - * Constructs a new RebuildVSchemaGraphResponse. + * Constructs a new ReshardCreateRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IRebuildVSchemaGraphResponse); + constructor(properties?: vtctldata.IReshardCreateRequest); + + /** ReshardCreateRequest workflow. */ + public workflow: string; + + /** ReshardCreateRequest keyspace. */ + public keyspace: string; + + /** ReshardCreateRequest source_shards. */ + public source_shards: string[]; + + /** ReshardCreateRequest target_shards. */ + public target_shards: string[]; + + /** ReshardCreateRequest cells. */ + public cells: string[]; + + /** ReshardCreateRequest tablet_types. 
*/ + public tablet_types: topodata.TabletType[]; + + /** ReshardCreateRequest tablet_selection_preference. */ + public tablet_selection_preference: tabletmanagerdata.TabletSelectionPreference; + + /** ReshardCreateRequest skip_schema_copy. */ + public skip_schema_copy: boolean; + + /** ReshardCreateRequest on_ddl. */ + public on_ddl: string; + + /** ReshardCreateRequest stop_after_copy. */ + public stop_after_copy: boolean; + + /** ReshardCreateRequest defer_secondary_keys. */ + public defer_secondary_keys: boolean; + + /** ReshardCreateRequest auto_start. */ + public auto_start: boolean; /** - * Creates a new RebuildVSchemaGraphResponse instance using the specified properties. + * Creates a new ReshardCreateRequest instance using the specified properties. * @param [properties] Properties to set - * @returns RebuildVSchemaGraphResponse instance + * @returns ReshardCreateRequest instance */ - public static create(properties?: vtctldata.IRebuildVSchemaGraphResponse): vtctldata.RebuildVSchemaGraphResponse; + public static create(properties?: vtctldata.IReshardCreateRequest): vtctldata.ReshardCreateRequest; /** - * Encodes the specified RebuildVSchemaGraphResponse message. Does not implicitly {@link vtctldata.RebuildVSchemaGraphResponse.verify|verify} messages. - * @param message RebuildVSchemaGraphResponse message or plain object to encode + * Encodes the specified ReshardCreateRequest message. Does not implicitly {@link vtctldata.ReshardCreateRequest.verify|verify} messages. + * @param message ReshardCreateRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IRebuildVSchemaGraphResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IReshardCreateRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RebuildVSchemaGraphResponse message, length delimited. 
Does not implicitly {@link vtctldata.RebuildVSchemaGraphResponse.verify|verify} messages. - * @param message RebuildVSchemaGraphResponse message or plain object to encode + * Encodes the specified ReshardCreateRequest message, length delimited. Does not implicitly {@link vtctldata.ReshardCreateRequest.verify|verify} messages. + * @param message ReshardCreateRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IRebuildVSchemaGraphResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IReshardCreateRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RebuildVSchemaGraphResponse message from the specified reader or buffer. + * Decodes a ReshardCreateRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RebuildVSchemaGraphResponse + * @returns ReshardCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RebuildVSchemaGraphResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ReshardCreateRequest; /** - * Decodes a RebuildVSchemaGraphResponse message from the specified reader or buffer, length delimited. + * Decodes a ReshardCreateRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns RebuildVSchemaGraphResponse + * @returns ReshardCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RebuildVSchemaGraphResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ReshardCreateRequest; /** - * Verifies a RebuildVSchemaGraphResponse message. + * Verifies a ReshardCreateRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RebuildVSchemaGraphResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ReshardCreateRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns RebuildVSchemaGraphResponse + * @returns ReshardCreateRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.RebuildVSchemaGraphResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.ReshardCreateRequest; /** - * Creates a plain object from a RebuildVSchemaGraphResponse message. Also converts values to other types if specified. - * @param message RebuildVSchemaGraphResponse + * Creates a plain object from a ReshardCreateRequest message. Also converts values to other types if specified. 
+ * @param message ReshardCreateRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.RebuildVSchemaGraphResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ReshardCreateRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RebuildVSchemaGraphResponse to JSON. + * Converts this ReshardCreateRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RebuildVSchemaGraphResponse + * Gets the default type url for ReshardCreateRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RefreshStateRequest. */ - interface IRefreshStateRequest { + /** Properties of a RestoreFromBackupRequest. */ + interface IRestoreFromBackupRequest { - /** RefreshStateRequest tablet_alias */ + /** RestoreFromBackupRequest tablet_alias */ tablet_alias?: (topodata.ITabletAlias|null); + + /** RestoreFromBackupRequest backup_time */ + backup_time?: (vttime.ITime|null); + + /** RestoreFromBackupRequest restore_to_pos */ + restore_to_pos?: (string|null); + + /** RestoreFromBackupRequest dry_run */ + dry_run?: (boolean|null); + + /** RestoreFromBackupRequest restore_to_timestamp */ + restore_to_timestamp?: (vttime.ITime|null); } - /** Represents a RefreshStateRequest. */ - class RefreshStateRequest implements IRefreshStateRequest { + /** Represents a RestoreFromBackupRequest. */ + class RestoreFromBackupRequest implements IRestoreFromBackupRequest { /** - * Constructs a new RefreshStateRequest. + * Constructs a new RestoreFromBackupRequest. 
* @param [properties] Properties to set */ - constructor(properties?: vtctldata.IRefreshStateRequest); + constructor(properties?: vtctldata.IRestoreFromBackupRequest); - /** RefreshStateRequest tablet_alias. */ + /** RestoreFromBackupRequest tablet_alias. */ public tablet_alias?: (topodata.ITabletAlias|null); + /** RestoreFromBackupRequest backup_time. */ + public backup_time?: (vttime.ITime|null); + + /** RestoreFromBackupRequest restore_to_pos. */ + public restore_to_pos: string; + + /** RestoreFromBackupRequest dry_run. */ + public dry_run: boolean; + + /** RestoreFromBackupRequest restore_to_timestamp. */ + public restore_to_timestamp?: (vttime.ITime|null); + /** - * Creates a new RefreshStateRequest instance using the specified properties. + * Creates a new RestoreFromBackupRequest instance using the specified properties. * @param [properties] Properties to set - * @returns RefreshStateRequest instance + * @returns RestoreFromBackupRequest instance */ - public static create(properties?: vtctldata.IRefreshStateRequest): vtctldata.RefreshStateRequest; + public static create(properties?: vtctldata.IRestoreFromBackupRequest): vtctldata.RestoreFromBackupRequest; /** - * Encodes the specified RefreshStateRequest message. Does not implicitly {@link vtctldata.RefreshStateRequest.verify|verify} messages. - * @param message RefreshStateRequest message or plain object to encode + * Encodes the specified RestoreFromBackupRequest message. Does not implicitly {@link vtctldata.RestoreFromBackupRequest.verify|verify} messages. + * @param message RestoreFromBackupRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IRefreshStateRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRestoreFromBackupRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RefreshStateRequest message, length delimited. 
Does not implicitly {@link vtctldata.RefreshStateRequest.verify|verify} messages. - * @param message RefreshStateRequest message or plain object to encode + * Encodes the specified RestoreFromBackupRequest message, length delimited. Does not implicitly {@link vtctldata.RestoreFromBackupRequest.verify|verify} messages. + * @param message RestoreFromBackupRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IRefreshStateRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRestoreFromBackupRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RefreshStateRequest message from the specified reader or buffer. + * Decodes a RestoreFromBackupRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RefreshStateRequest + * @returns RestoreFromBackupRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RefreshStateRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RestoreFromBackupRequest; /** - * Decodes a RefreshStateRequest message from the specified reader or buffer, length delimited. + * Decodes a RestoreFromBackupRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns RefreshStateRequest + * @returns RestoreFromBackupRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RefreshStateRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RestoreFromBackupRequest; /** - * Verifies a RefreshStateRequest message. + * Verifies a RestoreFromBackupRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RefreshStateRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RestoreFromBackupRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns RefreshStateRequest + * @returns RestoreFromBackupRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.RefreshStateRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.RestoreFromBackupRequest; /** - * Creates a plain object from a RefreshStateRequest message. Also converts values to other types if specified. - * @param message RefreshStateRequest + * Creates a plain object from a RestoreFromBackupRequest message. Also converts values to other types if specified. + * @param message RestoreFromBackupRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.RefreshStateRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RestoreFromBackupRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RefreshStateRequest to JSON. 
+ * Converts this RestoreFromBackupRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RefreshStateRequest + * Gets the default type url for RestoreFromBackupRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RefreshStateResponse. */ - interface IRefreshStateResponse { + /** Properties of a RestoreFromBackupResponse. */ + interface IRestoreFromBackupResponse { + + /** RestoreFromBackupResponse tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); + + /** RestoreFromBackupResponse keyspace */ + keyspace?: (string|null); + + /** RestoreFromBackupResponse shard */ + shard?: (string|null); + + /** RestoreFromBackupResponse event */ + event?: (logutil.IEvent|null); } - /** Represents a RefreshStateResponse. */ - class RefreshStateResponse implements IRefreshStateResponse { + /** Represents a RestoreFromBackupResponse. */ + class RestoreFromBackupResponse implements IRestoreFromBackupResponse { /** - * Constructs a new RefreshStateResponse. + * Constructs a new RestoreFromBackupResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IRefreshStateResponse); + constructor(properties?: vtctldata.IRestoreFromBackupResponse); + + /** RestoreFromBackupResponse tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); + + /** RestoreFromBackupResponse keyspace. */ + public keyspace: string; + + /** RestoreFromBackupResponse shard. */ + public shard: string; + + /** RestoreFromBackupResponse event. */ + public event?: (logutil.IEvent|null); /** - * Creates a new RefreshStateResponse instance using the specified properties. + * Creates a new RestoreFromBackupResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns RefreshStateResponse instance + * @returns RestoreFromBackupResponse instance */ - public static create(properties?: vtctldata.IRefreshStateResponse): vtctldata.RefreshStateResponse; + public static create(properties?: vtctldata.IRestoreFromBackupResponse): vtctldata.RestoreFromBackupResponse; /** - * Encodes the specified RefreshStateResponse message. Does not implicitly {@link vtctldata.RefreshStateResponse.verify|verify} messages. - * @param message RefreshStateResponse message or plain object to encode + * Encodes the specified RestoreFromBackupResponse message. Does not implicitly {@link vtctldata.RestoreFromBackupResponse.verify|verify} messages. + * @param message RestoreFromBackupResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IRefreshStateResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRestoreFromBackupResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RefreshStateResponse message, length delimited. Does not implicitly {@link vtctldata.RefreshStateResponse.verify|verify} messages. - * @param message RefreshStateResponse message or plain object to encode + * Encodes the specified RestoreFromBackupResponse message, length delimited. Does not implicitly {@link vtctldata.RestoreFromBackupResponse.verify|verify} messages. + * @param message RestoreFromBackupResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IRefreshStateResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRestoreFromBackupResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RefreshStateResponse message from the specified reader or buffer. 
+ * Decodes a RestoreFromBackupResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RefreshStateResponse + * @returns RestoreFromBackupResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RefreshStateResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RestoreFromBackupResponse; /** - * Decodes a RefreshStateResponse message from the specified reader or buffer, length delimited. + * Decodes a RestoreFromBackupResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RefreshStateResponse + * @returns RestoreFromBackupResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RefreshStateResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RestoreFromBackupResponse; /** - * Verifies a RefreshStateResponse message. + * Verifies a RestoreFromBackupResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RefreshStateResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RestoreFromBackupResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns RefreshStateResponse + * @returns RestoreFromBackupResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.RefreshStateResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.RestoreFromBackupResponse; /** - * Creates a plain object from a RefreshStateResponse message. Also converts values to other types if specified. - * @param message RefreshStateResponse + * Creates a plain object from a RestoreFromBackupResponse message. Also converts values to other types if specified. + * @param message RestoreFromBackupResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.RefreshStateResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RestoreFromBackupResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RefreshStateResponse to JSON. + * Converts this RestoreFromBackupResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RefreshStateResponse + * Gets the default type url for RestoreFromBackupResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RefreshStateByShardRequest. */ - interface IRefreshStateByShardRequest { + /** Properties of a RetrySchemaMigrationRequest. */ + interface IRetrySchemaMigrationRequest { - /** RefreshStateByShardRequest keyspace */ + /** RetrySchemaMigrationRequest keyspace */ keyspace?: (string|null); - /** RefreshStateByShardRequest shard */ - shard?: (string|null); - - /** RefreshStateByShardRequest cells */ - cells?: (string[]|null); + /** RetrySchemaMigrationRequest uuid */ + uuid?: (string|null); } - /** Represents a RefreshStateByShardRequest. 
*/ - class RefreshStateByShardRequest implements IRefreshStateByShardRequest { + /** Represents a RetrySchemaMigrationRequest. */ + class RetrySchemaMigrationRequest implements IRetrySchemaMigrationRequest { /** - * Constructs a new RefreshStateByShardRequest. + * Constructs a new RetrySchemaMigrationRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IRefreshStateByShardRequest); + constructor(properties?: vtctldata.IRetrySchemaMigrationRequest); - /** RefreshStateByShardRequest keyspace. */ + /** RetrySchemaMigrationRequest keyspace. */ public keyspace: string; - /** RefreshStateByShardRequest shard. */ - public shard: string; - - /** RefreshStateByShardRequest cells. */ - public cells: string[]; + /** RetrySchemaMigrationRequest uuid. */ + public uuid: string; /** - * Creates a new RefreshStateByShardRequest instance using the specified properties. + * Creates a new RetrySchemaMigrationRequest instance using the specified properties. * @param [properties] Properties to set - * @returns RefreshStateByShardRequest instance + * @returns RetrySchemaMigrationRequest instance */ - public static create(properties?: vtctldata.IRefreshStateByShardRequest): vtctldata.RefreshStateByShardRequest; + public static create(properties?: vtctldata.IRetrySchemaMigrationRequest): vtctldata.RetrySchemaMigrationRequest; /** - * Encodes the specified RefreshStateByShardRequest message. Does not implicitly {@link vtctldata.RefreshStateByShardRequest.verify|verify} messages. - * @param message RefreshStateByShardRequest message or plain object to encode + * Encodes the specified RetrySchemaMigrationRequest message. Does not implicitly {@link vtctldata.RetrySchemaMigrationRequest.verify|verify} messages. 
+ * @param message RetrySchemaMigrationRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IRefreshStateByShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRetrySchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RefreshStateByShardRequest message, length delimited. Does not implicitly {@link vtctldata.RefreshStateByShardRequest.verify|verify} messages. - * @param message RefreshStateByShardRequest message or plain object to encode + * Encodes the specified RetrySchemaMigrationRequest message, length delimited. Does not implicitly {@link vtctldata.RetrySchemaMigrationRequest.verify|verify} messages. + * @param message RetrySchemaMigrationRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IRefreshStateByShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRetrySchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RefreshStateByShardRequest message from the specified reader or buffer. + * Decodes a RetrySchemaMigrationRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RefreshStateByShardRequest + * @returns RetrySchemaMigrationRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RefreshStateByShardRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RetrySchemaMigrationRequest; /** - * Decodes a RefreshStateByShardRequest message from the specified reader or buffer, length delimited. + * Decodes a RetrySchemaMigrationRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RefreshStateByShardRequest + * @returns RetrySchemaMigrationRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RefreshStateByShardRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RetrySchemaMigrationRequest; /** - * Verifies a RefreshStateByShardRequest message. + * Verifies a RetrySchemaMigrationRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RefreshStateByShardRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RetrySchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns RefreshStateByShardRequest + * @returns RetrySchemaMigrationRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.RefreshStateByShardRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.RetrySchemaMigrationRequest; /** - * Creates a plain object from a RefreshStateByShardRequest message. Also converts values to other types if specified. - * @param message RefreshStateByShardRequest + * Creates a plain object from a RetrySchemaMigrationRequest message. Also converts values to other types if specified. + * @param message RetrySchemaMigrationRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.RefreshStateByShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RetrySchemaMigrationRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RefreshStateByShardRequest to JSON. + * Converts this RetrySchemaMigrationRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RefreshStateByShardRequest + * Gets the default type url for RetrySchemaMigrationRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RefreshStateByShardResponse. */ - interface IRefreshStateByShardResponse { - - /** RefreshStateByShardResponse is_partial_refresh */ - is_partial_refresh?: (boolean|null); + /** Properties of a RetrySchemaMigrationResponse. 
*/ + interface IRetrySchemaMigrationResponse { - /** RefreshStateByShardResponse partial_refresh_details */ - partial_refresh_details?: (string|null); + /** RetrySchemaMigrationResponse rows_affected_by_shard */ + rows_affected_by_shard?: ({ [k: string]: (number|Long) }|null); } - /** Represents a RefreshStateByShardResponse. */ - class RefreshStateByShardResponse implements IRefreshStateByShardResponse { + /** Represents a RetrySchemaMigrationResponse. */ + class RetrySchemaMigrationResponse implements IRetrySchemaMigrationResponse { /** - * Constructs a new RefreshStateByShardResponse. + * Constructs a new RetrySchemaMigrationResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IRefreshStateByShardResponse); - - /** RefreshStateByShardResponse is_partial_refresh. */ - public is_partial_refresh: boolean; + constructor(properties?: vtctldata.IRetrySchemaMigrationResponse); - /** RefreshStateByShardResponse partial_refresh_details. */ - public partial_refresh_details: string; + /** RetrySchemaMigrationResponse rows_affected_by_shard. */ + public rows_affected_by_shard: { [k: string]: (number|Long) }; /** - * Creates a new RefreshStateByShardResponse instance using the specified properties. + * Creates a new RetrySchemaMigrationResponse instance using the specified properties. * @param [properties] Properties to set - * @returns RefreshStateByShardResponse instance + * @returns RetrySchemaMigrationResponse instance */ - public static create(properties?: vtctldata.IRefreshStateByShardResponse): vtctldata.RefreshStateByShardResponse; + public static create(properties?: vtctldata.IRetrySchemaMigrationResponse): vtctldata.RetrySchemaMigrationResponse; /** - * Encodes the specified RefreshStateByShardResponse message. Does not implicitly {@link vtctldata.RefreshStateByShardResponse.verify|verify} messages. 
- * @param message RefreshStateByShardResponse message or plain object to encode + * Encodes the specified RetrySchemaMigrationResponse message. Does not implicitly {@link vtctldata.RetrySchemaMigrationResponse.verify|verify} messages. + * @param message RetrySchemaMigrationResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IRefreshStateByShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRetrySchemaMigrationResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RefreshStateByShardResponse message, length delimited. Does not implicitly {@link vtctldata.RefreshStateByShardResponse.verify|verify} messages. - * @param message RefreshStateByShardResponse message or plain object to encode + * Encodes the specified RetrySchemaMigrationResponse message, length delimited. Does not implicitly {@link vtctldata.RetrySchemaMigrationResponse.verify|verify} messages. + * @param message RetrySchemaMigrationResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IRefreshStateByShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRetrySchemaMigrationResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RefreshStateByShardResponse message from the specified reader or buffer. + * Decodes a RetrySchemaMigrationResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RefreshStateByShardResponse + * @returns RetrySchemaMigrationResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RefreshStateByShardResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RetrySchemaMigrationResponse; /** - * Decodes a RefreshStateByShardResponse message from the specified reader or buffer, length delimited. + * Decodes a RetrySchemaMigrationResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RefreshStateByShardResponse + * @returns RetrySchemaMigrationResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RefreshStateByShardResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RetrySchemaMigrationResponse; /** - * Verifies a RefreshStateByShardResponse message. + * Verifies a RetrySchemaMigrationResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RefreshStateByShardResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RetrySchemaMigrationResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns RefreshStateByShardResponse + * @returns RetrySchemaMigrationResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.RefreshStateByShardResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.RetrySchemaMigrationResponse; /** - * Creates a plain object from a RefreshStateByShardResponse message. Also converts values to other types if specified. - * @param message RefreshStateByShardResponse + * Creates a plain object from a RetrySchemaMigrationResponse message. Also converts values to other types if specified. + * @param message RetrySchemaMigrationResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.RefreshStateByShardResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RetrySchemaMigrationResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RefreshStateByShardResponse to JSON. + * Converts this RetrySchemaMigrationResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RefreshStateByShardResponse + * Gets the default type url for RetrySchemaMigrationResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReloadSchemaRequest. */ - interface IReloadSchemaRequest { + /** Properties of a RunHealthCheckRequest. */ + interface IRunHealthCheckRequest { - /** ReloadSchemaRequest tablet_alias */ + /** RunHealthCheckRequest tablet_alias */ tablet_alias?: (topodata.ITabletAlias|null); } - /** Represents a ReloadSchemaRequest. */ - class ReloadSchemaRequest implements IReloadSchemaRequest { + /** Represents a RunHealthCheckRequest. 
*/ + class RunHealthCheckRequest implements IRunHealthCheckRequest { /** - * Constructs a new ReloadSchemaRequest. + * Constructs a new RunHealthCheckRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IReloadSchemaRequest); + constructor(properties?: vtctldata.IRunHealthCheckRequest); - /** ReloadSchemaRequest tablet_alias. */ + /** RunHealthCheckRequest tablet_alias. */ public tablet_alias?: (topodata.ITabletAlias|null); /** - * Creates a new ReloadSchemaRequest instance using the specified properties. + * Creates a new RunHealthCheckRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ReloadSchemaRequest instance + * @returns RunHealthCheckRequest instance */ - public static create(properties?: vtctldata.IReloadSchemaRequest): vtctldata.ReloadSchemaRequest; + public static create(properties?: vtctldata.IRunHealthCheckRequest): vtctldata.RunHealthCheckRequest; /** - * Encodes the specified ReloadSchemaRequest message. Does not implicitly {@link vtctldata.ReloadSchemaRequest.verify|verify} messages. - * @param message ReloadSchemaRequest message or plain object to encode + * Encodes the specified RunHealthCheckRequest message. Does not implicitly {@link vtctldata.RunHealthCheckRequest.verify|verify} messages. + * @param message RunHealthCheckRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IReloadSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRunHealthCheckRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReloadSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaRequest.verify|verify} messages. - * @param message ReloadSchemaRequest message or plain object to encode + * Encodes the specified RunHealthCheckRequest message, length delimited. 
Does not implicitly {@link vtctldata.RunHealthCheckRequest.verify|verify} messages. + * @param message RunHealthCheckRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IReloadSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRunHealthCheckRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReloadSchemaRequest message from the specified reader or buffer. + * Decodes a RunHealthCheckRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReloadSchemaRequest + * @returns RunHealthCheckRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ReloadSchemaRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RunHealthCheckRequest; /** - * Decodes a ReloadSchemaRequest message from the specified reader or buffer, length delimited. + * Decodes a RunHealthCheckRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ReloadSchemaRequest + * @returns RunHealthCheckRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ReloadSchemaRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RunHealthCheckRequest; /** - * Verifies a ReloadSchemaRequest message. + * Verifies a RunHealthCheckRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReloadSchemaRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RunHealthCheckRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ReloadSchemaRequest + * @returns RunHealthCheckRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ReloadSchemaRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.RunHealthCheckRequest; /** - * Creates a plain object from a ReloadSchemaRequest message. Also converts values to other types if specified. - * @param message ReloadSchemaRequest + * Creates a plain object from a RunHealthCheckRequest message. Also converts values to other types if specified. + * @param message RunHealthCheckRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ReloadSchemaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RunHealthCheckRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReloadSchemaRequest to JSON. + * Converts this RunHealthCheckRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReloadSchemaRequest + * Gets the default type url for RunHealthCheckRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReloadSchemaResponse. */ - interface IReloadSchemaResponse { + /** Properties of a RunHealthCheckResponse. 
*/ + interface IRunHealthCheckResponse { } - /** Represents a ReloadSchemaResponse. */ - class ReloadSchemaResponse implements IReloadSchemaResponse { + /** Represents a RunHealthCheckResponse. */ + class RunHealthCheckResponse implements IRunHealthCheckResponse { /** - * Constructs a new ReloadSchemaResponse. + * Constructs a new RunHealthCheckResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IReloadSchemaResponse); + constructor(properties?: vtctldata.IRunHealthCheckResponse); /** - * Creates a new ReloadSchemaResponse instance using the specified properties. + * Creates a new RunHealthCheckResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ReloadSchemaResponse instance + * @returns RunHealthCheckResponse instance */ - public static create(properties?: vtctldata.IReloadSchemaResponse): vtctldata.ReloadSchemaResponse; + public static create(properties?: vtctldata.IRunHealthCheckResponse): vtctldata.RunHealthCheckResponse; /** - * Encodes the specified ReloadSchemaResponse message. Does not implicitly {@link vtctldata.ReloadSchemaResponse.verify|verify} messages. - * @param message ReloadSchemaResponse message or plain object to encode + * Encodes the specified RunHealthCheckResponse message. Does not implicitly {@link vtctldata.RunHealthCheckResponse.verify|verify} messages. + * @param message RunHealthCheckResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IReloadSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IRunHealthCheckResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReloadSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaResponse.verify|verify} messages. 
- * @param message ReloadSchemaResponse message or plain object to encode + * Encodes the specified RunHealthCheckResponse message, length delimited. Does not implicitly {@link vtctldata.RunHealthCheckResponse.verify|verify} messages. + * @param message RunHealthCheckResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IReloadSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IRunHealthCheckResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReloadSchemaResponse message from the specified reader or buffer. + * Decodes a RunHealthCheckResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReloadSchemaResponse + * @returns RunHealthCheckResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ReloadSchemaResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RunHealthCheckResponse; /** - * Decodes a ReloadSchemaResponse message from the specified reader or buffer, length delimited. + * Decodes a RunHealthCheckResponse message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns ReloadSchemaResponse + * @returns RunHealthCheckResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ReloadSchemaResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RunHealthCheckResponse; /** - * Verifies a ReloadSchemaResponse message. + * Verifies a RunHealthCheckResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReloadSchemaResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RunHealthCheckResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ReloadSchemaResponse + * @returns RunHealthCheckResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ReloadSchemaResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.RunHealthCheckResponse; /** - * Creates a plain object from a ReloadSchemaResponse message. Also converts values to other types if specified. - * @param message ReloadSchemaResponse + * Creates a plain object from a RunHealthCheckResponse message. Also converts values to other types if specified. + * @param message RunHealthCheckResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ReloadSchemaResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.RunHealthCheckResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReloadSchemaResponse to JSON. 
+ * Converts this RunHealthCheckResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReloadSchemaResponse + * Gets the default type url for RunHealthCheckResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReloadSchemaKeyspaceRequest. */ - interface IReloadSchemaKeyspaceRequest { + /** Properties of a SetKeyspaceDurabilityPolicyRequest. */ + interface ISetKeyspaceDurabilityPolicyRequest { - /** ReloadSchemaKeyspaceRequest keyspace */ + /** SetKeyspaceDurabilityPolicyRequest keyspace */ keyspace?: (string|null); - /** ReloadSchemaKeyspaceRequest wait_position */ - wait_position?: (string|null); - - /** ReloadSchemaKeyspaceRequest include_primary */ - include_primary?: (boolean|null); - - /** ReloadSchemaKeyspaceRequest concurrency */ - concurrency?: (number|null); + /** SetKeyspaceDurabilityPolicyRequest durability_policy */ + durability_policy?: (string|null); } - /** Represents a ReloadSchemaKeyspaceRequest. */ - class ReloadSchemaKeyspaceRequest implements IReloadSchemaKeyspaceRequest { + /** Represents a SetKeyspaceDurabilityPolicyRequest. */ + class SetKeyspaceDurabilityPolicyRequest implements ISetKeyspaceDurabilityPolicyRequest { /** - * Constructs a new ReloadSchemaKeyspaceRequest. + * Constructs a new SetKeyspaceDurabilityPolicyRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IReloadSchemaKeyspaceRequest); + constructor(properties?: vtctldata.ISetKeyspaceDurabilityPolicyRequest); - /** ReloadSchemaKeyspaceRequest keyspace. */ + /** SetKeyspaceDurabilityPolicyRequest keyspace. */ public keyspace: string; - /** ReloadSchemaKeyspaceRequest wait_position. */ - public wait_position: string; - - /** ReloadSchemaKeyspaceRequest include_primary. 
*/ - public include_primary: boolean; - - /** ReloadSchemaKeyspaceRequest concurrency. */ - public concurrency: number; + /** SetKeyspaceDurabilityPolicyRequest durability_policy. */ + public durability_policy: string; /** - * Creates a new ReloadSchemaKeyspaceRequest instance using the specified properties. + * Creates a new SetKeyspaceDurabilityPolicyRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ReloadSchemaKeyspaceRequest instance + * @returns SetKeyspaceDurabilityPolicyRequest instance */ - public static create(properties?: vtctldata.IReloadSchemaKeyspaceRequest): vtctldata.ReloadSchemaKeyspaceRequest; + public static create(properties?: vtctldata.ISetKeyspaceDurabilityPolicyRequest): vtctldata.SetKeyspaceDurabilityPolicyRequest; /** - * Encodes the specified ReloadSchemaKeyspaceRequest message. Does not implicitly {@link vtctldata.ReloadSchemaKeyspaceRequest.verify|verify} messages. - * @param message ReloadSchemaKeyspaceRequest message or plain object to encode + * Encodes the specified SetKeyspaceDurabilityPolicyRequest message. Does not implicitly {@link vtctldata.SetKeyspaceDurabilityPolicyRequest.verify|verify} messages. + * @param message SetKeyspaceDurabilityPolicyRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IReloadSchemaKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ISetKeyspaceDurabilityPolicyRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReloadSchemaKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaKeyspaceRequest.verify|verify} messages. - * @param message ReloadSchemaKeyspaceRequest message or plain object to encode + * Encodes the specified SetKeyspaceDurabilityPolicyRequest message, length delimited. 
Does not implicitly {@link vtctldata.SetKeyspaceDurabilityPolicyRequest.verify|verify} messages. + * @param message SetKeyspaceDurabilityPolicyRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IReloadSchemaKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ISetKeyspaceDurabilityPolicyRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReloadSchemaKeyspaceRequest message from the specified reader or buffer. + * Decodes a SetKeyspaceDurabilityPolicyRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReloadSchemaKeyspaceRequest + * @returns SetKeyspaceDurabilityPolicyRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ReloadSchemaKeyspaceRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetKeyspaceDurabilityPolicyRequest; /** - * Decodes a ReloadSchemaKeyspaceRequest message from the specified reader or buffer, length delimited. + * Decodes a SetKeyspaceDurabilityPolicyRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns ReloadSchemaKeyspaceRequest + * @returns SetKeyspaceDurabilityPolicyRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ReloadSchemaKeyspaceRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetKeyspaceDurabilityPolicyRequest; /** - * Verifies a ReloadSchemaKeyspaceRequest message. + * Verifies a SetKeyspaceDurabilityPolicyRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReloadSchemaKeyspaceRequest message from a plain object. Also converts values to their respective internal types. + * Creates a SetKeyspaceDurabilityPolicyRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ReloadSchemaKeyspaceRequest + * @returns SetKeyspaceDurabilityPolicyRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ReloadSchemaKeyspaceRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.SetKeyspaceDurabilityPolicyRequest; /** - * Creates a plain object from a ReloadSchemaKeyspaceRequest message. Also converts values to other types if specified. - * @param message ReloadSchemaKeyspaceRequest + * Creates a plain object from a SetKeyspaceDurabilityPolicyRequest message. Also converts values to other types if specified. 
+ * @param message SetKeyspaceDurabilityPolicyRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ReloadSchemaKeyspaceRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.SetKeyspaceDurabilityPolicyRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReloadSchemaKeyspaceRequest to JSON. + * Converts this SetKeyspaceDurabilityPolicyRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReloadSchemaKeyspaceRequest + * Gets the default type url for SetKeyspaceDurabilityPolicyRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReloadSchemaKeyspaceResponse. */ - interface IReloadSchemaKeyspaceResponse { + /** Properties of a SetKeyspaceDurabilityPolicyResponse. */ + interface ISetKeyspaceDurabilityPolicyResponse { - /** ReloadSchemaKeyspaceResponse events */ - events?: (logutil.IEvent[]|null); + /** SetKeyspaceDurabilityPolicyResponse keyspace */ + keyspace?: (topodata.IKeyspace|null); } - /** Represents a ReloadSchemaKeyspaceResponse. */ - class ReloadSchemaKeyspaceResponse implements IReloadSchemaKeyspaceResponse { + /** Represents a SetKeyspaceDurabilityPolicyResponse. */ + class SetKeyspaceDurabilityPolicyResponse implements ISetKeyspaceDurabilityPolicyResponse { /** - * Constructs a new ReloadSchemaKeyspaceResponse. + * Constructs a new SetKeyspaceDurabilityPolicyResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IReloadSchemaKeyspaceResponse); + constructor(properties?: vtctldata.ISetKeyspaceDurabilityPolicyResponse); - /** ReloadSchemaKeyspaceResponse events. 
*/ - public events: logutil.IEvent[]; + /** SetKeyspaceDurabilityPolicyResponse keyspace. */ + public keyspace?: (topodata.IKeyspace|null); /** - * Creates a new ReloadSchemaKeyspaceResponse instance using the specified properties. + * Creates a new SetKeyspaceDurabilityPolicyResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ReloadSchemaKeyspaceResponse instance + * @returns SetKeyspaceDurabilityPolicyResponse instance */ - public static create(properties?: vtctldata.IReloadSchemaKeyspaceResponse): vtctldata.ReloadSchemaKeyspaceResponse; + public static create(properties?: vtctldata.ISetKeyspaceDurabilityPolicyResponse): vtctldata.SetKeyspaceDurabilityPolicyResponse; /** - * Encodes the specified ReloadSchemaKeyspaceResponse message. Does not implicitly {@link vtctldata.ReloadSchemaKeyspaceResponse.verify|verify} messages. - * @param message ReloadSchemaKeyspaceResponse message or plain object to encode + * Encodes the specified SetKeyspaceDurabilityPolicyResponse message. Does not implicitly {@link vtctldata.SetKeyspaceDurabilityPolicyResponse.verify|verify} messages. + * @param message SetKeyspaceDurabilityPolicyResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IReloadSchemaKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ISetKeyspaceDurabilityPolicyResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReloadSchemaKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaKeyspaceResponse.verify|verify} messages. - * @param message ReloadSchemaKeyspaceResponse message or plain object to encode + * Encodes the specified SetKeyspaceDurabilityPolicyResponse message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceDurabilityPolicyResponse.verify|verify} messages. 
+ * @param message SetKeyspaceDurabilityPolicyResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IReloadSchemaKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ISetKeyspaceDurabilityPolicyResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReloadSchemaKeyspaceResponse message from the specified reader or buffer. + * Decodes a SetKeyspaceDurabilityPolicyResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReloadSchemaKeyspaceResponse + * @returns SetKeyspaceDurabilityPolicyResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ReloadSchemaKeyspaceResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetKeyspaceDurabilityPolicyResponse; /** - * Decodes a ReloadSchemaKeyspaceResponse message from the specified reader or buffer, length delimited. + * Decodes a SetKeyspaceDurabilityPolicyResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ReloadSchemaKeyspaceResponse + * @returns SetKeyspaceDurabilityPolicyResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ReloadSchemaKeyspaceResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetKeyspaceDurabilityPolicyResponse; /** - * Verifies a ReloadSchemaKeyspaceResponse message. 
+ * Verifies a SetKeyspaceDurabilityPolicyResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReloadSchemaKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SetKeyspaceDurabilityPolicyResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ReloadSchemaKeyspaceResponse + * @returns SetKeyspaceDurabilityPolicyResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ReloadSchemaKeyspaceResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.SetKeyspaceDurabilityPolicyResponse; /** - * Creates a plain object from a ReloadSchemaKeyspaceResponse message. Also converts values to other types if specified. - * @param message ReloadSchemaKeyspaceResponse + * Creates a plain object from a SetKeyspaceDurabilityPolicyResponse message. Also converts values to other types if specified. + * @param message SetKeyspaceDurabilityPolicyResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ReloadSchemaKeyspaceResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.SetKeyspaceDurabilityPolicyResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReloadSchemaKeyspaceResponse to JSON. + * Converts this SetKeyspaceDurabilityPolicyResponse to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReloadSchemaKeyspaceResponse + * Gets the default type url for SetKeyspaceDurabilityPolicyResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReloadSchemaShardRequest. */ - interface IReloadSchemaShardRequest { + /** Properties of a SetKeyspaceServedFromRequest. */ + interface ISetKeyspaceServedFromRequest { - /** ReloadSchemaShardRequest keyspace */ + /** SetKeyspaceServedFromRequest keyspace */ keyspace?: (string|null); - /** ReloadSchemaShardRequest shard */ - shard?: (string|null); + /** SetKeyspaceServedFromRequest tablet_type */ + tablet_type?: (topodata.TabletType|null); - /** ReloadSchemaShardRequest wait_position */ - wait_position?: (string|null); + /** SetKeyspaceServedFromRequest cells */ + cells?: (string[]|null); - /** ReloadSchemaShardRequest include_primary */ - include_primary?: (boolean|null); + /** SetKeyspaceServedFromRequest remove */ + remove?: (boolean|null); - /** ReloadSchemaShardRequest concurrency */ - concurrency?: (number|null); + /** SetKeyspaceServedFromRequest source_keyspace */ + source_keyspace?: (string|null); } - /** Represents a ReloadSchemaShardRequest. */ - class ReloadSchemaShardRequest implements IReloadSchemaShardRequest { + /** Represents a SetKeyspaceServedFromRequest. */ + class SetKeyspaceServedFromRequest implements ISetKeyspaceServedFromRequest { /** - * Constructs a new ReloadSchemaShardRequest. + * Constructs a new SetKeyspaceServedFromRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IReloadSchemaShardRequest); + constructor(properties?: vtctldata.ISetKeyspaceServedFromRequest); - /** ReloadSchemaShardRequest keyspace. */ + /** SetKeyspaceServedFromRequest keyspace. 
*/ public keyspace: string; - /** ReloadSchemaShardRequest shard. */ - public shard: string; + /** SetKeyspaceServedFromRequest tablet_type. */ + public tablet_type: topodata.TabletType; - /** ReloadSchemaShardRequest wait_position. */ - public wait_position: string; + /** SetKeyspaceServedFromRequest cells. */ + public cells: string[]; - /** ReloadSchemaShardRequest include_primary. */ - public include_primary: boolean; + /** SetKeyspaceServedFromRequest remove. */ + public remove: boolean; - /** ReloadSchemaShardRequest concurrency. */ - public concurrency: number; + /** SetKeyspaceServedFromRequest source_keyspace. */ + public source_keyspace: string; /** - * Creates a new ReloadSchemaShardRequest instance using the specified properties. + * Creates a new SetKeyspaceServedFromRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ReloadSchemaShardRequest instance + * @returns SetKeyspaceServedFromRequest instance */ - public static create(properties?: vtctldata.IReloadSchemaShardRequest): vtctldata.ReloadSchemaShardRequest; + public static create(properties?: vtctldata.ISetKeyspaceServedFromRequest): vtctldata.SetKeyspaceServedFromRequest; /** - * Encodes the specified ReloadSchemaShardRequest message. Does not implicitly {@link vtctldata.ReloadSchemaShardRequest.verify|verify} messages. - * @param message ReloadSchemaShardRequest message or plain object to encode + * Encodes the specified SetKeyspaceServedFromRequest message. Does not implicitly {@link vtctldata.SetKeyspaceServedFromRequest.verify|verify} messages. 
+ * @param message SetKeyspaceServedFromRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IReloadSchemaShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ISetKeyspaceServedFromRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReloadSchemaShardRequest message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaShardRequest.verify|verify} messages. - * @param message ReloadSchemaShardRequest message or plain object to encode + * Encodes the specified SetKeyspaceServedFromRequest message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceServedFromRequest.verify|verify} messages. + * @param message SetKeyspaceServedFromRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IReloadSchemaShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ISetKeyspaceServedFromRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReloadSchemaShardRequest message from the specified reader or buffer. + * Decodes a SetKeyspaceServedFromRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReloadSchemaShardRequest + * @returns SetKeyspaceServedFromRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ReloadSchemaShardRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetKeyspaceServedFromRequest; /** - * Decodes a ReloadSchemaShardRequest message from the specified reader or buffer, length delimited. + * Decodes a SetKeyspaceServedFromRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ReloadSchemaShardRequest + * @returns SetKeyspaceServedFromRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ReloadSchemaShardRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetKeyspaceServedFromRequest; /** - * Verifies a ReloadSchemaShardRequest message. + * Verifies a SetKeyspaceServedFromRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReloadSchemaShardRequest message from a plain object. Also converts values to their respective internal types. + * Creates a SetKeyspaceServedFromRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ReloadSchemaShardRequest + * @returns SetKeyspaceServedFromRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ReloadSchemaShardRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.SetKeyspaceServedFromRequest; /** - * Creates a plain object from a ReloadSchemaShardRequest message. Also converts values to other types if specified. - * @param message ReloadSchemaShardRequest + * Creates a plain object from a SetKeyspaceServedFromRequest message. Also converts values to other types if specified. + * @param message SetKeyspaceServedFromRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ReloadSchemaShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.SetKeyspaceServedFromRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReloadSchemaShardRequest to JSON. + * Converts this SetKeyspaceServedFromRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReloadSchemaShardRequest + * Gets the default type url for SetKeyspaceServedFromRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReloadSchemaShardResponse. */ - interface IReloadSchemaShardResponse { + /** Properties of a SetKeyspaceServedFromResponse. */ + interface ISetKeyspaceServedFromResponse { - /** ReloadSchemaShardResponse events */ - events?: (logutil.IEvent[]|null); + /** SetKeyspaceServedFromResponse keyspace */ + keyspace?: (topodata.IKeyspace|null); } - /** Represents a ReloadSchemaShardResponse. */ - class ReloadSchemaShardResponse implements IReloadSchemaShardResponse { + /** Represents a SetKeyspaceServedFromResponse. 
*/ + class SetKeyspaceServedFromResponse implements ISetKeyspaceServedFromResponse { /** - * Constructs a new ReloadSchemaShardResponse. + * Constructs a new SetKeyspaceServedFromResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IReloadSchemaShardResponse); + constructor(properties?: vtctldata.ISetKeyspaceServedFromResponse); - /** ReloadSchemaShardResponse events. */ - public events: logutil.IEvent[]; + /** SetKeyspaceServedFromResponse keyspace. */ + public keyspace?: (topodata.IKeyspace|null); /** - * Creates a new ReloadSchemaShardResponse instance using the specified properties. + * Creates a new SetKeyspaceServedFromResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ReloadSchemaShardResponse instance + * @returns SetKeyspaceServedFromResponse instance */ - public static create(properties?: vtctldata.IReloadSchemaShardResponse): vtctldata.ReloadSchemaShardResponse; + public static create(properties?: vtctldata.ISetKeyspaceServedFromResponse): vtctldata.SetKeyspaceServedFromResponse; /** - * Encodes the specified ReloadSchemaShardResponse message. Does not implicitly {@link vtctldata.ReloadSchemaShardResponse.verify|verify} messages. - * @param message ReloadSchemaShardResponse message or plain object to encode + * Encodes the specified SetKeyspaceServedFromResponse message. Does not implicitly {@link vtctldata.SetKeyspaceServedFromResponse.verify|verify} messages. + * @param message SetKeyspaceServedFromResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IReloadSchemaShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ISetKeyspaceServedFromResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReloadSchemaShardResponse message, length delimited. 
Does not implicitly {@link vtctldata.ReloadSchemaShardResponse.verify|verify} messages. - * @param message ReloadSchemaShardResponse message or plain object to encode + * Encodes the specified SetKeyspaceServedFromResponse message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceServedFromResponse.verify|verify} messages. + * @param message SetKeyspaceServedFromResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IReloadSchemaShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ISetKeyspaceServedFromResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReloadSchemaShardResponse message from the specified reader or buffer. + * Decodes a SetKeyspaceServedFromResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReloadSchemaShardResponse + * @returns SetKeyspaceServedFromResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ReloadSchemaShardResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetKeyspaceServedFromResponse; /** - * Decodes a ReloadSchemaShardResponse message from the specified reader or buffer, length delimited. + * Decodes a SetKeyspaceServedFromResponse message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns ReloadSchemaShardResponse + * @returns SetKeyspaceServedFromResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ReloadSchemaShardResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetKeyspaceServedFromResponse; /** - * Verifies a ReloadSchemaShardResponse message. + * Verifies a SetKeyspaceServedFromResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReloadSchemaShardResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SetKeyspaceServedFromResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ReloadSchemaShardResponse + * @returns SetKeyspaceServedFromResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ReloadSchemaShardResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.SetKeyspaceServedFromResponse; /** - * Creates a plain object from a ReloadSchemaShardResponse message. Also converts values to other types if specified. - * @param message ReloadSchemaShardResponse + * Creates a plain object from a SetKeyspaceServedFromResponse message. Also converts values to other types if specified. 
+ * @param message SetKeyspaceServedFromResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ReloadSchemaShardResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.SetKeyspaceServedFromResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReloadSchemaShardResponse to JSON. + * Converts this SetKeyspaceServedFromResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReloadSchemaShardResponse + * Gets the default type url for SetKeyspaceServedFromResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RemoveBackupRequest. */ - interface IRemoveBackupRequest { + /** Properties of a SetKeyspaceShardingInfoRequest. */ + interface ISetKeyspaceShardingInfoRequest { - /** RemoveBackupRequest keyspace */ + /** SetKeyspaceShardingInfoRequest keyspace */ keyspace?: (string|null); - /** RemoveBackupRequest shard */ - shard?: (string|null); - - /** RemoveBackupRequest name */ - name?: (string|null); + /** SetKeyspaceShardingInfoRequest force */ + force?: (boolean|null); } - /** Represents a RemoveBackupRequest. */ - class RemoveBackupRequest implements IRemoveBackupRequest { + /** Represents a SetKeyspaceShardingInfoRequest. */ + class SetKeyspaceShardingInfoRequest implements ISetKeyspaceShardingInfoRequest { /** - * Constructs a new RemoveBackupRequest. + * Constructs a new SetKeyspaceShardingInfoRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IRemoveBackupRequest); + constructor(properties?: vtctldata.ISetKeyspaceShardingInfoRequest); - /** RemoveBackupRequest keyspace. */ + /** SetKeyspaceShardingInfoRequest keyspace. 
*/ public keyspace: string; - /** RemoveBackupRequest shard. */ - public shard: string; - - /** RemoveBackupRequest name. */ - public name: string; + /** SetKeyspaceShardingInfoRequest force. */ + public force: boolean; /** - * Creates a new RemoveBackupRequest instance using the specified properties. + * Creates a new SetKeyspaceShardingInfoRequest instance using the specified properties. * @param [properties] Properties to set - * @returns RemoveBackupRequest instance + * @returns SetKeyspaceShardingInfoRequest instance */ - public static create(properties?: vtctldata.IRemoveBackupRequest): vtctldata.RemoveBackupRequest; + public static create(properties?: vtctldata.ISetKeyspaceShardingInfoRequest): vtctldata.SetKeyspaceShardingInfoRequest; /** - * Encodes the specified RemoveBackupRequest message. Does not implicitly {@link vtctldata.RemoveBackupRequest.verify|verify} messages. - * @param message RemoveBackupRequest message or plain object to encode + * Encodes the specified SetKeyspaceShardingInfoRequest message. Does not implicitly {@link vtctldata.SetKeyspaceShardingInfoRequest.verify|verify} messages. + * @param message SetKeyspaceShardingInfoRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IRemoveBackupRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ISetKeyspaceShardingInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RemoveBackupRequest message, length delimited. Does not implicitly {@link vtctldata.RemoveBackupRequest.verify|verify} messages. - * @param message RemoveBackupRequest message or plain object to encode + * Encodes the specified SetKeyspaceShardingInfoRequest message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceShardingInfoRequest.verify|verify} messages. 
+ * @param message SetKeyspaceShardingInfoRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IRemoveBackupRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ISetKeyspaceShardingInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RemoveBackupRequest message from the specified reader or buffer. + * Decodes a SetKeyspaceShardingInfoRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RemoveBackupRequest + * @returns SetKeyspaceShardingInfoRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RemoveBackupRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetKeyspaceShardingInfoRequest; /** - * Decodes a RemoveBackupRequest message from the specified reader or buffer, length delimited. + * Decodes a SetKeyspaceShardingInfoRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RemoveBackupRequest + * @returns SetKeyspaceShardingInfoRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RemoveBackupRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetKeyspaceShardingInfoRequest; /** - * Verifies a RemoveBackupRequest message. + * Verifies a SetKeyspaceShardingInfoRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RemoveBackupRequest message from a plain object. Also converts values to their respective internal types. + * Creates a SetKeyspaceShardingInfoRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns RemoveBackupRequest + * @returns SetKeyspaceShardingInfoRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.RemoveBackupRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.SetKeyspaceShardingInfoRequest; /** - * Creates a plain object from a RemoveBackupRequest message. Also converts values to other types if specified. - * @param message RemoveBackupRequest + * Creates a plain object from a SetKeyspaceShardingInfoRequest message. Also converts values to other types if specified. + * @param message SetKeyspaceShardingInfoRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.RemoveBackupRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.SetKeyspaceShardingInfoRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RemoveBackupRequest to JSON. + * Converts this SetKeyspaceShardingInfoRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RemoveBackupRequest + * Gets the default type url for SetKeyspaceShardingInfoRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RemoveBackupResponse. 
*/ - interface IRemoveBackupResponse { + /** Properties of a SetKeyspaceShardingInfoResponse. */ + interface ISetKeyspaceShardingInfoResponse { + + /** SetKeyspaceShardingInfoResponse keyspace */ + keyspace?: (topodata.IKeyspace|null); } - /** Represents a RemoveBackupResponse. */ - class RemoveBackupResponse implements IRemoveBackupResponse { + /** Represents a SetKeyspaceShardingInfoResponse. */ + class SetKeyspaceShardingInfoResponse implements ISetKeyspaceShardingInfoResponse { /** - * Constructs a new RemoveBackupResponse. + * Constructs a new SetKeyspaceShardingInfoResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IRemoveBackupResponse); + constructor(properties?: vtctldata.ISetKeyspaceShardingInfoResponse); + + /** SetKeyspaceShardingInfoResponse keyspace. */ + public keyspace?: (topodata.IKeyspace|null); /** - * Creates a new RemoveBackupResponse instance using the specified properties. + * Creates a new SetKeyspaceShardingInfoResponse instance using the specified properties. * @param [properties] Properties to set - * @returns RemoveBackupResponse instance + * @returns SetKeyspaceShardingInfoResponse instance */ - public static create(properties?: vtctldata.IRemoveBackupResponse): vtctldata.RemoveBackupResponse; + public static create(properties?: vtctldata.ISetKeyspaceShardingInfoResponse): vtctldata.SetKeyspaceShardingInfoResponse; /** - * Encodes the specified RemoveBackupResponse message. Does not implicitly {@link vtctldata.RemoveBackupResponse.verify|verify} messages. - * @param message RemoveBackupResponse message or plain object to encode + * Encodes the specified SetKeyspaceShardingInfoResponse message. Does not implicitly {@link vtctldata.SetKeyspaceShardingInfoResponse.verify|verify} messages. 
+ * @param message SetKeyspaceShardingInfoResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IRemoveBackupResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ISetKeyspaceShardingInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RemoveBackupResponse message, length delimited. Does not implicitly {@link vtctldata.RemoveBackupResponse.verify|verify} messages. - * @param message RemoveBackupResponse message or plain object to encode + * Encodes the specified SetKeyspaceShardingInfoResponse message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceShardingInfoResponse.verify|verify} messages. + * @param message SetKeyspaceShardingInfoResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IRemoveBackupResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ISetKeyspaceShardingInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RemoveBackupResponse message from the specified reader or buffer. + * Decodes a SetKeyspaceShardingInfoResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RemoveBackupResponse + * @returns SetKeyspaceShardingInfoResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RemoveBackupResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetKeyspaceShardingInfoResponse; /** - * Decodes a RemoveBackupResponse message from the specified reader or buffer, length delimited. + * Decodes a SetKeyspaceShardingInfoResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RemoveBackupResponse + * @returns SetKeyspaceShardingInfoResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RemoveBackupResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetKeyspaceShardingInfoResponse; /** - * Verifies a RemoveBackupResponse message. + * Verifies a SetKeyspaceShardingInfoResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RemoveBackupResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SetKeyspaceShardingInfoResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns RemoveBackupResponse + * @returns SetKeyspaceShardingInfoResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.RemoveBackupResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.SetKeyspaceShardingInfoResponse; /** - * Creates a plain object from a RemoveBackupResponse message. Also converts values to other types if specified. - * @param message RemoveBackupResponse + * Creates a plain object from a SetKeyspaceShardingInfoResponse message. Also converts values to other types if specified. + * @param message SetKeyspaceShardingInfoResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.RemoveBackupResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.SetKeyspaceShardingInfoResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RemoveBackupResponse to JSON. + * Converts this SetKeyspaceShardingInfoResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RemoveBackupResponse + * Gets the default type url for SetKeyspaceShardingInfoResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RemoveKeyspaceCellRequest. */ - interface IRemoveKeyspaceCellRequest { + /** Properties of a SetShardIsPrimaryServingRequest. 
*/ + interface ISetShardIsPrimaryServingRequest { - /** RemoveKeyspaceCellRequest keyspace */ + /** SetShardIsPrimaryServingRequest keyspace */ keyspace?: (string|null); - /** RemoveKeyspaceCellRequest cell */ - cell?: (string|null); - - /** RemoveKeyspaceCellRequest force */ - force?: (boolean|null); + /** SetShardIsPrimaryServingRequest shard */ + shard?: (string|null); - /** RemoveKeyspaceCellRequest recursive */ - recursive?: (boolean|null); + /** SetShardIsPrimaryServingRequest is_serving */ + is_serving?: (boolean|null); } - /** Represents a RemoveKeyspaceCellRequest. */ - class RemoveKeyspaceCellRequest implements IRemoveKeyspaceCellRequest { + /** Represents a SetShardIsPrimaryServingRequest. */ + class SetShardIsPrimaryServingRequest implements ISetShardIsPrimaryServingRequest { /** - * Constructs a new RemoveKeyspaceCellRequest. + * Constructs a new SetShardIsPrimaryServingRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IRemoveKeyspaceCellRequest); + constructor(properties?: vtctldata.ISetShardIsPrimaryServingRequest); - /** RemoveKeyspaceCellRequest keyspace. */ + /** SetShardIsPrimaryServingRequest keyspace. */ public keyspace: string; - /** RemoveKeyspaceCellRequest cell. */ - public cell: string; - - /** RemoveKeyspaceCellRequest force. */ - public force: boolean; + /** SetShardIsPrimaryServingRequest shard. */ + public shard: string; - /** RemoveKeyspaceCellRequest recursive. */ - public recursive: boolean; + /** SetShardIsPrimaryServingRequest is_serving. */ + public is_serving: boolean; /** - * Creates a new RemoveKeyspaceCellRequest instance using the specified properties. + * Creates a new SetShardIsPrimaryServingRequest instance using the specified properties. 
* @param [properties] Properties to set - * @returns RemoveKeyspaceCellRequest instance + * @returns SetShardIsPrimaryServingRequest instance */ - public static create(properties?: vtctldata.IRemoveKeyspaceCellRequest): vtctldata.RemoveKeyspaceCellRequest; + public static create(properties?: vtctldata.ISetShardIsPrimaryServingRequest): vtctldata.SetShardIsPrimaryServingRequest; /** - * Encodes the specified RemoveKeyspaceCellRequest message. Does not implicitly {@link vtctldata.RemoveKeyspaceCellRequest.verify|verify} messages. - * @param message RemoveKeyspaceCellRequest message or plain object to encode + * Encodes the specified SetShardIsPrimaryServingRequest message. Does not implicitly {@link vtctldata.SetShardIsPrimaryServingRequest.verify|verify} messages. + * @param message SetShardIsPrimaryServingRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IRemoveKeyspaceCellRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ISetShardIsPrimaryServingRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RemoveKeyspaceCellRequest message, length delimited. Does not implicitly {@link vtctldata.RemoveKeyspaceCellRequest.verify|verify} messages. - * @param message RemoveKeyspaceCellRequest message or plain object to encode + * Encodes the specified SetShardIsPrimaryServingRequest message, length delimited. Does not implicitly {@link vtctldata.SetShardIsPrimaryServingRequest.verify|verify} messages. 
+ * @param message SetShardIsPrimaryServingRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IRemoveKeyspaceCellRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ISetShardIsPrimaryServingRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RemoveKeyspaceCellRequest message from the specified reader or buffer. + * Decodes a SetShardIsPrimaryServingRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RemoveKeyspaceCellRequest + * @returns SetShardIsPrimaryServingRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RemoveKeyspaceCellRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetShardIsPrimaryServingRequest; /** - * Decodes a RemoveKeyspaceCellRequest message from the specified reader or buffer, length delimited. + * Decodes a SetShardIsPrimaryServingRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RemoveKeyspaceCellRequest + * @returns SetShardIsPrimaryServingRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RemoveKeyspaceCellRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetShardIsPrimaryServingRequest; /** - * Verifies a RemoveKeyspaceCellRequest message. + * Verifies a SetShardIsPrimaryServingRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RemoveKeyspaceCellRequest message from a plain object. Also converts values to their respective internal types. + * Creates a SetShardIsPrimaryServingRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns RemoveKeyspaceCellRequest + * @returns SetShardIsPrimaryServingRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.RemoveKeyspaceCellRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.SetShardIsPrimaryServingRequest; /** - * Creates a plain object from a RemoveKeyspaceCellRequest message. Also converts values to other types if specified. - * @param message RemoveKeyspaceCellRequest + * Creates a plain object from a SetShardIsPrimaryServingRequest message. Also converts values to other types if specified. + * @param message SetShardIsPrimaryServingRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.RemoveKeyspaceCellRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.SetShardIsPrimaryServingRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RemoveKeyspaceCellRequest to JSON. + * Converts this SetShardIsPrimaryServingRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RemoveKeyspaceCellRequest + * Gets the default type url for SetShardIsPrimaryServingRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RemoveKeyspaceCellResponse. 
*/ - interface IRemoveKeyspaceCellResponse { + /** Properties of a SetShardIsPrimaryServingResponse. */ + interface ISetShardIsPrimaryServingResponse { + + /** SetShardIsPrimaryServingResponse shard */ + shard?: (topodata.IShard|null); } - /** Represents a RemoveKeyspaceCellResponse. */ - class RemoveKeyspaceCellResponse implements IRemoveKeyspaceCellResponse { + /** Represents a SetShardIsPrimaryServingResponse. */ + class SetShardIsPrimaryServingResponse implements ISetShardIsPrimaryServingResponse { /** - * Constructs a new RemoveKeyspaceCellResponse. + * Constructs a new SetShardIsPrimaryServingResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IRemoveKeyspaceCellResponse); + constructor(properties?: vtctldata.ISetShardIsPrimaryServingResponse); + + /** SetShardIsPrimaryServingResponse shard. */ + public shard?: (topodata.IShard|null); /** - * Creates a new RemoveKeyspaceCellResponse instance using the specified properties. + * Creates a new SetShardIsPrimaryServingResponse instance using the specified properties. * @param [properties] Properties to set - * @returns RemoveKeyspaceCellResponse instance + * @returns SetShardIsPrimaryServingResponse instance */ - public static create(properties?: vtctldata.IRemoveKeyspaceCellResponse): vtctldata.RemoveKeyspaceCellResponse; + public static create(properties?: vtctldata.ISetShardIsPrimaryServingResponse): vtctldata.SetShardIsPrimaryServingResponse; /** - * Encodes the specified RemoveKeyspaceCellResponse message. Does not implicitly {@link vtctldata.RemoveKeyspaceCellResponse.verify|verify} messages. - * @param message RemoveKeyspaceCellResponse message or plain object to encode + * Encodes the specified SetShardIsPrimaryServingResponse message. Does not implicitly {@link vtctldata.SetShardIsPrimaryServingResponse.verify|verify} messages. 
+ * @param message SetShardIsPrimaryServingResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IRemoveKeyspaceCellResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ISetShardIsPrimaryServingResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RemoveKeyspaceCellResponse message, length delimited. Does not implicitly {@link vtctldata.RemoveKeyspaceCellResponse.verify|verify} messages. - * @param message RemoveKeyspaceCellResponse message or plain object to encode + * Encodes the specified SetShardIsPrimaryServingResponse message, length delimited. Does not implicitly {@link vtctldata.SetShardIsPrimaryServingResponse.verify|verify} messages. + * @param message SetShardIsPrimaryServingResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IRemoveKeyspaceCellResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ISetShardIsPrimaryServingResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RemoveKeyspaceCellResponse message from the specified reader or buffer. + * Decodes a SetShardIsPrimaryServingResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RemoveKeyspaceCellResponse + * @returns SetShardIsPrimaryServingResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RemoveKeyspaceCellResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetShardIsPrimaryServingResponse; /** - * Decodes a RemoveKeyspaceCellResponse message from the specified reader or buffer, length delimited. + * Decodes a SetShardIsPrimaryServingResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RemoveKeyspaceCellResponse + * @returns SetShardIsPrimaryServingResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RemoveKeyspaceCellResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetShardIsPrimaryServingResponse; /** - * Verifies a RemoveKeyspaceCellResponse message. + * Verifies a SetShardIsPrimaryServingResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RemoveKeyspaceCellResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SetShardIsPrimaryServingResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns RemoveKeyspaceCellResponse + * @returns SetShardIsPrimaryServingResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.RemoveKeyspaceCellResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.SetShardIsPrimaryServingResponse; /** - * Creates a plain object from a RemoveKeyspaceCellResponse message. Also converts values to other types if specified. - * @param message RemoveKeyspaceCellResponse + * Creates a plain object from a SetShardIsPrimaryServingResponse message. Also converts values to other types if specified. + * @param message SetShardIsPrimaryServingResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.RemoveKeyspaceCellResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.SetShardIsPrimaryServingResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RemoveKeyspaceCellResponse to JSON. + * Converts this SetShardIsPrimaryServingResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RemoveKeyspaceCellResponse + * Gets the default type url for SetShardIsPrimaryServingResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RemoveShardCellRequest. */ - interface IRemoveShardCellRequest { + /** Properties of a SetShardTabletControlRequest. 
*/ + interface ISetShardTabletControlRequest { - /** RemoveShardCellRequest keyspace */ + /** SetShardTabletControlRequest keyspace */ keyspace?: (string|null); - /** RemoveShardCellRequest shard_name */ - shard_name?: (string|null); + /** SetShardTabletControlRequest shard */ + shard?: (string|null); - /** RemoveShardCellRequest cell */ - cell?: (string|null); + /** SetShardTabletControlRequest tablet_type */ + tablet_type?: (topodata.TabletType|null); - /** RemoveShardCellRequest force */ - force?: (boolean|null); + /** SetShardTabletControlRequest cells */ + cells?: (string[]|null); - /** RemoveShardCellRequest recursive */ - recursive?: (boolean|null); + /** SetShardTabletControlRequest denied_tables */ + denied_tables?: (string[]|null); + + /** SetShardTabletControlRequest disable_query_service */ + disable_query_service?: (boolean|null); + + /** SetShardTabletControlRequest remove */ + remove?: (boolean|null); } - /** Represents a RemoveShardCellRequest. */ - class RemoveShardCellRequest implements IRemoveShardCellRequest { + /** Represents a SetShardTabletControlRequest. */ + class SetShardTabletControlRequest implements ISetShardTabletControlRequest { /** - * Constructs a new RemoveShardCellRequest. + * Constructs a new SetShardTabletControlRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IRemoveShardCellRequest); + constructor(properties?: vtctldata.ISetShardTabletControlRequest); - /** RemoveShardCellRequest keyspace. */ + /** SetShardTabletControlRequest keyspace. */ public keyspace: string; - /** RemoveShardCellRequest shard_name. */ - public shard_name: string; + /** SetShardTabletControlRequest shard. */ + public shard: string; - /** RemoveShardCellRequest cell. */ - public cell: string; + /** SetShardTabletControlRequest tablet_type. */ + public tablet_type: topodata.TabletType; - /** RemoveShardCellRequest force. */ - public force: boolean; + /** SetShardTabletControlRequest cells. 
*/ + public cells: string[]; - /** RemoveShardCellRequest recursive. */ - public recursive: boolean; + /** SetShardTabletControlRequest denied_tables. */ + public denied_tables: string[]; + + /** SetShardTabletControlRequest disable_query_service. */ + public disable_query_service: boolean; + + /** SetShardTabletControlRequest remove. */ + public remove: boolean; /** - * Creates a new RemoveShardCellRequest instance using the specified properties. + * Creates a new SetShardTabletControlRequest instance using the specified properties. * @param [properties] Properties to set - * @returns RemoveShardCellRequest instance + * @returns SetShardTabletControlRequest instance */ - public static create(properties?: vtctldata.IRemoveShardCellRequest): vtctldata.RemoveShardCellRequest; + public static create(properties?: vtctldata.ISetShardTabletControlRequest): vtctldata.SetShardTabletControlRequest; /** - * Encodes the specified RemoveShardCellRequest message. Does not implicitly {@link vtctldata.RemoveShardCellRequest.verify|verify} messages. - * @param message RemoveShardCellRequest message or plain object to encode + * Encodes the specified SetShardTabletControlRequest message. Does not implicitly {@link vtctldata.SetShardTabletControlRequest.verify|verify} messages. + * @param message SetShardTabletControlRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IRemoveShardCellRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ISetShardTabletControlRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RemoveShardCellRequest message, length delimited. Does not implicitly {@link vtctldata.RemoveShardCellRequest.verify|verify} messages. - * @param message RemoveShardCellRequest message or plain object to encode + * Encodes the specified SetShardTabletControlRequest message, length delimited. 
Does not implicitly {@link vtctldata.SetShardTabletControlRequest.verify|verify} messages. + * @param message SetShardTabletControlRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IRemoveShardCellRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ISetShardTabletControlRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RemoveShardCellRequest message from the specified reader or buffer. + * Decodes a SetShardTabletControlRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RemoveShardCellRequest + * @returns SetShardTabletControlRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RemoveShardCellRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetShardTabletControlRequest; /** - * Decodes a RemoveShardCellRequest message from the specified reader or buffer, length delimited. + * Decodes a SetShardTabletControlRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RemoveShardCellRequest + * @returns SetShardTabletControlRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RemoveShardCellRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetShardTabletControlRequest; /** - * Verifies a RemoveShardCellRequest message. 
+ * Verifies a SetShardTabletControlRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RemoveShardCellRequest message from a plain object. Also converts values to their respective internal types. + * Creates a SetShardTabletControlRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns RemoveShardCellRequest + * @returns SetShardTabletControlRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.RemoveShardCellRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.SetShardTabletControlRequest; /** - * Creates a plain object from a RemoveShardCellRequest message. Also converts values to other types if specified. - * @param message RemoveShardCellRequest + * Creates a plain object from a SetShardTabletControlRequest message. Also converts values to other types if specified. + * @param message SetShardTabletControlRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.RemoveShardCellRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.SetShardTabletControlRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RemoveShardCellRequest to JSON. + * Converts this SetShardTabletControlRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RemoveShardCellRequest + * Gets the default type url for SetShardTabletControlRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RemoveShardCellResponse. 
*/ - interface IRemoveShardCellResponse { + /** Properties of a SetShardTabletControlResponse. */ + interface ISetShardTabletControlResponse { + + /** SetShardTabletControlResponse shard */ + shard?: (topodata.IShard|null); } - /** Represents a RemoveShardCellResponse. */ - class RemoveShardCellResponse implements IRemoveShardCellResponse { + /** Represents a SetShardTabletControlResponse. */ + class SetShardTabletControlResponse implements ISetShardTabletControlResponse { /** - * Constructs a new RemoveShardCellResponse. + * Constructs a new SetShardTabletControlResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IRemoveShardCellResponse); + constructor(properties?: vtctldata.ISetShardTabletControlResponse); + + /** SetShardTabletControlResponse shard. */ + public shard?: (topodata.IShard|null); /** - * Creates a new RemoveShardCellResponse instance using the specified properties. + * Creates a new SetShardTabletControlResponse instance using the specified properties. * @param [properties] Properties to set - * @returns RemoveShardCellResponse instance + * @returns SetShardTabletControlResponse instance */ - public static create(properties?: vtctldata.IRemoveShardCellResponse): vtctldata.RemoveShardCellResponse; + public static create(properties?: vtctldata.ISetShardTabletControlResponse): vtctldata.SetShardTabletControlResponse; /** - * Encodes the specified RemoveShardCellResponse message. Does not implicitly {@link vtctldata.RemoveShardCellResponse.verify|verify} messages. - * @param message RemoveShardCellResponse message or plain object to encode + * Encodes the specified SetShardTabletControlResponse message. Does not implicitly {@link vtctldata.SetShardTabletControlResponse.verify|verify} messages. 
+ * @param message SetShardTabletControlResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IRemoveShardCellResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ISetShardTabletControlResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RemoveShardCellResponse message, length delimited. Does not implicitly {@link vtctldata.RemoveShardCellResponse.verify|verify} messages. - * @param message RemoveShardCellResponse message or plain object to encode + * Encodes the specified SetShardTabletControlResponse message, length delimited. Does not implicitly {@link vtctldata.SetShardTabletControlResponse.verify|verify} messages. + * @param message SetShardTabletControlResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IRemoveShardCellResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ISetShardTabletControlResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RemoveShardCellResponse message from the specified reader or buffer. + * Decodes a SetShardTabletControlResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RemoveShardCellResponse + * @returns SetShardTabletControlResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RemoveShardCellResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetShardTabletControlResponse; /** - * Decodes a RemoveShardCellResponse message from the specified reader or buffer, length delimited. + * Decodes a SetShardTabletControlResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RemoveShardCellResponse + * @returns SetShardTabletControlResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RemoveShardCellResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetShardTabletControlResponse; /** - * Verifies a RemoveShardCellResponse message. + * Verifies a SetShardTabletControlResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RemoveShardCellResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SetShardTabletControlResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns RemoveShardCellResponse + * @returns SetShardTabletControlResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.RemoveShardCellResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.SetShardTabletControlResponse; /** - * Creates a plain object from a RemoveShardCellResponse message. Also converts values to other types if specified. - * @param message RemoveShardCellResponse + * Creates a plain object from a SetShardTabletControlResponse message. Also converts values to other types if specified. + * @param message SetShardTabletControlResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.RemoveShardCellResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.SetShardTabletControlResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RemoveShardCellResponse to JSON. + * Converts this SetShardTabletControlResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RemoveShardCellResponse + * Gets the default type url for SetShardTabletControlResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReparentTabletRequest. */ - interface IReparentTabletRequest { + /** Properties of a SetWritableRequest. */ + interface ISetWritableRequest { - /** ReparentTabletRequest tablet */ - tablet?: (topodata.ITabletAlias|null); + /** SetWritableRequest tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); + + /** SetWritableRequest writable */ + writable?: (boolean|null); } - /** Represents a ReparentTabletRequest. 
*/ - class ReparentTabletRequest implements IReparentTabletRequest { + /** Represents a SetWritableRequest. */ + class SetWritableRequest implements ISetWritableRequest { /** - * Constructs a new ReparentTabletRequest. + * Constructs a new SetWritableRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IReparentTabletRequest); + constructor(properties?: vtctldata.ISetWritableRequest); - /** ReparentTabletRequest tablet. */ - public tablet?: (topodata.ITabletAlias|null); + /** SetWritableRequest tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); + + /** SetWritableRequest writable. */ + public writable: boolean; /** - * Creates a new ReparentTabletRequest instance using the specified properties. + * Creates a new SetWritableRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ReparentTabletRequest instance + * @returns SetWritableRequest instance */ - public static create(properties?: vtctldata.IReparentTabletRequest): vtctldata.ReparentTabletRequest; + public static create(properties?: vtctldata.ISetWritableRequest): vtctldata.SetWritableRequest; /** - * Encodes the specified ReparentTabletRequest message. Does not implicitly {@link vtctldata.ReparentTabletRequest.verify|verify} messages. - * @param message ReparentTabletRequest message or plain object to encode + * Encodes the specified SetWritableRequest message. Does not implicitly {@link vtctldata.SetWritableRequest.verify|verify} messages. + * @param message SetWritableRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IReparentTabletRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ISetWritableRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReparentTabletRequest message, length delimited. 
Does not implicitly {@link vtctldata.ReparentTabletRequest.verify|verify} messages. - * @param message ReparentTabletRequest message or plain object to encode + * Encodes the specified SetWritableRequest message, length delimited. Does not implicitly {@link vtctldata.SetWritableRequest.verify|verify} messages. + * @param message SetWritableRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IReparentTabletRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ISetWritableRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReparentTabletRequest message from the specified reader or buffer. + * Decodes a SetWritableRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReparentTabletRequest + * @returns SetWritableRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ReparentTabletRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetWritableRequest; /** - * Decodes a ReparentTabletRequest message from the specified reader or buffer, length delimited. + * Decodes a SetWritableRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns ReparentTabletRequest + * @returns SetWritableRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ReparentTabletRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetWritableRequest; /** - * Verifies a ReparentTabletRequest message. + * Verifies a SetWritableRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReparentTabletRequest message from a plain object. Also converts values to their respective internal types. + * Creates a SetWritableRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ReparentTabletRequest + * @returns SetWritableRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ReparentTabletRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.SetWritableRequest; /** - * Creates a plain object from a ReparentTabletRequest message. Also converts values to other types if specified. - * @param message ReparentTabletRequest + * Creates a plain object from a SetWritableRequest message. Also converts values to other types if specified. + * @param message SetWritableRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ReparentTabletRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.SetWritableRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReparentTabletRequest to JSON. + * Converts this SetWritableRequest to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReparentTabletRequest + * Gets the default type url for SetWritableRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ReparentTabletResponse. */ - interface IReparentTabletResponse { - - /** ReparentTabletResponse keyspace */ - keyspace?: (string|null); - - /** ReparentTabletResponse shard */ - shard?: (string|null); - - /** ReparentTabletResponse primary */ - primary?: (topodata.ITabletAlias|null); + /** Properties of a SetWritableResponse. */ + interface ISetWritableResponse { } - /** Represents a ReparentTabletResponse. */ - class ReparentTabletResponse implements IReparentTabletResponse { + /** Represents a SetWritableResponse. */ + class SetWritableResponse implements ISetWritableResponse { /** - * Constructs a new ReparentTabletResponse. + * Constructs a new SetWritableResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IReparentTabletResponse); - - /** ReparentTabletResponse keyspace. */ - public keyspace: string; - - /** ReparentTabletResponse shard. */ - public shard: string; - - /** ReparentTabletResponse primary. */ - public primary?: (topodata.ITabletAlias|null); + constructor(properties?: vtctldata.ISetWritableResponse); /** - * Creates a new ReparentTabletResponse instance using the specified properties. + * Creates a new SetWritableResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns ReparentTabletResponse instance + * @returns SetWritableResponse instance */ - public static create(properties?: vtctldata.IReparentTabletResponse): vtctldata.ReparentTabletResponse; + public static create(properties?: vtctldata.ISetWritableResponse): vtctldata.SetWritableResponse; /** - * Encodes the specified ReparentTabletResponse message. Does not implicitly {@link vtctldata.ReparentTabletResponse.verify|verify} messages. - * @param message ReparentTabletResponse message or plain object to encode + * Encodes the specified SetWritableResponse message. Does not implicitly {@link vtctldata.SetWritableResponse.verify|verify} messages. + * @param message SetWritableResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IReparentTabletResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ISetWritableResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ReparentTabletResponse message, length delimited. Does not implicitly {@link vtctldata.ReparentTabletResponse.verify|verify} messages. - * @param message ReparentTabletResponse message or plain object to encode + * Encodes the specified SetWritableResponse message, length delimited. Does not implicitly {@link vtctldata.SetWritableResponse.verify|verify} messages. + * @param message SetWritableResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IReparentTabletResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ISetWritableResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ReparentTabletResponse message from the specified reader or buffer. + * Decodes a SetWritableResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ReparentTabletResponse + * @returns SetWritableResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ReparentTabletResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetWritableResponse; /** - * Decodes a ReparentTabletResponse message from the specified reader or buffer, length delimited. + * Decodes a SetWritableResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ReparentTabletResponse + * @returns SetWritableResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ReparentTabletResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetWritableResponse; /** - * Verifies a ReparentTabletResponse message. + * Verifies a SetWritableResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ReparentTabletResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SetWritableResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ReparentTabletResponse + * @returns SetWritableResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ReparentTabletResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.SetWritableResponse; /** - * Creates a plain object from a ReparentTabletResponse message. Also converts values to other types if specified. - * @param message ReparentTabletResponse + * Creates a plain object from a SetWritableResponse message. Also converts values to other types if specified. + * @param message SetWritableResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ReparentTabletResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.SetWritableResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ReparentTabletResponse to JSON. + * Converts this SetWritableResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ReparentTabletResponse + * Gets the default type url for SetWritableResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RestoreFromBackupRequest. */ - interface IRestoreFromBackupRequest { - - /** RestoreFromBackupRequest tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); + /** Properties of a ShardReplicationAddRequest. 
*/ + interface IShardReplicationAddRequest { - /** RestoreFromBackupRequest backup_time */ - backup_time?: (vttime.ITime|null); + /** ShardReplicationAddRequest keyspace */ + keyspace?: (string|null); - /** RestoreFromBackupRequest restore_to_pos */ - restore_to_pos?: (string|null); + /** ShardReplicationAddRequest shard */ + shard?: (string|null); - /** RestoreFromBackupRequest dry_run */ - dry_run?: (boolean|null); + /** ShardReplicationAddRequest tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); } - /** Represents a RestoreFromBackupRequest. */ - class RestoreFromBackupRequest implements IRestoreFromBackupRequest { + /** Represents a ShardReplicationAddRequest. */ + class ShardReplicationAddRequest implements IShardReplicationAddRequest { /** - * Constructs a new RestoreFromBackupRequest. + * Constructs a new ShardReplicationAddRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IRestoreFromBackupRequest); - - /** RestoreFromBackupRequest tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); + constructor(properties?: vtctldata.IShardReplicationAddRequest); - /** RestoreFromBackupRequest backup_time. */ - public backup_time?: (vttime.ITime|null); + /** ShardReplicationAddRequest keyspace. */ + public keyspace: string; - /** RestoreFromBackupRequest restore_to_pos. */ - public restore_to_pos: string; + /** ShardReplicationAddRequest shard. */ + public shard: string; - /** RestoreFromBackupRequest dry_run. */ - public dry_run: boolean; + /** ShardReplicationAddRequest tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); /** - * Creates a new RestoreFromBackupRequest instance using the specified properties. + * Creates a new ShardReplicationAddRequest instance using the specified properties. 
* @param [properties] Properties to set - * @returns RestoreFromBackupRequest instance + * @returns ShardReplicationAddRequest instance */ - public static create(properties?: vtctldata.IRestoreFromBackupRequest): vtctldata.RestoreFromBackupRequest; + public static create(properties?: vtctldata.IShardReplicationAddRequest): vtctldata.ShardReplicationAddRequest; /** - * Encodes the specified RestoreFromBackupRequest message. Does not implicitly {@link vtctldata.RestoreFromBackupRequest.verify|verify} messages. - * @param message RestoreFromBackupRequest message or plain object to encode + * Encodes the specified ShardReplicationAddRequest message. Does not implicitly {@link vtctldata.ShardReplicationAddRequest.verify|verify} messages. + * @param message ShardReplicationAddRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IRestoreFromBackupRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IShardReplicationAddRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RestoreFromBackupRequest message, length delimited. Does not implicitly {@link vtctldata.RestoreFromBackupRequest.verify|verify} messages. - * @param message RestoreFromBackupRequest message or plain object to encode + * Encodes the specified ShardReplicationAddRequest message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationAddRequest.verify|verify} messages. 
+ * @param message ShardReplicationAddRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IRestoreFromBackupRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IShardReplicationAddRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RestoreFromBackupRequest message from the specified reader or buffer. + * Decodes a ShardReplicationAddRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RestoreFromBackupRequest + * @returns ShardReplicationAddRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RestoreFromBackupRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ShardReplicationAddRequest; /** - * Decodes a RestoreFromBackupRequest message from the specified reader or buffer, length delimited. + * Decodes a ShardReplicationAddRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RestoreFromBackupRequest + * @returns ShardReplicationAddRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RestoreFromBackupRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ShardReplicationAddRequest; /** - * Verifies a RestoreFromBackupRequest message. + * Verifies a ShardReplicationAddRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RestoreFromBackupRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ShardReplicationAddRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns RestoreFromBackupRequest + * @returns ShardReplicationAddRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.RestoreFromBackupRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.ShardReplicationAddRequest; /** - * Creates a plain object from a RestoreFromBackupRequest message. Also converts values to other types if specified. - * @param message RestoreFromBackupRequest + * Creates a plain object from a ShardReplicationAddRequest message. Also converts values to other types if specified. + * @param message ShardReplicationAddRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.RestoreFromBackupRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ShardReplicationAddRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RestoreFromBackupRequest to JSON. + * Converts this ShardReplicationAddRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RestoreFromBackupRequest + * Gets the default type url for ShardReplicationAddRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RestoreFromBackupResponse. 
*/ - interface IRestoreFromBackupResponse { - - /** RestoreFromBackupResponse tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); - - /** RestoreFromBackupResponse keyspace */ - keyspace?: (string|null); - - /** RestoreFromBackupResponse shard */ - shard?: (string|null); - - /** RestoreFromBackupResponse event */ - event?: (logutil.IEvent|null); + /** Properties of a ShardReplicationAddResponse. */ + interface IShardReplicationAddResponse { } - /** Represents a RestoreFromBackupResponse. */ - class RestoreFromBackupResponse implements IRestoreFromBackupResponse { + /** Represents a ShardReplicationAddResponse. */ + class ShardReplicationAddResponse implements IShardReplicationAddResponse { /** - * Constructs a new RestoreFromBackupResponse. + * Constructs a new ShardReplicationAddResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IRestoreFromBackupResponse); - - /** RestoreFromBackupResponse tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); - - /** RestoreFromBackupResponse keyspace. */ - public keyspace: string; - - /** RestoreFromBackupResponse shard. */ - public shard: string; - - /** RestoreFromBackupResponse event. */ - public event?: (logutil.IEvent|null); + constructor(properties?: vtctldata.IShardReplicationAddResponse); /** - * Creates a new RestoreFromBackupResponse instance using the specified properties. + * Creates a new ShardReplicationAddResponse instance using the specified properties. * @param [properties] Properties to set - * @returns RestoreFromBackupResponse instance + * @returns ShardReplicationAddResponse instance */ - public static create(properties?: vtctldata.IRestoreFromBackupResponse): vtctldata.RestoreFromBackupResponse; + public static create(properties?: vtctldata.IShardReplicationAddResponse): vtctldata.ShardReplicationAddResponse; /** - * Encodes the specified RestoreFromBackupResponse message. 
Does not implicitly {@link vtctldata.RestoreFromBackupResponse.verify|verify} messages. - * @param message RestoreFromBackupResponse message or plain object to encode + * Encodes the specified ShardReplicationAddResponse message. Does not implicitly {@link vtctldata.ShardReplicationAddResponse.verify|verify} messages. + * @param message ShardReplicationAddResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IRestoreFromBackupResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IShardReplicationAddResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RestoreFromBackupResponse message, length delimited. Does not implicitly {@link vtctldata.RestoreFromBackupResponse.verify|verify} messages. - * @param message RestoreFromBackupResponse message or plain object to encode + * Encodes the specified ShardReplicationAddResponse message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationAddResponse.verify|verify} messages. + * @param message ShardReplicationAddResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IRestoreFromBackupResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IShardReplicationAddResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RestoreFromBackupResponse message from the specified reader or buffer. + * Decodes a ShardReplicationAddResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RestoreFromBackupResponse + * @returns ShardReplicationAddResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RestoreFromBackupResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ShardReplicationAddResponse; /** - * Decodes a RestoreFromBackupResponse message from the specified reader or buffer, length delimited. + * Decodes a ShardReplicationAddResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RestoreFromBackupResponse + * @returns ShardReplicationAddResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RestoreFromBackupResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ShardReplicationAddResponse; /** - * Verifies a RestoreFromBackupResponse message. + * Verifies a ShardReplicationAddResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RestoreFromBackupResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ShardReplicationAddResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns RestoreFromBackupResponse + * @returns ShardReplicationAddResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.RestoreFromBackupResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.ShardReplicationAddResponse; /** - * Creates a plain object from a RestoreFromBackupResponse message. Also converts values to other types if specified. - * @param message RestoreFromBackupResponse + * Creates a plain object from a ShardReplicationAddResponse message. Also converts values to other types if specified. + * @param message ShardReplicationAddResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.RestoreFromBackupResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ShardReplicationAddResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RestoreFromBackupResponse to JSON. + * Converts this ShardReplicationAddResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RestoreFromBackupResponse + * Gets the default type url for ShardReplicationAddResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RunHealthCheckRequest. */ - interface IRunHealthCheckRequest { + /** Properties of a ShardReplicationFixRequest. */ + interface IShardReplicationFixRequest { - /** RunHealthCheckRequest tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); + /** ShardReplicationFixRequest keyspace */ + keyspace?: (string|null); + + /** ShardReplicationFixRequest shard */ + shard?: (string|null); + + /** ShardReplicationFixRequest cell */ + cell?: (string|null); } - /** Represents a RunHealthCheckRequest. 
*/ - class RunHealthCheckRequest implements IRunHealthCheckRequest { + /** Represents a ShardReplicationFixRequest. */ + class ShardReplicationFixRequest implements IShardReplicationFixRequest { /** - * Constructs a new RunHealthCheckRequest. + * Constructs a new ShardReplicationFixRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IRunHealthCheckRequest); + constructor(properties?: vtctldata.IShardReplicationFixRequest); - /** RunHealthCheckRequest tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); + /** ShardReplicationFixRequest keyspace. */ + public keyspace: string; + + /** ShardReplicationFixRequest shard. */ + public shard: string; + + /** ShardReplicationFixRequest cell. */ + public cell: string; /** - * Creates a new RunHealthCheckRequest instance using the specified properties. + * Creates a new ShardReplicationFixRequest instance using the specified properties. * @param [properties] Properties to set - * @returns RunHealthCheckRequest instance + * @returns ShardReplicationFixRequest instance */ - public static create(properties?: vtctldata.IRunHealthCheckRequest): vtctldata.RunHealthCheckRequest; + public static create(properties?: vtctldata.IShardReplicationFixRequest): vtctldata.ShardReplicationFixRequest; /** - * Encodes the specified RunHealthCheckRequest message. Does not implicitly {@link vtctldata.RunHealthCheckRequest.verify|verify} messages. - * @param message RunHealthCheckRequest message or plain object to encode + * Encodes the specified ShardReplicationFixRequest message. Does not implicitly {@link vtctldata.ShardReplicationFixRequest.verify|verify} messages. 
+ * @param message ShardReplicationFixRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IRunHealthCheckRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IShardReplicationFixRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RunHealthCheckRequest message, length delimited. Does not implicitly {@link vtctldata.RunHealthCheckRequest.verify|verify} messages. - * @param message RunHealthCheckRequest message or plain object to encode + * Encodes the specified ShardReplicationFixRequest message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationFixRequest.verify|verify} messages. + * @param message ShardReplicationFixRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IRunHealthCheckRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IShardReplicationFixRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RunHealthCheckRequest message from the specified reader or buffer. + * Decodes a ShardReplicationFixRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RunHealthCheckRequest + * @returns ShardReplicationFixRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RunHealthCheckRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ShardReplicationFixRequest; /** - * Decodes a RunHealthCheckRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a ShardReplicationFixRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RunHealthCheckRequest + * @returns ShardReplicationFixRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RunHealthCheckRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ShardReplicationFixRequest; /** - * Verifies a RunHealthCheckRequest message. + * Verifies a ShardReplicationFixRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RunHealthCheckRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ShardReplicationFixRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns RunHealthCheckRequest + * @returns ShardReplicationFixRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.RunHealthCheckRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.ShardReplicationFixRequest; /** - * Creates a plain object from a RunHealthCheckRequest message. Also converts values to other types if specified. - * @param message RunHealthCheckRequest + * Creates a plain object from a ShardReplicationFixRequest message. Also converts values to other types if specified. 
+ * @param message ShardReplicationFixRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.RunHealthCheckRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ShardReplicationFixRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RunHealthCheckRequest to JSON. + * Converts this ShardReplicationFixRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RunHealthCheckRequest + * Gets the default type url for ShardReplicationFixRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a RunHealthCheckResponse. */ - interface IRunHealthCheckResponse { + /** Properties of a ShardReplicationFixResponse. */ + interface IShardReplicationFixResponse { + + /** ShardReplicationFixResponse error */ + error?: (topodata.IShardReplicationError|null); } - /** Represents a RunHealthCheckResponse. */ - class RunHealthCheckResponse implements IRunHealthCheckResponse { + /** Represents a ShardReplicationFixResponse. */ + class ShardReplicationFixResponse implements IShardReplicationFixResponse { /** - * Constructs a new RunHealthCheckResponse. + * Constructs a new ShardReplicationFixResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IRunHealthCheckResponse); + constructor(properties?: vtctldata.IShardReplicationFixResponse); + + /** ShardReplicationFixResponse error. */ + public error?: (topodata.IShardReplicationError|null); /** - * Creates a new RunHealthCheckResponse instance using the specified properties. + * Creates a new ShardReplicationFixResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns RunHealthCheckResponse instance + * @returns ShardReplicationFixResponse instance */ - public static create(properties?: vtctldata.IRunHealthCheckResponse): vtctldata.RunHealthCheckResponse; + public static create(properties?: vtctldata.IShardReplicationFixResponse): vtctldata.ShardReplicationFixResponse; /** - * Encodes the specified RunHealthCheckResponse message. Does not implicitly {@link vtctldata.RunHealthCheckResponse.verify|verify} messages. - * @param message RunHealthCheckResponse message or plain object to encode + * Encodes the specified ShardReplicationFixResponse message. Does not implicitly {@link vtctldata.ShardReplicationFixResponse.verify|verify} messages. + * @param message ShardReplicationFixResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IRunHealthCheckResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IShardReplicationFixResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified RunHealthCheckResponse message, length delimited. Does not implicitly {@link vtctldata.RunHealthCheckResponse.verify|verify} messages. - * @param message RunHealthCheckResponse message or plain object to encode + * Encodes the specified ShardReplicationFixResponse message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationFixResponse.verify|verify} messages. + * @param message ShardReplicationFixResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IRunHealthCheckResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IShardReplicationFixResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a RunHealthCheckResponse message from the specified reader or buffer. 
+ * Decodes a ShardReplicationFixResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns RunHealthCheckResponse + * @returns ShardReplicationFixResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.RunHealthCheckResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ShardReplicationFixResponse; /** - * Decodes a RunHealthCheckResponse message from the specified reader or buffer, length delimited. + * Decodes a ShardReplicationFixResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns RunHealthCheckResponse + * @returns ShardReplicationFixResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.RunHealthCheckResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ShardReplicationFixResponse; /** - * Verifies a RunHealthCheckResponse message. + * Verifies a ShardReplicationFixResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a RunHealthCheckResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ShardReplicationFixResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns RunHealthCheckResponse + * @returns ShardReplicationFixResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.RunHealthCheckResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.ShardReplicationFixResponse; /** - * Creates a plain object from a RunHealthCheckResponse message. Also converts values to other types if specified. - * @param message RunHealthCheckResponse + * Creates a plain object from a ShardReplicationFixResponse message. Also converts values to other types if specified. + * @param message ShardReplicationFixResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.RunHealthCheckResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ShardReplicationFixResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this RunHealthCheckResponse to JSON. + * Converts this ShardReplicationFixResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for RunHealthCheckResponse + * Gets the default type url for ShardReplicationFixResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SetKeyspaceDurabilityPolicyRequest. */ - interface ISetKeyspaceDurabilityPolicyRequest { + /** Properties of a ShardReplicationPositionsRequest. 
*/ + interface IShardReplicationPositionsRequest { - /** SetKeyspaceDurabilityPolicyRequest keyspace */ + /** ShardReplicationPositionsRequest keyspace */ keyspace?: (string|null); - /** SetKeyspaceDurabilityPolicyRequest durability_policy */ - durability_policy?: (string|null); + /** ShardReplicationPositionsRequest shard */ + shard?: (string|null); } - /** Represents a SetKeyspaceDurabilityPolicyRequest. */ - class SetKeyspaceDurabilityPolicyRequest implements ISetKeyspaceDurabilityPolicyRequest { + /** Represents a ShardReplicationPositionsRequest. */ + class ShardReplicationPositionsRequest implements IShardReplicationPositionsRequest { /** - * Constructs a new SetKeyspaceDurabilityPolicyRequest. + * Constructs a new ShardReplicationPositionsRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ISetKeyspaceDurabilityPolicyRequest); + constructor(properties?: vtctldata.IShardReplicationPositionsRequest); - /** SetKeyspaceDurabilityPolicyRequest keyspace. */ + /** ShardReplicationPositionsRequest keyspace. */ public keyspace: string; - /** SetKeyspaceDurabilityPolicyRequest durability_policy. */ - public durability_policy: string; + /** ShardReplicationPositionsRequest shard. */ + public shard: string; /** - * Creates a new SetKeyspaceDurabilityPolicyRequest instance using the specified properties. + * Creates a new ShardReplicationPositionsRequest instance using the specified properties. * @param [properties] Properties to set - * @returns SetKeyspaceDurabilityPolicyRequest instance + * @returns ShardReplicationPositionsRequest instance */ - public static create(properties?: vtctldata.ISetKeyspaceDurabilityPolicyRequest): vtctldata.SetKeyspaceDurabilityPolicyRequest; + public static create(properties?: vtctldata.IShardReplicationPositionsRequest): vtctldata.ShardReplicationPositionsRequest; /** - * Encodes the specified SetKeyspaceDurabilityPolicyRequest message. 
Does not implicitly {@link vtctldata.SetKeyspaceDurabilityPolicyRequest.verify|verify} messages. - * @param message SetKeyspaceDurabilityPolicyRequest message or plain object to encode + * Encodes the specified ShardReplicationPositionsRequest message. Does not implicitly {@link vtctldata.ShardReplicationPositionsRequest.verify|verify} messages. + * @param message ShardReplicationPositionsRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ISetKeyspaceDurabilityPolicyRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IShardReplicationPositionsRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SetKeyspaceDurabilityPolicyRequest message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceDurabilityPolicyRequest.verify|verify} messages. - * @param message SetKeyspaceDurabilityPolicyRequest message or plain object to encode + * Encodes the specified ShardReplicationPositionsRequest message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationPositionsRequest.verify|verify} messages. + * @param message ShardReplicationPositionsRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ISetKeyspaceDurabilityPolicyRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IShardReplicationPositionsRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SetKeyspaceDurabilityPolicyRequest message from the specified reader or buffer. + * Decodes a ShardReplicationPositionsRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SetKeyspaceDurabilityPolicyRequest + * @returns ShardReplicationPositionsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetKeyspaceDurabilityPolicyRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ShardReplicationPositionsRequest; /** - * Decodes a SetKeyspaceDurabilityPolicyRequest message from the specified reader or buffer, length delimited. + * Decodes a ShardReplicationPositionsRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns SetKeyspaceDurabilityPolicyRequest + * @returns ShardReplicationPositionsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetKeyspaceDurabilityPolicyRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ShardReplicationPositionsRequest; /** - * Verifies a SetKeyspaceDurabilityPolicyRequest message. + * Verifies a ShardReplicationPositionsRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SetKeyspaceDurabilityPolicyRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ShardReplicationPositionsRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns SetKeyspaceDurabilityPolicyRequest + * @returns ShardReplicationPositionsRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.SetKeyspaceDurabilityPolicyRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.ShardReplicationPositionsRequest; /** - * Creates a plain object from a SetKeyspaceDurabilityPolicyRequest message. Also converts values to other types if specified. - * @param message SetKeyspaceDurabilityPolicyRequest + * Creates a plain object from a ShardReplicationPositionsRequest message. Also converts values to other types if specified. + * @param message ShardReplicationPositionsRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.SetKeyspaceDurabilityPolicyRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ShardReplicationPositionsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SetKeyspaceDurabilityPolicyRequest to JSON. + * Converts this ShardReplicationPositionsRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SetKeyspaceDurabilityPolicyRequest + * Gets the default type url for ShardReplicationPositionsRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SetKeyspaceDurabilityPolicyResponse. */ - interface ISetKeyspaceDurabilityPolicyResponse { + /** Properties of a ShardReplicationPositionsResponse. 
*/ + interface IShardReplicationPositionsResponse { - /** SetKeyspaceDurabilityPolicyResponse keyspace */ - keyspace?: (topodata.IKeyspace|null); + /** ShardReplicationPositionsResponse replication_statuses */ + replication_statuses?: ({ [k: string]: replicationdata.IStatus }|null); + + /** ShardReplicationPositionsResponse tablet_map */ + tablet_map?: ({ [k: string]: topodata.ITablet }|null); } - /** Represents a SetKeyspaceDurabilityPolicyResponse. */ - class SetKeyspaceDurabilityPolicyResponse implements ISetKeyspaceDurabilityPolicyResponse { + /** Represents a ShardReplicationPositionsResponse. */ + class ShardReplicationPositionsResponse implements IShardReplicationPositionsResponse { /** - * Constructs a new SetKeyspaceDurabilityPolicyResponse. + * Constructs a new ShardReplicationPositionsResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ISetKeyspaceDurabilityPolicyResponse); + constructor(properties?: vtctldata.IShardReplicationPositionsResponse); - /** SetKeyspaceDurabilityPolicyResponse keyspace. */ - public keyspace?: (topodata.IKeyspace|null); + /** ShardReplicationPositionsResponse replication_statuses. */ + public replication_statuses: { [k: string]: replicationdata.IStatus }; + + /** ShardReplicationPositionsResponse tablet_map. */ + public tablet_map: { [k: string]: topodata.ITablet }; /** - * Creates a new SetKeyspaceDurabilityPolicyResponse instance using the specified properties. + * Creates a new ShardReplicationPositionsResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns SetKeyspaceDurabilityPolicyResponse instance + * @returns ShardReplicationPositionsResponse instance */ - public static create(properties?: vtctldata.ISetKeyspaceDurabilityPolicyResponse): vtctldata.SetKeyspaceDurabilityPolicyResponse; + public static create(properties?: vtctldata.IShardReplicationPositionsResponse): vtctldata.ShardReplicationPositionsResponse; /** - * Encodes the specified SetKeyspaceDurabilityPolicyResponse message. Does not implicitly {@link vtctldata.SetKeyspaceDurabilityPolicyResponse.verify|verify} messages. - * @param message SetKeyspaceDurabilityPolicyResponse message or plain object to encode + * Encodes the specified ShardReplicationPositionsResponse message. Does not implicitly {@link vtctldata.ShardReplicationPositionsResponse.verify|verify} messages. + * @param message ShardReplicationPositionsResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ISetKeyspaceDurabilityPolicyResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IShardReplicationPositionsResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SetKeyspaceDurabilityPolicyResponse message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceDurabilityPolicyResponse.verify|verify} messages. - * @param message SetKeyspaceDurabilityPolicyResponse message or plain object to encode + * Encodes the specified ShardReplicationPositionsResponse message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationPositionsResponse.verify|verify} messages. 
+ * @param message ShardReplicationPositionsResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ISetKeyspaceDurabilityPolicyResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IShardReplicationPositionsResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SetKeyspaceDurabilityPolicyResponse message from the specified reader or buffer. + * Decodes a ShardReplicationPositionsResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SetKeyspaceDurabilityPolicyResponse + * @returns ShardReplicationPositionsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetKeyspaceDurabilityPolicyResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ShardReplicationPositionsResponse; /** - * Decodes a SetKeyspaceDurabilityPolicyResponse message from the specified reader or buffer, length delimited. + * Decodes a ShardReplicationPositionsResponse message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns SetKeyspaceDurabilityPolicyResponse + * @returns ShardReplicationPositionsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetKeyspaceDurabilityPolicyResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ShardReplicationPositionsResponse; /** - * Verifies a SetKeyspaceDurabilityPolicyResponse message. + * Verifies a ShardReplicationPositionsResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SetKeyspaceDurabilityPolicyResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ShardReplicationPositionsResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns SetKeyspaceDurabilityPolicyResponse + * @returns ShardReplicationPositionsResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.SetKeyspaceDurabilityPolicyResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.ShardReplicationPositionsResponse; /** - * Creates a plain object from a SetKeyspaceDurabilityPolicyResponse message. Also converts values to other types if specified. - * @param message SetKeyspaceDurabilityPolicyResponse + * Creates a plain object from a ShardReplicationPositionsResponse message. Also converts values to other types if specified. 
+ * @param message ShardReplicationPositionsResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.SetKeyspaceDurabilityPolicyResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ShardReplicationPositionsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SetKeyspaceDurabilityPolicyResponse to JSON. + * Converts this ShardReplicationPositionsResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SetKeyspaceDurabilityPolicyResponse + * Gets the default type url for ShardReplicationPositionsResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SetKeyspaceServedFromRequest. */ - interface ISetKeyspaceServedFromRequest { + /** Properties of a ShardReplicationRemoveRequest. */ + interface IShardReplicationRemoveRequest { - /** SetKeyspaceServedFromRequest keyspace */ + /** ShardReplicationRemoveRequest keyspace */ keyspace?: (string|null); - /** SetKeyspaceServedFromRequest tablet_type */ - tablet_type?: (topodata.TabletType|null); - - /** SetKeyspaceServedFromRequest cells */ - cells?: (string[]|null); - - /** SetKeyspaceServedFromRequest remove */ - remove?: (boolean|null); + /** ShardReplicationRemoveRequest shard */ + shard?: (string|null); - /** SetKeyspaceServedFromRequest source_keyspace */ - source_keyspace?: (string|null); + /** ShardReplicationRemoveRequest tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); } - /** Represents a SetKeyspaceServedFromRequest. */ - class SetKeyspaceServedFromRequest implements ISetKeyspaceServedFromRequest { + /** Represents a ShardReplicationRemoveRequest. 
*/ + class ShardReplicationRemoveRequest implements IShardReplicationRemoveRequest { /** - * Constructs a new SetKeyspaceServedFromRequest. + * Constructs a new ShardReplicationRemoveRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ISetKeyspaceServedFromRequest); + constructor(properties?: vtctldata.IShardReplicationRemoveRequest); - /** SetKeyspaceServedFromRequest keyspace. */ + /** ShardReplicationRemoveRequest keyspace. */ public keyspace: string; - /** SetKeyspaceServedFromRequest tablet_type. */ - public tablet_type: topodata.TabletType; - - /** SetKeyspaceServedFromRequest cells. */ - public cells: string[]; - - /** SetKeyspaceServedFromRequest remove. */ - public remove: boolean; + /** ShardReplicationRemoveRequest shard. */ + public shard: string; - /** SetKeyspaceServedFromRequest source_keyspace. */ - public source_keyspace: string; + /** ShardReplicationRemoveRequest tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); /** - * Creates a new SetKeyspaceServedFromRequest instance using the specified properties. + * Creates a new ShardReplicationRemoveRequest instance using the specified properties. * @param [properties] Properties to set - * @returns SetKeyspaceServedFromRequest instance + * @returns ShardReplicationRemoveRequest instance */ - public static create(properties?: vtctldata.ISetKeyspaceServedFromRequest): vtctldata.SetKeyspaceServedFromRequest; + public static create(properties?: vtctldata.IShardReplicationRemoveRequest): vtctldata.ShardReplicationRemoveRequest; /** - * Encodes the specified SetKeyspaceServedFromRequest message. Does not implicitly {@link vtctldata.SetKeyspaceServedFromRequest.verify|verify} messages. - * @param message SetKeyspaceServedFromRequest message or plain object to encode + * Encodes the specified ShardReplicationRemoveRequest message. Does not implicitly {@link vtctldata.ShardReplicationRemoveRequest.verify|verify} messages. 
+ * @param message ShardReplicationRemoveRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ISetKeyspaceServedFromRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IShardReplicationRemoveRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SetKeyspaceServedFromRequest message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceServedFromRequest.verify|verify} messages. - * @param message SetKeyspaceServedFromRequest message or plain object to encode + * Encodes the specified ShardReplicationRemoveRequest message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationRemoveRequest.verify|verify} messages. + * @param message ShardReplicationRemoveRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ISetKeyspaceServedFromRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IShardReplicationRemoveRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SetKeyspaceServedFromRequest message from the specified reader or buffer. + * Decodes a ShardReplicationRemoveRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SetKeyspaceServedFromRequest + * @returns ShardReplicationRemoveRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetKeyspaceServedFromRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ShardReplicationRemoveRequest; /** - * Decodes a SetKeyspaceServedFromRequest message from the specified reader or buffer, length delimited. + * Decodes a ShardReplicationRemoveRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns SetKeyspaceServedFromRequest + * @returns ShardReplicationRemoveRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetKeyspaceServedFromRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ShardReplicationRemoveRequest; /** - * Verifies a SetKeyspaceServedFromRequest message. + * Verifies a ShardReplicationRemoveRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SetKeyspaceServedFromRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ShardReplicationRemoveRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns SetKeyspaceServedFromRequest + * @returns ShardReplicationRemoveRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.SetKeyspaceServedFromRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.ShardReplicationRemoveRequest; /** - * Creates a plain object from a SetKeyspaceServedFromRequest message. Also converts values to other types if specified. - * @param message SetKeyspaceServedFromRequest + * Creates a plain object from a ShardReplicationRemoveRequest message. Also converts values to other types if specified. + * @param message ShardReplicationRemoveRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.SetKeyspaceServedFromRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ShardReplicationRemoveRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SetKeyspaceServedFromRequest to JSON. + * Converts this ShardReplicationRemoveRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SetKeyspaceServedFromRequest + * Gets the default type url for ShardReplicationRemoveRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SetKeyspaceServedFromResponse. */ - interface ISetKeyspaceServedFromResponse { - - /** SetKeyspaceServedFromResponse keyspace */ - keyspace?: (topodata.IKeyspace|null); + /** Properties of a ShardReplicationRemoveResponse. */ + interface IShardReplicationRemoveResponse { } - /** Represents a SetKeyspaceServedFromResponse. */ - class SetKeyspaceServedFromResponse implements ISetKeyspaceServedFromResponse { + /** Represents a ShardReplicationRemoveResponse. 
*/ + class ShardReplicationRemoveResponse implements IShardReplicationRemoveResponse { /** - * Constructs a new SetKeyspaceServedFromResponse. + * Constructs a new ShardReplicationRemoveResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ISetKeyspaceServedFromResponse); - - /** SetKeyspaceServedFromResponse keyspace. */ - public keyspace?: (topodata.IKeyspace|null); + constructor(properties?: vtctldata.IShardReplicationRemoveResponse); /** - * Creates a new SetKeyspaceServedFromResponse instance using the specified properties. + * Creates a new ShardReplicationRemoveResponse instance using the specified properties. * @param [properties] Properties to set - * @returns SetKeyspaceServedFromResponse instance + * @returns ShardReplicationRemoveResponse instance */ - public static create(properties?: vtctldata.ISetKeyspaceServedFromResponse): vtctldata.SetKeyspaceServedFromResponse; + public static create(properties?: vtctldata.IShardReplicationRemoveResponse): vtctldata.ShardReplicationRemoveResponse; /** - * Encodes the specified SetKeyspaceServedFromResponse message. Does not implicitly {@link vtctldata.SetKeyspaceServedFromResponse.verify|verify} messages. - * @param message SetKeyspaceServedFromResponse message or plain object to encode + * Encodes the specified ShardReplicationRemoveResponse message. Does not implicitly {@link vtctldata.ShardReplicationRemoveResponse.verify|verify} messages. + * @param message ShardReplicationRemoveResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ISetKeyspaceServedFromResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IShardReplicationRemoveResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SetKeyspaceServedFromResponse message, length delimited. 
Does not implicitly {@link vtctldata.SetKeyspaceServedFromResponse.verify|verify} messages. - * @param message SetKeyspaceServedFromResponse message or plain object to encode + * Encodes the specified ShardReplicationRemoveResponse message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationRemoveResponse.verify|verify} messages. + * @param message ShardReplicationRemoveResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ISetKeyspaceServedFromResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IShardReplicationRemoveResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SetKeyspaceServedFromResponse message from the specified reader or buffer. + * Decodes a ShardReplicationRemoveResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SetKeyspaceServedFromResponse + * @returns ShardReplicationRemoveResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetKeyspaceServedFromResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ShardReplicationRemoveResponse; /** - * Decodes a SetKeyspaceServedFromResponse message from the specified reader or buffer, length delimited. + * Decodes a ShardReplicationRemoveResponse message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns SetKeyspaceServedFromResponse + * @returns ShardReplicationRemoveResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetKeyspaceServedFromResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ShardReplicationRemoveResponse; /** - * Verifies a SetKeyspaceServedFromResponse message. + * Verifies a ShardReplicationRemoveResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SetKeyspaceServedFromResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ShardReplicationRemoveResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns SetKeyspaceServedFromResponse + * @returns ShardReplicationRemoveResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.SetKeyspaceServedFromResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.ShardReplicationRemoveResponse; /** - * Creates a plain object from a SetKeyspaceServedFromResponse message. Also converts values to other types if specified. - * @param message SetKeyspaceServedFromResponse + * Creates a plain object from a ShardReplicationRemoveResponse message. Also converts values to other types if specified. 
+ * @param message ShardReplicationRemoveResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.SetKeyspaceServedFromResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ShardReplicationRemoveResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SetKeyspaceServedFromResponse to JSON. + * Converts this ShardReplicationRemoveResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SetKeyspaceServedFromResponse + * Gets the default type url for ShardReplicationRemoveResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SetKeyspaceShardingInfoRequest. */ - interface ISetKeyspaceShardingInfoRequest { + /** Properties of a SleepTabletRequest. */ + interface ISleepTabletRequest { - /** SetKeyspaceShardingInfoRequest keyspace */ - keyspace?: (string|null); + /** SleepTabletRequest tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); - /** SetKeyspaceShardingInfoRequest force */ - force?: (boolean|null); + /** SleepTabletRequest duration */ + duration?: (vttime.IDuration|null); } - /** Represents a SetKeyspaceShardingInfoRequest. */ - class SetKeyspaceShardingInfoRequest implements ISetKeyspaceShardingInfoRequest { + /** Represents a SleepTabletRequest. */ + class SleepTabletRequest implements ISleepTabletRequest { /** - * Constructs a new SetKeyspaceShardingInfoRequest. + * Constructs a new SleepTabletRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ISetKeyspaceShardingInfoRequest); + constructor(properties?: vtctldata.ISleepTabletRequest); - /** SetKeyspaceShardingInfoRequest keyspace. 
*/ - public keyspace: string; + /** SleepTabletRequest tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); - /** SetKeyspaceShardingInfoRequest force. */ - public force: boolean; + /** SleepTabletRequest duration. */ + public duration?: (vttime.IDuration|null); /** - * Creates a new SetKeyspaceShardingInfoRequest instance using the specified properties. + * Creates a new SleepTabletRequest instance using the specified properties. * @param [properties] Properties to set - * @returns SetKeyspaceShardingInfoRequest instance + * @returns SleepTabletRequest instance */ - public static create(properties?: vtctldata.ISetKeyspaceShardingInfoRequest): vtctldata.SetKeyspaceShardingInfoRequest; + public static create(properties?: vtctldata.ISleepTabletRequest): vtctldata.SleepTabletRequest; /** - * Encodes the specified SetKeyspaceShardingInfoRequest message. Does not implicitly {@link vtctldata.SetKeyspaceShardingInfoRequest.verify|verify} messages. - * @param message SetKeyspaceShardingInfoRequest message or plain object to encode + * Encodes the specified SleepTabletRequest message. Does not implicitly {@link vtctldata.SleepTabletRequest.verify|verify} messages. + * @param message SleepTabletRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ISetKeyspaceShardingInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ISleepTabletRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SetKeyspaceShardingInfoRequest message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceShardingInfoRequest.verify|verify} messages. - * @param message SetKeyspaceShardingInfoRequest message or plain object to encode + * Encodes the specified SleepTabletRequest message, length delimited. Does not implicitly {@link vtctldata.SleepTabletRequest.verify|verify} messages. 
+ * @param message SleepTabletRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ISetKeyspaceShardingInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ISleepTabletRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SetKeyspaceShardingInfoRequest message from the specified reader or buffer. + * Decodes a SleepTabletRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SetKeyspaceShardingInfoRequest + * @returns SleepTabletRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetKeyspaceShardingInfoRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SleepTabletRequest; /** - * Decodes a SetKeyspaceShardingInfoRequest message from the specified reader or buffer, length delimited. + * Decodes a SleepTabletRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns SetKeyspaceShardingInfoRequest + * @returns SleepTabletRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetKeyspaceShardingInfoRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SleepTabletRequest; /** - * Verifies a SetKeyspaceShardingInfoRequest message. + * Verifies a SleepTabletRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SetKeyspaceShardingInfoRequest message from a plain object. Also converts values to their respective internal types. + * Creates a SleepTabletRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns SetKeyspaceShardingInfoRequest + * @returns SleepTabletRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.SetKeyspaceShardingInfoRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.SleepTabletRequest; /** - * Creates a plain object from a SetKeyspaceShardingInfoRequest message. Also converts values to other types if specified. - * @param message SetKeyspaceShardingInfoRequest + * Creates a plain object from a SleepTabletRequest message. Also converts values to other types if specified. + * @param message SleepTabletRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.SetKeyspaceShardingInfoRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.SleepTabletRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SetKeyspaceShardingInfoRequest to JSON. + * Converts this SleepTabletRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SetKeyspaceShardingInfoRequest + * Gets the default type url for SleepTabletRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SetKeyspaceShardingInfoResponse. 
*/ - interface ISetKeyspaceShardingInfoResponse { - - /** SetKeyspaceShardingInfoResponse keyspace */ - keyspace?: (topodata.IKeyspace|null); + /** Properties of a SleepTabletResponse. */ + interface ISleepTabletResponse { } - /** Represents a SetKeyspaceShardingInfoResponse. */ - class SetKeyspaceShardingInfoResponse implements ISetKeyspaceShardingInfoResponse { + /** Represents a SleepTabletResponse. */ + class SleepTabletResponse implements ISleepTabletResponse { /** - * Constructs a new SetKeyspaceShardingInfoResponse. + * Constructs a new SleepTabletResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ISetKeyspaceShardingInfoResponse); - - /** SetKeyspaceShardingInfoResponse keyspace. */ - public keyspace?: (topodata.IKeyspace|null); + constructor(properties?: vtctldata.ISleepTabletResponse); /** - * Creates a new SetKeyspaceShardingInfoResponse instance using the specified properties. + * Creates a new SleepTabletResponse instance using the specified properties. * @param [properties] Properties to set - * @returns SetKeyspaceShardingInfoResponse instance + * @returns SleepTabletResponse instance */ - public static create(properties?: vtctldata.ISetKeyspaceShardingInfoResponse): vtctldata.SetKeyspaceShardingInfoResponse; + public static create(properties?: vtctldata.ISleepTabletResponse): vtctldata.SleepTabletResponse; /** - * Encodes the specified SetKeyspaceShardingInfoResponse message. Does not implicitly {@link vtctldata.SetKeyspaceShardingInfoResponse.verify|verify} messages. - * @param message SetKeyspaceShardingInfoResponse message or plain object to encode + * Encodes the specified SleepTabletResponse message. Does not implicitly {@link vtctldata.SleepTabletResponse.verify|verify} messages. 
+ * @param message SleepTabletResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ISetKeyspaceShardingInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ISleepTabletResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SetKeyspaceShardingInfoResponse message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceShardingInfoResponse.verify|verify} messages. - * @param message SetKeyspaceShardingInfoResponse message or plain object to encode + * Encodes the specified SleepTabletResponse message, length delimited. Does not implicitly {@link vtctldata.SleepTabletResponse.verify|verify} messages. + * @param message SleepTabletResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ISetKeyspaceShardingInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ISleepTabletResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SetKeyspaceShardingInfoResponse message from the specified reader or buffer. + * Decodes a SleepTabletResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SetKeyspaceShardingInfoResponse + * @returns SleepTabletResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetKeyspaceShardingInfoResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SleepTabletResponse; /** - * Decodes a SetKeyspaceShardingInfoResponse message from the specified reader or buffer, length delimited. + * Decodes a SleepTabletResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns SetKeyspaceShardingInfoResponse + * @returns SleepTabletResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetKeyspaceShardingInfoResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SleepTabletResponse; /** - * Verifies a SetKeyspaceShardingInfoResponse message. + * Verifies a SleepTabletResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SetKeyspaceShardingInfoResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SleepTabletResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns SetKeyspaceShardingInfoResponse + * @returns SleepTabletResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.SetKeyspaceShardingInfoResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.SleepTabletResponse; /** - * Creates a plain object from a SetKeyspaceShardingInfoResponse message. Also converts values to other types if specified. - * @param message SetKeyspaceShardingInfoResponse + * Creates a plain object from a SleepTabletResponse message. Also converts values to other types if specified. + * @param message SleepTabletResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.SetKeyspaceShardingInfoResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.SleepTabletResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SetKeyspaceShardingInfoResponse to JSON. + * Converts this SleepTabletResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SetKeyspaceShardingInfoResponse + * Gets the default type url for SleepTabletResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SetShardIsPrimaryServingRequest. */ - interface ISetShardIsPrimaryServingRequest { + /** Properties of a SourceShardAddRequest. 
*/ + interface ISourceShardAddRequest { - /** SetShardIsPrimaryServingRequest keyspace */ + /** SourceShardAddRequest keyspace */ keyspace?: (string|null); - /** SetShardIsPrimaryServingRequest shard */ + /** SourceShardAddRequest shard */ shard?: (string|null); - /** SetShardIsPrimaryServingRequest is_serving */ - is_serving?: (boolean|null); + /** SourceShardAddRequest uid */ + uid?: (number|null); + + /** SourceShardAddRequest source_keyspace */ + source_keyspace?: (string|null); + + /** SourceShardAddRequest source_shard */ + source_shard?: (string|null); + + /** SourceShardAddRequest key_range */ + key_range?: (topodata.IKeyRange|null); + + /** SourceShardAddRequest tables */ + tables?: (string[]|null); } - /** Represents a SetShardIsPrimaryServingRequest. */ - class SetShardIsPrimaryServingRequest implements ISetShardIsPrimaryServingRequest { + /** Represents a SourceShardAddRequest. */ + class SourceShardAddRequest implements ISourceShardAddRequest { /** - * Constructs a new SetShardIsPrimaryServingRequest. + * Constructs a new SourceShardAddRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ISetShardIsPrimaryServingRequest); + constructor(properties?: vtctldata.ISourceShardAddRequest); - /** SetShardIsPrimaryServingRequest keyspace. */ + /** SourceShardAddRequest keyspace. */ public keyspace: string; - /** SetShardIsPrimaryServingRequest shard. */ + /** SourceShardAddRequest shard. */ public shard: string; - /** SetShardIsPrimaryServingRequest is_serving. */ - public is_serving: boolean; + /** SourceShardAddRequest uid. */ + public uid: number; + + /** SourceShardAddRequest source_keyspace. */ + public source_keyspace: string; + + /** SourceShardAddRequest source_shard. */ + public source_shard: string; + + /** SourceShardAddRequest key_range. */ + public key_range?: (topodata.IKeyRange|null); + + /** SourceShardAddRequest tables. 
*/ + public tables: string[]; /** - * Creates a new SetShardIsPrimaryServingRequest instance using the specified properties. + * Creates a new SourceShardAddRequest instance using the specified properties. * @param [properties] Properties to set - * @returns SetShardIsPrimaryServingRequest instance + * @returns SourceShardAddRequest instance */ - public static create(properties?: vtctldata.ISetShardIsPrimaryServingRequest): vtctldata.SetShardIsPrimaryServingRequest; + public static create(properties?: vtctldata.ISourceShardAddRequest): vtctldata.SourceShardAddRequest; /** - * Encodes the specified SetShardIsPrimaryServingRequest message. Does not implicitly {@link vtctldata.SetShardIsPrimaryServingRequest.verify|verify} messages. - * @param message SetShardIsPrimaryServingRequest message or plain object to encode + * Encodes the specified SourceShardAddRequest message. Does not implicitly {@link vtctldata.SourceShardAddRequest.verify|verify} messages. + * @param message SourceShardAddRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ISetShardIsPrimaryServingRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ISourceShardAddRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SetShardIsPrimaryServingRequest message, length delimited. Does not implicitly {@link vtctldata.SetShardIsPrimaryServingRequest.verify|verify} messages. - * @param message SetShardIsPrimaryServingRequest message or plain object to encode + * Encodes the specified SourceShardAddRequest message, length delimited. Does not implicitly {@link vtctldata.SourceShardAddRequest.verify|verify} messages. 
+ * @param message SourceShardAddRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ISetShardIsPrimaryServingRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ISourceShardAddRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SetShardIsPrimaryServingRequest message from the specified reader or buffer. + * Decodes a SourceShardAddRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SetShardIsPrimaryServingRequest + * @returns SourceShardAddRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetShardIsPrimaryServingRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SourceShardAddRequest; /** - * Decodes a SetShardIsPrimaryServingRequest message from the specified reader or buffer, length delimited. + * Decodes a SourceShardAddRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns SetShardIsPrimaryServingRequest + * @returns SourceShardAddRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetShardIsPrimaryServingRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SourceShardAddRequest; /** - * Verifies a SetShardIsPrimaryServingRequest message. + * Verifies a SourceShardAddRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SetShardIsPrimaryServingRequest message from a plain object. Also converts values to their respective internal types. + * Creates a SourceShardAddRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns SetShardIsPrimaryServingRequest + * @returns SourceShardAddRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.SetShardIsPrimaryServingRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.SourceShardAddRequest; /** - * Creates a plain object from a SetShardIsPrimaryServingRequest message. Also converts values to other types if specified. - * @param message SetShardIsPrimaryServingRequest + * Creates a plain object from a SourceShardAddRequest message. Also converts values to other types if specified. + * @param message SourceShardAddRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.SetShardIsPrimaryServingRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.SourceShardAddRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SetShardIsPrimaryServingRequest to JSON. + * Converts this SourceShardAddRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SetShardIsPrimaryServingRequest + * Gets the default type url for SourceShardAddRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SetShardIsPrimaryServingResponse. 
*/ - interface ISetShardIsPrimaryServingResponse { + /** Properties of a SourceShardAddResponse. */ + interface ISourceShardAddResponse { - /** SetShardIsPrimaryServingResponse shard */ + /** SourceShardAddResponse shard */ shard?: (topodata.IShard|null); } - /** Represents a SetShardIsPrimaryServingResponse. */ - class SetShardIsPrimaryServingResponse implements ISetShardIsPrimaryServingResponse { + /** Represents a SourceShardAddResponse. */ + class SourceShardAddResponse implements ISourceShardAddResponse { /** - * Constructs a new SetShardIsPrimaryServingResponse. + * Constructs a new SourceShardAddResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ISetShardIsPrimaryServingResponse); + constructor(properties?: vtctldata.ISourceShardAddResponse); - /** SetShardIsPrimaryServingResponse shard. */ + /** SourceShardAddResponse shard. */ public shard?: (topodata.IShard|null); /** - * Creates a new SetShardIsPrimaryServingResponse instance using the specified properties. + * Creates a new SourceShardAddResponse instance using the specified properties. * @param [properties] Properties to set - * @returns SetShardIsPrimaryServingResponse instance + * @returns SourceShardAddResponse instance */ - public static create(properties?: vtctldata.ISetShardIsPrimaryServingResponse): vtctldata.SetShardIsPrimaryServingResponse; + public static create(properties?: vtctldata.ISourceShardAddResponse): vtctldata.SourceShardAddResponse; /** - * Encodes the specified SetShardIsPrimaryServingResponse message. Does not implicitly {@link vtctldata.SetShardIsPrimaryServingResponse.verify|verify} messages. - * @param message SetShardIsPrimaryServingResponse message or plain object to encode + * Encodes the specified SourceShardAddResponse message. Does not implicitly {@link vtctldata.SourceShardAddResponse.verify|verify} messages. 
+ * @param message SourceShardAddResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ISetShardIsPrimaryServingResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ISourceShardAddResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SetShardIsPrimaryServingResponse message, length delimited. Does not implicitly {@link vtctldata.SetShardIsPrimaryServingResponse.verify|verify} messages. - * @param message SetShardIsPrimaryServingResponse message or plain object to encode + * Encodes the specified SourceShardAddResponse message, length delimited. Does not implicitly {@link vtctldata.SourceShardAddResponse.verify|verify} messages. + * @param message SourceShardAddResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ISetShardIsPrimaryServingResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ISourceShardAddResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SetShardIsPrimaryServingResponse message from the specified reader or buffer. + * Decodes a SourceShardAddResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SetShardIsPrimaryServingResponse + * @returns SourceShardAddResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetShardIsPrimaryServingResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SourceShardAddResponse; /** - * Decodes a SetShardIsPrimaryServingResponse message from the specified reader or buffer, length delimited. + * Decodes a SourceShardAddResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns SetShardIsPrimaryServingResponse + * @returns SourceShardAddResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetShardIsPrimaryServingResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SourceShardAddResponse; /** - * Verifies a SetShardIsPrimaryServingResponse message. + * Verifies a SourceShardAddResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SetShardIsPrimaryServingResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SourceShardAddResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns SetShardIsPrimaryServingResponse + * @returns SourceShardAddResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.SetShardIsPrimaryServingResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.SourceShardAddResponse; /** - * Creates a plain object from a SetShardIsPrimaryServingResponse message. Also converts values to other types if specified. - * @param message SetShardIsPrimaryServingResponse + * Creates a plain object from a SourceShardAddResponse message. Also converts values to other types if specified. + * @param message SourceShardAddResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.SetShardIsPrimaryServingResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.SourceShardAddResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SetShardIsPrimaryServingResponse to JSON. + * Converts this SourceShardAddResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SetShardIsPrimaryServingResponse + * Gets the default type url for SourceShardAddResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SetShardTabletControlRequest. */ - interface ISetShardTabletControlRequest { + /** Properties of a SourceShardDeleteRequest. 
*/ + interface ISourceShardDeleteRequest { - /** SetShardTabletControlRequest keyspace */ + /** SourceShardDeleteRequest keyspace */ keyspace?: (string|null); - /** SetShardTabletControlRequest shard */ + /** SourceShardDeleteRequest shard */ shard?: (string|null); - /** SetShardTabletControlRequest tablet_type */ - tablet_type?: (topodata.TabletType|null); - - /** SetShardTabletControlRequest cells */ - cells?: (string[]|null); - - /** SetShardTabletControlRequest denied_tables */ - denied_tables?: (string[]|null); - - /** SetShardTabletControlRequest disable_query_service */ - disable_query_service?: (boolean|null); - - /** SetShardTabletControlRequest remove */ - remove?: (boolean|null); + /** SourceShardDeleteRequest uid */ + uid?: (number|null); } - /** Represents a SetShardTabletControlRequest. */ - class SetShardTabletControlRequest implements ISetShardTabletControlRequest { + /** Represents a SourceShardDeleteRequest. */ + class SourceShardDeleteRequest implements ISourceShardDeleteRequest { /** - * Constructs a new SetShardTabletControlRequest. + * Constructs a new SourceShardDeleteRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ISetShardTabletControlRequest); + constructor(properties?: vtctldata.ISourceShardDeleteRequest); - /** SetShardTabletControlRequest keyspace. */ + /** SourceShardDeleteRequest keyspace. */ public keyspace: string; - /** SetShardTabletControlRequest shard. */ + /** SourceShardDeleteRequest shard. */ public shard: string; - /** SetShardTabletControlRequest tablet_type. */ - public tablet_type: topodata.TabletType; - - /** SetShardTabletControlRequest cells. */ - public cells: string[]; - - /** SetShardTabletControlRequest denied_tables. */ - public denied_tables: string[]; - - /** SetShardTabletControlRequest disable_query_service. */ - public disable_query_service: boolean; - - /** SetShardTabletControlRequest remove. */ - public remove: boolean; + /** SourceShardDeleteRequest uid. 
*/ + public uid: number; /** - * Creates a new SetShardTabletControlRequest instance using the specified properties. + * Creates a new SourceShardDeleteRequest instance using the specified properties. * @param [properties] Properties to set - * @returns SetShardTabletControlRequest instance + * @returns SourceShardDeleteRequest instance */ - public static create(properties?: vtctldata.ISetShardTabletControlRequest): vtctldata.SetShardTabletControlRequest; + public static create(properties?: vtctldata.ISourceShardDeleteRequest): vtctldata.SourceShardDeleteRequest; /** - * Encodes the specified SetShardTabletControlRequest message. Does not implicitly {@link vtctldata.SetShardTabletControlRequest.verify|verify} messages. - * @param message SetShardTabletControlRequest message or plain object to encode + * Encodes the specified SourceShardDeleteRequest message. Does not implicitly {@link vtctldata.SourceShardDeleteRequest.verify|verify} messages. + * @param message SourceShardDeleteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ISetShardTabletControlRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ISourceShardDeleteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SetShardTabletControlRequest message, length delimited. Does not implicitly {@link vtctldata.SetShardTabletControlRequest.verify|verify} messages. - * @param message SetShardTabletControlRequest message or plain object to encode + * Encodes the specified SourceShardDeleteRequest message, length delimited. Does not implicitly {@link vtctldata.SourceShardDeleteRequest.verify|verify} messages. 
+ * @param message SourceShardDeleteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ISetShardTabletControlRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ISourceShardDeleteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SetShardTabletControlRequest message from the specified reader or buffer. + * Decodes a SourceShardDeleteRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SetShardTabletControlRequest + * @returns SourceShardDeleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetShardTabletControlRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SourceShardDeleteRequest; /** - * Decodes a SetShardTabletControlRequest message from the specified reader or buffer, length delimited. + * Decodes a SourceShardDeleteRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns SetShardTabletControlRequest + * @returns SourceShardDeleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetShardTabletControlRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SourceShardDeleteRequest; /** - * Verifies a SetShardTabletControlRequest message. + * Verifies a SourceShardDeleteRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SetShardTabletControlRequest message from a plain object. Also converts values to their respective internal types. + * Creates a SourceShardDeleteRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns SetShardTabletControlRequest + * @returns SourceShardDeleteRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.SetShardTabletControlRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.SourceShardDeleteRequest; /** - * Creates a plain object from a SetShardTabletControlRequest message. Also converts values to other types if specified. - * @param message SetShardTabletControlRequest + * Creates a plain object from a SourceShardDeleteRequest message. Also converts values to other types if specified. + * @param message SourceShardDeleteRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.SetShardTabletControlRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.SourceShardDeleteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SetShardTabletControlRequest to JSON. + * Converts this SourceShardDeleteRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SetShardTabletControlRequest + * Gets the default type url for SourceShardDeleteRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SetShardTabletControlResponse. 
*/ - interface ISetShardTabletControlResponse { + /** Properties of a SourceShardDeleteResponse. */ + interface ISourceShardDeleteResponse { - /** SetShardTabletControlResponse shard */ + /** SourceShardDeleteResponse shard */ shard?: (topodata.IShard|null); } - /** Represents a SetShardTabletControlResponse. */ - class SetShardTabletControlResponse implements ISetShardTabletControlResponse { + /** Represents a SourceShardDeleteResponse. */ + class SourceShardDeleteResponse implements ISourceShardDeleteResponse { /** - * Constructs a new SetShardTabletControlResponse. + * Constructs a new SourceShardDeleteResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ISetShardTabletControlResponse); + constructor(properties?: vtctldata.ISourceShardDeleteResponse); - /** SetShardTabletControlResponse shard. */ + /** SourceShardDeleteResponse shard. */ public shard?: (topodata.IShard|null); /** - * Creates a new SetShardTabletControlResponse instance using the specified properties. + * Creates a new SourceShardDeleteResponse instance using the specified properties. * @param [properties] Properties to set - * @returns SetShardTabletControlResponse instance + * @returns SourceShardDeleteResponse instance */ - public static create(properties?: vtctldata.ISetShardTabletControlResponse): vtctldata.SetShardTabletControlResponse; + public static create(properties?: vtctldata.ISourceShardDeleteResponse): vtctldata.SourceShardDeleteResponse; /** - * Encodes the specified SetShardTabletControlResponse message. Does not implicitly {@link vtctldata.SetShardTabletControlResponse.verify|verify} messages. - * @param message SetShardTabletControlResponse message or plain object to encode + * Encodes the specified SourceShardDeleteResponse message. Does not implicitly {@link vtctldata.SourceShardDeleteResponse.verify|verify} messages. 
+ * @param message SourceShardDeleteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ISetShardTabletControlResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ISourceShardDeleteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SetShardTabletControlResponse message, length delimited. Does not implicitly {@link vtctldata.SetShardTabletControlResponse.verify|verify} messages. - * @param message SetShardTabletControlResponse message or plain object to encode + * Encodes the specified SourceShardDeleteResponse message, length delimited. Does not implicitly {@link vtctldata.SourceShardDeleteResponse.verify|verify} messages. + * @param message SourceShardDeleteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ISetShardTabletControlResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ISourceShardDeleteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SetShardTabletControlResponse message from the specified reader or buffer. + * Decodes a SourceShardDeleteResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SetShardTabletControlResponse + * @returns SourceShardDeleteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetShardTabletControlResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SourceShardDeleteResponse; /** - * Decodes a SetShardTabletControlResponse message from the specified reader or buffer, length delimited. + * Decodes a SourceShardDeleteResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns SetShardTabletControlResponse + * @returns SourceShardDeleteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetShardTabletControlResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SourceShardDeleteResponse; /** - * Verifies a SetShardTabletControlResponse message. + * Verifies a SourceShardDeleteResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SetShardTabletControlResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SourceShardDeleteResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns SetShardTabletControlResponse + * @returns SourceShardDeleteResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.SetShardTabletControlResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.SourceShardDeleteResponse; /** - * Creates a plain object from a SetShardTabletControlResponse message. Also converts values to other types if specified. - * @param message SetShardTabletControlResponse + * Creates a plain object from a SourceShardDeleteResponse message. Also converts values to other types if specified. + * @param message SourceShardDeleteResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.SetShardTabletControlResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.SourceShardDeleteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SetShardTabletControlResponse to JSON. + * Converts this SourceShardDeleteResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SetShardTabletControlResponse + * Gets the default type url for SourceShardDeleteResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SetWritableRequest. */ - interface ISetWritableRequest { + /** Properties of a StartReplicationRequest. */ + interface IStartReplicationRequest { - /** SetWritableRequest tablet_alias */ + /** StartReplicationRequest tablet_alias */ tablet_alias?: (topodata.ITabletAlias|null); - - /** SetWritableRequest writable */ - writable?: (boolean|null); } - /** Represents a SetWritableRequest. */ - class SetWritableRequest implements ISetWritableRequest { + /** Represents a StartReplicationRequest. 
*/ + class StartReplicationRequest implements IStartReplicationRequest { /** - * Constructs a new SetWritableRequest. + * Constructs a new StartReplicationRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ISetWritableRequest); + constructor(properties?: vtctldata.IStartReplicationRequest); - /** SetWritableRequest tablet_alias. */ + /** StartReplicationRequest tablet_alias. */ public tablet_alias?: (topodata.ITabletAlias|null); - /** SetWritableRequest writable. */ - public writable: boolean; - /** - * Creates a new SetWritableRequest instance using the specified properties. + * Creates a new StartReplicationRequest instance using the specified properties. * @param [properties] Properties to set - * @returns SetWritableRequest instance + * @returns StartReplicationRequest instance */ - public static create(properties?: vtctldata.ISetWritableRequest): vtctldata.SetWritableRequest; + public static create(properties?: vtctldata.IStartReplicationRequest): vtctldata.StartReplicationRequest; /** - * Encodes the specified SetWritableRequest message. Does not implicitly {@link vtctldata.SetWritableRequest.verify|verify} messages. - * @param message SetWritableRequest message or plain object to encode + * Encodes the specified StartReplicationRequest message. Does not implicitly {@link vtctldata.StartReplicationRequest.verify|verify} messages. + * @param message StartReplicationRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ISetWritableRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IStartReplicationRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SetWritableRequest message, length delimited. Does not implicitly {@link vtctldata.SetWritableRequest.verify|verify} messages. 
- * @param message SetWritableRequest message or plain object to encode + * Encodes the specified StartReplicationRequest message, length delimited. Does not implicitly {@link vtctldata.StartReplicationRequest.verify|verify} messages. + * @param message StartReplicationRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ISetWritableRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IStartReplicationRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SetWritableRequest message from the specified reader or buffer. + * Decodes a StartReplicationRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SetWritableRequest + * @returns StartReplicationRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetWritableRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.StartReplicationRequest; /** - * Decodes a SetWritableRequest message from the specified reader or buffer, length delimited. + * Decodes a StartReplicationRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns SetWritableRequest + * @returns StartReplicationRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetWritableRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.StartReplicationRequest; /** - * Verifies a SetWritableRequest message. + * Verifies a StartReplicationRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SetWritableRequest message from a plain object. Also converts values to their respective internal types. + * Creates a StartReplicationRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns SetWritableRequest + * @returns StartReplicationRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.SetWritableRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.StartReplicationRequest; /** - * Creates a plain object from a SetWritableRequest message. Also converts values to other types if specified. - * @param message SetWritableRequest + * Creates a plain object from a StartReplicationRequest message. Also converts values to other types if specified. + * @param message StartReplicationRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.SetWritableRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.StartReplicationRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SetWritableRequest to JSON. 
+ * Converts this StartReplicationRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SetWritableRequest + * Gets the default type url for StartReplicationRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SetWritableResponse. */ - interface ISetWritableResponse { + /** Properties of a StartReplicationResponse. */ + interface IStartReplicationResponse { } - /** Represents a SetWritableResponse. */ - class SetWritableResponse implements ISetWritableResponse { + /** Represents a StartReplicationResponse. */ + class StartReplicationResponse implements IStartReplicationResponse { /** - * Constructs a new SetWritableResponse. + * Constructs a new StartReplicationResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ISetWritableResponse); + constructor(properties?: vtctldata.IStartReplicationResponse); /** - * Creates a new SetWritableResponse instance using the specified properties. + * Creates a new StartReplicationResponse instance using the specified properties. * @param [properties] Properties to set - * @returns SetWritableResponse instance + * @returns StartReplicationResponse instance */ - public static create(properties?: vtctldata.ISetWritableResponse): vtctldata.SetWritableResponse; + public static create(properties?: vtctldata.IStartReplicationResponse): vtctldata.StartReplicationResponse; /** - * Encodes the specified SetWritableResponse message. Does not implicitly {@link vtctldata.SetWritableResponse.verify|verify} messages. - * @param message SetWritableResponse message or plain object to encode + * Encodes the specified StartReplicationResponse message. Does not implicitly {@link vtctldata.StartReplicationResponse.verify|verify} messages. 
+ * @param message StartReplicationResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ISetWritableResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IStartReplicationResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SetWritableResponse message, length delimited. Does not implicitly {@link vtctldata.SetWritableResponse.verify|verify} messages. - * @param message SetWritableResponse message or plain object to encode + * Encodes the specified StartReplicationResponse message, length delimited. Does not implicitly {@link vtctldata.StartReplicationResponse.verify|verify} messages. + * @param message StartReplicationResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ISetWritableResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IStartReplicationResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SetWritableResponse message from the specified reader or buffer. + * Decodes a StartReplicationResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SetWritableResponse + * @returns StartReplicationResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetWritableResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.StartReplicationResponse; /** - * Decodes a SetWritableResponse message from the specified reader or buffer, length delimited. 
+ * Decodes a StartReplicationResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns SetWritableResponse + * @returns StartReplicationResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetWritableResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.StartReplicationResponse; /** - * Verifies a SetWritableResponse message. + * Verifies a StartReplicationResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SetWritableResponse message from a plain object. Also converts values to their respective internal types. + * Creates a StartReplicationResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns SetWritableResponse + * @returns StartReplicationResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.SetWritableResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.StartReplicationResponse; /** - * Creates a plain object from a SetWritableResponse message. Also converts values to other types if specified. - * @param message SetWritableResponse + * Creates a plain object from a StartReplicationResponse message. Also converts values to other types if specified. 
+ * @param message StartReplicationResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.SetWritableResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.StartReplicationResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SetWritableResponse to JSON. + * Converts this StartReplicationResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SetWritableResponse + * Gets the default type url for StartReplicationResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ShardReplicationAddRequest. */ - interface IShardReplicationAddRequest { - - /** ShardReplicationAddRequest keyspace */ - keyspace?: (string|null); - - /** ShardReplicationAddRequest shard */ - shard?: (string|null); + /** Properties of a StopReplicationRequest. */ + interface IStopReplicationRequest { - /** ShardReplicationAddRequest tablet_alias */ + /** StopReplicationRequest tablet_alias */ tablet_alias?: (topodata.ITabletAlias|null); } - /** Represents a ShardReplicationAddRequest. */ - class ShardReplicationAddRequest implements IShardReplicationAddRequest { + /** Represents a StopReplicationRequest. */ + class StopReplicationRequest implements IStopReplicationRequest { /** - * Constructs a new ShardReplicationAddRequest. + * Constructs a new StopReplicationRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IShardReplicationAddRequest); - - /** ShardReplicationAddRequest keyspace. */ - public keyspace: string; - - /** ShardReplicationAddRequest shard. 
*/ - public shard: string; + constructor(properties?: vtctldata.IStopReplicationRequest); - /** ShardReplicationAddRequest tablet_alias. */ + /** StopReplicationRequest tablet_alias. */ public tablet_alias?: (topodata.ITabletAlias|null); /** - * Creates a new ShardReplicationAddRequest instance using the specified properties. + * Creates a new StopReplicationRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ShardReplicationAddRequest instance + * @returns StopReplicationRequest instance */ - public static create(properties?: vtctldata.IShardReplicationAddRequest): vtctldata.ShardReplicationAddRequest; + public static create(properties?: vtctldata.IStopReplicationRequest): vtctldata.StopReplicationRequest; /** - * Encodes the specified ShardReplicationAddRequest message. Does not implicitly {@link vtctldata.ShardReplicationAddRequest.verify|verify} messages. - * @param message ShardReplicationAddRequest message or plain object to encode + * Encodes the specified StopReplicationRequest message. Does not implicitly {@link vtctldata.StopReplicationRequest.verify|verify} messages. + * @param message StopReplicationRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IShardReplicationAddRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IStopReplicationRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ShardReplicationAddRequest message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationAddRequest.verify|verify} messages. - * @param message ShardReplicationAddRequest message or plain object to encode + * Encodes the specified StopReplicationRequest message, length delimited. Does not implicitly {@link vtctldata.StopReplicationRequest.verify|verify} messages. 
+ * @param message StopReplicationRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IShardReplicationAddRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IStopReplicationRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ShardReplicationAddRequest message from the specified reader or buffer. + * Decodes a StopReplicationRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ShardReplicationAddRequest + * @returns StopReplicationRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ShardReplicationAddRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.StopReplicationRequest; /** - * Decodes a ShardReplicationAddRequest message from the specified reader or buffer, length delimited. + * Decodes a StopReplicationRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ShardReplicationAddRequest + * @returns StopReplicationRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ShardReplicationAddRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.StopReplicationRequest; /** - * Verifies a ShardReplicationAddRequest message. + * Verifies a StopReplicationRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ShardReplicationAddRequest message from a plain object. Also converts values to their respective internal types. + * Creates a StopReplicationRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ShardReplicationAddRequest + * @returns StopReplicationRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ShardReplicationAddRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.StopReplicationRequest; /** - * Creates a plain object from a ShardReplicationAddRequest message. Also converts values to other types if specified. - * @param message ShardReplicationAddRequest + * Creates a plain object from a StopReplicationRequest message. Also converts values to other types if specified. + * @param message StopReplicationRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ShardReplicationAddRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.StopReplicationRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ShardReplicationAddRequest to JSON. + * Converts this StopReplicationRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ShardReplicationAddRequest + * Gets the default type url for StopReplicationRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ShardReplicationAddResponse. */ - interface IShardReplicationAddResponse { + /** Properties of a StopReplicationResponse. 
*/ + interface IStopReplicationResponse { } - /** Represents a ShardReplicationAddResponse. */ - class ShardReplicationAddResponse implements IShardReplicationAddResponse { + /** Represents a StopReplicationResponse. */ + class StopReplicationResponse implements IStopReplicationResponse { /** - * Constructs a new ShardReplicationAddResponse. + * Constructs a new StopReplicationResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IShardReplicationAddResponse); + constructor(properties?: vtctldata.IStopReplicationResponse); /** - * Creates a new ShardReplicationAddResponse instance using the specified properties. + * Creates a new StopReplicationResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ShardReplicationAddResponse instance + * @returns StopReplicationResponse instance */ - public static create(properties?: vtctldata.IShardReplicationAddResponse): vtctldata.ShardReplicationAddResponse; + public static create(properties?: vtctldata.IStopReplicationResponse): vtctldata.StopReplicationResponse; /** - * Encodes the specified ShardReplicationAddResponse message. Does not implicitly {@link vtctldata.ShardReplicationAddResponse.verify|verify} messages. - * @param message ShardReplicationAddResponse message or plain object to encode + * Encodes the specified StopReplicationResponse message. Does not implicitly {@link vtctldata.StopReplicationResponse.verify|verify} messages. + * @param message StopReplicationResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IShardReplicationAddResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IStopReplicationResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ShardReplicationAddResponse message, length delimited. 
Does not implicitly {@link vtctldata.ShardReplicationAddResponse.verify|verify} messages. - * @param message ShardReplicationAddResponse message or plain object to encode + * Encodes the specified StopReplicationResponse message, length delimited. Does not implicitly {@link vtctldata.StopReplicationResponse.verify|verify} messages. + * @param message StopReplicationResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IShardReplicationAddResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IStopReplicationResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ShardReplicationAddResponse message from the specified reader or buffer. + * Decodes a StopReplicationResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ShardReplicationAddResponse + * @returns StopReplicationResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ShardReplicationAddResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.StopReplicationResponse; /** - * Decodes a ShardReplicationAddResponse message from the specified reader or buffer, length delimited. + * Decodes a StopReplicationResponse message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns ShardReplicationAddResponse + * @returns StopReplicationResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ShardReplicationAddResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.StopReplicationResponse; /** - * Verifies a ShardReplicationAddResponse message. + * Verifies a StopReplicationResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ShardReplicationAddResponse message from a plain object. Also converts values to their respective internal types. + * Creates a StopReplicationResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ShardReplicationAddResponse + * @returns StopReplicationResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ShardReplicationAddResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.StopReplicationResponse; /** - * Creates a plain object from a ShardReplicationAddResponse message. Also converts values to other types if specified. - * @param message ShardReplicationAddResponse + * Creates a plain object from a StopReplicationResponse message. Also converts values to other types if specified. 
+ * @param message StopReplicationResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ShardReplicationAddResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.StopReplicationResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ShardReplicationAddResponse to JSON. + * Converts this StopReplicationResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ShardReplicationAddResponse + * Gets the default type url for StopReplicationResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ShardReplicationFixRequest. */ - interface IShardReplicationFixRequest { - - /** ShardReplicationFixRequest keyspace */ - keyspace?: (string|null); - - /** ShardReplicationFixRequest shard */ - shard?: (string|null); + /** Properties of a TabletExternallyReparentedRequest. */ + interface ITabletExternallyReparentedRequest { - /** ShardReplicationFixRequest cell */ - cell?: (string|null); + /** TabletExternallyReparentedRequest tablet */ + tablet?: (topodata.ITabletAlias|null); } - /** Represents a ShardReplicationFixRequest. */ - class ShardReplicationFixRequest implements IShardReplicationFixRequest { + /** Represents a TabletExternallyReparentedRequest. */ + class TabletExternallyReparentedRequest implements ITabletExternallyReparentedRequest { /** - * Constructs a new ShardReplicationFixRequest. + * Constructs a new TabletExternallyReparentedRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IShardReplicationFixRequest); - - /** ShardReplicationFixRequest keyspace. */ - public keyspace: string; - - /** ShardReplicationFixRequest shard. 
*/ - public shard: string; + constructor(properties?: vtctldata.ITabletExternallyReparentedRequest); - /** ShardReplicationFixRequest cell. */ - public cell: string; + /** TabletExternallyReparentedRequest tablet. */ + public tablet?: (topodata.ITabletAlias|null); /** - * Creates a new ShardReplicationFixRequest instance using the specified properties. + * Creates a new TabletExternallyReparentedRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ShardReplicationFixRequest instance + * @returns TabletExternallyReparentedRequest instance */ - public static create(properties?: vtctldata.IShardReplicationFixRequest): vtctldata.ShardReplicationFixRequest; + public static create(properties?: vtctldata.ITabletExternallyReparentedRequest): vtctldata.TabletExternallyReparentedRequest; /** - * Encodes the specified ShardReplicationFixRequest message. Does not implicitly {@link vtctldata.ShardReplicationFixRequest.verify|verify} messages. - * @param message ShardReplicationFixRequest message or plain object to encode + * Encodes the specified TabletExternallyReparentedRequest message. Does not implicitly {@link vtctldata.TabletExternallyReparentedRequest.verify|verify} messages. + * @param message TabletExternallyReparentedRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IShardReplicationFixRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ITabletExternallyReparentedRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ShardReplicationFixRequest message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationFixRequest.verify|verify} messages. - * @param message ShardReplicationFixRequest message or plain object to encode + * Encodes the specified TabletExternallyReparentedRequest message, length delimited. 
Does not implicitly {@link vtctldata.TabletExternallyReparentedRequest.verify|verify} messages. + * @param message TabletExternallyReparentedRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IShardReplicationFixRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ITabletExternallyReparentedRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ShardReplicationFixRequest message from the specified reader or buffer. + * Decodes a TabletExternallyReparentedRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ShardReplicationFixRequest + * @returns TabletExternallyReparentedRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ShardReplicationFixRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.TabletExternallyReparentedRequest; /** - * Decodes a ShardReplicationFixRequest message from the specified reader or buffer, length delimited. + * Decodes a TabletExternallyReparentedRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns ShardReplicationFixRequest + * @returns TabletExternallyReparentedRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ShardReplicationFixRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.TabletExternallyReparentedRequest; /** - * Verifies a ShardReplicationFixRequest message. + * Verifies a TabletExternallyReparentedRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ShardReplicationFixRequest message from a plain object. Also converts values to their respective internal types. + * Creates a TabletExternallyReparentedRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ShardReplicationFixRequest + * @returns TabletExternallyReparentedRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ShardReplicationFixRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.TabletExternallyReparentedRequest; /** - * Creates a plain object from a ShardReplicationFixRequest message. Also converts values to other types if specified. - * @param message ShardReplicationFixRequest + * Creates a plain object from a TabletExternallyReparentedRequest message. Also converts values to other types if specified. 
+ * @param message TabletExternallyReparentedRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ShardReplicationFixRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.TabletExternallyReparentedRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ShardReplicationFixRequest to JSON. + * Converts this TabletExternallyReparentedRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ShardReplicationFixRequest + * Gets the default type url for TabletExternallyReparentedRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ShardReplicationFixResponse. */ - interface IShardReplicationFixResponse { + /** Properties of a TabletExternallyReparentedResponse. */ + interface ITabletExternallyReparentedResponse { - /** ShardReplicationFixResponse error */ - error?: (topodata.IShardReplicationError|null); + /** TabletExternallyReparentedResponse keyspace */ + keyspace?: (string|null); + + /** TabletExternallyReparentedResponse shard */ + shard?: (string|null); + + /** TabletExternallyReparentedResponse new_primary */ + new_primary?: (topodata.ITabletAlias|null); + + /** TabletExternallyReparentedResponse old_primary */ + old_primary?: (topodata.ITabletAlias|null); } - /** Represents a ShardReplicationFixResponse. */ - class ShardReplicationFixResponse implements IShardReplicationFixResponse { + /** Represents a TabletExternallyReparentedResponse. */ + class TabletExternallyReparentedResponse implements ITabletExternallyReparentedResponse { /** - * Constructs a new ShardReplicationFixResponse. + * Constructs a new TabletExternallyReparentedResponse. 
* @param [properties] Properties to set */ - constructor(properties?: vtctldata.IShardReplicationFixResponse); + constructor(properties?: vtctldata.ITabletExternallyReparentedResponse); - /** ShardReplicationFixResponse error. */ - public error?: (topodata.IShardReplicationError|null); + /** TabletExternallyReparentedResponse keyspace. */ + public keyspace: string; + + /** TabletExternallyReparentedResponse shard. */ + public shard: string; + + /** TabletExternallyReparentedResponse new_primary. */ + public new_primary?: (topodata.ITabletAlias|null); + + /** TabletExternallyReparentedResponse old_primary. */ + public old_primary?: (topodata.ITabletAlias|null); /** - * Creates a new ShardReplicationFixResponse instance using the specified properties. + * Creates a new TabletExternallyReparentedResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ShardReplicationFixResponse instance + * @returns TabletExternallyReparentedResponse instance */ - public static create(properties?: vtctldata.IShardReplicationFixResponse): vtctldata.ShardReplicationFixResponse; + public static create(properties?: vtctldata.ITabletExternallyReparentedResponse): vtctldata.TabletExternallyReparentedResponse; /** - * Encodes the specified ShardReplicationFixResponse message. Does not implicitly {@link vtctldata.ShardReplicationFixResponse.verify|verify} messages. - * @param message ShardReplicationFixResponse message or plain object to encode + * Encodes the specified TabletExternallyReparentedResponse message. Does not implicitly {@link vtctldata.TabletExternallyReparentedResponse.verify|verify} messages. 
+ * @param message TabletExternallyReparentedResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IShardReplicationFixResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.ITabletExternallyReparentedResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ShardReplicationFixResponse message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationFixResponse.verify|verify} messages. - * @param message ShardReplicationFixResponse message or plain object to encode + * Encodes the specified TabletExternallyReparentedResponse message, length delimited. Does not implicitly {@link vtctldata.TabletExternallyReparentedResponse.verify|verify} messages. + * @param message TabletExternallyReparentedResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IShardReplicationFixResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.ITabletExternallyReparentedResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ShardReplicationFixResponse message from the specified reader or buffer. + * Decodes a TabletExternallyReparentedResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ShardReplicationFixResponse + * @returns TabletExternallyReparentedResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ShardReplicationFixResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.TabletExternallyReparentedResponse; /** - * Decodes a ShardReplicationFixResponse message from the specified reader or buffer, length delimited. + * Decodes a TabletExternallyReparentedResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ShardReplicationFixResponse + * @returns TabletExternallyReparentedResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ShardReplicationFixResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.TabletExternallyReparentedResponse; /** - * Verifies a ShardReplicationFixResponse message. + * Verifies a TabletExternallyReparentedResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ShardReplicationFixResponse message from a plain object. Also converts values to their respective internal types. + * Creates a TabletExternallyReparentedResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ShardReplicationFixResponse + * @returns TabletExternallyReparentedResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ShardReplicationFixResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.TabletExternallyReparentedResponse; /** - * Creates a plain object from a ShardReplicationFixResponse message. Also converts values to other types if specified. - * @param message ShardReplicationFixResponse + * Creates a plain object from a TabletExternallyReparentedResponse message. Also converts values to other types if specified. + * @param message TabletExternallyReparentedResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ShardReplicationFixResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.TabletExternallyReparentedResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ShardReplicationFixResponse to JSON. + * Converts this TabletExternallyReparentedResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ShardReplicationFixResponse + * Gets the default type url for TabletExternallyReparentedResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ShardReplicationPositionsRequest. */ - interface IShardReplicationPositionsRequest { + /** Properties of an UpdateCellInfoRequest. 
*/ + interface IUpdateCellInfoRequest { - /** ShardReplicationPositionsRequest keyspace */ - keyspace?: (string|null); + /** UpdateCellInfoRequest name */ + name?: (string|null); - /** ShardReplicationPositionsRequest shard */ - shard?: (string|null); + /** UpdateCellInfoRequest cell_info */ + cell_info?: (topodata.ICellInfo|null); } - /** Represents a ShardReplicationPositionsRequest. */ - class ShardReplicationPositionsRequest implements IShardReplicationPositionsRequest { + /** Represents an UpdateCellInfoRequest. */ + class UpdateCellInfoRequest implements IUpdateCellInfoRequest { /** - * Constructs a new ShardReplicationPositionsRequest. + * Constructs a new UpdateCellInfoRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IShardReplicationPositionsRequest); + constructor(properties?: vtctldata.IUpdateCellInfoRequest); - /** ShardReplicationPositionsRequest keyspace. */ - public keyspace: string; + /** UpdateCellInfoRequest name. */ + public name: string; - /** ShardReplicationPositionsRequest shard. */ - public shard: string; + /** UpdateCellInfoRequest cell_info. */ + public cell_info?: (topodata.ICellInfo|null); /** - * Creates a new ShardReplicationPositionsRequest instance using the specified properties. + * Creates a new UpdateCellInfoRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ShardReplicationPositionsRequest instance + * @returns UpdateCellInfoRequest instance */ - public static create(properties?: vtctldata.IShardReplicationPositionsRequest): vtctldata.ShardReplicationPositionsRequest; + public static create(properties?: vtctldata.IUpdateCellInfoRequest): vtctldata.UpdateCellInfoRequest; /** - * Encodes the specified ShardReplicationPositionsRequest message. Does not implicitly {@link vtctldata.ShardReplicationPositionsRequest.verify|verify} messages. 
- * @param message ShardReplicationPositionsRequest message or plain object to encode + * Encodes the specified UpdateCellInfoRequest message. Does not implicitly {@link vtctldata.UpdateCellInfoRequest.verify|verify} messages. + * @param message UpdateCellInfoRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IShardReplicationPositionsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IUpdateCellInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ShardReplicationPositionsRequest message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationPositionsRequest.verify|verify} messages. - * @param message ShardReplicationPositionsRequest message or plain object to encode + * Encodes the specified UpdateCellInfoRequest message, length delimited. Does not implicitly {@link vtctldata.UpdateCellInfoRequest.verify|verify} messages. + * @param message UpdateCellInfoRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IShardReplicationPositionsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IUpdateCellInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ShardReplicationPositionsRequest message from the specified reader or buffer. + * Decodes an UpdateCellInfoRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ShardReplicationPositionsRequest + * @returns UpdateCellInfoRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ShardReplicationPositionsRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.UpdateCellInfoRequest; /** - * Decodes a ShardReplicationPositionsRequest message from the specified reader or buffer, length delimited. + * Decodes an UpdateCellInfoRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ShardReplicationPositionsRequest + * @returns UpdateCellInfoRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ShardReplicationPositionsRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.UpdateCellInfoRequest; /** - * Verifies a ShardReplicationPositionsRequest message. + * Verifies an UpdateCellInfoRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ShardReplicationPositionsRequest message from a plain object. Also converts values to their respective internal types. + * Creates an UpdateCellInfoRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ShardReplicationPositionsRequest + * @returns UpdateCellInfoRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ShardReplicationPositionsRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.UpdateCellInfoRequest; /** - * Creates a plain object from a ShardReplicationPositionsRequest message. Also converts values to other types if specified. - * @param message ShardReplicationPositionsRequest + * Creates a plain object from an UpdateCellInfoRequest message. Also converts values to other types if specified. + * @param message UpdateCellInfoRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ShardReplicationPositionsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.UpdateCellInfoRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ShardReplicationPositionsRequest to JSON. + * Converts this UpdateCellInfoRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ShardReplicationPositionsRequest + * Gets the default type url for UpdateCellInfoRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ShardReplicationPositionsResponse. */ - interface IShardReplicationPositionsResponse { + /** Properties of an UpdateCellInfoResponse. 
*/ + interface IUpdateCellInfoResponse { - /** ShardReplicationPositionsResponse replication_statuses */ - replication_statuses?: ({ [k: string]: replicationdata.IStatus }|null); + /** UpdateCellInfoResponse name */ + name?: (string|null); - /** ShardReplicationPositionsResponse tablet_map */ - tablet_map?: ({ [k: string]: topodata.ITablet }|null); + /** UpdateCellInfoResponse cell_info */ + cell_info?: (topodata.ICellInfo|null); } - /** Represents a ShardReplicationPositionsResponse. */ - class ShardReplicationPositionsResponse implements IShardReplicationPositionsResponse { + /** Represents an UpdateCellInfoResponse. */ + class UpdateCellInfoResponse implements IUpdateCellInfoResponse { /** - * Constructs a new ShardReplicationPositionsResponse. + * Constructs a new UpdateCellInfoResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IShardReplicationPositionsResponse); + constructor(properties?: vtctldata.IUpdateCellInfoResponse); - /** ShardReplicationPositionsResponse replication_statuses. */ - public replication_statuses: { [k: string]: replicationdata.IStatus }; + /** UpdateCellInfoResponse name. */ + public name: string; - /** ShardReplicationPositionsResponse tablet_map. */ - public tablet_map: { [k: string]: topodata.ITablet }; + /** UpdateCellInfoResponse cell_info. */ + public cell_info?: (topodata.ICellInfo|null); /** - * Creates a new ShardReplicationPositionsResponse instance using the specified properties. + * Creates a new UpdateCellInfoResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns ShardReplicationPositionsResponse instance + * @returns UpdateCellInfoResponse instance */ - public static create(properties?: vtctldata.IShardReplicationPositionsResponse): vtctldata.ShardReplicationPositionsResponse; + public static create(properties?: vtctldata.IUpdateCellInfoResponse): vtctldata.UpdateCellInfoResponse; /** - * Encodes the specified ShardReplicationPositionsResponse message. Does not implicitly {@link vtctldata.ShardReplicationPositionsResponse.verify|verify} messages. - * @param message ShardReplicationPositionsResponse message or plain object to encode + * Encodes the specified UpdateCellInfoResponse message. Does not implicitly {@link vtctldata.UpdateCellInfoResponse.verify|verify} messages. + * @param message UpdateCellInfoResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IShardReplicationPositionsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IUpdateCellInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ShardReplicationPositionsResponse message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationPositionsResponse.verify|verify} messages. - * @param message ShardReplicationPositionsResponse message or plain object to encode + * Encodes the specified UpdateCellInfoResponse message, length delimited. Does not implicitly {@link vtctldata.UpdateCellInfoResponse.verify|verify} messages. 
+ * @param message UpdateCellInfoResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IShardReplicationPositionsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IUpdateCellInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ShardReplicationPositionsResponse message from the specified reader or buffer. + * Decodes an UpdateCellInfoResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ShardReplicationPositionsResponse + * @returns UpdateCellInfoResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ShardReplicationPositionsResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.UpdateCellInfoResponse; /** - * Decodes a ShardReplicationPositionsResponse message from the specified reader or buffer, length delimited. + * Decodes an UpdateCellInfoResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ShardReplicationPositionsResponse + * @returns UpdateCellInfoResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ShardReplicationPositionsResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.UpdateCellInfoResponse; /** - * Verifies a ShardReplicationPositionsResponse message. + * Verifies an UpdateCellInfoResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ShardReplicationPositionsResponse message from a plain object. Also converts values to their respective internal types. + * Creates an UpdateCellInfoResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ShardReplicationPositionsResponse + * @returns UpdateCellInfoResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ShardReplicationPositionsResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.UpdateCellInfoResponse; /** - * Creates a plain object from a ShardReplicationPositionsResponse message. Also converts values to other types if specified. - * @param message ShardReplicationPositionsResponse + * Creates a plain object from an UpdateCellInfoResponse message. Also converts values to other types if specified. + * @param message UpdateCellInfoResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ShardReplicationPositionsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.UpdateCellInfoResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ShardReplicationPositionsResponse to JSON. + * Converts this UpdateCellInfoResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ShardReplicationPositionsResponse + * Gets the default type url for UpdateCellInfoResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ShardReplicationRemoveRequest. 
*/ - interface IShardReplicationRemoveRequest { - - /** ShardReplicationRemoveRequest keyspace */ - keyspace?: (string|null); + /** Properties of an UpdateCellsAliasRequest. */ + interface IUpdateCellsAliasRequest { - /** ShardReplicationRemoveRequest shard */ - shard?: (string|null); + /** UpdateCellsAliasRequest name */ + name?: (string|null); - /** ShardReplicationRemoveRequest tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); + /** UpdateCellsAliasRequest cells_alias */ + cells_alias?: (topodata.ICellsAlias|null); } - /** Represents a ShardReplicationRemoveRequest. */ - class ShardReplicationRemoveRequest implements IShardReplicationRemoveRequest { + /** Represents an UpdateCellsAliasRequest. */ + class UpdateCellsAliasRequest implements IUpdateCellsAliasRequest { /** - * Constructs a new ShardReplicationRemoveRequest. + * Constructs a new UpdateCellsAliasRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IShardReplicationRemoveRequest); - - /** ShardReplicationRemoveRequest keyspace. */ - public keyspace: string; + constructor(properties?: vtctldata.IUpdateCellsAliasRequest); - /** ShardReplicationRemoveRequest shard. */ - public shard: string; + /** UpdateCellsAliasRequest name. */ + public name: string; - /** ShardReplicationRemoveRequest tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); + /** UpdateCellsAliasRequest cells_alias. */ + public cells_alias?: (topodata.ICellsAlias|null); /** - * Creates a new ShardReplicationRemoveRequest instance using the specified properties. + * Creates a new UpdateCellsAliasRequest instance using the specified properties. 
* @param [properties] Properties to set - * @returns ShardReplicationRemoveRequest instance + * @returns UpdateCellsAliasRequest instance */ - public static create(properties?: vtctldata.IShardReplicationRemoveRequest): vtctldata.ShardReplicationRemoveRequest; + public static create(properties?: vtctldata.IUpdateCellsAliasRequest): vtctldata.UpdateCellsAliasRequest; /** - * Encodes the specified ShardReplicationRemoveRequest message. Does not implicitly {@link vtctldata.ShardReplicationRemoveRequest.verify|verify} messages. - * @param message ShardReplicationRemoveRequest message or plain object to encode + * Encodes the specified UpdateCellsAliasRequest message. Does not implicitly {@link vtctldata.UpdateCellsAliasRequest.verify|verify} messages. + * @param message UpdateCellsAliasRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IShardReplicationRemoveRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IUpdateCellsAliasRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ShardReplicationRemoveRequest message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationRemoveRequest.verify|verify} messages. - * @param message ShardReplicationRemoveRequest message or plain object to encode + * Encodes the specified UpdateCellsAliasRequest message, length delimited. Does not implicitly {@link vtctldata.UpdateCellsAliasRequest.verify|verify} messages. 
+ * @param message UpdateCellsAliasRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IShardReplicationRemoveRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IUpdateCellsAliasRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ShardReplicationRemoveRequest message from the specified reader or buffer. + * Decodes an UpdateCellsAliasRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ShardReplicationRemoveRequest + * @returns UpdateCellsAliasRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ShardReplicationRemoveRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.UpdateCellsAliasRequest; /** - * Decodes a ShardReplicationRemoveRequest message from the specified reader or buffer, length delimited. + * Decodes an UpdateCellsAliasRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ShardReplicationRemoveRequest + * @returns UpdateCellsAliasRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ShardReplicationRemoveRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.UpdateCellsAliasRequest; /** - * Verifies a ShardReplicationRemoveRequest message. + * Verifies an UpdateCellsAliasRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ShardReplicationRemoveRequest message from a plain object. Also converts values to their respective internal types. + * Creates an UpdateCellsAliasRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ShardReplicationRemoveRequest + * @returns UpdateCellsAliasRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ShardReplicationRemoveRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.UpdateCellsAliasRequest; /** - * Creates a plain object from a ShardReplicationRemoveRequest message. Also converts values to other types if specified. - * @param message ShardReplicationRemoveRequest + * Creates a plain object from an UpdateCellsAliasRequest message. Also converts values to other types if specified. + * @param message UpdateCellsAliasRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ShardReplicationRemoveRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.UpdateCellsAliasRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ShardReplicationRemoveRequest to JSON. + * Converts this UpdateCellsAliasRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ShardReplicationRemoveRequest + * Gets the default type url for UpdateCellsAliasRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ShardReplicationRemoveResponse. 
*/ - interface IShardReplicationRemoveResponse { + /** Properties of an UpdateCellsAliasResponse. */ + interface IUpdateCellsAliasResponse { + + /** UpdateCellsAliasResponse name */ + name?: (string|null); + + /** UpdateCellsAliasResponse cells_alias */ + cells_alias?: (topodata.ICellsAlias|null); } - /** Represents a ShardReplicationRemoveResponse. */ - class ShardReplicationRemoveResponse implements IShardReplicationRemoveResponse { + /** Represents an UpdateCellsAliasResponse. */ + class UpdateCellsAliasResponse implements IUpdateCellsAliasResponse { /** - * Constructs a new ShardReplicationRemoveResponse. + * Constructs a new UpdateCellsAliasResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IShardReplicationRemoveResponse); + constructor(properties?: vtctldata.IUpdateCellsAliasResponse); + + /** UpdateCellsAliasResponse name. */ + public name: string; + + /** UpdateCellsAliasResponse cells_alias. */ + public cells_alias?: (topodata.ICellsAlias|null); /** - * Creates a new ShardReplicationRemoveResponse instance using the specified properties. + * Creates a new UpdateCellsAliasResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ShardReplicationRemoveResponse instance + * @returns UpdateCellsAliasResponse instance */ - public static create(properties?: vtctldata.IShardReplicationRemoveResponse): vtctldata.ShardReplicationRemoveResponse; + public static create(properties?: vtctldata.IUpdateCellsAliasResponse): vtctldata.UpdateCellsAliasResponse; /** - * Encodes the specified ShardReplicationRemoveResponse message. Does not implicitly {@link vtctldata.ShardReplicationRemoveResponse.verify|verify} messages. - * @param message ShardReplicationRemoveResponse message or plain object to encode + * Encodes the specified UpdateCellsAliasResponse message. Does not implicitly {@link vtctldata.UpdateCellsAliasResponse.verify|verify} messages. 
+ * @param message UpdateCellsAliasResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IShardReplicationRemoveResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IUpdateCellsAliasResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ShardReplicationRemoveResponse message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationRemoveResponse.verify|verify} messages. - * @param message ShardReplicationRemoveResponse message or plain object to encode + * Encodes the specified UpdateCellsAliasResponse message, length delimited. Does not implicitly {@link vtctldata.UpdateCellsAliasResponse.verify|verify} messages. + * @param message UpdateCellsAliasResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IShardReplicationRemoveResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IUpdateCellsAliasResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ShardReplicationRemoveResponse message from the specified reader or buffer. + * Decodes an UpdateCellsAliasResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ShardReplicationRemoveResponse + * @returns UpdateCellsAliasResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ShardReplicationRemoveResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.UpdateCellsAliasResponse; /** - * Decodes a ShardReplicationRemoveResponse message from the specified reader or buffer, length delimited. + * Decodes an UpdateCellsAliasResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ShardReplicationRemoveResponse + * @returns UpdateCellsAliasResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ShardReplicationRemoveResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.UpdateCellsAliasResponse; /** - * Verifies a ShardReplicationRemoveResponse message. + * Verifies an UpdateCellsAliasResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ShardReplicationRemoveResponse message from a plain object. Also converts values to their respective internal types. + * Creates an UpdateCellsAliasResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ShardReplicationRemoveResponse + * @returns UpdateCellsAliasResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ShardReplicationRemoveResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.UpdateCellsAliasResponse; /** - * Creates a plain object from a ShardReplicationRemoveResponse message. Also converts values to other types if specified. - * @param message ShardReplicationRemoveResponse + * Creates a plain object from an UpdateCellsAliasResponse message. Also converts values to other types if specified. + * @param message UpdateCellsAliasResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ShardReplicationRemoveResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.UpdateCellsAliasResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ShardReplicationRemoveResponse to JSON. + * Converts this UpdateCellsAliasResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ShardReplicationRemoveResponse + * Gets the default type url for UpdateCellsAliasResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SleepTabletRequest. */ - interface ISleepTabletRequest { - - /** SleepTabletRequest tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); + /** Properties of a ValidateRequest. */ + interface IValidateRequest { - /** SleepTabletRequest duration */ - duration?: (vttime.IDuration|null); + /** ValidateRequest ping_tablets */ + ping_tablets?: (boolean|null); } - /** Represents a SleepTabletRequest. */ - class SleepTabletRequest implements ISleepTabletRequest { + /** Represents a ValidateRequest. 
*/ + class ValidateRequest implements IValidateRequest { /** - * Constructs a new SleepTabletRequest. + * Constructs a new ValidateRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ISleepTabletRequest); - - /** SleepTabletRequest tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); + constructor(properties?: vtctldata.IValidateRequest); - /** SleepTabletRequest duration. */ - public duration?: (vttime.IDuration|null); + /** ValidateRequest ping_tablets. */ + public ping_tablets: boolean; /** - * Creates a new SleepTabletRequest instance using the specified properties. + * Creates a new ValidateRequest instance using the specified properties. * @param [properties] Properties to set - * @returns SleepTabletRequest instance + * @returns ValidateRequest instance */ - public static create(properties?: vtctldata.ISleepTabletRequest): vtctldata.SleepTabletRequest; + public static create(properties?: vtctldata.IValidateRequest): vtctldata.ValidateRequest; /** - * Encodes the specified SleepTabletRequest message. Does not implicitly {@link vtctldata.SleepTabletRequest.verify|verify} messages. - * @param message SleepTabletRequest message or plain object to encode + * Encodes the specified ValidateRequest message. Does not implicitly {@link vtctldata.ValidateRequest.verify|verify} messages. + * @param message ValidateRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ISleepTabletRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IValidateRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SleepTabletRequest message, length delimited. Does not implicitly {@link vtctldata.SleepTabletRequest.verify|verify} messages. - * @param message SleepTabletRequest message or plain object to encode + * Encodes the specified ValidateRequest message, length delimited. 
Does not implicitly {@link vtctldata.ValidateRequest.verify|verify} messages. + * @param message ValidateRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ISleepTabletRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IValidateRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SleepTabletRequest message from the specified reader or buffer. + * Decodes a ValidateRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SleepTabletRequest + * @returns ValidateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SleepTabletRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateRequest; /** - * Decodes a SleepTabletRequest message from the specified reader or buffer, length delimited. + * Decodes a ValidateRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns SleepTabletRequest + * @returns ValidateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SleepTabletRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateRequest; /** - * Verifies a SleepTabletRequest message. + * Verifies a ValidateRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SleepTabletRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns SleepTabletRequest + * @returns ValidateRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.SleepTabletRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.ValidateRequest; /** - * Creates a plain object from a SleepTabletRequest message. Also converts values to other types if specified. - * @param message SleepTabletRequest + * Creates a plain object from a ValidateRequest message. Also converts values to other types if specified. + * @param message ValidateRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.SleepTabletRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ValidateRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SleepTabletRequest to JSON. + * Converts this ValidateRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SleepTabletRequest + * Gets the default type url for ValidateRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SleepTabletResponse. */ - interface ISleepTabletResponse { + /** Properties of a ValidateResponse. 
*/ + interface IValidateResponse { + + /** ValidateResponse results */ + results?: (string[]|null); + + /** ValidateResponse results_by_keyspace */ + results_by_keyspace?: ({ [k: string]: vtctldata.IValidateKeyspaceResponse }|null); } - /** Represents a SleepTabletResponse. */ - class SleepTabletResponse implements ISleepTabletResponse { + /** Represents a ValidateResponse. */ + class ValidateResponse implements IValidateResponse { /** - * Constructs a new SleepTabletResponse. + * Constructs a new ValidateResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ISleepTabletResponse); + constructor(properties?: vtctldata.IValidateResponse); + + /** ValidateResponse results. */ + public results: string[]; + + /** ValidateResponse results_by_keyspace. */ + public results_by_keyspace: { [k: string]: vtctldata.IValidateKeyspaceResponse }; /** - * Creates a new SleepTabletResponse instance using the specified properties. + * Creates a new ValidateResponse instance using the specified properties. * @param [properties] Properties to set - * @returns SleepTabletResponse instance + * @returns ValidateResponse instance */ - public static create(properties?: vtctldata.ISleepTabletResponse): vtctldata.SleepTabletResponse; + public static create(properties?: vtctldata.IValidateResponse): vtctldata.ValidateResponse; /** - * Encodes the specified SleepTabletResponse message. Does not implicitly {@link vtctldata.SleepTabletResponse.verify|verify} messages. - * @param message SleepTabletResponse message or plain object to encode + * Encodes the specified ValidateResponse message. Does not implicitly {@link vtctldata.ValidateResponse.verify|verify} messages. 
+ * @param message ValidateResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ISleepTabletResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IValidateResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SleepTabletResponse message, length delimited. Does not implicitly {@link vtctldata.SleepTabletResponse.verify|verify} messages. - * @param message SleepTabletResponse message or plain object to encode + * Encodes the specified ValidateResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateResponse.verify|verify} messages. + * @param message ValidateResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ISleepTabletResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IValidateResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SleepTabletResponse message from the specified reader or buffer. + * Decodes a ValidateResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SleepTabletResponse + * @returns ValidateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SleepTabletResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateResponse; /** - * Decodes a SleepTabletResponse message from the specified reader or buffer, length delimited. + * Decodes a ValidateResponse message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns SleepTabletResponse + * @returns ValidateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SleepTabletResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateResponse; /** - * Verifies a SleepTabletResponse message. + * Verifies a ValidateResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SleepTabletResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns SleepTabletResponse + * @returns ValidateResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.SleepTabletResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.ValidateResponse; /** - * Creates a plain object from a SleepTabletResponse message. Also converts values to other types if specified. - * @param message SleepTabletResponse + * Creates a plain object from a ValidateResponse message. Also converts values to other types if specified. + * @param message ValidateResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.SleepTabletResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ValidateResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SleepTabletResponse to JSON. + * Converts this ValidateResponse to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SleepTabletResponse + * Gets the default type url for ValidateResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SourceShardAddRequest. */ - interface ISourceShardAddRequest { + /** Properties of a ValidateKeyspaceRequest. */ + interface IValidateKeyspaceRequest { - /** SourceShardAddRequest keyspace */ + /** ValidateKeyspaceRequest keyspace */ keyspace?: (string|null); - /** SourceShardAddRequest shard */ - shard?: (string|null); - - /** SourceShardAddRequest uid */ - uid?: (number|null); - - /** SourceShardAddRequest source_keyspace */ - source_keyspace?: (string|null); - - /** SourceShardAddRequest source_shard */ - source_shard?: (string|null); - - /** SourceShardAddRequest key_range */ - key_range?: (topodata.IKeyRange|null); - - /** SourceShardAddRequest tables */ - tables?: (string[]|null); + /** ValidateKeyspaceRequest ping_tablets */ + ping_tablets?: (boolean|null); } - /** Represents a SourceShardAddRequest. */ - class SourceShardAddRequest implements ISourceShardAddRequest { + /** Represents a ValidateKeyspaceRequest. */ + class ValidateKeyspaceRequest implements IValidateKeyspaceRequest { /** - * Constructs a new SourceShardAddRequest. + * Constructs a new ValidateKeyspaceRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ISourceShardAddRequest); + constructor(properties?: vtctldata.IValidateKeyspaceRequest); - /** SourceShardAddRequest keyspace. */ + /** ValidateKeyspaceRequest keyspace. */ public keyspace: string; - /** SourceShardAddRequest shard. */ - public shard: string; - - /** SourceShardAddRequest uid. */ - public uid: number; - - /** SourceShardAddRequest source_keyspace. */ - public source_keyspace: string; - - /** SourceShardAddRequest source_shard. 
*/ - public source_shard: string; - - /** SourceShardAddRequest key_range. */ - public key_range?: (topodata.IKeyRange|null); - - /** SourceShardAddRequest tables. */ - public tables: string[]; + /** ValidateKeyspaceRequest ping_tablets. */ + public ping_tablets: boolean; /** - * Creates a new SourceShardAddRequest instance using the specified properties. + * Creates a new ValidateKeyspaceRequest instance using the specified properties. * @param [properties] Properties to set - * @returns SourceShardAddRequest instance + * @returns ValidateKeyspaceRequest instance */ - public static create(properties?: vtctldata.ISourceShardAddRequest): vtctldata.SourceShardAddRequest; + public static create(properties?: vtctldata.IValidateKeyspaceRequest): vtctldata.ValidateKeyspaceRequest; /** - * Encodes the specified SourceShardAddRequest message. Does not implicitly {@link vtctldata.SourceShardAddRequest.verify|verify} messages. - * @param message SourceShardAddRequest message or plain object to encode + * Encodes the specified ValidateKeyspaceRequest message. Does not implicitly {@link vtctldata.ValidateKeyspaceRequest.verify|verify} messages. + * @param message ValidateKeyspaceRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ISourceShardAddRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IValidateKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SourceShardAddRequest message, length delimited. Does not implicitly {@link vtctldata.SourceShardAddRequest.verify|verify} messages. - * @param message SourceShardAddRequest message or plain object to encode + * Encodes the specified ValidateKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateKeyspaceRequest.verify|verify} messages. 
+ * @param message ValidateKeyspaceRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ISourceShardAddRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IValidateKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SourceShardAddRequest message from the specified reader or buffer. + * Decodes a ValidateKeyspaceRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SourceShardAddRequest + * @returns ValidateKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SourceShardAddRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateKeyspaceRequest; /** - * Decodes a SourceShardAddRequest message from the specified reader or buffer, length delimited. + * Decodes a ValidateKeyspaceRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns SourceShardAddRequest + * @returns ValidateKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SourceShardAddRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateKeyspaceRequest; /** - * Verifies a SourceShardAddRequest message. + * Verifies a ValidateKeyspaceRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SourceShardAddRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateKeyspaceRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns SourceShardAddRequest + * @returns ValidateKeyspaceRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.SourceShardAddRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.ValidateKeyspaceRequest; /** - * Creates a plain object from a SourceShardAddRequest message. Also converts values to other types if specified. - * @param message SourceShardAddRequest + * Creates a plain object from a ValidateKeyspaceRequest message. Also converts values to other types if specified. + * @param message ValidateKeyspaceRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.SourceShardAddRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ValidateKeyspaceRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SourceShardAddRequest to JSON. + * Converts this ValidateKeyspaceRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SourceShardAddRequest + * Gets the default type url for ValidateKeyspaceRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SourceShardAddResponse. */ - interface ISourceShardAddResponse { + /** Properties of a ValidateKeyspaceResponse. 
*/ + interface IValidateKeyspaceResponse { - /** SourceShardAddResponse shard */ - shard?: (topodata.IShard|null); + /** ValidateKeyspaceResponse results */ + results?: (string[]|null); + + /** ValidateKeyspaceResponse results_by_shard */ + results_by_shard?: ({ [k: string]: vtctldata.IValidateShardResponse }|null); } - /** Represents a SourceShardAddResponse. */ - class SourceShardAddResponse implements ISourceShardAddResponse { + /** Represents a ValidateKeyspaceResponse. */ + class ValidateKeyspaceResponse implements IValidateKeyspaceResponse { /** - * Constructs a new SourceShardAddResponse. + * Constructs a new ValidateKeyspaceResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ISourceShardAddResponse); + constructor(properties?: vtctldata.IValidateKeyspaceResponse); - /** SourceShardAddResponse shard. */ - public shard?: (topodata.IShard|null); + /** ValidateKeyspaceResponse results. */ + public results: string[]; + + /** ValidateKeyspaceResponse results_by_shard. */ + public results_by_shard: { [k: string]: vtctldata.IValidateShardResponse }; /** - * Creates a new SourceShardAddResponse instance using the specified properties. + * Creates a new ValidateKeyspaceResponse instance using the specified properties. * @param [properties] Properties to set - * @returns SourceShardAddResponse instance + * @returns ValidateKeyspaceResponse instance */ - public static create(properties?: vtctldata.ISourceShardAddResponse): vtctldata.SourceShardAddResponse; + public static create(properties?: vtctldata.IValidateKeyspaceResponse): vtctldata.ValidateKeyspaceResponse; /** - * Encodes the specified SourceShardAddResponse message. Does not implicitly {@link vtctldata.SourceShardAddResponse.verify|verify} messages. - * @param message SourceShardAddResponse message or plain object to encode + * Encodes the specified ValidateKeyspaceResponse message. Does not implicitly {@link vtctldata.ValidateKeyspaceResponse.verify|verify} messages. 
+ * @param message ValidateKeyspaceResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ISourceShardAddResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IValidateKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SourceShardAddResponse message, length delimited. Does not implicitly {@link vtctldata.SourceShardAddResponse.verify|verify} messages. - * @param message SourceShardAddResponse message or plain object to encode + * Encodes the specified ValidateKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateKeyspaceResponse.verify|verify} messages. + * @param message ValidateKeyspaceResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ISourceShardAddResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IValidateKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SourceShardAddResponse message from the specified reader or buffer. + * Decodes a ValidateKeyspaceResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SourceShardAddResponse + * @returns ValidateKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SourceShardAddResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateKeyspaceResponse; /** - * Decodes a SourceShardAddResponse message from the specified reader or buffer, length delimited. 
+ * Decodes a ValidateKeyspaceResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns SourceShardAddResponse + * @returns ValidateKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SourceShardAddResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateKeyspaceResponse; /** - * Verifies a SourceShardAddResponse message. + * Verifies a ValidateKeyspaceResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SourceShardAddResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateKeyspaceResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns SourceShardAddResponse + * @returns ValidateKeyspaceResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.SourceShardAddResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.ValidateKeyspaceResponse; /** - * Creates a plain object from a SourceShardAddResponse message. Also converts values to other types if specified. - * @param message SourceShardAddResponse + * Creates a plain object from a ValidateKeyspaceResponse message. Also converts values to other types if specified. 
+ * @param message ValidateKeyspaceResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.SourceShardAddResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ValidateKeyspaceResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SourceShardAddResponse to JSON. + * Converts this ValidateKeyspaceResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SourceShardAddResponse + * Gets the default type url for ValidateKeyspaceResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SourceShardDeleteRequest. */ - interface ISourceShardDeleteRequest { + /** Properties of a ValidateSchemaKeyspaceRequest. */ + interface IValidateSchemaKeyspaceRequest { - /** SourceShardDeleteRequest keyspace */ + /** ValidateSchemaKeyspaceRequest keyspace */ keyspace?: (string|null); - /** SourceShardDeleteRequest shard */ - shard?: (string|null); + /** ValidateSchemaKeyspaceRequest exclude_tables */ + exclude_tables?: (string[]|null); - /** SourceShardDeleteRequest uid */ - uid?: (number|null); + /** ValidateSchemaKeyspaceRequest include_views */ + include_views?: (boolean|null); + + /** ValidateSchemaKeyspaceRequest skip_no_primary */ + skip_no_primary?: (boolean|null); + + /** ValidateSchemaKeyspaceRequest include_vschema */ + include_vschema?: (boolean|null); } - /** Represents a SourceShardDeleteRequest. */ - class SourceShardDeleteRequest implements ISourceShardDeleteRequest { + /** Represents a ValidateSchemaKeyspaceRequest. */ + class ValidateSchemaKeyspaceRequest implements IValidateSchemaKeyspaceRequest { /** - * Constructs a new SourceShardDeleteRequest. + * Constructs a new ValidateSchemaKeyspaceRequest. 
* @param [properties] Properties to set */ - constructor(properties?: vtctldata.ISourceShardDeleteRequest); + constructor(properties?: vtctldata.IValidateSchemaKeyspaceRequest); - /** SourceShardDeleteRequest keyspace. */ + /** ValidateSchemaKeyspaceRequest keyspace. */ public keyspace: string; - /** SourceShardDeleteRequest shard. */ - public shard: string; + /** ValidateSchemaKeyspaceRequest exclude_tables. */ + public exclude_tables: string[]; - /** SourceShardDeleteRequest uid. */ - public uid: number; + /** ValidateSchemaKeyspaceRequest include_views. */ + public include_views: boolean; + + /** ValidateSchemaKeyspaceRequest skip_no_primary. */ + public skip_no_primary: boolean; + + /** ValidateSchemaKeyspaceRequest include_vschema. */ + public include_vschema: boolean; /** - * Creates a new SourceShardDeleteRequest instance using the specified properties. + * Creates a new ValidateSchemaKeyspaceRequest instance using the specified properties. * @param [properties] Properties to set - * @returns SourceShardDeleteRequest instance + * @returns ValidateSchemaKeyspaceRequest instance */ - public static create(properties?: vtctldata.ISourceShardDeleteRequest): vtctldata.SourceShardDeleteRequest; + public static create(properties?: vtctldata.IValidateSchemaKeyspaceRequest): vtctldata.ValidateSchemaKeyspaceRequest; /** - * Encodes the specified SourceShardDeleteRequest message. Does not implicitly {@link vtctldata.SourceShardDeleteRequest.verify|verify} messages. - * @param message SourceShardDeleteRequest message or plain object to encode + * Encodes the specified ValidateSchemaKeyspaceRequest message. Does not implicitly {@link vtctldata.ValidateSchemaKeyspaceRequest.verify|verify} messages. 
+ * @param message ValidateSchemaKeyspaceRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ISourceShardDeleteRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IValidateSchemaKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SourceShardDeleteRequest message, length delimited. Does not implicitly {@link vtctldata.SourceShardDeleteRequest.verify|verify} messages. - * @param message SourceShardDeleteRequest message or plain object to encode + * Encodes the specified ValidateSchemaKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateSchemaKeyspaceRequest.verify|verify} messages. + * @param message ValidateSchemaKeyspaceRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ISourceShardDeleteRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IValidateSchemaKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SourceShardDeleteRequest message from the specified reader or buffer. + * Decodes a ValidateSchemaKeyspaceRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SourceShardDeleteRequest + * @returns ValidateSchemaKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SourceShardDeleteRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateSchemaKeyspaceRequest; /** - * Decodes a SourceShardDeleteRequest message from the specified reader or buffer, length delimited. + * Decodes a ValidateSchemaKeyspaceRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns SourceShardDeleteRequest + * @returns ValidateSchemaKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SourceShardDeleteRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateSchemaKeyspaceRequest; /** - * Verifies a SourceShardDeleteRequest message. + * Verifies a ValidateSchemaKeyspaceRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SourceShardDeleteRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateSchemaKeyspaceRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns SourceShardDeleteRequest + * @returns ValidateSchemaKeyspaceRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.SourceShardDeleteRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.ValidateSchemaKeyspaceRequest; /** - * Creates a plain object from a SourceShardDeleteRequest message. Also converts values to other types if specified. - * @param message SourceShardDeleteRequest + * Creates a plain object from a ValidateSchemaKeyspaceRequest message. Also converts values to other types if specified. + * @param message ValidateSchemaKeyspaceRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.SourceShardDeleteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ValidateSchemaKeyspaceRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SourceShardDeleteRequest to JSON. + * Converts this ValidateSchemaKeyspaceRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SourceShardDeleteRequest + * Gets the default type url for ValidateSchemaKeyspaceRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SourceShardDeleteResponse. */ - interface ISourceShardDeleteResponse { + /** Properties of a ValidateSchemaKeyspaceResponse. */ + interface IValidateSchemaKeyspaceResponse { - /** SourceShardDeleteResponse shard */ - shard?: (topodata.IShard|null); + /** ValidateSchemaKeyspaceResponse results */ + results?: (string[]|null); + + /** ValidateSchemaKeyspaceResponse results_by_shard */ + results_by_shard?: ({ [k: string]: vtctldata.IValidateShardResponse }|null); } - /** Represents a SourceShardDeleteResponse. 
*/ - class SourceShardDeleteResponse implements ISourceShardDeleteResponse { + /** Represents a ValidateSchemaKeyspaceResponse. */ + class ValidateSchemaKeyspaceResponse implements IValidateSchemaKeyspaceResponse { /** - * Constructs a new SourceShardDeleteResponse. + * Constructs a new ValidateSchemaKeyspaceResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ISourceShardDeleteResponse); + constructor(properties?: vtctldata.IValidateSchemaKeyspaceResponse); - /** SourceShardDeleteResponse shard. */ - public shard?: (topodata.IShard|null); + /** ValidateSchemaKeyspaceResponse results. */ + public results: string[]; + + /** ValidateSchemaKeyspaceResponse results_by_shard. */ + public results_by_shard: { [k: string]: vtctldata.IValidateShardResponse }; /** - * Creates a new SourceShardDeleteResponse instance using the specified properties. + * Creates a new ValidateSchemaKeyspaceResponse instance using the specified properties. * @param [properties] Properties to set - * @returns SourceShardDeleteResponse instance + * @returns ValidateSchemaKeyspaceResponse instance */ - public static create(properties?: vtctldata.ISourceShardDeleteResponse): vtctldata.SourceShardDeleteResponse; + public static create(properties?: vtctldata.IValidateSchemaKeyspaceResponse): vtctldata.ValidateSchemaKeyspaceResponse; /** - * Encodes the specified SourceShardDeleteResponse message. Does not implicitly {@link vtctldata.SourceShardDeleteResponse.verify|verify} messages. - * @param message SourceShardDeleteResponse message or plain object to encode + * Encodes the specified ValidateSchemaKeyspaceResponse message. Does not implicitly {@link vtctldata.ValidateSchemaKeyspaceResponse.verify|verify} messages. 
+ * @param message ValidateSchemaKeyspaceResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ISourceShardDeleteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IValidateSchemaKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified SourceShardDeleteResponse message, length delimited. Does not implicitly {@link vtctldata.SourceShardDeleteResponse.verify|verify} messages. - * @param message SourceShardDeleteResponse message or plain object to encode + * Encodes the specified ValidateSchemaKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateSchemaKeyspaceResponse.verify|verify} messages. + * @param message ValidateSchemaKeyspaceResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ISourceShardDeleteResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IValidateSchemaKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a SourceShardDeleteResponse message from the specified reader or buffer. + * Decodes a ValidateSchemaKeyspaceResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns SourceShardDeleteResponse + * @returns ValidateSchemaKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SourceShardDeleteResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateSchemaKeyspaceResponse; /** - * Decodes a SourceShardDeleteResponse message from the specified reader or buffer, length delimited. + * Decodes a ValidateSchemaKeyspaceResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns SourceShardDeleteResponse + * @returns ValidateSchemaKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SourceShardDeleteResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateSchemaKeyspaceResponse; /** - * Verifies a SourceShardDeleteResponse message. + * Verifies a ValidateSchemaKeyspaceResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a SourceShardDeleteResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateSchemaKeyspaceResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns SourceShardDeleteResponse + * @returns ValidateSchemaKeyspaceResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.SourceShardDeleteResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.ValidateSchemaKeyspaceResponse; /** - * Creates a plain object from a SourceShardDeleteResponse message. Also converts values to other types if specified. - * @param message SourceShardDeleteResponse + * Creates a plain object from a ValidateSchemaKeyspaceResponse message. Also converts values to other types if specified. + * @param message ValidateSchemaKeyspaceResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.SourceShardDeleteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ValidateSchemaKeyspaceResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this SourceShardDeleteResponse to JSON. + * Converts this ValidateSchemaKeyspaceResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for SourceShardDeleteResponse + * Gets the default type url for ValidateSchemaKeyspaceResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a StartReplicationRequest. */ - interface IStartReplicationRequest { + /** Properties of a ValidateShardRequest. */ + interface IValidateShardRequest { - /** StartReplicationRequest tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); + /** ValidateShardRequest keyspace */ + keyspace?: (string|null); + + /** ValidateShardRequest shard */ + shard?: (string|null); + + /** ValidateShardRequest ping_tablets */ + ping_tablets?: (boolean|null); } - /** Represents a StartReplicationRequest. 
*/ - class StartReplicationRequest implements IStartReplicationRequest { + /** Represents a ValidateShardRequest. */ + class ValidateShardRequest implements IValidateShardRequest { /** - * Constructs a new StartReplicationRequest. + * Constructs a new ValidateShardRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IStartReplicationRequest); + constructor(properties?: vtctldata.IValidateShardRequest); - /** StartReplicationRequest tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); + /** ValidateShardRequest keyspace. */ + public keyspace: string; + + /** ValidateShardRequest shard. */ + public shard: string; + + /** ValidateShardRequest ping_tablets. */ + public ping_tablets: boolean; /** - * Creates a new StartReplicationRequest instance using the specified properties. + * Creates a new ValidateShardRequest instance using the specified properties. * @param [properties] Properties to set - * @returns StartReplicationRequest instance + * @returns ValidateShardRequest instance */ - public static create(properties?: vtctldata.IStartReplicationRequest): vtctldata.StartReplicationRequest; + public static create(properties?: vtctldata.IValidateShardRequest): vtctldata.ValidateShardRequest; /** - * Encodes the specified StartReplicationRequest message. Does not implicitly {@link vtctldata.StartReplicationRequest.verify|verify} messages. - * @param message StartReplicationRequest message or plain object to encode + * Encodes the specified ValidateShardRequest message. Does not implicitly {@link vtctldata.ValidateShardRequest.verify|verify} messages. 
+ * @param message ValidateShardRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IStartReplicationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IValidateShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified StartReplicationRequest message, length delimited. Does not implicitly {@link vtctldata.StartReplicationRequest.verify|verify} messages. - * @param message StartReplicationRequest message or plain object to encode + * Encodes the specified ValidateShardRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateShardRequest.verify|verify} messages. + * @param message ValidateShardRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IStartReplicationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IValidateShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a StartReplicationRequest message from the specified reader or buffer. + * Decodes a ValidateShardRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns StartReplicationRequest + * @returns ValidateShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.StartReplicationRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateShardRequest; /** - * Decodes a StartReplicationRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a ValidateShardRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns StartReplicationRequest + * @returns ValidateShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.StartReplicationRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateShardRequest; /** - * Verifies a StartReplicationRequest message. + * Verifies a ValidateShardRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a StartReplicationRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateShardRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns StartReplicationRequest + * @returns ValidateShardRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.StartReplicationRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.ValidateShardRequest; /** - * Creates a plain object from a StartReplicationRequest message. Also converts values to other types if specified. - * @param message StartReplicationRequest + * Creates a plain object from a ValidateShardRequest message. Also converts values to other types if specified. 
+ * @param message ValidateShardRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.StartReplicationRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ValidateShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this StartReplicationRequest to JSON. + * Converts this ValidateShardRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for StartReplicationRequest + * Gets the default type url for ValidateShardRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a StartReplicationResponse. */ - interface IStartReplicationResponse { + /** Properties of a ValidateShardResponse. */ + interface IValidateShardResponse { + + /** ValidateShardResponse results */ + results?: (string[]|null); } - /** Represents a StartReplicationResponse. */ - class StartReplicationResponse implements IStartReplicationResponse { + /** Represents a ValidateShardResponse. */ + class ValidateShardResponse implements IValidateShardResponse { /** - * Constructs a new StartReplicationResponse. + * Constructs a new ValidateShardResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IStartReplicationResponse); + constructor(properties?: vtctldata.IValidateShardResponse); + + /** ValidateShardResponse results. */ + public results: string[]; /** - * Creates a new StartReplicationResponse instance using the specified properties. + * Creates a new ValidateShardResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns StartReplicationResponse instance + * @returns ValidateShardResponse instance */ - public static create(properties?: vtctldata.IStartReplicationResponse): vtctldata.StartReplicationResponse; + public static create(properties?: vtctldata.IValidateShardResponse): vtctldata.ValidateShardResponse; /** - * Encodes the specified StartReplicationResponse message. Does not implicitly {@link vtctldata.StartReplicationResponse.verify|verify} messages. - * @param message StartReplicationResponse message or plain object to encode + * Encodes the specified ValidateShardResponse message. Does not implicitly {@link vtctldata.ValidateShardResponse.verify|verify} messages. + * @param message ValidateShardResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IStartReplicationResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IValidateShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified StartReplicationResponse message, length delimited. Does not implicitly {@link vtctldata.StartReplicationResponse.verify|verify} messages. - * @param message StartReplicationResponse message or plain object to encode + * Encodes the specified ValidateShardResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateShardResponse.verify|verify} messages. + * @param message ValidateShardResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IStartReplicationResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IValidateShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a StartReplicationResponse message from the specified reader or buffer. 
+ * Decodes a ValidateShardResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns StartReplicationResponse + * @returns ValidateShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.StartReplicationResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateShardResponse; /** - * Decodes a StartReplicationResponse message from the specified reader or buffer, length delimited. + * Decodes a ValidateShardResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns StartReplicationResponse + * @returns ValidateShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.StartReplicationResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateShardResponse; /** - * Verifies a StartReplicationResponse message. + * Verifies a ValidateShardResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a StartReplicationResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateShardResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns StartReplicationResponse + * @returns ValidateShardResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.StartReplicationResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.ValidateShardResponse; /** - * Creates a plain object from a StartReplicationResponse message. Also converts values to other types if specified. - * @param message StartReplicationResponse + * Creates a plain object from a ValidateShardResponse message. Also converts values to other types if specified. + * @param message ValidateShardResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.StartReplicationResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ValidateShardResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this StartReplicationResponse to JSON. + * Converts this ValidateShardResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for StartReplicationResponse + * Gets the default type url for ValidateShardResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a StopReplicationRequest. */ - interface IStopReplicationRequest { + /** Properties of a ValidateVersionKeyspaceRequest. */ + interface IValidateVersionKeyspaceRequest { - /** StopReplicationRequest tablet_alias */ - tablet_alias?: (topodata.ITabletAlias|null); + /** ValidateVersionKeyspaceRequest keyspace */ + keyspace?: (string|null); } - /** Represents a StopReplicationRequest. */ - class StopReplicationRequest implements IStopReplicationRequest { + /** Represents a ValidateVersionKeyspaceRequest. 
*/ + class ValidateVersionKeyspaceRequest implements IValidateVersionKeyspaceRequest { /** - * Constructs a new StopReplicationRequest. + * Constructs a new ValidateVersionKeyspaceRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IStopReplicationRequest); + constructor(properties?: vtctldata.IValidateVersionKeyspaceRequest); - /** StopReplicationRequest tablet_alias. */ - public tablet_alias?: (topodata.ITabletAlias|null); + /** ValidateVersionKeyspaceRequest keyspace. */ + public keyspace: string; /** - * Creates a new StopReplicationRequest instance using the specified properties. + * Creates a new ValidateVersionKeyspaceRequest instance using the specified properties. * @param [properties] Properties to set - * @returns StopReplicationRequest instance + * @returns ValidateVersionKeyspaceRequest instance */ - public static create(properties?: vtctldata.IStopReplicationRequest): vtctldata.StopReplicationRequest; + public static create(properties?: vtctldata.IValidateVersionKeyspaceRequest): vtctldata.ValidateVersionKeyspaceRequest; /** - * Encodes the specified StopReplicationRequest message. Does not implicitly {@link vtctldata.StopReplicationRequest.verify|verify} messages. - * @param message StopReplicationRequest message or plain object to encode + * Encodes the specified ValidateVersionKeyspaceRequest message. Does not implicitly {@link vtctldata.ValidateVersionKeyspaceRequest.verify|verify} messages. + * @param message ValidateVersionKeyspaceRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IStopReplicationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IValidateVersionKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified StopReplicationRequest message, length delimited. 
Does not implicitly {@link vtctldata.StopReplicationRequest.verify|verify} messages. - * @param message StopReplicationRequest message or plain object to encode + * Encodes the specified ValidateVersionKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateVersionKeyspaceRequest.verify|verify} messages. + * @param message ValidateVersionKeyspaceRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IStopReplicationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IValidateVersionKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a StopReplicationRequest message from the specified reader or buffer. + * Decodes a ValidateVersionKeyspaceRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns StopReplicationRequest + * @returns ValidateVersionKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.StopReplicationRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateVersionKeyspaceRequest; /** - * Decodes a StopReplicationRequest message from the specified reader or buffer, length delimited. + * Decodes a ValidateVersionKeyspaceRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns StopReplicationRequest + * @returns ValidateVersionKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.StopReplicationRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateVersionKeyspaceRequest; /** - * Verifies a StopReplicationRequest message. + * Verifies a ValidateVersionKeyspaceRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a StopReplicationRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateVersionKeyspaceRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns StopReplicationRequest + * @returns ValidateVersionKeyspaceRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.StopReplicationRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.ValidateVersionKeyspaceRequest; /** - * Creates a plain object from a StopReplicationRequest message. Also converts values to other types if specified. - * @param message StopReplicationRequest + * Creates a plain object from a ValidateVersionKeyspaceRequest message. Also converts values to other types if specified. 
+ * @param message ValidateVersionKeyspaceRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.StopReplicationRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ValidateVersionKeyspaceRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this StopReplicationRequest to JSON. + * Converts this ValidateVersionKeyspaceRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for StopReplicationRequest + * Gets the default type url for ValidateVersionKeyspaceRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a StopReplicationResponse. */ - interface IStopReplicationResponse { + /** Properties of a ValidateVersionKeyspaceResponse. */ + interface IValidateVersionKeyspaceResponse { + + /** ValidateVersionKeyspaceResponse results */ + results?: (string[]|null); + + /** ValidateVersionKeyspaceResponse results_by_shard */ + results_by_shard?: ({ [k: string]: vtctldata.IValidateShardResponse }|null); } - /** Represents a StopReplicationResponse. */ - class StopReplicationResponse implements IStopReplicationResponse { + /** Represents a ValidateVersionKeyspaceResponse. */ + class ValidateVersionKeyspaceResponse implements IValidateVersionKeyspaceResponse { /** - * Constructs a new StopReplicationResponse. + * Constructs a new ValidateVersionKeyspaceResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IStopReplicationResponse); + constructor(properties?: vtctldata.IValidateVersionKeyspaceResponse); + + /** ValidateVersionKeyspaceResponse results. */ + public results: string[]; + + /** ValidateVersionKeyspaceResponse results_by_shard. 
*/ + public results_by_shard: { [k: string]: vtctldata.IValidateShardResponse }; /** - * Creates a new StopReplicationResponse instance using the specified properties. + * Creates a new ValidateVersionKeyspaceResponse instance using the specified properties. * @param [properties] Properties to set - * @returns StopReplicationResponse instance + * @returns ValidateVersionKeyspaceResponse instance */ - public static create(properties?: vtctldata.IStopReplicationResponse): vtctldata.StopReplicationResponse; + public static create(properties?: vtctldata.IValidateVersionKeyspaceResponse): vtctldata.ValidateVersionKeyspaceResponse; /** - * Encodes the specified StopReplicationResponse message. Does not implicitly {@link vtctldata.StopReplicationResponse.verify|verify} messages. - * @param message StopReplicationResponse message or plain object to encode + * Encodes the specified ValidateVersionKeyspaceResponse message. Does not implicitly {@link vtctldata.ValidateVersionKeyspaceResponse.verify|verify} messages. + * @param message ValidateVersionKeyspaceResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IStopReplicationResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IValidateVersionKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified StopReplicationResponse message, length delimited. Does not implicitly {@link vtctldata.StopReplicationResponse.verify|verify} messages. - * @param message StopReplicationResponse message or plain object to encode + * Encodes the specified ValidateVersionKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateVersionKeyspaceResponse.verify|verify} messages. 
+ * @param message ValidateVersionKeyspaceResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IStopReplicationResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IValidateVersionKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a StopReplicationResponse message from the specified reader or buffer. + * Decodes a ValidateVersionKeyspaceResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns StopReplicationResponse + * @returns ValidateVersionKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.StopReplicationResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateVersionKeyspaceResponse; /** - * Decodes a StopReplicationResponse message from the specified reader or buffer, length delimited. + * Decodes a ValidateVersionKeyspaceResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns StopReplicationResponse + * @returns ValidateVersionKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.StopReplicationResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateVersionKeyspaceResponse; /** - * Verifies a StopReplicationResponse message. + * Verifies a ValidateVersionKeyspaceResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a StopReplicationResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateVersionKeyspaceResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns StopReplicationResponse + * @returns ValidateVersionKeyspaceResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.StopReplicationResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.ValidateVersionKeyspaceResponse; /** - * Creates a plain object from a StopReplicationResponse message. Also converts values to other types if specified. - * @param message StopReplicationResponse + * Creates a plain object from a ValidateVersionKeyspaceResponse message. Also converts values to other types if specified. + * @param message ValidateVersionKeyspaceResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.StopReplicationResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ValidateVersionKeyspaceResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this StopReplicationResponse to JSON. + * Converts this ValidateVersionKeyspaceResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for StopReplicationResponse + * Gets the default type url for ValidateVersionKeyspaceResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a TabletExternallyReparentedRequest. 
*/ - interface ITabletExternallyReparentedRequest { + /** Properties of a ValidateVersionShardRequest. */ + interface IValidateVersionShardRequest { - /** TabletExternallyReparentedRequest tablet */ - tablet?: (topodata.ITabletAlias|null); + /** ValidateVersionShardRequest keyspace */ + keyspace?: (string|null); + + /** ValidateVersionShardRequest shard */ + shard?: (string|null); } - /** Represents a TabletExternallyReparentedRequest. */ - class TabletExternallyReparentedRequest implements ITabletExternallyReparentedRequest { + /** Represents a ValidateVersionShardRequest. */ + class ValidateVersionShardRequest implements IValidateVersionShardRequest { /** - * Constructs a new TabletExternallyReparentedRequest. + * Constructs a new ValidateVersionShardRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ITabletExternallyReparentedRequest); + constructor(properties?: vtctldata.IValidateVersionShardRequest); - /** TabletExternallyReparentedRequest tablet. */ - public tablet?: (topodata.ITabletAlias|null); + /** ValidateVersionShardRequest keyspace. */ + public keyspace: string; + + /** ValidateVersionShardRequest shard. */ + public shard: string; /** - * Creates a new TabletExternallyReparentedRequest instance using the specified properties. + * Creates a new ValidateVersionShardRequest instance using the specified properties. * @param [properties] Properties to set - * @returns TabletExternallyReparentedRequest instance + * @returns ValidateVersionShardRequest instance */ - public static create(properties?: vtctldata.ITabletExternallyReparentedRequest): vtctldata.TabletExternallyReparentedRequest; + public static create(properties?: vtctldata.IValidateVersionShardRequest): vtctldata.ValidateVersionShardRequest; /** - * Encodes the specified TabletExternallyReparentedRequest message. Does not implicitly {@link vtctldata.TabletExternallyReparentedRequest.verify|verify} messages. 
- * @param message TabletExternallyReparentedRequest message or plain object to encode + * Encodes the specified ValidateVersionShardRequest message. Does not implicitly {@link vtctldata.ValidateVersionShardRequest.verify|verify} messages. + * @param message ValidateVersionShardRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ITabletExternallyReparentedRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IValidateVersionShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified TabletExternallyReparentedRequest message, length delimited. Does not implicitly {@link vtctldata.TabletExternallyReparentedRequest.verify|verify} messages. - * @param message TabletExternallyReparentedRequest message or plain object to encode + * Encodes the specified ValidateVersionShardRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateVersionShardRequest.verify|verify} messages. + * @param message ValidateVersionShardRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ITabletExternallyReparentedRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IValidateVersionShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a TabletExternallyReparentedRequest message from the specified reader or buffer. + * Decodes a ValidateVersionShardRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns TabletExternallyReparentedRequest + * @returns ValidateVersionShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.TabletExternallyReparentedRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateVersionShardRequest; /** - * Decodes a TabletExternallyReparentedRequest message from the specified reader or buffer, length delimited. + * Decodes a ValidateVersionShardRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns TabletExternallyReparentedRequest + * @returns ValidateVersionShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.TabletExternallyReparentedRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateVersionShardRequest; /** - * Verifies a TabletExternallyReparentedRequest message. + * Verifies a ValidateVersionShardRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a TabletExternallyReparentedRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateVersionShardRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns TabletExternallyReparentedRequest + * @returns ValidateVersionShardRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.TabletExternallyReparentedRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.ValidateVersionShardRequest; /** - * Creates a plain object from a TabletExternallyReparentedRequest message. Also converts values to other types if specified. - * @param message TabletExternallyReparentedRequest + * Creates a plain object from a ValidateVersionShardRequest message. Also converts values to other types if specified. + * @param message ValidateVersionShardRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.TabletExternallyReparentedRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ValidateVersionShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this TabletExternallyReparentedRequest to JSON. + * Converts this ValidateVersionShardRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for TabletExternallyReparentedRequest + * Gets the default type url for ValidateVersionShardRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a TabletExternallyReparentedResponse. */ - interface ITabletExternallyReparentedResponse { - - /** TabletExternallyReparentedResponse keyspace */ - keyspace?: (string|null); - - /** TabletExternallyReparentedResponse shard */ - shard?: (string|null); - - /** TabletExternallyReparentedResponse new_primary */ - new_primary?: (topodata.ITabletAlias|null); + /** Properties of a ValidateVersionShardResponse. 
*/ + interface IValidateVersionShardResponse { - /** TabletExternallyReparentedResponse old_primary */ - old_primary?: (topodata.ITabletAlias|null); + /** ValidateVersionShardResponse results */ + results?: (string[]|null); } - /** Represents a TabletExternallyReparentedResponse. */ - class TabletExternallyReparentedResponse implements ITabletExternallyReparentedResponse { + /** Represents a ValidateVersionShardResponse. */ + class ValidateVersionShardResponse implements IValidateVersionShardResponse { /** - * Constructs a new TabletExternallyReparentedResponse. + * Constructs a new ValidateVersionShardResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.ITabletExternallyReparentedResponse); - - /** TabletExternallyReparentedResponse keyspace. */ - public keyspace: string; - - /** TabletExternallyReparentedResponse shard. */ - public shard: string; - - /** TabletExternallyReparentedResponse new_primary. */ - public new_primary?: (topodata.ITabletAlias|null); + constructor(properties?: vtctldata.IValidateVersionShardResponse); - /** TabletExternallyReparentedResponse old_primary. */ - public old_primary?: (topodata.ITabletAlias|null); + /** ValidateVersionShardResponse results. */ + public results: string[]; /** - * Creates a new TabletExternallyReparentedResponse instance using the specified properties. + * Creates a new ValidateVersionShardResponse instance using the specified properties. * @param [properties] Properties to set - * @returns TabletExternallyReparentedResponse instance + * @returns ValidateVersionShardResponse instance */ - public static create(properties?: vtctldata.ITabletExternallyReparentedResponse): vtctldata.TabletExternallyReparentedResponse; + public static create(properties?: vtctldata.IValidateVersionShardResponse): vtctldata.ValidateVersionShardResponse; /** - * Encodes the specified TabletExternallyReparentedResponse message. 
Does not implicitly {@link vtctldata.TabletExternallyReparentedResponse.verify|verify} messages. - * @param message TabletExternallyReparentedResponse message or plain object to encode + * Encodes the specified ValidateVersionShardResponse message. Does not implicitly {@link vtctldata.ValidateVersionShardResponse.verify|verify} messages. + * @param message ValidateVersionShardResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.ITabletExternallyReparentedResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IValidateVersionShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified TabletExternallyReparentedResponse message, length delimited. Does not implicitly {@link vtctldata.TabletExternallyReparentedResponse.verify|verify} messages. - * @param message TabletExternallyReparentedResponse message or plain object to encode + * Encodes the specified ValidateVersionShardResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateVersionShardResponse.verify|verify} messages. + * @param message ValidateVersionShardResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.ITabletExternallyReparentedResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IValidateVersionShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a TabletExternallyReparentedResponse message from the specified reader or buffer. + * Decodes a ValidateVersionShardResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns TabletExternallyReparentedResponse + * @returns ValidateVersionShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.TabletExternallyReparentedResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateVersionShardResponse; /** - * Decodes a TabletExternallyReparentedResponse message from the specified reader or buffer, length delimited. + * Decodes a ValidateVersionShardResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns TabletExternallyReparentedResponse + * @returns ValidateVersionShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.TabletExternallyReparentedResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateVersionShardResponse; /** - * Verifies a TabletExternallyReparentedResponse message. + * Verifies a ValidateVersionShardResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a TabletExternallyReparentedResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateVersionShardResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns TabletExternallyReparentedResponse + * @returns ValidateVersionShardResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.TabletExternallyReparentedResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.ValidateVersionShardResponse; /** - * Creates a plain object from a TabletExternallyReparentedResponse message. Also converts values to other types if specified. - * @param message TabletExternallyReparentedResponse + * Creates a plain object from a ValidateVersionShardResponse message. Also converts values to other types if specified. + * @param message ValidateVersionShardResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.TabletExternallyReparentedResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ValidateVersionShardResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this TabletExternallyReparentedResponse to JSON. + * Converts this ValidateVersionShardResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for TabletExternallyReparentedResponse + * Gets the default type url for ValidateVersionShardResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an UpdateCellInfoRequest. */ - interface IUpdateCellInfoRequest { + /** Properties of a ValidateVSchemaRequest. 
*/ + interface IValidateVSchemaRequest { - /** UpdateCellInfoRequest name */ - name?: (string|null); + /** ValidateVSchemaRequest keyspace */ + keyspace?: (string|null); - /** UpdateCellInfoRequest cell_info */ - cell_info?: (topodata.ICellInfo|null); + /** ValidateVSchemaRequest shards */ + shards?: (string[]|null); + + /** ValidateVSchemaRequest exclude_tables */ + exclude_tables?: (string[]|null); + + /** ValidateVSchemaRequest include_views */ + include_views?: (boolean|null); } - /** Represents an UpdateCellInfoRequest. */ - class UpdateCellInfoRequest implements IUpdateCellInfoRequest { + /** Represents a ValidateVSchemaRequest. */ + class ValidateVSchemaRequest implements IValidateVSchemaRequest { /** - * Constructs a new UpdateCellInfoRequest. + * Constructs a new ValidateVSchemaRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IUpdateCellInfoRequest); + constructor(properties?: vtctldata.IValidateVSchemaRequest); - /** UpdateCellInfoRequest name. */ - public name: string; + /** ValidateVSchemaRequest keyspace. */ + public keyspace: string; - /** UpdateCellInfoRequest cell_info. */ - public cell_info?: (topodata.ICellInfo|null); + /** ValidateVSchemaRequest shards. */ + public shards: string[]; + + /** ValidateVSchemaRequest exclude_tables. */ + public exclude_tables: string[]; + + /** ValidateVSchemaRequest include_views. */ + public include_views: boolean; /** - * Creates a new UpdateCellInfoRequest instance using the specified properties. + * Creates a new ValidateVSchemaRequest instance using the specified properties. * @param [properties] Properties to set - * @returns UpdateCellInfoRequest instance + * @returns ValidateVSchemaRequest instance */ - public static create(properties?: vtctldata.IUpdateCellInfoRequest): vtctldata.UpdateCellInfoRequest; + public static create(properties?: vtctldata.IValidateVSchemaRequest): vtctldata.ValidateVSchemaRequest; /** - * Encodes the specified UpdateCellInfoRequest message. 
Does not implicitly {@link vtctldata.UpdateCellInfoRequest.verify|verify} messages. - * @param message UpdateCellInfoRequest message or plain object to encode + * Encodes the specified ValidateVSchemaRequest message. Does not implicitly {@link vtctldata.ValidateVSchemaRequest.verify|verify} messages. + * @param message ValidateVSchemaRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IUpdateCellInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IValidateVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified UpdateCellInfoRequest message, length delimited. Does not implicitly {@link vtctldata.UpdateCellInfoRequest.verify|verify} messages. - * @param message UpdateCellInfoRequest message or plain object to encode + * Encodes the specified ValidateVSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateVSchemaRequest.verify|verify} messages. + * @param message ValidateVSchemaRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IUpdateCellInfoRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IValidateVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an UpdateCellInfoRequest message from the specified reader or buffer. + * Decodes a ValidateVSchemaRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns UpdateCellInfoRequest + * @returns ValidateVSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.UpdateCellInfoRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateVSchemaRequest; /** - * Decodes an UpdateCellInfoRequest message from the specified reader or buffer, length delimited. + * Decodes a ValidateVSchemaRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns UpdateCellInfoRequest + * @returns ValidateVSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.UpdateCellInfoRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateVSchemaRequest; /** - * Verifies an UpdateCellInfoRequest message. + * Verifies a ValidateVSchemaRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an UpdateCellInfoRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateVSchemaRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns UpdateCellInfoRequest + * @returns ValidateVSchemaRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.UpdateCellInfoRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.ValidateVSchemaRequest; /** - * Creates a plain object from an UpdateCellInfoRequest message. Also converts values to other types if specified. - * @param message UpdateCellInfoRequest + * Creates a plain object from a ValidateVSchemaRequest message. Also converts values to other types if specified. + * @param message ValidateVSchemaRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.UpdateCellInfoRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ValidateVSchemaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this UpdateCellInfoRequest to JSON. + * Converts this ValidateVSchemaRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for UpdateCellInfoRequest + * Gets the default type url for ValidateVSchemaRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an UpdateCellInfoResponse. */ - interface IUpdateCellInfoResponse { + /** Properties of a ValidateVSchemaResponse. */ + interface IValidateVSchemaResponse { - /** UpdateCellInfoResponse name */ - name?: (string|null); + /** ValidateVSchemaResponse results */ + results?: (string[]|null); - /** UpdateCellInfoResponse cell_info */ - cell_info?: (topodata.ICellInfo|null); + /** ValidateVSchemaResponse results_by_shard */ + results_by_shard?: ({ [k: string]: vtctldata.IValidateShardResponse }|null); } - /** Represents an UpdateCellInfoResponse. 
*/ - class UpdateCellInfoResponse implements IUpdateCellInfoResponse { + /** Represents a ValidateVSchemaResponse. */ + class ValidateVSchemaResponse implements IValidateVSchemaResponse { /** - * Constructs a new UpdateCellInfoResponse. + * Constructs a new ValidateVSchemaResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IUpdateCellInfoResponse); + constructor(properties?: vtctldata.IValidateVSchemaResponse); - /** UpdateCellInfoResponse name. */ - public name: string; + /** ValidateVSchemaResponse results. */ + public results: string[]; - /** UpdateCellInfoResponse cell_info. */ - public cell_info?: (topodata.ICellInfo|null); + /** ValidateVSchemaResponse results_by_shard. */ + public results_by_shard: { [k: string]: vtctldata.IValidateShardResponse }; /** - * Creates a new UpdateCellInfoResponse instance using the specified properties. + * Creates a new ValidateVSchemaResponse instance using the specified properties. * @param [properties] Properties to set - * @returns UpdateCellInfoResponse instance + * @returns ValidateVSchemaResponse instance */ - public static create(properties?: vtctldata.IUpdateCellInfoResponse): vtctldata.UpdateCellInfoResponse; + public static create(properties?: vtctldata.IValidateVSchemaResponse): vtctldata.ValidateVSchemaResponse; /** - * Encodes the specified UpdateCellInfoResponse message. Does not implicitly {@link vtctldata.UpdateCellInfoResponse.verify|verify} messages. - * @param message UpdateCellInfoResponse message or plain object to encode + * Encodes the specified ValidateVSchemaResponse message. Does not implicitly {@link vtctldata.ValidateVSchemaResponse.verify|verify} messages. 
+ * @param message ValidateVSchemaResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IUpdateCellInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IValidateVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified UpdateCellInfoResponse message, length delimited. Does not implicitly {@link vtctldata.UpdateCellInfoResponse.verify|verify} messages. - * @param message UpdateCellInfoResponse message or plain object to encode + * Encodes the specified ValidateVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateVSchemaResponse.verify|verify} messages. + * @param message ValidateVSchemaResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IUpdateCellInfoResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IValidateVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an UpdateCellInfoResponse message from the specified reader or buffer. + * Decodes a ValidateVSchemaResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns UpdateCellInfoResponse + * @returns ValidateVSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.UpdateCellInfoResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateVSchemaResponse; /** - * Decodes an UpdateCellInfoResponse message from the specified reader or buffer, length delimited. 
+ * Decodes a ValidateVSchemaResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns UpdateCellInfoResponse + * @returns ValidateVSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.UpdateCellInfoResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateVSchemaResponse; /** - * Verifies an UpdateCellInfoResponse message. + * Verifies a ValidateVSchemaResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an UpdateCellInfoResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateVSchemaResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns UpdateCellInfoResponse + * @returns ValidateVSchemaResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.UpdateCellInfoResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.ValidateVSchemaResponse; /** - * Creates a plain object from an UpdateCellInfoResponse message. Also converts values to other types if specified. - * @param message UpdateCellInfoResponse + * Creates a plain object from a ValidateVSchemaResponse message. Also converts values to other types if specified. 
+ * @param message ValidateVSchemaResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.UpdateCellInfoResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.ValidateVSchemaResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this UpdateCellInfoResponse to JSON. + * Converts this ValidateVSchemaResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for UpdateCellInfoResponse + * Gets the default type url for ValidateVSchemaResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an UpdateCellsAliasRequest. */ - interface IUpdateCellsAliasRequest { + /** Properties of a VDiffCreateRequest. */ + interface IVDiffCreateRequest { + + /** VDiffCreateRequest workflow */ + workflow?: (string|null); + + /** VDiffCreateRequest target_keyspace */ + target_keyspace?: (string|null); + + /** VDiffCreateRequest uuid */ + uuid?: (string|null); + + /** VDiffCreateRequest source_cells */ + source_cells?: (string[]|null); + + /** VDiffCreateRequest target_cells */ + target_cells?: (string[]|null); + + /** VDiffCreateRequest tablet_types */ + tablet_types?: (topodata.TabletType[]|null); + + /** VDiffCreateRequest tablet_selection_preference */ + tablet_selection_preference?: (tabletmanagerdata.TabletSelectionPreference|null); + + /** VDiffCreateRequest tables */ + tables?: (string[]|null); + + /** VDiffCreateRequest limit */ + limit?: (number|Long|null); + + /** VDiffCreateRequest filtered_replication_wait_time */ + filtered_replication_wait_time?: (vttime.IDuration|null); + + /** VDiffCreateRequest debug_query */ + debug_query?: (boolean|null); + + /** VDiffCreateRequest only_p_ks */ + only_p_ks?: (boolean|null); + + 
/** VDiffCreateRequest update_table_stats */ + update_table_stats?: (boolean|null); + + /** VDiffCreateRequest max_extra_rows_to_compare */ + max_extra_rows_to_compare?: (number|Long|null); + + /** VDiffCreateRequest wait */ + wait?: (boolean|null); - /** UpdateCellsAliasRequest name */ - name?: (string|null); + /** VDiffCreateRequest wait_update_interval */ + wait_update_interval?: (vttime.IDuration|null); - /** UpdateCellsAliasRequest cells_alias */ - cells_alias?: (topodata.ICellsAlias|null); + /** VDiffCreateRequest auto_retry */ + auto_retry?: (boolean|null); + + /** VDiffCreateRequest verbose */ + verbose?: (boolean|null); } - /** Represents an UpdateCellsAliasRequest. */ - class UpdateCellsAliasRequest implements IUpdateCellsAliasRequest { + /** Represents a VDiffCreateRequest. */ + class VDiffCreateRequest implements IVDiffCreateRequest { /** - * Constructs a new UpdateCellsAliasRequest. + * Constructs a new VDiffCreateRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IUpdateCellsAliasRequest); + constructor(properties?: vtctldata.IVDiffCreateRequest); - /** UpdateCellsAliasRequest name. */ - public name: string; + /** VDiffCreateRequest workflow. */ + public workflow: string; - /** UpdateCellsAliasRequest cells_alias. */ - public cells_alias?: (topodata.ICellsAlias|null); + /** VDiffCreateRequest target_keyspace. */ + public target_keyspace: string; + + /** VDiffCreateRequest uuid. */ + public uuid: string; + + /** VDiffCreateRequest source_cells. */ + public source_cells: string[]; + + /** VDiffCreateRequest target_cells. */ + public target_cells: string[]; + + /** VDiffCreateRequest tablet_types. */ + public tablet_types: topodata.TabletType[]; + + /** VDiffCreateRequest tablet_selection_preference. */ + public tablet_selection_preference: tabletmanagerdata.TabletSelectionPreference; + + /** VDiffCreateRequest tables. */ + public tables: string[]; + + /** VDiffCreateRequest limit. 
*/ + public limit: (number|Long); + + /** VDiffCreateRequest filtered_replication_wait_time. */ + public filtered_replication_wait_time?: (vttime.IDuration|null); + + /** VDiffCreateRequest debug_query. */ + public debug_query: boolean; + + /** VDiffCreateRequest only_p_ks. */ + public only_p_ks: boolean; + + /** VDiffCreateRequest update_table_stats. */ + public update_table_stats: boolean; + + /** VDiffCreateRequest max_extra_rows_to_compare. */ + public max_extra_rows_to_compare: (number|Long); + + /** VDiffCreateRequest wait. */ + public wait: boolean; + + /** VDiffCreateRequest wait_update_interval. */ + public wait_update_interval?: (vttime.IDuration|null); + + /** VDiffCreateRequest auto_retry. */ + public auto_retry: boolean; + + /** VDiffCreateRequest verbose. */ + public verbose: boolean; /** - * Creates a new UpdateCellsAliasRequest instance using the specified properties. + * Creates a new VDiffCreateRequest instance using the specified properties. * @param [properties] Properties to set - * @returns UpdateCellsAliasRequest instance + * @returns VDiffCreateRequest instance */ - public static create(properties?: vtctldata.IUpdateCellsAliasRequest): vtctldata.UpdateCellsAliasRequest; + public static create(properties?: vtctldata.IVDiffCreateRequest): vtctldata.VDiffCreateRequest; /** - * Encodes the specified UpdateCellsAliasRequest message. Does not implicitly {@link vtctldata.UpdateCellsAliasRequest.verify|verify} messages. - * @param message UpdateCellsAliasRequest message or plain object to encode + * Encodes the specified VDiffCreateRequest message. Does not implicitly {@link vtctldata.VDiffCreateRequest.verify|verify} messages. 
+ * @param message VDiffCreateRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IUpdateCellsAliasRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IVDiffCreateRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified UpdateCellsAliasRequest message, length delimited. Does not implicitly {@link vtctldata.UpdateCellsAliasRequest.verify|verify} messages. - * @param message UpdateCellsAliasRequest message or plain object to encode + * Encodes the specified VDiffCreateRequest message, length delimited. Does not implicitly {@link vtctldata.VDiffCreateRequest.verify|verify} messages. + * @param message VDiffCreateRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IUpdateCellsAliasRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IVDiffCreateRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an UpdateCellsAliasRequest message from the specified reader or buffer. + * Decodes a VDiffCreateRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns UpdateCellsAliasRequest + * @returns VDiffCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.UpdateCellsAliasRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.VDiffCreateRequest; /** - * Decodes an UpdateCellsAliasRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a VDiffCreateRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns UpdateCellsAliasRequest + * @returns VDiffCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.UpdateCellsAliasRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.VDiffCreateRequest; /** - * Verifies an UpdateCellsAliasRequest message. + * Verifies a VDiffCreateRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an UpdateCellsAliasRequest message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffCreateRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns UpdateCellsAliasRequest + * @returns VDiffCreateRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.UpdateCellsAliasRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.VDiffCreateRequest; /** - * Creates a plain object from an UpdateCellsAliasRequest message. Also converts values to other types if specified. - * @param message UpdateCellsAliasRequest + * Creates a plain object from a VDiffCreateRequest message. Also converts values to other types if specified. 
+ * @param message VDiffCreateRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.UpdateCellsAliasRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.VDiffCreateRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this UpdateCellsAliasRequest to JSON. + * Converts this VDiffCreateRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for UpdateCellsAliasRequest + * Gets the default type url for VDiffCreateRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of an UpdateCellsAliasResponse. */ - interface IUpdateCellsAliasResponse { - - /** UpdateCellsAliasResponse name */ - name?: (string|null); + /** Properties of a VDiffCreateResponse. */ + interface IVDiffCreateResponse { - /** UpdateCellsAliasResponse cells_alias */ - cells_alias?: (topodata.ICellsAlias|null); + /** VDiffCreateResponse UUID */ + UUID?: (string|null); } - /** Represents an UpdateCellsAliasResponse. */ - class UpdateCellsAliasResponse implements IUpdateCellsAliasResponse { + /** Represents a VDiffCreateResponse. */ + class VDiffCreateResponse implements IVDiffCreateResponse { /** - * Constructs a new UpdateCellsAliasResponse. + * Constructs a new VDiffCreateResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IUpdateCellsAliasResponse); - - /** UpdateCellsAliasResponse name. */ - public name: string; + constructor(properties?: vtctldata.IVDiffCreateResponse); - /** UpdateCellsAliasResponse cells_alias. */ - public cells_alias?: (topodata.ICellsAlias|null); + /** VDiffCreateResponse UUID. 
*/ + public UUID: string; /** - * Creates a new UpdateCellsAliasResponse instance using the specified properties. + * Creates a new VDiffCreateResponse instance using the specified properties. * @param [properties] Properties to set - * @returns UpdateCellsAliasResponse instance + * @returns VDiffCreateResponse instance */ - public static create(properties?: vtctldata.IUpdateCellsAliasResponse): vtctldata.UpdateCellsAliasResponse; + public static create(properties?: vtctldata.IVDiffCreateResponse): vtctldata.VDiffCreateResponse; /** - * Encodes the specified UpdateCellsAliasResponse message. Does not implicitly {@link vtctldata.UpdateCellsAliasResponse.verify|verify} messages. - * @param message UpdateCellsAliasResponse message or plain object to encode + * Encodes the specified VDiffCreateResponse message. Does not implicitly {@link vtctldata.VDiffCreateResponse.verify|verify} messages. + * @param message VDiffCreateResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IUpdateCellsAliasResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IVDiffCreateResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified UpdateCellsAliasResponse message, length delimited. Does not implicitly {@link vtctldata.UpdateCellsAliasResponse.verify|verify} messages. - * @param message UpdateCellsAliasResponse message or plain object to encode + * Encodes the specified VDiffCreateResponse message, length delimited. Does not implicitly {@link vtctldata.VDiffCreateResponse.verify|verify} messages. 
+ * @param message VDiffCreateResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IUpdateCellsAliasResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IVDiffCreateResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes an UpdateCellsAliasResponse message from the specified reader or buffer. + * Decodes a VDiffCreateResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns UpdateCellsAliasResponse + * @returns VDiffCreateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.UpdateCellsAliasResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.VDiffCreateResponse; /** - * Decodes an UpdateCellsAliasResponse message from the specified reader or buffer, length delimited. + * Decodes a VDiffCreateResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns UpdateCellsAliasResponse + * @returns VDiffCreateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.UpdateCellsAliasResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.VDiffCreateResponse; /** - * Verifies an UpdateCellsAliasResponse message. + * Verifies a VDiffCreateResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates an UpdateCellsAliasResponse message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffCreateResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns UpdateCellsAliasResponse + * @returns VDiffCreateResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.UpdateCellsAliasResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.VDiffCreateResponse; /** - * Creates a plain object from an UpdateCellsAliasResponse message. Also converts values to other types if specified. - * @param message UpdateCellsAliasResponse + * Creates a plain object from a VDiffCreateResponse message. Also converts values to other types if specified. + * @param message VDiffCreateResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.UpdateCellsAliasResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.VDiffCreateResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this UpdateCellsAliasResponse to JSON. + * Converts this VDiffCreateResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for UpdateCellsAliasResponse + * Gets the default type url for VDiffCreateResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ValidateRequest. */ - interface IValidateRequest { + /** Properties of a VDiffDeleteRequest. 
*/ + interface IVDiffDeleteRequest { - /** ValidateRequest ping_tablets */ - ping_tablets?: (boolean|null); + /** VDiffDeleteRequest workflow */ + workflow?: (string|null); + + /** VDiffDeleteRequest target_keyspace */ + target_keyspace?: (string|null); + + /** VDiffDeleteRequest arg */ + arg?: (string|null); } - /** Represents a ValidateRequest. */ - class ValidateRequest implements IValidateRequest { + /** Represents a VDiffDeleteRequest. */ + class VDiffDeleteRequest implements IVDiffDeleteRequest { /** - * Constructs a new ValidateRequest. + * Constructs a new VDiffDeleteRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IValidateRequest); + constructor(properties?: vtctldata.IVDiffDeleteRequest); - /** ValidateRequest ping_tablets. */ - public ping_tablets: boolean; + /** VDiffDeleteRequest workflow. */ + public workflow: string; + + /** VDiffDeleteRequest target_keyspace. */ + public target_keyspace: string; + + /** VDiffDeleteRequest arg. */ + public arg: string; /** - * Creates a new ValidateRequest instance using the specified properties. + * Creates a new VDiffDeleteRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ValidateRequest instance + * @returns VDiffDeleteRequest instance */ - public static create(properties?: vtctldata.IValidateRequest): vtctldata.ValidateRequest; + public static create(properties?: vtctldata.IVDiffDeleteRequest): vtctldata.VDiffDeleteRequest; /** - * Encodes the specified ValidateRequest message. Does not implicitly {@link vtctldata.ValidateRequest.verify|verify} messages. - * @param message ValidateRequest message or plain object to encode + * Encodes the specified VDiffDeleteRequest message. Does not implicitly {@link vtctldata.VDiffDeleteRequest.verify|verify} messages. 
+ * @param message VDiffDeleteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IValidateRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IVDiffDeleteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ValidateRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateRequest.verify|verify} messages. - * @param message ValidateRequest message or plain object to encode + * Encodes the specified VDiffDeleteRequest message, length delimited. Does not implicitly {@link vtctldata.VDiffDeleteRequest.verify|verify} messages. + * @param message VDiffDeleteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IValidateRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IVDiffDeleteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ValidateRequest message from the specified reader or buffer. + * Decodes a VDiffDeleteRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ValidateRequest + * @returns VDiffDeleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.VDiffDeleteRequest; /** - * Decodes a ValidateRequest message from the specified reader or buffer, length delimited. + * Decodes a VDiffDeleteRequest message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns ValidateRequest + * @returns VDiffDeleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.VDiffDeleteRequest; /** - * Verifies a ValidateRequest message. + * Verifies a VDiffDeleteRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ValidateRequest message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffDeleteRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ValidateRequest + * @returns VDiffDeleteRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ValidateRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.VDiffDeleteRequest; /** - * Creates a plain object from a ValidateRequest message. Also converts values to other types if specified. - * @param message ValidateRequest + * Creates a plain object from a VDiffDeleteRequest message. Also converts values to other types if specified. + * @param message VDiffDeleteRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ValidateRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.VDiffDeleteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ValidateRequest to JSON. + * Converts this VDiffDeleteRequest to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ValidateRequest + * Gets the default type url for VDiffDeleteRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ValidateResponse. */ - interface IValidateResponse { - - /** ValidateResponse results */ - results?: (string[]|null); - - /** ValidateResponse results_by_keyspace */ - results_by_keyspace?: ({ [k: string]: vtctldata.IValidateKeyspaceResponse }|null); + /** Properties of a VDiffDeleteResponse. */ + interface IVDiffDeleteResponse { } - /** Represents a ValidateResponse. */ - class ValidateResponse implements IValidateResponse { + /** Represents a VDiffDeleteResponse. */ + class VDiffDeleteResponse implements IVDiffDeleteResponse { /** - * Constructs a new ValidateResponse. + * Constructs a new VDiffDeleteResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IValidateResponse); - - /** ValidateResponse results. */ - public results: string[]; - - /** ValidateResponse results_by_keyspace. */ - public results_by_keyspace: { [k: string]: vtctldata.IValidateKeyspaceResponse }; + constructor(properties?: vtctldata.IVDiffDeleteResponse); /** - * Creates a new ValidateResponse instance using the specified properties. + * Creates a new VDiffDeleteResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ValidateResponse instance + * @returns VDiffDeleteResponse instance */ - public static create(properties?: vtctldata.IValidateResponse): vtctldata.ValidateResponse; + public static create(properties?: vtctldata.IVDiffDeleteResponse): vtctldata.VDiffDeleteResponse; /** - * Encodes the specified ValidateResponse message. Does not implicitly {@link vtctldata.ValidateResponse.verify|verify} messages. 
- * @param message ValidateResponse message or plain object to encode + * Encodes the specified VDiffDeleteResponse message. Does not implicitly {@link vtctldata.VDiffDeleteResponse.verify|verify} messages. + * @param message VDiffDeleteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IValidateResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IVDiffDeleteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ValidateResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateResponse.verify|verify} messages. - * @param message ValidateResponse message or plain object to encode + * Encodes the specified VDiffDeleteResponse message, length delimited. Does not implicitly {@link vtctldata.VDiffDeleteResponse.verify|verify} messages. + * @param message VDiffDeleteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IValidateResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IVDiffDeleteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ValidateResponse message from the specified reader or buffer. + * Decodes a VDiffDeleteResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ValidateResponse + * @returns VDiffDeleteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.VDiffDeleteResponse; /** - * Decodes a ValidateResponse message from the specified reader or buffer, length delimited. + * Decodes a VDiffDeleteResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ValidateResponse + * @returns VDiffDeleteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.VDiffDeleteResponse; /** - * Verifies a ValidateResponse message. + * Verifies a VDiffDeleteResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ValidateResponse message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffDeleteResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ValidateResponse + * @returns VDiffDeleteResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ValidateResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.VDiffDeleteResponse; /** - * Creates a plain object from a ValidateResponse message. Also converts values to other types if specified. - * @param message ValidateResponse + * Creates a plain object from a VDiffDeleteResponse message. Also converts values to other types if specified. + * @param message VDiffDeleteResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ValidateResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.VDiffDeleteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ValidateResponse to JSON. + * Converts this VDiffDeleteResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ValidateResponse + * Gets the default type url for VDiffDeleteResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ValidateKeyspaceRequest. */ - interface IValidateKeyspaceRequest { + /** Properties of a VDiffResumeRequest. */ + interface IVDiffResumeRequest { - /** ValidateKeyspaceRequest keyspace */ - keyspace?: (string|null); + /** VDiffResumeRequest workflow */ + workflow?: (string|null); - /** ValidateKeyspaceRequest ping_tablets */ - ping_tablets?: (boolean|null); + /** VDiffResumeRequest target_keyspace */ + target_keyspace?: (string|null); + + /** VDiffResumeRequest uuid */ + uuid?: (string|null); } - /** Represents a ValidateKeyspaceRequest. 
*/ - class ValidateKeyspaceRequest implements IValidateKeyspaceRequest { + /** Represents a VDiffResumeRequest. */ + class VDiffResumeRequest implements IVDiffResumeRequest { /** - * Constructs a new ValidateKeyspaceRequest. + * Constructs a new VDiffResumeRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IValidateKeyspaceRequest); + constructor(properties?: vtctldata.IVDiffResumeRequest); - /** ValidateKeyspaceRequest keyspace. */ - public keyspace: string; + /** VDiffResumeRequest workflow. */ + public workflow: string; - /** ValidateKeyspaceRequest ping_tablets. */ - public ping_tablets: boolean; + /** VDiffResumeRequest target_keyspace. */ + public target_keyspace: string; + + /** VDiffResumeRequest uuid. */ + public uuid: string; /** - * Creates a new ValidateKeyspaceRequest instance using the specified properties. + * Creates a new VDiffResumeRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ValidateKeyspaceRequest instance + * @returns VDiffResumeRequest instance */ - public static create(properties?: vtctldata.IValidateKeyspaceRequest): vtctldata.ValidateKeyspaceRequest; + public static create(properties?: vtctldata.IVDiffResumeRequest): vtctldata.VDiffResumeRequest; /** - * Encodes the specified ValidateKeyspaceRequest message. Does not implicitly {@link vtctldata.ValidateKeyspaceRequest.verify|verify} messages. - * @param message ValidateKeyspaceRequest message or plain object to encode + * Encodes the specified VDiffResumeRequest message. Does not implicitly {@link vtctldata.VDiffResumeRequest.verify|verify} messages. 
+ * @param message VDiffResumeRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IValidateKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IVDiffResumeRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ValidateKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateKeyspaceRequest.verify|verify} messages. - * @param message ValidateKeyspaceRequest message or plain object to encode + * Encodes the specified VDiffResumeRequest message, length delimited. Does not implicitly {@link vtctldata.VDiffResumeRequest.verify|verify} messages. + * @param message VDiffResumeRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IValidateKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IVDiffResumeRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ValidateKeyspaceRequest message from the specified reader or buffer. + * Decodes a VDiffResumeRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ValidateKeyspaceRequest + * @returns VDiffResumeRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateKeyspaceRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.VDiffResumeRequest; /** - * Decodes a ValidateKeyspaceRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a VDiffResumeRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ValidateKeyspaceRequest + * @returns VDiffResumeRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateKeyspaceRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.VDiffResumeRequest; /** - * Verifies a ValidateKeyspaceRequest message. + * Verifies a VDiffResumeRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ValidateKeyspaceRequest message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffResumeRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ValidateKeyspaceRequest + * @returns VDiffResumeRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ValidateKeyspaceRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.VDiffResumeRequest; /** - * Creates a plain object from a ValidateKeyspaceRequest message. Also converts values to other types if specified. - * @param message ValidateKeyspaceRequest + * Creates a plain object from a VDiffResumeRequest message. Also converts values to other types if specified. 
+ * @param message VDiffResumeRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ValidateKeyspaceRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.VDiffResumeRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ValidateKeyspaceRequest to JSON. + * Converts this VDiffResumeRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ValidateKeyspaceRequest + * Gets the default type url for VDiffResumeRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ValidateKeyspaceResponse. */ - interface IValidateKeyspaceResponse { - - /** ValidateKeyspaceResponse results */ - results?: (string[]|null); - - /** ValidateKeyspaceResponse results_by_shard */ - results_by_shard?: ({ [k: string]: vtctldata.IValidateShardResponse }|null); + /** Properties of a VDiffResumeResponse. */ + interface IVDiffResumeResponse { } - /** Represents a ValidateKeyspaceResponse. */ - class ValidateKeyspaceResponse implements IValidateKeyspaceResponse { + /** Represents a VDiffResumeResponse. */ + class VDiffResumeResponse implements IVDiffResumeResponse { /** - * Constructs a new ValidateKeyspaceResponse. + * Constructs a new VDiffResumeResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IValidateKeyspaceResponse); - - /** ValidateKeyspaceResponse results. */ - public results: string[]; - - /** ValidateKeyspaceResponse results_by_shard. */ - public results_by_shard: { [k: string]: vtctldata.IValidateShardResponse }; + constructor(properties?: vtctldata.IVDiffResumeResponse); /** - * Creates a new ValidateKeyspaceResponse instance using the specified properties. 
+ * Creates a new VDiffResumeResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ValidateKeyspaceResponse instance + * @returns VDiffResumeResponse instance */ - public static create(properties?: vtctldata.IValidateKeyspaceResponse): vtctldata.ValidateKeyspaceResponse; + public static create(properties?: vtctldata.IVDiffResumeResponse): vtctldata.VDiffResumeResponse; /** - * Encodes the specified ValidateKeyspaceResponse message. Does not implicitly {@link vtctldata.ValidateKeyspaceResponse.verify|verify} messages. - * @param message ValidateKeyspaceResponse message or plain object to encode + * Encodes the specified VDiffResumeResponse message. Does not implicitly {@link vtctldata.VDiffResumeResponse.verify|verify} messages. + * @param message VDiffResumeResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IValidateKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IVDiffResumeResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ValidateKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateKeyspaceResponse.verify|verify} messages. - * @param message ValidateKeyspaceResponse message or plain object to encode + * Encodes the specified VDiffResumeResponse message, length delimited. Does not implicitly {@link vtctldata.VDiffResumeResponse.verify|verify} messages. 
+ * @param message VDiffResumeResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IValidateKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IVDiffResumeResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ValidateKeyspaceResponse message from the specified reader or buffer. + * Decodes a VDiffResumeResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ValidateKeyspaceResponse + * @returns VDiffResumeResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateKeyspaceResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.VDiffResumeResponse; /** - * Decodes a ValidateKeyspaceResponse message from the specified reader or buffer, length delimited. + * Decodes a VDiffResumeResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ValidateKeyspaceResponse + * @returns VDiffResumeResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateKeyspaceResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.VDiffResumeResponse; /** - * Verifies a ValidateKeyspaceResponse message. + * Verifies a VDiffResumeResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ValidateKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffResumeResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ValidateKeyspaceResponse + * @returns VDiffResumeResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ValidateKeyspaceResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.VDiffResumeResponse; /** - * Creates a plain object from a ValidateKeyspaceResponse message. Also converts values to other types if specified. - * @param message ValidateKeyspaceResponse + * Creates a plain object from a VDiffResumeResponse message. Also converts values to other types if specified. + * @param message VDiffResumeResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ValidateKeyspaceResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.VDiffResumeResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ValidateKeyspaceResponse to JSON. + * Converts this VDiffResumeResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ValidateKeyspaceResponse + * Gets the default type url for VDiffResumeResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ValidateSchemaKeyspaceRequest. 
*/ - interface IValidateSchemaKeyspaceRequest { - - /** ValidateSchemaKeyspaceRequest keyspace */ - keyspace?: (string|null); - - /** ValidateSchemaKeyspaceRequest exclude_tables */ - exclude_tables?: (string[]|null); + /** Properties of a VDiffShowRequest. */ + interface IVDiffShowRequest { - /** ValidateSchemaKeyspaceRequest include_views */ - include_views?: (boolean|null); + /** VDiffShowRequest workflow */ + workflow?: (string|null); - /** ValidateSchemaKeyspaceRequest skip_no_primary */ - skip_no_primary?: (boolean|null); + /** VDiffShowRequest target_keyspace */ + target_keyspace?: (string|null); - /** ValidateSchemaKeyspaceRequest include_vschema */ - include_vschema?: (boolean|null); + /** VDiffShowRequest arg */ + arg?: (string|null); } - /** Represents a ValidateSchemaKeyspaceRequest. */ - class ValidateSchemaKeyspaceRequest implements IValidateSchemaKeyspaceRequest { + /** Represents a VDiffShowRequest. */ + class VDiffShowRequest implements IVDiffShowRequest { /** - * Constructs a new ValidateSchemaKeyspaceRequest. + * Constructs a new VDiffShowRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IValidateSchemaKeyspaceRequest); - - /** ValidateSchemaKeyspaceRequest keyspace. */ - public keyspace: string; - - /** ValidateSchemaKeyspaceRequest exclude_tables. */ - public exclude_tables: string[]; + constructor(properties?: vtctldata.IVDiffShowRequest); - /** ValidateSchemaKeyspaceRequest include_views. */ - public include_views: boolean; + /** VDiffShowRequest workflow. */ + public workflow: string; - /** ValidateSchemaKeyspaceRequest skip_no_primary. */ - public skip_no_primary: boolean; + /** VDiffShowRequest target_keyspace. */ + public target_keyspace: string; - /** ValidateSchemaKeyspaceRequest include_vschema. */ - public include_vschema: boolean; + /** VDiffShowRequest arg. */ + public arg: string; /** - * Creates a new ValidateSchemaKeyspaceRequest instance using the specified properties. 
+ * Creates a new VDiffShowRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ValidateSchemaKeyspaceRequest instance + * @returns VDiffShowRequest instance */ - public static create(properties?: vtctldata.IValidateSchemaKeyspaceRequest): vtctldata.ValidateSchemaKeyspaceRequest; + public static create(properties?: vtctldata.IVDiffShowRequest): vtctldata.VDiffShowRequest; /** - * Encodes the specified ValidateSchemaKeyspaceRequest message. Does not implicitly {@link vtctldata.ValidateSchemaKeyspaceRequest.verify|verify} messages. - * @param message ValidateSchemaKeyspaceRequest message or plain object to encode + * Encodes the specified VDiffShowRequest message. Does not implicitly {@link vtctldata.VDiffShowRequest.verify|verify} messages. + * @param message VDiffShowRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IValidateSchemaKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IVDiffShowRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ValidateSchemaKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateSchemaKeyspaceRequest.verify|verify} messages. - * @param message ValidateSchemaKeyspaceRequest message or plain object to encode + * Encodes the specified VDiffShowRequest message, length delimited. Does not implicitly {@link vtctldata.VDiffShowRequest.verify|verify} messages. 
+ * @param message VDiffShowRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IValidateSchemaKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IVDiffShowRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ValidateSchemaKeyspaceRequest message from the specified reader or buffer. + * Decodes a VDiffShowRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ValidateSchemaKeyspaceRequest + * @returns VDiffShowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateSchemaKeyspaceRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.VDiffShowRequest; /** - * Decodes a ValidateSchemaKeyspaceRequest message from the specified reader or buffer, length delimited. + * Decodes a VDiffShowRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ValidateSchemaKeyspaceRequest + * @returns VDiffShowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateSchemaKeyspaceRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.VDiffShowRequest; /** - * Verifies a ValidateSchemaKeyspaceRequest message. + * Verifies a VDiffShowRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ValidateSchemaKeyspaceRequest message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffShowRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ValidateSchemaKeyspaceRequest + * @returns VDiffShowRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ValidateSchemaKeyspaceRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.VDiffShowRequest; /** - * Creates a plain object from a ValidateSchemaKeyspaceRequest message. Also converts values to other types if specified. - * @param message ValidateSchemaKeyspaceRequest + * Creates a plain object from a VDiffShowRequest message. Also converts values to other types if specified. + * @param message VDiffShowRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ValidateSchemaKeyspaceRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.VDiffShowRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ValidateSchemaKeyspaceRequest to JSON. + * Converts this VDiffShowRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ValidateSchemaKeyspaceRequest + * Gets the default type url for VDiffShowRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ValidateSchemaKeyspaceResponse. 
*/ - interface IValidateSchemaKeyspaceResponse { - - /** ValidateSchemaKeyspaceResponse results */ - results?: (string[]|null); + /** Properties of a VDiffShowResponse. */ + interface IVDiffShowResponse { - /** ValidateSchemaKeyspaceResponse results_by_shard */ - results_by_shard?: ({ [k: string]: vtctldata.IValidateShardResponse }|null); + /** VDiffShowResponse tablet_responses */ + tablet_responses?: ({ [k: string]: tabletmanagerdata.IVDiffResponse }|null); } - /** Represents a ValidateSchemaKeyspaceResponse. */ - class ValidateSchemaKeyspaceResponse implements IValidateSchemaKeyspaceResponse { + /** Represents a VDiffShowResponse. */ + class VDiffShowResponse implements IVDiffShowResponse { /** - * Constructs a new ValidateSchemaKeyspaceResponse. + * Constructs a new VDiffShowResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IValidateSchemaKeyspaceResponse); - - /** ValidateSchemaKeyspaceResponse results. */ - public results: string[]; + constructor(properties?: vtctldata.IVDiffShowResponse); - /** ValidateSchemaKeyspaceResponse results_by_shard. */ - public results_by_shard: { [k: string]: vtctldata.IValidateShardResponse }; + /** VDiffShowResponse tablet_responses. */ + public tablet_responses: { [k: string]: tabletmanagerdata.IVDiffResponse }; /** - * Creates a new ValidateSchemaKeyspaceResponse instance using the specified properties. + * Creates a new VDiffShowResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ValidateSchemaKeyspaceResponse instance + * @returns VDiffShowResponse instance */ - public static create(properties?: vtctldata.IValidateSchemaKeyspaceResponse): vtctldata.ValidateSchemaKeyspaceResponse; + public static create(properties?: vtctldata.IVDiffShowResponse): vtctldata.VDiffShowResponse; /** - * Encodes the specified ValidateSchemaKeyspaceResponse message. Does not implicitly {@link vtctldata.ValidateSchemaKeyspaceResponse.verify|verify} messages. 
- * @param message ValidateSchemaKeyspaceResponse message or plain object to encode + * Encodes the specified VDiffShowResponse message. Does not implicitly {@link vtctldata.VDiffShowResponse.verify|verify} messages. + * @param message VDiffShowResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IValidateSchemaKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IVDiffShowResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ValidateSchemaKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateSchemaKeyspaceResponse.verify|verify} messages. - * @param message ValidateSchemaKeyspaceResponse message or plain object to encode + * Encodes the specified VDiffShowResponse message, length delimited. Does not implicitly {@link vtctldata.VDiffShowResponse.verify|verify} messages. + * @param message VDiffShowResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IValidateSchemaKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IVDiffShowResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ValidateSchemaKeyspaceResponse message from the specified reader or buffer. + * Decodes a VDiffShowResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ValidateSchemaKeyspaceResponse + * @returns VDiffShowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateSchemaKeyspaceResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.VDiffShowResponse; /** - * Decodes a ValidateSchemaKeyspaceResponse message from the specified reader or buffer, length delimited. + * Decodes a VDiffShowResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ValidateSchemaKeyspaceResponse + * @returns VDiffShowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateSchemaKeyspaceResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.VDiffShowResponse; /** - * Verifies a ValidateSchemaKeyspaceResponse message. + * Verifies a VDiffShowResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ValidateSchemaKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffShowResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ValidateSchemaKeyspaceResponse + * @returns VDiffShowResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ValidateSchemaKeyspaceResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.VDiffShowResponse; /** - * Creates a plain object from a ValidateSchemaKeyspaceResponse message. Also converts values to other types if specified. - * @param message ValidateSchemaKeyspaceResponse + * Creates a plain object from a VDiffShowResponse message. Also converts values to other types if specified. + * @param message VDiffShowResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ValidateSchemaKeyspaceResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.VDiffShowResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ValidateSchemaKeyspaceResponse to JSON. + * Converts this VDiffShowResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ValidateSchemaKeyspaceResponse + * Gets the default type url for VDiffShowResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ValidateShardRequest. */ - interface IValidateShardRequest { + /** Properties of a VDiffStopRequest. 
*/ + interface IVDiffStopRequest { - /** ValidateShardRequest keyspace */ - keyspace?: (string|null); + /** VDiffStopRequest workflow */ + workflow?: (string|null); - /** ValidateShardRequest shard */ - shard?: (string|null); + /** VDiffStopRequest target_keyspace */ + target_keyspace?: (string|null); - /** ValidateShardRequest ping_tablets */ - ping_tablets?: (boolean|null); + /** VDiffStopRequest uuid */ + uuid?: (string|null); } - /** Represents a ValidateShardRequest. */ - class ValidateShardRequest implements IValidateShardRequest { + /** Represents a VDiffStopRequest. */ + class VDiffStopRequest implements IVDiffStopRequest { /** - * Constructs a new ValidateShardRequest. + * Constructs a new VDiffStopRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IValidateShardRequest); + constructor(properties?: vtctldata.IVDiffStopRequest); - /** ValidateShardRequest keyspace. */ - public keyspace: string; + /** VDiffStopRequest workflow. */ + public workflow: string; - /** ValidateShardRequest shard. */ - public shard: string; + /** VDiffStopRequest target_keyspace. */ + public target_keyspace: string; - /** ValidateShardRequest ping_tablets. */ - public ping_tablets: boolean; + /** VDiffStopRequest uuid. */ + public uuid: string; /** - * Creates a new ValidateShardRequest instance using the specified properties. + * Creates a new VDiffStopRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ValidateShardRequest instance + * @returns VDiffStopRequest instance */ - public static create(properties?: vtctldata.IValidateShardRequest): vtctldata.ValidateShardRequest; + public static create(properties?: vtctldata.IVDiffStopRequest): vtctldata.VDiffStopRequest; /** - * Encodes the specified ValidateShardRequest message. Does not implicitly {@link vtctldata.ValidateShardRequest.verify|verify} messages. 
- * @param message ValidateShardRequest message or plain object to encode + * Encodes the specified VDiffStopRequest message. Does not implicitly {@link vtctldata.VDiffStopRequest.verify|verify} messages. + * @param message VDiffStopRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IValidateShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IVDiffStopRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ValidateShardRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateShardRequest.verify|verify} messages. - * @param message ValidateShardRequest message or plain object to encode + * Encodes the specified VDiffStopRequest message, length delimited. Does not implicitly {@link vtctldata.VDiffStopRequest.verify|verify} messages. + * @param message VDiffStopRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IValidateShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IVDiffStopRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ValidateShardRequest message from the specified reader or buffer. + * Decodes a VDiffStopRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ValidateShardRequest + * @returns VDiffStopRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateShardRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.VDiffStopRequest; /** - * Decodes a ValidateShardRequest message from the specified reader or buffer, length delimited. + * Decodes a VDiffStopRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ValidateShardRequest + * @returns VDiffStopRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateShardRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.VDiffStopRequest; /** - * Verifies a ValidateShardRequest message. + * Verifies a VDiffStopRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ValidateShardRequest message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffStopRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ValidateShardRequest + * @returns VDiffStopRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ValidateShardRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.VDiffStopRequest; /** - * Creates a plain object from a ValidateShardRequest message. Also converts values to other types if specified. - * @param message ValidateShardRequest + * Creates a plain object from a VDiffStopRequest message. Also converts values to other types if specified. + * @param message VDiffStopRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ValidateShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.VDiffStopRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ValidateShardRequest to JSON. + * Converts this VDiffStopRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ValidateShardRequest + * Gets the default type url for VDiffStopRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ValidateShardResponse. */ - interface IValidateShardResponse { - - /** ValidateShardResponse results */ - results?: (string[]|null); + /** Properties of a VDiffStopResponse. */ + interface IVDiffStopResponse { } - /** Represents a ValidateShardResponse. */ - class ValidateShardResponse implements IValidateShardResponse { + /** Represents a VDiffStopResponse. */ + class VDiffStopResponse implements IVDiffStopResponse { /** - * Constructs a new ValidateShardResponse. + * Constructs a new VDiffStopResponse. 
* @param [properties] Properties to set */ - constructor(properties?: vtctldata.IValidateShardResponse); - - /** ValidateShardResponse results. */ - public results: string[]; + constructor(properties?: vtctldata.IVDiffStopResponse); /** - * Creates a new ValidateShardResponse instance using the specified properties. + * Creates a new VDiffStopResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ValidateShardResponse instance + * @returns VDiffStopResponse instance */ - public static create(properties?: vtctldata.IValidateShardResponse): vtctldata.ValidateShardResponse; + public static create(properties?: vtctldata.IVDiffStopResponse): vtctldata.VDiffStopResponse; /** - * Encodes the specified ValidateShardResponse message. Does not implicitly {@link vtctldata.ValidateShardResponse.verify|verify} messages. - * @param message ValidateShardResponse message or plain object to encode + * Encodes the specified VDiffStopResponse message. Does not implicitly {@link vtctldata.VDiffStopResponse.verify|verify} messages. + * @param message VDiffStopResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IValidateShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IVDiffStopResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ValidateShardResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateShardResponse.verify|verify} messages. - * @param message ValidateShardResponse message or plain object to encode + * Encodes the specified VDiffStopResponse message, length delimited. Does not implicitly {@link vtctldata.VDiffStopResponse.verify|verify} messages. 
+ * @param message VDiffStopResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IValidateShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IVDiffStopResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ValidateShardResponse message from the specified reader or buffer. + * Decodes a VDiffStopResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ValidateShardResponse + * @returns VDiffStopResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateShardResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.VDiffStopResponse; /** - * Decodes a ValidateShardResponse message from the specified reader or buffer, length delimited. + * Decodes a VDiffStopResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ValidateShardResponse + * @returns VDiffStopResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateShardResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.VDiffStopResponse; /** - * Verifies a ValidateShardResponse message. + * Verifies a VDiffStopResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ValidateShardResponse message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffStopResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ValidateShardResponse + * @returns VDiffStopResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ValidateShardResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.VDiffStopResponse; /** - * Creates a plain object from a ValidateShardResponse message. Also converts values to other types if specified. - * @param message ValidateShardResponse + * Creates a plain object from a VDiffStopResponse message. Also converts values to other types if specified. + * @param message VDiffStopResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ValidateShardResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.VDiffStopResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ValidateShardResponse to JSON. + * Converts this VDiffStopResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ValidateShardResponse + * Gets the default type url for VDiffStopResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ValidateVersionKeyspaceRequest. */ - interface IValidateVersionKeyspaceRequest { + /** Properties of a WorkflowDeleteRequest. 
*/ + interface IWorkflowDeleteRequest { - /** ValidateVersionKeyspaceRequest keyspace */ + /** WorkflowDeleteRequest keyspace */ keyspace?: (string|null); + + /** WorkflowDeleteRequest workflow */ + workflow?: (string|null); + + /** WorkflowDeleteRequest keep_data */ + keep_data?: (boolean|null); + + /** WorkflowDeleteRequest keep_routing_rules */ + keep_routing_rules?: (boolean|null); } - /** Represents a ValidateVersionKeyspaceRequest. */ - class ValidateVersionKeyspaceRequest implements IValidateVersionKeyspaceRequest { + /** Represents a WorkflowDeleteRequest. */ + class WorkflowDeleteRequest implements IWorkflowDeleteRequest { /** - * Constructs a new ValidateVersionKeyspaceRequest. + * Constructs a new WorkflowDeleteRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IValidateVersionKeyspaceRequest); + constructor(properties?: vtctldata.IWorkflowDeleteRequest); - /** ValidateVersionKeyspaceRequest keyspace. */ + /** WorkflowDeleteRequest keyspace. */ public keyspace: string; + /** WorkflowDeleteRequest workflow. */ + public workflow: string; + + /** WorkflowDeleteRequest keep_data. */ + public keep_data: boolean; + + /** WorkflowDeleteRequest keep_routing_rules. */ + public keep_routing_rules: boolean; + /** - * Creates a new ValidateVersionKeyspaceRequest instance using the specified properties. + * Creates a new WorkflowDeleteRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ValidateVersionKeyspaceRequest instance + * @returns WorkflowDeleteRequest instance */ - public static create(properties?: vtctldata.IValidateVersionKeyspaceRequest): vtctldata.ValidateVersionKeyspaceRequest; + public static create(properties?: vtctldata.IWorkflowDeleteRequest): vtctldata.WorkflowDeleteRequest; /** - * Encodes the specified ValidateVersionKeyspaceRequest message. Does not implicitly {@link vtctldata.ValidateVersionKeyspaceRequest.verify|verify} messages. 
- * @param message ValidateVersionKeyspaceRequest message or plain object to encode + * Encodes the specified WorkflowDeleteRequest message. Does not implicitly {@link vtctldata.WorkflowDeleteRequest.verify|verify} messages. + * @param message WorkflowDeleteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IValidateVersionKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IWorkflowDeleteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ValidateVersionKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateVersionKeyspaceRequest.verify|verify} messages. - * @param message ValidateVersionKeyspaceRequest message or plain object to encode + * Encodes the specified WorkflowDeleteRequest message, length delimited. Does not implicitly {@link vtctldata.WorkflowDeleteRequest.verify|verify} messages. + * @param message WorkflowDeleteRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IValidateVersionKeyspaceRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IWorkflowDeleteRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ValidateVersionKeyspaceRequest message from the specified reader or buffer. + * Decodes a WorkflowDeleteRequest message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ValidateVersionKeyspaceRequest + * @returns WorkflowDeleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateVersionKeyspaceRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.WorkflowDeleteRequest; /** - * Decodes a ValidateVersionKeyspaceRequest message from the specified reader or buffer, length delimited. + * Decodes a WorkflowDeleteRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ValidateVersionKeyspaceRequest + * @returns WorkflowDeleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateVersionKeyspaceRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.WorkflowDeleteRequest; /** - * Verifies a ValidateVersionKeyspaceRequest message. + * Verifies a WorkflowDeleteRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ValidateVersionKeyspaceRequest message from a plain object. Also converts values to their respective internal types. + * Creates a WorkflowDeleteRequest message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ValidateVersionKeyspaceRequest + * @returns WorkflowDeleteRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ValidateVersionKeyspaceRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.WorkflowDeleteRequest; /** - * Creates a plain object from a ValidateVersionKeyspaceRequest message. Also converts values to other types if specified. - * @param message ValidateVersionKeyspaceRequest + * Creates a plain object from a WorkflowDeleteRequest message. Also converts values to other types if specified. + * @param message WorkflowDeleteRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ValidateVersionKeyspaceRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.WorkflowDeleteRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ValidateVersionKeyspaceRequest to JSON. + * Converts this WorkflowDeleteRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ValidateVersionKeyspaceRequest + * Gets the default type url for WorkflowDeleteRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ValidateVersionKeyspaceResponse. */ - interface IValidateVersionKeyspaceResponse { + /** Properties of a WorkflowDeleteResponse. 
*/ + interface IWorkflowDeleteResponse { - /** ValidateVersionKeyspaceResponse results */ - results?: (string[]|null); + /** WorkflowDeleteResponse summary */ + summary?: (string|null); - /** ValidateVersionKeyspaceResponse results_by_shard */ - results_by_shard?: ({ [k: string]: vtctldata.IValidateShardResponse }|null); + /** WorkflowDeleteResponse details */ + details?: (vtctldata.WorkflowDeleteResponse.ITabletInfo[]|null); } - /** Represents a ValidateVersionKeyspaceResponse. */ - class ValidateVersionKeyspaceResponse implements IValidateVersionKeyspaceResponse { + /** Represents a WorkflowDeleteResponse. */ + class WorkflowDeleteResponse implements IWorkflowDeleteResponse { /** - * Constructs a new ValidateVersionKeyspaceResponse. + * Constructs a new WorkflowDeleteResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IValidateVersionKeyspaceResponse); + constructor(properties?: vtctldata.IWorkflowDeleteResponse); - /** ValidateVersionKeyspaceResponse results. */ - public results: string[]; + /** WorkflowDeleteResponse summary. */ + public summary: string; - /** ValidateVersionKeyspaceResponse results_by_shard. */ - public results_by_shard: { [k: string]: vtctldata.IValidateShardResponse }; + /** WorkflowDeleteResponse details. */ + public details: vtctldata.WorkflowDeleteResponse.ITabletInfo[]; /** - * Creates a new ValidateVersionKeyspaceResponse instance using the specified properties. + * Creates a new WorkflowDeleteResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ValidateVersionKeyspaceResponse instance + * @returns WorkflowDeleteResponse instance */ - public static create(properties?: vtctldata.IValidateVersionKeyspaceResponse): vtctldata.ValidateVersionKeyspaceResponse; + public static create(properties?: vtctldata.IWorkflowDeleteResponse): vtctldata.WorkflowDeleteResponse; /** - * Encodes the specified ValidateVersionKeyspaceResponse message. 
Does not implicitly {@link vtctldata.ValidateVersionKeyspaceResponse.verify|verify} messages. - * @param message ValidateVersionKeyspaceResponse message or plain object to encode + * Encodes the specified WorkflowDeleteResponse message. Does not implicitly {@link vtctldata.WorkflowDeleteResponse.verify|verify} messages. + * @param message WorkflowDeleteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IValidateVersionKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IWorkflowDeleteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ValidateVersionKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateVersionKeyspaceResponse.verify|verify} messages. - * @param message ValidateVersionKeyspaceResponse message or plain object to encode + * Encodes the specified WorkflowDeleteResponse message, length delimited. Does not implicitly {@link vtctldata.WorkflowDeleteResponse.verify|verify} messages. + * @param message WorkflowDeleteResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IValidateVersionKeyspaceResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IWorkflowDeleteResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ValidateVersionKeyspaceResponse message from the specified reader or buffer. + * Decodes a WorkflowDeleteResponse message from the specified reader or buffer. 
* @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ValidateVersionKeyspaceResponse + * @returns WorkflowDeleteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateVersionKeyspaceResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.WorkflowDeleteResponse; /** - * Decodes a ValidateVersionKeyspaceResponse message from the specified reader or buffer, length delimited. + * Decodes a WorkflowDeleteResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ValidateVersionKeyspaceResponse + * @returns WorkflowDeleteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateVersionKeyspaceResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.WorkflowDeleteResponse; /** - * Verifies a ValidateVersionKeyspaceResponse message. + * Verifies a WorkflowDeleteResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ValidateVersionKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * Creates a WorkflowDeleteResponse message from a plain object. Also converts values to their respective internal types. 
* @param object Plain object - * @returns ValidateVersionKeyspaceResponse + * @returns WorkflowDeleteResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ValidateVersionKeyspaceResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.WorkflowDeleteResponse; /** - * Creates a plain object from a ValidateVersionKeyspaceResponse message. Also converts values to other types if specified. - * @param message ValidateVersionKeyspaceResponse + * Creates a plain object from a WorkflowDeleteResponse message. Also converts values to other types if specified. + * @param message WorkflowDeleteResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ValidateVersionKeyspaceResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.WorkflowDeleteResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ValidateVersionKeyspaceResponse to JSON. + * Converts this WorkflowDeleteResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ValidateVersionKeyspaceResponse + * Gets the default type url for WorkflowDeleteResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ValidateVersionShardRequest. */ - interface IValidateVersionShardRequest { + namespace WorkflowDeleteResponse { - /** ValidateVersionShardRequest keyspace */ + /** Properties of a TabletInfo. */ + interface ITabletInfo { + + /** TabletInfo tablet */ + tablet?: (topodata.ITabletAlias|null); + + /** TabletInfo deleted */ + deleted?: (boolean|null); + } + + /** Represents a TabletInfo. */ + class TabletInfo implements ITabletInfo { + + /** + * Constructs a new TabletInfo. 
+ * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.WorkflowDeleteResponse.ITabletInfo); + + /** TabletInfo tablet. */ + public tablet?: (topodata.ITabletAlias|null); + + /** TabletInfo deleted. */ + public deleted: boolean; + + /** + * Creates a new TabletInfo instance using the specified properties. + * @param [properties] Properties to set + * @returns TabletInfo instance + */ + public static create(properties?: vtctldata.WorkflowDeleteResponse.ITabletInfo): vtctldata.WorkflowDeleteResponse.TabletInfo; + + /** + * Encodes the specified TabletInfo message. Does not implicitly {@link vtctldata.WorkflowDeleteResponse.TabletInfo.verify|verify} messages. + * @param message TabletInfo message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.WorkflowDeleteResponse.ITabletInfo, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified TabletInfo message, length delimited. Does not implicitly {@link vtctldata.WorkflowDeleteResponse.TabletInfo.verify|verify} messages. + * @param message TabletInfo message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.WorkflowDeleteResponse.ITabletInfo, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a TabletInfo message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns TabletInfo + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.WorkflowDeleteResponse.TabletInfo; + + /** + * Decodes a TabletInfo message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns TabletInfo + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.WorkflowDeleteResponse.TabletInfo; + + /** + * Verifies a TabletInfo message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a TabletInfo message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns TabletInfo + */ + public static fromObject(object: { [k: string]: any }): vtctldata.WorkflowDeleteResponse.TabletInfo; + + /** + * Creates a plain object from a TabletInfo message. Also converts values to other types if specified. + * @param message TabletInfo + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.WorkflowDeleteResponse.TabletInfo, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this TabletInfo to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for TabletInfo + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + } + + /** Properties of a WorkflowStatusRequest. */ + interface IWorkflowStatusRequest { + + /** WorkflowStatusRequest keyspace */ keyspace?: (string|null); - /** ValidateVersionShardRequest shard */ - shard?: (string|null); + /** WorkflowStatusRequest workflow */ + workflow?: (string|null); } - /** Represents a ValidateVersionShardRequest. 
*/ - class ValidateVersionShardRequest implements IValidateVersionShardRequest { + /** Represents a WorkflowStatusRequest. */ + class WorkflowStatusRequest implements IWorkflowStatusRequest { /** - * Constructs a new ValidateVersionShardRequest. + * Constructs a new WorkflowStatusRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IValidateVersionShardRequest); + constructor(properties?: vtctldata.IWorkflowStatusRequest); - /** ValidateVersionShardRequest keyspace. */ + /** WorkflowStatusRequest keyspace. */ public keyspace: string; - /** ValidateVersionShardRequest shard. */ - public shard: string; + /** WorkflowStatusRequest workflow. */ + public workflow: string; /** - * Creates a new ValidateVersionShardRequest instance using the specified properties. + * Creates a new WorkflowStatusRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ValidateVersionShardRequest instance + * @returns WorkflowStatusRequest instance */ - public static create(properties?: vtctldata.IValidateVersionShardRequest): vtctldata.ValidateVersionShardRequest; + public static create(properties?: vtctldata.IWorkflowStatusRequest): vtctldata.WorkflowStatusRequest; /** - * Encodes the specified ValidateVersionShardRequest message. Does not implicitly {@link vtctldata.ValidateVersionShardRequest.verify|verify} messages. - * @param message ValidateVersionShardRequest message or plain object to encode + * Encodes the specified WorkflowStatusRequest message. Does not implicitly {@link vtctldata.WorkflowStatusRequest.verify|verify} messages. 
+ * @param message WorkflowStatusRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IValidateVersionShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IWorkflowStatusRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ValidateVersionShardRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateVersionShardRequest.verify|verify} messages. - * @param message ValidateVersionShardRequest message or plain object to encode + * Encodes the specified WorkflowStatusRequest message, length delimited. Does not implicitly {@link vtctldata.WorkflowStatusRequest.verify|verify} messages. + * @param message WorkflowStatusRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IValidateVersionShardRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IWorkflowStatusRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ValidateVersionShardRequest message from the specified reader or buffer. + * Decodes a WorkflowStatusRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ValidateVersionShardRequest + * @returns WorkflowStatusRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateVersionShardRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.WorkflowStatusRequest; /** - * Decodes a ValidateVersionShardRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a WorkflowStatusRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ValidateVersionShardRequest + * @returns WorkflowStatusRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateVersionShardRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.WorkflowStatusRequest; /** - * Verifies a ValidateVersionShardRequest message. + * Verifies a WorkflowStatusRequest message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ValidateVersionShardRequest message from a plain object. Also converts values to their respective internal types. + * Creates a WorkflowStatusRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ValidateVersionShardRequest + * @returns WorkflowStatusRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ValidateVersionShardRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.WorkflowStatusRequest; /** - * Creates a plain object from a ValidateVersionShardRequest message. Also converts values to other types if specified. - * @param message ValidateVersionShardRequest + * Creates a plain object from a WorkflowStatusRequest message. Also converts values to other types if specified. 
+ * @param message WorkflowStatusRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ValidateVersionShardRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.WorkflowStatusRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ValidateVersionShardRequest to JSON. + * Converts this WorkflowStatusRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ValidateVersionShardRequest + * Gets the default type url for WorkflowStatusRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ValidateVersionShardResponse. */ - interface IValidateVersionShardResponse { + /** Properties of a WorkflowStatusResponse. */ + interface IWorkflowStatusResponse { - /** ValidateVersionShardResponse results */ - results?: (string[]|null); + /** WorkflowStatusResponse table_copy_state */ + table_copy_state?: ({ [k: string]: vtctldata.WorkflowStatusResponse.ITableCopyState }|null); + + /** WorkflowStatusResponse shard_streams */ + shard_streams?: ({ [k: string]: vtctldata.WorkflowStatusResponse.IShardStreams }|null); + + /** WorkflowStatusResponse traffic_state */ + traffic_state?: (string|null); } - /** Represents a ValidateVersionShardResponse. */ - class ValidateVersionShardResponse implements IValidateVersionShardResponse { + /** Represents a WorkflowStatusResponse. */ + class WorkflowStatusResponse implements IWorkflowStatusResponse { /** - * Constructs a new ValidateVersionShardResponse. + * Constructs a new WorkflowStatusResponse. 
* @param [properties] Properties to set */ - constructor(properties?: vtctldata.IValidateVersionShardResponse); + constructor(properties?: vtctldata.IWorkflowStatusResponse); - /** ValidateVersionShardResponse results. */ - public results: string[]; + /** WorkflowStatusResponse table_copy_state. */ + public table_copy_state: { [k: string]: vtctldata.WorkflowStatusResponse.ITableCopyState }; + + /** WorkflowStatusResponse shard_streams. */ + public shard_streams: { [k: string]: vtctldata.WorkflowStatusResponse.IShardStreams }; + + /** WorkflowStatusResponse traffic_state. */ + public traffic_state: string; /** - * Creates a new ValidateVersionShardResponse instance using the specified properties. + * Creates a new WorkflowStatusResponse instance using the specified properties. * @param [properties] Properties to set - * @returns ValidateVersionShardResponse instance + * @returns WorkflowStatusResponse instance */ - public static create(properties?: vtctldata.IValidateVersionShardResponse): vtctldata.ValidateVersionShardResponse; + public static create(properties?: vtctldata.IWorkflowStatusResponse): vtctldata.WorkflowStatusResponse; /** - * Encodes the specified ValidateVersionShardResponse message. Does not implicitly {@link vtctldata.ValidateVersionShardResponse.verify|verify} messages. - * @param message ValidateVersionShardResponse message or plain object to encode + * Encodes the specified WorkflowStatusResponse message. Does not implicitly {@link vtctldata.WorkflowStatusResponse.verify|verify} messages. + * @param message WorkflowStatusResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IValidateVersionShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IWorkflowStatusResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ValidateVersionShardResponse message, length delimited. 
Does not implicitly {@link vtctldata.ValidateVersionShardResponse.verify|verify} messages. - * @param message ValidateVersionShardResponse message or plain object to encode + * Encodes the specified WorkflowStatusResponse message, length delimited. Does not implicitly {@link vtctldata.WorkflowStatusResponse.verify|verify} messages. + * @param message WorkflowStatusResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IValidateVersionShardResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IWorkflowStatusResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ValidateVersionShardResponse message from the specified reader or buffer. + * Decodes a WorkflowStatusResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ValidateVersionShardResponse + * @returns WorkflowStatusResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateVersionShardResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.WorkflowStatusResponse; /** - * Decodes a ValidateVersionShardResponse message from the specified reader or buffer, length delimited. + * Decodes a WorkflowStatusResponse message from the specified reader or buffer, length delimited. 
* @param reader Reader or buffer to decode from - * @returns ValidateVersionShardResponse + * @returns WorkflowStatusResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateVersionShardResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.WorkflowStatusResponse; /** - * Verifies a ValidateVersionShardResponse message. + * Verifies a WorkflowStatusResponse message. * @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ValidateVersionShardResponse message from a plain object. Also converts values to their respective internal types. + * Creates a WorkflowStatusResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ValidateVersionShardResponse + * @returns WorkflowStatusResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ValidateVersionShardResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.WorkflowStatusResponse; /** - * Creates a plain object from a ValidateVersionShardResponse message. Also converts values to other types if specified. - * @param message ValidateVersionShardResponse + * Creates a plain object from a WorkflowStatusResponse message. Also converts values to other types if specified. 
+ * @param message WorkflowStatusResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ValidateVersionShardResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.WorkflowStatusResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ValidateVersionShardResponse to JSON. + * Converts this WorkflowStatusResponse to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ValidateVersionShardResponse + * Gets the default type url for WorkflowStatusResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ValidateVSchemaRequest. */ - interface IValidateVSchemaRequest { + namespace WorkflowStatusResponse { - /** ValidateVSchemaRequest keyspace */ + /** Properties of a TableCopyState. */ + interface ITableCopyState { + + /** TableCopyState rows_copied */ + rows_copied?: (number|Long|null); + + /** TableCopyState rows_total */ + rows_total?: (number|Long|null); + + /** TableCopyState rows_percentage */ + rows_percentage?: (number|null); + + /** TableCopyState bytes_copied */ + bytes_copied?: (number|Long|null); + + /** TableCopyState bytes_total */ + bytes_total?: (number|Long|null); + + /** TableCopyState bytes_percentage */ + bytes_percentage?: (number|null); + } + + /** Represents a TableCopyState. */ + class TableCopyState implements ITableCopyState { + + /** + * Constructs a new TableCopyState. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.WorkflowStatusResponse.ITableCopyState); + + /** TableCopyState rows_copied. */ + public rows_copied: (number|Long); + + /** TableCopyState rows_total. */ + public rows_total: (number|Long); + + /** TableCopyState rows_percentage. 
*/ + public rows_percentage: number; + + /** TableCopyState bytes_copied. */ + public bytes_copied: (number|Long); + + /** TableCopyState bytes_total. */ + public bytes_total: (number|Long); + + /** TableCopyState bytes_percentage. */ + public bytes_percentage: number; + + /** + * Creates a new TableCopyState instance using the specified properties. + * @param [properties] Properties to set + * @returns TableCopyState instance + */ + public static create(properties?: vtctldata.WorkflowStatusResponse.ITableCopyState): vtctldata.WorkflowStatusResponse.TableCopyState; + + /** + * Encodes the specified TableCopyState message. Does not implicitly {@link vtctldata.WorkflowStatusResponse.TableCopyState.verify|verify} messages. + * @param message TableCopyState message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.WorkflowStatusResponse.ITableCopyState, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified TableCopyState message, length delimited. Does not implicitly {@link vtctldata.WorkflowStatusResponse.TableCopyState.verify|verify} messages. + * @param message TableCopyState message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.WorkflowStatusResponse.ITableCopyState, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a TableCopyState message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns TableCopyState + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.WorkflowStatusResponse.TableCopyState; + + /** + * Decodes a TableCopyState message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns TableCopyState + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.WorkflowStatusResponse.TableCopyState; + + /** + * Verifies a TableCopyState message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a TableCopyState message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns TableCopyState + */ + public static fromObject(object: { [k: string]: any }): vtctldata.WorkflowStatusResponse.TableCopyState; + + /** + * Creates a plain object from a TableCopyState message. Also converts values to other types if specified. + * @param message TableCopyState + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.WorkflowStatusResponse.TableCopyState, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this TableCopyState to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for TableCopyState + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a ShardStreamState. */ + interface IShardStreamState { + + /** ShardStreamState id */ + id?: (number|null); + + /** ShardStreamState tablet */ + tablet?: (topodata.ITabletAlias|null); + + /** ShardStreamState source_shard */ + source_shard?: (string|null); + + /** ShardStreamState position */ + position?: (string|null); + + /** ShardStreamState status */ + status?: (string|null); + + /** ShardStreamState info */ + info?: (string|null); + } + + /** Represents a ShardStreamState. */ + class ShardStreamState implements IShardStreamState { + + /** + * Constructs a new ShardStreamState. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.WorkflowStatusResponse.IShardStreamState); + + /** ShardStreamState id. */ + public id: number; + + /** ShardStreamState tablet. */ + public tablet?: (topodata.ITabletAlias|null); + + /** ShardStreamState source_shard. */ + public source_shard: string; + + /** ShardStreamState position. */ + public position: string; + + /** ShardStreamState status. */ + public status: string; + + /** ShardStreamState info. */ + public info: string; + + /** + * Creates a new ShardStreamState instance using the specified properties. + * @param [properties] Properties to set + * @returns ShardStreamState instance + */ + public static create(properties?: vtctldata.WorkflowStatusResponse.IShardStreamState): vtctldata.WorkflowStatusResponse.ShardStreamState; + + /** + * Encodes the specified ShardStreamState message. Does not implicitly {@link vtctldata.WorkflowStatusResponse.ShardStreamState.verify|verify} messages. 
+ * @param message ShardStreamState message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.WorkflowStatusResponse.IShardStreamState, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ShardStreamState message, length delimited. Does not implicitly {@link vtctldata.WorkflowStatusResponse.ShardStreamState.verify|verify} messages. + * @param message ShardStreamState message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.WorkflowStatusResponse.IShardStreamState, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ShardStreamState message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ShardStreamState + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.WorkflowStatusResponse.ShardStreamState; + + /** + * Decodes a ShardStreamState message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ShardStreamState + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.WorkflowStatusResponse.ShardStreamState; + + /** + * Verifies a ShardStreamState message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ShardStreamState message from a plain object. 
Also converts values to their respective internal types. + * @param object Plain object + * @returns ShardStreamState + */ + public static fromObject(object: { [k: string]: any }): vtctldata.WorkflowStatusResponse.ShardStreamState; + + /** + * Creates a plain object from a ShardStreamState message. Also converts values to other types if specified. + * @param message ShardStreamState + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.WorkflowStatusResponse.ShardStreamState, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ShardStreamState to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ShardStreamState + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a ShardStreams. */ + interface IShardStreams { + + /** ShardStreams streams */ + streams?: (vtctldata.WorkflowStatusResponse.IShardStreamState[]|null); + } + + /** Represents a ShardStreams. */ + class ShardStreams implements IShardStreams { + + /** + * Constructs a new ShardStreams. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.WorkflowStatusResponse.IShardStreams); + + /** ShardStreams streams. */ + public streams: vtctldata.WorkflowStatusResponse.IShardStreamState[]; + + /** + * Creates a new ShardStreams instance using the specified properties. + * @param [properties] Properties to set + * @returns ShardStreams instance + */ + public static create(properties?: vtctldata.WorkflowStatusResponse.IShardStreams): vtctldata.WorkflowStatusResponse.ShardStreams; + + /** + * Encodes the specified ShardStreams message. Does not implicitly {@link vtctldata.WorkflowStatusResponse.ShardStreams.verify|verify} messages. 
+ * @param message ShardStreams message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.WorkflowStatusResponse.IShardStreams, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ShardStreams message, length delimited. Does not implicitly {@link vtctldata.WorkflowStatusResponse.ShardStreams.verify|verify} messages. + * @param message ShardStreams message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.WorkflowStatusResponse.IShardStreams, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ShardStreams message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ShardStreams + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.WorkflowStatusResponse.ShardStreams; + + /** + * Decodes a ShardStreams message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ShardStreams + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.WorkflowStatusResponse.ShardStreams; + + /** + * Verifies a ShardStreams message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ShardStreams message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns ShardStreams + */ + public static fromObject(object: { [k: string]: any }): vtctldata.WorkflowStatusResponse.ShardStreams; + + /** + * Creates a plain object from a ShardStreams message. Also converts values to other types if specified. + * @param message ShardStreams + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.WorkflowStatusResponse.ShardStreams, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ShardStreams to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ShardStreams + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + } + + /** Properties of a WorkflowSwitchTrafficRequest. */ + interface IWorkflowSwitchTrafficRequest { + + /** WorkflowSwitchTrafficRequest keyspace */ keyspace?: (string|null); - /** ValidateVSchemaRequest shards */ - shards?: (string[]|null); + /** WorkflowSwitchTrafficRequest workflow */ + workflow?: (string|null); - /** ValidateVSchemaRequest exclude_tables */ - exclude_tables?: (string[]|null); + /** WorkflowSwitchTrafficRequest cells */ + cells?: (string[]|null); - /** ValidateVSchemaRequest include_views */ - include_views?: (boolean|null); + /** WorkflowSwitchTrafficRequest tablet_types */ + tablet_types?: (topodata.TabletType[]|null); + + /** WorkflowSwitchTrafficRequest max_replication_lag_allowed */ + max_replication_lag_allowed?: (vttime.IDuration|null); + + /** WorkflowSwitchTrafficRequest enable_reverse_replication */ + enable_reverse_replication?: (boolean|null); + + /** WorkflowSwitchTrafficRequest direction */ + direction?: (number|null); + + /** WorkflowSwitchTrafficRequest timeout */ + timeout?: (vttime.IDuration|null); + + /** WorkflowSwitchTrafficRequest dry_run */ + 
dry_run?: (boolean|null); + + /** WorkflowSwitchTrafficRequest initialize_target_sequences */ + initialize_target_sequences?: (boolean|null); } - /** Represents a ValidateVSchemaRequest. */ - class ValidateVSchemaRequest implements IValidateVSchemaRequest { + /** Represents a WorkflowSwitchTrafficRequest. */ + class WorkflowSwitchTrafficRequest implements IWorkflowSwitchTrafficRequest { /** - * Constructs a new ValidateVSchemaRequest. + * Constructs a new WorkflowSwitchTrafficRequest. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IValidateVSchemaRequest); + constructor(properties?: vtctldata.IWorkflowSwitchTrafficRequest); - /** ValidateVSchemaRequest keyspace. */ + /** WorkflowSwitchTrafficRequest keyspace. */ public keyspace: string; - /** ValidateVSchemaRequest shards. */ - public shards: string[]; + /** WorkflowSwitchTrafficRequest workflow. */ + public workflow: string; - /** ValidateVSchemaRequest exclude_tables. */ - public exclude_tables: string[]; + /** WorkflowSwitchTrafficRequest cells. */ + public cells: string[]; - /** ValidateVSchemaRequest include_views. */ - public include_views: boolean; + /** WorkflowSwitchTrafficRequest tablet_types. */ + public tablet_types: topodata.TabletType[]; + + /** WorkflowSwitchTrafficRequest max_replication_lag_allowed. */ + public max_replication_lag_allowed?: (vttime.IDuration|null); + + /** WorkflowSwitchTrafficRequest enable_reverse_replication. */ + public enable_reverse_replication: boolean; + + /** WorkflowSwitchTrafficRequest direction. */ + public direction: number; + + /** WorkflowSwitchTrafficRequest timeout. */ + public timeout?: (vttime.IDuration|null); + + /** WorkflowSwitchTrafficRequest dry_run. */ + public dry_run: boolean; + + /** WorkflowSwitchTrafficRequest initialize_target_sequences. */ + public initialize_target_sequences: boolean; /** - * Creates a new ValidateVSchemaRequest instance using the specified properties. 
+ * Creates a new WorkflowSwitchTrafficRequest instance using the specified properties. * @param [properties] Properties to set - * @returns ValidateVSchemaRequest instance + * @returns WorkflowSwitchTrafficRequest instance */ - public static create(properties?: vtctldata.IValidateVSchemaRequest): vtctldata.ValidateVSchemaRequest; + public static create(properties?: vtctldata.IWorkflowSwitchTrafficRequest): vtctldata.WorkflowSwitchTrafficRequest; /** - * Encodes the specified ValidateVSchemaRequest message. Does not implicitly {@link vtctldata.ValidateVSchemaRequest.verify|verify} messages. - * @param message ValidateVSchemaRequest message or plain object to encode + * Encodes the specified WorkflowSwitchTrafficRequest message. Does not implicitly {@link vtctldata.WorkflowSwitchTrafficRequest.verify|verify} messages. + * @param message WorkflowSwitchTrafficRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IValidateVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IWorkflowSwitchTrafficRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ValidateVSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateVSchemaRequest.verify|verify} messages. - * @param message ValidateVSchemaRequest message or plain object to encode + * Encodes the specified WorkflowSwitchTrafficRequest message, length delimited. Does not implicitly {@link vtctldata.WorkflowSwitchTrafficRequest.verify|verify} messages. 
+ * @param message WorkflowSwitchTrafficRequest message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IValidateVSchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IWorkflowSwitchTrafficRequest, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ValidateVSchemaRequest message from the specified reader or buffer. + * Decodes a WorkflowSwitchTrafficRequest message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ValidateVSchemaRequest + * @returns WorkflowSwitchTrafficRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateVSchemaRequest; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.WorkflowSwitchTrafficRequest; /** - * Decodes a ValidateVSchemaRequest message from the specified reader or buffer, length delimited. + * Decodes a WorkflowSwitchTrafficRequest message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ValidateVSchemaRequest + * @returns WorkflowSwitchTrafficRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateVSchemaRequest; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.WorkflowSwitchTrafficRequest; /** - * Verifies a ValidateVSchemaRequest message. + * Verifies a WorkflowSwitchTrafficRequest message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ValidateVSchemaRequest message from a plain object. Also converts values to their respective internal types. + * Creates a WorkflowSwitchTrafficRequest message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ValidateVSchemaRequest + * @returns WorkflowSwitchTrafficRequest */ - public static fromObject(object: { [k: string]: any }): vtctldata.ValidateVSchemaRequest; + public static fromObject(object: { [k: string]: any }): vtctldata.WorkflowSwitchTrafficRequest; /** - * Creates a plain object from a ValidateVSchemaRequest message. Also converts values to other types if specified. - * @param message ValidateVSchemaRequest + * Creates a plain object from a WorkflowSwitchTrafficRequest message. Also converts values to other types if specified. + * @param message WorkflowSwitchTrafficRequest * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ValidateVSchemaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.WorkflowSwitchTrafficRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ValidateVSchemaRequest to JSON. + * Converts this WorkflowSwitchTrafficRequest to JSON. * @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ValidateVSchemaRequest + * Gets the default type url for WorkflowSwitchTrafficRequest * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a ValidateVSchemaResponse. 
*/ - interface IValidateVSchemaResponse { + /** Properties of a WorkflowSwitchTrafficResponse. */ + interface IWorkflowSwitchTrafficResponse { - /** ValidateVSchemaResponse results */ - results?: (string[]|null); + /** WorkflowSwitchTrafficResponse summary */ + summary?: (string|null); - /** ValidateVSchemaResponse results_by_shard */ - results_by_shard?: ({ [k: string]: vtctldata.IValidateShardResponse }|null); + /** WorkflowSwitchTrafficResponse start_state */ + start_state?: (string|null); + + /** WorkflowSwitchTrafficResponse current_state */ + current_state?: (string|null); + + /** WorkflowSwitchTrafficResponse dry_run_results */ + dry_run_results?: (string[]|null); } - /** Represents a ValidateVSchemaResponse. */ - class ValidateVSchemaResponse implements IValidateVSchemaResponse { + /** Represents a WorkflowSwitchTrafficResponse. */ + class WorkflowSwitchTrafficResponse implements IWorkflowSwitchTrafficResponse { /** - * Constructs a new ValidateVSchemaResponse. + * Constructs a new WorkflowSwitchTrafficResponse. * @param [properties] Properties to set */ - constructor(properties?: vtctldata.IValidateVSchemaResponse); + constructor(properties?: vtctldata.IWorkflowSwitchTrafficResponse); - /** ValidateVSchemaResponse results. */ - public results: string[]; + /** WorkflowSwitchTrafficResponse summary. */ + public summary: string; - /** ValidateVSchemaResponse results_by_shard. */ - public results_by_shard: { [k: string]: vtctldata.IValidateShardResponse }; + /** WorkflowSwitchTrafficResponse start_state. */ + public start_state: string; + + /** WorkflowSwitchTrafficResponse current_state. */ + public current_state: string; + + /** WorkflowSwitchTrafficResponse dry_run_results. */ + public dry_run_results: string[]; /** - * Creates a new ValidateVSchemaResponse instance using the specified properties. + * Creates a new WorkflowSwitchTrafficResponse instance using the specified properties. 
* @param [properties] Properties to set - * @returns ValidateVSchemaResponse instance + * @returns WorkflowSwitchTrafficResponse instance */ - public static create(properties?: vtctldata.IValidateVSchemaResponse): vtctldata.ValidateVSchemaResponse; + public static create(properties?: vtctldata.IWorkflowSwitchTrafficResponse): vtctldata.WorkflowSwitchTrafficResponse; /** - * Encodes the specified ValidateVSchemaResponse message. Does not implicitly {@link vtctldata.ValidateVSchemaResponse.verify|verify} messages. - * @param message ValidateVSchemaResponse message or plain object to encode + * Encodes the specified WorkflowSwitchTrafficResponse message. Does not implicitly {@link vtctldata.WorkflowSwitchTrafficResponse.verify|verify} messages. + * @param message WorkflowSwitchTrafficResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encode(message: vtctldata.IValidateVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encode(message: vtctldata.IWorkflowSwitchTrafficResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Encodes the specified ValidateVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateVSchemaResponse.verify|verify} messages. - * @param message ValidateVSchemaResponse message or plain object to encode + * Encodes the specified WorkflowSwitchTrafficResponse message, length delimited. Does not implicitly {@link vtctldata.WorkflowSwitchTrafficResponse.verify|verify} messages. 
+ * @param message WorkflowSwitchTrafficResponse message or plain object to encode * @param [writer] Writer to encode to * @returns Writer */ - public static encodeDelimited(message: vtctldata.IValidateVSchemaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + public static encodeDelimited(message: vtctldata.IWorkflowSwitchTrafficResponse, writer?: $protobuf.Writer): $protobuf.Writer; /** - * Decodes a ValidateVSchemaResponse message from the specified reader or buffer. + * Decodes a WorkflowSwitchTrafficResponse message from the specified reader or buffer. * @param reader Reader or buffer to decode from * @param [length] Message length if known beforehand - * @returns ValidateVSchemaResponse + * @returns WorkflowSwitchTrafficResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ValidateVSchemaResponse; + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.WorkflowSwitchTrafficResponse; /** - * Decodes a ValidateVSchemaResponse message from the specified reader or buffer, length delimited. + * Decodes a WorkflowSwitchTrafficResponse message from the specified reader or buffer, length delimited. * @param reader Reader or buffer to decode from - * @returns ValidateVSchemaResponse + * @returns WorkflowSwitchTrafficResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ValidateVSchemaResponse; + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.WorkflowSwitchTrafficResponse; /** - * Verifies a ValidateVSchemaResponse message. + * Verifies a WorkflowSwitchTrafficResponse message. 
* @param message Plain object to verify * @returns `null` if valid, otherwise the reason why it is not */ public static verify(message: { [k: string]: any }): (string|null); /** - * Creates a ValidateVSchemaResponse message from a plain object. Also converts values to their respective internal types. + * Creates a WorkflowSwitchTrafficResponse message from a plain object. Also converts values to their respective internal types. * @param object Plain object - * @returns ValidateVSchemaResponse + * @returns WorkflowSwitchTrafficResponse */ - public static fromObject(object: { [k: string]: any }): vtctldata.ValidateVSchemaResponse; + public static fromObject(object: { [k: string]: any }): vtctldata.WorkflowSwitchTrafficResponse; /** - * Creates a plain object from a ValidateVSchemaResponse message. Also converts values to other types if specified. - * @param message ValidateVSchemaResponse + * Creates a plain object from a WorkflowSwitchTrafficResponse message. Also converts values to other types if specified. + * @param message WorkflowSwitchTrafficResponse * @param [options] Conversion options * @returns Plain object */ - public static toObject(message: vtctldata.ValidateVSchemaResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + public static toObject(message: vtctldata.WorkflowSwitchTrafficResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; /** - * Converts this ValidateVSchemaResponse to JSON. + * Converts this WorkflowSwitchTrafficResponse to JSON. 
* @returns JSON object */ public toJSON(): { [k: string]: any }; /** - * Gets the default type url for ValidateVSchemaResponse + * Gets the default type url for WorkflowSwitchTrafficResponse * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns The default type url */ @@ -59328,7 +68249,7 @@ export namespace vtctldata { keyspace?: (string|null); /** WorkflowUpdateRequest tablet_request */ - tablet_request?: (tabletmanagerdata.IUpdateVRWorkflowRequest|null); + tablet_request?: (tabletmanagerdata.IUpdateVReplicationWorkflowRequest|null); } /** Represents a WorkflowUpdateRequest. */ @@ -59344,7 +68265,7 @@ export namespace vtctldata { public keyspace: string; /** WorkflowUpdateRequest tablet_request. */ - public tablet_request?: (tabletmanagerdata.IUpdateVRWorkflowRequest|null); + public tablet_request?: (tabletmanagerdata.IUpdateVReplicationWorkflowRequest|null); /** * Creates a new WorkflowUpdateRequest instance using the specified properties. @@ -59533,7 +68454,7 @@ export namespace vtctldata { interface ITabletInfo { /** TabletInfo tablet */ - tablet?: (string|null); + tablet?: (topodata.ITabletAlias|null); /** TabletInfo changed */ changed?: (boolean|null); @@ -59549,7 +68470,7 @@ export namespace vtctldata { constructor(properties?: vtctldata.WorkflowUpdateResponse.ITabletInfo); /** TabletInfo tablet. */ - public tablet: string; + public tablet?: (topodata.ITabletAlias|null); /** TabletInfo changed. 
*/ public changed: boolean; diff --git a/web/vtadmin/src/proto/vtadmin.js b/web/vtadmin/src/proto/vtadmin.js index 03384c2adb9..2d84f23ce3c 100644 --- a/web/vtadmin/src/proto/vtadmin.js +++ b/web/vtadmin/src/proto/vtadmin.js @@ -30065,6 +30065,7 @@ export const mysqlctl = $root.mysqlctl = (() => { * @interface IApplyBinlogFileRequest * @property {string|null} [binlog_file_name] ApplyBinlogFileRequest binlog_file_name * @property {string|null} [binlog_restore_position] ApplyBinlogFileRequest binlog_restore_position + * @property {vttime.ITime|null} [binlog_restore_datetime] ApplyBinlogFileRequest binlog_restore_datetime */ /** @@ -30098,6 +30099,14 @@ export const mysqlctl = $root.mysqlctl = (() => { */ ApplyBinlogFileRequest.prototype.binlog_restore_position = ""; + /** + * ApplyBinlogFileRequest binlog_restore_datetime. + * @member {vttime.ITime|null|undefined} binlog_restore_datetime + * @memberof mysqlctl.ApplyBinlogFileRequest + * @instance + */ + ApplyBinlogFileRequest.prototype.binlog_restore_datetime = null; + /** * Creates a new ApplyBinlogFileRequest instance using the specified properties. 
* @function create @@ -30126,6 +30135,8 @@ export const mysqlctl = $root.mysqlctl = (() => { writer.uint32(/* id 1, wireType 2 =*/10).string(message.binlog_file_name); if (message.binlog_restore_position != null && Object.hasOwnProperty.call(message, "binlog_restore_position")) writer.uint32(/* id 2, wireType 2 =*/18).string(message.binlog_restore_position); + if (message.binlog_restore_datetime != null && Object.hasOwnProperty.call(message, "binlog_restore_datetime")) + $root.vttime.Time.encode(message.binlog_restore_datetime, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); return writer; }; @@ -30168,6 +30179,10 @@ export const mysqlctl = $root.mysqlctl = (() => { message.binlog_restore_position = reader.string(); break; } + case 3: { + message.binlog_restore_datetime = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -30209,6 +30224,11 @@ export const mysqlctl = $root.mysqlctl = (() => { if (message.binlog_restore_position != null && message.hasOwnProperty("binlog_restore_position")) if (!$util.isString(message.binlog_restore_position)) return "binlog_restore_position: string expected"; + if (message.binlog_restore_datetime != null && message.hasOwnProperty("binlog_restore_datetime")) { + let error = $root.vttime.Time.verify(message.binlog_restore_datetime); + if (error) + return "binlog_restore_datetime." 
+ error; + } return null; }; @@ -30228,6 +30248,11 @@ export const mysqlctl = $root.mysqlctl = (() => { message.binlog_file_name = String(object.binlog_file_name); if (object.binlog_restore_position != null) message.binlog_restore_position = String(object.binlog_restore_position); + if (object.binlog_restore_datetime != null) { + if (typeof object.binlog_restore_datetime !== "object") + throw TypeError(".mysqlctl.ApplyBinlogFileRequest.binlog_restore_datetime: object expected"); + message.binlog_restore_datetime = $root.vttime.Time.fromObject(object.binlog_restore_datetime); + } return message; }; @@ -30247,11 +30272,14 @@ export const mysqlctl = $root.mysqlctl = (() => { if (options.defaults) { object.binlog_file_name = ""; object.binlog_restore_position = ""; + object.binlog_restore_datetime = null; } if (message.binlog_file_name != null && message.hasOwnProperty("binlog_file_name")) object.binlog_file_name = message.binlog_file_name; if (message.binlog_restore_position != null && message.hasOwnProperty("binlog_restore_position")) object.binlog_restore_position = message.binlog_restore_position; + if (message.binlog_restore_datetime != null && message.hasOwnProperty("binlog_restore_datetime")) + object.binlog_restore_datetime = $root.vttime.Time.toObject(message.binlog_restore_datetime, options); return object; }; @@ -30459,6 +30487,508 @@ export const mysqlctl = $root.mysqlctl = (() => { return ApplyBinlogFileResponse; })(); + mysqlctl.ReadBinlogFilesTimestampsRequest = (function() { + + /** + * Properties of a ReadBinlogFilesTimestampsRequest. + * @memberof mysqlctl + * @interface IReadBinlogFilesTimestampsRequest + * @property {Array.|null} [binlog_file_names] ReadBinlogFilesTimestampsRequest binlog_file_names + */ + + /** + * Constructs a new ReadBinlogFilesTimestampsRequest. + * @memberof mysqlctl + * @classdesc Represents a ReadBinlogFilesTimestampsRequest. 
+ * @implements IReadBinlogFilesTimestampsRequest + * @constructor + * @param {mysqlctl.IReadBinlogFilesTimestampsRequest=} [properties] Properties to set + */ + function ReadBinlogFilesTimestampsRequest(properties) { + this.binlog_file_names = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ReadBinlogFilesTimestampsRequest binlog_file_names. + * @member {Array.} binlog_file_names + * @memberof mysqlctl.ReadBinlogFilesTimestampsRequest + * @instance + */ + ReadBinlogFilesTimestampsRequest.prototype.binlog_file_names = $util.emptyArray; + + /** + * Creates a new ReadBinlogFilesTimestampsRequest instance using the specified properties. + * @function create + * @memberof mysqlctl.ReadBinlogFilesTimestampsRequest + * @static + * @param {mysqlctl.IReadBinlogFilesTimestampsRequest=} [properties] Properties to set + * @returns {mysqlctl.ReadBinlogFilesTimestampsRequest} ReadBinlogFilesTimestampsRequest instance + */ + ReadBinlogFilesTimestampsRequest.create = function create(properties) { + return new ReadBinlogFilesTimestampsRequest(properties); + }; + + /** + * Encodes the specified ReadBinlogFilesTimestampsRequest message. Does not implicitly {@link mysqlctl.ReadBinlogFilesTimestampsRequest.verify|verify} messages. 
+ * @function encode + * @memberof mysqlctl.ReadBinlogFilesTimestampsRequest + * @static + * @param {mysqlctl.IReadBinlogFilesTimestampsRequest} message ReadBinlogFilesTimestampsRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ReadBinlogFilesTimestampsRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.binlog_file_names != null && message.binlog_file_names.length) + for (let i = 0; i < message.binlog_file_names.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.binlog_file_names[i]); + return writer; + }; + + /** + * Encodes the specified ReadBinlogFilesTimestampsRequest message, length delimited. Does not implicitly {@link mysqlctl.ReadBinlogFilesTimestampsRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof mysqlctl.ReadBinlogFilesTimestampsRequest + * @static + * @param {mysqlctl.IReadBinlogFilesTimestampsRequest} message ReadBinlogFilesTimestampsRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ReadBinlogFilesTimestampsRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a ReadBinlogFilesTimestampsRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof mysqlctl.ReadBinlogFilesTimestampsRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {mysqlctl.ReadBinlogFilesTimestampsRequest} ReadBinlogFilesTimestampsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ReadBinlogFilesTimestampsRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.mysqlctl.ReadBinlogFilesTimestampsRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.binlog_file_names && message.binlog_file_names.length)) + message.binlog_file_names = []; + message.binlog_file_names.push(reader.string()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a ReadBinlogFilesTimestampsRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof mysqlctl.ReadBinlogFilesTimestampsRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {mysqlctl.ReadBinlogFilesTimestampsRequest} ReadBinlogFilesTimestampsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ReadBinlogFilesTimestampsRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a ReadBinlogFilesTimestampsRequest message. 
+ * @function verify + * @memberof mysqlctl.ReadBinlogFilesTimestampsRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ReadBinlogFilesTimestampsRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.binlog_file_names != null && message.hasOwnProperty("binlog_file_names")) { + if (!Array.isArray(message.binlog_file_names)) + return "binlog_file_names: array expected"; + for (let i = 0; i < message.binlog_file_names.length; ++i) + if (!$util.isString(message.binlog_file_names[i])) + return "binlog_file_names: string[] expected"; + } + return null; + }; + + /** + * Creates a ReadBinlogFilesTimestampsRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof mysqlctl.ReadBinlogFilesTimestampsRequest + * @static + * @param {Object.} object Plain object + * @returns {mysqlctl.ReadBinlogFilesTimestampsRequest} ReadBinlogFilesTimestampsRequest + */ + ReadBinlogFilesTimestampsRequest.fromObject = function fromObject(object) { + if (object instanceof $root.mysqlctl.ReadBinlogFilesTimestampsRequest) + return object; + let message = new $root.mysqlctl.ReadBinlogFilesTimestampsRequest(); + if (object.binlog_file_names) { + if (!Array.isArray(object.binlog_file_names)) + throw TypeError(".mysqlctl.ReadBinlogFilesTimestampsRequest.binlog_file_names: array expected"); + message.binlog_file_names = []; + for (let i = 0; i < object.binlog_file_names.length; ++i) + message.binlog_file_names[i] = String(object.binlog_file_names[i]); + } + return message; + }; + + /** + * Creates a plain object from a ReadBinlogFilesTimestampsRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof mysqlctl.ReadBinlogFilesTimestampsRequest + * @static + * @param {mysqlctl.ReadBinlogFilesTimestampsRequest} message ReadBinlogFilesTimestampsRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ReadBinlogFilesTimestampsRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.binlog_file_names = []; + if (message.binlog_file_names && message.binlog_file_names.length) { + object.binlog_file_names = []; + for (let j = 0; j < message.binlog_file_names.length; ++j) + object.binlog_file_names[j] = message.binlog_file_names[j]; + } + return object; + }; + + /** + * Converts this ReadBinlogFilesTimestampsRequest to JSON. + * @function toJSON + * @memberof mysqlctl.ReadBinlogFilesTimestampsRequest + * @instance + * @returns {Object.} JSON object + */ + ReadBinlogFilesTimestampsRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ReadBinlogFilesTimestampsRequest + * @function getTypeUrl + * @memberof mysqlctl.ReadBinlogFilesTimestampsRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ReadBinlogFilesTimestampsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/mysqlctl.ReadBinlogFilesTimestampsRequest"; + }; + + return ReadBinlogFilesTimestampsRequest; + })(); + + mysqlctl.ReadBinlogFilesTimestampsResponse = (function() { + + /** + * Properties of a ReadBinlogFilesTimestampsResponse. 
+ * @memberof mysqlctl + * @interface IReadBinlogFilesTimestampsResponse + * @property {vttime.ITime|null} [first_timestamp] ReadBinlogFilesTimestampsResponse first_timestamp + * @property {string|null} [first_timestamp_binlog] ReadBinlogFilesTimestampsResponse first_timestamp_binlog + * @property {vttime.ITime|null} [last_timestamp] ReadBinlogFilesTimestampsResponse last_timestamp + * @property {string|null} [last_timestamp_binlog] ReadBinlogFilesTimestampsResponse last_timestamp_binlog + */ + + /** + * Constructs a new ReadBinlogFilesTimestampsResponse. + * @memberof mysqlctl + * @classdesc Represents a ReadBinlogFilesTimestampsResponse. + * @implements IReadBinlogFilesTimestampsResponse + * @constructor + * @param {mysqlctl.IReadBinlogFilesTimestampsResponse=} [properties] Properties to set + */ + function ReadBinlogFilesTimestampsResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ReadBinlogFilesTimestampsResponse first_timestamp. + * @member {vttime.ITime|null|undefined} first_timestamp + * @memberof mysqlctl.ReadBinlogFilesTimestampsResponse + * @instance + */ + ReadBinlogFilesTimestampsResponse.prototype.first_timestamp = null; + + /** + * ReadBinlogFilesTimestampsResponse first_timestamp_binlog. + * @member {string} first_timestamp_binlog + * @memberof mysqlctl.ReadBinlogFilesTimestampsResponse + * @instance + */ + ReadBinlogFilesTimestampsResponse.prototype.first_timestamp_binlog = ""; + + /** + * ReadBinlogFilesTimestampsResponse last_timestamp. + * @member {vttime.ITime|null|undefined} last_timestamp + * @memberof mysqlctl.ReadBinlogFilesTimestampsResponse + * @instance + */ + ReadBinlogFilesTimestampsResponse.prototype.last_timestamp = null; + + /** + * ReadBinlogFilesTimestampsResponse last_timestamp_binlog. 
+ * @member {string} last_timestamp_binlog + * @memberof mysqlctl.ReadBinlogFilesTimestampsResponse + * @instance + */ + ReadBinlogFilesTimestampsResponse.prototype.last_timestamp_binlog = ""; + + /** + * Creates a new ReadBinlogFilesTimestampsResponse instance using the specified properties. + * @function create + * @memberof mysqlctl.ReadBinlogFilesTimestampsResponse + * @static + * @param {mysqlctl.IReadBinlogFilesTimestampsResponse=} [properties] Properties to set + * @returns {mysqlctl.ReadBinlogFilesTimestampsResponse} ReadBinlogFilesTimestampsResponse instance + */ + ReadBinlogFilesTimestampsResponse.create = function create(properties) { + return new ReadBinlogFilesTimestampsResponse(properties); + }; + + /** + * Encodes the specified ReadBinlogFilesTimestampsResponse message. Does not implicitly {@link mysqlctl.ReadBinlogFilesTimestampsResponse.verify|verify} messages. + * @function encode + * @memberof mysqlctl.ReadBinlogFilesTimestampsResponse + * @static + * @param {mysqlctl.IReadBinlogFilesTimestampsResponse} message ReadBinlogFilesTimestampsResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ReadBinlogFilesTimestampsResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.first_timestamp != null && Object.hasOwnProperty.call(message, "first_timestamp")) + $root.vttime.Time.encode(message.first_timestamp, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.first_timestamp_binlog != null && Object.hasOwnProperty.call(message, "first_timestamp_binlog")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.first_timestamp_binlog); + if (message.last_timestamp != null && Object.hasOwnProperty.call(message, "last_timestamp")) + $root.vttime.Time.encode(message.last_timestamp, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.last_timestamp_binlog != null && 
Object.hasOwnProperty.call(message, "last_timestamp_binlog")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.last_timestamp_binlog); + return writer; + }; + + /** + * Encodes the specified ReadBinlogFilesTimestampsResponse message, length delimited. Does not implicitly {@link mysqlctl.ReadBinlogFilesTimestampsResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof mysqlctl.ReadBinlogFilesTimestampsResponse + * @static + * @param {mysqlctl.IReadBinlogFilesTimestampsResponse} message ReadBinlogFilesTimestampsResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ReadBinlogFilesTimestampsResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a ReadBinlogFilesTimestampsResponse message from the specified reader or buffer. + * @function decode + * @memberof mysqlctl.ReadBinlogFilesTimestampsResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {mysqlctl.ReadBinlogFilesTimestampsResponse} ReadBinlogFilesTimestampsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ReadBinlogFilesTimestampsResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.mysqlctl.ReadBinlogFilesTimestampsResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.first_timestamp = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 2: { + message.first_timestamp_binlog = reader.string(); + break; + } + case 3: { + message.last_timestamp = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 4: { + message.last_timestamp_binlog = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a ReadBinlogFilesTimestampsResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof mysqlctl.ReadBinlogFilesTimestampsResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {mysqlctl.ReadBinlogFilesTimestampsResponse} ReadBinlogFilesTimestampsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ReadBinlogFilesTimestampsResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a ReadBinlogFilesTimestampsResponse message. 
+ * @function verify + * @memberof mysqlctl.ReadBinlogFilesTimestampsResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ReadBinlogFilesTimestampsResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.first_timestamp != null && message.hasOwnProperty("first_timestamp")) { + let error = $root.vttime.Time.verify(message.first_timestamp); + if (error) + return "first_timestamp." + error; + } + if (message.first_timestamp_binlog != null && message.hasOwnProperty("first_timestamp_binlog")) + if (!$util.isString(message.first_timestamp_binlog)) + return "first_timestamp_binlog: string expected"; + if (message.last_timestamp != null && message.hasOwnProperty("last_timestamp")) { + let error = $root.vttime.Time.verify(message.last_timestamp); + if (error) + return "last_timestamp." + error; + } + if (message.last_timestamp_binlog != null && message.hasOwnProperty("last_timestamp_binlog")) + if (!$util.isString(message.last_timestamp_binlog)) + return "last_timestamp_binlog: string expected"; + return null; + }; + + /** + * Creates a ReadBinlogFilesTimestampsResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof mysqlctl.ReadBinlogFilesTimestampsResponse + * @static + * @param {Object.} object Plain object + * @returns {mysqlctl.ReadBinlogFilesTimestampsResponse} ReadBinlogFilesTimestampsResponse + */ + ReadBinlogFilesTimestampsResponse.fromObject = function fromObject(object) { + if (object instanceof $root.mysqlctl.ReadBinlogFilesTimestampsResponse) + return object; + let message = new $root.mysqlctl.ReadBinlogFilesTimestampsResponse(); + if (object.first_timestamp != null) { + if (typeof object.first_timestamp !== "object") + throw TypeError(".mysqlctl.ReadBinlogFilesTimestampsResponse.first_timestamp: object expected"); + message.first_timestamp = $root.vttime.Time.fromObject(object.first_timestamp); + } + if (object.first_timestamp_binlog != null) + message.first_timestamp_binlog = String(object.first_timestamp_binlog); + if (object.last_timestamp != null) { + if (typeof object.last_timestamp !== "object") + throw TypeError(".mysqlctl.ReadBinlogFilesTimestampsResponse.last_timestamp: object expected"); + message.last_timestamp = $root.vttime.Time.fromObject(object.last_timestamp); + } + if (object.last_timestamp_binlog != null) + message.last_timestamp_binlog = String(object.last_timestamp_binlog); + return message; + }; + + /** + * Creates a plain object from a ReadBinlogFilesTimestampsResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof mysqlctl.ReadBinlogFilesTimestampsResponse + * @static + * @param {mysqlctl.ReadBinlogFilesTimestampsResponse} message ReadBinlogFilesTimestampsResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ReadBinlogFilesTimestampsResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.first_timestamp = null; + object.first_timestamp_binlog = ""; + object.last_timestamp = null; + object.last_timestamp_binlog = ""; + } + if (message.first_timestamp != null && message.hasOwnProperty("first_timestamp")) + object.first_timestamp = $root.vttime.Time.toObject(message.first_timestamp, options); + if (message.first_timestamp_binlog != null && message.hasOwnProperty("first_timestamp_binlog")) + object.first_timestamp_binlog = message.first_timestamp_binlog; + if (message.last_timestamp != null && message.hasOwnProperty("last_timestamp")) + object.last_timestamp = $root.vttime.Time.toObject(message.last_timestamp, options); + if (message.last_timestamp_binlog != null && message.hasOwnProperty("last_timestamp_binlog")) + object.last_timestamp_binlog = message.last_timestamp_binlog; + return object; + }; + + /** + * Converts this ReadBinlogFilesTimestampsResponse to JSON. 
+ * @function toJSON + * @memberof mysqlctl.ReadBinlogFilesTimestampsResponse + * @instance + * @returns {Object.} JSON object + */ + ReadBinlogFilesTimestampsResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ReadBinlogFilesTimestampsResponse + * @function getTypeUrl + * @memberof mysqlctl.ReadBinlogFilesTimestampsResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ReadBinlogFilesTimestampsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/mysqlctl.ReadBinlogFilesTimestampsResponse"; + }; + + return ReadBinlogFilesTimestampsResponse; + })(); + mysqlctl.ReinitConfigRequest = (function() { /** @@ -31159,6 +31689,384 @@ export const mysqlctl = $root.mysqlctl = (() => { return RefreshConfigResponse; })(); + mysqlctl.VersionStringRequest = (function() { + + /** + * Properties of a VersionStringRequest. + * @memberof mysqlctl + * @interface IVersionStringRequest + */ + + /** + * Constructs a new VersionStringRequest. + * @memberof mysqlctl + * @classdesc Represents a VersionStringRequest. + * @implements IVersionStringRequest + * @constructor + * @param {mysqlctl.IVersionStringRequest=} [properties] Properties to set + */ + function VersionStringRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Creates a new VersionStringRequest instance using the specified properties. 
+ * @function create + * @memberof mysqlctl.VersionStringRequest + * @static + * @param {mysqlctl.IVersionStringRequest=} [properties] Properties to set + * @returns {mysqlctl.VersionStringRequest} VersionStringRequest instance + */ + VersionStringRequest.create = function create(properties) { + return new VersionStringRequest(properties); + }; + + /** + * Encodes the specified VersionStringRequest message. Does not implicitly {@link mysqlctl.VersionStringRequest.verify|verify} messages. + * @function encode + * @memberof mysqlctl.VersionStringRequest + * @static + * @param {mysqlctl.IVersionStringRequest} message VersionStringRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + VersionStringRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + return writer; + }; + + /** + * Encodes the specified VersionStringRequest message, length delimited. Does not implicitly {@link mysqlctl.VersionStringRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof mysqlctl.VersionStringRequest + * @static + * @param {mysqlctl.IVersionStringRequest} message VersionStringRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + VersionStringRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a VersionStringRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof mysqlctl.VersionStringRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {mysqlctl.VersionStringRequest} VersionStringRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + VersionStringRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.mysqlctl.VersionStringRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a VersionStringRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof mysqlctl.VersionStringRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {mysqlctl.VersionStringRequest} VersionStringRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + VersionStringRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a VersionStringRequest message. 
+ * @function verify + * @memberof mysqlctl.VersionStringRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + VersionStringRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + return null; + }; + + /** + * Creates a VersionStringRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof mysqlctl.VersionStringRequest + * @static + * @param {Object.} object Plain object + * @returns {mysqlctl.VersionStringRequest} VersionStringRequest + */ + VersionStringRequest.fromObject = function fromObject(object) { + if (object instanceof $root.mysqlctl.VersionStringRequest) + return object; + return new $root.mysqlctl.VersionStringRequest(); + }; + + /** + * Creates a plain object from a VersionStringRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof mysqlctl.VersionStringRequest + * @static + * @param {mysqlctl.VersionStringRequest} message VersionStringRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + VersionStringRequest.toObject = function toObject() { + return {}; + }; + + /** + * Converts this VersionStringRequest to JSON. 
+ * @function toJSON + * @memberof mysqlctl.VersionStringRequest + * @instance + * @returns {Object.} JSON object + */ + VersionStringRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for VersionStringRequest + * @function getTypeUrl + * @memberof mysqlctl.VersionStringRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + VersionStringRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/mysqlctl.VersionStringRequest"; + }; + + return VersionStringRequest; + })(); + + mysqlctl.VersionStringResponse = (function() { + + /** + * Properties of a VersionStringResponse. + * @memberof mysqlctl + * @interface IVersionStringResponse + * @property {string|null} [version] VersionStringResponse version + */ + + /** + * Constructs a new VersionStringResponse. + * @memberof mysqlctl + * @classdesc Represents a VersionStringResponse. + * @implements IVersionStringResponse + * @constructor + * @param {mysqlctl.IVersionStringResponse=} [properties] Properties to set + */ + function VersionStringResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * VersionStringResponse version. + * @member {string} version + * @memberof mysqlctl.VersionStringResponse + * @instance + */ + VersionStringResponse.prototype.version = ""; + + /** + * Creates a new VersionStringResponse instance using the specified properties. 
+ * @function create + * @memberof mysqlctl.VersionStringResponse + * @static + * @param {mysqlctl.IVersionStringResponse=} [properties] Properties to set + * @returns {mysqlctl.VersionStringResponse} VersionStringResponse instance + */ + VersionStringResponse.create = function create(properties) { + return new VersionStringResponse(properties); + }; + + /** + * Encodes the specified VersionStringResponse message. Does not implicitly {@link mysqlctl.VersionStringResponse.verify|verify} messages. + * @function encode + * @memberof mysqlctl.VersionStringResponse + * @static + * @param {mysqlctl.IVersionStringResponse} message VersionStringResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + VersionStringResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.version != null && Object.hasOwnProperty.call(message, "version")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.version); + return writer; + }; + + /** + * Encodes the specified VersionStringResponse message, length delimited. Does not implicitly {@link mysqlctl.VersionStringResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof mysqlctl.VersionStringResponse + * @static + * @param {mysqlctl.IVersionStringResponse} message VersionStringResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + VersionStringResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a VersionStringResponse message from the specified reader or buffer. 
+ * @function decode + * @memberof mysqlctl.VersionStringResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {mysqlctl.VersionStringResponse} VersionStringResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + VersionStringResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.mysqlctl.VersionStringResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.version = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a VersionStringResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof mysqlctl.VersionStringResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {mysqlctl.VersionStringResponse} VersionStringResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + VersionStringResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a VersionStringResponse message. 
+ * @function verify + * @memberof mysqlctl.VersionStringResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + VersionStringResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.version != null && message.hasOwnProperty("version")) + if (!$util.isString(message.version)) + return "version: string expected"; + return null; + }; + + /** + * Creates a VersionStringResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof mysqlctl.VersionStringResponse + * @static + * @param {Object.} object Plain object + * @returns {mysqlctl.VersionStringResponse} VersionStringResponse + */ + VersionStringResponse.fromObject = function fromObject(object) { + if (object instanceof $root.mysqlctl.VersionStringResponse) + return object; + let message = new $root.mysqlctl.VersionStringResponse(); + if (object.version != null) + message.version = String(object.version); + return message; + }; + + /** + * Creates a plain object from a VersionStringResponse message. Also converts values to other types if specified. + * @function toObject + * @memberof mysqlctl.VersionStringResponse + * @static + * @param {mysqlctl.VersionStringResponse} message VersionStringResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + VersionStringResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.version = ""; + if (message.version != null && message.hasOwnProperty("version")) + object.version = message.version; + return object; + }; + + /** + * Converts this VersionStringResponse to JSON. 
+ * @function toJSON + * @memberof mysqlctl.VersionStringResponse + * @instance + * @returns {Object.} JSON object + */ + VersionStringResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for VersionStringResponse + * @function getTypeUrl + * @memberof mysqlctl.VersionStringResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + VersionStringResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/mysqlctl.VersionStringResponse"; + }; + + return VersionStringResponse; + })(); + mysqlctl.MysqlCtl = (function() { /** @@ -31323,6 +32231,39 @@ export const mysqlctl = $root.mysqlctl = (() => { * @variation 2 */ + /** + * Callback as used by {@link mysqlctl.MysqlCtl#readBinlogFilesTimestamps}. + * @memberof mysqlctl.MysqlCtl + * @typedef ReadBinlogFilesTimestampsCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {mysqlctl.ReadBinlogFilesTimestampsResponse} [response] ReadBinlogFilesTimestampsResponse + */ + + /** + * Calls ReadBinlogFilesTimestamps. 
+ * @function readBinlogFilesTimestamps + * @memberof mysqlctl.MysqlCtl + * @instance + * @param {mysqlctl.IReadBinlogFilesTimestampsRequest} request ReadBinlogFilesTimestampsRequest message or plain object + * @param {mysqlctl.MysqlCtl.ReadBinlogFilesTimestampsCallback} callback Node-style callback called with the error, if any, and ReadBinlogFilesTimestampsResponse + * @returns {undefined} + * @variation 1 + */ + Object.defineProperty(MysqlCtl.prototype.readBinlogFilesTimestamps = function readBinlogFilesTimestamps(request, callback) { + return this.rpcCall(readBinlogFilesTimestamps, $root.mysqlctl.ReadBinlogFilesTimestampsRequest, $root.mysqlctl.ReadBinlogFilesTimestampsResponse, request, callback); + }, "name", { value: "ReadBinlogFilesTimestamps" }); + + /** + * Calls ReadBinlogFilesTimestamps. + * @function readBinlogFilesTimestamps + * @memberof mysqlctl.MysqlCtl + * @instance + * @param {mysqlctl.IReadBinlogFilesTimestampsRequest} request ReadBinlogFilesTimestampsRequest message or plain object + * @returns {Promise} Promise + * @variation 2 + */ + /** * Callback as used by {@link mysqlctl.MysqlCtl#reinitConfig}. * @memberof mysqlctl.MysqlCtl @@ -31389,6 +32330,39 @@ export const mysqlctl = $root.mysqlctl = (() => { * @variation 2 */ + /** + * Callback as used by {@link mysqlctl.MysqlCtl#versionString}. + * @memberof mysqlctl.MysqlCtl + * @typedef VersionStringCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {mysqlctl.VersionStringResponse} [response] VersionStringResponse + */ + + /** + * Calls VersionString. 
+ * @function versionString + * @memberof mysqlctl.MysqlCtl + * @instance + * @param {mysqlctl.IVersionStringRequest} request VersionStringRequest message or plain object + * @param {mysqlctl.MysqlCtl.VersionStringCallback} callback Node-style callback called with the error, if any, and VersionStringResponse + * @returns {undefined} + * @variation 1 + */ + Object.defineProperty(MysqlCtl.prototype.versionString = function versionString(request, callback) { + return this.rpcCall(versionString, $root.mysqlctl.VersionStringRequest, $root.mysqlctl.VersionStringResponse, request, callback); + }, "name", { value: "VersionString" }); + + /** + * Calls VersionString. + * @function versionString + * @memberof mysqlctl.MysqlCtl + * @instance + * @param {mysqlctl.IVersionStringRequest} request VersionStringRequest message or plain object + * @returns {Promise} Promise + * @variation 2 + */ + return MysqlCtl; })(); @@ -35963,6 +36937,284 @@ export const topodata = $root.topodata = (() => { return ShardTabletControl; })(); + topodata.ThrottledAppRule = (function() { + + /** + * Properties of a ThrottledAppRule. + * @memberof topodata + * @interface IThrottledAppRule + * @property {string|null} [name] ThrottledAppRule name + * @property {number|null} [ratio] ThrottledAppRule ratio + * @property {vttime.ITime|null} [expires_at] ThrottledAppRule expires_at + * @property {boolean|null} [exempt] ThrottledAppRule exempt + */ + + /** + * Constructs a new ThrottledAppRule. + * @memberof topodata + * @classdesc Represents a ThrottledAppRule. + * @implements IThrottledAppRule + * @constructor + * @param {topodata.IThrottledAppRule=} [properties] Properties to set + */ + function ThrottledAppRule(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ThrottledAppRule name. 
+ * @member {string} name + * @memberof topodata.ThrottledAppRule + * @instance + */ + ThrottledAppRule.prototype.name = ""; + + /** + * ThrottledAppRule ratio. + * @member {number} ratio + * @memberof topodata.ThrottledAppRule + * @instance + */ + ThrottledAppRule.prototype.ratio = 0; + + /** + * ThrottledAppRule expires_at. + * @member {vttime.ITime|null|undefined} expires_at + * @memberof topodata.ThrottledAppRule + * @instance + */ + ThrottledAppRule.prototype.expires_at = null; + + /** + * ThrottledAppRule exempt. + * @member {boolean} exempt + * @memberof topodata.ThrottledAppRule + * @instance + */ + ThrottledAppRule.prototype.exempt = false; + + /** + * Creates a new ThrottledAppRule instance using the specified properties. + * @function create + * @memberof topodata.ThrottledAppRule + * @static + * @param {topodata.IThrottledAppRule=} [properties] Properties to set + * @returns {topodata.ThrottledAppRule} ThrottledAppRule instance + */ + ThrottledAppRule.create = function create(properties) { + return new ThrottledAppRule(properties); + }; + + /** + * Encodes the specified ThrottledAppRule message. Does not implicitly {@link topodata.ThrottledAppRule.verify|verify} messages. 
+ * @function encode + * @memberof topodata.ThrottledAppRule + * @static + * @param {topodata.IThrottledAppRule} message ThrottledAppRule message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ThrottledAppRule.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.ratio != null && Object.hasOwnProperty.call(message, "ratio")) + writer.uint32(/* id 2, wireType 1 =*/17).double(message.ratio); + if (message.expires_at != null && Object.hasOwnProperty.call(message, "expires_at")) + $root.vttime.Time.encode(message.expires_at, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.exempt != null && Object.hasOwnProperty.call(message, "exempt")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.exempt); + return writer; + }; + + /** + * Encodes the specified ThrottledAppRule message, length delimited. Does not implicitly {@link topodata.ThrottledAppRule.verify|verify} messages. + * @function encodeDelimited + * @memberof topodata.ThrottledAppRule + * @static + * @param {topodata.IThrottledAppRule} message ThrottledAppRule message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ThrottledAppRule.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a ThrottledAppRule message from the specified reader or buffer. 
+ * @function decode + * @memberof topodata.ThrottledAppRule + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {topodata.ThrottledAppRule} ThrottledAppRule + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ThrottledAppRule.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.topodata.ThrottledAppRule(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.name = reader.string(); + break; + } + case 2: { + message.ratio = reader.double(); + break; + } + case 3: { + message.expires_at = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 4: { + message.exempt = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a ThrottledAppRule message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof topodata.ThrottledAppRule + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {topodata.ThrottledAppRule} ThrottledAppRule + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ThrottledAppRule.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a ThrottledAppRule message. 
+ * @function verify + * @memberof topodata.ThrottledAppRule + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ThrottledAppRule.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.ratio != null && message.hasOwnProperty("ratio")) + if (typeof message.ratio !== "number") + return "ratio: number expected"; + if (message.expires_at != null && message.hasOwnProperty("expires_at")) { + let error = $root.vttime.Time.verify(message.expires_at); + if (error) + return "expires_at." + error; + } + if (message.exempt != null && message.hasOwnProperty("exempt")) + if (typeof message.exempt !== "boolean") + return "exempt: boolean expected"; + return null; + }; + + /** + * Creates a ThrottledAppRule message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof topodata.ThrottledAppRule + * @static + * @param {Object.} object Plain object + * @returns {topodata.ThrottledAppRule} ThrottledAppRule + */ + ThrottledAppRule.fromObject = function fromObject(object) { + if (object instanceof $root.topodata.ThrottledAppRule) + return object; + let message = new $root.topodata.ThrottledAppRule(); + if (object.name != null) + message.name = String(object.name); + if (object.ratio != null) + message.ratio = Number(object.ratio); + if (object.expires_at != null) { + if (typeof object.expires_at !== "object") + throw TypeError(".topodata.ThrottledAppRule.expires_at: object expected"); + message.expires_at = $root.vttime.Time.fromObject(object.expires_at); + } + if (object.exempt != null) + message.exempt = Boolean(object.exempt); + return message; + }; + + /** + * Creates a plain object from a ThrottledAppRule message. 
Also converts values to other types if specified. + * @function toObject + * @memberof topodata.ThrottledAppRule + * @static + * @param {topodata.ThrottledAppRule} message ThrottledAppRule + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ThrottledAppRule.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.name = ""; + object.ratio = 0; + object.expires_at = null; + object.exempt = false; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.ratio != null && message.hasOwnProperty("ratio")) + object.ratio = options.json && !isFinite(message.ratio) ? String(message.ratio) : message.ratio; + if (message.expires_at != null && message.hasOwnProperty("expires_at")) + object.expires_at = $root.vttime.Time.toObject(message.expires_at, options); + if (message.exempt != null && message.hasOwnProperty("exempt")) + object.exempt = message.exempt; + return object; + }; + + /** + * Converts this ThrottledAppRule to JSON. 
+ * @function toJSON + * @memberof topodata.ThrottledAppRule + * @instance + * @returns {Object.} JSON object + */ + ThrottledAppRule.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ThrottledAppRule + * @function getTypeUrl + * @memberof topodata.ThrottledAppRule + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ThrottledAppRule.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/topodata.ThrottledAppRule"; + }; + + return ThrottledAppRule; + })(); + topodata.ThrottlerConfig = (function() { /** @@ -35973,6 +37225,7 @@ export const topodata = $root.topodata = (() => { * @property {number|null} [threshold] ThrottlerConfig threshold * @property {string|null} [custom_query] ThrottlerConfig custom_query * @property {boolean|null} [check_as_check_self] ThrottlerConfig check_as_check_self + * @property {Object.|null} [throttled_apps] ThrottlerConfig throttled_apps */ /** @@ -35984,6 +37237,7 @@ export const topodata = $root.topodata = (() => { * @param {topodata.IThrottlerConfig=} [properties] Properties to set */ function ThrottlerConfig(properties) { + this.throttled_apps = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -36022,6 +37276,14 @@ export const topodata = $root.topodata = (() => { */ ThrottlerConfig.prototype.check_as_check_self = false; + /** + * ThrottlerConfig throttled_apps. + * @member {Object.} throttled_apps + * @memberof topodata.ThrottlerConfig + * @instance + */ + ThrottlerConfig.prototype.throttled_apps = $util.emptyObject; + /** * Creates a new ThrottlerConfig instance using the specified properties. 
* @function create @@ -36054,6 +37316,11 @@ export const topodata = $root.topodata = (() => { writer.uint32(/* id 3, wireType 2 =*/26).string(message.custom_query); if (message.check_as_check_self != null && Object.hasOwnProperty.call(message, "check_as_check_self")) writer.uint32(/* id 4, wireType 0 =*/32).bool(message.check_as_check_self); + if (message.throttled_apps != null && Object.hasOwnProperty.call(message, "throttled_apps")) + for (let keys = Object.keys(message.throttled_apps), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 5, wireType 2 =*/42).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.topodata.ThrottledAppRule.encode(message.throttled_apps[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } return writer; }; @@ -36084,7 +37351,7 @@ export const topodata = $root.topodata = (() => { ThrottlerConfig.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.topodata.ThrottlerConfig(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.topodata.ThrottlerConfig(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -36104,6 +37371,29 @@ export const topodata = $root.topodata = (() => { message.check_as_check_self = reader.bool(); break; } + case 5: { + if (message.throttled_apps === $util.emptyObject) + message.throttled_apps = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.topodata.ThrottledAppRule.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.throttled_apps[key] = value; + break; + } default: reader.skipType(tag & 7); break; @@ -36151,6 +37441,16 @@ export const topodata = $root.topodata = (() => { if (message.check_as_check_self != null && message.hasOwnProperty("check_as_check_self")) if (typeof message.check_as_check_self !== "boolean") return "check_as_check_self: boolean expected"; + if (message.throttled_apps != null && message.hasOwnProperty("throttled_apps")) { + if (!$util.isObject(message.throttled_apps)) + return "throttled_apps: object expected"; + let key = Object.keys(message.throttled_apps); + for (let i = 0; i < key.length; ++i) { + let error = $root.topodata.ThrottledAppRule.verify(message.throttled_apps[key[i]]); + if (error) + return "throttled_apps." 
+ error; + } + } return null; }; @@ -36174,6 +37474,16 @@ export const topodata = $root.topodata = (() => { message.custom_query = String(object.custom_query); if (object.check_as_check_self != null) message.check_as_check_self = Boolean(object.check_as_check_self); + if (object.throttled_apps) { + if (typeof object.throttled_apps !== "object") + throw TypeError(".topodata.ThrottlerConfig.throttled_apps: object expected"); + message.throttled_apps = {}; + for (let keys = Object.keys(object.throttled_apps), i = 0; i < keys.length; ++i) { + if (typeof object.throttled_apps[keys[i]] !== "object") + throw TypeError(".topodata.ThrottlerConfig.throttled_apps: object expected"); + message.throttled_apps[keys[i]] = $root.topodata.ThrottledAppRule.fromObject(object.throttled_apps[keys[i]]); + } + } return message; }; @@ -36190,6 +37500,8 @@ export const topodata = $root.topodata = (() => { if (!options) options = {}; let object = {}; + if (options.objects || options.defaults) + object.throttled_apps = {}; if (options.defaults) { object.enabled = false; object.threshold = 0; @@ -36204,6 +37516,12 @@ export const topodata = $root.topodata = (() => { object.custom_query = message.custom_query; if (message.check_as_check_self != null && message.hasOwnProperty("check_as_check_self")) object.check_as_check_self = message.check_as_check_self; + let keys2; + if (message.throttled_apps && (keys2 = Object.keys(message.throttled_apps)).length) { + object.throttled_apps = {}; + for (let j = 0; j < keys2.length; ++j) + object.throttled_apps[keys2[j]] = $root.topodata.ThrottledAppRule.toObject(message.throttled_apps[keys2[j]], options); + } return object; }; @@ -38322,6 +39640,22 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { */ const tabletmanagerdata = {}; + /** + * TabletSelectionPreference enum. 
+ * @name tabletmanagerdata.TabletSelectionPreference + * @enum {number} + * @property {number} ANY=0 ANY value + * @property {number} INORDER=1 INORDER value + * @property {number} UNKNOWN=3 UNKNOWN value + */ + tabletmanagerdata.TabletSelectionPreference = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "ANY"] = 0; + values[valuesById[1] = "INORDER"] = 1; + values[valuesById[3] = "UNKNOWN"] = 3; + return values; + })(); + tabletmanagerdata.TableDefinition = (function() { /** @@ -45117,6 +46451,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { * @property {tabletmanagerdata.ISchemaDefinition|null} [before_schema] ApplySchemaRequest before_schema * @property {tabletmanagerdata.ISchemaDefinition|null} [after_schema] ApplySchemaRequest after_schema * @property {string|null} [sql_mode] ApplySchemaRequest sql_mode + * @property {number|Long|null} [batch_size] ApplySchemaRequest batch_size */ /** @@ -45182,6 +46517,14 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { */ ApplySchemaRequest.prototype.sql_mode = ""; + /** + * ApplySchemaRequest batch_size. + * @member {number|Long} batch_size + * @memberof tabletmanagerdata.ApplySchemaRequest + * @instance + */ + ApplySchemaRequest.prototype.batch_size = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + /** * Creates a new ApplySchemaRequest instance using the specified properties. 
* @function create @@ -45218,6 +46561,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { $root.tabletmanagerdata.SchemaDefinition.encode(message.after_schema, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); if (message.sql_mode != null && Object.hasOwnProperty.call(message, "sql_mode")) writer.uint32(/* id 6, wireType 2 =*/50).string(message.sql_mode); + if (message.batch_size != null && Object.hasOwnProperty.call(message, "batch_size")) + writer.uint32(/* id 7, wireType 0 =*/56).int64(message.batch_size); return writer; }; @@ -45276,6 +46621,10 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.sql_mode = reader.string(); break; } + case 7: { + message.batch_size = reader.int64(); + break; + } default: reader.skipType(tag & 7); break; @@ -45333,6 +46682,9 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { if (message.sql_mode != null && message.hasOwnProperty("sql_mode")) if (!$util.isString(message.sql_mode)) return "sql_mode: string expected"; + if (message.batch_size != null && message.hasOwnProperty("batch_size")) + if (!$util.isInteger(message.batch_size) && !(message.batch_size && $util.isInteger(message.batch_size.low) && $util.isInteger(message.batch_size.high))) + return "batch_size: integer|Long expected"; return null; }; @@ -45366,6 +46718,15 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { } if (object.sql_mode != null) message.sql_mode = String(object.sql_mode); + if (object.batch_size != null) + if ($util.Long) + (message.batch_size = $util.Long.fromValue(object.batch_size)).unsigned = false; + else if (typeof object.batch_size === "string") + message.batch_size = parseInt(object.batch_size, 10); + else if (typeof object.batch_size === "number") + message.batch_size = object.batch_size; + else if (typeof object.batch_size === "object") + message.batch_size = new $util.LongBits(object.batch_size.low >>> 0, object.batch_size.high >>> 0).toNumber(); 
return message; }; @@ -45389,6 +46750,11 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { object.before_schema = null; object.after_schema = null; object.sql_mode = ""; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.batch_size = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.batch_size = options.longs === String ? "0" : 0; } if (message.sql != null && message.hasOwnProperty("sql")) object.sql = message.sql; @@ -45402,6 +46768,11 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { object.after_schema = $root.tabletmanagerdata.SchemaDefinition.toObject(message.after_schema, options); if (message.sql_mode != null && message.hasOwnProperty("sql_mode")) object.sql_mode = message.sql_mode; + if (message.batch_size != null && message.hasOwnProperty("batch_size")) + if (typeof message.batch_size === "number") + object.batch_size = options.longs === String ? String(message.batch_size) : message.batch_size; + else + object.batch_size = options.longs === String ? $util.Long.prototype.toString.call(message.batch_size) : options.longs === Number ? new $util.LongBits(message.batch_size.low >>> 0, message.batch_size.high >>> 0).toNumber() : message.batch_size; return object; }; @@ -57938,6 +59309,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { * @property {number|Long|null} [concurrency] BackupRequest concurrency * @property {boolean|null} [allow_primary] BackupRequest allow_primary * @property {string|null} [incremental_from_pos] BackupRequest incremental_from_pos + * @property {boolean|null} [upgrade_safe] BackupRequest upgrade_safe */ /** @@ -57979,6 +59351,14 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { */ BackupRequest.prototype.incremental_from_pos = ""; + /** + * BackupRequest upgrade_safe. 
+ * @member {boolean} upgrade_safe + * @memberof tabletmanagerdata.BackupRequest + * @instance + */ + BackupRequest.prototype.upgrade_safe = false; + /** * Creates a new BackupRequest instance using the specified properties. * @function create @@ -58009,6 +59389,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { writer.uint32(/* id 2, wireType 0 =*/16).bool(message.allow_primary); if (message.incremental_from_pos != null && Object.hasOwnProperty.call(message, "incremental_from_pos")) writer.uint32(/* id 3, wireType 2 =*/26).string(message.incremental_from_pos); + if (message.upgrade_safe != null && Object.hasOwnProperty.call(message, "upgrade_safe")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.upgrade_safe); return writer; }; @@ -58055,6 +59437,10 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.incremental_from_pos = reader.string(); break; } + case 4: { + message.upgrade_safe = reader.bool(); + break; + } default: reader.skipType(tag & 7); break; @@ -58099,6 +59485,9 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { if (message.incremental_from_pos != null && message.hasOwnProperty("incremental_from_pos")) if (!$util.isString(message.incremental_from_pos)) return "incremental_from_pos: string expected"; + if (message.upgrade_safe != null && message.hasOwnProperty("upgrade_safe")) + if (typeof message.upgrade_safe !== "boolean") + return "upgrade_safe: boolean expected"; return null; }; @@ -58127,6 +59516,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.allow_primary = Boolean(object.allow_primary); if (object.incremental_from_pos != null) message.incremental_from_pos = String(object.incremental_from_pos); + if (object.upgrade_safe != null) + message.upgrade_safe = Boolean(object.upgrade_safe); return message; }; @@ -58151,6 +59542,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { object.concurrency = options.longs === String 
? "0" : 0; object.allow_primary = false; object.incremental_from_pos = ""; + object.upgrade_safe = false; } if (message.concurrency != null && message.hasOwnProperty("concurrency")) if (typeof message.concurrency === "number") @@ -58161,6 +59553,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { object.allow_primary = message.allow_primary; if (message.incremental_from_pos != null && message.hasOwnProperty("incremental_from_pos")) object.incremental_from_pos = message.incremental_from_pos; + if (message.upgrade_safe != null && message.hasOwnProperty("upgrade_safe")) + object.upgrade_safe = message.upgrade_safe; return object; }; @@ -58410,6 +59804,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { * @property {vttime.ITime|null} [backup_time] RestoreFromBackupRequest backup_time * @property {string|null} [restore_to_pos] RestoreFromBackupRequest restore_to_pos * @property {boolean|null} [dry_run] RestoreFromBackupRequest dry_run + * @property {vttime.ITime|null} [restore_to_timestamp] RestoreFromBackupRequest restore_to_timestamp */ /** @@ -58451,6 +59846,14 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { */ RestoreFromBackupRequest.prototype.dry_run = false; + /** + * RestoreFromBackupRequest restore_to_timestamp. + * @member {vttime.ITime|null|undefined} restore_to_timestamp + * @memberof tabletmanagerdata.RestoreFromBackupRequest + * @instance + */ + RestoreFromBackupRequest.prototype.restore_to_timestamp = null; + /** * Creates a new RestoreFromBackupRequest instance using the specified properties. 
* @function create @@ -58481,6 +59884,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { writer.uint32(/* id 2, wireType 2 =*/18).string(message.restore_to_pos); if (message.dry_run != null && Object.hasOwnProperty.call(message, "dry_run")) writer.uint32(/* id 3, wireType 0 =*/24).bool(message.dry_run); + if (message.restore_to_timestamp != null && Object.hasOwnProperty.call(message, "restore_to_timestamp")) + $root.vttime.Time.encode(message.restore_to_timestamp, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; @@ -58527,6 +59932,10 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.dry_run = reader.bool(); break; } + case 4: { + message.restore_to_timestamp = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -58573,6 +59982,11 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { if (message.dry_run != null && message.hasOwnProperty("dry_run")) if (typeof message.dry_run !== "boolean") return "dry_run: boolean expected"; + if (message.restore_to_timestamp != null && message.hasOwnProperty("restore_to_timestamp")) { + let error = $root.vttime.Time.verify(message.restore_to_timestamp); + if (error) + return "restore_to_timestamp." 
+ error; + } return null; }; @@ -58597,6 +60011,11 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.restore_to_pos = String(object.restore_to_pos); if (object.dry_run != null) message.dry_run = Boolean(object.dry_run); + if (object.restore_to_timestamp != null) { + if (typeof object.restore_to_timestamp !== "object") + throw TypeError(".tabletmanagerdata.RestoreFromBackupRequest.restore_to_timestamp: object expected"); + message.restore_to_timestamp = $root.vttime.Time.fromObject(object.restore_to_timestamp); + } return message; }; @@ -58617,6 +60036,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { object.backup_time = null; object.restore_to_pos = ""; object.dry_run = false; + object.restore_to_timestamp = null; } if (message.backup_time != null && message.hasOwnProperty("backup_time")) object.backup_time = $root.vttime.Time.toObject(message.backup_time, options); @@ -58624,6 +60044,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { object.restore_to_pos = message.restore_to_pos; if (message.dry_run != null && message.hasOwnProperty("dry_run")) object.dry_run = message.dry_run; + if (message.restore_to_timestamp != null && message.hasOwnProperty("restore_to_timestamp")) + object.restore_to_timestamp = $root.vttime.Time.toObject(message.restore_to_timestamp, options); return object; }; @@ -58864,29 +60286,36 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { return RestoreFromBackupResponse; })(); - tabletmanagerdata.VDiffRequest = (function() { + tabletmanagerdata.CreateVReplicationWorkflowRequest = (function() { /** - * Properties of a VDiffRequest. + * Properties of a CreateVReplicationWorkflowRequest. 
* @memberof tabletmanagerdata - * @interface IVDiffRequest - * @property {string|null} [keyspace] VDiffRequest keyspace - * @property {string|null} [workflow] VDiffRequest workflow - * @property {string|null} [action] VDiffRequest action - * @property {string|null} [action_arg] VDiffRequest action_arg - * @property {string|null} [vdiff_uuid] VDiffRequest vdiff_uuid - * @property {tabletmanagerdata.IVDiffOptions|null} [options] VDiffRequest options + * @interface ICreateVReplicationWorkflowRequest + * @property {string|null} [workflow] CreateVReplicationWorkflowRequest workflow + * @property {Array.|null} [binlog_source] CreateVReplicationWorkflowRequest binlog_source + * @property {Array.|null} [cells] CreateVReplicationWorkflowRequest cells + * @property {Array.|null} [tablet_types] CreateVReplicationWorkflowRequest tablet_types + * @property {tabletmanagerdata.TabletSelectionPreference|null} [tablet_selection_preference] CreateVReplicationWorkflowRequest tablet_selection_preference + * @property {binlogdata.VReplicationWorkflowType|null} [workflow_type] CreateVReplicationWorkflowRequest workflow_type + * @property {binlogdata.VReplicationWorkflowSubType|null} [workflow_sub_type] CreateVReplicationWorkflowRequest workflow_sub_type + * @property {boolean|null} [defer_secondary_keys] CreateVReplicationWorkflowRequest defer_secondary_keys + * @property {boolean|null} [auto_start] CreateVReplicationWorkflowRequest auto_start + * @property {boolean|null} [stop_after_copy] CreateVReplicationWorkflowRequest stop_after_copy */ /** - * Constructs a new VDiffRequest. + * Constructs a new CreateVReplicationWorkflowRequest. * @memberof tabletmanagerdata - * @classdesc Represents a VDiffRequest. - * @implements IVDiffRequest + * @classdesc Represents a CreateVReplicationWorkflowRequest. 
+ * @implements ICreateVReplicationWorkflowRequest * @constructor - * @param {tabletmanagerdata.IVDiffRequest=} [properties] Properties to set + * @param {tabletmanagerdata.ICreateVReplicationWorkflowRequest=} [properties] Properties to set */ - function VDiffRequest(properties) { + function CreateVReplicationWorkflowRequest(properties) { + this.binlog_source = []; + this.cells = []; + this.tablet_types = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -58894,145 +60323,218 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { } /** - * VDiffRequest keyspace. - * @member {string} keyspace - * @memberof tabletmanagerdata.VDiffRequest + * CreateVReplicationWorkflowRequest workflow. + * @member {string} workflow + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest * @instance */ - VDiffRequest.prototype.keyspace = ""; + CreateVReplicationWorkflowRequest.prototype.workflow = ""; /** - * VDiffRequest workflow. - * @member {string} workflow - * @memberof tabletmanagerdata.VDiffRequest + * CreateVReplicationWorkflowRequest binlog_source. + * @member {Array.} binlog_source + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest * @instance */ - VDiffRequest.prototype.workflow = ""; + CreateVReplicationWorkflowRequest.prototype.binlog_source = $util.emptyArray; /** - * VDiffRequest action. - * @member {string} action - * @memberof tabletmanagerdata.VDiffRequest + * CreateVReplicationWorkflowRequest cells. + * @member {Array.} cells + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest * @instance */ - VDiffRequest.prototype.action = ""; + CreateVReplicationWorkflowRequest.prototype.cells = $util.emptyArray; /** - * VDiffRequest action_arg. - * @member {string} action_arg - * @memberof tabletmanagerdata.VDiffRequest + * CreateVReplicationWorkflowRequest tablet_types. 
+ * @member {Array.} tablet_types + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest * @instance */ - VDiffRequest.prototype.action_arg = ""; + CreateVReplicationWorkflowRequest.prototype.tablet_types = $util.emptyArray; /** - * VDiffRequest vdiff_uuid. - * @member {string} vdiff_uuid - * @memberof tabletmanagerdata.VDiffRequest + * CreateVReplicationWorkflowRequest tablet_selection_preference. + * @member {tabletmanagerdata.TabletSelectionPreference} tablet_selection_preference + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest * @instance */ - VDiffRequest.prototype.vdiff_uuid = ""; + CreateVReplicationWorkflowRequest.prototype.tablet_selection_preference = 0; /** - * VDiffRequest options. - * @member {tabletmanagerdata.IVDiffOptions|null|undefined} options - * @memberof tabletmanagerdata.VDiffRequest + * CreateVReplicationWorkflowRequest workflow_type. + * @member {binlogdata.VReplicationWorkflowType} workflow_type + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest * @instance */ - VDiffRequest.prototype.options = null; + CreateVReplicationWorkflowRequest.prototype.workflow_type = 0; /** - * Creates a new VDiffRequest instance using the specified properties. + * CreateVReplicationWorkflowRequest workflow_sub_type. + * @member {binlogdata.VReplicationWorkflowSubType} workflow_sub_type + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest + * @instance + */ + CreateVReplicationWorkflowRequest.prototype.workflow_sub_type = 0; + + /** + * CreateVReplicationWorkflowRequest defer_secondary_keys. + * @member {boolean} defer_secondary_keys + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest + * @instance + */ + CreateVReplicationWorkflowRequest.prototype.defer_secondary_keys = false; + + /** + * CreateVReplicationWorkflowRequest auto_start. 
+ * @member {boolean} auto_start + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest + * @instance + */ + CreateVReplicationWorkflowRequest.prototype.auto_start = false; + + /** + * CreateVReplicationWorkflowRequest stop_after_copy. + * @member {boolean} stop_after_copy + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest + * @instance + */ + CreateVReplicationWorkflowRequest.prototype.stop_after_copy = false; + + /** + * Creates a new CreateVReplicationWorkflowRequest instance using the specified properties. * @function create - * @memberof tabletmanagerdata.VDiffRequest + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest * @static - * @param {tabletmanagerdata.IVDiffRequest=} [properties] Properties to set - * @returns {tabletmanagerdata.VDiffRequest} VDiffRequest instance + * @param {tabletmanagerdata.ICreateVReplicationWorkflowRequest=} [properties] Properties to set + * @returns {tabletmanagerdata.CreateVReplicationWorkflowRequest} CreateVReplicationWorkflowRequest instance */ - VDiffRequest.create = function create(properties) { - return new VDiffRequest(properties); + CreateVReplicationWorkflowRequest.create = function create(properties) { + return new CreateVReplicationWorkflowRequest(properties); }; /** - * Encodes the specified VDiffRequest message. Does not implicitly {@link tabletmanagerdata.VDiffRequest.verify|verify} messages. + * Encodes the specified CreateVReplicationWorkflowRequest message. Does not implicitly {@link tabletmanagerdata.CreateVReplicationWorkflowRequest.verify|verify} messages. 
* @function encode - * @memberof tabletmanagerdata.VDiffRequest + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest * @static - * @param {tabletmanagerdata.IVDiffRequest} message VDiffRequest message or plain object to encode + * @param {tabletmanagerdata.ICreateVReplicationWorkflowRequest} message CreateVReplicationWorkflowRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VDiffRequest.encode = function encode(message, writer) { + CreateVReplicationWorkflowRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.workflow); - if (message.action != null && Object.hasOwnProperty.call(message, "action")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.action); - if (message.action_arg != null && Object.hasOwnProperty.call(message, "action_arg")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.action_arg); - if (message.vdiff_uuid != null && Object.hasOwnProperty.call(message, "vdiff_uuid")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.vdiff_uuid); - if (message.options != null && Object.hasOwnProperty.call(message, "options")) - $root.tabletmanagerdata.VDiffOptions.encode(message.options, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); + writer.uint32(/* id 1, wireType 2 =*/10).string(message.workflow); + if (message.binlog_source != null && message.binlog_source.length) + for (let i = 0; i < message.binlog_source.length; ++i) + $root.binlogdata.BinlogSource.encode(message.binlog_source[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.cells != null && message.cells.length) + 
for (let i = 0; i < message.cells.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.cells[i]); + if (message.tablet_types != null && message.tablet_types.length) { + writer.uint32(/* id 4, wireType 2 =*/34).fork(); + for (let i = 0; i < message.tablet_types.length; ++i) + writer.int32(message.tablet_types[i]); + writer.ldelim(); + } + if (message.tablet_selection_preference != null && Object.hasOwnProperty.call(message, "tablet_selection_preference")) + writer.uint32(/* id 5, wireType 0 =*/40).int32(message.tablet_selection_preference); + if (message.workflow_type != null && Object.hasOwnProperty.call(message, "workflow_type")) + writer.uint32(/* id 6, wireType 0 =*/48).int32(message.workflow_type); + if (message.workflow_sub_type != null && Object.hasOwnProperty.call(message, "workflow_sub_type")) + writer.uint32(/* id 7, wireType 0 =*/56).int32(message.workflow_sub_type); + if (message.defer_secondary_keys != null && Object.hasOwnProperty.call(message, "defer_secondary_keys")) + writer.uint32(/* id 8, wireType 0 =*/64).bool(message.defer_secondary_keys); + if (message.auto_start != null && Object.hasOwnProperty.call(message, "auto_start")) + writer.uint32(/* id 9, wireType 0 =*/72).bool(message.auto_start); + if (message.stop_after_copy != null && Object.hasOwnProperty.call(message, "stop_after_copy")) + writer.uint32(/* id 10, wireType 0 =*/80).bool(message.stop_after_copy); return writer; }; /** - * Encodes the specified VDiffRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffRequest.verify|verify} messages. + * Encodes the specified CreateVReplicationWorkflowRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.CreateVReplicationWorkflowRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof tabletmanagerdata.VDiffRequest + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest * @static - * @param {tabletmanagerdata.IVDiffRequest} message VDiffRequest message or plain object to encode + * @param {tabletmanagerdata.ICreateVReplicationWorkflowRequest} message CreateVReplicationWorkflowRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VDiffRequest.encodeDelimited = function encodeDelimited(message, writer) { + CreateVReplicationWorkflowRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a VDiffRequest message from the specified reader or buffer. + * Decodes a CreateVReplicationWorkflowRequest message from the specified reader or buffer. * @function decode - * @memberof tabletmanagerdata.VDiffRequest + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {tabletmanagerdata.VDiffRequest} VDiffRequest + * @returns {tabletmanagerdata.CreateVReplicationWorkflowRequest} CreateVReplicationWorkflowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VDiffRequest.decode = function decode(reader, length) { + CreateVReplicationWorkflowRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.VDiffRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tabletmanagerdata.CreateVReplicationWorkflowRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); + message.workflow = reader.string(); break; } case 2: { - message.workflow = reader.string(); + if (!(message.binlog_source && message.binlog_source.length)) + message.binlog_source = []; + message.binlog_source.push($root.binlogdata.BinlogSource.decode(reader, reader.uint32())); break; } case 3: { - message.action = reader.string(); + if (!(message.cells && message.cells.length)) + message.cells = []; + message.cells.push(reader.string()); break; } case 4: { - message.action_arg = reader.string(); + if (!(message.tablet_types && message.tablet_types.length)) + message.tablet_types = []; + if ((tag & 7) === 2) { + let end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.tablet_types.push(reader.int32()); + } else + message.tablet_types.push(reader.int32()); break; } case 5: { - message.vdiff_uuid = reader.string(); + message.tablet_selection_preference = reader.int32(); break; } case 6: { - message.options = $root.tabletmanagerdata.VDiffOptions.decode(reader, reader.uint32()); + message.workflow_type = reader.int32(); + break; + } + case 7: { + message.workflow_sub_type = reader.int32(); + break; + } + case 8: { + message.defer_secondary_keys = reader.bool(); + break; + } + case 9: { + message.auto_start = reader.bool(); + break; + } + case 10: { + message.stop_after_copy = reader.bool(); break; } default: @@ -59044,170 +60546,389 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { }; /** - * Decodes a VDiffRequest message from the specified reader or buffer, length delimited. + * Decodes a CreateVReplicationWorkflowRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof tabletmanagerdata.VDiffRequest + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {tabletmanagerdata.VDiffRequest} VDiffRequest + * @returns {tabletmanagerdata.CreateVReplicationWorkflowRequest} CreateVReplicationWorkflowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VDiffRequest.decodeDelimited = function decodeDelimited(reader) { + CreateVReplicationWorkflowRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a VDiffRequest message. + * Verifies a CreateVReplicationWorkflowRequest message. * @function verify - * @memberof tabletmanagerdata.VDiffRequest + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - VDiffRequest.verify = function verify(message) { + CreateVReplicationWorkflowRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; if (message.workflow != null && message.hasOwnProperty("workflow")) if (!$util.isString(message.workflow)) return "workflow: string expected"; - if (message.action != null && message.hasOwnProperty("action")) - if (!$util.isString(message.action)) - return "action: string expected"; - if (message.action_arg != null && message.hasOwnProperty("action_arg")) - if (!$util.isString(message.action_arg)) - return "action_arg: string expected"; - if (message.vdiff_uuid != null && 
message.hasOwnProperty("vdiff_uuid")) - if (!$util.isString(message.vdiff_uuid)) - return "vdiff_uuid: string expected"; - if (message.options != null && message.hasOwnProperty("options")) { - let error = $root.tabletmanagerdata.VDiffOptions.verify(message.options); - if (error) - return "options." + error; + if (message.binlog_source != null && message.hasOwnProperty("binlog_source")) { + if (!Array.isArray(message.binlog_source)) + return "binlog_source: array expected"; + for (let i = 0; i < message.binlog_source.length; ++i) { + let error = $root.binlogdata.BinlogSource.verify(message.binlog_source[i]); + if (error) + return "binlog_source." + error; + } + } + if (message.cells != null && message.hasOwnProperty("cells")) { + if (!Array.isArray(message.cells)) + return "cells: array expected"; + for (let i = 0; i < message.cells.length; ++i) + if (!$util.isString(message.cells[i])) + return "cells: string[] expected"; + } + if (message.tablet_types != null && message.hasOwnProperty("tablet_types")) { + if (!Array.isArray(message.tablet_types)) + return "tablet_types: array expected"; + for (let i = 0; i < message.tablet_types.length; ++i) + switch (message.tablet_types[i]) { + default: + return "tablet_types: enum value[] expected"; + case 0: + case 1: + case 1: + case 2: + case 3: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + } } + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + switch (message.tablet_selection_preference) { + default: + return "tablet_selection_preference: enum value expected"; + case 0: + case 1: + case 3: + break; + } + if (message.workflow_type != null && message.hasOwnProperty("workflow_type")) + switch (message.workflow_type) { + default: + return "workflow_type: enum value expected"; + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + break; + } + if (message.workflow_sub_type != null && message.hasOwnProperty("workflow_sub_type")) + switch 
(message.workflow_sub_type) { + default: + return "workflow_sub_type: enum value expected"; + case 0: + case 1: + case 2: + break; + } + if (message.defer_secondary_keys != null && message.hasOwnProperty("defer_secondary_keys")) + if (typeof message.defer_secondary_keys !== "boolean") + return "defer_secondary_keys: boolean expected"; + if (message.auto_start != null && message.hasOwnProperty("auto_start")) + if (typeof message.auto_start !== "boolean") + return "auto_start: boolean expected"; + if (message.stop_after_copy != null && message.hasOwnProperty("stop_after_copy")) + if (typeof message.stop_after_copy !== "boolean") + return "stop_after_copy: boolean expected"; return null; }; /** - * Creates a VDiffRequest message from a plain object. Also converts values to their respective internal types. + * Creates a CreateVReplicationWorkflowRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof tabletmanagerdata.VDiffRequest + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest * @static * @param {Object.} object Plain object - * @returns {tabletmanagerdata.VDiffRequest} VDiffRequest + * @returns {tabletmanagerdata.CreateVReplicationWorkflowRequest} CreateVReplicationWorkflowRequest */ - VDiffRequest.fromObject = function fromObject(object) { - if (object instanceof $root.tabletmanagerdata.VDiffRequest) + CreateVReplicationWorkflowRequest.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.CreateVReplicationWorkflowRequest) return object; - let message = new $root.tabletmanagerdata.VDiffRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); + let message = new $root.tabletmanagerdata.CreateVReplicationWorkflowRequest(); if (object.workflow != null) message.workflow = String(object.workflow); - if (object.action != null) - message.action = String(object.action); - if (object.action_arg != null) - 
message.action_arg = String(object.action_arg); - if (object.vdiff_uuid != null) - message.vdiff_uuid = String(object.vdiff_uuid); - if (object.options != null) { - if (typeof object.options !== "object") - throw TypeError(".tabletmanagerdata.VDiffRequest.options: object expected"); - message.options = $root.tabletmanagerdata.VDiffOptions.fromObject(object.options); + if (object.binlog_source) { + if (!Array.isArray(object.binlog_source)) + throw TypeError(".tabletmanagerdata.CreateVReplicationWorkflowRequest.binlog_source: array expected"); + message.binlog_source = []; + for (let i = 0; i < object.binlog_source.length; ++i) { + if (typeof object.binlog_source[i] !== "object") + throw TypeError(".tabletmanagerdata.CreateVReplicationWorkflowRequest.binlog_source: object expected"); + message.binlog_source[i] = $root.binlogdata.BinlogSource.fromObject(object.binlog_source[i]); + } } - return message; - }; - - /** - * Creates a plain object from a VDiffRequest message. Also converts values to other types if specified. 
- * @function toObject - * @memberof tabletmanagerdata.VDiffRequest - * @static - * @param {tabletmanagerdata.VDiffRequest} message VDiffRequest - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - VDiffRequest.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.keyspace = ""; - object.workflow = ""; - object.action = ""; - object.action_arg = ""; - object.vdiff_uuid = ""; - object.options = null; + if (object.cells) { + if (!Array.isArray(object.cells)) + throw TypeError(".tabletmanagerdata.CreateVReplicationWorkflowRequest.cells: array expected"); + message.cells = []; + for (let i = 0; i < object.cells.length; ++i) + message.cells[i] = String(object.cells[i]); } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.workflow != null && message.hasOwnProperty("workflow")) - object.workflow = message.workflow; - if (message.action != null && message.hasOwnProperty("action")) - object.action = message.action; - if (message.action_arg != null && message.hasOwnProperty("action_arg")) - object.action_arg = message.action_arg; - if (message.vdiff_uuid != null && message.hasOwnProperty("vdiff_uuid")) - object.vdiff_uuid = message.vdiff_uuid; - if (message.options != null && message.hasOwnProperty("options")) - object.options = $root.tabletmanagerdata.VDiffOptions.toObject(message.options, options); + if (object.tablet_types) { + if (!Array.isArray(object.tablet_types)) + throw TypeError(".tabletmanagerdata.CreateVReplicationWorkflowRequest.tablet_types: array expected"); + message.tablet_types = []; + for (let i = 0; i < object.tablet_types.length; ++i) + switch (object.tablet_types[i]) { + default: + if (typeof object.tablet_types[i] === "number") { + message.tablet_types[i] = object.tablet_types[i]; + break; + } + case "UNKNOWN": + case 0: + 
message.tablet_types[i] = 0; + break; + case "PRIMARY": + case 1: + message.tablet_types[i] = 1; + break; + case "MASTER": + case 1: + message.tablet_types[i] = 1; + break; + case "REPLICA": + case 2: + message.tablet_types[i] = 2; + break; + case "RDONLY": + case 3: + message.tablet_types[i] = 3; + break; + case "BATCH": + case 3: + message.tablet_types[i] = 3; + break; + case "SPARE": + case 4: + message.tablet_types[i] = 4; + break; + case "EXPERIMENTAL": + case 5: + message.tablet_types[i] = 5; + break; + case "BACKUP": + case 6: + message.tablet_types[i] = 6; + break; + case "RESTORE": + case 7: + message.tablet_types[i] = 7; + break; + case "DRAINED": + case 8: + message.tablet_types[i] = 8; + break; + } + } + switch (object.tablet_selection_preference) { + default: + if (typeof object.tablet_selection_preference === "number") { + message.tablet_selection_preference = object.tablet_selection_preference; + break; + } + break; + case "ANY": + case 0: + message.tablet_selection_preference = 0; + break; + case "INORDER": + case 1: + message.tablet_selection_preference = 1; + break; + case "UNKNOWN": + case 3: + message.tablet_selection_preference = 3; + break; + } + switch (object.workflow_type) { + default: + if (typeof object.workflow_type === "number") { + message.workflow_type = object.workflow_type; + break; + } + break; + case "Materialize": + case 0: + message.workflow_type = 0; + break; + case "MoveTables": + case 1: + message.workflow_type = 1; + break; + case "CreateLookupIndex": + case 2: + message.workflow_type = 2; + break; + case "Migrate": + case 3: + message.workflow_type = 3; + break; + case "Reshard": + case 4: + message.workflow_type = 4; + break; + case "OnlineDDL": + case 5: + message.workflow_type = 5; + break; + } + switch (object.workflow_sub_type) { + default: + if (typeof object.workflow_sub_type === "number") { + message.workflow_sub_type = object.workflow_sub_type; + break; + } + break; + case "None": + case 0: + 
message.workflow_sub_type = 0; + break; + case "Partial": + case 1: + message.workflow_sub_type = 1; + break; + case "AtomicCopy": + case 2: + message.workflow_sub_type = 2; + break; + } + if (object.defer_secondary_keys != null) + message.defer_secondary_keys = Boolean(object.defer_secondary_keys); + if (object.auto_start != null) + message.auto_start = Boolean(object.auto_start); + if (object.stop_after_copy != null) + message.stop_after_copy = Boolean(object.stop_after_copy); + return message; + }; + + /** + * Creates a plain object from a CreateVReplicationWorkflowRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest + * @static + * @param {tabletmanagerdata.CreateVReplicationWorkflowRequest} message CreateVReplicationWorkflowRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + CreateVReplicationWorkflowRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) { + object.binlog_source = []; + object.cells = []; + object.tablet_types = []; + } + if (options.defaults) { + object.workflow = ""; + object.tablet_selection_preference = options.enums === String ? "ANY" : 0; + object.workflow_type = options.enums === String ? "Materialize" : 0; + object.workflow_sub_type = options.enums === String ? 
"None" : 0; + object.defer_secondary_keys = false; + object.auto_start = false; + object.stop_after_copy = false; + } + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; + if (message.binlog_source && message.binlog_source.length) { + object.binlog_source = []; + for (let j = 0; j < message.binlog_source.length; ++j) + object.binlog_source[j] = $root.binlogdata.BinlogSource.toObject(message.binlog_source[j], options); + } + if (message.cells && message.cells.length) { + object.cells = []; + for (let j = 0; j < message.cells.length; ++j) + object.cells[j] = message.cells[j]; + } + if (message.tablet_types && message.tablet_types.length) { + object.tablet_types = []; + for (let j = 0; j < message.tablet_types.length; ++j) + object.tablet_types[j] = options.enums === String ? $root.topodata.TabletType[message.tablet_types[j]] === undefined ? message.tablet_types[j] : $root.topodata.TabletType[message.tablet_types[j]] : message.tablet_types[j]; + } + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + object.tablet_selection_preference = options.enums === String ? $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] === undefined ? message.tablet_selection_preference : $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] : message.tablet_selection_preference; + if (message.workflow_type != null && message.hasOwnProperty("workflow_type")) + object.workflow_type = options.enums === String ? $root.binlogdata.VReplicationWorkflowType[message.workflow_type] === undefined ? message.workflow_type : $root.binlogdata.VReplicationWorkflowType[message.workflow_type] : message.workflow_type; + if (message.workflow_sub_type != null && message.hasOwnProperty("workflow_sub_type")) + object.workflow_sub_type = options.enums === String ? 
$root.binlogdata.VReplicationWorkflowSubType[message.workflow_sub_type] === undefined ? message.workflow_sub_type : $root.binlogdata.VReplicationWorkflowSubType[message.workflow_sub_type] : message.workflow_sub_type; + if (message.defer_secondary_keys != null && message.hasOwnProperty("defer_secondary_keys")) + object.defer_secondary_keys = message.defer_secondary_keys; + if (message.auto_start != null && message.hasOwnProperty("auto_start")) + object.auto_start = message.auto_start; + if (message.stop_after_copy != null && message.hasOwnProperty("stop_after_copy")) + object.stop_after_copy = message.stop_after_copy; return object; }; /** - * Converts this VDiffRequest to JSON. + * Converts this CreateVReplicationWorkflowRequest to JSON. * @function toJSON - * @memberof tabletmanagerdata.VDiffRequest + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest * @instance * @returns {Object.} JSON object */ - VDiffRequest.prototype.toJSON = function toJSON() { + CreateVReplicationWorkflowRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for VDiffRequest + * Gets the default type url for CreateVReplicationWorkflowRequest * @function getTypeUrl - * @memberof tabletmanagerdata.VDiffRequest + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - VDiffRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + CreateVReplicationWorkflowRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/tabletmanagerdata.VDiffRequest"; + return typeUrlPrefix + "/tabletmanagerdata.CreateVReplicationWorkflowRequest"; }; - return VDiffRequest; + return CreateVReplicationWorkflowRequest; })(); - 
tabletmanagerdata.VDiffResponse = (function() { + tabletmanagerdata.CreateVReplicationWorkflowResponse = (function() { /** - * Properties of a VDiffResponse. + * Properties of a CreateVReplicationWorkflowResponse. * @memberof tabletmanagerdata - * @interface IVDiffResponse - * @property {number|Long|null} [id] VDiffResponse id - * @property {query.IQueryResult|null} [output] VDiffResponse output - * @property {string|null} [vdiff_uuid] VDiffResponse vdiff_uuid + * @interface ICreateVReplicationWorkflowResponse + * @property {query.IQueryResult|null} [result] CreateVReplicationWorkflowResponse result */ /** - * Constructs a new VDiffResponse. + * Constructs a new CreateVReplicationWorkflowResponse. * @memberof tabletmanagerdata - * @classdesc Represents a VDiffResponse. - * @implements IVDiffResponse + * @classdesc Represents a CreateVReplicationWorkflowResponse. + * @implements ICreateVReplicationWorkflowResponse * @constructor - * @param {tabletmanagerdata.IVDiffResponse=} [properties] Properties to set + * @param {tabletmanagerdata.ICreateVReplicationWorkflowResponse=} [properties] Properties to set */ - function VDiffResponse(properties) { + function CreateVReplicationWorkflowResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -59215,103 +60936,75 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { } /** - * VDiffResponse id. - * @member {number|Long} id - * @memberof tabletmanagerdata.VDiffResponse - * @instance - */ - VDiffResponse.prototype.id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * VDiffResponse output. - * @member {query.IQueryResult|null|undefined} output - * @memberof tabletmanagerdata.VDiffResponse - * @instance - */ - VDiffResponse.prototype.output = null; - - /** - * VDiffResponse vdiff_uuid. - * @member {string} vdiff_uuid - * @memberof tabletmanagerdata.VDiffResponse + * CreateVReplicationWorkflowResponse result. 
+ * @member {query.IQueryResult|null|undefined} result + * @memberof tabletmanagerdata.CreateVReplicationWorkflowResponse * @instance */ - VDiffResponse.prototype.vdiff_uuid = ""; + CreateVReplicationWorkflowResponse.prototype.result = null; /** - * Creates a new VDiffResponse instance using the specified properties. + * Creates a new CreateVReplicationWorkflowResponse instance using the specified properties. * @function create - * @memberof tabletmanagerdata.VDiffResponse + * @memberof tabletmanagerdata.CreateVReplicationWorkflowResponse * @static - * @param {tabletmanagerdata.IVDiffResponse=} [properties] Properties to set - * @returns {tabletmanagerdata.VDiffResponse} VDiffResponse instance + * @param {tabletmanagerdata.ICreateVReplicationWorkflowResponse=} [properties] Properties to set + * @returns {tabletmanagerdata.CreateVReplicationWorkflowResponse} CreateVReplicationWorkflowResponse instance */ - VDiffResponse.create = function create(properties) { - return new VDiffResponse(properties); + CreateVReplicationWorkflowResponse.create = function create(properties) { + return new CreateVReplicationWorkflowResponse(properties); }; /** - * Encodes the specified VDiffResponse message. Does not implicitly {@link tabletmanagerdata.VDiffResponse.verify|verify} messages. + * Encodes the specified CreateVReplicationWorkflowResponse message. Does not implicitly {@link tabletmanagerdata.CreateVReplicationWorkflowResponse.verify|verify} messages. 
* @function encode - * @memberof tabletmanagerdata.VDiffResponse + * @memberof tabletmanagerdata.CreateVReplicationWorkflowResponse * @static - * @param {tabletmanagerdata.IVDiffResponse} message VDiffResponse message or plain object to encode + * @param {tabletmanagerdata.ICreateVReplicationWorkflowResponse} message CreateVReplicationWorkflowResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VDiffResponse.encode = function encode(message, writer) { + CreateVReplicationWorkflowResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.id != null && Object.hasOwnProperty.call(message, "id")) - writer.uint32(/* id 1, wireType 0 =*/8).int64(message.id); - if (message.output != null && Object.hasOwnProperty.call(message, "output")) - $root.query.QueryResult.encode(message.output, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.vdiff_uuid != null && Object.hasOwnProperty.call(message, "vdiff_uuid")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.vdiff_uuid); + if (message.result != null && Object.hasOwnProperty.call(message, "result")) + $root.query.QueryResult.encode(message.result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified VDiffResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffResponse.verify|verify} messages. + * Encodes the specified CreateVReplicationWorkflowResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.CreateVReplicationWorkflowResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof tabletmanagerdata.VDiffResponse + * @memberof tabletmanagerdata.CreateVReplicationWorkflowResponse * @static - * @param {tabletmanagerdata.IVDiffResponse} message VDiffResponse message or plain object to encode + * @param {tabletmanagerdata.ICreateVReplicationWorkflowResponse} message CreateVReplicationWorkflowResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VDiffResponse.encodeDelimited = function encodeDelimited(message, writer) { + CreateVReplicationWorkflowResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a VDiffResponse message from the specified reader or buffer. + * Decodes a CreateVReplicationWorkflowResponse message from the specified reader or buffer. * @function decode - * @memberof tabletmanagerdata.VDiffResponse + * @memberof tabletmanagerdata.CreateVReplicationWorkflowResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {tabletmanagerdata.VDiffResponse} VDiffResponse + * @returns {tabletmanagerdata.CreateVReplicationWorkflowResponse} CreateVReplicationWorkflowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VDiffResponse.decode = function decode(reader, length) { + CreateVReplicationWorkflowResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.VDiffResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tabletmanagerdata.CreateVReplicationWorkflowResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.id = reader.int64(); - break; - } - case 2: { - message.output = $root.query.QueryResult.decode(reader, reader.uint32()); - break; - } - case 3: { - message.vdiff_uuid = reader.string(); + message.result = $root.query.QueryResult.decode(reader, reader.uint32()); break; } default: @@ -59323,160 +61016,127 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { }; /** - * Decodes a VDiffResponse message from the specified reader or buffer, length delimited. + * Decodes a CreateVReplicationWorkflowResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof tabletmanagerdata.VDiffResponse + * @memberof tabletmanagerdata.CreateVReplicationWorkflowResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {tabletmanagerdata.VDiffResponse} VDiffResponse + * @returns {tabletmanagerdata.CreateVReplicationWorkflowResponse} CreateVReplicationWorkflowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VDiffResponse.decodeDelimited = function decodeDelimited(reader) { + CreateVReplicationWorkflowResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a VDiffResponse message. + * Verifies a CreateVReplicationWorkflowResponse message. 
* @function verify - * @memberof tabletmanagerdata.VDiffResponse + * @memberof tabletmanagerdata.CreateVReplicationWorkflowResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - VDiffResponse.verify = function verify(message) { + CreateVReplicationWorkflowResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.id != null && message.hasOwnProperty("id")) - if (!$util.isInteger(message.id) && !(message.id && $util.isInteger(message.id.low) && $util.isInteger(message.id.high))) - return "id: integer|Long expected"; - if (message.output != null && message.hasOwnProperty("output")) { - let error = $root.query.QueryResult.verify(message.output); + if (message.result != null && message.hasOwnProperty("result")) { + let error = $root.query.QueryResult.verify(message.result); if (error) - return "output." + error; + return "result." + error; } - if (message.vdiff_uuid != null && message.hasOwnProperty("vdiff_uuid")) - if (!$util.isString(message.vdiff_uuid)) - return "vdiff_uuid: string expected"; return null; }; /** - * Creates a VDiffResponse message from a plain object. Also converts values to their respective internal types. + * Creates a CreateVReplicationWorkflowResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof tabletmanagerdata.VDiffResponse + * @memberof tabletmanagerdata.CreateVReplicationWorkflowResponse * @static * @param {Object.} object Plain object - * @returns {tabletmanagerdata.VDiffResponse} VDiffResponse + * @returns {tabletmanagerdata.CreateVReplicationWorkflowResponse} CreateVReplicationWorkflowResponse */ - VDiffResponse.fromObject = function fromObject(object) { - if (object instanceof $root.tabletmanagerdata.VDiffResponse) + CreateVReplicationWorkflowResponse.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.CreateVReplicationWorkflowResponse) return object; - let message = new $root.tabletmanagerdata.VDiffResponse(); - if (object.id != null) - if ($util.Long) - (message.id = $util.Long.fromValue(object.id)).unsigned = false; - else if (typeof object.id === "string") - message.id = parseInt(object.id, 10); - else if (typeof object.id === "number") - message.id = object.id; - else if (typeof object.id === "object") - message.id = new $util.LongBits(object.id.low >>> 0, object.id.high >>> 0).toNumber(); - if (object.output != null) { - if (typeof object.output !== "object") - throw TypeError(".tabletmanagerdata.VDiffResponse.output: object expected"); - message.output = $root.query.QueryResult.fromObject(object.output); + let message = new $root.tabletmanagerdata.CreateVReplicationWorkflowResponse(); + if (object.result != null) { + if (typeof object.result !== "object") + throw TypeError(".tabletmanagerdata.CreateVReplicationWorkflowResponse.result: object expected"); + message.result = $root.query.QueryResult.fromObject(object.result); } - if (object.vdiff_uuid != null) - message.vdiff_uuid = String(object.vdiff_uuid); return message; }; /** - * Creates a plain object from a VDiffResponse message. Also converts values to other types if specified. + * Creates a plain object from a CreateVReplicationWorkflowResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof tabletmanagerdata.VDiffResponse + * @memberof tabletmanagerdata.CreateVReplicationWorkflowResponse * @static - * @param {tabletmanagerdata.VDiffResponse} message VDiffResponse + * @param {tabletmanagerdata.CreateVReplicationWorkflowResponse} message CreateVReplicationWorkflowResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - VDiffResponse.toObject = function toObject(message, options) { + CreateVReplicationWorkflowResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.id = options.longs === String ? "0" : 0; - object.output = null; - object.vdiff_uuid = ""; - } - if (message.id != null && message.hasOwnProperty("id")) - if (typeof message.id === "number") - object.id = options.longs === String ? String(message.id) : message.id; - else - object.id = options.longs === String ? $util.Long.prototype.toString.call(message.id) : options.longs === Number ? new $util.LongBits(message.id.low >>> 0, message.id.high >>> 0).toNumber() : message.id; - if (message.output != null && message.hasOwnProperty("output")) - object.output = $root.query.QueryResult.toObject(message.output, options); - if (message.vdiff_uuid != null && message.hasOwnProperty("vdiff_uuid")) - object.vdiff_uuid = message.vdiff_uuid; + if (options.defaults) + object.result = null; + if (message.result != null && message.hasOwnProperty("result")) + object.result = $root.query.QueryResult.toObject(message.result, options); return object; }; /** - * Converts this VDiffResponse to JSON. + * Converts this CreateVReplicationWorkflowResponse to JSON. 
* @function toJSON - * @memberof tabletmanagerdata.VDiffResponse + * @memberof tabletmanagerdata.CreateVReplicationWorkflowResponse * @instance * @returns {Object.} JSON object */ - VDiffResponse.prototype.toJSON = function toJSON() { + CreateVReplicationWorkflowResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for VDiffResponse + * Gets the default type url for CreateVReplicationWorkflowResponse * @function getTypeUrl - * @memberof tabletmanagerdata.VDiffResponse + * @memberof tabletmanagerdata.CreateVReplicationWorkflowResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - VDiffResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + CreateVReplicationWorkflowResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/tabletmanagerdata.VDiffResponse"; + return typeUrlPrefix + "/tabletmanagerdata.CreateVReplicationWorkflowResponse"; }; - return VDiffResponse; + return CreateVReplicationWorkflowResponse; })(); - tabletmanagerdata.VDiffPickerOptions = (function() { + tabletmanagerdata.DeleteVReplicationWorkflowRequest = (function() { /** - * Properties of a VDiffPickerOptions. + * Properties of a DeleteVReplicationWorkflowRequest. * @memberof tabletmanagerdata - * @interface IVDiffPickerOptions - * @property {string|null} [tablet_types] VDiffPickerOptions tablet_types - * @property {string|null} [source_cell] VDiffPickerOptions source_cell - * @property {string|null} [target_cell] VDiffPickerOptions target_cell + * @interface IDeleteVReplicationWorkflowRequest + * @property {string|null} [workflow] DeleteVReplicationWorkflowRequest workflow */ /** - * Constructs a new VDiffPickerOptions. + * Constructs a new DeleteVReplicationWorkflowRequest. 
* @memberof tabletmanagerdata - * @classdesc Represents a VDiffPickerOptions. - * @implements IVDiffPickerOptions + * @classdesc Represents a DeleteVReplicationWorkflowRequest. + * @implements IDeleteVReplicationWorkflowRequest * @constructor - * @param {tabletmanagerdata.IVDiffPickerOptions=} [properties] Properties to set + * @param {tabletmanagerdata.IDeleteVReplicationWorkflowRequest=} [properties] Properties to set */ - function VDiffPickerOptions(properties) { + function DeleteVReplicationWorkflowRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -59484,103 +61144,75 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { } /** - * VDiffPickerOptions tablet_types. - * @member {string} tablet_types - * @memberof tabletmanagerdata.VDiffPickerOptions - * @instance - */ - VDiffPickerOptions.prototype.tablet_types = ""; - - /** - * VDiffPickerOptions source_cell. - * @member {string} source_cell - * @memberof tabletmanagerdata.VDiffPickerOptions - * @instance - */ - VDiffPickerOptions.prototype.source_cell = ""; - - /** - * VDiffPickerOptions target_cell. - * @member {string} target_cell - * @memberof tabletmanagerdata.VDiffPickerOptions + * DeleteVReplicationWorkflowRequest workflow. + * @member {string} workflow + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowRequest * @instance */ - VDiffPickerOptions.prototype.target_cell = ""; + DeleteVReplicationWorkflowRequest.prototype.workflow = ""; /** - * Creates a new VDiffPickerOptions instance using the specified properties. + * Creates a new DeleteVReplicationWorkflowRequest instance using the specified properties. 
* @function create - * @memberof tabletmanagerdata.VDiffPickerOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowRequest * @static - * @param {tabletmanagerdata.IVDiffPickerOptions=} [properties] Properties to set - * @returns {tabletmanagerdata.VDiffPickerOptions} VDiffPickerOptions instance + * @param {tabletmanagerdata.IDeleteVReplicationWorkflowRequest=} [properties] Properties to set + * @returns {tabletmanagerdata.DeleteVReplicationWorkflowRequest} DeleteVReplicationWorkflowRequest instance */ - VDiffPickerOptions.create = function create(properties) { - return new VDiffPickerOptions(properties); + DeleteVReplicationWorkflowRequest.create = function create(properties) { + return new DeleteVReplicationWorkflowRequest(properties); }; /** - * Encodes the specified VDiffPickerOptions message. Does not implicitly {@link tabletmanagerdata.VDiffPickerOptions.verify|verify} messages. + * Encodes the specified DeleteVReplicationWorkflowRequest message. Does not implicitly {@link tabletmanagerdata.DeleteVReplicationWorkflowRequest.verify|verify} messages. 
* @function encode - * @memberof tabletmanagerdata.VDiffPickerOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowRequest * @static - * @param {tabletmanagerdata.IVDiffPickerOptions} message VDiffPickerOptions message or plain object to encode + * @param {tabletmanagerdata.IDeleteVReplicationWorkflowRequest} message DeleteVReplicationWorkflowRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VDiffPickerOptions.encode = function encode(message, writer) { + DeleteVReplicationWorkflowRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_types != null && Object.hasOwnProperty.call(message, "tablet_types")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.tablet_types); - if (message.source_cell != null && Object.hasOwnProperty.call(message, "source_cell")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.source_cell); - if (message.target_cell != null && Object.hasOwnProperty.call(message, "target_cell")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.target_cell); + if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.workflow); return writer; }; /** - * Encodes the specified VDiffPickerOptions message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffPickerOptions.verify|verify} messages. + * Encodes the specified DeleteVReplicationWorkflowRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.DeleteVReplicationWorkflowRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof tabletmanagerdata.VDiffPickerOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowRequest * @static - * @param {tabletmanagerdata.IVDiffPickerOptions} message VDiffPickerOptions message or plain object to encode + * @param {tabletmanagerdata.IDeleteVReplicationWorkflowRequest} message DeleteVReplicationWorkflowRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VDiffPickerOptions.encodeDelimited = function encodeDelimited(message, writer) { + DeleteVReplicationWorkflowRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a VDiffPickerOptions message from the specified reader or buffer. + * Decodes a DeleteVReplicationWorkflowRequest message from the specified reader or buffer. * @function decode - * @memberof tabletmanagerdata.VDiffPickerOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {tabletmanagerdata.VDiffPickerOptions} VDiffPickerOptions + * @returns {tabletmanagerdata.DeleteVReplicationWorkflowRequest} DeleteVReplicationWorkflowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VDiffPickerOptions.decode = function decode(reader, length) { + DeleteVReplicationWorkflowRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.VDiffPickerOptions(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tabletmanagerdata.DeleteVReplicationWorkflowRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tablet_types = reader.string(); - break; - } - case 2: { - message.source_cell = reader.string(); - break; - } - case 3: { - message.target_cell = reader.string(); + message.workflow = reader.string(); break; } default: @@ -59592,141 +61224,122 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { }; /** - * Decodes a VDiffPickerOptions message from the specified reader or buffer, length delimited. + * Decodes a DeleteVReplicationWorkflowRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof tabletmanagerdata.VDiffPickerOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {tabletmanagerdata.VDiffPickerOptions} VDiffPickerOptions + * @returns {tabletmanagerdata.DeleteVReplicationWorkflowRequest} DeleteVReplicationWorkflowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VDiffPickerOptions.decodeDelimited = function decodeDelimited(reader) { + DeleteVReplicationWorkflowRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a VDiffPickerOptions message. + * Verifies a DeleteVReplicationWorkflowRequest message. 
* @function verify - * @memberof tabletmanagerdata.VDiffPickerOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - VDiffPickerOptions.verify = function verify(message) { + DeleteVReplicationWorkflowRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_types != null && message.hasOwnProperty("tablet_types")) - if (!$util.isString(message.tablet_types)) - return "tablet_types: string expected"; - if (message.source_cell != null && message.hasOwnProperty("source_cell")) - if (!$util.isString(message.source_cell)) - return "source_cell: string expected"; - if (message.target_cell != null && message.hasOwnProperty("target_cell")) - if (!$util.isString(message.target_cell)) - return "target_cell: string expected"; + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; return null; }; /** - * Creates a VDiffPickerOptions message from a plain object. Also converts values to their respective internal types. + * Creates a DeleteVReplicationWorkflowRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof tabletmanagerdata.VDiffPickerOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowRequest * @static * @param {Object.} object Plain object - * @returns {tabletmanagerdata.VDiffPickerOptions} VDiffPickerOptions + * @returns {tabletmanagerdata.DeleteVReplicationWorkflowRequest} DeleteVReplicationWorkflowRequest */ - VDiffPickerOptions.fromObject = function fromObject(object) { - if (object instanceof $root.tabletmanagerdata.VDiffPickerOptions) + DeleteVReplicationWorkflowRequest.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.DeleteVReplicationWorkflowRequest) return object; - let message = new $root.tabletmanagerdata.VDiffPickerOptions(); - if (object.tablet_types != null) - message.tablet_types = String(object.tablet_types); - if (object.source_cell != null) - message.source_cell = String(object.source_cell); - if (object.target_cell != null) - message.target_cell = String(object.target_cell); + let message = new $root.tabletmanagerdata.DeleteVReplicationWorkflowRequest(); + if (object.workflow != null) + message.workflow = String(object.workflow); return message; }; /** - * Creates a plain object from a VDiffPickerOptions message. Also converts values to other types if specified. + * Creates a plain object from a DeleteVReplicationWorkflowRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof tabletmanagerdata.VDiffPickerOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowRequest * @static - * @param {tabletmanagerdata.VDiffPickerOptions} message VDiffPickerOptions + * @param {tabletmanagerdata.DeleteVReplicationWorkflowRequest} message DeleteVReplicationWorkflowRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - VDiffPickerOptions.toObject = function toObject(message, options) { + DeleteVReplicationWorkflowRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.tablet_types = ""; - object.source_cell = ""; - object.target_cell = ""; - } - if (message.tablet_types != null && message.hasOwnProperty("tablet_types")) - object.tablet_types = message.tablet_types; - if (message.source_cell != null && message.hasOwnProperty("source_cell")) - object.source_cell = message.source_cell; - if (message.target_cell != null && message.hasOwnProperty("target_cell")) - object.target_cell = message.target_cell; + if (options.defaults) + object.workflow = ""; + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; return object; }; /** - * Converts this VDiffPickerOptions to JSON. + * Converts this DeleteVReplicationWorkflowRequest to JSON. 
* @function toJSON - * @memberof tabletmanagerdata.VDiffPickerOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowRequest * @instance * @returns {Object.} JSON object */ - VDiffPickerOptions.prototype.toJSON = function toJSON() { + DeleteVReplicationWorkflowRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for VDiffPickerOptions + * Gets the default type url for DeleteVReplicationWorkflowRequest * @function getTypeUrl - * @memberof tabletmanagerdata.VDiffPickerOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - VDiffPickerOptions.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + DeleteVReplicationWorkflowRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/tabletmanagerdata.VDiffPickerOptions"; + return typeUrlPrefix + "/tabletmanagerdata.DeleteVReplicationWorkflowRequest"; }; - return VDiffPickerOptions; + return DeleteVReplicationWorkflowRequest; })(); - tabletmanagerdata.VDiffReportOptions = (function() { + tabletmanagerdata.DeleteVReplicationWorkflowResponse = (function() { /** - * Properties of a VDiffReportOptions. + * Properties of a DeleteVReplicationWorkflowResponse. * @memberof tabletmanagerdata - * @interface IVDiffReportOptions - * @property {boolean|null} [only_pks] VDiffReportOptions only_pks - * @property {boolean|null} [debug_query] VDiffReportOptions debug_query - * @property {string|null} [format] VDiffReportOptions format + * @interface IDeleteVReplicationWorkflowResponse + * @property {query.IQueryResult|null} [result] DeleteVReplicationWorkflowResponse result */ /** - * Constructs a new VDiffReportOptions. 
+ * Constructs a new DeleteVReplicationWorkflowResponse. * @memberof tabletmanagerdata - * @classdesc Represents a VDiffReportOptions. - * @implements IVDiffReportOptions + * @classdesc Represents a DeleteVReplicationWorkflowResponse. + * @implements IDeleteVReplicationWorkflowResponse * @constructor - * @param {tabletmanagerdata.IVDiffReportOptions=} [properties] Properties to set + * @param {tabletmanagerdata.IDeleteVReplicationWorkflowResponse=} [properties] Properties to set */ - function VDiffReportOptions(properties) { + function DeleteVReplicationWorkflowResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -59734,103 +61347,75 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { } /** - * VDiffReportOptions only_pks. - * @member {boolean} only_pks - * @memberof tabletmanagerdata.VDiffReportOptions - * @instance - */ - VDiffReportOptions.prototype.only_pks = false; - - /** - * VDiffReportOptions debug_query. - * @member {boolean} debug_query - * @memberof tabletmanagerdata.VDiffReportOptions - * @instance - */ - VDiffReportOptions.prototype.debug_query = false; - - /** - * VDiffReportOptions format. - * @member {string} format - * @memberof tabletmanagerdata.VDiffReportOptions + * DeleteVReplicationWorkflowResponse result. + * @member {query.IQueryResult|null|undefined} result + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowResponse * @instance */ - VDiffReportOptions.prototype.format = ""; + DeleteVReplicationWorkflowResponse.prototype.result = null; /** - * Creates a new VDiffReportOptions instance using the specified properties. + * Creates a new DeleteVReplicationWorkflowResponse instance using the specified properties. 
* @function create - * @memberof tabletmanagerdata.VDiffReportOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowResponse * @static - * @param {tabletmanagerdata.IVDiffReportOptions=} [properties] Properties to set - * @returns {tabletmanagerdata.VDiffReportOptions} VDiffReportOptions instance + * @param {tabletmanagerdata.IDeleteVReplicationWorkflowResponse=} [properties] Properties to set + * @returns {tabletmanagerdata.DeleteVReplicationWorkflowResponse} DeleteVReplicationWorkflowResponse instance */ - VDiffReportOptions.create = function create(properties) { - return new VDiffReportOptions(properties); + DeleteVReplicationWorkflowResponse.create = function create(properties) { + return new DeleteVReplicationWorkflowResponse(properties); }; /** - * Encodes the specified VDiffReportOptions message. Does not implicitly {@link tabletmanagerdata.VDiffReportOptions.verify|verify} messages. + * Encodes the specified DeleteVReplicationWorkflowResponse message. Does not implicitly {@link tabletmanagerdata.DeleteVReplicationWorkflowResponse.verify|verify} messages. 
* @function encode - * @memberof tabletmanagerdata.VDiffReportOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowResponse * @static - * @param {tabletmanagerdata.IVDiffReportOptions} message VDiffReportOptions message or plain object to encode + * @param {tabletmanagerdata.IDeleteVReplicationWorkflowResponse} message DeleteVReplicationWorkflowResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VDiffReportOptions.encode = function encode(message, writer) { + DeleteVReplicationWorkflowResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.only_pks != null && Object.hasOwnProperty.call(message, "only_pks")) - writer.uint32(/* id 1, wireType 0 =*/8).bool(message.only_pks); - if (message.debug_query != null && Object.hasOwnProperty.call(message, "debug_query")) - writer.uint32(/* id 2, wireType 0 =*/16).bool(message.debug_query); - if (message.format != null && Object.hasOwnProperty.call(message, "format")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.format); + if (message.result != null && Object.hasOwnProperty.call(message, "result")) + $root.query.QueryResult.encode(message.result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified VDiffReportOptions message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffReportOptions.verify|verify} messages. + * Encodes the specified DeleteVReplicationWorkflowResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.DeleteVReplicationWorkflowResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof tabletmanagerdata.VDiffReportOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowResponse * @static - * @param {tabletmanagerdata.IVDiffReportOptions} message VDiffReportOptions message or plain object to encode + * @param {tabletmanagerdata.IDeleteVReplicationWorkflowResponse} message DeleteVReplicationWorkflowResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VDiffReportOptions.encodeDelimited = function encodeDelimited(message, writer) { + DeleteVReplicationWorkflowResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a VDiffReportOptions message from the specified reader or buffer. + * Decodes a DeleteVReplicationWorkflowResponse message from the specified reader or buffer. * @function decode - * @memberof tabletmanagerdata.VDiffReportOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {tabletmanagerdata.VDiffReportOptions} VDiffReportOptions + * @returns {tabletmanagerdata.DeleteVReplicationWorkflowResponse} DeleteVReplicationWorkflowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VDiffReportOptions.decode = function decode(reader, length) { + DeleteVReplicationWorkflowResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.VDiffReportOptions(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tabletmanagerdata.DeleteVReplicationWorkflowResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.only_pks = reader.bool(); - break; - } - case 2: { - message.debug_query = reader.bool(); - break; - } - case 3: { - message.format = reader.string(); + message.result = $root.query.QueryResult.decode(reader, reader.uint32()); break; } default: @@ -59842,146 +61427,127 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { }; /** - * Decodes a VDiffReportOptions message from the specified reader or buffer, length delimited. + * Decodes a DeleteVReplicationWorkflowResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof tabletmanagerdata.VDiffReportOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {tabletmanagerdata.VDiffReportOptions} VDiffReportOptions + * @returns {tabletmanagerdata.DeleteVReplicationWorkflowResponse} DeleteVReplicationWorkflowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VDiffReportOptions.decodeDelimited = function decodeDelimited(reader) { + DeleteVReplicationWorkflowResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a VDiffReportOptions message. + * Verifies a DeleteVReplicationWorkflowResponse message. 
* @function verify - * @memberof tabletmanagerdata.VDiffReportOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - VDiffReportOptions.verify = function verify(message) { + DeleteVReplicationWorkflowResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.only_pks != null && message.hasOwnProperty("only_pks")) - if (typeof message.only_pks !== "boolean") - return "only_pks: boolean expected"; - if (message.debug_query != null && message.hasOwnProperty("debug_query")) - if (typeof message.debug_query !== "boolean") - return "debug_query: boolean expected"; - if (message.format != null && message.hasOwnProperty("format")) - if (!$util.isString(message.format)) - return "format: string expected"; + if (message.result != null && message.hasOwnProperty("result")) { + let error = $root.query.QueryResult.verify(message.result); + if (error) + return "result." + error; + } return null; }; /** - * Creates a VDiffReportOptions message from a plain object. Also converts values to their respective internal types. + * Creates a DeleteVReplicationWorkflowResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof tabletmanagerdata.VDiffReportOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowResponse * @static * @param {Object.} object Plain object - * @returns {tabletmanagerdata.VDiffReportOptions} VDiffReportOptions + * @returns {tabletmanagerdata.DeleteVReplicationWorkflowResponse} DeleteVReplicationWorkflowResponse */ - VDiffReportOptions.fromObject = function fromObject(object) { - if (object instanceof $root.tabletmanagerdata.VDiffReportOptions) + DeleteVReplicationWorkflowResponse.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.DeleteVReplicationWorkflowResponse) return object; - let message = new $root.tabletmanagerdata.VDiffReportOptions(); - if (object.only_pks != null) - message.only_pks = Boolean(object.only_pks); - if (object.debug_query != null) - message.debug_query = Boolean(object.debug_query); - if (object.format != null) - message.format = String(object.format); + let message = new $root.tabletmanagerdata.DeleteVReplicationWorkflowResponse(); + if (object.result != null) { + if (typeof object.result !== "object") + throw TypeError(".tabletmanagerdata.DeleteVReplicationWorkflowResponse.result: object expected"); + message.result = $root.query.QueryResult.fromObject(object.result); + } return message; }; /** - * Creates a plain object from a VDiffReportOptions message. Also converts values to other types if specified. + * Creates a plain object from a DeleteVReplicationWorkflowResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof tabletmanagerdata.VDiffReportOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowResponse * @static - * @param {tabletmanagerdata.VDiffReportOptions} message VDiffReportOptions + * @param {tabletmanagerdata.DeleteVReplicationWorkflowResponse} message DeleteVReplicationWorkflowResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - VDiffReportOptions.toObject = function toObject(message, options) { + DeleteVReplicationWorkflowResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.only_pks = false; - object.debug_query = false; - object.format = ""; - } - if (message.only_pks != null && message.hasOwnProperty("only_pks")) - object.only_pks = message.only_pks; - if (message.debug_query != null && message.hasOwnProperty("debug_query")) - object.debug_query = message.debug_query; - if (message.format != null && message.hasOwnProperty("format")) - object.format = message.format; + if (options.defaults) + object.result = null; + if (message.result != null && message.hasOwnProperty("result")) + object.result = $root.query.QueryResult.toObject(message.result, options); return object; }; /** - * Converts this VDiffReportOptions to JSON. + * Converts this DeleteVReplicationWorkflowResponse to JSON. 
* @function toJSON - * @memberof tabletmanagerdata.VDiffReportOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowResponse * @instance * @returns {Object.} JSON object */ - VDiffReportOptions.prototype.toJSON = function toJSON() { + DeleteVReplicationWorkflowResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for VDiffReportOptions + * Gets the default type url for DeleteVReplicationWorkflowResponse * @function getTypeUrl - * @memberof tabletmanagerdata.VDiffReportOptions + * @memberof tabletmanagerdata.DeleteVReplicationWorkflowResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - VDiffReportOptions.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + DeleteVReplicationWorkflowResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/tabletmanagerdata.VDiffReportOptions"; + return typeUrlPrefix + "/tabletmanagerdata.DeleteVReplicationWorkflowResponse"; }; - return VDiffReportOptions; + return DeleteVReplicationWorkflowResponse; })(); - tabletmanagerdata.VDiffCoreOptions = (function() { + tabletmanagerdata.ReadVReplicationWorkflowRequest = (function() { /** - * Properties of a VDiffCoreOptions. + * Properties of a ReadVReplicationWorkflowRequest. 
* @memberof tabletmanagerdata - * @interface IVDiffCoreOptions - * @property {string|null} [tables] VDiffCoreOptions tables - * @property {boolean|null} [auto_retry] VDiffCoreOptions auto_retry - * @property {number|Long|null} [max_rows] VDiffCoreOptions max_rows - * @property {boolean|null} [checksum] VDiffCoreOptions checksum - * @property {number|Long|null} [sample_pct] VDiffCoreOptions sample_pct - * @property {number|Long|null} [timeout_seconds] VDiffCoreOptions timeout_seconds - * @property {number|Long|null} [max_extra_rows_to_compare] VDiffCoreOptions max_extra_rows_to_compare - * @property {boolean|null} [update_table_stats] VDiffCoreOptions update_table_stats + * @interface IReadVReplicationWorkflowRequest + * @property {string|null} [workflow] ReadVReplicationWorkflowRequest workflow */ /** - * Constructs a new VDiffCoreOptions. + * Constructs a new ReadVReplicationWorkflowRequest. * @memberof tabletmanagerdata - * @classdesc Represents a VDiffCoreOptions. - * @implements IVDiffCoreOptions + * @classdesc Represents a ReadVReplicationWorkflowRequest. + * @implements IReadVReplicationWorkflowRequest * @constructor - * @param {tabletmanagerdata.IVDiffCoreOptions=} [properties] Properties to set + * @param {tabletmanagerdata.IReadVReplicationWorkflowRequest=} [properties] Properties to set */ - function VDiffCoreOptions(properties) { + function ReadVReplicationWorkflowRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -59989,173 +61555,75 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { } /** - * VDiffCoreOptions tables. - * @member {string} tables - * @memberof tabletmanagerdata.VDiffCoreOptions - * @instance - */ - VDiffCoreOptions.prototype.tables = ""; - - /** - * VDiffCoreOptions auto_retry. 
- * @member {boolean} auto_retry - * @memberof tabletmanagerdata.VDiffCoreOptions - * @instance - */ - VDiffCoreOptions.prototype.auto_retry = false; - - /** - * VDiffCoreOptions max_rows. - * @member {number|Long} max_rows - * @memberof tabletmanagerdata.VDiffCoreOptions - * @instance - */ - VDiffCoreOptions.prototype.max_rows = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * VDiffCoreOptions checksum. - * @member {boolean} checksum - * @memberof tabletmanagerdata.VDiffCoreOptions - * @instance - */ - VDiffCoreOptions.prototype.checksum = false; - - /** - * VDiffCoreOptions sample_pct. - * @member {number|Long} sample_pct - * @memberof tabletmanagerdata.VDiffCoreOptions - * @instance - */ - VDiffCoreOptions.prototype.sample_pct = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * VDiffCoreOptions timeout_seconds. - * @member {number|Long} timeout_seconds - * @memberof tabletmanagerdata.VDiffCoreOptions - * @instance - */ - VDiffCoreOptions.prototype.timeout_seconds = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * VDiffCoreOptions max_extra_rows_to_compare. - * @member {number|Long} max_extra_rows_to_compare - * @memberof tabletmanagerdata.VDiffCoreOptions - * @instance - */ - VDiffCoreOptions.prototype.max_extra_rows_to_compare = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * VDiffCoreOptions update_table_stats. - * @member {boolean} update_table_stats - * @memberof tabletmanagerdata.VDiffCoreOptions + * ReadVReplicationWorkflowRequest workflow. + * @member {string} workflow + * @memberof tabletmanagerdata.ReadVReplicationWorkflowRequest * @instance */ - VDiffCoreOptions.prototype.update_table_stats = false; + ReadVReplicationWorkflowRequest.prototype.workflow = ""; /** - * Creates a new VDiffCoreOptions instance using the specified properties. + * Creates a new ReadVReplicationWorkflowRequest instance using the specified properties. 
* @function create - * @memberof tabletmanagerdata.VDiffCoreOptions + * @memberof tabletmanagerdata.ReadVReplicationWorkflowRequest * @static - * @param {tabletmanagerdata.IVDiffCoreOptions=} [properties] Properties to set - * @returns {tabletmanagerdata.VDiffCoreOptions} VDiffCoreOptions instance + * @param {tabletmanagerdata.IReadVReplicationWorkflowRequest=} [properties] Properties to set + * @returns {tabletmanagerdata.ReadVReplicationWorkflowRequest} ReadVReplicationWorkflowRequest instance */ - VDiffCoreOptions.create = function create(properties) { - return new VDiffCoreOptions(properties); + ReadVReplicationWorkflowRequest.create = function create(properties) { + return new ReadVReplicationWorkflowRequest(properties); }; /** - * Encodes the specified VDiffCoreOptions message. Does not implicitly {@link tabletmanagerdata.VDiffCoreOptions.verify|verify} messages. + * Encodes the specified ReadVReplicationWorkflowRequest message. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowRequest.verify|verify} messages. 
* @function encode - * @memberof tabletmanagerdata.VDiffCoreOptions + * @memberof tabletmanagerdata.ReadVReplicationWorkflowRequest * @static - * @param {tabletmanagerdata.IVDiffCoreOptions} message VDiffCoreOptions message or plain object to encode + * @param {tabletmanagerdata.IReadVReplicationWorkflowRequest} message ReadVReplicationWorkflowRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VDiffCoreOptions.encode = function encode(message, writer) { + ReadVReplicationWorkflowRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tables != null && Object.hasOwnProperty.call(message, "tables")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.tables); - if (message.auto_retry != null && Object.hasOwnProperty.call(message, "auto_retry")) - writer.uint32(/* id 2, wireType 0 =*/16).bool(message.auto_retry); - if (message.max_rows != null && Object.hasOwnProperty.call(message, "max_rows")) - writer.uint32(/* id 3, wireType 0 =*/24).int64(message.max_rows); - if (message.checksum != null && Object.hasOwnProperty.call(message, "checksum")) - writer.uint32(/* id 4, wireType 0 =*/32).bool(message.checksum); - if (message.sample_pct != null && Object.hasOwnProperty.call(message, "sample_pct")) - writer.uint32(/* id 5, wireType 0 =*/40).int64(message.sample_pct); - if (message.timeout_seconds != null && Object.hasOwnProperty.call(message, "timeout_seconds")) - writer.uint32(/* id 6, wireType 0 =*/48).int64(message.timeout_seconds); - if (message.max_extra_rows_to_compare != null && Object.hasOwnProperty.call(message, "max_extra_rows_to_compare")) - writer.uint32(/* id 7, wireType 0 =*/56).int64(message.max_extra_rows_to_compare); - if (message.update_table_stats != null && Object.hasOwnProperty.call(message, "update_table_stats")) - writer.uint32(/* id 8, wireType 0 =*/64).bool(message.update_table_stats); + if 
(message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.workflow); return writer; }; /** - * Encodes the specified VDiffCoreOptions message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffCoreOptions.verify|verify} messages. + * Encodes the specified ReadVReplicationWorkflowRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowRequest.verify|verify} messages. * @function encodeDelimited - * @memberof tabletmanagerdata.VDiffCoreOptions + * @memberof tabletmanagerdata.ReadVReplicationWorkflowRequest * @static - * @param {tabletmanagerdata.IVDiffCoreOptions} message VDiffCoreOptions message or plain object to encode + * @param {tabletmanagerdata.IReadVReplicationWorkflowRequest} message ReadVReplicationWorkflowRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VDiffCoreOptions.encodeDelimited = function encodeDelimited(message, writer) { + ReadVReplicationWorkflowRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a VDiffCoreOptions message from the specified reader or buffer. + * Decodes a ReadVReplicationWorkflowRequest message from the specified reader or buffer. 
* @function decode - * @memberof tabletmanagerdata.VDiffCoreOptions + * @memberof tabletmanagerdata.ReadVReplicationWorkflowRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {tabletmanagerdata.VDiffCoreOptions} VDiffCoreOptions + * @returns {tabletmanagerdata.ReadVReplicationWorkflowRequest} ReadVReplicationWorkflowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VDiffCoreOptions.decode = function decode(reader, length) { + ReadVReplicationWorkflowRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.VDiffCoreOptions(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.ReadVReplicationWorkflowRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tables = reader.string(); - break; - } - case 2: { - message.auto_retry = reader.bool(); - break; - } - case 3: { - message.max_rows = reader.int64(); - break; - } - case 4: { - message.checksum = reader.bool(); - break; - } - case 5: { - message.sample_pct = reader.int64(); - break; - } - case 6: { - message.timeout_seconds = reader.int64(); - break; - } - case 7: { - message.max_extra_rows_to_compare = reader.int64(); - break; - } - case 8: { - message.update_table_stats = reader.bool(); + message.workflow = reader.string(); break; } default: @@ -60167,237 +61635,133 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { }; /** - * Decodes a VDiffCoreOptions message from the specified reader or buffer, length delimited. 
+ * Decodes a ReadVReplicationWorkflowRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof tabletmanagerdata.VDiffCoreOptions + * @memberof tabletmanagerdata.ReadVReplicationWorkflowRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {tabletmanagerdata.VDiffCoreOptions} VDiffCoreOptions + * @returns {tabletmanagerdata.ReadVReplicationWorkflowRequest} ReadVReplicationWorkflowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VDiffCoreOptions.decodeDelimited = function decodeDelimited(reader) { + ReadVReplicationWorkflowRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a VDiffCoreOptions message. + * Verifies a ReadVReplicationWorkflowRequest message. 
* @function verify - * @memberof tabletmanagerdata.VDiffCoreOptions + * @memberof tabletmanagerdata.ReadVReplicationWorkflowRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - VDiffCoreOptions.verify = function verify(message) { + ReadVReplicationWorkflowRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tables != null && message.hasOwnProperty("tables")) - if (!$util.isString(message.tables)) - return "tables: string expected"; - if (message.auto_retry != null && message.hasOwnProperty("auto_retry")) - if (typeof message.auto_retry !== "boolean") - return "auto_retry: boolean expected"; - if (message.max_rows != null && message.hasOwnProperty("max_rows")) - if (!$util.isInteger(message.max_rows) && !(message.max_rows && $util.isInteger(message.max_rows.low) && $util.isInteger(message.max_rows.high))) - return "max_rows: integer|Long expected"; - if (message.checksum != null && message.hasOwnProperty("checksum")) - if (typeof message.checksum !== "boolean") - return "checksum: boolean expected"; - if (message.sample_pct != null && message.hasOwnProperty("sample_pct")) - if (!$util.isInteger(message.sample_pct) && !(message.sample_pct && $util.isInteger(message.sample_pct.low) && $util.isInteger(message.sample_pct.high))) - return "sample_pct: integer|Long expected"; - if (message.timeout_seconds != null && message.hasOwnProperty("timeout_seconds")) - if (!$util.isInteger(message.timeout_seconds) && !(message.timeout_seconds && $util.isInteger(message.timeout_seconds.low) && $util.isInteger(message.timeout_seconds.high))) - return "timeout_seconds: integer|Long expected"; - if (message.max_extra_rows_to_compare != null && message.hasOwnProperty("max_extra_rows_to_compare")) - if (!$util.isInteger(message.max_extra_rows_to_compare) && !(message.max_extra_rows_to_compare && 
$util.isInteger(message.max_extra_rows_to_compare.low) && $util.isInteger(message.max_extra_rows_to_compare.high))) - return "max_extra_rows_to_compare: integer|Long expected"; - if (message.update_table_stats != null && message.hasOwnProperty("update_table_stats")) - if (typeof message.update_table_stats !== "boolean") - return "update_table_stats: boolean expected"; + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; return null; }; /** - * Creates a VDiffCoreOptions message from a plain object. Also converts values to their respective internal types. + * Creates a ReadVReplicationWorkflowRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof tabletmanagerdata.VDiffCoreOptions + * @memberof tabletmanagerdata.ReadVReplicationWorkflowRequest * @static * @param {Object.} object Plain object - * @returns {tabletmanagerdata.VDiffCoreOptions} VDiffCoreOptions + * @returns {tabletmanagerdata.ReadVReplicationWorkflowRequest} ReadVReplicationWorkflowRequest */ - VDiffCoreOptions.fromObject = function fromObject(object) { - if (object instanceof $root.tabletmanagerdata.VDiffCoreOptions) + ReadVReplicationWorkflowRequest.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.ReadVReplicationWorkflowRequest) return object; - let message = new $root.tabletmanagerdata.VDiffCoreOptions(); - if (object.tables != null) - message.tables = String(object.tables); - if (object.auto_retry != null) - message.auto_retry = Boolean(object.auto_retry); - if (object.max_rows != null) - if ($util.Long) - (message.max_rows = $util.Long.fromValue(object.max_rows)).unsigned = false; - else if (typeof object.max_rows === "string") - message.max_rows = parseInt(object.max_rows, 10); - else if (typeof object.max_rows === "number") - message.max_rows = object.max_rows; - else if (typeof 
object.max_rows === "object") - message.max_rows = new $util.LongBits(object.max_rows.low >>> 0, object.max_rows.high >>> 0).toNumber(); - if (object.checksum != null) - message.checksum = Boolean(object.checksum); - if (object.sample_pct != null) - if ($util.Long) - (message.sample_pct = $util.Long.fromValue(object.sample_pct)).unsigned = false; - else if (typeof object.sample_pct === "string") - message.sample_pct = parseInt(object.sample_pct, 10); - else if (typeof object.sample_pct === "number") - message.sample_pct = object.sample_pct; - else if (typeof object.sample_pct === "object") - message.sample_pct = new $util.LongBits(object.sample_pct.low >>> 0, object.sample_pct.high >>> 0).toNumber(); - if (object.timeout_seconds != null) - if ($util.Long) - (message.timeout_seconds = $util.Long.fromValue(object.timeout_seconds)).unsigned = false; - else if (typeof object.timeout_seconds === "string") - message.timeout_seconds = parseInt(object.timeout_seconds, 10); - else if (typeof object.timeout_seconds === "number") - message.timeout_seconds = object.timeout_seconds; - else if (typeof object.timeout_seconds === "object") - message.timeout_seconds = new $util.LongBits(object.timeout_seconds.low >>> 0, object.timeout_seconds.high >>> 0).toNumber(); - if (object.max_extra_rows_to_compare != null) - if ($util.Long) - (message.max_extra_rows_to_compare = $util.Long.fromValue(object.max_extra_rows_to_compare)).unsigned = false; - else if (typeof object.max_extra_rows_to_compare === "string") - message.max_extra_rows_to_compare = parseInt(object.max_extra_rows_to_compare, 10); - else if (typeof object.max_extra_rows_to_compare === "number") - message.max_extra_rows_to_compare = object.max_extra_rows_to_compare; - else if (typeof object.max_extra_rows_to_compare === "object") - message.max_extra_rows_to_compare = new $util.LongBits(object.max_extra_rows_to_compare.low >>> 0, object.max_extra_rows_to_compare.high >>> 0).toNumber(); - if (object.update_table_stats != 
null) - message.update_table_stats = Boolean(object.update_table_stats); + let message = new $root.tabletmanagerdata.ReadVReplicationWorkflowRequest(); + if (object.workflow != null) + message.workflow = String(object.workflow); return message; }; /** - * Creates a plain object from a VDiffCoreOptions message. Also converts values to other types if specified. + * Creates a plain object from a ReadVReplicationWorkflowRequest message. Also converts values to other types if specified. * @function toObject - * @memberof tabletmanagerdata.VDiffCoreOptions + * @memberof tabletmanagerdata.ReadVReplicationWorkflowRequest * @static - * @param {tabletmanagerdata.VDiffCoreOptions} message VDiffCoreOptions + * @param {tabletmanagerdata.ReadVReplicationWorkflowRequest} message ReadVReplicationWorkflowRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - VDiffCoreOptions.toObject = function toObject(message, options) { + ReadVReplicationWorkflowRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.tables = ""; - object.auto_retry = false; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.max_rows = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.max_rows = options.longs === String ? "0" : 0; - object.checksum = false; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.sample_pct = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.sample_pct = options.longs === String ? "0" : 0; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.timeout_seconds = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.timeout_seconds = options.longs === String ? 
"0" : 0; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.max_extra_rows_to_compare = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.max_extra_rows_to_compare = options.longs === String ? "0" : 0; - object.update_table_stats = false; - } - if (message.tables != null && message.hasOwnProperty("tables")) - object.tables = message.tables; - if (message.auto_retry != null && message.hasOwnProperty("auto_retry")) - object.auto_retry = message.auto_retry; - if (message.max_rows != null && message.hasOwnProperty("max_rows")) - if (typeof message.max_rows === "number") - object.max_rows = options.longs === String ? String(message.max_rows) : message.max_rows; - else - object.max_rows = options.longs === String ? $util.Long.prototype.toString.call(message.max_rows) : options.longs === Number ? new $util.LongBits(message.max_rows.low >>> 0, message.max_rows.high >>> 0).toNumber() : message.max_rows; - if (message.checksum != null && message.hasOwnProperty("checksum")) - object.checksum = message.checksum; - if (message.sample_pct != null && message.hasOwnProperty("sample_pct")) - if (typeof message.sample_pct === "number") - object.sample_pct = options.longs === String ? String(message.sample_pct) : message.sample_pct; - else - object.sample_pct = options.longs === String ? $util.Long.prototype.toString.call(message.sample_pct) : options.longs === Number ? new $util.LongBits(message.sample_pct.low >>> 0, message.sample_pct.high >>> 0).toNumber() : message.sample_pct; - if (message.timeout_seconds != null && message.hasOwnProperty("timeout_seconds")) - if (typeof message.timeout_seconds === "number") - object.timeout_seconds = options.longs === String ? String(message.timeout_seconds) : message.timeout_seconds; - else - object.timeout_seconds = options.longs === String ? $util.Long.prototype.toString.call(message.timeout_seconds) : options.longs === Number ? 
new $util.LongBits(message.timeout_seconds.low >>> 0, message.timeout_seconds.high >>> 0).toNumber() : message.timeout_seconds; - if (message.max_extra_rows_to_compare != null && message.hasOwnProperty("max_extra_rows_to_compare")) - if (typeof message.max_extra_rows_to_compare === "number") - object.max_extra_rows_to_compare = options.longs === String ? String(message.max_extra_rows_to_compare) : message.max_extra_rows_to_compare; - else - object.max_extra_rows_to_compare = options.longs === String ? $util.Long.prototype.toString.call(message.max_extra_rows_to_compare) : options.longs === Number ? new $util.LongBits(message.max_extra_rows_to_compare.low >>> 0, message.max_extra_rows_to_compare.high >>> 0).toNumber() : message.max_extra_rows_to_compare; - if (message.update_table_stats != null && message.hasOwnProperty("update_table_stats")) - object.update_table_stats = message.update_table_stats; + if (options.defaults) + object.workflow = ""; + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; return object; }; /** - * Converts this VDiffCoreOptions to JSON. + * Converts this ReadVReplicationWorkflowRequest to JSON. 
* @function toJSON - * @memberof tabletmanagerdata.VDiffCoreOptions + * @memberof tabletmanagerdata.ReadVReplicationWorkflowRequest * @instance * @returns {Object.} JSON object */ - VDiffCoreOptions.prototype.toJSON = function toJSON() { + ReadVReplicationWorkflowRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for VDiffCoreOptions + * Gets the default type url for ReadVReplicationWorkflowRequest * @function getTypeUrl - * @memberof tabletmanagerdata.VDiffCoreOptions + * @memberof tabletmanagerdata.ReadVReplicationWorkflowRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - VDiffCoreOptions.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReadVReplicationWorkflowRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/tabletmanagerdata.VDiffCoreOptions"; + return typeUrlPrefix + "/tabletmanagerdata.ReadVReplicationWorkflowRequest"; }; - return VDiffCoreOptions; + return ReadVReplicationWorkflowRequest; })(); - tabletmanagerdata.VDiffOptions = (function() { + tabletmanagerdata.ReadVReplicationWorkflowResponse = (function() { /** - * Properties of a VDiffOptions. + * Properties of a ReadVReplicationWorkflowResponse. 
* @memberof tabletmanagerdata - * @interface IVDiffOptions - * @property {tabletmanagerdata.IVDiffPickerOptions|null} [picker_options] VDiffOptions picker_options - * @property {tabletmanagerdata.IVDiffCoreOptions|null} [core_options] VDiffOptions core_options - * @property {tabletmanagerdata.IVDiffReportOptions|null} [report_options] VDiffOptions report_options + * @interface IReadVReplicationWorkflowResponse + * @property {string|null} [workflow] ReadVReplicationWorkflowResponse workflow + * @property {string|null} [cells] ReadVReplicationWorkflowResponse cells + * @property {Array.|null} [tablet_types] ReadVReplicationWorkflowResponse tablet_types + * @property {tabletmanagerdata.TabletSelectionPreference|null} [tablet_selection_preference] ReadVReplicationWorkflowResponse tablet_selection_preference + * @property {string|null} [db_name] ReadVReplicationWorkflowResponse db_name + * @property {string|null} [tags] ReadVReplicationWorkflowResponse tags + * @property {binlogdata.VReplicationWorkflowType|null} [workflow_type] ReadVReplicationWorkflowResponse workflow_type + * @property {binlogdata.VReplicationWorkflowSubType|null} [workflow_sub_type] ReadVReplicationWorkflowResponse workflow_sub_type + * @property {boolean|null} [defer_secondary_keys] ReadVReplicationWorkflowResponse defer_secondary_keys + * @property {Array.|null} [streams] ReadVReplicationWorkflowResponse streams */ /** - * Constructs a new VDiffOptions. + * Constructs a new ReadVReplicationWorkflowResponse. * @memberof tabletmanagerdata - * @classdesc Represents a VDiffOptions. - * @implements IVDiffOptions + * @classdesc Represents a ReadVReplicationWorkflowResponse. 
+ * @implements IReadVReplicationWorkflowResponse * @constructor - * @param {tabletmanagerdata.IVDiffOptions=} [properties] Properties to set + * @param {tabletmanagerdata.IReadVReplicationWorkflowResponse=} [properties] Properties to set */ - function VDiffOptions(properties) { + function ReadVReplicationWorkflowResponse(properties) { + this.tablet_types = []; + this.streams = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -60405,103 +61769,215 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { } /** - * VDiffOptions picker_options. - * @member {tabletmanagerdata.IVDiffPickerOptions|null|undefined} picker_options - * @memberof tabletmanagerdata.VDiffOptions + * ReadVReplicationWorkflowResponse workflow. + * @member {string} workflow + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse * @instance */ - VDiffOptions.prototype.picker_options = null; + ReadVReplicationWorkflowResponse.prototype.workflow = ""; /** - * VDiffOptions core_options. - * @member {tabletmanagerdata.IVDiffCoreOptions|null|undefined} core_options - * @memberof tabletmanagerdata.VDiffOptions + * ReadVReplicationWorkflowResponse cells. + * @member {string} cells + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse * @instance */ - VDiffOptions.prototype.core_options = null; + ReadVReplicationWorkflowResponse.prototype.cells = ""; /** - * VDiffOptions report_options. - * @member {tabletmanagerdata.IVDiffReportOptions|null|undefined} report_options - * @memberof tabletmanagerdata.VDiffOptions + * ReadVReplicationWorkflowResponse tablet_types. + * @member {Array.} tablet_types + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse * @instance */ - VDiffOptions.prototype.report_options = null; + ReadVReplicationWorkflowResponse.prototype.tablet_types = $util.emptyArray; /** - * Creates a new VDiffOptions instance using the specified properties. 
+ * ReadVReplicationWorkflowResponse tablet_selection_preference. + * @member {tabletmanagerdata.TabletSelectionPreference} tablet_selection_preference + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse + * @instance + */ + ReadVReplicationWorkflowResponse.prototype.tablet_selection_preference = 0; + + /** + * ReadVReplicationWorkflowResponse db_name. + * @member {string} db_name + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse + * @instance + */ + ReadVReplicationWorkflowResponse.prototype.db_name = ""; + + /** + * ReadVReplicationWorkflowResponse tags. + * @member {string} tags + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse + * @instance + */ + ReadVReplicationWorkflowResponse.prototype.tags = ""; + + /** + * ReadVReplicationWorkflowResponse workflow_type. + * @member {binlogdata.VReplicationWorkflowType} workflow_type + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse + * @instance + */ + ReadVReplicationWorkflowResponse.prototype.workflow_type = 0; + + /** + * ReadVReplicationWorkflowResponse workflow_sub_type. + * @member {binlogdata.VReplicationWorkflowSubType} workflow_sub_type + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse + * @instance + */ + ReadVReplicationWorkflowResponse.prototype.workflow_sub_type = 0; + + /** + * ReadVReplicationWorkflowResponse defer_secondary_keys. + * @member {boolean} defer_secondary_keys + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse + * @instance + */ + ReadVReplicationWorkflowResponse.prototype.defer_secondary_keys = false; + + /** + * ReadVReplicationWorkflowResponse streams. + * @member {Array.} streams + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse + * @instance + */ + ReadVReplicationWorkflowResponse.prototype.streams = $util.emptyArray; + + /** + * Creates a new ReadVReplicationWorkflowResponse instance using the specified properties. 
* @function create - * @memberof tabletmanagerdata.VDiffOptions + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse * @static - * @param {tabletmanagerdata.IVDiffOptions=} [properties] Properties to set - * @returns {tabletmanagerdata.VDiffOptions} VDiffOptions instance + * @param {tabletmanagerdata.IReadVReplicationWorkflowResponse=} [properties] Properties to set + * @returns {tabletmanagerdata.ReadVReplicationWorkflowResponse} ReadVReplicationWorkflowResponse instance */ - VDiffOptions.create = function create(properties) { - return new VDiffOptions(properties); + ReadVReplicationWorkflowResponse.create = function create(properties) { + return new ReadVReplicationWorkflowResponse(properties); }; /** - * Encodes the specified VDiffOptions message. Does not implicitly {@link tabletmanagerdata.VDiffOptions.verify|verify} messages. + * Encodes the specified ReadVReplicationWorkflowResponse message. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowResponse.verify|verify} messages. 
* @function encode - * @memberof tabletmanagerdata.VDiffOptions + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse * @static - * @param {tabletmanagerdata.IVDiffOptions} message VDiffOptions message or plain object to encode + * @param {tabletmanagerdata.IReadVReplicationWorkflowResponse} message ReadVReplicationWorkflowResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VDiffOptions.encode = function encode(message, writer) { + ReadVReplicationWorkflowResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.picker_options != null && Object.hasOwnProperty.call(message, "picker_options")) - $root.tabletmanagerdata.VDiffPickerOptions.encode(message.picker_options, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.core_options != null && Object.hasOwnProperty.call(message, "core_options")) - $root.tabletmanagerdata.VDiffCoreOptions.encode(message.core_options, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.report_options != null && Object.hasOwnProperty.call(message, "report_options")) - $root.tabletmanagerdata.VDiffReportOptions.encode(message.report_options, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.workflow); + if (message.cells != null && Object.hasOwnProperty.call(message, "cells")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.cells); + if (message.tablet_types != null && message.tablet_types.length) { + writer.uint32(/* id 4, wireType 2 =*/34).fork(); + for (let i = 0; i < message.tablet_types.length; ++i) + writer.int32(message.tablet_types[i]); + writer.ldelim(); + } + if (message.tablet_selection_preference != null && Object.hasOwnProperty.call(message, "tablet_selection_preference")) + 
writer.uint32(/* id 5, wireType 0 =*/40).int32(message.tablet_selection_preference); + if (message.db_name != null && Object.hasOwnProperty.call(message, "db_name")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.db_name); + if (message.tags != null && Object.hasOwnProperty.call(message, "tags")) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.tags); + if (message.workflow_type != null && Object.hasOwnProperty.call(message, "workflow_type")) + writer.uint32(/* id 8, wireType 0 =*/64).int32(message.workflow_type); + if (message.workflow_sub_type != null && Object.hasOwnProperty.call(message, "workflow_sub_type")) + writer.uint32(/* id 9, wireType 0 =*/72).int32(message.workflow_sub_type); + if (message.defer_secondary_keys != null && Object.hasOwnProperty.call(message, "defer_secondary_keys")) + writer.uint32(/* id 10, wireType 0 =*/80).bool(message.defer_secondary_keys); + if (message.streams != null && message.streams.length) + for (let i = 0; i < message.streams.length; ++i) + $root.tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.encode(message.streams[i], writer.uint32(/* id 11, wireType 2 =*/90).fork()).ldelim(); return writer; }; /** - * Encodes the specified VDiffOptions message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffOptions.verify|verify} messages. + * Encodes the specified ReadVReplicationWorkflowResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof tabletmanagerdata.VDiffOptions + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse * @static - * @param {tabletmanagerdata.IVDiffOptions} message VDiffOptions message or plain object to encode + * @param {tabletmanagerdata.IReadVReplicationWorkflowResponse} message ReadVReplicationWorkflowResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VDiffOptions.encodeDelimited = function encodeDelimited(message, writer) { + ReadVReplicationWorkflowResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a VDiffOptions message from the specified reader or buffer. + * Decodes a ReadVReplicationWorkflowResponse message from the specified reader or buffer. * @function decode - * @memberof tabletmanagerdata.VDiffOptions + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {tabletmanagerdata.VDiffOptions} VDiffOptions + * @returns {tabletmanagerdata.ReadVReplicationWorkflowResponse} ReadVReplicationWorkflowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VDiffOptions.decode = function decode(reader, length) { + ReadVReplicationWorkflowResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.VDiffOptions(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tabletmanagerdata.ReadVReplicationWorkflowResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.picker_options = $root.tabletmanagerdata.VDiffPickerOptions.decode(reader, reader.uint32()); - break; - } case 2: { - message.core_options = $root.tabletmanagerdata.VDiffCoreOptions.decode(reader, reader.uint32()); + message.workflow = reader.string(); break; } case 3: { - message.report_options = $root.tabletmanagerdata.VDiffReportOptions.decode(reader, reader.uint32()); + message.cells = reader.string(); + break; + } + case 4: { + if (!(message.tablet_types && message.tablet_types.length)) + message.tablet_types = []; + if ((tag & 7) === 2) { + let end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.tablet_types.push(reader.int32()); + } else + message.tablet_types.push(reader.int32()); + break; + } + case 5: { + message.tablet_selection_preference = reader.int32(); + break; + } + case 6: { + message.db_name = reader.string(); + break; + } + case 7: { + message.tags = reader.string(); + break; + } + case 8: { + message.workflow_type = reader.int32(); + break; + } + case 9: { + message.workflow_sub_type = reader.int32(); + break; + } + case 10: { + message.defer_secondary_keys = reader.bool(); + break; + } + case 11: { + if (!(message.streams && message.streams.length)) + message.streams = []; + message.streams.push($root.tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.decode(reader, reader.uint32())); break; } default: @@ -60513,712 +61989,996 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { }; /** - * Decodes a VDiffOptions message from the specified reader or buffer, length delimited. + * Decodes a ReadVReplicationWorkflowResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof tabletmanagerdata.VDiffOptions + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {tabletmanagerdata.VDiffOptions} VDiffOptions + * @returns {tabletmanagerdata.ReadVReplicationWorkflowResponse} ReadVReplicationWorkflowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VDiffOptions.decodeDelimited = function decodeDelimited(reader) { + ReadVReplicationWorkflowResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a VDiffOptions message. + * Verifies a ReadVReplicationWorkflowResponse message. * @function verify - * @memberof tabletmanagerdata.VDiffOptions + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - VDiffOptions.verify = function verify(message) { + ReadVReplicationWorkflowResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.picker_options != null && message.hasOwnProperty("picker_options")) { - let error = $root.tabletmanagerdata.VDiffPickerOptions.verify(message.picker_options); - if (error) - return "picker_options." + error; - } - if (message.core_options != null && message.hasOwnProperty("core_options")) { - let error = $root.tabletmanagerdata.VDiffCoreOptions.verify(message.core_options); - if (error) - return "core_options." 
+ error; - } - if (message.report_options != null && message.hasOwnProperty("report_options")) { - let error = $root.tabletmanagerdata.VDiffReportOptions.verify(message.report_options); - if (error) - return "report_options." + error; + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; + if (message.cells != null && message.hasOwnProperty("cells")) + if (!$util.isString(message.cells)) + return "cells: string expected"; + if (message.tablet_types != null && message.hasOwnProperty("tablet_types")) { + if (!Array.isArray(message.tablet_types)) + return "tablet_types: array expected"; + for (let i = 0; i < message.tablet_types.length; ++i) + switch (message.tablet_types[i]) { + default: + return "tablet_types: enum value[] expected"; + case 0: + case 1: + case 1: + case 2: + case 3: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + } } - return null; - }; - - /** - * Creates a VDiffOptions message from a plain object. Also converts values to their respective internal types. 
- * @function fromObject - * @memberof tabletmanagerdata.VDiffOptions - * @static - * @param {Object.} object Plain object - * @returns {tabletmanagerdata.VDiffOptions} VDiffOptions - */ - VDiffOptions.fromObject = function fromObject(object) { - if (object instanceof $root.tabletmanagerdata.VDiffOptions) - return object; - let message = new $root.tabletmanagerdata.VDiffOptions(); - if (object.picker_options != null) { - if (typeof object.picker_options !== "object") - throw TypeError(".tabletmanagerdata.VDiffOptions.picker_options: object expected"); - message.picker_options = $root.tabletmanagerdata.VDiffPickerOptions.fromObject(object.picker_options); - } - if (object.core_options != null) { - if (typeof object.core_options !== "object") - throw TypeError(".tabletmanagerdata.VDiffOptions.core_options: object expected"); - message.core_options = $root.tabletmanagerdata.VDiffCoreOptions.fromObject(object.core_options); - } - if (object.report_options != null) { - if (typeof object.report_options !== "object") - throw TypeError(".tabletmanagerdata.VDiffOptions.report_options: object expected"); - message.report_options = $root.tabletmanagerdata.VDiffReportOptions.fromObject(object.report_options); - } - return message; - }; - - /** - * Creates a plain object from a VDiffOptions message. Also converts values to other types if specified. 
- * @function toObject - * @memberof tabletmanagerdata.VDiffOptions - * @static - * @param {tabletmanagerdata.VDiffOptions} message VDiffOptions - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - VDiffOptions.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.picker_options = null; - object.core_options = null; - object.report_options = null; - } - if (message.picker_options != null && message.hasOwnProperty("picker_options")) - object.picker_options = $root.tabletmanagerdata.VDiffPickerOptions.toObject(message.picker_options, options); - if (message.core_options != null && message.hasOwnProperty("core_options")) - object.core_options = $root.tabletmanagerdata.VDiffCoreOptions.toObject(message.core_options, options); - if (message.report_options != null && message.hasOwnProperty("report_options")) - object.report_options = $root.tabletmanagerdata.VDiffReportOptions.toObject(message.report_options, options); - return object; - }; - - /** - * Converts this VDiffOptions to JSON. 
- * @function toJSON - * @memberof tabletmanagerdata.VDiffOptions - * @instance - * @returns {Object.} JSON object - */ - VDiffOptions.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - /** - * Gets the default type url for VDiffOptions - * @function getTypeUrl - * @memberof tabletmanagerdata.VDiffOptions - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - VDiffOptions.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/tabletmanagerdata.VDiffOptions"; - }; - - return VDiffOptions; - })(); - - tabletmanagerdata.UpdateVRWorkflowRequest = (function() { - - /** - * Properties of an UpdateVRWorkflowRequest. - * @memberof tabletmanagerdata - * @interface IUpdateVRWorkflowRequest - * @property {string|null} [workflow] UpdateVRWorkflowRequest workflow - * @property {Array.|null} [cells] UpdateVRWorkflowRequest cells - * @property {Array.|null} [tablet_types] UpdateVRWorkflowRequest tablet_types - * @property {binlogdata.OnDDLAction|null} [on_ddl] UpdateVRWorkflowRequest on_ddl - */ - - /** - * Constructs a new UpdateVRWorkflowRequest. - * @memberof tabletmanagerdata - * @classdesc Represents an UpdateVRWorkflowRequest. - * @implements IUpdateVRWorkflowRequest - * @constructor - * @param {tabletmanagerdata.IUpdateVRWorkflowRequest=} [properties] Properties to set - */ - function UpdateVRWorkflowRequest(properties) { - this.cells = []; - this.tablet_types = []; - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } - - /** - * UpdateVRWorkflowRequest workflow. 
- * @member {string} workflow - * @memberof tabletmanagerdata.UpdateVRWorkflowRequest - * @instance - */ - UpdateVRWorkflowRequest.prototype.workflow = ""; - - /** - * UpdateVRWorkflowRequest cells. - * @member {Array.} cells - * @memberof tabletmanagerdata.UpdateVRWorkflowRequest - * @instance - */ - UpdateVRWorkflowRequest.prototype.cells = $util.emptyArray; - - /** - * UpdateVRWorkflowRequest tablet_types. - * @member {Array.} tablet_types - * @memberof tabletmanagerdata.UpdateVRWorkflowRequest - * @instance - */ - UpdateVRWorkflowRequest.prototype.tablet_types = $util.emptyArray; - - /** - * UpdateVRWorkflowRequest on_ddl. - * @member {binlogdata.OnDDLAction} on_ddl - * @memberof tabletmanagerdata.UpdateVRWorkflowRequest - * @instance - */ - UpdateVRWorkflowRequest.prototype.on_ddl = 0; - - /** - * Creates a new UpdateVRWorkflowRequest instance using the specified properties. - * @function create - * @memberof tabletmanagerdata.UpdateVRWorkflowRequest - * @static - * @param {tabletmanagerdata.IUpdateVRWorkflowRequest=} [properties] Properties to set - * @returns {tabletmanagerdata.UpdateVRWorkflowRequest} UpdateVRWorkflowRequest instance - */ - UpdateVRWorkflowRequest.create = function create(properties) { - return new UpdateVRWorkflowRequest(properties); - }; - - /** - * Encodes the specified UpdateVRWorkflowRequest message. Does not implicitly {@link tabletmanagerdata.UpdateVRWorkflowRequest.verify|verify} messages. 
- * @function encode - * @memberof tabletmanagerdata.UpdateVRWorkflowRequest - * @static - * @param {tabletmanagerdata.IUpdateVRWorkflowRequest} message UpdateVRWorkflowRequest message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - UpdateVRWorkflowRequest.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.workflow); - if (message.cells != null && message.cells.length) - for (let i = 0; i < message.cells.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.cells[i]); - if (message.tablet_types != null && message.tablet_types.length) - for (let i = 0; i < message.tablet_types.length; ++i) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.tablet_types[i]); - if (message.on_ddl != null && Object.hasOwnProperty.call(message, "on_ddl")) - writer.uint32(/* id 4, wireType 0 =*/32).int32(message.on_ddl); - return writer; - }; - - /** - * Encodes the specified UpdateVRWorkflowRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.UpdateVRWorkflowRequest.verify|verify} messages. - * @function encodeDelimited - * @memberof tabletmanagerdata.UpdateVRWorkflowRequest - * @static - * @param {tabletmanagerdata.IUpdateVRWorkflowRequest} message UpdateVRWorkflowRequest message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - UpdateVRWorkflowRequest.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; - - /** - * Decodes an UpdateVRWorkflowRequest message from the specified reader or buffer. 
- * @function decode - * @memberof tabletmanagerdata.UpdateVRWorkflowRequest - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {tabletmanagerdata.UpdateVRWorkflowRequest} UpdateVRWorkflowRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - UpdateVRWorkflowRequest.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.UpdateVRWorkflowRequest(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.workflow = reader.string(); - break; - } - case 2: { - if (!(message.cells && message.cells.length)) - message.cells = []; - message.cells.push(reader.string()); - break; - } - case 3: { - if (!(message.tablet_types && message.tablet_types.length)) - message.tablet_types = []; - message.tablet_types.push(reader.string()); - break; - } - case 4: { - message.on_ddl = reader.int32(); - break; - } + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + switch (message.tablet_selection_preference) { default: - reader.skipType(tag & 7); + return "tablet_selection_preference: enum value expected"; + case 0: + case 1: + case 3: break; } - } - return message; - }; - - /** - * Decodes an UpdateVRWorkflowRequest message from the specified reader or buffer, length delimited. 
- * @function decodeDelimited - * @memberof tabletmanagerdata.UpdateVRWorkflowRequest - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {tabletmanagerdata.UpdateVRWorkflowRequest} UpdateVRWorkflowRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - UpdateVRWorkflowRequest.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; - - /** - * Verifies an UpdateVRWorkflowRequest message. - * @function verify - * @memberof tabletmanagerdata.UpdateVRWorkflowRequest - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - UpdateVRWorkflowRequest.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.workflow != null && message.hasOwnProperty("workflow")) - if (!$util.isString(message.workflow)) - return "workflow: string expected"; - if (message.cells != null && message.hasOwnProperty("cells")) { - if (!Array.isArray(message.cells)) - return "cells: array expected"; - for (let i = 0; i < message.cells.length; ++i) - if (!$util.isString(message.cells[i])) - return "cells: string[] expected"; - } - if (message.tablet_types != null && message.hasOwnProperty("tablet_types")) { - if (!Array.isArray(message.tablet_types)) - return "tablet_types: array expected"; - for (let i = 0; i < message.tablet_types.length; ++i) - if (!$util.isString(message.tablet_types[i])) - return "tablet_types: string[] expected"; - } - if (message.on_ddl != null && message.hasOwnProperty("on_ddl")) - switch (message.on_ddl) { + if (message.db_name != null && message.hasOwnProperty("db_name")) + if (!$util.isString(message.db_name)) + return "db_name: string 
expected"; + if (message.tags != null && message.hasOwnProperty("tags")) + if (!$util.isString(message.tags)) + return "tags: string expected"; + if (message.workflow_type != null && message.hasOwnProperty("workflow_type")) + switch (message.workflow_type) { default: - return "on_ddl: enum value expected"; + return "workflow_type: enum value expected"; case 0: case 1: case 2: case 3: + case 4: + case 5: + break; + } + if (message.workflow_sub_type != null && message.hasOwnProperty("workflow_sub_type")) + switch (message.workflow_sub_type) { + default: + return "workflow_sub_type: enum value expected"; + case 0: + case 1: + case 2: break; } + if (message.defer_secondary_keys != null && message.hasOwnProperty("defer_secondary_keys")) + if (typeof message.defer_secondary_keys !== "boolean") + return "defer_secondary_keys: boolean expected"; + if (message.streams != null && message.hasOwnProperty("streams")) { + if (!Array.isArray(message.streams)) + return "streams: array expected"; + for (let i = 0; i < message.streams.length; ++i) { + let error = $root.tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.verify(message.streams[i]); + if (error) + return "streams." + error; + } + } return null; }; /** - * Creates an UpdateVRWorkflowRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ReadVReplicationWorkflowResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof tabletmanagerdata.UpdateVRWorkflowRequest + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse * @static * @param {Object.} object Plain object - * @returns {tabletmanagerdata.UpdateVRWorkflowRequest} UpdateVRWorkflowRequest + * @returns {tabletmanagerdata.ReadVReplicationWorkflowResponse} ReadVReplicationWorkflowResponse */ - UpdateVRWorkflowRequest.fromObject = function fromObject(object) { - if (object instanceof $root.tabletmanagerdata.UpdateVRWorkflowRequest) + ReadVReplicationWorkflowResponse.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.ReadVReplicationWorkflowResponse) return object; - let message = new $root.tabletmanagerdata.UpdateVRWorkflowRequest(); + let message = new $root.tabletmanagerdata.ReadVReplicationWorkflowResponse(); if (object.workflow != null) message.workflow = String(object.workflow); - if (object.cells) { - if (!Array.isArray(object.cells)) - throw TypeError(".tabletmanagerdata.UpdateVRWorkflowRequest.cells: array expected"); - message.cells = []; - for (let i = 0; i < object.cells.length; ++i) - message.cells[i] = String(object.cells[i]); - } + if (object.cells != null) + message.cells = String(object.cells); if (object.tablet_types) { if (!Array.isArray(object.tablet_types)) - throw TypeError(".tabletmanagerdata.UpdateVRWorkflowRequest.tablet_types: array expected"); + throw TypeError(".tabletmanagerdata.ReadVReplicationWorkflowResponse.tablet_types: array expected"); message.tablet_types = []; for (let i = 0; i < object.tablet_types.length; ++i) - message.tablet_types[i] = String(object.tablet_types[i]); + switch (object.tablet_types[i]) { + default: + if (typeof object.tablet_types[i] === "number") { + message.tablet_types[i] = object.tablet_types[i]; + break; + } + case "UNKNOWN": + case 0: + message.tablet_types[i] = 0; + break; + case "PRIMARY": + case 1: + message.tablet_types[i] = 1; + break; + case "MASTER": + case 1: + 
message.tablet_types[i] = 1; + break; + case "REPLICA": + case 2: + message.tablet_types[i] = 2; + break; + case "RDONLY": + case 3: + message.tablet_types[i] = 3; + break; + case "BATCH": + case 3: + message.tablet_types[i] = 3; + break; + case "SPARE": + case 4: + message.tablet_types[i] = 4; + break; + case "EXPERIMENTAL": + case 5: + message.tablet_types[i] = 5; + break; + case "BACKUP": + case 6: + message.tablet_types[i] = 6; + break; + case "RESTORE": + case 7: + message.tablet_types[i] = 7; + break; + case "DRAINED": + case 8: + message.tablet_types[i] = 8; + break; + } } - switch (object.on_ddl) { + switch (object.tablet_selection_preference) { default: - if (typeof object.on_ddl === "number") { - message.on_ddl = object.on_ddl; + if (typeof object.tablet_selection_preference === "number") { + message.tablet_selection_preference = object.tablet_selection_preference; break; } break; - case "IGNORE": + case "ANY": case 0: - message.on_ddl = 0; + message.tablet_selection_preference = 0; break; - case "STOP": + case "INORDER": case 1: - message.on_ddl = 1; + message.tablet_selection_preference = 1; break; - case "EXEC": + case "UNKNOWN": + case 3: + message.tablet_selection_preference = 3; + break; + } + if (object.db_name != null) + message.db_name = String(object.db_name); + if (object.tags != null) + message.tags = String(object.tags); + switch (object.workflow_type) { + default: + if (typeof object.workflow_type === "number") { + message.workflow_type = object.workflow_type; + break; + } + break; + case "Materialize": + case 0: + message.workflow_type = 0; + break; + case "MoveTables": + case 1: + message.workflow_type = 1; + break; + case "CreateLookupIndex": case 2: - message.on_ddl = 2; + message.workflow_type = 2; break; - case "EXEC_IGNORE": + case "Migrate": case 3: - message.on_ddl = 3; + message.workflow_type = 3; + break; + case "Reshard": + case 4: + message.workflow_type = 4; + break; + case "OnlineDDL": + case 5: + message.workflow_type = 5; + 
break; + } + switch (object.workflow_sub_type) { + default: + if (typeof object.workflow_sub_type === "number") { + message.workflow_sub_type = object.workflow_sub_type; + break; + } + break; + case "None": + case 0: + message.workflow_sub_type = 0; + break; + case "Partial": + case 1: + message.workflow_sub_type = 1; + break; + case "AtomicCopy": + case 2: + message.workflow_sub_type = 2; break; } + if (object.defer_secondary_keys != null) + message.defer_secondary_keys = Boolean(object.defer_secondary_keys); + if (object.streams) { + if (!Array.isArray(object.streams)) + throw TypeError(".tabletmanagerdata.ReadVReplicationWorkflowResponse.streams: array expected"); + message.streams = []; + for (let i = 0; i < object.streams.length; ++i) { + if (typeof object.streams[i] !== "object") + throw TypeError(".tabletmanagerdata.ReadVReplicationWorkflowResponse.streams: object expected"); + message.streams[i] = $root.tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.fromObject(object.streams[i]); + } + } return message; }; /** - * Creates a plain object from an UpdateVRWorkflowRequest message. Also converts values to other types if specified. + * Creates a plain object from a ReadVReplicationWorkflowResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof tabletmanagerdata.UpdateVRWorkflowRequest + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse * @static - * @param {tabletmanagerdata.UpdateVRWorkflowRequest} message UpdateVRWorkflowRequest + * @param {tabletmanagerdata.ReadVReplicationWorkflowResponse} message ReadVReplicationWorkflowResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - UpdateVRWorkflowRequest.toObject = function toObject(message, options) { + ReadVReplicationWorkflowResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) { - object.cells = []; object.tablet_types = []; + object.streams = []; } if (options.defaults) { object.workflow = ""; - object.on_ddl = options.enums === String ? "IGNORE" : 0; + object.cells = ""; + object.tablet_selection_preference = options.enums === String ? "ANY" : 0; + object.db_name = ""; + object.tags = ""; + object.workflow_type = options.enums === String ? "Materialize" : 0; + object.workflow_sub_type = options.enums === String ? "None" : 0; + object.defer_secondary_keys = false; } if (message.workflow != null && message.hasOwnProperty("workflow")) object.workflow = message.workflow; - if (message.cells && message.cells.length) { - object.cells = []; - for (let j = 0; j < message.cells.length; ++j) - object.cells[j] = message.cells[j]; - } + if (message.cells != null && message.hasOwnProperty("cells")) + object.cells = message.cells; if (message.tablet_types && message.tablet_types.length) { object.tablet_types = []; for (let j = 0; j < message.tablet_types.length; ++j) - object.tablet_types[j] = message.tablet_types[j]; + object.tablet_types[j] = options.enums === String ? $root.topodata.TabletType[message.tablet_types[j]] === undefined ? 
message.tablet_types[j] : $root.topodata.TabletType[message.tablet_types[j]] : message.tablet_types[j]; + } + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + object.tablet_selection_preference = options.enums === String ? $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] === undefined ? message.tablet_selection_preference : $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] : message.tablet_selection_preference; + if (message.db_name != null && message.hasOwnProperty("db_name")) + object.db_name = message.db_name; + if (message.tags != null && message.hasOwnProperty("tags")) + object.tags = message.tags; + if (message.workflow_type != null && message.hasOwnProperty("workflow_type")) + object.workflow_type = options.enums === String ? $root.binlogdata.VReplicationWorkflowType[message.workflow_type] === undefined ? message.workflow_type : $root.binlogdata.VReplicationWorkflowType[message.workflow_type] : message.workflow_type; + if (message.workflow_sub_type != null && message.hasOwnProperty("workflow_sub_type")) + object.workflow_sub_type = options.enums === String ? $root.binlogdata.VReplicationWorkflowSubType[message.workflow_sub_type] === undefined ? message.workflow_sub_type : $root.binlogdata.VReplicationWorkflowSubType[message.workflow_sub_type] : message.workflow_sub_type; + if (message.defer_secondary_keys != null && message.hasOwnProperty("defer_secondary_keys")) + object.defer_secondary_keys = message.defer_secondary_keys; + if (message.streams && message.streams.length) { + object.streams = []; + for (let j = 0; j < message.streams.length; ++j) + object.streams[j] = $root.tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.toObject(message.streams[j], options); } - if (message.on_ddl != null && message.hasOwnProperty("on_ddl")) - object.on_ddl = options.enums === String ? 
$root.binlogdata.OnDDLAction[message.on_ddl] === undefined ? message.on_ddl : $root.binlogdata.OnDDLAction[message.on_ddl] : message.on_ddl; return object; }; /** - * Converts this UpdateVRWorkflowRequest to JSON. + * Converts this ReadVReplicationWorkflowResponse to JSON. * @function toJSON - * @memberof tabletmanagerdata.UpdateVRWorkflowRequest + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse * @instance * @returns {Object.} JSON object */ - UpdateVRWorkflowRequest.prototype.toJSON = function toJSON() { + ReadVReplicationWorkflowResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for UpdateVRWorkflowRequest + * Gets the default type url for ReadVReplicationWorkflowResponse * @function getTypeUrl - * @memberof tabletmanagerdata.UpdateVRWorkflowRequest + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - UpdateVRWorkflowRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReadVReplicationWorkflowResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/tabletmanagerdata.UpdateVRWorkflowRequest"; + return typeUrlPrefix + "/tabletmanagerdata.ReadVReplicationWorkflowResponse"; }; - return UpdateVRWorkflowRequest; - })(); - - tabletmanagerdata.UpdateVRWorkflowResponse = (function() { + ReadVReplicationWorkflowResponse.Stream = (function() { - /** - * Properties of an UpdateVRWorkflowResponse. - * @memberof tabletmanagerdata - * @interface IUpdateVRWorkflowResponse - * @property {query.IQueryResult|null} [result] UpdateVRWorkflowResponse result - */ + /** + * Properties of a Stream. 
+ * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse + * @interface IStream + * @property {number|null} [id] Stream id + * @property {binlogdata.IBinlogSource|null} [bls] Stream bls + * @property {string|null} [pos] Stream pos + * @property {string|null} [stop_pos] Stream stop_pos + * @property {number|Long|null} [max_tps] Stream max_tps + * @property {number|Long|null} [max_replication_lag] Stream max_replication_lag + * @property {vttime.ITime|null} [time_updated] Stream time_updated + * @property {vttime.ITime|null} [transaction_timestamp] Stream transaction_timestamp + * @property {binlogdata.VReplicationWorkflowState|null} [state] Stream state + * @property {string|null} [message] Stream message + * @property {number|Long|null} [rows_copied] Stream rows_copied + * @property {vttime.ITime|null} [time_heartbeat] Stream time_heartbeat + * @property {vttime.ITime|null} [time_throttled] Stream time_throttled + * @property {string|null} [component_throttled] Stream component_throttled + */ - /** - * Constructs a new UpdateVRWorkflowResponse. - * @memberof tabletmanagerdata - * @classdesc Represents an UpdateVRWorkflowResponse. - * @implements IUpdateVRWorkflowResponse - * @constructor - * @param {tabletmanagerdata.IUpdateVRWorkflowResponse=} [properties] Properties to set - */ - function UpdateVRWorkflowResponse(properties) { - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + /** + * Constructs a new Stream. + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse + * @classdesc Represents a Stream. 
+ * @implements IStream + * @constructor + * @param {tabletmanagerdata.ReadVReplicationWorkflowResponse.IStream=} [properties] Properties to set + */ + function Stream(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } - /** - * UpdateVRWorkflowResponse result. - * @member {query.IQueryResult|null|undefined} result - * @memberof tabletmanagerdata.UpdateVRWorkflowResponse - * @instance - */ - UpdateVRWorkflowResponse.prototype.result = null; + /** + * Stream id. + * @member {number} id + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @instance + */ + Stream.prototype.id = 0; - /** - * Creates a new UpdateVRWorkflowResponse instance using the specified properties. - * @function create - * @memberof tabletmanagerdata.UpdateVRWorkflowResponse - * @static - * @param {tabletmanagerdata.IUpdateVRWorkflowResponse=} [properties] Properties to set - * @returns {tabletmanagerdata.UpdateVRWorkflowResponse} UpdateVRWorkflowResponse instance - */ - UpdateVRWorkflowResponse.create = function create(properties) { - return new UpdateVRWorkflowResponse(properties); - }; + /** + * Stream bls. + * @member {binlogdata.IBinlogSource|null|undefined} bls + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @instance + */ + Stream.prototype.bls = null; - /** - * Encodes the specified UpdateVRWorkflowResponse message. Does not implicitly {@link tabletmanagerdata.UpdateVRWorkflowResponse.verify|verify} messages. 
- * @function encode - * @memberof tabletmanagerdata.UpdateVRWorkflowResponse - * @static - * @param {tabletmanagerdata.IUpdateVRWorkflowResponse} message UpdateVRWorkflowResponse message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - UpdateVRWorkflowResponse.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.result != null && Object.hasOwnProperty.call(message, "result")) - $root.query.QueryResult.encode(message.result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - return writer; - }; + /** + * Stream pos. + * @member {string} pos + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @instance + */ + Stream.prototype.pos = ""; - /** - * Encodes the specified UpdateVRWorkflowResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.UpdateVRWorkflowResponse.verify|verify} messages. - * @function encodeDelimited - * @memberof tabletmanagerdata.UpdateVRWorkflowResponse - * @static - * @param {tabletmanagerdata.IUpdateVRWorkflowResponse} message UpdateVRWorkflowResponse message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - UpdateVRWorkflowResponse.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; + /** + * Stream stop_pos. + * @member {string} stop_pos + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @instance + */ + Stream.prototype.stop_pos = ""; - /** - * Decodes an UpdateVRWorkflowResponse message from the specified reader or buffer. 
- * @function decode - * @memberof tabletmanagerdata.UpdateVRWorkflowResponse - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {tabletmanagerdata.UpdateVRWorkflowResponse} UpdateVRWorkflowResponse - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - UpdateVRWorkflowResponse.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.UpdateVRWorkflowResponse(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.result = $root.query.QueryResult.decode(reader, reader.uint32()); - break; - } - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; + /** + * Stream max_tps. + * @member {number|Long} max_tps + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @instance + */ + Stream.prototype.max_tps = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - /** - * Decodes an UpdateVRWorkflowResponse message from the specified reader or buffer, length delimited. - * @function decodeDelimited - * @memberof tabletmanagerdata.UpdateVRWorkflowResponse - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {tabletmanagerdata.UpdateVRWorkflowResponse} UpdateVRWorkflowResponse - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - UpdateVRWorkflowResponse.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; + /** + * Stream max_replication_lag. 
+ * @member {number|Long} max_replication_lag + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @instance + */ + Stream.prototype.max_replication_lag = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - /** - * Verifies an UpdateVRWorkflowResponse message. - * @function verify - * @memberof tabletmanagerdata.UpdateVRWorkflowResponse - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - UpdateVRWorkflowResponse.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.result != null && message.hasOwnProperty("result")) { - let error = $root.query.QueryResult.verify(message.result); - if (error) - return "result." + error; - } - return null; - }; + /** + * Stream time_updated. + * @member {vttime.ITime|null|undefined} time_updated + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @instance + */ + Stream.prototype.time_updated = null; - /** - * Creates an UpdateVRWorkflowResponse message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof tabletmanagerdata.UpdateVRWorkflowResponse - * @static - * @param {Object.} object Plain object - * @returns {tabletmanagerdata.UpdateVRWorkflowResponse} UpdateVRWorkflowResponse - */ - UpdateVRWorkflowResponse.fromObject = function fromObject(object) { - if (object instanceof $root.tabletmanagerdata.UpdateVRWorkflowResponse) - return object; - let message = new $root.tabletmanagerdata.UpdateVRWorkflowResponse(); - if (object.result != null) { - if (typeof object.result !== "object") - throw TypeError(".tabletmanagerdata.UpdateVRWorkflowResponse.result: object expected"); - message.result = $root.query.QueryResult.fromObject(object.result); - } - return message; - }; + /** + * Stream transaction_timestamp. 
+ * @member {vttime.ITime|null|undefined} transaction_timestamp + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @instance + */ + Stream.prototype.transaction_timestamp = null; - /** - * Creates a plain object from an UpdateVRWorkflowResponse message. Also converts values to other types if specified. - * @function toObject - * @memberof tabletmanagerdata.UpdateVRWorkflowResponse - * @static - * @param {tabletmanagerdata.UpdateVRWorkflowResponse} message UpdateVRWorkflowResponse - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - UpdateVRWorkflowResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) - object.result = null; - if (message.result != null && message.hasOwnProperty("result")) - object.result = $root.query.QueryResult.toObject(message.result, options); - return object; - }; + /** + * Stream state. + * @member {binlogdata.VReplicationWorkflowState} state + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @instance + */ + Stream.prototype.state = 0; - /** - * Converts this UpdateVRWorkflowResponse to JSON. - * @function toJSON - * @memberof tabletmanagerdata.UpdateVRWorkflowResponse - * @instance - * @returns {Object.} JSON object - */ - UpdateVRWorkflowResponse.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; + /** + * Stream message. 
+ * @member {string} message + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @instance + */ + Stream.prototype.message = ""; - /** - * Gets the default type url for UpdateVRWorkflowResponse - * @function getTypeUrl - * @memberof tabletmanagerdata.UpdateVRWorkflowResponse - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - UpdateVRWorkflowResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/tabletmanagerdata.UpdateVRWorkflowResponse"; - }; + /** + * Stream rows_copied. + * @member {number|Long} rows_copied + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @instance + */ + Stream.prototype.rows_copied = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - return UpdateVRWorkflowResponse; - })(); + /** + * Stream time_heartbeat. + * @member {vttime.ITime|null|undefined} time_heartbeat + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @instance + */ + Stream.prototype.time_heartbeat = null; - return tabletmanagerdata; -})(); + /** + * Stream time_throttled. + * @member {vttime.ITime|null|undefined} time_throttled + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @instance + */ + Stream.prototype.time_throttled = null; -export const binlogdata = $root.binlogdata = (() => { + /** + * Stream component_throttled. + * @member {string} component_throttled + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @instance + */ + Stream.prototype.component_throttled = ""; - /** - * Namespace binlogdata. - * @exports binlogdata - * @namespace - */ - const binlogdata = {}; + /** + * Creates a new Stream instance using the specified properties. 
+ * @function create + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @static + * @param {tabletmanagerdata.ReadVReplicationWorkflowResponse.IStream=} [properties] Properties to set + * @returns {tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream} Stream instance + */ + Stream.create = function create(properties) { + return new Stream(properties); + }; - binlogdata.Charset = (function() { + /** + * Encodes the specified Stream message. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.verify|verify} messages. + * @function encode + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @static + * @param {tabletmanagerdata.ReadVReplicationWorkflowResponse.IStream} message Stream message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Stream.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.id != null && Object.hasOwnProperty.call(message, "id")) + writer.uint32(/* id 1, wireType 0 =*/8).int32(message.id); + if (message.bls != null && Object.hasOwnProperty.call(message, "bls")) + $root.binlogdata.BinlogSource.encode(message.bls, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.pos != null && Object.hasOwnProperty.call(message, "pos")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.pos); + if (message.stop_pos != null && Object.hasOwnProperty.call(message, "stop_pos")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.stop_pos); + if (message.max_tps != null && Object.hasOwnProperty.call(message, "max_tps")) + writer.uint32(/* id 5, wireType 0 =*/40).int64(message.max_tps); + if (message.max_replication_lag != null && Object.hasOwnProperty.call(message, "max_replication_lag")) + writer.uint32(/* id 6, wireType 0 =*/48).int64(message.max_replication_lag); + if (message.time_updated != null && 
Object.hasOwnProperty.call(message, "time_updated")) + $root.vttime.Time.encode(message.time_updated, writer.uint32(/* id 7, wireType 2 =*/58).fork()).ldelim(); + if (message.transaction_timestamp != null && Object.hasOwnProperty.call(message, "transaction_timestamp")) + $root.vttime.Time.encode(message.transaction_timestamp, writer.uint32(/* id 8, wireType 2 =*/66).fork()).ldelim(); + if (message.state != null && Object.hasOwnProperty.call(message, "state")) + writer.uint32(/* id 9, wireType 0 =*/72).int32(message.state); + if (message.message != null && Object.hasOwnProperty.call(message, "message")) + writer.uint32(/* id 10, wireType 2 =*/82).string(message.message); + if (message.rows_copied != null && Object.hasOwnProperty.call(message, "rows_copied")) + writer.uint32(/* id 11, wireType 0 =*/88).int64(message.rows_copied); + if (message.time_heartbeat != null && Object.hasOwnProperty.call(message, "time_heartbeat")) + $root.vttime.Time.encode(message.time_heartbeat, writer.uint32(/* id 12, wireType 2 =*/98).fork()).ldelim(); + if (message.time_throttled != null && Object.hasOwnProperty.call(message, "time_throttled")) + $root.vttime.Time.encode(message.time_throttled, writer.uint32(/* id 13, wireType 2 =*/106).fork()).ldelim(); + if (message.component_throttled != null && Object.hasOwnProperty.call(message, "component_throttled")) + writer.uint32(/* id 14, wireType 2 =*/114).string(message.component_throttled); + return writer; + }; + + /** + * Encodes the specified Stream message, length delimited. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @static + * @param {tabletmanagerdata.ReadVReplicationWorkflowResponse.IStream} message Stream message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Stream.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Stream message from the specified reader or buffer. + * @function decode + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream} Stream + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Stream.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.id = reader.int32(); + break; + } + case 2: { + message.bls = $root.binlogdata.BinlogSource.decode(reader, reader.uint32()); + break; + } + case 3: { + message.pos = reader.string(); + break; + } + case 4: { + message.stop_pos = reader.string(); + break; + } + case 5: { + message.max_tps = reader.int64(); + break; + } + case 6: { + message.max_replication_lag = reader.int64(); + break; + } + case 7: { + message.time_updated = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 8: { + message.transaction_timestamp = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 9: { + message.state = reader.int32(); + break; + } + case 10: { + message.message = reader.string(); + break; + } + case 11: { + message.rows_copied = reader.int64(); + break; + } + case 12: { + message.time_heartbeat = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 13: { + message.time_throttled = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 14: { + message.component_throttled = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a Stream message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream} Stream + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Stream.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Stream message. + * @function verify + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Stream.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.id != null && message.hasOwnProperty("id")) + if (!$util.isInteger(message.id)) + return "id: integer expected"; + if (message.bls != null && message.hasOwnProperty("bls")) { + let error = $root.binlogdata.BinlogSource.verify(message.bls); + if (error) + return "bls." 
+ error; + } + if (message.pos != null && message.hasOwnProperty("pos")) + if (!$util.isString(message.pos)) + return "pos: string expected"; + if (message.stop_pos != null && message.hasOwnProperty("stop_pos")) + if (!$util.isString(message.stop_pos)) + return "stop_pos: string expected"; + if (message.max_tps != null && message.hasOwnProperty("max_tps")) + if (!$util.isInteger(message.max_tps) && !(message.max_tps && $util.isInteger(message.max_tps.low) && $util.isInteger(message.max_tps.high))) + return "max_tps: integer|Long expected"; + if (message.max_replication_lag != null && message.hasOwnProperty("max_replication_lag")) + if (!$util.isInteger(message.max_replication_lag) && !(message.max_replication_lag && $util.isInteger(message.max_replication_lag.low) && $util.isInteger(message.max_replication_lag.high))) + return "max_replication_lag: integer|Long expected"; + if (message.time_updated != null && message.hasOwnProperty("time_updated")) { + let error = $root.vttime.Time.verify(message.time_updated); + if (error) + return "time_updated." + error; + } + if (message.transaction_timestamp != null && message.hasOwnProperty("transaction_timestamp")) { + let error = $root.vttime.Time.verify(message.transaction_timestamp); + if (error) + return "transaction_timestamp." 
+ error; + } + if (message.state != null && message.hasOwnProperty("state")) + switch (message.state) { + default: + return "state: enum value expected"; + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + break; + } + if (message.message != null && message.hasOwnProperty("message")) + if (!$util.isString(message.message)) + return "message: string expected"; + if (message.rows_copied != null && message.hasOwnProperty("rows_copied")) + if (!$util.isInteger(message.rows_copied) && !(message.rows_copied && $util.isInteger(message.rows_copied.low) && $util.isInteger(message.rows_copied.high))) + return "rows_copied: integer|Long expected"; + if (message.time_heartbeat != null && message.hasOwnProperty("time_heartbeat")) { + let error = $root.vttime.Time.verify(message.time_heartbeat); + if (error) + return "time_heartbeat." + error; + } + if (message.time_throttled != null && message.hasOwnProperty("time_throttled")) { + let error = $root.vttime.Time.verify(message.time_throttled); + if (error) + return "time_throttled." + error; + } + if (message.component_throttled != null && message.hasOwnProperty("component_throttled")) + if (!$util.isString(message.component_throttled)) + return "component_throttled: string expected"; + return null; + }; + + /** + * Creates a Stream message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @static + * @param {Object.} object Plain object + * @returns {tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream} Stream + */ + Stream.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream) + return object; + let message = new $root.tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream(); + if (object.id != null) + message.id = object.id | 0; + if (object.bls != null) { + if (typeof object.bls !== "object") + throw TypeError(".tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.bls: object expected"); + message.bls = $root.binlogdata.BinlogSource.fromObject(object.bls); + } + if (object.pos != null) + message.pos = String(object.pos); + if (object.stop_pos != null) + message.stop_pos = String(object.stop_pos); + if (object.max_tps != null) + if ($util.Long) + (message.max_tps = $util.Long.fromValue(object.max_tps)).unsigned = false; + else if (typeof object.max_tps === "string") + message.max_tps = parseInt(object.max_tps, 10); + else if (typeof object.max_tps === "number") + message.max_tps = object.max_tps; + else if (typeof object.max_tps === "object") + message.max_tps = new $util.LongBits(object.max_tps.low >>> 0, object.max_tps.high >>> 0).toNumber(); + if (object.max_replication_lag != null) + if ($util.Long) + (message.max_replication_lag = $util.Long.fromValue(object.max_replication_lag)).unsigned = false; + else if (typeof object.max_replication_lag === "string") + message.max_replication_lag = parseInt(object.max_replication_lag, 10); + else if (typeof object.max_replication_lag === "number") + message.max_replication_lag = object.max_replication_lag; + else if (typeof object.max_replication_lag === "object") + message.max_replication_lag = new $util.LongBits(object.max_replication_lag.low >>> 0, object.max_replication_lag.high >>> 0).toNumber(); + if 
(object.time_updated != null) { + if (typeof object.time_updated !== "object") + throw TypeError(".tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.time_updated: object expected"); + message.time_updated = $root.vttime.Time.fromObject(object.time_updated); + } + if (object.transaction_timestamp != null) { + if (typeof object.transaction_timestamp !== "object") + throw TypeError(".tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.transaction_timestamp: object expected"); + message.transaction_timestamp = $root.vttime.Time.fromObject(object.transaction_timestamp); + } + switch (object.state) { + default: + if (typeof object.state === "number") { + message.state = object.state; + break; + } + break; + case "Unknown": + case 0: + message.state = 0; + break; + case "Init": + case 1: + message.state = 1; + break; + case "Stopped": + case 2: + message.state = 2; + break; + case "Copying": + case 3: + message.state = 3; + break; + case "Running": + case 4: + message.state = 4; + break; + case "Error": + case 5: + message.state = 5; + break; + case "Lagging": + case 6: + message.state = 6; + break; + } + if (object.message != null) + message.message = String(object.message); + if (object.rows_copied != null) + if ($util.Long) + (message.rows_copied = $util.Long.fromValue(object.rows_copied)).unsigned = false; + else if (typeof object.rows_copied === "string") + message.rows_copied = parseInt(object.rows_copied, 10); + else if (typeof object.rows_copied === "number") + message.rows_copied = object.rows_copied; + else if (typeof object.rows_copied === "object") + message.rows_copied = new $util.LongBits(object.rows_copied.low >>> 0, object.rows_copied.high >>> 0).toNumber(); + if (object.time_heartbeat != null) { + if (typeof object.time_heartbeat !== "object") + throw TypeError(".tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.time_heartbeat: object expected"); + message.time_heartbeat = $root.vttime.Time.fromObject(object.time_heartbeat); + } + if 
(object.time_throttled != null) { + if (typeof object.time_throttled !== "object") + throw TypeError(".tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.time_throttled: object expected"); + message.time_throttled = $root.vttime.Time.fromObject(object.time_throttled); + } + if (object.component_throttled != null) + message.component_throttled = String(object.component_throttled); + return message; + }; + + /** + * Creates a plain object from a Stream message. Also converts values to other types if specified. + * @function toObject + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @static + * @param {tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream} message Stream + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Stream.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.id = 0; + object.bls = null; + object.pos = ""; + object.stop_pos = ""; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.max_tps = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.max_tps = options.longs === String ? "0" : 0; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.max_replication_lag = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.max_replication_lag = options.longs === String ? "0" : 0; + object.time_updated = null; + object.transaction_timestamp = null; + object.state = options.enums === String ? "Unknown" : 0; + object.message = ""; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.rows_copied = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.rows_copied = options.longs === String ? 
"0" : 0; + object.time_heartbeat = null; + object.time_throttled = null; + object.component_throttled = ""; + } + if (message.id != null && message.hasOwnProperty("id")) + object.id = message.id; + if (message.bls != null && message.hasOwnProperty("bls")) + object.bls = $root.binlogdata.BinlogSource.toObject(message.bls, options); + if (message.pos != null && message.hasOwnProperty("pos")) + object.pos = message.pos; + if (message.stop_pos != null && message.hasOwnProperty("stop_pos")) + object.stop_pos = message.stop_pos; + if (message.max_tps != null && message.hasOwnProperty("max_tps")) + if (typeof message.max_tps === "number") + object.max_tps = options.longs === String ? String(message.max_tps) : message.max_tps; + else + object.max_tps = options.longs === String ? $util.Long.prototype.toString.call(message.max_tps) : options.longs === Number ? new $util.LongBits(message.max_tps.low >>> 0, message.max_tps.high >>> 0).toNumber() : message.max_tps; + if (message.max_replication_lag != null && message.hasOwnProperty("max_replication_lag")) + if (typeof message.max_replication_lag === "number") + object.max_replication_lag = options.longs === String ? String(message.max_replication_lag) : message.max_replication_lag; + else + object.max_replication_lag = options.longs === String ? $util.Long.prototype.toString.call(message.max_replication_lag) : options.longs === Number ? 
new $util.LongBits(message.max_replication_lag.low >>> 0, message.max_replication_lag.high >>> 0).toNumber() : message.max_replication_lag; + if (message.time_updated != null && message.hasOwnProperty("time_updated")) + object.time_updated = $root.vttime.Time.toObject(message.time_updated, options); + if (message.transaction_timestamp != null && message.hasOwnProperty("transaction_timestamp")) + object.transaction_timestamp = $root.vttime.Time.toObject(message.transaction_timestamp, options); + if (message.state != null && message.hasOwnProperty("state")) + object.state = options.enums === String ? $root.binlogdata.VReplicationWorkflowState[message.state] === undefined ? message.state : $root.binlogdata.VReplicationWorkflowState[message.state] : message.state; + if (message.message != null && message.hasOwnProperty("message")) + object.message = message.message; + if (message.rows_copied != null && message.hasOwnProperty("rows_copied")) + if (typeof message.rows_copied === "number") + object.rows_copied = options.longs === String ? String(message.rows_copied) : message.rows_copied; + else + object.rows_copied = options.longs === String ? $util.Long.prototype.toString.call(message.rows_copied) : options.longs === Number ? new $util.LongBits(message.rows_copied.low >>> 0, message.rows_copied.high >>> 0).toNumber() : message.rows_copied; + if (message.time_heartbeat != null && message.hasOwnProperty("time_heartbeat")) + object.time_heartbeat = $root.vttime.Time.toObject(message.time_heartbeat, options); + if (message.time_throttled != null && message.hasOwnProperty("time_throttled")) + object.time_throttled = $root.vttime.Time.toObject(message.time_throttled, options); + if (message.component_throttled != null && message.hasOwnProperty("component_throttled")) + object.component_throttled = message.component_throttled; + return object; + }; + + /** + * Converts this Stream to JSON. 
+ * @function toJSON + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @instance + * @returns {Object.} JSON object + */ + Stream.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Stream + * @function getTypeUrl + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Stream.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream"; + }; + + return Stream; + })(); + + return ReadVReplicationWorkflowResponse; + })(); + + tabletmanagerdata.VDiffRequest = (function() { /** - * Properties of a Charset. - * @memberof binlogdata - * @interface ICharset - * @property {number|null} [client] Charset client - * @property {number|null} [conn] Charset conn - * @property {number|null} [server] Charset server + * Properties of a VDiffRequest. + * @memberof tabletmanagerdata + * @interface IVDiffRequest + * @property {string|null} [keyspace] VDiffRequest keyspace + * @property {string|null} [workflow] VDiffRequest workflow + * @property {string|null} [action] VDiffRequest action + * @property {string|null} [action_arg] VDiffRequest action_arg + * @property {string|null} [vdiff_uuid] VDiffRequest vdiff_uuid + * @property {tabletmanagerdata.IVDiffOptions|null} [options] VDiffRequest options */ /** - * Constructs a new Charset. - * @memberof binlogdata - * @classdesc Represents a Charset. - * @implements ICharset + * Constructs a new VDiffRequest. + * @memberof tabletmanagerdata + * @classdesc Represents a VDiffRequest. 
+ * @implements IVDiffRequest * @constructor - * @param {binlogdata.ICharset=} [properties] Properties to set + * @param {tabletmanagerdata.IVDiffRequest=} [properties] Properties to set */ - function Charset(properties) { + function VDiffRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -61226,103 +62986,145 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * Charset client. - * @member {number} client - * @memberof binlogdata.Charset + * VDiffRequest keyspace. + * @member {string} keyspace + * @memberof tabletmanagerdata.VDiffRequest * @instance */ - Charset.prototype.client = 0; + VDiffRequest.prototype.keyspace = ""; /** - * Charset conn. - * @member {number} conn - * @memberof binlogdata.Charset + * VDiffRequest workflow. + * @member {string} workflow + * @memberof tabletmanagerdata.VDiffRequest * @instance */ - Charset.prototype.conn = 0; + VDiffRequest.prototype.workflow = ""; /** - * Charset server. - * @member {number} server - * @memberof binlogdata.Charset + * VDiffRequest action. + * @member {string} action + * @memberof tabletmanagerdata.VDiffRequest * @instance */ - Charset.prototype.server = 0; + VDiffRequest.prototype.action = ""; /** - * Creates a new Charset instance using the specified properties. + * VDiffRequest action_arg. + * @member {string} action_arg + * @memberof tabletmanagerdata.VDiffRequest + * @instance + */ + VDiffRequest.prototype.action_arg = ""; + + /** + * VDiffRequest vdiff_uuid. + * @member {string} vdiff_uuid + * @memberof tabletmanagerdata.VDiffRequest + * @instance + */ + VDiffRequest.prototype.vdiff_uuid = ""; + + /** + * VDiffRequest options. + * @member {tabletmanagerdata.IVDiffOptions|null|undefined} options + * @memberof tabletmanagerdata.VDiffRequest + * @instance + */ + VDiffRequest.prototype.options = null; + + /** + * Creates a new VDiffRequest instance using the specified properties. 
* @function create - * @memberof binlogdata.Charset + * @memberof tabletmanagerdata.VDiffRequest * @static - * @param {binlogdata.ICharset=} [properties] Properties to set - * @returns {binlogdata.Charset} Charset instance + * @param {tabletmanagerdata.IVDiffRequest=} [properties] Properties to set + * @returns {tabletmanagerdata.VDiffRequest} VDiffRequest instance */ - Charset.create = function create(properties) { - return new Charset(properties); + VDiffRequest.create = function create(properties) { + return new VDiffRequest(properties); }; /** - * Encodes the specified Charset message. Does not implicitly {@link binlogdata.Charset.verify|verify} messages. + * Encodes the specified VDiffRequest message. Does not implicitly {@link tabletmanagerdata.VDiffRequest.verify|verify} messages. * @function encode - * @memberof binlogdata.Charset + * @memberof tabletmanagerdata.VDiffRequest * @static - * @param {binlogdata.ICharset} message Charset message or plain object to encode + * @param {tabletmanagerdata.IVDiffRequest} message VDiffRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Charset.encode = function encode(message, writer) { + VDiffRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.client != null && Object.hasOwnProperty.call(message, "client")) - writer.uint32(/* id 1, wireType 0 =*/8).int32(message.client); - if (message.conn != null && Object.hasOwnProperty.call(message, "conn")) - writer.uint32(/* id 2, wireType 0 =*/16).int32(message.conn); - if (message.server != null && Object.hasOwnProperty.call(message, "server")) - writer.uint32(/* id 3, wireType 0 =*/24).int32(message.server); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.workflow != null && Object.hasOwnProperty.call(message, 
"workflow")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.workflow); + if (message.action != null && Object.hasOwnProperty.call(message, "action")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.action); + if (message.action_arg != null && Object.hasOwnProperty.call(message, "action_arg")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.action_arg); + if (message.vdiff_uuid != null && Object.hasOwnProperty.call(message, "vdiff_uuid")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.vdiff_uuid); + if (message.options != null && Object.hasOwnProperty.call(message, "options")) + $root.tabletmanagerdata.VDiffOptions.encode(message.options, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); return writer; }; /** - * Encodes the specified Charset message, length delimited. Does not implicitly {@link binlogdata.Charset.verify|verify} messages. + * Encodes the specified VDiffRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffRequest.verify|verify} messages. * @function encodeDelimited - * @memberof binlogdata.Charset + * @memberof tabletmanagerdata.VDiffRequest * @static - * @param {binlogdata.ICharset} message Charset message or plain object to encode + * @param {tabletmanagerdata.IVDiffRequest} message VDiffRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Charset.encodeDelimited = function encodeDelimited(message, writer) { + VDiffRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Charset message from the specified reader or buffer. + * Decodes a VDiffRequest message from the specified reader or buffer. 
* @function decode - * @memberof binlogdata.Charset + * @memberof tabletmanagerdata.VDiffRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.Charset} Charset + * @returns {tabletmanagerdata.VDiffRequest} VDiffRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Charset.decode = function decode(reader, length) { + VDiffRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.Charset(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.VDiffRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.client = reader.int32(); + message.keyspace = reader.string(); break; } case 2: { - message.conn = reader.int32(); + message.workflow = reader.string(); break; } case 3: { - message.server = reader.int32(); + message.action = reader.string(); + break; + } + case 4: { + message.action_arg = reader.string(); + break; + } + case 5: { + message.vdiff_uuid = reader.string(); + break; + } + case 6: { + message.options = $root.tabletmanagerdata.VDiffOptions.decode(reader, reader.uint32()); break; } default: @@ -61334,141 +63136,170 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a Charset message from the specified reader or buffer, length delimited. + * Decodes a VDiffRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof binlogdata.Charset + * @memberof tabletmanagerdata.VDiffRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.Charset} Charset + * @returns {tabletmanagerdata.VDiffRequest} VDiffRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Charset.decodeDelimited = function decodeDelimited(reader) { + VDiffRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Charset message. + * Verifies a VDiffRequest message. * @function verify - * @memberof binlogdata.Charset + * @memberof tabletmanagerdata.VDiffRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Charset.verify = function verify(message) { + VDiffRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.client != null && message.hasOwnProperty("client")) - if (!$util.isInteger(message.client)) - return "client: integer expected"; - if (message.conn != null && message.hasOwnProperty("conn")) - if (!$util.isInteger(message.conn)) - return "conn: integer expected"; - if (message.server != null && message.hasOwnProperty("server")) - if (!$util.isInteger(message.server)) - return "server: integer expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; + if (message.action != null && message.hasOwnProperty("action")) + if (!$util.isString(message.action)) + return 
"action: string expected"; + if (message.action_arg != null && message.hasOwnProperty("action_arg")) + if (!$util.isString(message.action_arg)) + return "action_arg: string expected"; + if (message.vdiff_uuid != null && message.hasOwnProperty("vdiff_uuid")) + if (!$util.isString(message.vdiff_uuid)) + return "vdiff_uuid: string expected"; + if (message.options != null && message.hasOwnProperty("options")) { + let error = $root.tabletmanagerdata.VDiffOptions.verify(message.options); + if (error) + return "options." + error; + } return null; }; /** - * Creates a Charset message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof binlogdata.Charset + * @memberof tabletmanagerdata.VDiffRequest * @static * @param {Object.} object Plain object - * @returns {binlogdata.Charset} Charset + * @returns {tabletmanagerdata.VDiffRequest} VDiffRequest */ - Charset.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.Charset) + VDiffRequest.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.VDiffRequest) return object; - let message = new $root.binlogdata.Charset(); - if (object.client != null) - message.client = object.client | 0; - if (object.conn != null) - message.conn = object.conn | 0; - if (object.server != null) - message.server = object.server | 0; + let message = new $root.tabletmanagerdata.VDiffRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.workflow != null) + message.workflow = String(object.workflow); + if (object.action != null) + message.action = String(object.action); + if (object.action_arg != null) + message.action_arg = String(object.action_arg); + if (object.vdiff_uuid != null) + message.vdiff_uuid = String(object.vdiff_uuid); + if (object.options != null) { + if 
(typeof object.options !== "object") + throw TypeError(".tabletmanagerdata.VDiffRequest.options: object expected"); + message.options = $root.tabletmanagerdata.VDiffOptions.fromObject(object.options); + } return message; }; /** - * Creates a plain object from a Charset message. Also converts values to other types if specified. + * Creates a plain object from a VDiffRequest message. Also converts values to other types if specified. * @function toObject - * @memberof binlogdata.Charset + * @memberof tabletmanagerdata.VDiffRequest * @static - * @param {binlogdata.Charset} message Charset + * @param {tabletmanagerdata.VDiffRequest} message VDiffRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Charset.toObject = function toObject(message, options) { + VDiffRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.client = 0; - object.conn = 0; - object.server = 0; + object.keyspace = ""; + object.workflow = ""; + object.action = ""; + object.action_arg = ""; + object.vdiff_uuid = ""; + object.options = null; } - if (message.client != null && message.hasOwnProperty("client")) - object.client = message.client; - if (message.conn != null && message.hasOwnProperty("conn")) - object.conn = message.conn; - if (message.server != null && message.hasOwnProperty("server")) - object.server = message.server; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; + if (message.action != null && message.hasOwnProperty("action")) + object.action = message.action; + if (message.action_arg != null && message.hasOwnProperty("action_arg")) + object.action_arg = message.action_arg; + if (message.vdiff_uuid != null && message.hasOwnProperty("vdiff_uuid")) + object.vdiff_uuid = 
message.vdiff_uuid; + if (message.options != null && message.hasOwnProperty("options")) + object.options = $root.tabletmanagerdata.VDiffOptions.toObject(message.options, options); return object; }; /** - * Converts this Charset to JSON. + * Converts this VDiffRequest to JSON. * @function toJSON - * @memberof binlogdata.Charset + * @memberof tabletmanagerdata.VDiffRequest * @instance * @returns {Object.} JSON object */ - Charset.prototype.toJSON = function toJSON() { + VDiffRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for Charset + * Gets the default type url for VDiffRequest * @function getTypeUrl - * @memberof binlogdata.Charset + * @memberof tabletmanagerdata.VDiffRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - Charset.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VDiffRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.Charset"; + return typeUrlPrefix + "/tabletmanagerdata.VDiffRequest"; }; - return Charset; + return VDiffRequest; })(); - binlogdata.BinlogTransaction = (function() { + tabletmanagerdata.VDiffResponse = (function() { /** - * Properties of a BinlogTransaction. - * @memberof binlogdata - * @interface IBinlogTransaction - * @property {Array.|null} [statements] BinlogTransaction statements - * @property {query.IEventToken|null} [event_token] BinlogTransaction event_token + * Properties of a VDiffResponse. + * @memberof tabletmanagerdata + * @interface IVDiffResponse + * @property {number|Long|null} [id] VDiffResponse id + * @property {query.IQueryResult|null} [output] VDiffResponse output + * @property {string|null} [vdiff_uuid] VDiffResponse vdiff_uuid */ /** - * Constructs a new BinlogTransaction. 
- * @memberof binlogdata - * @classdesc Represents a BinlogTransaction. - * @implements IBinlogTransaction + * Constructs a new VDiffResponse. + * @memberof tabletmanagerdata + * @classdesc Represents a VDiffResponse. + * @implements IVDiffResponse * @constructor - * @param {binlogdata.IBinlogTransaction=} [properties] Properties to set + * @param {tabletmanagerdata.IVDiffResponse=} [properties] Properties to set */ - function BinlogTransaction(properties) { - this.statements = []; + function VDiffResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -61476,92 +63307,103 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * BinlogTransaction statements. - * @member {Array.} statements - * @memberof binlogdata.BinlogTransaction + * VDiffResponse id. + * @member {number|Long} id + * @memberof tabletmanagerdata.VDiffResponse * @instance */ - BinlogTransaction.prototype.statements = $util.emptyArray; + VDiffResponse.prototype.id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * BinlogTransaction event_token. - * @member {query.IEventToken|null|undefined} event_token - * @memberof binlogdata.BinlogTransaction + * VDiffResponse output. + * @member {query.IQueryResult|null|undefined} output + * @memberof tabletmanagerdata.VDiffResponse * @instance */ - BinlogTransaction.prototype.event_token = null; + VDiffResponse.prototype.output = null; /** - * Creates a new BinlogTransaction instance using the specified properties. + * VDiffResponse vdiff_uuid. + * @member {string} vdiff_uuid + * @memberof tabletmanagerdata.VDiffResponse + * @instance + */ + VDiffResponse.prototype.vdiff_uuid = ""; + + /** + * Creates a new VDiffResponse instance using the specified properties. 
* @function create - * @memberof binlogdata.BinlogTransaction + * @memberof tabletmanagerdata.VDiffResponse * @static - * @param {binlogdata.IBinlogTransaction=} [properties] Properties to set - * @returns {binlogdata.BinlogTransaction} BinlogTransaction instance + * @param {tabletmanagerdata.IVDiffResponse=} [properties] Properties to set + * @returns {tabletmanagerdata.VDiffResponse} VDiffResponse instance */ - BinlogTransaction.create = function create(properties) { - return new BinlogTransaction(properties); + VDiffResponse.create = function create(properties) { + return new VDiffResponse(properties); }; /** - * Encodes the specified BinlogTransaction message. Does not implicitly {@link binlogdata.BinlogTransaction.verify|verify} messages. + * Encodes the specified VDiffResponse message. Does not implicitly {@link tabletmanagerdata.VDiffResponse.verify|verify} messages. * @function encode - * @memberof binlogdata.BinlogTransaction + * @memberof tabletmanagerdata.VDiffResponse * @static - * @param {binlogdata.IBinlogTransaction} message BinlogTransaction message or plain object to encode + * @param {tabletmanagerdata.IVDiffResponse} message VDiffResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BinlogTransaction.encode = function encode(message, writer) { + VDiffResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.statements != null && message.statements.length) - for (let i = 0; i < message.statements.length; ++i) - $root.binlogdata.BinlogTransaction.Statement.encode(message.statements[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.event_token != null && Object.hasOwnProperty.call(message, "event_token")) - $root.query.EventToken.encode(message.event_token, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.id != null && Object.hasOwnProperty.call(message, "id")) + 
writer.uint32(/* id 1, wireType 0 =*/8).int64(message.id); + if (message.output != null && Object.hasOwnProperty.call(message, "output")) + $root.query.QueryResult.encode(message.output, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.vdiff_uuid != null && Object.hasOwnProperty.call(message, "vdiff_uuid")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.vdiff_uuid); return writer; }; /** - * Encodes the specified BinlogTransaction message, length delimited. Does not implicitly {@link binlogdata.BinlogTransaction.verify|verify} messages. + * Encodes the specified VDiffResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffResponse.verify|verify} messages. * @function encodeDelimited - * @memberof binlogdata.BinlogTransaction + * @memberof tabletmanagerdata.VDiffResponse * @static - * @param {binlogdata.IBinlogTransaction} message BinlogTransaction message or plain object to encode + * @param {tabletmanagerdata.IVDiffResponse} message VDiffResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BinlogTransaction.encodeDelimited = function encodeDelimited(message, writer) { + VDiffResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a BinlogTransaction message from the specified reader or buffer. + * Decodes a VDiffResponse message from the specified reader or buffer. 
* @function decode - * @memberof binlogdata.BinlogTransaction + * @memberof tabletmanagerdata.VDiffResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.BinlogTransaction} BinlogTransaction + * @returns {tabletmanagerdata.VDiffResponse} VDiffResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BinlogTransaction.decode = function decode(reader, length) { + VDiffResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.BinlogTransaction(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.VDiffResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.statements && message.statements.length)) - message.statements = []; - message.statements.push($root.binlogdata.BinlogTransaction.Statement.decode(reader, reader.uint32())); + message.id = reader.int64(); break; } - case 4: { - message.event_token = $root.query.EventToken.decode(reader, reader.uint32()); + case 2: { + message.output = $root.query.QueryResult.decode(reader, reader.uint32()); + break; + } + case 3: { + message.vdiff_uuid = reader.string(); break; } default: @@ -61573,612 +63415,264 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a BinlogTransaction message from the specified reader or buffer, length delimited. + * Decodes a VDiffResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof binlogdata.BinlogTransaction + * @memberof tabletmanagerdata.VDiffResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.BinlogTransaction} BinlogTransaction + * @returns {tabletmanagerdata.VDiffResponse} VDiffResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BinlogTransaction.decodeDelimited = function decodeDelimited(reader) { + VDiffResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a BinlogTransaction message. + * Verifies a VDiffResponse message. * @function verify - * @memberof binlogdata.BinlogTransaction + * @memberof tabletmanagerdata.VDiffResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - BinlogTransaction.verify = function verify(message) { + VDiffResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.statements != null && message.hasOwnProperty("statements")) { - if (!Array.isArray(message.statements)) - return "statements: array expected"; - for (let i = 0; i < message.statements.length; ++i) { - let error = $root.binlogdata.BinlogTransaction.Statement.verify(message.statements[i]); - if (error) - return "statements." 
+ error; - } - } - if (message.event_token != null && message.hasOwnProperty("event_token")) { - let error = $root.query.EventToken.verify(message.event_token); + if (message.id != null && message.hasOwnProperty("id")) + if (!$util.isInteger(message.id) && !(message.id && $util.isInteger(message.id.low) && $util.isInteger(message.id.high))) + return "id: integer|Long expected"; + if (message.output != null && message.hasOwnProperty("output")) { + let error = $root.query.QueryResult.verify(message.output); if (error) - return "event_token." + error; + return "output." + error; } + if (message.vdiff_uuid != null && message.hasOwnProperty("vdiff_uuid")) + if (!$util.isString(message.vdiff_uuid)) + return "vdiff_uuid: string expected"; return null; }; /** - * Creates a BinlogTransaction message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof binlogdata.BinlogTransaction + * @memberof tabletmanagerdata.VDiffResponse * @static * @param {Object.} object Plain object - * @returns {binlogdata.BinlogTransaction} BinlogTransaction + * @returns {tabletmanagerdata.VDiffResponse} VDiffResponse */ - BinlogTransaction.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.BinlogTransaction) + VDiffResponse.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.VDiffResponse) return object; - let message = new $root.binlogdata.BinlogTransaction(); - if (object.statements) { - if (!Array.isArray(object.statements)) - throw TypeError(".binlogdata.BinlogTransaction.statements: array expected"); - message.statements = []; - for (let i = 0; i < object.statements.length; ++i) { - if (typeof object.statements[i] !== "object") - throw TypeError(".binlogdata.BinlogTransaction.statements: object expected"); - message.statements[i] = 
$root.binlogdata.BinlogTransaction.Statement.fromObject(object.statements[i]); - } - } - if (object.event_token != null) { - if (typeof object.event_token !== "object") - throw TypeError(".binlogdata.BinlogTransaction.event_token: object expected"); - message.event_token = $root.query.EventToken.fromObject(object.event_token); + let message = new $root.tabletmanagerdata.VDiffResponse(); + if (object.id != null) + if ($util.Long) + (message.id = $util.Long.fromValue(object.id)).unsigned = false; + else if (typeof object.id === "string") + message.id = parseInt(object.id, 10); + else if (typeof object.id === "number") + message.id = object.id; + else if (typeof object.id === "object") + message.id = new $util.LongBits(object.id.low >>> 0, object.id.high >>> 0).toNumber(); + if (object.output != null) { + if (typeof object.output !== "object") + throw TypeError(".tabletmanagerdata.VDiffResponse.output: object expected"); + message.output = $root.query.QueryResult.fromObject(object.output); } + if (object.vdiff_uuid != null) + message.vdiff_uuid = String(object.vdiff_uuid); return message; }; /** - * Creates a plain object from a BinlogTransaction message. Also converts values to other types if specified. + * Creates a plain object from a VDiffResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof binlogdata.BinlogTransaction + * @memberof tabletmanagerdata.VDiffResponse * @static - * @param {binlogdata.BinlogTransaction} message BinlogTransaction + * @param {tabletmanagerdata.VDiffResponse} message VDiffResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - BinlogTransaction.toObject = function toObject(message, options) { + VDiffResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.statements = []; - if (options.defaults) - object.event_token = null; - if (message.statements && message.statements.length) { - object.statements = []; - for (let j = 0; j < message.statements.length; ++j) - object.statements[j] = $root.binlogdata.BinlogTransaction.Statement.toObject(message.statements[j], options); + if (options.defaults) { + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.id = options.longs === String ? "0" : 0; + object.output = null; + object.vdiff_uuid = ""; } - if (message.event_token != null && message.hasOwnProperty("event_token")) - object.event_token = $root.query.EventToken.toObject(message.event_token, options); + if (message.id != null && message.hasOwnProperty("id")) + if (typeof message.id === "number") + object.id = options.longs === String ? String(message.id) : message.id; + else + object.id = options.longs === String ? $util.Long.prototype.toString.call(message.id) : options.longs === Number ? 
new $util.LongBits(message.id.low >>> 0, message.id.high >>> 0).toNumber() : message.id; + if (message.output != null && message.hasOwnProperty("output")) + object.output = $root.query.QueryResult.toObject(message.output, options); + if (message.vdiff_uuid != null && message.hasOwnProperty("vdiff_uuid")) + object.vdiff_uuid = message.vdiff_uuid; return object; }; /** - * Converts this BinlogTransaction to JSON. + * Converts this VDiffResponse to JSON. * @function toJSON - * @memberof binlogdata.BinlogTransaction + * @memberof tabletmanagerdata.VDiffResponse * @instance * @returns {Object.} JSON object */ - BinlogTransaction.prototype.toJSON = function toJSON() { + VDiffResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for BinlogTransaction + * Gets the default type url for VDiffResponse * @function getTypeUrl - * @memberof binlogdata.BinlogTransaction + * @memberof tabletmanagerdata.VDiffResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - BinlogTransaction.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VDiffResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.BinlogTransaction"; + return typeUrlPrefix + "/tabletmanagerdata.VDiffResponse"; }; - BinlogTransaction.Statement = (function() { + return VDiffResponse; + })(); - /** - * Properties of a Statement. - * @memberof binlogdata.BinlogTransaction - * @interface IStatement - * @property {binlogdata.BinlogTransaction.Statement.Category|null} [category] Statement category - * @property {binlogdata.ICharset|null} [charset] Statement charset - * @property {Uint8Array|null} [sql] Statement sql - */ + tabletmanagerdata.VDiffPickerOptions = (function() { - /** - * Constructs a new Statement. 
- * @memberof binlogdata.BinlogTransaction - * @classdesc Represents a Statement. - * @implements IStatement - * @constructor - * @param {binlogdata.BinlogTransaction.IStatement=} [properties] Properties to set - */ - function Statement(properties) { - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + /** + * Properties of a VDiffPickerOptions. + * @memberof tabletmanagerdata + * @interface IVDiffPickerOptions + * @property {string|null} [tablet_types] VDiffPickerOptions tablet_types + * @property {string|null} [source_cell] VDiffPickerOptions source_cell + * @property {string|null} [target_cell] VDiffPickerOptions target_cell + */ - /** - * Statement category. - * @member {binlogdata.BinlogTransaction.Statement.Category} category - * @memberof binlogdata.BinlogTransaction.Statement - * @instance - */ - Statement.prototype.category = 0; + /** + * Constructs a new VDiffPickerOptions. + * @memberof tabletmanagerdata + * @classdesc Represents a VDiffPickerOptions. + * @implements IVDiffPickerOptions + * @constructor + * @param {tabletmanagerdata.IVDiffPickerOptions=} [properties] Properties to set + */ + function VDiffPickerOptions(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } - /** - * Statement charset. - * @member {binlogdata.ICharset|null|undefined} charset - * @memberof binlogdata.BinlogTransaction.Statement - * @instance - */ - Statement.prototype.charset = null; + /** + * VDiffPickerOptions tablet_types. + * @member {string} tablet_types + * @memberof tabletmanagerdata.VDiffPickerOptions + * @instance + */ + VDiffPickerOptions.prototype.tablet_types = ""; - /** - * Statement sql. 
- * @member {Uint8Array} sql - * @memberof binlogdata.BinlogTransaction.Statement - * @instance - */ - Statement.prototype.sql = $util.newBuffer([]); + /** + * VDiffPickerOptions source_cell. + * @member {string} source_cell + * @memberof tabletmanagerdata.VDiffPickerOptions + * @instance + */ + VDiffPickerOptions.prototype.source_cell = ""; - /** - * Creates a new Statement instance using the specified properties. - * @function create - * @memberof binlogdata.BinlogTransaction.Statement - * @static - * @param {binlogdata.BinlogTransaction.IStatement=} [properties] Properties to set - * @returns {binlogdata.BinlogTransaction.Statement} Statement instance - */ - Statement.create = function create(properties) { - return new Statement(properties); - }; + /** + * VDiffPickerOptions target_cell. + * @member {string} target_cell + * @memberof tabletmanagerdata.VDiffPickerOptions + * @instance + */ + VDiffPickerOptions.prototype.target_cell = ""; - /** - * Encodes the specified Statement message. Does not implicitly {@link binlogdata.BinlogTransaction.Statement.verify|verify} messages. 
- * @function encode - * @memberof binlogdata.BinlogTransaction.Statement - * @static - * @param {binlogdata.BinlogTransaction.IStatement} message Statement message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - Statement.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.category != null && Object.hasOwnProperty.call(message, "category")) - writer.uint32(/* id 1, wireType 0 =*/8).int32(message.category); - if (message.charset != null && Object.hasOwnProperty.call(message, "charset")) - $root.binlogdata.Charset.encode(message.charset, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.sql != null && Object.hasOwnProperty.call(message, "sql")) - writer.uint32(/* id 3, wireType 2 =*/26).bytes(message.sql); - return writer; - }; + /** + * Creates a new VDiffPickerOptions instance using the specified properties. + * @function create + * @memberof tabletmanagerdata.VDiffPickerOptions + * @static + * @param {tabletmanagerdata.IVDiffPickerOptions=} [properties] Properties to set + * @returns {tabletmanagerdata.VDiffPickerOptions} VDiffPickerOptions instance + */ + VDiffPickerOptions.create = function create(properties) { + return new VDiffPickerOptions(properties); + }; - /** - * Encodes the specified Statement message, length delimited. Does not implicitly {@link binlogdata.BinlogTransaction.Statement.verify|verify} messages. 
- * @function encodeDelimited - * @memberof binlogdata.BinlogTransaction.Statement - * @static - * @param {binlogdata.BinlogTransaction.IStatement} message Statement message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - Statement.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; - - /** - * Decodes a Statement message from the specified reader or buffer. - * @function decode - * @memberof binlogdata.BinlogTransaction.Statement - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.BinlogTransaction.Statement} Statement - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - Statement.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.BinlogTransaction.Statement(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.category = reader.int32(); - break; - } - case 2: { - message.charset = $root.binlogdata.Charset.decode(reader, reader.uint32()); - break; - } - case 3: { - message.sql = reader.bytes(); - break; - } - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; - - /** - * Decodes a Statement message from the specified reader or buffer, length delimited. 
- * @function decodeDelimited - * @memberof binlogdata.BinlogTransaction.Statement - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.BinlogTransaction.Statement} Statement - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - Statement.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; - - /** - * Verifies a Statement message. - * @function verify - * @memberof binlogdata.BinlogTransaction.Statement - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - Statement.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.category != null && message.hasOwnProperty("category")) - switch (message.category) { - default: - return "category: enum value expected"; - case 0: - case 1: - case 2: - case 3: - case 4: - case 5: - case 6: - case 7: - case 8: - case 9: - break; - } - if (message.charset != null && message.hasOwnProperty("charset")) { - let error = $root.binlogdata.Charset.verify(message.charset); - if (error) - return "charset." + error; - } - if (message.sql != null && message.hasOwnProperty("sql")) - if (!(message.sql && typeof message.sql.length === "number" || $util.isString(message.sql))) - return "sql: buffer expected"; - return null; - }; - - /** - * Creates a Statement message from a plain object. Also converts values to their respective internal types. 
- * @function fromObject - * @memberof binlogdata.BinlogTransaction.Statement - * @static - * @param {Object.} object Plain object - * @returns {binlogdata.BinlogTransaction.Statement} Statement - */ - Statement.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.BinlogTransaction.Statement) - return object; - let message = new $root.binlogdata.BinlogTransaction.Statement(); - switch (object.category) { - default: - if (typeof object.category === "number") { - message.category = object.category; - break; - } - break; - case "BL_UNRECOGNIZED": - case 0: - message.category = 0; - break; - case "BL_BEGIN": - case 1: - message.category = 1; - break; - case "BL_COMMIT": - case 2: - message.category = 2; - break; - case "BL_ROLLBACK": - case 3: - message.category = 3; - break; - case "BL_DML_DEPRECATED": - case 4: - message.category = 4; - break; - case "BL_DDL": - case 5: - message.category = 5; - break; - case "BL_SET": - case 6: - message.category = 6; - break; - case "BL_INSERT": - case 7: - message.category = 7; - break; - case "BL_UPDATE": - case 8: - message.category = 8; - break; - case "BL_DELETE": - case 9: - message.category = 9; - break; - } - if (object.charset != null) { - if (typeof object.charset !== "object") - throw TypeError(".binlogdata.BinlogTransaction.Statement.charset: object expected"); - message.charset = $root.binlogdata.Charset.fromObject(object.charset); - } - if (object.sql != null) - if (typeof object.sql === "string") - $util.base64.decode(object.sql, message.sql = $util.newBuffer($util.base64.length(object.sql)), 0); - else if (object.sql.length >= 0) - message.sql = object.sql; - return message; - }; - - /** - * Creates a plain object from a Statement message. Also converts values to other types if specified. 
- * @function toObject - * @memberof binlogdata.BinlogTransaction.Statement - * @static - * @param {binlogdata.BinlogTransaction.Statement} message Statement - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - Statement.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.category = options.enums === String ? "BL_UNRECOGNIZED" : 0; - object.charset = null; - if (options.bytes === String) - object.sql = ""; - else { - object.sql = []; - if (options.bytes !== Array) - object.sql = $util.newBuffer(object.sql); - } - } - if (message.category != null && message.hasOwnProperty("category")) - object.category = options.enums === String ? $root.binlogdata.BinlogTransaction.Statement.Category[message.category] === undefined ? message.category : $root.binlogdata.BinlogTransaction.Statement.Category[message.category] : message.category; - if (message.charset != null && message.hasOwnProperty("charset")) - object.charset = $root.binlogdata.Charset.toObject(message.charset, options); - if (message.sql != null && message.hasOwnProperty("sql")) - object.sql = options.bytes === String ? $util.base64.encode(message.sql, 0, message.sql.length) : options.bytes === Array ? Array.prototype.slice.call(message.sql) : message.sql; - return object; - }; - - /** - * Converts this Statement to JSON. 
- * @function toJSON - * @memberof binlogdata.BinlogTransaction.Statement - * @instance - * @returns {Object.} JSON object - */ - Statement.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - /** - * Gets the default type url for Statement - * @function getTypeUrl - * @memberof binlogdata.BinlogTransaction.Statement - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - Statement.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/binlogdata.BinlogTransaction.Statement"; - }; - - /** - * Category enum. - * @name binlogdata.BinlogTransaction.Statement.Category - * @enum {number} - * @property {number} BL_UNRECOGNIZED=0 BL_UNRECOGNIZED value - * @property {number} BL_BEGIN=1 BL_BEGIN value - * @property {number} BL_COMMIT=2 BL_COMMIT value - * @property {number} BL_ROLLBACK=3 BL_ROLLBACK value - * @property {number} BL_DML_DEPRECATED=4 BL_DML_DEPRECATED value - * @property {number} BL_DDL=5 BL_DDL value - * @property {number} BL_SET=6 BL_SET value - * @property {number} BL_INSERT=7 BL_INSERT value - * @property {number} BL_UPDATE=8 BL_UPDATE value - * @property {number} BL_DELETE=9 BL_DELETE value - */ - Statement.Category = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "BL_UNRECOGNIZED"] = 0; - values[valuesById[1] = "BL_BEGIN"] = 1; - values[valuesById[2] = "BL_COMMIT"] = 2; - values[valuesById[3] = "BL_ROLLBACK"] = 3; - values[valuesById[4] = "BL_DML_DEPRECATED"] = 4; - values[valuesById[5] = "BL_DDL"] = 5; - values[valuesById[6] = "BL_SET"] = 6; - values[valuesById[7] = "BL_INSERT"] = 7; - values[valuesById[8] = "BL_UPDATE"] = 8; - values[valuesById[9] = "BL_DELETE"] = 9; - return values; - })(); - - return Statement; - })(); - - return 
BinlogTransaction; - })(); - - binlogdata.StreamKeyRangeRequest = (function() { - - /** - * Properties of a StreamKeyRangeRequest. - * @memberof binlogdata - * @interface IStreamKeyRangeRequest - * @property {string|null} [position] StreamKeyRangeRequest position - * @property {topodata.IKeyRange|null} [key_range] StreamKeyRangeRequest key_range - * @property {binlogdata.ICharset|null} [charset] StreamKeyRangeRequest charset - */ - - /** - * Constructs a new StreamKeyRangeRequest. - * @memberof binlogdata - * @classdesc Represents a StreamKeyRangeRequest. - * @implements IStreamKeyRangeRequest - * @constructor - * @param {binlogdata.IStreamKeyRangeRequest=} [properties] Properties to set - */ - function StreamKeyRangeRequest(properties) { - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } - - /** - * StreamKeyRangeRequest position. - * @member {string} position - * @memberof binlogdata.StreamKeyRangeRequest - * @instance - */ - StreamKeyRangeRequest.prototype.position = ""; - - /** - * StreamKeyRangeRequest key_range. - * @member {topodata.IKeyRange|null|undefined} key_range - * @memberof binlogdata.StreamKeyRangeRequest - * @instance - */ - StreamKeyRangeRequest.prototype.key_range = null; - - /** - * StreamKeyRangeRequest charset. - * @member {binlogdata.ICharset|null|undefined} charset - * @memberof binlogdata.StreamKeyRangeRequest - * @instance - */ - StreamKeyRangeRequest.prototype.charset = null; - - /** - * Creates a new StreamKeyRangeRequest instance using the specified properties. 
- * @function create - * @memberof binlogdata.StreamKeyRangeRequest - * @static - * @param {binlogdata.IStreamKeyRangeRequest=} [properties] Properties to set - * @returns {binlogdata.StreamKeyRangeRequest} StreamKeyRangeRequest instance - */ - StreamKeyRangeRequest.create = function create(properties) { - return new StreamKeyRangeRequest(properties); - }; - - /** - * Encodes the specified StreamKeyRangeRequest message. Does not implicitly {@link binlogdata.StreamKeyRangeRequest.verify|verify} messages. - * @function encode - * @memberof binlogdata.StreamKeyRangeRequest - * @static - * @param {binlogdata.IStreamKeyRangeRequest} message StreamKeyRangeRequest message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - StreamKeyRangeRequest.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.position != null && Object.hasOwnProperty.call(message, "position")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.position); - if (message.key_range != null && Object.hasOwnProperty.call(message, "key_range")) - $root.topodata.KeyRange.encode(message.key_range, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.charset != null && Object.hasOwnProperty.call(message, "charset")) - $root.binlogdata.Charset.encode(message.charset, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - return writer; - }; + /** + * Encodes the specified VDiffPickerOptions message. Does not implicitly {@link tabletmanagerdata.VDiffPickerOptions.verify|verify} messages. 
+ * @function encode + * @memberof tabletmanagerdata.VDiffPickerOptions + * @static + * @param {tabletmanagerdata.IVDiffPickerOptions} message VDiffPickerOptions message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + VDiffPickerOptions.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.tablet_types != null && Object.hasOwnProperty.call(message, "tablet_types")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.tablet_types); + if (message.source_cell != null && Object.hasOwnProperty.call(message, "source_cell")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.source_cell); + if (message.target_cell != null && Object.hasOwnProperty.call(message, "target_cell")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.target_cell); + return writer; + }; /** - * Encodes the specified StreamKeyRangeRequest message, length delimited. Does not implicitly {@link binlogdata.StreamKeyRangeRequest.verify|verify} messages. + * Encodes the specified VDiffPickerOptions message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffPickerOptions.verify|verify} messages. * @function encodeDelimited - * @memberof binlogdata.StreamKeyRangeRequest + * @memberof tabletmanagerdata.VDiffPickerOptions * @static - * @param {binlogdata.IStreamKeyRangeRequest} message StreamKeyRangeRequest message or plain object to encode + * @param {tabletmanagerdata.IVDiffPickerOptions} message VDiffPickerOptions message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StreamKeyRangeRequest.encodeDelimited = function encodeDelimited(message, writer) { + VDiffPickerOptions.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a StreamKeyRangeRequest message from the specified reader or buffer. 
+ * Decodes a VDiffPickerOptions message from the specified reader or buffer. * @function decode - * @memberof binlogdata.StreamKeyRangeRequest + * @memberof tabletmanagerdata.VDiffPickerOptions * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.StreamKeyRangeRequest} StreamKeyRangeRequest + * @returns {tabletmanagerdata.VDiffPickerOptions} VDiffPickerOptions * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamKeyRangeRequest.decode = function decode(reader, length) { + VDiffPickerOptions.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.StreamKeyRangeRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.VDiffPickerOptions(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.position = reader.string(); + message.tablet_types = reader.string(); break; } case 2: { - message.key_range = $root.topodata.KeyRange.decode(reader, reader.uint32()); + message.source_cell = reader.string(); break; } case 3: { - message.charset = $root.binlogdata.Charset.decode(reader, reader.uint32()); + message.target_cell = reader.string(); break; } default: @@ -62190,149 +63684,141 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a StreamKeyRangeRequest message from the specified reader or buffer, length delimited. + * Decodes a VDiffPickerOptions message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof binlogdata.StreamKeyRangeRequest + * @memberof tabletmanagerdata.VDiffPickerOptions * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.StreamKeyRangeRequest} StreamKeyRangeRequest + * @returns {tabletmanagerdata.VDiffPickerOptions} VDiffPickerOptions * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamKeyRangeRequest.decodeDelimited = function decodeDelimited(reader) { + VDiffPickerOptions.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a StreamKeyRangeRequest message. + * Verifies a VDiffPickerOptions message. * @function verify - * @memberof binlogdata.StreamKeyRangeRequest + * @memberof tabletmanagerdata.VDiffPickerOptions * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - StreamKeyRangeRequest.verify = function verify(message) { + VDiffPickerOptions.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.position != null && message.hasOwnProperty("position")) - if (!$util.isString(message.position)) - return "position: string expected"; - if (message.key_range != null && message.hasOwnProperty("key_range")) { - let error = $root.topodata.KeyRange.verify(message.key_range); - if (error) - return "key_range." + error; - } - if (message.charset != null && message.hasOwnProperty("charset")) { - let error = $root.binlogdata.Charset.verify(message.charset); - if (error) - return "charset." 
+ error; - } + if (message.tablet_types != null && message.hasOwnProperty("tablet_types")) + if (!$util.isString(message.tablet_types)) + return "tablet_types: string expected"; + if (message.source_cell != null && message.hasOwnProperty("source_cell")) + if (!$util.isString(message.source_cell)) + return "source_cell: string expected"; + if (message.target_cell != null && message.hasOwnProperty("target_cell")) + if (!$util.isString(message.target_cell)) + return "target_cell: string expected"; return null; }; /** - * Creates a StreamKeyRangeRequest message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffPickerOptions message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof binlogdata.StreamKeyRangeRequest + * @memberof tabletmanagerdata.VDiffPickerOptions * @static * @param {Object.} object Plain object - * @returns {binlogdata.StreamKeyRangeRequest} StreamKeyRangeRequest + * @returns {tabletmanagerdata.VDiffPickerOptions} VDiffPickerOptions */ - StreamKeyRangeRequest.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.StreamKeyRangeRequest) + VDiffPickerOptions.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.VDiffPickerOptions) return object; - let message = new $root.binlogdata.StreamKeyRangeRequest(); - if (object.position != null) - message.position = String(object.position); - if (object.key_range != null) { - if (typeof object.key_range !== "object") - throw TypeError(".binlogdata.StreamKeyRangeRequest.key_range: object expected"); - message.key_range = $root.topodata.KeyRange.fromObject(object.key_range); - } - if (object.charset != null) { - if (typeof object.charset !== "object") - throw TypeError(".binlogdata.StreamKeyRangeRequest.charset: object expected"); - message.charset = $root.binlogdata.Charset.fromObject(object.charset); - } + let message = new 
$root.tabletmanagerdata.VDiffPickerOptions(); + if (object.tablet_types != null) + message.tablet_types = String(object.tablet_types); + if (object.source_cell != null) + message.source_cell = String(object.source_cell); + if (object.target_cell != null) + message.target_cell = String(object.target_cell); return message; }; /** - * Creates a plain object from a StreamKeyRangeRequest message. Also converts values to other types if specified. + * Creates a plain object from a VDiffPickerOptions message. Also converts values to other types if specified. * @function toObject - * @memberof binlogdata.StreamKeyRangeRequest + * @memberof tabletmanagerdata.VDiffPickerOptions * @static - * @param {binlogdata.StreamKeyRangeRequest} message StreamKeyRangeRequest + * @param {tabletmanagerdata.VDiffPickerOptions} message VDiffPickerOptions * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - StreamKeyRangeRequest.toObject = function toObject(message, options) { + VDiffPickerOptions.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.position = ""; - object.key_range = null; - object.charset = null; + object.tablet_types = ""; + object.source_cell = ""; + object.target_cell = ""; } - if (message.position != null && message.hasOwnProperty("position")) - object.position = message.position; - if (message.key_range != null && message.hasOwnProperty("key_range")) - object.key_range = $root.topodata.KeyRange.toObject(message.key_range, options); - if (message.charset != null && message.hasOwnProperty("charset")) - object.charset = $root.binlogdata.Charset.toObject(message.charset, options); + if (message.tablet_types != null && message.hasOwnProperty("tablet_types")) + object.tablet_types = message.tablet_types; + if (message.source_cell != null && message.hasOwnProperty("source_cell")) + object.source_cell = message.source_cell; + if (message.target_cell 
!= null && message.hasOwnProperty("target_cell")) + object.target_cell = message.target_cell; return object; }; /** - * Converts this StreamKeyRangeRequest to JSON. + * Converts this VDiffPickerOptions to JSON. * @function toJSON - * @memberof binlogdata.StreamKeyRangeRequest + * @memberof tabletmanagerdata.VDiffPickerOptions * @instance * @returns {Object.} JSON object */ - StreamKeyRangeRequest.prototype.toJSON = function toJSON() { + VDiffPickerOptions.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for StreamKeyRangeRequest + * Gets the default type url for VDiffPickerOptions * @function getTypeUrl - * @memberof binlogdata.StreamKeyRangeRequest + * @memberof tabletmanagerdata.VDiffPickerOptions * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - StreamKeyRangeRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VDiffPickerOptions.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.StreamKeyRangeRequest"; + return typeUrlPrefix + "/tabletmanagerdata.VDiffPickerOptions"; }; - return StreamKeyRangeRequest; + return VDiffPickerOptions; })(); - binlogdata.StreamKeyRangeResponse = (function() { + tabletmanagerdata.VDiffReportOptions = (function() { /** - * Properties of a StreamKeyRangeResponse. - * @memberof binlogdata - * @interface IStreamKeyRangeResponse - * @property {binlogdata.IBinlogTransaction|null} [binlog_transaction] StreamKeyRangeResponse binlog_transaction + * Properties of a VDiffReportOptions. 
+ * @memberof tabletmanagerdata + * @interface IVDiffReportOptions + * @property {boolean|null} [only_pks] VDiffReportOptions only_pks + * @property {boolean|null} [debug_query] VDiffReportOptions debug_query + * @property {string|null} [format] VDiffReportOptions format */ /** - * Constructs a new StreamKeyRangeResponse. - * @memberof binlogdata - * @classdesc Represents a StreamKeyRangeResponse. - * @implements IStreamKeyRangeResponse + * Constructs a new VDiffReportOptions. + * @memberof tabletmanagerdata + * @classdesc Represents a VDiffReportOptions. + * @implements IVDiffReportOptions * @constructor - * @param {binlogdata.IStreamKeyRangeResponse=} [properties] Properties to set + * @param {tabletmanagerdata.IVDiffReportOptions=} [properties] Properties to set */ - function StreamKeyRangeResponse(properties) { + function VDiffReportOptions(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -62340,75 +63826,103 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * StreamKeyRangeResponse binlog_transaction. - * @member {binlogdata.IBinlogTransaction|null|undefined} binlog_transaction - * @memberof binlogdata.StreamKeyRangeResponse + * VDiffReportOptions only_pks. + * @member {boolean} only_pks + * @memberof tabletmanagerdata.VDiffReportOptions * @instance */ - StreamKeyRangeResponse.prototype.binlog_transaction = null; + VDiffReportOptions.prototype.only_pks = false; /** - * Creates a new StreamKeyRangeResponse instance using the specified properties. + * VDiffReportOptions debug_query. + * @member {boolean} debug_query + * @memberof tabletmanagerdata.VDiffReportOptions + * @instance + */ + VDiffReportOptions.prototype.debug_query = false; + + /** + * VDiffReportOptions format. 
+ * @member {string} format + * @memberof tabletmanagerdata.VDiffReportOptions + * @instance + */ + VDiffReportOptions.prototype.format = ""; + + /** + * Creates a new VDiffReportOptions instance using the specified properties. * @function create - * @memberof binlogdata.StreamKeyRangeResponse + * @memberof tabletmanagerdata.VDiffReportOptions * @static - * @param {binlogdata.IStreamKeyRangeResponse=} [properties] Properties to set - * @returns {binlogdata.StreamKeyRangeResponse} StreamKeyRangeResponse instance + * @param {tabletmanagerdata.IVDiffReportOptions=} [properties] Properties to set + * @returns {tabletmanagerdata.VDiffReportOptions} VDiffReportOptions instance */ - StreamKeyRangeResponse.create = function create(properties) { - return new StreamKeyRangeResponse(properties); + VDiffReportOptions.create = function create(properties) { + return new VDiffReportOptions(properties); }; /** - * Encodes the specified StreamKeyRangeResponse message. Does not implicitly {@link binlogdata.StreamKeyRangeResponse.verify|verify} messages. + * Encodes the specified VDiffReportOptions message. Does not implicitly {@link tabletmanagerdata.VDiffReportOptions.verify|verify} messages. 
* @function encode - * @memberof binlogdata.StreamKeyRangeResponse + * @memberof tabletmanagerdata.VDiffReportOptions * @static - * @param {binlogdata.IStreamKeyRangeResponse} message StreamKeyRangeResponse message or plain object to encode + * @param {tabletmanagerdata.IVDiffReportOptions} message VDiffReportOptions message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StreamKeyRangeResponse.encode = function encode(message, writer) { + VDiffReportOptions.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.binlog_transaction != null && Object.hasOwnProperty.call(message, "binlog_transaction")) - $root.binlogdata.BinlogTransaction.encode(message.binlog_transaction, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.only_pks != null && Object.hasOwnProperty.call(message, "only_pks")) + writer.uint32(/* id 1, wireType 0 =*/8).bool(message.only_pks); + if (message.debug_query != null && Object.hasOwnProperty.call(message, "debug_query")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.debug_query); + if (message.format != null && Object.hasOwnProperty.call(message, "format")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.format); return writer; }; /** - * Encodes the specified StreamKeyRangeResponse message, length delimited. Does not implicitly {@link binlogdata.StreamKeyRangeResponse.verify|verify} messages. + * Encodes the specified VDiffReportOptions message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffReportOptions.verify|verify} messages. 
* @function encodeDelimited - * @memberof binlogdata.StreamKeyRangeResponse + * @memberof tabletmanagerdata.VDiffReportOptions * @static - * @param {binlogdata.IStreamKeyRangeResponse} message StreamKeyRangeResponse message or plain object to encode + * @param {tabletmanagerdata.IVDiffReportOptions} message VDiffReportOptions message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StreamKeyRangeResponse.encodeDelimited = function encodeDelimited(message, writer) { + VDiffReportOptions.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a StreamKeyRangeResponse message from the specified reader or buffer. + * Decodes a VDiffReportOptions message from the specified reader or buffer. * @function decode - * @memberof binlogdata.StreamKeyRangeResponse + * @memberof tabletmanagerdata.VDiffReportOptions * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.StreamKeyRangeResponse} StreamKeyRangeResponse + * @returns {tabletmanagerdata.VDiffReportOptions} VDiffReportOptions * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamKeyRangeResponse.decode = function decode(reader, length) { + VDiffReportOptions.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.StreamKeyRangeResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tabletmanagerdata.VDiffReportOptions(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.binlog_transaction = $root.binlogdata.BinlogTransaction.decode(reader, reader.uint32()); + message.only_pks = reader.bool(); + break; + } + case 2: { + message.debug_query = reader.bool(); + break; + } + case 3: { + message.format = reader.string(); break; } default: @@ -62420,130 +63934,146 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a StreamKeyRangeResponse message from the specified reader or buffer, length delimited. + * Decodes a VDiffReportOptions message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof binlogdata.StreamKeyRangeResponse + * @memberof tabletmanagerdata.VDiffReportOptions * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.StreamKeyRangeResponse} StreamKeyRangeResponse + * @returns {tabletmanagerdata.VDiffReportOptions} VDiffReportOptions * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamKeyRangeResponse.decodeDelimited = function decodeDelimited(reader) { + VDiffReportOptions.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a StreamKeyRangeResponse message. + * Verifies a VDiffReportOptions message. 
* @function verify - * @memberof binlogdata.StreamKeyRangeResponse + * @memberof tabletmanagerdata.VDiffReportOptions * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - StreamKeyRangeResponse.verify = function verify(message) { + VDiffReportOptions.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.binlog_transaction != null && message.hasOwnProperty("binlog_transaction")) { - let error = $root.binlogdata.BinlogTransaction.verify(message.binlog_transaction); - if (error) - return "binlog_transaction." + error; - } + if (message.only_pks != null && message.hasOwnProperty("only_pks")) + if (typeof message.only_pks !== "boolean") + return "only_pks: boolean expected"; + if (message.debug_query != null && message.hasOwnProperty("debug_query")) + if (typeof message.debug_query !== "boolean") + return "debug_query: boolean expected"; + if (message.format != null && message.hasOwnProperty("format")) + if (!$util.isString(message.format)) + return "format: string expected"; return null; }; /** - * Creates a StreamKeyRangeResponse message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffReportOptions message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof binlogdata.StreamKeyRangeResponse + * @memberof tabletmanagerdata.VDiffReportOptions * @static * @param {Object.} object Plain object - * @returns {binlogdata.StreamKeyRangeResponse} StreamKeyRangeResponse + * @returns {tabletmanagerdata.VDiffReportOptions} VDiffReportOptions */ - StreamKeyRangeResponse.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.StreamKeyRangeResponse) + VDiffReportOptions.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.VDiffReportOptions) return object; - let message = new $root.binlogdata.StreamKeyRangeResponse(); - if (object.binlog_transaction != null) { - if (typeof object.binlog_transaction !== "object") - throw TypeError(".binlogdata.StreamKeyRangeResponse.binlog_transaction: object expected"); - message.binlog_transaction = $root.binlogdata.BinlogTransaction.fromObject(object.binlog_transaction); - } + let message = new $root.tabletmanagerdata.VDiffReportOptions(); + if (object.only_pks != null) + message.only_pks = Boolean(object.only_pks); + if (object.debug_query != null) + message.debug_query = Boolean(object.debug_query); + if (object.format != null) + message.format = String(object.format); return message; }; /** - * Creates a plain object from a StreamKeyRangeResponse message. Also converts values to other types if specified. + * Creates a plain object from a VDiffReportOptions message. Also converts values to other types if specified. 
* @function toObject - * @memberof binlogdata.StreamKeyRangeResponse + * @memberof tabletmanagerdata.VDiffReportOptions * @static - * @param {binlogdata.StreamKeyRangeResponse} message StreamKeyRangeResponse + * @param {tabletmanagerdata.VDiffReportOptions} message VDiffReportOptions * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - StreamKeyRangeResponse.toObject = function toObject(message, options) { + VDiffReportOptions.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.binlog_transaction = null; - if (message.binlog_transaction != null && message.hasOwnProperty("binlog_transaction")) - object.binlog_transaction = $root.binlogdata.BinlogTransaction.toObject(message.binlog_transaction, options); + if (options.defaults) { + object.only_pks = false; + object.debug_query = false; + object.format = ""; + } + if (message.only_pks != null && message.hasOwnProperty("only_pks")) + object.only_pks = message.only_pks; + if (message.debug_query != null && message.hasOwnProperty("debug_query")) + object.debug_query = message.debug_query; + if (message.format != null && message.hasOwnProperty("format")) + object.format = message.format; return object; }; /** - * Converts this StreamKeyRangeResponse to JSON. + * Converts this VDiffReportOptions to JSON. 
* @function toJSON - * @memberof binlogdata.StreamKeyRangeResponse + * @memberof tabletmanagerdata.VDiffReportOptions * @instance * @returns {Object.} JSON object */ - StreamKeyRangeResponse.prototype.toJSON = function toJSON() { + VDiffReportOptions.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for StreamKeyRangeResponse + * Gets the default type url for VDiffReportOptions * @function getTypeUrl - * @memberof binlogdata.StreamKeyRangeResponse + * @memberof tabletmanagerdata.VDiffReportOptions * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - StreamKeyRangeResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VDiffReportOptions.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.StreamKeyRangeResponse"; + return typeUrlPrefix + "/tabletmanagerdata.VDiffReportOptions"; }; - return StreamKeyRangeResponse; + return VDiffReportOptions; })(); - binlogdata.StreamTablesRequest = (function() { + tabletmanagerdata.VDiffCoreOptions = (function() { /** - * Properties of a StreamTablesRequest. - * @memberof binlogdata - * @interface IStreamTablesRequest - * @property {string|null} [position] StreamTablesRequest position - * @property {Array.|null} [tables] StreamTablesRequest tables - * @property {binlogdata.ICharset|null} [charset] StreamTablesRequest charset + * Properties of a VDiffCoreOptions. 
+ * @memberof tabletmanagerdata + * @interface IVDiffCoreOptions + * @property {string|null} [tables] VDiffCoreOptions tables + * @property {boolean|null} [auto_retry] VDiffCoreOptions auto_retry + * @property {number|Long|null} [max_rows] VDiffCoreOptions max_rows + * @property {boolean|null} [checksum] VDiffCoreOptions checksum + * @property {number|Long|null} [sample_pct] VDiffCoreOptions sample_pct + * @property {number|Long|null} [timeout_seconds] VDiffCoreOptions timeout_seconds + * @property {number|Long|null} [max_extra_rows_to_compare] VDiffCoreOptions max_extra_rows_to_compare + * @property {boolean|null} [update_table_stats] VDiffCoreOptions update_table_stats */ /** - * Constructs a new StreamTablesRequest. - * @memberof binlogdata - * @classdesc Represents a StreamTablesRequest. - * @implements IStreamTablesRequest + * Constructs a new VDiffCoreOptions. + * @memberof tabletmanagerdata + * @classdesc Represents a VDiffCoreOptions. + * @implements IVDiffCoreOptions * @constructor - * @param {binlogdata.IStreamTablesRequest=} [properties] Properties to set + * @param {tabletmanagerdata.IVDiffCoreOptions=} [properties] Properties to set */ - function StreamTablesRequest(properties) { - this.tables = []; + function VDiffCoreOptions(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -62551,106 +64081,173 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * StreamTablesRequest position. - * @member {string} position - * @memberof binlogdata.StreamTablesRequest + * VDiffCoreOptions tables. + * @member {string} tables + * @memberof tabletmanagerdata.VDiffCoreOptions * @instance */ - StreamTablesRequest.prototype.position = ""; + VDiffCoreOptions.prototype.tables = ""; /** - * StreamTablesRequest tables. - * @member {Array.} tables - * @memberof binlogdata.StreamTablesRequest + * VDiffCoreOptions auto_retry. 
+ * @member {boolean} auto_retry + * @memberof tabletmanagerdata.VDiffCoreOptions * @instance */ - StreamTablesRequest.prototype.tables = $util.emptyArray; + VDiffCoreOptions.prototype.auto_retry = false; /** - * StreamTablesRequest charset. - * @member {binlogdata.ICharset|null|undefined} charset - * @memberof binlogdata.StreamTablesRequest + * VDiffCoreOptions max_rows. + * @member {number|Long} max_rows + * @memberof tabletmanagerdata.VDiffCoreOptions * @instance */ - StreamTablesRequest.prototype.charset = null; + VDiffCoreOptions.prototype.max_rows = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * Creates a new StreamTablesRequest instance using the specified properties. + * VDiffCoreOptions checksum. + * @member {boolean} checksum + * @memberof tabletmanagerdata.VDiffCoreOptions + * @instance + */ + VDiffCoreOptions.prototype.checksum = false; + + /** + * VDiffCoreOptions sample_pct. + * @member {number|Long} sample_pct + * @memberof tabletmanagerdata.VDiffCoreOptions + * @instance + */ + VDiffCoreOptions.prototype.sample_pct = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * VDiffCoreOptions timeout_seconds. + * @member {number|Long} timeout_seconds + * @memberof tabletmanagerdata.VDiffCoreOptions + * @instance + */ + VDiffCoreOptions.prototype.timeout_seconds = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * VDiffCoreOptions max_extra_rows_to_compare. + * @member {number|Long} max_extra_rows_to_compare + * @memberof tabletmanagerdata.VDiffCoreOptions + * @instance + */ + VDiffCoreOptions.prototype.max_extra_rows_to_compare = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * VDiffCoreOptions update_table_stats. + * @member {boolean} update_table_stats + * @memberof tabletmanagerdata.VDiffCoreOptions + * @instance + */ + VDiffCoreOptions.prototype.update_table_stats = false; + + /** + * Creates a new VDiffCoreOptions instance using the specified properties. 
* @function create - * @memberof binlogdata.StreamTablesRequest + * @memberof tabletmanagerdata.VDiffCoreOptions * @static - * @param {binlogdata.IStreamTablesRequest=} [properties] Properties to set - * @returns {binlogdata.StreamTablesRequest} StreamTablesRequest instance + * @param {tabletmanagerdata.IVDiffCoreOptions=} [properties] Properties to set + * @returns {tabletmanagerdata.VDiffCoreOptions} VDiffCoreOptions instance */ - StreamTablesRequest.create = function create(properties) { - return new StreamTablesRequest(properties); + VDiffCoreOptions.create = function create(properties) { + return new VDiffCoreOptions(properties); }; /** - * Encodes the specified StreamTablesRequest message. Does not implicitly {@link binlogdata.StreamTablesRequest.verify|verify} messages. + * Encodes the specified VDiffCoreOptions message. Does not implicitly {@link tabletmanagerdata.VDiffCoreOptions.verify|verify} messages. * @function encode - * @memberof binlogdata.StreamTablesRequest + * @memberof tabletmanagerdata.VDiffCoreOptions * @static - * @param {binlogdata.IStreamTablesRequest} message StreamTablesRequest message or plain object to encode + * @param {tabletmanagerdata.IVDiffCoreOptions} message VDiffCoreOptions message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StreamTablesRequest.encode = function encode(message, writer) { + VDiffCoreOptions.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.position != null && Object.hasOwnProperty.call(message, "position")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.position); - if (message.tables != null && message.tables.length) - for (let i = 0; i < message.tables.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.tables[i]); - if (message.charset != null && Object.hasOwnProperty.call(message, "charset")) - $root.binlogdata.Charset.encode(message.charset, 
writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.tables != null && Object.hasOwnProperty.call(message, "tables")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.tables); + if (message.auto_retry != null && Object.hasOwnProperty.call(message, "auto_retry")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.auto_retry); + if (message.max_rows != null && Object.hasOwnProperty.call(message, "max_rows")) + writer.uint32(/* id 3, wireType 0 =*/24).int64(message.max_rows); + if (message.checksum != null && Object.hasOwnProperty.call(message, "checksum")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.checksum); + if (message.sample_pct != null && Object.hasOwnProperty.call(message, "sample_pct")) + writer.uint32(/* id 5, wireType 0 =*/40).int64(message.sample_pct); + if (message.timeout_seconds != null && Object.hasOwnProperty.call(message, "timeout_seconds")) + writer.uint32(/* id 6, wireType 0 =*/48).int64(message.timeout_seconds); + if (message.max_extra_rows_to_compare != null && Object.hasOwnProperty.call(message, "max_extra_rows_to_compare")) + writer.uint32(/* id 7, wireType 0 =*/56).int64(message.max_extra_rows_to_compare); + if (message.update_table_stats != null && Object.hasOwnProperty.call(message, "update_table_stats")) + writer.uint32(/* id 8, wireType 0 =*/64).bool(message.update_table_stats); return writer; }; /** - * Encodes the specified StreamTablesRequest message, length delimited. Does not implicitly {@link binlogdata.StreamTablesRequest.verify|verify} messages. + * Encodes the specified VDiffCoreOptions message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffCoreOptions.verify|verify} messages. 
* @function encodeDelimited - * @memberof binlogdata.StreamTablesRequest + * @memberof tabletmanagerdata.VDiffCoreOptions * @static - * @param {binlogdata.IStreamTablesRequest} message StreamTablesRequest message or plain object to encode + * @param {tabletmanagerdata.IVDiffCoreOptions} message VDiffCoreOptions message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StreamTablesRequest.encodeDelimited = function encodeDelimited(message, writer) { + VDiffCoreOptions.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a StreamTablesRequest message from the specified reader or buffer. + * Decodes a VDiffCoreOptions message from the specified reader or buffer. * @function decode - * @memberof binlogdata.StreamTablesRequest + * @memberof tabletmanagerdata.VDiffCoreOptions * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.StreamTablesRequest} StreamTablesRequest + * @returns {tabletmanagerdata.VDiffCoreOptions} VDiffCoreOptions * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamTablesRequest.decode = function decode(reader, length) { + VDiffCoreOptions.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.StreamTablesRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tabletmanagerdata.VDiffCoreOptions(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.position = reader.string(); + message.tables = reader.string(); break; } case 2: { - if (!(message.tables && message.tables.length)) - message.tables = []; - message.tables.push(reader.string()); + message.auto_retry = reader.bool(); break; } case 3: { - message.charset = $root.binlogdata.Charset.decode(reader, reader.uint32()); + message.max_rows = reader.int64(); + break; + } + case 4: { + message.checksum = reader.bool(); + break; + } + case 5: { + message.sample_pct = reader.int64(); + break; + } + case 6: { + message.timeout_seconds = reader.int64(); + break; + } + case 7: { + message.max_extra_rows_to_compare = reader.int64(); + break; + } + case 8: { + message.update_table_stats = reader.bool(); break; } default: @@ -62662,157 +64259,237 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a StreamTablesRequest message from the specified reader or buffer, length delimited. + * Decodes a VDiffCoreOptions message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof binlogdata.StreamTablesRequest + * @memberof tabletmanagerdata.VDiffCoreOptions * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.StreamTablesRequest} StreamTablesRequest + * @returns {tabletmanagerdata.VDiffCoreOptions} VDiffCoreOptions * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamTablesRequest.decodeDelimited = function decodeDelimited(reader) { + VDiffCoreOptions.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a StreamTablesRequest message. 
+ * Verifies a VDiffCoreOptions message. * @function verify - * @memberof binlogdata.StreamTablesRequest + * @memberof tabletmanagerdata.VDiffCoreOptions * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - StreamTablesRequest.verify = function verify(message) { + VDiffCoreOptions.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.position != null && message.hasOwnProperty("position")) - if (!$util.isString(message.position)) - return "position: string expected"; - if (message.tables != null && message.hasOwnProperty("tables")) { - if (!Array.isArray(message.tables)) - return "tables: array expected"; - for (let i = 0; i < message.tables.length; ++i) - if (!$util.isString(message.tables[i])) - return "tables: string[] expected"; - } - if (message.charset != null && message.hasOwnProperty("charset")) { - let error = $root.binlogdata.Charset.verify(message.charset); - if (error) - return "charset." 
+ error; - } + if (message.tables != null && message.hasOwnProperty("tables")) + if (!$util.isString(message.tables)) + return "tables: string expected"; + if (message.auto_retry != null && message.hasOwnProperty("auto_retry")) + if (typeof message.auto_retry !== "boolean") + return "auto_retry: boolean expected"; + if (message.max_rows != null && message.hasOwnProperty("max_rows")) + if (!$util.isInteger(message.max_rows) && !(message.max_rows && $util.isInteger(message.max_rows.low) && $util.isInteger(message.max_rows.high))) + return "max_rows: integer|Long expected"; + if (message.checksum != null && message.hasOwnProperty("checksum")) + if (typeof message.checksum !== "boolean") + return "checksum: boolean expected"; + if (message.sample_pct != null && message.hasOwnProperty("sample_pct")) + if (!$util.isInteger(message.sample_pct) && !(message.sample_pct && $util.isInteger(message.sample_pct.low) && $util.isInteger(message.sample_pct.high))) + return "sample_pct: integer|Long expected"; + if (message.timeout_seconds != null && message.hasOwnProperty("timeout_seconds")) + if (!$util.isInteger(message.timeout_seconds) && !(message.timeout_seconds && $util.isInteger(message.timeout_seconds.low) && $util.isInteger(message.timeout_seconds.high))) + return "timeout_seconds: integer|Long expected"; + if (message.max_extra_rows_to_compare != null && message.hasOwnProperty("max_extra_rows_to_compare")) + if (!$util.isInteger(message.max_extra_rows_to_compare) && !(message.max_extra_rows_to_compare && $util.isInteger(message.max_extra_rows_to_compare.low) && $util.isInteger(message.max_extra_rows_to_compare.high))) + return "max_extra_rows_to_compare: integer|Long expected"; + if (message.update_table_stats != null && message.hasOwnProperty("update_table_stats")) + if (typeof message.update_table_stats !== "boolean") + return "update_table_stats: boolean expected"; return null; }; /** - * Creates a StreamTablesRequest message from a plain object. 
Also converts values to their respective internal types. + * Creates a VDiffCoreOptions message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof binlogdata.StreamTablesRequest + * @memberof tabletmanagerdata.VDiffCoreOptions * @static * @param {Object.} object Plain object - * @returns {binlogdata.StreamTablesRequest} StreamTablesRequest + * @returns {tabletmanagerdata.VDiffCoreOptions} VDiffCoreOptions */ - StreamTablesRequest.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.StreamTablesRequest) + VDiffCoreOptions.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.VDiffCoreOptions) return object; - let message = new $root.binlogdata.StreamTablesRequest(); - if (object.position != null) - message.position = String(object.position); - if (object.tables) { - if (!Array.isArray(object.tables)) - throw TypeError(".binlogdata.StreamTablesRequest.tables: array expected"); - message.tables = []; - for (let i = 0; i < object.tables.length; ++i) - message.tables[i] = String(object.tables[i]); - } - if (object.charset != null) { - if (typeof object.charset !== "object") - throw TypeError(".binlogdata.StreamTablesRequest.charset: object expected"); - message.charset = $root.binlogdata.Charset.fromObject(object.charset); - } + let message = new $root.tabletmanagerdata.VDiffCoreOptions(); + if (object.tables != null) + message.tables = String(object.tables); + if (object.auto_retry != null) + message.auto_retry = Boolean(object.auto_retry); + if (object.max_rows != null) + if ($util.Long) + (message.max_rows = $util.Long.fromValue(object.max_rows)).unsigned = false; + else if (typeof object.max_rows === "string") + message.max_rows = parseInt(object.max_rows, 10); + else if (typeof object.max_rows === "number") + message.max_rows = object.max_rows; + else if (typeof object.max_rows === "object") + message.max_rows = new 
$util.LongBits(object.max_rows.low >>> 0, object.max_rows.high >>> 0).toNumber(); + if (object.checksum != null) + message.checksum = Boolean(object.checksum); + if (object.sample_pct != null) + if ($util.Long) + (message.sample_pct = $util.Long.fromValue(object.sample_pct)).unsigned = false; + else if (typeof object.sample_pct === "string") + message.sample_pct = parseInt(object.sample_pct, 10); + else if (typeof object.sample_pct === "number") + message.sample_pct = object.sample_pct; + else if (typeof object.sample_pct === "object") + message.sample_pct = new $util.LongBits(object.sample_pct.low >>> 0, object.sample_pct.high >>> 0).toNumber(); + if (object.timeout_seconds != null) + if ($util.Long) + (message.timeout_seconds = $util.Long.fromValue(object.timeout_seconds)).unsigned = false; + else if (typeof object.timeout_seconds === "string") + message.timeout_seconds = parseInt(object.timeout_seconds, 10); + else if (typeof object.timeout_seconds === "number") + message.timeout_seconds = object.timeout_seconds; + else if (typeof object.timeout_seconds === "object") + message.timeout_seconds = new $util.LongBits(object.timeout_seconds.low >>> 0, object.timeout_seconds.high >>> 0).toNumber(); + if (object.max_extra_rows_to_compare != null) + if ($util.Long) + (message.max_extra_rows_to_compare = $util.Long.fromValue(object.max_extra_rows_to_compare)).unsigned = false; + else if (typeof object.max_extra_rows_to_compare === "string") + message.max_extra_rows_to_compare = parseInt(object.max_extra_rows_to_compare, 10); + else if (typeof object.max_extra_rows_to_compare === "number") + message.max_extra_rows_to_compare = object.max_extra_rows_to_compare; + else if (typeof object.max_extra_rows_to_compare === "object") + message.max_extra_rows_to_compare = new $util.LongBits(object.max_extra_rows_to_compare.low >>> 0, object.max_extra_rows_to_compare.high >>> 0).toNumber(); + if (object.update_table_stats != null) + message.update_table_stats = 
Boolean(object.update_table_stats); return message; }; /** - * Creates a plain object from a StreamTablesRequest message. Also converts values to other types if specified. + * Creates a plain object from a VDiffCoreOptions message. Also converts values to other types if specified. * @function toObject - * @memberof binlogdata.StreamTablesRequest + * @memberof tabletmanagerdata.VDiffCoreOptions * @static - * @param {binlogdata.StreamTablesRequest} message StreamTablesRequest + * @param {tabletmanagerdata.VDiffCoreOptions} message VDiffCoreOptions * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - StreamTablesRequest.toObject = function toObject(message, options) { + VDiffCoreOptions.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.tables = []; if (options.defaults) { - object.position = ""; - object.charset = null; - } - if (message.position != null && message.hasOwnProperty("position")) - object.position = message.position; - if (message.tables && message.tables.length) { - object.tables = []; - for (let j = 0; j < message.tables.length; ++j) - object.tables[j] = message.tables[j]; + object.tables = ""; + object.auto_retry = false; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.max_rows = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.max_rows = options.longs === String ? "0" : 0; + object.checksum = false; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.sample_pct = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.sample_pct = options.longs === String ? "0" : 0; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.timeout_seconds = options.longs === String ? long.toString() : options.longs === Number ? 
long.toNumber() : long; + } else + object.timeout_seconds = options.longs === String ? "0" : 0; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.max_extra_rows_to_compare = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.max_extra_rows_to_compare = options.longs === String ? "0" : 0; + object.update_table_stats = false; } - if (message.charset != null && message.hasOwnProperty("charset")) - object.charset = $root.binlogdata.Charset.toObject(message.charset, options); + if (message.tables != null && message.hasOwnProperty("tables")) + object.tables = message.tables; + if (message.auto_retry != null && message.hasOwnProperty("auto_retry")) + object.auto_retry = message.auto_retry; + if (message.max_rows != null && message.hasOwnProperty("max_rows")) + if (typeof message.max_rows === "number") + object.max_rows = options.longs === String ? String(message.max_rows) : message.max_rows; + else + object.max_rows = options.longs === String ? $util.Long.prototype.toString.call(message.max_rows) : options.longs === Number ? new $util.LongBits(message.max_rows.low >>> 0, message.max_rows.high >>> 0).toNumber() : message.max_rows; + if (message.checksum != null && message.hasOwnProperty("checksum")) + object.checksum = message.checksum; + if (message.sample_pct != null && message.hasOwnProperty("sample_pct")) + if (typeof message.sample_pct === "number") + object.sample_pct = options.longs === String ? String(message.sample_pct) : message.sample_pct; + else + object.sample_pct = options.longs === String ? $util.Long.prototype.toString.call(message.sample_pct) : options.longs === Number ? new $util.LongBits(message.sample_pct.low >>> 0, message.sample_pct.high >>> 0).toNumber() : message.sample_pct; + if (message.timeout_seconds != null && message.hasOwnProperty("timeout_seconds")) + if (typeof message.timeout_seconds === "number") + object.timeout_seconds = options.longs === String ? 
String(message.timeout_seconds) : message.timeout_seconds; + else + object.timeout_seconds = options.longs === String ? $util.Long.prototype.toString.call(message.timeout_seconds) : options.longs === Number ? new $util.LongBits(message.timeout_seconds.low >>> 0, message.timeout_seconds.high >>> 0).toNumber() : message.timeout_seconds; + if (message.max_extra_rows_to_compare != null && message.hasOwnProperty("max_extra_rows_to_compare")) + if (typeof message.max_extra_rows_to_compare === "number") + object.max_extra_rows_to_compare = options.longs === String ? String(message.max_extra_rows_to_compare) : message.max_extra_rows_to_compare; + else + object.max_extra_rows_to_compare = options.longs === String ? $util.Long.prototype.toString.call(message.max_extra_rows_to_compare) : options.longs === Number ? new $util.LongBits(message.max_extra_rows_to_compare.low >>> 0, message.max_extra_rows_to_compare.high >>> 0).toNumber() : message.max_extra_rows_to_compare; + if (message.update_table_stats != null && message.hasOwnProperty("update_table_stats")) + object.update_table_stats = message.update_table_stats; return object; }; /** - * Converts this StreamTablesRequest to JSON. + * Converts this VDiffCoreOptions to JSON. 
* @function toJSON - * @memberof binlogdata.StreamTablesRequest + * @memberof tabletmanagerdata.VDiffCoreOptions * @instance * @returns {Object.} JSON object */ - StreamTablesRequest.prototype.toJSON = function toJSON() { + VDiffCoreOptions.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for StreamTablesRequest + * Gets the default type url for VDiffCoreOptions * @function getTypeUrl - * @memberof binlogdata.StreamTablesRequest + * @memberof tabletmanagerdata.VDiffCoreOptions * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - StreamTablesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VDiffCoreOptions.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.StreamTablesRequest"; + return typeUrlPrefix + "/tabletmanagerdata.VDiffCoreOptions"; }; - return StreamTablesRequest; + return VDiffCoreOptions; })(); - binlogdata.StreamTablesResponse = (function() { + tabletmanagerdata.VDiffOptions = (function() { /** - * Properties of a StreamTablesResponse. - * @memberof binlogdata - * @interface IStreamTablesResponse - * @property {binlogdata.IBinlogTransaction|null} [binlog_transaction] StreamTablesResponse binlog_transaction + * Properties of a VDiffOptions. + * @memberof tabletmanagerdata + * @interface IVDiffOptions + * @property {tabletmanagerdata.IVDiffPickerOptions|null} [picker_options] VDiffOptions picker_options + * @property {tabletmanagerdata.IVDiffCoreOptions|null} [core_options] VDiffOptions core_options + * @property {tabletmanagerdata.IVDiffReportOptions|null} [report_options] VDiffOptions report_options */ /** - * Constructs a new StreamTablesResponse. - * @memberof binlogdata - * @classdesc Represents a StreamTablesResponse. 
- * @implements IStreamTablesResponse + * Constructs a new VDiffOptions. + * @memberof tabletmanagerdata + * @classdesc Represents a VDiffOptions. + * @implements IVDiffOptions * @constructor - * @param {binlogdata.IStreamTablesResponse=} [properties] Properties to set + * @param {tabletmanagerdata.IVDiffOptions=} [properties] Properties to set */ - function StreamTablesResponse(properties) { + function VDiffOptions(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -62820,75 +64497,103 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * StreamTablesResponse binlog_transaction. - * @member {binlogdata.IBinlogTransaction|null|undefined} binlog_transaction - * @memberof binlogdata.StreamTablesResponse + * VDiffOptions picker_options. + * @member {tabletmanagerdata.IVDiffPickerOptions|null|undefined} picker_options + * @memberof tabletmanagerdata.VDiffOptions * @instance */ - StreamTablesResponse.prototype.binlog_transaction = null; + VDiffOptions.prototype.picker_options = null; /** - * Creates a new StreamTablesResponse instance using the specified properties. + * VDiffOptions core_options. + * @member {tabletmanagerdata.IVDiffCoreOptions|null|undefined} core_options + * @memberof tabletmanagerdata.VDiffOptions + * @instance + */ + VDiffOptions.prototype.core_options = null; + + /** + * VDiffOptions report_options. + * @member {tabletmanagerdata.IVDiffReportOptions|null|undefined} report_options + * @memberof tabletmanagerdata.VDiffOptions + * @instance + */ + VDiffOptions.prototype.report_options = null; + + /** + * Creates a new VDiffOptions instance using the specified properties. 
* @function create - * @memberof binlogdata.StreamTablesResponse + * @memberof tabletmanagerdata.VDiffOptions * @static - * @param {binlogdata.IStreamTablesResponse=} [properties] Properties to set - * @returns {binlogdata.StreamTablesResponse} StreamTablesResponse instance + * @param {tabletmanagerdata.IVDiffOptions=} [properties] Properties to set + * @returns {tabletmanagerdata.VDiffOptions} VDiffOptions instance */ - StreamTablesResponse.create = function create(properties) { - return new StreamTablesResponse(properties); + VDiffOptions.create = function create(properties) { + return new VDiffOptions(properties); }; /** - * Encodes the specified StreamTablesResponse message. Does not implicitly {@link binlogdata.StreamTablesResponse.verify|verify} messages. + * Encodes the specified VDiffOptions message. Does not implicitly {@link tabletmanagerdata.VDiffOptions.verify|verify} messages. * @function encode - * @memberof binlogdata.StreamTablesResponse + * @memberof tabletmanagerdata.VDiffOptions * @static - * @param {binlogdata.IStreamTablesResponse} message StreamTablesResponse message or plain object to encode + * @param {tabletmanagerdata.IVDiffOptions} message VDiffOptions message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StreamTablesResponse.encode = function encode(message, writer) { + VDiffOptions.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.binlog_transaction != null && Object.hasOwnProperty.call(message, "binlog_transaction")) - $root.binlogdata.BinlogTransaction.encode(message.binlog_transaction, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.picker_options != null && Object.hasOwnProperty.call(message, "picker_options")) + $root.tabletmanagerdata.VDiffPickerOptions.encode(message.picker_options, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.core_options != null && 
Object.hasOwnProperty.call(message, "core_options")) + $root.tabletmanagerdata.VDiffCoreOptions.encode(message.core_options, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.report_options != null && Object.hasOwnProperty.call(message, "report_options")) + $root.tabletmanagerdata.VDiffReportOptions.encode(message.report_options, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); return writer; }; /** - * Encodes the specified StreamTablesResponse message, length delimited. Does not implicitly {@link binlogdata.StreamTablesResponse.verify|verify} messages. + * Encodes the specified VDiffOptions message, length delimited. Does not implicitly {@link tabletmanagerdata.VDiffOptions.verify|verify} messages. * @function encodeDelimited - * @memberof binlogdata.StreamTablesResponse + * @memberof tabletmanagerdata.VDiffOptions * @static - * @param {binlogdata.IStreamTablesResponse} message StreamTablesResponse message or plain object to encode + * @param {tabletmanagerdata.IVDiffOptions} message VDiffOptions message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StreamTablesResponse.encodeDelimited = function encodeDelimited(message, writer) { + VDiffOptions.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a StreamTablesResponse message from the specified reader or buffer. + * Decodes a VDiffOptions message from the specified reader or buffer. 
* @function decode - * @memberof binlogdata.StreamTablesResponse + * @memberof tabletmanagerdata.VDiffOptions * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.StreamTablesResponse} StreamTablesResponse + * @returns {tabletmanagerdata.VDiffOptions} VDiffOptions * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamTablesResponse.decode = function decode(reader, length) { + VDiffOptions.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.StreamTablesResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.VDiffOptions(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.binlog_transaction = $root.binlogdata.BinlogTransaction.decode(reader, reader.uint32()); + message.picker_options = $root.tabletmanagerdata.VDiffPickerOptions.decode(reader, reader.uint32()); + break; + } + case 2: { + message.core_options = $root.tabletmanagerdata.VDiffCoreOptions.decode(reader, reader.uint32()); + break; + } + case 3: { + message.report_options = $root.tabletmanagerdata.VDiffReportOptions.decode(reader, reader.uint32()); break; } default: @@ -62900,128 +64605,161 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a StreamTablesResponse message from the specified reader or buffer, length delimited. + * Decodes a VDiffOptions message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof binlogdata.StreamTablesResponse + * @memberof tabletmanagerdata.VDiffOptions * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.StreamTablesResponse} StreamTablesResponse + * @returns {tabletmanagerdata.VDiffOptions} VDiffOptions * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamTablesResponse.decodeDelimited = function decodeDelimited(reader) { + VDiffOptions.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a StreamTablesResponse message. + * Verifies a VDiffOptions message. * @function verify - * @memberof binlogdata.StreamTablesResponse + * @memberof tabletmanagerdata.VDiffOptions * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - StreamTablesResponse.verify = function verify(message) { + VDiffOptions.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.binlog_transaction != null && message.hasOwnProperty("binlog_transaction")) { - let error = $root.binlogdata.BinlogTransaction.verify(message.binlog_transaction); + if (message.picker_options != null && message.hasOwnProperty("picker_options")) { + let error = $root.tabletmanagerdata.VDiffPickerOptions.verify(message.picker_options); if (error) - return "binlog_transaction." + error; + return "picker_options." + error; + } + if (message.core_options != null && message.hasOwnProperty("core_options")) { + let error = $root.tabletmanagerdata.VDiffCoreOptions.verify(message.core_options); + if (error) + return "core_options." 
+ error; + } + if (message.report_options != null && message.hasOwnProperty("report_options")) { + let error = $root.tabletmanagerdata.VDiffReportOptions.verify(message.report_options); + if (error) + return "report_options." + error; } return null; }; /** - * Creates a StreamTablesResponse message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffOptions message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof binlogdata.StreamTablesResponse + * @memberof tabletmanagerdata.VDiffOptions * @static * @param {Object.} object Plain object - * @returns {binlogdata.StreamTablesResponse} StreamTablesResponse + * @returns {tabletmanagerdata.VDiffOptions} VDiffOptions */ - StreamTablesResponse.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.StreamTablesResponse) + VDiffOptions.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.VDiffOptions) return object; - let message = new $root.binlogdata.StreamTablesResponse(); - if (object.binlog_transaction != null) { - if (typeof object.binlog_transaction !== "object") - throw TypeError(".binlogdata.StreamTablesResponse.binlog_transaction: object expected"); - message.binlog_transaction = $root.binlogdata.BinlogTransaction.fromObject(object.binlog_transaction); + let message = new $root.tabletmanagerdata.VDiffOptions(); + if (object.picker_options != null) { + if (typeof object.picker_options !== "object") + throw TypeError(".tabletmanagerdata.VDiffOptions.picker_options: object expected"); + message.picker_options = $root.tabletmanagerdata.VDiffPickerOptions.fromObject(object.picker_options); + } + if (object.core_options != null) { + if (typeof object.core_options !== "object") + throw TypeError(".tabletmanagerdata.VDiffOptions.core_options: object expected"); + message.core_options = 
$root.tabletmanagerdata.VDiffCoreOptions.fromObject(object.core_options); + } + if (object.report_options != null) { + if (typeof object.report_options !== "object") + throw TypeError(".tabletmanagerdata.VDiffOptions.report_options: object expected"); + message.report_options = $root.tabletmanagerdata.VDiffReportOptions.fromObject(object.report_options); } return message; }; /** - * Creates a plain object from a StreamTablesResponse message. Also converts values to other types if specified. + * Creates a plain object from a VDiffOptions message. Also converts values to other types if specified. * @function toObject - * @memberof binlogdata.StreamTablesResponse + * @memberof tabletmanagerdata.VDiffOptions * @static - * @param {binlogdata.StreamTablesResponse} message StreamTablesResponse + * @param {tabletmanagerdata.VDiffOptions} message VDiffOptions * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - StreamTablesResponse.toObject = function toObject(message, options) { + VDiffOptions.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.binlog_transaction = null; - if (message.binlog_transaction != null && message.hasOwnProperty("binlog_transaction")) - object.binlog_transaction = $root.binlogdata.BinlogTransaction.toObject(message.binlog_transaction, options); + if (options.defaults) { + object.picker_options = null; + object.core_options = null; + object.report_options = null; + } + if (message.picker_options != null && message.hasOwnProperty("picker_options")) + object.picker_options = $root.tabletmanagerdata.VDiffPickerOptions.toObject(message.picker_options, options); + if (message.core_options != null && message.hasOwnProperty("core_options")) + object.core_options = $root.tabletmanagerdata.VDiffCoreOptions.toObject(message.core_options, options); + if (message.report_options != null && message.hasOwnProperty("report_options")) + 
object.report_options = $root.tabletmanagerdata.VDiffReportOptions.toObject(message.report_options, options); return object; }; /** - * Converts this StreamTablesResponse to JSON. + * Converts this VDiffOptions to JSON. * @function toJSON - * @memberof binlogdata.StreamTablesResponse + * @memberof tabletmanagerdata.VDiffOptions * @instance * @returns {Object.} JSON object */ - StreamTablesResponse.prototype.toJSON = function toJSON() { + VDiffOptions.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for StreamTablesResponse + * Gets the default type url for VDiffOptions * @function getTypeUrl - * @memberof binlogdata.StreamTablesResponse + * @memberof tabletmanagerdata.VDiffOptions * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - StreamTablesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VDiffOptions.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.StreamTablesResponse"; + return typeUrlPrefix + "/tabletmanagerdata.VDiffOptions"; }; - return StreamTablesResponse; + return VDiffOptions; })(); - binlogdata.CharsetConversion = (function() { + tabletmanagerdata.UpdateVReplicationWorkflowRequest = (function() { /** - * Properties of a CharsetConversion. - * @memberof binlogdata - * @interface ICharsetConversion - * @property {string|null} [from_charset] CharsetConversion from_charset - * @property {string|null} [to_charset] CharsetConversion to_charset + * Properties of an UpdateVReplicationWorkflowRequest. 
+ * @memberof tabletmanagerdata + * @interface IUpdateVReplicationWorkflowRequest + * @property {string|null} [workflow] UpdateVReplicationWorkflowRequest workflow + * @property {Array.|null} [cells] UpdateVReplicationWorkflowRequest cells + * @property {Array.|null} [tablet_types] UpdateVReplicationWorkflowRequest tablet_types + * @property {tabletmanagerdata.TabletSelectionPreference|null} [tablet_selection_preference] UpdateVReplicationWorkflowRequest tablet_selection_preference + * @property {binlogdata.OnDDLAction|null} [on_ddl] UpdateVReplicationWorkflowRequest on_ddl + * @property {binlogdata.VReplicationWorkflowState|null} [state] UpdateVReplicationWorkflowRequest state */ /** - * Constructs a new CharsetConversion. - * @memberof binlogdata - * @classdesc Represents a CharsetConversion. - * @implements ICharsetConversion + * Constructs a new UpdateVReplicationWorkflowRequest. + * @memberof tabletmanagerdata + * @classdesc Represents an UpdateVReplicationWorkflowRequest. + * @implements IUpdateVReplicationWorkflowRequest * @constructor - * @param {binlogdata.ICharsetConversion=} [properties] Properties to set + * @param {tabletmanagerdata.IUpdateVReplicationWorkflowRequest=} [properties] Properties to set */ - function CharsetConversion(properties) { + function UpdateVReplicationWorkflowRequest(properties) { + this.cells = []; + this.tablet_types = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -63029,89 +64767,159 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * CharsetConversion from_charset. - * @member {string} from_charset - * @memberof binlogdata.CharsetConversion + * UpdateVReplicationWorkflowRequest workflow. 
+ * @member {string} workflow + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowRequest * @instance */ - CharsetConversion.prototype.from_charset = ""; + UpdateVReplicationWorkflowRequest.prototype.workflow = ""; /** - * CharsetConversion to_charset. - * @member {string} to_charset - * @memberof binlogdata.CharsetConversion + * UpdateVReplicationWorkflowRequest cells. + * @member {Array.} cells + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowRequest * @instance */ - CharsetConversion.prototype.to_charset = ""; + UpdateVReplicationWorkflowRequest.prototype.cells = $util.emptyArray; /** - * Creates a new CharsetConversion instance using the specified properties. + * UpdateVReplicationWorkflowRequest tablet_types. + * @member {Array.} tablet_types + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowRequest + * @instance + */ + UpdateVReplicationWorkflowRequest.prototype.tablet_types = $util.emptyArray; + + /** + * UpdateVReplicationWorkflowRequest tablet_selection_preference. + * @member {tabletmanagerdata.TabletSelectionPreference} tablet_selection_preference + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowRequest + * @instance + */ + UpdateVReplicationWorkflowRequest.prototype.tablet_selection_preference = 0; + + /** + * UpdateVReplicationWorkflowRequest on_ddl. + * @member {binlogdata.OnDDLAction} on_ddl + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowRequest + * @instance + */ + UpdateVReplicationWorkflowRequest.prototype.on_ddl = 0; + + /** + * UpdateVReplicationWorkflowRequest state. + * @member {binlogdata.VReplicationWorkflowState} state + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowRequest + * @instance + */ + UpdateVReplicationWorkflowRequest.prototype.state = 0; + + /** + * Creates a new UpdateVReplicationWorkflowRequest instance using the specified properties. 
* @function create - * @memberof binlogdata.CharsetConversion + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowRequest * @static - * @param {binlogdata.ICharsetConversion=} [properties] Properties to set - * @returns {binlogdata.CharsetConversion} CharsetConversion instance + * @param {tabletmanagerdata.IUpdateVReplicationWorkflowRequest=} [properties] Properties to set + * @returns {tabletmanagerdata.UpdateVReplicationWorkflowRequest} UpdateVReplicationWorkflowRequest instance */ - CharsetConversion.create = function create(properties) { - return new CharsetConversion(properties); + UpdateVReplicationWorkflowRequest.create = function create(properties) { + return new UpdateVReplicationWorkflowRequest(properties); }; /** - * Encodes the specified CharsetConversion message. Does not implicitly {@link binlogdata.CharsetConversion.verify|verify} messages. + * Encodes the specified UpdateVReplicationWorkflowRequest message. Does not implicitly {@link tabletmanagerdata.UpdateVReplicationWorkflowRequest.verify|verify} messages. 
* @function encode - * @memberof binlogdata.CharsetConversion + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowRequest * @static - * @param {binlogdata.ICharsetConversion} message CharsetConversion message or plain object to encode + * @param {tabletmanagerdata.IUpdateVReplicationWorkflowRequest} message UpdateVReplicationWorkflowRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CharsetConversion.encode = function encode(message, writer) { + UpdateVReplicationWorkflowRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.from_charset != null && Object.hasOwnProperty.call(message, "from_charset")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.from_charset); - if (message.to_charset != null && Object.hasOwnProperty.call(message, "to_charset")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.to_charset); + if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.workflow); + if (message.cells != null && message.cells.length) + for (let i = 0; i < message.cells.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.cells[i]); + if (message.tablet_types != null && message.tablet_types.length) { + writer.uint32(/* id 3, wireType 2 =*/26).fork(); + for (let i = 0; i < message.tablet_types.length; ++i) + writer.int32(message.tablet_types[i]); + writer.ldelim(); + } + if (message.tablet_selection_preference != null && Object.hasOwnProperty.call(message, "tablet_selection_preference")) + writer.uint32(/* id 4, wireType 0 =*/32).int32(message.tablet_selection_preference); + if (message.on_ddl != null && Object.hasOwnProperty.call(message, "on_ddl")) + writer.uint32(/* id 5, wireType 0 =*/40).int32(message.on_ddl); + if (message.state != null && Object.hasOwnProperty.call(message, "state")) + writer.uint32(/* 
id 6, wireType 0 =*/48).int32(message.state); return writer; }; /** - * Encodes the specified CharsetConversion message, length delimited. Does not implicitly {@link binlogdata.CharsetConversion.verify|verify} messages. + * Encodes the specified UpdateVReplicationWorkflowRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.UpdateVReplicationWorkflowRequest.verify|verify} messages. * @function encodeDelimited - * @memberof binlogdata.CharsetConversion + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowRequest * @static - * @param {binlogdata.ICharsetConversion} message CharsetConversion message or plain object to encode + * @param {tabletmanagerdata.IUpdateVReplicationWorkflowRequest} message UpdateVReplicationWorkflowRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CharsetConversion.encodeDelimited = function encodeDelimited(message, writer) { + UpdateVReplicationWorkflowRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a CharsetConversion message from the specified reader or buffer. + * Decodes an UpdateVReplicationWorkflowRequest message from the specified reader or buffer. 
* @function decode - * @memberof binlogdata.CharsetConversion + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.CharsetConversion} CharsetConversion + * @returns {tabletmanagerdata.UpdateVReplicationWorkflowRequest} UpdateVReplicationWorkflowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CharsetConversion.decode = function decode(reader, length) { + UpdateVReplicationWorkflowRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.CharsetConversion(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.UpdateVReplicationWorkflowRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.from_charset = reader.string(); + message.workflow = reader.string(); break; } case 2: { - message.to_charset = reader.string(); + if (!(message.cells && message.cells.length)) + message.cells = []; + message.cells.push(reader.string()); + break; + } + case 3: { + if (!(message.tablet_types && message.tablet_types.length)) + message.tablet_types = []; + if ((tag & 7) === 2) { + let end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.tablet_types.push(reader.int32()); + } else + message.tablet_types.push(reader.int32()); + break; + } + case 4: { + message.tablet_selection_preference = reader.int32(); + break; + } + case 5: { + message.on_ddl = reader.int32(); + break; + } + case 6: { + message.state = reader.int32(); break; } default: @@ -63123,141 +64931,350 @@ export const binlogdata = $root.binlogdata = (() => { 
}; /** - * Decodes a CharsetConversion message from the specified reader or buffer, length delimited. + * Decodes an UpdateVReplicationWorkflowRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof binlogdata.CharsetConversion + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.CharsetConversion} CharsetConversion + * @returns {tabletmanagerdata.UpdateVReplicationWorkflowRequest} UpdateVReplicationWorkflowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CharsetConversion.decodeDelimited = function decodeDelimited(reader) { + UpdateVReplicationWorkflowRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a CharsetConversion message. + * Verifies an UpdateVReplicationWorkflowRequest message. 
* @function verify - * @memberof binlogdata.CharsetConversion + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - CharsetConversion.verify = function verify(message) { + UpdateVReplicationWorkflowRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.from_charset != null && message.hasOwnProperty("from_charset")) - if (!$util.isString(message.from_charset)) - return "from_charset: string expected"; - if (message.to_charset != null && message.hasOwnProperty("to_charset")) - if (!$util.isString(message.to_charset)) - return "to_charset: string expected"; + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; + if (message.cells != null && message.hasOwnProperty("cells")) { + if (!Array.isArray(message.cells)) + return "cells: array expected"; + for (let i = 0; i < message.cells.length; ++i) + if (!$util.isString(message.cells[i])) + return "cells: string[] expected"; + } + if (message.tablet_types != null && message.hasOwnProperty("tablet_types")) { + if (!Array.isArray(message.tablet_types)) + return "tablet_types: array expected"; + for (let i = 0; i < message.tablet_types.length; ++i) + switch (message.tablet_types[i]) { + default: + return "tablet_types: enum value[] expected"; + case 0: + case 1: + case 1: + case 2: + case 3: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + } + } + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + switch (message.tablet_selection_preference) { + default: + return "tablet_selection_preference: enum value expected"; + case 0: + case 1: + case 3: + break; + } + if (message.on_ddl != null && message.hasOwnProperty("on_ddl")) + 
switch (message.on_ddl) { + default: + return "on_ddl: enum value expected"; + case 0: + case 1: + case 2: + case 3: + break; + } + if (message.state != null && message.hasOwnProperty("state")) + switch (message.state) { + default: + return "state: enum value expected"; + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + break; + } return null; }; /** - * Creates a CharsetConversion message from a plain object. Also converts values to their respective internal types. + * Creates an UpdateVReplicationWorkflowRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof binlogdata.CharsetConversion + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowRequest * @static * @param {Object.} object Plain object - * @returns {binlogdata.CharsetConversion} CharsetConversion + * @returns {tabletmanagerdata.UpdateVReplicationWorkflowRequest} UpdateVReplicationWorkflowRequest */ - CharsetConversion.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.CharsetConversion) + UpdateVReplicationWorkflowRequest.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.UpdateVReplicationWorkflowRequest) return object; - let message = new $root.binlogdata.CharsetConversion(); - if (object.from_charset != null) - message.from_charset = String(object.from_charset); - if (object.to_charset != null) - message.to_charset = String(object.to_charset); + let message = new $root.tabletmanagerdata.UpdateVReplicationWorkflowRequest(); + if (object.workflow != null) + message.workflow = String(object.workflow); + if (object.cells) { + if (!Array.isArray(object.cells)) + throw TypeError(".tabletmanagerdata.UpdateVReplicationWorkflowRequest.cells: array expected"); + message.cells = []; + for (let i = 0; i < object.cells.length; ++i) + message.cells[i] = String(object.cells[i]); + } + if (object.tablet_types) { + if 
(!Array.isArray(object.tablet_types)) + throw TypeError(".tabletmanagerdata.UpdateVReplicationWorkflowRequest.tablet_types: array expected"); + message.tablet_types = []; + for (let i = 0; i < object.tablet_types.length; ++i) + switch (object.tablet_types[i]) { + default: + if (typeof object.tablet_types[i] === "number") { + message.tablet_types[i] = object.tablet_types[i]; + break; + } + case "UNKNOWN": + case 0: + message.tablet_types[i] = 0; + break; + case "PRIMARY": + case 1: + message.tablet_types[i] = 1; + break; + case "MASTER": + case 1: + message.tablet_types[i] = 1; + break; + case "REPLICA": + case 2: + message.tablet_types[i] = 2; + break; + case "RDONLY": + case 3: + message.tablet_types[i] = 3; + break; + case "BATCH": + case 3: + message.tablet_types[i] = 3; + break; + case "SPARE": + case 4: + message.tablet_types[i] = 4; + break; + case "EXPERIMENTAL": + case 5: + message.tablet_types[i] = 5; + break; + case "BACKUP": + case 6: + message.tablet_types[i] = 6; + break; + case "RESTORE": + case 7: + message.tablet_types[i] = 7; + break; + case "DRAINED": + case 8: + message.tablet_types[i] = 8; + break; + } + } + switch (object.tablet_selection_preference) { + default: + if (typeof object.tablet_selection_preference === "number") { + message.tablet_selection_preference = object.tablet_selection_preference; + break; + } + break; + case "ANY": + case 0: + message.tablet_selection_preference = 0; + break; + case "INORDER": + case 1: + message.tablet_selection_preference = 1; + break; + case "UNKNOWN": + case 3: + message.tablet_selection_preference = 3; + break; + } + switch (object.on_ddl) { + default: + if (typeof object.on_ddl === "number") { + message.on_ddl = object.on_ddl; + break; + } + break; + case "IGNORE": + case 0: + message.on_ddl = 0; + break; + case "STOP": + case 1: + message.on_ddl = 1; + break; + case "EXEC": + case 2: + message.on_ddl = 2; + break; + case "EXEC_IGNORE": + case 3: + message.on_ddl = 3; + break; + } + switch 
(object.state) { + default: + if (typeof object.state === "number") { + message.state = object.state; + break; + } + break; + case "Unknown": + case 0: + message.state = 0; + break; + case "Init": + case 1: + message.state = 1; + break; + case "Stopped": + case 2: + message.state = 2; + break; + case "Copying": + case 3: + message.state = 3; + break; + case "Running": + case 4: + message.state = 4; + break; + case "Error": + case 5: + message.state = 5; + break; + case "Lagging": + case 6: + message.state = 6; + break; + } return message; }; /** - * Creates a plain object from a CharsetConversion message. Also converts values to other types if specified. + * Creates a plain object from an UpdateVReplicationWorkflowRequest message. Also converts values to other types if specified. * @function toObject - * @memberof binlogdata.CharsetConversion + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowRequest * @static - * @param {binlogdata.CharsetConversion} message CharsetConversion + * @param {tabletmanagerdata.UpdateVReplicationWorkflowRequest} message UpdateVReplicationWorkflowRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - CharsetConversion.toObject = function toObject(message, options) { + UpdateVReplicationWorkflowRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) { + object.cells = []; + object.tablet_types = []; + } if (options.defaults) { - object.from_charset = ""; - object.to_charset = ""; + object.workflow = ""; + object.tablet_selection_preference = options.enums === String ? "ANY" : 0; + object.on_ddl = options.enums === String ? "IGNORE" : 0; + object.state = options.enums === String ? 
"Unknown" : 0; } - if (message.from_charset != null && message.hasOwnProperty("from_charset")) - object.from_charset = message.from_charset; - if (message.to_charset != null && message.hasOwnProperty("to_charset")) - object.to_charset = message.to_charset; + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; + if (message.cells && message.cells.length) { + object.cells = []; + for (let j = 0; j < message.cells.length; ++j) + object.cells[j] = message.cells[j]; + } + if (message.tablet_types && message.tablet_types.length) { + object.tablet_types = []; + for (let j = 0; j < message.tablet_types.length; ++j) + object.tablet_types[j] = options.enums === String ? $root.topodata.TabletType[message.tablet_types[j]] === undefined ? message.tablet_types[j] : $root.topodata.TabletType[message.tablet_types[j]] : message.tablet_types[j]; + } + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + object.tablet_selection_preference = options.enums === String ? $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] === undefined ? message.tablet_selection_preference : $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] : message.tablet_selection_preference; + if (message.on_ddl != null && message.hasOwnProperty("on_ddl")) + object.on_ddl = options.enums === String ? $root.binlogdata.OnDDLAction[message.on_ddl] === undefined ? message.on_ddl : $root.binlogdata.OnDDLAction[message.on_ddl] : message.on_ddl; + if (message.state != null && message.hasOwnProperty("state")) + object.state = options.enums === String ? $root.binlogdata.VReplicationWorkflowState[message.state] === undefined ? message.state : $root.binlogdata.VReplicationWorkflowState[message.state] : message.state; return object; }; /** - * Converts this CharsetConversion to JSON. + * Converts this UpdateVReplicationWorkflowRequest to JSON. 
* @function toJSON - * @memberof binlogdata.CharsetConversion + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowRequest * @instance * @returns {Object.} JSON object */ - CharsetConversion.prototype.toJSON = function toJSON() { + UpdateVReplicationWorkflowRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for CharsetConversion + * Gets the default type url for UpdateVReplicationWorkflowRequest * @function getTypeUrl - * @memberof binlogdata.CharsetConversion + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - CharsetConversion.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + UpdateVReplicationWorkflowRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.CharsetConversion"; + return typeUrlPrefix + "/tabletmanagerdata.UpdateVReplicationWorkflowRequest"; }; - return CharsetConversion; + return UpdateVReplicationWorkflowRequest; })(); - binlogdata.Rule = (function() { + tabletmanagerdata.UpdateVReplicationWorkflowResponse = (function() { /** - * Properties of a Rule. 
- * @memberof binlogdata - * @interface IRule - * @property {string|null} [match] Rule match - * @property {string|null} [filter] Rule filter - * @property {Object.|null} [convert_enum_to_text] Rule convert_enum_to_text - * @property {Object.|null} [convert_charset] Rule convert_charset - * @property {string|null} [source_unique_key_columns] Rule source_unique_key_columns - * @property {string|null} [target_unique_key_columns] Rule target_unique_key_columns - * @property {string|null} [source_unique_key_target_columns] Rule source_unique_key_target_columns - * @property {Object.|null} [convert_int_to_enum] Rule convert_int_to_enum + * Properties of an UpdateVReplicationWorkflowResponse. + * @memberof tabletmanagerdata + * @interface IUpdateVReplicationWorkflowResponse + * @property {query.IQueryResult|null} [result] UpdateVReplicationWorkflowResponse result */ /** - * Constructs a new Rule. - * @memberof binlogdata - * @classdesc Represents a Rule. - * @implements IRule + * Constructs a new UpdateVReplicationWorkflowResponse. + * @memberof tabletmanagerdata + * @classdesc Represents an UpdateVReplicationWorkflowResponse. + * @implements IUpdateVReplicationWorkflowResponse * @constructor - * @param {binlogdata.IRule=} [properties] Properties to set + * @param {tabletmanagerdata.IUpdateVReplicationWorkflowResponse=} [properties] Properties to set */ - function Rule(properties) { - this.convert_enum_to_text = {}; - this.convert_charset = {}; - this.convert_int_to_enum = {}; + function UpdateVReplicationWorkflowResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -63265,235 +65282,75 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * Rule match. - * @member {string} match - * @memberof binlogdata.Rule - * @instance - */ - Rule.prototype.match = ""; - - /** - * Rule filter. 
- * @member {string} filter - * @memberof binlogdata.Rule - * @instance - */ - Rule.prototype.filter = ""; - - /** - * Rule convert_enum_to_text. - * @member {Object.} convert_enum_to_text - * @memberof binlogdata.Rule - * @instance - */ - Rule.prototype.convert_enum_to_text = $util.emptyObject; - - /** - * Rule convert_charset. - * @member {Object.} convert_charset - * @memberof binlogdata.Rule - * @instance - */ - Rule.prototype.convert_charset = $util.emptyObject; - - /** - * Rule source_unique_key_columns. - * @member {string} source_unique_key_columns - * @memberof binlogdata.Rule - * @instance - */ - Rule.prototype.source_unique_key_columns = ""; - - /** - * Rule target_unique_key_columns. - * @member {string} target_unique_key_columns - * @memberof binlogdata.Rule - * @instance - */ - Rule.prototype.target_unique_key_columns = ""; - - /** - * Rule source_unique_key_target_columns. - * @member {string} source_unique_key_target_columns - * @memberof binlogdata.Rule - * @instance - */ - Rule.prototype.source_unique_key_target_columns = ""; - - /** - * Rule convert_int_to_enum. - * @member {Object.} convert_int_to_enum - * @memberof binlogdata.Rule + * UpdateVReplicationWorkflowResponse result. + * @member {query.IQueryResult|null|undefined} result + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowResponse * @instance */ - Rule.prototype.convert_int_to_enum = $util.emptyObject; + UpdateVReplicationWorkflowResponse.prototype.result = null; /** - * Creates a new Rule instance using the specified properties. + * Creates a new UpdateVReplicationWorkflowResponse instance using the specified properties. 
* @function create - * @memberof binlogdata.Rule + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowResponse * @static - * @param {binlogdata.IRule=} [properties] Properties to set - * @returns {binlogdata.Rule} Rule instance + * @param {tabletmanagerdata.IUpdateVReplicationWorkflowResponse=} [properties] Properties to set + * @returns {tabletmanagerdata.UpdateVReplicationWorkflowResponse} UpdateVReplicationWorkflowResponse instance */ - Rule.create = function create(properties) { - return new Rule(properties); + UpdateVReplicationWorkflowResponse.create = function create(properties) { + return new UpdateVReplicationWorkflowResponse(properties); }; /** - * Encodes the specified Rule message. Does not implicitly {@link binlogdata.Rule.verify|verify} messages. + * Encodes the specified UpdateVReplicationWorkflowResponse message. Does not implicitly {@link tabletmanagerdata.UpdateVReplicationWorkflowResponse.verify|verify} messages. * @function encode - * @memberof binlogdata.Rule + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowResponse * @static - * @param {binlogdata.IRule} message Rule message or plain object to encode + * @param {tabletmanagerdata.IUpdateVReplicationWorkflowResponse} message UpdateVReplicationWorkflowResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Rule.encode = function encode(message, writer) { + UpdateVReplicationWorkflowResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.match != null && Object.hasOwnProperty.call(message, "match")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.match); - if (message.filter != null && Object.hasOwnProperty.call(message, "filter")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.filter); - if (message.convert_enum_to_text != null && Object.hasOwnProperty.call(message, "convert_enum_to_text")) - for (let keys = 
Object.keys(message.convert_enum_to_text), i = 0; i < keys.length; ++i) - writer.uint32(/* id 3, wireType 2 =*/26).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.convert_enum_to_text[keys[i]]).ldelim(); - if (message.convert_charset != null && Object.hasOwnProperty.call(message, "convert_charset")) - for (let keys = Object.keys(message.convert_charset), i = 0; i < keys.length; ++i) { - writer.uint32(/* id 4, wireType 2 =*/34).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); - $root.binlogdata.CharsetConversion.encode(message.convert_charset[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); - } - if (message.source_unique_key_columns != null && Object.hasOwnProperty.call(message, "source_unique_key_columns")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.source_unique_key_columns); - if (message.target_unique_key_columns != null && Object.hasOwnProperty.call(message, "target_unique_key_columns")) - writer.uint32(/* id 6, wireType 2 =*/50).string(message.target_unique_key_columns); - if (message.source_unique_key_target_columns != null && Object.hasOwnProperty.call(message, "source_unique_key_target_columns")) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.source_unique_key_target_columns); - if (message.convert_int_to_enum != null && Object.hasOwnProperty.call(message, "convert_int_to_enum")) - for (let keys = Object.keys(message.convert_int_to_enum), i = 0; i < keys.length; ++i) - writer.uint32(/* id 8, wireType 2 =*/66).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 0 =*/16).bool(message.convert_int_to_enum[keys[i]]).ldelim(); + if (message.result != null && Object.hasOwnProperty.call(message, "result")) + $root.query.QueryResult.encode(message.result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified Rule message, length delimited. 
Does not implicitly {@link binlogdata.Rule.verify|verify} messages. + * Encodes the specified UpdateVReplicationWorkflowResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.UpdateVReplicationWorkflowResponse.verify|verify} messages. * @function encodeDelimited - * @memberof binlogdata.Rule + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowResponse * @static - * @param {binlogdata.IRule} message Rule message or plain object to encode + * @param {tabletmanagerdata.IUpdateVReplicationWorkflowResponse} message UpdateVReplicationWorkflowResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Rule.encodeDelimited = function encodeDelimited(message, writer) { + UpdateVReplicationWorkflowResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Rule message from the specified reader or buffer. + * Decodes an UpdateVReplicationWorkflowResponse message from the specified reader or buffer. * @function decode - * @memberof binlogdata.Rule + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.Rule} Rule + * @returns {tabletmanagerdata.UpdateVReplicationWorkflowResponse} UpdateVReplicationWorkflowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Rule.decode = function decode(reader, length) { + UpdateVReplicationWorkflowResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.Rule(), key, value; + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tabletmanagerdata.UpdateVReplicationWorkflowResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.match = reader.string(); - break; - } - case 2: { - message.filter = reader.string(); - break; - } - case 3: { - if (message.convert_enum_to_text === $util.emptyObject) - message.convert_enum_to_text = {}; - let end2 = reader.uint32() + reader.pos; - key = ""; - value = ""; - while (reader.pos < end2) { - let tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = reader.string(); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.convert_enum_to_text[key] = value; - break; - } - case 4: { - if (message.convert_charset === $util.emptyObject) - message.convert_charset = {}; - let end2 = reader.uint32() + reader.pos; - key = ""; - value = null; - while (reader.pos < end2) { - let tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = $root.binlogdata.CharsetConversion.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.convert_charset[key] = value; - break; - } - case 5: { - message.source_unique_key_columns = reader.string(); - break; - } - case 6: { - message.target_unique_key_columns = reader.string(); - break; - } - case 7: { - message.source_unique_key_target_columns = reader.string(); - break; - } - case 8: { - if (message.convert_int_to_enum === $util.emptyObject) - message.convert_int_to_enum = {}; - let end2 = reader.uint32() + reader.pos; - key = ""; - value = false; - while (reader.pos < end2) { - let tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = reader.bool(); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.convert_int_to_enum[key] = value; + message.result = 
$root.query.QueryResult.decode(reader, reader.uint32()); break; } default: @@ -63505,230 +65362,128 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a Rule message from the specified reader or buffer, length delimited. + * Decodes an UpdateVReplicationWorkflowResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof binlogdata.Rule + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.Rule} Rule + * @returns {tabletmanagerdata.UpdateVReplicationWorkflowResponse} UpdateVReplicationWorkflowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Rule.decodeDelimited = function decodeDelimited(reader) { + UpdateVReplicationWorkflowResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Rule message. + * Verifies an UpdateVReplicationWorkflowResponse message. 
* @function verify - * @memberof binlogdata.Rule + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Rule.verify = function verify(message) { + UpdateVReplicationWorkflowResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.match != null && message.hasOwnProperty("match")) - if (!$util.isString(message.match)) - return "match: string expected"; - if (message.filter != null && message.hasOwnProperty("filter")) - if (!$util.isString(message.filter)) - return "filter: string expected"; - if (message.convert_enum_to_text != null && message.hasOwnProperty("convert_enum_to_text")) { - if (!$util.isObject(message.convert_enum_to_text)) - return "convert_enum_to_text: object expected"; - let key = Object.keys(message.convert_enum_to_text); - for (let i = 0; i < key.length; ++i) - if (!$util.isString(message.convert_enum_to_text[key[i]])) - return "convert_enum_to_text: string{k:string} expected"; - } - if (message.convert_charset != null && message.hasOwnProperty("convert_charset")) { - if (!$util.isObject(message.convert_charset)) - return "convert_charset: object expected"; - let key = Object.keys(message.convert_charset); - for (let i = 0; i < key.length; ++i) { - let error = $root.binlogdata.CharsetConversion.verify(message.convert_charset[key[i]]); - if (error) - return "convert_charset." 
+ error; - } - } - if (message.source_unique_key_columns != null && message.hasOwnProperty("source_unique_key_columns")) - if (!$util.isString(message.source_unique_key_columns)) - return "source_unique_key_columns: string expected"; - if (message.target_unique_key_columns != null && message.hasOwnProperty("target_unique_key_columns")) - if (!$util.isString(message.target_unique_key_columns)) - return "target_unique_key_columns: string expected"; - if (message.source_unique_key_target_columns != null && message.hasOwnProperty("source_unique_key_target_columns")) - if (!$util.isString(message.source_unique_key_target_columns)) - return "source_unique_key_target_columns: string expected"; - if (message.convert_int_to_enum != null && message.hasOwnProperty("convert_int_to_enum")) { - if (!$util.isObject(message.convert_int_to_enum)) - return "convert_int_to_enum: object expected"; - let key = Object.keys(message.convert_int_to_enum); - for (let i = 0; i < key.length; ++i) - if (typeof message.convert_int_to_enum[key[i]] !== "boolean") - return "convert_int_to_enum: boolean{k:string} expected"; + if (message.result != null && message.hasOwnProperty("result")) { + let error = $root.query.QueryResult.verify(message.result); + if (error) + return "result." + error; } return null; }; /** - * Creates a Rule message from a plain object. Also converts values to their respective internal types. + * Creates an UpdateVReplicationWorkflowResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof binlogdata.Rule + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowResponse * @static * @param {Object.} object Plain object - * @returns {binlogdata.Rule} Rule + * @returns {tabletmanagerdata.UpdateVReplicationWorkflowResponse} UpdateVReplicationWorkflowResponse */ - Rule.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.Rule) + UpdateVReplicationWorkflowResponse.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.UpdateVReplicationWorkflowResponse) return object; - let message = new $root.binlogdata.Rule(); - if (object.match != null) - message.match = String(object.match); - if (object.filter != null) - message.filter = String(object.filter); - if (object.convert_enum_to_text) { - if (typeof object.convert_enum_to_text !== "object") - throw TypeError(".binlogdata.Rule.convert_enum_to_text: object expected"); - message.convert_enum_to_text = {}; - for (let keys = Object.keys(object.convert_enum_to_text), i = 0; i < keys.length; ++i) - message.convert_enum_to_text[keys[i]] = String(object.convert_enum_to_text[keys[i]]); - } - if (object.convert_charset) { - if (typeof object.convert_charset !== "object") - throw TypeError(".binlogdata.Rule.convert_charset: object expected"); - message.convert_charset = {}; - for (let keys = Object.keys(object.convert_charset), i = 0; i < keys.length; ++i) { - if (typeof object.convert_charset[keys[i]] !== "object") - throw TypeError(".binlogdata.Rule.convert_charset: object expected"); - message.convert_charset[keys[i]] = $root.binlogdata.CharsetConversion.fromObject(object.convert_charset[keys[i]]); - } - } - if (object.source_unique_key_columns != null) - message.source_unique_key_columns = String(object.source_unique_key_columns); - if (object.target_unique_key_columns != null) - message.target_unique_key_columns = String(object.target_unique_key_columns); - if (object.source_unique_key_target_columns != null) - 
message.source_unique_key_target_columns = String(object.source_unique_key_target_columns); - if (object.convert_int_to_enum) { - if (typeof object.convert_int_to_enum !== "object") - throw TypeError(".binlogdata.Rule.convert_int_to_enum: object expected"); - message.convert_int_to_enum = {}; - for (let keys = Object.keys(object.convert_int_to_enum), i = 0; i < keys.length; ++i) - message.convert_int_to_enum[keys[i]] = Boolean(object.convert_int_to_enum[keys[i]]); + let message = new $root.tabletmanagerdata.UpdateVReplicationWorkflowResponse(); + if (object.result != null) { + if (typeof object.result !== "object") + throw TypeError(".tabletmanagerdata.UpdateVReplicationWorkflowResponse.result: object expected"); + message.result = $root.query.QueryResult.fromObject(object.result); } return message; }; /** - * Creates a plain object from a Rule message. Also converts values to other types if specified. + * Creates a plain object from an UpdateVReplicationWorkflowResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof binlogdata.Rule + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowResponse * @static - * @param {binlogdata.Rule} message Rule + * @param {tabletmanagerdata.UpdateVReplicationWorkflowResponse} message UpdateVReplicationWorkflowResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Rule.toObject = function toObject(message, options) { + UpdateVReplicationWorkflowResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.objects || options.defaults) { - object.convert_enum_to_text = {}; - object.convert_charset = {}; - object.convert_int_to_enum = {}; - } - if (options.defaults) { - object.match = ""; - object.filter = ""; - object.source_unique_key_columns = ""; - object.target_unique_key_columns = ""; - object.source_unique_key_target_columns = ""; - } - if (message.match != null && message.hasOwnProperty("match")) - object.match = message.match; - if (message.filter != null && message.hasOwnProperty("filter")) - object.filter = message.filter; - let keys2; - if (message.convert_enum_to_text && (keys2 = Object.keys(message.convert_enum_to_text)).length) { - object.convert_enum_to_text = {}; - for (let j = 0; j < keys2.length; ++j) - object.convert_enum_to_text[keys2[j]] = message.convert_enum_to_text[keys2[j]]; - } - if (message.convert_charset && (keys2 = Object.keys(message.convert_charset)).length) { - object.convert_charset = {}; - for (let j = 0; j < keys2.length; ++j) - object.convert_charset[keys2[j]] = $root.binlogdata.CharsetConversion.toObject(message.convert_charset[keys2[j]], options); - } - if (message.source_unique_key_columns != null && message.hasOwnProperty("source_unique_key_columns")) - object.source_unique_key_columns = message.source_unique_key_columns; - if (message.target_unique_key_columns != null && message.hasOwnProperty("target_unique_key_columns")) - 
object.target_unique_key_columns = message.target_unique_key_columns; - if (message.source_unique_key_target_columns != null && message.hasOwnProperty("source_unique_key_target_columns")) - object.source_unique_key_target_columns = message.source_unique_key_target_columns; - if (message.convert_int_to_enum && (keys2 = Object.keys(message.convert_int_to_enum)).length) { - object.convert_int_to_enum = {}; - for (let j = 0; j < keys2.length; ++j) - object.convert_int_to_enum[keys2[j]] = message.convert_int_to_enum[keys2[j]]; - } + if (options.defaults) + object.result = null; + if (message.result != null && message.hasOwnProperty("result")) + object.result = $root.query.QueryResult.toObject(message.result, options); return object; }; /** - * Converts this Rule to JSON. + * Converts this UpdateVReplicationWorkflowResponse to JSON. * @function toJSON - * @memberof binlogdata.Rule + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowResponse * @instance * @returns {Object.} JSON object */ - Rule.prototype.toJSON = function toJSON() { + UpdateVReplicationWorkflowResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for Rule + * Gets the default type url for UpdateVReplicationWorkflowResponse * @function getTypeUrl - * @memberof binlogdata.Rule + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - Rule.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + UpdateVReplicationWorkflowResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.Rule"; + return typeUrlPrefix + "/tabletmanagerdata.UpdateVReplicationWorkflowResponse"; }; - return Rule; + return UpdateVReplicationWorkflowResponse; })(); - 
binlogdata.Filter = (function() { + tabletmanagerdata.ResetSequencesRequest = (function() { /** - * Properties of a Filter. - * @memberof binlogdata - * @interface IFilter - * @property {Array.|null} [rules] Filter rules - * @property {binlogdata.Filter.FieldEventMode|null} [field_event_mode] Filter field_event_mode - * @property {number|Long|null} [workflow_type] Filter workflow_type - * @property {string|null} [workflow_name] Filter workflow_name + * Properties of a ResetSequencesRequest. + * @memberof tabletmanagerdata + * @interface IResetSequencesRequest + * @property {Array.|null} [tables] ResetSequencesRequest tables */ /** - * Constructs a new Filter. - * @memberof binlogdata - * @classdesc Represents a Filter. - * @implements IFilter + * Constructs a new ResetSequencesRequest. + * @memberof tabletmanagerdata + * @classdesc Represents a ResetSequencesRequest. + * @implements IResetSequencesRequest * @constructor - * @param {binlogdata.IFilter=} [properties] Properties to set + * @param {tabletmanagerdata.IResetSequencesRequest=} [properties] Properties to set */ - function Filter(properties) { - this.rules = []; + function ResetSequencesRequest(properties) { + this.tables = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -63736,120 +65491,78 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * Filter rules. - * @member {Array.} rules - * @memberof binlogdata.Filter - * @instance - */ - Filter.prototype.rules = $util.emptyArray; - - /** - * Filter field_event_mode. - * @member {binlogdata.Filter.FieldEventMode} field_event_mode - * @memberof binlogdata.Filter - * @instance - */ - Filter.prototype.field_event_mode = 0; - - /** - * Filter workflow_type. - * @member {number|Long} workflow_type - * @memberof binlogdata.Filter - * @instance - */ - Filter.prototype.workflow_type = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * Filter workflow_name. 
- * @member {string} workflow_name - * @memberof binlogdata.Filter + * ResetSequencesRequest tables. + * @member {Array.} tables + * @memberof tabletmanagerdata.ResetSequencesRequest * @instance */ - Filter.prototype.workflow_name = ""; + ResetSequencesRequest.prototype.tables = $util.emptyArray; /** - * Creates a new Filter instance using the specified properties. + * Creates a new ResetSequencesRequest instance using the specified properties. * @function create - * @memberof binlogdata.Filter + * @memberof tabletmanagerdata.ResetSequencesRequest * @static - * @param {binlogdata.IFilter=} [properties] Properties to set - * @returns {binlogdata.Filter} Filter instance + * @param {tabletmanagerdata.IResetSequencesRequest=} [properties] Properties to set + * @returns {tabletmanagerdata.ResetSequencesRequest} ResetSequencesRequest instance */ - Filter.create = function create(properties) { - return new Filter(properties); + ResetSequencesRequest.create = function create(properties) { + return new ResetSequencesRequest(properties); }; /** - * Encodes the specified Filter message. Does not implicitly {@link binlogdata.Filter.verify|verify} messages. + * Encodes the specified ResetSequencesRequest message. Does not implicitly {@link tabletmanagerdata.ResetSequencesRequest.verify|verify} messages. 
* @function encode - * @memberof binlogdata.Filter + * @memberof tabletmanagerdata.ResetSequencesRequest * @static - * @param {binlogdata.IFilter} message Filter message or plain object to encode + * @param {tabletmanagerdata.IResetSequencesRequest} message ResetSequencesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Filter.encode = function encode(message, writer) { + ResetSequencesRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.rules != null && message.rules.length) - for (let i = 0; i < message.rules.length; ++i) - $root.binlogdata.Rule.encode(message.rules[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.field_event_mode != null && Object.hasOwnProperty.call(message, "field_event_mode")) - writer.uint32(/* id 2, wireType 0 =*/16).int32(message.field_event_mode); - if (message.workflow_type != null && Object.hasOwnProperty.call(message, "workflow_type")) - writer.uint32(/* id 3, wireType 0 =*/24).int64(message.workflow_type); - if (message.workflow_name != null && Object.hasOwnProperty.call(message, "workflow_name")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.workflow_name); + if (message.tables != null && message.tables.length) + for (let i = 0; i < message.tables.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.tables[i]); return writer; }; /** - * Encodes the specified Filter message, length delimited. Does not implicitly {@link binlogdata.Filter.verify|verify} messages. + * Encodes the specified ResetSequencesRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.ResetSequencesRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof binlogdata.Filter + * @memberof tabletmanagerdata.ResetSequencesRequest * @static - * @param {binlogdata.IFilter} message Filter message or plain object to encode + * @param {tabletmanagerdata.IResetSequencesRequest} message ResetSequencesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Filter.encodeDelimited = function encodeDelimited(message, writer) { + ResetSequencesRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Filter message from the specified reader or buffer. + * Decodes a ResetSequencesRequest message from the specified reader or buffer. * @function decode - * @memberof binlogdata.Filter + * @memberof tabletmanagerdata.ResetSequencesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.Filter} Filter + * @returns {tabletmanagerdata.ResetSequencesRequest} ResetSequencesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Filter.decode = function decode(reader, length) { + ResetSequencesRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.Filter(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tabletmanagerdata.ResetSequencesRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.rules && message.rules.length)) - message.rules = []; - message.rules.push($root.binlogdata.Rule.decode(reader, reader.uint32())); - break; - } - case 2: { - message.field_event_mode = reader.int32(); - break; - } - case 3: { - message.workflow_type = reader.int64(); - break; - } - case 4: { - message.workflow_name = reader.string(); + if (!(message.tables && message.tables.length)) + message.tables = []; + message.tables.push(reader.string()); break; } default: @@ -63861,278 +65574,133 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a Filter message from the specified reader or buffer, length delimited. + * Decodes a ResetSequencesRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof binlogdata.Filter + * @memberof tabletmanagerdata.ResetSequencesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.Filter} Filter + * @returns {tabletmanagerdata.ResetSequencesRequest} ResetSequencesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Filter.decodeDelimited = function decodeDelimited(reader) { + ResetSequencesRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Filter message. + * Verifies a ResetSequencesRequest message. 
* @function verify - * @memberof binlogdata.Filter + * @memberof tabletmanagerdata.ResetSequencesRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Filter.verify = function verify(message) { + ResetSequencesRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.rules != null && message.hasOwnProperty("rules")) { - if (!Array.isArray(message.rules)) - return "rules: array expected"; - for (let i = 0; i < message.rules.length; ++i) { - let error = $root.binlogdata.Rule.verify(message.rules[i]); - if (error) - return "rules." + error; - } + if (message.tables != null && message.hasOwnProperty("tables")) { + if (!Array.isArray(message.tables)) + return "tables: array expected"; + for (let i = 0; i < message.tables.length; ++i) + if (!$util.isString(message.tables[i])) + return "tables: string[] expected"; } - if (message.field_event_mode != null && message.hasOwnProperty("field_event_mode")) - switch (message.field_event_mode) { - default: - return "field_event_mode: enum value expected"; - case 0: - case 1: - break; - } - if (message.workflow_type != null && message.hasOwnProperty("workflow_type")) - if (!$util.isInteger(message.workflow_type) && !(message.workflow_type && $util.isInteger(message.workflow_type.low) && $util.isInteger(message.workflow_type.high))) - return "workflow_type: integer|Long expected"; - if (message.workflow_name != null && message.hasOwnProperty("workflow_name")) - if (!$util.isString(message.workflow_name)) - return "workflow_name: string expected"; return null; }; /** - * Creates a Filter message from a plain object. Also converts values to their respective internal types. + * Creates a ResetSequencesRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof binlogdata.Filter + * @memberof tabletmanagerdata.ResetSequencesRequest * @static * @param {Object.} object Plain object - * @returns {binlogdata.Filter} Filter + * @returns {tabletmanagerdata.ResetSequencesRequest} ResetSequencesRequest */ - Filter.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.Filter) + ResetSequencesRequest.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.ResetSequencesRequest) return object; - let message = new $root.binlogdata.Filter(); - if (object.rules) { - if (!Array.isArray(object.rules)) - throw TypeError(".binlogdata.Filter.rules: array expected"); - message.rules = []; - for (let i = 0; i < object.rules.length; ++i) { - if (typeof object.rules[i] !== "object") - throw TypeError(".binlogdata.Filter.rules: object expected"); - message.rules[i] = $root.binlogdata.Rule.fromObject(object.rules[i]); - } - } - switch (object.field_event_mode) { - default: - if (typeof object.field_event_mode === "number") { - message.field_event_mode = object.field_event_mode; - break; - } - break; - case "ERR_ON_MISMATCH": - case 0: - message.field_event_mode = 0; - break; - case "BEST_EFFORT": - case 1: - message.field_event_mode = 1; - break; + let message = new $root.tabletmanagerdata.ResetSequencesRequest(); + if (object.tables) { + if (!Array.isArray(object.tables)) + throw TypeError(".tabletmanagerdata.ResetSequencesRequest.tables: array expected"); + message.tables = []; + for (let i = 0; i < object.tables.length; ++i) + message.tables[i] = String(object.tables[i]); } - if (object.workflow_type != null) - if ($util.Long) - (message.workflow_type = $util.Long.fromValue(object.workflow_type)).unsigned = false; - else if (typeof object.workflow_type === "string") - message.workflow_type = parseInt(object.workflow_type, 10); - else if (typeof object.workflow_type === "number") - message.workflow_type = object.workflow_type; - else if (typeof 
object.workflow_type === "object") - message.workflow_type = new $util.LongBits(object.workflow_type.low >>> 0, object.workflow_type.high >>> 0).toNumber(); - if (object.workflow_name != null) - message.workflow_name = String(object.workflow_name); return message; }; /** - * Creates a plain object from a Filter message. Also converts values to other types if specified. + * Creates a plain object from a ResetSequencesRequest message. Also converts values to other types if specified. * @function toObject - * @memberof binlogdata.Filter + * @memberof tabletmanagerdata.ResetSequencesRequest * @static - * @param {binlogdata.Filter} message Filter + * @param {tabletmanagerdata.ResetSequencesRequest} message ResetSequencesRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Filter.toObject = function toObject(message, options) { + ResetSequencesRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) - object.rules = []; - if (options.defaults) { - object.field_event_mode = options.enums === String ? "ERR_ON_MISMATCH" : 0; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.workflow_type = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.workflow_type = options.longs === String ? "0" : 0; - object.workflow_name = ""; - } - if (message.rules && message.rules.length) { - object.rules = []; - for (let j = 0; j < message.rules.length; ++j) - object.rules[j] = $root.binlogdata.Rule.toObject(message.rules[j], options); + object.tables = []; + if (message.tables && message.tables.length) { + object.tables = []; + for (let j = 0; j < message.tables.length; ++j) + object.tables[j] = message.tables[j]; } - if (message.field_event_mode != null && message.hasOwnProperty("field_event_mode")) - object.field_event_mode = options.enums === String ? 
$root.binlogdata.Filter.FieldEventMode[message.field_event_mode] === undefined ? message.field_event_mode : $root.binlogdata.Filter.FieldEventMode[message.field_event_mode] : message.field_event_mode; - if (message.workflow_type != null && message.hasOwnProperty("workflow_type")) - if (typeof message.workflow_type === "number") - object.workflow_type = options.longs === String ? String(message.workflow_type) : message.workflow_type; - else - object.workflow_type = options.longs === String ? $util.Long.prototype.toString.call(message.workflow_type) : options.longs === Number ? new $util.LongBits(message.workflow_type.low >>> 0, message.workflow_type.high >>> 0).toNumber() : message.workflow_type; - if (message.workflow_name != null && message.hasOwnProperty("workflow_name")) - object.workflow_name = message.workflow_name; return object; }; /** - * Converts this Filter to JSON. + * Converts this ResetSequencesRequest to JSON. * @function toJSON - * @memberof binlogdata.Filter + * @memberof tabletmanagerdata.ResetSequencesRequest * @instance * @returns {Object.} JSON object */ - Filter.prototype.toJSON = function toJSON() { + ResetSequencesRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for Filter + * Gets the default type url for ResetSequencesRequest * @function getTypeUrl - * @memberof binlogdata.Filter + * @memberof tabletmanagerdata.ResetSequencesRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - Filter.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ResetSequencesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.Filter"; + return typeUrlPrefix + "/tabletmanagerdata.ResetSequencesRequest"; }; - /** - * FieldEventMode enum. 
- * @name binlogdata.Filter.FieldEventMode - * @enum {number} - * @property {number} ERR_ON_MISMATCH=0 ERR_ON_MISMATCH value - * @property {number} BEST_EFFORT=1 BEST_EFFORT value - */ - Filter.FieldEventMode = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "ERR_ON_MISMATCH"] = 0; - values[valuesById[1] = "BEST_EFFORT"] = 1; - return values; - })(); - - return Filter; - })(); - - /** - * OnDDLAction enum. - * @name binlogdata.OnDDLAction - * @enum {number} - * @property {number} IGNORE=0 IGNORE value - * @property {number} STOP=1 STOP value - * @property {number} EXEC=2 EXEC value - * @property {number} EXEC_IGNORE=3 EXEC_IGNORE value - */ - binlogdata.OnDDLAction = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "IGNORE"] = 0; - values[valuesById[1] = "STOP"] = 1; - values[valuesById[2] = "EXEC"] = 2; - values[valuesById[3] = "EXEC_IGNORE"] = 3; - return values; - })(); - - /** - * VReplicationWorkflowType enum. - * @name binlogdata.VReplicationWorkflowType - * @enum {number} - * @property {number} Materialize=0 Materialize value - * @property {number} MoveTables=1 MoveTables value - * @property {number} CreateLookupIndex=2 CreateLookupIndex value - * @property {number} Migrate=3 Migrate value - * @property {number} Reshard=4 Reshard value - * @property {number} OnlineDDL=5 OnlineDDL value - */ - binlogdata.VReplicationWorkflowType = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "Materialize"] = 0; - values[valuesById[1] = "MoveTables"] = 1; - values[valuesById[2] = "CreateLookupIndex"] = 2; - values[valuesById[3] = "Migrate"] = 3; - values[valuesById[4] = "Reshard"] = 4; - values[valuesById[5] = "OnlineDDL"] = 5; - return values; - })(); - - /** - * VReplicationWorkflowSubType enum. 
- * @name binlogdata.VReplicationWorkflowSubType - * @enum {number} - * @property {number} None=0 None value - * @property {number} Partial=1 Partial value - */ - binlogdata.VReplicationWorkflowSubType = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "None"] = 0; - values[valuesById[1] = "Partial"] = 1; - return values; + return ResetSequencesRequest; })(); - binlogdata.BinlogSource = (function() { + tabletmanagerdata.ResetSequencesResponse = (function() { /** - * Properties of a BinlogSource. - * @memberof binlogdata - * @interface IBinlogSource - * @property {string|null} [keyspace] BinlogSource keyspace - * @property {string|null} [shard] BinlogSource shard - * @property {topodata.TabletType|null} [tablet_type] BinlogSource tablet_type - * @property {topodata.IKeyRange|null} [key_range] BinlogSource key_range - * @property {Array.|null} [tables] BinlogSource tables - * @property {binlogdata.IFilter|null} [filter] BinlogSource filter - * @property {binlogdata.OnDDLAction|null} [on_ddl] BinlogSource on_ddl - * @property {string|null} [external_mysql] BinlogSource external_mysql - * @property {boolean|null} [stop_after_copy] BinlogSource stop_after_copy - * @property {string|null} [external_cluster] BinlogSource external_cluster - * @property {string|null} [source_time_zone] BinlogSource source_time_zone - * @property {string|null} [target_time_zone] BinlogSource target_time_zone + * Properties of a ResetSequencesResponse. + * @memberof tabletmanagerdata + * @interface IResetSequencesResponse */ /** - * Constructs a new BinlogSource. - * @memberof binlogdata - * @classdesc Represents a BinlogSource. - * @implements IBinlogSource + * Constructs a new ResetSequencesResponse. + * @memberof tabletmanagerdata + * @classdesc Represents a ResetSequencesResponse. 
+ * @implements IResetSequencesResponse * @constructor - * @param {binlogdata.IBinlogSource=} [properties] Properties to set + * @param {tabletmanagerdata.IResetSequencesResponse=} [properties] Properties to set */ - function BinlogSource(properties) { - this.tables = []; + function ResetSequencesResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -64140,232 +65708,251 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * BinlogSource keyspace. - * @member {string} keyspace - * @memberof binlogdata.BinlogSource - * @instance + * Creates a new ResetSequencesResponse instance using the specified properties. + * @function create + * @memberof tabletmanagerdata.ResetSequencesResponse + * @static + * @param {tabletmanagerdata.IResetSequencesResponse=} [properties] Properties to set + * @returns {tabletmanagerdata.ResetSequencesResponse} ResetSequencesResponse instance */ - BinlogSource.prototype.keyspace = ""; + ResetSequencesResponse.create = function create(properties) { + return new ResetSequencesResponse(properties); + }; /** - * BinlogSource shard. - * @member {string} shard - * @memberof binlogdata.BinlogSource - * @instance + * Encodes the specified ResetSequencesResponse message. Does not implicitly {@link tabletmanagerdata.ResetSequencesResponse.verify|verify} messages. + * @function encode + * @memberof tabletmanagerdata.ResetSequencesResponse + * @static + * @param {tabletmanagerdata.IResetSequencesResponse} message ResetSequencesResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer */ - BinlogSource.prototype.shard = ""; + ResetSequencesResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + return writer; + }; /** - * BinlogSource tablet_type. 
- * @member {topodata.TabletType} tablet_type - * @memberof binlogdata.BinlogSource - * @instance + * Encodes the specified ResetSequencesResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.ResetSequencesResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof tabletmanagerdata.ResetSequencesResponse + * @static + * @param {tabletmanagerdata.IResetSequencesResponse} message ResetSequencesResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer */ - BinlogSource.prototype.tablet_type = 0; + ResetSequencesResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; /** - * BinlogSource key_range. - * @member {topodata.IKeyRange|null|undefined} key_range - * @memberof binlogdata.BinlogSource - * @instance + * Decodes a ResetSequencesResponse message from the specified reader or buffer. + * @function decode + * @memberof tabletmanagerdata.ResetSequencesResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {tabletmanagerdata.ResetSequencesResponse} ResetSequencesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BinlogSource.prototype.key_range = null; + ResetSequencesResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.ResetSequencesResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; /** - * BinlogSource tables. 
- * @member {Array.} tables - * @memberof binlogdata.BinlogSource - * @instance + * Decodes a ResetSequencesResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof tabletmanagerdata.ResetSequencesResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {tabletmanagerdata.ResetSequencesResponse} ResetSequencesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BinlogSource.prototype.tables = $util.emptyArray; + ResetSequencesResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; /** - * BinlogSource filter. - * @member {binlogdata.IFilter|null|undefined} filter - * @memberof binlogdata.BinlogSource - * @instance + * Verifies a ResetSequencesResponse message. + * @function verify + * @memberof tabletmanagerdata.ResetSequencesResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - BinlogSource.prototype.filter = null; + ResetSequencesResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + return null; + }; /** - * BinlogSource on_ddl. - * @member {binlogdata.OnDDLAction} on_ddl - * @memberof binlogdata.BinlogSource - * @instance + * Creates a ResetSequencesResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof tabletmanagerdata.ResetSequencesResponse + * @static + * @param {Object.} object Plain object + * @returns {tabletmanagerdata.ResetSequencesResponse} ResetSequencesResponse */ - BinlogSource.prototype.on_ddl = 0; + ResetSequencesResponse.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.ResetSequencesResponse) + return object; + return new $root.tabletmanagerdata.ResetSequencesResponse(); + }; /** - * BinlogSource external_mysql. - * @member {string} external_mysql - * @memberof binlogdata.BinlogSource - * @instance + * Creates a plain object from a ResetSequencesResponse message. Also converts values to other types if specified. + * @function toObject + * @memberof tabletmanagerdata.ResetSequencesResponse + * @static + * @param {tabletmanagerdata.ResetSequencesResponse} message ResetSequencesResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object */ - BinlogSource.prototype.external_mysql = ""; + ResetSequencesResponse.toObject = function toObject() { + return {}; + }; /** - * BinlogSource stop_after_copy. - * @member {boolean} stop_after_copy - * @memberof binlogdata.BinlogSource + * Converts this ResetSequencesResponse to JSON. + * @function toJSON + * @memberof tabletmanagerdata.ResetSequencesResponse * @instance + * @returns {Object.} JSON object */ - BinlogSource.prototype.stop_after_copy = false; + ResetSequencesResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; /** - * BinlogSource external_cluster. 
- * @member {string} external_cluster - * @memberof binlogdata.BinlogSource - * @instance + * Gets the default type url for ResetSequencesResponse + * @function getTypeUrl + * @memberof tabletmanagerdata.ResetSequencesResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url */ - BinlogSource.prototype.external_cluster = ""; + ResetSequencesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/tabletmanagerdata.ResetSequencesResponse"; + }; + + return ResetSequencesResponse; + })(); + + tabletmanagerdata.CheckThrottlerRequest = (function() { /** - * BinlogSource source_time_zone. - * @member {string} source_time_zone - * @memberof binlogdata.BinlogSource - * @instance + * Properties of a CheckThrottlerRequest. + * @memberof tabletmanagerdata + * @interface ICheckThrottlerRequest + * @property {string|null} [app_name] CheckThrottlerRequest app_name */ - BinlogSource.prototype.source_time_zone = ""; /** - * BinlogSource target_time_zone. - * @member {string} target_time_zone - * @memberof binlogdata.BinlogSource + * Constructs a new CheckThrottlerRequest. + * @memberof tabletmanagerdata + * @classdesc Represents a CheckThrottlerRequest. + * @implements ICheckThrottlerRequest + * @constructor + * @param {tabletmanagerdata.ICheckThrottlerRequest=} [properties] Properties to set + */ + function CheckThrottlerRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * CheckThrottlerRequest app_name. 
+ * @member {string} app_name + * @memberof tabletmanagerdata.CheckThrottlerRequest * @instance */ - BinlogSource.prototype.target_time_zone = ""; + CheckThrottlerRequest.prototype.app_name = ""; /** - * Creates a new BinlogSource instance using the specified properties. + * Creates a new CheckThrottlerRequest instance using the specified properties. * @function create - * @memberof binlogdata.BinlogSource + * @memberof tabletmanagerdata.CheckThrottlerRequest * @static - * @param {binlogdata.IBinlogSource=} [properties] Properties to set - * @returns {binlogdata.BinlogSource} BinlogSource instance + * @param {tabletmanagerdata.ICheckThrottlerRequest=} [properties] Properties to set + * @returns {tabletmanagerdata.CheckThrottlerRequest} CheckThrottlerRequest instance */ - BinlogSource.create = function create(properties) { - return new BinlogSource(properties); + CheckThrottlerRequest.create = function create(properties) { + return new CheckThrottlerRequest(properties); }; /** - * Encodes the specified BinlogSource message. Does not implicitly {@link binlogdata.BinlogSource.verify|verify} messages. + * Encodes the specified CheckThrottlerRequest message. Does not implicitly {@link tabletmanagerdata.CheckThrottlerRequest.verify|verify} messages. 
* @function encode - * @memberof binlogdata.BinlogSource + * @memberof tabletmanagerdata.CheckThrottlerRequest * @static - * @param {binlogdata.IBinlogSource} message BinlogSource message or plain object to encode + * @param {tabletmanagerdata.ICheckThrottlerRequest} message CheckThrottlerRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BinlogSource.encode = function encode(message, writer) { + CheckThrottlerRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.tablet_type != null && Object.hasOwnProperty.call(message, "tablet_type")) - writer.uint32(/* id 3, wireType 0 =*/24).int32(message.tablet_type); - if (message.key_range != null && Object.hasOwnProperty.call(message, "key_range")) - $root.topodata.KeyRange.encode(message.key_range, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.tables != null && message.tables.length) - for (let i = 0; i < message.tables.length; ++i) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.tables[i]); - if (message.filter != null && Object.hasOwnProperty.call(message, "filter")) - $root.binlogdata.Filter.encode(message.filter, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); - if (message.on_ddl != null && Object.hasOwnProperty.call(message, "on_ddl")) - writer.uint32(/* id 7, wireType 0 =*/56).int32(message.on_ddl); - if (message.external_mysql != null && Object.hasOwnProperty.call(message, "external_mysql")) - writer.uint32(/* id 8, wireType 2 =*/66).string(message.external_mysql); - if (message.stop_after_copy != null && 
Object.hasOwnProperty.call(message, "stop_after_copy")) - writer.uint32(/* id 9, wireType 0 =*/72).bool(message.stop_after_copy); - if (message.external_cluster != null && Object.hasOwnProperty.call(message, "external_cluster")) - writer.uint32(/* id 10, wireType 2 =*/82).string(message.external_cluster); - if (message.source_time_zone != null && Object.hasOwnProperty.call(message, "source_time_zone")) - writer.uint32(/* id 11, wireType 2 =*/90).string(message.source_time_zone); - if (message.target_time_zone != null && Object.hasOwnProperty.call(message, "target_time_zone")) - writer.uint32(/* id 12, wireType 2 =*/98).string(message.target_time_zone); + if (message.app_name != null && Object.hasOwnProperty.call(message, "app_name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.app_name); return writer; }; /** - * Encodes the specified BinlogSource message, length delimited. Does not implicitly {@link binlogdata.BinlogSource.verify|verify} messages. + * Encodes the specified CheckThrottlerRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.CheckThrottlerRequest.verify|verify} messages. * @function encodeDelimited - * @memberof binlogdata.BinlogSource + * @memberof tabletmanagerdata.CheckThrottlerRequest * @static - * @param {binlogdata.IBinlogSource} message BinlogSource message or plain object to encode + * @param {tabletmanagerdata.ICheckThrottlerRequest} message CheckThrottlerRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BinlogSource.encodeDelimited = function encodeDelimited(message, writer) { + CheckThrottlerRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a BinlogSource message from the specified reader or buffer. + * Decodes a CheckThrottlerRequest message from the specified reader or buffer. 
* @function decode - * @memberof binlogdata.BinlogSource + * @memberof tabletmanagerdata.CheckThrottlerRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.BinlogSource} BinlogSource + * @returns {tabletmanagerdata.CheckThrottlerRequest} CheckThrottlerRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BinlogSource.decode = function decode(reader, length) { + CheckThrottlerRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.BinlogSource(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.CheckThrottlerRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - message.shard = reader.string(); - break; - } - case 3: { - message.tablet_type = reader.int32(); - break; - } - case 4: { - message.key_range = $root.topodata.KeyRange.decode(reader, reader.uint32()); - break; - } - case 5: { - if (!(message.tables && message.tables.length)) - message.tables = []; - message.tables.push(reader.string()); - break; - } - case 6: { - message.filter = $root.binlogdata.Filter.decode(reader, reader.uint32()); - break; - } - case 7: { - message.on_ddl = reader.int32(); - break; - } - case 8: { - message.external_mysql = reader.string(); - break; - } - case 9: { - message.stop_after_copy = reader.bool(); - break; - } - case 10: { - message.external_cluster = reader.string(); - break; - } - case 11: { - message.source_time_zone = reader.string(); - break; - } - case 12: { - message.target_time_zone = reader.string(); + message.app_name = 
reader.string(); break; } default: @@ -64377,381 +65964,127 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a BinlogSource message from the specified reader or buffer, length delimited. + * Decodes a CheckThrottlerRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof binlogdata.BinlogSource + * @memberof tabletmanagerdata.CheckThrottlerRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.BinlogSource} BinlogSource + * @returns {tabletmanagerdata.CheckThrottlerRequest} CheckThrottlerRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BinlogSource.decodeDelimited = function decodeDelimited(reader) { + CheckThrottlerRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a BinlogSource message. + * Verifies a CheckThrottlerRequest message. 
* @function verify - * @memberof binlogdata.BinlogSource + * @memberof tabletmanagerdata.CheckThrottlerRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - BinlogSource.verify = function verify(message) { + CheckThrottlerRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) - switch (message.tablet_type) { - default: - return "tablet_type: enum value expected"; - case 0: - case 1: - case 1: - case 2: - case 3: - case 3: - case 4: - case 5: - case 6: - case 7: - case 8: - break; - } - if (message.key_range != null && message.hasOwnProperty("key_range")) { - let error = $root.topodata.KeyRange.verify(message.key_range); - if (error) - return "key_range." + error; - } - if (message.tables != null && message.hasOwnProperty("tables")) { - if (!Array.isArray(message.tables)) - return "tables: array expected"; - for (let i = 0; i < message.tables.length; ++i) - if (!$util.isString(message.tables[i])) - return "tables: string[] expected"; - } - if (message.filter != null && message.hasOwnProperty("filter")) { - let error = $root.binlogdata.Filter.verify(message.filter); - if (error) - return "filter." 
+ error; - } - if (message.on_ddl != null && message.hasOwnProperty("on_ddl")) - switch (message.on_ddl) { - default: - return "on_ddl: enum value expected"; - case 0: - case 1: - case 2: - case 3: - break; - } - if (message.external_mysql != null && message.hasOwnProperty("external_mysql")) - if (!$util.isString(message.external_mysql)) - return "external_mysql: string expected"; - if (message.stop_after_copy != null && message.hasOwnProperty("stop_after_copy")) - if (typeof message.stop_after_copy !== "boolean") - return "stop_after_copy: boolean expected"; - if (message.external_cluster != null && message.hasOwnProperty("external_cluster")) - if (!$util.isString(message.external_cluster)) - return "external_cluster: string expected"; - if (message.source_time_zone != null && message.hasOwnProperty("source_time_zone")) - if (!$util.isString(message.source_time_zone)) - return "source_time_zone: string expected"; - if (message.target_time_zone != null && message.hasOwnProperty("target_time_zone")) - if (!$util.isString(message.target_time_zone)) - return "target_time_zone: string expected"; + if (message.app_name != null && message.hasOwnProperty("app_name")) + if (!$util.isString(message.app_name)) + return "app_name: string expected"; return null; }; /** - * Creates a BinlogSource message from a plain object. Also converts values to their respective internal types. + * Creates a CheckThrottlerRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof binlogdata.BinlogSource + * @memberof tabletmanagerdata.CheckThrottlerRequest * @static * @param {Object.} object Plain object - * @returns {binlogdata.BinlogSource} BinlogSource + * @returns {tabletmanagerdata.CheckThrottlerRequest} CheckThrottlerRequest */ - BinlogSource.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.BinlogSource) + CheckThrottlerRequest.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.CheckThrottlerRequest) return object; - let message = new $root.binlogdata.BinlogSource(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - switch (object.tablet_type) { - default: - if (typeof object.tablet_type === "number") { - message.tablet_type = object.tablet_type; - break; - } - break; - case "UNKNOWN": - case 0: - message.tablet_type = 0; - break; - case "PRIMARY": - case 1: - message.tablet_type = 1; - break; - case "MASTER": - case 1: - message.tablet_type = 1; - break; - case "REPLICA": - case 2: - message.tablet_type = 2; - break; - case "RDONLY": - case 3: - message.tablet_type = 3; - break; - case "BATCH": - case 3: - message.tablet_type = 3; - break; - case "SPARE": - case 4: - message.tablet_type = 4; - break; - case "EXPERIMENTAL": - case 5: - message.tablet_type = 5; - break; - case "BACKUP": - case 6: - message.tablet_type = 6; - break; - case "RESTORE": - case 7: - message.tablet_type = 7; - break; - case "DRAINED": - case 8: - message.tablet_type = 8; - break; - } - if (object.key_range != null) { - if (typeof object.key_range !== "object") - throw TypeError(".binlogdata.BinlogSource.key_range: object expected"); - message.key_range = $root.topodata.KeyRange.fromObject(object.key_range); - } - if (object.tables) { - if (!Array.isArray(object.tables)) - throw TypeError(".binlogdata.BinlogSource.tables: array expected"); - 
message.tables = []; - for (let i = 0; i < object.tables.length; ++i) - message.tables[i] = String(object.tables[i]); - } - if (object.filter != null) { - if (typeof object.filter !== "object") - throw TypeError(".binlogdata.BinlogSource.filter: object expected"); - message.filter = $root.binlogdata.Filter.fromObject(object.filter); - } - switch (object.on_ddl) { - default: - if (typeof object.on_ddl === "number") { - message.on_ddl = object.on_ddl; - break; - } - break; - case "IGNORE": - case 0: - message.on_ddl = 0; - break; - case "STOP": - case 1: - message.on_ddl = 1; - break; - case "EXEC": - case 2: - message.on_ddl = 2; - break; - case "EXEC_IGNORE": - case 3: - message.on_ddl = 3; - break; - } - if (object.external_mysql != null) - message.external_mysql = String(object.external_mysql); - if (object.stop_after_copy != null) - message.stop_after_copy = Boolean(object.stop_after_copy); - if (object.external_cluster != null) - message.external_cluster = String(object.external_cluster); - if (object.source_time_zone != null) - message.source_time_zone = String(object.source_time_zone); - if (object.target_time_zone != null) - message.target_time_zone = String(object.target_time_zone); + let message = new $root.tabletmanagerdata.CheckThrottlerRequest(); + if (object.app_name != null) + message.app_name = String(object.app_name); return message; }; /** - * Creates a plain object from a BinlogSource message. Also converts values to other types if specified. + * Creates a plain object from a CheckThrottlerRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof binlogdata.BinlogSource + * @memberof tabletmanagerdata.CheckThrottlerRequest * @static - * @param {binlogdata.BinlogSource} message BinlogSource + * @param {tabletmanagerdata.CheckThrottlerRequest} message CheckThrottlerRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - BinlogSource.toObject = function toObject(message, options) { + CheckThrottlerRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.tables = []; - if (options.defaults) { - object.keyspace = ""; - object.shard = ""; - object.tablet_type = options.enums === String ? "UNKNOWN" : 0; - object.key_range = null; - object.filter = null; - object.on_ddl = options.enums === String ? "IGNORE" : 0; - object.external_mysql = ""; - object.stop_after_copy = false; - object.external_cluster = ""; - object.source_time_zone = ""; - object.target_time_zone = ""; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) - object.tablet_type = options.enums === String ? $root.topodata.TabletType[message.tablet_type] === undefined ? 
message.tablet_type : $root.topodata.TabletType[message.tablet_type] : message.tablet_type; - if (message.key_range != null && message.hasOwnProperty("key_range")) - object.key_range = $root.topodata.KeyRange.toObject(message.key_range, options); - if (message.tables && message.tables.length) { - object.tables = []; - for (let j = 0; j < message.tables.length; ++j) - object.tables[j] = message.tables[j]; - } - if (message.filter != null && message.hasOwnProperty("filter")) - object.filter = $root.binlogdata.Filter.toObject(message.filter, options); - if (message.on_ddl != null && message.hasOwnProperty("on_ddl")) - object.on_ddl = options.enums === String ? $root.binlogdata.OnDDLAction[message.on_ddl] === undefined ? message.on_ddl : $root.binlogdata.OnDDLAction[message.on_ddl] : message.on_ddl; - if (message.external_mysql != null && message.hasOwnProperty("external_mysql")) - object.external_mysql = message.external_mysql; - if (message.stop_after_copy != null && message.hasOwnProperty("stop_after_copy")) - object.stop_after_copy = message.stop_after_copy; - if (message.external_cluster != null && message.hasOwnProperty("external_cluster")) - object.external_cluster = message.external_cluster; - if (message.source_time_zone != null && message.hasOwnProperty("source_time_zone")) - object.source_time_zone = message.source_time_zone; - if (message.target_time_zone != null && message.hasOwnProperty("target_time_zone")) - object.target_time_zone = message.target_time_zone; + if (options.defaults) + object.app_name = ""; + if (message.app_name != null && message.hasOwnProperty("app_name")) + object.app_name = message.app_name; return object; }; /** - * Converts this BinlogSource to JSON. + * Converts this CheckThrottlerRequest to JSON. 
* @function toJSON - * @memberof binlogdata.BinlogSource + * @memberof tabletmanagerdata.CheckThrottlerRequest * @instance * @returns {Object.} JSON object */ - BinlogSource.prototype.toJSON = function toJSON() { + CheckThrottlerRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for BinlogSource + * Gets the default type url for CheckThrottlerRequest * @function getTypeUrl - * @memberof binlogdata.BinlogSource + * @memberof tabletmanagerdata.CheckThrottlerRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - BinlogSource.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + CheckThrottlerRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.BinlogSource"; + return typeUrlPrefix + "/tabletmanagerdata.CheckThrottlerRequest"; }; - return BinlogSource; - })(); - - /** - * VEventType enum. 
- * @name binlogdata.VEventType - * @enum {number} - * @property {number} UNKNOWN=0 UNKNOWN value - * @property {number} GTID=1 GTID value - * @property {number} BEGIN=2 BEGIN value - * @property {number} COMMIT=3 COMMIT value - * @property {number} ROLLBACK=4 ROLLBACK value - * @property {number} DDL=5 DDL value - * @property {number} INSERT=6 INSERT value - * @property {number} REPLACE=7 REPLACE value - * @property {number} UPDATE=8 UPDATE value - * @property {number} DELETE=9 DELETE value - * @property {number} SET=10 SET value - * @property {number} OTHER=11 OTHER value - * @property {number} ROW=12 ROW value - * @property {number} FIELD=13 FIELD value - * @property {number} HEARTBEAT=14 HEARTBEAT value - * @property {number} VGTID=15 VGTID value - * @property {number} JOURNAL=16 JOURNAL value - * @property {number} VERSION=17 VERSION value - * @property {number} LASTPK=18 LASTPK value - * @property {number} SAVEPOINT=19 SAVEPOINT value - * @property {number} COPY_COMPLETED=20 COPY_COMPLETED value - */ - binlogdata.VEventType = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "UNKNOWN"] = 0; - values[valuesById[1] = "GTID"] = 1; - values[valuesById[2] = "BEGIN"] = 2; - values[valuesById[3] = "COMMIT"] = 3; - values[valuesById[4] = "ROLLBACK"] = 4; - values[valuesById[5] = "DDL"] = 5; - values[valuesById[6] = "INSERT"] = 6; - values[valuesById[7] = "REPLACE"] = 7; - values[valuesById[8] = "UPDATE"] = 8; - values[valuesById[9] = "DELETE"] = 9; - values[valuesById[10] = "SET"] = 10; - values[valuesById[11] = "OTHER"] = 11; - values[valuesById[12] = "ROW"] = 12; - values[valuesById[13] = "FIELD"] = 13; - values[valuesById[14] = "HEARTBEAT"] = 14; - values[valuesById[15] = "VGTID"] = 15; - values[valuesById[16] = "JOURNAL"] = 16; - values[valuesById[17] = "VERSION"] = 17; - values[valuesById[18] = "LASTPK"] = 18; - values[valuesById[19] = "SAVEPOINT"] = 19; - values[valuesById[20] = "COPY_COMPLETED"] = 20; - return 
values; + return CheckThrottlerRequest; })(); - binlogdata.RowChange = (function() { + tabletmanagerdata.CheckThrottlerResponse = (function() { /** - * Properties of a RowChange. - * @memberof binlogdata - * @interface IRowChange - * @property {query.IRow|null} [before] RowChange before - * @property {query.IRow|null} [after] RowChange after - * @property {binlogdata.RowChange.IBitmap|null} [data_columns] RowChange data_columns + * Properties of a CheckThrottlerResponse. + * @memberof tabletmanagerdata + * @interface ICheckThrottlerResponse + * @property {number|null} [status_code] CheckThrottlerResponse status_code + * @property {number|null} [value] CheckThrottlerResponse value + * @property {number|null} [threshold] CheckThrottlerResponse threshold + * @property {string|null} [error] CheckThrottlerResponse error + * @property {string|null} [message] CheckThrottlerResponse message + * @property {boolean|null} [recently_checked] CheckThrottlerResponse recently_checked */ /** - * Constructs a new RowChange. - * @memberof binlogdata - * @classdesc Represents a RowChange. - * @implements IRowChange + * Constructs a new CheckThrottlerResponse. + * @memberof tabletmanagerdata + * @classdesc Represents a CheckThrottlerResponse. + * @implements ICheckThrottlerResponse * @constructor - * @param {binlogdata.IRowChange=} [properties] Properties to set + * @param {tabletmanagerdata.ICheckThrottlerResponse=} [properties] Properties to set */ - function RowChange(properties) { + function CheckThrottlerResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -64759,103 +66092,145 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * RowChange before. - * @member {query.IRow|null|undefined} before - * @memberof binlogdata.RowChange + * CheckThrottlerResponse status_code. 
+ * @member {number} status_code + * @memberof tabletmanagerdata.CheckThrottlerResponse * @instance */ - RowChange.prototype.before = null; + CheckThrottlerResponse.prototype.status_code = 0; /** - * RowChange after. - * @member {query.IRow|null|undefined} after - * @memberof binlogdata.RowChange + * CheckThrottlerResponse value. + * @member {number} value + * @memberof tabletmanagerdata.CheckThrottlerResponse * @instance */ - RowChange.prototype.after = null; + CheckThrottlerResponse.prototype.value = 0; /** - * RowChange data_columns. - * @member {binlogdata.RowChange.IBitmap|null|undefined} data_columns - * @memberof binlogdata.RowChange + * CheckThrottlerResponse threshold. + * @member {number} threshold + * @memberof tabletmanagerdata.CheckThrottlerResponse * @instance */ - RowChange.prototype.data_columns = null; + CheckThrottlerResponse.prototype.threshold = 0; /** - * Creates a new RowChange instance using the specified properties. + * CheckThrottlerResponse error. + * @member {string} error + * @memberof tabletmanagerdata.CheckThrottlerResponse + * @instance + */ + CheckThrottlerResponse.prototype.error = ""; + + /** + * CheckThrottlerResponse message. + * @member {string} message + * @memberof tabletmanagerdata.CheckThrottlerResponse + * @instance + */ + CheckThrottlerResponse.prototype.message = ""; + + /** + * CheckThrottlerResponse recently_checked. + * @member {boolean} recently_checked + * @memberof tabletmanagerdata.CheckThrottlerResponse + * @instance + */ + CheckThrottlerResponse.prototype.recently_checked = false; + + /** + * Creates a new CheckThrottlerResponse instance using the specified properties. 
* @function create - * @memberof binlogdata.RowChange + * @memberof tabletmanagerdata.CheckThrottlerResponse * @static - * @param {binlogdata.IRowChange=} [properties] Properties to set - * @returns {binlogdata.RowChange} RowChange instance + * @param {tabletmanagerdata.ICheckThrottlerResponse=} [properties] Properties to set + * @returns {tabletmanagerdata.CheckThrottlerResponse} CheckThrottlerResponse instance */ - RowChange.create = function create(properties) { - return new RowChange(properties); + CheckThrottlerResponse.create = function create(properties) { + return new CheckThrottlerResponse(properties); }; /** - * Encodes the specified RowChange message. Does not implicitly {@link binlogdata.RowChange.verify|verify} messages. + * Encodes the specified CheckThrottlerResponse message. Does not implicitly {@link tabletmanagerdata.CheckThrottlerResponse.verify|verify} messages. * @function encode - * @memberof binlogdata.RowChange + * @memberof tabletmanagerdata.CheckThrottlerResponse * @static - * @param {binlogdata.IRowChange} message RowChange message or plain object to encode + * @param {tabletmanagerdata.ICheckThrottlerResponse} message CheckThrottlerResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RowChange.encode = function encode(message, writer) { + CheckThrottlerResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.before != null && Object.hasOwnProperty.call(message, "before")) - $root.query.Row.encode(message.before, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.after != null && Object.hasOwnProperty.call(message, "after")) - $root.query.Row.encode(message.after, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.data_columns != null && Object.hasOwnProperty.call(message, "data_columns")) - $root.binlogdata.RowChange.Bitmap.encode(message.data_columns, 
writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.status_code != null && Object.hasOwnProperty.call(message, "status_code")) + writer.uint32(/* id 1, wireType 0 =*/8).int32(message.status_code); + if (message.value != null && Object.hasOwnProperty.call(message, "value")) + writer.uint32(/* id 2, wireType 1 =*/17).double(message.value); + if (message.threshold != null && Object.hasOwnProperty.call(message, "threshold")) + writer.uint32(/* id 3, wireType 1 =*/25).double(message.threshold); + if (message.error != null && Object.hasOwnProperty.call(message, "error")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.error); + if (message.message != null && Object.hasOwnProperty.call(message, "message")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.message); + if (message.recently_checked != null && Object.hasOwnProperty.call(message, "recently_checked")) + writer.uint32(/* id 6, wireType 0 =*/48).bool(message.recently_checked); return writer; }; /** - * Encodes the specified RowChange message, length delimited. Does not implicitly {@link binlogdata.RowChange.verify|verify} messages. + * Encodes the specified CheckThrottlerResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.CheckThrottlerResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof binlogdata.RowChange + * @memberof tabletmanagerdata.CheckThrottlerResponse * @static - * @param {binlogdata.IRowChange} message RowChange message or plain object to encode + * @param {tabletmanagerdata.ICheckThrottlerResponse} message CheckThrottlerResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RowChange.encodeDelimited = function encodeDelimited(message, writer) { + CheckThrottlerResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RowChange message from the specified reader or buffer. + * Decodes a CheckThrottlerResponse message from the specified reader or buffer. * @function decode - * @memberof binlogdata.RowChange + * @memberof tabletmanagerdata.CheckThrottlerResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.RowChange} RowChange + * @returns {tabletmanagerdata.CheckThrottlerResponse} CheckThrottlerResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RowChange.decode = function decode(reader, length) { + CheckThrottlerResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.RowChange(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tabletmanagerdata.CheckThrottlerResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.before = $root.query.Row.decode(reader, reader.uint32()); + message.status_code = reader.int32(); break; } case 2: { - message.after = $root.query.Row.decode(reader, reader.uint32()); + message.value = reader.double(); break; } case 3: { - message.data_columns = $root.binlogdata.RowChange.Bitmap.decode(reader, reader.uint32()); + message.threshold = reader.double(); + break; + } + case 4: { + message.error = reader.string(); + break; + } + case 5: { + message.message = reader.string(); + break; + } + case 6: { + message.recently_checked = reader.bool(); break; } default: @@ -64867,529 +66242,281 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a RowChange message from the specified reader or buffer, length delimited. + * Decodes a CheckThrottlerResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof binlogdata.RowChange + * @memberof tabletmanagerdata.CheckThrottlerResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.RowChange} RowChange + * @returns {tabletmanagerdata.CheckThrottlerResponse} CheckThrottlerResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RowChange.decodeDelimited = function decodeDelimited(reader) { + CheckThrottlerResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RowChange message. + * Verifies a CheckThrottlerResponse message. 
* @function verify - * @memberof binlogdata.RowChange + * @memberof tabletmanagerdata.CheckThrottlerResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RowChange.verify = function verify(message) { + CheckThrottlerResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.before != null && message.hasOwnProperty("before")) { - let error = $root.query.Row.verify(message.before); - if (error) - return "before." + error; - } - if (message.after != null && message.hasOwnProperty("after")) { - let error = $root.query.Row.verify(message.after); - if (error) - return "after." + error; - } - if (message.data_columns != null && message.hasOwnProperty("data_columns")) { - let error = $root.binlogdata.RowChange.Bitmap.verify(message.data_columns); - if (error) - return "data_columns." + error; - } + if (message.status_code != null && message.hasOwnProperty("status_code")) + if (!$util.isInteger(message.status_code)) + return "status_code: integer expected"; + if (message.value != null && message.hasOwnProperty("value")) + if (typeof message.value !== "number") + return "value: number expected"; + if (message.threshold != null && message.hasOwnProperty("threshold")) + if (typeof message.threshold !== "number") + return "threshold: number expected"; + if (message.error != null && message.hasOwnProperty("error")) + if (!$util.isString(message.error)) + return "error: string expected"; + if (message.message != null && message.hasOwnProperty("message")) + if (!$util.isString(message.message)) + return "message: string expected"; + if (message.recently_checked != null && message.hasOwnProperty("recently_checked")) + if (typeof message.recently_checked !== "boolean") + return "recently_checked: boolean expected"; return null; }; /** - * Creates a RowChange message from a plain object. 
Also converts values to their respective internal types. + * Creates a CheckThrottlerResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof binlogdata.RowChange + * @memberof tabletmanagerdata.CheckThrottlerResponse * @static * @param {Object.} object Plain object - * @returns {binlogdata.RowChange} RowChange + * @returns {tabletmanagerdata.CheckThrottlerResponse} CheckThrottlerResponse */ - RowChange.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.RowChange) + CheckThrottlerResponse.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.CheckThrottlerResponse) return object; - let message = new $root.binlogdata.RowChange(); - if (object.before != null) { - if (typeof object.before !== "object") - throw TypeError(".binlogdata.RowChange.before: object expected"); - message.before = $root.query.Row.fromObject(object.before); - } - if (object.after != null) { - if (typeof object.after !== "object") - throw TypeError(".binlogdata.RowChange.after: object expected"); - message.after = $root.query.Row.fromObject(object.after); - } - if (object.data_columns != null) { - if (typeof object.data_columns !== "object") - throw TypeError(".binlogdata.RowChange.data_columns: object expected"); - message.data_columns = $root.binlogdata.RowChange.Bitmap.fromObject(object.data_columns); - } + let message = new $root.tabletmanagerdata.CheckThrottlerResponse(); + if (object.status_code != null) + message.status_code = object.status_code | 0; + if (object.value != null) + message.value = Number(object.value); + if (object.threshold != null) + message.threshold = Number(object.threshold); + if (object.error != null) + message.error = String(object.error); + if (object.message != null) + message.message = String(object.message); + if (object.recently_checked != null) + message.recently_checked = Boolean(object.recently_checked); return 
message; }; /** - * Creates a plain object from a RowChange message. Also converts values to other types if specified. + * Creates a plain object from a CheckThrottlerResponse message. Also converts values to other types if specified. * @function toObject - * @memberof binlogdata.RowChange + * @memberof tabletmanagerdata.CheckThrottlerResponse * @static - * @param {binlogdata.RowChange} message RowChange + * @param {tabletmanagerdata.CheckThrottlerResponse} message CheckThrottlerResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RowChange.toObject = function toObject(message, options) { + CheckThrottlerResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.before = null; - object.after = null; - object.data_columns = null; + object.status_code = 0; + object.value = 0; + object.threshold = 0; + object.error = ""; + object.message = ""; + object.recently_checked = false; } - if (message.before != null && message.hasOwnProperty("before")) - object.before = $root.query.Row.toObject(message.before, options); - if (message.after != null && message.hasOwnProperty("after")) - object.after = $root.query.Row.toObject(message.after, options); - if (message.data_columns != null && message.hasOwnProperty("data_columns")) - object.data_columns = $root.binlogdata.RowChange.Bitmap.toObject(message.data_columns, options); + if (message.status_code != null && message.hasOwnProperty("status_code")) + object.status_code = message.status_code; + if (message.value != null && message.hasOwnProperty("value")) + object.value = options.json && !isFinite(message.value) ? String(message.value) : message.value; + if (message.threshold != null && message.hasOwnProperty("threshold")) + object.threshold = options.json && !isFinite(message.threshold) ? 
String(message.threshold) : message.threshold; + if (message.error != null && message.hasOwnProperty("error")) + object.error = message.error; + if (message.message != null && message.hasOwnProperty("message")) + object.message = message.message; + if (message.recently_checked != null && message.hasOwnProperty("recently_checked")) + object.recently_checked = message.recently_checked; return object; }; /** - * Converts this RowChange to JSON. + * Converts this CheckThrottlerResponse to JSON. * @function toJSON - * @memberof binlogdata.RowChange + * @memberof tabletmanagerdata.CheckThrottlerResponse * @instance * @returns {Object.} JSON object */ - RowChange.prototype.toJSON = function toJSON() { + CheckThrottlerResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RowChange + * Gets the default type url for CheckThrottlerResponse * @function getTypeUrl - * @memberof binlogdata.RowChange + * @memberof tabletmanagerdata.CheckThrottlerResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RowChange.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + CheckThrottlerResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.RowChange"; + return typeUrlPrefix + "/tabletmanagerdata.CheckThrottlerResponse"; }; - RowChange.Bitmap = (function() { + return CheckThrottlerResponse; + })(); - /** - * Properties of a Bitmap. - * @memberof binlogdata.RowChange - * @interface IBitmap - * @property {number|Long|null} [count] Bitmap count - * @property {Uint8Array|null} [cols] Bitmap cols - */ + return tabletmanagerdata; +})(); - /** - * Constructs a new Bitmap. - * @memberof binlogdata.RowChange - * @classdesc Represents a Bitmap. 
- * @implements IBitmap - * @constructor - * @param {binlogdata.RowChange.IBitmap=} [properties] Properties to set - */ - function Bitmap(properties) { - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } +export const binlogdata = $root.binlogdata = (() => { - /** - * Bitmap count. - * @member {number|Long} count - * @memberof binlogdata.RowChange.Bitmap - * @instance - */ - Bitmap.prototype.count = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + /** + * Namespace binlogdata. + * @exports binlogdata + * @namespace + */ + const binlogdata = {}; - /** - * Bitmap cols. - * @member {Uint8Array} cols - * @memberof binlogdata.RowChange.Bitmap - * @instance - */ - Bitmap.prototype.cols = $util.newBuffer([]); + binlogdata.Charset = (function() { - /** - * Creates a new Bitmap instance using the specified properties. - * @function create - * @memberof binlogdata.RowChange.Bitmap - * @static - * @param {binlogdata.RowChange.IBitmap=} [properties] Properties to set - * @returns {binlogdata.RowChange.Bitmap} Bitmap instance - */ - Bitmap.create = function create(properties) { - return new Bitmap(properties); - }; + /** + * Properties of a Charset. + * @memberof binlogdata + * @interface ICharset + * @property {number|null} [client] Charset client + * @property {number|null} [conn] Charset conn + * @property {number|null} [server] Charset server + */ - /** - * Encodes the specified Bitmap message. Does not implicitly {@link binlogdata.RowChange.Bitmap.verify|verify} messages. 
- * @function encode - * @memberof binlogdata.RowChange.Bitmap - * @static - * @param {binlogdata.RowChange.IBitmap} message Bitmap message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - Bitmap.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.count != null && Object.hasOwnProperty.call(message, "count")) - writer.uint32(/* id 1, wireType 0 =*/8).int64(message.count); - if (message.cols != null && Object.hasOwnProperty.call(message, "cols")) - writer.uint32(/* id 2, wireType 2 =*/18).bytes(message.cols); - return writer; - }; + /** + * Constructs a new Charset. + * @memberof binlogdata + * @classdesc Represents a Charset. + * @implements ICharset + * @constructor + * @param {binlogdata.ICharset=} [properties] Properties to set + */ + function Charset(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } - /** - * Encodes the specified Bitmap message, length delimited. Does not implicitly {@link binlogdata.RowChange.Bitmap.verify|verify} messages. - * @function encodeDelimited - * @memberof binlogdata.RowChange.Bitmap - * @static - * @param {binlogdata.RowChange.IBitmap} message Bitmap message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - Bitmap.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; - - /** - * Decodes a Bitmap message from the specified reader or buffer. 
- * @function decode - * @memberof binlogdata.RowChange.Bitmap - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.RowChange.Bitmap} Bitmap - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - Bitmap.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.RowChange.Bitmap(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.count = reader.int64(); - break; - } - case 2: { - message.cols = reader.bytes(); - break; - } - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; - - /** - * Decodes a Bitmap message from the specified reader or buffer, length delimited. - * @function decodeDelimited - * @memberof binlogdata.RowChange.Bitmap - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.RowChange.Bitmap} Bitmap - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - Bitmap.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; - - /** - * Verifies a Bitmap message. 
- * @function verify - * @memberof binlogdata.RowChange.Bitmap - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - Bitmap.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.count != null && message.hasOwnProperty("count")) - if (!$util.isInteger(message.count) && !(message.count && $util.isInteger(message.count.low) && $util.isInteger(message.count.high))) - return "count: integer|Long expected"; - if (message.cols != null && message.hasOwnProperty("cols")) - if (!(message.cols && typeof message.cols.length === "number" || $util.isString(message.cols))) - return "cols: buffer expected"; - return null; - }; - - /** - * Creates a Bitmap message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof binlogdata.RowChange.Bitmap - * @static - * @param {Object.} object Plain object - * @returns {binlogdata.RowChange.Bitmap} Bitmap - */ - Bitmap.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.RowChange.Bitmap) - return object; - let message = new $root.binlogdata.RowChange.Bitmap(); - if (object.count != null) - if ($util.Long) - (message.count = $util.Long.fromValue(object.count)).unsigned = false; - else if (typeof object.count === "string") - message.count = parseInt(object.count, 10); - else if (typeof object.count === "number") - message.count = object.count; - else if (typeof object.count === "object") - message.count = new $util.LongBits(object.count.low >>> 0, object.count.high >>> 0).toNumber(); - if (object.cols != null) - if (typeof object.cols === "string") - $util.base64.decode(object.cols, message.cols = $util.newBuffer($util.base64.length(object.cols)), 0); - else if (object.cols.length >= 0) - message.cols = object.cols; - return message; - }; - - /** - * Creates a plain object from 
a Bitmap message. Also converts values to other types if specified. - * @function toObject - * @memberof binlogdata.RowChange.Bitmap - * @static - * @param {binlogdata.RowChange.Bitmap} message Bitmap - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - Bitmap.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.count = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.count = options.longs === String ? "0" : 0; - if (options.bytes === String) - object.cols = ""; - else { - object.cols = []; - if (options.bytes !== Array) - object.cols = $util.newBuffer(object.cols); - } - } - if (message.count != null && message.hasOwnProperty("count")) - if (typeof message.count === "number") - object.count = options.longs === String ? String(message.count) : message.count; - else - object.count = options.longs === String ? $util.Long.prototype.toString.call(message.count) : options.longs === Number ? new $util.LongBits(message.count.low >>> 0, message.count.high >>> 0).toNumber() : message.count; - if (message.cols != null && message.hasOwnProperty("cols")) - object.cols = options.bytes === String ? $util.base64.encode(message.cols, 0, message.cols.length) : options.bytes === Array ? Array.prototype.slice.call(message.cols) : message.cols; - return object; - }; - - /** - * Converts this Bitmap to JSON. 
- * @function toJSON - * @memberof binlogdata.RowChange.Bitmap - * @instance - * @returns {Object.} JSON object - */ - Bitmap.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - /** - * Gets the default type url for Bitmap - * @function getTypeUrl - * @memberof binlogdata.RowChange.Bitmap - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - Bitmap.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/binlogdata.RowChange.Bitmap"; - }; - - return Bitmap; - })(); - - return RowChange; - })(); - - binlogdata.RowEvent = (function() { - - /** - * Properties of a RowEvent. - * @memberof binlogdata - * @interface IRowEvent - * @property {string|null} [table_name] RowEvent table_name - * @property {Array.|null} [row_changes] RowEvent row_changes - * @property {string|null} [keyspace] RowEvent keyspace - * @property {string|null} [shard] RowEvent shard - */ - - /** - * Constructs a new RowEvent. - * @memberof binlogdata - * @classdesc Represents a RowEvent. - * @implements IRowEvent - * @constructor - * @param {binlogdata.IRowEvent=} [properties] Properties to set - */ - function RowEvent(properties) { - this.row_changes = []; - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } - - /** - * RowEvent table_name. - * @member {string} table_name - * @memberof binlogdata.RowEvent - * @instance - */ - RowEvent.prototype.table_name = ""; - - /** - * RowEvent row_changes. - * @member {Array.} row_changes - * @memberof binlogdata.RowEvent - * @instance - */ - RowEvent.prototype.row_changes = $util.emptyArray; + /** + * Charset client. 
+ * @member {number} client + * @memberof binlogdata.Charset + * @instance + */ + Charset.prototype.client = 0; /** - * RowEvent keyspace. - * @member {string} keyspace - * @memberof binlogdata.RowEvent + * Charset conn. + * @member {number} conn + * @memberof binlogdata.Charset * @instance */ - RowEvent.prototype.keyspace = ""; + Charset.prototype.conn = 0; /** - * RowEvent shard. - * @member {string} shard - * @memberof binlogdata.RowEvent + * Charset server. + * @member {number} server + * @memberof binlogdata.Charset * @instance */ - RowEvent.prototype.shard = ""; + Charset.prototype.server = 0; /** - * Creates a new RowEvent instance using the specified properties. + * Creates a new Charset instance using the specified properties. * @function create - * @memberof binlogdata.RowEvent + * @memberof binlogdata.Charset * @static - * @param {binlogdata.IRowEvent=} [properties] Properties to set - * @returns {binlogdata.RowEvent} RowEvent instance + * @param {binlogdata.ICharset=} [properties] Properties to set + * @returns {binlogdata.Charset} Charset instance */ - RowEvent.create = function create(properties) { - return new RowEvent(properties); + Charset.create = function create(properties) { + return new Charset(properties); }; /** - * Encodes the specified RowEvent message. Does not implicitly {@link binlogdata.RowEvent.verify|verify} messages. + * Encodes the specified Charset message. Does not implicitly {@link binlogdata.Charset.verify|verify} messages. 
* @function encode - * @memberof binlogdata.RowEvent + * @memberof binlogdata.Charset * @static - * @param {binlogdata.IRowEvent} message RowEvent message or plain object to encode + * @param {binlogdata.ICharset} message Charset message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RowEvent.encode = function encode(message, writer) { + Charset.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.table_name != null && Object.hasOwnProperty.call(message, "table_name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.table_name); - if (message.row_changes != null && message.row_changes.length) - for (let i = 0; i < message.row_changes.length; ++i) - $root.binlogdata.RowChange.encode(message.row_changes[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.shard); + if (message.client != null && Object.hasOwnProperty.call(message, "client")) + writer.uint32(/* id 1, wireType 0 =*/8).int32(message.client); + if (message.conn != null && Object.hasOwnProperty.call(message, "conn")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.conn); + if (message.server != null && Object.hasOwnProperty.call(message, "server")) + writer.uint32(/* id 3, wireType 0 =*/24).int32(message.server); return writer; }; /** - * Encodes the specified RowEvent message, length delimited. Does not implicitly {@link binlogdata.RowEvent.verify|verify} messages. + * Encodes the specified Charset message, length delimited. Does not implicitly {@link binlogdata.Charset.verify|verify} messages. 
* @function encodeDelimited - * @memberof binlogdata.RowEvent + * @memberof binlogdata.Charset * @static - * @param {binlogdata.IRowEvent} message RowEvent message or plain object to encode + * @param {binlogdata.ICharset} message Charset message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RowEvent.encodeDelimited = function encodeDelimited(message, writer) { + Charset.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RowEvent message from the specified reader or buffer. + * Decodes a Charset message from the specified reader or buffer. * @function decode - * @memberof binlogdata.RowEvent + * @memberof binlogdata.Charset * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.RowEvent} RowEvent + * @returns {binlogdata.Charset} Charset * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RowEvent.decode = function decode(reader, length) { + Charset.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.RowEvent(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.binlogdata.Charset(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.table_name = reader.string(); + message.client = reader.int32(); break; } case 2: { - if (!(message.row_changes && message.row_changes.length)) - message.row_changes = []; - message.row_changes.push($root.binlogdata.RowChange.decode(reader, reader.uint32())); + message.conn = reader.int32(); break; } case 3: { - message.keyspace = reader.string(); - break; - } - case 4: { - message.shard = reader.string(); + message.server = reader.int32(); break; } default: @@ -65401,169 +66528,141 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a RowEvent message from the specified reader or buffer, length delimited. + * Decodes a Charset message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof binlogdata.RowEvent + * @memberof binlogdata.Charset * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.RowEvent} RowEvent + * @returns {binlogdata.Charset} Charset * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RowEvent.decodeDelimited = function decodeDelimited(reader) { + Charset.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RowEvent message. + * Verifies a Charset message. 
* @function verify - * @memberof binlogdata.RowEvent + * @memberof binlogdata.Charset * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RowEvent.verify = function verify(message) { + Charset.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.table_name != null && message.hasOwnProperty("table_name")) - if (!$util.isString(message.table_name)) - return "table_name: string expected"; - if (message.row_changes != null && message.hasOwnProperty("row_changes")) { - if (!Array.isArray(message.row_changes)) - return "row_changes: array expected"; - for (let i = 0; i < message.row_changes.length; ++i) { - let error = $root.binlogdata.RowChange.verify(message.row_changes[i]); - if (error) - return "row_changes." + error; - } - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; + if (message.client != null && message.hasOwnProperty("client")) + if (!$util.isInteger(message.client)) + return "client: integer expected"; + if (message.conn != null && message.hasOwnProperty("conn")) + if (!$util.isInteger(message.conn)) + return "conn: integer expected"; + if (message.server != null && message.hasOwnProperty("server")) + if (!$util.isInteger(message.server)) + return "server: integer expected"; return null; }; /** - * Creates a RowEvent message from a plain object. Also converts values to their respective internal types. + * Creates a Charset message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof binlogdata.RowEvent + * @memberof binlogdata.Charset * @static * @param {Object.} object Plain object - * @returns {binlogdata.RowEvent} RowEvent + * @returns {binlogdata.Charset} Charset */ - RowEvent.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.RowEvent) + Charset.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.Charset) return object; - let message = new $root.binlogdata.RowEvent(); - if (object.table_name != null) - message.table_name = String(object.table_name); - if (object.row_changes) { - if (!Array.isArray(object.row_changes)) - throw TypeError(".binlogdata.RowEvent.row_changes: array expected"); - message.row_changes = []; - for (let i = 0; i < object.row_changes.length; ++i) { - if (typeof object.row_changes[i] !== "object") - throw TypeError(".binlogdata.RowEvent.row_changes: object expected"); - message.row_changes[i] = $root.binlogdata.RowChange.fromObject(object.row_changes[i]); - } - } - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); + let message = new $root.binlogdata.Charset(); + if (object.client != null) + message.client = object.client | 0; + if (object.conn != null) + message.conn = object.conn | 0; + if (object.server != null) + message.server = object.server | 0; return message; }; /** - * Creates a plain object from a RowEvent message. Also converts values to other types if specified. + * Creates a plain object from a Charset message. Also converts values to other types if specified. 
* @function toObject - * @memberof binlogdata.RowEvent + * @memberof binlogdata.Charset * @static - * @param {binlogdata.RowEvent} message RowEvent + * @param {binlogdata.Charset} message Charset * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RowEvent.toObject = function toObject(message, options) { + Charset.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.row_changes = []; if (options.defaults) { - object.table_name = ""; - object.keyspace = ""; - object.shard = ""; - } - if (message.table_name != null && message.hasOwnProperty("table_name")) - object.table_name = message.table_name; - if (message.row_changes && message.row_changes.length) { - object.row_changes = []; - for (let j = 0; j < message.row_changes.length; ++j) - object.row_changes[j] = $root.binlogdata.RowChange.toObject(message.row_changes[j], options); + object.client = 0; + object.conn = 0; + object.server = 0; } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; + if (message.client != null && message.hasOwnProperty("client")) + object.client = message.client; + if (message.conn != null && message.hasOwnProperty("conn")) + object.conn = message.conn; + if (message.server != null && message.hasOwnProperty("server")) + object.server = message.server; return object; }; /** - * Converts this RowEvent to JSON. + * Converts this Charset to JSON. 
* @function toJSON - * @memberof binlogdata.RowEvent + * @memberof binlogdata.Charset * @instance * @returns {Object.} JSON object */ - RowEvent.prototype.toJSON = function toJSON() { + Charset.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RowEvent + * Gets the default type url for Charset * @function getTypeUrl - * @memberof binlogdata.RowEvent + * @memberof binlogdata.Charset * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RowEvent.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + Charset.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.RowEvent"; + return typeUrlPrefix + "/binlogdata.Charset"; }; - return RowEvent; + return Charset; })(); - binlogdata.FieldEvent = (function() { + binlogdata.BinlogTransaction = (function() { /** - * Properties of a FieldEvent. + * Properties of a BinlogTransaction. * @memberof binlogdata - * @interface IFieldEvent - * @property {string|null} [table_name] FieldEvent table_name - * @property {Array.|null} [fields] FieldEvent fields - * @property {string|null} [keyspace] FieldEvent keyspace - * @property {string|null} [shard] FieldEvent shard + * @interface IBinlogTransaction + * @property {Array.|null} [statements] BinlogTransaction statements + * @property {query.IEventToken|null} [event_token] BinlogTransaction event_token */ /** - * Constructs a new FieldEvent. + * Constructs a new BinlogTransaction. * @memberof binlogdata - * @classdesc Represents a FieldEvent. - * @implements IFieldEvent + * @classdesc Represents a BinlogTransaction. 
+ * @implements IBinlogTransaction * @constructor - * @param {binlogdata.IFieldEvent=} [properties] Properties to set + * @param {binlogdata.IBinlogTransaction=} [properties] Properties to set */ - function FieldEvent(properties) { - this.fields = []; + function BinlogTransaction(properties) { + this.statements = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -65571,120 +66670,92 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * FieldEvent table_name. - * @member {string} table_name - * @memberof binlogdata.FieldEvent - * @instance - */ - FieldEvent.prototype.table_name = ""; - - /** - * FieldEvent fields. - * @member {Array.} fields - * @memberof binlogdata.FieldEvent - * @instance - */ - FieldEvent.prototype.fields = $util.emptyArray; - - /** - * FieldEvent keyspace. - * @member {string} keyspace - * @memberof binlogdata.FieldEvent + * BinlogTransaction statements. + * @member {Array.} statements + * @memberof binlogdata.BinlogTransaction * @instance */ - FieldEvent.prototype.keyspace = ""; + BinlogTransaction.prototype.statements = $util.emptyArray; /** - * FieldEvent shard. - * @member {string} shard - * @memberof binlogdata.FieldEvent + * BinlogTransaction event_token. + * @member {query.IEventToken|null|undefined} event_token + * @memberof binlogdata.BinlogTransaction * @instance */ - FieldEvent.prototype.shard = ""; + BinlogTransaction.prototype.event_token = null; /** - * Creates a new FieldEvent instance using the specified properties. + * Creates a new BinlogTransaction instance using the specified properties. 
* @function create - * @memberof binlogdata.FieldEvent + * @memberof binlogdata.BinlogTransaction * @static - * @param {binlogdata.IFieldEvent=} [properties] Properties to set - * @returns {binlogdata.FieldEvent} FieldEvent instance + * @param {binlogdata.IBinlogTransaction=} [properties] Properties to set + * @returns {binlogdata.BinlogTransaction} BinlogTransaction instance */ - FieldEvent.create = function create(properties) { - return new FieldEvent(properties); + BinlogTransaction.create = function create(properties) { + return new BinlogTransaction(properties); }; /** - * Encodes the specified FieldEvent message. Does not implicitly {@link binlogdata.FieldEvent.verify|verify} messages. + * Encodes the specified BinlogTransaction message. Does not implicitly {@link binlogdata.BinlogTransaction.verify|verify} messages. * @function encode - * @memberof binlogdata.FieldEvent + * @memberof binlogdata.BinlogTransaction * @static - * @param {binlogdata.IFieldEvent} message FieldEvent message or plain object to encode + * @param {binlogdata.IBinlogTransaction} message BinlogTransaction message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - FieldEvent.encode = function encode(message, writer) { + BinlogTransaction.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.table_name != null && Object.hasOwnProperty.call(message, "table_name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.table_name); - if (message.fields != null && message.fields.length) - for (let i = 0; i < message.fields.length; ++i) - $root.query.Field.encode(message.fields[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - 
writer.uint32(/* id 4, wireType 2 =*/34).string(message.shard); + if (message.statements != null && message.statements.length) + for (let i = 0; i < message.statements.length; ++i) + $root.binlogdata.BinlogTransaction.Statement.encode(message.statements[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.event_token != null && Object.hasOwnProperty.call(message, "event_token")) + $root.query.EventToken.encode(message.event_token, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; /** - * Encodes the specified FieldEvent message, length delimited. Does not implicitly {@link binlogdata.FieldEvent.verify|verify} messages. + * Encodes the specified BinlogTransaction message, length delimited. Does not implicitly {@link binlogdata.BinlogTransaction.verify|verify} messages. * @function encodeDelimited - * @memberof binlogdata.FieldEvent + * @memberof binlogdata.BinlogTransaction * @static - * @param {binlogdata.IFieldEvent} message FieldEvent message or plain object to encode + * @param {binlogdata.IBinlogTransaction} message BinlogTransaction message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - FieldEvent.encodeDelimited = function encodeDelimited(message, writer) { + BinlogTransaction.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a FieldEvent message from the specified reader or buffer. + * Decodes a BinlogTransaction message from the specified reader or buffer. 
* @function decode - * @memberof binlogdata.FieldEvent + * @memberof binlogdata.BinlogTransaction * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.FieldEvent} FieldEvent + * @returns {binlogdata.BinlogTransaction} BinlogTransaction * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FieldEvent.decode = function decode(reader, length) { + BinlogTransaction.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.FieldEvent(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.BinlogTransaction(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.table_name = reader.string(); - break; - } - case 2: { - if (!(message.fields && message.fields.length)) - message.fields = []; - message.fields.push($root.query.Field.decode(reader, reader.uint32())); + if (!(message.statements && message.statements.length)) + message.statements = []; + message.statements.push($root.binlogdata.BinlogTransaction.Statement.decode(reader, reader.uint32())); break; } - case 3: { - message.keyspace = reader.string(); - break; - } - case 4: { - message.shard = reader.string(); + case 4: { + message.event_token = $root.query.EventToken.decode(reader, reader.uint32()); break; } default: @@ -65696,461 +66767,508 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a FieldEvent message from the specified reader or buffer, length delimited. + * Decodes a BinlogTransaction message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof binlogdata.FieldEvent + * @memberof binlogdata.BinlogTransaction * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.FieldEvent} FieldEvent + * @returns {binlogdata.BinlogTransaction} BinlogTransaction * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FieldEvent.decodeDelimited = function decodeDelimited(reader) { + BinlogTransaction.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a FieldEvent message. + * Verifies a BinlogTransaction message. * @function verify - * @memberof binlogdata.FieldEvent + * @memberof binlogdata.BinlogTransaction * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - FieldEvent.verify = function verify(message) { + BinlogTransaction.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.table_name != null && message.hasOwnProperty("table_name")) - if (!$util.isString(message.table_name)) - return "table_name: string expected"; - if (message.fields != null && message.hasOwnProperty("fields")) { - if (!Array.isArray(message.fields)) - return "fields: array expected"; - for (let i = 0; i < message.fields.length; ++i) { - let error = $root.query.Field.verify(message.fields[i]); + if (message.statements != null && message.hasOwnProperty("statements")) { + if (!Array.isArray(message.statements)) + return "statements: array expected"; + for (let i = 0; i < message.statements.length; ++i) { + let error = $root.binlogdata.BinlogTransaction.Statement.verify(message.statements[i]); if (error) - return "fields." + error; + return "statements." 
+ error; } } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; + if (message.event_token != null && message.hasOwnProperty("event_token")) { + let error = $root.query.EventToken.verify(message.event_token); + if (error) + return "event_token." + error; + } return null; }; /** - * Creates a FieldEvent message from a plain object. Also converts values to their respective internal types. + * Creates a BinlogTransaction message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof binlogdata.FieldEvent + * @memberof binlogdata.BinlogTransaction * @static * @param {Object.} object Plain object - * @returns {binlogdata.FieldEvent} FieldEvent + * @returns {binlogdata.BinlogTransaction} BinlogTransaction */ - FieldEvent.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.FieldEvent) + BinlogTransaction.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.BinlogTransaction) return object; - let message = new $root.binlogdata.FieldEvent(); - if (object.table_name != null) - message.table_name = String(object.table_name); - if (object.fields) { - if (!Array.isArray(object.fields)) - throw TypeError(".binlogdata.FieldEvent.fields: array expected"); - message.fields = []; - for (let i = 0; i < object.fields.length; ++i) { - if (typeof object.fields[i] !== "object") - throw TypeError(".binlogdata.FieldEvent.fields: object expected"); - message.fields[i] = $root.query.Field.fromObject(object.fields[i]); + let message = new $root.binlogdata.BinlogTransaction(); + if (object.statements) { + if (!Array.isArray(object.statements)) + throw TypeError(".binlogdata.BinlogTransaction.statements: array expected"); + message.statements = 
[]; + for (let i = 0; i < object.statements.length; ++i) { + if (typeof object.statements[i] !== "object") + throw TypeError(".binlogdata.BinlogTransaction.statements: object expected"); + message.statements[i] = $root.binlogdata.BinlogTransaction.Statement.fromObject(object.statements[i]); } } - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); + if (object.event_token != null) { + if (typeof object.event_token !== "object") + throw TypeError(".binlogdata.BinlogTransaction.event_token: object expected"); + message.event_token = $root.query.EventToken.fromObject(object.event_token); + } return message; }; /** - * Creates a plain object from a FieldEvent message. Also converts values to other types if specified. + * Creates a plain object from a BinlogTransaction message. Also converts values to other types if specified. * @function toObject - * @memberof binlogdata.FieldEvent + * @memberof binlogdata.BinlogTransaction * @static - * @param {binlogdata.FieldEvent} message FieldEvent + * @param {binlogdata.BinlogTransaction} message BinlogTransaction * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - FieldEvent.toObject = function toObject(message, options) { + BinlogTransaction.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) - object.fields = []; - if (options.defaults) { - object.table_name = ""; - object.keyspace = ""; - object.shard = ""; - } - if (message.table_name != null && message.hasOwnProperty("table_name")) - object.table_name = message.table_name; - if (message.fields && message.fields.length) { - object.fields = []; - for (let j = 0; j < message.fields.length; ++j) - object.fields[j] = $root.query.Field.toObject(message.fields[j], options); + object.statements = []; + if (options.defaults) + object.event_token = null; + if 
(message.statements && message.statements.length) { + object.statements = []; + for (let j = 0; j < message.statements.length; ++j) + object.statements[j] = $root.binlogdata.BinlogTransaction.Statement.toObject(message.statements[j], options); } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; + if (message.event_token != null && message.hasOwnProperty("event_token")) + object.event_token = $root.query.EventToken.toObject(message.event_token, options); return object; }; /** - * Converts this FieldEvent to JSON. + * Converts this BinlogTransaction to JSON. * @function toJSON - * @memberof binlogdata.FieldEvent + * @memberof binlogdata.BinlogTransaction * @instance * @returns {Object.} JSON object */ - FieldEvent.prototype.toJSON = function toJSON() { + BinlogTransaction.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for FieldEvent + * Gets the default type url for BinlogTransaction * @function getTypeUrl - * @memberof binlogdata.FieldEvent + * @memberof binlogdata.BinlogTransaction * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - FieldEvent.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + BinlogTransaction.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.FieldEvent"; + return typeUrlPrefix + "/binlogdata.BinlogTransaction"; }; - return FieldEvent; - })(); - - binlogdata.ShardGtid = (function() { - - /** - * Properties of a ShardGtid. 
- * @memberof binlogdata - * @interface IShardGtid - * @property {string|null} [keyspace] ShardGtid keyspace - * @property {string|null} [shard] ShardGtid shard - * @property {string|null} [gtid] ShardGtid gtid - * @property {Array.|null} [table_p_ks] ShardGtid table_p_ks - */ + BinlogTransaction.Statement = (function() { - /** - * Constructs a new ShardGtid. - * @memberof binlogdata - * @classdesc Represents a ShardGtid. - * @implements IShardGtid - * @constructor - * @param {binlogdata.IShardGtid=} [properties] Properties to set - */ - function ShardGtid(properties) { - this.table_p_ks = []; - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + /** + * Properties of a Statement. + * @memberof binlogdata.BinlogTransaction + * @interface IStatement + * @property {binlogdata.BinlogTransaction.Statement.Category|null} [category] Statement category + * @property {binlogdata.ICharset|null} [charset] Statement charset + * @property {Uint8Array|null} [sql] Statement sql + */ - /** - * ShardGtid keyspace. - * @member {string} keyspace - * @memberof binlogdata.ShardGtid - * @instance - */ - ShardGtid.prototype.keyspace = ""; + /** + * Constructs a new Statement. + * @memberof binlogdata.BinlogTransaction + * @classdesc Represents a Statement. + * @implements IStatement + * @constructor + * @param {binlogdata.BinlogTransaction.IStatement=} [properties] Properties to set + */ + function Statement(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } - /** - * ShardGtid shard. - * @member {string} shard - * @memberof binlogdata.ShardGtid - * @instance - */ - ShardGtid.prototype.shard = ""; + /** + * Statement category. 
+ * @member {binlogdata.BinlogTransaction.Statement.Category} category + * @memberof binlogdata.BinlogTransaction.Statement + * @instance + */ + Statement.prototype.category = 0; - /** - * ShardGtid gtid. - * @member {string} gtid - * @memberof binlogdata.ShardGtid - * @instance - */ - ShardGtid.prototype.gtid = ""; + /** + * Statement charset. + * @member {binlogdata.ICharset|null|undefined} charset + * @memberof binlogdata.BinlogTransaction.Statement + * @instance + */ + Statement.prototype.charset = null; - /** - * ShardGtid table_p_ks. - * @member {Array.} table_p_ks - * @memberof binlogdata.ShardGtid - * @instance - */ - ShardGtid.prototype.table_p_ks = $util.emptyArray; + /** + * Statement sql. + * @member {Uint8Array} sql + * @memberof binlogdata.BinlogTransaction.Statement + * @instance + */ + Statement.prototype.sql = $util.newBuffer([]); - /** - * Creates a new ShardGtid instance using the specified properties. - * @function create - * @memberof binlogdata.ShardGtid - * @static - * @param {binlogdata.IShardGtid=} [properties] Properties to set - * @returns {binlogdata.ShardGtid} ShardGtid instance - */ - ShardGtid.create = function create(properties) { - return new ShardGtid(properties); - }; + /** + * Creates a new Statement instance using the specified properties. + * @function create + * @memberof binlogdata.BinlogTransaction.Statement + * @static + * @param {binlogdata.BinlogTransaction.IStatement=} [properties] Properties to set + * @returns {binlogdata.BinlogTransaction.Statement} Statement instance + */ + Statement.create = function create(properties) { + return new Statement(properties); + }; - /** - * Encodes the specified ShardGtid message. Does not implicitly {@link binlogdata.ShardGtid.verify|verify} messages. 
- * @function encode - * @memberof binlogdata.ShardGtid - * @static - * @param {binlogdata.IShardGtid} message ShardGtid message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - ShardGtid.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.gtid != null && Object.hasOwnProperty.call(message, "gtid")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.gtid); - if (message.table_p_ks != null && message.table_p_ks.length) - for (let i = 0; i < message.table_p_ks.length; ++i) - $root.binlogdata.TableLastPK.encode(message.table_p_ks[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - return writer; - }; + /** + * Encodes the specified Statement message. Does not implicitly {@link binlogdata.BinlogTransaction.Statement.verify|verify} messages. 
+ * @function encode + * @memberof binlogdata.BinlogTransaction.Statement + * @static + * @param {binlogdata.BinlogTransaction.IStatement} message Statement message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Statement.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.category != null && Object.hasOwnProperty.call(message, "category")) + writer.uint32(/* id 1, wireType 0 =*/8).int32(message.category); + if (message.charset != null && Object.hasOwnProperty.call(message, "charset")) + $root.binlogdata.Charset.encode(message.charset, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.sql != null && Object.hasOwnProperty.call(message, "sql")) + writer.uint32(/* id 3, wireType 2 =*/26).bytes(message.sql); + return writer; + }; - /** - * Encodes the specified ShardGtid message, length delimited. Does not implicitly {@link binlogdata.ShardGtid.verify|verify} messages. - * @function encodeDelimited - * @memberof binlogdata.ShardGtid - * @static - * @param {binlogdata.IShardGtid} message ShardGtid message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - ShardGtid.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; + /** + * Encodes the specified Statement message, length delimited. Does not implicitly {@link binlogdata.BinlogTransaction.Statement.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof binlogdata.BinlogTransaction.Statement + * @static + * @param {binlogdata.BinlogTransaction.IStatement} message Statement message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Statement.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; - /** - * Decodes a ShardGtid message from the specified reader or buffer. - * @function decode - * @memberof binlogdata.ShardGtid - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.ShardGtid} ShardGtid - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - ShardGtid.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.ShardGtid(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - message.shard = reader.string(); + /** + * Decodes a Statement message from the specified reader or buffer. 
+ * @function decode + * @memberof binlogdata.BinlogTransaction.Statement + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {binlogdata.BinlogTransaction.Statement} Statement + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Statement.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.BinlogTransaction.Statement(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.category = reader.int32(); + break; + } + case 2: { + message.charset = $root.binlogdata.Charset.decode(reader, reader.uint32()); + break; + } + case 3: { + message.sql = reader.bytes(); + break; + } + default: + reader.skipType(tag & 7); break; } - case 3: { - message.gtid = reader.string(); + } + return message; + }; + + /** + * Decodes a Statement message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof binlogdata.BinlogTransaction.Statement + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {binlogdata.BinlogTransaction.Statement} Statement + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Statement.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Statement message. 
+ * @function verify + * @memberof binlogdata.BinlogTransaction.Statement + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Statement.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.category != null && message.hasOwnProperty("category")) + switch (message.category) { + default: + return "category: enum value expected"; + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + case 9: break; } - case 4: { - if (!(message.table_p_ks && message.table_p_ks.length)) - message.table_p_ks = []; - message.table_p_ks.push($root.binlogdata.TableLastPK.decode(reader, reader.uint32())); + if (message.charset != null && message.hasOwnProperty("charset")) { + let error = $root.binlogdata.Charset.verify(message.charset); + if (error) + return "charset." + error; + } + if (message.sql != null && message.hasOwnProperty("sql")) + if (!(message.sql && typeof message.sql.length === "number" || $util.isString(message.sql))) + return "sql: buffer expected"; + return null; + }; + + /** + * Creates a Statement message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof binlogdata.BinlogTransaction.Statement + * @static + * @param {Object.} object Plain object + * @returns {binlogdata.BinlogTransaction.Statement} Statement + */ + Statement.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.BinlogTransaction.Statement) + return object; + let message = new $root.binlogdata.BinlogTransaction.Statement(); + switch (object.category) { + default: + if (typeof object.category === "number") { + message.category = object.category; break; } - default: - reader.skipType(tag & 7); + break; + case "BL_UNRECOGNIZED": + case 0: + message.category = 0; + break; + case "BL_BEGIN": + case 1: + message.category = 1; + break; + case "BL_COMMIT": + case 2: + message.category = 2; + break; + case "BL_ROLLBACK": + case 3: + message.category = 3; + break; + case "BL_DML_DEPRECATED": + case 4: + message.category = 4; + break; + case "BL_DDL": + case 5: + message.category = 5; + break; + case "BL_SET": + case 6: + message.category = 6; + break; + case "BL_INSERT": + case 7: + message.category = 7; + break; + case "BL_UPDATE": + case 8: + message.category = 8; + break; + case "BL_DELETE": + case 9: + message.category = 9; break; } - } - return message; - }; + if (object.charset != null) { + if (typeof object.charset !== "object") + throw TypeError(".binlogdata.BinlogTransaction.Statement.charset: object expected"); + message.charset = $root.binlogdata.Charset.fromObject(object.charset); + } + if (object.sql != null) + if (typeof object.sql === "string") + $util.base64.decode(object.sql, message.sql = $util.newBuffer($util.base64.length(object.sql)), 0); + else if (object.sql.length >= 0) + message.sql = object.sql; + return message; + }; - /** - * Decodes a ShardGtid message from the specified reader or buffer, length delimited. 
- * @function decodeDelimited - * @memberof binlogdata.ShardGtid - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.ShardGtid} ShardGtid - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - ShardGtid.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; - - /** - * Verifies a ShardGtid message. - * @function verify - * @memberof binlogdata.ShardGtid - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - ShardGtid.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.gtid != null && message.hasOwnProperty("gtid")) - if (!$util.isString(message.gtid)) - return "gtid: string expected"; - if (message.table_p_ks != null && message.hasOwnProperty("table_p_ks")) { - if (!Array.isArray(message.table_p_ks)) - return "table_p_ks: array expected"; - for (let i = 0; i < message.table_p_ks.length; ++i) { - let error = $root.binlogdata.TableLastPK.verify(message.table_p_ks[i]); - if (error) - return "table_p_ks." + error; + /** + * Creates a plain object from a Statement message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof binlogdata.BinlogTransaction.Statement + * @static + * @param {binlogdata.BinlogTransaction.Statement} message Statement + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Statement.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.category = options.enums === String ? "BL_UNRECOGNIZED" : 0; + object.charset = null; + if (options.bytes === String) + object.sql = ""; + else { + object.sql = []; + if (options.bytes !== Array) + object.sql = $util.newBuffer(object.sql); + } } - } - return null; - }; - - /** - * Creates a ShardGtid message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof binlogdata.ShardGtid - * @static - * @param {Object.} object Plain object - * @returns {binlogdata.ShardGtid} ShardGtid - */ - ShardGtid.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.ShardGtid) + if (message.category != null && message.hasOwnProperty("category")) + object.category = options.enums === String ? $root.binlogdata.BinlogTransaction.Statement.Category[message.category] === undefined ? message.category : $root.binlogdata.BinlogTransaction.Statement.Category[message.category] : message.category; + if (message.charset != null && message.hasOwnProperty("charset")) + object.charset = $root.binlogdata.Charset.toObject(message.charset, options); + if (message.sql != null && message.hasOwnProperty("sql")) + object.sql = options.bytes === String ? $util.base64.encode(message.sql, 0, message.sql.length) : options.bytes === Array ? 
Array.prototype.slice.call(message.sql) : message.sql; return object; - let message = new $root.binlogdata.ShardGtid(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.gtid != null) - message.gtid = String(object.gtid); - if (object.table_p_ks) { - if (!Array.isArray(object.table_p_ks)) - throw TypeError(".binlogdata.ShardGtid.table_p_ks: array expected"); - message.table_p_ks = []; - for (let i = 0; i < object.table_p_ks.length; ++i) { - if (typeof object.table_p_ks[i] !== "object") - throw TypeError(".binlogdata.ShardGtid.table_p_ks: object expected"); - message.table_p_ks[i] = $root.binlogdata.TableLastPK.fromObject(object.table_p_ks[i]); - } - } - return message; - }; + }; - /** - * Creates a plain object from a ShardGtid message. Also converts values to other types if specified. - * @function toObject - * @memberof binlogdata.ShardGtid - * @static - * @param {binlogdata.ShardGtid} message ShardGtid - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - ShardGtid.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.arrays || options.defaults) - object.table_p_ks = []; - if (options.defaults) { - object.keyspace = ""; - object.shard = ""; - object.gtid = ""; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.gtid != null && message.hasOwnProperty("gtid")) - object.gtid = message.gtid; - if (message.table_p_ks && message.table_p_ks.length) { - object.table_p_ks = []; - for (let j = 0; j < message.table_p_ks.length; ++j) - object.table_p_ks[j] = $root.binlogdata.TableLastPK.toObject(message.table_p_ks[j], options); - } - return object; - }; + /** + * Converts this 
Statement to JSON. + * @function toJSON + * @memberof binlogdata.BinlogTransaction.Statement + * @instance + * @returns {Object.} JSON object + */ + Statement.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; - /** - * Converts this ShardGtid to JSON. - * @function toJSON - * @memberof binlogdata.ShardGtid - * @instance - * @returns {Object.} JSON object - */ - ShardGtid.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; + /** + * Gets the default type url for Statement + * @function getTypeUrl + * @memberof binlogdata.BinlogTransaction.Statement + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Statement.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/binlogdata.BinlogTransaction.Statement"; + }; - /** - * Gets the default type url for ShardGtid - * @function getTypeUrl - * @memberof binlogdata.ShardGtid - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - ShardGtid.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/binlogdata.ShardGtid"; - }; + /** + * Category enum. 
+ * @name binlogdata.BinlogTransaction.Statement.Category + * @enum {number} + * @property {number} BL_UNRECOGNIZED=0 BL_UNRECOGNIZED value + * @property {number} BL_BEGIN=1 BL_BEGIN value + * @property {number} BL_COMMIT=2 BL_COMMIT value + * @property {number} BL_ROLLBACK=3 BL_ROLLBACK value + * @property {number} BL_DML_DEPRECATED=4 BL_DML_DEPRECATED value + * @property {number} BL_DDL=5 BL_DDL value + * @property {number} BL_SET=6 BL_SET value + * @property {number} BL_INSERT=7 BL_INSERT value + * @property {number} BL_UPDATE=8 BL_UPDATE value + * @property {number} BL_DELETE=9 BL_DELETE value + */ + Statement.Category = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "BL_UNRECOGNIZED"] = 0; + values[valuesById[1] = "BL_BEGIN"] = 1; + values[valuesById[2] = "BL_COMMIT"] = 2; + values[valuesById[3] = "BL_ROLLBACK"] = 3; + values[valuesById[4] = "BL_DML_DEPRECATED"] = 4; + values[valuesById[5] = "BL_DDL"] = 5; + values[valuesById[6] = "BL_SET"] = 6; + values[valuesById[7] = "BL_INSERT"] = 7; + values[valuesById[8] = "BL_UPDATE"] = 8; + values[valuesById[9] = "BL_DELETE"] = 9; + return values; + })(); - return ShardGtid; + return Statement; + })(); + + return BinlogTransaction; })(); - binlogdata.VGtid = (function() { + binlogdata.StreamKeyRangeRequest = (function() { /** - * Properties of a VGtid. + * Properties of a StreamKeyRangeRequest. * @memberof binlogdata - * @interface IVGtid - * @property {Array.|null} [shard_gtids] VGtid shard_gtids + * @interface IStreamKeyRangeRequest + * @property {string|null} [position] StreamKeyRangeRequest position + * @property {topodata.IKeyRange|null} [key_range] StreamKeyRangeRequest key_range + * @property {binlogdata.ICharset|null} [charset] StreamKeyRangeRequest charset */ /** - * Constructs a new VGtid. + * Constructs a new StreamKeyRangeRequest. * @memberof binlogdata - * @classdesc Represents a VGtid. 
- * @implements IVGtid + * @classdesc Represents a StreamKeyRangeRequest. + * @implements IStreamKeyRangeRequest * @constructor - * @param {binlogdata.IVGtid=} [properties] Properties to set + * @param {binlogdata.IStreamKeyRangeRequest=} [properties] Properties to set */ - function VGtid(properties) { - this.shard_gtids = []; + function StreamKeyRangeRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -66158,78 +67276,103 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * VGtid shard_gtids. - * @member {Array.} shard_gtids - * @memberof binlogdata.VGtid + * StreamKeyRangeRequest position. + * @member {string} position + * @memberof binlogdata.StreamKeyRangeRequest * @instance */ - VGtid.prototype.shard_gtids = $util.emptyArray; + StreamKeyRangeRequest.prototype.position = ""; /** - * Creates a new VGtid instance using the specified properties. + * StreamKeyRangeRequest key_range. + * @member {topodata.IKeyRange|null|undefined} key_range + * @memberof binlogdata.StreamKeyRangeRequest + * @instance + */ + StreamKeyRangeRequest.prototype.key_range = null; + + /** + * StreamKeyRangeRequest charset. + * @member {binlogdata.ICharset|null|undefined} charset + * @memberof binlogdata.StreamKeyRangeRequest + * @instance + */ + StreamKeyRangeRequest.prototype.charset = null; + + /** + * Creates a new StreamKeyRangeRequest instance using the specified properties. 
* @function create - * @memberof binlogdata.VGtid + * @memberof binlogdata.StreamKeyRangeRequest * @static - * @param {binlogdata.IVGtid=} [properties] Properties to set - * @returns {binlogdata.VGtid} VGtid instance + * @param {binlogdata.IStreamKeyRangeRequest=} [properties] Properties to set + * @returns {binlogdata.StreamKeyRangeRequest} StreamKeyRangeRequest instance */ - VGtid.create = function create(properties) { - return new VGtid(properties); + StreamKeyRangeRequest.create = function create(properties) { + return new StreamKeyRangeRequest(properties); }; /** - * Encodes the specified VGtid message. Does not implicitly {@link binlogdata.VGtid.verify|verify} messages. + * Encodes the specified StreamKeyRangeRequest message. Does not implicitly {@link binlogdata.StreamKeyRangeRequest.verify|verify} messages. * @function encode - * @memberof binlogdata.VGtid + * @memberof binlogdata.StreamKeyRangeRequest * @static - * @param {binlogdata.IVGtid} message VGtid message or plain object to encode + * @param {binlogdata.IStreamKeyRangeRequest} message StreamKeyRangeRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VGtid.encode = function encode(message, writer) { + StreamKeyRangeRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.shard_gtids != null && message.shard_gtids.length) - for (let i = 0; i < message.shard_gtids.length; ++i) - $root.binlogdata.ShardGtid.encode(message.shard_gtids[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.position != null && Object.hasOwnProperty.call(message, "position")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.position); + if (message.key_range != null && Object.hasOwnProperty.call(message, "key_range")) + $root.topodata.KeyRange.encode(message.key_range, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.charset != null 
&& Object.hasOwnProperty.call(message, "charset")) + $root.binlogdata.Charset.encode(message.charset, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); return writer; }; /** - * Encodes the specified VGtid message, length delimited. Does not implicitly {@link binlogdata.VGtid.verify|verify} messages. + * Encodes the specified StreamKeyRangeRequest message, length delimited. Does not implicitly {@link binlogdata.StreamKeyRangeRequest.verify|verify} messages. * @function encodeDelimited - * @memberof binlogdata.VGtid + * @memberof binlogdata.StreamKeyRangeRequest * @static - * @param {binlogdata.IVGtid} message VGtid message or plain object to encode + * @param {binlogdata.IStreamKeyRangeRequest} message StreamKeyRangeRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VGtid.encodeDelimited = function encodeDelimited(message, writer) { + StreamKeyRangeRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a VGtid message from the specified reader or buffer. + * Decodes a StreamKeyRangeRequest message from the specified reader or buffer. * @function decode - * @memberof binlogdata.VGtid + * @memberof binlogdata.StreamKeyRangeRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.VGtid} VGtid + * @returns {binlogdata.StreamKeyRangeRequest} StreamKeyRangeRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VGtid.decode = function decode(reader, length) { + StreamKeyRangeRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.binlogdata.VGtid(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.StreamKeyRangeRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.shard_gtids && message.shard_gtids.length)) - message.shard_gtids = []; - message.shard_gtids.push($root.binlogdata.ShardGtid.decode(reader, reader.uint32())); + message.position = reader.string(); + break; + } + case 2: { + message.key_range = $root.topodata.KeyRange.decode(reader, reader.uint32()); + break; + } + case 3: { + message.charset = $root.binlogdata.Charset.decode(reader, reader.uint32()); break; } default: @@ -66241,140 +67384,149 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a VGtid message from the specified reader or buffer, length delimited. + * Decodes a StreamKeyRangeRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof binlogdata.VGtid + * @memberof binlogdata.StreamKeyRangeRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.VGtid} VGtid + * @returns {binlogdata.StreamKeyRangeRequest} StreamKeyRangeRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VGtid.decodeDelimited = function decodeDelimited(reader) { + StreamKeyRangeRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a VGtid message. + * Verifies a StreamKeyRangeRequest message. 
* @function verify - * @memberof binlogdata.VGtid + * @memberof binlogdata.StreamKeyRangeRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - VGtid.verify = function verify(message) { + StreamKeyRangeRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.shard_gtids != null && message.hasOwnProperty("shard_gtids")) { - if (!Array.isArray(message.shard_gtids)) - return "shard_gtids: array expected"; - for (let i = 0; i < message.shard_gtids.length; ++i) { - let error = $root.binlogdata.ShardGtid.verify(message.shard_gtids[i]); - if (error) - return "shard_gtids." + error; - } + if (message.position != null && message.hasOwnProperty("position")) + if (!$util.isString(message.position)) + return "position: string expected"; + if (message.key_range != null && message.hasOwnProperty("key_range")) { + let error = $root.topodata.KeyRange.verify(message.key_range); + if (error) + return "key_range." + error; + } + if (message.charset != null && message.hasOwnProperty("charset")) { + let error = $root.binlogdata.Charset.verify(message.charset); + if (error) + return "charset." + error; } return null; }; /** - * Creates a VGtid message from a plain object. Also converts values to their respective internal types. + * Creates a StreamKeyRangeRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof binlogdata.VGtid + * @memberof binlogdata.StreamKeyRangeRequest * @static * @param {Object.} object Plain object - * @returns {binlogdata.VGtid} VGtid + * @returns {binlogdata.StreamKeyRangeRequest} StreamKeyRangeRequest */ - VGtid.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.VGtid) + StreamKeyRangeRequest.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.StreamKeyRangeRequest) return object; - let message = new $root.binlogdata.VGtid(); - if (object.shard_gtids) { - if (!Array.isArray(object.shard_gtids)) - throw TypeError(".binlogdata.VGtid.shard_gtids: array expected"); - message.shard_gtids = []; - for (let i = 0; i < object.shard_gtids.length; ++i) { - if (typeof object.shard_gtids[i] !== "object") - throw TypeError(".binlogdata.VGtid.shard_gtids: object expected"); - message.shard_gtids[i] = $root.binlogdata.ShardGtid.fromObject(object.shard_gtids[i]); - } + let message = new $root.binlogdata.StreamKeyRangeRequest(); + if (object.position != null) + message.position = String(object.position); + if (object.key_range != null) { + if (typeof object.key_range !== "object") + throw TypeError(".binlogdata.StreamKeyRangeRequest.key_range: object expected"); + message.key_range = $root.topodata.KeyRange.fromObject(object.key_range); + } + if (object.charset != null) { + if (typeof object.charset !== "object") + throw TypeError(".binlogdata.StreamKeyRangeRequest.charset: object expected"); + message.charset = $root.binlogdata.Charset.fromObject(object.charset); } return message; }; /** - * Creates a plain object from a VGtid message. Also converts values to other types if specified. + * Creates a plain object from a StreamKeyRangeRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof binlogdata.VGtid + * @memberof binlogdata.StreamKeyRangeRequest * @static - * @param {binlogdata.VGtid} message VGtid + * @param {binlogdata.StreamKeyRangeRequest} message StreamKeyRangeRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - VGtid.toObject = function toObject(message, options) { + StreamKeyRangeRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.shard_gtids = []; - if (message.shard_gtids && message.shard_gtids.length) { - object.shard_gtids = []; - for (let j = 0; j < message.shard_gtids.length; ++j) - object.shard_gtids[j] = $root.binlogdata.ShardGtid.toObject(message.shard_gtids[j], options); + if (options.defaults) { + object.position = ""; + object.key_range = null; + object.charset = null; } + if (message.position != null && message.hasOwnProperty("position")) + object.position = message.position; + if (message.key_range != null && message.hasOwnProperty("key_range")) + object.key_range = $root.topodata.KeyRange.toObject(message.key_range, options); + if (message.charset != null && message.hasOwnProperty("charset")) + object.charset = $root.binlogdata.Charset.toObject(message.charset, options); return object; }; /** - * Converts this VGtid to JSON. + * Converts this StreamKeyRangeRequest to JSON. 
* @function toJSON - * @memberof binlogdata.VGtid + * @memberof binlogdata.StreamKeyRangeRequest * @instance * @returns {Object.} JSON object */ - VGtid.prototype.toJSON = function toJSON() { + StreamKeyRangeRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for VGtid + * Gets the default type url for StreamKeyRangeRequest * @function getTypeUrl - * @memberof binlogdata.VGtid + * @memberof binlogdata.StreamKeyRangeRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - VGtid.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + StreamKeyRangeRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.VGtid"; + return typeUrlPrefix + "/binlogdata.StreamKeyRangeRequest"; }; - return VGtid; + return StreamKeyRangeRequest; })(); - binlogdata.KeyspaceShard = (function() { + binlogdata.StreamKeyRangeResponse = (function() { /** - * Properties of a KeyspaceShard. + * Properties of a StreamKeyRangeResponse. * @memberof binlogdata - * @interface IKeyspaceShard - * @property {string|null} [keyspace] KeyspaceShard keyspace - * @property {string|null} [shard] KeyspaceShard shard + * @interface IStreamKeyRangeResponse + * @property {binlogdata.IBinlogTransaction|null} [binlog_transaction] StreamKeyRangeResponse binlog_transaction */ /** - * Constructs a new KeyspaceShard. + * Constructs a new StreamKeyRangeResponse. * @memberof binlogdata - * @classdesc Represents a KeyspaceShard. - * @implements IKeyspaceShard + * @classdesc Represents a StreamKeyRangeResponse. 
+ * @implements IStreamKeyRangeResponse * @constructor - * @param {binlogdata.IKeyspaceShard=} [properties] Properties to set + * @param {binlogdata.IStreamKeyRangeResponse=} [properties] Properties to set */ - function KeyspaceShard(properties) { + function StreamKeyRangeResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -66382,89 +67534,75 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * KeyspaceShard keyspace. - * @member {string} keyspace - * @memberof binlogdata.KeyspaceShard - * @instance - */ - KeyspaceShard.prototype.keyspace = ""; - - /** - * KeyspaceShard shard. - * @member {string} shard - * @memberof binlogdata.KeyspaceShard + * StreamKeyRangeResponse binlog_transaction. + * @member {binlogdata.IBinlogTransaction|null|undefined} binlog_transaction + * @memberof binlogdata.StreamKeyRangeResponse * @instance */ - KeyspaceShard.prototype.shard = ""; + StreamKeyRangeResponse.prototype.binlog_transaction = null; /** - * Creates a new KeyspaceShard instance using the specified properties. + * Creates a new StreamKeyRangeResponse instance using the specified properties. * @function create - * @memberof binlogdata.KeyspaceShard + * @memberof binlogdata.StreamKeyRangeResponse * @static - * @param {binlogdata.IKeyspaceShard=} [properties] Properties to set - * @returns {binlogdata.KeyspaceShard} KeyspaceShard instance + * @param {binlogdata.IStreamKeyRangeResponse=} [properties] Properties to set + * @returns {binlogdata.StreamKeyRangeResponse} StreamKeyRangeResponse instance */ - KeyspaceShard.create = function create(properties) { - return new KeyspaceShard(properties); + StreamKeyRangeResponse.create = function create(properties) { + return new StreamKeyRangeResponse(properties); }; /** - * Encodes the specified KeyspaceShard message. Does not implicitly {@link binlogdata.KeyspaceShard.verify|verify} messages. 
+ * Encodes the specified StreamKeyRangeResponse message. Does not implicitly {@link binlogdata.StreamKeyRangeResponse.verify|verify} messages. * @function encode - * @memberof binlogdata.KeyspaceShard + * @memberof binlogdata.StreamKeyRangeResponse * @static - * @param {binlogdata.IKeyspaceShard} message KeyspaceShard message or plain object to encode + * @param {binlogdata.IStreamKeyRangeResponse} message StreamKeyRangeResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - KeyspaceShard.encode = function encode(message, writer) { + StreamKeyRangeResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.binlog_transaction != null && Object.hasOwnProperty.call(message, "binlog_transaction")) + $root.binlogdata.BinlogTransaction.encode(message.binlog_transaction, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified KeyspaceShard message, length delimited. Does not implicitly {@link binlogdata.KeyspaceShard.verify|verify} messages. + * Encodes the specified StreamKeyRangeResponse message, length delimited. Does not implicitly {@link binlogdata.StreamKeyRangeResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof binlogdata.KeyspaceShard + * @memberof binlogdata.StreamKeyRangeResponse * @static - * @param {binlogdata.IKeyspaceShard} message KeyspaceShard message or plain object to encode + * @param {binlogdata.IStreamKeyRangeResponse} message StreamKeyRangeResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - KeyspaceShard.encodeDelimited = function encodeDelimited(message, writer) { + StreamKeyRangeResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a KeyspaceShard message from the specified reader or buffer. + * Decodes a StreamKeyRangeResponse message from the specified reader or buffer. * @function decode - * @memberof binlogdata.KeyspaceShard + * @memberof binlogdata.StreamKeyRangeResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.KeyspaceShard} KeyspaceShard + * @returns {binlogdata.StreamKeyRangeResponse} StreamKeyRangeResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - KeyspaceShard.decode = function decode(reader, length) { + StreamKeyRangeResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.KeyspaceShard(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.binlogdata.StreamKeyRangeResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - message.shard = reader.string(); + message.binlog_transaction = $root.binlogdata.BinlogTransaction.decode(reader, reader.uint32()); break; } default: @@ -66476,155 +67614,130 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a KeyspaceShard message from the specified reader or buffer, length delimited. + * Decodes a StreamKeyRangeResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof binlogdata.KeyspaceShard + * @memberof binlogdata.StreamKeyRangeResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.KeyspaceShard} KeyspaceShard + * @returns {binlogdata.StreamKeyRangeResponse} StreamKeyRangeResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - KeyspaceShard.decodeDelimited = function decodeDelimited(reader) { + StreamKeyRangeResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a KeyspaceShard message. + * Verifies a StreamKeyRangeResponse message. 
* @function verify - * @memberof binlogdata.KeyspaceShard + * @memberof binlogdata.StreamKeyRangeResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - KeyspaceShard.verify = function verify(message) { + StreamKeyRangeResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; + if (message.binlog_transaction != null && message.hasOwnProperty("binlog_transaction")) { + let error = $root.binlogdata.BinlogTransaction.verify(message.binlog_transaction); + if (error) + return "binlog_transaction." + error; + } return null; }; /** - * Creates a KeyspaceShard message from a plain object. Also converts values to their respective internal types. + * Creates a StreamKeyRangeResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof binlogdata.KeyspaceShard + * @memberof binlogdata.StreamKeyRangeResponse * @static * @param {Object.} object Plain object - * @returns {binlogdata.KeyspaceShard} KeyspaceShard + * @returns {binlogdata.StreamKeyRangeResponse} StreamKeyRangeResponse */ - KeyspaceShard.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.KeyspaceShard) + StreamKeyRangeResponse.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.StreamKeyRangeResponse) return object; - let message = new $root.binlogdata.KeyspaceShard(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); + let message = new $root.binlogdata.StreamKeyRangeResponse(); + if (object.binlog_transaction != null) { + if (typeof object.binlog_transaction !== "object") + throw TypeError(".binlogdata.StreamKeyRangeResponse.binlog_transaction: object expected"); + message.binlog_transaction = $root.binlogdata.BinlogTransaction.fromObject(object.binlog_transaction); + } return message; }; /** - * Creates a plain object from a KeyspaceShard message. Also converts values to other types if specified. + * Creates a plain object from a StreamKeyRangeResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof binlogdata.KeyspaceShard + * @memberof binlogdata.StreamKeyRangeResponse * @static - * @param {binlogdata.KeyspaceShard} message KeyspaceShard + * @param {binlogdata.StreamKeyRangeResponse} message StreamKeyRangeResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - KeyspaceShard.toObject = function toObject(message, options) { + StreamKeyRangeResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.keyspace = ""; - object.shard = ""; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; + if (options.defaults) + object.binlog_transaction = null; + if (message.binlog_transaction != null && message.hasOwnProperty("binlog_transaction")) + object.binlog_transaction = $root.binlogdata.BinlogTransaction.toObject(message.binlog_transaction, options); return object; }; /** - * Converts this KeyspaceShard to JSON. + * Converts this StreamKeyRangeResponse to JSON. 
* @function toJSON - * @memberof binlogdata.KeyspaceShard + * @memberof binlogdata.StreamKeyRangeResponse * @instance * @returns {Object.} JSON object */ - KeyspaceShard.prototype.toJSON = function toJSON() { + StreamKeyRangeResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for KeyspaceShard + * Gets the default type url for StreamKeyRangeResponse * @function getTypeUrl - * @memberof binlogdata.KeyspaceShard + * @memberof binlogdata.StreamKeyRangeResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - KeyspaceShard.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + StreamKeyRangeResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.KeyspaceShard"; + return typeUrlPrefix + "/binlogdata.StreamKeyRangeResponse"; }; - return KeyspaceShard; - })(); - - /** - * MigrationType enum. - * @name binlogdata.MigrationType - * @enum {number} - * @property {number} TABLES=0 TABLES value - * @property {number} SHARDS=1 SHARDS value - */ - binlogdata.MigrationType = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "TABLES"] = 0; - values[valuesById[1] = "SHARDS"] = 1; - return values; + return StreamKeyRangeResponse; })(); - binlogdata.Journal = (function() { + binlogdata.StreamTablesRequest = (function() { /** - * Properties of a Journal. + * Properties of a StreamTablesRequest. 
* @memberof binlogdata - * @interface IJournal - * @property {number|Long|null} [id] Journal id - * @property {binlogdata.MigrationType|null} [migration_type] Journal migration_type - * @property {Array.|null} [tables] Journal tables - * @property {string|null} [local_position] Journal local_position - * @property {Array.|null} [shard_gtids] Journal shard_gtids - * @property {Array.|null} [participants] Journal participants - * @property {Array.|null} [source_workflows] Journal source_workflows + * @interface IStreamTablesRequest + * @property {string|null} [position] StreamTablesRequest position + * @property {Array.|null} [tables] StreamTablesRequest tables + * @property {binlogdata.ICharset|null} [charset] StreamTablesRequest charset */ /** - * Constructs a new Journal. + * Constructs a new StreamTablesRequest. * @memberof binlogdata - * @classdesc Represents a Journal. - * @implements IJournal + * @classdesc Represents a StreamTablesRequest. + * @implements IStreamTablesRequest * @constructor - * @param {binlogdata.IJournal=} [properties] Properties to set + * @param {binlogdata.IStreamTablesRequest=} [properties] Properties to set */ - function Journal(properties) { + function StreamTablesRequest(properties) { this.tables = []; - this.shard_gtids = []; - this.participants = []; - this.source_workflows = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -66632,171 +67745,106 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * Journal id. - * @member {number|Long} id - * @memberof binlogdata.Journal - * @instance - */ - Journal.prototype.id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * Journal migration_type. - * @member {binlogdata.MigrationType} migration_type - * @memberof binlogdata.Journal + * StreamTablesRequest position. 
+ * @member {string} position + * @memberof binlogdata.StreamTablesRequest * @instance */ - Journal.prototype.migration_type = 0; + StreamTablesRequest.prototype.position = ""; /** - * Journal tables. + * StreamTablesRequest tables. * @member {Array.} tables - * @memberof binlogdata.Journal - * @instance - */ - Journal.prototype.tables = $util.emptyArray; - - /** - * Journal local_position. - * @member {string} local_position - * @memberof binlogdata.Journal - * @instance - */ - Journal.prototype.local_position = ""; - - /** - * Journal shard_gtids. - * @member {Array.} shard_gtids - * @memberof binlogdata.Journal - * @instance - */ - Journal.prototype.shard_gtids = $util.emptyArray; - - /** - * Journal participants. - * @member {Array.} participants - * @memberof binlogdata.Journal + * @memberof binlogdata.StreamTablesRequest * @instance */ - Journal.prototype.participants = $util.emptyArray; + StreamTablesRequest.prototype.tables = $util.emptyArray; /** - * Journal source_workflows. - * @member {Array.} source_workflows - * @memberof binlogdata.Journal + * StreamTablesRequest charset. + * @member {binlogdata.ICharset|null|undefined} charset + * @memberof binlogdata.StreamTablesRequest * @instance */ - Journal.prototype.source_workflows = $util.emptyArray; + StreamTablesRequest.prototype.charset = null; /** - * Creates a new Journal instance using the specified properties. + * Creates a new StreamTablesRequest instance using the specified properties. 
* @function create - * @memberof binlogdata.Journal + * @memberof binlogdata.StreamTablesRequest * @static - * @param {binlogdata.IJournal=} [properties] Properties to set - * @returns {binlogdata.Journal} Journal instance + * @param {binlogdata.IStreamTablesRequest=} [properties] Properties to set + * @returns {binlogdata.StreamTablesRequest} StreamTablesRequest instance */ - Journal.create = function create(properties) { - return new Journal(properties); + StreamTablesRequest.create = function create(properties) { + return new StreamTablesRequest(properties); }; /** - * Encodes the specified Journal message. Does not implicitly {@link binlogdata.Journal.verify|verify} messages. + * Encodes the specified StreamTablesRequest message. Does not implicitly {@link binlogdata.StreamTablesRequest.verify|verify} messages. * @function encode - * @memberof binlogdata.Journal + * @memberof binlogdata.StreamTablesRequest * @static - * @param {binlogdata.IJournal} message Journal message or plain object to encode + * @param {binlogdata.IStreamTablesRequest} message StreamTablesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Journal.encode = function encode(message, writer) { + StreamTablesRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.id != null && Object.hasOwnProperty.call(message, "id")) - writer.uint32(/* id 1, wireType 0 =*/8).int64(message.id); - if (message.migration_type != null && Object.hasOwnProperty.call(message, "migration_type")) - writer.uint32(/* id 2, wireType 0 =*/16).int32(message.migration_type); + if (message.position != null && Object.hasOwnProperty.call(message, "position")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.position); if (message.tables != null && message.tables.length) for (let i = 0; i < message.tables.length; ++i) - writer.uint32(/* id 3, wireType 2 
=*/26).string(message.tables[i]); - if (message.local_position != null && Object.hasOwnProperty.call(message, "local_position")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.local_position); - if (message.shard_gtids != null && message.shard_gtids.length) - for (let i = 0; i < message.shard_gtids.length; ++i) - $root.binlogdata.ShardGtid.encode(message.shard_gtids[i], writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); - if (message.participants != null && message.participants.length) - for (let i = 0; i < message.participants.length; ++i) - $root.binlogdata.KeyspaceShard.encode(message.participants[i], writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); - if (message.source_workflows != null && message.source_workflows.length) - for (let i = 0; i < message.source_workflows.length; ++i) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.source_workflows[i]); + writer.uint32(/* id 2, wireType 2 =*/18).string(message.tables[i]); + if (message.charset != null && Object.hasOwnProperty.call(message, "charset")) + $root.binlogdata.Charset.encode(message.charset, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); return writer; }; /** - * Encodes the specified Journal message, length delimited. Does not implicitly {@link binlogdata.Journal.verify|verify} messages. + * Encodes the specified StreamTablesRequest message, length delimited. Does not implicitly {@link binlogdata.StreamTablesRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof binlogdata.Journal + * @memberof binlogdata.StreamTablesRequest * @static - * @param {binlogdata.IJournal} message Journal message or plain object to encode + * @param {binlogdata.IStreamTablesRequest} message StreamTablesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Journal.encodeDelimited = function encodeDelimited(message, writer) { + StreamTablesRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Journal message from the specified reader or buffer. + * Decodes a StreamTablesRequest message from the specified reader or buffer. * @function decode - * @memberof binlogdata.Journal + * @memberof binlogdata.StreamTablesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.Journal} Journal + * @returns {binlogdata.StreamTablesRequest} StreamTablesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Journal.decode = function decode(reader, length) { + StreamTablesRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.Journal(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.binlogdata.StreamTablesRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.id = reader.int64(); + message.position = reader.string(); break; } case 2: { - message.migration_type = reader.int32(); - break; - } - case 3: { if (!(message.tables && message.tables.length)) message.tables = []; message.tables.push(reader.string()); break; } - case 4: { - message.local_position = reader.string(); - break; - } - case 5: { - if (!(message.shard_gtids && message.shard_gtids.length)) - message.shard_gtids = []; - message.shard_gtids.push($root.binlogdata.ShardGtid.decode(reader, reader.uint32())); - break; - } - case 6: { - if (!(message.participants && message.participants.length)) - message.participants = []; - message.participants.push($root.binlogdata.KeyspaceShard.decode(reader, reader.uint32())); - break; - } - case 7: { - if (!(message.source_workflows && message.source_workflows.length)) - message.source_workflows = []; - message.source_workflows.push(reader.string()); + case 3: { + message.charset = $root.binlogdata.Charset.decode(reader, reader.uint32()); break; } default: @@ -66808,43 +67856,35 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a Journal message from the specified reader or buffer, length delimited. + * Decodes a StreamTablesRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof binlogdata.Journal + * @memberof binlogdata.StreamTablesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.Journal} Journal + * @returns {binlogdata.StreamTablesRequest} StreamTablesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Journal.decodeDelimited = function decodeDelimited(reader) { + StreamTablesRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Journal message. + * Verifies a StreamTablesRequest message. * @function verify - * @memberof binlogdata.Journal + * @memberof binlogdata.StreamTablesRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Journal.verify = function verify(message) { + StreamTablesRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.id != null && message.hasOwnProperty("id")) - if (!$util.isInteger(message.id) && !(message.id && $util.isInteger(message.id.low) && $util.isInteger(message.id.high))) - return "id: integer|Long expected"; - if (message.migration_type != null && message.hasOwnProperty("migration_type")) - switch (message.migration_type) { - default: - return "migration_type: enum value expected"; - case 0: - case 1: - break; - } + if (message.position != null && message.hasOwnProperty("position")) + if (!$util.isString(message.position)) + return "position: string expected"; if (message.tables != null && message.hasOwnProperty("tables")) { if (!Array.isArray(message.tables)) return "tables: array expected"; @@ -66852,233 +67892,121 @@ export const binlogdata = $root.binlogdata = (() => { if 
(!$util.isString(message.tables[i])) return "tables: string[] expected"; } - if (message.local_position != null && message.hasOwnProperty("local_position")) - if (!$util.isString(message.local_position)) - return "local_position: string expected"; - if (message.shard_gtids != null && message.hasOwnProperty("shard_gtids")) { - if (!Array.isArray(message.shard_gtids)) - return "shard_gtids: array expected"; - for (let i = 0; i < message.shard_gtids.length; ++i) { - let error = $root.binlogdata.ShardGtid.verify(message.shard_gtids[i]); - if (error) - return "shard_gtids." + error; - } - } - if (message.participants != null && message.hasOwnProperty("participants")) { - if (!Array.isArray(message.participants)) - return "participants: array expected"; - for (let i = 0; i < message.participants.length; ++i) { - let error = $root.binlogdata.KeyspaceShard.verify(message.participants[i]); - if (error) - return "participants." + error; - } - } - if (message.source_workflows != null && message.hasOwnProperty("source_workflows")) { - if (!Array.isArray(message.source_workflows)) - return "source_workflows: array expected"; - for (let i = 0; i < message.source_workflows.length; ++i) - if (!$util.isString(message.source_workflows[i])) - return "source_workflows: string[] expected"; + if (message.charset != null && message.hasOwnProperty("charset")) { + let error = $root.binlogdata.Charset.verify(message.charset); + if (error) + return "charset." + error; } return null; }; /** - * Creates a Journal message from a plain object. Also converts values to their respective internal types. + * Creates a StreamTablesRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof binlogdata.Journal + * @memberof binlogdata.StreamTablesRequest * @static * @param {Object.} object Plain object - * @returns {binlogdata.Journal} Journal + * @returns {binlogdata.StreamTablesRequest} StreamTablesRequest */ - Journal.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.Journal) + StreamTablesRequest.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.StreamTablesRequest) return object; - let message = new $root.binlogdata.Journal(); - if (object.id != null) - if ($util.Long) - (message.id = $util.Long.fromValue(object.id)).unsigned = false; - else if (typeof object.id === "string") - message.id = parseInt(object.id, 10); - else if (typeof object.id === "number") - message.id = object.id; - else if (typeof object.id === "object") - message.id = new $util.LongBits(object.id.low >>> 0, object.id.high >>> 0).toNumber(); - switch (object.migration_type) { - default: - if (typeof object.migration_type === "number") { - message.migration_type = object.migration_type; - break; - } - break; - case "TABLES": - case 0: - message.migration_type = 0; - break; - case "SHARDS": - case 1: - message.migration_type = 1; - break; - } + let message = new $root.binlogdata.StreamTablesRequest(); + if (object.position != null) + message.position = String(object.position); if (object.tables) { if (!Array.isArray(object.tables)) - throw TypeError(".binlogdata.Journal.tables: array expected"); + throw TypeError(".binlogdata.StreamTablesRequest.tables: array expected"); message.tables = []; for (let i = 0; i < object.tables.length; ++i) message.tables[i] = String(object.tables[i]); } - if (object.local_position != null) - message.local_position = String(object.local_position); - if (object.shard_gtids) { - if (!Array.isArray(object.shard_gtids)) - throw TypeError(".binlogdata.Journal.shard_gtids: array expected"); - message.shard_gtids = []; - for (let i = 0; i < 
object.shard_gtids.length; ++i) { - if (typeof object.shard_gtids[i] !== "object") - throw TypeError(".binlogdata.Journal.shard_gtids: object expected"); - message.shard_gtids[i] = $root.binlogdata.ShardGtid.fromObject(object.shard_gtids[i]); - } - } - if (object.participants) { - if (!Array.isArray(object.participants)) - throw TypeError(".binlogdata.Journal.participants: array expected"); - message.participants = []; - for (let i = 0; i < object.participants.length; ++i) { - if (typeof object.participants[i] !== "object") - throw TypeError(".binlogdata.Journal.participants: object expected"); - message.participants[i] = $root.binlogdata.KeyspaceShard.fromObject(object.participants[i]); - } - } - if (object.source_workflows) { - if (!Array.isArray(object.source_workflows)) - throw TypeError(".binlogdata.Journal.source_workflows: array expected"); - message.source_workflows = []; - for (let i = 0; i < object.source_workflows.length; ++i) - message.source_workflows[i] = String(object.source_workflows[i]); + if (object.charset != null) { + if (typeof object.charset !== "object") + throw TypeError(".binlogdata.StreamTablesRequest.charset: object expected"); + message.charset = $root.binlogdata.Charset.fromObject(object.charset); } return message; }; /** - * Creates a plain object from a Journal message. Also converts values to other types if specified. + * Creates a plain object from a StreamTablesRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof binlogdata.Journal + * @memberof binlogdata.StreamTablesRequest * @static - * @param {binlogdata.Journal} message Journal + * @param {binlogdata.StreamTablesRequest} message StreamTablesRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Journal.toObject = function toObject(message, options) { + StreamTablesRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) { + if (options.arrays || options.defaults) object.tables = []; - object.shard_gtids = []; - object.participants = []; - object.source_workflows = []; - } if (options.defaults) { - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.id = options.longs === String ? "0" : 0; - object.migration_type = options.enums === String ? "TABLES" : 0; - object.local_position = ""; + object.position = ""; + object.charset = null; } - if (message.id != null && message.hasOwnProperty("id")) - if (typeof message.id === "number") - object.id = options.longs === String ? String(message.id) : message.id; - else - object.id = options.longs === String ? $util.Long.prototype.toString.call(message.id) : options.longs === Number ? new $util.LongBits(message.id.low >>> 0, message.id.high >>> 0).toNumber() : message.id; - if (message.migration_type != null && message.hasOwnProperty("migration_type")) - object.migration_type = options.enums === String ? $root.binlogdata.MigrationType[message.migration_type] === undefined ? 
message.migration_type : $root.binlogdata.MigrationType[message.migration_type] : message.migration_type; + if (message.position != null && message.hasOwnProperty("position")) + object.position = message.position; if (message.tables && message.tables.length) { object.tables = []; for (let j = 0; j < message.tables.length; ++j) object.tables[j] = message.tables[j]; } - if (message.local_position != null && message.hasOwnProperty("local_position")) - object.local_position = message.local_position; - if (message.shard_gtids && message.shard_gtids.length) { - object.shard_gtids = []; - for (let j = 0; j < message.shard_gtids.length; ++j) - object.shard_gtids[j] = $root.binlogdata.ShardGtid.toObject(message.shard_gtids[j], options); - } - if (message.participants && message.participants.length) { - object.participants = []; - for (let j = 0; j < message.participants.length; ++j) - object.participants[j] = $root.binlogdata.KeyspaceShard.toObject(message.participants[j], options); - } - if (message.source_workflows && message.source_workflows.length) { - object.source_workflows = []; - for (let j = 0; j < message.source_workflows.length; ++j) - object.source_workflows[j] = message.source_workflows[j]; - } + if (message.charset != null && message.hasOwnProperty("charset")) + object.charset = $root.binlogdata.Charset.toObject(message.charset, options); return object; }; /** - * Converts this Journal to JSON. + * Converts this StreamTablesRequest to JSON. 
* @function toJSON - * @memberof binlogdata.Journal + * @memberof binlogdata.StreamTablesRequest * @instance * @returns {Object.} JSON object */ - Journal.prototype.toJSON = function toJSON() { + StreamTablesRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for Journal + * Gets the default type url for StreamTablesRequest * @function getTypeUrl - * @memberof binlogdata.Journal + * @memberof binlogdata.StreamTablesRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - Journal.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + StreamTablesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.Journal"; + return typeUrlPrefix + "/binlogdata.StreamTablesRequest"; }; - return Journal; + return StreamTablesRequest; })(); - binlogdata.VEvent = (function() { + binlogdata.StreamTablesResponse = (function() { /** - * Properties of a VEvent. + * Properties of a StreamTablesResponse. 
* @memberof binlogdata - * @interface IVEvent - * @property {binlogdata.VEventType|null} [type] VEvent type - * @property {number|Long|null} [timestamp] VEvent timestamp - * @property {string|null} [gtid] VEvent gtid - * @property {string|null} [statement] VEvent statement - * @property {binlogdata.IRowEvent|null} [row_event] VEvent row_event - * @property {binlogdata.IFieldEvent|null} [field_event] VEvent field_event - * @property {binlogdata.IVGtid|null} [vgtid] VEvent vgtid - * @property {binlogdata.IJournal|null} [journal] VEvent journal - * @property {string|null} [dml] VEvent dml - * @property {number|Long|null} [current_time] VEvent current_time - * @property {binlogdata.ILastPKEvent|null} [last_p_k_event] VEvent last_p_k_event - * @property {string|null} [keyspace] VEvent keyspace - * @property {string|null} [shard] VEvent shard - * @property {boolean|null} [throttled] VEvent throttled + * @interface IStreamTablesResponse + * @property {binlogdata.IBinlogTransaction|null} [binlog_transaction] StreamTablesResponse binlog_transaction */ /** - * Constructs a new VEvent. + * Constructs a new StreamTablesResponse. * @memberof binlogdata - * @classdesc Represents a VEvent. - * @implements IVEvent + * @classdesc Represents a StreamTablesResponse. + * @implements IStreamTablesResponse * @constructor - * @param {binlogdata.IVEvent=} [properties] Properties to set + * @param {binlogdata.IStreamTablesResponse=} [properties] Properties to set */ - function VEvent(properties) { + function StreamTablesResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -67086,257 +68014,298 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * VEvent type. - * @member {binlogdata.VEventType} type - * @memberof binlogdata.VEvent + * StreamTablesResponse binlog_transaction. 
+ * @member {binlogdata.IBinlogTransaction|null|undefined} binlog_transaction + * @memberof binlogdata.StreamTablesResponse * @instance */ - VEvent.prototype.type = 0; + StreamTablesResponse.prototype.binlog_transaction = null; /** - * VEvent timestamp. - * @member {number|Long} timestamp - * @memberof binlogdata.VEvent - * @instance - */ - VEvent.prototype.timestamp = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + * Creates a new StreamTablesResponse instance using the specified properties. + * @function create + * @memberof binlogdata.StreamTablesResponse + * @static + * @param {binlogdata.IStreamTablesResponse=} [properties] Properties to set + * @returns {binlogdata.StreamTablesResponse} StreamTablesResponse instance + */ + StreamTablesResponse.create = function create(properties) { + return new StreamTablesResponse(properties); + }; /** - * VEvent gtid. - * @member {string} gtid - * @memberof binlogdata.VEvent - * @instance + * Encodes the specified StreamTablesResponse message. Does not implicitly {@link binlogdata.StreamTablesResponse.verify|verify} messages. + * @function encode + * @memberof binlogdata.StreamTablesResponse + * @static + * @param {binlogdata.IStreamTablesResponse} message StreamTablesResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer */ - VEvent.prototype.gtid = ""; + StreamTablesResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.binlog_transaction != null && Object.hasOwnProperty.call(message, "binlog_transaction")) + $root.binlogdata.BinlogTransaction.encode(message.binlog_transaction, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; /** - * VEvent statement. - * @member {string} statement - * @memberof binlogdata.VEvent - * @instance + * Encodes the specified StreamTablesResponse message, length delimited. 
Does not implicitly {@link binlogdata.StreamTablesResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof binlogdata.StreamTablesResponse + * @static + * @param {binlogdata.IStreamTablesResponse} message StreamTablesResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer */ - VEvent.prototype.statement = ""; + StreamTablesResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; /** - * VEvent row_event. - * @member {binlogdata.IRowEvent|null|undefined} row_event - * @memberof binlogdata.VEvent - * @instance + * Decodes a StreamTablesResponse message from the specified reader or buffer. + * @function decode + * @memberof binlogdata.StreamTablesResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {binlogdata.StreamTablesResponse} StreamTablesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VEvent.prototype.row_event = null; + StreamTablesResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.StreamTablesResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.binlog_transaction = $root.binlogdata.BinlogTransaction.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; /** - * VEvent field_event. 
- * @member {binlogdata.IFieldEvent|null|undefined} field_event - * @memberof binlogdata.VEvent - * @instance + * Decodes a StreamTablesResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof binlogdata.StreamTablesResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {binlogdata.StreamTablesResponse} StreamTablesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VEvent.prototype.field_event = null; + StreamTablesResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; /** - * VEvent vgtid. - * @member {binlogdata.IVGtid|null|undefined} vgtid - * @memberof binlogdata.VEvent - * @instance + * Verifies a StreamTablesResponse message. + * @function verify + * @memberof binlogdata.StreamTablesResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - VEvent.prototype.vgtid = null; + StreamTablesResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.binlog_transaction != null && message.hasOwnProperty("binlog_transaction")) { + let error = $root.binlogdata.BinlogTransaction.verify(message.binlog_transaction); + if (error) + return "binlog_transaction." + error; + } + return null; + }; /** - * VEvent journal. - * @member {binlogdata.IJournal|null|undefined} journal - * @memberof binlogdata.VEvent - * @instance + * Creates a StreamTablesResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof binlogdata.StreamTablesResponse + * @static + * @param {Object.} object Plain object + * @returns {binlogdata.StreamTablesResponse} StreamTablesResponse */ - VEvent.prototype.journal = null; + StreamTablesResponse.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.StreamTablesResponse) + return object; + let message = new $root.binlogdata.StreamTablesResponse(); + if (object.binlog_transaction != null) { + if (typeof object.binlog_transaction !== "object") + throw TypeError(".binlogdata.StreamTablesResponse.binlog_transaction: object expected"); + message.binlog_transaction = $root.binlogdata.BinlogTransaction.fromObject(object.binlog_transaction); + } + return message; + }; /** - * VEvent dml. - * @member {string} dml - * @memberof binlogdata.VEvent - * @instance + * Creates a plain object from a StreamTablesResponse message. Also converts values to other types if specified. + * @function toObject + * @memberof binlogdata.StreamTablesResponse + * @static + * @param {binlogdata.StreamTablesResponse} message StreamTablesResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object */ - VEvent.prototype.dml = ""; + StreamTablesResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.binlog_transaction = null; + if (message.binlog_transaction != null && message.hasOwnProperty("binlog_transaction")) + object.binlog_transaction = $root.binlogdata.BinlogTransaction.toObject(message.binlog_transaction, options); + return object; + }; /** - * VEvent current_time. - * @member {number|Long} current_time - * @memberof binlogdata.VEvent + * Converts this StreamTablesResponse to JSON. + * @function toJSON + * @memberof binlogdata.StreamTablesResponse * @instance + * @returns {Object.} JSON object */ - VEvent.prototype.current_time = $util.Long ? 
$util.Long.fromBits(0,0,false) : 0; + StreamTablesResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; /** - * VEvent last_p_k_event. - * @member {binlogdata.ILastPKEvent|null|undefined} last_p_k_event - * @memberof binlogdata.VEvent - * @instance + * Gets the default type url for StreamTablesResponse + * @function getTypeUrl + * @memberof binlogdata.StreamTablesResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url */ - VEvent.prototype.last_p_k_event = null; + StreamTablesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/binlogdata.StreamTablesResponse"; + }; + + return StreamTablesResponse; + })(); + + binlogdata.CharsetConversion = (function() { /** - * VEvent keyspace. - * @member {string} keyspace - * @memberof binlogdata.VEvent - * @instance + * Properties of a CharsetConversion. + * @memberof binlogdata + * @interface ICharsetConversion + * @property {string|null} [from_charset] CharsetConversion from_charset + * @property {string|null} [to_charset] CharsetConversion to_charset */ - VEvent.prototype.keyspace = ""; /** - * VEvent shard. - * @member {string} shard - * @memberof binlogdata.VEvent + * Constructs a new CharsetConversion. + * @memberof binlogdata + * @classdesc Represents a CharsetConversion. + * @implements ICharsetConversion + * @constructor + * @param {binlogdata.ICharsetConversion=} [properties] Properties to set + */ + function CharsetConversion(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * CharsetConversion from_charset. 
+ * @member {string} from_charset + * @memberof binlogdata.CharsetConversion * @instance */ - VEvent.prototype.shard = ""; + CharsetConversion.prototype.from_charset = ""; /** - * VEvent throttled. - * @member {boolean} throttled - * @memberof binlogdata.VEvent + * CharsetConversion to_charset. + * @member {string} to_charset + * @memberof binlogdata.CharsetConversion * @instance */ - VEvent.prototype.throttled = false; + CharsetConversion.prototype.to_charset = ""; /** - * Creates a new VEvent instance using the specified properties. + * Creates a new CharsetConversion instance using the specified properties. * @function create - * @memberof binlogdata.VEvent + * @memberof binlogdata.CharsetConversion * @static - * @param {binlogdata.IVEvent=} [properties] Properties to set - * @returns {binlogdata.VEvent} VEvent instance + * @param {binlogdata.ICharsetConversion=} [properties] Properties to set + * @returns {binlogdata.CharsetConversion} CharsetConversion instance */ - VEvent.create = function create(properties) { - return new VEvent(properties); + CharsetConversion.create = function create(properties) { + return new CharsetConversion(properties); }; /** - * Encodes the specified VEvent message. Does not implicitly {@link binlogdata.VEvent.verify|verify} messages. + * Encodes the specified CharsetConversion message. Does not implicitly {@link binlogdata.CharsetConversion.verify|verify} messages. 
* @function encode - * @memberof binlogdata.VEvent + * @memberof binlogdata.CharsetConversion * @static - * @param {binlogdata.IVEvent} message VEvent message or plain object to encode + * @param {binlogdata.ICharsetConversion} message CharsetConversion message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VEvent.encode = function encode(message, writer) { + CharsetConversion.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.type != null && Object.hasOwnProperty.call(message, "type")) - writer.uint32(/* id 1, wireType 0 =*/8).int32(message.type); - if (message.timestamp != null && Object.hasOwnProperty.call(message, "timestamp")) - writer.uint32(/* id 2, wireType 0 =*/16).int64(message.timestamp); - if (message.gtid != null && Object.hasOwnProperty.call(message, "gtid")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.gtid); - if (message.statement != null && Object.hasOwnProperty.call(message, "statement")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.statement); - if (message.row_event != null && Object.hasOwnProperty.call(message, "row_event")) - $root.binlogdata.RowEvent.encode(message.row_event, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); - if (message.field_event != null && Object.hasOwnProperty.call(message, "field_event")) - $root.binlogdata.FieldEvent.encode(message.field_event, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); - if (message.vgtid != null && Object.hasOwnProperty.call(message, "vgtid")) - $root.binlogdata.VGtid.encode(message.vgtid, writer.uint32(/* id 7, wireType 2 =*/58).fork()).ldelim(); - if (message.journal != null && Object.hasOwnProperty.call(message, "journal")) - $root.binlogdata.Journal.encode(message.journal, writer.uint32(/* id 8, wireType 2 =*/66).fork()).ldelim(); - if (message.dml != null && Object.hasOwnProperty.call(message, "dml")) - writer.uint32(/* 
id 9, wireType 2 =*/74).string(message.dml); - if (message.current_time != null && Object.hasOwnProperty.call(message, "current_time")) - writer.uint32(/* id 20, wireType 0 =*/160).int64(message.current_time); - if (message.last_p_k_event != null && Object.hasOwnProperty.call(message, "last_p_k_event")) - $root.binlogdata.LastPKEvent.encode(message.last_p_k_event, writer.uint32(/* id 21, wireType 2 =*/170).fork()).ldelim(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 22, wireType 2 =*/178).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 23, wireType 2 =*/186).string(message.shard); - if (message.throttled != null && Object.hasOwnProperty.call(message, "throttled")) - writer.uint32(/* id 24, wireType 0 =*/192).bool(message.throttled); + if (message.from_charset != null && Object.hasOwnProperty.call(message, "from_charset")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.from_charset); + if (message.to_charset != null && Object.hasOwnProperty.call(message, "to_charset")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.to_charset); return writer; }; /** - * Encodes the specified VEvent message, length delimited. Does not implicitly {@link binlogdata.VEvent.verify|verify} messages. + * Encodes the specified CharsetConversion message, length delimited. Does not implicitly {@link binlogdata.CharsetConversion.verify|verify} messages. 
* @function encodeDelimited - * @memberof binlogdata.VEvent + * @memberof binlogdata.CharsetConversion * @static - * @param {binlogdata.IVEvent} message VEvent message or plain object to encode + * @param {binlogdata.ICharsetConversion} message CharsetConversion message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VEvent.encodeDelimited = function encodeDelimited(message, writer) { + CharsetConversion.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a VEvent message from the specified reader or buffer. + * Decodes a CharsetConversion message from the specified reader or buffer. * @function decode - * @memberof binlogdata.VEvent + * @memberof binlogdata.CharsetConversion * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.VEvent} VEvent + * @returns {binlogdata.CharsetConversion} CharsetConversion * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VEvent.decode = function decode(reader, length) { + CharsetConversion.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.VEvent(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.binlogdata.CharsetConversion(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.type = reader.int32(); + message.from_charset = reader.string(); break; } case 2: { - message.timestamp = reader.int64(); - break; - } - case 3: { - message.gtid = reader.string(); - break; - } - case 4: { - message.statement = reader.string(); - break; - } - case 5: { - message.row_event = $root.binlogdata.RowEvent.decode(reader, reader.uint32()); - break; - } - case 6: { - message.field_event = $root.binlogdata.FieldEvent.decode(reader, reader.uint32()); - break; - } - case 7: { - message.vgtid = $root.binlogdata.VGtid.decode(reader, reader.uint32()); - break; - } - case 8: { - message.journal = $root.binlogdata.Journal.decode(reader, reader.uint32()); - break; - } - case 9: { - message.dml = reader.string(); - break; - } - case 20: { - message.current_time = reader.int64(); - break; - } - case 21: { - message.last_p_k_event = $root.binlogdata.LastPKEvent.decode(reader, reader.uint32()); - break; - } - case 22: { - message.keyspace = reader.string(); - break; - } - case 23: { - message.shard = reader.string(); - break; - } - case 24: { - message.throttled = reader.bool(); + message.to_charset = reader.string(); break; } default: @@ -67348,398 +68317,141 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a VEvent message from the specified reader or buffer, length delimited. + * Decodes a CharsetConversion message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof binlogdata.VEvent + * @memberof binlogdata.CharsetConversion * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.VEvent} VEvent + * @returns {binlogdata.CharsetConversion} CharsetConversion * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VEvent.decodeDelimited = function decodeDelimited(reader) { + CharsetConversion.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a VEvent message. + * Verifies a CharsetConversion message. * @function verify - * @memberof binlogdata.VEvent + * @memberof binlogdata.CharsetConversion * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - VEvent.verify = function verify(message) { + CharsetConversion.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.type != null && message.hasOwnProperty("type")) - switch (message.type) { - default: - return "type: enum value expected"; - case 0: - case 1: - case 2: - case 3: - case 4: - case 5: - case 6: - case 7: - case 8: - case 9: - case 10: - case 11: - case 12: - case 13: - case 14: - case 15: - case 16: - case 17: - case 18: - case 19: - case 20: - break; - } - if (message.timestamp != null && message.hasOwnProperty("timestamp")) - if (!$util.isInteger(message.timestamp) && !(message.timestamp && $util.isInteger(message.timestamp.low) && $util.isInteger(message.timestamp.high))) - return "timestamp: integer|Long expected"; - if (message.gtid != null && message.hasOwnProperty("gtid")) - if (!$util.isString(message.gtid)) - return "gtid: string expected"; - if (message.statement != null && 
message.hasOwnProperty("statement")) - if (!$util.isString(message.statement)) - return "statement: string expected"; - if (message.row_event != null && message.hasOwnProperty("row_event")) { - let error = $root.binlogdata.RowEvent.verify(message.row_event); - if (error) - return "row_event." + error; - } - if (message.field_event != null && message.hasOwnProperty("field_event")) { - let error = $root.binlogdata.FieldEvent.verify(message.field_event); - if (error) - return "field_event." + error; - } - if (message.vgtid != null && message.hasOwnProperty("vgtid")) { - let error = $root.binlogdata.VGtid.verify(message.vgtid); - if (error) - return "vgtid." + error; - } - if (message.journal != null && message.hasOwnProperty("journal")) { - let error = $root.binlogdata.Journal.verify(message.journal); - if (error) - return "journal." + error; - } - if (message.dml != null && message.hasOwnProperty("dml")) - if (!$util.isString(message.dml)) - return "dml: string expected"; - if (message.current_time != null && message.hasOwnProperty("current_time")) - if (!$util.isInteger(message.current_time) && !(message.current_time && $util.isInteger(message.current_time.low) && $util.isInteger(message.current_time.high))) - return "current_time: integer|Long expected"; - if (message.last_p_k_event != null && message.hasOwnProperty("last_p_k_event")) { - let error = $root.binlogdata.LastPKEvent.verify(message.last_p_k_event); - if (error) - return "last_p_k_event." 
+ error; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.throttled != null && message.hasOwnProperty("throttled")) - if (typeof message.throttled !== "boolean") - return "throttled: boolean expected"; + if (message.from_charset != null && message.hasOwnProperty("from_charset")) + if (!$util.isString(message.from_charset)) + return "from_charset: string expected"; + if (message.to_charset != null && message.hasOwnProperty("to_charset")) + if (!$util.isString(message.to_charset)) + return "to_charset: string expected"; return null; }; /** - * Creates a VEvent message from a plain object. Also converts values to their respective internal types. + * Creates a CharsetConversion message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof binlogdata.VEvent + * @memberof binlogdata.CharsetConversion * @static * @param {Object.} object Plain object - * @returns {binlogdata.VEvent} VEvent + * @returns {binlogdata.CharsetConversion} CharsetConversion */ - VEvent.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.VEvent) + CharsetConversion.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.CharsetConversion) return object; - let message = new $root.binlogdata.VEvent(); - switch (object.type) { - default: - if (typeof object.type === "number") { - message.type = object.type; - break; - } - break; - case "UNKNOWN": - case 0: - message.type = 0; - break; - case "GTID": - case 1: - message.type = 1; - break; - case "BEGIN": - case 2: - message.type = 2; - break; - case "COMMIT": - case 3: - message.type = 3; - break; - case "ROLLBACK": - case 4: - message.type = 4; - break; - case "DDL": - case 5: - message.type = 5; - break; - case "INSERT": - case 6: - message.type = 6; - break; - case "REPLACE": - case 7: - message.type = 7; - break; - case "UPDATE": - case 8: - message.type = 8; - break; - case "DELETE": - case 9: - message.type = 9; - break; - case "SET": - case 10: - message.type = 10; - break; - case "OTHER": - case 11: - message.type = 11; - break; - case "ROW": - case 12: - message.type = 12; - break; - case "FIELD": - case 13: - message.type = 13; - break; - case "HEARTBEAT": - case 14: - message.type = 14; - break; - case "VGTID": - case 15: - message.type = 15; - break; - case "JOURNAL": - case 16: - message.type = 16; - break; - case "VERSION": - case 17: - message.type = 17; - break; - case "LASTPK": - case 18: - message.type = 18; - break; - case "SAVEPOINT": - case 19: - message.type = 19; - break; - case "COPY_COMPLETED": - case 20: - message.type = 20; - break; - } - if (object.timestamp != null) - if ($util.Long) - (message.timestamp = $util.Long.fromValue(object.timestamp)).unsigned = 
false; - else if (typeof object.timestamp === "string") - message.timestamp = parseInt(object.timestamp, 10); - else if (typeof object.timestamp === "number") - message.timestamp = object.timestamp; - else if (typeof object.timestamp === "object") - message.timestamp = new $util.LongBits(object.timestamp.low >>> 0, object.timestamp.high >>> 0).toNumber(); - if (object.gtid != null) - message.gtid = String(object.gtid); - if (object.statement != null) - message.statement = String(object.statement); - if (object.row_event != null) { - if (typeof object.row_event !== "object") - throw TypeError(".binlogdata.VEvent.row_event: object expected"); - message.row_event = $root.binlogdata.RowEvent.fromObject(object.row_event); - } - if (object.field_event != null) { - if (typeof object.field_event !== "object") - throw TypeError(".binlogdata.VEvent.field_event: object expected"); - message.field_event = $root.binlogdata.FieldEvent.fromObject(object.field_event); - } - if (object.vgtid != null) { - if (typeof object.vgtid !== "object") - throw TypeError(".binlogdata.VEvent.vgtid: object expected"); - message.vgtid = $root.binlogdata.VGtid.fromObject(object.vgtid); - } - if (object.journal != null) { - if (typeof object.journal !== "object") - throw TypeError(".binlogdata.VEvent.journal: object expected"); - message.journal = $root.binlogdata.Journal.fromObject(object.journal); - } - if (object.dml != null) - message.dml = String(object.dml); - if (object.current_time != null) - if ($util.Long) - (message.current_time = $util.Long.fromValue(object.current_time)).unsigned = false; - else if (typeof object.current_time === "string") - message.current_time = parseInt(object.current_time, 10); - else if (typeof object.current_time === "number") - message.current_time = object.current_time; - else if (typeof object.current_time === "object") - message.current_time = new $util.LongBits(object.current_time.low >>> 0, object.current_time.high >>> 0).toNumber(); - if 
(object.last_p_k_event != null) { - if (typeof object.last_p_k_event !== "object") - throw TypeError(".binlogdata.VEvent.last_p_k_event: object expected"); - message.last_p_k_event = $root.binlogdata.LastPKEvent.fromObject(object.last_p_k_event); - } - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.throttled != null) - message.throttled = Boolean(object.throttled); + let message = new $root.binlogdata.CharsetConversion(); + if (object.from_charset != null) + message.from_charset = String(object.from_charset); + if (object.to_charset != null) + message.to_charset = String(object.to_charset); return message; }; /** - * Creates a plain object from a VEvent message. Also converts values to other types if specified. + * Creates a plain object from a CharsetConversion message. Also converts values to other types if specified. * @function toObject - * @memberof binlogdata.VEvent + * @memberof binlogdata.CharsetConversion * @static - * @param {binlogdata.VEvent} message VEvent + * @param {binlogdata.CharsetConversion} message CharsetConversion * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - VEvent.toObject = function toObject(message, options) { + CharsetConversion.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.type = options.enums === String ? "UNKNOWN" : 0; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.timestamp = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.timestamp = options.longs === String ? 
"0" : 0; - object.gtid = ""; - object.statement = ""; - object.row_event = null; - object.field_event = null; - object.vgtid = null; - object.journal = null; - object.dml = ""; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.current_time = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.current_time = options.longs === String ? "0" : 0; - object.last_p_k_event = null; - object.keyspace = ""; - object.shard = ""; - object.throttled = false; + object.from_charset = ""; + object.to_charset = ""; } - if (message.type != null && message.hasOwnProperty("type")) - object.type = options.enums === String ? $root.binlogdata.VEventType[message.type] === undefined ? message.type : $root.binlogdata.VEventType[message.type] : message.type; - if (message.timestamp != null && message.hasOwnProperty("timestamp")) - if (typeof message.timestamp === "number") - object.timestamp = options.longs === String ? String(message.timestamp) : message.timestamp; - else - object.timestamp = options.longs === String ? $util.Long.prototype.toString.call(message.timestamp) : options.longs === Number ? 
new $util.LongBits(message.timestamp.low >>> 0, message.timestamp.high >>> 0).toNumber() : message.timestamp; - if (message.gtid != null && message.hasOwnProperty("gtid")) - object.gtid = message.gtid; - if (message.statement != null && message.hasOwnProperty("statement")) - object.statement = message.statement; - if (message.row_event != null && message.hasOwnProperty("row_event")) - object.row_event = $root.binlogdata.RowEvent.toObject(message.row_event, options); - if (message.field_event != null && message.hasOwnProperty("field_event")) - object.field_event = $root.binlogdata.FieldEvent.toObject(message.field_event, options); - if (message.vgtid != null && message.hasOwnProperty("vgtid")) - object.vgtid = $root.binlogdata.VGtid.toObject(message.vgtid, options); - if (message.journal != null && message.hasOwnProperty("journal")) - object.journal = $root.binlogdata.Journal.toObject(message.journal, options); - if (message.dml != null && message.hasOwnProperty("dml")) - object.dml = message.dml; - if (message.current_time != null && message.hasOwnProperty("current_time")) - if (typeof message.current_time === "number") - object.current_time = options.longs === String ? String(message.current_time) : message.current_time; - else - object.current_time = options.longs === String ? $util.Long.prototype.toString.call(message.current_time) : options.longs === Number ? 
new $util.LongBits(message.current_time.low >>> 0, message.current_time.high >>> 0).toNumber() : message.current_time; - if (message.last_p_k_event != null && message.hasOwnProperty("last_p_k_event")) - object.last_p_k_event = $root.binlogdata.LastPKEvent.toObject(message.last_p_k_event, options); - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.throttled != null && message.hasOwnProperty("throttled")) - object.throttled = message.throttled; + if (message.from_charset != null && message.hasOwnProperty("from_charset")) + object.from_charset = message.from_charset; + if (message.to_charset != null && message.hasOwnProperty("to_charset")) + object.to_charset = message.to_charset; return object; }; /** - * Converts this VEvent to JSON. + * Converts this CharsetConversion to JSON. * @function toJSON - * @memberof binlogdata.VEvent + * @memberof binlogdata.CharsetConversion * @instance * @returns {Object.} JSON object */ - VEvent.prototype.toJSON = function toJSON() { + CharsetConversion.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for VEvent + * Gets the default type url for CharsetConversion * @function getTypeUrl - * @memberof binlogdata.VEvent + * @memberof binlogdata.CharsetConversion * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - VEvent.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + CharsetConversion.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.VEvent"; + return typeUrlPrefix + "/binlogdata.CharsetConversion"; }; - return VEvent; + return CharsetConversion; })(); - 
binlogdata.MinimalTable = (function() { + binlogdata.Rule = (function() { /** - * Properties of a MinimalTable. + * Properties of a Rule. * @memberof binlogdata - * @interface IMinimalTable - * @property {string|null} [name] MinimalTable name - * @property {Array.|null} [fields] MinimalTable fields - * @property {Array.|null} [p_k_columns] MinimalTable p_k_columns + * @interface IRule + * @property {string|null} [match] Rule match + * @property {string|null} [filter] Rule filter + * @property {Object.|null} [convert_enum_to_text] Rule convert_enum_to_text + * @property {Object.|null} [convert_charset] Rule convert_charset + * @property {string|null} [source_unique_key_columns] Rule source_unique_key_columns + * @property {string|null} [target_unique_key_columns] Rule target_unique_key_columns + * @property {string|null} [source_unique_key_target_columns] Rule source_unique_key_target_columns + * @property {Object.|null} [convert_int_to_enum] Rule convert_int_to_enum */ /** - * Constructs a new MinimalTable. + * Constructs a new Rule. * @memberof binlogdata - * @classdesc Represents a MinimalTable. - * @implements IMinimalTable + * @classdesc Represents a Rule. + * @implements IRule * @constructor - * @param {binlogdata.IMinimalTable=} [properties] Properties to set + * @param {binlogdata.IRule=} [properties] Properties to set */ - function MinimalTable(properties) { - this.fields = []; - this.p_k_columns = []; + function Rule(properties) { + this.convert_enum_to_text = {}; + this.convert_charset = {}; + this.convert_int_to_enum = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -67747,117 +68459,235 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * MinimalTable name. - * @member {string} name - * @memberof binlogdata.MinimalTable + * Rule match. 
+ * @member {string} match + * @memberof binlogdata.Rule * @instance */ - MinimalTable.prototype.name = ""; + Rule.prototype.match = ""; /** - * MinimalTable fields. - * @member {Array.} fields - * @memberof binlogdata.MinimalTable + * Rule filter. + * @member {string} filter + * @memberof binlogdata.Rule * @instance */ - MinimalTable.prototype.fields = $util.emptyArray; + Rule.prototype.filter = ""; /** - * MinimalTable p_k_columns. - * @member {Array.} p_k_columns - * @memberof binlogdata.MinimalTable + * Rule convert_enum_to_text. + * @member {Object.} convert_enum_to_text + * @memberof binlogdata.Rule * @instance */ - MinimalTable.prototype.p_k_columns = $util.emptyArray; + Rule.prototype.convert_enum_to_text = $util.emptyObject; /** - * Creates a new MinimalTable instance using the specified properties. + * Rule convert_charset. + * @member {Object.} convert_charset + * @memberof binlogdata.Rule + * @instance + */ + Rule.prototype.convert_charset = $util.emptyObject; + + /** + * Rule source_unique_key_columns. + * @member {string} source_unique_key_columns + * @memberof binlogdata.Rule + * @instance + */ + Rule.prototype.source_unique_key_columns = ""; + + /** + * Rule target_unique_key_columns. + * @member {string} target_unique_key_columns + * @memberof binlogdata.Rule + * @instance + */ + Rule.prototype.target_unique_key_columns = ""; + + /** + * Rule source_unique_key_target_columns. + * @member {string} source_unique_key_target_columns + * @memberof binlogdata.Rule + * @instance + */ + Rule.prototype.source_unique_key_target_columns = ""; + + /** + * Rule convert_int_to_enum. + * @member {Object.} convert_int_to_enum + * @memberof binlogdata.Rule + * @instance + */ + Rule.prototype.convert_int_to_enum = $util.emptyObject; + + /** + * Creates a new Rule instance using the specified properties. 
* @function create - * @memberof binlogdata.MinimalTable + * @memberof binlogdata.Rule * @static - * @param {binlogdata.IMinimalTable=} [properties] Properties to set - * @returns {binlogdata.MinimalTable} MinimalTable instance + * @param {binlogdata.IRule=} [properties] Properties to set + * @returns {binlogdata.Rule} Rule instance */ - MinimalTable.create = function create(properties) { - return new MinimalTable(properties); + Rule.create = function create(properties) { + return new Rule(properties); }; /** - * Encodes the specified MinimalTable message. Does not implicitly {@link binlogdata.MinimalTable.verify|verify} messages. + * Encodes the specified Rule message. Does not implicitly {@link binlogdata.Rule.verify|verify} messages. * @function encode - * @memberof binlogdata.MinimalTable + * @memberof binlogdata.Rule * @static - * @param {binlogdata.IMinimalTable} message MinimalTable message or plain object to encode + * @param {binlogdata.IRule} message Rule message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - MinimalTable.encode = function encode(message, writer) { + Rule.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.name != null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); - if (message.fields != null && message.fields.length) - for (let i = 0; i < message.fields.length; ++i) - $root.query.Field.encode(message.fields[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.p_k_columns != null && message.p_k_columns.length) { - writer.uint32(/* id 3, wireType 2 =*/26).fork(); - for (let i = 0; i < message.p_k_columns.length; ++i) - writer.int64(message.p_k_columns[i]); - writer.ldelim(); - } + if (message.match != null && Object.hasOwnProperty.call(message, "match")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.match); + if 
(message.filter != null && Object.hasOwnProperty.call(message, "filter")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.filter); + if (message.convert_enum_to_text != null && Object.hasOwnProperty.call(message, "convert_enum_to_text")) + for (let keys = Object.keys(message.convert_enum_to_text), i = 0; i < keys.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.convert_enum_to_text[keys[i]]).ldelim(); + if (message.convert_charset != null && Object.hasOwnProperty.call(message, "convert_charset")) + for (let keys = Object.keys(message.convert_charset), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 4, wireType 2 =*/34).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.binlogdata.CharsetConversion.encode(message.convert_charset[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } + if (message.source_unique_key_columns != null && Object.hasOwnProperty.call(message, "source_unique_key_columns")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.source_unique_key_columns); + if (message.target_unique_key_columns != null && Object.hasOwnProperty.call(message, "target_unique_key_columns")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.target_unique_key_columns); + if (message.source_unique_key_target_columns != null && Object.hasOwnProperty.call(message, "source_unique_key_target_columns")) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.source_unique_key_target_columns); + if (message.convert_int_to_enum != null && Object.hasOwnProperty.call(message, "convert_int_to_enum")) + for (let keys = Object.keys(message.convert_int_to_enum), i = 0; i < keys.length; ++i) + writer.uint32(/* id 8, wireType 2 =*/66).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 0 =*/16).bool(message.convert_int_to_enum[keys[i]]).ldelim(); return writer; }; /** - * 
Encodes the specified MinimalTable message, length delimited. Does not implicitly {@link binlogdata.MinimalTable.verify|verify} messages. + * Encodes the specified Rule message, length delimited. Does not implicitly {@link binlogdata.Rule.verify|verify} messages. * @function encodeDelimited - * @memberof binlogdata.MinimalTable + * @memberof binlogdata.Rule * @static - * @param {binlogdata.IMinimalTable} message MinimalTable message or plain object to encode + * @param {binlogdata.IRule} message Rule message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - MinimalTable.encodeDelimited = function encodeDelimited(message, writer) { + Rule.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a MinimalTable message from the specified reader or buffer. + * Decodes a Rule message from the specified reader or buffer. * @function decode - * @memberof binlogdata.MinimalTable + * @memberof binlogdata.Rule * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.MinimalTable} MinimalTable + * @returns {binlogdata.Rule} Rule * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MinimalTable.decode = function decode(reader, length) { + Rule.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.MinimalTable(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.binlogdata.Rule(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.name = reader.string(); + message.match = reader.string(); break; } case 2: { - if (!(message.fields && message.fields.length)) - message.fields = []; - message.fields.push($root.query.Field.decode(reader, reader.uint32())); + message.filter = reader.string(); break; } case 3: { - if (!(message.p_k_columns && message.p_k_columns.length)) - message.p_k_columns = []; - if ((tag & 7) === 2) { - let end2 = reader.uint32() + reader.pos; - while (reader.pos < end2) - message.p_k_columns.push(reader.int64()); - } else - message.p_k_columns.push(reader.int64()); + if (message.convert_enum_to_text === $util.emptyObject) + message.convert_enum_to_text = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = ""; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = reader.string(); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.convert_enum_to_text[key] = value; + break; + } + case 4: { + if (message.convert_charset === $util.emptyObject) + message.convert_charset = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.binlogdata.CharsetConversion.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.convert_charset[key] = value; + break; + } + case 5: { + message.source_unique_key_columns = reader.string(); + break; + } + case 6: { + message.target_unique_key_columns = reader.string(); + break; + } + case 7: { + message.source_unique_key_target_columns = reader.string(); + break; + } + case 8: { + if (message.convert_int_to_enum === 
$util.emptyObject) + message.convert_int_to_enum = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = false; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = reader.bool(); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.convert_int_to_enum[key] = value; break; } default: @@ -67869,180 +68699,230 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a MinimalTable message from the specified reader or buffer, length delimited. + * Decodes a Rule message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof binlogdata.MinimalTable + * @memberof binlogdata.Rule * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.MinimalTable} MinimalTable + * @returns {binlogdata.Rule} Rule * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MinimalTable.decodeDelimited = function decodeDelimited(reader) { + Rule.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a MinimalTable message. + * Verifies a Rule message. 
* @function verify - * @memberof binlogdata.MinimalTable + * @memberof binlogdata.Rule * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - MinimalTable.verify = function verify(message) { + Rule.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; - if (message.fields != null && message.hasOwnProperty("fields")) { - if (!Array.isArray(message.fields)) - return "fields: array expected"; - for (let i = 0; i < message.fields.length; ++i) { - let error = $root.query.Field.verify(message.fields[i]); + if (message.match != null && message.hasOwnProperty("match")) + if (!$util.isString(message.match)) + return "match: string expected"; + if (message.filter != null && message.hasOwnProperty("filter")) + if (!$util.isString(message.filter)) + return "filter: string expected"; + if (message.convert_enum_to_text != null && message.hasOwnProperty("convert_enum_to_text")) { + if (!$util.isObject(message.convert_enum_to_text)) + return "convert_enum_to_text: object expected"; + let key = Object.keys(message.convert_enum_to_text); + for (let i = 0; i < key.length; ++i) + if (!$util.isString(message.convert_enum_to_text[key[i]])) + return "convert_enum_to_text: string{k:string} expected"; + } + if (message.convert_charset != null && message.hasOwnProperty("convert_charset")) { + if (!$util.isObject(message.convert_charset)) + return "convert_charset: object expected"; + let key = Object.keys(message.convert_charset); + for (let i = 0; i < key.length; ++i) { + let error = $root.binlogdata.CharsetConversion.verify(message.convert_charset[key[i]]); if (error) - return "fields." + error; + return "convert_charset." 
+ error; } } - if (message.p_k_columns != null && message.hasOwnProperty("p_k_columns")) { - if (!Array.isArray(message.p_k_columns)) - return "p_k_columns: array expected"; - for (let i = 0; i < message.p_k_columns.length; ++i) - if (!$util.isInteger(message.p_k_columns[i]) && !(message.p_k_columns[i] && $util.isInteger(message.p_k_columns[i].low) && $util.isInteger(message.p_k_columns[i].high))) - return "p_k_columns: integer|Long[] expected"; - } - return null; - }; - - /** - * Creates a MinimalTable message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof binlogdata.MinimalTable - * @static - * @param {Object.} object Plain object - * @returns {binlogdata.MinimalTable} MinimalTable - */ - MinimalTable.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.MinimalTable) - return object; - let message = new $root.binlogdata.MinimalTable(); - if (object.name != null) - message.name = String(object.name); - if (object.fields) { - if (!Array.isArray(object.fields)) - throw TypeError(".binlogdata.MinimalTable.fields: array expected"); - message.fields = []; - for (let i = 0; i < object.fields.length; ++i) { - if (typeof object.fields[i] !== "object") - throw TypeError(".binlogdata.MinimalTable.fields: object expected"); - message.fields[i] = $root.query.Field.fromObject(object.fields[i]); + if (message.source_unique_key_columns != null && message.hasOwnProperty("source_unique_key_columns")) + if (!$util.isString(message.source_unique_key_columns)) + return "source_unique_key_columns: string expected"; + if (message.target_unique_key_columns != null && message.hasOwnProperty("target_unique_key_columns")) + if (!$util.isString(message.target_unique_key_columns)) + return "target_unique_key_columns: string expected"; + if (message.source_unique_key_target_columns != null && message.hasOwnProperty("source_unique_key_target_columns")) + if 
(!$util.isString(message.source_unique_key_target_columns)) + return "source_unique_key_target_columns: string expected"; + if (message.convert_int_to_enum != null && message.hasOwnProperty("convert_int_to_enum")) { + if (!$util.isObject(message.convert_int_to_enum)) + return "convert_int_to_enum: object expected"; + let key = Object.keys(message.convert_int_to_enum); + for (let i = 0; i < key.length; ++i) + if (typeof message.convert_int_to_enum[key[i]] !== "boolean") + return "convert_int_to_enum: boolean{k:string} expected"; + } + return null; + }; + + /** + * Creates a Rule message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof binlogdata.Rule + * @static + * @param {Object.} object Plain object + * @returns {binlogdata.Rule} Rule + */ + Rule.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.Rule) + return object; + let message = new $root.binlogdata.Rule(); + if (object.match != null) + message.match = String(object.match); + if (object.filter != null) + message.filter = String(object.filter); + if (object.convert_enum_to_text) { + if (typeof object.convert_enum_to_text !== "object") + throw TypeError(".binlogdata.Rule.convert_enum_to_text: object expected"); + message.convert_enum_to_text = {}; + for (let keys = Object.keys(object.convert_enum_to_text), i = 0; i < keys.length; ++i) + message.convert_enum_to_text[keys[i]] = String(object.convert_enum_to_text[keys[i]]); + } + if (object.convert_charset) { + if (typeof object.convert_charset !== "object") + throw TypeError(".binlogdata.Rule.convert_charset: object expected"); + message.convert_charset = {}; + for (let keys = Object.keys(object.convert_charset), i = 0; i < keys.length; ++i) { + if (typeof object.convert_charset[keys[i]] !== "object") + throw TypeError(".binlogdata.Rule.convert_charset: object expected"); + message.convert_charset[keys[i]] = 
$root.binlogdata.CharsetConversion.fromObject(object.convert_charset[keys[i]]); } } - if (object.p_k_columns) { - if (!Array.isArray(object.p_k_columns)) - throw TypeError(".binlogdata.MinimalTable.p_k_columns: array expected"); - message.p_k_columns = []; - for (let i = 0; i < object.p_k_columns.length; ++i) - if ($util.Long) - (message.p_k_columns[i] = $util.Long.fromValue(object.p_k_columns[i])).unsigned = false; - else if (typeof object.p_k_columns[i] === "string") - message.p_k_columns[i] = parseInt(object.p_k_columns[i], 10); - else if (typeof object.p_k_columns[i] === "number") - message.p_k_columns[i] = object.p_k_columns[i]; - else if (typeof object.p_k_columns[i] === "object") - message.p_k_columns[i] = new $util.LongBits(object.p_k_columns[i].low >>> 0, object.p_k_columns[i].high >>> 0).toNumber(); + if (object.source_unique_key_columns != null) + message.source_unique_key_columns = String(object.source_unique_key_columns); + if (object.target_unique_key_columns != null) + message.target_unique_key_columns = String(object.target_unique_key_columns); + if (object.source_unique_key_target_columns != null) + message.source_unique_key_target_columns = String(object.source_unique_key_target_columns); + if (object.convert_int_to_enum) { + if (typeof object.convert_int_to_enum !== "object") + throw TypeError(".binlogdata.Rule.convert_int_to_enum: object expected"); + message.convert_int_to_enum = {}; + for (let keys = Object.keys(object.convert_int_to_enum), i = 0; i < keys.length; ++i) + message.convert_int_to_enum[keys[i]] = Boolean(object.convert_int_to_enum[keys[i]]); } return message; }; /** - * Creates a plain object from a MinimalTable message. Also converts values to other types if specified. + * Creates a plain object from a Rule message. Also converts values to other types if specified. 
* @function toObject - * @memberof binlogdata.MinimalTable + * @memberof binlogdata.Rule * @static - * @param {binlogdata.MinimalTable} message MinimalTable + * @param {binlogdata.Rule} message Rule * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - MinimalTable.toObject = function toObject(message, options) { + Rule.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) { - object.fields = []; - object.p_k_columns = []; + if (options.objects || options.defaults) { + object.convert_enum_to_text = {}; + object.convert_charset = {}; + object.convert_int_to_enum = {}; } - if (options.defaults) - object.name = ""; - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; - if (message.fields && message.fields.length) { - object.fields = []; - for (let j = 0; j < message.fields.length; ++j) - object.fields[j] = $root.query.Field.toObject(message.fields[j], options); + if (options.defaults) { + object.match = ""; + object.filter = ""; + object.source_unique_key_columns = ""; + object.target_unique_key_columns = ""; + object.source_unique_key_target_columns = ""; } - if (message.p_k_columns && message.p_k_columns.length) { - object.p_k_columns = []; - for (let j = 0; j < message.p_k_columns.length; ++j) - if (typeof message.p_k_columns[j] === "number") - object.p_k_columns[j] = options.longs === String ? String(message.p_k_columns[j]) : message.p_k_columns[j]; - else - object.p_k_columns[j] = options.longs === String ? $util.Long.prototype.toString.call(message.p_k_columns[j]) : options.longs === Number ? 
new $util.LongBits(message.p_k_columns[j].low >>> 0, message.p_k_columns[j].high >>> 0).toNumber() : message.p_k_columns[j]; + if (message.match != null && message.hasOwnProperty("match")) + object.match = message.match; + if (message.filter != null && message.hasOwnProperty("filter")) + object.filter = message.filter; + let keys2; + if (message.convert_enum_to_text && (keys2 = Object.keys(message.convert_enum_to_text)).length) { + object.convert_enum_to_text = {}; + for (let j = 0; j < keys2.length; ++j) + object.convert_enum_to_text[keys2[j]] = message.convert_enum_to_text[keys2[j]]; + } + if (message.convert_charset && (keys2 = Object.keys(message.convert_charset)).length) { + object.convert_charset = {}; + for (let j = 0; j < keys2.length; ++j) + object.convert_charset[keys2[j]] = $root.binlogdata.CharsetConversion.toObject(message.convert_charset[keys2[j]], options); + } + if (message.source_unique_key_columns != null && message.hasOwnProperty("source_unique_key_columns")) + object.source_unique_key_columns = message.source_unique_key_columns; + if (message.target_unique_key_columns != null && message.hasOwnProperty("target_unique_key_columns")) + object.target_unique_key_columns = message.target_unique_key_columns; + if (message.source_unique_key_target_columns != null && message.hasOwnProperty("source_unique_key_target_columns")) + object.source_unique_key_target_columns = message.source_unique_key_target_columns; + if (message.convert_int_to_enum && (keys2 = Object.keys(message.convert_int_to_enum)).length) { + object.convert_int_to_enum = {}; + for (let j = 0; j < keys2.length; ++j) + object.convert_int_to_enum[keys2[j]] = message.convert_int_to_enum[keys2[j]]; } return object; }; /** - * Converts this MinimalTable to JSON. + * Converts this Rule to JSON. 
* @function toJSON - * @memberof binlogdata.MinimalTable + * @memberof binlogdata.Rule * @instance * @returns {Object.} JSON object */ - MinimalTable.prototype.toJSON = function toJSON() { + Rule.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for MinimalTable + * Gets the default type url for Rule * @function getTypeUrl - * @memberof binlogdata.MinimalTable + * @memberof binlogdata.Rule * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - MinimalTable.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + Rule.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.MinimalTable"; + return typeUrlPrefix + "/binlogdata.Rule"; }; - return MinimalTable; + return Rule; })(); - binlogdata.MinimalSchema = (function() { + binlogdata.Filter = (function() { /** - * Properties of a MinimalSchema. + * Properties of a Filter. * @memberof binlogdata - * @interface IMinimalSchema - * @property {Array.|null} [tables] MinimalSchema tables + * @interface IFilter + * @property {Array.|null} [rules] Filter rules + * @property {binlogdata.Filter.FieldEventMode|null} [field_event_mode] Filter field_event_mode + * @property {number|Long|null} [workflow_type] Filter workflow_type + * @property {string|null} [workflow_name] Filter workflow_name */ /** - * Constructs a new MinimalSchema. + * Constructs a new Filter. * @memberof binlogdata - * @classdesc Represents a MinimalSchema. - * @implements IMinimalSchema + * @classdesc Represents a Filter. 
+ * @implements IFilter * @constructor - * @param {binlogdata.IMinimalSchema=} [properties] Properties to set + * @param {binlogdata.IFilter=} [properties] Properties to set */ - function MinimalSchema(properties) { - this.tables = []; + function Filter(properties) { + this.rules = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -68050,78 +68930,120 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * MinimalSchema tables. - * @member {Array.} tables - * @memberof binlogdata.MinimalSchema + * Filter rules. + * @member {Array.} rules + * @memberof binlogdata.Filter * @instance */ - MinimalSchema.prototype.tables = $util.emptyArray; + Filter.prototype.rules = $util.emptyArray; /** - * Creates a new MinimalSchema instance using the specified properties. + * Filter field_event_mode. + * @member {binlogdata.Filter.FieldEventMode} field_event_mode + * @memberof binlogdata.Filter + * @instance + */ + Filter.prototype.field_event_mode = 0; + + /** + * Filter workflow_type. + * @member {number|Long} workflow_type + * @memberof binlogdata.Filter + * @instance + */ + Filter.prototype.workflow_type = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * Filter workflow_name. + * @member {string} workflow_name + * @memberof binlogdata.Filter + * @instance + */ + Filter.prototype.workflow_name = ""; + + /** + * Creates a new Filter instance using the specified properties. 
* @function create - * @memberof binlogdata.MinimalSchema + * @memberof binlogdata.Filter * @static - * @param {binlogdata.IMinimalSchema=} [properties] Properties to set - * @returns {binlogdata.MinimalSchema} MinimalSchema instance + * @param {binlogdata.IFilter=} [properties] Properties to set + * @returns {binlogdata.Filter} Filter instance */ - MinimalSchema.create = function create(properties) { - return new MinimalSchema(properties); + Filter.create = function create(properties) { + return new Filter(properties); }; /** - * Encodes the specified MinimalSchema message. Does not implicitly {@link binlogdata.MinimalSchema.verify|verify} messages. + * Encodes the specified Filter message. Does not implicitly {@link binlogdata.Filter.verify|verify} messages. * @function encode - * @memberof binlogdata.MinimalSchema + * @memberof binlogdata.Filter * @static - * @param {binlogdata.IMinimalSchema} message MinimalSchema message or plain object to encode + * @param {binlogdata.IFilter} message Filter message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - MinimalSchema.encode = function encode(message, writer) { + Filter.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tables != null && message.tables.length) - for (let i = 0; i < message.tables.length; ++i) - $root.binlogdata.MinimalTable.encode(message.tables[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.rules != null && message.rules.length) + for (let i = 0; i < message.rules.length; ++i) + $root.binlogdata.Rule.encode(message.rules[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.field_event_mode != null && Object.hasOwnProperty.call(message, "field_event_mode")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.field_event_mode); + if (message.workflow_type != null && Object.hasOwnProperty.call(message, "workflow_type")) + 
writer.uint32(/* id 3, wireType 0 =*/24).int64(message.workflow_type); + if (message.workflow_name != null && Object.hasOwnProperty.call(message, "workflow_name")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.workflow_name); return writer; }; /** - * Encodes the specified MinimalSchema message, length delimited. Does not implicitly {@link binlogdata.MinimalSchema.verify|verify} messages. + * Encodes the specified Filter message, length delimited. Does not implicitly {@link binlogdata.Filter.verify|verify} messages. * @function encodeDelimited - * @memberof binlogdata.MinimalSchema + * @memberof binlogdata.Filter * @static - * @param {binlogdata.IMinimalSchema} message MinimalSchema message or plain object to encode + * @param {binlogdata.IFilter} message Filter message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - MinimalSchema.encodeDelimited = function encodeDelimited(message, writer) { + Filter.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a MinimalSchema message from the specified reader or buffer. + * Decodes a Filter message from the specified reader or buffer. * @function decode - * @memberof binlogdata.MinimalSchema + * @memberof binlogdata.Filter * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.MinimalSchema} MinimalSchema + * @returns {binlogdata.Filter} Filter * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MinimalSchema.decode = function decode(reader, length) { + Filter.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.binlogdata.MinimalSchema(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.Filter(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.tables && message.tables.length)) - message.tables = []; - message.tables.push($root.binlogdata.MinimalTable.decode(reader, reader.uint32())); + if (!(message.rules && message.rules.length)) + message.rules = []; + message.rules.push($root.binlogdata.Rule.decode(reader, reader.uint32())); + break; + } + case 2: { + message.field_event_mode = reader.int32(); + break; + } + case 3: { + message.workflow_type = reader.int64(); + break; + } + case 4: { + message.workflow_name = reader.string(); break; } default: @@ -68133,294 +69055,537 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a MinimalSchema message from the specified reader or buffer, length delimited. + * Decodes a Filter message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof binlogdata.MinimalSchema + * @memberof binlogdata.Filter * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.MinimalSchema} MinimalSchema + * @returns {binlogdata.Filter} Filter * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MinimalSchema.decodeDelimited = function decodeDelimited(reader) { + Filter.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a MinimalSchema message. + * Verifies a Filter message. 
* @function verify - * @memberof binlogdata.MinimalSchema + * @memberof binlogdata.Filter * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - MinimalSchema.verify = function verify(message) { + Filter.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tables != null && message.hasOwnProperty("tables")) { - if (!Array.isArray(message.tables)) - return "tables: array expected"; - for (let i = 0; i < message.tables.length; ++i) { - let error = $root.binlogdata.MinimalTable.verify(message.tables[i]); + if (message.rules != null && message.hasOwnProperty("rules")) { + if (!Array.isArray(message.rules)) + return "rules: array expected"; + for (let i = 0; i < message.rules.length; ++i) { + let error = $root.binlogdata.Rule.verify(message.rules[i]); if (error) - return "tables." + error; + return "rules." + error; } } + if (message.field_event_mode != null && message.hasOwnProperty("field_event_mode")) + switch (message.field_event_mode) { + default: + return "field_event_mode: enum value expected"; + case 0: + case 1: + break; + } + if (message.workflow_type != null && message.hasOwnProperty("workflow_type")) + if (!$util.isInteger(message.workflow_type) && !(message.workflow_type && $util.isInteger(message.workflow_type.low) && $util.isInteger(message.workflow_type.high))) + return "workflow_type: integer|Long expected"; + if (message.workflow_name != null && message.hasOwnProperty("workflow_name")) + if (!$util.isString(message.workflow_name)) + return "workflow_name: string expected"; return null; }; /** - * Creates a MinimalSchema message from a plain object. Also converts values to their respective internal types. + * Creates a Filter message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof binlogdata.MinimalSchema + * @memberof binlogdata.Filter * @static * @param {Object.} object Plain object - * @returns {binlogdata.MinimalSchema} MinimalSchema + * @returns {binlogdata.Filter} Filter */ - MinimalSchema.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.MinimalSchema) + Filter.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.Filter) return object; - let message = new $root.binlogdata.MinimalSchema(); - if (object.tables) { - if (!Array.isArray(object.tables)) - throw TypeError(".binlogdata.MinimalSchema.tables: array expected"); - message.tables = []; - for (let i = 0; i < object.tables.length; ++i) { - if (typeof object.tables[i] !== "object") - throw TypeError(".binlogdata.MinimalSchema.tables: object expected"); - message.tables[i] = $root.binlogdata.MinimalTable.fromObject(object.tables[i]); + let message = new $root.binlogdata.Filter(); + if (object.rules) { + if (!Array.isArray(object.rules)) + throw TypeError(".binlogdata.Filter.rules: array expected"); + message.rules = []; + for (let i = 0; i < object.rules.length; ++i) { + if (typeof object.rules[i] !== "object") + throw TypeError(".binlogdata.Filter.rules: object expected"); + message.rules[i] = $root.binlogdata.Rule.fromObject(object.rules[i]); + } + } + switch (object.field_event_mode) { + default: + if (typeof object.field_event_mode === "number") { + message.field_event_mode = object.field_event_mode; + break; } + break; + case "ERR_ON_MISMATCH": + case 0: + message.field_event_mode = 0; + break; + case "BEST_EFFORT": + case 1: + message.field_event_mode = 1; + break; } + if (object.workflow_type != null) + if ($util.Long) + (message.workflow_type = $util.Long.fromValue(object.workflow_type)).unsigned = false; + else if (typeof object.workflow_type === "string") + message.workflow_type = parseInt(object.workflow_type, 10); + else if (typeof object.workflow_type === "number") + 
message.workflow_type = object.workflow_type; + else if (typeof object.workflow_type === "object") + message.workflow_type = new $util.LongBits(object.workflow_type.low >>> 0, object.workflow_type.high >>> 0).toNumber(); + if (object.workflow_name != null) + message.workflow_name = String(object.workflow_name); return message; }; /** - * Creates a plain object from a MinimalSchema message. Also converts values to other types if specified. + * Creates a plain object from a Filter message. Also converts values to other types if specified. * @function toObject - * @memberof binlogdata.MinimalSchema + * @memberof binlogdata.Filter * @static - * @param {binlogdata.MinimalSchema} message MinimalSchema + * @param {binlogdata.Filter} message Filter * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - MinimalSchema.toObject = function toObject(message, options) { + Filter.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) - object.tables = []; - if (message.tables && message.tables.length) { - object.tables = []; - for (let j = 0; j < message.tables.length; ++j) - object.tables[j] = $root.binlogdata.MinimalTable.toObject(message.tables[j], options); + object.rules = []; + if (options.defaults) { + object.field_event_mode = options.enums === String ? "ERR_ON_MISMATCH" : 0; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.workflow_type = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.workflow_type = options.longs === String ? 
"0" : 0; + object.workflow_name = ""; + } + if (message.rules && message.rules.length) { + object.rules = []; + for (let j = 0; j < message.rules.length; ++j) + object.rules[j] = $root.binlogdata.Rule.toObject(message.rules[j], options); } + if (message.field_event_mode != null && message.hasOwnProperty("field_event_mode")) + object.field_event_mode = options.enums === String ? $root.binlogdata.Filter.FieldEventMode[message.field_event_mode] === undefined ? message.field_event_mode : $root.binlogdata.Filter.FieldEventMode[message.field_event_mode] : message.field_event_mode; + if (message.workflow_type != null && message.hasOwnProperty("workflow_type")) + if (typeof message.workflow_type === "number") + object.workflow_type = options.longs === String ? String(message.workflow_type) : message.workflow_type; + else + object.workflow_type = options.longs === String ? $util.Long.prototype.toString.call(message.workflow_type) : options.longs === Number ? new $util.LongBits(message.workflow_type.low >>> 0, message.workflow_type.high >>> 0).toNumber() : message.workflow_type; + if (message.workflow_name != null && message.hasOwnProperty("workflow_name")) + object.workflow_name = message.workflow_name; return object; }; /** - * Converts this MinimalSchema to JSON. + * Converts this Filter to JSON. 
* @function toJSON - * @memberof binlogdata.MinimalSchema + * @memberof binlogdata.Filter * @instance * @returns {Object.} JSON object */ - MinimalSchema.prototype.toJSON = function toJSON() { + Filter.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for MinimalSchema + * Gets the default type url for Filter * @function getTypeUrl - * @memberof binlogdata.MinimalSchema + * @memberof binlogdata.Filter * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - MinimalSchema.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + Filter.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.MinimalSchema"; + return typeUrlPrefix + "/binlogdata.Filter"; }; - return MinimalSchema; - })(); - - binlogdata.VStreamRequest = (function() { - - /** - * Properties of a VStreamRequest. - * @memberof binlogdata - * @interface IVStreamRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] VStreamRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] VStreamRequest immediate_caller_id - * @property {query.ITarget|null} [target] VStreamRequest target - * @property {string|null} [position] VStreamRequest position - * @property {binlogdata.IFilter|null} [filter] VStreamRequest filter - * @property {Array.|null} [table_last_p_ks] VStreamRequest table_last_p_ks - */ - /** - * Constructs a new VStreamRequest. - * @memberof binlogdata - * @classdesc Represents a VStreamRequest. - * @implements IVStreamRequest - * @constructor - * @param {binlogdata.IVStreamRequest=} [properties] Properties to set + * FieldEventMode enum. 
+ * @name binlogdata.Filter.FieldEventMode + * @enum {number} + * @property {number} ERR_ON_MISMATCH=0 ERR_ON_MISMATCH value + * @property {number} BEST_EFFORT=1 BEST_EFFORT value */ - function VStreamRequest(properties) { - this.table_last_p_ks = []; - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + Filter.FieldEventMode = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "ERR_ON_MISMATCH"] = 0; + values[valuesById[1] = "BEST_EFFORT"] = 1; + return values; + })(); - /** - * VStreamRequest effective_caller_id. - * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof binlogdata.VStreamRequest - * @instance - */ - VStreamRequest.prototype.effective_caller_id = null; + return Filter; + })(); - /** - * VStreamRequest immediate_caller_id. - * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof binlogdata.VStreamRequest - * @instance - */ - VStreamRequest.prototype.immediate_caller_id = null; + /** + * OnDDLAction enum. + * @name binlogdata.OnDDLAction + * @enum {number} + * @property {number} IGNORE=0 IGNORE value + * @property {number} STOP=1 STOP value + * @property {number} EXEC=2 EXEC value + * @property {number} EXEC_IGNORE=3 EXEC_IGNORE value + */ + binlogdata.OnDDLAction = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "IGNORE"] = 0; + values[valuesById[1] = "STOP"] = 1; + values[valuesById[2] = "EXEC"] = 2; + values[valuesById[3] = "EXEC_IGNORE"] = 3; + return values; + })(); - /** - * VStreamRequest target. - * @member {query.ITarget|null|undefined} target - * @memberof binlogdata.VStreamRequest - * @instance - */ - VStreamRequest.prototype.target = null; + /** + * VReplicationWorkflowType enum. 
+ * @name binlogdata.VReplicationWorkflowType + * @enum {number} + * @property {number} Materialize=0 Materialize value + * @property {number} MoveTables=1 MoveTables value + * @property {number} CreateLookupIndex=2 CreateLookupIndex value + * @property {number} Migrate=3 Migrate value + * @property {number} Reshard=4 Reshard value + * @property {number} OnlineDDL=5 OnlineDDL value + */ + binlogdata.VReplicationWorkflowType = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "Materialize"] = 0; + values[valuesById[1] = "MoveTables"] = 1; + values[valuesById[2] = "CreateLookupIndex"] = 2; + values[valuesById[3] = "Migrate"] = 3; + values[valuesById[4] = "Reshard"] = 4; + values[valuesById[5] = "OnlineDDL"] = 5; + return values; + })(); + + /** + * VReplicationWorkflowSubType enum. + * @name binlogdata.VReplicationWorkflowSubType + * @enum {number} + * @property {number} None=0 None value + * @property {number} Partial=1 Partial value + * @property {number} AtomicCopy=2 AtomicCopy value + */ + binlogdata.VReplicationWorkflowSubType = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "None"] = 0; + values[valuesById[1] = "Partial"] = 1; + values[valuesById[2] = "AtomicCopy"] = 2; + return values; + })(); + + /** + * VReplicationWorkflowState enum. 
+ * @name binlogdata.VReplicationWorkflowState + * @enum {number} + * @property {number} Unknown=0 Unknown value + * @property {number} Init=1 Init value + * @property {number} Stopped=2 Stopped value + * @property {number} Copying=3 Copying value + * @property {number} Running=4 Running value + * @property {number} Error=5 Error value + * @property {number} Lagging=6 Lagging value + */ + binlogdata.VReplicationWorkflowState = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "Unknown"] = 0; + values[valuesById[1] = "Init"] = 1; + values[valuesById[2] = "Stopped"] = 2; + values[valuesById[3] = "Copying"] = 3; + values[valuesById[4] = "Running"] = 4; + values[valuesById[5] = "Error"] = 5; + values[valuesById[6] = "Lagging"] = 6; + return values; + })(); + + binlogdata.BinlogSource = (function() { /** - * VStreamRequest position. - * @member {string} position - * @memberof binlogdata.VStreamRequest + * Properties of a BinlogSource. + * @memberof binlogdata + * @interface IBinlogSource + * @property {string|null} [keyspace] BinlogSource keyspace + * @property {string|null} [shard] BinlogSource shard + * @property {topodata.TabletType|null} [tablet_type] BinlogSource tablet_type + * @property {topodata.IKeyRange|null} [key_range] BinlogSource key_range + * @property {Array.|null} [tables] BinlogSource tables + * @property {binlogdata.IFilter|null} [filter] BinlogSource filter + * @property {binlogdata.OnDDLAction|null} [on_ddl] BinlogSource on_ddl + * @property {string|null} [external_mysql] BinlogSource external_mysql + * @property {boolean|null} [stop_after_copy] BinlogSource stop_after_copy + * @property {string|null} [external_cluster] BinlogSource external_cluster + * @property {string|null} [source_time_zone] BinlogSource source_time_zone + * @property {string|null} [target_time_zone] BinlogSource target_time_zone + */ + + /** + * Constructs a new BinlogSource. 
+ * @memberof binlogdata + * @classdesc Represents a BinlogSource. + * @implements IBinlogSource + * @constructor + * @param {binlogdata.IBinlogSource=} [properties] Properties to set + */ + function BinlogSource(properties) { + this.tables = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * BinlogSource keyspace. + * @member {string} keyspace + * @memberof binlogdata.BinlogSource * @instance */ - VStreamRequest.prototype.position = ""; + BinlogSource.prototype.keyspace = ""; /** - * VStreamRequest filter. + * BinlogSource shard. + * @member {string} shard + * @memberof binlogdata.BinlogSource + * @instance + */ + BinlogSource.prototype.shard = ""; + + /** + * BinlogSource tablet_type. + * @member {topodata.TabletType} tablet_type + * @memberof binlogdata.BinlogSource + * @instance + */ + BinlogSource.prototype.tablet_type = 0; + + /** + * BinlogSource key_range. + * @member {topodata.IKeyRange|null|undefined} key_range + * @memberof binlogdata.BinlogSource + * @instance + */ + BinlogSource.prototype.key_range = null; + + /** + * BinlogSource tables. + * @member {Array.} tables + * @memberof binlogdata.BinlogSource + * @instance + */ + BinlogSource.prototype.tables = $util.emptyArray; + + /** + * BinlogSource filter. * @member {binlogdata.IFilter|null|undefined} filter - * @memberof binlogdata.VStreamRequest + * @memberof binlogdata.BinlogSource * @instance */ - VStreamRequest.prototype.filter = null; + BinlogSource.prototype.filter = null; /** - * VStreamRequest table_last_p_ks. - * @member {Array.} table_last_p_ks - * @memberof binlogdata.VStreamRequest + * BinlogSource on_ddl. 
+ * @member {binlogdata.OnDDLAction} on_ddl + * @memberof binlogdata.BinlogSource * @instance */ - VStreamRequest.prototype.table_last_p_ks = $util.emptyArray; + BinlogSource.prototype.on_ddl = 0; /** - * Creates a new VStreamRequest instance using the specified properties. + * BinlogSource external_mysql. + * @member {string} external_mysql + * @memberof binlogdata.BinlogSource + * @instance + */ + BinlogSource.prototype.external_mysql = ""; + + /** + * BinlogSource stop_after_copy. + * @member {boolean} stop_after_copy + * @memberof binlogdata.BinlogSource + * @instance + */ + BinlogSource.prototype.stop_after_copy = false; + + /** + * BinlogSource external_cluster. + * @member {string} external_cluster + * @memberof binlogdata.BinlogSource + * @instance + */ + BinlogSource.prototype.external_cluster = ""; + + /** + * BinlogSource source_time_zone. + * @member {string} source_time_zone + * @memberof binlogdata.BinlogSource + * @instance + */ + BinlogSource.prototype.source_time_zone = ""; + + /** + * BinlogSource target_time_zone. + * @member {string} target_time_zone + * @memberof binlogdata.BinlogSource + * @instance + */ + BinlogSource.prototype.target_time_zone = ""; + + /** + * Creates a new BinlogSource instance using the specified properties. * @function create - * @memberof binlogdata.VStreamRequest + * @memberof binlogdata.BinlogSource * @static - * @param {binlogdata.IVStreamRequest=} [properties] Properties to set - * @returns {binlogdata.VStreamRequest} VStreamRequest instance + * @param {binlogdata.IBinlogSource=} [properties] Properties to set + * @returns {binlogdata.BinlogSource} BinlogSource instance */ - VStreamRequest.create = function create(properties) { - return new VStreamRequest(properties); + BinlogSource.create = function create(properties) { + return new BinlogSource(properties); }; /** - * Encodes the specified VStreamRequest message. Does not implicitly {@link binlogdata.VStreamRequest.verify|verify} messages. 
+ * Encodes the specified BinlogSource message. Does not implicitly {@link binlogdata.BinlogSource.verify|verify} messages. * @function encode - * @memberof binlogdata.VStreamRequest + * @memberof binlogdata.BinlogSource * @static - * @param {binlogdata.IVStreamRequest} message VStreamRequest message or plain object to encode + * @param {binlogdata.IBinlogSource} message BinlogSource message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VStreamRequest.encode = function encode(message, writer) { + BinlogSource.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) - $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) - $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.target != null && Object.hasOwnProperty.call(message, "target")) - $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.position != null && Object.hasOwnProperty.call(message, "position")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.position); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.tablet_type != null && Object.hasOwnProperty.call(message, "tablet_type")) + writer.uint32(/* id 3, wireType 0 =*/24).int32(message.tablet_type); + if (message.key_range != null && Object.hasOwnProperty.call(message, 
"key_range")) + $root.topodata.KeyRange.encode(message.key_range, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.tables != null && message.tables.length) + for (let i = 0; i < message.tables.length; ++i) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.tables[i]); if (message.filter != null && Object.hasOwnProperty.call(message, "filter")) - $root.binlogdata.Filter.encode(message.filter, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); - if (message.table_last_p_ks != null && message.table_last_p_ks.length) - for (let i = 0; i < message.table_last_p_ks.length; ++i) - $root.binlogdata.TableLastPK.encode(message.table_last_p_ks[i], writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); + $root.binlogdata.Filter.encode(message.filter, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); + if (message.on_ddl != null && Object.hasOwnProperty.call(message, "on_ddl")) + writer.uint32(/* id 7, wireType 0 =*/56).int32(message.on_ddl); + if (message.external_mysql != null && Object.hasOwnProperty.call(message, "external_mysql")) + writer.uint32(/* id 8, wireType 2 =*/66).string(message.external_mysql); + if (message.stop_after_copy != null && Object.hasOwnProperty.call(message, "stop_after_copy")) + writer.uint32(/* id 9, wireType 0 =*/72).bool(message.stop_after_copy); + if (message.external_cluster != null && Object.hasOwnProperty.call(message, "external_cluster")) + writer.uint32(/* id 10, wireType 2 =*/82).string(message.external_cluster); + if (message.source_time_zone != null && Object.hasOwnProperty.call(message, "source_time_zone")) + writer.uint32(/* id 11, wireType 2 =*/90).string(message.source_time_zone); + if (message.target_time_zone != null && Object.hasOwnProperty.call(message, "target_time_zone")) + writer.uint32(/* id 12, wireType 2 =*/98).string(message.target_time_zone); return writer; }; /** - * Encodes the specified VStreamRequest message, length delimited. 
Does not implicitly {@link binlogdata.VStreamRequest.verify|verify} messages. + * Encodes the specified BinlogSource message, length delimited. Does not implicitly {@link binlogdata.BinlogSource.verify|verify} messages. * @function encodeDelimited - * @memberof binlogdata.VStreamRequest + * @memberof binlogdata.BinlogSource * @static - * @param {binlogdata.IVStreamRequest} message VStreamRequest message or plain object to encode + * @param {binlogdata.IBinlogSource} message BinlogSource message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VStreamRequest.encodeDelimited = function encodeDelimited(message, writer) { + BinlogSource.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a VStreamRequest message from the specified reader or buffer. + * Decodes a BinlogSource message from the specified reader or buffer. * @function decode - * @memberof binlogdata.VStreamRequest + * @memberof binlogdata.BinlogSource * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.VStreamRequest} VStreamRequest + * @returns {binlogdata.BinlogSource} BinlogSource * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VStreamRequest.decode = function decode(reader, length) { + BinlogSource.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.VStreamRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.binlogdata.BinlogSource(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); + message.keyspace = reader.string(); break; } case 2: { - message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); + message.shard = reader.string(); break; } case 3: { - message.target = $root.query.Target.decode(reader, reader.uint32()); + message.tablet_type = reader.int32(); break; } case 4: { - message.position = reader.string(); + message.key_range = $root.topodata.KeyRange.decode(reader, reader.uint32()); break; } case 5: { - message.filter = $root.binlogdata.Filter.decode(reader, reader.uint32()); + if (!(message.tables && message.tables.length)) + message.tables = []; + message.tables.push(reader.string()); break; } case 6: { - if (!(message.table_last_p_ks && message.table_last_p_ks.length)) - message.table_last_p_ks = []; - message.table_last_p_ks.push($root.binlogdata.TableLastPK.decode(reader, reader.uint32())); + message.filter = $root.binlogdata.Filter.decode(reader, reader.uint32()); + break; + } + case 7: { + message.on_ddl = reader.int32(); + break; + } + case 8: { + message.external_mysql = reader.string(); + break; + } + case 9: { + message.stop_after_copy = reader.bool(); + break; + } + case 10: { + message.external_cluster = reader.string(); + break; + } + case 11: { + message.source_time_zone = reader.string(); + break; + } + case 12: { + message.target_time_zone = reader.string(); break; } default: @@ -68432,202 +69597,381 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a VStreamRequest message from the specified reader or buffer, length delimited. + * Decodes a BinlogSource message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof binlogdata.VStreamRequest + * @memberof binlogdata.BinlogSource * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.VStreamRequest} VStreamRequest + * @returns {binlogdata.BinlogSource} BinlogSource * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VStreamRequest.decodeDelimited = function decodeDelimited(reader) { + BinlogSource.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a VStreamRequest message. + * Verifies a BinlogSource message. * @function verify - * @memberof binlogdata.VStreamRequest + * @memberof binlogdata.BinlogSource * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - VStreamRequest.verify = function verify(message) { + BinlogSource.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { - let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); - if (error) - return "effective_caller_id." 
+ error; - } - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { - let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) + switch (message.tablet_type) { + default: + return "tablet_type: enum value expected"; + case 0: + case 1: + case 1: + case 2: + case 3: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + } + if (message.key_range != null && message.hasOwnProperty("key_range")) { + let error = $root.topodata.KeyRange.verify(message.key_range); if (error) - return "immediate_caller_id." + error; + return "key_range." + error; } - if (message.target != null && message.hasOwnProperty("target")) { - let error = $root.query.Target.verify(message.target); - if (error) - return "target." + error; + if (message.tables != null && message.hasOwnProperty("tables")) { + if (!Array.isArray(message.tables)) + return "tables: array expected"; + for (let i = 0; i < message.tables.length; ++i) + if (!$util.isString(message.tables[i])) + return "tables: string[] expected"; } - if (message.position != null && message.hasOwnProperty("position")) - if (!$util.isString(message.position)) - return "position: string expected"; if (message.filter != null && message.hasOwnProperty("filter")) { let error = $root.binlogdata.Filter.verify(message.filter); if (error) return "filter." 
+ error; } - if (message.table_last_p_ks != null && message.hasOwnProperty("table_last_p_ks")) { - if (!Array.isArray(message.table_last_p_ks)) - return "table_last_p_ks: array expected"; - for (let i = 0; i < message.table_last_p_ks.length; ++i) { - let error = $root.binlogdata.TableLastPK.verify(message.table_last_p_ks[i]); - if (error) - return "table_last_p_ks." + error; + if (message.on_ddl != null && message.hasOwnProperty("on_ddl")) + switch (message.on_ddl) { + default: + return "on_ddl: enum value expected"; + case 0: + case 1: + case 2: + case 3: + break; } - } + if (message.external_mysql != null && message.hasOwnProperty("external_mysql")) + if (!$util.isString(message.external_mysql)) + return "external_mysql: string expected"; + if (message.stop_after_copy != null && message.hasOwnProperty("stop_after_copy")) + if (typeof message.stop_after_copy !== "boolean") + return "stop_after_copy: boolean expected"; + if (message.external_cluster != null && message.hasOwnProperty("external_cluster")) + if (!$util.isString(message.external_cluster)) + return "external_cluster: string expected"; + if (message.source_time_zone != null && message.hasOwnProperty("source_time_zone")) + if (!$util.isString(message.source_time_zone)) + return "source_time_zone: string expected"; + if (message.target_time_zone != null && message.hasOwnProperty("target_time_zone")) + if (!$util.isString(message.target_time_zone)) + return "target_time_zone: string expected"; return null; }; /** - * Creates a VStreamRequest message from a plain object. Also converts values to their respective internal types. + * Creates a BinlogSource message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof binlogdata.VStreamRequest + * @memberof binlogdata.BinlogSource * @static * @param {Object.} object Plain object - * @returns {binlogdata.VStreamRequest} VStreamRequest + * @returns {binlogdata.BinlogSource} BinlogSource */ - VStreamRequest.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.VStreamRequest) + BinlogSource.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.BinlogSource) return object; - let message = new $root.binlogdata.VStreamRequest(); - if (object.effective_caller_id != null) { - if (typeof object.effective_caller_id !== "object") - throw TypeError(".binlogdata.VStreamRequest.effective_caller_id: object expected"); - message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); + let message = new $root.binlogdata.BinlogSource(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + switch (object.tablet_type) { + default: + if (typeof object.tablet_type === "number") { + message.tablet_type = object.tablet_type; + break; + } + break; + case "UNKNOWN": + case 0: + message.tablet_type = 0; + break; + case "PRIMARY": + case 1: + message.tablet_type = 1; + break; + case "MASTER": + case 1: + message.tablet_type = 1; + break; + case "REPLICA": + case 2: + message.tablet_type = 2; + break; + case "RDONLY": + case 3: + message.tablet_type = 3; + break; + case "BATCH": + case 3: + message.tablet_type = 3; + break; + case "SPARE": + case 4: + message.tablet_type = 4; + break; + case "EXPERIMENTAL": + case 5: + message.tablet_type = 5; + break; + case "BACKUP": + case 6: + message.tablet_type = 6; + break; + case "RESTORE": + case 7: + message.tablet_type = 7; + break; + case "DRAINED": + case 8: + message.tablet_type = 8; + break; } - if (object.immediate_caller_id != null) { - if (typeof object.immediate_caller_id !== "object") - 
throw TypeError(".binlogdata.VStreamRequest.immediate_caller_id: object expected"); - message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); + if (object.key_range != null) { + if (typeof object.key_range !== "object") + throw TypeError(".binlogdata.BinlogSource.key_range: object expected"); + message.key_range = $root.topodata.KeyRange.fromObject(object.key_range); } - if (object.target != null) { - if (typeof object.target !== "object") - throw TypeError(".binlogdata.VStreamRequest.target: object expected"); - message.target = $root.query.Target.fromObject(object.target); + if (object.tables) { + if (!Array.isArray(object.tables)) + throw TypeError(".binlogdata.BinlogSource.tables: array expected"); + message.tables = []; + for (let i = 0; i < object.tables.length; ++i) + message.tables[i] = String(object.tables[i]); } - if (object.position != null) - message.position = String(object.position); if (object.filter != null) { if (typeof object.filter !== "object") - throw TypeError(".binlogdata.VStreamRequest.filter: object expected"); + throw TypeError(".binlogdata.BinlogSource.filter: object expected"); message.filter = $root.binlogdata.Filter.fromObject(object.filter); } - if (object.table_last_p_ks) { - if (!Array.isArray(object.table_last_p_ks)) - throw TypeError(".binlogdata.VStreamRequest.table_last_p_ks: array expected"); - message.table_last_p_ks = []; - for (let i = 0; i < object.table_last_p_ks.length; ++i) { - if (typeof object.table_last_p_ks[i] !== "object") - throw TypeError(".binlogdata.VStreamRequest.table_last_p_ks: object expected"); - message.table_last_p_ks[i] = $root.binlogdata.TableLastPK.fromObject(object.table_last_p_ks[i]); + switch (object.on_ddl) { + default: + if (typeof object.on_ddl === "number") { + message.on_ddl = object.on_ddl; + break; } + break; + case "IGNORE": + case 0: + message.on_ddl = 0; + break; + case "STOP": + case 1: + message.on_ddl = 1; + break; + case "EXEC": + case 2: + 
message.on_ddl = 2; + break; + case "EXEC_IGNORE": + case 3: + message.on_ddl = 3; + break; } - return message; - }; - - /** - * Creates a plain object from a VStreamRequest message. Also converts values to other types if specified. - * @function toObject - * @memberof binlogdata.VStreamRequest - * @static - * @param {binlogdata.VStreamRequest} message VStreamRequest + if (object.external_mysql != null) + message.external_mysql = String(object.external_mysql); + if (object.stop_after_copy != null) + message.stop_after_copy = Boolean(object.stop_after_copy); + if (object.external_cluster != null) + message.external_cluster = String(object.external_cluster); + if (object.source_time_zone != null) + message.source_time_zone = String(object.source_time_zone); + if (object.target_time_zone != null) + message.target_time_zone = String(object.target_time_zone); + return message; + }; + + /** + * Creates a plain object from a BinlogSource message. Also converts values to other types if specified. + * @function toObject + * @memberof binlogdata.BinlogSource + * @static + * @param {binlogdata.BinlogSource} message BinlogSource * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - VStreamRequest.toObject = function toObject(message, options) { + BinlogSource.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) - object.table_last_p_ks = []; + object.tables = []; if (options.defaults) { - object.effective_caller_id = null; - object.immediate_caller_id = null; - object.target = null; - object.position = ""; + object.keyspace = ""; + object.shard = ""; + object.tablet_type = options.enums === String ? "UNKNOWN" : 0; + object.key_range = null; object.filter = null; + object.on_ddl = options.enums === String ? 
"IGNORE" : 0; + object.external_mysql = ""; + object.stop_after_copy = false; + object.external_cluster = ""; + object.source_time_zone = ""; + object.target_time_zone = ""; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) + object.tablet_type = options.enums === String ? $root.topodata.TabletType[message.tablet_type] === undefined ? message.tablet_type : $root.topodata.TabletType[message.tablet_type] : message.tablet_type; + if (message.key_range != null && message.hasOwnProperty("key_range")) + object.key_range = $root.topodata.KeyRange.toObject(message.key_range, options); + if (message.tables && message.tables.length) { + object.tables = []; + for (let j = 0; j < message.tables.length; ++j) + object.tables[j] = message.tables[j]; } - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) - object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) - object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); - if (message.target != null && message.hasOwnProperty("target")) - object.target = $root.query.Target.toObject(message.target, options); - if (message.position != null && message.hasOwnProperty("position")) - object.position = message.position; if (message.filter != null && message.hasOwnProperty("filter")) object.filter = $root.binlogdata.Filter.toObject(message.filter, options); - if (message.table_last_p_ks && message.table_last_p_ks.length) { - object.table_last_p_ks = []; - for (let j = 0; j < message.table_last_p_ks.length; ++j) - object.table_last_p_ks[j] = 
$root.binlogdata.TableLastPK.toObject(message.table_last_p_ks[j], options); - } + if (message.on_ddl != null && message.hasOwnProperty("on_ddl")) + object.on_ddl = options.enums === String ? $root.binlogdata.OnDDLAction[message.on_ddl] === undefined ? message.on_ddl : $root.binlogdata.OnDDLAction[message.on_ddl] : message.on_ddl; + if (message.external_mysql != null && message.hasOwnProperty("external_mysql")) + object.external_mysql = message.external_mysql; + if (message.stop_after_copy != null && message.hasOwnProperty("stop_after_copy")) + object.stop_after_copy = message.stop_after_copy; + if (message.external_cluster != null && message.hasOwnProperty("external_cluster")) + object.external_cluster = message.external_cluster; + if (message.source_time_zone != null && message.hasOwnProperty("source_time_zone")) + object.source_time_zone = message.source_time_zone; + if (message.target_time_zone != null && message.hasOwnProperty("target_time_zone")) + object.target_time_zone = message.target_time_zone; return object; }; /** - * Converts this VStreamRequest to JSON. + * Converts this BinlogSource to JSON. 
* @function toJSON - * @memberof binlogdata.VStreamRequest + * @memberof binlogdata.BinlogSource * @instance * @returns {Object.} JSON object */ - VStreamRequest.prototype.toJSON = function toJSON() { + BinlogSource.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for VStreamRequest + * Gets the default type url for BinlogSource * @function getTypeUrl - * @memberof binlogdata.VStreamRequest + * @memberof binlogdata.BinlogSource * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - VStreamRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + BinlogSource.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.VStreamRequest"; + return typeUrlPrefix + "/binlogdata.BinlogSource"; }; - return VStreamRequest; + return BinlogSource; })(); - binlogdata.VStreamResponse = (function() { + /** + * VEventType enum. 
+ * @name binlogdata.VEventType + * @enum {number} + * @property {number} UNKNOWN=0 UNKNOWN value + * @property {number} GTID=1 GTID value + * @property {number} BEGIN=2 BEGIN value + * @property {number} COMMIT=3 COMMIT value + * @property {number} ROLLBACK=4 ROLLBACK value + * @property {number} DDL=5 DDL value + * @property {number} INSERT=6 INSERT value + * @property {number} REPLACE=7 REPLACE value + * @property {number} UPDATE=8 UPDATE value + * @property {number} DELETE=9 DELETE value + * @property {number} SET=10 SET value + * @property {number} OTHER=11 OTHER value + * @property {number} ROW=12 ROW value + * @property {number} FIELD=13 FIELD value + * @property {number} HEARTBEAT=14 HEARTBEAT value + * @property {number} VGTID=15 VGTID value + * @property {number} JOURNAL=16 JOURNAL value + * @property {number} VERSION=17 VERSION value + * @property {number} LASTPK=18 LASTPK value + * @property {number} SAVEPOINT=19 SAVEPOINT value + * @property {number} COPY_COMPLETED=20 COPY_COMPLETED value + */ + binlogdata.VEventType = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "UNKNOWN"] = 0; + values[valuesById[1] = "GTID"] = 1; + values[valuesById[2] = "BEGIN"] = 2; + values[valuesById[3] = "COMMIT"] = 3; + values[valuesById[4] = "ROLLBACK"] = 4; + values[valuesById[5] = "DDL"] = 5; + values[valuesById[6] = "INSERT"] = 6; + values[valuesById[7] = "REPLACE"] = 7; + values[valuesById[8] = "UPDATE"] = 8; + values[valuesById[9] = "DELETE"] = 9; + values[valuesById[10] = "SET"] = 10; + values[valuesById[11] = "OTHER"] = 11; + values[valuesById[12] = "ROW"] = 12; + values[valuesById[13] = "FIELD"] = 13; + values[valuesById[14] = "HEARTBEAT"] = 14; + values[valuesById[15] = "VGTID"] = 15; + values[valuesById[16] = "JOURNAL"] = 16; + values[valuesById[17] = "VERSION"] = 17; + values[valuesById[18] = "LASTPK"] = 18; + values[valuesById[19] = "SAVEPOINT"] = 19; + values[valuesById[20] = "COPY_COMPLETED"] = 20; + return 
values; + })(); + + binlogdata.RowChange = (function() { /** - * Properties of a VStreamResponse. + * Properties of a RowChange. * @memberof binlogdata - * @interface IVStreamResponse - * @property {Array.|null} [events] VStreamResponse events + * @interface IRowChange + * @property {query.IRow|null} [before] RowChange before + * @property {query.IRow|null} [after] RowChange after + * @property {binlogdata.RowChange.IBitmap|null} [data_columns] RowChange data_columns */ /** - * Constructs a new VStreamResponse. + * Constructs a new RowChange. * @memberof binlogdata - * @classdesc Represents a VStreamResponse. - * @implements IVStreamResponse + * @classdesc Represents a RowChange. + * @implements IRowChange * @constructor - * @param {binlogdata.IVStreamResponse=} [properties] Properties to set + * @param {binlogdata.IRowChange=} [properties] Properties to set */ - function VStreamResponse(properties) { - this.events = []; + function RowChange(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -68635,78 +69979,103 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * VStreamResponse events. - * @member {Array.} events - * @memberof binlogdata.VStreamResponse + * RowChange before. + * @member {query.IRow|null|undefined} before + * @memberof binlogdata.RowChange * @instance */ - VStreamResponse.prototype.events = $util.emptyArray; + RowChange.prototype.before = null; /** - * Creates a new VStreamResponse instance using the specified properties. + * RowChange after. + * @member {query.IRow|null|undefined} after + * @memberof binlogdata.RowChange + * @instance + */ + RowChange.prototype.after = null; + + /** + * RowChange data_columns. 
+ * @member {binlogdata.RowChange.IBitmap|null|undefined} data_columns + * @memberof binlogdata.RowChange + * @instance + */ + RowChange.prototype.data_columns = null; + + /** + * Creates a new RowChange instance using the specified properties. * @function create - * @memberof binlogdata.VStreamResponse + * @memberof binlogdata.RowChange * @static - * @param {binlogdata.IVStreamResponse=} [properties] Properties to set - * @returns {binlogdata.VStreamResponse} VStreamResponse instance + * @param {binlogdata.IRowChange=} [properties] Properties to set + * @returns {binlogdata.RowChange} RowChange instance */ - VStreamResponse.create = function create(properties) { - return new VStreamResponse(properties); + RowChange.create = function create(properties) { + return new RowChange(properties); }; /** - * Encodes the specified VStreamResponse message. Does not implicitly {@link binlogdata.VStreamResponse.verify|verify} messages. + * Encodes the specified RowChange message. Does not implicitly {@link binlogdata.RowChange.verify|verify} messages. 
* @function encode - * @memberof binlogdata.VStreamResponse + * @memberof binlogdata.RowChange * @static - * @param {binlogdata.IVStreamResponse} message VStreamResponse message or plain object to encode + * @param {binlogdata.IRowChange} message RowChange message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VStreamResponse.encode = function encode(message, writer) { + RowChange.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.events != null && message.events.length) - for (let i = 0; i < message.events.length; ++i) - $root.binlogdata.VEvent.encode(message.events[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.before != null && Object.hasOwnProperty.call(message, "before")) + $root.query.Row.encode(message.before, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.after != null && Object.hasOwnProperty.call(message, "after")) + $root.query.Row.encode(message.after, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.data_columns != null && Object.hasOwnProperty.call(message, "data_columns")) + $root.binlogdata.RowChange.Bitmap.encode(message.data_columns, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); return writer; }; /** - * Encodes the specified VStreamResponse message, length delimited. Does not implicitly {@link binlogdata.VStreamResponse.verify|verify} messages. + * Encodes the specified RowChange message, length delimited. Does not implicitly {@link binlogdata.RowChange.verify|verify} messages. 
* @function encodeDelimited - * @memberof binlogdata.VStreamResponse + * @memberof binlogdata.RowChange * @static - * @param {binlogdata.IVStreamResponse} message VStreamResponse message or plain object to encode + * @param {binlogdata.IRowChange} message RowChange message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VStreamResponse.encodeDelimited = function encodeDelimited(message, writer) { + RowChange.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a VStreamResponse message from the specified reader or buffer. + * Decodes a RowChange message from the specified reader or buffer. * @function decode - * @memberof binlogdata.VStreamResponse + * @memberof binlogdata.RowChange * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.VStreamResponse} VStreamResponse + * @returns {binlogdata.RowChange} RowChange * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VStreamResponse.decode = function decode(reader, length) { + RowChange.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.VStreamResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.binlogdata.RowChange(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.events && message.events.length)) - message.events = []; - message.events.push($root.binlogdata.VEvent.decode(reader, reader.uint32())); + message.before = $root.query.Row.decode(reader, reader.uint32()); + break; + } + case 2: { + message.after = $root.query.Row.decode(reader, reader.uint32()); + break; + } + case 3: { + message.data_columns = $root.binlogdata.RowChange.Bitmap.decode(reader, reader.uint32()); break; } default: @@ -68718,275 +70087,544 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a VStreamResponse message from the specified reader or buffer, length delimited. + * Decodes a RowChange message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof binlogdata.VStreamResponse + * @memberof binlogdata.RowChange * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.VStreamResponse} VStreamResponse + * @returns {binlogdata.RowChange} RowChange * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VStreamResponse.decodeDelimited = function decodeDelimited(reader) { + RowChange.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a VStreamResponse message. + * Verifies a RowChange message. 
* @function verify - * @memberof binlogdata.VStreamResponse + * @memberof binlogdata.RowChange * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - VStreamResponse.verify = function verify(message) { + RowChange.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.events != null && message.hasOwnProperty("events")) { - if (!Array.isArray(message.events)) - return "events: array expected"; - for (let i = 0; i < message.events.length; ++i) { - let error = $root.binlogdata.VEvent.verify(message.events[i]); - if (error) - return "events." + error; - } + if (message.before != null && message.hasOwnProperty("before")) { + let error = $root.query.Row.verify(message.before); + if (error) + return "before." + error; + } + if (message.after != null && message.hasOwnProperty("after")) { + let error = $root.query.Row.verify(message.after); + if (error) + return "after." + error; + } + if (message.data_columns != null && message.hasOwnProperty("data_columns")) { + let error = $root.binlogdata.RowChange.Bitmap.verify(message.data_columns); + if (error) + return "data_columns." + error; } return null; }; /** - * Creates a VStreamResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RowChange message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof binlogdata.VStreamResponse + * @memberof binlogdata.RowChange * @static * @param {Object.} object Plain object - * @returns {binlogdata.VStreamResponse} VStreamResponse + * @returns {binlogdata.RowChange} RowChange */ - VStreamResponse.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.VStreamResponse) + RowChange.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.RowChange) return object; - let message = new $root.binlogdata.VStreamResponse(); - if (object.events) { - if (!Array.isArray(object.events)) - throw TypeError(".binlogdata.VStreamResponse.events: array expected"); - message.events = []; - for (let i = 0; i < object.events.length; ++i) { - if (typeof object.events[i] !== "object") - throw TypeError(".binlogdata.VStreamResponse.events: object expected"); - message.events[i] = $root.binlogdata.VEvent.fromObject(object.events[i]); - } + let message = new $root.binlogdata.RowChange(); + if (object.before != null) { + if (typeof object.before !== "object") + throw TypeError(".binlogdata.RowChange.before: object expected"); + message.before = $root.query.Row.fromObject(object.before); + } + if (object.after != null) { + if (typeof object.after !== "object") + throw TypeError(".binlogdata.RowChange.after: object expected"); + message.after = $root.query.Row.fromObject(object.after); + } + if (object.data_columns != null) { + if (typeof object.data_columns !== "object") + throw TypeError(".binlogdata.RowChange.data_columns: object expected"); + message.data_columns = $root.binlogdata.RowChange.Bitmap.fromObject(object.data_columns); } return message; }; /** - * Creates a plain object from a VStreamResponse message. Also converts values to other types if specified. + * Creates a plain object from a RowChange message. Also converts values to other types if specified. 
* @function toObject - * @memberof binlogdata.VStreamResponse + * @memberof binlogdata.RowChange * @static - * @param {binlogdata.VStreamResponse} message VStreamResponse + * @param {binlogdata.RowChange} message RowChange * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - VStreamResponse.toObject = function toObject(message, options) { + RowChange.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.events = []; - if (message.events && message.events.length) { - object.events = []; - for (let j = 0; j < message.events.length; ++j) - object.events[j] = $root.binlogdata.VEvent.toObject(message.events[j], options); + if (options.defaults) { + object.before = null; + object.after = null; + object.data_columns = null; } + if (message.before != null && message.hasOwnProperty("before")) + object.before = $root.query.Row.toObject(message.before, options); + if (message.after != null && message.hasOwnProperty("after")) + object.after = $root.query.Row.toObject(message.after, options); + if (message.data_columns != null && message.hasOwnProperty("data_columns")) + object.data_columns = $root.binlogdata.RowChange.Bitmap.toObject(message.data_columns, options); return object; }; /** - * Converts this VStreamResponse to JSON. + * Converts this RowChange to JSON. 
* @function toJSON - * @memberof binlogdata.VStreamResponse + * @memberof binlogdata.RowChange * @instance * @returns {Object.} JSON object */ - VStreamResponse.prototype.toJSON = function toJSON() { + RowChange.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for VStreamResponse + * Gets the default type url for RowChange * @function getTypeUrl - * @memberof binlogdata.VStreamResponse + * @memberof binlogdata.RowChange * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - VStreamResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RowChange.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.VStreamResponse"; + return typeUrlPrefix + "/binlogdata.RowChange"; }; - return VStreamResponse; - })(); - - binlogdata.VStreamRowsRequest = (function() { - - /** - * Properties of a VStreamRowsRequest. - * @memberof binlogdata - * @interface IVStreamRowsRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] VStreamRowsRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] VStreamRowsRequest immediate_caller_id - * @property {query.ITarget|null} [target] VStreamRowsRequest target - * @property {string|null} [query] VStreamRowsRequest query - * @property {query.IQueryResult|null} [lastpk] VStreamRowsRequest lastpk - */ + RowChange.Bitmap = (function() { - /** - * Constructs a new VStreamRowsRequest. - * @memberof binlogdata - * @classdesc Represents a VStreamRowsRequest. 
- * @implements IVStreamRowsRequest - * @constructor - * @param {binlogdata.IVStreamRowsRequest=} [properties] Properties to set - */ - function VStreamRowsRequest(properties) { - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + /** + * Properties of a Bitmap. + * @memberof binlogdata.RowChange + * @interface IBitmap + * @property {number|Long|null} [count] Bitmap count + * @property {Uint8Array|null} [cols] Bitmap cols + */ - /** - * VStreamRowsRequest effective_caller_id. - * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof binlogdata.VStreamRowsRequest - * @instance - */ - VStreamRowsRequest.prototype.effective_caller_id = null; + /** + * Constructs a new Bitmap. + * @memberof binlogdata.RowChange + * @classdesc Represents a Bitmap. + * @implements IBitmap + * @constructor + * @param {binlogdata.RowChange.IBitmap=} [properties] Properties to set + */ + function Bitmap(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } - /** - * VStreamRowsRequest immediate_caller_id. - * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof binlogdata.VStreamRowsRequest - * @instance - */ - VStreamRowsRequest.prototype.immediate_caller_id = null; + /** + * Bitmap count. + * @member {number|Long} count + * @memberof binlogdata.RowChange.Bitmap + * @instance + */ + Bitmap.prototype.count = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - /** - * VStreamRowsRequest target. - * @member {query.ITarget|null|undefined} target - * @memberof binlogdata.VStreamRowsRequest - * @instance - */ - VStreamRowsRequest.prototype.target = null; + /** + * Bitmap cols. 
+ * @member {Uint8Array} cols + * @memberof binlogdata.RowChange.Bitmap + * @instance + */ + Bitmap.prototype.cols = $util.newBuffer([]); - /** - * VStreamRowsRequest query. - * @member {string} query - * @memberof binlogdata.VStreamRowsRequest - * @instance - */ - VStreamRowsRequest.prototype.query = ""; + /** + * Creates a new Bitmap instance using the specified properties. + * @function create + * @memberof binlogdata.RowChange.Bitmap + * @static + * @param {binlogdata.RowChange.IBitmap=} [properties] Properties to set + * @returns {binlogdata.RowChange.Bitmap} Bitmap instance + */ + Bitmap.create = function create(properties) { + return new Bitmap(properties); + }; - /** - * VStreamRowsRequest lastpk. - * @member {query.IQueryResult|null|undefined} lastpk - * @memberof binlogdata.VStreamRowsRequest - * @instance - */ - VStreamRowsRequest.prototype.lastpk = null; + /** + * Encodes the specified Bitmap message. Does not implicitly {@link binlogdata.RowChange.Bitmap.verify|verify} messages. + * @function encode + * @memberof binlogdata.RowChange.Bitmap + * @static + * @param {binlogdata.RowChange.IBitmap} message Bitmap message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Bitmap.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.count != null && Object.hasOwnProperty.call(message, "count")) + writer.uint32(/* id 1, wireType 0 =*/8).int64(message.count); + if (message.cols != null && Object.hasOwnProperty.call(message, "cols")) + writer.uint32(/* id 2, wireType 2 =*/18).bytes(message.cols); + return writer; + }; - /** - * Creates a new VStreamRowsRequest instance using the specified properties. 
- * @function create - * @memberof binlogdata.VStreamRowsRequest - * @static - * @param {binlogdata.IVStreamRowsRequest=} [properties] Properties to set - * @returns {binlogdata.VStreamRowsRequest} VStreamRowsRequest instance - */ - VStreamRowsRequest.create = function create(properties) { - return new VStreamRowsRequest(properties); - }; + /** + * Encodes the specified Bitmap message, length delimited. Does not implicitly {@link binlogdata.RowChange.Bitmap.verify|verify} messages. + * @function encodeDelimited + * @memberof binlogdata.RowChange.Bitmap + * @static + * @param {binlogdata.RowChange.IBitmap} message Bitmap message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Bitmap.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; - /** - * Encodes the specified VStreamRowsRequest message. Does not implicitly {@link binlogdata.VStreamRowsRequest.verify|verify} messages. - * @function encode - * @memberof binlogdata.VStreamRowsRequest + /** + * Decodes a Bitmap message from the specified reader or buffer. + * @function decode + * @memberof binlogdata.RowChange.Bitmap + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {binlogdata.RowChange.Bitmap} Bitmap + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Bitmap.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.binlogdata.RowChange.Bitmap(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.count = reader.int64(); + break; + } + case 2: { + message.cols = reader.bytes(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a Bitmap message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof binlogdata.RowChange.Bitmap + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {binlogdata.RowChange.Bitmap} Bitmap + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Bitmap.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Bitmap message. + * @function verify + * @memberof binlogdata.RowChange.Bitmap + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Bitmap.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.count != null && message.hasOwnProperty("count")) + if (!$util.isInteger(message.count) && !(message.count && $util.isInteger(message.count.low) && $util.isInteger(message.count.high))) + return "count: integer|Long expected"; + if (message.cols != null && message.hasOwnProperty("cols")) + if (!(message.cols && typeof message.cols.length === "number" || $util.isString(message.cols))) + return "cols: buffer expected"; + return null; + }; + + /** + * Creates a Bitmap message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof binlogdata.RowChange.Bitmap + * @static + * @param {Object.} object Plain object + * @returns {binlogdata.RowChange.Bitmap} Bitmap + */ + Bitmap.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.RowChange.Bitmap) + return object; + let message = new $root.binlogdata.RowChange.Bitmap(); + if (object.count != null) + if ($util.Long) + (message.count = $util.Long.fromValue(object.count)).unsigned = false; + else if (typeof object.count === "string") + message.count = parseInt(object.count, 10); + else if (typeof object.count === "number") + message.count = object.count; + else if (typeof object.count === "object") + message.count = new $util.LongBits(object.count.low >>> 0, object.count.high >>> 0).toNumber(); + if (object.cols != null) + if (typeof object.cols === "string") + $util.base64.decode(object.cols, message.cols = $util.newBuffer($util.base64.length(object.cols)), 0); + else if (object.cols.length >= 0) + message.cols = object.cols; + return message; + }; + + /** + * Creates a plain object from a Bitmap message. Also converts values to other types if specified. + * @function toObject + * @memberof binlogdata.RowChange.Bitmap + * @static + * @param {binlogdata.RowChange.Bitmap} message Bitmap + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Bitmap.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.count = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.count = options.longs === String ? 
"0" : 0; + if (options.bytes === String) + object.cols = ""; + else { + object.cols = []; + if (options.bytes !== Array) + object.cols = $util.newBuffer(object.cols); + } + } + if (message.count != null && message.hasOwnProperty("count")) + if (typeof message.count === "number") + object.count = options.longs === String ? String(message.count) : message.count; + else + object.count = options.longs === String ? $util.Long.prototype.toString.call(message.count) : options.longs === Number ? new $util.LongBits(message.count.low >>> 0, message.count.high >>> 0).toNumber() : message.count; + if (message.cols != null && message.hasOwnProperty("cols")) + object.cols = options.bytes === String ? $util.base64.encode(message.cols, 0, message.cols.length) : options.bytes === Array ? Array.prototype.slice.call(message.cols) : message.cols; + return object; + }; + + /** + * Converts this Bitmap to JSON. + * @function toJSON + * @memberof binlogdata.RowChange.Bitmap + * @instance + * @returns {Object.} JSON object + */ + Bitmap.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Bitmap + * @function getTypeUrl + * @memberof binlogdata.RowChange.Bitmap + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Bitmap.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/binlogdata.RowChange.Bitmap"; + }; + + return Bitmap; + })(); + + return RowChange; + })(); + + binlogdata.RowEvent = (function() { + + /** + * Properties of a RowEvent. 
+ * @memberof binlogdata + * @interface IRowEvent + * @property {string|null} [table_name] RowEvent table_name + * @property {Array.|null} [row_changes] RowEvent row_changes + * @property {string|null} [keyspace] RowEvent keyspace + * @property {string|null} [shard] RowEvent shard + * @property {number|null} [flags] RowEvent flags + */ + + /** + * Constructs a new RowEvent. + * @memberof binlogdata + * @classdesc Represents a RowEvent. + * @implements IRowEvent + * @constructor + * @param {binlogdata.IRowEvent=} [properties] Properties to set + */ + function RowEvent(properties) { + this.row_changes = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * RowEvent table_name. + * @member {string} table_name + * @memberof binlogdata.RowEvent + * @instance + */ + RowEvent.prototype.table_name = ""; + + /** + * RowEvent row_changes. + * @member {Array.} row_changes + * @memberof binlogdata.RowEvent + * @instance + */ + RowEvent.prototype.row_changes = $util.emptyArray; + + /** + * RowEvent keyspace. + * @member {string} keyspace + * @memberof binlogdata.RowEvent + * @instance + */ + RowEvent.prototype.keyspace = ""; + + /** + * RowEvent shard. + * @member {string} shard + * @memberof binlogdata.RowEvent + * @instance + */ + RowEvent.prototype.shard = ""; + + /** + * RowEvent flags. + * @member {number} flags + * @memberof binlogdata.RowEvent + * @instance + */ + RowEvent.prototype.flags = 0; + + /** + * Creates a new RowEvent instance using the specified properties. 
+ * @function create + * @memberof binlogdata.RowEvent * @static - * @param {binlogdata.IVStreamRowsRequest} message VStreamRowsRequest message or plain object to encode + * @param {binlogdata.IRowEvent=} [properties] Properties to set + * @returns {binlogdata.RowEvent} RowEvent instance + */ + RowEvent.create = function create(properties) { + return new RowEvent(properties); + }; + + /** + * Encodes the specified RowEvent message. Does not implicitly {@link binlogdata.RowEvent.verify|verify} messages. + * @function encode + * @memberof binlogdata.RowEvent + * @static + * @param {binlogdata.IRowEvent} message RowEvent message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VStreamRowsRequest.encode = function encode(message, writer) { + RowEvent.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) - $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) - $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.target != null && Object.hasOwnProperty.call(message, "target")) - $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.query != null && Object.hasOwnProperty.call(message, "query")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.query); - if (message.lastpk != null && Object.hasOwnProperty.call(message, "lastpk")) - $root.query.QueryResult.encode(message.lastpk, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.table_name != null && Object.hasOwnProperty.call(message, "table_name")) + writer.uint32(/* id 1, 
wireType 2 =*/10).string(message.table_name); + if (message.row_changes != null && message.row_changes.length) + for (let i = 0; i < message.row_changes.length; ++i) + $root.binlogdata.RowChange.encode(message.row_changes[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.shard); + if (message.flags != null && Object.hasOwnProperty.call(message, "flags")) + writer.uint32(/* id 5, wireType 0 =*/40).uint32(message.flags); return writer; }; /** - * Encodes the specified VStreamRowsRequest message, length delimited. Does not implicitly {@link binlogdata.VStreamRowsRequest.verify|verify} messages. + * Encodes the specified RowEvent message, length delimited. Does not implicitly {@link binlogdata.RowEvent.verify|verify} messages. * @function encodeDelimited - * @memberof binlogdata.VStreamRowsRequest + * @memberof binlogdata.RowEvent * @static - * @param {binlogdata.IVStreamRowsRequest} message VStreamRowsRequest message or plain object to encode + * @param {binlogdata.IRowEvent} message RowEvent message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VStreamRowsRequest.encodeDelimited = function encodeDelimited(message, writer) { + RowEvent.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a VStreamRowsRequest message from the specified reader or buffer. + * Decodes a RowEvent message from the specified reader or buffer. 
* @function decode - * @memberof binlogdata.VStreamRowsRequest + * @memberof binlogdata.RowEvent * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.VStreamRowsRequest} VStreamRowsRequest + * @returns {binlogdata.RowEvent} RowEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VStreamRowsRequest.decode = function decode(reader, length) { + RowEvent.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.VStreamRowsRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.RowEvent(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); + message.table_name = reader.string(); break; } case 2: { - message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); + if (!(message.row_changes && message.row_changes.length)) + message.row_changes = []; + message.row_changes.push($root.binlogdata.RowChange.decode(reader, reader.uint32())); break; } case 3: { - message.target = $root.query.Target.decode(reader, reader.uint32()); + message.keyspace = reader.string(); break; } case 4: { - message.query = reader.string(); + message.shard = reader.string(); break; } case 5: { - message.lastpk = $root.query.QueryResult.decode(reader, reader.uint32()); + message.flags = reader.uint32(); break; } default: @@ -68998,184 +70636,177 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a VStreamRowsRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a RowEvent message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof binlogdata.VStreamRowsRequest + * @memberof binlogdata.RowEvent * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.VStreamRowsRequest} VStreamRowsRequest + * @returns {binlogdata.RowEvent} RowEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VStreamRowsRequest.decodeDelimited = function decodeDelimited(reader) { + RowEvent.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a VStreamRowsRequest message. + * Verifies a RowEvent message. * @function verify - * @memberof binlogdata.VStreamRowsRequest + * @memberof binlogdata.RowEvent * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - VStreamRowsRequest.verify = function verify(message) { + RowEvent.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { - let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); - if (error) - return "effective_caller_id." + error; - } - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { - let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); - if (error) - return "immediate_caller_id." + error; - } - if (message.target != null && message.hasOwnProperty("target")) { - let error = $root.query.Target.verify(message.target); - if (error) - return "target." 
+ error; - } - if (message.query != null && message.hasOwnProperty("query")) - if (!$util.isString(message.query)) - return "query: string expected"; - if (message.lastpk != null && message.hasOwnProperty("lastpk")) { - let error = $root.query.QueryResult.verify(message.lastpk); - if (error) - return "lastpk." + error; + if (message.table_name != null && message.hasOwnProperty("table_name")) + if (!$util.isString(message.table_name)) + return "table_name: string expected"; + if (message.row_changes != null && message.hasOwnProperty("row_changes")) { + if (!Array.isArray(message.row_changes)) + return "row_changes: array expected"; + for (let i = 0; i < message.row_changes.length; ++i) { + let error = $root.binlogdata.RowChange.verify(message.row_changes[i]); + if (error) + return "row_changes." + error; + } } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.flags != null && message.hasOwnProperty("flags")) + if (!$util.isInteger(message.flags)) + return "flags: integer expected"; return null; }; /** - * Creates a VStreamRowsRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RowEvent message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof binlogdata.VStreamRowsRequest + * @memberof binlogdata.RowEvent * @static * @param {Object.} object Plain object - * @returns {binlogdata.VStreamRowsRequest} VStreamRowsRequest + * @returns {binlogdata.RowEvent} RowEvent */ - VStreamRowsRequest.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.VStreamRowsRequest) + RowEvent.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.RowEvent) return object; - let message = new $root.binlogdata.VStreamRowsRequest(); - if (object.effective_caller_id != null) { - if (typeof object.effective_caller_id !== "object") - throw TypeError(".binlogdata.VStreamRowsRequest.effective_caller_id: object expected"); - message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); - } - if (object.immediate_caller_id != null) { - if (typeof object.immediate_caller_id !== "object") - throw TypeError(".binlogdata.VStreamRowsRequest.immediate_caller_id: object expected"); - message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); - } - if (object.target != null) { - if (typeof object.target !== "object") - throw TypeError(".binlogdata.VStreamRowsRequest.target: object expected"); - message.target = $root.query.Target.fromObject(object.target); - } - if (object.query != null) - message.query = String(object.query); - if (object.lastpk != null) { - if (typeof object.lastpk !== "object") - throw TypeError(".binlogdata.VStreamRowsRequest.lastpk: object expected"); - message.lastpk = $root.query.QueryResult.fromObject(object.lastpk); + let message = new $root.binlogdata.RowEvent(); + if (object.table_name != null) + message.table_name = String(object.table_name); + if (object.row_changes) { + if (!Array.isArray(object.row_changes)) + throw TypeError(".binlogdata.RowEvent.row_changes: array expected"); + message.row_changes = []; + for (let i = 0; i < object.row_changes.length; 
++i) { + if (typeof object.row_changes[i] !== "object") + throw TypeError(".binlogdata.RowEvent.row_changes: object expected"); + message.row_changes[i] = $root.binlogdata.RowChange.fromObject(object.row_changes[i]); + } } + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + if (object.flags != null) + message.flags = object.flags >>> 0; return message; }; /** - * Creates a plain object from a VStreamRowsRequest message. Also converts values to other types if specified. + * Creates a plain object from a RowEvent message. Also converts values to other types if specified. * @function toObject - * @memberof binlogdata.VStreamRowsRequest + * @memberof binlogdata.RowEvent * @static - * @param {binlogdata.VStreamRowsRequest} message VStreamRowsRequest + * @param {binlogdata.RowEvent} message RowEvent * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - VStreamRowsRequest.toObject = function toObject(message, options) { + RowEvent.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) + object.row_changes = []; if (options.defaults) { - object.effective_caller_id = null; - object.immediate_caller_id = null; - object.target = null; - object.query = ""; - object.lastpk = null; + object.table_name = ""; + object.keyspace = ""; + object.shard = ""; + object.flags = 0; } - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) - object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) - object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); - if (message.target != null && message.hasOwnProperty("target")) - object.target = 
$root.query.Target.toObject(message.target, options); - if (message.query != null && message.hasOwnProperty("query")) - object.query = message.query; - if (message.lastpk != null && message.hasOwnProperty("lastpk")) - object.lastpk = $root.query.QueryResult.toObject(message.lastpk, options); + if (message.table_name != null && message.hasOwnProperty("table_name")) + object.table_name = message.table_name; + if (message.row_changes && message.row_changes.length) { + object.row_changes = []; + for (let j = 0; j < message.row_changes.length; ++j) + object.row_changes[j] = $root.binlogdata.RowChange.toObject(message.row_changes[j], options); + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.flags != null && message.hasOwnProperty("flags")) + object.flags = message.flags; return object; }; /** - * Converts this VStreamRowsRequest to JSON. + * Converts this RowEvent to JSON. 
* @function toJSON - * @memberof binlogdata.VStreamRowsRequest + * @memberof binlogdata.RowEvent * @instance * @returns {Object.} JSON object */ - VStreamRowsRequest.prototype.toJSON = function toJSON() { + RowEvent.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for VStreamRowsRequest + * Gets the default type url for RowEvent * @function getTypeUrl - * @memberof binlogdata.VStreamRowsRequest + * @memberof binlogdata.RowEvent * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - VStreamRowsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RowEvent.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.VStreamRowsRequest"; + return typeUrlPrefix + "/binlogdata.RowEvent"; }; - return VStreamRowsRequest; + return RowEvent; })(); - binlogdata.VStreamRowsResponse = (function() { + binlogdata.FieldEvent = (function() { /** - * Properties of a VStreamRowsResponse. + * Properties of a FieldEvent. 
* @memberof binlogdata - * @interface IVStreamRowsResponse - * @property {Array.|null} [fields] VStreamRowsResponse fields - * @property {Array.|null} [pkfields] VStreamRowsResponse pkfields - * @property {string|null} [gtid] VStreamRowsResponse gtid - * @property {Array.|null} [rows] VStreamRowsResponse rows - * @property {query.IRow|null} [lastpk] VStreamRowsResponse lastpk - * @property {boolean|null} [throttled] VStreamRowsResponse throttled - * @property {boolean|null} [heartbeat] VStreamRowsResponse heartbeat + * @interface IFieldEvent + * @property {string|null} [table_name] FieldEvent table_name + * @property {Array.|null} [fields] FieldEvent fields + * @property {string|null} [keyspace] FieldEvent keyspace + * @property {string|null} [shard] FieldEvent shard */ /** - * Constructs a new VStreamRowsResponse. + * Constructs a new FieldEvent. * @memberof binlogdata - * @classdesc Represents a VStreamRowsResponse. - * @implements IVStreamRowsResponse + * @classdesc Represents a FieldEvent. + * @implements IFieldEvent * @constructor - * @param {binlogdata.IVStreamRowsResponse=} [properties] Properties to set + * @param {binlogdata.IFieldEvent=} [properties] Properties to set */ - function VStreamRowsResponse(properties) { + function FieldEvent(properties) { this.fields = []; - this.pkfields = []; - this.rows = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -69183,168 +70814,120 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * VStreamRowsResponse fields. - * @member {Array.} fields - * @memberof binlogdata.VStreamRowsResponse + * FieldEvent table_name. + * @member {string} table_name + * @memberof binlogdata.FieldEvent * @instance */ - VStreamRowsResponse.prototype.fields = $util.emptyArray; + FieldEvent.prototype.table_name = ""; /** - * VStreamRowsResponse pkfields. - * @member {Array.} pkfields - * @memberof binlogdata.VStreamRowsResponse + * FieldEvent fields. 
+ * @member {Array.} fields + * @memberof binlogdata.FieldEvent * @instance */ - VStreamRowsResponse.prototype.pkfields = $util.emptyArray; - - /** - * VStreamRowsResponse gtid. - * @member {string} gtid - * @memberof binlogdata.VStreamRowsResponse - * @instance - */ - VStreamRowsResponse.prototype.gtid = ""; - - /** - * VStreamRowsResponse rows. - * @member {Array.} rows - * @memberof binlogdata.VStreamRowsResponse - * @instance - */ - VStreamRowsResponse.prototype.rows = $util.emptyArray; - - /** - * VStreamRowsResponse lastpk. - * @member {query.IRow|null|undefined} lastpk - * @memberof binlogdata.VStreamRowsResponse - * @instance - */ - VStreamRowsResponse.prototype.lastpk = null; + FieldEvent.prototype.fields = $util.emptyArray; /** - * VStreamRowsResponse throttled. - * @member {boolean} throttled - * @memberof binlogdata.VStreamRowsResponse + * FieldEvent keyspace. + * @member {string} keyspace + * @memberof binlogdata.FieldEvent * @instance */ - VStreamRowsResponse.prototype.throttled = false; + FieldEvent.prototype.keyspace = ""; /** - * VStreamRowsResponse heartbeat. - * @member {boolean} heartbeat - * @memberof binlogdata.VStreamRowsResponse + * FieldEvent shard. + * @member {string} shard + * @memberof binlogdata.FieldEvent * @instance */ - VStreamRowsResponse.prototype.heartbeat = false; + FieldEvent.prototype.shard = ""; /** - * Creates a new VStreamRowsResponse instance using the specified properties. + * Creates a new FieldEvent instance using the specified properties. 
* @function create - * @memberof binlogdata.VStreamRowsResponse + * @memberof binlogdata.FieldEvent * @static - * @param {binlogdata.IVStreamRowsResponse=} [properties] Properties to set - * @returns {binlogdata.VStreamRowsResponse} VStreamRowsResponse instance + * @param {binlogdata.IFieldEvent=} [properties] Properties to set + * @returns {binlogdata.FieldEvent} FieldEvent instance */ - VStreamRowsResponse.create = function create(properties) { - return new VStreamRowsResponse(properties); + FieldEvent.create = function create(properties) { + return new FieldEvent(properties); }; /** - * Encodes the specified VStreamRowsResponse message. Does not implicitly {@link binlogdata.VStreamRowsResponse.verify|verify} messages. + * Encodes the specified FieldEvent message. Does not implicitly {@link binlogdata.FieldEvent.verify|verify} messages. * @function encode - * @memberof binlogdata.VStreamRowsResponse + * @memberof binlogdata.FieldEvent * @static - * @param {binlogdata.IVStreamRowsResponse} message VStreamRowsResponse message or plain object to encode + * @param {binlogdata.IFieldEvent} message FieldEvent message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VStreamRowsResponse.encode = function encode(message, writer) { + FieldEvent.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.table_name != null && Object.hasOwnProperty.call(message, "table_name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.table_name); if (message.fields != null && message.fields.length) for (let i = 0; i < message.fields.length; ++i) - $root.query.Field.encode(message.fields[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.pkfields != null && message.pkfields.length) - for (let i = 0; i < message.pkfields.length; ++i) - $root.query.Field.encode(message.pkfields[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); 
- if (message.gtid != null && Object.hasOwnProperty.call(message, "gtid")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.gtid); - if (message.rows != null && message.rows.length) - for (let i = 0; i < message.rows.length; ++i) - $root.query.Row.encode(message.rows[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.lastpk != null && Object.hasOwnProperty.call(message, "lastpk")) - $root.query.Row.encode(message.lastpk, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); - if (message.throttled != null && Object.hasOwnProperty.call(message, "throttled")) - writer.uint32(/* id 6, wireType 0 =*/48).bool(message.throttled); - if (message.heartbeat != null && Object.hasOwnProperty.call(message, "heartbeat")) - writer.uint32(/* id 7, wireType 0 =*/56).bool(message.heartbeat); + $root.query.Field.encode(message.fields[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.shard); return writer; }; /** - * Encodes the specified VStreamRowsResponse message, length delimited. Does not implicitly {@link binlogdata.VStreamRowsResponse.verify|verify} messages. + * Encodes the specified FieldEvent message, length delimited. Does not implicitly {@link binlogdata.FieldEvent.verify|verify} messages. 
* @function encodeDelimited - * @memberof binlogdata.VStreamRowsResponse + * @memberof binlogdata.FieldEvent * @static - * @param {binlogdata.IVStreamRowsResponse} message VStreamRowsResponse message or plain object to encode + * @param {binlogdata.IFieldEvent} message FieldEvent message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VStreamRowsResponse.encodeDelimited = function encodeDelimited(message, writer) { + FieldEvent.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a VStreamRowsResponse message from the specified reader or buffer. + * Decodes a FieldEvent message from the specified reader or buffer. * @function decode - * @memberof binlogdata.VStreamRowsResponse + * @memberof binlogdata.FieldEvent * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.VStreamRowsResponse} VStreamRowsResponse + * @returns {binlogdata.FieldEvent} FieldEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VStreamRowsResponse.decode = function decode(reader, length) { + FieldEvent.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.VStreamRowsResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.binlogdata.FieldEvent(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.fields && message.fields.length)) - message.fields = []; - message.fields.push($root.query.Field.decode(reader, reader.uint32())); + message.table_name = reader.string(); break; } case 2: { - if (!(message.pkfields && message.pkfields.length)) - message.pkfields = []; - message.pkfields.push($root.query.Field.decode(reader, reader.uint32())); + if (!(message.fields && message.fields.length)) + message.fields = []; + message.fields.push($root.query.Field.decode(reader, reader.uint32())); break; } case 3: { - message.gtid = reader.string(); + message.keyspace = reader.string(); break; } case 4: { - if (!(message.rows && message.rows.length)) - message.rows = []; - message.rows.push($root.query.Row.decode(reader, reader.uint32())); - break; - } - case 5: { - message.lastpk = $root.query.Row.decode(reader, reader.uint32()); - break; - } - case 6: { - message.throttled = reader.bool(); - break; - } - case 7: { - message.heartbeat = reader.bool(); + message.shard = reader.string(); break; } default: @@ -69356,32 +70939,35 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a VStreamRowsResponse message from the specified reader or buffer, length delimited. + * Decodes a FieldEvent message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof binlogdata.VStreamRowsResponse + * @memberof binlogdata.FieldEvent * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.VStreamRowsResponse} VStreamRowsResponse + * @returns {binlogdata.FieldEvent} FieldEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VStreamRowsResponse.decodeDelimited = function decodeDelimited(reader) { + FieldEvent.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a VStreamRowsResponse message. + * Verifies a FieldEvent message. * @function verify - * @memberof binlogdata.VStreamRowsResponse + * @memberof binlogdata.FieldEvent * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - VStreamRowsResponse.verify = function verify(message) { + FieldEvent.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.table_name != null && message.hasOwnProperty("table_name")) + if (!$util.isString(message.table_name)) + return "table_name: string expected"; if (message.fields != null && message.hasOwnProperty("fields")) { if (!Array.isArray(message.fields)) return "fields: array expected"; @@ -69391,195 +70977,131 @@ export const binlogdata = $root.binlogdata = (() => { return "fields." + error; } } - if (message.pkfields != null && message.hasOwnProperty("pkfields")) { - if (!Array.isArray(message.pkfields)) - return "pkfields: array expected"; - for (let i = 0; i < message.pkfields.length; ++i) { - let error = $root.query.Field.verify(message.pkfields[i]); - if (error) - return "pkfields." 
+ error; - } - } - if (message.gtid != null && message.hasOwnProperty("gtid")) - if (!$util.isString(message.gtid)) - return "gtid: string expected"; - if (message.rows != null && message.hasOwnProperty("rows")) { - if (!Array.isArray(message.rows)) - return "rows: array expected"; - for (let i = 0; i < message.rows.length; ++i) { - let error = $root.query.Row.verify(message.rows[i]); - if (error) - return "rows." + error; - } - } - if (message.lastpk != null && message.hasOwnProperty("lastpk")) { - let error = $root.query.Row.verify(message.lastpk); - if (error) - return "lastpk." + error; - } - if (message.throttled != null && message.hasOwnProperty("throttled")) - if (typeof message.throttled !== "boolean") - return "throttled: boolean expected"; - if (message.heartbeat != null && message.hasOwnProperty("heartbeat")) - if (typeof message.heartbeat !== "boolean") - return "heartbeat: boolean expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; return null; }; /** - * Creates a VStreamRowsResponse message from a plain object. Also converts values to their respective internal types. + * Creates a FieldEvent message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof binlogdata.VStreamRowsResponse + * @memberof binlogdata.FieldEvent * @static * @param {Object.} object Plain object - * @returns {binlogdata.VStreamRowsResponse} VStreamRowsResponse + * @returns {binlogdata.FieldEvent} FieldEvent */ - VStreamRowsResponse.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.VStreamRowsResponse) + FieldEvent.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.FieldEvent) return object; - let message = new $root.binlogdata.VStreamRowsResponse(); + let message = new $root.binlogdata.FieldEvent(); + if (object.table_name != null) + message.table_name = String(object.table_name); if (object.fields) { if (!Array.isArray(object.fields)) - throw TypeError(".binlogdata.VStreamRowsResponse.fields: array expected"); + throw TypeError(".binlogdata.FieldEvent.fields: array expected"); message.fields = []; for (let i = 0; i < object.fields.length; ++i) { if (typeof object.fields[i] !== "object") - throw TypeError(".binlogdata.VStreamRowsResponse.fields: object expected"); + throw TypeError(".binlogdata.FieldEvent.fields: object expected"); message.fields[i] = $root.query.Field.fromObject(object.fields[i]); } } - if (object.pkfields) { - if (!Array.isArray(object.pkfields)) - throw TypeError(".binlogdata.VStreamRowsResponse.pkfields: array expected"); - message.pkfields = []; - for (let i = 0; i < object.pkfields.length; ++i) { - if (typeof object.pkfields[i] !== "object") - throw TypeError(".binlogdata.VStreamRowsResponse.pkfields: object expected"); - message.pkfields[i] = $root.query.Field.fromObject(object.pkfields[i]); - } - } - if (object.gtid != null) - message.gtid = String(object.gtid); - if (object.rows) { - if (!Array.isArray(object.rows)) - throw TypeError(".binlogdata.VStreamRowsResponse.rows: array expected"); - message.rows = []; - for (let i = 0; i < object.rows.length; ++i) { - if (typeof object.rows[i] !== "object") - throw 
TypeError(".binlogdata.VStreamRowsResponse.rows: object expected"); - message.rows[i] = $root.query.Row.fromObject(object.rows[i]); - } - } - if (object.lastpk != null) { - if (typeof object.lastpk !== "object") - throw TypeError(".binlogdata.VStreamRowsResponse.lastpk: object expected"); - message.lastpk = $root.query.Row.fromObject(object.lastpk); - } - if (object.throttled != null) - message.throttled = Boolean(object.throttled); - if (object.heartbeat != null) - message.heartbeat = Boolean(object.heartbeat); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); return message; }; /** - * Creates a plain object from a VStreamRowsResponse message. Also converts values to other types if specified. + * Creates a plain object from a FieldEvent message. Also converts values to other types if specified. * @function toObject - * @memberof binlogdata.VStreamRowsResponse + * @memberof binlogdata.FieldEvent * @static - * @param {binlogdata.VStreamRowsResponse} message VStreamRowsResponse + * @param {binlogdata.FieldEvent} message FieldEvent * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - VStreamRowsResponse.toObject = function toObject(message, options) { + FieldEvent.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) { + if (options.arrays || options.defaults) object.fields = []; - object.pkfields = []; - object.rows = []; - } if (options.defaults) { - object.gtid = ""; - object.lastpk = null; - object.throttled = false; - object.heartbeat = false; + object.table_name = ""; + object.keyspace = ""; + object.shard = ""; } + if (message.table_name != null && message.hasOwnProperty("table_name")) + object.table_name = message.table_name; if (message.fields && message.fields.length) { object.fields = []; for (let j = 0; j < 
message.fields.length; ++j) object.fields[j] = $root.query.Field.toObject(message.fields[j], options); } - if (message.pkfields && message.pkfields.length) { - object.pkfields = []; - for (let j = 0; j < message.pkfields.length; ++j) - object.pkfields[j] = $root.query.Field.toObject(message.pkfields[j], options); - } - if (message.gtid != null && message.hasOwnProperty("gtid")) - object.gtid = message.gtid; - if (message.rows && message.rows.length) { - object.rows = []; - for (let j = 0; j < message.rows.length; ++j) - object.rows[j] = $root.query.Row.toObject(message.rows[j], options); - } - if (message.lastpk != null && message.hasOwnProperty("lastpk")) - object.lastpk = $root.query.Row.toObject(message.lastpk, options); - if (message.throttled != null && message.hasOwnProperty("throttled")) - object.throttled = message.throttled; - if (message.heartbeat != null && message.hasOwnProperty("heartbeat")) - object.heartbeat = message.heartbeat; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; return object; }; /** - * Converts this VStreamRowsResponse to JSON. + * Converts this FieldEvent to JSON. 
* @function toJSON - * @memberof binlogdata.VStreamRowsResponse + * @memberof binlogdata.FieldEvent * @instance * @returns {Object.} JSON object */ - VStreamRowsResponse.prototype.toJSON = function toJSON() { + FieldEvent.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for VStreamRowsResponse + * Gets the default type url for FieldEvent * @function getTypeUrl - * @memberof binlogdata.VStreamRowsResponse + * @memberof binlogdata.FieldEvent * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - VStreamRowsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + FieldEvent.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.VStreamRowsResponse"; + return typeUrlPrefix + "/binlogdata.FieldEvent"; }; - return VStreamRowsResponse; + return FieldEvent; })(); - binlogdata.LastPKEvent = (function() { + binlogdata.ShardGtid = (function() { /** - * Properties of a LastPKEvent. + * Properties of a ShardGtid. * @memberof binlogdata - * @interface ILastPKEvent - * @property {binlogdata.ITableLastPK|null} [table_last_p_k] LastPKEvent table_last_p_k - * @property {boolean|null} [completed] LastPKEvent completed + * @interface IShardGtid + * @property {string|null} [keyspace] ShardGtid keyspace + * @property {string|null} [shard] ShardGtid shard + * @property {string|null} [gtid] ShardGtid gtid + * @property {Array.|null} [table_p_ks] ShardGtid table_p_ks */ /** - * Constructs a new LastPKEvent. + * Constructs a new ShardGtid. * @memberof binlogdata - * @classdesc Represents a LastPKEvent. - * @implements ILastPKEvent + * @classdesc Represents a ShardGtid. 
+ * @implements IShardGtid * @constructor - * @param {binlogdata.ILastPKEvent=} [properties] Properties to set + * @param {binlogdata.IShardGtid=} [properties] Properties to set */ - function LastPKEvent(properties) { + function ShardGtid(properties) { + this.table_p_ks = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -69587,89 +71109,120 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * LastPKEvent table_last_p_k. - * @member {binlogdata.ITableLastPK|null|undefined} table_last_p_k - * @memberof binlogdata.LastPKEvent + * ShardGtid keyspace. + * @member {string} keyspace + * @memberof binlogdata.ShardGtid * @instance */ - LastPKEvent.prototype.table_last_p_k = null; + ShardGtid.prototype.keyspace = ""; /** - * LastPKEvent completed. - * @member {boolean} completed - * @memberof binlogdata.LastPKEvent + * ShardGtid shard. + * @member {string} shard + * @memberof binlogdata.ShardGtid * @instance */ - LastPKEvent.prototype.completed = false; + ShardGtid.prototype.shard = ""; /** - * Creates a new LastPKEvent instance using the specified properties. + * ShardGtid gtid. + * @member {string} gtid + * @memberof binlogdata.ShardGtid + * @instance + */ + ShardGtid.prototype.gtid = ""; + + /** + * ShardGtid table_p_ks. + * @member {Array.} table_p_ks + * @memberof binlogdata.ShardGtid + * @instance + */ + ShardGtid.prototype.table_p_ks = $util.emptyArray; + + /** + * Creates a new ShardGtid instance using the specified properties. 
* @function create - * @memberof binlogdata.LastPKEvent + * @memberof binlogdata.ShardGtid * @static - * @param {binlogdata.ILastPKEvent=} [properties] Properties to set - * @returns {binlogdata.LastPKEvent} LastPKEvent instance + * @param {binlogdata.IShardGtid=} [properties] Properties to set + * @returns {binlogdata.ShardGtid} ShardGtid instance */ - LastPKEvent.create = function create(properties) { - return new LastPKEvent(properties); + ShardGtid.create = function create(properties) { + return new ShardGtid(properties); }; /** - * Encodes the specified LastPKEvent message. Does not implicitly {@link binlogdata.LastPKEvent.verify|verify} messages. + * Encodes the specified ShardGtid message. Does not implicitly {@link binlogdata.ShardGtid.verify|verify} messages. * @function encode - * @memberof binlogdata.LastPKEvent + * @memberof binlogdata.ShardGtid * @static - * @param {binlogdata.ILastPKEvent} message LastPKEvent message or plain object to encode + * @param {binlogdata.IShardGtid} message ShardGtid message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - LastPKEvent.encode = function encode(message, writer) { + ShardGtid.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.table_last_p_k != null && Object.hasOwnProperty.call(message, "table_last_p_k")) - $root.binlogdata.TableLastPK.encode(message.table_last_p_k, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.completed != null && Object.hasOwnProperty.call(message, "completed")) - writer.uint32(/* id 2, wireType 0 =*/16).bool(message.completed); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.gtid != null 
&& Object.hasOwnProperty.call(message, "gtid")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.gtid); + if (message.table_p_ks != null && message.table_p_ks.length) + for (let i = 0; i < message.table_p_ks.length; ++i) + $root.binlogdata.TableLastPK.encode(message.table_p_ks[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; /** - * Encodes the specified LastPKEvent message, length delimited. Does not implicitly {@link binlogdata.LastPKEvent.verify|verify} messages. + * Encodes the specified ShardGtid message, length delimited. Does not implicitly {@link binlogdata.ShardGtid.verify|verify} messages. * @function encodeDelimited - * @memberof binlogdata.LastPKEvent + * @memberof binlogdata.ShardGtid * @static - * @param {binlogdata.ILastPKEvent} message LastPKEvent message or plain object to encode + * @param {binlogdata.IShardGtid} message ShardGtid message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - LastPKEvent.encodeDelimited = function encodeDelimited(message, writer) { + ShardGtid.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a LastPKEvent message from the specified reader or buffer. + * Decodes a ShardGtid message from the specified reader or buffer. 
* @function decode - * @memberof binlogdata.LastPKEvent + * @memberof binlogdata.ShardGtid * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.LastPKEvent} LastPKEvent + * @returns {binlogdata.ShardGtid} ShardGtid * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - LastPKEvent.decode = function decode(reader, length) { + ShardGtid.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.LastPKEvent(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.ShardGtid(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.table_last_p_k = $root.binlogdata.TableLastPK.decode(reader, reader.uint32()); + message.keyspace = reader.string(); break; } case 2: { - message.completed = reader.bool(); + message.shard = reader.string(); + break; + } + case 3: { + message.gtid = reader.string(); + break; + } + case 4: { + if (!(message.table_p_ks && message.table_p_ks.length)) + message.table_p_ks = []; + message.table_p_ks.push($root.binlogdata.TableLastPK.decode(reader, reader.uint32())); break; } default: @@ -69681,137 +71234,166 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a LastPKEvent message from the specified reader or buffer, length delimited. + * Decodes a ShardGtid message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof binlogdata.LastPKEvent + * @memberof binlogdata.ShardGtid * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.LastPKEvent} LastPKEvent + * @returns {binlogdata.ShardGtid} ShardGtid * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - LastPKEvent.decodeDelimited = function decodeDelimited(reader) { + ShardGtid.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a LastPKEvent message. + * Verifies a ShardGtid message. * @function verify - * @memberof binlogdata.LastPKEvent + * @memberof binlogdata.ShardGtid * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - LastPKEvent.verify = function verify(message) { + ShardGtid.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.table_last_p_k != null && message.hasOwnProperty("table_last_p_k")) { - let error = $root.binlogdata.TableLastPK.verify(message.table_last_p_k); - if (error) - return "table_last_p_k." 
+ error; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.gtid != null && message.hasOwnProperty("gtid")) + if (!$util.isString(message.gtid)) + return "gtid: string expected"; + if (message.table_p_ks != null && message.hasOwnProperty("table_p_ks")) { + if (!Array.isArray(message.table_p_ks)) + return "table_p_ks: array expected"; + for (let i = 0; i < message.table_p_ks.length; ++i) { + let error = $root.binlogdata.TableLastPK.verify(message.table_p_ks[i]); + if (error) + return "table_p_ks." + error; + } } - if (message.completed != null && message.hasOwnProperty("completed")) - if (typeof message.completed !== "boolean") - return "completed: boolean expected"; return null; }; /** - * Creates a LastPKEvent message from a plain object. Also converts values to their respective internal types. + * Creates a ShardGtid message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof binlogdata.LastPKEvent + * @memberof binlogdata.ShardGtid * @static * @param {Object.} object Plain object - * @returns {binlogdata.LastPKEvent} LastPKEvent + * @returns {binlogdata.ShardGtid} ShardGtid */ - LastPKEvent.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.LastPKEvent) + ShardGtid.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.ShardGtid) return object; - let message = new $root.binlogdata.LastPKEvent(); - if (object.table_last_p_k != null) { - if (typeof object.table_last_p_k !== "object") - throw TypeError(".binlogdata.LastPKEvent.table_last_p_k: object expected"); - message.table_last_p_k = $root.binlogdata.TableLastPK.fromObject(object.table_last_p_k); + let message = new $root.binlogdata.ShardGtid(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + if (object.gtid != null) + message.gtid = String(object.gtid); + if (object.table_p_ks) { + if (!Array.isArray(object.table_p_ks)) + throw TypeError(".binlogdata.ShardGtid.table_p_ks: array expected"); + message.table_p_ks = []; + for (let i = 0; i < object.table_p_ks.length; ++i) { + if (typeof object.table_p_ks[i] !== "object") + throw TypeError(".binlogdata.ShardGtid.table_p_ks: object expected"); + message.table_p_ks[i] = $root.binlogdata.TableLastPK.fromObject(object.table_p_ks[i]); + } } - if (object.completed != null) - message.completed = Boolean(object.completed); return message; }; /** - * Creates a plain object from a LastPKEvent message. Also converts values to other types if specified. + * Creates a plain object from a ShardGtid message. Also converts values to other types if specified. 
* @function toObject - * @memberof binlogdata.LastPKEvent + * @memberof binlogdata.ShardGtid * @static - * @param {binlogdata.LastPKEvent} message LastPKEvent + * @param {binlogdata.ShardGtid} message ShardGtid * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - LastPKEvent.toObject = function toObject(message, options) { + ShardGtid.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) + object.table_p_ks = []; if (options.defaults) { - object.table_last_p_k = null; - object.completed = false; + object.keyspace = ""; + object.shard = ""; + object.gtid = ""; } - if (message.table_last_p_k != null && message.hasOwnProperty("table_last_p_k")) - object.table_last_p_k = $root.binlogdata.TableLastPK.toObject(message.table_last_p_k, options); - if (message.completed != null && message.hasOwnProperty("completed")) - object.completed = message.completed; - return object; - }; - - /** - * Converts this LastPKEvent to JSON. + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.gtid != null && message.hasOwnProperty("gtid")) + object.gtid = message.gtid; + if (message.table_p_ks && message.table_p_ks.length) { + object.table_p_ks = []; + for (let j = 0; j < message.table_p_ks.length; ++j) + object.table_p_ks[j] = $root.binlogdata.TableLastPK.toObject(message.table_p_ks[j], options); + } + return object; + }; + + /** + * Converts this ShardGtid to JSON. 
* @function toJSON - * @memberof binlogdata.LastPKEvent + * @memberof binlogdata.ShardGtid * @instance * @returns {Object.} JSON object */ - LastPKEvent.prototype.toJSON = function toJSON() { + ShardGtid.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for LastPKEvent + * Gets the default type url for ShardGtid * @function getTypeUrl - * @memberof binlogdata.LastPKEvent + * @memberof binlogdata.ShardGtid * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - LastPKEvent.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ShardGtid.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.LastPKEvent"; + return typeUrlPrefix + "/binlogdata.ShardGtid"; }; - return LastPKEvent; + return ShardGtid; })(); - binlogdata.TableLastPK = (function() { + binlogdata.VGtid = (function() { /** - * Properties of a TableLastPK. + * Properties of a VGtid. * @memberof binlogdata - * @interface ITableLastPK - * @property {string|null} [table_name] TableLastPK table_name - * @property {query.IQueryResult|null} [lastpk] TableLastPK lastpk + * @interface IVGtid + * @property {Array.|null} [shard_gtids] VGtid shard_gtids */ /** - * Constructs a new TableLastPK. + * Constructs a new VGtid. * @memberof binlogdata - * @classdesc Represents a TableLastPK. - * @implements ITableLastPK + * @classdesc Represents a VGtid. 
+ * @implements IVGtid * @constructor - * @param {binlogdata.ITableLastPK=} [properties] Properties to set + * @param {binlogdata.IVGtid=} [properties] Properties to set */ - function TableLastPK(properties) { + function VGtid(properties) { + this.shard_gtids = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -69819,89 +71401,78 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * TableLastPK table_name. - * @member {string} table_name - * @memberof binlogdata.TableLastPK - * @instance - */ - TableLastPK.prototype.table_name = ""; - - /** - * TableLastPK lastpk. - * @member {query.IQueryResult|null|undefined} lastpk - * @memberof binlogdata.TableLastPK + * VGtid shard_gtids. + * @member {Array.} shard_gtids + * @memberof binlogdata.VGtid * @instance */ - TableLastPK.prototype.lastpk = null; + VGtid.prototype.shard_gtids = $util.emptyArray; /** - * Creates a new TableLastPK instance using the specified properties. + * Creates a new VGtid instance using the specified properties. * @function create - * @memberof binlogdata.TableLastPK + * @memberof binlogdata.VGtid * @static - * @param {binlogdata.ITableLastPK=} [properties] Properties to set - * @returns {binlogdata.TableLastPK} TableLastPK instance + * @param {binlogdata.IVGtid=} [properties] Properties to set + * @returns {binlogdata.VGtid} VGtid instance */ - TableLastPK.create = function create(properties) { - return new TableLastPK(properties); + VGtid.create = function create(properties) { + return new VGtid(properties); }; /** - * Encodes the specified TableLastPK message. Does not implicitly {@link binlogdata.TableLastPK.verify|verify} messages. + * Encodes the specified VGtid message. Does not implicitly {@link binlogdata.VGtid.verify|verify} messages. 
* @function encode - * @memberof binlogdata.TableLastPK + * @memberof binlogdata.VGtid * @static - * @param {binlogdata.ITableLastPK} message TableLastPK message or plain object to encode + * @param {binlogdata.IVGtid} message VGtid message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - TableLastPK.encode = function encode(message, writer) { + VGtid.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.table_name != null && Object.hasOwnProperty.call(message, "table_name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.table_name); - if (message.lastpk != null && Object.hasOwnProperty.call(message, "lastpk")) - $root.query.QueryResult.encode(message.lastpk, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.shard_gtids != null && message.shard_gtids.length) + for (let i = 0; i < message.shard_gtids.length; ++i) + $root.binlogdata.ShardGtid.encode(message.shard_gtids[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified TableLastPK message, length delimited. Does not implicitly {@link binlogdata.TableLastPK.verify|verify} messages. + * Encodes the specified VGtid message, length delimited. Does not implicitly {@link binlogdata.VGtid.verify|verify} messages. 
* @function encodeDelimited - * @memberof binlogdata.TableLastPK + * @memberof binlogdata.VGtid * @static - * @param {binlogdata.ITableLastPK} message TableLastPK message or plain object to encode + * @param {binlogdata.IVGtid} message VGtid message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - TableLastPK.encodeDelimited = function encodeDelimited(message, writer) { + VGtid.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a TableLastPK message from the specified reader or buffer. + * Decodes a VGtid message from the specified reader or buffer. * @function decode - * @memberof binlogdata.TableLastPK + * @memberof binlogdata.VGtid * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.TableLastPK} TableLastPK + * @returns {binlogdata.VGtid} VGtid * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TableLastPK.decode = function decode(reader, length) { + VGtid.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.TableLastPK(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.binlogdata.VGtid(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.table_name = reader.string(); - break; - } - case 3: { - message.lastpk = $root.query.QueryResult.decode(reader, reader.uint32()); + if (!(message.shard_gtids && message.shard_gtids.length)) + message.shard_gtids = []; + message.shard_gtids.push($root.binlogdata.ShardGtid.decode(reader, reader.uint32())); break; } default: @@ -69913,139 +71484,140 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a TableLastPK message from the specified reader or buffer, length delimited. + * Decodes a VGtid message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof binlogdata.TableLastPK + * @memberof binlogdata.VGtid * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.TableLastPK} TableLastPK + * @returns {binlogdata.VGtid} VGtid * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TableLastPK.decodeDelimited = function decodeDelimited(reader) { + VGtid.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a TableLastPK message. + * Verifies a VGtid message. 
* @function verify - * @memberof binlogdata.TableLastPK + * @memberof binlogdata.VGtid * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - TableLastPK.verify = function verify(message) { + VGtid.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.table_name != null && message.hasOwnProperty("table_name")) - if (!$util.isString(message.table_name)) - return "table_name: string expected"; - if (message.lastpk != null && message.hasOwnProperty("lastpk")) { - let error = $root.query.QueryResult.verify(message.lastpk); - if (error) - return "lastpk." + error; + if (message.shard_gtids != null && message.hasOwnProperty("shard_gtids")) { + if (!Array.isArray(message.shard_gtids)) + return "shard_gtids: array expected"; + for (let i = 0; i < message.shard_gtids.length; ++i) { + let error = $root.binlogdata.ShardGtid.verify(message.shard_gtids[i]); + if (error) + return "shard_gtids." + error; + } } return null; }; /** - * Creates a TableLastPK message from a plain object. Also converts values to their respective internal types. + * Creates a VGtid message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof binlogdata.TableLastPK + * @memberof binlogdata.VGtid * @static * @param {Object.} object Plain object - * @returns {binlogdata.TableLastPK} TableLastPK + * @returns {binlogdata.VGtid} VGtid */ - TableLastPK.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.TableLastPK) + VGtid.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.VGtid) return object; - let message = new $root.binlogdata.TableLastPK(); - if (object.table_name != null) - message.table_name = String(object.table_name); - if (object.lastpk != null) { - if (typeof object.lastpk !== "object") - throw TypeError(".binlogdata.TableLastPK.lastpk: object expected"); - message.lastpk = $root.query.QueryResult.fromObject(object.lastpk); + let message = new $root.binlogdata.VGtid(); + if (object.shard_gtids) { + if (!Array.isArray(object.shard_gtids)) + throw TypeError(".binlogdata.VGtid.shard_gtids: array expected"); + message.shard_gtids = []; + for (let i = 0; i < object.shard_gtids.length; ++i) { + if (typeof object.shard_gtids[i] !== "object") + throw TypeError(".binlogdata.VGtid.shard_gtids: object expected"); + message.shard_gtids[i] = $root.binlogdata.ShardGtid.fromObject(object.shard_gtids[i]); + } } return message; }; /** - * Creates a plain object from a TableLastPK message. Also converts values to other types if specified. + * Creates a plain object from a VGtid message. Also converts values to other types if specified. 
* @function toObject - * @memberof binlogdata.TableLastPK + * @memberof binlogdata.VGtid * @static - * @param {binlogdata.TableLastPK} message TableLastPK + * @param {binlogdata.VGtid} message VGtid * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - TableLastPK.toObject = function toObject(message, options) { + VGtid.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.table_name = ""; - object.lastpk = null; + if (options.arrays || options.defaults) + object.shard_gtids = []; + if (message.shard_gtids && message.shard_gtids.length) { + object.shard_gtids = []; + for (let j = 0; j < message.shard_gtids.length; ++j) + object.shard_gtids[j] = $root.binlogdata.ShardGtid.toObject(message.shard_gtids[j], options); } - if (message.table_name != null && message.hasOwnProperty("table_name")) - object.table_name = message.table_name; - if (message.lastpk != null && message.hasOwnProperty("lastpk")) - object.lastpk = $root.query.QueryResult.toObject(message.lastpk, options); return object; }; /** - * Converts this TableLastPK to JSON. + * Converts this VGtid to JSON. 
* @function toJSON - * @memberof binlogdata.TableLastPK + * @memberof binlogdata.VGtid * @instance * @returns {Object.} JSON object */ - TableLastPK.prototype.toJSON = function toJSON() { + VGtid.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for TableLastPK + * Gets the default type url for VGtid * @function getTypeUrl - * @memberof binlogdata.TableLastPK + * @memberof binlogdata.VGtid * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - TableLastPK.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VGtid.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.TableLastPK"; + return typeUrlPrefix + "/binlogdata.VGtid"; }; - return TableLastPK; + return VGtid; })(); - binlogdata.VStreamResultsRequest = (function() { + binlogdata.KeyspaceShard = (function() { /** - * Properties of a VStreamResultsRequest. + * Properties of a KeyspaceShard. * @memberof binlogdata - * @interface IVStreamResultsRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] VStreamResultsRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] VStreamResultsRequest immediate_caller_id - * @property {query.ITarget|null} [target] VStreamResultsRequest target - * @property {string|null} [query] VStreamResultsRequest query + * @interface IKeyspaceShard + * @property {string|null} [keyspace] KeyspaceShard keyspace + * @property {string|null} [shard] KeyspaceShard shard */ /** - * Constructs a new VStreamResultsRequest. + * Constructs a new KeyspaceShard. * @memberof binlogdata - * @classdesc Represents a VStreamResultsRequest. - * @implements IVStreamResultsRequest + * @classdesc Represents a KeyspaceShard. 
+ * @implements IKeyspaceShard * @constructor - * @param {binlogdata.IVStreamResultsRequest=} [properties] Properties to set + * @param {binlogdata.IKeyspaceShard=} [properties] Properties to set */ - function VStreamResultsRequest(properties) { + function KeyspaceShard(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -70053,117 +71625,89 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * VStreamResultsRequest effective_caller_id. - * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof binlogdata.VStreamResultsRequest - * @instance - */ - VStreamResultsRequest.prototype.effective_caller_id = null; - - /** - * VStreamResultsRequest immediate_caller_id. - * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof binlogdata.VStreamResultsRequest - * @instance - */ - VStreamResultsRequest.prototype.immediate_caller_id = null; - - /** - * VStreamResultsRequest target. - * @member {query.ITarget|null|undefined} target - * @memberof binlogdata.VStreamResultsRequest + * KeyspaceShard keyspace. + * @member {string} keyspace + * @memberof binlogdata.KeyspaceShard * @instance */ - VStreamResultsRequest.prototype.target = null; + KeyspaceShard.prototype.keyspace = ""; /** - * VStreamResultsRequest query. - * @member {string} query - * @memberof binlogdata.VStreamResultsRequest + * KeyspaceShard shard. + * @member {string} shard + * @memberof binlogdata.KeyspaceShard * @instance */ - VStreamResultsRequest.prototype.query = ""; + KeyspaceShard.prototype.shard = ""; /** - * Creates a new VStreamResultsRequest instance using the specified properties. + * Creates a new KeyspaceShard instance using the specified properties. 
* @function create - * @memberof binlogdata.VStreamResultsRequest + * @memberof binlogdata.KeyspaceShard * @static - * @param {binlogdata.IVStreamResultsRequest=} [properties] Properties to set - * @returns {binlogdata.VStreamResultsRequest} VStreamResultsRequest instance + * @param {binlogdata.IKeyspaceShard=} [properties] Properties to set + * @returns {binlogdata.KeyspaceShard} KeyspaceShard instance */ - VStreamResultsRequest.create = function create(properties) { - return new VStreamResultsRequest(properties); + KeyspaceShard.create = function create(properties) { + return new KeyspaceShard(properties); }; /** - * Encodes the specified VStreamResultsRequest message. Does not implicitly {@link binlogdata.VStreamResultsRequest.verify|verify} messages. + * Encodes the specified KeyspaceShard message. Does not implicitly {@link binlogdata.KeyspaceShard.verify|verify} messages. * @function encode - * @memberof binlogdata.VStreamResultsRequest + * @memberof binlogdata.KeyspaceShard * @static - * @param {binlogdata.IVStreamResultsRequest} message VStreamResultsRequest message or plain object to encode + * @param {binlogdata.IKeyspaceShard} message KeyspaceShard message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VStreamResultsRequest.encode = function encode(message, writer) { + KeyspaceShard.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) - $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) - $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.target != null && Object.hasOwnProperty.call(message, 
"target")) - $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.query != null && Object.hasOwnProperty.call(message, "query")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.query); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); return writer; }; /** - * Encodes the specified VStreamResultsRequest message, length delimited. Does not implicitly {@link binlogdata.VStreamResultsRequest.verify|verify} messages. + * Encodes the specified KeyspaceShard message, length delimited. Does not implicitly {@link binlogdata.KeyspaceShard.verify|verify} messages. * @function encodeDelimited - * @memberof binlogdata.VStreamResultsRequest + * @memberof binlogdata.KeyspaceShard * @static - * @param {binlogdata.IVStreamResultsRequest} message VStreamResultsRequest message or plain object to encode + * @param {binlogdata.IKeyspaceShard} message KeyspaceShard message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VStreamResultsRequest.encodeDelimited = function encodeDelimited(message, writer) { + KeyspaceShard.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a VStreamResultsRequest message from the specified reader or buffer. + * Decodes a KeyspaceShard message from the specified reader or buffer. 
* @function decode - * @memberof binlogdata.VStreamResultsRequest + * @memberof binlogdata.KeyspaceShard * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.VStreamResultsRequest} VStreamResultsRequest + * @returns {binlogdata.KeyspaceShard} KeyspaceShard * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VStreamResultsRequest.decode = function decode(reader, length) { + KeyspaceShard.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.VStreamResultsRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.KeyspaceShard(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); + message.keyspace = reader.string(); break; } case 2: { - message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); - break; - } - case 3: { - message.target = $root.query.Target.decode(reader, reader.uint32()); - break; - } - case 4: { - message.query = reader.string(); + message.shard = reader.string(); break; } default: @@ -70175,166 +71719,155 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a VStreamResultsRequest message from the specified reader or buffer, length delimited. + * Decodes a KeyspaceShard message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof binlogdata.VStreamResultsRequest + * @memberof binlogdata.KeyspaceShard * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.VStreamResultsRequest} VStreamResultsRequest + * @returns {binlogdata.KeyspaceShard} KeyspaceShard * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VStreamResultsRequest.decodeDelimited = function decodeDelimited(reader) { + KeyspaceShard.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a VStreamResultsRequest message. + * Verifies a KeyspaceShard message. * @function verify - * @memberof binlogdata.VStreamResultsRequest + * @memberof binlogdata.KeyspaceShard * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - VStreamResultsRequest.verify = function verify(message) { + KeyspaceShard.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { - let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); - if (error) - return "effective_caller_id." + error; - } - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { - let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); - if (error) - return "immediate_caller_id." + error; - } - if (message.target != null && message.hasOwnProperty("target")) { - let error = $root.query.Target.verify(message.target); - if (error) - return "target." 
+ error; - } - if (message.query != null && message.hasOwnProperty("query")) - if (!$util.isString(message.query)) - return "query: string expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; return null; }; /** - * Creates a VStreamResultsRequest message from a plain object. Also converts values to their respective internal types. + * Creates a KeyspaceShard message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof binlogdata.VStreamResultsRequest + * @memberof binlogdata.KeyspaceShard * @static * @param {Object.} object Plain object - * @returns {binlogdata.VStreamResultsRequest} VStreamResultsRequest + * @returns {binlogdata.KeyspaceShard} KeyspaceShard */ - VStreamResultsRequest.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.VStreamResultsRequest) + KeyspaceShard.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.KeyspaceShard) return object; - let message = new $root.binlogdata.VStreamResultsRequest(); - if (object.effective_caller_id != null) { - if (typeof object.effective_caller_id !== "object") - throw TypeError(".binlogdata.VStreamResultsRequest.effective_caller_id: object expected"); - message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); - } - if (object.immediate_caller_id != null) { - if (typeof object.immediate_caller_id !== "object") - throw TypeError(".binlogdata.VStreamResultsRequest.immediate_caller_id: object expected"); - message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); - } - if (object.target != null) { - if (typeof object.target !== "object") - throw 
TypeError(".binlogdata.VStreamResultsRequest.target: object expected"); - message.target = $root.query.Target.fromObject(object.target); - } - if (object.query != null) - message.query = String(object.query); + let message = new $root.binlogdata.KeyspaceShard(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); return message; }; /** - * Creates a plain object from a VStreamResultsRequest message. Also converts values to other types if specified. + * Creates a plain object from a KeyspaceShard message. Also converts values to other types if specified. * @function toObject - * @memberof binlogdata.VStreamResultsRequest + * @memberof binlogdata.KeyspaceShard * @static - * @param {binlogdata.VStreamResultsRequest} message VStreamResultsRequest + * @param {binlogdata.KeyspaceShard} message KeyspaceShard * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - VStreamResultsRequest.toObject = function toObject(message, options) { + KeyspaceShard.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.effective_caller_id = null; - object.immediate_caller_id = null; - object.target = null; - object.query = ""; + object.keyspace = ""; + object.shard = ""; } - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) - object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) - object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); - if (message.target != null && message.hasOwnProperty("target")) - object.target = $root.query.Target.toObject(message.target, options); - if (message.query != null && message.hasOwnProperty("query")) - object.query = 
message.query; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; return object; }; /** - * Converts this VStreamResultsRequest to JSON. + * Converts this KeyspaceShard to JSON. * @function toJSON - * @memberof binlogdata.VStreamResultsRequest + * @memberof binlogdata.KeyspaceShard * @instance * @returns {Object.} JSON object */ - VStreamResultsRequest.prototype.toJSON = function toJSON() { + KeyspaceShard.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for VStreamResultsRequest + * Gets the default type url for KeyspaceShard * @function getTypeUrl - * @memberof binlogdata.VStreamResultsRequest + * @memberof binlogdata.KeyspaceShard * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - VStreamResultsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + KeyspaceShard.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.VStreamResultsRequest"; + return typeUrlPrefix + "/binlogdata.KeyspaceShard"; }; - return VStreamResultsRequest; + return KeyspaceShard; })(); - binlogdata.VStreamResultsResponse = (function() { + /** + * MigrationType enum. + * @name binlogdata.MigrationType + * @enum {number} + * @property {number} TABLES=0 TABLES value + * @property {number} SHARDS=1 SHARDS value + */ + binlogdata.MigrationType = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "TABLES"] = 0; + values[valuesById[1] = "SHARDS"] = 1; + return values; + })(); + + binlogdata.Journal = (function() { /** - * Properties of a VStreamResultsResponse. + * Properties of a Journal. 
* @memberof binlogdata - * @interface IVStreamResultsResponse - * @property {Array.|null} [fields] VStreamResultsResponse fields - * @property {string|null} [gtid] VStreamResultsResponse gtid - * @property {Array.|null} [rows] VStreamResultsResponse rows + * @interface IJournal + * @property {number|Long|null} [id] Journal id + * @property {binlogdata.MigrationType|null} [migration_type] Journal migration_type + * @property {Array.|null} [tables] Journal tables + * @property {string|null} [local_position] Journal local_position + * @property {Array.|null} [shard_gtids] Journal shard_gtids + * @property {Array.|null} [participants] Journal participants + * @property {Array.|null} [source_workflows] Journal source_workflows */ /** - * Constructs a new VStreamResultsResponse. + * Constructs a new Journal. * @memberof binlogdata - * @classdesc Represents a VStreamResultsResponse. - * @implements IVStreamResultsResponse + * @classdesc Represents a Journal. + * @implements IJournal * @constructor - * @param {binlogdata.IVStreamResultsResponse=} [properties] Properties to set + * @param {binlogdata.IJournal=} [properties] Properties to set */ - function VStreamResultsResponse(properties) { - this.fields = []; - this.rows = []; + function Journal(properties) { + this.tables = []; + this.shard_gtids = []; + this.participants = []; + this.source_workflows = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -70342,109 +71875,171 @@ export const binlogdata = $root.binlogdata = (() => { } /** - * VStreamResultsResponse fields. - * @member {Array.} fields - * @memberof binlogdata.VStreamResultsResponse + * Journal id. + * @member {number|Long} id + * @memberof binlogdata.Journal * @instance */ - VStreamResultsResponse.prototype.fields = $util.emptyArray; + Journal.prototype.id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * VStreamResultsResponse gtid. 
- * @member {string} gtid - * @memberof binlogdata.VStreamResultsResponse + * Journal migration_type. + * @member {binlogdata.MigrationType} migration_type + * @memberof binlogdata.Journal * @instance */ - VStreamResultsResponse.prototype.gtid = ""; + Journal.prototype.migration_type = 0; /** - * VStreamResultsResponse rows. - * @member {Array.} rows - * @memberof binlogdata.VStreamResultsResponse + * Journal tables. + * @member {Array.} tables + * @memberof binlogdata.Journal * @instance */ - VStreamResultsResponse.prototype.rows = $util.emptyArray; + Journal.prototype.tables = $util.emptyArray; /** - * Creates a new VStreamResultsResponse instance using the specified properties. + * Journal local_position. + * @member {string} local_position + * @memberof binlogdata.Journal + * @instance + */ + Journal.prototype.local_position = ""; + + /** + * Journal shard_gtids. + * @member {Array.} shard_gtids + * @memberof binlogdata.Journal + * @instance + */ + Journal.prototype.shard_gtids = $util.emptyArray; + + /** + * Journal participants. + * @member {Array.} participants + * @memberof binlogdata.Journal + * @instance + */ + Journal.prototype.participants = $util.emptyArray; + + /** + * Journal source_workflows. + * @member {Array.} source_workflows + * @memberof binlogdata.Journal + * @instance + */ + Journal.prototype.source_workflows = $util.emptyArray; + + /** + * Creates a new Journal instance using the specified properties. 
* @function create - * @memberof binlogdata.VStreamResultsResponse + * @memberof binlogdata.Journal * @static - * @param {binlogdata.IVStreamResultsResponse=} [properties] Properties to set - * @returns {binlogdata.VStreamResultsResponse} VStreamResultsResponse instance + * @param {binlogdata.IJournal=} [properties] Properties to set + * @returns {binlogdata.Journal} Journal instance */ - VStreamResultsResponse.create = function create(properties) { - return new VStreamResultsResponse(properties); + Journal.create = function create(properties) { + return new Journal(properties); }; /** - * Encodes the specified VStreamResultsResponse message. Does not implicitly {@link binlogdata.VStreamResultsResponse.verify|verify} messages. + * Encodes the specified Journal message. Does not implicitly {@link binlogdata.Journal.verify|verify} messages. * @function encode - * @memberof binlogdata.VStreamResultsResponse + * @memberof binlogdata.Journal * @static - * @param {binlogdata.IVStreamResultsResponse} message VStreamResultsResponse message or plain object to encode + * @param {binlogdata.IJournal} message Journal message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VStreamResultsResponse.encode = function encode(message, writer) { + Journal.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.fields != null && message.fields.length) - for (let i = 0; i < message.fields.length; ++i) - $root.query.Field.encode(message.fields[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.gtid != null && Object.hasOwnProperty.call(message, "gtid")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.gtid); - if (message.rows != null && message.rows.length) - for (let i = 0; i < message.rows.length; ++i) - $root.query.Row.encode(message.rows[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.id != null && 
Object.hasOwnProperty.call(message, "id")) + writer.uint32(/* id 1, wireType 0 =*/8).int64(message.id); + if (message.migration_type != null && Object.hasOwnProperty.call(message, "migration_type")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.migration_type); + if (message.tables != null && message.tables.length) + for (let i = 0; i < message.tables.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.tables[i]); + if (message.local_position != null && Object.hasOwnProperty.call(message, "local_position")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.local_position); + if (message.shard_gtids != null && message.shard_gtids.length) + for (let i = 0; i < message.shard_gtids.length; ++i) + $root.binlogdata.ShardGtid.encode(message.shard_gtids[i], writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.participants != null && message.participants.length) + for (let i = 0; i < message.participants.length; ++i) + $root.binlogdata.KeyspaceShard.encode(message.participants[i], writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); + if (message.source_workflows != null && message.source_workflows.length) + for (let i = 0; i < message.source_workflows.length; ++i) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.source_workflows[i]); return writer; }; /** - * Encodes the specified VStreamResultsResponse message, length delimited. Does not implicitly {@link binlogdata.VStreamResultsResponse.verify|verify} messages. + * Encodes the specified Journal message, length delimited. Does not implicitly {@link binlogdata.Journal.verify|verify} messages. 
* @function encodeDelimited - * @memberof binlogdata.VStreamResultsResponse + * @memberof binlogdata.Journal * @static - * @param {binlogdata.IVStreamResultsResponse} message VStreamResultsResponse message or plain object to encode + * @param {binlogdata.IJournal} message Journal message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VStreamResultsResponse.encodeDelimited = function encodeDelimited(message, writer) { + Journal.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a VStreamResultsResponse message from the specified reader or buffer. + * Decodes a Journal message from the specified reader or buffer. * @function decode - * @memberof binlogdata.VStreamResultsResponse + * @memberof binlogdata.Journal * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {binlogdata.VStreamResultsResponse} VStreamResultsResponse + * @returns {binlogdata.Journal} Journal * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VStreamResultsResponse.decode = function decode(reader, length) { + Journal.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.VStreamResultsResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.binlogdata.Journal(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.fields && message.fields.length)) - message.fields = []; - message.fields.push($root.query.Field.decode(reader, reader.uint32())); + message.id = reader.int64(); + break; + } + case 2: { + message.migration_type = reader.int32(); break; } case 3: { - message.gtid = reader.string(); + if (!(message.tables && message.tables.length)) + message.tables = []; + message.tables.push(reader.string()); break; } case 4: { - if (!(message.rows && message.rows.length)) - message.rows = []; - message.rows.push($root.query.Row.decode(reader, reader.uint32())); + message.local_position = reader.string(); + break; + } + case 5: { + if (!(message.shard_gtids && message.shard_gtids.length)) + message.shard_gtids = []; + message.shard_gtids.push($root.binlogdata.ShardGtid.decode(reader, reader.uint32())); + break; + } + case 6: { + if (!(message.participants && message.participants.length)) + message.participants = []; + message.participants.push($root.binlogdata.KeyspaceShard.decode(reader, reader.uint32())); + break; + } + case 7: { + if (!(message.source_workflows && message.source_workflows.length)) + message.source_workflows = []; + message.source_workflows.push(reader.string()); break; } default: @@ -70456,190 +72051,277 @@ export const binlogdata = $root.binlogdata = (() => { }; /** - * Decodes a VStreamResultsResponse message from the specified reader or buffer, length delimited. + * Decodes a Journal message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof binlogdata.VStreamResultsResponse + * @memberof binlogdata.Journal * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {binlogdata.VStreamResultsResponse} VStreamResultsResponse + * @returns {binlogdata.Journal} Journal * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VStreamResultsResponse.decodeDelimited = function decodeDelimited(reader) { + Journal.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a VStreamResultsResponse message. + * Verifies a Journal message. * @function verify - * @memberof binlogdata.VStreamResultsResponse + * @memberof binlogdata.Journal * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - VStreamResultsResponse.verify = function verify(message) { + Journal.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.fields != null && message.hasOwnProperty("fields")) { - if (!Array.isArray(message.fields)) - return "fields: array expected"; - for (let i = 0; i < message.fields.length; ++i) { - let error = $root.query.Field.verify(message.fields[i]); + if (message.id != null && message.hasOwnProperty("id")) + if (!$util.isInteger(message.id) && !(message.id && $util.isInteger(message.id.low) && $util.isInteger(message.id.high))) + return "id: integer|Long expected"; + if (message.migration_type != null && message.hasOwnProperty("migration_type")) + switch (message.migration_type) { + default: + return "migration_type: enum value expected"; + case 0: + case 1: + break; + } + if (message.tables != null && message.hasOwnProperty("tables")) { + if 
(!Array.isArray(message.tables)) + return "tables: array expected"; + for (let i = 0; i < message.tables.length; ++i) + if (!$util.isString(message.tables[i])) + return "tables: string[] expected"; + } + if (message.local_position != null && message.hasOwnProperty("local_position")) + if (!$util.isString(message.local_position)) + return "local_position: string expected"; + if (message.shard_gtids != null && message.hasOwnProperty("shard_gtids")) { + if (!Array.isArray(message.shard_gtids)) + return "shard_gtids: array expected"; + for (let i = 0; i < message.shard_gtids.length; ++i) { + let error = $root.binlogdata.ShardGtid.verify(message.shard_gtids[i]); if (error) - return "fields." + error; + return "shard_gtids." + error; } } - if (message.gtid != null && message.hasOwnProperty("gtid")) - if (!$util.isString(message.gtid)) - return "gtid: string expected"; - if (message.rows != null && message.hasOwnProperty("rows")) { - if (!Array.isArray(message.rows)) - return "rows: array expected"; - for (let i = 0; i < message.rows.length; ++i) { - let error = $root.query.Row.verify(message.rows[i]); + if (message.participants != null && message.hasOwnProperty("participants")) { + if (!Array.isArray(message.participants)) + return "participants: array expected"; + for (let i = 0; i < message.participants.length; ++i) { + let error = $root.binlogdata.KeyspaceShard.verify(message.participants[i]); if (error) - return "rows." + error; + return "participants." + error; } } + if (message.source_workflows != null && message.hasOwnProperty("source_workflows")) { + if (!Array.isArray(message.source_workflows)) + return "source_workflows: array expected"; + for (let i = 0; i < message.source_workflows.length; ++i) + if (!$util.isString(message.source_workflows[i])) + return "source_workflows: string[] expected"; + } return null; }; /** - * Creates a VStreamResultsResponse message from a plain object. Also converts values to their respective internal types. 
+ * Creates a Journal message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof binlogdata.VStreamResultsResponse + * @memberof binlogdata.Journal * @static * @param {Object.} object Plain object - * @returns {binlogdata.VStreamResultsResponse} VStreamResultsResponse + * @returns {binlogdata.Journal} Journal */ - VStreamResultsResponse.fromObject = function fromObject(object) { - if (object instanceof $root.binlogdata.VStreamResultsResponse) + Journal.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.Journal) return object; - let message = new $root.binlogdata.VStreamResultsResponse(); - if (object.fields) { - if (!Array.isArray(object.fields)) - throw TypeError(".binlogdata.VStreamResultsResponse.fields: array expected"); - message.fields = []; - for (let i = 0; i < object.fields.length; ++i) { - if (typeof object.fields[i] !== "object") - throw TypeError(".binlogdata.VStreamResultsResponse.fields: object expected"); - message.fields[i] = $root.query.Field.fromObject(object.fields[i]); + let message = new $root.binlogdata.Journal(); + if (object.id != null) + if ($util.Long) + (message.id = $util.Long.fromValue(object.id)).unsigned = false; + else if (typeof object.id === "string") + message.id = parseInt(object.id, 10); + else if (typeof object.id === "number") + message.id = object.id; + else if (typeof object.id === "object") + message.id = new $util.LongBits(object.id.low >>> 0, object.id.high >>> 0).toNumber(); + switch (object.migration_type) { + default: + if (typeof object.migration_type === "number") { + message.migration_type = object.migration_type; + break; } + break; + case "TABLES": + case 0: + message.migration_type = 0; + break; + case "SHARDS": + case 1: + message.migration_type = 1; + break; } - if (object.gtid != null) - message.gtid = String(object.gtid); - if (object.rows) { - if (!Array.isArray(object.rows)) - throw 
TypeError(".binlogdata.VStreamResultsResponse.rows: array expected"); - message.rows = []; - for (let i = 0; i < object.rows.length; ++i) { - if (typeof object.rows[i] !== "object") - throw TypeError(".binlogdata.VStreamResultsResponse.rows: object expected"); - message.rows[i] = $root.query.Row.fromObject(object.rows[i]); + if (object.tables) { + if (!Array.isArray(object.tables)) + throw TypeError(".binlogdata.Journal.tables: array expected"); + message.tables = []; + for (let i = 0; i < object.tables.length; ++i) + message.tables[i] = String(object.tables[i]); + } + if (object.local_position != null) + message.local_position = String(object.local_position); + if (object.shard_gtids) { + if (!Array.isArray(object.shard_gtids)) + throw TypeError(".binlogdata.Journal.shard_gtids: array expected"); + message.shard_gtids = []; + for (let i = 0; i < object.shard_gtids.length; ++i) { + if (typeof object.shard_gtids[i] !== "object") + throw TypeError(".binlogdata.Journal.shard_gtids: object expected"); + message.shard_gtids[i] = $root.binlogdata.ShardGtid.fromObject(object.shard_gtids[i]); + } + } + if (object.participants) { + if (!Array.isArray(object.participants)) + throw TypeError(".binlogdata.Journal.participants: array expected"); + message.participants = []; + for (let i = 0; i < object.participants.length; ++i) { + if (typeof object.participants[i] !== "object") + throw TypeError(".binlogdata.Journal.participants: object expected"); + message.participants[i] = $root.binlogdata.KeyspaceShard.fromObject(object.participants[i]); } } + if (object.source_workflows) { + if (!Array.isArray(object.source_workflows)) + throw TypeError(".binlogdata.Journal.source_workflows: array expected"); + message.source_workflows = []; + for (let i = 0; i < object.source_workflows.length; ++i) + message.source_workflows[i] = String(object.source_workflows[i]); + } return message; }; /** - * Creates a plain object from a VStreamResultsResponse message. 
Also converts values to other types if specified. + * Creates a plain object from a Journal message. Also converts values to other types if specified. * @function toObject - * @memberof binlogdata.VStreamResultsResponse + * @memberof binlogdata.Journal * @static - * @param {binlogdata.VStreamResultsResponse} message VStreamResultsResponse + * @param {binlogdata.Journal} message Journal * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - VStreamResultsResponse.toObject = function toObject(message, options) { + Journal.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) { - object.fields = []; - object.rows = []; + object.tables = []; + object.shard_gtids = []; + object.participants = []; + object.source_workflows = []; } - if (options.defaults) - object.gtid = ""; - if (message.fields && message.fields.length) { - object.fields = []; - for (let j = 0; j < message.fields.length; ++j) - object.fields[j] = $root.query.Field.toObject(message.fields[j], options); + if (options.defaults) { + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.id = options.longs === String ? "0" : 0; + object.migration_type = options.enums === String ? "TABLES" : 0; + object.local_position = ""; } - if (message.gtid != null && message.hasOwnProperty("gtid")) - object.gtid = message.gtid; - if (message.rows && message.rows.length) { - object.rows = []; - for (let j = 0; j < message.rows.length; ++j) - object.rows[j] = $root.query.Row.toObject(message.rows[j], options); + if (message.id != null && message.hasOwnProperty("id")) + if (typeof message.id === "number") + object.id = options.longs === String ? String(message.id) : message.id; + else + object.id = options.longs === String ? 
$util.Long.prototype.toString.call(message.id) : options.longs === Number ? new $util.LongBits(message.id.low >>> 0, message.id.high >>> 0).toNumber() : message.id; + if (message.migration_type != null && message.hasOwnProperty("migration_type")) + object.migration_type = options.enums === String ? $root.binlogdata.MigrationType[message.migration_type] === undefined ? message.migration_type : $root.binlogdata.MigrationType[message.migration_type] : message.migration_type; + if (message.tables && message.tables.length) { + object.tables = []; + for (let j = 0; j < message.tables.length; ++j) + object.tables[j] = message.tables[j]; + } + if (message.local_position != null && message.hasOwnProperty("local_position")) + object.local_position = message.local_position; + if (message.shard_gtids && message.shard_gtids.length) { + object.shard_gtids = []; + for (let j = 0; j < message.shard_gtids.length; ++j) + object.shard_gtids[j] = $root.binlogdata.ShardGtid.toObject(message.shard_gtids[j], options); + } + if (message.participants && message.participants.length) { + object.participants = []; + for (let j = 0; j < message.participants.length; ++j) + object.participants[j] = $root.binlogdata.KeyspaceShard.toObject(message.participants[j], options); + } + if (message.source_workflows && message.source_workflows.length) { + object.source_workflows = []; + for (let j = 0; j < message.source_workflows.length; ++j) + object.source_workflows[j] = message.source_workflows[j]; } return object; }; /** - * Converts this VStreamResultsResponse to JSON. + * Converts this Journal to JSON. 
* @function toJSON - * @memberof binlogdata.VStreamResultsResponse + * @memberof binlogdata.Journal * @instance * @returns {Object.} JSON object */ - VStreamResultsResponse.prototype.toJSON = function toJSON() { + Journal.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for VStreamResultsResponse + * Gets the default type url for Journal * @function getTypeUrl - * @memberof binlogdata.VStreamResultsResponse + * @memberof binlogdata.Journal * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - VStreamResultsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + Journal.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/binlogdata.VStreamResultsResponse"; + return typeUrlPrefix + "/binlogdata.Journal"; }; - return VStreamResultsResponse; + return Journal; })(); - return binlogdata; -})(); - -export const vtrpc = $root.vtrpc = (() => { - - /** - * Namespace vtrpc. - * @exports vtrpc - * @namespace - */ - const vtrpc = {}; - - vtrpc.CallerID = (function() { + binlogdata.VEvent = (function() { /** - * Properties of a CallerID. - * @memberof vtrpc - * @interface ICallerID - * @property {string|null} [principal] CallerID principal - * @property {string|null} [component] CallerID component - * @property {string|null} [subcomponent] CallerID subcomponent - * @property {Array.|null} [groups] CallerID groups + * Properties of a VEvent. 
+ * @memberof binlogdata + * @interface IVEvent + * @property {binlogdata.VEventType|null} [type] VEvent type + * @property {number|Long|null} [timestamp] VEvent timestamp + * @property {string|null} [gtid] VEvent gtid + * @property {string|null} [statement] VEvent statement + * @property {binlogdata.IRowEvent|null} [row_event] VEvent row_event + * @property {binlogdata.IFieldEvent|null} [field_event] VEvent field_event + * @property {binlogdata.IVGtid|null} [vgtid] VEvent vgtid + * @property {binlogdata.IJournal|null} [journal] VEvent journal + * @property {string|null} [dml] VEvent dml + * @property {number|Long|null} [current_time] VEvent current_time + * @property {binlogdata.ILastPKEvent|null} [last_p_k_event] VEvent last_p_k_event + * @property {string|null} [keyspace] VEvent keyspace + * @property {string|null} [shard] VEvent shard + * @property {boolean|null} [throttled] VEvent throttled */ /** - * Constructs a new CallerID. - * @memberof vtrpc - * @classdesc Represents a CallerID. - * @implements ICallerID + * Constructs a new VEvent. + * @memberof binlogdata + * @classdesc Represents a VEvent. + * @implements IVEvent * @constructor - * @param {vtrpc.ICallerID=} [properties] Properties to set + * @param {binlogdata.IVEvent=} [properties] Properties to set */ - function CallerID(properties) { - this.groups = []; + function VEvent(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -70647,424 +72329,257 @@ export const vtrpc = $root.vtrpc = (() => { } /** - * CallerID principal. - * @member {string} principal - * @memberof vtrpc.CallerID + * VEvent type. + * @member {binlogdata.VEventType} type + * @memberof binlogdata.VEvent * @instance */ - CallerID.prototype.principal = ""; + VEvent.prototype.type = 0; /** - * CallerID component. - * @member {string} component - * @memberof vtrpc.CallerID + * VEvent timestamp. 
+ * @member {number|Long} timestamp + * @memberof binlogdata.VEvent * @instance */ - CallerID.prototype.component = ""; + VEvent.prototype.timestamp = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * CallerID subcomponent. - * @member {string} subcomponent - * @memberof vtrpc.CallerID + * VEvent gtid. + * @member {string} gtid + * @memberof binlogdata.VEvent * @instance */ - CallerID.prototype.subcomponent = ""; + VEvent.prototype.gtid = ""; /** - * CallerID groups. - * @member {Array.} groups - * @memberof vtrpc.CallerID + * VEvent statement. + * @member {string} statement + * @memberof binlogdata.VEvent * @instance */ - CallerID.prototype.groups = $util.emptyArray; + VEvent.prototype.statement = ""; /** - * Creates a new CallerID instance using the specified properties. - * @function create - * @memberof vtrpc.CallerID - * @static - * @param {vtrpc.ICallerID=} [properties] Properties to set - * @returns {vtrpc.CallerID} CallerID instance + * VEvent row_event. + * @member {binlogdata.IRowEvent|null|undefined} row_event + * @memberof binlogdata.VEvent + * @instance */ - CallerID.create = function create(properties) { - return new CallerID(properties); - }; + VEvent.prototype.row_event = null; /** - * Encodes the specified CallerID message. Does not implicitly {@link vtrpc.CallerID.verify|verify} messages. - * @function encode - * @memberof vtrpc.CallerID - * @static - * @param {vtrpc.ICallerID} message CallerID message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer + * VEvent field_event. 
+ * @member {binlogdata.IFieldEvent|null|undefined} field_event + * @memberof binlogdata.VEvent + * @instance */ - CallerID.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.principal != null && Object.hasOwnProperty.call(message, "principal")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.principal); - if (message.component != null && Object.hasOwnProperty.call(message, "component")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.component); - if (message.subcomponent != null && Object.hasOwnProperty.call(message, "subcomponent")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.subcomponent); - if (message.groups != null && message.groups.length) - for (let i = 0; i < message.groups.length; ++i) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.groups[i]); - return writer; - }; + VEvent.prototype.field_event = null; /** - * Encodes the specified CallerID message, length delimited. Does not implicitly {@link vtrpc.CallerID.verify|verify} messages. - * @function encodeDelimited - * @memberof vtrpc.CallerID - * @static - * @param {vtrpc.ICallerID} message CallerID message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer + * VEvent vgtid. + * @member {binlogdata.IVGtid|null|undefined} vgtid + * @memberof binlogdata.VEvent + * @instance */ - CallerID.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; + VEvent.prototype.vgtid = null; /** - * Decodes a CallerID message from the specified reader or buffer. 
- * @function decode - * @memberof vtrpc.CallerID - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {vtrpc.CallerID} CallerID - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - CallerID.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtrpc.CallerID(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.principal = reader.string(); - break; - } - case 2: { - message.component = reader.string(); - break; - } - case 3: { - message.subcomponent = reader.string(); - break; - } - case 4: { - if (!(message.groups && message.groups.length)) - message.groups = []; - message.groups.push(reader.string()); - break; - } - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; - - /** - * Decodes a CallerID message from the specified reader or buffer, length delimited. - * @function decodeDelimited - * @memberof vtrpc.CallerID - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtrpc.CallerID} CallerID - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - CallerID.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; - - /** - * Verifies a CallerID message. 
- * @function verify - * @memberof vtrpc.CallerID - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - CallerID.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.principal != null && message.hasOwnProperty("principal")) - if (!$util.isString(message.principal)) - return "principal: string expected"; - if (message.component != null && message.hasOwnProperty("component")) - if (!$util.isString(message.component)) - return "component: string expected"; - if (message.subcomponent != null && message.hasOwnProperty("subcomponent")) - if (!$util.isString(message.subcomponent)) - return "subcomponent: string expected"; - if (message.groups != null && message.hasOwnProperty("groups")) { - if (!Array.isArray(message.groups)) - return "groups: array expected"; - for (let i = 0; i < message.groups.length; ++i) - if (!$util.isString(message.groups[i])) - return "groups: string[] expected"; - } - return null; - }; - - /** - * Creates a CallerID message from a plain object. Also converts values to their respective internal types. 
- * @function fromObject - * @memberof vtrpc.CallerID - * @static - * @param {Object.} object Plain object - * @returns {vtrpc.CallerID} CallerID - */ - CallerID.fromObject = function fromObject(object) { - if (object instanceof $root.vtrpc.CallerID) - return object; - let message = new $root.vtrpc.CallerID(); - if (object.principal != null) - message.principal = String(object.principal); - if (object.component != null) - message.component = String(object.component); - if (object.subcomponent != null) - message.subcomponent = String(object.subcomponent); - if (object.groups) { - if (!Array.isArray(object.groups)) - throw TypeError(".vtrpc.CallerID.groups: array expected"); - message.groups = []; - for (let i = 0; i < object.groups.length; ++i) - message.groups[i] = String(object.groups[i]); - } - return message; - }; - - /** - * Creates a plain object from a CallerID message. Also converts values to other types if specified. - * @function toObject - * @memberof vtrpc.CallerID - * @static - * @param {vtrpc.CallerID} message CallerID - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object + * VEvent journal. 
+ * @member {binlogdata.IJournal|null|undefined} journal + * @memberof binlogdata.VEvent + * @instance */ - CallerID.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.arrays || options.defaults) - object.groups = []; - if (options.defaults) { - object.principal = ""; - object.component = ""; - object.subcomponent = ""; - } - if (message.principal != null && message.hasOwnProperty("principal")) - object.principal = message.principal; - if (message.component != null && message.hasOwnProperty("component")) - object.component = message.component; - if (message.subcomponent != null && message.hasOwnProperty("subcomponent")) - object.subcomponent = message.subcomponent; - if (message.groups && message.groups.length) { - object.groups = []; - for (let j = 0; j < message.groups.length; ++j) - object.groups[j] = message.groups[j]; - } - return object; - }; + VEvent.prototype.journal = null; /** - * Converts this CallerID to JSON. - * @function toJSON - * @memberof vtrpc.CallerID + * VEvent dml. + * @member {string} dml + * @memberof binlogdata.VEvent * @instance - * @returns {Object.} JSON object */ - CallerID.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; + VEvent.prototype.dml = ""; /** - * Gets the default type url for CallerID - * @function getTypeUrl - * @memberof vtrpc.CallerID - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url + * VEvent current_time. + * @member {number|Long} current_time + * @memberof binlogdata.VEvent + * @instance */ - CallerID.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/vtrpc.CallerID"; - }; - - return CallerID; - })(); - - /** - * Code enum. 
- * @name vtrpc.Code - * @enum {number} - * @property {number} OK=0 OK value - * @property {number} CANCELED=1 CANCELED value - * @property {number} UNKNOWN=2 UNKNOWN value - * @property {number} INVALID_ARGUMENT=3 INVALID_ARGUMENT value - * @property {number} DEADLINE_EXCEEDED=4 DEADLINE_EXCEEDED value - * @property {number} NOT_FOUND=5 NOT_FOUND value - * @property {number} ALREADY_EXISTS=6 ALREADY_EXISTS value - * @property {number} PERMISSION_DENIED=7 PERMISSION_DENIED value - * @property {number} RESOURCE_EXHAUSTED=8 RESOURCE_EXHAUSTED value - * @property {number} FAILED_PRECONDITION=9 FAILED_PRECONDITION value - * @property {number} ABORTED=10 ABORTED value - * @property {number} OUT_OF_RANGE=11 OUT_OF_RANGE value - * @property {number} UNIMPLEMENTED=12 UNIMPLEMENTED value - * @property {number} INTERNAL=13 INTERNAL value - * @property {number} UNAVAILABLE=14 UNAVAILABLE value - * @property {number} DATA_LOSS=15 DATA_LOSS value - * @property {number} UNAUTHENTICATED=16 UNAUTHENTICATED value - * @property {number} CLUSTER_EVENT=17 CLUSTER_EVENT value - * @property {number} READ_ONLY=18 READ_ONLY value - */ - vtrpc.Code = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "OK"] = 0; - values[valuesById[1] = "CANCELED"] = 1; - values[valuesById[2] = "UNKNOWN"] = 2; - values[valuesById[3] = "INVALID_ARGUMENT"] = 3; - values[valuesById[4] = "DEADLINE_EXCEEDED"] = 4; - values[valuesById[5] = "NOT_FOUND"] = 5; - values[valuesById[6] = "ALREADY_EXISTS"] = 6; - values[valuesById[7] = "PERMISSION_DENIED"] = 7; - values[valuesById[8] = "RESOURCE_EXHAUSTED"] = 8; - values[valuesById[9] = "FAILED_PRECONDITION"] = 9; - values[valuesById[10] = "ABORTED"] = 10; - values[valuesById[11] = "OUT_OF_RANGE"] = 11; - values[valuesById[12] = "UNIMPLEMENTED"] = 12; - values[valuesById[13] = "INTERNAL"] = 13; - values[valuesById[14] = "UNAVAILABLE"] = 14; - values[valuesById[15] = "DATA_LOSS"] = 15; - values[valuesById[16] = 
"UNAUTHENTICATED"] = 16; - values[valuesById[17] = "CLUSTER_EVENT"] = 17; - values[valuesById[18] = "READ_ONLY"] = 18; - return values; - })(); - - vtrpc.RPCError = (function() { + VEvent.prototype.current_time = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * Properties of a RPCError. - * @memberof vtrpc - * @interface IRPCError - * @property {string|null} [message] RPCError message - * @property {vtrpc.Code|null} [code] RPCError code + * VEvent last_p_k_event. + * @member {binlogdata.ILastPKEvent|null|undefined} last_p_k_event + * @memberof binlogdata.VEvent + * @instance */ + VEvent.prototype.last_p_k_event = null; /** - * Constructs a new RPCError. - * @memberof vtrpc - * @classdesc Represents a RPCError. - * @implements IRPCError - * @constructor - * @param {vtrpc.IRPCError=} [properties] Properties to set + * VEvent keyspace. + * @member {string} keyspace + * @memberof binlogdata.VEvent + * @instance */ - function RPCError(properties) { - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + VEvent.prototype.keyspace = ""; /** - * RPCError message. - * @member {string} message - * @memberof vtrpc.RPCError + * VEvent shard. + * @member {string} shard + * @memberof binlogdata.VEvent * @instance */ - RPCError.prototype.message = ""; + VEvent.prototype.shard = ""; /** - * RPCError code. - * @member {vtrpc.Code} code - * @memberof vtrpc.RPCError + * VEvent throttled. + * @member {boolean} throttled + * @memberof binlogdata.VEvent * @instance */ - RPCError.prototype.code = 0; + VEvent.prototype.throttled = false; /** - * Creates a new RPCError instance using the specified properties. + * Creates a new VEvent instance using the specified properties. 
* @function create - * @memberof vtrpc.RPCError + * @memberof binlogdata.VEvent * @static - * @param {vtrpc.IRPCError=} [properties] Properties to set - * @returns {vtrpc.RPCError} RPCError instance + * @param {binlogdata.IVEvent=} [properties] Properties to set + * @returns {binlogdata.VEvent} VEvent instance */ - RPCError.create = function create(properties) { - return new RPCError(properties); + VEvent.create = function create(properties) { + return new VEvent(properties); }; /** - * Encodes the specified RPCError message. Does not implicitly {@link vtrpc.RPCError.verify|verify} messages. + * Encodes the specified VEvent message. Does not implicitly {@link binlogdata.VEvent.verify|verify} messages. * @function encode - * @memberof vtrpc.RPCError + * @memberof binlogdata.VEvent * @static - * @param {vtrpc.IRPCError} message RPCError message or plain object to encode + * @param {binlogdata.IVEvent} message VEvent message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RPCError.encode = function encode(message, writer) { + VEvent.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.message != null && Object.hasOwnProperty.call(message, "message")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.message); - if (message.code != null && Object.hasOwnProperty.call(message, "code")) - writer.uint32(/* id 3, wireType 0 =*/24).int32(message.code); + if (message.type != null && Object.hasOwnProperty.call(message, "type")) + writer.uint32(/* id 1, wireType 0 =*/8).int32(message.type); + if (message.timestamp != null && Object.hasOwnProperty.call(message, "timestamp")) + writer.uint32(/* id 2, wireType 0 =*/16).int64(message.timestamp); + if (message.gtid != null && Object.hasOwnProperty.call(message, "gtid")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.gtid); + if (message.statement != null && Object.hasOwnProperty.call(message, 
"statement")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.statement); + if (message.row_event != null && Object.hasOwnProperty.call(message, "row_event")) + $root.binlogdata.RowEvent.encode(message.row_event, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.field_event != null && Object.hasOwnProperty.call(message, "field_event")) + $root.binlogdata.FieldEvent.encode(message.field_event, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); + if (message.vgtid != null && Object.hasOwnProperty.call(message, "vgtid")) + $root.binlogdata.VGtid.encode(message.vgtid, writer.uint32(/* id 7, wireType 2 =*/58).fork()).ldelim(); + if (message.journal != null && Object.hasOwnProperty.call(message, "journal")) + $root.binlogdata.Journal.encode(message.journal, writer.uint32(/* id 8, wireType 2 =*/66).fork()).ldelim(); + if (message.dml != null && Object.hasOwnProperty.call(message, "dml")) + writer.uint32(/* id 9, wireType 2 =*/74).string(message.dml); + if (message.current_time != null && Object.hasOwnProperty.call(message, "current_time")) + writer.uint32(/* id 20, wireType 0 =*/160).int64(message.current_time); + if (message.last_p_k_event != null && Object.hasOwnProperty.call(message, "last_p_k_event")) + $root.binlogdata.LastPKEvent.encode(message.last_p_k_event, writer.uint32(/* id 21, wireType 2 =*/170).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 22, wireType 2 =*/178).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 23, wireType 2 =*/186).string(message.shard); + if (message.throttled != null && Object.hasOwnProperty.call(message, "throttled")) + writer.uint32(/* id 24, wireType 0 =*/192).bool(message.throttled); return writer; }; /** - * Encodes the specified RPCError message, length delimited. Does not implicitly {@link vtrpc.RPCError.verify|verify} messages. 
+ * Encodes the specified VEvent message, length delimited. Does not implicitly {@link binlogdata.VEvent.verify|verify} messages. * @function encodeDelimited - * @memberof vtrpc.RPCError + * @memberof binlogdata.VEvent * @static - * @param {vtrpc.IRPCError} message RPCError message or plain object to encode + * @param {binlogdata.IVEvent} message VEvent message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RPCError.encodeDelimited = function encodeDelimited(message, writer) { + VEvent.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RPCError message from the specified reader or buffer. + * Decodes a VEvent message from the specified reader or buffer. * @function decode - * @memberof vtrpc.RPCError + * @memberof binlogdata.VEvent * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtrpc.RPCError} RPCError + * @returns {binlogdata.VEvent} VEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RPCError.decode = function decode(reader, length) { + VEvent.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtrpc.RPCError(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.binlogdata.VEvent(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.type = reader.int32(); + break; + } case 2: { - message.message = reader.string(); + message.timestamp = reader.int64(); break; } case 3: { - message.code = reader.int32(); + message.gtid = reader.string(); + break; + } + case 4: { + message.statement = reader.string(); + break; + } + case 5: { + message.row_event = $root.binlogdata.RowEvent.decode(reader, reader.uint32()); + break; + } + case 6: { + message.field_event = $root.binlogdata.FieldEvent.decode(reader, reader.uint32()); + break; + } + case 7: { + message.vgtid = $root.binlogdata.VGtid.decode(reader, reader.uint32()); + break; + } + case 8: { + message.journal = $root.binlogdata.Journal.decode(reader, reader.uint32()); + break; + } + case 9: { + message.dml = reader.string(); + break; + } + case 20: { + message.current_time = reader.int64(); + break; + } + case 21: { + message.last_p_k_event = $root.binlogdata.LastPKEvent.decode(reader, reader.uint32()); + break; + } + case 22: { + message.keyspace = reader.string(); + break; + } + case 23: { + message.shard = reader.string(); + break; + } + case 24: { + message.throttled = reader.bool(); break; } default: @@ -71076,39 +72591,36 @@ export const vtrpc = $root.vtrpc = (() => { }; /** - * Decodes a RPCError message from the specified reader or buffer, length delimited. + * Decodes a VEvent message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtrpc.RPCError + * @memberof binlogdata.VEvent * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtrpc.RPCError} RPCError + * @returns {binlogdata.VEvent} VEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RPCError.decodeDelimited = function decodeDelimited(reader) { + VEvent.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RPCError message. + * Verifies a VEvent message. * @function verify - * @memberof vtrpc.RPCError + * @memberof binlogdata.VEvent * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RPCError.verify = function verify(message) { + VEvent.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.message != null && message.hasOwnProperty("message")) - if (!$util.isString(message.message)) - return "message: string expected"; - if (message.code != null && message.hasOwnProperty("code")) - switch (message.code) { + if (message.type != null && message.hasOwnProperty("type")) + switch (message.type) { default: - return "code: enum value expected"; + return "type: enum value expected"; case 0: case 1: case 2: @@ -71128,198 +72640,349 @@ export const vtrpc = $root.vtrpc = (() => { case 16: case 17: case 18: + case 19: + case 20: break; } + if (message.timestamp != null && message.hasOwnProperty("timestamp")) + if (!$util.isInteger(message.timestamp) && !(message.timestamp && $util.isInteger(message.timestamp.low) && $util.isInteger(message.timestamp.high))) + return "timestamp: integer|Long expected"; + if (message.gtid != null && message.hasOwnProperty("gtid")) + if 
(!$util.isString(message.gtid)) + return "gtid: string expected"; + if (message.statement != null && message.hasOwnProperty("statement")) + if (!$util.isString(message.statement)) + return "statement: string expected"; + if (message.row_event != null && message.hasOwnProperty("row_event")) { + let error = $root.binlogdata.RowEvent.verify(message.row_event); + if (error) + return "row_event." + error; + } + if (message.field_event != null && message.hasOwnProperty("field_event")) { + let error = $root.binlogdata.FieldEvent.verify(message.field_event); + if (error) + return "field_event." + error; + } + if (message.vgtid != null && message.hasOwnProperty("vgtid")) { + let error = $root.binlogdata.VGtid.verify(message.vgtid); + if (error) + return "vgtid." + error; + } + if (message.journal != null && message.hasOwnProperty("journal")) { + let error = $root.binlogdata.Journal.verify(message.journal); + if (error) + return "journal." + error; + } + if (message.dml != null && message.hasOwnProperty("dml")) + if (!$util.isString(message.dml)) + return "dml: string expected"; + if (message.current_time != null && message.hasOwnProperty("current_time")) + if (!$util.isInteger(message.current_time) && !(message.current_time && $util.isInteger(message.current_time.low) && $util.isInteger(message.current_time.high))) + return "current_time: integer|Long expected"; + if (message.last_p_k_event != null && message.hasOwnProperty("last_p_k_event")) { + let error = $root.binlogdata.LastPKEvent.verify(message.last_p_k_event); + if (error) + return "last_p_k_event." 
+ error; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.throttled != null && message.hasOwnProperty("throttled")) + if (typeof message.throttled !== "boolean") + return "throttled: boolean expected"; return null; }; /** - * Creates a RPCError message from a plain object. Also converts values to their respective internal types. + * Creates a VEvent message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtrpc.RPCError + * @memberof binlogdata.VEvent * @static * @param {Object.} object Plain object - * @returns {vtrpc.RPCError} RPCError + * @returns {binlogdata.VEvent} VEvent */ - RPCError.fromObject = function fromObject(object) { - if (object instanceof $root.vtrpc.RPCError) + VEvent.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.VEvent) return object; - let message = new $root.vtrpc.RPCError(); - if (object.message != null) - message.message = String(object.message); - switch (object.code) { + let message = new $root.binlogdata.VEvent(); + switch (object.type) { default: - if (typeof object.code === "number") { - message.code = object.code; + if (typeof object.type === "number") { + message.type = object.type; break; } break; - case "OK": + case "UNKNOWN": case 0: - message.code = 0; + message.type = 0; break; - case "CANCELED": + case "GTID": case 1: - message.code = 1; + message.type = 1; break; - case "UNKNOWN": + case "BEGIN": case 2: - message.code = 2; + message.type = 2; break; - case "INVALID_ARGUMENT": + case "COMMIT": case 3: - message.code = 3; + message.type = 3; break; - case "DEADLINE_EXCEEDED": + case "ROLLBACK": case 4: - message.code = 4; + message.type = 4; break; - case "NOT_FOUND": + case 
"DDL": case 5: - message.code = 5; + message.type = 5; break; - case "ALREADY_EXISTS": + case "INSERT": case 6: - message.code = 6; + message.type = 6; break; - case "PERMISSION_DENIED": + case "REPLACE": case 7: - message.code = 7; + message.type = 7; break; - case "RESOURCE_EXHAUSTED": + case "UPDATE": case 8: - message.code = 8; + message.type = 8; break; - case "FAILED_PRECONDITION": + case "DELETE": case 9: - message.code = 9; + message.type = 9; break; - case "ABORTED": + case "SET": case 10: - message.code = 10; + message.type = 10; break; - case "OUT_OF_RANGE": + case "OTHER": case 11: - message.code = 11; + message.type = 11; break; - case "UNIMPLEMENTED": + case "ROW": case 12: - message.code = 12; + message.type = 12; break; - case "INTERNAL": + case "FIELD": case 13: - message.code = 13; + message.type = 13; break; - case "UNAVAILABLE": + case "HEARTBEAT": case 14: - message.code = 14; + message.type = 14; break; - case "DATA_LOSS": + case "VGTID": case 15: - message.code = 15; + message.type = 15; break; - case "UNAUTHENTICATED": + case "JOURNAL": case 16: - message.code = 16; + message.type = 16; break; - case "CLUSTER_EVENT": + case "VERSION": case 17: - message.code = 17; + message.type = 17; break; - case "READ_ONLY": + case "LASTPK": case 18: - message.code = 18; + message.type = 18; + break; + case "SAVEPOINT": + case 19: + message.type = 19; + break; + case "COPY_COMPLETED": + case 20: + message.type = 20; break; } + if (object.timestamp != null) + if ($util.Long) + (message.timestamp = $util.Long.fromValue(object.timestamp)).unsigned = false; + else if (typeof object.timestamp === "string") + message.timestamp = parseInt(object.timestamp, 10); + else if (typeof object.timestamp === "number") + message.timestamp = object.timestamp; + else if (typeof object.timestamp === "object") + message.timestamp = new $util.LongBits(object.timestamp.low >>> 0, object.timestamp.high >>> 0).toNumber(); + if (object.gtid != null) + message.gtid = 
String(object.gtid); + if (object.statement != null) + message.statement = String(object.statement); + if (object.row_event != null) { + if (typeof object.row_event !== "object") + throw TypeError(".binlogdata.VEvent.row_event: object expected"); + message.row_event = $root.binlogdata.RowEvent.fromObject(object.row_event); + } + if (object.field_event != null) { + if (typeof object.field_event !== "object") + throw TypeError(".binlogdata.VEvent.field_event: object expected"); + message.field_event = $root.binlogdata.FieldEvent.fromObject(object.field_event); + } + if (object.vgtid != null) { + if (typeof object.vgtid !== "object") + throw TypeError(".binlogdata.VEvent.vgtid: object expected"); + message.vgtid = $root.binlogdata.VGtid.fromObject(object.vgtid); + } + if (object.journal != null) { + if (typeof object.journal !== "object") + throw TypeError(".binlogdata.VEvent.journal: object expected"); + message.journal = $root.binlogdata.Journal.fromObject(object.journal); + } + if (object.dml != null) + message.dml = String(object.dml); + if (object.current_time != null) + if ($util.Long) + (message.current_time = $util.Long.fromValue(object.current_time)).unsigned = false; + else if (typeof object.current_time === "string") + message.current_time = parseInt(object.current_time, 10); + else if (typeof object.current_time === "number") + message.current_time = object.current_time; + else if (typeof object.current_time === "object") + message.current_time = new $util.LongBits(object.current_time.low >>> 0, object.current_time.high >>> 0).toNumber(); + if (object.last_p_k_event != null) { + if (typeof object.last_p_k_event !== "object") + throw TypeError(".binlogdata.VEvent.last_p_k_event: object expected"); + message.last_p_k_event = $root.binlogdata.LastPKEvent.fromObject(object.last_p_k_event); + } + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + if (object.throttled 
!= null) + message.throttled = Boolean(object.throttled); return message; }; /** - * Creates a plain object from a RPCError message. Also converts values to other types if specified. + * Creates a plain object from a VEvent message. Also converts values to other types if specified. * @function toObject - * @memberof vtrpc.RPCError + * @memberof binlogdata.VEvent * @static - * @param {vtrpc.RPCError} message RPCError + * @param {binlogdata.VEvent} message VEvent * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RPCError.toObject = function toObject(message, options) { + VEvent.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.message = ""; - object.code = options.enums === String ? "OK" : 0; + object.type = options.enums === String ? "UNKNOWN" : 0; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.timestamp = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.timestamp = options.longs === String ? "0" : 0; + object.gtid = ""; + object.statement = ""; + object.row_event = null; + object.field_event = null; + object.vgtid = null; + object.journal = null; + object.dml = ""; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.current_time = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.current_time = options.longs === String ? "0" : 0; + object.last_p_k_event = null; + object.keyspace = ""; + object.shard = ""; + object.throttled = false; } - if (message.message != null && message.hasOwnProperty("message")) - object.message = message.message; - if (message.code != null && message.hasOwnProperty("code")) - object.code = options.enums === String ? $root.vtrpc.Code[message.code] === undefined ? 
message.code : $root.vtrpc.Code[message.code] : message.code; + if (message.type != null && message.hasOwnProperty("type")) + object.type = options.enums === String ? $root.binlogdata.VEventType[message.type] === undefined ? message.type : $root.binlogdata.VEventType[message.type] : message.type; + if (message.timestamp != null && message.hasOwnProperty("timestamp")) + if (typeof message.timestamp === "number") + object.timestamp = options.longs === String ? String(message.timestamp) : message.timestamp; + else + object.timestamp = options.longs === String ? $util.Long.prototype.toString.call(message.timestamp) : options.longs === Number ? new $util.LongBits(message.timestamp.low >>> 0, message.timestamp.high >>> 0).toNumber() : message.timestamp; + if (message.gtid != null && message.hasOwnProperty("gtid")) + object.gtid = message.gtid; + if (message.statement != null && message.hasOwnProperty("statement")) + object.statement = message.statement; + if (message.row_event != null && message.hasOwnProperty("row_event")) + object.row_event = $root.binlogdata.RowEvent.toObject(message.row_event, options); + if (message.field_event != null && message.hasOwnProperty("field_event")) + object.field_event = $root.binlogdata.FieldEvent.toObject(message.field_event, options); + if (message.vgtid != null && message.hasOwnProperty("vgtid")) + object.vgtid = $root.binlogdata.VGtid.toObject(message.vgtid, options); + if (message.journal != null && message.hasOwnProperty("journal")) + object.journal = $root.binlogdata.Journal.toObject(message.journal, options); + if (message.dml != null && message.hasOwnProperty("dml")) + object.dml = message.dml; + if (message.current_time != null && message.hasOwnProperty("current_time")) + if (typeof message.current_time === "number") + object.current_time = options.longs === String ? String(message.current_time) : message.current_time; + else + object.current_time = options.longs === String ? 
$util.Long.prototype.toString.call(message.current_time) : options.longs === Number ? new $util.LongBits(message.current_time.low >>> 0, message.current_time.high >>> 0).toNumber() : message.current_time; + if (message.last_p_k_event != null && message.hasOwnProperty("last_p_k_event")) + object.last_p_k_event = $root.binlogdata.LastPKEvent.toObject(message.last_p_k_event, options); + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.throttled != null && message.hasOwnProperty("throttled")) + object.throttled = message.throttled; return object; }; /** - * Converts this RPCError to JSON. + * Converts this VEvent to JSON. * @function toJSON - * @memberof vtrpc.RPCError + * @memberof binlogdata.VEvent * @instance * @returns {Object.} JSON object */ - RPCError.prototype.toJSON = function toJSON() { + VEvent.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RPCError + * Gets the default type url for VEvent * @function getTypeUrl - * @memberof vtrpc.RPCError + * @memberof binlogdata.VEvent * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RPCError.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VEvent.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtrpc.RPCError"; + return typeUrlPrefix + "/binlogdata.VEvent"; }; - return RPCError; + return VEvent; })(); - return vtrpc; -})(); - -export const query = $root.query = (() => { - - /** - * Namespace query. 
- * @exports query - * @namespace - */ - const query = {}; - - query.Target = (function() { + binlogdata.MinimalTable = (function() { /** - * Properties of a Target. - * @memberof query - * @interface ITarget - * @property {string|null} [keyspace] Target keyspace - * @property {string|null} [shard] Target shard - * @property {topodata.TabletType|null} [tablet_type] Target tablet_type - * @property {string|null} [cell] Target cell + * Properties of a MinimalTable. + * @memberof binlogdata + * @interface IMinimalTable + * @property {string|null} [name] MinimalTable name + * @property {Array.|null} [fields] MinimalTable fields + * @property {Array.|null} [p_k_columns] MinimalTable p_k_columns */ /** - * Constructs a new Target. - * @memberof query - * @classdesc Represents a Target. - * @implements ITarget + * Constructs a new MinimalTable. + * @memberof binlogdata + * @classdesc Represents a MinimalTable. + * @implements IMinimalTable * @constructor - * @param {query.ITarget=} [properties] Properties to set + * @param {binlogdata.IMinimalTable=} [properties] Properties to set */ - function Target(properties) { + function MinimalTable(properties) { + this.fields = []; + this.p_k_columns = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -71327,117 +72990,117 @@ export const query = $root.query = (() => { } /** - * Target keyspace. - * @member {string} keyspace - * @memberof query.Target - * @instance - */ - Target.prototype.keyspace = ""; - - /** - * Target shard. - * @member {string} shard - * @memberof query.Target + * MinimalTable name. + * @member {string} name + * @memberof binlogdata.MinimalTable * @instance */ - Target.prototype.shard = ""; + MinimalTable.prototype.name = ""; /** - * Target tablet_type. - * @member {topodata.TabletType} tablet_type - * @memberof query.Target + * MinimalTable fields. 
+ * @member {Array.} fields + * @memberof binlogdata.MinimalTable * @instance */ - Target.prototype.tablet_type = 0; + MinimalTable.prototype.fields = $util.emptyArray; /** - * Target cell. - * @member {string} cell - * @memberof query.Target + * MinimalTable p_k_columns. + * @member {Array.} p_k_columns + * @memberof binlogdata.MinimalTable * @instance */ - Target.prototype.cell = ""; + MinimalTable.prototype.p_k_columns = $util.emptyArray; /** - * Creates a new Target instance using the specified properties. + * Creates a new MinimalTable instance using the specified properties. * @function create - * @memberof query.Target + * @memberof binlogdata.MinimalTable * @static - * @param {query.ITarget=} [properties] Properties to set - * @returns {query.Target} Target instance + * @param {binlogdata.IMinimalTable=} [properties] Properties to set + * @returns {binlogdata.MinimalTable} MinimalTable instance */ - Target.create = function create(properties) { - return new Target(properties); + MinimalTable.create = function create(properties) { + return new MinimalTable(properties); }; /** - * Encodes the specified Target message. Does not implicitly {@link query.Target.verify|verify} messages. + * Encodes the specified MinimalTable message. Does not implicitly {@link binlogdata.MinimalTable.verify|verify} messages. 
* @function encode - * @memberof query.Target + * @memberof binlogdata.MinimalTable * @static - * @param {query.ITarget} message Target message or plain object to encode + * @param {binlogdata.IMinimalTable} message MinimalTable message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Target.encode = function encode(message, writer) { + MinimalTable.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.tablet_type != null && Object.hasOwnProperty.call(message, "tablet_type")) - writer.uint32(/* id 3, wireType 0 =*/24).int32(message.tablet_type); - if (message.cell != null && Object.hasOwnProperty.call(message, "cell")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.cell); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.fields != null && message.fields.length) + for (let i = 0; i < message.fields.length; ++i) + $root.query.Field.encode(message.fields[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.p_k_columns != null && message.p_k_columns.length) { + writer.uint32(/* id 3, wireType 2 =*/26).fork(); + for (let i = 0; i < message.p_k_columns.length; ++i) + writer.int64(message.p_k_columns[i]); + writer.ldelim(); + } return writer; }; /** - * Encodes the specified Target message, length delimited. Does not implicitly {@link query.Target.verify|verify} messages. + * Encodes the specified MinimalTable message, length delimited. Does not implicitly {@link binlogdata.MinimalTable.verify|verify} messages. 
* @function encodeDelimited - * @memberof query.Target + * @memberof binlogdata.MinimalTable * @static - * @param {query.ITarget} message Target message or plain object to encode + * @param {binlogdata.IMinimalTable} message MinimalTable message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Target.encodeDelimited = function encodeDelimited(message, writer) { + MinimalTable.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Target message from the specified reader or buffer. + * Decodes a MinimalTable message from the specified reader or buffer. * @function decode - * @memberof query.Target + * @memberof binlogdata.MinimalTable * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.Target} Target + * @returns {binlogdata.MinimalTable} MinimalTable * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Target.decode = function decode(reader, length) { + MinimalTable.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.Target(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.binlogdata.MinimalTable(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); + message.name = reader.string(); break; } case 2: { - message.shard = reader.string(); + if (!(message.fields && message.fields.length)) + message.fields = []; + message.fields.push($root.query.Field.decode(reader, reader.uint32())); break; } case 3: { - message.tablet_type = reader.int32(); - break; - } - case 4: { - message.cell = reader.string(); + if (!(message.p_k_columns && message.p_k_columns.length)) + message.p_k_columns = []; + if ((tag & 7) === 2) { + let end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.p_k_columns.push(reader.int64()); + } else + message.p_k_columns.push(reader.int64()); break; } default: @@ -71449,213 +73112,180 @@ export const query = $root.query = (() => { }; /** - * Decodes a Target message from the specified reader or buffer, length delimited. + * Decodes a MinimalTable message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.Target + * @memberof binlogdata.MinimalTable * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.Target} Target + * @returns {binlogdata.MinimalTable} MinimalTable * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Target.decodeDelimited = function decodeDelimited(reader) { + MinimalTable.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Target message. + * Verifies a MinimalTable message. 
* @function verify - * @memberof query.Target + * @memberof binlogdata.MinimalTable * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Target.verify = function verify(message) { + MinimalTable.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) - switch (message.tablet_type) { - default: - return "tablet_type: enum value expected"; - case 0: - case 1: - case 1: - case 2: - case 3: - case 3: - case 4: - case 5: - case 6: - case 7: - case 8: - break; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.fields != null && message.hasOwnProperty("fields")) { + if (!Array.isArray(message.fields)) + return "fields: array expected"; + for (let i = 0; i < message.fields.length; ++i) { + let error = $root.query.Field.verify(message.fields[i]); + if (error) + return "fields." 
+ error; } - if (message.cell != null && message.hasOwnProperty("cell")) - if (!$util.isString(message.cell)) - return "cell: string expected"; + } + if (message.p_k_columns != null && message.hasOwnProperty("p_k_columns")) { + if (!Array.isArray(message.p_k_columns)) + return "p_k_columns: array expected"; + for (let i = 0; i < message.p_k_columns.length; ++i) + if (!$util.isInteger(message.p_k_columns[i]) && !(message.p_k_columns[i] && $util.isInteger(message.p_k_columns[i].low) && $util.isInteger(message.p_k_columns[i].high))) + return "p_k_columns: integer|Long[] expected"; + } return null; }; /** - * Creates a Target message from a plain object. Also converts values to their respective internal types. + * Creates a MinimalTable message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.Target + * @memberof binlogdata.MinimalTable * @static * @param {Object.} object Plain object - * @returns {query.Target} Target + * @returns {binlogdata.MinimalTable} MinimalTable */ - Target.fromObject = function fromObject(object) { - if (object instanceof $root.query.Target) + MinimalTable.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.MinimalTable) return object; - let message = new $root.query.Target(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - switch (object.tablet_type) { - default: - if (typeof object.tablet_type === "number") { - message.tablet_type = object.tablet_type; - break; + let message = new $root.binlogdata.MinimalTable(); + if (object.name != null) + message.name = String(object.name); + if (object.fields) { + if (!Array.isArray(object.fields)) + throw TypeError(".binlogdata.MinimalTable.fields: array expected"); + message.fields = []; + for (let i = 0; i < object.fields.length; ++i) { + if (typeof object.fields[i] !== "object") + throw 
TypeError(".binlogdata.MinimalTable.fields: object expected"); + message.fields[i] = $root.query.Field.fromObject(object.fields[i]); } - break; - case "UNKNOWN": - case 0: - message.tablet_type = 0; - break; - case "PRIMARY": - case 1: - message.tablet_type = 1; - break; - case "MASTER": - case 1: - message.tablet_type = 1; - break; - case "REPLICA": - case 2: - message.tablet_type = 2; - break; - case "RDONLY": - case 3: - message.tablet_type = 3; - break; - case "BATCH": - case 3: - message.tablet_type = 3; - break; - case "SPARE": - case 4: - message.tablet_type = 4; - break; - case "EXPERIMENTAL": - case 5: - message.tablet_type = 5; - break; - case "BACKUP": - case 6: - message.tablet_type = 6; - break; - case "RESTORE": - case 7: - message.tablet_type = 7; - break; - case "DRAINED": - case 8: - message.tablet_type = 8; - break; } - if (object.cell != null) - message.cell = String(object.cell); + if (object.p_k_columns) { + if (!Array.isArray(object.p_k_columns)) + throw TypeError(".binlogdata.MinimalTable.p_k_columns: array expected"); + message.p_k_columns = []; + for (let i = 0; i < object.p_k_columns.length; ++i) + if ($util.Long) + (message.p_k_columns[i] = $util.Long.fromValue(object.p_k_columns[i])).unsigned = false; + else if (typeof object.p_k_columns[i] === "string") + message.p_k_columns[i] = parseInt(object.p_k_columns[i], 10); + else if (typeof object.p_k_columns[i] === "number") + message.p_k_columns[i] = object.p_k_columns[i]; + else if (typeof object.p_k_columns[i] === "object") + message.p_k_columns[i] = new $util.LongBits(object.p_k_columns[i].low >>> 0, object.p_k_columns[i].high >>> 0).toNumber(); + } return message; }; /** - * Creates a plain object from a Target message. Also converts values to other types if specified. + * Creates a plain object from a MinimalTable message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.Target + * @memberof binlogdata.MinimalTable * @static - * @param {query.Target} message Target + * @param {binlogdata.MinimalTable} message MinimalTable * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Target.toObject = function toObject(message, options) { + MinimalTable.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.keyspace = ""; - object.shard = ""; - object.tablet_type = options.enums === String ? "UNKNOWN" : 0; - object.cell = ""; + if (options.arrays || options.defaults) { + object.fields = []; + object.p_k_columns = []; + } + if (options.defaults) + object.name = ""; + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.fields && message.fields.length) { + object.fields = []; + for (let j = 0; j < message.fields.length; ++j) + object.fields[j] = $root.query.Field.toObject(message.fields[j], options); + } + if (message.p_k_columns && message.p_k_columns.length) { + object.p_k_columns = []; + for (let j = 0; j < message.p_k_columns.length; ++j) + if (typeof message.p_k_columns[j] === "number") + object.p_k_columns[j] = options.longs === String ? String(message.p_k_columns[j]) : message.p_k_columns[j]; + else + object.p_k_columns[j] = options.longs === String ? $util.Long.prototype.toString.call(message.p_k_columns[j]) : options.longs === Number ? new $util.LongBits(message.p_k_columns[j].low >>> 0, message.p_k_columns[j].high >>> 0).toNumber() : message.p_k_columns[j]; } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) - object.tablet_type = options.enums === String ? 
$root.topodata.TabletType[message.tablet_type] === undefined ? message.tablet_type : $root.topodata.TabletType[message.tablet_type] : message.tablet_type; - if (message.cell != null && message.hasOwnProperty("cell")) - object.cell = message.cell; return object; }; /** - * Converts this Target to JSON. + * Converts this MinimalTable to JSON. * @function toJSON - * @memberof query.Target + * @memberof binlogdata.MinimalTable * @instance * @returns {Object.} JSON object */ - Target.prototype.toJSON = function toJSON() { + MinimalTable.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for Target + * Gets the default type url for MinimalTable * @function getTypeUrl - * @memberof query.Target + * @memberof binlogdata.MinimalTable * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - Target.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MinimalTable.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.Target"; + return typeUrlPrefix + "/binlogdata.MinimalTable"; }; - return Target; + return MinimalTable; })(); - query.VTGateCallerID = (function() { + binlogdata.MinimalSchema = (function() { /** - * Properties of a VTGateCallerID. - * @memberof query - * @interface IVTGateCallerID - * @property {string|null} [username] VTGateCallerID username - * @property {Array.|null} [groups] VTGateCallerID groups + * Properties of a MinimalSchema. + * @memberof binlogdata + * @interface IMinimalSchema + * @property {Array.|null} [tables] MinimalSchema tables */ /** - * Constructs a new VTGateCallerID. - * @memberof query - * @classdesc Represents a VTGateCallerID. - * @implements IVTGateCallerID + * Constructs a new MinimalSchema. 
+ * @memberof binlogdata + * @classdesc Represents a MinimalSchema. + * @implements IMinimalSchema * @constructor - * @param {query.IVTGateCallerID=} [properties] Properties to set + * @param {binlogdata.IMinimalSchema=} [properties] Properties to set */ - function VTGateCallerID(properties) { - this.groups = []; + function MinimalSchema(properties) { + this.tables = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -71663,92 +73293,78 @@ export const query = $root.query = (() => { } /** - * VTGateCallerID username. - * @member {string} username - * @memberof query.VTGateCallerID - * @instance - */ - VTGateCallerID.prototype.username = ""; - - /** - * VTGateCallerID groups. - * @member {Array.} groups - * @memberof query.VTGateCallerID + * MinimalSchema tables. + * @member {Array.} tables + * @memberof binlogdata.MinimalSchema * @instance */ - VTGateCallerID.prototype.groups = $util.emptyArray; + MinimalSchema.prototype.tables = $util.emptyArray; /** - * Creates a new VTGateCallerID instance using the specified properties. + * Creates a new MinimalSchema instance using the specified properties. * @function create - * @memberof query.VTGateCallerID + * @memberof binlogdata.MinimalSchema * @static - * @param {query.IVTGateCallerID=} [properties] Properties to set - * @returns {query.VTGateCallerID} VTGateCallerID instance + * @param {binlogdata.IMinimalSchema=} [properties] Properties to set + * @returns {binlogdata.MinimalSchema} MinimalSchema instance */ - VTGateCallerID.create = function create(properties) { - return new VTGateCallerID(properties); + MinimalSchema.create = function create(properties) { + return new MinimalSchema(properties); }; /** - * Encodes the specified VTGateCallerID message. Does not implicitly {@link query.VTGateCallerID.verify|verify} messages. + * Encodes the specified MinimalSchema message. 
Does not implicitly {@link binlogdata.MinimalSchema.verify|verify} messages. * @function encode - * @memberof query.VTGateCallerID + * @memberof binlogdata.MinimalSchema * @static - * @param {query.IVTGateCallerID} message VTGateCallerID message or plain object to encode + * @param {binlogdata.IMinimalSchema} message MinimalSchema message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VTGateCallerID.encode = function encode(message, writer) { + MinimalSchema.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.username != null && Object.hasOwnProperty.call(message, "username")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.username); - if (message.groups != null && message.groups.length) - for (let i = 0; i < message.groups.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.groups[i]); + if (message.tables != null && message.tables.length) + for (let i = 0; i < message.tables.length; ++i) + $root.binlogdata.MinimalTable.encode(message.tables[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified VTGateCallerID message, length delimited. Does not implicitly {@link query.VTGateCallerID.verify|verify} messages. + * Encodes the specified MinimalSchema message, length delimited. Does not implicitly {@link binlogdata.MinimalSchema.verify|verify} messages. 
* @function encodeDelimited - * @memberof query.VTGateCallerID + * @memberof binlogdata.MinimalSchema * @static - * @param {query.IVTGateCallerID} message VTGateCallerID message or plain object to encode + * @param {binlogdata.IMinimalSchema} message MinimalSchema message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - VTGateCallerID.encodeDelimited = function encodeDelimited(message, writer) { + MinimalSchema.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a VTGateCallerID message from the specified reader or buffer. + * Decodes a MinimalSchema message from the specified reader or buffer. * @function decode - * @memberof query.VTGateCallerID + * @memberof binlogdata.MinimalSchema * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.VTGateCallerID} VTGateCallerID + * @returns {binlogdata.MinimalSchema} MinimalSchema * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VTGateCallerID.decode = function decode(reader, length) { + MinimalSchema.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.VTGateCallerID(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.binlogdata.MinimalSchema(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.username = reader.string(); - break; - } - case 2: { - if (!(message.groups && message.groups.length)) - message.groups = []; - message.groups.push(reader.string()); + if (!(message.tables && message.tables.length)) + message.tables = []; + message.tables.push($root.binlogdata.MinimalTable.decode(reader, reader.uint32())); break; } default: @@ -71760,145 +73376,145 @@ export const query = $root.query = (() => { }; /** - * Decodes a VTGateCallerID message from the specified reader or buffer, length delimited. + * Decodes a MinimalSchema message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.VTGateCallerID + * @memberof binlogdata.MinimalSchema * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.VTGateCallerID} VTGateCallerID + * @returns {binlogdata.MinimalSchema} MinimalSchema * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - VTGateCallerID.decodeDelimited = function decodeDelimited(reader) { + MinimalSchema.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a VTGateCallerID message. + * Verifies a MinimalSchema message. 
* @function verify - * @memberof query.VTGateCallerID + * @memberof binlogdata.MinimalSchema * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - VTGateCallerID.verify = function verify(message) { + MinimalSchema.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.username != null && message.hasOwnProperty("username")) - if (!$util.isString(message.username)) - return "username: string expected"; - if (message.groups != null && message.hasOwnProperty("groups")) { - if (!Array.isArray(message.groups)) - return "groups: array expected"; - for (let i = 0; i < message.groups.length; ++i) - if (!$util.isString(message.groups[i])) - return "groups: string[] expected"; + if (message.tables != null && message.hasOwnProperty("tables")) { + if (!Array.isArray(message.tables)) + return "tables: array expected"; + for (let i = 0; i < message.tables.length; ++i) { + let error = $root.binlogdata.MinimalTable.verify(message.tables[i]); + if (error) + return "tables." + error; + } } return null; }; /** - * Creates a VTGateCallerID message from a plain object. Also converts values to their respective internal types. + * Creates a MinimalSchema message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.VTGateCallerID + * @memberof binlogdata.MinimalSchema * @static * @param {Object.} object Plain object - * @returns {query.VTGateCallerID} VTGateCallerID + * @returns {binlogdata.MinimalSchema} MinimalSchema */ - VTGateCallerID.fromObject = function fromObject(object) { - if (object instanceof $root.query.VTGateCallerID) + MinimalSchema.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.MinimalSchema) return object; - let message = new $root.query.VTGateCallerID(); - if (object.username != null) - message.username = String(object.username); - if (object.groups) { - if (!Array.isArray(object.groups)) - throw TypeError(".query.VTGateCallerID.groups: array expected"); - message.groups = []; - for (let i = 0; i < object.groups.length; ++i) - message.groups[i] = String(object.groups[i]); + let message = new $root.binlogdata.MinimalSchema(); + if (object.tables) { + if (!Array.isArray(object.tables)) + throw TypeError(".binlogdata.MinimalSchema.tables: array expected"); + message.tables = []; + for (let i = 0; i < object.tables.length; ++i) { + if (typeof object.tables[i] !== "object") + throw TypeError(".binlogdata.MinimalSchema.tables: object expected"); + message.tables[i] = $root.binlogdata.MinimalTable.fromObject(object.tables[i]); + } } return message; }; /** - * Creates a plain object from a VTGateCallerID message. Also converts values to other types if specified. + * Creates a plain object from a MinimalSchema message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.VTGateCallerID + * @memberof binlogdata.MinimalSchema * @static - * @param {query.VTGateCallerID} message VTGateCallerID + * @param {binlogdata.MinimalSchema} message MinimalSchema * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - VTGateCallerID.toObject = function toObject(message, options) { + MinimalSchema.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) - object.groups = []; - if (options.defaults) - object.username = ""; - if (message.username != null && message.hasOwnProperty("username")) - object.username = message.username; - if (message.groups && message.groups.length) { - object.groups = []; - for (let j = 0; j < message.groups.length; ++j) - object.groups[j] = message.groups[j]; + object.tables = []; + if (message.tables && message.tables.length) { + object.tables = []; + for (let j = 0; j < message.tables.length; ++j) + object.tables[j] = $root.binlogdata.MinimalTable.toObject(message.tables[j], options); } return object; }; /** - * Converts this VTGateCallerID to JSON. + * Converts this MinimalSchema to JSON. 
* @function toJSON - * @memberof query.VTGateCallerID + * @memberof binlogdata.MinimalSchema * @instance * @returns {Object.} JSON object */ - VTGateCallerID.prototype.toJSON = function toJSON() { + MinimalSchema.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for VTGateCallerID + * Gets the default type url for MinimalSchema * @function getTypeUrl - * @memberof query.VTGateCallerID + * @memberof binlogdata.MinimalSchema * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - VTGateCallerID.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MinimalSchema.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.VTGateCallerID"; + return typeUrlPrefix + "/binlogdata.MinimalSchema"; }; - return VTGateCallerID; + return MinimalSchema; })(); - query.EventToken = (function() { + binlogdata.VStreamRequest = (function() { /** - * Properties of an EventToken. - * @memberof query - * @interface IEventToken - * @property {number|Long|null} [timestamp] EventToken timestamp - * @property {string|null} [shard] EventToken shard - * @property {string|null} [position] EventToken position + * Properties of a VStreamRequest. + * @memberof binlogdata + * @interface IVStreamRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] VStreamRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] VStreamRequest immediate_caller_id + * @property {query.ITarget|null} [target] VStreamRequest target + * @property {string|null} [position] VStreamRequest position + * @property {binlogdata.IFilter|null} [filter] VStreamRequest filter + * @property {Array.|null} [table_last_p_ks] VStreamRequest table_last_p_ks */ /** - * Constructs a new EventToken. 
- * @memberof query - * @classdesc Represents an EventToken. - * @implements IEventToken + * Constructs a new VStreamRequest. + * @memberof binlogdata + * @classdesc Represents a VStreamRequest. + * @implements IVStreamRequest * @constructor - * @param {query.IEventToken=} [properties] Properties to set + * @param {binlogdata.IVStreamRequest=} [properties] Properties to set */ - function EventToken(properties) { + function VStreamRequest(properties) { + this.table_last_p_ks = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -71906,105 +73522,150 @@ export const query = $root.query = (() => { } /** - * EventToken timestamp. - * @member {number|Long} timestamp - * @memberof query.EventToken + * VStreamRequest effective_caller_id. + * @member {vtrpc.ICallerID|null|undefined} effective_caller_id + * @memberof binlogdata.VStreamRequest * @instance */ - EventToken.prototype.timestamp = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + VStreamRequest.prototype.effective_caller_id = null; /** - * EventToken shard. - * @member {string} shard - * @memberof query.EventToken + * VStreamRequest immediate_caller_id. + * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id + * @memberof binlogdata.VStreamRequest * @instance */ - EventToken.prototype.shard = ""; + VStreamRequest.prototype.immediate_caller_id = null; /** - * EventToken position. + * VStreamRequest target. + * @member {query.ITarget|null|undefined} target + * @memberof binlogdata.VStreamRequest + * @instance + */ + VStreamRequest.prototype.target = null; + + /** + * VStreamRequest position. * @member {string} position - * @memberof query.EventToken + * @memberof binlogdata.VStreamRequest * @instance */ - EventToken.prototype.position = ""; + VStreamRequest.prototype.position = ""; /** - * Creates a new EventToken instance using the specified properties. + * VStreamRequest filter. 
+ * @member {binlogdata.IFilter|null|undefined} filter + * @memberof binlogdata.VStreamRequest + * @instance + */ + VStreamRequest.prototype.filter = null; + + /** + * VStreamRequest table_last_p_ks. + * @member {Array.} table_last_p_ks + * @memberof binlogdata.VStreamRequest + * @instance + */ + VStreamRequest.prototype.table_last_p_ks = $util.emptyArray; + + /** + * Creates a new VStreamRequest instance using the specified properties. * @function create - * @memberof query.EventToken + * @memberof binlogdata.VStreamRequest * @static - * @param {query.IEventToken=} [properties] Properties to set - * @returns {query.EventToken} EventToken instance + * @param {binlogdata.IVStreamRequest=} [properties] Properties to set + * @returns {binlogdata.VStreamRequest} VStreamRequest instance */ - EventToken.create = function create(properties) { - return new EventToken(properties); + VStreamRequest.create = function create(properties) { + return new VStreamRequest(properties); }; /** - * Encodes the specified EventToken message. Does not implicitly {@link query.EventToken.verify|verify} messages. + * Encodes the specified VStreamRequest message. Does not implicitly {@link binlogdata.VStreamRequest.verify|verify} messages. 
* @function encode - * @memberof query.EventToken + * @memberof binlogdata.VStreamRequest * @static - * @param {query.IEventToken} message EventToken message or plain object to encode + * @param {binlogdata.IVStreamRequest} message VStreamRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - EventToken.encode = function encode(message, writer) { + VStreamRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.timestamp != null && Object.hasOwnProperty.call(message, "timestamp")) - writer.uint32(/* id 1, wireType 0 =*/8).int64(message.timestamp); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) + $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) + $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.target != null && Object.hasOwnProperty.call(message, "target")) + $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); if (message.position != null && Object.hasOwnProperty.call(message, "position")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.position); + writer.uint32(/* id 4, wireType 2 =*/34).string(message.position); + if (message.filter != null && Object.hasOwnProperty.call(message, "filter")) + $root.binlogdata.Filter.encode(message.filter, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.table_last_p_ks != null && message.table_last_p_ks.length) + for (let i = 0; i < message.table_last_p_ks.length; 
++i) + $root.binlogdata.TableLastPK.encode(message.table_last_p_ks[i], writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); return writer; }; /** - * Encodes the specified EventToken message, length delimited. Does not implicitly {@link query.EventToken.verify|verify} messages. + * Encodes the specified VStreamRequest message, length delimited. Does not implicitly {@link binlogdata.VStreamRequest.verify|verify} messages. * @function encodeDelimited - * @memberof query.EventToken + * @memberof binlogdata.VStreamRequest * @static - * @param {query.IEventToken} message EventToken message or plain object to encode + * @param {binlogdata.IVStreamRequest} message VStreamRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - EventToken.encodeDelimited = function encodeDelimited(message, writer) { + VStreamRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an EventToken message from the specified reader or buffer. + * Decodes a VStreamRequest message from the specified reader or buffer. * @function decode - * @memberof query.EventToken + * @memberof binlogdata.VStreamRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.EventToken} EventToken + * @returns {binlogdata.VStreamRequest} VStreamRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - EventToken.decode = function decode(reader, length) { + VStreamRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.EventToken(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.binlogdata.VStreamRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.timestamp = reader.int64(); + message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); break; } case 2: { - message.shard = reader.string(); + message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); break; } case 3: { + message.target = $root.query.Target.decode(reader, reader.uint32()); + break; + } + case 4: { message.position = reader.string(); break; } + case 5: { + message.filter = $root.binlogdata.Filter.decode(reader, reader.uint32()); + break; + } + case 6: { + if (!(message.table_last_p_ks && message.table_last_p_ks.length)) + message.table_last_p_ks = []; + message.table_last_p_ks.push($root.binlogdata.TableLastPK.decode(reader, reader.uint32())); + break; + } default: reader.skipType(tag & 7); break; @@ -72014,308 +73675,202 @@ export const query = $root.query = (() => { }; /** - * Decodes an EventToken message from the specified reader or buffer, length delimited. + * Decodes a VStreamRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.EventToken + * @memberof binlogdata.VStreamRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.EventToken} EventToken + * @returns {binlogdata.VStreamRequest} VStreamRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - EventToken.decodeDelimited = function decodeDelimited(reader) { + VStreamRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an EventToken message. + * Verifies a VStreamRequest message. 
* @function verify - * @memberof query.EventToken + * @memberof binlogdata.VStreamRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - EventToken.verify = function verify(message) { + VStreamRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.timestamp != null && message.hasOwnProperty("timestamp")) - if (!$util.isInteger(message.timestamp) && !(message.timestamp && $util.isInteger(message.timestamp.low) && $util.isInteger(message.timestamp.high))) - return "timestamp: integer|Long expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { + let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); + if (error) + return "effective_caller_id." + error; + } + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { + let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); + if (error) + return "immediate_caller_id." + error; + } + if (message.target != null && message.hasOwnProperty("target")) { + let error = $root.query.Target.verify(message.target); + if (error) + return "target." + error; + } if (message.position != null && message.hasOwnProperty("position")) if (!$util.isString(message.position)) return "position: string expected"; + if (message.filter != null && message.hasOwnProperty("filter")) { + let error = $root.binlogdata.Filter.verify(message.filter); + if (error) + return "filter." 
+ error; + } + if (message.table_last_p_ks != null && message.hasOwnProperty("table_last_p_ks")) { + if (!Array.isArray(message.table_last_p_ks)) + return "table_last_p_ks: array expected"; + for (let i = 0; i < message.table_last_p_ks.length; ++i) { + let error = $root.binlogdata.TableLastPK.verify(message.table_last_p_ks[i]); + if (error) + return "table_last_p_ks." + error; + } + } return null; }; /** - * Creates an EventToken message from a plain object. Also converts values to their respective internal types. + * Creates a VStreamRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.EventToken + * @memberof binlogdata.VStreamRequest * @static * @param {Object.} object Plain object - * @returns {query.EventToken} EventToken + * @returns {binlogdata.VStreamRequest} VStreamRequest */ - EventToken.fromObject = function fromObject(object) { - if (object instanceof $root.query.EventToken) + VStreamRequest.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.VStreamRequest) return object; - let message = new $root.query.EventToken(); - if (object.timestamp != null) - if ($util.Long) - (message.timestamp = $util.Long.fromValue(object.timestamp)).unsigned = false; - else if (typeof object.timestamp === "string") - message.timestamp = parseInt(object.timestamp, 10); - else if (typeof object.timestamp === "number") - message.timestamp = object.timestamp; - else if (typeof object.timestamp === "object") - message.timestamp = new $util.LongBits(object.timestamp.low >>> 0, object.timestamp.high >>> 0).toNumber(); - if (object.shard != null) - message.shard = String(object.shard); + let message = new $root.binlogdata.VStreamRequest(); + if (object.effective_caller_id != null) { + if (typeof object.effective_caller_id !== "object") + throw TypeError(".binlogdata.VStreamRequest.effective_caller_id: object expected"); + message.effective_caller_id = 
$root.vtrpc.CallerID.fromObject(object.effective_caller_id); + } + if (object.immediate_caller_id != null) { + if (typeof object.immediate_caller_id !== "object") + throw TypeError(".binlogdata.VStreamRequest.immediate_caller_id: object expected"); + message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); + } + if (object.target != null) { + if (typeof object.target !== "object") + throw TypeError(".binlogdata.VStreamRequest.target: object expected"); + message.target = $root.query.Target.fromObject(object.target); + } if (object.position != null) message.position = String(object.position); + if (object.filter != null) { + if (typeof object.filter !== "object") + throw TypeError(".binlogdata.VStreamRequest.filter: object expected"); + message.filter = $root.binlogdata.Filter.fromObject(object.filter); + } + if (object.table_last_p_ks) { + if (!Array.isArray(object.table_last_p_ks)) + throw TypeError(".binlogdata.VStreamRequest.table_last_p_ks: array expected"); + message.table_last_p_ks = []; + for (let i = 0; i < object.table_last_p_ks.length; ++i) { + if (typeof object.table_last_p_ks[i] !== "object") + throw TypeError(".binlogdata.VStreamRequest.table_last_p_ks: object expected"); + message.table_last_p_ks[i] = $root.binlogdata.TableLastPK.fromObject(object.table_last_p_ks[i]); + } + } return message; }; /** - * Creates a plain object from an EventToken message. Also converts values to other types if specified. + * Creates a plain object from a VStreamRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.EventToken + * @memberof binlogdata.VStreamRequest * @static - * @param {query.EventToken} message EventToken + * @param {binlogdata.VStreamRequest} message VStreamRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - EventToken.toObject = function toObject(message, options) { + VStreamRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) + object.table_last_p_ks = []; if (options.defaults) { - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.timestamp = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.timestamp = options.longs === String ? "0" : 0; - object.shard = ""; + object.effective_caller_id = null; + object.immediate_caller_id = null; + object.target = null; object.position = ""; + object.filter = null; } - if (message.timestamp != null && message.hasOwnProperty("timestamp")) - if (typeof message.timestamp === "number") - object.timestamp = options.longs === String ? String(message.timestamp) : message.timestamp; - else - object.timestamp = options.longs === String ? $util.Long.prototype.toString.call(message.timestamp) : options.longs === Number ? 
new $util.LongBits(message.timestamp.low >>> 0, message.timestamp.high >>> 0).toNumber() : message.timestamp; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) + object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) + object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); + if (message.target != null && message.hasOwnProperty("target")) + object.target = $root.query.Target.toObject(message.target, options); if (message.position != null && message.hasOwnProperty("position")) object.position = message.position; + if (message.filter != null && message.hasOwnProperty("filter")) + object.filter = $root.binlogdata.Filter.toObject(message.filter, options); + if (message.table_last_p_ks && message.table_last_p_ks.length) { + object.table_last_p_ks = []; + for (let j = 0; j < message.table_last_p_ks.length; ++j) + object.table_last_p_ks[j] = $root.binlogdata.TableLastPK.toObject(message.table_last_p_ks[j], options); + } return object; }; /** - * Converts this EventToken to JSON. + * Converts this VStreamRequest to JSON. 
* @function toJSON - * @memberof query.EventToken + * @memberof binlogdata.VStreamRequest * @instance * @returns {Object.} JSON object */ - EventToken.prototype.toJSON = function toJSON() { + VStreamRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for EventToken + * Gets the default type url for VStreamRequest * @function getTypeUrl - * @memberof query.EventToken + * @memberof binlogdata.VStreamRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - EventToken.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VStreamRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.EventToken"; + return typeUrlPrefix + "/binlogdata.VStreamRequest"; }; - return EventToken; - })(); - - /** - * MySqlFlag enum. 
- * @name query.MySqlFlag - * @enum {number} - * @property {number} EMPTY=0 EMPTY value - * @property {number} NOT_NULL_FLAG=1 NOT_NULL_FLAG value - * @property {number} PRI_KEY_FLAG=2 PRI_KEY_FLAG value - * @property {number} UNIQUE_KEY_FLAG=4 UNIQUE_KEY_FLAG value - * @property {number} MULTIPLE_KEY_FLAG=8 MULTIPLE_KEY_FLAG value - * @property {number} BLOB_FLAG=16 BLOB_FLAG value - * @property {number} UNSIGNED_FLAG=32 UNSIGNED_FLAG value - * @property {number} ZEROFILL_FLAG=64 ZEROFILL_FLAG value - * @property {number} BINARY_FLAG=128 BINARY_FLAG value - * @property {number} ENUM_FLAG=256 ENUM_FLAG value - * @property {number} AUTO_INCREMENT_FLAG=512 AUTO_INCREMENT_FLAG value - * @property {number} TIMESTAMP_FLAG=1024 TIMESTAMP_FLAG value - * @property {number} SET_FLAG=2048 SET_FLAG value - * @property {number} NO_DEFAULT_VALUE_FLAG=4096 NO_DEFAULT_VALUE_FLAG value - * @property {number} ON_UPDATE_NOW_FLAG=8192 ON_UPDATE_NOW_FLAG value - * @property {number} NUM_FLAG=32768 NUM_FLAG value - * @property {number} PART_KEY_FLAG=16384 PART_KEY_FLAG value - * @property {number} GROUP_FLAG=32768 GROUP_FLAG value - * @property {number} UNIQUE_FLAG=65536 UNIQUE_FLAG value - * @property {number} BINCMP_FLAG=131072 BINCMP_FLAG value - */ - query.MySqlFlag = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "EMPTY"] = 0; - values[valuesById[1] = "NOT_NULL_FLAG"] = 1; - values[valuesById[2] = "PRI_KEY_FLAG"] = 2; - values[valuesById[4] = "UNIQUE_KEY_FLAG"] = 4; - values[valuesById[8] = "MULTIPLE_KEY_FLAG"] = 8; - values[valuesById[16] = "BLOB_FLAG"] = 16; - values[valuesById[32] = "UNSIGNED_FLAG"] = 32; - values[valuesById[64] = "ZEROFILL_FLAG"] = 64; - values[valuesById[128] = "BINARY_FLAG"] = 128; - values[valuesById[256] = "ENUM_FLAG"] = 256; - values[valuesById[512] = "AUTO_INCREMENT_FLAG"] = 512; - values[valuesById[1024] = "TIMESTAMP_FLAG"] = 1024; - values[valuesById[2048] = "SET_FLAG"] = 2048; - 
values[valuesById[4096] = "NO_DEFAULT_VALUE_FLAG"] = 4096; - values[valuesById[8192] = "ON_UPDATE_NOW_FLAG"] = 8192; - values[valuesById[32768] = "NUM_FLAG"] = 32768; - values[valuesById[16384] = "PART_KEY_FLAG"] = 16384; - values["GROUP_FLAG"] = 32768; - values[valuesById[65536] = "UNIQUE_FLAG"] = 65536; - values[valuesById[131072] = "BINCMP_FLAG"] = 131072; - return values; - })(); - - /** - * Flag enum. - * @name query.Flag - * @enum {number} - * @property {number} NONE=0 NONE value - * @property {number} ISINTEGRAL=256 ISINTEGRAL value - * @property {number} ISUNSIGNED=512 ISUNSIGNED value - * @property {number} ISFLOAT=1024 ISFLOAT value - * @property {number} ISQUOTED=2048 ISQUOTED value - * @property {number} ISTEXT=4096 ISTEXT value - * @property {number} ISBINARY=8192 ISBINARY value - */ - query.Flag = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "NONE"] = 0; - values[valuesById[256] = "ISINTEGRAL"] = 256; - values[valuesById[512] = "ISUNSIGNED"] = 512; - values[valuesById[1024] = "ISFLOAT"] = 1024; - values[valuesById[2048] = "ISQUOTED"] = 2048; - values[valuesById[4096] = "ISTEXT"] = 4096; - values[valuesById[8192] = "ISBINARY"] = 8192; - return values; - })(); - - /** - * Type enum. 
- * @name query.Type - * @enum {number} - * @property {number} NULL_TYPE=0 NULL_TYPE value - * @property {number} INT8=257 INT8 value - * @property {number} UINT8=770 UINT8 value - * @property {number} INT16=259 INT16 value - * @property {number} UINT16=772 UINT16 value - * @property {number} INT24=261 INT24 value - * @property {number} UINT24=774 UINT24 value - * @property {number} INT32=263 INT32 value - * @property {number} UINT32=776 UINT32 value - * @property {number} INT64=265 INT64 value - * @property {number} UINT64=778 UINT64 value - * @property {number} FLOAT32=1035 FLOAT32 value - * @property {number} FLOAT64=1036 FLOAT64 value - * @property {number} TIMESTAMP=2061 TIMESTAMP value - * @property {number} DATE=2062 DATE value - * @property {number} TIME=2063 TIME value - * @property {number} DATETIME=2064 DATETIME value - * @property {number} YEAR=785 YEAR value - * @property {number} DECIMAL=18 DECIMAL value - * @property {number} TEXT=6163 TEXT value - * @property {number} BLOB=10260 BLOB value - * @property {number} VARCHAR=6165 VARCHAR value - * @property {number} VARBINARY=10262 VARBINARY value - * @property {number} CHAR=6167 CHAR value - * @property {number} BINARY=10264 BINARY value - * @property {number} BIT=2073 BIT value - * @property {number} ENUM=2074 ENUM value - * @property {number} SET=2075 SET value - * @property {number} TUPLE=28 TUPLE value - * @property {number} GEOMETRY=2077 GEOMETRY value - * @property {number} JSON=2078 JSON value - * @property {number} EXPRESSION=31 EXPRESSION value - * @property {number} HEXNUM=4128 HEXNUM value - * @property {number} HEXVAL=4129 HEXVAL value - * @property {number} BITNUM=4130 BITNUM value - */ - query.Type = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "NULL_TYPE"] = 0; - values[valuesById[257] = "INT8"] = 257; - values[valuesById[770] = "UINT8"] = 770; - values[valuesById[259] = "INT16"] = 259; - values[valuesById[772] = "UINT16"] = 772; - 
values[valuesById[261] = "INT24"] = 261; - values[valuesById[774] = "UINT24"] = 774; - values[valuesById[263] = "INT32"] = 263; - values[valuesById[776] = "UINT32"] = 776; - values[valuesById[265] = "INT64"] = 265; - values[valuesById[778] = "UINT64"] = 778; - values[valuesById[1035] = "FLOAT32"] = 1035; - values[valuesById[1036] = "FLOAT64"] = 1036; - values[valuesById[2061] = "TIMESTAMP"] = 2061; - values[valuesById[2062] = "DATE"] = 2062; - values[valuesById[2063] = "TIME"] = 2063; - values[valuesById[2064] = "DATETIME"] = 2064; - values[valuesById[785] = "YEAR"] = 785; - values[valuesById[18] = "DECIMAL"] = 18; - values[valuesById[6163] = "TEXT"] = 6163; - values[valuesById[10260] = "BLOB"] = 10260; - values[valuesById[6165] = "VARCHAR"] = 6165; - values[valuesById[10262] = "VARBINARY"] = 10262; - values[valuesById[6167] = "CHAR"] = 6167; - values[valuesById[10264] = "BINARY"] = 10264; - values[valuesById[2073] = "BIT"] = 2073; - values[valuesById[2074] = "ENUM"] = 2074; - values[valuesById[2075] = "SET"] = 2075; - values[valuesById[28] = "TUPLE"] = 28; - values[valuesById[2077] = "GEOMETRY"] = 2077; - values[valuesById[2078] = "JSON"] = 2078; - values[valuesById[31] = "EXPRESSION"] = 31; - values[valuesById[4128] = "HEXNUM"] = 4128; - values[valuesById[4129] = "HEXVAL"] = 4129; - values[valuesById[4130] = "BITNUM"] = 4130; - return values; + return VStreamRequest; })(); - query.Value = (function() { + binlogdata.VStreamResponse = (function() { /** - * Properties of a Value. - * @memberof query - * @interface IValue - * @property {query.Type|null} [type] Value type - * @property {Uint8Array|null} [value] Value value + * Properties of a VStreamResponse. + * @memberof binlogdata + * @interface IVStreamResponse + * @property {Array.|null} [events] VStreamResponse events */ /** - * Constructs a new Value. - * @memberof query - * @classdesc Represents a Value. - * @implements IValue + * Constructs a new VStreamResponse. 
+ * @memberof binlogdata + * @classdesc Represents a VStreamResponse. + * @implements IVStreamResponse * @constructor - * @param {query.IValue=} [properties] Properties to set + * @param {binlogdata.IVStreamResponse=} [properties] Properties to set */ - function Value(properties) { + function VStreamResponse(properties) { + this.events = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -72323,89 +73878,78 @@ export const query = $root.query = (() => { } /** - * Value type. - * @member {query.Type} type - * @memberof query.Value - * @instance - */ - Value.prototype.type = 0; - - /** - * Value value. - * @member {Uint8Array} value - * @memberof query.Value + * VStreamResponse events. + * @member {Array.} events + * @memberof binlogdata.VStreamResponse * @instance */ - Value.prototype.value = $util.newBuffer([]); + VStreamResponse.prototype.events = $util.emptyArray; /** - * Creates a new Value instance using the specified properties. + * Creates a new VStreamResponse instance using the specified properties. * @function create - * @memberof query.Value + * @memberof binlogdata.VStreamResponse * @static - * @param {query.IValue=} [properties] Properties to set - * @returns {query.Value} Value instance + * @param {binlogdata.IVStreamResponse=} [properties] Properties to set + * @returns {binlogdata.VStreamResponse} VStreamResponse instance */ - Value.create = function create(properties) { - return new Value(properties); + VStreamResponse.create = function create(properties) { + return new VStreamResponse(properties); }; /** - * Encodes the specified Value message. Does not implicitly {@link query.Value.verify|verify} messages. + * Encodes the specified VStreamResponse message. Does not implicitly {@link binlogdata.VStreamResponse.verify|verify} messages. 
* @function encode - * @memberof query.Value + * @memberof binlogdata.VStreamResponse * @static - * @param {query.IValue} message Value message or plain object to encode + * @param {binlogdata.IVStreamResponse} message VStreamResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Value.encode = function encode(message, writer) { + VStreamResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.type != null && Object.hasOwnProperty.call(message, "type")) - writer.uint32(/* id 1, wireType 0 =*/8).int32(message.type); - if (message.value != null && Object.hasOwnProperty.call(message, "value")) - writer.uint32(/* id 2, wireType 2 =*/18).bytes(message.value); + if (message.events != null && message.events.length) + for (let i = 0; i < message.events.length; ++i) + $root.binlogdata.VEvent.encode(message.events[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified Value message, length delimited. Does not implicitly {@link query.Value.verify|verify} messages. + * Encodes the specified VStreamResponse message, length delimited. Does not implicitly {@link binlogdata.VStreamResponse.verify|verify} messages. * @function encodeDelimited - * @memberof query.Value + * @memberof binlogdata.VStreamResponse * @static - * @param {query.IValue} message Value message or plain object to encode + * @param {binlogdata.IVStreamResponse} message VStreamResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Value.encodeDelimited = function encodeDelimited(message, writer) { + VStreamResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Value message from the specified reader or buffer. 
+ * Decodes a VStreamResponse message from the specified reader or buffer. * @function decode - * @memberof query.Value + * @memberof binlogdata.VStreamResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.Value} Value + * @returns {binlogdata.VStreamResponse} VStreamResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Value.decode = function decode(reader, length) { + VStreamResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.Value(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.VStreamResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.type = reader.int32(); - break; - } - case 2: { - message.value = reader.bytes(); + if (!(message.events && message.events.length)) + message.events = []; + message.events.push($root.binlogdata.VEvent.decode(reader, reader.uint32())); break; } default: @@ -72417,327 +73961,143 @@ export const query = $root.query = (() => { }; /** - * Decodes a Value message from the specified reader or buffer, length delimited. + * Decodes a VStreamResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.Value + * @memberof binlogdata.VStreamResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.Value} Value + * @returns {binlogdata.VStreamResponse} VStreamResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Value.decodeDelimited = function decodeDelimited(reader) { + VStreamResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Value message. + * Verifies a VStreamResponse message. * @function verify - * @memberof query.Value + * @memberof binlogdata.VStreamResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Value.verify = function verify(message) { + VStreamResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.type != null && message.hasOwnProperty("type")) - switch (message.type) { - default: - return "type: enum value expected"; - case 0: - case 257: - case 770: - case 259: - case 772: - case 261: - case 774: - case 263: - case 776: - case 265: - case 778: - case 1035: - case 1036: - case 2061: - case 2062: - case 2063: - case 2064: - case 785: - case 18: - case 6163: - case 10260: - case 6165: - case 10262: - case 6167: - case 10264: - case 2073: - case 2074: - case 2075: - case 28: - case 2077: - case 2078: - case 31: - case 4128: - case 4129: - case 4130: - break; + if (message.events != null && message.hasOwnProperty("events")) { + if (!Array.isArray(message.events)) + return "events: array expected"; + for (let i = 0; i < message.events.length; ++i) { + let error = $root.binlogdata.VEvent.verify(message.events[i]); + if (error) + return 
"events." + error; } - if (message.value != null && message.hasOwnProperty("value")) - if (!(message.value && typeof message.value.length === "number" || $util.isString(message.value))) - return "value: buffer expected"; + } return null; }; /** - * Creates a Value message from a plain object. Also converts values to their respective internal types. + * Creates a VStreamResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.Value + * @memberof binlogdata.VStreamResponse * @static * @param {Object.} object Plain object - * @returns {query.Value} Value + * @returns {binlogdata.VStreamResponse} VStreamResponse */ - Value.fromObject = function fromObject(object) { - if (object instanceof $root.query.Value) + VStreamResponse.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.VStreamResponse) return object; - let message = new $root.query.Value(); - switch (object.type) { - default: - if (typeof object.type === "number") { - message.type = object.type; - break; + let message = new $root.binlogdata.VStreamResponse(); + if (object.events) { + if (!Array.isArray(object.events)) + throw TypeError(".binlogdata.VStreamResponse.events: array expected"); + message.events = []; + for (let i = 0; i < object.events.length; ++i) { + if (typeof object.events[i] !== "object") + throw TypeError(".binlogdata.VStreamResponse.events: object expected"); + message.events[i] = $root.binlogdata.VEvent.fromObject(object.events[i]); } - break; - case "NULL_TYPE": - case 0: - message.type = 0; - break; - case "INT8": - case 257: - message.type = 257; - break; - case "UINT8": - case 770: - message.type = 770; - break; - case "INT16": - case 259: - message.type = 259; - break; - case "UINT16": - case 772: - message.type = 772; - break; - case "INT24": - case 261: - message.type = 261; - break; - case "UINT24": - case 774: - message.type = 774; - break; - case "INT32": - case 263: - 
message.type = 263; - break; - case "UINT32": - case 776: - message.type = 776; - break; - case "INT64": - case 265: - message.type = 265; - break; - case "UINT64": - case 778: - message.type = 778; - break; - case "FLOAT32": - case 1035: - message.type = 1035; - break; - case "FLOAT64": - case 1036: - message.type = 1036; - break; - case "TIMESTAMP": - case 2061: - message.type = 2061; - break; - case "DATE": - case 2062: - message.type = 2062; - break; - case "TIME": - case 2063: - message.type = 2063; - break; - case "DATETIME": - case 2064: - message.type = 2064; - break; - case "YEAR": - case 785: - message.type = 785; - break; - case "DECIMAL": - case 18: - message.type = 18; - break; - case "TEXT": - case 6163: - message.type = 6163; - break; - case "BLOB": - case 10260: - message.type = 10260; - break; - case "VARCHAR": - case 6165: - message.type = 6165; - break; - case "VARBINARY": - case 10262: - message.type = 10262; - break; - case "CHAR": - case 6167: - message.type = 6167; - break; - case "BINARY": - case 10264: - message.type = 10264; - break; - case "BIT": - case 2073: - message.type = 2073; - break; - case "ENUM": - case 2074: - message.type = 2074; - break; - case "SET": - case 2075: - message.type = 2075; - break; - case "TUPLE": - case 28: - message.type = 28; - break; - case "GEOMETRY": - case 2077: - message.type = 2077; - break; - case "JSON": - case 2078: - message.type = 2078; - break; - case "EXPRESSION": - case 31: - message.type = 31; - break; - case "HEXNUM": - case 4128: - message.type = 4128; - break; - case "HEXVAL": - case 4129: - message.type = 4129; - break; - case "BITNUM": - case 4130: - message.type = 4130; - break; } - if (object.value != null) - if (typeof object.value === "string") - $util.base64.decode(object.value, message.value = $util.newBuffer($util.base64.length(object.value)), 0); - else if (object.value.length >= 0) - message.value = object.value; return message; }; /** - * Creates a plain object from a Value 
message. Also converts values to other types if specified. + * Creates a plain object from a VStreamResponse message. Also converts values to other types if specified. * @function toObject - * @memberof query.Value + * @memberof binlogdata.VStreamResponse * @static - * @param {query.Value} message Value + * @param {binlogdata.VStreamResponse} message VStreamResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Value.toObject = function toObject(message, options) { + VStreamResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.type = options.enums === String ? "NULL_TYPE" : 0; - if (options.bytes === String) - object.value = ""; - else { - object.value = []; - if (options.bytes !== Array) - object.value = $util.newBuffer(object.value); - } + if (options.arrays || options.defaults) + object.events = []; + if (message.events && message.events.length) { + object.events = []; + for (let j = 0; j < message.events.length; ++j) + object.events[j] = $root.binlogdata.VEvent.toObject(message.events[j], options); } - if (message.type != null && message.hasOwnProperty("type")) - object.type = options.enums === String ? $root.query.Type[message.type] === undefined ? message.type : $root.query.Type[message.type] : message.type; - if (message.value != null && message.hasOwnProperty("value")) - object.value = options.bytes === String ? $util.base64.encode(message.value, 0, message.value.length) : options.bytes === Array ? Array.prototype.slice.call(message.value) : message.value; return object; }; /** - * Converts this Value to JSON. + * Converts this VStreamResponse to JSON. 
* @function toJSON - * @memberof query.Value + * @memberof binlogdata.VStreamResponse * @instance * @returns {Object.} JSON object */ - Value.prototype.toJSON = function toJSON() { + VStreamResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for Value + * Gets the default type url for VStreamResponse * @function getTypeUrl - * @memberof query.Value + * @memberof binlogdata.VStreamResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - Value.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VStreamResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.Value"; + return typeUrlPrefix + "/binlogdata.VStreamResponse"; }; - return Value; + return VStreamResponse; })(); - query.BindVariable = (function() { + binlogdata.VStreamRowsRequest = (function() { /** - * Properties of a BindVariable. - * @memberof query - * @interface IBindVariable - * @property {query.Type|null} [type] BindVariable type - * @property {Uint8Array|null} [value] BindVariable value - * @property {Array.|null} [values] BindVariable values + * Properties of a VStreamRowsRequest. + * @memberof binlogdata + * @interface IVStreamRowsRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] VStreamRowsRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] VStreamRowsRequest immediate_caller_id + * @property {query.ITarget|null} [target] VStreamRowsRequest target + * @property {string|null} [query] VStreamRowsRequest query + * @property {query.IQueryResult|null} [lastpk] VStreamRowsRequest lastpk */ /** - * Constructs a new BindVariable. - * @memberof query - * @classdesc Represents a BindVariable. 
- * @implements IBindVariable + * Constructs a new VStreamRowsRequest. + * @memberof binlogdata + * @classdesc Represents a VStreamRowsRequest. + * @implements IVStreamRowsRequest * @constructor - * @param {query.IBindVariable=} [properties] Properties to set + * @param {binlogdata.IVStreamRowsRequest=} [properties] Properties to set */ - function BindVariable(properties) { - this.values = []; + function VStreamRowsRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -72745,106 +74105,131 @@ export const query = $root.query = (() => { } /** - * BindVariable type. - * @member {query.Type} type - * @memberof query.BindVariable + * VStreamRowsRequest effective_caller_id. + * @member {vtrpc.ICallerID|null|undefined} effective_caller_id + * @memberof binlogdata.VStreamRowsRequest * @instance */ - BindVariable.prototype.type = 0; + VStreamRowsRequest.prototype.effective_caller_id = null; /** - * BindVariable value. - * @member {Uint8Array} value - * @memberof query.BindVariable + * VStreamRowsRequest immediate_caller_id. + * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id + * @memberof binlogdata.VStreamRowsRequest * @instance */ - BindVariable.prototype.value = $util.newBuffer([]); + VStreamRowsRequest.prototype.immediate_caller_id = null; /** - * BindVariable values. - * @member {Array.} values - * @memberof query.BindVariable + * VStreamRowsRequest target. + * @member {query.ITarget|null|undefined} target + * @memberof binlogdata.VStreamRowsRequest * @instance */ - BindVariable.prototype.values = $util.emptyArray; + VStreamRowsRequest.prototype.target = null; /** - * Creates a new BindVariable instance using the specified properties. + * VStreamRowsRequest query. + * @member {string} query + * @memberof binlogdata.VStreamRowsRequest + * @instance + */ + VStreamRowsRequest.prototype.query = ""; + + /** + * VStreamRowsRequest lastpk. 
+ * @member {query.IQueryResult|null|undefined} lastpk + * @memberof binlogdata.VStreamRowsRequest + * @instance + */ + VStreamRowsRequest.prototype.lastpk = null; + + /** + * Creates a new VStreamRowsRequest instance using the specified properties. * @function create - * @memberof query.BindVariable + * @memberof binlogdata.VStreamRowsRequest * @static - * @param {query.IBindVariable=} [properties] Properties to set - * @returns {query.BindVariable} BindVariable instance + * @param {binlogdata.IVStreamRowsRequest=} [properties] Properties to set + * @returns {binlogdata.VStreamRowsRequest} VStreamRowsRequest instance */ - BindVariable.create = function create(properties) { - return new BindVariable(properties); + VStreamRowsRequest.create = function create(properties) { + return new VStreamRowsRequest(properties); }; /** - * Encodes the specified BindVariable message. Does not implicitly {@link query.BindVariable.verify|verify} messages. + * Encodes the specified VStreamRowsRequest message. Does not implicitly {@link binlogdata.VStreamRowsRequest.verify|verify} messages. 
* @function encode - * @memberof query.BindVariable + * @memberof binlogdata.VStreamRowsRequest * @static - * @param {query.IBindVariable} message BindVariable message or plain object to encode + * @param {binlogdata.IVStreamRowsRequest} message VStreamRowsRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BindVariable.encode = function encode(message, writer) { + VStreamRowsRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.type != null && Object.hasOwnProperty.call(message, "type")) - writer.uint32(/* id 1, wireType 0 =*/8).int32(message.type); - if (message.value != null && Object.hasOwnProperty.call(message, "value")) - writer.uint32(/* id 2, wireType 2 =*/18).bytes(message.value); - if (message.values != null && message.values.length) - for (let i = 0; i < message.values.length; ++i) - $root.query.Value.encode(message.values[i], writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) + $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) + $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.target != null && Object.hasOwnProperty.call(message, "target")) + $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.query != null && Object.hasOwnProperty.call(message, "query")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.query); + if (message.lastpk != null && Object.hasOwnProperty.call(message, "lastpk")) + $root.query.QueryResult.encode(message.lastpk, writer.uint32(/* id 5, wireType 2 
=*/42).fork()).ldelim(); return writer; }; /** - * Encodes the specified BindVariable message, length delimited. Does not implicitly {@link query.BindVariable.verify|verify} messages. + * Encodes the specified VStreamRowsRequest message, length delimited. Does not implicitly {@link binlogdata.VStreamRowsRequest.verify|verify} messages. * @function encodeDelimited - * @memberof query.BindVariable + * @memberof binlogdata.VStreamRowsRequest * @static - * @param {query.IBindVariable} message BindVariable message or plain object to encode + * @param {binlogdata.IVStreamRowsRequest} message VStreamRowsRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BindVariable.encodeDelimited = function encodeDelimited(message, writer) { + VStreamRowsRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a BindVariable message from the specified reader or buffer. + * Decodes a VStreamRowsRequest message from the specified reader or buffer. * @function decode - * @memberof query.BindVariable + * @memberof binlogdata.VStreamRowsRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.BindVariable} BindVariable + * @returns {binlogdata.VStreamRowsRequest} VStreamRowsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BindVariable.decode = function decode(reader, length) { + VStreamRowsRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.BindVariable(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.binlogdata.VStreamRowsRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.type = reader.int32(); + message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); break; } case 2: { - message.value = reader.bytes(); + message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); break; } case 3: { - if (!(message.values && message.values.length)) - message.values = []; - message.values.push($root.query.Value.decode(reader, reader.uint32())); + message.target = $root.query.Target.decode(reader, reader.uint32()); + break; + } + case 4: { + message.query = reader.string(); + break; + } + case 5: { + message.lastpk = $root.query.QueryResult.decode(reader, reader.uint32()); break; } default: @@ -72856,352 +74241,184 @@ export const query = $root.query = (() => { }; /** - * Decodes a BindVariable message from the specified reader or buffer, length delimited. + * Decodes a VStreamRowsRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.BindVariable + * @memberof binlogdata.VStreamRowsRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.BindVariable} BindVariable + * @returns {binlogdata.VStreamRowsRequest} VStreamRowsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BindVariable.decodeDelimited = function decodeDelimited(reader) { + VStreamRowsRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a BindVariable message. + * Verifies a VStreamRowsRequest message. 
* @function verify - * @memberof query.BindVariable + * @memberof binlogdata.VStreamRowsRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - BindVariable.verify = function verify(message) { + VStreamRowsRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.type != null && message.hasOwnProperty("type")) - switch (message.type) { - default: - return "type: enum value expected"; - case 0: - case 257: - case 770: - case 259: - case 772: - case 261: - case 774: - case 263: - case 776: - case 265: - case 778: - case 1035: - case 1036: - case 2061: - case 2062: - case 2063: - case 2064: - case 785: - case 18: - case 6163: - case 10260: - case 6165: - case 10262: - case 6167: - case 10264: - case 2073: - case 2074: - case 2075: - case 28: - case 2077: - case 2078: - case 31: - case 4128: - case 4129: - case 4130: - break; - } - if (message.value != null && message.hasOwnProperty("value")) - if (!(message.value && typeof message.value.length === "number" || $util.isString(message.value))) - return "value: buffer expected"; - if (message.values != null && message.hasOwnProperty("values")) { - if (!Array.isArray(message.values)) - return "values: array expected"; - for (let i = 0; i < message.values.length; ++i) { - let error = $root.query.Value.verify(message.values[i]); - if (error) - return "values." + error; - } + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { + let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); + if (error) + return "effective_caller_id." + error; + } + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { + let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); + if (error) + return "immediate_caller_id." 
+ error; + } + if (message.target != null && message.hasOwnProperty("target")) { + let error = $root.query.Target.verify(message.target); + if (error) + return "target." + error; + } + if (message.query != null && message.hasOwnProperty("query")) + if (!$util.isString(message.query)) + return "query: string expected"; + if (message.lastpk != null && message.hasOwnProperty("lastpk")) { + let error = $root.query.QueryResult.verify(message.lastpk); + if (error) + return "lastpk." + error; } return null; }; /** - * Creates a BindVariable message from a plain object. Also converts values to their respective internal types. + * Creates a VStreamRowsRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.BindVariable + * @memberof binlogdata.VStreamRowsRequest * @static * @param {Object.} object Plain object - * @returns {query.BindVariable} BindVariable + * @returns {binlogdata.VStreamRowsRequest} VStreamRowsRequest */ - BindVariable.fromObject = function fromObject(object) { - if (object instanceof $root.query.BindVariable) + VStreamRowsRequest.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.VStreamRowsRequest) return object; - let message = new $root.query.BindVariable(); - switch (object.type) { - default: - if (typeof object.type === "number") { - message.type = object.type; - break; - } - break; - case "NULL_TYPE": - case 0: - message.type = 0; - break; - case "INT8": - case 257: - message.type = 257; - break; - case "UINT8": - case 770: - message.type = 770; - break; - case "INT16": - case 259: - message.type = 259; - break; - case "UINT16": - case 772: - message.type = 772; - break; - case "INT24": - case 261: - message.type = 261; - break; - case "UINT24": - case 774: - message.type = 774; - break; - case "INT32": - case 263: - message.type = 263; - break; - case "UINT32": - case 776: - message.type = 776; - break; - case "INT64": - case 265: - 
message.type = 265; - break; - case "UINT64": - case 778: - message.type = 778; - break; - case "FLOAT32": - case 1035: - message.type = 1035; - break; - case "FLOAT64": - case 1036: - message.type = 1036; - break; - case "TIMESTAMP": - case 2061: - message.type = 2061; - break; - case "DATE": - case 2062: - message.type = 2062; - break; - case "TIME": - case 2063: - message.type = 2063; - break; - case "DATETIME": - case 2064: - message.type = 2064; - break; - case "YEAR": - case 785: - message.type = 785; - break; - case "DECIMAL": - case 18: - message.type = 18; - break; - case "TEXT": - case 6163: - message.type = 6163; - break; - case "BLOB": - case 10260: - message.type = 10260; - break; - case "VARCHAR": - case 6165: - message.type = 6165; - break; - case "VARBINARY": - case 10262: - message.type = 10262; - break; - case "CHAR": - case 6167: - message.type = 6167; - break; - case "BINARY": - case 10264: - message.type = 10264; - break; - case "BIT": - case 2073: - message.type = 2073; - break; - case "ENUM": - case 2074: - message.type = 2074; - break; - case "SET": - case 2075: - message.type = 2075; - break; - case "TUPLE": - case 28: - message.type = 28; - break; - case "GEOMETRY": - case 2077: - message.type = 2077; - break; - case "JSON": - case 2078: - message.type = 2078; - break; - case "EXPRESSION": - case 31: - message.type = 31; - break; - case "HEXNUM": - case 4128: - message.type = 4128; - break; - case "HEXVAL": - case 4129: - message.type = 4129; - break; - case "BITNUM": - case 4130: - message.type = 4130; - break; + let message = new $root.binlogdata.VStreamRowsRequest(); + if (object.effective_caller_id != null) { + if (typeof object.effective_caller_id !== "object") + throw TypeError(".binlogdata.VStreamRowsRequest.effective_caller_id: object expected"); + message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); } - if (object.value != null) - if (typeof object.value === "string") - 
$util.base64.decode(object.value, message.value = $util.newBuffer($util.base64.length(object.value)), 0); - else if (object.value.length >= 0) - message.value = object.value; - if (object.values) { - if (!Array.isArray(object.values)) - throw TypeError(".query.BindVariable.values: array expected"); - message.values = []; - for (let i = 0; i < object.values.length; ++i) { - if (typeof object.values[i] !== "object") - throw TypeError(".query.BindVariable.values: object expected"); - message.values[i] = $root.query.Value.fromObject(object.values[i]); - } + if (object.immediate_caller_id != null) { + if (typeof object.immediate_caller_id !== "object") + throw TypeError(".binlogdata.VStreamRowsRequest.immediate_caller_id: object expected"); + message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); + } + if (object.target != null) { + if (typeof object.target !== "object") + throw TypeError(".binlogdata.VStreamRowsRequest.target: object expected"); + message.target = $root.query.Target.fromObject(object.target); + } + if (object.query != null) + message.query = String(object.query); + if (object.lastpk != null) { + if (typeof object.lastpk !== "object") + throw TypeError(".binlogdata.VStreamRowsRequest.lastpk: object expected"); + message.lastpk = $root.query.QueryResult.fromObject(object.lastpk); } return message; }; /** - * Creates a plain object from a BindVariable message. Also converts values to other types if specified. + * Creates a plain object from a VStreamRowsRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.BindVariable + * @memberof binlogdata.VStreamRowsRequest * @static - * @param {query.BindVariable} message BindVariable + * @param {binlogdata.VStreamRowsRequest} message VStreamRowsRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - BindVariable.toObject = function toObject(message, options) { + VStreamRowsRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.values = []; if (options.defaults) { - object.type = options.enums === String ? "NULL_TYPE" : 0; - if (options.bytes === String) - object.value = ""; - else { - object.value = []; - if (options.bytes !== Array) - object.value = $util.newBuffer(object.value); - } - } - if (message.type != null && message.hasOwnProperty("type")) - object.type = options.enums === String ? $root.query.Type[message.type] === undefined ? message.type : $root.query.Type[message.type] : message.type; - if (message.value != null && message.hasOwnProperty("value")) - object.value = options.bytes === String ? $util.base64.encode(message.value, 0, message.value.length) : options.bytes === Array ? 
Array.prototype.slice.call(message.value) : message.value; - if (message.values && message.values.length) { - object.values = []; - for (let j = 0; j < message.values.length; ++j) - object.values[j] = $root.query.Value.toObject(message.values[j], options); + object.effective_caller_id = null; + object.immediate_caller_id = null; + object.target = null; + object.query = ""; + object.lastpk = null; } + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) + object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) + object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); + if (message.target != null && message.hasOwnProperty("target")) + object.target = $root.query.Target.toObject(message.target, options); + if (message.query != null && message.hasOwnProperty("query")) + object.query = message.query; + if (message.lastpk != null && message.hasOwnProperty("lastpk")) + object.lastpk = $root.query.QueryResult.toObject(message.lastpk, options); return object; }; /** - * Converts this BindVariable to JSON. + * Converts this VStreamRowsRequest to JSON. 
* @function toJSON - * @memberof query.BindVariable + * @memberof binlogdata.VStreamRowsRequest * @instance * @returns {Object.} JSON object */ - BindVariable.prototype.toJSON = function toJSON() { + VStreamRowsRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for BindVariable + * Gets the default type url for VStreamRowsRequest * @function getTypeUrl - * @memberof query.BindVariable + * @memberof binlogdata.VStreamRowsRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - BindVariable.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VStreamRowsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.BindVariable"; + return typeUrlPrefix + "/binlogdata.VStreamRowsRequest"; }; - return BindVariable; + return VStreamRowsRequest; })(); - query.BoundQuery = (function() { + binlogdata.VStreamRowsResponse = (function() { /** - * Properties of a BoundQuery. - * @memberof query - * @interface IBoundQuery - * @property {string|null} [sql] BoundQuery sql - * @property {Object.|null} [bind_variables] BoundQuery bind_variables + * Properties of a VStreamRowsResponse. + * @memberof binlogdata + * @interface IVStreamRowsResponse + * @property {Array.|null} [fields] VStreamRowsResponse fields + * @property {Array.|null} [pkfields] VStreamRowsResponse pkfields + * @property {string|null} [gtid] VStreamRowsResponse gtid + * @property {Array.|null} [rows] VStreamRowsResponse rows + * @property {query.IRow|null} [lastpk] VStreamRowsResponse lastpk + * @property {boolean|null} [throttled] VStreamRowsResponse throttled + * @property {boolean|null} [heartbeat] VStreamRowsResponse heartbeat */ /** - * Constructs a new BoundQuery. 
- * @memberof query - * @classdesc Represents a BoundQuery. - * @implements IBoundQuery + * Constructs a new VStreamRowsResponse. + * @memberof binlogdata + * @classdesc Represents a VStreamRowsResponse. + * @implements IVStreamRowsResponse * @constructor - * @param {query.IBoundQuery=} [properties] Properties to set + * @param {binlogdata.IVStreamRowsResponse=} [properties] Properties to set */ - function BoundQuery(properties) { - this.bind_variables = {}; + function VStreamRowsResponse(properties) { + this.fields = []; + this.pkfields = []; + this.rows = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -73209,111 +74426,168 @@ export const query = $root.query = (() => { } /** - * BoundQuery sql. - * @member {string} sql - * @memberof query.BoundQuery + * VStreamRowsResponse fields. + * @member {Array.} fields + * @memberof binlogdata.VStreamRowsResponse * @instance */ - BoundQuery.prototype.sql = ""; + VStreamRowsResponse.prototype.fields = $util.emptyArray; /** - * BoundQuery bind_variables. - * @member {Object.} bind_variables - * @memberof query.BoundQuery + * VStreamRowsResponse pkfields. + * @member {Array.} pkfields + * @memberof binlogdata.VStreamRowsResponse * @instance */ - BoundQuery.prototype.bind_variables = $util.emptyObject; + VStreamRowsResponse.prototype.pkfields = $util.emptyArray; /** - * Creates a new BoundQuery instance using the specified properties. + * VStreamRowsResponse gtid. + * @member {string} gtid + * @memberof binlogdata.VStreamRowsResponse + * @instance + */ + VStreamRowsResponse.prototype.gtid = ""; + + /** + * VStreamRowsResponse rows. + * @member {Array.} rows + * @memberof binlogdata.VStreamRowsResponse + * @instance + */ + VStreamRowsResponse.prototype.rows = $util.emptyArray; + + /** + * VStreamRowsResponse lastpk. 
+ * @member {query.IRow|null|undefined} lastpk + * @memberof binlogdata.VStreamRowsResponse + * @instance + */ + VStreamRowsResponse.prototype.lastpk = null; + + /** + * VStreamRowsResponse throttled. + * @member {boolean} throttled + * @memberof binlogdata.VStreamRowsResponse + * @instance + */ + VStreamRowsResponse.prototype.throttled = false; + + /** + * VStreamRowsResponse heartbeat. + * @member {boolean} heartbeat + * @memberof binlogdata.VStreamRowsResponse + * @instance + */ + VStreamRowsResponse.prototype.heartbeat = false; + + /** + * Creates a new VStreamRowsResponse instance using the specified properties. * @function create - * @memberof query.BoundQuery + * @memberof binlogdata.VStreamRowsResponse * @static - * @param {query.IBoundQuery=} [properties] Properties to set - * @returns {query.BoundQuery} BoundQuery instance + * @param {binlogdata.IVStreamRowsResponse=} [properties] Properties to set + * @returns {binlogdata.VStreamRowsResponse} VStreamRowsResponse instance */ - BoundQuery.create = function create(properties) { - return new BoundQuery(properties); + VStreamRowsResponse.create = function create(properties) { + return new VStreamRowsResponse(properties); }; /** - * Encodes the specified BoundQuery message. Does not implicitly {@link query.BoundQuery.verify|verify} messages. + * Encodes the specified VStreamRowsResponse message. Does not implicitly {@link binlogdata.VStreamRowsResponse.verify|verify} messages. 
* @function encode - * @memberof query.BoundQuery + * @memberof binlogdata.VStreamRowsResponse * @static - * @param {query.IBoundQuery} message BoundQuery message or plain object to encode + * @param {binlogdata.IVStreamRowsResponse} message VStreamRowsResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BoundQuery.encode = function encode(message, writer) { + VStreamRowsResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.sql != null && Object.hasOwnProperty.call(message, "sql")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.sql); - if (message.bind_variables != null && Object.hasOwnProperty.call(message, "bind_variables")) - for (let keys = Object.keys(message.bind_variables), i = 0; i < keys.length; ++i) { - writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); - $root.query.BindVariable.encode(message.bind_variables[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); - } + if (message.fields != null && message.fields.length) + for (let i = 0; i < message.fields.length; ++i) + $root.query.Field.encode(message.fields[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.pkfields != null && message.pkfields.length) + for (let i = 0; i < message.pkfields.length; ++i) + $root.query.Field.encode(message.pkfields[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.gtid != null && Object.hasOwnProperty.call(message, "gtid")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.gtid); + if (message.rows != null && message.rows.length) + for (let i = 0; i < message.rows.length; ++i) + $root.query.Row.encode(message.rows[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.lastpk != null && Object.hasOwnProperty.call(message, "lastpk")) + 
$root.query.Row.encode(message.lastpk, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.throttled != null && Object.hasOwnProperty.call(message, "throttled")) + writer.uint32(/* id 6, wireType 0 =*/48).bool(message.throttled); + if (message.heartbeat != null && Object.hasOwnProperty.call(message, "heartbeat")) + writer.uint32(/* id 7, wireType 0 =*/56).bool(message.heartbeat); return writer; }; /** - * Encodes the specified BoundQuery message, length delimited. Does not implicitly {@link query.BoundQuery.verify|verify} messages. + * Encodes the specified VStreamRowsResponse message, length delimited. Does not implicitly {@link binlogdata.VStreamRowsResponse.verify|verify} messages. * @function encodeDelimited - * @memberof query.BoundQuery + * @memberof binlogdata.VStreamRowsResponse * @static - * @param {query.IBoundQuery} message BoundQuery message or plain object to encode + * @param {binlogdata.IVStreamRowsResponse} message VStreamRowsResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BoundQuery.encodeDelimited = function encodeDelimited(message, writer) { + VStreamRowsResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a BoundQuery message from the specified reader or buffer. + * Decodes a VStreamRowsResponse message from the specified reader or buffer. 
* @function decode - * @memberof query.BoundQuery + * @memberof binlogdata.VStreamRowsResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.BoundQuery} BoundQuery + * @returns {binlogdata.VStreamRowsResponse} VStreamRowsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BoundQuery.decode = function decode(reader, length) { + VStreamRowsResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.BoundQuery(), key, value; + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.VStreamRowsResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.sql = reader.string(); + if (!(message.fields && message.fields.length)) + message.fields = []; + message.fields.push($root.query.Field.decode(reader, reader.uint32())); break; } case 2: { - if (message.bind_variables === $util.emptyObject) - message.bind_variables = {}; - let end2 = reader.uint32() + reader.pos; - key = ""; - value = null; - while (reader.pos < end2) { - let tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = $root.query.BindVariable.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.bind_variables[key] = value; + if (!(message.pkfields && message.pkfields.length)) + message.pkfields = []; + message.pkfields.push($root.query.Field.decode(reader, reader.uint32())); + break; + } + case 3: { + message.gtid = reader.string(); + break; + } + case 4: { + if (!(message.rows && message.rows.length)) + message.rows = []; + 
message.rows.push($root.query.Row.decode(reader, reader.uint32())); + break; + } + case 5: { + message.lastpk = $root.query.Row.decode(reader, reader.uint32()); + break; + } + case 6: { + message.throttled = reader.bool(); + break; + } + case 7: { + message.heartbeat = reader.bool(); break; } default: @@ -73325,163 +74599,231 @@ export const query = $root.query = (() => { }; /** - * Decodes a BoundQuery message from the specified reader or buffer, length delimited. + * Decodes a VStreamRowsResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.BoundQuery + * @memberof binlogdata.VStreamRowsResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.BoundQuery} BoundQuery + * @returns {binlogdata.VStreamRowsResponse} VStreamRowsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BoundQuery.decodeDelimited = function decodeDelimited(reader) { + VStreamRowsResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a BoundQuery message. + * Verifies a VStreamRowsResponse message. 
* @function verify - * @memberof query.BoundQuery + * @memberof binlogdata.VStreamRowsResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - BoundQuery.verify = function verify(message) { + VStreamRowsResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.sql != null && message.hasOwnProperty("sql")) - if (!$util.isString(message.sql)) - return "sql: string expected"; - if (message.bind_variables != null && message.hasOwnProperty("bind_variables")) { - if (!$util.isObject(message.bind_variables)) - return "bind_variables: object expected"; - let key = Object.keys(message.bind_variables); - for (let i = 0; i < key.length; ++i) { - let error = $root.query.BindVariable.verify(message.bind_variables[key[i]]); + if (message.fields != null && message.hasOwnProperty("fields")) { + if (!Array.isArray(message.fields)) + return "fields: array expected"; + for (let i = 0; i < message.fields.length; ++i) { + let error = $root.query.Field.verify(message.fields[i]); if (error) - return "bind_variables." + error; + return "fields." + error; + } + } + if (message.pkfields != null && message.hasOwnProperty("pkfields")) { + if (!Array.isArray(message.pkfields)) + return "pkfields: array expected"; + for (let i = 0; i < message.pkfields.length; ++i) { + let error = $root.query.Field.verify(message.pkfields[i]); + if (error) + return "pkfields." + error; + } + } + if (message.gtid != null && message.hasOwnProperty("gtid")) + if (!$util.isString(message.gtid)) + return "gtid: string expected"; + if (message.rows != null && message.hasOwnProperty("rows")) { + if (!Array.isArray(message.rows)) + return "rows: array expected"; + for (let i = 0; i < message.rows.length; ++i) { + let error = $root.query.Row.verify(message.rows[i]); + if (error) + return "rows." 
+ error; } } + if (message.lastpk != null && message.hasOwnProperty("lastpk")) { + let error = $root.query.Row.verify(message.lastpk); + if (error) + return "lastpk." + error; + } + if (message.throttled != null && message.hasOwnProperty("throttled")) + if (typeof message.throttled !== "boolean") + return "throttled: boolean expected"; + if (message.heartbeat != null && message.hasOwnProperty("heartbeat")) + if (typeof message.heartbeat !== "boolean") + return "heartbeat: boolean expected"; return null; }; /** - * Creates a BoundQuery message from a plain object. Also converts values to their respective internal types. + * Creates a VStreamRowsResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.BoundQuery + * @memberof binlogdata.VStreamRowsResponse * @static * @param {Object.} object Plain object - * @returns {query.BoundQuery} BoundQuery + * @returns {binlogdata.VStreamRowsResponse} VStreamRowsResponse */ - BoundQuery.fromObject = function fromObject(object) { - if (object instanceof $root.query.BoundQuery) + VStreamRowsResponse.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.VStreamRowsResponse) return object; - let message = new $root.query.BoundQuery(); - if (object.sql != null) - message.sql = String(object.sql); - if (object.bind_variables) { - if (typeof object.bind_variables !== "object") - throw TypeError(".query.BoundQuery.bind_variables: object expected"); - message.bind_variables = {}; - for (let keys = Object.keys(object.bind_variables), i = 0; i < keys.length; ++i) { - if (typeof object.bind_variables[keys[i]] !== "object") - throw TypeError(".query.BoundQuery.bind_variables: object expected"); - message.bind_variables[keys[i]] = $root.query.BindVariable.fromObject(object.bind_variables[keys[i]]); + let message = new $root.binlogdata.VStreamRowsResponse(); + if (object.fields) { + if (!Array.isArray(object.fields)) + throw 
TypeError(".binlogdata.VStreamRowsResponse.fields: array expected"); + message.fields = []; + for (let i = 0; i < object.fields.length; ++i) { + if (typeof object.fields[i] !== "object") + throw TypeError(".binlogdata.VStreamRowsResponse.fields: object expected"); + message.fields[i] = $root.query.Field.fromObject(object.fields[i]); + } + } + if (object.pkfields) { + if (!Array.isArray(object.pkfields)) + throw TypeError(".binlogdata.VStreamRowsResponse.pkfields: array expected"); + message.pkfields = []; + for (let i = 0; i < object.pkfields.length; ++i) { + if (typeof object.pkfields[i] !== "object") + throw TypeError(".binlogdata.VStreamRowsResponse.pkfields: object expected"); + message.pkfields[i] = $root.query.Field.fromObject(object.pkfields[i]); + } + } + if (object.gtid != null) + message.gtid = String(object.gtid); + if (object.rows) { + if (!Array.isArray(object.rows)) + throw TypeError(".binlogdata.VStreamRowsResponse.rows: array expected"); + message.rows = []; + for (let i = 0; i < object.rows.length; ++i) { + if (typeof object.rows[i] !== "object") + throw TypeError(".binlogdata.VStreamRowsResponse.rows: object expected"); + message.rows[i] = $root.query.Row.fromObject(object.rows[i]); } } + if (object.lastpk != null) { + if (typeof object.lastpk !== "object") + throw TypeError(".binlogdata.VStreamRowsResponse.lastpk: object expected"); + message.lastpk = $root.query.Row.fromObject(object.lastpk); + } + if (object.throttled != null) + message.throttled = Boolean(object.throttled); + if (object.heartbeat != null) + message.heartbeat = Boolean(object.heartbeat); return message; }; /** - * Creates a plain object from a BoundQuery message. Also converts values to other types if specified. + * Creates a plain object from a VStreamRowsResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.BoundQuery + * @memberof binlogdata.VStreamRowsResponse * @static - * @param {query.BoundQuery} message BoundQuery + * @param {binlogdata.VStreamRowsResponse} message VStreamRowsResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - BoundQuery.toObject = function toObject(message, options) { + VStreamRowsResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.objects || options.defaults) - object.bind_variables = {}; - if (options.defaults) - object.sql = ""; - if (message.sql != null && message.hasOwnProperty("sql")) - object.sql = message.sql; - let keys2; - if (message.bind_variables && (keys2 = Object.keys(message.bind_variables)).length) { - object.bind_variables = {}; - for (let j = 0; j < keys2.length; ++j) - object.bind_variables[keys2[j]] = $root.query.BindVariable.toObject(message.bind_variables[keys2[j]], options); + if (options.arrays || options.defaults) { + object.fields = []; + object.pkfields = []; + object.rows = []; + } + if (options.defaults) { + object.gtid = ""; + object.lastpk = null; + object.throttled = false; + object.heartbeat = false; + } + if (message.fields && message.fields.length) { + object.fields = []; + for (let j = 0; j < message.fields.length; ++j) + object.fields[j] = $root.query.Field.toObject(message.fields[j], options); + } + if (message.pkfields && message.pkfields.length) { + object.pkfields = []; + for (let j = 0; j < message.pkfields.length; ++j) + object.pkfields[j] = $root.query.Field.toObject(message.pkfields[j], options); + } + if (message.gtid != null && message.hasOwnProperty("gtid")) + object.gtid = message.gtid; + if (message.rows && message.rows.length) { + object.rows = []; + for (let j = 0; j < message.rows.length; ++j) + object.rows[j] = $root.query.Row.toObject(message.rows[j], options); } + if (message.lastpk != null && 
message.hasOwnProperty("lastpk")) + object.lastpk = $root.query.Row.toObject(message.lastpk, options); + if (message.throttled != null && message.hasOwnProperty("throttled")) + object.throttled = message.throttled; + if (message.heartbeat != null && message.hasOwnProperty("heartbeat")) + object.heartbeat = message.heartbeat; return object; }; /** - * Converts this BoundQuery to JSON. + * Converts this VStreamRowsResponse to JSON. * @function toJSON - * @memberof query.BoundQuery + * @memberof binlogdata.VStreamRowsResponse * @instance * @returns {Object.} JSON object */ - BoundQuery.prototype.toJSON = function toJSON() { + VStreamRowsResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for BoundQuery + * Gets the default type url for VStreamRowsResponse * @function getTypeUrl - * @memberof query.BoundQuery + * @memberof binlogdata.VStreamRowsResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - BoundQuery.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VStreamRowsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.BoundQuery"; + return typeUrlPrefix + "/binlogdata.VStreamRowsResponse"; }; - return BoundQuery; + return VStreamRowsResponse; })(); - query.ExecuteOptions = (function() { + binlogdata.VStreamTablesRequest = (function() { /** - * Properties of an ExecuteOptions. 
- * @memberof query - * @interface IExecuteOptions - * @property {query.ExecuteOptions.IncludedFields|null} [included_fields] ExecuteOptions included_fields - * @property {boolean|null} [client_found_rows] ExecuteOptions client_found_rows - * @property {query.ExecuteOptions.Workload|null} [workload] ExecuteOptions workload - * @property {number|Long|null} [sql_select_limit] ExecuteOptions sql_select_limit - * @property {query.ExecuteOptions.TransactionIsolation|null} [transaction_isolation] ExecuteOptions transaction_isolation - * @property {boolean|null} [skip_query_plan_cache] ExecuteOptions skip_query_plan_cache - * @property {query.ExecuteOptions.PlannerVersion|null} [planner_version] ExecuteOptions planner_version - * @property {boolean|null} [has_created_temp_tables] ExecuteOptions has_created_temp_tables - * @property {query.ExecuteOptions.Consolidator|null} [consolidator] ExecuteOptions consolidator - * @property {Array.|null} [transaction_access_mode] ExecuteOptions transaction_access_mode - * @property {string|null} [WorkloadName] ExecuteOptions WorkloadName - * @property {string|null} [priority] ExecuteOptions priority - * @property {string|null} [uag_info] ExecuteOptions uag_info + * Properties of a VStreamTablesRequest. + * @memberof binlogdata + * @interface IVStreamTablesRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] VStreamTablesRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] VStreamTablesRequest immediate_caller_id + * @property {query.ITarget|null} [target] VStreamTablesRequest target */ /** - * Constructs a new ExecuteOptions. - * @memberof query - * @classdesc Represents an ExecuteOptions. - * @implements IExecuteOptions + * Constructs a new VStreamTablesRequest. + * @memberof binlogdata + * @classdesc Represents a VStreamTablesRequest. 
+ * @implements IVStreamTablesRequest * @constructor - * @param {query.IExecuteOptions=} [properties] Properties to set + * @param {binlogdata.IVStreamTablesRequest=} [properties] Properties to set */ - function ExecuteOptions(properties) { - this.transaction_access_mode = []; + function VStreamTablesRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -73489,254 +74831,425 @@ export const query = $root.query = (() => { } /** - * ExecuteOptions included_fields. - * @member {query.ExecuteOptions.IncludedFields} included_fields - * @memberof query.ExecuteOptions - * @instance - */ - ExecuteOptions.prototype.included_fields = 0; - - /** - * ExecuteOptions client_found_rows. - * @member {boolean} client_found_rows - * @memberof query.ExecuteOptions - * @instance - */ - ExecuteOptions.prototype.client_found_rows = false; - - /** - * ExecuteOptions workload. - * @member {query.ExecuteOptions.Workload} workload - * @memberof query.ExecuteOptions + * VStreamTablesRequest effective_caller_id. + * @member {vtrpc.ICallerID|null|undefined} effective_caller_id + * @memberof binlogdata.VStreamTablesRequest * @instance */ - ExecuteOptions.prototype.workload = 0; + VStreamTablesRequest.prototype.effective_caller_id = null; /** - * ExecuteOptions sql_select_limit. - * @member {number|Long} sql_select_limit - * @memberof query.ExecuteOptions + * VStreamTablesRequest immediate_caller_id. + * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id + * @memberof binlogdata.VStreamTablesRequest * @instance */ - ExecuteOptions.prototype.sql_select_limit = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + VStreamTablesRequest.prototype.immediate_caller_id = null; /** - * ExecuteOptions transaction_isolation. - * @member {query.ExecuteOptions.TransactionIsolation} transaction_isolation - * @memberof query.ExecuteOptions + * VStreamTablesRequest target. 
+ * @member {query.ITarget|null|undefined} target + * @memberof binlogdata.VStreamTablesRequest * @instance */ - ExecuteOptions.prototype.transaction_isolation = 0; + VStreamTablesRequest.prototype.target = null; /** - * ExecuteOptions skip_query_plan_cache. - * @member {boolean} skip_query_plan_cache - * @memberof query.ExecuteOptions - * @instance + * Creates a new VStreamTablesRequest instance using the specified properties. + * @function create + * @memberof binlogdata.VStreamTablesRequest + * @static + * @param {binlogdata.IVStreamTablesRequest=} [properties] Properties to set + * @returns {binlogdata.VStreamTablesRequest} VStreamTablesRequest instance */ - ExecuteOptions.prototype.skip_query_plan_cache = false; + VStreamTablesRequest.create = function create(properties) { + return new VStreamTablesRequest(properties); + }; /** - * ExecuteOptions planner_version. - * @member {query.ExecuteOptions.PlannerVersion} planner_version - * @memberof query.ExecuteOptions - * @instance + * Encodes the specified VStreamTablesRequest message. Does not implicitly {@link binlogdata.VStreamTablesRequest.verify|verify} messages. 
+ * @function encode + * @memberof binlogdata.VStreamTablesRequest + * @static + * @param {binlogdata.IVStreamTablesRequest} message VStreamTablesRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer */ - ExecuteOptions.prototype.planner_version = 0; + VStreamTablesRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) + $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) + $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.target != null && Object.hasOwnProperty.call(message, "target")) + $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + return writer; + }; /** - * ExecuteOptions has_created_temp_tables. - * @member {boolean} has_created_temp_tables - * @memberof query.ExecuteOptions - * @instance + * Encodes the specified VStreamTablesRequest message, length delimited. Does not implicitly {@link binlogdata.VStreamTablesRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof binlogdata.VStreamTablesRequest + * @static + * @param {binlogdata.IVStreamTablesRequest} message VStreamTablesRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer */ - ExecuteOptions.prototype.has_created_temp_tables = false; + VStreamTablesRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; /** - * ExecuteOptions consolidator. 
- * @member {query.ExecuteOptions.Consolidator} consolidator - * @memberof query.ExecuteOptions - * @instance + * Decodes a VStreamTablesRequest message from the specified reader or buffer. + * @function decode + * @memberof binlogdata.VStreamTablesRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {binlogdata.VStreamTablesRequest} VStreamTablesRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteOptions.prototype.consolidator = 0; - - /** - * ExecuteOptions transaction_access_mode. - * @member {Array.} transaction_access_mode - * @memberof query.ExecuteOptions + VStreamTablesRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.VStreamTablesRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); + break; + } + case 2: { + message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); + break; + } + case 3: { + message.target = $root.query.Target.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a VStreamTablesRequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof binlogdata.VStreamTablesRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {binlogdata.VStreamTablesRequest} VStreamTablesRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + VStreamTablesRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a VStreamTablesRequest message. + * @function verify + * @memberof binlogdata.VStreamTablesRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + VStreamTablesRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { + let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); + if (error) + return "effective_caller_id." + error; + } + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { + let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); + if (error) + return "immediate_caller_id." + error; + } + if (message.target != null && message.hasOwnProperty("target")) { + let error = $root.query.Target.verify(message.target); + if (error) + return "target." + error; + } + return null; + }; + + /** + * Creates a VStreamTablesRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof binlogdata.VStreamTablesRequest + * @static + * @param {Object.} object Plain object + * @returns {binlogdata.VStreamTablesRequest} VStreamTablesRequest + */ + VStreamTablesRequest.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.VStreamTablesRequest) + return object; + let message = new $root.binlogdata.VStreamTablesRequest(); + if (object.effective_caller_id != null) { + if (typeof object.effective_caller_id !== "object") + throw TypeError(".binlogdata.VStreamTablesRequest.effective_caller_id: object expected"); + message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); + } + if (object.immediate_caller_id != null) { + if (typeof object.immediate_caller_id !== "object") + throw TypeError(".binlogdata.VStreamTablesRequest.immediate_caller_id: object expected"); + message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); + } + if (object.target != null) { + if (typeof object.target !== "object") + throw TypeError(".binlogdata.VStreamTablesRequest.target: object expected"); + message.target = $root.query.Target.fromObject(object.target); + } + return message; + }; + + /** + * Creates a plain object from a VStreamTablesRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof binlogdata.VStreamTablesRequest + * @static + * @param {binlogdata.VStreamTablesRequest} message VStreamTablesRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + VStreamTablesRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.effective_caller_id = null; + object.immediate_caller_id = null; + object.target = null; + } + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) + object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) + object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); + if (message.target != null && message.hasOwnProperty("target")) + object.target = $root.query.Target.toObject(message.target, options); + return object; + }; + + /** + * Converts this VStreamTablesRequest to JSON. + * @function toJSON + * @memberof binlogdata.VStreamTablesRequest * @instance + * @returns {Object.} JSON object */ - ExecuteOptions.prototype.transaction_access_mode = $util.emptyArray; + VStreamTablesRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; /** - * ExecuteOptions WorkloadName. 
- * @member {string} WorkloadName - * @memberof query.ExecuteOptions + * Gets the default type url for VStreamTablesRequest + * @function getTypeUrl + * @memberof binlogdata.VStreamTablesRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + VStreamTablesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/binlogdata.VStreamTablesRequest"; + }; + + return VStreamTablesRequest; + })(); + + binlogdata.VStreamTablesResponse = (function() { + + /** + * Properties of a VStreamTablesResponse. + * @memberof binlogdata + * @interface IVStreamTablesResponse + * @property {string|null} [table_name] VStreamTablesResponse table_name + * @property {Array.|null} [fields] VStreamTablesResponse fields + * @property {Array.|null} [pkfields] VStreamTablesResponse pkfields + * @property {string|null} [gtid] VStreamTablesResponse gtid + * @property {Array.|null} [rows] VStreamTablesResponse rows + * @property {query.IRow|null} [lastpk] VStreamTablesResponse lastpk + */ + + /** + * Constructs a new VStreamTablesResponse. + * @memberof binlogdata + * @classdesc Represents a VStreamTablesResponse. + * @implements IVStreamTablesResponse + * @constructor + * @param {binlogdata.IVStreamTablesResponse=} [properties] Properties to set + */ + function VStreamTablesResponse(properties) { + this.fields = []; + this.pkfields = []; + this.rows = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * VStreamTablesResponse table_name. + * @member {string} table_name + * @memberof binlogdata.VStreamTablesResponse * @instance */ - ExecuteOptions.prototype.WorkloadName = ""; + VStreamTablesResponse.prototype.table_name = ""; /** - * ExecuteOptions priority. 
- * @member {string} priority - * @memberof query.ExecuteOptions + * VStreamTablesResponse fields. + * @member {Array.} fields + * @memberof binlogdata.VStreamTablesResponse * @instance */ - ExecuteOptions.prototype.priority = ""; + VStreamTablesResponse.prototype.fields = $util.emptyArray; /** - * ExecuteOptions uag_info. - * @member {string} uag_info - * @memberof query.ExecuteOptions + * VStreamTablesResponse pkfields. + * @member {Array.} pkfields + * @memberof binlogdata.VStreamTablesResponse * @instance */ - ExecuteOptions.prototype.uag_info = ""; + VStreamTablesResponse.prototype.pkfields = $util.emptyArray; /** - * Creates a new ExecuteOptions instance using the specified properties. + * VStreamTablesResponse gtid. + * @member {string} gtid + * @memberof binlogdata.VStreamTablesResponse + * @instance + */ + VStreamTablesResponse.prototype.gtid = ""; + + /** + * VStreamTablesResponse rows. + * @member {Array.} rows + * @memberof binlogdata.VStreamTablesResponse + * @instance + */ + VStreamTablesResponse.prototype.rows = $util.emptyArray; + + /** + * VStreamTablesResponse lastpk. + * @member {query.IRow|null|undefined} lastpk + * @memberof binlogdata.VStreamTablesResponse + * @instance + */ + VStreamTablesResponse.prototype.lastpk = null; + + /** + * Creates a new VStreamTablesResponse instance using the specified properties. 
* @function create - * @memberof query.ExecuteOptions + * @memberof binlogdata.VStreamTablesResponse * @static - * @param {query.IExecuteOptions=} [properties] Properties to set - * @returns {query.ExecuteOptions} ExecuteOptions instance + * @param {binlogdata.IVStreamTablesResponse=} [properties] Properties to set + * @returns {binlogdata.VStreamTablesResponse} VStreamTablesResponse instance */ - ExecuteOptions.create = function create(properties) { - return new ExecuteOptions(properties); + VStreamTablesResponse.create = function create(properties) { + return new VStreamTablesResponse(properties); }; /** - * Encodes the specified ExecuteOptions message. Does not implicitly {@link query.ExecuteOptions.verify|verify} messages. + * Encodes the specified VStreamTablesResponse message. Does not implicitly {@link binlogdata.VStreamTablesResponse.verify|verify} messages. * @function encode - * @memberof query.ExecuteOptions + * @memberof binlogdata.VStreamTablesResponse * @static - * @param {query.IExecuteOptions} message ExecuteOptions message or plain object to encode + * @param {binlogdata.IVStreamTablesResponse} message VStreamTablesResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteOptions.encode = function encode(message, writer) { + VStreamTablesResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.included_fields != null && Object.hasOwnProperty.call(message, "included_fields")) - writer.uint32(/* id 4, wireType 0 =*/32).int32(message.included_fields); - if (message.client_found_rows != null && Object.hasOwnProperty.call(message, "client_found_rows")) - writer.uint32(/* id 5, wireType 0 =*/40).bool(message.client_found_rows); - if (message.workload != null && Object.hasOwnProperty.call(message, "workload")) - writer.uint32(/* id 6, wireType 0 =*/48).int32(message.workload); - if (message.sql_select_limit != null 
&& Object.hasOwnProperty.call(message, "sql_select_limit")) - writer.uint32(/* id 8, wireType 0 =*/64).int64(message.sql_select_limit); - if (message.transaction_isolation != null && Object.hasOwnProperty.call(message, "transaction_isolation")) - writer.uint32(/* id 9, wireType 0 =*/72).int32(message.transaction_isolation); - if (message.skip_query_plan_cache != null && Object.hasOwnProperty.call(message, "skip_query_plan_cache")) - writer.uint32(/* id 10, wireType 0 =*/80).bool(message.skip_query_plan_cache); - if (message.planner_version != null && Object.hasOwnProperty.call(message, "planner_version")) - writer.uint32(/* id 11, wireType 0 =*/88).int32(message.planner_version); - if (message.has_created_temp_tables != null && Object.hasOwnProperty.call(message, "has_created_temp_tables")) - writer.uint32(/* id 12, wireType 0 =*/96).bool(message.has_created_temp_tables); - if (message.consolidator != null && Object.hasOwnProperty.call(message, "consolidator")) - writer.uint32(/* id 13, wireType 0 =*/104).int32(message.consolidator); - if (message.transaction_access_mode != null && message.transaction_access_mode.length) { - writer.uint32(/* id 14, wireType 2 =*/114).fork(); - for (let i = 0; i < message.transaction_access_mode.length; ++i) - writer.int32(message.transaction_access_mode[i]); - writer.ldelim(); - } - if (message.WorkloadName != null && Object.hasOwnProperty.call(message, "WorkloadName")) - writer.uint32(/* id 15, wireType 2 =*/122).string(message.WorkloadName); - if (message.priority != null && Object.hasOwnProperty.call(message, "priority")) - writer.uint32(/* id 16, wireType 2 =*/130).string(message.priority); - if (message.uag_info != null && Object.hasOwnProperty.call(message, "uag_info")) - writer.uint32(/* id 88, wireType 2 =*/706).string(message.uag_info); + if (message.table_name != null && Object.hasOwnProperty.call(message, "table_name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.table_name); + if (message.fields != null && 
message.fields.length) + for (let i = 0; i < message.fields.length; ++i) + $root.query.Field.encode(message.fields[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.pkfields != null && message.pkfields.length) + for (let i = 0; i < message.pkfields.length; ++i) + $root.query.Field.encode(message.pkfields[i], writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.gtid != null && Object.hasOwnProperty.call(message, "gtid")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.gtid); + if (message.rows != null && message.rows.length) + for (let i = 0; i < message.rows.length; ++i) + $root.query.Row.encode(message.rows[i], writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.lastpk != null && Object.hasOwnProperty.call(message, "lastpk")) + $root.query.Row.encode(message.lastpk, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); return writer; }; /** - * Encodes the specified ExecuteOptions message, length delimited. Does not implicitly {@link query.ExecuteOptions.verify|verify} messages. + * Encodes the specified VStreamTablesResponse message, length delimited. Does not implicitly {@link binlogdata.VStreamTablesResponse.verify|verify} messages. * @function encodeDelimited - * @memberof query.ExecuteOptions + * @memberof binlogdata.VStreamTablesResponse * @static - * @param {query.IExecuteOptions} message ExecuteOptions message or plain object to encode + * @param {binlogdata.IVStreamTablesResponse} message VStreamTablesResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteOptions.encodeDelimited = function encodeDelimited(message, writer) { + VStreamTablesResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ExecuteOptions message from the specified reader or buffer. 
+ * Decodes a VStreamTablesResponse message from the specified reader or buffer. * @function decode - * @memberof query.ExecuteOptions + * @memberof binlogdata.VStreamTablesResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.ExecuteOptions} ExecuteOptions + * @returns {binlogdata.VStreamTablesResponse} VStreamTablesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteOptions.decode = function decode(reader, length) { + VStreamTablesResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ExecuteOptions(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.VStreamTablesResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 4: { - message.included_fields = reader.int32(); - break; - } - case 5: { - message.client_found_rows = reader.bool(); - break; - } - case 6: { - message.workload = reader.int32(); - break; - } - case 8: { - message.sql_select_limit = reader.int64(); - break; - } - case 9: { - message.transaction_isolation = reader.int32(); - break; - } - case 10: { - message.skip_query_plan_cache = reader.bool(); - break; - } - case 11: { - message.planner_version = reader.int32(); - break; - } - case 12: { - message.has_created_temp_tables = reader.bool(); + case 1: { + message.table_name = reader.string(); break; } - case 13: { - message.consolidator = reader.int32(); + case 2: { + if (!(message.fields && message.fields.length)) + message.fields = []; + message.fields.push($root.query.Field.decode(reader, reader.uint32())); break; } - case 14: { - if (!(message.transaction_access_mode && 
message.transaction_access_mode.length)) - message.transaction_access_mode = []; - if ((tag & 7) === 2) { - let end2 = reader.uint32() + reader.pos; - while (reader.pos < end2) - message.transaction_access_mode.push(reader.int32()); - } else - message.transaction_access_mode.push(reader.int32()); + case 3: { + if (!(message.pkfields && message.pkfields.length)) + message.pkfields = []; + message.pkfields.push($root.query.Field.decode(reader, reader.uint32())); break; } - case 15: { - message.WorkloadName = reader.string(); + case 4: { + message.gtid = reader.string(); break; } - case 16: { - message.priority = reader.string(); + case 5: { + if (!(message.rows && message.rows.length)) + message.rows = []; + message.rows.push($root.query.Row.decode(reader, reader.uint32())); break; } - case 88: { - message.uag_info = reader.string(); + case 6: { + message.lastpk = $root.query.Row.decode(reader, reader.uint32()); break; } default: @@ -73748,573 +75261,222 @@ export const query = $root.query = (() => { }; /** - * Decodes an ExecuteOptions message from the specified reader or buffer, length delimited. + * Decodes a VStreamTablesResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.ExecuteOptions + * @memberof binlogdata.VStreamTablesResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.ExecuteOptions} ExecuteOptions + * @returns {binlogdata.VStreamTablesResponse} VStreamTablesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteOptions.decodeDelimited = function decodeDelimited(reader) { + VStreamTablesResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ExecuteOptions message. 
+ * Verifies a VStreamTablesResponse message. * @function verify - * @memberof query.ExecuteOptions + * @memberof binlogdata.VStreamTablesResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ExecuteOptions.verify = function verify(message) { + VStreamTablesResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.included_fields != null && message.hasOwnProperty("included_fields")) - switch (message.included_fields) { - default: - return "included_fields: enum value expected"; - case 0: - case 1: - case 2: - break; - } - if (message.client_found_rows != null && message.hasOwnProperty("client_found_rows")) - if (typeof message.client_found_rows !== "boolean") - return "client_found_rows: boolean expected"; - if (message.workload != null && message.hasOwnProperty("workload")) - switch (message.workload) { - default: - return "workload: enum value expected"; - case 0: - case 1: - case 2: - case 3: - break; - } - if (message.sql_select_limit != null && message.hasOwnProperty("sql_select_limit")) - if (!$util.isInteger(message.sql_select_limit) && !(message.sql_select_limit && $util.isInteger(message.sql_select_limit.low) && $util.isInteger(message.sql_select_limit.high))) - return "sql_select_limit: integer|Long expected"; - if (message.transaction_isolation != null && message.hasOwnProperty("transaction_isolation")) - switch (message.transaction_isolation) { - default: - return "transaction_isolation: enum value expected"; - case 0: - case 1: - case 2: - case 3: - case 4: - case 5: - case 6: - break; + if (message.table_name != null && message.hasOwnProperty("table_name")) + if (!$util.isString(message.table_name)) + return "table_name: string expected"; + if (message.fields != null && message.hasOwnProperty("fields")) { + if (!Array.isArray(message.fields)) + return "fields: array expected"; 
+ for (let i = 0; i < message.fields.length; ++i) { + let error = $root.query.Field.verify(message.fields[i]); + if (error) + return "fields." + error; } - if (message.skip_query_plan_cache != null && message.hasOwnProperty("skip_query_plan_cache")) - if (typeof message.skip_query_plan_cache !== "boolean") - return "skip_query_plan_cache: boolean expected"; - if (message.planner_version != null && message.hasOwnProperty("planner_version")) - switch (message.planner_version) { - default: - return "planner_version: enum value expected"; - case 0: - case 1: - case 2: - case 3: - case 4: - case 5: - case 6: - case 7: - break; + } + if (message.pkfields != null && message.hasOwnProperty("pkfields")) { + if (!Array.isArray(message.pkfields)) + return "pkfields: array expected"; + for (let i = 0; i < message.pkfields.length; ++i) { + let error = $root.query.Field.verify(message.pkfields[i]); + if (error) + return "pkfields." + error; } - if (message.has_created_temp_tables != null && message.hasOwnProperty("has_created_temp_tables")) - if (typeof message.has_created_temp_tables !== "boolean") - return "has_created_temp_tables: boolean expected"; - if (message.consolidator != null && message.hasOwnProperty("consolidator")) - switch (message.consolidator) { - default: - return "consolidator: enum value expected"; - case 0: - case 1: - case 2: - case 3: - break; + } + if (message.gtid != null && message.hasOwnProperty("gtid")) + if (!$util.isString(message.gtid)) + return "gtid: string expected"; + if (message.rows != null && message.hasOwnProperty("rows")) { + if (!Array.isArray(message.rows)) + return "rows: array expected"; + for (let i = 0; i < message.rows.length; ++i) { + let error = $root.query.Row.verify(message.rows[i]); + if (error) + return "rows." 
+ error; } - if (message.transaction_access_mode != null && message.hasOwnProperty("transaction_access_mode")) { - if (!Array.isArray(message.transaction_access_mode)) - return "transaction_access_mode: array expected"; - for (let i = 0; i < message.transaction_access_mode.length; ++i) - switch (message.transaction_access_mode[i]) { - default: - return "transaction_access_mode: enum value[] expected"; - case 0: - case 1: - case 2: - break; - } } - if (message.WorkloadName != null && message.hasOwnProperty("WorkloadName")) - if (!$util.isString(message.WorkloadName)) - return "WorkloadName: string expected"; - if (message.priority != null && message.hasOwnProperty("priority")) - if (!$util.isString(message.priority)) - return "priority: string expected"; - if (message.uag_info != null && message.hasOwnProperty("uag_info")) - if (!$util.isString(message.uag_info)) - return "uag_info: string expected"; + if (message.lastpk != null && message.hasOwnProperty("lastpk")) { + let error = $root.query.Row.verify(message.lastpk); + if (error) + return "lastpk." + error; + } return null; }; /** - * Creates an ExecuteOptions message from a plain object. Also converts values to their respective internal types. + * Creates a VStreamTablesResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.ExecuteOptions + * @memberof binlogdata.VStreamTablesResponse * @static * @param {Object.} object Plain object - * @returns {query.ExecuteOptions} ExecuteOptions + * @returns {binlogdata.VStreamTablesResponse} VStreamTablesResponse */ - ExecuteOptions.fromObject = function fromObject(object) { - if (object instanceof $root.query.ExecuteOptions) + VStreamTablesResponse.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.VStreamTablesResponse) return object; - let message = new $root.query.ExecuteOptions(); - switch (object.included_fields) { - default: - if (typeof object.included_fields === "number") { - message.included_fields = object.included_fields; - break; + let message = new $root.binlogdata.VStreamTablesResponse(); + if (object.table_name != null) + message.table_name = String(object.table_name); + if (object.fields) { + if (!Array.isArray(object.fields)) + throw TypeError(".binlogdata.VStreamTablesResponse.fields: array expected"); + message.fields = []; + for (let i = 0; i < object.fields.length; ++i) { + if (typeof object.fields[i] !== "object") + throw TypeError(".binlogdata.VStreamTablesResponse.fields: object expected"); + message.fields[i] = $root.query.Field.fromObject(object.fields[i]); } - break; - case "TYPE_AND_NAME": - case 0: - message.included_fields = 0; - break; - case "TYPE_ONLY": - case 1: - message.included_fields = 1; - break; - case "ALL": - case 2: - message.included_fields = 2; - break; } - if (object.client_found_rows != null) - message.client_found_rows = Boolean(object.client_found_rows); - switch (object.workload) { - default: - if (typeof object.workload === "number") { - message.workload = object.workload; - break; + if (object.pkfields) { + if (!Array.isArray(object.pkfields)) + throw TypeError(".binlogdata.VStreamTablesResponse.pkfields: array expected"); + message.pkfields = []; + for (let i = 0; i < object.pkfields.length; ++i) { + if (typeof 
object.pkfields[i] !== "object") + throw TypeError(".binlogdata.VStreamTablesResponse.pkfields: object expected"); + message.pkfields[i] = $root.query.Field.fromObject(object.pkfields[i]); } - break; - case "UNSPECIFIED": - case 0: - message.workload = 0; - break; - case "OLTP": - case 1: - message.workload = 1; - break; - case "OLAP": - case 2: - message.workload = 2; - break; - case "DBA": - case 3: - message.workload = 3; - break; } - if (object.sql_select_limit != null) - if ($util.Long) - (message.sql_select_limit = $util.Long.fromValue(object.sql_select_limit)).unsigned = false; - else if (typeof object.sql_select_limit === "string") - message.sql_select_limit = parseInt(object.sql_select_limit, 10); - else if (typeof object.sql_select_limit === "number") - message.sql_select_limit = object.sql_select_limit; - else if (typeof object.sql_select_limit === "object") - message.sql_select_limit = new $util.LongBits(object.sql_select_limit.low >>> 0, object.sql_select_limit.high >>> 0).toNumber(); - switch (object.transaction_isolation) { - default: - if (typeof object.transaction_isolation === "number") { - message.transaction_isolation = object.transaction_isolation; - break; + if (object.gtid != null) + message.gtid = String(object.gtid); + if (object.rows) { + if (!Array.isArray(object.rows)) + throw TypeError(".binlogdata.VStreamTablesResponse.rows: array expected"); + message.rows = []; + for (let i = 0; i < object.rows.length; ++i) { + if (typeof object.rows[i] !== "object") + throw TypeError(".binlogdata.VStreamTablesResponse.rows: object expected"); + message.rows[i] = $root.query.Row.fromObject(object.rows[i]); } - break; - case "DEFAULT": - case 0: - message.transaction_isolation = 0; - break; - case "REPEATABLE_READ": - case 1: - message.transaction_isolation = 1; - break; - case "READ_COMMITTED": - case 2: - message.transaction_isolation = 2; - break; - case "READ_UNCOMMITTED": - case 3: - message.transaction_isolation = 3; - break; - case 
"SERIALIZABLE": - case 4: - message.transaction_isolation = 4; - break; - case "CONSISTENT_SNAPSHOT_READ_ONLY": - case 5: - message.transaction_isolation = 5; - break; - case "AUTOCOMMIT": - case 6: - message.transaction_isolation = 6; - break; - } - if (object.skip_query_plan_cache != null) - message.skip_query_plan_cache = Boolean(object.skip_query_plan_cache); - switch (object.planner_version) { - default: - if (typeof object.planner_version === "number") { - message.planner_version = object.planner_version; - break; - } - break; - case "DEFAULT_PLANNER": - case 0: - message.planner_version = 0; - break; - case "V3": - case 1: - message.planner_version = 1; - break; - case "Gen4": - case 2: - message.planner_version = 2; - break; - case "Gen4Greedy": - case 3: - message.planner_version = 3; - break; - case "Gen4Left2Right": - case 4: - message.planner_version = 4; - break; - case "Gen4WithFallback": - case 5: - message.planner_version = 5; - break; - case "Gen4CompareV3": - case 6: - message.planner_version = 6; - break; - case "V3Insert": - case 7: - message.planner_version = 7; - break; - } - if (object.has_created_temp_tables != null) - message.has_created_temp_tables = Boolean(object.has_created_temp_tables); - switch (object.consolidator) { - default: - if (typeof object.consolidator === "number") { - message.consolidator = object.consolidator; - break; - } - break; - case "CONSOLIDATOR_UNSPECIFIED": - case 0: - message.consolidator = 0; - break; - case "CONSOLIDATOR_DISABLED": - case 1: - message.consolidator = 1; - break; - case "CONSOLIDATOR_ENABLED": - case 2: - message.consolidator = 2; - break; - case "CONSOLIDATOR_ENABLED_REPLICAS": - case 3: - message.consolidator = 3; - break; } - if (object.transaction_access_mode) { - if (!Array.isArray(object.transaction_access_mode)) - throw TypeError(".query.ExecuteOptions.transaction_access_mode: array expected"); - message.transaction_access_mode = []; - for (let i = 0; i < 
object.transaction_access_mode.length; ++i) - switch (object.transaction_access_mode[i]) { - default: - if (typeof object.transaction_access_mode[i] === "number") { - message.transaction_access_mode[i] = object.transaction_access_mode[i]; - break; - } - case "CONSISTENT_SNAPSHOT": - case 0: - message.transaction_access_mode[i] = 0; - break; - case "READ_WRITE": - case 1: - message.transaction_access_mode[i] = 1; - break; - case "READ_ONLY": - case 2: - message.transaction_access_mode[i] = 2; - break; - } + if (object.lastpk != null) { + if (typeof object.lastpk !== "object") + throw TypeError(".binlogdata.VStreamTablesResponse.lastpk: object expected"); + message.lastpk = $root.query.Row.fromObject(object.lastpk); } - if (object.WorkloadName != null) - message.WorkloadName = String(object.WorkloadName); - if (object.priority != null) - message.priority = String(object.priority); - if (object.uag_info != null) - message.uag_info = String(object.uag_info); return message; }; /** - * Creates a plain object from an ExecuteOptions message. Also converts values to other types if specified. + * Creates a plain object from a VStreamTablesResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.ExecuteOptions + * @memberof binlogdata.VStreamTablesResponse * @static - * @param {query.ExecuteOptions} message ExecuteOptions + * @param {binlogdata.VStreamTablesResponse} message VStreamTablesResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ExecuteOptions.toObject = function toObject(message, options) { + VStreamTablesResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.transaction_access_mode = []; + if (options.arrays || options.defaults) { + object.fields = []; + object.pkfields = []; + object.rows = []; + } if (options.defaults) { - object.included_fields = options.enums === String ? "TYPE_AND_NAME" : 0; - object.client_found_rows = false; - object.workload = options.enums === String ? "UNSPECIFIED" : 0; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.sql_select_limit = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.sql_select_limit = options.longs === String ? "0" : 0; - object.transaction_isolation = options.enums === String ? "DEFAULT" : 0; - object.skip_query_plan_cache = false; - object.planner_version = options.enums === String ? "DEFAULT_PLANNER" : 0; - object.has_created_temp_tables = false; - object.consolidator = options.enums === String ? "CONSOLIDATOR_UNSPECIFIED" : 0; - object.WorkloadName = ""; - object.priority = ""; - object.uag_info = ""; + object.table_name = ""; + object.gtid = ""; + object.lastpk = null; } - if (message.included_fields != null && message.hasOwnProperty("included_fields")) - object.included_fields = options.enums === String ? $root.query.ExecuteOptions.IncludedFields[message.included_fields] === undefined ? 
message.included_fields : $root.query.ExecuteOptions.IncludedFields[message.included_fields] : message.included_fields; - if (message.client_found_rows != null && message.hasOwnProperty("client_found_rows")) - object.client_found_rows = message.client_found_rows; - if (message.workload != null && message.hasOwnProperty("workload")) - object.workload = options.enums === String ? $root.query.ExecuteOptions.Workload[message.workload] === undefined ? message.workload : $root.query.ExecuteOptions.Workload[message.workload] : message.workload; - if (message.sql_select_limit != null && message.hasOwnProperty("sql_select_limit")) - if (typeof message.sql_select_limit === "number") - object.sql_select_limit = options.longs === String ? String(message.sql_select_limit) : message.sql_select_limit; - else - object.sql_select_limit = options.longs === String ? $util.Long.prototype.toString.call(message.sql_select_limit) : options.longs === Number ? new $util.LongBits(message.sql_select_limit.low >>> 0, message.sql_select_limit.high >>> 0).toNumber() : message.sql_select_limit; - if (message.transaction_isolation != null && message.hasOwnProperty("transaction_isolation")) - object.transaction_isolation = options.enums === String ? $root.query.ExecuteOptions.TransactionIsolation[message.transaction_isolation] === undefined ? message.transaction_isolation : $root.query.ExecuteOptions.TransactionIsolation[message.transaction_isolation] : message.transaction_isolation; - if (message.skip_query_plan_cache != null && message.hasOwnProperty("skip_query_plan_cache")) - object.skip_query_plan_cache = message.skip_query_plan_cache; - if (message.planner_version != null && message.hasOwnProperty("planner_version")) - object.planner_version = options.enums === String ? $root.query.ExecuteOptions.PlannerVersion[message.planner_version] === undefined ? 
message.planner_version : $root.query.ExecuteOptions.PlannerVersion[message.planner_version] : message.planner_version; - if (message.has_created_temp_tables != null && message.hasOwnProperty("has_created_temp_tables")) - object.has_created_temp_tables = message.has_created_temp_tables; - if (message.consolidator != null && message.hasOwnProperty("consolidator")) - object.consolidator = options.enums === String ? $root.query.ExecuteOptions.Consolidator[message.consolidator] === undefined ? message.consolidator : $root.query.ExecuteOptions.Consolidator[message.consolidator] : message.consolidator; - if (message.transaction_access_mode && message.transaction_access_mode.length) { - object.transaction_access_mode = []; - for (let j = 0; j < message.transaction_access_mode.length; ++j) - object.transaction_access_mode[j] = options.enums === String ? $root.query.ExecuteOptions.TransactionAccessMode[message.transaction_access_mode[j]] === undefined ? message.transaction_access_mode[j] : $root.query.ExecuteOptions.TransactionAccessMode[message.transaction_access_mode[j]] : message.transaction_access_mode[j]; + if (message.table_name != null && message.hasOwnProperty("table_name")) + object.table_name = message.table_name; + if (message.fields && message.fields.length) { + object.fields = []; + for (let j = 0; j < message.fields.length; ++j) + object.fields[j] = $root.query.Field.toObject(message.fields[j], options); } - if (message.WorkloadName != null && message.hasOwnProperty("WorkloadName")) - object.WorkloadName = message.WorkloadName; - if (message.priority != null && message.hasOwnProperty("priority")) - object.priority = message.priority; - if (message.uag_info != null && message.hasOwnProperty("uag_info")) - object.uag_info = message.uag_info; + if (message.pkfields && message.pkfields.length) { + object.pkfields = []; + for (let j = 0; j < message.pkfields.length; ++j) + object.pkfields[j] = $root.query.Field.toObject(message.pkfields[j], options); + } + if 
(message.gtid != null && message.hasOwnProperty("gtid")) + object.gtid = message.gtid; + if (message.rows && message.rows.length) { + object.rows = []; + for (let j = 0; j < message.rows.length; ++j) + object.rows[j] = $root.query.Row.toObject(message.rows[j], options); + } + if (message.lastpk != null && message.hasOwnProperty("lastpk")) + object.lastpk = $root.query.Row.toObject(message.lastpk, options); return object; }; /** - * Converts this ExecuteOptions to JSON. + * Converts this VStreamTablesResponse to JSON. * @function toJSON - * @memberof query.ExecuteOptions + * @memberof binlogdata.VStreamTablesResponse * @instance * @returns {Object.} JSON object */ - ExecuteOptions.prototype.toJSON = function toJSON() { + VStreamTablesResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ExecuteOptions + * Gets the default type url for VStreamTablesResponse * @function getTypeUrl - * @memberof query.ExecuteOptions + * @memberof binlogdata.VStreamTablesResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ExecuteOptions.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VStreamTablesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.ExecuteOptions"; + return typeUrlPrefix + "/binlogdata.VStreamTablesResponse"; }; - /** - * IncludedFields enum. 
- * @name query.ExecuteOptions.IncludedFields - * @enum {number} - * @property {number} TYPE_AND_NAME=0 TYPE_AND_NAME value - * @property {number} TYPE_ONLY=1 TYPE_ONLY value - * @property {number} ALL=2 ALL value - */ - ExecuteOptions.IncludedFields = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "TYPE_AND_NAME"] = 0; - values[valuesById[1] = "TYPE_ONLY"] = 1; - values[valuesById[2] = "ALL"] = 2; - return values; - })(); - - /** - * Workload enum. - * @name query.ExecuteOptions.Workload - * @enum {number} - * @property {number} UNSPECIFIED=0 UNSPECIFIED value - * @property {number} OLTP=1 OLTP value - * @property {number} OLAP=2 OLAP value - * @property {number} DBA=3 DBA value - */ - ExecuteOptions.Workload = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "UNSPECIFIED"] = 0; - values[valuesById[1] = "OLTP"] = 1; - values[valuesById[2] = "OLAP"] = 2; - values[valuesById[3] = "DBA"] = 3; - return values; - })(); - - /** - * TransactionIsolation enum. 
- * @name query.ExecuteOptions.TransactionIsolation - * @enum {number} - * @property {number} DEFAULT=0 DEFAULT value - * @property {number} REPEATABLE_READ=1 REPEATABLE_READ value - * @property {number} READ_COMMITTED=2 READ_COMMITTED value - * @property {number} READ_UNCOMMITTED=3 READ_UNCOMMITTED value - * @property {number} SERIALIZABLE=4 SERIALIZABLE value - * @property {number} CONSISTENT_SNAPSHOT_READ_ONLY=5 CONSISTENT_SNAPSHOT_READ_ONLY value - * @property {number} AUTOCOMMIT=6 AUTOCOMMIT value - */ - ExecuteOptions.TransactionIsolation = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "DEFAULT"] = 0; - values[valuesById[1] = "REPEATABLE_READ"] = 1; - values[valuesById[2] = "READ_COMMITTED"] = 2; - values[valuesById[3] = "READ_UNCOMMITTED"] = 3; - values[valuesById[4] = "SERIALIZABLE"] = 4; - values[valuesById[5] = "CONSISTENT_SNAPSHOT_READ_ONLY"] = 5; - values[valuesById[6] = "AUTOCOMMIT"] = 6; - return values; - })(); - - /** - * PlannerVersion enum. 
- * @name query.ExecuteOptions.PlannerVersion - * @enum {number} - * @property {number} DEFAULT_PLANNER=0 DEFAULT_PLANNER value - * @property {number} V3=1 V3 value - * @property {number} Gen4=2 Gen4 value - * @property {number} Gen4Greedy=3 Gen4Greedy value - * @property {number} Gen4Left2Right=4 Gen4Left2Right value - * @property {number} Gen4WithFallback=5 Gen4WithFallback value - * @property {number} Gen4CompareV3=6 Gen4CompareV3 value - * @property {number} V3Insert=7 V3Insert value - */ - ExecuteOptions.PlannerVersion = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "DEFAULT_PLANNER"] = 0; - values[valuesById[1] = "V3"] = 1; - values[valuesById[2] = "Gen4"] = 2; - values[valuesById[3] = "Gen4Greedy"] = 3; - values[valuesById[4] = "Gen4Left2Right"] = 4; - values[valuesById[5] = "Gen4WithFallback"] = 5; - values[valuesById[6] = "Gen4CompareV3"] = 6; - values[valuesById[7] = "V3Insert"] = 7; - return values; - })(); - - /** - * Consolidator enum. - * @name query.ExecuteOptions.Consolidator - * @enum {number} - * @property {number} CONSOLIDATOR_UNSPECIFIED=0 CONSOLIDATOR_UNSPECIFIED value - * @property {number} CONSOLIDATOR_DISABLED=1 CONSOLIDATOR_DISABLED value - * @property {number} CONSOLIDATOR_ENABLED=2 CONSOLIDATOR_ENABLED value - * @property {number} CONSOLIDATOR_ENABLED_REPLICAS=3 CONSOLIDATOR_ENABLED_REPLICAS value - */ - ExecuteOptions.Consolidator = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "CONSOLIDATOR_UNSPECIFIED"] = 0; - values[valuesById[1] = "CONSOLIDATOR_DISABLED"] = 1; - values[valuesById[2] = "CONSOLIDATOR_ENABLED"] = 2; - values[valuesById[3] = "CONSOLIDATOR_ENABLED_REPLICAS"] = 3; - return values; - })(); - - /** - * TransactionAccessMode enum. 
- * @name query.ExecuteOptions.TransactionAccessMode - * @enum {number} - * @property {number} CONSISTENT_SNAPSHOT=0 CONSISTENT_SNAPSHOT value - * @property {number} READ_WRITE=1 READ_WRITE value - * @property {number} READ_ONLY=2 READ_ONLY value - */ - ExecuteOptions.TransactionAccessMode = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "CONSISTENT_SNAPSHOT"] = 0; - values[valuesById[1] = "READ_WRITE"] = 1; - values[valuesById[2] = "READ_ONLY"] = 2; - return values; - })(); - - return ExecuteOptions; + return VStreamTablesResponse; })(); - query.Field = (function() { + binlogdata.LastPKEvent = (function() { /** - * Properties of a Field. - * @memberof query - * @interface IField - * @property {string|null} [name] Field name - * @property {query.Type|null} [type] Field type - * @property {string|null} [table] Field table - * @property {string|null} [org_table] Field org_table - * @property {string|null} [database] Field database - * @property {string|null} [org_name] Field org_name - * @property {number|null} [column_length] Field column_length - * @property {number|null} [charset] Field charset - * @property {number|null} [decimals] Field decimals - * @property {number|null} [flags] Field flags - * @property {string|null} [column_type] Field column_type + * Properties of a LastPKEvent. + * @memberof binlogdata + * @interface ILastPKEvent + * @property {binlogdata.ITableLastPK|null} [table_last_p_k] LastPKEvent table_last_p_k + * @property {boolean|null} [completed] LastPKEvent completed */ /** - * Constructs a new Field. - * @memberof query - * @classdesc Represents a Field. - * @implements IField + * Constructs a new LastPKEvent. + * @memberof binlogdata + * @classdesc Represents a LastPKEvent. 
+ * @implements ILastPKEvent * @constructor - * @param {query.IField=} [properties] Properties to set + * @param {binlogdata.ILastPKEvent=} [properties] Properties to set */ - function Field(properties) { + function LastPKEvent(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -74322,215 +75484,89 @@ export const query = $root.query = (() => { } /** - * Field name. - * @member {string} name - * @memberof query.Field - * @instance - */ - Field.prototype.name = ""; - - /** - * Field type. - * @member {query.Type} type - * @memberof query.Field - * @instance - */ - Field.prototype.type = 0; - - /** - * Field table. - * @member {string} table - * @memberof query.Field - * @instance - */ - Field.prototype.table = ""; - - /** - * Field org_table. - * @member {string} org_table - * @memberof query.Field - * @instance - */ - Field.prototype.org_table = ""; - - /** - * Field database. - * @member {string} database - * @memberof query.Field - * @instance - */ - Field.prototype.database = ""; - - /** - * Field org_name. - * @member {string} org_name - * @memberof query.Field - * @instance - */ - Field.prototype.org_name = ""; - - /** - * Field column_length. - * @member {number} column_length - * @memberof query.Field - * @instance - */ - Field.prototype.column_length = 0; - - /** - * Field charset. - * @member {number} charset - * @memberof query.Field - * @instance - */ - Field.prototype.charset = 0; - - /** - * Field decimals. - * @member {number} decimals - * @memberof query.Field - * @instance - */ - Field.prototype.decimals = 0; - - /** - * Field flags. - * @member {number} flags - * @memberof query.Field + * LastPKEvent table_last_p_k. + * @member {binlogdata.ITableLastPK|null|undefined} table_last_p_k + * @memberof binlogdata.LastPKEvent * @instance */ - Field.prototype.flags = 0; + LastPKEvent.prototype.table_last_p_k = null; /** - * Field column_type. 
- * @member {string} column_type - * @memberof query.Field + * LastPKEvent completed. + * @member {boolean} completed + * @memberof binlogdata.LastPKEvent * @instance */ - Field.prototype.column_type = ""; + LastPKEvent.prototype.completed = false; /** - * Creates a new Field instance using the specified properties. + * Creates a new LastPKEvent instance using the specified properties. * @function create - * @memberof query.Field + * @memberof binlogdata.LastPKEvent * @static - * @param {query.IField=} [properties] Properties to set - * @returns {query.Field} Field instance + * @param {binlogdata.ILastPKEvent=} [properties] Properties to set + * @returns {binlogdata.LastPKEvent} LastPKEvent instance */ - Field.create = function create(properties) { - return new Field(properties); + LastPKEvent.create = function create(properties) { + return new LastPKEvent(properties); }; /** - * Encodes the specified Field message. Does not implicitly {@link query.Field.verify|verify} messages. + * Encodes the specified LastPKEvent message. Does not implicitly {@link binlogdata.LastPKEvent.verify|verify} messages. 
* @function encode - * @memberof query.Field + * @memberof binlogdata.LastPKEvent * @static - * @param {query.IField} message Field message or plain object to encode + * @param {binlogdata.ILastPKEvent} message LastPKEvent message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Field.encode = function encode(message, writer) { + LastPKEvent.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.name != null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); - if (message.type != null && Object.hasOwnProperty.call(message, "type")) - writer.uint32(/* id 2, wireType 0 =*/16).int32(message.type); - if (message.table != null && Object.hasOwnProperty.call(message, "table")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.table); - if (message.org_table != null && Object.hasOwnProperty.call(message, "org_table")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.org_table); - if (message.database != null && Object.hasOwnProperty.call(message, "database")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.database); - if (message.org_name != null && Object.hasOwnProperty.call(message, "org_name")) - writer.uint32(/* id 6, wireType 2 =*/50).string(message.org_name); - if (message.column_length != null && Object.hasOwnProperty.call(message, "column_length")) - writer.uint32(/* id 7, wireType 0 =*/56).uint32(message.column_length); - if (message.charset != null && Object.hasOwnProperty.call(message, "charset")) - writer.uint32(/* id 8, wireType 0 =*/64).uint32(message.charset); - if (message.decimals != null && Object.hasOwnProperty.call(message, "decimals")) - writer.uint32(/* id 9, wireType 0 =*/72).uint32(message.decimals); - if (message.flags != null && Object.hasOwnProperty.call(message, "flags")) - writer.uint32(/* id 10, wireType 0 =*/80).uint32(message.flags); - 
if (message.column_type != null && Object.hasOwnProperty.call(message, "column_type")) - writer.uint32(/* id 11, wireType 2 =*/90).string(message.column_type); + if (message.table_last_p_k != null && Object.hasOwnProperty.call(message, "table_last_p_k")) + $root.binlogdata.TableLastPK.encode(message.table_last_p_k, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.completed != null && Object.hasOwnProperty.call(message, "completed")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.completed); return writer; }; /** - * Encodes the specified Field message, length delimited. Does not implicitly {@link query.Field.verify|verify} messages. + * Encodes the specified LastPKEvent message, length delimited. Does not implicitly {@link binlogdata.LastPKEvent.verify|verify} messages. * @function encodeDelimited - * @memberof query.Field + * @memberof binlogdata.LastPKEvent * @static - * @param {query.IField} message Field message or plain object to encode + * @param {binlogdata.ILastPKEvent} message LastPKEvent message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Field.encodeDelimited = function encodeDelimited(message, writer) { + LastPKEvent.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Field message from the specified reader or buffer. + * Decodes a LastPKEvent message from the specified reader or buffer. 
* @function decode - * @memberof query.Field + * @memberof binlogdata.LastPKEvent * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.Field} Field + * @returns {binlogdata.LastPKEvent} LastPKEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Field.decode = function decode(reader, length) { + LastPKEvent.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.Field(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.LastPKEvent(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.name = reader.string(); + message.table_last_p_k = $root.binlogdata.TableLastPK.decode(reader, reader.uint32()); break; } case 2: { - message.type = reader.int32(); - break; - } - case 3: { - message.table = reader.string(); - break; - } - case 4: { - message.org_table = reader.string(); - break; - } - case 5: { - message.database = reader.string(); - break; - } - case 6: { - message.org_name = reader.string(); - break; - } - case 7: { - message.column_length = reader.uint32(); - break; - } - case 8: { - message.charset = reader.uint32(); - break; - } - case 9: { - message.decimals = reader.uint32(); - break; - } - case 10: { - message.flags = reader.uint32(); - break; - } - case 11: { - message.column_type = reader.string(); + message.completed = reader.bool(); break; } default: @@ -74542,389 +75578,137 @@ export const query = $root.query = (() => { }; /** - * Decodes a Field message from the specified reader or buffer, length delimited. + * Decodes a LastPKEvent message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.Field + * @memberof binlogdata.LastPKEvent * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.Field} Field + * @returns {binlogdata.LastPKEvent} LastPKEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Field.decodeDelimited = function decodeDelimited(reader) { + LastPKEvent.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Field message. + * Verifies a LastPKEvent message. * @function verify - * @memberof query.Field + * @memberof binlogdata.LastPKEvent * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Field.verify = function verify(message) { + LastPKEvent.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; - if (message.type != null && message.hasOwnProperty("type")) - switch (message.type) { - default: - return "type: enum value expected"; - case 0: - case 257: - case 770: - case 259: - case 772: - case 261: - case 774: - case 263: - case 776: - case 265: - case 778: - case 1035: - case 1036: - case 2061: - case 2062: - case 2063: - case 2064: - case 785: - case 18: - case 6163: - case 10260: - case 6165: - case 10262: - case 6167: - case 10264: - case 2073: - case 2074: - case 2075: - case 28: - case 2077: - case 2078: - case 31: - case 4128: - case 4129: - case 4130: - break; - } - if (message.table != null && message.hasOwnProperty("table")) - if (!$util.isString(message.table)) - return "table: string expected"; - if (message.org_table != null && 
message.hasOwnProperty("org_table")) - if (!$util.isString(message.org_table)) - return "org_table: string expected"; - if (message.database != null && message.hasOwnProperty("database")) - if (!$util.isString(message.database)) - return "database: string expected"; - if (message.org_name != null && message.hasOwnProperty("org_name")) - if (!$util.isString(message.org_name)) - return "org_name: string expected"; - if (message.column_length != null && message.hasOwnProperty("column_length")) - if (!$util.isInteger(message.column_length)) - return "column_length: integer expected"; - if (message.charset != null && message.hasOwnProperty("charset")) - if (!$util.isInteger(message.charset)) - return "charset: integer expected"; - if (message.decimals != null && message.hasOwnProperty("decimals")) - if (!$util.isInteger(message.decimals)) - return "decimals: integer expected"; - if (message.flags != null && message.hasOwnProperty("flags")) - if (!$util.isInteger(message.flags)) - return "flags: integer expected"; - if (message.column_type != null && message.hasOwnProperty("column_type")) - if (!$util.isString(message.column_type)) - return "column_type: string expected"; + if (message.table_last_p_k != null && message.hasOwnProperty("table_last_p_k")) { + let error = $root.binlogdata.TableLastPK.verify(message.table_last_p_k); + if (error) + return "table_last_p_k." + error; + } + if (message.completed != null && message.hasOwnProperty("completed")) + if (typeof message.completed !== "boolean") + return "completed: boolean expected"; return null; }; /** - * Creates a Field message from a plain object. Also converts values to their respective internal types. + * Creates a LastPKEvent message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.Field + * @memberof binlogdata.LastPKEvent * @static * @param {Object.} object Plain object - * @returns {query.Field} Field + * @returns {binlogdata.LastPKEvent} LastPKEvent */ - Field.fromObject = function fromObject(object) { - if (object instanceof $root.query.Field) + LastPKEvent.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.LastPKEvent) return object; - let message = new $root.query.Field(); - if (object.name != null) - message.name = String(object.name); - switch (object.type) { - default: - if (typeof object.type === "number") { - message.type = object.type; - break; - } - break; - case "NULL_TYPE": - case 0: - message.type = 0; - break; - case "INT8": - case 257: - message.type = 257; - break; - case "UINT8": - case 770: - message.type = 770; - break; - case "INT16": - case 259: - message.type = 259; - break; - case "UINT16": - case 772: - message.type = 772; - break; - case "INT24": - case 261: - message.type = 261; - break; - case "UINT24": - case 774: - message.type = 774; - break; - case "INT32": - case 263: - message.type = 263; - break; - case "UINT32": - case 776: - message.type = 776; - break; - case "INT64": - case 265: - message.type = 265; - break; - case "UINT64": - case 778: - message.type = 778; - break; - case "FLOAT32": - case 1035: - message.type = 1035; - break; - case "FLOAT64": - case 1036: - message.type = 1036; - break; - case "TIMESTAMP": - case 2061: - message.type = 2061; - break; - case "DATE": - case 2062: - message.type = 2062; - break; - case "TIME": - case 2063: - message.type = 2063; - break; - case "DATETIME": - case 2064: - message.type = 2064; - break; - case "YEAR": - case 785: - message.type = 785; - break; - case "DECIMAL": - case 18: - message.type = 18; - break; - case "TEXT": - case 6163: - message.type = 6163; - break; - case "BLOB": - case 10260: - message.type = 10260; - break; - case "VARCHAR": - case 6165: - message.type = 6165; - 
break; - case "VARBINARY": - case 10262: - message.type = 10262; - break; - case "CHAR": - case 6167: - message.type = 6167; - break; - case "BINARY": - case 10264: - message.type = 10264; - break; - case "BIT": - case 2073: - message.type = 2073; - break; - case "ENUM": - case 2074: - message.type = 2074; - break; - case "SET": - case 2075: - message.type = 2075; - break; - case "TUPLE": - case 28: - message.type = 28; - break; - case "GEOMETRY": - case 2077: - message.type = 2077; - break; - case "JSON": - case 2078: - message.type = 2078; - break; - case "EXPRESSION": - case 31: - message.type = 31; - break; - case "HEXNUM": - case 4128: - message.type = 4128; - break; - case "HEXVAL": - case 4129: - message.type = 4129; - break; - case "BITNUM": - case 4130: - message.type = 4130; - break; + let message = new $root.binlogdata.LastPKEvent(); + if (object.table_last_p_k != null) { + if (typeof object.table_last_p_k !== "object") + throw TypeError(".binlogdata.LastPKEvent.table_last_p_k: object expected"); + message.table_last_p_k = $root.binlogdata.TableLastPK.fromObject(object.table_last_p_k); } - if (object.table != null) - message.table = String(object.table); - if (object.org_table != null) - message.org_table = String(object.org_table); - if (object.database != null) - message.database = String(object.database); - if (object.org_name != null) - message.org_name = String(object.org_name); - if (object.column_length != null) - message.column_length = object.column_length >>> 0; - if (object.charset != null) - message.charset = object.charset >>> 0; - if (object.decimals != null) - message.decimals = object.decimals >>> 0; - if (object.flags != null) - message.flags = object.flags >>> 0; - if (object.column_type != null) - message.column_type = String(object.column_type); + if (object.completed != null) + message.completed = Boolean(object.completed); return message; }; /** - * Creates a plain object from a Field message. 
Also converts values to other types if specified. + * Creates a plain object from a LastPKEvent message. Also converts values to other types if specified. * @function toObject - * @memberof query.Field + * @memberof binlogdata.LastPKEvent * @static - * @param {query.Field} message Field + * @param {binlogdata.LastPKEvent} message LastPKEvent * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Field.toObject = function toObject(message, options) { + LastPKEvent.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.name = ""; - object.type = options.enums === String ? "NULL_TYPE" : 0; - object.table = ""; - object.org_table = ""; - object.database = ""; - object.org_name = ""; - object.column_length = 0; - object.charset = 0; - object.decimals = 0; - object.flags = 0; - object.column_type = ""; + object.table_last_p_k = null; + object.completed = false; } - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; - if (message.type != null && message.hasOwnProperty("type")) - object.type = options.enums === String ? $root.query.Type[message.type] === undefined ? 
message.type : $root.query.Type[message.type] : message.type; - if (message.table != null && message.hasOwnProperty("table")) - object.table = message.table; - if (message.org_table != null && message.hasOwnProperty("org_table")) - object.org_table = message.org_table; - if (message.database != null && message.hasOwnProperty("database")) - object.database = message.database; - if (message.org_name != null && message.hasOwnProperty("org_name")) - object.org_name = message.org_name; - if (message.column_length != null && message.hasOwnProperty("column_length")) - object.column_length = message.column_length; - if (message.charset != null && message.hasOwnProperty("charset")) - object.charset = message.charset; - if (message.decimals != null && message.hasOwnProperty("decimals")) - object.decimals = message.decimals; - if (message.flags != null && message.hasOwnProperty("flags")) - object.flags = message.flags; - if (message.column_type != null && message.hasOwnProperty("column_type")) - object.column_type = message.column_type; + if (message.table_last_p_k != null && message.hasOwnProperty("table_last_p_k")) + object.table_last_p_k = $root.binlogdata.TableLastPK.toObject(message.table_last_p_k, options); + if (message.completed != null && message.hasOwnProperty("completed")) + object.completed = message.completed; return object; }; /** - * Converts this Field to JSON. + * Converts this LastPKEvent to JSON. 
* @function toJSON - * @memberof query.Field + * @memberof binlogdata.LastPKEvent * @instance * @returns {Object.} JSON object */ - Field.prototype.toJSON = function toJSON() { + LastPKEvent.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for Field + * Gets the default type url for LastPKEvent * @function getTypeUrl - * @memberof query.Field + * @memberof binlogdata.LastPKEvent * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - Field.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + LastPKEvent.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.Field"; + return typeUrlPrefix + "/binlogdata.LastPKEvent"; }; - return Field; + return LastPKEvent; })(); - query.Row = (function() { + binlogdata.TableLastPK = (function() { /** - * Properties of a Row. - * @memberof query - * @interface IRow - * @property {Array.|null} [lengths] Row lengths - * @property {Uint8Array|null} [values] Row values + * Properties of a TableLastPK. + * @memberof binlogdata + * @interface ITableLastPK + * @property {string|null} [table_name] TableLastPK table_name + * @property {query.IQueryResult|null} [lastpk] TableLastPK lastpk */ /** - * Constructs a new Row. - * @memberof query - * @classdesc Represents a Row. - * @implements IRow + * Constructs a new TableLastPK. + * @memberof binlogdata + * @classdesc Represents a TableLastPK. 
+ * @implements ITableLastPK * @constructor - * @param {query.IRow=} [properties] Properties to set + * @param {binlogdata.ITableLastPK=} [properties] Properties to set */ - function Row(properties) { - this.lengths = []; + function TableLastPK(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -74932,100 +75716,89 @@ export const query = $root.query = (() => { } /** - * Row lengths. - * @member {Array.} lengths - * @memberof query.Row + * TableLastPK table_name. + * @member {string} table_name + * @memberof binlogdata.TableLastPK * @instance */ - Row.prototype.lengths = $util.emptyArray; + TableLastPK.prototype.table_name = ""; /** - * Row values. - * @member {Uint8Array} values - * @memberof query.Row + * TableLastPK lastpk. + * @member {query.IQueryResult|null|undefined} lastpk + * @memberof binlogdata.TableLastPK * @instance */ - Row.prototype.values = $util.newBuffer([]); + TableLastPK.prototype.lastpk = null; /** - * Creates a new Row instance using the specified properties. + * Creates a new TableLastPK instance using the specified properties. * @function create - * @memberof query.Row + * @memberof binlogdata.TableLastPK * @static - * @param {query.IRow=} [properties] Properties to set - * @returns {query.Row} Row instance + * @param {binlogdata.ITableLastPK=} [properties] Properties to set + * @returns {binlogdata.TableLastPK} TableLastPK instance */ - Row.create = function create(properties) { - return new Row(properties); + TableLastPK.create = function create(properties) { + return new TableLastPK(properties); }; /** - * Encodes the specified Row message. Does not implicitly {@link query.Row.verify|verify} messages. + * Encodes the specified TableLastPK message. Does not implicitly {@link binlogdata.TableLastPK.verify|verify} messages. 
* @function encode - * @memberof query.Row + * @memberof binlogdata.TableLastPK * @static - * @param {query.IRow} message Row message or plain object to encode + * @param {binlogdata.ITableLastPK} message TableLastPK message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Row.encode = function encode(message, writer) { + TableLastPK.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.lengths != null && message.lengths.length) { - writer.uint32(/* id 1, wireType 2 =*/10).fork(); - for (let i = 0; i < message.lengths.length; ++i) - writer.sint64(message.lengths[i]); - writer.ldelim(); - } - if (message.values != null && Object.hasOwnProperty.call(message, "values")) - writer.uint32(/* id 2, wireType 2 =*/18).bytes(message.values); + if (message.table_name != null && Object.hasOwnProperty.call(message, "table_name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.table_name); + if (message.lastpk != null && Object.hasOwnProperty.call(message, "lastpk")) + $root.query.QueryResult.encode(message.lastpk, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); return writer; }; /** - * Encodes the specified Row message, length delimited. Does not implicitly {@link query.Row.verify|verify} messages. + * Encodes the specified TableLastPK message, length delimited. Does not implicitly {@link binlogdata.TableLastPK.verify|verify} messages. 
* @function encodeDelimited - * @memberof query.Row + * @memberof binlogdata.TableLastPK * @static - * @param {query.IRow} message Row message or plain object to encode + * @param {binlogdata.ITableLastPK} message TableLastPK message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Row.encodeDelimited = function encodeDelimited(message, writer) { + TableLastPK.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Row message from the specified reader or buffer. + * Decodes a TableLastPK message from the specified reader or buffer. * @function decode - * @memberof query.Row + * @memberof binlogdata.TableLastPK * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.Row} Row + * @returns {binlogdata.TableLastPK} TableLastPK * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Row.decode = function decode(reader, length) { + TableLastPK.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.Row(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.binlogdata.TableLastPK(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.lengths && message.lengths.length)) - message.lengths = []; - if ((tag & 7) === 2) { - let end2 = reader.uint32() + reader.pos; - while (reader.pos < end2) - message.lengths.push(reader.sint64()); - } else - message.lengths.push(reader.sint64()); + message.table_name = reader.string(); break; } - case 2: { - message.values = reader.bytes(); + case 3: { + message.lastpk = $root.query.QueryResult.decode(reader, reader.uint32()); break; } default: @@ -75037,169 +75810,139 @@ export const query = $root.query = (() => { }; /** - * Decodes a Row message from the specified reader or buffer, length delimited. + * Decodes a TableLastPK message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.Row + * @memberof binlogdata.TableLastPK * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.Row} Row + * @returns {binlogdata.TableLastPK} TableLastPK * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Row.decodeDelimited = function decodeDelimited(reader) { + TableLastPK.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Row message. + * Verifies a TableLastPK message. 
* @function verify - * @memberof query.Row + * @memberof binlogdata.TableLastPK * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Row.verify = function verify(message) { + TableLastPK.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.lengths != null && message.hasOwnProperty("lengths")) { - if (!Array.isArray(message.lengths)) - return "lengths: array expected"; - for (let i = 0; i < message.lengths.length; ++i) - if (!$util.isInteger(message.lengths[i]) && !(message.lengths[i] && $util.isInteger(message.lengths[i].low) && $util.isInteger(message.lengths[i].high))) - return "lengths: integer|Long[] expected"; + if (message.table_name != null && message.hasOwnProperty("table_name")) + if (!$util.isString(message.table_name)) + return "table_name: string expected"; + if (message.lastpk != null && message.hasOwnProperty("lastpk")) { + let error = $root.query.QueryResult.verify(message.lastpk); + if (error) + return "lastpk." + error; } - if (message.values != null && message.hasOwnProperty("values")) - if (!(message.values && typeof message.values.length === "number" || $util.isString(message.values))) - return "values: buffer expected"; return null; }; /** - * Creates a Row message from a plain object. Also converts values to their respective internal types. + * Creates a TableLastPK message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.Row + * @memberof binlogdata.TableLastPK * @static * @param {Object.} object Plain object - * @returns {query.Row} Row + * @returns {binlogdata.TableLastPK} TableLastPK */ - Row.fromObject = function fromObject(object) { - if (object instanceof $root.query.Row) + TableLastPK.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.TableLastPK) return object; - let message = new $root.query.Row(); - if (object.lengths) { - if (!Array.isArray(object.lengths)) - throw TypeError(".query.Row.lengths: array expected"); - message.lengths = []; - for (let i = 0; i < object.lengths.length; ++i) - if ($util.Long) - (message.lengths[i] = $util.Long.fromValue(object.lengths[i])).unsigned = false; - else if (typeof object.lengths[i] === "string") - message.lengths[i] = parseInt(object.lengths[i], 10); - else if (typeof object.lengths[i] === "number") - message.lengths[i] = object.lengths[i]; - else if (typeof object.lengths[i] === "object") - message.lengths[i] = new $util.LongBits(object.lengths[i].low >>> 0, object.lengths[i].high >>> 0).toNumber(); + let message = new $root.binlogdata.TableLastPK(); + if (object.table_name != null) + message.table_name = String(object.table_name); + if (object.lastpk != null) { + if (typeof object.lastpk !== "object") + throw TypeError(".binlogdata.TableLastPK.lastpk: object expected"); + message.lastpk = $root.query.QueryResult.fromObject(object.lastpk); } - if (object.values != null) - if (typeof object.values === "string") - $util.base64.decode(object.values, message.values = $util.newBuffer($util.base64.length(object.values)), 0); - else if (object.values.length >= 0) - message.values = object.values; return message; }; /** - * Creates a plain object from a Row message. Also converts values to other types if specified. + * Creates a plain object from a TableLastPK message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.Row + * @memberof binlogdata.TableLastPK * @static - * @param {query.Row} message Row + * @param {binlogdata.TableLastPK} message TableLastPK * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Row.toObject = function toObject(message, options) { + TableLastPK.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.lengths = []; - if (options.defaults) - if (options.bytes === String) - object.values = ""; - else { - object.values = []; - if (options.bytes !== Array) - object.values = $util.newBuffer(object.values); - } - if (message.lengths && message.lengths.length) { - object.lengths = []; - for (let j = 0; j < message.lengths.length; ++j) - if (typeof message.lengths[j] === "number") - object.lengths[j] = options.longs === String ? String(message.lengths[j]) : message.lengths[j]; - else - object.lengths[j] = options.longs === String ? $util.Long.prototype.toString.call(message.lengths[j]) : options.longs === Number ? new $util.LongBits(message.lengths[j].low >>> 0, message.lengths[j].high >>> 0).toNumber() : message.lengths[j]; + if (options.defaults) { + object.table_name = ""; + object.lastpk = null; } - if (message.values != null && message.hasOwnProperty("values")) - object.values = options.bytes === String ? $util.base64.encode(message.values, 0, message.values.length) : options.bytes === Array ? Array.prototype.slice.call(message.values) : message.values; + if (message.table_name != null && message.hasOwnProperty("table_name")) + object.table_name = message.table_name; + if (message.lastpk != null && message.hasOwnProperty("lastpk")) + object.lastpk = $root.query.QueryResult.toObject(message.lastpk, options); return object; }; /** - * Converts this Row to JSON. + * Converts this TableLastPK to JSON. 
* @function toJSON - * @memberof query.Row + * @memberof binlogdata.TableLastPK * @instance * @returns {Object.} JSON object */ - Row.prototype.toJSON = function toJSON() { + TableLastPK.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for Row + * Gets the default type url for TableLastPK * @function getTypeUrl - * @memberof query.Row + * @memberof binlogdata.TableLastPK * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - Row.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + TableLastPK.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.Row"; + return typeUrlPrefix + "/binlogdata.TableLastPK"; }; - return Row; + return TableLastPK; })(); - query.QueryResult = (function() { + binlogdata.VStreamResultsRequest = (function() { /** - * Properties of a QueryResult. - * @memberof query - * @interface IQueryResult - * @property {Array.|null} [fields] QueryResult fields - * @property {number|Long|null} [rows_affected] QueryResult rows_affected - * @property {number|Long|null} [insert_id] QueryResult insert_id - * @property {Array.|null} [rows] QueryResult rows - * @property {string|null} [info] QueryResult info - * @property {string|null} [session_state_changes] QueryResult session_state_changes + * Properties of a VStreamResultsRequest. + * @memberof binlogdata + * @interface IVStreamResultsRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] VStreamResultsRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] VStreamResultsRequest immediate_caller_id + * @property {query.ITarget|null} [target] VStreamResultsRequest target + * @property {string|null} [query] VStreamResultsRequest query */ /** - * Constructs a new QueryResult. 
- * @memberof query - * @classdesc Represents a QueryResult. - * @implements IQueryResult + * Constructs a new VStreamResultsRequest. + * @memberof binlogdata + * @classdesc Represents a VStreamResultsRequest. + * @implements IVStreamResultsRequest * @constructor - * @param {query.IQueryResult=} [properties] Properties to set + * @param {binlogdata.IVStreamResultsRequest=} [properties] Properties to set */ - function QueryResult(properties) { - this.fields = []; - this.rows = []; + function VStreamResultsRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -75207,151 +75950,117 @@ export const query = $root.query = (() => { } /** - * QueryResult fields. - * @member {Array.} fields - * @memberof query.QueryResult - * @instance - */ - QueryResult.prototype.fields = $util.emptyArray; - - /** - * QueryResult rows_affected. - * @member {number|Long} rows_affected - * @memberof query.QueryResult - * @instance - */ - QueryResult.prototype.rows_affected = $util.Long ? $util.Long.fromBits(0,0,true) : 0; - - /** - * QueryResult insert_id. - * @member {number|Long} insert_id - * @memberof query.QueryResult + * VStreamResultsRequest effective_caller_id. + * @member {vtrpc.ICallerID|null|undefined} effective_caller_id + * @memberof binlogdata.VStreamResultsRequest * @instance */ - QueryResult.prototype.insert_id = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + VStreamResultsRequest.prototype.effective_caller_id = null; /** - * QueryResult rows. - * @member {Array.} rows - * @memberof query.QueryResult + * VStreamResultsRequest immediate_caller_id. + * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id + * @memberof binlogdata.VStreamResultsRequest * @instance */ - QueryResult.prototype.rows = $util.emptyArray; + VStreamResultsRequest.prototype.immediate_caller_id = null; /** - * QueryResult info. 
- * @member {string} info - * @memberof query.QueryResult + * VStreamResultsRequest target. + * @member {query.ITarget|null|undefined} target + * @memberof binlogdata.VStreamResultsRequest * @instance */ - QueryResult.prototype.info = ""; + VStreamResultsRequest.prototype.target = null; /** - * QueryResult session_state_changes. - * @member {string} session_state_changes - * @memberof query.QueryResult + * VStreamResultsRequest query. + * @member {string} query + * @memberof binlogdata.VStreamResultsRequest * @instance */ - QueryResult.prototype.session_state_changes = ""; + VStreamResultsRequest.prototype.query = ""; /** - * Creates a new QueryResult instance using the specified properties. + * Creates a new VStreamResultsRequest instance using the specified properties. * @function create - * @memberof query.QueryResult + * @memberof binlogdata.VStreamResultsRequest * @static - * @param {query.IQueryResult=} [properties] Properties to set - * @returns {query.QueryResult} QueryResult instance + * @param {binlogdata.IVStreamResultsRequest=} [properties] Properties to set + * @returns {binlogdata.VStreamResultsRequest} VStreamResultsRequest instance */ - QueryResult.create = function create(properties) { - return new QueryResult(properties); + VStreamResultsRequest.create = function create(properties) { + return new VStreamResultsRequest(properties); }; /** - * Encodes the specified QueryResult message. Does not implicitly {@link query.QueryResult.verify|verify} messages. + * Encodes the specified VStreamResultsRequest message. Does not implicitly {@link binlogdata.VStreamResultsRequest.verify|verify} messages. 
* @function encode - * @memberof query.QueryResult + * @memberof binlogdata.VStreamResultsRequest * @static - * @param {query.IQueryResult} message QueryResult message or plain object to encode + * @param {binlogdata.IVStreamResultsRequest} message VStreamResultsRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - QueryResult.encode = function encode(message, writer) { + VStreamResultsRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.fields != null && message.fields.length) - for (let i = 0; i < message.fields.length; ++i) - $root.query.Field.encode(message.fields[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.rows_affected != null && Object.hasOwnProperty.call(message, "rows_affected")) - writer.uint32(/* id 2, wireType 0 =*/16).uint64(message.rows_affected); - if (message.insert_id != null && Object.hasOwnProperty.call(message, "insert_id")) - writer.uint32(/* id 3, wireType 0 =*/24).uint64(message.insert_id); - if (message.rows != null && message.rows.length) - for (let i = 0; i < message.rows.length; ++i) - $root.query.Row.encode(message.rows[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.info != null && Object.hasOwnProperty.call(message, "info")) - writer.uint32(/* id 6, wireType 2 =*/50).string(message.info); - if (message.session_state_changes != null && Object.hasOwnProperty.call(message, "session_state_changes")) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.session_state_changes); + if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) + $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) + 
$root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.target != null && Object.hasOwnProperty.call(message, "target")) + $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.query != null && Object.hasOwnProperty.call(message, "query")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.query); return writer; }; /** - * Encodes the specified QueryResult message, length delimited. Does not implicitly {@link query.QueryResult.verify|verify} messages. + * Encodes the specified VStreamResultsRequest message, length delimited. Does not implicitly {@link binlogdata.VStreamResultsRequest.verify|verify} messages. * @function encodeDelimited - * @memberof query.QueryResult + * @memberof binlogdata.VStreamResultsRequest * @static - * @param {query.IQueryResult} message QueryResult message or plain object to encode + * @param {binlogdata.IVStreamResultsRequest} message VStreamResultsRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - QueryResult.encodeDelimited = function encodeDelimited(message, writer) { + VStreamResultsRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a QueryResult message from the specified reader or buffer. + * Decodes a VStreamResultsRequest message from the specified reader or buffer. 
* @function decode - * @memberof query.QueryResult + * @memberof binlogdata.VStreamResultsRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.QueryResult} QueryResult + * @returns {binlogdata.VStreamResultsRequest} VStreamResultsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - QueryResult.decode = function decode(reader, length) { + VStreamResultsRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.QueryResult(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.VStreamResultsRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.fields && message.fields.length)) - message.fields = []; - message.fields.push($root.query.Field.decode(reader, reader.uint32())); + message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); break; } case 2: { - message.rows_affected = reader.uint64(); + message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); break; } case 3: { - message.insert_id = reader.uint64(); + message.target = $root.query.Target.decode(reader, reader.uint32()); break; } case 4: { - if (!(message.rows && message.rows.length)) - message.rows = []; - message.rows.push($root.query.Row.decode(reader, reader.uint32())); - break; - } - case 6: { - message.info = reader.string(); - break; - } - case 7: { - message.session_state_changes = reader.string(); + message.query = reader.string(); break; } default: @@ -75363,228 +76072,166 @@ export const query = $root.query = (() => { }; /** - * Decodes a QueryResult message from the 
specified reader or buffer, length delimited. + * Decodes a VStreamResultsRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.QueryResult + * @memberof binlogdata.VStreamResultsRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.QueryResult} QueryResult + * @returns {binlogdata.VStreamResultsRequest} VStreamResultsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - QueryResult.decodeDelimited = function decodeDelimited(reader) { + VStreamResultsRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a QueryResult message. + * Verifies a VStreamResultsRequest message. * @function verify - * @memberof query.QueryResult + * @memberof binlogdata.VStreamResultsRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - QueryResult.verify = function verify(message) { + VStreamResultsRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.fields != null && message.hasOwnProperty("fields")) { - if (!Array.isArray(message.fields)) - return "fields: array expected"; - for (let i = 0; i < message.fields.length; ++i) { - let error = $root.query.Field.verify(message.fields[i]); - if (error) - return "fields." + error; - } + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { + let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); + if (error) + return "effective_caller_id." 
+ error; } - if (message.rows_affected != null && message.hasOwnProperty("rows_affected")) - if (!$util.isInteger(message.rows_affected) && !(message.rows_affected && $util.isInteger(message.rows_affected.low) && $util.isInteger(message.rows_affected.high))) - return "rows_affected: integer|Long expected"; - if (message.insert_id != null && message.hasOwnProperty("insert_id")) - if (!$util.isInteger(message.insert_id) && !(message.insert_id && $util.isInteger(message.insert_id.low) && $util.isInteger(message.insert_id.high))) - return "insert_id: integer|Long expected"; - if (message.rows != null && message.hasOwnProperty("rows")) { - if (!Array.isArray(message.rows)) - return "rows: array expected"; - for (let i = 0; i < message.rows.length; ++i) { - let error = $root.query.Row.verify(message.rows[i]); - if (error) - return "rows." + error; - } + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { + let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); + if (error) + return "immediate_caller_id." + error; } - if (message.info != null && message.hasOwnProperty("info")) - if (!$util.isString(message.info)) - return "info: string expected"; - if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) - if (!$util.isString(message.session_state_changes)) - return "session_state_changes: string expected"; + if (message.target != null && message.hasOwnProperty("target")) { + let error = $root.query.Target.verify(message.target); + if (error) + return "target." + error; + } + if (message.query != null && message.hasOwnProperty("query")) + if (!$util.isString(message.query)) + return "query: string expected"; return null; }; /** - * Creates a QueryResult message from a plain object. Also converts values to their respective internal types. + * Creates a VStreamResultsRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.QueryResult + * @memberof binlogdata.VStreamResultsRequest * @static * @param {Object.} object Plain object - * @returns {query.QueryResult} QueryResult + * @returns {binlogdata.VStreamResultsRequest} VStreamResultsRequest */ - QueryResult.fromObject = function fromObject(object) { - if (object instanceof $root.query.QueryResult) + VStreamResultsRequest.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.VStreamResultsRequest) return object; - let message = new $root.query.QueryResult(); - if (object.fields) { - if (!Array.isArray(object.fields)) - throw TypeError(".query.QueryResult.fields: array expected"); - message.fields = []; - for (let i = 0; i < object.fields.length; ++i) { - if (typeof object.fields[i] !== "object") - throw TypeError(".query.QueryResult.fields: object expected"); - message.fields[i] = $root.query.Field.fromObject(object.fields[i]); - } + let message = new $root.binlogdata.VStreamResultsRequest(); + if (object.effective_caller_id != null) { + if (typeof object.effective_caller_id !== "object") + throw TypeError(".binlogdata.VStreamResultsRequest.effective_caller_id: object expected"); + message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); } - if (object.rows_affected != null) - if ($util.Long) - (message.rows_affected = $util.Long.fromValue(object.rows_affected)).unsigned = true; - else if (typeof object.rows_affected === "string") - message.rows_affected = parseInt(object.rows_affected, 10); - else if (typeof object.rows_affected === "number") - message.rows_affected = object.rows_affected; - else if (typeof object.rows_affected === "object") - message.rows_affected = new $util.LongBits(object.rows_affected.low >>> 0, object.rows_affected.high >>> 0).toNumber(true); - if (object.insert_id != null) - if ($util.Long) - (message.insert_id = $util.Long.fromValue(object.insert_id)).unsigned = true; - else if (typeof object.insert_id 
=== "string") - message.insert_id = parseInt(object.insert_id, 10); - else if (typeof object.insert_id === "number") - message.insert_id = object.insert_id; - else if (typeof object.insert_id === "object") - message.insert_id = new $util.LongBits(object.insert_id.low >>> 0, object.insert_id.high >>> 0).toNumber(true); - if (object.rows) { - if (!Array.isArray(object.rows)) - throw TypeError(".query.QueryResult.rows: array expected"); - message.rows = []; - for (let i = 0; i < object.rows.length; ++i) { - if (typeof object.rows[i] !== "object") - throw TypeError(".query.QueryResult.rows: object expected"); - message.rows[i] = $root.query.Row.fromObject(object.rows[i]); - } + if (object.immediate_caller_id != null) { + if (typeof object.immediate_caller_id !== "object") + throw TypeError(".binlogdata.VStreamResultsRequest.immediate_caller_id: object expected"); + message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); } - if (object.info != null) - message.info = String(object.info); - if (object.session_state_changes != null) - message.session_state_changes = String(object.session_state_changes); + if (object.target != null) { + if (typeof object.target !== "object") + throw TypeError(".binlogdata.VStreamResultsRequest.target: object expected"); + message.target = $root.query.Target.fromObject(object.target); + } + if (object.query != null) + message.query = String(object.query); return message; }; /** - * Creates a plain object from a QueryResult message. Also converts values to other types if specified. + * Creates a plain object from a VStreamResultsRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.QueryResult + * @memberof binlogdata.VStreamResultsRequest * @static - * @param {query.QueryResult} message QueryResult + * @param {binlogdata.VStreamResultsRequest} message VStreamResultsRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - QueryResult.toObject = function toObject(message, options) { + VStreamResultsRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) { - object.fields = []; - object.rows = []; - } if (options.defaults) { - if ($util.Long) { - let long = new $util.Long(0, 0, true); - object.rows_affected = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.rows_affected = options.longs === String ? "0" : 0; - if ($util.Long) { - let long = new $util.Long(0, 0, true); - object.insert_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.insert_id = options.longs === String ? "0" : 0; - object.info = ""; - object.session_state_changes = ""; - } - if (message.fields && message.fields.length) { - object.fields = []; - for (let j = 0; j < message.fields.length; ++j) - object.fields[j] = $root.query.Field.toObject(message.fields[j], options); - } - if (message.rows_affected != null && message.hasOwnProperty("rows_affected")) - if (typeof message.rows_affected === "number") - object.rows_affected = options.longs === String ? String(message.rows_affected) : message.rows_affected; - else - object.rows_affected = options.longs === String ? $util.Long.prototype.toString.call(message.rows_affected) : options.longs === Number ? 
new $util.LongBits(message.rows_affected.low >>> 0, message.rows_affected.high >>> 0).toNumber(true) : message.rows_affected; - if (message.insert_id != null && message.hasOwnProperty("insert_id")) - if (typeof message.insert_id === "number") - object.insert_id = options.longs === String ? String(message.insert_id) : message.insert_id; - else - object.insert_id = options.longs === String ? $util.Long.prototype.toString.call(message.insert_id) : options.longs === Number ? new $util.LongBits(message.insert_id.low >>> 0, message.insert_id.high >>> 0).toNumber(true) : message.insert_id; - if (message.rows && message.rows.length) { - object.rows = []; - for (let j = 0; j < message.rows.length; ++j) - object.rows[j] = $root.query.Row.toObject(message.rows[j], options); + object.effective_caller_id = null; + object.immediate_caller_id = null; + object.target = null; + object.query = ""; } - if (message.info != null && message.hasOwnProperty("info")) - object.info = message.info; - if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) - object.session_state_changes = message.session_state_changes; + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) + object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) + object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); + if (message.target != null && message.hasOwnProperty("target")) + object.target = $root.query.Target.toObject(message.target, options); + if (message.query != null && message.hasOwnProperty("query")) + object.query = message.query; return object; }; /** - * Converts this QueryResult to JSON. + * Converts this VStreamResultsRequest to JSON. 
* @function toJSON - * @memberof query.QueryResult + * @memberof binlogdata.VStreamResultsRequest * @instance * @returns {Object.} JSON object */ - QueryResult.prototype.toJSON = function toJSON() { + VStreamResultsRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for QueryResult + * Gets the default type url for VStreamResultsRequest * @function getTypeUrl - * @memberof query.QueryResult + * @memberof binlogdata.VStreamResultsRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - QueryResult.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VStreamResultsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.QueryResult"; + return typeUrlPrefix + "/binlogdata.VStreamResultsRequest"; }; - return QueryResult; + return VStreamResultsRequest; })(); - query.QueryWarning = (function() { + binlogdata.VStreamResultsResponse = (function() { /** - * Properties of a QueryWarning. - * @memberof query - * @interface IQueryWarning - * @property {number|null} [code] QueryWarning code - * @property {string|null} [message] QueryWarning message + * Properties of a VStreamResultsResponse. + * @memberof binlogdata + * @interface IVStreamResultsResponse + * @property {Array.|null} [fields] VStreamResultsResponse fields + * @property {string|null} [gtid] VStreamResultsResponse gtid + * @property {Array.|null} [rows] VStreamResultsResponse rows */ /** - * Constructs a new QueryWarning. - * @memberof query - * @classdesc Represents a QueryWarning. - * @implements IQueryWarning + * Constructs a new VStreamResultsResponse. + * @memberof binlogdata + * @classdesc Represents a VStreamResultsResponse. 
+ * @implements IVStreamResultsResponse * @constructor - * @param {query.IQueryWarning=} [properties] Properties to set + * @param {binlogdata.IVStreamResultsResponse=} [properties] Properties to set */ - function QueryWarning(properties) { + function VStreamResultsResponse(properties) { + this.fields = []; + this.rows = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -75592,89 +76239,109 @@ export const query = $root.query = (() => { } /** - * QueryWarning code. - * @member {number} code - * @memberof query.QueryWarning + * VStreamResultsResponse fields. + * @member {Array.} fields + * @memberof binlogdata.VStreamResultsResponse * @instance */ - QueryWarning.prototype.code = 0; + VStreamResultsResponse.prototype.fields = $util.emptyArray; /** - * QueryWarning message. - * @member {string} message - * @memberof query.QueryWarning + * VStreamResultsResponse gtid. + * @member {string} gtid + * @memberof binlogdata.VStreamResultsResponse * @instance */ - QueryWarning.prototype.message = ""; + VStreamResultsResponse.prototype.gtid = ""; /** - * Creates a new QueryWarning instance using the specified properties. - * @function create - * @memberof query.QueryWarning - * @static - * @param {query.IQueryWarning=} [properties] Properties to set - * @returns {query.QueryWarning} QueryWarning instance + * VStreamResultsResponse rows. + * @member {Array.} rows + * @memberof binlogdata.VStreamResultsResponse + * @instance */ - QueryWarning.create = function create(properties) { - return new QueryWarning(properties); + VStreamResultsResponse.prototype.rows = $util.emptyArray; + + /** + * Creates a new VStreamResultsResponse instance using the specified properties. 
+ * @function create + * @memberof binlogdata.VStreamResultsResponse + * @static + * @param {binlogdata.IVStreamResultsResponse=} [properties] Properties to set + * @returns {binlogdata.VStreamResultsResponse} VStreamResultsResponse instance + */ + VStreamResultsResponse.create = function create(properties) { + return new VStreamResultsResponse(properties); }; /** - * Encodes the specified QueryWarning message. Does not implicitly {@link query.QueryWarning.verify|verify} messages. + * Encodes the specified VStreamResultsResponse message. Does not implicitly {@link binlogdata.VStreamResultsResponse.verify|verify} messages. * @function encode - * @memberof query.QueryWarning + * @memberof binlogdata.VStreamResultsResponse * @static - * @param {query.IQueryWarning} message QueryWarning message or plain object to encode + * @param {binlogdata.IVStreamResultsResponse} message VStreamResultsResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - QueryWarning.encode = function encode(message, writer) { + VStreamResultsResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.code != null && Object.hasOwnProperty.call(message, "code")) - writer.uint32(/* id 1, wireType 0 =*/8).uint32(message.code); - if (message.message != null && Object.hasOwnProperty.call(message, "message")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.message); + if (message.fields != null && message.fields.length) + for (let i = 0; i < message.fields.length; ++i) + $root.query.Field.encode(message.fields[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.gtid != null && Object.hasOwnProperty.call(message, "gtid")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.gtid); + if (message.rows != null && message.rows.length) + for (let i = 0; i < message.rows.length; ++i) + $root.query.Row.encode(message.rows[i], 
writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; /** - * Encodes the specified QueryWarning message, length delimited. Does not implicitly {@link query.QueryWarning.verify|verify} messages. + * Encodes the specified VStreamResultsResponse message, length delimited. Does not implicitly {@link binlogdata.VStreamResultsResponse.verify|verify} messages. * @function encodeDelimited - * @memberof query.QueryWarning + * @memberof binlogdata.VStreamResultsResponse * @static - * @param {query.IQueryWarning} message QueryWarning message or plain object to encode + * @param {binlogdata.IVStreamResultsResponse} message VStreamResultsResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - QueryWarning.encodeDelimited = function encodeDelimited(message, writer) { + VStreamResultsResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a QueryWarning message from the specified reader or buffer. + * Decodes a VStreamResultsResponse message from the specified reader or buffer. * @function decode - * @memberof query.QueryWarning + * @memberof binlogdata.VStreamResultsResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.QueryWarning} QueryWarning + * @returns {binlogdata.VStreamResultsResponse} VStreamResultsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - QueryWarning.decode = function decode(reader, length) { + VStreamResultsResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.QueryWarning(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.binlogdata.VStreamResultsResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.code = reader.uint32(); + if (!(message.fields && message.fields.length)) + message.fields = []; + message.fields.push($root.query.Field.decode(reader, reader.uint32())); break; } - case 2: { - message.message = reader.string(); + case 3: { + message.gtid = reader.string(); + break; + } + case 4: { + if (!(message.rows && message.rows.length)) + message.rows = []; + message.rows.push($root.query.Row.decode(reader, reader.uint32())); break; } default: @@ -75686,133 +76353,190 @@ export const query = $root.query = (() => { }; /** - * Decodes a QueryWarning message from the specified reader or buffer, length delimited. + * Decodes a VStreamResultsResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.QueryWarning + * @memberof binlogdata.VStreamResultsResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.QueryWarning} QueryWarning + * @returns {binlogdata.VStreamResultsResponse} VStreamResultsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - QueryWarning.decodeDelimited = function decodeDelimited(reader) { + VStreamResultsResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a QueryWarning message. + * Verifies a VStreamResultsResponse message. 
* @function verify - * @memberof query.QueryWarning + * @memberof binlogdata.VStreamResultsResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - QueryWarning.verify = function verify(message) { + VStreamResultsResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.code != null && message.hasOwnProperty("code")) - if (!$util.isInteger(message.code)) - return "code: integer expected"; - if (message.message != null && message.hasOwnProperty("message")) - if (!$util.isString(message.message)) - return "message: string expected"; + if (message.fields != null && message.hasOwnProperty("fields")) { + if (!Array.isArray(message.fields)) + return "fields: array expected"; + for (let i = 0; i < message.fields.length; ++i) { + let error = $root.query.Field.verify(message.fields[i]); + if (error) + return "fields." + error; + } + } + if (message.gtid != null && message.hasOwnProperty("gtid")) + if (!$util.isString(message.gtid)) + return "gtid: string expected"; + if (message.rows != null && message.hasOwnProperty("rows")) { + if (!Array.isArray(message.rows)) + return "rows: array expected"; + for (let i = 0; i < message.rows.length; ++i) { + let error = $root.query.Row.verify(message.rows[i]); + if (error) + return "rows." + error; + } + } return null; }; /** - * Creates a QueryWarning message from a plain object. Also converts values to their respective internal types. + * Creates a VStreamResultsResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.QueryWarning + * @memberof binlogdata.VStreamResultsResponse * @static * @param {Object.} object Plain object - * @returns {query.QueryWarning} QueryWarning + * @returns {binlogdata.VStreamResultsResponse} VStreamResultsResponse */ - QueryWarning.fromObject = function fromObject(object) { - if (object instanceof $root.query.QueryWarning) + VStreamResultsResponse.fromObject = function fromObject(object) { + if (object instanceof $root.binlogdata.VStreamResultsResponse) return object; - let message = new $root.query.QueryWarning(); - if (object.code != null) - message.code = object.code >>> 0; - if (object.message != null) - message.message = String(object.message); + let message = new $root.binlogdata.VStreamResultsResponse(); + if (object.fields) { + if (!Array.isArray(object.fields)) + throw TypeError(".binlogdata.VStreamResultsResponse.fields: array expected"); + message.fields = []; + for (let i = 0; i < object.fields.length; ++i) { + if (typeof object.fields[i] !== "object") + throw TypeError(".binlogdata.VStreamResultsResponse.fields: object expected"); + message.fields[i] = $root.query.Field.fromObject(object.fields[i]); + } + } + if (object.gtid != null) + message.gtid = String(object.gtid); + if (object.rows) { + if (!Array.isArray(object.rows)) + throw TypeError(".binlogdata.VStreamResultsResponse.rows: array expected"); + message.rows = []; + for (let i = 0; i < object.rows.length; ++i) { + if (typeof object.rows[i] !== "object") + throw TypeError(".binlogdata.VStreamResultsResponse.rows: object expected"); + message.rows[i] = $root.query.Row.fromObject(object.rows[i]); + } + } return message; }; /** - * Creates a plain object from a QueryWarning message. Also converts values to other types if specified. + * Creates a plain object from a VStreamResultsResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.QueryWarning + * @memberof binlogdata.VStreamResultsResponse * @static - * @param {query.QueryWarning} message QueryWarning + * @param {binlogdata.VStreamResultsResponse} message VStreamResultsResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - QueryWarning.toObject = function toObject(message, options) { + VStreamResultsResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.code = 0; - object.message = ""; + if (options.arrays || options.defaults) { + object.fields = []; + object.rows = []; + } + if (options.defaults) + object.gtid = ""; + if (message.fields && message.fields.length) { + object.fields = []; + for (let j = 0; j < message.fields.length; ++j) + object.fields[j] = $root.query.Field.toObject(message.fields[j], options); + } + if (message.gtid != null && message.hasOwnProperty("gtid")) + object.gtid = message.gtid; + if (message.rows && message.rows.length) { + object.rows = []; + for (let j = 0; j < message.rows.length; ++j) + object.rows[j] = $root.query.Row.toObject(message.rows[j], options); } - if (message.code != null && message.hasOwnProperty("code")) - object.code = message.code; - if (message.message != null && message.hasOwnProperty("message")) - object.message = message.message; return object; }; /** - * Converts this QueryWarning to JSON. + * Converts this VStreamResultsResponse to JSON. 
* @function toJSON - * @memberof query.QueryWarning + * @memberof binlogdata.VStreamResultsResponse * @instance * @returns {Object.} JSON object */ - QueryWarning.prototype.toJSON = function toJSON() { + VStreamResultsResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for QueryWarning + * Gets the default type url for VStreamResultsResponse * @function getTypeUrl - * @memberof query.QueryWarning + * @memberof binlogdata.VStreamResultsResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - QueryWarning.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VStreamResultsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.QueryWarning"; + return typeUrlPrefix + "/binlogdata.VStreamResultsResponse"; }; - return QueryWarning; + return VStreamResultsResponse; })(); - query.StreamEvent = (function() { + return binlogdata; +})(); + +export const vtrpc = $root.vtrpc = (() => { + + /** + * Namespace vtrpc. + * @exports vtrpc + * @namespace + */ + const vtrpc = {}; + + vtrpc.CallerID = (function() { /** - * Properties of a StreamEvent. - * @memberof query - * @interface IStreamEvent - * @property {Array.|null} [statements] StreamEvent statements - * @property {query.IEventToken|null} [event_token] StreamEvent event_token + * Properties of a CallerID. + * @memberof vtrpc + * @interface ICallerID + * @property {string|null} [principal] CallerID principal + * @property {string|null} [component] CallerID component + * @property {string|null} [subcomponent] CallerID subcomponent + * @property {Array.|null} [groups] CallerID groups */ /** - * Constructs a new StreamEvent. - * @memberof query - * @classdesc Represents a StreamEvent. 
- * @implements IStreamEvent + * Constructs a new CallerID. + * @memberof vtrpc + * @classdesc Represents a CallerID. + * @implements ICallerID * @constructor - * @param {query.IStreamEvent=} [properties] Properties to set + * @param {vtrpc.ICallerID=} [properties] Properties to set */ - function StreamEvent(properties) { - this.statements = []; + function CallerID(properties) { + this.groups = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -75820,92 +76544,120 @@ export const query = $root.query = (() => { } /** - * StreamEvent statements. - * @member {Array.} statements - * @memberof query.StreamEvent + * CallerID principal. + * @member {string} principal + * @memberof vtrpc.CallerID * @instance */ - StreamEvent.prototype.statements = $util.emptyArray; + CallerID.prototype.principal = ""; /** - * StreamEvent event_token. - * @member {query.IEventToken|null|undefined} event_token - * @memberof query.StreamEvent + * CallerID component. + * @member {string} component + * @memberof vtrpc.CallerID * @instance */ - StreamEvent.prototype.event_token = null; + CallerID.prototype.component = ""; /** - * Creates a new StreamEvent instance using the specified properties. + * CallerID subcomponent. + * @member {string} subcomponent + * @memberof vtrpc.CallerID + * @instance + */ + CallerID.prototype.subcomponent = ""; + + /** + * CallerID groups. + * @member {Array.} groups + * @memberof vtrpc.CallerID + * @instance + */ + CallerID.prototype.groups = $util.emptyArray; + + /** + * Creates a new CallerID instance using the specified properties. 
* @function create - * @memberof query.StreamEvent + * @memberof vtrpc.CallerID * @static - * @param {query.IStreamEvent=} [properties] Properties to set - * @returns {query.StreamEvent} StreamEvent instance + * @param {vtrpc.ICallerID=} [properties] Properties to set + * @returns {vtrpc.CallerID} CallerID instance */ - StreamEvent.create = function create(properties) { - return new StreamEvent(properties); + CallerID.create = function create(properties) { + return new CallerID(properties); }; /** - * Encodes the specified StreamEvent message. Does not implicitly {@link query.StreamEvent.verify|verify} messages. + * Encodes the specified CallerID message. Does not implicitly {@link vtrpc.CallerID.verify|verify} messages. * @function encode - * @memberof query.StreamEvent + * @memberof vtrpc.CallerID * @static - * @param {query.IStreamEvent} message StreamEvent message or plain object to encode + * @param {vtrpc.ICallerID} message CallerID message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StreamEvent.encode = function encode(message, writer) { + CallerID.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.statements != null && message.statements.length) - for (let i = 0; i < message.statements.length; ++i) - $root.query.StreamEvent.Statement.encode(message.statements[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.event_token != null && Object.hasOwnProperty.call(message, "event_token")) - $root.query.EventToken.encode(message.event_token, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.principal != null && Object.hasOwnProperty.call(message, "principal")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.principal); + if (message.component != null && Object.hasOwnProperty.call(message, "component")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.component); + if 
(message.subcomponent != null && Object.hasOwnProperty.call(message, "subcomponent")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.subcomponent); + if (message.groups != null && message.groups.length) + for (let i = 0; i < message.groups.length; ++i) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.groups[i]); return writer; }; /** - * Encodes the specified StreamEvent message, length delimited. Does not implicitly {@link query.StreamEvent.verify|verify} messages. + * Encodes the specified CallerID message, length delimited. Does not implicitly {@link vtrpc.CallerID.verify|verify} messages. * @function encodeDelimited - * @memberof query.StreamEvent + * @memberof vtrpc.CallerID * @static - * @param {query.IStreamEvent} message StreamEvent message or plain object to encode + * @param {vtrpc.ICallerID} message CallerID message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StreamEvent.encodeDelimited = function encodeDelimited(message, writer) { + CallerID.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a StreamEvent message from the specified reader or buffer. + * Decodes a CallerID message from the specified reader or buffer. * @function decode - * @memberof query.StreamEvent + * @memberof vtrpc.CallerID * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.StreamEvent} StreamEvent + * @returns {vtrpc.CallerID} CallerID * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamEvent.decode = function decode(reader, length) { + CallerID.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.StreamEvent(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtrpc.CallerID(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.statements && message.statements.length)) - message.statements = []; - message.statements.push($root.query.StreamEvent.Statement.decode(reader, reader.uint32())); + message.principal = reader.string(); break; } case 2: { - message.event_token = $root.query.EventToken.decode(reader, reader.uint32()); + message.component = reader.string(); + break; + } + case 3: { + message.subcomponent = reader.string(); + break; + } + case 4: { + if (!(message.groups && message.groups.length)) + message.groups = []; + message.groups.push(reader.string()); break; } default: @@ -75917,548 +76669,209 @@ export const query = $root.query = (() => { }; /** - * Decodes a StreamEvent message from the specified reader or buffer, length delimited. + * Decodes a CallerID message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.StreamEvent + * @memberof vtrpc.CallerID * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.StreamEvent} StreamEvent + * @returns {vtrpc.CallerID} CallerID * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamEvent.decodeDelimited = function decodeDelimited(reader) { + CallerID.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a StreamEvent message. + * Verifies a CallerID message. 
* @function verify - * @memberof query.StreamEvent + * @memberof vtrpc.CallerID * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - StreamEvent.verify = function verify(message) { + CallerID.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.statements != null && message.hasOwnProperty("statements")) { - if (!Array.isArray(message.statements)) - return "statements: array expected"; - for (let i = 0; i < message.statements.length; ++i) { - let error = $root.query.StreamEvent.Statement.verify(message.statements[i]); - if (error) - return "statements." + error; - } - } - if (message.event_token != null && message.hasOwnProperty("event_token")) { - let error = $root.query.EventToken.verify(message.event_token); - if (error) - return "event_token." + error; + if (message.principal != null && message.hasOwnProperty("principal")) + if (!$util.isString(message.principal)) + return "principal: string expected"; + if (message.component != null && message.hasOwnProperty("component")) + if (!$util.isString(message.component)) + return "component: string expected"; + if (message.subcomponent != null && message.hasOwnProperty("subcomponent")) + if (!$util.isString(message.subcomponent)) + return "subcomponent: string expected"; + if (message.groups != null && message.hasOwnProperty("groups")) { + if (!Array.isArray(message.groups)) + return "groups: array expected"; + for (let i = 0; i < message.groups.length; ++i) + if (!$util.isString(message.groups[i])) + return "groups: string[] expected"; } return null; }; /** - * Creates a StreamEvent message from a plain object. Also converts values to their respective internal types. + * Creates a CallerID message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.StreamEvent + * @memberof vtrpc.CallerID * @static * @param {Object.} object Plain object - * @returns {query.StreamEvent} StreamEvent + * @returns {vtrpc.CallerID} CallerID */ - StreamEvent.fromObject = function fromObject(object) { - if (object instanceof $root.query.StreamEvent) + CallerID.fromObject = function fromObject(object) { + if (object instanceof $root.vtrpc.CallerID) return object; - let message = new $root.query.StreamEvent(); - if (object.statements) { - if (!Array.isArray(object.statements)) - throw TypeError(".query.StreamEvent.statements: array expected"); - message.statements = []; - for (let i = 0; i < object.statements.length; ++i) { - if (typeof object.statements[i] !== "object") - throw TypeError(".query.StreamEvent.statements: object expected"); - message.statements[i] = $root.query.StreamEvent.Statement.fromObject(object.statements[i]); - } - } - if (object.event_token != null) { - if (typeof object.event_token !== "object") - throw TypeError(".query.StreamEvent.event_token: object expected"); - message.event_token = $root.query.EventToken.fromObject(object.event_token); + let message = new $root.vtrpc.CallerID(); + if (object.principal != null) + message.principal = String(object.principal); + if (object.component != null) + message.component = String(object.component); + if (object.subcomponent != null) + message.subcomponent = String(object.subcomponent); + if (object.groups) { + if (!Array.isArray(object.groups)) + throw TypeError(".vtrpc.CallerID.groups: array expected"); + message.groups = []; + for (let i = 0; i < object.groups.length; ++i) + message.groups[i] = String(object.groups[i]); } return message; }; /** - * Creates a plain object from a StreamEvent message. Also converts values to other types if specified. + * Creates a plain object from a CallerID message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.StreamEvent + * @memberof vtrpc.CallerID * @static - * @param {query.StreamEvent} message StreamEvent + * @param {vtrpc.CallerID} message CallerID * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - StreamEvent.toObject = function toObject(message, options) { + CallerID.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) - object.statements = []; - if (options.defaults) - object.event_token = null; - if (message.statements && message.statements.length) { - object.statements = []; - for (let j = 0; j < message.statements.length; ++j) - object.statements[j] = $root.query.StreamEvent.Statement.toObject(message.statements[j], options); + object.groups = []; + if (options.defaults) { + object.principal = ""; + object.component = ""; + object.subcomponent = ""; + } + if (message.principal != null && message.hasOwnProperty("principal")) + object.principal = message.principal; + if (message.component != null && message.hasOwnProperty("component")) + object.component = message.component; + if (message.subcomponent != null && message.hasOwnProperty("subcomponent")) + object.subcomponent = message.subcomponent; + if (message.groups && message.groups.length) { + object.groups = []; + for (let j = 0; j < message.groups.length; ++j) + object.groups[j] = message.groups[j]; } - if (message.event_token != null && message.hasOwnProperty("event_token")) - object.event_token = $root.query.EventToken.toObject(message.event_token, options); return object; }; /** - * Converts this StreamEvent to JSON. + * Converts this CallerID to JSON. 
* @function toJSON - * @memberof query.StreamEvent + * @memberof vtrpc.CallerID * @instance * @returns {Object.} JSON object */ - StreamEvent.prototype.toJSON = function toJSON() { + CallerID.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for StreamEvent + * Gets the default type url for CallerID * @function getTypeUrl - * @memberof query.StreamEvent + * @memberof vtrpc.CallerID * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - StreamEvent.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + CallerID.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.StreamEvent"; + return typeUrlPrefix + "/vtrpc.CallerID"; }; - StreamEvent.Statement = (function() { - - /** - * Properties of a Statement. - * @memberof query.StreamEvent - * @interface IStatement - * @property {query.StreamEvent.Statement.Category|null} [category] Statement category - * @property {string|null} [table_name] Statement table_name - * @property {Array.|null} [primary_key_fields] Statement primary_key_fields - * @property {Array.|null} [primary_key_values] Statement primary_key_values - * @property {Uint8Array|null} [sql] Statement sql - */ - - /** - * Constructs a new Statement. - * @memberof query.StreamEvent - * @classdesc Represents a Statement. - * @implements IStatement - * @constructor - * @param {query.StreamEvent.IStatement=} [properties] Properties to set - */ - function Statement(properties) { - this.primary_key_fields = []; - this.primary_key_values = []; - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } - - /** - * Statement category. 
- * @member {query.StreamEvent.Statement.Category} category - * @memberof query.StreamEvent.Statement - * @instance - */ - Statement.prototype.category = 0; - - /** - * Statement table_name. - * @member {string} table_name - * @memberof query.StreamEvent.Statement - * @instance - */ - Statement.prototype.table_name = ""; + return CallerID; + })(); - /** - * Statement primary_key_fields. - * @member {Array.} primary_key_fields - * @memberof query.StreamEvent.Statement - * @instance - */ - Statement.prototype.primary_key_fields = $util.emptyArray; - - /** - * Statement primary_key_values. - * @member {Array.} primary_key_values - * @memberof query.StreamEvent.Statement - * @instance - */ - Statement.prototype.primary_key_values = $util.emptyArray; - - /** - * Statement sql. - * @member {Uint8Array} sql - * @memberof query.StreamEvent.Statement - * @instance - */ - Statement.prototype.sql = $util.newBuffer([]); - - /** - * Creates a new Statement instance using the specified properties. - * @function create - * @memberof query.StreamEvent.Statement - * @static - * @param {query.StreamEvent.IStatement=} [properties] Properties to set - * @returns {query.StreamEvent.Statement} Statement instance - */ - Statement.create = function create(properties) { - return new Statement(properties); - }; - - /** - * Encodes the specified Statement message. Does not implicitly {@link query.StreamEvent.Statement.verify|verify} messages. 
- * @function encode - * @memberof query.StreamEvent.Statement - * @static - * @param {query.StreamEvent.IStatement} message Statement message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - Statement.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.category != null && Object.hasOwnProperty.call(message, "category")) - writer.uint32(/* id 1, wireType 0 =*/8).int32(message.category); - if (message.table_name != null && Object.hasOwnProperty.call(message, "table_name")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.table_name); - if (message.primary_key_fields != null && message.primary_key_fields.length) - for (let i = 0; i < message.primary_key_fields.length; ++i) - $root.query.Field.encode(message.primary_key_fields[i], writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.primary_key_values != null && message.primary_key_values.length) - for (let i = 0; i < message.primary_key_values.length; ++i) - $root.query.Row.encode(message.primary_key_values[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.sql != null && Object.hasOwnProperty.call(message, "sql")) - writer.uint32(/* id 5, wireType 2 =*/42).bytes(message.sql); - return writer; - }; - - /** - * Encodes the specified Statement message, length delimited. Does not implicitly {@link query.StreamEvent.Statement.verify|verify} messages. - * @function encodeDelimited - * @memberof query.StreamEvent.Statement - * @static - * @param {query.StreamEvent.IStatement} message Statement message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - Statement.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; - - /** - * Decodes a Statement message from the specified reader or buffer. 
- * @function decode - * @memberof query.StreamEvent.Statement - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {query.StreamEvent.Statement} Statement - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - Statement.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.StreamEvent.Statement(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.category = reader.int32(); - break; - } - case 2: { - message.table_name = reader.string(); - break; - } - case 3: { - if (!(message.primary_key_fields && message.primary_key_fields.length)) - message.primary_key_fields = []; - message.primary_key_fields.push($root.query.Field.decode(reader, reader.uint32())); - break; - } - case 4: { - if (!(message.primary_key_values && message.primary_key_values.length)) - message.primary_key_values = []; - message.primary_key_values.push($root.query.Row.decode(reader, reader.uint32())); - break; - } - case 5: { - message.sql = reader.bytes(); - break; - } - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; - - /** - * Decodes a Statement message from the specified reader or buffer, length delimited. 
- * @function decodeDelimited - * @memberof query.StreamEvent.Statement - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.StreamEvent.Statement} Statement - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - Statement.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; - - /** - * Verifies a Statement message. - * @function verify - * @memberof query.StreamEvent.Statement - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - Statement.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.category != null && message.hasOwnProperty("category")) - switch (message.category) { - default: - return "category: enum value expected"; - case 0: - case 1: - case 2: - break; - } - if (message.table_name != null && message.hasOwnProperty("table_name")) - if (!$util.isString(message.table_name)) - return "table_name: string expected"; - if (message.primary_key_fields != null && message.hasOwnProperty("primary_key_fields")) { - if (!Array.isArray(message.primary_key_fields)) - return "primary_key_fields: array expected"; - for (let i = 0; i < message.primary_key_fields.length; ++i) { - let error = $root.query.Field.verify(message.primary_key_fields[i]); - if (error) - return "primary_key_fields." 
+ error; - } - } - if (message.primary_key_values != null && message.hasOwnProperty("primary_key_values")) { - if (!Array.isArray(message.primary_key_values)) - return "primary_key_values: array expected"; - for (let i = 0; i < message.primary_key_values.length; ++i) { - let error = $root.query.Row.verify(message.primary_key_values[i]); - if (error) - return "primary_key_values." + error; - } - } - if (message.sql != null && message.hasOwnProperty("sql")) - if (!(message.sql && typeof message.sql.length === "number" || $util.isString(message.sql))) - return "sql: buffer expected"; - return null; - }; - - /** - * Creates a Statement message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof query.StreamEvent.Statement - * @static - * @param {Object.} object Plain object - * @returns {query.StreamEvent.Statement} Statement - */ - Statement.fromObject = function fromObject(object) { - if (object instanceof $root.query.StreamEvent.Statement) - return object; - let message = new $root.query.StreamEvent.Statement(); - switch (object.category) { - default: - if (typeof object.category === "number") { - message.category = object.category; - break; - } - break; - case "Error": - case 0: - message.category = 0; - break; - case "DML": - case 1: - message.category = 1; - break; - case "DDL": - case 2: - message.category = 2; - break; - } - if (object.table_name != null) - message.table_name = String(object.table_name); - if (object.primary_key_fields) { - if (!Array.isArray(object.primary_key_fields)) - throw TypeError(".query.StreamEvent.Statement.primary_key_fields: array expected"); - message.primary_key_fields = []; - for (let i = 0; i < object.primary_key_fields.length; ++i) { - if (typeof object.primary_key_fields[i] !== "object") - throw TypeError(".query.StreamEvent.Statement.primary_key_fields: object expected"); - message.primary_key_fields[i] = 
$root.query.Field.fromObject(object.primary_key_fields[i]); - } - } - if (object.primary_key_values) { - if (!Array.isArray(object.primary_key_values)) - throw TypeError(".query.StreamEvent.Statement.primary_key_values: array expected"); - message.primary_key_values = []; - for (let i = 0; i < object.primary_key_values.length; ++i) { - if (typeof object.primary_key_values[i] !== "object") - throw TypeError(".query.StreamEvent.Statement.primary_key_values: object expected"); - message.primary_key_values[i] = $root.query.Row.fromObject(object.primary_key_values[i]); - } - } - if (object.sql != null) - if (typeof object.sql === "string") - $util.base64.decode(object.sql, message.sql = $util.newBuffer($util.base64.length(object.sql)), 0); - else if (object.sql.length >= 0) - message.sql = object.sql; - return message; - }; - - /** - * Creates a plain object from a Statement message. Also converts values to other types if specified. - * @function toObject - * @memberof query.StreamEvent.Statement - * @static - * @param {query.StreamEvent.Statement} message Statement - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - Statement.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.arrays || options.defaults) { - object.primary_key_fields = []; - object.primary_key_values = []; - } - if (options.defaults) { - object.category = options.enums === String ? "Error" : 0; - object.table_name = ""; - if (options.bytes === String) - object.sql = ""; - else { - object.sql = []; - if (options.bytes !== Array) - object.sql = $util.newBuffer(object.sql); - } - } - if (message.category != null && message.hasOwnProperty("category")) - object.category = options.enums === String ? $root.query.StreamEvent.Statement.Category[message.category] === undefined ? 
message.category : $root.query.StreamEvent.Statement.Category[message.category] : message.category; - if (message.table_name != null && message.hasOwnProperty("table_name")) - object.table_name = message.table_name; - if (message.primary_key_fields && message.primary_key_fields.length) { - object.primary_key_fields = []; - for (let j = 0; j < message.primary_key_fields.length; ++j) - object.primary_key_fields[j] = $root.query.Field.toObject(message.primary_key_fields[j], options); - } - if (message.primary_key_values && message.primary_key_values.length) { - object.primary_key_values = []; - for (let j = 0; j < message.primary_key_values.length; ++j) - object.primary_key_values[j] = $root.query.Row.toObject(message.primary_key_values[j], options); - } - if (message.sql != null && message.hasOwnProperty("sql")) - object.sql = options.bytes === String ? $util.base64.encode(message.sql, 0, message.sql.length) : options.bytes === Array ? Array.prototype.slice.call(message.sql) : message.sql; - return object; - }; - - /** - * Converts this Statement to JSON. - * @function toJSON - * @memberof query.StreamEvent.Statement - * @instance - * @returns {Object.} JSON object - */ - Statement.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - /** - * Gets the default type url for Statement - * @function getTypeUrl - * @memberof query.StreamEvent.Statement - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - Statement.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/query.StreamEvent.Statement"; - }; - - /** - * Category enum. 
- * @name query.StreamEvent.Statement.Category - * @enum {number} - * @property {number} Error=0 Error value - * @property {number} DML=1 DML value - * @property {number} DDL=2 DDL value - */ - Statement.Category = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "Error"] = 0; - values[valuesById[1] = "DML"] = 1; - values[valuesById[2] = "DDL"] = 2; - return values; - })(); - - return Statement; - })(); - - return StreamEvent; + /** + * Code enum. + * @name vtrpc.Code + * @enum {number} + * @property {number} OK=0 OK value + * @property {number} CANCELED=1 CANCELED value + * @property {number} UNKNOWN=2 UNKNOWN value + * @property {number} INVALID_ARGUMENT=3 INVALID_ARGUMENT value + * @property {number} DEADLINE_EXCEEDED=4 DEADLINE_EXCEEDED value + * @property {number} NOT_FOUND=5 NOT_FOUND value + * @property {number} ALREADY_EXISTS=6 ALREADY_EXISTS value + * @property {number} PERMISSION_DENIED=7 PERMISSION_DENIED value + * @property {number} RESOURCE_EXHAUSTED=8 RESOURCE_EXHAUSTED value + * @property {number} FAILED_PRECONDITION=9 FAILED_PRECONDITION value + * @property {number} ABORTED=10 ABORTED value + * @property {number} OUT_OF_RANGE=11 OUT_OF_RANGE value + * @property {number} UNIMPLEMENTED=12 UNIMPLEMENTED value + * @property {number} INTERNAL=13 INTERNAL value + * @property {number} UNAVAILABLE=14 UNAVAILABLE value + * @property {number} DATA_LOSS=15 DATA_LOSS value + * @property {number} UNAUTHENTICATED=16 UNAUTHENTICATED value + * @property {number} CLUSTER_EVENT=17 CLUSTER_EVENT value + * @property {number} READ_ONLY=18 READ_ONLY value + */ + vtrpc.Code = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "OK"] = 0; + values[valuesById[1] = "CANCELED"] = 1; + values[valuesById[2] = "UNKNOWN"] = 2; + values[valuesById[3] = "INVALID_ARGUMENT"] = 3; + values[valuesById[4] = "DEADLINE_EXCEEDED"] = 4; + values[valuesById[5] = "NOT_FOUND"] = 5; + 
values[valuesById[6] = "ALREADY_EXISTS"] = 6; + values[valuesById[7] = "PERMISSION_DENIED"] = 7; + values[valuesById[8] = "RESOURCE_EXHAUSTED"] = 8; + values[valuesById[9] = "FAILED_PRECONDITION"] = 9; + values[valuesById[10] = "ABORTED"] = 10; + values[valuesById[11] = "OUT_OF_RANGE"] = 11; + values[valuesById[12] = "UNIMPLEMENTED"] = 12; + values[valuesById[13] = "INTERNAL"] = 13; + values[valuesById[14] = "UNAVAILABLE"] = 14; + values[valuesById[15] = "DATA_LOSS"] = 15; + values[valuesById[16] = "UNAUTHENTICATED"] = 16; + values[valuesById[17] = "CLUSTER_EVENT"] = 17; + values[valuesById[18] = "READ_ONLY"] = 18; + return values; })(); - query.ExecuteRequest = (function() { + vtrpc.RPCError = (function() { /** - * Properties of an ExecuteRequest. - * @memberof query - * @interface IExecuteRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] ExecuteRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] ExecuteRequest immediate_caller_id - * @property {query.ITarget|null} [target] ExecuteRequest target - * @property {query.IBoundQuery|null} [query] ExecuteRequest query - * @property {number|Long|null} [transaction_id] ExecuteRequest transaction_id - * @property {query.IExecuteOptions|null} [options] ExecuteRequest options - * @property {number|Long|null} [reserved_id] ExecuteRequest reserved_id + * Properties of a RPCError. + * @memberof vtrpc + * @interface IRPCError + * @property {string|null} [message] RPCError message + * @property {vtrpc.Code|null} [code] RPCError code */ /** - * Constructs a new ExecuteRequest. - * @memberof query - * @classdesc Represents an ExecuteRequest. - * @implements IExecuteRequest + * Constructs a new RPCError. + * @memberof vtrpc + * @classdesc Represents a RPCError. 
+ * @implements IRPCError * @constructor - * @param {query.IExecuteRequest=} [properties] Properties to set + * @param {vtrpc.IRPCError=} [properties] Properties to set */ - function ExecuteRequest(properties) { + function RPCError(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -76466,159 +76879,89 @@ export const query = $root.query = (() => { } /** - * ExecuteRequest effective_caller_id. - * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.ExecuteRequest - * @instance - */ - ExecuteRequest.prototype.effective_caller_id = null; - - /** - * ExecuteRequest immediate_caller_id. - * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.ExecuteRequest - * @instance - */ - ExecuteRequest.prototype.immediate_caller_id = null; - - /** - * ExecuteRequest target. - * @member {query.ITarget|null|undefined} target - * @memberof query.ExecuteRequest - * @instance - */ - ExecuteRequest.prototype.target = null; - - /** - * ExecuteRequest query. - * @member {query.IBoundQuery|null|undefined} query - * @memberof query.ExecuteRequest - * @instance - */ - ExecuteRequest.prototype.query = null; - - /** - * ExecuteRequest transaction_id. - * @member {number|Long} transaction_id - * @memberof query.ExecuteRequest - * @instance - */ - ExecuteRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * ExecuteRequest options. - * @member {query.IExecuteOptions|null|undefined} options - * @memberof query.ExecuteRequest + * RPCError message. + * @member {string} message + * @memberof vtrpc.RPCError * @instance */ - ExecuteRequest.prototype.options = null; + RPCError.prototype.message = ""; /** - * ExecuteRequest reserved_id. - * @member {number|Long} reserved_id - * @memberof query.ExecuteRequest + * RPCError code. 
+ * @member {vtrpc.Code} code + * @memberof vtrpc.RPCError * @instance */ - ExecuteRequest.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + RPCError.prototype.code = 0; /** - * Creates a new ExecuteRequest instance using the specified properties. + * Creates a new RPCError instance using the specified properties. * @function create - * @memberof query.ExecuteRequest + * @memberof vtrpc.RPCError * @static - * @param {query.IExecuteRequest=} [properties] Properties to set - * @returns {query.ExecuteRequest} ExecuteRequest instance + * @param {vtrpc.IRPCError=} [properties] Properties to set + * @returns {vtrpc.RPCError} RPCError instance */ - ExecuteRequest.create = function create(properties) { - return new ExecuteRequest(properties); + RPCError.create = function create(properties) { + return new RPCError(properties); }; /** - * Encodes the specified ExecuteRequest message. Does not implicitly {@link query.ExecuteRequest.verify|verify} messages. + * Encodes the specified RPCError message. Does not implicitly {@link vtrpc.RPCError.verify|verify} messages. 
* @function encode - * @memberof query.ExecuteRequest + * @memberof vtrpc.RPCError * @static - * @param {query.IExecuteRequest} message ExecuteRequest message or plain object to encode + * @param {vtrpc.IRPCError} message RPCError message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteRequest.encode = function encode(message, writer) { + RPCError.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) - $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) - $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.target != null && Object.hasOwnProperty.call(message, "target")) - $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.query != null && Object.hasOwnProperty.call(message, "query")) - $root.query.BoundQuery.encode(message.query, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) - writer.uint32(/* id 5, wireType 0 =*/40).int64(message.transaction_id); - if (message.options != null && Object.hasOwnProperty.call(message, "options")) - $root.query.ExecuteOptions.encode(message.options, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); - if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) - writer.uint32(/* id 7, wireType 0 =*/56).int64(message.reserved_id); + if (message.message != null && Object.hasOwnProperty.call(message, "message")) + writer.uint32(/* id 2, wireType 2 
=*/18).string(message.message); + if (message.code != null && Object.hasOwnProperty.call(message, "code")) + writer.uint32(/* id 3, wireType 0 =*/24).int32(message.code); return writer; }; /** - * Encodes the specified ExecuteRequest message, length delimited. Does not implicitly {@link query.ExecuteRequest.verify|verify} messages. + * Encodes the specified RPCError message, length delimited. Does not implicitly {@link vtrpc.RPCError.verify|verify} messages. * @function encodeDelimited - * @memberof query.ExecuteRequest + * @memberof vtrpc.RPCError * @static - * @param {query.IExecuteRequest} message ExecuteRequest message or plain object to encode + * @param {vtrpc.IRPCError} message RPCError message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteRequest.encodeDelimited = function encodeDelimited(message, writer) { + RPCError.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ExecuteRequest message from the specified reader or buffer. + * Decodes a RPCError message from the specified reader or buffer. * @function decode - * @memberof query.ExecuteRequest + * @memberof vtrpc.RPCError * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.ExecuteRequest} ExecuteRequest + * @returns {vtrpc.RPCError} RPCError * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteRequest.decode = function decode(reader, length) { + RPCError.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ExecuteRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtrpc.RPCError(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); - break; - } case 2: { - message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); + message.message = reader.string(); break; } case 3: { - message.target = $root.query.Target.decode(reader, reader.uint32()); - break; - } - case 4: { - message.query = $root.query.BoundQuery.decode(reader, reader.uint32()); - break; - } - case 5: { - message.transaction_id = reader.int64(); - break; - } - case 6: { - message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); - break; - } - case 7: { - message.reserved_id = reader.int64(); + message.code = reader.int32(); break; } default: @@ -76630,224 +76973,250 @@ export const query = $root.query = (() => { }; /** - * Decodes an ExecuteRequest message from the specified reader or buffer, length delimited. + * Decodes a RPCError message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.ExecuteRequest + * @memberof vtrpc.RPCError * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.ExecuteRequest} ExecuteRequest + * @returns {vtrpc.RPCError} RPCError * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteRequest.decodeDelimited = function decodeDelimited(reader) { + RPCError.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ExecuteRequest message. + * Verifies a RPCError message. 
* @function verify - * @memberof query.ExecuteRequest + * @memberof vtrpc.RPCError * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ExecuteRequest.verify = function verify(message) { + RPCError.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { - let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); - if (error) - return "effective_caller_id." + error; - } - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { - let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); - if (error) - return "immediate_caller_id." + error; - } - if (message.target != null && message.hasOwnProperty("target")) { - let error = $root.query.Target.verify(message.target); - if (error) - return "target." + error; - } - if (message.query != null && message.hasOwnProperty("query")) { - let error = $root.query.BoundQuery.verify(message.query); - if (error) - return "query." + error; - } - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) - return "transaction_id: integer|Long expected"; - if (message.options != null && message.hasOwnProperty("options")) { - let error = $root.query.ExecuteOptions.verify(message.options); - if (error) - return "options." 
+ error; - } - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) - return "reserved_id: integer|Long expected"; + if (message.message != null && message.hasOwnProperty("message")) + if (!$util.isString(message.message)) + return "message: string expected"; + if (message.code != null && message.hasOwnProperty("code")) + switch (message.code) { + default: + return "code: enum value expected"; + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + case 9: + case 10: + case 11: + case 12: + case 13: + case 14: + case 15: + case 16: + case 17: + case 18: + break; + } return null; }; /** - * Creates an ExecuteRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RPCError message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.ExecuteRequest + * @memberof vtrpc.RPCError * @static * @param {Object.} object Plain object - * @returns {query.ExecuteRequest} ExecuteRequest + * @returns {vtrpc.RPCError} RPCError */ - ExecuteRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.ExecuteRequest) + RPCError.fromObject = function fromObject(object) { + if (object instanceof $root.vtrpc.RPCError) return object; - let message = new $root.query.ExecuteRequest(); - if (object.effective_caller_id != null) { - if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.ExecuteRequest.effective_caller_id: object expected"); - message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); - } - if (object.immediate_caller_id != null) { - if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.ExecuteRequest.immediate_caller_id: object expected"); - message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); - } - if (object.target != null) { - if (typeof object.target !== "object") - throw TypeError(".query.ExecuteRequest.target: object expected"); - message.target = $root.query.Target.fromObject(object.target); - } - if (object.query != null) { - if (typeof object.query !== "object") - throw TypeError(".query.ExecuteRequest.query: object expected"); - message.query = $root.query.BoundQuery.fromObject(object.query); - } - if (object.transaction_id != null) - if ($util.Long) - (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; - else if (typeof object.transaction_id === "string") - message.transaction_id = parseInt(object.transaction_id, 10); - else if (typeof object.transaction_id === "number") - message.transaction_id = object.transaction_id; - else if (typeof object.transaction_id === "object") - message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, 
object.transaction_id.high >>> 0).toNumber(); - if (object.options != null) { - if (typeof object.options !== "object") - throw TypeError(".query.ExecuteRequest.options: object expected"); - message.options = $root.query.ExecuteOptions.fromObject(object.options); + let message = new $root.vtrpc.RPCError(); + if (object.message != null) + message.message = String(object.message); + switch (object.code) { + default: + if (typeof object.code === "number") { + message.code = object.code; + break; + } + break; + case "OK": + case 0: + message.code = 0; + break; + case "CANCELED": + case 1: + message.code = 1; + break; + case "UNKNOWN": + case 2: + message.code = 2; + break; + case "INVALID_ARGUMENT": + case 3: + message.code = 3; + break; + case "DEADLINE_EXCEEDED": + case 4: + message.code = 4; + break; + case "NOT_FOUND": + case 5: + message.code = 5; + break; + case "ALREADY_EXISTS": + case 6: + message.code = 6; + break; + case "PERMISSION_DENIED": + case 7: + message.code = 7; + break; + case "RESOURCE_EXHAUSTED": + case 8: + message.code = 8; + break; + case "FAILED_PRECONDITION": + case 9: + message.code = 9; + break; + case "ABORTED": + case 10: + message.code = 10; + break; + case "OUT_OF_RANGE": + case 11: + message.code = 11; + break; + case "UNIMPLEMENTED": + case 12: + message.code = 12; + break; + case "INTERNAL": + case 13: + message.code = 13; + break; + case "UNAVAILABLE": + case 14: + message.code = 14; + break; + case "DATA_LOSS": + case 15: + message.code = 15; + break; + case "UNAUTHENTICATED": + case 16: + message.code = 16; + break; + case "CLUSTER_EVENT": + case 17: + message.code = 17; + break; + case "READ_ONLY": + case 18: + message.code = 18; + break; } - if (object.reserved_id != null) - if ($util.Long) - (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; - else if (typeof object.reserved_id === "string") - message.reserved_id = parseInt(object.reserved_id, 10); - else if (typeof object.reserved_id === 
"number") - message.reserved_id = object.reserved_id; - else if (typeof object.reserved_id === "object") - message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); return message; }; /** - * Creates a plain object from an ExecuteRequest message. Also converts values to other types if specified. + * Creates a plain object from a RPCError message. Also converts values to other types if specified. * @function toObject - * @memberof query.ExecuteRequest + * @memberof vtrpc.RPCError * @static - * @param {query.ExecuteRequest} message ExecuteRequest + * @param {vtrpc.RPCError} message RPCError * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ExecuteRequest.toObject = function toObject(message, options) { + RPCError.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.effective_caller_id = null; - object.immediate_caller_id = null; - object.target = null; - object.query = null; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.transaction_id = options.longs === String ? "0" : 0; - object.options = null; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.reserved_id = options.longs === String ? "0" : 0; + object.message = ""; + object.code = options.enums === String ? 
"OK" : 0; } - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) - object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) - object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); - if (message.target != null && message.hasOwnProperty("target")) - object.target = $root.query.Target.toObject(message.target, options); - if (message.query != null && message.hasOwnProperty("query")) - object.query = $root.query.BoundQuery.toObject(message.query, options); - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (typeof message.transaction_id === "number") - object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; - else - object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; - if (message.options != null && message.hasOwnProperty("options")) - object.options = $root.query.ExecuteOptions.toObject(message.options, options); - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (typeof message.reserved_id === "number") - object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; - else - object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? 
new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; + if (message.message != null && message.hasOwnProperty("message")) + object.message = message.message; + if (message.code != null && message.hasOwnProperty("code")) + object.code = options.enums === String ? $root.vtrpc.Code[message.code] === undefined ? message.code : $root.vtrpc.Code[message.code] : message.code; return object; }; /** - * Converts this ExecuteRequest to JSON. + * Converts this RPCError to JSON. * @function toJSON - * @memberof query.ExecuteRequest + * @memberof vtrpc.RPCError * @instance * @returns {Object.} JSON object */ - ExecuteRequest.prototype.toJSON = function toJSON() { + RPCError.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ExecuteRequest + * Gets the default type url for RPCError * @function getTypeUrl - * @memberof query.ExecuteRequest + * @memberof vtrpc.RPCError * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ExecuteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RPCError.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.ExecuteRequest"; + return typeUrlPrefix + "/vtrpc.RPCError"; }; - return ExecuteRequest; + return RPCError; })(); - query.ExecuteResponse = (function() { + return vtrpc; +})(); + +export const query = $root.query = (() => { + + /** + * Namespace query. + * @exports query + * @namespace + */ + const query = {}; + + query.Target = (function() { /** - * Properties of an ExecuteResponse. + * Properties of a Target. 
* @memberof query - * @interface IExecuteResponse - * @property {query.IQueryResult|null} [result] ExecuteResponse result + * @interface ITarget + * @property {string|null} [keyspace] Target keyspace + * @property {string|null} [shard] Target shard + * @property {topodata.TabletType|null} [tablet_type] Target tablet_type + * @property {string|null} [cell] Target cell */ /** - * Constructs a new ExecuteResponse. + * Constructs a new Target. * @memberof query - * @classdesc Represents an ExecuteResponse. - * @implements IExecuteResponse + * @classdesc Represents a Target. + * @implements ITarget * @constructor - * @param {query.IExecuteResponse=} [properties] Properties to set + * @param {query.ITarget=} [properties] Properties to set */ - function ExecuteResponse(properties) { + function Target(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -76855,75 +77224,117 @@ export const query = $root.query = (() => { } /** - * ExecuteResponse result. - * @member {query.IQueryResult|null|undefined} result - * @memberof query.ExecuteResponse + * Target keyspace. + * @member {string} keyspace + * @memberof query.Target * @instance */ - ExecuteResponse.prototype.result = null; + Target.prototype.keyspace = ""; /** - * Creates a new ExecuteResponse instance using the specified properties. + * Target shard. + * @member {string} shard + * @memberof query.Target + * @instance + */ + Target.prototype.shard = ""; + + /** + * Target tablet_type. + * @member {topodata.TabletType} tablet_type + * @memberof query.Target + * @instance + */ + Target.prototype.tablet_type = 0; + + /** + * Target cell. + * @member {string} cell + * @memberof query.Target + * @instance + */ + Target.prototype.cell = ""; + + /** + * Creates a new Target instance using the specified properties. 
* @function create - * @memberof query.ExecuteResponse + * @memberof query.Target * @static - * @param {query.IExecuteResponse=} [properties] Properties to set - * @returns {query.ExecuteResponse} ExecuteResponse instance + * @param {query.ITarget=} [properties] Properties to set + * @returns {query.Target} Target instance */ - ExecuteResponse.create = function create(properties) { - return new ExecuteResponse(properties); + Target.create = function create(properties) { + return new Target(properties); }; /** - * Encodes the specified ExecuteResponse message. Does not implicitly {@link query.ExecuteResponse.verify|verify} messages. + * Encodes the specified Target message. Does not implicitly {@link query.Target.verify|verify} messages. * @function encode - * @memberof query.ExecuteResponse + * @memberof query.Target * @static - * @param {query.IExecuteResponse} message ExecuteResponse message or plain object to encode + * @param {query.ITarget} message Target message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteResponse.encode = function encode(message, writer) { + Target.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.result != null && Object.hasOwnProperty.call(message, "result")) - $root.query.QueryResult.encode(message.result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.tablet_type != null && Object.hasOwnProperty.call(message, "tablet_type")) + writer.uint32(/* id 3, wireType 0 =*/24).int32(message.tablet_type); + if (message.cell != null && Object.hasOwnProperty.call(message, "cell")) + writer.uint32(/* id 
4, wireType 2 =*/34).string(message.cell); return writer; }; /** - * Encodes the specified ExecuteResponse message, length delimited. Does not implicitly {@link query.ExecuteResponse.verify|verify} messages. + * Encodes the specified Target message, length delimited. Does not implicitly {@link query.Target.verify|verify} messages. * @function encodeDelimited - * @memberof query.ExecuteResponse + * @memberof query.Target * @static - * @param {query.IExecuteResponse} message ExecuteResponse message or plain object to encode + * @param {query.ITarget} message Target message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteResponse.encodeDelimited = function encodeDelimited(message, writer) { + Target.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ExecuteResponse message from the specified reader or buffer. + * Decodes a Target message from the specified reader or buffer. * @function decode - * @memberof query.ExecuteResponse + * @memberof query.Target * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.ExecuteResponse} ExecuteResponse + * @returns {query.Target} Target * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteResponse.decode = function decode(reader, length) { + Target.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ExecuteResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.Target(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.result = $root.query.QueryResult.decode(reader, reader.uint32()); + message.keyspace = reader.string(); + break; + } + case 2: { + message.shard = reader.string(); + break; + } + case 3: { + message.tablet_type = reader.int32(); + break; + } + case 4: { + message.cell = reader.string(); break; } default: @@ -76935,128 +77346,213 @@ export const query = $root.query = (() => { }; /** - * Decodes an ExecuteResponse message from the specified reader or buffer, length delimited. + * Decodes a Target message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.ExecuteResponse + * @memberof query.Target * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.ExecuteResponse} ExecuteResponse + * @returns {query.Target} Target * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteResponse.decodeDelimited = function decodeDelimited(reader) { + Target.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ExecuteResponse message. + * Verifies a Target message. 
* @function verify - * @memberof query.ExecuteResponse + * @memberof query.Target * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ExecuteResponse.verify = function verify(message) { + Target.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.result != null && message.hasOwnProperty("result")) { - let error = $root.query.QueryResult.verify(message.result); - if (error) - return "result." + error; - } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) + switch (message.tablet_type) { + default: + return "tablet_type: enum value expected"; + case 0: + case 1: + case 1: + case 2: + case 3: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + } + if (message.cell != null && message.hasOwnProperty("cell")) + if (!$util.isString(message.cell)) + return "cell: string expected"; return null; }; /** - * Creates an ExecuteResponse message from a plain object. Also converts values to their respective internal types. + * Creates a Target message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.ExecuteResponse + * @memberof query.Target * @static * @param {Object.} object Plain object - * @returns {query.ExecuteResponse} ExecuteResponse + * @returns {query.Target} Target */ - ExecuteResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.ExecuteResponse) + Target.fromObject = function fromObject(object) { + if (object instanceof $root.query.Target) return object; - let message = new $root.query.ExecuteResponse(); - if (object.result != null) { - if (typeof object.result !== "object") - throw TypeError(".query.ExecuteResponse.result: object expected"); - message.result = $root.query.QueryResult.fromObject(object.result); - } - return message; - }; - - /** - * Creates a plain object from an ExecuteResponse message. Also converts values to other types if specified. - * @function toObject - * @memberof query.ExecuteResponse - * @static - * @param {query.ExecuteResponse} message ExecuteResponse - * @param {$protobuf.IConversionOptions} [options] Conversion options + let message = new $root.query.Target(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + switch (object.tablet_type) { + default: + if (typeof object.tablet_type === "number") { + message.tablet_type = object.tablet_type; + break; + } + break; + case "UNKNOWN": + case 0: + message.tablet_type = 0; + break; + case "PRIMARY": + case 1: + message.tablet_type = 1; + break; + case "MASTER": + case 1: + message.tablet_type = 1; + break; + case "REPLICA": + case 2: + message.tablet_type = 2; + break; + case "RDONLY": + case 3: + message.tablet_type = 3; + break; + case "BATCH": + case 3: + message.tablet_type = 3; + break; + case "SPARE": + case 4: + message.tablet_type = 4; + break; + case "EXPERIMENTAL": + case 5: + message.tablet_type = 5; + break; + case "BACKUP": + case 6: + message.tablet_type = 6; + break; + case "RESTORE": + 
case 7: + message.tablet_type = 7; + break; + case "DRAINED": + case 8: + message.tablet_type = 8; + break; + } + if (object.cell != null) + message.cell = String(object.cell); + return message; + }; + + /** + * Creates a plain object from a Target message. Also converts values to other types if specified. + * @function toObject + * @memberof query.Target + * @static + * @param {query.Target} message Target + * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ExecuteResponse.toObject = function toObject(message, options) { + Target.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.result = null; - if (message.result != null && message.hasOwnProperty("result")) - object.result = $root.query.QueryResult.toObject(message.result, options); + if (options.defaults) { + object.keyspace = ""; + object.shard = ""; + object.tablet_type = options.enums === String ? "UNKNOWN" : 0; + object.cell = ""; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) + object.tablet_type = options.enums === String ? $root.topodata.TabletType[message.tablet_type] === undefined ? message.tablet_type : $root.topodata.TabletType[message.tablet_type] : message.tablet_type; + if (message.cell != null && message.hasOwnProperty("cell")) + object.cell = message.cell; return object; }; /** - * Converts this ExecuteResponse to JSON. + * Converts this Target to JSON. 
* @function toJSON - * @memberof query.ExecuteResponse + * @memberof query.Target * @instance * @returns {Object.} JSON object */ - ExecuteResponse.prototype.toJSON = function toJSON() { + Target.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ExecuteResponse + * Gets the default type url for Target * @function getTypeUrl - * @memberof query.ExecuteResponse + * @memberof query.Target * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ExecuteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + Target.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.ExecuteResponse"; + return typeUrlPrefix + "/query.Target"; }; - return ExecuteResponse; + return Target; })(); - query.ResultWithError = (function() { + query.VTGateCallerID = (function() { /** - * Properties of a ResultWithError. + * Properties of a VTGateCallerID. * @memberof query - * @interface IResultWithError - * @property {vtrpc.IRPCError|null} [error] ResultWithError error - * @property {query.IQueryResult|null} [result] ResultWithError result + * @interface IVTGateCallerID + * @property {string|null} [username] VTGateCallerID username + * @property {Array.|null} [groups] VTGateCallerID groups */ /** - * Constructs a new ResultWithError. + * Constructs a new VTGateCallerID. * @memberof query - * @classdesc Represents a ResultWithError. - * @implements IResultWithError + * @classdesc Represents a VTGateCallerID. 
+ * @implements IVTGateCallerID * @constructor - * @param {query.IResultWithError=} [properties] Properties to set + * @param {query.IVTGateCallerID=} [properties] Properties to set */ - function ResultWithError(properties) { + function VTGateCallerID(properties) { + this.groups = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -77064,89 +77560,92 @@ export const query = $root.query = (() => { } /** - * ResultWithError error. - * @member {vtrpc.IRPCError|null|undefined} error - * @memberof query.ResultWithError + * VTGateCallerID username. + * @member {string} username + * @memberof query.VTGateCallerID * @instance */ - ResultWithError.prototype.error = null; + VTGateCallerID.prototype.username = ""; /** - * ResultWithError result. - * @member {query.IQueryResult|null|undefined} result - * @memberof query.ResultWithError + * VTGateCallerID groups. + * @member {Array.} groups + * @memberof query.VTGateCallerID * @instance */ - ResultWithError.prototype.result = null; + VTGateCallerID.prototype.groups = $util.emptyArray; /** - * Creates a new ResultWithError instance using the specified properties. + * Creates a new VTGateCallerID instance using the specified properties. * @function create - * @memberof query.ResultWithError + * @memberof query.VTGateCallerID * @static - * @param {query.IResultWithError=} [properties] Properties to set - * @returns {query.ResultWithError} ResultWithError instance + * @param {query.IVTGateCallerID=} [properties] Properties to set + * @returns {query.VTGateCallerID} VTGateCallerID instance */ - ResultWithError.create = function create(properties) { - return new ResultWithError(properties); + VTGateCallerID.create = function create(properties) { + return new VTGateCallerID(properties); }; /** - * Encodes the specified ResultWithError message. Does not implicitly {@link query.ResultWithError.verify|verify} messages. 
+ * Encodes the specified VTGateCallerID message. Does not implicitly {@link query.VTGateCallerID.verify|verify} messages. * @function encode - * @memberof query.ResultWithError + * @memberof query.VTGateCallerID * @static - * @param {query.IResultWithError} message ResultWithError message or plain object to encode + * @param {query.IVTGateCallerID} message VTGateCallerID message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ResultWithError.encode = function encode(message, writer) { + VTGateCallerID.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.error != null && Object.hasOwnProperty.call(message, "error")) - $root.vtrpc.RPCError.encode(message.error, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.result != null && Object.hasOwnProperty.call(message, "result")) - $root.query.QueryResult.encode(message.result, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.username != null && Object.hasOwnProperty.call(message, "username")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.username); + if (message.groups != null && message.groups.length) + for (let i = 0; i < message.groups.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.groups[i]); return writer; }; /** - * Encodes the specified ResultWithError message, length delimited. Does not implicitly {@link query.ResultWithError.verify|verify} messages. + * Encodes the specified VTGateCallerID message, length delimited. Does not implicitly {@link query.VTGateCallerID.verify|verify} messages. 
* @function encodeDelimited - * @memberof query.ResultWithError + * @memberof query.VTGateCallerID * @static - * @param {query.IResultWithError} message ResultWithError message or plain object to encode + * @param {query.IVTGateCallerID} message VTGateCallerID message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ResultWithError.encodeDelimited = function encodeDelimited(message, writer) { + VTGateCallerID.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ResultWithError message from the specified reader or buffer. + * Decodes a VTGateCallerID message from the specified reader or buffer. * @function decode - * @memberof query.ResultWithError + * @memberof query.VTGateCallerID * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.ResultWithError} ResultWithError + * @returns {query.VTGateCallerID} VTGateCallerID * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ResultWithError.decode = function decode(reader, length) { + VTGateCallerID.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ResultWithError(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.VTGateCallerID(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.error = $root.vtrpc.RPCError.decode(reader, reader.uint32()); + message.username = reader.string(); break; } case 2: { - message.result = $root.query.QueryResult.decode(reader, reader.uint32()); + if (!(message.groups && message.groups.length)) + message.groups = []; + message.groups.push(reader.string()); break; } default: @@ -77158,147 +77657,145 @@ export const query = $root.query = (() => { }; /** - * Decodes a ResultWithError message from the specified reader or buffer, length delimited. + * Decodes a VTGateCallerID message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.ResultWithError + * @memberof query.VTGateCallerID * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.ResultWithError} ResultWithError + * @returns {query.VTGateCallerID} VTGateCallerID * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ResultWithError.decodeDelimited = function decodeDelimited(reader) { + VTGateCallerID.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ResultWithError message. + * Verifies a VTGateCallerID message. 
* @function verify - * @memberof query.ResultWithError + * @memberof query.VTGateCallerID * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ResultWithError.verify = function verify(message) { + VTGateCallerID.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.error != null && message.hasOwnProperty("error")) { - let error = $root.vtrpc.RPCError.verify(message.error); - if (error) - return "error." + error; - } - if (message.result != null && message.hasOwnProperty("result")) { - let error = $root.query.QueryResult.verify(message.result); - if (error) - return "result." + error; + if (message.username != null && message.hasOwnProperty("username")) + if (!$util.isString(message.username)) + return "username: string expected"; + if (message.groups != null && message.hasOwnProperty("groups")) { + if (!Array.isArray(message.groups)) + return "groups: array expected"; + for (let i = 0; i < message.groups.length; ++i) + if (!$util.isString(message.groups[i])) + return "groups: string[] expected"; } return null; }; /** - * Creates a ResultWithError message from a plain object. Also converts values to their respective internal types. + * Creates a VTGateCallerID message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.ResultWithError + * @memberof query.VTGateCallerID * @static * @param {Object.} object Plain object - * @returns {query.ResultWithError} ResultWithError + * @returns {query.VTGateCallerID} VTGateCallerID */ - ResultWithError.fromObject = function fromObject(object) { - if (object instanceof $root.query.ResultWithError) + VTGateCallerID.fromObject = function fromObject(object) { + if (object instanceof $root.query.VTGateCallerID) return object; - let message = new $root.query.ResultWithError(); - if (object.error != null) { - if (typeof object.error !== "object") - throw TypeError(".query.ResultWithError.error: object expected"); - message.error = $root.vtrpc.RPCError.fromObject(object.error); - } - if (object.result != null) { - if (typeof object.result !== "object") - throw TypeError(".query.ResultWithError.result: object expected"); - message.result = $root.query.QueryResult.fromObject(object.result); + let message = new $root.query.VTGateCallerID(); + if (object.username != null) + message.username = String(object.username); + if (object.groups) { + if (!Array.isArray(object.groups)) + throw TypeError(".query.VTGateCallerID.groups: array expected"); + message.groups = []; + for (let i = 0; i < object.groups.length; ++i) + message.groups[i] = String(object.groups[i]); } return message; }; /** - * Creates a plain object from a ResultWithError message. Also converts values to other types if specified. + * Creates a plain object from a VTGateCallerID message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.ResultWithError + * @memberof query.VTGateCallerID * @static - * @param {query.ResultWithError} message ResultWithError + * @param {query.VTGateCallerID} message VTGateCallerID * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ResultWithError.toObject = function toObject(message, options) { + VTGateCallerID.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.error = null; - object.result = null; + if (options.arrays || options.defaults) + object.groups = []; + if (options.defaults) + object.username = ""; + if (message.username != null && message.hasOwnProperty("username")) + object.username = message.username; + if (message.groups && message.groups.length) { + object.groups = []; + for (let j = 0; j < message.groups.length; ++j) + object.groups[j] = message.groups[j]; } - if (message.error != null && message.hasOwnProperty("error")) - object.error = $root.vtrpc.RPCError.toObject(message.error, options); - if (message.result != null && message.hasOwnProperty("result")) - object.result = $root.query.QueryResult.toObject(message.result, options); return object; }; /** - * Converts this ResultWithError to JSON. + * Converts this VTGateCallerID to JSON. 
* @function toJSON - * @memberof query.ResultWithError + * @memberof query.VTGateCallerID * @instance * @returns {Object.} JSON object */ - ResultWithError.prototype.toJSON = function toJSON() { + VTGateCallerID.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ResultWithError + * Gets the default type url for VTGateCallerID * @function getTypeUrl - * @memberof query.ResultWithError + * @memberof query.VTGateCallerID * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ResultWithError.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VTGateCallerID.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.ResultWithError"; + return typeUrlPrefix + "/query.VTGateCallerID"; }; - return ResultWithError; + return VTGateCallerID; })(); - query.StreamExecuteRequest = (function() { + query.EventToken = (function() { /** - * Properties of a StreamExecuteRequest. + * Properties of an EventToken. 
* @memberof query - * @interface IStreamExecuteRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] StreamExecuteRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] StreamExecuteRequest immediate_caller_id - * @property {query.ITarget|null} [target] StreamExecuteRequest target - * @property {query.IBoundQuery|null} [query] StreamExecuteRequest query - * @property {query.IExecuteOptions|null} [options] StreamExecuteRequest options - * @property {number|Long|null} [transaction_id] StreamExecuteRequest transaction_id - * @property {number|Long|null} [reserved_id] StreamExecuteRequest reserved_id + * @interface IEventToken + * @property {number|Long|null} [timestamp] EventToken timestamp + * @property {string|null} [shard] EventToken shard + * @property {string|null} [position] EventToken position */ /** - * Constructs a new StreamExecuteRequest. + * Constructs a new EventToken. * @memberof query - * @classdesc Represents a StreamExecuteRequest. - * @implements IStreamExecuteRequest + * @classdesc Represents an EventToken. + * @implements IEventToken * @constructor - * @param {query.IStreamExecuteRequest=} [properties] Properties to set + * @param {query.IEventToken=} [properties] Properties to set */ - function StreamExecuteRequest(properties) { + function EventToken(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -77306,159 +77803,103 @@ export const query = $root.query = (() => { } /** - * StreamExecuteRequest effective_caller_id. - * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.StreamExecuteRequest - * @instance - */ - StreamExecuteRequest.prototype.effective_caller_id = null; - - /** - * StreamExecuteRequest immediate_caller_id. 
- * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.StreamExecuteRequest - * @instance - */ - StreamExecuteRequest.prototype.immediate_caller_id = null; - - /** - * StreamExecuteRequest target. - * @member {query.ITarget|null|undefined} target - * @memberof query.StreamExecuteRequest - * @instance - */ - StreamExecuteRequest.prototype.target = null; - - /** - * StreamExecuteRequest query. - * @member {query.IBoundQuery|null|undefined} query - * @memberof query.StreamExecuteRequest - * @instance - */ - StreamExecuteRequest.prototype.query = null; - - /** - * StreamExecuteRequest options. - * @member {query.IExecuteOptions|null|undefined} options - * @memberof query.StreamExecuteRequest + * EventToken timestamp. + * @member {number|Long} timestamp + * @memberof query.EventToken * @instance */ - StreamExecuteRequest.prototype.options = null; + EventToken.prototype.timestamp = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * StreamExecuteRequest transaction_id. - * @member {number|Long} transaction_id - * @memberof query.StreamExecuteRequest + * EventToken shard. + * @member {string} shard + * @memberof query.EventToken * @instance */ - StreamExecuteRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + EventToken.prototype.shard = ""; /** - * StreamExecuteRequest reserved_id. - * @member {number|Long} reserved_id - * @memberof query.StreamExecuteRequest + * EventToken position. + * @member {string} position + * @memberof query.EventToken * @instance */ - StreamExecuteRequest.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + EventToken.prototype.position = ""; /** - * Creates a new StreamExecuteRequest instance using the specified properties. + * Creates a new EventToken instance using the specified properties. 
* @function create - * @memberof query.StreamExecuteRequest + * @memberof query.EventToken * @static - * @param {query.IStreamExecuteRequest=} [properties] Properties to set - * @returns {query.StreamExecuteRequest} StreamExecuteRequest instance + * @param {query.IEventToken=} [properties] Properties to set + * @returns {query.EventToken} EventToken instance */ - StreamExecuteRequest.create = function create(properties) { - return new StreamExecuteRequest(properties); + EventToken.create = function create(properties) { + return new EventToken(properties); }; /** - * Encodes the specified StreamExecuteRequest message. Does not implicitly {@link query.StreamExecuteRequest.verify|verify} messages. + * Encodes the specified EventToken message. Does not implicitly {@link query.EventToken.verify|verify} messages. * @function encode - * @memberof query.StreamExecuteRequest + * @memberof query.EventToken * @static - * @param {query.IStreamExecuteRequest} message StreamExecuteRequest message or plain object to encode + * @param {query.IEventToken} message EventToken message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StreamExecuteRequest.encode = function encode(message, writer) { + EventToken.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) - $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) - $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.target != null && Object.hasOwnProperty.call(message, "target")) - $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 
=*/26).fork()).ldelim(); - if (message.query != null && Object.hasOwnProperty.call(message, "query")) - $root.query.BoundQuery.encode(message.query, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.options != null && Object.hasOwnProperty.call(message, "options")) - $root.query.ExecuteOptions.encode(message.options, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); - if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) - writer.uint32(/* id 6, wireType 0 =*/48).int64(message.transaction_id); - if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) - writer.uint32(/* id 7, wireType 0 =*/56).int64(message.reserved_id); + if (message.timestamp != null && Object.hasOwnProperty.call(message, "timestamp")) + writer.uint32(/* id 1, wireType 0 =*/8).int64(message.timestamp); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.position != null && Object.hasOwnProperty.call(message, "position")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.position); return writer; }; /** - * Encodes the specified StreamExecuteRequest message, length delimited. Does not implicitly {@link query.StreamExecuteRequest.verify|verify} messages. + * Encodes the specified EventToken message, length delimited. Does not implicitly {@link query.EventToken.verify|verify} messages. 
* @function encodeDelimited - * @memberof query.StreamExecuteRequest + * @memberof query.EventToken * @static - * @param {query.IStreamExecuteRequest} message StreamExecuteRequest message or plain object to encode + * @param {query.IEventToken} message EventToken message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StreamExecuteRequest.encodeDelimited = function encodeDelimited(message, writer) { + EventToken.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a StreamExecuteRequest message from the specified reader or buffer. + * Decodes an EventToken message from the specified reader or buffer. * @function decode - * @memberof query.StreamExecuteRequest + * @memberof query.EventToken * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.StreamExecuteRequest} StreamExecuteRequest + * @returns {query.EventToken} EventToken * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamExecuteRequest.decode = function decode(reader, length) { + EventToken.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.StreamExecuteRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.EventToken(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); + message.timestamp = reader.int64(); break; } case 2: { - message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); + message.shard = reader.string(); break; } case 3: { - message.target = $root.query.Target.decode(reader, reader.uint32()); - break; - } - case 4: { - message.query = $root.query.BoundQuery.decode(reader, reader.uint32()); - break; - } - case 5: { - message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); - break; - } - case 6: { - message.transaction_id = reader.int64(); - break; - } - case 7: { - message.reserved_id = reader.int64(); + message.position = reader.string(); break; } default: @@ -77470,224 +77911,308 @@ export const query = $root.query = (() => { }; /** - * Decodes a StreamExecuteRequest message from the specified reader or buffer, length delimited. + * Decodes an EventToken message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.StreamExecuteRequest + * @memberof query.EventToken * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.StreamExecuteRequest} StreamExecuteRequest + * @returns {query.EventToken} EventToken * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamExecuteRequest.decodeDelimited = function decodeDelimited(reader) { + EventToken.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a StreamExecuteRequest message. + * Verifies an EventToken message. 
* @function verify - * @memberof query.StreamExecuteRequest + * @memberof query.EventToken * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - StreamExecuteRequest.verify = function verify(message) { + EventToken.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { - let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); - if (error) - return "effective_caller_id." + error; - } - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { - let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); - if (error) - return "immediate_caller_id." + error; - } - if (message.target != null && message.hasOwnProperty("target")) { - let error = $root.query.Target.verify(message.target); - if (error) - return "target." + error; - } - if (message.query != null && message.hasOwnProperty("query")) { - let error = $root.query.BoundQuery.verify(message.query); - if (error) - return "query." + error; - } - if (message.options != null && message.hasOwnProperty("options")) { - let error = $root.query.ExecuteOptions.verify(message.options); - if (error) - return "options." 
+ error; - } - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) - return "transaction_id: integer|Long expected"; - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) - return "reserved_id: integer|Long expected"; - return null; - }; - - /** - * Creates a StreamExecuteRequest message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof query.StreamExecuteRequest - * @static + if (message.timestamp != null && message.hasOwnProperty("timestamp")) + if (!$util.isInteger(message.timestamp) && !(message.timestamp && $util.isInteger(message.timestamp.low) && $util.isInteger(message.timestamp.high))) + return "timestamp: integer|Long expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.position != null && message.hasOwnProperty("position")) + if (!$util.isString(message.position)) + return "position: string expected"; + return null; + }; + + /** + * Creates an EventToken message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof query.EventToken + * @static * @param {Object.} object Plain object - * @returns {query.StreamExecuteRequest} StreamExecuteRequest + * @returns {query.EventToken} EventToken */ - StreamExecuteRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.StreamExecuteRequest) + EventToken.fromObject = function fromObject(object) { + if (object instanceof $root.query.EventToken) return object; - let message = new $root.query.StreamExecuteRequest(); - if (object.effective_caller_id != null) { - if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.StreamExecuteRequest.effective_caller_id: object expected"); - message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); - } - if (object.immediate_caller_id != null) { - if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.StreamExecuteRequest.immediate_caller_id: object expected"); - message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); - } - if (object.target != null) { - if (typeof object.target !== "object") - throw TypeError(".query.StreamExecuteRequest.target: object expected"); - message.target = $root.query.Target.fromObject(object.target); - } - if (object.query != null) { - if (typeof object.query !== "object") - throw TypeError(".query.StreamExecuteRequest.query: object expected"); - message.query = $root.query.BoundQuery.fromObject(object.query); - } - if (object.options != null) { - if (typeof object.options !== "object") - throw TypeError(".query.StreamExecuteRequest.options: object expected"); - message.options = $root.query.ExecuteOptions.fromObject(object.options); - } - if (object.transaction_id != null) - if ($util.Long) - (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; - else if (typeof object.transaction_id === "string") - message.transaction_id = 
parseInt(object.transaction_id, 10); - else if (typeof object.transaction_id === "number") - message.transaction_id = object.transaction_id; - else if (typeof object.transaction_id === "object") - message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); - if (object.reserved_id != null) + let message = new $root.query.EventToken(); + if (object.timestamp != null) if ($util.Long) - (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; - else if (typeof object.reserved_id === "string") - message.reserved_id = parseInt(object.reserved_id, 10); - else if (typeof object.reserved_id === "number") - message.reserved_id = object.reserved_id; - else if (typeof object.reserved_id === "object") - message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); + (message.timestamp = $util.Long.fromValue(object.timestamp)).unsigned = false; + else if (typeof object.timestamp === "string") + message.timestamp = parseInt(object.timestamp, 10); + else if (typeof object.timestamp === "number") + message.timestamp = object.timestamp; + else if (typeof object.timestamp === "object") + message.timestamp = new $util.LongBits(object.timestamp.low >>> 0, object.timestamp.high >>> 0).toNumber(); + if (object.shard != null) + message.shard = String(object.shard); + if (object.position != null) + message.position = String(object.position); return message; }; /** - * Creates a plain object from a StreamExecuteRequest message. Also converts values to other types if specified. + * Creates a plain object from an EventToken message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.StreamExecuteRequest + * @memberof query.EventToken * @static - * @param {query.StreamExecuteRequest} message StreamExecuteRequest + * @param {query.EventToken} message EventToken * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - StreamExecuteRequest.toObject = function toObject(message, options) { + EventToken.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.effective_caller_id = null; - object.immediate_caller_id = null; - object.target = null; - object.query = null; - object.options = null; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.transaction_id = options.longs === String ? "0" : 0; if ($util.Long) { let long = new $util.Long(0, 0, false); - object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + object.timestamp = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; } else - object.reserved_id = options.longs === String ? "0" : 0; + object.timestamp = options.longs === String ? 
"0" : 0; + object.shard = ""; + object.position = ""; } - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) - object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) - object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); - if (message.target != null && message.hasOwnProperty("target")) - object.target = $root.query.Target.toObject(message.target, options); - if (message.query != null && message.hasOwnProperty("query")) - object.query = $root.query.BoundQuery.toObject(message.query, options); - if (message.options != null && message.hasOwnProperty("options")) - object.options = $root.query.ExecuteOptions.toObject(message.options, options); - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (typeof message.transaction_id === "number") - object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; - else - object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (typeof message.reserved_id === "number") - object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; + if (message.timestamp != null && message.hasOwnProperty("timestamp")) + if (typeof message.timestamp === "number") + object.timestamp = options.longs === String ? String(message.timestamp) : message.timestamp; else - object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? 
new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; + object.timestamp = options.longs === String ? $util.Long.prototype.toString.call(message.timestamp) : options.longs === Number ? new $util.LongBits(message.timestamp.low >>> 0, message.timestamp.high >>> 0).toNumber() : message.timestamp; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.position != null && message.hasOwnProperty("position")) + object.position = message.position; return object; }; /** - * Converts this StreamExecuteRequest to JSON. + * Converts this EventToken to JSON. * @function toJSON - * @memberof query.StreamExecuteRequest + * @memberof query.EventToken * @instance * @returns {Object.} JSON object */ - StreamExecuteRequest.prototype.toJSON = function toJSON() { + EventToken.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for StreamExecuteRequest + * Gets the default type url for EventToken * @function getTypeUrl - * @memberof query.StreamExecuteRequest + * @memberof query.EventToken * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - StreamExecuteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + EventToken.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.StreamExecuteRequest"; + return typeUrlPrefix + "/query.EventToken"; }; - return StreamExecuteRequest; + return EventToken; })(); - query.StreamExecuteResponse = (function() { + /** + * MySqlFlag enum. 
+ * @name query.MySqlFlag + * @enum {number} + * @property {number} EMPTY=0 EMPTY value + * @property {number} NOT_NULL_FLAG=1 NOT_NULL_FLAG value + * @property {number} PRI_KEY_FLAG=2 PRI_KEY_FLAG value + * @property {number} UNIQUE_KEY_FLAG=4 UNIQUE_KEY_FLAG value + * @property {number} MULTIPLE_KEY_FLAG=8 MULTIPLE_KEY_FLAG value + * @property {number} BLOB_FLAG=16 BLOB_FLAG value + * @property {number} UNSIGNED_FLAG=32 UNSIGNED_FLAG value + * @property {number} ZEROFILL_FLAG=64 ZEROFILL_FLAG value + * @property {number} BINARY_FLAG=128 BINARY_FLAG value + * @property {number} ENUM_FLAG=256 ENUM_FLAG value + * @property {number} AUTO_INCREMENT_FLAG=512 AUTO_INCREMENT_FLAG value + * @property {number} TIMESTAMP_FLAG=1024 TIMESTAMP_FLAG value + * @property {number} SET_FLAG=2048 SET_FLAG value + * @property {number} NO_DEFAULT_VALUE_FLAG=4096 NO_DEFAULT_VALUE_FLAG value + * @property {number} ON_UPDATE_NOW_FLAG=8192 ON_UPDATE_NOW_FLAG value + * @property {number} NUM_FLAG=32768 NUM_FLAG value + * @property {number} PART_KEY_FLAG=16384 PART_KEY_FLAG value + * @property {number} GROUP_FLAG=32768 GROUP_FLAG value + * @property {number} UNIQUE_FLAG=65536 UNIQUE_FLAG value + * @property {number} BINCMP_FLAG=131072 BINCMP_FLAG value + */ + query.MySqlFlag = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "EMPTY"] = 0; + values[valuesById[1] = "NOT_NULL_FLAG"] = 1; + values[valuesById[2] = "PRI_KEY_FLAG"] = 2; + values[valuesById[4] = "UNIQUE_KEY_FLAG"] = 4; + values[valuesById[8] = "MULTIPLE_KEY_FLAG"] = 8; + values[valuesById[16] = "BLOB_FLAG"] = 16; + values[valuesById[32] = "UNSIGNED_FLAG"] = 32; + values[valuesById[64] = "ZEROFILL_FLAG"] = 64; + values[valuesById[128] = "BINARY_FLAG"] = 128; + values[valuesById[256] = "ENUM_FLAG"] = 256; + values[valuesById[512] = "AUTO_INCREMENT_FLAG"] = 512; + values[valuesById[1024] = "TIMESTAMP_FLAG"] = 1024; + values[valuesById[2048] = "SET_FLAG"] = 2048; + 
values[valuesById[4096] = "NO_DEFAULT_VALUE_FLAG"] = 4096; + values[valuesById[8192] = "ON_UPDATE_NOW_FLAG"] = 8192; + values[valuesById[32768] = "NUM_FLAG"] = 32768; + values[valuesById[16384] = "PART_KEY_FLAG"] = 16384; + values["GROUP_FLAG"] = 32768; + values[valuesById[65536] = "UNIQUE_FLAG"] = 65536; + values[valuesById[131072] = "BINCMP_FLAG"] = 131072; + return values; + })(); + + /** + * Flag enum. + * @name query.Flag + * @enum {number} + * @property {number} NONE=0 NONE value + * @property {number} ISINTEGRAL=256 ISINTEGRAL value + * @property {number} ISUNSIGNED=512 ISUNSIGNED value + * @property {number} ISFLOAT=1024 ISFLOAT value + * @property {number} ISQUOTED=2048 ISQUOTED value + * @property {number} ISTEXT=4096 ISTEXT value + * @property {number} ISBINARY=8192 ISBINARY value + */ + query.Flag = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "NONE"] = 0; + values[valuesById[256] = "ISINTEGRAL"] = 256; + values[valuesById[512] = "ISUNSIGNED"] = 512; + values[valuesById[1024] = "ISFLOAT"] = 1024; + values[valuesById[2048] = "ISQUOTED"] = 2048; + values[valuesById[4096] = "ISTEXT"] = 4096; + values[valuesById[8192] = "ISBINARY"] = 8192; + return values; + })(); + + /** + * Type enum. 
+ * @name query.Type + * @enum {number} + * @property {number} NULL_TYPE=0 NULL_TYPE value + * @property {number} INT8=257 INT8 value + * @property {number} UINT8=770 UINT8 value + * @property {number} INT16=259 INT16 value + * @property {number} UINT16=772 UINT16 value + * @property {number} INT24=261 INT24 value + * @property {number} UINT24=774 UINT24 value + * @property {number} INT32=263 INT32 value + * @property {number} UINT32=776 UINT32 value + * @property {number} INT64=265 INT64 value + * @property {number} UINT64=778 UINT64 value + * @property {number} FLOAT32=1035 FLOAT32 value + * @property {number} FLOAT64=1036 FLOAT64 value + * @property {number} TIMESTAMP=2061 TIMESTAMP value + * @property {number} DATE=2062 DATE value + * @property {number} TIME=2063 TIME value + * @property {number} DATETIME=2064 DATETIME value + * @property {number} YEAR=785 YEAR value + * @property {number} DECIMAL=18 DECIMAL value + * @property {number} TEXT=6163 TEXT value + * @property {number} BLOB=10260 BLOB value + * @property {number} VARCHAR=6165 VARCHAR value + * @property {number} VARBINARY=10262 VARBINARY value + * @property {number} CHAR=6167 CHAR value + * @property {number} BINARY=10264 BINARY value + * @property {number} BIT=2073 BIT value + * @property {number} ENUM=2074 ENUM value + * @property {number} SET=2075 SET value + * @property {number} TUPLE=28 TUPLE value + * @property {number} GEOMETRY=2077 GEOMETRY value + * @property {number} JSON=2078 JSON value + * @property {number} EXPRESSION=31 EXPRESSION value + * @property {number} HEXNUM=4128 HEXNUM value + * @property {number} HEXVAL=4129 HEXVAL value + * @property {number} BITNUM=4130 BITNUM value + */ + query.Type = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "NULL_TYPE"] = 0; + values[valuesById[257] = "INT8"] = 257; + values[valuesById[770] = "UINT8"] = 770; + values[valuesById[259] = "INT16"] = 259; + values[valuesById[772] = "UINT16"] = 772; + 
values[valuesById[261] = "INT24"] = 261; + values[valuesById[774] = "UINT24"] = 774; + values[valuesById[263] = "INT32"] = 263; + values[valuesById[776] = "UINT32"] = 776; + values[valuesById[265] = "INT64"] = 265; + values[valuesById[778] = "UINT64"] = 778; + values[valuesById[1035] = "FLOAT32"] = 1035; + values[valuesById[1036] = "FLOAT64"] = 1036; + values[valuesById[2061] = "TIMESTAMP"] = 2061; + values[valuesById[2062] = "DATE"] = 2062; + values[valuesById[2063] = "TIME"] = 2063; + values[valuesById[2064] = "DATETIME"] = 2064; + values[valuesById[785] = "YEAR"] = 785; + values[valuesById[18] = "DECIMAL"] = 18; + values[valuesById[6163] = "TEXT"] = 6163; + values[valuesById[10260] = "BLOB"] = 10260; + values[valuesById[6165] = "VARCHAR"] = 6165; + values[valuesById[10262] = "VARBINARY"] = 10262; + values[valuesById[6167] = "CHAR"] = 6167; + values[valuesById[10264] = "BINARY"] = 10264; + values[valuesById[2073] = "BIT"] = 2073; + values[valuesById[2074] = "ENUM"] = 2074; + values[valuesById[2075] = "SET"] = 2075; + values[valuesById[28] = "TUPLE"] = 28; + values[valuesById[2077] = "GEOMETRY"] = 2077; + values[valuesById[2078] = "JSON"] = 2078; + values[valuesById[31] = "EXPRESSION"] = 31; + values[valuesById[4128] = "HEXNUM"] = 4128; + values[valuesById[4129] = "HEXVAL"] = 4129; + values[valuesById[4130] = "BITNUM"] = 4130; + return values; + })(); + + query.Value = (function() { /** - * Properties of a StreamExecuteResponse. + * Properties of a Value. * @memberof query - * @interface IStreamExecuteResponse - * @property {query.IQueryResult|null} [result] StreamExecuteResponse result + * @interface IValue + * @property {query.Type|null} [type] Value type + * @property {Uint8Array|null} [value] Value value */ /** - * Constructs a new StreamExecuteResponse. + * Constructs a new Value. * @memberof query - * @classdesc Represents a StreamExecuteResponse. - * @implements IStreamExecuteResponse + * @classdesc Represents a Value. 
+ * @implements IValue * @constructor - * @param {query.IStreamExecuteResponse=} [properties] Properties to set + * @param {query.IValue=} [properties] Properties to set */ - function StreamExecuteResponse(properties) { + function Value(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -77695,75 +78220,89 @@ export const query = $root.query = (() => { } /** - * StreamExecuteResponse result. - * @member {query.IQueryResult|null|undefined} result - * @memberof query.StreamExecuteResponse + * Value type. + * @member {query.Type} type + * @memberof query.Value * @instance */ - StreamExecuteResponse.prototype.result = null; + Value.prototype.type = 0; /** - * Creates a new StreamExecuteResponse instance using the specified properties. + * Value value. + * @member {Uint8Array} value + * @memberof query.Value + * @instance + */ + Value.prototype.value = $util.newBuffer([]); + + /** + * Creates a new Value instance using the specified properties. * @function create - * @memberof query.StreamExecuteResponse + * @memberof query.Value * @static - * @param {query.IStreamExecuteResponse=} [properties] Properties to set - * @returns {query.StreamExecuteResponse} StreamExecuteResponse instance + * @param {query.IValue=} [properties] Properties to set + * @returns {query.Value} Value instance */ - StreamExecuteResponse.create = function create(properties) { - return new StreamExecuteResponse(properties); + Value.create = function create(properties) { + return new Value(properties); }; /** - * Encodes the specified StreamExecuteResponse message. Does not implicitly {@link query.StreamExecuteResponse.verify|verify} messages. + * Encodes the specified Value message. Does not implicitly {@link query.Value.verify|verify} messages. 
* @function encode - * @memberof query.StreamExecuteResponse + * @memberof query.Value * @static - * @param {query.IStreamExecuteResponse} message StreamExecuteResponse message or plain object to encode + * @param {query.IValue} message Value message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StreamExecuteResponse.encode = function encode(message, writer) { + Value.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.result != null && Object.hasOwnProperty.call(message, "result")) - $root.query.QueryResult.encode(message.result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.type != null && Object.hasOwnProperty.call(message, "type")) + writer.uint32(/* id 1, wireType 0 =*/8).int32(message.type); + if (message.value != null && Object.hasOwnProperty.call(message, "value")) + writer.uint32(/* id 2, wireType 2 =*/18).bytes(message.value); return writer; }; /** - * Encodes the specified StreamExecuteResponse message, length delimited. Does not implicitly {@link query.StreamExecuteResponse.verify|verify} messages. + * Encodes the specified Value message, length delimited. Does not implicitly {@link query.Value.verify|verify} messages. * @function encodeDelimited - * @memberof query.StreamExecuteResponse + * @memberof query.Value * @static - * @param {query.IStreamExecuteResponse} message StreamExecuteResponse message or plain object to encode + * @param {query.IValue} message Value message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StreamExecuteResponse.encodeDelimited = function encodeDelimited(message, writer) { + Value.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a StreamExecuteResponse message from the specified reader or buffer. 
+ * Decodes a Value message from the specified reader or buffer. * @function decode - * @memberof query.StreamExecuteResponse + * @memberof query.Value * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.StreamExecuteResponse} StreamExecuteResponse + * @returns {query.Value} Value * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamExecuteResponse.decode = function decode(reader, length) { + Value.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.StreamExecuteResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.Value(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.result = $root.query.QueryResult.decode(reader, reader.uint32()); + message.type = reader.int32(); + break; + } + case 2: { + message.value = reader.bytes(); break; } default: @@ -77775,130 +78314,327 @@ export const query = $root.query = (() => { }; /** - * Decodes a StreamExecuteResponse message from the specified reader or buffer, length delimited. + * Decodes a Value message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.StreamExecuteResponse + * @memberof query.Value * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.StreamExecuteResponse} StreamExecuteResponse + * @returns {query.Value} Value * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamExecuteResponse.decodeDelimited = function decodeDelimited(reader) { + Value.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a StreamExecuteResponse message. + * Verifies a Value message. * @function verify - * @memberof query.StreamExecuteResponse + * @memberof query.Value * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - StreamExecuteResponse.verify = function verify(message) { + Value.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.result != null && message.hasOwnProperty("result")) { - let error = $root.query.QueryResult.verify(message.result); - if (error) - return "result." 
+ error; - } + if (message.type != null && message.hasOwnProperty("type")) + switch (message.type) { + default: + return "type: enum value expected"; + case 0: + case 257: + case 770: + case 259: + case 772: + case 261: + case 774: + case 263: + case 776: + case 265: + case 778: + case 1035: + case 1036: + case 2061: + case 2062: + case 2063: + case 2064: + case 785: + case 18: + case 6163: + case 10260: + case 6165: + case 10262: + case 6167: + case 10264: + case 2073: + case 2074: + case 2075: + case 28: + case 2077: + case 2078: + case 31: + case 4128: + case 4129: + case 4130: + break; + } + if (message.value != null && message.hasOwnProperty("value")) + if (!(message.value && typeof message.value.length === "number" || $util.isString(message.value))) + return "value: buffer expected"; return null; }; /** - * Creates a StreamExecuteResponse message from a plain object. Also converts values to their respective internal types. + * Creates a Value message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.StreamExecuteResponse + * @memberof query.Value * @static * @param {Object.} object Plain object - * @returns {query.StreamExecuteResponse} StreamExecuteResponse + * @returns {query.Value} Value */ - StreamExecuteResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.StreamExecuteResponse) + Value.fromObject = function fromObject(object) { + if (object instanceof $root.query.Value) return object; - let message = new $root.query.StreamExecuteResponse(); - if (object.result != null) { - if (typeof object.result !== "object") - throw TypeError(".query.StreamExecuteResponse.result: object expected"); - message.result = $root.query.QueryResult.fromObject(object.result); - } - return message; - }; - - /** - * Creates a plain object from a StreamExecuteResponse message. Also converts values to other types if specified. 
- * @function toObject - * @memberof query.StreamExecuteResponse - * @static - * @param {query.StreamExecuteResponse} message StreamExecuteResponse - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - StreamExecuteResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) - object.result = null; - if (message.result != null && message.hasOwnProperty("result")) - object.result = $root.query.QueryResult.toObject(message.result, options); - return object; - }; - - /** - * Converts this StreamExecuteResponse to JSON. - * @function toJSON - * @memberof query.StreamExecuteResponse - * @instance - * @returns {Object.} JSON object - */ - StreamExecuteResponse.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - /** - * Gets the default type url for StreamExecuteResponse - * @function getTypeUrl - * @memberof query.StreamExecuteResponse - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - StreamExecuteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/query.StreamExecuteResponse"; - }; + let message = new $root.query.Value(); + switch (object.type) { + default: + if (typeof object.type === "number") { + message.type = object.type; + break; + } + break; + case "NULL_TYPE": + case 0: + message.type = 0; + break; + case "INT8": + case 257: + message.type = 257; + break; + case "UINT8": + case 770: + message.type = 770; + break; + case "INT16": + case 259: + message.type = 259; + break; + case "UINT16": + case 772: + message.type = 772; + break; + case "INT24": + case 261: + message.type = 261; + break; + case "UINT24": + case 774: + message.type = 774; + break; + 
case "INT32": + case 263: + message.type = 263; + break; + case "UINT32": + case 776: + message.type = 776; + break; + case "INT64": + case 265: + message.type = 265; + break; + case "UINT64": + case 778: + message.type = 778; + break; + case "FLOAT32": + case 1035: + message.type = 1035; + break; + case "FLOAT64": + case 1036: + message.type = 1036; + break; + case "TIMESTAMP": + case 2061: + message.type = 2061; + break; + case "DATE": + case 2062: + message.type = 2062; + break; + case "TIME": + case 2063: + message.type = 2063; + break; + case "DATETIME": + case 2064: + message.type = 2064; + break; + case "YEAR": + case 785: + message.type = 785; + break; + case "DECIMAL": + case 18: + message.type = 18; + break; + case "TEXT": + case 6163: + message.type = 6163; + break; + case "BLOB": + case 10260: + message.type = 10260; + break; + case "VARCHAR": + case 6165: + message.type = 6165; + break; + case "VARBINARY": + case 10262: + message.type = 10262; + break; + case "CHAR": + case 6167: + message.type = 6167; + break; + case "BINARY": + case 10264: + message.type = 10264; + break; + case "BIT": + case 2073: + message.type = 2073; + break; + case "ENUM": + case 2074: + message.type = 2074; + break; + case "SET": + case 2075: + message.type = 2075; + break; + case "TUPLE": + case 28: + message.type = 28; + break; + case "GEOMETRY": + case 2077: + message.type = 2077; + break; + case "JSON": + case 2078: + message.type = 2078; + break; + case "EXPRESSION": + case 31: + message.type = 31; + break; + case "HEXNUM": + case 4128: + message.type = 4128; + break; + case "HEXVAL": + case 4129: + message.type = 4129; + break; + case "BITNUM": + case 4130: + message.type = 4130; + break; + } + if (object.value != null) + if (typeof object.value === "string") + $util.base64.decode(object.value, message.value = $util.newBuffer($util.base64.length(object.value)), 0); + else if (object.value.length >= 0) + message.value = object.value; + return message; + }; - return 
StreamExecuteResponse; + /** + * Creates a plain object from a Value message. Also converts values to other types if specified. + * @function toObject + * @memberof query.Value + * @static + * @param {query.Value} message Value + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Value.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.type = options.enums === String ? "NULL_TYPE" : 0; + if (options.bytes === String) + object.value = ""; + else { + object.value = []; + if (options.bytes !== Array) + object.value = $util.newBuffer(object.value); + } + } + if (message.type != null && message.hasOwnProperty("type")) + object.type = options.enums === String ? $root.query.Type[message.type] === undefined ? message.type : $root.query.Type[message.type] : message.type; + if (message.value != null && message.hasOwnProperty("value")) + object.value = options.bytes === String ? $util.base64.encode(message.value, 0, message.value.length) : options.bytes === Array ? Array.prototype.slice.call(message.value) : message.value; + return object; + }; + + /** + * Converts this Value to JSON. 
+ * @function toJSON + * @memberof query.Value + * @instance + * @returns {Object.} JSON object + */ + Value.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Value + * @function getTypeUrl + * @memberof query.Value + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Value.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/query.Value"; + }; + + return Value; })(); - query.BeginRequest = (function() { + query.BindVariable = (function() { /** - * Properties of a BeginRequest. + * Properties of a BindVariable. * @memberof query - * @interface IBeginRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] BeginRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] BeginRequest immediate_caller_id - * @property {query.ITarget|null} [target] BeginRequest target - * @property {query.IExecuteOptions|null} [options] BeginRequest options + * @interface IBindVariable + * @property {query.Type|null} [type] BindVariable type + * @property {Uint8Array|null} [value] BindVariable value + * @property {Array.|null} [values] BindVariable values */ /** - * Constructs a new BeginRequest. + * Constructs a new BindVariable. * @memberof query - * @classdesc Represents a BeginRequest. - * @implements IBeginRequest + * @classdesc Represents a BindVariable. 
+ * @implements IBindVariable * @constructor - * @param {query.IBeginRequest=} [properties] Properties to set + * @param {query.IBindVariable=} [properties] Properties to set */ - function BeginRequest(properties) { + function BindVariable(properties) { + this.values = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -77906,117 +78642,106 @@ export const query = $root.query = (() => { } /** - * BeginRequest effective_caller_id. - * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.BeginRequest - * @instance - */ - BeginRequest.prototype.effective_caller_id = null; - - /** - * BeginRequest immediate_caller_id. - * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.BeginRequest + * BindVariable type. + * @member {query.Type} type + * @memberof query.BindVariable * @instance */ - BeginRequest.prototype.immediate_caller_id = null; + BindVariable.prototype.type = 0; /** - * BeginRequest target. - * @member {query.ITarget|null|undefined} target - * @memberof query.BeginRequest + * BindVariable value. + * @member {Uint8Array} value + * @memberof query.BindVariable * @instance */ - BeginRequest.prototype.target = null; + BindVariable.prototype.value = $util.newBuffer([]); /** - * BeginRequest options. - * @member {query.IExecuteOptions|null|undefined} options - * @memberof query.BeginRequest + * BindVariable values. + * @member {Array.} values + * @memberof query.BindVariable * @instance */ - BeginRequest.prototype.options = null; + BindVariable.prototype.values = $util.emptyArray; /** - * Creates a new BeginRequest instance using the specified properties. + * Creates a new BindVariable instance using the specified properties. 
* @function create - * @memberof query.BeginRequest + * @memberof query.BindVariable * @static - * @param {query.IBeginRequest=} [properties] Properties to set - * @returns {query.BeginRequest} BeginRequest instance + * @param {query.IBindVariable=} [properties] Properties to set + * @returns {query.BindVariable} BindVariable instance */ - BeginRequest.create = function create(properties) { - return new BeginRequest(properties); + BindVariable.create = function create(properties) { + return new BindVariable(properties); }; /** - * Encodes the specified BeginRequest message. Does not implicitly {@link query.BeginRequest.verify|verify} messages. + * Encodes the specified BindVariable message. Does not implicitly {@link query.BindVariable.verify|verify} messages. * @function encode - * @memberof query.BeginRequest + * @memberof query.BindVariable * @static - * @param {query.IBeginRequest} message BeginRequest message or plain object to encode + * @param {query.IBindVariable} message BindVariable message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BeginRequest.encode = function encode(message, writer) { + BindVariable.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) - $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) - $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.target != null && Object.hasOwnProperty.call(message, "target")) - $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.options != null && Object.hasOwnProperty.call(message, 
"options")) - $root.query.ExecuteOptions.encode(message.options, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.type != null && Object.hasOwnProperty.call(message, "type")) + writer.uint32(/* id 1, wireType 0 =*/8).int32(message.type); + if (message.value != null && Object.hasOwnProperty.call(message, "value")) + writer.uint32(/* id 2, wireType 2 =*/18).bytes(message.value); + if (message.values != null && message.values.length) + for (let i = 0; i < message.values.length; ++i) + $root.query.Value.encode(message.values[i], writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); return writer; }; /** - * Encodes the specified BeginRequest message, length delimited. Does not implicitly {@link query.BeginRequest.verify|verify} messages. + * Encodes the specified BindVariable message, length delimited. Does not implicitly {@link query.BindVariable.verify|verify} messages. * @function encodeDelimited - * @memberof query.BeginRequest + * @memberof query.BindVariable * @static - * @param {query.IBeginRequest} message BeginRequest message or plain object to encode + * @param {query.IBindVariable} message BindVariable message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BeginRequest.encodeDelimited = function encodeDelimited(message, writer) { + BindVariable.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a BeginRequest message from the specified reader or buffer. + * Decodes a BindVariable message from the specified reader or buffer. 
* @function decode - * @memberof query.BeginRequest + * @memberof query.BindVariable * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.BeginRequest} BeginRequest + * @returns {query.BindVariable} BindVariable * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BeginRequest.decode = function decode(reader, length) { + BindVariable.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.BeginRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.BindVariable(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); + message.type = reader.int32(); break; } case 2: { - message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); + message.value = reader.bytes(); break; } case 3: { - message.target = $root.query.Target.decode(reader, reader.uint32()); - break; - } - case 4: { - message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); + if (!(message.values && message.values.length)) + message.values = []; + message.values.push($root.query.Value.decode(reader, reader.uint32())); break; } default: @@ -78028,169 +78753,352 @@ export const query = $root.query = (() => { }; /** - * Decodes a BeginRequest message from the specified reader or buffer, length delimited. + * Decodes a BindVariable message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.BeginRequest + * @memberof query.BindVariable * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.BeginRequest} BeginRequest + * @returns {query.BindVariable} BindVariable * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BeginRequest.decodeDelimited = function decodeDelimited(reader) { + BindVariable.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a BeginRequest message. + * Verifies a BindVariable message. * @function verify - * @memberof query.BeginRequest + * @memberof query.BindVariable * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - BeginRequest.verify = function verify(message) { + BindVariable.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { - let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); - if (error) - return "effective_caller_id." + error; - } - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { - let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); - if (error) - return "immediate_caller_id." + error; - } - if (message.target != null && message.hasOwnProperty("target")) { - let error = $root.query.Target.verify(message.target); - if (error) - return "target." + error; - } - if (message.options != null && message.hasOwnProperty("options")) { - let error = $root.query.ExecuteOptions.verify(message.options); - if (error) - return "options." 
+ error; + if (message.type != null && message.hasOwnProperty("type")) + switch (message.type) { + default: + return "type: enum value expected"; + case 0: + case 257: + case 770: + case 259: + case 772: + case 261: + case 774: + case 263: + case 776: + case 265: + case 778: + case 1035: + case 1036: + case 2061: + case 2062: + case 2063: + case 2064: + case 785: + case 18: + case 6163: + case 10260: + case 6165: + case 10262: + case 6167: + case 10264: + case 2073: + case 2074: + case 2075: + case 28: + case 2077: + case 2078: + case 31: + case 4128: + case 4129: + case 4130: + break; + } + if (message.value != null && message.hasOwnProperty("value")) + if (!(message.value && typeof message.value.length === "number" || $util.isString(message.value))) + return "value: buffer expected"; + if (message.values != null && message.hasOwnProperty("values")) { + if (!Array.isArray(message.values)) + return "values: array expected"; + for (let i = 0; i < message.values.length; ++i) { + let error = $root.query.Value.verify(message.values[i]); + if (error) + return "values." + error; + } } return null; }; /** - * Creates a BeginRequest message from a plain object. Also converts values to their respective internal types. + * Creates a BindVariable message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.BeginRequest + * @memberof query.BindVariable * @static * @param {Object.} object Plain object - * @returns {query.BeginRequest} BeginRequest + * @returns {query.BindVariable} BindVariable */ - BeginRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.BeginRequest) + BindVariable.fromObject = function fromObject(object) { + if (object instanceof $root.query.BindVariable) return object; - let message = new $root.query.BeginRequest(); - if (object.effective_caller_id != null) { - if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.BeginRequest.effective_caller_id: object expected"); - message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); - } - if (object.immediate_caller_id != null) { - if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.BeginRequest.immediate_caller_id: object expected"); - message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); - } - if (object.target != null) { - if (typeof object.target !== "object") - throw TypeError(".query.BeginRequest.target: object expected"); - message.target = $root.query.Target.fromObject(object.target); - } - if (object.options != null) { - if (typeof object.options !== "object") - throw TypeError(".query.BeginRequest.options: object expected"); - message.options = $root.query.ExecuteOptions.fromObject(object.options); - } - return message; - }; - - /** - * Creates a plain object from a BeginRequest message. Also converts values to other types if specified. 
- * @function toObject - * @memberof query.BeginRequest - * @static - * @param {query.BeginRequest} message BeginRequest - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - BeginRequest.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.effective_caller_id = null; - object.immediate_caller_id = null; - object.target = null; - object.options = null; - } - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) - object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) - object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); - if (message.target != null && message.hasOwnProperty("target")) - object.target = $root.query.Target.toObject(message.target, options); - if (message.options != null && message.hasOwnProperty("options")) - object.options = $root.query.ExecuteOptions.toObject(message.options, options); - return object; - }; - - /** - * Converts this BeginRequest to JSON. 
- * @function toJSON - * @memberof query.BeginRequest - * @instance - * @returns {Object.} JSON object - */ - BeginRequest.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - /** - * Gets the default type url for BeginRequest - * @function getTypeUrl - * @memberof query.BeginRequest - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - BeginRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/query.BeginRequest"; - }; - - return BeginRequest; - })(); + let message = new $root.query.BindVariable(); + switch (object.type) { + default: + if (typeof object.type === "number") { + message.type = object.type; + break; + } + break; + case "NULL_TYPE": + case 0: + message.type = 0; + break; + case "INT8": + case 257: + message.type = 257; + break; + case "UINT8": + case 770: + message.type = 770; + break; + case "INT16": + case 259: + message.type = 259; + break; + case "UINT16": + case 772: + message.type = 772; + break; + case "INT24": + case 261: + message.type = 261; + break; + case "UINT24": + case 774: + message.type = 774; + break; + case "INT32": + case 263: + message.type = 263; + break; + case "UINT32": + case 776: + message.type = 776; + break; + case "INT64": + case 265: + message.type = 265; + break; + case "UINT64": + case 778: + message.type = 778; + break; + case "FLOAT32": + case 1035: + message.type = 1035; + break; + case "FLOAT64": + case 1036: + message.type = 1036; + break; + case "TIMESTAMP": + case 2061: + message.type = 2061; + break; + case "DATE": + case 2062: + message.type = 2062; + break; + case "TIME": + case 2063: + message.type = 2063; + break; + case "DATETIME": + case 2064: + message.type = 2064; + break; + case "YEAR": + case 785: + message.type = 785; + break; 
+ case "DECIMAL": + case 18: + message.type = 18; + break; + case "TEXT": + case 6163: + message.type = 6163; + break; + case "BLOB": + case 10260: + message.type = 10260; + break; + case "VARCHAR": + case 6165: + message.type = 6165; + break; + case "VARBINARY": + case 10262: + message.type = 10262; + break; + case "CHAR": + case 6167: + message.type = 6167; + break; + case "BINARY": + case 10264: + message.type = 10264; + break; + case "BIT": + case 2073: + message.type = 2073; + break; + case "ENUM": + case 2074: + message.type = 2074; + break; + case "SET": + case 2075: + message.type = 2075; + break; + case "TUPLE": + case 28: + message.type = 28; + break; + case "GEOMETRY": + case 2077: + message.type = 2077; + break; + case "JSON": + case 2078: + message.type = 2078; + break; + case "EXPRESSION": + case 31: + message.type = 31; + break; + case "HEXNUM": + case 4128: + message.type = 4128; + break; + case "HEXVAL": + case 4129: + message.type = 4129; + break; + case "BITNUM": + case 4130: + message.type = 4130; + break; + } + if (object.value != null) + if (typeof object.value === "string") + $util.base64.decode(object.value, message.value = $util.newBuffer($util.base64.length(object.value)), 0); + else if (object.value.length >= 0) + message.value = object.value; + if (object.values) { + if (!Array.isArray(object.values)) + throw TypeError(".query.BindVariable.values: array expected"); + message.values = []; + for (let i = 0; i < object.values.length; ++i) { + if (typeof object.values[i] !== "object") + throw TypeError(".query.BindVariable.values: object expected"); + message.values[i] = $root.query.Value.fromObject(object.values[i]); + } + } + return message; + }; - query.BeginResponse = (function() { + /** + * Creates a plain object from a BindVariable message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof query.BindVariable + * @static + * @param {query.BindVariable} message BindVariable + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + BindVariable.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.values = []; + if (options.defaults) { + object.type = options.enums === String ? "NULL_TYPE" : 0; + if (options.bytes === String) + object.value = ""; + else { + object.value = []; + if (options.bytes !== Array) + object.value = $util.newBuffer(object.value); + } + } + if (message.type != null && message.hasOwnProperty("type")) + object.type = options.enums === String ? $root.query.Type[message.type] === undefined ? message.type : $root.query.Type[message.type] : message.type; + if (message.value != null && message.hasOwnProperty("value")) + object.value = options.bytes === String ? $util.base64.encode(message.value, 0, message.value.length) : options.bytes === Array ? Array.prototype.slice.call(message.value) : message.value; + if (message.values && message.values.length) { + object.values = []; + for (let j = 0; j < message.values.length; ++j) + object.values[j] = $root.query.Value.toObject(message.values[j], options); + } + return object; + }; /** - * Properties of a BeginResponse. + * Converts this BindVariable to JSON. 
+ * @function toJSON + * @memberof query.BindVariable + * @instance + * @returns {Object.} JSON object + */ + BindVariable.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for BindVariable + * @function getTypeUrl + * @memberof query.BindVariable + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + BindVariable.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/query.BindVariable"; + }; + + return BindVariable; + })(); + + query.BoundQuery = (function() { + + /** + * Properties of a BoundQuery. * @memberof query - * @interface IBeginResponse - * @property {number|Long|null} [transaction_id] BeginResponse transaction_id - * @property {topodata.ITabletAlias|null} [tablet_alias] BeginResponse tablet_alias - * @property {string|null} [session_state_changes] BeginResponse session_state_changes + * @interface IBoundQuery + * @property {string|null} [sql] BoundQuery sql + * @property {Object.|null} [bind_variables] BoundQuery bind_variables */ /** - * Constructs a new BeginResponse. + * Constructs a new BoundQuery. * @memberof query - * @classdesc Represents a BeginResponse. - * @implements IBeginResponse + * @classdesc Represents a BoundQuery. + * @implements IBoundQuery * @constructor - * @param {query.IBeginResponse=} [properties] Properties to set + * @param {query.IBoundQuery=} [properties] Properties to set */ - function BeginResponse(properties) { + function BoundQuery(properties) { + this.bind_variables = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -78198,103 +79106,111 @@ export const query = $root.query = (() => { } /** - * BeginResponse transaction_id. 
- * @member {number|Long} transaction_id - * @memberof query.BeginResponse - * @instance - */ - BeginResponse.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * BeginResponse tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof query.BeginResponse + * BoundQuery sql. + * @member {string} sql + * @memberof query.BoundQuery * @instance */ - BeginResponse.prototype.tablet_alias = null; + BoundQuery.prototype.sql = ""; /** - * BeginResponse session_state_changes. - * @member {string} session_state_changes - * @memberof query.BeginResponse + * BoundQuery bind_variables. + * @member {Object.} bind_variables + * @memberof query.BoundQuery * @instance */ - BeginResponse.prototype.session_state_changes = ""; + BoundQuery.prototype.bind_variables = $util.emptyObject; /** - * Creates a new BeginResponse instance using the specified properties. + * Creates a new BoundQuery instance using the specified properties. * @function create - * @memberof query.BeginResponse + * @memberof query.BoundQuery * @static - * @param {query.IBeginResponse=} [properties] Properties to set - * @returns {query.BeginResponse} BeginResponse instance + * @param {query.IBoundQuery=} [properties] Properties to set + * @returns {query.BoundQuery} BoundQuery instance */ - BeginResponse.create = function create(properties) { - return new BeginResponse(properties); + BoundQuery.create = function create(properties) { + return new BoundQuery(properties); }; /** - * Encodes the specified BeginResponse message. Does not implicitly {@link query.BeginResponse.verify|verify} messages. + * Encodes the specified BoundQuery message. Does not implicitly {@link query.BoundQuery.verify|verify} messages. 
* @function encode - * @memberof query.BeginResponse + * @memberof query.BoundQuery * @static - * @param {query.IBeginResponse} message BeginResponse message or plain object to encode + * @param {query.IBoundQuery} message BoundQuery message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BeginResponse.encode = function encode(message, writer) { + BoundQuery.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) - writer.uint32(/* id 1, wireType 0 =*/8).int64(message.transaction_id); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.session_state_changes != null && Object.hasOwnProperty.call(message, "session_state_changes")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.session_state_changes); + if (message.sql != null && Object.hasOwnProperty.call(message, "sql")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.sql); + if (message.bind_variables != null && Object.hasOwnProperty.call(message, "bind_variables")) + for (let keys = Object.keys(message.bind_variables), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.query.BindVariable.encode(message.bind_variables[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } return writer; }; /** - * Encodes the specified BeginResponse message, length delimited. Does not implicitly {@link query.BeginResponse.verify|verify} messages. + * Encodes the specified BoundQuery message, length delimited. Does not implicitly {@link query.BoundQuery.verify|verify} messages. 
* @function encodeDelimited - * @memberof query.BeginResponse + * @memberof query.BoundQuery * @static - * @param {query.IBeginResponse} message BeginResponse message or plain object to encode + * @param {query.IBoundQuery} message BoundQuery message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BeginResponse.encodeDelimited = function encodeDelimited(message, writer) { + BoundQuery.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a BeginResponse message from the specified reader or buffer. + * Decodes a BoundQuery message from the specified reader or buffer. * @function decode - * @memberof query.BeginResponse + * @memberof query.BoundQuery * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.BeginResponse} BeginResponse + * @returns {query.BoundQuery} BoundQuery * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BeginResponse.decode = function decode(reader, length) { + BoundQuery.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.BeginResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.BoundQuery(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.transaction_id = reader.int64(); + message.sql = reader.string(); break; } case 2: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } - case 3: { - message.session_state_changes = reader.string(); + if (message.bind_variables === $util.emptyObject) + message.bind_variables = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.query.BindVariable.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.bind_variables[key] = value; break; } default: @@ -78306,161 +79222,163 @@ export const query = $root.query = (() => { }; /** - * Decodes a BeginResponse message from the specified reader or buffer, length delimited. + * Decodes a BoundQuery message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.BeginResponse + * @memberof query.BoundQuery * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.BeginResponse} BeginResponse + * @returns {query.BoundQuery} BoundQuery * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BeginResponse.decodeDelimited = function decodeDelimited(reader) { + BoundQuery.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a BeginResponse message. + * Verifies a BoundQuery message. 
* @function verify - * @memberof query.BeginResponse + * @memberof query.BoundQuery * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - BeginResponse.verify = function verify(message) { + BoundQuery.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) - return "transaction_id: integer|Long expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; + if (message.sql != null && message.hasOwnProperty("sql")) + if (!$util.isString(message.sql)) + return "sql: string expected"; + if (message.bind_variables != null && message.hasOwnProperty("bind_variables")) { + if (!$util.isObject(message.bind_variables)) + return "bind_variables: object expected"; + let key = Object.keys(message.bind_variables); + for (let i = 0; i < key.length; ++i) { + let error = $root.query.BindVariable.verify(message.bind_variables[key[i]]); + if (error) + return "bind_variables." + error; + } } - if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) - if (!$util.isString(message.session_state_changes)) - return "session_state_changes: string expected"; return null; }; /** - * Creates a BeginResponse message from a plain object. Also converts values to their respective internal types. + * Creates a BoundQuery message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.BeginResponse + * @memberof query.BoundQuery * @static * @param {Object.} object Plain object - * @returns {query.BeginResponse} BeginResponse + * @returns {query.BoundQuery} BoundQuery */ - BeginResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.BeginResponse) + BoundQuery.fromObject = function fromObject(object) { + if (object instanceof $root.query.BoundQuery) return object; - let message = new $root.query.BeginResponse(); - if (object.transaction_id != null) - if ($util.Long) - (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; - else if (typeof object.transaction_id === "string") - message.transaction_id = parseInt(object.transaction_id, 10); - else if (typeof object.transaction_id === "number") - message.transaction_id = object.transaction_id; - else if (typeof object.transaction_id === "object") - message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".query.BeginResponse.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + let message = new $root.query.BoundQuery(); + if (object.sql != null) + message.sql = String(object.sql); + if (object.bind_variables) { + if (typeof object.bind_variables !== "object") + throw TypeError(".query.BoundQuery.bind_variables: object expected"); + message.bind_variables = {}; + for (let keys = Object.keys(object.bind_variables), i = 0; i < keys.length; ++i) { + if (typeof object.bind_variables[keys[i]] !== "object") + throw TypeError(".query.BoundQuery.bind_variables: object expected"); + message.bind_variables[keys[i]] = $root.query.BindVariable.fromObject(object.bind_variables[keys[i]]); + } } - if (object.session_state_changes != null) - message.session_state_changes = 
String(object.session_state_changes); return message; }; /** - * Creates a plain object from a BeginResponse message. Also converts values to other types if specified. + * Creates a plain object from a BoundQuery message. Also converts values to other types if specified. * @function toObject - * @memberof query.BeginResponse + * @memberof query.BoundQuery * @static - * @param {query.BeginResponse} message BeginResponse + * @param {query.BoundQuery} message BoundQuery * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - BeginResponse.toObject = function toObject(message, options) { + BoundQuery.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.transaction_id = options.longs === String ? "0" : 0; - object.tablet_alias = null; - object.session_state_changes = ""; + if (options.objects || options.defaults) + object.bind_variables = {}; + if (options.defaults) + object.sql = ""; + if (message.sql != null && message.hasOwnProperty("sql")) + object.sql = message.sql; + let keys2; + if (message.bind_variables && (keys2 = Object.keys(message.bind_variables)).length) { + object.bind_variables = {}; + for (let j = 0; j < keys2.length; ++j) + object.bind_variables[keys2[j]] = $root.query.BindVariable.toObject(message.bind_variables[keys2[j]], options); } - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (typeof message.transaction_id === "number") - object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; - else - object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? 
new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) - object.session_state_changes = message.session_state_changes; return object; }; /** - * Converts this BeginResponse to JSON. + * Converts this BoundQuery to JSON. * @function toJSON - * @memberof query.BeginResponse + * @memberof query.BoundQuery * @instance * @returns {Object.} JSON object */ - BeginResponse.prototype.toJSON = function toJSON() { + BoundQuery.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for BeginResponse + * Gets the default type url for BoundQuery * @function getTypeUrl - * @memberof query.BeginResponse + * @memberof query.BoundQuery * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - BeginResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + BoundQuery.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.BeginResponse"; + return typeUrlPrefix + "/query.BoundQuery"; }; - return BeginResponse; + return BoundQuery; })(); - query.CommitRequest = (function() { + query.ExecuteOptions = (function() { /** - * Properties of a CommitRequest. + * Properties of an ExecuteOptions. 
* @memberof query - * @interface ICommitRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] CommitRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] CommitRequest immediate_caller_id - * @property {query.ITarget|null} [target] CommitRequest target - * @property {number|Long|null} [transaction_id] CommitRequest transaction_id + * @interface IExecuteOptions + * @property {query.ExecuteOptions.IncludedFields|null} [included_fields] ExecuteOptions included_fields + * @property {boolean|null} [client_found_rows] ExecuteOptions client_found_rows + * @property {query.ExecuteOptions.Workload|null} [workload] ExecuteOptions workload + * @property {number|Long|null} [sql_select_limit] ExecuteOptions sql_select_limit + * @property {query.ExecuteOptions.TransactionIsolation|null} [transaction_isolation] ExecuteOptions transaction_isolation + * @property {boolean|null} [skip_query_plan_cache] ExecuteOptions skip_query_plan_cache + * @property {query.ExecuteOptions.PlannerVersion|null} [planner_version] ExecuteOptions planner_version + * @property {boolean|null} [has_created_temp_tables] ExecuteOptions has_created_temp_tables + * @property {query.ExecuteOptions.Consolidator|null} [consolidator] ExecuteOptions consolidator + * @property {Array.|null} [transaction_access_mode] ExecuteOptions transaction_access_mode + * @property {string|null} [WorkloadName] ExecuteOptions WorkloadName + * @property {string|null} [priority] ExecuteOptions priority + * @property {string|null} [uag_info] ExecuteOptions uag_info */ /** - * Constructs a new CommitRequest. + * Constructs a new ExecuteOptions. * @memberof query - * @classdesc Represents a CommitRequest. - * @implements ICommitRequest + * @classdesc Represents an ExecuteOptions. 
+ * @implements IExecuteOptions * @constructor - * @param {query.ICommitRequest=} [properties] Properties to set + * @param {query.IExecuteOptions=} [properties] Properties to set */ - function CommitRequest(properties) { + function ExecuteOptions(properties) { + this.transaction_access_mode = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -78468,374 +79386,254 @@ export const query = $root.query = (() => { } /** - * CommitRequest effective_caller_id. - * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.CommitRequest + * ExecuteOptions included_fields. + * @member {query.ExecuteOptions.IncludedFields} included_fields + * @memberof query.ExecuteOptions * @instance */ - CommitRequest.prototype.effective_caller_id = null; + ExecuteOptions.prototype.included_fields = 0; /** - * CommitRequest immediate_caller_id. - * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.CommitRequest + * ExecuteOptions client_found_rows. + * @member {boolean} client_found_rows + * @memberof query.ExecuteOptions * @instance */ - CommitRequest.prototype.immediate_caller_id = null; + ExecuteOptions.prototype.client_found_rows = false; /** - * CommitRequest target. - * @member {query.ITarget|null|undefined} target - * @memberof query.CommitRequest + * ExecuteOptions workload. + * @member {query.ExecuteOptions.Workload} workload + * @memberof query.ExecuteOptions * @instance */ - CommitRequest.prototype.target = null; + ExecuteOptions.prototype.workload = 0; /** - * CommitRequest transaction_id. - * @member {number|Long} transaction_id - * @memberof query.CommitRequest + * ExecuteOptions sql_select_limit. + * @member {number|Long} sql_select_limit + * @memberof query.ExecuteOptions * @instance */ - CommitRequest.prototype.transaction_id = $util.Long ? 
$util.Long.fromBits(0,0,false) : 0; - - /** - * Creates a new CommitRequest instance using the specified properties. - * @function create - * @memberof query.CommitRequest - * @static - * @param {query.ICommitRequest=} [properties] Properties to set - * @returns {query.CommitRequest} CommitRequest instance - */ - CommitRequest.create = function create(properties) { - return new CommitRequest(properties); - }; - - /** - * Encodes the specified CommitRequest message. Does not implicitly {@link query.CommitRequest.verify|verify} messages. - * @function encode - * @memberof query.CommitRequest - * @static - * @param {query.ICommitRequest} message CommitRequest message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - CommitRequest.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) - $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) - $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.target != null && Object.hasOwnProperty.call(message, "target")) - $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) - writer.uint32(/* id 4, wireType 0 =*/32).int64(message.transaction_id); - return writer; - }; - - /** - * Encodes the specified CommitRequest message, length delimited. Does not implicitly {@link query.CommitRequest.verify|verify} messages. 
- * @function encodeDelimited - * @memberof query.CommitRequest - * @static - * @param {query.ICommitRequest} message CommitRequest message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - CommitRequest.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; - - /** - * Decodes a CommitRequest message from the specified reader or buffer. - * @function decode - * @memberof query.CommitRequest - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {query.CommitRequest} CommitRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - CommitRequest.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.CommitRequest(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); - break; - } - case 2: { - message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); - break; - } - case 3: { - message.target = $root.query.Target.decode(reader, reader.uint32()); - break; - } - case 4: { - message.transaction_id = reader.int64(); - break; - } - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; + ExecuteOptions.prototype.sql_select_limit = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * Decodes a CommitRequest message from the specified reader or buffer, length delimited. 
- * @function decodeDelimited - * @memberof query.CommitRequest - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.CommitRequest} CommitRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing + * ExecuteOptions transaction_isolation. + * @member {query.ExecuteOptions.TransactionIsolation} transaction_isolation + * @memberof query.ExecuteOptions + * @instance */ - CommitRequest.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; + ExecuteOptions.prototype.transaction_isolation = 0; /** - * Verifies a CommitRequest message. - * @function verify - * @memberof query.CommitRequest - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not + * ExecuteOptions skip_query_plan_cache. + * @member {boolean} skip_query_plan_cache + * @memberof query.ExecuteOptions + * @instance */ - CommitRequest.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { - let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); - if (error) - return "effective_caller_id." + error; - } - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { - let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); - if (error) - return "immediate_caller_id." + error; - } - if (message.target != null && message.hasOwnProperty("target")) { - let error = $root.query.Target.verify(message.target); - if (error) - return "target." 
+ error; - } - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) - return "transaction_id: integer|Long expected"; - return null; - }; + ExecuteOptions.prototype.skip_query_plan_cache = false; /** - * Creates a CommitRequest message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof query.CommitRequest - * @static - * @param {Object.} object Plain object - * @returns {query.CommitRequest} CommitRequest + * ExecuteOptions planner_version. + * @member {query.ExecuteOptions.PlannerVersion} planner_version + * @memberof query.ExecuteOptions + * @instance */ - CommitRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.CommitRequest) - return object; - let message = new $root.query.CommitRequest(); - if (object.effective_caller_id != null) { - if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.CommitRequest.effective_caller_id: object expected"); - message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); - } - if (object.immediate_caller_id != null) { - if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.CommitRequest.immediate_caller_id: object expected"); - message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); - } - if (object.target != null) { - if (typeof object.target !== "object") - throw TypeError(".query.CommitRequest.target: object expected"); - message.target = $root.query.Target.fromObject(object.target); - } - if (object.transaction_id != null) - if ($util.Long) - (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; - else if (typeof object.transaction_id === "string") - message.transaction_id = 
parseInt(object.transaction_id, 10); - else if (typeof object.transaction_id === "number") - message.transaction_id = object.transaction_id; - else if (typeof object.transaction_id === "object") - message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); - return message; - }; + ExecuteOptions.prototype.planner_version = 0; /** - * Creates a plain object from a CommitRequest message. Also converts values to other types if specified. - * @function toObject - * @memberof query.CommitRequest - * @static - * @param {query.CommitRequest} message CommitRequest - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object + * ExecuteOptions has_created_temp_tables. + * @member {boolean} has_created_temp_tables + * @memberof query.ExecuteOptions + * @instance */ - CommitRequest.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.effective_caller_id = null; - object.immediate_caller_id = null; - object.target = null; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.transaction_id = options.longs === String ? 
"0" : 0; - } - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) - object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) - object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); - if (message.target != null && message.hasOwnProperty("target")) - object.target = $root.query.Target.toObject(message.target, options); - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (typeof message.transaction_id === "number") - object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; - else - object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; - return object; - }; + ExecuteOptions.prototype.has_created_temp_tables = false; /** - * Converts this CommitRequest to JSON. - * @function toJSON - * @memberof query.CommitRequest + * ExecuteOptions consolidator. + * @member {query.ExecuteOptions.Consolidator} consolidator + * @memberof query.ExecuteOptions * @instance - * @returns {Object.} JSON object */ - CommitRequest.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; + ExecuteOptions.prototype.consolidator = 0; /** - * Gets the default type url for CommitRequest - * @function getTypeUrl - * @memberof query.CommitRequest - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url + * ExecuteOptions transaction_access_mode. 
+ * @member {Array.} transaction_access_mode + * @memberof query.ExecuteOptions + * @instance */ - CommitRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/query.CommitRequest"; - }; - - return CommitRequest; - })(); - - query.CommitResponse = (function() { + ExecuteOptions.prototype.transaction_access_mode = $util.emptyArray; /** - * Properties of a CommitResponse. - * @memberof query - * @interface ICommitResponse - * @property {number|Long|null} [reserved_id] CommitResponse reserved_id + * ExecuteOptions WorkloadName. + * @member {string} WorkloadName + * @memberof query.ExecuteOptions + * @instance */ + ExecuteOptions.prototype.WorkloadName = ""; /** - * Constructs a new CommitResponse. - * @memberof query - * @classdesc Represents a CommitResponse. - * @implements ICommitResponse - * @constructor - * @param {query.ICommitResponse=} [properties] Properties to set + * ExecuteOptions priority. + * @member {string} priority + * @memberof query.ExecuteOptions + * @instance */ - function CommitResponse(properties) { - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + ExecuteOptions.prototype.priority = ""; /** - * CommitResponse reserved_id. - * @member {number|Long} reserved_id - * @memberof query.CommitResponse + * ExecuteOptions uag_info. + * @member {string} uag_info + * @memberof query.ExecuteOptions * @instance */ - CommitResponse.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + ExecuteOptions.prototype.uag_info = ""; /** - * Creates a new CommitResponse instance using the specified properties. + * Creates a new ExecuteOptions instance using the specified properties. 
* @function create - * @memberof query.CommitResponse + * @memberof query.ExecuteOptions * @static - * @param {query.ICommitResponse=} [properties] Properties to set - * @returns {query.CommitResponse} CommitResponse instance + * @param {query.IExecuteOptions=} [properties] Properties to set + * @returns {query.ExecuteOptions} ExecuteOptions instance */ - CommitResponse.create = function create(properties) { - return new CommitResponse(properties); + ExecuteOptions.create = function create(properties) { + return new ExecuteOptions(properties); }; /** - * Encodes the specified CommitResponse message. Does not implicitly {@link query.CommitResponse.verify|verify} messages. + * Encodes the specified ExecuteOptions message. Does not implicitly {@link query.ExecuteOptions.verify|verify} messages. * @function encode - * @memberof query.CommitResponse + * @memberof query.ExecuteOptions * @static - * @param {query.ICommitResponse} message CommitResponse message or plain object to encode + * @param {query.IExecuteOptions} message ExecuteOptions message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CommitResponse.encode = function encode(message, writer) { + ExecuteOptions.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) - writer.uint32(/* id 1, wireType 0 =*/8).int64(message.reserved_id); + if (message.included_fields != null && Object.hasOwnProperty.call(message, "included_fields")) + writer.uint32(/* id 4, wireType 0 =*/32).int32(message.included_fields); + if (message.client_found_rows != null && Object.hasOwnProperty.call(message, "client_found_rows")) + writer.uint32(/* id 5, wireType 0 =*/40).bool(message.client_found_rows); + if (message.workload != null && Object.hasOwnProperty.call(message, "workload")) + writer.uint32(/* id 6, wireType 0 
=*/48).int32(message.workload); + if (message.sql_select_limit != null && Object.hasOwnProperty.call(message, "sql_select_limit")) + writer.uint32(/* id 8, wireType 0 =*/64).int64(message.sql_select_limit); + if (message.transaction_isolation != null && Object.hasOwnProperty.call(message, "transaction_isolation")) + writer.uint32(/* id 9, wireType 0 =*/72).int32(message.transaction_isolation); + if (message.skip_query_plan_cache != null && Object.hasOwnProperty.call(message, "skip_query_plan_cache")) + writer.uint32(/* id 10, wireType 0 =*/80).bool(message.skip_query_plan_cache); + if (message.planner_version != null && Object.hasOwnProperty.call(message, "planner_version")) + writer.uint32(/* id 11, wireType 0 =*/88).int32(message.planner_version); + if (message.has_created_temp_tables != null && Object.hasOwnProperty.call(message, "has_created_temp_tables")) + writer.uint32(/* id 12, wireType 0 =*/96).bool(message.has_created_temp_tables); + if (message.consolidator != null && Object.hasOwnProperty.call(message, "consolidator")) + writer.uint32(/* id 13, wireType 0 =*/104).int32(message.consolidator); + if (message.transaction_access_mode != null && message.transaction_access_mode.length) { + writer.uint32(/* id 14, wireType 2 =*/114).fork(); + for (let i = 0; i < message.transaction_access_mode.length; ++i) + writer.int32(message.transaction_access_mode[i]); + writer.ldelim(); + } + if (message.WorkloadName != null && Object.hasOwnProperty.call(message, "WorkloadName")) + writer.uint32(/* id 15, wireType 2 =*/122).string(message.WorkloadName); + if (message.priority != null && Object.hasOwnProperty.call(message, "priority")) + writer.uint32(/* id 16, wireType 2 =*/130).string(message.priority); + if (message.uag_info != null && Object.hasOwnProperty.call(message, "uag_info")) + writer.uint32(/* id 88, wireType 2 =*/706).string(message.uag_info); return writer; }; /** - * Encodes the specified CommitResponse message, length delimited. 
Does not implicitly {@link query.CommitResponse.verify|verify} messages. + * Encodes the specified ExecuteOptions message, length delimited. Does not implicitly {@link query.ExecuteOptions.verify|verify} messages. * @function encodeDelimited - * @memberof query.CommitResponse + * @memberof query.ExecuteOptions * @static - * @param {query.ICommitResponse} message CommitResponse message or plain object to encode + * @param {query.IExecuteOptions} message ExecuteOptions message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CommitResponse.encodeDelimited = function encodeDelimited(message, writer) { + ExecuteOptions.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a CommitResponse message from the specified reader or buffer. + * Decodes an ExecuteOptions message from the specified reader or buffer. * @function decode - * @memberof query.CommitResponse + * @memberof query.ExecuteOptions * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.CommitResponse} CommitResponse + * @returns {query.ExecuteOptions} ExecuteOptions * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CommitResponse.decode = function decode(reader, length) { + ExecuteOptions.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.CommitResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.ExecuteOptions(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.reserved_id = reader.int64(); + case 4: { + message.included_fields = reader.int32(); + break; + } + case 5: { + message.client_found_rows = reader.bool(); + break; + } + case 6: { + message.workload = reader.int32(); + break; + } + case 8: { + message.sql_select_limit = reader.int64(); + break; + } + case 9: { + message.transaction_isolation = reader.int32(); + break; + } + case 10: { + message.skip_query_plan_cache = reader.bool(); + break; + } + case 11: { + message.planner_version = reader.int32(); + break; + } + case 12: { + message.has_created_temp_tables = reader.bool(); + break; + } + case 13: { + message.consolidator = reader.int32(); + break; + } + case 14: { + if (!(message.transaction_access_mode && message.transaction_access_mode.length)) + message.transaction_access_mode = []; + if ((tag & 7) === 2) { + let end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.transaction_access_mode.push(reader.int32()); + } else + message.transaction_access_mode.push(reader.int32()); + break; + } + case 15: { + message.WorkloadName = reader.string(); + break; + } + case 16: { + message.priority = reader.string(); + break; + } + case 88: { + message.uag_info = reader.string(); break; } default: @@ -78847,514 +79645,789 @@ export const query = $root.query = (() => { }; /** - * Decodes a CommitResponse message from the specified reader or buffer, length delimited. + * Decodes an ExecuteOptions message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.CommitResponse + * @memberof query.ExecuteOptions * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.CommitResponse} CommitResponse + * @returns {query.ExecuteOptions} ExecuteOptions * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CommitResponse.decodeDelimited = function decodeDelimited(reader) { + ExecuteOptions.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a CommitResponse message. + * Verifies an ExecuteOptions message. * @function verify - * @memberof query.CommitResponse + * @memberof query.ExecuteOptions * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - CommitResponse.verify = function verify(message) { + ExecuteOptions.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) - return "reserved_id: integer|Long expected"; - return null; - }; - - /** - * Creates a CommitResponse message from a plain object. Also converts values to their respective internal types. 
- * @function fromObject - * @memberof query.CommitResponse - * @static - * @param {Object.} object Plain object - * @returns {query.CommitResponse} CommitResponse - */ - CommitResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.CommitResponse) - return object; - let message = new $root.query.CommitResponse(); - if (object.reserved_id != null) - if ($util.Long) - (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; - else if (typeof object.reserved_id === "string") - message.reserved_id = parseInt(object.reserved_id, 10); - else if (typeof object.reserved_id === "number") - message.reserved_id = object.reserved_id; - else if (typeof object.reserved_id === "object") - message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); - return message; - }; - - /** - * Creates a plain object from a CommitResponse message. Also converts values to other types if specified. - * @function toObject - * @memberof query.CommitResponse - * @static - * @param {query.CommitResponse} message CommitResponse - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - CommitResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) + if (message.included_fields != null && message.hasOwnProperty("included_fields")) + switch (message.included_fields) { + default: + return "included_fields: enum value expected"; + case 0: + case 1: + case 2: + break; + } + if (message.client_found_rows != null && message.hasOwnProperty("client_found_rows")) + if (typeof message.client_found_rows !== "boolean") + return "client_found_rows: boolean expected"; + if (message.workload != null && message.hasOwnProperty("workload")) + switch (message.workload) { + default: + return "workload: enum value expected"; + case 0: + case 1: + case 2: + case 3: + break; + } + if 
(message.sql_select_limit != null && message.hasOwnProperty("sql_select_limit")) + if (!$util.isInteger(message.sql_select_limit) && !(message.sql_select_limit && $util.isInteger(message.sql_select_limit.low) && $util.isInteger(message.sql_select_limit.high))) + return "sql_select_limit: integer|Long expected"; + if (message.transaction_isolation != null && message.hasOwnProperty("transaction_isolation")) + switch (message.transaction_isolation) { + default: + return "transaction_isolation: enum value expected"; + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + break; + } + if (message.skip_query_plan_cache != null && message.hasOwnProperty("skip_query_plan_cache")) + if (typeof message.skip_query_plan_cache !== "boolean") + return "skip_query_plan_cache: boolean expected"; + if (message.planner_version != null && message.hasOwnProperty("planner_version")) + switch (message.planner_version) { + default: + return "planner_version: enum value expected"; + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + break; + } + if (message.has_created_temp_tables != null && message.hasOwnProperty("has_created_temp_tables")) + if (typeof message.has_created_temp_tables !== "boolean") + return "has_created_temp_tables: boolean expected"; + if (message.consolidator != null && message.hasOwnProperty("consolidator")) + switch (message.consolidator) { + default: + return "consolidator: enum value expected"; + case 0: + case 1: + case 2: + case 3: + break; + } + if (message.transaction_access_mode != null && message.hasOwnProperty("transaction_access_mode")) { + if (!Array.isArray(message.transaction_access_mode)) + return "transaction_access_mode: array expected"; + for (let i = 0; i < message.transaction_access_mode.length; ++i) + switch (message.transaction_access_mode[i]) { + default: + return "transaction_access_mode: enum value[] expected"; + case 0: + case 1: + case 2: + break; + } + } + if (message.WorkloadName != null && 
message.hasOwnProperty("WorkloadName")) + if (!$util.isString(message.WorkloadName)) + return "WorkloadName: string expected"; + if (message.priority != null && message.hasOwnProperty("priority")) + if (!$util.isString(message.priority)) + return "priority: string expected"; + if (message.uag_info != null && message.hasOwnProperty("uag_info")) + if (!$util.isString(message.uag_info)) + return "uag_info: string expected"; + return null; + }; + + /** + * Creates an ExecuteOptions message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof query.ExecuteOptions + * @static + * @param {Object.} object Plain object + * @returns {query.ExecuteOptions} ExecuteOptions + */ + ExecuteOptions.fromObject = function fromObject(object) { + if (object instanceof $root.query.ExecuteOptions) + return object; + let message = new $root.query.ExecuteOptions(); + switch (object.included_fields) { + default: + if (typeof object.included_fields === "number") { + message.included_fields = object.included_fields; + break; + } + break; + case "TYPE_AND_NAME": + case 0: + message.included_fields = 0; + break; + case "TYPE_ONLY": + case 1: + message.included_fields = 1; + break; + case "ALL": + case 2: + message.included_fields = 2; + break; + } + if (object.client_found_rows != null) + message.client_found_rows = Boolean(object.client_found_rows); + switch (object.workload) { + default: + if (typeof object.workload === "number") { + message.workload = object.workload; + break; + } + break; + case "UNSPECIFIED": + case 0: + message.workload = 0; + break; + case "OLTP": + case 1: + message.workload = 1; + break; + case "OLAP": + case 2: + message.workload = 2; + break; + case "DBA": + case 3: + message.workload = 3; + break; + } + if (object.sql_select_limit != null) + if ($util.Long) + (message.sql_select_limit = $util.Long.fromValue(object.sql_select_limit)).unsigned = false; + else if (typeof object.sql_select_limit === 
"string") + message.sql_select_limit = parseInt(object.sql_select_limit, 10); + else if (typeof object.sql_select_limit === "number") + message.sql_select_limit = object.sql_select_limit; + else if (typeof object.sql_select_limit === "object") + message.sql_select_limit = new $util.LongBits(object.sql_select_limit.low >>> 0, object.sql_select_limit.high >>> 0).toNumber(); + switch (object.transaction_isolation) { + default: + if (typeof object.transaction_isolation === "number") { + message.transaction_isolation = object.transaction_isolation; + break; + } + break; + case "DEFAULT": + case 0: + message.transaction_isolation = 0; + break; + case "REPEATABLE_READ": + case 1: + message.transaction_isolation = 1; + break; + case "READ_COMMITTED": + case 2: + message.transaction_isolation = 2; + break; + case "READ_UNCOMMITTED": + case 3: + message.transaction_isolation = 3; + break; + case "SERIALIZABLE": + case 4: + message.transaction_isolation = 4; + break; + case "CONSISTENT_SNAPSHOT_READ_ONLY": + case 5: + message.transaction_isolation = 5; + break; + case "AUTOCOMMIT": + case 6: + message.transaction_isolation = 6; + break; + } + if (object.skip_query_plan_cache != null) + message.skip_query_plan_cache = Boolean(object.skip_query_plan_cache); + switch (object.planner_version) { + default: + if (typeof object.planner_version === "number") { + message.planner_version = object.planner_version; + break; + } + break; + case "DEFAULT_PLANNER": + case 0: + message.planner_version = 0; + break; + case "V3": + case 1: + message.planner_version = 1; + break; + case "Gen4": + case 2: + message.planner_version = 2; + break; + case "Gen4Greedy": + case 3: + message.planner_version = 3; + break; + case "Gen4Left2Right": + case 4: + message.planner_version = 4; + break; + case "Gen4WithFallback": + case 5: + message.planner_version = 5; + break; + case "Gen4CompareV3": + case 6: + message.planner_version = 6; + break; + case "V3Insert": + case 7: + message.planner_version = 7; 
+ break; + } + if (object.has_created_temp_tables != null) + message.has_created_temp_tables = Boolean(object.has_created_temp_tables); + switch (object.consolidator) { + default: + if (typeof object.consolidator === "number") { + message.consolidator = object.consolidator; + break; + } + break; + case "CONSOLIDATOR_UNSPECIFIED": + case 0: + message.consolidator = 0; + break; + case "CONSOLIDATOR_DISABLED": + case 1: + message.consolidator = 1; + break; + case "CONSOLIDATOR_ENABLED": + case 2: + message.consolidator = 2; + break; + case "CONSOLIDATOR_ENABLED_REPLICAS": + case 3: + message.consolidator = 3; + break; + } + if (object.transaction_access_mode) { + if (!Array.isArray(object.transaction_access_mode)) + throw TypeError(".query.ExecuteOptions.transaction_access_mode: array expected"); + message.transaction_access_mode = []; + for (let i = 0; i < object.transaction_access_mode.length; ++i) + switch (object.transaction_access_mode[i]) { + default: + if (typeof object.transaction_access_mode[i] === "number") { + message.transaction_access_mode[i] = object.transaction_access_mode[i]; + break; + } + case "CONSISTENT_SNAPSHOT": + case 0: + message.transaction_access_mode[i] = 0; + break; + case "READ_WRITE": + case 1: + message.transaction_access_mode[i] = 1; + break; + case "READ_ONLY": + case 2: + message.transaction_access_mode[i] = 2; + break; + } + } + if (object.WorkloadName != null) + message.WorkloadName = String(object.WorkloadName); + if (object.priority != null) + message.priority = String(object.priority); + if (object.uag_info != null) + message.uag_info = String(object.uag_info); + return message; + }; + + /** + * Creates a plain object from an ExecuteOptions message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof query.ExecuteOptions + * @static + * @param {query.ExecuteOptions} message ExecuteOptions + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ExecuteOptions.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.transaction_access_mode = []; + if (options.defaults) { + object.included_fields = options.enums === String ? "TYPE_AND_NAME" : 0; + object.client_found_rows = false; + object.workload = options.enums === String ? "UNSPECIFIED" : 0; if ($util.Long) { let long = new $util.Long(0, 0, false); - object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + object.sql_select_limit = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; } else - object.reserved_id = options.longs === String ? "0" : 0; - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (typeof message.reserved_id === "number") - object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; + object.sql_select_limit = options.longs === String ? "0" : 0; + object.transaction_isolation = options.enums === String ? "DEFAULT" : 0; + object.skip_query_plan_cache = false; + object.planner_version = options.enums === String ? "DEFAULT_PLANNER" : 0; + object.has_created_temp_tables = false; + object.consolidator = options.enums === String ? "CONSOLIDATOR_UNSPECIFIED" : 0; + object.WorkloadName = ""; + object.priority = ""; + object.uag_info = ""; + } + if (message.included_fields != null && message.hasOwnProperty("included_fields")) + object.included_fields = options.enums === String ? $root.query.ExecuteOptions.IncludedFields[message.included_fields] === undefined ? 
message.included_fields : $root.query.ExecuteOptions.IncludedFields[message.included_fields] : message.included_fields; + if (message.client_found_rows != null && message.hasOwnProperty("client_found_rows")) + object.client_found_rows = message.client_found_rows; + if (message.workload != null && message.hasOwnProperty("workload")) + object.workload = options.enums === String ? $root.query.ExecuteOptions.Workload[message.workload] === undefined ? message.workload : $root.query.ExecuteOptions.Workload[message.workload] : message.workload; + if (message.sql_select_limit != null && message.hasOwnProperty("sql_select_limit")) + if (typeof message.sql_select_limit === "number") + object.sql_select_limit = options.longs === String ? String(message.sql_select_limit) : message.sql_select_limit; else - object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; + object.sql_select_limit = options.longs === String ? $util.Long.prototype.toString.call(message.sql_select_limit) : options.longs === Number ? new $util.LongBits(message.sql_select_limit.low >>> 0, message.sql_select_limit.high >>> 0).toNumber() : message.sql_select_limit; + if (message.transaction_isolation != null && message.hasOwnProperty("transaction_isolation")) + object.transaction_isolation = options.enums === String ? $root.query.ExecuteOptions.TransactionIsolation[message.transaction_isolation] === undefined ? 
message.transaction_isolation : $root.query.ExecuteOptions.TransactionIsolation[message.transaction_isolation] : message.transaction_isolation; + if (message.skip_query_plan_cache != null && message.hasOwnProperty("skip_query_plan_cache")) + object.skip_query_plan_cache = message.skip_query_plan_cache; + if (message.planner_version != null && message.hasOwnProperty("planner_version")) + object.planner_version = options.enums === String ? $root.query.ExecuteOptions.PlannerVersion[message.planner_version] === undefined ? message.planner_version : $root.query.ExecuteOptions.PlannerVersion[message.planner_version] : message.planner_version; + if (message.has_created_temp_tables != null && message.hasOwnProperty("has_created_temp_tables")) + object.has_created_temp_tables = message.has_created_temp_tables; + if (message.consolidator != null && message.hasOwnProperty("consolidator")) + object.consolidator = options.enums === String ? $root.query.ExecuteOptions.Consolidator[message.consolidator] === undefined ? message.consolidator : $root.query.ExecuteOptions.Consolidator[message.consolidator] : message.consolidator; + if (message.transaction_access_mode && message.transaction_access_mode.length) { + object.transaction_access_mode = []; + for (let j = 0; j < message.transaction_access_mode.length; ++j) + object.transaction_access_mode[j] = options.enums === String ? $root.query.ExecuteOptions.TransactionAccessMode[message.transaction_access_mode[j]] === undefined ? 
message.transaction_access_mode[j] : $root.query.ExecuteOptions.TransactionAccessMode[message.transaction_access_mode[j]] : message.transaction_access_mode[j]; + } + if (message.WorkloadName != null && message.hasOwnProperty("WorkloadName")) + object.WorkloadName = message.WorkloadName; + if (message.priority != null && message.hasOwnProperty("priority")) + object.priority = message.priority; + if (message.uag_info != null && message.hasOwnProperty("uag_info")) + object.uag_info = message.uag_info; return object; }; /** - * Converts this CommitResponse to JSON. + * Converts this ExecuteOptions to JSON. * @function toJSON - * @memberof query.CommitResponse + * @memberof query.ExecuteOptions * @instance * @returns {Object.} JSON object */ - CommitResponse.prototype.toJSON = function toJSON() { + ExecuteOptions.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for CommitResponse + * Gets the default type url for ExecuteOptions * @function getTypeUrl - * @memberof query.CommitResponse + * @memberof query.ExecuteOptions * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - CommitResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ExecuteOptions.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.CommitResponse"; + return typeUrlPrefix + "/query.ExecuteOptions"; }; - return CommitResponse; - })(); - - query.RollbackRequest = (function() { - /** - * Properties of a RollbackRequest. 
- * @memberof query - * @interface IRollbackRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] RollbackRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] RollbackRequest immediate_caller_id - * @property {query.ITarget|null} [target] RollbackRequest target - * @property {number|Long|null} [transaction_id] RollbackRequest transaction_id + * IncludedFields enum. + * @name query.ExecuteOptions.IncludedFields + * @enum {number} + * @property {number} TYPE_AND_NAME=0 TYPE_AND_NAME value + * @property {number} TYPE_ONLY=1 TYPE_ONLY value + * @property {number} ALL=2 ALL value */ + ExecuteOptions.IncludedFields = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "TYPE_AND_NAME"] = 0; + values[valuesById[1] = "TYPE_ONLY"] = 1; + values[valuesById[2] = "ALL"] = 2; + return values; + })(); /** - * Constructs a new RollbackRequest. - * @memberof query - * @classdesc Represents a RollbackRequest. - * @implements IRollbackRequest - * @constructor - * @param {query.IRollbackRequest=} [properties] Properties to set + * Workload enum. + * @name query.ExecuteOptions.Workload + * @enum {number} + * @property {number} UNSPECIFIED=0 UNSPECIFIED value + * @property {number} OLTP=1 OLTP value + * @property {number} OLAP=2 OLAP value + * @property {number} DBA=3 DBA value */ - function RollbackRequest(properties) { - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + ExecuteOptions.Workload = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "UNSPECIFIED"] = 0; + values[valuesById[1] = "OLTP"] = 1; + values[valuesById[2] = "OLAP"] = 2; + values[valuesById[3] = "DBA"] = 3; + return values; + })(); /** - * RollbackRequest effective_caller_id. 
- * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.RollbackRequest - * @instance + * TransactionIsolation enum. + * @name query.ExecuteOptions.TransactionIsolation + * @enum {number} + * @property {number} DEFAULT=0 DEFAULT value + * @property {number} REPEATABLE_READ=1 REPEATABLE_READ value + * @property {number} READ_COMMITTED=2 READ_COMMITTED value + * @property {number} READ_UNCOMMITTED=3 READ_UNCOMMITTED value + * @property {number} SERIALIZABLE=4 SERIALIZABLE value + * @property {number} CONSISTENT_SNAPSHOT_READ_ONLY=5 CONSISTENT_SNAPSHOT_READ_ONLY value + * @property {number} AUTOCOMMIT=6 AUTOCOMMIT value */ - RollbackRequest.prototype.effective_caller_id = null; + ExecuteOptions.TransactionIsolation = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "DEFAULT"] = 0; + values[valuesById[1] = "REPEATABLE_READ"] = 1; + values[valuesById[2] = "READ_COMMITTED"] = 2; + values[valuesById[3] = "READ_UNCOMMITTED"] = 3; + values[valuesById[4] = "SERIALIZABLE"] = 4; + values[valuesById[5] = "CONSISTENT_SNAPSHOT_READ_ONLY"] = 5; + values[valuesById[6] = "AUTOCOMMIT"] = 6; + return values; + })(); /** - * RollbackRequest immediate_caller_id. - * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.RollbackRequest - * @instance + * PlannerVersion enum. 
+ * @name query.ExecuteOptions.PlannerVersion + * @enum {number} + * @property {number} DEFAULT_PLANNER=0 DEFAULT_PLANNER value + * @property {number} V3=1 V3 value + * @property {number} Gen4=2 Gen4 value + * @property {number} Gen4Greedy=3 Gen4Greedy value + * @property {number} Gen4Left2Right=4 Gen4Left2Right value + * @property {number} Gen4WithFallback=5 Gen4WithFallback value + * @property {number} Gen4CompareV3=6 Gen4CompareV3 value + * @property {number} V3Insert=7 V3Insert value */ - RollbackRequest.prototype.immediate_caller_id = null; + ExecuteOptions.PlannerVersion = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "DEFAULT_PLANNER"] = 0; + values[valuesById[1] = "V3"] = 1; + values[valuesById[2] = "Gen4"] = 2; + values[valuesById[3] = "Gen4Greedy"] = 3; + values[valuesById[4] = "Gen4Left2Right"] = 4; + values[valuesById[5] = "Gen4WithFallback"] = 5; + values[valuesById[6] = "Gen4CompareV3"] = 6; + values[valuesById[7] = "V3Insert"] = 7; + return values; + })(); /** - * RollbackRequest target. - * @member {query.ITarget|null|undefined} target - * @memberof query.RollbackRequest - * @instance + * Consolidator enum. 
+ * @name query.ExecuteOptions.Consolidator + * @enum {number} + * @property {number} CONSOLIDATOR_UNSPECIFIED=0 CONSOLIDATOR_UNSPECIFIED value + * @property {number} CONSOLIDATOR_DISABLED=1 CONSOLIDATOR_DISABLED value + * @property {number} CONSOLIDATOR_ENABLED=2 CONSOLIDATOR_ENABLED value + * @property {number} CONSOLIDATOR_ENABLED_REPLICAS=3 CONSOLIDATOR_ENABLED_REPLICAS value */ - RollbackRequest.prototype.target = null; + ExecuteOptions.Consolidator = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "CONSOLIDATOR_UNSPECIFIED"] = 0; + values[valuesById[1] = "CONSOLIDATOR_DISABLED"] = 1; + values[valuesById[2] = "CONSOLIDATOR_ENABLED"] = 2; + values[valuesById[3] = "CONSOLIDATOR_ENABLED_REPLICAS"] = 3; + return values; + })(); /** - * RollbackRequest transaction_id. - * @member {number|Long} transaction_id - * @memberof query.RollbackRequest - * @instance - */ - RollbackRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * Creates a new RollbackRequest instance using the specified properties. - * @function create - * @memberof query.RollbackRequest - * @static - * @param {query.IRollbackRequest=} [properties] Properties to set - * @returns {query.RollbackRequest} RollbackRequest instance + * TransactionAccessMode enum. 
+ * @name query.ExecuteOptions.TransactionAccessMode + * @enum {number} + * @property {number} CONSISTENT_SNAPSHOT=0 CONSISTENT_SNAPSHOT value + * @property {number} READ_WRITE=1 READ_WRITE value + * @property {number} READ_ONLY=2 READ_ONLY value */ - RollbackRequest.create = function create(properties) { - return new RollbackRequest(properties); - }; + ExecuteOptions.TransactionAccessMode = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "CONSISTENT_SNAPSHOT"] = 0; + values[valuesById[1] = "READ_WRITE"] = 1; + values[valuesById[2] = "READ_ONLY"] = 2; + return values; + })(); + + return ExecuteOptions; + })(); + + query.Field = (function() { /** - * Encodes the specified RollbackRequest message. Does not implicitly {@link query.RollbackRequest.verify|verify} messages. - * @function encode - * @memberof query.RollbackRequest - * @static - * @param {query.IRollbackRequest} message RollbackRequest message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer + * Properties of a Field. 
+ * @memberof query + * @interface IField + * @property {string|null} [name] Field name + * @property {query.Type|null} [type] Field type + * @property {string|null} [table] Field table + * @property {string|null} [org_table] Field org_table + * @property {string|null} [database] Field database + * @property {string|null} [org_name] Field org_name + * @property {number|null} [column_length] Field column_length + * @property {number|null} [charset] Field charset + * @property {number|null} [decimals] Field decimals + * @property {number|null} [flags] Field flags + * @property {string|null} [column_type] Field column_type */ - RollbackRequest.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) - $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) - $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.target != null && Object.hasOwnProperty.call(message, "target")) - $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) - writer.uint32(/* id 4, wireType 0 =*/32).int64(message.transaction_id); - return writer; - }; /** - * Encodes the specified RollbackRequest message, length delimited. Does not implicitly {@link query.RollbackRequest.verify|verify} messages. 
- * @function encodeDelimited - * @memberof query.RollbackRequest - * @static - * @param {query.IRollbackRequest} message RollbackRequest message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer + * Constructs a new Field. + * @memberof query + * @classdesc Represents a Field. + * @implements IField + * @constructor + * @param {query.IField=} [properties] Properties to set */ - RollbackRequest.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; + function Field(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } /** - * Decodes a RollbackRequest message from the specified reader or buffer. - * @function decode - * @memberof query.RollbackRequest - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {query.RollbackRequest} RollbackRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing + * Field name. + * @member {string} name + * @memberof query.Field + * @instance */ - RollbackRequest.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.RollbackRequest(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); - break; - } - case 2: { - message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); - break; - } - case 3: { - message.target = $root.query.Target.decode(reader, reader.uint32()); - break; - } - case 4: { - message.transaction_id = reader.int64(); - break; - } - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; + Field.prototype.name = ""; /** - * Decodes a RollbackRequest message from the specified reader or buffer, length delimited. - * @function decodeDelimited - * @memberof query.RollbackRequest - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.RollbackRequest} RollbackRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing + * Field type. + * @member {query.Type} type + * @memberof query.Field + * @instance */ - RollbackRequest.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; + Field.prototype.type = 0; /** - * Verifies a RollbackRequest message. - * @function verify - * @memberof query.RollbackRequest - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not + * Field table. 
+ * @member {string} table + * @memberof query.Field + * @instance */ - RollbackRequest.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { - let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); - if (error) - return "effective_caller_id." + error; - } - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { - let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); - if (error) - return "immediate_caller_id." + error; - } - if (message.target != null && message.hasOwnProperty("target")) { - let error = $root.query.Target.verify(message.target); - if (error) - return "target." + error; - } - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) - return "transaction_id: integer|Long expected"; - return null; - }; + Field.prototype.table = ""; /** - * Creates a RollbackRequest message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof query.RollbackRequest - * @static - * @param {Object.} object Plain object - * @returns {query.RollbackRequest} RollbackRequest + * Field org_table. 
+ * @member {string} org_table + * @memberof query.Field + * @instance */ - RollbackRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.RollbackRequest) - return object; - let message = new $root.query.RollbackRequest(); - if (object.effective_caller_id != null) { - if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.RollbackRequest.effective_caller_id: object expected"); - message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); - } - if (object.immediate_caller_id != null) { - if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.RollbackRequest.immediate_caller_id: object expected"); - message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); - } - if (object.target != null) { - if (typeof object.target !== "object") - throw TypeError(".query.RollbackRequest.target: object expected"); - message.target = $root.query.Target.fromObject(object.target); - } - if (object.transaction_id != null) - if ($util.Long) - (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; - else if (typeof object.transaction_id === "string") - message.transaction_id = parseInt(object.transaction_id, 10); - else if (typeof object.transaction_id === "number") - message.transaction_id = object.transaction_id; - else if (typeof object.transaction_id === "object") - message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); - return message; - }; + Field.prototype.org_table = ""; /** - * Creates a plain object from a RollbackRequest message. Also converts values to other types if specified. - * @function toObject - * @memberof query.RollbackRequest - * @static - * @param {query.RollbackRequest} message RollbackRequest - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object + * Field database. 
+ * @member {string} database + * @memberof query.Field + * @instance */ - RollbackRequest.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.effective_caller_id = null; - object.immediate_caller_id = null; - object.target = null; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.transaction_id = options.longs === String ? "0" : 0; - } - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) - object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) - object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); - if (message.target != null && message.hasOwnProperty("target")) - object.target = $root.query.Target.toObject(message.target, options); - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (typeof message.transaction_id === "number") - object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; - else - object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; - return object; - }; + Field.prototype.database = ""; /** - * Converts this RollbackRequest to JSON. - * @function toJSON - * @memberof query.RollbackRequest + * Field org_name. 
+ * @member {string} org_name + * @memberof query.Field * @instance - * @returns {Object.} JSON object */ - RollbackRequest.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; + Field.prototype.org_name = ""; /** - * Gets the default type url for RollbackRequest - * @function getTypeUrl - * @memberof query.RollbackRequest - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url + * Field column_length. + * @member {number} column_length + * @memberof query.Field + * @instance */ - RollbackRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/query.RollbackRequest"; - }; - - return RollbackRequest; - })(); + Field.prototype.column_length = 0; - query.RollbackResponse = (function() { + /** + * Field charset. + * @member {number} charset + * @memberof query.Field + * @instance + */ + Field.prototype.charset = 0; /** - * Properties of a RollbackResponse. - * @memberof query - * @interface IRollbackResponse - * @property {number|Long|null} [reserved_id] RollbackResponse reserved_id + * Field decimals. + * @member {number} decimals + * @memberof query.Field + * @instance */ + Field.prototype.decimals = 0; /** - * Constructs a new RollbackResponse. - * @memberof query - * @classdesc Represents a RollbackResponse. - * @implements IRollbackResponse - * @constructor - * @param {query.IRollbackResponse=} [properties] Properties to set + * Field flags. + * @member {number} flags + * @memberof query.Field + * @instance */ - function RollbackResponse(properties) { - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + Field.prototype.flags = 0; /** - * RollbackResponse reserved_id. 
- * @member {number|Long} reserved_id - * @memberof query.RollbackResponse + * Field column_type. + * @member {string} column_type + * @memberof query.Field * @instance */ - RollbackResponse.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + Field.prototype.column_type = ""; /** - * Creates a new RollbackResponse instance using the specified properties. + * Creates a new Field instance using the specified properties. * @function create - * @memberof query.RollbackResponse + * @memberof query.Field * @static - * @param {query.IRollbackResponse=} [properties] Properties to set - * @returns {query.RollbackResponse} RollbackResponse instance + * @param {query.IField=} [properties] Properties to set + * @returns {query.Field} Field instance */ - RollbackResponse.create = function create(properties) { - return new RollbackResponse(properties); + Field.create = function create(properties) { + return new Field(properties); }; /** - * Encodes the specified RollbackResponse message. Does not implicitly {@link query.RollbackResponse.verify|verify} messages. + * Encodes the specified Field message. Does not implicitly {@link query.Field.verify|verify} messages. 
* @function encode - * @memberof query.RollbackResponse + * @memberof query.Field * @static - * @param {query.IRollbackResponse} message RollbackResponse message or plain object to encode + * @param {query.IField} message Field message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RollbackResponse.encode = function encode(message, writer) { + Field.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) - writer.uint32(/* id 1, wireType 0 =*/8).int64(message.reserved_id); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.type != null && Object.hasOwnProperty.call(message, "type")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.type); + if (message.table != null && Object.hasOwnProperty.call(message, "table")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.table); + if (message.org_table != null && Object.hasOwnProperty.call(message, "org_table")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.org_table); + if (message.database != null && Object.hasOwnProperty.call(message, "database")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.database); + if (message.org_name != null && Object.hasOwnProperty.call(message, "org_name")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.org_name); + if (message.column_length != null && Object.hasOwnProperty.call(message, "column_length")) + writer.uint32(/* id 7, wireType 0 =*/56).uint32(message.column_length); + if (message.charset != null && Object.hasOwnProperty.call(message, "charset")) + writer.uint32(/* id 8, wireType 0 =*/64).uint32(message.charset); + if (message.decimals != null && Object.hasOwnProperty.call(message, "decimals")) + writer.uint32(/* id 9, wireType 0 
=*/72).uint32(message.decimals); + if (message.flags != null && Object.hasOwnProperty.call(message, "flags")) + writer.uint32(/* id 10, wireType 0 =*/80).uint32(message.flags); + if (message.column_type != null && Object.hasOwnProperty.call(message, "column_type")) + writer.uint32(/* id 11, wireType 2 =*/90).string(message.column_type); return writer; }; /** - * Encodes the specified RollbackResponse message, length delimited. Does not implicitly {@link query.RollbackResponse.verify|verify} messages. + * Encodes the specified Field message, length delimited. Does not implicitly {@link query.Field.verify|verify} messages. * @function encodeDelimited - * @memberof query.RollbackResponse + * @memberof query.Field * @static - * @param {query.IRollbackResponse} message RollbackResponse message or plain object to encode + * @param {query.IField} message Field message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RollbackResponse.encodeDelimited = function encodeDelimited(message, writer) { + Field.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RollbackResponse message from the specified reader or buffer. + * Decodes a Field message from the specified reader or buffer. 
* @function decode - * @memberof query.RollbackResponse + * @memberof query.Field * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.RollbackResponse} RollbackResponse + * @returns {query.Field} Field * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RollbackResponse.decode = function decode(reader, length) { + Field.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.RollbackResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.Field(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.reserved_id = reader.int64(); + message.name = reader.string(); + break; + } + case 2: { + message.type = reader.int32(); + break; + } + case 3: { + message.table = reader.string(); + break; + } + case 4: { + message.org_table = reader.string(); + break; + } + case 5: { + message.database = reader.string(); + break; + } + case 6: { + message.org_name = reader.string(); + break; + } + case 7: { + message.column_length = reader.uint32(); + break; + } + case 8: { + message.charset = reader.uint32(); + break; + } + case 9: { + message.decimals = reader.uint32(); + break; + } + case 10: { + message.flags = reader.uint32(); + break; + } + case 11: { + message.column_type = reader.string(); break; } default: @@ -79366,140 +80439,389 @@ export const query = $root.query = (() => { }; /** - * Decodes a RollbackResponse message from the specified reader or buffer, length delimited. + * Decodes a Field message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.RollbackResponse + * @memberof query.Field * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.RollbackResponse} RollbackResponse + * @returns {query.Field} Field * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RollbackResponse.decodeDelimited = function decodeDelimited(reader) { + Field.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RollbackResponse message. + * Verifies a Field message. * @function verify - * @memberof query.RollbackResponse + * @memberof query.Field * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RollbackResponse.verify = function verify(message) { + Field.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) - return "reserved_id: integer|Long expected"; - return null; - }; - - /** - * Creates a RollbackResponse message from a plain object. Also converts values to their respective internal types. 
- * @function fromObject - * @memberof query.RollbackResponse - * @static - * @param {Object.} object Plain object - * @returns {query.RollbackResponse} RollbackResponse - */ - RollbackResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.RollbackResponse) - return object; - let message = new $root.query.RollbackResponse(); - if (object.reserved_id != null) - if ($util.Long) - (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; - else if (typeof object.reserved_id === "string") - message.reserved_id = parseInt(object.reserved_id, 10); - else if (typeof object.reserved_id === "number") - message.reserved_id = object.reserved_id; - else if (typeof object.reserved_id === "object") - message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); - return message; - }; - - /** - * Creates a plain object from a RollbackResponse message. Also converts values to other types if specified. - * @function toObject - * @memberof query.RollbackResponse - * @static - * @param {query.RollbackResponse} message RollbackResponse - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - RollbackResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.reserved_id = options.longs === String ? "0" : 0; - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (typeof message.reserved_id === "number") - object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; - else - object.reserved_id = options.longs === String ? 
$util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; - return object; - }; - - /** - * Converts this RollbackResponse to JSON. - * @function toJSON - * @memberof query.RollbackResponse + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.type != null && message.hasOwnProperty("type")) + switch (message.type) { + default: + return "type: enum value expected"; + case 0: + case 257: + case 770: + case 259: + case 772: + case 261: + case 774: + case 263: + case 776: + case 265: + case 778: + case 1035: + case 1036: + case 2061: + case 2062: + case 2063: + case 2064: + case 785: + case 18: + case 6163: + case 10260: + case 6165: + case 10262: + case 6167: + case 10264: + case 2073: + case 2074: + case 2075: + case 28: + case 2077: + case 2078: + case 31: + case 4128: + case 4129: + case 4130: + break; + } + if (message.table != null && message.hasOwnProperty("table")) + if (!$util.isString(message.table)) + return "table: string expected"; + if (message.org_table != null && message.hasOwnProperty("org_table")) + if (!$util.isString(message.org_table)) + return "org_table: string expected"; + if (message.database != null && message.hasOwnProperty("database")) + if (!$util.isString(message.database)) + return "database: string expected"; + if (message.org_name != null && message.hasOwnProperty("org_name")) + if (!$util.isString(message.org_name)) + return "org_name: string expected"; + if (message.column_length != null && message.hasOwnProperty("column_length")) + if (!$util.isInteger(message.column_length)) + return "column_length: integer expected"; + if (message.charset != null && message.hasOwnProperty("charset")) + if (!$util.isInteger(message.charset)) + return "charset: integer expected"; + if (message.decimals != null && 
message.hasOwnProperty("decimals")) + if (!$util.isInteger(message.decimals)) + return "decimals: integer expected"; + if (message.flags != null && message.hasOwnProperty("flags")) + if (!$util.isInteger(message.flags)) + return "flags: integer expected"; + if (message.column_type != null && message.hasOwnProperty("column_type")) + if (!$util.isString(message.column_type)) + return "column_type: string expected"; + return null; + }; + + /** + * Creates a Field message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof query.Field + * @static + * @param {Object.} object Plain object + * @returns {query.Field} Field + */ + Field.fromObject = function fromObject(object) { + if (object instanceof $root.query.Field) + return object; + let message = new $root.query.Field(); + if (object.name != null) + message.name = String(object.name); + switch (object.type) { + default: + if (typeof object.type === "number") { + message.type = object.type; + break; + } + break; + case "NULL_TYPE": + case 0: + message.type = 0; + break; + case "INT8": + case 257: + message.type = 257; + break; + case "UINT8": + case 770: + message.type = 770; + break; + case "INT16": + case 259: + message.type = 259; + break; + case "UINT16": + case 772: + message.type = 772; + break; + case "INT24": + case 261: + message.type = 261; + break; + case "UINT24": + case 774: + message.type = 774; + break; + case "INT32": + case 263: + message.type = 263; + break; + case "UINT32": + case 776: + message.type = 776; + break; + case "INT64": + case 265: + message.type = 265; + break; + case "UINT64": + case 778: + message.type = 778; + break; + case "FLOAT32": + case 1035: + message.type = 1035; + break; + case "FLOAT64": + case 1036: + message.type = 1036; + break; + case "TIMESTAMP": + case 2061: + message.type = 2061; + break; + case "DATE": + case 2062: + message.type = 2062; + break; + case "TIME": + case 2063: + message.type = 2063; + 
break; + case "DATETIME": + case 2064: + message.type = 2064; + break; + case "YEAR": + case 785: + message.type = 785; + break; + case "DECIMAL": + case 18: + message.type = 18; + break; + case "TEXT": + case 6163: + message.type = 6163; + break; + case "BLOB": + case 10260: + message.type = 10260; + break; + case "VARCHAR": + case 6165: + message.type = 6165; + break; + case "VARBINARY": + case 10262: + message.type = 10262; + break; + case "CHAR": + case 6167: + message.type = 6167; + break; + case "BINARY": + case 10264: + message.type = 10264; + break; + case "BIT": + case 2073: + message.type = 2073; + break; + case "ENUM": + case 2074: + message.type = 2074; + break; + case "SET": + case 2075: + message.type = 2075; + break; + case "TUPLE": + case 28: + message.type = 28; + break; + case "GEOMETRY": + case 2077: + message.type = 2077; + break; + case "JSON": + case 2078: + message.type = 2078; + break; + case "EXPRESSION": + case 31: + message.type = 31; + break; + case "HEXNUM": + case 4128: + message.type = 4128; + break; + case "HEXVAL": + case 4129: + message.type = 4129; + break; + case "BITNUM": + case 4130: + message.type = 4130; + break; + } + if (object.table != null) + message.table = String(object.table); + if (object.org_table != null) + message.org_table = String(object.org_table); + if (object.database != null) + message.database = String(object.database); + if (object.org_name != null) + message.org_name = String(object.org_name); + if (object.column_length != null) + message.column_length = object.column_length >>> 0; + if (object.charset != null) + message.charset = object.charset >>> 0; + if (object.decimals != null) + message.decimals = object.decimals >>> 0; + if (object.flags != null) + message.flags = object.flags >>> 0; + if (object.column_type != null) + message.column_type = String(object.column_type); + return message; + }; + + /** + * Creates a plain object from a Field message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof query.Field + * @static + * @param {query.Field} message Field + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Field.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.name = ""; + object.type = options.enums === String ? "NULL_TYPE" : 0; + object.table = ""; + object.org_table = ""; + object.database = ""; + object.org_name = ""; + object.column_length = 0; + object.charset = 0; + object.decimals = 0; + object.flags = 0; + object.column_type = ""; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.type != null && message.hasOwnProperty("type")) + object.type = options.enums === String ? $root.query.Type[message.type] === undefined ? message.type : $root.query.Type[message.type] : message.type; + if (message.table != null && message.hasOwnProperty("table")) + object.table = message.table; + if (message.org_table != null && message.hasOwnProperty("org_table")) + object.org_table = message.org_table; + if (message.database != null && message.hasOwnProperty("database")) + object.database = message.database; + if (message.org_name != null && message.hasOwnProperty("org_name")) + object.org_name = message.org_name; + if (message.column_length != null && message.hasOwnProperty("column_length")) + object.column_length = message.column_length; + if (message.charset != null && message.hasOwnProperty("charset")) + object.charset = message.charset; + if (message.decimals != null && message.hasOwnProperty("decimals")) + object.decimals = message.decimals; + if (message.flags != null && message.hasOwnProperty("flags")) + object.flags = message.flags; + if (message.column_type != null && message.hasOwnProperty("column_type")) + object.column_type = message.column_type; + return object; + }; + + /** + * Converts this Field to JSON. 
+ * @function toJSON + * @memberof query.Field * @instance * @returns {Object.} JSON object */ - RollbackResponse.prototype.toJSON = function toJSON() { + Field.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RollbackResponse + * Gets the default type url for Field * @function getTypeUrl - * @memberof query.RollbackResponse + * @memberof query.Field * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RollbackResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + Field.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.RollbackResponse"; + return typeUrlPrefix + "/query.Field"; }; - return RollbackResponse; + return Field; })(); - query.PrepareRequest = (function() { + query.Row = (function() { /** - * Properties of a PrepareRequest. + * Properties of a Row. * @memberof query - * @interface IPrepareRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] PrepareRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] PrepareRequest immediate_caller_id - * @property {query.ITarget|null} [target] PrepareRequest target - * @property {number|Long|null} [transaction_id] PrepareRequest transaction_id - * @property {string|null} [dtid] PrepareRequest dtid + * @interface IRow + * @property {Array.|null} [lengths] Row lengths + * @property {Uint8Array|null} [values] Row values */ /** - * Constructs a new PrepareRequest. + * Constructs a new Row. * @memberof query - * @classdesc Represents a PrepareRequest. - * @implements IPrepareRequest + * @classdesc Represents a Row. 
+ * @implements IRow * @constructor - * @param {query.IPrepareRequest=} [properties] Properties to set + * @param {query.IRow=} [properties] Properties to set */ - function PrepareRequest(properties) { + function Row(properties) { + this.lengths = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -79507,131 +80829,100 @@ export const query = $root.query = (() => { } /** - * PrepareRequest effective_caller_id. - * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.PrepareRequest - * @instance - */ - PrepareRequest.prototype.effective_caller_id = null; - - /** - * PrepareRequest immediate_caller_id. - * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.PrepareRequest - * @instance - */ - PrepareRequest.prototype.immediate_caller_id = null; - - /** - * PrepareRequest target. - * @member {query.ITarget|null|undefined} target - * @memberof query.PrepareRequest - * @instance - */ - PrepareRequest.prototype.target = null; - - /** - * PrepareRequest transaction_id. - * @member {number|Long} transaction_id - * @memberof query.PrepareRequest + * Row lengths. + * @member {Array.} lengths + * @memberof query.Row * @instance */ - PrepareRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + Row.prototype.lengths = $util.emptyArray; /** - * PrepareRequest dtid. - * @member {string} dtid - * @memberof query.PrepareRequest + * Row values. + * @member {Uint8Array} values + * @memberof query.Row * @instance */ - PrepareRequest.prototype.dtid = ""; + Row.prototype.values = $util.newBuffer([]); /** - * Creates a new PrepareRequest instance using the specified properties. + * Creates a new Row instance using the specified properties. 
* @function create - * @memberof query.PrepareRequest + * @memberof query.Row * @static - * @param {query.IPrepareRequest=} [properties] Properties to set - * @returns {query.PrepareRequest} PrepareRequest instance + * @param {query.IRow=} [properties] Properties to set + * @returns {query.Row} Row instance */ - PrepareRequest.create = function create(properties) { - return new PrepareRequest(properties); + Row.create = function create(properties) { + return new Row(properties); }; /** - * Encodes the specified PrepareRequest message. Does not implicitly {@link query.PrepareRequest.verify|verify} messages. + * Encodes the specified Row message. Does not implicitly {@link query.Row.verify|verify} messages. * @function encode - * @memberof query.PrepareRequest + * @memberof query.Row * @static - * @param {query.IPrepareRequest} message PrepareRequest message or plain object to encode + * @param {query.IRow} message Row message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - PrepareRequest.encode = function encode(message, writer) { + Row.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) - $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) - $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.target != null && Object.hasOwnProperty.call(message, "target")) - $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) - writer.uint32(/* id 4, wireType 0 
=*/32).int64(message.transaction_id); - if (message.dtid != null && Object.hasOwnProperty.call(message, "dtid")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.dtid); + if (message.lengths != null && message.lengths.length) { + writer.uint32(/* id 1, wireType 2 =*/10).fork(); + for (let i = 0; i < message.lengths.length; ++i) + writer.sint64(message.lengths[i]); + writer.ldelim(); + } + if (message.values != null && Object.hasOwnProperty.call(message, "values")) + writer.uint32(/* id 2, wireType 2 =*/18).bytes(message.values); return writer; }; /** - * Encodes the specified PrepareRequest message, length delimited. Does not implicitly {@link query.PrepareRequest.verify|verify} messages. + * Encodes the specified Row message, length delimited. Does not implicitly {@link query.Row.verify|verify} messages. * @function encodeDelimited - * @memberof query.PrepareRequest + * @memberof query.Row * @static - * @param {query.IPrepareRequest} message PrepareRequest message or plain object to encode + * @param {query.IRow} message Row message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - PrepareRequest.encodeDelimited = function encodeDelimited(message, writer) { + Row.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a PrepareRequest message from the specified reader or buffer. + * Decodes a Row message from the specified reader or buffer. 
* @function decode - * @memberof query.PrepareRequest + * @memberof query.Row * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.PrepareRequest} PrepareRequest + * @returns {query.Row} Row * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PrepareRequest.decode = function decode(reader, length) { + Row.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.PrepareRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.Row(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); + if (!(message.lengths && message.lengths.length)) + message.lengths = []; + if ((tag & 7) === 2) { + let end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.lengths.push(reader.sint64()); + } else + message.lengths.push(reader.sint64()); break; } case 2: { - message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); - break; - } - case 3: { - message.target = $root.query.Target.decode(reader, reader.uint32()); - break; - } - case 4: { - message.transaction_id = reader.int64(); - break; - } - case 5: { - message.dtid = reader.string(); + message.values = reader.bytes(); break; } default: @@ -79643,183 +80934,169 @@ export const query = $root.query = (() => { }; /** - * Decodes a PrepareRequest message from the specified reader or buffer, length delimited. + * Decodes a Row message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.PrepareRequest + * @memberof query.Row * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.PrepareRequest} PrepareRequest + * @returns {query.Row} Row * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PrepareRequest.decodeDelimited = function decodeDelimited(reader) { + Row.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a PrepareRequest message. + * Verifies a Row message. * @function verify - * @memberof query.PrepareRequest + * @memberof query.Row * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - PrepareRequest.verify = function verify(message) { + Row.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { - let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); - if (error) - return "effective_caller_id." + error; - } - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { - let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); - if (error) - return "immediate_caller_id." + error; - } - if (message.target != null && message.hasOwnProperty("target")) { - let error = $root.query.Target.verify(message.target); - if (error) - return "target." 
+ error; + if (message.lengths != null && message.hasOwnProperty("lengths")) { + if (!Array.isArray(message.lengths)) + return "lengths: array expected"; + for (let i = 0; i < message.lengths.length; ++i) + if (!$util.isInteger(message.lengths[i]) && !(message.lengths[i] && $util.isInteger(message.lengths[i].low) && $util.isInteger(message.lengths[i].high))) + return "lengths: integer|Long[] expected"; } - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) - return "transaction_id: integer|Long expected"; - if (message.dtid != null && message.hasOwnProperty("dtid")) - if (!$util.isString(message.dtid)) - return "dtid: string expected"; + if (message.values != null && message.hasOwnProperty("values")) + if (!(message.values && typeof message.values.length === "number" || $util.isString(message.values))) + return "values: buffer expected"; return null; }; /** - * Creates a PrepareRequest message from a plain object. Also converts values to their respective internal types. + * Creates a Row message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.PrepareRequest + * @memberof query.Row * @static * @param {Object.} object Plain object - * @returns {query.PrepareRequest} PrepareRequest + * @returns {query.Row} Row */ - PrepareRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.PrepareRequest) + Row.fromObject = function fromObject(object) { + if (object instanceof $root.query.Row) return object; - let message = new $root.query.PrepareRequest(); - if (object.effective_caller_id != null) { - if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.PrepareRequest.effective_caller_id: object expected"); - message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); - } - if (object.immediate_caller_id != null) { - if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.PrepareRequest.immediate_caller_id: object expected"); - message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); - } - if (object.target != null) { - if (typeof object.target !== "object") - throw TypeError(".query.PrepareRequest.target: object expected"); - message.target = $root.query.Target.fromObject(object.target); + let message = new $root.query.Row(); + if (object.lengths) { + if (!Array.isArray(object.lengths)) + throw TypeError(".query.Row.lengths: array expected"); + message.lengths = []; + for (let i = 0; i < object.lengths.length; ++i) + if ($util.Long) + (message.lengths[i] = $util.Long.fromValue(object.lengths[i])).unsigned = false; + else if (typeof object.lengths[i] === "string") + message.lengths[i] = parseInt(object.lengths[i], 10); + else if (typeof object.lengths[i] === "number") + message.lengths[i] = object.lengths[i]; + else if (typeof object.lengths[i] === "object") + message.lengths[i] = new $util.LongBits(object.lengths[i].low >>> 0, object.lengths[i].high >>> 0).toNumber(); } - if (object.transaction_id != null) - if ($util.Long) - 
(message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; - else if (typeof object.transaction_id === "string") - message.transaction_id = parseInt(object.transaction_id, 10); - else if (typeof object.transaction_id === "number") - message.transaction_id = object.transaction_id; - else if (typeof object.transaction_id === "object") - message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); - if (object.dtid != null) - message.dtid = String(object.dtid); + if (object.values != null) + if (typeof object.values === "string") + $util.base64.decode(object.values, message.values = $util.newBuffer($util.base64.length(object.values)), 0); + else if (object.values.length >= 0) + message.values = object.values; return message; }; /** - * Creates a plain object from a PrepareRequest message. Also converts values to other types if specified. + * Creates a plain object from a Row message. Also converts values to other types if specified. * @function toObject - * @memberof query.PrepareRequest + * @memberof query.Row * @static - * @param {query.PrepareRequest} message PrepareRequest + * @param {query.Row} message Row * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - PrepareRequest.toObject = function toObject(message, options) { + Row.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.effective_caller_id = null; - object.immediate_caller_id = null; - object.target = null; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.transaction_id = options.longs === String ? 
"0" : 0; - object.dtid = ""; + if (options.arrays || options.defaults) + object.lengths = []; + if (options.defaults) + if (options.bytes === String) + object.values = ""; + else { + object.values = []; + if (options.bytes !== Array) + object.values = $util.newBuffer(object.values); + } + if (message.lengths && message.lengths.length) { + object.lengths = []; + for (let j = 0; j < message.lengths.length; ++j) + if (typeof message.lengths[j] === "number") + object.lengths[j] = options.longs === String ? String(message.lengths[j]) : message.lengths[j]; + else + object.lengths[j] = options.longs === String ? $util.Long.prototype.toString.call(message.lengths[j]) : options.longs === Number ? new $util.LongBits(message.lengths[j].low >>> 0, message.lengths[j].high >>> 0).toNumber() : message.lengths[j]; } - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) - object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) - object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); - if (message.target != null && message.hasOwnProperty("target")) - object.target = $root.query.Target.toObject(message.target, options); - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (typeof message.transaction_id === "number") - object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; - else - object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? 
new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; - if (message.dtid != null && message.hasOwnProperty("dtid")) - object.dtid = message.dtid; + if (message.values != null && message.hasOwnProperty("values")) + object.values = options.bytes === String ? $util.base64.encode(message.values, 0, message.values.length) : options.bytes === Array ? Array.prototype.slice.call(message.values) : message.values; return object; }; /** - * Converts this PrepareRequest to JSON. + * Converts this Row to JSON. * @function toJSON - * @memberof query.PrepareRequest + * @memberof query.Row * @instance * @returns {Object.} JSON object */ - PrepareRequest.prototype.toJSON = function toJSON() { + Row.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for PrepareRequest + * Gets the default type url for Row * @function getTypeUrl - * @memberof query.PrepareRequest + * @memberof query.Row * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - PrepareRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + Row.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.PrepareRequest"; + return typeUrlPrefix + "/query.Row"; }; - return PrepareRequest; + return Row; })(); - query.PrepareResponse = (function() { + query.QueryResult = (function() { /** - * Properties of a PrepareResponse. + * Properties of a QueryResult. 
* @memberof query - * @interface IPrepareResponse + * @interface IQueryResult + * @property {Array.|null} [fields] QueryResult fields + * @property {number|Long|null} [rows_affected] QueryResult rows_affected + * @property {number|Long|null} [insert_id] QueryResult insert_id + * @property {Array.|null} [rows] QueryResult rows + * @property {string|null} [info] QueryResult info + * @property {string|null} [session_state_changes] QueryResult session_state_changes */ /** - * Constructs a new PrepareResponse. + * Constructs a new QueryResult. * @memberof query - * @classdesc Represents a PrepareResponse. - * @implements IPrepareResponse + * @classdesc Represents a QueryResult. + * @implements IQueryResult * @constructor - * @param {query.IPrepareResponse=} [properties] Properties to set + * @param {query.IQueryResult=} [properties] Properties to set */ - function PrepareResponse(properties) { + function QueryResult(properties) { + this.fields = []; + this.rows = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -79827,63 +81104,153 @@ export const query = $root.query = (() => { } /** - * Creates a new PrepareResponse instance using the specified properties. + * QueryResult fields. + * @member {Array.} fields + * @memberof query.QueryResult + * @instance + */ + QueryResult.prototype.fields = $util.emptyArray; + + /** + * QueryResult rows_affected. + * @member {number|Long} rows_affected + * @memberof query.QueryResult + * @instance + */ + QueryResult.prototype.rows_affected = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + + /** + * QueryResult insert_id. + * @member {number|Long} insert_id + * @memberof query.QueryResult + * @instance + */ + QueryResult.prototype.insert_id = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + + /** + * QueryResult rows. 
+ * @member {Array.} rows + * @memberof query.QueryResult + * @instance + */ + QueryResult.prototype.rows = $util.emptyArray; + + /** + * QueryResult info. + * @member {string} info + * @memberof query.QueryResult + * @instance + */ + QueryResult.prototype.info = ""; + + /** + * QueryResult session_state_changes. + * @member {string} session_state_changes + * @memberof query.QueryResult + * @instance + */ + QueryResult.prototype.session_state_changes = ""; + + /** + * Creates a new QueryResult instance using the specified properties. * @function create - * @memberof query.PrepareResponse + * @memberof query.QueryResult * @static - * @param {query.IPrepareResponse=} [properties] Properties to set - * @returns {query.PrepareResponse} PrepareResponse instance + * @param {query.IQueryResult=} [properties] Properties to set + * @returns {query.QueryResult} QueryResult instance */ - PrepareResponse.create = function create(properties) { - return new PrepareResponse(properties); + QueryResult.create = function create(properties) { + return new QueryResult(properties); }; /** - * Encodes the specified PrepareResponse message. Does not implicitly {@link query.PrepareResponse.verify|verify} messages. + * Encodes the specified QueryResult message. Does not implicitly {@link query.QueryResult.verify|verify} messages. 
* @function encode - * @memberof query.PrepareResponse + * @memberof query.QueryResult * @static - * @param {query.IPrepareResponse} message PrepareResponse message or plain object to encode + * @param {query.IQueryResult} message QueryResult message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - PrepareResponse.encode = function encode(message, writer) { + QueryResult.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.fields != null && message.fields.length) + for (let i = 0; i < message.fields.length; ++i) + $root.query.Field.encode(message.fields[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.rows_affected != null && Object.hasOwnProperty.call(message, "rows_affected")) + writer.uint32(/* id 2, wireType 0 =*/16).uint64(message.rows_affected); + if (message.insert_id != null && Object.hasOwnProperty.call(message, "insert_id")) + writer.uint32(/* id 3, wireType 0 =*/24).uint64(message.insert_id); + if (message.rows != null && message.rows.length) + for (let i = 0; i < message.rows.length; ++i) + $root.query.Row.encode(message.rows[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.info != null && Object.hasOwnProperty.call(message, "info")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.info); + if (message.session_state_changes != null && Object.hasOwnProperty.call(message, "session_state_changes")) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.session_state_changes); return writer; }; /** - * Encodes the specified PrepareResponse message, length delimited. Does not implicitly {@link query.PrepareResponse.verify|verify} messages. + * Encodes the specified QueryResult message, length delimited. Does not implicitly {@link query.QueryResult.verify|verify} messages. 
* @function encodeDelimited - * @memberof query.PrepareResponse + * @memberof query.QueryResult * @static - * @param {query.IPrepareResponse} message PrepareResponse message or plain object to encode + * @param {query.IQueryResult} message QueryResult message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - PrepareResponse.encodeDelimited = function encodeDelimited(message, writer) { + QueryResult.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a PrepareResponse message from the specified reader or buffer. + * Decodes a QueryResult message from the specified reader or buffer. * @function decode - * @memberof query.PrepareResponse + * @memberof query.QueryResult * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.PrepareResponse} PrepareResponse + * @returns {query.QueryResult} QueryResult * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PrepareResponse.decode = function decode(reader, length) { + QueryResult.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.PrepareResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.QueryResult(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + if (!(message.fields && message.fields.length)) + message.fields = []; + message.fields.push($root.query.Field.decode(reader, reader.uint32())); + break; + } + case 2: { + message.rows_affected = reader.uint64(); + break; + } + case 3: { + message.insert_id = reader.uint64(); + break; + } + case 4: { + if (!(message.rows && message.rows.length)) + message.rows = []; + message.rows.push($root.query.Row.decode(reader, reader.uint32())); + break; + } + case 6: { + message.info = reader.string(); + break; + } + case 7: { + message.session_state_changes = reader.string(); + break; + } default: reader.skipType(tag & 7); break; @@ -79893,112 +81260,228 @@ export const query = $root.query = (() => { }; /** - * Decodes a PrepareResponse message from the specified reader or buffer, length delimited. + * Decodes a QueryResult message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.PrepareResponse + * @memberof query.QueryResult * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.PrepareResponse} PrepareResponse + * @returns {query.QueryResult} QueryResult * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PrepareResponse.decodeDelimited = function decodeDelimited(reader) { + QueryResult.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a PrepareResponse message. + * Verifies a QueryResult message. 
* @function verify - * @memberof query.PrepareResponse + * @memberof query.QueryResult * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - PrepareResponse.verify = function verify(message) { + QueryResult.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.fields != null && message.hasOwnProperty("fields")) { + if (!Array.isArray(message.fields)) + return "fields: array expected"; + for (let i = 0; i < message.fields.length; ++i) { + let error = $root.query.Field.verify(message.fields[i]); + if (error) + return "fields." + error; + } + } + if (message.rows_affected != null && message.hasOwnProperty("rows_affected")) + if (!$util.isInteger(message.rows_affected) && !(message.rows_affected && $util.isInteger(message.rows_affected.low) && $util.isInteger(message.rows_affected.high))) + return "rows_affected: integer|Long expected"; + if (message.insert_id != null && message.hasOwnProperty("insert_id")) + if (!$util.isInteger(message.insert_id) && !(message.insert_id && $util.isInteger(message.insert_id.low) && $util.isInteger(message.insert_id.high))) + return "insert_id: integer|Long expected"; + if (message.rows != null && message.hasOwnProperty("rows")) { + if (!Array.isArray(message.rows)) + return "rows: array expected"; + for (let i = 0; i < message.rows.length; ++i) { + let error = $root.query.Row.verify(message.rows[i]); + if (error) + return "rows." + error; + } + } + if (message.info != null && message.hasOwnProperty("info")) + if (!$util.isString(message.info)) + return "info: string expected"; + if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) + if (!$util.isString(message.session_state_changes)) + return "session_state_changes: string expected"; return null; }; /** - * Creates a PrepareResponse message from a plain object. 
Also converts values to their respective internal types. + * Creates a QueryResult message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.PrepareResponse + * @memberof query.QueryResult * @static * @param {Object.} object Plain object - * @returns {query.PrepareResponse} PrepareResponse + * @returns {query.QueryResult} QueryResult */ - PrepareResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.PrepareResponse) + QueryResult.fromObject = function fromObject(object) { + if (object instanceof $root.query.QueryResult) return object; - return new $root.query.PrepareResponse(); + let message = new $root.query.QueryResult(); + if (object.fields) { + if (!Array.isArray(object.fields)) + throw TypeError(".query.QueryResult.fields: array expected"); + message.fields = []; + for (let i = 0; i < object.fields.length; ++i) { + if (typeof object.fields[i] !== "object") + throw TypeError(".query.QueryResult.fields: object expected"); + message.fields[i] = $root.query.Field.fromObject(object.fields[i]); + } + } + if (object.rows_affected != null) + if ($util.Long) + (message.rows_affected = $util.Long.fromValue(object.rows_affected)).unsigned = true; + else if (typeof object.rows_affected === "string") + message.rows_affected = parseInt(object.rows_affected, 10); + else if (typeof object.rows_affected === "number") + message.rows_affected = object.rows_affected; + else if (typeof object.rows_affected === "object") + message.rows_affected = new $util.LongBits(object.rows_affected.low >>> 0, object.rows_affected.high >>> 0).toNumber(true); + if (object.insert_id != null) + if ($util.Long) + (message.insert_id = $util.Long.fromValue(object.insert_id)).unsigned = true; + else if (typeof object.insert_id === "string") + message.insert_id = parseInt(object.insert_id, 10); + else if (typeof object.insert_id === "number") + message.insert_id = object.insert_id; + else if (typeof 
object.insert_id === "object") + message.insert_id = new $util.LongBits(object.insert_id.low >>> 0, object.insert_id.high >>> 0).toNumber(true); + if (object.rows) { + if (!Array.isArray(object.rows)) + throw TypeError(".query.QueryResult.rows: array expected"); + message.rows = []; + for (let i = 0; i < object.rows.length; ++i) { + if (typeof object.rows[i] !== "object") + throw TypeError(".query.QueryResult.rows: object expected"); + message.rows[i] = $root.query.Row.fromObject(object.rows[i]); + } + } + if (object.info != null) + message.info = String(object.info); + if (object.session_state_changes != null) + message.session_state_changes = String(object.session_state_changes); + return message; }; /** - * Creates a plain object from a PrepareResponse message. Also converts values to other types if specified. + * Creates a plain object from a QueryResult message. Also converts values to other types if specified. * @function toObject - * @memberof query.PrepareResponse + * @memberof query.QueryResult * @static - * @param {query.PrepareResponse} message PrepareResponse + * @param {query.QueryResult} message QueryResult * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - PrepareResponse.toObject = function toObject() { - return {}; + QueryResult.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) { + object.fields = []; + object.rows = []; + } + if (options.defaults) { + if ($util.Long) { + let long = new $util.Long(0, 0, true); + object.rows_affected = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.rows_affected = options.longs === String ? "0" : 0; + if ($util.Long) { + let long = new $util.Long(0, 0, true); + object.insert_id = options.longs === String ? long.toString() : options.longs === Number ? 
long.toNumber() : long; + } else + object.insert_id = options.longs === String ? "0" : 0; + object.info = ""; + object.session_state_changes = ""; + } + if (message.fields && message.fields.length) { + object.fields = []; + for (let j = 0; j < message.fields.length; ++j) + object.fields[j] = $root.query.Field.toObject(message.fields[j], options); + } + if (message.rows_affected != null && message.hasOwnProperty("rows_affected")) + if (typeof message.rows_affected === "number") + object.rows_affected = options.longs === String ? String(message.rows_affected) : message.rows_affected; + else + object.rows_affected = options.longs === String ? $util.Long.prototype.toString.call(message.rows_affected) : options.longs === Number ? new $util.LongBits(message.rows_affected.low >>> 0, message.rows_affected.high >>> 0).toNumber(true) : message.rows_affected; + if (message.insert_id != null && message.hasOwnProperty("insert_id")) + if (typeof message.insert_id === "number") + object.insert_id = options.longs === String ? String(message.insert_id) : message.insert_id; + else + object.insert_id = options.longs === String ? $util.Long.prototype.toString.call(message.insert_id) : options.longs === Number ? new $util.LongBits(message.insert_id.low >>> 0, message.insert_id.high >>> 0).toNumber(true) : message.insert_id; + if (message.rows && message.rows.length) { + object.rows = []; + for (let j = 0; j < message.rows.length; ++j) + object.rows[j] = $root.query.Row.toObject(message.rows[j], options); + } + if (message.info != null && message.hasOwnProperty("info")) + object.info = message.info; + if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) + object.session_state_changes = message.session_state_changes; + return object; }; /** - * Converts this PrepareResponse to JSON. + * Converts this QueryResult to JSON. 
* @function toJSON - * @memberof query.PrepareResponse + * @memberof query.QueryResult * @instance * @returns {Object.} JSON object */ - PrepareResponse.prototype.toJSON = function toJSON() { + QueryResult.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for PrepareResponse + * Gets the default type url for QueryResult * @function getTypeUrl - * @memberof query.PrepareResponse + * @memberof query.QueryResult * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - PrepareResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + QueryResult.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.PrepareResponse"; + return typeUrlPrefix + "/query.QueryResult"; }; - return PrepareResponse; + return QueryResult; })(); - query.CommitPreparedRequest = (function() { + query.QueryWarning = (function() { /** - * Properties of a CommitPreparedRequest. + * Properties of a QueryWarning. * @memberof query - * @interface ICommitPreparedRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] CommitPreparedRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] CommitPreparedRequest immediate_caller_id - * @property {query.ITarget|null} [target] CommitPreparedRequest target - * @property {string|null} [dtid] CommitPreparedRequest dtid + * @interface IQueryWarning + * @property {number|null} [code] QueryWarning code + * @property {string|null} [message] QueryWarning message */ /** - * Constructs a new CommitPreparedRequest. + * Constructs a new QueryWarning. * @memberof query - * @classdesc Represents a CommitPreparedRequest. - * @implements ICommitPreparedRequest + * @classdesc Represents a QueryWarning. 
+ * @implements IQueryWarning * @constructor - * @param {query.ICommitPreparedRequest=} [properties] Properties to set + * @param {query.IQueryWarning=} [properties] Properties to set */ - function CommitPreparedRequest(properties) { + function QueryWarning(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -80006,117 +81489,89 @@ export const query = $root.query = (() => { } /** - * CommitPreparedRequest effective_caller_id. - * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.CommitPreparedRequest - * @instance - */ - CommitPreparedRequest.prototype.effective_caller_id = null; - - /** - * CommitPreparedRequest immediate_caller_id. - * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.CommitPreparedRequest - * @instance - */ - CommitPreparedRequest.prototype.immediate_caller_id = null; - - /** - * CommitPreparedRequest target. - * @member {query.ITarget|null|undefined} target - * @memberof query.CommitPreparedRequest + * QueryWarning code. + * @member {number} code + * @memberof query.QueryWarning * @instance */ - CommitPreparedRequest.prototype.target = null; + QueryWarning.prototype.code = 0; /** - * CommitPreparedRequest dtid. - * @member {string} dtid - * @memberof query.CommitPreparedRequest + * QueryWarning message. + * @member {string} message + * @memberof query.QueryWarning * @instance */ - CommitPreparedRequest.prototype.dtid = ""; + QueryWarning.prototype.message = ""; /** - * Creates a new CommitPreparedRequest instance using the specified properties. + * Creates a new QueryWarning instance using the specified properties. 
* @function create - * @memberof query.CommitPreparedRequest + * @memberof query.QueryWarning * @static - * @param {query.ICommitPreparedRequest=} [properties] Properties to set - * @returns {query.CommitPreparedRequest} CommitPreparedRequest instance + * @param {query.IQueryWarning=} [properties] Properties to set + * @returns {query.QueryWarning} QueryWarning instance */ - CommitPreparedRequest.create = function create(properties) { - return new CommitPreparedRequest(properties); + QueryWarning.create = function create(properties) { + return new QueryWarning(properties); }; /** - * Encodes the specified CommitPreparedRequest message. Does not implicitly {@link query.CommitPreparedRequest.verify|verify} messages. + * Encodes the specified QueryWarning message. Does not implicitly {@link query.QueryWarning.verify|verify} messages. * @function encode - * @memberof query.CommitPreparedRequest + * @memberof query.QueryWarning * @static - * @param {query.ICommitPreparedRequest} message CommitPreparedRequest message or plain object to encode + * @param {query.IQueryWarning} message QueryWarning message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CommitPreparedRequest.encode = function encode(message, writer) { + QueryWarning.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) - $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) - $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.target != null && Object.hasOwnProperty.call(message, "target")) - $root.query.Target.encode(message.target, writer.uint32(/* id 
3, wireType 2 =*/26).fork()).ldelim(); - if (message.dtid != null && Object.hasOwnProperty.call(message, "dtid")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.dtid); + if (message.code != null && Object.hasOwnProperty.call(message, "code")) + writer.uint32(/* id 1, wireType 0 =*/8).uint32(message.code); + if (message.message != null && Object.hasOwnProperty.call(message, "message")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.message); return writer; }; /** - * Encodes the specified CommitPreparedRequest message, length delimited. Does not implicitly {@link query.CommitPreparedRequest.verify|verify} messages. + * Encodes the specified QueryWarning message, length delimited. Does not implicitly {@link query.QueryWarning.verify|verify} messages. * @function encodeDelimited - * @memberof query.CommitPreparedRequest + * @memberof query.QueryWarning * @static - * @param {query.ICommitPreparedRequest} message CommitPreparedRequest message or plain object to encode + * @param {query.IQueryWarning} message QueryWarning message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CommitPreparedRequest.encodeDelimited = function encodeDelimited(message, writer) { + QueryWarning.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a CommitPreparedRequest message from the specified reader or buffer. + * Decodes a QueryWarning message from the specified reader or buffer. 
* @function decode - * @memberof query.CommitPreparedRequest + * @memberof query.QueryWarning * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.CommitPreparedRequest} CommitPreparedRequest + * @returns {query.QueryWarning} QueryWarning * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CommitPreparedRequest.decode = function decode(reader, length) { + QueryWarning.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.CommitPreparedRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.QueryWarning(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); + message.code = reader.uint32(); break; } case 2: { - message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); - break; - } - case 3: { - message.target = $root.query.Target.decode(reader, reader.uint32()); - break; - } - case 4: { - message.dtid = reader.string(); + message.message = reader.string(); break; } default: @@ -80128,161 +81583,133 @@ export const query = $root.query = (() => { }; /** - * Decodes a CommitPreparedRequest message from the specified reader or buffer, length delimited. + * Decodes a QueryWarning message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.CommitPreparedRequest + * @memberof query.QueryWarning * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.CommitPreparedRequest} CommitPreparedRequest + * @returns {query.QueryWarning} QueryWarning * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CommitPreparedRequest.decodeDelimited = function decodeDelimited(reader) { + QueryWarning.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a CommitPreparedRequest message. + * Verifies a QueryWarning message. * @function verify - * @memberof query.CommitPreparedRequest + * @memberof query.QueryWarning * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - CommitPreparedRequest.verify = function verify(message) { + QueryWarning.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { - let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); - if (error) - return "effective_caller_id." + error; - } - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { - let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); - if (error) - return "immediate_caller_id." + error; - } - if (message.target != null && message.hasOwnProperty("target")) { - let error = $root.query.Target.verify(message.target); - if (error) - return "target." 
+ error; - } - if (message.dtid != null && message.hasOwnProperty("dtid")) - if (!$util.isString(message.dtid)) - return "dtid: string expected"; + if (message.code != null && message.hasOwnProperty("code")) + if (!$util.isInteger(message.code)) + return "code: integer expected"; + if (message.message != null && message.hasOwnProperty("message")) + if (!$util.isString(message.message)) + return "message: string expected"; return null; }; /** - * Creates a CommitPreparedRequest message from a plain object. Also converts values to their respective internal types. + * Creates a QueryWarning message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.CommitPreparedRequest + * @memberof query.QueryWarning * @static * @param {Object.} object Plain object - * @returns {query.CommitPreparedRequest} CommitPreparedRequest + * @returns {query.QueryWarning} QueryWarning */ - CommitPreparedRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.CommitPreparedRequest) + QueryWarning.fromObject = function fromObject(object) { + if (object instanceof $root.query.QueryWarning) return object; - let message = new $root.query.CommitPreparedRequest(); - if (object.effective_caller_id != null) { - if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.CommitPreparedRequest.effective_caller_id: object expected"); - message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); - } - if (object.immediate_caller_id != null) { - if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.CommitPreparedRequest.immediate_caller_id: object expected"); - message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); - } - if (object.target != null) { - if (typeof object.target !== "object") - throw TypeError(".query.CommitPreparedRequest.target: object expected"); - message.target = 
$root.query.Target.fromObject(object.target); - } - if (object.dtid != null) - message.dtid = String(object.dtid); + let message = new $root.query.QueryWarning(); + if (object.code != null) + message.code = object.code >>> 0; + if (object.message != null) + message.message = String(object.message); return message; }; /** - * Creates a plain object from a CommitPreparedRequest message. Also converts values to other types if specified. + * Creates a plain object from a QueryWarning message. Also converts values to other types if specified. * @function toObject - * @memberof query.CommitPreparedRequest + * @memberof query.QueryWarning * @static - * @param {query.CommitPreparedRequest} message CommitPreparedRequest + * @param {query.QueryWarning} message QueryWarning * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - CommitPreparedRequest.toObject = function toObject(message, options) { + QueryWarning.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.effective_caller_id = null; - object.immediate_caller_id = null; - object.target = null; - object.dtid = ""; + object.code = 0; + object.message = ""; } - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) - object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) - object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); - if (message.target != null && message.hasOwnProperty("target")) - object.target = $root.query.Target.toObject(message.target, options); - if (message.dtid != null && message.hasOwnProperty("dtid")) - object.dtid = message.dtid; + if (message.code != null && message.hasOwnProperty("code")) + object.code = message.code; + if (message.message != null && 
message.hasOwnProperty("message")) + object.message = message.message; return object; }; /** - * Converts this CommitPreparedRequest to JSON. + * Converts this QueryWarning to JSON. * @function toJSON - * @memberof query.CommitPreparedRequest + * @memberof query.QueryWarning * @instance * @returns {Object.} JSON object */ - CommitPreparedRequest.prototype.toJSON = function toJSON() { + QueryWarning.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for CommitPreparedRequest + * Gets the default type url for QueryWarning * @function getTypeUrl - * @memberof query.CommitPreparedRequest + * @memberof query.QueryWarning * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - CommitPreparedRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + QueryWarning.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.CommitPreparedRequest"; + return typeUrlPrefix + "/query.QueryWarning"; }; - return CommitPreparedRequest; + return QueryWarning; })(); - query.CommitPreparedResponse = (function() { + query.StreamEvent = (function() { /** - * Properties of a CommitPreparedResponse. + * Properties of a StreamEvent. * @memberof query - * @interface ICommitPreparedResponse + * @interface IStreamEvent + * @property {Array.|null} [statements] StreamEvent statements + * @property {query.IEventToken|null} [event_token] StreamEvent event_token */ /** - * Constructs a new CommitPreparedResponse. + * Constructs a new StreamEvent. * @memberof query - * @classdesc Represents a CommitPreparedResponse. - * @implements ICommitPreparedResponse + * @classdesc Represents a StreamEvent. 
+ * @implements IStreamEvent * @constructor - * @param {query.ICommitPreparedResponse=} [properties] Properties to set + * @param {query.IStreamEvent=} [properties] Properties to set */ - function CommitPreparedResponse(properties) { + function StreamEvent(properties) { + this.statements = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -80290,63 +81717,94 @@ export const query = $root.query = (() => { } /** - * Creates a new CommitPreparedResponse instance using the specified properties. + * StreamEvent statements. + * @member {Array.} statements + * @memberof query.StreamEvent + * @instance + */ + StreamEvent.prototype.statements = $util.emptyArray; + + /** + * StreamEvent event_token. + * @member {query.IEventToken|null|undefined} event_token + * @memberof query.StreamEvent + * @instance + */ + StreamEvent.prototype.event_token = null; + + /** + * Creates a new StreamEvent instance using the specified properties. * @function create - * @memberof query.CommitPreparedResponse + * @memberof query.StreamEvent * @static - * @param {query.ICommitPreparedResponse=} [properties] Properties to set - * @returns {query.CommitPreparedResponse} CommitPreparedResponse instance + * @param {query.IStreamEvent=} [properties] Properties to set + * @returns {query.StreamEvent} StreamEvent instance */ - CommitPreparedResponse.create = function create(properties) { - return new CommitPreparedResponse(properties); + StreamEvent.create = function create(properties) { + return new StreamEvent(properties); }; /** - * Encodes the specified CommitPreparedResponse message. Does not implicitly {@link query.CommitPreparedResponse.verify|verify} messages. + * Encodes the specified StreamEvent message. Does not implicitly {@link query.StreamEvent.verify|verify} messages. 
* @function encode - * @memberof query.CommitPreparedResponse + * @memberof query.StreamEvent * @static - * @param {query.ICommitPreparedResponse} message CommitPreparedResponse message or plain object to encode + * @param {query.IStreamEvent} message StreamEvent message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CommitPreparedResponse.encode = function encode(message, writer) { + StreamEvent.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.statements != null && message.statements.length) + for (let i = 0; i < message.statements.length; ++i) + $root.query.StreamEvent.Statement.encode(message.statements[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.event_token != null && Object.hasOwnProperty.call(message, "event_token")) + $root.query.EventToken.encode(message.event_token, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); return writer; }; /** - * Encodes the specified CommitPreparedResponse message, length delimited. Does not implicitly {@link query.CommitPreparedResponse.verify|verify} messages. + * Encodes the specified StreamEvent message, length delimited. Does not implicitly {@link query.StreamEvent.verify|verify} messages. 
* @function encodeDelimited - * @memberof query.CommitPreparedResponse + * @memberof query.StreamEvent * @static - * @param {query.ICommitPreparedResponse} message CommitPreparedResponse message or plain object to encode + * @param {query.IStreamEvent} message StreamEvent message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CommitPreparedResponse.encodeDelimited = function encodeDelimited(message, writer) { + StreamEvent.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a CommitPreparedResponse message from the specified reader or buffer. + * Decodes a StreamEvent message from the specified reader or buffer. * @function decode - * @memberof query.CommitPreparedResponse + * @memberof query.StreamEvent * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.CommitPreparedResponse} CommitPreparedResponse + * @returns {query.StreamEvent} StreamEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CommitPreparedResponse.decode = function decode(reader, length) { + StreamEvent.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.CommitPreparedResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.StreamEvent(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + if (!(message.statements && message.statements.length)) + message.statements = []; + message.statements.push($root.query.StreamEvent.Statement.decode(reader, reader.uint32())); + break; + } + case 2: { + message.event_token = $root.query.EventToken.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -80356,181 +81814,632 @@ export const query = $root.query = (() => { }; /** - * Decodes a CommitPreparedResponse message from the specified reader or buffer, length delimited. + * Decodes a StreamEvent message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.CommitPreparedResponse + * @memberof query.StreamEvent * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.CommitPreparedResponse} CommitPreparedResponse + * @returns {query.StreamEvent} StreamEvent * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CommitPreparedResponse.decodeDelimited = function decodeDelimited(reader) { + StreamEvent.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a CommitPreparedResponse message. + * Verifies a StreamEvent message. 
* @function verify - * @memberof query.CommitPreparedResponse + * @memberof query.StreamEvent * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - CommitPreparedResponse.verify = function verify(message) { + StreamEvent.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.statements != null && message.hasOwnProperty("statements")) { + if (!Array.isArray(message.statements)) + return "statements: array expected"; + for (let i = 0; i < message.statements.length; ++i) { + let error = $root.query.StreamEvent.Statement.verify(message.statements[i]); + if (error) + return "statements." + error; + } + } + if (message.event_token != null && message.hasOwnProperty("event_token")) { + let error = $root.query.EventToken.verify(message.event_token); + if (error) + return "event_token." + error; + } return null; }; /** - * Creates a CommitPreparedResponse message from a plain object. Also converts values to their respective internal types. + * Creates a StreamEvent message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.CommitPreparedResponse + * @memberof query.StreamEvent * @static * @param {Object.} object Plain object - * @returns {query.CommitPreparedResponse} CommitPreparedResponse + * @returns {query.StreamEvent} StreamEvent */ - CommitPreparedResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.CommitPreparedResponse) + StreamEvent.fromObject = function fromObject(object) { + if (object instanceof $root.query.StreamEvent) return object; - return new $root.query.CommitPreparedResponse(); + let message = new $root.query.StreamEvent(); + if (object.statements) { + if (!Array.isArray(object.statements)) + throw TypeError(".query.StreamEvent.statements: array expected"); + message.statements = []; + for (let i = 0; i < object.statements.length; ++i) { + if (typeof object.statements[i] !== "object") + throw TypeError(".query.StreamEvent.statements: object expected"); + message.statements[i] = $root.query.StreamEvent.Statement.fromObject(object.statements[i]); + } + } + if (object.event_token != null) { + if (typeof object.event_token !== "object") + throw TypeError(".query.StreamEvent.event_token: object expected"); + message.event_token = $root.query.EventToken.fromObject(object.event_token); + } + return message; }; /** - * Creates a plain object from a CommitPreparedResponse message. Also converts values to other types if specified. + * Creates a plain object from a StreamEvent message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.CommitPreparedResponse + * @memberof query.StreamEvent * @static - * @param {query.CommitPreparedResponse} message CommitPreparedResponse + * @param {query.StreamEvent} message StreamEvent * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - CommitPreparedResponse.toObject = function toObject() { - return {}; + StreamEvent.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.statements = []; + if (options.defaults) + object.event_token = null; + if (message.statements && message.statements.length) { + object.statements = []; + for (let j = 0; j < message.statements.length; ++j) + object.statements[j] = $root.query.StreamEvent.Statement.toObject(message.statements[j], options); + } + if (message.event_token != null && message.hasOwnProperty("event_token")) + object.event_token = $root.query.EventToken.toObject(message.event_token, options); + return object; }; /** - * Converts this CommitPreparedResponse to JSON. + * Converts this StreamEvent to JSON. 
* @function toJSON - * @memberof query.CommitPreparedResponse + * @memberof query.StreamEvent * @instance * @returns {Object.} JSON object */ - CommitPreparedResponse.prototype.toJSON = function toJSON() { + StreamEvent.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for CommitPreparedResponse + * Gets the default type url for StreamEvent * @function getTypeUrl - * @memberof query.CommitPreparedResponse + * @memberof query.StreamEvent * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - CommitPreparedResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + StreamEvent.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.CommitPreparedResponse"; + return typeUrlPrefix + "/query.StreamEvent"; }; - return CommitPreparedResponse; - })(); + StreamEvent.Statement = (function() { - query.RollbackPreparedRequest = (function() { + /** + * Properties of a Statement. + * @memberof query.StreamEvent + * @interface IStatement + * @property {query.StreamEvent.Statement.Category|null} [category] Statement category + * @property {string|null} [table_name] Statement table_name + * @property {Array.|null} [primary_key_fields] Statement primary_key_fields + * @property {Array.|null} [primary_key_values] Statement primary_key_values + * @property {Uint8Array|null} [sql] Statement sql + */ - /** - * Properties of a RollbackPreparedRequest. 
- * @memberof query - * @interface IRollbackPreparedRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] RollbackPreparedRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] RollbackPreparedRequest immediate_caller_id - * @property {query.ITarget|null} [target] RollbackPreparedRequest target - * @property {number|Long|null} [transaction_id] RollbackPreparedRequest transaction_id - * @property {string|null} [dtid] RollbackPreparedRequest dtid - */ + /** + * Constructs a new Statement. + * @memberof query.StreamEvent + * @classdesc Represents a Statement. + * @implements IStatement + * @constructor + * @param {query.StreamEvent.IStatement=} [properties] Properties to set + */ + function Statement(properties) { + this.primary_key_fields = []; + this.primary_key_values = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } - /** - * Constructs a new RollbackPreparedRequest. - * @memberof query - * @classdesc Represents a RollbackPreparedRequest. - * @implements IRollbackPreparedRequest - * @constructor - * @param {query.IRollbackPreparedRequest=} [properties] Properties to set - */ - function RollbackPreparedRequest(properties) { - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + /** + * Statement category. + * @member {query.StreamEvent.Statement.Category} category + * @memberof query.StreamEvent.Statement + * @instance + */ + Statement.prototype.category = 0; - /** - * RollbackPreparedRequest effective_caller_id. - * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.RollbackPreparedRequest - * @instance - */ - RollbackPreparedRequest.prototype.effective_caller_id = null; + /** + * Statement table_name. 
+ * @member {string} table_name + * @memberof query.StreamEvent.Statement + * @instance + */ + Statement.prototype.table_name = ""; - /** - * RollbackPreparedRequest immediate_caller_id. - * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.RollbackPreparedRequest - * @instance - */ - RollbackPreparedRequest.prototype.immediate_caller_id = null; + /** + * Statement primary_key_fields. + * @member {Array.} primary_key_fields + * @memberof query.StreamEvent.Statement + * @instance + */ + Statement.prototype.primary_key_fields = $util.emptyArray; - /** - * RollbackPreparedRequest target. - * @member {query.ITarget|null|undefined} target - * @memberof query.RollbackPreparedRequest - * @instance - */ - RollbackPreparedRequest.prototype.target = null; + /** + * Statement primary_key_values. + * @member {Array.} primary_key_values + * @memberof query.StreamEvent.Statement + * @instance + */ + Statement.prototype.primary_key_values = $util.emptyArray; - /** - * RollbackPreparedRequest transaction_id. - * @member {number|Long} transaction_id - * @memberof query.RollbackPreparedRequest - * @instance - */ - RollbackPreparedRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + /** + * Statement sql. + * @member {Uint8Array} sql + * @memberof query.StreamEvent.Statement + * @instance + */ + Statement.prototype.sql = $util.newBuffer([]); - /** - * RollbackPreparedRequest dtid. - * @member {string} dtid - * @memberof query.RollbackPreparedRequest - * @instance - */ - RollbackPreparedRequest.prototype.dtid = ""; + /** + * Creates a new Statement instance using the specified properties. 
+ * @function create + * @memberof query.StreamEvent.Statement + * @static + * @param {query.StreamEvent.IStatement=} [properties] Properties to set + * @returns {query.StreamEvent.Statement} Statement instance + */ + Statement.create = function create(properties) { + return new Statement(properties); + }; - /** - * Creates a new RollbackPreparedRequest instance using the specified properties. - * @function create - * @memberof query.RollbackPreparedRequest - * @static - * @param {query.IRollbackPreparedRequest=} [properties] Properties to set - * @returns {query.RollbackPreparedRequest} RollbackPreparedRequest instance - */ - RollbackPreparedRequest.create = function create(properties) { - return new RollbackPreparedRequest(properties); - }; + /** + * Encodes the specified Statement message. Does not implicitly {@link query.StreamEvent.Statement.verify|verify} messages. + * @function encode + * @memberof query.StreamEvent.Statement + * @static + * @param {query.StreamEvent.IStatement} message Statement message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Statement.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.category != null && Object.hasOwnProperty.call(message, "category")) + writer.uint32(/* id 1, wireType 0 =*/8).int32(message.category); + if (message.table_name != null && Object.hasOwnProperty.call(message, "table_name")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.table_name); + if (message.primary_key_fields != null && message.primary_key_fields.length) + for (let i = 0; i < message.primary_key_fields.length; ++i) + $root.query.Field.encode(message.primary_key_fields[i], writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.primary_key_values != null && message.primary_key_values.length) + for (let i = 0; i < message.primary_key_values.length; ++i) + 
$root.query.Row.encode(message.primary_key_values[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.sql != null && Object.hasOwnProperty.call(message, "sql")) + writer.uint32(/* id 5, wireType 2 =*/42).bytes(message.sql); + return writer; + }; + + /** + * Encodes the specified Statement message, length delimited. Does not implicitly {@link query.StreamEvent.Statement.verify|verify} messages. + * @function encodeDelimited + * @memberof query.StreamEvent.Statement + * @static + * @param {query.StreamEvent.IStatement} message Statement message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Statement.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Statement message from the specified reader or buffer. + * @function decode + * @memberof query.StreamEvent.Statement + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {query.StreamEvent.Statement} Statement + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Statement.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.StreamEvent.Statement(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.category = reader.int32(); + break; + } + case 2: { + message.table_name = reader.string(); + break; + } + case 3: { + if (!(message.primary_key_fields && message.primary_key_fields.length)) + message.primary_key_fields = []; + message.primary_key_fields.push($root.query.Field.decode(reader, reader.uint32())); + break; + } + case 4: { + if (!(message.primary_key_values && message.primary_key_values.length)) + message.primary_key_values = []; + message.primary_key_values.push($root.query.Row.decode(reader, reader.uint32())); + break; + } + case 5: { + message.sql = reader.bytes(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a Statement message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof query.StreamEvent.Statement + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {query.StreamEvent.Statement} Statement + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Statement.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Statement message. 
+ * @function verify + * @memberof query.StreamEvent.Statement + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Statement.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.category != null && message.hasOwnProperty("category")) + switch (message.category) { + default: + return "category: enum value expected"; + case 0: + case 1: + case 2: + break; + } + if (message.table_name != null && message.hasOwnProperty("table_name")) + if (!$util.isString(message.table_name)) + return "table_name: string expected"; + if (message.primary_key_fields != null && message.hasOwnProperty("primary_key_fields")) { + if (!Array.isArray(message.primary_key_fields)) + return "primary_key_fields: array expected"; + for (let i = 0; i < message.primary_key_fields.length; ++i) { + let error = $root.query.Field.verify(message.primary_key_fields[i]); + if (error) + return "primary_key_fields." + error; + } + } + if (message.primary_key_values != null && message.hasOwnProperty("primary_key_values")) { + if (!Array.isArray(message.primary_key_values)) + return "primary_key_values: array expected"; + for (let i = 0; i < message.primary_key_values.length; ++i) { + let error = $root.query.Row.verify(message.primary_key_values[i]); + if (error) + return "primary_key_values." + error; + } + } + if (message.sql != null && message.hasOwnProperty("sql")) + if (!(message.sql && typeof message.sql.length === "number" || $util.isString(message.sql))) + return "sql: buffer expected"; + return null; + }; + + /** + * Creates a Statement message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof query.StreamEvent.Statement + * @static + * @param {Object.} object Plain object + * @returns {query.StreamEvent.Statement} Statement + */ + Statement.fromObject = function fromObject(object) { + if (object instanceof $root.query.StreamEvent.Statement) + return object; + let message = new $root.query.StreamEvent.Statement(); + switch (object.category) { + default: + if (typeof object.category === "number") { + message.category = object.category; + break; + } + break; + case "Error": + case 0: + message.category = 0; + break; + case "DML": + case 1: + message.category = 1; + break; + case "DDL": + case 2: + message.category = 2; + break; + } + if (object.table_name != null) + message.table_name = String(object.table_name); + if (object.primary_key_fields) { + if (!Array.isArray(object.primary_key_fields)) + throw TypeError(".query.StreamEvent.Statement.primary_key_fields: array expected"); + message.primary_key_fields = []; + for (let i = 0; i < object.primary_key_fields.length; ++i) { + if (typeof object.primary_key_fields[i] !== "object") + throw TypeError(".query.StreamEvent.Statement.primary_key_fields: object expected"); + message.primary_key_fields[i] = $root.query.Field.fromObject(object.primary_key_fields[i]); + } + } + if (object.primary_key_values) { + if (!Array.isArray(object.primary_key_values)) + throw TypeError(".query.StreamEvent.Statement.primary_key_values: array expected"); + message.primary_key_values = []; + for (let i = 0; i < object.primary_key_values.length; ++i) { + if (typeof object.primary_key_values[i] !== "object") + throw TypeError(".query.StreamEvent.Statement.primary_key_values: object expected"); + message.primary_key_values[i] = $root.query.Row.fromObject(object.primary_key_values[i]); + } + } + if (object.sql != null) + if (typeof object.sql === "string") + $util.base64.decode(object.sql, message.sql = $util.newBuffer($util.base64.length(object.sql)), 0); + else if (object.sql.length >= 0) + 
message.sql = object.sql; + return message; + }; + + /** + * Creates a plain object from a Statement message. Also converts values to other types if specified. + * @function toObject + * @memberof query.StreamEvent.Statement + * @static + * @param {query.StreamEvent.Statement} message Statement + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Statement.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) { + object.primary_key_fields = []; + object.primary_key_values = []; + } + if (options.defaults) { + object.category = options.enums === String ? "Error" : 0; + object.table_name = ""; + if (options.bytes === String) + object.sql = ""; + else { + object.sql = []; + if (options.bytes !== Array) + object.sql = $util.newBuffer(object.sql); + } + } + if (message.category != null && message.hasOwnProperty("category")) + object.category = options.enums === String ? $root.query.StreamEvent.Statement.Category[message.category] === undefined ? message.category : $root.query.StreamEvent.Statement.Category[message.category] : message.category; + if (message.table_name != null && message.hasOwnProperty("table_name")) + object.table_name = message.table_name; + if (message.primary_key_fields && message.primary_key_fields.length) { + object.primary_key_fields = []; + for (let j = 0; j < message.primary_key_fields.length; ++j) + object.primary_key_fields[j] = $root.query.Field.toObject(message.primary_key_fields[j], options); + } + if (message.primary_key_values && message.primary_key_values.length) { + object.primary_key_values = []; + for (let j = 0; j < message.primary_key_values.length; ++j) + object.primary_key_values[j] = $root.query.Row.toObject(message.primary_key_values[j], options); + } + if (message.sql != null && message.hasOwnProperty("sql")) + object.sql = options.bytes === String ? 
$util.base64.encode(message.sql, 0, message.sql.length) : options.bytes === Array ? Array.prototype.slice.call(message.sql) : message.sql; + return object; + }; + + /** + * Converts this Statement to JSON. + * @function toJSON + * @memberof query.StreamEvent.Statement + * @instance + * @returns {Object.} JSON object + */ + Statement.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Statement + * @function getTypeUrl + * @memberof query.StreamEvent.Statement + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Statement.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/query.StreamEvent.Statement"; + }; + + /** + * Category enum. + * @name query.StreamEvent.Statement.Category + * @enum {number} + * @property {number} Error=0 Error value + * @property {number} DML=1 DML value + * @property {number} DDL=2 DDL value + */ + Statement.Category = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "Error"] = 0; + values[valuesById[1] = "DML"] = 1; + values[valuesById[2] = "DDL"] = 2; + return values; + })(); + + return Statement; + })(); + + return StreamEvent; + })(); + + query.ExecuteRequest = (function() { /** - * Encodes the specified RollbackPreparedRequest message. Does not implicitly {@link query.RollbackPreparedRequest.verify|verify} messages. + * Properties of an ExecuteRequest. 
+ * @memberof query + * @interface IExecuteRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] ExecuteRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] ExecuteRequest immediate_caller_id + * @property {query.ITarget|null} [target] ExecuteRequest target + * @property {query.IBoundQuery|null} [query] ExecuteRequest query + * @property {number|Long|null} [transaction_id] ExecuteRequest transaction_id + * @property {query.IExecuteOptions|null} [options] ExecuteRequest options + * @property {number|Long|null} [reserved_id] ExecuteRequest reserved_id + */ + + /** + * Constructs a new ExecuteRequest. + * @memberof query + * @classdesc Represents an ExecuteRequest. + * @implements IExecuteRequest + * @constructor + * @param {query.IExecuteRequest=} [properties] Properties to set + */ + function ExecuteRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ExecuteRequest effective_caller_id. + * @member {vtrpc.ICallerID|null|undefined} effective_caller_id + * @memberof query.ExecuteRequest + * @instance + */ + ExecuteRequest.prototype.effective_caller_id = null; + + /** + * ExecuteRequest immediate_caller_id. + * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id + * @memberof query.ExecuteRequest + * @instance + */ + ExecuteRequest.prototype.immediate_caller_id = null; + + /** + * ExecuteRequest target. + * @member {query.ITarget|null|undefined} target + * @memberof query.ExecuteRequest + * @instance + */ + ExecuteRequest.prototype.target = null; + + /** + * ExecuteRequest query. + * @member {query.IBoundQuery|null|undefined} query + * @memberof query.ExecuteRequest + * @instance + */ + ExecuteRequest.prototype.query = null; + + /** + * ExecuteRequest transaction_id. 
+ * @member {number|Long} transaction_id + * @memberof query.ExecuteRequest + * @instance + */ + ExecuteRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * ExecuteRequest options. + * @member {query.IExecuteOptions|null|undefined} options + * @memberof query.ExecuteRequest + * @instance + */ + ExecuteRequest.prototype.options = null; + + /** + * ExecuteRequest reserved_id. + * @member {number|Long} reserved_id + * @memberof query.ExecuteRequest + * @instance + */ + ExecuteRequest.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * Creates a new ExecuteRequest instance using the specified properties. + * @function create + * @memberof query.ExecuteRequest + * @static + * @param {query.IExecuteRequest=} [properties] Properties to set + * @returns {query.ExecuteRequest} ExecuteRequest instance + */ + ExecuteRequest.create = function create(properties) { + return new ExecuteRequest(properties); + }; + + /** + * Encodes the specified ExecuteRequest message. Does not implicitly {@link query.ExecuteRequest.verify|verify} messages. 
* @function encode - * @memberof query.RollbackPreparedRequest + * @memberof query.ExecuteRequest * @static - * @param {query.IRollbackPreparedRequest} message RollbackPreparedRequest message or plain object to encode + * @param {query.IExecuteRequest} message ExecuteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RollbackPreparedRequest.encode = function encode(message, writer) { + ExecuteRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) @@ -80539,41 +82448,45 @@ export const query = $root.query = (() => { $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); if (message.target != null && Object.hasOwnProperty.call(message, "target")) $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.query != null && Object.hasOwnProperty.call(message, "query")) + $root.query.BoundQuery.encode(message.query, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) - writer.uint32(/* id 4, wireType 0 =*/32).int64(message.transaction_id); - if (message.dtid != null && Object.hasOwnProperty.call(message, "dtid")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.dtid); + writer.uint32(/* id 5, wireType 0 =*/40).int64(message.transaction_id); + if (message.options != null && Object.hasOwnProperty.call(message, "options")) + $root.query.ExecuteOptions.encode(message.options, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); + if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) + writer.uint32(/* id 7, wireType 0 =*/56).int64(message.reserved_id); return writer; }; /** - * 
Encodes the specified RollbackPreparedRequest message, length delimited. Does not implicitly {@link query.RollbackPreparedRequest.verify|verify} messages. + * Encodes the specified ExecuteRequest message, length delimited. Does not implicitly {@link query.ExecuteRequest.verify|verify} messages. * @function encodeDelimited - * @memberof query.RollbackPreparedRequest + * @memberof query.ExecuteRequest * @static - * @param {query.IRollbackPreparedRequest} message RollbackPreparedRequest message or plain object to encode + * @param {query.IExecuteRequest} message ExecuteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RollbackPreparedRequest.encodeDelimited = function encodeDelimited(message, writer) { + ExecuteRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RollbackPreparedRequest message from the specified reader or buffer. + * Decodes an ExecuteRequest message from the specified reader or buffer. * @function decode - * @memberof query.RollbackPreparedRequest + * @memberof query.ExecuteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.RollbackPreparedRequest} RollbackPreparedRequest + * @returns {query.ExecuteRequest} ExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RollbackPreparedRequest.decode = function decode(reader, length) { + ExecuteRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.RollbackPreparedRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.ExecuteRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -80590,11 +82503,19 @@ export const query = $root.query = (() => { break; } case 4: { - message.transaction_id = reader.int64(); + message.query = $root.query.BoundQuery.decode(reader, reader.uint32()); break; } case 5: { - message.dtid = reader.string(); + message.transaction_id = reader.int64(); + break; + } + case 6: { + message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); + break; + } + case 7: { + message.reserved_id = reader.int64(); break; } default: @@ -80606,30 +82527,30 @@ export const query = $root.query = (() => { }; /** - * Decodes a RollbackPreparedRequest message from the specified reader or buffer, length delimited. + * Decodes an ExecuteRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.RollbackPreparedRequest + * @memberof query.ExecuteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.RollbackPreparedRequest} RollbackPreparedRequest + * @returns {query.ExecuteRequest} ExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RollbackPreparedRequest.decodeDelimited = function decodeDelimited(reader) { + ExecuteRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RollbackPreparedRequest message. + * Verifies an ExecuteRequest message. 
* @function verify - * @memberof query.RollbackPreparedRequest + * @memberof query.ExecuteRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RollbackPreparedRequest.verify = function verify(message) { + ExecuteRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { @@ -80647,42 +82568,57 @@ export const query = $root.query = (() => { if (error) return "target." + error; } + if (message.query != null && message.hasOwnProperty("query")) { + let error = $root.query.BoundQuery.verify(message.query); + if (error) + return "query." + error; + } if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) return "transaction_id: integer|Long expected"; - if (message.dtid != null && message.hasOwnProperty("dtid")) - if (!$util.isString(message.dtid)) - return "dtid: string expected"; + if (message.options != null && message.hasOwnProperty("options")) { + let error = $root.query.ExecuteOptions.verify(message.options); + if (error) + return "options." + error; + } + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) + return "reserved_id: integer|Long expected"; return null; }; /** - * Creates a RollbackPreparedRequest message from a plain object. Also converts values to their respective internal types. + * Creates an ExecuteRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.RollbackPreparedRequest + * @memberof query.ExecuteRequest * @static * @param {Object.} object Plain object - * @returns {query.RollbackPreparedRequest} RollbackPreparedRequest + * @returns {query.ExecuteRequest} ExecuteRequest */ - RollbackPreparedRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.RollbackPreparedRequest) + ExecuteRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.ExecuteRequest) return object; - let message = new $root.query.RollbackPreparedRequest(); + let message = new $root.query.ExecuteRequest(); if (object.effective_caller_id != null) { if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.RollbackPreparedRequest.effective_caller_id: object expected"); + throw TypeError(".query.ExecuteRequest.effective_caller_id: object expected"); message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); } if (object.immediate_caller_id != null) { if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.RollbackPreparedRequest.immediate_caller_id: object expected"); + throw TypeError(".query.ExecuteRequest.immediate_caller_id: object expected"); message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); } if (object.target != null) { if (typeof object.target !== "object") - throw TypeError(".query.RollbackPreparedRequest.target: object expected"); + throw TypeError(".query.ExecuteRequest.target: object expected"); message.target = $root.query.Target.fromObject(object.target); } + if (object.query != null) { + if (typeof object.query !== "object") + throw TypeError(".query.ExecuteRequest.query: object expected"); + message.query = $root.query.BoundQuery.fromObject(object.query); + } if (object.transaction_id != null) if ($util.Long) (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; @@ 
-80692,21 +82628,33 @@ export const query = $root.query = (() => { message.transaction_id = object.transaction_id; else if (typeof object.transaction_id === "object") message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); - if (object.dtid != null) - message.dtid = String(object.dtid); + if (object.options != null) { + if (typeof object.options !== "object") + throw TypeError(".query.ExecuteRequest.options: object expected"); + message.options = $root.query.ExecuteOptions.fromObject(object.options); + } + if (object.reserved_id != null) + if ($util.Long) + (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; + else if (typeof object.reserved_id === "string") + message.reserved_id = parseInt(object.reserved_id, 10); + else if (typeof object.reserved_id === "number") + message.reserved_id = object.reserved_id; + else if (typeof object.reserved_id === "object") + message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); return message; }; /** - * Creates a plain object from a RollbackPreparedRequest message. Also converts values to other types if specified. + * Creates a plain object from an ExecuteRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.RollbackPreparedRequest + * @memberof query.ExecuteRequest * @static - * @param {query.RollbackPreparedRequest} message RollbackPreparedRequest + * @param {query.ExecuteRequest} message ExecuteRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RollbackPreparedRequest.toObject = function toObject(message, options) { + ExecuteRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; @@ -80714,75 +82662,89 @@ export const query = $root.query = (() => { object.effective_caller_id = null; object.immediate_caller_id = null; object.target = null; + object.query = null; if ($util.Long) { let long = new $util.Long(0, 0, false); object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; } else object.transaction_id = options.longs === String ? "0" : 0; - object.dtid = ""; - } - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) - object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) + object.options = null; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.reserved_id = options.longs === String ? 
"0" : 0; + } + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) + object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); if (message.target != null && message.hasOwnProperty("target")) object.target = $root.query.Target.toObject(message.target, options); + if (message.query != null && message.hasOwnProperty("query")) + object.query = $root.query.BoundQuery.toObject(message.query, options); if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) if (typeof message.transaction_id === "number") object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; else object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; - if (message.dtid != null && message.hasOwnProperty("dtid")) - object.dtid = message.dtid; + if (message.options != null && message.hasOwnProperty("options")) + object.options = $root.query.ExecuteOptions.toObject(message.options, options); + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (typeof message.reserved_id === "number") + object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; + else + object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; return object; }; /** - * Converts this RollbackPreparedRequest to JSON. 
+ * Converts this ExecuteRequest to JSON. * @function toJSON - * @memberof query.RollbackPreparedRequest + * @memberof query.ExecuteRequest * @instance * @returns {Object.} JSON object */ - RollbackPreparedRequest.prototype.toJSON = function toJSON() { + ExecuteRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RollbackPreparedRequest + * Gets the default type url for ExecuteRequest * @function getTypeUrl - * @memberof query.RollbackPreparedRequest + * @memberof query.ExecuteRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RollbackPreparedRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ExecuteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.RollbackPreparedRequest"; + return typeUrlPrefix + "/query.ExecuteRequest"; }; - return RollbackPreparedRequest; + return ExecuteRequest; })(); - query.RollbackPreparedResponse = (function() { + query.ExecuteResponse = (function() { /** - * Properties of a RollbackPreparedResponse. + * Properties of an ExecuteResponse. * @memberof query - * @interface IRollbackPreparedResponse + * @interface IExecuteResponse + * @property {query.IQueryResult|null} [result] ExecuteResponse result */ /** - * Constructs a new RollbackPreparedResponse. + * Constructs a new ExecuteResponse. * @memberof query - * @classdesc Represents a RollbackPreparedResponse. - * @implements IRollbackPreparedResponse + * @classdesc Represents an ExecuteResponse. 
+ * @implements IExecuteResponse * @constructor - * @param {query.IRollbackPreparedResponse=} [properties] Properties to set + * @param {query.IExecuteResponse=} [properties] Properties to set */ - function RollbackPreparedResponse(properties) { + function ExecuteResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -80790,63 +82752,77 @@ export const query = $root.query = (() => { } /** - * Creates a new RollbackPreparedResponse instance using the specified properties. + * ExecuteResponse result. + * @member {query.IQueryResult|null|undefined} result + * @memberof query.ExecuteResponse + * @instance + */ + ExecuteResponse.prototype.result = null; + + /** + * Creates a new ExecuteResponse instance using the specified properties. * @function create - * @memberof query.RollbackPreparedResponse + * @memberof query.ExecuteResponse * @static - * @param {query.IRollbackPreparedResponse=} [properties] Properties to set - * @returns {query.RollbackPreparedResponse} RollbackPreparedResponse instance + * @param {query.IExecuteResponse=} [properties] Properties to set + * @returns {query.ExecuteResponse} ExecuteResponse instance */ - RollbackPreparedResponse.create = function create(properties) { - return new RollbackPreparedResponse(properties); + ExecuteResponse.create = function create(properties) { + return new ExecuteResponse(properties); }; /** - * Encodes the specified RollbackPreparedResponse message. Does not implicitly {@link query.RollbackPreparedResponse.verify|verify} messages. + * Encodes the specified ExecuteResponse message. Does not implicitly {@link query.ExecuteResponse.verify|verify} messages. 
* @function encode - * @memberof query.RollbackPreparedResponse + * @memberof query.ExecuteResponse * @static - * @param {query.IRollbackPreparedResponse} message RollbackPreparedResponse message or plain object to encode + * @param {query.IExecuteResponse} message ExecuteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RollbackPreparedResponse.encode = function encode(message, writer) { + ExecuteResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.result != null && Object.hasOwnProperty.call(message, "result")) + $root.query.QueryResult.encode(message.result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified RollbackPreparedResponse message, length delimited. Does not implicitly {@link query.RollbackPreparedResponse.verify|verify} messages. + * Encodes the specified ExecuteResponse message, length delimited. Does not implicitly {@link query.ExecuteResponse.verify|verify} messages. * @function encodeDelimited - * @memberof query.RollbackPreparedResponse + * @memberof query.ExecuteResponse * @static - * @param {query.IRollbackPreparedResponse} message RollbackPreparedResponse message or plain object to encode + * @param {query.IExecuteResponse} message ExecuteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RollbackPreparedResponse.encodeDelimited = function encodeDelimited(message, writer) { + ExecuteResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RollbackPreparedResponse message from the specified reader or buffer. + * Decodes an ExecuteResponse message from the specified reader or buffer. 
* @function decode - * @memberof query.RollbackPreparedResponse + * @memberof query.ExecuteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.RollbackPreparedResponse} RollbackPreparedResponse + * @returns {query.ExecuteResponse} ExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RollbackPreparedResponse.decode = function decode(reader, length) { + ExecuteResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.RollbackPreparedResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ExecuteResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.result = $root.query.QueryResult.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -80856,114 +82832,128 @@ export const query = $root.query = (() => { }; /** - * Decodes a RollbackPreparedResponse message from the specified reader or buffer, length delimited. + * Decodes an ExecuteResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.RollbackPreparedResponse + * @memberof query.ExecuteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.RollbackPreparedResponse} RollbackPreparedResponse + * @returns {query.ExecuteResponse} ExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RollbackPreparedResponse.decodeDelimited = function decodeDelimited(reader) { + ExecuteResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RollbackPreparedResponse message. + * Verifies an ExecuteResponse message. * @function verify - * @memberof query.RollbackPreparedResponse + * @memberof query.ExecuteResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RollbackPreparedResponse.verify = function verify(message) { + ExecuteResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.result != null && message.hasOwnProperty("result")) { + let error = $root.query.QueryResult.verify(message.result); + if (error) + return "result." + error; + } return null; }; /** - * Creates a RollbackPreparedResponse message from a plain object. Also converts values to their respective internal types. + * Creates an ExecuteResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.RollbackPreparedResponse + * @memberof query.ExecuteResponse * @static * @param {Object.} object Plain object - * @returns {query.RollbackPreparedResponse} RollbackPreparedResponse + * @returns {query.ExecuteResponse} ExecuteResponse */ - RollbackPreparedResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.RollbackPreparedResponse) + ExecuteResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.ExecuteResponse) return object; - return new $root.query.RollbackPreparedResponse(); + let message = new $root.query.ExecuteResponse(); + if (object.result != null) { + if (typeof object.result !== "object") + throw TypeError(".query.ExecuteResponse.result: object expected"); + message.result = $root.query.QueryResult.fromObject(object.result); + } + return message; }; /** - * Creates a plain object from a RollbackPreparedResponse message. Also converts values to other types if specified. + * Creates a plain object from an ExecuteResponse message. Also converts values to other types if specified. * @function toObject - * @memberof query.RollbackPreparedResponse + * @memberof query.ExecuteResponse * @static - * @param {query.RollbackPreparedResponse} message RollbackPreparedResponse + * @param {query.ExecuteResponse} message ExecuteResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RollbackPreparedResponse.toObject = function toObject() { - return {}; + ExecuteResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.result = null; + if (message.result != null && message.hasOwnProperty("result")) + object.result = $root.query.QueryResult.toObject(message.result, options); + return object; }; /** - * Converts this RollbackPreparedResponse to JSON. + * Converts this ExecuteResponse to JSON. 
* @function toJSON - * @memberof query.RollbackPreparedResponse + * @memberof query.ExecuteResponse * @instance * @returns {Object.} JSON object */ - RollbackPreparedResponse.prototype.toJSON = function toJSON() { + ExecuteResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RollbackPreparedResponse + * Gets the default type url for ExecuteResponse * @function getTypeUrl - * @memberof query.RollbackPreparedResponse + * @memberof query.ExecuteResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RollbackPreparedResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ExecuteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.RollbackPreparedResponse"; + return typeUrlPrefix + "/query.ExecuteResponse"; }; - return RollbackPreparedResponse; + return ExecuteResponse; })(); - query.CreateTransactionRequest = (function() { + query.ResultWithError = (function() { /** - * Properties of a CreateTransactionRequest. + * Properties of a ResultWithError. 
* @memberof query - * @interface ICreateTransactionRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] CreateTransactionRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] CreateTransactionRequest immediate_caller_id - * @property {query.ITarget|null} [target] CreateTransactionRequest target - * @property {string|null} [dtid] CreateTransactionRequest dtid - * @property {Array.|null} [participants] CreateTransactionRequest participants + * @interface IResultWithError + * @property {vtrpc.IRPCError|null} [error] ResultWithError error + * @property {query.IQueryResult|null} [result] ResultWithError result */ /** - * Constructs a new CreateTransactionRequest. + * Constructs a new ResultWithError. * @memberof query - * @classdesc Represents a CreateTransactionRequest. - * @implements ICreateTransactionRequest + * @classdesc Represents a ResultWithError. + * @implements IResultWithError * @constructor - * @param {query.ICreateTransactionRequest=} [properties] Properties to set + * @param {query.IResultWithError=} [properties] Properties to set */ - function CreateTransactionRequest(properties) { - this.participants = []; + function ResultWithError(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -80971,134 +82961,89 @@ export const query = $root.query = (() => { } /** - * CreateTransactionRequest effective_caller_id. - * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.CreateTransactionRequest - * @instance - */ - CreateTransactionRequest.prototype.effective_caller_id = null; - - /** - * CreateTransactionRequest immediate_caller_id. - * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.CreateTransactionRequest - * @instance - */ - CreateTransactionRequest.prototype.immediate_caller_id = null; - - /** - * CreateTransactionRequest target. 
- * @member {query.ITarget|null|undefined} target - * @memberof query.CreateTransactionRequest - * @instance - */ - CreateTransactionRequest.prototype.target = null; - - /** - * CreateTransactionRequest dtid. - * @member {string} dtid - * @memberof query.CreateTransactionRequest + * ResultWithError error. + * @member {vtrpc.IRPCError|null|undefined} error + * @memberof query.ResultWithError * @instance */ - CreateTransactionRequest.prototype.dtid = ""; + ResultWithError.prototype.error = null; /** - * CreateTransactionRequest participants. - * @member {Array.} participants - * @memberof query.CreateTransactionRequest + * ResultWithError result. + * @member {query.IQueryResult|null|undefined} result + * @memberof query.ResultWithError * @instance */ - CreateTransactionRequest.prototype.participants = $util.emptyArray; + ResultWithError.prototype.result = null; /** - * Creates a new CreateTransactionRequest instance using the specified properties. + * Creates a new ResultWithError instance using the specified properties. * @function create - * @memberof query.CreateTransactionRequest + * @memberof query.ResultWithError * @static - * @param {query.ICreateTransactionRequest=} [properties] Properties to set - * @returns {query.CreateTransactionRequest} CreateTransactionRequest instance + * @param {query.IResultWithError=} [properties] Properties to set + * @returns {query.ResultWithError} ResultWithError instance */ - CreateTransactionRequest.create = function create(properties) { - return new CreateTransactionRequest(properties); + ResultWithError.create = function create(properties) { + return new ResultWithError(properties); }; /** - * Encodes the specified CreateTransactionRequest message. Does not implicitly {@link query.CreateTransactionRequest.verify|verify} messages. + * Encodes the specified ResultWithError message. Does not implicitly {@link query.ResultWithError.verify|verify} messages. 
* @function encode - * @memberof query.CreateTransactionRequest + * @memberof query.ResultWithError * @static - * @param {query.ICreateTransactionRequest} message CreateTransactionRequest message or plain object to encode + * @param {query.IResultWithError} message ResultWithError message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CreateTransactionRequest.encode = function encode(message, writer) { + ResultWithError.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) - $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) - $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.target != null && Object.hasOwnProperty.call(message, "target")) - $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.dtid != null && Object.hasOwnProperty.call(message, "dtid")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.dtid); - if (message.participants != null && message.participants.length) - for (let i = 0; i < message.participants.length; ++i) - $root.query.Target.encode(message.participants[i], writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.error != null && Object.hasOwnProperty.call(message, "error")) + $root.vtrpc.RPCError.encode(message.error, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.result != null && Object.hasOwnProperty.call(message, "result")) + $root.query.QueryResult.encode(message.result, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); return writer; }; /** - * Encodes 
the specified CreateTransactionRequest message, length delimited. Does not implicitly {@link query.CreateTransactionRequest.verify|verify} messages. + * Encodes the specified ResultWithError message, length delimited. Does not implicitly {@link query.ResultWithError.verify|verify} messages. * @function encodeDelimited - * @memberof query.CreateTransactionRequest + * @memberof query.ResultWithError * @static - * @param {query.ICreateTransactionRequest} message CreateTransactionRequest message or plain object to encode + * @param {query.IResultWithError} message ResultWithError message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CreateTransactionRequest.encodeDelimited = function encodeDelimited(message, writer) { + ResultWithError.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a CreateTransactionRequest message from the specified reader or buffer. + * Decodes a ResultWithError message from the specified reader or buffer. * @function decode - * @memberof query.CreateTransactionRequest + * @memberof query.ResultWithError * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.CreateTransactionRequest} CreateTransactionRequest + * @returns {query.ResultWithError} ResultWithError * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateTransactionRequest.decode = function decode(reader, length) { + ResultWithError.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.CreateTransactionRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.ResultWithError(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); + message.error = $root.vtrpc.RPCError.decode(reader, reader.uint32()); break; } case 2: { - message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); - break; - } - case 3: { - message.target = $root.query.Target.decode(reader, reader.uint32()); - break; - } - case 4: { - message.dtid = reader.string(); - break; - } - case 5: { - if (!(message.participants && message.participants.length)) - message.participants = []; - message.participants.push($root.query.Target.decode(reader, reader.uint32())); + message.result = $root.query.QueryResult.decode(reader, reader.uint32()); break; } default: @@ -81110,187 +83055,147 @@ export const query = $root.query = (() => { }; /** - * Decodes a CreateTransactionRequest message from the specified reader or buffer, length delimited. + * Decodes a ResultWithError message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.CreateTransactionRequest + * @memberof query.ResultWithError * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.CreateTransactionRequest} CreateTransactionRequest + * @returns {query.ResultWithError} ResultWithError * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateTransactionRequest.decodeDelimited = function decodeDelimited(reader) { + ResultWithError.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a CreateTransactionRequest message. + * Verifies a ResultWithError message. 
* @function verify - * @memberof query.CreateTransactionRequest + * @memberof query.ResultWithError * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - CreateTransactionRequest.verify = function verify(message) { + ResultWithError.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { - let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); - if (error) - return "effective_caller_id." + error; - } - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { - let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); + if (message.error != null && message.hasOwnProperty("error")) { + let error = $root.vtrpc.RPCError.verify(message.error); if (error) - return "immediate_caller_id." + error; + return "error." + error; } - if (message.target != null && message.hasOwnProperty("target")) { - let error = $root.query.Target.verify(message.target); + if (message.result != null && message.hasOwnProperty("result")) { + let error = $root.query.QueryResult.verify(message.result); if (error) - return "target." + error; - } - if (message.dtid != null && message.hasOwnProperty("dtid")) - if (!$util.isString(message.dtid)) - return "dtid: string expected"; - if (message.participants != null && message.hasOwnProperty("participants")) { - if (!Array.isArray(message.participants)) - return "participants: array expected"; - for (let i = 0; i < message.participants.length; ++i) { - let error = $root.query.Target.verify(message.participants[i]); - if (error) - return "participants." + error; - } + return "result." + error; } return null; }; /** - * Creates a CreateTransactionRequest message from a plain object. Also converts values to their respective internal types. 
+ * Creates a ResultWithError message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.CreateTransactionRequest + * @memberof query.ResultWithError * @static * @param {Object.} object Plain object - * @returns {query.CreateTransactionRequest} CreateTransactionRequest + * @returns {query.ResultWithError} ResultWithError */ - CreateTransactionRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.CreateTransactionRequest) + ResultWithError.fromObject = function fromObject(object) { + if (object instanceof $root.query.ResultWithError) return object; - let message = new $root.query.CreateTransactionRequest(); - if (object.effective_caller_id != null) { - if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.CreateTransactionRequest.effective_caller_id: object expected"); - message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); - } - if (object.immediate_caller_id != null) { - if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.CreateTransactionRequest.immediate_caller_id: object expected"); - message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); - } - if (object.target != null) { - if (typeof object.target !== "object") - throw TypeError(".query.CreateTransactionRequest.target: object expected"); - message.target = $root.query.Target.fromObject(object.target); + let message = new $root.query.ResultWithError(); + if (object.error != null) { + if (typeof object.error !== "object") + throw TypeError(".query.ResultWithError.error: object expected"); + message.error = $root.vtrpc.RPCError.fromObject(object.error); } - if (object.dtid != null) - message.dtid = String(object.dtid); - if (object.participants) { - if (!Array.isArray(object.participants)) - throw TypeError(".query.CreateTransactionRequest.participants: array expected"); - 
message.participants = []; - for (let i = 0; i < object.participants.length; ++i) { - if (typeof object.participants[i] !== "object") - throw TypeError(".query.CreateTransactionRequest.participants: object expected"); - message.participants[i] = $root.query.Target.fromObject(object.participants[i]); - } + if (object.result != null) { + if (typeof object.result !== "object") + throw TypeError(".query.ResultWithError.result: object expected"); + message.result = $root.query.QueryResult.fromObject(object.result); } return message; }; /** - * Creates a plain object from a CreateTransactionRequest message. Also converts values to other types if specified. + * Creates a plain object from a ResultWithError message. Also converts values to other types if specified. * @function toObject - * @memberof query.CreateTransactionRequest + * @memberof query.ResultWithError * @static - * @param {query.CreateTransactionRequest} message CreateTransactionRequest + * @param {query.ResultWithError} message ResultWithError * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - CreateTransactionRequest.toObject = function toObject(message, options) { + ResultWithError.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.participants = []; if (options.defaults) { - object.effective_caller_id = null; - object.immediate_caller_id = null; - object.target = null; - object.dtid = ""; - } - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) - object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) - object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); - if (message.target != null && message.hasOwnProperty("target")) - object.target = 
$root.query.Target.toObject(message.target, options); - if (message.dtid != null && message.hasOwnProperty("dtid")) - object.dtid = message.dtid; - if (message.participants && message.participants.length) { - object.participants = []; - for (let j = 0; j < message.participants.length; ++j) - object.participants[j] = $root.query.Target.toObject(message.participants[j], options); + object.error = null; + object.result = null; } + if (message.error != null && message.hasOwnProperty("error")) + object.error = $root.vtrpc.RPCError.toObject(message.error, options); + if (message.result != null && message.hasOwnProperty("result")) + object.result = $root.query.QueryResult.toObject(message.result, options); return object; }; /** - * Converts this CreateTransactionRequest to JSON. + * Converts this ResultWithError to JSON. * @function toJSON - * @memberof query.CreateTransactionRequest + * @memberof query.ResultWithError * @instance * @returns {Object.} JSON object */ - CreateTransactionRequest.prototype.toJSON = function toJSON() { + ResultWithError.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for CreateTransactionRequest + * Gets the default type url for ResultWithError * @function getTypeUrl - * @memberof query.CreateTransactionRequest + * @memberof query.ResultWithError * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - CreateTransactionRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ResultWithError.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.CreateTransactionRequest"; + return typeUrlPrefix + "/query.ResultWithError"; }; - return CreateTransactionRequest; + return ResultWithError; })(); - query.CreateTransactionResponse = (function() { + 
query.StreamExecuteRequest = (function() { /** - * Properties of a CreateTransactionResponse. + * Properties of a StreamExecuteRequest. * @memberof query - * @interface ICreateTransactionResponse + * @interface IStreamExecuteRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] StreamExecuteRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] StreamExecuteRequest immediate_caller_id + * @property {query.ITarget|null} [target] StreamExecuteRequest target + * @property {query.IBoundQuery|null} [query] StreamExecuteRequest query + * @property {query.IExecuteOptions|null} [options] StreamExecuteRequest options + * @property {number|Long|null} [transaction_id] StreamExecuteRequest transaction_id + * @property {number|Long|null} [reserved_id] StreamExecuteRequest reserved_id */ /** - * Constructs a new CreateTransactionResponse. + * Constructs a new StreamExecuteRequest. * @memberof query - * @classdesc Represents a CreateTransactionResponse. - * @implements ICreateTransactionResponse + * @classdesc Represents a StreamExecuteRequest. + * @implements IStreamExecuteRequest * @constructor - * @param {query.ICreateTransactionResponse=} [properties] Properties to set + * @param {query.IStreamExecuteRequest=} [properties] Properties to set */ - function CreateTransactionResponse(properties) { + function StreamExecuteRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -81298,247 +83203,83 @@ export const query = $root.query = (() => { } /** - * Creates a new CreateTransactionResponse instance using the specified properties. - * @function create - * @memberof query.CreateTransactionResponse - * @static - * @param {query.ICreateTransactionResponse=} [properties] Properties to set - * @returns {query.CreateTransactionResponse} CreateTransactionResponse instance + * StreamExecuteRequest effective_caller_id. 
+ * @member {vtrpc.ICallerID|null|undefined} effective_caller_id + * @memberof query.StreamExecuteRequest + * @instance */ - CreateTransactionResponse.create = function create(properties) { - return new CreateTransactionResponse(properties); - }; + StreamExecuteRequest.prototype.effective_caller_id = null; /** - * Encodes the specified CreateTransactionResponse message. Does not implicitly {@link query.CreateTransactionResponse.verify|verify} messages. - * @function encode - * @memberof query.CreateTransactionResponse - * @static - * @param {query.ICreateTransactionResponse} message CreateTransactionResponse message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer + * StreamExecuteRequest immediate_caller_id. + * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id + * @memberof query.StreamExecuteRequest + * @instance */ - CreateTransactionResponse.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - return writer; - }; + StreamExecuteRequest.prototype.immediate_caller_id = null; /** - * Encodes the specified CreateTransactionResponse message, length delimited. Does not implicitly {@link query.CreateTransactionResponse.verify|verify} messages. - * @function encodeDelimited - * @memberof query.CreateTransactionResponse - * @static - * @param {query.ICreateTransactionResponse} message CreateTransactionResponse message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - CreateTransactionResponse.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; - - /** - * Decodes a CreateTransactionResponse message from the specified reader or buffer. 
- * @function decode - * @memberof query.CreateTransactionResponse - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {query.CreateTransactionResponse} CreateTransactionResponse - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - CreateTransactionResponse.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.CreateTransactionResponse(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; - - /** - * Decodes a CreateTransactionResponse message from the specified reader or buffer, length delimited. - * @function decodeDelimited - * @memberof query.CreateTransactionResponse - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.CreateTransactionResponse} CreateTransactionResponse - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - CreateTransactionResponse.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; - - /** - * Verifies a CreateTransactionResponse message. 
- * @function verify - * @memberof query.CreateTransactionResponse - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - CreateTransactionResponse.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - return null; - }; - - /** - * Creates a CreateTransactionResponse message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof query.CreateTransactionResponse - * @static - * @param {Object.} object Plain object - * @returns {query.CreateTransactionResponse} CreateTransactionResponse - */ - CreateTransactionResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.CreateTransactionResponse) - return object; - return new $root.query.CreateTransactionResponse(); - }; - - /** - * Creates a plain object from a CreateTransactionResponse message. Also converts values to other types if specified. - * @function toObject - * @memberof query.CreateTransactionResponse - * @static - * @param {query.CreateTransactionResponse} message CreateTransactionResponse - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - CreateTransactionResponse.toObject = function toObject() { - return {}; - }; - - /** - * Converts this CreateTransactionResponse to JSON. 
- * @function toJSON - * @memberof query.CreateTransactionResponse - * @instance - * @returns {Object.} JSON object - */ - CreateTransactionResponse.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - /** - * Gets the default type url for CreateTransactionResponse - * @function getTypeUrl - * @memberof query.CreateTransactionResponse - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - CreateTransactionResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/query.CreateTransactionResponse"; - }; - - return CreateTransactionResponse; - })(); - - query.StartCommitRequest = (function() { - - /** - * Properties of a StartCommitRequest. - * @memberof query - * @interface IStartCommitRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] StartCommitRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] StartCommitRequest immediate_caller_id - * @property {query.ITarget|null} [target] StartCommitRequest target - * @property {number|Long|null} [transaction_id] StartCommitRequest transaction_id - * @property {string|null} [dtid] StartCommitRequest dtid - */ - - /** - * Constructs a new StartCommitRequest. - * @memberof query - * @classdesc Represents a StartCommitRequest. - * @implements IStartCommitRequest - * @constructor - * @param {query.IStartCommitRequest=} [properties] Properties to set - */ - function StartCommitRequest(properties) { - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } - - /** - * StartCommitRequest effective_caller_id. 
- * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.StartCommitRequest + * StreamExecuteRequest target. + * @member {query.ITarget|null|undefined} target + * @memberof query.StreamExecuteRequest * @instance */ - StartCommitRequest.prototype.effective_caller_id = null; + StreamExecuteRequest.prototype.target = null; /** - * StartCommitRequest immediate_caller_id. - * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.StartCommitRequest + * StreamExecuteRequest query. + * @member {query.IBoundQuery|null|undefined} query + * @memberof query.StreamExecuteRequest * @instance */ - StartCommitRequest.prototype.immediate_caller_id = null; + StreamExecuteRequest.prototype.query = null; /** - * StartCommitRequest target. - * @member {query.ITarget|null|undefined} target - * @memberof query.StartCommitRequest + * StreamExecuteRequest options. + * @member {query.IExecuteOptions|null|undefined} options + * @memberof query.StreamExecuteRequest * @instance */ - StartCommitRequest.prototype.target = null; + StreamExecuteRequest.prototype.options = null; /** - * StartCommitRequest transaction_id. + * StreamExecuteRequest transaction_id. * @member {number|Long} transaction_id - * @memberof query.StartCommitRequest + * @memberof query.StreamExecuteRequest * @instance */ - StartCommitRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + StreamExecuteRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * StartCommitRequest dtid. - * @member {string} dtid - * @memberof query.StartCommitRequest + * StreamExecuteRequest reserved_id. + * @member {number|Long} reserved_id + * @memberof query.StreamExecuteRequest * @instance */ - StartCommitRequest.prototype.dtid = ""; + StreamExecuteRequest.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * Creates a new StartCommitRequest instance using the specified properties. 
+ * Creates a new StreamExecuteRequest instance using the specified properties. * @function create - * @memberof query.StartCommitRequest + * @memberof query.StreamExecuteRequest * @static - * @param {query.IStartCommitRequest=} [properties] Properties to set - * @returns {query.StartCommitRequest} StartCommitRequest instance + * @param {query.IStreamExecuteRequest=} [properties] Properties to set + * @returns {query.StreamExecuteRequest} StreamExecuteRequest instance */ - StartCommitRequest.create = function create(properties) { - return new StartCommitRequest(properties); + StreamExecuteRequest.create = function create(properties) { + return new StreamExecuteRequest(properties); }; /** - * Encodes the specified StartCommitRequest message. Does not implicitly {@link query.StartCommitRequest.verify|verify} messages. + * Encodes the specified StreamExecuteRequest message. Does not implicitly {@link query.StreamExecuteRequest.verify|verify} messages. * @function encode - * @memberof query.StartCommitRequest + * @memberof query.StreamExecuteRequest * @static - * @param {query.IStartCommitRequest} message StartCommitRequest message or plain object to encode + * @param {query.IStreamExecuteRequest} message StreamExecuteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StartCommitRequest.encode = function encode(message, writer) { + StreamExecuteRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) @@ -81547,41 +83288,45 @@ export const query = $root.query = (() => { $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); if (message.target != null && Object.hasOwnProperty.call(message, "target")) $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 
=*/26).fork()).ldelim(); + if (message.query != null && Object.hasOwnProperty.call(message, "query")) + $root.query.BoundQuery.encode(message.query, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.options != null && Object.hasOwnProperty.call(message, "options")) + $root.query.ExecuteOptions.encode(message.options, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) - writer.uint32(/* id 4, wireType 0 =*/32).int64(message.transaction_id); - if (message.dtid != null && Object.hasOwnProperty.call(message, "dtid")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.dtid); + writer.uint32(/* id 6, wireType 0 =*/48).int64(message.transaction_id); + if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) + writer.uint32(/* id 7, wireType 0 =*/56).int64(message.reserved_id); return writer; }; /** - * Encodes the specified StartCommitRequest message, length delimited. Does not implicitly {@link query.StartCommitRequest.verify|verify} messages. + * Encodes the specified StreamExecuteRequest message, length delimited. Does not implicitly {@link query.StreamExecuteRequest.verify|verify} messages. * @function encodeDelimited - * @memberof query.StartCommitRequest + * @memberof query.StreamExecuteRequest * @static - * @param {query.IStartCommitRequest} message StartCommitRequest message or plain object to encode + * @param {query.IStreamExecuteRequest} message StreamExecuteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StartCommitRequest.encodeDelimited = function encodeDelimited(message, writer) { + StreamExecuteRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a StartCommitRequest message from the specified reader or buffer. 
+ * Decodes a StreamExecuteRequest message from the specified reader or buffer. * @function decode - * @memberof query.StartCommitRequest + * @memberof query.StreamExecuteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.StartCommitRequest} StartCommitRequest + * @returns {query.StreamExecuteRequest} StreamExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StartCommitRequest.decode = function decode(reader, length) { + StreamExecuteRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.StartCommitRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.StreamExecuteRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -81598,11 +83343,19 @@ export const query = $root.query = (() => { break; } case 4: { - message.transaction_id = reader.int64(); + message.query = $root.query.BoundQuery.decode(reader, reader.uint32()); break; } case 5: { - message.dtid = reader.string(); + message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); + break; + } + case 6: { + message.transaction_id = reader.int64(); + break; + } + case 7: { + message.reserved_id = reader.int64(); break; } default: @@ -81614,30 +83367,30 @@ export const query = $root.query = (() => { }; /** - * Decodes a StartCommitRequest message from the specified reader or buffer, length delimited. + * Decodes a StreamExecuteRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.StartCommitRequest + * @memberof query.StreamExecuteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.StartCommitRequest} StartCommitRequest + * @returns {query.StreamExecuteRequest} StreamExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StartCommitRequest.decodeDelimited = function decodeDelimited(reader) { + StreamExecuteRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a StartCommitRequest message. + * Verifies a StreamExecuteRequest message. * @function verify - * @memberof query.StartCommitRequest + * @memberof query.StreamExecuteRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - StartCommitRequest.verify = function verify(message) { + StreamExecuteRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { @@ -81655,42 +83408,62 @@ export const query = $root.query = (() => { if (error) return "target." + error; } + if (message.query != null && message.hasOwnProperty("query")) { + let error = $root.query.BoundQuery.verify(message.query); + if (error) + return "query." + error; + } + if (message.options != null && message.hasOwnProperty("options")) { + let error = $root.query.ExecuteOptions.verify(message.options); + if (error) + return "options." 
+ error; + } if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) return "transaction_id: integer|Long expected"; - if (message.dtid != null && message.hasOwnProperty("dtid")) - if (!$util.isString(message.dtid)) - return "dtid: string expected"; + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) + return "reserved_id: integer|Long expected"; return null; }; /** - * Creates a StartCommitRequest message from a plain object. Also converts values to their respective internal types. + * Creates a StreamExecuteRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.StartCommitRequest + * @memberof query.StreamExecuteRequest * @static * @param {Object.} object Plain object - * @returns {query.StartCommitRequest} StartCommitRequest + * @returns {query.StreamExecuteRequest} StreamExecuteRequest */ - StartCommitRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.StartCommitRequest) + StreamExecuteRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.StreamExecuteRequest) return object; - let message = new $root.query.StartCommitRequest(); + let message = new $root.query.StreamExecuteRequest(); if (object.effective_caller_id != null) { if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.StartCommitRequest.effective_caller_id: object expected"); + throw TypeError(".query.StreamExecuteRequest.effective_caller_id: object expected"); message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); } if 
(object.immediate_caller_id != null) { if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.StartCommitRequest.immediate_caller_id: object expected"); + throw TypeError(".query.StreamExecuteRequest.immediate_caller_id: object expected"); message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); } if (object.target != null) { if (typeof object.target !== "object") - throw TypeError(".query.StartCommitRequest.target: object expected"); + throw TypeError(".query.StreamExecuteRequest.target: object expected"); message.target = $root.query.Target.fromObject(object.target); } + if (object.query != null) { + if (typeof object.query !== "object") + throw TypeError(".query.StreamExecuteRequest.query: object expected"); + message.query = $root.query.BoundQuery.fromObject(object.query); + } + if (object.options != null) { + if (typeof object.options !== "object") + throw TypeError(".query.StreamExecuteRequest.options: object expected"); + message.options = $root.query.ExecuteOptions.fromObject(object.options); + } if (object.transaction_id != null) if ($util.Long) (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; @@ -81700,21 +83473,28 @@ export const query = $root.query = (() => { message.transaction_id = object.transaction_id; else if (typeof object.transaction_id === "object") message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); - if (object.dtid != null) - message.dtid = String(object.dtid); + if (object.reserved_id != null) + if ($util.Long) + (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; + else if (typeof object.reserved_id === "string") + message.reserved_id = parseInt(object.reserved_id, 10); + else if (typeof object.reserved_id === "number") + message.reserved_id = object.reserved_id; + else if (typeof object.reserved_id === "object") + message.reserved_id = 
new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); return message; }; /** - * Creates a plain object from a StartCommitRequest message. Also converts values to other types if specified. + * Creates a plain object from a StreamExecuteRequest message. Also converts values to other types if specified. * @function toObject - * @memberof query.StartCommitRequest + * @memberof query.StreamExecuteRequest * @static - * @param {query.StartCommitRequest} message StartCommitRequest + * @param {query.StreamExecuteRequest} message StreamExecuteRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - StartCommitRequest.toObject = function toObject(message, options) { + StreamExecuteRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; @@ -81722,12 +83502,18 @@ export const query = $root.query = (() => { object.effective_caller_id = null; object.immediate_caller_id = null; object.target = null; + object.query = null; + object.options = null; if ($util.Long) { let long = new $util.Long(0, 0, false); object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; } else object.transaction_id = options.longs === String ? "0" : 0; - object.dtid = ""; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.reserved_id = options.longs === String ? 
"0" : 0; } if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); @@ -81735,62 +83521,70 @@ export const query = $root.query = (() => { object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); if (message.target != null && message.hasOwnProperty("target")) object.target = $root.query.Target.toObject(message.target, options); + if (message.query != null && message.hasOwnProperty("query")) + object.query = $root.query.BoundQuery.toObject(message.query, options); + if (message.options != null && message.hasOwnProperty("options")) + object.options = $root.query.ExecuteOptions.toObject(message.options, options); if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) if (typeof message.transaction_id === "number") object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; else object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; - if (message.dtid != null && message.hasOwnProperty("dtid")) - object.dtid = message.dtid; + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (typeof message.reserved_id === "number") + object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; + else + object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; return object; }; /** - * Converts this StartCommitRequest to JSON. + * Converts this StreamExecuteRequest to JSON. 
* @function toJSON - * @memberof query.StartCommitRequest + * @memberof query.StreamExecuteRequest * @instance * @returns {Object.} JSON object */ - StartCommitRequest.prototype.toJSON = function toJSON() { + StreamExecuteRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for StartCommitRequest + * Gets the default type url for StreamExecuteRequest * @function getTypeUrl - * @memberof query.StartCommitRequest + * @memberof query.StreamExecuteRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - StartCommitRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + StreamExecuteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.StartCommitRequest"; + return typeUrlPrefix + "/query.StreamExecuteRequest"; }; - return StartCommitRequest; + return StreamExecuteRequest; })(); - query.StartCommitResponse = (function() { + query.StreamExecuteResponse = (function() { /** - * Properties of a StartCommitResponse. + * Properties of a StreamExecuteResponse. * @memberof query - * @interface IStartCommitResponse + * @interface IStreamExecuteResponse + * @property {query.IQueryResult|null} [result] StreamExecuteResponse result */ /** - * Constructs a new StartCommitResponse. + * Constructs a new StreamExecuteResponse. * @memberof query - * @classdesc Represents a StartCommitResponse. - * @implements IStartCommitResponse + * @classdesc Represents a StreamExecuteResponse. 
+ * @implements IStreamExecuteResponse * @constructor - * @param {query.IStartCommitResponse=} [properties] Properties to set + * @param {query.IStreamExecuteResponse=} [properties] Properties to set */ - function StartCommitResponse(properties) { + function StreamExecuteResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -81798,63 +83592,77 @@ export const query = $root.query = (() => { } /** - * Creates a new StartCommitResponse instance using the specified properties. + * StreamExecuteResponse result. + * @member {query.IQueryResult|null|undefined} result + * @memberof query.StreamExecuteResponse + * @instance + */ + StreamExecuteResponse.prototype.result = null; + + /** + * Creates a new StreamExecuteResponse instance using the specified properties. * @function create - * @memberof query.StartCommitResponse + * @memberof query.StreamExecuteResponse * @static - * @param {query.IStartCommitResponse=} [properties] Properties to set - * @returns {query.StartCommitResponse} StartCommitResponse instance + * @param {query.IStreamExecuteResponse=} [properties] Properties to set + * @returns {query.StreamExecuteResponse} StreamExecuteResponse instance */ - StartCommitResponse.create = function create(properties) { - return new StartCommitResponse(properties); + StreamExecuteResponse.create = function create(properties) { + return new StreamExecuteResponse(properties); }; /** - * Encodes the specified StartCommitResponse message. Does not implicitly {@link query.StartCommitResponse.verify|verify} messages. + * Encodes the specified StreamExecuteResponse message. Does not implicitly {@link query.StreamExecuteResponse.verify|verify} messages. 
* @function encode - * @memberof query.StartCommitResponse + * @memberof query.StreamExecuteResponse * @static - * @param {query.IStartCommitResponse} message StartCommitResponse message or plain object to encode + * @param {query.IStreamExecuteResponse} message StreamExecuteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StartCommitResponse.encode = function encode(message, writer) { + StreamExecuteResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.result != null && Object.hasOwnProperty.call(message, "result")) + $root.query.QueryResult.encode(message.result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified StartCommitResponse message, length delimited. Does not implicitly {@link query.StartCommitResponse.verify|verify} messages. + * Encodes the specified StreamExecuteResponse message, length delimited. Does not implicitly {@link query.StreamExecuteResponse.verify|verify} messages. * @function encodeDelimited - * @memberof query.StartCommitResponse + * @memberof query.StreamExecuteResponse * @static - * @param {query.IStartCommitResponse} message StartCommitResponse message or plain object to encode + * @param {query.IStreamExecuteResponse} message StreamExecuteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StartCommitResponse.encodeDelimited = function encodeDelimited(message, writer) { + StreamExecuteResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a StartCommitResponse message from the specified reader or buffer. + * Decodes a StreamExecuteResponse message from the specified reader or buffer. 
* @function decode - * @memberof query.StartCommitResponse + * @memberof query.StreamExecuteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.StartCommitResponse} StartCommitResponse + * @returns {query.StreamExecuteResponse} StreamExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StartCommitResponse.decode = function decode(reader, length) { + StreamExecuteResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.StartCommitResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.StreamExecuteResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.result = $root.query.QueryResult.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -81864,113 +83672,130 @@ export const query = $root.query = (() => { }; /** - * Decodes a StartCommitResponse message from the specified reader or buffer, length delimited. + * Decodes a StreamExecuteResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.StartCommitResponse + * @memberof query.StreamExecuteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.StartCommitResponse} StartCommitResponse + * @returns {query.StreamExecuteResponse} StreamExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StartCommitResponse.decodeDelimited = function decodeDelimited(reader) { + StreamExecuteResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a StartCommitResponse message. + * Verifies a StreamExecuteResponse message. * @function verify - * @memberof query.StartCommitResponse + * @memberof query.StreamExecuteResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - StartCommitResponse.verify = function verify(message) { + StreamExecuteResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.result != null && message.hasOwnProperty("result")) { + let error = $root.query.QueryResult.verify(message.result); + if (error) + return "result." + error; + } return null; }; /** - * Creates a StartCommitResponse message from a plain object. Also converts values to their respective internal types. + * Creates a StreamExecuteResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.StartCommitResponse + * @memberof query.StreamExecuteResponse * @static * @param {Object.} object Plain object - * @returns {query.StartCommitResponse} StartCommitResponse + * @returns {query.StreamExecuteResponse} StreamExecuteResponse */ - StartCommitResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.StartCommitResponse) + StreamExecuteResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.StreamExecuteResponse) return object; - return new $root.query.StartCommitResponse(); + let message = new $root.query.StreamExecuteResponse(); + if (object.result != null) { + if (typeof object.result !== "object") + throw TypeError(".query.StreamExecuteResponse.result: object expected"); + message.result = $root.query.QueryResult.fromObject(object.result); + } + return message; }; /** - * Creates a plain object from a StartCommitResponse message. Also converts values to other types if specified. + * Creates a plain object from a StreamExecuteResponse message. Also converts values to other types if specified. * @function toObject - * @memberof query.StartCommitResponse + * @memberof query.StreamExecuteResponse * @static - * @param {query.StartCommitResponse} message StartCommitResponse + * @param {query.StreamExecuteResponse} message StreamExecuteResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - StartCommitResponse.toObject = function toObject() { - return {}; + StreamExecuteResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.result = null; + if (message.result != null && message.hasOwnProperty("result")) + object.result = $root.query.QueryResult.toObject(message.result, options); + return object; }; /** - * Converts this StartCommitResponse to JSON. + * Converts this StreamExecuteResponse to JSON. 
* @function toJSON - * @memberof query.StartCommitResponse + * @memberof query.StreamExecuteResponse * @instance * @returns {Object.} JSON object */ - StartCommitResponse.prototype.toJSON = function toJSON() { + StreamExecuteResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for StartCommitResponse + * Gets the default type url for StreamExecuteResponse * @function getTypeUrl - * @memberof query.StartCommitResponse + * @memberof query.StreamExecuteResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - StartCommitResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + StreamExecuteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.StartCommitResponse"; + return typeUrlPrefix + "/query.StreamExecuteResponse"; }; - return StartCommitResponse; + return StreamExecuteResponse; })(); - query.SetRollbackRequest = (function() { + query.BeginRequest = (function() { /** - * Properties of a SetRollbackRequest. + * Properties of a BeginRequest. 
* @memberof query - * @interface ISetRollbackRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] SetRollbackRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] SetRollbackRequest immediate_caller_id - * @property {query.ITarget|null} [target] SetRollbackRequest target - * @property {number|Long|null} [transaction_id] SetRollbackRequest transaction_id - * @property {string|null} [dtid] SetRollbackRequest dtid + * @interface IBeginRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] BeginRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] BeginRequest immediate_caller_id + * @property {query.ITarget|null} [target] BeginRequest target + * @property {query.IExecuteOptions|null} [options] BeginRequest options */ /** - * Constructs a new SetRollbackRequest. + * Constructs a new BeginRequest. * @memberof query - * @classdesc Represents a SetRollbackRequest. - * @implements ISetRollbackRequest - * @constructor - * @param {query.ISetRollbackRequest=} [properties] Properties to set + * @classdesc Represents a BeginRequest. + * @implements IBeginRequest + * @constructor + * @param {query.IBeginRequest=} [properties] Properties to set */ - function SetRollbackRequest(properties) { + function BeginRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -81978,67 +83803,59 @@ export const query = $root.query = (() => { } /** - * SetRollbackRequest effective_caller_id. + * BeginRequest effective_caller_id. * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.SetRollbackRequest + * @memberof query.BeginRequest * @instance */ - SetRollbackRequest.prototype.effective_caller_id = null; + BeginRequest.prototype.effective_caller_id = null; /** - * SetRollbackRequest immediate_caller_id. + * BeginRequest immediate_caller_id. 
* @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.SetRollbackRequest + * @memberof query.BeginRequest * @instance */ - SetRollbackRequest.prototype.immediate_caller_id = null; + BeginRequest.prototype.immediate_caller_id = null; /** - * SetRollbackRequest target. + * BeginRequest target. * @member {query.ITarget|null|undefined} target - * @memberof query.SetRollbackRequest - * @instance - */ - SetRollbackRequest.prototype.target = null; - - /** - * SetRollbackRequest transaction_id. - * @member {number|Long} transaction_id - * @memberof query.SetRollbackRequest + * @memberof query.BeginRequest * @instance */ - SetRollbackRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + BeginRequest.prototype.target = null; /** - * SetRollbackRequest dtid. - * @member {string} dtid - * @memberof query.SetRollbackRequest + * BeginRequest options. + * @member {query.IExecuteOptions|null|undefined} options + * @memberof query.BeginRequest * @instance */ - SetRollbackRequest.prototype.dtid = ""; + BeginRequest.prototype.options = null; /** - * Creates a new SetRollbackRequest instance using the specified properties. + * Creates a new BeginRequest instance using the specified properties. * @function create - * @memberof query.SetRollbackRequest + * @memberof query.BeginRequest * @static - * @param {query.ISetRollbackRequest=} [properties] Properties to set - * @returns {query.SetRollbackRequest} SetRollbackRequest instance + * @param {query.IBeginRequest=} [properties] Properties to set + * @returns {query.BeginRequest} BeginRequest instance */ - SetRollbackRequest.create = function create(properties) { - return new SetRollbackRequest(properties); + BeginRequest.create = function create(properties) { + return new BeginRequest(properties); }; /** - * Encodes the specified SetRollbackRequest message. Does not implicitly {@link query.SetRollbackRequest.verify|verify} messages. 
+ * Encodes the specified BeginRequest message. Does not implicitly {@link query.BeginRequest.verify|verify} messages. * @function encode - * @memberof query.SetRollbackRequest + * @memberof query.BeginRequest * @static - * @param {query.ISetRollbackRequest} message SetRollbackRequest message or plain object to encode + * @param {query.IBeginRequest} message BeginRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetRollbackRequest.encode = function encode(message, writer) { + BeginRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) @@ -82047,41 +83864,39 @@ export const query = $root.query = (() => { $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); if (message.target != null && Object.hasOwnProperty.call(message, "target")) $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) - writer.uint32(/* id 4, wireType 0 =*/32).int64(message.transaction_id); - if (message.dtid != null && Object.hasOwnProperty.call(message, "dtid")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.dtid); + if (message.options != null && Object.hasOwnProperty.call(message, "options")) + $root.query.ExecuteOptions.encode(message.options, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; /** - * Encodes the specified SetRollbackRequest message, length delimited. Does not implicitly {@link query.SetRollbackRequest.verify|verify} messages. + * Encodes the specified BeginRequest message, length delimited. Does not implicitly {@link query.BeginRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof query.SetRollbackRequest + * @memberof query.BeginRequest * @static - * @param {query.ISetRollbackRequest} message SetRollbackRequest message or plain object to encode + * @param {query.IBeginRequest} message BeginRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetRollbackRequest.encodeDelimited = function encodeDelimited(message, writer) { + BeginRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SetRollbackRequest message from the specified reader or buffer. + * Decodes a BeginRequest message from the specified reader or buffer. * @function decode - * @memberof query.SetRollbackRequest + * @memberof query.BeginRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.SetRollbackRequest} SetRollbackRequest + * @returns {query.BeginRequest} BeginRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetRollbackRequest.decode = function decode(reader, length) { + BeginRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.SetRollbackRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.BeginRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -82098,11 +83913,7 @@ export const query = $root.query = (() => { break; } case 4: { - message.transaction_id = reader.int64(); - break; - } - case 5: { - message.dtid = reader.string(); + message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); break; } default: @@ -82114,30 +83925,30 @@ export const query = $root.query = (() => { }; /** - * Decodes a SetRollbackRequest message from the specified reader or buffer, length delimited. + * Decodes a BeginRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.SetRollbackRequest + * @memberof query.BeginRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.SetRollbackRequest} SetRollbackRequest + * @returns {query.BeginRequest} BeginRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetRollbackRequest.decodeDelimited = function decodeDelimited(reader) { + BeginRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SetRollbackRequest message. + * Verifies a BeginRequest message. 
* @function verify - * @memberof query.SetRollbackRequest + * @memberof query.BeginRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SetRollbackRequest.verify = function verify(message) { + BeginRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { @@ -82155,66 +83966,59 @@ export const query = $root.query = (() => { if (error) return "target." + error; } - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) - return "transaction_id: integer|Long expected"; - if (message.dtid != null && message.hasOwnProperty("dtid")) - if (!$util.isString(message.dtid)) - return "dtid: string expected"; + if (message.options != null && message.hasOwnProperty("options")) { + let error = $root.query.ExecuteOptions.verify(message.options); + if (error) + return "options." + error; + } return null; }; /** - * Creates a SetRollbackRequest message from a plain object. Also converts values to their respective internal types. + * Creates a BeginRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.SetRollbackRequest + * @memberof query.BeginRequest * @static * @param {Object.} object Plain object - * @returns {query.SetRollbackRequest} SetRollbackRequest + * @returns {query.BeginRequest} BeginRequest */ - SetRollbackRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.SetRollbackRequest) + BeginRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.BeginRequest) return object; - let message = new $root.query.SetRollbackRequest(); + let message = new $root.query.BeginRequest(); if (object.effective_caller_id != null) { if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.SetRollbackRequest.effective_caller_id: object expected"); + throw TypeError(".query.BeginRequest.effective_caller_id: object expected"); message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); } if (object.immediate_caller_id != null) { if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.SetRollbackRequest.immediate_caller_id: object expected"); + throw TypeError(".query.BeginRequest.immediate_caller_id: object expected"); message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); } if (object.target != null) { if (typeof object.target !== "object") - throw TypeError(".query.SetRollbackRequest.target: object expected"); + throw TypeError(".query.BeginRequest.target: object expected"); message.target = $root.query.Target.fromObject(object.target); } - if (object.transaction_id != null) - if ($util.Long) - (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; - else if (typeof object.transaction_id === "string") - message.transaction_id = parseInt(object.transaction_id, 10); - else if (typeof object.transaction_id === "number") - message.transaction_id = object.transaction_id; - else if (typeof object.transaction_id === "object") 
- message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); - if (object.dtid != null) - message.dtid = String(object.dtid); + if (object.options != null) { + if (typeof object.options !== "object") + throw TypeError(".query.BeginRequest.options: object expected"); + message.options = $root.query.ExecuteOptions.fromObject(object.options); + } return message; }; /** - * Creates a plain object from a SetRollbackRequest message. Also converts values to other types if specified. + * Creates a plain object from a BeginRequest message. Also converts values to other types if specified. * @function toObject - * @memberof query.SetRollbackRequest + * @memberof query.BeginRequest * @static - * @param {query.SetRollbackRequest} message SetRollbackRequest + * @param {query.BeginRequest} message BeginRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SetRollbackRequest.toObject = function toObject(message, options) { + BeginRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; @@ -82222,12 +84026,7 @@ export const query = $root.query = (() => { object.effective_caller_id = null; object.immediate_caller_id = null; object.target = null; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.transaction_id = options.longs === String ? 
"0" : 0; - object.dtid = ""; + object.options = null; } if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); @@ -82235,62 +84034,60 @@ export const query = $root.query = (() => { object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); if (message.target != null && message.hasOwnProperty("target")) object.target = $root.query.Target.toObject(message.target, options); - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (typeof message.transaction_id === "number") - object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; - else - object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; - if (message.dtid != null && message.hasOwnProperty("dtid")) - object.dtid = message.dtid; + if (message.options != null && message.hasOwnProperty("options")) + object.options = $root.query.ExecuteOptions.toObject(message.options, options); return object; }; /** - * Converts this SetRollbackRequest to JSON. + * Converts this BeginRequest to JSON. 
* @function toJSON - * @memberof query.SetRollbackRequest + * @memberof query.BeginRequest * @instance * @returns {Object.} JSON object */ - SetRollbackRequest.prototype.toJSON = function toJSON() { + BeginRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SetRollbackRequest + * Gets the default type url for BeginRequest * @function getTypeUrl - * @memberof query.SetRollbackRequest + * @memberof query.BeginRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SetRollbackRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + BeginRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.SetRollbackRequest"; + return typeUrlPrefix + "/query.BeginRequest"; }; - return SetRollbackRequest; + return BeginRequest; })(); - query.SetRollbackResponse = (function() { + query.BeginResponse = (function() { /** - * Properties of a SetRollbackResponse. + * Properties of a BeginResponse. * @memberof query - * @interface ISetRollbackResponse + * @interface IBeginResponse + * @property {number|Long|null} [transaction_id] BeginResponse transaction_id + * @property {topodata.ITabletAlias|null} [tablet_alias] BeginResponse tablet_alias + * @property {string|null} [session_state_changes] BeginResponse session_state_changes */ /** - * Constructs a new SetRollbackResponse. + * Constructs a new BeginResponse. * @memberof query - * @classdesc Represents a SetRollbackResponse. - * @implements ISetRollbackResponse + * @classdesc Represents a BeginResponse. 
+ * @implements IBeginResponse * @constructor - * @param {query.ISetRollbackResponse=} [properties] Properties to set + * @param {query.IBeginResponse=} [properties] Properties to set */ - function SetRollbackResponse(properties) { + function BeginResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -82298,63 +84095,105 @@ export const query = $root.query = (() => { } /** - * Creates a new SetRollbackResponse instance using the specified properties. + * BeginResponse transaction_id. + * @member {number|Long} transaction_id + * @memberof query.BeginResponse + * @instance + */ + BeginResponse.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * BeginResponse tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof query.BeginResponse + * @instance + */ + BeginResponse.prototype.tablet_alias = null; + + /** + * BeginResponse session_state_changes. + * @member {string} session_state_changes + * @memberof query.BeginResponse + * @instance + */ + BeginResponse.prototype.session_state_changes = ""; + + /** + * Creates a new BeginResponse instance using the specified properties. * @function create - * @memberof query.SetRollbackResponse + * @memberof query.BeginResponse * @static - * @param {query.ISetRollbackResponse=} [properties] Properties to set - * @returns {query.SetRollbackResponse} SetRollbackResponse instance + * @param {query.IBeginResponse=} [properties] Properties to set + * @returns {query.BeginResponse} BeginResponse instance */ - SetRollbackResponse.create = function create(properties) { - return new SetRollbackResponse(properties); + BeginResponse.create = function create(properties) { + return new BeginResponse(properties); }; /** - * Encodes the specified SetRollbackResponse message. Does not implicitly {@link query.SetRollbackResponse.verify|verify} messages. 
+ * Encodes the specified BeginResponse message. Does not implicitly {@link query.BeginResponse.verify|verify} messages. * @function encode - * @memberof query.SetRollbackResponse + * @memberof query.BeginResponse * @static - * @param {query.ISetRollbackResponse} message SetRollbackResponse message or plain object to encode + * @param {query.IBeginResponse} message BeginResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetRollbackResponse.encode = function encode(message, writer) { + BeginResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) + writer.uint32(/* id 1, wireType 0 =*/8).int64(message.transaction_id); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.session_state_changes != null && Object.hasOwnProperty.call(message, "session_state_changes")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.session_state_changes); return writer; }; /** - * Encodes the specified SetRollbackResponse message, length delimited. Does not implicitly {@link query.SetRollbackResponse.verify|verify} messages. + * Encodes the specified BeginResponse message, length delimited. Does not implicitly {@link query.BeginResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof query.SetRollbackResponse + * @memberof query.BeginResponse * @static - * @param {query.ISetRollbackResponse} message SetRollbackResponse message or plain object to encode + * @param {query.IBeginResponse} message BeginResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetRollbackResponse.encodeDelimited = function encodeDelimited(message, writer) { + BeginResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SetRollbackResponse message from the specified reader or buffer. + * Decodes a BeginResponse message from the specified reader or buffer. * @function decode - * @memberof query.SetRollbackResponse + * @memberof query.BeginResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.SetRollbackResponse} SetRollbackResponse + * @returns {query.BeginResponse} BeginResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetRollbackResponse.decode = function decode(reader, length) { + BeginResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.SetRollbackResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.BeginResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.transaction_id = reader.int64(); + break; + } + case 2: { + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 3: { + message.session_state_changes = reader.string(); + break; + } default: reader.skipType(tag & 7); break; @@ -82364,112 +84203,161 @@ export const query = $root.query = (() => { }; /** - * Decodes a SetRollbackResponse message from the specified reader or buffer, length delimited. + * Decodes a BeginResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.SetRollbackResponse + * @memberof query.BeginResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.SetRollbackResponse} SetRollbackResponse + * @returns {query.BeginResponse} BeginResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetRollbackResponse.decodeDelimited = function decodeDelimited(reader) { + BeginResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SetRollbackResponse message. + * Verifies a BeginResponse message. 
* @function verify - * @memberof query.SetRollbackResponse + * @memberof query.BeginResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SetRollbackResponse.verify = function verify(message) { + BeginResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) + return "transaction_id: integer|Long expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; + } + if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) + if (!$util.isString(message.session_state_changes)) + return "session_state_changes: string expected"; return null; }; /** - * Creates a SetRollbackResponse message from a plain object. Also converts values to their respective internal types. + * Creates a BeginResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.SetRollbackResponse + * @memberof query.BeginResponse * @static * @param {Object.} object Plain object - * @returns {query.SetRollbackResponse} SetRollbackResponse + * @returns {query.BeginResponse} BeginResponse */ - SetRollbackResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.SetRollbackResponse) + BeginResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.BeginResponse) return object; - return new $root.query.SetRollbackResponse(); + let message = new $root.query.BeginResponse(); + if (object.transaction_id != null) + if ($util.Long) + (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; + else if (typeof object.transaction_id === "string") + message.transaction_id = parseInt(object.transaction_id, 10); + else if (typeof object.transaction_id === "number") + message.transaction_id = object.transaction_id; + else if (typeof object.transaction_id === "object") + message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".query.BeginResponse.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } + if (object.session_state_changes != null) + message.session_state_changes = String(object.session_state_changes); + return message; }; /** - * Creates a plain object from a SetRollbackResponse message. Also converts values to other types if specified. + * Creates a plain object from a BeginResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.SetRollbackResponse + * @memberof query.BeginResponse * @static - * @param {query.SetRollbackResponse} message SetRollbackResponse + * @param {query.BeginResponse} message BeginResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SetRollbackResponse.toObject = function toObject() { - return {}; + BeginResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.transaction_id = options.longs === String ? "0" : 0; + object.tablet_alias = null; + object.session_state_changes = ""; + } + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (typeof message.transaction_id === "number") + object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; + else + object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) + object.session_state_changes = message.session_state_changes; + return object; }; /** - * Converts this SetRollbackResponse to JSON. + * Converts this BeginResponse to JSON. 
* @function toJSON - * @memberof query.SetRollbackResponse + * @memberof query.BeginResponse * @instance * @returns {Object.} JSON object */ - SetRollbackResponse.prototype.toJSON = function toJSON() { + BeginResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SetRollbackResponse + * Gets the default type url for BeginResponse * @function getTypeUrl - * @memberof query.SetRollbackResponse + * @memberof query.BeginResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SetRollbackResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + BeginResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.SetRollbackResponse"; + return typeUrlPrefix + "/query.BeginResponse"; }; - return SetRollbackResponse; + return BeginResponse; })(); - query.ConcludeTransactionRequest = (function() { + query.CommitRequest = (function() { /** - * Properties of a ConcludeTransactionRequest. + * Properties of a CommitRequest. 
* @memberof query - * @interface IConcludeTransactionRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] ConcludeTransactionRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] ConcludeTransactionRequest immediate_caller_id - * @property {query.ITarget|null} [target] ConcludeTransactionRequest target - * @property {string|null} [dtid] ConcludeTransactionRequest dtid + * @interface ICommitRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] CommitRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] CommitRequest immediate_caller_id + * @property {query.ITarget|null} [target] CommitRequest target + * @property {number|Long|null} [transaction_id] CommitRequest transaction_id */ /** - * Constructs a new ConcludeTransactionRequest. + * Constructs a new CommitRequest. * @memberof query - * @classdesc Represents a ConcludeTransactionRequest. - * @implements IConcludeTransactionRequest + * @classdesc Represents a CommitRequest. + * @implements ICommitRequest * @constructor - * @param {query.IConcludeTransactionRequest=} [properties] Properties to set + * @param {query.ICommitRequest=} [properties] Properties to set */ - function ConcludeTransactionRequest(properties) { + function CommitRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -82477,59 +84365,59 @@ export const query = $root.query = (() => { } /** - * ConcludeTransactionRequest effective_caller_id. + * CommitRequest effective_caller_id. * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.ConcludeTransactionRequest + * @memberof query.CommitRequest * @instance */ - ConcludeTransactionRequest.prototype.effective_caller_id = null; + CommitRequest.prototype.effective_caller_id = null; /** - * ConcludeTransactionRequest immediate_caller_id. + * CommitRequest immediate_caller_id. 
* @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.ConcludeTransactionRequest + * @memberof query.CommitRequest * @instance */ - ConcludeTransactionRequest.prototype.immediate_caller_id = null; + CommitRequest.prototype.immediate_caller_id = null; /** - * ConcludeTransactionRequest target. + * CommitRequest target. * @member {query.ITarget|null|undefined} target - * @memberof query.ConcludeTransactionRequest + * @memberof query.CommitRequest * @instance */ - ConcludeTransactionRequest.prototype.target = null; + CommitRequest.prototype.target = null; /** - * ConcludeTransactionRequest dtid. - * @member {string} dtid - * @memberof query.ConcludeTransactionRequest + * CommitRequest transaction_id. + * @member {number|Long} transaction_id + * @memberof query.CommitRequest * @instance */ - ConcludeTransactionRequest.prototype.dtid = ""; + CommitRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * Creates a new ConcludeTransactionRequest instance using the specified properties. + * Creates a new CommitRequest instance using the specified properties. * @function create - * @memberof query.ConcludeTransactionRequest + * @memberof query.CommitRequest * @static - * @param {query.IConcludeTransactionRequest=} [properties] Properties to set - * @returns {query.ConcludeTransactionRequest} ConcludeTransactionRequest instance + * @param {query.ICommitRequest=} [properties] Properties to set + * @returns {query.CommitRequest} CommitRequest instance */ - ConcludeTransactionRequest.create = function create(properties) { - return new ConcludeTransactionRequest(properties); + CommitRequest.create = function create(properties) { + return new CommitRequest(properties); }; /** - * Encodes the specified ConcludeTransactionRequest message. Does not implicitly {@link query.ConcludeTransactionRequest.verify|verify} messages. + * Encodes the specified CommitRequest message. 
Does not implicitly {@link query.CommitRequest.verify|verify} messages. * @function encode - * @memberof query.ConcludeTransactionRequest + * @memberof query.CommitRequest * @static - * @param {query.IConcludeTransactionRequest} message ConcludeTransactionRequest message or plain object to encode + * @param {query.ICommitRequest} message CommitRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ConcludeTransactionRequest.encode = function encode(message, writer) { + CommitRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) @@ -82538,39 +84426,39 @@ export const query = $root.query = (() => { $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); if (message.target != null && Object.hasOwnProperty.call(message, "target")) $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.dtid != null && Object.hasOwnProperty.call(message, "dtid")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.dtid); + if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) + writer.uint32(/* id 4, wireType 0 =*/32).int64(message.transaction_id); return writer; }; /** - * Encodes the specified ConcludeTransactionRequest message, length delimited. Does not implicitly {@link query.ConcludeTransactionRequest.verify|verify} messages. + * Encodes the specified CommitRequest message, length delimited. Does not implicitly {@link query.CommitRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof query.ConcludeTransactionRequest + * @memberof query.CommitRequest * @static - * @param {query.IConcludeTransactionRequest} message ConcludeTransactionRequest message or plain object to encode + * @param {query.ICommitRequest} message CommitRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ConcludeTransactionRequest.encodeDelimited = function encodeDelimited(message, writer) { + CommitRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ConcludeTransactionRequest message from the specified reader or buffer. + * Decodes a CommitRequest message from the specified reader or buffer. * @function decode - * @memberof query.ConcludeTransactionRequest + * @memberof query.CommitRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.ConcludeTransactionRequest} ConcludeTransactionRequest + * @returns {query.CommitRequest} CommitRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ConcludeTransactionRequest.decode = function decode(reader, length) { + CommitRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ConcludeTransactionRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.CommitRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -82587,7 +84475,7 @@ export const query = $root.query = (() => { break; } case 4: { - message.dtid = reader.string(); + message.transaction_id = reader.int64(); break; } default: @@ -82599,30 +84487,30 @@ export const query = $root.query = (() => { }; /** - * Decodes a ConcludeTransactionRequest message from the specified reader or buffer, length delimited. + * Decodes a CommitRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.ConcludeTransactionRequest + * @memberof query.CommitRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.ConcludeTransactionRequest} ConcludeTransactionRequest + * @returns {query.CommitRequest} CommitRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ConcludeTransactionRequest.decodeDelimited = function decodeDelimited(reader) { + CommitRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ConcludeTransactionRequest message. + * Verifies a CommitRequest message. 
* @function verify - * @memberof query.ConcludeTransactionRequest + * @memberof query.CommitRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ConcludeTransactionRequest.verify = function verify(message) { + CommitRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { @@ -82640,54 +84528,61 @@ export const query = $root.query = (() => { if (error) return "target." + error; } - if (message.dtid != null && message.hasOwnProperty("dtid")) - if (!$util.isString(message.dtid)) - return "dtid: string expected"; + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) + return "transaction_id: integer|Long expected"; return null; }; /** - * Creates a ConcludeTransactionRequest message from a plain object. Also converts values to their respective internal types. + * Creates a CommitRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.ConcludeTransactionRequest + * @memberof query.CommitRequest * @static * @param {Object.} object Plain object - * @returns {query.ConcludeTransactionRequest} ConcludeTransactionRequest + * @returns {query.CommitRequest} CommitRequest */ - ConcludeTransactionRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.ConcludeTransactionRequest) + CommitRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.CommitRequest) return object; - let message = new $root.query.ConcludeTransactionRequest(); + let message = new $root.query.CommitRequest(); if (object.effective_caller_id != null) { if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.ConcludeTransactionRequest.effective_caller_id: object expected"); + throw TypeError(".query.CommitRequest.effective_caller_id: object expected"); message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); } if (object.immediate_caller_id != null) { if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.ConcludeTransactionRequest.immediate_caller_id: object expected"); + throw TypeError(".query.CommitRequest.immediate_caller_id: object expected"); message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); } if (object.target != null) { if (typeof object.target !== "object") - throw TypeError(".query.ConcludeTransactionRequest.target: object expected"); + throw TypeError(".query.CommitRequest.target: object expected"); message.target = $root.query.Target.fromObject(object.target); } - if (object.dtid != null) - message.dtid = String(object.dtid); + if (object.transaction_id != null) + if ($util.Long) + (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; + else if (typeof object.transaction_id === "string") + message.transaction_id = parseInt(object.transaction_id, 10); + else if 
(typeof object.transaction_id === "number") + message.transaction_id = object.transaction_id; + else if (typeof object.transaction_id === "object") + message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); return message; }; /** - * Creates a plain object from a ConcludeTransactionRequest message. Also converts values to other types if specified. + * Creates a plain object from a CommitRequest message. Also converts values to other types if specified. * @function toObject - * @memberof query.ConcludeTransactionRequest + * @memberof query.CommitRequest * @static - * @param {query.ConcludeTransactionRequest} message ConcludeTransactionRequest + * @param {query.CommitRequest} message CommitRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ConcludeTransactionRequest.toObject = function toObject(message, options) { + CommitRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; @@ -82695,7 +84590,11 @@ export const query = $root.query = (() => { object.effective_caller_id = null; object.immediate_caller_id = null; object.target = null; - object.dtid = ""; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.transaction_id = options.longs === String ? 
"0" : 0; } if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); @@ -82703,57 +84602,61 @@ export const query = $root.query = (() => { object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); if (message.target != null && message.hasOwnProperty("target")) object.target = $root.query.Target.toObject(message.target, options); - if (message.dtid != null && message.hasOwnProperty("dtid")) - object.dtid = message.dtid; + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (typeof message.transaction_id === "number") + object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; + else + object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; return object; }; /** - * Converts this ConcludeTransactionRequest to JSON. + * Converts this CommitRequest to JSON. 
* @function toJSON - * @memberof query.ConcludeTransactionRequest + * @memberof query.CommitRequest * @instance * @returns {Object.} JSON object */ - ConcludeTransactionRequest.prototype.toJSON = function toJSON() { + CommitRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ConcludeTransactionRequest + * Gets the default type url for CommitRequest * @function getTypeUrl - * @memberof query.ConcludeTransactionRequest + * @memberof query.CommitRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ConcludeTransactionRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + CommitRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.ConcludeTransactionRequest"; + return typeUrlPrefix + "/query.CommitRequest"; }; - return ConcludeTransactionRequest; + return CommitRequest; })(); - query.ConcludeTransactionResponse = (function() { + query.CommitResponse = (function() { /** - * Properties of a ConcludeTransactionResponse. + * Properties of a CommitResponse. * @memberof query - * @interface IConcludeTransactionResponse + * @interface ICommitResponse + * @property {number|Long|null} [reserved_id] CommitResponse reserved_id */ /** - * Constructs a new ConcludeTransactionResponse. + * Constructs a new CommitResponse. * @memberof query - * @classdesc Represents a ConcludeTransactionResponse. - * @implements IConcludeTransactionResponse + * @classdesc Represents a CommitResponse. 
+ * @implements ICommitResponse * @constructor - * @param {query.IConcludeTransactionResponse=} [properties] Properties to set + * @param {query.ICommitResponse=} [properties] Properties to set */ - function ConcludeTransactionResponse(properties) { + function CommitResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -82761,63 +84664,77 @@ export const query = $root.query = (() => { } /** - * Creates a new ConcludeTransactionResponse instance using the specified properties. + * CommitResponse reserved_id. + * @member {number|Long} reserved_id + * @memberof query.CommitResponse + * @instance + */ + CommitResponse.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * Creates a new CommitResponse instance using the specified properties. * @function create - * @memberof query.ConcludeTransactionResponse + * @memberof query.CommitResponse * @static - * @param {query.IConcludeTransactionResponse=} [properties] Properties to set - * @returns {query.ConcludeTransactionResponse} ConcludeTransactionResponse instance + * @param {query.ICommitResponse=} [properties] Properties to set + * @returns {query.CommitResponse} CommitResponse instance */ - ConcludeTransactionResponse.create = function create(properties) { - return new ConcludeTransactionResponse(properties); + CommitResponse.create = function create(properties) { + return new CommitResponse(properties); }; /** - * Encodes the specified ConcludeTransactionResponse message. Does not implicitly {@link query.ConcludeTransactionResponse.verify|verify} messages. + * Encodes the specified CommitResponse message. Does not implicitly {@link query.CommitResponse.verify|verify} messages. 
* @function encode - * @memberof query.ConcludeTransactionResponse + * @memberof query.CommitResponse * @static - * @param {query.IConcludeTransactionResponse} message ConcludeTransactionResponse message or plain object to encode + * @param {query.ICommitResponse} message CommitResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ConcludeTransactionResponse.encode = function encode(message, writer) { + CommitResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) + writer.uint32(/* id 1, wireType 0 =*/8).int64(message.reserved_id); return writer; }; /** - * Encodes the specified ConcludeTransactionResponse message, length delimited. Does not implicitly {@link query.ConcludeTransactionResponse.verify|verify} messages. + * Encodes the specified CommitResponse message, length delimited. Does not implicitly {@link query.CommitResponse.verify|verify} messages. * @function encodeDelimited - * @memberof query.ConcludeTransactionResponse + * @memberof query.CommitResponse * @static - * @param {query.IConcludeTransactionResponse} message ConcludeTransactionResponse message or plain object to encode + * @param {query.ICommitResponse} message CommitResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ConcludeTransactionResponse.encodeDelimited = function encodeDelimited(message, writer) { + CommitResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ConcludeTransactionResponse message from the specified reader or buffer. + * Decodes a CommitResponse message from the specified reader or buffer. 
* @function decode - * @memberof query.ConcludeTransactionResponse + * @memberof query.CommitResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.ConcludeTransactionResponse} ConcludeTransactionResponse + * @returns {query.CommitResponse} CommitResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ConcludeTransactionResponse.decode = function decode(reader, length) { + CommitResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ConcludeTransactionResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.CommitResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.reserved_id = reader.int64(); + break; + } default: reader.skipType(tag & 7); break; @@ -82827,112 +84744,139 @@ export const query = $root.query = (() => { }; /** - * Decodes a ConcludeTransactionResponse message from the specified reader or buffer, length delimited. + * Decodes a CommitResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.ConcludeTransactionResponse + * @memberof query.CommitResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.ConcludeTransactionResponse} ConcludeTransactionResponse + * @returns {query.CommitResponse} CommitResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ConcludeTransactionResponse.decodeDelimited = function decodeDelimited(reader) { + CommitResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ConcludeTransactionResponse message. + * Verifies a CommitResponse message. * @function verify - * @memberof query.ConcludeTransactionResponse + * @memberof query.CommitResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ConcludeTransactionResponse.verify = function verify(message) { + CommitResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) + return "reserved_id: integer|Long expected"; return null; }; /** - * Creates a ConcludeTransactionResponse message from a plain object. Also converts values to their respective internal types. + * Creates a CommitResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.ConcludeTransactionResponse + * @memberof query.CommitResponse * @static * @param {Object.} object Plain object - * @returns {query.ConcludeTransactionResponse} ConcludeTransactionResponse + * @returns {query.CommitResponse} CommitResponse */ - ConcludeTransactionResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.ConcludeTransactionResponse) + CommitResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.CommitResponse) return object; - return new $root.query.ConcludeTransactionResponse(); + let message = new $root.query.CommitResponse(); + if (object.reserved_id != null) + if ($util.Long) + (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; + else if (typeof object.reserved_id === "string") + message.reserved_id = parseInt(object.reserved_id, 10); + else if (typeof object.reserved_id === "number") + message.reserved_id = object.reserved_id; + else if (typeof object.reserved_id === "object") + message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); + return message; }; /** - * Creates a plain object from a ConcludeTransactionResponse message. Also converts values to other types if specified. + * Creates a plain object from a CommitResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.ConcludeTransactionResponse + * @memberof query.CommitResponse * @static - * @param {query.ConcludeTransactionResponse} message ConcludeTransactionResponse + * @param {query.CommitResponse} message CommitResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ConcludeTransactionResponse.toObject = function toObject() { - return {}; + CommitResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.reserved_id = options.longs === String ? "0" : 0; + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (typeof message.reserved_id === "number") + object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; + else + object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; + return object; }; /** - * Converts this ConcludeTransactionResponse to JSON. + * Converts this CommitResponse to JSON. 
* @function toJSON - * @memberof query.ConcludeTransactionResponse + * @memberof query.CommitResponse * @instance * @returns {Object.} JSON object */ - ConcludeTransactionResponse.prototype.toJSON = function toJSON() { + CommitResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ConcludeTransactionResponse + * Gets the default type url for CommitResponse * @function getTypeUrl - * @memberof query.ConcludeTransactionResponse + * @memberof query.CommitResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ConcludeTransactionResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + CommitResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.ConcludeTransactionResponse"; + return typeUrlPrefix + "/query.CommitResponse"; }; - return ConcludeTransactionResponse; + return CommitResponse; })(); - query.ReadTransactionRequest = (function() { + query.RollbackRequest = (function() { /** - * Properties of a ReadTransactionRequest. + * Properties of a RollbackRequest. 
* @memberof query - * @interface IReadTransactionRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] ReadTransactionRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] ReadTransactionRequest immediate_caller_id - * @property {query.ITarget|null} [target] ReadTransactionRequest target - * @property {string|null} [dtid] ReadTransactionRequest dtid + * @interface IRollbackRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] RollbackRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] RollbackRequest immediate_caller_id + * @property {query.ITarget|null} [target] RollbackRequest target + * @property {number|Long|null} [transaction_id] RollbackRequest transaction_id */ /** - * Constructs a new ReadTransactionRequest. + * Constructs a new RollbackRequest. * @memberof query - * @classdesc Represents a ReadTransactionRequest. - * @implements IReadTransactionRequest + * @classdesc Represents a RollbackRequest. + * @implements IRollbackRequest * @constructor - * @param {query.IReadTransactionRequest=} [properties] Properties to set + * @param {query.IRollbackRequest=} [properties] Properties to set */ - function ReadTransactionRequest(properties) { + function RollbackRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -82940,59 +84884,59 @@ export const query = $root.query = (() => { } /** - * ReadTransactionRequest effective_caller_id. + * RollbackRequest effective_caller_id. * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.ReadTransactionRequest + * @memberof query.RollbackRequest * @instance */ - ReadTransactionRequest.prototype.effective_caller_id = null; + RollbackRequest.prototype.effective_caller_id = null; /** - * ReadTransactionRequest immediate_caller_id. + * RollbackRequest immediate_caller_id. 
* @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.ReadTransactionRequest + * @memberof query.RollbackRequest * @instance */ - ReadTransactionRequest.prototype.immediate_caller_id = null; + RollbackRequest.prototype.immediate_caller_id = null; /** - * ReadTransactionRequest target. + * RollbackRequest target. * @member {query.ITarget|null|undefined} target - * @memberof query.ReadTransactionRequest + * @memberof query.RollbackRequest * @instance */ - ReadTransactionRequest.prototype.target = null; + RollbackRequest.prototype.target = null; /** - * ReadTransactionRequest dtid. - * @member {string} dtid - * @memberof query.ReadTransactionRequest + * RollbackRequest transaction_id. + * @member {number|Long} transaction_id + * @memberof query.RollbackRequest * @instance */ - ReadTransactionRequest.prototype.dtid = ""; + RollbackRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * Creates a new ReadTransactionRequest instance using the specified properties. + * Creates a new RollbackRequest instance using the specified properties. * @function create - * @memberof query.ReadTransactionRequest + * @memberof query.RollbackRequest * @static - * @param {query.IReadTransactionRequest=} [properties] Properties to set - * @returns {query.ReadTransactionRequest} ReadTransactionRequest instance + * @param {query.IRollbackRequest=} [properties] Properties to set + * @returns {query.RollbackRequest} RollbackRequest instance */ - ReadTransactionRequest.create = function create(properties) { - return new ReadTransactionRequest(properties); + RollbackRequest.create = function create(properties) { + return new RollbackRequest(properties); }; /** - * Encodes the specified ReadTransactionRequest message. Does not implicitly {@link query.ReadTransactionRequest.verify|verify} messages. + * Encodes the specified RollbackRequest message. Does not implicitly {@link query.RollbackRequest.verify|verify} messages. 
* @function encode - * @memberof query.ReadTransactionRequest + * @memberof query.RollbackRequest * @static - * @param {query.IReadTransactionRequest} message ReadTransactionRequest message or plain object to encode + * @param {query.IRollbackRequest} message RollbackRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReadTransactionRequest.encode = function encode(message, writer) { + RollbackRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) @@ -83001,39 +84945,39 @@ export const query = $root.query = (() => { $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); if (message.target != null && Object.hasOwnProperty.call(message, "target")) $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.dtid != null && Object.hasOwnProperty.call(message, "dtid")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.dtid); + if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) + writer.uint32(/* id 4, wireType 0 =*/32).int64(message.transaction_id); return writer; }; /** - * Encodes the specified ReadTransactionRequest message, length delimited. Does not implicitly {@link query.ReadTransactionRequest.verify|verify} messages. + * Encodes the specified RollbackRequest message, length delimited. Does not implicitly {@link query.RollbackRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof query.ReadTransactionRequest + * @memberof query.RollbackRequest * @static - * @param {query.IReadTransactionRequest} message ReadTransactionRequest message or plain object to encode + * @param {query.IRollbackRequest} message RollbackRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReadTransactionRequest.encodeDelimited = function encodeDelimited(message, writer) { + RollbackRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReadTransactionRequest message from the specified reader or buffer. + * Decodes a RollbackRequest message from the specified reader or buffer. * @function decode - * @memberof query.ReadTransactionRequest + * @memberof query.RollbackRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.ReadTransactionRequest} ReadTransactionRequest + * @returns {query.RollbackRequest} RollbackRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReadTransactionRequest.decode = function decode(reader, length) { + RollbackRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ReadTransactionRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.RollbackRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -83050,7 +84994,7 @@ export const query = $root.query = (() => { break; } case 4: { - message.dtid = reader.string(); + message.transaction_id = reader.int64(); break; } default: @@ -83062,30 +85006,30 @@ export const query = $root.query = (() => { }; /** - * Decodes a ReadTransactionRequest message from the specified reader or buffer, length delimited. + * Decodes a RollbackRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.ReadTransactionRequest + * @memberof query.RollbackRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.ReadTransactionRequest} ReadTransactionRequest + * @returns {query.RollbackRequest} RollbackRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReadTransactionRequest.decodeDelimited = function decodeDelimited(reader) { + RollbackRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReadTransactionRequest message. + * Verifies a RollbackRequest message. 
* @function verify - * @memberof query.ReadTransactionRequest + * @memberof query.RollbackRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReadTransactionRequest.verify = function verify(message) { + RollbackRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { @@ -83103,54 +85047,61 @@ export const query = $root.query = (() => { if (error) return "target." + error; } - if (message.dtid != null && message.hasOwnProperty("dtid")) - if (!$util.isString(message.dtid)) - return "dtid: string expected"; + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) + return "transaction_id: integer|Long expected"; return null; }; /** - * Creates a ReadTransactionRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RollbackRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.ReadTransactionRequest + * @memberof query.RollbackRequest * @static * @param {Object.} object Plain object - * @returns {query.ReadTransactionRequest} ReadTransactionRequest + * @returns {query.RollbackRequest} RollbackRequest */ - ReadTransactionRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.ReadTransactionRequest) + RollbackRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.RollbackRequest) return object; - let message = new $root.query.ReadTransactionRequest(); + let message = new $root.query.RollbackRequest(); if (object.effective_caller_id != null) { if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.ReadTransactionRequest.effective_caller_id: object expected"); + throw TypeError(".query.RollbackRequest.effective_caller_id: object expected"); message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); } if (object.immediate_caller_id != null) { if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.ReadTransactionRequest.immediate_caller_id: object expected"); + throw TypeError(".query.RollbackRequest.immediate_caller_id: object expected"); message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); } if (object.target != null) { if (typeof object.target !== "object") - throw TypeError(".query.ReadTransactionRequest.target: object expected"); + throw TypeError(".query.RollbackRequest.target: object expected"); message.target = $root.query.Target.fromObject(object.target); } - if (object.dtid != null) - message.dtid = String(object.dtid); + if (object.transaction_id != null) + if ($util.Long) + (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; + else if (typeof object.transaction_id === "string") + message.transaction_id = parseInt(object.transaction_id, 10); + else if (typeof 
object.transaction_id === "number") + message.transaction_id = object.transaction_id; + else if (typeof object.transaction_id === "object") + message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); return message; }; /** - * Creates a plain object from a ReadTransactionRequest message. Also converts values to other types if specified. + * Creates a plain object from a RollbackRequest message. Also converts values to other types if specified. * @function toObject - * @memberof query.ReadTransactionRequest + * @memberof query.RollbackRequest * @static - * @param {query.ReadTransactionRequest} message ReadTransactionRequest + * @param {query.RollbackRequest} message RollbackRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReadTransactionRequest.toObject = function toObject(message, options) { + RollbackRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; @@ -83158,7 +85109,11 @@ export const query = $root.query = (() => { object.effective_caller_id = null; object.immediate_caller_id = null; object.target = null; - object.dtid = ""; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.transaction_id = options.longs === String ? 
"0" : 0; } if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); @@ -83166,58 +85121,61 @@ export const query = $root.query = (() => { object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); if (message.target != null && message.hasOwnProperty("target")) object.target = $root.query.Target.toObject(message.target, options); - if (message.dtid != null && message.hasOwnProperty("dtid")) - object.dtid = message.dtid; + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (typeof message.transaction_id === "number") + object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; + else + object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; return object; }; /** - * Converts this ReadTransactionRequest to JSON. + * Converts this RollbackRequest to JSON. 
* @function toJSON - * @memberof query.ReadTransactionRequest + * @memberof query.RollbackRequest * @instance * @returns {Object.} JSON object */ - ReadTransactionRequest.prototype.toJSON = function toJSON() { + RollbackRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReadTransactionRequest + * Gets the default type url for RollbackRequest * @function getTypeUrl - * @memberof query.ReadTransactionRequest + * @memberof query.RollbackRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReadTransactionRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RollbackRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.ReadTransactionRequest"; + return typeUrlPrefix + "/query.RollbackRequest"; }; - return ReadTransactionRequest; + return RollbackRequest; })(); - query.ReadTransactionResponse = (function() { + query.RollbackResponse = (function() { /** - * Properties of a ReadTransactionResponse. + * Properties of a RollbackResponse. * @memberof query - * @interface IReadTransactionResponse - * @property {query.ITransactionMetadata|null} [metadata] ReadTransactionResponse metadata + * @interface IRollbackResponse + * @property {number|Long|null} [reserved_id] RollbackResponse reserved_id */ /** - * Constructs a new ReadTransactionResponse. + * Constructs a new RollbackResponse. * @memberof query - * @classdesc Represents a ReadTransactionResponse. - * @implements IReadTransactionResponse + * @classdesc Represents a RollbackResponse. 
+ * @implements IRollbackResponse * @constructor - * @param {query.IReadTransactionResponse=} [properties] Properties to set + * @param {query.IRollbackResponse=} [properties] Properties to set */ - function ReadTransactionResponse(properties) { + function RollbackResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -83225,75 +85183,75 @@ export const query = $root.query = (() => { } /** - * ReadTransactionResponse metadata. - * @member {query.ITransactionMetadata|null|undefined} metadata - * @memberof query.ReadTransactionResponse + * RollbackResponse reserved_id. + * @member {number|Long} reserved_id + * @memberof query.RollbackResponse * @instance */ - ReadTransactionResponse.prototype.metadata = null; + RollbackResponse.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * Creates a new ReadTransactionResponse instance using the specified properties. + * Creates a new RollbackResponse instance using the specified properties. * @function create - * @memberof query.ReadTransactionResponse + * @memberof query.RollbackResponse * @static - * @param {query.IReadTransactionResponse=} [properties] Properties to set - * @returns {query.ReadTransactionResponse} ReadTransactionResponse instance + * @param {query.IRollbackResponse=} [properties] Properties to set + * @returns {query.RollbackResponse} RollbackResponse instance */ - ReadTransactionResponse.create = function create(properties) { - return new ReadTransactionResponse(properties); + RollbackResponse.create = function create(properties) { + return new RollbackResponse(properties); }; /** - * Encodes the specified ReadTransactionResponse message. Does not implicitly {@link query.ReadTransactionResponse.verify|verify} messages. + * Encodes the specified RollbackResponse message. Does not implicitly {@link query.RollbackResponse.verify|verify} messages. 
* @function encode - * @memberof query.ReadTransactionResponse + * @memberof query.RollbackResponse * @static - * @param {query.IReadTransactionResponse} message ReadTransactionResponse message or plain object to encode + * @param {query.IRollbackResponse} message RollbackResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReadTransactionResponse.encode = function encode(message, writer) { + RollbackResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.metadata != null && Object.hasOwnProperty.call(message, "metadata")) - $root.query.TransactionMetadata.encode(message.metadata, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) + writer.uint32(/* id 1, wireType 0 =*/8).int64(message.reserved_id); return writer; }; /** - * Encodes the specified ReadTransactionResponse message, length delimited. Does not implicitly {@link query.ReadTransactionResponse.verify|verify} messages. + * Encodes the specified RollbackResponse message, length delimited. Does not implicitly {@link query.RollbackResponse.verify|verify} messages. * @function encodeDelimited - * @memberof query.ReadTransactionResponse + * @memberof query.RollbackResponse * @static - * @param {query.IReadTransactionResponse} message ReadTransactionResponse message or plain object to encode + * @param {query.IRollbackResponse} message RollbackResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReadTransactionResponse.encodeDelimited = function encodeDelimited(message, writer) { + RollbackResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReadTransactionResponse message from the specified reader or buffer. 
+ * Decodes a RollbackResponse message from the specified reader or buffer. * @function decode - * @memberof query.ReadTransactionResponse + * @memberof query.RollbackResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.ReadTransactionResponse} ReadTransactionResponse + * @returns {query.RollbackResponse} RollbackResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReadTransactionResponse.decode = function decode(reader, length) { + RollbackResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ReadTransactionResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.RollbackResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.metadata = $root.query.TransactionMetadata.decode(reader, reader.uint32()); + message.reserved_id = reader.int64(); break; } default: @@ -83305,134 +85263,140 @@ export const query = $root.query = (() => { }; /** - * Decodes a ReadTransactionResponse message from the specified reader or buffer, length delimited. + * Decodes a RollbackResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.ReadTransactionResponse + * @memberof query.RollbackResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.ReadTransactionResponse} ReadTransactionResponse + * @returns {query.RollbackResponse} RollbackResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReadTransactionResponse.decodeDelimited = function decodeDelimited(reader) { + RollbackResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReadTransactionResponse message. + * Verifies a RollbackResponse message. * @function verify - * @memberof query.ReadTransactionResponse + * @memberof query.RollbackResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReadTransactionResponse.verify = function verify(message) { + RollbackResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.metadata != null && message.hasOwnProperty("metadata")) { - let error = $root.query.TransactionMetadata.verify(message.metadata); - if (error) - return "metadata." + error; - } + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) + return "reserved_id: integer|Long expected"; return null; }; /** - * Creates a ReadTransactionResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RollbackResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.ReadTransactionResponse + * @memberof query.RollbackResponse * @static * @param {Object.} object Plain object - * @returns {query.ReadTransactionResponse} ReadTransactionResponse + * @returns {query.RollbackResponse} RollbackResponse */ - ReadTransactionResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.ReadTransactionResponse) + RollbackResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.RollbackResponse) return object; - let message = new $root.query.ReadTransactionResponse(); - if (object.metadata != null) { - if (typeof object.metadata !== "object") - throw TypeError(".query.ReadTransactionResponse.metadata: object expected"); - message.metadata = $root.query.TransactionMetadata.fromObject(object.metadata); - } + let message = new $root.query.RollbackResponse(); + if (object.reserved_id != null) + if ($util.Long) + (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; + else if (typeof object.reserved_id === "string") + message.reserved_id = parseInt(object.reserved_id, 10); + else if (typeof object.reserved_id === "number") + message.reserved_id = object.reserved_id; + else if (typeof object.reserved_id === "object") + message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); return message; }; /** - * Creates a plain object from a ReadTransactionResponse message. Also converts values to other types if specified. + * Creates a plain object from a RollbackResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.ReadTransactionResponse + * @memberof query.RollbackResponse * @static - * @param {query.ReadTransactionResponse} message ReadTransactionResponse + * @param {query.RollbackResponse} message RollbackResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReadTransactionResponse.toObject = function toObject(message, options) { + RollbackResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) - object.metadata = null; - if (message.metadata != null && message.hasOwnProperty("metadata")) - object.metadata = $root.query.TransactionMetadata.toObject(message.metadata, options); + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.reserved_id = options.longs === String ? "0" : 0; + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (typeof message.reserved_id === "number") + object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; + else + object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; return object; }; /** - * Converts this ReadTransactionResponse to JSON. + * Converts this RollbackResponse to JSON. 
* @function toJSON - * @memberof query.ReadTransactionResponse + * @memberof query.RollbackResponse * @instance * @returns {Object.} JSON object */ - ReadTransactionResponse.prototype.toJSON = function toJSON() { + RollbackResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReadTransactionResponse + * Gets the default type url for RollbackResponse * @function getTypeUrl - * @memberof query.ReadTransactionResponse + * @memberof query.RollbackResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReadTransactionResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RollbackResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.ReadTransactionResponse"; + return typeUrlPrefix + "/query.RollbackResponse"; }; - return ReadTransactionResponse; + return RollbackResponse; })(); - query.BeginExecuteRequest = (function() { + query.PrepareRequest = (function() { /** - * Properties of a BeginExecuteRequest. + * Properties of a PrepareRequest. 
* @memberof query - * @interface IBeginExecuteRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] BeginExecuteRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] BeginExecuteRequest immediate_caller_id - * @property {query.ITarget|null} [target] BeginExecuteRequest target - * @property {query.IBoundQuery|null} [query] BeginExecuteRequest query - * @property {query.IExecuteOptions|null} [options] BeginExecuteRequest options - * @property {number|Long|null} [reserved_id] BeginExecuteRequest reserved_id - * @property {Array.|null} [pre_queries] BeginExecuteRequest pre_queries + * @interface IPrepareRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] PrepareRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] PrepareRequest immediate_caller_id + * @property {query.ITarget|null} [target] PrepareRequest target + * @property {number|Long|null} [transaction_id] PrepareRequest transaction_id + * @property {string|null} [dtid] PrepareRequest dtid */ /** - * Constructs a new BeginExecuteRequest. + * Constructs a new PrepareRequest. * @memberof query - * @classdesc Represents a BeginExecuteRequest. - * @implements IBeginExecuteRequest + * @classdesc Represents a PrepareRequest. + * @implements IPrepareRequest * @constructor - * @param {query.IBeginExecuteRequest=} [properties] Properties to set + * @param {query.IPrepareRequest=} [properties] Properties to set */ - function BeginExecuteRequest(properties) { - this.pre_queries = []; + function PrepareRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -83440,83 +85404,67 @@ export const query = $root.query = (() => { } /** - * BeginExecuteRequest effective_caller_id. + * PrepareRequest effective_caller_id. 
* @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.BeginExecuteRequest + * @memberof query.PrepareRequest * @instance */ - BeginExecuteRequest.prototype.effective_caller_id = null; + PrepareRequest.prototype.effective_caller_id = null; /** - * BeginExecuteRequest immediate_caller_id. + * PrepareRequest immediate_caller_id. * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.BeginExecuteRequest + * @memberof query.PrepareRequest * @instance */ - BeginExecuteRequest.prototype.immediate_caller_id = null; + PrepareRequest.prototype.immediate_caller_id = null; /** - * BeginExecuteRequest target. + * PrepareRequest target. * @member {query.ITarget|null|undefined} target - * @memberof query.BeginExecuteRequest - * @instance - */ - BeginExecuteRequest.prototype.target = null; - - /** - * BeginExecuteRequest query. - * @member {query.IBoundQuery|null|undefined} query - * @memberof query.BeginExecuteRequest - * @instance - */ - BeginExecuteRequest.prototype.query = null; - - /** - * BeginExecuteRequest options. - * @member {query.IExecuteOptions|null|undefined} options - * @memberof query.BeginExecuteRequest + * @memberof query.PrepareRequest * @instance */ - BeginExecuteRequest.prototype.options = null; + PrepareRequest.prototype.target = null; /** - * BeginExecuteRequest reserved_id. - * @member {number|Long} reserved_id - * @memberof query.BeginExecuteRequest + * PrepareRequest transaction_id. + * @member {number|Long} transaction_id + * @memberof query.PrepareRequest * @instance */ - BeginExecuteRequest.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + PrepareRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * BeginExecuteRequest pre_queries. - * @member {Array.} pre_queries - * @memberof query.BeginExecuteRequest + * PrepareRequest dtid. 
+ * @member {string} dtid + * @memberof query.PrepareRequest * @instance */ - BeginExecuteRequest.prototype.pre_queries = $util.emptyArray; + PrepareRequest.prototype.dtid = ""; /** - * Creates a new BeginExecuteRequest instance using the specified properties. + * Creates a new PrepareRequest instance using the specified properties. * @function create - * @memberof query.BeginExecuteRequest + * @memberof query.PrepareRequest * @static - * @param {query.IBeginExecuteRequest=} [properties] Properties to set - * @returns {query.BeginExecuteRequest} BeginExecuteRequest instance + * @param {query.IPrepareRequest=} [properties] Properties to set + * @returns {query.PrepareRequest} PrepareRequest instance */ - BeginExecuteRequest.create = function create(properties) { - return new BeginExecuteRequest(properties); + PrepareRequest.create = function create(properties) { + return new PrepareRequest(properties); }; /** - * Encodes the specified BeginExecuteRequest message. Does not implicitly {@link query.BeginExecuteRequest.verify|verify} messages. + * Encodes the specified PrepareRequest message. Does not implicitly {@link query.PrepareRequest.verify|verify} messages. 
* @function encode - * @memberof query.BeginExecuteRequest + * @memberof query.PrepareRequest * @static - * @param {query.IBeginExecuteRequest} message BeginExecuteRequest message or plain object to encode + * @param {query.IPrepareRequest} message PrepareRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BeginExecuteRequest.encode = function encode(message, writer) { + PrepareRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) @@ -83525,46 +85473,41 @@ export const query = $root.query = (() => { $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); if (message.target != null && Object.hasOwnProperty.call(message, "target")) $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.query != null && Object.hasOwnProperty.call(message, "query")) - $root.query.BoundQuery.encode(message.query, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.options != null && Object.hasOwnProperty.call(message, "options")) - $root.query.ExecuteOptions.encode(message.options, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); - if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) - writer.uint32(/* id 6, wireType 0 =*/48).int64(message.reserved_id); - if (message.pre_queries != null && message.pre_queries.length) - for (let i = 0; i < message.pre_queries.length; ++i) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.pre_queries[i]); + if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) + writer.uint32(/* id 4, wireType 0 =*/32).int64(message.transaction_id); + if (message.dtid != null && 
Object.hasOwnProperty.call(message, "dtid")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.dtid); return writer; }; /** - * Encodes the specified BeginExecuteRequest message, length delimited. Does not implicitly {@link query.BeginExecuteRequest.verify|verify} messages. + * Encodes the specified PrepareRequest message, length delimited. Does not implicitly {@link query.PrepareRequest.verify|verify} messages. * @function encodeDelimited - * @memberof query.BeginExecuteRequest + * @memberof query.PrepareRequest * @static - * @param {query.IBeginExecuteRequest} message BeginExecuteRequest message or plain object to encode + * @param {query.IPrepareRequest} message PrepareRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BeginExecuteRequest.encodeDelimited = function encodeDelimited(message, writer) { + PrepareRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a BeginExecuteRequest message from the specified reader or buffer. + * Decodes a PrepareRequest message from the specified reader or buffer. * @function decode - * @memberof query.BeginExecuteRequest + * @memberof query.PrepareRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.BeginExecuteRequest} BeginExecuteRequest + * @returns {query.PrepareRequest} PrepareRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BeginExecuteRequest.decode = function decode(reader, length) { + PrepareRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.BeginExecuteRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.PrepareRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -83581,21 +85524,11 @@ export const query = $root.query = (() => { break; } case 4: { - message.query = $root.query.BoundQuery.decode(reader, reader.uint32()); + message.transaction_id = reader.int64(); break; } case 5: { - message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); - break; - } - case 6: { - message.reserved_id = reader.int64(); - break; - } - case 7: { - if (!(message.pre_queries && message.pre_queries.length)) - message.pre_queries = []; - message.pre_queries.push(reader.string()); + message.dtid = reader.string(); break; } default: @@ -83607,30 +85540,30 @@ export const query = $root.query = (() => { }; /** - * Decodes a BeginExecuteRequest message from the specified reader or buffer, length delimited. + * Decodes a PrepareRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.BeginExecuteRequest + * @memberof query.PrepareRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.BeginExecuteRequest} BeginExecuteRequest + * @returns {query.PrepareRequest} PrepareRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BeginExecuteRequest.decodeDelimited = function decodeDelimited(reader) { + PrepareRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a BeginExecuteRequest message. + * Verifies a PrepareRequest message. 
* @function verify - * @memberof query.BeginExecuteRequest + * @memberof query.PrepareRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - BeginExecuteRequest.verify = function verify(message) { + PrepareRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { @@ -83648,111 +85581,79 @@ export const query = $root.query = (() => { if (error) return "target." + error; } - if (message.query != null && message.hasOwnProperty("query")) { - let error = $root.query.BoundQuery.verify(message.query); - if (error) - return "query." + error; - } - if (message.options != null && message.hasOwnProperty("options")) { - let error = $root.query.ExecuteOptions.verify(message.options); - if (error) - return "options." + error; - } - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) - return "reserved_id: integer|Long expected"; - if (message.pre_queries != null && message.hasOwnProperty("pre_queries")) { - if (!Array.isArray(message.pre_queries)) - return "pre_queries: array expected"; - for (let i = 0; i < message.pre_queries.length; ++i) - if (!$util.isString(message.pre_queries[i])) - return "pre_queries: string[] expected"; - } + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) + return "transaction_id: integer|Long expected"; + if (message.dtid != null && message.hasOwnProperty("dtid")) + if (!$util.isString(message.dtid)) + return "dtid: string 
expected"; return null; }; /** - * Creates a BeginExecuteRequest message from a plain object. Also converts values to their respective internal types. + * Creates a PrepareRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.BeginExecuteRequest + * @memberof query.PrepareRequest * @static * @param {Object.} object Plain object - * @returns {query.BeginExecuteRequest} BeginExecuteRequest + * @returns {query.PrepareRequest} PrepareRequest */ - BeginExecuteRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.BeginExecuteRequest) + PrepareRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.PrepareRequest) return object; - let message = new $root.query.BeginExecuteRequest(); + let message = new $root.query.PrepareRequest(); if (object.effective_caller_id != null) { if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.BeginExecuteRequest.effective_caller_id: object expected"); + throw TypeError(".query.PrepareRequest.effective_caller_id: object expected"); message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); } if (object.immediate_caller_id != null) { if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.BeginExecuteRequest.immediate_caller_id: object expected"); + throw TypeError(".query.PrepareRequest.immediate_caller_id: object expected"); message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); } if (object.target != null) { if (typeof object.target !== "object") - throw TypeError(".query.BeginExecuteRequest.target: object expected"); + throw TypeError(".query.PrepareRequest.target: object expected"); message.target = $root.query.Target.fromObject(object.target); } - if (object.query != null) { - if (typeof object.query !== "object") - throw TypeError(".query.BeginExecuteRequest.query: object 
expected"); - message.query = $root.query.BoundQuery.fromObject(object.query); - } - if (object.options != null) { - if (typeof object.options !== "object") - throw TypeError(".query.BeginExecuteRequest.options: object expected"); - message.options = $root.query.ExecuteOptions.fromObject(object.options); - } - if (object.reserved_id != null) + if (object.transaction_id != null) if ($util.Long) - (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; - else if (typeof object.reserved_id === "string") - message.reserved_id = parseInt(object.reserved_id, 10); - else if (typeof object.reserved_id === "number") - message.reserved_id = object.reserved_id; - else if (typeof object.reserved_id === "object") - message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); - if (object.pre_queries) { - if (!Array.isArray(object.pre_queries)) - throw TypeError(".query.BeginExecuteRequest.pre_queries: array expected"); - message.pre_queries = []; - for (let i = 0; i < object.pre_queries.length; ++i) - message.pre_queries[i] = String(object.pre_queries[i]); - } + (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; + else if (typeof object.transaction_id === "string") + message.transaction_id = parseInt(object.transaction_id, 10); + else if (typeof object.transaction_id === "number") + message.transaction_id = object.transaction_id; + else if (typeof object.transaction_id === "object") + message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); + if (object.dtid != null) + message.dtid = String(object.dtid); return message; }; /** - * Creates a plain object from a BeginExecuteRequest message. Also converts values to other types if specified. + * Creates a plain object from a PrepareRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.BeginExecuteRequest + * @memberof query.PrepareRequest * @static - * @param {query.BeginExecuteRequest} message BeginExecuteRequest + * @param {query.PrepareRequest} message PrepareRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - BeginExecuteRequest.toObject = function toObject(message, options) { + PrepareRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.pre_queries = []; if (options.defaults) { object.effective_caller_id = null; object.immediate_caller_id = null; object.target = null; - object.query = null; - object.options = null; if ($util.Long) { let long = new $util.Long(0, 0, false); - object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; } else - object.reserved_id = options.longs === String ? "0" : 0; + object.transaction_id = options.longs === String ? 
"0" : 0; + object.dtid = ""; } if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); @@ -83760,74 +85661,62 @@ export const query = $root.query = (() => { object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); if (message.target != null && message.hasOwnProperty("target")) object.target = $root.query.Target.toObject(message.target, options); - if (message.query != null && message.hasOwnProperty("query")) - object.query = $root.query.BoundQuery.toObject(message.query, options); - if (message.options != null && message.hasOwnProperty("options")) - object.options = $root.query.ExecuteOptions.toObject(message.options, options); - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (typeof message.reserved_id === "number") - object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (typeof message.transaction_id === "number") + object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; else - object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; - if (message.pre_queries && message.pre_queries.length) { - object.pre_queries = []; - for (let j = 0; j < message.pre_queries.length; ++j) - object.pre_queries[j] = message.pre_queries[j]; - } + object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? 
new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; + if (message.dtid != null && message.hasOwnProperty("dtid")) + object.dtid = message.dtid; return object; }; /** - * Converts this BeginExecuteRequest to JSON. + * Converts this PrepareRequest to JSON. * @function toJSON - * @memberof query.BeginExecuteRequest + * @memberof query.PrepareRequest * @instance * @returns {Object.} JSON object */ - BeginExecuteRequest.prototype.toJSON = function toJSON() { + PrepareRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for BeginExecuteRequest + * Gets the default type url for PrepareRequest * @function getTypeUrl - * @memberof query.BeginExecuteRequest + * @memberof query.PrepareRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - BeginExecuteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + PrepareRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.BeginExecuteRequest"; + return typeUrlPrefix + "/query.PrepareRequest"; }; - return BeginExecuteRequest; + return PrepareRequest; })(); - query.BeginExecuteResponse = (function() { + query.PrepareResponse = (function() { /** - * Properties of a BeginExecuteResponse. + * Properties of a PrepareResponse. 
* @memberof query - * @interface IBeginExecuteResponse - * @property {vtrpc.IRPCError|null} [error] BeginExecuteResponse error - * @property {query.IQueryResult|null} [result] BeginExecuteResponse result - * @property {number|Long|null} [transaction_id] BeginExecuteResponse transaction_id - * @property {topodata.ITabletAlias|null} [tablet_alias] BeginExecuteResponse tablet_alias - * @property {string|null} [session_state_changes] BeginExecuteResponse session_state_changes + * @interface IPrepareResponse */ /** - * Constructs a new BeginExecuteResponse. + * Constructs a new PrepareResponse. * @memberof query - * @classdesc Represents a BeginExecuteResponse. - * @implements IBeginExecuteResponse + * @classdesc Represents a PrepareResponse. + * @implements IPrepareResponse * @constructor - * @param {query.IBeginExecuteResponse=} [properties] Properties to set + * @param {query.IPrepareResponse=} [properties] Properties to set */ - function BeginExecuteResponse(properties) { + function PrepareResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -83835,133 +85724,63 @@ export const query = $root.query = (() => { } /** - * BeginExecuteResponse error. - * @member {vtrpc.IRPCError|null|undefined} error - * @memberof query.BeginExecuteResponse - * @instance - */ - BeginExecuteResponse.prototype.error = null; - - /** - * BeginExecuteResponse result. - * @member {query.IQueryResult|null|undefined} result - * @memberof query.BeginExecuteResponse - * @instance - */ - BeginExecuteResponse.prototype.result = null; - - /** - * BeginExecuteResponse transaction_id. - * @member {number|Long} transaction_id - * @memberof query.BeginExecuteResponse - * @instance - */ - BeginExecuteResponse.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * BeginExecuteResponse tablet_alias. 
- * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof query.BeginExecuteResponse - * @instance - */ - BeginExecuteResponse.prototype.tablet_alias = null; - - /** - * BeginExecuteResponse session_state_changes. - * @member {string} session_state_changes - * @memberof query.BeginExecuteResponse - * @instance - */ - BeginExecuteResponse.prototype.session_state_changes = ""; - - /** - * Creates a new BeginExecuteResponse instance using the specified properties. + * Creates a new PrepareResponse instance using the specified properties. * @function create - * @memberof query.BeginExecuteResponse + * @memberof query.PrepareResponse * @static - * @param {query.IBeginExecuteResponse=} [properties] Properties to set - * @returns {query.BeginExecuteResponse} BeginExecuteResponse instance + * @param {query.IPrepareResponse=} [properties] Properties to set + * @returns {query.PrepareResponse} PrepareResponse instance */ - BeginExecuteResponse.create = function create(properties) { - return new BeginExecuteResponse(properties); + PrepareResponse.create = function create(properties) { + return new PrepareResponse(properties); }; /** - * Encodes the specified BeginExecuteResponse message. Does not implicitly {@link query.BeginExecuteResponse.verify|verify} messages. + * Encodes the specified PrepareResponse message. Does not implicitly {@link query.PrepareResponse.verify|verify} messages. 
* @function encode - * @memberof query.BeginExecuteResponse + * @memberof query.PrepareResponse * @static - * @param {query.IBeginExecuteResponse} message BeginExecuteResponse message or plain object to encode + * @param {query.IPrepareResponse} message PrepareResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BeginExecuteResponse.encode = function encode(message, writer) { + PrepareResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.error != null && Object.hasOwnProperty.call(message, "error")) - $root.vtrpc.RPCError.encode(message.error, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.result != null && Object.hasOwnProperty.call(message, "result")) - $root.query.QueryResult.encode(message.result, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) - writer.uint32(/* id 3, wireType 0 =*/24).int64(message.transaction_id); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.session_state_changes != null && Object.hasOwnProperty.call(message, "session_state_changes")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.session_state_changes); return writer; }; /** - * Encodes the specified BeginExecuteResponse message, length delimited. Does not implicitly {@link query.BeginExecuteResponse.verify|verify} messages. + * Encodes the specified PrepareResponse message, length delimited. Does not implicitly {@link query.PrepareResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof query.BeginExecuteResponse + * @memberof query.PrepareResponse * @static - * @param {query.IBeginExecuteResponse} message BeginExecuteResponse message or plain object to encode + * @param {query.IPrepareResponse} message PrepareResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BeginExecuteResponse.encodeDelimited = function encodeDelimited(message, writer) { + PrepareResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a BeginExecuteResponse message from the specified reader or buffer. + * Decodes a PrepareResponse message from the specified reader or buffer. * @function decode - * @memberof query.BeginExecuteResponse + * @memberof query.PrepareResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.BeginExecuteResponse} BeginExecuteResponse + * @returns {query.PrepareResponse} PrepareResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BeginExecuteResponse.decode = function decode(reader, length) { + PrepareResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.BeginExecuteResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.PrepareResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.error = $root.vtrpc.RPCError.decode(reader, reader.uint32()); - break; - } - case 2: { - message.result = $root.query.QueryResult.decode(reader, reader.uint32()); - break; - } - case 3: { - message.transaction_id = reader.int64(); - break; - } - case 4: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } - case 5: { - message.session_state_changes = reader.string(); - break; - } default: reader.skipType(tag & 7); break; @@ -83971,191 +85790,112 @@ export const query = $root.query = (() => { }; /** - * Decodes a BeginExecuteResponse message from the specified reader or buffer, length delimited. + * Decodes a PrepareResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.BeginExecuteResponse + * @memberof query.PrepareResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.BeginExecuteResponse} BeginExecuteResponse + * @returns {query.PrepareResponse} PrepareResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BeginExecuteResponse.decodeDelimited = function decodeDelimited(reader) { + PrepareResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a BeginExecuteResponse message. + * Verifies a PrepareResponse message. 
* @function verify - * @memberof query.BeginExecuteResponse + * @memberof query.PrepareResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - BeginExecuteResponse.verify = function verify(message) { + PrepareResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.error != null && message.hasOwnProperty("error")) { - let error = $root.vtrpc.RPCError.verify(message.error); - if (error) - return "error." + error; - } - if (message.result != null && message.hasOwnProperty("result")) { - let error = $root.query.QueryResult.verify(message.result); - if (error) - return "result." + error; - } - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) - return "transaction_id: integer|Long expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } - if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) - if (!$util.isString(message.session_state_changes)) - return "session_state_changes: string expected"; return null; }; /** - * Creates a BeginExecuteResponse message from a plain object. Also converts values to their respective internal types. + * Creates a PrepareResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.BeginExecuteResponse + * @memberof query.PrepareResponse * @static * @param {Object.} object Plain object - * @returns {query.BeginExecuteResponse} BeginExecuteResponse + * @returns {query.PrepareResponse} PrepareResponse */ - BeginExecuteResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.BeginExecuteResponse) + PrepareResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.PrepareResponse) return object; - let message = new $root.query.BeginExecuteResponse(); - if (object.error != null) { - if (typeof object.error !== "object") - throw TypeError(".query.BeginExecuteResponse.error: object expected"); - message.error = $root.vtrpc.RPCError.fromObject(object.error); - } - if (object.result != null) { - if (typeof object.result !== "object") - throw TypeError(".query.BeginExecuteResponse.result: object expected"); - message.result = $root.query.QueryResult.fromObject(object.result); - } - if (object.transaction_id != null) - if ($util.Long) - (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; - else if (typeof object.transaction_id === "string") - message.transaction_id = parseInt(object.transaction_id, 10); - else if (typeof object.transaction_id === "number") - message.transaction_id = object.transaction_id; - else if (typeof object.transaction_id === "object") - message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".query.BeginExecuteResponse.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } - if (object.session_state_changes != null) - message.session_state_changes = String(object.session_state_changes); - return message; + return new $root.query.PrepareResponse(); }; /** - * 
Creates a plain object from a BeginExecuteResponse message. Also converts values to other types if specified. + * Creates a plain object from a PrepareResponse message. Also converts values to other types if specified. * @function toObject - * @memberof query.BeginExecuteResponse + * @memberof query.PrepareResponse * @static - * @param {query.BeginExecuteResponse} message BeginExecuteResponse + * @param {query.PrepareResponse} message PrepareResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - BeginExecuteResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.error = null; - object.result = null; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.transaction_id = options.longs === String ? "0" : 0; - object.tablet_alias = null; - object.session_state_changes = ""; - } - if (message.error != null && message.hasOwnProperty("error")) - object.error = $root.vtrpc.RPCError.toObject(message.error, options); - if (message.result != null && message.hasOwnProperty("result")) - object.result = $root.query.QueryResult.toObject(message.result, options); - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (typeof message.transaction_id === "number") - object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; - else - object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? 
new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) - object.session_state_changes = message.session_state_changes; - return object; + PrepareResponse.toObject = function toObject() { + return {}; }; /** - * Converts this BeginExecuteResponse to JSON. + * Converts this PrepareResponse to JSON. * @function toJSON - * @memberof query.BeginExecuteResponse + * @memberof query.PrepareResponse * @instance * @returns {Object.} JSON object */ - BeginExecuteResponse.prototype.toJSON = function toJSON() { + PrepareResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for BeginExecuteResponse + * Gets the default type url for PrepareResponse * @function getTypeUrl - * @memberof query.BeginExecuteResponse + * @memberof query.PrepareResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - BeginExecuteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + PrepareResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.BeginExecuteResponse"; + return typeUrlPrefix + "/query.PrepareResponse"; }; - return BeginExecuteResponse; + return PrepareResponse; })(); - query.BeginStreamExecuteRequest = (function() { + query.CommitPreparedRequest = (function() { /** - * Properties of a BeginStreamExecuteRequest. + * Properties of a CommitPreparedRequest. 
* @memberof query - * @interface IBeginStreamExecuteRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] BeginStreamExecuteRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] BeginStreamExecuteRequest immediate_caller_id - * @property {query.ITarget|null} [target] BeginStreamExecuteRequest target - * @property {query.IBoundQuery|null} [query] BeginStreamExecuteRequest query - * @property {query.IExecuteOptions|null} [options] BeginStreamExecuteRequest options - * @property {Array.|null} [pre_queries] BeginStreamExecuteRequest pre_queries - * @property {number|Long|null} [reserved_id] BeginStreamExecuteRequest reserved_id + * @interface ICommitPreparedRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] CommitPreparedRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] CommitPreparedRequest immediate_caller_id + * @property {query.ITarget|null} [target] CommitPreparedRequest target + * @property {string|null} [dtid] CommitPreparedRequest dtid */ /** - * Constructs a new BeginStreamExecuteRequest. + * Constructs a new CommitPreparedRequest. * @memberof query - * @classdesc Represents a BeginStreamExecuteRequest. - * @implements IBeginStreamExecuteRequest + * @classdesc Represents a CommitPreparedRequest. + * @implements ICommitPreparedRequest * @constructor - * @param {query.IBeginStreamExecuteRequest=} [properties] Properties to set + * @param {query.ICommitPreparedRequest=} [properties] Properties to set */ - function BeginStreamExecuteRequest(properties) { - this.pre_queries = []; + function CommitPreparedRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -84163,83 +85903,59 @@ export const query = $root.query = (() => { } /** - * BeginStreamExecuteRequest effective_caller_id. + * CommitPreparedRequest effective_caller_id. 
* @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.BeginStreamExecuteRequest + * @memberof query.CommitPreparedRequest * @instance */ - BeginStreamExecuteRequest.prototype.effective_caller_id = null; + CommitPreparedRequest.prototype.effective_caller_id = null; /** - * BeginStreamExecuteRequest immediate_caller_id. + * CommitPreparedRequest immediate_caller_id. * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.BeginStreamExecuteRequest + * @memberof query.CommitPreparedRequest * @instance */ - BeginStreamExecuteRequest.prototype.immediate_caller_id = null; + CommitPreparedRequest.prototype.immediate_caller_id = null; /** - * BeginStreamExecuteRequest target. + * CommitPreparedRequest target. * @member {query.ITarget|null|undefined} target - * @memberof query.BeginStreamExecuteRequest - * @instance - */ - BeginStreamExecuteRequest.prototype.target = null; - - /** - * BeginStreamExecuteRequest query. - * @member {query.IBoundQuery|null|undefined} query - * @memberof query.BeginStreamExecuteRequest - * @instance - */ - BeginStreamExecuteRequest.prototype.query = null; - - /** - * BeginStreamExecuteRequest options. - * @member {query.IExecuteOptions|null|undefined} options - * @memberof query.BeginStreamExecuteRequest - * @instance - */ - BeginStreamExecuteRequest.prototype.options = null; - - /** - * BeginStreamExecuteRequest pre_queries. - * @member {Array.} pre_queries - * @memberof query.BeginStreamExecuteRequest + * @memberof query.CommitPreparedRequest * @instance */ - BeginStreamExecuteRequest.prototype.pre_queries = $util.emptyArray; + CommitPreparedRequest.prototype.target = null; /** - * BeginStreamExecuteRequest reserved_id. - * @member {number|Long} reserved_id - * @memberof query.BeginStreamExecuteRequest + * CommitPreparedRequest dtid. + * @member {string} dtid + * @memberof query.CommitPreparedRequest * @instance */ - BeginStreamExecuteRequest.prototype.reserved_id = $util.Long ? 
$util.Long.fromBits(0,0,false) : 0; + CommitPreparedRequest.prototype.dtid = ""; /** - * Creates a new BeginStreamExecuteRequest instance using the specified properties. + * Creates a new CommitPreparedRequest instance using the specified properties. * @function create - * @memberof query.BeginStreamExecuteRequest + * @memberof query.CommitPreparedRequest * @static - * @param {query.IBeginStreamExecuteRequest=} [properties] Properties to set - * @returns {query.BeginStreamExecuteRequest} BeginStreamExecuteRequest instance + * @param {query.ICommitPreparedRequest=} [properties] Properties to set + * @returns {query.CommitPreparedRequest} CommitPreparedRequest instance */ - BeginStreamExecuteRequest.create = function create(properties) { - return new BeginStreamExecuteRequest(properties); + CommitPreparedRequest.create = function create(properties) { + return new CommitPreparedRequest(properties); }; /** - * Encodes the specified BeginStreamExecuteRequest message. Does not implicitly {@link query.BeginStreamExecuteRequest.verify|verify} messages. + * Encodes the specified CommitPreparedRequest message. Does not implicitly {@link query.CommitPreparedRequest.verify|verify} messages. 
* @function encode - * @memberof query.BeginStreamExecuteRequest + * @memberof query.CommitPreparedRequest * @static - * @param {query.IBeginStreamExecuteRequest} message BeginStreamExecuteRequest message or plain object to encode + * @param {query.ICommitPreparedRequest} message CommitPreparedRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BeginStreamExecuteRequest.encode = function encode(message, writer) { + CommitPreparedRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) @@ -84248,46 +85964,39 @@ export const query = $root.query = (() => { $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); if (message.target != null && Object.hasOwnProperty.call(message, "target")) $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.query != null && Object.hasOwnProperty.call(message, "query")) - $root.query.BoundQuery.encode(message.query, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.options != null && Object.hasOwnProperty.call(message, "options")) - $root.query.ExecuteOptions.encode(message.options, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); - if (message.pre_queries != null && message.pre_queries.length) - for (let i = 0; i < message.pre_queries.length; ++i) - writer.uint32(/* id 6, wireType 2 =*/50).string(message.pre_queries[i]); - if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) - writer.uint32(/* id 7, wireType 0 =*/56).int64(message.reserved_id); + if (message.dtid != null && Object.hasOwnProperty.call(message, "dtid")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.dtid); return writer; }; /** - * Encodes the 
specified BeginStreamExecuteRequest message, length delimited. Does not implicitly {@link query.BeginStreamExecuteRequest.verify|verify} messages. + * Encodes the specified CommitPreparedRequest message, length delimited. Does not implicitly {@link query.CommitPreparedRequest.verify|verify} messages. * @function encodeDelimited - * @memberof query.BeginStreamExecuteRequest + * @memberof query.CommitPreparedRequest * @static - * @param {query.IBeginStreamExecuteRequest} message BeginStreamExecuteRequest message or plain object to encode + * @param {query.ICommitPreparedRequest} message CommitPreparedRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BeginStreamExecuteRequest.encodeDelimited = function encodeDelimited(message, writer) { + CommitPreparedRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a BeginStreamExecuteRequest message from the specified reader or buffer. + * Decodes a CommitPreparedRequest message from the specified reader or buffer. * @function decode - * @memberof query.BeginStreamExecuteRequest + * @memberof query.CommitPreparedRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.BeginStreamExecuteRequest} BeginStreamExecuteRequest + * @returns {query.CommitPreparedRequest} CommitPreparedRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BeginStreamExecuteRequest.decode = function decode(reader, length) { + CommitPreparedRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.BeginStreamExecuteRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.CommitPreparedRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -84304,21 +86013,7 @@ export const query = $root.query = (() => { break; } case 4: { - message.query = $root.query.BoundQuery.decode(reader, reader.uint32()); - break; - } - case 5: { - message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); - break; - } - case 6: { - if (!(message.pre_queries && message.pre_queries.length)) - message.pre_queries = []; - message.pre_queries.push(reader.string()); - break; - } - case 7: { - message.reserved_id = reader.int64(); + message.dtid = reader.string(); break; } default: @@ -84330,30 +86025,30 @@ export const query = $root.query = (() => { }; /** - * Decodes a BeginStreamExecuteRequest message from the specified reader or buffer, length delimited. + * Decodes a CommitPreparedRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.BeginStreamExecuteRequest + * @memberof query.CommitPreparedRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.BeginStreamExecuteRequest} BeginStreamExecuteRequest + * @returns {query.CommitPreparedRequest} CommitPreparedRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BeginStreamExecuteRequest.decodeDelimited = function decodeDelimited(reader) { + CommitPreparedRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a BeginStreamExecuteRequest message. + * Verifies a CommitPreparedRequest message. 
* @function verify - * @memberof query.BeginStreamExecuteRequest + * @memberof query.CommitPreparedRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - BeginStreamExecuteRequest.verify = function verify(message) { + CommitPreparedRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { @@ -84371,111 +86066,62 @@ export const query = $root.query = (() => { if (error) return "target." + error; } - if (message.query != null && message.hasOwnProperty("query")) { - let error = $root.query.BoundQuery.verify(message.query); - if (error) - return "query." + error; - } - if (message.options != null && message.hasOwnProperty("options")) { - let error = $root.query.ExecuteOptions.verify(message.options); - if (error) - return "options." + error; - } - if (message.pre_queries != null && message.hasOwnProperty("pre_queries")) { - if (!Array.isArray(message.pre_queries)) - return "pre_queries: array expected"; - for (let i = 0; i < message.pre_queries.length; ++i) - if (!$util.isString(message.pre_queries[i])) - return "pre_queries: string[] expected"; - } - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) - return "reserved_id: integer|Long expected"; + if (message.dtid != null && message.hasOwnProperty("dtid")) + if (!$util.isString(message.dtid)) + return "dtid: string expected"; return null; }; /** - * Creates a BeginStreamExecuteRequest message from a plain object. Also converts values to their respective internal types. + * Creates a CommitPreparedRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.BeginStreamExecuteRequest + * @memberof query.CommitPreparedRequest * @static * @param {Object.} object Plain object - * @returns {query.BeginStreamExecuteRequest} BeginStreamExecuteRequest + * @returns {query.CommitPreparedRequest} CommitPreparedRequest */ - BeginStreamExecuteRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.BeginStreamExecuteRequest) + CommitPreparedRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.CommitPreparedRequest) return object; - let message = new $root.query.BeginStreamExecuteRequest(); + let message = new $root.query.CommitPreparedRequest(); if (object.effective_caller_id != null) { if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.BeginStreamExecuteRequest.effective_caller_id: object expected"); + throw TypeError(".query.CommitPreparedRequest.effective_caller_id: object expected"); message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); } if (object.immediate_caller_id != null) { if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.BeginStreamExecuteRequest.immediate_caller_id: object expected"); + throw TypeError(".query.CommitPreparedRequest.immediate_caller_id: object expected"); message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); } if (object.target != null) { if (typeof object.target !== "object") - throw TypeError(".query.BeginStreamExecuteRequest.target: object expected"); + throw TypeError(".query.CommitPreparedRequest.target: object expected"); message.target = $root.query.Target.fromObject(object.target); } - if (object.query != null) { - if (typeof object.query !== "object") - throw TypeError(".query.BeginStreamExecuteRequest.query: object expected"); - message.query = $root.query.BoundQuery.fromObject(object.query); - } - if (object.options != null) { - if (typeof object.options 
!== "object") - throw TypeError(".query.BeginStreamExecuteRequest.options: object expected"); - message.options = $root.query.ExecuteOptions.fromObject(object.options); - } - if (object.pre_queries) { - if (!Array.isArray(object.pre_queries)) - throw TypeError(".query.BeginStreamExecuteRequest.pre_queries: array expected"); - message.pre_queries = []; - for (let i = 0; i < object.pre_queries.length; ++i) - message.pre_queries[i] = String(object.pre_queries[i]); - } - if (object.reserved_id != null) - if ($util.Long) - (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; - else if (typeof object.reserved_id === "string") - message.reserved_id = parseInt(object.reserved_id, 10); - else if (typeof object.reserved_id === "number") - message.reserved_id = object.reserved_id; - else if (typeof object.reserved_id === "object") - message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); + if (object.dtid != null) + message.dtid = String(object.dtid); return message; }; /** - * Creates a plain object from a BeginStreamExecuteRequest message. Also converts values to other types if specified. + * Creates a plain object from a CommitPreparedRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.BeginStreamExecuteRequest + * @memberof query.CommitPreparedRequest * @static - * @param {query.BeginStreamExecuteRequest} message BeginStreamExecuteRequest + * @param {query.CommitPreparedRequest} message CommitPreparedRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - BeginStreamExecuteRequest.toObject = function toObject(message, options) { + CommitPreparedRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.pre_queries = []; if (options.defaults) { object.effective_caller_id = null; object.immediate_caller_id = null; object.target = null; - object.query = null; - object.options = null; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.reserved_id = options.longs === String ? 
"0" : 0; + object.dtid = ""; } if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); @@ -84483,74 +86129,57 @@ export const query = $root.query = (() => { object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); if (message.target != null && message.hasOwnProperty("target")) object.target = $root.query.Target.toObject(message.target, options); - if (message.query != null && message.hasOwnProperty("query")) - object.query = $root.query.BoundQuery.toObject(message.query, options); - if (message.options != null && message.hasOwnProperty("options")) - object.options = $root.query.ExecuteOptions.toObject(message.options, options); - if (message.pre_queries && message.pre_queries.length) { - object.pre_queries = []; - for (let j = 0; j < message.pre_queries.length; ++j) - object.pre_queries[j] = message.pre_queries[j]; - } - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (typeof message.reserved_id === "number") - object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; - else - object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; + if (message.dtid != null && message.hasOwnProperty("dtid")) + object.dtid = message.dtid; return object; }; /** - * Converts this BeginStreamExecuteRequest to JSON. + * Converts this CommitPreparedRequest to JSON. 
* @function toJSON - * @memberof query.BeginStreamExecuteRequest + * @memberof query.CommitPreparedRequest * @instance * @returns {Object.} JSON object */ - BeginStreamExecuteRequest.prototype.toJSON = function toJSON() { + CommitPreparedRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for BeginStreamExecuteRequest + * Gets the default type url for CommitPreparedRequest * @function getTypeUrl - * @memberof query.BeginStreamExecuteRequest + * @memberof query.CommitPreparedRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - BeginStreamExecuteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + CommitPreparedRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.BeginStreamExecuteRequest"; + return typeUrlPrefix + "/query.CommitPreparedRequest"; }; - return BeginStreamExecuteRequest; + return CommitPreparedRequest; })(); - query.BeginStreamExecuteResponse = (function() { + query.CommitPreparedResponse = (function() { /** - * Properties of a BeginStreamExecuteResponse. + * Properties of a CommitPreparedResponse. * @memberof query - * @interface IBeginStreamExecuteResponse - * @property {vtrpc.IRPCError|null} [error] BeginStreamExecuteResponse error - * @property {query.IQueryResult|null} [result] BeginStreamExecuteResponse result - * @property {number|Long|null} [transaction_id] BeginStreamExecuteResponse transaction_id - * @property {topodata.ITabletAlias|null} [tablet_alias] BeginStreamExecuteResponse tablet_alias - * @property {string|null} [session_state_changes] BeginStreamExecuteResponse session_state_changes + * @interface ICommitPreparedResponse */ /** - * Constructs a new BeginStreamExecuteResponse. 
+ * Constructs a new CommitPreparedResponse. * @memberof query - * @classdesc Represents a BeginStreamExecuteResponse. - * @implements IBeginStreamExecuteResponse + * @classdesc Represents a CommitPreparedResponse. + * @implements ICommitPreparedResponse * @constructor - * @param {query.IBeginStreamExecuteResponse=} [properties] Properties to set + * @param {query.ICommitPreparedResponse=} [properties] Properties to set */ - function BeginStreamExecuteResponse(properties) { + function CommitPreparedResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -84558,133 +86187,63 @@ export const query = $root.query = (() => { } /** - * BeginStreamExecuteResponse error. - * @member {vtrpc.IRPCError|null|undefined} error - * @memberof query.BeginStreamExecuteResponse - * @instance - */ - BeginStreamExecuteResponse.prototype.error = null; - - /** - * BeginStreamExecuteResponse result. - * @member {query.IQueryResult|null|undefined} result - * @memberof query.BeginStreamExecuteResponse - * @instance - */ - BeginStreamExecuteResponse.prototype.result = null; - - /** - * BeginStreamExecuteResponse transaction_id. - * @member {number|Long} transaction_id - * @memberof query.BeginStreamExecuteResponse - * @instance - */ - BeginStreamExecuteResponse.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * BeginStreamExecuteResponse tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof query.BeginStreamExecuteResponse - * @instance - */ - BeginStreamExecuteResponse.prototype.tablet_alias = null; - - /** - * BeginStreamExecuteResponse session_state_changes. - * @member {string} session_state_changes - * @memberof query.BeginStreamExecuteResponse - * @instance - */ - BeginStreamExecuteResponse.prototype.session_state_changes = ""; - - /** - * Creates a new BeginStreamExecuteResponse instance using the specified properties. 
+ * Creates a new CommitPreparedResponse instance using the specified properties. * @function create - * @memberof query.BeginStreamExecuteResponse + * @memberof query.CommitPreparedResponse * @static - * @param {query.IBeginStreamExecuteResponse=} [properties] Properties to set - * @returns {query.BeginStreamExecuteResponse} BeginStreamExecuteResponse instance + * @param {query.ICommitPreparedResponse=} [properties] Properties to set + * @returns {query.CommitPreparedResponse} CommitPreparedResponse instance */ - BeginStreamExecuteResponse.create = function create(properties) { - return new BeginStreamExecuteResponse(properties); + CommitPreparedResponse.create = function create(properties) { + return new CommitPreparedResponse(properties); }; /** - * Encodes the specified BeginStreamExecuteResponse message. Does not implicitly {@link query.BeginStreamExecuteResponse.verify|verify} messages. + * Encodes the specified CommitPreparedResponse message. Does not implicitly {@link query.CommitPreparedResponse.verify|verify} messages. 
* @function encode - * @memberof query.BeginStreamExecuteResponse + * @memberof query.CommitPreparedResponse * @static - * @param {query.IBeginStreamExecuteResponse} message BeginStreamExecuteResponse message or plain object to encode + * @param {query.ICommitPreparedResponse} message CommitPreparedResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BeginStreamExecuteResponse.encode = function encode(message, writer) { + CommitPreparedResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.error != null && Object.hasOwnProperty.call(message, "error")) - $root.vtrpc.RPCError.encode(message.error, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.result != null && Object.hasOwnProperty.call(message, "result")) - $root.query.QueryResult.encode(message.result, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) - writer.uint32(/* id 3, wireType 0 =*/24).int64(message.transaction_id); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.session_state_changes != null && Object.hasOwnProperty.call(message, "session_state_changes")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.session_state_changes); return writer; }; /** - * Encodes the specified BeginStreamExecuteResponse message, length delimited. Does not implicitly {@link query.BeginStreamExecuteResponse.verify|verify} messages. + * Encodes the specified CommitPreparedResponse message, length delimited. Does not implicitly {@link query.CommitPreparedResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof query.BeginStreamExecuteResponse + * @memberof query.CommitPreparedResponse * @static - * @param {query.IBeginStreamExecuteResponse} message BeginStreamExecuteResponse message or plain object to encode + * @param {query.ICommitPreparedResponse} message CommitPreparedResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BeginStreamExecuteResponse.encodeDelimited = function encodeDelimited(message, writer) { + CommitPreparedResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a BeginStreamExecuteResponse message from the specified reader or buffer. + * Decodes a CommitPreparedResponse message from the specified reader or buffer. * @function decode - * @memberof query.BeginStreamExecuteResponse + * @memberof query.CommitPreparedResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.BeginStreamExecuteResponse} BeginStreamExecuteResponse + * @returns {query.CommitPreparedResponse} CommitPreparedResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BeginStreamExecuteResponse.decode = function decode(reader, length) { + CommitPreparedResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.BeginStreamExecuteResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.CommitPreparedResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.error = $root.vtrpc.RPCError.decode(reader, reader.uint32()); - break; - } - case 2: { - message.result = $root.query.QueryResult.decode(reader, reader.uint32()); - break; - } - case 3: { - message.transaction_id = reader.int64(); - break; - } - case 4: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } - case 5: { - message.session_state_changes = reader.string(); - break; - } default: reader.skipType(tag & 7); break; @@ -84694,187 +86253,113 @@ export const query = $root.query = (() => { }; /** - * Decodes a BeginStreamExecuteResponse message from the specified reader or buffer, length delimited. + * Decodes a CommitPreparedResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.BeginStreamExecuteResponse + * @memberof query.CommitPreparedResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.BeginStreamExecuteResponse} BeginStreamExecuteResponse + * @returns {query.CommitPreparedResponse} CommitPreparedResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BeginStreamExecuteResponse.decodeDelimited = function decodeDelimited(reader) { + CommitPreparedResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a BeginStreamExecuteResponse message. + * Verifies a CommitPreparedResponse message. 
* @function verify - * @memberof query.BeginStreamExecuteResponse + * @memberof query.CommitPreparedResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - BeginStreamExecuteResponse.verify = function verify(message) { + CommitPreparedResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.error != null && message.hasOwnProperty("error")) { - let error = $root.vtrpc.RPCError.verify(message.error); - if (error) - return "error." + error; - } - if (message.result != null && message.hasOwnProperty("result")) { - let error = $root.query.QueryResult.verify(message.result); - if (error) - return "result." + error; - } - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) - return "transaction_id: integer|Long expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } - if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) - if (!$util.isString(message.session_state_changes)) - return "session_state_changes: string expected"; return null; }; /** - * Creates a BeginStreamExecuteResponse message from a plain object. Also converts values to their respective internal types. + * Creates a CommitPreparedResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.BeginStreamExecuteResponse + * @memberof query.CommitPreparedResponse * @static * @param {Object.} object Plain object - * @returns {query.BeginStreamExecuteResponse} BeginStreamExecuteResponse + * @returns {query.CommitPreparedResponse} CommitPreparedResponse */ - BeginStreamExecuteResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.BeginStreamExecuteResponse) + CommitPreparedResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.CommitPreparedResponse) return object; - let message = new $root.query.BeginStreamExecuteResponse(); - if (object.error != null) { - if (typeof object.error !== "object") - throw TypeError(".query.BeginStreamExecuteResponse.error: object expected"); - message.error = $root.vtrpc.RPCError.fromObject(object.error); - } - if (object.result != null) { - if (typeof object.result !== "object") - throw TypeError(".query.BeginStreamExecuteResponse.result: object expected"); - message.result = $root.query.QueryResult.fromObject(object.result); - } - if (object.transaction_id != null) - if ($util.Long) - (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; - else if (typeof object.transaction_id === "string") - message.transaction_id = parseInt(object.transaction_id, 10); - else if (typeof object.transaction_id === "number") - message.transaction_id = object.transaction_id; - else if (typeof object.transaction_id === "object") - message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".query.BeginStreamExecuteResponse.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } - if (object.session_state_changes != null) - message.session_state_changes = 
String(object.session_state_changes); - return message; + return new $root.query.CommitPreparedResponse(); }; /** - * Creates a plain object from a BeginStreamExecuteResponse message. Also converts values to other types if specified. + * Creates a plain object from a CommitPreparedResponse message. Also converts values to other types if specified. * @function toObject - * @memberof query.BeginStreamExecuteResponse + * @memberof query.CommitPreparedResponse * @static - * @param {query.BeginStreamExecuteResponse} message BeginStreamExecuteResponse + * @param {query.CommitPreparedResponse} message CommitPreparedResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - BeginStreamExecuteResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.error = null; - object.result = null; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.transaction_id = options.longs === String ? "0" : 0; - object.tablet_alias = null; - object.session_state_changes = ""; - } - if (message.error != null && message.hasOwnProperty("error")) - object.error = $root.vtrpc.RPCError.toObject(message.error, options); - if (message.result != null && message.hasOwnProperty("result")) - object.result = $root.query.QueryResult.toObject(message.result, options); - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (typeof message.transaction_id === "number") - object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; - else - object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? 
new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) - object.session_state_changes = message.session_state_changes; - return object; + CommitPreparedResponse.toObject = function toObject() { + return {}; }; /** - * Converts this BeginStreamExecuteResponse to JSON. + * Converts this CommitPreparedResponse to JSON. * @function toJSON - * @memberof query.BeginStreamExecuteResponse + * @memberof query.CommitPreparedResponse * @instance * @returns {Object.} JSON object */ - BeginStreamExecuteResponse.prototype.toJSON = function toJSON() { + CommitPreparedResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for BeginStreamExecuteResponse + * Gets the default type url for CommitPreparedResponse * @function getTypeUrl - * @memberof query.BeginStreamExecuteResponse + * @memberof query.CommitPreparedResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - BeginStreamExecuteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + CommitPreparedResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.BeginStreamExecuteResponse"; + return typeUrlPrefix + "/query.CommitPreparedResponse"; }; - return BeginStreamExecuteResponse; + return CommitPreparedResponse; })(); - query.MessageStreamRequest = (function() { + query.RollbackPreparedRequest = (function() { /** - * Properties of a MessageStreamRequest. 
+ * Properties of a RollbackPreparedRequest. * @memberof query - * @interface IMessageStreamRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] MessageStreamRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] MessageStreamRequest immediate_caller_id - * @property {query.ITarget|null} [target] MessageStreamRequest target - * @property {string|null} [name] MessageStreamRequest name + * @interface IRollbackPreparedRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] RollbackPreparedRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] RollbackPreparedRequest immediate_caller_id + * @property {query.ITarget|null} [target] RollbackPreparedRequest target + * @property {number|Long|null} [transaction_id] RollbackPreparedRequest transaction_id + * @property {string|null} [dtid] RollbackPreparedRequest dtid */ /** - * Constructs a new MessageStreamRequest. + * Constructs a new RollbackPreparedRequest. * @memberof query - * @classdesc Represents a MessageStreamRequest. - * @implements IMessageStreamRequest + * @classdesc Represents a RollbackPreparedRequest. + * @implements IRollbackPreparedRequest * @constructor - * @param {query.IMessageStreamRequest=} [properties] Properties to set + * @param {query.IRollbackPreparedRequest=} [properties] Properties to set */ - function MessageStreamRequest(properties) { + function RollbackPreparedRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -84882,59 +86367,67 @@ export const query = $root.query = (() => { } /** - * MessageStreamRequest effective_caller_id. + * RollbackPreparedRequest effective_caller_id. 
* @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.MessageStreamRequest + * @memberof query.RollbackPreparedRequest * @instance */ - MessageStreamRequest.prototype.effective_caller_id = null; + RollbackPreparedRequest.prototype.effective_caller_id = null; /** - * MessageStreamRequest immediate_caller_id. + * RollbackPreparedRequest immediate_caller_id. * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.MessageStreamRequest + * @memberof query.RollbackPreparedRequest * @instance */ - MessageStreamRequest.prototype.immediate_caller_id = null; + RollbackPreparedRequest.prototype.immediate_caller_id = null; /** - * MessageStreamRequest target. + * RollbackPreparedRequest target. * @member {query.ITarget|null|undefined} target - * @memberof query.MessageStreamRequest + * @memberof query.RollbackPreparedRequest * @instance */ - MessageStreamRequest.prototype.target = null; + RollbackPreparedRequest.prototype.target = null; /** - * MessageStreamRequest name. - * @member {string} name - * @memberof query.MessageStreamRequest + * RollbackPreparedRequest transaction_id. + * @member {number|Long} transaction_id + * @memberof query.RollbackPreparedRequest * @instance */ - MessageStreamRequest.prototype.name = ""; + RollbackPreparedRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * Creates a new MessageStreamRequest instance using the specified properties. + * RollbackPreparedRequest dtid. + * @member {string} dtid + * @memberof query.RollbackPreparedRequest + * @instance + */ + RollbackPreparedRequest.prototype.dtid = ""; + + /** + * Creates a new RollbackPreparedRequest instance using the specified properties. 
* @function create - * @memberof query.MessageStreamRequest + * @memberof query.RollbackPreparedRequest * @static - * @param {query.IMessageStreamRequest=} [properties] Properties to set - * @returns {query.MessageStreamRequest} MessageStreamRequest instance + * @param {query.IRollbackPreparedRequest=} [properties] Properties to set + * @returns {query.RollbackPreparedRequest} RollbackPreparedRequest instance */ - MessageStreamRequest.create = function create(properties) { - return new MessageStreamRequest(properties); + RollbackPreparedRequest.create = function create(properties) { + return new RollbackPreparedRequest(properties); }; /** - * Encodes the specified MessageStreamRequest message. Does not implicitly {@link query.MessageStreamRequest.verify|verify} messages. + * Encodes the specified RollbackPreparedRequest message. Does not implicitly {@link query.RollbackPreparedRequest.verify|verify} messages. * @function encode - * @memberof query.MessageStreamRequest + * @memberof query.RollbackPreparedRequest * @static - * @param {query.IMessageStreamRequest} message MessageStreamRequest message or plain object to encode + * @param {query.IRollbackPreparedRequest} message RollbackPreparedRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - MessageStreamRequest.encode = function encode(message, writer) { + RollbackPreparedRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) @@ -84943,39 +86436,41 @@ export const query = $root.query = (() => { $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); if (message.target != null && Object.hasOwnProperty.call(message, "target")) $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - 
if (message.name != null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.name); + if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) + writer.uint32(/* id 4, wireType 0 =*/32).int64(message.transaction_id); + if (message.dtid != null && Object.hasOwnProperty.call(message, "dtid")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.dtid); return writer; }; /** - * Encodes the specified MessageStreamRequest message, length delimited. Does not implicitly {@link query.MessageStreamRequest.verify|verify} messages. + * Encodes the specified RollbackPreparedRequest message, length delimited. Does not implicitly {@link query.RollbackPreparedRequest.verify|verify} messages. * @function encodeDelimited - * @memberof query.MessageStreamRequest + * @memberof query.RollbackPreparedRequest * @static - * @param {query.IMessageStreamRequest} message MessageStreamRequest message or plain object to encode + * @param {query.IRollbackPreparedRequest} message RollbackPreparedRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - MessageStreamRequest.encodeDelimited = function encodeDelimited(message, writer) { + RollbackPreparedRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a MessageStreamRequest message from the specified reader or buffer. + * Decodes a RollbackPreparedRequest message from the specified reader or buffer. 
* @function decode - * @memberof query.MessageStreamRequest + * @memberof query.RollbackPreparedRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.MessageStreamRequest} MessageStreamRequest + * @returns {query.RollbackPreparedRequest} RollbackPreparedRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MessageStreamRequest.decode = function decode(reader, length) { + RollbackPreparedRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.MessageStreamRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.RollbackPreparedRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -84992,7 +86487,11 @@ export const query = $root.query = (() => { break; } case 4: { - message.name = reader.string(); + message.transaction_id = reader.int64(); + break; + } + case 5: { + message.dtid = reader.string(); break; } default: @@ -85004,30 +86503,30 @@ export const query = $root.query = (() => { }; /** - * Decodes a MessageStreamRequest message from the specified reader or buffer, length delimited. + * Decodes a RollbackPreparedRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.MessageStreamRequest + * @memberof query.RollbackPreparedRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.MessageStreamRequest} MessageStreamRequest + * @returns {query.RollbackPreparedRequest} RollbackPreparedRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MessageStreamRequest.decodeDelimited = function decodeDelimited(reader) { + RollbackPreparedRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a MessageStreamRequest message. + * Verifies a RollbackPreparedRequest message. * @function verify - * @memberof query.MessageStreamRequest + * @memberof query.RollbackPreparedRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - MessageStreamRequest.verify = function verify(message) { + RollbackPreparedRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { @@ -85045,54 +86544,66 @@ export const query = $root.query = (() => { if (error) return "target." 
+ error; } - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) + return "transaction_id: integer|Long expected"; + if (message.dtid != null && message.hasOwnProperty("dtid")) + if (!$util.isString(message.dtid)) + return "dtid: string expected"; return null; }; /** - * Creates a MessageStreamRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RollbackPreparedRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.MessageStreamRequest + * @memberof query.RollbackPreparedRequest * @static * @param {Object.} object Plain object - * @returns {query.MessageStreamRequest} MessageStreamRequest + * @returns {query.RollbackPreparedRequest} RollbackPreparedRequest */ - MessageStreamRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.MessageStreamRequest) + RollbackPreparedRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.RollbackPreparedRequest) return object; - let message = new $root.query.MessageStreamRequest(); + let message = new $root.query.RollbackPreparedRequest(); if (object.effective_caller_id != null) { if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.MessageStreamRequest.effective_caller_id: object expected"); + throw TypeError(".query.RollbackPreparedRequest.effective_caller_id: object expected"); message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); } if (object.immediate_caller_id != null) { if (typeof object.immediate_caller_id !== "object") - throw 
TypeError(".query.MessageStreamRequest.immediate_caller_id: object expected"); + throw TypeError(".query.RollbackPreparedRequest.immediate_caller_id: object expected"); message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); } if (object.target != null) { if (typeof object.target !== "object") - throw TypeError(".query.MessageStreamRequest.target: object expected"); + throw TypeError(".query.RollbackPreparedRequest.target: object expected"); message.target = $root.query.Target.fromObject(object.target); } - if (object.name != null) - message.name = String(object.name); + if (object.transaction_id != null) + if ($util.Long) + (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; + else if (typeof object.transaction_id === "string") + message.transaction_id = parseInt(object.transaction_id, 10); + else if (typeof object.transaction_id === "number") + message.transaction_id = object.transaction_id; + else if (typeof object.transaction_id === "object") + message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); + if (object.dtid != null) + message.dtid = String(object.dtid); return message; }; /** - * Creates a plain object from a MessageStreamRequest message. Also converts values to other types if specified. + * Creates a plain object from a RollbackPreparedRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.MessageStreamRequest + * @memberof query.RollbackPreparedRequest * @static - * @param {query.MessageStreamRequest} message MessageStreamRequest + * @param {query.RollbackPreparedRequest} message RollbackPreparedRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - MessageStreamRequest.toObject = function toObject(message, options) { + RollbackPreparedRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; @@ -85100,7 +86611,12 @@ export const query = $root.query = (() => { object.effective_caller_id = null; object.immediate_caller_id = null; object.target = null; - object.name = ""; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.transaction_id = options.longs === String ? "0" : 0; + object.dtid = ""; } if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); @@ -85108,58 +86624,62 @@ export const query = $root.query = (() => { object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); if (message.target != null && message.hasOwnProperty("target")) object.target = $root.query.Target.toObject(message.target, options); - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (typeof message.transaction_id === "number") + object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; + else + object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? 
new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; + if (message.dtid != null && message.hasOwnProperty("dtid")) + object.dtid = message.dtid; return object; }; /** - * Converts this MessageStreamRequest to JSON. + * Converts this RollbackPreparedRequest to JSON. * @function toJSON - * @memberof query.MessageStreamRequest + * @memberof query.RollbackPreparedRequest * @instance * @returns {Object.} JSON object */ - MessageStreamRequest.prototype.toJSON = function toJSON() { + RollbackPreparedRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for MessageStreamRequest + * Gets the default type url for RollbackPreparedRequest * @function getTypeUrl - * @memberof query.MessageStreamRequest + * @memberof query.RollbackPreparedRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - MessageStreamRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RollbackPreparedRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.MessageStreamRequest"; + return typeUrlPrefix + "/query.RollbackPreparedRequest"; }; - return MessageStreamRequest; + return RollbackPreparedRequest; })(); - query.MessageStreamResponse = (function() { + query.RollbackPreparedResponse = (function() { /** - * Properties of a MessageStreamResponse. + * Properties of a RollbackPreparedResponse. * @memberof query - * @interface IMessageStreamResponse - * @property {query.IQueryResult|null} [result] MessageStreamResponse result + * @interface IRollbackPreparedResponse */ /** - * Constructs a new MessageStreamResponse. + * Constructs a new RollbackPreparedResponse. 
* @memberof query - * @classdesc Represents a MessageStreamResponse. - * @implements IMessageStreamResponse + * @classdesc Represents a RollbackPreparedResponse. + * @implements IRollbackPreparedResponse * @constructor - * @param {query.IMessageStreamResponse=} [properties] Properties to set + * @param {query.IRollbackPreparedResponse=} [properties] Properties to set */ - function MessageStreamResponse(properties) { + function RollbackPreparedResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -85167,77 +86687,63 @@ export const query = $root.query = (() => { } /** - * MessageStreamResponse result. - * @member {query.IQueryResult|null|undefined} result - * @memberof query.MessageStreamResponse - * @instance - */ - MessageStreamResponse.prototype.result = null; - - /** - * Creates a new MessageStreamResponse instance using the specified properties. + * Creates a new RollbackPreparedResponse instance using the specified properties. * @function create - * @memberof query.MessageStreamResponse + * @memberof query.RollbackPreparedResponse * @static - * @param {query.IMessageStreamResponse=} [properties] Properties to set - * @returns {query.MessageStreamResponse} MessageStreamResponse instance + * @param {query.IRollbackPreparedResponse=} [properties] Properties to set + * @returns {query.RollbackPreparedResponse} RollbackPreparedResponse instance */ - MessageStreamResponse.create = function create(properties) { - return new MessageStreamResponse(properties); + RollbackPreparedResponse.create = function create(properties) { + return new RollbackPreparedResponse(properties); }; /** - * Encodes the specified MessageStreamResponse message. Does not implicitly {@link query.MessageStreamResponse.verify|verify} messages. + * Encodes the specified RollbackPreparedResponse message. Does not implicitly {@link query.RollbackPreparedResponse.verify|verify} messages. 
* @function encode - * @memberof query.MessageStreamResponse + * @memberof query.RollbackPreparedResponse * @static - * @param {query.IMessageStreamResponse} message MessageStreamResponse message or plain object to encode + * @param {query.IRollbackPreparedResponse} message RollbackPreparedResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - MessageStreamResponse.encode = function encode(message, writer) { + RollbackPreparedResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.result != null && Object.hasOwnProperty.call(message, "result")) - $root.query.QueryResult.encode(message.result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified MessageStreamResponse message, length delimited. Does not implicitly {@link query.MessageStreamResponse.verify|verify} messages. + * Encodes the specified RollbackPreparedResponse message, length delimited. Does not implicitly {@link query.RollbackPreparedResponse.verify|verify} messages. * @function encodeDelimited - * @memberof query.MessageStreamResponse + * @memberof query.RollbackPreparedResponse * @static - * @param {query.IMessageStreamResponse} message MessageStreamResponse message or plain object to encode + * @param {query.IRollbackPreparedResponse} message RollbackPreparedResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - MessageStreamResponse.encodeDelimited = function encodeDelimited(message, writer) { + RollbackPreparedResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a MessageStreamResponse message from the specified reader or buffer. + * Decodes a RollbackPreparedResponse message from the specified reader or buffer. 
* @function decode - * @memberof query.MessageStreamResponse + * @memberof query.RollbackPreparedResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.MessageStreamResponse} MessageStreamResponse + * @returns {query.RollbackPreparedResponse} RollbackPreparedResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MessageStreamResponse.decode = function decode(reader, length) { + RollbackPreparedResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.MessageStreamResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.RollbackPreparedResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.result = $root.query.QueryResult.decode(reader, reader.uint32()); - break; - } default: reader.skipType(tag & 7); break; @@ -85247,132 +86753,114 @@ export const query = $root.query = (() => { }; /** - * Decodes a MessageStreamResponse message from the specified reader or buffer, length delimited. + * Decodes a RollbackPreparedResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.MessageStreamResponse + * @memberof query.RollbackPreparedResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.MessageStreamResponse} MessageStreamResponse + * @returns {query.RollbackPreparedResponse} RollbackPreparedResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MessageStreamResponse.decodeDelimited = function decodeDelimited(reader) { + RollbackPreparedResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a MessageStreamResponse message. + * Verifies a RollbackPreparedResponse message. * @function verify - * @memberof query.MessageStreamResponse + * @memberof query.RollbackPreparedResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - MessageStreamResponse.verify = function verify(message) { + RollbackPreparedResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.result != null && message.hasOwnProperty("result")) { - let error = $root.query.QueryResult.verify(message.result); - if (error) - return "result." + error; - } return null; }; /** - * Creates a MessageStreamResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RollbackPreparedResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.MessageStreamResponse + * @memberof query.RollbackPreparedResponse * @static * @param {Object.} object Plain object - * @returns {query.MessageStreamResponse} MessageStreamResponse + * @returns {query.RollbackPreparedResponse} RollbackPreparedResponse */ - MessageStreamResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.MessageStreamResponse) + RollbackPreparedResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.RollbackPreparedResponse) return object; - let message = new $root.query.MessageStreamResponse(); - if (object.result != null) { - if (typeof object.result !== "object") - throw TypeError(".query.MessageStreamResponse.result: object expected"); - message.result = $root.query.QueryResult.fromObject(object.result); - } - return message; + return new $root.query.RollbackPreparedResponse(); }; /** - * Creates a plain object from a MessageStreamResponse message. Also converts values to other types if specified. + * Creates a plain object from a RollbackPreparedResponse message. Also converts values to other types if specified. * @function toObject - * @memberof query.MessageStreamResponse + * @memberof query.RollbackPreparedResponse * @static - * @param {query.MessageStreamResponse} message MessageStreamResponse + * @param {query.RollbackPreparedResponse} message RollbackPreparedResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - MessageStreamResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) - object.result = null; - if (message.result != null && message.hasOwnProperty("result")) - object.result = $root.query.QueryResult.toObject(message.result, options); - return object; + RollbackPreparedResponse.toObject = function toObject() { + return {}; }; /** - * Converts this MessageStreamResponse to JSON. 
+ * Converts this RollbackPreparedResponse to JSON. * @function toJSON - * @memberof query.MessageStreamResponse + * @memberof query.RollbackPreparedResponse * @instance * @returns {Object.} JSON object */ - MessageStreamResponse.prototype.toJSON = function toJSON() { + RollbackPreparedResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for MessageStreamResponse + * Gets the default type url for RollbackPreparedResponse * @function getTypeUrl - * @memberof query.MessageStreamResponse + * @memberof query.RollbackPreparedResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - MessageStreamResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RollbackPreparedResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.MessageStreamResponse"; + return typeUrlPrefix + "/query.RollbackPreparedResponse"; }; - return MessageStreamResponse; + return RollbackPreparedResponse; })(); - query.MessageAckRequest = (function() { + query.CreateTransactionRequest = (function() { /** - * Properties of a MessageAckRequest. + * Properties of a CreateTransactionRequest. 
* @memberof query - * @interface IMessageAckRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] MessageAckRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] MessageAckRequest immediate_caller_id - * @property {query.ITarget|null} [target] MessageAckRequest target - * @property {string|null} [name] MessageAckRequest name - * @property {Array.|null} [ids] MessageAckRequest ids + * @interface ICreateTransactionRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] CreateTransactionRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] CreateTransactionRequest immediate_caller_id + * @property {query.ITarget|null} [target] CreateTransactionRequest target + * @property {string|null} [dtid] CreateTransactionRequest dtid + * @property {Array.|null} [participants] CreateTransactionRequest participants */ /** - * Constructs a new MessageAckRequest. + * Constructs a new CreateTransactionRequest. * @memberof query - * @classdesc Represents a MessageAckRequest. - * @implements IMessageAckRequest + * @classdesc Represents a CreateTransactionRequest. + * @implements ICreateTransactionRequest * @constructor - * @param {query.IMessageAckRequest=} [properties] Properties to set + * @param {query.ICreateTransactionRequest=} [properties] Properties to set */ - function MessageAckRequest(properties) { - this.ids = []; + function CreateTransactionRequest(properties) { + this.participants = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -85380,67 +86868,67 @@ export const query = $root.query = (() => { } /** - * MessageAckRequest effective_caller_id. + * CreateTransactionRequest effective_caller_id. 
* @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.MessageAckRequest + * @memberof query.CreateTransactionRequest * @instance */ - MessageAckRequest.prototype.effective_caller_id = null; + CreateTransactionRequest.prototype.effective_caller_id = null; /** - * MessageAckRequest immediate_caller_id. + * CreateTransactionRequest immediate_caller_id. * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.MessageAckRequest + * @memberof query.CreateTransactionRequest * @instance */ - MessageAckRequest.prototype.immediate_caller_id = null; + CreateTransactionRequest.prototype.immediate_caller_id = null; /** - * MessageAckRequest target. + * CreateTransactionRequest target. * @member {query.ITarget|null|undefined} target - * @memberof query.MessageAckRequest + * @memberof query.CreateTransactionRequest * @instance */ - MessageAckRequest.prototype.target = null; + CreateTransactionRequest.prototype.target = null; /** - * MessageAckRequest name. - * @member {string} name - * @memberof query.MessageAckRequest + * CreateTransactionRequest dtid. + * @member {string} dtid + * @memberof query.CreateTransactionRequest * @instance */ - MessageAckRequest.prototype.name = ""; + CreateTransactionRequest.prototype.dtid = ""; /** - * MessageAckRequest ids. - * @member {Array.} ids - * @memberof query.MessageAckRequest + * CreateTransactionRequest participants. + * @member {Array.} participants + * @memberof query.CreateTransactionRequest * @instance */ - MessageAckRequest.prototype.ids = $util.emptyArray; + CreateTransactionRequest.prototype.participants = $util.emptyArray; /** - * Creates a new MessageAckRequest instance using the specified properties. + * Creates a new CreateTransactionRequest instance using the specified properties. 
* @function create - * @memberof query.MessageAckRequest + * @memberof query.CreateTransactionRequest * @static - * @param {query.IMessageAckRequest=} [properties] Properties to set - * @returns {query.MessageAckRequest} MessageAckRequest instance + * @param {query.ICreateTransactionRequest=} [properties] Properties to set + * @returns {query.CreateTransactionRequest} CreateTransactionRequest instance */ - MessageAckRequest.create = function create(properties) { - return new MessageAckRequest(properties); + CreateTransactionRequest.create = function create(properties) { + return new CreateTransactionRequest(properties); }; /** - * Encodes the specified MessageAckRequest message. Does not implicitly {@link query.MessageAckRequest.verify|verify} messages. + * Encodes the specified CreateTransactionRequest message. Does not implicitly {@link query.CreateTransactionRequest.verify|verify} messages. * @function encode - * @memberof query.MessageAckRequest + * @memberof query.CreateTransactionRequest * @static - * @param {query.IMessageAckRequest} message MessageAckRequest message or plain object to encode + * @param {query.ICreateTransactionRequest} message CreateTransactionRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - MessageAckRequest.encode = function encode(message, writer) { + CreateTransactionRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) @@ -85449,42 +86937,42 @@ export const query = $root.query = (() => { $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); if (message.target != null && Object.hasOwnProperty.call(message, "target")) $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.name != 
null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.name); - if (message.ids != null && message.ids.length) - for (let i = 0; i < message.ids.length; ++i) - $root.query.Value.encode(message.ids[i], writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); - return writer; + if (message.dtid != null && Object.hasOwnProperty.call(message, "dtid")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.dtid); + if (message.participants != null && message.participants.length) + for (let i = 0; i < message.participants.length; ++i) + $root.query.Target.encode(message.participants[i], writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + return writer; }; /** - * Encodes the specified MessageAckRequest message, length delimited. Does not implicitly {@link query.MessageAckRequest.verify|verify} messages. + * Encodes the specified CreateTransactionRequest message, length delimited. Does not implicitly {@link query.CreateTransactionRequest.verify|verify} messages. * @function encodeDelimited - * @memberof query.MessageAckRequest + * @memberof query.CreateTransactionRequest * @static - * @param {query.IMessageAckRequest} message MessageAckRequest message or plain object to encode + * @param {query.ICreateTransactionRequest} message CreateTransactionRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - MessageAckRequest.encodeDelimited = function encodeDelimited(message, writer) { + CreateTransactionRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a MessageAckRequest message from the specified reader or buffer. + * Decodes a CreateTransactionRequest message from the specified reader or buffer. 
* @function decode - * @memberof query.MessageAckRequest + * @memberof query.CreateTransactionRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.MessageAckRequest} MessageAckRequest + * @returns {query.CreateTransactionRequest} CreateTransactionRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MessageAckRequest.decode = function decode(reader, length) { + CreateTransactionRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.MessageAckRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.CreateTransactionRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -85501,13 +86989,13 @@ export const query = $root.query = (() => { break; } case 4: { - message.name = reader.string(); + message.dtid = reader.string(); break; } case 5: { - if (!(message.ids && message.ids.length)) - message.ids = []; - message.ids.push($root.query.Value.decode(reader, reader.uint32())); + if (!(message.participants && message.participants.length)) + message.participants = []; + message.participants.push($root.query.Target.decode(reader, reader.uint32())); break; } default: @@ -85519,30 +87007,30 @@ export const query = $root.query = (() => { }; /** - * Decodes a MessageAckRequest message from the specified reader or buffer, length delimited. + * Decodes a CreateTransactionRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.MessageAckRequest + * @memberof query.CreateTransactionRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.MessageAckRequest} MessageAckRequest + * @returns {query.CreateTransactionRequest} CreateTransactionRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MessageAckRequest.decodeDelimited = function decodeDelimited(reader) { + CreateTransactionRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a MessageAckRequest message. + * Verifies a CreateTransactionRequest message. * @function verify - * @memberof query.MessageAckRequest + * @memberof query.CreateTransactionRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - MessageAckRequest.verify = function verify(message) { + CreateTransactionRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { @@ -85560,83 +87048,83 @@ export const query = $root.query = (() => { if (error) return "target." 
+ error; } - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; - if (message.ids != null && message.hasOwnProperty("ids")) { - if (!Array.isArray(message.ids)) - return "ids: array expected"; - for (let i = 0; i < message.ids.length; ++i) { - let error = $root.query.Value.verify(message.ids[i]); + if (message.dtid != null && message.hasOwnProperty("dtid")) + if (!$util.isString(message.dtid)) + return "dtid: string expected"; + if (message.participants != null && message.hasOwnProperty("participants")) { + if (!Array.isArray(message.participants)) + return "participants: array expected"; + for (let i = 0; i < message.participants.length; ++i) { + let error = $root.query.Target.verify(message.participants[i]); if (error) - return "ids." + error; + return "participants." + error; } } return null; }; /** - * Creates a MessageAckRequest message from a plain object. Also converts values to their respective internal types. + * Creates a CreateTransactionRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.MessageAckRequest + * @memberof query.CreateTransactionRequest * @static * @param {Object.} object Plain object - * @returns {query.MessageAckRequest} MessageAckRequest + * @returns {query.CreateTransactionRequest} CreateTransactionRequest */ - MessageAckRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.MessageAckRequest) + CreateTransactionRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.CreateTransactionRequest) return object; - let message = new $root.query.MessageAckRequest(); + let message = new $root.query.CreateTransactionRequest(); if (object.effective_caller_id != null) { if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.MessageAckRequest.effective_caller_id: object expected"); + throw TypeError(".query.CreateTransactionRequest.effective_caller_id: object expected"); message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); } if (object.immediate_caller_id != null) { if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.MessageAckRequest.immediate_caller_id: object expected"); + throw TypeError(".query.CreateTransactionRequest.immediate_caller_id: object expected"); message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); } if (object.target != null) { if (typeof object.target !== "object") - throw TypeError(".query.MessageAckRequest.target: object expected"); + throw TypeError(".query.CreateTransactionRequest.target: object expected"); message.target = $root.query.Target.fromObject(object.target); } - if (object.name != null) - message.name = String(object.name); - if (object.ids) { - if (!Array.isArray(object.ids)) - throw TypeError(".query.MessageAckRequest.ids: array expected"); - message.ids = []; - for (let i = 0; i < object.ids.length; ++i) { - if (typeof object.ids[i] !== "object") - throw 
TypeError(".query.MessageAckRequest.ids: object expected"); - message.ids[i] = $root.query.Value.fromObject(object.ids[i]); + if (object.dtid != null) + message.dtid = String(object.dtid); + if (object.participants) { + if (!Array.isArray(object.participants)) + throw TypeError(".query.CreateTransactionRequest.participants: array expected"); + message.participants = []; + for (let i = 0; i < object.participants.length; ++i) { + if (typeof object.participants[i] !== "object") + throw TypeError(".query.CreateTransactionRequest.participants: object expected"); + message.participants[i] = $root.query.Target.fromObject(object.participants[i]); } } return message; }; /** - * Creates a plain object from a MessageAckRequest message. Also converts values to other types if specified. + * Creates a plain object from a CreateTransactionRequest message. Also converts values to other types if specified. * @function toObject - * @memberof query.MessageAckRequest + * @memberof query.CreateTransactionRequest * @static - * @param {query.MessageAckRequest} message MessageAckRequest + * @param {query.CreateTransactionRequest} message CreateTransactionRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - MessageAckRequest.toObject = function toObject(message, options) { + CreateTransactionRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) - object.ids = []; + object.participants = []; if (options.defaults) { object.effective_caller_id = null; object.immediate_caller_id = null; object.target = null; - object.name = ""; + object.dtid = ""; } if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); @@ -85644,63 +87132,62 @@ export const query = $root.query = (() => { object.immediate_caller_id = 
$root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); if (message.target != null && message.hasOwnProperty("target")) object.target = $root.query.Target.toObject(message.target, options); - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; - if (message.ids && message.ids.length) { - object.ids = []; - for (let j = 0; j < message.ids.length; ++j) - object.ids[j] = $root.query.Value.toObject(message.ids[j], options); + if (message.dtid != null && message.hasOwnProperty("dtid")) + object.dtid = message.dtid; + if (message.participants && message.participants.length) { + object.participants = []; + for (let j = 0; j < message.participants.length; ++j) + object.participants[j] = $root.query.Target.toObject(message.participants[j], options); } return object; }; /** - * Converts this MessageAckRequest to JSON. + * Converts this CreateTransactionRequest to JSON. * @function toJSON - * @memberof query.MessageAckRequest + * @memberof query.CreateTransactionRequest * @instance * @returns {Object.} JSON object */ - MessageAckRequest.prototype.toJSON = function toJSON() { + CreateTransactionRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for MessageAckRequest + * Gets the default type url for CreateTransactionRequest * @function getTypeUrl - * @memberof query.MessageAckRequest + * @memberof query.CreateTransactionRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - MessageAckRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + CreateTransactionRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.MessageAckRequest"; + return typeUrlPrefix + "/query.CreateTransactionRequest"; }; - return 
MessageAckRequest; + return CreateTransactionRequest; })(); - query.MessageAckResponse = (function() { + query.CreateTransactionResponse = (function() { /** - * Properties of a MessageAckResponse. + * Properties of a CreateTransactionResponse. * @memberof query - * @interface IMessageAckResponse - * @property {query.IQueryResult|null} [result] MessageAckResponse result + * @interface ICreateTransactionResponse */ /** - * Constructs a new MessageAckResponse. + * Constructs a new CreateTransactionResponse. * @memberof query - * @classdesc Represents a MessageAckResponse. - * @implements IMessageAckResponse + * @classdesc Represents a CreateTransactionResponse. + * @implements ICreateTransactionResponse * @constructor - * @param {query.IMessageAckResponse=} [properties] Properties to set + * @param {query.ICreateTransactionResponse=} [properties] Properties to set */ - function MessageAckResponse(properties) { + function CreateTransactionResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -85708,77 +87195,63 @@ export const query = $root.query = (() => { } /** - * MessageAckResponse result. - * @member {query.IQueryResult|null|undefined} result - * @memberof query.MessageAckResponse - * @instance - */ - MessageAckResponse.prototype.result = null; - - /** - * Creates a new MessageAckResponse instance using the specified properties. + * Creates a new CreateTransactionResponse instance using the specified properties. 
* @function create - * @memberof query.MessageAckResponse + * @memberof query.CreateTransactionResponse * @static - * @param {query.IMessageAckResponse=} [properties] Properties to set - * @returns {query.MessageAckResponse} MessageAckResponse instance + * @param {query.ICreateTransactionResponse=} [properties] Properties to set + * @returns {query.CreateTransactionResponse} CreateTransactionResponse instance */ - MessageAckResponse.create = function create(properties) { - return new MessageAckResponse(properties); + CreateTransactionResponse.create = function create(properties) { + return new CreateTransactionResponse(properties); }; /** - * Encodes the specified MessageAckResponse message. Does not implicitly {@link query.MessageAckResponse.verify|verify} messages. + * Encodes the specified CreateTransactionResponse message. Does not implicitly {@link query.CreateTransactionResponse.verify|verify} messages. * @function encode - * @memberof query.MessageAckResponse + * @memberof query.CreateTransactionResponse * @static - * @param {query.IMessageAckResponse} message MessageAckResponse message or plain object to encode + * @param {query.ICreateTransactionResponse} message CreateTransactionResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - MessageAckResponse.encode = function encode(message, writer) { + CreateTransactionResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.result != null && Object.hasOwnProperty.call(message, "result")) - $root.query.QueryResult.encode(message.result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified MessageAckResponse message, length delimited. Does not implicitly {@link query.MessageAckResponse.verify|verify} messages. + * Encodes the specified CreateTransactionResponse message, length delimited. 
Does not implicitly {@link query.CreateTransactionResponse.verify|verify} messages. * @function encodeDelimited - * @memberof query.MessageAckResponse + * @memberof query.CreateTransactionResponse * @static - * @param {query.IMessageAckResponse} message MessageAckResponse message or plain object to encode + * @param {query.ICreateTransactionResponse} message CreateTransactionResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - MessageAckResponse.encodeDelimited = function encodeDelimited(message, writer) { + CreateTransactionResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a MessageAckResponse message from the specified reader or buffer. + * Decodes a CreateTransactionResponse message from the specified reader or buffer. * @function decode - * @memberof query.MessageAckResponse + * @memberof query.CreateTransactionResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.MessageAckResponse} MessageAckResponse + * @returns {query.CreateTransactionResponse} CreateTransactionResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MessageAckResponse.decode = function decode(reader, length) { + CreateTransactionResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.MessageAckResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.CreateTransactionResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.result = $root.query.QueryResult.decode(reader, reader.uint32()); - break; - } default: reader.skipType(tag & 7); break; @@ -85788,134 +87261,113 @@ export const query = $root.query = (() => { }; /** - * Decodes a MessageAckResponse message from the specified reader or buffer, length delimited. + * Decodes a CreateTransactionResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.MessageAckResponse + * @memberof query.CreateTransactionResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.MessageAckResponse} MessageAckResponse + * @returns {query.CreateTransactionResponse} CreateTransactionResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MessageAckResponse.decodeDelimited = function decodeDelimited(reader) { + CreateTransactionResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a MessageAckResponse message. + * Verifies a CreateTransactionResponse message. 
* @function verify - * @memberof query.MessageAckResponse + * @memberof query.CreateTransactionResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - MessageAckResponse.verify = function verify(message) { + CreateTransactionResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.result != null && message.hasOwnProperty("result")) { - let error = $root.query.QueryResult.verify(message.result); - if (error) - return "result." + error; - } return null; }; /** - * Creates a MessageAckResponse message from a plain object. Also converts values to their respective internal types. + * Creates a CreateTransactionResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.MessageAckResponse + * @memberof query.CreateTransactionResponse * @static * @param {Object.} object Plain object - * @returns {query.MessageAckResponse} MessageAckResponse + * @returns {query.CreateTransactionResponse} CreateTransactionResponse */ - MessageAckResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.MessageAckResponse) + CreateTransactionResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.CreateTransactionResponse) return object; - let message = new $root.query.MessageAckResponse(); - if (object.result != null) { - if (typeof object.result !== "object") - throw TypeError(".query.MessageAckResponse.result: object expected"); - message.result = $root.query.QueryResult.fromObject(object.result); - } - return message; + return new $root.query.CreateTransactionResponse(); }; /** - * Creates a plain object from a MessageAckResponse message. Also converts values to other types if specified. + * Creates a plain object from a CreateTransactionResponse message. 
Also converts values to other types if specified. * @function toObject - * @memberof query.MessageAckResponse + * @memberof query.CreateTransactionResponse * @static - * @param {query.MessageAckResponse} message MessageAckResponse + * @param {query.CreateTransactionResponse} message CreateTransactionResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - MessageAckResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) - object.result = null; - if (message.result != null && message.hasOwnProperty("result")) - object.result = $root.query.QueryResult.toObject(message.result, options); - return object; + CreateTransactionResponse.toObject = function toObject() { + return {}; }; /** - * Converts this MessageAckResponse to JSON. + * Converts this CreateTransactionResponse to JSON. * @function toJSON - * @memberof query.MessageAckResponse + * @memberof query.CreateTransactionResponse * @instance * @returns {Object.} JSON object */ - MessageAckResponse.prototype.toJSON = function toJSON() { + CreateTransactionResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for MessageAckResponse + * Gets the default type url for CreateTransactionResponse * @function getTypeUrl - * @memberof query.MessageAckResponse + * @memberof query.CreateTransactionResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - MessageAckResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + CreateTransactionResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.MessageAckResponse"; + return typeUrlPrefix + "/query.CreateTransactionResponse"; }; - return 
MessageAckResponse; + return CreateTransactionResponse; })(); - query.ReserveExecuteRequest = (function() { + query.StartCommitRequest = (function() { /** - * Properties of a ReserveExecuteRequest. + * Properties of a StartCommitRequest. * @memberof query - * @interface IReserveExecuteRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] ReserveExecuteRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] ReserveExecuteRequest immediate_caller_id - * @property {query.ITarget|null} [target] ReserveExecuteRequest target - * @property {query.IBoundQuery|null} [query] ReserveExecuteRequest query - * @property {number|Long|null} [transaction_id] ReserveExecuteRequest transaction_id - * @property {query.IExecuteOptions|null} [options] ReserveExecuteRequest options - * @property {Array.|null} [pre_queries] ReserveExecuteRequest pre_queries + * @interface IStartCommitRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] StartCommitRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] StartCommitRequest immediate_caller_id + * @property {query.ITarget|null} [target] StartCommitRequest target + * @property {number|Long|null} [transaction_id] StartCommitRequest transaction_id + * @property {string|null} [dtid] StartCommitRequest dtid */ /** - * Constructs a new ReserveExecuteRequest. + * Constructs a new StartCommitRequest. * @memberof query - * @classdesc Represents a ReserveExecuteRequest. - * @implements IReserveExecuteRequest + * @classdesc Represents a StartCommitRequest. 
+ * @implements IStartCommitRequest * @constructor - * @param {query.IReserveExecuteRequest=} [properties] Properties to set + * @param {query.IStartCommitRequest=} [properties] Properties to set */ - function ReserveExecuteRequest(properties) { - this.pre_queries = []; + function StartCommitRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -85923,83 +87375,67 @@ export const query = $root.query = (() => { } /** - * ReserveExecuteRequest effective_caller_id. + * StartCommitRequest effective_caller_id. * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.ReserveExecuteRequest + * @memberof query.StartCommitRequest * @instance */ - ReserveExecuteRequest.prototype.effective_caller_id = null; + StartCommitRequest.prototype.effective_caller_id = null; /** - * ReserveExecuteRequest immediate_caller_id. + * StartCommitRequest immediate_caller_id. * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.ReserveExecuteRequest + * @memberof query.StartCommitRequest * @instance */ - ReserveExecuteRequest.prototype.immediate_caller_id = null; + StartCommitRequest.prototype.immediate_caller_id = null; /** - * ReserveExecuteRequest target. + * StartCommitRequest target. * @member {query.ITarget|null|undefined} target - * @memberof query.ReserveExecuteRequest - * @instance - */ - ReserveExecuteRequest.prototype.target = null; - - /** - * ReserveExecuteRequest query. - * @member {query.IBoundQuery|null|undefined} query - * @memberof query.ReserveExecuteRequest + * @memberof query.StartCommitRequest * @instance */ - ReserveExecuteRequest.prototype.query = null; + StartCommitRequest.prototype.target = null; /** - * ReserveExecuteRequest transaction_id. + * StartCommitRequest transaction_id. 
* @member {number|Long} transaction_id - * @memberof query.ReserveExecuteRequest - * @instance - */ - ReserveExecuteRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * ReserveExecuteRequest options. - * @member {query.IExecuteOptions|null|undefined} options - * @memberof query.ReserveExecuteRequest + * @memberof query.StartCommitRequest * @instance */ - ReserveExecuteRequest.prototype.options = null; + StartCommitRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * ReserveExecuteRequest pre_queries. - * @member {Array.} pre_queries - * @memberof query.ReserveExecuteRequest + * StartCommitRequest dtid. + * @member {string} dtid + * @memberof query.StartCommitRequest * @instance */ - ReserveExecuteRequest.prototype.pre_queries = $util.emptyArray; + StartCommitRequest.prototype.dtid = ""; /** - * Creates a new ReserveExecuteRequest instance using the specified properties. + * Creates a new StartCommitRequest instance using the specified properties. * @function create - * @memberof query.ReserveExecuteRequest + * @memberof query.StartCommitRequest * @static - * @param {query.IReserveExecuteRequest=} [properties] Properties to set - * @returns {query.ReserveExecuteRequest} ReserveExecuteRequest instance + * @param {query.IStartCommitRequest=} [properties] Properties to set + * @returns {query.StartCommitRequest} StartCommitRequest instance */ - ReserveExecuteRequest.create = function create(properties) { - return new ReserveExecuteRequest(properties); + StartCommitRequest.create = function create(properties) { + return new StartCommitRequest(properties); }; /** - * Encodes the specified ReserveExecuteRequest message. Does not implicitly {@link query.ReserveExecuteRequest.verify|verify} messages. + * Encodes the specified StartCommitRequest message. Does not implicitly {@link query.StartCommitRequest.verify|verify} messages. 
* @function encode - * @memberof query.ReserveExecuteRequest + * @memberof query.StartCommitRequest * @static - * @param {query.IReserveExecuteRequest} message ReserveExecuteRequest message or plain object to encode + * @param {query.IStartCommitRequest} message StartCommitRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReserveExecuteRequest.encode = function encode(message, writer) { + StartCommitRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) @@ -86008,46 +87444,41 @@ export const query = $root.query = (() => { $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); if (message.target != null && Object.hasOwnProperty.call(message, "target")) $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.query != null && Object.hasOwnProperty.call(message, "query")) - $root.query.BoundQuery.encode(message.query, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) - writer.uint32(/* id 5, wireType 0 =*/40).int64(message.transaction_id); - if (message.options != null && Object.hasOwnProperty.call(message, "options")) - $root.query.ExecuteOptions.encode(message.options, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); - if (message.pre_queries != null && message.pre_queries.length) - for (let i = 0; i < message.pre_queries.length; ++i) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.pre_queries[i]); + writer.uint32(/* id 4, wireType 0 =*/32).int64(message.transaction_id); + if (message.dtid != null && Object.hasOwnProperty.call(message, "dtid")) + writer.uint32(/* id 5, wireType 2 
=*/42).string(message.dtid); return writer; }; /** - * Encodes the specified ReserveExecuteRequest message, length delimited. Does not implicitly {@link query.ReserveExecuteRequest.verify|verify} messages. + * Encodes the specified StartCommitRequest message, length delimited. Does not implicitly {@link query.StartCommitRequest.verify|verify} messages. * @function encodeDelimited - * @memberof query.ReserveExecuteRequest + * @memberof query.StartCommitRequest * @static - * @param {query.IReserveExecuteRequest} message ReserveExecuteRequest message or plain object to encode + * @param {query.IStartCommitRequest} message StartCommitRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReserveExecuteRequest.encodeDelimited = function encodeDelimited(message, writer) { + StartCommitRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReserveExecuteRequest message from the specified reader or buffer. + * Decodes a StartCommitRequest message from the specified reader or buffer. * @function decode - * @memberof query.ReserveExecuteRequest + * @memberof query.StartCommitRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.ReserveExecuteRequest} ReserveExecuteRequest + * @returns {query.StartCommitRequest} StartCommitRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReserveExecuteRequest.decode = function decode(reader, length) { + StartCommitRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.ReserveExecuteRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.StartCommitRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -86064,21 +87495,11 @@ export const query = $root.query = (() => { break; } case 4: { - message.query = $root.query.BoundQuery.decode(reader, reader.uint32()); - break; - } - case 5: { message.transaction_id = reader.int64(); break; } - case 6: { - message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); - break; - } - case 7: { - if (!(message.pre_queries && message.pre_queries.length)) - message.pre_queries = []; - message.pre_queries.push(reader.string()); + case 5: { + message.dtid = reader.string(); break; } default: @@ -86090,30 +87511,30 @@ export const query = $root.query = (() => { }; /** - * Decodes a ReserveExecuteRequest message from the specified reader or buffer, length delimited. + * Decodes a StartCommitRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.ReserveExecuteRequest + * @memberof query.StartCommitRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.ReserveExecuteRequest} ReserveExecuteRequest + * @returns {query.StartCommitRequest} StartCommitRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReserveExecuteRequest.decodeDelimited = function decodeDelimited(reader) { + StartCommitRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReserveExecuteRequest message. + * Verifies a StartCommitRequest message. 
* @function verify - * @memberof query.ReserveExecuteRequest + * @memberof query.StartCommitRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReserveExecuteRequest.verify = function verify(message) { + StartCommitRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { @@ -86131,61 +87552,42 @@ export const query = $root.query = (() => { if (error) return "target." + error; } - if (message.query != null && message.hasOwnProperty("query")) { - let error = $root.query.BoundQuery.verify(message.query); - if (error) - return "query." + error; - } if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) return "transaction_id: integer|Long expected"; - if (message.options != null && message.hasOwnProperty("options")) { - let error = $root.query.ExecuteOptions.verify(message.options); - if (error) - return "options." + error; - } - if (message.pre_queries != null && message.hasOwnProperty("pre_queries")) { - if (!Array.isArray(message.pre_queries)) - return "pre_queries: array expected"; - for (let i = 0; i < message.pre_queries.length; ++i) - if (!$util.isString(message.pre_queries[i])) - return "pre_queries: string[] expected"; - } + if (message.dtid != null && message.hasOwnProperty("dtid")) + if (!$util.isString(message.dtid)) + return "dtid: string expected"; return null; }; /** - * Creates a ReserveExecuteRequest message from a plain object. Also converts values to their respective internal types. + * Creates a StartCommitRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.ReserveExecuteRequest + * @memberof query.StartCommitRequest * @static * @param {Object.} object Plain object - * @returns {query.ReserveExecuteRequest} ReserveExecuteRequest + * @returns {query.StartCommitRequest} StartCommitRequest */ - ReserveExecuteRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.ReserveExecuteRequest) + StartCommitRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.StartCommitRequest) return object; - let message = new $root.query.ReserveExecuteRequest(); + let message = new $root.query.StartCommitRequest(); if (object.effective_caller_id != null) { if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.ReserveExecuteRequest.effective_caller_id: object expected"); + throw TypeError(".query.StartCommitRequest.effective_caller_id: object expected"); message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); } if (object.immediate_caller_id != null) { if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.ReserveExecuteRequest.immediate_caller_id: object expected"); + throw TypeError(".query.StartCommitRequest.immediate_caller_id: object expected"); message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); } if (object.target != null) { if (typeof object.target !== "object") - throw TypeError(".query.ReserveExecuteRequest.target: object expected"); + throw TypeError(".query.StartCommitRequest.target: object expected"); message.target = $root.query.Target.fromObject(object.target); } - if (object.query != null) { - if (typeof object.query !== "object") - throw TypeError(".query.ReserveExecuteRequest.query: object expected"); - message.query = $root.query.BoundQuery.fromObject(object.query); - } if (object.transaction_id != null) if ($util.Long) (message.transaction_id = 
$util.Long.fromValue(object.transaction_id)).unsigned = false; @@ -86195,47 +87597,34 @@ export const query = $root.query = (() => { message.transaction_id = object.transaction_id; else if (typeof object.transaction_id === "object") message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); - if (object.options != null) { - if (typeof object.options !== "object") - throw TypeError(".query.ReserveExecuteRequest.options: object expected"); - message.options = $root.query.ExecuteOptions.fromObject(object.options); - } - if (object.pre_queries) { - if (!Array.isArray(object.pre_queries)) - throw TypeError(".query.ReserveExecuteRequest.pre_queries: array expected"); - message.pre_queries = []; - for (let i = 0; i < object.pre_queries.length; ++i) - message.pre_queries[i] = String(object.pre_queries[i]); - } + if (object.dtid != null) + message.dtid = String(object.dtid); return message; }; /** - * Creates a plain object from a ReserveExecuteRequest message. Also converts values to other types if specified. + * Creates a plain object from a StartCommitRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.ReserveExecuteRequest + * @memberof query.StartCommitRequest * @static - * @param {query.ReserveExecuteRequest} message ReserveExecuteRequest + * @param {query.StartCommitRequest} message StartCommitRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReserveExecuteRequest.toObject = function toObject(message, options) { + StartCommitRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.pre_queries = []; if (options.defaults) { object.effective_caller_id = null; object.immediate_caller_id = null; object.target = null; - object.query = null; if ($util.Long) { let long = new $util.Long(0, 0, false); object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; } else object.transaction_id = options.longs === String ? "0" : 0; - object.options = null; + object.dtid = ""; } if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); @@ -86243,73 +87632,62 @@ export const query = $root.query = (() => { object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); if (message.target != null && message.hasOwnProperty("target")) object.target = $root.query.Target.toObject(message.target, options); - if (message.query != null && message.hasOwnProperty("query")) - object.query = $root.query.BoundQuery.toObject(message.query, options); if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) if (typeof message.transaction_id === "number") object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; else object.transaction_id = options.longs === String ? 
$util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; - if (message.options != null && message.hasOwnProperty("options")) - object.options = $root.query.ExecuteOptions.toObject(message.options, options); - if (message.pre_queries && message.pre_queries.length) { - object.pre_queries = []; - for (let j = 0; j < message.pre_queries.length; ++j) - object.pre_queries[j] = message.pre_queries[j]; - } + if (message.dtid != null && message.hasOwnProperty("dtid")) + object.dtid = message.dtid; return object; }; /** - * Converts this ReserveExecuteRequest to JSON. + * Converts this StartCommitRequest to JSON. * @function toJSON - * @memberof query.ReserveExecuteRequest + * @memberof query.StartCommitRequest * @instance * @returns {Object.} JSON object */ - ReserveExecuteRequest.prototype.toJSON = function toJSON() { + StartCommitRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReserveExecuteRequest + * Gets the default type url for StartCommitRequest * @function getTypeUrl - * @memberof query.ReserveExecuteRequest + * @memberof query.StartCommitRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReserveExecuteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + StartCommitRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.ReserveExecuteRequest"; + return typeUrlPrefix + "/query.StartCommitRequest"; }; - return ReserveExecuteRequest; + return StartCommitRequest; })(); - query.ReserveExecuteResponse = (function() { + query.StartCommitResponse = (function() { /** - * Properties of a 
ReserveExecuteResponse. + * Properties of a StartCommitResponse. * @memberof query - * @interface IReserveExecuteResponse - * @property {vtrpc.IRPCError|null} [error] ReserveExecuteResponse error - * @property {query.IQueryResult|null} [result] ReserveExecuteResponse result - * @property {number|Long|null} [reserved_id] ReserveExecuteResponse reserved_id - * @property {topodata.ITabletAlias|null} [tablet_alias] ReserveExecuteResponse tablet_alias + * @interface IStartCommitResponse */ /** - * Constructs a new ReserveExecuteResponse. + * Constructs a new StartCommitResponse. * @memberof query - * @classdesc Represents a ReserveExecuteResponse. - * @implements IReserveExecuteResponse + * @classdesc Represents a StartCommitResponse. + * @implements IStartCommitResponse * @constructor - * @param {query.IReserveExecuteResponse=} [properties] Properties to set + * @param {query.IStartCommitResponse=} [properties] Properties to set */ - function ReserveExecuteResponse(properties) { + function StartCommitResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -86317,119 +87695,63 @@ export const query = $root.query = (() => { } /** - * ReserveExecuteResponse error. - * @member {vtrpc.IRPCError|null|undefined} error - * @memberof query.ReserveExecuteResponse - * @instance - */ - ReserveExecuteResponse.prototype.error = null; - - /** - * ReserveExecuteResponse result. - * @member {query.IQueryResult|null|undefined} result - * @memberof query.ReserveExecuteResponse - * @instance - */ - ReserveExecuteResponse.prototype.result = null; - - /** - * ReserveExecuteResponse reserved_id. - * @member {number|Long} reserved_id - * @memberof query.ReserveExecuteResponse - * @instance - */ - ReserveExecuteResponse.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * ReserveExecuteResponse tablet_alias. 
- * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof query.ReserveExecuteResponse - * @instance - */ - ReserveExecuteResponse.prototype.tablet_alias = null; - - /** - * Creates a new ReserveExecuteResponse instance using the specified properties. + * Creates a new StartCommitResponse instance using the specified properties. * @function create - * @memberof query.ReserveExecuteResponse + * @memberof query.StartCommitResponse * @static - * @param {query.IReserveExecuteResponse=} [properties] Properties to set - * @returns {query.ReserveExecuteResponse} ReserveExecuteResponse instance + * @param {query.IStartCommitResponse=} [properties] Properties to set + * @returns {query.StartCommitResponse} StartCommitResponse instance */ - ReserveExecuteResponse.create = function create(properties) { - return new ReserveExecuteResponse(properties); + StartCommitResponse.create = function create(properties) { + return new StartCommitResponse(properties); }; /** - * Encodes the specified ReserveExecuteResponse message. Does not implicitly {@link query.ReserveExecuteResponse.verify|verify} messages. + * Encodes the specified StartCommitResponse message. Does not implicitly {@link query.StartCommitResponse.verify|verify} messages. 
* @function encode - * @memberof query.ReserveExecuteResponse + * @memberof query.StartCommitResponse * @static - * @param {query.IReserveExecuteResponse} message ReserveExecuteResponse message or plain object to encode + * @param {query.IStartCommitResponse} message StartCommitResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReserveExecuteResponse.encode = function encode(message, writer) { + StartCommitResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.error != null && Object.hasOwnProperty.call(message, "error")) - $root.vtrpc.RPCError.encode(message.error, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.result != null && Object.hasOwnProperty.call(message, "result")) - $root.query.QueryResult.encode(message.result, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) - writer.uint32(/* id 3, wireType 0 =*/24).int64(message.reserved_id); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; /** - * Encodes the specified ReserveExecuteResponse message, length delimited. Does not implicitly {@link query.ReserveExecuteResponse.verify|verify} messages. + * Encodes the specified StartCommitResponse message, length delimited. Does not implicitly {@link query.StartCommitResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof query.ReserveExecuteResponse + * @memberof query.StartCommitResponse * @static - * @param {query.IReserveExecuteResponse} message ReserveExecuteResponse message or plain object to encode + * @param {query.IStartCommitResponse} message StartCommitResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReserveExecuteResponse.encodeDelimited = function encodeDelimited(message, writer) { + StartCommitResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReserveExecuteResponse message from the specified reader or buffer. + * Decodes a StartCommitResponse message from the specified reader or buffer. * @function decode - * @memberof query.ReserveExecuteResponse + * @memberof query.StartCommitResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.ReserveExecuteResponse} ReserveExecuteResponse + * @returns {query.StartCommitResponse} StartCommitResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReserveExecuteResponse.decode = function decode(reader, length) { + StartCommitResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ReserveExecuteResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.StartCommitResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.error = $root.vtrpc.RPCError.decode(reader, reader.uint32()); - break; - } - case 2: { - message.result = $root.query.QueryResult.decode(reader, reader.uint32()); - break; - } - case 3: { - message.reserved_id = reader.int64(); - break; - } - case 4: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } default: reader.skipType(tag & 7); break; @@ -86439,183 +87761,113 @@ export const query = $root.query = (() => { }; /** - * Decodes a ReserveExecuteResponse message from the specified reader or buffer, length delimited. + * Decodes a StartCommitResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.ReserveExecuteResponse + * @memberof query.StartCommitResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.ReserveExecuteResponse} ReserveExecuteResponse + * @returns {query.StartCommitResponse} StartCommitResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReserveExecuteResponse.decodeDelimited = function decodeDelimited(reader) { + StartCommitResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReserveExecuteResponse message. + * Verifies a StartCommitResponse message. 
* @function verify - * @memberof query.ReserveExecuteResponse + * @memberof query.StartCommitResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReserveExecuteResponse.verify = function verify(message) { + StartCommitResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.error != null && message.hasOwnProperty("error")) { - let error = $root.vtrpc.RPCError.verify(message.error); - if (error) - return "error." + error; - } - if (message.result != null && message.hasOwnProperty("result")) { - let error = $root.query.QueryResult.verify(message.result); - if (error) - return "result." + error; - } - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) - return "reserved_id: integer|Long expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } return null; }; /** - * Creates a ReserveExecuteResponse message from a plain object. Also converts values to their respective internal types. + * Creates a StartCommitResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.ReserveExecuteResponse + * @memberof query.StartCommitResponse * @static * @param {Object.} object Plain object - * @returns {query.ReserveExecuteResponse} ReserveExecuteResponse + * @returns {query.StartCommitResponse} StartCommitResponse */ - ReserveExecuteResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.ReserveExecuteResponse) + StartCommitResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.StartCommitResponse) return object; - let message = new $root.query.ReserveExecuteResponse(); - if (object.error != null) { - if (typeof object.error !== "object") - throw TypeError(".query.ReserveExecuteResponse.error: object expected"); - message.error = $root.vtrpc.RPCError.fromObject(object.error); - } - if (object.result != null) { - if (typeof object.result !== "object") - throw TypeError(".query.ReserveExecuteResponse.result: object expected"); - message.result = $root.query.QueryResult.fromObject(object.result); - } - if (object.reserved_id != null) - if ($util.Long) - (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; - else if (typeof object.reserved_id === "string") - message.reserved_id = parseInt(object.reserved_id, 10); - else if (typeof object.reserved_id === "number") - message.reserved_id = object.reserved_id; - else if (typeof object.reserved_id === "object") - message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".query.ReserveExecuteResponse.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } - return message; + return new $root.query.StartCommitResponse(); }; /** - * Creates a plain object from a ReserveExecuteResponse message. Also converts values to other types if specified. 
+ * Creates a plain object from a StartCommitResponse message. Also converts values to other types if specified. * @function toObject - * @memberof query.ReserveExecuteResponse + * @memberof query.StartCommitResponse * @static - * @param {query.ReserveExecuteResponse} message ReserveExecuteResponse + * @param {query.StartCommitResponse} message StartCommitResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReserveExecuteResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.error = null; - object.result = null; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.reserved_id = options.longs === String ? "0" : 0; - object.tablet_alias = null; - } - if (message.error != null && message.hasOwnProperty("error")) - object.error = $root.vtrpc.RPCError.toObject(message.error, options); - if (message.result != null && message.hasOwnProperty("result")) - object.result = $root.query.QueryResult.toObject(message.result, options); - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (typeof message.reserved_id === "number") - object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; - else - object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? 
new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - return object; + StartCommitResponse.toObject = function toObject() { + return {}; }; /** - * Converts this ReserveExecuteResponse to JSON. + * Converts this StartCommitResponse to JSON. * @function toJSON - * @memberof query.ReserveExecuteResponse + * @memberof query.StartCommitResponse * @instance * @returns {Object.} JSON object */ - ReserveExecuteResponse.prototype.toJSON = function toJSON() { + StartCommitResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReserveExecuteResponse + * Gets the default type url for StartCommitResponse * @function getTypeUrl - * @memberof query.ReserveExecuteResponse + * @memberof query.StartCommitResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReserveExecuteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + StartCommitResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.ReserveExecuteResponse"; + return typeUrlPrefix + "/query.StartCommitResponse"; }; - return ReserveExecuteResponse; + return StartCommitResponse; })(); - query.ReserveStreamExecuteRequest = (function() { + query.SetRollbackRequest = (function() { /** - * Properties of a ReserveStreamExecuteRequest. + * Properties of a SetRollbackRequest. 
* @memberof query - * @interface IReserveStreamExecuteRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] ReserveStreamExecuteRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] ReserveStreamExecuteRequest immediate_caller_id - * @property {query.ITarget|null} [target] ReserveStreamExecuteRequest target - * @property {query.IBoundQuery|null} [query] ReserveStreamExecuteRequest query - * @property {query.IExecuteOptions|null} [options] ReserveStreamExecuteRequest options - * @property {number|Long|null} [transaction_id] ReserveStreamExecuteRequest transaction_id - * @property {Array.|null} [pre_queries] ReserveStreamExecuteRequest pre_queries + * @interface ISetRollbackRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] SetRollbackRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] SetRollbackRequest immediate_caller_id + * @property {query.ITarget|null} [target] SetRollbackRequest target + * @property {number|Long|null} [transaction_id] SetRollbackRequest transaction_id + * @property {string|null} [dtid] SetRollbackRequest dtid */ /** - * Constructs a new ReserveStreamExecuteRequest. + * Constructs a new SetRollbackRequest. * @memberof query - * @classdesc Represents a ReserveStreamExecuteRequest. - * @implements IReserveStreamExecuteRequest + * @classdesc Represents a SetRollbackRequest. + * @implements ISetRollbackRequest * @constructor - * @param {query.IReserveStreamExecuteRequest=} [properties] Properties to set + * @param {query.ISetRollbackRequest=} [properties] Properties to set */ - function ReserveStreamExecuteRequest(properties) { - this.pre_queries = []; + function SetRollbackRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -86623,83 +87875,67 @@ export const query = $root.query = (() => { } /** - * ReserveStreamExecuteRequest effective_caller_id. 
+ * SetRollbackRequest effective_caller_id. * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.ReserveStreamExecuteRequest + * @memberof query.SetRollbackRequest * @instance */ - ReserveStreamExecuteRequest.prototype.effective_caller_id = null; + SetRollbackRequest.prototype.effective_caller_id = null; /** - * ReserveStreamExecuteRequest immediate_caller_id. + * SetRollbackRequest immediate_caller_id. * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.ReserveStreamExecuteRequest + * @memberof query.SetRollbackRequest * @instance */ - ReserveStreamExecuteRequest.prototype.immediate_caller_id = null; + SetRollbackRequest.prototype.immediate_caller_id = null; /** - * ReserveStreamExecuteRequest target. + * SetRollbackRequest target. * @member {query.ITarget|null|undefined} target - * @memberof query.ReserveStreamExecuteRequest - * @instance - */ - ReserveStreamExecuteRequest.prototype.target = null; - - /** - * ReserveStreamExecuteRequest query. - * @member {query.IBoundQuery|null|undefined} query - * @memberof query.ReserveStreamExecuteRequest - * @instance - */ - ReserveStreamExecuteRequest.prototype.query = null; - - /** - * ReserveStreamExecuteRequest options. - * @member {query.IExecuteOptions|null|undefined} options - * @memberof query.ReserveStreamExecuteRequest + * @memberof query.SetRollbackRequest * @instance */ - ReserveStreamExecuteRequest.prototype.options = null; + SetRollbackRequest.prototype.target = null; /** - * ReserveStreamExecuteRequest transaction_id. + * SetRollbackRequest transaction_id. * @member {number|Long} transaction_id - * @memberof query.ReserveStreamExecuteRequest + * @memberof query.SetRollbackRequest * @instance */ - ReserveStreamExecuteRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + SetRollbackRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * ReserveStreamExecuteRequest pre_queries. 
- * @member {Array.} pre_queries - * @memberof query.ReserveStreamExecuteRequest + * SetRollbackRequest dtid. + * @member {string} dtid + * @memberof query.SetRollbackRequest * @instance */ - ReserveStreamExecuteRequest.prototype.pre_queries = $util.emptyArray; + SetRollbackRequest.prototype.dtid = ""; /** - * Creates a new ReserveStreamExecuteRequest instance using the specified properties. + * Creates a new SetRollbackRequest instance using the specified properties. * @function create - * @memberof query.ReserveStreamExecuteRequest + * @memberof query.SetRollbackRequest * @static - * @param {query.IReserveStreamExecuteRequest=} [properties] Properties to set - * @returns {query.ReserveStreamExecuteRequest} ReserveStreamExecuteRequest instance + * @param {query.ISetRollbackRequest=} [properties] Properties to set + * @returns {query.SetRollbackRequest} SetRollbackRequest instance */ - ReserveStreamExecuteRequest.create = function create(properties) { - return new ReserveStreamExecuteRequest(properties); + SetRollbackRequest.create = function create(properties) { + return new SetRollbackRequest(properties); }; /** - * Encodes the specified ReserveStreamExecuteRequest message. Does not implicitly {@link query.ReserveStreamExecuteRequest.verify|verify} messages. + * Encodes the specified SetRollbackRequest message. Does not implicitly {@link query.SetRollbackRequest.verify|verify} messages. 
* @function encode - * @memberof query.ReserveStreamExecuteRequest + * @memberof query.SetRollbackRequest * @static - * @param {query.IReserveStreamExecuteRequest} message ReserveStreamExecuteRequest message or plain object to encode + * @param {query.ISetRollbackRequest} message SetRollbackRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReserveStreamExecuteRequest.encode = function encode(message, writer) { + SetRollbackRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) @@ -86708,46 +87944,41 @@ export const query = $root.query = (() => { $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); if (message.target != null && Object.hasOwnProperty.call(message, "target")) $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.query != null && Object.hasOwnProperty.call(message, "query")) - $root.query.BoundQuery.encode(message.query, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.options != null && Object.hasOwnProperty.call(message, "options")) - $root.query.ExecuteOptions.encode(message.options, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) - writer.uint32(/* id 6, wireType 0 =*/48).int64(message.transaction_id); - if (message.pre_queries != null && message.pre_queries.length) - for (let i = 0; i < message.pre_queries.length; ++i) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.pre_queries[i]); + writer.uint32(/* id 4, wireType 0 =*/32).int64(message.transaction_id); + if (message.dtid != null && Object.hasOwnProperty.call(message, "dtid")) + writer.uint32(/* id 5, 
wireType 2 =*/42).string(message.dtid); return writer; }; /** - * Encodes the specified ReserveStreamExecuteRequest message, length delimited. Does not implicitly {@link query.ReserveStreamExecuteRequest.verify|verify} messages. + * Encodes the specified SetRollbackRequest message, length delimited. Does not implicitly {@link query.SetRollbackRequest.verify|verify} messages. * @function encodeDelimited - * @memberof query.ReserveStreamExecuteRequest + * @memberof query.SetRollbackRequest * @static - * @param {query.IReserveStreamExecuteRequest} message ReserveStreamExecuteRequest message or plain object to encode + * @param {query.ISetRollbackRequest} message SetRollbackRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReserveStreamExecuteRequest.encodeDelimited = function encodeDelimited(message, writer) { + SetRollbackRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReserveStreamExecuteRequest message from the specified reader or buffer. + * Decodes a SetRollbackRequest message from the specified reader or buffer. * @function decode - * @memberof query.ReserveStreamExecuteRequest + * @memberof query.SetRollbackRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.ReserveStreamExecuteRequest} ReserveStreamExecuteRequest + * @returns {query.SetRollbackRequest} SetRollbackRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReserveStreamExecuteRequest.decode = function decode(reader, length) { + SetRollbackRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.ReserveStreamExecuteRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.SetRollbackRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -86764,21 +87995,11 @@ export const query = $root.query = (() => { break; } case 4: { - message.query = $root.query.BoundQuery.decode(reader, reader.uint32()); - break; - } - case 5: { - message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); - break; - } - case 6: { message.transaction_id = reader.int64(); break; } - case 7: { - if (!(message.pre_queries && message.pre_queries.length)) - message.pre_queries = []; - message.pre_queries.push(reader.string()); + case 5: { + message.dtid = reader.string(); break; } default: @@ -86790,30 +88011,30 @@ export const query = $root.query = (() => { }; /** - * Decodes a ReserveStreamExecuteRequest message from the specified reader or buffer, length delimited. + * Decodes a SetRollbackRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.ReserveStreamExecuteRequest + * @memberof query.SetRollbackRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.ReserveStreamExecuteRequest} ReserveStreamExecuteRequest + * @returns {query.SetRollbackRequest} SetRollbackRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReserveStreamExecuteRequest.decodeDelimited = function decodeDelimited(reader) { + SetRollbackRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReserveStreamExecuteRequest message. + * Verifies a SetRollbackRequest message. 
* @function verify - * @memberof query.ReserveStreamExecuteRequest + * @memberof query.SetRollbackRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReserveStreamExecuteRequest.verify = function verify(message) { + SetRollbackRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { @@ -86831,66 +88052,42 @@ export const query = $root.query = (() => { if (error) return "target." + error; } - if (message.query != null && message.hasOwnProperty("query")) { - let error = $root.query.BoundQuery.verify(message.query); - if (error) - return "query." + error; - } - if (message.options != null && message.hasOwnProperty("options")) { - let error = $root.query.ExecuteOptions.verify(message.options); - if (error) - return "options." + error; - } if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) return "transaction_id: integer|Long expected"; - if (message.pre_queries != null && message.hasOwnProperty("pre_queries")) { - if (!Array.isArray(message.pre_queries)) - return "pre_queries: array expected"; - for (let i = 0; i < message.pre_queries.length; ++i) - if (!$util.isString(message.pre_queries[i])) - return "pre_queries: string[] expected"; - } + if (message.dtid != null && message.hasOwnProperty("dtid")) + if (!$util.isString(message.dtid)) + return "dtid: string expected"; return null; }; /** - * Creates a ReserveStreamExecuteRequest message from a plain object. Also converts values to their respective internal types. + * Creates a SetRollbackRequest message from a plain object. 
Also converts values to their respective internal types. * @function fromObject - * @memberof query.ReserveStreamExecuteRequest + * @memberof query.SetRollbackRequest * @static * @param {Object.} object Plain object - * @returns {query.ReserveStreamExecuteRequest} ReserveStreamExecuteRequest + * @returns {query.SetRollbackRequest} SetRollbackRequest */ - ReserveStreamExecuteRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.ReserveStreamExecuteRequest) + SetRollbackRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.SetRollbackRequest) return object; - let message = new $root.query.ReserveStreamExecuteRequest(); + let message = new $root.query.SetRollbackRequest(); if (object.effective_caller_id != null) { if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.ReserveStreamExecuteRequest.effective_caller_id: object expected"); + throw TypeError(".query.SetRollbackRequest.effective_caller_id: object expected"); message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); } if (object.immediate_caller_id != null) { if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.ReserveStreamExecuteRequest.immediate_caller_id: object expected"); + throw TypeError(".query.SetRollbackRequest.immediate_caller_id: object expected"); message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); } if (object.target != null) { if (typeof object.target !== "object") - throw TypeError(".query.ReserveStreamExecuteRequest.target: object expected"); + throw TypeError(".query.SetRollbackRequest.target: object expected"); message.target = $root.query.Target.fromObject(object.target); } - if (object.query != null) { - if (typeof object.query !== "object") - throw TypeError(".query.ReserveStreamExecuteRequest.query: object expected"); - message.query = $root.query.BoundQuery.fromObject(object.query); - } - if 
(object.options != null) { - if (typeof object.options !== "object") - throw TypeError(".query.ReserveStreamExecuteRequest.options: object expected"); - message.options = $root.query.ExecuteOptions.fromObject(object.options); - } if (object.transaction_id != null) if ($util.Long) (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; @@ -86900,42 +88097,34 @@ export const query = $root.query = (() => { message.transaction_id = object.transaction_id; else if (typeof object.transaction_id === "object") message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); - if (object.pre_queries) { - if (!Array.isArray(object.pre_queries)) - throw TypeError(".query.ReserveStreamExecuteRequest.pre_queries: array expected"); - message.pre_queries = []; - for (let i = 0; i < object.pre_queries.length; ++i) - message.pre_queries[i] = String(object.pre_queries[i]); - } + if (object.dtid != null) + message.dtid = String(object.dtid); return message; }; /** - * Creates a plain object from a ReserveStreamExecuteRequest message. Also converts values to other types if specified. + * Creates a plain object from a SetRollbackRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.ReserveStreamExecuteRequest + * @memberof query.SetRollbackRequest * @static - * @param {query.ReserveStreamExecuteRequest} message ReserveStreamExecuteRequest + * @param {query.SetRollbackRequest} message SetRollbackRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReserveStreamExecuteRequest.toObject = function toObject(message, options) { + SetRollbackRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.pre_queries = []; if (options.defaults) { object.effective_caller_id = null; object.immediate_caller_id = null; object.target = null; - object.query = null; - object.options = null; if ($util.Long) { let long = new $util.Long(0, 0, false); object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; } else object.transaction_id = options.longs === String ? 
"0" : 0; + object.dtid = ""; } if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); @@ -86943,73 +88132,62 @@ export const query = $root.query = (() => { object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); if (message.target != null && message.hasOwnProperty("target")) object.target = $root.query.Target.toObject(message.target, options); - if (message.query != null && message.hasOwnProperty("query")) - object.query = $root.query.BoundQuery.toObject(message.query, options); - if (message.options != null && message.hasOwnProperty("options")) - object.options = $root.query.ExecuteOptions.toObject(message.options, options); if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) if (typeof message.transaction_id === "number") object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; else object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; - if (message.pre_queries && message.pre_queries.length) { - object.pre_queries = []; - for (let j = 0; j < message.pre_queries.length; ++j) - object.pre_queries[j] = message.pre_queries[j]; - } + if (message.dtid != null && message.hasOwnProperty("dtid")) + object.dtid = message.dtid; return object; }; /** - * Converts this ReserveStreamExecuteRequest to JSON. + * Converts this SetRollbackRequest to JSON. 
* @function toJSON - * @memberof query.ReserveStreamExecuteRequest + * @memberof query.SetRollbackRequest * @instance * @returns {Object.} JSON object */ - ReserveStreamExecuteRequest.prototype.toJSON = function toJSON() { + SetRollbackRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReserveStreamExecuteRequest + * Gets the default type url for SetRollbackRequest * @function getTypeUrl - * @memberof query.ReserveStreamExecuteRequest + * @memberof query.SetRollbackRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReserveStreamExecuteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SetRollbackRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.ReserveStreamExecuteRequest"; + return typeUrlPrefix + "/query.SetRollbackRequest"; }; - return ReserveStreamExecuteRequest; + return SetRollbackRequest; })(); - query.ReserveStreamExecuteResponse = (function() { + query.SetRollbackResponse = (function() { /** - * Properties of a ReserveStreamExecuteResponse. + * Properties of a SetRollbackResponse. * @memberof query - * @interface IReserveStreamExecuteResponse - * @property {vtrpc.IRPCError|null} [error] ReserveStreamExecuteResponse error - * @property {query.IQueryResult|null} [result] ReserveStreamExecuteResponse result - * @property {number|Long|null} [reserved_id] ReserveStreamExecuteResponse reserved_id - * @property {topodata.ITabletAlias|null} [tablet_alias] ReserveStreamExecuteResponse tablet_alias + * @interface ISetRollbackResponse */ /** - * Constructs a new ReserveStreamExecuteResponse. + * Constructs a new SetRollbackResponse. * @memberof query - * @classdesc Represents a ReserveStreamExecuteResponse. 
- * @implements IReserveStreamExecuteResponse + * @classdesc Represents a SetRollbackResponse. + * @implements ISetRollbackResponse * @constructor - * @param {query.IReserveStreamExecuteResponse=} [properties] Properties to set + * @param {query.ISetRollbackResponse=} [properties] Properties to set */ - function ReserveStreamExecuteResponse(properties) { + function SetRollbackResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -87017,119 +88195,63 @@ export const query = $root.query = (() => { } /** - * ReserveStreamExecuteResponse error. - * @member {vtrpc.IRPCError|null|undefined} error - * @memberof query.ReserveStreamExecuteResponse - * @instance - */ - ReserveStreamExecuteResponse.prototype.error = null; - - /** - * ReserveStreamExecuteResponse result. - * @member {query.IQueryResult|null|undefined} result - * @memberof query.ReserveStreamExecuteResponse - * @instance - */ - ReserveStreamExecuteResponse.prototype.result = null; - - /** - * ReserveStreamExecuteResponse reserved_id. - * @member {number|Long} reserved_id - * @memberof query.ReserveStreamExecuteResponse - * @instance - */ - ReserveStreamExecuteResponse.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * ReserveStreamExecuteResponse tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof query.ReserveStreamExecuteResponse - * @instance - */ - ReserveStreamExecuteResponse.prototype.tablet_alias = null; - - /** - * Creates a new ReserveStreamExecuteResponse instance using the specified properties. + * Creates a new SetRollbackResponse instance using the specified properties. 
* @function create - * @memberof query.ReserveStreamExecuteResponse + * @memberof query.SetRollbackResponse * @static - * @param {query.IReserveStreamExecuteResponse=} [properties] Properties to set - * @returns {query.ReserveStreamExecuteResponse} ReserveStreamExecuteResponse instance + * @param {query.ISetRollbackResponse=} [properties] Properties to set + * @returns {query.SetRollbackResponse} SetRollbackResponse instance */ - ReserveStreamExecuteResponse.create = function create(properties) { - return new ReserveStreamExecuteResponse(properties); + SetRollbackResponse.create = function create(properties) { + return new SetRollbackResponse(properties); }; /** - * Encodes the specified ReserveStreamExecuteResponse message. Does not implicitly {@link query.ReserveStreamExecuteResponse.verify|verify} messages. + * Encodes the specified SetRollbackResponse message. Does not implicitly {@link query.SetRollbackResponse.verify|verify} messages. * @function encode - * @memberof query.ReserveStreamExecuteResponse + * @memberof query.SetRollbackResponse * @static - * @param {query.IReserveStreamExecuteResponse} message ReserveStreamExecuteResponse message or plain object to encode + * @param {query.ISetRollbackResponse} message SetRollbackResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReserveStreamExecuteResponse.encode = function encode(message, writer) { + SetRollbackResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.error != null && Object.hasOwnProperty.call(message, "error")) - $root.vtrpc.RPCError.encode(message.error, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.result != null && Object.hasOwnProperty.call(message, "result")) - $root.query.QueryResult.encode(message.result, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.reserved_id != null && 
Object.hasOwnProperty.call(message, "reserved_id")) - writer.uint32(/* id 3, wireType 0 =*/24).int64(message.reserved_id); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; /** - * Encodes the specified ReserveStreamExecuteResponse message, length delimited. Does not implicitly {@link query.ReserveStreamExecuteResponse.verify|verify} messages. + * Encodes the specified SetRollbackResponse message, length delimited. Does not implicitly {@link query.SetRollbackResponse.verify|verify} messages. * @function encodeDelimited - * @memberof query.ReserveStreamExecuteResponse + * @memberof query.SetRollbackResponse * @static - * @param {query.IReserveStreamExecuteResponse} message ReserveStreamExecuteResponse message or plain object to encode + * @param {query.ISetRollbackResponse} message SetRollbackResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReserveStreamExecuteResponse.encodeDelimited = function encodeDelimited(message, writer) { + SetRollbackResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReserveStreamExecuteResponse message from the specified reader or buffer. + * Decodes a SetRollbackResponse message from the specified reader or buffer. 
* @function decode - * @memberof query.ReserveStreamExecuteResponse + * @memberof query.SetRollbackResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.ReserveStreamExecuteResponse} ReserveStreamExecuteResponse + * @returns {query.SetRollbackResponse} SetRollbackResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReserveStreamExecuteResponse.decode = function decode(reader, length) { + SetRollbackResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ReserveStreamExecuteResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.SetRollbackResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.error = $root.vtrpc.RPCError.decode(reader, reader.uint32()); - break; - } - case 2: { - message.result = $root.query.QueryResult.decode(reader, reader.uint32()); - break; - } - case 3: { - message.reserved_id = reader.int64(); - break; - } - case 4: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } default: reader.skipType(tag & 7); break; @@ -87139,184 +88261,112 @@ export const query = $root.query = (() => { }; /** - * Decodes a ReserveStreamExecuteResponse message from the specified reader or buffer, length delimited. + * Decodes a SetRollbackResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.ReserveStreamExecuteResponse + * @memberof query.SetRollbackResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.ReserveStreamExecuteResponse} ReserveStreamExecuteResponse + * @returns {query.SetRollbackResponse} SetRollbackResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReserveStreamExecuteResponse.decodeDelimited = function decodeDelimited(reader) { + SetRollbackResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReserveStreamExecuteResponse message. + * Verifies a SetRollbackResponse message. * @function verify - * @memberof query.ReserveStreamExecuteResponse + * @memberof query.SetRollbackResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReserveStreamExecuteResponse.verify = function verify(message) { + SetRollbackResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.error != null && message.hasOwnProperty("error")) { - let error = $root.vtrpc.RPCError.verify(message.error); - if (error) - return "error." + error; - } - if (message.result != null && message.hasOwnProperty("result")) { - let error = $root.query.QueryResult.verify(message.result); - if (error) - return "result." 
+ error; - } - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) - return "reserved_id: integer|Long expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } return null; }; /** - * Creates a ReserveStreamExecuteResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SetRollbackResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.ReserveStreamExecuteResponse + * @memberof query.SetRollbackResponse * @static * @param {Object.} object Plain object - * @returns {query.ReserveStreamExecuteResponse} ReserveStreamExecuteResponse + * @returns {query.SetRollbackResponse} SetRollbackResponse */ - ReserveStreamExecuteResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.ReserveStreamExecuteResponse) + SetRollbackResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.SetRollbackResponse) return object; - let message = new $root.query.ReserveStreamExecuteResponse(); - if (object.error != null) { - if (typeof object.error !== "object") - throw TypeError(".query.ReserveStreamExecuteResponse.error: object expected"); - message.error = $root.vtrpc.RPCError.fromObject(object.error); - } - if (object.result != null) { - if (typeof object.result !== "object") - throw TypeError(".query.ReserveStreamExecuteResponse.result: object expected"); - message.result = $root.query.QueryResult.fromObject(object.result); - } - if (object.reserved_id != null) - if ($util.Long) - (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; - 
else if (typeof object.reserved_id === "string") - message.reserved_id = parseInt(object.reserved_id, 10); - else if (typeof object.reserved_id === "number") - message.reserved_id = object.reserved_id; - else if (typeof object.reserved_id === "object") - message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".query.ReserveStreamExecuteResponse.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } - return message; + return new $root.query.SetRollbackResponse(); }; /** - * Creates a plain object from a ReserveStreamExecuteResponse message. Also converts values to other types if specified. + * Creates a plain object from a SetRollbackResponse message. Also converts values to other types if specified. * @function toObject - * @memberof query.ReserveStreamExecuteResponse + * @memberof query.SetRollbackResponse * @static - * @param {query.ReserveStreamExecuteResponse} message ReserveStreamExecuteResponse + * @param {query.SetRollbackResponse} message SetRollbackResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReserveStreamExecuteResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.error = null; - object.result = null; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.reserved_id = options.longs === String ? 
"0" : 0; - object.tablet_alias = null; - } - if (message.error != null && message.hasOwnProperty("error")) - object.error = $root.vtrpc.RPCError.toObject(message.error, options); - if (message.result != null && message.hasOwnProperty("result")) - object.result = $root.query.QueryResult.toObject(message.result, options); - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (typeof message.reserved_id === "number") - object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; - else - object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - return object; + SetRollbackResponse.toObject = function toObject() { + return {}; }; /** - * Converts this ReserveStreamExecuteResponse to JSON. + * Converts this SetRollbackResponse to JSON. 
* @function toJSON - * @memberof query.ReserveStreamExecuteResponse + * @memberof query.SetRollbackResponse * @instance * @returns {Object.} JSON object */ - ReserveStreamExecuteResponse.prototype.toJSON = function toJSON() { + SetRollbackResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReserveStreamExecuteResponse + * Gets the default type url for SetRollbackResponse * @function getTypeUrl - * @memberof query.ReserveStreamExecuteResponse + * @memberof query.SetRollbackResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReserveStreamExecuteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SetRollbackResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.ReserveStreamExecuteResponse"; + return typeUrlPrefix + "/query.SetRollbackResponse"; }; - return ReserveStreamExecuteResponse; + return SetRollbackResponse; })(); - query.ReserveBeginExecuteRequest = (function() { + query.ConcludeTransactionRequest = (function() { /** - * Properties of a ReserveBeginExecuteRequest. + * Properties of a ConcludeTransactionRequest. 
* @memberof query - * @interface IReserveBeginExecuteRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] ReserveBeginExecuteRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] ReserveBeginExecuteRequest immediate_caller_id - * @property {query.ITarget|null} [target] ReserveBeginExecuteRequest target - * @property {query.IBoundQuery|null} [query] ReserveBeginExecuteRequest query - * @property {query.IExecuteOptions|null} [options] ReserveBeginExecuteRequest options - * @property {Array.|null} [pre_queries] ReserveBeginExecuteRequest pre_queries - * @property {Array.|null} [post_begin_queries] ReserveBeginExecuteRequest post_begin_queries + * @interface IConcludeTransactionRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] ConcludeTransactionRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] ConcludeTransactionRequest immediate_caller_id + * @property {query.ITarget|null} [target] ConcludeTransactionRequest target + * @property {string|null} [dtid] ConcludeTransactionRequest dtid */ /** - * Constructs a new ReserveBeginExecuteRequest. + * Constructs a new ConcludeTransactionRequest. * @memberof query - * @classdesc Represents a ReserveBeginExecuteRequest. - * @implements IReserveBeginExecuteRequest + * @classdesc Represents a ConcludeTransactionRequest. 
+ * @implements IConcludeTransactionRequest * @constructor - * @param {query.IReserveBeginExecuteRequest=} [properties] Properties to set + * @param {query.IConcludeTransactionRequest=} [properties] Properties to set */ - function ReserveBeginExecuteRequest(properties) { - this.pre_queries = []; - this.post_begin_queries = []; + function ConcludeTransactionRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -87324,83 +88374,59 @@ export const query = $root.query = (() => { } /** - * ReserveBeginExecuteRequest effective_caller_id. + * ConcludeTransactionRequest effective_caller_id. * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.ReserveBeginExecuteRequest + * @memberof query.ConcludeTransactionRequest * @instance */ - ReserveBeginExecuteRequest.prototype.effective_caller_id = null; + ConcludeTransactionRequest.prototype.effective_caller_id = null; /** - * ReserveBeginExecuteRequest immediate_caller_id. + * ConcludeTransactionRequest immediate_caller_id. * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.ReserveBeginExecuteRequest + * @memberof query.ConcludeTransactionRequest * @instance */ - ReserveBeginExecuteRequest.prototype.immediate_caller_id = null; + ConcludeTransactionRequest.prototype.immediate_caller_id = null; /** - * ReserveBeginExecuteRequest target. + * ConcludeTransactionRequest target. * @member {query.ITarget|null|undefined} target - * @memberof query.ReserveBeginExecuteRequest - * @instance - */ - ReserveBeginExecuteRequest.prototype.target = null; - - /** - * ReserveBeginExecuteRequest query. - * @member {query.IBoundQuery|null|undefined} query - * @memberof query.ReserveBeginExecuteRequest - * @instance - */ - ReserveBeginExecuteRequest.prototype.query = null; - - /** - * ReserveBeginExecuteRequest options. 
- * @member {query.IExecuteOptions|null|undefined} options - * @memberof query.ReserveBeginExecuteRequest - * @instance - */ - ReserveBeginExecuteRequest.prototype.options = null; - - /** - * ReserveBeginExecuteRequest pre_queries. - * @member {Array.} pre_queries - * @memberof query.ReserveBeginExecuteRequest + * @memberof query.ConcludeTransactionRequest * @instance */ - ReserveBeginExecuteRequest.prototype.pre_queries = $util.emptyArray; + ConcludeTransactionRequest.prototype.target = null; /** - * ReserveBeginExecuteRequest post_begin_queries. - * @member {Array.} post_begin_queries - * @memberof query.ReserveBeginExecuteRequest + * ConcludeTransactionRequest dtid. + * @member {string} dtid + * @memberof query.ConcludeTransactionRequest * @instance */ - ReserveBeginExecuteRequest.prototype.post_begin_queries = $util.emptyArray; + ConcludeTransactionRequest.prototype.dtid = ""; /** - * Creates a new ReserveBeginExecuteRequest instance using the specified properties. + * Creates a new ConcludeTransactionRequest instance using the specified properties. * @function create - * @memberof query.ReserveBeginExecuteRequest + * @memberof query.ConcludeTransactionRequest * @static - * @param {query.IReserveBeginExecuteRequest=} [properties] Properties to set - * @returns {query.ReserveBeginExecuteRequest} ReserveBeginExecuteRequest instance + * @param {query.IConcludeTransactionRequest=} [properties] Properties to set + * @returns {query.ConcludeTransactionRequest} ConcludeTransactionRequest instance */ - ReserveBeginExecuteRequest.create = function create(properties) { - return new ReserveBeginExecuteRequest(properties); + ConcludeTransactionRequest.create = function create(properties) { + return new ConcludeTransactionRequest(properties); }; /** - * Encodes the specified ReserveBeginExecuteRequest message. Does not implicitly {@link query.ReserveBeginExecuteRequest.verify|verify} messages. + * Encodes the specified ConcludeTransactionRequest message. 
Does not implicitly {@link query.ConcludeTransactionRequest.verify|verify} messages. * @function encode - * @memberof query.ReserveBeginExecuteRequest + * @memberof query.ConcludeTransactionRequest * @static - * @param {query.IReserveBeginExecuteRequest} message ReserveBeginExecuteRequest message or plain object to encode + * @param {query.IConcludeTransactionRequest} message ConcludeTransactionRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReserveBeginExecuteRequest.encode = function encode(message, writer) { + ConcludeTransactionRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) @@ -87409,47 +88435,39 @@ export const query = $root.query = (() => { $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); if (message.target != null && Object.hasOwnProperty.call(message, "target")) $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.query != null && Object.hasOwnProperty.call(message, "query")) - $root.query.BoundQuery.encode(message.query, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.options != null && Object.hasOwnProperty.call(message, "options")) - $root.query.ExecuteOptions.encode(message.options, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); - if (message.pre_queries != null && message.pre_queries.length) - for (let i = 0; i < message.pre_queries.length; ++i) - writer.uint32(/* id 6, wireType 2 =*/50).string(message.pre_queries[i]); - if (message.post_begin_queries != null && message.post_begin_queries.length) - for (let i = 0; i < message.post_begin_queries.length; ++i) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.post_begin_queries[i]); + if 
(message.dtid != null && Object.hasOwnProperty.call(message, "dtid")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.dtid); return writer; }; /** - * Encodes the specified ReserveBeginExecuteRequest message, length delimited. Does not implicitly {@link query.ReserveBeginExecuteRequest.verify|verify} messages. + * Encodes the specified ConcludeTransactionRequest message, length delimited. Does not implicitly {@link query.ConcludeTransactionRequest.verify|verify} messages. * @function encodeDelimited - * @memberof query.ReserveBeginExecuteRequest + * @memberof query.ConcludeTransactionRequest * @static - * @param {query.IReserveBeginExecuteRequest} message ReserveBeginExecuteRequest message or plain object to encode + * @param {query.IConcludeTransactionRequest} message ConcludeTransactionRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReserveBeginExecuteRequest.encodeDelimited = function encodeDelimited(message, writer) { + ConcludeTransactionRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReserveBeginExecuteRequest message from the specified reader or buffer. + * Decodes a ConcludeTransactionRequest message from the specified reader or buffer. 
* @function decode - * @memberof query.ReserveBeginExecuteRequest + * @memberof query.ConcludeTransactionRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.ReserveBeginExecuteRequest} ReserveBeginExecuteRequest + * @returns {query.ConcludeTransactionRequest} ConcludeTransactionRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReserveBeginExecuteRequest.decode = function decode(reader, length) { + ConcludeTransactionRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ReserveBeginExecuteRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ConcludeTransactionRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -87466,23 +88484,7 @@ export const query = $root.query = (() => { break; } case 4: { - message.query = $root.query.BoundQuery.decode(reader, reader.uint32()); - break; - } - case 5: { - message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); - break; - } - case 6: { - if (!(message.pre_queries && message.pre_queries.length)) - message.pre_queries = []; - message.pre_queries.push(reader.string()); - break; - } - case 7: { - if (!(message.post_begin_queries && message.post_begin_queries.length)) - message.post_begin_queries = []; - message.post_begin_queries.push(reader.string()); + message.dtid = reader.string(); break; } default: @@ -87494,30 +88496,30 @@ export const query = $root.query = (() => { }; /** - * Decodes a ReserveBeginExecuteRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a ConcludeTransactionRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.ReserveBeginExecuteRequest + * @memberof query.ConcludeTransactionRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.ReserveBeginExecuteRequest} ReserveBeginExecuteRequest + * @returns {query.ConcludeTransactionRequest} ConcludeTransactionRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReserveBeginExecuteRequest.decodeDelimited = function decodeDelimited(reader) { + ConcludeTransactionRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReserveBeginExecuteRequest message. + * Verifies a ConcludeTransactionRequest message. * @function verify - * @memberof query.ReserveBeginExecuteRequest + * @memberof query.ConcludeTransactionRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReserveBeginExecuteRequest.verify = function verify(message) { + ConcludeTransactionRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { @@ -87535,110 +88537,62 @@ export const query = $root.query = (() => { if (error) return "target." + error; } - if (message.query != null && message.hasOwnProperty("query")) { - let error = $root.query.BoundQuery.verify(message.query); - if (error) - return "query." + error; - } - if (message.options != null && message.hasOwnProperty("options")) { - let error = $root.query.ExecuteOptions.verify(message.options); - if (error) - return "options." 
+ error; - } - if (message.pre_queries != null && message.hasOwnProperty("pre_queries")) { - if (!Array.isArray(message.pre_queries)) - return "pre_queries: array expected"; - for (let i = 0; i < message.pre_queries.length; ++i) - if (!$util.isString(message.pre_queries[i])) - return "pre_queries: string[] expected"; - } - if (message.post_begin_queries != null && message.hasOwnProperty("post_begin_queries")) { - if (!Array.isArray(message.post_begin_queries)) - return "post_begin_queries: array expected"; - for (let i = 0; i < message.post_begin_queries.length; ++i) - if (!$util.isString(message.post_begin_queries[i])) - return "post_begin_queries: string[] expected"; - } + if (message.dtid != null && message.hasOwnProperty("dtid")) + if (!$util.isString(message.dtid)) + return "dtid: string expected"; return null; }; /** - * Creates a ReserveBeginExecuteRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ConcludeTransactionRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.ReserveBeginExecuteRequest + * @memberof query.ConcludeTransactionRequest * @static * @param {Object.} object Plain object - * @returns {query.ReserveBeginExecuteRequest} ReserveBeginExecuteRequest + * @returns {query.ConcludeTransactionRequest} ConcludeTransactionRequest */ - ReserveBeginExecuteRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.ReserveBeginExecuteRequest) + ConcludeTransactionRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.ConcludeTransactionRequest) return object; - let message = new $root.query.ReserveBeginExecuteRequest(); + let message = new $root.query.ConcludeTransactionRequest(); if (object.effective_caller_id != null) { if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.ReserveBeginExecuteRequest.effective_caller_id: object expected"); + throw TypeError(".query.ConcludeTransactionRequest.effective_caller_id: object expected"); message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); } if (object.immediate_caller_id != null) { if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.ReserveBeginExecuteRequest.immediate_caller_id: object expected"); + throw TypeError(".query.ConcludeTransactionRequest.immediate_caller_id: object expected"); message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); } if (object.target != null) { if (typeof object.target !== "object") - throw TypeError(".query.ReserveBeginExecuteRequest.target: object expected"); + throw TypeError(".query.ConcludeTransactionRequest.target: object expected"); message.target = $root.query.Target.fromObject(object.target); } - if (object.query != null) { - if (typeof object.query !== "object") - throw TypeError(".query.ReserveBeginExecuteRequest.query: object expected"); - message.query = $root.query.BoundQuery.fromObject(object.query); - } - if 
(object.options != null) { - if (typeof object.options !== "object") - throw TypeError(".query.ReserveBeginExecuteRequest.options: object expected"); - message.options = $root.query.ExecuteOptions.fromObject(object.options); - } - if (object.pre_queries) { - if (!Array.isArray(object.pre_queries)) - throw TypeError(".query.ReserveBeginExecuteRequest.pre_queries: array expected"); - message.pre_queries = []; - for (let i = 0; i < object.pre_queries.length; ++i) - message.pre_queries[i] = String(object.pre_queries[i]); - } - if (object.post_begin_queries) { - if (!Array.isArray(object.post_begin_queries)) - throw TypeError(".query.ReserveBeginExecuteRequest.post_begin_queries: array expected"); - message.post_begin_queries = []; - for (let i = 0; i < object.post_begin_queries.length; ++i) - message.post_begin_queries[i] = String(object.post_begin_queries[i]); - } + if (object.dtid != null) + message.dtid = String(object.dtid); return message; }; /** - * Creates a plain object from a ReserveBeginExecuteRequest message. Also converts values to other types if specified. + * Creates a plain object from a ConcludeTransactionRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.ReserveBeginExecuteRequest + * @memberof query.ConcludeTransactionRequest * @static - * @param {query.ReserveBeginExecuteRequest} message ReserveBeginExecuteRequest + * @param {query.ConcludeTransactionRequest} message ConcludeTransactionRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReserveBeginExecuteRequest.toObject = function toObject(message, options) { + ConcludeTransactionRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) { - object.pre_queries = []; - object.post_begin_queries = []; - } if (options.defaults) { object.effective_caller_id = null; object.immediate_caller_id = null; object.target = null; - object.query = null; - object.options = null; + object.dtid = ""; } if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); @@ -87646,75 +88600,57 @@ export const query = $root.query = (() => { object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); if (message.target != null && message.hasOwnProperty("target")) object.target = $root.query.Target.toObject(message.target, options); - if (message.query != null && message.hasOwnProperty("query")) - object.query = $root.query.BoundQuery.toObject(message.query, options); - if (message.options != null && message.hasOwnProperty("options")) - object.options = $root.query.ExecuteOptions.toObject(message.options, options); - if (message.pre_queries && message.pre_queries.length) { - object.pre_queries = []; - for (let j = 0; j < message.pre_queries.length; ++j) - object.pre_queries[j] = message.pre_queries[j]; - } - if (message.post_begin_queries && message.post_begin_queries.length) { - object.post_begin_queries = []; - for (let j = 0; j < 
message.post_begin_queries.length; ++j) - object.post_begin_queries[j] = message.post_begin_queries[j]; - } + if (message.dtid != null && message.hasOwnProperty("dtid")) + object.dtid = message.dtid; return object; }; /** - * Converts this ReserveBeginExecuteRequest to JSON. + * Converts this ConcludeTransactionRequest to JSON. * @function toJSON - * @memberof query.ReserveBeginExecuteRequest + * @memberof query.ConcludeTransactionRequest * @instance * @returns {Object.} JSON object */ - ReserveBeginExecuteRequest.prototype.toJSON = function toJSON() { + ConcludeTransactionRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReserveBeginExecuteRequest + * Gets the default type url for ConcludeTransactionRequest * @function getTypeUrl - * @memberof query.ReserveBeginExecuteRequest + * @memberof query.ConcludeTransactionRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReserveBeginExecuteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ConcludeTransactionRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.ReserveBeginExecuteRequest"; + return typeUrlPrefix + "/query.ConcludeTransactionRequest"; }; - return ReserveBeginExecuteRequest; + return ConcludeTransactionRequest; })(); - query.ReserveBeginExecuteResponse = (function() { + query.ConcludeTransactionResponse = (function() { /** - * Properties of a ReserveBeginExecuteResponse. + * Properties of a ConcludeTransactionResponse. 
* @memberof query - * @interface IReserveBeginExecuteResponse - * @property {vtrpc.IRPCError|null} [error] ReserveBeginExecuteResponse error - * @property {query.IQueryResult|null} [result] ReserveBeginExecuteResponse result - * @property {number|Long|null} [transaction_id] ReserveBeginExecuteResponse transaction_id - * @property {number|Long|null} [reserved_id] ReserveBeginExecuteResponse reserved_id - * @property {topodata.ITabletAlias|null} [tablet_alias] ReserveBeginExecuteResponse tablet_alias - * @property {string|null} [session_state_changes] ReserveBeginExecuteResponse session_state_changes + * @interface IConcludeTransactionResponse */ /** - * Constructs a new ReserveBeginExecuteResponse. + * Constructs a new ConcludeTransactionResponse. * @memberof query - * @classdesc Represents a ReserveBeginExecuteResponse. - * @implements IReserveBeginExecuteResponse + * @classdesc Represents a ConcludeTransactionResponse. + * @implements IConcludeTransactionResponse * @constructor - * @param {query.IReserveBeginExecuteResponse=} [properties] Properties to set + * @param {query.IConcludeTransactionResponse=} [properties] Properties to set */ - function ReserveBeginExecuteResponse(properties) { + function ConcludeTransactionResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -87722,147 +88658,63 @@ export const query = $root.query = (() => { } /** - * ReserveBeginExecuteResponse error. - * @member {vtrpc.IRPCError|null|undefined} error - * @memberof query.ReserveBeginExecuteResponse - * @instance - */ - ReserveBeginExecuteResponse.prototype.error = null; - - /** - * ReserveBeginExecuteResponse result. - * @member {query.IQueryResult|null|undefined} result - * @memberof query.ReserveBeginExecuteResponse - * @instance - */ - ReserveBeginExecuteResponse.prototype.result = null; - - /** - * ReserveBeginExecuteResponse transaction_id. 
- * @member {number|Long} transaction_id - * @memberof query.ReserveBeginExecuteResponse - * @instance - */ - ReserveBeginExecuteResponse.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * ReserveBeginExecuteResponse reserved_id. - * @member {number|Long} reserved_id - * @memberof query.ReserveBeginExecuteResponse - * @instance - */ - ReserveBeginExecuteResponse.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * ReserveBeginExecuteResponse tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof query.ReserveBeginExecuteResponse - * @instance - */ - ReserveBeginExecuteResponse.prototype.tablet_alias = null; - - /** - * ReserveBeginExecuteResponse session_state_changes. - * @member {string} session_state_changes - * @memberof query.ReserveBeginExecuteResponse - * @instance - */ - ReserveBeginExecuteResponse.prototype.session_state_changes = ""; - - /** - * Creates a new ReserveBeginExecuteResponse instance using the specified properties. + * Creates a new ConcludeTransactionResponse instance using the specified properties. * @function create - * @memberof query.ReserveBeginExecuteResponse + * @memberof query.ConcludeTransactionResponse * @static - * @param {query.IReserveBeginExecuteResponse=} [properties] Properties to set - * @returns {query.ReserveBeginExecuteResponse} ReserveBeginExecuteResponse instance + * @param {query.IConcludeTransactionResponse=} [properties] Properties to set + * @returns {query.ConcludeTransactionResponse} ConcludeTransactionResponse instance */ - ReserveBeginExecuteResponse.create = function create(properties) { - return new ReserveBeginExecuteResponse(properties); + ConcludeTransactionResponse.create = function create(properties) { + return new ConcludeTransactionResponse(properties); }; /** - * Encodes the specified ReserveBeginExecuteResponse message. 
Does not implicitly {@link query.ReserveBeginExecuteResponse.verify|verify} messages. + * Encodes the specified ConcludeTransactionResponse message. Does not implicitly {@link query.ConcludeTransactionResponse.verify|verify} messages. * @function encode - * @memberof query.ReserveBeginExecuteResponse + * @memberof query.ConcludeTransactionResponse * @static - * @param {query.IReserveBeginExecuteResponse} message ReserveBeginExecuteResponse message or plain object to encode + * @param {query.IConcludeTransactionResponse} message ConcludeTransactionResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReserveBeginExecuteResponse.encode = function encode(message, writer) { + ConcludeTransactionResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.error != null && Object.hasOwnProperty.call(message, "error")) - $root.vtrpc.RPCError.encode(message.error, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.result != null && Object.hasOwnProperty.call(message, "result")) - $root.query.QueryResult.encode(message.result, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) - writer.uint32(/* id 3, wireType 0 =*/24).int64(message.transaction_id); - if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) - writer.uint32(/* id 4, wireType 0 =*/32).int64(message.reserved_id); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); - if (message.session_state_changes != null && Object.hasOwnProperty.call(message, "session_state_changes")) - writer.uint32(/* id 6, wireType 2 =*/50).string(message.session_state_changes); return writer; }; /** 
- * Encodes the specified ReserveBeginExecuteResponse message, length delimited. Does not implicitly {@link query.ReserveBeginExecuteResponse.verify|verify} messages. + * Encodes the specified ConcludeTransactionResponse message, length delimited. Does not implicitly {@link query.ConcludeTransactionResponse.verify|verify} messages. * @function encodeDelimited - * @memberof query.ReserveBeginExecuteResponse + * @memberof query.ConcludeTransactionResponse * @static - * @param {query.IReserveBeginExecuteResponse} message ReserveBeginExecuteResponse message or plain object to encode + * @param {query.IConcludeTransactionResponse} message ConcludeTransactionResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReserveBeginExecuteResponse.encodeDelimited = function encodeDelimited(message, writer) { + ConcludeTransactionResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReserveBeginExecuteResponse message from the specified reader or buffer. + * Decodes a ConcludeTransactionResponse message from the specified reader or buffer. 
* @function decode - * @memberof query.ReserveBeginExecuteResponse + * @memberof query.ConcludeTransactionResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.ReserveBeginExecuteResponse} ReserveBeginExecuteResponse + * @returns {query.ConcludeTransactionResponse} ConcludeTransactionResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReserveBeginExecuteResponse.decode = function decode(reader, length) { + ConcludeTransactionResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ReserveBeginExecuteResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ConcludeTransactionResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.error = $root.vtrpc.RPCError.decode(reader, reader.uint32()); - break; - } - case 2: { - message.result = $root.query.QueryResult.decode(reader, reader.uint32()); - break; - } - case 3: { - message.transaction_id = reader.int64(); - break; - } - case 4: { - message.reserved_id = reader.int64(); - break; - } - case 5: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } - case 6: { - message.session_state_changes = reader.string(); - break; - } default: reader.skipType(tag & 7); break; @@ -87872,214 +88724,112 @@ export const query = $root.query = (() => { }; /** - * Decodes a ReserveBeginExecuteResponse message from the specified reader or buffer, length delimited. + * Decodes a ConcludeTransactionResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.ReserveBeginExecuteResponse + * @memberof query.ConcludeTransactionResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.ReserveBeginExecuteResponse} ReserveBeginExecuteResponse + * @returns {query.ConcludeTransactionResponse} ConcludeTransactionResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReserveBeginExecuteResponse.decodeDelimited = function decodeDelimited(reader) { + ConcludeTransactionResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReserveBeginExecuteResponse message. + * Verifies a ConcludeTransactionResponse message. * @function verify - * @memberof query.ReserveBeginExecuteResponse + * @memberof query.ConcludeTransactionResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReserveBeginExecuteResponse.verify = function verify(message) { + ConcludeTransactionResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.error != null && message.hasOwnProperty("error")) { - let error = $root.vtrpc.RPCError.verify(message.error); - if (error) - return "error." + error; - } - if (message.result != null && message.hasOwnProperty("result")) { - let error = $root.query.QueryResult.verify(message.result); - if (error) - return "result." 
+ error; - } - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) - return "transaction_id: integer|Long expected"; - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) - return "reserved_id: integer|Long expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } - if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) - if (!$util.isString(message.session_state_changes)) - return "session_state_changes: string expected"; return null; }; /** - * Creates a ReserveBeginExecuteResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ConcludeTransactionResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.ReserveBeginExecuteResponse + * @memberof query.ConcludeTransactionResponse * @static * @param {Object.} object Plain object - * @returns {query.ReserveBeginExecuteResponse} ReserveBeginExecuteResponse + * @returns {query.ConcludeTransactionResponse} ConcludeTransactionResponse */ - ReserveBeginExecuteResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.ReserveBeginExecuteResponse) + ConcludeTransactionResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.ConcludeTransactionResponse) return object; - let message = new $root.query.ReserveBeginExecuteResponse(); - if (object.error != null) { - if (typeof object.error !== "object") - throw TypeError(".query.ReserveBeginExecuteResponse.error: object expected"); - message.error = $root.vtrpc.RPCError.fromObject(object.error); - } - if (object.result != null) { - if (typeof object.result !== "object") - throw TypeError(".query.ReserveBeginExecuteResponse.result: object expected"); - message.result = $root.query.QueryResult.fromObject(object.result); - } - if (object.transaction_id != null) - if ($util.Long) - (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; - else if (typeof object.transaction_id === "string") - message.transaction_id = parseInt(object.transaction_id, 10); - else if (typeof object.transaction_id === "number") - message.transaction_id = object.transaction_id; - else if (typeof object.transaction_id === "object") - message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); - if (object.reserved_id != null) - if ($util.Long) - (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; - else if (typeof object.reserved_id === "string") - message.reserved_id = parseInt(object.reserved_id, 10); - else if (typeof object.reserved_id === "number") - message.reserved_id = 
object.reserved_id; - else if (typeof object.reserved_id === "object") - message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".query.ReserveBeginExecuteResponse.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } - if (object.session_state_changes != null) - message.session_state_changes = String(object.session_state_changes); - return message; + return new $root.query.ConcludeTransactionResponse(); }; /** - * Creates a plain object from a ReserveBeginExecuteResponse message. Also converts values to other types if specified. + * Creates a plain object from a ConcludeTransactionResponse message. Also converts values to other types if specified. * @function toObject - * @memberof query.ReserveBeginExecuteResponse + * @memberof query.ConcludeTransactionResponse * @static - * @param {query.ReserveBeginExecuteResponse} message ReserveBeginExecuteResponse + * @param {query.ConcludeTransactionResponse} message ConcludeTransactionResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReserveBeginExecuteResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.error = null; - object.result = null; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.transaction_id = options.longs === String ? "0" : 0; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.reserved_id = options.longs === String ? 
"0" : 0; - object.tablet_alias = null; - object.session_state_changes = ""; - } - if (message.error != null && message.hasOwnProperty("error")) - object.error = $root.vtrpc.RPCError.toObject(message.error, options); - if (message.result != null && message.hasOwnProperty("result")) - object.result = $root.query.QueryResult.toObject(message.result, options); - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (typeof message.transaction_id === "number") - object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; - else - object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (typeof message.reserved_id === "number") - object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; - else - object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) - object.session_state_changes = message.session_state_changes; - return object; + ConcludeTransactionResponse.toObject = function toObject() { + return {}; }; /** - * Converts this ReserveBeginExecuteResponse to JSON. + * Converts this ConcludeTransactionResponse to JSON. 
* @function toJSON - * @memberof query.ReserveBeginExecuteResponse + * @memberof query.ConcludeTransactionResponse * @instance * @returns {Object.} JSON object */ - ReserveBeginExecuteResponse.prototype.toJSON = function toJSON() { + ConcludeTransactionResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReserveBeginExecuteResponse + * Gets the default type url for ConcludeTransactionResponse * @function getTypeUrl - * @memberof query.ReserveBeginExecuteResponse + * @memberof query.ConcludeTransactionResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReserveBeginExecuteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ConcludeTransactionResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.ReserveBeginExecuteResponse"; + return typeUrlPrefix + "/query.ConcludeTransactionResponse"; }; - return ReserveBeginExecuteResponse; + return ConcludeTransactionResponse; })(); - query.ReserveBeginStreamExecuteRequest = (function() { + query.ReadTransactionRequest = (function() { /** - * Properties of a ReserveBeginStreamExecuteRequest. + * Properties of a ReadTransactionRequest. 
* @memberof query - * @interface IReserveBeginStreamExecuteRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] ReserveBeginStreamExecuteRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] ReserveBeginStreamExecuteRequest immediate_caller_id - * @property {query.ITarget|null} [target] ReserveBeginStreamExecuteRequest target - * @property {query.IBoundQuery|null} [query] ReserveBeginStreamExecuteRequest query - * @property {query.IExecuteOptions|null} [options] ReserveBeginStreamExecuteRequest options - * @property {Array.|null} [pre_queries] ReserveBeginStreamExecuteRequest pre_queries - * @property {Array.|null} [post_begin_queries] ReserveBeginStreamExecuteRequest post_begin_queries + * @interface IReadTransactionRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] ReadTransactionRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] ReadTransactionRequest immediate_caller_id + * @property {query.ITarget|null} [target] ReadTransactionRequest target + * @property {string|null} [dtid] ReadTransactionRequest dtid */ /** - * Constructs a new ReserveBeginStreamExecuteRequest. + * Constructs a new ReadTransactionRequest. * @memberof query - * @classdesc Represents a ReserveBeginStreamExecuteRequest. - * @implements IReserveBeginStreamExecuteRequest + * @classdesc Represents a ReadTransactionRequest. 
+ * @implements IReadTransactionRequest * @constructor - * @param {query.IReserveBeginStreamExecuteRequest=} [properties] Properties to set + * @param {query.IReadTransactionRequest=} [properties] Properties to set */ - function ReserveBeginStreamExecuteRequest(properties) { - this.pre_queries = []; - this.post_begin_queries = []; + function ReadTransactionRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -88087,83 +88837,59 @@ export const query = $root.query = (() => { } /** - * ReserveBeginStreamExecuteRequest effective_caller_id. + * ReadTransactionRequest effective_caller_id. * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.ReserveBeginStreamExecuteRequest + * @memberof query.ReadTransactionRequest * @instance */ - ReserveBeginStreamExecuteRequest.prototype.effective_caller_id = null; + ReadTransactionRequest.prototype.effective_caller_id = null; /** - * ReserveBeginStreamExecuteRequest immediate_caller_id. + * ReadTransactionRequest immediate_caller_id. * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.ReserveBeginStreamExecuteRequest + * @memberof query.ReadTransactionRequest * @instance */ - ReserveBeginStreamExecuteRequest.prototype.immediate_caller_id = null; + ReadTransactionRequest.prototype.immediate_caller_id = null; /** - * ReserveBeginStreamExecuteRequest target. + * ReadTransactionRequest target. * @member {query.ITarget|null|undefined} target - * @memberof query.ReserveBeginStreamExecuteRequest - * @instance - */ - ReserveBeginStreamExecuteRequest.prototype.target = null; - - /** - * ReserveBeginStreamExecuteRequest query. - * @member {query.IBoundQuery|null|undefined} query - * @memberof query.ReserveBeginStreamExecuteRequest - * @instance - */ - ReserveBeginStreamExecuteRequest.prototype.query = null; - - /** - * ReserveBeginStreamExecuteRequest options. 
- * @member {query.IExecuteOptions|null|undefined} options - * @memberof query.ReserveBeginStreamExecuteRequest - * @instance - */ - ReserveBeginStreamExecuteRequest.prototype.options = null; - - /** - * ReserveBeginStreamExecuteRequest pre_queries. - * @member {Array.} pre_queries - * @memberof query.ReserveBeginStreamExecuteRequest + * @memberof query.ReadTransactionRequest * @instance */ - ReserveBeginStreamExecuteRequest.prototype.pre_queries = $util.emptyArray; + ReadTransactionRequest.prototype.target = null; /** - * ReserveBeginStreamExecuteRequest post_begin_queries. - * @member {Array.} post_begin_queries - * @memberof query.ReserveBeginStreamExecuteRequest + * ReadTransactionRequest dtid. + * @member {string} dtid + * @memberof query.ReadTransactionRequest * @instance */ - ReserveBeginStreamExecuteRequest.prototype.post_begin_queries = $util.emptyArray; + ReadTransactionRequest.prototype.dtid = ""; /** - * Creates a new ReserveBeginStreamExecuteRequest instance using the specified properties. + * Creates a new ReadTransactionRequest instance using the specified properties. * @function create - * @memberof query.ReserveBeginStreamExecuteRequest + * @memberof query.ReadTransactionRequest * @static - * @param {query.IReserveBeginStreamExecuteRequest=} [properties] Properties to set - * @returns {query.ReserveBeginStreamExecuteRequest} ReserveBeginStreamExecuteRequest instance + * @param {query.IReadTransactionRequest=} [properties] Properties to set + * @returns {query.ReadTransactionRequest} ReadTransactionRequest instance */ - ReserveBeginStreamExecuteRequest.create = function create(properties) { - return new ReserveBeginStreamExecuteRequest(properties); + ReadTransactionRequest.create = function create(properties) { + return new ReadTransactionRequest(properties); }; /** - * Encodes the specified ReserveBeginStreamExecuteRequest message. Does not implicitly {@link query.ReserveBeginStreamExecuteRequest.verify|verify} messages. 
+ * Encodes the specified ReadTransactionRequest message. Does not implicitly {@link query.ReadTransactionRequest.verify|verify} messages. * @function encode - * @memberof query.ReserveBeginStreamExecuteRequest + * @memberof query.ReadTransactionRequest * @static - * @param {query.IReserveBeginStreamExecuteRequest} message ReserveBeginStreamExecuteRequest message or plain object to encode + * @param {query.IReadTransactionRequest} message ReadTransactionRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReserveBeginStreamExecuteRequest.encode = function encode(message, writer) { + ReadTransactionRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) @@ -88172,47 +88898,39 @@ export const query = $root.query = (() => { $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); if (message.target != null && Object.hasOwnProperty.call(message, "target")) $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.query != null && Object.hasOwnProperty.call(message, "query")) - $root.query.BoundQuery.encode(message.query, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.options != null && Object.hasOwnProperty.call(message, "options")) - $root.query.ExecuteOptions.encode(message.options, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); - if (message.pre_queries != null && message.pre_queries.length) - for (let i = 0; i < message.pre_queries.length; ++i) - writer.uint32(/* id 6, wireType 2 =*/50).string(message.pre_queries[i]); - if (message.post_begin_queries != null && message.post_begin_queries.length) - for (let i = 0; i < message.post_begin_queries.length; ++i) - writer.uint32(/* id 7, 
wireType 2 =*/58).string(message.post_begin_queries[i]); + if (message.dtid != null && Object.hasOwnProperty.call(message, "dtid")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.dtid); return writer; }; /** - * Encodes the specified ReserveBeginStreamExecuteRequest message, length delimited. Does not implicitly {@link query.ReserveBeginStreamExecuteRequest.verify|verify} messages. + * Encodes the specified ReadTransactionRequest message, length delimited. Does not implicitly {@link query.ReadTransactionRequest.verify|verify} messages. * @function encodeDelimited - * @memberof query.ReserveBeginStreamExecuteRequest + * @memberof query.ReadTransactionRequest * @static - * @param {query.IReserveBeginStreamExecuteRequest} message ReserveBeginStreamExecuteRequest message or plain object to encode + * @param {query.IReadTransactionRequest} message ReadTransactionRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReserveBeginStreamExecuteRequest.encodeDelimited = function encodeDelimited(message, writer) { + ReadTransactionRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReserveBeginStreamExecuteRequest message from the specified reader or buffer. + * Decodes a ReadTransactionRequest message from the specified reader or buffer. 
* @function decode - * @memberof query.ReserveBeginStreamExecuteRequest + * @memberof query.ReadTransactionRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.ReserveBeginStreamExecuteRequest} ReserveBeginStreamExecuteRequest + * @returns {query.ReadTransactionRequest} ReadTransactionRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReserveBeginStreamExecuteRequest.decode = function decode(reader, length) { + ReadTransactionRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ReserveBeginStreamExecuteRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ReadTransactionRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -88229,23 +88947,7 @@ export const query = $root.query = (() => { break; } case 4: { - message.query = $root.query.BoundQuery.decode(reader, reader.uint32()); - break; - } - case 5: { - message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); - break; - } - case 6: { - if (!(message.pre_queries && message.pre_queries.length)) - message.pre_queries = []; - message.pre_queries.push(reader.string()); - break; - } - case 7: { - if (!(message.post_begin_queries && message.post_begin_queries.length)) - message.post_begin_queries = []; - message.post_begin_queries.push(reader.string()); + message.dtid = reader.string(); break; } default: @@ -88257,30 +88959,30 @@ export const query = $root.query = (() => { }; /** - * Decodes a ReserveBeginStreamExecuteRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a ReadTransactionRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.ReserveBeginStreamExecuteRequest + * @memberof query.ReadTransactionRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.ReserveBeginStreamExecuteRequest} ReserveBeginStreamExecuteRequest + * @returns {query.ReadTransactionRequest} ReadTransactionRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReserveBeginStreamExecuteRequest.decodeDelimited = function decodeDelimited(reader) { + ReadTransactionRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReserveBeginStreamExecuteRequest message. + * Verifies a ReadTransactionRequest message. * @function verify - * @memberof query.ReserveBeginStreamExecuteRequest + * @memberof query.ReadTransactionRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReserveBeginStreamExecuteRequest.verify = function verify(message) { + ReadTransactionRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { @@ -88298,110 +89000,62 @@ export const query = $root.query = (() => { if (error) return "target." + error; } - if (message.query != null && message.hasOwnProperty("query")) { - let error = $root.query.BoundQuery.verify(message.query); - if (error) - return "query." + error; - } - if (message.options != null && message.hasOwnProperty("options")) { - let error = $root.query.ExecuteOptions.verify(message.options); - if (error) - return "options." 
+ error; - } - if (message.pre_queries != null && message.hasOwnProperty("pre_queries")) { - if (!Array.isArray(message.pre_queries)) - return "pre_queries: array expected"; - for (let i = 0; i < message.pre_queries.length; ++i) - if (!$util.isString(message.pre_queries[i])) - return "pre_queries: string[] expected"; - } - if (message.post_begin_queries != null && message.hasOwnProperty("post_begin_queries")) { - if (!Array.isArray(message.post_begin_queries)) - return "post_begin_queries: array expected"; - for (let i = 0; i < message.post_begin_queries.length; ++i) - if (!$util.isString(message.post_begin_queries[i])) - return "post_begin_queries: string[] expected"; - } + if (message.dtid != null && message.hasOwnProperty("dtid")) + if (!$util.isString(message.dtid)) + return "dtid: string expected"; return null; }; /** - * Creates a ReserveBeginStreamExecuteRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ReadTransactionRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.ReserveBeginStreamExecuteRequest + * @memberof query.ReadTransactionRequest * @static * @param {Object.} object Plain object - * @returns {query.ReserveBeginStreamExecuteRequest} ReserveBeginStreamExecuteRequest + * @returns {query.ReadTransactionRequest} ReadTransactionRequest */ - ReserveBeginStreamExecuteRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.ReserveBeginStreamExecuteRequest) + ReadTransactionRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.ReadTransactionRequest) return object; - let message = new $root.query.ReserveBeginStreamExecuteRequest(); + let message = new $root.query.ReadTransactionRequest(); if (object.effective_caller_id != null) { if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.ReserveBeginStreamExecuteRequest.effective_caller_id: object expected"); + throw TypeError(".query.ReadTransactionRequest.effective_caller_id: object expected"); message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); } if (object.immediate_caller_id != null) { if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.ReserveBeginStreamExecuteRequest.immediate_caller_id: object expected"); + throw TypeError(".query.ReadTransactionRequest.immediate_caller_id: object expected"); message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); } if (object.target != null) { if (typeof object.target !== "object") - throw TypeError(".query.ReserveBeginStreamExecuteRequest.target: object expected"); + throw TypeError(".query.ReadTransactionRequest.target: object expected"); message.target = $root.query.Target.fromObject(object.target); } - if (object.query != null) { - if (typeof object.query !== "object") - throw TypeError(".query.ReserveBeginStreamExecuteRequest.query: object expected"); - message.query = 
$root.query.BoundQuery.fromObject(object.query); - } - if (object.options != null) { - if (typeof object.options !== "object") - throw TypeError(".query.ReserveBeginStreamExecuteRequest.options: object expected"); - message.options = $root.query.ExecuteOptions.fromObject(object.options); - } - if (object.pre_queries) { - if (!Array.isArray(object.pre_queries)) - throw TypeError(".query.ReserveBeginStreamExecuteRequest.pre_queries: array expected"); - message.pre_queries = []; - for (let i = 0; i < object.pre_queries.length; ++i) - message.pre_queries[i] = String(object.pre_queries[i]); - } - if (object.post_begin_queries) { - if (!Array.isArray(object.post_begin_queries)) - throw TypeError(".query.ReserveBeginStreamExecuteRequest.post_begin_queries: array expected"); - message.post_begin_queries = []; - for (let i = 0; i < object.post_begin_queries.length; ++i) - message.post_begin_queries[i] = String(object.post_begin_queries[i]); - } + if (object.dtid != null) + message.dtid = String(object.dtid); return message; }; /** - * Creates a plain object from a ReserveBeginStreamExecuteRequest message. Also converts values to other types if specified. + * Creates a plain object from a ReadTransactionRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.ReserveBeginStreamExecuteRequest + * @memberof query.ReadTransactionRequest * @static - * @param {query.ReserveBeginStreamExecuteRequest} message ReserveBeginStreamExecuteRequest + * @param {query.ReadTransactionRequest} message ReadTransactionRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReserveBeginStreamExecuteRequest.toObject = function toObject(message, options) { + ReadTransactionRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) { - object.pre_queries = []; - object.post_begin_queries = []; - } if (options.defaults) { object.effective_caller_id = null; object.immediate_caller_id = null; object.target = null; - object.query = null; - object.options = null; + object.dtid = ""; } if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); @@ -88409,75 +89063,58 @@ export const query = $root.query = (() => { object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); if (message.target != null && message.hasOwnProperty("target")) object.target = $root.query.Target.toObject(message.target, options); - if (message.query != null && message.hasOwnProperty("query")) - object.query = $root.query.BoundQuery.toObject(message.query, options); - if (message.options != null && message.hasOwnProperty("options")) - object.options = $root.query.ExecuteOptions.toObject(message.options, options); - if (message.pre_queries && message.pre_queries.length) { - object.pre_queries = []; - for (let j = 0; j < message.pre_queries.length; ++j) - object.pre_queries[j] = message.pre_queries[j]; - } - if (message.post_begin_queries && message.post_begin_queries.length) { - object.post_begin_queries = []; - for (let j = 
0; j < message.post_begin_queries.length; ++j) - object.post_begin_queries[j] = message.post_begin_queries[j]; - } + if (message.dtid != null && message.hasOwnProperty("dtid")) + object.dtid = message.dtid; return object; }; /** - * Converts this ReserveBeginStreamExecuteRequest to JSON. + * Converts this ReadTransactionRequest to JSON. * @function toJSON - * @memberof query.ReserveBeginStreamExecuteRequest + * @memberof query.ReadTransactionRequest * @instance * @returns {Object.} JSON object */ - ReserveBeginStreamExecuteRequest.prototype.toJSON = function toJSON() { + ReadTransactionRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReserveBeginStreamExecuteRequest + * Gets the default type url for ReadTransactionRequest * @function getTypeUrl - * @memberof query.ReserveBeginStreamExecuteRequest + * @memberof query.ReadTransactionRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReserveBeginStreamExecuteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReadTransactionRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.ReserveBeginStreamExecuteRequest"; + return typeUrlPrefix + "/query.ReadTransactionRequest"; }; - return ReserveBeginStreamExecuteRequest; + return ReadTransactionRequest; })(); - query.ReserveBeginStreamExecuteResponse = (function() { + query.ReadTransactionResponse = (function() { /** - * Properties of a ReserveBeginStreamExecuteResponse. + * Properties of a ReadTransactionResponse. 
* @memberof query - * @interface IReserveBeginStreamExecuteResponse - * @property {vtrpc.IRPCError|null} [error] ReserveBeginStreamExecuteResponse error - * @property {query.IQueryResult|null} [result] ReserveBeginStreamExecuteResponse result - * @property {number|Long|null} [transaction_id] ReserveBeginStreamExecuteResponse transaction_id - * @property {number|Long|null} [reserved_id] ReserveBeginStreamExecuteResponse reserved_id - * @property {topodata.ITabletAlias|null} [tablet_alias] ReserveBeginStreamExecuteResponse tablet_alias - * @property {string|null} [session_state_changes] ReserveBeginStreamExecuteResponse session_state_changes + * @interface IReadTransactionResponse + * @property {query.ITransactionMetadata|null} [metadata] ReadTransactionResponse metadata */ /** - * Constructs a new ReserveBeginStreamExecuteResponse. + * Constructs a new ReadTransactionResponse. * @memberof query - * @classdesc Represents a ReserveBeginStreamExecuteResponse. - * @implements IReserveBeginStreamExecuteResponse + * @classdesc Represents a ReadTransactionResponse. + * @implements IReadTransactionResponse * @constructor - * @param {query.IReserveBeginStreamExecuteResponse=} [properties] Properties to set + * @param {query.IReadTransactionResponse=} [properties] Properties to set */ - function ReserveBeginStreamExecuteResponse(properties) { + function ReadTransactionResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -88485,145 +89122,75 @@ export const query = $root.query = (() => { } /** - * ReserveBeginStreamExecuteResponse error. - * @member {vtrpc.IRPCError|null|undefined} error - * @memberof query.ReserveBeginStreamExecuteResponse - * @instance - */ - ReserveBeginStreamExecuteResponse.prototype.error = null; - - /** - * ReserveBeginStreamExecuteResponse result. 
- * @member {query.IQueryResult|null|undefined} result - * @memberof query.ReserveBeginStreamExecuteResponse - * @instance - */ - ReserveBeginStreamExecuteResponse.prototype.result = null; - - /** - * ReserveBeginStreamExecuteResponse transaction_id. - * @member {number|Long} transaction_id - * @memberof query.ReserveBeginStreamExecuteResponse - * @instance - */ - ReserveBeginStreamExecuteResponse.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * ReserveBeginStreamExecuteResponse reserved_id. - * @member {number|Long} reserved_id - * @memberof query.ReserveBeginStreamExecuteResponse - * @instance - */ - ReserveBeginStreamExecuteResponse.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * ReserveBeginStreamExecuteResponse tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof query.ReserveBeginStreamExecuteResponse - * @instance - */ - ReserveBeginStreamExecuteResponse.prototype.tablet_alias = null; - - /** - * ReserveBeginStreamExecuteResponse session_state_changes. - * @member {string} session_state_changes - * @memberof query.ReserveBeginStreamExecuteResponse + * ReadTransactionResponse metadata. + * @member {query.ITransactionMetadata|null|undefined} metadata + * @memberof query.ReadTransactionResponse * @instance */ - ReserveBeginStreamExecuteResponse.prototype.session_state_changes = ""; + ReadTransactionResponse.prototype.metadata = null; /** - * Creates a new ReserveBeginStreamExecuteResponse instance using the specified properties. + * Creates a new ReadTransactionResponse instance using the specified properties. 
* @function create - * @memberof query.ReserveBeginStreamExecuteResponse + * @memberof query.ReadTransactionResponse * @static - * @param {query.IReserveBeginStreamExecuteResponse=} [properties] Properties to set - * @returns {query.ReserveBeginStreamExecuteResponse} ReserveBeginStreamExecuteResponse instance + * @param {query.IReadTransactionResponse=} [properties] Properties to set + * @returns {query.ReadTransactionResponse} ReadTransactionResponse instance */ - ReserveBeginStreamExecuteResponse.create = function create(properties) { - return new ReserveBeginStreamExecuteResponse(properties); + ReadTransactionResponse.create = function create(properties) { + return new ReadTransactionResponse(properties); }; /** - * Encodes the specified ReserveBeginStreamExecuteResponse message. Does not implicitly {@link query.ReserveBeginStreamExecuteResponse.verify|verify} messages. + * Encodes the specified ReadTransactionResponse message. Does not implicitly {@link query.ReadTransactionResponse.verify|verify} messages. 
* @function encode - * @memberof query.ReserveBeginStreamExecuteResponse + * @memberof query.ReadTransactionResponse * @static - * @param {query.IReserveBeginStreamExecuteResponse} message ReserveBeginStreamExecuteResponse message or plain object to encode + * @param {query.IReadTransactionResponse} message ReadTransactionResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReserveBeginStreamExecuteResponse.encode = function encode(message, writer) { + ReadTransactionResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.error != null && Object.hasOwnProperty.call(message, "error")) - $root.vtrpc.RPCError.encode(message.error, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.result != null && Object.hasOwnProperty.call(message, "result")) - $root.query.QueryResult.encode(message.result, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) - writer.uint32(/* id 3, wireType 0 =*/24).int64(message.transaction_id); - if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) - writer.uint32(/* id 4, wireType 0 =*/32).int64(message.reserved_id); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); - if (message.session_state_changes != null && Object.hasOwnProperty.call(message, "session_state_changes")) - writer.uint32(/* id 6, wireType 2 =*/50).string(message.session_state_changes); + if (message.metadata != null && Object.hasOwnProperty.call(message, "metadata")) + $root.query.TransactionMetadata.encode(message.metadata, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified 
ReserveBeginStreamExecuteResponse message, length delimited. Does not implicitly {@link query.ReserveBeginStreamExecuteResponse.verify|verify} messages. + * Encodes the specified ReadTransactionResponse message, length delimited. Does not implicitly {@link query.ReadTransactionResponse.verify|verify} messages. * @function encodeDelimited - * @memberof query.ReserveBeginStreamExecuteResponse + * @memberof query.ReadTransactionResponse * @static - * @param {query.IReserveBeginStreamExecuteResponse} message ReserveBeginStreamExecuteResponse message or plain object to encode + * @param {query.IReadTransactionResponse} message ReadTransactionResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReserveBeginStreamExecuteResponse.encodeDelimited = function encodeDelimited(message, writer) { + ReadTransactionResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReserveBeginStreamExecuteResponse message from the specified reader or buffer. + * Decodes a ReadTransactionResponse message from the specified reader or buffer. 
* @function decode - * @memberof query.ReserveBeginStreamExecuteResponse + * @memberof query.ReadTransactionResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.ReserveBeginStreamExecuteResponse} ReserveBeginStreamExecuteResponse + * @returns {query.ReadTransactionResponse} ReadTransactionResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReserveBeginStreamExecuteResponse.decode = function decode(reader, length) { + ReadTransactionResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ReserveBeginStreamExecuteResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ReadTransactionResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.error = $root.vtrpc.RPCError.decode(reader, reader.uint32()); - break; - } - case 2: { - message.result = $root.query.QueryResult.decode(reader, reader.uint32()); - break; - } - case 3: { - message.transaction_id = reader.int64(); - break; - } - case 4: { - message.reserved_id = reader.int64(); - break; - } - case 5: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } - case 6: { - message.session_state_changes = reader.string(); + message.metadata = $root.query.TransactionMetadata.decode(reader, reader.uint32()); break; } default: @@ -88635,210 +89202,134 @@ export const query = $root.query = (() => { }; /** - * Decodes a ReserveBeginStreamExecuteResponse message from the specified reader or buffer, length delimited. 
+ * Decodes a ReadTransactionResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.ReserveBeginStreamExecuteResponse + * @memberof query.ReadTransactionResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.ReserveBeginStreamExecuteResponse} ReserveBeginStreamExecuteResponse + * @returns {query.ReadTransactionResponse} ReadTransactionResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReserveBeginStreamExecuteResponse.decodeDelimited = function decodeDelimited(reader) { + ReadTransactionResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReserveBeginStreamExecuteResponse message. + * Verifies a ReadTransactionResponse message. * @function verify - * @memberof query.ReserveBeginStreamExecuteResponse + * @memberof query.ReadTransactionResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReserveBeginStreamExecuteResponse.verify = function verify(message) { + ReadTransactionResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.error != null && message.hasOwnProperty("error")) { - let error = $root.vtrpc.RPCError.verify(message.error); - if (error) - return "error." + error; - } - if (message.result != null && message.hasOwnProperty("result")) { - let error = $root.query.QueryResult.verify(message.result); - if (error) - return "result." 
+ error; - } - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) - return "transaction_id: integer|Long expected"; - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) - return "reserved_id: integer|Long expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (message.metadata != null && message.hasOwnProperty("metadata")) { + let error = $root.query.TransactionMetadata.verify(message.metadata); if (error) - return "tablet_alias." + error; + return "metadata." + error; } - if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) - if (!$util.isString(message.session_state_changes)) - return "session_state_changes: string expected"; return null; }; /** - * Creates a ReserveBeginStreamExecuteResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ReadTransactionResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.ReserveBeginStreamExecuteResponse + * @memberof query.ReadTransactionResponse * @static * @param {Object.} object Plain object - * @returns {query.ReserveBeginStreamExecuteResponse} ReserveBeginStreamExecuteResponse + * @returns {query.ReadTransactionResponse} ReadTransactionResponse */ - ReserveBeginStreamExecuteResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.ReserveBeginStreamExecuteResponse) + ReadTransactionResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.ReadTransactionResponse) return object; - let message = new $root.query.ReserveBeginStreamExecuteResponse(); - if (object.error != null) { - if (typeof object.error !== "object") - throw TypeError(".query.ReserveBeginStreamExecuteResponse.error: object expected"); - message.error = $root.vtrpc.RPCError.fromObject(object.error); - } - if (object.result != null) { - if (typeof object.result !== "object") - throw TypeError(".query.ReserveBeginStreamExecuteResponse.result: object expected"); - message.result = $root.query.QueryResult.fromObject(object.result); - } - if (object.transaction_id != null) - if ($util.Long) - (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; - else if (typeof object.transaction_id === "string") - message.transaction_id = parseInt(object.transaction_id, 10); - else if (typeof object.transaction_id === "number") - message.transaction_id = object.transaction_id; - else if (typeof object.transaction_id === "object") - message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); - if (object.reserved_id != null) - if ($util.Long) - (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; - else if (typeof object.reserved_id === "string") - message.reserved_id = parseInt(object.reserved_id, 10); - else if (typeof object.reserved_id === "number") - 
message.reserved_id = object.reserved_id; - else if (typeof object.reserved_id === "object") - message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".query.ReserveBeginStreamExecuteResponse.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + let message = new $root.query.ReadTransactionResponse(); + if (object.metadata != null) { + if (typeof object.metadata !== "object") + throw TypeError(".query.ReadTransactionResponse.metadata: object expected"); + message.metadata = $root.query.TransactionMetadata.fromObject(object.metadata); } - if (object.session_state_changes != null) - message.session_state_changes = String(object.session_state_changes); return message; }; /** - * Creates a plain object from a ReserveBeginStreamExecuteResponse message. Also converts values to other types if specified. + * Creates a plain object from a ReadTransactionResponse message. Also converts values to other types if specified. * @function toObject - * @memberof query.ReserveBeginStreamExecuteResponse + * @memberof query.ReadTransactionResponse * @static - * @param {query.ReserveBeginStreamExecuteResponse} message ReserveBeginStreamExecuteResponse + * @param {query.ReadTransactionResponse} message ReadTransactionResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReserveBeginStreamExecuteResponse.toObject = function toObject(message, options) { + ReadTransactionResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.error = null; - object.result = null; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? 
long.toNumber() : long; - } else - object.transaction_id = options.longs === String ? "0" : 0; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.reserved_id = options.longs === String ? "0" : 0; - object.tablet_alias = null; - object.session_state_changes = ""; - } - if (message.error != null && message.hasOwnProperty("error")) - object.error = $root.vtrpc.RPCError.toObject(message.error, options); - if (message.result != null && message.hasOwnProperty("result")) - object.result = $root.query.QueryResult.toObject(message.result, options); - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (typeof message.transaction_id === "number") - object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; - else - object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (typeof message.reserved_id === "number") - object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; - else - object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? 
new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) - object.session_state_changes = message.session_state_changes; + if (options.defaults) + object.metadata = null; + if (message.metadata != null && message.hasOwnProperty("metadata")) + object.metadata = $root.query.TransactionMetadata.toObject(message.metadata, options); return object; }; /** - * Converts this ReserveBeginStreamExecuteResponse to JSON. + * Converts this ReadTransactionResponse to JSON. * @function toJSON - * @memberof query.ReserveBeginStreamExecuteResponse + * @memberof query.ReadTransactionResponse * @instance * @returns {Object.} JSON object */ - ReserveBeginStreamExecuteResponse.prototype.toJSON = function toJSON() { + ReadTransactionResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReserveBeginStreamExecuteResponse + * Gets the default type url for ReadTransactionResponse * @function getTypeUrl - * @memberof query.ReserveBeginStreamExecuteResponse + * @memberof query.ReadTransactionResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReserveBeginStreamExecuteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReadTransactionResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.ReserveBeginStreamExecuteResponse"; + return typeUrlPrefix + "/query.ReadTransactionResponse"; }; - return ReserveBeginStreamExecuteResponse; + return 
ReadTransactionResponse; })(); - query.ReleaseRequest = (function() { + query.BeginExecuteRequest = (function() { /** - * Properties of a ReleaseRequest. + * Properties of a BeginExecuteRequest. * @memberof query - * @interface IReleaseRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] ReleaseRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] ReleaseRequest immediate_caller_id - * @property {query.ITarget|null} [target] ReleaseRequest target - * @property {number|Long|null} [transaction_id] ReleaseRequest transaction_id - * @property {number|Long|null} [reserved_id] ReleaseRequest reserved_id + * @interface IBeginExecuteRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] BeginExecuteRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] BeginExecuteRequest immediate_caller_id + * @property {query.ITarget|null} [target] BeginExecuteRequest target + * @property {query.IBoundQuery|null} [query] BeginExecuteRequest query + * @property {query.IExecuteOptions|null} [options] BeginExecuteRequest options + * @property {number|Long|null} [reserved_id] BeginExecuteRequest reserved_id + * @property {Array.|null} [pre_queries] BeginExecuteRequest pre_queries */ /** - * Constructs a new ReleaseRequest. + * Constructs a new BeginExecuteRequest. * @memberof query - * @classdesc Represents a ReleaseRequest. - * @implements IReleaseRequest + * @classdesc Represents a BeginExecuteRequest. 
+ * @implements IBeginExecuteRequest * @constructor - * @param {query.IReleaseRequest=} [properties] Properties to set + * @param {query.IBeginExecuteRequest=} [properties] Properties to set */ - function ReleaseRequest(properties) { + function BeginExecuteRequest(properties) { + this.pre_queries = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -88846,67 +89337,83 @@ export const query = $root.query = (() => { } /** - * ReleaseRequest effective_caller_id. + * BeginExecuteRequest effective_caller_id. * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.ReleaseRequest + * @memberof query.BeginExecuteRequest * @instance */ - ReleaseRequest.prototype.effective_caller_id = null; + BeginExecuteRequest.prototype.effective_caller_id = null; /** - * ReleaseRequest immediate_caller_id. + * BeginExecuteRequest immediate_caller_id. * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.ReleaseRequest + * @memberof query.BeginExecuteRequest * @instance */ - ReleaseRequest.prototype.immediate_caller_id = null; + BeginExecuteRequest.prototype.immediate_caller_id = null; /** - * ReleaseRequest target. + * BeginExecuteRequest target. * @member {query.ITarget|null|undefined} target - * @memberof query.ReleaseRequest + * @memberof query.BeginExecuteRequest * @instance */ - ReleaseRequest.prototype.target = null; + BeginExecuteRequest.prototype.target = null; /** - * ReleaseRequest transaction_id. - * @member {number|Long} transaction_id - * @memberof query.ReleaseRequest + * BeginExecuteRequest query. + * @member {query.IBoundQuery|null|undefined} query + * @memberof query.BeginExecuteRequest * @instance */ - ReleaseRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + BeginExecuteRequest.prototype.query = null; /** - * ReleaseRequest reserved_id. + * BeginExecuteRequest options. 
+ * @member {query.IExecuteOptions|null|undefined} options + * @memberof query.BeginExecuteRequest + * @instance + */ + BeginExecuteRequest.prototype.options = null; + + /** + * BeginExecuteRequest reserved_id. * @member {number|Long} reserved_id - * @memberof query.ReleaseRequest + * @memberof query.BeginExecuteRequest * @instance */ - ReleaseRequest.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + BeginExecuteRequest.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * Creates a new ReleaseRequest instance using the specified properties. + * BeginExecuteRequest pre_queries. + * @member {Array.} pre_queries + * @memberof query.BeginExecuteRequest + * @instance + */ + BeginExecuteRequest.prototype.pre_queries = $util.emptyArray; + + /** + * Creates a new BeginExecuteRequest instance using the specified properties. * @function create - * @memberof query.ReleaseRequest + * @memberof query.BeginExecuteRequest * @static - * @param {query.IReleaseRequest=} [properties] Properties to set - * @returns {query.ReleaseRequest} ReleaseRequest instance + * @param {query.IBeginExecuteRequest=} [properties] Properties to set + * @returns {query.BeginExecuteRequest} BeginExecuteRequest instance */ - ReleaseRequest.create = function create(properties) { - return new ReleaseRequest(properties); + BeginExecuteRequest.create = function create(properties) { + return new BeginExecuteRequest(properties); }; /** - * Encodes the specified ReleaseRequest message. Does not implicitly {@link query.ReleaseRequest.verify|verify} messages. + * Encodes the specified BeginExecuteRequest message. Does not implicitly {@link query.BeginExecuteRequest.verify|verify} messages. 
* @function encode - * @memberof query.ReleaseRequest + * @memberof query.BeginExecuteRequest * @static - * @param {query.IReleaseRequest} message ReleaseRequest message or plain object to encode + * @param {query.IBeginExecuteRequest} message BeginExecuteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReleaseRequest.encode = function encode(message, writer) { + BeginExecuteRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) @@ -88915,41 +89422,46 @@ export const query = $root.query = (() => { $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); if (message.target != null && Object.hasOwnProperty.call(message, "target")) $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) - writer.uint32(/* id 4, wireType 0 =*/32).int64(message.transaction_id); + if (message.query != null && Object.hasOwnProperty.call(message, "query")) + $root.query.BoundQuery.encode(message.query, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.options != null && Object.hasOwnProperty.call(message, "options")) + $root.query.ExecuteOptions.encode(message.options, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) - writer.uint32(/* id 5, wireType 0 =*/40).int64(message.reserved_id); + writer.uint32(/* id 6, wireType 0 =*/48).int64(message.reserved_id); + if (message.pre_queries != null && message.pre_queries.length) + for (let i = 0; i < message.pre_queries.length; ++i) + writer.uint32(/* id 7, wireType 2 
=*/58).string(message.pre_queries[i]); return writer; }; /** - * Encodes the specified ReleaseRequest message, length delimited. Does not implicitly {@link query.ReleaseRequest.verify|verify} messages. + * Encodes the specified BeginExecuteRequest message, length delimited. Does not implicitly {@link query.BeginExecuteRequest.verify|verify} messages. * @function encodeDelimited - * @memberof query.ReleaseRequest + * @memberof query.BeginExecuteRequest * @static - * @param {query.IReleaseRequest} message ReleaseRequest message or plain object to encode + * @param {query.IBeginExecuteRequest} message BeginExecuteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReleaseRequest.encodeDelimited = function encodeDelimited(message, writer) { + BeginExecuteRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReleaseRequest message from the specified reader or buffer. + * Decodes a BeginExecuteRequest message from the specified reader or buffer. * @function decode - * @memberof query.ReleaseRequest + * @memberof query.BeginExecuteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.ReleaseRequest} ReleaseRequest + * @returns {query.BeginExecuteRequest} BeginExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReleaseRequest.decode = function decode(reader, length) { + BeginExecuteRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ReleaseRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.BeginExecuteRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -88966,13 +89478,23 @@ export const query = $root.query = (() => { break; } case 4: { - message.transaction_id = reader.int64(); + message.query = $root.query.BoundQuery.decode(reader, reader.uint32()); break; } case 5: { + message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); + break; + } + case 6: { message.reserved_id = reader.int64(); break; } + case 7: { + if (!(message.pre_queries && message.pre_queries.length)) + message.pre_queries = []; + message.pre_queries.push(reader.string()); + break; + } default: reader.skipType(tag & 7); break; @@ -88982,30 +89504,30 @@ export const query = $root.query = (() => { }; /** - * Decodes a ReleaseRequest message from the specified reader or buffer, length delimited. + * Decodes a BeginExecuteRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.ReleaseRequest + * @memberof query.BeginExecuteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.ReleaseRequest} ReleaseRequest + * @returns {query.BeginExecuteRequest} BeginExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReleaseRequest.decodeDelimited = function decodeDelimited(reader) { + BeginExecuteRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReleaseRequest message. + * Verifies a BeginExecuteRequest message. 
* @function verify - * @memberof query.ReleaseRequest + * @memberof query.BeginExecuteRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReleaseRequest.verify = function verify(message) { + BeginExecuteRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { @@ -89023,51 +89545,66 @@ export const query = $root.query = (() => { if (error) return "target." + error; } - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) - return "transaction_id: integer|Long expected"; - if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) - if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) - return "reserved_id: integer|Long expected"; - return null; - }; - - /** - * Creates a ReleaseRequest message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof query.ReleaseRequest + if (message.query != null && message.hasOwnProperty("query")) { + let error = $root.query.BoundQuery.verify(message.query); + if (error) + return "query." + error; + } + if (message.options != null && message.hasOwnProperty("options")) { + let error = $root.query.ExecuteOptions.verify(message.options); + if (error) + return "options." 
+ error; + } + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) + return "reserved_id: integer|Long expected"; + if (message.pre_queries != null && message.hasOwnProperty("pre_queries")) { + if (!Array.isArray(message.pre_queries)) + return "pre_queries: array expected"; + for (let i = 0; i < message.pre_queries.length; ++i) + if (!$util.isString(message.pre_queries[i])) + return "pre_queries: string[] expected"; + } + return null; + }; + + /** + * Creates a BeginExecuteRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof query.BeginExecuteRequest * @static * @param {Object.} object Plain object - * @returns {query.ReleaseRequest} ReleaseRequest + * @returns {query.BeginExecuteRequest} BeginExecuteRequest */ - ReleaseRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.ReleaseRequest) + BeginExecuteRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.BeginExecuteRequest) return object; - let message = new $root.query.ReleaseRequest(); + let message = new $root.query.BeginExecuteRequest(); if (object.effective_caller_id != null) { if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.ReleaseRequest.effective_caller_id: object expected"); + throw TypeError(".query.BeginExecuteRequest.effective_caller_id: object expected"); message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); } if (object.immediate_caller_id != null) { if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.ReleaseRequest.immediate_caller_id: object expected"); + throw TypeError(".query.BeginExecuteRequest.immediate_caller_id: object expected"); message.immediate_caller_id = 
$root.query.VTGateCallerID.fromObject(object.immediate_caller_id); } if (object.target != null) { if (typeof object.target !== "object") - throw TypeError(".query.ReleaseRequest.target: object expected"); + throw TypeError(".query.BeginExecuteRequest.target: object expected"); message.target = $root.query.Target.fromObject(object.target); } - if (object.transaction_id != null) - if ($util.Long) - (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; - else if (typeof object.transaction_id === "string") - message.transaction_id = parseInt(object.transaction_id, 10); - else if (typeof object.transaction_id === "number") - message.transaction_id = object.transaction_id; - else if (typeof object.transaction_id === "object") - message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); + if (object.query != null) { + if (typeof object.query !== "object") + throw TypeError(".query.BeginExecuteRequest.query: object expected"); + message.query = $root.query.BoundQuery.fromObject(object.query); + } + if (object.options != null) { + if (typeof object.options !== "object") + throw TypeError(".query.BeginExecuteRequest.options: object expected"); + message.options = $root.query.ExecuteOptions.fromObject(object.options); + } if (object.reserved_id != null) if ($util.Long) (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; @@ -89077,31 +89614,37 @@ export const query = $root.query = (() => { message.reserved_id = object.reserved_id; else if (typeof object.reserved_id === "object") message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); + if (object.pre_queries) { + if (!Array.isArray(object.pre_queries)) + throw TypeError(".query.BeginExecuteRequest.pre_queries: array expected"); + message.pre_queries = []; + for (let i = 0; i < object.pre_queries.length; ++i) + message.pre_queries[i] = 
String(object.pre_queries[i]); + } return message; }; /** - * Creates a plain object from a ReleaseRequest message. Also converts values to other types if specified. + * Creates a plain object from a BeginExecuteRequest message. Also converts values to other types if specified. * @function toObject - * @memberof query.ReleaseRequest + * @memberof query.BeginExecuteRequest * @static - * @param {query.ReleaseRequest} message ReleaseRequest + * @param {query.BeginExecuteRequest} message BeginExecuteRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReleaseRequest.toObject = function toObject(message, options) { + BeginExecuteRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) + object.pre_queries = []; if (options.defaults) { object.effective_caller_id = null; object.immediate_caller_id = null; object.target = null; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.transaction_id = options.longs === String ? "0" : 0; + object.query = null; + object.options = null; if ($util.Long) { let long = new $util.Long(0, 0, false); object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; @@ -89114,65 +89657,74 @@ export const query = $root.query = (() => { object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); if (message.target != null && message.hasOwnProperty("target")) object.target = $root.query.Target.toObject(message.target, options); - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (typeof message.transaction_id === "number") - object.transaction_id = options.longs === String ? 
String(message.transaction_id) : message.transaction_id; - else - object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; + if (message.query != null && message.hasOwnProperty("query")) + object.query = $root.query.BoundQuery.toObject(message.query, options); + if (message.options != null && message.hasOwnProperty("options")) + object.options = $root.query.ExecuteOptions.toObject(message.options, options); if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) if (typeof message.reserved_id === "number") object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; else object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; + if (message.pre_queries && message.pre_queries.length) { + object.pre_queries = []; + for (let j = 0; j < message.pre_queries.length; ++j) + object.pre_queries[j] = message.pre_queries[j]; + } return object; }; /** - * Converts this ReleaseRequest to JSON. + * Converts this BeginExecuteRequest to JSON. 
* @function toJSON - * @memberof query.ReleaseRequest + * @memberof query.BeginExecuteRequest * @instance * @returns {Object.} JSON object */ - ReleaseRequest.prototype.toJSON = function toJSON() { + BeginExecuteRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReleaseRequest + * Gets the default type url for BeginExecuteRequest * @function getTypeUrl - * @memberof query.ReleaseRequest + * @memberof query.BeginExecuteRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReleaseRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + BeginExecuteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.ReleaseRequest"; + return typeUrlPrefix + "/query.BeginExecuteRequest"; }; - return ReleaseRequest; + return BeginExecuteRequest; })(); - query.ReleaseResponse = (function() { + query.BeginExecuteResponse = (function() { /** - * Properties of a ReleaseResponse. + * Properties of a BeginExecuteResponse. * @memberof query - * @interface IReleaseResponse + * @interface IBeginExecuteResponse + * @property {vtrpc.IRPCError|null} [error] BeginExecuteResponse error + * @property {query.IQueryResult|null} [result] BeginExecuteResponse result + * @property {number|Long|null} [transaction_id] BeginExecuteResponse transaction_id + * @property {topodata.ITabletAlias|null} [tablet_alias] BeginExecuteResponse tablet_alias + * @property {string|null} [session_state_changes] BeginExecuteResponse session_state_changes */ /** - * Constructs a new ReleaseResponse. + * Constructs a new BeginExecuteResponse. * @memberof query - * @classdesc Represents a ReleaseResponse. - * @implements IReleaseResponse + * @classdesc Represents a BeginExecuteResponse. 
+ * @implements IBeginExecuteResponse * @constructor - * @param {query.IReleaseResponse=} [properties] Properties to set + * @param {query.IBeginExecuteResponse=} [properties] Properties to set */ - function ReleaseResponse(properties) { + function BeginExecuteResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -89180,63 +89732,133 @@ export const query = $root.query = (() => { } /** - * Creates a new ReleaseResponse instance using the specified properties. + * BeginExecuteResponse error. + * @member {vtrpc.IRPCError|null|undefined} error + * @memberof query.BeginExecuteResponse + * @instance + */ + BeginExecuteResponse.prototype.error = null; + + /** + * BeginExecuteResponse result. + * @member {query.IQueryResult|null|undefined} result + * @memberof query.BeginExecuteResponse + * @instance + */ + BeginExecuteResponse.prototype.result = null; + + /** + * BeginExecuteResponse transaction_id. + * @member {number|Long} transaction_id + * @memberof query.BeginExecuteResponse + * @instance + */ + BeginExecuteResponse.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * BeginExecuteResponse tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof query.BeginExecuteResponse + * @instance + */ + BeginExecuteResponse.prototype.tablet_alias = null; + + /** + * BeginExecuteResponse session_state_changes. + * @member {string} session_state_changes + * @memberof query.BeginExecuteResponse + * @instance + */ + BeginExecuteResponse.prototype.session_state_changes = ""; + + /** + * Creates a new BeginExecuteResponse instance using the specified properties. 
* @function create - * @memberof query.ReleaseResponse + * @memberof query.BeginExecuteResponse * @static - * @param {query.IReleaseResponse=} [properties] Properties to set - * @returns {query.ReleaseResponse} ReleaseResponse instance + * @param {query.IBeginExecuteResponse=} [properties] Properties to set + * @returns {query.BeginExecuteResponse} BeginExecuteResponse instance */ - ReleaseResponse.create = function create(properties) { - return new ReleaseResponse(properties); + BeginExecuteResponse.create = function create(properties) { + return new BeginExecuteResponse(properties); }; /** - * Encodes the specified ReleaseResponse message. Does not implicitly {@link query.ReleaseResponse.verify|verify} messages. + * Encodes the specified BeginExecuteResponse message. Does not implicitly {@link query.BeginExecuteResponse.verify|verify} messages. * @function encode - * @memberof query.ReleaseResponse + * @memberof query.BeginExecuteResponse * @static - * @param {query.IReleaseResponse} message ReleaseResponse message or plain object to encode + * @param {query.IBeginExecuteResponse} message BeginExecuteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReleaseResponse.encode = function encode(message, writer) { + BeginExecuteResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.error != null && Object.hasOwnProperty.call(message, "error")) + $root.vtrpc.RPCError.encode(message.error, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.result != null && Object.hasOwnProperty.call(message, "result")) + $root.query.QueryResult.encode(message.result, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) + writer.uint32(/* id 3, wireType 0 =*/24).int64(message.transaction_id); + if (message.tablet_alias != 
null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.session_state_changes != null && Object.hasOwnProperty.call(message, "session_state_changes")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.session_state_changes); return writer; }; /** - * Encodes the specified ReleaseResponse message, length delimited. Does not implicitly {@link query.ReleaseResponse.verify|verify} messages. + * Encodes the specified BeginExecuteResponse message, length delimited. Does not implicitly {@link query.BeginExecuteResponse.verify|verify} messages. * @function encodeDelimited - * @memberof query.ReleaseResponse + * @memberof query.BeginExecuteResponse * @static - * @param {query.IReleaseResponse} message ReleaseResponse message or plain object to encode + * @param {query.IBeginExecuteResponse} message BeginExecuteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReleaseResponse.encodeDelimited = function encodeDelimited(message, writer) { + BeginExecuteResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReleaseResponse message from the specified reader or buffer. + * Decodes a BeginExecuteResponse message from the specified reader or buffer. 
* @function decode - * @memberof query.ReleaseResponse + * @memberof query.BeginExecuteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.ReleaseResponse} ReleaseResponse + * @returns {query.BeginExecuteResponse} BeginExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReleaseResponse.decode = function decode(reader, length) { + BeginExecuteResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ReleaseResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.BeginExecuteResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.error = $root.vtrpc.RPCError.decode(reader, reader.uint32()); + break; + } + case 2: { + message.result = $root.query.QueryResult.decode(reader, reader.uint32()); + break; + } + case 3: { + message.transaction_id = reader.int64(); + break; + } + case 4: { + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 5: { + message.session_state_changes = reader.string(); + break; + } default: reader.skipType(tag & 7); break; @@ -89246,108 +89868,191 @@ export const query = $root.query = (() => { }; /** - * Decodes a ReleaseResponse message from the specified reader or buffer, length delimited. + * Decodes a BeginExecuteResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.ReleaseResponse + * @memberof query.BeginExecuteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.ReleaseResponse} ReleaseResponse + * @returns {query.BeginExecuteResponse} BeginExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReleaseResponse.decodeDelimited = function decodeDelimited(reader) { + BeginExecuteResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReleaseResponse message. + * Verifies a BeginExecuteResponse message. * @function verify - * @memberof query.ReleaseResponse + * @memberof query.BeginExecuteResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReleaseResponse.verify = function verify(message) { + BeginExecuteResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.error != null && message.hasOwnProperty("error")) { + let error = $root.vtrpc.RPCError.verify(message.error); + if (error) + return "error." + error; + } + if (message.result != null && message.hasOwnProperty("result")) { + let error = $root.query.QueryResult.verify(message.result); + if (error) + return "result." 
+ error; + } + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) + return "transaction_id: integer|Long expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; + } + if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) + if (!$util.isString(message.session_state_changes)) + return "session_state_changes: string expected"; return null; }; /** - * Creates a ReleaseResponse message from a plain object. Also converts values to their respective internal types. + * Creates a BeginExecuteResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.ReleaseResponse + * @memberof query.BeginExecuteResponse * @static * @param {Object.} object Plain object - * @returns {query.ReleaseResponse} ReleaseResponse + * @returns {query.BeginExecuteResponse} BeginExecuteResponse */ - ReleaseResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.ReleaseResponse) + BeginExecuteResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.BeginExecuteResponse) return object; - return new $root.query.ReleaseResponse(); + let message = new $root.query.BeginExecuteResponse(); + if (object.error != null) { + if (typeof object.error !== "object") + throw TypeError(".query.BeginExecuteResponse.error: object expected"); + message.error = $root.vtrpc.RPCError.fromObject(object.error); + } + if (object.result != null) { + if (typeof object.result !== "object") + throw TypeError(".query.BeginExecuteResponse.result: object expected"); + message.result = 
$root.query.QueryResult.fromObject(object.result); + } + if (object.transaction_id != null) + if ($util.Long) + (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; + else if (typeof object.transaction_id === "string") + message.transaction_id = parseInt(object.transaction_id, 10); + else if (typeof object.transaction_id === "number") + message.transaction_id = object.transaction_id; + else if (typeof object.transaction_id === "object") + message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".query.BeginExecuteResponse.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } + if (object.session_state_changes != null) + message.session_state_changes = String(object.session_state_changes); + return message; }; /** - * Creates a plain object from a ReleaseResponse message. Also converts values to other types if specified. + * Creates a plain object from a BeginExecuteResponse message. Also converts values to other types if specified. * @function toObject - * @memberof query.ReleaseResponse + * @memberof query.BeginExecuteResponse * @static - * @param {query.ReleaseResponse} message ReleaseResponse + * @param {query.BeginExecuteResponse} message BeginExecuteResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReleaseResponse.toObject = function toObject() { - return {}; + BeginExecuteResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.error = null; + object.result = null; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? 
long.toNumber() : long; + } else + object.transaction_id = options.longs === String ? "0" : 0; + object.tablet_alias = null; + object.session_state_changes = ""; + } + if (message.error != null && message.hasOwnProperty("error")) + object.error = $root.vtrpc.RPCError.toObject(message.error, options); + if (message.result != null && message.hasOwnProperty("result")) + object.result = $root.query.QueryResult.toObject(message.result, options); + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (typeof message.transaction_id === "number") + object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; + else + object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) + object.session_state_changes = message.session_state_changes; + return object; }; /** - * Converts this ReleaseResponse to JSON. + * Converts this BeginExecuteResponse to JSON. 
* @function toJSON - * @memberof query.ReleaseResponse + * @memberof query.BeginExecuteResponse * @instance * @returns {Object.} JSON object */ - ReleaseResponse.prototype.toJSON = function toJSON() { + BeginExecuteResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReleaseResponse + * Gets the default type url for BeginExecuteResponse * @function getTypeUrl - * @memberof query.ReleaseResponse + * @memberof query.BeginExecuteResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReleaseResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + BeginExecuteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.ReleaseResponse"; + return typeUrlPrefix + "/query.BeginExecuteResponse"; }; - return ReleaseResponse; + return BeginExecuteResponse; })(); - query.StreamHealthRequest = (function() { + query.BeginStreamExecuteRequest = (function() { /** - * Properties of a StreamHealthRequest. + * Properties of a BeginStreamExecuteRequest. 
* @memberof query - * @interface IStreamHealthRequest + * @interface IBeginStreamExecuteRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] BeginStreamExecuteRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] BeginStreamExecuteRequest immediate_caller_id + * @property {query.ITarget|null} [target] BeginStreamExecuteRequest target + * @property {query.IBoundQuery|null} [query] BeginStreamExecuteRequest query + * @property {query.IExecuteOptions|null} [options] BeginStreamExecuteRequest options + * @property {Array.|null} [pre_queries] BeginStreamExecuteRequest pre_queries + * @property {number|Long|null} [reserved_id] BeginStreamExecuteRequest reserved_id */ /** - * Constructs a new StreamHealthRequest. + * Constructs a new BeginStreamExecuteRequest. * @memberof query - * @classdesc Represents a StreamHealthRequest. - * @implements IStreamHealthRequest + * @classdesc Represents a BeginStreamExecuteRequest. + * @implements IBeginStreamExecuteRequest * @constructor - * @param {query.IStreamHealthRequest=} [properties] Properties to set + * @param {query.IBeginStreamExecuteRequest=} [properties] Properties to set */ - function StreamHealthRequest(properties) { + function BeginStreamExecuteRequest(properties) { + this.pre_queries = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -89355,184 +90060,394 @@ export const query = $root.query = (() => { } /** - * Creates a new StreamHealthRequest instance using the specified properties. + * BeginStreamExecuteRequest effective_caller_id. + * @member {vtrpc.ICallerID|null|undefined} effective_caller_id + * @memberof query.BeginStreamExecuteRequest + * @instance + */ + BeginStreamExecuteRequest.prototype.effective_caller_id = null; + + /** + * BeginStreamExecuteRequest immediate_caller_id. 
+ * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id + * @memberof query.BeginStreamExecuteRequest + * @instance + */ + BeginStreamExecuteRequest.prototype.immediate_caller_id = null; + + /** + * BeginStreamExecuteRequest target. + * @member {query.ITarget|null|undefined} target + * @memberof query.BeginStreamExecuteRequest + * @instance + */ + BeginStreamExecuteRequest.prototype.target = null; + + /** + * BeginStreamExecuteRequest query. + * @member {query.IBoundQuery|null|undefined} query + * @memberof query.BeginStreamExecuteRequest + * @instance + */ + BeginStreamExecuteRequest.prototype.query = null; + + /** + * BeginStreamExecuteRequest options. + * @member {query.IExecuteOptions|null|undefined} options + * @memberof query.BeginStreamExecuteRequest + * @instance + */ + BeginStreamExecuteRequest.prototype.options = null; + + /** + * BeginStreamExecuteRequest pre_queries. + * @member {Array.} pre_queries + * @memberof query.BeginStreamExecuteRequest + * @instance + */ + BeginStreamExecuteRequest.prototype.pre_queries = $util.emptyArray; + + /** + * BeginStreamExecuteRequest reserved_id. + * @member {number|Long} reserved_id + * @memberof query.BeginStreamExecuteRequest + * @instance + */ + BeginStreamExecuteRequest.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * Creates a new BeginStreamExecuteRequest instance using the specified properties. 
* @function create - * @memberof query.StreamHealthRequest + * @memberof query.BeginStreamExecuteRequest * @static - * @param {query.IStreamHealthRequest=} [properties] Properties to set - * @returns {query.StreamHealthRequest} StreamHealthRequest instance + * @param {query.IBeginStreamExecuteRequest=} [properties] Properties to set + * @returns {query.BeginStreamExecuteRequest} BeginStreamExecuteRequest instance */ - StreamHealthRequest.create = function create(properties) { - return new StreamHealthRequest(properties); + BeginStreamExecuteRequest.create = function create(properties) { + return new BeginStreamExecuteRequest(properties); }; /** - * Encodes the specified StreamHealthRequest message. Does not implicitly {@link query.StreamHealthRequest.verify|verify} messages. + * Encodes the specified BeginStreamExecuteRequest message. Does not implicitly {@link query.BeginStreamExecuteRequest.verify|verify} messages. * @function encode - * @memberof query.StreamHealthRequest + * @memberof query.BeginStreamExecuteRequest * @static - * @param {query.IStreamHealthRequest} message StreamHealthRequest message or plain object to encode + * @param {query.IBeginStreamExecuteRequest} message BeginStreamExecuteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StreamHealthRequest.encode = function encode(message, writer) { + BeginStreamExecuteRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) + $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) + $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if 
(message.target != null && Object.hasOwnProperty.call(message, "target")) + $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.query != null && Object.hasOwnProperty.call(message, "query")) + $root.query.BoundQuery.encode(message.query, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.options != null && Object.hasOwnProperty.call(message, "options")) + $root.query.ExecuteOptions.encode(message.options, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.pre_queries != null && message.pre_queries.length) + for (let i = 0; i < message.pre_queries.length; ++i) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.pre_queries[i]); + if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) + writer.uint32(/* id 7, wireType 0 =*/56).int64(message.reserved_id); return writer; }; /** - * Encodes the specified StreamHealthRequest message, length delimited. Does not implicitly {@link query.StreamHealthRequest.verify|verify} messages. + * Encodes the specified BeginStreamExecuteRequest message, length delimited. Does not implicitly {@link query.BeginStreamExecuteRequest.verify|verify} messages. * @function encodeDelimited - * @memberof query.StreamHealthRequest + * @memberof query.BeginStreamExecuteRequest * @static - * @param {query.IStreamHealthRequest} message StreamHealthRequest message or plain object to encode + * @param {query.IBeginStreamExecuteRequest} message BeginStreamExecuteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StreamHealthRequest.encodeDelimited = function encodeDelimited(message, writer) { + BeginStreamExecuteRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a StreamHealthRequest message from the specified reader or buffer. 
+ * Decodes a BeginStreamExecuteRequest message from the specified reader or buffer. * @function decode - * @memberof query.StreamHealthRequest + * @memberof query.BeginStreamExecuteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.StreamHealthRequest} StreamHealthRequest + * @returns {query.BeginStreamExecuteRequest} BeginStreamExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamHealthRequest.decode = function decode(reader, length) { + BeginStreamExecuteRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.StreamHealthRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.BeginStreamExecuteRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; - + case 1: { + message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); + break; + } + case 2: { + message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); + break; + } + case 3: { + message.target = $root.query.Target.decode(reader, reader.uint32()); + break; + } + case 4: { + message.query = $root.query.BoundQuery.decode(reader, reader.uint32()); + break; + } + case 5: { + message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); + break; + } + case 6: { + if (!(message.pre_queries && message.pre_queries.length)) + message.pre_queries = []; + message.pre_queries.push(reader.string()); + break; + } + case 7: { + message.reserved_id = reader.int64(); + break; + } + default: + reader.skipType(tag 
& 7); + break; + } + } + return message; + }; + /** - * Decodes a StreamHealthRequest message from the specified reader or buffer, length delimited. + * Decodes a BeginStreamExecuteRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.StreamHealthRequest + * @memberof query.BeginStreamExecuteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.StreamHealthRequest} StreamHealthRequest + * @returns {query.BeginStreamExecuteRequest} BeginStreamExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamHealthRequest.decodeDelimited = function decodeDelimited(reader) { + BeginStreamExecuteRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a StreamHealthRequest message. + * Verifies a BeginStreamExecuteRequest message. * @function verify - * @memberof query.StreamHealthRequest + * @memberof query.BeginStreamExecuteRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - StreamHealthRequest.verify = function verify(message) { + BeginStreamExecuteRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { + let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); + if (error) + return "effective_caller_id." + error; + } + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { + let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); + if (error) + return "immediate_caller_id." 
+ error; + } + if (message.target != null && message.hasOwnProperty("target")) { + let error = $root.query.Target.verify(message.target); + if (error) + return "target." + error; + } + if (message.query != null && message.hasOwnProperty("query")) { + let error = $root.query.BoundQuery.verify(message.query); + if (error) + return "query." + error; + } + if (message.options != null && message.hasOwnProperty("options")) { + let error = $root.query.ExecuteOptions.verify(message.options); + if (error) + return "options." + error; + } + if (message.pre_queries != null && message.hasOwnProperty("pre_queries")) { + if (!Array.isArray(message.pre_queries)) + return "pre_queries: array expected"; + for (let i = 0; i < message.pre_queries.length; ++i) + if (!$util.isString(message.pre_queries[i])) + return "pre_queries: string[] expected"; + } + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) + return "reserved_id: integer|Long expected"; return null; }; /** - * Creates a StreamHealthRequest message from a plain object. Also converts values to their respective internal types. + * Creates a BeginStreamExecuteRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.StreamHealthRequest + * @memberof query.BeginStreamExecuteRequest * @static * @param {Object.} object Plain object - * @returns {query.StreamHealthRequest} StreamHealthRequest + * @returns {query.BeginStreamExecuteRequest} BeginStreamExecuteRequest */ - StreamHealthRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.StreamHealthRequest) + BeginStreamExecuteRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.BeginStreamExecuteRequest) return object; - return new $root.query.StreamHealthRequest(); + let message = new $root.query.BeginStreamExecuteRequest(); + if (object.effective_caller_id != null) { + if (typeof object.effective_caller_id !== "object") + throw TypeError(".query.BeginStreamExecuteRequest.effective_caller_id: object expected"); + message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); + } + if (object.immediate_caller_id != null) { + if (typeof object.immediate_caller_id !== "object") + throw TypeError(".query.BeginStreamExecuteRequest.immediate_caller_id: object expected"); + message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); + } + if (object.target != null) { + if (typeof object.target !== "object") + throw TypeError(".query.BeginStreamExecuteRequest.target: object expected"); + message.target = $root.query.Target.fromObject(object.target); + } + if (object.query != null) { + if (typeof object.query !== "object") + throw TypeError(".query.BeginStreamExecuteRequest.query: object expected"); + message.query = $root.query.BoundQuery.fromObject(object.query); + } + if (object.options != null) { + if (typeof object.options !== "object") + throw TypeError(".query.BeginStreamExecuteRequest.options: object expected"); + message.options = $root.query.ExecuteOptions.fromObject(object.options); + } + if (object.pre_queries) { + if (!Array.isArray(object.pre_queries)) + 
throw TypeError(".query.BeginStreamExecuteRequest.pre_queries: array expected"); + message.pre_queries = []; + for (let i = 0; i < object.pre_queries.length; ++i) + message.pre_queries[i] = String(object.pre_queries[i]); + } + if (object.reserved_id != null) + if ($util.Long) + (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; + else if (typeof object.reserved_id === "string") + message.reserved_id = parseInt(object.reserved_id, 10); + else if (typeof object.reserved_id === "number") + message.reserved_id = object.reserved_id; + else if (typeof object.reserved_id === "object") + message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); + return message; }; /** - * Creates a plain object from a StreamHealthRequest message. Also converts values to other types if specified. + * Creates a plain object from a BeginStreamExecuteRequest message. Also converts values to other types if specified. * @function toObject - * @memberof query.StreamHealthRequest + * @memberof query.BeginStreamExecuteRequest * @static - * @param {query.StreamHealthRequest} message StreamHealthRequest + * @param {query.BeginStreamExecuteRequest} message BeginStreamExecuteRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - StreamHealthRequest.toObject = function toObject() { - return {}; + BeginStreamExecuteRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.pre_queries = []; + if (options.defaults) { + object.effective_caller_id = null; + object.immediate_caller_id = null; + object.target = null; + object.query = null; + object.options = null; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? 
long.toNumber() : long; + } else + object.reserved_id = options.longs === String ? "0" : 0; + } + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) + object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) + object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); + if (message.target != null && message.hasOwnProperty("target")) + object.target = $root.query.Target.toObject(message.target, options); + if (message.query != null && message.hasOwnProperty("query")) + object.query = $root.query.BoundQuery.toObject(message.query, options); + if (message.options != null && message.hasOwnProperty("options")) + object.options = $root.query.ExecuteOptions.toObject(message.options, options); + if (message.pre_queries && message.pre_queries.length) { + object.pre_queries = []; + for (let j = 0; j < message.pre_queries.length; ++j) + object.pre_queries[j] = message.pre_queries[j]; + } + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (typeof message.reserved_id === "number") + object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; + else + object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; + return object; }; /** - * Converts this StreamHealthRequest to JSON. + * Converts this BeginStreamExecuteRequest to JSON. 
* @function toJSON - * @memberof query.StreamHealthRequest + * @memberof query.BeginStreamExecuteRequest * @instance * @returns {Object.} JSON object */ - StreamHealthRequest.prototype.toJSON = function toJSON() { + BeginStreamExecuteRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for StreamHealthRequest + * Gets the default type url for BeginStreamExecuteRequest * @function getTypeUrl - * @memberof query.StreamHealthRequest + * @memberof query.BeginStreamExecuteRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - StreamHealthRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + BeginStreamExecuteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.StreamHealthRequest"; + return typeUrlPrefix + "/query.BeginStreamExecuteRequest"; }; - return StreamHealthRequest; + return BeginStreamExecuteRequest; })(); - query.RealtimeStats = (function() { + query.BeginStreamExecuteResponse = (function() { /** - * Properties of a RealtimeStats. + * Properties of a BeginStreamExecuteResponse. 
* @memberof query - * @interface IRealtimeStats - * @property {string|null} [health_error] RealtimeStats health_error - * @property {number|null} [replication_lag_seconds] RealtimeStats replication_lag_seconds - * @property {number|null} [binlog_players_count] RealtimeStats binlog_players_count - * @property {number|Long|null} [filtered_replication_lag_seconds] RealtimeStats filtered_replication_lag_seconds - * @property {number|null} [cpu_usage] RealtimeStats cpu_usage - * @property {number|null} [qps] RealtimeStats qps - * @property {Array.|null} [table_schema_changed] RealtimeStats table_schema_changed - * @property {Array.|null} [view_schema_changed] RealtimeStats view_schema_changed + * @interface IBeginStreamExecuteResponse + * @property {vtrpc.IRPCError|null} [error] BeginStreamExecuteResponse error + * @property {query.IQueryResult|null} [result] BeginStreamExecuteResponse result + * @property {number|Long|null} [transaction_id] BeginStreamExecuteResponse transaction_id + * @property {topodata.ITabletAlias|null} [tablet_alias] BeginStreamExecuteResponse tablet_alias + * @property {string|null} [session_state_changes] BeginStreamExecuteResponse session_state_changes */ /** - * Constructs a new RealtimeStats. + * Constructs a new BeginStreamExecuteResponse. * @memberof query - * @classdesc Represents a RealtimeStats. - * @implements IRealtimeStats + * @classdesc Represents a BeginStreamExecuteResponse. 
+ * @implements IBeginStreamExecuteResponse * @constructor - * @param {query.IRealtimeStats=} [properties] Properties to set + * @param {query.IBeginStreamExecuteResponse=} [properties] Properties to set */ - function RealtimeStats(properties) { - this.table_schema_changed = []; - this.view_schema_changed = []; + function BeginStreamExecuteResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -89540,179 +90455,131 @@ export const query = $root.query = (() => { } /** - * RealtimeStats health_error. - * @member {string} health_error - * @memberof query.RealtimeStats - * @instance - */ - RealtimeStats.prototype.health_error = ""; - - /** - * RealtimeStats replication_lag_seconds. - * @member {number} replication_lag_seconds - * @memberof query.RealtimeStats - * @instance - */ - RealtimeStats.prototype.replication_lag_seconds = 0; - - /** - * RealtimeStats binlog_players_count. - * @member {number} binlog_players_count - * @memberof query.RealtimeStats - * @instance - */ - RealtimeStats.prototype.binlog_players_count = 0; - - /** - * RealtimeStats filtered_replication_lag_seconds. - * @member {number|Long} filtered_replication_lag_seconds - * @memberof query.RealtimeStats + * BeginStreamExecuteResponse error. + * @member {vtrpc.IRPCError|null|undefined} error + * @memberof query.BeginStreamExecuteResponse * @instance */ - RealtimeStats.prototype.filtered_replication_lag_seconds = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + BeginStreamExecuteResponse.prototype.error = null; /** - * RealtimeStats cpu_usage. - * @member {number} cpu_usage - * @memberof query.RealtimeStats + * BeginStreamExecuteResponse result. + * @member {query.IQueryResult|null|undefined} result + * @memberof query.BeginStreamExecuteResponse * @instance */ - RealtimeStats.prototype.cpu_usage = 0; + BeginStreamExecuteResponse.prototype.result = null; /** - * RealtimeStats qps. 
- * @member {number} qps - * @memberof query.RealtimeStats + * BeginStreamExecuteResponse transaction_id. + * @member {number|Long} transaction_id + * @memberof query.BeginStreamExecuteResponse * @instance */ - RealtimeStats.prototype.qps = 0; + BeginStreamExecuteResponse.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * RealtimeStats table_schema_changed. - * @member {Array.} table_schema_changed - * @memberof query.RealtimeStats + * BeginStreamExecuteResponse tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof query.BeginStreamExecuteResponse * @instance */ - RealtimeStats.prototype.table_schema_changed = $util.emptyArray; + BeginStreamExecuteResponse.prototype.tablet_alias = null; /** - * RealtimeStats view_schema_changed. - * @member {Array.} view_schema_changed - * @memberof query.RealtimeStats + * BeginStreamExecuteResponse session_state_changes. + * @member {string} session_state_changes + * @memberof query.BeginStreamExecuteResponse * @instance */ - RealtimeStats.prototype.view_schema_changed = $util.emptyArray; + BeginStreamExecuteResponse.prototype.session_state_changes = ""; /** - * Creates a new RealtimeStats instance using the specified properties. + * Creates a new BeginStreamExecuteResponse instance using the specified properties. 
* @function create - * @memberof query.RealtimeStats + * @memberof query.BeginStreamExecuteResponse * @static - * @param {query.IRealtimeStats=} [properties] Properties to set - * @returns {query.RealtimeStats} RealtimeStats instance + * @param {query.IBeginStreamExecuteResponse=} [properties] Properties to set + * @returns {query.BeginStreamExecuteResponse} BeginStreamExecuteResponse instance */ - RealtimeStats.create = function create(properties) { - return new RealtimeStats(properties); + BeginStreamExecuteResponse.create = function create(properties) { + return new BeginStreamExecuteResponse(properties); }; /** - * Encodes the specified RealtimeStats message. Does not implicitly {@link query.RealtimeStats.verify|verify} messages. + * Encodes the specified BeginStreamExecuteResponse message. Does not implicitly {@link query.BeginStreamExecuteResponse.verify|verify} messages. * @function encode - * @memberof query.RealtimeStats + * @memberof query.BeginStreamExecuteResponse * @static - * @param {query.IRealtimeStats} message RealtimeStats message or plain object to encode + * @param {query.IBeginStreamExecuteResponse} message BeginStreamExecuteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RealtimeStats.encode = function encode(message, writer) { + BeginStreamExecuteResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.health_error != null && Object.hasOwnProperty.call(message, "health_error")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.health_error); - if (message.replication_lag_seconds != null && Object.hasOwnProperty.call(message, "replication_lag_seconds")) - writer.uint32(/* id 2, wireType 0 =*/16).uint32(message.replication_lag_seconds); - if (message.binlog_players_count != null && Object.hasOwnProperty.call(message, "binlog_players_count")) - writer.uint32(/* id 3, wireType 0 
=*/24).int32(message.binlog_players_count); - if (message.filtered_replication_lag_seconds != null && Object.hasOwnProperty.call(message, "filtered_replication_lag_seconds")) - writer.uint32(/* id 4, wireType 0 =*/32).int64(message.filtered_replication_lag_seconds); - if (message.cpu_usage != null && Object.hasOwnProperty.call(message, "cpu_usage")) - writer.uint32(/* id 5, wireType 1 =*/41).double(message.cpu_usage); - if (message.qps != null && Object.hasOwnProperty.call(message, "qps")) - writer.uint32(/* id 6, wireType 1 =*/49).double(message.qps); - if (message.table_schema_changed != null && message.table_schema_changed.length) - for (let i = 0; i < message.table_schema_changed.length; ++i) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.table_schema_changed[i]); - if (message.view_schema_changed != null && message.view_schema_changed.length) - for (let i = 0; i < message.view_schema_changed.length; ++i) - writer.uint32(/* id 8, wireType 2 =*/66).string(message.view_schema_changed[i]); + if (message.error != null && Object.hasOwnProperty.call(message, "error")) + $root.vtrpc.RPCError.encode(message.error, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.result != null && Object.hasOwnProperty.call(message, "result")) + $root.query.QueryResult.encode(message.result, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) + writer.uint32(/* id 3, wireType 0 =*/24).int64(message.transaction_id); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.session_state_changes != null && Object.hasOwnProperty.call(message, "session_state_changes")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.session_state_changes); return writer; }; /** - * Encodes the 
specified RealtimeStats message, length delimited. Does not implicitly {@link query.RealtimeStats.verify|verify} messages. + * Encodes the specified BeginStreamExecuteResponse message, length delimited. Does not implicitly {@link query.BeginStreamExecuteResponse.verify|verify} messages. * @function encodeDelimited - * @memberof query.RealtimeStats + * @memberof query.BeginStreamExecuteResponse * @static - * @param {query.IRealtimeStats} message RealtimeStats message or plain object to encode + * @param {query.IBeginStreamExecuteResponse} message BeginStreamExecuteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RealtimeStats.encodeDelimited = function encodeDelimited(message, writer) { + BeginStreamExecuteResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RealtimeStats message from the specified reader or buffer. + * Decodes a BeginStreamExecuteResponse message from the specified reader or buffer. * @function decode - * @memberof query.RealtimeStats + * @memberof query.BeginStreamExecuteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.RealtimeStats} RealtimeStats + * @returns {query.BeginStreamExecuteResponse} BeginStreamExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RealtimeStats.decode = function decode(reader, length) { + BeginStreamExecuteResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.RealtimeStats(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.BeginStreamExecuteResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.health_error = reader.string(); + message.error = $root.vtrpc.RPCError.decode(reader, reader.uint32()); break; } case 2: { - message.replication_lag_seconds = reader.uint32(); + message.result = $root.query.QueryResult.decode(reader, reader.uint32()); break; } case 3: { - message.binlog_players_count = reader.int32(); + message.transaction_id = reader.int64(); break; } case 4: { - message.filtered_replication_lag_seconds = reader.int64(); + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); break; } case 5: { - message.cpu_usage = reader.double(); - break; - } - case 6: { - message.qps = reader.double(); - break; - } - case 7: { - if (!(message.table_schema_changed && message.table_schema_changed.length)) - message.table_schema_changed = []; - message.table_schema_changed.push(reader.string()); - break; - } - case 8: { - if (!(message.view_schema_changed && message.view_schema_changed.length)) - message.view_schema_changed = []; - message.view_schema_changed.push(reader.string()); + message.session_state_changes = reader.string(); break; } default: @@ -89724,222 +90591,187 @@ export const query = $root.query = (() => { }; /** - * Decodes a RealtimeStats message from the specified reader or buffer, length delimited. + * Decodes a BeginStreamExecuteResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.RealtimeStats + * @memberof query.BeginStreamExecuteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.RealtimeStats} RealtimeStats + * @returns {query.BeginStreamExecuteResponse} BeginStreamExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RealtimeStats.decodeDelimited = function decodeDelimited(reader) { + BeginStreamExecuteResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RealtimeStats message. + * Verifies a BeginStreamExecuteResponse message. * @function verify - * @memberof query.RealtimeStats + * @memberof query.BeginStreamExecuteResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RealtimeStats.verify = function verify(message) { + BeginStreamExecuteResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.health_error != null && message.hasOwnProperty("health_error")) - if (!$util.isString(message.health_error)) - return "health_error: string expected"; - if (message.replication_lag_seconds != null && message.hasOwnProperty("replication_lag_seconds")) - if (!$util.isInteger(message.replication_lag_seconds)) - return "replication_lag_seconds: integer expected"; - if (message.binlog_players_count != null && message.hasOwnProperty("binlog_players_count")) - if (!$util.isInteger(message.binlog_players_count)) - return "binlog_players_count: integer expected"; - if (message.filtered_replication_lag_seconds != null && message.hasOwnProperty("filtered_replication_lag_seconds")) - if 
(!$util.isInteger(message.filtered_replication_lag_seconds) && !(message.filtered_replication_lag_seconds && $util.isInteger(message.filtered_replication_lag_seconds.low) && $util.isInteger(message.filtered_replication_lag_seconds.high))) - return "filtered_replication_lag_seconds: integer|Long expected"; - if (message.cpu_usage != null && message.hasOwnProperty("cpu_usage")) - if (typeof message.cpu_usage !== "number") - return "cpu_usage: number expected"; - if (message.qps != null && message.hasOwnProperty("qps")) - if (typeof message.qps !== "number") - return "qps: number expected"; - if (message.table_schema_changed != null && message.hasOwnProperty("table_schema_changed")) { - if (!Array.isArray(message.table_schema_changed)) - return "table_schema_changed: array expected"; - for (let i = 0; i < message.table_schema_changed.length; ++i) - if (!$util.isString(message.table_schema_changed[i])) - return "table_schema_changed: string[] expected"; + if (message.error != null && message.hasOwnProperty("error")) { + let error = $root.vtrpc.RPCError.verify(message.error); + if (error) + return "error." + error; } - if (message.view_schema_changed != null && message.hasOwnProperty("view_schema_changed")) { - if (!Array.isArray(message.view_schema_changed)) - return "view_schema_changed: array expected"; - for (let i = 0; i < message.view_schema_changed.length; ++i) - if (!$util.isString(message.view_schema_changed[i])) - return "view_schema_changed: string[] expected"; + if (message.result != null && message.hasOwnProperty("result")) { + let error = $root.query.QueryResult.verify(message.result); + if (error) + return "result." 
+ error; + } + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) + return "transaction_id: integer|Long expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; } + if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) + if (!$util.isString(message.session_state_changes)) + return "session_state_changes: string expected"; return null; }; /** - * Creates a RealtimeStats message from a plain object. Also converts values to their respective internal types. + * Creates a BeginStreamExecuteResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.RealtimeStats + * @memberof query.BeginStreamExecuteResponse * @static * @param {Object.} object Plain object - * @returns {query.RealtimeStats} RealtimeStats + * @returns {query.BeginStreamExecuteResponse} BeginStreamExecuteResponse */ - RealtimeStats.fromObject = function fromObject(object) { - if (object instanceof $root.query.RealtimeStats) + BeginStreamExecuteResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.BeginStreamExecuteResponse) return object; - let message = new $root.query.RealtimeStats(); - if (object.health_error != null) - message.health_error = String(object.health_error); - if (object.replication_lag_seconds != null) - message.replication_lag_seconds = object.replication_lag_seconds >>> 0; - if (object.binlog_players_count != null) - message.binlog_players_count = object.binlog_players_count | 0; - if (object.filtered_replication_lag_seconds != null) - if ($util.Long) - 
(message.filtered_replication_lag_seconds = $util.Long.fromValue(object.filtered_replication_lag_seconds)).unsigned = false; - else if (typeof object.filtered_replication_lag_seconds === "string") - message.filtered_replication_lag_seconds = parseInt(object.filtered_replication_lag_seconds, 10); - else if (typeof object.filtered_replication_lag_seconds === "number") - message.filtered_replication_lag_seconds = object.filtered_replication_lag_seconds; - else if (typeof object.filtered_replication_lag_seconds === "object") - message.filtered_replication_lag_seconds = new $util.LongBits(object.filtered_replication_lag_seconds.low >>> 0, object.filtered_replication_lag_seconds.high >>> 0).toNumber(); - if (object.cpu_usage != null) - message.cpu_usage = Number(object.cpu_usage); - if (object.qps != null) - message.qps = Number(object.qps); - if (object.table_schema_changed) { - if (!Array.isArray(object.table_schema_changed)) - throw TypeError(".query.RealtimeStats.table_schema_changed: array expected"); - message.table_schema_changed = []; - for (let i = 0; i < object.table_schema_changed.length; ++i) - message.table_schema_changed[i] = String(object.table_schema_changed[i]); + let message = new $root.query.BeginStreamExecuteResponse(); + if (object.error != null) { + if (typeof object.error !== "object") + throw TypeError(".query.BeginStreamExecuteResponse.error: object expected"); + message.error = $root.vtrpc.RPCError.fromObject(object.error); } - if (object.view_schema_changed) { - if (!Array.isArray(object.view_schema_changed)) - throw TypeError(".query.RealtimeStats.view_schema_changed: array expected"); - message.view_schema_changed = []; - for (let i = 0; i < object.view_schema_changed.length; ++i) - message.view_schema_changed[i] = String(object.view_schema_changed[i]); + if (object.result != null) { + if (typeof object.result !== "object") + throw TypeError(".query.BeginStreamExecuteResponse.result: object expected"); + message.result = 
$root.query.QueryResult.fromObject(object.result); } - return message; - }; - - /** - * Creates a plain object from a RealtimeStats message. Also converts values to other types if specified. - * @function toObject - * @memberof query.RealtimeStats + if (object.transaction_id != null) + if ($util.Long) + (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; + else if (typeof object.transaction_id === "string") + message.transaction_id = parseInt(object.transaction_id, 10); + else if (typeof object.transaction_id === "number") + message.transaction_id = object.transaction_id; + else if (typeof object.transaction_id === "object") + message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".query.BeginStreamExecuteResponse.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } + if (object.session_state_changes != null) + message.session_state_changes = String(object.session_state_changes); + return message; + }; + + /** + * Creates a plain object from a BeginStreamExecuteResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof query.BeginStreamExecuteResponse * @static - * @param {query.RealtimeStats} message RealtimeStats + * @param {query.BeginStreamExecuteResponse} message BeginStreamExecuteResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RealtimeStats.toObject = function toObject(message, options) { + BeginStreamExecuteResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) { - object.table_schema_changed = []; - object.view_schema_changed = []; - } if (options.defaults) { - object.health_error = ""; - object.replication_lag_seconds = 0; - object.binlog_players_count = 0; + object.error = null; + object.result = null; if ($util.Long) { let long = new $util.Long(0, 0, false); - object.filtered_replication_lag_seconds = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; } else - object.filtered_replication_lag_seconds = options.longs === String ? "0" : 0; - object.cpu_usage = 0; - object.qps = 0; + object.transaction_id = options.longs === String ? 
"0" : 0; + object.tablet_alias = null; + object.session_state_changes = ""; } - if (message.health_error != null && message.hasOwnProperty("health_error")) - object.health_error = message.health_error; - if (message.replication_lag_seconds != null && message.hasOwnProperty("replication_lag_seconds")) - object.replication_lag_seconds = message.replication_lag_seconds; - if (message.binlog_players_count != null && message.hasOwnProperty("binlog_players_count")) - object.binlog_players_count = message.binlog_players_count; - if (message.filtered_replication_lag_seconds != null && message.hasOwnProperty("filtered_replication_lag_seconds")) - if (typeof message.filtered_replication_lag_seconds === "number") - object.filtered_replication_lag_seconds = options.longs === String ? String(message.filtered_replication_lag_seconds) : message.filtered_replication_lag_seconds; + if (message.error != null && message.hasOwnProperty("error")) + object.error = $root.vtrpc.RPCError.toObject(message.error, options); + if (message.result != null && message.hasOwnProperty("result")) + object.result = $root.query.QueryResult.toObject(message.result, options); + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (typeof message.transaction_id === "number") + object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; else - object.filtered_replication_lag_seconds = options.longs === String ? $util.Long.prototype.toString.call(message.filtered_replication_lag_seconds) : options.longs === Number ? new $util.LongBits(message.filtered_replication_lag_seconds.low >>> 0, message.filtered_replication_lag_seconds.high >>> 0).toNumber() : message.filtered_replication_lag_seconds; - if (message.cpu_usage != null && message.hasOwnProperty("cpu_usage")) - object.cpu_usage = options.json && !isFinite(message.cpu_usage) ? 
String(message.cpu_usage) : message.cpu_usage; - if (message.qps != null && message.hasOwnProperty("qps")) - object.qps = options.json && !isFinite(message.qps) ? String(message.qps) : message.qps; - if (message.table_schema_changed && message.table_schema_changed.length) { - object.table_schema_changed = []; - for (let j = 0; j < message.table_schema_changed.length; ++j) - object.table_schema_changed[j] = message.table_schema_changed[j]; - } - if (message.view_schema_changed && message.view_schema_changed.length) { - object.view_schema_changed = []; - for (let j = 0; j < message.view_schema_changed.length; ++j) - object.view_schema_changed[j] = message.view_schema_changed[j]; - } + object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) + object.session_state_changes = message.session_state_changes; return object; }; /** - * Converts this RealtimeStats to JSON. + * Converts this BeginStreamExecuteResponse to JSON. 
* @function toJSON - * @memberof query.RealtimeStats + * @memberof query.BeginStreamExecuteResponse * @instance * @returns {Object.} JSON object */ - RealtimeStats.prototype.toJSON = function toJSON() { + BeginStreamExecuteResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RealtimeStats + * Gets the default type url for BeginStreamExecuteResponse * @function getTypeUrl - * @memberof query.RealtimeStats + * @memberof query.BeginStreamExecuteResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RealtimeStats.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + BeginStreamExecuteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.RealtimeStats"; + return typeUrlPrefix + "/query.BeginStreamExecuteResponse"; }; - return RealtimeStats; + return BeginStreamExecuteResponse; })(); - query.AggregateStats = (function() { + query.MessageStreamRequest = (function() { /** - * Properties of an AggregateStats. + * Properties of a MessageStreamRequest. 
* @memberof query - * @interface IAggregateStats - * @property {number|null} [healthy_tablet_count] AggregateStats healthy_tablet_count - * @property {number|null} [unhealthy_tablet_count] AggregateStats unhealthy_tablet_count - * @property {number|null} [replication_lag_seconds_min] AggregateStats replication_lag_seconds_min - * @property {number|null} [replication_lag_seconds_max] AggregateStats replication_lag_seconds_max + * @interface IMessageStreamRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] MessageStreamRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] MessageStreamRequest immediate_caller_id + * @property {query.ITarget|null} [target] MessageStreamRequest target + * @property {string|null} [name] MessageStreamRequest name */ /** - * Constructs a new AggregateStats. + * Constructs a new MessageStreamRequest. * @memberof query - * @classdesc Represents an AggregateStats. - * @implements IAggregateStats + * @classdesc Represents a MessageStreamRequest. + * @implements IMessageStreamRequest * @constructor - * @param {query.IAggregateStats=} [properties] Properties to set + * @param {query.IMessageStreamRequest=} [properties] Properties to set */ - function AggregateStats(properties) { + function MessageStreamRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -89947,117 +90779,117 @@ export const query = $root.query = (() => { } /** - * AggregateStats healthy_tablet_count. - * @member {number} healthy_tablet_count - * @memberof query.AggregateStats + * MessageStreamRequest effective_caller_id. + * @member {vtrpc.ICallerID|null|undefined} effective_caller_id + * @memberof query.MessageStreamRequest * @instance */ - AggregateStats.prototype.healthy_tablet_count = 0; + MessageStreamRequest.prototype.effective_caller_id = null; /** - * AggregateStats unhealthy_tablet_count. 
- * @member {number} unhealthy_tablet_count - * @memberof query.AggregateStats + * MessageStreamRequest immediate_caller_id. + * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id + * @memberof query.MessageStreamRequest * @instance */ - AggregateStats.prototype.unhealthy_tablet_count = 0; + MessageStreamRequest.prototype.immediate_caller_id = null; /** - * AggregateStats replication_lag_seconds_min. - * @member {number} replication_lag_seconds_min - * @memberof query.AggregateStats + * MessageStreamRequest target. + * @member {query.ITarget|null|undefined} target + * @memberof query.MessageStreamRequest * @instance */ - AggregateStats.prototype.replication_lag_seconds_min = 0; + MessageStreamRequest.prototype.target = null; /** - * AggregateStats replication_lag_seconds_max. - * @member {number} replication_lag_seconds_max - * @memberof query.AggregateStats + * MessageStreamRequest name. + * @member {string} name + * @memberof query.MessageStreamRequest * @instance */ - AggregateStats.prototype.replication_lag_seconds_max = 0; + MessageStreamRequest.prototype.name = ""; /** - * Creates a new AggregateStats instance using the specified properties. + * Creates a new MessageStreamRequest instance using the specified properties. * @function create - * @memberof query.AggregateStats + * @memberof query.MessageStreamRequest * @static - * @param {query.IAggregateStats=} [properties] Properties to set - * @returns {query.AggregateStats} AggregateStats instance + * @param {query.IMessageStreamRequest=} [properties] Properties to set + * @returns {query.MessageStreamRequest} MessageStreamRequest instance */ - AggregateStats.create = function create(properties) { - return new AggregateStats(properties); + MessageStreamRequest.create = function create(properties) { + return new MessageStreamRequest(properties); }; /** - * Encodes the specified AggregateStats message. Does not implicitly {@link query.AggregateStats.verify|verify} messages. 
+ * Encodes the specified MessageStreamRequest message. Does not implicitly {@link query.MessageStreamRequest.verify|verify} messages. * @function encode - * @memberof query.AggregateStats + * @memberof query.MessageStreamRequest * @static - * @param {query.IAggregateStats} message AggregateStats message or plain object to encode + * @param {query.IMessageStreamRequest} message MessageStreamRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - AggregateStats.encode = function encode(message, writer) { + MessageStreamRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.healthy_tablet_count != null && Object.hasOwnProperty.call(message, "healthy_tablet_count")) - writer.uint32(/* id 1, wireType 0 =*/8).int32(message.healthy_tablet_count); - if (message.unhealthy_tablet_count != null && Object.hasOwnProperty.call(message, "unhealthy_tablet_count")) - writer.uint32(/* id 2, wireType 0 =*/16).int32(message.unhealthy_tablet_count); - if (message.replication_lag_seconds_min != null && Object.hasOwnProperty.call(message, "replication_lag_seconds_min")) - writer.uint32(/* id 3, wireType 0 =*/24).uint32(message.replication_lag_seconds_min); - if (message.replication_lag_seconds_max != null && Object.hasOwnProperty.call(message, "replication_lag_seconds_max")) - writer.uint32(/* id 4, wireType 0 =*/32).uint32(message.replication_lag_seconds_max); + if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) + $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) + $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.target != null && 
Object.hasOwnProperty.call(message, "target")) + $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.name); return writer; }; /** - * Encodes the specified AggregateStats message, length delimited. Does not implicitly {@link query.AggregateStats.verify|verify} messages. + * Encodes the specified MessageStreamRequest message, length delimited. Does not implicitly {@link query.MessageStreamRequest.verify|verify} messages. * @function encodeDelimited - * @memberof query.AggregateStats + * @memberof query.MessageStreamRequest * @static - * @param {query.IAggregateStats} message AggregateStats message or plain object to encode + * @param {query.IMessageStreamRequest} message MessageStreamRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - AggregateStats.encodeDelimited = function encodeDelimited(message, writer) { + MessageStreamRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an AggregateStats message from the specified reader or buffer. + * Decodes a MessageStreamRequest message from the specified reader or buffer. 
* @function decode - * @memberof query.AggregateStats + * @memberof query.MessageStreamRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.AggregateStats} AggregateStats + * @returns {query.MessageStreamRequest} MessageStreamRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AggregateStats.decode = function decode(reader, length) { + MessageStreamRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.AggregateStats(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.MessageStreamRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.healthy_tablet_count = reader.int32(); + message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); break; } case 2: { - message.unhealthy_tablet_count = reader.int32(); + message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); break; } case 3: { - message.replication_lag_seconds_min = reader.uint32(); + message.target = $root.query.Target.decode(reader, reader.uint32()); break; } case 4: { - message.replication_lag_seconds_max = reader.uint32(); + message.name = reader.string(); break; } default: @@ -90069,151 +90901,162 @@ export const query = $root.query = (() => { }; /** - * Decodes an AggregateStats message from the specified reader or buffer, length delimited. + * Decodes a MessageStreamRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.AggregateStats + * @memberof query.MessageStreamRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.AggregateStats} AggregateStats + * @returns {query.MessageStreamRequest} MessageStreamRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AggregateStats.decodeDelimited = function decodeDelimited(reader) { + MessageStreamRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an AggregateStats message. + * Verifies a MessageStreamRequest message. * @function verify - * @memberof query.AggregateStats + * @memberof query.MessageStreamRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - AggregateStats.verify = function verify(message) { + MessageStreamRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.healthy_tablet_count != null && message.hasOwnProperty("healthy_tablet_count")) - if (!$util.isInteger(message.healthy_tablet_count)) - return "healthy_tablet_count: integer expected"; - if (message.unhealthy_tablet_count != null && message.hasOwnProperty("unhealthy_tablet_count")) - if (!$util.isInteger(message.unhealthy_tablet_count)) - return "unhealthy_tablet_count: integer expected"; - if (message.replication_lag_seconds_min != null && message.hasOwnProperty("replication_lag_seconds_min")) - if (!$util.isInteger(message.replication_lag_seconds_min)) - return "replication_lag_seconds_min: integer expected"; - if (message.replication_lag_seconds_max != null && message.hasOwnProperty("replication_lag_seconds_max")) - if 
(!$util.isInteger(message.replication_lag_seconds_max)) - return "replication_lag_seconds_max: integer expected"; + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { + let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); + if (error) + return "effective_caller_id." + error; + } + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { + let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); + if (error) + return "immediate_caller_id." + error; + } + if (message.target != null && message.hasOwnProperty("target")) { + let error = $root.query.Target.verify(message.target); + if (error) + return "target." + error; + } + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; return null; }; /** - * Creates an AggregateStats message from a plain object. Also converts values to their respective internal types. + * Creates a MessageStreamRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.AggregateStats + * @memberof query.MessageStreamRequest * @static * @param {Object.} object Plain object - * @returns {query.AggregateStats} AggregateStats + * @returns {query.MessageStreamRequest} MessageStreamRequest */ - AggregateStats.fromObject = function fromObject(object) { - if (object instanceof $root.query.AggregateStats) + MessageStreamRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.MessageStreamRequest) return object; - let message = new $root.query.AggregateStats(); - if (object.healthy_tablet_count != null) - message.healthy_tablet_count = object.healthy_tablet_count | 0; - if (object.unhealthy_tablet_count != null) - message.unhealthy_tablet_count = object.unhealthy_tablet_count | 0; - if (object.replication_lag_seconds_min != null) - message.replication_lag_seconds_min = object.replication_lag_seconds_min >>> 0; - if (object.replication_lag_seconds_max != null) - message.replication_lag_seconds_max = object.replication_lag_seconds_max >>> 0; + let message = new $root.query.MessageStreamRequest(); + if (object.effective_caller_id != null) { + if (typeof object.effective_caller_id !== "object") + throw TypeError(".query.MessageStreamRequest.effective_caller_id: object expected"); + message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); + } + if (object.immediate_caller_id != null) { + if (typeof object.immediate_caller_id !== "object") + throw TypeError(".query.MessageStreamRequest.immediate_caller_id: object expected"); + message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); + } + if (object.target != null) { + if (typeof object.target !== "object") + throw TypeError(".query.MessageStreamRequest.target: object expected"); + message.target = $root.query.Target.fromObject(object.target); + } + if (object.name != null) + message.name = String(object.name); return message; }; /** - * Creates a 
plain object from an AggregateStats message. Also converts values to other types if specified. + * Creates a plain object from a MessageStreamRequest message. Also converts values to other types if specified. * @function toObject - * @memberof query.AggregateStats + * @memberof query.MessageStreamRequest * @static - * @param {query.AggregateStats} message AggregateStats + * @param {query.MessageStreamRequest} message MessageStreamRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - AggregateStats.toObject = function toObject(message, options) { + MessageStreamRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.healthy_tablet_count = 0; - object.unhealthy_tablet_count = 0; - object.replication_lag_seconds_min = 0; - object.replication_lag_seconds_max = 0; + object.effective_caller_id = null; + object.immediate_caller_id = null; + object.target = null; + object.name = ""; } - if (message.healthy_tablet_count != null && message.hasOwnProperty("healthy_tablet_count")) - object.healthy_tablet_count = message.healthy_tablet_count; - if (message.unhealthy_tablet_count != null && message.hasOwnProperty("unhealthy_tablet_count")) - object.unhealthy_tablet_count = message.unhealthy_tablet_count; - if (message.replication_lag_seconds_min != null && message.hasOwnProperty("replication_lag_seconds_min")) - object.replication_lag_seconds_min = message.replication_lag_seconds_min; - if (message.replication_lag_seconds_max != null && message.hasOwnProperty("replication_lag_seconds_max")) - object.replication_lag_seconds_max = message.replication_lag_seconds_max; + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) + object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); + if (message.immediate_caller_id != null && 
message.hasOwnProperty("immediate_caller_id")) + object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); + if (message.target != null && message.hasOwnProperty("target")) + object.target = $root.query.Target.toObject(message.target, options); + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; return object; }; /** - * Converts this AggregateStats to JSON. + * Converts this MessageStreamRequest to JSON. * @function toJSON - * @memberof query.AggregateStats + * @memberof query.MessageStreamRequest * @instance * @returns {Object.} JSON object */ - AggregateStats.prototype.toJSON = function toJSON() { + MessageStreamRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for AggregateStats + * Gets the default type url for MessageStreamRequest * @function getTypeUrl - * @memberof query.AggregateStats + * @memberof query.MessageStreamRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - AggregateStats.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MessageStreamRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.AggregateStats"; + return typeUrlPrefix + "/query.MessageStreamRequest"; }; - return AggregateStats; + return MessageStreamRequest; })(); - query.StreamHealthResponse = (function() { + query.MessageStreamResponse = (function() { /** - * Properties of a StreamHealthResponse. + * Properties of a MessageStreamResponse. 
* @memberof query - * @interface IStreamHealthResponse - * @property {query.ITarget|null} [target] StreamHealthResponse target - * @property {boolean|null} [serving] StreamHealthResponse serving - * @property {number|Long|null} [tablet_externally_reparented_timestamp] StreamHealthResponse tablet_externally_reparented_timestamp - * @property {query.IRealtimeStats|null} [realtime_stats] StreamHealthResponse realtime_stats - * @property {topodata.ITabletAlias|null} [tablet_alias] StreamHealthResponse tablet_alias + * @interface IMessageStreamResponse + * @property {query.IQueryResult|null} [result] MessageStreamResponse result */ /** - * Constructs a new StreamHealthResponse. + * Constructs a new MessageStreamResponse. * @memberof query - * @classdesc Represents a StreamHealthResponse. - * @implements IStreamHealthResponse + * @classdesc Represents a MessageStreamResponse. + * @implements IMessageStreamResponse * @constructor - * @param {query.IStreamHealthResponse=} [properties] Properties to set + * @param {query.IMessageStreamResponse=} [properties] Properties to set */ - function StreamHealthResponse(properties) { + function MessageStreamResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -90221,131 +91064,75 @@ export const query = $root.query = (() => { } /** - * StreamHealthResponse target. - * @member {query.ITarget|null|undefined} target - * @memberof query.StreamHealthResponse - * @instance - */ - StreamHealthResponse.prototype.target = null; - - /** - * StreamHealthResponse serving. - * @member {boolean} serving - * @memberof query.StreamHealthResponse - * @instance - */ - StreamHealthResponse.prototype.serving = false; - - /** - * StreamHealthResponse tablet_externally_reparented_timestamp. 
- * @member {number|Long} tablet_externally_reparented_timestamp - * @memberof query.StreamHealthResponse - * @instance - */ - StreamHealthResponse.prototype.tablet_externally_reparented_timestamp = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * StreamHealthResponse realtime_stats. - * @member {query.IRealtimeStats|null|undefined} realtime_stats - * @memberof query.StreamHealthResponse - * @instance - */ - StreamHealthResponse.prototype.realtime_stats = null; - - /** - * StreamHealthResponse tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof query.StreamHealthResponse + * MessageStreamResponse result. + * @member {query.IQueryResult|null|undefined} result + * @memberof query.MessageStreamResponse * @instance */ - StreamHealthResponse.prototype.tablet_alias = null; + MessageStreamResponse.prototype.result = null; /** - * Creates a new StreamHealthResponse instance using the specified properties. + * Creates a new MessageStreamResponse instance using the specified properties. * @function create - * @memberof query.StreamHealthResponse + * @memberof query.MessageStreamResponse * @static - * @param {query.IStreamHealthResponse=} [properties] Properties to set - * @returns {query.StreamHealthResponse} StreamHealthResponse instance + * @param {query.IMessageStreamResponse=} [properties] Properties to set + * @returns {query.MessageStreamResponse} MessageStreamResponse instance */ - StreamHealthResponse.create = function create(properties) { - return new StreamHealthResponse(properties); + MessageStreamResponse.create = function create(properties) { + return new MessageStreamResponse(properties); }; /** - * Encodes the specified StreamHealthResponse message. Does not implicitly {@link query.StreamHealthResponse.verify|verify} messages. + * Encodes the specified MessageStreamResponse message. Does not implicitly {@link query.MessageStreamResponse.verify|verify} messages. 
* @function encode - * @memberof query.StreamHealthResponse + * @memberof query.MessageStreamResponse * @static - * @param {query.IStreamHealthResponse} message StreamHealthResponse message or plain object to encode + * @param {query.IMessageStreamResponse} message MessageStreamResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StreamHealthResponse.encode = function encode(message, writer) { + MessageStreamResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.target != null && Object.hasOwnProperty.call(message, "target")) - $root.query.Target.encode(message.target, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.serving != null && Object.hasOwnProperty.call(message, "serving")) - writer.uint32(/* id 2, wireType 0 =*/16).bool(message.serving); - if (message.tablet_externally_reparented_timestamp != null && Object.hasOwnProperty.call(message, "tablet_externally_reparented_timestamp")) - writer.uint32(/* id 3, wireType 0 =*/24).int64(message.tablet_externally_reparented_timestamp); - if (message.realtime_stats != null && Object.hasOwnProperty.call(message, "realtime_stats")) - $root.query.RealtimeStats.encode(message.realtime_stats, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.result != null && Object.hasOwnProperty.call(message, "result")) + $root.query.QueryResult.encode(message.result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified StreamHealthResponse message, length delimited. Does not implicitly {@link query.StreamHealthResponse.verify|verify} messages. 
+ * Encodes the specified MessageStreamResponse message, length delimited. Does not implicitly {@link query.MessageStreamResponse.verify|verify} messages. * @function encodeDelimited - * @memberof query.StreamHealthResponse + * @memberof query.MessageStreamResponse * @static - * @param {query.IStreamHealthResponse} message StreamHealthResponse message or plain object to encode + * @param {query.IMessageStreamResponse} message MessageStreamResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StreamHealthResponse.encodeDelimited = function encodeDelimited(message, writer) { + MessageStreamResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a StreamHealthResponse message from the specified reader or buffer. + * Decodes a MessageStreamResponse message from the specified reader or buffer. * @function decode - * @memberof query.StreamHealthResponse + * @memberof query.MessageStreamResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.StreamHealthResponse} StreamHealthResponse + * @returns {query.MessageStreamResponse} MessageStreamResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamHealthResponse.decode = function decode(reader, length) { + MessageStreamResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.StreamHealthResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.MessageStreamResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.target = $root.query.Target.decode(reader, reader.uint32()); - break; - } - case 2: { - message.serving = reader.bool(); - break; - } - case 3: { - message.tablet_externally_reparented_timestamp = reader.int64(); - break; - } - case 4: { - message.realtime_stats = $root.query.RealtimeStats.decode(reader, reader.uint32()); - break; - } - case 5: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + message.result = $root.query.QueryResult.decode(reader, reader.uint32()); break; } default: @@ -90357,206 +91144,132 @@ export const query = $root.query = (() => { }; /** - * Decodes a StreamHealthResponse message from the specified reader or buffer, length delimited. + * Decodes a MessageStreamResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.StreamHealthResponse + * @memberof query.MessageStreamResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.StreamHealthResponse} StreamHealthResponse + * @returns {query.MessageStreamResponse} MessageStreamResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StreamHealthResponse.decodeDelimited = function decodeDelimited(reader) { + MessageStreamResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a StreamHealthResponse message. + * Verifies a MessageStreamResponse message. 
* @function verify - * @memberof query.StreamHealthResponse + * @memberof query.MessageStreamResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - StreamHealthResponse.verify = function verify(message) { + MessageStreamResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.target != null && message.hasOwnProperty("target")) { - let error = $root.query.Target.verify(message.target); - if (error) - return "target." + error; - } - if (message.serving != null && message.hasOwnProperty("serving")) - if (typeof message.serving !== "boolean") - return "serving: boolean expected"; - if (message.tablet_externally_reparented_timestamp != null && message.hasOwnProperty("tablet_externally_reparented_timestamp")) - if (!$util.isInteger(message.tablet_externally_reparented_timestamp) && !(message.tablet_externally_reparented_timestamp && $util.isInteger(message.tablet_externally_reparented_timestamp.low) && $util.isInteger(message.tablet_externally_reparented_timestamp.high))) - return "tablet_externally_reparented_timestamp: integer|Long expected"; - if (message.realtime_stats != null && message.hasOwnProperty("realtime_stats")) { - let error = $root.query.RealtimeStats.verify(message.realtime_stats); - if (error) - return "realtime_stats." + error; - } - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (message.result != null && message.hasOwnProperty("result")) { + let error = $root.query.QueryResult.verify(message.result); if (error) - return "tablet_alias." + error; + return "result." + error; } return null; }; /** - * Creates a StreamHealthResponse message from a plain object. Also converts values to their respective internal types. 
+ * Creates a MessageStreamResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.StreamHealthResponse + * @memberof query.MessageStreamResponse * @static * @param {Object.} object Plain object - * @returns {query.StreamHealthResponse} StreamHealthResponse + * @returns {query.MessageStreamResponse} MessageStreamResponse */ - StreamHealthResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.StreamHealthResponse) + MessageStreamResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.MessageStreamResponse) return object; - let message = new $root.query.StreamHealthResponse(); - if (object.target != null) { - if (typeof object.target !== "object") - throw TypeError(".query.StreamHealthResponse.target: object expected"); - message.target = $root.query.Target.fromObject(object.target); - } - if (object.serving != null) - message.serving = Boolean(object.serving); - if (object.tablet_externally_reparented_timestamp != null) - if ($util.Long) - (message.tablet_externally_reparented_timestamp = $util.Long.fromValue(object.tablet_externally_reparented_timestamp)).unsigned = false; - else if (typeof object.tablet_externally_reparented_timestamp === "string") - message.tablet_externally_reparented_timestamp = parseInt(object.tablet_externally_reparented_timestamp, 10); - else if (typeof object.tablet_externally_reparented_timestamp === "number") - message.tablet_externally_reparented_timestamp = object.tablet_externally_reparented_timestamp; - else if (typeof object.tablet_externally_reparented_timestamp === "object") - message.tablet_externally_reparented_timestamp = new $util.LongBits(object.tablet_externally_reparented_timestamp.low >>> 0, object.tablet_externally_reparented_timestamp.high >>> 0).toNumber(); - if (object.realtime_stats != null) { - if (typeof object.realtime_stats !== "object") - throw 
TypeError(".query.StreamHealthResponse.realtime_stats: object expected"); - message.realtime_stats = $root.query.RealtimeStats.fromObject(object.realtime_stats); - } - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".query.StreamHealthResponse.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + let message = new $root.query.MessageStreamResponse(); + if (object.result != null) { + if (typeof object.result !== "object") + throw TypeError(".query.MessageStreamResponse.result: object expected"); + message.result = $root.query.QueryResult.fromObject(object.result); } return message; }; /** - * Creates a plain object from a StreamHealthResponse message. Also converts values to other types if specified. + * Creates a plain object from a MessageStreamResponse message. Also converts values to other types if specified. * @function toObject - * @memberof query.StreamHealthResponse + * @memberof query.MessageStreamResponse * @static - * @param {query.StreamHealthResponse} message StreamHealthResponse + * @param {query.MessageStreamResponse} message MessageStreamResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - StreamHealthResponse.toObject = function toObject(message, options) { + MessageStreamResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.target = null; - object.serving = false; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.tablet_externally_reparented_timestamp = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.tablet_externally_reparented_timestamp = options.longs === String ? 
"0" : 0; - object.realtime_stats = null; - object.tablet_alias = null; - } - if (message.target != null && message.hasOwnProperty("target")) - object.target = $root.query.Target.toObject(message.target, options); - if (message.serving != null && message.hasOwnProperty("serving")) - object.serving = message.serving; - if (message.tablet_externally_reparented_timestamp != null && message.hasOwnProperty("tablet_externally_reparented_timestamp")) - if (typeof message.tablet_externally_reparented_timestamp === "number") - object.tablet_externally_reparented_timestamp = options.longs === String ? String(message.tablet_externally_reparented_timestamp) : message.tablet_externally_reparented_timestamp; - else - object.tablet_externally_reparented_timestamp = options.longs === String ? $util.Long.prototype.toString.call(message.tablet_externally_reparented_timestamp) : options.longs === Number ? new $util.LongBits(message.tablet_externally_reparented_timestamp.low >>> 0, message.tablet_externally_reparented_timestamp.high >>> 0).toNumber() : message.tablet_externally_reparented_timestamp; - if (message.realtime_stats != null && message.hasOwnProperty("realtime_stats")) - object.realtime_stats = $root.query.RealtimeStats.toObject(message.realtime_stats, options); - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (options.defaults) + object.result = null; + if (message.result != null && message.hasOwnProperty("result")) + object.result = $root.query.QueryResult.toObject(message.result, options); return object; }; /** - * Converts this StreamHealthResponse to JSON. + * Converts this MessageStreamResponse to JSON. 
* @function toJSON - * @memberof query.StreamHealthResponse + * @memberof query.MessageStreamResponse * @instance * @returns {Object.} JSON object */ - StreamHealthResponse.prototype.toJSON = function toJSON() { + MessageStreamResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for StreamHealthResponse + * Gets the default type url for MessageStreamResponse * @function getTypeUrl - * @memberof query.StreamHealthResponse + * @memberof query.MessageStreamResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - StreamHealthResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MessageStreamResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.StreamHealthResponse"; + return typeUrlPrefix + "/query.MessageStreamResponse"; }; - return StreamHealthResponse; - })(); - - /** - * TransactionState enum. - * @name query.TransactionState - * @enum {number} - * @property {number} UNKNOWN=0 UNKNOWN value - * @property {number} PREPARE=1 PREPARE value - * @property {number} COMMIT=2 COMMIT value - * @property {number} ROLLBACK=3 ROLLBACK value - */ - query.TransactionState = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "UNKNOWN"] = 0; - values[valuesById[1] = "PREPARE"] = 1; - values[valuesById[2] = "COMMIT"] = 2; - values[valuesById[3] = "ROLLBACK"] = 3; - return values; + return MessageStreamResponse; })(); - query.TransactionMetadata = (function() { + query.MessageAckRequest = (function() { /** - * Properties of a TransactionMetadata. + * Properties of a MessageAckRequest. 
* @memberof query - * @interface ITransactionMetadata - * @property {string|null} [dtid] TransactionMetadata dtid - * @property {query.TransactionState|null} [state] TransactionMetadata state - * @property {number|Long|null} [time_created] TransactionMetadata time_created - * @property {Array.|null} [participants] TransactionMetadata participants + * @interface IMessageAckRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] MessageAckRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] MessageAckRequest immediate_caller_id + * @property {query.ITarget|null} [target] MessageAckRequest target + * @property {string|null} [name] MessageAckRequest name + * @property {Array.|null} [ids] MessageAckRequest ids */ /** - * Constructs a new TransactionMetadata. + * Constructs a new MessageAckRequest. * @memberof query - * @classdesc Represents a TransactionMetadata. - * @implements ITransactionMetadata + * @classdesc Represents a MessageAckRequest. + * @implements IMessageAckRequest * @constructor - * @param {query.ITransactionMetadata=} [properties] Properties to set + * @param {query.IMessageAckRequest=} [properties] Properties to set */ - function TransactionMetadata(properties) { - this.participants = []; + function MessageAckRequest(properties) { + this.ids = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -90564,120 +91277,134 @@ export const query = $root.query = (() => { } /** - * TransactionMetadata dtid. - * @member {string} dtid - * @memberof query.TransactionMetadata + * MessageAckRequest effective_caller_id. + * @member {vtrpc.ICallerID|null|undefined} effective_caller_id + * @memberof query.MessageAckRequest * @instance */ - TransactionMetadata.prototype.dtid = ""; + MessageAckRequest.prototype.effective_caller_id = null; /** - * TransactionMetadata state. 
- * @member {query.TransactionState} state - * @memberof query.TransactionMetadata + * MessageAckRequest immediate_caller_id. + * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id + * @memberof query.MessageAckRequest * @instance */ - TransactionMetadata.prototype.state = 0; + MessageAckRequest.prototype.immediate_caller_id = null; /** - * TransactionMetadata time_created. - * @member {number|Long} time_created - * @memberof query.TransactionMetadata + * MessageAckRequest target. + * @member {query.ITarget|null|undefined} target + * @memberof query.MessageAckRequest * @instance */ - TransactionMetadata.prototype.time_created = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + MessageAckRequest.prototype.target = null; /** - * TransactionMetadata participants. - * @member {Array.} participants - * @memberof query.TransactionMetadata + * MessageAckRequest name. + * @member {string} name + * @memberof query.MessageAckRequest * @instance */ - TransactionMetadata.prototype.participants = $util.emptyArray; + MessageAckRequest.prototype.name = ""; /** - * Creates a new TransactionMetadata instance using the specified properties. + * MessageAckRequest ids. + * @member {Array.} ids + * @memberof query.MessageAckRequest + * @instance + */ + MessageAckRequest.prototype.ids = $util.emptyArray; + + /** + * Creates a new MessageAckRequest instance using the specified properties. 
* @function create - * @memberof query.TransactionMetadata + * @memberof query.MessageAckRequest * @static - * @param {query.ITransactionMetadata=} [properties] Properties to set - * @returns {query.TransactionMetadata} TransactionMetadata instance + * @param {query.IMessageAckRequest=} [properties] Properties to set + * @returns {query.MessageAckRequest} MessageAckRequest instance */ - TransactionMetadata.create = function create(properties) { - return new TransactionMetadata(properties); + MessageAckRequest.create = function create(properties) { + return new MessageAckRequest(properties); }; /** - * Encodes the specified TransactionMetadata message. Does not implicitly {@link query.TransactionMetadata.verify|verify} messages. + * Encodes the specified MessageAckRequest message. Does not implicitly {@link query.MessageAckRequest.verify|verify} messages. * @function encode - * @memberof query.TransactionMetadata + * @memberof query.MessageAckRequest * @static - * @param {query.ITransactionMetadata} message TransactionMetadata message or plain object to encode + * @param {query.IMessageAckRequest} message MessageAckRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - TransactionMetadata.encode = function encode(message, writer) { + MessageAckRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.dtid != null && Object.hasOwnProperty.call(message, "dtid")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.dtid); - if (message.state != null && Object.hasOwnProperty.call(message, "state")) - writer.uint32(/* id 2, wireType 0 =*/16).int32(message.state); - if (message.time_created != null && Object.hasOwnProperty.call(message, "time_created")) - writer.uint32(/* id 3, wireType 0 =*/24).int64(message.time_created); - if (message.participants != null && message.participants.length) - for (let i = 0; i < 
message.participants.length; ++i) - $root.query.Target.encode(message.participants[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) + $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) + $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.target != null && Object.hasOwnProperty.call(message, "target")) + $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.name); + if (message.ids != null && message.ids.length) + for (let i = 0; i < message.ids.length; ++i) + $root.query.Value.encode(message.ids[i], writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); return writer; }; /** - * Encodes the specified TransactionMetadata message, length delimited. Does not implicitly {@link query.TransactionMetadata.verify|verify} messages. + * Encodes the specified MessageAckRequest message, length delimited. Does not implicitly {@link query.MessageAckRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof query.TransactionMetadata + * @memberof query.MessageAckRequest * @static - * @param {query.ITransactionMetadata} message TransactionMetadata message or plain object to encode + * @param {query.IMessageAckRequest} message MessageAckRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - TransactionMetadata.encodeDelimited = function encodeDelimited(message, writer) { + MessageAckRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a TransactionMetadata message from the specified reader or buffer. + * Decodes a MessageAckRequest message from the specified reader or buffer. * @function decode - * @memberof query.TransactionMetadata + * @memberof query.MessageAckRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.TransactionMetadata} TransactionMetadata + * @returns {query.MessageAckRequest} MessageAckRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TransactionMetadata.decode = function decode(reader, length) { + MessageAckRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.TransactionMetadata(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.MessageAckRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.dtid = reader.string(); + message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); break; } case 2: { - message.state = reader.int32(); + message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); break; } case 3: { - message.time_created = reader.int64(); + message.target = $root.query.Target.decode(reader, reader.uint32()); break; } case 4: { - if (!(message.participants && message.participants.length)) - message.participants = []; - message.participants.push($root.query.Target.decode(reader, reader.uint32())); + message.name = reader.string(); + break; + } + case 5: { + if (!(message.ids && message.ids.length)) + message.ids = []; + message.ids.push($root.query.Value.decode(reader, reader.uint32())); break; } default: @@ -90689,227 +91416,188 @@ export const query = $root.query = (() => { }; /** - * Decodes a TransactionMetadata message from the specified reader or buffer, length delimited. + * Decodes a MessageAckRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.TransactionMetadata + * @memberof query.MessageAckRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.TransactionMetadata} TransactionMetadata + * @returns {query.MessageAckRequest} MessageAckRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TransactionMetadata.decodeDelimited = function decodeDelimited(reader) { + MessageAckRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a TransactionMetadata message. 
+ * Verifies a MessageAckRequest message. * @function verify - * @memberof query.TransactionMetadata + * @memberof query.MessageAckRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - TransactionMetadata.verify = function verify(message) { + MessageAckRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.dtid != null && message.hasOwnProperty("dtid")) - if (!$util.isString(message.dtid)) - return "dtid: string expected"; - if (message.state != null && message.hasOwnProperty("state")) - switch (message.state) { - default: - return "state: enum value expected"; - case 0: - case 1: - case 2: - case 3: - break; - } - if (message.time_created != null && message.hasOwnProperty("time_created")) - if (!$util.isInteger(message.time_created) && !(message.time_created && $util.isInteger(message.time_created.low) && $util.isInteger(message.time_created.high))) - return "time_created: integer|Long expected"; - if (message.participants != null && message.hasOwnProperty("participants")) { - if (!Array.isArray(message.participants)) - return "participants: array expected"; - for (let i = 0; i < message.participants.length; ++i) { - let error = $root.query.Target.verify(message.participants[i]); + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { + let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); + if (error) + return "effective_caller_id." + error; + } + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { + let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); + if (error) + return "immediate_caller_id." + error; + } + if (message.target != null && message.hasOwnProperty("target")) { + let error = $root.query.Target.verify(message.target); + if (error) + return "target." 
+ error; + } + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.ids != null && message.hasOwnProperty("ids")) { + if (!Array.isArray(message.ids)) + return "ids: array expected"; + for (let i = 0; i < message.ids.length; ++i) { + let error = $root.query.Value.verify(message.ids[i]); if (error) - return "participants." + error; + return "ids." + error; } } return null; }; /** - * Creates a TransactionMetadata message from a plain object. Also converts values to their respective internal types. + * Creates a MessageAckRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.TransactionMetadata + * @memberof query.MessageAckRequest * @static * @param {Object.} object Plain object - * @returns {query.TransactionMetadata} TransactionMetadata + * @returns {query.MessageAckRequest} MessageAckRequest */ - TransactionMetadata.fromObject = function fromObject(object) { - if (object instanceof $root.query.TransactionMetadata) + MessageAckRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.MessageAckRequest) return object; - let message = new $root.query.TransactionMetadata(); - if (object.dtid != null) - message.dtid = String(object.dtid); - switch (object.state) { - default: - if (typeof object.state === "number") { - message.state = object.state; - break; - } - break; - case "UNKNOWN": - case 0: - message.state = 0; - break; - case "PREPARE": - case 1: - message.state = 1; - break; - case "COMMIT": - case 2: - message.state = 2; - break; - case "ROLLBACK": - case 3: - message.state = 3; - break; + let message = new $root.query.MessageAckRequest(); + if (object.effective_caller_id != null) { + if (typeof object.effective_caller_id !== "object") + throw TypeError(".query.MessageAckRequest.effective_caller_id: object expected"); + message.effective_caller_id = 
$root.vtrpc.CallerID.fromObject(object.effective_caller_id); } - if (object.time_created != null) - if ($util.Long) - (message.time_created = $util.Long.fromValue(object.time_created)).unsigned = false; - else if (typeof object.time_created === "string") - message.time_created = parseInt(object.time_created, 10); - else if (typeof object.time_created === "number") - message.time_created = object.time_created; - else if (typeof object.time_created === "object") - message.time_created = new $util.LongBits(object.time_created.low >>> 0, object.time_created.high >>> 0).toNumber(); - if (object.participants) { - if (!Array.isArray(object.participants)) - throw TypeError(".query.TransactionMetadata.participants: array expected"); - message.participants = []; - for (let i = 0; i < object.participants.length; ++i) { - if (typeof object.participants[i] !== "object") - throw TypeError(".query.TransactionMetadata.participants: object expected"); - message.participants[i] = $root.query.Target.fromObject(object.participants[i]); + if (object.immediate_caller_id != null) { + if (typeof object.immediate_caller_id !== "object") + throw TypeError(".query.MessageAckRequest.immediate_caller_id: object expected"); + message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); + } + if (object.target != null) { + if (typeof object.target !== "object") + throw TypeError(".query.MessageAckRequest.target: object expected"); + message.target = $root.query.Target.fromObject(object.target); + } + if (object.name != null) + message.name = String(object.name); + if (object.ids) { + if (!Array.isArray(object.ids)) + throw TypeError(".query.MessageAckRequest.ids: array expected"); + message.ids = []; + for (let i = 0; i < object.ids.length; ++i) { + if (typeof object.ids[i] !== "object") + throw TypeError(".query.MessageAckRequest.ids: object expected"); + message.ids[i] = $root.query.Value.fromObject(object.ids[i]); } } return message; }; /** - * Creates a 
plain object from a TransactionMetadata message. Also converts values to other types if specified. + * Creates a plain object from a MessageAckRequest message. Also converts values to other types if specified. * @function toObject - * @memberof query.TransactionMetadata + * @memberof query.MessageAckRequest * @static - * @param {query.TransactionMetadata} message TransactionMetadata + * @param {query.MessageAckRequest} message MessageAckRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - TransactionMetadata.toObject = function toObject(message, options) { + MessageAckRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) - object.participants = []; + object.ids = []; if (options.defaults) { - object.dtid = ""; - object.state = options.enums === String ? "UNKNOWN" : 0; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.time_created = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.time_created = options.longs === String ? "0" : 0; + object.effective_caller_id = null; + object.immediate_caller_id = null; + object.target = null; + object.name = ""; } - if (message.dtid != null && message.hasOwnProperty("dtid")) - object.dtid = message.dtid; - if (message.state != null && message.hasOwnProperty("state")) - object.state = options.enums === String ? $root.query.TransactionState[message.state] === undefined ? message.state : $root.query.TransactionState[message.state] : message.state; - if (message.time_created != null && message.hasOwnProperty("time_created")) - if (typeof message.time_created === "number") - object.time_created = options.longs === String ? String(message.time_created) : message.time_created; - else - object.time_created = options.longs === String ? 
$util.Long.prototype.toString.call(message.time_created) : options.longs === Number ? new $util.LongBits(message.time_created.low >>> 0, message.time_created.high >>> 0).toNumber() : message.time_created; - if (message.participants && message.participants.length) { - object.participants = []; - for (let j = 0; j < message.participants.length; ++j) - object.participants[j] = $root.query.Target.toObject(message.participants[j], options); + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) + object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) + object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); + if (message.target != null && message.hasOwnProperty("target")) + object.target = $root.query.Target.toObject(message.target, options); + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.ids && message.ids.length) { + object.ids = []; + for (let j = 0; j < message.ids.length; ++j) + object.ids[j] = $root.query.Value.toObject(message.ids[j], options); } return object; }; /** - * Converts this TransactionMetadata to JSON. + * Converts this MessageAckRequest to JSON. 
* @function toJSON - * @memberof query.TransactionMetadata + * @memberof query.MessageAckRequest * @instance * @returns {Object.} JSON object */ - TransactionMetadata.prototype.toJSON = function toJSON() { + MessageAckRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for TransactionMetadata + * Gets the default type url for MessageAckRequest * @function getTypeUrl - * @memberof query.TransactionMetadata + * @memberof query.MessageAckRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - TransactionMetadata.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MessageAckRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.TransactionMetadata"; + return typeUrlPrefix + "/query.MessageAckRequest"; }; - return TransactionMetadata; - })(); - - /** - * SchemaTableType enum. - * @name query.SchemaTableType - * @enum {number} - * @property {number} VIEWS=0 VIEWS value - * @property {number} TABLES=1 TABLES value - * @property {number} ALL=2 ALL value - */ - query.SchemaTableType = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "VIEWS"] = 0; - values[valuesById[1] = "TABLES"] = 1; - values[valuesById[2] = "ALL"] = 2; - return values; + return MessageAckRequest; })(); - query.GetSchemaRequest = (function() { + query.MessageAckResponse = (function() { /** - * Properties of a GetSchemaRequest. + * Properties of a MessageAckResponse. 
* @memberof query - * @interface IGetSchemaRequest - * @property {query.ITarget|null} [target] GetSchemaRequest target - * @property {query.SchemaTableType|null} [table_type] GetSchemaRequest table_type - * @property {Array.|null} [table_names] GetSchemaRequest table_names + * @interface IMessageAckResponse + * @property {query.IQueryResult|null} [result] MessageAckResponse result */ /** - * Constructs a new GetSchemaRequest. + * Constructs a new MessageAckResponse. * @memberof query - * @classdesc Represents a GetSchemaRequest. - * @implements IGetSchemaRequest + * @classdesc Represents a MessageAckResponse. + * @implements IMessageAckResponse * @constructor - * @param {query.IGetSchemaRequest=} [properties] Properties to set + * @param {query.IMessageAckResponse=} [properties] Properties to set */ - function GetSchemaRequest(properties) { - this.table_names = []; + function MessageAckResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -90917,106 +91605,75 @@ export const query = $root.query = (() => { } /** - * GetSchemaRequest target. - * @member {query.ITarget|null|undefined} target - * @memberof query.GetSchemaRequest - * @instance - */ - GetSchemaRequest.prototype.target = null; - - /** - * GetSchemaRequest table_type. - * @member {query.SchemaTableType} table_type - * @memberof query.GetSchemaRequest - * @instance - */ - GetSchemaRequest.prototype.table_type = 0; - - /** - * GetSchemaRequest table_names. - * @member {Array.} table_names - * @memberof query.GetSchemaRequest + * MessageAckResponse result. + * @member {query.IQueryResult|null|undefined} result + * @memberof query.MessageAckResponse * @instance */ - GetSchemaRequest.prototype.table_names = $util.emptyArray; + MessageAckResponse.prototype.result = null; /** - * Creates a new GetSchemaRequest instance using the specified properties. 
+ * Creates a new MessageAckResponse instance using the specified properties. * @function create - * @memberof query.GetSchemaRequest + * @memberof query.MessageAckResponse * @static - * @param {query.IGetSchemaRequest=} [properties] Properties to set - * @returns {query.GetSchemaRequest} GetSchemaRequest instance + * @param {query.IMessageAckResponse=} [properties] Properties to set + * @returns {query.MessageAckResponse} MessageAckResponse instance */ - GetSchemaRequest.create = function create(properties) { - return new GetSchemaRequest(properties); + MessageAckResponse.create = function create(properties) { + return new MessageAckResponse(properties); }; /** - * Encodes the specified GetSchemaRequest message. Does not implicitly {@link query.GetSchemaRequest.verify|verify} messages. + * Encodes the specified MessageAckResponse message. Does not implicitly {@link query.MessageAckResponse.verify|verify} messages. * @function encode - * @memberof query.GetSchemaRequest + * @memberof query.MessageAckResponse * @static - * @param {query.IGetSchemaRequest} message GetSchemaRequest message or plain object to encode + * @param {query.IMessageAckResponse} message MessageAckResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSchemaRequest.encode = function encode(message, writer) { + MessageAckResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.target != null && Object.hasOwnProperty.call(message, "target")) - $root.query.Target.encode(message.target, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.table_type != null && Object.hasOwnProperty.call(message, "table_type")) - writer.uint32(/* id 2, wireType 0 =*/16).int32(message.table_type); - if (message.table_names != null && message.table_names.length) - for (let i = 0; i < message.table_names.length; ++i) - writer.uint32(/* id 3, wireType 2 
=*/26).string(message.table_names[i]); + if (message.result != null && Object.hasOwnProperty.call(message, "result")) + $root.query.QueryResult.encode(message.result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified GetSchemaRequest message, length delimited. Does not implicitly {@link query.GetSchemaRequest.verify|verify} messages. + * Encodes the specified MessageAckResponse message, length delimited. Does not implicitly {@link query.MessageAckResponse.verify|verify} messages. * @function encodeDelimited - * @memberof query.GetSchemaRequest + * @memberof query.MessageAckResponse * @static - * @param {query.IGetSchemaRequest} message GetSchemaRequest message or plain object to encode + * @param {query.IMessageAckResponse} message MessageAckResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSchemaRequest.encodeDelimited = function encodeDelimited(message, writer) { + MessageAckResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetSchemaRequest message from the specified reader or buffer. + * Decodes a MessageAckResponse message from the specified reader or buffer. 
* @function decode - * @memberof query.GetSchemaRequest + * @memberof query.MessageAckResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.GetSchemaRequest} GetSchemaRequest + * @returns {query.MessageAckResponse} MessageAckResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSchemaRequest.decode = function decode(reader, length) { + MessageAckResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.GetSchemaRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.MessageAckResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.target = $root.query.Target.decode(reader, reader.uint32()); - break; - } - case 2: { - message.table_type = reader.int32(); - break; - } - case 3: { - if (!(message.table_names && message.table_names.length)) - message.table_names = []; - message.table_names.push(reader.string()); + message.result = $root.query.QueryResult.decode(reader, reader.uint32()); break; } default: @@ -91028,182 +91685,134 @@ export const query = $root.query = (() => { }; /** - * Decodes a GetSchemaRequest message from the specified reader or buffer, length delimited. + * Decodes a MessageAckResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.GetSchemaRequest + * @memberof query.MessageAckResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.GetSchemaRequest} GetSchemaRequest + * @returns {query.MessageAckResponse} MessageAckResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSchemaRequest.decodeDelimited = function decodeDelimited(reader) { + MessageAckResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetSchemaRequest message. + * Verifies a MessageAckResponse message. * @function verify - * @memberof query.GetSchemaRequest + * @memberof query.MessageAckResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetSchemaRequest.verify = function verify(message) { + MessageAckResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.target != null && message.hasOwnProperty("target")) { - let error = $root.query.Target.verify(message.target); + if (message.result != null && message.hasOwnProperty("result")) { + let error = $root.query.QueryResult.verify(message.result); if (error) - return "target." 
+ error; - } - if (message.table_type != null && message.hasOwnProperty("table_type")) - switch (message.table_type) { - default: - return "table_type: enum value expected"; - case 0: - case 1: - case 2: - break; - } - if (message.table_names != null && message.hasOwnProperty("table_names")) { - if (!Array.isArray(message.table_names)) - return "table_names: array expected"; - for (let i = 0; i < message.table_names.length; ++i) - if (!$util.isString(message.table_names[i])) - return "table_names: string[] expected"; + return "result." + error; } return null; }; /** - * Creates a GetSchemaRequest message from a plain object. Also converts values to their respective internal types. + * Creates a MessageAckResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.GetSchemaRequest + * @memberof query.MessageAckResponse * @static * @param {Object.} object Plain object - * @returns {query.GetSchemaRequest} GetSchemaRequest + * @returns {query.MessageAckResponse} MessageAckResponse */ - GetSchemaRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.GetSchemaRequest) + MessageAckResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.MessageAckResponse) return object; - let message = new $root.query.GetSchemaRequest(); - if (object.target != null) { - if (typeof object.target !== "object") - throw TypeError(".query.GetSchemaRequest.target: object expected"); - message.target = $root.query.Target.fromObject(object.target); - } - switch (object.table_type) { - default: - if (typeof object.table_type === "number") { - message.table_type = object.table_type; - break; - } - break; - case "VIEWS": - case 0: - message.table_type = 0; - break; - case "TABLES": - case 1: - message.table_type = 1; - break; - case "ALL": - case 2: - message.table_type = 2; - break; - } - if (object.table_names) { - if (!Array.isArray(object.table_names)) - 
throw TypeError(".query.GetSchemaRequest.table_names: array expected"); - message.table_names = []; - for (let i = 0; i < object.table_names.length; ++i) - message.table_names[i] = String(object.table_names[i]); + let message = new $root.query.MessageAckResponse(); + if (object.result != null) { + if (typeof object.result !== "object") + throw TypeError(".query.MessageAckResponse.result: object expected"); + message.result = $root.query.QueryResult.fromObject(object.result); } return message; }; /** - * Creates a plain object from a GetSchemaRequest message. Also converts values to other types if specified. + * Creates a plain object from a MessageAckResponse message. Also converts values to other types if specified. * @function toObject - * @memberof query.GetSchemaRequest + * @memberof query.MessageAckResponse * @static - * @param {query.GetSchemaRequest} message GetSchemaRequest + * @param {query.MessageAckResponse} message MessageAckResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetSchemaRequest.toObject = function toObject(message, options) { + MessageAckResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.table_names = []; - if (options.defaults) { - object.target = null; - object.table_type = options.enums === String ? "VIEWS" : 0; - } - if (message.target != null && message.hasOwnProperty("target")) - object.target = $root.query.Target.toObject(message.target, options); - if (message.table_type != null && message.hasOwnProperty("table_type")) - object.table_type = options.enums === String ? $root.query.SchemaTableType[message.table_type] === undefined ? 
message.table_type : $root.query.SchemaTableType[message.table_type] : message.table_type; - if (message.table_names && message.table_names.length) { - object.table_names = []; - for (let j = 0; j < message.table_names.length; ++j) - object.table_names[j] = message.table_names[j]; - } + if (options.defaults) + object.result = null; + if (message.result != null && message.hasOwnProperty("result")) + object.result = $root.query.QueryResult.toObject(message.result, options); return object; }; /** - * Converts this GetSchemaRequest to JSON. + * Converts this MessageAckResponse to JSON. * @function toJSON - * @memberof query.GetSchemaRequest + * @memberof query.MessageAckResponse * @instance * @returns {Object.} JSON object */ - GetSchemaRequest.prototype.toJSON = function toJSON() { + MessageAckResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetSchemaRequest + * Gets the default type url for MessageAckResponse * @function getTypeUrl - * @memberof query.GetSchemaRequest + * @memberof query.MessageAckResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetSchemaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MessageAckResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.GetSchemaRequest"; + return typeUrlPrefix + "/query.MessageAckResponse"; }; - return GetSchemaRequest; + return MessageAckResponse; })(); - query.GetSchemaResponse = (function() { + query.ReserveExecuteRequest = (function() { /** - * Properties of a GetSchemaResponse. + * Properties of a ReserveExecuteRequest. 
* @memberof query - * @interface IGetSchemaResponse - * @property {Object.|null} [table_definition] GetSchemaResponse table_definition + * @interface IReserveExecuteRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] ReserveExecuteRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] ReserveExecuteRequest immediate_caller_id + * @property {query.ITarget|null} [target] ReserveExecuteRequest target + * @property {query.IBoundQuery|null} [query] ReserveExecuteRequest query + * @property {number|Long|null} [transaction_id] ReserveExecuteRequest transaction_id + * @property {query.IExecuteOptions|null} [options] ReserveExecuteRequest options + * @property {Array.|null} [pre_queries] ReserveExecuteRequest pre_queries */ /** - * Constructs a new GetSchemaResponse. + * Constructs a new ReserveExecuteRequest. * @memberof query - * @classdesc Represents a GetSchemaResponse. - * @implements IGetSchemaResponse + * @classdesc Represents a ReserveExecuteRequest. + * @implements IReserveExecuteRequest * @constructor - * @param {query.IGetSchemaResponse=} [properties] Properties to set + * @param {query.IReserveExecuteRequest=} [properties] Properties to set */ - function GetSchemaResponse(properties) { - this.table_definition = {}; + function ReserveExecuteRequest(properties) { + this.pre_queries = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -91211,95 +91820,162 @@ export const query = $root.query = (() => { } /** - * GetSchemaResponse table_definition. - * @member {Object.} table_definition - * @memberof query.GetSchemaResponse + * ReserveExecuteRequest effective_caller_id. 
+ * @member {vtrpc.ICallerID|null|undefined} effective_caller_id + * @memberof query.ReserveExecuteRequest * @instance */ - GetSchemaResponse.prototype.table_definition = $util.emptyObject; + ReserveExecuteRequest.prototype.effective_caller_id = null; /** - * Creates a new GetSchemaResponse instance using the specified properties. + * ReserveExecuteRequest immediate_caller_id. + * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id + * @memberof query.ReserveExecuteRequest + * @instance + */ + ReserveExecuteRequest.prototype.immediate_caller_id = null; + + /** + * ReserveExecuteRequest target. + * @member {query.ITarget|null|undefined} target + * @memberof query.ReserveExecuteRequest + * @instance + */ + ReserveExecuteRequest.prototype.target = null; + + /** + * ReserveExecuteRequest query. + * @member {query.IBoundQuery|null|undefined} query + * @memberof query.ReserveExecuteRequest + * @instance + */ + ReserveExecuteRequest.prototype.query = null; + + /** + * ReserveExecuteRequest transaction_id. + * @member {number|Long} transaction_id + * @memberof query.ReserveExecuteRequest + * @instance + */ + ReserveExecuteRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * ReserveExecuteRequest options. + * @member {query.IExecuteOptions|null|undefined} options + * @memberof query.ReserveExecuteRequest + * @instance + */ + ReserveExecuteRequest.prototype.options = null; + + /** + * ReserveExecuteRequest pre_queries. + * @member {Array.} pre_queries + * @memberof query.ReserveExecuteRequest + * @instance + */ + ReserveExecuteRequest.prototype.pre_queries = $util.emptyArray; + + /** + * Creates a new ReserveExecuteRequest instance using the specified properties. 
* @function create - * @memberof query.GetSchemaResponse + * @memberof query.ReserveExecuteRequest * @static - * @param {query.IGetSchemaResponse=} [properties] Properties to set - * @returns {query.GetSchemaResponse} GetSchemaResponse instance + * @param {query.IReserveExecuteRequest=} [properties] Properties to set + * @returns {query.ReserveExecuteRequest} ReserveExecuteRequest instance */ - GetSchemaResponse.create = function create(properties) { - return new GetSchemaResponse(properties); + ReserveExecuteRequest.create = function create(properties) { + return new ReserveExecuteRequest(properties); }; /** - * Encodes the specified GetSchemaResponse message. Does not implicitly {@link query.GetSchemaResponse.verify|verify} messages. + * Encodes the specified ReserveExecuteRequest message. Does not implicitly {@link query.ReserveExecuteRequest.verify|verify} messages. * @function encode - * @memberof query.GetSchemaResponse + * @memberof query.ReserveExecuteRequest * @static - * @param {query.IGetSchemaResponse} message GetSchemaResponse message or plain object to encode + * @param {query.IReserveExecuteRequest} message ReserveExecuteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSchemaResponse.encode = function encode(message, writer) { + ReserveExecuteRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.table_definition != null && Object.hasOwnProperty.call(message, "table_definition")) - for (let keys = Object.keys(message.table_definition), i = 0; i < keys.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.table_definition[keys[i]]).ldelim(); + if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) + $root.vtrpc.CallerID.encode(message.effective_caller_id, 
writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) + $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.target != null && Object.hasOwnProperty.call(message, "target")) + $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.query != null && Object.hasOwnProperty.call(message, "query")) + $root.query.BoundQuery.encode(message.query, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) + writer.uint32(/* id 5, wireType 0 =*/40).int64(message.transaction_id); + if (message.options != null && Object.hasOwnProperty.call(message, "options")) + $root.query.ExecuteOptions.encode(message.options, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); + if (message.pre_queries != null && message.pre_queries.length) + for (let i = 0; i < message.pre_queries.length; ++i) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.pre_queries[i]); return writer; }; /** - * Encodes the specified GetSchemaResponse message, length delimited. Does not implicitly {@link query.GetSchemaResponse.verify|verify} messages. + * Encodes the specified ReserveExecuteRequest message, length delimited. Does not implicitly {@link query.ReserveExecuteRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof query.GetSchemaResponse + * @memberof query.ReserveExecuteRequest * @static - * @param {query.IGetSchemaResponse} message GetSchemaResponse message or plain object to encode + * @param {query.IReserveExecuteRequest} message ReserveExecuteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSchemaResponse.encodeDelimited = function encodeDelimited(message, writer) { + ReserveExecuteRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetSchemaResponse message from the specified reader or buffer. + * Decodes a ReserveExecuteRequest message from the specified reader or buffer. * @function decode - * @memberof query.GetSchemaResponse + * @memberof query.ReserveExecuteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.GetSchemaResponse} GetSchemaResponse + * @returns {query.ReserveExecuteRequest} ReserveExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSchemaResponse.decode = function decode(reader, length) { + ReserveExecuteRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.GetSchemaResponse(), key, value; + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.ReserveExecuteRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); + break; + } case 2: { - if (message.table_definition === $util.emptyObject) - message.table_definition = {}; - let end2 = reader.uint32() + reader.pos; - key = ""; - value = ""; - while (reader.pos < end2) { - let tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = reader.string(); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.table_definition[key] = value; + message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); + break; + } + case 3: { + message.target = $root.query.Target.decode(reader, reader.uint32()); + break; + } + case 4: { + message.query = $root.query.BoundQuery.decode(reader, reader.uint32()); + break; + } + case 5: { + message.transaction_id = reader.int64(); + break; + } + case 6: { + message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); + break; + } + case 7: { + if (!(message.pre_queries && message.pre_queries.length)) + message.pre_queries = []; + message.pre_queries.push(reader.string()); break; } default: @@ -91311,143 +91987,226 @@ export const query = $root.query = (() => { }; /** - * Decodes a GetSchemaResponse message from the specified reader or buffer, length delimited. + * Decodes a ReserveExecuteRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof query.GetSchemaResponse + * @memberof query.ReserveExecuteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.GetSchemaResponse} GetSchemaResponse + * @returns {query.ReserveExecuteRequest} ReserveExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSchemaResponse.decodeDelimited = function decodeDelimited(reader) { + ReserveExecuteRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetSchemaResponse message. + * Verifies a ReserveExecuteRequest message. * @function verify - * @memberof query.GetSchemaResponse + * @memberof query.ReserveExecuteRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetSchemaResponse.verify = function verify(message) { + ReserveExecuteRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.table_definition != null && message.hasOwnProperty("table_definition")) { - if (!$util.isObject(message.table_definition)) - return "table_definition: object expected"; - let key = Object.keys(message.table_definition); - for (let i = 0; i < key.length; ++i) - if (!$util.isString(message.table_definition[key[i]])) - return "table_definition: string{k:string} expected"; + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { + let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); + if (error) + return "effective_caller_id." 
+ error; + } + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { + let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); + if (error) + return "immediate_caller_id." + error; + } + if (message.target != null && message.hasOwnProperty("target")) { + let error = $root.query.Target.verify(message.target); + if (error) + return "target." + error; + } + if (message.query != null && message.hasOwnProperty("query")) { + let error = $root.query.BoundQuery.verify(message.query); + if (error) + return "query." + error; + } + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) + return "transaction_id: integer|Long expected"; + if (message.options != null && message.hasOwnProperty("options")) { + let error = $root.query.ExecuteOptions.verify(message.options); + if (error) + return "options." + error; + } + if (message.pre_queries != null && message.hasOwnProperty("pre_queries")) { + if (!Array.isArray(message.pre_queries)) + return "pre_queries: array expected"; + for (let i = 0; i < message.pre_queries.length; ++i) + if (!$util.isString(message.pre_queries[i])) + return "pre_queries: string[] expected"; } return null; }; /** - * Creates a GetSchemaResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ReserveExecuteRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.GetSchemaResponse + * @memberof query.ReserveExecuteRequest * @static * @param {Object.} object Plain object - * @returns {query.GetSchemaResponse} GetSchemaResponse + * @returns {query.ReserveExecuteRequest} ReserveExecuteRequest */ - GetSchemaResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.GetSchemaResponse) + ReserveExecuteRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.ReserveExecuteRequest) return object; - let message = new $root.query.GetSchemaResponse(); - if (object.table_definition) { - if (typeof object.table_definition !== "object") - throw TypeError(".query.GetSchemaResponse.table_definition: object expected"); - message.table_definition = {}; - for (let keys = Object.keys(object.table_definition), i = 0; i < keys.length; ++i) - message.table_definition[keys[i]] = String(object.table_definition[keys[i]]); + let message = new $root.query.ReserveExecuteRequest(); + if (object.effective_caller_id != null) { + if (typeof object.effective_caller_id !== "object") + throw TypeError(".query.ReserveExecuteRequest.effective_caller_id: object expected"); + message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); + } + if (object.immediate_caller_id != null) { + if (typeof object.immediate_caller_id !== "object") + throw TypeError(".query.ReserveExecuteRequest.immediate_caller_id: object expected"); + message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); + } + if (object.target != null) { + if (typeof object.target !== "object") + throw TypeError(".query.ReserveExecuteRequest.target: object expected"); + message.target = $root.query.Target.fromObject(object.target); + } + if (object.query != null) { + if (typeof object.query !== "object") + throw TypeError(".query.ReserveExecuteRequest.query: object expected"); + message.query = 
$root.query.BoundQuery.fromObject(object.query); + } + if (object.transaction_id != null) + if ($util.Long) + (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; + else if (typeof object.transaction_id === "string") + message.transaction_id = parseInt(object.transaction_id, 10); + else if (typeof object.transaction_id === "number") + message.transaction_id = object.transaction_id; + else if (typeof object.transaction_id === "object") + message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); + if (object.options != null) { + if (typeof object.options !== "object") + throw TypeError(".query.ReserveExecuteRequest.options: object expected"); + message.options = $root.query.ExecuteOptions.fromObject(object.options); + } + if (object.pre_queries) { + if (!Array.isArray(object.pre_queries)) + throw TypeError(".query.ReserveExecuteRequest.pre_queries: array expected"); + message.pre_queries = []; + for (let i = 0; i < object.pre_queries.length; ++i) + message.pre_queries[i] = String(object.pre_queries[i]); } return message; }; /** - * Creates a plain object from a GetSchemaResponse message. Also converts values to other types if specified. + * Creates a plain object from a ReserveExecuteRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.GetSchemaResponse + * @memberof query.ReserveExecuteRequest * @static - * @param {query.GetSchemaResponse} message GetSchemaResponse + * @param {query.ReserveExecuteRequest} message ReserveExecuteRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetSchemaResponse.toObject = function toObject(message, options) { + ReserveExecuteRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.objects || options.defaults) - object.table_definition = {}; - let keys2; - if (message.table_definition && (keys2 = Object.keys(message.table_definition)).length) { - object.table_definition = {}; - for (let j = 0; j < keys2.length; ++j) - object.table_definition[keys2[j]] = message.table_definition[keys2[j]]; + if (options.arrays || options.defaults) + object.pre_queries = []; + if (options.defaults) { + object.effective_caller_id = null; + object.immediate_caller_id = null; + object.target = null; + object.query = null; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.transaction_id = options.longs === String ? 
"0" : 0; + object.options = null; + } + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) + object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) + object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); + if (message.target != null && message.hasOwnProperty("target")) + object.target = $root.query.Target.toObject(message.target, options); + if (message.query != null && message.hasOwnProperty("query")) + object.query = $root.query.BoundQuery.toObject(message.query, options); + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (typeof message.transaction_id === "number") + object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; + else + object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; + if (message.options != null && message.hasOwnProperty("options")) + object.options = $root.query.ExecuteOptions.toObject(message.options, options); + if (message.pre_queries && message.pre_queries.length) { + object.pre_queries = []; + for (let j = 0; j < message.pre_queries.length; ++j) + object.pre_queries[j] = message.pre_queries[j]; } return object; }; /** - * Converts this GetSchemaResponse to JSON. + * Converts this ReserveExecuteRequest to JSON. 
* @function toJSON - * @memberof query.GetSchemaResponse + * @memberof query.ReserveExecuteRequest * @instance * @returns {Object.} JSON object */ - GetSchemaResponse.prototype.toJSON = function toJSON() { + ReserveExecuteRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetSchemaResponse + * Gets the default type url for ReserveExecuteRequest * @function getTypeUrl - * @memberof query.GetSchemaResponse + * @memberof query.ReserveExecuteRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetSchemaResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReserveExecuteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.GetSchemaResponse"; + return typeUrlPrefix + "/query.ReserveExecuteRequest"; }; - return GetSchemaResponse; + return ReserveExecuteRequest; })(); - query.LoadDataStreamRequest = (function() { + query.ReserveExecuteResponse = (function() { /** - * Properties of a LoadDataStreamRequest. + * Properties of a ReserveExecuteResponse. 
* @memberof query - * @interface ILoadDataStreamRequest - * @property {vtrpc.ICallerID|null} [effective_caller_id] LoadDataStreamRequest effective_caller_id - * @property {query.IVTGateCallerID|null} [immediate_caller_id] LoadDataStreamRequest immediate_caller_id - * @property {query.ITarget|null} [target] LoadDataStreamRequest target - * @property {query.IBoundQuery|null} [query] LoadDataStreamRequest query - * @property {number|Long|null} [transaction_id] LoadDataStreamRequest transaction_id - * @property {query.IExecuteOptions|null} [options] LoadDataStreamRequest options - * @property {Array.|null} [lines] LoadDataStreamRequest lines + * @interface IReserveExecuteResponse + * @property {vtrpc.IRPCError|null} [error] ReserveExecuteResponse error + * @property {query.IQueryResult|null} [result] ReserveExecuteResponse result + * @property {number|Long|null} [reserved_id] ReserveExecuteResponse reserved_id + * @property {topodata.ITabletAlias|null} [tablet_alias] ReserveExecuteResponse tablet_alias */ /** - * Constructs a new LoadDataStreamRequest. + * Constructs a new ReserveExecuteResponse. * @memberof query - * @classdesc Represents a LoadDataStreamRequest. - * @implements ILoadDataStreamRequest + * @classdesc Represents a ReserveExecuteResponse. + * @implements IReserveExecuteResponse * @constructor - * @param {query.ILoadDataStreamRequest=} [properties] Properties to set + * @param {query.IReserveExecuteResponse=} [properties] Properties to set */ - function LoadDataStreamRequest(properties) { - this.lines = []; + function ReserveExecuteResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -91455,162 +92214,117 @@ export const query = $root.query = (() => { } /** - * LoadDataStreamRequest effective_caller_id. 
- * @member {vtrpc.ICallerID|null|undefined} effective_caller_id - * @memberof query.LoadDataStreamRequest - * @instance - */ - LoadDataStreamRequest.prototype.effective_caller_id = null; - - /** - * LoadDataStreamRequest immediate_caller_id. - * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id - * @memberof query.LoadDataStreamRequest - * @instance - */ - LoadDataStreamRequest.prototype.immediate_caller_id = null; - - /** - * LoadDataStreamRequest target. - * @member {query.ITarget|null|undefined} target - * @memberof query.LoadDataStreamRequest - * @instance - */ - LoadDataStreamRequest.prototype.target = null; - - /** - * LoadDataStreamRequest query. - * @member {query.IBoundQuery|null|undefined} query - * @memberof query.LoadDataStreamRequest + * ReserveExecuteResponse error. + * @member {vtrpc.IRPCError|null|undefined} error + * @memberof query.ReserveExecuteResponse * @instance */ - LoadDataStreamRequest.prototype.query = null; + ReserveExecuteResponse.prototype.error = null; /** - * LoadDataStreamRequest transaction_id. - * @member {number|Long} transaction_id - * @memberof query.LoadDataStreamRequest + * ReserveExecuteResponse result. + * @member {query.IQueryResult|null|undefined} result + * @memberof query.ReserveExecuteResponse * @instance */ - LoadDataStreamRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + ReserveExecuteResponse.prototype.result = null; /** - * LoadDataStreamRequest options. - * @member {query.IExecuteOptions|null|undefined} options - * @memberof query.LoadDataStreamRequest + * ReserveExecuteResponse reserved_id. + * @member {number|Long} reserved_id + * @memberof query.ReserveExecuteResponse * @instance */ - LoadDataStreamRequest.prototype.options = null; + ReserveExecuteResponse.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * LoadDataStreamRequest lines. 
- * @member {Array.} lines - * @memberof query.LoadDataStreamRequest + * ReserveExecuteResponse tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof query.ReserveExecuteResponse * @instance */ - LoadDataStreamRequest.prototype.lines = $util.emptyArray; + ReserveExecuteResponse.prototype.tablet_alias = null; /** - * Creates a new LoadDataStreamRequest instance using the specified properties. + * Creates a new ReserveExecuteResponse instance using the specified properties. * @function create - * @memberof query.LoadDataStreamRequest + * @memberof query.ReserveExecuteResponse * @static - * @param {query.ILoadDataStreamRequest=} [properties] Properties to set - * @returns {query.LoadDataStreamRequest} LoadDataStreamRequest instance + * @param {query.IReserveExecuteResponse=} [properties] Properties to set + * @returns {query.ReserveExecuteResponse} ReserveExecuteResponse instance */ - LoadDataStreamRequest.create = function create(properties) { - return new LoadDataStreamRequest(properties); + ReserveExecuteResponse.create = function create(properties) { + return new ReserveExecuteResponse(properties); }; /** - * Encodes the specified LoadDataStreamRequest message. Does not implicitly {@link query.LoadDataStreamRequest.verify|verify} messages. + * Encodes the specified ReserveExecuteResponse message. Does not implicitly {@link query.ReserveExecuteResponse.verify|verify} messages. 
* @function encode - * @memberof query.LoadDataStreamRequest + * @memberof query.ReserveExecuteResponse * @static - * @param {query.ILoadDataStreamRequest} message LoadDataStreamRequest message or plain object to encode + * @param {query.IReserveExecuteResponse} message ReserveExecuteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - LoadDataStreamRequest.encode = function encode(message, writer) { + ReserveExecuteResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) - $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) - $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.target != null && Object.hasOwnProperty.call(message, "target")) - $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.query != null && Object.hasOwnProperty.call(message, "query")) - $root.query.BoundQuery.encode(message.query, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) - writer.uint32(/* id 5, wireType 0 =*/40).int64(message.transaction_id); - if (message.options != null && Object.hasOwnProperty.call(message, "options")) - $root.query.ExecuteOptions.encode(message.options, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); - if (message.lines != null && message.lines.length) - for (let i = 0; i < message.lines.length; ++i) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.lines[i]); + if (message.error != null && 
Object.hasOwnProperty.call(message, "error")) + $root.vtrpc.RPCError.encode(message.error, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.result != null && Object.hasOwnProperty.call(message, "result")) + $root.query.QueryResult.encode(message.result, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) + writer.uint32(/* id 3, wireType 0 =*/24).int64(message.reserved_id); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; /** - * Encodes the specified LoadDataStreamRequest message, length delimited. Does not implicitly {@link query.LoadDataStreamRequest.verify|verify} messages. + * Encodes the specified ReserveExecuteResponse message, length delimited. Does not implicitly {@link query.ReserveExecuteResponse.verify|verify} messages. * @function encodeDelimited - * @memberof query.LoadDataStreamRequest + * @memberof query.ReserveExecuteResponse * @static - * @param {query.ILoadDataStreamRequest} message LoadDataStreamRequest message or plain object to encode + * @param {query.IReserveExecuteResponse} message ReserveExecuteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - LoadDataStreamRequest.encodeDelimited = function encodeDelimited(message, writer) { + ReserveExecuteResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a LoadDataStreamRequest message from the specified reader or buffer. + * Decodes a ReserveExecuteResponse message from the specified reader or buffer. 
* @function decode - * @memberof query.LoadDataStreamRequest + * @memberof query.ReserveExecuteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.LoadDataStreamRequest} LoadDataStreamRequest + * @returns {query.ReserveExecuteResponse} ReserveExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - LoadDataStreamRequest.decode = function decode(reader, length) { + ReserveExecuteResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.LoadDataStreamRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ReserveExecuteResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); + message.error = $root.vtrpc.RPCError.decode(reader, reader.uint32()); break; } case 2: { - message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); + message.result = $root.query.QueryResult.decode(reader, reader.uint32()); break; } case 3: { - message.target = $root.query.Target.decode(reader, reader.uint32()); + message.reserved_id = reader.int64(); break; } case 4: { - message.query = $root.query.BoundQuery.decode(reader, reader.uint32()); - break; - } - case 5: { - message.transaction_id = reader.int64(); - break; - } - case 6: { - message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); - break; - } - case 7: { - if (!(message.lines && message.lines.length)) - message.lines = []; - message.lines.push(reader.string()); + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, 
reader.uint32()); break; } default: @@ -91622,223 +92336,183 @@ export const query = $root.query = (() => { }; /** - * Decodes a LoadDataStreamRequest message from the specified reader or buffer, length delimited. + * Decodes a ReserveExecuteResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.LoadDataStreamRequest + * @memberof query.ReserveExecuteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.LoadDataStreamRequest} LoadDataStreamRequest + * @returns {query.ReserveExecuteResponse} ReserveExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - LoadDataStreamRequest.decodeDelimited = function decodeDelimited(reader) { + ReserveExecuteResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a LoadDataStreamRequest message. + * Verifies a ReserveExecuteResponse message. * @function verify - * @memberof query.LoadDataStreamRequest + * @memberof query.ReserveExecuteResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - LoadDataStreamRequest.verify = function verify(message) { + ReserveExecuteResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { - let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); - if (error) - return "effective_caller_id." 
+ error; - } - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { - let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); - if (error) - return "immediate_caller_id." + error; - } - if (message.target != null && message.hasOwnProperty("target")) { - let error = $root.query.Target.verify(message.target); + if (message.error != null && message.hasOwnProperty("error")) { + let error = $root.vtrpc.RPCError.verify(message.error); if (error) - return "target." + error; + return "error." + error; } - if (message.query != null && message.hasOwnProperty("query")) { - let error = $root.query.BoundQuery.verify(message.query); + if (message.result != null && message.hasOwnProperty("result")) { + let error = $root.query.QueryResult.verify(message.result); if (error) - return "query." + error; + return "result." + error; } - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) - return "transaction_id: integer|Long expected"; - if (message.options != null && message.hasOwnProperty("options")) { - let error = $root.query.ExecuteOptions.verify(message.options); + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) + return "reserved_id: integer|Long expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); if (error) - return "options." 
+ error; - } - if (message.lines != null && message.hasOwnProperty("lines")) { - if (!Array.isArray(message.lines)) - return "lines: array expected"; - for (let i = 0; i < message.lines.length; ++i) - if (!$util.isString(message.lines[i])) - return "lines: string[] expected"; + return "tablet_alias." + error; } return null; }; /** - * Creates a LoadDataStreamRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ReserveExecuteResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof query.LoadDataStreamRequest + * @memberof query.ReserveExecuteResponse * @static * @param {Object.} object Plain object - * @returns {query.LoadDataStreamRequest} LoadDataStreamRequest + * @returns {query.ReserveExecuteResponse} ReserveExecuteResponse */ - LoadDataStreamRequest.fromObject = function fromObject(object) { - if (object instanceof $root.query.LoadDataStreamRequest) + ReserveExecuteResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.ReserveExecuteResponse) return object; - let message = new $root.query.LoadDataStreamRequest(); - if (object.effective_caller_id != null) { - if (typeof object.effective_caller_id !== "object") - throw TypeError(".query.LoadDataStreamRequest.effective_caller_id: object expected"); - message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); - } - if (object.immediate_caller_id != null) { - if (typeof object.immediate_caller_id !== "object") - throw TypeError(".query.LoadDataStreamRequest.immediate_caller_id: object expected"); - message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); - } - if (object.target != null) { - if (typeof object.target !== "object") - throw TypeError(".query.LoadDataStreamRequest.target: object expected"); - message.target = $root.query.Target.fromObject(object.target); + let message = new 
$root.query.ReserveExecuteResponse(); + if (object.error != null) { + if (typeof object.error !== "object") + throw TypeError(".query.ReserveExecuteResponse.error: object expected"); + message.error = $root.vtrpc.RPCError.fromObject(object.error); } - if (object.query != null) { - if (typeof object.query !== "object") - throw TypeError(".query.LoadDataStreamRequest.query: object expected"); - message.query = $root.query.BoundQuery.fromObject(object.query); + if (object.result != null) { + if (typeof object.result !== "object") + throw TypeError(".query.ReserveExecuteResponse.result: object expected"); + message.result = $root.query.QueryResult.fromObject(object.result); } - if (object.transaction_id != null) + if (object.reserved_id != null) if ($util.Long) - (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; - else if (typeof object.transaction_id === "string") - message.transaction_id = parseInt(object.transaction_id, 10); - else if (typeof object.transaction_id === "number") - message.transaction_id = object.transaction_id; - else if (typeof object.transaction_id === "object") - message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); - if (object.options != null) { - if (typeof object.options !== "object") - throw TypeError(".query.LoadDataStreamRequest.options: object expected"); - message.options = $root.query.ExecuteOptions.fromObject(object.options); - } - if (object.lines) { - if (!Array.isArray(object.lines)) - throw TypeError(".query.LoadDataStreamRequest.lines: array expected"); - message.lines = []; - for (let i = 0; i < object.lines.length; ++i) - message.lines[i] = String(object.lines[i]); + (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; + else if (typeof object.reserved_id === "string") + message.reserved_id = parseInt(object.reserved_id, 10); + else if (typeof object.reserved_id === "number") + 
message.reserved_id = object.reserved_id; + else if (typeof object.reserved_id === "object") + message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".query.ReserveExecuteResponse.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); } return message; }; /** - * Creates a plain object from a LoadDataStreamRequest message. Also converts values to other types if specified. + * Creates a plain object from a ReserveExecuteResponse message. Also converts values to other types if specified. * @function toObject - * @memberof query.LoadDataStreamRequest + * @memberof query.ReserveExecuteResponse * @static - * @param {query.LoadDataStreamRequest} message LoadDataStreamRequest + * @param {query.ReserveExecuteResponse} message ReserveExecuteResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - LoadDataStreamRequest.toObject = function toObject(message, options) { + ReserveExecuteResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.lines = []; if (options.defaults) { - object.effective_caller_id = null; - object.immediate_caller_id = null; - object.target = null; - object.query = null; + object.error = null; + object.result = null; if ($util.Long) { let long = new $util.Long(0, 0, false); - object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; } else - object.transaction_id = options.longs === String ? "0" : 0; - object.options = null; + object.reserved_id = options.longs === String ? 
"0" : 0; + object.tablet_alias = null; } - if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) - object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); - if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) - object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); - if (message.target != null && message.hasOwnProperty("target")) - object.target = $root.query.Target.toObject(message.target, options); - if (message.query != null && message.hasOwnProperty("query")) - object.query = $root.query.BoundQuery.toObject(message.query, options); - if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) - if (typeof message.transaction_id === "number") - object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; + if (message.error != null && message.hasOwnProperty("error")) + object.error = $root.vtrpc.RPCError.toObject(message.error, options); + if (message.result != null && message.hasOwnProperty("result")) + object.result = $root.query.QueryResult.toObject(message.result, options); + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (typeof message.reserved_id === "number") + object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; else - object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? 
new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; - if (message.options != null && message.hasOwnProperty("options")) - object.options = $root.query.ExecuteOptions.toObject(message.options, options); - if (message.lines && message.lines.length) { - object.lines = []; - for (let j = 0; j < message.lines.length; ++j) - object.lines[j] = message.lines[j]; - } + object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); return object; }; /** - * Converts this LoadDataStreamRequest to JSON. + * Converts this ReserveExecuteResponse to JSON. * @function toJSON - * @memberof query.LoadDataStreamRequest + * @memberof query.ReserveExecuteResponse * @instance * @returns {Object.} JSON object */ - LoadDataStreamRequest.prototype.toJSON = function toJSON() { + ReserveExecuteResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for LoadDataStreamRequest + * Gets the default type url for ReserveExecuteResponse * @function getTypeUrl - * @memberof query.LoadDataStreamRequest + * @memberof query.ReserveExecuteResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - LoadDataStreamRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReserveExecuteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.LoadDataStreamRequest"; + return 
typeUrlPrefix + "/query.ReserveExecuteResponse"; }; - return LoadDataStreamRequest; + return ReserveExecuteResponse; })(); - query.LoadDataStreamResponse = (function() { + query.ReserveStreamExecuteRequest = (function() { /** - * Properties of a LoadDataStreamResponse. + * Properties of a ReserveStreamExecuteRequest. * @memberof query - * @interface ILoadDataStreamResponse - * @property {query.IQueryResult|null} [result] LoadDataStreamResponse result + * @interface IReserveStreamExecuteRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] ReserveStreamExecuteRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] ReserveStreamExecuteRequest immediate_caller_id + * @property {query.ITarget|null} [target] ReserveStreamExecuteRequest target + * @property {query.IBoundQuery|null} [query] ReserveStreamExecuteRequest query + * @property {query.IExecuteOptions|null} [options] ReserveStreamExecuteRequest options + * @property {number|Long|null} [transaction_id] ReserveStreamExecuteRequest transaction_id + * @property {Array.|null} [pre_queries] ReserveStreamExecuteRequest pre_queries */ /** - * Constructs a new LoadDataStreamResponse. + * Constructs a new ReserveStreamExecuteRequest. * @memberof query - * @classdesc Represents a LoadDataStreamResponse. - * @implements ILoadDataStreamResponse + * @classdesc Represents a ReserveStreamExecuteRequest. + * @implements IReserveStreamExecuteRequest * @constructor - * @param {query.ILoadDataStreamResponse=} [properties] Properties to set + * @param {query.IReserveStreamExecuteRequest=} [properties] Properties to set */ - function LoadDataStreamResponse(properties) { + function ReserveStreamExecuteRequest(properties) { + this.pre_queries = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -91846,240 +92520,393 @@ export const query = $root.query = (() => { } /** - * LoadDataStreamResponse result. 
- * @member {query.IQueryResult|null|undefined} result - * @memberof query.LoadDataStreamResponse + * ReserveStreamExecuteRequest effective_caller_id. + * @member {vtrpc.ICallerID|null|undefined} effective_caller_id + * @memberof query.ReserveStreamExecuteRequest * @instance */ - LoadDataStreamResponse.prototype.result = null; + ReserveStreamExecuteRequest.prototype.effective_caller_id = null; /** - * Creates a new LoadDataStreamResponse instance using the specified properties. + * ReserveStreamExecuteRequest immediate_caller_id. + * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id + * @memberof query.ReserveStreamExecuteRequest + * @instance + */ + ReserveStreamExecuteRequest.prototype.immediate_caller_id = null; + + /** + * ReserveStreamExecuteRequest target. + * @member {query.ITarget|null|undefined} target + * @memberof query.ReserveStreamExecuteRequest + * @instance + */ + ReserveStreamExecuteRequest.prototype.target = null; + + /** + * ReserveStreamExecuteRequest query. + * @member {query.IBoundQuery|null|undefined} query + * @memberof query.ReserveStreamExecuteRequest + * @instance + */ + ReserveStreamExecuteRequest.prototype.query = null; + + /** + * ReserveStreamExecuteRequest options. + * @member {query.IExecuteOptions|null|undefined} options + * @memberof query.ReserveStreamExecuteRequest + * @instance + */ + ReserveStreamExecuteRequest.prototype.options = null; + + /** + * ReserveStreamExecuteRequest transaction_id. + * @member {number|Long} transaction_id + * @memberof query.ReserveStreamExecuteRequest + * @instance + */ + ReserveStreamExecuteRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * ReserveStreamExecuteRequest pre_queries. 
+ * @member {Array.} pre_queries + * @memberof query.ReserveStreamExecuteRequest + * @instance + */ + ReserveStreamExecuteRequest.prototype.pre_queries = $util.emptyArray; + + /** + * Creates a new ReserveStreamExecuteRequest instance using the specified properties. * @function create - * @memberof query.LoadDataStreamResponse + * @memberof query.ReserveStreamExecuteRequest * @static - * @param {query.ILoadDataStreamResponse=} [properties] Properties to set - * @returns {query.LoadDataStreamResponse} LoadDataStreamResponse instance + * @param {query.IReserveStreamExecuteRequest=} [properties] Properties to set + * @returns {query.ReserveStreamExecuteRequest} ReserveStreamExecuteRequest instance */ - LoadDataStreamResponse.create = function create(properties) { - return new LoadDataStreamResponse(properties); + ReserveStreamExecuteRequest.create = function create(properties) { + return new ReserveStreamExecuteRequest(properties); }; /** - * Encodes the specified LoadDataStreamResponse message. Does not implicitly {@link query.LoadDataStreamResponse.verify|verify} messages. + * Encodes the specified ReserveStreamExecuteRequest message. Does not implicitly {@link query.ReserveStreamExecuteRequest.verify|verify} messages. 
* @function encode - * @memberof query.LoadDataStreamResponse + * @memberof query.ReserveStreamExecuteRequest * @static - * @param {query.ILoadDataStreamResponse} message LoadDataStreamResponse message or plain object to encode + * @param {query.IReserveStreamExecuteRequest} message ReserveStreamExecuteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - LoadDataStreamResponse.encode = function encode(message, writer) { + ReserveStreamExecuteRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.result != null && Object.hasOwnProperty.call(message, "result")) - $root.query.QueryResult.encode(message.result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) + $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) + $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.target != null && Object.hasOwnProperty.call(message, "target")) + $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.query != null && Object.hasOwnProperty.call(message, "query")) + $root.query.BoundQuery.encode(message.query, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.options != null && Object.hasOwnProperty.call(message, "options")) + $root.query.ExecuteOptions.encode(message.options, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) + writer.uint32(/* id 6, wireType 0 =*/48).int64(message.transaction_id); + if 
(message.pre_queries != null && message.pre_queries.length) + for (let i = 0; i < message.pre_queries.length; ++i) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.pre_queries[i]); return writer; }; /** - * Encodes the specified LoadDataStreamResponse message, length delimited. Does not implicitly {@link query.LoadDataStreamResponse.verify|verify} messages. + * Encodes the specified ReserveStreamExecuteRequest message, length delimited. Does not implicitly {@link query.ReserveStreamExecuteRequest.verify|verify} messages. * @function encodeDelimited - * @memberof query.LoadDataStreamResponse + * @memberof query.ReserveStreamExecuteRequest * @static - * @param {query.ILoadDataStreamResponse} message LoadDataStreamResponse message or plain object to encode + * @param {query.IReserveStreamExecuteRequest} message ReserveStreamExecuteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - LoadDataStreamResponse.encodeDelimited = function encodeDelimited(message, writer) { + ReserveStreamExecuteRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a LoadDataStreamResponse message from the specified reader or buffer. + * Decodes a ReserveStreamExecuteRequest message from the specified reader or buffer. 
* @function decode - * @memberof query.LoadDataStreamResponse + * @memberof query.ReserveStreamExecuteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {query.LoadDataStreamResponse} LoadDataStreamResponse + * @returns {query.ReserveStreamExecuteRequest} ReserveStreamExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - LoadDataStreamResponse.decode = function decode(reader, length) { + ReserveStreamExecuteRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.LoadDataStreamResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ReserveStreamExecuteRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.result = $root.query.QueryResult.decode(reader, reader.uint32()); + message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); break; } - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; + case 2: { + message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); + break; + } + case 3: { + message.target = $root.query.Target.decode(reader, reader.uint32()); + break; + } + case 4: { + message.query = $root.query.BoundQuery.decode(reader, reader.uint32()); + break; + } + case 5: { + message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); + break; + } + case 6: { + message.transaction_id = reader.int64(); + break; + } + case 7: { + if (!(message.pre_queries && message.pre_queries.length)) + message.pre_queries = []; + message.pre_queries.push(reader.string()); + break; + } + default: + 
reader.skipType(tag & 7); + break; + } + } + return message; + }; /** - * Decodes a LoadDataStreamResponse message from the specified reader or buffer, length delimited. + * Decodes a ReserveStreamExecuteRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof query.LoadDataStreamResponse + * @memberof query.ReserveStreamExecuteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {query.LoadDataStreamResponse} LoadDataStreamResponse + * @returns {query.ReserveStreamExecuteRequest} ReserveStreamExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - LoadDataStreamResponse.decodeDelimited = function decodeDelimited(reader) { + ReserveStreamExecuteRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a LoadDataStreamResponse message. + * Verifies a ReserveStreamExecuteRequest message. * @function verify - * @memberof query.LoadDataStreamResponse + * @memberof query.ReserveStreamExecuteRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - LoadDataStreamResponse.verify = function verify(message) { + ReserveStreamExecuteRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.result != null && message.hasOwnProperty("result")) { - let error = $root.query.QueryResult.verify(message.result); + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { + let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); if (error) - return "result." + error; + return "effective_caller_id." 
+ error; + } + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { + let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); + if (error) + return "immediate_caller_id." + error; + } + if (message.target != null && message.hasOwnProperty("target")) { + let error = $root.query.Target.verify(message.target); + if (error) + return "target." + error; + } + if (message.query != null && message.hasOwnProperty("query")) { + let error = $root.query.BoundQuery.verify(message.query); + if (error) + return "query." + error; + } + if (message.options != null && message.hasOwnProperty("options")) { + let error = $root.query.ExecuteOptions.verify(message.options); + if (error) + return "options." + error; + } + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) + return "transaction_id: integer|Long expected"; + if (message.pre_queries != null && message.hasOwnProperty("pre_queries")) { + if (!Array.isArray(message.pre_queries)) + return "pre_queries: array expected"; + for (let i = 0; i < message.pre_queries.length; ++i) + if (!$util.isString(message.pre_queries[i])) + return "pre_queries: string[] expected"; } return null; }; /** - * Creates a LoadDataStreamResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ReserveStreamExecuteRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof query.LoadDataStreamResponse + * @memberof query.ReserveStreamExecuteRequest * @static * @param {Object.} object Plain object - * @returns {query.LoadDataStreamResponse} LoadDataStreamResponse + * @returns {query.ReserveStreamExecuteRequest} ReserveStreamExecuteRequest */ - LoadDataStreamResponse.fromObject = function fromObject(object) { - if (object instanceof $root.query.LoadDataStreamResponse) + ReserveStreamExecuteRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.ReserveStreamExecuteRequest) return object; - let message = new $root.query.LoadDataStreamResponse(); - if (object.result != null) { - if (typeof object.result !== "object") - throw TypeError(".query.LoadDataStreamResponse.result: object expected"); - message.result = $root.query.QueryResult.fromObject(object.result); + let message = new $root.query.ReserveStreamExecuteRequest(); + if (object.effective_caller_id != null) { + if (typeof object.effective_caller_id !== "object") + throw TypeError(".query.ReserveStreamExecuteRequest.effective_caller_id: object expected"); + message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); + } + if (object.immediate_caller_id != null) { + if (typeof object.immediate_caller_id !== "object") + throw TypeError(".query.ReserveStreamExecuteRequest.immediate_caller_id: object expected"); + message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); + } + if (object.target != null) { + if (typeof object.target !== "object") + throw TypeError(".query.ReserveStreamExecuteRequest.target: object expected"); + message.target = $root.query.Target.fromObject(object.target); + } + if (object.query != null) { + if (typeof object.query !== "object") + throw TypeError(".query.ReserveStreamExecuteRequest.query: object expected"); + message.query = $root.query.BoundQuery.fromObject(object.query); + } + if (object.options != null) { + if 
(typeof object.options !== "object") + throw TypeError(".query.ReserveStreamExecuteRequest.options: object expected"); + message.options = $root.query.ExecuteOptions.fromObject(object.options); + } + if (object.transaction_id != null) + if ($util.Long) + (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; + else if (typeof object.transaction_id === "string") + message.transaction_id = parseInt(object.transaction_id, 10); + else if (typeof object.transaction_id === "number") + message.transaction_id = object.transaction_id; + else if (typeof object.transaction_id === "object") + message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); + if (object.pre_queries) { + if (!Array.isArray(object.pre_queries)) + throw TypeError(".query.ReserveStreamExecuteRequest.pre_queries: array expected"); + message.pre_queries = []; + for (let i = 0; i < object.pre_queries.length; ++i) + message.pre_queries[i] = String(object.pre_queries[i]); } return message; }; /** - * Creates a plain object from a LoadDataStreamResponse message. Also converts values to other types if specified. + * Creates a plain object from a ReserveStreamExecuteRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof query.LoadDataStreamResponse + * @memberof query.ReserveStreamExecuteRequest * @static - * @param {query.LoadDataStreamResponse} message LoadDataStreamResponse + * @param {query.ReserveStreamExecuteRequest} message ReserveStreamExecuteRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - LoadDataStreamResponse.toObject = function toObject(message, options) { + ReserveStreamExecuteRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.result = null; - if (message.result != null && message.hasOwnProperty("result")) - object.result = $root.query.QueryResult.toObject(message.result, options); + if (options.arrays || options.defaults) + object.pre_queries = []; + if (options.defaults) { + object.effective_caller_id = null; + object.immediate_caller_id = null; + object.target = null; + object.query = null; + object.options = null; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.transaction_id = options.longs === String ? 
"0" : 0; + } + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) + object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) + object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); + if (message.target != null && message.hasOwnProperty("target")) + object.target = $root.query.Target.toObject(message.target, options); + if (message.query != null && message.hasOwnProperty("query")) + object.query = $root.query.BoundQuery.toObject(message.query, options); + if (message.options != null && message.hasOwnProperty("options")) + object.options = $root.query.ExecuteOptions.toObject(message.options, options); + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (typeof message.transaction_id === "number") + object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; + else + object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; + if (message.pre_queries && message.pre_queries.length) { + object.pre_queries = []; + for (let j = 0; j < message.pre_queries.length; ++j) + object.pre_queries[j] = message.pre_queries[j]; + } return object; }; /** - * Converts this LoadDataStreamResponse to JSON. + * Converts this ReserveStreamExecuteRequest to JSON. 
* @function toJSON - * @memberof query.LoadDataStreamResponse + * @memberof query.ReserveStreamExecuteRequest * @instance * @returns {Object.} JSON object */ - LoadDataStreamResponse.prototype.toJSON = function toJSON() { + ReserveStreamExecuteRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for LoadDataStreamResponse + * Gets the default type url for ReserveStreamExecuteRequest * @function getTypeUrl - * @memberof query.LoadDataStreamResponse + * @memberof query.ReserveStreamExecuteRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - LoadDataStreamResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReserveStreamExecuteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/query.LoadDataStreamResponse"; + return typeUrlPrefix + "/query.ReserveStreamExecuteRequest"; }; - return LoadDataStreamResponse; + return ReserveStreamExecuteRequest; })(); - return query; -})(); - -export const replicationdata = $root.replicationdata = (() => { - - /** - * Namespace replicationdata. - * @exports replicationdata - * @namespace - */ - const replicationdata = {}; - - replicationdata.Status = (function() { + query.ReserveStreamExecuteResponse = (function() { /** - * Properties of a Status. 
- * @memberof replicationdata - * @interface IStatus - * @property {string|null} [position] Status position - * @property {number|null} [replication_lag_seconds] Status replication_lag_seconds - * @property {string|null} [source_host] Status source_host - * @property {number|null} [source_port] Status source_port - * @property {number|null} [connect_retry] Status connect_retry - * @property {string|null} [relay_log_position] Status relay_log_position - * @property {string|null} [file_position] Status file_position - * @property {string|null} [relay_log_source_binlog_equivalent_position] Status relay_log_source_binlog_equivalent_position - * @property {number|null} [source_server_id] Status source_server_id - * @property {string|null} [source_uuid] Status source_uuid - * @property {number|null} [io_state] Status io_state - * @property {string|null} [last_io_error] Status last_io_error - * @property {number|null} [sql_state] Status sql_state - * @property {string|null} [last_sql_error] Status last_sql_error - * @property {string|null} [relay_log_file_position] Status relay_log_file_position - * @property {string|null} [source_user] Status source_user - * @property {number|null} [sql_delay] Status sql_delay - * @property {boolean|null} [auto_position] Status auto_position - * @property {boolean|null} [using_gtid] Status using_gtid - * @property {boolean|null} [has_replication_filters] Status has_replication_filters - * @property {boolean|null} [ssl_allowed] Status ssl_allowed - * @property {boolean|null} [replication_lag_unknown] Status replication_lag_unknown + * Properties of a ReserveStreamExecuteResponse. 
+ * @memberof query + * @interface IReserveStreamExecuteResponse + * @property {vtrpc.IRPCError|null} [error] ReserveStreamExecuteResponse error + * @property {query.IQueryResult|null} [result] ReserveStreamExecuteResponse result + * @property {number|Long|null} [reserved_id] ReserveStreamExecuteResponse reserved_id + * @property {topodata.ITabletAlias|null} [tablet_alias] ReserveStreamExecuteResponse tablet_alias */ /** - * Constructs a new Status. - * @memberof replicationdata - * @classdesc Represents a Status. - * @implements IStatus + * Constructs a new ReserveStreamExecuteResponse. + * @memberof query + * @classdesc Represents a ReserveStreamExecuteResponse. + * @implements IReserveStreamExecuteResponse * @constructor - * @param {replicationdata.IStatus=} [properties] Properties to set + * @param {query.IReserveStreamExecuteResponse=} [properties] Properties to set */ - function Status(properties) { + function ReserveStreamExecuteResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -92087,369 +92914,472 @@ export const replicationdata = $root.replicationdata = (() => { } /** - * Status position. - * @member {string} position - * @memberof replicationdata.Status - * @instance - */ - Status.prototype.position = ""; - - /** - * Status replication_lag_seconds. - * @member {number} replication_lag_seconds - * @memberof replicationdata.Status - * @instance - */ - Status.prototype.replication_lag_seconds = 0; - - /** - * Status source_host. - * @member {string} source_host - * @memberof replicationdata.Status + * ReserveStreamExecuteResponse error. + * @member {vtrpc.IRPCError|null|undefined} error + * @memberof query.ReserveStreamExecuteResponse * @instance */ - Status.prototype.source_host = ""; + ReserveStreamExecuteResponse.prototype.error = null; /** - * Status source_port. 
- * @member {number} source_port - * @memberof replicationdata.Status + * ReserveStreamExecuteResponse result. + * @member {query.IQueryResult|null|undefined} result + * @memberof query.ReserveStreamExecuteResponse * @instance */ - Status.prototype.source_port = 0; + ReserveStreamExecuteResponse.prototype.result = null; /** - * Status connect_retry. - * @member {number} connect_retry - * @memberof replicationdata.Status + * ReserveStreamExecuteResponse reserved_id. + * @member {number|Long} reserved_id + * @memberof query.ReserveStreamExecuteResponse * @instance */ - Status.prototype.connect_retry = 0; + ReserveStreamExecuteResponse.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * Status relay_log_position. - * @member {string} relay_log_position - * @memberof replicationdata.Status + * ReserveStreamExecuteResponse tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof query.ReserveStreamExecuteResponse * @instance */ - Status.prototype.relay_log_position = ""; + ReserveStreamExecuteResponse.prototype.tablet_alias = null; /** - * Status file_position. - * @member {string} file_position - * @memberof replicationdata.Status - * @instance + * Creates a new ReserveStreamExecuteResponse instance using the specified properties. + * @function create + * @memberof query.ReserveStreamExecuteResponse + * @static + * @param {query.IReserveStreamExecuteResponse=} [properties] Properties to set + * @returns {query.ReserveStreamExecuteResponse} ReserveStreamExecuteResponse instance */ - Status.prototype.file_position = ""; + ReserveStreamExecuteResponse.create = function create(properties) { + return new ReserveStreamExecuteResponse(properties); + }; /** - * Status relay_log_source_binlog_equivalent_position. - * @member {string} relay_log_source_binlog_equivalent_position - * @memberof replicationdata.Status - * @instance + * Encodes the specified ReserveStreamExecuteResponse message. 
Does not implicitly {@link query.ReserveStreamExecuteResponse.verify|verify} messages. + * @function encode + * @memberof query.ReserveStreamExecuteResponse + * @static + * @param {query.IReserveStreamExecuteResponse} message ReserveStreamExecuteResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer */ - Status.prototype.relay_log_source_binlog_equivalent_position = ""; + ReserveStreamExecuteResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.error != null && Object.hasOwnProperty.call(message, "error")) + $root.vtrpc.RPCError.encode(message.error, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.result != null && Object.hasOwnProperty.call(message, "result")) + $root.query.QueryResult.encode(message.result, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) + writer.uint32(/* id 3, wireType 0 =*/24).int64(message.reserved_id); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + return writer; + }; /** - * Status source_server_id. - * @member {number} source_server_id - * @memberof replicationdata.Status - * @instance + * Encodes the specified ReserveStreamExecuteResponse message, length delimited. Does not implicitly {@link query.ReserveStreamExecuteResponse.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof query.ReserveStreamExecuteResponse + * @static + * @param {query.IReserveStreamExecuteResponse} message ReserveStreamExecuteResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer */ - Status.prototype.source_server_id = 0; + ReserveStreamExecuteResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; /** - * Status source_uuid. - * @member {string} source_uuid - * @memberof replicationdata.Status - * @instance + * Decodes a ReserveStreamExecuteResponse message from the specified reader or buffer. + * @function decode + * @memberof query.ReserveStreamExecuteResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {query.ReserveStreamExecuteResponse} ReserveStreamExecuteResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Status.prototype.source_uuid = ""; + ReserveStreamExecuteResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.ReserveStreamExecuteResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.error = $root.vtrpc.RPCError.decode(reader, reader.uint32()); + break; + } + case 2: { + message.result = $root.query.QueryResult.decode(reader, reader.uint32()); + break; + } + case 3: { + message.reserved_id = reader.int64(); + break; + } + case 4: { + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; /** - * Status io_state. - * @member {number} io_state - * @memberof replicationdata.Status - * @instance + * Decodes a ReserveStreamExecuteResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof query.ReserveStreamExecuteResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {query.ReserveStreamExecuteResponse} ReserveStreamExecuteResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Status.prototype.io_state = 0; + ReserveStreamExecuteResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; /** - * Status last_io_error. - * @member {string} last_io_error - * @memberof replicationdata.Status - * @instance + * Verifies a ReserveStreamExecuteResponse message. 
+ * @function verify + * @memberof query.ReserveStreamExecuteResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Status.prototype.last_io_error = ""; + ReserveStreamExecuteResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.error != null && message.hasOwnProperty("error")) { + let error = $root.vtrpc.RPCError.verify(message.error); + if (error) + return "error." + error; + } + if (message.result != null && message.hasOwnProperty("result")) { + let error = $root.query.QueryResult.verify(message.result); + if (error) + return "result." + error; + } + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) + return "reserved_id: integer|Long expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; + } + return null; + }; /** - * Status sql_state. - * @member {number} sql_state - * @memberof replicationdata.Status - * @instance + * Creates a ReserveStreamExecuteResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof query.ReserveStreamExecuteResponse + * @static + * @param {Object.} object Plain object + * @returns {query.ReserveStreamExecuteResponse} ReserveStreamExecuteResponse */ - Status.prototype.sql_state = 0; + ReserveStreamExecuteResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.ReserveStreamExecuteResponse) + return object; + let message = new $root.query.ReserveStreamExecuteResponse(); + if (object.error != null) { + if (typeof object.error !== "object") + throw TypeError(".query.ReserveStreamExecuteResponse.error: object expected"); + message.error = $root.vtrpc.RPCError.fromObject(object.error); + } + if (object.result != null) { + if (typeof object.result !== "object") + throw TypeError(".query.ReserveStreamExecuteResponse.result: object expected"); + message.result = $root.query.QueryResult.fromObject(object.result); + } + if (object.reserved_id != null) + if ($util.Long) + (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; + else if (typeof object.reserved_id === "string") + message.reserved_id = parseInt(object.reserved_id, 10); + else if (typeof object.reserved_id === "number") + message.reserved_id = object.reserved_id; + else if (typeof object.reserved_id === "object") + message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".query.ReserveStreamExecuteResponse.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } + return message; + }; /** - * Status last_sql_error. - * @member {string} last_sql_error - * @memberof replicationdata.Status - * @instance + * Creates a plain object from a ReserveStreamExecuteResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof query.ReserveStreamExecuteResponse + * @static + * @param {query.ReserveStreamExecuteResponse} message ReserveStreamExecuteResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object */ - Status.prototype.last_sql_error = ""; - - /** - * Status relay_log_file_position. - * @member {string} relay_log_file_position - * @memberof replicationdata.Status + ReserveStreamExecuteResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.error = null; + object.result = null; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.reserved_id = options.longs === String ? "0" : 0; + object.tablet_alias = null; + } + if (message.error != null && message.hasOwnProperty("error")) + object.error = $root.vtrpc.RPCError.toObject(message.error, options); + if (message.result != null && message.hasOwnProperty("result")) + object.result = $root.query.QueryResult.toObject(message.result, options); + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (typeof message.reserved_id === "number") + object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; + else + object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + return object; + }; + + /** + * Converts this ReserveStreamExecuteResponse to JSON. 
+ * @function toJSON + * @memberof query.ReserveStreamExecuteResponse * @instance + * @returns {Object.} JSON object */ - Status.prototype.relay_log_file_position = ""; + ReserveStreamExecuteResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; /** - * Status source_user. - * @member {string} source_user - * @memberof replicationdata.Status + * Gets the default type url for ReserveStreamExecuteResponse + * @function getTypeUrl + * @memberof query.ReserveStreamExecuteResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ReserveStreamExecuteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/query.ReserveStreamExecuteResponse"; + }; + + return ReserveStreamExecuteResponse; + })(); + + query.ReserveBeginExecuteRequest = (function() { + + /** + * Properties of a ReserveBeginExecuteRequest. + * @memberof query + * @interface IReserveBeginExecuteRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] ReserveBeginExecuteRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] ReserveBeginExecuteRequest immediate_caller_id + * @property {query.ITarget|null} [target] ReserveBeginExecuteRequest target + * @property {query.IBoundQuery|null} [query] ReserveBeginExecuteRequest query + * @property {query.IExecuteOptions|null} [options] ReserveBeginExecuteRequest options + * @property {Array.|null} [pre_queries] ReserveBeginExecuteRequest pre_queries + * @property {Array.|null} [post_begin_queries] ReserveBeginExecuteRequest post_begin_queries + */ + + /** + * Constructs a new ReserveBeginExecuteRequest. + * @memberof query + * @classdesc Represents a ReserveBeginExecuteRequest. 
+ * @implements IReserveBeginExecuteRequest + * @constructor + * @param {query.IReserveBeginExecuteRequest=} [properties] Properties to set + */ + function ReserveBeginExecuteRequest(properties) { + this.pre_queries = []; + this.post_begin_queries = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ReserveBeginExecuteRequest effective_caller_id. + * @member {vtrpc.ICallerID|null|undefined} effective_caller_id + * @memberof query.ReserveBeginExecuteRequest * @instance */ - Status.prototype.source_user = ""; + ReserveBeginExecuteRequest.prototype.effective_caller_id = null; /** - * Status sql_delay. - * @member {number} sql_delay - * @memberof replicationdata.Status + * ReserveBeginExecuteRequest immediate_caller_id. + * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id + * @memberof query.ReserveBeginExecuteRequest * @instance */ - Status.prototype.sql_delay = 0; + ReserveBeginExecuteRequest.prototype.immediate_caller_id = null; /** - * Status auto_position. - * @member {boolean} auto_position - * @memberof replicationdata.Status + * ReserveBeginExecuteRequest target. + * @member {query.ITarget|null|undefined} target + * @memberof query.ReserveBeginExecuteRequest * @instance */ - Status.prototype.auto_position = false; + ReserveBeginExecuteRequest.prototype.target = null; /** - * Status using_gtid. - * @member {boolean} using_gtid - * @memberof replicationdata.Status + * ReserveBeginExecuteRequest query. + * @member {query.IBoundQuery|null|undefined} query + * @memberof query.ReserveBeginExecuteRequest * @instance */ - Status.prototype.using_gtid = false; + ReserveBeginExecuteRequest.prototype.query = null; /** - * Status has_replication_filters. - * @member {boolean} has_replication_filters - * @memberof replicationdata.Status + * ReserveBeginExecuteRequest options. 
+ * @member {query.IExecuteOptions|null|undefined} options + * @memberof query.ReserveBeginExecuteRequest * @instance */ - Status.prototype.has_replication_filters = false; + ReserveBeginExecuteRequest.prototype.options = null; /** - * Status ssl_allowed. - * @member {boolean} ssl_allowed - * @memberof replicationdata.Status + * ReserveBeginExecuteRequest pre_queries. + * @member {Array.} pre_queries + * @memberof query.ReserveBeginExecuteRequest * @instance */ - Status.prototype.ssl_allowed = false; + ReserveBeginExecuteRequest.prototype.pre_queries = $util.emptyArray; /** - * Status replication_lag_unknown. - * @member {boolean} replication_lag_unknown - * @memberof replicationdata.Status + * ReserveBeginExecuteRequest post_begin_queries. + * @member {Array.} post_begin_queries + * @memberof query.ReserveBeginExecuteRequest * @instance */ - Status.prototype.replication_lag_unknown = false; + ReserveBeginExecuteRequest.prototype.post_begin_queries = $util.emptyArray; /** - * Creates a new Status instance using the specified properties. + * Creates a new ReserveBeginExecuteRequest instance using the specified properties. * @function create - * @memberof replicationdata.Status + * @memberof query.ReserveBeginExecuteRequest * @static - * @param {replicationdata.IStatus=} [properties] Properties to set - * @returns {replicationdata.Status} Status instance + * @param {query.IReserveBeginExecuteRequest=} [properties] Properties to set + * @returns {query.ReserveBeginExecuteRequest} ReserveBeginExecuteRequest instance */ - Status.create = function create(properties) { - return new Status(properties); + ReserveBeginExecuteRequest.create = function create(properties) { + return new ReserveBeginExecuteRequest(properties); }; /** - * Encodes the specified Status message. Does not implicitly {@link replicationdata.Status.verify|verify} messages. + * Encodes the specified ReserveBeginExecuteRequest message. 
Does not implicitly {@link query.ReserveBeginExecuteRequest.verify|verify} messages. * @function encode - * @memberof replicationdata.Status + * @memberof query.ReserveBeginExecuteRequest * @static - * @param {replicationdata.IStatus} message Status message or plain object to encode + * @param {query.IReserveBeginExecuteRequest} message ReserveBeginExecuteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Status.encode = function encode(message, writer) { + ReserveBeginExecuteRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.position != null && Object.hasOwnProperty.call(message, "position")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.position); - if (message.replication_lag_seconds != null && Object.hasOwnProperty.call(message, "replication_lag_seconds")) - writer.uint32(/* id 4, wireType 0 =*/32).uint32(message.replication_lag_seconds); - if (message.source_host != null && Object.hasOwnProperty.call(message, "source_host")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.source_host); - if (message.source_port != null && Object.hasOwnProperty.call(message, "source_port")) - writer.uint32(/* id 6, wireType 0 =*/48).int32(message.source_port); - if (message.connect_retry != null && Object.hasOwnProperty.call(message, "connect_retry")) - writer.uint32(/* id 7, wireType 0 =*/56).int32(message.connect_retry); - if (message.relay_log_position != null && Object.hasOwnProperty.call(message, "relay_log_position")) - writer.uint32(/* id 8, wireType 2 =*/66).string(message.relay_log_position); - if (message.file_position != null && Object.hasOwnProperty.call(message, "file_position")) - writer.uint32(/* id 9, wireType 2 =*/74).string(message.file_position); - if (message.relay_log_source_binlog_equivalent_position != null && Object.hasOwnProperty.call(message, 
"relay_log_source_binlog_equivalent_position")) - writer.uint32(/* id 10, wireType 2 =*/82).string(message.relay_log_source_binlog_equivalent_position); - if (message.source_server_id != null && Object.hasOwnProperty.call(message, "source_server_id")) - writer.uint32(/* id 11, wireType 0 =*/88).uint32(message.source_server_id); - if (message.source_uuid != null && Object.hasOwnProperty.call(message, "source_uuid")) - writer.uint32(/* id 12, wireType 2 =*/98).string(message.source_uuid); - if (message.io_state != null && Object.hasOwnProperty.call(message, "io_state")) - writer.uint32(/* id 13, wireType 0 =*/104).int32(message.io_state); - if (message.last_io_error != null && Object.hasOwnProperty.call(message, "last_io_error")) - writer.uint32(/* id 14, wireType 2 =*/114).string(message.last_io_error); - if (message.sql_state != null && Object.hasOwnProperty.call(message, "sql_state")) - writer.uint32(/* id 15, wireType 0 =*/120).int32(message.sql_state); - if (message.last_sql_error != null && Object.hasOwnProperty.call(message, "last_sql_error")) - writer.uint32(/* id 16, wireType 2 =*/130).string(message.last_sql_error); - if (message.relay_log_file_position != null && Object.hasOwnProperty.call(message, "relay_log_file_position")) - writer.uint32(/* id 17, wireType 2 =*/138).string(message.relay_log_file_position); - if (message.source_user != null && Object.hasOwnProperty.call(message, "source_user")) - writer.uint32(/* id 18, wireType 2 =*/146).string(message.source_user); - if (message.sql_delay != null && Object.hasOwnProperty.call(message, "sql_delay")) - writer.uint32(/* id 19, wireType 0 =*/152).uint32(message.sql_delay); - if (message.auto_position != null && Object.hasOwnProperty.call(message, "auto_position")) - writer.uint32(/* id 20, wireType 0 =*/160).bool(message.auto_position); - if (message.using_gtid != null && Object.hasOwnProperty.call(message, "using_gtid")) - writer.uint32(/* id 21, wireType 0 =*/168).bool(message.using_gtid); - if 
(message.has_replication_filters != null && Object.hasOwnProperty.call(message, "has_replication_filters")) - writer.uint32(/* id 22, wireType 0 =*/176).bool(message.has_replication_filters); - if (message.ssl_allowed != null && Object.hasOwnProperty.call(message, "ssl_allowed")) - writer.uint32(/* id 23, wireType 0 =*/184).bool(message.ssl_allowed); - if (message.replication_lag_unknown != null && Object.hasOwnProperty.call(message, "replication_lag_unknown")) - writer.uint32(/* id 24, wireType 0 =*/192).bool(message.replication_lag_unknown); + if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) + $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) + $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.target != null && Object.hasOwnProperty.call(message, "target")) + $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.query != null && Object.hasOwnProperty.call(message, "query")) + $root.query.BoundQuery.encode(message.query, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.options != null && Object.hasOwnProperty.call(message, "options")) + $root.query.ExecuteOptions.encode(message.options, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.pre_queries != null && message.pre_queries.length) + for (let i = 0; i < message.pre_queries.length; ++i) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.pre_queries[i]); + if (message.post_begin_queries != null && message.post_begin_queries.length) + for (let i = 0; i < message.post_begin_queries.length; ++i) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.post_begin_queries[i]); return writer; }; /** 
- * Encodes the specified Status message, length delimited. Does not implicitly {@link replicationdata.Status.verify|verify} messages. + * Encodes the specified ReserveBeginExecuteRequest message, length delimited. Does not implicitly {@link query.ReserveBeginExecuteRequest.verify|verify} messages. * @function encodeDelimited - * @memberof replicationdata.Status + * @memberof query.ReserveBeginExecuteRequest * @static - * @param {replicationdata.IStatus} message Status message or plain object to encode + * @param {query.IReserveBeginExecuteRequest} message ReserveBeginExecuteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Status.encodeDelimited = function encodeDelimited(message, writer) { + ReserveBeginExecuteRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Status message from the specified reader or buffer. + * Decodes a ReserveBeginExecuteRequest message from the specified reader or buffer. * @function decode - * @memberof replicationdata.Status + * @memberof query.ReserveBeginExecuteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {replicationdata.Status} Status + * @returns {query.ReserveBeginExecuteRequest} ReserveBeginExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Status.decode = function decode(reader, length) { + ReserveBeginExecuteRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.replicationdata.Status(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.ReserveBeginExecuteRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.position = reader.string(); - break; - } - case 4: { - message.replication_lag_seconds = reader.uint32(); - break; - } - case 5: { - message.source_host = reader.string(); - break; - } - case 6: { - message.source_port = reader.int32(); - break; - } - case 7: { - message.connect_retry = reader.int32(); - break; - } - case 8: { - message.relay_log_position = reader.string(); - break; - } - case 9: { - message.file_position = reader.string(); - break; - } - case 10: { - message.relay_log_source_binlog_equivalent_position = reader.string(); - break; - } - case 11: { - message.source_server_id = reader.uint32(); - break; - } - case 12: { - message.source_uuid = reader.string(); - break; - } - case 13: { - message.io_state = reader.int32(); - break; - } - case 14: { - message.last_io_error = reader.string(); - break; - } - case 15: { - message.sql_state = reader.int32(); - break; - } - case 16: { - message.last_sql_error = reader.string(); - break; - } - case 17: { - message.relay_log_file_position = reader.string(); - break; - } - case 18: { - message.source_user = reader.string(); + message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); break; } - case 19: { - message.sql_delay = reader.uint32(); + case 2: { + message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); break; } - case 20: { - message.auto_position = reader.bool(); + case 3: { + message.target = $root.query.Target.decode(reader, reader.uint32()); break; } - case 21: { - message.using_gtid = reader.bool(); + case 4: { + message.query = $root.query.BoundQuery.decode(reader, reader.uint32()); break; } - case 22: { - message.has_replication_filters = reader.bool(); + case 5: { + message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); break; } - 
case 23: { - message.ssl_allowed = reader.bool(); + case 6: { + if (!(message.pre_queries && message.pre_queries.length)) + message.pre_queries = []; + message.pre_queries.push(reader.string()); break; } - case 24: { - message.replication_lag_unknown = reader.bool(); + case 7: { + if (!(message.post_begin_queries && message.post_begin_queries.length)) + message.post_begin_queries = []; + message.post_begin_queries.push(reader.string()); break; } default: @@ -92461,292 +93391,227 @@ export const replicationdata = $root.replicationdata = (() => { }; /** - * Decodes a Status message from the specified reader or buffer, length delimited. + * Decodes a ReserveBeginExecuteRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof replicationdata.Status + * @memberof query.ReserveBeginExecuteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {replicationdata.Status} Status + * @returns {query.ReserveBeginExecuteRequest} ReserveBeginExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Status.decodeDelimited = function decodeDelimited(reader) { + ReserveBeginExecuteRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Status message. + * Verifies a ReserveBeginExecuteRequest message. 
* @function verify - * @memberof replicationdata.Status + * @memberof query.ReserveBeginExecuteRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Status.verify = function verify(message) { + ReserveBeginExecuteRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.position != null && message.hasOwnProperty("position")) - if (!$util.isString(message.position)) - return "position: string expected"; - if (message.replication_lag_seconds != null && message.hasOwnProperty("replication_lag_seconds")) - if (!$util.isInteger(message.replication_lag_seconds)) - return "replication_lag_seconds: integer expected"; - if (message.source_host != null && message.hasOwnProperty("source_host")) - if (!$util.isString(message.source_host)) - return "source_host: string expected"; - if (message.source_port != null && message.hasOwnProperty("source_port")) - if (!$util.isInteger(message.source_port)) - return "source_port: integer expected"; - if (message.connect_retry != null && message.hasOwnProperty("connect_retry")) - if (!$util.isInteger(message.connect_retry)) - return "connect_retry: integer expected"; - if (message.relay_log_position != null && message.hasOwnProperty("relay_log_position")) - if (!$util.isString(message.relay_log_position)) - return "relay_log_position: string expected"; - if (message.file_position != null && message.hasOwnProperty("file_position")) - if (!$util.isString(message.file_position)) - return "file_position: string expected"; - if (message.relay_log_source_binlog_equivalent_position != null && message.hasOwnProperty("relay_log_source_binlog_equivalent_position")) - if (!$util.isString(message.relay_log_source_binlog_equivalent_position)) - return "relay_log_source_binlog_equivalent_position: string expected"; - if (message.source_server_id != null && 
message.hasOwnProperty("source_server_id")) - if (!$util.isInteger(message.source_server_id)) - return "source_server_id: integer expected"; - if (message.source_uuid != null && message.hasOwnProperty("source_uuid")) - if (!$util.isString(message.source_uuid)) - return "source_uuid: string expected"; - if (message.io_state != null && message.hasOwnProperty("io_state")) - if (!$util.isInteger(message.io_state)) - return "io_state: integer expected"; - if (message.last_io_error != null && message.hasOwnProperty("last_io_error")) - if (!$util.isString(message.last_io_error)) - return "last_io_error: string expected"; - if (message.sql_state != null && message.hasOwnProperty("sql_state")) - if (!$util.isInteger(message.sql_state)) - return "sql_state: integer expected"; - if (message.last_sql_error != null && message.hasOwnProperty("last_sql_error")) - if (!$util.isString(message.last_sql_error)) - return "last_sql_error: string expected"; - if (message.relay_log_file_position != null && message.hasOwnProperty("relay_log_file_position")) - if (!$util.isString(message.relay_log_file_position)) - return "relay_log_file_position: string expected"; - if (message.source_user != null && message.hasOwnProperty("source_user")) - if (!$util.isString(message.source_user)) - return "source_user: string expected"; - if (message.sql_delay != null && message.hasOwnProperty("sql_delay")) - if (!$util.isInteger(message.sql_delay)) - return "sql_delay: integer expected"; - if (message.auto_position != null && message.hasOwnProperty("auto_position")) - if (typeof message.auto_position !== "boolean") - return "auto_position: boolean expected"; - if (message.using_gtid != null && message.hasOwnProperty("using_gtid")) - if (typeof message.using_gtid !== "boolean") - return "using_gtid: boolean expected"; - if (message.has_replication_filters != null && message.hasOwnProperty("has_replication_filters")) - if (typeof message.has_replication_filters !== "boolean") - return 
"has_replication_filters: boolean expected"; - if (message.ssl_allowed != null && message.hasOwnProperty("ssl_allowed")) - if (typeof message.ssl_allowed !== "boolean") - return "ssl_allowed: boolean expected"; - if (message.replication_lag_unknown != null && message.hasOwnProperty("replication_lag_unknown")) - if (typeof message.replication_lag_unknown !== "boolean") - return "replication_lag_unknown: boolean expected"; + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { + let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); + if (error) + return "effective_caller_id." + error; + } + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { + let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); + if (error) + return "immediate_caller_id." + error; + } + if (message.target != null && message.hasOwnProperty("target")) { + let error = $root.query.Target.verify(message.target); + if (error) + return "target." + error; + } + if (message.query != null && message.hasOwnProperty("query")) { + let error = $root.query.BoundQuery.verify(message.query); + if (error) + return "query." + error; + } + if (message.options != null && message.hasOwnProperty("options")) { + let error = $root.query.ExecuteOptions.verify(message.options); + if (error) + return "options." 
+ error; + } + if (message.pre_queries != null && message.hasOwnProperty("pre_queries")) { + if (!Array.isArray(message.pre_queries)) + return "pre_queries: array expected"; + for (let i = 0; i < message.pre_queries.length; ++i) + if (!$util.isString(message.pre_queries[i])) + return "pre_queries: string[] expected"; + } + if (message.post_begin_queries != null && message.hasOwnProperty("post_begin_queries")) { + if (!Array.isArray(message.post_begin_queries)) + return "post_begin_queries: array expected"; + for (let i = 0; i < message.post_begin_queries.length; ++i) + if (!$util.isString(message.post_begin_queries[i])) + return "post_begin_queries: string[] expected"; + } return null; }; /** - * Creates a Status message from a plain object. Also converts values to their respective internal types. + * Creates a ReserveBeginExecuteRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof replicationdata.Status + * @memberof query.ReserveBeginExecuteRequest * @static * @param {Object.} object Plain object - * @returns {replicationdata.Status} Status + * @returns {query.ReserveBeginExecuteRequest} ReserveBeginExecuteRequest */ - Status.fromObject = function fromObject(object) { - if (object instanceof $root.replicationdata.Status) + ReserveBeginExecuteRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.ReserveBeginExecuteRequest) return object; - let message = new $root.replicationdata.Status(); - if (object.position != null) - message.position = String(object.position); - if (object.replication_lag_seconds != null) - message.replication_lag_seconds = object.replication_lag_seconds >>> 0; - if (object.source_host != null) - message.source_host = String(object.source_host); - if (object.source_port != null) - message.source_port = object.source_port | 0; - if (object.connect_retry != null) - message.connect_retry = object.connect_retry | 0; - if 
(object.relay_log_position != null) - message.relay_log_position = String(object.relay_log_position); - if (object.file_position != null) - message.file_position = String(object.file_position); - if (object.relay_log_source_binlog_equivalent_position != null) - message.relay_log_source_binlog_equivalent_position = String(object.relay_log_source_binlog_equivalent_position); - if (object.source_server_id != null) - message.source_server_id = object.source_server_id >>> 0; - if (object.source_uuid != null) - message.source_uuid = String(object.source_uuid); - if (object.io_state != null) - message.io_state = object.io_state | 0; - if (object.last_io_error != null) - message.last_io_error = String(object.last_io_error); - if (object.sql_state != null) - message.sql_state = object.sql_state | 0; - if (object.last_sql_error != null) - message.last_sql_error = String(object.last_sql_error); - if (object.relay_log_file_position != null) - message.relay_log_file_position = String(object.relay_log_file_position); - if (object.source_user != null) - message.source_user = String(object.source_user); - if (object.sql_delay != null) - message.sql_delay = object.sql_delay >>> 0; - if (object.auto_position != null) - message.auto_position = Boolean(object.auto_position); - if (object.using_gtid != null) - message.using_gtid = Boolean(object.using_gtid); - if (object.has_replication_filters != null) - message.has_replication_filters = Boolean(object.has_replication_filters); - if (object.ssl_allowed != null) - message.ssl_allowed = Boolean(object.ssl_allowed); - if (object.replication_lag_unknown != null) - message.replication_lag_unknown = Boolean(object.replication_lag_unknown); - return message; - }; - - /** - * Creates a plain object from a Status message. Also converts values to other types if specified. 
+ let message = new $root.query.ReserveBeginExecuteRequest(); + if (object.effective_caller_id != null) { + if (typeof object.effective_caller_id !== "object") + throw TypeError(".query.ReserveBeginExecuteRequest.effective_caller_id: object expected"); + message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); + } + if (object.immediate_caller_id != null) { + if (typeof object.immediate_caller_id !== "object") + throw TypeError(".query.ReserveBeginExecuteRequest.immediate_caller_id: object expected"); + message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); + } + if (object.target != null) { + if (typeof object.target !== "object") + throw TypeError(".query.ReserveBeginExecuteRequest.target: object expected"); + message.target = $root.query.Target.fromObject(object.target); + } + if (object.query != null) { + if (typeof object.query !== "object") + throw TypeError(".query.ReserveBeginExecuteRequest.query: object expected"); + message.query = $root.query.BoundQuery.fromObject(object.query); + } + if (object.options != null) { + if (typeof object.options !== "object") + throw TypeError(".query.ReserveBeginExecuteRequest.options: object expected"); + message.options = $root.query.ExecuteOptions.fromObject(object.options); + } + if (object.pre_queries) { + if (!Array.isArray(object.pre_queries)) + throw TypeError(".query.ReserveBeginExecuteRequest.pre_queries: array expected"); + message.pre_queries = []; + for (let i = 0; i < object.pre_queries.length; ++i) + message.pre_queries[i] = String(object.pre_queries[i]); + } + if (object.post_begin_queries) { + if (!Array.isArray(object.post_begin_queries)) + throw TypeError(".query.ReserveBeginExecuteRequest.post_begin_queries: array expected"); + message.post_begin_queries = []; + for (let i = 0; i < object.post_begin_queries.length; ++i) + message.post_begin_queries[i] = String(object.post_begin_queries[i]); + } + return message; + }; + + /** + * 
Creates a plain object from a ReserveBeginExecuteRequest message. Also converts values to other types if specified. * @function toObject - * @memberof replicationdata.Status + * @memberof query.ReserveBeginExecuteRequest * @static - * @param {replicationdata.Status} message Status + * @param {query.ReserveBeginExecuteRequest} message ReserveBeginExecuteRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Status.toObject = function toObject(message, options) { + ReserveBeginExecuteRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) { + object.pre_queries = []; + object.post_begin_queries = []; + } if (options.defaults) { - object.position = ""; - object.replication_lag_seconds = 0; - object.source_host = ""; - object.source_port = 0; - object.connect_retry = 0; - object.relay_log_position = ""; - object.file_position = ""; - object.relay_log_source_binlog_equivalent_position = ""; - object.source_server_id = 0; - object.source_uuid = ""; - object.io_state = 0; - object.last_io_error = ""; - object.sql_state = 0; - object.last_sql_error = ""; - object.relay_log_file_position = ""; - object.source_user = ""; - object.sql_delay = 0; - object.auto_position = false; - object.using_gtid = false; - object.has_replication_filters = false; - object.ssl_allowed = false; - object.replication_lag_unknown = false; + object.effective_caller_id = null; + object.immediate_caller_id = null; + object.target = null; + object.query = null; + object.options = null; + } + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) + object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) + object.immediate_caller_id = 
$root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); + if (message.target != null && message.hasOwnProperty("target")) + object.target = $root.query.Target.toObject(message.target, options); + if (message.query != null && message.hasOwnProperty("query")) + object.query = $root.query.BoundQuery.toObject(message.query, options); + if (message.options != null && message.hasOwnProperty("options")) + object.options = $root.query.ExecuteOptions.toObject(message.options, options); + if (message.pre_queries && message.pre_queries.length) { + object.pre_queries = []; + for (let j = 0; j < message.pre_queries.length; ++j) + object.pre_queries[j] = message.pre_queries[j]; + } + if (message.post_begin_queries && message.post_begin_queries.length) { + object.post_begin_queries = []; + for (let j = 0; j < message.post_begin_queries.length; ++j) + object.post_begin_queries[j] = message.post_begin_queries[j]; } - if (message.position != null && message.hasOwnProperty("position")) - object.position = message.position; - if (message.replication_lag_seconds != null && message.hasOwnProperty("replication_lag_seconds")) - object.replication_lag_seconds = message.replication_lag_seconds; - if (message.source_host != null && message.hasOwnProperty("source_host")) - object.source_host = message.source_host; - if (message.source_port != null && message.hasOwnProperty("source_port")) - object.source_port = message.source_port; - if (message.connect_retry != null && message.hasOwnProperty("connect_retry")) - object.connect_retry = message.connect_retry; - if (message.relay_log_position != null && message.hasOwnProperty("relay_log_position")) - object.relay_log_position = message.relay_log_position; - if (message.file_position != null && message.hasOwnProperty("file_position")) - object.file_position = message.file_position; - if (message.relay_log_source_binlog_equivalent_position != null && message.hasOwnProperty("relay_log_source_binlog_equivalent_position")) - 
object.relay_log_source_binlog_equivalent_position = message.relay_log_source_binlog_equivalent_position; - if (message.source_server_id != null && message.hasOwnProperty("source_server_id")) - object.source_server_id = message.source_server_id; - if (message.source_uuid != null && message.hasOwnProperty("source_uuid")) - object.source_uuid = message.source_uuid; - if (message.io_state != null && message.hasOwnProperty("io_state")) - object.io_state = message.io_state; - if (message.last_io_error != null && message.hasOwnProperty("last_io_error")) - object.last_io_error = message.last_io_error; - if (message.sql_state != null && message.hasOwnProperty("sql_state")) - object.sql_state = message.sql_state; - if (message.last_sql_error != null && message.hasOwnProperty("last_sql_error")) - object.last_sql_error = message.last_sql_error; - if (message.relay_log_file_position != null && message.hasOwnProperty("relay_log_file_position")) - object.relay_log_file_position = message.relay_log_file_position; - if (message.source_user != null && message.hasOwnProperty("source_user")) - object.source_user = message.source_user; - if (message.sql_delay != null && message.hasOwnProperty("sql_delay")) - object.sql_delay = message.sql_delay; - if (message.auto_position != null && message.hasOwnProperty("auto_position")) - object.auto_position = message.auto_position; - if (message.using_gtid != null && message.hasOwnProperty("using_gtid")) - object.using_gtid = message.using_gtid; - if (message.has_replication_filters != null && message.hasOwnProperty("has_replication_filters")) - object.has_replication_filters = message.has_replication_filters; - if (message.ssl_allowed != null && message.hasOwnProperty("ssl_allowed")) - object.ssl_allowed = message.ssl_allowed; - if (message.replication_lag_unknown != null && message.hasOwnProperty("replication_lag_unknown")) - object.replication_lag_unknown = message.replication_lag_unknown; return object; }; /** - * Converts this Status to 
JSON. + * Converts this ReserveBeginExecuteRequest to JSON. * @function toJSON - * @memberof replicationdata.Status + * @memberof query.ReserveBeginExecuteRequest * @instance * @returns {Object.} JSON object */ - Status.prototype.toJSON = function toJSON() { + ReserveBeginExecuteRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for Status + * Gets the default type url for ReserveBeginExecuteRequest * @function getTypeUrl - * @memberof replicationdata.Status + * @memberof query.ReserveBeginExecuteRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - Status.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReserveBeginExecuteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/replicationdata.Status"; + return typeUrlPrefix + "/query.ReserveBeginExecuteRequest"; }; - return Status; + return ReserveBeginExecuteRequest; })(); - replicationdata.StopReplicationStatus = (function() { + query.ReserveBeginExecuteResponse = (function() { /** - * Properties of a StopReplicationStatus. - * @memberof replicationdata - * @interface IStopReplicationStatus - * @property {replicationdata.IStatus|null} [before] StopReplicationStatus before - * @property {replicationdata.IStatus|null} [after] StopReplicationStatus after + * Properties of a ReserveBeginExecuteResponse. 
+ * @memberof query + * @interface IReserveBeginExecuteResponse + * @property {vtrpc.IRPCError|null} [error] ReserveBeginExecuteResponse error + * @property {query.IQueryResult|null} [result] ReserveBeginExecuteResponse result + * @property {number|Long|null} [transaction_id] ReserveBeginExecuteResponse transaction_id + * @property {number|Long|null} [reserved_id] ReserveBeginExecuteResponse reserved_id + * @property {topodata.ITabletAlias|null} [tablet_alias] ReserveBeginExecuteResponse tablet_alias + * @property {string|null} [session_state_changes] ReserveBeginExecuteResponse session_state_changes */ /** - * Constructs a new StopReplicationStatus. - * @memberof replicationdata - * @classdesc Represents a StopReplicationStatus. - * @implements IStopReplicationStatus + * Constructs a new ReserveBeginExecuteResponse. + * @memberof query + * @classdesc Represents a ReserveBeginExecuteResponse. + * @implements IReserveBeginExecuteResponse * @constructor - * @param {replicationdata.IStopReplicationStatus=} [properties] Properties to set + * @param {query.IReserveBeginExecuteResponse=} [properties] Properties to set */ - function StopReplicationStatus(properties) { + function ReserveBeginExecuteResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -92754,89 +93619,145 @@ export const replicationdata = $root.replicationdata = (() => { } /** - * StopReplicationStatus before. - * @member {replicationdata.IStatus|null|undefined} before - * @memberof replicationdata.StopReplicationStatus + * ReserveBeginExecuteResponse error. + * @member {vtrpc.IRPCError|null|undefined} error + * @memberof query.ReserveBeginExecuteResponse * @instance */ - StopReplicationStatus.prototype.before = null; + ReserveBeginExecuteResponse.prototype.error = null; /** - * StopReplicationStatus after. 
- * @member {replicationdata.IStatus|null|undefined} after - * @memberof replicationdata.StopReplicationStatus + * ReserveBeginExecuteResponse result. + * @member {query.IQueryResult|null|undefined} result + * @memberof query.ReserveBeginExecuteResponse * @instance */ - StopReplicationStatus.prototype.after = null; + ReserveBeginExecuteResponse.prototype.result = null; /** - * Creates a new StopReplicationStatus instance using the specified properties. + * ReserveBeginExecuteResponse transaction_id. + * @member {number|Long} transaction_id + * @memberof query.ReserveBeginExecuteResponse + * @instance + */ + ReserveBeginExecuteResponse.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * ReserveBeginExecuteResponse reserved_id. + * @member {number|Long} reserved_id + * @memberof query.ReserveBeginExecuteResponse + * @instance + */ + ReserveBeginExecuteResponse.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * ReserveBeginExecuteResponse tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof query.ReserveBeginExecuteResponse + * @instance + */ + ReserveBeginExecuteResponse.prototype.tablet_alias = null; + + /** + * ReserveBeginExecuteResponse session_state_changes. + * @member {string} session_state_changes + * @memberof query.ReserveBeginExecuteResponse + * @instance + */ + ReserveBeginExecuteResponse.prototype.session_state_changes = ""; + + /** + * Creates a new ReserveBeginExecuteResponse instance using the specified properties. 
* @function create - * @memberof replicationdata.StopReplicationStatus + * @memberof query.ReserveBeginExecuteResponse * @static - * @param {replicationdata.IStopReplicationStatus=} [properties] Properties to set - * @returns {replicationdata.StopReplicationStatus} StopReplicationStatus instance + * @param {query.IReserveBeginExecuteResponse=} [properties] Properties to set + * @returns {query.ReserveBeginExecuteResponse} ReserveBeginExecuteResponse instance */ - StopReplicationStatus.create = function create(properties) { - return new StopReplicationStatus(properties); + ReserveBeginExecuteResponse.create = function create(properties) { + return new ReserveBeginExecuteResponse(properties); }; /** - * Encodes the specified StopReplicationStatus message. Does not implicitly {@link replicationdata.StopReplicationStatus.verify|verify} messages. + * Encodes the specified ReserveBeginExecuteResponse message. Does not implicitly {@link query.ReserveBeginExecuteResponse.verify|verify} messages. 
* @function encode - * @memberof replicationdata.StopReplicationStatus + * @memberof query.ReserveBeginExecuteResponse * @static - * @param {replicationdata.IStopReplicationStatus} message StopReplicationStatus message or plain object to encode + * @param {query.IReserveBeginExecuteResponse} message ReserveBeginExecuteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StopReplicationStatus.encode = function encode(message, writer) { + ReserveBeginExecuteResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.before != null && Object.hasOwnProperty.call(message, "before")) - $root.replicationdata.Status.encode(message.before, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.after != null && Object.hasOwnProperty.call(message, "after")) - $root.replicationdata.Status.encode(message.after, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.error != null && Object.hasOwnProperty.call(message, "error")) + $root.vtrpc.RPCError.encode(message.error, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.result != null && Object.hasOwnProperty.call(message, "result")) + $root.query.QueryResult.encode(message.result, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) + writer.uint32(/* id 3, wireType 0 =*/24).int64(message.transaction_id); + if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) + writer.uint32(/* id 4, wireType 0 =*/32).int64(message.reserved_id); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.session_state_changes != null && 
Object.hasOwnProperty.call(message, "session_state_changes")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.session_state_changes); return writer; }; /** - * Encodes the specified StopReplicationStatus message, length delimited. Does not implicitly {@link replicationdata.StopReplicationStatus.verify|verify} messages. + * Encodes the specified ReserveBeginExecuteResponse message, length delimited. Does not implicitly {@link query.ReserveBeginExecuteResponse.verify|verify} messages. * @function encodeDelimited - * @memberof replicationdata.StopReplicationStatus + * @memberof query.ReserveBeginExecuteResponse * @static - * @param {replicationdata.IStopReplicationStatus} message StopReplicationStatus message or plain object to encode + * @param {query.IReserveBeginExecuteResponse} message ReserveBeginExecuteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StopReplicationStatus.encodeDelimited = function encodeDelimited(message, writer) { + ReserveBeginExecuteResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a StopReplicationStatus message from the specified reader or buffer. + * Decodes a ReserveBeginExecuteResponse message from the specified reader or buffer. 
* @function decode - * @memberof replicationdata.StopReplicationStatus + * @memberof query.ReserveBeginExecuteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {replicationdata.StopReplicationStatus} StopReplicationStatus + * @returns {query.ReserveBeginExecuteResponse} ReserveBeginExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StopReplicationStatus.decode = function decode(reader, length) { + ReserveBeginExecuteResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.replicationdata.StopReplicationStatus(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ReserveBeginExecuteResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.before = $root.replicationdata.Status.decode(reader, reader.uint32()); + message.error = $root.vtrpc.RPCError.decode(reader, reader.uint32()); break; } case 2: { - message.after = $root.replicationdata.Status.decode(reader, reader.uint32()); + message.result = $root.query.QueryResult.decode(reader, reader.uint32()); + break; + } + case 3: { + message.transaction_id = reader.int64(); + break; + } + case 4: { + message.reserved_id = reader.int64(); + break; + } + case 5: { + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 6: { + message.session_state_changes = reader.string(); break; } default: @@ -92848,246 +93769,380 @@ export const replicationdata = $root.replicationdata = (() => { }; /** - * Decodes a StopReplicationStatus message from the specified reader or buffer, length delimited. 
+ * Decodes a ReserveBeginExecuteResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof replicationdata.StopReplicationStatus + * @memberof query.ReserveBeginExecuteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {replicationdata.StopReplicationStatus} StopReplicationStatus + * @returns {query.ReserveBeginExecuteResponse} ReserveBeginExecuteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StopReplicationStatus.decodeDelimited = function decodeDelimited(reader) { + ReserveBeginExecuteResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a StopReplicationStatus message. + * Verifies a ReserveBeginExecuteResponse message. * @function verify - * @memberof replicationdata.StopReplicationStatus + * @memberof query.ReserveBeginExecuteResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - StopReplicationStatus.verify = function verify(message) { + ReserveBeginExecuteResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.before != null && message.hasOwnProperty("before")) { - let error = $root.replicationdata.Status.verify(message.before); + if (message.error != null && message.hasOwnProperty("error")) { + let error = $root.vtrpc.RPCError.verify(message.error); if (error) - return "before." + error; + return "error." 
+ error; } - if (message.after != null && message.hasOwnProperty("after")) { - let error = $root.replicationdata.Status.verify(message.after); + if (message.result != null && message.hasOwnProperty("result")) { + let error = $root.query.QueryResult.verify(message.result); if (error) - return "after." + error; + return "result." + error; + } + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) + return "transaction_id: integer|Long expected"; + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) + return "reserved_id: integer|Long expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; } + if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) + if (!$util.isString(message.session_state_changes)) + return "session_state_changes: string expected"; return null; }; /** - * Creates a StopReplicationStatus message from a plain object. Also converts values to their respective internal types. + * Creates a ReserveBeginExecuteResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof replicationdata.StopReplicationStatus + * @memberof query.ReserveBeginExecuteResponse * @static * @param {Object.} object Plain object - * @returns {replicationdata.StopReplicationStatus} StopReplicationStatus + * @returns {query.ReserveBeginExecuteResponse} ReserveBeginExecuteResponse */ - StopReplicationStatus.fromObject = function fromObject(object) { - if (object instanceof $root.replicationdata.StopReplicationStatus) + ReserveBeginExecuteResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.ReserveBeginExecuteResponse) return object; - let message = new $root.replicationdata.StopReplicationStatus(); - if (object.before != null) { - if (typeof object.before !== "object") - throw TypeError(".replicationdata.StopReplicationStatus.before: object expected"); - message.before = $root.replicationdata.Status.fromObject(object.before); + let message = new $root.query.ReserveBeginExecuteResponse(); + if (object.error != null) { + if (typeof object.error !== "object") + throw TypeError(".query.ReserveBeginExecuteResponse.error: object expected"); + message.error = $root.vtrpc.RPCError.fromObject(object.error); } - if (object.after != null) { - if (typeof object.after !== "object") - throw TypeError(".replicationdata.StopReplicationStatus.after: object expected"); - message.after = $root.replicationdata.Status.fromObject(object.after); + if (object.result != null) { + if (typeof object.result !== "object") + throw TypeError(".query.ReserveBeginExecuteResponse.result: object expected"); + message.result = $root.query.QueryResult.fromObject(object.result); + } + if (object.transaction_id != null) + if ($util.Long) + (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; + else if (typeof object.transaction_id === "string") + message.transaction_id = parseInt(object.transaction_id, 10); + else if (typeof object.transaction_id === "number") + message.transaction_id = 
object.transaction_id; + else if (typeof object.transaction_id === "object") + message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); + if (object.reserved_id != null) + if ($util.Long) + (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; + else if (typeof object.reserved_id === "string") + message.reserved_id = parseInt(object.reserved_id, 10); + else if (typeof object.reserved_id === "number") + message.reserved_id = object.reserved_id; + else if (typeof object.reserved_id === "object") + message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".query.ReserveBeginExecuteResponse.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); } + if (object.session_state_changes != null) + message.session_state_changes = String(object.session_state_changes); return message; }; /** - * Creates a plain object from a StopReplicationStatus message. Also converts values to other types if specified. + * Creates a plain object from a ReserveBeginExecuteResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof replicationdata.StopReplicationStatus + * @memberof query.ReserveBeginExecuteResponse * @static - * @param {replicationdata.StopReplicationStatus} message StopReplicationStatus + * @param {query.ReserveBeginExecuteResponse} message ReserveBeginExecuteResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - StopReplicationStatus.toObject = function toObject(message, options) { + ReserveBeginExecuteResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.before = null; - object.after = null; + object.error = null; + object.result = null; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.transaction_id = options.longs === String ? "0" : 0; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.reserved_id = options.longs === String ? 
"0" : 0; + object.tablet_alias = null; + object.session_state_changes = ""; } - if (message.before != null && message.hasOwnProperty("before")) - object.before = $root.replicationdata.Status.toObject(message.before, options); - if (message.after != null && message.hasOwnProperty("after")) - object.after = $root.replicationdata.Status.toObject(message.after, options); + if (message.error != null && message.hasOwnProperty("error")) + object.error = $root.vtrpc.RPCError.toObject(message.error, options); + if (message.result != null && message.hasOwnProperty("result")) + object.result = $root.query.QueryResult.toObject(message.result, options); + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (typeof message.transaction_id === "number") + object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; + else + object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (typeof message.reserved_id === "number") + object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; + else + object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? 
new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) + object.session_state_changes = message.session_state_changes; return object; }; /** - * Converts this StopReplicationStatus to JSON. + * Converts this ReserveBeginExecuteResponse to JSON. * @function toJSON - * @memberof replicationdata.StopReplicationStatus + * @memberof query.ReserveBeginExecuteResponse * @instance * @returns {Object.} JSON object */ - StopReplicationStatus.prototype.toJSON = function toJSON() { + ReserveBeginExecuteResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for StopReplicationStatus + * Gets the default type url for ReserveBeginExecuteResponse * @function getTypeUrl - * @memberof replicationdata.StopReplicationStatus + * @memberof query.ReserveBeginExecuteResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - StopReplicationStatus.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReserveBeginExecuteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/replicationdata.StopReplicationStatus"; + return typeUrlPrefix + "/query.ReserveBeginExecuteResponse"; }; - return StopReplicationStatus; - })(); - - /** - * StopReplicationMode enum. 
- * @name replicationdata.StopReplicationMode - * @enum {number} - * @property {number} IOANDSQLTHREAD=0 IOANDSQLTHREAD value - * @property {number} IOTHREADONLY=1 IOTHREADONLY value - */ - replicationdata.StopReplicationMode = (function() { - const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "IOANDSQLTHREAD"] = 0; - values[valuesById[1] = "IOTHREADONLY"] = 1; - return values; + return ReserveBeginExecuteResponse; })(); - replicationdata.PrimaryStatus = (function() { + query.ReserveBeginStreamExecuteRequest = (function() { /** - * Properties of a PrimaryStatus. - * @memberof replicationdata - * @interface IPrimaryStatus - * @property {string|null} [position] PrimaryStatus position - * @property {string|null} [file_position] PrimaryStatus file_position + * Properties of a ReserveBeginStreamExecuteRequest. + * @memberof query + * @interface IReserveBeginStreamExecuteRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] ReserveBeginStreamExecuteRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] ReserveBeginStreamExecuteRequest immediate_caller_id + * @property {query.ITarget|null} [target] ReserveBeginStreamExecuteRequest target + * @property {query.IBoundQuery|null} [query] ReserveBeginStreamExecuteRequest query + * @property {query.IExecuteOptions|null} [options] ReserveBeginStreamExecuteRequest options + * @property {Array.|null} [pre_queries] ReserveBeginStreamExecuteRequest pre_queries + * @property {Array.|null} [post_begin_queries] ReserveBeginStreamExecuteRequest post_begin_queries */ /** - * Constructs a new PrimaryStatus. - * @memberof replicationdata - * @classdesc Represents a PrimaryStatus. - * @implements IPrimaryStatus + * Constructs a new ReserveBeginStreamExecuteRequest. + * @memberof query + * @classdesc Represents a ReserveBeginStreamExecuteRequest. 
+ * @implements IReserveBeginStreamExecuteRequest * @constructor - * @param {replicationdata.IPrimaryStatus=} [properties] Properties to set + * @param {query.IReserveBeginStreamExecuteRequest=} [properties] Properties to set */ - function PrimaryStatus(properties) { - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + function ReserveBeginStreamExecuteRequest(properties) { + this.pre_queries = []; + this.post_begin_queries = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) this[keys[i]] = properties[keys[i]]; } /** - * PrimaryStatus position. - * @member {string} position - * @memberof replicationdata.PrimaryStatus + * ReserveBeginStreamExecuteRequest effective_caller_id. + * @member {vtrpc.ICallerID|null|undefined} effective_caller_id + * @memberof query.ReserveBeginStreamExecuteRequest * @instance */ - PrimaryStatus.prototype.position = ""; + ReserveBeginStreamExecuteRequest.prototype.effective_caller_id = null; /** - * PrimaryStatus file_position. - * @member {string} file_position - * @memberof replicationdata.PrimaryStatus + * ReserveBeginStreamExecuteRequest immediate_caller_id. + * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id + * @memberof query.ReserveBeginStreamExecuteRequest * @instance */ - PrimaryStatus.prototype.file_position = ""; + ReserveBeginStreamExecuteRequest.prototype.immediate_caller_id = null; /** - * Creates a new PrimaryStatus instance using the specified properties. + * ReserveBeginStreamExecuteRequest target. + * @member {query.ITarget|null|undefined} target + * @memberof query.ReserveBeginStreamExecuteRequest + * @instance + */ + ReserveBeginStreamExecuteRequest.prototype.target = null; + + /** + * ReserveBeginStreamExecuteRequest query. 
+ * @member {query.IBoundQuery|null|undefined} query + * @memberof query.ReserveBeginStreamExecuteRequest + * @instance + */ + ReserveBeginStreamExecuteRequest.prototype.query = null; + + /** + * ReserveBeginStreamExecuteRequest options. + * @member {query.IExecuteOptions|null|undefined} options + * @memberof query.ReserveBeginStreamExecuteRequest + * @instance + */ + ReserveBeginStreamExecuteRequest.prototype.options = null; + + /** + * ReserveBeginStreamExecuteRequest pre_queries. + * @member {Array.} pre_queries + * @memberof query.ReserveBeginStreamExecuteRequest + * @instance + */ + ReserveBeginStreamExecuteRequest.prototype.pre_queries = $util.emptyArray; + + /** + * ReserveBeginStreamExecuteRequest post_begin_queries. + * @member {Array.} post_begin_queries + * @memberof query.ReserveBeginStreamExecuteRequest + * @instance + */ + ReserveBeginStreamExecuteRequest.prototype.post_begin_queries = $util.emptyArray; + + /** + * Creates a new ReserveBeginStreamExecuteRequest instance using the specified properties. * @function create - * @memberof replicationdata.PrimaryStatus + * @memberof query.ReserveBeginStreamExecuteRequest * @static - * @param {replicationdata.IPrimaryStatus=} [properties] Properties to set - * @returns {replicationdata.PrimaryStatus} PrimaryStatus instance + * @param {query.IReserveBeginStreamExecuteRequest=} [properties] Properties to set + * @returns {query.ReserveBeginStreamExecuteRequest} ReserveBeginStreamExecuteRequest instance */ - PrimaryStatus.create = function create(properties) { - return new PrimaryStatus(properties); + ReserveBeginStreamExecuteRequest.create = function create(properties) { + return new ReserveBeginStreamExecuteRequest(properties); }; /** - * Encodes the specified PrimaryStatus message. Does not implicitly {@link replicationdata.PrimaryStatus.verify|verify} messages. + * Encodes the specified ReserveBeginStreamExecuteRequest message. 
Does not implicitly {@link query.ReserveBeginStreamExecuteRequest.verify|verify} messages. * @function encode - * @memberof replicationdata.PrimaryStatus + * @memberof query.ReserveBeginStreamExecuteRequest * @static - * @param {replicationdata.IPrimaryStatus} message PrimaryStatus message or plain object to encode + * @param {query.IReserveBeginStreamExecuteRequest} message ReserveBeginStreamExecuteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - PrimaryStatus.encode = function encode(message, writer) { + ReserveBeginStreamExecuteRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.position != null && Object.hasOwnProperty.call(message, "position")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.position); - if (message.file_position != null && Object.hasOwnProperty.call(message, "file_position")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.file_position); + if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) + $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) + $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.target != null && Object.hasOwnProperty.call(message, "target")) + $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.query != null && Object.hasOwnProperty.call(message, "query")) + $root.query.BoundQuery.encode(message.query, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.options != null && Object.hasOwnProperty.call(message, "options")) + $root.query.ExecuteOptions.encode(message.options, 
writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.pre_queries != null && message.pre_queries.length) + for (let i = 0; i < message.pre_queries.length; ++i) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.pre_queries[i]); + if (message.post_begin_queries != null && message.post_begin_queries.length) + for (let i = 0; i < message.post_begin_queries.length; ++i) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.post_begin_queries[i]); return writer; }; /** - * Encodes the specified PrimaryStatus message, length delimited. Does not implicitly {@link replicationdata.PrimaryStatus.verify|verify} messages. + * Encodes the specified ReserveBeginStreamExecuteRequest message, length delimited. Does not implicitly {@link query.ReserveBeginStreamExecuteRequest.verify|verify} messages. * @function encodeDelimited - * @memberof replicationdata.PrimaryStatus + * @memberof query.ReserveBeginStreamExecuteRequest * @static - * @param {replicationdata.IPrimaryStatus} message PrimaryStatus message or plain object to encode + * @param {query.IReserveBeginStreamExecuteRequest} message ReserveBeginStreamExecuteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - PrimaryStatus.encodeDelimited = function encodeDelimited(message, writer) { + ReserveBeginStreamExecuteRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a PrimaryStatus message from the specified reader or buffer. + * Decodes a ReserveBeginStreamExecuteRequest message from the specified reader or buffer. 
* @function decode - * @memberof replicationdata.PrimaryStatus + * @memberof query.ReserveBeginStreamExecuteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {replicationdata.PrimaryStatus} PrimaryStatus + * @returns {query.ReserveBeginStreamExecuteRequest} ReserveBeginStreamExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PrimaryStatus.decode = function decode(reader, length) { + ReserveBeginStreamExecuteRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.replicationdata.PrimaryStatus(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ReserveBeginStreamExecuteRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.position = reader.string(); + message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); break; } case 2: { - message.file_position = reader.string(); + message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); + break; + } + case 3: { + message.target = $root.query.Target.decode(reader, reader.uint32()); + break; + } + case 4: { + message.query = $root.query.BoundQuery.decode(reader, reader.uint32()); + break; + } + case 5: { + message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); + break; + } + case 6: { + if (!(message.pre_queries && message.pre_queries.length)) + message.pre_queries = []; + message.pre_queries.push(reader.string()); + break; + } + case 7: { + if (!(message.post_begin_queries && message.post_begin_queries.length)) + message.post_begin_queries = []; + 
message.post_begin_queries.push(reader.string()); break; } default: @@ -93099,151 +94154,227 @@ export const replicationdata = $root.replicationdata = (() => { }; /** - * Decodes a PrimaryStatus message from the specified reader or buffer, length delimited. + * Decodes a ReserveBeginStreamExecuteRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof replicationdata.PrimaryStatus + * @memberof query.ReserveBeginStreamExecuteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {replicationdata.PrimaryStatus} PrimaryStatus + * @returns {query.ReserveBeginStreamExecuteRequest} ReserveBeginStreamExecuteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PrimaryStatus.decodeDelimited = function decodeDelimited(reader) { + ReserveBeginStreamExecuteRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a PrimaryStatus message. + * Verifies a ReserveBeginStreamExecuteRequest message. 
* @function verify - * @memberof replicationdata.PrimaryStatus + * @memberof query.ReserveBeginStreamExecuteRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - PrimaryStatus.verify = function verify(message) { + ReserveBeginStreamExecuteRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.position != null && message.hasOwnProperty("position")) - if (!$util.isString(message.position)) - return "position: string expected"; - if (message.file_position != null && message.hasOwnProperty("file_position")) - if (!$util.isString(message.file_position)) - return "file_position: string expected"; + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { + let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); + if (error) + return "effective_caller_id." + error; + } + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { + let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); + if (error) + return "immediate_caller_id." + error; + } + if (message.target != null && message.hasOwnProperty("target")) { + let error = $root.query.Target.verify(message.target); + if (error) + return "target." + error; + } + if (message.query != null && message.hasOwnProperty("query")) { + let error = $root.query.BoundQuery.verify(message.query); + if (error) + return "query." + error; + } + if (message.options != null && message.hasOwnProperty("options")) { + let error = $root.query.ExecuteOptions.verify(message.options); + if (error) + return "options." 
+ error; + } + if (message.pre_queries != null && message.hasOwnProperty("pre_queries")) { + if (!Array.isArray(message.pre_queries)) + return "pre_queries: array expected"; + for (let i = 0; i < message.pre_queries.length; ++i) + if (!$util.isString(message.pre_queries[i])) + return "pre_queries: string[] expected"; + } + if (message.post_begin_queries != null && message.hasOwnProperty("post_begin_queries")) { + if (!Array.isArray(message.post_begin_queries)) + return "post_begin_queries: array expected"; + for (let i = 0; i < message.post_begin_queries.length; ++i) + if (!$util.isString(message.post_begin_queries[i])) + return "post_begin_queries: string[] expected"; + } return null; }; /** - * Creates a PrimaryStatus message from a plain object. Also converts values to their respective internal types. + * Creates a ReserveBeginStreamExecuteRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof replicationdata.PrimaryStatus + * @memberof query.ReserveBeginStreamExecuteRequest * @static * @param {Object.} object Plain object - * @returns {replicationdata.PrimaryStatus} PrimaryStatus + * @returns {query.ReserveBeginStreamExecuteRequest} ReserveBeginStreamExecuteRequest */ - PrimaryStatus.fromObject = function fromObject(object) { - if (object instanceof $root.replicationdata.PrimaryStatus) + ReserveBeginStreamExecuteRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.ReserveBeginStreamExecuteRequest) return object; - let message = new $root.replicationdata.PrimaryStatus(); - if (object.position != null) - message.position = String(object.position); - if (object.file_position != null) - message.file_position = String(object.file_position); + let message = new $root.query.ReserveBeginStreamExecuteRequest(); + if (object.effective_caller_id != null) { + if (typeof object.effective_caller_id !== "object") + throw 
TypeError(".query.ReserveBeginStreamExecuteRequest.effective_caller_id: object expected"); + message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); + } + if (object.immediate_caller_id != null) { + if (typeof object.immediate_caller_id !== "object") + throw TypeError(".query.ReserveBeginStreamExecuteRequest.immediate_caller_id: object expected"); + message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); + } + if (object.target != null) { + if (typeof object.target !== "object") + throw TypeError(".query.ReserveBeginStreamExecuteRequest.target: object expected"); + message.target = $root.query.Target.fromObject(object.target); + } + if (object.query != null) { + if (typeof object.query !== "object") + throw TypeError(".query.ReserveBeginStreamExecuteRequest.query: object expected"); + message.query = $root.query.BoundQuery.fromObject(object.query); + } + if (object.options != null) { + if (typeof object.options !== "object") + throw TypeError(".query.ReserveBeginStreamExecuteRequest.options: object expected"); + message.options = $root.query.ExecuteOptions.fromObject(object.options); + } + if (object.pre_queries) { + if (!Array.isArray(object.pre_queries)) + throw TypeError(".query.ReserveBeginStreamExecuteRequest.pre_queries: array expected"); + message.pre_queries = []; + for (let i = 0; i < object.pre_queries.length; ++i) + message.pre_queries[i] = String(object.pre_queries[i]); + } + if (object.post_begin_queries) { + if (!Array.isArray(object.post_begin_queries)) + throw TypeError(".query.ReserveBeginStreamExecuteRequest.post_begin_queries: array expected"); + message.post_begin_queries = []; + for (let i = 0; i < object.post_begin_queries.length; ++i) + message.post_begin_queries[i] = String(object.post_begin_queries[i]); + } return message; }; /** - * Creates a plain object from a PrimaryStatus message. Also converts values to other types if specified. 
+ * Creates a plain object from a ReserveBeginStreamExecuteRequest message. Also converts values to other types if specified. * @function toObject - * @memberof replicationdata.PrimaryStatus + * @memberof query.ReserveBeginStreamExecuteRequest * @static - * @param {replicationdata.PrimaryStatus} message PrimaryStatus + * @param {query.ReserveBeginStreamExecuteRequest} message ReserveBeginStreamExecuteRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - PrimaryStatus.toObject = function toObject(message, options) { + ReserveBeginStreamExecuteRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) { + object.pre_queries = []; + object.post_begin_queries = []; + } if (options.defaults) { - object.position = ""; - object.file_position = ""; + object.effective_caller_id = null; + object.immediate_caller_id = null; + object.target = null; + object.query = null; + object.options = null; + } + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) + object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) + object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); + if (message.target != null && message.hasOwnProperty("target")) + object.target = $root.query.Target.toObject(message.target, options); + if (message.query != null && message.hasOwnProperty("query")) + object.query = $root.query.BoundQuery.toObject(message.query, options); + if (message.options != null && message.hasOwnProperty("options")) + object.options = $root.query.ExecuteOptions.toObject(message.options, options); + if (message.pre_queries && message.pre_queries.length) { + object.pre_queries = []; + for (let j = 0; j < message.pre_queries.length; 
++j) + object.pre_queries[j] = message.pre_queries[j]; + } + if (message.post_begin_queries && message.post_begin_queries.length) { + object.post_begin_queries = []; + for (let j = 0; j < message.post_begin_queries.length; ++j) + object.post_begin_queries[j] = message.post_begin_queries[j]; } - if (message.position != null && message.hasOwnProperty("position")) - object.position = message.position; - if (message.file_position != null && message.hasOwnProperty("file_position")) - object.file_position = message.file_position; return object; }; /** - * Converts this PrimaryStatus to JSON. + * Converts this ReserveBeginStreamExecuteRequest to JSON. * @function toJSON - * @memberof replicationdata.PrimaryStatus + * @memberof query.ReserveBeginStreamExecuteRequest * @instance * @returns {Object.} JSON object */ - PrimaryStatus.prototype.toJSON = function toJSON() { + ReserveBeginStreamExecuteRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for PrimaryStatus + * Gets the default type url for ReserveBeginStreamExecuteRequest * @function getTypeUrl - * @memberof replicationdata.PrimaryStatus + * @memberof query.ReserveBeginStreamExecuteRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - PrimaryStatus.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReserveBeginStreamExecuteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/replicationdata.PrimaryStatus"; + return typeUrlPrefix + "/query.ReserveBeginStreamExecuteRequest"; }; - return PrimaryStatus; + return ReserveBeginStreamExecuteRequest; })(); - replicationdata.FullStatus = (function() { + query.ReserveBeginStreamExecuteResponse = (function() { /** - * Properties of a FullStatus. 
- * @memberof replicationdata - * @interface IFullStatus - * @property {number|null} [server_id] FullStatus server_id - * @property {string|null} [server_uuid] FullStatus server_uuid - * @property {replicationdata.IStatus|null} [replication_status] FullStatus replication_status - * @property {replicationdata.IPrimaryStatus|null} [primary_status] FullStatus primary_status - * @property {string|null} [gtid_purged] FullStatus gtid_purged - * @property {string|null} [version] FullStatus version - * @property {string|null} [version_comment] FullStatus version_comment - * @property {boolean|null} [read_only] FullStatus read_only - * @property {string|null} [gtid_mode] FullStatus gtid_mode - * @property {string|null} [binlog_format] FullStatus binlog_format - * @property {string|null} [binlog_row_image] FullStatus binlog_row_image - * @property {boolean|null} [log_bin_enabled] FullStatus log_bin_enabled - * @property {boolean|null} [log_replica_updates] FullStatus log_replica_updates - * @property {boolean|null} [semi_sync_primary_enabled] FullStatus semi_sync_primary_enabled - * @property {boolean|null} [semi_sync_replica_enabled] FullStatus semi_sync_replica_enabled - * @property {boolean|null} [semi_sync_primary_status] FullStatus semi_sync_primary_status - * @property {boolean|null} [semi_sync_replica_status] FullStatus semi_sync_replica_status - * @property {number|null} [semi_sync_primary_clients] FullStatus semi_sync_primary_clients - * @property {number|Long|null} [semi_sync_primary_timeout] FullStatus semi_sync_primary_timeout - * @property {number|null} [semi_sync_wait_for_replica_count] FullStatus semi_sync_wait_for_replica_count - * @property {boolean|null} [super_read_only] FullStatus super_read_only + * Properties of a ReserveBeginStreamExecuteResponse. 
+ * @memberof query + * @interface IReserveBeginStreamExecuteResponse + * @property {vtrpc.IRPCError|null} [error] ReserveBeginStreamExecuteResponse error + * @property {query.IQueryResult|null} [result] ReserveBeginStreamExecuteResponse result + * @property {number|Long|null} [transaction_id] ReserveBeginStreamExecuteResponse transaction_id + * @property {number|Long|null} [reserved_id] ReserveBeginStreamExecuteResponse reserved_id + * @property {topodata.ITabletAlias|null} [tablet_alias] ReserveBeginStreamExecuteResponse tablet_alias + * @property {string|null} [session_state_changes] ReserveBeginStreamExecuteResponse session_state_changes */ /** - * Constructs a new FullStatus. - * @memberof replicationdata - * @classdesc Represents a FullStatus. - * @implements IFullStatus + * Constructs a new ReserveBeginStreamExecuteResponse. + * @memberof query + * @classdesc Represents a ReserveBeginStreamExecuteResponse. + * @implements IReserveBeginStreamExecuteResponse * @constructor - * @param {replicationdata.IFullStatus=} [properties] Properties to set + * @param {query.IReserveBeginStreamExecuteResponse=} [properties] Properties to set */ - function FullStatus(properties) { + function ReserveBeginStreamExecuteResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -93251,355 +94382,492 @@ export const replicationdata = $root.replicationdata = (() => { } /** - * FullStatus server_id. - * @member {number} server_id - * @memberof replicationdata.FullStatus - * @instance - */ - FullStatus.prototype.server_id = 0; - - /** - * FullStatus server_uuid. - * @member {string} server_uuid - * @memberof replicationdata.FullStatus - * @instance - */ - FullStatus.prototype.server_uuid = ""; - - /** - * FullStatus replication_status. 
- * @member {replicationdata.IStatus|null|undefined} replication_status - * @memberof replicationdata.FullStatus - * @instance - */ - FullStatus.prototype.replication_status = null; - - /** - * FullStatus primary_status. - * @member {replicationdata.IPrimaryStatus|null|undefined} primary_status - * @memberof replicationdata.FullStatus + * ReserveBeginStreamExecuteResponse error. + * @member {vtrpc.IRPCError|null|undefined} error + * @memberof query.ReserveBeginStreamExecuteResponse * @instance */ - FullStatus.prototype.primary_status = null; + ReserveBeginStreamExecuteResponse.prototype.error = null; /** - * FullStatus gtid_purged. - * @member {string} gtid_purged - * @memberof replicationdata.FullStatus + * ReserveBeginStreamExecuteResponse result. + * @member {query.IQueryResult|null|undefined} result + * @memberof query.ReserveBeginStreamExecuteResponse * @instance */ - FullStatus.prototype.gtid_purged = ""; + ReserveBeginStreamExecuteResponse.prototype.result = null; /** - * FullStatus version. - * @member {string} version - * @memberof replicationdata.FullStatus + * ReserveBeginStreamExecuteResponse transaction_id. + * @member {number|Long} transaction_id + * @memberof query.ReserveBeginStreamExecuteResponse * @instance */ - FullStatus.prototype.version = ""; + ReserveBeginStreamExecuteResponse.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * FullStatus version_comment. - * @member {string} version_comment - * @memberof replicationdata.FullStatus + * ReserveBeginStreamExecuteResponse reserved_id. + * @member {number|Long} reserved_id + * @memberof query.ReserveBeginStreamExecuteResponse * @instance */ - FullStatus.prototype.version_comment = ""; + ReserveBeginStreamExecuteResponse.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * FullStatus read_only. - * @member {boolean} read_only - * @memberof replicationdata.FullStatus + * ReserveBeginStreamExecuteResponse tablet_alias. 
+ * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof query.ReserveBeginStreamExecuteResponse * @instance */ - FullStatus.prototype.read_only = false; + ReserveBeginStreamExecuteResponse.prototype.tablet_alias = null; /** - * FullStatus gtid_mode. - * @member {string} gtid_mode - * @memberof replicationdata.FullStatus + * ReserveBeginStreamExecuteResponse session_state_changes. + * @member {string} session_state_changes + * @memberof query.ReserveBeginStreamExecuteResponse * @instance */ - FullStatus.prototype.gtid_mode = ""; + ReserveBeginStreamExecuteResponse.prototype.session_state_changes = ""; /** - * FullStatus binlog_format. - * @member {string} binlog_format - * @memberof replicationdata.FullStatus - * @instance + * Creates a new ReserveBeginStreamExecuteResponse instance using the specified properties. + * @function create + * @memberof query.ReserveBeginStreamExecuteResponse + * @static + * @param {query.IReserveBeginStreamExecuteResponse=} [properties] Properties to set + * @returns {query.ReserveBeginStreamExecuteResponse} ReserveBeginStreamExecuteResponse instance */ - FullStatus.prototype.binlog_format = ""; + ReserveBeginStreamExecuteResponse.create = function create(properties) { + return new ReserveBeginStreamExecuteResponse(properties); + }; /** - * FullStatus binlog_row_image. - * @member {string} binlog_row_image - * @memberof replicationdata.FullStatus - * @instance + * Encodes the specified ReserveBeginStreamExecuteResponse message. Does not implicitly {@link query.ReserveBeginStreamExecuteResponse.verify|verify} messages. 
+ * @function encode + * @memberof query.ReserveBeginStreamExecuteResponse + * @static + * @param {query.IReserveBeginStreamExecuteResponse} message ReserveBeginStreamExecuteResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer */ - FullStatus.prototype.binlog_row_image = ""; + ReserveBeginStreamExecuteResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.error != null && Object.hasOwnProperty.call(message, "error")) + $root.vtrpc.RPCError.encode(message.error, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.result != null && Object.hasOwnProperty.call(message, "result")) + $root.query.QueryResult.encode(message.result, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) + writer.uint32(/* id 3, wireType 0 =*/24).int64(message.transaction_id); + if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) + writer.uint32(/* id 4, wireType 0 =*/32).int64(message.reserved_id); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.session_state_changes != null && Object.hasOwnProperty.call(message, "session_state_changes")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.session_state_changes); + return writer; + }; /** - * FullStatus log_bin_enabled. - * @member {boolean} log_bin_enabled - * @memberof replicationdata.FullStatus - * @instance + * Encodes the specified ReserveBeginStreamExecuteResponse message, length delimited. Does not implicitly {@link query.ReserveBeginStreamExecuteResponse.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof query.ReserveBeginStreamExecuteResponse + * @static + * @param {query.IReserveBeginStreamExecuteResponse} message ReserveBeginStreamExecuteResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer */ - FullStatus.prototype.log_bin_enabled = false; + ReserveBeginStreamExecuteResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; /** - * FullStatus log_replica_updates. - * @member {boolean} log_replica_updates - * @memberof replicationdata.FullStatus - * @instance + * Decodes a ReserveBeginStreamExecuteResponse message from the specified reader or buffer. + * @function decode + * @memberof query.ReserveBeginStreamExecuteResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {query.ReserveBeginStreamExecuteResponse} ReserveBeginStreamExecuteResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FullStatus.prototype.log_replica_updates = false; - + ReserveBeginStreamExecuteResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.ReserveBeginStreamExecuteResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.error = $root.vtrpc.RPCError.decode(reader, reader.uint32()); + break; + } + case 2: { + message.result = $root.query.QueryResult.decode(reader, reader.uint32()); + break; + } + case 3: { + message.transaction_id = reader.int64(); + break; + } + case 4: { + message.reserved_id = reader.int64(); + break; + } + case 5: { + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 6: { + message.session_state_changes = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + /** - * FullStatus semi_sync_primary_enabled. - * @member {boolean} semi_sync_primary_enabled - * @memberof replicationdata.FullStatus - * @instance + * Decodes a ReserveBeginStreamExecuteResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof query.ReserveBeginStreamExecuteResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {query.ReserveBeginStreamExecuteResponse} ReserveBeginStreamExecuteResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FullStatus.prototype.semi_sync_primary_enabled = false; + ReserveBeginStreamExecuteResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; /** - * FullStatus semi_sync_replica_enabled. - * @member {boolean} semi_sync_replica_enabled - * @memberof replicationdata.FullStatus - * @instance + * Verifies a ReserveBeginStreamExecuteResponse message. 
+ * @function verify + * @memberof query.ReserveBeginStreamExecuteResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - FullStatus.prototype.semi_sync_replica_enabled = false; + ReserveBeginStreamExecuteResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.error != null && message.hasOwnProperty("error")) { + let error = $root.vtrpc.RPCError.verify(message.error); + if (error) + return "error." + error; + } + if (message.result != null && message.hasOwnProperty("result")) { + let error = $root.query.QueryResult.verify(message.result); + if (error) + return "result." + error; + } + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) + return "transaction_id: integer|Long expected"; + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) + return "reserved_id: integer|Long expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; + } + if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) + if (!$util.isString(message.session_state_changes)) + return "session_state_changes: string expected"; + return null; + }; /** - * FullStatus semi_sync_primary_status. - * @member {boolean} semi_sync_primary_status - * @memberof replicationdata.FullStatus + * Creates a ReserveBeginStreamExecuteResponse message from a plain object. 
Also converts values to their respective internal types. + * @function fromObject + * @memberof query.ReserveBeginStreamExecuteResponse + * @static + * @param {Object.} object Plain object + * @returns {query.ReserveBeginStreamExecuteResponse} ReserveBeginStreamExecuteResponse + */ + ReserveBeginStreamExecuteResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.ReserveBeginStreamExecuteResponse) + return object; + let message = new $root.query.ReserveBeginStreamExecuteResponse(); + if (object.error != null) { + if (typeof object.error !== "object") + throw TypeError(".query.ReserveBeginStreamExecuteResponse.error: object expected"); + message.error = $root.vtrpc.RPCError.fromObject(object.error); + } + if (object.result != null) { + if (typeof object.result !== "object") + throw TypeError(".query.ReserveBeginStreamExecuteResponse.result: object expected"); + message.result = $root.query.QueryResult.fromObject(object.result); + } + if (object.transaction_id != null) + if ($util.Long) + (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; + else if (typeof object.transaction_id === "string") + message.transaction_id = parseInt(object.transaction_id, 10); + else if (typeof object.transaction_id === "number") + message.transaction_id = object.transaction_id; + else if (typeof object.transaction_id === "object") + message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); + if (object.reserved_id != null) + if ($util.Long) + (message.reserved_id = $util.Long.fromValue(object.reserved_id)).unsigned = false; + else if (typeof object.reserved_id === "string") + message.reserved_id = parseInt(object.reserved_id, 10); + else if (typeof object.reserved_id === "number") + message.reserved_id = object.reserved_id; + else if (typeof object.reserved_id === "object") + message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, 
object.reserved_id.high >>> 0).toNumber(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".query.ReserveBeginStreamExecuteResponse.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } + if (object.session_state_changes != null) + message.session_state_changes = String(object.session_state_changes); + return message; + }; + + /** + * Creates a plain object from a ReserveBeginStreamExecuteResponse message. Also converts values to other types if specified. + * @function toObject + * @memberof query.ReserveBeginStreamExecuteResponse + * @static + * @param {query.ReserveBeginStreamExecuteResponse} message ReserveBeginStreamExecuteResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ReserveBeginStreamExecuteResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.error = null; + object.result = null; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.transaction_id = options.longs === String ? "0" : 0; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.reserved_id = options.longs === String ? 
"0" : 0; + object.tablet_alias = null; + object.session_state_changes = ""; + } + if (message.error != null && message.hasOwnProperty("error")) + object.error = $root.vtrpc.RPCError.toObject(message.error, options); + if (message.result != null && message.hasOwnProperty("result")) + object.result = $root.query.QueryResult.toObject(message.result, options); + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (typeof message.transaction_id === "number") + object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; + else + object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (typeof message.reserved_id === "number") + object.reserved_id = options.longs === String ? String(message.reserved_id) : message.reserved_id; + else + object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (message.session_state_changes != null && message.hasOwnProperty("session_state_changes")) + object.session_state_changes = message.session_state_changes; + return object; + }; + + /** + * Converts this ReserveBeginStreamExecuteResponse to JSON. 
+ * @function toJSON + * @memberof query.ReserveBeginStreamExecuteResponse * @instance + * @returns {Object.} JSON object */ - FullStatus.prototype.semi_sync_primary_status = false; + ReserveBeginStreamExecuteResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; /** - * FullStatus semi_sync_replica_status. - * @member {boolean} semi_sync_replica_status - * @memberof replicationdata.FullStatus + * Gets the default type url for ReserveBeginStreamExecuteResponse + * @function getTypeUrl + * @memberof query.ReserveBeginStreamExecuteResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ReserveBeginStreamExecuteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/query.ReserveBeginStreamExecuteResponse"; + }; + + return ReserveBeginStreamExecuteResponse; + })(); + + query.ReleaseRequest = (function() { + + /** + * Properties of a ReleaseRequest. + * @memberof query + * @interface IReleaseRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] ReleaseRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] ReleaseRequest immediate_caller_id + * @property {query.ITarget|null} [target] ReleaseRequest target + * @property {number|Long|null} [transaction_id] ReleaseRequest transaction_id + * @property {number|Long|null} [reserved_id] ReleaseRequest reserved_id + */ + + /** + * Constructs a new ReleaseRequest. + * @memberof query + * @classdesc Represents a ReleaseRequest. 
+ * @implements IReleaseRequest + * @constructor + * @param {query.IReleaseRequest=} [properties] Properties to set + */ + function ReleaseRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ReleaseRequest effective_caller_id. + * @member {vtrpc.ICallerID|null|undefined} effective_caller_id + * @memberof query.ReleaseRequest * @instance */ - FullStatus.prototype.semi_sync_replica_status = false; + ReleaseRequest.prototype.effective_caller_id = null; /** - * FullStatus semi_sync_primary_clients. - * @member {number} semi_sync_primary_clients - * @memberof replicationdata.FullStatus + * ReleaseRequest immediate_caller_id. + * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id + * @memberof query.ReleaseRequest * @instance */ - FullStatus.prototype.semi_sync_primary_clients = 0; + ReleaseRequest.prototype.immediate_caller_id = null; /** - * FullStatus semi_sync_primary_timeout. - * @member {number|Long} semi_sync_primary_timeout - * @memberof replicationdata.FullStatus + * ReleaseRequest target. + * @member {query.ITarget|null|undefined} target + * @memberof query.ReleaseRequest * @instance */ - FullStatus.prototype.semi_sync_primary_timeout = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + ReleaseRequest.prototype.target = null; /** - * FullStatus semi_sync_wait_for_replica_count. - * @member {number} semi_sync_wait_for_replica_count - * @memberof replicationdata.FullStatus + * ReleaseRequest transaction_id. + * @member {number|Long} transaction_id + * @memberof query.ReleaseRequest * @instance */ - FullStatus.prototype.semi_sync_wait_for_replica_count = 0; + ReleaseRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * FullStatus super_read_only. - * @member {boolean} super_read_only - * @memberof replicationdata.FullStatus + * ReleaseRequest reserved_id. 
+ * @member {number|Long} reserved_id + * @memberof query.ReleaseRequest * @instance */ - FullStatus.prototype.super_read_only = false; + ReleaseRequest.prototype.reserved_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * Creates a new FullStatus instance using the specified properties. + * Creates a new ReleaseRequest instance using the specified properties. * @function create - * @memberof replicationdata.FullStatus + * @memberof query.ReleaseRequest * @static - * @param {replicationdata.IFullStatus=} [properties] Properties to set - * @returns {replicationdata.FullStatus} FullStatus instance + * @param {query.IReleaseRequest=} [properties] Properties to set + * @returns {query.ReleaseRequest} ReleaseRequest instance */ - FullStatus.create = function create(properties) { - return new FullStatus(properties); + ReleaseRequest.create = function create(properties) { + return new ReleaseRequest(properties); }; /** - * Encodes the specified FullStatus message. Does not implicitly {@link replicationdata.FullStatus.verify|verify} messages. + * Encodes the specified ReleaseRequest message. Does not implicitly {@link query.ReleaseRequest.verify|verify} messages. 
* @function encode - * @memberof replicationdata.FullStatus + * @memberof query.ReleaseRequest * @static - * @param {replicationdata.IFullStatus} message FullStatus message or plain object to encode + * @param {query.IReleaseRequest} message ReleaseRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - FullStatus.encode = function encode(message, writer) { + ReleaseRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.server_id != null && Object.hasOwnProperty.call(message, "server_id")) - writer.uint32(/* id 1, wireType 0 =*/8).uint32(message.server_id); - if (message.server_uuid != null && Object.hasOwnProperty.call(message, "server_uuid")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.server_uuid); - if (message.replication_status != null && Object.hasOwnProperty.call(message, "replication_status")) - $root.replicationdata.Status.encode(message.replication_status, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.primary_status != null && Object.hasOwnProperty.call(message, "primary_status")) - $root.replicationdata.PrimaryStatus.encode(message.primary_status, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.gtid_purged != null && Object.hasOwnProperty.call(message, "gtid_purged")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.gtid_purged); - if (message.version != null && Object.hasOwnProperty.call(message, "version")) - writer.uint32(/* id 6, wireType 2 =*/50).string(message.version); - if (message.version_comment != null && Object.hasOwnProperty.call(message, "version_comment")) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.version_comment); - if (message.read_only != null && Object.hasOwnProperty.call(message, "read_only")) - writer.uint32(/* id 8, wireType 0 =*/64).bool(message.read_only); - if (message.gtid_mode != null && 
Object.hasOwnProperty.call(message, "gtid_mode")) - writer.uint32(/* id 9, wireType 2 =*/74).string(message.gtid_mode); - if (message.binlog_format != null && Object.hasOwnProperty.call(message, "binlog_format")) - writer.uint32(/* id 10, wireType 2 =*/82).string(message.binlog_format); - if (message.binlog_row_image != null && Object.hasOwnProperty.call(message, "binlog_row_image")) - writer.uint32(/* id 11, wireType 2 =*/90).string(message.binlog_row_image); - if (message.log_bin_enabled != null && Object.hasOwnProperty.call(message, "log_bin_enabled")) - writer.uint32(/* id 12, wireType 0 =*/96).bool(message.log_bin_enabled); - if (message.log_replica_updates != null && Object.hasOwnProperty.call(message, "log_replica_updates")) - writer.uint32(/* id 13, wireType 0 =*/104).bool(message.log_replica_updates); - if (message.semi_sync_primary_enabled != null && Object.hasOwnProperty.call(message, "semi_sync_primary_enabled")) - writer.uint32(/* id 14, wireType 0 =*/112).bool(message.semi_sync_primary_enabled); - if (message.semi_sync_replica_enabled != null && Object.hasOwnProperty.call(message, "semi_sync_replica_enabled")) - writer.uint32(/* id 15, wireType 0 =*/120).bool(message.semi_sync_replica_enabled); - if (message.semi_sync_primary_status != null && Object.hasOwnProperty.call(message, "semi_sync_primary_status")) - writer.uint32(/* id 16, wireType 0 =*/128).bool(message.semi_sync_primary_status); - if (message.semi_sync_replica_status != null && Object.hasOwnProperty.call(message, "semi_sync_replica_status")) - writer.uint32(/* id 17, wireType 0 =*/136).bool(message.semi_sync_replica_status); - if (message.semi_sync_primary_clients != null && Object.hasOwnProperty.call(message, "semi_sync_primary_clients")) - writer.uint32(/* id 18, wireType 0 =*/144).uint32(message.semi_sync_primary_clients); - if (message.semi_sync_primary_timeout != null && Object.hasOwnProperty.call(message, "semi_sync_primary_timeout")) - writer.uint32(/* id 19, wireType 0 
=*/152).uint64(message.semi_sync_primary_timeout); - if (message.semi_sync_wait_for_replica_count != null && Object.hasOwnProperty.call(message, "semi_sync_wait_for_replica_count")) - writer.uint32(/* id 20, wireType 0 =*/160).uint32(message.semi_sync_wait_for_replica_count); - if (message.super_read_only != null && Object.hasOwnProperty.call(message, "super_read_only")) - writer.uint32(/* id 21, wireType 0 =*/168).bool(message.super_read_only); + if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) + $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) + $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.target != null && Object.hasOwnProperty.call(message, "target")) + $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) + writer.uint32(/* id 4, wireType 0 =*/32).int64(message.transaction_id); + if (message.reserved_id != null && Object.hasOwnProperty.call(message, "reserved_id")) + writer.uint32(/* id 5, wireType 0 =*/40).int64(message.reserved_id); return writer; }; /** - * Encodes the specified FullStatus message, length delimited. Does not implicitly {@link replicationdata.FullStatus.verify|verify} messages. + * Encodes the specified ReleaseRequest message, length delimited. Does not implicitly {@link query.ReleaseRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof replicationdata.FullStatus + * @memberof query.ReleaseRequest * @static - * @param {replicationdata.IFullStatus} message FullStatus message or plain object to encode + * @param {query.IReleaseRequest} message ReleaseRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - FullStatus.encodeDelimited = function encodeDelimited(message, writer) { + ReleaseRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a FullStatus message from the specified reader or buffer. + * Decodes a ReleaseRequest message from the specified reader or buffer. * @function decode - * @memberof replicationdata.FullStatus + * @memberof query.ReleaseRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {replicationdata.FullStatus} FullStatus + * @returns {query.ReleaseRequest} ReleaseRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FullStatus.decode = function decode(reader, length) { + ReleaseRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.replicationdata.FullStatus(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.ReleaseRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.server_id = reader.uint32(); + message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); break; } case 2: { - message.server_uuid = reader.string(); + message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); break; } case 3: { - message.replication_status = $root.replicationdata.Status.decode(reader, reader.uint32()); + message.target = $root.query.Target.decode(reader, reader.uint32()); break; } case 4: { - message.primary_status = $root.replicationdata.PrimaryStatus.decode(reader, reader.uint32()); + message.transaction_id = reader.int64(); break; } case 5: { - message.gtid_purged = reader.string(); - break; - } - case 6: { - message.version = reader.string(); - break; - } - case 7: { - message.version_comment = reader.string(); - break; - } - case 8: { - message.read_only = reader.bool(); - break; - } - case 9: { - message.gtid_mode = reader.string(); - break; - } - case 10: { - message.binlog_format = reader.string(); - break; - } - case 11: { - message.binlog_row_image = reader.string(); - break; - } - case 12: { - message.log_bin_enabled = reader.bool(); - break; - } - case 13: { - message.log_replica_updates = reader.bool(); - break; - } - case 14: { - message.semi_sync_primary_enabled = reader.bool(); - break; - } - case 15: { - message.semi_sync_replica_enabled = reader.bool(); - break; - } - case 16: { - message.semi_sync_primary_status = reader.bool(); - break; - } - case 17: { - message.semi_sync_replica_status = reader.bool(); - break; - } - case 18: { - message.semi_sync_primary_clients = reader.uint32(); - break; - } - case 19: { - message.semi_sync_primary_timeout = reader.uint64(); - break; - } - case 20: { - message.semi_sync_wait_for_replica_count = reader.uint32(); - break; - } - case 21: { - message.super_read_only = 
reader.bool(); + message.reserved_id = reader.int64(); break; } default: @@ -93611,320 +94879,197 @@ export const replicationdata = $root.replicationdata = (() => { }; /** - * Decodes a FullStatus message from the specified reader or buffer, length delimited. + * Decodes a ReleaseRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof replicationdata.FullStatus + * @memberof query.ReleaseRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {replicationdata.FullStatus} FullStatus + * @returns {query.ReleaseRequest} ReleaseRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FullStatus.decodeDelimited = function decodeDelimited(reader) { + ReleaseRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a FullStatus message. + * Verifies a ReleaseRequest message. 
* @function verify - * @memberof replicationdata.FullStatus + * @memberof query.ReleaseRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - FullStatus.verify = function verify(message) { + ReleaseRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.server_id != null && message.hasOwnProperty("server_id")) - if (!$util.isInteger(message.server_id)) - return "server_id: integer expected"; - if (message.server_uuid != null && message.hasOwnProperty("server_uuid")) - if (!$util.isString(message.server_uuid)) - return "server_uuid: string expected"; - if (message.replication_status != null && message.hasOwnProperty("replication_status")) { - let error = $root.replicationdata.Status.verify(message.replication_status); + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { + let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); if (error) - return "replication_status." + error; + return "effective_caller_id." + error; } - if (message.primary_status != null && message.hasOwnProperty("primary_status")) { - let error = $root.replicationdata.PrimaryStatus.verify(message.primary_status); + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { + let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); if (error) - return "primary_status." + error; + return "immediate_caller_id." 
+ error; } - if (message.gtid_purged != null && message.hasOwnProperty("gtid_purged")) - if (!$util.isString(message.gtid_purged)) - return "gtid_purged: string expected"; - if (message.version != null && message.hasOwnProperty("version")) - if (!$util.isString(message.version)) - return "version: string expected"; - if (message.version_comment != null && message.hasOwnProperty("version_comment")) - if (!$util.isString(message.version_comment)) - return "version_comment: string expected"; - if (message.read_only != null && message.hasOwnProperty("read_only")) - if (typeof message.read_only !== "boolean") - return "read_only: boolean expected"; - if (message.gtid_mode != null && message.hasOwnProperty("gtid_mode")) - if (!$util.isString(message.gtid_mode)) - return "gtid_mode: string expected"; - if (message.binlog_format != null && message.hasOwnProperty("binlog_format")) - if (!$util.isString(message.binlog_format)) - return "binlog_format: string expected"; - if (message.binlog_row_image != null && message.hasOwnProperty("binlog_row_image")) - if (!$util.isString(message.binlog_row_image)) - return "binlog_row_image: string expected"; - if (message.log_bin_enabled != null && message.hasOwnProperty("log_bin_enabled")) - if (typeof message.log_bin_enabled !== "boolean") - return "log_bin_enabled: boolean expected"; - if (message.log_replica_updates != null && message.hasOwnProperty("log_replica_updates")) - if (typeof message.log_replica_updates !== "boolean") - return "log_replica_updates: boolean expected"; - if (message.semi_sync_primary_enabled != null && message.hasOwnProperty("semi_sync_primary_enabled")) - if (typeof message.semi_sync_primary_enabled !== "boolean") - return "semi_sync_primary_enabled: boolean expected"; - if (message.semi_sync_replica_enabled != null && message.hasOwnProperty("semi_sync_replica_enabled")) - if (typeof message.semi_sync_replica_enabled !== "boolean") - return "semi_sync_replica_enabled: boolean expected"; - if 
(message.semi_sync_primary_status != null && message.hasOwnProperty("semi_sync_primary_status")) - if (typeof message.semi_sync_primary_status !== "boolean") - return "semi_sync_primary_status: boolean expected"; - if (message.semi_sync_replica_status != null && message.hasOwnProperty("semi_sync_replica_status")) - if (typeof message.semi_sync_replica_status !== "boolean") - return "semi_sync_replica_status: boolean expected"; - if (message.semi_sync_primary_clients != null && message.hasOwnProperty("semi_sync_primary_clients")) - if (!$util.isInteger(message.semi_sync_primary_clients)) - return "semi_sync_primary_clients: integer expected"; - if (message.semi_sync_primary_timeout != null && message.hasOwnProperty("semi_sync_primary_timeout")) - if (!$util.isInteger(message.semi_sync_primary_timeout) && !(message.semi_sync_primary_timeout && $util.isInteger(message.semi_sync_primary_timeout.low) && $util.isInteger(message.semi_sync_primary_timeout.high))) - return "semi_sync_primary_timeout: integer|Long expected"; - if (message.semi_sync_wait_for_replica_count != null && message.hasOwnProperty("semi_sync_wait_for_replica_count")) - if (!$util.isInteger(message.semi_sync_wait_for_replica_count)) - return "semi_sync_wait_for_replica_count: integer expected"; - if (message.super_read_only != null && message.hasOwnProperty("super_read_only")) - if (typeof message.super_read_only !== "boolean") - return "super_read_only: boolean expected"; + if (message.target != null && message.hasOwnProperty("target")) { + let error = $root.query.Target.verify(message.target); + if (error) + return "target." 
+ error; + } + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) + return "transaction_id: integer|Long expected"; + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (!$util.isInteger(message.reserved_id) && !(message.reserved_id && $util.isInteger(message.reserved_id.low) && $util.isInteger(message.reserved_id.high))) + return "reserved_id: integer|Long expected"; return null; }; /** - * Creates a FullStatus message from a plain object. Also converts values to their respective internal types. + * Creates a ReleaseRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof replicationdata.FullStatus + * @memberof query.ReleaseRequest * @static * @param {Object.} object Plain object - * @returns {replicationdata.FullStatus} FullStatus + * @returns {query.ReleaseRequest} ReleaseRequest */ - FullStatus.fromObject = function fromObject(object) { - if (object instanceof $root.replicationdata.FullStatus) + ReleaseRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.ReleaseRequest) return object; - let message = new $root.replicationdata.FullStatus(); - if (object.server_id != null) - message.server_id = object.server_id >>> 0; - if (object.server_uuid != null) - message.server_uuid = String(object.server_uuid); - if (object.replication_status != null) { - if (typeof object.replication_status !== "object") - throw TypeError(".replicationdata.FullStatus.replication_status: object expected"); - message.replication_status = $root.replicationdata.Status.fromObject(object.replication_status); + let message = new $root.query.ReleaseRequest(); + if (object.effective_caller_id != null) { + if (typeof object.effective_caller_id !== "object") + throw 
TypeError(".query.ReleaseRequest.effective_caller_id: object expected"); + message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); } - if (object.primary_status != null) { - if (typeof object.primary_status !== "object") - throw TypeError(".replicationdata.FullStatus.primary_status: object expected"); - message.primary_status = $root.replicationdata.PrimaryStatus.fromObject(object.primary_status); + if (object.immediate_caller_id != null) { + if (typeof object.immediate_caller_id !== "object") + throw TypeError(".query.ReleaseRequest.immediate_caller_id: object expected"); + message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); } - if (object.gtid_purged != null) - message.gtid_purged = String(object.gtid_purged); - if (object.version != null) - message.version = String(object.version); - if (object.version_comment != null) - message.version_comment = String(object.version_comment); - if (object.read_only != null) - message.read_only = Boolean(object.read_only); - if (object.gtid_mode != null) - message.gtid_mode = String(object.gtid_mode); - if (object.binlog_format != null) - message.binlog_format = String(object.binlog_format); - if (object.binlog_row_image != null) - message.binlog_row_image = String(object.binlog_row_image); - if (object.log_bin_enabled != null) - message.log_bin_enabled = Boolean(object.log_bin_enabled); - if (object.log_replica_updates != null) - message.log_replica_updates = Boolean(object.log_replica_updates); - if (object.semi_sync_primary_enabled != null) - message.semi_sync_primary_enabled = Boolean(object.semi_sync_primary_enabled); - if (object.semi_sync_replica_enabled != null) - message.semi_sync_replica_enabled = Boolean(object.semi_sync_replica_enabled); - if (object.semi_sync_primary_status != null) - message.semi_sync_primary_status = Boolean(object.semi_sync_primary_status); - if (object.semi_sync_replica_status != null) - 
message.semi_sync_replica_status = Boolean(object.semi_sync_replica_status); - if (object.semi_sync_primary_clients != null) - message.semi_sync_primary_clients = object.semi_sync_primary_clients >>> 0; - if (object.semi_sync_primary_timeout != null) + if (object.target != null) { + if (typeof object.target !== "object") + throw TypeError(".query.ReleaseRequest.target: object expected"); + message.target = $root.query.Target.fromObject(object.target); + } + if (object.transaction_id != null) if ($util.Long) - (message.semi_sync_primary_timeout = $util.Long.fromValue(object.semi_sync_primary_timeout)).unsigned = true; - else if (typeof object.semi_sync_primary_timeout === "string") - message.semi_sync_primary_timeout = parseInt(object.semi_sync_primary_timeout, 10); - else if (typeof object.semi_sync_primary_timeout === "number") - message.semi_sync_primary_timeout = object.semi_sync_primary_timeout; - else if (typeof object.semi_sync_primary_timeout === "object") - message.semi_sync_primary_timeout = new $util.LongBits(object.semi_sync_primary_timeout.low >>> 0, object.semi_sync_primary_timeout.high >>> 0).toNumber(true); - if (object.semi_sync_wait_for_replica_count != null) - message.semi_sync_wait_for_replica_count = object.semi_sync_wait_for_replica_count >>> 0; - if (object.super_read_only != null) - message.super_read_only = Boolean(object.super_read_only); + (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; + else if (typeof object.transaction_id === "string") + message.transaction_id = parseInt(object.transaction_id, 10); + else if (typeof object.transaction_id === "number") + message.transaction_id = object.transaction_id; + else if (typeof object.transaction_id === "object") + message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); + if (object.reserved_id != null) + if ($util.Long) + (message.reserved_id = 
$util.Long.fromValue(object.reserved_id)).unsigned = false; + else if (typeof object.reserved_id === "string") + message.reserved_id = parseInt(object.reserved_id, 10); + else if (typeof object.reserved_id === "number") + message.reserved_id = object.reserved_id; + else if (typeof object.reserved_id === "object") + message.reserved_id = new $util.LongBits(object.reserved_id.low >>> 0, object.reserved_id.high >>> 0).toNumber(); return message; }; /** - * Creates a plain object from a FullStatus message. Also converts values to other types if specified. + * Creates a plain object from a ReleaseRequest message. Also converts values to other types if specified. * @function toObject - * @memberof replicationdata.FullStatus + * @memberof query.ReleaseRequest * @static - * @param {replicationdata.FullStatus} message FullStatus + * @param {query.ReleaseRequest} message ReleaseRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - FullStatus.toObject = function toObject(message, options) { + ReleaseRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.server_id = 0; - object.server_uuid = ""; - object.replication_status = null; - object.primary_status = null; - object.gtid_purged = ""; - object.version = ""; - object.version_comment = ""; - object.read_only = false; - object.gtid_mode = ""; - object.binlog_format = ""; - object.binlog_row_image = ""; - object.log_bin_enabled = false; - object.log_replica_updates = false; - object.semi_sync_primary_enabled = false; - object.semi_sync_replica_enabled = false; - object.semi_sync_primary_status = false; - object.semi_sync_replica_status = false; - object.semi_sync_primary_clients = 0; + object.effective_caller_id = null; + object.immediate_caller_id = null; + object.target = null; if ($util.Long) { - let long = new $util.Long(0, 0, true); - object.semi_sync_primary_timeout = options.longs 
=== String ? long.toString() : options.longs === Number ? long.toNumber() : long; + let long = new $util.Long(0, 0, false); + object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; } else - object.semi_sync_primary_timeout = options.longs === String ? "0" : 0; - object.semi_sync_wait_for_replica_count = 0; - object.super_read_only = false; + object.transaction_id = options.longs === String ? "0" : 0; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.reserved_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.reserved_id = options.longs === String ? "0" : 0; } - if (message.server_id != null && message.hasOwnProperty("server_id")) - object.server_id = message.server_id; - if (message.server_uuid != null && message.hasOwnProperty("server_uuid")) - object.server_uuid = message.server_uuid; - if (message.replication_status != null && message.hasOwnProperty("replication_status")) - object.replication_status = $root.replicationdata.Status.toObject(message.replication_status, options); - if (message.primary_status != null && message.hasOwnProperty("primary_status")) - object.primary_status = $root.replicationdata.PrimaryStatus.toObject(message.primary_status, options); - if (message.gtid_purged != null && message.hasOwnProperty("gtid_purged")) - object.gtid_purged = message.gtid_purged; - if (message.version != null && message.hasOwnProperty("version")) - object.version = message.version; - if (message.version_comment != null && message.hasOwnProperty("version_comment")) - object.version_comment = message.version_comment; - if (message.read_only != null && message.hasOwnProperty("read_only")) - object.read_only = message.read_only; - if (message.gtid_mode != null && message.hasOwnProperty("gtid_mode")) - object.gtid_mode = message.gtid_mode; - if (message.binlog_format != null && message.hasOwnProperty("binlog_format")) 
- object.binlog_format = message.binlog_format; - if (message.binlog_row_image != null && message.hasOwnProperty("binlog_row_image")) - object.binlog_row_image = message.binlog_row_image; - if (message.log_bin_enabled != null && message.hasOwnProperty("log_bin_enabled")) - object.log_bin_enabled = message.log_bin_enabled; - if (message.log_replica_updates != null && message.hasOwnProperty("log_replica_updates")) - object.log_replica_updates = message.log_replica_updates; - if (message.semi_sync_primary_enabled != null && message.hasOwnProperty("semi_sync_primary_enabled")) - object.semi_sync_primary_enabled = message.semi_sync_primary_enabled; - if (message.semi_sync_replica_enabled != null && message.hasOwnProperty("semi_sync_replica_enabled")) - object.semi_sync_replica_enabled = message.semi_sync_replica_enabled; - if (message.semi_sync_primary_status != null && message.hasOwnProperty("semi_sync_primary_status")) - object.semi_sync_primary_status = message.semi_sync_primary_status; - if (message.semi_sync_replica_status != null && message.hasOwnProperty("semi_sync_replica_status")) - object.semi_sync_replica_status = message.semi_sync_replica_status; - if (message.semi_sync_primary_clients != null && message.hasOwnProperty("semi_sync_primary_clients")) - object.semi_sync_primary_clients = message.semi_sync_primary_clients; - if (message.semi_sync_primary_timeout != null && message.hasOwnProperty("semi_sync_primary_timeout")) - if (typeof message.semi_sync_primary_timeout === "number") - object.semi_sync_primary_timeout = options.longs === String ? 
String(message.semi_sync_primary_timeout) : message.semi_sync_primary_timeout; + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) + object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) + object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); + if (message.target != null && message.hasOwnProperty("target")) + object.target = $root.query.Target.toObject(message.target, options); + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (typeof message.transaction_id === "number") + object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; else - object.semi_sync_primary_timeout = options.longs === String ? $util.Long.prototype.toString.call(message.semi_sync_primary_timeout) : options.longs === Number ? new $util.LongBits(message.semi_sync_primary_timeout.low >>> 0, message.semi_sync_primary_timeout.high >>> 0).toNumber(true) : message.semi_sync_primary_timeout; - if (message.semi_sync_wait_for_replica_count != null && message.hasOwnProperty("semi_sync_wait_for_replica_count")) - object.semi_sync_wait_for_replica_count = message.semi_sync_wait_for_replica_count; - if (message.super_read_only != null && message.hasOwnProperty("super_read_only")) - object.super_read_only = message.super_read_only; + object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; + if (message.reserved_id != null && message.hasOwnProperty("reserved_id")) + if (typeof message.reserved_id === "number") + object.reserved_id = options.longs === String ? 
String(message.reserved_id) : message.reserved_id; + else + object.reserved_id = options.longs === String ? $util.Long.prototype.toString.call(message.reserved_id) : options.longs === Number ? new $util.LongBits(message.reserved_id.low >>> 0, message.reserved_id.high >>> 0).toNumber() : message.reserved_id; return object; }; /** - * Converts this FullStatus to JSON. + * Converts this ReleaseRequest to JSON. * @function toJSON - * @memberof replicationdata.FullStatus + * @memberof query.ReleaseRequest * @instance * @returns {Object.} JSON object */ - FullStatus.prototype.toJSON = function toJSON() { + ReleaseRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for FullStatus + * Gets the default type url for ReleaseRequest * @function getTypeUrl - * @memberof replicationdata.FullStatus + * @memberof query.ReleaseRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - FullStatus.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReleaseRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/replicationdata.FullStatus"; + return typeUrlPrefix + "/query.ReleaseRequest"; }; - return FullStatus; + return ReleaseRequest; })(); - return replicationdata; -})(); - -export const vschema = $root.vschema = (() => { - - /** - * Namespace vschema. - * @exports vschema - * @namespace - */ - const vschema = {}; - - vschema.RoutingRules = (function() { + query.ReleaseResponse = (function() { /** - * Properties of a RoutingRules. - * @memberof vschema - * @interface IRoutingRules - * @property {Array.|null} [rules] RoutingRules rules + * Properties of a ReleaseResponse. + * @memberof query + * @interface IReleaseResponse */ /** - * Constructs a new RoutingRules. 
- * @memberof vschema - * @classdesc Represents a RoutingRules. - * @implements IRoutingRules + * Constructs a new ReleaseResponse. + * @memberof query + * @classdesc Represents a ReleaseResponse. + * @implements IReleaseResponse * @constructor - * @param {vschema.IRoutingRules=} [properties] Properties to set + * @param {query.IReleaseResponse=} [properties] Properties to set */ - function RoutingRules(properties) { - this.rules = []; + function ReleaseResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -93932,80 +95077,63 @@ export const vschema = $root.vschema = (() => { } /** - * RoutingRules rules. - * @member {Array.} rules - * @memberof vschema.RoutingRules - * @instance - */ - RoutingRules.prototype.rules = $util.emptyArray; - - /** - * Creates a new RoutingRules instance using the specified properties. + * Creates a new ReleaseResponse instance using the specified properties. * @function create - * @memberof vschema.RoutingRules + * @memberof query.ReleaseResponse * @static - * @param {vschema.IRoutingRules=} [properties] Properties to set - * @returns {vschema.RoutingRules} RoutingRules instance + * @param {query.IReleaseResponse=} [properties] Properties to set + * @returns {query.ReleaseResponse} ReleaseResponse instance */ - RoutingRules.create = function create(properties) { - return new RoutingRules(properties); + ReleaseResponse.create = function create(properties) { + return new ReleaseResponse(properties); }; /** - * Encodes the specified RoutingRules message. Does not implicitly {@link vschema.RoutingRules.verify|verify} messages. + * Encodes the specified ReleaseResponse message. Does not implicitly {@link query.ReleaseResponse.verify|verify} messages. 
* @function encode - * @memberof vschema.RoutingRules + * @memberof query.ReleaseResponse * @static - * @param {vschema.IRoutingRules} message RoutingRules message or plain object to encode + * @param {query.IReleaseResponse} message ReleaseResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RoutingRules.encode = function encode(message, writer) { + ReleaseResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.rules != null && message.rules.length) - for (let i = 0; i < message.rules.length; ++i) - $root.vschema.RoutingRule.encode(message.rules[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified RoutingRules message, length delimited. Does not implicitly {@link vschema.RoutingRules.verify|verify} messages. + * Encodes the specified ReleaseResponse message, length delimited. Does not implicitly {@link query.ReleaseResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vschema.RoutingRules + * @memberof query.ReleaseResponse * @static - * @param {vschema.IRoutingRules} message RoutingRules message or plain object to encode + * @param {query.IReleaseResponse} message ReleaseResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RoutingRules.encodeDelimited = function encodeDelimited(message, writer) { + ReleaseResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RoutingRules message from the specified reader or buffer. + * Decodes a ReleaseResponse message from the specified reader or buffer. 
* @function decode - * @memberof vschema.RoutingRules + * @memberof query.ReleaseResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vschema.RoutingRules} RoutingRules + * @returns {query.ReleaseResponse} ReleaseResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RoutingRules.decode = function decode(reader, length) { + ReleaseResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.RoutingRules(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.ReleaseResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - if (!(message.rules && message.rules.length)) - message.rules = []; - message.rules.push($root.vschema.RoutingRule.decode(reader, reader.uint32())); - break; - } default: reader.skipType(tag & 7); break; @@ -94015,141 +95143,108 @@ export const vschema = $root.vschema = (() => { }; /** - * Decodes a RoutingRules message from the specified reader or buffer, length delimited. + * Decodes a ReleaseResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vschema.RoutingRules + * @memberof query.ReleaseResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vschema.RoutingRules} RoutingRules + * @returns {query.ReleaseResponse} ReleaseResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RoutingRules.decodeDelimited = function decodeDelimited(reader) { + ReleaseResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RoutingRules message. + * Verifies a ReleaseResponse message. * @function verify - * @memberof vschema.RoutingRules + * @memberof query.ReleaseResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RoutingRules.verify = function verify(message) { + ReleaseResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.rules != null && message.hasOwnProperty("rules")) { - if (!Array.isArray(message.rules)) - return "rules: array expected"; - for (let i = 0; i < message.rules.length; ++i) { - let error = $root.vschema.RoutingRule.verify(message.rules[i]); - if (error) - return "rules." + error; - } - } return null; }; /** - * Creates a RoutingRules message from a plain object. Also converts values to their respective internal types. + * Creates a ReleaseResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vschema.RoutingRules + * @memberof query.ReleaseResponse * @static * @param {Object.} object Plain object - * @returns {vschema.RoutingRules} RoutingRules + * @returns {query.ReleaseResponse} ReleaseResponse */ - RoutingRules.fromObject = function fromObject(object) { - if (object instanceof $root.vschema.RoutingRules) + ReleaseResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.ReleaseResponse) return object; - let message = new $root.vschema.RoutingRules(); - if (object.rules) { - if (!Array.isArray(object.rules)) - throw TypeError(".vschema.RoutingRules.rules: array expected"); - message.rules = []; - for (let i = 0; i < object.rules.length; ++i) { - if (typeof object.rules[i] !== "object") - throw TypeError(".vschema.RoutingRules.rules: object expected"); - message.rules[i] = $root.vschema.RoutingRule.fromObject(object.rules[i]); - } - } - return message; + return new $root.query.ReleaseResponse(); }; /** - * Creates a plain object from a RoutingRules message. Also converts values to other types if specified. + * Creates a plain object from a ReleaseResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vschema.RoutingRules + * @memberof query.ReleaseResponse * @static - * @param {vschema.RoutingRules} message RoutingRules + * @param {query.ReleaseResponse} message ReleaseResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RoutingRules.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.arrays || options.defaults) - object.rules = []; - if (message.rules && message.rules.length) { - object.rules = []; - for (let j = 0; j < message.rules.length; ++j) - object.rules[j] = $root.vschema.RoutingRule.toObject(message.rules[j], options); - } - return object; + ReleaseResponse.toObject = function toObject() { + return {}; }; /** - * Converts this RoutingRules to JSON. + * Converts this ReleaseResponse to JSON. * @function toJSON - * @memberof vschema.RoutingRules + * @memberof query.ReleaseResponse * @instance * @returns {Object.} JSON object */ - RoutingRules.prototype.toJSON = function toJSON() { + ReleaseResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RoutingRules + * Gets the default type url for ReleaseResponse * @function getTypeUrl - * @memberof vschema.RoutingRules + * @memberof query.ReleaseResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RoutingRules.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReleaseResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vschema.RoutingRules"; + return typeUrlPrefix + "/query.ReleaseResponse"; }; - return RoutingRules; + return ReleaseResponse; })(); - vschema.RoutingRule = (function() { + query.StreamHealthRequest = (function() { /** - * 
Properties of a RoutingRule. - * @memberof vschema - * @interface IRoutingRule - * @property {string|null} [from_table] RoutingRule from_table - * @property {Array.|null} [to_tables] RoutingRule to_tables + * Properties of a StreamHealthRequest. + * @memberof query + * @interface IStreamHealthRequest */ /** - * Constructs a new RoutingRule. - * @memberof vschema - * @classdesc Represents a RoutingRule. - * @implements IRoutingRule + * Constructs a new StreamHealthRequest. + * @memberof query + * @classdesc Represents a StreamHealthRequest. + * @implements IStreamHealthRequest * @constructor - * @param {vschema.IRoutingRule=} [properties] Properties to set + * @param {query.IStreamHealthRequest=} [properties] Properties to set */ - function RoutingRule(properties) { - this.to_tables = []; + function StreamHealthRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -94157,94 +95252,63 @@ export const vschema = $root.vschema = (() => { } /** - * RoutingRule from_table. - * @member {string} from_table - * @memberof vschema.RoutingRule - * @instance - */ - RoutingRule.prototype.from_table = ""; - - /** - * RoutingRule to_tables. - * @member {Array.} to_tables - * @memberof vschema.RoutingRule - * @instance - */ - RoutingRule.prototype.to_tables = $util.emptyArray; - - /** - * Creates a new RoutingRule instance using the specified properties. + * Creates a new StreamHealthRequest instance using the specified properties. 
* @function create - * @memberof vschema.RoutingRule + * @memberof query.StreamHealthRequest * @static - * @param {vschema.IRoutingRule=} [properties] Properties to set - * @returns {vschema.RoutingRule} RoutingRule instance + * @param {query.IStreamHealthRequest=} [properties] Properties to set + * @returns {query.StreamHealthRequest} StreamHealthRequest instance */ - RoutingRule.create = function create(properties) { - return new RoutingRule(properties); + StreamHealthRequest.create = function create(properties) { + return new StreamHealthRequest(properties); }; /** - * Encodes the specified RoutingRule message. Does not implicitly {@link vschema.RoutingRule.verify|verify} messages. + * Encodes the specified StreamHealthRequest message. Does not implicitly {@link query.StreamHealthRequest.verify|verify} messages. * @function encode - * @memberof vschema.RoutingRule + * @memberof query.StreamHealthRequest * @static - * @param {vschema.IRoutingRule} message RoutingRule message or plain object to encode + * @param {query.IStreamHealthRequest} message StreamHealthRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RoutingRule.encode = function encode(message, writer) { + StreamHealthRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.from_table != null && Object.hasOwnProperty.call(message, "from_table")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.from_table); - if (message.to_tables != null && message.to_tables.length) - for (let i = 0; i < message.to_tables.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.to_tables[i]); return writer; }; /** - * Encodes the specified RoutingRule message, length delimited. Does not implicitly {@link vschema.RoutingRule.verify|verify} messages. + * Encodes the specified StreamHealthRequest message, length delimited. 
Does not implicitly {@link query.StreamHealthRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vschema.RoutingRule + * @memberof query.StreamHealthRequest * @static - * @param {vschema.IRoutingRule} message RoutingRule message or plain object to encode + * @param {query.IStreamHealthRequest} message StreamHealthRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RoutingRule.encodeDelimited = function encodeDelimited(message, writer) { + StreamHealthRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RoutingRule message from the specified reader or buffer. + * Decodes a StreamHealthRequest message from the specified reader or buffer. * @function decode - * @memberof vschema.RoutingRule + * @memberof query.StreamHealthRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vschema.RoutingRule} RoutingRule + * @returns {query.StreamHealthRequest} StreamHealthRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RoutingRule.decode = function decode(reader, length) { + StreamHealthRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.RoutingRule(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.StreamHealthRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.from_table = reader.string(); - break; - } - case 2: { - if (!(message.to_tables && message.to_tables.length)) - message.to_tables = []; - message.to_tables.push(reader.string()); - break; - } default: reader.skipType(tag & 7); break; @@ -94254,151 +95318,118 @@ export const vschema = $root.vschema = (() => { }; /** - * Decodes a RoutingRule message from the specified reader or buffer, length delimited. + * Decodes a StreamHealthRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vschema.RoutingRule + * @memberof query.StreamHealthRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vschema.RoutingRule} RoutingRule + * @returns {query.StreamHealthRequest} StreamHealthRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RoutingRule.decodeDelimited = function decodeDelimited(reader) { + StreamHealthRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RoutingRule message. + * Verifies a StreamHealthRequest message. 
* @function verify - * @memberof vschema.RoutingRule + * @memberof query.StreamHealthRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RoutingRule.verify = function verify(message) { + StreamHealthRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.from_table != null && message.hasOwnProperty("from_table")) - if (!$util.isString(message.from_table)) - return "from_table: string expected"; - if (message.to_tables != null && message.hasOwnProperty("to_tables")) { - if (!Array.isArray(message.to_tables)) - return "to_tables: array expected"; - for (let i = 0; i < message.to_tables.length; ++i) - if (!$util.isString(message.to_tables[i])) - return "to_tables: string[] expected"; - } return null; }; /** - * Creates a RoutingRule message from a plain object. Also converts values to their respective internal types. + * Creates a StreamHealthRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vschema.RoutingRule + * @memberof query.StreamHealthRequest * @static * @param {Object.} object Plain object - * @returns {vschema.RoutingRule} RoutingRule + * @returns {query.StreamHealthRequest} StreamHealthRequest */ - RoutingRule.fromObject = function fromObject(object) { - if (object instanceof $root.vschema.RoutingRule) + StreamHealthRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.StreamHealthRequest) return object; - let message = new $root.vschema.RoutingRule(); - if (object.from_table != null) - message.from_table = String(object.from_table); - if (object.to_tables) { - if (!Array.isArray(object.to_tables)) - throw TypeError(".vschema.RoutingRule.to_tables: array expected"); - message.to_tables = []; - for (let i = 0; i < object.to_tables.length; ++i) - message.to_tables[i] = String(object.to_tables[i]); - } - return message; + return new $root.query.StreamHealthRequest(); }; /** - * Creates a plain object from a RoutingRule message. Also converts values to other types if specified. + * Creates a plain object from a StreamHealthRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vschema.RoutingRule + * @memberof query.StreamHealthRequest * @static - * @param {vschema.RoutingRule} message RoutingRule + * @param {query.StreamHealthRequest} message StreamHealthRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RoutingRule.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.arrays || options.defaults) - object.to_tables = []; - if (options.defaults) - object.from_table = ""; - if (message.from_table != null && message.hasOwnProperty("from_table")) - object.from_table = message.from_table; - if (message.to_tables && message.to_tables.length) { - object.to_tables = []; - for (let j = 0; j < message.to_tables.length; ++j) - object.to_tables[j] = message.to_tables[j]; - } - return object; + StreamHealthRequest.toObject = function toObject() { + return {}; }; /** - * Converts this RoutingRule to JSON. + * Converts this StreamHealthRequest to JSON. 
* @function toJSON - * @memberof vschema.RoutingRule + * @memberof query.StreamHealthRequest * @instance * @returns {Object.} JSON object */ - RoutingRule.prototype.toJSON = function toJSON() { + StreamHealthRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RoutingRule + * Gets the default type url for StreamHealthRequest * @function getTypeUrl - * @memberof vschema.RoutingRule + * @memberof query.StreamHealthRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RoutingRule.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + StreamHealthRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vschema.RoutingRule"; + return typeUrlPrefix + "/query.StreamHealthRequest"; }; - return RoutingRule; + return StreamHealthRequest; })(); - vschema.Keyspace = (function() { + query.RealtimeStats = (function() { /** - * Properties of a Keyspace. - * @memberof vschema - * @interface IKeyspace - * @property {boolean|null} [sharded] Keyspace sharded - * @property {Object.|null} [vindexes] Keyspace vindexes - * @property {Object.|null} [tables] Keyspace tables - * @property {boolean|null} [require_explicit_routing] Keyspace require_explicit_routing - * @property {boolean|null} [cross_tablet] Keyspace cross_tablet - * @property {boolean|null} [attach_enable] Keyspace attach_enable - * @property {string|null} [attach_to] Keyspace attach_to + * Properties of a RealtimeStats. 
+ * @memberof query + * @interface IRealtimeStats + * @property {string|null} [health_error] RealtimeStats health_error + * @property {number|null} [replication_lag_seconds] RealtimeStats replication_lag_seconds + * @property {number|null} [binlog_players_count] RealtimeStats binlog_players_count + * @property {number|Long|null} [filtered_replication_lag_seconds] RealtimeStats filtered_replication_lag_seconds + * @property {number|null} [cpu_usage] RealtimeStats cpu_usage + * @property {number|null} [qps] RealtimeStats qps + * @property {Array.|null} [table_schema_changed] RealtimeStats table_schema_changed + * @property {Array.|null} [view_schema_changed] RealtimeStats view_schema_changed */ /** - * Constructs a new Keyspace. - * @memberof vschema - * @classdesc Represents a Keyspace. - * @implements IKeyspace + * Constructs a new RealtimeStats. + * @memberof query + * @classdesc Represents a RealtimeStats. + * @implements IRealtimeStats * @constructor - * @param {vschema.IKeyspace=} [properties] Properties to set + * @param {query.IRealtimeStats=} [properties] Properties to set */ - function Keyspace(properties) { - this.vindexes = {}; - this.tables = {}; + function RealtimeStats(properties) { + this.table_schema_changed = []; + this.view_schema_changed = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -94406,203 +95437,179 @@ export const vschema = $root.vschema = (() => { } /** - * Keyspace sharded. - * @member {boolean} sharded - * @memberof vschema.Keyspace + * RealtimeStats health_error. + * @member {string} health_error + * @memberof query.RealtimeStats * @instance */ - Keyspace.prototype.sharded = false; + RealtimeStats.prototype.health_error = ""; /** - * Keyspace vindexes. - * @member {Object.} vindexes - * @memberof vschema.Keyspace + * RealtimeStats replication_lag_seconds. 
+ * @member {number} replication_lag_seconds + * @memberof query.RealtimeStats * @instance */ - Keyspace.prototype.vindexes = $util.emptyObject; + RealtimeStats.prototype.replication_lag_seconds = 0; /** - * Keyspace tables. - * @member {Object.} tables - * @memberof vschema.Keyspace + * RealtimeStats binlog_players_count. + * @member {number} binlog_players_count + * @memberof query.RealtimeStats * @instance */ - Keyspace.prototype.tables = $util.emptyObject; + RealtimeStats.prototype.binlog_players_count = 0; /** - * Keyspace require_explicit_routing. - * @member {boolean} require_explicit_routing - * @memberof vschema.Keyspace + * RealtimeStats filtered_replication_lag_seconds. + * @member {number|Long} filtered_replication_lag_seconds + * @memberof query.RealtimeStats * @instance */ - Keyspace.prototype.require_explicit_routing = false; + RealtimeStats.prototype.filtered_replication_lag_seconds = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * Keyspace cross_tablet. - * @member {boolean} cross_tablet - * @memberof vschema.Keyspace + * RealtimeStats cpu_usage. + * @member {number} cpu_usage + * @memberof query.RealtimeStats * @instance */ - Keyspace.prototype.cross_tablet = false; + RealtimeStats.prototype.cpu_usage = 0; /** - * Keyspace attach_enable. - * @member {boolean} attach_enable - * @memberof vschema.Keyspace + * RealtimeStats qps. + * @member {number} qps + * @memberof query.RealtimeStats * @instance */ - Keyspace.prototype.attach_enable = false; + RealtimeStats.prototype.qps = 0; /** - * Keyspace attach_to. - * @member {string} attach_to - * @memberof vschema.Keyspace + * RealtimeStats table_schema_changed. + * @member {Array.} table_schema_changed + * @memberof query.RealtimeStats * @instance */ - Keyspace.prototype.attach_to = ""; + RealtimeStats.prototype.table_schema_changed = $util.emptyArray; /** - * Creates a new Keyspace instance using the specified properties. + * RealtimeStats view_schema_changed. 
+ * @member {Array.} view_schema_changed + * @memberof query.RealtimeStats + * @instance + */ + RealtimeStats.prototype.view_schema_changed = $util.emptyArray; + + /** + * Creates a new RealtimeStats instance using the specified properties. * @function create - * @memberof vschema.Keyspace + * @memberof query.RealtimeStats * @static - * @param {vschema.IKeyspace=} [properties] Properties to set - * @returns {vschema.Keyspace} Keyspace instance + * @param {query.IRealtimeStats=} [properties] Properties to set + * @returns {query.RealtimeStats} RealtimeStats instance */ - Keyspace.create = function create(properties) { - return new Keyspace(properties); + RealtimeStats.create = function create(properties) { + return new RealtimeStats(properties); }; /** - * Encodes the specified Keyspace message. Does not implicitly {@link vschema.Keyspace.verify|verify} messages. + * Encodes the specified RealtimeStats message. Does not implicitly {@link query.RealtimeStats.verify|verify} messages. * @function encode - * @memberof vschema.Keyspace + * @memberof query.RealtimeStats * @static - * @param {vschema.IKeyspace} message Keyspace message or plain object to encode + * @param {query.IRealtimeStats} message RealtimeStats message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Keyspace.encode = function encode(message, writer) { + RealtimeStats.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.sharded != null && Object.hasOwnProperty.call(message, "sharded")) - writer.uint32(/* id 1, wireType 0 =*/8).bool(message.sharded); - if (message.vindexes != null && Object.hasOwnProperty.call(message, "vindexes")) - for (let keys = Object.keys(message.vindexes), i = 0; i < keys.length; ++i) { - writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); - $root.vschema.Vindex.encode(message.vindexes[keys[i]], writer.uint32(/* 
id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); - } - if (message.tables != null && Object.hasOwnProperty.call(message, "tables")) - for (let keys = Object.keys(message.tables), i = 0; i < keys.length; ++i) { - writer.uint32(/* id 3, wireType 2 =*/26).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); - $root.vschema.Table.encode(message.tables[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); - } - if (message.require_explicit_routing != null && Object.hasOwnProperty.call(message, "require_explicit_routing")) - writer.uint32(/* id 4, wireType 0 =*/32).bool(message.require_explicit_routing); - if (message.cross_tablet != null && Object.hasOwnProperty.call(message, "cross_tablet")) - writer.uint32(/* id 85, wireType 0 =*/680).bool(message.cross_tablet); - if (message.attach_enable != null && Object.hasOwnProperty.call(message, "attach_enable")) - writer.uint32(/* id 86, wireType 0 =*/688).bool(message.attach_enable); - if (message.attach_to != null && Object.hasOwnProperty.call(message, "attach_to")) - writer.uint32(/* id 87, wireType 2 =*/698).string(message.attach_to); + if (message.health_error != null && Object.hasOwnProperty.call(message, "health_error")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.health_error); + if (message.replication_lag_seconds != null && Object.hasOwnProperty.call(message, "replication_lag_seconds")) + writer.uint32(/* id 2, wireType 0 =*/16).uint32(message.replication_lag_seconds); + if (message.binlog_players_count != null && Object.hasOwnProperty.call(message, "binlog_players_count")) + writer.uint32(/* id 3, wireType 0 =*/24).int32(message.binlog_players_count); + if (message.filtered_replication_lag_seconds != null && Object.hasOwnProperty.call(message, "filtered_replication_lag_seconds")) + writer.uint32(/* id 4, wireType 0 =*/32).int64(message.filtered_replication_lag_seconds); + if (message.cpu_usage != null && Object.hasOwnProperty.call(message, "cpu_usage")) + writer.uint32(/* 
id 5, wireType 1 =*/41).double(message.cpu_usage); + if (message.qps != null && Object.hasOwnProperty.call(message, "qps")) + writer.uint32(/* id 6, wireType 1 =*/49).double(message.qps); + if (message.table_schema_changed != null && message.table_schema_changed.length) + for (let i = 0; i < message.table_schema_changed.length; ++i) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.table_schema_changed[i]); + if (message.view_schema_changed != null && message.view_schema_changed.length) + for (let i = 0; i < message.view_schema_changed.length; ++i) + writer.uint32(/* id 8, wireType 2 =*/66).string(message.view_schema_changed[i]); return writer; }; /** - * Encodes the specified Keyspace message, length delimited. Does not implicitly {@link vschema.Keyspace.verify|verify} messages. + * Encodes the specified RealtimeStats message, length delimited. Does not implicitly {@link query.RealtimeStats.verify|verify} messages. * @function encodeDelimited - * @memberof vschema.Keyspace + * @memberof query.RealtimeStats * @static - * @param {vschema.IKeyspace} message Keyspace message or plain object to encode + * @param {query.IRealtimeStats} message RealtimeStats message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Keyspace.encodeDelimited = function encodeDelimited(message, writer) { + RealtimeStats.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Keyspace message from the specified reader or buffer. + * Decodes a RealtimeStats message from the specified reader or buffer. 
* @function decode - * @memberof vschema.Keyspace + * @memberof query.RealtimeStats * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vschema.Keyspace} Keyspace + * @returns {query.RealtimeStats} RealtimeStats * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Keyspace.decode = function decode(reader, length) { + RealtimeStats.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.Keyspace(), key, value; + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.RealtimeStats(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.sharded = reader.bool(); + message.health_error = reader.string(); break; } case 2: { - if (message.vindexes === $util.emptyObject) - message.vindexes = {}; - let end2 = reader.uint32() + reader.pos; - key = ""; - value = null; - while (reader.pos < end2) { - let tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = $root.vschema.Vindex.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.vindexes[key] = value; + message.replication_lag_seconds = reader.uint32(); break; } case 3: { - if (message.tables === $util.emptyObject) - message.tables = {}; - let end2 = reader.uint32() + reader.pos; - key = ""; - value = null; - while (reader.pos < end2) { - let tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = $root.vschema.Table.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.tables[key] 
= value; + message.binlog_players_count = reader.int32(); break; } case 4: { - message.require_explicit_routing = reader.bool(); + message.filtered_replication_lag_seconds = reader.int64(); break; } - case 85: { - message.cross_tablet = reader.bool(); + case 5: { + message.cpu_usage = reader.double(); break; } - case 86: { - message.attach_enable = reader.bool(); + case 6: { + message.qps = reader.double(); break; } - case 87: { - message.attach_to = reader.string(); + case 7: { + if (!(message.table_schema_changed && message.table_schema_changed.length)) + message.table_schema_changed = []; + message.table_schema_changed.push(reader.string()); + break; + } + case 8: { + if (!(message.view_schema_changed && message.view_schema_changed.length)) + message.view_schema_changed = []; + message.view_schema_changed.push(reader.string()); break; } default: @@ -94614,213 +95621,222 @@ export const vschema = $root.vschema = (() => { }; /** - * Decodes a Keyspace message from the specified reader or buffer, length delimited. + * Decodes a RealtimeStats message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vschema.Keyspace + * @memberof query.RealtimeStats * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vschema.Keyspace} Keyspace + * @returns {query.RealtimeStats} RealtimeStats * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Keyspace.decodeDelimited = function decodeDelimited(reader) { + RealtimeStats.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Keyspace message. + * Verifies a RealtimeStats message. 
* @function verify - * @memberof vschema.Keyspace + * @memberof query.RealtimeStats * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Keyspace.verify = function verify(message) { + RealtimeStats.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.sharded != null && message.hasOwnProperty("sharded")) - if (typeof message.sharded !== "boolean") - return "sharded: boolean expected"; - if (message.vindexes != null && message.hasOwnProperty("vindexes")) { - if (!$util.isObject(message.vindexes)) - return "vindexes: object expected"; - let key = Object.keys(message.vindexes); - for (let i = 0; i < key.length; ++i) { - let error = $root.vschema.Vindex.verify(message.vindexes[key[i]]); - if (error) - return "vindexes." + error; - } + if (message.health_error != null && message.hasOwnProperty("health_error")) + if (!$util.isString(message.health_error)) + return "health_error: string expected"; + if (message.replication_lag_seconds != null && message.hasOwnProperty("replication_lag_seconds")) + if (!$util.isInteger(message.replication_lag_seconds)) + return "replication_lag_seconds: integer expected"; + if (message.binlog_players_count != null && message.hasOwnProperty("binlog_players_count")) + if (!$util.isInteger(message.binlog_players_count)) + return "binlog_players_count: integer expected"; + if (message.filtered_replication_lag_seconds != null && message.hasOwnProperty("filtered_replication_lag_seconds")) + if (!$util.isInteger(message.filtered_replication_lag_seconds) && !(message.filtered_replication_lag_seconds && $util.isInteger(message.filtered_replication_lag_seconds.low) && $util.isInteger(message.filtered_replication_lag_seconds.high))) + return "filtered_replication_lag_seconds: integer|Long expected"; + if (message.cpu_usage != null && message.hasOwnProperty("cpu_usage")) + if (typeof 
message.cpu_usage !== "number") + return "cpu_usage: number expected"; + if (message.qps != null && message.hasOwnProperty("qps")) + if (typeof message.qps !== "number") + return "qps: number expected"; + if (message.table_schema_changed != null && message.hasOwnProperty("table_schema_changed")) { + if (!Array.isArray(message.table_schema_changed)) + return "table_schema_changed: array expected"; + for (let i = 0; i < message.table_schema_changed.length; ++i) + if (!$util.isString(message.table_schema_changed[i])) + return "table_schema_changed: string[] expected"; } - if (message.tables != null && message.hasOwnProperty("tables")) { - if (!$util.isObject(message.tables)) - return "tables: object expected"; - let key = Object.keys(message.tables); - for (let i = 0; i < key.length; ++i) { - let error = $root.vschema.Table.verify(message.tables[key[i]]); - if (error) - return "tables." + error; - } + if (message.view_schema_changed != null && message.hasOwnProperty("view_schema_changed")) { + if (!Array.isArray(message.view_schema_changed)) + return "view_schema_changed: array expected"; + for (let i = 0; i < message.view_schema_changed.length; ++i) + if (!$util.isString(message.view_schema_changed[i])) + return "view_schema_changed: string[] expected"; } - if (message.require_explicit_routing != null && message.hasOwnProperty("require_explicit_routing")) - if (typeof message.require_explicit_routing !== "boolean") - return "require_explicit_routing: boolean expected"; - if (message.cross_tablet != null && message.hasOwnProperty("cross_tablet")) - if (typeof message.cross_tablet !== "boolean") - return "cross_tablet: boolean expected"; - if (message.attach_enable != null && message.hasOwnProperty("attach_enable")) - if (typeof message.attach_enable !== "boolean") - return "attach_enable: boolean expected"; - if (message.attach_to != null && message.hasOwnProperty("attach_to")) - if (!$util.isString(message.attach_to)) - return "attach_to: string expected"; return 
null; }; /** - * Creates a Keyspace message from a plain object. Also converts values to their respective internal types. + * Creates a RealtimeStats message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vschema.Keyspace + * @memberof query.RealtimeStats * @static * @param {Object.} object Plain object - * @returns {vschema.Keyspace} Keyspace + * @returns {query.RealtimeStats} RealtimeStats */ - Keyspace.fromObject = function fromObject(object) { - if (object instanceof $root.vschema.Keyspace) + RealtimeStats.fromObject = function fromObject(object) { + if (object instanceof $root.query.RealtimeStats) return object; - let message = new $root.vschema.Keyspace(); - if (object.sharded != null) - message.sharded = Boolean(object.sharded); - if (object.vindexes) { - if (typeof object.vindexes !== "object") - throw TypeError(".vschema.Keyspace.vindexes: object expected"); - message.vindexes = {}; - for (let keys = Object.keys(object.vindexes), i = 0; i < keys.length; ++i) { - if (typeof object.vindexes[keys[i]] !== "object") - throw TypeError(".vschema.Keyspace.vindexes: object expected"); - message.vindexes[keys[i]] = $root.vschema.Vindex.fromObject(object.vindexes[keys[i]]); - } + let message = new $root.query.RealtimeStats(); + if (object.health_error != null) + message.health_error = String(object.health_error); + if (object.replication_lag_seconds != null) + message.replication_lag_seconds = object.replication_lag_seconds >>> 0; + if (object.binlog_players_count != null) + message.binlog_players_count = object.binlog_players_count | 0; + if (object.filtered_replication_lag_seconds != null) + if ($util.Long) + (message.filtered_replication_lag_seconds = $util.Long.fromValue(object.filtered_replication_lag_seconds)).unsigned = false; + else if (typeof object.filtered_replication_lag_seconds === "string") + message.filtered_replication_lag_seconds = parseInt(object.filtered_replication_lag_seconds, 
10); + else if (typeof object.filtered_replication_lag_seconds === "number") + message.filtered_replication_lag_seconds = object.filtered_replication_lag_seconds; + else if (typeof object.filtered_replication_lag_seconds === "object") + message.filtered_replication_lag_seconds = new $util.LongBits(object.filtered_replication_lag_seconds.low >>> 0, object.filtered_replication_lag_seconds.high >>> 0).toNumber(); + if (object.cpu_usage != null) + message.cpu_usage = Number(object.cpu_usage); + if (object.qps != null) + message.qps = Number(object.qps); + if (object.table_schema_changed) { + if (!Array.isArray(object.table_schema_changed)) + throw TypeError(".query.RealtimeStats.table_schema_changed: array expected"); + message.table_schema_changed = []; + for (let i = 0; i < object.table_schema_changed.length; ++i) + message.table_schema_changed[i] = String(object.table_schema_changed[i]); } - if (object.tables) { - if (typeof object.tables !== "object") - throw TypeError(".vschema.Keyspace.tables: object expected"); - message.tables = {}; - for (let keys = Object.keys(object.tables), i = 0; i < keys.length; ++i) { - if (typeof object.tables[keys[i]] !== "object") - throw TypeError(".vschema.Keyspace.tables: object expected"); - message.tables[keys[i]] = $root.vschema.Table.fromObject(object.tables[keys[i]]); - } + if (object.view_schema_changed) { + if (!Array.isArray(object.view_schema_changed)) + throw TypeError(".query.RealtimeStats.view_schema_changed: array expected"); + message.view_schema_changed = []; + for (let i = 0; i < object.view_schema_changed.length; ++i) + message.view_schema_changed[i] = String(object.view_schema_changed[i]); } - if (object.require_explicit_routing != null) - message.require_explicit_routing = Boolean(object.require_explicit_routing); - if (object.cross_tablet != null) - message.cross_tablet = Boolean(object.cross_tablet); - if (object.attach_enable != null) - message.attach_enable = Boolean(object.attach_enable); - if 
(object.attach_to != null) - message.attach_to = String(object.attach_to); return message; }; /** - * Creates a plain object from a Keyspace message. Also converts values to other types if specified. + * Creates a plain object from a RealtimeStats message. Also converts values to other types if specified. * @function toObject - * @memberof vschema.Keyspace + * @memberof query.RealtimeStats * @static - * @param {vschema.Keyspace} message Keyspace + * @param {query.RealtimeStats} message RealtimeStats * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Keyspace.toObject = function toObject(message, options) { + RealtimeStats.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.objects || options.defaults) { - object.vindexes = {}; - object.tables = {}; + if (options.arrays || options.defaults) { + object.table_schema_changed = []; + object.view_schema_changed = []; } if (options.defaults) { - object.sharded = false; - object.require_explicit_routing = false; - object.cross_tablet = false; - object.attach_enable = false; - object.attach_to = ""; + object.health_error = ""; + object.replication_lag_seconds = 0; + object.binlog_players_count = 0; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.filtered_replication_lag_seconds = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.filtered_replication_lag_seconds = options.longs === String ? 
"0" : 0; + object.cpu_usage = 0; + object.qps = 0; } - if (message.sharded != null && message.hasOwnProperty("sharded")) - object.sharded = message.sharded; - let keys2; - if (message.vindexes && (keys2 = Object.keys(message.vindexes)).length) { - object.vindexes = {}; - for (let j = 0; j < keys2.length; ++j) - object.vindexes[keys2[j]] = $root.vschema.Vindex.toObject(message.vindexes[keys2[j]], options); + if (message.health_error != null && message.hasOwnProperty("health_error")) + object.health_error = message.health_error; + if (message.replication_lag_seconds != null && message.hasOwnProperty("replication_lag_seconds")) + object.replication_lag_seconds = message.replication_lag_seconds; + if (message.binlog_players_count != null && message.hasOwnProperty("binlog_players_count")) + object.binlog_players_count = message.binlog_players_count; + if (message.filtered_replication_lag_seconds != null && message.hasOwnProperty("filtered_replication_lag_seconds")) + if (typeof message.filtered_replication_lag_seconds === "number") + object.filtered_replication_lag_seconds = options.longs === String ? String(message.filtered_replication_lag_seconds) : message.filtered_replication_lag_seconds; + else + object.filtered_replication_lag_seconds = options.longs === String ? $util.Long.prototype.toString.call(message.filtered_replication_lag_seconds) : options.longs === Number ? new $util.LongBits(message.filtered_replication_lag_seconds.low >>> 0, message.filtered_replication_lag_seconds.high >>> 0).toNumber() : message.filtered_replication_lag_seconds; + if (message.cpu_usage != null && message.hasOwnProperty("cpu_usage")) + object.cpu_usage = options.json && !isFinite(message.cpu_usage) ? String(message.cpu_usage) : message.cpu_usage; + if (message.qps != null && message.hasOwnProperty("qps")) + object.qps = options.json && !isFinite(message.qps) ? 
String(message.qps) : message.qps; + if (message.table_schema_changed && message.table_schema_changed.length) { + object.table_schema_changed = []; + for (let j = 0; j < message.table_schema_changed.length; ++j) + object.table_schema_changed[j] = message.table_schema_changed[j]; } - if (message.tables && (keys2 = Object.keys(message.tables)).length) { - object.tables = {}; - for (let j = 0; j < keys2.length; ++j) - object.tables[keys2[j]] = $root.vschema.Table.toObject(message.tables[keys2[j]], options); + if (message.view_schema_changed && message.view_schema_changed.length) { + object.view_schema_changed = []; + for (let j = 0; j < message.view_schema_changed.length; ++j) + object.view_schema_changed[j] = message.view_schema_changed[j]; } - if (message.require_explicit_routing != null && message.hasOwnProperty("require_explicit_routing")) - object.require_explicit_routing = message.require_explicit_routing; - if (message.cross_tablet != null && message.hasOwnProperty("cross_tablet")) - object.cross_tablet = message.cross_tablet; - if (message.attach_enable != null && message.hasOwnProperty("attach_enable")) - object.attach_enable = message.attach_enable; - if (message.attach_to != null && message.hasOwnProperty("attach_to")) - object.attach_to = message.attach_to; return object; }; /** - * Converts this Keyspace to JSON. + * Converts this RealtimeStats to JSON. 
* @function toJSON - * @memberof vschema.Keyspace + * @memberof query.RealtimeStats * @instance * @returns {Object.} JSON object */ - Keyspace.prototype.toJSON = function toJSON() { + RealtimeStats.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for Keyspace + * Gets the default type url for RealtimeStats * @function getTypeUrl - * @memberof vschema.Keyspace + * @memberof query.RealtimeStats * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - Keyspace.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RealtimeStats.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vschema.Keyspace"; + return typeUrlPrefix + "/query.RealtimeStats"; }; - return Keyspace; + return RealtimeStats; })(); - vschema.Vindex = (function() { + query.AggregateStats = (function() { /** - * Properties of a Vindex. - * @memberof vschema - * @interface IVindex - * @property {string|null} [type] Vindex type - * @property {Object.|null} [params] Vindex params - * @property {string|null} [owner] Vindex owner + * Properties of an AggregateStats. + * @memberof query + * @interface IAggregateStats + * @property {number|null} [healthy_tablet_count] AggregateStats healthy_tablet_count + * @property {number|null} [unhealthy_tablet_count] AggregateStats unhealthy_tablet_count + * @property {number|null} [replication_lag_seconds_min] AggregateStats replication_lag_seconds_min + * @property {number|null} [replication_lag_seconds_max] AggregateStats replication_lag_seconds_max */ /** - * Constructs a new Vindex. - * @memberof vschema - * @classdesc Represents a Vindex. - * @implements IVindex + * Constructs a new AggregateStats. + * @memberof query + * @classdesc Represents an AggregateStats. 
+ * @implements IAggregateStats * @constructor - * @param {vschema.IVindex=} [properties] Properties to set + * @param {query.IAggregateStats=} [properties] Properties to set */ - function Vindex(properties) { - this.params = {}; + function AggregateStats(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -94828,123 +95844,117 @@ export const vschema = $root.vschema = (() => { } /** - * Vindex type. - * @member {string} type - * @memberof vschema.Vindex + * AggregateStats healthy_tablet_count. + * @member {number} healthy_tablet_count + * @memberof query.AggregateStats * @instance */ - Vindex.prototype.type = ""; + AggregateStats.prototype.healthy_tablet_count = 0; /** - * Vindex params. - * @member {Object.} params - * @memberof vschema.Vindex + * AggregateStats unhealthy_tablet_count. + * @member {number} unhealthy_tablet_count + * @memberof query.AggregateStats * @instance */ - Vindex.prototype.params = $util.emptyObject; + AggregateStats.prototype.unhealthy_tablet_count = 0; /** - * Vindex owner. - * @member {string} owner - * @memberof vschema.Vindex + * AggregateStats replication_lag_seconds_min. + * @member {number} replication_lag_seconds_min + * @memberof query.AggregateStats * @instance */ - Vindex.prototype.owner = ""; + AggregateStats.prototype.replication_lag_seconds_min = 0; /** - * Creates a new Vindex instance using the specified properties. + * AggregateStats replication_lag_seconds_max. + * @member {number} replication_lag_seconds_max + * @memberof query.AggregateStats + * @instance + */ + AggregateStats.prototype.replication_lag_seconds_max = 0; + + /** + * Creates a new AggregateStats instance using the specified properties. 
* @function create - * @memberof vschema.Vindex + * @memberof query.AggregateStats * @static - * @param {vschema.IVindex=} [properties] Properties to set - * @returns {vschema.Vindex} Vindex instance + * @param {query.IAggregateStats=} [properties] Properties to set + * @returns {query.AggregateStats} AggregateStats instance */ - Vindex.create = function create(properties) { - return new Vindex(properties); + AggregateStats.create = function create(properties) { + return new AggregateStats(properties); }; /** - * Encodes the specified Vindex message. Does not implicitly {@link vschema.Vindex.verify|verify} messages. + * Encodes the specified AggregateStats message. Does not implicitly {@link query.AggregateStats.verify|verify} messages. * @function encode - * @memberof vschema.Vindex + * @memberof query.AggregateStats * @static - * @param {vschema.IVindex} message Vindex message or plain object to encode + * @param {query.IAggregateStats} message AggregateStats message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Vindex.encode = function encode(message, writer) { + AggregateStats.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.type != null && Object.hasOwnProperty.call(message, "type")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.type); - if (message.params != null && Object.hasOwnProperty.call(message, "params")) - for (let keys = Object.keys(message.params), i = 0; i < keys.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.params[keys[i]]).ldelim(); - if (message.owner != null && Object.hasOwnProperty.call(message, "owner")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.owner); + if (message.healthy_tablet_count != null && Object.hasOwnProperty.call(message, "healthy_tablet_count")) + 
writer.uint32(/* id 1, wireType 0 =*/8).int32(message.healthy_tablet_count); + if (message.unhealthy_tablet_count != null && Object.hasOwnProperty.call(message, "unhealthy_tablet_count")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.unhealthy_tablet_count); + if (message.replication_lag_seconds_min != null && Object.hasOwnProperty.call(message, "replication_lag_seconds_min")) + writer.uint32(/* id 3, wireType 0 =*/24).uint32(message.replication_lag_seconds_min); + if (message.replication_lag_seconds_max != null && Object.hasOwnProperty.call(message, "replication_lag_seconds_max")) + writer.uint32(/* id 4, wireType 0 =*/32).uint32(message.replication_lag_seconds_max); return writer; }; /** - * Encodes the specified Vindex message, length delimited. Does not implicitly {@link vschema.Vindex.verify|verify} messages. + * Encodes the specified AggregateStats message, length delimited. Does not implicitly {@link query.AggregateStats.verify|verify} messages. * @function encodeDelimited - * @memberof vschema.Vindex + * @memberof query.AggregateStats * @static - * @param {vschema.IVindex} message Vindex message or plain object to encode + * @param {query.IAggregateStats} message AggregateStats message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Vindex.encodeDelimited = function encodeDelimited(message, writer) { + AggregateStats.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Vindex message from the specified reader or buffer. + * Decodes an AggregateStats message from the specified reader or buffer. 
* @function decode - * @memberof vschema.Vindex + * @memberof query.AggregateStats * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vschema.Vindex} Vindex + * @returns {query.AggregateStats} AggregateStats * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Vindex.decode = function decode(reader, length) { + AggregateStats.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.Vindex(), key, value; + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.AggregateStats(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.type = reader.string(); + message.healthy_tablet_count = reader.int32(); break; } case 2: { - if (message.params === $util.emptyObject) - message.params = {}; - let end2 = reader.uint32() + reader.pos; - key = ""; - value = ""; - while (reader.pos < end2) { - let tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = reader.string(); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.params[key] = value; + message.unhealthy_tablet_count = reader.int32(); break; } case 3: { - message.owner = reader.string(); + message.replication_lag_seconds_min = reader.uint32(); + break; + } + case 4: { + message.replication_lag_seconds_max = reader.uint32(); break; } default: @@ -94956,162 +95966,151 @@ export const vschema = $root.vschema = (() => { }; /** - * Decodes a Vindex message from the specified reader or buffer, length delimited. + * Decodes an AggregateStats message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vschema.Vindex + * @memberof query.AggregateStats * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vschema.Vindex} Vindex + * @returns {query.AggregateStats} AggregateStats * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Vindex.decodeDelimited = function decodeDelimited(reader) { + AggregateStats.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Vindex message. + * Verifies an AggregateStats message. * @function verify - * @memberof vschema.Vindex + * @memberof query.AggregateStats * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Vindex.verify = function verify(message) { + AggregateStats.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.type != null && message.hasOwnProperty("type")) - if (!$util.isString(message.type)) - return "type: string expected"; - if (message.params != null && message.hasOwnProperty("params")) { - if (!$util.isObject(message.params)) - return "params: object expected"; - let key = Object.keys(message.params); - for (let i = 0; i < key.length; ++i) - if (!$util.isString(message.params[key[i]])) - return "params: string{k:string} expected"; - } - if (message.owner != null && message.hasOwnProperty("owner")) - if (!$util.isString(message.owner)) - return "owner: string expected"; + if (message.healthy_tablet_count != null && message.hasOwnProperty("healthy_tablet_count")) + if (!$util.isInteger(message.healthy_tablet_count)) + return "healthy_tablet_count: integer expected"; + if (message.unhealthy_tablet_count != null && 
message.hasOwnProperty("unhealthy_tablet_count")) + if (!$util.isInteger(message.unhealthy_tablet_count)) + return "unhealthy_tablet_count: integer expected"; + if (message.replication_lag_seconds_min != null && message.hasOwnProperty("replication_lag_seconds_min")) + if (!$util.isInteger(message.replication_lag_seconds_min)) + return "replication_lag_seconds_min: integer expected"; + if (message.replication_lag_seconds_max != null && message.hasOwnProperty("replication_lag_seconds_max")) + if (!$util.isInteger(message.replication_lag_seconds_max)) + return "replication_lag_seconds_max: integer expected"; return null; }; /** - * Creates a Vindex message from a plain object. Also converts values to their respective internal types. + * Creates an AggregateStats message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vschema.Vindex + * @memberof query.AggregateStats * @static * @param {Object.} object Plain object - * @returns {vschema.Vindex} Vindex + * @returns {query.AggregateStats} AggregateStats */ - Vindex.fromObject = function fromObject(object) { - if (object instanceof $root.vschema.Vindex) + AggregateStats.fromObject = function fromObject(object) { + if (object instanceof $root.query.AggregateStats) return object; - let message = new $root.vschema.Vindex(); - if (object.type != null) - message.type = String(object.type); - if (object.params) { - if (typeof object.params !== "object") - throw TypeError(".vschema.Vindex.params: object expected"); - message.params = {}; - for (let keys = Object.keys(object.params), i = 0; i < keys.length; ++i) - message.params[keys[i]] = String(object.params[keys[i]]); - } - if (object.owner != null) - message.owner = String(object.owner); + let message = new $root.query.AggregateStats(); + if (object.healthy_tablet_count != null) + message.healthy_tablet_count = object.healthy_tablet_count | 0; + if (object.unhealthy_tablet_count != null) + 
message.unhealthy_tablet_count = object.unhealthy_tablet_count | 0; + if (object.replication_lag_seconds_min != null) + message.replication_lag_seconds_min = object.replication_lag_seconds_min >>> 0; + if (object.replication_lag_seconds_max != null) + message.replication_lag_seconds_max = object.replication_lag_seconds_max >>> 0; return message; }; /** - * Creates a plain object from a Vindex message. Also converts values to other types if specified. + * Creates a plain object from an AggregateStats message. Also converts values to other types if specified. * @function toObject - * @memberof vschema.Vindex + * @memberof query.AggregateStats * @static - * @param {vschema.Vindex} message Vindex + * @param {query.AggregateStats} message AggregateStats * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Vindex.toObject = function toObject(message, options) { + AggregateStats.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.objects || options.defaults) - object.params = {}; if (options.defaults) { - object.type = ""; - object.owner = ""; - } - if (message.type != null && message.hasOwnProperty("type")) - object.type = message.type; - let keys2; - if (message.params && (keys2 = Object.keys(message.params)).length) { - object.params = {}; - for (let j = 0; j < keys2.length; ++j) - object.params[keys2[j]] = message.params[keys2[j]]; + object.healthy_tablet_count = 0; + object.unhealthy_tablet_count = 0; + object.replication_lag_seconds_min = 0; + object.replication_lag_seconds_max = 0; } - if (message.owner != null && message.hasOwnProperty("owner")) - object.owner = message.owner; + if (message.healthy_tablet_count != null && message.hasOwnProperty("healthy_tablet_count")) + object.healthy_tablet_count = message.healthy_tablet_count; + if (message.unhealthy_tablet_count != null && message.hasOwnProperty("unhealthy_tablet_count")) + 
object.unhealthy_tablet_count = message.unhealthy_tablet_count; + if (message.replication_lag_seconds_min != null && message.hasOwnProperty("replication_lag_seconds_min")) + object.replication_lag_seconds_min = message.replication_lag_seconds_min; + if (message.replication_lag_seconds_max != null && message.hasOwnProperty("replication_lag_seconds_max")) + object.replication_lag_seconds_max = message.replication_lag_seconds_max; return object; }; /** - * Converts this Vindex to JSON. + * Converts this AggregateStats to JSON. * @function toJSON - * @memberof vschema.Vindex + * @memberof query.AggregateStats * @instance * @returns {Object.} JSON object */ - Vindex.prototype.toJSON = function toJSON() { + AggregateStats.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for Vindex + * Gets the default type url for AggregateStats * @function getTypeUrl - * @memberof vschema.Vindex + * @memberof query.AggregateStats * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - Vindex.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + AggregateStats.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vschema.Vindex"; + return typeUrlPrefix + "/query.AggregateStats"; }; - return Vindex; + return AggregateStats; })(); - vschema.Table = (function() { + query.StreamHealthResponse = (function() { /** - * Properties of a Table. 
- * @memberof vschema - * @interface ITable - * @property {string|null} [type] Table type - * @property {Array.|null} [column_vindexes] Table column_vindexes - * @property {vschema.IAutoIncrement|null} [auto_increment] Table auto_increment - * @property {Array.|null} [columns] Table columns - * @property {string|null} [pinned] Table pinned - * @property {boolean|null} [column_list_authoritative] Table column_list_authoritative - * @property {string|null} [source] Table source + * Properties of a StreamHealthResponse. + * @memberof query + * @interface IStreamHealthResponse + * @property {query.ITarget|null} [target] StreamHealthResponse target + * @property {boolean|null} [serving] StreamHealthResponse serving + * @property {number|Long|null} [primary_term_start_timestamp] StreamHealthResponse primary_term_start_timestamp + * @property {query.IRealtimeStats|null} [realtime_stats] StreamHealthResponse realtime_stats + * @property {topodata.ITabletAlias|null} [tablet_alias] StreamHealthResponse tablet_alias */ /** - * Constructs a new Table. - * @memberof vschema - * @classdesc Represents a Table. - * @implements ITable + * Constructs a new StreamHealthResponse. + * @memberof query + * @classdesc Represents a StreamHealthResponse. + * @implements IStreamHealthResponse * @constructor - * @param {vschema.ITable=} [properties] Properties to set + * @param {query.IStreamHealthResponse=} [properties] Properties to set */ - function Table(properties) { - this.column_vindexes = []; - this.columns = []; + function StreamHealthResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -95119,165 +96118,131 @@ export const vschema = $root.vschema = (() => { } /** - * Table type. - * @member {string} type - * @memberof vschema.Table - * @instance - */ - Table.prototype.type = ""; - - /** - * Table column_vindexes. 
- * @member {Array.} column_vindexes - * @memberof vschema.Table - * @instance - */ - Table.prototype.column_vindexes = $util.emptyArray; - - /** - * Table auto_increment. - * @member {vschema.IAutoIncrement|null|undefined} auto_increment - * @memberof vschema.Table + * StreamHealthResponse target. + * @member {query.ITarget|null|undefined} target + * @memberof query.StreamHealthResponse * @instance */ - Table.prototype.auto_increment = null; + StreamHealthResponse.prototype.target = null; /** - * Table columns. - * @member {Array.} columns - * @memberof vschema.Table + * StreamHealthResponse serving. + * @member {boolean} serving + * @memberof query.StreamHealthResponse * @instance */ - Table.prototype.columns = $util.emptyArray; + StreamHealthResponse.prototype.serving = false; /** - * Table pinned. - * @member {string} pinned - * @memberof vschema.Table + * StreamHealthResponse primary_term_start_timestamp. + * @member {number|Long} primary_term_start_timestamp + * @memberof query.StreamHealthResponse * @instance */ - Table.prototype.pinned = ""; + StreamHealthResponse.prototype.primary_term_start_timestamp = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * Table column_list_authoritative. - * @member {boolean} column_list_authoritative - * @memberof vschema.Table + * StreamHealthResponse realtime_stats. + * @member {query.IRealtimeStats|null|undefined} realtime_stats + * @memberof query.StreamHealthResponse * @instance */ - Table.prototype.column_list_authoritative = false; + StreamHealthResponse.prototype.realtime_stats = null; /** - * Table source. - * @member {string} source - * @memberof vschema.Table + * StreamHealthResponse tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof query.StreamHealthResponse * @instance */ - Table.prototype.source = ""; + StreamHealthResponse.prototype.tablet_alias = null; /** - * Creates a new Table instance using the specified properties. 
+ * Creates a new StreamHealthResponse instance using the specified properties. * @function create - * @memberof vschema.Table + * @memberof query.StreamHealthResponse * @static - * @param {vschema.ITable=} [properties] Properties to set - * @returns {vschema.Table} Table instance + * @param {query.IStreamHealthResponse=} [properties] Properties to set + * @returns {query.StreamHealthResponse} StreamHealthResponse instance */ - Table.create = function create(properties) { - return new Table(properties); + StreamHealthResponse.create = function create(properties) { + return new StreamHealthResponse(properties); }; /** - * Encodes the specified Table message. Does not implicitly {@link vschema.Table.verify|verify} messages. + * Encodes the specified StreamHealthResponse message. Does not implicitly {@link query.StreamHealthResponse.verify|verify} messages. * @function encode - * @memberof vschema.Table + * @memberof query.StreamHealthResponse * @static - * @param {vschema.ITable} message Table message or plain object to encode + * @param {query.IStreamHealthResponse} message StreamHealthResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Table.encode = function encode(message, writer) { + StreamHealthResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.type != null && Object.hasOwnProperty.call(message, "type")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.type); - if (message.column_vindexes != null && message.column_vindexes.length) - for (let i = 0; i < message.column_vindexes.length; ++i) - $root.vschema.ColumnVindex.encode(message.column_vindexes[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.auto_increment != null && Object.hasOwnProperty.call(message, "auto_increment")) - $root.vschema.AutoIncrement.encode(message.auto_increment, writer.uint32(/* id 3, wireType 2 
=*/26).fork()).ldelim(); - if (message.columns != null && message.columns.length) - for (let i = 0; i < message.columns.length; ++i) - $root.vschema.Column.encode(message.columns[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.pinned != null && Object.hasOwnProperty.call(message, "pinned")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.pinned); - if (message.column_list_authoritative != null && Object.hasOwnProperty.call(message, "column_list_authoritative")) - writer.uint32(/* id 6, wireType 0 =*/48).bool(message.column_list_authoritative); - if (message.source != null && Object.hasOwnProperty.call(message, "source")) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.source); + if (message.target != null && Object.hasOwnProperty.call(message, "target")) + $root.query.Target.encode(message.target, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.serving != null && Object.hasOwnProperty.call(message, "serving")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.serving); + if (message.primary_term_start_timestamp != null && Object.hasOwnProperty.call(message, "primary_term_start_timestamp")) + writer.uint32(/* id 3, wireType 0 =*/24).int64(message.primary_term_start_timestamp); + if (message.realtime_stats != null && Object.hasOwnProperty.call(message, "realtime_stats")) + $root.query.RealtimeStats.encode(message.realtime_stats, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); return writer; }; /** - * Encodes the specified Table message, length delimited. Does not implicitly {@link vschema.Table.verify|verify} messages. + * Encodes the specified StreamHealthResponse message, length delimited. Does not implicitly {@link query.StreamHealthResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vschema.Table + * @memberof query.StreamHealthResponse * @static - * @param {vschema.ITable} message Table message or plain object to encode + * @param {query.IStreamHealthResponse} message StreamHealthResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Table.encodeDelimited = function encodeDelimited(message, writer) { + StreamHealthResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Table message from the specified reader or buffer. + * Decodes a StreamHealthResponse message from the specified reader or buffer. * @function decode - * @memberof vschema.Table + * @memberof query.StreamHealthResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vschema.Table} Table + * @returns {query.StreamHealthResponse} StreamHealthResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Table.decode = function decode(reader, length) { + StreamHealthResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.Table(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.StreamHealthResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.type = reader.string(); + message.target = $root.query.Target.decode(reader, reader.uint32()); break; } case 2: { - if (!(message.column_vindexes && message.column_vindexes.length)) - message.column_vindexes = []; - message.column_vindexes.push($root.vschema.ColumnVindex.decode(reader, reader.uint32())); + message.serving = reader.bool(); break; } case 3: { - message.auto_increment = $root.vschema.AutoIncrement.decode(reader, reader.uint32()); + message.primary_term_start_timestamp = reader.int64(); break; } case 4: { - if (!(message.columns && message.columns.length)) - message.columns = []; - message.columns.push($root.vschema.Column.decode(reader, reader.uint32())); + message.realtime_stats = $root.query.RealtimeStats.decode(reader, reader.uint32()); break; } case 5: { - message.pinned = reader.string(); - break; - } - case 6: { - message.column_list_authoritative = reader.bool(); - break; - } - case 7: { - message.source = reader.string(); + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); break; } default: @@ -95289,215 +96254,206 @@ export const vschema = $root.vschema = (() => { }; /** - * Decodes a Table message from the specified reader or buffer, length delimited. + * Decodes a StreamHealthResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vschema.Table + * @memberof query.StreamHealthResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vschema.Table} Table + * @returns {query.StreamHealthResponse} StreamHealthResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Table.decodeDelimited = function decodeDelimited(reader) { + StreamHealthResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Table message. + * Verifies a StreamHealthResponse message. * @function verify - * @memberof vschema.Table + * @memberof query.StreamHealthResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Table.verify = function verify(message) { + StreamHealthResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.type != null && message.hasOwnProperty("type")) - if (!$util.isString(message.type)) - return "type: string expected"; - if (message.column_vindexes != null && message.hasOwnProperty("column_vindexes")) { - if (!Array.isArray(message.column_vindexes)) - return "column_vindexes: array expected"; - for (let i = 0; i < message.column_vindexes.length; ++i) { - let error = $root.vschema.ColumnVindex.verify(message.column_vindexes[i]); - if (error) - return "column_vindexes." + error; - } + if (message.target != null && message.hasOwnProperty("target")) { + let error = $root.query.Target.verify(message.target); + if (error) + return "target." 
+ error; } - if (message.auto_increment != null && message.hasOwnProperty("auto_increment")) { - let error = $root.vschema.AutoIncrement.verify(message.auto_increment); + if (message.serving != null && message.hasOwnProperty("serving")) + if (typeof message.serving !== "boolean") + return "serving: boolean expected"; + if (message.primary_term_start_timestamp != null && message.hasOwnProperty("primary_term_start_timestamp")) + if (!$util.isInteger(message.primary_term_start_timestamp) && !(message.primary_term_start_timestamp && $util.isInteger(message.primary_term_start_timestamp.low) && $util.isInteger(message.primary_term_start_timestamp.high))) + return "primary_term_start_timestamp: integer|Long expected"; + if (message.realtime_stats != null && message.hasOwnProperty("realtime_stats")) { + let error = $root.query.RealtimeStats.verify(message.realtime_stats); if (error) - return "auto_increment." + error; + return "realtime_stats." + error; } - if (message.columns != null && message.hasOwnProperty("columns")) { - if (!Array.isArray(message.columns)) - return "columns: array expected"; - for (let i = 0; i < message.columns.length; ++i) { - let error = $root.vschema.Column.verify(message.columns[i]); - if (error) - return "columns." + error; - } + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." 
+ error; } - if (message.pinned != null && message.hasOwnProperty("pinned")) - if (!$util.isString(message.pinned)) - return "pinned: string expected"; - if (message.column_list_authoritative != null && message.hasOwnProperty("column_list_authoritative")) - if (typeof message.column_list_authoritative !== "boolean") - return "column_list_authoritative: boolean expected"; - if (message.source != null && message.hasOwnProperty("source")) - if (!$util.isString(message.source)) - return "source: string expected"; return null; }; /** - * Creates a Table message from a plain object. Also converts values to their respective internal types. + * Creates a StreamHealthResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vschema.Table + * @memberof query.StreamHealthResponse * @static * @param {Object.} object Plain object - * @returns {vschema.Table} Table + * @returns {query.StreamHealthResponse} StreamHealthResponse */ - Table.fromObject = function fromObject(object) { - if (object instanceof $root.vschema.Table) + StreamHealthResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.StreamHealthResponse) return object; - let message = new $root.vschema.Table(); - if (object.type != null) - message.type = String(object.type); - if (object.column_vindexes) { - if (!Array.isArray(object.column_vindexes)) - throw TypeError(".vschema.Table.column_vindexes: array expected"); - message.column_vindexes = []; - for (let i = 0; i < object.column_vindexes.length; ++i) { - if (typeof object.column_vindexes[i] !== "object") - throw TypeError(".vschema.Table.column_vindexes: object expected"); - message.column_vindexes[i] = $root.vschema.ColumnVindex.fromObject(object.column_vindexes[i]); - } + let message = new $root.query.StreamHealthResponse(); + if (object.target != null) { + if (typeof object.target !== "object") + throw TypeError(".query.StreamHealthResponse.target: object 
expected"); + message.target = $root.query.Target.fromObject(object.target); } - if (object.auto_increment != null) { - if (typeof object.auto_increment !== "object") - throw TypeError(".vschema.Table.auto_increment: object expected"); - message.auto_increment = $root.vschema.AutoIncrement.fromObject(object.auto_increment); + if (object.serving != null) + message.serving = Boolean(object.serving); + if (object.primary_term_start_timestamp != null) + if ($util.Long) + (message.primary_term_start_timestamp = $util.Long.fromValue(object.primary_term_start_timestamp)).unsigned = false; + else if (typeof object.primary_term_start_timestamp === "string") + message.primary_term_start_timestamp = parseInt(object.primary_term_start_timestamp, 10); + else if (typeof object.primary_term_start_timestamp === "number") + message.primary_term_start_timestamp = object.primary_term_start_timestamp; + else if (typeof object.primary_term_start_timestamp === "object") + message.primary_term_start_timestamp = new $util.LongBits(object.primary_term_start_timestamp.low >>> 0, object.primary_term_start_timestamp.high >>> 0).toNumber(); + if (object.realtime_stats != null) { + if (typeof object.realtime_stats !== "object") + throw TypeError(".query.StreamHealthResponse.realtime_stats: object expected"); + message.realtime_stats = $root.query.RealtimeStats.fromObject(object.realtime_stats); } - if (object.columns) { - if (!Array.isArray(object.columns)) - throw TypeError(".vschema.Table.columns: array expected"); - message.columns = []; - for (let i = 0; i < object.columns.length; ++i) { - if (typeof object.columns[i] !== "object") - throw TypeError(".vschema.Table.columns: object expected"); - message.columns[i] = $root.vschema.Column.fromObject(object.columns[i]); - } + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".query.StreamHealthResponse.tablet_alias: object expected"); + message.tablet_alias = 
$root.topodata.TabletAlias.fromObject(object.tablet_alias); } - if (object.pinned != null) - message.pinned = String(object.pinned); - if (object.column_list_authoritative != null) - message.column_list_authoritative = Boolean(object.column_list_authoritative); - if (object.source != null) - message.source = String(object.source); return message; }; /** - * Creates a plain object from a Table message. Also converts values to other types if specified. + * Creates a plain object from a StreamHealthResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vschema.Table + * @memberof query.StreamHealthResponse * @static - * @param {vschema.Table} message Table + * @param {query.StreamHealthResponse} message StreamHealthResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Table.toObject = function toObject(message, options) { + StreamHealthResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) { - object.column_vindexes = []; - object.columns = []; - } if (options.defaults) { - object.type = ""; - object.auto_increment = null; - object.pinned = ""; - object.column_list_authoritative = false; - object.source = ""; - } - if (message.type != null && message.hasOwnProperty("type")) - object.type = message.type; - if (message.column_vindexes && message.column_vindexes.length) { - object.column_vindexes = []; - for (let j = 0; j < message.column_vindexes.length; ++j) - object.column_vindexes[j] = $root.vschema.ColumnVindex.toObject(message.column_vindexes[j], options); - } - if (message.auto_increment != null && message.hasOwnProperty("auto_increment")) - object.auto_increment = $root.vschema.AutoIncrement.toObject(message.auto_increment, options); - if (message.columns && message.columns.length) { - object.columns = []; - for (let j = 0; j < message.columns.length; ++j) - 
object.columns[j] = $root.vschema.Column.toObject(message.columns[j], options); + object.target = null; + object.serving = false; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.primary_term_start_timestamp = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.primary_term_start_timestamp = options.longs === String ? "0" : 0; + object.realtime_stats = null; + object.tablet_alias = null; } - if (message.pinned != null && message.hasOwnProperty("pinned")) - object.pinned = message.pinned; - if (message.column_list_authoritative != null && message.hasOwnProperty("column_list_authoritative")) - object.column_list_authoritative = message.column_list_authoritative; - if (message.source != null && message.hasOwnProperty("source")) - object.source = message.source; + if (message.target != null && message.hasOwnProperty("target")) + object.target = $root.query.Target.toObject(message.target, options); + if (message.serving != null && message.hasOwnProperty("serving")) + object.serving = message.serving; + if (message.primary_term_start_timestamp != null && message.hasOwnProperty("primary_term_start_timestamp")) + if (typeof message.primary_term_start_timestamp === "number") + object.primary_term_start_timestamp = options.longs === String ? String(message.primary_term_start_timestamp) : message.primary_term_start_timestamp; + else + object.primary_term_start_timestamp = options.longs === String ? $util.Long.prototype.toString.call(message.primary_term_start_timestamp) : options.longs === Number ? 
new $util.LongBits(message.primary_term_start_timestamp.low >>> 0, message.primary_term_start_timestamp.high >>> 0).toNumber() : message.primary_term_start_timestamp; + if (message.realtime_stats != null && message.hasOwnProperty("realtime_stats")) + object.realtime_stats = $root.query.RealtimeStats.toObject(message.realtime_stats, options); + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); return object; }; /** - * Converts this Table to JSON. + * Converts this StreamHealthResponse to JSON. * @function toJSON - * @memberof vschema.Table + * @memberof query.StreamHealthResponse * @instance * @returns {Object.} JSON object */ - Table.prototype.toJSON = function toJSON() { + StreamHealthResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for Table + * Gets the default type url for StreamHealthResponse * @function getTypeUrl - * @memberof vschema.Table + * @memberof query.StreamHealthResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - Table.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + StreamHealthResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vschema.Table"; + return typeUrlPrefix + "/query.StreamHealthResponse"; }; - return Table; + return StreamHealthResponse; })(); - vschema.ColumnVindex = (function() { + /** + * TransactionState enum. 
+ * @name query.TransactionState + * @enum {number} + * @property {number} UNKNOWN=0 UNKNOWN value + * @property {number} PREPARE=1 PREPARE value + * @property {number} COMMIT=2 COMMIT value + * @property {number} ROLLBACK=3 ROLLBACK value + */ + query.TransactionState = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "UNKNOWN"] = 0; + values[valuesById[1] = "PREPARE"] = 1; + values[valuesById[2] = "COMMIT"] = 2; + values[valuesById[3] = "ROLLBACK"] = 3; + return values; + })(); + + query.TransactionMetadata = (function() { /** - * Properties of a ColumnVindex. - * @memberof vschema - * @interface IColumnVindex - * @property {string|null} [column] ColumnVindex column - * @property {string|null} [name] ColumnVindex name - * @property {Array.|null} [columns] ColumnVindex columns + * Properties of a TransactionMetadata. + * @memberof query + * @interface ITransactionMetadata + * @property {string|null} [dtid] TransactionMetadata dtid + * @property {query.TransactionState|null} [state] TransactionMetadata state + * @property {number|Long|null} [time_created] TransactionMetadata time_created + * @property {Array.|null} [participants] TransactionMetadata participants */ /** - * Constructs a new ColumnVindex. - * @memberof vschema - * @classdesc Represents a ColumnVindex. - * @implements IColumnVindex + * Constructs a new TransactionMetadata. + * @memberof query + * @classdesc Represents a TransactionMetadata. 
+ * @implements ITransactionMetadata * @constructor - * @param {vschema.IColumnVindex=} [properties] Properties to set + * @param {query.ITransactionMetadata=} [properties] Properties to set */ - function ColumnVindex(properties) { - this.columns = []; + function TransactionMetadata(properties) { + this.participants = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -95505,106 +96461,120 @@ export const vschema = $root.vschema = (() => { } /** - * ColumnVindex column. - * @member {string} column - * @memberof vschema.ColumnVindex + * TransactionMetadata dtid. + * @member {string} dtid + * @memberof query.TransactionMetadata * @instance */ - ColumnVindex.prototype.column = ""; + TransactionMetadata.prototype.dtid = ""; /** - * ColumnVindex name. - * @member {string} name - * @memberof vschema.ColumnVindex + * TransactionMetadata state. + * @member {query.TransactionState} state + * @memberof query.TransactionMetadata * @instance */ - ColumnVindex.prototype.name = ""; + TransactionMetadata.prototype.state = 0; /** - * ColumnVindex columns. - * @member {Array.} columns - * @memberof vschema.ColumnVindex + * TransactionMetadata time_created. + * @member {number|Long} time_created + * @memberof query.TransactionMetadata * @instance */ - ColumnVindex.prototype.columns = $util.emptyArray; + TransactionMetadata.prototype.time_created = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * Creates a new ColumnVindex instance using the specified properties. + * TransactionMetadata participants. + * @member {Array.} participants + * @memberof query.TransactionMetadata + * @instance + */ + TransactionMetadata.prototype.participants = $util.emptyArray; + + /** + * Creates a new TransactionMetadata instance using the specified properties. 
* @function create - * @memberof vschema.ColumnVindex + * @memberof query.TransactionMetadata * @static - * @param {vschema.IColumnVindex=} [properties] Properties to set - * @returns {vschema.ColumnVindex} ColumnVindex instance + * @param {query.ITransactionMetadata=} [properties] Properties to set + * @returns {query.TransactionMetadata} TransactionMetadata instance */ - ColumnVindex.create = function create(properties) { - return new ColumnVindex(properties); + TransactionMetadata.create = function create(properties) { + return new TransactionMetadata(properties); }; /** - * Encodes the specified ColumnVindex message. Does not implicitly {@link vschema.ColumnVindex.verify|verify} messages. + * Encodes the specified TransactionMetadata message. Does not implicitly {@link query.TransactionMetadata.verify|verify} messages. * @function encode - * @memberof vschema.ColumnVindex + * @memberof query.TransactionMetadata * @static - * @param {vschema.IColumnVindex} message ColumnVindex message or plain object to encode + * @param {query.ITransactionMetadata} message TransactionMetadata message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ColumnVindex.encode = function encode(message, writer) { + TransactionMetadata.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.column != null && Object.hasOwnProperty.call(message, "column")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.column); - if (message.name != null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.name); - if (message.columns != null && message.columns.length) - for (let i = 0; i < message.columns.length; ++i) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.columns[i]); + if (message.dtid != null && Object.hasOwnProperty.call(message, "dtid")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.dtid); + 
if (message.state != null && Object.hasOwnProperty.call(message, "state")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.state); + if (message.time_created != null && Object.hasOwnProperty.call(message, "time_created")) + writer.uint32(/* id 3, wireType 0 =*/24).int64(message.time_created); + if (message.participants != null && message.participants.length) + for (let i = 0; i < message.participants.length; ++i) + $root.query.Target.encode(message.participants[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; /** - * Encodes the specified ColumnVindex message, length delimited. Does not implicitly {@link vschema.ColumnVindex.verify|verify} messages. + * Encodes the specified TransactionMetadata message, length delimited. Does not implicitly {@link query.TransactionMetadata.verify|verify} messages. * @function encodeDelimited - * @memberof vschema.ColumnVindex + * @memberof query.TransactionMetadata * @static - * @param {vschema.IColumnVindex} message ColumnVindex message or plain object to encode + * @param {query.ITransactionMetadata} message TransactionMetadata message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ColumnVindex.encodeDelimited = function encodeDelimited(message, writer) { + TransactionMetadata.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ColumnVindex message from the specified reader or buffer. + * Decodes a TransactionMetadata message from the specified reader or buffer. 
* @function decode - * @memberof vschema.ColumnVindex + * @memberof query.TransactionMetadata * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vschema.ColumnVindex} ColumnVindex + * @returns {query.TransactionMetadata} TransactionMetadata * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ColumnVindex.decode = function decode(reader, length) { + TransactionMetadata.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.ColumnVindex(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.query.TransactionMetadata(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.column = reader.string(); + message.dtid = reader.string(); break; } case 2: { - message.name = reader.string(); + message.state = reader.int32(); break; } case 3: { - if (!(message.columns && message.columns.length)) - message.columns = []; - message.columns.push(reader.string()); + message.time_created = reader.int64(); + break; + } + case 4: { + if (!(message.participants && message.participants.length)) + message.participants = []; + message.participants.push($root.query.Target.decode(reader, reader.uint32())); break; } default: @@ -95616,153 +96586,227 @@ export const vschema = $root.vschema = (() => { }; /** - * Decodes a ColumnVindex message from the specified reader or buffer, length delimited. + * Decodes a TransactionMetadata message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vschema.ColumnVindex + * @memberof query.TransactionMetadata * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vschema.ColumnVindex} ColumnVindex + * @returns {query.TransactionMetadata} TransactionMetadata * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ColumnVindex.decodeDelimited = function decodeDelimited(reader) { + TransactionMetadata.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ColumnVindex message. + * Verifies a TransactionMetadata message. * @function verify - * @memberof vschema.ColumnVindex + * @memberof query.TransactionMetadata * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ColumnVindex.verify = function verify(message) { + TransactionMetadata.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.column != null && message.hasOwnProperty("column")) - if (!$util.isString(message.column)) - return "column: string expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; - if (message.columns != null && message.hasOwnProperty("columns")) { - if (!Array.isArray(message.columns)) - return "columns: array expected"; - for (let i = 0; i < message.columns.length; ++i) - if (!$util.isString(message.columns[i])) - return "columns: string[] expected"; + if (message.dtid != null && message.hasOwnProperty("dtid")) + if (!$util.isString(message.dtid)) + return "dtid: string expected"; + if (message.state != null && message.hasOwnProperty("state")) + switch (message.state) { + default: + return 
"state: enum value expected"; + case 0: + case 1: + case 2: + case 3: + break; + } + if (message.time_created != null && message.hasOwnProperty("time_created")) + if (!$util.isInteger(message.time_created) && !(message.time_created && $util.isInteger(message.time_created.low) && $util.isInteger(message.time_created.high))) + return "time_created: integer|Long expected"; + if (message.participants != null && message.hasOwnProperty("participants")) { + if (!Array.isArray(message.participants)) + return "participants: array expected"; + for (let i = 0; i < message.participants.length; ++i) { + let error = $root.query.Target.verify(message.participants[i]); + if (error) + return "participants." + error; + } } return null; }; /** - * Creates a ColumnVindex message from a plain object. Also converts values to their respective internal types. + * Creates a TransactionMetadata message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vschema.ColumnVindex + * @memberof query.TransactionMetadata * @static * @param {Object.} object Plain object - * @returns {vschema.ColumnVindex} ColumnVindex + * @returns {query.TransactionMetadata} TransactionMetadata */ - ColumnVindex.fromObject = function fromObject(object) { - if (object instanceof $root.vschema.ColumnVindex) + TransactionMetadata.fromObject = function fromObject(object) { + if (object instanceof $root.query.TransactionMetadata) return object; - let message = new $root.vschema.ColumnVindex(); - if (object.column != null) - message.column = String(object.column); - if (object.name != null) - message.name = String(object.name); - if (object.columns) { - if (!Array.isArray(object.columns)) - throw TypeError(".vschema.ColumnVindex.columns: array expected"); - message.columns = []; - for (let i = 0; i < object.columns.length; ++i) - message.columns[i] = String(object.columns[i]); + let message = new $root.query.TransactionMetadata(); + if (object.dtid != null) + 
message.dtid = String(object.dtid); + switch (object.state) { + default: + if (typeof object.state === "number") { + message.state = object.state; + break; + } + break; + case "UNKNOWN": + case 0: + message.state = 0; + break; + case "PREPARE": + case 1: + message.state = 1; + break; + case "COMMIT": + case 2: + message.state = 2; + break; + case "ROLLBACK": + case 3: + message.state = 3; + break; + } + if (object.time_created != null) + if ($util.Long) + (message.time_created = $util.Long.fromValue(object.time_created)).unsigned = false; + else if (typeof object.time_created === "string") + message.time_created = parseInt(object.time_created, 10); + else if (typeof object.time_created === "number") + message.time_created = object.time_created; + else if (typeof object.time_created === "object") + message.time_created = new $util.LongBits(object.time_created.low >>> 0, object.time_created.high >>> 0).toNumber(); + if (object.participants) { + if (!Array.isArray(object.participants)) + throw TypeError(".query.TransactionMetadata.participants: array expected"); + message.participants = []; + for (let i = 0; i < object.participants.length; ++i) { + if (typeof object.participants[i] !== "object") + throw TypeError(".query.TransactionMetadata.participants: object expected"); + message.participants[i] = $root.query.Target.fromObject(object.participants[i]); + } } return message; }; /** - * Creates a plain object from a ColumnVindex message. Also converts values to other types if specified. + * Creates a plain object from a TransactionMetadata message. Also converts values to other types if specified. 
* @function toObject - * @memberof vschema.ColumnVindex + * @memberof query.TransactionMetadata * @static - * @param {vschema.ColumnVindex} message ColumnVindex + * @param {query.TransactionMetadata} message TransactionMetadata * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ColumnVindex.toObject = function toObject(message, options) { + TransactionMetadata.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) - object.columns = []; + object.participants = []; if (options.defaults) { - object.column = ""; - object.name = ""; + object.dtid = ""; + object.state = options.enums === String ? "UNKNOWN" : 0; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.time_created = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.time_created = options.longs === String ? "0" : 0; } - if (message.column != null && message.hasOwnProperty("column")) - object.column = message.column; - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; - if (message.columns && message.columns.length) { - object.columns = []; - for (let j = 0; j < message.columns.length; ++j) - object.columns[j] = message.columns[j]; + if (message.dtid != null && message.hasOwnProperty("dtid")) + object.dtid = message.dtid; + if (message.state != null && message.hasOwnProperty("state")) + object.state = options.enums === String ? $root.query.TransactionState[message.state] === undefined ? message.state : $root.query.TransactionState[message.state] : message.state; + if (message.time_created != null && message.hasOwnProperty("time_created")) + if (typeof message.time_created === "number") + object.time_created = options.longs === String ? String(message.time_created) : message.time_created; + else + object.time_created = options.longs === String ? 
$util.Long.prototype.toString.call(message.time_created) : options.longs === Number ? new $util.LongBits(message.time_created.low >>> 0, message.time_created.high >>> 0).toNumber() : message.time_created; + if (message.participants && message.participants.length) { + object.participants = []; + for (let j = 0; j < message.participants.length; ++j) + object.participants[j] = $root.query.Target.toObject(message.participants[j], options); } return object; }; /** - * Converts this ColumnVindex to JSON. + * Converts this TransactionMetadata to JSON. * @function toJSON - * @memberof vschema.ColumnVindex + * @memberof query.TransactionMetadata * @instance * @returns {Object.} JSON object */ - ColumnVindex.prototype.toJSON = function toJSON() { + TransactionMetadata.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ColumnVindex + * Gets the default type url for TransactionMetadata * @function getTypeUrl - * @memberof vschema.ColumnVindex + * @memberof query.TransactionMetadata * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ColumnVindex.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + TransactionMetadata.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vschema.ColumnVindex"; + return typeUrlPrefix + "/query.TransactionMetadata"; }; - return ColumnVindex; + return TransactionMetadata; })(); - vschema.AutoIncrement = (function() { + /** + * SchemaTableType enum. 
+ * @name query.SchemaTableType + * @enum {number} + * @property {number} VIEWS=0 VIEWS value + * @property {number} TABLES=1 TABLES value + * @property {number} ALL=2 ALL value + */ + query.SchemaTableType = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "VIEWS"] = 0; + values[valuesById[1] = "TABLES"] = 1; + values[valuesById[2] = "ALL"] = 2; + return values; + })(); + + query.GetSchemaRequest = (function() { /** - * Properties of an AutoIncrement. - * @memberof vschema - * @interface IAutoIncrement - * @property {string|null} [column] AutoIncrement column - * @property {string|null} [sequence] AutoIncrement sequence + * Properties of a GetSchemaRequest. + * @memberof query + * @interface IGetSchemaRequest + * @property {query.ITarget|null} [target] GetSchemaRequest target + * @property {query.SchemaTableType|null} [table_type] GetSchemaRequest table_type + * @property {Array.|null} [table_names] GetSchemaRequest table_names */ /** - * Constructs a new AutoIncrement. - * @memberof vschema - * @classdesc Represents an AutoIncrement. - * @implements IAutoIncrement + * Constructs a new GetSchemaRequest. + * @memberof query + * @classdesc Represents a GetSchemaRequest. + * @implements IGetSchemaRequest * @constructor - * @param {vschema.IAutoIncrement=} [properties] Properties to set + * @param {query.IGetSchemaRequest=} [properties] Properties to set */ - function AutoIncrement(properties) { + function GetSchemaRequest(properties) { + this.table_names = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -95770,89 +96814,106 @@ export const vschema = $root.vschema = (() => { } /** - * AutoIncrement column. - * @member {string} column - * @memberof vschema.AutoIncrement + * GetSchemaRequest target. 
+ * @member {query.ITarget|null|undefined} target + * @memberof query.GetSchemaRequest * @instance */ - AutoIncrement.prototype.column = ""; + GetSchemaRequest.prototype.target = null; /** - * AutoIncrement sequence. - * @member {string} sequence - * @memberof vschema.AutoIncrement + * GetSchemaRequest table_type. + * @member {query.SchemaTableType} table_type + * @memberof query.GetSchemaRequest * @instance */ - AutoIncrement.prototype.sequence = ""; + GetSchemaRequest.prototype.table_type = 0; /** - * Creates a new AutoIncrement instance using the specified properties. + * GetSchemaRequest table_names. + * @member {Array.} table_names + * @memberof query.GetSchemaRequest + * @instance + */ + GetSchemaRequest.prototype.table_names = $util.emptyArray; + + /** + * Creates a new GetSchemaRequest instance using the specified properties. * @function create - * @memberof vschema.AutoIncrement + * @memberof query.GetSchemaRequest * @static - * @param {vschema.IAutoIncrement=} [properties] Properties to set - * @returns {vschema.AutoIncrement} AutoIncrement instance + * @param {query.IGetSchemaRequest=} [properties] Properties to set + * @returns {query.GetSchemaRequest} GetSchemaRequest instance */ - AutoIncrement.create = function create(properties) { - return new AutoIncrement(properties); + GetSchemaRequest.create = function create(properties) { + return new GetSchemaRequest(properties); }; /** - * Encodes the specified AutoIncrement message. Does not implicitly {@link vschema.AutoIncrement.verify|verify} messages. + * Encodes the specified GetSchemaRequest message. Does not implicitly {@link query.GetSchemaRequest.verify|verify} messages. 
* @function encode - * @memberof vschema.AutoIncrement + * @memberof query.GetSchemaRequest * @static - * @param {vschema.IAutoIncrement} message AutoIncrement message or plain object to encode + * @param {query.IGetSchemaRequest} message GetSchemaRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - AutoIncrement.encode = function encode(message, writer) { + GetSchemaRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.column != null && Object.hasOwnProperty.call(message, "column")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.column); - if (message.sequence != null && Object.hasOwnProperty.call(message, "sequence")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.sequence); + if (message.target != null && Object.hasOwnProperty.call(message, "target")) + $root.query.Target.encode(message.target, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.table_type != null && Object.hasOwnProperty.call(message, "table_type")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.table_type); + if (message.table_names != null && message.table_names.length) + for (let i = 0; i < message.table_names.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.table_names[i]); return writer; }; /** - * Encodes the specified AutoIncrement message, length delimited. Does not implicitly {@link vschema.AutoIncrement.verify|verify} messages. + * Encodes the specified GetSchemaRequest message, length delimited. Does not implicitly {@link query.GetSchemaRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vschema.AutoIncrement + * @memberof query.GetSchemaRequest * @static - * @param {vschema.IAutoIncrement} message AutoIncrement message or plain object to encode + * @param {query.IGetSchemaRequest} message GetSchemaRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - AutoIncrement.encodeDelimited = function encodeDelimited(message, writer) { + GetSchemaRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an AutoIncrement message from the specified reader or buffer. + * Decodes a GetSchemaRequest message from the specified reader or buffer. * @function decode - * @memberof vschema.AutoIncrement + * @memberof query.GetSchemaRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vschema.AutoIncrement} AutoIncrement + * @returns {query.GetSchemaRequest} GetSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AutoIncrement.decode = function decode(reader, length) { + GetSchemaRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.AutoIncrement(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.GetSchemaRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.column = reader.string(); + message.target = $root.query.Target.decode(reader, reader.uint32()); break; } case 2: { - message.sequence = reader.string(); + message.table_type = reader.int32(); + break; + } + case 3: { + if (!(message.table_names && message.table_names.length)) + message.table_names = []; + message.table_names.push(reader.string()); break; } default: @@ -95864,132 +96925,182 @@ export const vschema = $root.vschema = (() => { }; /** - * Decodes an AutoIncrement message from the specified reader or buffer, length delimited. + * Decodes a GetSchemaRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vschema.AutoIncrement + * @memberof query.GetSchemaRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vschema.AutoIncrement} AutoIncrement + * @returns {query.GetSchemaRequest} GetSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AutoIncrement.decodeDelimited = function decodeDelimited(reader) { + GetSchemaRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an AutoIncrement message. + * Verifies a GetSchemaRequest message. 
* @function verify - * @memberof vschema.AutoIncrement + * @memberof query.GetSchemaRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - AutoIncrement.verify = function verify(message) { + GetSchemaRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.column != null && message.hasOwnProperty("column")) - if (!$util.isString(message.column)) - return "column: string expected"; - if (message.sequence != null && message.hasOwnProperty("sequence")) - if (!$util.isString(message.sequence)) - return "sequence: string expected"; + if (message.target != null && message.hasOwnProperty("target")) { + let error = $root.query.Target.verify(message.target); + if (error) + return "target." + error; + } + if (message.table_type != null && message.hasOwnProperty("table_type")) + switch (message.table_type) { + default: + return "table_type: enum value expected"; + case 0: + case 1: + case 2: + break; + } + if (message.table_names != null && message.hasOwnProperty("table_names")) { + if (!Array.isArray(message.table_names)) + return "table_names: array expected"; + for (let i = 0; i < message.table_names.length; ++i) + if (!$util.isString(message.table_names[i])) + return "table_names: string[] expected"; + } return null; }; /** - * Creates an AutoIncrement message from a plain object. Also converts values to their respective internal types. + * Creates a GetSchemaRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vschema.AutoIncrement + * @memberof query.GetSchemaRequest * @static * @param {Object.} object Plain object - * @returns {vschema.AutoIncrement} AutoIncrement + * @returns {query.GetSchemaRequest} GetSchemaRequest */ - AutoIncrement.fromObject = function fromObject(object) { - if (object instanceof $root.vschema.AutoIncrement) + GetSchemaRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.GetSchemaRequest) return object; - let message = new $root.vschema.AutoIncrement(); - if (object.column != null) - message.column = String(object.column); - if (object.sequence != null) - message.sequence = String(object.sequence); + let message = new $root.query.GetSchemaRequest(); + if (object.target != null) { + if (typeof object.target !== "object") + throw TypeError(".query.GetSchemaRequest.target: object expected"); + message.target = $root.query.Target.fromObject(object.target); + } + switch (object.table_type) { + default: + if (typeof object.table_type === "number") { + message.table_type = object.table_type; + break; + } + break; + case "VIEWS": + case 0: + message.table_type = 0; + break; + case "TABLES": + case 1: + message.table_type = 1; + break; + case "ALL": + case 2: + message.table_type = 2; + break; + } + if (object.table_names) { + if (!Array.isArray(object.table_names)) + throw TypeError(".query.GetSchemaRequest.table_names: array expected"); + message.table_names = []; + for (let i = 0; i < object.table_names.length; ++i) + message.table_names[i] = String(object.table_names[i]); + } return message; }; /** - * Creates a plain object from an AutoIncrement message. Also converts values to other types if specified. + * Creates a plain object from a GetSchemaRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vschema.AutoIncrement + * @memberof query.GetSchemaRequest * @static - * @param {vschema.AutoIncrement} message AutoIncrement + * @param {query.GetSchemaRequest} message GetSchemaRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - AutoIncrement.toObject = function toObject(message, options) { + GetSchemaRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) + object.table_names = []; if (options.defaults) { - object.column = ""; - object.sequence = ""; + object.target = null; + object.table_type = options.enums === String ? "VIEWS" : 0; + } + if (message.target != null && message.hasOwnProperty("target")) + object.target = $root.query.Target.toObject(message.target, options); + if (message.table_type != null && message.hasOwnProperty("table_type")) + object.table_type = options.enums === String ? $root.query.SchemaTableType[message.table_type] === undefined ? message.table_type : $root.query.SchemaTableType[message.table_type] : message.table_type; + if (message.table_names && message.table_names.length) { + object.table_names = []; + for (let j = 0; j < message.table_names.length; ++j) + object.table_names[j] = message.table_names[j]; } - if (message.column != null && message.hasOwnProperty("column")) - object.column = message.column; - if (message.sequence != null && message.hasOwnProperty("sequence")) - object.sequence = message.sequence; return object; }; /** - * Converts this AutoIncrement to JSON. + * Converts this GetSchemaRequest to JSON. 
* @function toJSON - * @memberof vschema.AutoIncrement + * @memberof query.GetSchemaRequest * @instance * @returns {Object.} JSON object */ - AutoIncrement.prototype.toJSON = function toJSON() { + GetSchemaRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for AutoIncrement + * Gets the default type url for GetSchemaRequest * @function getTypeUrl - * @memberof vschema.AutoIncrement + * @memberof query.GetSchemaRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - AutoIncrement.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetSchemaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vschema.AutoIncrement"; + return typeUrlPrefix + "/query.GetSchemaRequest"; }; - return AutoIncrement; + return GetSchemaRequest; })(); - vschema.Column = (function() { + query.GetSchemaResponse = (function() { /** - * Properties of a Column. - * @memberof vschema - * @interface IColumn - * @property {string|null} [name] Column name - * @property {query.Type|null} [type] Column type + * Properties of a GetSchemaResponse. + * @memberof query + * @interface IGetSchemaResponse + * @property {Object.|null} [table_definition] GetSchemaResponse table_definition */ /** - * Constructs a new Column. - * @memberof vschema - * @classdesc Represents a Column. - * @implements IColumn + * Constructs a new GetSchemaResponse. + * @memberof query + * @classdesc Represents a GetSchemaResponse. 
+ * @implements IGetSchemaResponse * @constructor - * @param {vschema.IColumn=} [properties] Properties to set + * @param {query.IGetSchemaResponse=} [properties] Properties to set */ - function Column(properties) { + function GetSchemaResponse(properties) { + this.table_definition = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -95997,89 +97108,95 @@ export const vschema = $root.vschema = (() => { } /** - * Column name. - * @member {string} name - * @memberof vschema.Column - * @instance - */ - Column.prototype.name = ""; - - /** - * Column type. - * @member {query.Type} type - * @memberof vschema.Column + * GetSchemaResponse table_definition. + * @member {Object.} table_definition + * @memberof query.GetSchemaResponse * @instance */ - Column.prototype.type = 0; + GetSchemaResponse.prototype.table_definition = $util.emptyObject; /** - * Creates a new Column instance using the specified properties. + * Creates a new GetSchemaResponse instance using the specified properties. * @function create - * @memberof vschema.Column + * @memberof query.GetSchemaResponse * @static - * @param {vschema.IColumn=} [properties] Properties to set - * @returns {vschema.Column} Column instance + * @param {query.IGetSchemaResponse=} [properties] Properties to set + * @returns {query.GetSchemaResponse} GetSchemaResponse instance */ - Column.create = function create(properties) { - return new Column(properties); + GetSchemaResponse.create = function create(properties) { + return new GetSchemaResponse(properties); }; /** - * Encodes the specified Column message. Does not implicitly {@link vschema.Column.verify|verify} messages. + * Encodes the specified GetSchemaResponse message. Does not implicitly {@link query.GetSchemaResponse.verify|verify} messages. 
* @function encode - * @memberof vschema.Column + * @memberof query.GetSchemaResponse * @static - * @param {vschema.IColumn} message Column message or plain object to encode + * @param {query.IGetSchemaResponse} message GetSchemaResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Column.encode = function encode(message, writer) { + GetSchemaResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.name != null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); - if (message.type != null && Object.hasOwnProperty.call(message, "type")) - writer.uint32(/* id 2, wireType 0 =*/16).int32(message.type); + if (message.table_definition != null && Object.hasOwnProperty.call(message, "table_definition")) + for (let keys = Object.keys(message.table_definition), i = 0; i < keys.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.table_definition[keys[i]]).ldelim(); return writer; }; /** - * Encodes the specified Column message, length delimited. Does not implicitly {@link vschema.Column.verify|verify} messages. + * Encodes the specified GetSchemaResponse message, length delimited. Does not implicitly {@link query.GetSchemaResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vschema.Column + * @memberof query.GetSchemaResponse * @static - * @param {vschema.IColumn} message Column message or plain object to encode + * @param {query.IGetSchemaResponse} message GetSchemaResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Column.encodeDelimited = function encodeDelimited(message, writer) { + GetSchemaResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Column message from the specified reader or buffer. + * Decodes a GetSchemaResponse message from the specified reader or buffer. * @function decode - * @memberof vschema.Column + * @memberof query.GetSchemaResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vschema.Column} Column + * @returns {query.GetSchemaResponse} GetSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Column.decode = function decode(reader, length) { + GetSchemaResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.Column(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.GetSchemaResponse(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.name = reader.string(); - break; - } case 2: { - message.type = reader.int32(); + if (message.table_definition === $util.emptyObject) + message.table_definition = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = ""; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = reader.string(); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.table_definition[key] = value; break; } default: @@ -96091,318 +97208,143 @@ export const vschema = $root.vschema = (() => { }; /** - * Decodes a Column message from the specified reader or buffer, length delimited. + * Decodes a GetSchemaResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vschema.Column + * @memberof query.GetSchemaResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vschema.Column} Column + * @returns {query.GetSchemaResponse} GetSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Column.decodeDelimited = function decodeDelimited(reader) { + GetSchemaResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Column message. + * Verifies a GetSchemaResponse message. 
* @function verify - * @memberof vschema.Column + * @memberof query.GetSchemaResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Column.verify = function verify(message) { + GetSchemaResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; - if (message.type != null && message.hasOwnProperty("type")) - switch (message.type) { - default: - return "type: enum value expected"; - case 0: - case 257: - case 770: - case 259: - case 772: - case 261: - case 774: - case 263: - case 776: - case 265: - case 778: - case 1035: - case 1036: - case 2061: - case 2062: - case 2063: - case 2064: - case 785: - case 18: - case 6163: - case 10260: - case 6165: - case 10262: - case 6167: - case 10264: - case 2073: - case 2074: - case 2075: - case 28: - case 2077: - case 2078: - case 31: - case 4128: - case 4129: - case 4130: - break; - } + if (message.table_definition != null && message.hasOwnProperty("table_definition")) { + if (!$util.isObject(message.table_definition)) + return "table_definition: object expected"; + let key = Object.keys(message.table_definition); + for (let i = 0; i < key.length; ++i) + if (!$util.isString(message.table_definition[key[i]])) + return "table_definition: string{k:string} expected"; + } return null; }; /** - * Creates a Column message from a plain object. Also converts values to their respective internal types. + * Creates a GetSchemaResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vschema.Column + * @memberof query.GetSchemaResponse * @static * @param {Object.} object Plain object - * @returns {vschema.Column} Column + * @returns {query.GetSchemaResponse} GetSchemaResponse */ - Column.fromObject = function fromObject(object) { - if (object instanceof $root.vschema.Column) + GetSchemaResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.GetSchemaResponse) return object; - let message = new $root.vschema.Column(); - if (object.name != null) - message.name = String(object.name); - switch (object.type) { - default: - if (typeof object.type === "number") { - message.type = object.type; - break; - } - break; - case "NULL_TYPE": - case 0: - message.type = 0; - break; - case "INT8": - case 257: - message.type = 257; - break; - case "UINT8": - case 770: - message.type = 770; - break; - case "INT16": - case 259: - message.type = 259; - break; - case "UINT16": - case 772: - message.type = 772; - break; - case "INT24": - case 261: - message.type = 261; - break; - case "UINT24": - case 774: - message.type = 774; - break; - case "INT32": - case 263: - message.type = 263; - break; - case "UINT32": - case 776: - message.type = 776; - break; - case "INT64": - case 265: - message.type = 265; - break; - case "UINT64": - case 778: - message.type = 778; - break; - case "FLOAT32": - case 1035: - message.type = 1035; - break; - case "FLOAT64": - case 1036: - message.type = 1036; - break; - case "TIMESTAMP": - case 2061: - message.type = 2061; - break; - case "DATE": - case 2062: - message.type = 2062; - break; - case "TIME": - case 2063: - message.type = 2063; - break; - case "DATETIME": - case 2064: - message.type = 2064; - break; - case "YEAR": - case 785: - message.type = 785; - break; - case "DECIMAL": - case 18: - message.type = 18; - break; - case "TEXT": - case 6163: - message.type = 6163; - break; - case "BLOB": - case 10260: - message.type = 10260; - break; - case "VARCHAR": - case 
6165: - message.type = 6165; - break; - case "VARBINARY": - case 10262: - message.type = 10262; - break; - case "CHAR": - case 6167: - message.type = 6167; - break; - case "BINARY": - case 10264: - message.type = 10264; - break; - case "BIT": - case 2073: - message.type = 2073; - break; - case "ENUM": - case 2074: - message.type = 2074; - break; - case "SET": - case 2075: - message.type = 2075; - break; - case "TUPLE": - case 28: - message.type = 28; - break; - case "GEOMETRY": - case 2077: - message.type = 2077; - break; - case "JSON": - case 2078: - message.type = 2078; - break; - case "EXPRESSION": - case 31: - message.type = 31; - break; - case "HEXNUM": - case 4128: - message.type = 4128; - break; - case "HEXVAL": - case 4129: - message.type = 4129; - break; - case "BITNUM": - case 4130: - message.type = 4130; - break; + let message = new $root.query.GetSchemaResponse(); + if (object.table_definition) { + if (typeof object.table_definition !== "object") + throw TypeError(".query.GetSchemaResponse.table_definition: object expected"); + message.table_definition = {}; + for (let keys = Object.keys(object.table_definition), i = 0; i < keys.length; ++i) + message.table_definition[keys[i]] = String(object.table_definition[keys[i]]); } return message; }; /** - * Creates a plain object from a Column message. Also converts values to other types if specified. + * Creates a plain object from a GetSchemaResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vschema.Column + * @memberof query.GetSchemaResponse * @static - * @param {vschema.Column} message Column + * @param {query.GetSchemaResponse} message GetSchemaResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Column.toObject = function toObject(message, options) { + GetSchemaResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.name = ""; - object.type = options.enums === String ? "NULL_TYPE" : 0; + if (options.objects || options.defaults) + object.table_definition = {}; + let keys2; + if (message.table_definition && (keys2 = Object.keys(message.table_definition)).length) { + object.table_definition = {}; + for (let j = 0; j < keys2.length; ++j) + object.table_definition[keys2[j]] = message.table_definition[keys2[j]]; } - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; - if (message.type != null && message.hasOwnProperty("type")) - object.type = options.enums === String ? $root.query.Type[message.type] === undefined ? message.type : $root.query.Type[message.type] : message.type; return object; }; /** - * Converts this Column to JSON. + * Converts this GetSchemaResponse to JSON. 
* @function toJSON - * @memberof vschema.Column + * @memberof query.GetSchemaResponse * @instance * @returns {Object.} JSON object */ - Column.prototype.toJSON = function toJSON() { + GetSchemaResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for Column + * Gets the default type url for GetSchemaResponse * @function getTypeUrl - * @memberof vschema.Column + * @memberof query.GetSchemaResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - Column.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetSchemaResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vschema.Column"; + return typeUrlPrefix + "/query.GetSchemaResponse"; }; - return Column; + return GetSchemaResponse; })(); - vschema.SrvVSchema = (function() { + query.LoadDataStreamRequest = (function() { /** - * Properties of a SrvVSchema. - * @memberof vschema - * @interface ISrvVSchema - * @property {Object.|null} [keyspaces] SrvVSchema keyspaces - * @property {vschema.IRoutingRules|null} [routing_rules] SrvVSchema routing_rules - * @property {vschema.IShardRoutingRules|null} [shard_routing_rules] SrvVSchema shard_routing_rules + * Properties of a LoadDataStreamRequest. 
+ * @memberof query + * @interface ILoadDataStreamRequest + * @property {vtrpc.ICallerID|null} [effective_caller_id] LoadDataStreamRequest effective_caller_id + * @property {query.IVTGateCallerID|null} [immediate_caller_id] LoadDataStreamRequest immediate_caller_id + * @property {query.ITarget|null} [target] LoadDataStreamRequest target + * @property {query.IBoundQuery|null} [query] LoadDataStreamRequest query + * @property {number|Long|null} [transaction_id] LoadDataStreamRequest transaction_id + * @property {query.IExecuteOptions|null} [options] LoadDataStreamRequest options + * @property {Array.|null} [lines] LoadDataStreamRequest lines */ /** - * Constructs a new SrvVSchema. - * @memberof vschema - * @classdesc Represents a SrvVSchema. - * @implements ISrvVSchema + * Constructs a new LoadDataStreamRequest. + * @memberof query + * @classdesc Represents a LoadDataStreamRequest. + * @implements ILoadDataStreamRequest * @constructor - * @param {vschema.ISrvVSchema=} [properties] Properties to set + * @param {query.ILoadDataStreamRequest=} [properties] Properties to set */ - function SrvVSchema(properties) { - this.keyspaces = {}; + function LoadDataStreamRequest(properties) { + this.lines = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -96410,125 +97352,162 @@ export const vschema = $root.vschema = (() => { } /** - * SrvVSchema keyspaces. - * @member {Object.} keyspaces - * @memberof vschema.SrvVSchema + * LoadDataStreamRequest effective_caller_id. + * @member {vtrpc.ICallerID|null|undefined} effective_caller_id + * @memberof query.LoadDataStreamRequest * @instance */ - SrvVSchema.prototype.keyspaces = $util.emptyObject; + LoadDataStreamRequest.prototype.effective_caller_id = null; /** - * SrvVSchema routing_rules. - * @member {vschema.IRoutingRules|null|undefined} routing_rules - * @memberof vschema.SrvVSchema + * LoadDataStreamRequest immediate_caller_id. 
+ * @member {query.IVTGateCallerID|null|undefined} immediate_caller_id + * @memberof query.LoadDataStreamRequest * @instance */ - SrvVSchema.prototype.routing_rules = null; + LoadDataStreamRequest.prototype.immediate_caller_id = null; /** - * SrvVSchema shard_routing_rules. - * @member {vschema.IShardRoutingRules|null|undefined} shard_routing_rules - * @memberof vschema.SrvVSchema + * LoadDataStreamRequest target. + * @member {query.ITarget|null|undefined} target + * @memberof query.LoadDataStreamRequest * @instance */ - SrvVSchema.prototype.shard_routing_rules = null; + LoadDataStreamRequest.prototype.target = null; /** - * Creates a new SrvVSchema instance using the specified properties. + * LoadDataStreamRequest query. + * @member {query.IBoundQuery|null|undefined} query + * @memberof query.LoadDataStreamRequest + * @instance + */ + LoadDataStreamRequest.prototype.query = null; + + /** + * LoadDataStreamRequest transaction_id. + * @member {number|Long} transaction_id + * @memberof query.LoadDataStreamRequest + * @instance + */ + LoadDataStreamRequest.prototype.transaction_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * LoadDataStreamRequest options. + * @member {query.IExecuteOptions|null|undefined} options + * @memberof query.LoadDataStreamRequest + * @instance + */ + LoadDataStreamRequest.prototype.options = null; + + /** + * LoadDataStreamRequest lines. + * @member {Array.} lines + * @memberof query.LoadDataStreamRequest + * @instance + */ + LoadDataStreamRequest.prototype.lines = $util.emptyArray; + + /** + * Creates a new LoadDataStreamRequest instance using the specified properties. 
* @function create - * @memberof vschema.SrvVSchema + * @memberof query.LoadDataStreamRequest * @static - * @param {vschema.ISrvVSchema=} [properties] Properties to set - * @returns {vschema.SrvVSchema} SrvVSchema instance + * @param {query.ILoadDataStreamRequest=} [properties] Properties to set + * @returns {query.LoadDataStreamRequest} LoadDataStreamRequest instance */ - SrvVSchema.create = function create(properties) { - return new SrvVSchema(properties); + LoadDataStreamRequest.create = function create(properties) { + return new LoadDataStreamRequest(properties); }; /** - * Encodes the specified SrvVSchema message. Does not implicitly {@link vschema.SrvVSchema.verify|verify} messages. + * Encodes the specified LoadDataStreamRequest message. Does not implicitly {@link query.LoadDataStreamRequest.verify|verify} messages. * @function encode - * @memberof vschema.SrvVSchema + * @memberof query.LoadDataStreamRequest * @static - * @param {vschema.ISrvVSchema} message SrvVSchema message or plain object to encode + * @param {query.ILoadDataStreamRequest} message LoadDataStreamRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SrvVSchema.encode = function encode(message, writer) { + LoadDataStreamRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspaces != null && Object.hasOwnProperty.call(message, "keyspaces")) - for (let keys = Object.keys(message.keyspaces), i = 0; i < keys.length; ++i) { - writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); - $root.vschema.Keyspace.encode(message.keyspaces[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); - } - if (message.routing_rules != null && Object.hasOwnProperty.call(message, "routing_rules")) - $root.vschema.RoutingRules.encode(message.routing_rules, writer.uint32(/* id 2, wireType 2 
=*/18).fork()).ldelim(); - if (message.shard_routing_rules != null && Object.hasOwnProperty.call(message, "shard_routing_rules")) - $root.vschema.ShardRoutingRules.encode(message.shard_routing_rules, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.effective_caller_id != null && Object.hasOwnProperty.call(message, "effective_caller_id")) + $root.vtrpc.CallerID.encode(message.effective_caller_id, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.immediate_caller_id != null && Object.hasOwnProperty.call(message, "immediate_caller_id")) + $root.query.VTGateCallerID.encode(message.immediate_caller_id, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.target != null && Object.hasOwnProperty.call(message, "target")) + $root.query.Target.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.query != null && Object.hasOwnProperty.call(message, "query")) + $root.query.BoundQuery.encode(message.query, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.transaction_id != null && Object.hasOwnProperty.call(message, "transaction_id")) + writer.uint32(/* id 5, wireType 0 =*/40).int64(message.transaction_id); + if (message.options != null && Object.hasOwnProperty.call(message, "options")) + $root.query.ExecuteOptions.encode(message.options, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); + if (message.lines != null && message.lines.length) + for (let i = 0; i < message.lines.length; ++i) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.lines[i]); return writer; }; /** - * Encodes the specified SrvVSchema message, length delimited. Does not implicitly {@link vschema.SrvVSchema.verify|verify} messages. + * Encodes the specified LoadDataStreamRequest message, length delimited. Does not implicitly {@link query.LoadDataStreamRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vschema.SrvVSchema + * @memberof query.LoadDataStreamRequest * @static - * @param {vschema.ISrvVSchema} message SrvVSchema message or plain object to encode + * @param {query.ILoadDataStreamRequest} message LoadDataStreamRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SrvVSchema.encodeDelimited = function encodeDelimited(message, writer) { + LoadDataStreamRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SrvVSchema message from the specified reader or buffer. + * Decodes a LoadDataStreamRequest message from the specified reader or buffer. * @function decode - * @memberof vschema.SrvVSchema + * @memberof query.LoadDataStreamRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vschema.SrvVSchema} SrvVSchema + * @returns {query.LoadDataStreamRequest} LoadDataStreamRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SrvVSchema.decode = function decode(reader, length) { + LoadDataStreamRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.SrvVSchema(), key, value; + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.LoadDataStreamRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (message.keyspaces === $util.emptyObject) - message.keyspaces = {}; - let end2 = reader.uint32() + reader.pos; - key = ""; - value = null; - while (reader.pos < end2) { - let tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = $root.vschema.Keyspace.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.keyspaces[key] = value; + message.effective_caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); break; } case 2: { - message.routing_rules = $root.vschema.RoutingRules.decode(reader, reader.uint32()); + message.immediate_caller_id = $root.query.VTGateCallerID.decode(reader, reader.uint32()); break; } case 3: { - message.shard_routing_rules = $root.vschema.ShardRoutingRules.decode(reader, reader.uint32()); + message.target = $root.query.Target.decode(reader, reader.uint32()); + break; + } + case 4: { + message.query = $root.query.BoundQuery.decode(reader, reader.uint32()); + break; + } + case 5: { + message.transaction_id = reader.int64(); + break; + } + case 6: { + message.options = $root.query.ExecuteOptions.decode(reader, reader.uint32()); + break; + } + case 7: { + if (!(message.lines && message.lines.length)) + message.lines = []; + message.lines.push(reader.string()); break; } default: @@ -96540,170 +97519,223 @@ export const vschema = $root.vschema = (() => { }; /** - * Decodes a SrvVSchema message from the specified reader or buffer, length delimited. + * Decodes a LoadDataStreamRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vschema.SrvVSchema + * @memberof query.LoadDataStreamRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vschema.SrvVSchema} SrvVSchema + * @returns {query.LoadDataStreamRequest} LoadDataStreamRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SrvVSchema.decodeDelimited = function decodeDelimited(reader) { + LoadDataStreamRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SrvVSchema message. + * Verifies a LoadDataStreamRequest message. * @function verify - * @memberof vschema.SrvVSchema + * @memberof query.LoadDataStreamRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SrvVSchema.verify = function verify(message) { + LoadDataStreamRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspaces != null && message.hasOwnProperty("keyspaces")) { - if (!$util.isObject(message.keyspaces)) - return "keyspaces: object expected"; - let key = Object.keys(message.keyspaces); - for (let i = 0; i < key.length; ++i) { - let error = $root.vschema.Keyspace.verify(message.keyspaces[key[i]]); - if (error) - return "keyspaces." + error; - } + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) { + let error = $root.vtrpc.CallerID.verify(message.effective_caller_id); + if (error) + return "effective_caller_id." 
+ error; } - if (message.routing_rules != null && message.hasOwnProperty("routing_rules")) { - let error = $root.vschema.RoutingRules.verify(message.routing_rules); + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) { + let error = $root.query.VTGateCallerID.verify(message.immediate_caller_id); if (error) - return "routing_rules." + error; + return "immediate_caller_id." + error; } - if (message.shard_routing_rules != null && message.hasOwnProperty("shard_routing_rules")) { - let error = $root.vschema.ShardRoutingRules.verify(message.shard_routing_rules); + if (message.target != null && message.hasOwnProperty("target")) { + let error = $root.query.Target.verify(message.target); if (error) - return "shard_routing_rules." + error; + return "target." + error; + } + if (message.query != null && message.hasOwnProperty("query")) { + let error = $root.query.BoundQuery.verify(message.query); + if (error) + return "query." + error; + } + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (!$util.isInteger(message.transaction_id) && !(message.transaction_id && $util.isInteger(message.transaction_id.low) && $util.isInteger(message.transaction_id.high))) + return "transaction_id: integer|Long expected"; + if (message.options != null && message.hasOwnProperty("options")) { + let error = $root.query.ExecuteOptions.verify(message.options); + if (error) + return "options." + error; + } + if (message.lines != null && message.hasOwnProperty("lines")) { + if (!Array.isArray(message.lines)) + return "lines: array expected"; + for (let i = 0; i < message.lines.length; ++i) + if (!$util.isString(message.lines[i])) + return "lines: string[] expected"; } return null; }; /** - * Creates a SrvVSchema message from a plain object. Also converts values to their respective internal types. + * Creates a LoadDataStreamRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vschema.SrvVSchema + * @memberof query.LoadDataStreamRequest * @static * @param {Object.} object Plain object - * @returns {vschema.SrvVSchema} SrvVSchema + * @returns {query.LoadDataStreamRequest} LoadDataStreamRequest */ - SrvVSchema.fromObject = function fromObject(object) { - if (object instanceof $root.vschema.SrvVSchema) + LoadDataStreamRequest.fromObject = function fromObject(object) { + if (object instanceof $root.query.LoadDataStreamRequest) return object; - let message = new $root.vschema.SrvVSchema(); - if (object.keyspaces) { - if (typeof object.keyspaces !== "object") - throw TypeError(".vschema.SrvVSchema.keyspaces: object expected"); - message.keyspaces = {}; - for (let keys = Object.keys(object.keyspaces), i = 0; i < keys.length; ++i) { - if (typeof object.keyspaces[keys[i]] !== "object") - throw TypeError(".vschema.SrvVSchema.keyspaces: object expected"); - message.keyspaces[keys[i]] = $root.vschema.Keyspace.fromObject(object.keyspaces[keys[i]]); - } + let message = new $root.query.LoadDataStreamRequest(); + if (object.effective_caller_id != null) { + if (typeof object.effective_caller_id !== "object") + throw TypeError(".query.LoadDataStreamRequest.effective_caller_id: object expected"); + message.effective_caller_id = $root.vtrpc.CallerID.fromObject(object.effective_caller_id); } - if (object.routing_rules != null) { - if (typeof object.routing_rules !== "object") - throw TypeError(".vschema.SrvVSchema.routing_rules: object expected"); - message.routing_rules = $root.vschema.RoutingRules.fromObject(object.routing_rules); + if (object.immediate_caller_id != null) { + if (typeof object.immediate_caller_id !== "object") + throw TypeError(".query.LoadDataStreamRequest.immediate_caller_id: object expected"); + message.immediate_caller_id = $root.query.VTGateCallerID.fromObject(object.immediate_caller_id); } - if (object.shard_routing_rules != null) { - if (typeof object.shard_routing_rules !== "object") - throw 
TypeError(".vschema.SrvVSchema.shard_routing_rules: object expected"); - message.shard_routing_rules = $root.vschema.ShardRoutingRules.fromObject(object.shard_routing_rules); + if (object.target != null) { + if (typeof object.target !== "object") + throw TypeError(".query.LoadDataStreamRequest.target: object expected"); + message.target = $root.query.Target.fromObject(object.target); + } + if (object.query != null) { + if (typeof object.query !== "object") + throw TypeError(".query.LoadDataStreamRequest.query: object expected"); + message.query = $root.query.BoundQuery.fromObject(object.query); + } + if (object.transaction_id != null) + if ($util.Long) + (message.transaction_id = $util.Long.fromValue(object.transaction_id)).unsigned = false; + else if (typeof object.transaction_id === "string") + message.transaction_id = parseInt(object.transaction_id, 10); + else if (typeof object.transaction_id === "number") + message.transaction_id = object.transaction_id; + else if (typeof object.transaction_id === "object") + message.transaction_id = new $util.LongBits(object.transaction_id.low >>> 0, object.transaction_id.high >>> 0).toNumber(); + if (object.options != null) { + if (typeof object.options !== "object") + throw TypeError(".query.LoadDataStreamRequest.options: object expected"); + message.options = $root.query.ExecuteOptions.fromObject(object.options); + } + if (object.lines) { + if (!Array.isArray(object.lines)) + throw TypeError(".query.LoadDataStreamRequest.lines: array expected"); + message.lines = []; + for (let i = 0; i < object.lines.length; ++i) + message.lines[i] = String(object.lines[i]); } return message; }; /** - * Creates a plain object from a SrvVSchema message. Also converts values to other types if specified. + * Creates a plain object from a LoadDataStreamRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vschema.SrvVSchema + * @memberof query.LoadDataStreamRequest * @static - * @param {vschema.SrvVSchema} message SrvVSchema + * @param {query.LoadDataStreamRequest} message LoadDataStreamRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SrvVSchema.toObject = function toObject(message, options) { + LoadDataStreamRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.objects || options.defaults) - object.keyspaces = {}; + if (options.arrays || options.defaults) + object.lines = []; if (options.defaults) { - object.routing_rules = null; - object.shard_routing_rules = null; + object.effective_caller_id = null; + object.immediate_caller_id = null; + object.target = null; + object.query = null; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.transaction_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.transaction_id = options.longs === String ? 
"0" : 0; + object.options = null; } - let keys2; - if (message.keyspaces && (keys2 = Object.keys(message.keyspaces)).length) { - object.keyspaces = {}; - for (let j = 0; j < keys2.length; ++j) - object.keyspaces[keys2[j]] = $root.vschema.Keyspace.toObject(message.keyspaces[keys2[j]], options); + if (message.effective_caller_id != null && message.hasOwnProperty("effective_caller_id")) + object.effective_caller_id = $root.vtrpc.CallerID.toObject(message.effective_caller_id, options); + if (message.immediate_caller_id != null && message.hasOwnProperty("immediate_caller_id")) + object.immediate_caller_id = $root.query.VTGateCallerID.toObject(message.immediate_caller_id, options); + if (message.target != null && message.hasOwnProperty("target")) + object.target = $root.query.Target.toObject(message.target, options); + if (message.query != null && message.hasOwnProperty("query")) + object.query = $root.query.BoundQuery.toObject(message.query, options); + if (message.transaction_id != null && message.hasOwnProperty("transaction_id")) + if (typeof message.transaction_id === "number") + object.transaction_id = options.longs === String ? String(message.transaction_id) : message.transaction_id; + else + object.transaction_id = options.longs === String ? $util.Long.prototype.toString.call(message.transaction_id) : options.longs === Number ? 
new $util.LongBits(message.transaction_id.low >>> 0, message.transaction_id.high >>> 0).toNumber() : message.transaction_id; + if (message.options != null && message.hasOwnProperty("options")) + object.options = $root.query.ExecuteOptions.toObject(message.options, options); + if (message.lines && message.lines.length) { + object.lines = []; + for (let j = 0; j < message.lines.length; ++j) + object.lines[j] = message.lines[j]; } - if (message.routing_rules != null && message.hasOwnProperty("routing_rules")) - object.routing_rules = $root.vschema.RoutingRules.toObject(message.routing_rules, options); - if (message.shard_routing_rules != null && message.hasOwnProperty("shard_routing_rules")) - object.shard_routing_rules = $root.vschema.ShardRoutingRules.toObject(message.shard_routing_rules, options); return object; }; /** - * Converts this SrvVSchema to JSON. + * Converts this LoadDataStreamRequest to JSON. * @function toJSON - * @memberof vschema.SrvVSchema + * @memberof query.LoadDataStreamRequest * @instance * @returns {Object.} JSON object */ - SrvVSchema.prototype.toJSON = function toJSON() { + LoadDataStreamRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SrvVSchema + * Gets the default type url for LoadDataStreamRequest * @function getTypeUrl - * @memberof vschema.SrvVSchema + * @memberof query.LoadDataStreamRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SrvVSchema.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + LoadDataStreamRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vschema.SrvVSchema"; + return typeUrlPrefix + "/query.LoadDataStreamRequest"; }; - return SrvVSchema; + return LoadDataStreamRequest; })(); - 
vschema.ShardRoutingRules = (function() { + query.LoadDataStreamResponse = (function() { /** - * Properties of a ShardRoutingRules. - * @memberof vschema - * @interface IShardRoutingRules - * @property {Array.|null} [rules] ShardRoutingRules rules + * Properties of a LoadDataStreamResponse. + * @memberof query + * @interface ILoadDataStreamResponse + * @property {query.IQueryResult|null} [result] LoadDataStreamResponse result */ /** - * Constructs a new ShardRoutingRules. - * @memberof vschema - * @classdesc Represents a ShardRoutingRules. - * @implements IShardRoutingRules + * Constructs a new LoadDataStreamResponse. + * @memberof query + * @classdesc Represents a LoadDataStreamResponse. + * @implements ILoadDataStreamResponse * @constructor - * @param {vschema.IShardRoutingRules=} [properties] Properties to set + * @param {query.ILoadDataStreamResponse=} [properties] Properties to set */ - function ShardRoutingRules(properties) { - this.rules = []; + function LoadDataStreamResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -96711,78 +97743,75 @@ export const vschema = $root.vschema = (() => { } /** - * ShardRoutingRules rules. - * @member {Array.} rules - * @memberof vschema.ShardRoutingRules + * LoadDataStreamResponse result. + * @member {query.IQueryResult|null|undefined} result + * @memberof query.LoadDataStreamResponse * @instance */ - ShardRoutingRules.prototype.rules = $util.emptyArray; + LoadDataStreamResponse.prototype.result = null; /** - * Creates a new ShardRoutingRules instance using the specified properties. + * Creates a new LoadDataStreamResponse instance using the specified properties. 
* @function create - * @memberof vschema.ShardRoutingRules + * @memberof query.LoadDataStreamResponse * @static - * @param {vschema.IShardRoutingRules=} [properties] Properties to set - * @returns {vschema.ShardRoutingRules} ShardRoutingRules instance + * @param {query.ILoadDataStreamResponse=} [properties] Properties to set + * @returns {query.LoadDataStreamResponse} LoadDataStreamResponse instance */ - ShardRoutingRules.create = function create(properties) { - return new ShardRoutingRules(properties); + LoadDataStreamResponse.create = function create(properties) { + return new LoadDataStreamResponse(properties); }; /** - * Encodes the specified ShardRoutingRules message. Does not implicitly {@link vschema.ShardRoutingRules.verify|verify} messages. + * Encodes the specified LoadDataStreamResponse message. Does not implicitly {@link query.LoadDataStreamResponse.verify|verify} messages. * @function encode - * @memberof vschema.ShardRoutingRules + * @memberof query.LoadDataStreamResponse * @static - * @param {vschema.IShardRoutingRules} message ShardRoutingRules message or plain object to encode + * @param {query.ILoadDataStreamResponse} message LoadDataStreamResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ShardRoutingRules.encode = function encode(message, writer) { + LoadDataStreamResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.rules != null && message.rules.length) - for (let i = 0; i < message.rules.length; ++i) - $root.vschema.ShardRoutingRule.encode(message.rules[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.result != null && Object.hasOwnProperty.call(message, "result")) + $root.query.QueryResult.encode(message.result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified ShardRoutingRules message, length delimited. 
Does not implicitly {@link vschema.ShardRoutingRules.verify|verify} messages. + * Encodes the specified LoadDataStreamResponse message, length delimited. Does not implicitly {@link query.LoadDataStreamResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vschema.ShardRoutingRules + * @memberof query.LoadDataStreamResponse * @static - * @param {vschema.IShardRoutingRules} message ShardRoutingRules message or plain object to encode + * @param {query.ILoadDataStreamResponse} message LoadDataStreamResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ShardRoutingRules.encodeDelimited = function encodeDelimited(message, writer) { + LoadDataStreamResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ShardRoutingRules message from the specified reader or buffer. + * Decodes a LoadDataStreamResponse message from the specified reader or buffer. * @function decode - * @memberof vschema.ShardRoutingRules + * @memberof query.LoadDataStreamResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vschema.ShardRoutingRules} ShardRoutingRules + * @returns {query.LoadDataStreamResponse} LoadDataStreamResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ShardRoutingRules.decode = function decode(reader, length) { + LoadDataStreamResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.ShardRoutingRules(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.LoadDataStreamResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.rules && message.rules.length)) - message.rules = []; - message.rules.push($root.vschema.ShardRoutingRule.decode(reader, reader.uint32())); + message.result = $root.query.QueryResult.decode(reader, reader.uint32()); break; } default: @@ -96794,141 +97823,160 @@ export const vschema = $root.vschema = (() => { }; /** - * Decodes a ShardRoutingRules message from the specified reader or buffer, length delimited. + * Decodes a LoadDataStreamResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vschema.ShardRoutingRules + * @memberof query.LoadDataStreamResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vschema.ShardRoutingRules} ShardRoutingRules + * @returns {query.LoadDataStreamResponse} LoadDataStreamResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ShardRoutingRules.decodeDelimited = function decodeDelimited(reader) { + LoadDataStreamResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ShardRoutingRules message. + * Verifies a LoadDataStreamResponse message. 
* @function verify - * @memberof vschema.ShardRoutingRules + * @memberof query.LoadDataStreamResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ShardRoutingRules.verify = function verify(message) { + LoadDataStreamResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.rules != null && message.hasOwnProperty("rules")) { - if (!Array.isArray(message.rules)) - return "rules: array expected"; - for (let i = 0; i < message.rules.length; ++i) { - let error = $root.vschema.ShardRoutingRule.verify(message.rules[i]); - if (error) - return "rules." + error; - } + if (message.result != null && message.hasOwnProperty("result")) { + let error = $root.query.QueryResult.verify(message.result); + if (error) + return "result." + error; } return null; }; /** - * Creates a ShardRoutingRules message from a plain object. Also converts values to their respective internal types. + * Creates a LoadDataStreamResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vschema.ShardRoutingRules + * @memberof query.LoadDataStreamResponse * @static * @param {Object.} object Plain object - * @returns {vschema.ShardRoutingRules} ShardRoutingRules + * @returns {query.LoadDataStreamResponse} LoadDataStreamResponse */ - ShardRoutingRules.fromObject = function fromObject(object) { - if (object instanceof $root.vschema.ShardRoutingRules) + LoadDataStreamResponse.fromObject = function fromObject(object) { + if (object instanceof $root.query.LoadDataStreamResponse) return object; - let message = new $root.vschema.ShardRoutingRules(); - if (object.rules) { - if (!Array.isArray(object.rules)) - throw TypeError(".vschema.ShardRoutingRules.rules: array expected"); - message.rules = []; - for (let i = 0; i < object.rules.length; ++i) { - if (typeof object.rules[i] !== "object") - throw TypeError(".vschema.ShardRoutingRules.rules: object expected"); - message.rules[i] = $root.vschema.ShardRoutingRule.fromObject(object.rules[i]); - } + let message = new $root.query.LoadDataStreamResponse(); + if (object.result != null) { + if (typeof object.result !== "object") + throw TypeError(".query.LoadDataStreamResponse.result: object expected"); + message.result = $root.query.QueryResult.fromObject(object.result); } return message; }; /** - * Creates a plain object from a ShardRoutingRules message. Also converts values to other types if specified. + * Creates a plain object from a LoadDataStreamResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vschema.ShardRoutingRules + * @memberof query.LoadDataStreamResponse * @static - * @param {vschema.ShardRoutingRules} message ShardRoutingRules + * @param {query.LoadDataStreamResponse} message LoadDataStreamResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ShardRoutingRules.toObject = function toObject(message, options) { + LoadDataStreamResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.rules = []; - if (message.rules && message.rules.length) { - object.rules = []; - for (let j = 0; j < message.rules.length; ++j) - object.rules[j] = $root.vschema.ShardRoutingRule.toObject(message.rules[j], options); - } + if (options.defaults) + object.result = null; + if (message.result != null && message.hasOwnProperty("result")) + object.result = $root.query.QueryResult.toObject(message.result, options); return object; }; /** - * Converts this ShardRoutingRules to JSON. + * Converts this LoadDataStreamResponse to JSON. 
* @function toJSON - * @memberof vschema.ShardRoutingRules + * @memberof query.LoadDataStreamResponse * @instance * @returns {Object.} JSON object */ - ShardRoutingRules.prototype.toJSON = function toJSON() { + LoadDataStreamResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ShardRoutingRules + * Gets the default type url for LoadDataStreamResponse * @function getTypeUrl - * @memberof vschema.ShardRoutingRules + * @memberof query.LoadDataStreamResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ShardRoutingRules.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + LoadDataStreamResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vschema.ShardRoutingRules"; + return typeUrlPrefix + "/query.LoadDataStreamResponse"; }; - return ShardRoutingRules; + return LoadDataStreamResponse; })(); - vschema.ShardRoutingRule = (function() { + return query; +})(); + +export const replicationdata = $root.replicationdata = (() => { + + /** + * Namespace replicationdata. + * @exports replicationdata + * @namespace + */ + const replicationdata = {}; + + replicationdata.Status = (function() { /** - * Properties of a ShardRoutingRule. - * @memberof vschema - * @interface IShardRoutingRule - * @property {string|null} [from_keyspace] ShardRoutingRule from_keyspace - * @property {string|null} [to_keyspace] ShardRoutingRule to_keyspace - * @property {string|null} [shard] ShardRoutingRule shard + * Properties of a Status. 
+ * @memberof replicationdata + * @interface IStatus + * @property {string|null} [position] Status position + * @property {number|null} [replication_lag_seconds] Status replication_lag_seconds + * @property {string|null} [source_host] Status source_host + * @property {number|null} [source_port] Status source_port + * @property {number|null} [connect_retry] Status connect_retry + * @property {string|null} [relay_log_position] Status relay_log_position + * @property {string|null} [file_position] Status file_position + * @property {string|null} [relay_log_source_binlog_equivalent_position] Status relay_log_source_binlog_equivalent_position + * @property {number|null} [source_server_id] Status source_server_id + * @property {string|null} [source_uuid] Status source_uuid + * @property {number|null} [io_state] Status io_state + * @property {string|null} [last_io_error] Status last_io_error + * @property {number|null} [sql_state] Status sql_state + * @property {string|null} [last_sql_error] Status last_sql_error + * @property {string|null} [relay_log_file_position] Status relay_log_file_position + * @property {string|null} [source_user] Status source_user + * @property {number|null} [sql_delay] Status sql_delay + * @property {boolean|null} [auto_position] Status auto_position + * @property {boolean|null} [using_gtid] Status using_gtid + * @property {boolean|null} [has_replication_filters] Status has_replication_filters + * @property {boolean|null} [ssl_allowed] Status ssl_allowed + * @property {boolean|null} [replication_lag_unknown] Status replication_lag_unknown */ /** - * Constructs a new ShardRoutingRule. - * @memberof vschema - * @classdesc Represents a ShardRoutingRule. - * @implements IShardRoutingRule + * Constructs a new Status. + * @memberof replicationdata + * @classdesc Represents a Status. 
+ * @implements IStatus * @constructor - * @param {vschema.IShardRoutingRule=} [properties] Properties to set + * @param {replicationdata.IStatus=} [properties] Properties to set */ - function ShardRoutingRule(properties) { + function Status(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -96936,354 +97984,369 @@ export const vschema = $root.vschema = (() => { } /** - * ShardRoutingRule from_keyspace. - * @member {string} from_keyspace - * @memberof vschema.ShardRoutingRule + * Status position. + * @member {string} position + * @memberof replicationdata.Status * @instance */ - ShardRoutingRule.prototype.from_keyspace = ""; + Status.prototype.position = ""; /** - * ShardRoutingRule to_keyspace. - * @member {string} to_keyspace - * @memberof vschema.ShardRoutingRule + * Status replication_lag_seconds. + * @member {number} replication_lag_seconds + * @memberof replicationdata.Status * @instance */ - ShardRoutingRule.prototype.to_keyspace = ""; + Status.prototype.replication_lag_seconds = 0; /** - * ShardRoutingRule shard. - * @member {string} shard - * @memberof vschema.ShardRoutingRule + * Status source_host. + * @member {string} source_host + * @memberof replicationdata.Status * @instance */ - ShardRoutingRule.prototype.shard = ""; + Status.prototype.source_host = ""; /** - * Creates a new ShardRoutingRule instance using the specified properties. - * @function create - * @memberof vschema.ShardRoutingRule - * @static - * @param {vschema.IShardRoutingRule=} [properties] Properties to set - * @returns {vschema.ShardRoutingRule} ShardRoutingRule instance + * Status source_port. + * @member {number} source_port + * @memberof replicationdata.Status + * @instance */ - ShardRoutingRule.create = function create(properties) { - return new ShardRoutingRule(properties); - }; + Status.prototype.source_port = 0; /** - * Encodes the specified ShardRoutingRule message. 
Does not implicitly {@link vschema.ShardRoutingRule.verify|verify} messages. - * @function encode - * @memberof vschema.ShardRoutingRule - * @static - * @param {vschema.IShardRoutingRule} message ShardRoutingRule message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer + * Status connect_retry. + * @member {number} connect_retry + * @memberof replicationdata.Status + * @instance */ - ShardRoutingRule.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.from_keyspace != null && Object.hasOwnProperty.call(message, "from_keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.from_keyspace); - if (message.to_keyspace != null && Object.hasOwnProperty.call(message, "to_keyspace")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.to_keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.shard); - return writer; - }; + Status.prototype.connect_retry = 0; /** - * Encodes the specified ShardRoutingRule message, length delimited. Does not implicitly {@link vschema.ShardRoutingRule.verify|verify} messages. - * @function encodeDelimited - * @memberof vschema.ShardRoutingRule - * @static - * @param {vschema.IShardRoutingRule} message ShardRoutingRule message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer + * Status relay_log_position. + * @member {string} relay_log_position + * @memberof replicationdata.Status + * @instance */ - ShardRoutingRule.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; + Status.prototype.relay_log_position = ""; /** - * Decodes a ShardRoutingRule message from the specified reader or buffer. 
- * @function decode - * @memberof vschema.ShardRoutingRule - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {vschema.ShardRoutingRule} ShardRoutingRule - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing + * Status file_position. + * @member {string} file_position + * @memberof replicationdata.Status + * @instance */ - ShardRoutingRule.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.ShardRoutingRule(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.from_keyspace = reader.string(); - break; - } - case 2: { - message.to_keyspace = reader.string(); - break; - } - case 3: { - message.shard = reader.string(); - break; - } - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; + Status.prototype.file_position = ""; /** - * Decodes a ShardRoutingRule message from the specified reader or buffer, length delimited. - * @function decodeDelimited - * @memberof vschema.ShardRoutingRule - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vschema.ShardRoutingRule} ShardRoutingRule - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing + * Status relay_log_source_binlog_equivalent_position. 
+ * @member {string} relay_log_source_binlog_equivalent_position + * @memberof replicationdata.Status + * @instance */ - ShardRoutingRule.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; + Status.prototype.relay_log_source_binlog_equivalent_position = ""; /** - * Verifies a ShardRoutingRule message. - * @function verify - * @memberof vschema.ShardRoutingRule - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not + * Status source_server_id. + * @member {number} source_server_id + * @memberof replicationdata.Status + * @instance */ - ShardRoutingRule.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.from_keyspace != null && message.hasOwnProperty("from_keyspace")) - if (!$util.isString(message.from_keyspace)) - return "from_keyspace: string expected"; - if (message.to_keyspace != null && message.hasOwnProperty("to_keyspace")) - if (!$util.isString(message.to_keyspace)) - return "to_keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - return null; - }; + Status.prototype.source_server_id = 0; /** - * Creates a ShardRoutingRule message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof vschema.ShardRoutingRule - * @static - * @param {Object.} object Plain object - * @returns {vschema.ShardRoutingRule} ShardRoutingRule + * Status source_uuid. 
+ * @member {string} source_uuid + * @memberof replicationdata.Status + * @instance */ - ShardRoutingRule.fromObject = function fromObject(object) { - if (object instanceof $root.vschema.ShardRoutingRule) - return object; - let message = new $root.vschema.ShardRoutingRule(); - if (object.from_keyspace != null) - message.from_keyspace = String(object.from_keyspace); - if (object.to_keyspace != null) - message.to_keyspace = String(object.to_keyspace); - if (object.shard != null) - message.shard = String(object.shard); - return message; - }; + Status.prototype.source_uuid = ""; /** - * Creates a plain object from a ShardRoutingRule message. Also converts values to other types if specified. - * @function toObject - * @memberof vschema.ShardRoutingRule - * @static - * @param {vschema.ShardRoutingRule} message ShardRoutingRule - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object + * Status io_state. + * @member {number} io_state + * @memberof replicationdata.Status + * @instance */ - ShardRoutingRule.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.from_keyspace = ""; - object.to_keyspace = ""; - object.shard = ""; - } - if (message.from_keyspace != null && message.hasOwnProperty("from_keyspace")) - object.from_keyspace = message.from_keyspace; - if (message.to_keyspace != null && message.hasOwnProperty("to_keyspace")) - object.to_keyspace = message.to_keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - return object; - }; + Status.prototype.io_state = 0; /** - * Converts this ShardRoutingRule to JSON. - * @function toJSON - * @memberof vschema.ShardRoutingRule + * Status last_io_error. 
+ * @member {string} last_io_error + * @memberof replicationdata.Status * @instance - * @returns {Object.} JSON object */ - ShardRoutingRule.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; + Status.prototype.last_io_error = ""; /** - * Gets the default type url for ShardRoutingRule - * @function getTypeUrl - * @memberof vschema.ShardRoutingRule - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url + * Status sql_state. + * @member {number} sql_state + * @memberof replicationdata.Status + * @instance */ - ShardRoutingRule.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/vschema.ShardRoutingRule"; - }; - - return ShardRoutingRule; - })(); - - return vschema; -})(); - -export const vtctldata = $root.vtctldata = (() => { - - /** - * Namespace vtctldata. - * @exports vtctldata - * @namespace - */ - const vtctldata = {}; + Status.prototype.sql_state = 0; - vtctldata.ExecuteVtctlCommandRequest = (function() { + /** + * Status last_sql_error. + * @member {string} last_sql_error + * @memberof replicationdata.Status + * @instance + */ + Status.prototype.last_sql_error = ""; /** - * Properties of an ExecuteVtctlCommandRequest. - * @memberof vtctldata - * @interface IExecuteVtctlCommandRequest - * @property {Array.|null} [args] ExecuteVtctlCommandRequest args - * @property {number|Long|null} [action_timeout] ExecuteVtctlCommandRequest action_timeout + * Status relay_log_file_position. + * @member {string} relay_log_file_position + * @memberof replicationdata.Status + * @instance */ + Status.prototype.relay_log_file_position = ""; /** - * Constructs a new ExecuteVtctlCommandRequest. - * @memberof vtctldata - * @classdesc Represents an ExecuteVtctlCommandRequest. 
- * @implements IExecuteVtctlCommandRequest - * @constructor - * @param {vtctldata.IExecuteVtctlCommandRequest=} [properties] Properties to set + * Status source_user. + * @member {string} source_user + * @memberof replicationdata.Status + * @instance */ - function ExecuteVtctlCommandRequest(properties) { - this.args = []; - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + Status.prototype.source_user = ""; /** - * ExecuteVtctlCommandRequest args. - * @member {Array.} args - * @memberof vtctldata.ExecuteVtctlCommandRequest + * Status sql_delay. + * @member {number} sql_delay + * @memberof replicationdata.Status * @instance */ - ExecuteVtctlCommandRequest.prototype.args = $util.emptyArray; + Status.prototype.sql_delay = 0; /** - * ExecuteVtctlCommandRequest action_timeout. - * @member {number|Long} action_timeout - * @memberof vtctldata.ExecuteVtctlCommandRequest + * Status auto_position. + * @member {boolean} auto_position + * @memberof replicationdata.Status * @instance */ - ExecuteVtctlCommandRequest.prototype.action_timeout = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + Status.prototype.auto_position = false; /** - * Creates a new ExecuteVtctlCommandRequest instance using the specified properties. - * @function create - * @memberof vtctldata.ExecuteVtctlCommandRequest - * @static - * @param {vtctldata.IExecuteVtctlCommandRequest=} [properties] Properties to set - * @returns {vtctldata.ExecuteVtctlCommandRequest} ExecuteVtctlCommandRequest instance + * Status using_gtid. + * @member {boolean} using_gtid + * @memberof replicationdata.Status + * @instance */ - ExecuteVtctlCommandRequest.create = function create(properties) { - return new ExecuteVtctlCommandRequest(properties); - }; + Status.prototype.using_gtid = false; /** - * Encodes the specified ExecuteVtctlCommandRequest message. 
Does not implicitly {@link vtctldata.ExecuteVtctlCommandRequest.verify|verify} messages. - * @function encode - * @memberof vtctldata.ExecuteVtctlCommandRequest + * Status has_replication_filters. + * @member {boolean} has_replication_filters + * @memberof replicationdata.Status + * @instance + */ + Status.prototype.has_replication_filters = false; + + /** + * Status ssl_allowed. + * @member {boolean} ssl_allowed + * @memberof replicationdata.Status + * @instance + */ + Status.prototype.ssl_allowed = false; + + /** + * Status replication_lag_unknown. + * @member {boolean} replication_lag_unknown + * @memberof replicationdata.Status + * @instance + */ + Status.prototype.replication_lag_unknown = false; + + /** + * Creates a new Status instance using the specified properties. + * @function create + * @memberof replicationdata.Status * @static - * @param {vtctldata.IExecuteVtctlCommandRequest} message ExecuteVtctlCommandRequest message or plain object to encode + * @param {replicationdata.IStatus=} [properties] Properties to set + * @returns {replicationdata.Status} Status instance + */ + Status.create = function create(properties) { + return new Status(properties); + }; + + /** + * Encodes the specified Status message. Does not implicitly {@link replicationdata.Status.verify|verify} messages. 
+ * @function encode + * @memberof replicationdata.Status + * @static + * @param {replicationdata.IStatus} message Status message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteVtctlCommandRequest.encode = function encode(message, writer) { + Status.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.args != null && message.args.length) - for (let i = 0; i < message.args.length; ++i) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.args[i]); - if (message.action_timeout != null && Object.hasOwnProperty.call(message, "action_timeout")) - writer.uint32(/* id 2, wireType 0 =*/16).int64(message.action_timeout); + if (message.position != null && Object.hasOwnProperty.call(message, "position")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.position); + if (message.replication_lag_seconds != null && Object.hasOwnProperty.call(message, "replication_lag_seconds")) + writer.uint32(/* id 4, wireType 0 =*/32).uint32(message.replication_lag_seconds); + if (message.source_host != null && Object.hasOwnProperty.call(message, "source_host")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.source_host); + if (message.source_port != null && Object.hasOwnProperty.call(message, "source_port")) + writer.uint32(/* id 6, wireType 0 =*/48).int32(message.source_port); + if (message.connect_retry != null && Object.hasOwnProperty.call(message, "connect_retry")) + writer.uint32(/* id 7, wireType 0 =*/56).int32(message.connect_retry); + if (message.relay_log_position != null && Object.hasOwnProperty.call(message, "relay_log_position")) + writer.uint32(/* id 8, wireType 2 =*/66).string(message.relay_log_position); + if (message.file_position != null && Object.hasOwnProperty.call(message, "file_position")) + writer.uint32(/* id 9, wireType 2 =*/74).string(message.file_position); + if (message.relay_log_source_binlog_equivalent_position 
!= null && Object.hasOwnProperty.call(message, "relay_log_source_binlog_equivalent_position")) + writer.uint32(/* id 10, wireType 2 =*/82).string(message.relay_log_source_binlog_equivalent_position); + if (message.source_server_id != null && Object.hasOwnProperty.call(message, "source_server_id")) + writer.uint32(/* id 11, wireType 0 =*/88).uint32(message.source_server_id); + if (message.source_uuid != null && Object.hasOwnProperty.call(message, "source_uuid")) + writer.uint32(/* id 12, wireType 2 =*/98).string(message.source_uuid); + if (message.io_state != null && Object.hasOwnProperty.call(message, "io_state")) + writer.uint32(/* id 13, wireType 0 =*/104).int32(message.io_state); + if (message.last_io_error != null && Object.hasOwnProperty.call(message, "last_io_error")) + writer.uint32(/* id 14, wireType 2 =*/114).string(message.last_io_error); + if (message.sql_state != null && Object.hasOwnProperty.call(message, "sql_state")) + writer.uint32(/* id 15, wireType 0 =*/120).int32(message.sql_state); + if (message.last_sql_error != null && Object.hasOwnProperty.call(message, "last_sql_error")) + writer.uint32(/* id 16, wireType 2 =*/130).string(message.last_sql_error); + if (message.relay_log_file_position != null && Object.hasOwnProperty.call(message, "relay_log_file_position")) + writer.uint32(/* id 17, wireType 2 =*/138).string(message.relay_log_file_position); + if (message.source_user != null && Object.hasOwnProperty.call(message, "source_user")) + writer.uint32(/* id 18, wireType 2 =*/146).string(message.source_user); + if (message.sql_delay != null && Object.hasOwnProperty.call(message, "sql_delay")) + writer.uint32(/* id 19, wireType 0 =*/152).uint32(message.sql_delay); + if (message.auto_position != null && Object.hasOwnProperty.call(message, "auto_position")) + writer.uint32(/* id 20, wireType 0 =*/160).bool(message.auto_position); + if (message.using_gtid != null && Object.hasOwnProperty.call(message, "using_gtid")) + writer.uint32(/* id 21, wireType 0 
=*/168).bool(message.using_gtid); + if (message.has_replication_filters != null && Object.hasOwnProperty.call(message, "has_replication_filters")) + writer.uint32(/* id 22, wireType 0 =*/176).bool(message.has_replication_filters); + if (message.ssl_allowed != null && Object.hasOwnProperty.call(message, "ssl_allowed")) + writer.uint32(/* id 23, wireType 0 =*/184).bool(message.ssl_allowed); + if (message.replication_lag_unknown != null && Object.hasOwnProperty.call(message, "replication_lag_unknown")) + writer.uint32(/* id 24, wireType 0 =*/192).bool(message.replication_lag_unknown); return writer; }; /** - * Encodes the specified ExecuteVtctlCommandRequest message, length delimited. Does not implicitly {@link vtctldata.ExecuteVtctlCommandRequest.verify|verify} messages. + * Encodes the specified Status message, length delimited. Does not implicitly {@link replicationdata.Status.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ExecuteVtctlCommandRequest + * @memberof replicationdata.Status * @static - * @param {vtctldata.IExecuteVtctlCommandRequest} message ExecuteVtctlCommandRequest message or plain object to encode + * @param {replicationdata.IStatus} message Status message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteVtctlCommandRequest.encodeDelimited = function encodeDelimited(message, writer) { + Status.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ExecuteVtctlCommandRequest message from the specified reader or buffer. + * Decodes a Status message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.ExecuteVtctlCommandRequest + * @memberof replicationdata.Status * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ExecuteVtctlCommandRequest} ExecuteVtctlCommandRequest + * @returns {replicationdata.Status} Status * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteVtctlCommandRequest.decode = function decode(reader, length) { + Status.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ExecuteVtctlCommandRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.replicationdata.Status(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.args && message.args.length)) - message.args = []; - message.args.push(reader.string()); + message.position = reader.string(); break; } - case 2: { - message.action_timeout = reader.int64(); + case 4: { + message.replication_lag_seconds = reader.uint32(); + break; + } + case 5: { + message.source_host = reader.string(); + break; + } + case 6: { + message.source_port = reader.int32(); + break; + } + case 7: { + message.connect_retry = reader.int32(); + break; + } + case 8: { + message.relay_log_position = reader.string(); + break; + } + case 9: { + message.file_position = reader.string(); + break; + } + case 10: { + message.relay_log_source_binlog_equivalent_position = reader.string(); + break; + } + case 11: { + message.source_server_id = reader.uint32(); + break; + } + case 12: { + message.source_uuid = reader.string(); + break; + } + case 13: { + message.io_state = reader.int32(); + break; + } + case 14: { + 
message.last_io_error = reader.string(); + break; + } + case 15: { + message.sql_state = reader.int32(); + break; + } + case 16: { + message.last_sql_error = reader.string(); + break; + } + case 17: { + message.relay_log_file_position = reader.string(); + break; + } + case 18: { + message.source_user = reader.string(); + break; + } + case 19: { + message.sql_delay = reader.uint32(); + break; + } + case 20: { + message.auto_position = reader.bool(); + break; + } + case 21: { + message.using_gtid = reader.bool(); + break; + } + case 22: { + message.has_replication_filters = reader.bool(); + break; + } + case 23: { + message.ssl_allowed = reader.bool(); + break; + } + case 24: { + message.replication_lag_unknown = reader.bool(); break; } default: @@ -97295,157 +98358,292 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an ExecuteVtctlCommandRequest message from the specified reader or buffer, length delimited. + * Decodes a Status message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ExecuteVtctlCommandRequest + * @memberof replicationdata.Status * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ExecuteVtctlCommandRequest} ExecuteVtctlCommandRequest + * @returns {replicationdata.Status} Status * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteVtctlCommandRequest.decodeDelimited = function decodeDelimited(reader) { + Status.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ExecuteVtctlCommandRequest message. + * Verifies a Status message. 
* @function verify - * @memberof vtctldata.ExecuteVtctlCommandRequest + * @memberof replicationdata.Status * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ExecuteVtctlCommandRequest.verify = function verify(message) { + Status.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.args != null && message.hasOwnProperty("args")) { - if (!Array.isArray(message.args)) - return "args: array expected"; - for (let i = 0; i < message.args.length; ++i) - if (!$util.isString(message.args[i])) - return "args: string[] expected"; - } - if (message.action_timeout != null && message.hasOwnProperty("action_timeout")) - if (!$util.isInteger(message.action_timeout) && !(message.action_timeout && $util.isInteger(message.action_timeout.low) && $util.isInteger(message.action_timeout.high))) - return "action_timeout: integer|Long expected"; + if (message.position != null && message.hasOwnProperty("position")) + if (!$util.isString(message.position)) + return "position: string expected"; + if (message.replication_lag_seconds != null && message.hasOwnProperty("replication_lag_seconds")) + if (!$util.isInteger(message.replication_lag_seconds)) + return "replication_lag_seconds: integer expected"; + if (message.source_host != null && message.hasOwnProperty("source_host")) + if (!$util.isString(message.source_host)) + return "source_host: string expected"; + if (message.source_port != null && message.hasOwnProperty("source_port")) + if (!$util.isInteger(message.source_port)) + return "source_port: integer expected"; + if (message.connect_retry != null && message.hasOwnProperty("connect_retry")) + if (!$util.isInteger(message.connect_retry)) + return "connect_retry: integer expected"; + if (message.relay_log_position != null && message.hasOwnProperty("relay_log_position")) + if (!$util.isString(message.relay_log_position)) + 
return "relay_log_position: string expected"; + if (message.file_position != null && message.hasOwnProperty("file_position")) + if (!$util.isString(message.file_position)) + return "file_position: string expected"; + if (message.relay_log_source_binlog_equivalent_position != null && message.hasOwnProperty("relay_log_source_binlog_equivalent_position")) + if (!$util.isString(message.relay_log_source_binlog_equivalent_position)) + return "relay_log_source_binlog_equivalent_position: string expected"; + if (message.source_server_id != null && message.hasOwnProperty("source_server_id")) + if (!$util.isInteger(message.source_server_id)) + return "source_server_id: integer expected"; + if (message.source_uuid != null && message.hasOwnProperty("source_uuid")) + if (!$util.isString(message.source_uuid)) + return "source_uuid: string expected"; + if (message.io_state != null && message.hasOwnProperty("io_state")) + if (!$util.isInteger(message.io_state)) + return "io_state: integer expected"; + if (message.last_io_error != null && message.hasOwnProperty("last_io_error")) + if (!$util.isString(message.last_io_error)) + return "last_io_error: string expected"; + if (message.sql_state != null && message.hasOwnProperty("sql_state")) + if (!$util.isInteger(message.sql_state)) + return "sql_state: integer expected"; + if (message.last_sql_error != null && message.hasOwnProperty("last_sql_error")) + if (!$util.isString(message.last_sql_error)) + return "last_sql_error: string expected"; + if (message.relay_log_file_position != null && message.hasOwnProperty("relay_log_file_position")) + if (!$util.isString(message.relay_log_file_position)) + return "relay_log_file_position: string expected"; + if (message.source_user != null && message.hasOwnProperty("source_user")) + if (!$util.isString(message.source_user)) + return "source_user: string expected"; + if (message.sql_delay != null && message.hasOwnProperty("sql_delay")) + if (!$util.isInteger(message.sql_delay)) + return 
"sql_delay: integer expected"; + if (message.auto_position != null && message.hasOwnProperty("auto_position")) + if (typeof message.auto_position !== "boolean") + return "auto_position: boolean expected"; + if (message.using_gtid != null && message.hasOwnProperty("using_gtid")) + if (typeof message.using_gtid !== "boolean") + return "using_gtid: boolean expected"; + if (message.has_replication_filters != null && message.hasOwnProperty("has_replication_filters")) + if (typeof message.has_replication_filters !== "boolean") + return "has_replication_filters: boolean expected"; + if (message.ssl_allowed != null && message.hasOwnProperty("ssl_allowed")) + if (typeof message.ssl_allowed !== "boolean") + return "ssl_allowed: boolean expected"; + if (message.replication_lag_unknown != null && message.hasOwnProperty("replication_lag_unknown")) + if (typeof message.replication_lag_unknown !== "boolean") + return "replication_lag_unknown: boolean expected"; return null; }; /** - * Creates an ExecuteVtctlCommandRequest message from a plain object. Also converts values to their respective internal types. + * Creates a Status message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ExecuteVtctlCommandRequest + * @memberof replicationdata.Status * @static * @param {Object.} object Plain object - * @returns {vtctldata.ExecuteVtctlCommandRequest} ExecuteVtctlCommandRequest + * @returns {replicationdata.Status} Status */ - ExecuteVtctlCommandRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ExecuteVtctlCommandRequest) + Status.fromObject = function fromObject(object) { + if (object instanceof $root.replicationdata.Status) return object; - let message = new $root.vtctldata.ExecuteVtctlCommandRequest(); - if (object.args) { - if (!Array.isArray(object.args)) - throw TypeError(".vtctldata.ExecuteVtctlCommandRequest.args: array expected"); - message.args = []; - for (let i = 0; i < object.args.length; ++i) - message.args[i] = String(object.args[i]); - } - if (object.action_timeout != null) - if ($util.Long) - (message.action_timeout = $util.Long.fromValue(object.action_timeout)).unsigned = false; - else if (typeof object.action_timeout === "string") - message.action_timeout = parseInt(object.action_timeout, 10); - else if (typeof object.action_timeout === "number") - message.action_timeout = object.action_timeout; - else if (typeof object.action_timeout === "object") - message.action_timeout = new $util.LongBits(object.action_timeout.low >>> 0, object.action_timeout.high >>> 0).toNumber(); + let message = new $root.replicationdata.Status(); + if (object.position != null) + message.position = String(object.position); + if (object.replication_lag_seconds != null) + message.replication_lag_seconds = object.replication_lag_seconds >>> 0; + if (object.source_host != null) + message.source_host = String(object.source_host); + if (object.source_port != null) + message.source_port = object.source_port | 0; + if (object.connect_retry != null) + message.connect_retry = object.connect_retry | 0; + if (object.relay_log_position != null) + message.relay_log_position = 
String(object.relay_log_position); + if (object.file_position != null) + message.file_position = String(object.file_position); + if (object.relay_log_source_binlog_equivalent_position != null) + message.relay_log_source_binlog_equivalent_position = String(object.relay_log_source_binlog_equivalent_position); + if (object.source_server_id != null) + message.source_server_id = object.source_server_id >>> 0; + if (object.source_uuid != null) + message.source_uuid = String(object.source_uuid); + if (object.io_state != null) + message.io_state = object.io_state | 0; + if (object.last_io_error != null) + message.last_io_error = String(object.last_io_error); + if (object.sql_state != null) + message.sql_state = object.sql_state | 0; + if (object.last_sql_error != null) + message.last_sql_error = String(object.last_sql_error); + if (object.relay_log_file_position != null) + message.relay_log_file_position = String(object.relay_log_file_position); + if (object.source_user != null) + message.source_user = String(object.source_user); + if (object.sql_delay != null) + message.sql_delay = object.sql_delay >>> 0; + if (object.auto_position != null) + message.auto_position = Boolean(object.auto_position); + if (object.using_gtid != null) + message.using_gtid = Boolean(object.using_gtid); + if (object.has_replication_filters != null) + message.has_replication_filters = Boolean(object.has_replication_filters); + if (object.ssl_allowed != null) + message.ssl_allowed = Boolean(object.ssl_allowed); + if (object.replication_lag_unknown != null) + message.replication_lag_unknown = Boolean(object.replication_lag_unknown); return message; }; /** - * Creates a plain object from an ExecuteVtctlCommandRequest message. Also converts values to other types if specified. + * Creates a plain object from a Status message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ExecuteVtctlCommandRequest + * @memberof replicationdata.Status * @static - * @param {vtctldata.ExecuteVtctlCommandRequest} message ExecuteVtctlCommandRequest + * @param {replicationdata.Status} message Status * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ExecuteVtctlCommandRequest.toObject = function toObject(message, options) { + Status.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.args = []; - if (options.defaults) - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.action_timeout = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.action_timeout = options.longs === String ? "0" : 0; - if (message.args && message.args.length) { - object.args = []; - for (let j = 0; j < message.args.length; ++j) - object.args[j] = message.args[j]; + if (options.defaults) { + object.position = ""; + object.replication_lag_seconds = 0; + object.source_host = ""; + object.source_port = 0; + object.connect_retry = 0; + object.relay_log_position = ""; + object.file_position = ""; + object.relay_log_source_binlog_equivalent_position = ""; + object.source_server_id = 0; + object.source_uuid = ""; + object.io_state = 0; + object.last_io_error = ""; + object.sql_state = 0; + object.last_sql_error = ""; + object.relay_log_file_position = ""; + object.source_user = ""; + object.sql_delay = 0; + object.auto_position = false; + object.using_gtid = false; + object.has_replication_filters = false; + object.ssl_allowed = false; + object.replication_lag_unknown = false; } - if (message.action_timeout != null && message.hasOwnProperty("action_timeout")) - if (typeof message.action_timeout === "number") - object.action_timeout = options.longs === String ? 
String(message.action_timeout) : message.action_timeout; - else - object.action_timeout = options.longs === String ? $util.Long.prototype.toString.call(message.action_timeout) : options.longs === Number ? new $util.LongBits(message.action_timeout.low >>> 0, message.action_timeout.high >>> 0).toNumber() : message.action_timeout; + if (message.position != null && message.hasOwnProperty("position")) + object.position = message.position; + if (message.replication_lag_seconds != null && message.hasOwnProperty("replication_lag_seconds")) + object.replication_lag_seconds = message.replication_lag_seconds; + if (message.source_host != null && message.hasOwnProperty("source_host")) + object.source_host = message.source_host; + if (message.source_port != null && message.hasOwnProperty("source_port")) + object.source_port = message.source_port; + if (message.connect_retry != null && message.hasOwnProperty("connect_retry")) + object.connect_retry = message.connect_retry; + if (message.relay_log_position != null && message.hasOwnProperty("relay_log_position")) + object.relay_log_position = message.relay_log_position; + if (message.file_position != null && message.hasOwnProperty("file_position")) + object.file_position = message.file_position; + if (message.relay_log_source_binlog_equivalent_position != null && message.hasOwnProperty("relay_log_source_binlog_equivalent_position")) + object.relay_log_source_binlog_equivalent_position = message.relay_log_source_binlog_equivalent_position; + if (message.source_server_id != null && message.hasOwnProperty("source_server_id")) + object.source_server_id = message.source_server_id; + if (message.source_uuid != null && message.hasOwnProperty("source_uuid")) + object.source_uuid = message.source_uuid; + if (message.io_state != null && message.hasOwnProperty("io_state")) + object.io_state = message.io_state; + if (message.last_io_error != null && message.hasOwnProperty("last_io_error")) + object.last_io_error = message.last_io_error; + if 
(message.sql_state != null && message.hasOwnProperty("sql_state")) + object.sql_state = message.sql_state; + if (message.last_sql_error != null && message.hasOwnProperty("last_sql_error")) + object.last_sql_error = message.last_sql_error; + if (message.relay_log_file_position != null && message.hasOwnProperty("relay_log_file_position")) + object.relay_log_file_position = message.relay_log_file_position; + if (message.source_user != null && message.hasOwnProperty("source_user")) + object.source_user = message.source_user; + if (message.sql_delay != null && message.hasOwnProperty("sql_delay")) + object.sql_delay = message.sql_delay; + if (message.auto_position != null && message.hasOwnProperty("auto_position")) + object.auto_position = message.auto_position; + if (message.using_gtid != null && message.hasOwnProperty("using_gtid")) + object.using_gtid = message.using_gtid; + if (message.has_replication_filters != null && message.hasOwnProperty("has_replication_filters")) + object.has_replication_filters = message.has_replication_filters; + if (message.ssl_allowed != null && message.hasOwnProperty("ssl_allowed")) + object.ssl_allowed = message.ssl_allowed; + if (message.replication_lag_unknown != null && message.hasOwnProperty("replication_lag_unknown")) + object.replication_lag_unknown = message.replication_lag_unknown; return object; }; /** - * Converts this ExecuteVtctlCommandRequest to JSON. + * Converts this Status to JSON. 
* @function toJSON - * @memberof vtctldata.ExecuteVtctlCommandRequest + * @memberof replicationdata.Status * @instance * @returns {Object.} JSON object */ - ExecuteVtctlCommandRequest.prototype.toJSON = function toJSON() { + Status.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ExecuteVtctlCommandRequest + * Gets the default type url for Status * @function getTypeUrl - * @memberof vtctldata.ExecuteVtctlCommandRequest + * @memberof replicationdata.Status * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ExecuteVtctlCommandRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + Status.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ExecuteVtctlCommandRequest"; + return typeUrlPrefix + "/replicationdata.Status"; }; - return ExecuteVtctlCommandRequest; + return Status; })(); - vtctldata.ExecuteVtctlCommandResponse = (function() { + replicationdata.StopReplicationStatus = (function() { /** - * Properties of an ExecuteVtctlCommandResponse. - * @memberof vtctldata - * @interface IExecuteVtctlCommandResponse - * @property {logutil.IEvent|null} [event] ExecuteVtctlCommandResponse event + * Properties of a StopReplicationStatus. + * @memberof replicationdata + * @interface IStopReplicationStatus + * @property {replicationdata.IStatus|null} [before] StopReplicationStatus before + * @property {replicationdata.IStatus|null} [after] StopReplicationStatus after */ /** - * Constructs a new ExecuteVtctlCommandResponse. - * @memberof vtctldata - * @classdesc Represents an ExecuteVtctlCommandResponse. - * @implements IExecuteVtctlCommandResponse + * Constructs a new StopReplicationStatus. + * @memberof replicationdata + * @classdesc Represents a StopReplicationStatus. 
+ * @implements IStopReplicationStatus * @constructor - * @param {vtctldata.IExecuteVtctlCommandResponse=} [properties] Properties to set + * @param {replicationdata.IStopReplicationStatus=} [properties] Properties to set */ - function ExecuteVtctlCommandResponse(properties) { + function StopReplicationStatus(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -97453,75 +98651,89 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ExecuteVtctlCommandResponse event. - * @member {logutil.IEvent|null|undefined} event - * @memberof vtctldata.ExecuteVtctlCommandResponse + * StopReplicationStatus before. + * @member {replicationdata.IStatus|null|undefined} before + * @memberof replicationdata.StopReplicationStatus * @instance */ - ExecuteVtctlCommandResponse.prototype.event = null; + StopReplicationStatus.prototype.before = null; /** - * Creates a new ExecuteVtctlCommandResponse instance using the specified properties. + * StopReplicationStatus after. + * @member {replicationdata.IStatus|null|undefined} after + * @memberof replicationdata.StopReplicationStatus + * @instance + */ + StopReplicationStatus.prototype.after = null; + + /** + * Creates a new StopReplicationStatus instance using the specified properties. 
* @function create - * @memberof vtctldata.ExecuteVtctlCommandResponse + * @memberof replicationdata.StopReplicationStatus * @static - * @param {vtctldata.IExecuteVtctlCommandResponse=} [properties] Properties to set - * @returns {vtctldata.ExecuteVtctlCommandResponse} ExecuteVtctlCommandResponse instance + * @param {replicationdata.IStopReplicationStatus=} [properties] Properties to set + * @returns {replicationdata.StopReplicationStatus} StopReplicationStatus instance */ - ExecuteVtctlCommandResponse.create = function create(properties) { - return new ExecuteVtctlCommandResponse(properties); + StopReplicationStatus.create = function create(properties) { + return new StopReplicationStatus(properties); }; /** - * Encodes the specified ExecuteVtctlCommandResponse message. Does not implicitly {@link vtctldata.ExecuteVtctlCommandResponse.verify|verify} messages. + * Encodes the specified StopReplicationStatus message. Does not implicitly {@link replicationdata.StopReplicationStatus.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ExecuteVtctlCommandResponse + * @memberof replicationdata.StopReplicationStatus * @static - * @param {vtctldata.IExecuteVtctlCommandResponse} message ExecuteVtctlCommandResponse message or plain object to encode + * @param {replicationdata.IStopReplicationStatus} message StopReplicationStatus message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteVtctlCommandResponse.encode = function encode(message, writer) { + StopReplicationStatus.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.event != null && Object.hasOwnProperty.call(message, "event")) - $root.logutil.Event.encode(message.event, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.before != null && Object.hasOwnProperty.call(message, "before")) + $root.replicationdata.Status.encode(message.before, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.after != null && Object.hasOwnProperty.call(message, "after")) + $root.replicationdata.Status.encode(message.after, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); return writer; }; /** - * Encodes the specified ExecuteVtctlCommandResponse message, length delimited. Does not implicitly {@link vtctldata.ExecuteVtctlCommandResponse.verify|verify} messages. + * Encodes the specified StopReplicationStatus message, length delimited. Does not implicitly {@link replicationdata.StopReplicationStatus.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ExecuteVtctlCommandResponse + * @memberof replicationdata.StopReplicationStatus * @static - * @param {vtctldata.IExecuteVtctlCommandResponse} message ExecuteVtctlCommandResponse message or plain object to encode + * @param {replicationdata.IStopReplicationStatus} message StopReplicationStatus message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteVtctlCommandResponse.encodeDelimited = function encodeDelimited(message, writer) { + StopReplicationStatus.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ExecuteVtctlCommandResponse message from the specified reader or buffer. + * Decodes a StopReplicationStatus message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ExecuteVtctlCommandResponse + * @memberof replicationdata.StopReplicationStatus * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ExecuteVtctlCommandResponse} ExecuteVtctlCommandResponse + * @returns {replicationdata.StopReplicationStatus} StopReplicationStatus * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteVtctlCommandResponse.decode = function decode(reader, length) { + StopReplicationStatus.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ExecuteVtctlCommandResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.replicationdata.StopReplicationStatus(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.event = $root.logutil.Event.decode(reader, reader.uint32()); + message.before = $root.replicationdata.Status.decode(reader, reader.uint32()); + break; + } + case 2: { + message.after = $root.replicationdata.Status.decode(reader, reader.uint32()); break; } default: @@ -97533,145 +98745,156 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an ExecuteVtctlCommandResponse message from the specified reader or buffer, length delimited. + * Decodes a StopReplicationStatus message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ExecuteVtctlCommandResponse + * @memberof replicationdata.StopReplicationStatus * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ExecuteVtctlCommandResponse} ExecuteVtctlCommandResponse + * @returns {replicationdata.StopReplicationStatus} StopReplicationStatus * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteVtctlCommandResponse.decodeDelimited = function decodeDelimited(reader) { + StopReplicationStatus.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ExecuteVtctlCommandResponse message. + * Verifies a StopReplicationStatus message. 
* @function verify - * @memberof vtctldata.ExecuteVtctlCommandResponse + * @memberof replicationdata.StopReplicationStatus * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ExecuteVtctlCommandResponse.verify = function verify(message) { + StopReplicationStatus.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.event != null && message.hasOwnProperty("event")) { - let error = $root.logutil.Event.verify(message.event); + if (message.before != null && message.hasOwnProperty("before")) { + let error = $root.replicationdata.Status.verify(message.before); if (error) - return "event." + error; + return "before." + error; + } + if (message.after != null && message.hasOwnProperty("after")) { + let error = $root.replicationdata.Status.verify(message.after); + if (error) + return "after." + error; } return null; }; /** - * Creates an ExecuteVtctlCommandResponse message from a plain object. Also converts values to their respective internal types. + * Creates a StopReplicationStatus message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ExecuteVtctlCommandResponse + * @memberof replicationdata.StopReplicationStatus * @static * @param {Object.} object Plain object - * @returns {vtctldata.ExecuteVtctlCommandResponse} ExecuteVtctlCommandResponse + * @returns {replicationdata.StopReplicationStatus} StopReplicationStatus */ - ExecuteVtctlCommandResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ExecuteVtctlCommandResponse) + StopReplicationStatus.fromObject = function fromObject(object) { + if (object instanceof $root.replicationdata.StopReplicationStatus) return object; - let message = new $root.vtctldata.ExecuteVtctlCommandResponse(); - if (object.event != null) { - if (typeof object.event !== "object") - throw TypeError(".vtctldata.ExecuteVtctlCommandResponse.event: object expected"); - message.event = $root.logutil.Event.fromObject(object.event); + let message = new $root.replicationdata.StopReplicationStatus(); + if (object.before != null) { + if (typeof object.before !== "object") + throw TypeError(".replicationdata.StopReplicationStatus.before: object expected"); + message.before = $root.replicationdata.Status.fromObject(object.before); + } + if (object.after != null) { + if (typeof object.after !== "object") + throw TypeError(".replicationdata.StopReplicationStatus.after: object expected"); + message.after = $root.replicationdata.Status.fromObject(object.after); } return message; }; /** - * Creates a plain object from an ExecuteVtctlCommandResponse message. Also converts values to other types if specified. + * Creates a plain object from a StopReplicationStatus message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ExecuteVtctlCommandResponse + * @memberof replicationdata.StopReplicationStatus * @static - * @param {vtctldata.ExecuteVtctlCommandResponse} message ExecuteVtctlCommandResponse + * @param {replicationdata.StopReplicationStatus} message StopReplicationStatus * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ExecuteVtctlCommandResponse.toObject = function toObject(message, options) { + StopReplicationStatus.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.event = null; - if (message.event != null && message.hasOwnProperty("event")) - object.event = $root.logutil.Event.toObject(message.event, options); + if (options.defaults) { + object.before = null; + object.after = null; + } + if (message.before != null && message.hasOwnProperty("before")) + object.before = $root.replicationdata.Status.toObject(message.before, options); + if (message.after != null && message.hasOwnProperty("after")) + object.after = $root.replicationdata.Status.toObject(message.after, options); return object; }; /** - * Converts this ExecuteVtctlCommandResponse to JSON. + * Converts this StopReplicationStatus to JSON. 
* @function toJSON - * @memberof vtctldata.ExecuteVtctlCommandResponse + * @memberof replicationdata.StopReplicationStatus * @instance * @returns {Object.} JSON object */ - ExecuteVtctlCommandResponse.prototype.toJSON = function toJSON() { + StopReplicationStatus.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ExecuteVtctlCommandResponse + * Gets the default type url for StopReplicationStatus * @function getTypeUrl - * @memberof vtctldata.ExecuteVtctlCommandResponse + * @memberof replicationdata.StopReplicationStatus * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ExecuteVtctlCommandResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + StopReplicationStatus.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ExecuteVtctlCommandResponse"; + return typeUrlPrefix + "/replicationdata.StopReplicationStatus"; }; - return ExecuteVtctlCommandResponse; + return StopReplicationStatus; })(); /** - * MaterializationIntent enum. - * @name vtctldata.MaterializationIntent + * StopReplicationMode enum. 
+ * @name replicationdata.StopReplicationMode * @enum {number} - * @property {number} CUSTOM=0 CUSTOM value - * @property {number} MOVETABLES=1 MOVETABLES value - * @property {number} CREATELOOKUPINDEX=2 CREATELOOKUPINDEX value + * @property {number} IOANDSQLTHREAD=0 IOANDSQLTHREAD value + * @property {number} IOTHREADONLY=1 IOTHREADONLY value */ - vtctldata.MaterializationIntent = (function() { + replicationdata.StopReplicationMode = (function() { const valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "CUSTOM"] = 0; - values[valuesById[1] = "MOVETABLES"] = 1; - values[valuesById[2] = "CREATELOOKUPINDEX"] = 2; + values[valuesById[0] = "IOANDSQLTHREAD"] = 0; + values[valuesById[1] = "IOTHREADONLY"] = 1; return values; })(); - vtctldata.TableMaterializeSettings = (function() { + replicationdata.PrimaryStatus = (function() { /** - * Properties of a TableMaterializeSettings. - * @memberof vtctldata - * @interface ITableMaterializeSettings - * @property {string|null} [target_table] TableMaterializeSettings target_table - * @property {string|null} [source_expression] TableMaterializeSettings source_expression - * @property {string|null} [create_ddl] TableMaterializeSettings create_ddl + * Properties of a PrimaryStatus. + * @memberof replicationdata + * @interface IPrimaryStatus + * @property {string|null} [position] PrimaryStatus position + * @property {string|null} [file_position] PrimaryStatus file_position */ /** - * Constructs a new TableMaterializeSettings. - * @memberof vtctldata - * @classdesc Represents a TableMaterializeSettings. - * @implements ITableMaterializeSettings + * Constructs a new PrimaryStatus. + * @memberof replicationdata + * @classdesc Represents a PrimaryStatus. 
+ * @implements IPrimaryStatus * @constructor - * @param {vtctldata.ITableMaterializeSettings=} [properties] Properties to set + * @param {replicationdata.IPrimaryStatus=} [properties] Properties to set */ - function TableMaterializeSettings(properties) { + function PrimaryStatus(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -97679,103 +98902,89 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * TableMaterializeSettings target_table. - * @member {string} target_table - * @memberof vtctldata.TableMaterializeSettings - * @instance - */ - TableMaterializeSettings.prototype.target_table = ""; - - /** - * TableMaterializeSettings source_expression. - * @member {string} source_expression - * @memberof vtctldata.TableMaterializeSettings + * PrimaryStatus position. + * @member {string} position + * @memberof replicationdata.PrimaryStatus * @instance */ - TableMaterializeSettings.prototype.source_expression = ""; + PrimaryStatus.prototype.position = ""; /** - * TableMaterializeSettings create_ddl. - * @member {string} create_ddl - * @memberof vtctldata.TableMaterializeSettings + * PrimaryStatus file_position. + * @member {string} file_position + * @memberof replicationdata.PrimaryStatus * @instance */ - TableMaterializeSettings.prototype.create_ddl = ""; + PrimaryStatus.prototype.file_position = ""; /** - * Creates a new TableMaterializeSettings instance using the specified properties. + * Creates a new PrimaryStatus instance using the specified properties. 
* @function create - * @memberof vtctldata.TableMaterializeSettings + * @memberof replicationdata.PrimaryStatus * @static - * @param {vtctldata.ITableMaterializeSettings=} [properties] Properties to set - * @returns {vtctldata.TableMaterializeSettings} TableMaterializeSettings instance + * @param {replicationdata.IPrimaryStatus=} [properties] Properties to set + * @returns {replicationdata.PrimaryStatus} PrimaryStatus instance */ - TableMaterializeSettings.create = function create(properties) { - return new TableMaterializeSettings(properties); + PrimaryStatus.create = function create(properties) { + return new PrimaryStatus(properties); }; /** - * Encodes the specified TableMaterializeSettings message. Does not implicitly {@link vtctldata.TableMaterializeSettings.verify|verify} messages. + * Encodes the specified PrimaryStatus message. Does not implicitly {@link replicationdata.PrimaryStatus.verify|verify} messages. * @function encode - * @memberof vtctldata.TableMaterializeSettings + * @memberof replicationdata.PrimaryStatus * @static - * @param {vtctldata.ITableMaterializeSettings} message TableMaterializeSettings message or plain object to encode + * @param {replicationdata.IPrimaryStatus} message PrimaryStatus message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - TableMaterializeSettings.encode = function encode(message, writer) { + PrimaryStatus.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.target_table != null && Object.hasOwnProperty.call(message, "target_table")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.target_table); - if (message.source_expression != null && Object.hasOwnProperty.call(message, "source_expression")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.source_expression); - if (message.create_ddl != null && Object.hasOwnProperty.call(message, "create_ddl")) - writer.uint32(/* id 3, 
wireType 2 =*/26).string(message.create_ddl); + if (message.position != null && Object.hasOwnProperty.call(message, "position")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.position); + if (message.file_position != null && Object.hasOwnProperty.call(message, "file_position")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.file_position); return writer; }; /** - * Encodes the specified TableMaterializeSettings message, length delimited. Does not implicitly {@link vtctldata.TableMaterializeSettings.verify|verify} messages. + * Encodes the specified PrimaryStatus message, length delimited. Does not implicitly {@link replicationdata.PrimaryStatus.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.TableMaterializeSettings + * @memberof replicationdata.PrimaryStatus * @static - * @param {vtctldata.ITableMaterializeSettings} message TableMaterializeSettings message or plain object to encode + * @param {replicationdata.IPrimaryStatus} message PrimaryStatus message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - TableMaterializeSettings.encodeDelimited = function encodeDelimited(message, writer) { + PrimaryStatus.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a TableMaterializeSettings message from the specified reader or buffer. + * Decodes a PrimaryStatus message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.TableMaterializeSettings + * @memberof replicationdata.PrimaryStatus * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.TableMaterializeSettings} TableMaterializeSettings + * @returns {replicationdata.PrimaryStatus} PrimaryStatus * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TableMaterializeSettings.decode = function decode(reader, length) { + PrimaryStatus.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.TableMaterializeSettings(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.replicationdata.PrimaryStatus(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.target_table = reader.string(); + message.position = reader.string(); break; } case 2: { - message.source_expression = reader.string(); - break; - } - case 3: { - message.create_ddl = reader.string(); + message.file_position = reader.string(); break; } default: @@ -97787,154 +98996,151 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a TableMaterializeSettings message from the specified reader or buffer, length delimited. + * Decodes a PrimaryStatus message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.TableMaterializeSettings + * @memberof replicationdata.PrimaryStatus * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.TableMaterializeSettings} TableMaterializeSettings + * @returns {replicationdata.PrimaryStatus} PrimaryStatus * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TableMaterializeSettings.decodeDelimited = function decodeDelimited(reader) { + PrimaryStatus.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a TableMaterializeSettings message. + * Verifies a PrimaryStatus message. * @function verify - * @memberof vtctldata.TableMaterializeSettings + * @memberof replicationdata.PrimaryStatus * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - TableMaterializeSettings.verify = function verify(message) { + PrimaryStatus.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.target_table != null && message.hasOwnProperty("target_table")) - if (!$util.isString(message.target_table)) - return "target_table: string expected"; - if (message.source_expression != null && message.hasOwnProperty("source_expression")) - if (!$util.isString(message.source_expression)) - return "source_expression: string expected"; - if (message.create_ddl != null && message.hasOwnProperty("create_ddl")) - if (!$util.isString(message.create_ddl)) - return "create_ddl: string expected"; + if (message.position != null && message.hasOwnProperty("position")) + if (!$util.isString(message.position)) + return "position: string expected"; + if (message.file_position != null && 
message.hasOwnProperty("file_position")) + if (!$util.isString(message.file_position)) + return "file_position: string expected"; return null; }; /** - * Creates a TableMaterializeSettings message from a plain object. Also converts values to their respective internal types. + * Creates a PrimaryStatus message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.TableMaterializeSettings + * @memberof replicationdata.PrimaryStatus * @static * @param {Object.} object Plain object - * @returns {vtctldata.TableMaterializeSettings} TableMaterializeSettings + * @returns {replicationdata.PrimaryStatus} PrimaryStatus */ - TableMaterializeSettings.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.TableMaterializeSettings) + PrimaryStatus.fromObject = function fromObject(object) { + if (object instanceof $root.replicationdata.PrimaryStatus) return object; - let message = new $root.vtctldata.TableMaterializeSettings(); - if (object.target_table != null) - message.target_table = String(object.target_table); - if (object.source_expression != null) - message.source_expression = String(object.source_expression); - if (object.create_ddl != null) - message.create_ddl = String(object.create_ddl); + let message = new $root.replicationdata.PrimaryStatus(); + if (object.position != null) + message.position = String(object.position); + if (object.file_position != null) + message.file_position = String(object.file_position); return message; }; /** - * Creates a plain object from a TableMaterializeSettings message. Also converts values to other types if specified. + * Creates a plain object from a PrimaryStatus message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.TableMaterializeSettings + * @memberof replicationdata.PrimaryStatus * @static - * @param {vtctldata.TableMaterializeSettings} message TableMaterializeSettings + * @param {replicationdata.PrimaryStatus} message PrimaryStatus * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - TableMaterializeSettings.toObject = function toObject(message, options) { + PrimaryStatus.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.target_table = ""; - object.source_expression = ""; - object.create_ddl = ""; + object.position = ""; + object.file_position = ""; } - if (message.target_table != null && message.hasOwnProperty("target_table")) - object.target_table = message.target_table; - if (message.source_expression != null && message.hasOwnProperty("source_expression")) - object.source_expression = message.source_expression; - if (message.create_ddl != null && message.hasOwnProperty("create_ddl")) - object.create_ddl = message.create_ddl; + if (message.position != null && message.hasOwnProperty("position")) + object.position = message.position; + if (message.file_position != null && message.hasOwnProperty("file_position")) + object.file_position = message.file_position; return object; }; /** - * Converts this TableMaterializeSettings to JSON. + * Converts this PrimaryStatus to JSON. 
* @function toJSON - * @memberof vtctldata.TableMaterializeSettings + * @memberof replicationdata.PrimaryStatus * @instance * @returns {Object.} JSON object */ - TableMaterializeSettings.prototype.toJSON = function toJSON() { + PrimaryStatus.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for TableMaterializeSettings + * Gets the default type url for PrimaryStatus * @function getTypeUrl - * @memberof vtctldata.TableMaterializeSettings + * @memberof replicationdata.PrimaryStatus * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - TableMaterializeSettings.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + PrimaryStatus.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.TableMaterializeSettings"; + return typeUrlPrefix + "/replicationdata.PrimaryStatus"; }; - return TableMaterializeSettings; + return PrimaryStatus; })(); - vtctldata.MaterializeSettings = (function() { + replicationdata.FullStatus = (function() { /** - * Properties of a MaterializeSettings. 
- * @memberof vtctldata - * @interface IMaterializeSettings - * @property {string|null} [workflow] MaterializeSettings workflow - * @property {string|null} [source_keyspace] MaterializeSettings source_keyspace - * @property {string|null} [target_keyspace] MaterializeSettings target_keyspace - * @property {boolean|null} [stop_after_copy] MaterializeSettings stop_after_copy - * @property {Array.|null} [table_settings] MaterializeSettings table_settings - * @property {string|null} [cell] MaterializeSettings cell - * @property {string|null} [tablet_types] MaterializeSettings tablet_types - * @property {string|null} [external_cluster] MaterializeSettings external_cluster - * @property {vtctldata.MaterializationIntent|null} [materialization_intent] MaterializeSettings materialization_intent - * @property {string|null} [source_time_zone] MaterializeSettings source_time_zone - * @property {string|null} [target_time_zone] MaterializeSettings target_time_zone - * @property {Array.|null} [source_shards] MaterializeSettings source_shards - * @property {string|null} [on_ddl] MaterializeSettings on_ddl - * @property {boolean|null} [defer_secondary_keys] MaterializeSettings defer_secondary_keys + * Properties of a FullStatus. 
+ * @memberof replicationdata + * @interface IFullStatus + * @property {number|null} [server_id] FullStatus server_id + * @property {string|null} [server_uuid] FullStatus server_uuid + * @property {replicationdata.IStatus|null} [replication_status] FullStatus replication_status + * @property {replicationdata.IPrimaryStatus|null} [primary_status] FullStatus primary_status + * @property {string|null} [gtid_purged] FullStatus gtid_purged + * @property {string|null} [version] FullStatus version + * @property {string|null} [version_comment] FullStatus version_comment + * @property {boolean|null} [read_only] FullStatus read_only + * @property {string|null} [gtid_mode] FullStatus gtid_mode + * @property {string|null} [binlog_format] FullStatus binlog_format + * @property {string|null} [binlog_row_image] FullStatus binlog_row_image + * @property {boolean|null} [log_bin_enabled] FullStatus log_bin_enabled + * @property {boolean|null} [log_replica_updates] FullStatus log_replica_updates + * @property {boolean|null} [semi_sync_primary_enabled] FullStatus semi_sync_primary_enabled + * @property {boolean|null} [semi_sync_replica_enabled] FullStatus semi_sync_replica_enabled + * @property {boolean|null} [semi_sync_primary_status] FullStatus semi_sync_primary_status + * @property {boolean|null} [semi_sync_replica_status] FullStatus semi_sync_replica_status + * @property {number|null} [semi_sync_primary_clients] FullStatus semi_sync_primary_clients + * @property {number|Long|null} [semi_sync_primary_timeout] FullStatus semi_sync_primary_timeout + * @property {number|null} [semi_sync_wait_for_replica_count] FullStatus semi_sync_wait_for_replica_count + * @property {boolean|null} [super_read_only] FullStatus super_read_only */ /** - * Constructs a new MaterializeSettings. - * @memberof vtctldata - * @classdesc Represents a MaterializeSettings. - * @implements IMaterializeSettings + * Constructs a new FullStatus. + * @memberof replicationdata + * @classdesc Represents a FullStatus. 
+ * @implements IFullStatus * @constructor - * @param {vtctldata.IMaterializeSettings=} [properties] Properties to set + * @param {replicationdata.IFullStatus=} [properties] Properties to set */ - function MaterializeSettings(properties) { - this.table_settings = []; - this.source_shards = []; + function FullStatus(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -97942,263 +99148,355 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * MaterializeSettings workflow. - * @member {string} workflow - * @memberof vtctldata.MaterializeSettings + * FullStatus server_id. + * @member {number} server_id + * @memberof replicationdata.FullStatus * @instance */ - MaterializeSettings.prototype.workflow = ""; + FullStatus.prototype.server_id = 0; /** - * MaterializeSettings source_keyspace. - * @member {string} source_keyspace - * @memberof vtctldata.MaterializeSettings + * FullStatus server_uuid. + * @member {string} server_uuid + * @memberof replicationdata.FullStatus * @instance */ - MaterializeSettings.prototype.source_keyspace = ""; + FullStatus.prototype.server_uuid = ""; /** - * MaterializeSettings target_keyspace. - * @member {string} target_keyspace - * @memberof vtctldata.MaterializeSettings + * FullStatus replication_status. + * @member {replicationdata.IStatus|null|undefined} replication_status + * @memberof replicationdata.FullStatus * @instance */ - MaterializeSettings.prototype.target_keyspace = ""; + FullStatus.prototype.replication_status = null; /** - * MaterializeSettings stop_after_copy. - * @member {boolean} stop_after_copy - * @memberof vtctldata.MaterializeSettings + * FullStatus primary_status. 
+ * @member {replicationdata.IPrimaryStatus|null|undefined} primary_status + * @memberof replicationdata.FullStatus * @instance */ - MaterializeSettings.prototype.stop_after_copy = false; + FullStatus.prototype.primary_status = null; /** - * MaterializeSettings table_settings. - * @member {Array.} table_settings - * @memberof vtctldata.MaterializeSettings + * FullStatus gtid_purged. + * @member {string} gtid_purged + * @memberof replicationdata.FullStatus * @instance */ - MaterializeSettings.prototype.table_settings = $util.emptyArray; + FullStatus.prototype.gtid_purged = ""; /** - * MaterializeSettings cell. - * @member {string} cell - * @memberof vtctldata.MaterializeSettings + * FullStatus version. + * @member {string} version + * @memberof replicationdata.FullStatus * @instance */ - MaterializeSettings.prototype.cell = ""; + FullStatus.prototype.version = ""; /** - * MaterializeSettings tablet_types. - * @member {string} tablet_types - * @memberof vtctldata.MaterializeSettings + * FullStatus version_comment. + * @member {string} version_comment + * @memberof replicationdata.FullStatus * @instance */ - MaterializeSettings.prototype.tablet_types = ""; + FullStatus.prototype.version_comment = ""; /** - * MaterializeSettings external_cluster. - * @member {string} external_cluster - * @memberof vtctldata.MaterializeSettings + * FullStatus read_only. + * @member {boolean} read_only + * @memberof replicationdata.FullStatus * @instance */ - MaterializeSettings.prototype.external_cluster = ""; + FullStatus.prototype.read_only = false; /** - * MaterializeSettings materialization_intent. - * @member {vtctldata.MaterializationIntent} materialization_intent - * @memberof vtctldata.MaterializeSettings + * FullStatus gtid_mode. + * @member {string} gtid_mode + * @memberof replicationdata.FullStatus * @instance */ - MaterializeSettings.prototype.materialization_intent = 0; + FullStatus.prototype.gtid_mode = ""; /** - * MaterializeSettings source_time_zone. 
- * @member {string} source_time_zone - * @memberof vtctldata.MaterializeSettings + * FullStatus binlog_format. + * @member {string} binlog_format + * @memberof replicationdata.FullStatus * @instance */ - MaterializeSettings.prototype.source_time_zone = ""; + FullStatus.prototype.binlog_format = ""; /** - * MaterializeSettings target_time_zone. - * @member {string} target_time_zone - * @memberof vtctldata.MaterializeSettings + * FullStatus binlog_row_image. + * @member {string} binlog_row_image + * @memberof replicationdata.FullStatus * @instance */ - MaterializeSettings.prototype.target_time_zone = ""; + FullStatus.prototype.binlog_row_image = ""; /** - * MaterializeSettings source_shards. - * @member {Array.} source_shards - * @memberof vtctldata.MaterializeSettings + * FullStatus log_bin_enabled. + * @member {boolean} log_bin_enabled + * @memberof replicationdata.FullStatus * @instance */ - MaterializeSettings.prototype.source_shards = $util.emptyArray; + FullStatus.prototype.log_bin_enabled = false; /** - * MaterializeSettings on_ddl. - * @member {string} on_ddl - * @memberof vtctldata.MaterializeSettings + * FullStatus log_replica_updates. + * @member {boolean} log_replica_updates + * @memberof replicationdata.FullStatus * @instance */ - MaterializeSettings.prototype.on_ddl = ""; + FullStatus.prototype.log_replica_updates = false; /** - * MaterializeSettings defer_secondary_keys. - * @member {boolean} defer_secondary_keys - * @memberof vtctldata.MaterializeSettings + * FullStatus semi_sync_primary_enabled. + * @member {boolean} semi_sync_primary_enabled + * @memberof replicationdata.FullStatus * @instance */ - MaterializeSettings.prototype.defer_secondary_keys = false; + FullStatus.prototype.semi_sync_primary_enabled = false; /** - * Creates a new MaterializeSettings instance using the specified properties. + * FullStatus semi_sync_replica_enabled. 
+ * @member {boolean} semi_sync_replica_enabled + * @memberof replicationdata.FullStatus + * @instance + */ + FullStatus.prototype.semi_sync_replica_enabled = false; + + /** + * FullStatus semi_sync_primary_status. + * @member {boolean} semi_sync_primary_status + * @memberof replicationdata.FullStatus + * @instance + */ + FullStatus.prototype.semi_sync_primary_status = false; + + /** + * FullStatus semi_sync_replica_status. + * @member {boolean} semi_sync_replica_status + * @memberof replicationdata.FullStatus + * @instance + */ + FullStatus.prototype.semi_sync_replica_status = false; + + /** + * FullStatus semi_sync_primary_clients. + * @member {number} semi_sync_primary_clients + * @memberof replicationdata.FullStatus + * @instance + */ + FullStatus.prototype.semi_sync_primary_clients = 0; + + /** + * FullStatus semi_sync_primary_timeout. + * @member {number|Long} semi_sync_primary_timeout + * @memberof replicationdata.FullStatus + * @instance + */ + FullStatus.prototype.semi_sync_primary_timeout = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + + /** + * FullStatus semi_sync_wait_for_replica_count. + * @member {number} semi_sync_wait_for_replica_count + * @memberof replicationdata.FullStatus + * @instance + */ + FullStatus.prototype.semi_sync_wait_for_replica_count = 0; + + /** + * FullStatus super_read_only. + * @member {boolean} super_read_only + * @memberof replicationdata.FullStatus + * @instance + */ + FullStatus.prototype.super_read_only = false; + + /** + * Creates a new FullStatus instance using the specified properties. 
* @function create - * @memberof vtctldata.MaterializeSettings + * @memberof replicationdata.FullStatus * @static - * @param {vtctldata.IMaterializeSettings=} [properties] Properties to set - * @returns {vtctldata.MaterializeSettings} MaterializeSettings instance + * @param {replicationdata.IFullStatus=} [properties] Properties to set + * @returns {replicationdata.FullStatus} FullStatus instance */ - MaterializeSettings.create = function create(properties) { - return new MaterializeSettings(properties); + FullStatus.create = function create(properties) { + return new FullStatus(properties); }; /** - * Encodes the specified MaterializeSettings message. Does not implicitly {@link vtctldata.MaterializeSettings.verify|verify} messages. + * Encodes the specified FullStatus message. Does not implicitly {@link replicationdata.FullStatus.verify|verify} messages. * @function encode - * @memberof vtctldata.MaterializeSettings + * @memberof replicationdata.FullStatus * @static - * @param {vtctldata.IMaterializeSettings} message MaterializeSettings message or plain object to encode + * @param {replicationdata.IFullStatus} message FullStatus message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - MaterializeSettings.encode = function encode(message, writer) { + FullStatus.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.workflow); - if (message.source_keyspace != null && Object.hasOwnProperty.call(message, "source_keyspace")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.source_keyspace); - if (message.target_keyspace != null && Object.hasOwnProperty.call(message, "target_keyspace")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.target_keyspace); - if (message.stop_after_copy != null && 
Object.hasOwnProperty.call(message, "stop_after_copy")) - writer.uint32(/* id 4, wireType 0 =*/32).bool(message.stop_after_copy); - if (message.table_settings != null && message.table_settings.length) - for (let i = 0; i < message.table_settings.length; ++i) - $root.vtctldata.TableMaterializeSettings.encode(message.table_settings[i], writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); - if (message.cell != null && Object.hasOwnProperty.call(message, "cell")) - writer.uint32(/* id 6, wireType 2 =*/50).string(message.cell); - if (message.tablet_types != null && Object.hasOwnProperty.call(message, "tablet_types")) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.tablet_types); - if (message.external_cluster != null && Object.hasOwnProperty.call(message, "external_cluster")) - writer.uint32(/* id 8, wireType 2 =*/66).string(message.external_cluster); - if (message.materialization_intent != null && Object.hasOwnProperty.call(message, "materialization_intent")) - writer.uint32(/* id 9, wireType 0 =*/72).int32(message.materialization_intent); - if (message.source_time_zone != null && Object.hasOwnProperty.call(message, "source_time_zone")) - writer.uint32(/* id 10, wireType 2 =*/82).string(message.source_time_zone); - if (message.target_time_zone != null && Object.hasOwnProperty.call(message, "target_time_zone")) - writer.uint32(/* id 11, wireType 2 =*/90).string(message.target_time_zone); - if (message.source_shards != null && message.source_shards.length) - for (let i = 0; i < message.source_shards.length; ++i) - writer.uint32(/* id 12, wireType 2 =*/98).string(message.source_shards[i]); - if (message.on_ddl != null && Object.hasOwnProperty.call(message, "on_ddl")) - writer.uint32(/* id 13, wireType 2 =*/106).string(message.on_ddl); - if (message.defer_secondary_keys != null && Object.hasOwnProperty.call(message, "defer_secondary_keys")) - writer.uint32(/* id 14, wireType 0 =*/112).bool(message.defer_secondary_keys); + if (message.server_id != null && 
Object.hasOwnProperty.call(message, "server_id")) + writer.uint32(/* id 1, wireType 0 =*/8).uint32(message.server_id); + if (message.server_uuid != null && Object.hasOwnProperty.call(message, "server_uuid")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.server_uuid); + if (message.replication_status != null && Object.hasOwnProperty.call(message, "replication_status")) + $root.replicationdata.Status.encode(message.replication_status, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.primary_status != null && Object.hasOwnProperty.call(message, "primary_status")) + $root.replicationdata.PrimaryStatus.encode(message.primary_status, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.gtid_purged != null && Object.hasOwnProperty.call(message, "gtid_purged")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.gtid_purged); + if (message.version != null && Object.hasOwnProperty.call(message, "version")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.version); + if (message.version_comment != null && Object.hasOwnProperty.call(message, "version_comment")) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.version_comment); + if (message.read_only != null && Object.hasOwnProperty.call(message, "read_only")) + writer.uint32(/* id 8, wireType 0 =*/64).bool(message.read_only); + if (message.gtid_mode != null && Object.hasOwnProperty.call(message, "gtid_mode")) + writer.uint32(/* id 9, wireType 2 =*/74).string(message.gtid_mode); + if (message.binlog_format != null && Object.hasOwnProperty.call(message, "binlog_format")) + writer.uint32(/* id 10, wireType 2 =*/82).string(message.binlog_format); + if (message.binlog_row_image != null && Object.hasOwnProperty.call(message, "binlog_row_image")) + writer.uint32(/* id 11, wireType 2 =*/90).string(message.binlog_row_image); + if (message.log_bin_enabled != null && Object.hasOwnProperty.call(message, "log_bin_enabled")) + writer.uint32(/* id 12, wireType 
0 =*/96).bool(message.log_bin_enabled); + if (message.log_replica_updates != null && Object.hasOwnProperty.call(message, "log_replica_updates")) + writer.uint32(/* id 13, wireType 0 =*/104).bool(message.log_replica_updates); + if (message.semi_sync_primary_enabled != null && Object.hasOwnProperty.call(message, "semi_sync_primary_enabled")) + writer.uint32(/* id 14, wireType 0 =*/112).bool(message.semi_sync_primary_enabled); + if (message.semi_sync_replica_enabled != null && Object.hasOwnProperty.call(message, "semi_sync_replica_enabled")) + writer.uint32(/* id 15, wireType 0 =*/120).bool(message.semi_sync_replica_enabled); + if (message.semi_sync_primary_status != null && Object.hasOwnProperty.call(message, "semi_sync_primary_status")) + writer.uint32(/* id 16, wireType 0 =*/128).bool(message.semi_sync_primary_status); + if (message.semi_sync_replica_status != null && Object.hasOwnProperty.call(message, "semi_sync_replica_status")) + writer.uint32(/* id 17, wireType 0 =*/136).bool(message.semi_sync_replica_status); + if (message.semi_sync_primary_clients != null && Object.hasOwnProperty.call(message, "semi_sync_primary_clients")) + writer.uint32(/* id 18, wireType 0 =*/144).uint32(message.semi_sync_primary_clients); + if (message.semi_sync_primary_timeout != null && Object.hasOwnProperty.call(message, "semi_sync_primary_timeout")) + writer.uint32(/* id 19, wireType 0 =*/152).uint64(message.semi_sync_primary_timeout); + if (message.semi_sync_wait_for_replica_count != null && Object.hasOwnProperty.call(message, "semi_sync_wait_for_replica_count")) + writer.uint32(/* id 20, wireType 0 =*/160).uint32(message.semi_sync_wait_for_replica_count); + if (message.super_read_only != null && Object.hasOwnProperty.call(message, "super_read_only")) + writer.uint32(/* id 21, wireType 0 =*/168).bool(message.super_read_only); return writer; }; /** - * Encodes the specified MaterializeSettings message, length delimited. 
Does not implicitly {@link vtctldata.MaterializeSettings.verify|verify} messages. + * Encodes the specified FullStatus message, length delimited. Does not implicitly {@link replicationdata.FullStatus.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.MaterializeSettings + * @memberof replicationdata.FullStatus * @static - * @param {vtctldata.IMaterializeSettings} message MaterializeSettings message or plain object to encode + * @param {replicationdata.IFullStatus} message FullStatus message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - MaterializeSettings.encodeDelimited = function encodeDelimited(message, writer) { + FullStatus.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a MaterializeSettings message from the specified reader or buffer. + * Decodes a FullStatus message from the specified reader or buffer. * @function decode - * @memberof vtctldata.MaterializeSettings + * @memberof replicationdata.FullStatus * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.MaterializeSettings} MaterializeSettings + * @returns {replicationdata.FullStatus} FullStatus * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MaterializeSettings.decode = function decode(reader, length) { + FullStatus.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.MaterializeSettings(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.replicationdata.FullStatus(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.workflow = reader.string(); + message.server_id = reader.uint32(); break; } case 2: { - message.source_keyspace = reader.string(); + message.server_uuid = reader.string(); break; } case 3: { - message.target_keyspace = reader.string(); + message.replication_status = $root.replicationdata.Status.decode(reader, reader.uint32()); break; } case 4: { - message.stop_after_copy = reader.bool(); + message.primary_status = $root.replicationdata.PrimaryStatus.decode(reader, reader.uint32()); break; } case 5: { - if (!(message.table_settings && message.table_settings.length)) - message.table_settings = []; - message.table_settings.push($root.vtctldata.TableMaterializeSettings.decode(reader, reader.uint32())); + message.gtid_purged = reader.string(); break; } case 6: { - message.cell = reader.string(); + message.version = reader.string(); break; } case 7: { - message.tablet_types = reader.string(); + message.version_comment = reader.string(); break; } case 8: { - message.external_cluster = reader.string(); + message.read_only = reader.bool(); break; } case 9: { - message.materialization_intent = reader.int32(); + message.gtid_mode = reader.string(); break; } case 10: { - message.source_time_zone = reader.string(); + message.binlog_format = reader.string(); break; } case 11: { - message.target_time_zone = reader.string(); + message.binlog_row_image = reader.string(); break; } case 12: { - if (!(message.source_shards && message.source_shards.length)) - message.source_shards = []; - message.source_shards.push(reader.string()); + message.log_bin_enabled = reader.bool(); break; } case 13: { - message.on_ddl = reader.string(); + message.log_replica_updates = reader.bool(); break; } case 14: { - message.defer_secondary_keys = reader.bool(); + message.semi_sync_primary_enabled = reader.bool(); + break; + } + 
case 15: { + message.semi_sync_replica_enabled = reader.bool(); + break; + } + case 16: { + message.semi_sync_primary_status = reader.bool(); + break; + } + case 17: { + message.semi_sync_replica_status = reader.bool(); + break; + } + case 18: { + message.semi_sync_primary_clients = reader.uint32(); + break; + } + case 19: { + message.semi_sync_primary_timeout = reader.uint64(); + break; + } + case 20: { + message.semi_sync_wait_for_replica_count = reader.uint32(); + break; + } + case 21: { + message.super_read_only = reader.bool(); break; } default: @@ -98210,283 +99508,320 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a MaterializeSettings message from the specified reader or buffer, length delimited. + * Decodes a FullStatus message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.MaterializeSettings + * @memberof replicationdata.FullStatus * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.MaterializeSettings} MaterializeSettings + * @returns {replicationdata.FullStatus} FullStatus * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - MaterializeSettings.decodeDelimited = function decodeDelimited(reader) { + FullStatus.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a MaterializeSettings message. + * Verifies a FullStatus message. 
* @function verify - * @memberof vtctldata.MaterializeSettings + * @memberof replicationdata.FullStatus * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - MaterializeSettings.verify = function verify(message) { + FullStatus.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.workflow != null && message.hasOwnProperty("workflow")) - if (!$util.isString(message.workflow)) - return "workflow: string expected"; - if (message.source_keyspace != null && message.hasOwnProperty("source_keyspace")) - if (!$util.isString(message.source_keyspace)) - return "source_keyspace: string expected"; - if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) - if (!$util.isString(message.target_keyspace)) - return "target_keyspace: string expected"; - if (message.stop_after_copy != null && message.hasOwnProperty("stop_after_copy")) - if (typeof message.stop_after_copy !== "boolean") - return "stop_after_copy: boolean expected"; - if (message.table_settings != null && message.hasOwnProperty("table_settings")) { - if (!Array.isArray(message.table_settings)) - return "table_settings: array expected"; - for (let i = 0; i < message.table_settings.length; ++i) { - let error = $root.vtctldata.TableMaterializeSettings.verify(message.table_settings[i]); - if (error) - return "table_settings." 
+ error; - } + if (message.server_id != null && message.hasOwnProperty("server_id")) + if (!$util.isInteger(message.server_id)) + return "server_id: integer expected"; + if (message.server_uuid != null && message.hasOwnProperty("server_uuid")) + if (!$util.isString(message.server_uuid)) + return "server_uuid: string expected"; + if (message.replication_status != null && message.hasOwnProperty("replication_status")) { + let error = $root.replicationdata.Status.verify(message.replication_status); + if (error) + return "replication_status." + error; } - if (message.cell != null && message.hasOwnProperty("cell")) - if (!$util.isString(message.cell)) - return "cell: string expected"; - if (message.tablet_types != null && message.hasOwnProperty("tablet_types")) - if (!$util.isString(message.tablet_types)) - return "tablet_types: string expected"; - if (message.external_cluster != null && message.hasOwnProperty("external_cluster")) - if (!$util.isString(message.external_cluster)) - return "external_cluster: string expected"; - if (message.materialization_intent != null && message.hasOwnProperty("materialization_intent")) - switch (message.materialization_intent) { - default: - return "materialization_intent: enum value expected"; - case 0: - case 1: - case 2: - break; - } - if (message.source_time_zone != null && message.hasOwnProperty("source_time_zone")) - if (!$util.isString(message.source_time_zone)) - return "source_time_zone: string expected"; - if (message.target_time_zone != null && message.hasOwnProperty("target_time_zone")) - if (!$util.isString(message.target_time_zone)) - return "target_time_zone: string expected"; - if (message.source_shards != null && message.hasOwnProperty("source_shards")) { - if (!Array.isArray(message.source_shards)) - return "source_shards: array expected"; - for (let i = 0; i < message.source_shards.length; ++i) - if (!$util.isString(message.source_shards[i])) - return "source_shards: string[] expected"; + if (message.primary_status != 
null && message.hasOwnProperty("primary_status")) { + let error = $root.replicationdata.PrimaryStatus.verify(message.primary_status); + if (error) + return "primary_status." + error; } - if (message.on_ddl != null && message.hasOwnProperty("on_ddl")) - if (!$util.isString(message.on_ddl)) - return "on_ddl: string expected"; - if (message.defer_secondary_keys != null && message.hasOwnProperty("defer_secondary_keys")) - if (typeof message.defer_secondary_keys !== "boolean") - return "defer_secondary_keys: boolean expected"; + if (message.gtid_purged != null && message.hasOwnProperty("gtid_purged")) + if (!$util.isString(message.gtid_purged)) + return "gtid_purged: string expected"; + if (message.version != null && message.hasOwnProperty("version")) + if (!$util.isString(message.version)) + return "version: string expected"; + if (message.version_comment != null && message.hasOwnProperty("version_comment")) + if (!$util.isString(message.version_comment)) + return "version_comment: string expected"; + if (message.read_only != null && message.hasOwnProperty("read_only")) + if (typeof message.read_only !== "boolean") + return "read_only: boolean expected"; + if (message.gtid_mode != null && message.hasOwnProperty("gtid_mode")) + if (!$util.isString(message.gtid_mode)) + return "gtid_mode: string expected"; + if (message.binlog_format != null && message.hasOwnProperty("binlog_format")) + if (!$util.isString(message.binlog_format)) + return "binlog_format: string expected"; + if (message.binlog_row_image != null && message.hasOwnProperty("binlog_row_image")) + if (!$util.isString(message.binlog_row_image)) + return "binlog_row_image: string expected"; + if (message.log_bin_enabled != null && message.hasOwnProperty("log_bin_enabled")) + if (typeof message.log_bin_enabled !== "boolean") + return "log_bin_enabled: boolean expected"; + if (message.log_replica_updates != null && message.hasOwnProperty("log_replica_updates")) + if (typeof message.log_replica_updates !== 
"boolean") + return "log_replica_updates: boolean expected"; + if (message.semi_sync_primary_enabled != null && message.hasOwnProperty("semi_sync_primary_enabled")) + if (typeof message.semi_sync_primary_enabled !== "boolean") + return "semi_sync_primary_enabled: boolean expected"; + if (message.semi_sync_replica_enabled != null && message.hasOwnProperty("semi_sync_replica_enabled")) + if (typeof message.semi_sync_replica_enabled !== "boolean") + return "semi_sync_replica_enabled: boolean expected"; + if (message.semi_sync_primary_status != null && message.hasOwnProperty("semi_sync_primary_status")) + if (typeof message.semi_sync_primary_status !== "boolean") + return "semi_sync_primary_status: boolean expected"; + if (message.semi_sync_replica_status != null && message.hasOwnProperty("semi_sync_replica_status")) + if (typeof message.semi_sync_replica_status !== "boolean") + return "semi_sync_replica_status: boolean expected"; + if (message.semi_sync_primary_clients != null && message.hasOwnProperty("semi_sync_primary_clients")) + if (!$util.isInteger(message.semi_sync_primary_clients)) + return "semi_sync_primary_clients: integer expected"; + if (message.semi_sync_primary_timeout != null && message.hasOwnProperty("semi_sync_primary_timeout")) + if (!$util.isInteger(message.semi_sync_primary_timeout) && !(message.semi_sync_primary_timeout && $util.isInteger(message.semi_sync_primary_timeout.low) && $util.isInteger(message.semi_sync_primary_timeout.high))) + return "semi_sync_primary_timeout: integer|Long expected"; + if (message.semi_sync_wait_for_replica_count != null && message.hasOwnProperty("semi_sync_wait_for_replica_count")) + if (!$util.isInteger(message.semi_sync_wait_for_replica_count)) + return "semi_sync_wait_for_replica_count: integer expected"; + if (message.super_read_only != null && message.hasOwnProperty("super_read_only")) + if (typeof message.super_read_only !== "boolean") + return "super_read_only: boolean expected"; return null; }; /** - * 
Creates a MaterializeSettings message from a plain object. Also converts values to their respective internal types. + * Creates a FullStatus message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.MaterializeSettings + * @memberof replicationdata.FullStatus * @static * @param {Object.} object Plain object - * @returns {vtctldata.MaterializeSettings} MaterializeSettings + * @returns {replicationdata.FullStatus} FullStatus */ - MaterializeSettings.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.MaterializeSettings) + FullStatus.fromObject = function fromObject(object) { + if (object instanceof $root.replicationdata.FullStatus) return object; - let message = new $root.vtctldata.MaterializeSettings(); - if (object.workflow != null) - message.workflow = String(object.workflow); - if (object.source_keyspace != null) - message.source_keyspace = String(object.source_keyspace); - if (object.target_keyspace != null) - message.target_keyspace = String(object.target_keyspace); - if (object.stop_after_copy != null) - message.stop_after_copy = Boolean(object.stop_after_copy); - if (object.table_settings) { - if (!Array.isArray(object.table_settings)) - throw TypeError(".vtctldata.MaterializeSettings.table_settings: array expected"); - message.table_settings = []; - for (let i = 0; i < object.table_settings.length; ++i) { - if (typeof object.table_settings[i] !== "object") - throw TypeError(".vtctldata.MaterializeSettings.table_settings: object expected"); - message.table_settings[i] = $root.vtctldata.TableMaterializeSettings.fromObject(object.table_settings[i]); - } - } - if (object.cell != null) - message.cell = String(object.cell); - if (object.tablet_types != null) - message.tablet_types = String(object.tablet_types); - if (object.external_cluster != null) - message.external_cluster = String(object.external_cluster); - switch (object.materialization_intent) { - 
default: - if (typeof object.materialization_intent === "number") { - message.materialization_intent = object.materialization_intent; - break; - } - break; - case "CUSTOM": - case 0: - message.materialization_intent = 0; - break; - case "MOVETABLES": - case 1: - message.materialization_intent = 1; - break; - case "CREATELOOKUPINDEX": - case 2: - message.materialization_intent = 2; - break; + let message = new $root.replicationdata.FullStatus(); + if (object.server_id != null) + message.server_id = object.server_id >>> 0; + if (object.server_uuid != null) + message.server_uuid = String(object.server_uuid); + if (object.replication_status != null) { + if (typeof object.replication_status !== "object") + throw TypeError(".replicationdata.FullStatus.replication_status: object expected"); + message.replication_status = $root.replicationdata.Status.fromObject(object.replication_status); } - if (object.source_time_zone != null) - message.source_time_zone = String(object.source_time_zone); - if (object.target_time_zone != null) - message.target_time_zone = String(object.target_time_zone); - if (object.source_shards) { - if (!Array.isArray(object.source_shards)) - throw TypeError(".vtctldata.MaterializeSettings.source_shards: array expected"); - message.source_shards = []; - for (let i = 0; i < object.source_shards.length; ++i) - message.source_shards[i] = String(object.source_shards[i]); + if (object.primary_status != null) { + if (typeof object.primary_status !== "object") + throw TypeError(".replicationdata.FullStatus.primary_status: object expected"); + message.primary_status = $root.replicationdata.PrimaryStatus.fromObject(object.primary_status); } - if (object.on_ddl != null) - message.on_ddl = String(object.on_ddl); - if (object.defer_secondary_keys != null) - message.defer_secondary_keys = Boolean(object.defer_secondary_keys); + if (object.gtid_purged != null) + message.gtid_purged = String(object.gtid_purged); + if (object.version != null) + message.version = 
String(object.version); + if (object.version_comment != null) + message.version_comment = String(object.version_comment); + if (object.read_only != null) + message.read_only = Boolean(object.read_only); + if (object.gtid_mode != null) + message.gtid_mode = String(object.gtid_mode); + if (object.binlog_format != null) + message.binlog_format = String(object.binlog_format); + if (object.binlog_row_image != null) + message.binlog_row_image = String(object.binlog_row_image); + if (object.log_bin_enabled != null) + message.log_bin_enabled = Boolean(object.log_bin_enabled); + if (object.log_replica_updates != null) + message.log_replica_updates = Boolean(object.log_replica_updates); + if (object.semi_sync_primary_enabled != null) + message.semi_sync_primary_enabled = Boolean(object.semi_sync_primary_enabled); + if (object.semi_sync_replica_enabled != null) + message.semi_sync_replica_enabled = Boolean(object.semi_sync_replica_enabled); + if (object.semi_sync_primary_status != null) + message.semi_sync_primary_status = Boolean(object.semi_sync_primary_status); + if (object.semi_sync_replica_status != null) + message.semi_sync_replica_status = Boolean(object.semi_sync_replica_status); + if (object.semi_sync_primary_clients != null) + message.semi_sync_primary_clients = object.semi_sync_primary_clients >>> 0; + if (object.semi_sync_primary_timeout != null) + if ($util.Long) + (message.semi_sync_primary_timeout = $util.Long.fromValue(object.semi_sync_primary_timeout)).unsigned = true; + else if (typeof object.semi_sync_primary_timeout === "string") + message.semi_sync_primary_timeout = parseInt(object.semi_sync_primary_timeout, 10); + else if (typeof object.semi_sync_primary_timeout === "number") + message.semi_sync_primary_timeout = object.semi_sync_primary_timeout; + else if (typeof object.semi_sync_primary_timeout === "object") + message.semi_sync_primary_timeout = new $util.LongBits(object.semi_sync_primary_timeout.low >>> 0, object.semi_sync_primary_timeout.high >>> 
0).toNumber(true); + if (object.semi_sync_wait_for_replica_count != null) + message.semi_sync_wait_for_replica_count = object.semi_sync_wait_for_replica_count >>> 0; + if (object.super_read_only != null) + message.super_read_only = Boolean(object.super_read_only); return message; }; /** - * Creates a plain object from a MaterializeSettings message. Also converts values to other types if specified. + * Creates a plain object from a FullStatus message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.MaterializeSettings + * @memberof replicationdata.FullStatus * @static - * @param {vtctldata.MaterializeSettings} message MaterializeSettings + * @param {replicationdata.FullStatus} message FullStatus * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - MaterializeSettings.toObject = function toObject(message, options) { + FullStatus.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) { - object.table_settings = []; - object.source_shards = []; - } if (options.defaults) { - object.workflow = ""; - object.source_keyspace = ""; - object.target_keyspace = ""; - object.stop_after_copy = false; - object.cell = ""; - object.tablet_types = ""; - object.external_cluster = ""; - object.materialization_intent = options.enums === String ? 
"CUSTOM" : 0; - object.source_time_zone = ""; - object.target_time_zone = ""; - object.on_ddl = ""; - object.defer_secondary_keys = false; - } - if (message.workflow != null && message.hasOwnProperty("workflow")) - object.workflow = message.workflow; - if (message.source_keyspace != null && message.hasOwnProperty("source_keyspace")) - object.source_keyspace = message.source_keyspace; - if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) - object.target_keyspace = message.target_keyspace; - if (message.stop_after_copy != null && message.hasOwnProperty("stop_after_copy")) - object.stop_after_copy = message.stop_after_copy; - if (message.table_settings && message.table_settings.length) { - object.table_settings = []; - for (let j = 0; j < message.table_settings.length; ++j) - object.table_settings[j] = $root.vtctldata.TableMaterializeSettings.toObject(message.table_settings[j], options); - } - if (message.cell != null && message.hasOwnProperty("cell")) - object.cell = message.cell; - if (message.tablet_types != null && message.hasOwnProperty("tablet_types")) - object.tablet_types = message.tablet_types; - if (message.external_cluster != null && message.hasOwnProperty("external_cluster")) - object.external_cluster = message.external_cluster; - if (message.materialization_intent != null && message.hasOwnProperty("materialization_intent")) - object.materialization_intent = options.enums === String ? $root.vtctldata.MaterializationIntent[message.materialization_intent] === undefined ? 
message.materialization_intent : $root.vtctldata.MaterializationIntent[message.materialization_intent] : message.materialization_intent; - if (message.source_time_zone != null && message.hasOwnProperty("source_time_zone")) - object.source_time_zone = message.source_time_zone; - if (message.target_time_zone != null && message.hasOwnProperty("target_time_zone")) - object.target_time_zone = message.target_time_zone; - if (message.source_shards && message.source_shards.length) { - object.source_shards = []; - for (let j = 0; j < message.source_shards.length; ++j) - object.source_shards[j] = message.source_shards[j]; + object.server_id = 0; + object.server_uuid = ""; + object.replication_status = null; + object.primary_status = null; + object.gtid_purged = ""; + object.version = ""; + object.version_comment = ""; + object.read_only = false; + object.gtid_mode = ""; + object.binlog_format = ""; + object.binlog_row_image = ""; + object.log_bin_enabled = false; + object.log_replica_updates = false; + object.semi_sync_primary_enabled = false; + object.semi_sync_replica_enabled = false; + object.semi_sync_primary_status = false; + object.semi_sync_replica_status = false; + object.semi_sync_primary_clients = 0; + if ($util.Long) { + let long = new $util.Long(0, 0, true); + object.semi_sync_primary_timeout = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.semi_sync_primary_timeout = options.longs === String ? 
"0" : 0; + object.semi_sync_wait_for_replica_count = 0; + object.super_read_only = false; } - if (message.on_ddl != null && message.hasOwnProperty("on_ddl")) - object.on_ddl = message.on_ddl; - if (message.defer_secondary_keys != null && message.hasOwnProperty("defer_secondary_keys")) - object.defer_secondary_keys = message.defer_secondary_keys; + if (message.server_id != null && message.hasOwnProperty("server_id")) + object.server_id = message.server_id; + if (message.server_uuid != null && message.hasOwnProperty("server_uuid")) + object.server_uuid = message.server_uuid; + if (message.replication_status != null && message.hasOwnProperty("replication_status")) + object.replication_status = $root.replicationdata.Status.toObject(message.replication_status, options); + if (message.primary_status != null && message.hasOwnProperty("primary_status")) + object.primary_status = $root.replicationdata.PrimaryStatus.toObject(message.primary_status, options); + if (message.gtid_purged != null && message.hasOwnProperty("gtid_purged")) + object.gtid_purged = message.gtid_purged; + if (message.version != null && message.hasOwnProperty("version")) + object.version = message.version; + if (message.version_comment != null && message.hasOwnProperty("version_comment")) + object.version_comment = message.version_comment; + if (message.read_only != null && message.hasOwnProperty("read_only")) + object.read_only = message.read_only; + if (message.gtid_mode != null && message.hasOwnProperty("gtid_mode")) + object.gtid_mode = message.gtid_mode; + if (message.binlog_format != null && message.hasOwnProperty("binlog_format")) + object.binlog_format = message.binlog_format; + if (message.binlog_row_image != null && message.hasOwnProperty("binlog_row_image")) + object.binlog_row_image = message.binlog_row_image; + if (message.log_bin_enabled != null && message.hasOwnProperty("log_bin_enabled")) + object.log_bin_enabled = message.log_bin_enabled; + if (message.log_replica_updates != null && 
message.hasOwnProperty("log_replica_updates")) + object.log_replica_updates = message.log_replica_updates; + if (message.semi_sync_primary_enabled != null && message.hasOwnProperty("semi_sync_primary_enabled")) + object.semi_sync_primary_enabled = message.semi_sync_primary_enabled; + if (message.semi_sync_replica_enabled != null && message.hasOwnProperty("semi_sync_replica_enabled")) + object.semi_sync_replica_enabled = message.semi_sync_replica_enabled; + if (message.semi_sync_primary_status != null && message.hasOwnProperty("semi_sync_primary_status")) + object.semi_sync_primary_status = message.semi_sync_primary_status; + if (message.semi_sync_replica_status != null && message.hasOwnProperty("semi_sync_replica_status")) + object.semi_sync_replica_status = message.semi_sync_replica_status; + if (message.semi_sync_primary_clients != null && message.hasOwnProperty("semi_sync_primary_clients")) + object.semi_sync_primary_clients = message.semi_sync_primary_clients; + if (message.semi_sync_primary_timeout != null && message.hasOwnProperty("semi_sync_primary_timeout")) + if (typeof message.semi_sync_primary_timeout === "number") + object.semi_sync_primary_timeout = options.longs === String ? String(message.semi_sync_primary_timeout) : message.semi_sync_primary_timeout; + else + object.semi_sync_primary_timeout = options.longs === String ? $util.Long.prototype.toString.call(message.semi_sync_primary_timeout) : options.longs === Number ? 
new $util.LongBits(message.semi_sync_primary_timeout.low >>> 0, message.semi_sync_primary_timeout.high >>> 0).toNumber(true) : message.semi_sync_primary_timeout; + if (message.semi_sync_wait_for_replica_count != null && message.hasOwnProperty("semi_sync_wait_for_replica_count")) + object.semi_sync_wait_for_replica_count = message.semi_sync_wait_for_replica_count; + if (message.super_read_only != null && message.hasOwnProperty("super_read_only")) + object.super_read_only = message.super_read_only; return object; }; /** - * Converts this MaterializeSettings to JSON. + * Converts this FullStatus to JSON. * @function toJSON - * @memberof vtctldata.MaterializeSettings + * @memberof replicationdata.FullStatus * @instance * @returns {Object.} JSON object */ - MaterializeSettings.prototype.toJSON = function toJSON() { + FullStatus.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for MaterializeSettings + * Gets the default type url for FullStatus * @function getTypeUrl - * @memberof vtctldata.MaterializeSettings + * @memberof replicationdata.FullStatus * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - MaterializeSettings.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + FullStatus.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.MaterializeSettings"; + return typeUrlPrefix + "/replicationdata.FullStatus"; }; - return MaterializeSettings; + return FullStatus; })(); - vtctldata.Keyspace = (function() { + return replicationdata; +})(); + +export const vschema = $root.vschema = (() => { + + /** + * Namespace vschema. + * @exports vschema + * @namespace + */ + const vschema = {}; + + vschema.RoutingRules = (function() { /** - * Properties of a Keyspace. 
- * @memberof vtctldata - * @interface IKeyspace - * @property {string|null} [name] Keyspace name - * @property {topodata.IKeyspace|null} [keyspace] Keyspace keyspace + * Properties of a RoutingRules. + * @memberof vschema + * @interface IRoutingRules + * @property {Array.|null} [rules] RoutingRules rules */ /** - * Constructs a new Keyspace. - * @memberof vtctldata - * @classdesc Represents a Keyspace. - * @implements IKeyspace + * Constructs a new RoutingRules. + * @memberof vschema + * @classdesc Represents a RoutingRules. + * @implements IRoutingRules * @constructor - * @param {vtctldata.IKeyspace=} [properties] Properties to set + * @param {vschema.IRoutingRules=} [properties] Properties to set */ - function Keyspace(properties) { + function RoutingRules(properties) { + this.rules = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -98494,89 +99829,78 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Keyspace name. - * @member {string} name - * @memberof vtctldata.Keyspace - * @instance - */ - Keyspace.prototype.name = ""; - - /** - * Keyspace keyspace. - * @member {topodata.IKeyspace|null|undefined} keyspace - * @memberof vtctldata.Keyspace + * RoutingRules rules. + * @member {Array.} rules + * @memberof vschema.RoutingRules * @instance */ - Keyspace.prototype.keyspace = null; + RoutingRules.prototype.rules = $util.emptyArray; /** - * Creates a new Keyspace instance using the specified properties. + * Creates a new RoutingRules instance using the specified properties. 
* @function create - * @memberof vtctldata.Keyspace + * @memberof vschema.RoutingRules * @static - * @param {vtctldata.IKeyspace=} [properties] Properties to set - * @returns {vtctldata.Keyspace} Keyspace instance + * @param {vschema.IRoutingRules=} [properties] Properties to set + * @returns {vschema.RoutingRules} RoutingRules instance */ - Keyspace.create = function create(properties) { - return new Keyspace(properties); + RoutingRules.create = function create(properties) { + return new RoutingRules(properties); }; /** - * Encodes the specified Keyspace message. Does not implicitly {@link vtctldata.Keyspace.verify|verify} messages. + * Encodes the specified RoutingRules message. Does not implicitly {@link vschema.RoutingRules.verify|verify} messages. * @function encode - * @memberof vtctldata.Keyspace + * @memberof vschema.RoutingRules * @static - * @param {vtctldata.IKeyspace} message Keyspace message or plain object to encode + * @param {vschema.IRoutingRules} message RoutingRules message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Keyspace.encode = function encode(message, writer) { + RoutingRules.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.name != null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - $root.topodata.Keyspace.encode(message.keyspace, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.rules != null && message.rules.length) + for (let i = 0; i < message.rules.length; ++i) + $root.vschema.RoutingRule.encode(message.rules[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified Keyspace message, length delimited. Does not implicitly {@link vtctldata.Keyspace.verify|verify} messages. 
+ * Encodes the specified RoutingRules message, length delimited. Does not implicitly {@link vschema.RoutingRules.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.Keyspace + * @memberof vschema.RoutingRules * @static - * @param {vtctldata.IKeyspace} message Keyspace message or plain object to encode + * @param {vschema.IRoutingRules} message RoutingRules message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Keyspace.encodeDelimited = function encodeDelimited(message, writer) { + RoutingRules.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Keyspace message from the specified reader or buffer. + * Decodes a RoutingRules message from the specified reader or buffer. * @function decode - * @memberof vtctldata.Keyspace + * @memberof vschema.RoutingRules * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.Keyspace} Keyspace + * @returns {vschema.RoutingRules} RoutingRules * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Keyspace.decode = function decode(reader, length) { + RoutingRules.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.Keyspace(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vschema.RoutingRules(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.name = reader.string(); - break; - } - case 2: { - message.keyspace = $root.topodata.Keyspace.decode(reader, reader.uint32()); + if (!(message.rules && message.rules.length)) + message.rules = []; + message.rules.push($root.vschema.RoutingRule.decode(reader, reader.uint32())); break; } default: @@ -98588,138 +99912,141 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a Keyspace message from the specified reader or buffer, length delimited. + * Decodes a RoutingRules message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.Keyspace + * @memberof vschema.RoutingRules * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.Keyspace} Keyspace + * @returns {vschema.RoutingRules} RoutingRules * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Keyspace.decodeDelimited = function decodeDelimited(reader) { + RoutingRules.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Keyspace message. + * Verifies a RoutingRules message. 
* @function verify - * @memberof vtctldata.Keyspace + * @memberof vschema.RoutingRules * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Keyspace.verify = function verify(message) { + RoutingRules.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) { - let error = $root.topodata.Keyspace.verify(message.keyspace); - if (error) - return "keyspace." + error; + if (message.rules != null && message.hasOwnProperty("rules")) { + if (!Array.isArray(message.rules)) + return "rules: array expected"; + for (let i = 0; i < message.rules.length; ++i) { + let error = $root.vschema.RoutingRule.verify(message.rules[i]); + if (error) + return "rules." + error; + } } return null; }; /** - * Creates a Keyspace message from a plain object. Also converts values to their respective internal types. + * Creates a RoutingRules message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.Keyspace + * @memberof vschema.RoutingRules * @static * @param {Object.} object Plain object - * @returns {vtctldata.Keyspace} Keyspace + * @returns {vschema.RoutingRules} RoutingRules */ - Keyspace.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.Keyspace) + RoutingRules.fromObject = function fromObject(object) { + if (object instanceof $root.vschema.RoutingRules) return object; - let message = new $root.vtctldata.Keyspace(); - if (object.name != null) - message.name = String(object.name); - if (object.keyspace != null) { - if (typeof object.keyspace !== "object") - throw TypeError(".vtctldata.Keyspace.keyspace: object expected"); - message.keyspace = $root.topodata.Keyspace.fromObject(object.keyspace); + let message = new $root.vschema.RoutingRules(); + if (object.rules) { + if (!Array.isArray(object.rules)) + throw TypeError(".vschema.RoutingRules.rules: array expected"); + message.rules = []; + for (let i = 0; i < object.rules.length; ++i) { + if (typeof object.rules[i] !== "object") + throw TypeError(".vschema.RoutingRules.rules: object expected"); + message.rules[i] = $root.vschema.RoutingRule.fromObject(object.rules[i]); + } } return message; }; /** - * Creates a plain object from a Keyspace message. Also converts values to other types if specified. + * Creates a plain object from a RoutingRules message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.Keyspace + * @memberof vschema.RoutingRules * @static - * @param {vtctldata.Keyspace} message Keyspace + * @param {vschema.RoutingRules} message RoutingRules * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Keyspace.toObject = function toObject(message, options) { + RoutingRules.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.name = ""; - object.keyspace = null; + if (options.arrays || options.defaults) + object.rules = []; + if (message.rules && message.rules.length) { + object.rules = []; + for (let j = 0; j < message.rules.length; ++j) + object.rules[j] = $root.vschema.RoutingRule.toObject(message.rules[j], options); } - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = $root.topodata.Keyspace.toObject(message.keyspace, options); return object; }; /** - * Converts this Keyspace to JSON. + * Converts this RoutingRules to JSON. 
* @function toJSON - * @memberof vtctldata.Keyspace + * @memberof vschema.RoutingRules * @instance * @returns {Object.} JSON object */ - Keyspace.prototype.toJSON = function toJSON() { + RoutingRules.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for Keyspace + * Gets the default type url for RoutingRules * @function getTypeUrl - * @memberof vtctldata.Keyspace + * @memberof vschema.RoutingRules * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - Keyspace.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RoutingRules.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.Keyspace"; + return typeUrlPrefix + "/vschema.RoutingRules"; }; - return Keyspace; + return RoutingRules; })(); - vtctldata.Shard = (function() { + vschema.RoutingRule = (function() { /** - * Properties of a Shard. - * @memberof vtctldata - * @interface IShard - * @property {string|null} [keyspace] Shard keyspace - * @property {string|null} [name] Shard name - * @property {topodata.IShard|null} [shard] Shard shard + * Properties of a RoutingRule. + * @memberof vschema + * @interface IRoutingRule + * @property {string|null} [from_table] RoutingRule from_table + * @property {Array.|null} [to_tables] RoutingRule to_tables */ /** - * Constructs a new Shard. - * @memberof vtctldata - * @classdesc Represents a Shard. - * @implements IShard + * Constructs a new RoutingRule. + * @memberof vschema + * @classdesc Represents a RoutingRule. 
+ * @implements IRoutingRule * @constructor - * @param {vtctldata.IShard=} [properties] Properties to set + * @param {vschema.IRoutingRule=} [properties] Properties to set */ - function Shard(properties) { + function RoutingRule(properties) { + this.to_tables = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -98727,103 +100054,92 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Shard keyspace. - * @member {string} keyspace - * @memberof vtctldata.Shard - * @instance - */ - Shard.prototype.keyspace = ""; - - /** - * Shard name. - * @member {string} name - * @memberof vtctldata.Shard + * RoutingRule from_table. + * @member {string} from_table + * @memberof vschema.RoutingRule * @instance */ - Shard.prototype.name = ""; + RoutingRule.prototype.from_table = ""; /** - * Shard shard. - * @member {topodata.IShard|null|undefined} shard - * @memberof vtctldata.Shard + * RoutingRule to_tables. + * @member {Array.} to_tables + * @memberof vschema.RoutingRule * @instance */ - Shard.prototype.shard = null; + RoutingRule.prototype.to_tables = $util.emptyArray; /** - * Creates a new Shard instance using the specified properties. + * Creates a new RoutingRule instance using the specified properties. * @function create - * @memberof vtctldata.Shard + * @memberof vschema.RoutingRule * @static - * @param {vtctldata.IShard=} [properties] Properties to set - * @returns {vtctldata.Shard} Shard instance + * @param {vschema.IRoutingRule=} [properties] Properties to set + * @returns {vschema.RoutingRule} RoutingRule instance */ - Shard.create = function create(properties) { - return new Shard(properties); + RoutingRule.create = function create(properties) { + return new RoutingRule(properties); }; /** - * Encodes the specified Shard message. Does not implicitly {@link vtctldata.Shard.verify|verify} messages. + * Encodes the specified RoutingRule message. 
Does not implicitly {@link vschema.RoutingRule.verify|verify} messages. * @function encode - * @memberof vtctldata.Shard + * @memberof vschema.RoutingRule * @static - * @param {vtctldata.IShard} message Shard message or plain object to encode + * @param {vschema.IRoutingRule} message RoutingRule message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Shard.encode = function encode(message, writer) { + RoutingRule.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.name != null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.name); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - $root.topodata.Shard.encode(message.shard, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.from_table != null && Object.hasOwnProperty.call(message, "from_table")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.from_table); + if (message.to_tables != null && message.to_tables.length) + for (let i = 0; i < message.to_tables.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.to_tables[i]); return writer; }; /** - * Encodes the specified Shard message, length delimited. Does not implicitly {@link vtctldata.Shard.verify|verify} messages. + * Encodes the specified RoutingRule message, length delimited. Does not implicitly {@link vschema.RoutingRule.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.Shard + * @memberof vschema.RoutingRule * @static - * @param {vtctldata.IShard} message Shard message or plain object to encode + * @param {vschema.IRoutingRule} message RoutingRule message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Shard.encodeDelimited = function encodeDelimited(message, writer) { + RoutingRule.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Shard message from the specified reader or buffer. + * Decodes a RoutingRule message from the specified reader or buffer. * @function decode - * @memberof vtctldata.Shard + * @memberof vschema.RoutingRule * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.Shard} Shard + * @returns {vschema.RoutingRule} RoutingRule * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Shard.decode = function decode(reader, length) { + RoutingRule.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.Shard(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vschema.RoutingRule(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); + message.from_table = reader.string(); break; } case 2: { - message.name = reader.string(); - break; - } - case 3: { - message.shard = $root.topodata.Shard.decode(reader, reader.uint32()); + if (!(message.to_tables && message.to_tables.length)) + message.to_tables = []; + message.to_tables.push(reader.string()); break; } default: @@ -98835,151 +100151,152 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a Shard message from the specified reader or buffer, length delimited. + * Decodes a RoutingRule message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.Shard + * @memberof vschema.RoutingRule * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.Shard} Shard + * @returns {vschema.RoutingRule} RoutingRule * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Shard.decodeDelimited = function decodeDelimited(reader) { + RoutingRule.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Shard message. + * Verifies a RoutingRule message. 
* @function verify - * @memberof vtctldata.Shard + * @memberof vschema.RoutingRule * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Shard.verify = function verify(message) { + RoutingRule.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) { - let error = $root.topodata.Shard.verify(message.shard); - if (error) - return "shard." + error; + if (message.from_table != null && message.hasOwnProperty("from_table")) + if (!$util.isString(message.from_table)) + return "from_table: string expected"; + if (message.to_tables != null && message.hasOwnProperty("to_tables")) { + if (!Array.isArray(message.to_tables)) + return "to_tables: array expected"; + for (let i = 0; i < message.to_tables.length; ++i) + if (!$util.isString(message.to_tables[i])) + return "to_tables: string[] expected"; } return null; }; /** - * Creates a Shard message from a plain object. Also converts values to their respective internal types. + * Creates a RoutingRule message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.Shard + * @memberof vschema.RoutingRule * @static * @param {Object.} object Plain object - * @returns {vtctldata.Shard} Shard + * @returns {vschema.RoutingRule} RoutingRule */ - Shard.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.Shard) + RoutingRule.fromObject = function fromObject(object) { + if (object instanceof $root.vschema.RoutingRule) return object; - let message = new $root.vtctldata.Shard(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.name != null) - message.name = String(object.name); - if (object.shard != null) { - if (typeof object.shard !== "object") - throw TypeError(".vtctldata.Shard.shard: object expected"); - message.shard = $root.topodata.Shard.fromObject(object.shard); + let message = new $root.vschema.RoutingRule(); + if (object.from_table != null) + message.from_table = String(object.from_table); + if (object.to_tables) { + if (!Array.isArray(object.to_tables)) + throw TypeError(".vschema.RoutingRule.to_tables: array expected"); + message.to_tables = []; + for (let i = 0; i < object.to_tables.length; ++i) + message.to_tables[i] = String(object.to_tables[i]); } return message; }; /** - * Creates a plain object from a Shard message. Also converts values to other types if specified. + * Creates a plain object from a RoutingRule message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.Shard + * @memberof vschema.RoutingRule * @static - * @param {vtctldata.Shard} message Shard + * @param {vschema.RoutingRule} message RoutingRule * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Shard.toObject = function toObject(message, options) { + RoutingRule.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.keyspace = ""; - object.name = ""; - object.shard = null; + if (options.arrays || options.defaults) + object.to_tables = []; + if (options.defaults) + object.from_table = ""; + if (message.from_table != null && message.hasOwnProperty("from_table")) + object.from_table = message.from_table; + if (message.to_tables && message.to_tables.length) { + object.to_tables = []; + for (let j = 0; j < message.to_tables.length; ++j) + object.to_tables[j] = message.to_tables[j]; } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = $root.topodata.Shard.toObject(message.shard, options); return object; }; /** - * Converts this Shard to JSON. + * Converts this RoutingRule to JSON. 
* @function toJSON - * @memberof vtctldata.Shard + * @memberof vschema.RoutingRule * @instance * @returns {Object.} JSON object */ - Shard.prototype.toJSON = function toJSON() { + RoutingRule.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for Shard + * Gets the default type url for RoutingRule * @function getTypeUrl - * @memberof vtctldata.Shard + * @memberof vschema.RoutingRule * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - Shard.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RoutingRule.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.Shard"; + return typeUrlPrefix + "/vschema.RoutingRule"; }; - return Shard; + return RoutingRule; })(); - vtctldata.Workflow = (function() { + vschema.Keyspace = (function() { /** - * Properties of a Workflow. - * @memberof vtctldata - * @interface IWorkflow - * @property {string|null} [name] Workflow name - * @property {vtctldata.Workflow.IReplicationLocation|null} [source] Workflow source - * @property {vtctldata.Workflow.IReplicationLocation|null} [target] Workflow target - * @property {number|Long|null} [max_v_replication_lag] Workflow max_v_replication_lag - * @property {Object.|null} [shard_streams] Workflow shard_streams - * @property {string|null} [workflow_type] Workflow workflow_type - * @property {string|null} [workflow_sub_type] Workflow workflow_sub_type + * Properties of a Keyspace. 
+ * @memberof vschema + * @interface IKeyspace + * @property {boolean|null} [sharded] Keyspace sharded + * @property {Object.|null} [vindexes] Keyspace vindexes + * @property {Object.|null} [tables] Keyspace tables + * @property {boolean|null} [require_explicit_routing] Keyspace require_explicit_routing + * @property {vschema.Keyspace.ForeignKeyMode|null} [foreign_key_mode] Keyspace foreign_key_mode + * @property {boolean|null} [cross_tablet] Keyspace cross_tablet + * @property {boolean|null} [attach_enable] Keyspace attach_enable + * @property {string|null} [attach_to] Keyspace attach_to */ /** - * Constructs a new Workflow. - * @memberof vtctldata - * @classdesc Represents a Workflow. - * @implements IWorkflow + * Constructs a new Keyspace. + * @memberof vschema + * @classdesc Represents a Keyspace. + * @implements IKeyspace * @constructor - * @param {vtctldata.IWorkflow=} [properties] Properties to set + * @param {vschema.IKeyspace=} [properties] Properties to set */ - function Workflow(properties) { - this.shard_streams = {}; + function Keyspace(properties) { + this.vindexes = {}; + this.tables = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -98987,155 +100304,179 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Workflow name. - * @member {string} name - * @memberof vtctldata.Workflow + * Keyspace sharded. + * @member {boolean} sharded + * @memberof vschema.Keyspace * @instance */ - Workflow.prototype.name = ""; + Keyspace.prototype.sharded = false; /** - * Workflow source. - * @member {vtctldata.Workflow.IReplicationLocation|null|undefined} source - * @memberof vtctldata.Workflow + * Keyspace vindexes. + * @member {Object.} vindexes + * @memberof vschema.Keyspace * @instance */ - Workflow.prototype.source = null; + Keyspace.prototype.vindexes = $util.emptyObject; /** - * Workflow target. 
- * @member {vtctldata.Workflow.IReplicationLocation|null|undefined} target - * @memberof vtctldata.Workflow + * Keyspace tables. + * @member {Object.} tables + * @memberof vschema.Keyspace * @instance */ - Workflow.prototype.target = null; + Keyspace.prototype.tables = $util.emptyObject; /** - * Workflow max_v_replication_lag. - * @member {number|Long} max_v_replication_lag - * @memberof vtctldata.Workflow + * Keyspace require_explicit_routing. + * @member {boolean} require_explicit_routing + * @memberof vschema.Keyspace * @instance */ - Workflow.prototype.max_v_replication_lag = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + Keyspace.prototype.require_explicit_routing = false; /** - * Workflow shard_streams. - * @member {Object.} shard_streams - * @memberof vtctldata.Workflow + * Keyspace foreign_key_mode. + * @member {vschema.Keyspace.ForeignKeyMode} foreign_key_mode + * @memberof vschema.Keyspace * @instance */ - Workflow.prototype.shard_streams = $util.emptyObject; + Keyspace.prototype.foreign_key_mode = 0; /** - * Workflow workflow_type. - * @member {string} workflow_type - * @memberof vtctldata.Workflow + * Keyspace cross_tablet. + * @member {boolean} cross_tablet + * @memberof vschema.Keyspace * @instance */ - Workflow.prototype.workflow_type = ""; + Keyspace.prototype.cross_tablet = false; /** - * Workflow workflow_sub_type. - * @member {string} workflow_sub_type - * @memberof vtctldata.Workflow + * Keyspace attach_enable. + * @member {boolean} attach_enable + * @memberof vschema.Keyspace * @instance */ - Workflow.prototype.workflow_sub_type = ""; + Keyspace.prototype.attach_enable = false; /** - * Creates a new Workflow instance using the specified properties. + * Keyspace attach_to. + * @member {string} attach_to + * @memberof vschema.Keyspace + * @instance + */ + Keyspace.prototype.attach_to = ""; + + /** + * Creates a new Keyspace instance using the specified properties. 
* @function create - * @memberof vtctldata.Workflow + * @memberof vschema.Keyspace * @static - * @param {vtctldata.IWorkflow=} [properties] Properties to set - * @returns {vtctldata.Workflow} Workflow instance + * @param {vschema.IKeyspace=} [properties] Properties to set + * @returns {vschema.Keyspace} Keyspace instance */ - Workflow.create = function create(properties) { - return new Workflow(properties); + Keyspace.create = function create(properties) { + return new Keyspace(properties); }; /** - * Encodes the specified Workflow message. Does not implicitly {@link vtctldata.Workflow.verify|verify} messages. + * Encodes the specified Keyspace message. Does not implicitly {@link vschema.Keyspace.verify|verify} messages. * @function encode - * @memberof vtctldata.Workflow + * @memberof vschema.Keyspace * @static - * @param {vtctldata.IWorkflow} message Workflow message or plain object to encode + * @param {vschema.IKeyspace} message Keyspace message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Workflow.encode = function encode(message, writer) { + Keyspace.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.name != null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); - if (message.source != null && Object.hasOwnProperty.call(message, "source")) - $root.vtctldata.Workflow.ReplicationLocation.encode(message.source, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.target != null && Object.hasOwnProperty.call(message, "target")) - $root.vtctldata.Workflow.ReplicationLocation.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.max_v_replication_lag != null && Object.hasOwnProperty.call(message, "max_v_replication_lag")) - writer.uint32(/* id 4, wireType 0 =*/32).int64(message.max_v_replication_lag); - if 
(message.shard_streams != null && Object.hasOwnProperty.call(message, "shard_streams")) - for (let keys = Object.keys(message.shard_streams), i = 0; i < keys.length; ++i) { - writer.uint32(/* id 5, wireType 2 =*/42).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); - $root.vtctldata.Workflow.ShardStream.encode(message.shard_streams[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + if (message.sharded != null && Object.hasOwnProperty.call(message, "sharded")) + writer.uint32(/* id 1, wireType 0 =*/8).bool(message.sharded); + if (message.vindexes != null && Object.hasOwnProperty.call(message, "vindexes")) + for (let keys = Object.keys(message.vindexes), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.vschema.Vindex.encode(message.vindexes[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); } - if (message.workflow_type != null && Object.hasOwnProperty.call(message, "workflow_type")) - writer.uint32(/* id 6, wireType 2 =*/50).string(message.workflow_type); - if (message.workflow_sub_type != null && Object.hasOwnProperty.call(message, "workflow_sub_type")) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.workflow_sub_type); + if (message.tables != null && Object.hasOwnProperty.call(message, "tables")) + for (let keys = Object.keys(message.tables), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 3, wireType 2 =*/26).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.vschema.Table.encode(message.tables[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } + if (message.require_explicit_routing != null && Object.hasOwnProperty.call(message, "require_explicit_routing")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.require_explicit_routing); + if (message.foreign_key_mode != null && Object.hasOwnProperty.call(message, "foreign_key_mode")) + 
writer.uint32(/* id 5, wireType 0 =*/40).int32(message.foreign_key_mode); + if (message.cross_tablet != null && Object.hasOwnProperty.call(message, "cross_tablet")) + writer.uint32(/* id 85, wireType 0 =*/680).bool(message.cross_tablet); + if (message.attach_enable != null && Object.hasOwnProperty.call(message, "attach_enable")) + writer.uint32(/* id 86, wireType 0 =*/688).bool(message.attach_enable); + if (message.attach_to != null && Object.hasOwnProperty.call(message, "attach_to")) + writer.uint32(/* id 87, wireType 2 =*/698).string(message.attach_to); return writer; }; /** - * Encodes the specified Workflow message, length delimited. Does not implicitly {@link vtctldata.Workflow.verify|verify} messages. + * Encodes the specified Keyspace message, length delimited. Does not implicitly {@link vschema.Keyspace.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.Workflow + * @memberof vschema.Keyspace * @static - * @param {vtctldata.IWorkflow} message Workflow message or plain object to encode + * @param {vschema.IKeyspace} message Keyspace message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - Workflow.encodeDelimited = function encodeDelimited(message, writer) { + Keyspace.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a Workflow message from the specified reader or buffer. + * Decodes a Keyspace message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.Workflow + * @memberof vschema.Keyspace * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.Workflow} Workflow + * @returns {vschema.Keyspace} Keyspace * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Workflow.decode = function decode(reader, length) { + Keyspace.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.Workflow(), key, value; + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.Keyspace(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.name = reader.string(); + message.sharded = reader.bool(); break; } case 2: { - message.source = $root.vtctldata.Workflow.ReplicationLocation.decode(reader, reader.uint32()); + if (message.vindexes === $util.emptyObject) + message.vindexes = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.vschema.Vindex.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.vindexes[key] = value; break; } case 3: { - message.target = $root.vtctldata.Workflow.ReplicationLocation.decode(reader, reader.uint32()); - break; - } - case 4: { - message.max_v_replication_lag = reader.int64(); - break; - } - case 5: { - if (message.shard_streams === $util.emptyObject) - message.shard_streams = {}; + if (message.tables === $util.emptyObject) + message.tables = {}; let end2 = reader.uint32() + reader.pos; key = 
""; value = null; @@ -99146,22 +100487,34 @@ export const vtctldata = $root.vtctldata = (() => { key = reader.string(); break; case 2: - value = $root.vtctldata.Workflow.ShardStream.decode(reader, reader.uint32()); + value = $root.vschema.Table.decode(reader, reader.uint32()); break; default: reader.skipType(tag2 & 7); break; } } - message.shard_streams[key] = value; + message.tables[key] = value; break; } - case 6: { - message.workflow_type = reader.string(); + case 4: { + message.require_explicit_routing = reader.bool(); break; } - case 7: { - message.workflow_sub_type = reader.string(); + case 5: { + message.foreign_key_mode = reader.int32(); + break; + } + case 85: { + message.cross_tablet = reader.bool(); + break; + } + case 86: { + message.attach_enable = reader.bool(); + break; + } + case 87: { + message.attach_to = reader.string(); break; } default: @@ -99173,2016 +100526,21418 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a Workflow message from the specified reader or buffer, length delimited. + * Decodes a Keyspace message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.Workflow + * @memberof vschema.Keyspace * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.Workflow} Workflow + * @returns {vschema.Keyspace} Keyspace * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - Workflow.decodeDelimited = function decodeDelimited(reader) { + Keyspace.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a Workflow message. + * Verifies a Keyspace message. 
* @function verify - * @memberof vtctldata.Workflow + * @memberof vschema.Keyspace * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - Workflow.verify = function verify(message) { + Keyspace.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; - if (message.source != null && message.hasOwnProperty("source")) { - let error = $root.vtctldata.Workflow.ReplicationLocation.verify(message.source); - if (error) - return "source." + error; - } - if (message.target != null && message.hasOwnProperty("target")) { - let error = $root.vtctldata.Workflow.ReplicationLocation.verify(message.target); - if (error) - return "target." + error; + if (message.sharded != null && message.hasOwnProperty("sharded")) + if (typeof message.sharded !== "boolean") + return "sharded: boolean expected"; + if (message.vindexes != null && message.hasOwnProperty("vindexes")) { + if (!$util.isObject(message.vindexes)) + return "vindexes: object expected"; + let key = Object.keys(message.vindexes); + for (let i = 0; i < key.length; ++i) { + let error = $root.vschema.Vindex.verify(message.vindexes[key[i]]); + if (error) + return "vindexes." 
+ error; + } } - if (message.max_v_replication_lag != null && message.hasOwnProperty("max_v_replication_lag")) - if (!$util.isInteger(message.max_v_replication_lag) && !(message.max_v_replication_lag && $util.isInteger(message.max_v_replication_lag.low) && $util.isInteger(message.max_v_replication_lag.high))) - return "max_v_replication_lag: integer|Long expected"; - if (message.shard_streams != null && message.hasOwnProperty("shard_streams")) { - if (!$util.isObject(message.shard_streams)) - return "shard_streams: object expected"; - let key = Object.keys(message.shard_streams); + if (message.tables != null && message.hasOwnProperty("tables")) { + if (!$util.isObject(message.tables)) + return "tables: object expected"; + let key = Object.keys(message.tables); for (let i = 0; i < key.length; ++i) { - let error = $root.vtctldata.Workflow.ShardStream.verify(message.shard_streams[key[i]]); + let error = $root.vschema.Table.verify(message.tables[key[i]]); if (error) - return "shard_streams." + error; + return "tables." 
+ error; } } - if (message.workflow_type != null && message.hasOwnProperty("workflow_type")) - if (!$util.isString(message.workflow_type)) - return "workflow_type: string expected"; - if (message.workflow_sub_type != null && message.hasOwnProperty("workflow_sub_type")) - if (!$util.isString(message.workflow_sub_type)) - return "workflow_sub_type: string expected"; + if (message.require_explicit_routing != null && message.hasOwnProperty("require_explicit_routing")) + if (typeof message.require_explicit_routing !== "boolean") + return "require_explicit_routing: boolean expected"; + if (message.foreign_key_mode != null && message.hasOwnProperty("foreign_key_mode")) + switch (message.foreign_key_mode) { + default: + return "foreign_key_mode: enum value expected"; + case 0: + case 1: + case 2: + case 3: + break; + } + if (message.cross_tablet != null && message.hasOwnProperty("cross_tablet")) + if (typeof message.cross_tablet !== "boolean") + return "cross_tablet: boolean expected"; + if (message.attach_enable != null && message.hasOwnProperty("attach_enable")) + if (typeof message.attach_enable !== "boolean") + return "attach_enable: boolean expected"; + if (message.attach_to != null && message.hasOwnProperty("attach_to")) + if (!$util.isString(message.attach_to)) + return "attach_to: string expected"; return null; }; /** - * Creates a Workflow message from a plain object. Also converts values to their respective internal types. + * Creates a Keyspace message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.Workflow + * @memberof vschema.Keyspace * @static * @param {Object.} object Plain object - * @returns {vtctldata.Workflow} Workflow + * @returns {vschema.Keyspace} Keyspace */ - Workflow.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.Workflow) + Keyspace.fromObject = function fromObject(object) { + if (object instanceof $root.vschema.Keyspace) return object; - let message = new $root.vtctldata.Workflow(); - if (object.name != null) - message.name = String(object.name); - if (object.source != null) { - if (typeof object.source !== "object") - throw TypeError(".vtctldata.Workflow.source: object expected"); - message.source = $root.vtctldata.Workflow.ReplicationLocation.fromObject(object.source); + let message = new $root.vschema.Keyspace(); + if (object.sharded != null) + message.sharded = Boolean(object.sharded); + if (object.vindexes) { + if (typeof object.vindexes !== "object") + throw TypeError(".vschema.Keyspace.vindexes: object expected"); + message.vindexes = {}; + for (let keys = Object.keys(object.vindexes), i = 0; i < keys.length; ++i) { + if (typeof object.vindexes[keys[i]] !== "object") + throw TypeError(".vschema.Keyspace.vindexes: object expected"); + message.vindexes[keys[i]] = $root.vschema.Vindex.fromObject(object.vindexes[keys[i]]); + } } - if (object.target != null) { - if (typeof object.target !== "object") - throw TypeError(".vtctldata.Workflow.target: object expected"); - message.target = $root.vtctldata.Workflow.ReplicationLocation.fromObject(object.target); + if (object.tables) { + if (typeof object.tables !== "object") + throw TypeError(".vschema.Keyspace.tables: object expected"); + message.tables = {}; + for (let keys = Object.keys(object.tables), i = 0; i < keys.length; ++i) { + if (typeof object.tables[keys[i]] !== "object") + throw TypeError(".vschema.Keyspace.tables: object expected"); + message.tables[keys[i]] = 
$root.vschema.Table.fromObject(object.tables[keys[i]]); + } } - if (object.max_v_replication_lag != null) - if ($util.Long) - (message.max_v_replication_lag = $util.Long.fromValue(object.max_v_replication_lag)).unsigned = false; - else if (typeof object.max_v_replication_lag === "string") - message.max_v_replication_lag = parseInt(object.max_v_replication_lag, 10); - else if (typeof object.max_v_replication_lag === "number") - message.max_v_replication_lag = object.max_v_replication_lag; - else if (typeof object.max_v_replication_lag === "object") - message.max_v_replication_lag = new $util.LongBits(object.max_v_replication_lag.low >>> 0, object.max_v_replication_lag.high >>> 0).toNumber(); - if (object.shard_streams) { - if (typeof object.shard_streams !== "object") - throw TypeError(".vtctldata.Workflow.shard_streams: object expected"); - message.shard_streams = {}; - for (let keys = Object.keys(object.shard_streams), i = 0; i < keys.length; ++i) { - if (typeof object.shard_streams[keys[i]] !== "object") - throw TypeError(".vtctldata.Workflow.shard_streams: object expected"); - message.shard_streams[keys[i]] = $root.vtctldata.Workflow.ShardStream.fromObject(object.shard_streams[keys[i]]); + if (object.require_explicit_routing != null) + message.require_explicit_routing = Boolean(object.require_explicit_routing); + switch (object.foreign_key_mode) { + default: + if (typeof object.foreign_key_mode === "number") { + message.foreign_key_mode = object.foreign_key_mode; + break; } + break; + case "unspecified": + case 0: + message.foreign_key_mode = 0; + break; + case "disallow": + case 1: + message.foreign_key_mode = 1; + break; + case "unmanaged": + case 2: + message.foreign_key_mode = 2; + break; + case "managed": + case 3: + message.foreign_key_mode = 3; + break; } - if (object.workflow_type != null) - message.workflow_type = String(object.workflow_type); - if (object.workflow_sub_type != null) - message.workflow_sub_type = String(object.workflow_sub_type); + if 
(object.cross_tablet != null) + message.cross_tablet = Boolean(object.cross_tablet); + if (object.attach_enable != null) + message.attach_enable = Boolean(object.attach_enable); + if (object.attach_to != null) + message.attach_to = String(object.attach_to); return message; }; /** - * Creates a plain object from a Workflow message. Also converts values to other types if specified. + * Creates a plain object from a Keyspace message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.Workflow + * @memberof vschema.Keyspace * @static - * @param {vtctldata.Workflow} message Workflow + * @param {vschema.Keyspace} message Keyspace * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - Workflow.toObject = function toObject(message, options) { + Keyspace.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.objects || options.defaults) - object.shard_streams = {}; + if (options.objects || options.defaults) { + object.vindexes = {}; + object.tables = {}; + } if (options.defaults) { - object.name = ""; - object.source = null; - object.target = null; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.max_v_replication_lag = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.max_v_replication_lag = options.longs === String ? "0" : 0; - object.workflow_type = ""; - object.workflow_sub_type = ""; + object.sharded = false; + object.require_explicit_routing = false; + object.foreign_key_mode = options.enums === String ? 
"unspecified" : 0; + object.cross_tablet = false; + object.attach_enable = false; + object.attach_to = ""; } - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; - if (message.source != null && message.hasOwnProperty("source")) - object.source = $root.vtctldata.Workflow.ReplicationLocation.toObject(message.source, options); - if (message.target != null && message.hasOwnProperty("target")) - object.target = $root.vtctldata.Workflow.ReplicationLocation.toObject(message.target, options); - if (message.max_v_replication_lag != null && message.hasOwnProperty("max_v_replication_lag")) - if (typeof message.max_v_replication_lag === "number") - object.max_v_replication_lag = options.longs === String ? String(message.max_v_replication_lag) : message.max_v_replication_lag; - else - object.max_v_replication_lag = options.longs === String ? $util.Long.prototype.toString.call(message.max_v_replication_lag) : options.longs === Number ? new $util.LongBits(message.max_v_replication_lag.low >>> 0, message.max_v_replication_lag.high >>> 0).toNumber() : message.max_v_replication_lag; + if (message.sharded != null && message.hasOwnProperty("sharded")) + object.sharded = message.sharded; let keys2; - if (message.shard_streams && (keys2 = Object.keys(message.shard_streams)).length) { - object.shard_streams = {}; + if (message.vindexes && (keys2 = Object.keys(message.vindexes)).length) { + object.vindexes = {}; for (let j = 0; j < keys2.length; ++j) - object.shard_streams[keys2[j]] = $root.vtctldata.Workflow.ShardStream.toObject(message.shard_streams[keys2[j]], options); + object.vindexes[keys2[j]] = $root.vschema.Vindex.toObject(message.vindexes[keys2[j]], options); } - if (message.workflow_type != null && message.hasOwnProperty("workflow_type")) - object.workflow_type = message.workflow_type; - if (message.workflow_sub_type != null && message.hasOwnProperty("workflow_sub_type")) - object.workflow_sub_type = message.workflow_sub_type; + if 
(message.tables && (keys2 = Object.keys(message.tables)).length) { + object.tables = {}; + for (let j = 0; j < keys2.length; ++j) + object.tables[keys2[j]] = $root.vschema.Table.toObject(message.tables[keys2[j]], options); + } + if (message.require_explicit_routing != null && message.hasOwnProperty("require_explicit_routing")) + object.require_explicit_routing = message.require_explicit_routing; + if (message.foreign_key_mode != null && message.hasOwnProperty("foreign_key_mode")) + object.foreign_key_mode = options.enums === String ? $root.vschema.Keyspace.ForeignKeyMode[message.foreign_key_mode] === undefined ? message.foreign_key_mode : $root.vschema.Keyspace.ForeignKeyMode[message.foreign_key_mode] : message.foreign_key_mode; + if (message.cross_tablet != null && message.hasOwnProperty("cross_tablet")) + object.cross_tablet = message.cross_tablet; + if (message.attach_enable != null && message.hasOwnProperty("attach_enable")) + object.attach_enable = message.attach_enable; + if (message.attach_to != null && message.hasOwnProperty("attach_to")) + object.attach_to = message.attach_to; return object; }; /** - * Converts this Workflow to JSON. + * Converts this Keyspace to JSON. 
* @function toJSON - * @memberof vtctldata.Workflow + * @memberof vschema.Keyspace * @instance * @returns {Object.} JSON object */ - Workflow.prototype.toJSON = function toJSON() { + Keyspace.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for Workflow + * Gets the default type url for Keyspace * @function getTypeUrl - * @memberof vtctldata.Workflow + * @memberof vschema.Keyspace * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - Workflow.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + Keyspace.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.Workflow"; + return typeUrlPrefix + "/vschema.Keyspace"; }; - Workflow.ReplicationLocation = (function() { + /** + * ForeignKeyMode enum. + * @name vschema.Keyspace.ForeignKeyMode + * @enum {number} + * @property {number} unspecified=0 unspecified value + * @property {number} disallow=1 disallow value + * @property {number} unmanaged=2 unmanaged value + * @property {number} managed=3 managed value + */ + Keyspace.ForeignKeyMode = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "unspecified"] = 0; + values[valuesById[1] = "disallow"] = 1; + values[valuesById[2] = "unmanaged"] = 2; + values[valuesById[3] = "managed"] = 3; + return values; + })(); - /** - * Properties of a ReplicationLocation. - * @memberof vtctldata.Workflow - * @interface IReplicationLocation - * @property {string|null} [keyspace] ReplicationLocation keyspace - * @property {Array.|null} [shards] ReplicationLocation shards - */ + return Keyspace; + })(); - /** - * Constructs a new ReplicationLocation. - * @memberof vtctldata.Workflow - * @classdesc Represents a ReplicationLocation. 
- * @implements IReplicationLocation - * @constructor - * @param {vtctldata.Workflow.IReplicationLocation=} [properties] Properties to set - */ - function ReplicationLocation(properties) { - this.shards = []; - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + vschema.Vindex = (function() { - /** - * ReplicationLocation keyspace. - * @member {string} keyspace - * @memberof vtctldata.Workflow.ReplicationLocation - * @instance - */ - ReplicationLocation.prototype.keyspace = ""; + /** + * Properties of a Vindex. + * @memberof vschema + * @interface IVindex + * @property {string|null} [type] Vindex type + * @property {Object.|null} [params] Vindex params + * @property {string|null} [owner] Vindex owner + */ - /** - * ReplicationLocation shards. - * @member {Array.} shards - * @memberof vtctldata.Workflow.ReplicationLocation - * @instance - */ - ReplicationLocation.prototype.shards = $util.emptyArray; + /** + * Constructs a new Vindex. + * @memberof vschema + * @classdesc Represents a Vindex. + * @implements IVindex + * @constructor + * @param {vschema.IVindex=} [properties] Properties to set + */ + function Vindex(properties) { + this.params = {}; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } - /** - * Creates a new ReplicationLocation instance using the specified properties. - * @function create - * @memberof vtctldata.Workflow.ReplicationLocation - * @static - * @param {vtctldata.Workflow.IReplicationLocation=} [properties] Properties to set - * @returns {vtctldata.Workflow.ReplicationLocation} ReplicationLocation instance - */ - ReplicationLocation.create = function create(properties) { - return new ReplicationLocation(properties); - }; + /** + * Vindex type. 
+ * @member {string} type + * @memberof vschema.Vindex + * @instance + */ + Vindex.prototype.type = ""; - /** - * Encodes the specified ReplicationLocation message. Does not implicitly {@link vtctldata.Workflow.ReplicationLocation.verify|verify} messages. - * @function encode - * @memberof vtctldata.Workflow.ReplicationLocation - * @static - * @param {vtctldata.Workflow.IReplicationLocation} message ReplicationLocation message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - ReplicationLocation.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shards != null && message.shards.length) - for (let i = 0; i < message.shards.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shards[i]); - return writer; - }; + /** + * Vindex params. + * @member {Object.} params + * @memberof vschema.Vindex + * @instance + */ + Vindex.prototype.params = $util.emptyObject; - /** - * Encodes the specified ReplicationLocation message, length delimited. Does not implicitly {@link vtctldata.Workflow.ReplicationLocation.verify|verify} messages. - * @function encodeDelimited - * @memberof vtctldata.Workflow.ReplicationLocation - * @static - * @param {vtctldata.Workflow.IReplicationLocation} message ReplicationLocation message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - ReplicationLocation.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; + /** + * Vindex owner. 
+ * @member {string} owner + * @memberof vschema.Vindex + * @instance + */ + Vindex.prototype.owner = ""; - /** - * Decodes a ReplicationLocation message from the specified reader or buffer. - * @function decode - * @memberof vtctldata.Workflow.ReplicationLocation - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.Workflow.ReplicationLocation} ReplicationLocation - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - ReplicationLocation.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.Workflow.ReplicationLocation(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - if (!(message.shards && message.shards.length)) - message.shards = []; - message.shards.push(reader.string()); - break; + /** + * Creates a new Vindex instance using the specified properties. + * @function create + * @memberof vschema.Vindex + * @static + * @param {vschema.IVindex=} [properties] Properties to set + * @returns {vschema.Vindex} Vindex instance + */ + Vindex.create = function create(properties) { + return new Vindex(properties); + }; + + /** + * Encodes the specified Vindex message. Does not implicitly {@link vschema.Vindex.verify|verify} messages. 
+ * @function encode + * @memberof vschema.Vindex + * @static + * @param {vschema.IVindex} message Vindex message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Vindex.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.type != null && Object.hasOwnProperty.call(message, "type")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.type); + if (message.params != null && Object.hasOwnProperty.call(message, "params")) + for (let keys = Object.keys(message.params), i = 0; i < keys.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.params[keys[i]]).ldelim(); + if (message.owner != null && Object.hasOwnProperty.call(message, "owner")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.owner); + return writer; + }; + + /** + * Encodes the specified Vindex message, length delimited. Does not implicitly {@link vschema.Vindex.verify|verify} messages. + * @function encodeDelimited + * @memberof vschema.Vindex + * @static + * @param {vschema.IVindex} message Vindex message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Vindex.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Vindex message from the specified reader or buffer. 
+ * @function decode + * @memberof vschema.Vindex + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vschema.Vindex} Vindex + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Vindex.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.Vindex(), key, value; + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.type = reader.string(); + break; + } + case 2: { + if (message.params === $util.emptyObject) + message.params = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = ""; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = reader.string(); + break; + default: + reader.skipType(tag2 & 7); + break; + } } - default: - reader.skipType(tag & 7); + message.params[key] = value; + break; + } + case 3: { + message.owner = reader.string(); break; } + default: + reader.skipType(tag & 7); + break; } - return message; - }; - - /** - * Decodes a ReplicationLocation message from the specified reader or buffer, length delimited. 
- * @function decodeDelimited - * @memberof vtctldata.Workflow.ReplicationLocation - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.Workflow.ReplicationLocation} ReplicationLocation - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - ReplicationLocation.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; + } + return message; + }; - /** - * Verifies a ReplicationLocation message. - * @function verify - * @memberof vtctldata.Workflow.ReplicationLocation - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - ReplicationLocation.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shards != null && message.hasOwnProperty("shards")) { - if (!Array.isArray(message.shards)) - return "shards: array expected"; - for (let i = 0; i < message.shards.length; ++i) - if (!$util.isString(message.shards[i])) - return "shards: string[] expected"; - } - return null; - }; + /** + * Decodes a Vindex message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vschema.Vindex + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vschema.Vindex} Vindex + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Vindex.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; - /** - * Creates a ReplicationLocation message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof vtctldata.Workflow.ReplicationLocation - * @static - * @param {Object.} object Plain object - * @returns {vtctldata.Workflow.ReplicationLocation} ReplicationLocation - */ - ReplicationLocation.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.Workflow.ReplicationLocation) - return object; - let message = new $root.vtctldata.Workflow.ReplicationLocation(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shards) { - if (!Array.isArray(object.shards)) - throw TypeError(".vtctldata.Workflow.ReplicationLocation.shards: array expected"); - message.shards = []; - for (let i = 0; i < object.shards.length; ++i) - message.shards[i] = String(object.shards[i]); - } - return message; - }; + /** + * Verifies a Vindex message. 
+ * @function verify + * @memberof vschema.Vindex + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Vindex.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.type != null && message.hasOwnProperty("type")) + if (!$util.isString(message.type)) + return "type: string expected"; + if (message.params != null && message.hasOwnProperty("params")) { + if (!$util.isObject(message.params)) + return "params: object expected"; + let key = Object.keys(message.params); + for (let i = 0; i < key.length; ++i) + if (!$util.isString(message.params[key[i]])) + return "params: string{k:string} expected"; + } + if (message.owner != null && message.hasOwnProperty("owner")) + if (!$util.isString(message.owner)) + return "owner: string expected"; + return null; + }; - /** - * Creates a plain object from a ReplicationLocation message. Also converts values to other types if specified. - * @function toObject - * @memberof vtctldata.Workflow.ReplicationLocation - * @static - * @param {vtctldata.Workflow.ReplicationLocation} message ReplicationLocation - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - ReplicationLocation.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.arrays || options.defaults) - object.shards = []; - if (options.defaults) - object.keyspace = ""; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shards && message.shards.length) { - object.shards = []; - for (let j = 0; j < message.shards.length; ++j) - object.shards[j] = message.shards[j]; - } + /** + * Creates a Vindex message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vschema.Vindex + * @static + * @param {Object.} object Plain object + * @returns {vschema.Vindex} Vindex + */ + Vindex.fromObject = function fromObject(object) { + if (object instanceof $root.vschema.Vindex) return object; - }; - - /** - * Converts this ReplicationLocation to JSON. - * @function toJSON - * @memberof vtctldata.Workflow.ReplicationLocation - * @instance - * @returns {Object.} JSON object - */ - ReplicationLocation.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - /** - * Gets the default type url for ReplicationLocation - * @function getTypeUrl - * @memberof vtctldata.Workflow.ReplicationLocation - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - ReplicationLocation.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/vtctldata.Workflow.ReplicationLocation"; - }; - - return ReplicationLocation; - })(); + let message = new $root.vschema.Vindex(); + if (object.type != null) + message.type = String(object.type); + if (object.params) { + if (typeof object.params !== "object") + throw TypeError(".vschema.Vindex.params: object expected"); + message.params = {}; + for (let keys = Object.keys(object.params), i = 0; i < keys.length; ++i) + message.params[keys[i]] = String(object.params[keys[i]]); + } + if (object.owner != null) + message.owner = String(object.owner); + return message; + }; - Workflow.ShardStream = (function() { + /** + * Creates a plain object from a Vindex message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vschema.Vindex + * @static + * @param {vschema.Vindex} message Vindex + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Vindex.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.objects || options.defaults) + object.params = {}; + if (options.defaults) { + object.type = ""; + object.owner = ""; + } + if (message.type != null && message.hasOwnProperty("type")) + object.type = message.type; + let keys2; + if (message.params && (keys2 = Object.keys(message.params)).length) { + object.params = {}; + for (let j = 0; j < keys2.length; ++j) + object.params[keys2[j]] = message.params[keys2[j]]; + } + if (message.owner != null && message.hasOwnProperty("owner")) + object.owner = message.owner; + return object; + }; - /** - * Properties of a ShardStream. - * @memberof vtctldata.Workflow - * @interface IShardStream - * @property {Array.|null} [streams] ShardStream streams - * @property {Array.|null} [tablet_controls] ShardStream tablet_controls - * @property {boolean|null} [is_primary_serving] ShardStream is_primary_serving - */ + /** + * Converts this Vindex to JSON. + * @function toJSON + * @memberof vschema.Vindex + * @instance + * @returns {Object.} JSON object + */ + Vindex.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; - /** - * Constructs a new ShardStream. - * @memberof vtctldata.Workflow - * @classdesc Represents a ShardStream. 
- * @implements IShardStream - * @constructor - * @param {vtctldata.Workflow.IShardStream=} [properties] Properties to set - */ - function ShardStream(properties) { - this.streams = []; - this.tablet_controls = []; - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; + /** + * Gets the default type url for Vindex + * @function getTypeUrl + * @memberof vschema.Vindex + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Vindex.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; } + return typeUrlPrefix + "/vschema.Vindex"; + }; - /** - * ShardStream streams. - * @member {Array.} streams - * @memberof vtctldata.Workflow.ShardStream - * @instance - */ - ShardStream.prototype.streams = $util.emptyArray; + return Vindex; + })(); - /** - * ShardStream tablet_controls. - * @member {Array.} tablet_controls - * @memberof vtctldata.Workflow.ShardStream - * @instance - */ - ShardStream.prototype.tablet_controls = $util.emptyArray; + vschema.Table = (function() { - /** - * ShardStream is_primary_serving. - * @member {boolean} is_primary_serving - * @memberof vtctldata.Workflow.ShardStream - * @instance - */ - ShardStream.prototype.is_primary_serving = false; + /** + * Properties of a Table. 
+ * @memberof vschema + * @interface ITable + * @property {string|null} [type] Table type + * @property {Array.|null} [column_vindexes] Table column_vindexes + * @property {vschema.IAutoIncrement|null} [auto_increment] Table auto_increment + * @property {Array.|null} [columns] Table columns + * @property {string|null} [pinned] Table pinned + * @property {boolean|null} [column_list_authoritative] Table column_list_authoritative + * @property {string|null} [source] Table source + */ - /** - * Creates a new ShardStream instance using the specified properties. - * @function create - * @memberof vtctldata.Workflow.ShardStream - * @static - * @param {vtctldata.Workflow.IShardStream=} [properties] Properties to set - * @returns {vtctldata.Workflow.ShardStream} ShardStream instance - */ - ShardStream.create = function create(properties) { - return new ShardStream(properties); - }; + /** + * Constructs a new Table. + * @memberof vschema + * @classdesc Represents a Table. + * @implements ITable + * @constructor + * @param {vschema.ITable=} [properties] Properties to set + */ + function Table(properties) { + this.column_vindexes = []; + this.columns = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } - /** - * Encodes the specified ShardStream message. Does not implicitly {@link vtctldata.Workflow.ShardStream.verify|verify} messages. 
- * @function encode - * @memberof vtctldata.Workflow.ShardStream - * @static - * @param {vtctldata.Workflow.IShardStream} message ShardStream message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - ShardStream.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.streams != null && message.streams.length) - for (let i = 0; i < message.streams.length; ++i) - $root.vtctldata.Workflow.Stream.encode(message.streams[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.tablet_controls != null && message.tablet_controls.length) - for (let i = 0; i < message.tablet_controls.length; ++i) - $root.topodata.Shard.TabletControl.encode(message.tablet_controls[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.is_primary_serving != null && Object.hasOwnProperty.call(message, "is_primary_serving")) - writer.uint32(/* id 3, wireType 0 =*/24).bool(message.is_primary_serving); - return writer; - }; + /** + * Table type. + * @member {string} type + * @memberof vschema.Table + * @instance + */ + Table.prototype.type = ""; - /** - * Encodes the specified ShardStream message, length delimited. Does not implicitly {@link vtctldata.Workflow.ShardStream.verify|verify} messages. - * @function encodeDelimited - * @memberof vtctldata.Workflow.ShardStream - * @static - * @param {vtctldata.Workflow.IShardStream} message ShardStream message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - ShardStream.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; + /** + * Table column_vindexes. 
+ * @member {Array.} column_vindexes + * @memberof vschema.Table + * @instance + */ + Table.prototype.column_vindexes = $util.emptyArray; - /** - * Decodes a ShardStream message from the specified reader or buffer. - * @function decode - * @memberof vtctldata.Workflow.ShardStream - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.Workflow.ShardStream} ShardStream - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - ShardStream.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.Workflow.ShardStream(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - if (!(message.streams && message.streams.length)) - message.streams = []; - message.streams.push($root.vtctldata.Workflow.Stream.decode(reader, reader.uint32())); - break; - } - case 2: { - if (!(message.tablet_controls && message.tablet_controls.length)) - message.tablet_controls = []; - message.tablet_controls.push($root.topodata.Shard.TabletControl.decode(reader, reader.uint32())); - break; - } - case 3: { - message.is_primary_serving = reader.bool(); - break; - } - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; + /** + * Table auto_increment. + * @member {vschema.IAutoIncrement|null|undefined} auto_increment + * @memberof vschema.Table + * @instance + */ + Table.prototype.auto_increment = null; - /** - * Decodes a ShardStream message from the specified reader or buffer, length delimited. 
- * @function decodeDelimited - * @memberof vtctldata.Workflow.ShardStream - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.Workflow.ShardStream} ShardStream - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - ShardStream.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; + /** + * Table columns. + * @member {Array.} columns + * @memberof vschema.Table + * @instance + */ + Table.prototype.columns = $util.emptyArray; - /** - * Verifies a ShardStream message. - * @function verify - * @memberof vtctldata.Workflow.ShardStream - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - ShardStream.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.streams != null && message.hasOwnProperty("streams")) { - if (!Array.isArray(message.streams)) - return "streams: array expected"; - for (let i = 0; i < message.streams.length; ++i) { - let error = $root.vtctldata.Workflow.Stream.verify(message.streams[i]); - if (error) - return "streams." + error; + /** + * Table pinned. + * @member {string} pinned + * @memberof vschema.Table + * @instance + */ + Table.prototype.pinned = ""; + + /** + * Table column_list_authoritative. + * @member {boolean} column_list_authoritative + * @memberof vschema.Table + * @instance + */ + Table.prototype.column_list_authoritative = false; + + /** + * Table source. + * @member {string} source + * @memberof vschema.Table + * @instance + */ + Table.prototype.source = ""; + + /** + * Creates a new Table instance using the specified properties. 
+ * @function create + * @memberof vschema.Table + * @static + * @param {vschema.ITable=} [properties] Properties to set + * @returns {vschema.Table} Table instance + */ + Table.create = function create(properties) { + return new Table(properties); + }; + + /** + * Encodes the specified Table message. Does not implicitly {@link vschema.Table.verify|verify} messages. + * @function encode + * @memberof vschema.Table + * @static + * @param {vschema.ITable} message Table message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Table.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.type != null && Object.hasOwnProperty.call(message, "type")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.type); + if (message.column_vindexes != null && message.column_vindexes.length) + for (let i = 0; i < message.column_vindexes.length; ++i) + $root.vschema.ColumnVindex.encode(message.column_vindexes[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.auto_increment != null && Object.hasOwnProperty.call(message, "auto_increment")) + $root.vschema.AutoIncrement.encode(message.auto_increment, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.columns != null && message.columns.length) + for (let i = 0; i < message.columns.length; ++i) + $root.vschema.Column.encode(message.columns[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.pinned != null && Object.hasOwnProperty.call(message, "pinned")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.pinned); + if (message.column_list_authoritative != null && Object.hasOwnProperty.call(message, "column_list_authoritative")) + writer.uint32(/* id 6, wireType 0 =*/48).bool(message.column_list_authoritative); + if (message.source != null && Object.hasOwnProperty.call(message, "source")) + writer.uint32(/* id 7, wireType 2 
=*/58).string(message.source); + return writer; + }; + + /** + * Encodes the specified Table message, length delimited. Does not implicitly {@link vschema.Table.verify|verify} messages. + * @function encodeDelimited + * @memberof vschema.Table + * @static + * @param {vschema.ITable} message Table message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Table.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Table message from the specified reader or buffer. + * @function decode + * @memberof vschema.Table + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vschema.Table} Table + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Table.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.Table(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.type = reader.string(); + break; } - } - if (message.tablet_controls != null && message.hasOwnProperty("tablet_controls")) { - if (!Array.isArray(message.tablet_controls)) - return "tablet_controls: array expected"; - for (let i = 0; i < message.tablet_controls.length; ++i) { - let error = $root.topodata.Shard.TabletControl.verify(message.tablet_controls[i]); - if (error) - return "tablet_controls." 
+ error; + case 2: { + if (!(message.column_vindexes && message.column_vindexes.length)) + message.column_vindexes = []; + message.column_vindexes.push($root.vschema.ColumnVindex.decode(reader, reader.uint32())); + break; } - } - if (message.is_primary_serving != null && message.hasOwnProperty("is_primary_serving")) - if (typeof message.is_primary_serving !== "boolean") - return "is_primary_serving: boolean expected"; - return null; - }; - - /** - * Creates a ShardStream message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof vtctldata.Workflow.ShardStream - * @static - * @param {Object.} object Plain object - * @returns {vtctldata.Workflow.ShardStream} ShardStream - */ - ShardStream.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.Workflow.ShardStream) - return object; - let message = new $root.vtctldata.Workflow.ShardStream(); - if (object.streams) { - if (!Array.isArray(object.streams)) - throw TypeError(".vtctldata.Workflow.ShardStream.streams: array expected"); - message.streams = []; - for (let i = 0; i < object.streams.length; ++i) { - if (typeof object.streams[i] !== "object") - throw TypeError(".vtctldata.Workflow.ShardStream.streams: object expected"); - message.streams[i] = $root.vtctldata.Workflow.Stream.fromObject(object.streams[i]); + case 3: { + message.auto_increment = $root.vschema.AutoIncrement.decode(reader, reader.uint32()); + break; } - } - if (object.tablet_controls) { - if (!Array.isArray(object.tablet_controls)) - throw TypeError(".vtctldata.Workflow.ShardStream.tablet_controls: array expected"); - message.tablet_controls = []; - for (let i = 0; i < object.tablet_controls.length; ++i) { - if (typeof object.tablet_controls[i] !== "object") - throw TypeError(".vtctldata.Workflow.ShardStream.tablet_controls: object expected"); - message.tablet_controls[i] = $root.topodata.Shard.TabletControl.fromObject(object.tablet_controls[i]); + case 4: { + 
if (!(message.columns && message.columns.length)) + message.columns = []; + message.columns.push($root.vschema.Column.decode(reader, reader.uint32())); + break; + } + case 5: { + message.pinned = reader.string(); + break; + } + case 6: { + message.column_list_authoritative = reader.bool(); + break; + } + case 7: { + message.source = reader.string(); + break; } + default: + reader.skipType(tag & 7); + break; } - if (object.is_primary_serving != null) - message.is_primary_serving = Boolean(object.is_primary_serving); - return message; - }; + } + return message; + }; - /** - * Creates a plain object from a ShardStream message. Also converts values to other types if specified. - * @function toObject - * @memberof vtctldata.Workflow.ShardStream - * @static - * @param {vtctldata.Workflow.ShardStream} message ShardStream - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - ShardStream.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.arrays || options.defaults) { - object.streams = []; - object.tablet_controls = []; - } - if (options.defaults) - object.is_primary_serving = false; - if (message.streams && message.streams.length) { - object.streams = []; - for (let j = 0; j < message.streams.length; ++j) - object.streams[j] = $root.vtctldata.Workflow.Stream.toObject(message.streams[j], options); + /** + * Decodes a Table message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vschema.Table + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vschema.Table} Table + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Table.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Table message. + * @function verify + * @memberof vschema.Table + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Table.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.type != null && message.hasOwnProperty("type")) + if (!$util.isString(message.type)) + return "type: string expected"; + if (message.column_vindexes != null && message.hasOwnProperty("column_vindexes")) { + if (!Array.isArray(message.column_vindexes)) + return "column_vindexes: array expected"; + for (let i = 0; i < message.column_vindexes.length; ++i) { + let error = $root.vschema.ColumnVindex.verify(message.column_vindexes[i]); + if (error) + return "column_vindexes." + error; } - if (message.tablet_controls && message.tablet_controls.length) { - object.tablet_controls = []; - for (let j = 0; j < message.tablet_controls.length; ++j) - object.tablet_controls[j] = $root.topodata.Shard.TabletControl.toObject(message.tablet_controls[j], options); + } + if (message.auto_increment != null && message.hasOwnProperty("auto_increment")) { + let error = $root.vschema.AutoIncrement.verify(message.auto_increment); + if (error) + return "auto_increment." 
+ error; + } + if (message.columns != null && message.hasOwnProperty("columns")) { + if (!Array.isArray(message.columns)) + return "columns: array expected"; + for (let i = 0; i < message.columns.length; ++i) { + let error = $root.vschema.Column.verify(message.columns[i]); + if (error) + return "columns." + error; } - if (message.is_primary_serving != null && message.hasOwnProperty("is_primary_serving")) - object.is_primary_serving = message.is_primary_serving; - return object; - }; - - /** - * Converts this ShardStream to JSON. - * @function toJSON - * @memberof vtctldata.Workflow.ShardStream - * @instance - * @returns {Object.} JSON object - */ - ShardStream.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; + } + if (message.pinned != null && message.hasOwnProperty("pinned")) + if (!$util.isString(message.pinned)) + return "pinned: string expected"; + if (message.column_list_authoritative != null && message.hasOwnProperty("column_list_authoritative")) + if (typeof message.column_list_authoritative !== "boolean") + return "column_list_authoritative: boolean expected"; + if (message.source != null && message.hasOwnProperty("source")) + if (!$util.isString(message.source)) + return "source: string expected"; + return null; + }; - /** - * Gets the default type url for ShardStream - * @function getTypeUrl - * @memberof vtctldata.Workflow.ShardStream - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - ShardStream.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; + /** + * Creates a Table message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vschema.Table + * @static + * @param {Object.} object Plain object + * @returns {vschema.Table} Table + */ + Table.fromObject = function fromObject(object) { + if (object instanceof $root.vschema.Table) + return object; + let message = new $root.vschema.Table(); + if (object.type != null) + message.type = String(object.type); + if (object.column_vindexes) { + if (!Array.isArray(object.column_vindexes)) + throw TypeError(".vschema.Table.column_vindexes: array expected"); + message.column_vindexes = []; + for (let i = 0; i < object.column_vindexes.length; ++i) { + if (typeof object.column_vindexes[i] !== "object") + throw TypeError(".vschema.Table.column_vindexes: object expected"); + message.column_vindexes[i] = $root.vschema.ColumnVindex.fromObject(object.column_vindexes[i]); } - return typeUrlPrefix + "/vtctldata.Workflow.ShardStream"; - }; - - return ShardStream; - })(); - - Workflow.Stream = (function() { - - /** - * Properties of a Stream. - * @memberof vtctldata.Workflow - * @interface IStream - * @property {number|Long|null} [id] Stream id - * @property {string|null} [shard] Stream shard - * @property {topodata.ITabletAlias|null} [tablet] Stream tablet - * @property {binlogdata.IBinlogSource|null} [binlog_source] Stream binlog_source - * @property {string|null} [position] Stream position - * @property {string|null} [stop_position] Stream stop_position - * @property {string|null} [state] Stream state - * @property {string|null} [db_name] Stream db_name - * @property {vttime.ITime|null} [transaction_timestamp] Stream transaction_timestamp - * @property {vttime.ITime|null} [time_updated] Stream time_updated - * @property {string|null} [message] Stream message - * @property {Array.|null} [copy_states] Stream copy_states - * @property {Array.|null} [logs] Stream logs - * @property {string|null} [log_fetch_error] Stream log_fetch_error - * @property {Array.|null} [tags] Stream tags - */ + } + if (object.auto_increment != null) 
{ + if (typeof object.auto_increment !== "object") + throw TypeError(".vschema.Table.auto_increment: object expected"); + message.auto_increment = $root.vschema.AutoIncrement.fromObject(object.auto_increment); + } + if (object.columns) { + if (!Array.isArray(object.columns)) + throw TypeError(".vschema.Table.columns: array expected"); + message.columns = []; + for (let i = 0; i < object.columns.length; ++i) { + if (typeof object.columns[i] !== "object") + throw TypeError(".vschema.Table.columns: object expected"); + message.columns[i] = $root.vschema.Column.fromObject(object.columns[i]); + } + } + if (object.pinned != null) + message.pinned = String(object.pinned); + if (object.column_list_authoritative != null) + message.column_list_authoritative = Boolean(object.column_list_authoritative); + if (object.source != null) + message.source = String(object.source); + return message; + }; - /** - * Constructs a new Stream. - * @memberof vtctldata.Workflow - * @classdesc Represents a Stream. - * @implements IStream - * @constructor - * @param {vtctldata.Workflow.IStream=} [properties] Properties to set - */ - function Stream(properties) { - this.copy_states = []; - this.logs = []; - this.tags = []; - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; + /** + * Creates a plain object from a Table message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vschema.Table + * @static + * @param {vschema.Table} message Table + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Table.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) { + object.column_vindexes = []; + object.columns = []; + } + if (options.defaults) { + object.type = ""; + object.auto_increment = null; + object.pinned = ""; + object.column_list_authoritative = false; + object.source = ""; + } + if (message.type != null && message.hasOwnProperty("type")) + object.type = message.type; + if (message.column_vindexes && message.column_vindexes.length) { + object.column_vindexes = []; + for (let j = 0; j < message.column_vindexes.length; ++j) + object.column_vindexes[j] = $root.vschema.ColumnVindex.toObject(message.column_vindexes[j], options); + } + if (message.auto_increment != null && message.hasOwnProperty("auto_increment")) + object.auto_increment = $root.vschema.AutoIncrement.toObject(message.auto_increment, options); + if (message.columns && message.columns.length) { + object.columns = []; + for (let j = 0; j < message.columns.length; ++j) + object.columns[j] = $root.vschema.Column.toObject(message.columns[j], options); } + if (message.pinned != null && message.hasOwnProperty("pinned")) + object.pinned = message.pinned; + if (message.column_list_authoritative != null && message.hasOwnProperty("column_list_authoritative")) + object.column_list_authoritative = message.column_list_authoritative; + if (message.source != null && message.hasOwnProperty("source")) + object.source = message.source; + return object; + }; - /** - * Stream id. - * @member {number|Long} id - * @memberof vtctldata.Workflow.Stream - * @instance - */ - Stream.prototype.id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + /** + * Converts this Table to JSON. 
+ * @function toJSON + * @memberof vschema.Table + * @instance + * @returns {Object.} JSON object + */ + Table.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; - /** - * Stream shard. - * @member {string} shard - * @memberof vtctldata.Workflow.Stream - * @instance - */ - Stream.prototype.shard = ""; + /** + * Gets the default type url for Table + * @function getTypeUrl + * @memberof vschema.Table + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Table.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vschema.Table"; + }; - /** - * Stream tablet. - * @member {topodata.ITabletAlias|null|undefined} tablet - * @memberof vtctldata.Workflow.Stream - * @instance - */ - Stream.prototype.tablet = null; + return Table; + })(); - /** - * Stream binlog_source. - * @member {binlogdata.IBinlogSource|null|undefined} binlog_source - * @memberof vtctldata.Workflow.Stream - * @instance - */ - Stream.prototype.binlog_source = null; + vschema.ColumnVindex = (function() { - /** - * Stream position. - * @member {string} position - * @memberof vtctldata.Workflow.Stream - * @instance - */ - Stream.prototype.position = ""; + /** + * Properties of a ColumnVindex. + * @memberof vschema + * @interface IColumnVindex + * @property {string|null} [column] ColumnVindex column + * @property {string|null} [name] ColumnVindex name + * @property {Array.|null} [columns] ColumnVindex columns + */ - /** - * Stream stop_position. - * @member {string} stop_position - * @memberof vtctldata.Workflow.Stream - * @instance - */ - Stream.prototype.stop_position = ""; + /** + * Constructs a new ColumnVindex. + * @memberof vschema + * @classdesc Represents a ColumnVindex. 
+ * @implements IColumnVindex + * @constructor + * @param {vschema.IColumnVindex=} [properties] Properties to set + */ + function ColumnVindex(properties) { + this.columns = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } - /** - * Stream state. - * @member {string} state - * @memberof vtctldata.Workflow.Stream - * @instance - */ - Stream.prototype.state = ""; + /** + * ColumnVindex column. + * @member {string} column + * @memberof vschema.ColumnVindex + * @instance + */ + ColumnVindex.prototype.column = ""; - /** - * Stream db_name. - * @member {string} db_name - * @memberof vtctldata.Workflow.Stream - * @instance - */ - Stream.prototype.db_name = ""; + /** + * ColumnVindex name. + * @member {string} name + * @memberof vschema.ColumnVindex + * @instance + */ + ColumnVindex.prototype.name = ""; - /** - * Stream transaction_timestamp. - * @member {vttime.ITime|null|undefined} transaction_timestamp - * @memberof vtctldata.Workflow.Stream - * @instance - */ - Stream.prototype.transaction_timestamp = null; + /** + * ColumnVindex columns. + * @member {Array.} columns + * @memberof vschema.ColumnVindex + * @instance + */ + ColumnVindex.prototype.columns = $util.emptyArray; - /** - * Stream time_updated. - * @member {vttime.ITime|null|undefined} time_updated - * @memberof vtctldata.Workflow.Stream - * @instance - */ - Stream.prototype.time_updated = null; + /** + * Creates a new ColumnVindex instance using the specified properties. + * @function create + * @memberof vschema.ColumnVindex + * @static + * @param {vschema.IColumnVindex=} [properties] Properties to set + * @returns {vschema.ColumnVindex} ColumnVindex instance + */ + ColumnVindex.create = function create(properties) { + return new ColumnVindex(properties); + }; - /** - * Stream message. 
- * @member {string} message - * @memberof vtctldata.Workflow.Stream - * @instance - */ - Stream.prototype.message = ""; + /** + * Encodes the specified ColumnVindex message. Does not implicitly {@link vschema.ColumnVindex.verify|verify} messages. + * @function encode + * @memberof vschema.ColumnVindex + * @static + * @param {vschema.IColumnVindex} message ColumnVindex message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ColumnVindex.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.column != null && Object.hasOwnProperty.call(message, "column")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.column); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.name); + if (message.columns != null && message.columns.length) + for (let i = 0; i < message.columns.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.columns[i]); + return writer; + }; - /** - * Stream copy_states. - * @member {Array.} copy_states - * @memberof vtctldata.Workflow.Stream - * @instance - */ - Stream.prototype.copy_states = $util.emptyArray; + /** + * Encodes the specified ColumnVindex message, length delimited. Does not implicitly {@link vschema.ColumnVindex.verify|verify} messages. + * @function encodeDelimited + * @memberof vschema.ColumnVindex + * @static + * @param {vschema.IColumnVindex} message ColumnVindex message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ColumnVindex.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; - /** - * Stream logs. 
- * @member {Array.} logs - * @memberof vtctldata.Workflow.Stream - * @instance - */ - Stream.prototype.logs = $util.emptyArray; + /** + * Decodes a ColumnVindex message from the specified reader or buffer. + * @function decode + * @memberof vschema.ColumnVindex + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vschema.ColumnVindex} ColumnVindex + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ColumnVindex.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.ColumnVindex(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.column = reader.string(); + break; + } + case 2: { + message.name = reader.string(); + break; + } + case 3: { + if (!(message.columns && message.columns.length)) + message.columns = []; + message.columns.push(reader.string()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; - /** - * Stream log_fetch_error. - * @member {string} log_fetch_error - * @memberof vtctldata.Workflow.Stream - * @instance - */ - Stream.prototype.log_fetch_error = ""; + /** + * Decodes a ColumnVindex message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vschema.ColumnVindex + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vschema.ColumnVindex} ColumnVindex + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ColumnVindex.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; - /** - * Stream tags. - * @member {Array.} tags - * @memberof vtctldata.Workflow.Stream - * @instance - */ - Stream.prototype.tags = $util.emptyArray; + /** + * Verifies a ColumnVindex message. + * @function verify + * @memberof vschema.ColumnVindex + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ColumnVindex.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.column != null && message.hasOwnProperty("column")) + if (!$util.isString(message.column)) + return "column: string expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.columns != null && message.hasOwnProperty("columns")) { + if (!Array.isArray(message.columns)) + return "columns: array expected"; + for (let i = 0; i < message.columns.length; ++i) + if (!$util.isString(message.columns[i])) + return "columns: string[] expected"; + } + return null; + }; - /** - * Creates a new Stream instance using the specified properties. 
- * @function create - * @memberof vtctldata.Workflow.Stream - * @static - * @param {vtctldata.Workflow.IStream=} [properties] Properties to set - * @returns {vtctldata.Workflow.Stream} Stream instance - */ - Stream.create = function create(properties) { - return new Stream(properties); - }; + /** + * Creates a ColumnVindex message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vschema.ColumnVindex + * @static + * @param {Object.} object Plain object + * @returns {vschema.ColumnVindex} ColumnVindex + */ + ColumnVindex.fromObject = function fromObject(object) { + if (object instanceof $root.vschema.ColumnVindex) + return object; + let message = new $root.vschema.ColumnVindex(); + if (object.column != null) + message.column = String(object.column); + if (object.name != null) + message.name = String(object.name); + if (object.columns) { + if (!Array.isArray(object.columns)) + throw TypeError(".vschema.ColumnVindex.columns: array expected"); + message.columns = []; + for (let i = 0; i < object.columns.length; ++i) + message.columns[i] = String(object.columns[i]); + } + return message; + }; - /** - * Encodes the specified Stream message. Does not implicitly {@link vtctldata.Workflow.Stream.verify|verify} messages. 
- * @function encode - * @memberof vtctldata.Workflow.Stream - * @static - * @param {vtctldata.Workflow.IStream} message Stream message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - Stream.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.id != null && Object.hasOwnProperty.call(message, "id")) - writer.uint32(/* id 1, wireType 0 =*/8).int64(message.id); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.tablet != null && Object.hasOwnProperty.call(message, "tablet")) - $root.topodata.TabletAlias.encode(message.tablet, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.binlog_source != null && Object.hasOwnProperty.call(message, "binlog_source")) - $root.binlogdata.BinlogSource.encode(message.binlog_source, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.position != null && Object.hasOwnProperty.call(message, "position")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.position); - if (message.stop_position != null && Object.hasOwnProperty.call(message, "stop_position")) - writer.uint32(/* id 6, wireType 2 =*/50).string(message.stop_position); - if (message.state != null && Object.hasOwnProperty.call(message, "state")) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.state); - if (message.db_name != null && Object.hasOwnProperty.call(message, "db_name")) - writer.uint32(/* id 8, wireType 2 =*/66).string(message.db_name); - if (message.transaction_timestamp != null && Object.hasOwnProperty.call(message, "transaction_timestamp")) - $root.vttime.Time.encode(message.transaction_timestamp, writer.uint32(/* id 9, wireType 2 =*/74).fork()).ldelim(); - if (message.time_updated != null && Object.hasOwnProperty.call(message, "time_updated")) - 
$root.vttime.Time.encode(message.time_updated, writer.uint32(/* id 10, wireType 2 =*/82).fork()).ldelim(); - if (message.message != null && Object.hasOwnProperty.call(message, "message")) - writer.uint32(/* id 11, wireType 2 =*/90).string(message.message); - if (message.copy_states != null && message.copy_states.length) - for (let i = 0; i < message.copy_states.length; ++i) - $root.vtctldata.Workflow.Stream.CopyState.encode(message.copy_states[i], writer.uint32(/* id 12, wireType 2 =*/98).fork()).ldelim(); - if (message.logs != null && message.logs.length) - for (let i = 0; i < message.logs.length; ++i) - $root.vtctldata.Workflow.Stream.Log.encode(message.logs[i], writer.uint32(/* id 13, wireType 2 =*/106).fork()).ldelim(); - if (message.log_fetch_error != null && Object.hasOwnProperty.call(message, "log_fetch_error")) - writer.uint32(/* id 14, wireType 2 =*/114).string(message.log_fetch_error); - if (message.tags != null && message.tags.length) - for (let i = 0; i < message.tags.length; ++i) - writer.uint32(/* id 15, wireType 2 =*/122).string(message.tags[i]); - return writer; - }; + /** + * Creates a plain object from a ColumnVindex message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vschema.ColumnVindex + * @static + * @param {vschema.ColumnVindex} message ColumnVindex + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ColumnVindex.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.columns = []; + if (options.defaults) { + object.column = ""; + object.name = ""; + } + if (message.column != null && message.hasOwnProperty("column")) + object.column = message.column; + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.columns && message.columns.length) { + object.columns = []; + for (let j = 0; j < message.columns.length; ++j) + object.columns[j] = message.columns[j]; + } + return object; + }; - /** + /** + * Converts this ColumnVindex to JSON. + * @function toJSON + * @memberof vschema.ColumnVindex + * @instance + * @returns {Object.} JSON object + */ + ColumnVindex.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ColumnVindex + * @function getTypeUrl + * @memberof vschema.ColumnVindex + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ColumnVindex.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vschema.ColumnVindex"; + }; + + return ColumnVindex; + })(); + + vschema.AutoIncrement = (function() { + + /** + * Properties of an AutoIncrement. + * @memberof vschema + * @interface IAutoIncrement + * @property {string|null} [column] AutoIncrement column + * @property {string|null} [sequence] AutoIncrement sequence + */ + + /** + * Constructs a new AutoIncrement. 
+ * @memberof vschema + * @classdesc Represents an AutoIncrement. + * @implements IAutoIncrement + * @constructor + * @param {vschema.IAutoIncrement=} [properties] Properties to set + */ + function AutoIncrement(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * AutoIncrement column. + * @member {string} column + * @memberof vschema.AutoIncrement + * @instance + */ + AutoIncrement.prototype.column = ""; + + /** + * AutoIncrement sequence. + * @member {string} sequence + * @memberof vschema.AutoIncrement + * @instance + */ + AutoIncrement.prototype.sequence = ""; + + /** + * Creates a new AutoIncrement instance using the specified properties. + * @function create + * @memberof vschema.AutoIncrement + * @static + * @param {vschema.IAutoIncrement=} [properties] Properties to set + * @returns {vschema.AutoIncrement} AutoIncrement instance + */ + AutoIncrement.create = function create(properties) { + return new AutoIncrement(properties); + }; + + /** + * Encodes the specified AutoIncrement message. Does not implicitly {@link vschema.AutoIncrement.verify|verify} messages. + * @function encode + * @memberof vschema.AutoIncrement + * @static + * @param {vschema.IAutoIncrement} message AutoIncrement message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + AutoIncrement.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.column != null && Object.hasOwnProperty.call(message, "column")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.column); + if (message.sequence != null && Object.hasOwnProperty.call(message, "sequence")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.sequence); + return writer; + }; + + /** + * Encodes the specified AutoIncrement message, length delimited. 
Does not implicitly {@link vschema.AutoIncrement.verify|verify} messages. + * @function encodeDelimited + * @memberof vschema.AutoIncrement + * @static + * @param {vschema.IAutoIncrement} message AutoIncrement message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + AutoIncrement.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an AutoIncrement message from the specified reader or buffer. + * @function decode + * @memberof vschema.AutoIncrement + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vschema.AutoIncrement} AutoIncrement + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + AutoIncrement.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.AutoIncrement(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.column = reader.string(); + break; + } + case 2: { + message.sequence = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an AutoIncrement message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vschema.AutoIncrement + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vschema.AutoIncrement} AutoIncrement + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + AutoIncrement.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an AutoIncrement message. + * @function verify + * @memberof vschema.AutoIncrement + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + AutoIncrement.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.column != null && message.hasOwnProperty("column")) + if (!$util.isString(message.column)) + return "column: string expected"; + if (message.sequence != null && message.hasOwnProperty("sequence")) + if (!$util.isString(message.sequence)) + return "sequence: string expected"; + return null; + }; + + /** + * Creates an AutoIncrement message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vschema.AutoIncrement + * @static + * @param {Object.} object Plain object + * @returns {vschema.AutoIncrement} AutoIncrement + */ + AutoIncrement.fromObject = function fromObject(object) { + if (object instanceof $root.vschema.AutoIncrement) + return object; + let message = new $root.vschema.AutoIncrement(); + if (object.column != null) + message.column = String(object.column); + if (object.sequence != null) + message.sequence = String(object.sequence); + return message; + }; + + /** + * Creates a plain object from an AutoIncrement message. 
Also converts values to other types if specified. + * @function toObject + * @memberof vschema.AutoIncrement + * @static + * @param {vschema.AutoIncrement} message AutoIncrement + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + AutoIncrement.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.column = ""; + object.sequence = ""; + } + if (message.column != null && message.hasOwnProperty("column")) + object.column = message.column; + if (message.sequence != null && message.hasOwnProperty("sequence")) + object.sequence = message.sequence; + return object; + }; + + /** + * Converts this AutoIncrement to JSON. + * @function toJSON + * @memberof vschema.AutoIncrement + * @instance + * @returns {Object.} JSON object + */ + AutoIncrement.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for AutoIncrement + * @function getTypeUrl + * @memberof vschema.AutoIncrement + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + AutoIncrement.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vschema.AutoIncrement"; + }; + + return AutoIncrement; + })(); + + vschema.Column = (function() { + + /** + * Properties of a Column. + * @memberof vschema + * @interface IColumn + * @property {string|null} [name] Column name + * @property {query.Type|null} [type] Column type + */ + + /** + * Constructs a new Column. + * @memberof vschema + * @classdesc Represents a Column. 
+ * @implements IColumn + * @constructor + * @param {vschema.IColumn=} [properties] Properties to set + */ + function Column(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Column name. + * @member {string} name + * @memberof vschema.Column + * @instance + */ + Column.prototype.name = ""; + + /** + * Column type. + * @member {query.Type} type + * @memberof vschema.Column + * @instance + */ + Column.prototype.type = 0; + + /** + * Creates a new Column instance using the specified properties. + * @function create + * @memberof vschema.Column + * @static + * @param {vschema.IColumn=} [properties] Properties to set + * @returns {vschema.Column} Column instance + */ + Column.create = function create(properties) { + return new Column(properties); + }; + + /** + * Encodes the specified Column message. Does not implicitly {@link vschema.Column.verify|verify} messages. + * @function encode + * @memberof vschema.Column + * @static + * @param {vschema.IColumn} message Column message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Column.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.type != null && Object.hasOwnProperty.call(message, "type")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.type); + return writer; + }; + + /** + * Encodes the specified Column message, length delimited. Does not implicitly {@link vschema.Column.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vschema.Column + * @static + * @param {vschema.IColumn} message Column message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Column.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Column message from the specified reader or buffer. + * @function decode + * @memberof vschema.Column + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vschema.Column} Column + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Column.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.Column(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.name = reader.string(); + break; + } + case 2: { + message.type = reader.int32(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a Column message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vschema.Column + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vschema.Column} Column + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Column.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Column message. 
+ * @function verify + * @memberof vschema.Column + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Column.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.type != null && message.hasOwnProperty("type")) + switch (message.type) { + default: + return "type: enum value expected"; + case 0: + case 257: + case 770: + case 259: + case 772: + case 261: + case 774: + case 263: + case 776: + case 265: + case 778: + case 1035: + case 1036: + case 2061: + case 2062: + case 2063: + case 2064: + case 785: + case 18: + case 6163: + case 10260: + case 6165: + case 10262: + case 6167: + case 10264: + case 2073: + case 2074: + case 2075: + case 28: + case 2077: + case 2078: + case 31: + case 4128: + case 4129: + case 4130: + break; + } + return null; + }; + + /** + * Creates a Column message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vschema.Column + * @static + * @param {Object.} object Plain object + * @returns {vschema.Column} Column + */ + Column.fromObject = function fromObject(object) { + if (object instanceof $root.vschema.Column) + return object; + let message = new $root.vschema.Column(); + if (object.name != null) + message.name = String(object.name); + switch (object.type) { + default: + if (typeof object.type === "number") { + message.type = object.type; + break; + } + break; + case "NULL_TYPE": + case 0: + message.type = 0; + break; + case "INT8": + case 257: + message.type = 257; + break; + case "UINT8": + case 770: + message.type = 770; + break; + case "INT16": + case 259: + message.type = 259; + break; + case "UINT16": + case 772: + message.type = 772; + break; + case "INT24": + case 261: + message.type = 261; + break; + case "UINT24": + case 774: + message.type = 774; + break; + case "INT32": + case 263: + message.type = 263; + break; + case "UINT32": + case 776: + message.type = 776; + break; + case "INT64": + case 265: + message.type = 265; + break; + case "UINT64": + case 778: + message.type = 778; + break; + case "FLOAT32": + case 1035: + message.type = 1035; + break; + case "FLOAT64": + case 1036: + message.type = 1036; + break; + case "TIMESTAMP": + case 2061: + message.type = 2061; + break; + case "DATE": + case 2062: + message.type = 2062; + break; + case "TIME": + case 2063: + message.type = 2063; + break; + case "DATETIME": + case 2064: + message.type = 2064; + break; + case "YEAR": + case 785: + message.type = 785; + break; + case "DECIMAL": + case 18: + message.type = 18; + break; + case "TEXT": + case 6163: + message.type = 6163; + break; + case "BLOB": + case 10260: + message.type = 10260; + break; + case "VARCHAR": + case 6165: + message.type = 6165; + break; + case "VARBINARY": + case 10262: + message.type = 10262; + break; + case "CHAR": + case 6167: + message.type = 6167; + break; + case "BINARY": + case 10264: + 
message.type = 10264; + break; + case "BIT": + case 2073: + message.type = 2073; + break; + case "ENUM": + case 2074: + message.type = 2074; + break; + case "SET": + case 2075: + message.type = 2075; + break; + case "TUPLE": + case 28: + message.type = 28; + break; + case "GEOMETRY": + case 2077: + message.type = 2077; + break; + case "JSON": + case 2078: + message.type = 2078; + break; + case "EXPRESSION": + case 31: + message.type = 31; + break; + case "HEXNUM": + case 4128: + message.type = 4128; + break; + case "HEXVAL": + case 4129: + message.type = 4129; + break; + case "BITNUM": + case 4130: + message.type = 4130; + break; + } + return message; + }; + + /** + * Creates a plain object from a Column message. Also converts values to other types if specified. + * @function toObject + * @memberof vschema.Column + * @static + * @param {vschema.Column} message Column + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Column.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.name = ""; + object.type = options.enums === String ? "NULL_TYPE" : 0; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.type != null && message.hasOwnProperty("type")) + object.type = options.enums === String ? $root.query.Type[message.type] === undefined ? message.type : $root.query.Type[message.type] : message.type; + return object; + }; + + /** + * Converts this Column to JSON. 
+ * @function toJSON + * @memberof vschema.Column + * @instance + * @returns {Object.} JSON object + */ + Column.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Column + * @function getTypeUrl + * @memberof vschema.Column + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Column.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vschema.Column"; + }; + + return Column; + })(); + + vschema.SrvVSchema = (function() { + + /** + * Properties of a SrvVSchema. + * @memberof vschema + * @interface ISrvVSchema + * @property {Object.|null} [keyspaces] SrvVSchema keyspaces + * @property {vschema.IRoutingRules|null} [routing_rules] SrvVSchema routing_rules + * @property {vschema.IShardRoutingRules|null} [shard_routing_rules] SrvVSchema shard_routing_rules + */ + + /** + * Constructs a new SrvVSchema. + * @memberof vschema + * @classdesc Represents a SrvVSchema. + * @implements ISrvVSchema + * @constructor + * @param {vschema.ISrvVSchema=} [properties] Properties to set + */ + function SrvVSchema(properties) { + this.keyspaces = {}; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * SrvVSchema keyspaces. + * @member {Object.} keyspaces + * @memberof vschema.SrvVSchema + * @instance + */ + SrvVSchema.prototype.keyspaces = $util.emptyObject; + + /** + * SrvVSchema routing_rules. + * @member {vschema.IRoutingRules|null|undefined} routing_rules + * @memberof vschema.SrvVSchema + * @instance + */ + SrvVSchema.prototype.routing_rules = null; + + /** + * SrvVSchema shard_routing_rules. 
+ * @member {vschema.IShardRoutingRules|null|undefined} shard_routing_rules + * @memberof vschema.SrvVSchema + * @instance + */ + SrvVSchema.prototype.shard_routing_rules = null; + + /** + * Creates a new SrvVSchema instance using the specified properties. + * @function create + * @memberof vschema.SrvVSchema + * @static + * @param {vschema.ISrvVSchema=} [properties] Properties to set + * @returns {vschema.SrvVSchema} SrvVSchema instance + */ + SrvVSchema.create = function create(properties) { + return new SrvVSchema(properties); + }; + + /** + * Encodes the specified SrvVSchema message. Does not implicitly {@link vschema.SrvVSchema.verify|verify} messages. + * @function encode + * @memberof vschema.SrvVSchema + * @static + * @param {vschema.ISrvVSchema} message SrvVSchema message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + SrvVSchema.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspaces != null && Object.hasOwnProperty.call(message, "keyspaces")) + for (let keys = Object.keys(message.keyspaces), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.vschema.Keyspace.encode(message.keyspaces[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } + if (message.routing_rules != null && Object.hasOwnProperty.call(message, "routing_rules")) + $root.vschema.RoutingRules.encode(message.routing_rules, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.shard_routing_rules != null && Object.hasOwnProperty.call(message, "shard_routing_rules")) + $root.vschema.ShardRoutingRules.encode(message.shard_routing_rules, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified SrvVSchema message, length delimited. 
Does not implicitly {@link vschema.SrvVSchema.verify|verify} messages. + * @function encodeDelimited + * @memberof vschema.SrvVSchema + * @static + * @param {vschema.ISrvVSchema} message SrvVSchema message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + SrvVSchema.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a SrvVSchema message from the specified reader or buffer. + * @function decode + * @memberof vschema.SrvVSchema + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vschema.SrvVSchema} SrvVSchema + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + SrvVSchema.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vschema.SrvVSchema(), key, value; + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (message.keyspaces === $util.emptyObject) + message.keyspaces = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.vschema.Keyspace.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.keyspaces[key] = value; + break; + } + case 2: { + message.routing_rules = $root.vschema.RoutingRules.decode(reader, reader.uint32()); + break; + } + case 3: { + message.shard_routing_rules = $root.vschema.ShardRoutingRules.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a SrvVSchema message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vschema.SrvVSchema + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vschema.SrvVSchema} SrvVSchema + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + SrvVSchema.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a SrvVSchema message. 
+ * @function verify + * @memberof vschema.SrvVSchema + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + SrvVSchema.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspaces != null && message.hasOwnProperty("keyspaces")) { + if (!$util.isObject(message.keyspaces)) + return "keyspaces: object expected"; + let key = Object.keys(message.keyspaces); + for (let i = 0; i < key.length; ++i) { + let error = $root.vschema.Keyspace.verify(message.keyspaces[key[i]]); + if (error) + return "keyspaces." + error; + } + } + if (message.routing_rules != null && message.hasOwnProperty("routing_rules")) { + let error = $root.vschema.RoutingRules.verify(message.routing_rules); + if (error) + return "routing_rules." + error; + } + if (message.shard_routing_rules != null && message.hasOwnProperty("shard_routing_rules")) { + let error = $root.vschema.ShardRoutingRules.verify(message.shard_routing_rules); + if (error) + return "shard_routing_rules." + error; + } + return null; + }; + + /** + * Creates a SrvVSchema message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vschema.SrvVSchema + * @static + * @param {Object.} object Plain object + * @returns {vschema.SrvVSchema} SrvVSchema + */ + SrvVSchema.fromObject = function fromObject(object) { + if (object instanceof $root.vschema.SrvVSchema) + return object; + let message = new $root.vschema.SrvVSchema(); + if (object.keyspaces) { + if (typeof object.keyspaces !== "object") + throw TypeError(".vschema.SrvVSchema.keyspaces: object expected"); + message.keyspaces = {}; + for (let keys = Object.keys(object.keyspaces), i = 0; i < keys.length; ++i) { + if (typeof object.keyspaces[keys[i]] !== "object") + throw TypeError(".vschema.SrvVSchema.keyspaces: object expected"); + message.keyspaces[keys[i]] = $root.vschema.Keyspace.fromObject(object.keyspaces[keys[i]]); + } + } + if (object.routing_rules != null) { + if (typeof object.routing_rules !== "object") + throw TypeError(".vschema.SrvVSchema.routing_rules: object expected"); + message.routing_rules = $root.vschema.RoutingRules.fromObject(object.routing_rules); + } + if (object.shard_routing_rules != null) { + if (typeof object.shard_routing_rules !== "object") + throw TypeError(".vschema.SrvVSchema.shard_routing_rules: object expected"); + message.shard_routing_rules = $root.vschema.ShardRoutingRules.fromObject(object.shard_routing_rules); + } + return message; + }; + + /** + * Creates a plain object from a SrvVSchema message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vschema.SrvVSchema + * @static + * @param {vschema.SrvVSchema} message SrvVSchema + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + SrvVSchema.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.objects || options.defaults) + object.keyspaces = {}; + if (options.defaults) { + object.routing_rules = null; + object.shard_routing_rules = null; + } + let keys2; + if (message.keyspaces && (keys2 = Object.keys(message.keyspaces)).length) { + object.keyspaces = {}; + for (let j = 0; j < keys2.length; ++j) + object.keyspaces[keys2[j]] = $root.vschema.Keyspace.toObject(message.keyspaces[keys2[j]], options); + } + if (message.routing_rules != null && message.hasOwnProperty("routing_rules")) + object.routing_rules = $root.vschema.RoutingRules.toObject(message.routing_rules, options); + if (message.shard_routing_rules != null && message.hasOwnProperty("shard_routing_rules")) + object.shard_routing_rules = $root.vschema.ShardRoutingRules.toObject(message.shard_routing_rules, options); + return object; + }; + + /** + * Converts this SrvVSchema to JSON. 
+ * @function toJSON + * @memberof vschema.SrvVSchema + * @instance + * @returns {Object.} JSON object + */ + SrvVSchema.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for SrvVSchema + * @function getTypeUrl + * @memberof vschema.SrvVSchema + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + SrvVSchema.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vschema.SrvVSchema"; + }; + + return SrvVSchema; + })(); + + vschema.ShardRoutingRules = (function() { + + /** + * Properties of a ShardRoutingRules. + * @memberof vschema + * @interface IShardRoutingRules + * @property {Array.|null} [rules] ShardRoutingRules rules + */ + + /** + * Constructs a new ShardRoutingRules. + * @memberof vschema + * @classdesc Represents a ShardRoutingRules. + * @implements IShardRoutingRules + * @constructor + * @param {vschema.IShardRoutingRules=} [properties] Properties to set + */ + function ShardRoutingRules(properties) { + this.rules = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ShardRoutingRules rules. + * @member {Array.} rules + * @memberof vschema.ShardRoutingRules + * @instance + */ + ShardRoutingRules.prototype.rules = $util.emptyArray; + + /** + * Creates a new ShardRoutingRules instance using the specified properties. 
+ * @function create + * @memberof vschema.ShardRoutingRules + * @static + * @param {vschema.IShardRoutingRules=} [properties] Properties to set + * @returns {vschema.ShardRoutingRules} ShardRoutingRules instance + */ + ShardRoutingRules.create = function create(properties) { + return new ShardRoutingRules(properties); + }; + + /** + * Encodes the specified ShardRoutingRules message. Does not implicitly {@link vschema.ShardRoutingRules.verify|verify} messages. + * @function encode + * @memberof vschema.ShardRoutingRules + * @static + * @param {vschema.IShardRoutingRules} message ShardRoutingRules message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ShardRoutingRules.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.rules != null && message.rules.length) + for (let i = 0; i < message.rules.length; ++i) + $root.vschema.ShardRoutingRule.encode(message.rules[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified ShardRoutingRules message, length delimited. Does not implicitly {@link vschema.ShardRoutingRules.verify|verify} messages. + * @function encodeDelimited + * @memberof vschema.ShardRoutingRules + * @static + * @param {vschema.IShardRoutingRules} message ShardRoutingRules message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ShardRoutingRules.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a ShardRoutingRules message from the specified reader or buffer. 
+ * @function decode + * @memberof vschema.ShardRoutingRules + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vschema.ShardRoutingRules} ShardRoutingRules + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ShardRoutingRules.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.ShardRoutingRules(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.rules && message.rules.length)) + message.rules = []; + message.rules.push($root.vschema.ShardRoutingRule.decode(reader, reader.uint32())); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a ShardRoutingRules message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vschema.ShardRoutingRules + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vschema.ShardRoutingRules} ShardRoutingRules + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ShardRoutingRules.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a ShardRoutingRules message. 
+ * @function verify + * @memberof vschema.ShardRoutingRules + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ShardRoutingRules.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.rules != null && message.hasOwnProperty("rules")) { + if (!Array.isArray(message.rules)) + return "rules: array expected"; + for (let i = 0; i < message.rules.length; ++i) { + let error = $root.vschema.ShardRoutingRule.verify(message.rules[i]); + if (error) + return "rules." + error; + } + } + return null; + }; + + /** + * Creates a ShardRoutingRules message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vschema.ShardRoutingRules + * @static + * @param {Object.} object Plain object + * @returns {vschema.ShardRoutingRules} ShardRoutingRules + */ + ShardRoutingRules.fromObject = function fromObject(object) { + if (object instanceof $root.vschema.ShardRoutingRules) + return object; + let message = new $root.vschema.ShardRoutingRules(); + if (object.rules) { + if (!Array.isArray(object.rules)) + throw TypeError(".vschema.ShardRoutingRules.rules: array expected"); + message.rules = []; + for (let i = 0; i < object.rules.length; ++i) { + if (typeof object.rules[i] !== "object") + throw TypeError(".vschema.ShardRoutingRules.rules: object expected"); + message.rules[i] = $root.vschema.ShardRoutingRule.fromObject(object.rules[i]); + } + } + return message; + }; + + /** + * Creates a plain object from a ShardRoutingRules message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vschema.ShardRoutingRules + * @static + * @param {vschema.ShardRoutingRules} message ShardRoutingRules + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ShardRoutingRules.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.rules = []; + if (message.rules && message.rules.length) { + object.rules = []; + for (let j = 0; j < message.rules.length; ++j) + object.rules[j] = $root.vschema.ShardRoutingRule.toObject(message.rules[j], options); + } + return object; + }; + + /** + * Converts this ShardRoutingRules to JSON. + * @function toJSON + * @memberof vschema.ShardRoutingRules + * @instance + * @returns {Object.} JSON object + */ + ShardRoutingRules.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ShardRoutingRules + * @function getTypeUrl + * @memberof vschema.ShardRoutingRules + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ShardRoutingRules.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vschema.ShardRoutingRules"; + }; + + return ShardRoutingRules; + })(); + + vschema.ShardRoutingRule = (function() { + + /** + * Properties of a ShardRoutingRule. + * @memberof vschema + * @interface IShardRoutingRule + * @property {string|null} [from_keyspace] ShardRoutingRule from_keyspace + * @property {string|null} [to_keyspace] ShardRoutingRule to_keyspace + * @property {string|null} [shard] ShardRoutingRule shard + */ + + /** + * Constructs a new ShardRoutingRule. + * @memberof vschema + * @classdesc Represents a ShardRoutingRule. 
+ * @implements IShardRoutingRule + * @constructor + * @param {vschema.IShardRoutingRule=} [properties] Properties to set + */ + function ShardRoutingRule(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ShardRoutingRule from_keyspace. + * @member {string} from_keyspace + * @memberof vschema.ShardRoutingRule + * @instance + */ + ShardRoutingRule.prototype.from_keyspace = ""; + + /** + * ShardRoutingRule to_keyspace. + * @member {string} to_keyspace + * @memberof vschema.ShardRoutingRule + * @instance + */ + ShardRoutingRule.prototype.to_keyspace = ""; + + /** + * ShardRoutingRule shard. + * @member {string} shard + * @memberof vschema.ShardRoutingRule + * @instance + */ + ShardRoutingRule.prototype.shard = ""; + + /** + * Creates a new ShardRoutingRule instance using the specified properties. + * @function create + * @memberof vschema.ShardRoutingRule + * @static + * @param {vschema.IShardRoutingRule=} [properties] Properties to set + * @returns {vschema.ShardRoutingRule} ShardRoutingRule instance + */ + ShardRoutingRule.create = function create(properties) { + return new ShardRoutingRule(properties); + }; + + /** + * Encodes the specified ShardRoutingRule message. Does not implicitly {@link vschema.ShardRoutingRule.verify|verify} messages. 
+ * @function encode + * @memberof vschema.ShardRoutingRule + * @static + * @param {vschema.IShardRoutingRule} message ShardRoutingRule message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ShardRoutingRule.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.from_keyspace != null && Object.hasOwnProperty.call(message, "from_keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.from_keyspace); + if (message.to_keyspace != null && Object.hasOwnProperty.call(message, "to_keyspace")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.to_keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.shard); + return writer; + }; + + /** + * Encodes the specified ShardRoutingRule message, length delimited. Does not implicitly {@link vschema.ShardRoutingRule.verify|verify} messages. + * @function encodeDelimited + * @memberof vschema.ShardRoutingRule + * @static + * @param {vschema.IShardRoutingRule} message ShardRoutingRule message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ShardRoutingRule.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a ShardRoutingRule message from the specified reader or buffer. 
+ * @function decode + * @memberof vschema.ShardRoutingRule + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vschema.ShardRoutingRule} ShardRoutingRule + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ShardRoutingRule.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.ShardRoutingRule(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.from_keyspace = reader.string(); + break; + } + case 2: { + message.to_keyspace = reader.string(); + break; + } + case 3: { + message.shard = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a ShardRoutingRule message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vschema.ShardRoutingRule + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vschema.ShardRoutingRule} ShardRoutingRule + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ShardRoutingRule.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a ShardRoutingRule message. 
+ * @function verify + * @memberof vschema.ShardRoutingRule + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ShardRoutingRule.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.from_keyspace != null && message.hasOwnProperty("from_keyspace")) + if (!$util.isString(message.from_keyspace)) + return "from_keyspace: string expected"; + if (message.to_keyspace != null && message.hasOwnProperty("to_keyspace")) + if (!$util.isString(message.to_keyspace)) + return "to_keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + return null; + }; + + /** + * Creates a ShardRoutingRule message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vschema.ShardRoutingRule + * @static + * @param {Object.} object Plain object + * @returns {vschema.ShardRoutingRule} ShardRoutingRule + */ + ShardRoutingRule.fromObject = function fromObject(object) { + if (object instanceof $root.vschema.ShardRoutingRule) + return object; + let message = new $root.vschema.ShardRoutingRule(); + if (object.from_keyspace != null) + message.from_keyspace = String(object.from_keyspace); + if (object.to_keyspace != null) + message.to_keyspace = String(object.to_keyspace); + if (object.shard != null) + message.shard = String(object.shard); + return message; + }; + + /** + * Creates a plain object from a ShardRoutingRule message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vschema.ShardRoutingRule + * @static + * @param {vschema.ShardRoutingRule} message ShardRoutingRule + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ShardRoutingRule.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.from_keyspace = ""; + object.to_keyspace = ""; + object.shard = ""; + } + if (message.from_keyspace != null && message.hasOwnProperty("from_keyspace")) + object.from_keyspace = message.from_keyspace; + if (message.to_keyspace != null && message.hasOwnProperty("to_keyspace")) + object.to_keyspace = message.to_keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + return object; + }; + + /** + * Converts this ShardRoutingRule to JSON. + * @function toJSON + * @memberof vschema.ShardRoutingRule + * @instance + * @returns {Object.} JSON object + */ + ShardRoutingRule.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ShardRoutingRule + * @function getTypeUrl + * @memberof vschema.ShardRoutingRule + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ShardRoutingRule.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vschema.ShardRoutingRule"; + }; + + return ShardRoutingRule; + })(); + + return vschema; +})(); + +export const vtctldata = $root.vtctldata = (() => { + + /** + * Namespace vtctldata. + * @exports vtctldata + * @namespace + */ + const vtctldata = {}; + + vtctldata.ExecuteVtctlCommandRequest = (function() { + + /** + * Properties of an ExecuteVtctlCommandRequest. 
+ * @memberof vtctldata + * @interface IExecuteVtctlCommandRequest + * @property {Array.|null} [args] ExecuteVtctlCommandRequest args + * @property {number|Long|null} [action_timeout] ExecuteVtctlCommandRequest action_timeout + */ + + /** + * Constructs a new ExecuteVtctlCommandRequest. + * @memberof vtctldata + * @classdesc Represents an ExecuteVtctlCommandRequest. + * @implements IExecuteVtctlCommandRequest + * @constructor + * @param {vtctldata.IExecuteVtctlCommandRequest=} [properties] Properties to set + */ + function ExecuteVtctlCommandRequest(properties) { + this.args = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ExecuteVtctlCommandRequest args. + * @member {Array.} args + * @memberof vtctldata.ExecuteVtctlCommandRequest + * @instance + */ + ExecuteVtctlCommandRequest.prototype.args = $util.emptyArray; + + /** + * ExecuteVtctlCommandRequest action_timeout. + * @member {number|Long} action_timeout + * @memberof vtctldata.ExecuteVtctlCommandRequest + * @instance + */ + ExecuteVtctlCommandRequest.prototype.action_timeout = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * Creates a new ExecuteVtctlCommandRequest instance using the specified properties. + * @function create + * @memberof vtctldata.ExecuteVtctlCommandRequest + * @static + * @param {vtctldata.IExecuteVtctlCommandRequest=} [properties] Properties to set + * @returns {vtctldata.ExecuteVtctlCommandRequest} ExecuteVtctlCommandRequest instance + */ + ExecuteVtctlCommandRequest.create = function create(properties) { + return new ExecuteVtctlCommandRequest(properties); + }; + + /** + * Encodes the specified ExecuteVtctlCommandRequest message. Does not implicitly {@link vtctldata.ExecuteVtctlCommandRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.ExecuteVtctlCommandRequest + * @static + * @param {vtctldata.IExecuteVtctlCommandRequest} message ExecuteVtctlCommandRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteVtctlCommandRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.args != null && message.args.length) + for (let i = 0; i < message.args.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.args[i]); + if (message.action_timeout != null && Object.hasOwnProperty.call(message, "action_timeout")) + writer.uint32(/* id 2, wireType 0 =*/16).int64(message.action_timeout); + return writer; + }; + + /** + * Encodes the specified ExecuteVtctlCommandRequest message, length delimited. Does not implicitly {@link vtctldata.ExecuteVtctlCommandRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ExecuteVtctlCommandRequest + * @static + * @param {vtctldata.IExecuteVtctlCommandRequest} message ExecuteVtctlCommandRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteVtctlCommandRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ExecuteVtctlCommandRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.ExecuteVtctlCommandRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ExecuteVtctlCommandRequest} ExecuteVtctlCommandRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteVtctlCommandRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ExecuteVtctlCommandRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.args && message.args.length)) + message.args = []; + message.args.push(reader.string()); + break; + } + case 2: { + message.action_timeout = reader.int64(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ExecuteVtctlCommandRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.ExecuteVtctlCommandRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ExecuteVtctlCommandRequest} ExecuteVtctlCommandRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteVtctlCommandRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ExecuteVtctlCommandRequest message. 
+ * @function verify + * @memberof vtctldata.ExecuteVtctlCommandRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ExecuteVtctlCommandRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.args != null && message.hasOwnProperty("args")) { + if (!Array.isArray(message.args)) + return "args: array expected"; + for (let i = 0; i < message.args.length; ++i) + if (!$util.isString(message.args[i])) + return "args: string[] expected"; + } + if (message.action_timeout != null && message.hasOwnProperty("action_timeout")) + if (!$util.isInteger(message.action_timeout) && !(message.action_timeout && $util.isInteger(message.action_timeout.low) && $util.isInteger(message.action_timeout.high))) + return "action_timeout: integer|Long expected"; + return null; + }; + + /** + * Creates an ExecuteVtctlCommandRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.ExecuteVtctlCommandRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ExecuteVtctlCommandRequest} ExecuteVtctlCommandRequest + */ + ExecuteVtctlCommandRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ExecuteVtctlCommandRequest) + return object; + let message = new $root.vtctldata.ExecuteVtctlCommandRequest(); + if (object.args) { + if (!Array.isArray(object.args)) + throw TypeError(".vtctldata.ExecuteVtctlCommandRequest.args: array expected"); + message.args = []; + for (let i = 0; i < object.args.length; ++i) + message.args[i] = String(object.args[i]); + } + if (object.action_timeout != null) + if ($util.Long) + (message.action_timeout = $util.Long.fromValue(object.action_timeout)).unsigned = false; + else if (typeof object.action_timeout === "string") + message.action_timeout = parseInt(object.action_timeout, 10); + else if (typeof object.action_timeout === "number") + message.action_timeout = object.action_timeout; + else if (typeof object.action_timeout === "object") + message.action_timeout = new $util.LongBits(object.action_timeout.low >>> 0, object.action_timeout.high >>> 0).toNumber(); + return message; + }; + + /** + * Creates a plain object from an ExecuteVtctlCommandRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.ExecuteVtctlCommandRequest + * @static + * @param {vtctldata.ExecuteVtctlCommandRequest} message ExecuteVtctlCommandRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ExecuteVtctlCommandRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.args = []; + if (options.defaults) + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.action_timeout = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.action_timeout = options.longs === String ? "0" : 0; + if (message.args && message.args.length) { + object.args = []; + for (let j = 0; j < message.args.length; ++j) + object.args[j] = message.args[j]; + } + if (message.action_timeout != null && message.hasOwnProperty("action_timeout")) + if (typeof message.action_timeout === "number") + object.action_timeout = options.longs === String ? String(message.action_timeout) : message.action_timeout; + else + object.action_timeout = options.longs === String ? $util.Long.prototype.toString.call(message.action_timeout) : options.longs === Number ? new $util.LongBits(message.action_timeout.low >>> 0, message.action_timeout.high >>> 0).toNumber() : message.action_timeout; + return object; + }; + + /** + * Converts this ExecuteVtctlCommandRequest to JSON. 
+ * @function toJSON + * @memberof vtctldata.ExecuteVtctlCommandRequest + * @instance + * @returns {Object.} JSON object + */ + ExecuteVtctlCommandRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ExecuteVtctlCommandRequest + * @function getTypeUrl + * @memberof vtctldata.ExecuteVtctlCommandRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ExecuteVtctlCommandRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ExecuteVtctlCommandRequest"; + }; + + return ExecuteVtctlCommandRequest; + })(); + + vtctldata.ExecuteVtctlCommandResponse = (function() { + + /** + * Properties of an ExecuteVtctlCommandResponse. + * @memberof vtctldata + * @interface IExecuteVtctlCommandResponse + * @property {logutil.IEvent|null} [event] ExecuteVtctlCommandResponse event + */ + + /** + * Constructs a new ExecuteVtctlCommandResponse. + * @memberof vtctldata + * @classdesc Represents an ExecuteVtctlCommandResponse. + * @implements IExecuteVtctlCommandResponse + * @constructor + * @param {vtctldata.IExecuteVtctlCommandResponse=} [properties] Properties to set + */ + function ExecuteVtctlCommandResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ExecuteVtctlCommandResponse event. + * @member {logutil.IEvent|null|undefined} event + * @memberof vtctldata.ExecuteVtctlCommandResponse + * @instance + */ + ExecuteVtctlCommandResponse.prototype.event = null; + + /** + * Creates a new ExecuteVtctlCommandResponse instance using the specified properties. 
+ * @function create + * @memberof vtctldata.ExecuteVtctlCommandResponse + * @static + * @param {vtctldata.IExecuteVtctlCommandResponse=} [properties] Properties to set + * @returns {vtctldata.ExecuteVtctlCommandResponse} ExecuteVtctlCommandResponse instance + */ + ExecuteVtctlCommandResponse.create = function create(properties) { + return new ExecuteVtctlCommandResponse(properties); + }; + + /** + * Encodes the specified ExecuteVtctlCommandResponse message. Does not implicitly {@link vtctldata.ExecuteVtctlCommandResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.ExecuteVtctlCommandResponse + * @static + * @param {vtctldata.IExecuteVtctlCommandResponse} message ExecuteVtctlCommandResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteVtctlCommandResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.event != null && Object.hasOwnProperty.call(message, "event")) + $root.logutil.Event.encode(message.event, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified ExecuteVtctlCommandResponse message, length delimited. Does not implicitly {@link vtctldata.ExecuteVtctlCommandResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ExecuteVtctlCommandResponse + * @static + * @param {vtctldata.IExecuteVtctlCommandResponse} message ExecuteVtctlCommandResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteVtctlCommandResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ExecuteVtctlCommandResponse message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.ExecuteVtctlCommandResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ExecuteVtctlCommandResponse} ExecuteVtctlCommandResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteVtctlCommandResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ExecuteVtctlCommandResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.event = $root.logutil.Event.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ExecuteVtctlCommandResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.ExecuteVtctlCommandResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ExecuteVtctlCommandResponse} ExecuteVtctlCommandResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteVtctlCommandResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ExecuteVtctlCommandResponse message. 
+ * @function verify + * @memberof vtctldata.ExecuteVtctlCommandResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ExecuteVtctlCommandResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.event != null && message.hasOwnProperty("event")) { + let error = $root.logutil.Event.verify(message.event); + if (error) + return "event." + error; + } + return null; + }; + + /** + * Creates an ExecuteVtctlCommandResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.ExecuteVtctlCommandResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ExecuteVtctlCommandResponse} ExecuteVtctlCommandResponse + */ + ExecuteVtctlCommandResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ExecuteVtctlCommandResponse) + return object; + let message = new $root.vtctldata.ExecuteVtctlCommandResponse(); + if (object.event != null) { + if (typeof object.event !== "object") + throw TypeError(".vtctldata.ExecuteVtctlCommandResponse.event: object expected"); + message.event = $root.logutil.Event.fromObject(object.event); + } + return message; + }; + + /** + * Creates a plain object from an ExecuteVtctlCommandResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.ExecuteVtctlCommandResponse + * @static + * @param {vtctldata.ExecuteVtctlCommandResponse} message ExecuteVtctlCommandResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ExecuteVtctlCommandResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.event = null; + if (message.event != null && message.hasOwnProperty("event")) + object.event = $root.logutil.Event.toObject(message.event, options); + return object; + }; + + /** + * Converts this ExecuteVtctlCommandResponse to JSON. + * @function toJSON + * @memberof vtctldata.ExecuteVtctlCommandResponse + * @instance + * @returns {Object.} JSON object + */ + ExecuteVtctlCommandResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ExecuteVtctlCommandResponse + * @function getTypeUrl + * @memberof vtctldata.ExecuteVtctlCommandResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ExecuteVtctlCommandResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ExecuteVtctlCommandResponse"; + }; + + return ExecuteVtctlCommandResponse; + })(); + + /** + * MaterializationIntent enum. 
+ * @name vtctldata.MaterializationIntent + * @enum {number} + * @property {number} CUSTOM=0 CUSTOM value + * @property {number} MOVETABLES=1 MOVETABLES value + * @property {number} CREATELOOKUPINDEX=2 CREATELOOKUPINDEX value + */ + vtctldata.MaterializationIntent = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "CUSTOM"] = 0; + values[valuesById[1] = "MOVETABLES"] = 1; + values[valuesById[2] = "CREATELOOKUPINDEX"] = 2; + return values; + })(); + + vtctldata.TableMaterializeSettings = (function() { + + /** + * Properties of a TableMaterializeSettings. + * @memberof vtctldata + * @interface ITableMaterializeSettings + * @property {string|null} [target_table] TableMaterializeSettings target_table + * @property {string|null} [source_expression] TableMaterializeSettings source_expression + * @property {string|null} [create_ddl] TableMaterializeSettings create_ddl + */ + + /** + * Constructs a new TableMaterializeSettings. + * @memberof vtctldata + * @classdesc Represents a TableMaterializeSettings. + * @implements ITableMaterializeSettings + * @constructor + * @param {vtctldata.ITableMaterializeSettings=} [properties] Properties to set + */ + function TableMaterializeSettings(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * TableMaterializeSettings target_table. + * @member {string} target_table + * @memberof vtctldata.TableMaterializeSettings + * @instance + */ + TableMaterializeSettings.prototype.target_table = ""; + + /** + * TableMaterializeSettings source_expression. + * @member {string} source_expression + * @memberof vtctldata.TableMaterializeSettings + * @instance + */ + TableMaterializeSettings.prototype.source_expression = ""; + + /** + * TableMaterializeSettings create_ddl. 
+ * @member {string} create_ddl + * @memberof vtctldata.TableMaterializeSettings + * @instance + */ + TableMaterializeSettings.prototype.create_ddl = ""; + + /** + * Creates a new TableMaterializeSettings instance using the specified properties. + * @function create + * @memberof vtctldata.TableMaterializeSettings + * @static + * @param {vtctldata.ITableMaterializeSettings=} [properties] Properties to set + * @returns {vtctldata.TableMaterializeSettings} TableMaterializeSettings instance + */ + TableMaterializeSettings.create = function create(properties) { + return new TableMaterializeSettings(properties); + }; + + /** + * Encodes the specified TableMaterializeSettings message. Does not implicitly {@link vtctldata.TableMaterializeSettings.verify|verify} messages. + * @function encode + * @memberof vtctldata.TableMaterializeSettings + * @static + * @param {vtctldata.ITableMaterializeSettings} message TableMaterializeSettings message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + TableMaterializeSettings.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.target_table != null && Object.hasOwnProperty.call(message, "target_table")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.target_table); + if (message.source_expression != null && Object.hasOwnProperty.call(message, "source_expression")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.source_expression); + if (message.create_ddl != null && Object.hasOwnProperty.call(message, "create_ddl")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.create_ddl); + return writer; + }; + + /** + * Encodes the specified TableMaterializeSettings message, length delimited. Does not implicitly {@link vtctldata.TableMaterializeSettings.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.TableMaterializeSettings + * @static + * @param {vtctldata.ITableMaterializeSettings} message TableMaterializeSettings message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + TableMaterializeSettings.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a TableMaterializeSettings message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.TableMaterializeSettings + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.TableMaterializeSettings} TableMaterializeSettings + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + TableMaterializeSettings.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.TableMaterializeSettings(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.target_table = reader.string(); + break; + } + case 2: { + message.source_expression = reader.string(); + break; + } + case 3: { + message.create_ddl = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a TableMaterializeSettings message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.TableMaterializeSettings + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.TableMaterializeSettings} TableMaterializeSettings + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + TableMaterializeSettings.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a TableMaterializeSettings message. + * @function verify + * @memberof vtctldata.TableMaterializeSettings + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + TableMaterializeSettings.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.target_table != null && message.hasOwnProperty("target_table")) + if (!$util.isString(message.target_table)) + return "target_table: string expected"; + if (message.source_expression != null && message.hasOwnProperty("source_expression")) + if (!$util.isString(message.source_expression)) + return "source_expression: string expected"; + if (message.create_ddl != null && message.hasOwnProperty("create_ddl")) + if (!$util.isString(message.create_ddl)) + return "create_ddl: string expected"; + return null; + }; + + /** + * Creates a TableMaterializeSettings message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.TableMaterializeSettings + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.TableMaterializeSettings} TableMaterializeSettings + */ + TableMaterializeSettings.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.TableMaterializeSettings) + return object; + let message = new $root.vtctldata.TableMaterializeSettings(); + if (object.target_table != null) + message.target_table = String(object.target_table); + if (object.source_expression != null) + message.source_expression = String(object.source_expression); + if (object.create_ddl != null) + message.create_ddl = String(object.create_ddl); + return message; + }; + + /** + * Creates a plain object from a TableMaterializeSettings message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.TableMaterializeSettings + * @static + * @param {vtctldata.TableMaterializeSettings} message TableMaterializeSettings + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + TableMaterializeSettings.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.target_table = ""; + object.source_expression = ""; + object.create_ddl = ""; + } + if (message.target_table != null && message.hasOwnProperty("target_table")) + object.target_table = message.target_table; + if (message.source_expression != null && message.hasOwnProperty("source_expression")) + object.source_expression = message.source_expression; + if (message.create_ddl != null && message.hasOwnProperty("create_ddl")) + object.create_ddl = message.create_ddl; + return object; + }; + + /** + * Converts this TableMaterializeSettings to JSON. 
+ * @function toJSON + * @memberof vtctldata.TableMaterializeSettings + * @instance + * @returns {Object.} JSON object + */ + TableMaterializeSettings.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for TableMaterializeSettings + * @function getTypeUrl + * @memberof vtctldata.TableMaterializeSettings + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + TableMaterializeSettings.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.TableMaterializeSettings"; + }; + + return TableMaterializeSettings; + })(); + + vtctldata.MaterializeSettings = (function() { + + /** + * Properties of a MaterializeSettings. + * @memberof vtctldata + * @interface IMaterializeSettings + * @property {string|null} [workflow] MaterializeSettings workflow + * @property {string|null} [source_keyspace] MaterializeSettings source_keyspace + * @property {string|null} [target_keyspace] MaterializeSettings target_keyspace + * @property {boolean|null} [stop_after_copy] MaterializeSettings stop_after_copy + * @property {Array.|null} [table_settings] MaterializeSettings table_settings + * @property {string|null} [cell] MaterializeSettings cell + * @property {string|null} [tablet_types] MaterializeSettings tablet_types + * @property {string|null} [external_cluster] MaterializeSettings external_cluster + * @property {vtctldata.MaterializationIntent|null} [materialization_intent] MaterializeSettings materialization_intent + * @property {string|null} [source_time_zone] MaterializeSettings source_time_zone + * @property {string|null} [target_time_zone] MaterializeSettings target_time_zone + * @property {Array.|null} [source_shards] MaterializeSettings source_shards + * @property 
{string|null} [on_ddl] MaterializeSettings on_ddl + * @property {boolean|null} [defer_secondary_keys] MaterializeSettings defer_secondary_keys + * @property {tabletmanagerdata.TabletSelectionPreference|null} [tablet_selection_preference] MaterializeSettings tablet_selection_preference + * @property {boolean|null} [atomic_copy] MaterializeSettings atomic_copy + */ + + /** + * Constructs a new MaterializeSettings. + * @memberof vtctldata + * @classdesc Represents a MaterializeSettings. + * @implements IMaterializeSettings + * @constructor + * @param {vtctldata.IMaterializeSettings=} [properties] Properties to set + */ + function MaterializeSettings(properties) { + this.table_settings = []; + this.source_shards = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * MaterializeSettings workflow. + * @member {string} workflow + * @memberof vtctldata.MaterializeSettings + * @instance + */ + MaterializeSettings.prototype.workflow = ""; + + /** + * MaterializeSettings source_keyspace. + * @member {string} source_keyspace + * @memberof vtctldata.MaterializeSettings + * @instance + */ + MaterializeSettings.prototype.source_keyspace = ""; + + /** + * MaterializeSettings target_keyspace. + * @member {string} target_keyspace + * @memberof vtctldata.MaterializeSettings + * @instance + */ + MaterializeSettings.prototype.target_keyspace = ""; + + /** + * MaterializeSettings stop_after_copy. + * @member {boolean} stop_after_copy + * @memberof vtctldata.MaterializeSettings + * @instance + */ + MaterializeSettings.prototype.stop_after_copy = false; + + /** + * MaterializeSettings table_settings. + * @member {Array.} table_settings + * @memberof vtctldata.MaterializeSettings + * @instance + */ + MaterializeSettings.prototype.table_settings = $util.emptyArray; + + /** + * MaterializeSettings cell. 
+ * @member {string} cell + * @memberof vtctldata.MaterializeSettings + * @instance + */ + MaterializeSettings.prototype.cell = ""; + + /** + * MaterializeSettings tablet_types. + * @member {string} tablet_types + * @memberof vtctldata.MaterializeSettings + * @instance + */ + MaterializeSettings.prototype.tablet_types = ""; + + /** + * MaterializeSettings external_cluster. + * @member {string} external_cluster + * @memberof vtctldata.MaterializeSettings + * @instance + */ + MaterializeSettings.prototype.external_cluster = ""; + + /** + * MaterializeSettings materialization_intent. + * @member {vtctldata.MaterializationIntent} materialization_intent + * @memberof vtctldata.MaterializeSettings + * @instance + */ + MaterializeSettings.prototype.materialization_intent = 0; + + /** + * MaterializeSettings source_time_zone. + * @member {string} source_time_zone + * @memberof vtctldata.MaterializeSettings + * @instance + */ + MaterializeSettings.prototype.source_time_zone = ""; + + /** + * MaterializeSettings target_time_zone. + * @member {string} target_time_zone + * @memberof vtctldata.MaterializeSettings + * @instance + */ + MaterializeSettings.prototype.target_time_zone = ""; + + /** + * MaterializeSettings source_shards. + * @member {Array.} source_shards + * @memberof vtctldata.MaterializeSettings + * @instance + */ + MaterializeSettings.prototype.source_shards = $util.emptyArray; + + /** + * MaterializeSettings on_ddl. + * @member {string} on_ddl + * @memberof vtctldata.MaterializeSettings + * @instance + */ + MaterializeSettings.prototype.on_ddl = ""; + + /** + * MaterializeSettings defer_secondary_keys. + * @member {boolean} defer_secondary_keys + * @memberof vtctldata.MaterializeSettings + * @instance + */ + MaterializeSettings.prototype.defer_secondary_keys = false; + + /** + * MaterializeSettings tablet_selection_preference. 
+ * @member {tabletmanagerdata.TabletSelectionPreference} tablet_selection_preference + * @memberof vtctldata.MaterializeSettings + * @instance + */ + MaterializeSettings.prototype.tablet_selection_preference = 0; + + /** + * MaterializeSettings atomic_copy. + * @member {boolean} atomic_copy + * @memberof vtctldata.MaterializeSettings + * @instance + */ + MaterializeSettings.prototype.atomic_copy = false; + + /** + * Creates a new MaterializeSettings instance using the specified properties. + * @function create + * @memberof vtctldata.MaterializeSettings + * @static + * @param {vtctldata.IMaterializeSettings=} [properties] Properties to set + * @returns {vtctldata.MaterializeSettings} MaterializeSettings instance + */ + MaterializeSettings.create = function create(properties) { + return new MaterializeSettings(properties); + }; + + /** + * Encodes the specified MaterializeSettings message. Does not implicitly {@link vtctldata.MaterializeSettings.verify|verify} messages. + * @function encode + * @memberof vtctldata.MaterializeSettings + * @static + * @param {vtctldata.IMaterializeSettings} message MaterializeSettings message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + MaterializeSettings.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.workflow); + if (message.source_keyspace != null && Object.hasOwnProperty.call(message, "source_keyspace")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.source_keyspace); + if (message.target_keyspace != null && Object.hasOwnProperty.call(message, "target_keyspace")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.target_keyspace); + if (message.stop_after_copy != null && Object.hasOwnProperty.call(message, "stop_after_copy")) + writer.uint32(/* id 
4, wireType 0 =*/32).bool(message.stop_after_copy); + if (message.table_settings != null && message.table_settings.length) + for (let i = 0; i < message.table_settings.length; ++i) + $root.vtctldata.TableMaterializeSettings.encode(message.table_settings[i], writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.cell != null && Object.hasOwnProperty.call(message, "cell")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.cell); + if (message.tablet_types != null && Object.hasOwnProperty.call(message, "tablet_types")) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.tablet_types); + if (message.external_cluster != null && Object.hasOwnProperty.call(message, "external_cluster")) + writer.uint32(/* id 8, wireType 2 =*/66).string(message.external_cluster); + if (message.materialization_intent != null && Object.hasOwnProperty.call(message, "materialization_intent")) + writer.uint32(/* id 9, wireType 0 =*/72).int32(message.materialization_intent); + if (message.source_time_zone != null && Object.hasOwnProperty.call(message, "source_time_zone")) + writer.uint32(/* id 10, wireType 2 =*/82).string(message.source_time_zone); + if (message.target_time_zone != null && Object.hasOwnProperty.call(message, "target_time_zone")) + writer.uint32(/* id 11, wireType 2 =*/90).string(message.target_time_zone); + if (message.source_shards != null && message.source_shards.length) + for (let i = 0; i < message.source_shards.length; ++i) + writer.uint32(/* id 12, wireType 2 =*/98).string(message.source_shards[i]); + if (message.on_ddl != null && Object.hasOwnProperty.call(message, "on_ddl")) + writer.uint32(/* id 13, wireType 2 =*/106).string(message.on_ddl); + if (message.defer_secondary_keys != null && Object.hasOwnProperty.call(message, "defer_secondary_keys")) + writer.uint32(/* id 14, wireType 0 =*/112).bool(message.defer_secondary_keys); + if (message.tablet_selection_preference != null && Object.hasOwnProperty.call(message, 
"tablet_selection_preference")) + writer.uint32(/* id 15, wireType 0 =*/120).int32(message.tablet_selection_preference); + if (message.atomic_copy != null && Object.hasOwnProperty.call(message, "atomic_copy")) + writer.uint32(/* id 16, wireType 0 =*/128).bool(message.atomic_copy); + return writer; + }; + + /** + * Encodes the specified MaterializeSettings message, length delimited. Does not implicitly {@link vtctldata.MaterializeSettings.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.MaterializeSettings + * @static + * @param {vtctldata.IMaterializeSettings} message MaterializeSettings message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + MaterializeSettings.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a MaterializeSettings message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.MaterializeSettings + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.MaterializeSettings} MaterializeSettings + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + MaterializeSettings.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.MaterializeSettings(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.workflow = reader.string(); + break; + } + case 2: { + message.source_keyspace = reader.string(); + break; + } + case 3: { + message.target_keyspace = reader.string(); + break; + } + case 4: { + message.stop_after_copy = reader.bool(); + break; + } + case 5: { + if (!(message.table_settings && message.table_settings.length)) + message.table_settings = []; + message.table_settings.push($root.vtctldata.TableMaterializeSettings.decode(reader, reader.uint32())); + break; + } + case 6: { + message.cell = reader.string(); + break; + } + case 7: { + message.tablet_types = reader.string(); + break; + } + case 8: { + message.external_cluster = reader.string(); + break; + } + case 9: { + message.materialization_intent = reader.int32(); + break; + } + case 10: { + message.source_time_zone = reader.string(); + break; + } + case 11: { + message.target_time_zone = reader.string(); + break; + } + case 12: { + if (!(message.source_shards && message.source_shards.length)) + message.source_shards = []; + message.source_shards.push(reader.string()); + break; + } + case 13: { + message.on_ddl = reader.string(); + break; + } + case 14: { + message.defer_secondary_keys = reader.bool(); + break; + } + case 15: { + message.tablet_selection_preference = reader.int32(); + break; + } + case 16: { + message.atomic_copy = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a MaterializeSettings message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.MaterializeSettings + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.MaterializeSettings} MaterializeSettings + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + MaterializeSettings.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a MaterializeSettings message. + * @function verify + * @memberof vtctldata.MaterializeSettings + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + MaterializeSettings.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; + if (message.source_keyspace != null && message.hasOwnProperty("source_keyspace")) + if (!$util.isString(message.source_keyspace)) + return "source_keyspace: string expected"; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + if (!$util.isString(message.target_keyspace)) + return "target_keyspace: string expected"; + if (message.stop_after_copy != null && message.hasOwnProperty("stop_after_copy")) + if (typeof message.stop_after_copy !== "boolean") + return "stop_after_copy: boolean expected"; + if (message.table_settings != null && message.hasOwnProperty("table_settings")) { + if (!Array.isArray(message.table_settings)) + return "table_settings: array expected"; + for (let i = 0; i < message.table_settings.length; ++i) { + let error = $root.vtctldata.TableMaterializeSettings.verify(message.table_settings[i]); + if (error) 
+ return "table_settings." + error; + } + } + if (message.cell != null && message.hasOwnProperty("cell")) + if (!$util.isString(message.cell)) + return "cell: string expected"; + if (message.tablet_types != null && message.hasOwnProperty("tablet_types")) + if (!$util.isString(message.tablet_types)) + return "tablet_types: string expected"; + if (message.external_cluster != null && message.hasOwnProperty("external_cluster")) + if (!$util.isString(message.external_cluster)) + return "external_cluster: string expected"; + if (message.materialization_intent != null && message.hasOwnProperty("materialization_intent")) + switch (message.materialization_intent) { + default: + return "materialization_intent: enum value expected"; + case 0: + case 1: + case 2: + break; + } + if (message.source_time_zone != null && message.hasOwnProperty("source_time_zone")) + if (!$util.isString(message.source_time_zone)) + return "source_time_zone: string expected"; + if (message.target_time_zone != null && message.hasOwnProperty("target_time_zone")) + if (!$util.isString(message.target_time_zone)) + return "target_time_zone: string expected"; + if (message.source_shards != null && message.hasOwnProperty("source_shards")) { + if (!Array.isArray(message.source_shards)) + return "source_shards: array expected"; + for (let i = 0; i < message.source_shards.length; ++i) + if (!$util.isString(message.source_shards[i])) + return "source_shards: string[] expected"; + } + if (message.on_ddl != null && message.hasOwnProperty("on_ddl")) + if (!$util.isString(message.on_ddl)) + return "on_ddl: string expected"; + if (message.defer_secondary_keys != null && message.hasOwnProperty("defer_secondary_keys")) + if (typeof message.defer_secondary_keys !== "boolean") + return "defer_secondary_keys: boolean expected"; + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + switch (message.tablet_selection_preference) { + default: + return 
"tablet_selection_preference: enum value expected"; + case 0: + case 1: + case 3: + break; + } + if (message.atomic_copy != null && message.hasOwnProperty("atomic_copy")) + if (typeof message.atomic_copy !== "boolean") + return "atomic_copy: boolean expected"; + return null; + }; + + /** + * Creates a MaterializeSettings message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.MaterializeSettings + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.MaterializeSettings} MaterializeSettings + */ + MaterializeSettings.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.MaterializeSettings) + return object; + let message = new $root.vtctldata.MaterializeSettings(); + if (object.workflow != null) + message.workflow = String(object.workflow); + if (object.source_keyspace != null) + message.source_keyspace = String(object.source_keyspace); + if (object.target_keyspace != null) + message.target_keyspace = String(object.target_keyspace); + if (object.stop_after_copy != null) + message.stop_after_copy = Boolean(object.stop_after_copy); + if (object.table_settings) { + if (!Array.isArray(object.table_settings)) + throw TypeError(".vtctldata.MaterializeSettings.table_settings: array expected"); + message.table_settings = []; + for (let i = 0; i < object.table_settings.length; ++i) { + if (typeof object.table_settings[i] !== "object") + throw TypeError(".vtctldata.MaterializeSettings.table_settings: object expected"); + message.table_settings[i] = $root.vtctldata.TableMaterializeSettings.fromObject(object.table_settings[i]); + } + } + if (object.cell != null) + message.cell = String(object.cell); + if (object.tablet_types != null) + message.tablet_types = String(object.tablet_types); + if (object.external_cluster != null) + message.external_cluster = String(object.external_cluster); + switch (object.materialization_intent) { + default: + if 
(typeof object.materialization_intent === "number") { + message.materialization_intent = object.materialization_intent; + break; + } + break; + case "CUSTOM": + case 0: + message.materialization_intent = 0; + break; + case "MOVETABLES": + case 1: + message.materialization_intent = 1; + break; + case "CREATELOOKUPINDEX": + case 2: + message.materialization_intent = 2; + break; + } + if (object.source_time_zone != null) + message.source_time_zone = String(object.source_time_zone); + if (object.target_time_zone != null) + message.target_time_zone = String(object.target_time_zone); + if (object.source_shards) { + if (!Array.isArray(object.source_shards)) + throw TypeError(".vtctldata.MaterializeSettings.source_shards: array expected"); + message.source_shards = []; + for (let i = 0; i < object.source_shards.length; ++i) + message.source_shards[i] = String(object.source_shards[i]); + } + if (object.on_ddl != null) + message.on_ddl = String(object.on_ddl); + if (object.defer_secondary_keys != null) + message.defer_secondary_keys = Boolean(object.defer_secondary_keys); + switch (object.tablet_selection_preference) { + default: + if (typeof object.tablet_selection_preference === "number") { + message.tablet_selection_preference = object.tablet_selection_preference; + break; + } + break; + case "ANY": + case 0: + message.tablet_selection_preference = 0; + break; + case "INORDER": + case 1: + message.tablet_selection_preference = 1; + break; + case "UNKNOWN": + case 3: + message.tablet_selection_preference = 3; + break; + } + if (object.atomic_copy != null) + message.atomic_copy = Boolean(object.atomic_copy); + return message; + }; + + /** + * Creates a plain object from a MaterializeSettings message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.MaterializeSettings + * @static + * @param {vtctldata.MaterializeSettings} message MaterializeSettings + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + MaterializeSettings.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) { + object.table_settings = []; + object.source_shards = []; + } + if (options.defaults) { + object.workflow = ""; + object.source_keyspace = ""; + object.target_keyspace = ""; + object.stop_after_copy = false; + object.cell = ""; + object.tablet_types = ""; + object.external_cluster = ""; + object.materialization_intent = options.enums === String ? "CUSTOM" : 0; + object.source_time_zone = ""; + object.target_time_zone = ""; + object.on_ddl = ""; + object.defer_secondary_keys = false; + object.tablet_selection_preference = options.enums === String ? "ANY" : 0; + object.atomic_copy = false; + } + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; + if (message.source_keyspace != null && message.hasOwnProperty("source_keyspace")) + object.source_keyspace = message.source_keyspace; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + object.target_keyspace = message.target_keyspace; + if (message.stop_after_copy != null && message.hasOwnProperty("stop_after_copy")) + object.stop_after_copy = message.stop_after_copy; + if (message.table_settings && message.table_settings.length) { + object.table_settings = []; + for (let j = 0; j < message.table_settings.length; ++j) + object.table_settings[j] = $root.vtctldata.TableMaterializeSettings.toObject(message.table_settings[j], options); + } + if (message.cell != null && message.hasOwnProperty("cell")) + object.cell = message.cell; + if (message.tablet_types != null && message.hasOwnProperty("tablet_types")) + 
object.tablet_types = message.tablet_types; + if (message.external_cluster != null && message.hasOwnProperty("external_cluster")) + object.external_cluster = message.external_cluster; + if (message.materialization_intent != null && message.hasOwnProperty("materialization_intent")) + object.materialization_intent = options.enums === String ? $root.vtctldata.MaterializationIntent[message.materialization_intent] === undefined ? message.materialization_intent : $root.vtctldata.MaterializationIntent[message.materialization_intent] : message.materialization_intent; + if (message.source_time_zone != null && message.hasOwnProperty("source_time_zone")) + object.source_time_zone = message.source_time_zone; + if (message.target_time_zone != null && message.hasOwnProperty("target_time_zone")) + object.target_time_zone = message.target_time_zone; + if (message.source_shards && message.source_shards.length) { + object.source_shards = []; + for (let j = 0; j < message.source_shards.length; ++j) + object.source_shards[j] = message.source_shards[j]; + } + if (message.on_ddl != null && message.hasOwnProperty("on_ddl")) + object.on_ddl = message.on_ddl; + if (message.defer_secondary_keys != null && message.hasOwnProperty("defer_secondary_keys")) + object.defer_secondary_keys = message.defer_secondary_keys; + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + object.tablet_selection_preference = options.enums === String ? $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] === undefined ? message.tablet_selection_preference : $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] : message.tablet_selection_preference; + if (message.atomic_copy != null && message.hasOwnProperty("atomic_copy")) + object.atomic_copy = message.atomic_copy; + return object; + }; + + /** + * Converts this MaterializeSettings to JSON. 
+ * @function toJSON + * @memberof vtctldata.MaterializeSettings + * @instance + * @returns {Object.} JSON object + */ + MaterializeSettings.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for MaterializeSettings + * @function getTypeUrl + * @memberof vtctldata.MaterializeSettings + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + MaterializeSettings.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.MaterializeSettings"; + }; + + return MaterializeSettings; + })(); + + vtctldata.Keyspace = (function() { + + /** + * Properties of a Keyspace. + * @memberof vtctldata + * @interface IKeyspace + * @property {string|null} [name] Keyspace name + * @property {topodata.IKeyspace|null} [keyspace] Keyspace keyspace + */ + + /** + * Constructs a new Keyspace. + * @memberof vtctldata + * @classdesc Represents a Keyspace. + * @implements IKeyspace + * @constructor + * @param {vtctldata.IKeyspace=} [properties] Properties to set + */ + function Keyspace(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Keyspace name. + * @member {string} name + * @memberof vtctldata.Keyspace + * @instance + */ + Keyspace.prototype.name = ""; + + /** + * Keyspace keyspace. + * @member {topodata.IKeyspace|null|undefined} keyspace + * @memberof vtctldata.Keyspace + * @instance + */ + Keyspace.prototype.keyspace = null; + + /** + * Creates a new Keyspace instance using the specified properties. 
+ * @function create + * @memberof vtctldata.Keyspace + * @static + * @param {vtctldata.IKeyspace=} [properties] Properties to set + * @returns {vtctldata.Keyspace} Keyspace instance + */ + Keyspace.create = function create(properties) { + return new Keyspace(properties); + }; + + /** + * Encodes the specified Keyspace message. Does not implicitly {@link vtctldata.Keyspace.verify|verify} messages. + * @function encode + * @memberof vtctldata.Keyspace + * @static + * @param {vtctldata.IKeyspace} message Keyspace message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Keyspace.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + $root.topodata.Keyspace.encode(message.keyspace, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified Keyspace message, length delimited. Does not implicitly {@link vtctldata.Keyspace.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.Keyspace + * @static + * @param {vtctldata.IKeyspace} message Keyspace message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Keyspace.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Keyspace message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.Keyspace + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.Keyspace} Keyspace + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Keyspace.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.Keyspace(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.name = reader.string(); + break; + } + case 2: { + message.keyspace = $root.topodata.Keyspace.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a Keyspace message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.Keyspace + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.Keyspace} Keyspace + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Keyspace.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Keyspace message. 
+ * @function verify + * @memberof vtctldata.Keyspace + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Keyspace.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) { + let error = $root.topodata.Keyspace.verify(message.keyspace); + if (error) + return "keyspace." + error; + } + return null; + }; + + /** + * Creates a Keyspace message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.Keyspace + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.Keyspace} Keyspace + */ + Keyspace.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.Keyspace) + return object; + let message = new $root.vtctldata.Keyspace(); + if (object.name != null) + message.name = String(object.name); + if (object.keyspace != null) { + if (typeof object.keyspace !== "object") + throw TypeError(".vtctldata.Keyspace.keyspace: object expected"); + message.keyspace = $root.topodata.Keyspace.fromObject(object.keyspace); + } + return message; + }; + + /** + * Creates a plain object from a Keyspace message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.Keyspace + * @static + * @param {vtctldata.Keyspace} message Keyspace + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Keyspace.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.name = ""; + object.keyspace = null; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = $root.topodata.Keyspace.toObject(message.keyspace, options); + return object; + }; + + /** + * Converts this Keyspace to JSON. + * @function toJSON + * @memberof vtctldata.Keyspace + * @instance + * @returns {Object.} JSON object + */ + Keyspace.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Keyspace + * @function getTypeUrl + * @memberof vtctldata.Keyspace + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Keyspace.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.Keyspace"; + }; + + return Keyspace; + })(); + + /** + * QueryOrdering enum. 
+ * @name vtctldata.QueryOrdering + * @enum {number} + * @property {number} NONE=0 NONE value + * @property {number} ASCENDING=1 ASCENDING value + * @property {number} DESCENDING=2 DESCENDING value + */ + vtctldata.QueryOrdering = (function() { + const valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "NONE"] = 0; + values[valuesById[1] = "ASCENDING"] = 1; + values[valuesById[2] = "DESCENDING"] = 2; + return values; + })(); + + vtctldata.SchemaMigration = (function() { + + /** + * Properties of a SchemaMigration. + * @memberof vtctldata + * @interface ISchemaMigration + * @property {string|null} [uuid] SchemaMigration uuid + * @property {string|null} [keyspace] SchemaMigration keyspace + * @property {string|null} [shard] SchemaMigration shard + * @property {string|null} [schema] SchemaMigration schema + * @property {string|null} [table] SchemaMigration table + * @property {string|null} [migration_statement] SchemaMigration migration_statement + * @property {vtctldata.SchemaMigration.Strategy|null} [strategy] SchemaMigration strategy + * @property {string|null} [options] SchemaMigration options + * @property {vttime.ITime|null} [added_at] SchemaMigration added_at + * @property {vttime.ITime|null} [requested_at] SchemaMigration requested_at + * @property {vttime.ITime|null} [ready_at] SchemaMigration ready_at + * @property {vttime.ITime|null} [started_at] SchemaMigration started_at + * @property {vttime.ITime|null} [liveness_timestamp] SchemaMigration liveness_timestamp + * @property {vttime.ITime|null} [completed_at] SchemaMigration completed_at + * @property {vttime.ITime|null} [cleaned_up_at] SchemaMigration cleaned_up_at + * @property {vtctldata.SchemaMigration.Status|null} [status] SchemaMigration status + * @property {string|null} [log_path] SchemaMigration log_path + * @property {string|null} [artifacts] SchemaMigration artifacts + * @property {number|Long|null} [retries] SchemaMigration retries + * @property 
{topodata.ITabletAlias|null} [tablet] SchemaMigration tablet + * @property {boolean|null} [tablet_failure] SchemaMigration tablet_failure + * @property {number|null} [progress] SchemaMigration progress + * @property {string|null} [migration_context] SchemaMigration migration_context + * @property {string|null} [ddl_action] SchemaMigration ddl_action + * @property {string|null} [message] SchemaMigration message + * @property {number|Long|null} [eta_seconds] SchemaMigration eta_seconds + * @property {number|Long|null} [rows_copied] SchemaMigration rows_copied + * @property {number|Long|null} [table_rows] SchemaMigration table_rows + * @property {number|null} [added_unique_keys] SchemaMigration added_unique_keys + * @property {number|null} [removed_unique_keys] SchemaMigration removed_unique_keys + * @property {string|null} [log_file] SchemaMigration log_file + * @property {vttime.IDuration|null} [artifact_retention] SchemaMigration artifact_retention + * @property {boolean|null} [postpone_completion] SchemaMigration postpone_completion + * @property {string|null} [removed_unique_key_names] SchemaMigration removed_unique_key_names + * @property {string|null} [dropped_no_default_column_names] SchemaMigration dropped_no_default_column_names + * @property {string|null} [expanded_column_names] SchemaMigration expanded_column_names + * @property {string|null} [revertible_notes] SchemaMigration revertible_notes + * @property {boolean|null} [allow_concurrent] SchemaMigration allow_concurrent + * @property {string|null} [reverted_uuid] SchemaMigration reverted_uuid + * @property {boolean|null} [is_view] SchemaMigration is_view + * @property {boolean|null} [ready_to_complete] SchemaMigration ready_to_complete + * @property {number|Long|null} [vitess_liveness_indicator] SchemaMigration vitess_liveness_indicator + * @property {number|null} [user_throttle_ratio] SchemaMigration user_throttle_ratio + * @property {string|null} [special_plan] SchemaMigration special_plan + * 
@property {vttime.ITime|null} [last_throttled_at] SchemaMigration last_throttled_at + * @property {string|null} [component_throttled] SchemaMigration component_throttled + * @property {vttime.ITime|null} [cancelled_at] SchemaMigration cancelled_at + * @property {boolean|null} [postpone_launch] SchemaMigration postpone_launch + * @property {string|null} [stage] SchemaMigration stage + * @property {number|null} [cutover_attempts] SchemaMigration cutover_attempts + * @property {boolean|null} [is_immediate_operation] SchemaMigration is_immediate_operation + * @property {vttime.ITime|null} [reviewed_at] SchemaMigration reviewed_at + * @property {vttime.ITime|null} [ready_to_complete_at] SchemaMigration ready_to_complete_at + */ + + /** + * Constructs a new SchemaMigration. + * @memberof vtctldata + * @classdesc Represents a SchemaMigration. + * @implements ISchemaMigration + * @constructor + * @param {vtctldata.ISchemaMigration=} [properties] Properties to set + */ + function SchemaMigration(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * SchemaMigration uuid. + * @member {string} uuid + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.uuid = ""; + + /** + * SchemaMigration keyspace. + * @member {string} keyspace + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.keyspace = ""; + + /** + * SchemaMigration shard. + * @member {string} shard + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.shard = ""; + + /** + * SchemaMigration schema. + * @member {string} schema + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.schema = ""; + + /** + * SchemaMigration table. 
+ * @member {string} table + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.table = ""; + + /** + * SchemaMigration migration_statement. + * @member {string} migration_statement + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.migration_statement = ""; + + /** + * SchemaMigration strategy. + * @member {vtctldata.SchemaMigration.Strategy} strategy + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.strategy = 0; + + /** + * SchemaMigration options. + * @member {string} options + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.options = ""; + + /** + * SchemaMigration added_at. + * @member {vttime.ITime|null|undefined} added_at + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.added_at = null; + + /** + * SchemaMigration requested_at. + * @member {vttime.ITime|null|undefined} requested_at + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.requested_at = null; + + /** + * SchemaMigration ready_at. + * @member {vttime.ITime|null|undefined} ready_at + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.ready_at = null; + + /** + * SchemaMigration started_at. + * @member {vttime.ITime|null|undefined} started_at + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.started_at = null; + + /** + * SchemaMigration liveness_timestamp. + * @member {vttime.ITime|null|undefined} liveness_timestamp + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.liveness_timestamp = null; + + /** + * SchemaMigration completed_at. + * @member {vttime.ITime|null|undefined} completed_at + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.completed_at = null; + + /** + * SchemaMigration cleaned_up_at. 
+ * @member {vttime.ITime|null|undefined} cleaned_up_at + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.cleaned_up_at = null; + + /** + * SchemaMigration status. + * @member {vtctldata.SchemaMigration.Status} status + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.status = 0; + + /** + * SchemaMigration log_path. + * @member {string} log_path + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.log_path = ""; + + /** + * SchemaMigration artifacts. + * @member {string} artifacts + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.artifacts = ""; + + /** + * SchemaMigration retries. + * @member {number|Long} retries + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.retries = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + + /** + * SchemaMigration tablet. + * @member {topodata.ITabletAlias|null|undefined} tablet + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.tablet = null; + + /** + * SchemaMigration tablet_failure. + * @member {boolean} tablet_failure + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.tablet_failure = false; + + /** + * SchemaMigration progress. + * @member {number} progress + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.progress = 0; + + /** + * SchemaMigration migration_context. + * @member {string} migration_context + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.migration_context = ""; + + /** + * SchemaMigration ddl_action. + * @member {string} ddl_action + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.ddl_action = ""; + + /** + * SchemaMigration message. 
+ * @member {string} message + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.message = ""; + + /** + * SchemaMigration eta_seconds. + * @member {number|Long} eta_seconds + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.eta_seconds = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * SchemaMigration rows_copied. + * @member {number|Long} rows_copied + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.rows_copied = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + + /** + * SchemaMigration table_rows. + * @member {number|Long} table_rows + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.table_rows = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * SchemaMigration added_unique_keys. + * @member {number} added_unique_keys + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.added_unique_keys = 0; + + /** + * SchemaMigration removed_unique_keys. + * @member {number} removed_unique_keys + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.removed_unique_keys = 0; + + /** + * SchemaMigration log_file. + * @member {string} log_file + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.log_file = ""; + + /** + * SchemaMigration artifact_retention. + * @member {vttime.IDuration|null|undefined} artifact_retention + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.artifact_retention = null; + + /** + * SchemaMigration postpone_completion. + * @member {boolean} postpone_completion + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.postpone_completion = false; + + /** + * SchemaMigration removed_unique_key_names. 
+ * @member {string} removed_unique_key_names + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.removed_unique_key_names = ""; + + /** + * SchemaMigration dropped_no_default_column_names. + * @member {string} dropped_no_default_column_names + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.dropped_no_default_column_names = ""; + + /** + * SchemaMigration expanded_column_names. + * @member {string} expanded_column_names + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.expanded_column_names = ""; + + /** + * SchemaMigration revertible_notes. + * @member {string} revertible_notes + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.revertible_notes = ""; + + /** + * SchemaMigration allow_concurrent. + * @member {boolean} allow_concurrent + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.allow_concurrent = false; + + /** + * SchemaMigration reverted_uuid. + * @member {string} reverted_uuid + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.reverted_uuid = ""; + + /** + * SchemaMigration is_view. + * @member {boolean} is_view + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.is_view = false; + + /** + * SchemaMigration ready_to_complete. + * @member {boolean} ready_to_complete + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.ready_to_complete = false; + + /** + * SchemaMigration vitess_liveness_indicator. + * @member {number|Long} vitess_liveness_indicator + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.vitess_liveness_indicator = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * SchemaMigration user_throttle_ratio. 
+ * @member {number} user_throttle_ratio + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.user_throttle_ratio = 0; + + /** + * SchemaMigration special_plan. + * @member {string} special_plan + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.special_plan = ""; + + /** + * SchemaMigration last_throttled_at. + * @member {vttime.ITime|null|undefined} last_throttled_at + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.last_throttled_at = null; + + /** + * SchemaMigration component_throttled. + * @member {string} component_throttled + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.component_throttled = ""; + + /** + * SchemaMigration cancelled_at. + * @member {vttime.ITime|null|undefined} cancelled_at + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.cancelled_at = null; + + /** + * SchemaMigration postpone_launch. + * @member {boolean} postpone_launch + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.postpone_launch = false; + + /** + * SchemaMigration stage. + * @member {string} stage + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.stage = ""; + + /** + * SchemaMigration cutover_attempts. + * @member {number} cutover_attempts + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.cutover_attempts = 0; + + /** + * SchemaMigration is_immediate_operation. + * @member {boolean} is_immediate_operation + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.is_immediate_operation = false; + + /** + * SchemaMigration reviewed_at. + * @member {vttime.ITime|null|undefined} reviewed_at + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.reviewed_at = null; + + /** + * SchemaMigration ready_to_complete_at. 
+ * @member {vttime.ITime|null|undefined} ready_to_complete_at + * @memberof vtctldata.SchemaMigration + * @instance + */ + SchemaMigration.prototype.ready_to_complete_at = null; + + /** + * Creates a new SchemaMigration instance using the specified properties. + * @function create + * @memberof vtctldata.SchemaMigration + * @static + * @param {vtctldata.ISchemaMigration=} [properties] Properties to set + * @returns {vtctldata.SchemaMigration} SchemaMigration instance + */ + SchemaMigration.create = function create(properties) { + return new SchemaMigration(properties); + }; + + /** + * Encodes the specified SchemaMigration message. Does not implicitly {@link vtctldata.SchemaMigration.verify|verify} messages. + * @function encode + * @memberof vtctldata.SchemaMigration + * @static + * @param {vtctldata.ISchemaMigration} message SchemaMigration message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + SchemaMigration.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.uuid != null && Object.hasOwnProperty.call(message, "uuid")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.uuid); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.shard); + if (message.schema != null && Object.hasOwnProperty.call(message, "schema")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.schema); + if (message.table != null && Object.hasOwnProperty.call(message, "table")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.table); + if (message.migration_statement != null && Object.hasOwnProperty.call(message, "migration_statement")) + writer.uint32(/* id 6, wireType 2 
=*/50).string(message.migration_statement); + if (message.strategy != null && Object.hasOwnProperty.call(message, "strategy")) + writer.uint32(/* id 7, wireType 0 =*/56).int32(message.strategy); + if (message.options != null && Object.hasOwnProperty.call(message, "options")) + writer.uint32(/* id 8, wireType 2 =*/66).string(message.options); + if (message.added_at != null && Object.hasOwnProperty.call(message, "added_at")) + $root.vttime.Time.encode(message.added_at, writer.uint32(/* id 9, wireType 2 =*/74).fork()).ldelim(); + if (message.requested_at != null && Object.hasOwnProperty.call(message, "requested_at")) + $root.vttime.Time.encode(message.requested_at, writer.uint32(/* id 10, wireType 2 =*/82).fork()).ldelim(); + if (message.ready_at != null && Object.hasOwnProperty.call(message, "ready_at")) + $root.vttime.Time.encode(message.ready_at, writer.uint32(/* id 11, wireType 2 =*/90).fork()).ldelim(); + if (message.started_at != null && Object.hasOwnProperty.call(message, "started_at")) + $root.vttime.Time.encode(message.started_at, writer.uint32(/* id 12, wireType 2 =*/98).fork()).ldelim(); + if (message.liveness_timestamp != null && Object.hasOwnProperty.call(message, "liveness_timestamp")) + $root.vttime.Time.encode(message.liveness_timestamp, writer.uint32(/* id 13, wireType 2 =*/106).fork()).ldelim(); + if (message.completed_at != null && Object.hasOwnProperty.call(message, "completed_at")) + $root.vttime.Time.encode(message.completed_at, writer.uint32(/* id 14, wireType 2 =*/114).fork()).ldelim(); + if (message.cleaned_up_at != null && Object.hasOwnProperty.call(message, "cleaned_up_at")) + $root.vttime.Time.encode(message.cleaned_up_at, writer.uint32(/* id 15, wireType 2 =*/122).fork()).ldelim(); + if (message.status != null && Object.hasOwnProperty.call(message, "status")) + writer.uint32(/* id 16, wireType 0 =*/128).int32(message.status); + if (message.log_path != null && Object.hasOwnProperty.call(message, "log_path")) + writer.uint32(/* id 17, 
wireType 2 =*/138).string(message.log_path); + if (message.artifacts != null && Object.hasOwnProperty.call(message, "artifacts")) + writer.uint32(/* id 18, wireType 2 =*/146).string(message.artifacts); + if (message.retries != null && Object.hasOwnProperty.call(message, "retries")) + writer.uint32(/* id 19, wireType 0 =*/152).uint64(message.retries); + if (message.tablet != null && Object.hasOwnProperty.call(message, "tablet")) + $root.topodata.TabletAlias.encode(message.tablet, writer.uint32(/* id 20, wireType 2 =*/162).fork()).ldelim(); + if (message.tablet_failure != null && Object.hasOwnProperty.call(message, "tablet_failure")) + writer.uint32(/* id 21, wireType 0 =*/168).bool(message.tablet_failure); + if (message.progress != null && Object.hasOwnProperty.call(message, "progress")) + writer.uint32(/* id 22, wireType 5 =*/181).float(message.progress); + if (message.migration_context != null && Object.hasOwnProperty.call(message, "migration_context")) + writer.uint32(/* id 23, wireType 2 =*/186).string(message.migration_context); + if (message.ddl_action != null && Object.hasOwnProperty.call(message, "ddl_action")) + writer.uint32(/* id 24, wireType 2 =*/194).string(message.ddl_action); + if (message.message != null && Object.hasOwnProperty.call(message, "message")) + writer.uint32(/* id 25, wireType 2 =*/202).string(message.message); + if (message.eta_seconds != null && Object.hasOwnProperty.call(message, "eta_seconds")) + writer.uint32(/* id 26, wireType 0 =*/208).int64(message.eta_seconds); + if (message.rows_copied != null && Object.hasOwnProperty.call(message, "rows_copied")) + writer.uint32(/* id 27, wireType 0 =*/216).uint64(message.rows_copied); + if (message.table_rows != null && Object.hasOwnProperty.call(message, "table_rows")) + writer.uint32(/* id 28, wireType 0 =*/224).int64(message.table_rows); + if (message.added_unique_keys != null && Object.hasOwnProperty.call(message, "added_unique_keys")) + writer.uint32(/* id 29, wireType 0 
=*/232).uint32(message.added_unique_keys); + if (message.removed_unique_keys != null && Object.hasOwnProperty.call(message, "removed_unique_keys")) + writer.uint32(/* id 30, wireType 0 =*/240).uint32(message.removed_unique_keys); + if (message.log_file != null && Object.hasOwnProperty.call(message, "log_file")) + writer.uint32(/* id 31, wireType 2 =*/250).string(message.log_file); + if (message.artifact_retention != null && Object.hasOwnProperty.call(message, "artifact_retention")) + $root.vttime.Duration.encode(message.artifact_retention, writer.uint32(/* id 32, wireType 2 =*/258).fork()).ldelim(); + if (message.postpone_completion != null && Object.hasOwnProperty.call(message, "postpone_completion")) + writer.uint32(/* id 33, wireType 0 =*/264).bool(message.postpone_completion); + if (message.removed_unique_key_names != null && Object.hasOwnProperty.call(message, "removed_unique_key_names")) + writer.uint32(/* id 34, wireType 2 =*/274).string(message.removed_unique_key_names); + if (message.dropped_no_default_column_names != null && Object.hasOwnProperty.call(message, "dropped_no_default_column_names")) + writer.uint32(/* id 35, wireType 2 =*/282).string(message.dropped_no_default_column_names); + if (message.expanded_column_names != null && Object.hasOwnProperty.call(message, "expanded_column_names")) + writer.uint32(/* id 36, wireType 2 =*/290).string(message.expanded_column_names); + if (message.revertible_notes != null && Object.hasOwnProperty.call(message, "revertible_notes")) + writer.uint32(/* id 37, wireType 2 =*/298).string(message.revertible_notes); + if (message.allow_concurrent != null && Object.hasOwnProperty.call(message, "allow_concurrent")) + writer.uint32(/* id 38, wireType 0 =*/304).bool(message.allow_concurrent); + if (message.reverted_uuid != null && Object.hasOwnProperty.call(message, "reverted_uuid")) + writer.uint32(/* id 39, wireType 2 =*/314).string(message.reverted_uuid); + if (message.is_view != null && 
Object.hasOwnProperty.call(message, "is_view")) + writer.uint32(/* id 40, wireType 0 =*/320).bool(message.is_view); + if (message.ready_to_complete != null && Object.hasOwnProperty.call(message, "ready_to_complete")) + writer.uint32(/* id 41, wireType 0 =*/328).bool(message.ready_to_complete); + if (message.vitess_liveness_indicator != null && Object.hasOwnProperty.call(message, "vitess_liveness_indicator")) + writer.uint32(/* id 42, wireType 0 =*/336).int64(message.vitess_liveness_indicator); + if (message.user_throttle_ratio != null && Object.hasOwnProperty.call(message, "user_throttle_ratio")) + writer.uint32(/* id 43, wireType 5 =*/349).float(message.user_throttle_ratio); + if (message.special_plan != null && Object.hasOwnProperty.call(message, "special_plan")) + writer.uint32(/* id 44, wireType 2 =*/354).string(message.special_plan); + if (message.last_throttled_at != null && Object.hasOwnProperty.call(message, "last_throttled_at")) + $root.vttime.Time.encode(message.last_throttled_at, writer.uint32(/* id 45, wireType 2 =*/362).fork()).ldelim(); + if (message.component_throttled != null && Object.hasOwnProperty.call(message, "component_throttled")) + writer.uint32(/* id 46, wireType 2 =*/370).string(message.component_throttled); + if (message.cancelled_at != null && Object.hasOwnProperty.call(message, "cancelled_at")) + $root.vttime.Time.encode(message.cancelled_at, writer.uint32(/* id 47, wireType 2 =*/378).fork()).ldelim(); + if (message.postpone_launch != null && Object.hasOwnProperty.call(message, "postpone_launch")) + writer.uint32(/* id 48, wireType 0 =*/384).bool(message.postpone_launch); + if (message.stage != null && Object.hasOwnProperty.call(message, "stage")) + writer.uint32(/* id 49, wireType 2 =*/394).string(message.stage); + if (message.cutover_attempts != null && Object.hasOwnProperty.call(message, "cutover_attempts")) + writer.uint32(/* id 50, wireType 0 =*/400).uint32(message.cutover_attempts); + if (message.is_immediate_operation != null && 
Object.hasOwnProperty.call(message, "is_immediate_operation")) + writer.uint32(/* id 51, wireType 0 =*/408).bool(message.is_immediate_operation); + if (message.reviewed_at != null && Object.hasOwnProperty.call(message, "reviewed_at")) + $root.vttime.Time.encode(message.reviewed_at, writer.uint32(/* id 52, wireType 2 =*/418).fork()).ldelim(); + if (message.ready_to_complete_at != null && Object.hasOwnProperty.call(message, "ready_to_complete_at")) + $root.vttime.Time.encode(message.ready_to_complete_at, writer.uint32(/* id 53, wireType 2 =*/426).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified SchemaMigration message, length delimited. Does not implicitly {@link vtctldata.SchemaMigration.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.SchemaMigration + * @static + * @param {vtctldata.ISchemaMigration} message SchemaMigration message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + SchemaMigration.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a SchemaMigration message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.SchemaMigration + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.SchemaMigration} SchemaMigration + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + SchemaMigration.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.SchemaMigration(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.uuid = reader.string(); + break; + } + case 2: { + message.keyspace = reader.string(); + break; + } + case 3: { + message.shard = reader.string(); + break; + } + case 4: { + message.schema = reader.string(); + break; + } + case 5: { + message.table = reader.string(); + break; + } + case 6: { + message.migration_statement = reader.string(); + break; + } + case 7: { + message.strategy = reader.int32(); + break; + } + case 8: { + message.options = reader.string(); + break; + } + case 9: { + message.added_at = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 10: { + message.requested_at = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 11: { + message.ready_at = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 12: { + message.started_at = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 13: { + message.liveness_timestamp = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 14: { + message.completed_at = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 15: { + message.cleaned_up_at = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 16: { + message.status = reader.int32(); + break; + } + case 17: { + message.log_path = reader.string(); + break; + } + case 18: { + message.artifacts = reader.string(); + break; + } + case 19: { + message.retries = reader.uint64(); + break; + } + case 20: { + message.tablet = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 21: { + message.tablet_failure = reader.bool(); + break; + } + case 22: { + message.progress = reader.float(); + break; + } + case 23: { + message.migration_context = reader.string(); + break; + } + case 24: { + message.ddl_action = reader.string(); + break; 
+ } + case 25: { + message.message = reader.string(); + break; + } + case 26: { + message.eta_seconds = reader.int64(); + break; + } + case 27: { + message.rows_copied = reader.uint64(); + break; + } + case 28: { + message.table_rows = reader.int64(); + break; + } + case 29: { + message.added_unique_keys = reader.uint32(); + break; + } + case 30: { + message.removed_unique_keys = reader.uint32(); + break; + } + case 31: { + message.log_file = reader.string(); + break; + } + case 32: { + message.artifact_retention = $root.vttime.Duration.decode(reader, reader.uint32()); + break; + } + case 33: { + message.postpone_completion = reader.bool(); + break; + } + case 34: { + message.removed_unique_key_names = reader.string(); + break; + } + case 35: { + message.dropped_no_default_column_names = reader.string(); + break; + } + case 36: { + message.expanded_column_names = reader.string(); + break; + } + case 37: { + message.revertible_notes = reader.string(); + break; + } + case 38: { + message.allow_concurrent = reader.bool(); + break; + } + case 39: { + message.reverted_uuid = reader.string(); + break; + } + case 40: { + message.is_view = reader.bool(); + break; + } + case 41: { + message.ready_to_complete = reader.bool(); + break; + } + case 42: { + message.vitess_liveness_indicator = reader.int64(); + break; + } + case 43: { + message.user_throttle_ratio = reader.float(); + break; + } + case 44: { + message.special_plan = reader.string(); + break; + } + case 45: { + message.last_throttled_at = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 46: { + message.component_throttled = reader.string(); + break; + } + case 47: { + message.cancelled_at = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 48: { + message.postpone_launch = reader.bool(); + break; + } + case 49: { + message.stage = reader.string(); + break; + } + case 50: { + message.cutover_attempts = reader.uint32(); + break; + } + case 51: { + 
message.is_immediate_operation = reader.bool(); + break; + } + case 52: { + message.reviewed_at = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 53: { + message.ready_to_complete_at = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a SchemaMigration message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.SchemaMigration + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.SchemaMigration} SchemaMigration + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + SchemaMigration.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a SchemaMigration message. 
+ * @function verify + * @memberof vtctldata.SchemaMigration + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + SchemaMigration.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.uuid != null && message.hasOwnProperty("uuid")) + if (!$util.isString(message.uuid)) + return "uuid: string expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.schema != null && message.hasOwnProperty("schema")) + if (!$util.isString(message.schema)) + return "schema: string expected"; + if (message.table != null && message.hasOwnProperty("table")) + if (!$util.isString(message.table)) + return "table: string expected"; + if (message.migration_statement != null && message.hasOwnProperty("migration_statement")) + if (!$util.isString(message.migration_statement)) + return "migration_statement: string expected"; + if (message.strategy != null && message.hasOwnProperty("strategy")) + switch (message.strategy) { + default: + return "strategy: enum value expected"; + case 0: + case 0: + case 1: + case 2: + case 3: + case 4: + break; + } + if (message.options != null && message.hasOwnProperty("options")) + if (!$util.isString(message.options)) + return "options: string expected"; + if (message.added_at != null && message.hasOwnProperty("added_at")) { + let error = $root.vttime.Time.verify(message.added_at); + if (error) + return "added_at." + error; + } + if (message.requested_at != null && message.hasOwnProperty("requested_at")) { + let error = $root.vttime.Time.verify(message.requested_at); + if (error) + return "requested_at." 
+ error; + } + if (message.ready_at != null && message.hasOwnProperty("ready_at")) { + let error = $root.vttime.Time.verify(message.ready_at); + if (error) + return "ready_at." + error; + } + if (message.started_at != null && message.hasOwnProperty("started_at")) { + let error = $root.vttime.Time.verify(message.started_at); + if (error) + return "started_at." + error; + } + if (message.liveness_timestamp != null && message.hasOwnProperty("liveness_timestamp")) { + let error = $root.vttime.Time.verify(message.liveness_timestamp); + if (error) + return "liveness_timestamp." + error; + } + if (message.completed_at != null && message.hasOwnProperty("completed_at")) { + let error = $root.vttime.Time.verify(message.completed_at); + if (error) + return "completed_at." + error; + } + if (message.cleaned_up_at != null && message.hasOwnProperty("cleaned_up_at")) { + let error = $root.vttime.Time.verify(message.cleaned_up_at); + if (error) + return "cleaned_up_at." + error; + } + if (message.status != null && message.hasOwnProperty("status")) + switch (message.status) { + default: + return "status: enum value expected"; + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + break; + } + if (message.log_path != null && message.hasOwnProperty("log_path")) + if (!$util.isString(message.log_path)) + return "log_path: string expected"; + if (message.artifacts != null && message.hasOwnProperty("artifacts")) + if (!$util.isString(message.artifacts)) + return "artifacts: string expected"; + if (message.retries != null && message.hasOwnProperty("retries")) + if (!$util.isInteger(message.retries) && !(message.retries && $util.isInteger(message.retries.low) && $util.isInteger(message.retries.high))) + return "retries: integer|Long expected"; + if (message.tablet != null && message.hasOwnProperty("tablet")) { + let error = $root.topodata.TabletAlias.verify(message.tablet); + if (error) + return "tablet." 
+ error; + } + if (message.tablet_failure != null && message.hasOwnProperty("tablet_failure")) + if (typeof message.tablet_failure !== "boolean") + return "tablet_failure: boolean expected"; + if (message.progress != null && message.hasOwnProperty("progress")) + if (typeof message.progress !== "number") + return "progress: number expected"; + if (message.migration_context != null && message.hasOwnProperty("migration_context")) + if (!$util.isString(message.migration_context)) + return "migration_context: string expected"; + if (message.ddl_action != null && message.hasOwnProperty("ddl_action")) + if (!$util.isString(message.ddl_action)) + return "ddl_action: string expected"; + if (message.message != null && message.hasOwnProperty("message")) + if (!$util.isString(message.message)) + return "message: string expected"; + if (message.eta_seconds != null && message.hasOwnProperty("eta_seconds")) + if (!$util.isInteger(message.eta_seconds) && !(message.eta_seconds && $util.isInteger(message.eta_seconds.low) && $util.isInteger(message.eta_seconds.high))) + return "eta_seconds: integer|Long expected"; + if (message.rows_copied != null && message.hasOwnProperty("rows_copied")) + if (!$util.isInteger(message.rows_copied) && !(message.rows_copied && $util.isInteger(message.rows_copied.low) && $util.isInteger(message.rows_copied.high))) + return "rows_copied: integer|Long expected"; + if (message.table_rows != null && message.hasOwnProperty("table_rows")) + if (!$util.isInteger(message.table_rows) && !(message.table_rows && $util.isInteger(message.table_rows.low) && $util.isInteger(message.table_rows.high))) + return "table_rows: integer|Long expected"; + if (message.added_unique_keys != null && message.hasOwnProperty("added_unique_keys")) + if (!$util.isInteger(message.added_unique_keys)) + return "added_unique_keys: integer expected"; + if (message.removed_unique_keys != null && message.hasOwnProperty("removed_unique_keys")) + if 
(!$util.isInteger(message.removed_unique_keys)) + return "removed_unique_keys: integer expected"; + if (message.log_file != null && message.hasOwnProperty("log_file")) + if (!$util.isString(message.log_file)) + return "log_file: string expected"; + if (message.artifact_retention != null && message.hasOwnProperty("artifact_retention")) { + let error = $root.vttime.Duration.verify(message.artifact_retention); + if (error) + return "artifact_retention." + error; + } + if (message.postpone_completion != null && message.hasOwnProperty("postpone_completion")) + if (typeof message.postpone_completion !== "boolean") + return "postpone_completion: boolean expected"; + if (message.removed_unique_key_names != null && message.hasOwnProperty("removed_unique_key_names")) + if (!$util.isString(message.removed_unique_key_names)) + return "removed_unique_key_names: string expected"; + if (message.dropped_no_default_column_names != null && message.hasOwnProperty("dropped_no_default_column_names")) + if (!$util.isString(message.dropped_no_default_column_names)) + return "dropped_no_default_column_names: string expected"; + if (message.expanded_column_names != null && message.hasOwnProperty("expanded_column_names")) + if (!$util.isString(message.expanded_column_names)) + return "expanded_column_names: string expected"; + if (message.revertible_notes != null && message.hasOwnProperty("revertible_notes")) + if (!$util.isString(message.revertible_notes)) + return "revertible_notes: string expected"; + if (message.allow_concurrent != null && message.hasOwnProperty("allow_concurrent")) + if (typeof message.allow_concurrent !== "boolean") + return "allow_concurrent: boolean expected"; + if (message.reverted_uuid != null && message.hasOwnProperty("reverted_uuid")) + if (!$util.isString(message.reverted_uuid)) + return "reverted_uuid: string expected"; + if (message.is_view != null && message.hasOwnProperty("is_view")) + if (typeof message.is_view !== "boolean") + return "is_view: boolean 
expected"; + if (message.ready_to_complete != null && message.hasOwnProperty("ready_to_complete")) + if (typeof message.ready_to_complete !== "boolean") + return "ready_to_complete: boolean expected"; + if (message.vitess_liveness_indicator != null && message.hasOwnProperty("vitess_liveness_indicator")) + if (!$util.isInteger(message.vitess_liveness_indicator) && !(message.vitess_liveness_indicator && $util.isInteger(message.vitess_liveness_indicator.low) && $util.isInteger(message.vitess_liveness_indicator.high))) + return "vitess_liveness_indicator: integer|Long expected"; + if (message.user_throttle_ratio != null && message.hasOwnProperty("user_throttle_ratio")) + if (typeof message.user_throttle_ratio !== "number") + return "user_throttle_ratio: number expected"; + if (message.special_plan != null && message.hasOwnProperty("special_plan")) + if (!$util.isString(message.special_plan)) + return "special_plan: string expected"; + if (message.last_throttled_at != null && message.hasOwnProperty("last_throttled_at")) { + let error = $root.vttime.Time.verify(message.last_throttled_at); + if (error) + return "last_throttled_at." + error; + } + if (message.component_throttled != null && message.hasOwnProperty("component_throttled")) + if (!$util.isString(message.component_throttled)) + return "component_throttled: string expected"; + if (message.cancelled_at != null && message.hasOwnProperty("cancelled_at")) { + let error = $root.vttime.Time.verify(message.cancelled_at); + if (error) + return "cancelled_at." 
+ error; + } + if (message.postpone_launch != null && message.hasOwnProperty("postpone_launch")) + if (typeof message.postpone_launch !== "boolean") + return "postpone_launch: boolean expected"; + if (message.stage != null && message.hasOwnProperty("stage")) + if (!$util.isString(message.stage)) + return "stage: string expected"; + if (message.cutover_attempts != null && message.hasOwnProperty("cutover_attempts")) + if (!$util.isInteger(message.cutover_attempts)) + return "cutover_attempts: integer expected"; + if (message.is_immediate_operation != null && message.hasOwnProperty("is_immediate_operation")) + if (typeof message.is_immediate_operation !== "boolean") + return "is_immediate_operation: boolean expected"; + if (message.reviewed_at != null && message.hasOwnProperty("reviewed_at")) { + let error = $root.vttime.Time.verify(message.reviewed_at); + if (error) + return "reviewed_at." + error; + } + if (message.ready_to_complete_at != null && message.hasOwnProperty("ready_to_complete_at")) { + let error = $root.vttime.Time.verify(message.ready_to_complete_at); + if (error) + return "ready_to_complete_at." + error; + } + return null; + }; + + /** + * Creates a SchemaMigration message from a plain object. Also converts values to their respective internal types. 
 * @function fromObject
 * @memberof vtctldata.SchemaMigration
 * @static
 * @param {Object.<string,*>} object Plain object
 * @returns {vtctldata.SchemaMigration} SchemaMigration
 */
// NOTE(review): protobuf.js-style generated converter — keep field order and
// Long/LongBits branches exactly in sync with the .proto; regenerate rather
// than hand-editing where possible.
SchemaMigration.fromObject = function fromObject(object) {
    if (object instanceof $root.vtctldata.SchemaMigration)
        return object;
    let message = new $root.vtctldata.SchemaMigration();
    if (object.uuid != null)
        message.uuid = String(object.uuid);
    if (object.keyspace != null)
        message.keyspace = String(object.keyspace);
    if (object.shard != null)
        message.shard = String(object.shard);
    if (object.schema != null)
        message.schema = String(object.schema);
    if (object.table != null)
        message.table = String(object.table);
    if (object.migration_statement != null)
        message.migration_statement = String(object.migration_statement);
    // Accept either the enum name or its numeric value; unknown numbers are
    // kept as-is (default branch), unknown strings are ignored.
    switch (object.strategy) {
    default:
        if (typeof object.strategy === "number") {
            message.strategy = object.strategy;
            break;
        }
        break;
    case "VITESS":
    case 0:
        message.strategy = 0;
        break;
    // ONLINE is an alias for VITESS (= 0); the second `case 0` label below is
    // unreachable but emitted by the generator for aliased enums.
    case "ONLINE":
    case 0:
        message.strategy = 0;
        break;
    case "GHOST":
    case 1:
        message.strategy = 1;
        break;
    case "PTOSC":
    case 2:
        message.strategy = 2;
        break;
    case "DIRECT":
    case 3:
        message.strategy = 3;
        break;
    case "MYSQL":
    case 4:
        message.strategy = 4;
        break;
    }
    if (object.options != null)
        message.options = String(object.options);
    if (object.added_at != null) {
        if (typeof object.added_at !== "object")
            throw TypeError(".vtctldata.SchemaMigration.added_at: object expected");
        message.added_at = $root.vttime.Time.fromObject(object.added_at);
    }
    if (object.requested_at != null) {
        if (typeof object.requested_at !== "object")
            throw TypeError(".vtctldata.SchemaMigration.requested_at: object expected");
        message.requested_at = $root.vttime.Time.fromObject(object.requested_at);
    }
    if (object.ready_at != null) {
        if (typeof object.ready_at !== "object")
            throw TypeError(".vtctldata.SchemaMigration.ready_at: object expected");
        message.ready_at = $root.vttime.Time.fromObject(object.ready_at);
    }
    if (object.started_at != null) {
        if (typeof object.started_at !== "object")
            throw TypeError(".vtctldata.SchemaMigration.started_at: object expected");
        message.started_at = $root.vttime.Time.fromObject(object.started_at);
    }
    if (object.liveness_timestamp != null) {
        if (typeof object.liveness_timestamp !== "object")
            throw TypeError(".vtctldata.SchemaMigration.liveness_timestamp: object expected");
        message.liveness_timestamp = $root.vttime.Time.fromObject(object.liveness_timestamp);
    }
    if (object.completed_at != null) {
        if (typeof object.completed_at !== "object")
            throw TypeError(".vtctldata.SchemaMigration.completed_at: object expected");
        message.completed_at = $root.vttime.Time.fromObject(object.completed_at);
    }
    if (object.cleaned_up_at != null) {
        if (typeof object.cleaned_up_at !== "object")
            throw TypeError(".vtctldata.SchemaMigration.cleaned_up_at: object expected");
        message.cleaned_up_at = $root.vttime.Time.fromObject(object.cleaned_up_at);
    }
    switch (object.status) {
    default:
        if (typeof object.status === "number") {
            message.status = object.status;
            break;
        }
        break;
    case "UNKNOWN":
    case 0:
        message.status = 0;
        break;
    case "REQUESTED":
    case 1:
        message.status = 1;
        break;
    case "CANCELLED":
    case 2:
        message.status = 2;
        break;
    case "QUEUED":
    case 3:
        message.status = 3;
        break;
    case "READY":
    case 4:
        message.status = 4;
        break;
    case "RUNNING":
    case 5:
        message.status = 5;
        break;
    case "COMPLETE":
    case 6:
        message.status = 6;
        break;
    case "FAILED":
    case 7:
        message.status = 7;
        break;
    }
    if (object.log_path != null)
        message.log_path = String(object.log_path);
    if (object.artifacts != null)
        message.artifacts = String(object.artifacts);
    // uint64 field: prefer Long when available; otherwise accept string/number/
    // {low, high} object representations.
    if (object.retries != null)
        if ($util.Long)
            (message.retries = $util.Long.fromValue(object.retries)).unsigned = true;
        else if (typeof object.retries === "string")
            message.retries = parseInt(object.retries, 10);
        else if (typeof object.retries === "number")
            message.retries = object.retries;
        else if (typeof object.retries === "object")
            message.retries = new $util.LongBits(object.retries.low >>> 0, object.retries.high >>> 0).toNumber(true);
    if (object.tablet != null) {
        if (typeof object.tablet !== "object")
            throw TypeError(".vtctldata.SchemaMigration.tablet: object expected");
        message.tablet = $root.topodata.TabletAlias.fromObject(object.tablet);
    }
    if (object.tablet_failure != null)
        message.tablet_failure = Boolean(object.tablet_failure);
    if (object.progress != null)
        message.progress = Number(object.progress);
    if (object.migration_context != null)
        message.migration_context = String(object.migration_context);
    if (object.ddl_action != null)
        message.ddl_action = String(object.ddl_action);
    if (object.message != null)
        message.message = String(object.message);
    // int64 field (signed — note unsigned = false and toNumber() without arg).
    if (object.eta_seconds != null)
        if ($util.Long)
            (message.eta_seconds = $util.Long.fromValue(object.eta_seconds)).unsigned = false;
        else if (typeof object.eta_seconds === "string")
            message.eta_seconds = parseInt(object.eta_seconds, 10);
        else if (typeof object.eta_seconds === "number")
            message.eta_seconds = object.eta_seconds;
        else if (typeof object.eta_seconds === "object")
            message.eta_seconds = new $util.LongBits(object.eta_seconds.low >>> 0, object.eta_seconds.high >>> 0).toNumber();
    if (object.rows_copied != null)
        if ($util.Long)
            (message.rows_copied = $util.Long.fromValue(object.rows_copied)).unsigned = true;
        else if (typeof object.rows_copied === "string")
            message.rows_copied = parseInt(object.rows_copied, 10);
        else if (typeof object.rows_copied === "number")
            message.rows_copied = object.rows_copied;
        else if (typeof object.rows_copied === "object")
            message.rows_copied = new $util.LongBits(object.rows_copied.low >>> 0, object.rows_copied.high >>> 0).toNumber(true);
    if (object.table_rows != null)
        if ($util.Long)
            (message.table_rows = $util.Long.fromValue(object.table_rows)).unsigned = false;
        else if (typeof object.table_rows === "string")
            message.table_rows = parseInt(object.table_rows, 10);
        else if (typeof object.table_rows === "number")
            message.table_rows = object.table_rows;
        else if (typeof object.table_rows === "object")
            message.table_rows = new $util.LongBits(object.table_rows.low >>> 0, object.table_rows.high >>> 0).toNumber();
    // uint32 fields: coerce with >>> 0.
    if (object.added_unique_keys != null)
        message.added_unique_keys = object.added_unique_keys >>> 0;
    if (object.removed_unique_keys != null)
        message.removed_unique_keys = object.removed_unique_keys >>> 0;
    if (object.log_file != null)
        message.log_file = String(object.log_file);
    if (object.artifact_retention != null) {
        if (typeof object.artifact_retention !== "object")
            throw TypeError(".vtctldata.SchemaMigration.artifact_retention: object expected");
        message.artifact_retention = $root.vttime.Duration.fromObject(object.artifact_retention);
    }
    if (object.postpone_completion != null)
        message.postpone_completion = Boolean(object.postpone_completion);
    if (object.removed_unique_key_names != null)
        message.removed_unique_key_names = String(object.removed_unique_key_names);
    if (object.dropped_no_default_column_names != null)
        message.dropped_no_default_column_names = String(object.dropped_no_default_column_names);
    if (object.expanded_column_names != null)
        message.expanded_column_names = String(object.expanded_column_names);
    if (object.revertible_notes != null)
        message.revertible_notes = String(object.revertible_notes);
    if (object.allow_concurrent != null)
        message.allow_concurrent = Boolean(object.allow_concurrent);
    if (object.reverted_uuid != null)
        message.reverted_uuid = String(object.reverted_uuid);
    if (object.is_view != null)
        message.is_view = Boolean(object.is_view);
    if (object.ready_to_complete != null)
        message.ready_to_complete = Boolean(object.ready_to_complete);
    if (object.vitess_liveness_indicator != null)
        if ($util.Long)
            (message.vitess_liveness_indicator = $util.Long.fromValue(object.vitess_liveness_indicator)).unsigned = false;
        else if (typeof object.vitess_liveness_indicator === "string")
            message.vitess_liveness_indicator = parseInt(object.vitess_liveness_indicator, 10);
        else if (typeof object.vitess_liveness_indicator === "number")
            message.vitess_liveness_indicator = object.vitess_liveness_indicator;
        else if (typeof object.vitess_liveness_indicator === "object")
            message.vitess_liveness_indicator = new $util.LongBits(object.vitess_liveness_indicator.low >>> 0, object.vitess_liveness_indicator.high >>> 0).toNumber();
    if (object.user_throttle_ratio != null)
        message.user_throttle_ratio = Number(object.user_throttle_ratio);
    if (object.special_plan != null)
        message.special_plan = String(object.special_plan);
    if (object.last_throttled_at != null) {
        if (typeof object.last_throttled_at !== "object")
            throw TypeError(".vtctldata.SchemaMigration.last_throttled_at: object expected");
        message.last_throttled_at = $root.vttime.Time.fromObject(object.last_throttled_at);
    }
    if (object.component_throttled != null)
        message.component_throttled = String(object.component_throttled);
    if (object.cancelled_at != null) {
        if (typeof object.cancelled_at !== "object")
            throw TypeError(".vtctldata.SchemaMigration.cancelled_at: object expected");
        message.cancelled_at = $root.vttime.Time.fromObject(object.cancelled_at);
    }
    if (object.postpone_launch != null)
        message.postpone_launch = Boolean(object.postpone_launch);
    if (object.stage != null)
        message.stage = String(object.stage);
    if (object.cutover_attempts != null)
        message.cutover_attempts = object.cutover_attempts >>> 0;
    if (object.is_immediate_operation != null)
        message.is_immediate_operation = Boolean(object.is_immediate_operation);
    if (object.reviewed_at != null) {
        if (typeof object.reviewed_at !== "object")
            throw TypeError(".vtctldata.SchemaMigration.reviewed_at: object expected");
        message.reviewed_at = $root.vttime.Time.fromObject(object.reviewed_at);
    }
    if (object.ready_to_complete_at != null) {
        if (typeof object.ready_to_complete_at !== "object")
            throw TypeError(".vtctldata.SchemaMigration.ready_to_complete_at: object expected");
        message.ready_to_complete_at = $root.vttime.Time.fromObject(object.ready_to_complete_at);
    }
    return message;
};

/**
 * Creates a plain object from a SchemaMigration message. Also converts values to other types if specified.
 * @function toObject
 * @memberof vtctldata.SchemaMigration
 * @static
 * @param {vtctldata.SchemaMigration} message SchemaMigration
 * @param {$protobuf.IConversionOptions} [options] Conversion options
 * @returns {Object.<string,*>} Plain object
 */
SchemaMigration.toObject = function toObject(message, options) {
    if (!options)
        options = {};
    let object = {};
    // With options.defaults, pre-populate every field with its proto3 default
    // so the resulting plain object has a stable shape.
    if (options.defaults) {
        object.uuid = "";
        object.keyspace = "";
        object.shard = "";
        object.schema = "";
        object.table = "";
        object.migration_statement = "";
        object.strategy = options.enums === String ? "VITESS" : 0;
        object.options = "";
        object.added_at = null;
        object.requested_at = null;
        object.ready_at = null;
        object.started_at = null;
        object.liveness_timestamp = null;
        object.completed_at = null;
        object.cleaned_up_at = null;
        object.status = options.enums === String ? "UNKNOWN" : 0;
        object.log_path = "";
        object.artifacts = "";
        if ($util.Long) {
            let long = new $util.Long(0, 0, true);
            object.retries = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long;
        } else
            object.retries = options.longs === String ? "0" : 0;
        object.tablet = null;
        object.tablet_failure = false;
        object.progress = 0;
        object.migration_context = "";
        object.ddl_action = "";
        object.message = "";
        if ($util.Long) {
            let long = new $util.Long(0, 0, false);
            object.eta_seconds = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long;
        } else
            object.eta_seconds = options.longs === String ? "0" : 0;
        if ($util.Long) {
            let long = new $util.Long(0, 0, true);
            object.rows_copied = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long;
        } else
            object.rows_copied = options.longs === String ? "0" : 0;
        if ($util.Long) {
            let long = new $util.Long(0, 0, false);
            object.table_rows = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long;
        } else
            object.table_rows = options.longs === String ? "0" : 0;
        object.added_unique_keys = 0;
        object.removed_unique_keys = 0;
        object.log_file = "";
        object.artifact_retention = null;
        object.postpone_completion = false;
        object.removed_unique_key_names = "";
        object.dropped_no_default_column_names = "";
        object.expanded_column_names = "";
        object.revertible_notes = "";
        object.allow_concurrent = false;
        object.reverted_uuid = "";
        object.is_view = false;
        object.ready_to_complete = false;
        if ($util.Long) {
            let long = new $util.Long(0, 0, false);
            object.vitess_liveness_indicator = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long;
        } else
            object.vitess_liveness_indicator = options.longs === String ? "0" : 0;
        object.user_throttle_ratio = 0;
        object.special_plan = "";
        object.last_throttled_at = null;
        object.component_throttled = "";
        object.cancelled_at = null;
        object.postpone_launch = false;
        object.stage = "";
        object.cutover_attempts = 0;
        object.is_immediate_operation = false;
        object.reviewed_at = null;
        object.ready_to_complete_at = null;
    }
    if (message.uuid != null && message.hasOwnProperty("uuid"))
        object.uuid = message.uuid;
    if (message.keyspace != null && message.hasOwnProperty("keyspace"))
        object.keyspace = message.keyspace;
    if (message.shard != null && message.hasOwnProperty("shard"))
        object.shard = message.shard;
    if (message.schema != null && message.hasOwnProperty("schema"))
        object.schema = message.schema;
    if (message.table != null && message.hasOwnProperty("table"))
        object.table = message.table;
    if (message.migration_statement != null && message.hasOwnProperty("migration_statement"))
        object.migration_statement = message.migration_statement;
    // Enums: emit the name when options.enums === String (falling back to the
    // raw number for unknown values), otherwise the number.
    if (message.strategy != null && message.hasOwnProperty("strategy"))
        object.strategy = options.enums === String ? $root.vtctldata.SchemaMigration.Strategy[message.strategy] === undefined ? message.strategy : $root.vtctldata.SchemaMigration.Strategy[message.strategy] : message.strategy;
    if (message.options != null && message.hasOwnProperty("options"))
        object.options = message.options;
    if (message.added_at != null && message.hasOwnProperty("added_at"))
        object.added_at = $root.vttime.Time.toObject(message.added_at, options);
    if (message.requested_at != null && message.hasOwnProperty("requested_at"))
        object.requested_at = $root.vttime.Time.toObject(message.requested_at, options);
    if (message.ready_at != null && message.hasOwnProperty("ready_at"))
        object.ready_at = $root.vttime.Time.toObject(message.ready_at, options);
    if (message.started_at != null && message.hasOwnProperty("started_at"))
        object.started_at = $root.vttime.Time.toObject(message.started_at, options);
    if (message.liveness_timestamp != null && message.hasOwnProperty("liveness_timestamp"))
        object.liveness_timestamp = $root.vttime.Time.toObject(message.liveness_timestamp, options);
    if (message.completed_at != null && message.hasOwnProperty("completed_at"))
        object.completed_at = $root.vttime.Time.toObject(message.completed_at, options);
    if (message.cleaned_up_at != null && message.hasOwnProperty("cleaned_up_at"))
        object.cleaned_up_at = $root.vttime.Time.toObject(message.cleaned_up_at, options);
    if (message.status != null && message.hasOwnProperty("status"))
        object.status = options.enums === String ? $root.vtctldata.SchemaMigration.Status[message.status] === undefined ? message.status : $root.vtctldata.SchemaMigration.Status[message.status] : message.status;
    if (message.log_path != null && message.hasOwnProperty("log_path"))
        object.log_path = message.log_path;
    if (message.artifacts != null && message.hasOwnProperty("artifacts"))
        object.artifacts = message.artifacts;
    // 64-bit fields honour options.longs (String/Number/Long).
    if (message.retries != null && message.hasOwnProperty("retries"))
        if (typeof message.retries === "number")
            object.retries = options.longs === String ? String(message.retries) : message.retries;
        else
            object.retries = options.longs === String ? $util.Long.prototype.toString.call(message.retries) : options.longs === Number ? new $util.LongBits(message.retries.low >>> 0, message.retries.high >>> 0).toNumber(true) : message.retries;
    if (message.tablet != null && message.hasOwnProperty("tablet"))
        object.tablet = $root.topodata.TabletAlias.toObject(message.tablet, options);
    if (message.tablet_failure != null && message.hasOwnProperty("tablet_failure"))
        object.tablet_failure = message.tablet_failure;
    // Floats: stringify non-finite values (NaN/Infinity) for JSON compatibility.
    if (message.progress != null && message.hasOwnProperty("progress"))
        object.progress = options.json && !isFinite(message.progress) ? String(message.progress) : message.progress;
    if (message.migration_context != null && message.hasOwnProperty("migration_context"))
        object.migration_context = message.migration_context;
    if (message.ddl_action != null && message.hasOwnProperty("ddl_action"))
        object.ddl_action = message.ddl_action;
    if (message.message != null && message.hasOwnProperty("message"))
        object.message = message.message;
    if (message.eta_seconds != null && message.hasOwnProperty("eta_seconds"))
        if (typeof message.eta_seconds === "number")
            object.eta_seconds = options.longs === String ? String(message.eta_seconds) : message.eta_seconds;
        else
            object.eta_seconds = options.longs === String ? $util.Long.prototype.toString.call(message.eta_seconds) : options.longs === Number ? new $util.LongBits(message.eta_seconds.low >>> 0, message.eta_seconds.high >>> 0).toNumber() : message.eta_seconds;
    if (message.rows_copied != null && message.hasOwnProperty("rows_copied"))
        if (typeof message.rows_copied === "number")
            object.rows_copied = options.longs === String ? String(message.rows_copied) : message.rows_copied;
        else
            object.rows_copied = options.longs === String ? $util.Long.prototype.toString.call(message.rows_copied) : options.longs === Number ? new $util.LongBits(message.rows_copied.low >>> 0, message.rows_copied.high >>> 0).toNumber(true) : message.rows_copied;
    if (message.table_rows != null && message.hasOwnProperty("table_rows"))
        if (typeof message.table_rows === "number")
            object.table_rows = options.longs === String ? String(message.table_rows) : message.table_rows;
        else
            object.table_rows = options.longs === String ? $util.Long.prototype.toString.call(message.table_rows) : options.longs === Number ? new $util.LongBits(message.table_rows.low >>> 0, message.table_rows.high >>> 0).toNumber() : message.table_rows;
    if (message.added_unique_keys != null && message.hasOwnProperty("added_unique_keys"))
        object.added_unique_keys = message.added_unique_keys;
    if (message.removed_unique_keys != null && message.hasOwnProperty("removed_unique_keys"))
        object.removed_unique_keys = message.removed_unique_keys;
    if (message.log_file != null && message.hasOwnProperty("log_file"))
        object.log_file = message.log_file;
    if (message.artifact_retention != null && message.hasOwnProperty("artifact_retention"))
        object.artifact_retention = $root.vttime.Duration.toObject(message.artifact_retention, options);
    if (message.postpone_completion != null && message.hasOwnProperty("postpone_completion"))
        object.postpone_completion = message.postpone_completion;
    if (message.removed_unique_key_names != null && message.hasOwnProperty("removed_unique_key_names"))
        object.removed_unique_key_names = message.removed_unique_key_names;
    if (message.dropped_no_default_column_names != null && message.hasOwnProperty("dropped_no_default_column_names"))
        object.dropped_no_default_column_names = message.dropped_no_default_column_names;
    if (message.expanded_column_names != null && message.hasOwnProperty("expanded_column_names"))
        object.expanded_column_names = message.expanded_column_names;
    if (message.revertible_notes != null && message.hasOwnProperty("revertible_notes"))
        object.revertible_notes = message.revertible_notes;
    if (message.allow_concurrent != null && message.hasOwnProperty("allow_concurrent"))
        object.allow_concurrent = message.allow_concurrent;
    if (message.reverted_uuid != null && message.hasOwnProperty("reverted_uuid"))
        object.reverted_uuid = message.reverted_uuid;
    if (message.is_view != null && message.hasOwnProperty("is_view"))
        object.is_view = message.is_view;
    if (message.ready_to_complete != null && message.hasOwnProperty("ready_to_complete"))
        object.ready_to_complete = message.ready_to_complete;
    if (message.vitess_liveness_indicator != null && message.hasOwnProperty("vitess_liveness_indicator"))
        if (typeof message.vitess_liveness_indicator === "number")
            object.vitess_liveness_indicator = options.longs === String ? String(message.vitess_liveness_indicator) : message.vitess_liveness_indicator;
        else
            object.vitess_liveness_indicator = options.longs === String ? $util.Long.prototype.toString.call(message.vitess_liveness_indicator) : options.longs === Number ? new $util.LongBits(message.vitess_liveness_indicator.low >>> 0, message.vitess_liveness_indicator.high >>> 0).toNumber() : message.vitess_liveness_indicator;
    if (message.user_throttle_ratio != null && message.hasOwnProperty("user_throttle_ratio"))
        object.user_throttle_ratio = options.json && !isFinite(message.user_throttle_ratio) ? String(message.user_throttle_ratio) : message.user_throttle_ratio;
    if (message.special_plan != null && message.hasOwnProperty("special_plan"))
        object.special_plan = message.special_plan;
    if (message.last_throttled_at != null && message.hasOwnProperty("last_throttled_at"))
        object.last_throttled_at = $root.vttime.Time.toObject(message.last_throttled_at, options);
    if (message.component_throttled != null && message.hasOwnProperty("component_throttled"))
        object.component_throttled = message.component_throttled;
    if (message.cancelled_at != null && message.hasOwnProperty("cancelled_at"))
        object.cancelled_at = $root.vttime.Time.toObject(message.cancelled_at, options);
    if (message.postpone_launch != null && message.hasOwnProperty("postpone_launch"))
        object.postpone_launch = message.postpone_launch;
    if (message.stage != null && message.hasOwnProperty("stage"))
        object.stage = message.stage;
    if (message.cutover_attempts != null && message.hasOwnProperty("cutover_attempts"))
        object.cutover_attempts = message.cutover_attempts;
    if (message.is_immediate_operation != null && message.hasOwnProperty("is_immediate_operation"))
        object.is_immediate_operation = message.is_immediate_operation;
    if (message.reviewed_at != null && message.hasOwnProperty("reviewed_at"))
        object.reviewed_at = $root.vttime.Time.toObject(message.reviewed_at, options);
    if (message.ready_to_complete_at != null && message.hasOwnProperty("ready_to_complete_at"))
        object.ready_to_complete_at = $root.vttime.Time.toObject(message.ready_to_complete_at, options);
    return object;
};

/**
 * Converts this SchemaMigration to JSON.
 * @function toJSON
 * @memberof vtctldata.SchemaMigration
 * @instance
 * @returns {Object.<string,*>} JSON object
 */
SchemaMigration.prototype.toJSON = function toJSON() {
    // Delegates to the static toObject with protobuf.js's standard JSON options.
    return this.constructor.toObject(this, $protobuf.util.toJSONOptions);
};

/**
 * Gets the default type url for SchemaMigration
 * @function getTypeUrl
 * @memberof vtctldata.SchemaMigration
 * @static
 * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com")
 * @returns {string} The default type url
 */
SchemaMigration.getTypeUrl = function getTypeUrl(typeUrlPrefix) {
    if (typeUrlPrefix === undefined) {
        typeUrlPrefix = "type.googleapis.com";
    }
    return typeUrlPrefix + "/vtctldata.SchemaMigration";
};

/**
 * Strategy enum.
 * @name vtctldata.SchemaMigration.Strategy
 * @enum {number}
 * @property {number} VITESS=0 VITESS value
 * @property {number} ONLINE=0 ONLINE value
 * @property {number} GHOST=1 GHOST value
 * @property {number} PTOSC=2 PTOSC value
 * @property {number} DIRECT=3 DIRECT value
 * @property {number} MYSQL=4 MYSQL value
 */
SchemaMigration.Strategy = (function() {
    const valuesById = {}, values = Object.create(valuesById);
    values[valuesById[0] = "VITESS"] = 0;
    // ONLINE aliases value 0; only the canonical name VITESS is stored in
    // valuesById, so reverse lookup of 0 yields "VITESS".
    values["ONLINE"] = 0;
    values[valuesById[1] = "GHOST"] = 1;
    values[valuesById[2] = "PTOSC"] = 2;
    values[valuesById[3] = "DIRECT"] = 3;
    values[valuesById[4] = "MYSQL"] = 4;
    return values;
})();

/**
 * Status enum.
 * @name vtctldata.SchemaMigration.Status
 * @enum {number}
 * @property {number} UNKNOWN=0 UNKNOWN value
 * @property {number} REQUESTED=1 REQUESTED value
 * @property {number} CANCELLED=2 CANCELLED value
 * @property {number} QUEUED=3 QUEUED value
 * @property {number} READY=4 READY value
 * @property {number} RUNNING=5 RUNNING value
 * @property {number} COMPLETE=6 COMPLETE value
 * @property {number} FAILED=7 FAILED value
 */
SchemaMigration.Status = (function() {
    const valuesById = {}, values = Object.create(valuesById);
    values[valuesById[0] = "UNKNOWN"] = 0;
    values[valuesById[1] = "REQUESTED"] = 1;
    values[valuesById[2] = "CANCELLED"] = 2;
    values[valuesById[3] = "QUEUED"] = 3;
    values[valuesById[4] = "READY"] = 4;
    values[valuesById[5] = "RUNNING"] = 5;
    values[valuesById[6] = "COMPLETE"] = 6;
    values[valuesById[7] = "FAILED"] = 7;
    return values;
})();

return SchemaMigration;
})();

vtctldata.Shard = (function() {

    /**
     * Properties of a Shard.
     * @memberof vtctldata
     * @interface IShard
     * @property {string|null} [keyspace] Shard keyspace
     * @property {string|null} [name] Shard name
     * @property {topodata.IShard|null} [shard] Shard shard
     */

    /**
     * Constructs a new Shard.
     * @memberof vtctldata
     * @classdesc Represents a Shard.
     * @implements IShard
     * @constructor
     * @param {vtctldata.IShard=} [properties] Properties to set
     */
    function Shard(properties) {
        // Copy only non-null own properties so prototype defaults stay in effect.
        if (properties)
            for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i)
                if (properties[keys[i]] != null)
                    this[keys[i]] = properties[keys[i]];
    }

    /**
     * Shard keyspace.
     * @member {string} keyspace
     * @memberof vtctldata.Shard
     * @instance
     */
    Shard.prototype.keyspace = "";

    /**
     * Shard name.
     * @member {string} name
     * @memberof vtctldata.Shard
     * @instance
     */
    Shard.prototype.name = "";

    /**
     * Shard shard.
+ * @member {topodata.IShard|null|undefined} shard + * @memberof vtctldata.Shard + * @instance + */ + Shard.prototype.shard = null; + + /** + * Creates a new Shard instance using the specified properties. + * @function create + * @memberof vtctldata.Shard + * @static + * @param {vtctldata.IShard=} [properties] Properties to set + * @returns {vtctldata.Shard} Shard instance + */ + Shard.create = function create(properties) { + return new Shard(properties); + }; + + /** + * Encodes the specified Shard message. Does not implicitly {@link vtctldata.Shard.verify|verify} messages. + * @function encode + * @memberof vtctldata.Shard + * @static + * @param {vtctldata.IShard} message Shard message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Shard.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.name); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + $root.topodata.Shard.encode(message.shard, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified Shard message, length delimited. Does not implicitly {@link vtctldata.Shard.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.Shard + * @static + * @param {vtctldata.IShard} message Shard message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Shard.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Shard message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.Shard + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.Shard} Shard + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Shard.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.Shard(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyspace = reader.string(); + break; + } + case 2: { + message.name = reader.string(); + break; + } + case 3: { + message.shard = $root.topodata.Shard.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a Shard message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.Shard + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.Shard} Shard + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Shard.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Shard message. + * @function verify + * @memberof vtctldata.Shard + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Shard.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) { + let error = $root.topodata.Shard.verify(message.shard); + if (error) + return "shard." + error; + } + return null; + }; + + /** + * Creates a Shard message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.Shard + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.Shard} Shard + */ + Shard.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.Shard) + return object; + let message = new $root.vtctldata.Shard(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.name != null) + message.name = String(object.name); + if (object.shard != null) { + if (typeof object.shard !== "object") + throw TypeError(".vtctldata.Shard.shard: object expected"); + message.shard = $root.topodata.Shard.fromObject(object.shard); + } + return message; + }; + + /** + * Creates a plain object from a Shard message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.Shard + * @static + * @param {vtctldata.Shard} message Shard + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Shard.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.keyspace = ""; + object.name = ""; + object.shard = null; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = $root.topodata.Shard.toObject(message.shard, options); + return object; + }; + + /** + * Converts this Shard to JSON. 
+ * @function toJSON + * @memberof vtctldata.Shard + * @instance + * @returns {Object.} JSON object + */ + Shard.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Shard + * @function getTypeUrl + * @memberof vtctldata.Shard + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Shard.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.Shard"; + }; + + return Shard; + })(); + + vtctldata.Workflow = (function() { + + /** + * Properties of a Workflow. + * @memberof vtctldata + * @interface IWorkflow + * @property {string|null} [name] Workflow name + * @property {vtctldata.Workflow.IReplicationLocation|null} [source] Workflow source + * @property {vtctldata.Workflow.IReplicationLocation|null} [target] Workflow target + * @property {number|Long|null} [max_v_replication_lag] Workflow max_v_replication_lag + * @property {Object.|null} [shard_streams] Workflow shard_streams + * @property {string|null} [workflow_type] Workflow workflow_type + * @property {string|null} [workflow_sub_type] Workflow workflow_sub_type + * @property {number|Long|null} [max_v_replication_transaction_lag] Workflow max_v_replication_transaction_lag + * @property {boolean|null} [defer_secondary_keys] Workflow defer_secondary_keys + */ + + /** + * Constructs a new Workflow. + * @memberof vtctldata + * @classdesc Represents a Workflow. 
+ * @implements IWorkflow + * @constructor + * @param {vtctldata.IWorkflow=} [properties] Properties to set + */ + function Workflow(properties) { + this.shard_streams = {}; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Workflow name. + * @member {string} name + * @memberof vtctldata.Workflow + * @instance + */ + Workflow.prototype.name = ""; + + /** + * Workflow source. + * @member {vtctldata.Workflow.IReplicationLocation|null|undefined} source + * @memberof vtctldata.Workflow + * @instance + */ + Workflow.prototype.source = null; + + /** + * Workflow target. + * @member {vtctldata.Workflow.IReplicationLocation|null|undefined} target + * @memberof vtctldata.Workflow + * @instance + */ + Workflow.prototype.target = null; + + /** + * Workflow max_v_replication_lag. + * @member {number|Long} max_v_replication_lag + * @memberof vtctldata.Workflow + * @instance + */ + Workflow.prototype.max_v_replication_lag = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * Workflow shard_streams. + * @member {Object.} shard_streams + * @memberof vtctldata.Workflow + * @instance + */ + Workflow.prototype.shard_streams = $util.emptyObject; + + /** + * Workflow workflow_type. + * @member {string} workflow_type + * @memberof vtctldata.Workflow + * @instance + */ + Workflow.prototype.workflow_type = ""; + + /** + * Workflow workflow_sub_type. + * @member {string} workflow_sub_type + * @memberof vtctldata.Workflow + * @instance + */ + Workflow.prototype.workflow_sub_type = ""; + + /** + * Workflow max_v_replication_transaction_lag. + * @member {number|Long} max_v_replication_transaction_lag + * @memberof vtctldata.Workflow + * @instance + */ + Workflow.prototype.max_v_replication_transaction_lag = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * Workflow defer_secondary_keys. 
+ * @member {boolean} defer_secondary_keys + * @memberof vtctldata.Workflow + * @instance + */ + Workflow.prototype.defer_secondary_keys = false; + + /** + * Creates a new Workflow instance using the specified properties. + * @function create + * @memberof vtctldata.Workflow + * @static + * @param {vtctldata.IWorkflow=} [properties] Properties to set + * @returns {vtctldata.Workflow} Workflow instance + */ + Workflow.create = function create(properties) { + return new Workflow(properties); + }; + + /** + * Encodes the specified Workflow message. Does not implicitly {@link vtctldata.Workflow.verify|verify} messages. + * @function encode + * @memberof vtctldata.Workflow + * @static + * @param {vtctldata.IWorkflow} message Workflow message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Workflow.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.source != null && Object.hasOwnProperty.call(message, "source")) + $root.vtctldata.Workflow.ReplicationLocation.encode(message.source, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.target != null && Object.hasOwnProperty.call(message, "target")) + $root.vtctldata.Workflow.ReplicationLocation.encode(message.target, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.max_v_replication_lag != null && Object.hasOwnProperty.call(message, "max_v_replication_lag")) + writer.uint32(/* id 4, wireType 0 =*/32).int64(message.max_v_replication_lag); + if (message.shard_streams != null && Object.hasOwnProperty.call(message, "shard_streams")) + for (let keys = Object.keys(message.shard_streams), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 5, wireType 2 =*/42).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); 
+ $root.vtctldata.Workflow.ShardStream.encode(message.shard_streams[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } + if (message.workflow_type != null && Object.hasOwnProperty.call(message, "workflow_type")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.workflow_type); + if (message.workflow_sub_type != null && Object.hasOwnProperty.call(message, "workflow_sub_type")) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.workflow_sub_type); + if (message.max_v_replication_transaction_lag != null && Object.hasOwnProperty.call(message, "max_v_replication_transaction_lag")) + writer.uint32(/* id 8, wireType 0 =*/64).int64(message.max_v_replication_transaction_lag); + if (message.defer_secondary_keys != null && Object.hasOwnProperty.call(message, "defer_secondary_keys")) + writer.uint32(/* id 9, wireType 0 =*/72).bool(message.defer_secondary_keys); + return writer; + }; + + /** + * Encodes the specified Workflow message, length delimited. Does not implicitly {@link vtctldata.Workflow.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.Workflow + * @static + * @param {vtctldata.IWorkflow} message Workflow message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Workflow.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Workflow message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.Workflow + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.Workflow} Workflow + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Workflow.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.Workflow(), key, value; + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.name = reader.string(); + break; + } + case 2: { + message.source = $root.vtctldata.Workflow.ReplicationLocation.decode(reader, reader.uint32()); + break; + } + case 3: { + message.target = $root.vtctldata.Workflow.ReplicationLocation.decode(reader, reader.uint32()); + break; + } + case 4: { + message.max_v_replication_lag = reader.int64(); + break; + } + case 5: { + if (message.shard_streams === $util.emptyObject) + message.shard_streams = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.vtctldata.Workflow.ShardStream.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.shard_streams[key] = value; + break; + } + case 6: { + message.workflow_type = reader.string(); + break; + } + case 7: { + message.workflow_sub_type = reader.string(); + break; + } + case 8: { + message.max_v_replication_transaction_lag = reader.int64(); + break; + } + case 9: { + message.defer_secondary_keys = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * 
Decodes a Workflow message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.Workflow + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.Workflow} Workflow + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Workflow.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Workflow message. + * @function verify + * @memberof vtctldata.Workflow + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Workflow.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.source != null && message.hasOwnProperty("source")) { + let error = $root.vtctldata.Workflow.ReplicationLocation.verify(message.source); + if (error) + return "source." + error; + } + if (message.target != null && message.hasOwnProperty("target")) { + let error = $root.vtctldata.Workflow.ReplicationLocation.verify(message.target); + if (error) + return "target." 
+ error; + } + if (message.max_v_replication_lag != null && message.hasOwnProperty("max_v_replication_lag")) + if (!$util.isInteger(message.max_v_replication_lag) && !(message.max_v_replication_lag && $util.isInteger(message.max_v_replication_lag.low) && $util.isInteger(message.max_v_replication_lag.high))) + return "max_v_replication_lag: integer|Long expected"; + if (message.shard_streams != null && message.hasOwnProperty("shard_streams")) { + if (!$util.isObject(message.shard_streams)) + return "shard_streams: object expected"; + let key = Object.keys(message.shard_streams); + for (let i = 0; i < key.length; ++i) { + let error = $root.vtctldata.Workflow.ShardStream.verify(message.shard_streams[key[i]]); + if (error) + return "shard_streams." + error; + } + } + if (message.workflow_type != null && message.hasOwnProperty("workflow_type")) + if (!$util.isString(message.workflow_type)) + return "workflow_type: string expected"; + if (message.workflow_sub_type != null && message.hasOwnProperty("workflow_sub_type")) + if (!$util.isString(message.workflow_sub_type)) + return "workflow_sub_type: string expected"; + if (message.max_v_replication_transaction_lag != null && message.hasOwnProperty("max_v_replication_transaction_lag")) + if (!$util.isInteger(message.max_v_replication_transaction_lag) && !(message.max_v_replication_transaction_lag && $util.isInteger(message.max_v_replication_transaction_lag.low) && $util.isInteger(message.max_v_replication_transaction_lag.high))) + return "max_v_replication_transaction_lag: integer|Long expected"; + if (message.defer_secondary_keys != null && message.hasOwnProperty("defer_secondary_keys")) + if (typeof message.defer_secondary_keys !== "boolean") + return "defer_secondary_keys: boolean expected"; + return null; + }; + + /** + * Creates a Workflow message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.Workflow + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.Workflow} Workflow + */ + Workflow.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.Workflow) + return object; + let message = new $root.vtctldata.Workflow(); + if (object.name != null) + message.name = String(object.name); + if (object.source != null) { + if (typeof object.source !== "object") + throw TypeError(".vtctldata.Workflow.source: object expected"); + message.source = $root.vtctldata.Workflow.ReplicationLocation.fromObject(object.source); + } + if (object.target != null) { + if (typeof object.target !== "object") + throw TypeError(".vtctldata.Workflow.target: object expected"); + message.target = $root.vtctldata.Workflow.ReplicationLocation.fromObject(object.target); + } + if (object.max_v_replication_lag != null) + if ($util.Long) + (message.max_v_replication_lag = $util.Long.fromValue(object.max_v_replication_lag)).unsigned = false; + else if (typeof object.max_v_replication_lag === "string") + message.max_v_replication_lag = parseInt(object.max_v_replication_lag, 10); + else if (typeof object.max_v_replication_lag === "number") + message.max_v_replication_lag = object.max_v_replication_lag; + else if (typeof object.max_v_replication_lag === "object") + message.max_v_replication_lag = new $util.LongBits(object.max_v_replication_lag.low >>> 0, object.max_v_replication_lag.high >>> 0).toNumber(); + if (object.shard_streams) { + if (typeof object.shard_streams !== "object") + throw TypeError(".vtctldata.Workflow.shard_streams: object expected"); + message.shard_streams = {}; + for (let keys = Object.keys(object.shard_streams), i = 0; i < keys.length; ++i) { + if (typeof object.shard_streams[keys[i]] !== "object") + throw TypeError(".vtctldata.Workflow.shard_streams: object expected"); + message.shard_streams[keys[i]] = 
$root.vtctldata.Workflow.ShardStream.fromObject(object.shard_streams[keys[i]]); + } + } + if (object.workflow_type != null) + message.workflow_type = String(object.workflow_type); + if (object.workflow_sub_type != null) + message.workflow_sub_type = String(object.workflow_sub_type); + if (object.max_v_replication_transaction_lag != null) + if ($util.Long) + (message.max_v_replication_transaction_lag = $util.Long.fromValue(object.max_v_replication_transaction_lag)).unsigned = false; + else if (typeof object.max_v_replication_transaction_lag === "string") + message.max_v_replication_transaction_lag = parseInt(object.max_v_replication_transaction_lag, 10); + else if (typeof object.max_v_replication_transaction_lag === "number") + message.max_v_replication_transaction_lag = object.max_v_replication_transaction_lag; + else if (typeof object.max_v_replication_transaction_lag === "object") + message.max_v_replication_transaction_lag = new $util.LongBits(object.max_v_replication_transaction_lag.low >>> 0, object.max_v_replication_transaction_lag.high >>> 0).toNumber(); + if (object.defer_secondary_keys != null) + message.defer_secondary_keys = Boolean(object.defer_secondary_keys); + return message; + }; + + /** + * Creates a plain object from a Workflow message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.Workflow + * @static + * @param {vtctldata.Workflow} message Workflow + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Workflow.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.objects || options.defaults) + object.shard_streams = {}; + if (options.defaults) { + object.name = ""; + object.source = null; + object.target = null; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.max_v_replication_lag = options.longs === String ? long.toString() : options.longs === Number ? 
long.toNumber() : long; + } else + object.max_v_replication_lag = options.longs === String ? "0" : 0; + object.workflow_type = ""; + object.workflow_sub_type = ""; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.max_v_replication_transaction_lag = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.max_v_replication_transaction_lag = options.longs === String ? "0" : 0; + object.defer_secondary_keys = false; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.source != null && message.hasOwnProperty("source")) + object.source = $root.vtctldata.Workflow.ReplicationLocation.toObject(message.source, options); + if (message.target != null && message.hasOwnProperty("target")) + object.target = $root.vtctldata.Workflow.ReplicationLocation.toObject(message.target, options); + if (message.max_v_replication_lag != null && message.hasOwnProperty("max_v_replication_lag")) + if (typeof message.max_v_replication_lag === "number") + object.max_v_replication_lag = options.longs === String ? String(message.max_v_replication_lag) : message.max_v_replication_lag; + else + object.max_v_replication_lag = options.longs === String ? $util.Long.prototype.toString.call(message.max_v_replication_lag) : options.longs === Number ? 
new $util.LongBits(message.max_v_replication_lag.low >>> 0, message.max_v_replication_lag.high >>> 0).toNumber() : message.max_v_replication_lag; + let keys2; + if (message.shard_streams && (keys2 = Object.keys(message.shard_streams)).length) { + object.shard_streams = {}; + for (let j = 0; j < keys2.length; ++j) + object.shard_streams[keys2[j]] = $root.vtctldata.Workflow.ShardStream.toObject(message.shard_streams[keys2[j]], options); + } + if (message.workflow_type != null && message.hasOwnProperty("workflow_type")) + object.workflow_type = message.workflow_type; + if (message.workflow_sub_type != null && message.hasOwnProperty("workflow_sub_type")) + object.workflow_sub_type = message.workflow_sub_type; + if (message.max_v_replication_transaction_lag != null && message.hasOwnProperty("max_v_replication_transaction_lag")) + if (typeof message.max_v_replication_transaction_lag === "number") + object.max_v_replication_transaction_lag = options.longs === String ? String(message.max_v_replication_transaction_lag) : message.max_v_replication_transaction_lag; + else + object.max_v_replication_transaction_lag = options.longs === String ? $util.Long.prototype.toString.call(message.max_v_replication_transaction_lag) : options.longs === Number ? new $util.LongBits(message.max_v_replication_transaction_lag.low >>> 0, message.max_v_replication_transaction_lag.high >>> 0).toNumber() : message.max_v_replication_transaction_lag; + if (message.defer_secondary_keys != null && message.hasOwnProperty("defer_secondary_keys")) + object.defer_secondary_keys = message.defer_secondary_keys; + return object; + }; + + /** + * Converts this Workflow to JSON. 
+ * @function toJSON + * @memberof vtctldata.Workflow + * @instance + * @returns {Object.} JSON object + */ + Workflow.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Workflow + * @function getTypeUrl + * @memberof vtctldata.Workflow + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Workflow.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.Workflow"; + }; + + Workflow.ReplicationLocation = (function() { + + /** + * Properties of a ReplicationLocation. + * @memberof vtctldata.Workflow + * @interface IReplicationLocation + * @property {string|null} [keyspace] ReplicationLocation keyspace + * @property {Array.|null} [shards] ReplicationLocation shards + */ + + /** + * Constructs a new ReplicationLocation. + * @memberof vtctldata.Workflow + * @classdesc Represents a ReplicationLocation. + * @implements IReplicationLocation + * @constructor + * @param {vtctldata.Workflow.IReplicationLocation=} [properties] Properties to set + */ + function ReplicationLocation(properties) { + this.shards = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ReplicationLocation keyspace. + * @member {string} keyspace + * @memberof vtctldata.Workflow.ReplicationLocation + * @instance + */ + ReplicationLocation.prototype.keyspace = ""; + + /** + * ReplicationLocation shards. + * @member {Array.} shards + * @memberof vtctldata.Workflow.ReplicationLocation + * @instance + */ + ReplicationLocation.prototype.shards = $util.emptyArray; + + /** + * Creates a new ReplicationLocation instance using the specified properties. 
+ * @function create + * @memberof vtctldata.Workflow.ReplicationLocation + * @static + * @param {vtctldata.Workflow.IReplicationLocation=} [properties] Properties to set + * @returns {vtctldata.Workflow.ReplicationLocation} ReplicationLocation instance + */ + ReplicationLocation.create = function create(properties) { + return new ReplicationLocation(properties); + }; + + /** + * Encodes the specified ReplicationLocation message. Does not implicitly {@link vtctldata.Workflow.ReplicationLocation.verify|verify} messages. + * @function encode + * @memberof vtctldata.Workflow.ReplicationLocation + * @static + * @param {vtctldata.Workflow.IReplicationLocation} message ReplicationLocation message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ReplicationLocation.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shards != null && message.shards.length) + for (let i = 0; i < message.shards.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shards[i]); + return writer; + }; + + /** + * Encodes the specified ReplicationLocation message, length delimited. Does not implicitly {@link vtctldata.Workflow.ReplicationLocation.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.Workflow.ReplicationLocation + * @static + * @param {vtctldata.Workflow.IReplicationLocation} message ReplicationLocation message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ReplicationLocation.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a ReplicationLocation message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.Workflow.ReplicationLocation + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.Workflow.ReplicationLocation} ReplicationLocation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ReplicationLocation.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.Workflow.ReplicationLocation(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyspace = reader.string(); + break; + } + case 2: { + if (!(message.shards && message.shards.length)) + message.shards = []; + message.shards.push(reader.string()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a ReplicationLocation message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.Workflow.ReplicationLocation + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.Workflow.ReplicationLocation} ReplicationLocation + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ReplicationLocation.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a ReplicationLocation message. 
+ * @function verify + * @memberof vtctldata.Workflow.ReplicationLocation + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ReplicationLocation.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shards != null && message.hasOwnProperty("shards")) { + if (!Array.isArray(message.shards)) + return "shards: array expected"; + for (let i = 0; i < message.shards.length; ++i) + if (!$util.isString(message.shards[i])) + return "shards: string[] expected"; + } + return null; + }; + + /** + * Creates a ReplicationLocation message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.Workflow.ReplicationLocation + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.Workflow.ReplicationLocation} ReplicationLocation + */ + ReplicationLocation.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.Workflow.ReplicationLocation) + return object; + let message = new $root.vtctldata.Workflow.ReplicationLocation(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shards) { + if (!Array.isArray(object.shards)) + throw TypeError(".vtctldata.Workflow.ReplicationLocation.shards: array expected"); + message.shards = []; + for (let i = 0; i < object.shards.length; ++i) + message.shards[i] = String(object.shards[i]); + } + return message; + }; + + /** + * Creates a plain object from a ReplicationLocation message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.Workflow.ReplicationLocation + * @static + * @param {vtctldata.Workflow.ReplicationLocation} message ReplicationLocation + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ReplicationLocation.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.shards = []; + if (options.defaults) + object.keyspace = ""; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shards && message.shards.length) { + object.shards = []; + for (let j = 0; j < message.shards.length; ++j) + object.shards[j] = message.shards[j]; + } + return object; + }; + + /** + * Converts this ReplicationLocation to JSON. + * @function toJSON + * @memberof vtctldata.Workflow.ReplicationLocation + * @instance + * @returns {Object.} JSON object + */ + ReplicationLocation.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ReplicationLocation + * @function getTypeUrl + * @memberof vtctldata.Workflow.ReplicationLocation + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ReplicationLocation.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.Workflow.ReplicationLocation"; + }; + + return ReplicationLocation; + })(); + + Workflow.ShardStream = (function() { + + /** + * Properties of a ShardStream. 
+ * @memberof vtctldata.Workflow + * @interface IShardStream + * @property {Array.|null} [streams] ShardStream streams + * @property {Array.|null} [tablet_controls] ShardStream tablet_controls + * @property {boolean|null} [is_primary_serving] ShardStream is_primary_serving + */ + + /** + * Constructs a new ShardStream. + * @memberof vtctldata.Workflow + * @classdesc Represents a ShardStream. + * @implements IShardStream + * @constructor + * @param {vtctldata.Workflow.IShardStream=} [properties] Properties to set + */ + function ShardStream(properties) { + this.streams = []; + this.tablet_controls = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ShardStream streams. + * @member {Array.} streams + * @memberof vtctldata.Workflow.ShardStream + * @instance + */ + ShardStream.prototype.streams = $util.emptyArray; + + /** + * ShardStream tablet_controls. + * @member {Array.} tablet_controls + * @memberof vtctldata.Workflow.ShardStream + * @instance + */ + ShardStream.prototype.tablet_controls = $util.emptyArray; + + /** + * ShardStream is_primary_serving. + * @member {boolean} is_primary_serving + * @memberof vtctldata.Workflow.ShardStream + * @instance + */ + ShardStream.prototype.is_primary_serving = false; + + /** + * Creates a new ShardStream instance using the specified properties. + * @function create + * @memberof vtctldata.Workflow.ShardStream + * @static + * @param {vtctldata.Workflow.IShardStream=} [properties] Properties to set + * @returns {vtctldata.Workflow.ShardStream} ShardStream instance + */ + ShardStream.create = function create(properties) { + return new ShardStream(properties); + }; + + /** + * Encodes the specified ShardStream message. Does not implicitly {@link vtctldata.Workflow.ShardStream.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.Workflow.ShardStream + * @static + * @param {vtctldata.Workflow.IShardStream} message ShardStream message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ShardStream.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.streams != null && message.streams.length) + for (let i = 0; i < message.streams.length; ++i) + $root.vtctldata.Workflow.Stream.encode(message.streams[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.tablet_controls != null && message.tablet_controls.length) + for (let i = 0; i < message.tablet_controls.length; ++i) + $root.topodata.Shard.TabletControl.encode(message.tablet_controls[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.is_primary_serving != null && Object.hasOwnProperty.call(message, "is_primary_serving")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.is_primary_serving); + return writer; + }; + + /** + * Encodes the specified ShardStream message, length delimited. Does not implicitly {@link vtctldata.Workflow.ShardStream.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.Workflow.ShardStream + * @static + * @param {vtctldata.Workflow.IShardStream} message ShardStream message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ShardStream.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a ShardStream message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.Workflow.ShardStream + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.Workflow.ShardStream} ShardStream + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ShardStream.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.Workflow.ShardStream(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.streams && message.streams.length)) + message.streams = []; + message.streams.push($root.vtctldata.Workflow.Stream.decode(reader, reader.uint32())); + break; + } + case 2: { + if (!(message.tablet_controls && message.tablet_controls.length)) + message.tablet_controls = []; + message.tablet_controls.push($root.topodata.Shard.TabletControl.decode(reader, reader.uint32())); + break; + } + case 3: { + message.is_primary_serving = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a ShardStream message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.Workflow.ShardStream + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.Workflow.ShardStream} ShardStream + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ShardStream.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a ShardStream message. + * @function verify + * @memberof vtctldata.Workflow.ShardStream + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ShardStream.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.streams != null && message.hasOwnProperty("streams")) { + if (!Array.isArray(message.streams)) + return "streams: array expected"; + for (let i = 0; i < message.streams.length; ++i) { + let error = $root.vtctldata.Workflow.Stream.verify(message.streams[i]); + if (error) + return "streams." + error; + } + } + if (message.tablet_controls != null && message.hasOwnProperty("tablet_controls")) { + if (!Array.isArray(message.tablet_controls)) + return "tablet_controls: array expected"; + for (let i = 0; i < message.tablet_controls.length; ++i) { + let error = $root.topodata.Shard.TabletControl.verify(message.tablet_controls[i]); + if (error) + return "tablet_controls." + error; + } + } + if (message.is_primary_serving != null && message.hasOwnProperty("is_primary_serving")) + if (typeof message.is_primary_serving !== "boolean") + return "is_primary_serving: boolean expected"; + return null; + }; + + /** + * Creates a ShardStream message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.Workflow.ShardStream + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.Workflow.ShardStream} ShardStream + */ + ShardStream.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.Workflow.ShardStream) + return object; + let message = new $root.vtctldata.Workflow.ShardStream(); + if (object.streams) { + if (!Array.isArray(object.streams)) + throw TypeError(".vtctldata.Workflow.ShardStream.streams: array expected"); + message.streams = []; + for (let i = 0; i < object.streams.length; ++i) { + if (typeof object.streams[i] !== "object") + throw TypeError(".vtctldata.Workflow.ShardStream.streams: object expected"); + message.streams[i] = $root.vtctldata.Workflow.Stream.fromObject(object.streams[i]); + } + } + if (object.tablet_controls) { + if (!Array.isArray(object.tablet_controls)) + throw TypeError(".vtctldata.Workflow.ShardStream.tablet_controls: array expected"); + message.tablet_controls = []; + for (let i = 0; i < object.tablet_controls.length; ++i) { + if (typeof object.tablet_controls[i] !== "object") + throw TypeError(".vtctldata.Workflow.ShardStream.tablet_controls: object expected"); + message.tablet_controls[i] = $root.topodata.Shard.TabletControl.fromObject(object.tablet_controls[i]); + } + } + if (object.is_primary_serving != null) + message.is_primary_serving = Boolean(object.is_primary_serving); + return message; + }; + + /** + * Creates a plain object from a ShardStream message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.Workflow.ShardStream + * @static + * @param {vtctldata.Workflow.ShardStream} message ShardStream + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ShardStream.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) { + object.streams = []; + object.tablet_controls = []; + } + if (options.defaults) + object.is_primary_serving = false; + if (message.streams && message.streams.length) { + object.streams = []; + for (let j = 0; j < message.streams.length; ++j) + object.streams[j] = $root.vtctldata.Workflow.Stream.toObject(message.streams[j], options); + } + if (message.tablet_controls && message.tablet_controls.length) { + object.tablet_controls = []; + for (let j = 0; j < message.tablet_controls.length; ++j) + object.tablet_controls[j] = $root.topodata.Shard.TabletControl.toObject(message.tablet_controls[j], options); + } + if (message.is_primary_serving != null && message.hasOwnProperty("is_primary_serving")) + object.is_primary_serving = message.is_primary_serving; + return object; + }; + + /** + * Converts this ShardStream to JSON. 
+ * @function toJSON + * @memberof vtctldata.Workflow.ShardStream + * @instance + * @returns {Object.} JSON object + */ + ShardStream.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ShardStream + * @function getTypeUrl + * @memberof vtctldata.Workflow.ShardStream + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ShardStream.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.Workflow.ShardStream"; + }; + + return ShardStream; + })(); + + Workflow.Stream = (function() { + + /** + * Properties of a Stream. + * @memberof vtctldata.Workflow + * @interface IStream + * @property {number|Long|null} [id] Stream id + * @property {string|null} [shard] Stream shard + * @property {topodata.ITabletAlias|null} [tablet] Stream tablet + * @property {binlogdata.IBinlogSource|null} [binlog_source] Stream binlog_source + * @property {string|null} [position] Stream position + * @property {string|null} [stop_position] Stream stop_position + * @property {string|null} [state] Stream state + * @property {string|null} [db_name] Stream db_name + * @property {vttime.ITime|null} [transaction_timestamp] Stream transaction_timestamp + * @property {vttime.ITime|null} [time_updated] Stream time_updated + * @property {string|null} [message] Stream message + * @property {Array.|null} [copy_states] Stream copy_states + * @property {Array.|null} [logs] Stream logs + * @property {string|null} [log_fetch_error] Stream log_fetch_error + * @property {Array.|null} [tags] Stream tags + * @property {number|Long|null} [rows_copied] Stream rows_copied + * @property {vtctldata.Workflow.Stream.IThrottlerStatus|null} [throttler_status] Stream throttler_status + */ + + /** + * Constructs 
a new Stream. + * @memberof vtctldata.Workflow + * @classdesc Represents a Stream. + * @implements IStream + * @constructor + * @param {vtctldata.Workflow.IStream=} [properties] Properties to set + */ + function Stream(properties) { + this.copy_states = []; + this.logs = []; + this.tags = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Stream id. + * @member {number|Long} id + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * Stream shard. + * @member {string} shard + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.shard = ""; + + /** + * Stream tablet. + * @member {topodata.ITabletAlias|null|undefined} tablet + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.tablet = null; + + /** + * Stream binlog_source. + * @member {binlogdata.IBinlogSource|null|undefined} binlog_source + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.binlog_source = null; + + /** + * Stream position. + * @member {string} position + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.position = ""; + + /** + * Stream stop_position. + * @member {string} stop_position + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.stop_position = ""; + + /** + * Stream state. + * @member {string} state + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.state = ""; + + /** + * Stream db_name. + * @member {string} db_name + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.db_name = ""; + + /** + * Stream transaction_timestamp. 
+ * @member {vttime.ITime|null|undefined} transaction_timestamp + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.transaction_timestamp = null; + + /** + * Stream time_updated. + * @member {vttime.ITime|null|undefined} time_updated + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.time_updated = null; + + /** + * Stream message. + * @member {string} message + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.message = ""; + + /** + * Stream copy_states. + * @member {Array.} copy_states + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.copy_states = $util.emptyArray; + + /** + * Stream logs. + * @member {Array.} logs + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.logs = $util.emptyArray; + + /** + * Stream log_fetch_error. + * @member {string} log_fetch_error + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.log_fetch_error = ""; + + /** + * Stream tags. + * @member {Array.} tags + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.tags = $util.emptyArray; + + /** + * Stream rows_copied. + * @member {number|Long} rows_copied + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.rows_copied = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * Stream throttler_status. + * @member {vtctldata.Workflow.Stream.IThrottlerStatus|null|undefined} throttler_status + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.throttler_status = null; + + /** + * Creates a new Stream instance using the specified properties. 
+ * @function create + * @memberof vtctldata.Workflow.Stream + * @static + * @param {vtctldata.Workflow.IStream=} [properties] Properties to set + * @returns {vtctldata.Workflow.Stream} Stream instance + */ + Stream.create = function create(properties) { + return new Stream(properties); + }; + + /** + * Encodes the specified Stream message. Does not implicitly {@link vtctldata.Workflow.Stream.verify|verify} messages. + * @function encode + * @memberof vtctldata.Workflow.Stream + * @static + * @param {vtctldata.Workflow.IStream} message Stream message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Stream.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.id != null && Object.hasOwnProperty.call(message, "id")) + writer.uint32(/* id 1, wireType 0 =*/8).int64(message.id); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.tablet != null && Object.hasOwnProperty.call(message, "tablet")) + $root.topodata.TabletAlias.encode(message.tablet, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.binlog_source != null && Object.hasOwnProperty.call(message, "binlog_source")) + $root.binlogdata.BinlogSource.encode(message.binlog_source, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.position != null && Object.hasOwnProperty.call(message, "position")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.position); + if (message.stop_position != null && Object.hasOwnProperty.call(message, "stop_position")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.stop_position); + if (message.state != null && Object.hasOwnProperty.call(message, "state")) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.state); + if (message.db_name != null && Object.hasOwnProperty.call(message, 
"db_name")) + writer.uint32(/* id 8, wireType 2 =*/66).string(message.db_name); + if (message.transaction_timestamp != null && Object.hasOwnProperty.call(message, "transaction_timestamp")) + $root.vttime.Time.encode(message.transaction_timestamp, writer.uint32(/* id 9, wireType 2 =*/74).fork()).ldelim(); + if (message.time_updated != null && Object.hasOwnProperty.call(message, "time_updated")) + $root.vttime.Time.encode(message.time_updated, writer.uint32(/* id 10, wireType 2 =*/82).fork()).ldelim(); + if (message.message != null && Object.hasOwnProperty.call(message, "message")) + writer.uint32(/* id 11, wireType 2 =*/90).string(message.message); + if (message.copy_states != null && message.copy_states.length) + for (let i = 0; i < message.copy_states.length; ++i) + $root.vtctldata.Workflow.Stream.CopyState.encode(message.copy_states[i], writer.uint32(/* id 12, wireType 2 =*/98).fork()).ldelim(); + if (message.logs != null && message.logs.length) + for (let i = 0; i < message.logs.length; ++i) + $root.vtctldata.Workflow.Stream.Log.encode(message.logs[i], writer.uint32(/* id 13, wireType 2 =*/106).fork()).ldelim(); + if (message.log_fetch_error != null && Object.hasOwnProperty.call(message, "log_fetch_error")) + writer.uint32(/* id 14, wireType 2 =*/114).string(message.log_fetch_error); + if (message.tags != null && message.tags.length) + for (let i = 0; i < message.tags.length; ++i) + writer.uint32(/* id 15, wireType 2 =*/122).string(message.tags[i]); + if (message.rows_copied != null && Object.hasOwnProperty.call(message, "rows_copied")) + writer.uint32(/* id 16, wireType 0 =*/128).int64(message.rows_copied); + if (message.throttler_status != null && Object.hasOwnProperty.call(message, "throttler_status")) + $root.vtctldata.Workflow.Stream.ThrottlerStatus.encode(message.throttler_status, writer.uint32(/* id 17, wireType 2 =*/138).fork()).ldelim(); + return writer; + }; + + /** * Encodes the specified Stream message, length delimited. 
Does not implicitly {@link vtctldata.Workflow.Stream.verify|verify} messages. * @function encodeDelimited * @memberof vtctldata.Workflow.Stream * @static - * @param {vtctldata.Workflow.IStream} message Stream message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer + * @param {vtctldata.Workflow.IStream} message Stream message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Stream.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Stream message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.Workflow.Stream + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.Workflow.Stream} Stream + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Stream.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.Workflow.Stream(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.id = reader.int64(); + break; + } + case 2: { + message.shard = reader.string(); + break; + } + case 3: { + message.tablet = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 4: { + message.binlog_source = $root.binlogdata.BinlogSource.decode(reader, reader.uint32()); + break; + } + case 5: { + message.position = reader.string(); + break; + } + case 6: { + message.stop_position = reader.string(); + break; + } + case 7: { + message.state = reader.string(); + break; + } + case 8: { + message.db_name = reader.string(); + break; + } + case 9: { + message.transaction_timestamp = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 10: { + message.time_updated = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 11: { + message.message = reader.string(); + break; + } + case 12: { + if (!(message.copy_states && message.copy_states.length)) + message.copy_states = []; + message.copy_states.push($root.vtctldata.Workflow.Stream.CopyState.decode(reader, reader.uint32())); + break; + } + case 13: { + if (!(message.logs && message.logs.length)) + message.logs = []; + message.logs.push($root.vtctldata.Workflow.Stream.Log.decode(reader, reader.uint32())); + break; + } + case 14: { + message.log_fetch_error = reader.string(); + break; + } + case 15: { + if (!(message.tags && message.tags.length)) + message.tags = []; + message.tags.push(reader.string()); + break; + } + case 16: { + message.rows_copied = reader.int64(); + break; + } + case 17: { + message.throttler_status = $root.vtctldata.Workflow.Stream.ThrottlerStatus.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a Stream message from the specified reader or buffer, length 
delimited. + * @function decodeDelimited + * @memberof vtctldata.Workflow.Stream + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.Workflow.Stream} Stream + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Stream.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Stream message. + * @function verify + * @memberof vtctldata.Workflow.Stream + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Stream.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.id != null && message.hasOwnProperty("id")) + if (!$util.isInteger(message.id) && !(message.id && $util.isInteger(message.id.low) && $util.isInteger(message.id.high))) + return "id: integer|Long expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.tablet != null && message.hasOwnProperty("tablet")) { + let error = $root.topodata.TabletAlias.verify(message.tablet); + if (error) + return "tablet." + error; + } + if (message.binlog_source != null && message.hasOwnProperty("binlog_source")) { + let error = $root.binlogdata.BinlogSource.verify(message.binlog_source); + if (error) + return "binlog_source." 
+ error; + } + if (message.position != null && message.hasOwnProperty("position")) + if (!$util.isString(message.position)) + return "position: string expected"; + if (message.stop_position != null && message.hasOwnProperty("stop_position")) + if (!$util.isString(message.stop_position)) + return "stop_position: string expected"; + if (message.state != null && message.hasOwnProperty("state")) + if (!$util.isString(message.state)) + return "state: string expected"; + if (message.db_name != null && message.hasOwnProperty("db_name")) + if (!$util.isString(message.db_name)) + return "db_name: string expected"; + if (message.transaction_timestamp != null && message.hasOwnProperty("transaction_timestamp")) { + let error = $root.vttime.Time.verify(message.transaction_timestamp); + if (error) + return "transaction_timestamp." + error; + } + if (message.time_updated != null && message.hasOwnProperty("time_updated")) { + let error = $root.vttime.Time.verify(message.time_updated); + if (error) + return "time_updated." + error; + } + if (message.message != null && message.hasOwnProperty("message")) + if (!$util.isString(message.message)) + return "message: string expected"; + if (message.copy_states != null && message.hasOwnProperty("copy_states")) { + if (!Array.isArray(message.copy_states)) + return "copy_states: array expected"; + for (let i = 0; i < message.copy_states.length; ++i) { + let error = $root.vtctldata.Workflow.Stream.CopyState.verify(message.copy_states[i]); + if (error) + return "copy_states." + error; + } + } + if (message.logs != null && message.hasOwnProperty("logs")) { + if (!Array.isArray(message.logs)) + return "logs: array expected"; + for (let i = 0; i < message.logs.length; ++i) { + let error = $root.vtctldata.Workflow.Stream.Log.verify(message.logs[i]); + if (error) + return "logs." 
+ error; + } + } + if (message.log_fetch_error != null && message.hasOwnProperty("log_fetch_error")) + if (!$util.isString(message.log_fetch_error)) + return "log_fetch_error: string expected"; + if (message.tags != null && message.hasOwnProperty("tags")) { + if (!Array.isArray(message.tags)) + return "tags: array expected"; + for (let i = 0; i < message.tags.length; ++i) + if (!$util.isString(message.tags[i])) + return "tags: string[] expected"; + } + if (message.rows_copied != null && message.hasOwnProperty("rows_copied")) + if (!$util.isInteger(message.rows_copied) && !(message.rows_copied && $util.isInteger(message.rows_copied.low) && $util.isInteger(message.rows_copied.high))) + return "rows_copied: integer|Long expected"; + if (message.throttler_status != null && message.hasOwnProperty("throttler_status")) { + let error = $root.vtctldata.Workflow.Stream.ThrottlerStatus.verify(message.throttler_status); + if (error) + return "throttler_status." + error; + } + return null; + }; + + /** + * Creates a Stream message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.Workflow.Stream + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.Workflow.Stream} Stream */ - Stream.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); + Stream.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.Workflow.Stream) + return object; + let message = new $root.vtctldata.Workflow.Stream(); + if (object.id != null) + if ($util.Long) + (message.id = $util.Long.fromValue(object.id)).unsigned = false; + else if (typeof object.id === "string") + message.id = parseInt(object.id, 10); + else if (typeof object.id === "number") + message.id = object.id; + else if (typeof object.id === "object") + message.id = new $util.LongBits(object.id.low >>> 0, object.id.high >>> 0).toNumber(); + if (object.shard != null) + message.shard = String(object.shard); + if (object.tablet != null) { + if (typeof object.tablet !== "object") + throw TypeError(".vtctldata.Workflow.Stream.tablet: object expected"); + message.tablet = $root.topodata.TabletAlias.fromObject(object.tablet); + } + if (object.binlog_source != null) { + if (typeof object.binlog_source !== "object") + throw TypeError(".vtctldata.Workflow.Stream.binlog_source: object expected"); + message.binlog_source = $root.binlogdata.BinlogSource.fromObject(object.binlog_source); + } + if (object.position != null) + message.position = String(object.position); + if (object.stop_position != null) + message.stop_position = String(object.stop_position); + if (object.state != null) + message.state = String(object.state); + if (object.db_name != null) + message.db_name = String(object.db_name); + if (object.transaction_timestamp != null) { + if (typeof object.transaction_timestamp !== "object") + throw TypeError(".vtctldata.Workflow.Stream.transaction_timestamp: object expected"); + message.transaction_timestamp = 
$root.vttime.Time.fromObject(object.transaction_timestamp); + } + if (object.time_updated != null) { + if (typeof object.time_updated !== "object") + throw TypeError(".vtctldata.Workflow.Stream.time_updated: object expected"); + message.time_updated = $root.vttime.Time.fromObject(object.time_updated); + } + if (object.message != null) + message.message = String(object.message); + if (object.copy_states) { + if (!Array.isArray(object.copy_states)) + throw TypeError(".vtctldata.Workflow.Stream.copy_states: array expected"); + message.copy_states = []; + for (let i = 0; i < object.copy_states.length; ++i) { + if (typeof object.copy_states[i] !== "object") + throw TypeError(".vtctldata.Workflow.Stream.copy_states: object expected"); + message.copy_states[i] = $root.vtctldata.Workflow.Stream.CopyState.fromObject(object.copy_states[i]); + } + } + if (object.logs) { + if (!Array.isArray(object.logs)) + throw TypeError(".vtctldata.Workflow.Stream.logs: array expected"); + message.logs = []; + for (let i = 0; i < object.logs.length; ++i) { + if (typeof object.logs[i] !== "object") + throw TypeError(".vtctldata.Workflow.Stream.logs: object expected"); + message.logs[i] = $root.vtctldata.Workflow.Stream.Log.fromObject(object.logs[i]); + } + } + if (object.log_fetch_error != null) + message.log_fetch_error = String(object.log_fetch_error); + if (object.tags) { + if (!Array.isArray(object.tags)) + throw TypeError(".vtctldata.Workflow.Stream.tags: array expected"); + message.tags = []; + for (let i = 0; i < object.tags.length; ++i) + message.tags[i] = String(object.tags[i]); + } + if (object.rows_copied != null) + if ($util.Long) + (message.rows_copied = $util.Long.fromValue(object.rows_copied)).unsigned = false; + else if (typeof object.rows_copied === "string") + message.rows_copied = parseInt(object.rows_copied, 10); + else if (typeof object.rows_copied === "number") + message.rows_copied = object.rows_copied; + else if (typeof object.rows_copied === "object") + 
message.rows_copied = new $util.LongBits(object.rows_copied.low >>> 0, object.rows_copied.high >>> 0).toNumber(); + if (object.throttler_status != null) { + if (typeof object.throttler_status !== "object") + throw TypeError(".vtctldata.Workflow.Stream.throttler_status: object expected"); + message.throttler_status = $root.vtctldata.Workflow.Stream.ThrottlerStatus.fromObject(object.throttler_status); + } + return message; + }; + + /** + * Creates a plain object from a Stream message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.Workflow.Stream + * @static + * @param {vtctldata.Workflow.Stream} message Stream + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Stream.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) { + object.copy_states = []; + object.logs = []; + object.tags = []; + } + if (options.defaults) { + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.id = options.longs === String ? "0" : 0; + object.shard = ""; + object.tablet = null; + object.binlog_source = null; + object.position = ""; + object.stop_position = ""; + object.state = ""; + object.db_name = ""; + object.transaction_timestamp = null; + object.time_updated = null; + object.message = ""; + object.log_fetch_error = ""; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.rows_copied = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.rows_copied = options.longs === String ? "0" : 0; + object.throttler_status = null; + } + if (message.id != null && message.hasOwnProperty("id")) + if (typeof message.id === "number") + object.id = options.longs === String ? 
String(message.id) : message.id; + else + object.id = options.longs === String ? $util.Long.prototype.toString.call(message.id) : options.longs === Number ? new $util.LongBits(message.id.low >>> 0, message.id.high >>> 0).toNumber() : message.id; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.tablet != null && message.hasOwnProperty("tablet")) + object.tablet = $root.topodata.TabletAlias.toObject(message.tablet, options); + if (message.binlog_source != null && message.hasOwnProperty("binlog_source")) + object.binlog_source = $root.binlogdata.BinlogSource.toObject(message.binlog_source, options); + if (message.position != null && message.hasOwnProperty("position")) + object.position = message.position; + if (message.stop_position != null && message.hasOwnProperty("stop_position")) + object.stop_position = message.stop_position; + if (message.state != null && message.hasOwnProperty("state")) + object.state = message.state; + if (message.db_name != null && message.hasOwnProperty("db_name")) + object.db_name = message.db_name; + if (message.transaction_timestamp != null && message.hasOwnProperty("transaction_timestamp")) + object.transaction_timestamp = $root.vttime.Time.toObject(message.transaction_timestamp, options); + if (message.time_updated != null && message.hasOwnProperty("time_updated")) + object.time_updated = $root.vttime.Time.toObject(message.time_updated, options); + if (message.message != null && message.hasOwnProperty("message")) + object.message = message.message; + if (message.copy_states && message.copy_states.length) { + object.copy_states = []; + for (let j = 0; j < message.copy_states.length; ++j) + object.copy_states[j] = $root.vtctldata.Workflow.Stream.CopyState.toObject(message.copy_states[j], options); + } + if (message.logs && message.logs.length) { + object.logs = []; + for (let j = 0; j < message.logs.length; ++j) + object.logs[j] = 
$root.vtctldata.Workflow.Stream.Log.toObject(message.logs[j], options); + } + if (message.log_fetch_error != null && message.hasOwnProperty("log_fetch_error")) + object.log_fetch_error = message.log_fetch_error; + if (message.tags && message.tags.length) { + object.tags = []; + for (let j = 0; j < message.tags.length; ++j) + object.tags[j] = message.tags[j]; + } + if (message.rows_copied != null && message.hasOwnProperty("rows_copied")) + if (typeof message.rows_copied === "number") + object.rows_copied = options.longs === String ? String(message.rows_copied) : message.rows_copied; + else + object.rows_copied = options.longs === String ? $util.Long.prototype.toString.call(message.rows_copied) : options.longs === Number ? new $util.LongBits(message.rows_copied.low >>> 0, message.rows_copied.high >>> 0).toNumber() : message.rows_copied; + if (message.throttler_status != null && message.hasOwnProperty("throttler_status")) + object.throttler_status = $root.vtctldata.Workflow.Stream.ThrottlerStatus.toObject(message.throttler_status, options); + return object; + }; + + /** + * Converts this Stream to JSON. + * @function toJSON + * @memberof vtctldata.Workflow.Stream + * @instance + * @returns {Object.} JSON object + */ + Stream.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Stream + * @function getTypeUrl + * @memberof vtctldata.Workflow.Stream + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Stream.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.Workflow.Stream"; }; - /** - * Decodes a Stream message from the specified reader or buffer. 
- * @function decode - * @memberof vtctldata.Workflow.Stream - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.Workflow.Stream} Stream - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - Stream.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.Workflow.Stream(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.id = reader.int64(); - break; - } - case 2: { - message.shard = reader.string(); - break; - } - case 3: { - message.tablet = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } - case 4: { - message.binlog_source = $root.binlogdata.BinlogSource.decode(reader, reader.uint32()); - break; - } - case 5: { - message.position = reader.string(); - break; - } - case 6: { - message.stop_position = reader.string(); - break; - } - case 7: { - message.state = reader.string(); - break; - } - case 8: { - message.db_name = reader.string(); - break; - } - case 9: { - message.transaction_timestamp = $root.vttime.Time.decode(reader, reader.uint32()); - break; - } - case 10: { - message.time_updated = $root.vttime.Time.decode(reader, reader.uint32()); - break; - } - case 11: { - message.message = reader.string(); - break; - } - case 12: { - if (!(message.copy_states && message.copy_states.length)) - message.copy_states = []; - message.copy_states.push($root.vtctldata.Workflow.Stream.CopyState.decode(reader, reader.uint32())); - break; - } - case 13: { - if (!(message.logs && message.logs.length)) - message.logs = []; - message.logs.push($root.vtctldata.Workflow.Stream.Log.decode(reader, reader.uint32())); - break; - } - 
case 14: { - message.log_fetch_error = reader.string(); - break; - } - case 15: { - if (!(message.tags && message.tags.length)) - message.tags = []; - message.tags.push(reader.string()); - break; - } - default: - reader.skipType(tag & 7); + Stream.CopyState = (function() { + + /** + * Properties of a CopyState. + * @memberof vtctldata.Workflow.Stream + * @interface ICopyState + * @property {string|null} [table] CopyState table + * @property {string|null} [last_pk] CopyState last_pk + */ + + /** + * Constructs a new CopyState. + * @memberof vtctldata.Workflow.Stream + * @classdesc Represents a CopyState. + * @implements ICopyState + * @constructor + * @param {vtctldata.Workflow.Stream.ICopyState=} [properties] Properties to set + */ + function CopyState(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * CopyState table. + * @member {string} table + * @memberof vtctldata.Workflow.Stream.CopyState + * @instance + */ + CopyState.prototype.table = ""; + + /** + * CopyState last_pk. + * @member {string} last_pk + * @memberof vtctldata.Workflow.Stream.CopyState + * @instance + */ + CopyState.prototype.last_pk = ""; + + /** + * Creates a new CopyState instance using the specified properties. + * @function create + * @memberof vtctldata.Workflow.Stream.CopyState + * @static + * @param {vtctldata.Workflow.Stream.ICopyState=} [properties] Properties to set + * @returns {vtctldata.Workflow.Stream.CopyState} CopyState instance + */ + CopyState.create = function create(properties) { + return new CopyState(properties); + }; + + /** + * Encodes the specified CopyState message. Does not implicitly {@link vtctldata.Workflow.Stream.CopyState.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.Workflow.Stream.CopyState + * @static + * @param {vtctldata.Workflow.Stream.ICopyState} message CopyState message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CopyState.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.table != null && Object.hasOwnProperty.call(message, "table")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.table); + if (message.last_pk != null && Object.hasOwnProperty.call(message, "last_pk")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.last_pk); + return writer; + }; + + /** + * Encodes the specified CopyState message, length delimited. Does not implicitly {@link vtctldata.Workflow.Stream.CopyState.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.Workflow.Stream.CopyState + * @static + * @param {vtctldata.Workflow.Stream.ICopyState} message CopyState message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CopyState.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a CopyState message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.Workflow.Stream.CopyState + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.Workflow.Stream.CopyState} CopyState + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CopyState.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.Workflow.Stream.CopyState(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.table = reader.string(); + break; + } + case 2: { + message.last_pk = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a CopyState message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.Workflow.Stream.CopyState + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.Workflow.Stream.CopyState} CopyState + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CopyState.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a CopyState message. + * @function verify + * @memberof vtctldata.Workflow.Stream.CopyState + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + CopyState.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.table != null && message.hasOwnProperty("table")) + if (!$util.isString(message.table)) + return "table: string expected"; + if (message.last_pk != null && message.hasOwnProperty("last_pk")) + if (!$util.isString(message.last_pk)) + return "last_pk: string expected"; + return null; + }; + + /** + * Creates a CopyState message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.Workflow.Stream.CopyState + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.Workflow.Stream.CopyState} CopyState + */ + CopyState.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.Workflow.Stream.CopyState) + return object; + let message = new $root.vtctldata.Workflow.Stream.CopyState(); + if (object.table != null) + message.table = String(object.table); + if (object.last_pk != null) + message.last_pk = String(object.last_pk); + return message; + }; + + /** + * Creates a plain object from a CopyState message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.Workflow.Stream.CopyState + * @static + * @param {vtctldata.Workflow.Stream.CopyState} message CopyState + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + CopyState.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.table = ""; + object.last_pk = ""; + } + if (message.table != null && message.hasOwnProperty("table")) + object.table = message.table; + if (message.last_pk != null && message.hasOwnProperty("last_pk")) + object.last_pk = message.last_pk; + return object; + }; + + /** + * Converts this CopyState to JSON. 
+ * @function toJSON + * @memberof vtctldata.Workflow.Stream.CopyState + * @instance + * @returns {Object.} JSON object + */ + CopyState.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for CopyState + * @function getTypeUrl + * @memberof vtctldata.Workflow.Stream.CopyState + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + CopyState.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.Workflow.Stream.CopyState"; + }; + + return CopyState; + })(); + + Stream.Log = (function() { + + /** + * Properties of a Log. + * @memberof vtctldata.Workflow.Stream + * @interface ILog + * @property {number|Long|null} [id] Log id + * @property {number|Long|null} [stream_id] Log stream_id + * @property {string|null} [type] Log type + * @property {string|null} [state] Log state + * @property {vttime.ITime|null} [created_at] Log created_at + * @property {vttime.ITime|null} [updated_at] Log updated_at + * @property {string|null} [message] Log message + * @property {number|Long|null} [count] Log count + */ + + /** + * Constructs a new Log. + * @memberof vtctldata.Workflow.Stream + * @classdesc Represents a Log. + * @implements ILog + * @constructor + * @param {vtctldata.Workflow.Stream.ILog=} [properties] Properties to set + */ + function Log(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Log id. + * @member {number|Long} id + * @memberof vtctldata.Workflow.Stream.Log + * @instance + */ + Log.prototype.id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * Log stream_id. 
+ * @member {number|Long} stream_id + * @memberof vtctldata.Workflow.Stream.Log + * @instance + */ + Log.prototype.stream_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * Log type. + * @member {string} type + * @memberof vtctldata.Workflow.Stream.Log + * @instance + */ + Log.prototype.type = ""; + + /** + * Log state. + * @member {string} state + * @memberof vtctldata.Workflow.Stream.Log + * @instance + */ + Log.prototype.state = ""; + + /** + * Log created_at. + * @member {vttime.ITime|null|undefined} created_at + * @memberof vtctldata.Workflow.Stream.Log + * @instance + */ + Log.prototype.created_at = null; + + /** + * Log updated_at. + * @member {vttime.ITime|null|undefined} updated_at + * @memberof vtctldata.Workflow.Stream.Log + * @instance + */ + Log.prototype.updated_at = null; + + /** + * Log message. + * @member {string} message + * @memberof vtctldata.Workflow.Stream.Log + * @instance + */ + Log.prototype.message = ""; + + /** + * Log count. + * @member {number|Long} count + * @memberof vtctldata.Workflow.Stream.Log + * @instance + */ + Log.prototype.count = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * Creates a new Log instance using the specified properties. + * @function create + * @memberof vtctldata.Workflow.Stream.Log + * @static + * @param {vtctldata.Workflow.Stream.ILog=} [properties] Properties to set + * @returns {vtctldata.Workflow.Stream.Log} Log instance + */ + Log.create = function create(properties) { + return new Log(properties); + }; + + /** + * Encodes the specified Log message. Does not implicitly {@link vtctldata.Workflow.Stream.Log.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.Workflow.Stream.Log + * @static + * @param {vtctldata.Workflow.Stream.ILog} message Log message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Log.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.id != null && Object.hasOwnProperty.call(message, "id")) + writer.uint32(/* id 1, wireType 0 =*/8).int64(message.id); + if (message.stream_id != null && Object.hasOwnProperty.call(message, "stream_id")) + writer.uint32(/* id 2, wireType 0 =*/16).int64(message.stream_id); + if (message.type != null && Object.hasOwnProperty.call(message, "type")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.type); + if (message.state != null && Object.hasOwnProperty.call(message, "state")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.state); + if (message.created_at != null && Object.hasOwnProperty.call(message, "created_at")) + $root.vttime.Time.encode(message.created_at, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.updated_at != null && Object.hasOwnProperty.call(message, "updated_at")) + $root.vttime.Time.encode(message.updated_at, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); + if (message.message != null && Object.hasOwnProperty.call(message, "message")) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.message); + if (message.count != null && Object.hasOwnProperty.call(message, "count")) + writer.uint32(/* id 8, wireType 0 =*/64).int64(message.count); + return writer; + }; + + /** + * Encodes the specified Log message, length delimited. Does not implicitly {@link vtctldata.Workflow.Stream.Log.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.Workflow.Stream.Log + * @static + * @param {vtctldata.Workflow.Stream.ILog} message Log message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Log.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Log message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.Workflow.Stream.Log + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.Workflow.Stream.Log} Log + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Log.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.Workflow.Stream.Log(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.id = reader.int64(); + break; + } + case 2: { + message.stream_id = reader.int64(); + break; + } + case 3: { + message.type = reader.string(); + break; + } + case 4: { + message.state = reader.string(); + break; + } + case 5: { + message.created_at = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 6: { + message.updated_at = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 7: { + message.message = reader.string(); + break; + } + case 8: { + message.count = reader.int64(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a Log message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.Workflow.Stream.Log + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.Workflow.Stream.Log} Log + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Log.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Log message. + * @function verify + * @memberof vtctldata.Workflow.Stream.Log + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Log.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.id != null && message.hasOwnProperty("id")) + if (!$util.isInteger(message.id) && !(message.id && $util.isInteger(message.id.low) && $util.isInteger(message.id.high))) + return "id: integer|Long expected"; + if (message.stream_id != null && message.hasOwnProperty("stream_id")) + if (!$util.isInteger(message.stream_id) && !(message.stream_id && $util.isInteger(message.stream_id.low) && $util.isInteger(message.stream_id.high))) + return "stream_id: integer|Long expected"; + if (message.type != null && message.hasOwnProperty("type")) + if (!$util.isString(message.type)) + return "type: string expected"; + if (message.state != null && message.hasOwnProperty("state")) + if (!$util.isString(message.state)) + return "state: string expected"; + if (message.created_at != null && message.hasOwnProperty("created_at")) { + let error = $root.vttime.Time.verify(message.created_at); + if (error) + return "created_at." 
+ error; + } + if (message.updated_at != null && message.hasOwnProperty("updated_at")) { + let error = $root.vttime.Time.verify(message.updated_at); + if (error) + return "updated_at." + error; + } + if (message.message != null && message.hasOwnProperty("message")) + if (!$util.isString(message.message)) + return "message: string expected"; + if (message.count != null && message.hasOwnProperty("count")) + if (!$util.isInteger(message.count) && !(message.count && $util.isInteger(message.count.low) && $util.isInteger(message.count.high))) + return "count: integer|Long expected"; + return null; + }; + + /** + * Creates a Log message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.Workflow.Stream.Log + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.Workflow.Stream.Log} Log + */ + Log.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.Workflow.Stream.Log) + return object; + let message = new $root.vtctldata.Workflow.Stream.Log(); + if (object.id != null) + if ($util.Long) + (message.id = $util.Long.fromValue(object.id)).unsigned = false; + else if (typeof object.id === "string") + message.id = parseInt(object.id, 10); + else if (typeof object.id === "number") + message.id = object.id; + else if (typeof object.id === "object") + message.id = new $util.LongBits(object.id.low >>> 0, object.id.high >>> 0).toNumber(); + if (object.stream_id != null) + if ($util.Long) + (message.stream_id = $util.Long.fromValue(object.stream_id)).unsigned = false; + else if (typeof object.stream_id === "string") + message.stream_id = parseInt(object.stream_id, 10); + else if (typeof object.stream_id === "number") + message.stream_id = object.stream_id; + else if (typeof object.stream_id === "object") + message.stream_id = new $util.LongBits(object.stream_id.low >>> 0, object.stream_id.high >>> 0).toNumber(); + if (object.type != null) + message.type = 
String(object.type); + if (object.state != null) + message.state = String(object.state); + if (object.created_at != null) { + if (typeof object.created_at !== "object") + throw TypeError(".vtctldata.Workflow.Stream.Log.created_at: object expected"); + message.created_at = $root.vttime.Time.fromObject(object.created_at); + } + if (object.updated_at != null) { + if (typeof object.updated_at !== "object") + throw TypeError(".vtctldata.Workflow.Stream.Log.updated_at: object expected"); + message.updated_at = $root.vttime.Time.fromObject(object.updated_at); + } + if (object.message != null) + message.message = String(object.message); + if (object.count != null) + if ($util.Long) + (message.count = $util.Long.fromValue(object.count)).unsigned = false; + else if (typeof object.count === "string") + message.count = parseInt(object.count, 10); + else if (typeof object.count === "number") + message.count = object.count; + else if (typeof object.count === "object") + message.count = new $util.LongBits(object.count.low >>> 0, object.count.high >>> 0).toNumber(); + return message; + }; + + /** + * Creates a plain object from a Log message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.Workflow.Stream.Log + * @static + * @param {vtctldata.Workflow.Stream.Log} message Log + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Log.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.id = options.longs === String ? "0" : 0; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.stream_id = options.longs === String ? long.toString() : options.longs === Number ? 
long.toNumber() : long; + } else + object.stream_id = options.longs === String ? "0" : 0; + object.type = ""; + object.state = ""; + object.created_at = null; + object.updated_at = null; + object.message = ""; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.count = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.count = options.longs === String ? "0" : 0; + } + if (message.id != null && message.hasOwnProperty("id")) + if (typeof message.id === "number") + object.id = options.longs === String ? String(message.id) : message.id; + else + object.id = options.longs === String ? $util.Long.prototype.toString.call(message.id) : options.longs === Number ? new $util.LongBits(message.id.low >>> 0, message.id.high >>> 0).toNumber() : message.id; + if (message.stream_id != null && message.hasOwnProperty("stream_id")) + if (typeof message.stream_id === "number") + object.stream_id = options.longs === String ? String(message.stream_id) : message.stream_id; + else + object.stream_id = options.longs === String ? $util.Long.prototype.toString.call(message.stream_id) : options.longs === Number ? 
new $util.LongBits(message.stream_id.low >>> 0, message.stream_id.high >>> 0).toNumber() : message.stream_id; + if (message.type != null && message.hasOwnProperty("type")) + object.type = message.type; + if (message.state != null && message.hasOwnProperty("state")) + object.state = message.state; + if (message.created_at != null && message.hasOwnProperty("created_at")) + object.created_at = $root.vttime.Time.toObject(message.created_at, options); + if (message.updated_at != null && message.hasOwnProperty("updated_at")) + object.updated_at = $root.vttime.Time.toObject(message.updated_at, options); + if (message.message != null && message.hasOwnProperty("message")) + object.message = message.message; + if (message.count != null && message.hasOwnProperty("count")) + if (typeof message.count === "number") + object.count = options.longs === String ? String(message.count) : message.count; + else + object.count = options.longs === String ? $util.Long.prototype.toString.call(message.count) : options.longs === Number ? new $util.LongBits(message.count.low >>> 0, message.count.high >>> 0).toNumber() : message.count; + return object; + }; + + /** + * Converts this Log to JSON. 
+ * @function toJSON + * @memberof vtctldata.Workflow.Stream.Log + * @instance + * @returns {Object.} JSON object + */ + Log.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Log + * @function getTypeUrl + * @memberof vtctldata.Workflow.Stream.Log + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Log.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.Workflow.Stream.Log"; + }; + + return Log; + })(); + + Stream.ThrottlerStatus = (function() { + + /** + * Properties of a ThrottlerStatus. + * @memberof vtctldata.Workflow.Stream + * @interface IThrottlerStatus + * @property {string|null} [component_throttled] ThrottlerStatus component_throttled + * @property {vttime.ITime|null} [time_throttled] ThrottlerStatus time_throttled + */ + + /** + * Constructs a new ThrottlerStatus. + * @memberof vtctldata.Workflow.Stream + * @classdesc Represents a ThrottlerStatus. + * @implements IThrottlerStatus + * @constructor + * @param {vtctldata.Workflow.Stream.IThrottlerStatus=} [properties] Properties to set + */ + function ThrottlerStatus(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ThrottlerStatus component_throttled. + * @member {string} component_throttled + * @memberof vtctldata.Workflow.Stream.ThrottlerStatus + * @instance + */ + ThrottlerStatus.prototype.component_throttled = ""; + + /** + * ThrottlerStatus time_throttled. 
+ * @member {vttime.ITime|null|undefined} time_throttled + * @memberof vtctldata.Workflow.Stream.ThrottlerStatus + * @instance + */ + ThrottlerStatus.prototype.time_throttled = null; + + /** + * Creates a new ThrottlerStatus instance using the specified properties. + * @function create + * @memberof vtctldata.Workflow.Stream.ThrottlerStatus + * @static + * @param {vtctldata.Workflow.Stream.IThrottlerStatus=} [properties] Properties to set + * @returns {vtctldata.Workflow.Stream.ThrottlerStatus} ThrottlerStatus instance + */ + ThrottlerStatus.create = function create(properties) { + return new ThrottlerStatus(properties); + }; + + /** + * Encodes the specified ThrottlerStatus message. Does not implicitly {@link vtctldata.Workflow.Stream.ThrottlerStatus.verify|verify} messages. + * @function encode + * @memberof vtctldata.Workflow.Stream.ThrottlerStatus + * @static + * @param {vtctldata.Workflow.Stream.IThrottlerStatus} message ThrottlerStatus message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ThrottlerStatus.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.component_throttled != null && Object.hasOwnProperty.call(message, "component_throttled")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.component_throttled); + if (message.time_throttled != null && Object.hasOwnProperty.call(message, "time_throttled")) + $root.vttime.Time.encode(message.time_throttled, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified ThrottlerStatus message, length delimited. Does not implicitly {@link vtctldata.Workflow.Stream.ThrottlerStatus.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.Workflow.Stream.ThrottlerStatus + * @static + * @param {vtctldata.Workflow.Stream.IThrottlerStatus} message ThrottlerStatus message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ThrottlerStatus.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a ThrottlerStatus message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.Workflow.Stream.ThrottlerStatus + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.Workflow.Stream.ThrottlerStatus} ThrottlerStatus + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ThrottlerStatus.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.Workflow.Stream.ThrottlerStatus(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.component_throttled = reader.string(); + break; + } + case 2: { + message.time_throttled = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a ThrottlerStatus message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.Workflow.Stream.ThrottlerStatus + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.Workflow.Stream.ThrottlerStatus} ThrottlerStatus + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ThrottlerStatus.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a ThrottlerStatus message. + * @function verify + * @memberof vtctldata.Workflow.Stream.ThrottlerStatus + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ThrottlerStatus.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.component_throttled != null && message.hasOwnProperty("component_throttled")) + if (!$util.isString(message.component_throttled)) + return "component_throttled: string expected"; + if (message.time_throttled != null && message.hasOwnProperty("time_throttled")) { + let error = $root.vttime.Time.verify(message.time_throttled); + if (error) + return "time_throttled." + error; + } + return null; + }; + + /** + * Creates a ThrottlerStatus message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.Workflow.Stream.ThrottlerStatus + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.Workflow.Stream.ThrottlerStatus} ThrottlerStatus + */ + ThrottlerStatus.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.Workflow.Stream.ThrottlerStatus) + return object; + let message = new $root.vtctldata.Workflow.Stream.ThrottlerStatus(); + if (object.component_throttled != null) + message.component_throttled = String(object.component_throttled); + if (object.time_throttled != null) { + if (typeof object.time_throttled !== "object") + throw TypeError(".vtctldata.Workflow.Stream.ThrottlerStatus.time_throttled: object expected"); + message.time_throttled = $root.vttime.Time.fromObject(object.time_throttled); + } + return message; + }; + + /** + * Creates a plain object from a ThrottlerStatus message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.Workflow.Stream.ThrottlerStatus + * @static + * @param {vtctldata.Workflow.Stream.ThrottlerStatus} message ThrottlerStatus + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ThrottlerStatus.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.component_throttled = ""; + object.time_throttled = null; + } + if (message.component_throttled != null && message.hasOwnProperty("component_throttled")) + object.component_throttled = message.component_throttled; + if (message.time_throttled != null && message.hasOwnProperty("time_throttled")) + object.time_throttled = $root.vttime.Time.toObject(message.time_throttled, options); + return object; + }; + + /** + * Converts this ThrottlerStatus to JSON. 
+ * @function toJSON + * @memberof vtctldata.Workflow.Stream.ThrottlerStatus + * @instance + * @returns {Object.} JSON object + */ + ThrottlerStatus.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ThrottlerStatus + * @function getTypeUrl + * @memberof vtctldata.Workflow.Stream.ThrottlerStatus + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ThrottlerStatus.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.Workflow.Stream.ThrottlerStatus"; + }; + + return ThrottlerStatus; + })(); + + return Stream; + })(); + + return Workflow; + })(); + + vtctldata.AddCellInfoRequest = (function() { + + /** + * Properties of an AddCellInfoRequest. + * @memberof vtctldata + * @interface IAddCellInfoRequest + * @property {string|null} [name] AddCellInfoRequest name + * @property {topodata.ICellInfo|null} [cell_info] AddCellInfoRequest cell_info + */ + + /** + * Constructs a new AddCellInfoRequest. + * @memberof vtctldata + * @classdesc Represents an AddCellInfoRequest. + * @implements IAddCellInfoRequest + * @constructor + * @param {vtctldata.IAddCellInfoRequest=} [properties] Properties to set + */ + function AddCellInfoRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * AddCellInfoRequest name. + * @member {string} name + * @memberof vtctldata.AddCellInfoRequest + * @instance + */ + AddCellInfoRequest.prototype.name = ""; + + /** + * AddCellInfoRequest cell_info. 
+ * @member {topodata.ICellInfo|null|undefined} cell_info + * @memberof vtctldata.AddCellInfoRequest + * @instance + */ + AddCellInfoRequest.prototype.cell_info = null; + + /** + * Creates a new AddCellInfoRequest instance using the specified properties. + * @function create + * @memberof vtctldata.AddCellInfoRequest + * @static + * @param {vtctldata.IAddCellInfoRequest=} [properties] Properties to set + * @returns {vtctldata.AddCellInfoRequest} AddCellInfoRequest instance + */ + AddCellInfoRequest.create = function create(properties) { + return new AddCellInfoRequest(properties); + }; + + /** + * Encodes the specified AddCellInfoRequest message. Does not implicitly {@link vtctldata.AddCellInfoRequest.verify|verify} messages. + * @function encode + * @memberof vtctldata.AddCellInfoRequest + * @static + * @param {vtctldata.IAddCellInfoRequest} message AddCellInfoRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + AddCellInfoRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.cell_info != null && Object.hasOwnProperty.call(message, "cell_info")) + $root.topodata.CellInfo.encode(message.cell_info, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified AddCellInfoRequest message, length delimited. Does not implicitly {@link vtctldata.AddCellInfoRequest.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.AddCellInfoRequest + * @static + * @param {vtctldata.IAddCellInfoRequest} message AddCellInfoRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + AddCellInfoRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an AddCellInfoRequest message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.AddCellInfoRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.AddCellInfoRequest} AddCellInfoRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + AddCellInfoRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.AddCellInfoRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.name = reader.string(); + break; + } + case 2: { + message.cell_info = $root.topodata.CellInfo.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an AddCellInfoRequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.AddCellInfoRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.AddCellInfoRequest} AddCellInfoRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + AddCellInfoRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an AddCellInfoRequest message. + * @function verify + * @memberof vtctldata.AddCellInfoRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + AddCellInfoRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.cell_info != null && message.hasOwnProperty("cell_info")) { + let error = $root.topodata.CellInfo.verify(message.cell_info); + if (error) + return "cell_info." + error; + } + return null; + }; + + /** + * Creates an AddCellInfoRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.AddCellInfoRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.AddCellInfoRequest} AddCellInfoRequest + */ + AddCellInfoRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.AddCellInfoRequest) + return object; + let message = new $root.vtctldata.AddCellInfoRequest(); + if (object.name != null) + message.name = String(object.name); + if (object.cell_info != null) { + if (typeof object.cell_info !== "object") + throw TypeError(".vtctldata.AddCellInfoRequest.cell_info: object expected"); + message.cell_info = $root.topodata.CellInfo.fromObject(object.cell_info); + } + return message; + }; + + /** + * Creates a plain object from an AddCellInfoRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.AddCellInfoRequest + * @static + * @param {vtctldata.AddCellInfoRequest} message AddCellInfoRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + AddCellInfoRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.name = ""; + object.cell_info = null; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.cell_info != null && message.hasOwnProperty("cell_info")) + object.cell_info = $root.topodata.CellInfo.toObject(message.cell_info, options); + return object; + }; + + /** + * Converts this AddCellInfoRequest to JSON. 
+ * @function toJSON + * @memberof vtctldata.AddCellInfoRequest + * @instance + * @returns {Object.} JSON object + */ + AddCellInfoRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for AddCellInfoRequest + * @function getTypeUrl + * @memberof vtctldata.AddCellInfoRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + AddCellInfoRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.AddCellInfoRequest"; + }; + + return AddCellInfoRequest; + })(); + + vtctldata.AddCellInfoResponse = (function() { + + /** + * Properties of an AddCellInfoResponse. + * @memberof vtctldata + * @interface IAddCellInfoResponse + */ + + /** + * Constructs a new AddCellInfoResponse. + * @memberof vtctldata + * @classdesc Represents an AddCellInfoResponse. + * @implements IAddCellInfoResponse + * @constructor + * @param {vtctldata.IAddCellInfoResponse=} [properties] Properties to set + */ + function AddCellInfoResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Creates a new AddCellInfoResponse instance using the specified properties. + * @function create + * @memberof vtctldata.AddCellInfoResponse + * @static + * @param {vtctldata.IAddCellInfoResponse=} [properties] Properties to set + * @returns {vtctldata.AddCellInfoResponse} AddCellInfoResponse instance + */ + AddCellInfoResponse.create = function create(properties) { + return new AddCellInfoResponse(properties); + }; + + /** + * Encodes the specified AddCellInfoResponse message. 
Does not implicitly {@link vtctldata.AddCellInfoResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.AddCellInfoResponse + * @static + * @param {vtctldata.IAddCellInfoResponse} message AddCellInfoResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + AddCellInfoResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + return writer; + }; + + /** + * Encodes the specified AddCellInfoResponse message, length delimited. Does not implicitly {@link vtctldata.AddCellInfoResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.AddCellInfoResponse + * @static + * @param {vtctldata.IAddCellInfoResponse} message AddCellInfoResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + AddCellInfoResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an AddCellInfoResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.AddCellInfoResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.AddCellInfoResponse} AddCellInfoResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + AddCellInfoResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.AddCellInfoResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an AddCellInfoResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.AddCellInfoResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.AddCellInfoResponse} AddCellInfoResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + AddCellInfoResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an AddCellInfoResponse message. + * @function verify + * @memberof vtctldata.AddCellInfoResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + AddCellInfoResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + return null; + }; + + /** + * Creates an AddCellInfoResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.AddCellInfoResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.AddCellInfoResponse} AddCellInfoResponse + */ + AddCellInfoResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.AddCellInfoResponse) + return object; + return new $root.vtctldata.AddCellInfoResponse(); + }; + + /** + * Creates a plain object from an AddCellInfoResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.AddCellInfoResponse + * @static + * @param {vtctldata.AddCellInfoResponse} message AddCellInfoResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + AddCellInfoResponse.toObject = function toObject() { + return {}; + }; + + /** + * Converts this AddCellInfoResponse to JSON. + * @function toJSON + * @memberof vtctldata.AddCellInfoResponse + * @instance + * @returns {Object.} JSON object + */ + AddCellInfoResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for AddCellInfoResponse + * @function getTypeUrl + * @memberof vtctldata.AddCellInfoResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + AddCellInfoResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.AddCellInfoResponse"; + }; + + return AddCellInfoResponse; + })(); + + vtctldata.AddCellsAliasRequest = (function() { + + /** + * Properties of an AddCellsAliasRequest. + * @memberof vtctldata + * @interface IAddCellsAliasRequest + * @property {string|null} [name] AddCellsAliasRequest name + * @property {Array.|null} [cells] AddCellsAliasRequest cells + */ + + /** + * Constructs a new AddCellsAliasRequest. + * @memberof vtctldata + * @classdesc Represents an AddCellsAliasRequest. 
+ * @implements IAddCellsAliasRequest + * @constructor + * @param {vtctldata.IAddCellsAliasRequest=} [properties] Properties to set + */ + function AddCellsAliasRequest(properties) { + this.cells = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * AddCellsAliasRequest name. + * @member {string} name + * @memberof vtctldata.AddCellsAliasRequest + * @instance + */ + AddCellsAliasRequest.prototype.name = ""; + + /** + * AddCellsAliasRequest cells. + * @member {Array.} cells + * @memberof vtctldata.AddCellsAliasRequest + * @instance + */ + AddCellsAliasRequest.prototype.cells = $util.emptyArray; + + /** + * Creates a new AddCellsAliasRequest instance using the specified properties. + * @function create + * @memberof vtctldata.AddCellsAliasRequest + * @static + * @param {vtctldata.IAddCellsAliasRequest=} [properties] Properties to set + * @returns {vtctldata.AddCellsAliasRequest} AddCellsAliasRequest instance + */ + AddCellsAliasRequest.create = function create(properties) { + return new AddCellsAliasRequest(properties); + }; + + /** + * Encodes the specified AddCellsAliasRequest message. Does not implicitly {@link vtctldata.AddCellsAliasRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.AddCellsAliasRequest + * @static + * @param {vtctldata.IAddCellsAliasRequest} message AddCellsAliasRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + AddCellsAliasRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.cells != null && message.cells.length) + for (let i = 0; i < message.cells.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.cells[i]); + return writer; + }; + + /** + * Encodes the specified AddCellsAliasRequest message, length delimited. Does not implicitly {@link vtctldata.AddCellsAliasRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.AddCellsAliasRequest + * @static + * @param {vtctldata.IAddCellsAliasRequest} message AddCellsAliasRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + AddCellsAliasRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an AddCellsAliasRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.AddCellsAliasRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.AddCellsAliasRequest} AddCellsAliasRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + AddCellsAliasRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.AddCellsAliasRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.name = reader.string(); + break; + } + case 2: { + if (!(message.cells && message.cells.length)) + message.cells = []; + message.cells.push(reader.string()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an AddCellsAliasRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.AddCellsAliasRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.AddCellsAliasRequest} AddCellsAliasRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + AddCellsAliasRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an AddCellsAliasRequest message. 
+ * @function verify + * @memberof vtctldata.AddCellsAliasRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + AddCellsAliasRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.cells != null && message.hasOwnProperty("cells")) { + if (!Array.isArray(message.cells)) + return "cells: array expected"; + for (let i = 0; i < message.cells.length; ++i) + if (!$util.isString(message.cells[i])) + return "cells: string[] expected"; + } + return null; + }; + + /** + * Creates an AddCellsAliasRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.AddCellsAliasRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.AddCellsAliasRequest} AddCellsAliasRequest + */ + AddCellsAliasRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.AddCellsAliasRequest) + return object; + let message = new $root.vtctldata.AddCellsAliasRequest(); + if (object.name != null) + message.name = String(object.name); + if (object.cells) { + if (!Array.isArray(object.cells)) + throw TypeError(".vtctldata.AddCellsAliasRequest.cells: array expected"); + message.cells = []; + for (let i = 0; i < object.cells.length; ++i) + message.cells[i] = String(object.cells[i]); + } + return message; + }; + + /** + * Creates a plain object from an AddCellsAliasRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.AddCellsAliasRequest + * @static + * @param {vtctldata.AddCellsAliasRequest} message AddCellsAliasRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + AddCellsAliasRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.cells = []; + if (options.defaults) + object.name = ""; + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.cells && message.cells.length) { + object.cells = []; + for (let j = 0; j < message.cells.length; ++j) + object.cells[j] = message.cells[j]; + } + return object; + }; + + /** + * Converts this AddCellsAliasRequest to JSON. + * @function toJSON + * @memberof vtctldata.AddCellsAliasRequest + * @instance + * @returns {Object.} JSON object + */ + AddCellsAliasRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for AddCellsAliasRequest + * @function getTypeUrl + * @memberof vtctldata.AddCellsAliasRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + AddCellsAliasRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.AddCellsAliasRequest"; + }; + + return AddCellsAliasRequest; + })(); + + vtctldata.AddCellsAliasResponse = (function() { + + /** + * Properties of an AddCellsAliasResponse. + * @memberof vtctldata + * @interface IAddCellsAliasResponse + */ + + /** + * Constructs a new AddCellsAliasResponse. + * @memberof vtctldata + * @classdesc Represents an AddCellsAliasResponse. 
+ * @implements IAddCellsAliasResponse + * @constructor + * @param {vtctldata.IAddCellsAliasResponse=} [properties] Properties to set + */ + function AddCellsAliasResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Creates a new AddCellsAliasResponse instance using the specified properties. + * @function create + * @memberof vtctldata.AddCellsAliasResponse + * @static + * @param {vtctldata.IAddCellsAliasResponse=} [properties] Properties to set + * @returns {vtctldata.AddCellsAliasResponse} AddCellsAliasResponse instance + */ + AddCellsAliasResponse.create = function create(properties) { + return new AddCellsAliasResponse(properties); + }; + + /** + * Encodes the specified AddCellsAliasResponse message. Does not implicitly {@link vtctldata.AddCellsAliasResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.AddCellsAliasResponse + * @static + * @param {vtctldata.IAddCellsAliasResponse} message AddCellsAliasResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + AddCellsAliasResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + return writer; + }; + + /** + * Encodes the specified AddCellsAliasResponse message, length delimited. Does not implicitly {@link vtctldata.AddCellsAliasResponse.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.AddCellsAliasResponse + * @static + * @param {vtctldata.IAddCellsAliasResponse} message AddCellsAliasResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + AddCellsAliasResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an AddCellsAliasResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.AddCellsAliasResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.AddCellsAliasResponse} AddCellsAliasResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + AddCellsAliasResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.AddCellsAliasResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an AddCellsAliasResponse message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.AddCellsAliasResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.AddCellsAliasResponse} AddCellsAliasResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + AddCellsAliasResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an AddCellsAliasResponse message. + * @function verify + * @memberof vtctldata.AddCellsAliasResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + AddCellsAliasResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + return null; + }; + + /** + * Creates an AddCellsAliasResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.AddCellsAliasResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.AddCellsAliasResponse} AddCellsAliasResponse + */ + AddCellsAliasResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.AddCellsAliasResponse) + return object; + return new $root.vtctldata.AddCellsAliasResponse(); + }; + + /** + * Creates a plain object from an AddCellsAliasResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.AddCellsAliasResponse + * @static + * @param {vtctldata.AddCellsAliasResponse} message AddCellsAliasResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + AddCellsAliasResponse.toObject = function toObject() { + return {}; + }; + + /** + * Converts this AddCellsAliasResponse to JSON. + * @function toJSON + * @memberof vtctldata.AddCellsAliasResponse + * @instance + * @returns {Object.} JSON object + */ + AddCellsAliasResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for AddCellsAliasResponse + * @function getTypeUrl + * @memberof vtctldata.AddCellsAliasResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + AddCellsAliasResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.AddCellsAliasResponse"; + }; + + return AddCellsAliasResponse; + })(); + + vtctldata.ApplyRoutingRulesRequest = (function() { + + /** + * Properties of an ApplyRoutingRulesRequest. + * @memberof vtctldata + * @interface IApplyRoutingRulesRequest + * @property {vschema.IRoutingRules|null} [routing_rules] ApplyRoutingRulesRequest routing_rules + * @property {boolean|null} [skip_rebuild] ApplyRoutingRulesRequest skip_rebuild + * @property {Array.|null} [rebuild_cells] ApplyRoutingRulesRequest rebuild_cells + */ + + /** + * Constructs a new ApplyRoutingRulesRequest. + * @memberof vtctldata + * @classdesc Represents an ApplyRoutingRulesRequest. 
+ * @implements IApplyRoutingRulesRequest + * @constructor + * @param {vtctldata.IApplyRoutingRulesRequest=} [properties] Properties to set + */ + function ApplyRoutingRulesRequest(properties) { + this.rebuild_cells = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ApplyRoutingRulesRequest routing_rules. + * @member {vschema.IRoutingRules|null|undefined} routing_rules + * @memberof vtctldata.ApplyRoutingRulesRequest + * @instance + */ + ApplyRoutingRulesRequest.prototype.routing_rules = null; + + /** + * ApplyRoutingRulesRequest skip_rebuild. + * @member {boolean} skip_rebuild + * @memberof vtctldata.ApplyRoutingRulesRequest + * @instance + */ + ApplyRoutingRulesRequest.prototype.skip_rebuild = false; + + /** + * ApplyRoutingRulesRequest rebuild_cells. + * @member {Array.} rebuild_cells + * @memberof vtctldata.ApplyRoutingRulesRequest + * @instance + */ + ApplyRoutingRulesRequest.prototype.rebuild_cells = $util.emptyArray; + + /** + * Creates a new ApplyRoutingRulesRequest instance using the specified properties. + * @function create + * @memberof vtctldata.ApplyRoutingRulesRequest + * @static + * @param {vtctldata.IApplyRoutingRulesRequest=} [properties] Properties to set + * @returns {vtctldata.ApplyRoutingRulesRequest} ApplyRoutingRulesRequest instance + */ + ApplyRoutingRulesRequest.create = function create(properties) { + return new ApplyRoutingRulesRequest(properties); + }; + + /** + * Encodes the specified ApplyRoutingRulesRequest message. Does not implicitly {@link vtctldata.ApplyRoutingRulesRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.ApplyRoutingRulesRequest + * @static + * @param {vtctldata.IApplyRoutingRulesRequest} message ApplyRoutingRulesRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplyRoutingRulesRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.routing_rules != null && Object.hasOwnProperty.call(message, "routing_rules")) + $root.vschema.RoutingRules.encode(message.routing_rules, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.skip_rebuild != null && Object.hasOwnProperty.call(message, "skip_rebuild")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.skip_rebuild); + if (message.rebuild_cells != null && message.rebuild_cells.length) + for (let i = 0; i < message.rebuild_cells.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.rebuild_cells[i]); + return writer; + }; + + /** + * Encodes the specified ApplyRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.ApplyRoutingRulesRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ApplyRoutingRulesRequest + * @static + * @param {vtctldata.IApplyRoutingRulesRequest} message ApplyRoutingRulesRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplyRoutingRulesRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ApplyRoutingRulesRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.ApplyRoutingRulesRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ApplyRoutingRulesRequest} ApplyRoutingRulesRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplyRoutingRulesRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplyRoutingRulesRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.routing_rules = $root.vschema.RoutingRules.decode(reader, reader.uint32()); + break; + } + case 2: { + message.skip_rebuild = reader.bool(); + break; + } + case 3: { + if (!(message.rebuild_cells && message.rebuild_cells.length)) + message.rebuild_cells = []; + message.rebuild_cells.push(reader.string()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ApplyRoutingRulesRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.ApplyRoutingRulesRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ApplyRoutingRulesRequest} ApplyRoutingRulesRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplyRoutingRulesRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ApplyRoutingRulesRequest message. 
+ * @function verify + * @memberof vtctldata.ApplyRoutingRulesRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ApplyRoutingRulesRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.routing_rules != null && message.hasOwnProperty("routing_rules")) { + let error = $root.vschema.RoutingRules.verify(message.routing_rules); + if (error) + return "routing_rules." + error; + } + if (message.skip_rebuild != null && message.hasOwnProperty("skip_rebuild")) + if (typeof message.skip_rebuild !== "boolean") + return "skip_rebuild: boolean expected"; + if (message.rebuild_cells != null && message.hasOwnProperty("rebuild_cells")) { + if (!Array.isArray(message.rebuild_cells)) + return "rebuild_cells: array expected"; + for (let i = 0; i < message.rebuild_cells.length; ++i) + if (!$util.isString(message.rebuild_cells[i])) + return "rebuild_cells: string[] expected"; + } + return null; + }; + + /** + * Creates an ApplyRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.ApplyRoutingRulesRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ApplyRoutingRulesRequest} ApplyRoutingRulesRequest + */ + ApplyRoutingRulesRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ApplyRoutingRulesRequest) + return object; + let message = new $root.vtctldata.ApplyRoutingRulesRequest(); + if (object.routing_rules != null) { + if (typeof object.routing_rules !== "object") + throw TypeError(".vtctldata.ApplyRoutingRulesRequest.routing_rules: object expected"); + message.routing_rules = $root.vschema.RoutingRules.fromObject(object.routing_rules); + } + if (object.skip_rebuild != null) + message.skip_rebuild = Boolean(object.skip_rebuild); + if (object.rebuild_cells) { + if (!Array.isArray(object.rebuild_cells)) + throw TypeError(".vtctldata.ApplyRoutingRulesRequest.rebuild_cells: array expected"); + message.rebuild_cells = []; + for (let i = 0; i < object.rebuild_cells.length; ++i) + message.rebuild_cells[i] = String(object.rebuild_cells[i]); + } + return message; + }; + + /** + * Creates a plain object from an ApplyRoutingRulesRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.ApplyRoutingRulesRequest + * @static + * @param {vtctldata.ApplyRoutingRulesRequest} message ApplyRoutingRulesRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ApplyRoutingRulesRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.rebuild_cells = []; + if (options.defaults) { + object.routing_rules = null; + object.skip_rebuild = false; + } + if (message.routing_rules != null && message.hasOwnProperty("routing_rules")) + object.routing_rules = $root.vschema.RoutingRules.toObject(message.routing_rules, options); + if (message.skip_rebuild != null && message.hasOwnProperty("skip_rebuild")) + object.skip_rebuild = message.skip_rebuild; + if (message.rebuild_cells && message.rebuild_cells.length) { + object.rebuild_cells = []; + for (let j = 0; j < message.rebuild_cells.length; ++j) + object.rebuild_cells[j] = message.rebuild_cells[j]; + } + return object; + }; + + /** + * Converts this ApplyRoutingRulesRequest to JSON. 
+ * @function toJSON + * @memberof vtctldata.ApplyRoutingRulesRequest + * @instance + * @returns {Object.} JSON object + */ + ApplyRoutingRulesRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ApplyRoutingRulesRequest + * @function getTypeUrl + * @memberof vtctldata.ApplyRoutingRulesRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ApplyRoutingRulesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ApplyRoutingRulesRequest"; + }; + + return ApplyRoutingRulesRequest; + })(); + + vtctldata.ApplyRoutingRulesResponse = (function() { + + /** + * Properties of an ApplyRoutingRulesResponse. + * @memberof vtctldata + * @interface IApplyRoutingRulesResponse + */ + + /** + * Constructs a new ApplyRoutingRulesResponse. + * @memberof vtctldata + * @classdesc Represents an ApplyRoutingRulesResponse. + * @implements IApplyRoutingRulesResponse + * @constructor + * @param {vtctldata.IApplyRoutingRulesResponse=} [properties] Properties to set + */ + function ApplyRoutingRulesResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Creates a new ApplyRoutingRulesResponse instance using the specified properties. 
+ * @function create + * @memberof vtctldata.ApplyRoutingRulesResponse + * @static + * @param {vtctldata.IApplyRoutingRulesResponse=} [properties] Properties to set + * @returns {vtctldata.ApplyRoutingRulesResponse} ApplyRoutingRulesResponse instance + */ + ApplyRoutingRulesResponse.create = function create(properties) { + return new ApplyRoutingRulesResponse(properties); + }; + + /** + * Encodes the specified ApplyRoutingRulesResponse message. Does not implicitly {@link vtctldata.ApplyRoutingRulesResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.ApplyRoutingRulesResponse + * @static + * @param {vtctldata.IApplyRoutingRulesResponse} message ApplyRoutingRulesResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplyRoutingRulesResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + return writer; + }; + + /** + * Encodes the specified ApplyRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.ApplyRoutingRulesResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ApplyRoutingRulesResponse + * @static + * @param {vtctldata.IApplyRoutingRulesResponse} message ApplyRoutingRulesResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplyRoutingRulesResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ApplyRoutingRulesResponse message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.ApplyRoutingRulesResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ApplyRoutingRulesResponse} ApplyRoutingRulesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplyRoutingRulesResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplyRoutingRulesResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ApplyRoutingRulesResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.ApplyRoutingRulesResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ApplyRoutingRulesResponse} ApplyRoutingRulesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplyRoutingRulesResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ApplyRoutingRulesResponse message. 
+ * @function verify + * @memberof vtctldata.ApplyRoutingRulesResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ApplyRoutingRulesResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + return null; + }; + + /** + * Creates an ApplyRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.ApplyRoutingRulesResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ApplyRoutingRulesResponse} ApplyRoutingRulesResponse + */ + ApplyRoutingRulesResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ApplyRoutingRulesResponse) + return object; + return new $root.vtctldata.ApplyRoutingRulesResponse(); + }; + + /** + * Creates a plain object from an ApplyRoutingRulesResponse message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.ApplyRoutingRulesResponse + * @static + * @param {vtctldata.ApplyRoutingRulesResponse} message ApplyRoutingRulesResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ApplyRoutingRulesResponse.toObject = function toObject() { + return {}; + }; + + /** + * Converts this ApplyRoutingRulesResponse to JSON. 
+ * @function toJSON + * @memberof vtctldata.ApplyRoutingRulesResponse + * @instance + * @returns {Object.} JSON object + */ + ApplyRoutingRulesResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ApplyRoutingRulesResponse + * @function getTypeUrl + * @memberof vtctldata.ApplyRoutingRulesResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ApplyRoutingRulesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ApplyRoutingRulesResponse"; + }; + + return ApplyRoutingRulesResponse; + })(); + + vtctldata.ApplyShardRoutingRulesRequest = (function() { + + /** + * Properties of an ApplyShardRoutingRulesRequest. + * @memberof vtctldata + * @interface IApplyShardRoutingRulesRequest + * @property {vschema.IShardRoutingRules|null} [shard_routing_rules] ApplyShardRoutingRulesRequest shard_routing_rules + * @property {boolean|null} [skip_rebuild] ApplyShardRoutingRulesRequest skip_rebuild + * @property {Array.|null} [rebuild_cells] ApplyShardRoutingRulesRequest rebuild_cells + */ + + /** + * Constructs a new ApplyShardRoutingRulesRequest. + * @memberof vtctldata + * @classdesc Represents an ApplyShardRoutingRulesRequest. + * @implements IApplyShardRoutingRulesRequest + * @constructor + * @param {vtctldata.IApplyShardRoutingRulesRequest=} [properties] Properties to set + */ + function ApplyShardRoutingRulesRequest(properties) { + this.rebuild_cells = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ApplyShardRoutingRulesRequest shard_routing_rules. 
+ * @member {vschema.IShardRoutingRules|null|undefined} shard_routing_rules + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @instance + */ + ApplyShardRoutingRulesRequest.prototype.shard_routing_rules = null; + + /** + * ApplyShardRoutingRulesRequest skip_rebuild. + * @member {boolean} skip_rebuild + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @instance + */ + ApplyShardRoutingRulesRequest.prototype.skip_rebuild = false; + + /** + * ApplyShardRoutingRulesRequest rebuild_cells. + * @member {Array.} rebuild_cells + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @instance + */ + ApplyShardRoutingRulesRequest.prototype.rebuild_cells = $util.emptyArray; + + /** + * Creates a new ApplyShardRoutingRulesRequest instance using the specified properties. + * @function create + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @static + * @param {vtctldata.IApplyShardRoutingRulesRequest=} [properties] Properties to set + * @returns {vtctldata.ApplyShardRoutingRulesRequest} ApplyShardRoutingRulesRequest instance + */ + ApplyShardRoutingRulesRequest.create = function create(properties) { + return new ApplyShardRoutingRulesRequest(properties); + }; + + /** + * Encodes the specified ApplyShardRoutingRulesRequest message. Does not implicitly {@link vtctldata.ApplyShardRoutingRulesRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @static + * @param {vtctldata.IApplyShardRoutingRulesRequest} message ApplyShardRoutingRulesRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplyShardRoutingRulesRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.shard_routing_rules != null && Object.hasOwnProperty.call(message, "shard_routing_rules")) + $root.vschema.ShardRoutingRules.encode(message.shard_routing_rules, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.skip_rebuild != null && Object.hasOwnProperty.call(message, "skip_rebuild")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.skip_rebuild); + if (message.rebuild_cells != null && message.rebuild_cells.length) + for (let i = 0; i < message.rebuild_cells.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.rebuild_cells[i]); + return writer; + }; + + /** + * Encodes the specified ApplyShardRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.ApplyShardRoutingRulesRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @static + * @param {vtctldata.IApplyShardRoutingRulesRequest} message ApplyShardRoutingRulesRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplyShardRoutingRulesRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ApplyShardRoutingRulesRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ApplyShardRoutingRulesRequest} ApplyShardRoutingRulesRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplyShardRoutingRulesRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplyShardRoutingRulesRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.shard_routing_rules = $root.vschema.ShardRoutingRules.decode(reader, reader.uint32()); + break; + } + case 2: { + message.skip_rebuild = reader.bool(); + break; + } + case 3: { + if (!(message.rebuild_cells && message.rebuild_cells.length)) + message.rebuild_cells = []; + message.rebuild_cells.push(reader.string()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ApplyShardRoutingRulesRequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ApplyShardRoutingRulesRequest} ApplyShardRoutingRulesRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplyShardRoutingRulesRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ApplyShardRoutingRulesRequest message. + * @function verify + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ApplyShardRoutingRulesRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.shard_routing_rules != null && message.hasOwnProperty("shard_routing_rules")) { + let error = $root.vschema.ShardRoutingRules.verify(message.shard_routing_rules); + if (error) + return "shard_routing_rules." + error; + } + if (message.skip_rebuild != null && message.hasOwnProperty("skip_rebuild")) + if (typeof message.skip_rebuild !== "boolean") + return "skip_rebuild: boolean expected"; + if (message.rebuild_cells != null && message.hasOwnProperty("rebuild_cells")) { + if (!Array.isArray(message.rebuild_cells)) + return "rebuild_cells: array expected"; + for (let i = 0; i < message.rebuild_cells.length; ++i) + if (!$util.isString(message.rebuild_cells[i])) + return "rebuild_cells: string[] expected"; + } + return null; + }; + + /** + * Creates an ApplyShardRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ApplyShardRoutingRulesRequest} ApplyShardRoutingRulesRequest + */ + ApplyShardRoutingRulesRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ApplyShardRoutingRulesRequest) + return object; + let message = new $root.vtctldata.ApplyShardRoutingRulesRequest(); + if (object.shard_routing_rules != null) { + if (typeof object.shard_routing_rules !== "object") + throw TypeError(".vtctldata.ApplyShardRoutingRulesRequest.shard_routing_rules: object expected"); + message.shard_routing_rules = $root.vschema.ShardRoutingRules.fromObject(object.shard_routing_rules); + } + if (object.skip_rebuild != null) + message.skip_rebuild = Boolean(object.skip_rebuild); + if (object.rebuild_cells) { + if (!Array.isArray(object.rebuild_cells)) + throw TypeError(".vtctldata.ApplyShardRoutingRulesRequest.rebuild_cells: array expected"); + message.rebuild_cells = []; + for (let i = 0; i < object.rebuild_cells.length; ++i) + message.rebuild_cells[i] = String(object.rebuild_cells[i]); + } + return message; + }; + + /** + * Creates a plain object from an ApplyShardRoutingRulesRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @static + * @param {vtctldata.ApplyShardRoutingRulesRequest} message ApplyShardRoutingRulesRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ApplyShardRoutingRulesRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.rebuild_cells = []; + if (options.defaults) { + object.shard_routing_rules = null; + object.skip_rebuild = false; + } + if (message.shard_routing_rules != null && message.hasOwnProperty("shard_routing_rules")) + object.shard_routing_rules = $root.vschema.ShardRoutingRules.toObject(message.shard_routing_rules, options); + if (message.skip_rebuild != null && message.hasOwnProperty("skip_rebuild")) + object.skip_rebuild = message.skip_rebuild; + if (message.rebuild_cells && message.rebuild_cells.length) { + object.rebuild_cells = []; + for (let j = 0; j < message.rebuild_cells.length; ++j) + object.rebuild_cells[j] = message.rebuild_cells[j]; + } + return object; + }; + + /** + * Converts this ApplyShardRoutingRulesRequest to JSON. 
+ * @function toJSON + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @instance + * @returns {Object.} JSON object + */ + ApplyShardRoutingRulesRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ApplyShardRoutingRulesRequest + * @function getTypeUrl + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ApplyShardRoutingRulesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ApplyShardRoutingRulesRequest"; + }; + + return ApplyShardRoutingRulesRequest; + })(); + + vtctldata.ApplyShardRoutingRulesResponse = (function() { + + /** + * Properties of an ApplyShardRoutingRulesResponse. + * @memberof vtctldata + * @interface IApplyShardRoutingRulesResponse + */ + + /** + * Constructs a new ApplyShardRoutingRulesResponse. + * @memberof vtctldata + * @classdesc Represents an ApplyShardRoutingRulesResponse. + * @implements IApplyShardRoutingRulesResponse + * @constructor + * @param {vtctldata.IApplyShardRoutingRulesResponse=} [properties] Properties to set + */ + function ApplyShardRoutingRulesResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Creates a new ApplyShardRoutingRulesResponse instance using the specified properties. 
+ * @function create + * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @static + * @param {vtctldata.IApplyShardRoutingRulesResponse=} [properties] Properties to set + * @returns {vtctldata.ApplyShardRoutingRulesResponse} ApplyShardRoutingRulesResponse instance + */ + ApplyShardRoutingRulesResponse.create = function create(properties) { + return new ApplyShardRoutingRulesResponse(properties); + }; + + /** + * Encodes the specified ApplyShardRoutingRulesResponse message. Does not implicitly {@link vtctldata.ApplyShardRoutingRulesResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @static + * @param {vtctldata.IApplyShardRoutingRulesResponse} message ApplyShardRoutingRulesResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplyShardRoutingRulesResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + return writer; + }; + + /** + * Encodes the specified ApplyShardRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.ApplyShardRoutingRulesResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @static + * @param {vtctldata.IApplyShardRoutingRulesResponse} message ApplyShardRoutingRulesResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplyShardRoutingRulesResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ApplyShardRoutingRulesResponse message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ApplyShardRoutingRulesResponse} ApplyShardRoutingRulesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplyShardRoutingRulesResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplyShardRoutingRulesResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ApplyShardRoutingRulesResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ApplyShardRoutingRulesResponse} ApplyShardRoutingRulesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplyShardRoutingRulesResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ApplyShardRoutingRulesResponse message. 
+ * @function verify + * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ApplyShardRoutingRulesResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + return null; + }; + + /** + * Creates an ApplyShardRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ApplyShardRoutingRulesResponse} ApplyShardRoutingRulesResponse + */ + ApplyShardRoutingRulesResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ApplyShardRoutingRulesResponse) + return object; + return new $root.vtctldata.ApplyShardRoutingRulesResponse(); + }; + + /** + * Creates a plain object from an ApplyShardRoutingRulesResponse message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @static + * @param {vtctldata.ApplyShardRoutingRulesResponse} message ApplyShardRoutingRulesResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ApplyShardRoutingRulesResponse.toObject = function toObject() { + return {}; + }; + + /** + * Converts this ApplyShardRoutingRulesResponse to JSON. 
+ * @function toJSON + * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @instance + * @returns {Object.} JSON object + */ + ApplyShardRoutingRulesResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ApplyShardRoutingRulesResponse + * @function getTypeUrl + * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ApplyShardRoutingRulesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ApplyShardRoutingRulesResponse"; + }; + + return ApplyShardRoutingRulesResponse; + })(); + + vtctldata.ApplySchemaRequest = (function() { + + /** + * Properties of an ApplySchemaRequest. + * @memberof vtctldata + * @interface IApplySchemaRequest + * @property {string|null} [keyspace] ApplySchemaRequest keyspace + * @property {Array.|null} [sql] ApplySchemaRequest sql + * @property {string|null} [ddl_strategy] ApplySchemaRequest ddl_strategy + * @property {Array.|null} [uuid_list] ApplySchemaRequest uuid_list + * @property {string|null} [migration_context] ApplySchemaRequest migration_context + * @property {vttime.IDuration|null} [wait_replicas_timeout] ApplySchemaRequest wait_replicas_timeout + * @property {vtrpc.ICallerID|null} [caller_id] ApplySchemaRequest caller_id + * @property {number|Long|null} [batch_size] ApplySchemaRequest batch_size + */ + + /** + * Constructs a new ApplySchemaRequest. + * @memberof vtctldata + * @classdesc Represents an ApplySchemaRequest. 
+ * @implements IApplySchemaRequest + * @constructor + * @param {vtctldata.IApplySchemaRequest=} [properties] Properties to set + */ + function ApplySchemaRequest(properties) { + this.sql = []; + this.uuid_list = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ApplySchemaRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.ApplySchemaRequest + * @instance + */ + ApplySchemaRequest.prototype.keyspace = ""; + + /** + * ApplySchemaRequest sql. + * @member {Array.} sql + * @memberof vtctldata.ApplySchemaRequest + * @instance + */ + ApplySchemaRequest.prototype.sql = $util.emptyArray; + + /** + * ApplySchemaRequest ddl_strategy. + * @member {string} ddl_strategy + * @memberof vtctldata.ApplySchemaRequest + * @instance + */ + ApplySchemaRequest.prototype.ddl_strategy = ""; + + /** + * ApplySchemaRequest uuid_list. + * @member {Array.} uuid_list + * @memberof vtctldata.ApplySchemaRequest + * @instance + */ + ApplySchemaRequest.prototype.uuid_list = $util.emptyArray; + + /** + * ApplySchemaRequest migration_context. + * @member {string} migration_context + * @memberof vtctldata.ApplySchemaRequest + * @instance + */ + ApplySchemaRequest.prototype.migration_context = ""; + + /** + * ApplySchemaRequest wait_replicas_timeout. + * @member {vttime.IDuration|null|undefined} wait_replicas_timeout + * @memberof vtctldata.ApplySchemaRequest + * @instance + */ + ApplySchemaRequest.prototype.wait_replicas_timeout = null; + + /** + * ApplySchemaRequest caller_id. + * @member {vtrpc.ICallerID|null|undefined} caller_id + * @memberof vtctldata.ApplySchemaRequest + * @instance + */ + ApplySchemaRequest.prototype.caller_id = null; + + /** + * ApplySchemaRequest batch_size. + * @member {number|Long} batch_size + * @memberof vtctldata.ApplySchemaRequest + * @instance + */ + ApplySchemaRequest.prototype.batch_size = $util.Long ? 
$util.Long.fromBits(0,0,false) : 0; + + /** + * Creates a new ApplySchemaRequest instance using the specified properties. + * @function create + * @memberof vtctldata.ApplySchemaRequest + * @static + * @param {vtctldata.IApplySchemaRequest=} [properties] Properties to set + * @returns {vtctldata.ApplySchemaRequest} ApplySchemaRequest instance + */ + ApplySchemaRequest.create = function create(properties) { + return new ApplySchemaRequest(properties); + }; + + /** + * Encodes the specified ApplySchemaRequest message. Does not implicitly {@link vtctldata.ApplySchemaRequest.verify|verify} messages. + * @function encode + * @memberof vtctldata.ApplySchemaRequest + * @static + * @param {vtctldata.IApplySchemaRequest} message ApplySchemaRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplySchemaRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.sql != null && message.sql.length) + for (let i = 0; i < message.sql.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.sql[i]); + if (message.ddl_strategy != null && Object.hasOwnProperty.call(message, "ddl_strategy")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.ddl_strategy); + if (message.uuid_list != null && message.uuid_list.length) + for (let i = 0; i < message.uuid_list.length; ++i) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.uuid_list[i]); + if (message.migration_context != null && Object.hasOwnProperty.call(message, "migration_context")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.migration_context); + if (message.wait_replicas_timeout != null && Object.hasOwnProperty.call(message, "wait_replicas_timeout")) + 
$root.vttime.Duration.encode(message.wait_replicas_timeout, writer.uint32(/* id 7, wireType 2 =*/58).fork()).ldelim(); + if (message.caller_id != null && Object.hasOwnProperty.call(message, "caller_id")) + $root.vtrpc.CallerID.encode(message.caller_id, writer.uint32(/* id 9, wireType 2 =*/74).fork()).ldelim(); + if (message.batch_size != null && Object.hasOwnProperty.call(message, "batch_size")) + writer.uint32(/* id 10, wireType 0 =*/80).int64(message.batch_size); + return writer; + }; + + /** + * Encodes the specified ApplySchemaRequest message, length delimited. Does not implicitly {@link vtctldata.ApplySchemaRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ApplySchemaRequest + * @static + * @param {vtctldata.IApplySchemaRequest} message ApplySchemaRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplySchemaRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ApplySchemaRequest message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.ApplySchemaRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ApplySchemaRequest} ApplySchemaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplySchemaRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ApplySchemaRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyspace = reader.string(); + break; + } + case 3: { + if (!(message.sql && message.sql.length)) + message.sql = []; + message.sql.push(reader.string()); + break; + } + case 4: { + message.ddl_strategy = reader.string(); + break; + } + case 5: { + if (!(message.uuid_list && message.uuid_list.length)) + message.uuid_list = []; + message.uuid_list.push(reader.string()); + break; + } + case 6: { + message.migration_context = reader.string(); + break; + } + case 7: { + message.wait_replicas_timeout = $root.vttime.Duration.decode(reader, reader.uint32()); + break; + } + case 9: { + message.caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); + break; + } + case 10: { + message.batch_size = reader.int64(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ApplySchemaRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.ApplySchemaRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ApplySchemaRequest} ApplySchemaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplySchemaRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ApplySchemaRequest message. 
+ * @function verify + * @memberof vtctldata.ApplySchemaRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ApplySchemaRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.sql != null && message.hasOwnProperty("sql")) { + if (!Array.isArray(message.sql)) + return "sql: array expected"; + for (let i = 0; i < message.sql.length; ++i) + if (!$util.isString(message.sql[i])) + return "sql: string[] expected"; + } + if (message.ddl_strategy != null && message.hasOwnProperty("ddl_strategy")) + if (!$util.isString(message.ddl_strategy)) + return "ddl_strategy: string expected"; + if (message.uuid_list != null && message.hasOwnProperty("uuid_list")) { + if (!Array.isArray(message.uuid_list)) + return "uuid_list: array expected"; + for (let i = 0; i < message.uuid_list.length; ++i) + if (!$util.isString(message.uuid_list[i])) + return "uuid_list: string[] expected"; + } + if (message.migration_context != null && message.hasOwnProperty("migration_context")) + if (!$util.isString(message.migration_context)) + return "migration_context: string expected"; + if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout")) { + let error = $root.vttime.Duration.verify(message.wait_replicas_timeout); + if (error) + return "wait_replicas_timeout." + error; + } + if (message.caller_id != null && message.hasOwnProperty("caller_id")) { + let error = $root.vtrpc.CallerID.verify(message.caller_id); + if (error) + return "caller_id." 
+ error; + } + if (message.batch_size != null && message.hasOwnProperty("batch_size")) + if (!$util.isInteger(message.batch_size) && !(message.batch_size && $util.isInteger(message.batch_size.low) && $util.isInteger(message.batch_size.high))) + return "batch_size: integer|Long expected"; + return null; + }; + + /** + * Creates an ApplySchemaRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.ApplySchemaRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ApplySchemaRequest} ApplySchemaRequest + */ + ApplySchemaRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ApplySchemaRequest) + return object; + let message = new $root.vtctldata.ApplySchemaRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.sql) { + if (!Array.isArray(object.sql)) + throw TypeError(".vtctldata.ApplySchemaRequest.sql: array expected"); + message.sql = []; + for (let i = 0; i < object.sql.length; ++i) + message.sql[i] = String(object.sql[i]); + } + if (object.ddl_strategy != null) + message.ddl_strategy = String(object.ddl_strategy); + if (object.uuid_list) { + if (!Array.isArray(object.uuid_list)) + throw TypeError(".vtctldata.ApplySchemaRequest.uuid_list: array expected"); + message.uuid_list = []; + for (let i = 0; i < object.uuid_list.length; ++i) + message.uuid_list[i] = String(object.uuid_list[i]); + } + if (object.migration_context != null) + message.migration_context = String(object.migration_context); + if (object.wait_replicas_timeout != null) { + if (typeof object.wait_replicas_timeout !== "object") + throw TypeError(".vtctldata.ApplySchemaRequest.wait_replicas_timeout: object expected"); + message.wait_replicas_timeout = $root.vttime.Duration.fromObject(object.wait_replicas_timeout); + } + if (object.caller_id != null) { + if (typeof object.caller_id !== "object") + throw 
TypeError(".vtctldata.ApplySchemaRequest.caller_id: object expected"); + message.caller_id = $root.vtrpc.CallerID.fromObject(object.caller_id); + } + if (object.batch_size != null) + if ($util.Long) + (message.batch_size = $util.Long.fromValue(object.batch_size)).unsigned = false; + else if (typeof object.batch_size === "string") + message.batch_size = parseInt(object.batch_size, 10); + else if (typeof object.batch_size === "number") + message.batch_size = object.batch_size; + else if (typeof object.batch_size === "object") + message.batch_size = new $util.LongBits(object.batch_size.low >>> 0, object.batch_size.high >>> 0).toNumber(); + return message; + }; + + /** + * Creates a plain object from an ApplySchemaRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.ApplySchemaRequest + * @static + * @param {vtctldata.ApplySchemaRequest} message ApplySchemaRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ApplySchemaRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) { + object.sql = []; + object.uuid_list = []; + } + if (options.defaults) { + object.keyspace = ""; + object.ddl_strategy = ""; + object.migration_context = ""; + object.wait_replicas_timeout = null; + object.caller_id = null; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.batch_size = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.batch_size = options.longs === String ? 
"0" : 0; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.sql && message.sql.length) { + object.sql = []; + for (let j = 0; j < message.sql.length; ++j) + object.sql[j] = message.sql[j]; + } + if (message.ddl_strategy != null && message.hasOwnProperty("ddl_strategy")) + object.ddl_strategy = message.ddl_strategy; + if (message.uuid_list && message.uuid_list.length) { + object.uuid_list = []; + for (let j = 0; j < message.uuid_list.length; ++j) + object.uuid_list[j] = message.uuid_list[j]; + } + if (message.migration_context != null && message.hasOwnProperty("migration_context")) + object.migration_context = message.migration_context; + if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout")) + object.wait_replicas_timeout = $root.vttime.Duration.toObject(message.wait_replicas_timeout, options); + if (message.caller_id != null && message.hasOwnProperty("caller_id")) + object.caller_id = $root.vtrpc.CallerID.toObject(message.caller_id, options); + if (message.batch_size != null && message.hasOwnProperty("batch_size")) + if (typeof message.batch_size === "number") + object.batch_size = options.longs === String ? String(message.batch_size) : message.batch_size; + else + object.batch_size = options.longs === String ? $util.Long.prototype.toString.call(message.batch_size) : options.longs === Number ? new $util.LongBits(message.batch_size.low >>> 0, message.batch_size.high >>> 0).toNumber() : message.batch_size; + return object; + }; + + /** + * Converts this ApplySchemaRequest to JSON. 
+ * @function toJSON + * @memberof vtctldata.ApplySchemaRequest + * @instance + * @returns {Object.} JSON object + */ + ApplySchemaRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ApplySchemaRequest + * @function getTypeUrl + * @memberof vtctldata.ApplySchemaRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ApplySchemaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ApplySchemaRequest"; + }; + + return ApplySchemaRequest; + })(); + + vtctldata.ApplySchemaResponse = (function() { + + /** + * Properties of an ApplySchemaResponse. + * @memberof vtctldata + * @interface IApplySchemaResponse + * @property {Array.|null} [uuid_list] ApplySchemaResponse uuid_list + * @property {Object.|null} [rows_affected_by_shard] ApplySchemaResponse rows_affected_by_shard + */ + + /** + * Constructs a new ApplySchemaResponse. + * @memberof vtctldata + * @classdesc Represents an ApplySchemaResponse. + * @implements IApplySchemaResponse + * @constructor + * @param {vtctldata.IApplySchemaResponse=} [properties] Properties to set + */ + function ApplySchemaResponse(properties) { + this.uuid_list = []; + this.rows_affected_by_shard = {}; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ApplySchemaResponse uuid_list. + * @member {Array.} uuid_list + * @memberof vtctldata.ApplySchemaResponse + * @instance + */ + ApplySchemaResponse.prototype.uuid_list = $util.emptyArray; + + /** + * ApplySchemaResponse rows_affected_by_shard. 
+ * @member {Object.} rows_affected_by_shard + * @memberof vtctldata.ApplySchemaResponse + * @instance + */ + ApplySchemaResponse.prototype.rows_affected_by_shard = $util.emptyObject; + + /** + * Creates a new ApplySchemaResponse instance using the specified properties. + * @function create + * @memberof vtctldata.ApplySchemaResponse + * @static + * @param {vtctldata.IApplySchemaResponse=} [properties] Properties to set + * @returns {vtctldata.ApplySchemaResponse} ApplySchemaResponse instance + */ + ApplySchemaResponse.create = function create(properties) { + return new ApplySchemaResponse(properties); + }; + + /** + * Encodes the specified ApplySchemaResponse message. Does not implicitly {@link vtctldata.ApplySchemaResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.ApplySchemaResponse + * @static + * @param {vtctldata.IApplySchemaResponse} message ApplySchemaResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplySchemaResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.uuid_list != null && message.uuid_list.length) + for (let i = 0; i < message.uuid_list.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.uuid_list[i]); + if (message.rows_affected_by_shard != null && Object.hasOwnProperty.call(message, "rows_affected_by_shard")) + for (let keys = Object.keys(message.rows_affected_by_shard), i = 0; i < keys.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 0 =*/16).uint64(message.rows_affected_by_shard[keys[i]]).ldelim(); + return writer; + }; + + /** + * Encodes the specified ApplySchemaResponse message, length delimited. Does not implicitly {@link vtctldata.ApplySchemaResponse.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.ApplySchemaResponse + * @static + * @param {vtctldata.IApplySchemaResponse} message ApplySchemaResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplySchemaResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ApplySchemaResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.ApplySchemaResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ApplySchemaResponse} ApplySchemaResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplySchemaResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ApplySchemaResponse(), key, value; + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.uuid_list && message.uuid_list.length)) + message.uuid_list = []; + message.uuid_list.push(reader.string()); + break; + } + case 2: { + if (message.rows_affected_by_shard === $util.emptyObject) + message.rows_affected_by_shard = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = 0; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = reader.uint64(); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.rows_affected_by_shard[key] = value; + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ApplySchemaResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.ApplySchemaResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ApplySchemaResponse} ApplySchemaResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplySchemaResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ApplySchemaResponse message. 
+ * @function verify + * @memberof vtctldata.ApplySchemaResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ApplySchemaResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.uuid_list != null && message.hasOwnProperty("uuid_list")) { + if (!Array.isArray(message.uuid_list)) + return "uuid_list: array expected"; + for (let i = 0; i < message.uuid_list.length; ++i) + if (!$util.isString(message.uuid_list[i])) + return "uuid_list: string[] expected"; + } + if (message.rows_affected_by_shard != null && message.hasOwnProperty("rows_affected_by_shard")) { + if (!$util.isObject(message.rows_affected_by_shard)) + return "rows_affected_by_shard: object expected"; + let key = Object.keys(message.rows_affected_by_shard); + for (let i = 0; i < key.length; ++i) + if (!$util.isInteger(message.rows_affected_by_shard[key[i]]) && !(message.rows_affected_by_shard[key[i]] && $util.isInteger(message.rows_affected_by_shard[key[i]].low) && $util.isInteger(message.rows_affected_by_shard[key[i]].high))) + return "rows_affected_by_shard: integer|Long{k:string} expected"; + } + return null; + }; + + /** + * Creates an ApplySchemaResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.ApplySchemaResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ApplySchemaResponse} ApplySchemaResponse + */ + ApplySchemaResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ApplySchemaResponse) + return object; + let message = new $root.vtctldata.ApplySchemaResponse(); + if (object.uuid_list) { + if (!Array.isArray(object.uuid_list)) + throw TypeError(".vtctldata.ApplySchemaResponse.uuid_list: array expected"); + message.uuid_list = []; + for (let i = 0; i < object.uuid_list.length; ++i) + message.uuid_list[i] = String(object.uuid_list[i]); + } + if (object.rows_affected_by_shard) { + if (typeof object.rows_affected_by_shard !== "object") + throw TypeError(".vtctldata.ApplySchemaResponse.rows_affected_by_shard: object expected"); + message.rows_affected_by_shard = {}; + for (let keys = Object.keys(object.rows_affected_by_shard), i = 0; i < keys.length; ++i) + if ($util.Long) + (message.rows_affected_by_shard[keys[i]] = $util.Long.fromValue(object.rows_affected_by_shard[keys[i]])).unsigned = true; + else if (typeof object.rows_affected_by_shard[keys[i]] === "string") + message.rows_affected_by_shard[keys[i]] = parseInt(object.rows_affected_by_shard[keys[i]], 10); + else if (typeof object.rows_affected_by_shard[keys[i]] === "number") + message.rows_affected_by_shard[keys[i]] = object.rows_affected_by_shard[keys[i]]; + else if (typeof object.rows_affected_by_shard[keys[i]] === "object") + message.rows_affected_by_shard[keys[i]] = new $util.LongBits(object.rows_affected_by_shard[keys[i]].low >>> 0, object.rows_affected_by_shard[keys[i]].high >>> 0).toNumber(true); + } + return message; + }; + + /** + * Creates a plain object from an ApplySchemaResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.ApplySchemaResponse + * @static + * @param {vtctldata.ApplySchemaResponse} message ApplySchemaResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ApplySchemaResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.uuid_list = []; + if (options.objects || options.defaults) + object.rows_affected_by_shard = {}; + if (message.uuid_list && message.uuid_list.length) { + object.uuid_list = []; + for (let j = 0; j < message.uuid_list.length; ++j) + object.uuid_list[j] = message.uuid_list[j]; + } + let keys2; + if (message.rows_affected_by_shard && (keys2 = Object.keys(message.rows_affected_by_shard)).length) { + object.rows_affected_by_shard = {}; + for (let j = 0; j < keys2.length; ++j) + if (typeof message.rows_affected_by_shard[keys2[j]] === "number") + object.rows_affected_by_shard[keys2[j]] = options.longs === String ? String(message.rows_affected_by_shard[keys2[j]]) : message.rows_affected_by_shard[keys2[j]]; + else + object.rows_affected_by_shard[keys2[j]] = options.longs === String ? $util.Long.prototype.toString.call(message.rows_affected_by_shard[keys2[j]]) : options.longs === Number ? new $util.LongBits(message.rows_affected_by_shard[keys2[j]].low >>> 0, message.rows_affected_by_shard[keys2[j]].high >>> 0).toNumber(true) : message.rows_affected_by_shard[keys2[j]]; + } + return object; + }; + + /** + * Converts this ApplySchemaResponse to JSON. 
+ * @function toJSON + * @memberof vtctldata.ApplySchemaResponse + * @instance + * @returns {Object.} JSON object + */ + ApplySchemaResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ApplySchemaResponse + * @function getTypeUrl + * @memberof vtctldata.ApplySchemaResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ApplySchemaResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ApplySchemaResponse"; + }; + + return ApplySchemaResponse; + })(); + + vtctldata.ApplyVSchemaRequest = (function() { + + /** + * Properties of an ApplyVSchemaRequest. + * @memberof vtctldata + * @interface IApplyVSchemaRequest + * @property {string|null} [keyspace] ApplyVSchemaRequest keyspace + * @property {boolean|null} [skip_rebuild] ApplyVSchemaRequest skip_rebuild + * @property {boolean|null} [dry_run] ApplyVSchemaRequest dry_run + * @property {Array.|null} [cells] ApplyVSchemaRequest cells + * @property {vschema.IKeyspace|null} [v_schema] ApplyVSchemaRequest v_schema + * @property {string|null} [sql] ApplyVSchemaRequest sql + */ + + /** + * Constructs a new ApplyVSchemaRequest. + * @memberof vtctldata + * @classdesc Represents an ApplyVSchemaRequest. + * @implements IApplyVSchemaRequest + * @constructor + * @param {vtctldata.IApplyVSchemaRequest=} [properties] Properties to set + */ + function ApplyVSchemaRequest(properties) { + this.cells = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ApplyVSchemaRequest keyspace. 
+ * @member {string} keyspace + * @memberof vtctldata.ApplyVSchemaRequest + * @instance + */ + ApplyVSchemaRequest.prototype.keyspace = ""; + + /** + * ApplyVSchemaRequest skip_rebuild. + * @member {boolean} skip_rebuild + * @memberof vtctldata.ApplyVSchemaRequest + * @instance + */ + ApplyVSchemaRequest.prototype.skip_rebuild = false; + + /** + * ApplyVSchemaRequest dry_run. + * @member {boolean} dry_run + * @memberof vtctldata.ApplyVSchemaRequest + * @instance + */ + ApplyVSchemaRequest.prototype.dry_run = false; + + /** + * ApplyVSchemaRequest cells. + * @member {Array.} cells + * @memberof vtctldata.ApplyVSchemaRequest + * @instance + */ + ApplyVSchemaRequest.prototype.cells = $util.emptyArray; + + /** + * ApplyVSchemaRequest v_schema. + * @member {vschema.IKeyspace|null|undefined} v_schema + * @memberof vtctldata.ApplyVSchemaRequest + * @instance + */ + ApplyVSchemaRequest.prototype.v_schema = null; + + /** + * ApplyVSchemaRequest sql. + * @member {string} sql + * @memberof vtctldata.ApplyVSchemaRequest + * @instance + */ + ApplyVSchemaRequest.prototype.sql = ""; + + /** + * Creates a new ApplyVSchemaRequest instance using the specified properties. + * @function create + * @memberof vtctldata.ApplyVSchemaRequest + * @static + * @param {vtctldata.IApplyVSchemaRequest=} [properties] Properties to set + * @returns {vtctldata.ApplyVSchemaRequest} ApplyVSchemaRequest instance + */ + ApplyVSchemaRequest.create = function create(properties) { + return new ApplyVSchemaRequest(properties); + }; + + /** + * Encodes the specified ApplyVSchemaRequest message. Does not implicitly {@link vtctldata.ApplyVSchemaRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.ApplyVSchemaRequest + * @static + * @param {vtctldata.IApplyVSchemaRequest} message ApplyVSchemaRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplyVSchemaRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.skip_rebuild != null && Object.hasOwnProperty.call(message, "skip_rebuild")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.skip_rebuild); + if (message.dry_run != null && Object.hasOwnProperty.call(message, "dry_run")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.dry_run); + if (message.cells != null && message.cells.length) + for (let i = 0; i < message.cells.length; ++i) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.cells[i]); + if (message.v_schema != null && Object.hasOwnProperty.call(message, "v_schema")) + $root.vschema.Keyspace.encode(message.v_schema, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.sql != null && Object.hasOwnProperty.call(message, "sql")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.sql); + return writer; + }; + + /** + * Encodes the specified ApplyVSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.ApplyVSchemaRequest.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.ApplyVSchemaRequest + * @static + * @param {vtctldata.IApplyVSchemaRequest} message ApplyVSchemaRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplyVSchemaRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ApplyVSchemaRequest message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.ApplyVSchemaRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ApplyVSchemaRequest} ApplyVSchemaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplyVSchemaRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplyVSchemaRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyspace = reader.string(); + break; + } + case 2: { + message.skip_rebuild = reader.bool(); + break; + } + case 3: { + message.dry_run = reader.bool(); + break; + } + case 4: { + if (!(message.cells && message.cells.length)) + message.cells = []; + message.cells.push(reader.string()); + break; + } + case 5: { + message.v_schema = $root.vschema.Keyspace.decode(reader, reader.uint32()); + break; + } + case 6: { + message.sql = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ApplyVSchemaRequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.ApplyVSchemaRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ApplyVSchemaRequest} ApplyVSchemaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplyVSchemaRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ApplyVSchemaRequest message. + * @function verify + * @memberof vtctldata.ApplyVSchemaRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ApplyVSchemaRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.skip_rebuild != null && message.hasOwnProperty("skip_rebuild")) + if (typeof message.skip_rebuild !== "boolean") + return "skip_rebuild: boolean expected"; + if (message.dry_run != null && message.hasOwnProperty("dry_run")) + if (typeof message.dry_run !== "boolean") + return "dry_run: boolean expected"; + if (message.cells != null && message.hasOwnProperty("cells")) { + if (!Array.isArray(message.cells)) + return "cells: array expected"; + for (let i = 0; i < message.cells.length; ++i) + if (!$util.isString(message.cells[i])) + return "cells: string[] expected"; + } + if (message.v_schema != null && message.hasOwnProperty("v_schema")) { + let error = $root.vschema.Keyspace.verify(message.v_schema); + if (error) + return "v_schema." 
+ error; + } + if (message.sql != null && message.hasOwnProperty("sql")) + if (!$util.isString(message.sql)) + return "sql: string expected"; + return null; + }; + + /** + * Creates an ApplyVSchemaRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.ApplyVSchemaRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ApplyVSchemaRequest} ApplyVSchemaRequest + */ + ApplyVSchemaRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ApplyVSchemaRequest) + return object; + let message = new $root.vtctldata.ApplyVSchemaRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.skip_rebuild != null) + message.skip_rebuild = Boolean(object.skip_rebuild); + if (object.dry_run != null) + message.dry_run = Boolean(object.dry_run); + if (object.cells) { + if (!Array.isArray(object.cells)) + throw TypeError(".vtctldata.ApplyVSchemaRequest.cells: array expected"); + message.cells = []; + for (let i = 0; i < object.cells.length; ++i) + message.cells[i] = String(object.cells[i]); + } + if (object.v_schema != null) { + if (typeof object.v_schema !== "object") + throw TypeError(".vtctldata.ApplyVSchemaRequest.v_schema: object expected"); + message.v_schema = $root.vschema.Keyspace.fromObject(object.v_schema); + } + if (object.sql != null) + message.sql = String(object.sql); + return message; + }; + + /** + * Creates a plain object from an ApplyVSchemaRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.ApplyVSchemaRequest + * @static + * @param {vtctldata.ApplyVSchemaRequest} message ApplyVSchemaRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ApplyVSchemaRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.cells = []; + if (options.defaults) { + object.keyspace = ""; + object.skip_rebuild = false; + object.dry_run = false; + object.v_schema = null; + object.sql = ""; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.skip_rebuild != null && message.hasOwnProperty("skip_rebuild")) + object.skip_rebuild = message.skip_rebuild; + if (message.dry_run != null && message.hasOwnProperty("dry_run")) + object.dry_run = message.dry_run; + if (message.cells && message.cells.length) { + object.cells = []; + for (let j = 0; j < message.cells.length; ++j) + object.cells[j] = message.cells[j]; + } + if (message.v_schema != null && message.hasOwnProperty("v_schema")) + object.v_schema = $root.vschema.Keyspace.toObject(message.v_schema, options); + if (message.sql != null && message.hasOwnProperty("sql")) + object.sql = message.sql; + return object; + }; + + /** + * Converts this ApplyVSchemaRequest to JSON. 
+ * @function toJSON + * @memberof vtctldata.ApplyVSchemaRequest + * @instance + * @returns {Object.} JSON object + */ + ApplyVSchemaRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ApplyVSchemaRequest + * @function getTypeUrl + * @memberof vtctldata.ApplyVSchemaRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ApplyVSchemaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ApplyVSchemaRequest"; + }; + + return ApplyVSchemaRequest; + })(); + + vtctldata.ApplyVSchemaResponse = (function() { + + /** + * Properties of an ApplyVSchemaResponse. + * @memberof vtctldata + * @interface IApplyVSchemaResponse + * @property {vschema.IKeyspace|null} [v_schema] ApplyVSchemaResponse v_schema + */ + + /** + * Constructs a new ApplyVSchemaResponse. + * @memberof vtctldata + * @classdesc Represents an ApplyVSchemaResponse. + * @implements IApplyVSchemaResponse + * @constructor + * @param {vtctldata.IApplyVSchemaResponse=} [properties] Properties to set + */ + function ApplyVSchemaResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ApplyVSchemaResponse v_schema. + * @member {vschema.IKeyspace|null|undefined} v_schema + * @memberof vtctldata.ApplyVSchemaResponse + * @instance + */ + ApplyVSchemaResponse.prototype.v_schema = null; + + /** + * Creates a new ApplyVSchemaResponse instance using the specified properties. 
+ * @function create + * @memberof vtctldata.ApplyVSchemaResponse + * @static + * @param {vtctldata.IApplyVSchemaResponse=} [properties] Properties to set + * @returns {vtctldata.ApplyVSchemaResponse} ApplyVSchemaResponse instance + */ + ApplyVSchemaResponse.create = function create(properties) { + return new ApplyVSchemaResponse(properties); + }; + + /** + * Encodes the specified ApplyVSchemaResponse message. Does not implicitly {@link vtctldata.ApplyVSchemaResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.ApplyVSchemaResponse + * @static + * @param {vtctldata.IApplyVSchemaResponse} message ApplyVSchemaResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplyVSchemaResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.v_schema != null && Object.hasOwnProperty.call(message, "v_schema")) + $root.vschema.Keyspace.encode(message.v_schema, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified ApplyVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.ApplyVSchemaResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ApplyVSchemaResponse + * @static + * @param {vtctldata.IApplyVSchemaResponse} message ApplyVSchemaResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplyVSchemaResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ApplyVSchemaResponse message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.ApplyVSchemaResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ApplyVSchemaResponse} ApplyVSchemaResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplyVSchemaResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplyVSchemaResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.v_schema = $root.vschema.Keyspace.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ApplyVSchemaResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.ApplyVSchemaResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ApplyVSchemaResponse} ApplyVSchemaResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplyVSchemaResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ApplyVSchemaResponse message. 
+ * @function verify + * @memberof vtctldata.ApplyVSchemaResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ApplyVSchemaResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.v_schema != null && message.hasOwnProperty("v_schema")) { + let error = $root.vschema.Keyspace.verify(message.v_schema); + if (error) + return "v_schema." + error; + } + return null; + }; + + /** + * Creates an ApplyVSchemaResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.ApplyVSchemaResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ApplyVSchemaResponse} ApplyVSchemaResponse + */ + ApplyVSchemaResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ApplyVSchemaResponse) + return object; + let message = new $root.vtctldata.ApplyVSchemaResponse(); + if (object.v_schema != null) { + if (typeof object.v_schema !== "object") + throw TypeError(".vtctldata.ApplyVSchemaResponse.v_schema: object expected"); + message.v_schema = $root.vschema.Keyspace.fromObject(object.v_schema); + } + return message; + }; + + /** + * Creates a plain object from an ApplyVSchemaResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.ApplyVSchemaResponse + * @static + * @param {vtctldata.ApplyVSchemaResponse} message ApplyVSchemaResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ApplyVSchemaResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.v_schema = null; + if (message.v_schema != null && message.hasOwnProperty("v_schema")) + object.v_schema = $root.vschema.Keyspace.toObject(message.v_schema, options); + return object; + }; + + /** + * Converts this ApplyVSchemaResponse to JSON. + * @function toJSON + * @memberof vtctldata.ApplyVSchemaResponse + * @instance + * @returns {Object.} JSON object + */ + ApplyVSchemaResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ApplyVSchemaResponse + * @function getTypeUrl + * @memberof vtctldata.ApplyVSchemaResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ApplyVSchemaResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ApplyVSchemaResponse"; + }; + + return ApplyVSchemaResponse; + })(); + + vtctldata.BackupRequest = (function() { + + /** + * Properties of a BackupRequest. 
+ * @memberof vtctldata + * @interface IBackupRequest + * @property {topodata.ITabletAlias|null} [tablet_alias] BackupRequest tablet_alias + * @property {boolean|null} [allow_primary] BackupRequest allow_primary + * @property {number|Long|null} [concurrency] BackupRequest concurrency + * @property {string|null} [incremental_from_pos] BackupRequest incremental_from_pos + * @property {boolean|null} [upgrade_safe] BackupRequest upgrade_safe + */ + + /** + * Constructs a new BackupRequest. + * @memberof vtctldata + * @classdesc Represents a BackupRequest. + * @implements IBackupRequest + * @constructor + * @param {vtctldata.IBackupRequest=} [properties] Properties to set + */ + function BackupRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * BackupRequest tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.BackupRequest + * @instance + */ + BackupRequest.prototype.tablet_alias = null; + + /** + * BackupRequest allow_primary. + * @member {boolean} allow_primary + * @memberof vtctldata.BackupRequest + * @instance + */ + BackupRequest.prototype.allow_primary = false; + + /** + * BackupRequest concurrency. + * @member {number|Long} concurrency + * @memberof vtctldata.BackupRequest + * @instance + */ + BackupRequest.prototype.concurrency = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + + /** + * BackupRequest incremental_from_pos. + * @member {string} incremental_from_pos + * @memberof vtctldata.BackupRequest + * @instance + */ + BackupRequest.prototype.incremental_from_pos = ""; + + /** + * BackupRequest upgrade_safe. + * @member {boolean} upgrade_safe + * @memberof vtctldata.BackupRequest + * @instance + */ + BackupRequest.prototype.upgrade_safe = false; + + /** + * Creates a new BackupRequest instance using the specified properties. 
+ * @function create + * @memberof vtctldata.BackupRequest + * @static + * @param {vtctldata.IBackupRequest=} [properties] Properties to set + * @returns {vtctldata.BackupRequest} BackupRequest instance + */ + BackupRequest.create = function create(properties) { + return new BackupRequest(properties); + }; + + /** + * Encodes the specified BackupRequest message. Does not implicitly {@link vtctldata.BackupRequest.verify|verify} messages. + * @function encode + * @memberof vtctldata.BackupRequest + * @static + * @param {vtctldata.IBackupRequest} message BackupRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + BackupRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.allow_primary != null && Object.hasOwnProperty.call(message, "allow_primary")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.allow_primary); + if (message.concurrency != null && Object.hasOwnProperty.call(message, "concurrency")) + writer.uint32(/* id 3, wireType 0 =*/24).uint64(message.concurrency); + if (message.incremental_from_pos != null && Object.hasOwnProperty.call(message, "incremental_from_pos")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.incremental_from_pos); + if (message.upgrade_safe != null && Object.hasOwnProperty.call(message, "upgrade_safe")) + writer.uint32(/* id 5, wireType 0 =*/40).bool(message.upgrade_safe); + return writer; + }; + + /** + * Encodes the specified BackupRequest message, length delimited. Does not implicitly {@link vtctldata.BackupRequest.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.BackupRequest + * @static + * @param {vtctldata.IBackupRequest} message BackupRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + BackupRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a BackupRequest message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.BackupRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.BackupRequest} BackupRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + BackupRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.BackupRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 2: { + message.allow_primary = reader.bool(); + break; + } + case 3: { + message.concurrency = reader.uint64(); + break; + } + case 4: { + message.incremental_from_pos = reader.string(); + break; + } + case 5: { + message.upgrade_safe = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a BackupRequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.BackupRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.BackupRequest} BackupRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + BackupRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a BackupRequest message. + * @function verify + * @memberof vtctldata.BackupRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + BackupRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." 
+ error; + } + if (message.allow_primary != null && message.hasOwnProperty("allow_primary")) + if (typeof message.allow_primary !== "boolean") + return "allow_primary: boolean expected"; + if (message.concurrency != null && message.hasOwnProperty("concurrency")) + if (!$util.isInteger(message.concurrency) && !(message.concurrency && $util.isInteger(message.concurrency.low) && $util.isInteger(message.concurrency.high))) + return "concurrency: integer|Long expected"; + if (message.incremental_from_pos != null && message.hasOwnProperty("incremental_from_pos")) + if (!$util.isString(message.incremental_from_pos)) + return "incremental_from_pos: string expected"; + if (message.upgrade_safe != null && message.hasOwnProperty("upgrade_safe")) + if (typeof message.upgrade_safe !== "boolean") + return "upgrade_safe: boolean expected"; + return null; + }; + + /** + * Creates a BackupRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.BackupRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.BackupRequest} BackupRequest + */ + BackupRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.BackupRequest) + return object; + let message = new $root.vtctldata.BackupRequest(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.BackupRequest.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } + if (object.allow_primary != null) + message.allow_primary = Boolean(object.allow_primary); + if (object.concurrency != null) + if ($util.Long) + (message.concurrency = $util.Long.fromValue(object.concurrency)).unsigned = true; + else if (typeof object.concurrency === "string") + message.concurrency = parseInt(object.concurrency, 10); + else if (typeof object.concurrency === "number") + message.concurrency = 
object.concurrency; + else if (typeof object.concurrency === "object") + message.concurrency = new $util.LongBits(object.concurrency.low >>> 0, object.concurrency.high >>> 0).toNumber(true); + if (object.incremental_from_pos != null) + message.incremental_from_pos = String(object.incremental_from_pos); + if (object.upgrade_safe != null) + message.upgrade_safe = Boolean(object.upgrade_safe); + return message; + }; + + /** + * Creates a plain object from a BackupRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.BackupRequest + * @static + * @param {vtctldata.BackupRequest} message BackupRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + BackupRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.tablet_alias = null; + object.allow_primary = false; + if ($util.Long) { + let long = new $util.Long(0, 0, true); + object.concurrency = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.concurrency = options.longs === String ? "0" : 0; + object.incremental_from_pos = ""; + object.upgrade_safe = false; + } + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (message.allow_primary != null && message.hasOwnProperty("allow_primary")) + object.allow_primary = message.allow_primary; + if (message.concurrency != null && message.hasOwnProperty("concurrency")) + if (typeof message.concurrency === "number") + object.concurrency = options.longs === String ? String(message.concurrency) : message.concurrency; + else + object.concurrency = options.longs === String ? $util.Long.prototype.toString.call(message.concurrency) : options.longs === Number ? 
new $util.LongBits(message.concurrency.low >>> 0, message.concurrency.high >>> 0).toNumber(true) : message.concurrency; + if (message.incremental_from_pos != null && message.hasOwnProperty("incremental_from_pos")) + object.incremental_from_pos = message.incremental_from_pos; + if (message.upgrade_safe != null && message.hasOwnProperty("upgrade_safe")) + object.upgrade_safe = message.upgrade_safe; + return object; + }; + + /** + * Converts this BackupRequest to JSON. + * @function toJSON + * @memberof vtctldata.BackupRequest + * @instance + * @returns {Object.} JSON object + */ + BackupRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for BackupRequest + * @function getTypeUrl + * @memberof vtctldata.BackupRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + BackupRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.BackupRequest"; + }; + + return BackupRequest; + })(); + + vtctldata.BackupResponse = (function() { + + /** + * Properties of a BackupResponse. + * @memberof vtctldata + * @interface IBackupResponse + * @property {topodata.ITabletAlias|null} [tablet_alias] BackupResponse tablet_alias + * @property {string|null} [keyspace] BackupResponse keyspace + * @property {string|null} [shard] BackupResponse shard + * @property {logutil.IEvent|null} [event] BackupResponse event + */ + + /** + * Constructs a new BackupResponse. + * @memberof vtctldata + * @classdesc Represents a BackupResponse. 
+ * @implements IBackupResponse + * @constructor + * @param {vtctldata.IBackupResponse=} [properties] Properties to set + */ + function BackupResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * BackupResponse tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.BackupResponse + * @instance + */ + BackupResponse.prototype.tablet_alias = null; + + /** + * BackupResponse keyspace. + * @member {string} keyspace + * @memberof vtctldata.BackupResponse + * @instance + */ + BackupResponse.prototype.keyspace = ""; + + /** + * BackupResponse shard. + * @member {string} shard + * @memberof vtctldata.BackupResponse + * @instance + */ + BackupResponse.prototype.shard = ""; + + /** + * BackupResponse event. + * @member {logutil.IEvent|null|undefined} event + * @memberof vtctldata.BackupResponse + * @instance + */ + BackupResponse.prototype.event = null; + + /** + * Creates a new BackupResponse instance using the specified properties. + * @function create + * @memberof vtctldata.BackupResponse + * @static + * @param {vtctldata.IBackupResponse=} [properties] Properties to set + * @returns {vtctldata.BackupResponse} BackupResponse instance + */ + BackupResponse.create = function create(properties) { + return new BackupResponse(properties); + }; + + /** + * Encodes the specified BackupResponse message. Does not implicitly {@link vtctldata.BackupResponse.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.BackupResponse + * @static + * @param {vtctldata.IBackupResponse} message BackupResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + BackupResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.shard); + if (message.event != null && Object.hasOwnProperty.call(message, "event")) + $root.logutil.Event.encode(message.event, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified BackupResponse message, length delimited. Does not implicitly {@link vtctldata.BackupResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.BackupResponse + * @static + * @param {vtctldata.IBackupResponse} message BackupResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + BackupResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a BackupResponse message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.BackupResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.BackupResponse} BackupResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + BackupResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.BackupResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 2: { + message.keyspace = reader.string(); + break; + } + case 3: { + message.shard = reader.string(); + break; + } + case 4: { + message.event = $root.logutil.Event.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a BackupResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.BackupResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.BackupResponse} BackupResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + BackupResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a BackupResponse message. 
+ * @function verify + * @memberof vtctldata.BackupResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + BackupResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.event != null && message.hasOwnProperty("event")) { + let error = $root.logutil.Event.verify(message.event); + if (error) + return "event." + error; + } + return null; + }; + + /** + * Creates a BackupResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.BackupResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.BackupResponse} BackupResponse + */ + BackupResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.BackupResponse) + return object; + let message = new $root.vtctldata.BackupResponse(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.BackupResponse.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + if (object.event != null) { + if (typeof object.event !== "object") + throw TypeError(".vtctldata.BackupResponse.event: object expected"); + message.event = $root.logutil.Event.fromObject(object.event); + } + return message; + }; + + /** + * Creates a plain object from a BackupResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.BackupResponse + * @static + * @param {vtctldata.BackupResponse} message BackupResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + BackupResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.tablet_alias = null; + object.keyspace = ""; + object.shard = ""; + object.event = null; + } + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.event != null && message.hasOwnProperty("event")) + object.event = $root.logutil.Event.toObject(message.event, options); + return object; + }; + + /** + * Converts this BackupResponse to JSON. + * @function toJSON + * @memberof vtctldata.BackupResponse + * @instance + * @returns {Object.} JSON object + */ + BackupResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for BackupResponse + * @function getTypeUrl + * @memberof vtctldata.BackupResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + BackupResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.BackupResponse"; + }; + + return BackupResponse; + })(); + + vtctldata.BackupShardRequest = (function() { + + /** + * Properties of a BackupShardRequest. 
+ * @memberof vtctldata + * @interface IBackupShardRequest + * @property {string|null} [keyspace] BackupShardRequest keyspace + * @property {string|null} [shard] BackupShardRequest shard + * @property {boolean|null} [allow_primary] BackupShardRequest allow_primary + * @property {number|Long|null} [concurrency] BackupShardRequest concurrency + * @property {boolean|null} [upgrade_safe] BackupShardRequest upgrade_safe + * @property {string|null} [incremental_from_pos] BackupShardRequest incremental_from_pos + */ + + /** + * Constructs a new BackupShardRequest. + * @memberof vtctldata + * @classdesc Represents a BackupShardRequest. + * @implements IBackupShardRequest + * @constructor + * @param {vtctldata.IBackupShardRequest=} [properties] Properties to set + */ + function BackupShardRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * BackupShardRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.BackupShardRequest + * @instance + */ + BackupShardRequest.prototype.keyspace = ""; + + /** + * BackupShardRequest shard. + * @member {string} shard + * @memberof vtctldata.BackupShardRequest + * @instance + */ + BackupShardRequest.prototype.shard = ""; + + /** + * BackupShardRequest allow_primary. + * @member {boolean} allow_primary + * @memberof vtctldata.BackupShardRequest + * @instance + */ + BackupShardRequest.prototype.allow_primary = false; + + /** + * BackupShardRequest concurrency. + * @member {number|Long} concurrency + * @memberof vtctldata.BackupShardRequest + * @instance + */ + BackupShardRequest.prototype.concurrency = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + + /** + * BackupShardRequest upgrade_safe. 
+ * @member {boolean} upgrade_safe + * @memberof vtctldata.BackupShardRequest + * @instance + */ + BackupShardRequest.prototype.upgrade_safe = false; + + /** + * BackupShardRequest incremental_from_pos. + * @member {string} incremental_from_pos + * @memberof vtctldata.BackupShardRequest + * @instance + */ + BackupShardRequest.prototype.incremental_from_pos = ""; + + /** + * Creates a new BackupShardRequest instance using the specified properties. + * @function create + * @memberof vtctldata.BackupShardRequest + * @static + * @param {vtctldata.IBackupShardRequest=} [properties] Properties to set + * @returns {vtctldata.BackupShardRequest} BackupShardRequest instance + */ + BackupShardRequest.create = function create(properties) { + return new BackupShardRequest(properties); + }; + + /** + * Encodes the specified BackupShardRequest message. Does not implicitly {@link vtctldata.BackupShardRequest.verify|verify} messages. + * @function encode + * @memberof vtctldata.BackupShardRequest + * @static + * @param {vtctldata.IBackupShardRequest} message BackupShardRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + BackupShardRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.allow_primary != null && Object.hasOwnProperty.call(message, "allow_primary")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.allow_primary); + if (message.concurrency != null && Object.hasOwnProperty.call(message, "concurrency")) + writer.uint32(/* id 4, wireType 0 =*/32).uint64(message.concurrency); + if (message.upgrade_safe != null && 
Object.hasOwnProperty.call(message, "upgrade_safe")) + writer.uint32(/* id 5, wireType 0 =*/40).bool(message.upgrade_safe); + if (message.incremental_from_pos != null && Object.hasOwnProperty.call(message, "incremental_from_pos")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.incremental_from_pos); + return writer; + }; + + /** + * Encodes the specified BackupShardRequest message, length delimited. Does not implicitly {@link vtctldata.BackupShardRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.BackupShardRequest + * @static + * @param {vtctldata.IBackupShardRequest} message BackupShardRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + BackupShardRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a BackupShardRequest message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.BackupShardRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.BackupShardRequest} BackupShardRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + BackupShardRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.BackupShardRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyspace = reader.string(); + break; + } + case 2: { + message.shard = reader.string(); + break; + } + case 3: { + message.allow_primary = reader.bool(); + break; + } + case 4: { + message.concurrency = reader.uint64(); + break; + } + case 5: { + message.upgrade_safe = reader.bool(); + break; + } + case 6: { + message.incremental_from_pos = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a BackupShardRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.BackupShardRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.BackupShardRequest} BackupShardRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + BackupShardRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a BackupShardRequest message. 
+ * @function verify + * @memberof vtctldata.BackupShardRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + BackupShardRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.allow_primary != null && message.hasOwnProperty("allow_primary")) + if (typeof message.allow_primary !== "boolean") + return "allow_primary: boolean expected"; + if (message.concurrency != null && message.hasOwnProperty("concurrency")) + if (!$util.isInteger(message.concurrency) && !(message.concurrency && $util.isInteger(message.concurrency.low) && $util.isInteger(message.concurrency.high))) + return "concurrency: integer|Long expected"; + if (message.upgrade_safe != null && message.hasOwnProperty("upgrade_safe")) + if (typeof message.upgrade_safe !== "boolean") + return "upgrade_safe: boolean expected"; + if (message.incremental_from_pos != null && message.hasOwnProperty("incremental_from_pos")) + if (!$util.isString(message.incremental_from_pos)) + return "incremental_from_pos: string expected"; + return null; + }; + + /** + * Creates a BackupShardRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.BackupShardRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.BackupShardRequest} BackupShardRequest + */ + BackupShardRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.BackupShardRequest) + return object; + let message = new $root.vtctldata.BackupShardRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + if (object.allow_primary != null) + message.allow_primary = Boolean(object.allow_primary); + if (object.concurrency != null) + if ($util.Long) + (message.concurrency = $util.Long.fromValue(object.concurrency)).unsigned = true; + else if (typeof object.concurrency === "string") + message.concurrency = parseInt(object.concurrency, 10); + else if (typeof object.concurrency === "number") + message.concurrency = object.concurrency; + else if (typeof object.concurrency === "object") + message.concurrency = new $util.LongBits(object.concurrency.low >>> 0, object.concurrency.high >>> 0).toNumber(true); + if (object.upgrade_safe != null) + message.upgrade_safe = Boolean(object.upgrade_safe); + if (object.incremental_from_pos != null) + message.incremental_from_pos = String(object.incremental_from_pos); + return message; + }; + + /** + * Creates a plain object from a BackupShardRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.BackupShardRequest + * @static + * @param {vtctldata.BackupShardRequest} message BackupShardRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + BackupShardRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.keyspace = ""; + object.shard = ""; + object.allow_primary = false; + if ($util.Long) { + let long = new $util.Long(0, 0, true); + object.concurrency = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.concurrency = options.longs === String ? "0" : 0; + object.upgrade_safe = false; + object.incremental_from_pos = ""; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.allow_primary != null && message.hasOwnProperty("allow_primary")) + object.allow_primary = message.allow_primary; + if (message.concurrency != null && message.hasOwnProperty("concurrency")) + if (typeof message.concurrency === "number") + object.concurrency = options.longs === String ? String(message.concurrency) : message.concurrency; + else + object.concurrency = options.longs === String ? $util.Long.prototype.toString.call(message.concurrency) : options.longs === Number ? new $util.LongBits(message.concurrency.low >>> 0, message.concurrency.high >>> 0).toNumber(true) : message.concurrency; + if (message.upgrade_safe != null && message.hasOwnProperty("upgrade_safe")) + object.upgrade_safe = message.upgrade_safe; + if (message.incremental_from_pos != null && message.hasOwnProperty("incremental_from_pos")) + object.incremental_from_pos = message.incremental_from_pos; + return object; + }; + + /** + * Converts this BackupShardRequest to JSON. 
+ * @function toJSON + * @memberof vtctldata.BackupShardRequest + * @instance + * @returns {Object.} JSON object + */ + BackupShardRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for BackupShardRequest + * @function getTypeUrl + * @memberof vtctldata.BackupShardRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + BackupShardRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.BackupShardRequest"; + }; + + return BackupShardRequest; + })(); + + vtctldata.CancelSchemaMigrationRequest = (function() { + + /** + * Properties of a CancelSchemaMigrationRequest. + * @memberof vtctldata + * @interface ICancelSchemaMigrationRequest + * @property {string|null} [keyspace] CancelSchemaMigrationRequest keyspace + * @property {string|null} [uuid] CancelSchemaMigrationRequest uuid + */ + + /** + * Constructs a new CancelSchemaMigrationRequest. + * @memberof vtctldata + * @classdesc Represents a CancelSchemaMigrationRequest. + * @implements ICancelSchemaMigrationRequest + * @constructor + * @param {vtctldata.ICancelSchemaMigrationRequest=} [properties] Properties to set + */ + function CancelSchemaMigrationRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * CancelSchemaMigrationRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.CancelSchemaMigrationRequest + * @instance + */ + CancelSchemaMigrationRequest.prototype.keyspace = ""; + + /** + * CancelSchemaMigrationRequest uuid. 
+ * @member {string} uuid + * @memberof vtctldata.CancelSchemaMigrationRequest + * @instance + */ + CancelSchemaMigrationRequest.prototype.uuid = ""; + + /** + * Creates a new CancelSchemaMigrationRequest instance using the specified properties. + * @function create + * @memberof vtctldata.CancelSchemaMigrationRequest + * @static + * @param {vtctldata.ICancelSchemaMigrationRequest=} [properties] Properties to set + * @returns {vtctldata.CancelSchemaMigrationRequest} CancelSchemaMigrationRequest instance + */ + CancelSchemaMigrationRequest.create = function create(properties) { + return new CancelSchemaMigrationRequest(properties); + }; + + /** + * Encodes the specified CancelSchemaMigrationRequest message. Does not implicitly {@link vtctldata.CancelSchemaMigrationRequest.verify|verify} messages. + * @function encode + * @memberof vtctldata.CancelSchemaMigrationRequest + * @static + * @param {vtctldata.ICancelSchemaMigrationRequest} message CancelSchemaMigrationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CancelSchemaMigrationRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.uuid != null && Object.hasOwnProperty.call(message, "uuid")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.uuid); + return writer; + }; + + /** + * Encodes the specified CancelSchemaMigrationRequest message, length delimited. Does not implicitly {@link vtctldata.CancelSchemaMigrationRequest.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.CancelSchemaMigrationRequest + * @static + * @param {vtctldata.ICancelSchemaMigrationRequest} message CancelSchemaMigrationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CancelSchemaMigrationRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a CancelSchemaMigrationRequest message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.CancelSchemaMigrationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.CancelSchemaMigrationRequest} CancelSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CancelSchemaMigrationRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.CancelSchemaMigrationRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyspace = reader.string(); + break; + } + case 2: { + message.uuid = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a CancelSchemaMigrationRequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.CancelSchemaMigrationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.CancelSchemaMigrationRequest} CancelSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CancelSchemaMigrationRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a CancelSchemaMigrationRequest message. + * @function verify + * @memberof vtctldata.CancelSchemaMigrationRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + CancelSchemaMigrationRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.uuid != null && message.hasOwnProperty("uuid")) + if (!$util.isString(message.uuid)) + return "uuid: string expected"; + return null; + }; + + /** + * Creates a CancelSchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.CancelSchemaMigrationRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.CancelSchemaMigrationRequest} CancelSchemaMigrationRequest + */ + CancelSchemaMigrationRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.CancelSchemaMigrationRequest) + return object; + let message = new $root.vtctldata.CancelSchemaMigrationRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.uuid != null) + message.uuid = String(object.uuid); + return message; + }; + + /** + * Creates a plain object from a CancelSchemaMigrationRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.CancelSchemaMigrationRequest + * @static + * @param {vtctldata.CancelSchemaMigrationRequest} message CancelSchemaMigrationRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + CancelSchemaMigrationRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.keyspace = ""; + object.uuid = ""; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.uuid != null && message.hasOwnProperty("uuid")) + object.uuid = message.uuid; + return object; + }; + + /** + * Converts this CancelSchemaMigrationRequest to JSON. 
+ * @function toJSON + * @memberof vtctldata.CancelSchemaMigrationRequest + * @instance + * @returns {Object.} JSON object + */ + CancelSchemaMigrationRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for CancelSchemaMigrationRequest + * @function getTypeUrl + * @memberof vtctldata.CancelSchemaMigrationRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + CancelSchemaMigrationRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.CancelSchemaMigrationRequest"; + }; + + return CancelSchemaMigrationRequest; + })(); + + vtctldata.CancelSchemaMigrationResponse = (function() { + + /** + * Properties of a CancelSchemaMigrationResponse. + * @memberof vtctldata + * @interface ICancelSchemaMigrationResponse + * @property {Object.|null} [rows_affected_by_shard] CancelSchemaMigrationResponse rows_affected_by_shard + */ + + /** + * Constructs a new CancelSchemaMigrationResponse. + * @memberof vtctldata + * @classdesc Represents a CancelSchemaMigrationResponse. + * @implements ICancelSchemaMigrationResponse + * @constructor + * @param {vtctldata.ICancelSchemaMigrationResponse=} [properties] Properties to set + */ + function CancelSchemaMigrationResponse(properties) { + this.rows_affected_by_shard = {}; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * CancelSchemaMigrationResponse rows_affected_by_shard. 
+ * @member {Object.} rows_affected_by_shard + * @memberof vtctldata.CancelSchemaMigrationResponse + * @instance + */ + CancelSchemaMigrationResponse.prototype.rows_affected_by_shard = $util.emptyObject; + + /** + * Creates a new CancelSchemaMigrationResponse instance using the specified properties. + * @function create + * @memberof vtctldata.CancelSchemaMigrationResponse + * @static + * @param {vtctldata.ICancelSchemaMigrationResponse=} [properties] Properties to set + * @returns {vtctldata.CancelSchemaMigrationResponse} CancelSchemaMigrationResponse instance + */ + CancelSchemaMigrationResponse.create = function create(properties) { + return new CancelSchemaMigrationResponse(properties); + }; + + /** + * Encodes the specified CancelSchemaMigrationResponse message. Does not implicitly {@link vtctldata.CancelSchemaMigrationResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.CancelSchemaMigrationResponse + * @static + * @param {vtctldata.ICancelSchemaMigrationResponse} message CancelSchemaMigrationResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CancelSchemaMigrationResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.rows_affected_by_shard != null && Object.hasOwnProperty.call(message, "rows_affected_by_shard")) + for (let keys = Object.keys(message.rows_affected_by_shard), i = 0; i < keys.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 0 =*/16).uint64(message.rows_affected_by_shard[keys[i]]).ldelim(); + return writer; + }; + + /** + * Encodes the specified CancelSchemaMigrationResponse message, length delimited. Does not implicitly {@link vtctldata.CancelSchemaMigrationResponse.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.CancelSchemaMigrationResponse + * @static + * @param {vtctldata.ICancelSchemaMigrationResponse} message CancelSchemaMigrationResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CancelSchemaMigrationResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a CancelSchemaMigrationResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.CancelSchemaMigrationResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.CancelSchemaMigrationResponse} CancelSchemaMigrationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CancelSchemaMigrationResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.CancelSchemaMigrationResponse(), key, value; + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (message.rows_affected_by_shard === $util.emptyObject) + message.rows_affected_by_shard = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = 0; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = reader.uint64(); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.rows_affected_by_shard[key] = value; + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a CancelSchemaMigrationResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.CancelSchemaMigrationResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.CancelSchemaMigrationResponse} CancelSchemaMigrationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CancelSchemaMigrationResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a CancelSchemaMigrationResponse message. 
+ * @function verify + * @memberof vtctldata.CancelSchemaMigrationResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + CancelSchemaMigrationResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.rows_affected_by_shard != null && message.hasOwnProperty("rows_affected_by_shard")) { + if (!$util.isObject(message.rows_affected_by_shard)) + return "rows_affected_by_shard: object expected"; + let key = Object.keys(message.rows_affected_by_shard); + for (let i = 0; i < key.length; ++i) + if (!$util.isInteger(message.rows_affected_by_shard[key[i]]) && !(message.rows_affected_by_shard[key[i]] && $util.isInteger(message.rows_affected_by_shard[key[i]].low) && $util.isInteger(message.rows_affected_by_shard[key[i]].high))) + return "rows_affected_by_shard: integer|Long{k:string} expected"; + } + return null; + }; + + /** + * Creates a CancelSchemaMigrationResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.CancelSchemaMigrationResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.CancelSchemaMigrationResponse} CancelSchemaMigrationResponse + */ + CancelSchemaMigrationResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.CancelSchemaMigrationResponse) + return object; + let message = new $root.vtctldata.CancelSchemaMigrationResponse(); + if (object.rows_affected_by_shard) { + if (typeof object.rows_affected_by_shard !== "object") + throw TypeError(".vtctldata.CancelSchemaMigrationResponse.rows_affected_by_shard: object expected"); + message.rows_affected_by_shard = {}; + for (let keys = Object.keys(object.rows_affected_by_shard), i = 0; i < keys.length; ++i) + if ($util.Long) + (message.rows_affected_by_shard[keys[i]] = $util.Long.fromValue(object.rows_affected_by_shard[keys[i]])).unsigned = true; + else if (typeof object.rows_affected_by_shard[keys[i]] === "string") + message.rows_affected_by_shard[keys[i]] = parseInt(object.rows_affected_by_shard[keys[i]], 10); + else if (typeof object.rows_affected_by_shard[keys[i]] === "number") + message.rows_affected_by_shard[keys[i]] = object.rows_affected_by_shard[keys[i]]; + else if (typeof object.rows_affected_by_shard[keys[i]] === "object") + message.rows_affected_by_shard[keys[i]] = new $util.LongBits(object.rows_affected_by_shard[keys[i]].low >>> 0, object.rows_affected_by_shard[keys[i]].high >>> 0).toNumber(true); + } + return message; + }; + + /** + * Creates a plain object from a CancelSchemaMigrationResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.CancelSchemaMigrationResponse + * @static + * @param {vtctldata.CancelSchemaMigrationResponse} message CancelSchemaMigrationResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + CancelSchemaMigrationResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.objects || options.defaults) + object.rows_affected_by_shard = {}; + let keys2; + if (message.rows_affected_by_shard && (keys2 = Object.keys(message.rows_affected_by_shard)).length) { + object.rows_affected_by_shard = {}; + for (let j = 0; j < keys2.length; ++j) + if (typeof message.rows_affected_by_shard[keys2[j]] === "number") + object.rows_affected_by_shard[keys2[j]] = options.longs === String ? String(message.rows_affected_by_shard[keys2[j]]) : message.rows_affected_by_shard[keys2[j]]; + else + object.rows_affected_by_shard[keys2[j]] = options.longs === String ? $util.Long.prototype.toString.call(message.rows_affected_by_shard[keys2[j]]) : options.longs === Number ? new $util.LongBits(message.rows_affected_by_shard[keys2[j]].low >>> 0, message.rows_affected_by_shard[keys2[j]].high >>> 0).toNumber(true) : message.rows_affected_by_shard[keys2[j]]; + } + return object; + }; + + /** + * Converts this CancelSchemaMigrationResponse to JSON. 
+ * @function toJSON + * @memberof vtctldata.CancelSchemaMigrationResponse + * @instance + * @returns {Object.} JSON object + */ + CancelSchemaMigrationResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for CancelSchemaMigrationResponse + * @function getTypeUrl + * @memberof vtctldata.CancelSchemaMigrationResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + CancelSchemaMigrationResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.CancelSchemaMigrationResponse"; + }; + + return CancelSchemaMigrationResponse; + })(); + + vtctldata.ChangeTabletTypeRequest = (function() { + + /** + * Properties of a ChangeTabletTypeRequest. + * @memberof vtctldata + * @interface IChangeTabletTypeRequest + * @property {topodata.ITabletAlias|null} [tablet_alias] ChangeTabletTypeRequest tablet_alias + * @property {topodata.TabletType|null} [db_type] ChangeTabletTypeRequest db_type + * @property {boolean|null} [dry_run] ChangeTabletTypeRequest dry_run + */ + + /** + * Constructs a new ChangeTabletTypeRequest. + * @memberof vtctldata + * @classdesc Represents a ChangeTabletTypeRequest. + * @implements IChangeTabletTypeRequest + * @constructor + * @param {vtctldata.IChangeTabletTypeRequest=} [properties] Properties to set + */ + function ChangeTabletTypeRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ChangeTabletTypeRequest tablet_alias. 
+ * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.ChangeTabletTypeRequest + * @instance + */ + ChangeTabletTypeRequest.prototype.tablet_alias = null; + + /** + * ChangeTabletTypeRequest db_type. + * @member {topodata.TabletType} db_type + * @memberof vtctldata.ChangeTabletTypeRequest + * @instance + */ + ChangeTabletTypeRequest.prototype.db_type = 0; + + /** + * ChangeTabletTypeRequest dry_run. + * @member {boolean} dry_run + * @memberof vtctldata.ChangeTabletTypeRequest + * @instance + */ + ChangeTabletTypeRequest.prototype.dry_run = false; + + /** + * Creates a new ChangeTabletTypeRequest instance using the specified properties. + * @function create + * @memberof vtctldata.ChangeTabletTypeRequest + * @static + * @param {vtctldata.IChangeTabletTypeRequest=} [properties] Properties to set + * @returns {vtctldata.ChangeTabletTypeRequest} ChangeTabletTypeRequest instance + */ + ChangeTabletTypeRequest.create = function create(properties) { + return new ChangeTabletTypeRequest(properties); + }; + + /** + * Encodes the specified ChangeTabletTypeRequest message. Does not implicitly {@link vtctldata.ChangeTabletTypeRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.ChangeTabletTypeRequest + * @static + * @param {vtctldata.IChangeTabletTypeRequest} message ChangeTabletTypeRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ChangeTabletTypeRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.db_type != null && Object.hasOwnProperty.call(message, "db_type")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.db_type); + if (message.dry_run != null && Object.hasOwnProperty.call(message, "dry_run")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.dry_run); + return writer; + }; + + /** + * Encodes the specified ChangeTabletTypeRequest message, length delimited. Does not implicitly {@link vtctldata.ChangeTabletTypeRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ChangeTabletTypeRequest + * @static + * @param {vtctldata.IChangeTabletTypeRequest} message ChangeTabletTypeRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ChangeTabletTypeRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a ChangeTabletTypeRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.ChangeTabletTypeRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ChangeTabletTypeRequest} ChangeTabletTypeRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ChangeTabletTypeRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ChangeTabletTypeRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 2: { + message.db_type = reader.int32(); + break; + } + case 3: { + message.dry_run = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a ChangeTabletTypeRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.ChangeTabletTypeRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ChangeTabletTypeRequest} ChangeTabletTypeRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ChangeTabletTypeRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a ChangeTabletTypeRequest message. 
+ * @function verify + * @memberof vtctldata.ChangeTabletTypeRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ChangeTabletTypeRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; + } + if (message.db_type != null && message.hasOwnProperty("db_type")) + switch (message.db_type) { + default: + return "db_type: enum value expected"; + case 0: + case 1: + case 1: + case 2: + case 3: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + } + if (message.dry_run != null && message.hasOwnProperty("dry_run")) + if (typeof message.dry_run !== "boolean") + return "dry_run: boolean expected"; + return null; + }; + + /** + * Creates a ChangeTabletTypeRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.ChangeTabletTypeRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ChangeTabletTypeRequest} ChangeTabletTypeRequest + */ + ChangeTabletTypeRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ChangeTabletTypeRequest) + return object; + let message = new $root.vtctldata.ChangeTabletTypeRequest(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.ChangeTabletTypeRequest.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } + switch (object.db_type) { + default: + if (typeof object.db_type === "number") { + message.db_type = object.db_type; + break; + } + break; + case "UNKNOWN": + case 0: + message.db_type = 0; + break; + case "PRIMARY": + case 1: + message.db_type = 1; + break; + case "MASTER": + case 1: + message.db_type = 1; + break; + case "REPLICA": + case 2: + message.db_type = 2; + break; + case "RDONLY": + case 3: + message.db_type = 3; + break; + case "BATCH": + case 3: + message.db_type = 3; + break; + case "SPARE": + case 4: + message.db_type = 4; + break; + case "EXPERIMENTAL": + case 5: + message.db_type = 5; + break; + case "BACKUP": + case 6: + message.db_type = 6; + break; + case "RESTORE": + case 7: + message.db_type = 7; + break; + case "DRAINED": + case 8: + message.db_type = 8; + break; + } + if (object.dry_run != null) + message.dry_run = Boolean(object.dry_run); + return message; + }; + + /** + * Creates a plain object from a ChangeTabletTypeRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.ChangeTabletTypeRequest + * @static + * @param {vtctldata.ChangeTabletTypeRequest} message ChangeTabletTypeRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ChangeTabletTypeRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.tablet_alias = null; + object.db_type = options.enums === String ? "UNKNOWN" : 0; + object.dry_run = false; + } + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (message.db_type != null && message.hasOwnProperty("db_type")) + object.db_type = options.enums === String ? $root.topodata.TabletType[message.db_type] === undefined ? message.db_type : $root.topodata.TabletType[message.db_type] : message.db_type; + if (message.dry_run != null && message.hasOwnProperty("dry_run")) + object.dry_run = message.dry_run; + return object; + }; + + /** + * Converts this ChangeTabletTypeRequest to JSON. 
+ * @function toJSON + * @memberof vtctldata.ChangeTabletTypeRequest + * @instance + * @returns {Object.} JSON object + */ + ChangeTabletTypeRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ChangeTabletTypeRequest + * @function getTypeUrl + * @memberof vtctldata.ChangeTabletTypeRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ChangeTabletTypeRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ChangeTabletTypeRequest"; + }; + + return ChangeTabletTypeRequest; + })(); + + vtctldata.ChangeTabletTypeResponse = (function() { + + /** + * Properties of a ChangeTabletTypeResponse. + * @memberof vtctldata + * @interface IChangeTabletTypeResponse + * @property {topodata.ITablet|null} [before_tablet] ChangeTabletTypeResponse before_tablet + * @property {topodata.ITablet|null} [after_tablet] ChangeTabletTypeResponse after_tablet + * @property {boolean|null} [was_dry_run] ChangeTabletTypeResponse was_dry_run + */ + + /** + * Constructs a new ChangeTabletTypeResponse. + * @memberof vtctldata + * @classdesc Represents a ChangeTabletTypeResponse. + * @implements IChangeTabletTypeResponse + * @constructor + * @param {vtctldata.IChangeTabletTypeResponse=} [properties] Properties to set + */ + function ChangeTabletTypeResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ChangeTabletTypeResponse before_tablet. 
+ * @member {topodata.ITablet|null|undefined} before_tablet + * @memberof vtctldata.ChangeTabletTypeResponse + * @instance + */ + ChangeTabletTypeResponse.prototype.before_tablet = null; + + /** + * ChangeTabletTypeResponse after_tablet. + * @member {topodata.ITablet|null|undefined} after_tablet + * @memberof vtctldata.ChangeTabletTypeResponse + * @instance + */ + ChangeTabletTypeResponse.prototype.after_tablet = null; + + /** + * ChangeTabletTypeResponse was_dry_run. + * @member {boolean} was_dry_run + * @memberof vtctldata.ChangeTabletTypeResponse + * @instance + */ + ChangeTabletTypeResponse.prototype.was_dry_run = false; + + /** + * Creates a new ChangeTabletTypeResponse instance using the specified properties. + * @function create + * @memberof vtctldata.ChangeTabletTypeResponse + * @static + * @param {vtctldata.IChangeTabletTypeResponse=} [properties] Properties to set + * @returns {vtctldata.ChangeTabletTypeResponse} ChangeTabletTypeResponse instance + */ + ChangeTabletTypeResponse.create = function create(properties) { + return new ChangeTabletTypeResponse(properties); + }; + + /** + * Encodes the specified ChangeTabletTypeResponse message. Does not implicitly {@link vtctldata.ChangeTabletTypeResponse.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.ChangeTabletTypeResponse + * @static + * @param {vtctldata.IChangeTabletTypeResponse} message ChangeTabletTypeResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ChangeTabletTypeResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.before_tablet != null && Object.hasOwnProperty.call(message, "before_tablet")) + $root.topodata.Tablet.encode(message.before_tablet, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.after_tablet != null && Object.hasOwnProperty.call(message, "after_tablet")) + $root.topodata.Tablet.encode(message.after_tablet, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.was_dry_run != null && Object.hasOwnProperty.call(message, "was_dry_run")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.was_dry_run); + return writer; + }; + + /** + * Encodes the specified ChangeTabletTypeResponse message, length delimited. Does not implicitly {@link vtctldata.ChangeTabletTypeResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ChangeTabletTypeResponse + * @static + * @param {vtctldata.IChangeTabletTypeResponse} message ChangeTabletTypeResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ChangeTabletTypeResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a ChangeTabletTypeResponse message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.ChangeTabletTypeResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ChangeTabletTypeResponse} ChangeTabletTypeResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ChangeTabletTypeResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ChangeTabletTypeResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.before_tablet = $root.topodata.Tablet.decode(reader, reader.uint32()); + break; + } + case 2: { + message.after_tablet = $root.topodata.Tablet.decode(reader, reader.uint32()); + break; + } + case 3: { + message.was_dry_run = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a ChangeTabletTypeResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.ChangeTabletTypeResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ChangeTabletTypeResponse} ChangeTabletTypeResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ChangeTabletTypeResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a ChangeTabletTypeResponse message. 
+ * @function verify + * @memberof vtctldata.ChangeTabletTypeResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ChangeTabletTypeResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.before_tablet != null && message.hasOwnProperty("before_tablet")) { + let error = $root.topodata.Tablet.verify(message.before_tablet); + if (error) + return "before_tablet." + error; + } + if (message.after_tablet != null && message.hasOwnProperty("after_tablet")) { + let error = $root.topodata.Tablet.verify(message.after_tablet); + if (error) + return "after_tablet." + error; + } + if (message.was_dry_run != null && message.hasOwnProperty("was_dry_run")) + if (typeof message.was_dry_run !== "boolean") + return "was_dry_run: boolean expected"; + return null; + }; + + /** + * Creates a ChangeTabletTypeResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.ChangeTabletTypeResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ChangeTabletTypeResponse} ChangeTabletTypeResponse + */ + ChangeTabletTypeResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ChangeTabletTypeResponse) + return object; + let message = new $root.vtctldata.ChangeTabletTypeResponse(); + if (object.before_tablet != null) { + if (typeof object.before_tablet !== "object") + throw TypeError(".vtctldata.ChangeTabletTypeResponse.before_tablet: object expected"); + message.before_tablet = $root.topodata.Tablet.fromObject(object.before_tablet); + } + if (object.after_tablet != null) { + if (typeof object.after_tablet !== "object") + throw TypeError(".vtctldata.ChangeTabletTypeResponse.after_tablet: object expected"); + message.after_tablet = $root.topodata.Tablet.fromObject(object.after_tablet); + } + if (object.was_dry_run != null) + message.was_dry_run = Boolean(object.was_dry_run); + return message; + }; + + /** + * Creates a plain object from a ChangeTabletTypeResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.ChangeTabletTypeResponse + * @static + * @param {vtctldata.ChangeTabletTypeResponse} message ChangeTabletTypeResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ChangeTabletTypeResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.before_tablet = null; + object.after_tablet = null; + object.was_dry_run = false; + } + if (message.before_tablet != null && message.hasOwnProperty("before_tablet")) + object.before_tablet = $root.topodata.Tablet.toObject(message.before_tablet, options); + if (message.after_tablet != null && message.hasOwnProperty("after_tablet")) + object.after_tablet = $root.topodata.Tablet.toObject(message.after_tablet, options); + if (message.was_dry_run != null && message.hasOwnProperty("was_dry_run")) + object.was_dry_run = message.was_dry_run; + return object; + }; + + /** + * Converts this ChangeTabletTypeResponse to JSON. 
+ * @function toJSON + * @memberof vtctldata.ChangeTabletTypeResponse + * @instance + * @returns {Object.} JSON object + */ + ChangeTabletTypeResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ChangeTabletTypeResponse + * @function getTypeUrl + * @memberof vtctldata.ChangeTabletTypeResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ChangeTabletTypeResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ChangeTabletTypeResponse"; + }; + + return ChangeTabletTypeResponse; + })(); + + vtctldata.CleanupSchemaMigrationRequest = (function() { + + /** + * Properties of a CleanupSchemaMigrationRequest. + * @memberof vtctldata + * @interface ICleanupSchemaMigrationRequest + * @property {string|null} [keyspace] CleanupSchemaMigrationRequest keyspace + * @property {string|null} [uuid] CleanupSchemaMigrationRequest uuid + */ + + /** + * Constructs a new CleanupSchemaMigrationRequest. + * @memberof vtctldata + * @classdesc Represents a CleanupSchemaMigrationRequest. + * @implements ICleanupSchemaMigrationRequest + * @constructor + * @param {vtctldata.ICleanupSchemaMigrationRequest=} [properties] Properties to set + */ + function CleanupSchemaMigrationRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * CleanupSchemaMigrationRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.CleanupSchemaMigrationRequest + * @instance + */ + CleanupSchemaMigrationRequest.prototype.keyspace = ""; + + /** + * CleanupSchemaMigrationRequest uuid. 
+ * @member {string} uuid + * @memberof vtctldata.CleanupSchemaMigrationRequest + * @instance + */ + CleanupSchemaMigrationRequest.prototype.uuid = ""; + + /** + * Creates a new CleanupSchemaMigrationRequest instance using the specified properties. + * @function create + * @memberof vtctldata.CleanupSchemaMigrationRequest + * @static + * @param {vtctldata.ICleanupSchemaMigrationRequest=} [properties] Properties to set + * @returns {vtctldata.CleanupSchemaMigrationRequest} CleanupSchemaMigrationRequest instance + */ + CleanupSchemaMigrationRequest.create = function create(properties) { + return new CleanupSchemaMigrationRequest(properties); + }; + + /** + * Encodes the specified CleanupSchemaMigrationRequest message. Does not implicitly {@link vtctldata.CleanupSchemaMigrationRequest.verify|verify} messages. + * @function encode + * @memberof vtctldata.CleanupSchemaMigrationRequest + * @static + * @param {vtctldata.ICleanupSchemaMigrationRequest} message CleanupSchemaMigrationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CleanupSchemaMigrationRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.uuid != null && Object.hasOwnProperty.call(message, "uuid")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.uuid); + return writer; + }; + + /** + * Encodes the specified CleanupSchemaMigrationRequest message, length delimited. Does not implicitly {@link vtctldata.CleanupSchemaMigrationRequest.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.CleanupSchemaMigrationRequest + * @static + * @param {vtctldata.ICleanupSchemaMigrationRequest} message CleanupSchemaMigrationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CleanupSchemaMigrationRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a CleanupSchemaMigrationRequest message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.CleanupSchemaMigrationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.CleanupSchemaMigrationRequest} CleanupSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CleanupSchemaMigrationRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.CleanupSchemaMigrationRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyspace = reader.string(); + break; + } + case 2: { + message.uuid = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a CleanupSchemaMigrationRequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.CleanupSchemaMigrationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.CleanupSchemaMigrationRequest} CleanupSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CleanupSchemaMigrationRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a CleanupSchemaMigrationRequest message. + * @function verify + * @memberof vtctldata.CleanupSchemaMigrationRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + CleanupSchemaMigrationRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.uuid != null && message.hasOwnProperty("uuid")) + if (!$util.isString(message.uuid)) + return "uuid: string expected"; + return null; + }; + + /** + * Creates a CleanupSchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.CleanupSchemaMigrationRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.CleanupSchemaMigrationRequest} CleanupSchemaMigrationRequest + */ + CleanupSchemaMigrationRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.CleanupSchemaMigrationRequest) + return object; + let message = new $root.vtctldata.CleanupSchemaMigrationRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.uuid != null) + message.uuid = String(object.uuid); + return message; + }; + + /** + * Creates a plain object from a CleanupSchemaMigrationRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.CleanupSchemaMigrationRequest + * @static + * @param {vtctldata.CleanupSchemaMigrationRequest} message CleanupSchemaMigrationRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + CleanupSchemaMigrationRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.keyspace = ""; + object.uuid = ""; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.uuid != null && message.hasOwnProperty("uuid")) + object.uuid = message.uuid; + return object; + }; + + /** + * Converts this CleanupSchemaMigrationRequest to JSON. 
+ * @function toJSON + * @memberof vtctldata.CleanupSchemaMigrationRequest + * @instance + * @returns {Object.} JSON object + */ + CleanupSchemaMigrationRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for CleanupSchemaMigrationRequest + * @function getTypeUrl + * @memberof vtctldata.CleanupSchemaMigrationRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + CleanupSchemaMigrationRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.CleanupSchemaMigrationRequest"; + }; + + return CleanupSchemaMigrationRequest; + })(); + + vtctldata.CleanupSchemaMigrationResponse = (function() { + + /** + * Properties of a CleanupSchemaMigrationResponse. + * @memberof vtctldata + * @interface ICleanupSchemaMigrationResponse + * @property {Object.|null} [rows_affected_by_shard] CleanupSchemaMigrationResponse rows_affected_by_shard + */ + + /** + * Constructs a new CleanupSchemaMigrationResponse. + * @memberof vtctldata + * @classdesc Represents a CleanupSchemaMigrationResponse. + * @implements ICleanupSchemaMigrationResponse + * @constructor + * @param {vtctldata.ICleanupSchemaMigrationResponse=} [properties] Properties to set + */ + function CleanupSchemaMigrationResponse(properties) { + this.rows_affected_by_shard = {}; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * CleanupSchemaMigrationResponse rows_affected_by_shard. 
+ * @member {Object.} rows_affected_by_shard + * @memberof vtctldata.CleanupSchemaMigrationResponse + * @instance + */ + CleanupSchemaMigrationResponse.prototype.rows_affected_by_shard = $util.emptyObject; + + /** + * Creates a new CleanupSchemaMigrationResponse instance using the specified properties. + * @function create + * @memberof vtctldata.CleanupSchemaMigrationResponse + * @static + * @param {vtctldata.ICleanupSchemaMigrationResponse=} [properties] Properties to set + * @returns {vtctldata.CleanupSchemaMigrationResponse} CleanupSchemaMigrationResponse instance + */ + CleanupSchemaMigrationResponse.create = function create(properties) { + return new CleanupSchemaMigrationResponse(properties); + }; + + /** + * Encodes the specified CleanupSchemaMigrationResponse message. Does not implicitly {@link vtctldata.CleanupSchemaMigrationResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.CleanupSchemaMigrationResponse + * @static + * @param {vtctldata.ICleanupSchemaMigrationResponse} message CleanupSchemaMigrationResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CleanupSchemaMigrationResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.rows_affected_by_shard != null && Object.hasOwnProperty.call(message, "rows_affected_by_shard")) + for (let keys = Object.keys(message.rows_affected_by_shard), i = 0; i < keys.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 0 =*/16).uint64(message.rows_affected_by_shard[keys[i]]).ldelim(); + return writer; + }; + + /** + * Encodes the specified CleanupSchemaMigrationResponse message, length delimited. Does not implicitly {@link vtctldata.CleanupSchemaMigrationResponse.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.CleanupSchemaMigrationResponse + * @static + * @param {vtctldata.ICleanupSchemaMigrationResponse} message CleanupSchemaMigrationResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CleanupSchemaMigrationResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a CleanupSchemaMigrationResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.CleanupSchemaMigrationResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.CleanupSchemaMigrationResponse} CleanupSchemaMigrationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CleanupSchemaMigrationResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.CleanupSchemaMigrationResponse(), key, value; + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (message.rows_affected_by_shard === $util.emptyObject) + message.rows_affected_by_shard = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = 0; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = reader.uint64(); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.rows_affected_by_shard[key] = value; + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a CleanupSchemaMigrationResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.CleanupSchemaMigrationResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.CleanupSchemaMigrationResponse} CleanupSchemaMigrationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CleanupSchemaMigrationResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a CleanupSchemaMigrationResponse message. 
+ * @function verify + * @memberof vtctldata.CleanupSchemaMigrationResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + CleanupSchemaMigrationResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.rows_affected_by_shard != null && message.hasOwnProperty("rows_affected_by_shard")) { + if (!$util.isObject(message.rows_affected_by_shard)) + return "rows_affected_by_shard: object expected"; + let key = Object.keys(message.rows_affected_by_shard); + for (let i = 0; i < key.length; ++i) + if (!$util.isInteger(message.rows_affected_by_shard[key[i]]) && !(message.rows_affected_by_shard[key[i]] && $util.isInteger(message.rows_affected_by_shard[key[i]].low) && $util.isInteger(message.rows_affected_by_shard[key[i]].high))) + return "rows_affected_by_shard: integer|Long{k:string} expected"; + } + return null; + }; + + /** + * Creates a CleanupSchemaMigrationResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.CleanupSchemaMigrationResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.CleanupSchemaMigrationResponse} CleanupSchemaMigrationResponse + */ + CleanupSchemaMigrationResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.CleanupSchemaMigrationResponse) + return object; + let message = new $root.vtctldata.CleanupSchemaMigrationResponse(); + if (object.rows_affected_by_shard) { + if (typeof object.rows_affected_by_shard !== "object") + throw TypeError(".vtctldata.CleanupSchemaMigrationResponse.rows_affected_by_shard: object expected"); + message.rows_affected_by_shard = {}; + for (let keys = Object.keys(object.rows_affected_by_shard), i = 0; i < keys.length; ++i) + if ($util.Long) + (message.rows_affected_by_shard[keys[i]] = $util.Long.fromValue(object.rows_affected_by_shard[keys[i]])).unsigned = true; + else if (typeof object.rows_affected_by_shard[keys[i]] === "string") + message.rows_affected_by_shard[keys[i]] = parseInt(object.rows_affected_by_shard[keys[i]], 10); + else if (typeof object.rows_affected_by_shard[keys[i]] === "number") + message.rows_affected_by_shard[keys[i]] = object.rows_affected_by_shard[keys[i]]; + else if (typeof object.rows_affected_by_shard[keys[i]] === "object") + message.rows_affected_by_shard[keys[i]] = new $util.LongBits(object.rows_affected_by_shard[keys[i]].low >>> 0, object.rows_affected_by_shard[keys[i]].high >>> 0).toNumber(true); + } + return message; + }; + + /** + * Creates a plain object from a CleanupSchemaMigrationResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.CleanupSchemaMigrationResponse + * @static + * @param {vtctldata.CleanupSchemaMigrationResponse} message CleanupSchemaMigrationResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + CleanupSchemaMigrationResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.objects || options.defaults) + object.rows_affected_by_shard = {}; + let keys2; + if (message.rows_affected_by_shard && (keys2 = Object.keys(message.rows_affected_by_shard)).length) { + object.rows_affected_by_shard = {}; + for (let j = 0; j < keys2.length; ++j) + if (typeof message.rows_affected_by_shard[keys2[j]] === "number") + object.rows_affected_by_shard[keys2[j]] = options.longs === String ? String(message.rows_affected_by_shard[keys2[j]]) : message.rows_affected_by_shard[keys2[j]]; + else + object.rows_affected_by_shard[keys2[j]] = options.longs === String ? $util.Long.prototype.toString.call(message.rows_affected_by_shard[keys2[j]]) : options.longs === Number ? new $util.LongBits(message.rows_affected_by_shard[keys2[j]].low >>> 0, message.rows_affected_by_shard[keys2[j]].high >>> 0).toNumber(true) : message.rows_affected_by_shard[keys2[j]]; + } + return object; + }; + + /** + * Converts this CleanupSchemaMigrationResponse to JSON. 
+ * @function toJSON + * @memberof vtctldata.CleanupSchemaMigrationResponse + * @instance + * @returns {Object.} JSON object + */ + CleanupSchemaMigrationResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for CleanupSchemaMigrationResponse + * @function getTypeUrl + * @memberof vtctldata.CleanupSchemaMigrationResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + CleanupSchemaMigrationResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.CleanupSchemaMigrationResponse"; + }; + + return CleanupSchemaMigrationResponse; + })(); + + vtctldata.CompleteSchemaMigrationRequest = (function() { + + /** + * Properties of a CompleteSchemaMigrationRequest. + * @memberof vtctldata + * @interface ICompleteSchemaMigrationRequest + * @property {string|null} [keyspace] CompleteSchemaMigrationRequest keyspace + * @property {string|null} [uuid] CompleteSchemaMigrationRequest uuid + */ + + /** + * Constructs a new CompleteSchemaMigrationRequest. + * @memberof vtctldata + * @classdesc Represents a CompleteSchemaMigrationRequest. + * @implements ICompleteSchemaMigrationRequest + * @constructor + * @param {vtctldata.ICompleteSchemaMigrationRequest=} [properties] Properties to set + */ + function CompleteSchemaMigrationRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * CompleteSchemaMigrationRequest keyspace. 
+ * @member {string} keyspace + * @memberof vtctldata.CompleteSchemaMigrationRequest + * @instance + */ + CompleteSchemaMigrationRequest.prototype.keyspace = ""; + + /** + * CompleteSchemaMigrationRequest uuid. + * @member {string} uuid + * @memberof vtctldata.CompleteSchemaMigrationRequest + * @instance + */ + CompleteSchemaMigrationRequest.prototype.uuid = ""; + + /** + * Creates a new CompleteSchemaMigrationRequest instance using the specified properties. + * @function create + * @memberof vtctldata.CompleteSchemaMigrationRequest + * @static + * @param {vtctldata.ICompleteSchemaMigrationRequest=} [properties] Properties to set + * @returns {vtctldata.CompleteSchemaMigrationRequest} CompleteSchemaMigrationRequest instance + */ + CompleteSchemaMigrationRequest.create = function create(properties) { + return new CompleteSchemaMigrationRequest(properties); + }; + + /** + * Encodes the specified CompleteSchemaMigrationRequest message. Does not implicitly {@link vtctldata.CompleteSchemaMigrationRequest.verify|verify} messages. + * @function encode + * @memberof vtctldata.CompleteSchemaMigrationRequest + * @static + * @param {vtctldata.ICompleteSchemaMigrationRequest} message CompleteSchemaMigrationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CompleteSchemaMigrationRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.uuid != null && Object.hasOwnProperty.call(message, "uuid")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.uuid); + return writer; + }; + + /** + * Encodes the specified CompleteSchemaMigrationRequest message, length delimited. Does not implicitly {@link vtctldata.CompleteSchemaMigrationRequest.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.CompleteSchemaMigrationRequest + * @static + * @param {vtctldata.ICompleteSchemaMigrationRequest} message CompleteSchemaMigrationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CompleteSchemaMigrationRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a CompleteSchemaMigrationRequest message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.CompleteSchemaMigrationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.CompleteSchemaMigrationRequest} CompleteSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CompleteSchemaMigrationRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.CompleteSchemaMigrationRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyspace = reader.string(); + break; + } + case 2: { + message.uuid = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a CompleteSchemaMigrationRequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.CompleteSchemaMigrationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.CompleteSchemaMigrationRequest} CompleteSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CompleteSchemaMigrationRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a CompleteSchemaMigrationRequest message. + * @function verify + * @memberof vtctldata.CompleteSchemaMigrationRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + CompleteSchemaMigrationRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.uuid != null && message.hasOwnProperty("uuid")) + if (!$util.isString(message.uuid)) + return "uuid: string expected"; + return null; + }; + + /** + * Creates a CompleteSchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.CompleteSchemaMigrationRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.CompleteSchemaMigrationRequest} CompleteSchemaMigrationRequest + */ + CompleteSchemaMigrationRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.CompleteSchemaMigrationRequest) + return object; + let message = new $root.vtctldata.CompleteSchemaMigrationRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.uuid != null) + message.uuid = String(object.uuid); + return message; + }; + + /** + * Creates a plain object from a CompleteSchemaMigrationRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.CompleteSchemaMigrationRequest + * @static + * @param {vtctldata.CompleteSchemaMigrationRequest} message CompleteSchemaMigrationRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + CompleteSchemaMigrationRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.keyspace = ""; + object.uuid = ""; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.uuid != null && message.hasOwnProperty("uuid")) + object.uuid = message.uuid; + return object; + }; + + /** + * Converts this CompleteSchemaMigrationRequest to JSON. 
+ * @function toJSON + * @memberof vtctldata.CompleteSchemaMigrationRequest + * @instance + * @returns {Object.} JSON object + */ + CompleteSchemaMigrationRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for CompleteSchemaMigrationRequest + * @function getTypeUrl + * @memberof vtctldata.CompleteSchemaMigrationRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + CompleteSchemaMigrationRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.CompleteSchemaMigrationRequest"; + }; + + return CompleteSchemaMigrationRequest; + })(); + + vtctldata.CompleteSchemaMigrationResponse = (function() { + + /** + * Properties of a CompleteSchemaMigrationResponse. + * @memberof vtctldata + * @interface ICompleteSchemaMigrationResponse + * @property {Object.|null} [rows_affected_by_shard] CompleteSchemaMigrationResponse rows_affected_by_shard + */ + + /** + * Constructs a new CompleteSchemaMigrationResponse. + * @memberof vtctldata + * @classdesc Represents a CompleteSchemaMigrationResponse. + * @implements ICompleteSchemaMigrationResponse + * @constructor + * @param {vtctldata.ICompleteSchemaMigrationResponse=} [properties] Properties to set + */ + function CompleteSchemaMigrationResponse(properties) { + this.rows_affected_by_shard = {}; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * CompleteSchemaMigrationResponse rows_affected_by_shard. 
+ * @member {Object.} rows_affected_by_shard + * @memberof vtctldata.CompleteSchemaMigrationResponse + * @instance + */ + CompleteSchemaMigrationResponse.prototype.rows_affected_by_shard = $util.emptyObject; + + /** + * Creates a new CompleteSchemaMigrationResponse instance using the specified properties. + * @function create + * @memberof vtctldata.CompleteSchemaMigrationResponse + * @static + * @param {vtctldata.ICompleteSchemaMigrationResponse=} [properties] Properties to set + * @returns {vtctldata.CompleteSchemaMigrationResponse} CompleteSchemaMigrationResponse instance + */ + CompleteSchemaMigrationResponse.create = function create(properties) { + return new CompleteSchemaMigrationResponse(properties); + }; + + /** + * Encodes the specified CompleteSchemaMigrationResponse message. Does not implicitly {@link vtctldata.CompleteSchemaMigrationResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.CompleteSchemaMigrationResponse + * @static + * @param {vtctldata.ICompleteSchemaMigrationResponse} message CompleteSchemaMigrationResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CompleteSchemaMigrationResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.rows_affected_by_shard != null && Object.hasOwnProperty.call(message, "rows_affected_by_shard")) + for (let keys = Object.keys(message.rows_affected_by_shard), i = 0; i < keys.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 0 =*/16).uint64(message.rows_affected_by_shard[keys[i]]).ldelim(); + return writer; + }; + + /** + * Encodes the specified CompleteSchemaMigrationResponse message, length delimited. Does not implicitly {@link vtctldata.CompleteSchemaMigrationResponse.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.CompleteSchemaMigrationResponse + * @static + * @param {vtctldata.ICompleteSchemaMigrationResponse} message CompleteSchemaMigrationResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CompleteSchemaMigrationResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a CompleteSchemaMigrationResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.CompleteSchemaMigrationResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.CompleteSchemaMigrationResponse} CompleteSchemaMigrationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CompleteSchemaMigrationResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.CompleteSchemaMigrationResponse(), key, value; + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (message.rows_affected_by_shard === $util.emptyObject) + message.rows_affected_by_shard = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = 0; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = reader.uint64(); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.rows_affected_by_shard[key] = value; + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a CompleteSchemaMigrationResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.CompleteSchemaMigrationResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.CompleteSchemaMigrationResponse} CompleteSchemaMigrationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CompleteSchemaMigrationResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a CompleteSchemaMigrationResponse message. 
+ * @function verify + * @memberof vtctldata.CompleteSchemaMigrationResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + CompleteSchemaMigrationResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.rows_affected_by_shard != null && message.hasOwnProperty("rows_affected_by_shard")) { + if (!$util.isObject(message.rows_affected_by_shard)) + return "rows_affected_by_shard: object expected"; + let key = Object.keys(message.rows_affected_by_shard); + for (let i = 0; i < key.length; ++i) + if (!$util.isInteger(message.rows_affected_by_shard[key[i]]) && !(message.rows_affected_by_shard[key[i]] && $util.isInteger(message.rows_affected_by_shard[key[i]].low) && $util.isInteger(message.rows_affected_by_shard[key[i]].high))) + return "rows_affected_by_shard: integer|Long{k:string} expected"; + } + return null; + }; + + /** + * Creates a CompleteSchemaMigrationResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.CompleteSchemaMigrationResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.CompleteSchemaMigrationResponse} CompleteSchemaMigrationResponse + */ + CompleteSchemaMigrationResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.CompleteSchemaMigrationResponse) + return object; + let message = new $root.vtctldata.CompleteSchemaMigrationResponse(); + if (object.rows_affected_by_shard) { + if (typeof object.rows_affected_by_shard !== "object") + throw TypeError(".vtctldata.CompleteSchemaMigrationResponse.rows_affected_by_shard: object expected"); + message.rows_affected_by_shard = {}; + for (let keys = Object.keys(object.rows_affected_by_shard), i = 0; i < keys.length; ++i) + if ($util.Long) + (message.rows_affected_by_shard[keys[i]] = $util.Long.fromValue(object.rows_affected_by_shard[keys[i]])).unsigned = true; + else if (typeof object.rows_affected_by_shard[keys[i]] === "string") + message.rows_affected_by_shard[keys[i]] = parseInt(object.rows_affected_by_shard[keys[i]], 10); + else if (typeof object.rows_affected_by_shard[keys[i]] === "number") + message.rows_affected_by_shard[keys[i]] = object.rows_affected_by_shard[keys[i]]; + else if (typeof object.rows_affected_by_shard[keys[i]] === "object") + message.rows_affected_by_shard[keys[i]] = new $util.LongBits(object.rows_affected_by_shard[keys[i]].low >>> 0, object.rows_affected_by_shard[keys[i]].high >>> 0).toNumber(true); + } + return message; + }; + + /** + * Creates a plain object from a CompleteSchemaMigrationResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.CompleteSchemaMigrationResponse + * @static + * @param {vtctldata.CompleteSchemaMigrationResponse} message CompleteSchemaMigrationResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + CompleteSchemaMigrationResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.objects || options.defaults) + object.rows_affected_by_shard = {}; + let keys2; + if (message.rows_affected_by_shard && (keys2 = Object.keys(message.rows_affected_by_shard)).length) { + object.rows_affected_by_shard = {}; + for (let j = 0; j < keys2.length; ++j) + if (typeof message.rows_affected_by_shard[keys2[j]] === "number") + object.rows_affected_by_shard[keys2[j]] = options.longs === String ? String(message.rows_affected_by_shard[keys2[j]]) : message.rows_affected_by_shard[keys2[j]]; + else + object.rows_affected_by_shard[keys2[j]] = options.longs === String ? $util.Long.prototype.toString.call(message.rows_affected_by_shard[keys2[j]]) : options.longs === Number ? new $util.LongBits(message.rows_affected_by_shard[keys2[j]].low >>> 0, message.rows_affected_by_shard[keys2[j]].high >>> 0).toNumber(true) : message.rows_affected_by_shard[keys2[j]]; + } + return object; + }; + + /** + * Converts this CompleteSchemaMigrationResponse to JSON. 
+ * @function toJSON + * @memberof vtctldata.CompleteSchemaMigrationResponse + * @instance + * @returns {Object.} JSON object + */ + CompleteSchemaMigrationResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for CompleteSchemaMigrationResponse + * @function getTypeUrl + * @memberof vtctldata.CompleteSchemaMigrationResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + CompleteSchemaMigrationResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.CompleteSchemaMigrationResponse"; + }; + + return CompleteSchemaMigrationResponse; + })(); + + vtctldata.CreateKeyspaceRequest = (function() { + + /** + * Properties of a CreateKeyspaceRequest. + * @memberof vtctldata + * @interface ICreateKeyspaceRequest + * @property {string|null} [name] CreateKeyspaceRequest name + * @property {boolean|null} [force] CreateKeyspaceRequest force + * @property {boolean|null} [allow_empty_v_schema] CreateKeyspaceRequest allow_empty_v_schema + * @property {Array.|null} [served_froms] CreateKeyspaceRequest served_froms + * @property {topodata.KeyspaceType|null} [type] CreateKeyspaceRequest type + * @property {string|null} [base_keyspace] CreateKeyspaceRequest base_keyspace + * @property {vttime.ITime|null} [snapshot_time] CreateKeyspaceRequest snapshot_time + * @property {string|null} [durability_policy] CreateKeyspaceRequest durability_policy + * @property {string|null} [sidecar_db_name] CreateKeyspaceRequest sidecar_db_name + */ + + /** + * Constructs a new CreateKeyspaceRequest. + * @memberof vtctldata + * @classdesc Represents a CreateKeyspaceRequest. 
+ * @implements ICreateKeyspaceRequest + * @constructor + * @param {vtctldata.ICreateKeyspaceRequest=} [properties] Properties to set + */ + function CreateKeyspaceRequest(properties) { + this.served_froms = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * CreateKeyspaceRequest name. + * @member {string} name + * @memberof vtctldata.CreateKeyspaceRequest + * @instance + */ + CreateKeyspaceRequest.prototype.name = ""; + + /** + * CreateKeyspaceRequest force. + * @member {boolean} force + * @memberof vtctldata.CreateKeyspaceRequest + * @instance + */ + CreateKeyspaceRequest.prototype.force = false; + + /** + * CreateKeyspaceRequest allow_empty_v_schema. + * @member {boolean} allow_empty_v_schema + * @memberof vtctldata.CreateKeyspaceRequest + * @instance + */ + CreateKeyspaceRequest.prototype.allow_empty_v_schema = false; + + /** + * CreateKeyspaceRequest served_froms. + * @member {Array.} served_froms + * @memberof vtctldata.CreateKeyspaceRequest + * @instance + */ + CreateKeyspaceRequest.prototype.served_froms = $util.emptyArray; + + /** + * CreateKeyspaceRequest type. + * @member {topodata.KeyspaceType} type + * @memberof vtctldata.CreateKeyspaceRequest + * @instance + */ + CreateKeyspaceRequest.prototype.type = 0; + + /** + * CreateKeyspaceRequest base_keyspace. + * @member {string} base_keyspace + * @memberof vtctldata.CreateKeyspaceRequest + * @instance + */ + CreateKeyspaceRequest.prototype.base_keyspace = ""; + + /** + * CreateKeyspaceRequest snapshot_time. + * @member {vttime.ITime|null|undefined} snapshot_time + * @memberof vtctldata.CreateKeyspaceRequest + * @instance + */ + CreateKeyspaceRequest.prototype.snapshot_time = null; + + /** + * CreateKeyspaceRequest durability_policy. 
+ * @member {string} durability_policy + * @memberof vtctldata.CreateKeyspaceRequest + * @instance + */ + CreateKeyspaceRequest.prototype.durability_policy = ""; + + /** + * CreateKeyspaceRequest sidecar_db_name. + * @member {string} sidecar_db_name + * @memberof vtctldata.CreateKeyspaceRequest + * @instance + */ + CreateKeyspaceRequest.prototype.sidecar_db_name = ""; + + /** + * Creates a new CreateKeyspaceRequest instance using the specified properties. + * @function create + * @memberof vtctldata.CreateKeyspaceRequest + * @static + * @param {vtctldata.ICreateKeyspaceRequest=} [properties] Properties to set + * @returns {vtctldata.CreateKeyspaceRequest} CreateKeyspaceRequest instance + */ + CreateKeyspaceRequest.create = function create(properties) { + return new CreateKeyspaceRequest(properties); + }; + + /** + * Encodes the specified CreateKeyspaceRequest message. Does not implicitly {@link vtctldata.CreateKeyspaceRequest.verify|verify} messages. + * @function encode + * @memberof vtctldata.CreateKeyspaceRequest + * @static + * @param {vtctldata.ICreateKeyspaceRequest} message CreateKeyspaceRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CreateKeyspaceRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.force != null && Object.hasOwnProperty.call(message, "force")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.force); + if (message.allow_empty_v_schema != null && Object.hasOwnProperty.call(message, "allow_empty_v_schema")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.allow_empty_v_schema); + if (message.served_froms != null && message.served_froms.length) + for (let i = 0; i < message.served_froms.length; ++i) + 
$root.topodata.Keyspace.ServedFrom.encode(message.served_froms[i], writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); + if (message.type != null && Object.hasOwnProperty.call(message, "type")) + writer.uint32(/* id 7, wireType 0 =*/56).int32(message.type); + if (message.base_keyspace != null && Object.hasOwnProperty.call(message, "base_keyspace")) + writer.uint32(/* id 8, wireType 2 =*/66).string(message.base_keyspace); + if (message.snapshot_time != null && Object.hasOwnProperty.call(message, "snapshot_time")) + $root.vttime.Time.encode(message.snapshot_time, writer.uint32(/* id 9, wireType 2 =*/74).fork()).ldelim(); + if (message.durability_policy != null && Object.hasOwnProperty.call(message, "durability_policy")) + writer.uint32(/* id 10, wireType 2 =*/82).string(message.durability_policy); + if (message.sidecar_db_name != null && Object.hasOwnProperty.call(message, "sidecar_db_name")) + writer.uint32(/* id 11, wireType 2 =*/90).string(message.sidecar_db_name); + return writer; + }; + + /** + * Encodes the specified CreateKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.CreateKeyspaceRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.CreateKeyspaceRequest + * @static + * @param {vtctldata.ICreateKeyspaceRequest} message CreateKeyspaceRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CreateKeyspaceRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a CreateKeyspaceRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.CreateKeyspaceRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.CreateKeyspaceRequest} CreateKeyspaceRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CreateKeyspaceRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.CreateKeyspaceRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.name = reader.string(); + break; + } + case 2: { + message.force = reader.bool(); + break; + } + case 3: { + message.allow_empty_v_schema = reader.bool(); + break; + } + case 6: { + if (!(message.served_froms && message.served_froms.length)) + message.served_froms = []; + message.served_froms.push($root.topodata.Keyspace.ServedFrom.decode(reader, reader.uint32())); + break; + } + case 7: { + message.type = reader.int32(); + break; + } + case 8: { + message.base_keyspace = reader.string(); + break; + } + case 9: { + message.snapshot_time = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 10: { + message.durability_policy = reader.string(); + break; + } + case 11: { + message.sidecar_db_name = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a CreateKeyspaceRequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.CreateKeyspaceRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.CreateKeyspaceRequest} CreateKeyspaceRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CreateKeyspaceRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a CreateKeyspaceRequest message. + * @function verify + * @memberof vtctldata.CreateKeyspaceRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + CreateKeyspaceRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.force != null && message.hasOwnProperty("force")) + if (typeof message.force !== "boolean") + return "force: boolean expected"; + if (message.allow_empty_v_schema != null && message.hasOwnProperty("allow_empty_v_schema")) + if (typeof message.allow_empty_v_schema !== "boolean") + return "allow_empty_v_schema: boolean expected"; + if (message.served_froms != null && message.hasOwnProperty("served_froms")) { + if (!Array.isArray(message.served_froms)) + return "served_froms: array expected"; + for (let i = 0; i < message.served_froms.length; ++i) { + let error = $root.topodata.Keyspace.ServedFrom.verify(message.served_froms[i]); + if (error) + return "served_froms." 
+ error; + } + } + if (message.type != null && message.hasOwnProperty("type")) + switch (message.type) { + default: + return "type: enum value expected"; + case 0: + case 1: + break; + } + if (message.base_keyspace != null && message.hasOwnProperty("base_keyspace")) + if (!$util.isString(message.base_keyspace)) + return "base_keyspace: string expected"; + if (message.snapshot_time != null && message.hasOwnProperty("snapshot_time")) { + let error = $root.vttime.Time.verify(message.snapshot_time); + if (error) + return "snapshot_time." + error; + } + if (message.durability_policy != null && message.hasOwnProperty("durability_policy")) + if (!$util.isString(message.durability_policy)) + return "durability_policy: string expected"; + if (message.sidecar_db_name != null && message.hasOwnProperty("sidecar_db_name")) + if (!$util.isString(message.sidecar_db_name)) + return "sidecar_db_name: string expected"; + return null; + }; + + /** + * Creates a CreateKeyspaceRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.CreateKeyspaceRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.CreateKeyspaceRequest} CreateKeyspaceRequest + */ + CreateKeyspaceRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.CreateKeyspaceRequest) + return object; + let message = new $root.vtctldata.CreateKeyspaceRequest(); + if (object.name != null) + message.name = String(object.name); + if (object.force != null) + message.force = Boolean(object.force); + if (object.allow_empty_v_schema != null) + message.allow_empty_v_schema = Boolean(object.allow_empty_v_schema); + if (object.served_froms) { + if (!Array.isArray(object.served_froms)) + throw TypeError(".vtctldata.CreateKeyspaceRequest.served_froms: array expected"); + message.served_froms = []; + for (let i = 0; i < object.served_froms.length; ++i) { + if (typeof object.served_froms[i] !== "object") + throw TypeError(".vtctldata.CreateKeyspaceRequest.served_froms: object expected"); + message.served_froms[i] = $root.topodata.Keyspace.ServedFrom.fromObject(object.served_froms[i]); + } + } + switch (object.type) { + default: + if (typeof object.type === "number") { + message.type = object.type; + break; + } + break; + case "NORMAL": + case 0: + message.type = 0; + break; + case "SNAPSHOT": + case 1: + message.type = 1; + break; + } + if (object.base_keyspace != null) + message.base_keyspace = String(object.base_keyspace); + if (object.snapshot_time != null) { + if (typeof object.snapshot_time !== "object") + throw TypeError(".vtctldata.CreateKeyspaceRequest.snapshot_time: object expected"); + message.snapshot_time = $root.vttime.Time.fromObject(object.snapshot_time); + } + if (object.durability_policy != null) + message.durability_policy = String(object.durability_policy); + if (object.sidecar_db_name != null) + message.sidecar_db_name = String(object.sidecar_db_name); + return message; + }; + + /** + * Creates a plain object from a 
CreateKeyspaceRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.CreateKeyspaceRequest + * @static + * @param {vtctldata.CreateKeyspaceRequest} message CreateKeyspaceRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + CreateKeyspaceRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.served_froms = []; + if (options.defaults) { + object.name = ""; + object.force = false; + object.allow_empty_v_schema = false; + object.type = options.enums === String ? "NORMAL" : 0; + object.base_keyspace = ""; + object.snapshot_time = null; + object.durability_policy = ""; + object.sidecar_db_name = ""; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.force != null && message.hasOwnProperty("force")) + object.force = message.force; + if (message.allow_empty_v_schema != null && message.hasOwnProperty("allow_empty_v_schema")) + object.allow_empty_v_schema = message.allow_empty_v_schema; + if (message.served_froms && message.served_froms.length) { + object.served_froms = []; + for (let j = 0; j < message.served_froms.length; ++j) + object.served_froms[j] = $root.topodata.Keyspace.ServedFrom.toObject(message.served_froms[j], options); + } + if (message.type != null && message.hasOwnProperty("type")) + object.type = options.enums === String ? $root.topodata.KeyspaceType[message.type] === undefined ? 
message.type : $root.topodata.KeyspaceType[message.type] : message.type; + if (message.base_keyspace != null && message.hasOwnProperty("base_keyspace")) + object.base_keyspace = message.base_keyspace; + if (message.snapshot_time != null && message.hasOwnProperty("snapshot_time")) + object.snapshot_time = $root.vttime.Time.toObject(message.snapshot_time, options); + if (message.durability_policy != null && message.hasOwnProperty("durability_policy")) + object.durability_policy = message.durability_policy; + if (message.sidecar_db_name != null && message.hasOwnProperty("sidecar_db_name")) + object.sidecar_db_name = message.sidecar_db_name; + return object; + }; + + /** + * Converts this CreateKeyspaceRequest to JSON. + * @function toJSON + * @memberof vtctldata.CreateKeyspaceRequest + * @instance + * @returns {Object.} JSON object + */ + CreateKeyspaceRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for CreateKeyspaceRequest + * @function getTypeUrl + * @memberof vtctldata.CreateKeyspaceRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + CreateKeyspaceRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.CreateKeyspaceRequest"; + }; + + return CreateKeyspaceRequest; + })(); + + vtctldata.CreateKeyspaceResponse = (function() { + + /** + * Properties of a CreateKeyspaceResponse. + * @memberof vtctldata + * @interface ICreateKeyspaceResponse + * @property {vtctldata.IKeyspace|null} [keyspace] CreateKeyspaceResponse keyspace + */ + + /** + * Constructs a new CreateKeyspaceResponse. + * @memberof vtctldata + * @classdesc Represents a CreateKeyspaceResponse. 
+ * @implements ICreateKeyspaceResponse + * @constructor + * @param {vtctldata.ICreateKeyspaceResponse=} [properties] Properties to set + */ + function CreateKeyspaceResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * CreateKeyspaceResponse keyspace. + * @member {vtctldata.IKeyspace|null|undefined} keyspace + * @memberof vtctldata.CreateKeyspaceResponse + * @instance + */ + CreateKeyspaceResponse.prototype.keyspace = null; + + /** + * Creates a new CreateKeyspaceResponse instance using the specified properties. + * @function create + * @memberof vtctldata.CreateKeyspaceResponse + * @static + * @param {vtctldata.ICreateKeyspaceResponse=} [properties] Properties to set + * @returns {vtctldata.CreateKeyspaceResponse} CreateKeyspaceResponse instance + */ + CreateKeyspaceResponse.create = function create(properties) { + return new CreateKeyspaceResponse(properties); + }; + + /** + * Encodes the specified CreateKeyspaceResponse message. Does not implicitly {@link vtctldata.CreateKeyspaceResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.CreateKeyspaceResponse + * @static + * @param {vtctldata.ICreateKeyspaceResponse} message CreateKeyspaceResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CreateKeyspaceResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + $root.vtctldata.Keyspace.encode(message.keyspace, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified CreateKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.CreateKeyspaceResponse.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.CreateKeyspaceResponse + * @static + * @param {vtctldata.ICreateKeyspaceResponse} message CreateKeyspaceResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CreateKeyspaceResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a CreateKeyspaceResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.CreateKeyspaceResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.CreateKeyspaceResponse} CreateKeyspaceResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CreateKeyspaceResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.CreateKeyspaceResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyspace = $root.vtctldata.Keyspace.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a CreateKeyspaceResponse message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.CreateKeyspaceResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.CreateKeyspaceResponse} CreateKeyspaceResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CreateKeyspaceResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a CreateKeyspaceResponse message. + * @function verify + * @memberof vtctldata.CreateKeyspaceResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + CreateKeyspaceResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) { + let error = $root.vtctldata.Keyspace.verify(message.keyspace); + if (error) + return "keyspace." + error; + } + return null; + }; + + /** + * Creates a CreateKeyspaceResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.CreateKeyspaceResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.CreateKeyspaceResponse} CreateKeyspaceResponse + */ + CreateKeyspaceResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.CreateKeyspaceResponse) + return object; + let message = new $root.vtctldata.CreateKeyspaceResponse(); + if (object.keyspace != null) { + if (typeof object.keyspace !== "object") + throw TypeError(".vtctldata.CreateKeyspaceResponse.keyspace: object expected"); + message.keyspace = $root.vtctldata.Keyspace.fromObject(object.keyspace); + } + return message; + }; + + /** + * Creates a plain object from a CreateKeyspaceResponse message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.CreateKeyspaceResponse + * @static + * @param {vtctldata.CreateKeyspaceResponse} message CreateKeyspaceResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + CreateKeyspaceResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.keyspace = null; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = $root.vtctldata.Keyspace.toObject(message.keyspace, options); + return object; + }; + + /** + * Converts this CreateKeyspaceResponse to JSON. 
+ * @function toJSON + * @memberof vtctldata.CreateKeyspaceResponse + * @instance + * @returns {Object.} JSON object + */ + CreateKeyspaceResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for CreateKeyspaceResponse + * @function getTypeUrl + * @memberof vtctldata.CreateKeyspaceResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + CreateKeyspaceResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.CreateKeyspaceResponse"; + }; + + return CreateKeyspaceResponse; + })(); + + vtctldata.CreateShardRequest = (function() { + + /** + * Properties of a CreateShardRequest. + * @memberof vtctldata + * @interface ICreateShardRequest + * @property {string|null} [keyspace] CreateShardRequest keyspace + * @property {string|null} [shard_name] CreateShardRequest shard_name + * @property {boolean|null} [force] CreateShardRequest force + * @property {boolean|null} [include_parent] CreateShardRequest include_parent + */ + + /** + * Constructs a new CreateShardRequest. + * @memberof vtctldata + * @classdesc Represents a CreateShardRequest. + * @implements ICreateShardRequest + * @constructor + * @param {vtctldata.ICreateShardRequest=} [properties] Properties to set + */ + function CreateShardRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * CreateShardRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.CreateShardRequest + * @instance + */ + CreateShardRequest.prototype.keyspace = ""; + + /** + * CreateShardRequest shard_name. 
+ * @member {string} shard_name + * @memberof vtctldata.CreateShardRequest + * @instance + */ + CreateShardRequest.prototype.shard_name = ""; + + /** + * CreateShardRequest force. + * @member {boolean} force + * @memberof vtctldata.CreateShardRequest + * @instance + */ + CreateShardRequest.prototype.force = false; + + /** + * CreateShardRequest include_parent. + * @member {boolean} include_parent + * @memberof vtctldata.CreateShardRequest + * @instance + */ + CreateShardRequest.prototype.include_parent = false; + + /** + * Creates a new CreateShardRequest instance using the specified properties. + * @function create + * @memberof vtctldata.CreateShardRequest + * @static + * @param {vtctldata.ICreateShardRequest=} [properties] Properties to set + * @returns {vtctldata.CreateShardRequest} CreateShardRequest instance + */ + CreateShardRequest.create = function create(properties) { + return new CreateShardRequest(properties); + }; + + /** + * Encodes the specified CreateShardRequest message. Does not implicitly {@link vtctldata.CreateShardRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.CreateShardRequest + * @static + * @param {vtctldata.ICreateShardRequest} message CreateShardRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CreateShardRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shard_name != null && Object.hasOwnProperty.call(message, "shard_name")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard_name); + if (message.force != null && Object.hasOwnProperty.call(message, "force")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.force); + if (message.include_parent != null && Object.hasOwnProperty.call(message, "include_parent")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.include_parent); + return writer; + }; + + /** + * Encodes the specified CreateShardRequest message, length delimited. Does not implicitly {@link vtctldata.CreateShardRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.CreateShardRequest + * @static + * @param {vtctldata.ICreateShardRequest} message CreateShardRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CreateShardRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a CreateShardRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.CreateShardRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.CreateShardRequest} CreateShardRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CreateShardRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.CreateShardRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyspace = reader.string(); + break; + } + case 2: { + message.shard_name = reader.string(); + break; + } + case 3: { + message.force = reader.bool(); + break; + } + case 4: { + message.include_parent = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a CreateShardRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.CreateShardRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.CreateShardRequest} CreateShardRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CreateShardRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a CreateShardRequest message. 
+ * @function verify + * @memberof vtctldata.CreateShardRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + CreateShardRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard_name != null && message.hasOwnProperty("shard_name")) + if (!$util.isString(message.shard_name)) + return "shard_name: string expected"; + if (message.force != null && message.hasOwnProperty("force")) + if (typeof message.force !== "boolean") + return "force: boolean expected"; + if (message.include_parent != null && message.hasOwnProperty("include_parent")) + if (typeof message.include_parent !== "boolean") + return "include_parent: boolean expected"; + return null; + }; + + /** + * Creates a CreateShardRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.CreateShardRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.CreateShardRequest} CreateShardRequest + */ + CreateShardRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.CreateShardRequest) + return object; + let message = new $root.vtctldata.CreateShardRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard_name != null) + message.shard_name = String(object.shard_name); + if (object.force != null) + message.force = Boolean(object.force); + if (object.include_parent != null) + message.include_parent = Boolean(object.include_parent); + return message; + }; + + /** + * Creates a plain object from a CreateShardRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.CreateShardRequest + * @static + * @param {vtctldata.CreateShardRequest} message CreateShardRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + CreateShardRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.keyspace = ""; + object.shard_name = ""; + object.force = false; + object.include_parent = false; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard_name != null && message.hasOwnProperty("shard_name")) + object.shard_name = message.shard_name; + if (message.force != null && message.hasOwnProperty("force")) + object.force = message.force; + if (message.include_parent != null && message.hasOwnProperty("include_parent")) + object.include_parent = message.include_parent; + return object; + }; + + /** + * Converts this CreateShardRequest to JSON. + * @function toJSON + * @memberof vtctldata.CreateShardRequest + * @instance + * @returns {Object.} JSON object + */ + CreateShardRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for CreateShardRequest + * @function getTypeUrl + * @memberof vtctldata.CreateShardRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + CreateShardRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.CreateShardRequest"; + }; + + return CreateShardRequest; + })(); + + vtctldata.CreateShardResponse = (function() { + + /** + * Properties of a CreateShardResponse. 
+ * @memberof vtctldata + * @interface ICreateShardResponse + * @property {vtctldata.IKeyspace|null} [keyspace] CreateShardResponse keyspace + * @property {vtctldata.IShard|null} [shard] CreateShardResponse shard + * @property {boolean|null} [shard_already_exists] CreateShardResponse shard_already_exists + */ + + /** + * Constructs a new CreateShardResponse. + * @memberof vtctldata + * @classdesc Represents a CreateShardResponse. + * @implements ICreateShardResponse + * @constructor + * @param {vtctldata.ICreateShardResponse=} [properties] Properties to set + */ + function CreateShardResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * CreateShardResponse keyspace. + * @member {vtctldata.IKeyspace|null|undefined} keyspace + * @memberof vtctldata.CreateShardResponse + * @instance + */ + CreateShardResponse.prototype.keyspace = null; + + /** + * CreateShardResponse shard. + * @member {vtctldata.IShard|null|undefined} shard + * @memberof vtctldata.CreateShardResponse + * @instance + */ + CreateShardResponse.prototype.shard = null; + + /** + * CreateShardResponse shard_already_exists. + * @member {boolean} shard_already_exists + * @memberof vtctldata.CreateShardResponse + * @instance + */ + CreateShardResponse.prototype.shard_already_exists = false; + + /** + * Creates a new CreateShardResponse instance using the specified properties. + * @function create + * @memberof vtctldata.CreateShardResponse + * @static + * @param {vtctldata.ICreateShardResponse=} [properties] Properties to set + * @returns {vtctldata.CreateShardResponse} CreateShardResponse instance + */ + CreateShardResponse.create = function create(properties) { + return new CreateShardResponse(properties); + }; + + /** + * Encodes the specified CreateShardResponse message. Does not implicitly {@link vtctldata.CreateShardResponse.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.CreateShardResponse + * @static + * @param {vtctldata.ICreateShardResponse} message CreateShardResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CreateShardResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + $root.vtctldata.Keyspace.encode(message.keyspace, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + $root.vtctldata.Shard.encode(message.shard, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.shard_already_exists != null && Object.hasOwnProperty.call(message, "shard_already_exists")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.shard_already_exists); + return writer; + }; + + /** + * Encodes the specified CreateShardResponse message, length delimited. Does not implicitly {@link vtctldata.CreateShardResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.CreateShardResponse + * @static + * @param {vtctldata.ICreateShardResponse} message CreateShardResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CreateShardResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a CreateShardResponse message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.CreateShardResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.CreateShardResponse} CreateShardResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CreateShardResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.CreateShardResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyspace = $root.vtctldata.Keyspace.decode(reader, reader.uint32()); + break; + } + case 2: { + message.shard = $root.vtctldata.Shard.decode(reader, reader.uint32()); + break; + } + case 3: { + message.shard_already_exists = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a CreateShardResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.CreateShardResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.CreateShardResponse} CreateShardResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CreateShardResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a CreateShardResponse message. 
+ * @function verify + * @memberof vtctldata.CreateShardResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + CreateShardResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) { + let error = $root.vtctldata.Keyspace.verify(message.keyspace); + if (error) + return "keyspace." + error; + } + if (message.shard != null && message.hasOwnProperty("shard")) { + let error = $root.vtctldata.Shard.verify(message.shard); + if (error) + return "shard." + error; + } + if (message.shard_already_exists != null && message.hasOwnProperty("shard_already_exists")) + if (typeof message.shard_already_exists !== "boolean") + return "shard_already_exists: boolean expected"; + return null; + }; + + /** + * Creates a CreateShardResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.CreateShardResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.CreateShardResponse} CreateShardResponse + */ + CreateShardResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.CreateShardResponse) + return object; + let message = new $root.vtctldata.CreateShardResponse(); + if (object.keyspace != null) { + if (typeof object.keyspace !== "object") + throw TypeError(".vtctldata.CreateShardResponse.keyspace: object expected"); + message.keyspace = $root.vtctldata.Keyspace.fromObject(object.keyspace); + } + if (object.shard != null) { + if (typeof object.shard !== "object") + throw TypeError(".vtctldata.CreateShardResponse.shard: object expected"); + message.shard = $root.vtctldata.Shard.fromObject(object.shard); + } + if (object.shard_already_exists != null) + message.shard_already_exists = Boolean(object.shard_already_exists); + return message; + }; + + /** + * Creates a plain object from a CreateShardResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.CreateShardResponse + * @static + * @param {vtctldata.CreateShardResponse} message CreateShardResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + CreateShardResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.keyspace = null; + object.shard = null; + object.shard_already_exists = false; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = $root.vtctldata.Keyspace.toObject(message.keyspace, options); + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = $root.vtctldata.Shard.toObject(message.shard, options); + if (message.shard_already_exists != null && message.hasOwnProperty("shard_already_exists")) + object.shard_already_exists = message.shard_already_exists; + return object; + }; + + /** + * Converts this CreateShardResponse to JSON. + * @function toJSON + * @memberof vtctldata.CreateShardResponse + * @instance + * @returns {Object.} JSON object + */ + CreateShardResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for CreateShardResponse + * @function getTypeUrl + * @memberof vtctldata.CreateShardResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + CreateShardResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.CreateShardResponse"; + }; + + return CreateShardResponse; + })(); + + vtctldata.DeleteCellInfoRequest = (function() { + + /** + * Properties of a DeleteCellInfoRequest. 
+ * @memberof vtctldata + * @interface IDeleteCellInfoRequest + * @property {string|null} [name] DeleteCellInfoRequest name + * @property {boolean|null} [force] DeleteCellInfoRequest force + */ + + /** + * Constructs a new DeleteCellInfoRequest. + * @memberof vtctldata + * @classdesc Represents a DeleteCellInfoRequest. + * @implements IDeleteCellInfoRequest + * @constructor + * @param {vtctldata.IDeleteCellInfoRequest=} [properties] Properties to set + */ + function DeleteCellInfoRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * DeleteCellInfoRequest name. + * @member {string} name + * @memberof vtctldata.DeleteCellInfoRequest + * @instance + */ + DeleteCellInfoRequest.prototype.name = ""; + + /** + * DeleteCellInfoRequest force. + * @member {boolean} force + * @memberof vtctldata.DeleteCellInfoRequest + * @instance + */ + DeleteCellInfoRequest.prototype.force = false; + + /** + * Creates a new DeleteCellInfoRequest instance using the specified properties. + * @function create + * @memberof vtctldata.DeleteCellInfoRequest + * @static + * @param {vtctldata.IDeleteCellInfoRequest=} [properties] Properties to set + * @returns {vtctldata.DeleteCellInfoRequest} DeleteCellInfoRequest instance + */ + DeleteCellInfoRequest.create = function create(properties) { + return new DeleteCellInfoRequest(properties); + }; + + /** + * Encodes the specified DeleteCellInfoRequest message. Does not implicitly {@link vtctldata.DeleteCellInfoRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.DeleteCellInfoRequest + * @static + * @param {vtctldata.IDeleteCellInfoRequest} message DeleteCellInfoRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteCellInfoRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.force != null && Object.hasOwnProperty.call(message, "force")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.force); + return writer; + }; + + /** + * Encodes the specified DeleteCellInfoRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteCellInfoRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.DeleteCellInfoRequest + * @static + * @param {vtctldata.IDeleteCellInfoRequest} message DeleteCellInfoRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteCellInfoRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a DeleteCellInfoRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.DeleteCellInfoRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.DeleteCellInfoRequest} DeleteCellInfoRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteCellInfoRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteCellInfoRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.name = reader.string(); + break; + } + case 2: { + message.force = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a DeleteCellInfoRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.DeleteCellInfoRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.DeleteCellInfoRequest} DeleteCellInfoRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteCellInfoRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a DeleteCellInfoRequest message. 
+ * @function verify + * @memberof vtctldata.DeleteCellInfoRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + DeleteCellInfoRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.force != null && message.hasOwnProperty("force")) + if (typeof message.force !== "boolean") + return "force: boolean expected"; + return null; + }; + + /** + * Creates a DeleteCellInfoRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.DeleteCellInfoRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.DeleteCellInfoRequest} DeleteCellInfoRequest + */ + DeleteCellInfoRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.DeleteCellInfoRequest) + return object; + let message = new $root.vtctldata.DeleteCellInfoRequest(); + if (object.name != null) + message.name = String(object.name); + if (object.force != null) + message.force = Boolean(object.force); + return message; + }; + + /** + * Creates a plain object from a DeleteCellInfoRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.DeleteCellInfoRequest + * @static + * @param {vtctldata.DeleteCellInfoRequest} message DeleteCellInfoRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + DeleteCellInfoRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.name = ""; + object.force = false; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.force != null && message.hasOwnProperty("force")) + object.force = message.force; + return object; + }; + + /** + * Converts this DeleteCellInfoRequest to JSON. + * @function toJSON + * @memberof vtctldata.DeleteCellInfoRequest + * @instance + * @returns {Object.} JSON object + */ + DeleteCellInfoRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for DeleteCellInfoRequest + * @function getTypeUrl + * @memberof vtctldata.DeleteCellInfoRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + DeleteCellInfoRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.DeleteCellInfoRequest"; + }; + + return DeleteCellInfoRequest; + })(); + + vtctldata.DeleteCellInfoResponse = (function() { + + /** + * Properties of a DeleteCellInfoResponse. + * @memberof vtctldata + * @interface IDeleteCellInfoResponse + */ + + /** + * Constructs a new DeleteCellInfoResponse. + * @memberof vtctldata + * @classdesc Represents a DeleteCellInfoResponse. 
+ * @implements IDeleteCellInfoResponse + * @constructor + * @param {vtctldata.IDeleteCellInfoResponse=} [properties] Properties to set + */ + function DeleteCellInfoResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Creates a new DeleteCellInfoResponse instance using the specified properties. + * @function create + * @memberof vtctldata.DeleteCellInfoResponse + * @static + * @param {vtctldata.IDeleteCellInfoResponse=} [properties] Properties to set + * @returns {vtctldata.DeleteCellInfoResponse} DeleteCellInfoResponse instance + */ + DeleteCellInfoResponse.create = function create(properties) { + return new DeleteCellInfoResponse(properties); + }; + + /** + * Encodes the specified DeleteCellInfoResponse message. Does not implicitly {@link vtctldata.DeleteCellInfoResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.DeleteCellInfoResponse + * @static + * @param {vtctldata.IDeleteCellInfoResponse} message DeleteCellInfoResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteCellInfoResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + return writer; + }; + + /** + * Encodes the specified DeleteCellInfoResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteCellInfoResponse.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.DeleteCellInfoResponse + * @static + * @param {vtctldata.IDeleteCellInfoResponse} message DeleteCellInfoResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteCellInfoResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a DeleteCellInfoResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.DeleteCellInfoResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.DeleteCellInfoResponse} DeleteCellInfoResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteCellInfoResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteCellInfoResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a DeleteCellInfoResponse message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.DeleteCellInfoResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.DeleteCellInfoResponse} DeleteCellInfoResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteCellInfoResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a DeleteCellInfoResponse message. + * @function verify + * @memberof vtctldata.DeleteCellInfoResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + DeleteCellInfoResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + return null; + }; + + /** + * Creates a DeleteCellInfoResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.DeleteCellInfoResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.DeleteCellInfoResponse} DeleteCellInfoResponse + */ + DeleteCellInfoResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.DeleteCellInfoResponse) + return object; + return new $root.vtctldata.DeleteCellInfoResponse(); + }; + + /** + * Creates a plain object from a DeleteCellInfoResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.DeleteCellInfoResponse + * @static + * @param {vtctldata.DeleteCellInfoResponse} message DeleteCellInfoResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + DeleteCellInfoResponse.toObject = function toObject() { + return {}; + }; + + /** + * Converts this DeleteCellInfoResponse to JSON. + * @function toJSON + * @memberof vtctldata.DeleteCellInfoResponse + * @instance + * @returns {Object.} JSON object + */ + DeleteCellInfoResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for DeleteCellInfoResponse + * @function getTypeUrl + * @memberof vtctldata.DeleteCellInfoResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + DeleteCellInfoResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.DeleteCellInfoResponse"; + }; + + return DeleteCellInfoResponse; + })(); + + vtctldata.DeleteCellsAliasRequest = (function() { + + /** + * Properties of a DeleteCellsAliasRequest. + * @memberof vtctldata + * @interface IDeleteCellsAliasRequest + * @property {string|null} [name] DeleteCellsAliasRequest name + */ + + /** + * Constructs a new DeleteCellsAliasRequest. + * @memberof vtctldata + * @classdesc Represents a DeleteCellsAliasRequest. + * @implements IDeleteCellsAliasRequest + * @constructor + * @param {vtctldata.IDeleteCellsAliasRequest=} [properties] Properties to set + */ + function DeleteCellsAliasRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * DeleteCellsAliasRequest name. 
+ * @member {string} name + * @memberof vtctldata.DeleteCellsAliasRequest + * @instance + */ + DeleteCellsAliasRequest.prototype.name = ""; + + /** + * Creates a new DeleteCellsAliasRequest instance using the specified properties. + * @function create + * @memberof vtctldata.DeleteCellsAliasRequest + * @static + * @param {vtctldata.IDeleteCellsAliasRequest=} [properties] Properties to set + * @returns {vtctldata.DeleteCellsAliasRequest} DeleteCellsAliasRequest instance + */ + DeleteCellsAliasRequest.create = function create(properties) { + return new DeleteCellsAliasRequest(properties); + }; + + /** + * Encodes the specified DeleteCellsAliasRequest message. Does not implicitly {@link vtctldata.DeleteCellsAliasRequest.verify|verify} messages. + * @function encode + * @memberof vtctldata.DeleteCellsAliasRequest + * @static + * @param {vtctldata.IDeleteCellsAliasRequest} message DeleteCellsAliasRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteCellsAliasRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + return writer; + }; + + /** + * Encodes the specified DeleteCellsAliasRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteCellsAliasRequest.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.DeleteCellsAliasRequest + * @static + * @param {vtctldata.IDeleteCellsAliasRequest} message DeleteCellsAliasRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteCellsAliasRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a DeleteCellsAliasRequest message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.DeleteCellsAliasRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.DeleteCellsAliasRequest} DeleteCellsAliasRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteCellsAliasRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteCellsAliasRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.name = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a DeleteCellsAliasRequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.DeleteCellsAliasRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.DeleteCellsAliasRequest} DeleteCellsAliasRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteCellsAliasRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a DeleteCellsAliasRequest message. + * @function verify + * @memberof vtctldata.DeleteCellsAliasRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + DeleteCellsAliasRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + return null; + }; + + /** + * Creates a DeleteCellsAliasRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.DeleteCellsAliasRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.DeleteCellsAliasRequest} DeleteCellsAliasRequest + */ + DeleteCellsAliasRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.DeleteCellsAliasRequest) + return object; + let message = new $root.vtctldata.DeleteCellsAliasRequest(); + if (object.name != null) + message.name = String(object.name); + return message; + }; + + /** + * Creates a plain object from a DeleteCellsAliasRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.DeleteCellsAliasRequest + * @static + * @param {vtctldata.DeleteCellsAliasRequest} message DeleteCellsAliasRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + DeleteCellsAliasRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.name = ""; + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + return object; + }; + + /** + * Converts this DeleteCellsAliasRequest to JSON. + * @function toJSON + * @memberof vtctldata.DeleteCellsAliasRequest + * @instance + * @returns {Object.} JSON object + */ + DeleteCellsAliasRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for DeleteCellsAliasRequest + * @function getTypeUrl + * @memberof vtctldata.DeleteCellsAliasRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + DeleteCellsAliasRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.DeleteCellsAliasRequest"; + }; + + return DeleteCellsAliasRequest; + })(); + + vtctldata.DeleteCellsAliasResponse = (function() { + + /** + * Properties of a DeleteCellsAliasResponse. + * @memberof vtctldata + * @interface IDeleteCellsAliasResponse + */ + + /** + * Constructs a new DeleteCellsAliasResponse. + * @memberof vtctldata + * @classdesc Represents a DeleteCellsAliasResponse. 
+ * @implements IDeleteCellsAliasResponse + * @constructor + * @param {vtctldata.IDeleteCellsAliasResponse=} [properties] Properties to set + */ + function DeleteCellsAliasResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Creates a new DeleteCellsAliasResponse instance using the specified properties. + * @function create + * @memberof vtctldata.DeleteCellsAliasResponse + * @static + * @param {vtctldata.IDeleteCellsAliasResponse=} [properties] Properties to set + * @returns {vtctldata.DeleteCellsAliasResponse} DeleteCellsAliasResponse instance + */ + DeleteCellsAliasResponse.create = function create(properties) { + return new DeleteCellsAliasResponse(properties); + }; + + /** + * Encodes the specified DeleteCellsAliasResponse message. Does not implicitly {@link vtctldata.DeleteCellsAliasResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.DeleteCellsAliasResponse + * @static + * @param {vtctldata.IDeleteCellsAliasResponse} message DeleteCellsAliasResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteCellsAliasResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + return writer; + }; + + /** + * Encodes the specified DeleteCellsAliasResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteCellsAliasResponse.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.DeleteCellsAliasResponse + * @static + * @param {vtctldata.IDeleteCellsAliasResponse} message DeleteCellsAliasResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteCellsAliasResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a DeleteCellsAliasResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.DeleteCellsAliasResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.DeleteCellsAliasResponse} DeleteCellsAliasResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteCellsAliasResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteCellsAliasResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a DeleteCellsAliasResponse message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.DeleteCellsAliasResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.DeleteCellsAliasResponse} DeleteCellsAliasResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteCellsAliasResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a DeleteCellsAliasResponse message. + * @function verify + * @memberof vtctldata.DeleteCellsAliasResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + DeleteCellsAliasResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + return null; + }; + + /** + * Creates a DeleteCellsAliasResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.DeleteCellsAliasResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.DeleteCellsAliasResponse} DeleteCellsAliasResponse + */ + DeleteCellsAliasResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.DeleteCellsAliasResponse) + return object; + return new $root.vtctldata.DeleteCellsAliasResponse(); + }; + + /** + * Creates a plain object from a DeleteCellsAliasResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.DeleteCellsAliasResponse + * @static + * @param {vtctldata.DeleteCellsAliasResponse} message DeleteCellsAliasResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + DeleteCellsAliasResponse.toObject = function toObject() { + return {}; + }; + + /** + * Converts this DeleteCellsAliasResponse to JSON. + * @function toJSON + * @memberof vtctldata.DeleteCellsAliasResponse + * @instance + * @returns {Object.} JSON object + */ + DeleteCellsAliasResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for DeleteCellsAliasResponse + * @function getTypeUrl + * @memberof vtctldata.DeleteCellsAliasResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + DeleteCellsAliasResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.DeleteCellsAliasResponse"; + }; + + return DeleteCellsAliasResponse; + })(); + + vtctldata.DeleteKeyspaceRequest = (function() { + + /** + * Properties of a DeleteKeyspaceRequest. + * @memberof vtctldata + * @interface IDeleteKeyspaceRequest + * @property {string|null} [keyspace] DeleteKeyspaceRequest keyspace + * @property {boolean|null} [recursive] DeleteKeyspaceRequest recursive + * @property {boolean|null} [force] DeleteKeyspaceRequest force + */ + + /** + * Constructs a new DeleteKeyspaceRequest. + * @memberof vtctldata + * @classdesc Represents a DeleteKeyspaceRequest. 
+ * @implements IDeleteKeyspaceRequest + * @constructor + * @param {vtctldata.IDeleteKeyspaceRequest=} [properties] Properties to set + */ + function DeleteKeyspaceRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * DeleteKeyspaceRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.DeleteKeyspaceRequest + * @instance + */ + DeleteKeyspaceRequest.prototype.keyspace = ""; + + /** + * DeleteKeyspaceRequest recursive. + * @member {boolean} recursive + * @memberof vtctldata.DeleteKeyspaceRequest + * @instance + */ + DeleteKeyspaceRequest.prototype.recursive = false; + + /** + * DeleteKeyspaceRequest force. + * @member {boolean} force + * @memberof vtctldata.DeleteKeyspaceRequest + * @instance + */ + DeleteKeyspaceRequest.prototype.force = false; + + /** + * Creates a new DeleteKeyspaceRequest instance using the specified properties. + * @function create + * @memberof vtctldata.DeleteKeyspaceRequest + * @static + * @param {vtctldata.IDeleteKeyspaceRequest=} [properties] Properties to set + * @returns {vtctldata.DeleteKeyspaceRequest} DeleteKeyspaceRequest instance + */ + DeleteKeyspaceRequest.create = function create(properties) { + return new DeleteKeyspaceRequest(properties); + }; + + /** + * Encodes the specified DeleteKeyspaceRequest message. Does not implicitly {@link vtctldata.DeleteKeyspaceRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.DeleteKeyspaceRequest + * @static + * @param {vtctldata.IDeleteKeyspaceRequest} message DeleteKeyspaceRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteKeyspaceRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.recursive != null && Object.hasOwnProperty.call(message, "recursive")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.recursive); + if (message.force != null && Object.hasOwnProperty.call(message, "force")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.force); + return writer; + }; + + /** + * Encodes the specified DeleteKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteKeyspaceRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.DeleteKeyspaceRequest + * @static + * @param {vtctldata.IDeleteKeyspaceRequest} message DeleteKeyspaceRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteKeyspaceRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a DeleteKeyspaceRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.DeleteKeyspaceRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.DeleteKeyspaceRequest} DeleteKeyspaceRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteKeyspaceRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteKeyspaceRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyspace = reader.string(); + break; + } + case 2: { + message.recursive = reader.bool(); + break; + } + case 3: { + message.force = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a DeleteKeyspaceRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.DeleteKeyspaceRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.DeleteKeyspaceRequest} DeleteKeyspaceRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteKeyspaceRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a DeleteKeyspaceRequest message. 
+ * @function verify + * @memberof vtctldata.DeleteKeyspaceRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + DeleteKeyspaceRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.recursive != null && message.hasOwnProperty("recursive")) + if (typeof message.recursive !== "boolean") + return "recursive: boolean expected"; + if (message.force != null && message.hasOwnProperty("force")) + if (typeof message.force !== "boolean") + return "force: boolean expected"; + return null; + }; + + /** + * Creates a DeleteKeyspaceRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.DeleteKeyspaceRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.DeleteKeyspaceRequest} DeleteKeyspaceRequest + */ + DeleteKeyspaceRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.DeleteKeyspaceRequest) + return object; + let message = new $root.vtctldata.DeleteKeyspaceRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.recursive != null) + message.recursive = Boolean(object.recursive); + if (object.force != null) + message.force = Boolean(object.force); + return message; + }; + + /** + * Creates a plain object from a DeleteKeyspaceRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.DeleteKeyspaceRequest + * @static + * @param {vtctldata.DeleteKeyspaceRequest} message DeleteKeyspaceRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + DeleteKeyspaceRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.keyspace = ""; + object.recursive = false; + object.force = false; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.recursive != null && message.hasOwnProperty("recursive")) + object.recursive = message.recursive; + if (message.force != null && message.hasOwnProperty("force")) + object.force = message.force; + return object; + }; + + /** + * Converts this DeleteKeyspaceRequest to JSON. + * @function toJSON + * @memberof vtctldata.DeleteKeyspaceRequest + * @instance + * @returns {Object.} JSON object + */ + DeleteKeyspaceRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for DeleteKeyspaceRequest + * @function getTypeUrl + * @memberof vtctldata.DeleteKeyspaceRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + DeleteKeyspaceRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.DeleteKeyspaceRequest"; + }; + + return DeleteKeyspaceRequest; + })(); + + vtctldata.DeleteKeyspaceResponse = (function() { + + /** + * Properties of a DeleteKeyspaceResponse. + * @memberof vtctldata + * @interface IDeleteKeyspaceResponse + */ + + /** + * Constructs a new DeleteKeyspaceResponse. 
+ * @memberof vtctldata + * @classdesc Represents a DeleteKeyspaceResponse. + * @implements IDeleteKeyspaceResponse + * @constructor + * @param {vtctldata.IDeleteKeyspaceResponse=} [properties] Properties to set + */ + function DeleteKeyspaceResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Creates a new DeleteKeyspaceResponse instance using the specified properties. + * @function create + * @memberof vtctldata.DeleteKeyspaceResponse + * @static + * @param {vtctldata.IDeleteKeyspaceResponse=} [properties] Properties to set + * @returns {vtctldata.DeleteKeyspaceResponse} DeleteKeyspaceResponse instance + */ + DeleteKeyspaceResponse.create = function create(properties) { + return new DeleteKeyspaceResponse(properties); + }; + + /** + * Encodes the specified DeleteKeyspaceResponse message. Does not implicitly {@link vtctldata.DeleteKeyspaceResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.DeleteKeyspaceResponse + * @static + * @param {vtctldata.IDeleteKeyspaceResponse} message DeleteKeyspaceResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteKeyspaceResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + return writer; + }; + + /** + * Encodes the specified DeleteKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteKeyspaceResponse.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.DeleteKeyspaceResponse + * @static + * @param {vtctldata.IDeleteKeyspaceResponse} message DeleteKeyspaceResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteKeyspaceResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a DeleteKeyspaceResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.DeleteKeyspaceResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.DeleteKeyspaceResponse} DeleteKeyspaceResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteKeyspaceResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteKeyspaceResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a DeleteKeyspaceResponse message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.DeleteKeyspaceResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.DeleteKeyspaceResponse} DeleteKeyspaceResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteKeyspaceResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a DeleteKeyspaceResponse message. + * @function verify + * @memberof vtctldata.DeleteKeyspaceResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + DeleteKeyspaceResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + return null; + }; + + /** + * Creates a DeleteKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.DeleteKeyspaceResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.DeleteKeyspaceResponse} DeleteKeyspaceResponse + */ + DeleteKeyspaceResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.DeleteKeyspaceResponse) + return object; + return new $root.vtctldata.DeleteKeyspaceResponse(); + }; + + /** + * Creates a plain object from a DeleteKeyspaceResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.DeleteKeyspaceResponse + * @static + * @param {vtctldata.DeleteKeyspaceResponse} message DeleteKeyspaceResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + DeleteKeyspaceResponse.toObject = function toObject() { + return {}; + }; + + /** + * Converts this DeleteKeyspaceResponse to JSON. + * @function toJSON + * @memberof vtctldata.DeleteKeyspaceResponse + * @instance + * @returns {Object.} JSON object + */ + DeleteKeyspaceResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for DeleteKeyspaceResponse + * @function getTypeUrl + * @memberof vtctldata.DeleteKeyspaceResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + DeleteKeyspaceResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.DeleteKeyspaceResponse"; + }; + + return DeleteKeyspaceResponse; + })(); + + vtctldata.DeleteShardsRequest = (function() { + + /** + * Properties of a DeleteShardsRequest. + * @memberof vtctldata + * @interface IDeleteShardsRequest + * @property {Array.|null} [shards] DeleteShardsRequest shards + * @property {boolean|null} [recursive] DeleteShardsRequest recursive + * @property {boolean|null} [even_if_serving] DeleteShardsRequest even_if_serving + * @property {boolean|null} [force] DeleteShardsRequest force + */ + + /** + * Constructs a new DeleteShardsRequest. + * @memberof vtctldata + * @classdesc Represents a DeleteShardsRequest. 
+ * @implements IDeleteShardsRequest + * @constructor + * @param {vtctldata.IDeleteShardsRequest=} [properties] Properties to set + */ + function DeleteShardsRequest(properties) { + this.shards = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * DeleteShardsRequest shards. + * @member {Array.} shards + * @memberof vtctldata.DeleteShardsRequest + * @instance + */ + DeleteShardsRequest.prototype.shards = $util.emptyArray; + + /** + * DeleteShardsRequest recursive. + * @member {boolean} recursive + * @memberof vtctldata.DeleteShardsRequest + * @instance + */ + DeleteShardsRequest.prototype.recursive = false; + + /** + * DeleteShardsRequest even_if_serving. + * @member {boolean} even_if_serving + * @memberof vtctldata.DeleteShardsRequest + * @instance + */ + DeleteShardsRequest.prototype.even_if_serving = false; + + /** + * DeleteShardsRequest force. + * @member {boolean} force + * @memberof vtctldata.DeleteShardsRequest + * @instance + */ + DeleteShardsRequest.prototype.force = false; + + /** + * Creates a new DeleteShardsRequest instance using the specified properties. + * @function create + * @memberof vtctldata.DeleteShardsRequest + * @static + * @param {vtctldata.IDeleteShardsRequest=} [properties] Properties to set + * @returns {vtctldata.DeleteShardsRequest} DeleteShardsRequest instance + */ + DeleteShardsRequest.create = function create(properties) { + return new DeleteShardsRequest(properties); + }; + + /** + * Encodes the specified DeleteShardsRequest message. Does not implicitly {@link vtctldata.DeleteShardsRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.DeleteShardsRequest + * @static + * @param {vtctldata.IDeleteShardsRequest} message DeleteShardsRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteShardsRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.shards != null && message.shards.length) + for (let i = 0; i < message.shards.length; ++i) + $root.vtctldata.Shard.encode(message.shards[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.recursive != null && Object.hasOwnProperty.call(message, "recursive")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.recursive); + if (message.even_if_serving != null && Object.hasOwnProperty.call(message, "even_if_serving")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.even_if_serving); + if (message.force != null && Object.hasOwnProperty.call(message, "force")) + writer.uint32(/* id 5, wireType 0 =*/40).bool(message.force); + return writer; + }; + + /** + * Encodes the specified DeleteShardsRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteShardsRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.DeleteShardsRequest + * @static + * @param {vtctldata.IDeleteShardsRequest} message DeleteShardsRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteShardsRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a DeleteShardsRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.DeleteShardsRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.DeleteShardsRequest} DeleteShardsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteShardsRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteShardsRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.shards && message.shards.length)) + message.shards = []; + message.shards.push($root.vtctldata.Shard.decode(reader, reader.uint32())); + break; + } + case 2: { + message.recursive = reader.bool(); + break; + } + case 4: { + message.even_if_serving = reader.bool(); + break; + } + case 5: { + message.force = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a DeleteShardsRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.DeleteShardsRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.DeleteShardsRequest} DeleteShardsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteShardsRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a DeleteShardsRequest message. 
+ * @function verify + * @memberof vtctldata.DeleteShardsRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + DeleteShardsRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.shards != null && message.hasOwnProperty("shards")) { + if (!Array.isArray(message.shards)) + return "shards: array expected"; + for (let i = 0; i < message.shards.length; ++i) { + let error = $root.vtctldata.Shard.verify(message.shards[i]); + if (error) + return "shards." + error; + } + } + if (message.recursive != null && message.hasOwnProperty("recursive")) + if (typeof message.recursive !== "boolean") + return "recursive: boolean expected"; + if (message.even_if_serving != null && message.hasOwnProperty("even_if_serving")) + if (typeof message.even_if_serving !== "boolean") + return "even_if_serving: boolean expected"; + if (message.force != null && message.hasOwnProperty("force")) + if (typeof message.force !== "boolean") + return "force: boolean expected"; + return null; + }; + + /** + * Creates a DeleteShardsRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.DeleteShardsRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.DeleteShardsRequest} DeleteShardsRequest + */ + DeleteShardsRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.DeleteShardsRequest) + return object; + let message = new $root.vtctldata.DeleteShardsRequest(); + if (object.shards) { + if (!Array.isArray(object.shards)) + throw TypeError(".vtctldata.DeleteShardsRequest.shards: array expected"); + message.shards = []; + for (let i = 0; i < object.shards.length; ++i) { + if (typeof object.shards[i] !== "object") + throw TypeError(".vtctldata.DeleteShardsRequest.shards: object expected"); + message.shards[i] = $root.vtctldata.Shard.fromObject(object.shards[i]); + } + } + if (object.recursive != null) + message.recursive = Boolean(object.recursive); + if (object.even_if_serving != null) + message.even_if_serving = Boolean(object.even_if_serving); + if (object.force != null) + message.force = Boolean(object.force); + return message; + }; + + /** + * Creates a plain object from a DeleteShardsRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.DeleteShardsRequest + * @static + * @param {vtctldata.DeleteShardsRequest} message DeleteShardsRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + DeleteShardsRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.shards = []; + if (options.defaults) { + object.recursive = false; + object.even_if_serving = false; + object.force = false; + } + if (message.shards && message.shards.length) { + object.shards = []; + for (let j = 0; j < message.shards.length; ++j) + object.shards[j] = $root.vtctldata.Shard.toObject(message.shards[j], options); + } + if (message.recursive != null && message.hasOwnProperty("recursive")) + object.recursive = message.recursive; + if (message.even_if_serving != null && message.hasOwnProperty("even_if_serving")) + object.even_if_serving = message.even_if_serving; + if (message.force != null && message.hasOwnProperty("force")) + object.force = message.force; + return object; + }; + + /** + * Converts this DeleteShardsRequest to JSON. 
+ * @function toJSON + * @memberof vtctldata.DeleteShardsRequest + * @instance + * @returns {Object.} JSON object + */ + DeleteShardsRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for DeleteShardsRequest + * @function getTypeUrl + * @memberof vtctldata.DeleteShardsRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + DeleteShardsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.DeleteShardsRequest"; + }; + + return DeleteShardsRequest; + })(); + + vtctldata.DeleteShardsResponse = (function() { + + /** + * Properties of a DeleteShardsResponse. + * @memberof vtctldata + * @interface IDeleteShardsResponse + */ + + /** + * Constructs a new DeleteShardsResponse. + * @memberof vtctldata + * @classdesc Represents a DeleteShardsResponse. + * @implements IDeleteShardsResponse + * @constructor + * @param {vtctldata.IDeleteShardsResponse=} [properties] Properties to set + */ + function DeleteShardsResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Creates a new DeleteShardsResponse instance using the specified properties. + * @function create + * @memberof vtctldata.DeleteShardsResponse + * @static + * @param {vtctldata.IDeleteShardsResponse=} [properties] Properties to set + * @returns {vtctldata.DeleteShardsResponse} DeleteShardsResponse instance + */ + DeleteShardsResponse.create = function create(properties) { + return new DeleteShardsResponse(properties); + }; + + /** + * Encodes the specified DeleteShardsResponse message. 
Does not implicitly {@link vtctldata.DeleteShardsResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.DeleteShardsResponse + * @static + * @param {vtctldata.IDeleteShardsResponse} message DeleteShardsResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteShardsResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + return writer; + }; + + /** + * Encodes the specified DeleteShardsResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteShardsResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.DeleteShardsResponse + * @static + * @param {vtctldata.IDeleteShardsResponse} message DeleteShardsResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteShardsResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a DeleteShardsResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.DeleteShardsResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.DeleteShardsResponse} DeleteShardsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteShardsResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.DeleteShardsResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a DeleteShardsResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.DeleteShardsResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.DeleteShardsResponse} DeleteShardsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteShardsResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a DeleteShardsResponse message. + * @function verify + * @memberof vtctldata.DeleteShardsResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + DeleteShardsResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + return null; + }; + + /** + * Creates a DeleteShardsResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.DeleteShardsResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.DeleteShardsResponse} DeleteShardsResponse + */ + DeleteShardsResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.DeleteShardsResponse) + return object; + return new $root.vtctldata.DeleteShardsResponse(); + }; + + /** + * Creates a plain object from a DeleteShardsResponse message. 
Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.DeleteShardsResponse + * @static + * @param {vtctldata.DeleteShardsResponse} message DeleteShardsResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + DeleteShardsResponse.toObject = function toObject() { + return {}; + }; + + /** + * Converts this DeleteShardsResponse to JSON. + * @function toJSON + * @memberof vtctldata.DeleteShardsResponse + * @instance + * @returns {Object.} JSON object + */ + DeleteShardsResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for DeleteShardsResponse + * @function getTypeUrl + * @memberof vtctldata.DeleteShardsResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + DeleteShardsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.DeleteShardsResponse"; + }; + + return DeleteShardsResponse; + })(); + + vtctldata.DeleteSrvVSchemaRequest = (function() { + + /** + * Properties of a DeleteSrvVSchemaRequest. + * @memberof vtctldata + * @interface IDeleteSrvVSchemaRequest + * @property {string|null} [cell] DeleteSrvVSchemaRequest cell + */ + + /** + * Constructs a new DeleteSrvVSchemaRequest. + * @memberof vtctldata + * @classdesc Represents a DeleteSrvVSchemaRequest. 
+ * @implements IDeleteSrvVSchemaRequest + * @constructor + * @param {vtctldata.IDeleteSrvVSchemaRequest=} [properties] Properties to set + */ + function DeleteSrvVSchemaRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * DeleteSrvVSchemaRequest cell. + * @member {string} cell + * @memberof vtctldata.DeleteSrvVSchemaRequest + * @instance + */ + DeleteSrvVSchemaRequest.prototype.cell = ""; + + /** + * Creates a new DeleteSrvVSchemaRequest instance using the specified properties. + * @function create + * @memberof vtctldata.DeleteSrvVSchemaRequest + * @static + * @param {vtctldata.IDeleteSrvVSchemaRequest=} [properties] Properties to set + * @returns {vtctldata.DeleteSrvVSchemaRequest} DeleteSrvVSchemaRequest instance + */ + DeleteSrvVSchemaRequest.create = function create(properties) { + return new DeleteSrvVSchemaRequest(properties); + }; + + /** + * Encodes the specified DeleteSrvVSchemaRequest message. Does not implicitly {@link vtctldata.DeleteSrvVSchemaRequest.verify|verify} messages. + * @function encode + * @memberof vtctldata.DeleteSrvVSchemaRequest + * @static + * @param {vtctldata.IDeleteSrvVSchemaRequest} message DeleteSrvVSchemaRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteSrvVSchemaRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.cell != null && Object.hasOwnProperty.call(message, "cell")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.cell); + return writer; + }; + + /** + * Encodes the specified DeleteSrvVSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteSrvVSchemaRequest.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.DeleteSrvVSchemaRequest + * @static + * @param {vtctldata.IDeleteSrvVSchemaRequest} message DeleteSrvVSchemaRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteSrvVSchemaRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a DeleteSrvVSchemaRequest message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.DeleteSrvVSchemaRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.DeleteSrvVSchemaRequest} DeleteSrvVSchemaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteSrvVSchemaRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteSrvVSchemaRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.cell = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a DeleteSrvVSchemaRequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.DeleteSrvVSchemaRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.DeleteSrvVSchemaRequest} DeleteSrvVSchemaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteSrvVSchemaRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a DeleteSrvVSchemaRequest message. + * @function verify + * @memberof vtctldata.DeleteSrvVSchemaRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + DeleteSrvVSchemaRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.cell != null && message.hasOwnProperty("cell")) + if (!$util.isString(message.cell)) + return "cell: string expected"; + return null; + }; + + /** + * Creates a DeleteSrvVSchemaRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.DeleteSrvVSchemaRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.DeleteSrvVSchemaRequest} DeleteSrvVSchemaRequest + */ + DeleteSrvVSchemaRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.DeleteSrvVSchemaRequest) + return object; + let message = new $root.vtctldata.DeleteSrvVSchemaRequest(); + if (object.cell != null) + message.cell = String(object.cell); + return message; + }; + + /** + * Creates a plain object from a DeleteSrvVSchemaRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.DeleteSrvVSchemaRequest + * @static + * @param {vtctldata.DeleteSrvVSchemaRequest} message DeleteSrvVSchemaRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + DeleteSrvVSchemaRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.cell = ""; + if (message.cell != null && message.hasOwnProperty("cell")) + object.cell = message.cell; + return object; + }; + + /** + * Converts this DeleteSrvVSchemaRequest to JSON. + * @function toJSON + * @memberof vtctldata.DeleteSrvVSchemaRequest + * @instance + * @returns {Object.} JSON object + */ + DeleteSrvVSchemaRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for DeleteSrvVSchemaRequest + * @function getTypeUrl + * @memberof vtctldata.DeleteSrvVSchemaRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + DeleteSrvVSchemaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.DeleteSrvVSchemaRequest"; + }; + + return DeleteSrvVSchemaRequest; + })(); + + vtctldata.DeleteSrvVSchemaResponse = (function() { + + /** + * Properties of a DeleteSrvVSchemaResponse. + * @memberof vtctldata + * @interface IDeleteSrvVSchemaResponse + */ + + /** + * Constructs a new DeleteSrvVSchemaResponse. + * @memberof vtctldata + * @classdesc Represents a DeleteSrvVSchemaResponse. 
+ * @implements IDeleteSrvVSchemaResponse + * @constructor + * @param {vtctldata.IDeleteSrvVSchemaResponse=} [properties] Properties to set + */ + function DeleteSrvVSchemaResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Creates a new DeleteSrvVSchemaResponse instance using the specified properties. + * @function create + * @memberof vtctldata.DeleteSrvVSchemaResponse + * @static + * @param {vtctldata.IDeleteSrvVSchemaResponse=} [properties] Properties to set + * @returns {vtctldata.DeleteSrvVSchemaResponse} DeleteSrvVSchemaResponse instance + */ + DeleteSrvVSchemaResponse.create = function create(properties) { + return new DeleteSrvVSchemaResponse(properties); + }; + + /** + * Encodes the specified DeleteSrvVSchemaResponse message. Does not implicitly {@link vtctldata.DeleteSrvVSchemaResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.DeleteSrvVSchemaResponse + * @static + * @param {vtctldata.IDeleteSrvVSchemaResponse} message DeleteSrvVSchemaResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteSrvVSchemaResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + return writer; + }; + + /** + * Encodes the specified DeleteSrvVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteSrvVSchemaResponse.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.DeleteSrvVSchemaResponse + * @static + * @param {vtctldata.IDeleteSrvVSchemaResponse} message DeleteSrvVSchemaResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteSrvVSchemaResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a DeleteSrvVSchemaResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.DeleteSrvVSchemaResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.DeleteSrvVSchemaResponse} DeleteSrvVSchemaResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteSrvVSchemaResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteSrvVSchemaResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a DeleteSrvVSchemaResponse message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.DeleteSrvVSchemaResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.DeleteSrvVSchemaResponse} DeleteSrvVSchemaResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteSrvVSchemaResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a DeleteSrvVSchemaResponse message. + * @function verify + * @memberof vtctldata.DeleteSrvVSchemaResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + DeleteSrvVSchemaResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + return null; + }; + + /** + * Creates a DeleteSrvVSchemaResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.DeleteSrvVSchemaResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.DeleteSrvVSchemaResponse} DeleteSrvVSchemaResponse + */ + DeleteSrvVSchemaResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.DeleteSrvVSchemaResponse) + return object; + return new $root.vtctldata.DeleteSrvVSchemaResponse(); + }; + + /** + * Creates a plain object from a DeleteSrvVSchemaResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.DeleteSrvVSchemaResponse + * @static + * @param {vtctldata.DeleteSrvVSchemaResponse} message DeleteSrvVSchemaResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + DeleteSrvVSchemaResponse.toObject = function toObject() { + return {}; + }; + + /** + * Converts this DeleteSrvVSchemaResponse to JSON. + * @function toJSON + * @memberof vtctldata.DeleteSrvVSchemaResponse + * @instance + * @returns {Object.} JSON object + */ + DeleteSrvVSchemaResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for DeleteSrvVSchemaResponse + * @function getTypeUrl + * @memberof vtctldata.DeleteSrvVSchemaResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + DeleteSrvVSchemaResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.DeleteSrvVSchemaResponse"; + }; + + return DeleteSrvVSchemaResponse; + })(); + + vtctldata.DeleteTabletsRequest = (function() { + + /** + * Properties of a DeleteTabletsRequest. + * @memberof vtctldata + * @interface IDeleteTabletsRequest + * @property {Array.|null} [tablet_aliases] DeleteTabletsRequest tablet_aliases + * @property {boolean|null} [allow_primary] DeleteTabletsRequest allow_primary + */ + + /** + * Constructs a new DeleteTabletsRequest. + * @memberof vtctldata + * @classdesc Represents a DeleteTabletsRequest. 
+ * @implements IDeleteTabletsRequest + * @constructor + * @param {vtctldata.IDeleteTabletsRequest=} [properties] Properties to set + */ + function DeleteTabletsRequest(properties) { + this.tablet_aliases = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * DeleteTabletsRequest tablet_aliases. + * @member {Array.} tablet_aliases + * @memberof vtctldata.DeleteTabletsRequest + * @instance + */ + DeleteTabletsRequest.prototype.tablet_aliases = $util.emptyArray; + + /** + * DeleteTabletsRequest allow_primary. + * @member {boolean} allow_primary + * @memberof vtctldata.DeleteTabletsRequest + * @instance + */ + DeleteTabletsRequest.prototype.allow_primary = false; + + /** + * Creates a new DeleteTabletsRequest instance using the specified properties. + * @function create + * @memberof vtctldata.DeleteTabletsRequest + * @static + * @param {vtctldata.IDeleteTabletsRequest=} [properties] Properties to set + * @returns {vtctldata.DeleteTabletsRequest} DeleteTabletsRequest instance + */ + DeleteTabletsRequest.create = function create(properties) { + return new DeleteTabletsRequest(properties); + }; + + /** + * Encodes the specified DeleteTabletsRequest message. Does not implicitly {@link vtctldata.DeleteTabletsRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.DeleteTabletsRequest + * @static + * @param {vtctldata.IDeleteTabletsRequest} message DeleteTabletsRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteTabletsRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.tablet_aliases != null && message.tablet_aliases.length) + for (let i = 0; i < message.tablet_aliases.length; ++i) + $root.topodata.TabletAlias.encode(message.tablet_aliases[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.allow_primary != null && Object.hasOwnProperty.call(message, "allow_primary")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.allow_primary); + return writer; + }; + + /** + * Encodes the specified DeleteTabletsRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteTabletsRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.DeleteTabletsRequest + * @static + * @param {vtctldata.IDeleteTabletsRequest} message DeleteTabletsRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteTabletsRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a DeleteTabletsRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.DeleteTabletsRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.DeleteTabletsRequest} DeleteTabletsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteTabletsRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteTabletsRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.tablet_aliases && message.tablet_aliases.length)) + message.tablet_aliases = []; + message.tablet_aliases.push($root.topodata.TabletAlias.decode(reader, reader.uint32())); + break; + } + case 2: { + message.allow_primary = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a DeleteTabletsRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.DeleteTabletsRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.DeleteTabletsRequest} DeleteTabletsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteTabletsRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a DeleteTabletsRequest message. 
+ * @function verify + * @memberof vtctldata.DeleteTabletsRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + DeleteTabletsRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.tablet_aliases != null && message.hasOwnProperty("tablet_aliases")) { + if (!Array.isArray(message.tablet_aliases)) + return "tablet_aliases: array expected"; + for (let i = 0; i < message.tablet_aliases.length; ++i) { + let error = $root.topodata.TabletAlias.verify(message.tablet_aliases[i]); + if (error) + return "tablet_aliases." + error; + } + } + if (message.allow_primary != null && message.hasOwnProperty("allow_primary")) + if (typeof message.allow_primary !== "boolean") + return "allow_primary: boolean expected"; + return null; + }; + + /** + * Creates a DeleteTabletsRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.DeleteTabletsRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.DeleteTabletsRequest} DeleteTabletsRequest + */ + DeleteTabletsRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.DeleteTabletsRequest) + return object; + let message = new $root.vtctldata.DeleteTabletsRequest(); + if (object.tablet_aliases) { + if (!Array.isArray(object.tablet_aliases)) + throw TypeError(".vtctldata.DeleteTabletsRequest.tablet_aliases: array expected"); + message.tablet_aliases = []; + for (let i = 0; i < object.tablet_aliases.length; ++i) { + if (typeof object.tablet_aliases[i] !== "object") + throw TypeError(".vtctldata.DeleteTabletsRequest.tablet_aliases: object expected"); + message.tablet_aliases[i] = $root.topodata.TabletAlias.fromObject(object.tablet_aliases[i]); + } + } + if (object.allow_primary != null) + message.allow_primary = Boolean(object.allow_primary); + return message; + }; + + /** + * Creates a plain object from a DeleteTabletsRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.DeleteTabletsRequest + * @static + * @param {vtctldata.DeleteTabletsRequest} message DeleteTabletsRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + DeleteTabletsRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.tablet_aliases = []; + if (options.defaults) + object.allow_primary = false; + if (message.tablet_aliases && message.tablet_aliases.length) { + object.tablet_aliases = []; + for (let j = 0; j < message.tablet_aliases.length; ++j) + object.tablet_aliases[j] = $root.topodata.TabletAlias.toObject(message.tablet_aliases[j], options); + } + if (message.allow_primary != null && message.hasOwnProperty("allow_primary")) + object.allow_primary = message.allow_primary; + return object; + }; + + /** + * Converts this DeleteTabletsRequest to JSON. + * @function toJSON + * @memberof vtctldata.DeleteTabletsRequest + * @instance + * @returns {Object.} JSON object + */ + DeleteTabletsRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for DeleteTabletsRequest + * @function getTypeUrl + * @memberof vtctldata.DeleteTabletsRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + DeleteTabletsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.DeleteTabletsRequest"; + }; + + return DeleteTabletsRequest; + })(); + + vtctldata.DeleteTabletsResponse = (function() { + + /** + * Properties of a DeleteTabletsResponse. 
+ * @memberof vtctldata + * @interface IDeleteTabletsResponse + */ + + /** + * Constructs a new DeleteTabletsResponse. + * @memberof vtctldata + * @classdesc Represents a DeleteTabletsResponse. + * @implements IDeleteTabletsResponse + * @constructor + * @param {vtctldata.IDeleteTabletsResponse=} [properties] Properties to set + */ + function DeleteTabletsResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Creates a new DeleteTabletsResponse instance using the specified properties. + * @function create + * @memberof vtctldata.DeleteTabletsResponse + * @static + * @param {vtctldata.IDeleteTabletsResponse=} [properties] Properties to set + * @returns {vtctldata.DeleteTabletsResponse} DeleteTabletsResponse instance + */ + DeleteTabletsResponse.create = function create(properties) { + return new DeleteTabletsResponse(properties); + }; + + /** + * Encodes the specified DeleteTabletsResponse message. Does not implicitly {@link vtctldata.DeleteTabletsResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.DeleteTabletsResponse + * @static + * @param {vtctldata.IDeleteTabletsResponse} message DeleteTabletsResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteTabletsResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + return writer; + }; + + /** + * Encodes the specified DeleteTabletsResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteTabletsResponse.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.DeleteTabletsResponse + * @static + * @param {vtctldata.IDeleteTabletsResponse} message DeleteTabletsResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + DeleteTabletsResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a DeleteTabletsResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.DeleteTabletsResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.DeleteTabletsResponse} DeleteTabletsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteTabletsResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteTabletsResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a DeleteTabletsResponse message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.DeleteTabletsResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.DeleteTabletsResponse} DeleteTabletsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + DeleteTabletsResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a DeleteTabletsResponse message. + * @function verify + * @memberof vtctldata.DeleteTabletsResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + DeleteTabletsResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + return null; + }; + + /** + * Creates a DeleteTabletsResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.DeleteTabletsResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.DeleteTabletsResponse} DeleteTabletsResponse + */ + DeleteTabletsResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.DeleteTabletsResponse) + return object; + return new $root.vtctldata.DeleteTabletsResponse(); + }; + + /** + * Creates a plain object from a DeleteTabletsResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.DeleteTabletsResponse + * @static + * @param {vtctldata.DeleteTabletsResponse} message DeleteTabletsResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + DeleteTabletsResponse.toObject = function toObject() { + return {}; + }; + + /** + * Converts this DeleteTabletsResponse to JSON. + * @function toJSON + * @memberof vtctldata.DeleteTabletsResponse + * @instance + * @returns {Object.} JSON object + */ + DeleteTabletsResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for DeleteTabletsResponse + * @function getTypeUrl + * @memberof vtctldata.DeleteTabletsResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + DeleteTabletsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.DeleteTabletsResponse"; + }; + + return DeleteTabletsResponse; + })(); + + vtctldata.EmergencyReparentShardRequest = (function() { + + /** + * Properties of an EmergencyReparentShardRequest. 
+ * @memberof vtctldata + * @interface IEmergencyReparentShardRequest + * @property {string|null} [keyspace] EmergencyReparentShardRequest keyspace + * @property {string|null} [shard] EmergencyReparentShardRequest shard + * @property {topodata.ITabletAlias|null} [new_primary] EmergencyReparentShardRequest new_primary + * @property {Array.|null} [ignore_replicas] EmergencyReparentShardRequest ignore_replicas + * @property {vttime.IDuration|null} [wait_replicas_timeout] EmergencyReparentShardRequest wait_replicas_timeout + * @property {boolean|null} [prevent_cross_cell_promotion] EmergencyReparentShardRequest prevent_cross_cell_promotion + * @property {boolean|null} [wait_for_all_tablets] EmergencyReparentShardRequest wait_for_all_tablets + */ + + /** + * Constructs a new EmergencyReparentShardRequest. + * @memberof vtctldata + * @classdesc Represents an EmergencyReparentShardRequest. + * @implements IEmergencyReparentShardRequest + * @constructor + * @param {vtctldata.IEmergencyReparentShardRequest=} [properties] Properties to set + */ + function EmergencyReparentShardRequest(properties) { + this.ignore_replicas = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * EmergencyReparentShardRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.EmergencyReparentShardRequest + * @instance + */ + EmergencyReparentShardRequest.prototype.keyspace = ""; + + /** + * EmergencyReparentShardRequest shard. + * @member {string} shard + * @memberof vtctldata.EmergencyReparentShardRequest + * @instance + */ + EmergencyReparentShardRequest.prototype.shard = ""; + + /** + * EmergencyReparentShardRequest new_primary. 
+ * @member {topodata.ITabletAlias|null|undefined} new_primary + * @memberof vtctldata.EmergencyReparentShardRequest + * @instance + */ + EmergencyReparentShardRequest.prototype.new_primary = null; + + /** + * EmergencyReparentShardRequest ignore_replicas. + * @member {Array.} ignore_replicas + * @memberof vtctldata.EmergencyReparentShardRequest + * @instance + */ + EmergencyReparentShardRequest.prototype.ignore_replicas = $util.emptyArray; + + /** + * EmergencyReparentShardRequest wait_replicas_timeout. + * @member {vttime.IDuration|null|undefined} wait_replicas_timeout + * @memberof vtctldata.EmergencyReparentShardRequest + * @instance + */ + EmergencyReparentShardRequest.prototype.wait_replicas_timeout = null; + + /** + * EmergencyReparentShardRequest prevent_cross_cell_promotion. + * @member {boolean} prevent_cross_cell_promotion + * @memberof vtctldata.EmergencyReparentShardRequest + * @instance + */ + EmergencyReparentShardRequest.prototype.prevent_cross_cell_promotion = false; + + /** + * EmergencyReparentShardRequest wait_for_all_tablets. + * @member {boolean} wait_for_all_tablets + * @memberof vtctldata.EmergencyReparentShardRequest + * @instance + */ + EmergencyReparentShardRequest.prototype.wait_for_all_tablets = false; + + /** + * Creates a new EmergencyReparentShardRequest instance using the specified properties. + * @function create + * @memberof vtctldata.EmergencyReparentShardRequest + * @static + * @param {vtctldata.IEmergencyReparentShardRequest=} [properties] Properties to set + * @returns {vtctldata.EmergencyReparentShardRequest} EmergencyReparentShardRequest instance + */ + EmergencyReparentShardRequest.create = function create(properties) { + return new EmergencyReparentShardRequest(properties); + }; + + /** + * Encodes the specified EmergencyReparentShardRequest message. Does not implicitly {@link vtctldata.EmergencyReparentShardRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.EmergencyReparentShardRequest + * @static + * @param {vtctldata.IEmergencyReparentShardRequest} message EmergencyReparentShardRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + EmergencyReparentShardRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.new_primary != null && Object.hasOwnProperty.call(message, "new_primary")) + $root.topodata.TabletAlias.encode(message.new_primary, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.ignore_replicas != null && message.ignore_replicas.length) + for (let i = 0; i < message.ignore_replicas.length; ++i) + $root.topodata.TabletAlias.encode(message.ignore_replicas[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.wait_replicas_timeout != null && Object.hasOwnProperty.call(message, "wait_replicas_timeout")) + $root.vttime.Duration.encode(message.wait_replicas_timeout, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.prevent_cross_cell_promotion != null && Object.hasOwnProperty.call(message, "prevent_cross_cell_promotion")) + writer.uint32(/* id 6, wireType 0 =*/48).bool(message.prevent_cross_cell_promotion); + if (message.wait_for_all_tablets != null && Object.hasOwnProperty.call(message, "wait_for_all_tablets")) + writer.uint32(/* id 7, wireType 0 =*/56).bool(message.wait_for_all_tablets); + return writer; + }; + + /** + * Encodes the specified EmergencyReparentShardRequest message, length delimited. 
Does not implicitly {@link vtctldata.EmergencyReparentShardRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.EmergencyReparentShardRequest + * @static + * @param {vtctldata.IEmergencyReparentShardRequest} message EmergencyReparentShardRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + EmergencyReparentShardRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an EmergencyReparentShardRequest message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.EmergencyReparentShardRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.EmergencyReparentShardRequest} EmergencyReparentShardRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + EmergencyReparentShardRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.EmergencyReparentShardRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyspace = reader.string(); + break; + } + case 2: { + message.shard = reader.string(); + break; + } + case 3: { + message.new_primary = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 4: { + if (!(message.ignore_replicas && message.ignore_replicas.length)) + message.ignore_replicas = []; + message.ignore_replicas.push($root.topodata.TabletAlias.decode(reader, reader.uint32())); + break; + } + case 5: { + message.wait_replicas_timeout = $root.vttime.Duration.decode(reader, reader.uint32()); + break; + } + case 6: { + message.prevent_cross_cell_promotion = reader.bool(); + break; + } + case 7: { + message.wait_for_all_tablets = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an EmergencyReparentShardRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.EmergencyReparentShardRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.EmergencyReparentShardRequest} EmergencyReparentShardRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + EmergencyReparentShardRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an EmergencyReparentShardRequest message. 
+ * @function verify + * @memberof vtctldata.EmergencyReparentShardRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + EmergencyReparentShardRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.new_primary != null && message.hasOwnProperty("new_primary")) { + let error = $root.topodata.TabletAlias.verify(message.new_primary); + if (error) + return "new_primary." + error; + } + if (message.ignore_replicas != null && message.hasOwnProperty("ignore_replicas")) { + if (!Array.isArray(message.ignore_replicas)) + return "ignore_replicas: array expected"; + for (let i = 0; i < message.ignore_replicas.length; ++i) { + let error = $root.topodata.TabletAlias.verify(message.ignore_replicas[i]); + if (error) + return "ignore_replicas." + error; + } + } + if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout")) { + let error = $root.vttime.Duration.verify(message.wait_replicas_timeout); + if (error) + return "wait_replicas_timeout." 
+ error; + } + if (message.prevent_cross_cell_promotion != null && message.hasOwnProperty("prevent_cross_cell_promotion")) + if (typeof message.prevent_cross_cell_promotion !== "boolean") + return "prevent_cross_cell_promotion: boolean expected"; + if (message.wait_for_all_tablets != null && message.hasOwnProperty("wait_for_all_tablets")) + if (typeof message.wait_for_all_tablets !== "boolean") + return "wait_for_all_tablets: boolean expected"; + return null; + }; + + /** + * Creates an EmergencyReparentShardRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.EmergencyReparentShardRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.EmergencyReparentShardRequest} EmergencyReparentShardRequest + */ + EmergencyReparentShardRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.EmergencyReparentShardRequest) + return object; + let message = new $root.vtctldata.EmergencyReparentShardRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + if (object.new_primary != null) { + if (typeof object.new_primary !== "object") + throw TypeError(".vtctldata.EmergencyReparentShardRequest.new_primary: object expected"); + message.new_primary = $root.topodata.TabletAlias.fromObject(object.new_primary); + } + if (object.ignore_replicas) { + if (!Array.isArray(object.ignore_replicas)) + throw TypeError(".vtctldata.EmergencyReparentShardRequest.ignore_replicas: array expected"); + message.ignore_replicas = []; + for (let i = 0; i < object.ignore_replicas.length; ++i) { + if (typeof object.ignore_replicas[i] !== "object") + throw TypeError(".vtctldata.EmergencyReparentShardRequest.ignore_replicas: object expected"); + message.ignore_replicas[i] = $root.topodata.TabletAlias.fromObject(object.ignore_replicas[i]); + } + } + if 
(object.wait_replicas_timeout != null) { + if (typeof object.wait_replicas_timeout !== "object") + throw TypeError(".vtctldata.EmergencyReparentShardRequest.wait_replicas_timeout: object expected"); + message.wait_replicas_timeout = $root.vttime.Duration.fromObject(object.wait_replicas_timeout); + } + if (object.prevent_cross_cell_promotion != null) + message.prevent_cross_cell_promotion = Boolean(object.prevent_cross_cell_promotion); + if (object.wait_for_all_tablets != null) + message.wait_for_all_tablets = Boolean(object.wait_for_all_tablets); + return message; + }; + + /** + * Creates a plain object from an EmergencyReparentShardRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.EmergencyReparentShardRequest + * @static + * @param {vtctldata.EmergencyReparentShardRequest} message EmergencyReparentShardRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + EmergencyReparentShardRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.ignore_replicas = []; + if (options.defaults) { + object.keyspace = ""; + object.shard = ""; + object.new_primary = null; + object.wait_replicas_timeout = null; + object.prevent_cross_cell_promotion = false; + object.wait_for_all_tablets = false; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.new_primary != null && message.hasOwnProperty("new_primary")) + object.new_primary = $root.topodata.TabletAlias.toObject(message.new_primary, options); + if (message.ignore_replicas && message.ignore_replicas.length) { + object.ignore_replicas = []; + for (let j = 0; j < message.ignore_replicas.length; ++j) + object.ignore_replicas[j] = 
$root.topodata.TabletAlias.toObject(message.ignore_replicas[j], options); + } + if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout")) + object.wait_replicas_timeout = $root.vttime.Duration.toObject(message.wait_replicas_timeout, options); + if (message.prevent_cross_cell_promotion != null && message.hasOwnProperty("prevent_cross_cell_promotion")) + object.prevent_cross_cell_promotion = message.prevent_cross_cell_promotion; + if (message.wait_for_all_tablets != null && message.hasOwnProperty("wait_for_all_tablets")) + object.wait_for_all_tablets = message.wait_for_all_tablets; + return object; + }; + + /** + * Converts this EmergencyReparentShardRequest to JSON. + * @function toJSON + * @memberof vtctldata.EmergencyReparentShardRequest + * @instance + * @returns {Object.} JSON object + */ + EmergencyReparentShardRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for EmergencyReparentShardRequest + * @function getTypeUrl + * @memberof vtctldata.EmergencyReparentShardRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + EmergencyReparentShardRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.EmergencyReparentShardRequest"; + }; + + return EmergencyReparentShardRequest; + })(); + + vtctldata.EmergencyReparentShardResponse = (function() { + + /** + * Properties of an EmergencyReparentShardResponse. 
+ * @memberof vtctldata + * @interface IEmergencyReparentShardResponse + * @property {string|null} [keyspace] EmergencyReparentShardResponse keyspace + * @property {string|null} [shard] EmergencyReparentShardResponse shard + * @property {topodata.ITabletAlias|null} [promoted_primary] EmergencyReparentShardResponse promoted_primary + * @property {Array.|null} [events] EmergencyReparentShardResponse events + */ + + /** + * Constructs a new EmergencyReparentShardResponse. + * @memberof vtctldata + * @classdesc Represents an EmergencyReparentShardResponse. + * @implements IEmergencyReparentShardResponse + * @constructor + * @param {vtctldata.IEmergencyReparentShardResponse=} [properties] Properties to set + */ + function EmergencyReparentShardResponse(properties) { + this.events = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * EmergencyReparentShardResponse keyspace. + * @member {string} keyspace + * @memberof vtctldata.EmergencyReparentShardResponse + * @instance + */ + EmergencyReparentShardResponse.prototype.keyspace = ""; + + /** + * EmergencyReparentShardResponse shard. + * @member {string} shard + * @memberof vtctldata.EmergencyReparentShardResponse + * @instance + */ + EmergencyReparentShardResponse.prototype.shard = ""; + + /** + * EmergencyReparentShardResponse promoted_primary. + * @member {topodata.ITabletAlias|null|undefined} promoted_primary + * @memberof vtctldata.EmergencyReparentShardResponse + * @instance + */ + EmergencyReparentShardResponse.prototype.promoted_primary = null; + + /** + * EmergencyReparentShardResponse events. + * @member {Array.} events + * @memberof vtctldata.EmergencyReparentShardResponse + * @instance + */ + EmergencyReparentShardResponse.prototype.events = $util.emptyArray; + + /** + * Creates a new EmergencyReparentShardResponse instance using the specified properties. 
+ * @function create + * @memberof vtctldata.EmergencyReparentShardResponse + * @static + * @param {vtctldata.IEmergencyReparentShardResponse=} [properties] Properties to set + * @returns {vtctldata.EmergencyReparentShardResponse} EmergencyReparentShardResponse instance + */ + EmergencyReparentShardResponse.create = function create(properties) { + return new EmergencyReparentShardResponse(properties); + }; + + /** + * Encodes the specified EmergencyReparentShardResponse message. Does not implicitly {@link vtctldata.EmergencyReparentShardResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.EmergencyReparentShardResponse + * @static + * @param {vtctldata.IEmergencyReparentShardResponse} message EmergencyReparentShardResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + EmergencyReparentShardResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.promoted_primary != null && Object.hasOwnProperty.call(message, "promoted_primary")) + $root.topodata.TabletAlias.encode(message.promoted_primary, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.events != null && message.events.length) + for (let i = 0; i < message.events.length; ++i) + $root.logutil.Event.encode(message.events[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified EmergencyReparentShardResponse message, length delimited. Does not implicitly {@link vtctldata.EmergencyReparentShardResponse.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.EmergencyReparentShardResponse + * @static + * @param {vtctldata.IEmergencyReparentShardResponse} message EmergencyReparentShardResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + EmergencyReparentShardResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an EmergencyReparentShardResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.EmergencyReparentShardResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.EmergencyReparentShardResponse} EmergencyReparentShardResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + EmergencyReparentShardResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.EmergencyReparentShardResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyspace = reader.string(); + break; + } + case 2: { + message.shard = reader.string(); + break; + } + case 3: { + message.promoted_primary = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 4: { + if (!(message.events && message.events.length)) + message.events = []; + message.events.push($root.logutil.Event.decode(reader, reader.uint32())); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an EmergencyReparentShardResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.EmergencyReparentShardResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.EmergencyReparentShardResponse} EmergencyReparentShardResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + EmergencyReparentShardResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an EmergencyReparentShardResponse message. 
+ * @function verify + * @memberof vtctldata.EmergencyReparentShardResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + EmergencyReparentShardResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.promoted_primary != null && message.hasOwnProperty("promoted_primary")) { + let error = $root.topodata.TabletAlias.verify(message.promoted_primary); + if (error) + return "promoted_primary." + error; + } + if (message.events != null && message.hasOwnProperty("events")) { + if (!Array.isArray(message.events)) + return "events: array expected"; + for (let i = 0; i < message.events.length; ++i) { + let error = $root.logutil.Event.verify(message.events[i]); + if (error) + return "events." + error; + } + } + return null; + }; + + /** + * Creates an EmergencyReparentShardResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.EmergencyReparentShardResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.EmergencyReparentShardResponse} EmergencyReparentShardResponse + */ + EmergencyReparentShardResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.EmergencyReparentShardResponse) + return object; + let message = new $root.vtctldata.EmergencyReparentShardResponse(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + if (object.promoted_primary != null) { + if (typeof object.promoted_primary !== "object") + throw TypeError(".vtctldata.EmergencyReparentShardResponse.promoted_primary: object expected"); + message.promoted_primary = $root.topodata.TabletAlias.fromObject(object.promoted_primary); + } + if (object.events) { + if (!Array.isArray(object.events)) + throw TypeError(".vtctldata.EmergencyReparentShardResponse.events: array expected"); + message.events = []; + for (let i = 0; i < object.events.length; ++i) { + if (typeof object.events[i] !== "object") + throw TypeError(".vtctldata.EmergencyReparentShardResponse.events: object expected"); + message.events[i] = $root.logutil.Event.fromObject(object.events[i]); + } + } + return message; + }; + + /** + * Creates a plain object from an EmergencyReparentShardResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.EmergencyReparentShardResponse + * @static + * @param {vtctldata.EmergencyReparentShardResponse} message EmergencyReparentShardResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + EmergencyReparentShardResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.events = []; + if (options.defaults) { + object.keyspace = ""; + object.shard = ""; + object.promoted_primary = null; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.promoted_primary != null && message.hasOwnProperty("promoted_primary")) + object.promoted_primary = $root.topodata.TabletAlias.toObject(message.promoted_primary, options); + if (message.events && message.events.length) { + object.events = []; + for (let j = 0; j < message.events.length; ++j) + object.events[j] = $root.logutil.Event.toObject(message.events[j], options); + } + return object; + }; + + /** + * Converts this EmergencyReparentShardResponse to JSON. 
+ * @function toJSON + * @memberof vtctldata.EmergencyReparentShardResponse + * @instance + * @returns {Object.} JSON object + */ + EmergencyReparentShardResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for EmergencyReparentShardResponse + * @function getTypeUrl + * @memberof vtctldata.EmergencyReparentShardResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + EmergencyReparentShardResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.EmergencyReparentShardResponse"; + }; + + return EmergencyReparentShardResponse; + })(); + + vtctldata.ExecuteFetchAsAppRequest = (function() { + + /** + * Properties of an ExecuteFetchAsAppRequest. + * @memberof vtctldata + * @interface IExecuteFetchAsAppRequest + * @property {topodata.ITabletAlias|null} [tablet_alias] ExecuteFetchAsAppRequest tablet_alias + * @property {string|null} [query] ExecuteFetchAsAppRequest query + * @property {number|Long|null} [max_rows] ExecuteFetchAsAppRequest max_rows + * @property {boolean|null} [use_pool] ExecuteFetchAsAppRequest use_pool + */ + + /** + * Constructs a new ExecuteFetchAsAppRequest. + * @memberof vtctldata + * @classdesc Represents an ExecuteFetchAsAppRequest. + * @implements IExecuteFetchAsAppRequest + * @constructor + * @param {vtctldata.IExecuteFetchAsAppRequest=} [properties] Properties to set + */ + function ExecuteFetchAsAppRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ExecuteFetchAsAppRequest tablet_alias. 
+ * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.ExecuteFetchAsAppRequest + * @instance + */ + ExecuteFetchAsAppRequest.prototype.tablet_alias = null; + + /** + * ExecuteFetchAsAppRequest query. + * @member {string} query + * @memberof vtctldata.ExecuteFetchAsAppRequest + * @instance + */ + ExecuteFetchAsAppRequest.prototype.query = ""; + + /** + * ExecuteFetchAsAppRequest max_rows. + * @member {number|Long} max_rows + * @memberof vtctldata.ExecuteFetchAsAppRequest + * @instance + */ + ExecuteFetchAsAppRequest.prototype.max_rows = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * ExecuteFetchAsAppRequest use_pool. + * @member {boolean} use_pool + * @memberof vtctldata.ExecuteFetchAsAppRequest + * @instance + */ + ExecuteFetchAsAppRequest.prototype.use_pool = false; + + /** + * Creates a new ExecuteFetchAsAppRequest instance using the specified properties. + * @function create + * @memberof vtctldata.ExecuteFetchAsAppRequest + * @static + * @param {vtctldata.IExecuteFetchAsAppRequest=} [properties] Properties to set + * @returns {vtctldata.ExecuteFetchAsAppRequest} ExecuteFetchAsAppRequest instance + */ + ExecuteFetchAsAppRequest.create = function create(properties) { + return new ExecuteFetchAsAppRequest(properties); + }; + + /** + * Encodes the specified ExecuteFetchAsAppRequest message. Does not implicitly {@link vtctldata.ExecuteFetchAsAppRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.ExecuteFetchAsAppRequest + * @static + * @param {vtctldata.IExecuteFetchAsAppRequest} message ExecuteFetchAsAppRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteFetchAsAppRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.query != null && Object.hasOwnProperty.call(message, "query")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.query); + if (message.max_rows != null && Object.hasOwnProperty.call(message, "max_rows")) + writer.uint32(/* id 3, wireType 0 =*/24).int64(message.max_rows); + if (message.use_pool != null && Object.hasOwnProperty.call(message, "use_pool")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.use_pool); + return writer; + }; + + /** + * Encodes the specified ExecuteFetchAsAppRequest message, length delimited. Does not implicitly {@link vtctldata.ExecuteFetchAsAppRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ExecuteFetchAsAppRequest + * @static + * @param {vtctldata.IExecuteFetchAsAppRequest} message ExecuteFetchAsAppRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteFetchAsAppRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ExecuteFetchAsAppRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.ExecuteFetchAsAppRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ExecuteFetchAsAppRequest} ExecuteFetchAsAppRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteFetchAsAppRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ExecuteFetchAsAppRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 2: { + message.query = reader.string(); + break; + } + case 3: { + message.max_rows = reader.int64(); + break; + } + case 4: { + message.use_pool = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ExecuteFetchAsAppRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.ExecuteFetchAsAppRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ExecuteFetchAsAppRequest} ExecuteFetchAsAppRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteFetchAsAppRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ExecuteFetchAsAppRequest message. 
+ * @function verify + * @memberof vtctldata.ExecuteFetchAsAppRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ExecuteFetchAsAppRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; + } + if (message.query != null && message.hasOwnProperty("query")) + if (!$util.isString(message.query)) + return "query: string expected"; + if (message.max_rows != null && message.hasOwnProperty("max_rows")) + if (!$util.isInteger(message.max_rows) && !(message.max_rows && $util.isInteger(message.max_rows.low) && $util.isInteger(message.max_rows.high))) + return "max_rows: integer|Long expected"; + if (message.use_pool != null && message.hasOwnProperty("use_pool")) + if (typeof message.use_pool !== "boolean") + return "use_pool: boolean expected"; + return null; + }; + + /** + * Creates an ExecuteFetchAsAppRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.ExecuteFetchAsAppRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ExecuteFetchAsAppRequest} ExecuteFetchAsAppRequest + */ + ExecuteFetchAsAppRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ExecuteFetchAsAppRequest) + return object; + let message = new $root.vtctldata.ExecuteFetchAsAppRequest(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.ExecuteFetchAsAppRequest.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } + if (object.query != null) + message.query = String(object.query); + if (object.max_rows != null) + if ($util.Long) + (message.max_rows = $util.Long.fromValue(object.max_rows)).unsigned = false; + else if (typeof object.max_rows === "string") + message.max_rows = parseInt(object.max_rows, 10); + else if (typeof object.max_rows === "number") + message.max_rows = object.max_rows; + else if (typeof object.max_rows === "object") + message.max_rows = new $util.LongBits(object.max_rows.low >>> 0, object.max_rows.high >>> 0).toNumber(); + if (object.use_pool != null) + message.use_pool = Boolean(object.use_pool); + return message; + }; + + /** + * Creates a plain object from an ExecuteFetchAsAppRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.ExecuteFetchAsAppRequest + * @static + * @param {vtctldata.ExecuteFetchAsAppRequest} message ExecuteFetchAsAppRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ExecuteFetchAsAppRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.tablet_alias = null; + object.query = ""; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.max_rows = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.max_rows = options.longs === String ? "0" : 0; + object.use_pool = false; + } + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (message.query != null && message.hasOwnProperty("query")) + object.query = message.query; + if (message.max_rows != null && message.hasOwnProperty("max_rows")) + if (typeof message.max_rows === "number") + object.max_rows = options.longs === String ? String(message.max_rows) : message.max_rows; + else + object.max_rows = options.longs === String ? $util.Long.prototype.toString.call(message.max_rows) : options.longs === Number ? new $util.LongBits(message.max_rows.low >>> 0, message.max_rows.high >>> 0).toNumber() : message.max_rows; + if (message.use_pool != null && message.hasOwnProperty("use_pool")) + object.use_pool = message.use_pool; + return object; + }; + + /** + * Converts this ExecuteFetchAsAppRequest to JSON. 
+ * @function toJSON + * @memberof vtctldata.ExecuteFetchAsAppRequest + * @instance + * @returns {Object.} JSON object + */ + ExecuteFetchAsAppRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ExecuteFetchAsAppRequest + * @function getTypeUrl + * @memberof vtctldata.ExecuteFetchAsAppRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ExecuteFetchAsAppRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ExecuteFetchAsAppRequest"; + }; + + return ExecuteFetchAsAppRequest; + })(); + + vtctldata.ExecuteFetchAsAppResponse = (function() { + + /** + * Properties of an ExecuteFetchAsAppResponse. + * @memberof vtctldata + * @interface IExecuteFetchAsAppResponse + * @property {query.IQueryResult|null} [result] ExecuteFetchAsAppResponse result + */ + + /** + * Constructs a new ExecuteFetchAsAppResponse. + * @memberof vtctldata + * @classdesc Represents an ExecuteFetchAsAppResponse. + * @implements IExecuteFetchAsAppResponse + * @constructor + * @param {vtctldata.IExecuteFetchAsAppResponse=} [properties] Properties to set + */ + function ExecuteFetchAsAppResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ExecuteFetchAsAppResponse result. + * @member {query.IQueryResult|null|undefined} result + * @memberof vtctldata.ExecuteFetchAsAppResponse + * @instance + */ + ExecuteFetchAsAppResponse.prototype.result = null; + + /** + * Creates a new ExecuteFetchAsAppResponse instance using the specified properties. 
+ * @function create + * @memberof vtctldata.ExecuteFetchAsAppResponse + * @static + * @param {vtctldata.IExecuteFetchAsAppResponse=} [properties] Properties to set + * @returns {vtctldata.ExecuteFetchAsAppResponse} ExecuteFetchAsAppResponse instance + */ + ExecuteFetchAsAppResponse.create = function create(properties) { + return new ExecuteFetchAsAppResponse(properties); + }; + + /** + * Encodes the specified ExecuteFetchAsAppResponse message. Does not implicitly {@link vtctldata.ExecuteFetchAsAppResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.ExecuteFetchAsAppResponse + * @static + * @param {vtctldata.IExecuteFetchAsAppResponse} message ExecuteFetchAsAppResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteFetchAsAppResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.result != null && Object.hasOwnProperty.call(message, "result")) + $root.query.QueryResult.encode(message.result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified ExecuteFetchAsAppResponse message, length delimited. Does not implicitly {@link vtctldata.ExecuteFetchAsAppResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ExecuteFetchAsAppResponse + * @static + * @param {vtctldata.IExecuteFetchAsAppResponse} message ExecuteFetchAsAppResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteFetchAsAppResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ExecuteFetchAsAppResponse message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.ExecuteFetchAsAppResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ExecuteFetchAsAppResponse} ExecuteFetchAsAppResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteFetchAsAppResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ExecuteFetchAsAppResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.result = $root.query.QueryResult.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ExecuteFetchAsAppResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.ExecuteFetchAsAppResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ExecuteFetchAsAppResponse} ExecuteFetchAsAppResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteFetchAsAppResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ExecuteFetchAsAppResponse message. 
+ * @function verify + * @memberof vtctldata.ExecuteFetchAsAppResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ExecuteFetchAsAppResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.result != null && message.hasOwnProperty("result")) { + let error = $root.query.QueryResult.verify(message.result); + if (error) + return "result." + error; + } + return null; + }; + + /** + * Creates an ExecuteFetchAsAppResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.ExecuteFetchAsAppResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ExecuteFetchAsAppResponse} ExecuteFetchAsAppResponse + */ + ExecuteFetchAsAppResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ExecuteFetchAsAppResponse) + return object; + let message = new $root.vtctldata.ExecuteFetchAsAppResponse(); + if (object.result != null) { + if (typeof object.result !== "object") + throw TypeError(".vtctldata.ExecuteFetchAsAppResponse.result: object expected"); + message.result = $root.query.QueryResult.fromObject(object.result); + } + return message; + }; + + /** + * Creates a plain object from an ExecuteFetchAsAppResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.ExecuteFetchAsAppResponse + * @static + * @param {vtctldata.ExecuteFetchAsAppResponse} message ExecuteFetchAsAppResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ExecuteFetchAsAppResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.result = null; + if (message.result != null && message.hasOwnProperty("result")) + object.result = $root.query.QueryResult.toObject(message.result, options); + return object; + }; + + /** + * Converts this ExecuteFetchAsAppResponse to JSON. + * @function toJSON + * @memberof vtctldata.ExecuteFetchAsAppResponse + * @instance + * @returns {Object.} JSON object + */ + ExecuteFetchAsAppResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ExecuteFetchAsAppResponse + * @function getTypeUrl + * @memberof vtctldata.ExecuteFetchAsAppResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ExecuteFetchAsAppResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ExecuteFetchAsAppResponse"; + }; + + return ExecuteFetchAsAppResponse; + })(); + + vtctldata.ExecuteFetchAsDBARequest = (function() { + + /** + * Properties of an ExecuteFetchAsDBARequest. 
+ * @memberof vtctldata + * @interface IExecuteFetchAsDBARequest + * @property {topodata.ITabletAlias|null} [tablet_alias] ExecuteFetchAsDBARequest tablet_alias + * @property {string|null} [query] ExecuteFetchAsDBARequest query + * @property {number|Long|null} [max_rows] ExecuteFetchAsDBARequest max_rows + * @property {boolean|null} [disable_binlogs] ExecuteFetchAsDBARequest disable_binlogs + * @property {boolean|null} [reload_schema] ExecuteFetchAsDBARequest reload_schema + */ + + /** + * Constructs a new ExecuteFetchAsDBARequest. + * @memberof vtctldata + * @classdesc Represents an ExecuteFetchAsDBARequest. + * @implements IExecuteFetchAsDBARequest + * @constructor + * @param {vtctldata.IExecuteFetchAsDBARequest=} [properties] Properties to set + */ + function ExecuteFetchAsDBARequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ExecuteFetchAsDBARequest tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.ExecuteFetchAsDBARequest + * @instance + */ + ExecuteFetchAsDBARequest.prototype.tablet_alias = null; + + /** + * ExecuteFetchAsDBARequest query. + * @member {string} query + * @memberof vtctldata.ExecuteFetchAsDBARequest + * @instance + */ + ExecuteFetchAsDBARequest.prototype.query = ""; + + /** + * ExecuteFetchAsDBARequest max_rows. + * @member {number|Long} max_rows + * @memberof vtctldata.ExecuteFetchAsDBARequest + * @instance + */ + ExecuteFetchAsDBARequest.prototype.max_rows = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * ExecuteFetchAsDBARequest disable_binlogs. + * @member {boolean} disable_binlogs + * @memberof vtctldata.ExecuteFetchAsDBARequest + * @instance + */ + ExecuteFetchAsDBARequest.prototype.disable_binlogs = false; + + /** + * ExecuteFetchAsDBARequest reload_schema. 
+ * @member {boolean} reload_schema + * @memberof vtctldata.ExecuteFetchAsDBARequest + * @instance + */ + ExecuteFetchAsDBARequest.prototype.reload_schema = false; + + /** + * Creates a new ExecuteFetchAsDBARequest instance using the specified properties. + * @function create + * @memberof vtctldata.ExecuteFetchAsDBARequest + * @static + * @param {vtctldata.IExecuteFetchAsDBARequest=} [properties] Properties to set + * @returns {vtctldata.ExecuteFetchAsDBARequest} ExecuteFetchAsDBARequest instance + */ + ExecuteFetchAsDBARequest.create = function create(properties) { + return new ExecuteFetchAsDBARequest(properties); + }; + + /** + * Encodes the specified ExecuteFetchAsDBARequest message. Does not implicitly {@link vtctldata.ExecuteFetchAsDBARequest.verify|verify} messages. + * @function encode + * @memberof vtctldata.ExecuteFetchAsDBARequest + * @static + * @param {vtctldata.IExecuteFetchAsDBARequest} message ExecuteFetchAsDBARequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteFetchAsDBARequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.query != null && Object.hasOwnProperty.call(message, "query")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.query); + if (message.max_rows != null && Object.hasOwnProperty.call(message, "max_rows")) + writer.uint32(/* id 3, wireType 0 =*/24).int64(message.max_rows); + if (message.disable_binlogs != null && Object.hasOwnProperty.call(message, "disable_binlogs")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.disable_binlogs); + if (message.reload_schema != null && Object.hasOwnProperty.call(message, "reload_schema")) + writer.uint32(/* id 5, 
wireType 0 =*/40).bool(message.reload_schema); + return writer; + }; + + /** + * Encodes the specified ExecuteFetchAsDBARequest message, length delimited. Does not implicitly {@link vtctldata.ExecuteFetchAsDBARequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ExecuteFetchAsDBARequest + * @static + * @param {vtctldata.IExecuteFetchAsDBARequest} message ExecuteFetchAsDBARequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteFetchAsDBARequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ExecuteFetchAsDBARequest message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.ExecuteFetchAsDBARequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ExecuteFetchAsDBARequest} ExecuteFetchAsDBARequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteFetchAsDBARequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ExecuteFetchAsDBARequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 2: { + message.query = reader.string(); + break; + } + case 3: { + message.max_rows = reader.int64(); + break; + } + case 4: { + message.disable_binlogs = reader.bool(); + break; + } + case 5: { + message.reload_schema = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ExecuteFetchAsDBARequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.ExecuteFetchAsDBARequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ExecuteFetchAsDBARequest} ExecuteFetchAsDBARequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteFetchAsDBARequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ExecuteFetchAsDBARequest message. + * @function verify + * @memberof vtctldata.ExecuteFetchAsDBARequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ExecuteFetchAsDBARequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." 
+ error; + } + if (message.query != null && message.hasOwnProperty("query")) + if (!$util.isString(message.query)) + return "query: string expected"; + if (message.max_rows != null && message.hasOwnProperty("max_rows")) + if (!$util.isInteger(message.max_rows) && !(message.max_rows && $util.isInteger(message.max_rows.low) && $util.isInteger(message.max_rows.high))) + return "max_rows: integer|Long expected"; + if (message.disable_binlogs != null && message.hasOwnProperty("disable_binlogs")) + if (typeof message.disable_binlogs !== "boolean") + return "disable_binlogs: boolean expected"; + if (message.reload_schema != null && message.hasOwnProperty("reload_schema")) + if (typeof message.reload_schema !== "boolean") + return "reload_schema: boolean expected"; + return null; + }; + + /** + * Creates an ExecuteFetchAsDBARequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.ExecuteFetchAsDBARequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ExecuteFetchAsDBARequest} ExecuteFetchAsDBARequest + */ + ExecuteFetchAsDBARequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ExecuteFetchAsDBARequest) + return object; + let message = new $root.vtctldata.ExecuteFetchAsDBARequest(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.ExecuteFetchAsDBARequest.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } + if (object.query != null) + message.query = String(object.query); + if (object.max_rows != null) + if ($util.Long) + (message.max_rows = $util.Long.fromValue(object.max_rows)).unsigned = false; + else if (typeof object.max_rows === "string") + message.max_rows = parseInt(object.max_rows, 10); + else if (typeof object.max_rows === "number") + message.max_rows = object.max_rows; + else if 
(typeof object.max_rows === "object") + message.max_rows = new $util.LongBits(object.max_rows.low >>> 0, object.max_rows.high >>> 0).toNumber(); + if (object.disable_binlogs != null) + message.disable_binlogs = Boolean(object.disable_binlogs); + if (object.reload_schema != null) + message.reload_schema = Boolean(object.reload_schema); + return message; + }; + + /** + * Creates a plain object from an ExecuteFetchAsDBARequest message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.ExecuteFetchAsDBARequest + * @static + * @param {vtctldata.ExecuteFetchAsDBARequest} message ExecuteFetchAsDBARequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ExecuteFetchAsDBARequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.tablet_alias = null; + object.query = ""; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.max_rows = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.max_rows = options.longs === String ? "0" : 0; + object.disable_binlogs = false; + object.reload_schema = false; + } + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (message.query != null && message.hasOwnProperty("query")) + object.query = message.query; + if (message.max_rows != null && message.hasOwnProperty("max_rows")) + if (typeof message.max_rows === "number") + object.max_rows = options.longs === String ? String(message.max_rows) : message.max_rows; + else + object.max_rows = options.longs === String ? $util.Long.prototype.toString.call(message.max_rows) : options.longs === Number ? 
new $util.LongBits(message.max_rows.low >>> 0, message.max_rows.high >>> 0).toNumber() : message.max_rows; + if (message.disable_binlogs != null && message.hasOwnProperty("disable_binlogs")) + object.disable_binlogs = message.disable_binlogs; + if (message.reload_schema != null && message.hasOwnProperty("reload_schema")) + object.reload_schema = message.reload_schema; + return object; + }; + + /** + * Converts this ExecuteFetchAsDBARequest to JSON. + * @function toJSON + * @memberof vtctldata.ExecuteFetchAsDBARequest + * @instance + * @returns {Object.} JSON object + */ + ExecuteFetchAsDBARequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ExecuteFetchAsDBARequest + * @function getTypeUrl + * @memberof vtctldata.ExecuteFetchAsDBARequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ExecuteFetchAsDBARequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ExecuteFetchAsDBARequest"; + }; + + return ExecuteFetchAsDBARequest; + })(); + + vtctldata.ExecuteFetchAsDBAResponse = (function() { + + /** + * Properties of an ExecuteFetchAsDBAResponse. + * @memberof vtctldata + * @interface IExecuteFetchAsDBAResponse + * @property {query.IQueryResult|null} [result] ExecuteFetchAsDBAResponse result + */ + + /** + * Constructs a new ExecuteFetchAsDBAResponse. + * @memberof vtctldata + * @classdesc Represents an ExecuteFetchAsDBAResponse. 
+ * @implements IExecuteFetchAsDBAResponse + * @constructor + * @param {vtctldata.IExecuteFetchAsDBAResponse=} [properties] Properties to set + */ + function ExecuteFetchAsDBAResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ExecuteFetchAsDBAResponse result. + * @member {query.IQueryResult|null|undefined} result + * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @instance + */ + ExecuteFetchAsDBAResponse.prototype.result = null; + + /** + * Creates a new ExecuteFetchAsDBAResponse instance using the specified properties. + * @function create + * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @static + * @param {vtctldata.IExecuteFetchAsDBAResponse=} [properties] Properties to set + * @returns {vtctldata.ExecuteFetchAsDBAResponse} ExecuteFetchAsDBAResponse instance + */ + ExecuteFetchAsDBAResponse.create = function create(properties) { + return new ExecuteFetchAsDBAResponse(properties); + }; + + /** + * Encodes the specified ExecuteFetchAsDBAResponse message. Does not implicitly {@link vtctldata.ExecuteFetchAsDBAResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @static + * @param {vtctldata.IExecuteFetchAsDBAResponse} message ExecuteFetchAsDBAResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteFetchAsDBAResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.result != null && Object.hasOwnProperty.call(message, "result")) + $root.query.QueryResult.encode(message.result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified ExecuteFetchAsDBAResponse message, length delimited. 
Does not implicitly {@link vtctldata.ExecuteFetchAsDBAResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @static + * @param {vtctldata.IExecuteFetchAsDBAResponse} message ExecuteFetchAsDBAResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteFetchAsDBAResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ExecuteFetchAsDBAResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ExecuteFetchAsDBAResponse} ExecuteFetchAsDBAResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteFetchAsDBAResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ExecuteFetchAsDBAResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.result = $root.query.QueryResult.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ExecuteFetchAsDBAResponse message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ExecuteFetchAsDBAResponse} ExecuteFetchAsDBAResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteFetchAsDBAResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ExecuteFetchAsDBAResponse message. + * @function verify + * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ExecuteFetchAsDBAResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.result != null && message.hasOwnProperty("result")) { + let error = $root.query.QueryResult.verify(message.result); + if (error) + return "result." + error; + } + return null; + }; + + /** + * Creates an ExecuteFetchAsDBAResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ExecuteFetchAsDBAResponse} ExecuteFetchAsDBAResponse + */ + ExecuteFetchAsDBAResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ExecuteFetchAsDBAResponse) + return object; + let message = new $root.vtctldata.ExecuteFetchAsDBAResponse(); + if (object.result != null) { + if (typeof object.result !== "object") + throw TypeError(".vtctldata.ExecuteFetchAsDBAResponse.result: object expected"); + message.result = $root.query.QueryResult.fromObject(object.result); + } + return message; + }; + + /** + * Creates a plain object from an ExecuteFetchAsDBAResponse message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @static + * @param {vtctldata.ExecuteFetchAsDBAResponse} message ExecuteFetchAsDBAResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ExecuteFetchAsDBAResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.result = null; + if (message.result != null && message.hasOwnProperty("result")) + object.result = $root.query.QueryResult.toObject(message.result, options); + return object; + }; + + /** + * Converts this ExecuteFetchAsDBAResponse to JSON. 
+ * @function toJSON + * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @instance + * @returns {Object.} JSON object + */ + ExecuteFetchAsDBAResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ExecuteFetchAsDBAResponse + * @function getTypeUrl + * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ExecuteFetchAsDBAResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ExecuteFetchAsDBAResponse"; + }; + + return ExecuteFetchAsDBAResponse; + })(); + + vtctldata.ExecuteHookRequest = (function() { + + /** + * Properties of an ExecuteHookRequest. + * @memberof vtctldata + * @interface IExecuteHookRequest + * @property {topodata.ITabletAlias|null} [tablet_alias] ExecuteHookRequest tablet_alias + * @property {tabletmanagerdata.IExecuteHookRequest|null} [tablet_hook_request] ExecuteHookRequest tablet_hook_request + */ + + /** + * Constructs a new ExecuteHookRequest. + * @memberof vtctldata + * @classdesc Represents an ExecuteHookRequest. + * @implements IExecuteHookRequest + * @constructor + * @param {vtctldata.IExecuteHookRequest=} [properties] Properties to set + */ + function ExecuteHookRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ExecuteHookRequest tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.ExecuteHookRequest + * @instance + */ + ExecuteHookRequest.prototype.tablet_alias = null; + + /** + * ExecuteHookRequest tablet_hook_request. 
+ * @member {tabletmanagerdata.IExecuteHookRequest|null|undefined} tablet_hook_request + * @memberof vtctldata.ExecuteHookRequest + * @instance + */ + ExecuteHookRequest.prototype.tablet_hook_request = null; + + /** + * Creates a new ExecuteHookRequest instance using the specified properties. + * @function create + * @memberof vtctldata.ExecuteHookRequest + * @static + * @param {vtctldata.IExecuteHookRequest=} [properties] Properties to set + * @returns {vtctldata.ExecuteHookRequest} ExecuteHookRequest instance + */ + ExecuteHookRequest.create = function create(properties) { + return new ExecuteHookRequest(properties); + }; + + /** + * Encodes the specified ExecuteHookRequest message. Does not implicitly {@link vtctldata.ExecuteHookRequest.verify|verify} messages. + * @function encode + * @memberof vtctldata.ExecuteHookRequest + * @static + * @param {vtctldata.IExecuteHookRequest} message ExecuteHookRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteHookRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.tablet_hook_request != null && Object.hasOwnProperty.call(message, "tablet_hook_request")) + $root.tabletmanagerdata.ExecuteHookRequest.encode(message.tablet_hook_request, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified ExecuteHookRequest message, length delimited. Does not implicitly {@link vtctldata.ExecuteHookRequest.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.ExecuteHookRequest + * @static + * @param {vtctldata.IExecuteHookRequest} message ExecuteHookRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteHookRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ExecuteHookRequest message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.ExecuteHookRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ExecuteHookRequest} ExecuteHookRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteHookRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ExecuteHookRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 2: { + message.tablet_hook_request = $root.tabletmanagerdata.ExecuteHookRequest.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ExecuteHookRequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.ExecuteHookRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ExecuteHookRequest} ExecuteHookRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteHookRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ExecuteHookRequest message. + * @function verify + * @memberof vtctldata.ExecuteHookRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ExecuteHookRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; + } + if (message.tablet_hook_request != null && message.hasOwnProperty("tablet_hook_request")) { + let error = $root.tabletmanagerdata.ExecuteHookRequest.verify(message.tablet_hook_request); + if (error) + return "tablet_hook_request." + error; + } + return null; + }; + + /** + * Creates an ExecuteHookRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.ExecuteHookRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ExecuteHookRequest} ExecuteHookRequest + */ + ExecuteHookRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ExecuteHookRequest) + return object; + let message = new $root.vtctldata.ExecuteHookRequest(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.ExecuteHookRequest.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } + if (object.tablet_hook_request != null) { + if (typeof object.tablet_hook_request !== "object") + throw TypeError(".vtctldata.ExecuteHookRequest.tablet_hook_request: object expected"); + message.tablet_hook_request = $root.tabletmanagerdata.ExecuteHookRequest.fromObject(object.tablet_hook_request); + } + return message; + }; + + /** + * Creates a plain object from an ExecuteHookRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.ExecuteHookRequest + * @static + * @param {vtctldata.ExecuteHookRequest} message ExecuteHookRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ExecuteHookRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.tablet_alias = null; + object.tablet_hook_request = null; + } + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (message.tablet_hook_request != null && message.hasOwnProperty("tablet_hook_request")) + object.tablet_hook_request = $root.tabletmanagerdata.ExecuteHookRequest.toObject(message.tablet_hook_request, options); + return object; + }; + + /** + * Converts this ExecuteHookRequest to JSON. + * @function toJSON + * @memberof vtctldata.ExecuteHookRequest + * @instance + * @returns {Object.} JSON object + */ + ExecuteHookRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ExecuteHookRequest + * @function getTypeUrl + * @memberof vtctldata.ExecuteHookRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ExecuteHookRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ExecuteHookRequest"; + }; + + return ExecuteHookRequest; + })(); + + vtctldata.ExecuteHookResponse = (function() { + + /** + * Properties of an ExecuteHookResponse. 
+ * @memberof vtctldata + * @interface IExecuteHookResponse + * @property {tabletmanagerdata.IExecuteHookResponse|null} [hook_result] ExecuteHookResponse hook_result + */ + + /** + * Constructs a new ExecuteHookResponse. + * @memberof vtctldata + * @classdesc Represents an ExecuteHookResponse. + * @implements IExecuteHookResponse + * @constructor + * @param {vtctldata.IExecuteHookResponse=} [properties] Properties to set + */ + function ExecuteHookResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ExecuteHookResponse hook_result. + * @member {tabletmanagerdata.IExecuteHookResponse|null|undefined} hook_result + * @memberof vtctldata.ExecuteHookResponse + * @instance + */ + ExecuteHookResponse.prototype.hook_result = null; + + /** + * Creates a new ExecuteHookResponse instance using the specified properties. + * @function create + * @memberof vtctldata.ExecuteHookResponse + * @static + * @param {vtctldata.IExecuteHookResponse=} [properties] Properties to set + * @returns {vtctldata.ExecuteHookResponse} ExecuteHookResponse instance + */ + ExecuteHookResponse.create = function create(properties) { + return new ExecuteHookResponse(properties); + }; + + /** + * Encodes the specified ExecuteHookResponse message. Does not implicitly {@link vtctldata.ExecuteHookResponse.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.ExecuteHookResponse + * @static + * @param {vtctldata.IExecuteHookResponse} message ExecuteHookResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteHookResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.hook_result != null && Object.hasOwnProperty.call(message, "hook_result")) + $root.tabletmanagerdata.ExecuteHookResponse.encode(message.hook_result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified ExecuteHookResponse message, length delimited. Does not implicitly {@link vtctldata.ExecuteHookResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ExecuteHookResponse + * @static + * @param {vtctldata.IExecuteHookResponse} message ExecuteHookResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteHookResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ExecuteHookResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.ExecuteHookResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ExecuteHookResponse} ExecuteHookResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteHookResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ExecuteHookResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.hook_result = $root.tabletmanagerdata.ExecuteHookResponse.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ExecuteHookResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.ExecuteHookResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ExecuteHookResponse} ExecuteHookResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteHookResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ExecuteHookResponse message. + * @function verify + * @memberof vtctldata.ExecuteHookResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ExecuteHookResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.hook_result != null && message.hasOwnProperty("hook_result")) { + let error = $root.tabletmanagerdata.ExecuteHookResponse.verify(message.hook_result); + if (error) + return "hook_result." + error; + } + return null; + }; + + /** + * Creates an ExecuteHookResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.ExecuteHookResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ExecuteHookResponse} ExecuteHookResponse + */ + ExecuteHookResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ExecuteHookResponse) + return object; + let message = new $root.vtctldata.ExecuteHookResponse(); + if (object.hook_result != null) { + if (typeof object.hook_result !== "object") + throw TypeError(".vtctldata.ExecuteHookResponse.hook_result: object expected"); + message.hook_result = $root.tabletmanagerdata.ExecuteHookResponse.fromObject(object.hook_result); + } + return message; + }; + + /** + * Creates a plain object from an ExecuteHookResponse message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.ExecuteHookResponse + * @static + * @param {vtctldata.ExecuteHookResponse} message ExecuteHookResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ExecuteHookResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.hook_result = null; + if (message.hook_result != null && message.hasOwnProperty("hook_result")) + object.hook_result = $root.tabletmanagerdata.ExecuteHookResponse.toObject(message.hook_result, options); + return object; + }; + + /** + * Converts this ExecuteHookResponse to JSON. 
+ * @function toJSON + * @memberof vtctldata.ExecuteHookResponse + * @instance + * @returns {Object.} JSON object + */ + ExecuteHookResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ExecuteHookResponse + * @function getTypeUrl + * @memberof vtctldata.ExecuteHookResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ExecuteHookResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ExecuteHookResponse"; + }; + + return ExecuteHookResponse; + })(); + + vtctldata.FindAllShardsInKeyspaceRequest = (function() { + + /** + * Properties of a FindAllShardsInKeyspaceRequest. + * @memberof vtctldata + * @interface IFindAllShardsInKeyspaceRequest + * @property {string|null} [keyspace] FindAllShardsInKeyspaceRequest keyspace + */ + + /** + * Constructs a new FindAllShardsInKeyspaceRequest. + * @memberof vtctldata + * @classdesc Represents a FindAllShardsInKeyspaceRequest. + * @implements IFindAllShardsInKeyspaceRequest + * @constructor + * @param {vtctldata.IFindAllShardsInKeyspaceRequest=} [properties] Properties to set + */ + function FindAllShardsInKeyspaceRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * FindAllShardsInKeyspaceRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @instance + */ + FindAllShardsInKeyspaceRequest.prototype.keyspace = ""; + + /** + * Creates a new FindAllShardsInKeyspaceRequest instance using the specified properties. 
+ * @function create + * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @static + * @param {vtctldata.IFindAllShardsInKeyspaceRequest=} [properties] Properties to set + * @returns {vtctldata.FindAllShardsInKeyspaceRequest} FindAllShardsInKeyspaceRequest instance + */ + FindAllShardsInKeyspaceRequest.create = function create(properties) { + return new FindAllShardsInKeyspaceRequest(properties); + }; + + /** + * Encodes the specified FindAllShardsInKeyspaceRequest message. Does not implicitly {@link vtctldata.FindAllShardsInKeyspaceRequest.verify|verify} messages. + * @function encode + * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @static + * @param {vtctldata.IFindAllShardsInKeyspaceRequest} message FindAllShardsInKeyspaceRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + FindAllShardsInKeyspaceRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + return writer; + }; + + /** + * Encodes the specified FindAllShardsInKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.FindAllShardsInKeyspaceRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @static + * @param {vtctldata.IFindAllShardsInKeyspaceRequest} message FindAllShardsInKeyspaceRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + FindAllShardsInKeyspaceRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a FindAllShardsInKeyspaceRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.FindAllShardsInKeyspaceRequest} FindAllShardsInKeyspaceRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + FindAllShardsInKeyspaceRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.FindAllShardsInKeyspaceRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyspace = reader.string(); break; } + default: + reader.skipType(tag & 7); + break; } - return message; - }; + } + return message; + }; - /** - * Decodes a Stream message from the specified reader or buffer, length delimited. - * @function decodeDelimited - * @memberof vtctldata.Workflow.Stream - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.Workflow.Stream} Stream - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - Stream.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; + /** + * Decodes a FindAllShardsInKeyspaceRequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.FindAllShardsInKeyspaceRequest} FindAllShardsInKeyspaceRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + FindAllShardsInKeyspaceRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; - /** - * Verifies a Stream message. - * @function verify - * @memberof vtctldata.Workflow.Stream - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - Stream.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.id != null && message.hasOwnProperty("id")) - if (!$util.isInteger(message.id) && !(message.id && $util.isInteger(message.id.low) && $util.isInteger(message.id.high))) - return "id: integer|Long expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.tablet != null && message.hasOwnProperty("tablet")) { - let error = $root.topodata.TabletAlias.verify(message.tablet); - if (error) - return "tablet." + error; - } - if (message.binlog_source != null && message.hasOwnProperty("binlog_source")) { - let error = $root.binlogdata.BinlogSource.verify(message.binlog_source); - if (error) - return "binlog_source." 
+ error; - } - if (message.position != null && message.hasOwnProperty("position")) - if (!$util.isString(message.position)) - return "position: string expected"; - if (message.stop_position != null && message.hasOwnProperty("stop_position")) - if (!$util.isString(message.stop_position)) - return "stop_position: string expected"; - if (message.state != null && message.hasOwnProperty("state")) - if (!$util.isString(message.state)) - return "state: string expected"; - if (message.db_name != null && message.hasOwnProperty("db_name")) - if (!$util.isString(message.db_name)) - return "db_name: string expected"; - if (message.transaction_timestamp != null && message.hasOwnProperty("transaction_timestamp")) { - let error = $root.vttime.Time.verify(message.transaction_timestamp); - if (error) - return "transaction_timestamp." + error; - } - if (message.time_updated != null && message.hasOwnProperty("time_updated")) { - let error = $root.vttime.Time.verify(message.time_updated); - if (error) - return "time_updated." + error; - } - if (message.message != null && message.hasOwnProperty("message")) - if (!$util.isString(message.message)) - return "message: string expected"; - if (message.copy_states != null && message.hasOwnProperty("copy_states")) { - if (!Array.isArray(message.copy_states)) - return "copy_states: array expected"; - for (let i = 0; i < message.copy_states.length; ++i) { - let error = $root.vtctldata.Workflow.Stream.CopyState.verify(message.copy_states[i]); - if (error) - return "copy_states." + error; - } - } - if (message.logs != null && message.hasOwnProperty("logs")) { - if (!Array.isArray(message.logs)) - return "logs: array expected"; - for (let i = 0; i < message.logs.length; ++i) { - let error = $root.vtctldata.Workflow.Stream.Log.verify(message.logs[i]); - if (error) - return "logs." 
+ error; - } - } - if (message.log_fetch_error != null && message.hasOwnProperty("log_fetch_error")) - if (!$util.isString(message.log_fetch_error)) - return "log_fetch_error: string expected"; - if (message.tags != null && message.hasOwnProperty("tags")) { - if (!Array.isArray(message.tags)) - return "tags: array expected"; - for (let i = 0; i < message.tags.length; ++i) - if (!$util.isString(message.tags[i])) - return "tags: string[] expected"; - } - return null; - }; + /** + * Verifies a FindAllShardsInKeyspaceRequest message. + * @function verify + * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + FindAllShardsInKeyspaceRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + return null; + }; - /** - * Creates a Stream message from a plain object. Also converts values to their respective internal types. 
- * @function fromObject - * @memberof vtctldata.Workflow.Stream - * @static - * @param {Object.} object Plain object - * @returns {vtctldata.Workflow.Stream} Stream - */ - Stream.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.Workflow.Stream) - return object; - let message = new $root.vtctldata.Workflow.Stream(); - if (object.id != null) - if ($util.Long) - (message.id = $util.Long.fromValue(object.id)).unsigned = false; - else if (typeof object.id === "string") - message.id = parseInt(object.id, 10); - else if (typeof object.id === "number") - message.id = object.id; - else if (typeof object.id === "object") - message.id = new $util.LongBits(object.id.low >>> 0, object.id.high >>> 0).toNumber(); - if (object.shard != null) - message.shard = String(object.shard); - if (object.tablet != null) { - if (typeof object.tablet !== "object") - throw TypeError(".vtctldata.Workflow.Stream.tablet: object expected"); - message.tablet = $root.topodata.TabletAlias.fromObject(object.tablet); - } - if (object.binlog_source != null) { - if (typeof object.binlog_source !== "object") - throw TypeError(".vtctldata.Workflow.Stream.binlog_source: object expected"); - message.binlog_source = $root.binlogdata.BinlogSource.fromObject(object.binlog_source); - } - if (object.position != null) - message.position = String(object.position); - if (object.stop_position != null) - message.stop_position = String(object.stop_position); - if (object.state != null) - message.state = String(object.state); - if (object.db_name != null) - message.db_name = String(object.db_name); - if (object.transaction_timestamp != null) { - if (typeof object.transaction_timestamp !== "object") - throw TypeError(".vtctldata.Workflow.Stream.transaction_timestamp: object expected"); - message.transaction_timestamp = $root.vttime.Time.fromObject(object.transaction_timestamp); - } - if (object.time_updated != null) { - if (typeof object.time_updated !== "object") - throw 
TypeError(".vtctldata.Workflow.Stream.time_updated: object expected"); - message.time_updated = $root.vttime.Time.fromObject(object.time_updated); - } - if (object.message != null) - message.message = String(object.message); - if (object.copy_states) { - if (!Array.isArray(object.copy_states)) - throw TypeError(".vtctldata.Workflow.Stream.copy_states: array expected"); - message.copy_states = []; - for (let i = 0; i < object.copy_states.length; ++i) { - if (typeof object.copy_states[i] !== "object") - throw TypeError(".vtctldata.Workflow.Stream.copy_states: object expected"); - message.copy_states[i] = $root.vtctldata.Workflow.Stream.CopyState.fromObject(object.copy_states[i]); - } - } - if (object.logs) { - if (!Array.isArray(object.logs)) - throw TypeError(".vtctldata.Workflow.Stream.logs: array expected"); - message.logs = []; - for (let i = 0; i < object.logs.length; ++i) { - if (typeof object.logs[i] !== "object") - throw TypeError(".vtctldata.Workflow.Stream.logs: object expected"); - message.logs[i] = $root.vtctldata.Workflow.Stream.Log.fromObject(object.logs[i]); - } - } - if (object.log_fetch_error != null) - message.log_fetch_error = String(object.log_fetch_error); - if (object.tags) { - if (!Array.isArray(object.tags)) - throw TypeError(".vtctldata.Workflow.Stream.tags: array expected"); - message.tags = []; - for (let i = 0; i < object.tags.length; ++i) - message.tags[i] = String(object.tags[i]); - } - return message; - }; + /** + * Creates a FindAllShardsInKeyspaceRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.FindAllShardsInKeyspaceRequest} FindAllShardsInKeyspaceRequest + */ + FindAllShardsInKeyspaceRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.FindAllShardsInKeyspaceRequest) + return object; + let message = new $root.vtctldata.FindAllShardsInKeyspaceRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + return message; + }; - /** - * Creates a plain object from a Stream message. Also converts values to other types if specified. - * @function toObject - * @memberof vtctldata.Workflow.Stream - * @static - * @param {vtctldata.Workflow.Stream} message Stream - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - Stream.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.arrays || options.defaults) { - object.copy_states = []; - object.logs = []; - object.tags = []; - } - if (options.defaults) { - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.id = options.longs === String ? "0" : 0; - object.shard = ""; - object.tablet = null; - object.binlog_source = null; - object.position = ""; - object.stop_position = ""; - object.state = ""; - object.db_name = ""; - object.transaction_timestamp = null; - object.time_updated = null; - object.message = ""; - object.log_fetch_error = ""; - } - if (message.id != null && message.hasOwnProperty("id")) - if (typeof message.id === "number") - object.id = options.longs === String ? String(message.id) : message.id; - else - object.id = options.longs === String ? $util.Long.prototype.toString.call(message.id) : options.longs === Number ? 
new $util.LongBits(message.id.low >>> 0, message.id.high >>> 0).toNumber() : message.id; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.tablet != null && message.hasOwnProperty("tablet")) - object.tablet = $root.topodata.TabletAlias.toObject(message.tablet, options); - if (message.binlog_source != null && message.hasOwnProperty("binlog_source")) - object.binlog_source = $root.binlogdata.BinlogSource.toObject(message.binlog_source, options); - if (message.position != null && message.hasOwnProperty("position")) - object.position = message.position; - if (message.stop_position != null && message.hasOwnProperty("stop_position")) - object.stop_position = message.stop_position; - if (message.state != null && message.hasOwnProperty("state")) - object.state = message.state; - if (message.db_name != null && message.hasOwnProperty("db_name")) - object.db_name = message.db_name; - if (message.transaction_timestamp != null && message.hasOwnProperty("transaction_timestamp")) - object.transaction_timestamp = $root.vttime.Time.toObject(message.transaction_timestamp, options); - if (message.time_updated != null && message.hasOwnProperty("time_updated")) - object.time_updated = $root.vttime.Time.toObject(message.time_updated, options); - if (message.message != null && message.hasOwnProperty("message")) - object.message = message.message; - if (message.copy_states && message.copy_states.length) { - object.copy_states = []; - for (let j = 0; j < message.copy_states.length; ++j) - object.copy_states[j] = $root.vtctldata.Workflow.Stream.CopyState.toObject(message.copy_states[j], options); - } - if (message.logs && message.logs.length) { - object.logs = []; - for (let j = 0; j < message.logs.length; ++j) - object.logs[j] = $root.vtctldata.Workflow.Stream.Log.toObject(message.logs[j], options); + /** + * Creates a plain object from a FindAllShardsInKeyspaceRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @static + * @param {vtctldata.FindAllShardsInKeyspaceRequest} message FindAllShardsInKeyspaceRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + FindAllShardsInKeyspaceRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.keyspace = ""; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + return object; + }; + + /** + * Converts this FindAllShardsInKeyspaceRequest to JSON. + * @function toJSON + * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @instance + * @returns {Object.} JSON object + */ + FindAllShardsInKeyspaceRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for FindAllShardsInKeyspaceRequest + * @function getTypeUrl + * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + FindAllShardsInKeyspaceRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.FindAllShardsInKeyspaceRequest"; + }; + + return FindAllShardsInKeyspaceRequest; + })(); + + vtctldata.FindAllShardsInKeyspaceResponse = (function() { + + /** + * Properties of a FindAllShardsInKeyspaceResponse. + * @memberof vtctldata + * @interface IFindAllShardsInKeyspaceResponse + * @property {Object.|null} [shards] FindAllShardsInKeyspaceResponse shards + */ + + /** + * Constructs a new FindAllShardsInKeyspaceResponse. + * @memberof vtctldata + * @classdesc Represents a FindAllShardsInKeyspaceResponse. 
+ * @implements IFindAllShardsInKeyspaceResponse + * @constructor + * @param {vtctldata.IFindAllShardsInKeyspaceResponse=} [properties] Properties to set + */ + function FindAllShardsInKeyspaceResponse(properties) { + this.shards = {}; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * FindAllShardsInKeyspaceResponse shards. + * @member {Object.} shards + * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @instance + */ + FindAllShardsInKeyspaceResponse.prototype.shards = $util.emptyObject; + + /** + * Creates a new FindAllShardsInKeyspaceResponse instance using the specified properties. + * @function create + * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @static + * @param {vtctldata.IFindAllShardsInKeyspaceResponse=} [properties] Properties to set + * @returns {vtctldata.FindAllShardsInKeyspaceResponse} FindAllShardsInKeyspaceResponse instance + */ + FindAllShardsInKeyspaceResponse.create = function create(properties) { + return new FindAllShardsInKeyspaceResponse(properties); + }; + + /** + * Encodes the specified FindAllShardsInKeyspaceResponse message. Does not implicitly {@link vtctldata.FindAllShardsInKeyspaceResponse.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @static + * @param {vtctldata.IFindAllShardsInKeyspaceResponse} message FindAllShardsInKeyspaceResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + FindAllShardsInKeyspaceResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.shards != null && Object.hasOwnProperty.call(message, "shards")) + for (let keys = Object.keys(message.shards), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.vtctldata.Shard.encode(message.shards[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); } - if (message.log_fetch_error != null && message.hasOwnProperty("log_fetch_error")) - object.log_fetch_error = message.log_fetch_error; - if (message.tags && message.tags.length) { - object.tags = []; - for (let j = 0; j < message.tags.length; ++j) - object.tags[j] = message.tags[j]; + return writer; + }; + + /** + * Encodes the specified FindAllShardsInKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.FindAllShardsInKeyspaceResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @static + * @param {vtctldata.IFindAllShardsInKeyspaceResponse} message FindAllShardsInKeyspaceResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + FindAllShardsInKeyspaceResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a FindAllShardsInKeyspaceResponse message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.FindAllShardsInKeyspaceResponse} FindAllShardsInKeyspaceResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + FindAllShardsInKeyspaceResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.FindAllShardsInKeyspaceResponse(), key, value; + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (message.shards === $util.emptyObject) + message.shards = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.vtctldata.Shard.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.shards[key] = value; + break; + } + default: + reader.skipType(tag & 7); + break; } - return object; - }; + } + return message; + }; - /** - * Converts this Stream to JSON. - * @function toJSON - * @memberof vtctldata.Workflow.Stream - * @instance - * @returns {Object.} JSON object - */ - Stream.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; + /** + * Decodes a FindAllShardsInKeyspaceResponse message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.FindAllShardsInKeyspaceResponse} FindAllShardsInKeyspaceResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + FindAllShardsInKeyspaceResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; - /** - * Gets the default type url for Stream - * @function getTypeUrl - * @memberof vtctldata.Workflow.Stream - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - Stream.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; + /** + * Verifies a FindAllShardsInKeyspaceResponse message. + * @function verify + * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + FindAllShardsInKeyspaceResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.shards != null && message.hasOwnProperty("shards")) { + if (!$util.isObject(message.shards)) + return "shards: object expected"; + let key = Object.keys(message.shards); + for (let i = 0; i < key.length; ++i) { + let error = $root.vtctldata.Shard.verify(message.shards[key[i]]); + if (error) + return "shards." + error; } - return typeUrlPrefix + "/vtctldata.Workflow.Stream"; - }; - - Stream.CopyState = (function() { - - /** - * Properties of a CopyState. 
- * @memberof vtctldata.Workflow.Stream - * @interface ICopyState - * @property {string|null} [table] CopyState table - * @property {string|null} [last_pk] CopyState last_pk - */ + } + return null; + }; - /** - * Constructs a new CopyState. - * @memberof vtctldata.Workflow.Stream - * @classdesc Represents a CopyState. - * @implements ICopyState - * @constructor - * @param {vtctldata.Workflow.Stream.ICopyState=} [properties] Properties to set - */ - function CopyState(properties) { - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; + /** + * Creates a FindAllShardsInKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.FindAllShardsInKeyspaceResponse} FindAllShardsInKeyspaceResponse + */ + FindAllShardsInKeyspaceResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.FindAllShardsInKeyspaceResponse) + return object; + let message = new $root.vtctldata.FindAllShardsInKeyspaceResponse(); + if (object.shards) { + if (typeof object.shards !== "object") + throw TypeError(".vtctldata.FindAllShardsInKeyspaceResponse.shards: object expected"); + message.shards = {}; + for (let keys = Object.keys(object.shards), i = 0; i < keys.length; ++i) { + if (typeof object.shards[keys[i]] !== "object") + throw TypeError(".vtctldata.FindAllShardsInKeyspaceResponse.shards: object expected"); + message.shards[keys[i]] = $root.vtctldata.Shard.fromObject(object.shards[keys[i]]); } + } + return message; + }; - /** - * CopyState table. - * @member {string} table - * @memberof vtctldata.Workflow.Stream.CopyState - * @instance - */ - CopyState.prototype.table = ""; + /** + * Creates a plain object from a FindAllShardsInKeyspaceResponse message. 
Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @static + * @param {vtctldata.FindAllShardsInKeyspaceResponse} message FindAllShardsInKeyspaceResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + FindAllShardsInKeyspaceResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.objects || options.defaults) + object.shards = {}; + let keys2; + if (message.shards && (keys2 = Object.keys(message.shards)).length) { + object.shards = {}; + for (let j = 0; j < keys2.length; ++j) + object.shards[keys2[j]] = $root.vtctldata.Shard.toObject(message.shards[keys2[j]], options); + } + return object; + }; - /** - * CopyState last_pk. - * @member {string} last_pk - * @memberof vtctldata.Workflow.Stream.CopyState - * @instance - */ - CopyState.prototype.last_pk = ""; + /** + * Converts this FindAllShardsInKeyspaceResponse to JSON. + * @function toJSON + * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @instance + * @returns {Object.} JSON object + */ + FindAllShardsInKeyspaceResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; - /** - * Creates a new CopyState instance using the specified properties. 
- * @function create - * @memberof vtctldata.Workflow.Stream.CopyState - * @static - * @param {vtctldata.Workflow.Stream.ICopyState=} [properties] Properties to set - * @returns {vtctldata.Workflow.Stream.CopyState} CopyState instance - */ - CopyState.create = function create(properties) { - return new CopyState(properties); - }; + /** + * Gets the default type url for FindAllShardsInKeyspaceResponse + * @function getTypeUrl + * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + FindAllShardsInKeyspaceResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.FindAllShardsInKeyspaceResponse"; + }; - /** - * Encodes the specified CopyState message. Does not implicitly {@link vtctldata.Workflow.Stream.CopyState.verify|verify} messages. - * @function encode - * @memberof vtctldata.Workflow.Stream.CopyState - * @static - * @param {vtctldata.Workflow.Stream.ICopyState} message CopyState message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - CopyState.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.table != null && Object.hasOwnProperty.call(message, "table")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.table); - if (message.last_pk != null && Object.hasOwnProperty.call(message, "last_pk")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.last_pk); - return writer; - }; + return FindAllShardsInKeyspaceResponse; + })(); - /** - * Encodes the specified CopyState message, length delimited. Does not implicitly {@link vtctldata.Workflow.Stream.CopyState.verify|verify} messages. 
- * @function encodeDelimited - * @memberof vtctldata.Workflow.Stream.CopyState - * @static - * @param {vtctldata.Workflow.Stream.ICopyState} message CopyState message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - CopyState.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; + vtctldata.GetBackupsRequest = (function() { - /** - * Decodes a CopyState message from the specified reader or buffer. - * @function decode - * @memberof vtctldata.Workflow.Stream.CopyState - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.Workflow.Stream.CopyState} CopyState - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - CopyState.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.Workflow.Stream.CopyState(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.table = reader.string(); - break; - } - case 2: { - message.last_pk = reader.string(); - break; - } - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; + /** + * Properties of a GetBackupsRequest. 
+ * @memberof vtctldata + * @interface IGetBackupsRequest + * @property {string|null} [keyspace] GetBackupsRequest keyspace + * @property {string|null} [shard] GetBackupsRequest shard + * @property {number|null} [limit] GetBackupsRequest limit + * @property {boolean|null} [detailed] GetBackupsRequest detailed + * @property {number|null} [detailed_limit] GetBackupsRequest detailed_limit + */ - /** - * Decodes a CopyState message from the specified reader or buffer, length delimited. - * @function decodeDelimited - * @memberof vtctldata.Workflow.Stream.CopyState - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.Workflow.Stream.CopyState} CopyState - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - CopyState.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; + /** + * Constructs a new GetBackupsRequest. + * @memberof vtctldata + * @classdesc Represents a GetBackupsRequest. + * @implements IGetBackupsRequest + * @constructor + * @param {vtctldata.IGetBackupsRequest=} [properties] Properties to set + */ + function GetBackupsRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } - /** - * Verifies a CopyState message. 
- * @function verify - * @memberof vtctldata.Workflow.Stream.CopyState - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - CopyState.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.table != null && message.hasOwnProperty("table")) - if (!$util.isString(message.table)) - return "table: string expected"; - if (message.last_pk != null && message.hasOwnProperty("last_pk")) - if (!$util.isString(message.last_pk)) - return "last_pk: string expected"; - return null; - }; + /** + * GetBackupsRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.GetBackupsRequest + * @instance + */ + GetBackupsRequest.prototype.keyspace = ""; - /** - * Creates a CopyState message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof vtctldata.Workflow.Stream.CopyState - * @static - * @param {Object.} object Plain object - * @returns {vtctldata.Workflow.Stream.CopyState} CopyState - */ - CopyState.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.Workflow.Stream.CopyState) - return object; - let message = new $root.vtctldata.Workflow.Stream.CopyState(); - if (object.table != null) - message.table = String(object.table); - if (object.last_pk != null) - message.last_pk = String(object.last_pk); - return message; - }; + /** + * GetBackupsRequest shard. + * @member {string} shard + * @memberof vtctldata.GetBackupsRequest + * @instance + */ + GetBackupsRequest.prototype.shard = ""; - /** - * Creates a plain object from a CopyState message. Also converts values to other types if specified. 
- * @function toObject - * @memberof vtctldata.Workflow.Stream.CopyState - * @static - * @param {vtctldata.Workflow.Stream.CopyState} message CopyState - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - CopyState.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.table = ""; - object.last_pk = ""; - } - if (message.table != null && message.hasOwnProperty("table")) - object.table = message.table; - if (message.last_pk != null && message.hasOwnProperty("last_pk")) - object.last_pk = message.last_pk; - return object; - }; + /** + * GetBackupsRequest limit. + * @member {number} limit + * @memberof vtctldata.GetBackupsRequest + * @instance + */ + GetBackupsRequest.prototype.limit = 0; - /** - * Converts this CopyState to JSON. - * @function toJSON - * @memberof vtctldata.Workflow.Stream.CopyState - * @instance - * @returns {Object.} JSON object - */ - CopyState.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; + /** + * GetBackupsRequest detailed. + * @member {boolean} detailed + * @memberof vtctldata.GetBackupsRequest + * @instance + */ + GetBackupsRequest.prototype.detailed = false; - /** - * Gets the default type url for CopyState - * @function getTypeUrl - * @memberof vtctldata.Workflow.Stream.CopyState - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - CopyState.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/vtctldata.Workflow.Stream.CopyState"; - }; + /** + * GetBackupsRequest detailed_limit. 
+ * @member {number} detailed_limit + * @memberof vtctldata.GetBackupsRequest + * @instance + */ + GetBackupsRequest.prototype.detailed_limit = 0; - return CopyState; - })(); + /** + * Creates a new GetBackupsRequest instance using the specified properties. + * @function create + * @memberof vtctldata.GetBackupsRequest + * @static + * @param {vtctldata.IGetBackupsRequest=} [properties] Properties to set + * @returns {vtctldata.GetBackupsRequest} GetBackupsRequest instance + */ + GetBackupsRequest.create = function create(properties) { + return new GetBackupsRequest(properties); + }; - Stream.Log = (function() { + /** + * Encodes the specified GetBackupsRequest message. Does not implicitly {@link vtctldata.GetBackupsRequest.verify|verify} messages. + * @function encode + * @memberof vtctldata.GetBackupsRequest + * @static + * @param {vtctldata.IGetBackupsRequest} message GetBackupsRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetBackupsRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.limit != null && Object.hasOwnProperty.call(message, "limit")) + writer.uint32(/* id 3, wireType 0 =*/24).uint32(message.limit); + if (message.detailed != null && Object.hasOwnProperty.call(message, "detailed")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.detailed); + if (message.detailed_limit != null && Object.hasOwnProperty.call(message, "detailed_limit")) + writer.uint32(/* id 5, wireType 0 =*/40).uint32(message.detailed_limit); + return writer; + }; - /** - * Properties of a Log. 
- * @memberof vtctldata.Workflow.Stream - * @interface ILog - * @property {number|Long|null} [id] Log id - * @property {number|Long|null} [stream_id] Log stream_id - * @property {string|null} [type] Log type - * @property {string|null} [state] Log state - * @property {vttime.ITime|null} [created_at] Log created_at - * @property {vttime.ITime|null} [updated_at] Log updated_at - * @property {string|null} [message] Log message - * @property {number|Long|null} [count] Log count - */ + /** + * Encodes the specified GetBackupsRequest message, length delimited. Does not implicitly {@link vtctldata.GetBackupsRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.GetBackupsRequest + * @static + * @param {vtctldata.IGetBackupsRequest} message GetBackupsRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetBackupsRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; - /** - * Constructs a new Log. - * @memberof vtctldata.Workflow.Stream - * @classdesc Represents a Log. - * @implements ILog - * @constructor - * @param {vtctldata.Workflow.Stream.ILog=} [properties] Properties to set - */ - function Log(properties) { - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; + /** + * Decodes a GetBackupsRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.GetBackupsRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.GetBackupsRequest} GetBackupsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GetBackupsRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetBackupsRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyspace = reader.string(); + break; + } + case 2: { + message.shard = reader.string(); + break; + } + case 3: { + message.limit = reader.uint32(); + break; + } + case 4: { + message.detailed = reader.bool(); + break; + } + case 5: { + message.detailed_limit = reader.uint32(); + break; + } + default: + reader.skipType(tag & 7); + break; } + } + return message; + }; - /** - * Log id. - * @member {number|Long} id - * @memberof vtctldata.Workflow.Stream.Log - * @instance - */ - Log.prototype.id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + /** + * Decodes a GetBackupsRequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.GetBackupsRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.GetBackupsRequest} GetBackupsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GetBackupsRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; - /** - * Log stream_id. - * @member {number|Long} stream_id - * @memberof vtctldata.Workflow.Stream.Log - * @instance - */ - Log.prototype.stream_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + /** + * Verifies a GetBackupsRequest message. + * @function verify + * @memberof vtctldata.GetBackupsRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + GetBackupsRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.limit != null && message.hasOwnProperty("limit")) + if (!$util.isInteger(message.limit)) + return "limit: integer expected"; + if (message.detailed != null && message.hasOwnProperty("detailed")) + if (typeof message.detailed !== "boolean") + return "detailed: boolean expected"; + if (message.detailed_limit != null && message.hasOwnProperty("detailed_limit")) + if (!$util.isInteger(message.detailed_limit)) + return "detailed_limit: integer expected"; + return null; + }; - /** - * Log type. 
- * @member {string} type - * @memberof vtctldata.Workflow.Stream.Log - * @instance - */ - Log.prototype.type = ""; + /** + * Creates a GetBackupsRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.GetBackupsRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.GetBackupsRequest} GetBackupsRequest + */ + GetBackupsRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetBackupsRequest) + return object; + let message = new $root.vtctldata.GetBackupsRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + if (object.limit != null) + message.limit = object.limit >>> 0; + if (object.detailed != null) + message.detailed = Boolean(object.detailed); + if (object.detailed_limit != null) + message.detailed_limit = object.detailed_limit >>> 0; + return message; + }; - /** - * Log state. - * @member {string} state - * @memberof vtctldata.Workflow.Stream.Log - * @instance - */ - Log.prototype.state = ""; + /** + * Creates a plain object from a GetBackupsRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.GetBackupsRequest + * @static + * @param {vtctldata.GetBackupsRequest} message GetBackupsRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + GetBackupsRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.keyspace = ""; + object.shard = ""; + object.limit = 0; + object.detailed = false; + object.detailed_limit = 0; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.limit != null && message.hasOwnProperty("limit")) + object.limit = message.limit; + if (message.detailed != null && message.hasOwnProperty("detailed")) + object.detailed = message.detailed; + if (message.detailed_limit != null && message.hasOwnProperty("detailed_limit")) + object.detailed_limit = message.detailed_limit; + return object; + }; - /** - * Log created_at. - * @member {vttime.ITime|null|undefined} created_at - * @memberof vtctldata.Workflow.Stream.Log - * @instance - */ - Log.prototype.created_at = null; + /** + * Converts this GetBackupsRequest to JSON. + * @function toJSON + * @memberof vtctldata.GetBackupsRequest + * @instance + * @returns {Object.} JSON object + */ + GetBackupsRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; - /** - * Log updated_at. 
- * @member {vttime.ITime|null|undefined} updated_at - * @memberof vtctldata.Workflow.Stream.Log - * @instance - */ - Log.prototype.updated_at = null; + /** + * Gets the default type url for GetBackupsRequest + * @function getTypeUrl + * @memberof vtctldata.GetBackupsRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + GetBackupsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.GetBackupsRequest"; + }; - /** - * Log message. - * @member {string} message - * @memberof vtctldata.Workflow.Stream.Log - * @instance - */ - Log.prototype.message = ""; + return GetBackupsRequest; + })(); - /** - * Log count. - * @member {number|Long} count - * @memberof vtctldata.Workflow.Stream.Log - * @instance - */ - Log.prototype.count = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + vtctldata.GetBackupsResponse = (function() { - /** - * Creates a new Log instance using the specified properties. - * @function create - * @memberof vtctldata.Workflow.Stream.Log - * @static - * @param {vtctldata.Workflow.Stream.ILog=} [properties] Properties to set - * @returns {vtctldata.Workflow.Stream.Log} Log instance - */ - Log.create = function create(properties) { - return new Log(properties); - }; + /** + * Properties of a GetBackupsResponse. + * @memberof vtctldata + * @interface IGetBackupsResponse + * @property {Array.|null} [backups] GetBackupsResponse backups + */ - /** - * Encodes the specified Log message. Does not implicitly {@link vtctldata.Workflow.Stream.Log.verify|verify} messages. 
- * @function encode - * @memberof vtctldata.Workflow.Stream.Log - * @static - * @param {vtctldata.Workflow.Stream.ILog} message Log message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - Log.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.id != null && Object.hasOwnProperty.call(message, "id")) - writer.uint32(/* id 1, wireType 0 =*/8).int64(message.id); - if (message.stream_id != null && Object.hasOwnProperty.call(message, "stream_id")) - writer.uint32(/* id 2, wireType 0 =*/16).int64(message.stream_id); - if (message.type != null && Object.hasOwnProperty.call(message, "type")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.type); - if (message.state != null && Object.hasOwnProperty.call(message, "state")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.state); - if (message.created_at != null && Object.hasOwnProperty.call(message, "created_at")) - $root.vttime.Time.encode(message.created_at, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); - if (message.updated_at != null && Object.hasOwnProperty.call(message, "updated_at")) - $root.vttime.Time.encode(message.updated_at, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); - if (message.message != null && Object.hasOwnProperty.call(message, "message")) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.message); - if (message.count != null && Object.hasOwnProperty.call(message, "count")) - writer.uint32(/* id 8, wireType 0 =*/64).int64(message.count); - return writer; - }; + /** + * Constructs a new GetBackupsResponse. + * @memberof vtctldata + * @classdesc Represents a GetBackupsResponse. 
+ * @implements IGetBackupsResponse + * @constructor + * @param {vtctldata.IGetBackupsResponse=} [properties] Properties to set + */ + function GetBackupsResponse(properties) { + this.backups = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } - /** - * Encodes the specified Log message, length delimited. Does not implicitly {@link vtctldata.Workflow.Stream.Log.verify|verify} messages. - * @function encodeDelimited - * @memberof vtctldata.Workflow.Stream.Log - * @static - * @param {vtctldata.Workflow.Stream.ILog} message Log message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - Log.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; + /** + * GetBackupsResponse backups. + * @member {Array.} backups + * @memberof vtctldata.GetBackupsResponse + * @instance + */ + GetBackupsResponse.prototype.backups = $util.emptyArray; - /** - * Decodes a Log message from the specified reader or buffer. - * @function decode - * @memberof vtctldata.Workflow.Stream.Log - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.Workflow.Stream.Log} Log - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - Log.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.Workflow.Stream.Log(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.id = reader.int64(); - break; - } - case 2: { - message.stream_id = reader.int64(); - break; - } - case 3: { - message.type = reader.string(); - break; - } - case 4: { - message.state = reader.string(); - break; - } - case 5: { - message.created_at = $root.vttime.Time.decode(reader, reader.uint32()); - break; - } - case 6: { - message.updated_at = $root.vttime.Time.decode(reader, reader.uint32()); - break; - } - case 7: { - message.message = reader.string(); - break; - } - case 8: { - message.count = reader.int64(); - break; - } - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; + /** + * Creates a new GetBackupsResponse instance using the specified properties. + * @function create + * @memberof vtctldata.GetBackupsResponse + * @static + * @param {vtctldata.IGetBackupsResponse=} [properties] Properties to set + * @returns {vtctldata.GetBackupsResponse} GetBackupsResponse instance + */ + GetBackupsResponse.create = function create(properties) { + return new GetBackupsResponse(properties); + }; - /** - * Decodes a Log message from the specified reader or buffer, length delimited. - * @function decodeDelimited - * @memberof vtctldata.Workflow.Stream.Log - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.Workflow.Stream.Log} Log - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - Log.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; + /** + * Encodes the specified GetBackupsResponse message. Does not implicitly {@link vtctldata.GetBackupsResponse.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.GetBackupsResponse + * @static + * @param {vtctldata.IGetBackupsResponse} message GetBackupsResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetBackupsResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.backups != null && message.backups.length) + for (let i = 0; i < message.backups.length; ++i) + $root.mysqlctl.BackupInfo.encode(message.backups[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; - /** - * Verifies a Log message. - * @function verify - * @memberof vtctldata.Workflow.Stream.Log - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - Log.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.id != null && message.hasOwnProperty("id")) - if (!$util.isInteger(message.id) && !(message.id && $util.isInteger(message.id.low) && $util.isInteger(message.id.high))) - return "id: integer|Long expected"; - if (message.stream_id != null && message.hasOwnProperty("stream_id")) - if (!$util.isInteger(message.stream_id) && !(message.stream_id && $util.isInteger(message.stream_id.low) && $util.isInteger(message.stream_id.high))) - return "stream_id: integer|Long expected"; - if (message.type != null && message.hasOwnProperty("type")) - if (!$util.isString(message.type)) - return "type: string expected"; - if (message.state != null && message.hasOwnProperty("state")) - if (!$util.isString(message.state)) - return "state: string expected"; - if (message.created_at != null && message.hasOwnProperty("created_at")) { - let error = $root.vttime.Time.verify(message.created_at); - if (error) - return "created_at." 
+ error; - } - if (message.updated_at != null && message.hasOwnProperty("updated_at")) { - let error = $root.vttime.Time.verify(message.updated_at); - if (error) - return "updated_at." + error; - } - if (message.message != null && message.hasOwnProperty("message")) - if (!$util.isString(message.message)) - return "message: string expected"; - if (message.count != null && message.hasOwnProperty("count")) - if (!$util.isInteger(message.count) && !(message.count && $util.isInteger(message.count.low) && $util.isInteger(message.count.high))) - return "count: integer|Long expected"; - return null; - }; + /** + * Encodes the specified GetBackupsResponse message, length delimited. Does not implicitly {@link vtctldata.GetBackupsResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.GetBackupsResponse + * @static + * @param {vtctldata.IGetBackupsResponse} message GetBackupsResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetBackupsResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; - /** - * Creates a Log message from a plain object. Also converts values to their respective internal types. 
- * @function fromObject - * @memberof vtctldata.Workflow.Stream.Log - * @static - * @param {Object.} object Plain object - * @returns {vtctldata.Workflow.Stream.Log} Log - */ - Log.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.Workflow.Stream.Log) - return object; - let message = new $root.vtctldata.Workflow.Stream.Log(); - if (object.id != null) - if ($util.Long) - (message.id = $util.Long.fromValue(object.id)).unsigned = false; - else if (typeof object.id === "string") - message.id = parseInt(object.id, 10); - else if (typeof object.id === "number") - message.id = object.id; - else if (typeof object.id === "object") - message.id = new $util.LongBits(object.id.low >>> 0, object.id.high >>> 0).toNumber(); - if (object.stream_id != null) - if ($util.Long) - (message.stream_id = $util.Long.fromValue(object.stream_id)).unsigned = false; - else if (typeof object.stream_id === "string") - message.stream_id = parseInt(object.stream_id, 10); - else if (typeof object.stream_id === "number") - message.stream_id = object.stream_id; - else if (typeof object.stream_id === "object") - message.stream_id = new $util.LongBits(object.stream_id.low >>> 0, object.stream_id.high >>> 0).toNumber(); - if (object.type != null) - message.type = String(object.type); - if (object.state != null) - message.state = String(object.state); - if (object.created_at != null) { - if (typeof object.created_at !== "object") - throw TypeError(".vtctldata.Workflow.Stream.Log.created_at: object expected"); - message.created_at = $root.vttime.Time.fromObject(object.created_at); - } - if (object.updated_at != null) { - if (typeof object.updated_at !== "object") - throw TypeError(".vtctldata.Workflow.Stream.Log.updated_at: object expected"); - message.updated_at = $root.vttime.Time.fromObject(object.updated_at); + /** + * Decodes a GetBackupsResponse message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.GetBackupsResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.GetBackupsResponse} GetBackupsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GetBackupsResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetBackupsResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.backups && message.backups.length)) + message.backups = []; + message.backups.push($root.mysqlctl.BackupInfo.decode(reader, reader.uint32())); + break; } - if (object.message != null) - message.message = String(object.message); - if (object.count != null) - if ($util.Long) - (message.count = $util.Long.fromValue(object.count)).unsigned = false; - else if (typeof object.count === "string") - message.count = parseInt(object.count, 10); - else if (typeof object.count === "number") - message.count = object.count; - else if (typeof object.count === "object") - message.count = new $util.LongBits(object.count.low >>> 0, object.count.high >>> 0).toNumber(); - return message; - }; + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; - /** - * Creates a plain object from a Log message. Also converts values to other types if specified. 
- * @function toObject - * @memberof vtctldata.Workflow.Stream.Log - * @static - * @param {vtctldata.Workflow.Stream.Log} message Log - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - Log.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.id = options.longs === String ? "0" : 0; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.stream_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.stream_id = options.longs === String ? "0" : 0; - object.type = ""; - object.state = ""; - object.created_at = null; - object.updated_at = null; - object.message = ""; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.count = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.count = options.longs === String ? "0" : 0; - } - if (message.id != null && message.hasOwnProperty("id")) - if (typeof message.id === "number") - object.id = options.longs === String ? String(message.id) : message.id; - else - object.id = options.longs === String ? $util.Long.prototype.toString.call(message.id) : options.longs === Number ? new $util.LongBits(message.id.low >>> 0, message.id.high >>> 0).toNumber() : message.id; - if (message.stream_id != null && message.hasOwnProperty("stream_id")) - if (typeof message.stream_id === "number") - object.stream_id = options.longs === String ? String(message.stream_id) : message.stream_id; - else - object.stream_id = options.longs === String ? $util.Long.prototype.toString.call(message.stream_id) : options.longs === Number ? 
new $util.LongBits(message.stream_id.low >>> 0, message.stream_id.high >>> 0).toNumber() : message.stream_id; - if (message.type != null && message.hasOwnProperty("type")) - object.type = message.type; - if (message.state != null && message.hasOwnProperty("state")) - object.state = message.state; - if (message.created_at != null && message.hasOwnProperty("created_at")) - object.created_at = $root.vttime.Time.toObject(message.created_at, options); - if (message.updated_at != null && message.hasOwnProperty("updated_at")) - object.updated_at = $root.vttime.Time.toObject(message.updated_at, options); - if (message.message != null && message.hasOwnProperty("message")) - object.message = message.message; - if (message.count != null && message.hasOwnProperty("count")) - if (typeof message.count === "number") - object.count = options.longs === String ? String(message.count) : message.count; - else - object.count = options.longs === String ? $util.Long.prototype.toString.call(message.count) : options.longs === Number ? new $util.LongBits(message.count.low >>> 0, message.count.high >>> 0).toNumber() : message.count; - return object; - }; + /** + * Decodes a GetBackupsResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.GetBackupsResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.GetBackupsResponse} GetBackupsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GetBackupsResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; - /** - * Converts this Log to JSON. 
- * @function toJSON - * @memberof vtctldata.Workflow.Stream.Log - * @instance - * @returns {Object.} JSON object - */ - Log.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; + /** + * Verifies a GetBackupsResponse message. + * @function verify + * @memberof vtctldata.GetBackupsResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + GetBackupsResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.backups != null && message.hasOwnProperty("backups")) { + if (!Array.isArray(message.backups)) + return "backups: array expected"; + for (let i = 0; i < message.backups.length; ++i) { + let error = $root.mysqlctl.BackupInfo.verify(message.backups[i]); + if (error) + return "backups." + error; + } + } + return null; + }; - /** - * Gets the default type url for Log - * @function getTypeUrl - * @memberof vtctldata.Workflow.Stream.Log - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - Log.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/vtctldata.Workflow.Stream.Log"; - }; + /** + * Creates a GetBackupsResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.GetBackupsResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.GetBackupsResponse} GetBackupsResponse + */ + GetBackupsResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetBackupsResponse) + return object; + let message = new $root.vtctldata.GetBackupsResponse(); + if (object.backups) { + if (!Array.isArray(object.backups)) + throw TypeError(".vtctldata.GetBackupsResponse.backups: array expected"); + message.backups = []; + for (let i = 0; i < object.backups.length; ++i) { + if (typeof object.backups[i] !== "object") + throw TypeError(".vtctldata.GetBackupsResponse.backups: object expected"); + message.backups[i] = $root.mysqlctl.BackupInfo.fromObject(object.backups[i]); + } + } + return message; + }; - return Log; - })(); + /** + * Creates a plain object from a GetBackupsResponse message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.GetBackupsResponse + * @static + * @param {vtctldata.GetBackupsResponse} message GetBackupsResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + GetBackupsResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.backups = []; + if (message.backups && message.backups.length) { + object.backups = []; + for (let j = 0; j < message.backups.length; ++j) + object.backups[j] = $root.mysqlctl.BackupInfo.toObject(message.backups[j], options); + } + return object; + }; - return Stream; - })(); + /** + * Converts this GetBackupsResponse to JSON. 
+ * @function toJSON + * @memberof vtctldata.GetBackupsResponse + * @instance + * @returns {Object.} JSON object + */ + GetBackupsResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; - return Workflow; + /** + * Gets the default type url for GetBackupsResponse + * @function getTypeUrl + * @memberof vtctldata.GetBackupsResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + GetBackupsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.GetBackupsResponse"; + }; + + return GetBackupsResponse; })(); - vtctldata.AddCellInfoRequest = (function() { + vtctldata.GetCellInfoRequest = (function() { /** - * Properties of an AddCellInfoRequest. + * Properties of a GetCellInfoRequest. * @memberof vtctldata - * @interface IAddCellInfoRequest - * @property {string|null} [name] AddCellInfoRequest name - * @property {topodata.ICellInfo|null} [cell_info] AddCellInfoRequest cell_info + * @interface IGetCellInfoRequest + * @property {string|null} [cell] GetCellInfoRequest cell */ /** - * Constructs a new AddCellInfoRequest. + * Constructs a new GetCellInfoRequest. * @memberof vtctldata - * @classdesc Represents an AddCellInfoRequest. - * @implements IAddCellInfoRequest + * @classdesc Represents a GetCellInfoRequest. 
+ * @implements IGetCellInfoRequest * @constructor - * @param {vtctldata.IAddCellInfoRequest=} [properties] Properties to set + * @param {vtctldata.IGetCellInfoRequest=} [properties] Properties to set */ - function AddCellInfoRequest(properties) { + function GetCellInfoRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -101190,88 +121945,277 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * AddCellInfoRequest name. - * @member {string} name - * @memberof vtctldata.AddCellInfoRequest + * GetCellInfoRequest cell. + * @member {string} cell + * @memberof vtctldata.GetCellInfoRequest * @instance */ - AddCellInfoRequest.prototype.name = ""; + GetCellInfoRequest.prototype.cell = ""; /** - * AddCellInfoRequest cell_info. + * Creates a new GetCellInfoRequest instance using the specified properties. + * @function create + * @memberof vtctldata.GetCellInfoRequest + * @static + * @param {vtctldata.IGetCellInfoRequest=} [properties] Properties to set + * @returns {vtctldata.GetCellInfoRequest} GetCellInfoRequest instance + */ + GetCellInfoRequest.create = function create(properties) { + return new GetCellInfoRequest(properties); + }; + + /** + * Encodes the specified GetCellInfoRequest message. Does not implicitly {@link vtctldata.GetCellInfoRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.GetCellInfoRequest + * @static + * @param {vtctldata.IGetCellInfoRequest} message GetCellInfoRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetCellInfoRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.cell != null && Object.hasOwnProperty.call(message, "cell")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.cell); + return writer; + }; + + /** + * Encodes the specified GetCellInfoRequest message, length delimited. Does not implicitly {@link vtctldata.GetCellInfoRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.GetCellInfoRequest + * @static + * @param {vtctldata.IGetCellInfoRequest} message GetCellInfoRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetCellInfoRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a GetCellInfoRequest message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.GetCellInfoRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.GetCellInfoRequest} GetCellInfoRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GetCellInfoRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetCellInfoRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.cell = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a GetCellInfoRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.GetCellInfoRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.GetCellInfoRequest} GetCellInfoRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GetCellInfoRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a GetCellInfoRequest message. + * @function verify + * @memberof vtctldata.GetCellInfoRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + GetCellInfoRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.cell != null && message.hasOwnProperty("cell")) + if (!$util.isString(message.cell)) + return "cell: string expected"; + return null; + }; + + /** + * Creates a GetCellInfoRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.GetCellInfoRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.GetCellInfoRequest} GetCellInfoRequest + */ + GetCellInfoRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetCellInfoRequest) + return object; + let message = new $root.vtctldata.GetCellInfoRequest(); + if (object.cell != null) + message.cell = String(object.cell); + return message; + }; + + /** + * Creates a plain object from a GetCellInfoRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.GetCellInfoRequest + * @static + * @param {vtctldata.GetCellInfoRequest} message GetCellInfoRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + GetCellInfoRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.cell = ""; + if (message.cell != null && message.hasOwnProperty("cell")) + object.cell = message.cell; + return object; + }; + + /** + * Converts this GetCellInfoRequest to JSON. 
+ * @function toJSON + * @memberof vtctldata.GetCellInfoRequest + * @instance + * @returns {Object.} JSON object + */ + GetCellInfoRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for GetCellInfoRequest + * @function getTypeUrl + * @memberof vtctldata.GetCellInfoRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + GetCellInfoRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.GetCellInfoRequest"; + }; + + return GetCellInfoRequest; + })(); + + vtctldata.GetCellInfoResponse = (function() { + + /** + * Properties of a GetCellInfoResponse. + * @memberof vtctldata + * @interface IGetCellInfoResponse + * @property {topodata.ICellInfo|null} [cell_info] GetCellInfoResponse cell_info + */ + + /** + * Constructs a new GetCellInfoResponse. + * @memberof vtctldata + * @classdesc Represents a GetCellInfoResponse. + * @implements IGetCellInfoResponse + * @constructor + * @param {vtctldata.IGetCellInfoResponse=} [properties] Properties to set + */ + function GetCellInfoResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * GetCellInfoResponse cell_info. * @member {topodata.ICellInfo|null|undefined} cell_info - * @memberof vtctldata.AddCellInfoRequest + * @memberof vtctldata.GetCellInfoResponse * @instance */ - AddCellInfoRequest.prototype.cell_info = null; + GetCellInfoResponse.prototype.cell_info = null; /** - * Creates a new AddCellInfoRequest instance using the specified properties. + * Creates a new GetCellInfoResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.AddCellInfoRequest + * @memberof vtctldata.GetCellInfoResponse * @static - * @param {vtctldata.IAddCellInfoRequest=} [properties] Properties to set - * @returns {vtctldata.AddCellInfoRequest} AddCellInfoRequest instance + * @param {vtctldata.IGetCellInfoResponse=} [properties] Properties to set + * @returns {vtctldata.GetCellInfoResponse} GetCellInfoResponse instance */ - AddCellInfoRequest.create = function create(properties) { - return new AddCellInfoRequest(properties); + GetCellInfoResponse.create = function create(properties) { + return new GetCellInfoResponse(properties); }; /** - * Encodes the specified AddCellInfoRequest message. Does not implicitly {@link vtctldata.AddCellInfoRequest.verify|verify} messages. + * Encodes the specified GetCellInfoResponse message. Does not implicitly {@link vtctldata.GetCellInfoResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.AddCellInfoRequest + * @memberof vtctldata.GetCellInfoResponse * @static - * @param {vtctldata.IAddCellInfoRequest} message AddCellInfoRequest message or plain object to encode + * @param {vtctldata.IGetCellInfoResponse} message GetCellInfoResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - AddCellInfoRequest.encode = function encode(message, writer) { + GetCellInfoResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.name != null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); if (message.cell_info != null && Object.hasOwnProperty.call(message, "cell_info")) - $root.topodata.CellInfo.encode(message.cell_info, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + $root.topodata.CellInfo.encode(message.cell_info, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified 
AddCellInfoRequest message, length delimited. Does not implicitly {@link vtctldata.AddCellInfoRequest.verify|verify} messages. + * Encodes the specified GetCellInfoResponse message, length delimited. Does not implicitly {@link vtctldata.GetCellInfoResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.AddCellInfoRequest + * @memberof vtctldata.GetCellInfoResponse * @static - * @param {vtctldata.IAddCellInfoRequest} message AddCellInfoRequest message or plain object to encode + * @param {vtctldata.IGetCellInfoResponse} message GetCellInfoResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - AddCellInfoRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetCellInfoResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an AddCellInfoRequest message from the specified reader or buffer. + * Decodes a GetCellInfoResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.AddCellInfoRequest + * @memberof vtctldata.GetCellInfoResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.AddCellInfoRequest} AddCellInfoRequest + * @returns {vtctldata.GetCellInfoResponse} GetCellInfoResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AddCellInfoRequest.decode = function decode(reader, length) { + GetCellInfoResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.AddCellInfoRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetCellInfoResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.name = reader.string(); - break; - } - case 2: { message.cell_info = $root.topodata.CellInfo.decode(reader, reader.uint32()); break; } @@ -101284,35 +122228,32 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an AddCellInfoRequest message from the specified reader or buffer, length delimited. + * Decodes a GetCellInfoResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.AddCellInfoRequest + * @memberof vtctldata.GetCellInfoResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.AddCellInfoRequest} AddCellInfoRequest + * @returns {vtctldata.GetCellInfoResponse} GetCellInfoResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AddCellInfoRequest.decodeDelimited = function decodeDelimited(reader) { + GetCellInfoResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an AddCellInfoRequest message. + * Verifies a GetCellInfoResponse message. 
* @function verify - * @memberof vtctldata.AddCellInfoRequest + * @memberof vtctldata.GetCellInfoResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - AddCellInfoRequest.verify = function verify(message) { + GetCellInfoResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; if (message.cell_info != null && message.hasOwnProperty("cell_info")) { let error = $root.topodata.CellInfo.verify(message.cell_info); if (error) @@ -101322,97 +122263,91 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Creates an AddCellInfoRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetCellInfoResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.AddCellInfoRequest + * @memberof vtctldata.GetCellInfoResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.AddCellInfoRequest} AddCellInfoRequest + * @returns {vtctldata.GetCellInfoResponse} GetCellInfoResponse */ - AddCellInfoRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.AddCellInfoRequest) + GetCellInfoResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetCellInfoResponse) return object; - let message = new $root.vtctldata.AddCellInfoRequest(); - if (object.name != null) - message.name = String(object.name); + let message = new $root.vtctldata.GetCellInfoResponse(); if (object.cell_info != null) { if (typeof object.cell_info !== "object") - throw TypeError(".vtctldata.AddCellInfoRequest.cell_info: object expected"); + throw TypeError(".vtctldata.GetCellInfoResponse.cell_info: object expected"); message.cell_info = $root.topodata.CellInfo.fromObject(object.cell_info); } return message; }; /** - * Creates a plain object from an AddCellInfoRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetCellInfoResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.AddCellInfoRequest + * @memberof vtctldata.GetCellInfoResponse * @static - * @param {vtctldata.AddCellInfoRequest} message AddCellInfoRequest + * @param {vtctldata.GetCellInfoResponse} message GetCellInfoResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - AddCellInfoRequest.toObject = function toObject(message, options) { + GetCellInfoResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.name = ""; + if (options.defaults) object.cell_info = null; - } - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; if (message.cell_info != null && message.hasOwnProperty("cell_info")) object.cell_info = $root.topodata.CellInfo.toObject(message.cell_info, options); return object; }; /** - * Converts this AddCellInfoRequest to JSON. + * Converts this GetCellInfoResponse to JSON. 
* @function toJSON - * @memberof vtctldata.AddCellInfoRequest + * @memberof vtctldata.GetCellInfoResponse * @instance * @returns {Object.} JSON object */ - AddCellInfoRequest.prototype.toJSON = function toJSON() { + GetCellInfoResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for AddCellInfoRequest + * Gets the default type url for GetCellInfoResponse * @function getTypeUrl - * @memberof vtctldata.AddCellInfoRequest + * @memberof vtctldata.GetCellInfoResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - AddCellInfoRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetCellInfoResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.AddCellInfoRequest"; + return typeUrlPrefix + "/vtctldata.GetCellInfoResponse"; }; - return AddCellInfoRequest; + return GetCellInfoResponse; })(); - vtctldata.AddCellInfoResponse = (function() { + vtctldata.GetCellInfoNamesRequest = (function() { /** - * Properties of an AddCellInfoResponse. + * Properties of a GetCellInfoNamesRequest. * @memberof vtctldata - * @interface IAddCellInfoResponse + * @interface IGetCellInfoNamesRequest */ /** - * Constructs a new AddCellInfoResponse. + * Constructs a new GetCellInfoNamesRequest. * @memberof vtctldata - * @classdesc Represents an AddCellInfoResponse. - * @implements IAddCellInfoResponse + * @classdesc Represents a GetCellInfoNamesRequest. 
+ * @implements IGetCellInfoNamesRequest * @constructor - * @param {vtctldata.IAddCellInfoResponse=} [properties] Properties to set + * @param {vtctldata.IGetCellInfoNamesRequest=} [properties] Properties to set */ - function AddCellInfoResponse(properties) { + function GetCellInfoNamesRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -101420,60 +122355,60 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new AddCellInfoResponse instance using the specified properties. + * Creates a new GetCellInfoNamesRequest instance using the specified properties. * @function create - * @memberof vtctldata.AddCellInfoResponse + * @memberof vtctldata.GetCellInfoNamesRequest * @static - * @param {vtctldata.IAddCellInfoResponse=} [properties] Properties to set - * @returns {vtctldata.AddCellInfoResponse} AddCellInfoResponse instance + * @param {vtctldata.IGetCellInfoNamesRequest=} [properties] Properties to set + * @returns {vtctldata.GetCellInfoNamesRequest} GetCellInfoNamesRequest instance */ - AddCellInfoResponse.create = function create(properties) { - return new AddCellInfoResponse(properties); + GetCellInfoNamesRequest.create = function create(properties) { + return new GetCellInfoNamesRequest(properties); }; /** - * Encodes the specified AddCellInfoResponse message. Does not implicitly {@link vtctldata.AddCellInfoResponse.verify|verify} messages. + * Encodes the specified GetCellInfoNamesRequest message. Does not implicitly {@link vtctldata.GetCellInfoNamesRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.AddCellInfoResponse + * @memberof vtctldata.GetCellInfoNamesRequest * @static - * @param {vtctldata.IAddCellInfoResponse} message AddCellInfoResponse message or plain object to encode + * @param {vtctldata.IGetCellInfoNamesRequest} message GetCellInfoNamesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - AddCellInfoResponse.encode = function encode(message, writer) { + GetCellInfoNamesRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); return writer; }; /** - * Encodes the specified AddCellInfoResponse message, length delimited. Does not implicitly {@link vtctldata.AddCellInfoResponse.verify|verify} messages. + * Encodes the specified GetCellInfoNamesRequest message, length delimited. Does not implicitly {@link vtctldata.GetCellInfoNamesRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.AddCellInfoResponse + * @memberof vtctldata.GetCellInfoNamesRequest * @static - * @param {vtctldata.IAddCellInfoResponse} message AddCellInfoResponse message or plain object to encode + * @param {vtctldata.IGetCellInfoNamesRequest} message GetCellInfoNamesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - AddCellInfoResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetCellInfoNamesRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an AddCellInfoResponse message from the specified reader or buffer. + * Decodes a GetCellInfoNamesRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.AddCellInfoResponse + * @memberof vtctldata.GetCellInfoNamesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.AddCellInfoResponse} AddCellInfoResponse + * @returns {vtctldata.GetCellInfoNamesRequest} GetCellInfoNamesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AddCellInfoResponse.decode = function decode(reader, length) { + GetCellInfoNamesRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.AddCellInfoResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetCellInfoNamesRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -101486,111 +122421,110 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an AddCellInfoResponse message from the specified reader or buffer, length delimited. + * Decodes a GetCellInfoNamesRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.AddCellInfoResponse + * @memberof vtctldata.GetCellInfoNamesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.AddCellInfoResponse} AddCellInfoResponse + * @returns {vtctldata.GetCellInfoNamesRequest} GetCellInfoNamesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AddCellInfoResponse.decodeDelimited = function decodeDelimited(reader) { + GetCellInfoNamesRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an AddCellInfoResponse message. + * Verifies a GetCellInfoNamesRequest message. * @function verify - * @memberof vtctldata.AddCellInfoResponse + * @memberof vtctldata.GetCellInfoNamesRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - AddCellInfoResponse.verify = function verify(message) { + GetCellInfoNamesRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; return null; }; /** - * Creates an AddCellInfoResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetCellInfoNamesRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.AddCellInfoResponse + * @memberof vtctldata.GetCellInfoNamesRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.AddCellInfoResponse} AddCellInfoResponse + * @returns {vtctldata.GetCellInfoNamesRequest} GetCellInfoNamesRequest */ - AddCellInfoResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.AddCellInfoResponse) + GetCellInfoNamesRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetCellInfoNamesRequest) return object; - return new $root.vtctldata.AddCellInfoResponse(); + return new $root.vtctldata.GetCellInfoNamesRequest(); }; /** - * Creates a plain object from an AddCellInfoResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetCellInfoNamesRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.AddCellInfoResponse + * @memberof vtctldata.GetCellInfoNamesRequest * @static - * @param {vtctldata.AddCellInfoResponse} message AddCellInfoResponse + * @param {vtctldata.GetCellInfoNamesRequest} message GetCellInfoNamesRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - AddCellInfoResponse.toObject = function toObject() { + GetCellInfoNamesRequest.toObject = function toObject() { return {}; }; /** - * Converts this AddCellInfoResponse to JSON. + * Converts this GetCellInfoNamesRequest to JSON. 
* @function toJSON - * @memberof vtctldata.AddCellInfoResponse + * @memberof vtctldata.GetCellInfoNamesRequest * @instance * @returns {Object.} JSON object */ - AddCellInfoResponse.prototype.toJSON = function toJSON() { + GetCellInfoNamesRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for AddCellInfoResponse + * Gets the default type url for GetCellInfoNamesRequest * @function getTypeUrl - * @memberof vtctldata.AddCellInfoResponse + * @memberof vtctldata.GetCellInfoNamesRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - AddCellInfoResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetCellInfoNamesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.AddCellInfoResponse"; + return typeUrlPrefix + "/vtctldata.GetCellInfoNamesRequest"; }; - return AddCellInfoResponse; + return GetCellInfoNamesRequest; })(); - vtctldata.AddCellsAliasRequest = (function() { + vtctldata.GetCellInfoNamesResponse = (function() { /** - * Properties of an AddCellsAliasRequest. + * Properties of a GetCellInfoNamesResponse. * @memberof vtctldata - * @interface IAddCellsAliasRequest - * @property {string|null} [name] AddCellsAliasRequest name - * @property {Array.|null} [cells] AddCellsAliasRequest cells + * @interface IGetCellInfoNamesResponse + * @property {Array.|null} [names] GetCellInfoNamesResponse names */ /** - * Constructs a new AddCellsAliasRequest. + * Constructs a new GetCellInfoNamesResponse. * @memberof vtctldata - * @classdesc Represents an AddCellsAliasRequest. - * @implements IAddCellsAliasRequest + * @classdesc Represents a GetCellInfoNamesResponse. 
+ * @implements IGetCellInfoNamesResponse * @constructor - * @param {vtctldata.IAddCellsAliasRequest=} [properties] Properties to set + * @param {vtctldata.IGetCellInfoNamesResponse=} [properties] Properties to set */ - function AddCellsAliasRequest(properties) { - this.cells = []; + function GetCellInfoNamesResponse(properties) { + this.names = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -101598,92 +122532,78 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * AddCellsAliasRequest name. - * @member {string} name - * @memberof vtctldata.AddCellsAliasRequest - * @instance - */ - AddCellsAliasRequest.prototype.name = ""; - - /** - * AddCellsAliasRequest cells. - * @member {Array.} cells - * @memberof vtctldata.AddCellsAliasRequest + * GetCellInfoNamesResponse names. + * @member {Array.} names + * @memberof vtctldata.GetCellInfoNamesResponse * @instance */ - AddCellsAliasRequest.prototype.cells = $util.emptyArray; + GetCellInfoNamesResponse.prototype.names = $util.emptyArray; /** - * Creates a new AddCellsAliasRequest instance using the specified properties. + * Creates a new GetCellInfoNamesResponse instance using the specified properties. * @function create - * @memberof vtctldata.AddCellsAliasRequest + * @memberof vtctldata.GetCellInfoNamesResponse * @static - * @param {vtctldata.IAddCellsAliasRequest=} [properties] Properties to set - * @returns {vtctldata.AddCellsAliasRequest} AddCellsAliasRequest instance + * @param {vtctldata.IGetCellInfoNamesResponse=} [properties] Properties to set + * @returns {vtctldata.GetCellInfoNamesResponse} GetCellInfoNamesResponse instance */ - AddCellsAliasRequest.create = function create(properties) { - return new AddCellsAliasRequest(properties); + GetCellInfoNamesResponse.create = function create(properties) { + return new GetCellInfoNamesResponse(properties); }; /** - * Encodes the specified AddCellsAliasRequest message. 
Does not implicitly {@link vtctldata.AddCellsAliasRequest.verify|verify} messages. + * Encodes the specified GetCellInfoNamesResponse message. Does not implicitly {@link vtctldata.GetCellInfoNamesResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.AddCellsAliasRequest + * @memberof vtctldata.GetCellInfoNamesResponse * @static - * @param {vtctldata.IAddCellsAliasRequest} message AddCellsAliasRequest message or plain object to encode + * @param {vtctldata.IGetCellInfoNamesResponse} message GetCellInfoNamesResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - AddCellsAliasRequest.encode = function encode(message, writer) { + GetCellInfoNamesResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.name != null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); - if (message.cells != null && message.cells.length) - for (let i = 0; i < message.cells.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.cells[i]); + if (message.names != null && message.names.length) + for (let i = 0; i < message.names.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.names[i]); return writer; }; /** - * Encodes the specified AddCellsAliasRequest message, length delimited. Does not implicitly {@link vtctldata.AddCellsAliasRequest.verify|verify} messages. + * Encodes the specified GetCellInfoNamesResponse message, length delimited. Does not implicitly {@link vtctldata.GetCellInfoNamesResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.AddCellsAliasRequest + * @memberof vtctldata.GetCellInfoNamesResponse * @static - * @param {vtctldata.IAddCellsAliasRequest} message AddCellsAliasRequest message or plain object to encode + * @param {vtctldata.IGetCellInfoNamesResponse} message GetCellInfoNamesResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - AddCellsAliasRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetCellInfoNamesResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an AddCellsAliasRequest message from the specified reader or buffer. + * Decodes a GetCellInfoNamesResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.AddCellsAliasRequest + * @memberof vtctldata.GetCellInfoNamesResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.AddCellsAliasRequest} AddCellsAliasRequest + * @returns {vtctldata.GetCellInfoNamesResponse} GetCellInfoNamesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AddCellsAliasRequest.decode = function decode(reader, length) { + GetCellInfoNamesResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.AddCellsAliasRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetCellInfoNamesResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.name = reader.string(); - break; - } - case 2: { - if (!(message.cells && message.cells.length)) - message.cells = []; - message.cells.push(reader.string()); + if (!(message.names && message.names.length)) + message.names = []; + message.names.push(reader.string()); break; } default: @@ -101695,142 +122615,133 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an AddCellsAliasRequest message from the specified reader or buffer, length delimited. + * Decodes a GetCellInfoNamesResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.AddCellsAliasRequest + * @memberof vtctldata.GetCellInfoNamesResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.AddCellsAliasRequest} AddCellsAliasRequest + * @returns {vtctldata.GetCellInfoNamesResponse} GetCellInfoNamesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AddCellsAliasRequest.decodeDelimited = function decodeDelimited(reader) { + GetCellInfoNamesResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an AddCellsAliasRequest message. + * Verifies a GetCellInfoNamesResponse message. 
* @function verify - * @memberof vtctldata.AddCellsAliasRequest + * @memberof vtctldata.GetCellInfoNamesResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - AddCellsAliasRequest.verify = function verify(message) { + GetCellInfoNamesResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; - if (message.cells != null && message.hasOwnProperty("cells")) { - if (!Array.isArray(message.cells)) - return "cells: array expected"; - for (let i = 0; i < message.cells.length; ++i) - if (!$util.isString(message.cells[i])) - return "cells: string[] expected"; + if (message.names != null && message.hasOwnProperty("names")) { + if (!Array.isArray(message.names)) + return "names: array expected"; + for (let i = 0; i < message.names.length; ++i) + if (!$util.isString(message.names[i])) + return "names: string[] expected"; } return null; }; /** - * Creates an AddCellsAliasRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetCellInfoNamesResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.AddCellsAliasRequest + * @memberof vtctldata.GetCellInfoNamesResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.AddCellsAliasRequest} AddCellsAliasRequest + * @returns {vtctldata.GetCellInfoNamesResponse} GetCellInfoNamesResponse */ - AddCellsAliasRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.AddCellsAliasRequest) + GetCellInfoNamesResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetCellInfoNamesResponse) return object; - let message = new $root.vtctldata.AddCellsAliasRequest(); - if (object.name != null) - message.name = String(object.name); - if (object.cells) { - if (!Array.isArray(object.cells)) - throw TypeError(".vtctldata.AddCellsAliasRequest.cells: array expected"); - message.cells = []; - for (let i = 0; i < object.cells.length; ++i) - message.cells[i] = String(object.cells[i]); + let message = new $root.vtctldata.GetCellInfoNamesResponse(); + if (object.names) { + if (!Array.isArray(object.names)) + throw TypeError(".vtctldata.GetCellInfoNamesResponse.names: array expected"); + message.names = []; + for (let i = 0; i < object.names.length; ++i) + message.names[i] = String(object.names[i]); } return message; }; /** - * Creates a plain object from an AddCellsAliasRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetCellInfoNamesResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.AddCellsAliasRequest + * @memberof vtctldata.GetCellInfoNamesResponse * @static - * @param {vtctldata.AddCellsAliasRequest} message AddCellsAliasRequest + * @param {vtctldata.GetCellInfoNamesResponse} message GetCellInfoNamesResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - AddCellsAliasRequest.toObject = function toObject(message, options) { + GetCellInfoNamesResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) - object.cells = []; - if (options.defaults) - object.name = ""; - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; - if (message.cells && message.cells.length) { - object.cells = []; - for (let j = 0; j < message.cells.length; ++j) - object.cells[j] = message.cells[j]; + object.names = []; + if (message.names && message.names.length) { + object.names = []; + for (let j = 0; j < message.names.length; ++j) + object.names[j] = message.names[j]; } return object; }; /** - * Converts this AddCellsAliasRequest to JSON. + * Converts this GetCellInfoNamesResponse to JSON. 
* @function toJSON - * @memberof vtctldata.AddCellsAliasRequest + * @memberof vtctldata.GetCellInfoNamesResponse * @instance * @returns {Object.} JSON object */ - AddCellsAliasRequest.prototype.toJSON = function toJSON() { + GetCellInfoNamesResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for AddCellsAliasRequest + * Gets the default type url for GetCellInfoNamesResponse * @function getTypeUrl - * @memberof vtctldata.AddCellsAliasRequest + * @memberof vtctldata.GetCellInfoNamesResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - AddCellsAliasRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetCellInfoNamesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.AddCellsAliasRequest"; + return typeUrlPrefix + "/vtctldata.GetCellInfoNamesResponse"; }; - return AddCellsAliasRequest; + return GetCellInfoNamesResponse; })(); - vtctldata.AddCellsAliasResponse = (function() { + vtctldata.GetCellsAliasesRequest = (function() { /** - * Properties of an AddCellsAliasResponse. + * Properties of a GetCellsAliasesRequest. * @memberof vtctldata - * @interface IAddCellsAliasResponse + * @interface IGetCellsAliasesRequest */ /** - * Constructs a new AddCellsAliasResponse. + * Constructs a new GetCellsAliasesRequest. * @memberof vtctldata - * @classdesc Represents an AddCellsAliasResponse. - * @implements IAddCellsAliasResponse + * @classdesc Represents a GetCellsAliasesRequest. 
+ * @implements IGetCellsAliasesRequest * @constructor - * @param {vtctldata.IAddCellsAliasResponse=} [properties] Properties to set + * @param {vtctldata.IGetCellsAliasesRequest=} [properties] Properties to set */ - function AddCellsAliasResponse(properties) { + function GetCellsAliasesRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -101838,60 +122749,60 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new AddCellsAliasResponse instance using the specified properties. + * Creates a new GetCellsAliasesRequest instance using the specified properties. * @function create - * @memberof vtctldata.AddCellsAliasResponse + * @memberof vtctldata.GetCellsAliasesRequest * @static - * @param {vtctldata.IAddCellsAliasResponse=} [properties] Properties to set - * @returns {vtctldata.AddCellsAliasResponse} AddCellsAliasResponse instance + * @param {vtctldata.IGetCellsAliasesRequest=} [properties] Properties to set + * @returns {vtctldata.GetCellsAliasesRequest} GetCellsAliasesRequest instance */ - AddCellsAliasResponse.create = function create(properties) { - return new AddCellsAliasResponse(properties); + GetCellsAliasesRequest.create = function create(properties) { + return new GetCellsAliasesRequest(properties); }; /** - * Encodes the specified AddCellsAliasResponse message. Does not implicitly {@link vtctldata.AddCellsAliasResponse.verify|verify} messages. + * Encodes the specified GetCellsAliasesRequest message. Does not implicitly {@link vtctldata.GetCellsAliasesRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.AddCellsAliasResponse + * @memberof vtctldata.GetCellsAliasesRequest * @static - * @param {vtctldata.IAddCellsAliasResponse} message AddCellsAliasResponse message or plain object to encode + * @param {vtctldata.IGetCellsAliasesRequest} message GetCellsAliasesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - AddCellsAliasResponse.encode = function encode(message, writer) { + GetCellsAliasesRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); return writer; }; /** - * Encodes the specified AddCellsAliasResponse message, length delimited. Does not implicitly {@link vtctldata.AddCellsAliasResponse.verify|verify} messages. + * Encodes the specified GetCellsAliasesRequest message, length delimited. Does not implicitly {@link vtctldata.GetCellsAliasesRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.AddCellsAliasResponse + * @memberof vtctldata.GetCellsAliasesRequest * @static - * @param {vtctldata.IAddCellsAliasResponse} message AddCellsAliasResponse message or plain object to encode + * @param {vtctldata.IGetCellsAliasesRequest} message GetCellsAliasesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - AddCellsAliasResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetCellsAliasesRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an AddCellsAliasResponse message from the specified reader or buffer. + * Decodes a GetCellsAliasesRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.AddCellsAliasResponse + * @memberof vtctldata.GetCellsAliasesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.AddCellsAliasResponse} AddCellsAliasResponse + * @returns {vtctldata.GetCellsAliasesRequest} GetCellsAliasesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AddCellsAliasResponse.decode = function decode(reader, length) { + GetCellsAliasesRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.AddCellsAliasResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetCellsAliasesRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -101904,112 +122815,110 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an AddCellsAliasResponse message from the specified reader or buffer, length delimited. + * Decodes a GetCellsAliasesRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.AddCellsAliasResponse + * @memberof vtctldata.GetCellsAliasesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.AddCellsAliasResponse} AddCellsAliasResponse + * @returns {vtctldata.GetCellsAliasesRequest} GetCellsAliasesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - AddCellsAliasResponse.decodeDelimited = function decodeDelimited(reader) { + GetCellsAliasesRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an AddCellsAliasResponse message. + * Verifies a GetCellsAliasesRequest message. * @function verify - * @memberof vtctldata.AddCellsAliasResponse + * @memberof vtctldata.GetCellsAliasesRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - AddCellsAliasResponse.verify = function verify(message) { + GetCellsAliasesRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; return null; }; /** - * Creates an AddCellsAliasResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetCellsAliasesRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.AddCellsAliasResponse + * @memberof vtctldata.GetCellsAliasesRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.AddCellsAliasResponse} AddCellsAliasResponse + * @returns {vtctldata.GetCellsAliasesRequest} GetCellsAliasesRequest */ - AddCellsAliasResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.AddCellsAliasResponse) + GetCellsAliasesRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetCellsAliasesRequest) return object; - return new $root.vtctldata.AddCellsAliasResponse(); + return new $root.vtctldata.GetCellsAliasesRequest(); }; /** - * Creates a plain object from an AddCellsAliasResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetCellsAliasesRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.AddCellsAliasResponse + * @memberof vtctldata.GetCellsAliasesRequest * @static - * @param {vtctldata.AddCellsAliasResponse} message AddCellsAliasResponse + * @param {vtctldata.GetCellsAliasesRequest} message GetCellsAliasesRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - AddCellsAliasResponse.toObject = function toObject() { + GetCellsAliasesRequest.toObject = function toObject() { return {}; }; /** - * Converts this AddCellsAliasResponse to JSON. + * Converts this GetCellsAliasesRequest to JSON. 
* @function toJSON - * @memberof vtctldata.AddCellsAliasResponse + * @memberof vtctldata.GetCellsAliasesRequest * @instance * @returns {Object.} JSON object */ - AddCellsAliasResponse.prototype.toJSON = function toJSON() { + GetCellsAliasesRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for AddCellsAliasResponse + * Gets the default type url for GetCellsAliasesRequest * @function getTypeUrl - * @memberof vtctldata.AddCellsAliasResponse + * @memberof vtctldata.GetCellsAliasesRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - AddCellsAliasResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetCellsAliasesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.AddCellsAliasResponse"; + return typeUrlPrefix + "/vtctldata.GetCellsAliasesRequest"; }; - return AddCellsAliasResponse; + return GetCellsAliasesRequest; })(); - vtctldata.ApplyRoutingRulesRequest = (function() { + vtctldata.GetCellsAliasesResponse = (function() { /** - * Properties of an ApplyRoutingRulesRequest. + * Properties of a GetCellsAliasesResponse. * @memberof vtctldata - * @interface IApplyRoutingRulesRequest - * @property {vschema.IRoutingRules|null} [routing_rules] ApplyRoutingRulesRequest routing_rules - * @property {boolean|null} [skip_rebuild] ApplyRoutingRulesRequest skip_rebuild - * @property {Array.|null} [rebuild_cells] ApplyRoutingRulesRequest rebuild_cells + * @interface IGetCellsAliasesResponse + * @property {Object.|null} [aliases] GetCellsAliasesResponse aliases */ /** - * Constructs a new ApplyRoutingRulesRequest. + * Constructs a new GetCellsAliasesResponse. * @memberof vtctldata - * @classdesc Represents an ApplyRoutingRulesRequest. 
- * @implements IApplyRoutingRulesRequest + * @classdesc Represents a GetCellsAliasesResponse. + * @implements IGetCellsAliasesResponse * @constructor - * @param {vtctldata.IApplyRoutingRulesRequest=} [properties] Properties to set + * @param {vtctldata.IGetCellsAliasesResponse=} [properties] Properties to set */ - function ApplyRoutingRulesRequest(properties) { - this.rebuild_cells = []; + function GetCellsAliasesResponse(properties) { + this.aliases = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -102017,106 +122926,97 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ApplyRoutingRulesRequest routing_rules. - * @member {vschema.IRoutingRules|null|undefined} routing_rules - * @memberof vtctldata.ApplyRoutingRulesRequest - * @instance - */ - ApplyRoutingRulesRequest.prototype.routing_rules = null; - - /** - * ApplyRoutingRulesRequest skip_rebuild. - * @member {boolean} skip_rebuild - * @memberof vtctldata.ApplyRoutingRulesRequest - * @instance - */ - ApplyRoutingRulesRequest.prototype.skip_rebuild = false; - - /** - * ApplyRoutingRulesRequest rebuild_cells. - * @member {Array.} rebuild_cells - * @memberof vtctldata.ApplyRoutingRulesRequest + * GetCellsAliasesResponse aliases. + * @member {Object.} aliases + * @memberof vtctldata.GetCellsAliasesResponse * @instance */ - ApplyRoutingRulesRequest.prototype.rebuild_cells = $util.emptyArray; + GetCellsAliasesResponse.prototype.aliases = $util.emptyObject; /** - * Creates a new ApplyRoutingRulesRequest instance using the specified properties. + * Creates a new GetCellsAliasesResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.GetCellsAliasesResponse * @static - * @param {vtctldata.IApplyRoutingRulesRequest=} [properties] Properties to set - * @returns {vtctldata.ApplyRoutingRulesRequest} ApplyRoutingRulesRequest instance + * @param {vtctldata.IGetCellsAliasesResponse=} [properties] Properties to set + * @returns {vtctldata.GetCellsAliasesResponse} GetCellsAliasesResponse instance */ - ApplyRoutingRulesRequest.create = function create(properties) { - return new ApplyRoutingRulesRequest(properties); + GetCellsAliasesResponse.create = function create(properties) { + return new GetCellsAliasesResponse(properties); }; /** - * Encodes the specified ApplyRoutingRulesRequest message. Does not implicitly {@link vtctldata.ApplyRoutingRulesRequest.verify|verify} messages. + * Encodes the specified GetCellsAliasesResponse message. Does not implicitly {@link vtctldata.GetCellsAliasesResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.GetCellsAliasesResponse * @static - * @param {vtctldata.IApplyRoutingRulesRequest} message ApplyRoutingRulesRequest message or plain object to encode + * @param {vtctldata.IGetCellsAliasesResponse} message GetCellsAliasesResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplyRoutingRulesRequest.encode = function encode(message, writer) { + GetCellsAliasesResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.routing_rules != null && Object.hasOwnProperty.call(message, "routing_rules")) - $root.vschema.RoutingRules.encode(message.routing_rules, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.skip_rebuild != null && Object.hasOwnProperty.call(message, "skip_rebuild")) - writer.uint32(/* id 2, wireType 0 =*/16).bool(message.skip_rebuild); - if 
(message.rebuild_cells != null && message.rebuild_cells.length) - for (let i = 0; i < message.rebuild_cells.length; ++i) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.rebuild_cells[i]); + if (message.aliases != null && Object.hasOwnProperty.call(message, "aliases")) + for (let keys = Object.keys(message.aliases), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.topodata.CellsAlias.encode(message.aliases[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } return writer; }; /** - * Encodes the specified ApplyRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.ApplyRoutingRulesRequest.verify|verify} messages. + * Encodes the specified GetCellsAliasesResponse message, length delimited. Does not implicitly {@link vtctldata.GetCellsAliasesResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.GetCellsAliasesResponse * @static - * @param {vtctldata.IApplyRoutingRulesRequest} message ApplyRoutingRulesRequest message or plain object to encode + * @param {vtctldata.IGetCellsAliasesResponse} message GetCellsAliasesResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplyRoutingRulesRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetCellsAliasesResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ApplyRoutingRulesRequest message from the specified reader or buffer. + * Decodes a GetCellsAliasesResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.GetCellsAliasesResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ApplyRoutingRulesRequest} ApplyRoutingRulesRequest + * @returns {vtctldata.GetCellsAliasesResponse} GetCellsAliasesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplyRoutingRulesRequest.decode = function decode(reader, length) { + GetCellsAliasesResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplyRoutingRulesRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetCellsAliasesResponse(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.routing_rules = $root.vschema.RoutingRules.decode(reader, reader.uint32()); - break; - } - case 2: { - message.skip_rebuild = reader.bool(); - break; - } - case 3: { - if (!(message.rebuild_cells && message.rebuild_cells.length)) - message.rebuild_cells = []; - message.rebuild_cells.push(reader.string()); + if (message.aliases === $util.emptyObject) + message.aliases = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.topodata.CellsAlias.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.aliases[key] = value; break; } default: @@ -102128,156 +123028,141 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an 
ApplyRoutingRulesRequest message from the specified reader or buffer, length delimited. + * Decodes a GetCellsAliasesResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.GetCellsAliasesResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ApplyRoutingRulesRequest} ApplyRoutingRulesRequest + * @returns {vtctldata.GetCellsAliasesResponse} GetCellsAliasesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplyRoutingRulesRequest.decodeDelimited = function decodeDelimited(reader) { + GetCellsAliasesResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ApplyRoutingRulesRequest message. + * Verifies a GetCellsAliasesResponse message. * @function verify - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.GetCellsAliasesResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ApplyRoutingRulesRequest.verify = function verify(message) { + GetCellsAliasesResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.routing_rules != null && message.hasOwnProperty("routing_rules")) { - let error = $root.vschema.RoutingRules.verify(message.routing_rules); - if (error) - return "routing_rules." 
+ error; - } - if (message.skip_rebuild != null && message.hasOwnProperty("skip_rebuild")) - if (typeof message.skip_rebuild !== "boolean") - return "skip_rebuild: boolean expected"; - if (message.rebuild_cells != null && message.hasOwnProperty("rebuild_cells")) { - if (!Array.isArray(message.rebuild_cells)) - return "rebuild_cells: array expected"; - for (let i = 0; i < message.rebuild_cells.length; ++i) - if (!$util.isString(message.rebuild_cells[i])) - return "rebuild_cells: string[] expected"; + if (message.aliases != null && message.hasOwnProperty("aliases")) { + if (!$util.isObject(message.aliases)) + return "aliases: object expected"; + let key = Object.keys(message.aliases); + for (let i = 0; i < key.length; ++i) { + let error = $root.topodata.CellsAlias.verify(message.aliases[key[i]]); + if (error) + return "aliases." + error; + } } return null; }; /** - * Creates an ApplyRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetCellsAliasesResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.GetCellsAliasesResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ApplyRoutingRulesRequest} ApplyRoutingRulesRequest + * @returns {vtctldata.GetCellsAliasesResponse} GetCellsAliasesResponse */ - ApplyRoutingRulesRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ApplyRoutingRulesRequest) + GetCellsAliasesResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetCellsAliasesResponse) return object; - let message = new $root.vtctldata.ApplyRoutingRulesRequest(); - if (object.routing_rules != null) { - if (typeof object.routing_rules !== "object") - throw TypeError(".vtctldata.ApplyRoutingRulesRequest.routing_rules: object expected"); - message.routing_rules = $root.vschema.RoutingRules.fromObject(object.routing_rules); - } - if (object.skip_rebuild != null) - message.skip_rebuild = Boolean(object.skip_rebuild); - if (object.rebuild_cells) { - if (!Array.isArray(object.rebuild_cells)) - throw TypeError(".vtctldata.ApplyRoutingRulesRequest.rebuild_cells: array expected"); - message.rebuild_cells = []; - for (let i = 0; i < object.rebuild_cells.length; ++i) - message.rebuild_cells[i] = String(object.rebuild_cells[i]); + let message = new $root.vtctldata.GetCellsAliasesResponse(); + if (object.aliases) { + if (typeof object.aliases !== "object") + throw TypeError(".vtctldata.GetCellsAliasesResponse.aliases: object expected"); + message.aliases = {}; + for (let keys = Object.keys(object.aliases), i = 0; i < keys.length; ++i) { + if (typeof object.aliases[keys[i]] !== "object") + throw TypeError(".vtctldata.GetCellsAliasesResponse.aliases: object expected"); + message.aliases[keys[i]] = $root.topodata.CellsAlias.fromObject(object.aliases[keys[i]]); + } } return message; }; /** - * Creates a plain object from an ApplyRoutingRulesRequest message. 
Also converts values to other types if specified. + * Creates a plain object from a GetCellsAliasesResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.GetCellsAliasesResponse * @static - * @param {vtctldata.ApplyRoutingRulesRequest} message ApplyRoutingRulesRequest + * @param {vtctldata.GetCellsAliasesResponse} message GetCellsAliasesResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ApplyRoutingRulesRequest.toObject = function toObject(message, options) { + GetCellsAliasesResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.rebuild_cells = []; - if (options.defaults) { - object.routing_rules = null; - object.skip_rebuild = false; - } - if (message.routing_rules != null && message.hasOwnProperty("routing_rules")) - object.routing_rules = $root.vschema.RoutingRules.toObject(message.routing_rules, options); - if (message.skip_rebuild != null && message.hasOwnProperty("skip_rebuild")) - object.skip_rebuild = message.skip_rebuild; - if (message.rebuild_cells && message.rebuild_cells.length) { - object.rebuild_cells = []; - for (let j = 0; j < message.rebuild_cells.length; ++j) - object.rebuild_cells[j] = message.rebuild_cells[j]; + if (options.objects || options.defaults) + object.aliases = {}; + let keys2; + if (message.aliases && (keys2 = Object.keys(message.aliases)).length) { + object.aliases = {}; + for (let j = 0; j < keys2.length; ++j) + object.aliases[keys2[j]] = $root.topodata.CellsAlias.toObject(message.aliases[keys2[j]], options); } return object; }; /** - * Converts this ApplyRoutingRulesRequest to JSON. + * Converts this GetCellsAliasesResponse to JSON. 
* @function toJSON - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.GetCellsAliasesResponse * @instance * @returns {Object.} JSON object */ - ApplyRoutingRulesRequest.prototype.toJSON = function toJSON() { + GetCellsAliasesResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ApplyRoutingRulesRequest + * Gets the default type url for GetCellsAliasesResponse * @function getTypeUrl - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.GetCellsAliasesResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ApplyRoutingRulesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetCellsAliasesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ApplyRoutingRulesRequest"; + return typeUrlPrefix + "/vtctldata.GetCellsAliasesResponse"; }; - return ApplyRoutingRulesRequest; + return GetCellsAliasesResponse; })(); - vtctldata.ApplyRoutingRulesResponse = (function() { + vtctldata.GetFullStatusRequest = (function() { /** - * Properties of an ApplyRoutingRulesResponse. + * Properties of a GetFullStatusRequest. * @memberof vtctldata - * @interface IApplyRoutingRulesResponse + * @interface IGetFullStatusRequest + * @property {topodata.ITabletAlias|null} [tablet_alias] GetFullStatusRequest tablet_alias */ /** - * Constructs a new ApplyRoutingRulesResponse. + * Constructs a new GetFullStatusRequest. * @memberof vtctldata - * @classdesc Represents an ApplyRoutingRulesResponse. - * @implements IApplyRoutingRulesResponse + * @classdesc Represents a GetFullStatusRequest. 
+ * @implements IGetFullStatusRequest * @constructor - * @param {vtctldata.IApplyRoutingRulesResponse=} [properties] Properties to set + * @param {vtctldata.IGetFullStatusRequest=} [properties] Properties to set */ - function ApplyRoutingRulesResponse(properties) { + function GetFullStatusRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -102285,63 +123170,77 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new ApplyRoutingRulesResponse instance using the specified properties. + * GetFullStatusRequest tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.GetFullStatusRequest + * @instance + */ + GetFullStatusRequest.prototype.tablet_alias = null; + + /** + * Creates a new GetFullStatusRequest instance using the specified properties. * @function create - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.GetFullStatusRequest * @static - * @param {vtctldata.IApplyRoutingRulesResponse=} [properties] Properties to set - * @returns {vtctldata.ApplyRoutingRulesResponse} ApplyRoutingRulesResponse instance + * @param {vtctldata.IGetFullStatusRequest=} [properties] Properties to set + * @returns {vtctldata.GetFullStatusRequest} GetFullStatusRequest instance */ - ApplyRoutingRulesResponse.create = function create(properties) { - return new ApplyRoutingRulesResponse(properties); + GetFullStatusRequest.create = function create(properties) { + return new GetFullStatusRequest(properties); }; /** - * Encodes the specified ApplyRoutingRulesResponse message. Does not implicitly {@link vtctldata.ApplyRoutingRulesResponse.verify|verify} messages. + * Encodes the specified GetFullStatusRequest message. Does not implicitly {@link vtctldata.GetFullStatusRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.GetFullStatusRequest * @static - * @param {vtctldata.IApplyRoutingRulesResponse} message ApplyRoutingRulesResponse message or plain object to encode + * @param {vtctldata.IGetFullStatusRequest} message GetFullStatusRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplyRoutingRulesResponse.encode = function encode(message, writer) { + GetFullStatusRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified ApplyRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.ApplyRoutingRulesResponse.verify|verify} messages. + * Encodes the specified GetFullStatusRequest message, length delimited. Does not implicitly {@link vtctldata.GetFullStatusRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.GetFullStatusRequest * @static - * @param {vtctldata.IApplyRoutingRulesResponse} message ApplyRoutingRulesResponse message or plain object to encode + * @param {vtctldata.IGetFullStatusRequest} message GetFullStatusRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplyRoutingRulesResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetFullStatusRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ApplyRoutingRulesResponse message from the specified reader or buffer. 
+ * Decodes a GetFullStatusRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.GetFullStatusRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ApplyRoutingRulesResponse} ApplyRoutingRulesResponse + * @returns {vtctldata.GetFullStatusRequest} GetFullStatusRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplyRoutingRulesResponse.decode = function decode(reader, length) { + GetFullStatusRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplyRoutingRulesResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetFullStatusRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -102351,112 +123250,127 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an ApplyRoutingRulesResponse message from the specified reader or buffer, length delimited. + * Decodes a GetFullStatusRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.GetFullStatusRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ApplyRoutingRulesResponse} ApplyRoutingRulesResponse + * @returns {vtctldata.GetFullStatusRequest} GetFullStatusRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplyRoutingRulesResponse.decodeDelimited = function decodeDelimited(reader) { + GetFullStatusRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ApplyRoutingRulesResponse message. + * Verifies a GetFullStatusRequest message. * @function verify - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.GetFullStatusRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ApplyRoutingRulesResponse.verify = function verify(message) { + GetFullStatusRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; + } return null; }; /** - * Creates an ApplyRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetFullStatusRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.GetFullStatusRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ApplyRoutingRulesResponse} ApplyRoutingRulesResponse + * @returns {vtctldata.GetFullStatusRequest} GetFullStatusRequest */ - ApplyRoutingRulesResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ApplyRoutingRulesResponse) + GetFullStatusRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetFullStatusRequest) return object; - return new $root.vtctldata.ApplyRoutingRulesResponse(); + let message = new $root.vtctldata.GetFullStatusRequest(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.GetFullStatusRequest.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } + return message; }; /** - * Creates a plain object from an ApplyRoutingRulesResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetFullStatusRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.GetFullStatusRequest * @static - * @param {vtctldata.ApplyRoutingRulesResponse} message ApplyRoutingRulesResponse + * @param {vtctldata.GetFullStatusRequest} message GetFullStatusRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ApplyRoutingRulesResponse.toObject = function toObject() { - return {}; + GetFullStatusRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.tablet_alias = null; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + return object; }; /** - * Converts this ApplyRoutingRulesResponse to JSON. + * Converts this GetFullStatusRequest to JSON. * @function toJSON - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.GetFullStatusRequest * @instance * @returns {Object.} JSON object */ - ApplyRoutingRulesResponse.prototype.toJSON = function toJSON() { + GetFullStatusRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ApplyRoutingRulesResponse + * Gets the default type url for GetFullStatusRequest * @function getTypeUrl - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.GetFullStatusRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ApplyRoutingRulesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetFullStatusRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ApplyRoutingRulesResponse"; + return typeUrlPrefix + 
"/vtctldata.GetFullStatusRequest"; }; - return ApplyRoutingRulesResponse; + return GetFullStatusRequest; })(); - vtctldata.ApplyShardRoutingRulesRequest = (function() { + vtctldata.GetFullStatusResponse = (function() { /** - * Properties of an ApplyShardRoutingRulesRequest. + * Properties of a GetFullStatusResponse. * @memberof vtctldata - * @interface IApplyShardRoutingRulesRequest - * @property {vschema.IShardRoutingRules|null} [shard_routing_rules] ApplyShardRoutingRulesRequest shard_routing_rules - * @property {boolean|null} [skip_rebuild] ApplyShardRoutingRulesRequest skip_rebuild - * @property {Array.|null} [rebuild_cells] ApplyShardRoutingRulesRequest rebuild_cells + * @interface IGetFullStatusResponse + * @property {replicationdata.IFullStatus|null} [status] GetFullStatusResponse status */ /** - * Constructs a new ApplyShardRoutingRulesRequest. + * Constructs a new GetFullStatusResponse. * @memberof vtctldata - * @classdesc Represents an ApplyShardRoutingRulesRequest. - * @implements IApplyShardRoutingRulesRequest + * @classdesc Represents a GetFullStatusResponse. + * @implements IGetFullStatusResponse * @constructor - * @param {vtctldata.IApplyShardRoutingRulesRequest=} [properties] Properties to set + * @param {vtctldata.IGetFullStatusResponse=} [properties] Properties to set */ - function ApplyShardRoutingRulesRequest(properties) { - this.rebuild_cells = []; + function GetFullStatusResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -102464,106 +123378,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ApplyShardRoutingRulesRequest shard_routing_rules. - * @member {vschema.IShardRoutingRules|null|undefined} shard_routing_rules - * @memberof vtctldata.ApplyShardRoutingRulesRequest - * @instance - */ - ApplyShardRoutingRulesRequest.prototype.shard_routing_rules = null; - - /** - * ApplyShardRoutingRulesRequest skip_rebuild. 
- * @member {boolean} skip_rebuild - * @memberof vtctldata.ApplyShardRoutingRulesRequest - * @instance - */ - ApplyShardRoutingRulesRequest.prototype.skip_rebuild = false; - - /** - * ApplyShardRoutingRulesRequest rebuild_cells. - * @member {Array.} rebuild_cells - * @memberof vtctldata.ApplyShardRoutingRulesRequest + * GetFullStatusResponse status. + * @member {replicationdata.IFullStatus|null|undefined} status + * @memberof vtctldata.GetFullStatusResponse * @instance */ - ApplyShardRoutingRulesRequest.prototype.rebuild_cells = $util.emptyArray; + GetFullStatusResponse.prototype.status = null; /** - * Creates a new ApplyShardRoutingRulesRequest instance using the specified properties. + * Creates a new GetFullStatusResponse instance using the specified properties. * @function create - * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @memberof vtctldata.GetFullStatusResponse * @static - * @param {vtctldata.IApplyShardRoutingRulesRequest=} [properties] Properties to set - * @returns {vtctldata.ApplyShardRoutingRulesRequest} ApplyShardRoutingRulesRequest instance + * @param {vtctldata.IGetFullStatusResponse=} [properties] Properties to set + * @returns {vtctldata.GetFullStatusResponse} GetFullStatusResponse instance */ - ApplyShardRoutingRulesRequest.create = function create(properties) { - return new ApplyShardRoutingRulesRequest(properties); + GetFullStatusResponse.create = function create(properties) { + return new GetFullStatusResponse(properties); }; /** - * Encodes the specified ApplyShardRoutingRulesRequest message. Does not implicitly {@link vtctldata.ApplyShardRoutingRulesRequest.verify|verify} messages. + * Encodes the specified GetFullStatusResponse message. Does not implicitly {@link vtctldata.GetFullStatusResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @memberof vtctldata.GetFullStatusResponse * @static - * @param {vtctldata.IApplyShardRoutingRulesRequest} message ApplyShardRoutingRulesRequest message or plain object to encode + * @param {vtctldata.IGetFullStatusResponse} message GetFullStatusResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplyShardRoutingRulesRequest.encode = function encode(message, writer) { + GetFullStatusResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.shard_routing_rules != null && Object.hasOwnProperty.call(message, "shard_routing_rules")) - $root.vschema.ShardRoutingRules.encode(message.shard_routing_rules, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.skip_rebuild != null && Object.hasOwnProperty.call(message, "skip_rebuild")) - writer.uint32(/* id 2, wireType 0 =*/16).bool(message.skip_rebuild); - if (message.rebuild_cells != null && message.rebuild_cells.length) - for (let i = 0; i < message.rebuild_cells.length; ++i) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.rebuild_cells[i]); + if (message.status != null && Object.hasOwnProperty.call(message, "status")) + $root.replicationdata.FullStatus.encode(message.status, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified ApplyShardRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.ApplyShardRoutingRulesRequest.verify|verify} messages. + * Encodes the specified GetFullStatusResponse message, length delimited. Does not implicitly {@link vtctldata.GetFullStatusResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @memberof vtctldata.GetFullStatusResponse * @static - * @param {vtctldata.IApplyShardRoutingRulesRequest} message ApplyShardRoutingRulesRequest message or plain object to encode + * @param {vtctldata.IGetFullStatusResponse} message GetFullStatusResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplyShardRoutingRulesRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetFullStatusResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ApplyShardRoutingRulesRequest message from the specified reader or buffer. + * Decodes a GetFullStatusResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @memberof vtctldata.GetFullStatusResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ApplyShardRoutingRulesRequest} ApplyShardRoutingRulesRequest + * @returns {vtctldata.GetFullStatusResponse} GetFullStatusResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplyShardRoutingRulesRequest.decode = function decode(reader, length) { + GetFullStatusResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplyShardRoutingRulesRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetFullStatusResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.shard_routing_rules = $root.vschema.ShardRoutingRules.decode(reader, reader.uint32()); - break; - } - case 2: { - message.skip_rebuild = reader.bool(); - break; - } - case 3: { - if (!(message.rebuild_cells && message.rebuild_cells.length)) - message.rebuild_cells = []; - message.rebuild_cells.push(reader.string()); + message.status = $root.replicationdata.FullStatus.decode(reader, reader.uint32()); break; } default: @@ -102575,156 +123458,126 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an ApplyShardRoutingRulesRequest message from the specified reader or buffer, length delimited. + * Decodes a GetFullStatusResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @memberof vtctldata.GetFullStatusResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ApplyShardRoutingRulesRequest} ApplyShardRoutingRulesRequest + * @returns {vtctldata.GetFullStatusResponse} GetFullStatusResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplyShardRoutingRulesRequest.decodeDelimited = function decodeDelimited(reader) { + GetFullStatusResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ApplyShardRoutingRulesRequest message. + * Verifies a GetFullStatusResponse message. 
* @function verify - * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @memberof vtctldata.GetFullStatusResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ApplyShardRoutingRulesRequest.verify = function verify(message) { + GetFullStatusResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.shard_routing_rules != null && message.hasOwnProperty("shard_routing_rules")) { - let error = $root.vschema.ShardRoutingRules.verify(message.shard_routing_rules); + if (message.status != null && message.hasOwnProperty("status")) { + let error = $root.replicationdata.FullStatus.verify(message.status); if (error) - return "shard_routing_rules." + error; - } - if (message.skip_rebuild != null && message.hasOwnProperty("skip_rebuild")) - if (typeof message.skip_rebuild !== "boolean") - return "skip_rebuild: boolean expected"; - if (message.rebuild_cells != null && message.hasOwnProperty("rebuild_cells")) { - if (!Array.isArray(message.rebuild_cells)) - return "rebuild_cells: array expected"; - for (let i = 0; i < message.rebuild_cells.length; ++i) - if (!$util.isString(message.rebuild_cells[i])) - return "rebuild_cells: string[] expected"; + return "status." + error; } return null; }; /** - * Creates an ApplyShardRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetFullStatusResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @memberof vtctldata.GetFullStatusResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ApplyShardRoutingRulesRequest} ApplyShardRoutingRulesRequest + * @returns {vtctldata.GetFullStatusResponse} GetFullStatusResponse */ - ApplyShardRoutingRulesRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ApplyShardRoutingRulesRequest) + GetFullStatusResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetFullStatusResponse) return object; - let message = new $root.vtctldata.ApplyShardRoutingRulesRequest(); - if (object.shard_routing_rules != null) { - if (typeof object.shard_routing_rules !== "object") - throw TypeError(".vtctldata.ApplyShardRoutingRulesRequest.shard_routing_rules: object expected"); - message.shard_routing_rules = $root.vschema.ShardRoutingRules.fromObject(object.shard_routing_rules); - } - if (object.skip_rebuild != null) - message.skip_rebuild = Boolean(object.skip_rebuild); - if (object.rebuild_cells) { - if (!Array.isArray(object.rebuild_cells)) - throw TypeError(".vtctldata.ApplyShardRoutingRulesRequest.rebuild_cells: array expected"); - message.rebuild_cells = []; - for (let i = 0; i < object.rebuild_cells.length; ++i) - message.rebuild_cells[i] = String(object.rebuild_cells[i]); + let message = new $root.vtctldata.GetFullStatusResponse(); + if (object.status != null) { + if (typeof object.status !== "object") + throw TypeError(".vtctldata.GetFullStatusResponse.status: object expected"); + message.status = $root.replicationdata.FullStatus.fromObject(object.status); } return message; }; /** - * Creates a plain object from an ApplyShardRoutingRulesRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetFullStatusResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @memberof vtctldata.GetFullStatusResponse * @static - * @param {vtctldata.ApplyShardRoutingRulesRequest} message ApplyShardRoutingRulesRequest + * @param {vtctldata.GetFullStatusResponse} message GetFullStatusResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ApplyShardRoutingRulesRequest.toObject = function toObject(message, options) { + GetFullStatusResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.rebuild_cells = []; - if (options.defaults) { - object.shard_routing_rules = null; - object.skip_rebuild = false; - } - if (message.shard_routing_rules != null && message.hasOwnProperty("shard_routing_rules")) - object.shard_routing_rules = $root.vschema.ShardRoutingRules.toObject(message.shard_routing_rules, options); - if (message.skip_rebuild != null && message.hasOwnProperty("skip_rebuild")) - object.skip_rebuild = message.skip_rebuild; - if (message.rebuild_cells && message.rebuild_cells.length) { - object.rebuild_cells = []; - for (let j = 0; j < message.rebuild_cells.length; ++j) - object.rebuild_cells[j] = message.rebuild_cells[j]; - } + if (options.defaults) + object.status = null; + if (message.status != null && message.hasOwnProperty("status")) + object.status = $root.replicationdata.FullStatus.toObject(message.status, options); return object; }; /** - * Converts this ApplyShardRoutingRulesRequest to JSON. + * Converts this GetFullStatusResponse to JSON. 
* @function toJSON - * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @memberof vtctldata.GetFullStatusResponse * @instance * @returns {Object.} JSON object */ - ApplyShardRoutingRulesRequest.prototype.toJSON = function toJSON() { + GetFullStatusResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ApplyShardRoutingRulesRequest + * Gets the default type url for GetFullStatusResponse * @function getTypeUrl - * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @memberof vtctldata.GetFullStatusResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ApplyShardRoutingRulesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetFullStatusResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ApplyShardRoutingRulesRequest"; + return typeUrlPrefix + "/vtctldata.GetFullStatusResponse"; }; - return ApplyShardRoutingRulesRequest; + return GetFullStatusResponse; })(); - vtctldata.ApplyShardRoutingRulesResponse = (function() { + vtctldata.GetKeyspacesRequest = (function() { /** - * Properties of an ApplyShardRoutingRulesResponse. + * Properties of a GetKeyspacesRequest. * @memberof vtctldata - * @interface IApplyShardRoutingRulesResponse + * @interface IGetKeyspacesRequest */ /** - * Constructs a new ApplyShardRoutingRulesResponse. + * Constructs a new GetKeyspacesRequest. * @memberof vtctldata - * @classdesc Represents an ApplyShardRoutingRulesResponse. - * @implements IApplyShardRoutingRulesResponse + * @classdesc Represents a GetKeyspacesRequest. 
+ * @implements IGetKeyspacesRequest * @constructor - * @param {vtctldata.IApplyShardRoutingRulesResponse=} [properties] Properties to set + * @param {vtctldata.IGetKeyspacesRequest=} [properties] Properties to set */ - function ApplyShardRoutingRulesResponse(properties) { + function GetKeyspacesRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -102732,60 +123585,60 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new ApplyShardRoutingRulesResponse instance using the specified properties. + * Creates a new GetKeyspacesRequest instance using the specified properties. * @function create - * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @memberof vtctldata.GetKeyspacesRequest * @static - * @param {vtctldata.IApplyShardRoutingRulesResponse=} [properties] Properties to set - * @returns {vtctldata.ApplyShardRoutingRulesResponse} ApplyShardRoutingRulesResponse instance + * @param {vtctldata.IGetKeyspacesRequest=} [properties] Properties to set + * @returns {vtctldata.GetKeyspacesRequest} GetKeyspacesRequest instance */ - ApplyShardRoutingRulesResponse.create = function create(properties) { - return new ApplyShardRoutingRulesResponse(properties); + GetKeyspacesRequest.create = function create(properties) { + return new GetKeyspacesRequest(properties); }; /** - * Encodes the specified ApplyShardRoutingRulesResponse message. Does not implicitly {@link vtctldata.ApplyShardRoutingRulesResponse.verify|verify} messages. + * Encodes the specified GetKeyspacesRequest message. Does not implicitly {@link vtctldata.GetKeyspacesRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @memberof vtctldata.GetKeyspacesRequest * @static - * @param {vtctldata.IApplyShardRoutingRulesResponse} message ApplyShardRoutingRulesResponse message or plain object to encode + * @param {vtctldata.IGetKeyspacesRequest} message GetKeyspacesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplyShardRoutingRulesResponse.encode = function encode(message, writer) { + GetKeyspacesRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); return writer; }; /** - * Encodes the specified ApplyShardRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.ApplyShardRoutingRulesResponse.verify|verify} messages. + * Encodes the specified GetKeyspacesRequest message, length delimited. Does not implicitly {@link vtctldata.GetKeyspacesRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @memberof vtctldata.GetKeyspacesRequest * @static - * @param {vtctldata.IApplyShardRoutingRulesResponse} message ApplyShardRoutingRulesResponse message or plain object to encode + * @param {vtctldata.IGetKeyspacesRequest} message GetKeyspacesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplyShardRoutingRulesResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetKeyspacesRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ApplyShardRoutingRulesResponse message from the specified reader or buffer. + * Decodes a GetKeyspacesRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @memberof vtctldata.GetKeyspacesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ApplyShardRoutingRulesResponse} ApplyShardRoutingRulesResponse + * @returns {vtctldata.GetKeyspacesRequest} GetKeyspacesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplyShardRoutingRulesResponse.decode = function decode(reader, length) { + GetKeyspacesRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplyShardRoutingRulesResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetKeyspacesRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -102798,119 +123651,110 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an ApplyShardRoutingRulesResponse message from the specified reader or buffer, length delimited. + * Decodes a GetKeyspacesRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @memberof vtctldata.GetKeyspacesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ApplyShardRoutingRulesResponse} ApplyShardRoutingRulesResponse + * @returns {vtctldata.GetKeyspacesRequest} GetKeyspacesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplyShardRoutingRulesResponse.decodeDelimited = function decodeDelimited(reader) { + GetKeyspacesRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ApplyShardRoutingRulesResponse message. + * Verifies a GetKeyspacesRequest message. * @function verify - * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @memberof vtctldata.GetKeyspacesRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ApplyShardRoutingRulesResponse.verify = function verify(message) { + GetKeyspacesRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; return null; }; /** - * Creates an ApplyShardRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetKeyspacesRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @memberof vtctldata.GetKeyspacesRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ApplyShardRoutingRulesResponse} ApplyShardRoutingRulesResponse + * @returns {vtctldata.GetKeyspacesRequest} GetKeyspacesRequest */ - ApplyShardRoutingRulesResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ApplyShardRoutingRulesResponse) + GetKeyspacesRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetKeyspacesRequest) return object; - return new $root.vtctldata.ApplyShardRoutingRulesResponse(); + return new $root.vtctldata.GetKeyspacesRequest(); }; /** - * Creates a plain object from an ApplyShardRoutingRulesResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetKeyspacesRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @memberof vtctldata.GetKeyspacesRequest * @static - * @param {vtctldata.ApplyShardRoutingRulesResponse} message ApplyShardRoutingRulesResponse + * @param {vtctldata.GetKeyspacesRequest} message GetKeyspacesRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ApplyShardRoutingRulesResponse.toObject = function toObject() { + GetKeyspacesRequest.toObject = function toObject() { return {}; }; /** - * Converts this ApplyShardRoutingRulesResponse to JSON. + * Converts this GetKeyspacesRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @memberof vtctldata.GetKeyspacesRequest * @instance * @returns {Object.} JSON object */ - ApplyShardRoutingRulesResponse.prototype.toJSON = function toJSON() { + GetKeyspacesRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ApplyShardRoutingRulesResponse + * Gets the default type url for GetKeyspacesRequest * @function getTypeUrl - * @memberof vtctldata.ApplyShardRoutingRulesResponse + * @memberof vtctldata.GetKeyspacesRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ApplyShardRoutingRulesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetKeyspacesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ApplyShardRoutingRulesResponse"; + return typeUrlPrefix + "/vtctldata.GetKeyspacesRequest"; }; - return ApplyShardRoutingRulesResponse; + return GetKeyspacesRequest; })(); - vtctldata.ApplySchemaRequest = (function() { + vtctldata.GetKeyspacesResponse = (function() { /** - * Properties of an ApplySchemaRequest. + * Properties of a GetKeyspacesResponse. 
* @memberof vtctldata - * @interface IApplySchemaRequest - * @property {string|null} [keyspace] ApplySchemaRequest keyspace - * @property {boolean|null} [allow_long_unavailability] ApplySchemaRequest allow_long_unavailability - * @property {Array.|null} [sql] ApplySchemaRequest sql - * @property {string|null} [ddl_strategy] ApplySchemaRequest ddl_strategy - * @property {Array.|null} [uuid_list] ApplySchemaRequest uuid_list - * @property {string|null} [migration_context] ApplySchemaRequest migration_context - * @property {vttime.IDuration|null} [wait_replicas_timeout] ApplySchemaRequest wait_replicas_timeout - * @property {boolean|null} [skip_preflight] ApplySchemaRequest skip_preflight - * @property {vtrpc.ICallerID|null} [caller_id] ApplySchemaRequest caller_id + * @interface IGetKeyspacesResponse + * @property {Array.|null} [keyspaces] GetKeyspacesResponse keyspaces */ /** - * Constructs a new ApplySchemaRequest. + * Constructs a new GetKeyspacesResponse. * @memberof vtctldata - * @classdesc Represents an ApplySchemaRequest. - * @implements IApplySchemaRequest + * @classdesc Represents a GetKeyspacesResponse. + * @implements IGetKeyspacesResponse * @constructor - * @param {vtctldata.IApplySchemaRequest=} [properties] Properties to set + * @param {vtctldata.IGetKeyspacesResponse=} [properties] Properties to set */ - function ApplySchemaRequest(properties) { - this.sql = []; - this.uuid_list = []; + function GetKeyspacesResponse(properties) { + this.keyspaces = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -102918,193 +123762,78 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ApplySchemaRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.ApplySchemaRequest - * @instance - */ - ApplySchemaRequest.prototype.keyspace = ""; - - /** - * ApplySchemaRequest allow_long_unavailability. 
- * @member {boolean} allow_long_unavailability - * @memberof vtctldata.ApplySchemaRequest - * @instance - */ - ApplySchemaRequest.prototype.allow_long_unavailability = false; - - /** - * ApplySchemaRequest sql. - * @member {Array.} sql - * @memberof vtctldata.ApplySchemaRequest - * @instance - */ - ApplySchemaRequest.prototype.sql = $util.emptyArray; - - /** - * ApplySchemaRequest ddl_strategy. - * @member {string} ddl_strategy - * @memberof vtctldata.ApplySchemaRequest - * @instance - */ - ApplySchemaRequest.prototype.ddl_strategy = ""; - - /** - * ApplySchemaRequest uuid_list. - * @member {Array.} uuid_list - * @memberof vtctldata.ApplySchemaRequest - * @instance - */ - ApplySchemaRequest.prototype.uuid_list = $util.emptyArray; - - /** - * ApplySchemaRequest migration_context. - * @member {string} migration_context - * @memberof vtctldata.ApplySchemaRequest - * @instance - */ - ApplySchemaRequest.prototype.migration_context = ""; - - /** - * ApplySchemaRequest wait_replicas_timeout. - * @member {vttime.IDuration|null|undefined} wait_replicas_timeout - * @memberof vtctldata.ApplySchemaRequest - * @instance - */ - ApplySchemaRequest.prototype.wait_replicas_timeout = null; - - /** - * ApplySchemaRequest skip_preflight. - * @member {boolean} skip_preflight - * @memberof vtctldata.ApplySchemaRequest - * @instance - */ - ApplySchemaRequest.prototype.skip_preflight = false; - - /** - * ApplySchemaRequest caller_id. - * @member {vtrpc.ICallerID|null|undefined} caller_id - * @memberof vtctldata.ApplySchemaRequest + * GetKeyspacesResponse keyspaces. + * @member {Array.} keyspaces + * @memberof vtctldata.GetKeyspacesResponse * @instance */ - ApplySchemaRequest.prototype.caller_id = null; + GetKeyspacesResponse.prototype.keyspaces = $util.emptyArray; /** - * Creates a new ApplySchemaRequest instance using the specified properties. + * Creates a new GetKeyspacesResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.ApplySchemaRequest + * @memberof vtctldata.GetKeyspacesResponse * @static - * @param {vtctldata.IApplySchemaRequest=} [properties] Properties to set - * @returns {vtctldata.ApplySchemaRequest} ApplySchemaRequest instance + * @param {vtctldata.IGetKeyspacesResponse=} [properties] Properties to set + * @returns {vtctldata.GetKeyspacesResponse} GetKeyspacesResponse instance */ - ApplySchemaRequest.create = function create(properties) { - return new ApplySchemaRequest(properties); + GetKeyspacesResponse.create = function create(properties) { + return new GetKeyspacesResponse(properties); }; /** - * Encodes the specified ApplySchemaRequest message. Does not implicitly {@link vtctldata.ApplySchemaRequest.verify|verify} messages. + * Encodes the specified GetKeyspacesResponse message. Does not implicitly {@link vtctldata.GetKeyspacesResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.ApplySchemaRequest + * @memberof vtctldata.GetKeyspacesResponse * @static - * @param {vtctldata.IApplySchemaRequest} message ApplySchemaRequest message or plain object to encode + * @param {vtctldata.IGetKeyspacesResponse} message GetKeyspacesResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplySchemaRequest.encode = function encode(message, writer) { + GetKeyspacesResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.allow_long_unavailability != null && Object.hasOwnProperty.call(message, "allow_long_unavailability")) - writer.uint32(/* id 2, wireType 0 =*/16).bool(message.allow_long_unavailability); - if (message.sql != null && message.sql.length) - for (let i = 0; i < message.sql.length; ++i) - writer.uint32(/* id 3, wireType 2 
=*/26).string(message.sql[i]); - if (message.ddl_strategy != null && Object.hasOwnProperty.call(message, "ddl_strategy")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.ddl_strategy); - if (message.uuid_list != null && message.uuid_list.length) - for (let i = 0; i < message.uuid_list.length; ++i) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.uuid_list[i]); - if (message.migration_context != null && Object.hasOwnProperty.call(message, "migration_context")) - writer.uint32(/* id 6, wireType 2 =*/50).string(message.migration_context); - if (message.wait_replicas_timeout != null && Object.hasOwnProperty.call(message, "wait_replicas_timeout")) - $root.vttime.Duration.encode(message.wait_replicas_timeout, writer.uint32(/* id 7, wireType 2 =*/58).fork()).ldelim(); - if (message.skip_preflight != null && Object.hasOwnProperty.call(message, "skip_preflight")) - writer.uint32(/* id 8, wireType 0 =*/64).bool(message.skip_preflight); - if (message.caller_id != null && Object.hasOwnProperty.call(message, "caller_id")) - $root.vtrpc.CallerID.encode(message.caller_id, writer.uint32(/* id 9, wireType 2 =*/74).fork()).ldelim(); + if (message.keyspaces != null && message.keyspaces.length) + for (let i = 0; i < message.keyspaces.length; ++i) + $root.vtctldata.Keyspace.encode(message.keyspaces[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified ApplySchemaRequest message, length delimited. Does not implicitly {@link vtctldata.ApplySchemaRequest.verify|verify} messages. + * Encodes the specified GetKeyspacesResponse message, length delimited. Does not implicitly {@link vtctldata.GetKeyspacesResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ApplySchemaRequest + * @memberof vtctldata.GetKeyspacesResponse * @static - * @param {vtctldata.IApplySchemaRequest} message ApplySchemaRequest message or plain object to encode + * @param {vtctldata.IGetKeyspacesResponse} message GetKeyspacesResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplySchemaRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetKeyspacesResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ApplySchemaRequest message from the specified reader or buffer. + * Decodes a GetKeyspacesResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ApplySchemaRequest + * @memberof vtctldata.GetKeyspacesResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ApplySchemaRequest} ApplySchemaRequest + * @returns {vtctldata.GetKeyspacesResponse} GetKeyspacesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplySchemaRequest.decode = function decode(reader, length) { + GetKeyspacesResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplySchemaRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetKeyspacesResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - message.allow_long_unavailability = reader.bool(); - break; - } - case 3: { - if (!(message.sql && message.sql.length)) - message.sql = []; - message.sql.push(reader.string()); - break; - } - case 4: { - message.ddl_strategy = reader.string(); - break; - } - case 5: { - if (!(message.uuid_list && message.uuid_list.length)) - message.uuid_list = []; - message.uuid_list.push(reader.string()); - break; - } - case 6: { - message.migration_context = reader.string(); - break; - } - case 7: { - message.wait_replicas_timeout = $root.vttime.Duration.decode(reader, reader.uint32()); - break; - } - case 8: { - message.skip_preflight = reader.bool(); - break; - } - case 9: { - message.caller_id = $root.vtrpc.CallerID.decode(reader, reader.uint32()); + if (!(message.keyspaces && message.keyspaces.length)) + message.keyspaces = []; + message.keyspaces.push($root.vtctldata.Keyspace.decode(reader, reader.uint32())); break; } default: @@ -103116,224 +123845,139 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an ApplySchemaRequest message from the specified reader or buffer, length delimited. + * Decodes a GetKeyspacesResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.ApplySchemaRequest + * @memberof vtctldata.GetKeyspacesResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ApplySchemaRequest} ApplySchemaRequest + * @returns {vtctldata.GetKeyspacesResponse} GetKeyspacesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplySchemaRequest.decodeDelimited = function decodeDelimited(reader) { + GetKeyspacesResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ApplySchemaRequest message. + * Verifies a GetKeyspacesResponse message. * @function verify - * @memberof vtctldata.ApplySchemaRequest + * @memberof vtctldata.GetKeyspacesResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ApplySchemaRequest.verify = function verify(message) { + GetKeyspacesResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.allow_long_unavailability != null && message.hasOwnProperty("allow_long_unavailability")) - if (typeof message.allow_long_unavailability !== "boolean") - return "allow_long_unavailability: boolean expected"; - if (message.sql != null && message.hasOwnProperty("sql")) { - if (!Array.isArray(message.sql)) - return "sql: array expected"; - for (let i = 0; i < message.sql.length; ++i) - if (!$util.isString(message.sql[i])) - return "sql: string[] expected"; - } - if (message.ddl_strategy != null && message.hasOwnProperty("ddl_strategy")) - if 
(!$util.isString(message.ddl_strategy)) - return "ddl_strategy: string expected"; - if (message.uuid_list != null && message.hasOwnProperty("uuid_list")) { - if (!Array.isArray(message.uuid_list)) - return "uuid_list: array expected"; - for (let i = 0; i < message.uuid_list.length; ++i) - if (!$util.isString(message.uuid_list[i])) - return "uuid_list: string[] expected"; - } - if (message.migration_context != null && message.hasOwnProperty("migration_context")) - if (!$util.isString(message.migration_context)) - return "migration_context: string expected"; - if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout")) { - let error = $root.vttime.Duration.verify(message.wait_replicas_timeout); - if (error) - return "wait_replicas_timeout." + error; - } - if (message.skip_preflight != null && message.hasOwnProperty("skip_preflight")) - if (typeof message.skip_preflight !== "boolean") - return "skip_preflight: boolean expected"; - if (message.caller_id != null && message.hasOwnProperty("caller_id")) { - let error = $root.vtrpc.CallerID.verify(message.caller_id); - if (error) - return "caller_id." + error; + if (message.keyspaces != null && message.hasOwnProperty("keyspaces")) { + if (!Array.isArray(message.keyspaces)) + return "keyspaces: array expected"; + for (let i = 0; i < message.keyspaces.length; ++i) { + let error = $root.vtctldata.Keyspace.verify(message.keyspaces[i]); + if (error) + return "keyspaces." + error; + } } return null; }; /** - * Creates an ApplySchemaRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetKeyspacesResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ApplySchemaRequest + * @memberof vtctldata.GetKeyspacesResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ApplySchemaRequest} ApplySchemaRequest + * @returns {vtctldata.GetKeyspacesResponse} GetKeyspacesResponse */ - ApplySchemaRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ApplySchemaRequest) + GetKeyspacesResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetKeyspacesResponse) return object; - let message = new $root.vtctldata.ApplySchemaRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.allow_long_unavailability != null) - message.allow_long_unavailability = Boolean(object.allow_long_unavailability); - if (object.sql) { - if (!Array.isArray(object.sql)) - throw TypeError(".vtctldata.ApplySchemaRequest.sql: array expected"); - message.sql = []; - for (let i = 0; i < object.sql.length; ++i) - message.sql[i] = String(object.sql[i]); - } - if (object.ddl_strategy != null) - message.ddl_strategy = String(object.ddl_strategy); - if (object.uuid_list) { - if (!Array.isArray(object.uuid_list)) - throw TypeError(".vtctldata.ApplySchemaRequest.uuid_list: array expected"); - message.uuid_list = []; - for (let i = 0; i < object.uuid_list.length; ++i) - message.uuid_list[i] = String(object.uuid_list[i]); - } - if (object.migration_context != null) - message.migration_context = String(object.migration_context); - if (object.wait_replicas_timeout != null) { - if (typeof object.wait_replicas_timeout !== "object") - throw TypeError(".vtctldata.ApplySchemaRequest.wait_replicas_timeout: object expected"); - message.wait_replicas_timeout = $root.vttime.Duration.fromObject(object.wait_replicas_timeout); - } - if (object.skip_preflight != null) - message.skip_preflight = Boolean(object.skip_preflight); - if (object.caller_id != null) { - if (typeof object.caller_id !== 
"object") - throw TypeError(".vtctldata.ApplySchemaRequest.caller_id: object expected"); - message.caller_id = $root.vtrpc.CallerID.fromObject(object.caller_id); + let message = new $root.vtctldata.GetKeyspacesResponse(); + if (object.keyspaces) { + if (!Array.isArray(object.keyspaces)) + throw TypeError(".vtctldata.GetKeyspacesResponse.keyspaces: array expected"); + message.keyspaces = []; + for (let i = 0; i < object.keyspaces.length; ++i) { + if (typeof object.keyspaces[i] !== "object") + throw TypeError(".vtctldata.GetKeyspacesResponse.keyspaces: object expected"); + message.keyspaces[i] = $root.vtctldata.Keyspace.fromObject(object.keyspaces[i]); + } } return message; }; /** - * Creates a plain object from an ApplySchemaRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetKeyspacesResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.ApplySchemaRequest + * @memberof vtctldata.GetKeyspacesResponse * @static - * @param {vtctldata.ApplySchemaRequest} message ApplySchemaRequest + * @param {vtctldata.GetKeyspacesResponse} message GetKeyspacesResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ApplySchemaRequest.toObject = function toObject(message, options) { + GetKeyspacesResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) { - object.sql = []; - object.uuid_list = []; - } - if (options.defaults) { - object.keyspace = ""; - object.allow_long_unavailability = false; - object.ddl_strategy = ""; - object.migration_context = ""; - object.wait_replicas_timeout = null; - object.skip_preflight = false; - object.caller_id = null; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.allow_long_unavailability != null && 
message.hasOwnProperty("allow_long_unavailability")) - object.allow_long_unavailability = message.allow_long_unavailability; - if (message.sql && message.sql.length) { - object.sql = []; - for (let j = 0; j < message.sql.length; ++j) - object.sql[j] = message.sql[j]; - } - if (message.ddl_strategy != null && message.hasOwnProperty("ddl_strategy")) - object.ddl_strategy = message.ddl_strategy; - if (message.uuid_list && message.uuid_list.length) { - object.uuid_list = []; - for (let j = 0; j < message.uuid_list.length; ++j) - object.uuid_list[j] = message.uuid_list[j]; + if (options.arrays || options.defaults) + object.keyspaces = []; + if (message.keyspaces && message.keyspaces.length) { + object.keyspaces = []; + for (let j = 0; j < message.keyspaces.length; ++j) + object.keyspaces[j] = $root.vtctldata.Keyspace.toObject(message.keyspaces[j], options); } - if (message.migration_context != null && message.hasOwnProperty("migration_context")) - object.migration_context = message.migration_context; - if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout")) - object.wait_replicas_timeout = $root.vttime.Duration.toObject(message.wait_replicas_timeout, options); - if (message.skip_preflight != null && message.hasOwnProperty("skip_preflight")) - object.skip_preflight = message.skip_preflight; - if (message.caller_id != null && message.hasOwnProperty("caller_id")) - object.caller_id = $root.vtrpc.CallerID.toObject(message.caller_id, options); return object; }; /** - * Converts this ApplySchemaRequest to JSON. + * Converts this GetKeyspacesResponse to JSON. 
* @function toJSON - * @memberof vtctldata.ApplySchemaRequest + * @memberof vtctldata.GetKeyspacesResponse * @instance * @returns {Object.} JSON object */ - ApplySchemaRequest.prototype.toJSON = function toJSON() { + GetKeyspacesResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ApplySchemaRequest + * Gets the default type url for GetKeyspacesResponse * @function getTypeUrl - * @memberof vtctldata.ApplySchemaRequest + * @memberof vtctldata.GetKeyspacesResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ApplySchemaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetKeyspacesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ApplySchemaRequest"; + return typeUrlPrefix + "/vtctldata.GetKeyspacesResponse"; }; - return ApplySchemaRequest; + return GetKeyspacesResponse; })(); - vtctldata.ApplySchemaResponse = (function() { + vtctldata.GetKeyspaceRequest = (function() { /** - * Properties of an ApplySchemaResponse. + * Properties of a GetKeyspaceRequest. * @memberof vtctldata - * @interface IApplySchemaResponse - * @property {Array.|null} [uuid_list] ApplySchemaResponse uuid_list + * @interface IGetKeyspaceRequest + * @property {string|null} [keyspace] GetKeyspaceRequest keyspace */ /** - * Constructs a new ApplySchemaResponse. + * Constructs a new GetKeyspaceRequest. * @memberof vtctldata - * @classdesc Represents an ApplySchemaResponse. - * @implements IApplySchemaResponse + * @classdesc Represents a GetKeyspaceRequest. 
+ * @implements IGetKeyspaceRequest * @constructor - * @param {vtctldata.IApplySchemaResponse=} [properties] Properties to set + * @param {vtctldata.IGetKeyspaceRequest=} [properties] Properties to set */ - function ApplySchemaResponse(properties) { - this.uuid_list = []; + function GetKeyspaceRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -103341,78 +123985,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ApplySchemaResponse uuid_list. - * @member {Array.} uuid_list - * @memberof vtctldata.ApplySchemaResponse + * GetKeyspaceRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.GetKeyspaceRequest * @instance */ - ApplySchemaResponse.prototype.uuid_list = $util.emptyArray; + GetKeyspaceRequest.prototype.keyspace = ""; /** - * Creates a new ApplySchemaResponse instance using the specified properties. + * Creates a new GetKeyspaceRequest instance using the specified properties. * @function create - * @memberof vtctldata.ApplySchemaResponse + * @memberof vtctldata.GetKeyspaceRequest * @static - * @param {vtctldata.IApplySchemaResponse=} [properties] Properties to set - * @returns {vtctldata.ApplySchemaResponse} ApplySchemaResponse instance + * @param {vtctldata.IGetKeyspaceRequest=} [properties] Properties to set + * @returns {vtctldata.GetKeyspaceRequest} GetKeyspaceRequest instance */ - ApplySchemaResponse.create = function create(properties) { - return new ApplySchemaResponse(properties); + GetKeyspaceRequest.create = function create(properties) { + return new GetKeyspaceRequest(properties); }; /** - * Encodes the specified ApplySchemaResponse message. Does not implicitly {@link vtctldata.ApplySchemaResponse.verify|verify} messages. + * Encodes the specified GetKeyspaceRequest message. Does not implicitly {@link vtctldata.GetKeyspaceRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ApplySchemaResponse + * @memberof vtctldata.GetKeyspaceRequest * @static - * @param {vtctldata.IApplySchemaResponse} message ApplySchemaResponse message or plain object to encode + * @param {vtctldata.IGetKeyspaceRequest} message GetKeyspaceRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplySchemaResponse.encode = function encode(message, writer) { + GetKeyspaceRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.uuid_list != null && message.uuid_list.length) - for (let i = 0; i < message.uuid_list.length; ++i) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.uuid_list[i]); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); return writer; }; /** - * Encodes the specified ApplySchemaResponse message, length delimited. Does not implicitly {@link vtctldata.ApplySchemaResponse.verify|verify} messages. + * Encodes the specified GetKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.GetKeyspaceRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ApplySchemaResponse + * @memberof vtctldata.GetKeyspaceRequest * @static - * @param {vtctldata.IApplySchemaResponse} message ApplySchemaResponse message or plain object to encode + * @param {vtctldata.IGetKeyspaceRequest} message GetKeyspaceRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplySchemaResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetKeyspaceRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ApplySchemaResponse message from the specified reader or buffer. 
+ * Decodes a GetKeyspaceRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ApplySchemaResponse + * @memberof vtctldata.GetKeyspaceRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ApplySchemaResponse} ApplySchemaResponse + * @returns {vtctldata.GetKeyspaceRequest} GetKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplySchemaResponse.decode = function decode(reader, length) { + GetKeyspaceRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplySchemaResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetKeyspaceRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.uuid_list && message.uuid_list.length)) - message.uuid_list = []; - message.uuid_list.push(reader.string()); + message.keyspace = reader.string(); break; } default: @@ -103424,140 +124065,122 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an ApplySchemaResponse message from the specified reader or buffer, length delimited. + * Decodes a GetKeyspaceRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.ApplySchemaResponse + * @memberof vtctldata.GetKeyspaceRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ApplySchemaResponse} ApplySchemaResponse + * @returns {vtctldata.GetKeyspaceRequest} GetKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplySchemaResponse.decodeDelimited = function decodeDelimited(reader) { + GetKeyspaceRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ApplySchemaResponse message. + * Verifies a GetKeyspaceRequest message. * @function verify - * @memberof vtctldata.ApplySchemaResponse + * @memberof vtctldata.GetKeyspaceRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ApplySchemaResponse.verify = function verify(message) { + GetKeyspaceRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.uuid_list != null && message.hasOwnProperty("uuid_list")) { - if (!Array.isArray(message.uuid_list)) - return "uuid_list: array expected"; - for (let i = 0; i < message.uuid_list.length; ++i) - if (!$util.isString(message.uuid_list[i])) - return "uuid_list: string[] expected"; - } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; return null; }; /** - * Creates an ApplySchemaResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetKeyspaceRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ApplySchemaResponse + * @memberof vtctldata.GetKeyspaceRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ApplySchemaResponse} ApplySchemaResponse + * @returns {vtctldata.GetKeyspaceRequest} GetKeyspaceRequest */ - ApplySchemaResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ApplySchemaResponse) + GetKeyspaceRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetKeyspaceRequest) return object; - let message = new $root.vtctldata.ApplySchemaResponse(); - if (object.uuid_list) { - if (!Array.isArray(object.uuid_list)) - throw TypeError(".vtctldata.ApplySchemaResponse.uuid_list: array expected"); - message.uuid_list = []; - for (let i = 0; i < object.uuid_list.length; ++i) - message.uuid_list[i] = String(object.uuid_list[i]); - } + let message = new $root.vtctldata.GetKeyspaceRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); return message; }; /** - * Creates a plain object from an ApplySchemaResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetKeyspaceRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ApplySchemaResponse + * @memberof vtctldata.GetKeyspaceRequest * @static - * @param {vtctldata.ApplySchemaResponse} message ApplySchemaResponse + * @param {vtctldata.GetKeyspaceRequest} message GetKeyspaceRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ApplySchemaResponse.toObject = function toObject(message, options) { + GetKeyspaceRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.uuid_list = []; - if (message.uuid_list && message.uuid_list.length) { - object.uuid_list = []; - for (let j = 0; j < message.uuid_list.length; ++j) - object.uuid_list[j] = message.uuid_list[j]; - } + if (options.defaults) + object.keyspace = ""; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; return object; }; /** - * Converts this ApplySchemaResponse to JSON. + * Converts this GetKeyspaceRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ApplySchemaResponse + * @memberof vtctldata.GetKeyspaceRequest * @instance * @returns {Object.} JSON object */ - ApplySchemaResponse.prototype.toJSON = function toJSON() { + GetKeyspaceRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ApplySchemaResponse + * Gets the default type url for GetKeyspaceRequest * @function getTypeUrl - * @memberof vtctldata.ApplySchemaResponse + * @memberof vtctldata.GetKeyspaceRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ApplySchemaResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetKeyspaceRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ApplySchemaResponse"; + return typeUrlPrefix + "/vtctldata.GetKeyspaceRequest"; }; - return ApplySchemaResponse; + return GetKeyspaceRequest; })(); - vtctldata.ApplyVSchemaRequest = (function() { + vtctldata.GetKeyspaceResponse = (function() { /** - * Properties of an ApplyVSchemaRequest. + * Properties of a GetKeyspaceResponse. * @memberof vtctldata - * @interface IApplyVSchemaRequest - * @property {string|null} [keyspace] ApplyVSchemaRequest keyspace - * @property {boolean|null} [skip_rebuild] ApplyVSchemaRequest skip_rebuild - * @property {boolean|null} [dry_run] ApplyVSchemaRequest dry_run - * @property {Array.|null} [cells] ApplyVSchemaRequest cells - * @property {vschema.IKeyspace|null} [v_schema] ApplyVSchemaRequest v_schema - * @property {string|null} [sql] ApplyVSchemaRequest sql + * @interface IGetKeyspaceResponse + * @property {vtctldata.IKeyspace|null} [keyspace] GetKeyspaceResponse keyspace */ /** - * Constructs a new ApplyVSchemaRequest. + * Constructs a new GetKeyspaceResponse. 
* @memberof vtctldata - * @classdesc Represents an ApplyVSchemaRequest. - * @implements IApplyVSchemaRequest + * @classdesc Represents a GetKeyspaceResponse. + * @implements IGetKeyspaceResponse * @constructor - * @param {vtctldata.IApplyVSchemaRequest=} [properties] Properties to set + * @param {vtctldata.IGetKeyspaceResponse=} [properties] Properties to set */ - function ApplyVSchemaRequest(properties) { - this.cells = []; + function GetKeyspaceResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -103565,148 +124188,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ApplyVSchemaRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.ApplyVSchemaRequest - * @instance - */ - ApplyVSchemaRequest.prototype.keyspace = ""; - - /** - * ApplyVSchemaRequest skip_rebuild. - * @member {boolean} skip_rebuild - * @memberof vtctldata.ApplyVSchemaRequest - * @instance - */ - ApplyVSchemaRequest.prototype.skip_rebuild = false; - - /** - * ApplyVSchemaRequest dry_run. - * @member {boolean} dry_run - * @memberof vtctldata.ApplyVSchemaRequest - * @instance - */ - ApplyVSchemaRequest.prototype.dry_run = false; - - /** - * ApplyVSchemaRequest cells. - * @member {Array.} cells - * @memberof vtctldata.ApplyVSchemaRequest - * @instance - */ - ApplyVSchemaRequest.prototype.cells = $util.emptyArray; - - /** - * ApplyVSchemaRequest v_schema. - * @member {vschema.IKeyspace|null|undefined} v_schema - * @memberof vtctldata.ApplyVSchemaRequest - * @instance - */ - ApplyVSchemaRequest.prototype.v_schema = null; - - /** - * ApplyVSchemaRequest sql. - * @member {string} sql - * @memberof vtctldata.ApplyVSchemaRequest + * GetKeyspaceResponse keyspace. 
+ * @member {vtctldata.IKeyspace|null|undefined} keyspace + * @memberof vtctldata.GetKeyspaceResponse * @instance */ - ApplyVSchemaRequest.prototype.sql = ""; + GetKeyspaceResponse.prototype.keyspace = null; /** - * Creates a new ApplyVSchemaRequest instance using the specified properties. + * Creates a new GetKeyspaceResponse instance using the specified properties. * @function create - * @memberof vtctldata.ApplyVSchemaRequest + * @memberof vtctldata.GetKeyspaceResponse * @static - * @param {vtctldata.IApplyVSchemaRequest=} [properties] Properties to set - * @returns {vtctldata.ApplyVSchemaRequest} ApplyVSchemaRequest instance + * @param {vtctldata.IGetKeyspaceResponse=} [properties] Properties to set + * @returns {vtctldata.GetKeyspaceResponse} GetKeyspaceResponse instance */ - ApplyVSchemaRequest.create = function create(properties) { - return new ApplyVSchemaRequest(properties); + GetKeyspaceResponse.create = function create(properties) { + return new GetKeyspaceResponse(properties); }; /** - * Encodes the specified ApplyVSchemaRequest message. Does not implicitly {@link vtctldata.ApplyVSchemaRequest.verify|verify} messages. + * Encodes the specified GetKeyspaceResponse message. Does not implicitly {@link vtctldata.GetKeyspaceResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ApplyVSchemaRequest + * @memberof vtctldata.GetKeyspaceResponse * @static - * @param {vtctldata.IApplyVSchemaRequest} message ApplyVSchemaRequest message or plain object to encode + * @param {vtctldata.IGetKeyspaceResponse} message GetKeyspaceResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplyVSchemaRequest.encode = function encode(message, writer) { + GetKeyspaceResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.skip_rebuild != null && Object.hasOwnProperty.call(message, "skip_rebuild")) - writer.uint32(/* id 2, wireType 0 =*/16).bool(message.skip_rebuild); - if (message.dry_run != null && Object.hasOwnProperty.call(message, "dry_run")) - writer.uint32(/* id 3, wireType 0 =*/24).bool(message.dry_run); - if (message.cells != null && message.cells.length) - for (let i = 0; i < message.cells.length; ++i) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.cells[i]); - if (message.v_schema != null && Object.hasOwnProperty.call(message, "v_schema")) - $root.vschema.Keyspace.encode(message.v_schema, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); - if (message.sql != null && Object.hasOwnProperty.call(message, "sql")) - writer.uint32(/* id 6, wireType 2 =*/50).string(message.sql); + $root.vtctldata.Keyspace.encode(message.keyspace, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified ApplyVSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.ApplyVSchemaRequest.verify|verify} messages. + * Encodes the specified GetKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.GetKeyspaceResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ApplyVSchemaRequest + * @memberof vtctldata.GetKeyspaceResponse * @static - * @param {vtctldata.IApplyVSchemaRequest} message ApplyVSchemaRequest message or plain object to encode + * @param {vtctldata.IGetKeyspaceResponse} message GetKeyspaceResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplyVSchemaRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetKeyspaceResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ApplyVSchemaRequest message from the specified reader or buffer. + * Decodes a GetKeyspaceResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ApplyVSchemaRequest + * @memberof vtctldata.GetKeyspaceResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ApplyVSchemaRequest} ApplyVSchemaRequest + * @returns {vtctldata.GetKeyspaceResponse} GetKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplyVSchemaRequest.decode = function decode(reader, length) { + GetKeyspaceResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplyVSchemaRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetKeyspaceResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - message.skip_rebuild = reader.bool(); - break; - } - case 3: { - message.dry_run = reader.bool(); - break; - } - case 4: { - if (!(message.cells && message.cells.length)) - message.cells = []; - message.cells.push(reader.string()); - break; - } - case 5: { - message.v_schema = $root.vschema.Keyspace.decode(reader, reader.uint32()); - break; - } - case 6: { - message.sql = reader.string(); + message.keyspace = $root.vtctldata.Keyspace.decode(reader, reader.uint32()); break; } default: @@ -103718,181 +124268,127 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an ApplyVSchemaRequest message from the specified reader or buffer, length delimited. + * Decodes a GetKeyspaceResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ApplyVSchemaRequest + * @memberof vtctldata.GetKeyspaceResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ApplyVSchemaRequest} ApplyVSchemaRequest + * @returns {vtctldata.GetKeyspaceResponse} GetKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplyVSchemaRequest.decodeDelimited = function decodeDelimited(reader) { + GetKeyspaceResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ApplyVSchemaRequest message. + * Verifies a GetKeyspaceResponse message. 
* @function verify - * @memberof vtctldata.ApplyVSchemaRequest + * @memberof vtctldata.GetKeyspaceResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ApplyVSchemaRequest.verify = function verify(message) { + GetKeyspaceResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.skip_rebuild != null && message.hasOwnProperty("skip_rebuild")) - if (typeof message.skip_rebuild !== "boolean") - return "skip_rebuild: boolean expected"; - if (message.dry_run != null && message.hasOwnProperty("dry_run")) - if (typeof message.dry_run !== "boolean") - return "dry_run: boolean expected"; - if (message.cells != null && message.hasOwnProperty("cells")) { - if (!Array.isArray(message.cells)) - return "cells: array expected"; - for (let i = 0; i < message.cells.length; ++i) - if (!$util.isString(message.cells[i])) - return "cells: string[] expected"; - } - if (message.v_schema != null && message.hasOwnProperty("v_schema")) { - let error = $root.vschema.Keyspace.verify(message.v_schema); + if (message.keyspace != null && message.hasOwnProperty("keyspace")) { + let error = $root.vtctldata.Keyspace.verify(message.keyspace); if (error) - return "v_schema." + error; + return "keyspace." + error; } - if (message.sql != null && message.hasOwnProperty("sql")) - if (!$util.isString(message.sql)) - return "sql: string expected"; return null; }; /** - * Creates an ApplyVSchemaRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetKeyspaceResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ApplyVSchemaRequest + * @memberof vtctldata.GetKeyspaceResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ApplyVSchemaRequest} ApplyVSchemaRequest + * @returns {vtctldata.GetKeyspaceResponse} GetKeyspaceResponse */ - ApplyVSchemaRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ApplyVSchemaRequest) + GetKeyspaceResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetKeyspaceResponse) return object; - let message = new $root.vtctldata.ApplyVSchemaRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.skip_rebuild != null) - message.skip_rebuild = Boolean(object.skip_rebuild); - if (object.dry_run != null) - message.dry_run = Boolean(object.dry_run); - if (object.cells) { - if (!Array.isArray(object.cells)) - throw TypeError(".vtctldata.ApplyVSchemaRequest.cells: array expected"); - message.cells = []; - for (let i = 0; i < object.cells.length; ++i) - message.cells[i] = String(object.cells[i]); - } - if (object.v_schema != null) { - if (typeof object.v_schema !== "object") - throw TypeError(".vtctldata.ApplyVSchemaRequest.v_schema: object expected"); - message.v_schema = $root.vschema.Keyspace.fromObject(object.v_schema); + let message = new $root.vtctldata.GetKeyspaceResponse(); + if (object.keyspace != null) { + if (typeof object.keyspace !== "object") + throw TypeError(".vtctldata.GetKeyspaceResponse.keyspace: object expected"); + message.keyspace = $root.vtctldata.Keyspace.fromObject(object.keyspace); } - if (object.sql != null) - message.sql = String(object.sql); return message; }; /** - * Creates a plain object from an ApplyVSchemaRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetKeyspaceResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ApplyVSchemaRequest + * @memberof vtctldata.GetKeyspaceResponse * @static - * @param {vtctldata.ApplyVSchemaRequest} message ApplyVSchemaRequest + * @param {vtctldata.GetKeyspaceResponse} message GetKeyspaceResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ApplyVSchemaRequest.toObject = function toObject(message, options) { + GetKeyspaceResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.cells = []; - if (options.defaults) { - object.keyspace = ""; - object.skip_rebuild = false; - object.dry_run = false; - object.v_schema = null; - object.sql = ""; - } + if (options.defaults) + object.keyspace = null; if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.skip_rebuild != null && message.hasOwnProperty("skip_rebuild")) - object.skip_rebuild = message.skip_rebuild; - if (message.dry_run != null && message.hasOwnProperty("dry_run")) - object.dry_run = message.dry_run; - if (message.cells && message.cells.length) { - object.cells = []; - for (let j = 0; j < message.cells.length; ++j) - object.cells[j] = message.cells[j]; - } - if (message.v_schema != null && message.hasOwnProperty("v_schema")) - object.v_schema = $root.vschema.Keyspace.toObject(message.v_schema, options); - if (message.sql != null && message.hasOwnProperty("sql")) - object.sql = message.sql; + object.keyspace = $root.vtctldata.Keyspace.toObject(message.keyspace, options); return object; }; /** - * Converts this ApplyVSchemaRequest to JSON. + * Converts this GetKeyspaceResponse to JSON. 
* @function toJSON - * @memberof vtctldata.ApplyVSchemaRequest + * @memberof vtctldata.GetKeyspaceResponse * @instance * @returns {Object.} JSON object */ - ApplyVSchemaRequest.prototype.toJSON = function toJSON() { + GetKeyspaceResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ApplyVSchemaRequest + * Gets the default type url for GetKeyspaceResponse * @function getTypeUrl - * @memberof vtctldata.ApplyVSchemaRequest + * @memberof vtctldata.GetKeyspaceResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ApplyVSchemaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetKeyspaceResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ApplyVSchemaRequest"; + return typeUrlPrefix + "/vtctldata.GetKeyspaceResponse"; }; - return ApplyVSchemaRequest; + return GetKeyspaceResponse; })(); - vtctldata.ApplyVSchemaResponse = (function() { + vtctldata.GetPermissionsRequest = (function() { /** - * Properties of an ApplyVSchemaResponse. + * Properties of a GetPermissionsRequest. * @memberof vtctldata - * @interface IApplyVSchemaResponse - * @property {vschema.IKeyspace|null} [v_schema] ApplyVSchemaResponse v_schema + * @interface IGetPermissionsRequest + * @property {topodata.ITabletAlias|null} [tablet_alias] GetPermissionsRequest tablet_alias */ /** - * Constructs a new ApplyVSchemaResponse. + * Constructs a new GetPermissionsRequest. * @memberof vtctldata - * @classdesc Represents an ApplyVSchemaResponse. - * @implements IApplyVSchemaResponse + * @classdesc Represents a GetPermissionsRequest. 
+ * @implements IGetPermissionsRequest * @constructor - * @param {vtctldata.IApplyVSchemaResponse=} [properties] Properties to set + * @param {vtctldata.IGetPermissionsRequest=} [properties] Properties to set */ - function ApplyVSchemaResponse(properties) { + function GetPermissionsRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -103900,75 +124396,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ApplyVSchemaResponse v_schema. - * @member {vschema.IKeyspace|null|undefined} v_schema - * @memberof vtctldata.ApplyVSchemaResponse + * GetPermissionsRequest tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.GetPermissionsRequest * @instance */ - ApplyVSchemaResponse.prototype.v_schema = null; + GetPermissionsRequest.prototype.tablet_alias = null; /** - * Creates a new ApplyVSchemaResponse instance using the specified properties. + * Creates a new GetPermissionsRequest instance using the specified properties. * @function create - * @memberof vtctldata.ApplyVSchemaResponse + * @memberof vtctldata.GetPermissionsRequest * @static - * @param {vtctldata.IApplyVSchemaResponse=} [properties] Properties to set - * @returns {vtctldata.ApplyVSchemaResponse} ApplyVSchemaResponse instance + * @param {vtctldata.IGetPermissionsRequest=} [properties] Properties to set + * @returns {vtctldata.GetPermissionsRequest} GetPermissionsRequest instance */ - ApplyVSchemaResponse.create = function create(properties) { - return new ApplyVSchemaResponse(properties); + GetPermissionsRequest.create = function create(properties) { + return new GetPermissionsRequest(properties); }; /** - * Encodes the specified ApplyVSchemaResponse message. Does not implicitly {@link vtctldata.ApplyVSchemaResponse.verify|verify} messages. + * Encodes the specified GetPermissionsRequest message. 
Does not implicitly {@link vtctldata.GetPermissionsRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.ApplyVSchemaResponse + * @memberof vtctldata.GetPermissionsRequest * @static - * @param {vtctldata.IApplyVSchemaResponse} message ApplyVSchemaResponse message or plain object to encode + * @param {vtctldata.IGetPermissionsRequest} message GetPermissionsRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplyVSchemaResponse.encode = function encode(message, writer) { + GetPermissionsRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.v_schema != null && Object.hasOwnProperty.call(message, "v_schema")) - $root.vschema.Keyspace.encode(message.v_schema, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified ApplyVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.ApplyVSchemaResponse.verify|verify} messages. + * Encodes the specified GetPermissionsRequest message, length delimited. Does not implicitly {@link vtctldata.GetPermissionsRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ApplyVSchemaResponse + * @memberof vtctldata.GetPermissionsRequest * @static - * @param {vtctldata.IApplyVSchemaResponse} message ApplyVSchemaResponse message or plain object to encode + * @param {vtctldata.IGetPermissionsRequest} message GetPermissionsRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplyVSchemaResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetPermissionsRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ApplyVSchemaResponse message from the specified reader or buffer. + * Decodes a GetPermissionsRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ApplyVSchemaResponse + * @memberof vtctldata.GetPermissionsRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ApplyVSchemaResponse} ApplyVSchemaResponse + * @returns {vtctldata.GetPermissionsRequest} GetPermissionsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplyVSchemaResponse.decode = function decode(reader, length) { + GetPermissionsRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplyVSchemaResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetPermissionsRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.v_schema = $root.vschema.Keyspace.decode(reader, reader.uint32()); + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); break; } default: @@ -103980,130 +124476,127 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an ApplyVSchemaResponse message from the specified reader or buffer, length delimited. + * Decodes a GetPermissionsRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ApplyVSchemaResponse + * @memberof vtctldata.GetPermissionsRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ApplyVSchemaResponse} ApplyVSchemaResponse + * @returns {vtctldata.GetPermissionsRequest} GetPermissionsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplyVSchemaResponse.decodeDelimited = function decodeDelimited(reader) { + GetPermissionsRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ApplyVSchemaResponse message. + * Verifies a GetPermissionsRequest message. 
* @function verify - * @memberof vtctldata.ApplyVSchemaResponse + * @memberof vtctldata.GetPermissionsRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ApplyVSchemaResponse.verify = function verify(message) { + GetPermissionsRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.v_schema != null && message.hasOwnProperty("v_schema")) { - let error = $root.vschema.Keyspace.verify(message.v_schema); + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); if (error) - return "v_schema." + error; + return "tablet_alias." + error; } return null; }; /** - * Creates an ApplyVSchemaResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetPermissionsRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ApplyVSchemaResponse + * @memberof vtctldata.GetPermissionsRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ApplyVSchemaResponse} ApplyVSchemaResponse + * @returns {vtctldata.GetPermissionsRequest} GetPermissionsRequest */ - ApplyVSchemaResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ApplyVSchemaResponse) + GetPermissionsRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetPermissionsRequest) return object; - let message = new $root.vtctldata.ApplyVSchemaResponse(); - if (object.v_schema != null) { - if (typeof object.v_schema !== "object") - throw TypeError(".vtctldata.ApplyVSchemaResponse.v_schema: object expected"); - message.v_schema = $root.vschema.Keyspace.fromObject(object.v_schema); + let message = new $root.vtctldata.GetPermissionsRequest(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.GetPermissionsRequest.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); } return message; }; /** - * Creates a plain object from an ApplyVSchemaResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetPermissionsRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ApplyVSchemaResponse + * @memberof vtctldata.GetPermissionsRequest * @static - * @param {vtctldata.ApplyVSchemaResponse} message ApplyVSchemaResponse + * @param {vtctldata.GetPermissionsRequest} message GetPermissionsRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ApplyVSchemaResponse.toObject = function toObject(message, options) { + GetPermissionsRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) - object.v_schema = null; - if (message.v_schema != null && message.hasOwnProperty("v_schema")) - object.v_schema = $root.vschema.Keyspace.toObject(message.v_schema, options); + object.tablet_alias = null; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); return object; }; /** - * Converts this ApplyVSchemaResponse to JSON. + * Converts this GetPermissionsRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ApplyVSchemaResponse + * @memberof vtctldata.GetPermissionsRequest * @instance * @returns {Object.} JSON object */ - ApplyVSchemaResponse.prototype.toJSON = function toJSON() { + GetPermissionsRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ApplyVSchemaResponse + * Gets the default type url for GetPermissionsRequest * @function getTypeUrl - * @memberof vtctldata.ApplyVSchemaResponse + * @memberof vtctldata.GetPermissionsRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ApplyVSchemaResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetPermissionsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ApplyVSchemaResponse"; + return typeUrlPrefix + "/vtctldata.GetPermissionsRequest"; }; - return ApplyVSchemaResponse; + return GetPermissionsRequest; })(); - vtctldata.BackupRequest = (function() { + vtctldata.GetPermissionsResponse = (function() { /** - * Properties of a BackupRequest. + * Properties of a GetPermissionsResponse. * @memberof vtctldata - * @interface IBackupRequest - * @property {topodata.ITabletAlias|null} [tablet_alias] BackupRequest tablet_alias - * @property {boolean|null} [allow_primary] BackupRequest allow_primary - * @property {number|Long|null} [concurrency] BackupRequest concurrency - * @property {string|null} [incremental_from_pos] BackupRequest incremental_from_pos + * @interface IGetPermissionsResponse + * @property {tabletmanagerdata.IPermissions|null} [permissions] GetPermissionsResponse permissions */ /** - * Constructs a new BackupRequest. + * Constructs a new GetPermissionsResponse. * @memberof vtctldata - * @classdesc Represents a BackupRequest. 
- * @implements IBackupRequest + * @classdesc Represents a GetPermissionsResponse. + * @implements IGetPermissionsResponse * @constructor - * @param {vtctldata.IBackupRequest=} [properties] Properties to set + * @param {vtctldata.IGetPermissionsResponse=} [properties] Properties to set */ - function BackupRequest(properties) { + function GetPermissionsResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -104111,117 +124604,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * BackupRequest tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.BackupRequest - * @instance - */ - BackupRequest.prototype.tablet_alias = null; - - /** - * BackupRequest allow_primary. - * @member {boolean} allow_primary - * @memberof vtctldata.BackupRequest - * @instance - */ - BackupRequest.prototype.allow_primary = false; - - /** - * BackupRequest concurrency. - * @member {number|Long} concurrency - * @memberof vtctldata.BackupRequest - * @instance - */ - BackupRequest.prototype.concurrency = $util.Long ? $util.Long.fromBits(0,0,true) : 0; - - /** - * BackupRequest incremental_from_pos. - * @member {string} incremental_from_pos - * @memberof vtctldata.BackupRequest + * GetPermissionsResponse permissions. + * @member {tabletmanagerdata.IPermissions|null|undefined} permissions + * @memberof vtctldata.GetPermissionsResponse * @instance */ - BackupRequest.prototype.incremental_from_pos = ""; + GetPermissionsResponse.prototype.permissions = null; /** - * Creates a new BackupRequest instance using the specified properties. + * Creates a new GetPermissionsResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.BackupRequest + * @memberof vtctldata.GetPermissionsResponse * @static - * @param {vtctldata.IBackupRequest=} [properties] Properties to set - * @returns {vtctldata.BackupRequest} BackupRequest instance + * @param {vtctldata.IGetPermissionsResponse=} [properties] Properties to set + * @returns {vtctldata.GetPermissionsResponse} GetPermissionsResponse instance */ - BackupRequest.create = function create(properties) { - return new BackupRequest(properties); + GetPermissionsResponse.create = function create(properties) { + return new GetPermissionsResponse(properties); }; /** - * Encodes the specified BackupRequest message. Does not implicitly {@link vtctldata.BackupRequest.verify|verify} messages. + * Encodes the specified GetPermissionsResponse message. Does not implicitly {@link vtctldata.GetPermissionsResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.BackupRequest + * @memberof vtctldata.GetPermissionsResponse * @static - * @param {vtctldata.IBackupRequest} message BackupRequest message or plain object to encode + * @param {vtctldata.IGetPermissionsResponse} message GetPermissionsResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BackupRequest.encode = function encode(message, writer) { + GetPermissionsResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.allow_primary != null && Object.hasOwnProperty.call(message, "allow_primary")) - writer.uint32(/* id 2, wireType 0 =*/16).bool(message.allow_primary); - if (message.concurrency != null && Object.hasOwnProperty.call(message, "concurrency")) - writer.uint32(/* id 3, wireType 0 
=*/24).uint64(message.concurrency); - if (message.incremental_from_pos != null && Object.hasOwnProperty.call(message, "incremental_from_pos")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.incremental_from_pos); + if (message.permissions != null && Object.hasOwnProperty.call(message, "permissions")) + $root.tabletmanagerdata.Permissions.encode(message.permissions, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified BackupRequest message, length delimited. Does not implicitly {@link vtctldata.BackupRequest.verify|verify} messages. + * Encodes the specified GetPermissionsResponse message, length delimited. Does not implicitly {@link vtctldata.GetPermissionsResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.BackupRequest + * @memberof vtctldata.GetPermissionsResponse * @static - * @param {vtctldata.IBackupRequest} message BackupRequest message or plain object to encode + * @param {vtctldata.IGetPermissionsResponse} message GetPermissionsResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BackupRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetPermissionsResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a BackupRequest message from the specified reader or buffer. + * Decodes a GetPermissionsResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.BackupRequest + * @memberof vtctldata.GetPermissionsResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.BackupRequest} BackupRequest + * @returns {vtctldata.GetPermissionsResponse} GetPermissionsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BackupRequest.decode = function decode(reader, length) { + GetPermissionsResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.BackupRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetPermissionsResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } - case 2: { - message.allow_primary = reader.bool(); - break; - } - case 3: { - message.concurrency = reader.uint64(); - break; - } - case 4: { - message.incremental_from_pos = reader.string(); + message.permissions = $root.tabletmanagerdata.Permissions.decode(reader, reader.uint32()); break; } default: @@ -104233,169 +124684,126 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a BackupRequest message from the specified reader or buffer, length delimited. + * Decodes a GetPermissionsResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.BackupRequest + * @memberof vtctldata.GetPermissionsResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.BackupRequest} BackupRequest + * @returns {vtctldata.GetPermissionsResponse} GetPermissionsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BackupRequest.decodeDelimited = function decodeDelimited(reader) { + GetPermissionsResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a BackupRequest message. + * Verifies a GetPermissionsResponse message. * @function verify - * @memberof vtctldata.BackupRequest + * @memberof vtctldata.GetPermissionsResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - BackupRequest.verify = function verify(message) { + GetPermissionsResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (message.permissions != null && message.hasOwnProperty("permissions")) { + let error = $root.tabletmanagerdata.Permissions.verify(message.permissions); if (error) - return "tablet_alias." + error; + return "permissions." 
+ error; } - if (message.allow_primary != null && message.hasOwnProperty("allow_primary")) - if (typeof message.allow_primary !== "boolean") - return "allow_primary: boolean expected"; - if (message.concurrency != null && message.hasOwnProperty("concurrency")) - if (!$util.isInteger(message.concurrency) && !(message.concurrency && $util.isInteger(message.concurrency.low) && $util.isInteger(message.concurrency.high))) - return "concurrency: integer|Long expected"; - if (message.incremental_from_pos != null && message.hasOwnProperty("incremental_from_pos")) - if (!$util.isString(message.incremental_from_pos)) - return "incremental_from_pos: string expected"; return null; }; /** - * Creates a BackupRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetPermissionsResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.BackupRequest + * @memberof vtctldata.GetPermissionsResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.BackupRequest} BackupRequest + * @returns {vtctldata.GetPermissionsResponse} GetPermissionsResponse */ - BackupRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.BackupRequest) + GetPermissionsResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetPermissionsResponse) return object; - let message = new $root.vtctldata.BackupRequest(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.BackupRequest.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + let message = new $root.vtctldata.GetPermissionsResponse(); + if (object.permissions != null) { + if (typeof object.permissions !== "object") + throw TypeError(".vtctldata.GetPermissionsResponse.permissions: object expected"); + 
message.permissions = $root.tabletmanagerdata.Permissions.fromObject(object.permissions); } - if (object.allow_primary != null) - message.allow_primary = Boolean(object.allow_primary); - if (object.concurrency != null) - if ($util.Long) - (message.concurrency = $util.Long.fromValue(object.concurrency)).unsigned = true; - else if (typeof object.concurrency === "string") - message.concurrency = parseInt(object.concurrency, 10); - else if (typeof object.concurrency === "number") - message.concurrency = object.concurrency; - else if (typeof object.concurrency === "object") - message.concurrency = new $util.LongBits(object.concurrency.low >>> 0, object.concurrency.high >>> 0).toNumber(true); - if (object.incremental_from_pos != null) - message.incremental_from_pos = String(object.incremental_from_pos); return message; }; /** - * Creates a plain object from a BackupRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetPermissionsResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.BackupRequest + * @memberof vtctldata.GetPermissionsResponse * @static - * @param {vtctldata.BackupRequest} message BackupRequest + * @param {vtctldata.GetPermissionsResponse} message GetPermissionsResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - BackupRequest.toObject = function toObject(message, options) { + GetPermissionsResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.tablet_alias = null; - object.allow_primary = false; - if ($util.Long) { - let long = new $util.Long(0, 0, true); - object.concurrency = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.concurrency = options.longs === String ? 
"0" : 0; - object.incremental_from_pos = ""; - } - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - if (message.allow_primary != null && message.hasOwnProperty("allow_primary")) - object.allow_primary = message.allow_primary; - if (message.concurrency != null && message.hasOwnProperty("concurrency")) - if (typeof message.concurrency === "number") - object.concurrency = options.longs === String ? String(message.concurrency) : message.concurrency; - else - object.concurrency = options.longs === String ? $util.Long.prototype.toString.call(message.concurrency) : options.longs === Number ? new $util.LongBits(message.concurrency.low >>> 0, message.concurrency.high >>> 0).toNumber(true) : message.concurrency; - if (message.incremental_from_pos != null && message.hasOwnProperty("incremental_from_pos")) - object.incremental_from_pos = message.incremental_from_pos; + if (options.defaults) + object.permissions = null; + if (message.permissions != null && message.hasOwnProperty("permissions")) + object.permissions = $root.tabletmanagerdata.Permissions.toObject(message.permissions, options); return object; }; /** - * Converts this BackupRequest to JSON. + * Converts this GetPermissionsResponse to JSON. 
* @function toJSON - * @memberof vtctldata.BackupRequest + * @memberof vtctldata.GetPermissionsResponse * @instance * @returns {Object.} JSON object */ - BackupRequest.prototype.toJSON = function toJSON() { + GetPermissionsResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for BackupRequest + * Gets the default type url for GetPermissionsResponse * @function getTypeUrl - * @memberof vtctldata.BackupRequest + * @memberof vtctldata.GetPermissionsResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - BackupRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetPermissionsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.BackupRequest"; + return typeUrlPrefix + "/vtctldata.GetPermissionsResponse"; }; - return BackupRequest; + return GetPermissionsResponse; })(); - vtctldata.BackupResponse = (function() { + vtctldata.GetRoutingRulesRequest = (function() { /** - * Properties of a BackupResponse. + * Properties of a GetRoutingRulesRequest. * @memberof vtctldata - * @interface IBackupResponse - * @property {topodata.ITabletAlias|null} [tablet_alias] BackupResponse tablet_alias - * @property {string|null} [keyspace] BackupResponse keyspace - * @property {string|null} [shard] BackupResponse shard - * @property {logutil.IEvent|null} [event] BackupResponse event + * @interface IGetRoutingRulesRequest */ /** - * Constructs a new BackupResponse. + * Constructs a new GetRoutingRulesRequest. * @memberof vtctldata - * @classdesc Represents a BackupResponse. - * @implements IBackupResponse + * @classdesc Represents a GetRoutingRulesRequest. 
+ * @implements IGetRoutingRulesRequest * @constructor - * @param {vtctldata.IBackupResponse=} [properties] Properties to set + * @param {vtctldata.IGetRoutingRulesRequest=} [properties] Properties to set */ - function BackupResponse(properties) { + function GetRoutingRulesRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -104403,119 +124811,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * BackupResponse tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.BackupResponse - * @instance - */ - BackupResponse.prototype.tablet_alias = null; - - /** - * BackupResponse keyspace. - * @member {string} keyspace - * @memberof vtctldata.BackupResponse - * @instance - */ - BackupResponse.prototype.keyspace = ""; - - /** - * BackupResponse shard. - * @member {string} shard - * @memberof vtctldata.BackupResponse - * @instance - */ - BackupResponse.prototype.shard = ""; - - /** - * BackupResponse event. - * @member {logutil.IEvent|null|undefined} event - * @memberof vtctldata.BackupResponse - * @instance - */ - BackupResponse.prototype.event = null; - - /** - * Creates a new BackupResponse instance using the specified properties. + * Creates a new GetRoutingRulesRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.BackupResponse + * @memberof vtctldata.GetRoutingRulesRequest * @static - * @param {vtctldata.IBackupResponse=} [properties] Properties to set - * @returns {vtctldata.BackupResponse} BackupResponse instance + * @param {vtctldata.IGetRoutingRulesRequest=} [properties] Properties to set + * @returns {vtctldata.GetRoutingRulesRequest} GetRoutingRulesRequest instance */ - BackupResponse.create = function create(properties) { - return new BackupResponse(properties); + GetRoutingRulesRequest.create = function create(properties) { + return new GetRoutingRulesRequest(properties); }; /** - * Encodes the specified BackupResponse message. Does not implicitly {@link vtctldata.BackupResponse.verify|verify} messages. + * Encodes the specified GetRoutingRulesRequest message. Does not implicitly {@link vtctldata.GetRoutingRulesRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.BackupResponse + * @memberof vtctldata.GetRoutingRulesRequest * @static - * @param {vtctldata.IBackupResponse} message BackupResponse message or plain object to encode + * @param {vtctldata.IGetRoutingRulesRequest} message GetRoutingRulesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BackupResponse.encode = function encode(message, writer) { + GetRoutingRulesRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.shard); - if 
(message.event != null && Object.hasOwnProperty.call(message, "event")) - $root.logutil.Event.encode(message.event, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; /** - * Encodes the specified BackupResponse message, length delimited. Does not implicitly {@link vtctldata.BackupResponse.verify|verify} messages. + * Encodes the specified GetRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.GetRoutingRulesRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.BackupResponse + * @memberof vtctldata.GetRoutingRulesRequest * @static - * @param {vtctldata.IBackupResponse} message BackupResponse message or plain object to encode + * @param {vtctldata.IGetRoutingRulesRequest} message GetRoutingRulesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BackupResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetRoutingRulesRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a BackupResponse message from the specified reader or buffer. + * Decodes a GetRoutingRulesRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.BackupResponse + * @memberof vtctldata.GetRoutingRulesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.BackupResponse} BackupResponse + * @returns {vtctldata.GetRoutingRulesRequest} GetRoutingRulesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BackupResponse.decode = function decode(reader, length) { + GetRoutingRulesRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.BackupResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetRoutingRulesRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } - case 2: { - message.keyspace = reader.string(); - break; - } - case 3: { - message.shard = reader.string(); - break; - } - case 4: { - message.event = $root.logutil.Event.decode(reader, reader.uint32()); - break; - } default: reader.skipType(tag & 7); break; @@ -104525,160 +124877,109 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a BackupResponse message from the specified reader or buffer, length delimited. + * Decodes a GetRoutingRulesRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.BackupResponse + * @memberof vtctldata.GetRoutingRulesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.BackupResponse} BackupResponse + * @returns {vtctldata.GetRoutingRulesRequest} GetRoutingRulesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BackupResponse.decodeDelimited = function decodeDelimited(reader) { + GetRoutingRulesRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a BackupResponse message. + * Verifies a GetRoutingRulesRequest message. * @function verify - * @memberof vtctldata.BackupResponse + * @memberof vtctldata.GetRoutingRulesRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - BackupResponse.verify = function verify(message) { + GetRoutingRulesRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.event != null && message.hasOwnProperty("event")) { - let error = $root.logutil.Event.verify(message.event); - if (error) - return "event." + error; - } return null; }; /** - * Creates a BackupResponse message from a plain object. 
Also converts values to their respective internal types. + * Creates a GetRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.BackupResponse + * @memberof vtctldata.GetRoutingRulesRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.BackupResponse} BackupResponse + * @returns {vtctldata.GetRoutingRulesRequest} GetRoutingRulesRequest */ - BackupResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.BackupResponse) + GetRoutingRulesRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetRoutingRulesRequest) return object; - let message = new $root.vtctldata.BackupResponse(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.BackupResponse.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.event != null) { - if (typeof object.event !== "object") - throw TypeError(".vtctldata.BackupResponse.event: object expected"); - message.event = $root.logutil.Event.fromObject(object.event); - } - return message; + return new $root.vtctldata.GetRoutingRulesRequest(); }; /** - * Creates a plain object from a BackupResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetRoutingRulesRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.BackupResponse + * @memberof vtctldata.GetRoutingRulesRequest * @static - * @param {vtctldata.BackupResponse} message BackupResponse + * @param {vtctldata.GetRoutingRulesRequest} message GetRoutingRulesRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - BackupResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.tablet_alias = null; - object.keyspace = ""; - object.shard = ""; - object.event = null; - } - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.event != null && message.hasOwnProperty("event")) - object.event = $root.logutil.Event.toObject(message.event, options); - return object; + GetRoutingRulesRequest.toObject = function toObject() { + return {}; }; /** - * Converts this BackupResponse to JSON. + * Converts this GetRoutingRulesRequest to JSON. 
* @function toJSON - * @memberof vtctldata.BackupResponse + * @memberof vtctldata.GetRoutingRulesRequest * @instance * @returns {Object.} JSON object */ - BackupResponse.prototype.toJSON = function toJSON() { + GetRoutingRulesRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for BackupResponse + * Gets the default type url for GetRoutingRulesRequest * @function getTypeUrl - * @memberof vtctldata.BackupResponse + * @memberof vtctldata.GetRoutingRulesRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - BackupResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetRoutingRulesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.BackupResponse"; + return typeUrlPrefix + "/vtctldata.GetRoutingRulesRequest"; }; - return BackupResponse; + return GetRoutingRulesRequest; })(); - vtctldata.BackupShardRequest = (function() { + vtctldata.GetRoutingRulesResponse = (function() { /** - * Properties of a BackupShardRequest. + * Properties of a GetRoutingRulesResponse. * @memberof vtctldata - * @interface IBackupShardRequest - * @property {string|null} [keyspace] BackupShardRequest keyspace - * @property {string|null} [shard] BackupShardRequest shard - * @property {boolean|null} [allow_primary] BackupShardRequest allow_primary - * @property {number|Long|null} [concurrency] BackupShardRequest concurrency + * @interface IGetRoutingRulesResponse + * @property {vschema.IRoutingRules|null} [routing_rules] GetRoutingRulesResponse routing_rules */ /** - * Constructs a new BackupShardRequest. + * Constructs a new GetRoutingRulesResponse. * @memberof vtctldata - * @classdesc Represents a BackupShardRequest. 
- * @implements IBackupShardRequest + * @classdesc Represents a GetRoutingRulesResponse. + * @implements IGetRoutingRulesResponse * @constructor - * @param {vtctldata.IBackupShardRequest=} [properties] Properties to set + * @param {vtctldata.IGetRoutingRulesResponse=} [properties] Properties to set */ - function BackupShardRequest(properties) { + function GetRoutingRulesResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -104686,117 +124987,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * BackupShardRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.BackupShardRequest - * @instance - */ - BackupShardRequest.prototype.keyspace = ""; - - /** - * BackupShardRequest shard. - * @member {string} shard - * @memberof vtctldata.BackupShardRequest - * @instance - */ - BackupShardRequest.prototype.shard = ""; - - /** - * BackupShardRequest allow_primary. - * @member {boolean} allow_primary - * @memberof vtctldata.BackupShardRequest - * @instance - */ - BackupShardRequest.prototype.allow_primary = false; - - /** - * BackupShardRequest concurrency. - * @member {number|Long} concurrency - * @memberof vtctldata.BackupShardRequest + * GetRoutingRulesResponse routing_rules. + * @member {vschema.IRoutingRules|null|undefined} routing_rules + * @memberof vtctldata.GetRoutingRulesResponse * @instance */ - BackupShardRequest.prototype.concurrency = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + GetRoutingRulesResponse.prototype.routing_rules = null; /** - * Creates a new BackupShardRequest instance using the specified properties. + * Creates a new GetRoutingRulesResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.BackupShardRequest + * @memberof vtctldata.GetRoutingRulesResponse * @static - * @param {vtctldata.IBackupShardRequest=} [properties] Properties to set - * @returns {vtctldata.BackupShardRequest} BackupShardRequest instance - */ - BackupShardRequest.create = function create(properties) { - return new BackupShardRequest(properties); + * @param {vtctldata.IGetRoutingRulesResponse=} [properties] Properties to set + * @returns {vtctldata.GetRoutingRulesResponse} GetRoutingRulesResponse instance + */ + GetRoutingRulesResponse.create = function create(properties) { + return new GetRoutingRulesResponse(properties); }; /** - * Encodes the specified BackupShardRequest message. Does not implicitly {@link vtctldata.BackupShardRequest.verify|verify} messages. + * Encodes the specified GetRoutingRulesResponse message. Does not implicitly {@link vtctldata.GetRoutingRulesResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.BackupShardRequest + * @memberof vtctldata.GetRoutingRulesResponse * @static - * @param {vtctldata.IBackupShardRequest} message BackupShardRequest message or plain object to encode + * @param {vtctldata.IGetRoutingRulesResponse} message GetRoutingRulesResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BackupShardRequest.encode = function encode(message, writer) { + GetRoutingRulesResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.allow_primary != null && Object.hasOwnProperty.call(message, "allow_primary")) - writer.uint32(/* id 3, wireType 0 
=*/24).bool(message.allow_primary); - if (message.concurrency != null && Object.hasOwnProperty.call(message, "concurrency")) - writer.uint32(/* id 4, wireType 0 =*/32).uint64(message.concurrency); + if (message.routing_rules != null && Object.hasOwnProperty.call(message, "routing_rules")) + $root.vschema.RoutingRules.encode(message.routing_rules, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified BackupShardRequest message, length delimited. Does not implicitly {@link vtctldata.BackupShardRequest.verify|verify} messages. + * Encodes the specified GetRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.GetRoutingRulesResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.BackupShardRequest + * @memberof vtctldata.GetRoutingRulesResponse * @static - * @param {vtctldata.IBackupShardRequest} message BackupShardRequest message or plain object to encode + * @param {vtctldata.IGetRoutingRulesResponse} message GetRoutingRulesResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - BackupShardRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetRoutingRulesResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a BackupShardRequest message from the specified reader or buffer. + * Decodes a GetRoutingRulesResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.BackupShardRequest + * @memberof vtctldata.GetRoutingRulesResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.BackupShardRequest} BackupShardRequest + * @returns {vtctldata.GetRoutingRulesResponse} GetRoutingRulesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BackupShardRequest.decode = function decode(reader, length) { + GetRoutingRulesResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.BackupShardRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetRoutingRulesResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - message.shard = reader.string(); - break; - } - case 3: { - message.allow_primary = reader.bool(); - break; - } - case 4: { - message.concurrency = reader.uint64(); + message.routing_rules = $root.vschema.RoutingRules.decode(reader, reader.uint32()); break; } default: @@ -104808,163 +125067,135 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a BackupShardRequest message from the specified reader or buffer, length delimited. + * Decodes a GetRoutingRulesResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.BackupShardRequest + * @memberof vtctldata.GetRoutingRulesResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.BackupShardRequest} BackupShardRequest + * @returns {vtctldata.GetRoutingRulesResponse} GetRoutingRulesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - BackupShardRequest.decodeDelimited = function decodeDelimited(reader) { + GetRoutingRulesResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a BackupShardRequest message. + * Verifies a GetRoutingRulesResponse message. * @function verify - * @memberof vtctldata.BackupShardRequest + * @memberof vtctldata.GetRoutingRulesResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - BackupShardRequest.verify = function verify(message) { + GetRoutingRulesResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.allow_primary != null && message.hasOwnProperty("allow_primary")) - if (typeof message.allow_primary !== "boolean") - return "allow_primary: boolean expected"; - if (message.concurrency != null && message.hasOwnProperty("concurrency")) - if (!$util.isInteger(message.concurrency) && !(message.concurrency && $util.isInteger(message.concurrency.low) && $util.isInteger(message.concurrency.high))) - return 
"concurrency: integer|Long expected"; + if (message.routing_rules != null && message.hasOwnProperty("routing_rules")) { + let error = $root.vschema.RoutingRules.verify(message.routing_rules); + if (error) + return "routing_rules." + error; + } return null; }; /** - * Creates a BackupShardRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.BackupShardRequest + * @memberof vtctldata.GetRoutingRulesResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.BackupShardRequest} BackupShardRequest + * @returns {vtctldata.GetRoutingRulesResponse} GetRoutingRulesResponse */ - BackupShardRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.BackupShardRequest) + GetRoutingRulesResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetRoutingRulesResponse) return object; - let message = new $root.vtctldata.BackupShardRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.allow_primary != null) - message.allow_primary = Boolean(object.allow_primary); - if (object.concurrency != null) - if ($util.Long) - (message.concurrency = $util.Long.fromValue(object.concurrency)).unsigned = true; - else if (typeof object.concurrency === "string") - message.concurrency = parseInt(object.concurrency, 10); - else if (typeof object.concurrency === "number") - message.concurrency = object.concurrency; - else if (typeof object.concurrency === "object") - message.concurrency = new $util.LongBits(object.concurrency.low >>> 0, object.concurrency.high >>> 0).toNumber(true); + let message = new $root.vtctldata.GetRoutingRulesResponse(); + if (object.routing_rules != null) { + if 
(typeof object.routing_rules !== "object") + throw TypeError(".vtctldata.GetRoutingRulesResponse.routing_rules: object expected"); + message.routing_rules = $root.vschema.RoutingRules.fromObject(object.routing_rules); + } return message; }; /** - * Creates a plain object from a BackupShardRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetRoutingRulesResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.BackupShardRequest + * @memberof vtctldata.GetRoutingRulesResponse * @static - * @param {vtctldata.BackupShardRequest} message BackupShardRequest + * @param {vtctldata.GetRoutingRulesResponse} message GetRoutingRulesResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - BackupShardRequest.toObject = function toObject(message, options) { + GetRoutingRulesResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.keyspace = ""; - object.shard = ""; - object.allow_primary = false; - if ($util.Long) { - let long = new $util.Long(0, 0, true); - object.concurrency = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.concurrency = options.longs === String ? "0" : 0; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.allow_primary != null && message.hasOwnProperty("allow_primary")) - object.allow_primary = message.allow_primary; - if (message.concurrency != null && message.hasOwnProperty("concurrency")) - if (typeof message.concurrency === "number") - object.concurrency = options.longs === String ? 
String(message.concurrency) : message.concurrency; - else - object.concurrency = options.longs === String ? $util.Long.prototype.toString.call(message.concurrency) : options.longs === Number ? new $util.LongBits(message.concurrency.low >>> 0, message.concurrency.high >>> 0).toNumber(true) : message.concurrency; + if (options.defaults) + object.routing_rules = null; + if (message.routing_rules != null && message.hasOwnProperty("routing_rules")) + object.routing_rules = $root.vschema.RoutingRules.toObject(message.routing_rules, options); return object; }; /** - * Converts this BackupShardRequest to JSON. + * Converts this GetRoutingRulesResponse to JSON. * @function toJSON - * @memberof vtctldata.BackupShardRequest + * @memberof vtctldata.GetRoutingRulesResponse * @instance * @returns {Object.} JSON object */ - BackupShardRequest.prototype.toJSON = function toJSON() { + GetRoutingRulesResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for BackupShardRequest + * Gets the default type url for GetRoutingRulesResponse * @function getTypeUrl - * @memberof vtctldata.BackupShardRequest + * @memberof vtctldata.GetRoutingRulesResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - BackupShardRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetRoutingRulesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.BackupShardRequest"; + return typeUrlPrefix + "/vtctldata.GetRoutingRulesResponse"; }; - return BackupShardRequest; + return GetRoutingRulesResponse; })(); - vtctldata.ChangeTabletTypeRequest = (function() { + vtctldata.GetSchemaRequest = (function() { /** - * Properties of a ChangeTabletTypeRequest. + * Properties of a GetSchemaRequest. 
* @memberof vtctldata - * @interface IChangeTabletTypeRequest - * @property {topodata.ITabletAlias|null} [tablet_alias] ChangeTabletTypeRequest tablet_alias - * @property {topodata.TabletType|null} [db_type] ChangeTabletTypeRequest db_type - * @property {boolean|null} [dry_run] ChangeTabletTypeRequest dry_run + * @interface IGetSchemaRequest + * @property {topodata.ITabletAlias|null} [tablet_alias] GetSchemaRequest tablet_alias + * @property {Array.|null} [tables] GetSchemaRequest tables + * @property {Array.|null} [exclude_tables] GetSchemaRequest exclude_tables + * @property {boolean|null} [include_views] GetSchemaRequest include_views + * @property {boolean|null} [table_names_only] GetSchemaRequest table_names_only + * @property {boolean|null} [table_sizes_only] GetSchemaRequest table_sizes_only + * @property {boolean|null} [table_schema_only] GetSchemaRequest table_schema_only */ /** - * Constructs a new ChangeTabletTypeRequest. + * Constructs a new GetSchemaRequest. * @memberof vtctldata - * @classdesc Represents a ChangeTabletTypeRequest. - * @implements IChangeTabletTypeRequest + * @classdesc Represents a GetSchemaRequest. + * @implements IGetSchemaRequest * @constructor - * @param {vtctldata.IChangeTabletTypeRequest=} [properties] Properties to set + * @param {vtctldata.IGetSchemaRequest=} [properties] Properties to set */ - function ChangeTabletTypeRequest(properties) { + function GetSchemaRequest(properties) { + this.tables = []; + this.exclude_tables = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -104972,90 +125203,132 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ChangeTabletTypeRequest tablet_alias. + * GetSchemaRequest tablet_alias. 
* @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.ChangeTabletTypeRequest + * @memberof vtctldata.GetSchemaRequest * @instance */ - ChangeTabletTypeRequest.prototype.tablet_alias = null; + GetSchemaRequest.prototype.tablet_alias = null; /** - * ChangeTabletTypeRequest db_type. - * @member {topodata.TabletType} db_type - * @memberof vtctldata.ChangeTabletTypeRequest + * GetSchemaRequest tables. + * @member {Array.} tables + * @memberof vtctldata.GetSchemaRequest * @instance */ - ChangeTabletTypeRequest.prototype.db_type = 0; + GetSchemaRequest.prototype.tables = $util.emptyArray; /** - * ChangeTabletTypeRequest dry_run. - * @member {boolean} dry_run - * @memberof vtctldata.ChangeTabletTypeRequest + * GetSchemaRequest exclude_tables. + * @member {Array.} exclude_tables + * @memberof vtctldata.GetSchemaRequest * @instance */ - ChangeTabletTypeRequest.prototype.dry_run = false; + GetSchemaRequest.prototype.exclude_tables = $util.emptyArray; /** - * Creates a new ChangeTabletTypeRequest instance using the specified properties. + * GetSchemaRequest include_views. + * @member {boolean} include_views + * @memberof vtctldata.GetSchemaRequest + * @instance + */ + GetSchemaRequest.prototype.include_views = false; + + /** + * GetSchemaRequest table_names_only. + * @member {boolean} table_names_only + * @memberof vtctldata.GetSchemaRequest + * @instance + */ + GetSchemaRequest.prototype.table_names_only = false; + + /** + * GetSchemaRequest table_sizes_only. + * @member {boolean} table_sizes_only + * @memberof vtctldata.GetSchemaRequest + * @instance + */ + GetSchemaRequest.prototype.table_sizes_only = false; + + /** + * GetSchemaRequest table_schema_only. + * @member {boolean} table_schema_only + * @memberof vtctldata.GetSchemaRequest + * @instance + */ + GetSchemaRequest.prototype.table_schema_only = false; + + /** + * Creates a new GetSchemaRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.ChangeTabletTypeRequest + * @memberof vtctldata.GetSchemaRequest * @static - * @param {vtctldata.IChangeTabletTypeRequest=} [properties] Properties to set - * @returns {vtctldata.ChangeTabletTypeRequest} ChangeTabletTypeRequest instance + * @param {vtctldata.IGetSchemaRequest=} [properties] Properties to set + * @returns {vtctldata.GetSchemaRequest} GetSchemaRequest instance */ - ChangeTabletTypeRequest.create = function create(properties) { - return new ChangeTabletTypeRequest(properties); + GetSchemaRequest.create = function create(properties) { + return new GetSchemaRequest(properties); }; /** - * Encodes the specified ChangeTabletTypeRequest message. Does not implicitly {@link vtctldata.ChangeTabletTypeRequest.verify|verify} messages. + * Encodes the specified GetSchemaRequest message. Does not implicitly {@link vtctldata.GetSchemaRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.ChangeTabletTypeRequest + * @memberof vtctldata.GetSchemaRequest * @static - * @param {vtctldata.IChangeTabletTypeRequest} message ChangeTabletTypeRequest message or plain object to encode + * @param {vtctldata.IGetSchemaRequest} message GetSchemaRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ChangeTabletTypeRequest.encode = function encode(message, writer) { + GetSchemaRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.db_type != null && Object.hasOwnProperty.call(message, "db_type")) - writer.uint32(/* id 2, wireType 0 =*/16).int32(message.db_type); - if (message.dry_run != null && Object.hasOwnProperty.call(message, "dry_run")) - writer.uint32(/* id 3, wireType 0 
=*/24).bool(message.dry_run); + if (message.tables != null && message.tables.length) + for (let i = 0; i < message.tables.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.tables[i]); + if (message.exclude_tables != null && message.exclude_tables.length) + for (let i = 0; i < message.exclude_tables.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.exclude_tables[i]); + if (message.include_views != null && Object.hasOwnProperty.call(message, "include_views")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.include_views); + if (message.table_names_only != null && Object.hasOwnProperty.call(message, "table_names_only")) + writer.uint32(/* id 5, wireType 0 =*/40).bool(message.table_names_only); + if (message.table_sizes_only != null && Object.hasOwnProperty.call(message, "table_sizes_only")) + writer.uint32(/* id 6, wireType 0 =*/48).bool(message.table_sizes_only); + if (message.table_schema_only != null && Object.hasOwnProperty.call(message, "table_schema_only")) + writer.uint32(/* id 7, wireType 0 =*/56).bool(message.table_schema_only); return writer; }; /** - * Encodes the specified ChangeTabletTypeRequest message, length delimited. Does not implicitly {@link vtctldata.ChangeTabletTypeRequest.verify|verify} messages. + * Encodes the specified GetSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.GetSchemaRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ChangeTabletTypeRequest + * @memberof vtctldata.GetSchemaRequest * @static - * @param {vtctldata.IChangeTabletTypeRequest} message ChangeTabletTypeRequest message or plain object to encode + * @param {vtctldata.IGetSchemaRequest} message GetSchemaRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ChangeTabletTypeRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetSchemaRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ChangeTabletTypeRequest message from the specified reader or buffer. + * Decodes a GetSchemaRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ChangeTabletTypeRequest + * @memberof vtctldata.GetSchemaRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ChangeTabletTypeRequest} ChangeTabletTypeRequest + * @returns {vtctldata.GetSchemaRequest} GetSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ChangeTabletTypeRequest.decode = function decode(reader, length) { + GetSchemaRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ChangeTabletTypeRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetSchemaRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -105064,11 +125337,31 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 2: { - message.db_type = reader.int32(); + if (!(message.tables && message.tables.length)) + message.tables = []; + message.tables.push(reader.string()); break; } case 3: { - message.dry_run = reader.bool(); + if (!(message.exclude_tables && message.exclude_tables.length)) + message.exclude_tables = []; + message.exclude_tables.push(reader.string()); + break; + } + case 4: { + message.include_views = reader.bool(); + break; + } + case 5: { + message.table_names_only = reader.bool(); + break; + } + case 6: { + message.table_sizes_only = reader.bool(); + break; + } + case 7: { + message.table_schema_only = reader.bool(); break; } default: @@ -105080,30 +125373,30 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ChangeTabletTypeRequest message from the specified reader or buffer, length delimited. + * Decodes a GetSchemaRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ChangeTabletTypeRequest + * @memberof vtctldata.GetSchemaRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ChangeTabletTypeRequest} ChangeTabletTypeRequest + * @returns {vtctldata.GetSchemaRequest} GetSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ChangeTabletTypeRequest.decodeDelimited = function decodeDelimited(reader) { + GetSchemaRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ChangeTabletTypeRequest message. 
+ * Verifies a GetSchemaRequest message. * @function verify - * @memberof vtctldata.ChangeTabletTypeRequest + * @memberof vtctldata.GetSchemaRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ChangeTabletTypeRequest.verify = function verify(message) { + GetSchemaRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { @@ -105111,179 +125404,171 @@ export const vtctldata = $root.vtctldata = (() => { if (error) return "tablet_alias." + error; } - if (message.db_type != null && message.hasOwnProperty("db_type")) - switch (message.db_type) { - default: - return "db_type: enum value expected"; - case 0: - case 1: - case 1: - case 2: - case 3: - case 3: - case 4: - case 5: - case 6: - case 7: - case 8: - break; - } - if (message.dry_run != null && message.hasOwnProperty("dry_run")) - if (typeof message.dry_run !== "boolean") - return "dry_run: boolean expected"; + if (message.tables != null && message.hasOwnProperty("tables")) { + if (!Array.isArray(message.tables)) + return "tables: array expected"; + for (let i = 0; i < message.tables.length; ++i) + if (!$util.isString(message.tables[i])) + return "tables: string[] expected"; + } + if (message.exclude_tables != null && message.hasOwnProperty("exclude_tables")) { + if (!Array.isArray(message.exclude_tables)) + return "exclude_tables: array expected"; + for (let i = 0; i < message.exclude_tables.length; ++i) + if (!$util.isString(message.exclude_tables[i])) + return "exclude_tables: string[] expected"; + } + if (message.include_views != null && message.hasOwnProperty("include_views")) + if (typeof message.include_views !== "boolean") + return "include_views: boolean expected"; + if (message.table_names_only != null && message.hasOwnProperty("table_names_only")) + if (typeof 
message.table_names_only !== "boolean") + return "table_names_only: boolean expected"; + if (message.table_sizes_only != null && message.hasOwnProperty("table_sizes_only")) + if (typeof message.table_sizes_only !== "boolean") + return "table_sizes_only: boolean expected"; + if (message.table_schema_only != null && message.hasOwnProperty("table_schema_only")) + if (typeof message.table_schema_only !== "boolean") + return "table_schema_only: boolean expected"; return null; }; /** - * Creates a ChangeTabletTypeRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetSchemaRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.ChangeTabletTypeRequest + * @memberof vtctldata.GetSchemaRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ChangeTabletTypeRequest} ChangeTabletTypeRequest + * @returns {vtctldata.GetSchemaRequest} GetSchemaRequest */ - ChangeTabletTypeRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ChangeTabletTypeRequest) + GetSchemaRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetSchemaRequest) return object; - let message = new $root.vtctldata.ChangeTabletTypeRequest(); + let message = new $root.vtctldata.GetSchemaRequest(); if (object.tablet_alias != null) { if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.ChangeTabletTypeRequest.tablet_alias: object expected"); + throw TypeError(".vtctldata.GetSchemaRequest.tablet_alias: object expected"); message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); } - switch (object.db_type) { - default: - if (typeof object.db_type === "number") { - message.db_type = object.db_type; - break; - } - break; - case "UNKNOWN": - case 0: - message.db_type = 0; - break; - case "PRIMARY": - case 1: - message.db_type = 1; - 
break; - case "MASTER": - case 1: - message.db_type = 1; - break; - case "REPLICA": - case 2: - message.db_type = 2; - break; - case "RDONLY": - case 3: - message.db_type = 3; - break; - case "BATCH": - case 3: - message.db_type = 3; - break; - case "SPARE": - case 4: - message.db_type = 4; - break; - case "EXPERIMENTAL": - case 5: - message.db_type = 5; - break; - case "BACKUP": - case 6: - message.db_type = 6; - break; - case "RESTORE": - case 7: - message.db_type = 7; - break; - case "DRAINED": - case 8: - message.db_type = 8; - break; + if (object.tables) { + if (!Array.isArray(object.tables)) + throw TypeError(".vtctldata.GetSchemaRequest.tables: array expected"); + message.tables = []; + for (let i = 0; i < object.tables.length; ++i) + message.tables[i] = String(object.tables[i]); } - if (object.dry_run != null) - message.dry_run = Boolean(object.dry_run); + if (object.exclude_tables) { + if (!Array.isArray(object.exclude_tables)) + throw TypeError(".vtctldata.GetSchemaRequest.exclude_tables: array expected"); + message.exclude_tables = []; + for (let i = 0; i < object.exclude_tables.length; ++i) + message.exclude_tables[i] = String(object.exclude_tables[i]); + } + if (object.include_views != null) + message.include_views = Boolean(object.include_views); + if (object.table_names_only != null) + message.table_names_only = Boolean(object.table_names_only); + if (object.table_sizes_only != null) + message.table_sizes_only = Boolean(object.table_sizes_only); + if (object.table_schema_only != null) + message.table_schema_only = Boolean(object.table_schema_only); return message; }; /** - * Creates a plain object from a ChangeTabletTypeRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetSchemaRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ChangeTabletTypeRequest + * @memberof vtctldata.GetSchemaRequest * @static - * @param {vtctldata.ChangeTabletTypeRequest} message ChangeTabletTypeRequest + * @param {vtctldata.GetSchemaRequest} message GetSchemaRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ChangeTabletTypeRequest.toObject = function toObject(message, options) { + GetSchemaRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) { + object.tables = []; + object.exclude_tables = []; + } if (options.defaults) { object.tablet_alias = null; - object.db_type = options.enums === String ? "UNKNOWN" : 0; - object.dry_run = false; + object.include_views = false; + object.table_names_only = false; + object.table_sizes_only = false; + object.table_schema_only = false; } if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - if (message.db_type != null && message.hasOwnProperty("db_type")) - object.db_type = options.enums === String ? $root.topodata.TabletType[message.db_type] === undefined ? 
message.db_type : $root.topodata.TabletType[message.db_type] : message.db_type; - if (message.dry_run != null && message.hasOwnProperty("dry_run")) - object.dry_run = message.dry_run; + if (message.tables && message.tables.length) { + object.tables = []; + for (let j = 0; j < message.tables.length; ++j) + object.tables[j] = message.tables[j]; + } + if (message.exclude_tables && message.exclude_tables.length) { + object.exclude_tables = []; + for (let j = 0; j < message.exclude_tables.length; ++j) + object.exclude_tables[j] = message.exclude_tables[j]; + } + if (message.include_views != null && message.hasOwnProperty("include_views")) + object.include_views = message.include_views; + if (message.table_names_only != null && message.hasOwnProperty("table_names_only")) + object.table_names_only = message.table_names_only; + if (message.table_sizes_only != null && message.hasOwnProperty("table_sizes_only")) + object.table_sizes_only = message.table_sizes_only; + if (message.table_schema_only != null && message.hasOwnProperty("table_schema_only")) + object.table_schema_only = message.table_schema_only; return object; }; /** - * Converts this ChangeTabletTypeRequest to JSON. + * Converts this GetSchemaRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ChangeTabletTypeRequest + * @memberof vtctldata.GetSchemaRequest * @instance * @returns {Object.} JSON object */ - ChangeTabletTypeRequest.prototype.toJSON = function toJSON() { + GetSchemaRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ChangeTabletTypeRequest + * Gets the default type url for GetSchemaRequest * @function getTypeUrl - * @memberof vtctldata.ChangeTabletTypeRequest + * @memberof vtctldata.GetSchemaRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ChangeTabletTypeRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetSchemaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ChangeTabletTypeRequest"; + return typeUrlPrefix + "/vtctldata.GetSchemaRequest"; }; - return ChangeTabletTypeRequest; + return GetSchemaRequest; })(); - vtctldata.ChangeTabletTypeResponse = (function() { + vtctldata.GetSchemaResponse = (function() { /** - * Properties of a ChangeTabletTypeResponse. + * Properties of a GetSchemaResponse. * @memberof vtctldata - * @interface IChangeTabletTypeResponse - * @property {topodata.ITablet|null} [before_tablet] ChangeTabletTypeResponse before_tablet - * @property {topodata.ITablet|null} [after_tablet] ChangeTabletTypeResponse after_tablet - * @property {boolean|null} [was_dry_run] ChangeTabletTypeResponse was_dry_run + * @interface IGetSchemaResponse + * @property {tabletmanagerdata.ISchemaDefinition|null} [schema] GetSchemaResponse schema */ /** - * Constructs a new ChangeTabletTypeResponse. + * Constructs a new GetSchemaResponse. * @memberof vtctldata - * @classdesc Represents a ChangeTabletTypeResponse. 
- * @implements IChangeTabletTypeResponse + * @classdesc Represents a GetSchemaResponse. + * @implements IGetSchemaResponse * @constructor - * @param {vtctldata.IChangeTabletTypeResponse=} [properties] Properties to set + * @param {vtctldata.IGetSchemaResponse=} [properties] Properties to set */ - function ChangeTabletTypeResponse(properties) { + function GetSchemaResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -105291,103 +125576,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ChangeTabletTypeResponse before_tablet. - * @member {topodata.ITablet|null|undefined} before_tablet - * @memberof vtctldata.ChangeTabletTypeResponse - * @instance - */ - ChangeTabletTypeResponse.prototype.before_tablet = null; - - /** - * ChangeTabletTypeResponse after_tablet. - * @member {topodata.ITablet|null|undefined} after_tablet - * @memberof vtctldata.ChangeTabletTypeResponse - * @instance - */ - ChangeTabletTypeResponse.prototype.after_tablet = null; - - /** - * ChangeTabletTypeResponse was_dry_run. - * @member {boolean} was_dry_run - * @memberof vtctldata.ChangeTabletTypeResponse + * GetSchemaResponse schema. + * @member {tabletmanagerdata.ISchemaDefinition|null|undefined} schema + * @memberof vtctldata.GetSchemaResponse * @instance */ - ChangeTabletTypeResponse.prototype.was_dry_run = false; + GetSchemaResponse.prototype.schema = null; /** - * Creates a new ChangeTabletTypeResponse instance using the specified properties. + * Creates a new GetSchemaResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.ChangeTabletTypeResponse + * @memberof vtctldata.GetSchemaResponse * @static - * @param {vtctldata.IChangeTabletTypeResponse=} [properties] Properties to set - * @returns {vtctldata.ChangeTabletTypeResponse} ChangeTabletTypeResponse instance + * @param {vtctldata.IGetSchemaResponse=} [properties] Properties to set + * @returns {vtctldata.GetSchemaResponse} GetSchemaResponse instance */ - ChangeTabletTypeResponse.create = function create(properties) { - return new ChangeTabletTypeResponse(properties); + GetSchemaResponse.create = function create(properties) { + return new GetSchemaResponse(properties); }; /** - * Encodes the specified ChangeTabletTypeResponse message. Does not implicitly {@link vtctldata.ChangeTabletTypeResponse.verify|verify} messages. + * Encodes the specified GetSchemaResponse message. Does not implicitly {@link vtctldata.GetSchemaResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.ChangeTabletTypeResponse + * @memberof vtctldata.GetSchemaResponse * @static - * @param {vtctldata.IChangeTabletTypeResponse} message ChangeTabletTypeResponse message or plain object to encode + * @param {vtctldata.IGetSchemaResponse} message GetSchemaResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ChangeTabletTypeResponse.encode = function encode(message, writer) { + GetSchemaResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.before_tablet != null && Object.hasOwnProperty.call(message, "before_tablet")) - $root.topodata.Tablet.encode(message.before_tablet, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.after_tablet != null && Object.hasOwnProperty.call(message, "after_tablet")) - $root.topodata.Tablet.encode(message.after_tablet, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.was_dry_run != null && 
Object.hasOwnProperty.call(message, "was_dry_run")) - writer.uint32(/* id 3, wireType 0 =*/24).bool(message.was_dry_run); + if (message.schema != null && Object.hasOwnProperty.call(message, "schema")) + $root.tabletmanagerdata.SchemaDefinition.encode(message.schema, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified ChangeTabletTypeResponse message, length delimited. Does not implicitly {@link vtctldata.ChangeTabletTypeResponse.verify|verify} messages. + * Encodes the specified GetSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.GetSchemaResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ChangeTabletTypeResponse + * @memberof vtctldata.GetSchemaResponse * @static - * @param {vtctldata.IChangeTabletTypeResponse} message ChangeTabletTypeResponse message or plain object to encode + * @param {vtctldata.IGetSchemaResponse} message GetSchemaResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ChangeTabletTypeResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetSchemaResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ChangeTabletTypeResponse message from the specified reader or buffer. + * Decodes a GetSchemaResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.ChangeTabletTypeResponse + * @memberof vtctldata.GetSchemaResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ChangeTabletTypeResponse} ChangeTabletTypeResponse + * @returns {vtctldata.GetSchemaResponse} GetSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ChangeTabletTypeResponse.decode = function decode(reader, length) { + GetSchemaResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ChangeTabletTypeResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetSchemaResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.before_tablet = $root.topodata.Tablet.decode(reader, reader.uint32()); - break; - } - case 2: { - message.after_tablet = $root.topodata.Tablet.decode(reader, reader.uint32()); - break; - } - case 3: { - message.was_dry_run = reader.bool(); + message.schema = $root.tabletmanagerdata.SchemaDefinition.decode(reader, reader.uint32()); break; } default: @@ -105399,158 +125656,134 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ChangeTabletTypeResponse message from the specified reader or buffer, length delimited. + * Decodes a GetSchemaResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.ChangeTabletTypeResponse + * @memberof vtctldata.GetSchemaResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ChangeTabletTypeResponse} ChangeTabletTypeResponse + * @returns {vtctldata.GetSchemaResponse} GetSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ChangeTabletTypeResponse.decodeDelimited = function decodeDelimited(reader) { + GetSchemaResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ChangeTabletTypeResponse message. + * Verifies a GetSchemaResponse message. * @function verify - * @memberof vtctldata.ChangeTabletTypeResponse + * @memberof vtctldata.GetSchemaResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ChangeTabletTypeResponse.verify = function verify(message) { + GetSchemaResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.before_tablet != null && message.hasOwnProperty("before_tablet")) { - let error = $root.topodata.Tablet.verify(message.before_tablet); - if (error) - return "before_tablet." + error; - } - if (message.after_tablet != null && message.hasOwnProperty("after_tablet")) { - let error = $root.topodata.Tablet.verify(message.after_tablet); + if (message.schema != null && message.hasOwnProperty("schema")) { + let error = $root.tabletmanagerdata.SchemaDefinition.verify(message.schema); if (error) - return "after_tablet." + error; + return "schema." 
+ error; } - if (message.was_dry_run != null && message.hasOwnProperty("was_dry_run")) - if (typeof message.was_dry_run !== "boolean") - return "was_dry_run: boolean expected"; return null; }; /** - * Creates a ChangeTabletTypeResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetSchemaResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.ChangeTabletTypeResponse + * @memberof vtctldata.GetSchemaResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ChangeTabletTypeResponse} ChangeTabletTypeResponse + * @returns {vtctldata.GetSchemaResponse} GetSchemaResponse */ - ChangeTabletTypeResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ChangeTabletTypeResponse) + GetSchemaResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetSchemaResponse) return object; - let message = new $root.vtctldata.ChangeTabletTypeResponse(); - if (object.before_tablet != null) { - if (typeof object.before_tablet !== "object") - throw TypeError(".vtctldata.ChangeTabletTypeResponse.before_tablet: object expected"); - message.before_tablet = $root.topodata.Tablet.fromObject(object.before_tablet); - } - if (object.after_tablet != null) { - if (typeof object.after_tablet !== "object") - throw TypeError(".vtctldata.ChangeTabletTypeResponse.after_tablet: object expected"); - message.after_tablet = $root.topodata.Tablet.fromObject(object.after_tablet); + let message = new $root.vtctldata.GetSchemaResponse(); + if (object.schema != null) { + if (typeof object.schema !== "object") + throw TypeError(".vtctldata.GetSchemaResponse.schema: object expected"); + message.schema = $root.tabletmanagerdata.SchemaDefinition.fromObject(object.schema); } - if (object.was_dry_run != null) - message.was_dry_run = Boolean(object.was_dry_run); return message; }; /** - * 
Creates a plain object from a ChangeTabletTypeResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetSchemaResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.ChangeTabletTypeResponse + * @memberof vtctldata.GetSchemaResponse * @static - * @param {vtctldata.ChangeTabletTypeResponse} message ChangeTabletTypeResponse + * @param {vtctldata.GetSchemaResponse} message GetSchemaResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ChangeTabletTypeResponse.toObject = function toObject(message, options) { + GetSchemaResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.before_tablet = null; - object.after_tablet = null; - object.was_dry_run = false; - } - if (message.before_tablet != null && message.hasOwnProperty("before_tablet")) - object.before_tablet = $root.topodata.Tablet.toObject(message.before_tablet, options); - if (message.after_tablet != null && message.hasOwnProperty("after_tablet")) - object.after_tablet = $root.topodata.Tablet.toObject(message.after_tablet, options); - if (message.was_dry_run != null && message.hasOwnProperty("was_dry_run")) - object.was_dry_run = message.was_dry_run; + if (options.defaults) + object.schema = null; + if (message.schema != null && message.hasOwnProperty("schema")) + object.schema = $root.tabletmanagerdata.SchemaDefinition.toObject(message.schema, options); return object; }; /** - * Converts this ChangeTabletTypeResponse to JSON. + * Converts this GetSchemaResponse to JSON. 
* @function toJSON - * @memberof vtctldata.ChangeTabletTypeResponse + * @memberof vtctldata.GetSchemaResponse * @instance * @returns {Object.} JSON object */ - ChangeTabletTypeResponse.prototype.toJSON = function toJSON() { + GetSchemaResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ChangeTabletTypeResponse + * Gets the default type url for GetSchemaResponse * @function getTypeUrl - * @memberof vtctldata.ChangeTabletTypeResponse + * @memberof vtctldata.GetSchemaResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ChangeTabletTypeResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetSchemaResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ChangeTabletTypeResponse"; + return typeUrlPrefix + "/vtctldata.GetSchemaResponse"; }; - return ChangeTabletTypeResponse; + return GetSchemaResponse; })(); - vtctldata.CreateKeyspaceRequest = (function() { + vtctldata.GetSchemaMigrationsRequest = (function() { /** - * Properties of a CreateKeyspaceRequest. + * Properties of a GetSchemaMigrationsRequest. 
* @memberof vtctldata - * @interface ICreateKeyspaceRequest - * @property {string|null} [name] CreateKeyspaceRequest name - * @property {boolean|null} [force] CreateKeyspaceRequest force - * @property {boolean|null} [allow_empty_v_schema] CreateKeyspaceRequest allow_empty_v_schema - * @property {Array.|null} [served_froms] CreateKeyspaceRequest served_froms - * @property {topodata.KeyspaceType|null} [type] CreateKeyspaceRequest type - * @property {string|null} [base_keyspace] CreateKeyspaceRequest base_keyspace - * @property {vttime.ITime|null} [snapshot_time] CreateKeyspaceRequest snapshot_time - * @property {string|null} [durability_policy] CreateKeyspaceRequest durability_policy - * @property {string|null} [sidecar_db_name] CreateKeyspaceRequest sidecar_db_name + * @interface IGetSchemaMigrationsRequest + * @property {string|null} [keyspace] GetSchemaMigrationsRequest keyspace + * @property {string|null} [uuid] GetSchemaMigrationsRequest uuid + * @property {string|null} [migration_context] GetSchemaMigrationsRequest migration_context + * @property {vtctldata.SchemaMigration.Status|null} [status] GetSchemaMigrationsRequest status + * @property {vttime.IDuration|null} [recent] GetSchemaMigrationsRequest recent + * @property {vtctldata.QueryOrdering|null} [order] GetSchemaMigrationsRequest order + * @property {number|Long|null} [limit] GetSchemaMigrationsRequest limit + * @property {number|Long|null} [skip] GetSchemaMigrationsRequest skip */ /** - * Constructs a new CreateKeyspaceRequest. + * Constructs a new GetSchemaMigrationsRequest. * @memberof vtctldata - * @classdesc Represents a CreateKeyspaceRequest. - * @implements ICreateKeyspaceRequest + * @classdesc Represents a GetSchemaMigrationsRequest. 
+ * @implements IGetSchemaMigrationsRequest * @constructor - * @param {vtctldata.ICreateKeyspaceRequest=} [properties] Properties to set + * @param {vtctldata.IGetSchemaMigrationsRequest=} [properties] Properties to set */ - function CreateKeyspaceRequest(properties) { - this.served_froms = []; + function GetSchemaMigrationsRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -105558,190 +125791,173 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * CreateKeyspaceRequest name. - * @member {string} name - * @memberof vtctldata.CreateKeyspaceRequest - * @instance - */ - CreateKeyspaceRequest.prototype.name = ""; - - /** - * CreateKeyspaceRequest force. - * @member {boolean} force - * @memberof vtctldata.CreateKeyspaceRequest + * GetSchemaMigrationsRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.GetSchemaMigrationsRequest * @instance */ - CreateKeyspaceRequest.prototype.force = false; + GetSchemaMigrationsRequest.prototype.keyspace = ""; /** - * CreateKeyspaceRequest allow_empty_v_schema. - * @member {boolean} allow_empty_v_schema - * @memberof vtctldata.CreateKeyspaceRequest + * GetSchemaMigrationsRequest uuid. + * @member {string} uuid + * @memberof vtctldata.GetSchemaMigrationsRequest * @instance */ - CreateKeyspaceRequest.prototype.allow_empty_v_schema = false; + GetSchemaMigrationsRequest.prototype.uuid = ""; /** - * CreateKeyspaceRequest served_froms. - * @member {Array.} served_froms - * @memberof vtctldata.CreateKeyspaceRequest + * GetSchemaMigrationsRequest migration_context. + * @member {string} migration_context + * @memberof vtctldata.GetSchemaMigrationsRequest * @instance */ - CreateKeyspaceRequest.prototype.served_froms = $util.emptyArray; + GetSchemaMigrationsRequest.prototype.migration_context = ""; /** - * CreateKeyspaceRequest type. 
- * @member {topodata.KeyspaceType} type - * @memberof vtctldata.CreateKeyspaceRequest + * GetSchemaMigrationsRequest status. + * @member {vtctldata.SchemaMigration.Status} status + * @memberof vtctldata.GetSchemaMigrationsRequest * @instance */ - CreateKeyspaceRequest.prototype.type = 0; + GetSchemaMigrationsRequest.prototype.status = 0; /** - * CreateKeyspaceRequest base_keyspace. - * @member {string} base_keyspace - * @memberof vtctldata.CreateKeyspaceRequest + * GetSchemaMigrationsRequest recent. + * @member {vttime.IDuration|null|undefined} recent + * @memberof vtctldata.GetSchemaMigrationsRequest * @instance */ - CreateKeyspaceRequest.prototype.base_keyspace = ""; + GetSchemaMigrationsRequest.prototype.recent = null; /** - * CreateKeyspaceRequest snapshot_time. - * @member {vttime.ITime|null|undefined} snapshot_time - * @memberof vtctldata.CreateKeyspaceRequest + * GetSchemaMigrationsRequest order. + * @member {vtctldata.QueryOrdering} order + * @memberof vtctldata.GetSchemaMigrationsRequest * @instance */ - CreateKeyspaceRequest.prototype.snapshot_time = null; + GetSchemaMigrationsRequest.prototype.order = 0; /** - * CreateKeyspaceRequest durability_policy. - * @member {string} durability_policy - * @memberof vtctldata.CreateKeyspaceRequest + * GetSchemaMigrationsRequest limit. + * @member {number|Long} limit + * @memberof vtctldata.GetSchemaMigrationsRequest * @instance */ - CreateKeyspaceRequest.prototype.durability_policy = ""; + GetSchemaMigrationsRequest.prototype.limit = $util.Long ? $util.Long.fromBits(0,0,true) : 0; /** - * CreateKeyspaceRequest sidecar_db_name. - * @member {string} sidecar_db_name - * @memberof vtctldata.CreateKeyspaceRequest + * GetSchemaMigrationsRequest skip. + * @member {number|Long} skip + * @memberof vtctldata.GetSchemaMigrationsRequest * @instance */ - CreateKeyspaceRequest.prototype.sidecar_db_name = ""; + GetSchemaMigrationsRequest.prototype.skip = $util.Long ? 
$util.Long.fromBits(0,0,true) : 0; /** - * Creates a new CreateKeyspaceRequest instance using the specified properties. + * Creates a new GetSchemaMigrationsRequest instance using the specified properties. * @function create - * @memberof vtctldata.CreateKeyspaceRequest + * @memberof vtctldata.GetSchemaMigrationsRequest * @static - * @param {vtctldata.ICreateKeyspaceRequest=} [properties] Properties to set - * @returns {vtctldata.CreateKeyspaceRequest} CreateKeyspaceRequest instance + * @param {vtctldata.IGetSchemaMigrationsRequest=} [properties] Properties to set + * @returns {vtctldata.GetSchemaMigrationsRequest} GetSchemaMigrationsRequest instance */ - CreateKeyspaceRequest.create = function create(properties) { - return new CreateKeyspaceRequest(properties); + GetSchemaMigrationsRequest.create = function create(properties) { + return new GetSchemaMigrationsRequest(properties); }; /** - * Encodes the specified CreateKeyspaceRequest message. Does not implicitly {@link vtctldata.CreateKeyspaceRequest.verify|verify} messages. + * Encodes the specified GetSchemaMigrationsRequest message. Does not implicitly {@link vtctldata.GetSchemaMigrationsRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.CreateKeyspaceRequest + * @memberof vtctldata.GetSchemaMigrationsRequest * @static - * @param {vtctldata.ICreateKeyspaceRequest} message CreateKeyspaceRequest message or plain object to encode + * @param {vtctldata.IGetSchemaMigrationsRequest} message GetSchemaMigrationsRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CreateKeyspaceRequest.encode = function encode(message, writer) { + GetSchemaMigrationsRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.name != null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); - if (message.force != null && Object.hasOwnProperty.call(message, "force")) - writer.uint32(/* id 2, wireType 0 =*/16).bool(message.force); - if (message.allow_empty_v_schema != null && Object.hasOwnProperty.call(message, "allow_empty_v_schema")) - writer.uint32(/* id 3, wireType 0 =*/24).bool(message.allow_empty_v_schema); - if (message.served_froms != null && message.served_froms.length) - for (let i = 0; i < message.served_froms.length; ++i) - $root.topodata.Keyspace.ServedFrom.encode(message.served_froms[i], writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); - if (message.type != null && Object.hasOwnProperty.call(message, "type")) - writer.uint32(/* id 7, wireType 0 =*/56).int32(message.type); - if (message.base_keyspace != null && Object.hasOwnProperty.call(message, "base_keyspace")) - writer.uint32(/* id 8, wireType 2 =*/66).string(message.base_keyspace); - if (message.snapshot_time != null && Object.hasOwnProperty.call(message, "snapshot_time")) - $root.vttime.Time.encode(message.snapshot_time, writer.uint32(/* id 9, wireType 2 =*/74).fork()).ldelim(); - if (message.durability_policy != null && Object.hasOwnProperty.call(message, "durability_policy")) - writer.uint32(/* id 10, wireType 2 
=*/82).string(message.durability_policy); - if (message.sidecar_db_name != null && Object.hasOwnProperty.call(message, "sidecar_db_name")) - writer.uint32(/* id 11, wireType 2 =*/90).string(message.sidecar_db_name); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.uuid != null && Object.hasOwnProperty.call(message, "uuid")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.uuid); + if (message.migration_context != null && Object.hasOwnProperty.call(message, "migration_context")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.migration_context); + if (message.status != null && Object.hasOwnProperty.call(message, "status")) + writer.uint32(/* id 4, wireType 0 =*/32).int32(message.status); + if (message.recent != null && Object.hasOwnProperty.call(message, "recent")) + $root.vttime.Duration.encode(message.recent, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.order != null && Object.hasOwnProperty.call(message, "order")) + writer.uint32(/* id 6, wireType 0 =*/48).int32(message.order); + if (message.limit != null && Object.hasOwnProperty.call(message, "limit")) + writer.uint32(/* id 7, wireType 0 =*/56).uint64(message.limit); + if (message.skip != null && Object.hasOwnProperty.call(message, "skip")) + writer.uint32(/* id 8, wireType 0 =*/64).uint64(message.skip); return writer; }; /** - * Encodes the specified CreateKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.CreateKeyspaceRequest.verify|verify} messages. + * Encodes the specified GetSchemaMigrationsRequest message, length delimited. Does not implicitly {@link vtctldata.GetSchemaMigrationsRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.CreateKeyspaceRequest + * @memberof vtctldata.GetSchemaMigrationsRequest * @static - * @param {vtctldata.ICreateKeyspaceRequest} message CreateKeyspaceRequest message or plain object to encode + * @param {vtctldata.IGetSchemaMigrationsRequest} message GetSchemaMigrationsRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CreateKeyspaceRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetSchemaMigrationsRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a CreateKeyspaceRequest message from the specified reader or buffer. + * Decodes a GetSchemaMigrationsRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.CreateKeyspaceRequest + * @memberof vtctldata.GetSchemaMigrationsRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.CreateKeyspaceRequest} CreateKeyspaceRequest + * @returns {vtctldata.GetSchemaMigrationsRequest} GetSchemaMigrationsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateKeyspaceRequest.decode = function decode(reader, length) { + GetSchemaMigrationsRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.CreateKeyspaceRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetSchemaMigrationsRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.name = reader.string(); + message.keyspace = reader.string(); break; } case 2: { - message.force = reader.bool(); + message.uuid = reader.string(); break; } case 3: { - message.allow_empty_v_schema = reader.bool(); - break; - } - case 6: { - if (!(message.served_froms && message.served_froms.length)) - message.served_froms = []; - message.served_froms.push($root.topodata.Keyspace.ServedFrom.decode(reader, reader.uint32())); + message.migration_context = reader.string(); break; } - case 7: { - message.type = reader.int32(); + case 4: { + message.status = reader.int32(); break; } - case 8: { - message.base_keyspace = reader.string(); + case 5: { + message.recent = $root.vttime.Duration.decode(reader, reader.uint32()); break; } - case 9: { - message.snapshot_time = $root.vttime.Time.decode(reader, reader.uint32()); + case 6: { + message.order = reader.int32(); break; } - case 10: { - message.durability_policy = reader.string(); + case 7: { + message.limit = reader.uint64(); break; } - case 11: { - message.sidecar_db_name = reader.string(); + case 8: { + message.skip = reader.uint64(); break; } default: @@ -105753,229 +125969,286 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a CreateKeyspaceRequest message from the specified reader or buffer, length delimited. + * Decodes a GetSchemaMigrationsRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.CreateKeyspaceRequest + * @memberof vtctldata.GetSchemaMigrationsRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.CreateKeyspaceRequest} CreateKeyspaceRequest + * @returns {vtctldata.GetSchemaMigrationsRequest} GetSchemaMigrationsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateKeyspaceRequest.decodeDelimited = function decodeDelimited(reader) { + GetSchemaMigrationsRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a CreateKeyspaceRequest message. + * Verifies a GetSchemaMigrationsRequest message. * @function verify - * @memberof vtctldata.CreateKeyspaceRequest + * @memberof vtctldata.GetSchemaMigrationsRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - CreateKeyspaceRequest.verify = function verify(message) { + GetSchemaMigrationsRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; - if (message.force != null && message.hasOwnProperty("force")) - if (typeof message.force !== "boolean") - return "force: boolean expected"; - if (message.allow_empty_v_schema != null && message.hasOwnProperty("allow_empty_v_schema")) - if (typeof message.allow_empty_v_schema !== "boolean") - return "allow_empty_v_schema: boolean expected"; - if (message.served_froms != null && message.hasOwnProperty("served_froms")) { - if (!Array.isArray(message.served_froms)) - return "served_froms: array expected"; - for (let i = 0; i < 
message.served_froms.length; ++i) { - let error = $root.topodata.Keyspace.ServedFrom.verify(message.served_froms[i]); - if (error) - return "served_froms." + error; - } - } - if (message.type != null && message.hasOwnProperty("type")) - switch (message.type) { + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.uuid != null && message.hasOwnProperty("uuid")) + if (!$util.isString(message.uuid)) + return "uuid: string expected"; + if (message.migration_context != null && message.hasOwnProperty("migration_context")) + if (!$util.isString(message.migration_context)) + return "migration_context: string expected"; + if (message.status != null && message.hasOwnProperty("status")) + switch (message.status) { default: - return "type: enum value expected"; + return "status: enum value expected"; case 0: case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: break; } - if (message.base_keyspace != null && message.hasOwnProperty("base_keyspace")) - if (!$util.isString(message.base_keyspace)) - return "base_keyspace: string expected"; - if (message.snapshot_time != null && message.hasOwnProperty("snapshot_time")) { - let error = $root.vttime.Time.verify(message.snapshot_time); + if (message.recent != null && message.hasOwnProperty("recent")) { + let error = $root.vttime.Duration.verify(message.recent); if (error) - return "snapshot_time." + error; + return "recent." 
+ error; } - if (message.durability_policy != null && message.hasOwnProperty("durability_policy")) - if (!$util.isString(message.durability_policy)) - return "durability_policy: string expected"; - if (message.sidecar_db_name != null && message.hasOwnProperty("sidecar_db_name")) - if (!$util.isString(message.sidecar_db_name)) - return "sidecar_db_name: string expected"; + if (message.order != null && message.hasOwnProperty("order")) + switch (message.order) { + default: + return "order: enum value expected"; + case 0: + case 1: + case 2: + break; + } + if (message.limit != null && message.hasOwnProperty("limit")) + if (!$util.isInteger(message.limit) && !(message.limit && $util.isInteger(message.limit.low) && $util.isInteger(message.limit.high))) + return "limit: integer|Long expected"; + if (message.skip != null && message.hasOwnProperty("skip")) + if (!$util.isInteger(message.skip) && !(message.skip && $util.isInteger(message.skip.low) && $util.isInteger(message.skip.high))) + return "skip: integer|Long expected"; return null; }; /** - * Creates a CreateKeyspaceRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetSchemaMigrationsRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.CreateKeyspaceRequest + * @memberof vtctldata.GetSchemaMigrationsRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.CreateKeyspaceRequest} CreateKeyspaceRequest + * @returns {vtctldata.GetSchemaMigrationsRequest} GetSchemaMigrationsRequest */ - CreateKeyspaceRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.CreateKeyspaceRequest) + GetSchemaMigrationsRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetSchemaMigrationsRequest) return object; - let message = new $root.vtctldata.CreateKeyspaceRequest(); - if (object.name != null) - message.name = String(object.name); - if (object.force != null) - message.force = Boolean(object.force); - if (object.allow_empty_v_schema != null) - message.allow_empty_v_schema = Boolean(object.allow_empty_v_schema); - if (object.served_froms) { - if (!Array.isArray(object.served_froms)) - throw TypeError(".vtctldata.CreateKeyspaceRequest.served_froms: array expected"); - message.served_froms = []; - for (let i = 0; i < object.served_froms.length; ++i) { - if (typeof object.served_froms[i] !== "object") - throw TypeError(".vtctldata.CreateKeyspaceRequest.served_froms: object expected"); - message.served_froms[i] = $root.topodata.Keyspace.ServedFrom.fromObject(object.served_froms[i]); + let message = new $root.vtctldata.GetSchemaMigrationsRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.uuid != null) + message.uuid = String(object.uuid); + if (object.migration_context != null) + message.migration_context = String(object.migration_context); + switch (object.status) { + default: + if (typeof object.status === "number") { + message.status = object.status; + break; } + break; + case "UNKNOWN": + case 0: + message.status = 0; + break; + case "REQUESTED": + case 1: + message.status = 1; + break; + case "CANCELLED": + case 2: + 
message.status = 2; + break; + case "QUEUED": + case 3: + message.status = 3; + break; + case "READY": + case 4: + message.status = 4; + break; + case "RUNNING": + case 5: + message.status = 5; + break; + case "COMPLETE": + case 6: + message.status = 6; + break; + case "FAILED": + case 7: + message.status = 7; + break; } - switch (object.type) { + if (object.recent != null) { + if (typeof object.recent !== "object") + throw TypeError(".vtctldata.GetSchemaMigrationsRequest.recent: object expected"); + message.recent = $root.vttime.Duration.fromObject(object.recent); + } + switch (object.order) { default: - if (typeof object.type === "number") { - message.type = object.type; + if (typeof object.order === "number") { + message.order = object.order; break; } break; - case "NORMAL": + case "NONE": case 0: - message.type = 0; + message.order = 0; break; - case "SNAPSHOT": + case "ASCENDING": case 1: - message.type = 1; + message.order = 1; + break; + case "DESCENDING": + case 2: + message.order = 2; break; } - if (object.base_keyspace != null) - message.base_keyspace = String(object.base_keyspace); - if (object.snapshot_time != null) { - if (typeof object.snapshot_time !== "object") - throw TypeError(".vtctldata.CreateKeyspaceRequest.snapshot_time: object expected"); - message.snapshot_time = $root.vttime.Time.fromObject(object.snapshot_time); - } - if (object.durability_policy != null) - message.durability_policy = String(object.durability_policy); - if (object.sidecar_db_name != null) - message.sidecar_db_name = String(object.sidecar_db_name); + if (object.limit != null) + if ($util.Long) + (message.limit = $util.Long.fromValue(object.limit)).unsigned = true; + else if (typeof object.limit === "string") + message.limit = parseInt(object.limit, 10); + else if (typeof object.limit === "number") + message.limit = object.limit; + else if (typeof object.limit === "object") + message.limit = new $util.LongBits(object.limit.low >>> 0, object.limit.high >>> 0).toNumber(true); 
+ if (object.skip != null) + if ($util.Long) + (message.skip = $util.Long.fromValue(object.skip)).unsigned = true; + else if (typeof object.skip === "string") + message.skip = parseInt(object.skip, 10); + else if (typeof object.skip === "number") + message.skip = object.skip; + else if (typeof object.skip === "object") + message.skip = new $util.LongBits(object.skip.low >>> 0, object.skip.high >>> 0).toNumber(true); return message; }; /** - * Creates a plain object from a CreateKeyspaceRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetSchemaMigrationsRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.CreateKeyspaceRequest + * @memberof vtctldata.GetSchemaMigrationsRequest * @static - * @param {vtctldata.CreateKeyspaceRequest} message CreateKeyspaceRequest + * @param {vtctldata.GetSchemaMigrationsRequest} message GetSchemaMigrationsRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - CreateKeyspaceRequest.toObject = function toObject(message, options) { + GetSchemaMigrationsRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.served_froms = []; if (options.defaults) { - object.name = ""; - object.force = false; - object.allow_empty_v_schema = false; - object.type = options.enums === String ? 
"NORMAL" : 0; - object.base_keyspace = ""; - object.snapshot_time = null; - object.durability_policy = ""; - object.sidecar_db_name = ""; - } - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; - if (message.force != null && message.hasOwnProperty("force")) - object.force = message.force; - if (message.allow_empty_v_schema != null && message.hasOwnProperty("allow_empty_v_schema")) - object.allow_empty_v_schema = message.allow_empty_v_schema; - if (message.served_froms && message.served_froms.length) { - object.served_froms = []; - for (let j = 0; j < message.served_froms.length; ++j) - object.served_froms[j] = $root.topodata.Keyspace.ServedFrom.toObject(message.served_froms[j], options); + object.keyspace = ""; + object.uuid = ""; + object.migration_context = ""; + object.status = options.enums === String ? "UNKNOWN" : 0; + object.recent = null; + object.order = options.enums === String ? "NONE" : 0; + if ($util.Long) { + let long = new $util.Long(0, 0, true); + object.limit = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.limit = options.longs === String ? "0" : 0; + if ($util.Long) { + let long = new $util.Long(0, 0, true); + object.skip = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.skip = options.longs === String ? "0" : 0; } - if (message.type != null && message.hasOwnProperty("type")) - object.type = options.enums === String ? $root.topodata.KeyspaceType[message.type] === undefined ? 
message.type : $root.topodata.KeyspaceType[message.type] : message.type; - if (message.base_keyspace != null && message.hasOwnProperty("base_keyspace")) - object.base_keyspace = message.base_keyspace; - if (message.snapshot_time != null && message.hasOwnProperty("snapshot_time")) - object.snapshot_time = $root.vttime.Time.toObject(message.snapshot_time, options); - if (message.durability_policy != null && message.hasOwnProperty("durability_policy")) - object.durability_policy = message.durability_policy; - if (message.sidecar_db_name != null && message.hasOwnProperty("sidecar_db_name")) - object.sidecar_db_name = message.sidecar_db_name; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.uuid != null && message.hasOwnProperty("uuid")) + object.uuid = message.uuid; + if (message.migration_context != null && message.hasOwnProperty("migration_context")) + object.migration_context = message.migration_context; + if (message.status != null && message.hasOwnProperty("status")) + object.status = options.enums === String ? $root.vtctldata.SchemaMigration.Status[message.status] === undefined ? message.status : $root.vtctldata.SchemaMigration.Status[message.status] : message.status; + if (message.recent != null && message.hasOwnProperty("recent")) + object.recent = $root.vttime.Duration.toObject(message.recent, options); + if (message.order != null && message.hasOwnProperty("order")) + object.order = options.enums === String ? $root.vtctldata.QueryOrdering[message.order] === undefined ? message.order : $root.vtctldata.QueryOrdering[message.order] : message.order; + if (message.limit != null && message.hasOwnProperty("limit")) + if (typeof message.limit === "number") + object.limit = options.longs === String ? String(message.limit) : message.limit; + else + object.limit = options.longs === String ? $util.Long.prototype.toString.call(message.limit) : options.longs === Number ? 
new $util.LongBits(message.limit.low >>> 0, message.limit.high >>> 0).toNumber(true) : message.limit; + if (message.skip != null && message.hasOwnProperty("skip")) + if (typeof message.skip === "number") + object.skip = options.longs === String ? String(message.skip) : message.skip; + else + object.skip = options.longs === String ? $util.Long.prototype.toString.call(message.skip) : options.longs === Number ? new $util.LongBits(message.skip.low >>> 0, message.skip.high >>> 0).toNumber(true) : message.skip; return object; }; /** - * Converts this CreateKeyspaceRequest to JSON. + * Converts this GetSchemaMigrationsRequest to JSON. * @function toJSON - * @memberof vtctldata.CreateKeyspaceRequest + * @memberof vtctldata.GetSchemaMigrationsRequest * @instance * @returns {Object.} JSON object */ - CreateKeyspaceRequest.prototype.toJSON = function toJSON() { + GetSchemaMigrationsRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for CreateKeyspaceRequest + * Gets the default type url for GetSchemaMigrationsRequest * @function getTypeUrl - * @memberof vtctldata.CreateKeyspaceRequest + * @memberof vtctldata.GetSchemaMigrationsRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - CreateKeyspaceRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetSchemaMigrationsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.CreateKeyspaceRequest"; + return typeUrlPrefix + "/vtctldata.GetSchemaMigrationsRequest"; }; - return CreateKeyspaceRequest; + return GetSchemaMigrationsRequest; })(); - vtctldata.CreateKeyspaceResponse = (function() { + vtctldata.GetSchemaMigrationsResponse = (function() { /** - * Properties of a CreateKeyspaceResponse. 
+ * Properties of a GetSchemaMigrationsResponse. * @memberof vtctldata - * @interface ICreateKeyspaceResponse - * @property {vtctldata.IKeyspace|null} [keyspace] CreateKeyspaceResponse keyspace + * @interface IGetSchemaMigrationsResponse + * @property {Array.|null} [migrations] GetSchemaMigrationsResponse migrations */ /** - * Constructs a new CreateKeyspaceResponse. + * Constructs a new GetSchemaMigrationsResponse. * @memberof vtctldata - * @classdesc Represents a CreateKeyspaceResponse. - * @implements ICreateKeyspaceResponse + * @classdesc Represents a GetSchemaMigrationsResponse. + * @implements IGetSchemaMigrationsResponse * @constructor - * @param {vtctldata.ICreateKeyspaceResponse=} [properties] Properties to set + * @param {vtctldata.IGetSchemaMigrationsResponse=} [properties] Properties to set */ - function CreateKeyspaceResponse(properties) { + function GetSchemaMigrationsResponse(properties) { + this.migrations = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -105983,75 +126256,78 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * CreateKeyspaceResponse keyspace. - * @member {vtctldata.IKeyspace|null|undefined} keyspace - * @memberof vtctldata.CreateKeyspaceResponse + * GetSchemaMigrationsResponse migrations. + * @member {Array.} migrations + * @memberof vtctldata.GetSchemaMigrationsResponse * @instance */ - CreateKeyspaceResponse.prototype.keyspace = null; + GetSchemaMigrationsResponse.prototype.migrations = $util.emptyArray; /** - * Creates a new CreateKeyspaceResponse instance using the specified properties. + * Creates a new GetSchemaMigrationsResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.CreateKeyspaceResponse + * @memberof vtctldata.GetSchemaMigrationsResponse * @static - * @param {vtctldata.ICreateKeyspaceResponse=} [properties] Properties to set - * @returns {vtctldata.CreateKeyspaceResponse} CreateKeyspaceResponse instance + * @param {vtctldata.IGetSchemaMigrationsResponse=} [properties] Properties to set + * @returns {vtctldata.GetSchemaMigrationsResponse} GetSchemaMigrationsResponse instance */ - CreateKeyspaceResponse.create = function create(properties) { - return new CreateKeyspaceResponse(properties); + GetSchemaMigrationsResponse.create = function create(properties) { + return new GetSchemaMigrationsResponse(properties); }; /** - * Encodes the specified CreateKeyspaceResponse message. Does not implicitly {@link vtctldata.CreateKeyspaceResponse.verify|verify} messages. + * Encodes the specified GetSchemaMigrationsResponse message. Does not implicitly {@link vtctldata.GetSchemaMigrationsResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.CreateKeyspaceResponse + * @memberof vtctldata.GetSchemaMigrationsResponse * @static - * @param {vtctldata.ICreateKeyspaceResponse} message CreateKeyspaceResponse message or plain object to encode + * @param {vtctldata.IGetSchemaMigrationsResponse} message GetSchemaMigrationsResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CreateKeyspaceResponse.encode = function encode(message, writer) { + GetSchemaMigrationsResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - $root.vtctldata.Keyspace.encode(message.keyspace, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.migrations != null && message.migrations.length) + for (let i = 0; i < message.migrations.length; ++i) + $root.vtctldata.SchemaMigration.encode(message.migrations[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified CreateKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.CreateKeyspaceResponse.verify|verify} messages. + * Encodes the specified GetSchemaMigrationsResponse message, length delimited. Does not implicitly {@link vtctldata.GetSchemaMigrationsResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.CreateKeyspaceResponse + * @memberof vtctldata.GetSchemaMigrationsResponse * @static - * @param {vtctldata.ICreateKeyspaceResponse} message CreateKeyspaceResponse message or plain object to encode + * @param {vtctldata.IGetSchemaMigrationsResponse} message GetSchemaMigrationsResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CreateKeyspaceResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetSchemaMigrationsResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a CreateKeyspaceResponse message from the specified reader or buffer. + * Decodes a GetSchemaMigrationsResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.CreateKeyspaceResponse + * @memberof vtctldata.GetSchemaMigrationsResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.CreateKeyspaceResponse} CreateKeyspaceResponse + * @returns {vtctldata.GetSchemaMigrationsResponse} GetSchemaMigrationsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateKeyspaceResponse.decode = function decode(reader, length) { + GetSchemaMigrationsResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.CreateKeyspaceResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetSchemaMigrationsResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = $root.vtctldata.Keyspace.decode(reader, reader.uint32()); + if (!(message.migrations && message.migrations.length)) + message.migrations = []; + message.migrations.push($root.vtctldata.SchemaMigration.decode(reader, reader.uint32())); break; } default: @@ -106063,130 +126339,140 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a CreateKeyspaceResponse message from the specified reader or buffer, length delimited. + * Decodes a GetSchemaMigrationsResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.CreateKeyspaceResponse + * @memberof vtctldata.GetSchemaMigrationsResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.CreateKeyspaceResponse} CreateKeyspaceResponse + * @returns {vtctldata.GetSchemaMigrationsResponse} GetSchemaMigrationsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateKeyspaceResponse.decodeDelimited = function decodeDelimited(reader) { + GetSchemaMigrationsResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a CreateKeyspaceResponse message. + * Verifies a GetSchemaMigrationsResponse message. 
* @function verify - * @memberof vtctldata.CreateKeyspaceResponse + * @memberof vtctldata.GetSchemaMigrationsResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - CreateKeyspaceResponse.verify = function verify(message) { + GetSchemaMigrationsResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) { - let error = $root.vtctldata.Keyspace.verify(message.keyspace); - if (error) - return "keyspace." + error; + if (message.migrations != null && message.hasOwnProperty("migrations")) { + if (!Array.isArray(message.migrations)) + return "migrations: array expected"; + for (let i = 0; i < message.migrations.length; ++i) { + let error = $root.vtctldata.SchemaMigration.verify(message.migrations[i]); + if (error) + return "migrations." + error; + } } return null; }; /** - * Creates a CreateKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetSchemaMigrationsResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.CreateKeyspaceResponse + * @memberof vtctldata.GetSchemaMigrationsResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.CreateKeyspaceResponse} CreateKeyspaceResponse + * @returns {vtctldata.GetSchemaMigrationsResponse} GetSchemaMigrationsResponse */ - CreateKeyspaceResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.CreateKeyspaceResponse) + GetSchemaMigrationsResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetSchemaMigrationsResponse) return object; - let message = new $root.vtctldata.CreateKeyspaceResponse(); - if (object.keyspace != null) { - if (typeof object.keyspace !== "object") - throw TypeError(".vtctldata.CreateKeyspaceResponse.keyspace: object expected"); - message.keyspace = $root.vtctldata.Keyspace.fromObject(object.keyspace); + let message = new $root.vtctldata.GetSchemaMigrationsResponse(); + if (object.migrations) { + if (!Array.isArray(object.migrations)) + throw TypeError(".vtctldata.GetSchemaMigrationsResponse.migrations: array expected"); + message.migrations = []; + for (let i = 0; i < object.migrations.length; ++i) { + if (typeof object.migrations[i] !== "object") + throw TypeError(".vtctldata.GetSchemaMigrationsResponse.migrations: object expected"); + message.migrations[i] = $root.vtctldata.SchemaMigration.fromObject(object.migrations[i]); + } } return message; }; /** - * Creates a plain object from a CreateKeyspaceResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetSchemaMigrationsResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.CreateKeyspaceResponse + * @memberof vtctldata.GetSchemaMigrationsResponse * @static - * @param {vtctldata.CreateKeyspaceResponse} message CreateKeyspaceResponse + * @param {vtctldata.GetSchemaMigrationsResponse} message GetSchemaMigrationsResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - CreateKeyspaceResponse.toObject = function toObject(message, options) { + GetSchemaMigrationsResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.keyspace = null; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = $root.vtctldata.Keyspace.toObject(message.keyspace, options); + if (options.arrays || options.defaults) + object.migrations = []; + if (message.migrations && message.migrations.length) { + object.migrations = []; + for (let j = 0; j < message.migrations.length; ++j) + object.migrations[j] = $root.vtctldata.SchemaMigration.toObject(message.migrations[j], options); + } return object; }; /** - * Converts this CreateKeyspaceResponse to JSON. + * Converts this GetSchemaMigrationsResponse to JSON. 
* @function toJSON - * @memberof vtctldata.CreateKeyspaceResponse + * @memberof vtctldata.GetSchemaMigrationsResponse * @instance * @returns {Object.} JSON object */ - CreateKeyspaceResponse.prototype.toJSON = function toJSON() { + GetSchemaMigrationsResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for CreateKeyspaceResponse + * Gets the default type url for GetSchemaMigrationsResponse * @function getTypeUrl - * @memberof vtctldata.CreateKeyspaceResponse + * @memberof vtctldata.GetSchemaMigrationsResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - CreateKeyspaceResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetSchemaMigrationsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.CreateKeyspaceResponse"; + return typeUrlPrefix + "/vtctldata.GetSchemaMigrationsResponse"; }; - return CreateKeyspaceResponse; + return GetSchemaMigrationsResponse; })(); - vtctldata.CreateShardRequest = (function() { + vtctldata.GetShardRequest = (function() { /** - * Properties of a CreateShardRequest. + * Properties of a GetShardRequest. * @memberof vtctldata - * @interface ICreateShardRequest - * @property {string|null} [keyspace] CreateShardRequest keyspace - * @property {string|null} [shard_name] CreateShardRequest shard_name - * @property {boolean|null} [force] CreateShardRequest force - * @property {boolean|null} [include_parent] CreateShardRequest include_parent + * @interface IGetShardRequest + * @property {string|null} [keyspace] GetShardRequest keyspace + * @property {string|null} [shard_name] GetShardRequest shard_name */ /** - * Constructs a new CreateShardRequest. + * Constructs a new GetShardRequest. 
* @memberof vtctldata - * @classdesc Represents a CreateShardRequest. - * @implements ICreateShardRequest + * @classdesc Represents a GetShardRequest. + * @implements IGetShardRequest * @constructor - * @param {vtctldata.ICreateShardRequest=} [properties] Properties to set + * @param {vtctldata.IGetShardRequest=} [properties] Properties to set */ - function CreateShardRequest(properties) { + function GetShardRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -106194,100 +126480,80 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * CreateShardRequest keyspace. + * GetShardRequest keyspace. * @member {string} keyspace - * @memberof vtctldata.CreateShardRequest + * @memberof vtctldata.GetShardRequest * @instance */ - CreateShardRequest.prototype.keyspace = ""; + GetShardRequest.prototype.keyspace = ""; /** - * CreateShardRequest shard_name. + * GetShardRequest shard_name. * @member {string} shard_name - * @memberof vtctldata.CreateShardRequest - * @instance - */ - CreateShardRequest.prototype.shard_name = ""; - - /** - * CreateShardRequest force. - * @member {boolean} force - * @memberof vtctldata.CreateShardRequest - * @instance - */ - CreateShardRequest.prototype.force = false; - - /** - * CreateShardRequest include_parent. - * @member {boolean} include_parent - * @memberof vtctldata.CreateShardRequest + * @memberof vtctldata.GetShardRequest * @instance */ - CreateShardRequest.prototype.include_parent = false; + GetShardRequest.prototype.shard_name = ""; /** - * Creates a new CreateShardRequest instance using the specified properties. + * Creates a new GetShardRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.CreateShardRequest + * @memberof vtctldata.GetShardRequest * @static - * @param {vtctldata.ICreateShardRequest=} [properties] Properties to set - * @returns {vtctldata.CreateShardRequest} CreateShardRequest instance + * @param {vtctldata.IGetShardRequest=} [properties] Properties to set + * @returns {vtctldata.GetShardRequest} GetShardRequest instance */ - CreateShardRequest.create = function create(properties) { - return new CreateShardRequest(properties); + GetShardRequest.create = function create(properties) { + return new GetShardRequest(properties); }; /** - * Encodes the specified CreateShardRequest message. Does not implicitly {@link vtctldata.CreateShardRequest.verify|verify} messages. + * Encodes the specified GetShardRequest message. Does not implicitly {@link vtctldata.GetShardRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.CreateShardRequest + * @memberof vtctldata.GetShardRequest * @static - * @param {vtctldata.ICreateShardRequest} message CreateShardRequest message or plain object to encode + * @param {vtctldata.IGetShardRequest} message GetShardRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CreateShardRequest.encode = function encode(message, writer) { + GetShardRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); if (message.shard_name != null && Object.hasOwnProperty.call(message, "shard_name")) writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard_name); - if (message.force != null && Object.hasOwnProperty.call(message, "force")) - writer.uint32(/* id 3, wireType 0 =*/24).bool(message.force); - if (message.include_parent != null && Object.hasOwnProperty.call(message, "include_parent")) - 
writer.uint32(/* id 4, wireType 0 =*/32).bool(message.include_parent); return writer; }; /** - * Encodes the specified CreateShardRequest message, length delimited. Does not implicitly {@link vtctldata.CreateShardRequest.verify|verify} messages. + * Encodes the specified GetShardRequest message, length delimited. Does not implicitly {@link vtctldata.GetShardRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.CreateShardRequest + * @memberof vtctldata.GetShardRequest * @static - * @param {vtctldata.ICreateShardRequest} message CreateShardRequest message or plain object to encode + * @param {vtctldata.IGetShardRequest} message GetShardRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CreateShardRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetShardRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a CreateShardRequest message from the specified reader or buffer. + * Decodes a GetShardRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.CreateShardRequest + * @memberof vtctldata.GetShardRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.CreateShardRequest} CreateShardRequest + * @returns {vtctldata.GetShardRequest} GetShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateShardRequest.decode = function decode(reader, length) { + GetShardRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.CreateShardRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetShardRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -106299,14 +126565,6 @@ export const vtctldata = $root.vtctldata = (() => { message.shard_name = reader.string(); break; } - case 3: { - message.force = reader.bool(); - break; - } - case 4: { - message.include_parent = reader.bool(); - break; - } default: reader.skipType(tag & 7); break; @@ -106316,30 +126574,30 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a CreateShardRequest message from the specified reader or buffer, length delimited. + * Decodes a GetShardRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.CreateShardRequest + * @memberof vtctldata.GetShardRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.CreateShardRequest} CreateShardRequest + * @returns {vtctldata.GetShardRequest} GetShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateShardRequest.decodeDelimited = function decodeDelimited(reader) { + GetShardRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a CreateShardRequest message. + * Verifies a GetShardRequest message. 
* @function verify - * @memberof vtctldata.CreateShardRequest + * @memberof vtctldata.GetShardRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - CreateShardRequest.verify = function verify(message) { + GetShardRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.keyspace != null && message.hasOwnProperty("keyspace")) @@ -106348,117 +126606,99 @@ export const vtctldata = $root.vtctldata = (() => { if (message.shard_name != null && message.hasOwnProperty("shard_name")) if (!$util.isString(message.shard_name)) return "shard_name: string expected"; - if (message.force != null && message.hasOwnProperty("force")) - if (typeof message.force !== "boolean") - return "force: boolean expected"; - if (message.include_parent != null && message.hasOwnProperty("include_parent")) - if (typeof message.include_parent !== "boolean") - return "include_parent: boolean expected"; return null; }; /** - * Creates a CreateShardRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetShardRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.CreateShardRequest + * @memberof vtctldata.GetShardRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.CreateShardRequest} CreateShardRequest + * @returns {vtctldata.GetShardRequest} GetShardRequest */ - CreateShardRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.CreateShardRequest) + GetShardRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetShardRequest) return object; - let message = new $root.vtctldata.CreateShardRequest(); + let message = new $root.vtctldata.GetShardRequest(); if (object.keyspace != null) message.keyspace = String(object.keyspace); if (object.shard_name != null) message.shard_name = String(object.shard_name); - if (object.force != null) - message.force = Boolean(object.force); - if (object.include_parent != null) - message.include_parent = Boolean(object.include_parent); return message; }; /** - * Creates a plain object from a CreateShardRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetShardRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.CreateShardRequest + * @memberof vtctldata.GetShardRequest * @static - * @param {vtctldata.CreateShardRequest} message CreateShardRequest + * @param {vtctldata.GetShardRequest} message GetShardRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - CreateShardRequest.toObject = function toObject(message, options) { + GetShardRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { object.keyspace = ""; object.shard_name = ""; - object.force = false; - object.include_parent = false; } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; if (message.shard_name != null && message.hasOwnProperty("shard_name")) object.shard_name = message.shard_name; - if (message.force != null && message.hasOwnProperty("force")) - object.force = message.force; - if (message.include_parent != null && message.hasOwnProperty("include_parent")) - object.include_parent = message.include_parent; return object; }; /** - * Converts this CreateShardRequest to JSON. + * Converts this GetShardRequest to JSON. 
* @function toJSON - * @memberof vtctldata.CreateShardRequest + * @memberof vtctldata.GetShardRequest * @instance * @returns {Object.} JSON object */ - CreateShardRequest.prototype.toJSON = function toJSON() { + GetShardRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for CreateShardRequest + * Gets the default type url for GetShardRequest * @function getTypeUrl - * @memberof vtctldata.CreateShardRequest + * @memberof vtctldata.GetShardRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - CreateShardRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetShardRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.CreateShardRequest"; + return typeUrlPrefix + "/vtctldata.GetShardRequest"; }; - return CreateShardRequest; + return GetShardRequest; })(); - vtctldata.CreateShardResponse = (function() { + vtctldata.GetShardResponse = (function() { /** - * Properties of a CreateShardResponse. + * Properties of a GetShardResponse. * @memberof vtctldata - * @interface ICreateShardResponse - * @property {vtctldata.IKeyspace|null} [keyspace] CreateShardResponse keyspace - * @property {vtctldata.IShard|null} [shard] CreateShardResponse shard - * @property {boolean|null} [shard_already_exists] CreateShardResponse shard_already_exists + * @interface IGetShardResponse + * @property {vtctldata.IShard|null} [shard] GetShardResponse shard */ /** - * Constructs a new CreateShardResponse. + * Constructs a new GetShardResponse. * @memberof vtctldata - * @classdesc Represents a CreateShardResponse. - * @implements ICreateShardResponse + * @classdesc Represents a GetShardResponse. 
+ * @implements IGetShardResponse * @constructor - * @param {vtctldata.ICreateShardResponse=} [properties] Properties to set + * @param {vtctldata.IGetShardResponse=} [properties] Properties to set */ - function CreateShardResponse(properties) { + function GetShardResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -106466,105 +126706,77 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * CreateShardResponse keyspace. - * @member {vtctldata.IKeyspace|null|undefined} keyspace - * @memberof vtctldata.CreateShardResponse - * @instance - */ - CreateShardResponse.prototype.keyspace = null; - - /** - * CreateShardResponse shard. + * GetShardResponse shard. * @member {vtctldata.IShard|null|undefined} shard - * @memberof vtctldata.CreateShardResponse - * @instance - */ - CreateShardResponse.prototype.shard = null; - - /** - * CreateShardResponse shard_already_exists. - * @member {boolean} shard_already_exists - * @memberof vtctldata.CreateShardResponse + * @memberof vtctldata.GetShardResponse * @instance */ - CreateShardResponse.prototype.shard_already_exists = false; + GetShardResponse.prototype.shard = null; /** - * Creates a new CreateShardResponse instance using the specified properties. + * Creates a new GetShardResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.CreateShardResponse + * @memberof vtctldata.GetShardResponse * @static - * @param {vtctldata.ICreateShardResponse=} [properties] Properties to set - * @returns {vtctldata.CreateShardResponse} CreateShardResponse instance + * @param {vtctldata.IGetShardResponse=} [properties] Properties to set + * @returns {vtctldata.GetShardResponse} GetShardResponse instance */ - CreateShardResponse.create = function create(properties) { - return new CreateShardResponse(properties); + GetShardResponse.create = function create(properties) { + return new GetShardResponse(properties); }; /** - * Encodes the specified CreateShardResponse message. Does not implicitly {@link vtctldata.CreateShardResponse.verify|verify} messages. + * Encodes the specified GetShardResponse message. Does not implicitly {@link vtctldata.GetShardResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.CreateShardResponse + * @memberof vtctldata.GetShardResponse * @static - * @param {vtctldata.ICreateShardResponse} message CreateShardResponse message or plain object to encode + * @param {vtctldata.IGetShardResponse} message GetShardResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CreateShardResponse.encode = function encode(message, writer) { + GetShardResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - $root.vtctldata.Keyspace.encode(message.keyspace, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - $root.vtctldata.Shard.encode(message.shard, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.shard_already_exists != null && Object.hasOwnProperty.call(message, "shard_already_exists")) - writer.uint32(/* id 3, wireType 0 
=*/24).bool(message.shard_already_exists); + $root.vtctldata.Shard.encode(message.shard, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified CreateShardResponse message, length delimited. Does not implicitly {@link vtctldata.CreateShardResponse.verify|verify} messages. + * Encodes the specified GetShardResponse message, length delimited. Does not implicitly {@link vtctldata.GetShardResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.CreateShardResponse + * @memberof vtctldata.GetShardResponse * @static - * @param {vtctldata.ICreateShardResponse} message CreateShardResponse message or plain object to encode + * @param {vtctldata.IGetShardResponse} message GetShardResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - CreateShardResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetShardResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a CreateShardResponse message from the specified reader or buffer. + * Decodes a GetShardResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.CreateShardResponse + * @memberof vtctldata.GetShardResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.CreateShardResponse} CreateShardResponse + * @returns {vtctldata.GetShardResponse} GetShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateShardResponse.decode = function decode(reader, length) { + GetShardResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.CreateShardResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetShardResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = $root.vtctldata.Keyspace.decode(reader, reader.uint32()); - break; - } - case 2: { message.shard = $root.vtctldata.Shard.decode(reader, reader.uint32()); break; } - case 3: { - message.shard_already_exists = reader.bool(); - break; - } default: reader.skipType(tag & 7); break; @@ -106574,150 +126786,126 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a CreateShardResponse message from the specified reader or buffer, length delimited. + * Decodes a GetShardResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.CreateShardResponse + * @memberof vtctldata.GetShardResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.CreateShardResponse} CreateShardResponse + * @returns {vtctldata.GetShardResponse} GetShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - CreateShardResponse.decodeDelimited = function decodeDelimited(reader) { + GetShardResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a CreateShardResponse message. + * Verifies a GetShardResponse message. * @function verify - * @memberof vtctldata.CreateShardResponse + * @memberof vtctldata.GetShardResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - CreateShardResponse.verify = function verify(message) { + GetShardResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) { - let error = $root.vtctldata.Keyspace.verify(message.keyspace); - if (error) - return "keyspace." + error; - } if (message.shard != null && message.hasOwnProperty("shard")) { let error = $root.vtctldata.Shard.verify(message.shard); if (error) return "shard." + error; } - if (message.shard_already_exists != null && message.hasOwnProperty("shard_already_exists")) - if (typeof message.shard_already_exists !== "boolean") - return "shard_already_exists: boolean expected"; return null; }; /** - * Creates a CreateShardResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetShardResponse message from a plain object. 
Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.CreateShardResponse + * @memberof vtctldata.GetShardResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.CreateShardResponse} CreateShardResponse + * @returns {vtctldata.GetShardResponse} GetShardResponse */ - CreateShardResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.CreateShardResponse) + GetShardResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetShardResponse) return object; - let message = new $root.vtctldata.CreateShardResponse(); - if (object.keyspace != null) { - if (typeof object.keyspace !== "object") - throw TypeError(".vtctldata.CreateShardResponse.keyspace: object expected"); - message.keyspace = $root.vtctldata.Keyspace.fromObject(object.keyspace); - } + let message = new $root.vtctldata.GetShardResponse(); if (object.shard != null) { if (typeof object.shard !== "object") - throw TypeError(".vtctldata.CreateShardResponse.shard: object expected"); + throw TypeError(".vtctldata.GetShardResponse.shard: object expected"); message.shard = $root.vtctldata.Shard.fromObject(object.shard); } - if (object.shard_already_exists != null) - message.shard_already_exists = Boolean(object.shard_already_exists); return message; }; /** - * Creates a plain object from a CreateShardResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetShardResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.CreateShardResponse + * @memberof vtctldata.GetShardResponse * @static - * @param {vtctldata.CreateShardResponse} message CreateShardResponse + * @param {vtctldata.GetShardResponse} message GetShardResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - CreateShardResponse.toObject = function toObject(message, options) { + GetShardResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.keyspace = null; + if (options.defaults) object.shard = null; - object.shard_already_exists = false; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = $root.vtctldata.Keyspace.toObject(message.keyspace, options); if (message.shard != null && message.hasOwnProperty("shard")) object.shard = $root.vtctldata.Shard.toObject(message.shard, options); - if (message.shard_already_exists != null && message.hasOwnProperty("shard_already_exists")) - object.shard_already_exists = message.shard_already_exists; return object; }; /** - * Converts this CreateShardResponse to JSON. + * Converts this GetShardResponse to JSON. 
* @function toJSON - * @memberof vtctldata.CreateShardResponse + * @memberof vtctldata.GetShardResponse * @instance * @returns {Object.} JSON object */ - CreateShardResponse.prototype.toJSON = function toJSON() { + GetShardResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for CreateShardResponse + * Gets the default type url for GetShardResponse * @function getTypeUrl - * @memberof vtctldata.CreateShardResponse + * @memberof vtctldata.GetShardResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - CreateShardResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetShardResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.CreateShardResponse"; + return typeUrlPrefix + "/vtctldata.GetShardResponse"; }; - return CreateShardResponse; + return GetShardResponse; })(); - vtctldata.DeleteCellInfoRequest = (function() { + vtctldata.GetShardRoutingRulesRequest = (function() { /** - * Properties of a DeleteCellInfoRequest. + * Properties of a GetShardRoutingRulesRequest. * @memberof vtctldata - * @interface IDeleteCellInfoRequest - * @property {string|null} [name] DeleteCellInfoRequest name - * @property {boolean|null} [force] DeleteCellInfoRequest force + * @interface IGetShardRoutingRulesRequest */ /** - * Constructs a new DeleteCellInfoRequest. + * Constructs a new GetShardRoutingRulesRequest. * @memberof vtctldata - * @classdesc Represents a DeleteCellInfoRequest. - * @implements IDeleteCellInfoRequest + * @classdesc Represents a GetShardRoutingRulesRequest. 
+ * @implements IGetShardRoutingRulesRequest * @constructor - * @param {vtctldata.IDeleteCellInfoRequest=} [properties] Properties to set + * @param {vtctldata.IGetShardRoutingRulesRequest=} [properties] Properties to set */ - function DeleteCellInfoRequest(properties) { + function GetShardRoutingRulesRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -106725,91 +126913,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * DeleteCellInfoRequest name. - * @member {string} name - * @memberof vtctldata.DeleteCellInfoRequest - * @instance - */ - DeleteCellInfoRequest.prototype.name = ""; - - /** - * DeleteCellInfoRequest force. - * @member {boolean} force - * @memberof vtctldata.DeleteCellInfoRequest - * @instance - */ - DeleteCellInfoRequest.prototype.force = false; - - /** - * Creates a new DeleteCellInfoRequest instance using the specified properties. + * Creates a new GetShardRoutingRulesRequest instance using the specified properties. * @function create - * @memberof vtctldata.DeleteCellInfoRequest + * @memberof vtctldata.GetShardRoutingRulesRequest * @static - * @param {vtctldata.IDeleteCellInfoRequest=} [properties] Properties to set - * @returns {vtctldata.DeleteCellInfoRequest} DeleteCellInfoRequest instance + * @param {vtctldata.IGetShardRoutingRulesRequest=} [properties] Properties to set + * @returns {vtctldata.GetShardRoutingRulesRequest} GetShardRoutingRulesRequest instance */ - DeleteCellInfoRequest.create = function create(properties) { - return new DeleteCellInfoRequest(properties); + GetShardRoutingRulesRequest.create = function create(properties) { + return new GetShardRoutingRulesRequest(properties); }; /** - * Encodes the specified DeleteCellInfoRequest message. Does not implicitly {@link vtctldata.DeleteCellInfoRequest.verify|verify} messages. + * Encodes the specified GetShardRoutingRulesRequest message. 
Does not implicitly {@link vtctldata.GetShardRoutingRulesRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.DeleteCellInfoRequest + * @memberof vtctldata.GetShardRoutingRulesRequest * @static - * @param {vtctldata.IDeleteCellInfoRequest} message DeleteCellInfoRequest message or plain object to encode + * @param {vtctldata.IGetShardRoutingRulesRequest} message GetShardRoutingRulesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteCellInfoRequest.encode = function encode(message, writer) { + GetShardRoutingRulesRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.name != null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); - if (message.force != null && Object.hasOwnProperty.call(message, "force")) - writer.uint32(/* id 2, wireType 0 =*/16).bool(message.force); return writer; }; /** - * Encodes the specified DeleteCellInfoRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteCellInfoRequest.verify|verify} messages. + * Encodes the specified GetShardRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.GetShardRoutingRulesRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.DeleteCellInfoRequest + * @memberof vtctldata.GetShardRoutingRulesRequest * @static - * @param {vtctldata.IDeleteCellInfoRequest} message DeleteCellInfoRequest message or plain object to encode + * @param {vtctldata.IGetShardRoutingRulesRequest} message GetShardRoutingRulesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteCellInfoRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetShardRoutingRulesRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a DeleteCellInfoRequest message from the specified reader or buffer. + * Decodes a GetShardRoutingRulesRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.DeleteCellInfoRequest + * @memberof vtctldata.GetShardRoutingRulesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.DeleteCellInfoRequest} DeleteCellInfoRequest + * @returns {vtctldata.GetShardRoutingRulesRequest} GetShardRoutingRulesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteCellInfoRequest.decode = function decode(reader, length) { + GetShardRoutingRulesRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteCellInfoRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetShardRoutingRulesRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.name = reader.string(); - break; - } - case 2: { - message.force = reader.bool(); - break; - } default: reader.skipType(tag & 7); break; @@ -106819,130 +126979,109 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a DeleteCellInfoRequest message from the specified reader or buffer, length delimited. + * Decodes a GetShardRoutingRulesRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.DeleteCellInfoRequest + * @memberof vtctldata.GetShardRoutingRulesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.DeleteCellInfoRequest} DeleteCellInfoRequest + * @returns {vtctldata.GetShardRoutingRulesRequest} GetShardRoutingRulesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteCellInfoRequest.decodeDelimited = function decodeDelimited(reader) { + GetShardRoutingRulesRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a DeleteCellInfoRequest message. + * Verifies a GetShardRoutingRulesRequest message. 
* @function verify - * @memberof vtctldata.DeleteCellInfoRequest + * @memberof vtctldata.GetShardRoutingRulesRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - DeleteCellInfoRequest.verify = function verify(message) { + GetShardRoutingRulesRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; - if (message.force != null && message.hasOwnProperty("force")) - if (typeof message.force !== "boolean") - return "force: boolean expected"; return null; }; /** - * Creates a DeleteCellInfoRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetShardRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.DeleteCellInfoRequest + * @memberof vtctldata.GetShardRoutingRulesRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.DeleteCellInfoRequest} DeleteCellInfoRequest + * @returns {vtctldata.GetShardRoutingRulesRequest} GetShardRoutingRulesRequest */ - DeleteCellInfoRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.DeleteCellInfoRequest) + GetShardRoutingRulesRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetShardRoutingRulesRequest) return object; - let message = new $root.vtctldata.DeleteCellInfoRequest(); - if (object.name != null) - message.name = String(object.name); - if (object.force != null) - message.force = Boolean(object.force); - return message; + return new $root.vtctldata.GetShardRoutingRulesRequest(); }; /** - * Creates a plain object from a DeleteCellInfoRequest message. 
Also converts values to other types if specified. + * Creates a plain object from a GetShardRoutingRulesRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.DeleteCellInfoRequest + * @memberof vtctldata.GetShardRoutingRulesRequest * @static - * @param {vtctldata.DeleteCellInfoRequest} message DeleteCellInfoRequest + * @param {vtctldata.GetShardRoutingRulesRequest} message GetShardRoutingRulesRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - DeleteCellInfoRequest.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.name = ""; - object.force = false; - } - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; - if (message.force != null && message.hasOwnProperty("force")) - object.force = message.force; - return object; + GetShardRoutingRulesRequest.toObject = function toObject() { + return {}; }; /** - * Converts this DeleteCellInfoRequest to JSON. + * Converts this GetShardRoutingRulesRequest to JSON. 
* @function toJSON - * @memberof vtctldata.DeleteCellInfoRequest + * @memberof vtctldata.GetShardRoutingRulesRequest * @instance * @returns {Object.} JSON object */ - DeleteCellInfoRequest.prototype.toJSON = function toJSON() { + GetShardRoutingRulesRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for DeleteCellInfoRequest + * Gets the default type url for GetShardRoutingRulesRequest * @function getTypeUrl - * @memberof vtctldata.DeleteCellInfoRequest + * @memberof vtctldata.GetShardRoutingRulesRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - DeleteCellInfoRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetShardRoutingRulesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.DeleteCellInfoRequest"; + return typeUrlPrefix + "/vtctldata.GetShardRoutingRulesRequest"; }; - return DeleteCellInfoRequest; + return GetShardRoutingRulesRequest; })(); - vtctldata.DeleteCellInfoResponse = (function() { + vtctldata.GetShardRoutingRulesResponse = (function() { /** - * Properties of a DeleteCellInfoResponse. + * Properties of a GetShardRoutingRulesResponse. * @memberof vtctldata - * @interface IDeleteCellInfoResponse + * @interface IGetShardRoutingRulesResponse + * @property {vschema.IShardRoutingRules|null} [shard_routing_rules] GetShardRoutingRulesResponse shard_routing_rules */ /** - * Constructs a new DeleteCellInfoResponse. + * Constructs a new GetShardRoutingRulesResponse. * @memberof vtctldata - * @classdesc Represents a DeleteCellInfoResponse. - * @implements IDeleteCellInfoResponse + * @classdesc Represents a GetShardRoutingRulesResponse. 
+ * @implements IGetShardRoutingRulesResponse * @constructor - * @param {vtctldata.IDeleteCellInfoResponse=} [properties] Properties to set + * @param {vtctldata.IGetShardRoutingRulesResponse=} [properties] Properties to set */ - function DeleteCellInfoResponse(properties) { + function GetShardRoutingRulesResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -106950,63 +127089,77 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new DeleteCellInfoResponse instance using the specified properties. + * GetShardRoutingRulesResponse shard_routing_rules. + * @member {vschema.IShardRoutingRules|null|undefined} shard_routing_rules + * @memberof vtctldata.GetShardRoutingRulesResponse + * @instance + */ + GetShardRoutingRulesResponse.prototype.shard_routing_rules = null; + + /** + * Creates a new GetShardRoutingRulesResponse instance using the specified properties. * @function create - * @memberof vtctldata.DeleteCellInfoResponse + * @memberof vtctldata.GetShardRoutingRulesResponse * @static - * @param {vtctldata.IDeleteCellInfoResponse=} [properties] Properties to set - * @returns {vtctldata.DeleteCellInfoResponse} DeleteCellInfoResponse instance + * @param {vtctldata.IGetShardRoutingRulesResponse=} [properties] Properties to set + * @returns {vtctldata.GetShardRoutingRulesResponse} GetShardRoutingRulesResponse instance */ - DeleteCellInfoResponse.create = function create(properties) { - return new DeleteCellInfoResponse(properties); + GetShardRoutingRulesResponse.create = function create(properties) { + return new GetShardRoutingRulesResponse(properties); }; /** - * Encodes the specified DeleteCellInfoResponse message. Does not implicitly {@link vtctldata.DeleteCellInfoResponse.verify|verify} messages. + * Encodes the specified GetShardRoutingRulesResponse message. Does not implicitly {@link vtctldata.GetShardRoutingRulesResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.DeleteCellInfoResponse + * @memberof vtctldata.GetShardRoutingRulesResponse * @static - * @param {vtctldata.IDeleteCellInfoResponse} message DeleteCellInfoResponse message or plain object to encode + * @param {vtctldata.IGetShardRoutingRulesResponse} message GetShardRoutingRulesResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteCellInfoResponse.encode = function encode(message, writer) { + GetShardRoutingRulesResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.shard_routing_rules != null && Object.hasOwnProperty.call(message, "shard_routing_rules")) + $root.vschema.ShardRoutingRules.encode(message.shard_routing_rules, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified DeleteCellInfoResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteCellInfoResponse.verify|verify} messages. + * Encodes the specified GetShardRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.GetShardRoutingRulesResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.DeleteCellInfoResponse + * @memberof vtctldata.GetShardRoutingRulesResponse * @static - * @param {vtctldata.IDeleteCellInfoResponse} message DeleteCellInfoResponse message or plain object to encode + * @param {vtctldata.IGetShardRoutingRulesResponse} message GetShardRoutingRulesResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteCellInfoResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetShardRoutingRulesResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a DeleteCellInfoResponse message from the specified reader or buffer. + * Decodes a GetShardRoutingRulesResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.DeleteCellInfoResponse + * @memberof vtctldata.GetShardRoutingRulesResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.DeleteCellInfoResponse} DeleteCellInfoResponse + * @returns {vtctldata.GetShardRoutingRulesResponse} GetShardRoutingRulesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteCellInfoResponse.decode = function decode(reader, length) { + GetShardRoutingRulesResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteCellInfoResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetShardRoutingRulesResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.shard_routing_rules = $root.vschema.ShardRoutingRules.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -107016,109 +127169,128 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a DeleteCellInfoResponse message from the specified reader or buffer, length delimited. + * Decodes a GetShardRoutingRulesResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.DeleteCellInfoResponse + * @memberof vtctldata.GetShardRoutingRulesResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.DeleteCellInfoResponse} DeleteCellInfoResponse + * @returns {vtctldata.GetShardRoutingRulesResponse} GetShardRoutingRulesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteCellInfoResponse.decodeDelimited = function decodeDelimited(reader) { + GetShardRoutingRulesResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a DeleteCellInfoResponse message. + * Verifies a GetShardRoutingRulesResponse message. 
* @function verify - * @memberof vtctldata.DeleteCellInfoResponse + * @memberof vtctldata.GetShardRoutingRulesResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - DeleteCellInfoResponse.verify = function verify(message) { + GetShardRoutingRulesResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.shard_routing_rules != null && message.hasOwnProperty("shard_routing_rules")) { + let error = $root.vschema.ShardRoutingRules.verify(message.shard_routing_rules); + if (error) + return "shard_routing_rules." + error; + } return null; }; /** - * Creates a DeleteCellInfoResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetShardRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.DeleteCellInfoResponse + * @memberof vtctldata.GetShardRoutingRulesResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.DeleteCellInfoResponse} DeleteCellInfoResponse + * @returns {vtctldata.GetShardRoutingRulesResponse} GetShardRoutingRulesResponse */ - DeleteCellInfoResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.DeleteCellInfoResponse) + GetShardRoutingRulesResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetShardRoutingRulesResponse) return object; - return new $root.vtctldata.DeleteCellInfoResponse(); + let message = new $root.vtctldata.GetShardRoutingRulesResponse(); + if (object.shard_routing_rules != null) { + if (typeof object.shard_routing_rules !== "object") + throw TypeError(".vtctldata.GetShardRoutingRulesResponse.shard_routing_rules: object expected"); + message.shard_routing_rules = 
$root.vschema.ShardRoutingRules.fromObject(object.shard_routing_rules); + } + return message; }; /** - * Creates a plain object from a DeleteCellInfoResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetShardRoutingRulesResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.DeleteCellInfoResponse + * @memberof vtctldata.GetShardRoutingRulesResponse * @static - * @param {vtctldata.DeleteCellInfoResponse} message DeleteCellInfoResponse + * @param {vtctldata.GetShardRoutingRulesResponse} message GetShardRoutingRulesResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - DeleteCellInfoResponse.toObject = function toObject() { - return {}; + GetShardRoutingRulesResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.shard_routing_rules = null; + if (message.shard_routing_rules != null && message.hasOwnProperty("shard_routing_rules")) + object.shard_routing_rules = $root.vschema.ShardRoutingRules.toObject(message.shard_routing_rules, options); + return object; }; /** - * Converts this DeleteCellInfoResponse to JSON. + * Converts this GetShardRoutingRulesResponse to JSON. 
* @function toJSON - * @memberof vtctldata.DeleteCellInfoResponse + * @memberof vtctldata.GetShardRoutingRulesResponse * @instance * @returns {Object.} JSON object */ - DeleteCellInfoResponse.prototype.toJSON = function toJSON() { + GetShardRoutingRulesResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for DeleteCellInfoResponse + * Gets the default type url for GetShardRoutingRulesResponse * @function getTypeUrl - * @memberof vtctldata.DeleteCellInfoResponse + * @memberof vtctldata.GetShardRoutingRulesResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - DeleteCellInfoResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetShardRoutingRulesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.DeleteCellInfoResponse"; + return typeUrlPrefix + "/vtctldata.GetShardRoutingRulesResponse"; }; - return DeleteCellInfoResponse; + return GetShardRoutingRulesResponse; })(); - vtctldata.DeleteCellsAliasRequest = (function() { + vtctldata.GetSrvKeyspaceNamesRequest = (function() { /** - * Properties of a DeleteCellsAliasRequest. + * Properties of a GetSrvKeyspaceNamesRequest. * @memberof vtctldata - * @interface IDeleteCellsAliasRequest - * @property {string|null} [name] DeleteCellsAliasRequest name + * @interface IGetSrvKeyspaceNamesRequest + * @property {Array.|null} [cells] GetSrvKeyspaceNamesRequest cells */ /** - * Constructs a new DeleteCellsAliasRequest. + * Constructs a new GetSrvKeyspaceNamesRequest. * @memberof vtctldata - * @classdesc Represents a DeleteCellsAliasRequest. - * @implements IDeleteCellsAliasRequest + * @classdesc Represents a GetSrvKeyspaceNamesRequest. 
+ * @implements IGetSrvKeyspaceNamesRequest * @constructor - * @param {vtctldata.IDeleteCellsAliasRequest=} [properties] Properties to set + * @param {vtctldata.IGetSrvKeyspaceNamesRequest=} [properties] Properties to set */ - function DeleteCellsAliasRequest(properties) { + function GetSrvKeyspaceNamesRequest(properties) { + this.cells = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -107126,75 +127298,78 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * DeleteCellsAliasRequest name. - * @member {string} name - * @memberof vtctldata.DeleteCellsAliasRequest + * GetSrvKeyspaceNamesRequest cells. + * @member {Array.} cells + * @memberof vtctldata.GetSrvKeyspaceNamesRequest * @instance */ - DeleteCellsAliasRequest.prototype.name = ""; + GetSrvKeyspaceNamesRequest.prototype.cells = $util.emptyArray; /** - * Creates a new DeleteCellsAliasRequest instance using the specified properties. + * Creates a new GetSrvKeyspaceNamesRequest instance using the specified properties. * @function create - * @memberof vtctldata.DeleteCellsAliasRequest + * @memberof vtctldata.GetSrvKeyspaceNamesRequest * @static - * @param {vtctldata.IDeleteCellsAliasRequest=} [properties] Properties to set - * @returns {vtctldata.DeleteCellsAliasRequest} DeleteCellsAliasRequest instance + * @param {vtctldata.IGetSrvKeyspaceNamesRequest=} [properties] Properties to set + * @returns {vtctldata.GetSrvKeyspaceNamesRequest} GetSrvKeyspaceNamesRequest instance */ - DeleteCellsAliasRequest.create = function create(properties) { - return new DeleteCellsAliasRequest(properties); + GetSrvKeyspaceNamesRequest.create = function create(properties) { + return new GetSrvKeyspaceNamesRequest(properties); }; /** - * Encodes the specified DeleteCellsAliasRequest message. Does not implicitly {@link vtctldata.DeleteCellsAliasRequest.verify|verify} messages. + * Encodes the specified GetSrvKeyspaceNamesRequest message. 
Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.DeleteCellsAliasRequest + * @memberof vtctldata.GetSrvKeyspaceNamesRequest * @static - * @param {vtctldata.IDeleteCellsAliasRequest} message DeleteCellsAliasRequest message or plain object to encode + * @param {vtctldata.IGetSrvKeyspaceNamesRequest} message GetSrvKeyspaceNamesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteCellsAliasRequest.encode = function encode(message, writer) { + GetSrvKeyspaceNamesRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.name != null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.cells != null && message.cells.length) + for (let i = 0; i < message.cells.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.cells[i]); return writer; }; /** - * Encodes the specified DeleteCellsAliasRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteCellsAliasRequest.verify|verify} messages. + * Encodes the specified GetSrvKeyspaceNamesRequest message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.DeleteCellsAliasRequest + * @memberof vtctldata.GetSrvKeyspaceNamesRequest * @static - * @param {vtctldata.IDeleteCellsAliasRequest} message DeleteCellsAliasRequest message or plain object to encode + * @param {vtctldata.IGetSrvKeyspaceNamesRequest} message GetSrvKeyspaceNamesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteCellsAliasRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetSrvKeyspaceNamesRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a DeleteCellsAliasRequest message from the specified reader or buffer. + * Decodes a GetSrvKeyspaceNamesRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.DeleteCellsAliasRequest + * @memberof vtctldata.GetSrvKeyspaceNamesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.DeleteCellsAliasRequest} DeleteCellsAliasRequest + * @returns {vtctldata.GetSrvKeyspaceNamesRequest} GetSrvKeyspaceNamesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteCellsAliasRequest.decode = function decode(reader, length) { + GetSrvKeyspaceNamesRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteCellsAliasRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetSrvKeyspaceNamesRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.name = reader.string(); + if (!(message.cells && message.cells.length)) + message.cells = []; + message.cells.push(reader.string()); break; } default: @@ -107206,121 +127381,135 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a DeleteCellsAliasRequest message from the specified reader or buffer, length delimited. + * Decodes a GetSrvKeyspaceNamesRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.DeleteCellsAliasRequest + * @memberof vtctldata.GetSrvKeyspaceNamesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.DeleteCellsAliasRequest} DeleteCellsAliasRequest + * @returns {vtctldata.GetSrvKeyspaceNamesRequest} GetSrvKeyspaceNamesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteCellsAliasRequest.decodeDelimited = function decodeDelimited(reader) { + GetSrvKeyspaceNamesRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a DeleteCellsAliasRequest message. + * Verifies a GetSrvKeyspaceNamesRequest message. 
* @function verify - * @memberof vtctldata.DeleteCellsAliasRequest + * @memberof vtctldata.GetSrvKeyspaceNamesRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - DeleteCellsAliasRequest.verify = function verify(message) { + GetSrvKeyspaceNamesRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; + if (message.cells != null && message.hasOwnProperty("cells")) { + if (!Array.isArray(message.cells)) + return "cells: array expected"; + for (let i = 0; i < message.cells.length; ++i) + if (!$util.isString(message.cells[i])) + return "cells: string[] expected"; + } return null; }; /** - * Creates a DeleteCellsAliasRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetSrvKeyspaceNamesRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.DeleteCellsAliasRequest + * @memberof vtctldata.GetSrvKeyspaceNamesRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.DeleteCellsAliasRequest} DeleteCellsAliasRequest + * @returns {vtctldata.GetSrvKeyspaceNamesRequest} GetSrvKeyspaceNamesRequest */ - DeleteCellsAliasRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.DeleteCellsAliasRequest) + GetSrvKeyspaceNamesRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetSrvKeyspaceNamesRequest) return object; - let message = new $root.vtctldata.DeleteCellsAliasRequest(); - if (object.name != null) - message.name = String(object.name); + let message = new $root.vtctldata.GetSrvKeyspaceNamesRequest(); + if (object.cells) { + if (!Array.isArray(object.cells)) + throw TypeError(".vtctldata.GetSrvKeyspaceNamesRequest.cells: array expected"); + message.cells = []; + for (let i = 0; i < object.cells.length; ++i) + message.cells[i] = String(object.cells[i]); + } return message; }; /** - * Creates a plain object from a DeleteCellsAliasRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetSrvKeyspaceNamesRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.DeleteCellsAliasRequest + * @memberof vtctldata.GetSrvKeyspaceNamesRequest * @static - * @param {vtctldata.DeleteCellsAliasRequest} message DeleteCellsAliasRequest + * @param {vtctldata.GetSrvKeyspaceNamesRequest} message GetSrvKeyspaceNamesRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - DeleteCellsAliasRequest.toObject = function toObject(message, options) { + GetSrvKeyspaceNamesRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.name = ""; - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; + if (options.arrays || options.defaults) + object.cells = []; + if (message.cells && message.cells.length) { + object.cells = []; + for (let j = 0; j < message.cells.length; ++j) + object.cells[j] = message.cells[j]; + } return object; }; /** - * Converts this DeleteCellsAliasRequest to JSON. + * Converts this GetSrvKeyspaceNamesRequest to JSON. 
* @function toJSON - * @memberof vtctldata.DeleteCellsAliasRequest + * @memberof vtctldata.GetSrvKeyspaceNamesRequest * @instance * @returns {Object.} JSON object */ - DeleteCellsAliasRequest.prototype.toJSON = function toJSON() { + GetSrvKeyspaceNamesRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for DeleteCellsAliasRequest + * Gets the default type url for GetSrvKeyspaceNamesRequest * @function getTypeUrl - * @memberof vtctldata.DeleteCellsAliasRequest + * @memberof vtctldata.GetSrvKeyspaceNamesRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - DeleteCellsAliasRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetSrvKeyspaceNamesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.DeleteCellsAliasRequest"; + return typeUrlPrefix + "/vtctldata.GetSrvKeyspaceNamesRequest"; }; - return DeleteCellsAliasRequest; + return GetSrvKeyspaceNamesRequest; })(); - vtctldata.DeleteCellsAliasResponse = (function() { + vtctldata.GetSrvKeyspaceNamesResponse = (function() { /** - * Properties of a DeleteCellsAliasResponse. + * Properties of a GetSrvKeyspaceNamesResponse. * @memberof vtctldata - * @interface IDeleteCellsAliasResponse + * @interface IGetSrvKeyspaceNamesResponse + * @property {Object.|null} [names] GetSrvKeyspaceNamesResponse names */ /** - * Constructs a new DeleteCellsAliasResponse. + * Constructs a new GetSrvKeyspaceNamesResponse. * @memberof vtctldata - * @classdesc Represents a DeleteCellsAliasResponse. - * @implements IDeleteCellsAliasResponse + * @classdesc Represents a GetSrvKeyspaceNamesResponse. 
+ * @implements IGetSrvKeyspaceNamesResponse * @constructor - * @param {vtctldata.IDeleteCellsAliasResponse=} [properties] Properties to set + * @param {vtctldata.IGetSrvKeyspaceNamesResponse=} [properties] Properties to set */ - function DeleteCellsAliasResponse(properties) { + function GetSrvKeyspaceNamesResponse(properties) { + this.names = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -107328,63 +127517,99 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new DeleteCellsAliasResponse instance using the specified properties. + * GetSrvKeyspaceNamesResponse names. + * @member {Object.} names + * @memberof vtctldata.GetSrvKeyspaceNamesResponse + * @instance + */ + GetSrvKeyspaceNamesResponse.prototype.names = $util.emptyObject; + + /** + * Creates a new GetSrvKeyspaceNamesResponse instance using the specified properties. * @function create - * @memberof vtctldata.DeleteCellsAliasResponse + * @memberof vtctldata.GetSrvKeyspaceNamesResponse * @static - * @param {vtctldata.IDeleteCellsAliasResponse=} [properties] Properties to set - * @returns {vtctldata.DeleteCellsAliasResponse} DeleteCellsAliasResponse instance + * @param {vtctldata.IGetSrvKeyspaceNamesResponse=} [properties] Properties to set + * @returns {vtctldata.GetSrvKeyspaceNamesResponse} GetSrvKeyspaceNamesResponse instance */ - DeleteCellsAliasResponse.create = function create(properties) { - return new DeleteCellsAliasResponse(properties); + GetSrvKeyspaceNamesResponse.create = function create(properties) { + return new GetSrvKeyspaceNamesResponse(properties); }; /** - * Encodes the specified DeleteCellsAliasResponse message. Does not implicitly {@link vtctldata.DeleteCellsAliasResponse.verify|verify} messages. + * Encodes the specified GetSrvKeyspaceNamesResponse message. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.DeleteCellsAliasResponse + * @memberof vtctldata.GetSrvKeyspaceNamesResponse * @static - * @param {vtctldata.IDeleteCellsAliasResponse} message DeleteCellsAliasResponse message or plain object to encode + * @param {vtctldata.IGetSrvKeyspaceNamesResponse} message GetSrvKeyspaceNamesResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteCellsAliasResponse.encode = function encode(message, writer) { + GetSrvKeyspaceNamesResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.names != null && Object.hasOwnProperty.call(message, "names")) + for (let keys = Object.keys(message.names), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.vtctldata.GetSrvKeyspaceNamesResponse.NameList.encode(message.names[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } return writer; }; /** - * Encodes the specified DeleteCellsAliasResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteCellsAliasResponse.verify|verify} messages. + * Encodes the specified GetSrvKeyspaceNamesResponse message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.DeleteCellsAliasResponse + * @memberof vtctldata.GetSrvKeyspaceNamesResponse * @static - * @param {vtctldata.IDeleteCellsAliasResponse} message DeleteCellsAliasResponse message or plain object to encode + * @param {vtctldata.IGetSrvKeyspaceNamesResponse} message GetSrvKeyspaceNamesResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteCellsAliasResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetSrvKeyspaceNamesResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a DeleteCellsAliasResponse message from the specified reader or buffer. + * Decodes a GetSrvKeyspaceNamesResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.DeleteCellsAliasResponse + * @memberof vtctldata.GetSrvKeyspaceNamesResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.DeleteCellsAliasResponse} DeleteCellsAliasResponse + * @returns {vtctldata.GetSrvKeyspaceNamesResponse} GetSrvKeyspaceNamesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteCellsAliasResponse.decode = function decode(reader, length) { + GetSrvKeyspaceNamesResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteCellsAliasResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetSrvKeyspaceNamesResponse(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + if (message.names === $util.emptyObject) + message.names = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.vtctldata.GetSrvKeyspaceNamesResponse.NameList.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.names[key] = value; + break; + } default: reader.skipType(tag & 7); break; @@ -107394,111 +127619,362 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a DeleteCellsAliasResponse message from the specified reader or buffer, length delimited. + * Decodes a GetSrvKeyspaceNamesResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.DeleteCellsAliasResponse + * @memberof vtctldata.GetSrvKeyspaceNamesResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.DeleteCellsAliasResponse} DeleteCellsAliasResponse + * @returns {vtctldata.GetSrvKeyspaceNamesResponse} GetSrvKeyspaceNamesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteCellsAliasResponse.decodeDelimited = function decodeDelimited(reader) { + GetSrvKeyspaceNamesResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a DeleteCellsAliasResponse message. + * Verifies a GetSrvKeyspaceNamesResponse message. 
* @function verify - * @memberof vtctldata.DeleteCellsAliasResponse + * @memberof vtctldata.GetSrvKeyspaceNamesResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - DeleteCellsAliasResponse.verify = function verify(message) { + GetSrvKeyspaceNamesResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.names != null && message.hasOwnProperty("names")) { + if (!$util.isObject(message.names)) + return "names: object expected"; + let key = Object.keys(message.names); + for (let i = 0; i < key.length; ++i) { + let error = $root.vtctldata.GetSrvKeyspaceNamesResponse.NameList.verify(message.names[key[i]]); + if (error) + return "names." + error; + } + } return null; }; /** - * Creates a DeleteCellsAliasResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetSrvKeyspaceNamesResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.DeleteCellsAliasResponse + * @memberof vtctldata.GetSrvKeyspaceNamesResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.DeleteCellsAliasResponse} DeleteCellsAliasResponse + * @returns {vtctldata.GetSrvKeyspaceNamesResponse} GetSrvKeyspaceNamesResponse */ - DeleteCellsAliasResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.DeleteCellsAliasResponse) + GetSrvKeyspaceNamesResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetSrvKeyspaceNamesResponse) return object; - return new $root.vtctldata.DeleteCellsAliasResponse(); + let message = new $root.vtctldata.GetSrvKeyspaceNamesResponse(); + if (object.names) { + if (typeof object.names !== "object") + throw TypeError(".vtctldata.GetSrvKeyspaceNamesResponse.names: object expected"); + message.names = {}; + for (let keys = Object.keys(object.names), i = 0; i < keys.length; ++i) { + if (typeof object.names[keys[i]] !== "object") + throw TypeError(".vtctldata.GetSrvKeyspaceNamesResponse.names: object expected"); + message.names[keys[i]] = $root.vtctldata.GetSrvKeyspaceNamesResponse.NameList.fromObject(object.names[keys[i]]); + } + } + return message; }; /** - * Creates a plain object from a DeleteCellsAliasResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetSrvKeyspaceNamesResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.DeleteCellsAliasResponse + * @memberof vtctldata.GetSrvKeyspaceNamesResponse * @static - * @param {vtctldata.DeleteCellsAliasResponse} message DeleteCellsAliasResponse + * @param {vtctldata.GetSrvKeyspaceNamesResponse} message GetSrvKeyspaceNamesResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - DeleteCellsAliasResponse.toObject = function toObject() { - return {}; + GetSrvKeyspaceNamesResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.objects || options.defaults) + object.names = {}; + let keys2; + if (message.names && (keys2 = Object.keys(message.names)).length) { + object.names = {}; + for (let j = 0; j < keys2.length; ++j) + object.names[keys2[j]] = $root.vtctldata.GetSrvKeyspaceNamesResponse.NameList.toObject(message.names[keys2[j]], options); + } + return object; }; /** - * Converts this DeleteCellsAliasResponse to JSON. + * Converts this GetSrvKeyspaceNamesResponse to JSON. 
* @function toJSON - * @memberof vtctldata.DeleteCellsAliasResponse + * @memberof vtctldata.GetSrvKeyspaceNamesResponse * @instance * @returns {Object.} JSON object */ - DeleteCellsAliasResponse.prototype.toJSON = function toJSON() { + GetSrvKeyspaceNamesResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for DeleteCellsAliasResponse + * Gets the default type url for GetSrvKeyspaceNamesResponse * @function getTypeUrl - * @memberof vtctldata.DeleteCellsAliasResponse + * @memberof vtctldata.GetSrvKeyspaceNamesResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - DeleteCellsAliasResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetSrvKeyspaceNamesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.DeleteCellsAliasResponse"; + return typeUrlPrefix + "/vtctldata.GetSrvKeyspaceNamesResponse"; }; - return DeleteCellsAliasResponse; + GetSrvKeyspaceNamesResponse.NameList = (function() { + + /** + * Properties of a NameList. + * @memberof vtctldata.GetSrvKeyspaceNamesResponse + * @interface INameList + * @property {Array.|null} [names] NameList names + */ + + /** + * Constructs a new NameList. + * @memberof vtctldata.GetSrvKeyspaceNamesResponse + * @classdesc Represents a NameList. + * @implements INameList + * @constructor + * @param {vtctldata.GetSrvKeyspaceNamesResponse.INameList=} [properties] Properties to set + */ + function NameList(properties) { + this.names = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * NameList names. 
+ * @member {Array.} names + * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * @instance + */ + NameList.prototype.names = $util.emptyArray; + + /** + * Creates a new NameList instance using the specified properties. + * @function create + * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * @static + * @param {vtctldata.GetSrvKeyspaceNamesResponse.INameList=} [properties] Properties to set + * @returns {vtctldata.GetSrvKeyspaceNamesResponse.NameList} NameList instance + */ + NameList.create = function create(properties) { + return new NameList(properties); + }; + + /** + * Encodes the specified NameList message. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesResponse.NameList.verify|verify} messages. + * @function encode + * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * @static + * @param {vtctldata.GetSrvKeyspaceNamesResponse.INameList} message NameList message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + NameList.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.names != null && message.names.length) + for (let i = 0; i < message.names.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.names[i]); + return writer; + }; + + /** + * Encodes the specified NameList message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesResponse.NameList.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * @static + * @param {vtctldata.GetSrvKeyspaceNamesResponse.INameList} message NameList message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + NameList.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a NameList message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.GetSrvKeyspaceNamesResponse.NameList} NameList + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + NameList.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetSrvKeyspaceNamesResponse.NameList(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.names && message.names.length)) + message.names = []; + message.names.push(reader.string()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a NameList message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.GetSrvKeyspaceNamesResponse.NameList} NameList + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + NameList.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a NameList message. + * @function verify + * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + NameList.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.names != null && message.hasOwnProperty("names")) { + if (!Array.isArray(message.names)) + return "names: array expected"; + for (let i = 0; i < message.names.length; ++i) + if (!$util.isString(message.names[i])) + return "names: string[] expected"; + } + return null; + }; + + /** + * Creates a NameList message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.GetSrvKeyspaceNamesResponse.NameList} NameList + */ + NameList.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetSrvKeyspaceNamesResponse.NameList) + return object; + let message = new $root.vtctldata.GetSrvKeyspaceNamesResponse.NameList(); + if (object.names) { + if (!Array.isArray(object.names)) + throw TypeError(".vtctldata.GetSrvKeyspaceNamesResponse.NameList.names: array expected"); + message.names = []; + for (let i = 0; i < object.names.length; ++i) + message.names[i] = String(object.names[i]); + } + return message; + }; + + /** + * Creates a plain object from a NameList message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * @static + * @param {vtctldata.GetSrvKeyspaceNamesResponse.NameList} message NameList + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + NameList.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.names = []; + if (message.names && message.names.length) { + object.names = []; + for (let j = 0; j < message.names.length; ++j) + object.names[j] = message.names[j]; + } + return object; + }; + + /** + * Converts this NameList to JSON. 
+ * @function toJSON + * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * @instance + * @returns {Object.} JSON object + */ + NameList.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for NameList + * @function getTypeUrl + * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + NameList.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.GetSrvKeyspaceNamesResponse.NameList"; + }; + + return NameList; + })(); + + return GetSrvKeyspaceNamesResponse; })(); - vtctldata.DeleteKeyspaceRequest = (function() { + vtctldata.GetSrvKeyspacesRequest = (function() { /** - * Properties of a DeleteKeyspaceRequest. + * Properties of a GetSrvKeyspacesRequest. * @memberof vtctldata - * @interface IDeleteKeyspaceRequest - * @property {string|null} [keyspace] DeleteKeyspaceRequest keyspace - * @property {boolean|null} [recursive] DeleteKeyspaceRequest recursive - * @property {boolean|null} [force] DeleteKeyspaceRequest force + * @interface IGetSrvKeyspacesRequest + * @property {string|null} [keyspace] GetSrvKeyspacesRequest keyspace + * @property {Array.|null} [cells] GetSrvKeyspacesRequest cells */ /** - * Constructs a new DeleteKeyspaceRequest. + * Constructs a new GetSrvKeyspacesRequest. * @memberof vtctldata - * @classdesc Represents a DeleteKeyspaceRequest. - * @implements IDeleteKeyspaceRequest + * @classdesc Represents a GetSrvKeyspacesRequest. 
+ * @implements IGetSrvKeyspacesRequest * @constructor - * @param {vtctldata.IDeleteKeyspaceRequest=} [properties] Properties to set + * @param {vtctldata.IGetSrvKeyspacesRequest=} [properties] Properties to set */ - function DeleteKeyspaceRequest(properties) { + function GetSrvKeyspacesRequest(properties) { + this.cells = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -107506,90 +127982,81 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * DeleteKeyspaceRequest keyspace. + * GetSrvKeyspacesRequest keyspace. * @member {string} keyspace - * @memberof vtctldata.DeleteKeyspaceRequest - * @instance - */ - DeleteKeyspaceRequest.prototype.keyspace = ""; - - /** - * DeleteKeyspaceRequest recursive. - * @member {boolean} recursive - * @memberof vtctldata.DeleteKeyspaceRequest + * @memberof vtctldata.GetSrvKeyspacesRequest * @instance */ - DeleteKeyspaceRequest.prototype.recursive = false; + GetSrvKeyspacesRequest.prototype.keyspace = ""; /** - * DeleteKeyspaceRequest force. - * @member {boolean} force - * @memberof vtctldata.DeleteKeyspaceRequest + * GetSrvKeyspacesRequest cells. + * @member {Array.} cells + * @memberof vtctldata.GetSrvKeyspacesRequest * @instance */ - DeleteKeyspaceRequest.prototype.force = false; + GetSrvKeyspacesRequest.prototype.cells = $util.emptyArray; /** - * Creates a new DeleteKeyspaceRequest instance using the specified properties. + * Creates a new GetSrvKeyspacesRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.DeleteKeyspaceRequest + * @memberof vtctldata.GetSrvKeyspacesRequest * @static - * @param {vtctldata.IDeleteKeyspaceRequest=} [properties] Properties to set - * @returns {vtctldata.DeleteKeyspaceRequest} DeleteKeyspaceRequest instance + * @param {vtctldata.IGetSrvKeyspacesRequest=} [properties] Properties to set + * @returns {vtctldata.GetSrvKeyspacesRequest} GetSrvKeyspacesRequest instance */ - DeleteKeyspaceRequest.create = function create(properties) { - return new DeleteKeyspaceRequest(properties); + GetSrvKeyspacesRequest.create = function create(properties) { + return new GetSrvKeyspacesRequest(properties); }; /** - * Encodes the specified DeleteKeyspaceRequest message. Does not implicitly {@link vtctldata.DeleteKeyspaceRequest.verify|verify} messages. + * Encodes the specified GetSrvKeyspacesRequest message. Does not implicitly {@link vtctldata.GetSrvKeyspacesRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.DeleteKeyspaceRequest + * @memberof vtctldata.GetSrvKeyspacesRequest * @static - * @param {vtctldata.IDeleteKeyspaceRequest} message DeleteKeyspaceRequest message or plain object to encode + * @param {vtctldata.IGetSrvKeyspacesRequest} message GetSrvKeyspacesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteKeyspaceRequest.encode = function encode(message, writer) { + GetSrvKeyspacesRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.recursive != null && Object.hasOwnProperty.call(message, "recursive")) - writer.uint32(/* id 2, wireType 0 =*/16).bool(message.recursive); - if (message.force != null && Object.hasOwnProperty.call(message, "force")) - writer.uint32(/* id 3, wireType 0 
=*/24).bool(message.force); + if (message.cells != null && message.cells.length) + for (let i = 0; i < message.cells.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.cells[i]); return writer; }; /** - * Encodes the specified DeleteKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteKeyspaceRequest.verify|verify} messages. + * Encodes the specified GetSrvKeyspacesRequest message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspacesRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.DeleteKeyspaceRequest + * @memberof vtctldata.GetSrvKeyspacesRequest * @static - * @param {vtctldata.IDeleteKeyspaceRequest} message DeleteKeyspaceRequest message or plain object to encode + * @param {vtctldata.IGetSrvKeyspacesRequest} message GetSrvKeyspacesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteKeyspaceRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetSrvKeyspacesRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a DeleteKeyspaceRequest message from the specified reader or buffer. + * Decodes a GetSrvKeyspacesRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.DeleteKeyspaceRequest + * @memberof vtctldata.GetSrvKeyspacesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.DeleteKeyspaceRequest} DeleteKeyspaceRequest + * @returns {vtctldata.GetSrvKeyspacesRequest} GetSrvKeyspacesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteKeyspaceRequest.decode = function decode(reader, length) { + GetSrvKeyspacesRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteKeyspaceRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetSrvKeyspacesRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -107598,11 +128065,9 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 2: { - message.recursive = reader.bool(); - break; - } - case 3: { - message.force = reader.bool(); + if (!(message.cells && message.cells.length)) + message.cells = []; + message.cells.push(reader.string()); break; } default: @@ -107614,138 +128079,144 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a DeleteKeyspaceRequest message from the specified reader or buffer, length delimited. + * Decodes a GetSrvKeyspacesRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.DeleteKeyspaceRequest + * @memberof vtctldata.GetSrvKeyspacesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.DeleteKeyspaceRequest} DeleteKeyspaceRequest + * @returns {vtctldata.GetSrvKeyspacesRequest} GetSrvKeyspacesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteKeyspaceRequest.decodeDelimited = function decodeDelimited(reader) { + GetSrvKeyspacesRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a DeleteKeyspaceRequest message. + * Verifies a GetSrvKeyspacesRequest message. * @function verify - * @memberof vtctldata.DeleteKeyspaceRequest + * @memberof vtctldata.GetSrvKeyspacesRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - DeleteKeyspaceRequest.verify = function verify(message) { + GetSrvKeyspacesRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.keyspace != null && message.hasOwnProperty("keyspace")) if (!$util.isString(message.keyspace)) return "keyspace: string expected"; - if (message.recursive != null && message.hasOwnProperty("recursive")) - if (typeof message.recursive !== "boolean") - return "recursive: boolean expected"; - if (message.force != null && message.hasOwnProperty("force")) - if (typeof message.force !== "boolean") - return "force: boolean expected"; + if (message.cells != null && message.hasOwnProperty("cells")) { + if (!Array.isArray(message.cells)) + return "cells: array expected"; + for (let i = 0; i < message.cells.length; ++i) + if (!$util.isString(message.cells[i])) + return "cells: 
string[] expected"; + } return null; }; /** - * Creates a DeleteKeyspaceRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetSrvKeyspacesRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.DeleteKeyspaceRequest + * @memberof vtctldata.GetSrvKeyspacesRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.DeleteKeyspaceRequest} DeleteKeyspaceRequest + * @returns {vtctldata.GetSrvKeyspacesRequest} GetSrvKeyspacesRequest */ - DeleteKeyspaceRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.DeleteKeyspaceRequest) + GetSrvKeyspacesRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetSrvKeyspacesRequest) return object; - let message = new $root.vtctldata.DeleteKeyspaceRequest(); + let message = new $root.vtctldata.GetSrvKeyspacesRequest(); if (object.keyspace != null) message.keyspace = String(object.keyspace); - if (object.recursive != null) - message.recursive = Boolean(object.recursive); - if (object.force != null) - message.force = Boolean(object.force); + if (object.cells) { + if (!Array.isArray(object.cells)) + throw TypeError(".vtctldata.GetSrvKeyspacesRequest.cells: array expected"); + message.cells = []; + for (let i = 0; i < object.cells.length; ++i) + message.cells[i] = String(object.cells[i]); + } return message; }; /** - * Creates a plain object from a DeleteKeyspaceRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetSrvKeyspacesRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.DeleteKeyspaceRequest + * @memberof vtctldata.GetSrvKeyspacesRequest * @static - * @param {vtctldata.DeleteKeyspaceRequest} message DeleteKeyspaceRequest + * @param {vtctldata.GetSrvKeyspacesRequest} message GetSrvKeyspacesRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - DeleteKeyspaceRequest.toObject = function toObject(message, options) { + GetSrvKeyspacesRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { + if (options.arrays || options.defaults) + object.cells = []; + if (options.defaults) object.keyspace = ""; - object.recursive = false; - object.force = false; - } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; - if (message.recursive != null && message.hasOwnProperty("recursive")) - object.recursive = message.recursive; - if (message.force != null && message.hasOwnProperty("force")) - object.force = message.force; + if (message.cells && message.cells.length) { + object.cells = []; + for (let j = 0; j < message.cells.length; ++j) + object.cells[j] = message.cells[j]; + } return object; }; /** - * Converts this DeleteKeyspaceRequest to JSON. + * Converts this GetSrvKeyspacesRequest to JSON. 
* @function toJSON - * @memberof vtctldata.DeleteKeyspaceRequest + * @memberof vtctldata.GetSrvKeyspacesRequest * @instance * @returns {Object.} JSON object */ - DeleteKeyspaceRequest.prototype.toJSON = function toJSON() { + GetSrvKeyspacesRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for DeleteKeyspaceRequest + * Gets the default type url for GetSrvKeyspacesRequest * @function getTypeUrl - * @memberof vtctldata.DeleteKeyspaceRequest + * @memberof vtctldata.GetSrvKeyspacesRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - DeleteKeyspaceRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetSrvKeyspacesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.DeleteKeyspaceRequest"; + return typeUrlPrefix + "/vtctldata.GetSrvKeyspacesRequest"; }; - return DeleteKeyspaceRequest; + return GetSrvKeyspacesRequest; })(); - vtctldata.DeleteKeyspaceResponse = (function() { + vtctldata.GetSrvKeyspacesResponse = (function() { /** - * Properties of a DeleteKeyspaceResponse. + * Properties of a GetSrvKeyspacesResponse. * @memberof vtctldata - * @interface IDeleteKeyspaceResponse + * @interface IGetSrvKeyspacesResponse + * @property {Object.|null} [srv_keyspaces] GetSrvKeyspacesResponse srv_keyspaces */ /** - * Constructs a new DeleteKeyspaceResponse. + * Constructs a new GetSrvKeyspacesResponse. * @memberof vtctldata - * @classdesc Represents a DeleteKeyspaceResponse. - * @implements IDeleteKeyspaceResponse + * @classdesc Represents a GetSrvKeyspacesResponse. 
+ * @implements IGetSrvKeyspacesResponse * @constructor - * @param {vtctldata.IDeleteKeyspaceResponse=} [properties] Properties to set + * @param {vtctldata.IGetSrvKeyspacesResponse=} [properties] Properties to set */ - function DeleteKeyspaceResponse(properties) { + function GetSrvKeyspacesResponse(properties) { + this.srv_keyspaces = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -107753,63 +128224,99 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new DeleteKeyspaceResponse instance using the specified properties. + * GetSrvKeyspacesResponse srv_keyspaces. + * @member {Object.} srv_keyspaces + * @memberof vtctldata.GetSrvKeyspacesResponse + * @instance + */ + GetSrvKeyspacesResponse.prototype.srv_keyspaces = $util.emptyObject; + + /** + * Creates a new GetSrvKeyspacesResponse instance using the specified properties. * @function create - * @memberof vtctldata.DeleteKeyspaceResponse + * @memberof vtctldata.GetSrvKeyspacesResponse * @static - * @param {vtctldata.IDeleteKeyspaceResponse=} [properties] Properties to set - * @returns {vtctldata.DeleteKeyspaceResponse} DeleteKeyspaceResponse instance + * @param {vtctldata.IGetSrvKeyspacesResponse=} [properties] Properties to set + * @returns {vtctldata.GetSrvKeyspacesResponse} GetSrvKeyspacesResponse instance */ - DeleteKeyspaceResponse.create = function create(properties) { - return new DeleteKeyspaceResponse(properties); + GetSrvKeyspacesResponse.create = function create(properties) { + return new GetSrvKeyspacesResponse(properties); }; /** - * Encodes the specified DeleteKeyspaceResponse message. Does not implicitly {@link vtctldata.DeleteKeyspaceResponse.verify|verify} messages. + * Encodes the specified GetSrvKeyspacesResponse message. Does not implicitly {@link vtctldata.GetSrvKeyspacesResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.DeleteKeyspaceResponse + * @memberof vtctldata.GetSrvKeyspacesResponse * @static - * @param {vtctldata.IDeleteKeyspaceResponse} message DeleteKeyspaceResponse message or plain object to encode + * @param {vtctldata.IGetSrvKeyspacesResponse} message GetSrvKeyspacesResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteKeyspaceResponse.encode = function encode(message, writer) { + GetSrvKeyspacesResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.srv_keyspaces != null && Object.hasOwnProperty.call(message, "srv_keyspaces")) + for (let keys = Object.keys(message.srv_keyspaces), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.topodata.SrvKeyspace.encode(message.srv_keyspaces[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } return writer; }; /** - * Encodes the specified DeleteKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteKeyspaceResponse.verify|verify} messages. + * Encodes the specified GetSrvKeyspacesResponse message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspacesResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.DeleteKeyspaceResponse + * @memberof vtctldata.GetSrvKeyspacesResponse * @static - * @param {vtctldata.IDeleteKeyspaceResponse} message DeleteKeyspaceResponse message or plain object to encode + * @param {vtctldata.IGetSrvKeyspacesResponse} message GetSrvKeyspacesResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteKeyspaceResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetSrvKeyspacesResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a DeleteKeyspaceResponse message from the specified reader or buffer. + * Decodes a GetSrvKeyspacesResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.DeleteKeyspaceResponse + * @memberof vtctldata.GetSrvKeyspacesResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.DeleteKeyspaceResponse} DeleteKeyspaceResponse + * @returns {vtctldata.GetSrvKeyspacesResponse} GetSrvKeyspacesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteKeyspaceResponse.decode = function decode(reader, length) { + GetSrvKeyspacesResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteKeyspaceResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetSrvKeyspacesResponse(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + if (message.srv_keyspaces === $util.emptyObject) + message.srv_keyspaces = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.topodata.SrvKeyspace.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.srv_keyspaces[key] = value; + break; + } default: reader.skipType(tag & 7); break; @@ -107819,113 +128326,149 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a DeleteKeyspaceResponse message from the specified reader or buffer, length delimited. + * Decodes a GetSrvKeyspacesResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.DeleteKeyspaceResponse + * @memberof vtctldata.GetSrvKeyspacesResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.DeleteKeyspaceResponse} DeleteKeyspaceResponse + * @returns {vtctldata.GetSrvKeyspacesResponse} GetSrvKeyspacesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteKeyspaceResponse.decodeDelimited = function decodeDelimited(reader) { + GetSrvKeyspacesResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a DeleteKeyspaceResponse message. + * Verifies a GetSrvKeyspacesResponse message. 
* @function verify - * @memberof vtctldata.DeleteKeyspaceResponse + * @memberof vtctldata.GetSrvKeyspacesResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - DeleteKeyspaceResponse.verify = function verify(message) { + GetSrvKeyspacesResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.srv_keyspaces != null && message.hasOwnProperty("srv_keyspaces")) { + if (!$util.isObject(message.srv_keyspaces)) + return "srv_keyspaces: object expected"; + let key = Object.keys(message.srv_keyspaces); + for (let i = 0; i < key.length; ++i) { + let error = $root.topodata.SrvKeyspace.verify(message.srv_keyspaces[key[i]]); + if (error) + return "srv_keyspaces." + error; + } + } return null; }; /** - * Creates a DeleteKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetSrvKeyspacesResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.DeleteKeyspaceResponse + * @memberof vtctldata.GetSrvKeyspacesResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.DeleteKeyspaceResponse} DeleteKeyspaceResponse + * @returns {vtctldata.GetSrvKeyspacesResponse} GetSrvKeyspacesResponse */ - DeleteKeyspaceResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.DeleteKeyspaceResponse) + GetSrvKeyspacesResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetSrvKeyspacesResponse) return object; - return new $root.vtctldata.DeleteKeyspaceResponse(); + let message = new $root.vtctldata.GetSrvKeyspacesResponse(); + if (object.srv_keyspaces) { + if (typeof object.srv_keyspaces !== "object") + throw TypeError(".vtctldata.GetSrvKeyspacesResponse.srv_keyspaces: object expected"); + message.srv_keyspaces = {}; + for (let keys = Object.keys(object.srv_keyspaces), i = 0; i < keys.length; ++i) { + if (typeof object.srv_keyspaces[keys[i]] !== "object") + throw TypeError(".vtctldata.GetSrvKeyspacesResponse.srv_keyspaces: object expected"); + message.srv_keyspaces[keys[i]] = $root.topodata.SrvKeyspace.fromObject(object.srv_keyspaces[keys[i]]); + } + } + return message; }; /** - * Creates a plain object from a DeleteKeyspaceResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetSrvKeyspacesResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.DeleteKeyspaceResponse + * @memberof vtctldata.GetSrvKeyspacesResponse * @static - * @param {vtctldata.DeleteKeyspaceResponse} message DeleteKeyspaceResponse + * @param {vtctldata.GetSrvKeyspacesResponse} message GetSrvKeyspacesResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - DeleteKeyspaceResponse.toObject = function toObject() { - return {}; + GetSrvKeyspacesResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.objects || options.defaults) + object.srv_keyspaces = {}; + let keys2; + if (message.srv_keyspaces && (keys2 = Object.keys(message.srv_keyspaces)).length) { + object.srv_keyspaces = {}; + for (let j = 0; j < keys2.length; ++j) + object.srv_keyspaces[keys2[j]] = $root.topodata.SrvKeyspace.toObject(message.srv_keyspaces[keys2[j]], options); + } + return object; }; /** - * Converts this DeleteKeyspaceResponse to JSON. + * Converts this GetSrvKeyspacesResponse to JSON. 
* @function toJSON - * @memberof vtctldata.DeleteKeyspaceResponse + * @memberof vtctldata.GetSrvKeyspacesResponse * @instance * @returns {Object.} JSON object */ - DeleteKeyspaceResponse.prototype.toJSON = function toJSON() { + GetSrvKeyspacesResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for DeleteKeyspaceResponse + * Gets the default type url for GetSrvKeyspacesResponse * @function getTypeUrl - * @memberof vtctldata.DeleteKeyspaceResponse + * @memberof vtctldata.GetSrvKeyspacesResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - DeleteKeyspaceResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetSrvKeyspacesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.DeleteKeyspaceResponse"; + return typeUrlPrefix + "/vtctldata.GetSrvKeyspacesResponse"; }; - return DeleteKeyspaceResponse; + return GetSrvKeyspacesResponse; })(); - vtctldata.DeleteShardsRequest = (function() { + vtctldata.UpdateThrottlerConfigRequest = (function() { /** - * Properties of a DeleteShardsRequest. + * Properties of an UpdateThrottlerConfigRequest. 
* @memberof vtctldata - * @interface IDeleteShardsRequest - * @property {Array.|null} [shards] DeleteShardsRequest shards - * @property {boolean|null} [recursive] DeleteShardsRequest recursive - * @property {boolean|null} [even_if_serving] DeleteShardsRequest even_if_serving - * @property {boolean|null} [force] DeleteShardsRequest force + * @interface IUpdateThrottlerConfigRequest + * @property {string|null} [keyspace] UpdateThrottlerConfigRequest keyspace + * @property {boolean|null} [enable] UpdateThrottlerConfigRequest enable + * @property {boolean|null} [disable] UpdateThrottlerConfigRequest disable + * @property {number|null} [threshold] UpdateThrottlerConfigRequest threshold + * @property {string|null} [custom_query] UpdateThrottlerConfigRequest custom_query + * @property {boolean|null} [custom_query_set] UpdateThrottlerConfigRequest custom_query_set + * @property {boolean|null} [check_as_check_self] UpdateThrottlerConfigRequest check_as_check_self + * @property {boolean|null} [check_as_check_shard] UpdateThrottlerConfigRequest check_as_check_shard + * @property {topodata.IThrottledAppRule|null} [throttled_app] UpdateThrottlerConfigRequest throttled_app */ /** - * Constructs a new DeleteShardsRequest. + * Constructs a new UpdateThrottlerConfigRequest. * @memberof vtctldata - * @classdesc Represents a DeleteShardsRequest. - * @implements IDeleteShardsRequest + * @classdesc Represents an UpdateThrottlerConfigRequest. 
+ * @implements IUpdateThrottlerConfigRequest * @constructor - * @param {vtctldata.IDeleteShardsRequest=} [properties] Properties to set + * @param {vtctldata.IUpdateThrottlerConfigRequest=} [properties] Properties to set */ - function DeleteShardsRequest(properties) { - this.shards = []; + function UpdateThrottlerConfigRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -107933,120 +128476,187 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * DeleteShardsRequest shards. - * @member {Array.} shards - * @memberof vtctldata.DeleteShardsRequest + * UpdateThrottlerConfigRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.UpdateThrottlerConfigRequest * @instance */ - DeleteShardsRequest.prototype.shards = $util.emptyArray; + UpdateThrottlerConfigRequest.prototype.keyspace = ""; /** - * DeleteShardsRequest recursive. - * @member {boolean} recursive - * @memberof vtctldata.DeleteShardsRequest + * UpdateThrottlerConfigRequest enable. + * @member {boolean} enable + * @memberof vtctldata.UpdateThrottlerConfigRequest * @instance */ - DeleteShardsRequest.prototype.recursive = false; + UpdateThrottlerConfigRequest.prototype.enable = false; /** - * DeleteShardsRequest even_if_serving. - * @member {boolean} even_if_serving - * @memberof vtctldata.DeleteShardsRequest + * UpdateThrottlerConfigRequest disable. + * @member {boolean} disable + * @memberof vtctldata.UpdateThrottlerConfigRequest * @instance */ - DeleteShardsRequest.prototype.even_if_serving = false; + UpdateThrottlerConfigRequest.prototype.disable = false; /** - * DeleteShardsRequest force. - * @member {boolean} force - * @memberof vtctldata.DeleteShardsRequest + * UpdateThrottlerConfigRequest threshold. 
+ * @member {number} threshold + * @memberof vtctldata.UpdateThrottlerConfigRequest * @instance */ - DeleteShardsRequest.prototype.force = false; + UpdateThrottlerConfigRequest.prototype.threshold = 0; /** - * Creates a new DeleteShardsRequest instance using the specified properties. + * UpdateThrottlerConfigRequest custom_query. + * @member {string} custom_query + * @memberof vtctldata.UpdateThrottlerConfigRequest + * @instance + */ + UpdateThrottlerConfigRequest.prototype.custom_query = ""; + + /** + * UpdateThrottlerConfigRequest custom_query_set. + * @member {boolean} custom_query_set + * @memberof vtctldata.UpdateThrottlerConfigRequest + * @instance + */ + UpdateThrottlerConfigRequest.prototype.custom_query_set = false; + + /** + * UpdateThrottlerConfigRequest check_as_check_self. + * @member {boolean} check_as_check_self + * @memberof vtctldata.UpdateThrottlerConfigRequest + * @instance + */ + UpdateThrottlerConfigRequest.prototype.check_as_check_self = false; + + /** + * UpdateThrottlerConfigRequest check_as_check_shard. + * @member {boolean} check_as_check_shard + * @memberof vtctldata.UpdateThrottlerConfigRequest + * @instance + */ + UpdateThrottlerConfigRequest.prototype.check_as_check_shard = false; + + /** + * UpdateThrottlerConfigRequest throttled_app. + * @member {topodata.IThrottledAppRule|null|undefined} throttled_app + * @memberof vtctldata.UpdateThrottlerConfigRequest + * @instance + */ + UpdateThrottlerConfigRequest.prototype.throttled_app = null; + + /** + * Creates a new UpdateThrottlerConfigRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.DeleteShardsRequest + * @memberof vtctldata.UpdateThrottlerConfigRequest * @static - * @param {vtctldata.IDeleteShardsRequest=} [properties] Properties to set - * @returns {vtctldata.DeleteShardsRequest} DeleteShardsRequest instance + * @param {vtctldata.IUpdateThrottlerConfigRequest=} [properties] Properties to set + * @returns {vtctldata.UpdateThrottlerConfigRequest} UpdateThrottlerConfigRequest instance */ - DeleteShardsRequest.create = function create(properties) { - return new DeleteShardsRequest(properties); + UpdateThrottlerConfigRequest.create = function create(properties) { + return new UpdateThrottlerConfigRequest(properties); }; /** - * Encodes the specified DeleteShardsRequest message. Does not implicitly {@link vtctldata.DeleteShardsRequest.verify|verify} messages. + * Encodes the specified UpdateThrottlerConfigRequest message. Does not implicitly {@link vtctldata.UpdateThrottlerConfigRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.DeleteShardsRequest + * @memberof vtctldata.UpdateThrottlerConfigRequest * @static - * @param {vtctldata.IDeleteShardsRequest} message DeleteShardsRequest message or plain object to encode + * @param {vtctldata.IUpdateThrottlerConfigRequest} message UpdateThrottlerConfigRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteShardsRequest.encode = function encode(message, writer) { + UpdateThrottlerConfigRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.shards != null && message.shards.length) - for (let i = 0; i < message.shards.length; ++i) - $root.vtctldata.Shard.encode(message.shards[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.recursive != null && Object.hasOwnProperty.call(message, "recursive")) - writer.uint32(/* id 2, wireType 0 =*/16).bool(message.recursive); - if 
(message.even_if_serving != null && Object.hasOwnProperty.call(message, "even_if_serving")) - writer.uint32(/* id 4, wireType 0 =*/32).bool(message.even_if_serving); - if (message.force != null && Object.hasOwnProperty.call(message, "force")) - writer.uint32(/* id 5, wireType 0 =*/40).bool(message.force); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.enable != null && Object.hasOwnProperty.call(message, "enable")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.enable); + if (message.disable != null && Object.hasOwnProperty.call(message, "disable")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.disable); + if (message.threshold != null && Object.hasOwnProperty.call(message, "threshold")) + writer.uint32(/* id 4, wireType 1 =*/33).double(message.threshold); + if (message.custom_query != null && Object.hasOwnProperty.call(message, "custom_query")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.custom_query); + if (message.custom_query_set != null && Object.hasOwnProperty.call(message, "custom_query_set")) + writer.uint32(/* id 6, wireType 0 =*/48).bool(message.custom_query_set); + if (message.check_as_check_self != null && Object.hasOwnProperty.call(message, "check_as_check_self")) + writer.uint32(/* id 7, wireType 0 =*/56).bool(message.check_as_check_self); + if (message.check_as_check_shard != null && Object.hasOwnProperty.call(message, "check_as_check_shard")) + writer.uint32(/* id 8, wireType 0 =*/64).bool(message.check_as_check_shard); + if (message.throttled_app != null && Object.hasOwnProperty.call(message, "throttled_app")) + $root.topodata.ThrottledAppRule.encode(message.throttled_app, writer.uint32(/* id 9, wireType 2 =*/74).fork()).ldelim(); return writer; }; /** - * Encodes the specified DeleteShardsRequest message, length delimited. 
Does not implicitly {@link vtctldata.DeleteShardsRequest.verify|verify} messages. + * Encodes the specified UpdateThrottlerConfigRequest message, length delimited. Does not implicitly {@link vtctldata.UpdateThrottlerConfigRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.DeleteShardsRequest + * @memberof vtctldata.UpdateThrottlerConfigRequest * @static - * @param {vtctldata.IDeleteShardsRequest} message DeleteShardsRequest message or plain object to encode + * @param {vtctldata.IUpdateThrottlerConfigRequest} message UpdateThrottlerConfigRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteShardsRequest.encodeDelimited = function encodeDelimited(message, writer) { + UpdateThrottlerConfigRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a DeleteShardsRequest message from the specified reader or buffer. + * Decodes an UpdateThrottlerConfigRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.DeleteShardsRequest + * @memberof vtctldata.UpdateThrottlerConfigRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.DeleteShardsRequest} DeleteShardsRequest + * @returns {vtctldata.UpdateThrottlerConfigRequest} UpdateThrottlerConfigRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteShardsRequest.decode = function decode(reader, length) { + UpdateThrottlerConfigRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.DeleteShardsRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.UpdateThrottlerConfigRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.shards && message.shards.length)) - message.shards = []; - message.shards.push($root.vtctldata.Shard.decode(reader, reader.uint32())); + message.keyspace = reader.string(); break; } case 2: { - message.recursive = reader.bool(); + message.enable = reader.bool(); + break; + } + case 3: { + message.disable = reader.bool(); break; } case 4: { - message.even_if_serving = reader.bool(); + message.threshold = reader.double(); break; } case 5: { - message.force = reader.bool(); + message.custom_query = reader.string(); + break; + } + case 6: { + message.custom_query_set = reader.bool(); + break; + } + case 7: { + message.check_as_check_self = reader.bool(); + break; + } + case 8: { + message.check_as_check_shard = reader.bool(); + break; + } + case 9: { + message.throttled_app = $root.topodata.ThrottledAppRule.decode(reader, reader.uint32()); break; } default: @@ -108058,164 +128668,191 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a DeleteShardsRequest message from the specified reader or buffer, length delimited. + * Decodes an UpdateThrottlerConfigRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.DeleteShardsRequest + * @memberof vtctldata.UpdateThrottlerConfigRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.DeleteShardsRequest} DeleteShardsRequest + * @returns {vtctldata.UpdateThrottlerConfigRequest} UpdateThrottlerConfigRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteShardsRequest.decodeDelimited = function decodeDelimited(reader) { + UpdateThrottlerConfigRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a DeleteShardsRequest message. + * Verifies an UpdateThrottlerConfigRequest message. * @function verify - * @memberof vtctldata.DeleteShardsRequest + * @memberof vtctldata.UpdateThrottlerConfigRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - DeleteShardsRequest.verify = function verify(message) { + UpdateThrottlerConfigRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.shards != null && message.hasOwnProperty("shards")) { - if (!Array.isArray(message.shards)) - return "shards: array expected"; - for (let i = 0; i < message.shards.length; ++i) { - let error = $root.vtctldata.Shard.verify(message.shards[i]); - if (error) - return "shards." 
+ error; - } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.enable != null && message.hasOwnProperty("enable")) + if (typeof message.enable !== "boolean") + return "enable: boolean expected"; + if (message.disable != null && message.hasOwnProperty("disable")) + if (typeof message.disable !== "boolean") + return "disable: boolean expected"; + if (message.threshold != null && message.hasOwnProperty("threshold")) + if (typeof message.threshold !== "number") + return "threshold: number expected"; + if (message.custom_query != null && message.hasOwnProperty("custom_query")) + if (!$util.isString(message.custom_query)) + return "custom_query: string expected"; + if (message.custom_query_set != null && message.hasOwnProperty("custom_query_set")) + if (typeof message.custom_query_set !== "boolean") + return "custom_query_set: boolean expected"; + if (message.check_as_check_self != null && message.hasOwnProperty("check_as_check_self")) + if (typeof message.check_as_check_self !== "boolean") + return "check_as_check_self: boolean expected"; + if (message.check_as_check_shard != null && message.hasOwnProperty("check_as_check_shard")) + if (typeof message.check_as_check_shard !== "boolean") + return "check_as_check_shard: boolean expected"; + if (message.throttled_app != null && message.hasOwnProperty("throttled_app")) { + let error = $root.topodata.ThrottledAppRule.verify(message.throttled_app); + if (error) + return "throttled_app." 
+ error; } - if (message.recursive != null && message.hasOwnProperty("recursive")) - if (typeof message.recursive !== "boolean") - return "recursive: boolean expected"; - if (message.even_if_serving != null && message.hasOwnProperty("even_if_serving")) - if (typeof message.even_if_serving !== "boolean") - return "even_if_serving: boolean expected"; - if (message.force != null && message.hasOwnProperty("force")) - if (typeof message.force !== "boolean") - return "force: boolean expected"; return null; }; /** - * Creates a DeleteShardsRequest message from a plain object. Also converts values to their respective internal types. + * Creates an UpdateThrottlerConfigRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.DeleteShardsRequest + * @memberof vtctldata.UpdateThrottlerConfigRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.DeleteShardsRequest} DeleteShardsRequest + * @returns {vtctldata.UpdateThrottlerConfigRequest} UpdateThrottlerConfigRequest */ - DeleteShardsRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.DeleteShardsRequest) + UpdateThrottlerConfigRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.UpdateThrottlerConfigRequest) return object; - let message = new $root.vtctldata.DeleteShardsRequest(); - if (object.shards) { - if (!Array.isArray(object.shards)) - throw TypeError(".vtctldata.DeleteShardsRequest.shards: array expected"); - message.shards = []; - for (let i = 0; i < object.shards.length; ++i) { - if (typeof object.shards[i] !== "object") - throw TypeError(".vtctldata.DeleteShardsRequest.shards: object expected"); - message.shards[i] = $root.vtctldata.Shard.fromObject(object.shards[i]); - } + let message = new $root.vtctldata.UpdateThrottlerConfigRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if 
(object.enable != null) + message.enable = Boolean(object.enable); + if (object.disable != null) + message.disable = Boolean(object.disable); + if (object.threshold != null) + message.threshold = Number(object.threshold); + if (object.custom_query != null) + message.custom_query = String(object.custom_query); + if (object.custom_query_set != null) + message.custom_query_set = Boolean(object.custom_query_set); + if (object.check_as_check_self != null) + message.check_as_check_self = Boolean(object.check_as_check_self); + if (object.check_as_check_shard != null) + message.check_as_check_shard = Boolean(object.check_as_check_shard); + if (object.throttled_app != null) { + if (typeof object.throttled_app !== "object") + throw TypeError(".vtctldata.UpdateThrottlerConfigRequest.throttled_app: object expected"); + message.throttled_app = $root.topodata.ThrottledAppRule.fromObject(object.throttled_app); } - if (object.recursive != null) - message.recursive = Boolean(object.recursive); - if (object.even_if_serving != null) - message.even_if_serving = Boolean(object.even_if_serving); - if (object.force != null) - message.force = Boolean(object.force); return message; }; /** - * Creates a plain object from a DeleteShardsRequest message. Also converts values to other types if specified. + * Creates a plain object from an UpdateThrottlerConfigRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.DeleteShardsRequest + * @memberof vtctldata.UpdateThrottlerConfigRequest * @static - * @param {vtctldata.DeleteShardsRequest} message DeleteShardsRequest + * @param {vtctldata.UpdateThrottlerConfigRequest} message UpdateThrottlerConfigRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - DeleteShardsRequest.toObject = function toObject(message, options) { + UpdateThrottlerConfigRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.shards = []; if (options.defaults) { - object.recursive = false; - object.even_if_serving = false; - object.force = false; - } - if (message.shards && message.shards.length) { - object.shards = []; - for (let j = 0; j < message.shards.length; ++j) - object.shards[j] = $root.vtctldata.Shard.toObject(message.shards[j], options); + object.keyspace = ""; + object.enable = false; + object.disable = false; + object.threshold = 0; + object.custom_query = ""; + object.custom_query_set = false; + object.check_as_check_self = false; + object.check_as_check_shard = false; + object.throttled_app = null; } - if (message.recursive != null && message.hasOwnProperty("recursive")) - object.recursive = message.recursive; - if (message.even_if_serving != null && message.hasOwnProperty("even_if_serving")) - object.even_if_serving = message.even_if_serving; - if (message.force != null && message.hasOwnProperty("force")) - object.force = message.force; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.enable != null && message.hasOwnProperty("enable")) + object.enable = message.enable; + if (message.disable != null && message.hasOwnProperty("disable")) + object.disable = message.disable; + if (message.threshold != null && message.hasOwnProperty("threshold")) + object.threshold = 
options.json && !isFinite(message.threshold) ? String(message.threshold) : message.threshold; + if (message.custom_query != null && message.hasOwnProperty("custom_query")) + object.custom_query = message.custom_query; + if (message.custom_query_set != null && message.hasOwnProperty("custom_query_set")) + object.custom_query_set = message.custom_query_set; + if (message.check_as_check_self != null && message.hasOwnProperty("check_as_check_self")) + object.check_as_check_self = message.check_as_check_self; + if (message.check_as_check_shard != null && message.hasOwnProperty("check_as_check_shard")) + object.check_as_check_shard = message.check_as_check_shard; + if (message.throttled_app != null && message.hasOwnProperty("throttled_app")) + object.throttled_app = $root.topodata.ThrottledAppRule.toObject(message.throttled_app, options); return object; }; /** - * Converts this DeleteShardsRequest to JSON. + * Converts this UpdateThrottlerConfigRequest to JSON. * @function toJSON - * @memberof vtctldata.DeleteShardsRequest + * @memberof vtctldata.UpdateThrottlerConfigRequest * @instance * @returns {Object.} JSON object */ - DeleteShardsRequest.prototype.toJSON = function toJSON() { + UpdateThrottlerConfigRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for DeleteShardsRequest + * Gets the default type url for UpdateThrottlerConfigRequest * @function getTypeUrl - * @memberof vtctldata.DeleteShardsRequest + * @memberof vtctldata.UpdateThrottlerConfigRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - DeleteShardsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + UpdateThrottlerConfigRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + 
"/vtctldata.DeleteShardsRequest"; + return typeUrlPrefix + "/vtctldata.UpdateThrottlerConfigRequest"; }; - return DeleteShardsRequest; + return UpdateThrottlerConfigRequest; })(); - vtctldata.DeleteShardsResponse = (function() { + vtctldata.UpdateThrottlerConfigResponse = (function() { /** - * Properties of a DeleteShardsResponse. + * Properties of an UpdateThrottlerConfigResponse. * @memberof vtctldata - * @interface IDeleteShardsResponse + * @interface IUpdateThrottlerConfigResponse */ /** - * Constructs a new DeleteShardsResponse. + * Constructs a new UpdateThrottlerConfigResponse. * @memberof vtctldata - * @classdesc Represents a DeleteShardsResponse. - * @implements IDeleteShardsResponse + * @classdesc Represents an UpdateThrottlerConfigResponse. + * @implements IUpdateThrottlerConfigResponse * @constructor - * @param {vtctldata.IDeleteShardsResponse=} [properties] Properties to set + * @param {vtctldata.IUpdateThrottlerConfigResponse=} [properties] Properties to set */ - function DeleteShardsResponse(properties) { + function UpdateThrottlerConfigResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -108223,60 +128860,60 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new DeleteShardsResponse instance using the specified properties. + * Creates a new UpdateThrottlerConfigResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.DeleteShardsResponse + * @memberof vtctldata.UpdateThrottlerConfigResponse * @static - * @param {vtctldata.IDeleteShardsResponse=} [properties] Properties to set - * @returns {vtctldata.DeleteShardsResponse} DeleteShardsResponse instance + * @param {vtctldata.IUpdateThrottlerConfigResponse=} [properties] Properties to set + * @returns {vtctldata.UpdateThrottlerConfigResponse} UpdateThrottlerConfigResponse instance */ - DeleteShardsResponse.create = function create(properties) { - return new DeleteShardsResponse(properties); + UpdateThrottlerConfigResponse.create = function create(properties) { + return new UpdateThrottlerConfigResponse(properties); }; /** - * Encodes the specified DeleteShardsResponse message. Does not implicitly {@link vtctldata.DeleteShardsResponse.verify|verify} messages. + * Encodes the specified UpdateThrottlerConfigResponse message. Does not implicitly {@link vtctldata.UpdateThrottlerConfigResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.DeleteShardsResponse + * @memberof vtctldata.UpdateThrottlerConfigResponse * @static - * @param {vtctldata.IDeleteShardsResponse} message DeleteShardsResponse message or plain object to encode + * @param {vtctldata.IUpdateThrottlerConfigResponse} message UpdateThrottlerConfigResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteShardsResponse.encode = function encode(message, writer) { + UpdateThrottlerConfigResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); return writer; }; /** - * Encodes the specified DeleteShardsResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteShardsResponse.verify|verify} messages. + * Encodes the specified UpdateThrottlerConfigResponse message, length delimited. Does not implicitly {@link vtctldata.UpdateThrottlerConfigResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.DeleteShardsResponse + * @memberof vtctldata.UpdateThrottlerConfigResponse * @static - * @param {vtctldata.IDeleteShardsResponse} message DeleteShardsResponse message or plain object to encode + * @param {vtctldata.IUpdateThrottlerConfigResponse} message UpdateThrottlerConfigResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteShardsResponse.encodeDelimited = function encodeDelimited(message, writer) { + UpdateThrottlerConfigResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a DeleteShardsResponse message from the specified reader or buffer. + * Decodes an UpdateThrottlerConfigResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.DeleteShardsResponse + * @memberof vtctldata.UpdateThrottlerConfigResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.DeleteShardsResponse} DeleteShardsResponse + * @returns {vtctldata.UpdateThrottlerConfigResponse} UpdateThrottlerConfigResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteShardsResponse.decode = function decode(reader, length) { + UpdateThrottlerConfigResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteShardsResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.UpdateThrottlerConfigResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -108289,109 +128926,109 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a DeleteShardsResponse message from the specified reader or buffer, length delimited. + * Decodes an UpdateThrottlerConfigResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.DeleteShardsResponse + * @memberof vtctldata.UpdateThrottlerConfigResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.DeleteShardsResponse} DeleteShardsResponse + * @returns {vtctldata.UpdateThrottlerConfigResponse} UpdateThrottlerConfigResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteShardsResponse.decodeDelimited = function decodeDelimited(reader) { + UpdateThrottlerConfigResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a DeleteShardsResponse message. + * Verifies an UpdateThrottlerConfigResponse message. * @function verify - * @memberof vtctldata.DeleteShardsResponse + * @memberof vtctldata.UpdateThrottlerConfigResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - DeleteShardsResponse.verify = function verify(message) { + UpdateThrottlerConfigResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; return null; }; /** - * Creates a DeleteShardsResponse message from a plain object. Also converts values to their respective internal types. 
+ * Creates an UpdateThrottlerConfigResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.DeleteShardsResponse + * @memberof vtctldata.UpdateThrottlerConfigResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.DeleteShardsResponse} DeleteShardsResponse + * @returns {vtctldata.UpdateThrottlerConfigResponse} UpdateThrottlerConfigResponse */ - DeleteShardsResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.DeleteShardsResponse) + UpdateThrottlerConfigResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.UpdateThrottlerConfigResponse) return object; - return new $root.vtctldata.DeleteShardsResponse(); + return new $root.vtctldata.UpdateThrottlerConfigResponse(); }; /** - * Creates a plain object from a DeleteShardsResponse message. Also converts values to other types if specified. + * Creates a plain object from an UpdateThrottlerConfigResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.DeleteShardsResponse + * @memberof vtctldata.UpdateThrottlerConfigResponse * @static - * @param {vtctldata.DeleteShardsResponse} message DeleteShardsResponse + * @param {vtctldata.UpdateThrottlerConfigResponse} message UpdateThrottlerConfigResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - DeleteShardsResponse.toObject = function toObject() { + UpdateThrottlerConfigResponse.toObject = function toObject() { return {}; }; /** - * Converts this DeleteShardsResponse to JSON. + * Converts this UpdateThrottlerConfigResponse to JSON. 
* @function toJSON - * @memberof vtctldata.DeleteShardsResponse + * @memberof vtctldata.UpdateThrottlerConfigResponse * @instance * @returns {Object.} JSON object */ - DeleteShardsResponse.prototype.toJSON = function toJSON() { + UpdateThrottlerConfigResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for DeleteShardsResponse + * Gets the default type url for UpdateThrottlerConfigResponse * @function getTypeUrl - * @memberof vtctldata.DeleteShardsResponse + * @memberof vtctldata.UpdateThrottlerConfigResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - DeleteShardsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + UpdateThrottlerConfigResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.DeleteShardsResponse"; + return typeUrlPrefix + "/vtctldata.UpdateThrottlerConfigResponse"; }; - return DeleteShardsResponse; + return UpdateThrottlerConfigResponse; })(); - vtctldata.DeleteSrvVSchemaRequest = (function() { + vtctldata.GetSrvVSchemaRequest = (function() { /** - * Properties of a DeleteSrvVSchemaRequest. + * Properties of a GetSrvVSchemaRequest. * @memberof vtctldata - * @interface IDeleteSrvVSchemaRequest - * @property {string|null} [cell] DeleteSrvVSchemaRequest cell + * @interface IGetSrvVSchemaRequest + * @property {string|null} [cell] GetSrvVSchemaRequest cell */ /** - * Constructs a new DeleteSrvVSchemaRequest. + * Constructs a new GetSrvVSchemaRequest. * @memberof vtctldata - * @classdesc Represents a DeleteSrvVSchemaRequest. - * @implements IDeleteSrvVSchemaRequest + * @classdesc Represents a GetSrvVSchemaRequest. 
+ * @implements IGetSrvVSchemaRequest * @constructor - * @param {vtctldata.IDeleteSrvVSchemaRequest=} [properties] Properties to set + * @param {vtctldata.IGetSrvVSchemaRequest=} [properties] Properties to set */ - function DeleteSrvVSchemaRequest(properties) { + function GetSrvVSchemaRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -108399,35 +129036,35 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * DeleteSrvVSchemaRequest cell. + * GetSrvVSchemaRequest cell. * @member {string} cell - * @memberof vtctldata.DeleteSrvVSchemaRequest + * @memberof vtctldata.GetSrvVSchemaRequest * @instance */ - DeleteSrvVSchemaRequest.prototype.cell = ""; + GetSrvVSchemaRequest.prototype.cell = ""; /** - * Creates a new DeleteSrvVSchemaRequest instance using the specified properties. + * Creates a new GetSrvVSchemaRequest instance using the specified properties. * @function create - * @memberof vtctldata.DeleteSrvVSchemaRequest + * @memberof vtctldata.GetSrvVSchemaRequest * @static - * @param {vtctldata.IDeleteSrvVSchemaRequest=} [properties] Properties to set - * @returns {vtctldata.DeleteSrvVSchemaRequest} DeleteSrvVSchemaRequest instance + * @param {vtctldata.IGetSrvVSchemaRequest=} [properties] Properties to set + * @returns {vtctldata.GetSrvVSchemaRequest} GetSrvVSchemaRequest instance */ - DeleteSrvVSchemaRequest.create = function create(properties) { - return new DeleteSrvVSchemaRequest(properties); + GetSrvVSchemaRequest.create = function create(properties) { + return new GetSrvVSchemaRequest(properties); }; /** - * Encodes the specified DeleteSrvVSchemaRequest message. Does not implicitly {@link vtctldata.DeleteSrvVSchemaRequest.verify|verify} messages. + * Encodes the specified GetSrvVSchemaRequest message. Does not implicitly {@link vtctldata.GetSrvVSchemaRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.DeleteSrvVSchemaRequest + * @memberof vtctldata.GetSrvVSchemaRequest * @static - * @param {vtctldata.IDeleteSrvVSchemaRequest} message DeleteSrvVSchemaRequest message or plain object to encode + * @param {vtctldata.IGetSrvVSchemaRequest} message GetSrvVSchemaRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteSrvVSchemaRequest.encode = function encode(message, writer) { + GetSrvVSchemaRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.cell != null && Object.hasOwnProperty.call(message, "cell")) @@ -108436,33 +129073,33 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Encodes the specified DeleteSrvVSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteSrvVSchemaRequest.verify|verify} messages. + * Encodes the specified GetSrvVSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.GetSrvVSchemaRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.DeleteSrvVSchemaRequest + * @memberof vtctldata.GetSrvVSchemaRequest * @static - * @param {vtctldata.IDeleteSrvVSchemaRequest} message DeleteSrvVSchemaRequest message or plain object to encode + * @param {vtctldata.IGetSrvVSchemaRequest} message GetSrvVSchemaRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteSrvVSchemaRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetSrvVSchemaRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a DeleteSrvVSchemaRequest message from the specified reader or buffer. + * Decodes a GetSrvVSchemaRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.DeleteSrvVSchemaRequest + * @memberof vtctldata.GetSrvVSchemaRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.DeleteSrvVSchemaRequest} DeleteSrvVSchemaRequest + * @returns {vtctldata.GetSrvVSchemaRequest} GetSrvVSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteSrvVSchemaRequest.decode = function decode(reader, length) { + GetSrvVSchemaRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteSrvVSchemaRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetSrvVSchemaRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -108479,30 +129116,30 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a DeleteSrvVSchemaRequest message from the specified reader or buffer, length delimited. + * Decodes a GetSrvVSchemaRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.DeleteSrvVSchemaRequest + * @memberof vtctldata.GetSrvVSchemaRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.DeleteSrvVSchemaRequest} DeleteSrvVSchemaRequest + * @returns {vtctldata.GetSrvVSchemaRequest} GetSrvVSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteSrvVSchemaRequest.decodeDelimited = function decodeDelimited(reader) { + GetSrvVSchemaRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a DeleteSrvVSchemaRequest message. + * Verifies a GetSrvVSchemaRequest message. * @function verify - * @memberof vtctldata.DeleteSrvVSchemaRequest + * @memberof vtctldata.GetSrvVSchemaRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - DeleteSrvVSchemaRequest.verify = function verify(message) { + GetSrvVSchemaRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.cell != null && message.hasOwnProperty("cell")) @@ -108512,32 +129149,32 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Creates a DeleteSrvVSchemaRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetSrvVSchemaRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.DeleteSrvVSchemaRequest + * @memberof vtctldata.GetSrvVSchemaRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.DeleteSrvVSchemaRequest} DeleteSrvVSchemaRequest + * @returns {vtctldata.GetSrvVSchemaRequest} GetSrvVSchemaRequest */ - DeleteSrvVSchemaRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.DeleteSrvVSchemaRequest) + GetSrvVSchemaRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetSrvVSchemaRequest) return object; - let message = new $root.vtctldata.DeleteSrvVSchemaRequest(); + let message = new $root.vtctldata.GetSrvVSchemaRequest(); if (object.cell != null) message.cell = String(object.cell); return message; }; /** - * Creates a plain object from a DeleteSrvVSchemaRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetSrvVSchemaRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.DeleteSrvVSchemaRequest + * @memberof vtctldata.GetSrvVSchemaRequest * @static - * @param {vtctldata.DeleteSrvVSchemaRequest} message DeleteSrvVSchemaRequest + * @param {vtctldata.GetSrvVSchemaRequest} message GetSrvVSchemaRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - DeleteSrvVSchemaRequest.toObject = function toObject(message, options) { + GetSrvVSchemaRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; @@ -108549,51 +129186,52 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Converts this DeleteSrvVSchemaRequest to JSON. + * Converts this GetSrvVSchemaRequest to JSON. 
* @function toJSON - * @memberof vtctldata.DeleteSrvVSchemaRequest + * @memberof vtctldata.GetSrvVSchemaRequest * @instance * @returns {Object.} JSON object */ - DeleteSrvVSchemaRequest.prototype.toJSON = function toJSON() { + GetSrvVSchemaRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for DeleteSrvVSchemaRequest + * Gets the default type url for GetSrvVSchemaRequest * @function getTypeUrl - * @memberof vtctldata.DeleteSrvVSchemaRequest + * @memberof vtctldata.GetSrvVSchemaRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - DeleteSrvVSchemaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetSrvVSchemaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.DeleteSrvVSchemaRequest"; + return typeUrlPrefix + "/vtctldata.GetSrvVSchemaRequest"; }; - return DeleteSrvVSchemaRequest; + return GetSrvVSchemaRequest; })(); - vtctldata.DeleteSrvVSchemaResponse = (function() { + vtctldata.GetSrvVSchemaResponse = (function() { /** - * Properties of a DeleteSrvVSchemaResponse. + * Properties of a GetSrvVSchemaResponse. * @memberof vtctldata - * @interface IDeleteSrvVSchemaResponse + * @interface IGetSrvVSchemaResponse + * @property {vschema.ISrvVSchema|null} [srv_v_schema] GetSrvVSchemaResponse srv_v_schema */ /** - * Constructs a new DeleteSrvVSchemaResponse. + * Constructs a new GetSrvVSchemaResponse. * @memberof vtctldata - * @classdesc Represents a DeleteSrvVSchemaResponse. - * @implements IDeleteSrvVSchemaResponse + * @classdesc Represents a GetSrvVSchemaResponse. 
+ * @implements IGetSrvVSchemaResponse * @constructor - * @param {vtctldata.IDeleteSrvVSchemaResponse=} [properties] Properties to set + * @param {vtctldata.IGetSrvVSchemaResponse=} [properties] Properties to set */ - function DeleteSrvVSchemaResponse(properties) { + function GetSrvVSchemaResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -108601,63 +129239,77 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new DeleteSrvVSchemaResponse instance using the specified properties. + * GetSrvVSchemaResponse srv_v_schema. + * @member {vschema.ISrvVSchema|null|undefined} srv_v_schema + * @memberof vtctldata.GetSrvVSchemaResponse + * @instance + */ + GetSrvVSchemaResponse.prototype.srv_v_schema = null; + + /** + * Creates a new GetSrvVSchemaResponse instance using the specified properties. * @function create - * @memberof vtctldata.DeleteSrvVSchemaResponse + * @memberof vtctldata.GetSrvVSchemaResponse * @static - * @param {vtctldata.IDeleteSrvVSchemaResponse=} [properties] Properties to set - * @returns {vtctldata.DeleteSrvVSchemaResponse} DeleteSrvVSchemaResponse instance + * @param {vtctldata.IGetSrvVSchemaResponse=} [properties] Properties to set + * @returns {vtctldata.GetSrvVSchemaResponse} GetSrvVSchemaResponse instance */ - DeleteSrvVSchemaResponse.create = function create(properties) { - return new DeleteSrvVSchemaResponse(properties); + GetSrvVSchemaResponse.create = function create(properties) { + return new GetSrvVSchemaResponse(properties); }; /** - * Encodes the specified DeleteSrvVSchemaResponse message. Does not implicitly {@link vtctldata.DeleteSrvVSchemaResponse.verify|verify} messages. + * Encodes the specified GetSrvVSchemaResponse message. Does not implicitly {@link vtctldata.GetSrvVSchemaResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.DeleteSrvVSchemaResponse + * @memberof vtctldata.GetSrvVSchemaResponse * @static - * @param {vtctldata.IDeleteSrvVSchemaResponse} message DeleteSrvVSchemaResponse message or plain object to encode + * @param {vtctldata.IGetSrvVSchemaResponse} message GetSrvVSchemaResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteSrvVSchemaResponse.encode = function encode(message, writer) { + GetSrvVSchemaResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.srv_v_schema != null && Object.hasOwnProperty.call(message, "srv_v_schema")) + $root.vschema.SrvVSchema.encode(message.srv_v_schema, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified DeleteSrvVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteSrvVSchemaResponse.verify|verify} messages. + * Encodes the specified GetSrvVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.GetSrvVSchemaResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.DeleteSrvVSchemaResponse + * @memberof vtctldata.GetSrvVSchemaResponse * @static - * @param {vtctldata.IDeleteSrvVSchemaResponse} message DeleteSrvVSchemaResponse message or plain object to encode + * @param {vtctldata.IGetSrvVSchemaResponse} message GetSrvVSchemaResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteSrvVSchemaResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetSrvVSchemaResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a DeleteSrvVSchemaResponse message from the specified reader or buffer. 
+ * Decodes a GetSrvVSchemaResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.DeleteSrvVSchemaResponse + * @memberof vtctldata.GetSrvVSchemaResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.DeleteSrvVSchemaResponse} DeleteSrvVSchemaResponse + * @returns {vtctldata.GetSrvVSchemaResponse} GetSrvVSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteSrvVSchemaResponse.decode = function decode(reader, length) { + GetSrvVSchemaResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteSrvVSchemaResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetSrvVSchemaResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.srv_v_schema = $root.vschema.SrvVSchema.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -108667,111 +129319,128 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a DeleteSrvVSchemaResponse message from the specified reader or buffer, length delimited. + * Decodes a GetSrvVSchemaResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.DeleteSrvVSchemaResponse + * @memberof vtctldata.GetSrvVSchemaResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.DeleteSrvVSchemaResponse} DeleteSrvVSchemaResponse + * @returns {vtctldata.GetSrvVSchemaResponse} GetSrvVSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteSrvVSchemaResponse.decodeDelimited = function decodeDelimited(reader) { + GetSrvVSchemaResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a DeleteSrvVSchemaResponse message. + * Verifies a GetSrvVSchemaResponse message. * @function verify - * @memberof vtctldata.DeleteSrvVSchemaResponse + * @memberof vtctldata.GetSrvVSchemaResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - DeleteSrvVSchemaResponse.verify = function verify(message) { + GetSrvVSchemaResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.srv_v_schema != null && message.hasOwnProperty("srv_v_schema")) { + let error = $root.vschema.SrvVSchema.verify(message.srv_v_schema); + if (error) + return "srv_v_schema." + error; + } return null; }; /** - * Creates a DeleteSrvVSchemaResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetSrvVSchemaResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.DeleteSrvVSchemaResponse + * @memberof vtctldata.GetSrvVSchemaResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.DeleteSrvVSchemaResponse} DeleteSrvVSchemaResponse + * @returns {vtctldata.GetSrvVSchemaResponse} GetSrvVSchemaResponse */ - DeleteSrvVSchemaResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.DeleteSrvVSchemaResponse) + GetSrvVSchemaResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetSrvVSchemaResponse) return object; - return new $root.vtctldata.DeleteSrvVSchemaResponse(); + let message = new $root.vtctldata.GetSrvVSchemaResponse(); + if (object.srv_v_schema != null) { + if (typeof object.srv_v_schema !== "object") + throw TypeError(".vtctldata.GetSrvVSchemaResponse.srv_v_schema: object expected"); + message.srv_v_schema = $root.vschema.SrvVSchema.fromObject(object.srv_v_schema); + } + return message; }; /** - * Creates a plain object from a DeleteSrvVSchemaResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetSrvVSchemaResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.DeleteSrvVSchemaResponse + * @memberof vtctldata.GetSrvVSchemaResponse * @static - * @param {vtctldata.DeleteSrvVSchemaResponse} message DeleteSrvVSchemaResponse + * @param {vtctldata.GetSrvVSchemaResponse} message GetSrvVSchemaResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - DeleteSrvVSchemaResponse.toObject = function toObject() { - return {}; + GetSrvVSchemaResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.srv_v_schema = null; + if (message.srv_v_schema != null && message.hasOwnProperty("srv_v_schema")) + object.srv_v_schema = $root.vschema.SrvVSchema.toObject(message.srv_v_schema, options); + return object; }; /** - * Converts this DeleteSrvVSchemaResponse to JSON. + * Converts this GetSrvVSchemaResponse to JSON. * @function toJSON - * @memberof vtctldata.DeleteSrvVSchemaResponse + * @memberof vtctldata.GetSrvVSchemaResponse * @instance * @returns {Object.} JSON object */ - DeleteSrvVSchemaResponse.prototype.toJSON = function toJSON() { + GetSrvVSchemaResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for DeleteSrvVSchemaResponse + * Gets the default type url for GetSrvVSchemaResponse * @function getTypeUrl - * @memberof vtctldata.DeleteSrvVSchemaResponse + * @memberof vtctldata.GetSrvVSchemaResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - DeleteSrvVSchemaResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetSrvVSchemaResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.DeleteSrvVSchemaResponse"; + return typeUrlPrefix + 
"/vtctldata.GetSrvVSchemaResponse"; }; - return DeleteSrvVSchemaResponse; + return GetSrvVSchemaResponse; })(); - vtctldata.DeleteTabletsRequest = (function() { + vtctldata.GetSrvVSchemasRequest = (function() { /** - * Properties of a DeleteTabletsRequest. + * Properties of a GetSrvVSchemasRequest. * @memberof vtctldata - * @interface IDeleteTabletsRequest - * @property {Array.|null} [tablet_aliases] DeleteTabletsRequest tablet_aliases - * @property {boolean|null} [allow_primary] DeleteTabletsRequest allow_primary + * @interface IGetSrvVSchemasRequest + * @property {Array.|null} [cells] GetSrvVSchemasRequest cells */ /** - * Constructs a new DeleteTabletsRequest. + * Constructs a new GetSrvVSchemasRequest. * @memberof vtctldata - * @classdesc Represents a DeleteTabletsRequest. - * @implements IDeleteTabletsRequest + * @classdesc Represents a GetSrvVSchemasRequest. + * @implements IGetSrvVSchemasRequest * @constructor - * @param {vtctldata.IDeleteTabletsRequest=} [properties] Properties to set + * @param {vtctldata.IGetSrvVSchemasRequest=} [properties] Properties to set */ - function DeleteTabletsRequest(properties) { - this.tablet_aliases = []; + function GetSrvVSchemasRequest(properties) { + this.cells = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -108779,92 +129448,78 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * DeleteTabletsRequest tablet_aliases. - * @member {Array.} tablet_aliases - * @memberof vtctldata.DeleteTabletsRequest - * @instance - */ - DeleteTabletsRequest.prototype.tablet_aliases = $util.emptyArray; - - /** - * DeleteTabletsRequest allow_primary. - * @member {boolean} allow_primary - * @memberof vtctldata.DeleteTabletsRequest + * GetSrvVSchemasRequest cells. 
+ * @member {Array.} cells + * @memberof vtctldata.GetSrvVSchemasRequest * @instance */ - DeleteTabletsRequest.prototype.allow_primary = false; + GetSrvVSchemasRequest.prototype.cells = $util.emptyArray; /** - * Creates a new DeleteTabletsRequest instance using the specified properties. + * Creates a new GetSrvVSchemasRequest instance using the specified properties. * @function create - * @memberof vtctldata.DeleteTabletsRequest + * @memberof vtctldata.GetSrvVSchemasRequest * @static - * @param {vtctldata.IDeleteTabletsRequest=} [properties] Properties to set - * @returns {vtctldata.DeleteTabletsRequest} DeleteTabletsRequest instance + * @param {vtctldata.IGetSrvVSchemasRequest=} [properties] Properties to set + * @returns {vtctldata.GetSrvVSchemasRequest} GetSrvVSchemasRequest instance */ - DeleteTabletsRequest.create = function create(properties) { - return new DeleteTabletsRequest(properties); + GetSrvVSchemasRequest.create = function create(properties) { + return new GetSrvVSchemasRequest(properties); }; /** - * Encodes the specified DeleteTabletsRequest message. Does not implicitly {@link vtctldata.DeleteTabletsRequest.verify|verify} messages. + * Encodes the specified GetSrvVSchemasRequest message. Does not implicitly {@link vtctldata.GetSrvVSchemasRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.DeleteTabletsRequest + * @memberof vtctldata.GetSrvVSchemasRequest * @static - * @param {vtctldata.IDeleteTabletsRequest} message DeleteTabletsRequest message or plain object to encode + * @param {vtctldata.IGetSrvVSchemasRequest} message GetSrvVSchemasRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteTabletsRequest.encode = function encode(message, writer) { + GetSrvVSchemasRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_aliases != null && message.tablet_aliases.length) - for (let i = 0; i < message.tablet_aliases.length; ++i) - $root.topodata.TabletAlias.encode(message.tablet_aliases[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.allow_primary != null && Object.hasOwnProperty.call(message, "allow_primary")) - writer.uint32(/* id 2, wireType 0 =*/16).bool(message.allow_primary); + if (message.cells != null && message.cells.length) + for (let i = 0; i < message.cells.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.cells[i]); return writer; }; /** - * Encodes the specified DeleteTabletsRequest message, length delimited. Does not implicitly {@link vtctldata.DeleteTabletsRequest.verify|verify} messages. + * Encodes the specified GetSrvVSchemasRequest message, length delimited. Does not implicitly {@link vtctldata.GetSrvVSchemasRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.DeleteTabletsRequest + * @memberof vtctldata.GetSrvVSchemasRequest * @static - * @param {vtctldata.IDeleteTabletsRequest} message DeleteTabletsRequest message or plain object to encode + * @param {vtctldata.IGetSrvVSchemasRequest} message GetSrvVSchemasRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteTabletsRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetSrvVSchemasRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a DeleteTabletsRequest message from the specified reader or buffer. + * Decodes a GetSrvVSchemasRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.DeleteTabletsRequest + * @memberof vtctldata.GetSrvVSchemasRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.DeleteTabletsRequest} DeleteTabletsRequest + * @returns {vtctldata.GetSrvVSchemasRequest} GetSrvVSchemasRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteTabletsRequest.decode = function decode(reader, length) { + GetSrvVSchemasRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteTabletsRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetSrvVSchemasRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - if (!(message.tablet_aliases && message.tablet_aliases.length)) - message.tablet_aliases = []; - message.tablet_aliases.push($root.topodata.TabletAlias.decode(reader, reader.uint32())); - break; - } case 2: { - message.allow_primary = reader.bool(); + if (!(message.cells && message.cells.length)) + message.cells = []; + message.cells.push(reader.string()); break; } default: @@ -108876,147 +129531,135 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a DeleteTabletsRequest message from the specified reader or buffer, length delimited. + * Decodes a GetSrvVSchemasRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.DeleteTabletsRequest + * @memberof vtctldata.GetSrvVSchemasRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.DeleteTabletsRequest} DeleteTabletsRequest + * @returns {vtctldata.GetSrvVSchemasRequest} GetSrvVSchemasRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteTabletsRequest.decodeDelimited = function decodeDelimited(reader) { + GetSrvVSchemasRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a DeleteTabletsRequest message. + * Verifies a GetSrvVSchemasRequest message. 
* @function verify - * @memberof vtctldata.DeleteTabletsRequest + * @memberof vtctldata.GetSrvVSchemasRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - DeleteTabletsRequest.verify = function verify(message) { + GetSrvVSchemasRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_aliases != null && message.hasOwnProperty("tablet_aliases")) { - if (!Array.isArray(message.tablet_aliases)) - return "tablet_aliases: array expected"; - for (let i = 0; i < message.tablet_aliases.length; ++i) { - let error = $root.topodata.TabletAlias.verify(message.tablet_aliases[i]); - if (error) - return "tablet_aliases." + error; - } + if (message.cells != null && message.hasOwnProperty("cells")) { + if (!Array.isArray(message.cells)) + return "cells: array expected"; + for (let i = 0; i < message.cells.length; ++i) + if (!$util.isString(message.cells[i])) + return "cells: string[] expected"; } - if (message.allow_primary != null && message.hasOwnProperty("allow_primary")) - if (typeof message.allow_primary !== "boolean") - return "allow_primary: boolean expected"; return null; }; /** - * Creates a DeleteTabletsRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetSrvVSchemasRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.DeleteTabletsRequest + * @memberof vtctldata.GetSrvVSchemasRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.DeleteTabletsRequest} DeleteTabletsRequest + * @returns {vtctldata.GetSrvVSchemasRequest} GetSrvVSchemasRequest */ - DeleteTabletsRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.DeleteTabletsRequest) + GetSrvVSchemasRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetSrvVSchemasRequest) return object; - let message = new $root.vtctldata.DeleteTabletsRequest(); - if (object.tablet_aliases) { - if (!Array.isArray(object.tablet_aliases)) - throw TypeError(".vtctldata.DeleteTabletsRequest.tablet_aliases: array expected"); - message.tablet_aliases = []; - for (let i = 0; i < object.tablet_aliases.length; ++i) { - if (typeof object.tablet_aliases[i] !== "object") - throw TypeError(".vtctldata.DeleteTabletsRequest.tablet_aliases: object expected"); - message.tablet_aliases[i] = $root.topodata.TabletAlias.fromObject(object.tablet_aliases[i]); - } + let message = new $root.vtctldata.GetSrvVSchemasRequest(); + if (object.cells) { + if (!Array.isArray(object.cells)) + throw TypeError(".vtctldata.GetSrvVSchemasRequest.cells: array expected"); + message.cells = []; + for (let i = 0; i < object.cells.length; ++i) + message.cells[i] = String(object.cells[i]); } - if (object.allow_primary != null) - message.allow_primary = Boolean(object.allow_primary); return message; }; /** - * Creates a plain object from a DeleteTabletsRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetSrvVSchemasRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.DeleteTabletsRequest + * @memberof vtctldata.GetSrvVSchemasRequest * @static - * @param {vtctldata.DeleteTabletsRequest} message DeleteTabletsRequest + * @param {vtctldata.GetSrvVSchemasRequest} message GetSrvVSchemasRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - DeleteTabletsRequest.toObject = function toObject(message, options) { + GetSrvVSchemasRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) - object.tablet_aliases = []; - if (options.defaults) - object.allow_primary = false; - if (message.tablet_aliases && message.tablet_aliases.length) { - object.tablet_aliases = []; - for (let j = 0; j < message.tablet_aliases.length; ++j) - object.tablet_aliases[j] = $root.topodata.TabletAlias.toObject(message.tablet_aliases[j], options); + object.cells = []; + if (message.cells && message.cells.length) { + object.cells = []; + for (let j = 0; j < message.cells.length; ++j) + object.cells[j] = message.cells[j]; } - if (message.allow_primary != null && message.hasOwnProperty("allow_primary")) - object.allow_primary = message.allow_primary; return object; }; /** - * Converts this DeleteTabletsRequest to JSON. + * Converts this GetSrvVSchemasRequest to JSON. 
* @function toJSON - * @memberof vtctldata.DeleteTabletsRequest + * @memberof vtctldata.GetSrvVSchemasRequest * @instance * @returns {Object.} JSON object */ - DeleteTabletsRequest.prototype.toJSON = function toJSON() { + GetSrvVSchemasRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for DeleteTabletsRequest + * Gets the default type url for GetSrvVSchemasRequest * @function getTypeUrl - * @memberof vtctldata.DeleteTabletsRequest + * @memberof vtctldata.GetSrvVSchemasRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - DeleteTabletsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetSrvVSchemasRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.DeleteTabletsRequest"; + return typeUrlPrefix + "/vtctldata.GetSrvVSchemasRequest"; }; - return DeleteTabletsRequest; + return GetSrvVSchemasRequest; })(); - vtctldata.DeleteTabletsResponse = (function() { + vtctldata.GetSrvVSchemasResponse = (function() { /** - * Properties of a DeleteTabletsResponse. + * Properties of a GetSrvVSchemasResponse. * @memberof vtctldata - * @interface IDeleteTabletsResponse + * @interface IGetSrvVSchemasResponse + * @property {Object.|null} [srv_v_schemas] GetSrvVSchemasResponse srv_v_schemas */ /** - * Constructs a new DeleteTabletsResponse. + * Constructs a new GetSrvVSchemasResponse. * @memberof vtctldata - * @classdesc Represents a DeleteTabletsResponse. - * @implements IDeleteTabletsResponse + * @classdesc Represents a GetSrvVSchemasResponse. 
+ * @implements IGetSrvVSchemasResponse * @constructor - * @param {vtctldata.IDeleteTabletsResponse=} [properties] Properties to set + * @param {vtctldata.IGetSrvVSchemasResponse=} [properties] Properties to set */ - function DeleteTabletsResponse(properties) { + function GetSrvVSchemasResponse(properties) { + this.srv_v_schemas = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -109024,63 +129667,99 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new DeleteTabletsResponse instance using the specified properties. + * GetSrvVSchemasResponse srv_v_schemas. + * @member {Object.} srv_v_schemas + * @memberof vtctldata.GetSrvVSchemasResponse + * @instance + */ + GetSrvVSchemasResponse.prototype.srv_v_schemas = $util.emptyObject; + + /** + * Creates a new GetSrvVSchemasResponse instance using the specified properties. * @function create - * @memberof vtctldata.DeleteTabletsResponse + * @memberof vtctldata.GetSrvVSchemasResponse * @static - * @param {vtctldata.IDeleteTabletsResponse=} [properties] Properties to set - * @returns {vtctldata.DeleteTabletsResponse} DeleteTabletsResponse instance + * @param {vtctldata.IGetSrvVSchemasResponse=} [properties] Properties to set + * @returns {vtctldata.GetSrvVSchemasResponse} GetSrvVSchemasResponse instance */ - DeleteTabletsResponse.create = function create(properties) { - return new DeleteTabletsResponse(properties); + GetSrvVSchemasResponse.create = function create(properties) { + return new GetSrvVSchemasResponse(properties); }; /** - * Encodes the specified DeleteTabletsResponse message. Does not implicitly {@link vtctldata.DeleteTabletsResponse.verify|verify} messages. + * Encodes the specified GetSrvVSchemasResponse message. Does not implicitly {@link vtctldata.GetSrvVSchemasResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.DeleteTabletsResponse + * @memberof vtctldata.GetSrvVSchemasResponse * @static - * @param {vtctldata.IDeleteTabletsResponse} message DeleteTabletsResponse message or plain object to encode + * @param {vtctldata.IGetSrvVSchemasResponse} message GetSrvVSchemasResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteTabletsResponse.encode = function encode(message, writer) { + GetSrvVSchemasResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.srv_v_schemas != null && Object.hasOwnProperty.call(message, "srv_v_schemas")) + for (let keys = Object.keys(message.srv_v_schemas), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.vschema.SrvVSchema.encode(message.srv_v_schemas[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } return writer; }; /** - * Encodes the specified DeleteTabletsResponse message, length delimited. Does not implicitly {@link vtctldata.DeleteTabletsResponse.verify|verify} messages. + * Encodes the specified GetSrvVSchemasResponse message, length delimited. Does not implicitly {@link vtctldata.GetSrvVSchemasResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.DeleteTabletsResponse + * @memberof vtctldata.GetSrvVSchemasResponse * @static - * @param {vtctldata.IDeleteTabletsResponse} message DeleteTabletsResponse message or plain object to encode + * @param {vtctldata.IGetSrvVSchemasResponse} message GetSrvVSchemasResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - DeleteTabletsResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetSrvVSchemasResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a DeleteTabletsResponse message from the specified reader or buffer. + * Decodes a GetSrvVSchemasResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.DeleteTabletsResponse + * @memberof vtctldata.GetSrvVSchemasResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.DeleteTabletsResponse} DeleteTabletsResponse + * @returns {vtctldata.GetSrvVSchemasResponse} GetSrvVSchemasResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteTabletsResponse.decode = function decode(reader, length) { + GetSrvVSchemasResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.DeleteTabletsResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetSrvVSchemasResponse(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + if (message.srv_v_schemas === $util.emptyObject) + message.srv_v_schemas = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.vschema.SrvVSchema.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.srv_v_schemas[key] = value; + break; + } default: reader.skipType(tag & 7); break; @@ -109090,115 +129769,141 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a DeleteTabletsResponse message from the specified reader or buffer, length delimited. + * Decodes a GetSrvVSchemasResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.DeleteTabletsResponse + * @memberof vtctldata.GetSrvVSchemasResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.DeleteTabletsResponse} DeleteTabletsResponse + * @returns {vtctldata.GetSrvVSchemasResponse} GetSrvVSchemasResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - DeleteTabletsResponse.decodeDelimited = function decodeDelimited(reader) { + GetSrvVSchemasResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a DeleteTabletsResponse message. + * Verifies a GetSrvVSchemasResponse message. 
* @function verify - * @memberof vtctldata.DeleteTabletsResponse + * @memberof vtctldata.GetSrvVSchemasResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - DeleteTabletsResponse.verify = function verify(message) { + GetSrvVSchemasResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.srv_v_schemas != null && message.hasOwnProperty("srv_v_schemas")) { + if (!$util.isObject(message.srv_v_schemas)) + return "srv_v_schemas: object expected"; + let key = Object.keys(message.srv_v_schemas); + for (let i = 0; i < key.length; ++i) { + let error = $root.vschema.SrvVSchema.verify(message.srv_v_schemas[key[i]]); + if (error) + return "srv_v_schemas." + error; + } + } return null; }; /** - * Creates a DeleteTabletsResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetSrvVSchemasResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.DeleteTabletsResponse + * @memberof vtctldata.GetSrvVSchemasResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.DeleteTabletsResponse} DeleteTabletsResponse + * @returns {vtctldata.GetSrvVSchemasResponse} GetSrvVSchemasResponse */ - DeleteTabletsResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.DeleteTabletsResponse) + GetSrvVSchemasResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetSrvVSchemasResponse) return object; - return new $root.vtctldata.DeleteTabletsResponse(); + let message = new $root.vtctldata.GetSrvVSchemasResponse(); + if (object.srv_v_schemas) { + if (typeof object.srv_v_schemas !== "object") + throw TypeError(".vtctldata.GetSrvVSchemasResponse.srv_v_schemas: object expected"); + message.srv_v_schemas = {}; + for (let keys = Object.keys(object.srv_v_schemas), i = 0; i < keys.length; ++i) { + if (typeof object.srv_v_schemas[keys[i]] !== "object") + throw TypeError(".vtctldata.GetSrvVSchemasResponse.srv_v_schemas: object expected"); + message.srv_v_schemas[keys[i]] = $root.vschema.SrvVSchema.fromObject(object.srv_v_schemas[keys[i]]); + } + } + return message; }; /** - * Creates a plain object from a DeleteTabletsResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetSrvVSchemasResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.DeleteTabletsResponse + * @memberof vtctldata.GetSrvVSchemasResponse * @static - * @param {vtctldata.DeleteTabletsResponse} message DeleteTabletsResponse + * @param {vtctldata.GetSrvVSchemasResponse} message GetSrvVSchemasResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - DeleteTabletsResponse.toObject = function toObject() { - return {}; + GetSrvVSchemasResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.objects || options.defaults) + object.srv_v_schemas = {}; + let keys2; + if (message.srv_v_schemas && (keys2 = Object.keys(message.srv_v_schemas)).length) { + object.srv_v_schemas = {}; + for (let j = 0; j < keys2.length; ++j) + object.srv_v_schemas[keys2[j]] = $root.vschema.SrvVSchema.toObject(message.srv_v_schemas[keys2[j]], options); + } + return object; }; /** - * Converts this DeleteTabletsResponse to JSON. + * Converts this GetSrvVSchemasResponse to JSON. 
* @function toJSON - * @memberof vtctldata.DeleteTabletsResponse + * @memberof vtctldata.GetSrvVSchemasResponse * @instance * @returns {Object.} JSON object */ - DeleteTabletsResponse.prototype.toJSON = function toJSON() { + GetSrvVSchemasResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for DeleteTabletsResponse + * Gets the default type url for GetSrvVSchemasResponse * @function getTypeUrl - * @memberof vtctldata.DeleteTabletsResponse + * @memberof vtctldata.GetSrvVSchemasResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - DeleteTabletsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetSrvVSchemasResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.DeleteTabletsResponse"; + return typeUrlPrefix + "/vtctldata.GetSrvVSchemasResponse"; }; - return DeleteTabletsResponse; + return GetSrvVSchemasResponse; })(); - vtctldata.EmergencyReparentShardRequest = (function() { + vtctldata.GetTabletRequest = (function() { /** - * Properties of an EmergencyReparentShardRequest. + * Properties of a GetTabletRequest. 
* @memberof vtctldata - * @interface IEmergencyReparentShardRequest - * @property {string|null} [keyspace] EmergencyReparentShardRequest keyspace - * @property {string|null} [shard] EmergencyReparentShardRequest shard - * @property {topodata.ITabletAlias|null} [new_primary] EmergencyReparentShardRequest new_primary - * @property {Array.|null} [ignore_replicas] EmergencyReparentShardRequest ignore_replicas - * @property {vttime.IDuration|null} [wait_replicas_timeout] EmergencyReparentShardRequest wait_replicas_timeout - * @property {boolean|null} [prevent_cross_cell_promotion] EmergencyReparentShardRequest prevent_cross_cell_promotion + * @interface IGetTabletRequest + * @property {topodata.ITabletAlias|null} [tablet_alias] GetTabletRequest tablet_alias */ /** - * Constructs a new EmergencyReparentShardRequest. + * Constructs a new GetTabletRequest. * @memberof vtctldata - * @classdesc Represents an EmergencyReparentShardRequest. - * @implements IEmergencyReparentShardRequest + * @classdesc Represents a GetTabletRequest. + * @implements IGetTabletRequest * @constructor - * @param {vtctldata.IEmergencyReparentShardRequest=} [properties] Properties to set + * @param {vtctldata.IGetTabletRequest=} [properties] Properties to set */ - function EmergencyReparentShardRequest(properties) { - this.ignore_replicas = []; + function GetTabletRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -109206,148 +129911,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * EmergencyReparentShardRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.EmergencyReparentShardRequest - * @instance - */ - EmergencyReparentShardRequest.prototype.keyspace = ""; - - /** - * EmergencyReparentShardRequest shard. 
- * @member {string} shard - * @memberof vtctldata.EmergencyReparentShardRequest - * @instance - */ - EmergencyReparentShardRequest.prototype.shard = ""; - - /** - * EmergencyReparentShardRequest new_primary. - * @member {topodata.ITabletAlias|null|undefined} new_primary - * @memberof vtctldata.EmergencyReparentShardRequest - * @instance - */ - EmergencyReparentShardRequest.prototype.new_primary = null; - - /** - * EmergencyReparentShardRequest ignore_replicas. - * @member {Array.} ignore_replicas - * @memberof vtctldata.EmergencyReparentShardRequest - * @instance - */ - EmergencyReparentShardRequest.prototype.ignore_replicas = $util.emptyArray; - - /** - * EmergencyReparentShardRequest wait_replicas_timeout. - * @member {vttime.IDuration|null|undefined} wait_replicas_timeout - * @memberof vtctldata.EmergencyReparentShardRequest - * @instance - */ - EmergencyReparentShardRequest.prototype.wait_replicas_timeout = null; - - /** - * EmergencyReparentShardRequest prevent_cross_cell_promotion. - * @member {boolean} prevent_cross_cell_promotion - * @memberof vtctldata.EmergencyReparentShardRequest + * GetTabletRequest tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.GetTabletRequest * @instance */ - EmergencyReparentShardRequest.prototype.prevent_cross_cell_promotion = false; + GetTabletRequest.prototype.tablet_alias = null; /** - * Creates a new EmergencyReparentShardRequest instance using the specified properties. + * Creates a new GetTabletRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.EmergencyReparentShardRequest + * @memberof vtctldata.GetTabletRequest * @static - * @param {vtctldata.IEmergencyReparentShardRequest=} [properties] Properties to set - * @returns {vtctldata.EmergencyReparentShardRequest} EmergencyReparentShardRequest instance + * @param {vtctldata.IGetTabletRequest=} [properties] Properties to set + * @returns {vtctldata.GetTabletRequest} GetTabletRequest instance */ - EmergencyReparentShardRequest.create = function create(properties) { - return new EmergencyReparentShardRequest(properties); + GetTabletRequest.create = function create(properties) { + return new GetTabletRequest(properties); }; /** - * Encodes the specified EmergencyReparentShardRequest message. Does not implicitly {@link vtctldata.EmergencyReparentShardRequest.verify|verify} messages. + * Encodes the specified GetTabletRequest message. Does not implicitly {@link vtctldata.GetTabletRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.EmergencyReparentShardRequest + * @memberof vtctldata.GetTabletRequest * @static - * @param {vtctldata.IEmergencyReparentShardRequest} message EmergencyReparentShardRequest message or plain object to encode + * @param {vtctldata.IGetTabletRequest} message GetTabletRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - EmergencyReparentShardRequest.encode = function encode(message, writer) { + GetTabletRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.new_primary != null && Object.hasOwnProperty.call(message, "new_primary")) - 
$root.topodata.TabletAlias.encode(message.new_primary, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.ignore_replicas != null && message.ignore_replicas.length) - for (let i = 0; i < message.ignore_replicas.length; ++i) - $root.topodata.TabletAlias.encode(message.ignore_replicas[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.wait_replicas_timeout != null && Object.hasOwnProperty.call(message, "wait_replicas_timeout")) - $root.vttime.Duration.encode(message.wait_replicas_timeout, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); - if (message.prevent_cross_cell_promotion != null && Object.hasOwnProperty.call(message, "prevent_cross_cell_promotion")) - writer.uint32(/* id 6, wireType 0 =*/48).bool(message.prevent_cross_cell_promotion); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified EmergencyReparentShardRequest message, length delimited. Does not implicitly {@link vtctldata.EmergencyReparentShardRequest.verify|verify} messages. + * Encodes the specified GetTabletRequest message, length delimited. Does not implicitly {@link vtctldata.GetTabletRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.EmergencyReparentShardRequest + * @memberof vtctldata.GetTabletRequest * @static - * @param {vtctldata.IEmergencyReparentShardRequest} message EmergencyReparentShardRequest message or plain object to encode + * @param {vtctldata.IGetTabletRequest} message GetTabletRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - EmergencyReparentShardRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetTabletRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an EmergencyReparentShardRequest message from the specified reader or buffer. + * Decodes a GetTabletRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.EmergencyReparentShardRequest + * @memberof vtctldata.GetTabletRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.EmergencyReparentShardRequest} EmergencyReparentShardRequest + * @returns {vtctldata.GetTabletRequest} GetTabletRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - EmergencyReparentShardRequest.decode = function decode(reader, length) { + GetTabletRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.EmergencyReparentShardRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetTabletRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - message.shard = reader.string(); - break; - } - case 3: { - message.new_primary = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } - case 4: { - if (!(message.ignore_replicas && message.ignore_replicas.length)) - message.ignore_replicas = []; - message.ignore_replicas.push($root.topodata.TabletAlias.decode(reader, reader.uint32())); - break; - } - case 5: { - message.wait_replicas_timeout = $root.vttime.Duration.decode(reader, reader.uint32()); - break; - } - case 6: { - message.prevent_cross_cell_promotion = reader.bool(); + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); break; } default: @@ -109359,195 +129991,127 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an EmergencyReparentShardRequest message from the specified reader or buffer, length delimited. + * Decodes a GetTabletRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.EmergencyReparentShardRequest + * @memberof vtctldata.GetTabletRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.EmergencyReparentShardRequest} EmergencyReparentShardRequest + * @returns {vtctldata.GetTabletRequest} GetTabletRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - EmergencyReparentShardRequest.decodeDelimited = function decodeDelimited(reader) { + GetTabletRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an EmergencyReparentShardRequest message. + * Verifies a GetTabletRequest message. * @function verify - * @memberof vtctldata.EmergencyReparentShardRequest + * @memberof vtctldata.GetTabletRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - EmergencyReparentShardRequest.verify = function verify(message) { + GetTabletRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.new_primary != null && message.hasOwnProperty("new_primary")) { - let error = $root.topodata.TabletAlias.verify(message.new_primary); - if (error) - return "new_primary." 
+ error; - } - if (message.ignore_replicas != null && message.hasOwnProperty("ignore_replicas")) { - if (!Array.isArray(message.ignore_replicas)) - return "ignore_replicas: array expected"; - for (let i = 0; i < message.ignore_replicas.length; ++i) { - let error = $root.topodata.TabletAlias.verify(message.ignore_replicas[i]); - if (error) - return "ignore_replicas." + error; - } - } - if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout")) { - let error = $root.vttime.Duration.verify(message.wait_replicas_timeout); + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); if (error) - return "wait_replicas_timeout." + error; + return "tablet_alias." + error; } - if (message.prevent_cross_cell_promotion != null && message.hasOwnProperty("prevent_cross_cell_promotion")) - if (typeof message.prevent_cross_cell_promotion !== "boolean") - return "prevent_cross_cell_promotion: boolean expected"; return null; }; /** - * Creates an EmergencyReparentShardRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetTabletRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.EmergencyReparentShardRequest + * @memberof vtctldata.GetTabletRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.EmergencyReparentShardRequest} EmergencyReparentShardRequest + * @returns {vtctldata.GetTabletRequest} GetTabletRequest */ - EmergencyReparentShardRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.EmergencyReparentShardRequest) + GetTabletRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetTabletRequest) return object; - let message = new $root.vtctldata.EmergencyReparentShardRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.new_primary != null) { - if (typeof object.new_primary !== "object") - throw TypeError(".vtctldata.EmergencyReparentShardRequest.new_primary: object expected"); - message.new_primary = $root.topodata.TabletAlias.fromObject(object.new_primary); - } - if (object.ignore_replicas) { - if (!Array.isArray(object.ignore_replicas)) - throw TypeError(".vtctldata.EmergencyReparentShardRequest.ignore_replicas: array expected"); - message.ignore_replicas = []; - for (let i = 0; i < object.ignore_replicas.length; ++i) { - if (typeof object.ignore_replicas[i] !== "object") - throw TypeError(".vtctldata.EmergencyReparentShardRequest.ignore_replicas: object expected"); - message.ignore_replicas[i] = $root.topodata.TabletAlias.fromObject(object.ignore_replicas[i]); - } - } - if (object.wait_replicas_timeout != null) { - if (typeof object.wait_replicas_timeout !== "object") - throw TypeError(".vtctldata.EmergencyReparentShardRequest.wait_replicas_timeout: object expected"); - message.wait_replicas_timeout = $root.vttime.Duration.fromObject(object.wait_replicas_timeout); + let message = new $root.vtctldata.GetTabletRequest(); + if (object.tablet_alias != null) { + if (typeof 
object.tablet_alias !== "object") + throw TypeError(".vtctldata.GetTabletRequest.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); } - if (object.prevent_cross_cell_promotion != null) - message.prevent_cross_cell_promotion = Boolean(object.prevent_cross_cell_promotion); return message; }; /** - * Creates a plain object from an EmergencyReparentShardRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetTabletRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.EmergencyReparentShardRequest + * @memberof vtctldata.GetTabletRequest * @static - * @param {vtctldata.EmergencyReparentShardRequest} message EmergencyReparentShardRequest + * @param {vtctldata.GetTabletRequest} message GetTabletRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - EmergencyReparentShardRequest.toObject = function toObject(message, options) { + GetTabletRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.ignore_replicas = []; - if (options.defaults) { - object.keyspace = ""; - object.shard = ""; - object.new_primary = null; - object.wait_replicas_timeout = null; - object.prevent_cross_cell_promotion = false; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.new_primary != null && message.hasOwnProperty("new_primary")) - object.new_primary = $root.topodata.TabletAlias.toObject(message.new_primary, options); - if (message.ignore_replicas && message.ignore_replicas.length) { - object.ignore_replicas = []; - for (let j = 0; j < message.ignore_replicas.length; ++j) - object.ignore_replicas[j] = 
$root.topodata.TabletAlias.toObject(message.ignore_replicas[j], options); - } - if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout")) - object.wait_replicas_timeout = $root.vttime.Duration.toObject(message.wait_replicas_timeout, options); - if (message.prevent_cross_cell_promotion != null && message.hasOwnProperty("prevent_cross_cell_promotion")) - object.prevent_cross_cell_promotion = message.prevent_cross_cell_promotion; + if (options.defaults) + object.tablet_alias = null; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); return object; }; /** - * Converts this EmergencyReparentShardRequest to JSON. + * Converts this GetTabletRequest to JSON. * @function toJSON - * @memberof vtctldata.EmergencyReparentShardRequest + * @memberof vtctldata.GetTabletRequest * @instance * @returns {Object.} JSON object */ - EmergencyReparentShardRequest.prototype.toJSON = function toJSON() { + GetTabletRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for EmergencyReparentShardRequest + * Gets the default type url for GetTabletRequest * @function getTypeUrl - * @memberof vtctldata.EmergencyReparentShardRequest + * @memberof vtctldata.GetTabletRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - EmergencyReparentShardRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetTabletRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.EmergencyReparentShardRequest"; + return typeUrlPrefix + "/vtctldata.GetTabletRequest"; }; - return EmergencyReparentShardRequest; + return GetTabletRequest; })(); - 
vtctldata.EmergencyReparentShardResponse = (function() { + vtctldata.GetTabletResponse = (function() { /** - * Properties of an EmergencyReparentShardResponse. + * Properties of a GetTabletResponse. * @memberof vtctldata - * @interface IEmergencyReparentShardResponse - * @property {string|null} [keyspace] EmergencyReparentShardResponse keyspace - * @property {string|null} [shard] EmergencyReparentShardResponse shard - * @property {topodata.ITabletAlias|null} [promoted_primary] EmergencyReparentShardResponse promoted_primary - * @property {Array.|null} [events] EmergencyReparentShardResponse events + * @interface IGetTabletResponse + * @property {topodata.ITablet|null} [tablet] GetTabletResponse tablet */ /** - * Constructs a new EmergencyReparentShardResponse. + * Constructs a new GetTabletResponse. * @memberof vtctldata - * @classdesc Represents an EmergencyReparentShardResponse. - * @implements IEmergencyReparentShardResponse + * @classdesc Represents a GetTabletResponse. + * @implements IGetTabletResponse * @constructor - * @param {vtctldata.IEmergencyReparentShardResponse=} [properties] Properties to set + * @param {vtctldata.IGetTabletResponse=} [properties] Properties to set */ - function EmergencyReparentShardResponse(properties) { - this.events = []; + function GetTabletResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -109555,120 +130119,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * EmergencyReparentShardResponse keyspace. - * @member {string} keyspace - * @memberof vtctldata.EmergencyReparentShardResponse - * @instance - */ - EmergencyReparentShardResponse.prototype.keyspace = ""; - - /** - * EmergencyReparentShardResponse shard. 
- * @member {string} shard - * @memberof vtctldata.EmergencyReparentShardResponse - * @instance - */ - EmergencyReparentShardResponse.prototype.shard = ""; - - /** - * EmergencyReparentShardResponse promoted_primary. - * @member {topodata.ITabletAlias|null|undefined} promoted_primary - * @memberof vtctldata.EmergencyReparentShardResponse - * @instance - */ - EmergencyReparentShardResponse.prototype.promoted_primary = null; - - /** - * EmergencyReparentShardResponse events. - * @member {Array.} events - * @memberof vtctldata.EmergencyReparentShardResponse + * GetTabletResponse tablet. + * @member {topodata.ITablet|null|undefined} tablet + * @memberof vtctldata.GetTabletResponse * @instance */ - EmergencyReparentShardResponse.prototype.events = $util.emptyArray; + GetTabletResponse.prototype.tablet = null; /** - * Creates a new EmergencyReparentShardResponse instance using the specified properties. + * Creates a new GetTabletResponse instance using the specified properties. * @function create - * @memberof vtctldata.EmergencyReparentShardResponse + * @memberof vtctldata.GetTabletResponse * @static - * @param {vtctldata.IEmergencyReparentShardResponse=} [properties] Properties to set - * @returns {vtctldata.EmergencyReparentShardResponse} EmergencyReparentShardResponse instance + * @param {vtctldata.IGetTabletResponse=} [properties] Properties to set + * @returns {vtctldata.GetTabletResponse} GetTabletResponse instance */ - EmergencyReparentShardResponse.create = function create(properties) { - return new EmergencyReparentShardResponse(properties); + GetTabletResponse.create = function create(properties) { + return new GetTabletResponse(properties); }; /** - * Encodes the specified EmergencyReparentShardResponse message. Does not implicitly {@link vtctldata.EmergencyReparentShardResponse.verify|verify} messages. + * Encodes the specified GetTabletResponse message. Does not implicitly {@link vtctldata.GetTabletResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.EmergencyReparentShardResponse + * @memberof vtctldata.GetTabletResponse * @static - * @param {vtctldata.IEmergencyReparentShardResponse} message EmergencyReparentShardResponse message or plain object to encode + * @param {vtctldata.IGetTabletResponse} message GetTabletResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - EmergencyReparentShardResponse.encode = function encode(message, writer) { + GetTabletResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.promoted_primary != null && Object.hasOwnProperty.call(message, "promoted_primary")) - $root.topodata.TabletAlias.encode(message.promoted_primary, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.events != null && message.events.length) - for (let i = 0; i < message.events.length; ++i) - $root.logutil.Event.encode(message.events[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.tablet != null && Object.hasOwnProperty.call(message, "tablet")) + $root.topodata.Tablet.encode(message.tablet, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified EmergencyReparentShardResponse message, length delimited. Does not implicitly {@link vtctldata.EmergencyReparentShardResponse.verify|verify} messages. + * Encodes the specified GetTabletResponse message, length delimited. Does not implicitly {@link vtctldata.GetTabletResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.EmergencyReparentShardResponse + * @memberof vtctldata.GetTabletResponse * @static - * @param {vtctldata.IEmergencyReparentShardResponse} message EmergencyReparentShardResponse message or plain object to encode + * @param {vtctldata.IGetTabletResponse} message GetTabletResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - EmergencyReparentShardResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetTabletResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an EmergencyReparentShardResponse message from the specified reader or buffer. + * Decodes a GetTabletResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.EmergencyReparentShardResponse + * @memberof vtctldata.GetTabletResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.EmergencyReparentShardResponse} EmergencyReparentShardResponse + * @returns {vtctldata.GetTabletResponse} GetTabletResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - EmergencyReparentShardResponse.decode = function decode(reader, length) { + GetTabletResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.EmergencyReparentShardResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetTabletResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - message.shard = reader.string(); - break; - } - case 3: { - message.promoted_primary = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } - case 4: { - if (!(message.events && message.events.length)) - message.events = []; - message.events.push($root.logutil.Event.decode(reader, reader.uint32())); + message.tablet = $root.topodata.Tablet.decode(reader, reader.uint32()); break; } default: @@ -109680,173 +130199,134 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an EmergencyReparentShardResponse message from the specified reader or buffer, length delimited. + * Decodes a GetTabletResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.EmergencyReparentShardResponse + * @memberof vtctldata.GetTabletResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.EmergencyReparentShardResponse} EmergencyReparentShardResponse + * @returns {vtctldata.GetTabletResponse} GetTabletResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - EmergencyReparentShardResponse.decodeDelimited = function decodeDelimited(reader) { + GetTabletResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an EmergencyReparentShardResponse message. + * Verifies a GetTabletResponse message. 
* @function verify - * @memberof vtctldata.EmergencyReparentShardResponse + * @memberof vtctldata.GetTabletResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - EmergencyReparentShardResponse.verify = function verify(message) { + GetTabletResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.promoted_primary != null && message.hasOwnProperty("promoted_primary")) { - let error = $root.topodata.TabletAlias.verify(message.promoted_primary); + if (message.tablet != null && message.hasOwnProperty("tablet")) { + let error = $root.topodata.Tablet.verify(message.tablet); if (error) - return "promoted_primary." + error; - } - if (message.events != null && message.hasOwnProperty("events")) { - if (!Array.isArray(message.events)) - return "events: array expected"; - for (let i = 0; i < message.events.length; ++i) { - let error = $root.logutil.Event.verify(message.events[i]); - if (error) - return "events." + error; - } + return "tablet." + error; } return null; }; /** - * Creates an EmergencyReparentShardResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetTabletResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.EmergencyReparentShardResponse + * @memberof vtctldata.GetTabletResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.EmergencyReparentShardResponse} EmergencyReparentShardResponse + * @returns {vtctldata.GetTabletResponse} GetTabletResponse */ - EmergencyReparentShardResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.EmergencyReparentShardResponse) + GetTabletResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetTabletResponse) return object; - let message = new $root.vtctldata.EmergencyReparentShardResponse(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.promoted_primary != null) { - if (typeof object.promoted_primary !== "object") - throw TypeError(".vtctldata.EmergencyReparentShardResponse.promoted_primary: object expected"); - message.promoted_primary = $root.topodata.TabletAlias.fromObject(object.promoted_primary); - } - if (object.events) { - if (!Array.isArray(object.events)) - throw TypeError(".vtctldata.EmergencyReparentShardResponse.events: array expected"); - message.events = []; - for (let i = 0; i < object.events.length; ++i) { - if (typeof object.events[i] !== "object") - throw TypeError(".vtctldata.EmergencyReparentShardResponse.events: object expected"); - message.events[i] = $root.logutil.Event.fromObject(object.events[i]); - } + let message = new $root.vtctldata.GetTabletResponse(); + if (object.tablet != null) { + if (typeof object.tablet !== "object") + throw TypeError(".vtctldata.GetTabletResponse.tablet: object expected"); + message.tablet = $root.topodata.Tablet.fromObject(object.tablet); } return message; }; /** - * Creates a plain object from an EmergencyReparentShardResponse message. Also converts values to other types if specified. 
+ * Creates a plain object from a GetTabletResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.EmergencyReparentShardResponse + * @memberof vtctldata.GetTabletResponse * @static - * @param {vtctldata.EmergencyReparentShardResponse} message EmergencyReparentShardResponse + * @param {vtctldata.GetTabletResponse} message GetTabletResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - EmergencyReparentShardResponse.toObject = function toObject(message, options) { + GetTabletResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.events = []; - if (options.defaults) { - object.keyspace = ""; - object.shard = ""; - object.promoted_primary = null; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.promoted_primary != null && message.hasOwnProperty("promoted_primary")) - object.promoted_primary = $root.topodata.TabletAlias.toObject(message.promoted_primary, options); - if (message.events && message.events.length) { - object.events = []; - for (let j = 0; j < message.events.length; ++j) - object.events[j] = $root.logutil.Event.toObject(message.events[j], options); - } + if (options.defaults) + object.tablet = null; + if (message.tablet != null && message.hasOwnProperty("tablet")) + object.tablet = $root.topodata.Tablet.toObject(message.tablet, options); return object; }; /** - * Converts this EmergencyReparentShardResponse to JSON. + * Converts this GetTabletResponse to JSON. 
* @function toJSON - * @memberof vtctldata.EmergencyReparentShardResponse + * @memberof vtctldata.GetTabletResponse * @instance * @returns {Object.} JSON object */ - EmergencyReparentShardResponse.prototype.toJSON = function toJSON() { + GetTabletResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for EmergencyReparentShardResponse + * Gets the default type url for GetTabletResponse * @function getTypeUrl - * @memberof vtctldata.EmergencyReparentShardResponse + * @memberof vtctldata.GetTabletResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - EmergencyReparentShardResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetTabletResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.EmergencyReparentShardResponse"; + return typeUrlPrefix + "/vtctldata.GetTabletResponse"; }; - return EmergencyReparentShardResponse; + return GetTabletResponse; })(); - vtctldata.ExecuteFetchAsAppRequest = (function() { + vtctldata.GetTabletsRequest = (function() { /** - * Properties of an ExecuteFetchAsAppRequest. + * Properties of a GetTabletsRequest. 
* @memberof vtctldata - * @interface IExecuteFetchAsAppRequest - * @property {topodata.ITabletAlias|null} [tablet_alias] ExecuteFetchAsAppRequest tablet_alias - * @property {string|null} [query] ExecuteFetchAsAppRequest query - * @property {number|Long|null} [max_rows] ExecuteFetchAsAppRequest max_rows - * @property {boolean|null} [use_pool] ExecuteFetchAsAppRequest use_pool + * @interface IGetTabletsRequest + * @property {string|null} [keyspace] GetTabletsRequest keyspace + * @property {string|null} [shard] GetTabletsRequest shard + * @property {Array.|null} [cells] GetTabletsRequest cells + * @property {boolean|null} [strict] GetTabletsRequest strict + * @property {Array.|null} [tablet_aliases] GetTabletsRequest tablet_aliases + * @property {topodata.TabletType|null} [tablet_type] GetTabletsRequest tablet_type */ /** - * Constructs a new ExecuteFetchAsAppRequest. + * Constructs a new GetTabletsRequest. * @memberof vtctldata - * @classdesc Represents an ExecuteFetchAsAppRequest. - * @implements IExecuteFetchAsAppRequest + * @classdesc Represents a GetTabletsRequest. + * @implements IGetTabletsRequest * @constructor - * @param {vtctldata.IExecuteFetchAsAppRequest=} [properties] Properties to set + * @param {vtctldata.IGetTabletsRequest=} [properties] Properties to set */ - function ExecuteFetchAsAppRequest(properties) { + function GetTabletsRequest(properties) { + this.cells = []; + this.tablet_aliases = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -109854,117 +130334,151 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ExecuteFetchAsAppRequest tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.ExecuteFetchAsAppRequest + * GetTabletsRequest keyspace. 
+ * @member {string} keyspace + * @memberof vtctldata.GetTabletsRequest * @instance */ - ExecuteFetchAsAppRequest.prototype.tablet_alias = null; + GetTabletsRequest.prototype.keyspace = ""; /** - * ExecuteFetchAsAppRequest query. - * @member {string} query - * @memberof vtctldata.ExecuteFetchAsAppRequest + * GetTabletsRequest shard. + * @member {string} shard + * @memberof vtctldata.GetTabletsRequest * @instance */ - ExecuteFetchAsAppRequest.prototype.query = ""; + GetTabletsRequest.prototype.shard = ""; /** - * ExecuteFetchAsAppRequest max_rows. - * @member {number|Long} max_rows - * @memberof vtctldata.ExecuteFetchAsAppRequest + * GetTabletsRequest cells. + * @member {Array.} cells + * @memberof vtctldata.GetTabletsRequest * @instance */ - ExecuteFetchAsAppRequest.prototype.max_rows = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + GetTabletsRequest.prototype.cells = $util.emptyArray; /** - * ExecuteFetchAsAppRequest use_pool. - * @member {boolean} use_pool - * @memberof vtctldata.ExecuteFetchAsAppRequest + * GetTabletsRequest strict. + * @member {boolean} strict + * @memberof vtctldata.GetTabletsRequest * @instance */ - ExecuteFetchAsAppRequest.prototype.use_pool = false; + GetTabletsRequest.prototype.strict = false; /** - * Creates a new ExecuteFetchAsAppRequest instance using the specified properties. + * GetTabletsRequest tablet_aliases. + * @member {Array.} tablet_aliases + * @memberof vtctldata.GetTabletsRequest + * @instance + */ + GetTabletsRequest.prototype.tablet_aliases = $util.emptyArray; + + /** + * GetTabletsRequest tablet_type. + * @member {topodata.TabletType} tablet_type + * @memberof vtctldata.GetTabletsRequest + * @instance + */ + GetTabletsRequest.prototype.tablet_type = 0; + + /** + * Creates a new GetTabletsRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.ExecuteFetchAsAppRequest + * @memberof vtctldata.GetTabletsRequest * @static - * @param {vtctldata.IExecuteFetchAsAppRequest=} [properties] Properties to set - * @returns {vtctldata.ExecuteFetchAsAppRequest} ExecuteFetchAsAppRequest instance + * @param {vtctldata.IGetTabletsRequest=} [properties] Properties to set + * @returns {vtctldata.GetTabletsRequest} GetTabletsRequest instance */ - ExecuteFetchAsAppRequest.create = function create(properties) { - return new ExecuteFetchAsAppRequest(properties); + GetTabletsRequest.create = function create(properties) { + return new GetTabletsRequest(properties); }; /** - * Encodes the specified ExecuteFetchAsAppRequest message. Does not implicitly {@link vtctldata.ExecuteFetchAsAppRequest.verify|verify} messages. + * Encodes the specified GetTabletsRequest message. Does not implicitly {@link vtctldata.GetTabletsRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.ExecuteFetchAsAppRequest + * @memberof vtctldata.GetTabletsRequest * @static - * @param {vtctldata.IExecuteFetchAsAppRequest} message ExecuteFetchAsAppRequest message or plain object to encode + * @param {vtctldata.IGetTabletsRequest} message GetTabletsRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteFetchAsAppRequest.encode = function encode(message, writer) { + GetTabletsRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.query != null && Object.hasOwnProperty.call(message, "query")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.query); - if (message.max_rows != null && Object.hasOwnProperty.call(message, "max_rows")) - 
writer.uint32(/* id 3, wireType 0 =*/24).int64(message.max_rows); - if (message.use_pool != null && Object.hasOwnProperty.call(message, "use_pool")) - writer.uint32(/* id 4, wireType 0 =*/32).bool(message.use_pool); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.cells != null && message.cells.length) + for (let i = 0; i < message.cells.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.cells[i]); + if (message.strict != null && Object.hasOwnProperty.call(message, "strict")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.strict); + if (message.tablet_aliases != null && message.tablet_aliases.length) + for (let i = 0; i < message.tablet_aliases.length; ++i) + $root.topodata.TabletAlias.encode(message.tablet_aliases[i], writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.tablet_type != null && Object.hasOwnProperty.call(message, "tablet_type")) + writer.uint32(/* id 6, wireType 0 =*/48).int32(message.tablet_type); return writer; }; /** - * Encodes the specified ExecuteFetchAsAppRequest message, length delimited. Does not implicitly {@link vtctldata.ExecuteFetchAsAppRequest.verify|verify} messages. + * Encodes the specified GetTabletsRequest message, length delimited. Does not implicitly {@link vtctldata.GetTabletsRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ExecuteFetchAsAppRequest + * @memberof vtctldata.GetTabletsRequest * @static - * @param {vtctldata.IExecuteFetchAsAppRequest} message ExecuteFetchAsAppRequest message or plain object to encode + * @param {vtctldata.IGetTabletsRequest} message GetTabletsRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteFetchAsAppRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetTabletsRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ExecuteFetchAsAppRequest message from the specified reader or buffer. + * Decodes a GetTabletsRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ExecuteFetchAsAppRequest + * @memberof vtctldata.GetTabletsRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ExecuteFetchAsAppRequest} ExecuteFetchAsAppRequest + * @returns {vtctldata.GetTabletsRequest} GetTabletsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteFetchAsAppRequest.decode = function decode(reader, length) { + GetTabletsRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ExecuteFetchAsAppRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetTabletsRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + message.keyspace = reader.string(); break; } case 2: { - message.query = reader.string(); + message.shard = reader.string(); break; } case 3: { - message.max_rows = reader.int64(); + if (!(message.cells && message.cells.length)) + message.cells = []; + message.cells.push(reader.string()); break; } case 4: { - message.use_pool = reader.bool(); + message.strict = reader.bool(); + break; + } + case 5: { + if (!(message.tablet_aliases && message.tablet_aliases.length)) + message.tablet_aliases = []; + message.tablet_aliases.push($root.topodata.TabletAlias.decode(reader, reader.uint32())); + break; + } + case 6: { + message.tablet_type = reader.int32(); break; } default: @@ -109976,166 +130490,259 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an ExecuteFetchAsAppRequest message from the specified reader or buffer, length delimited. + * Decodes a GetTabletsRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.ExecuteFetchAsAppRequest + * @memberof vtctldata.GetTabletsRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ExecuteFetchAsAppRequest} ExecuteFetchAsAppRequest + * @returns {vtctldata.GetTabletsRequest} GetTabletsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteFetchAsAppRequest.decodeDelimited = function decodeDelimited(reader) { + GetTabletsRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ExecuteFetchAsAppRequest message. + * Verifies a GetTabletsRequest message. * @function verify - * @memberof vtctldata.ExecuteFetchAsAppRequest + * @memberof vtctldata.GetTabletsRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ExecuteFetchAsAppRequest.verify = function verify(message) { + GetTabletsRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." 
+ error; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.cells != null && message.hasOwnProperty("cells")) { + if (!Array.isArray(message.cells)) + return "cells: array expected"; + for (let i = 0; i < message.cells.length; ++i) + if (!$util.isString(message.cells[i])) + return "cells: string[] expected"; } - if (message.query != null && message.hasOwnProperty("query")) - if (!$util.isString(message.query)) - return "query: string expected"; - if (message.max_rows != null && message.hasOwnProperty("max_rows")) - if (!$util.isInteger(message.max_rows) && !(message.max_rows && $util.isInteger(message.max_rows.low) && $util.isInteger(message.max_rows.high))) - return "max_rows: integer|Long expected"; - if (message.use_pool != null && message.hasOwnProperty("use_pool")) - if (typeof message.use_pool !== "boolean") - return "use_pool: boolean expected"; + if (message.strict != null && message.hasOwnProperty("strict")) + if (typeof message.strict !== "boolean") + return "strict: boolean expected"; + if (message.tablet_aliases != null && message.hasOwnProperty("tablet_aliases")) { + if (!Array.isArray(message.tablet_aliases)) + return "tablet_aliases: array expected"; + for (let i = 0; i < message.tablet_aliases.length; ++i) { + let error = $root.topodata.TabletAlias.verify(message.tablet_aliases[i]); + if (error) + return "tablet_aliases." 
+ error; + } + } + if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) + switch (message.tablet_type) { + default: + return "tablet_type: enum value expected"; + case 0: + case 1: + case 1: + case 2: + case 3: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + } return null; }; /** - * Creates an ExecuteFetchAsAppRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetTabletsRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.ExecuteFetchAsAppRequest + * @memberof vtctldata.GetTabletsRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ExecuteFetchAsAppRequest} ExecuteFetchAsAppRequest + * @returns {vtctldata.GetTabletsRequest} GetTabletsRequest */ - ExecuteFetchAsAppRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ExecuteFetchAsAppRequest) + GetTabletsRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetTabletsRequest) return object; - let message = new $root.vtctldata.ExecuteFetchAsAppRequest(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.ExecuteFetchAsAppRequest.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + let message = new $root.vtctldata.GetTabletsRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + if (object.cells) { + if (!Array.isArray(object.cells)) + throw TypeError(".vtctldata.GetTabletsRequest.cells: array expected"); + message.cells = []; + for (let i = 0; i < object.cells.length; ++i) + message.cells[i] = String(object.cells[i]); + } + if (object.strict != null) + message.strict = Boolean(object.strict); + if 
(object.tablet_aliases) { + if (!Array.isArray(object.tablet_aliases)) + throw TypeError(".vtctldata.GetTabletsRequest.tablet_aliases: array expected"); + message.tablet_aliases = []; + for (let i = 0; i < object.tablet_aliases.length; ++i) { + if (typeof object.tablet_aliases[i] !== "object") + throw TypeError(".vtctldata.GetTabletsRequest.tablet_aliases: object expected"); + message.tablet_aliases[i] = $root.topodata.TabletAlias.fromObject(object.tablet_aliases[i]); + } + } + switch (object.tablet_type) { + default: + if (typeof object.tablet_type === "number") { + message.tablet_type = object.tablet_type; + break; + } + break; + case "UNKNOWN": + case 0: + message.tablet_type = 0; + break; + case "PRIMARY": + case 1: + message.tablet_type = 1; + break; + case "MASTER": + case 1: + message.tablet_type = 1; + break; + case "REPLICA": + case 2: + message.tablet_type = 2; + break; + case "RDONLY": + case 3: + message.tablet_type = 3; + break; + case "BATCH": + case 3: + message.tablet_type = 3; + break; + case "SPARE": + case 4: + message.tablet_type = 4; + break; + case "EXPERIMENTAL": + case 5: + message.tablet_type = 5; + break; + case "BACKUP": + case 6: + message.tablet_type = 6; + break; + case "RESTORE": + case 7: + message.tablet_type = 7; + break; + case "DRAINED": + case 8: + message.tablet_type = 8; + break; } - if (object.query != null) - message.query = String(object.query); - if (object.max_rows != null) - if ($util.Long) - (message.max_rows = $util.Long.fromValue(object.max_rows)).unsigned = false; - else if (typeof object.max_rows === "string") - message.max_rows = parseInt(object.max_rows, 10); - else if (typeof object.max_rows === "number") - message.max_rows = object.max_rows; - else if (typeof object.max_rows === "object") - message.max_rows = new $util.LongBits(object.max_rows.low >>> 0, object.max_rows.high >>> 0).toNumber(); - if (object.use_pool != null) - message.use_pool = Boolean(object.use_pool); return message; }; /** - * Creates a plain 
object from an ExecuteFetchAsAppRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetTabletsRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.ExecuteFetchAsAppRequest + * @memberof vtctldata.GetTabletsRequest * @static - * @param {vtctldata.ExecuteFetchAsAppRequest} message ExecuteFetchAsAppRequest + * @param {vtctldata.GetTabletsRequest} message GetTabletsRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ExecuteFetchAsAppRequest.toObject = function toObject(message, options) { + GetTabletsRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) { + object.cells = []; + object.tablet_aliases = []; + } if (options.defaults) { - object.tablet_alias = null; - object.query = ""; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.max_rows = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.max_rows = options.longs === String ? "0" : 0; - object.use_pool = false; + object.keyspace = ""; + object.shard = ""; + object.strict = false; + object.tablet_type = options.enums === String ? "UNKNOWN" : 0; } - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - if (message.query != null && message.hasOwnProperty("query")) - object.query = message.query; - if (message.max_rows != null && message.hasOwnProperty("max_rows")) - if (typeof message.max_rows === "number") - object.max_rows = options.longs === String ? String(message.max_rows) : message.max_rows; - else - object.max_rows = options.longs === String ? $util.Long.prototype.toString.call(message.max_rows) : options.longs === Number ? 
new $util.LongBits(message.max_rows.low >>> 0, message.max_rows.high >>> 0).toNumber() : message.max_rows; - if (message.use_pool != null && message.hasOwnProperty("use_pool")) - object.use_pool = message.use_pool; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.cells && message.cells.length) { + object.cells = []; + for (let j = 0; j < message.cells.length; ++j) + object.cells[j] = message.cells[j]; + } + if (message.strict != null && message.hasOwnProperty("strict")) + object.strict = message.strict; + if (message.tablet_aliases && message.tablet_aliases.length) { + object.tablet_aliases = []; + for (let j = 0; j < message.tablet_aliases.length; ++j) + object.tablet_aliases[j] = $root.topodata.TabletAlias.toObject(message.tablet_aliases[j], options); + } + if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) + object.tablet_type = options.enums === String ? $root.topodata.TabletType[message.tablet_type] === undefined ? message.tablet_type : $root.topodata.TabletType[message.tablet_type] : message.tablet_type; return object; }; /** - * Converts this ExecuteFetchAsAppRequest to JSON. + * Converts this GetTabletsRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ExecuteFetchAsAppRequest + * @memberof vtctldata.GetTabletsRequest * @instance * @returns {Object.} JSON object */ - ExecuteFetchAsAppRequest.prototype.toJSON = function toJSON() { + GetTabletsRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ExecuteFetchAsAppRequest + * Gets the default type url for GetTabletsRequest * @function getTypeUrl - * @memberof vtctldata.ExecuteFetchAsAppRequest + * @memberof vtctldata.GetTabletsRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ExecuteFetchAsAppRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetTabletsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ExecuteFetchAsAppRequest"; + return typeUrlPrefix + "/vtctldata.GetTabletsRequest"; }; - return ExecuteFetchAsAppRequest; + return GetTabletsRequest; })(); - vtctldata.ExecuteFetchAsAppResponse = (function() { + vtctldata.GetTabletsResponse = (function() { /** - * Properties of an ExecuteFetchAsAppResponse. + * Properties of a GetTabletsResponse. * @memberof vtctldata - * @interface IExecuteFetchAsAppResponse - * @property {query.IQueryResult|null} [result] ExecuteFetchAsAppResponse result + * @interface IGetTabletsResponse + * @property {Array.|null} [tablets] GetTabletsResponse tablets */ /** - * Constructs a new ExecuteFetchAsAppResponse. + * Constructs a new GetTabletsResponse. * @memberof vtctldata - * @classdesc Represents an ExecuteFetchAsAppResponse. - * @implements IExecuteFetchAsAppResponse + * @classdesc Represents a GetTabletsResponse. 
+ * @implements IGetTabletsResponse * @constructor - * @param {vtctldata.IExecuteFetchAsAppResponse=} [properties] Properties to set + * @param {vtctldata.IGetTabletsResponse=} [properties] Properties to set */ - function ExecuteFetchAsAppResponse(properties) { + function GetTabletsResponse(properties) { + this.tablets = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -110143,75 +130750,78 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ExecuteFetchAsAppResponse result. - * @member {query.IQueryResult|null|undefined} result - * @memberof vtctldata.ExecuteFetchAsAppResponse + * GetTabletsResponse tablets. + * @member {Array.} tablets + * @memberof vtctldata.GetTabletsResponse * @instance */ - ExecuteFetchAsAppResponse.prototype.result = null; + GetTabletsResponse.prototype.tablets = $util.emptyArray; /** - * Creates a new ExecuteFetchAsAppResponse instance using the specified properties. + * Creates a new GetTabletsResponse instance using the specified properties. * @function create - * @memberof vtctldata.ExecuteFetchAsAppResponse + * @memberof vtctldata.GetTabletsResponse * @static - * @param {vtctldata.IExecuteFetchAsAppResponse=} [properties] Properties to set - * @returns {vtctldata.ExecuteFetchAsAppResponse} ExecuteFetchAsAppResponse instance + * @param {vtctldata.IGetTabletsResponse=} [properties] Properties to set + * @returns {vtctldata.GetTabletsResponse} GetTabletsResponse instance */ - ExecuteFetchAsAppResponse.create = function create(properties) { - return new ExecuteFetchAsAppResponse(properties); + GetTabletsResponse.create = function create(properties) { + return new GetTabletsResponse(properties); }; /** - * Encodes the specified ExecuteFetchAsAppResponse message. Does not implicitly {@link vtctldata.ExecuteFetchAsAppResponse.verify|verify} messages. + * Encodes the specified GetTabletsResponse message. 
Does not implicitly {@link vtctldata.GetTabletsResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.ExecuteFetchAsAppResponse + * @memberof vtctldata.GetTabletsResponse * @static - * @param {vtctldata.IExecuteFetchAsAppResponse} message ExecuteFetchAsAppResponse message or plain object to encode + * @param {vtctldata.IGetTabletsResponse} message GetTabletsResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteFetchAsAppResponse.encode = function encode(message, writer) { + GetTabletsResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.result != null && Object.hasOwnProperty.call(message, "result")) - $root.query.QueryResult.encode(message.result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.tablets != null && message.tablets.length) + for (let i = 0; i < message.tablets.length; ++i) + $root.topodata.Tablet.encode(message.tablets[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified ExecuteFetchAsAppResponse message, length delimited. Does not implicitly {@link vtctldata.ExecuteFetchAsAppResponse.verify|verify} messages. + * Encodes the specified GetTabletsResponse message, length delimited. Does not implicitly {@link vtctldata.GetTabletsResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ExecuteFetchAsAppResponse + * @memberof vtctldata.GetTabletsResponse * @static - * @param {vtctldata.IExecuteFetchAsAppResponse} message ExecuteFetchAsAppResponse message or plain object to encode + * @param {vtctldata.IGetTabletsResponse} message GetTabletsResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteFetchAsAppResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetTabletsResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ExecuteFetchAsAppResponse message from the specified reader or buffer. + * Decodes a GetTabletsResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ExecuteFetchAsAppResponse + * @memberof vtctldata.GetTabletsResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ExecuteFetchAsAppResponse} ExecuteFetchAsAppResponse + * @returns {vtctldata.GetTabletsResponse} GetTabletsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteFetchAsAppResponse.decode = function decode(reader, length) { + GetTabletsResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ExecuteFetchAsAppResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetTabletsResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.result = $root.query.QueryResult.decode(reader, reader.uint32()); + if (!(message.tablets && message.tablets.length)) + message.tablets = []; + message.tablets.push($root.topodata.Tablet.decode(reader, reader.uint32())); break; } default: @@ -110223,131 +130833,139 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an ExecuteFetchAsAppResponse message from the specified reader or buffer, length delimited. + * Decodes a GetTabletsResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ExecuteFetchAsAppResponse + * @memberof vtctldata.GetTabletsResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ExecuteFetchAsAppResponse} ExecuteFetchAsAppResponse + * @returns {vtctldata.GetTabletsResponse} GetTabletsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteFetchAsAppResponse.decodeDelimited = function decodeDelimited(reader) { + GetTabletsResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ExecuteFetchAsAppResponse message. + * Verifies a GetTabletsResponse message. 
* @function verify - * @memberof vtctldata.ExecuteFetchAsAppResponse + * @memberof vtctldata.GetTabletsResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ExecuteFetchAsAppResponse.verify = function verify(message) { + GetTabletsResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.result != null && message.hasOwnProperty("result")) { - let error = $root.query.QueryResult.verify(message.result); - if (error) - return "result." + error; + if (message.tablets != null && message.hasOwnProperty("tablets")) { + if (!Array.isArray(message.tablets)) + return "tablets: array expected"; + for (let i = 0; i < message.tablets.length; ++i) { + let error = $root.topodata.Tablet.verify(message.tablets[i]); + if (error) + return "tablets." + error; + } } return null; }; /** - * Creates an ExecuteFetchAsAppResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetTabletsResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ExecuteFetchAsAppResponse + * @memberof vtctldata.GetTabletsResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ExecuteFetchAsAppResponse} ExecuteFetchAsAppResponse + * @returns {vtctldata.GetTabletsResponse} GetTabletsResponse */ - ExecuteFetchAsAppResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ExecuteFetchAsAppResponse) + GetTabletsResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetTabletsResponse) return object; - let message = new $root.vtctldata.ExecuteFetchAsAppResponse(); - if (object.result != null) { - if (typeof object.result !== "object") - throw TypeError(".vtctldata.ExecuteFetchAsAppResponse.result: object expected"); - message.result = $root.query.QueryResult.fromObject(object.result); + let message = new $root.vtctldata.GetTabletsResponse(); + if (object.tablets) { + if (!Array.isArray(object.tablets)) + throw TypeError(".vtctldata.GetTabletsResponse.tablets: array expected"); + message.tablets = []; + for (let i = 0; i < object.tablets.length; ++i) { + if (typeof object.tablets[i] !== "object") + throw TypeError(".vtctldata.GetTabletsResponse.tablets: object expected"); + message.tablets[i] = $root.topodata.Tablet.fromObject(object.tablets[i]); + } } return message; }; /** - * Creates a plain object from an ExecuteFetchAsAppResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetTabletsResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ExecuteFetchAsAppResponse + * @memberof vtctldata.GetTabletsResponse * @static - * @param {vtctldata.ExecuteFetchAsAppResponse} message ExecuteFetchAsAppResponse + * @param {vtctldata.GetTabletsResponse} message GetTabletsResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ExecuteFetchAsAppResponse.toObject = function toObject(message, options) { + GetTabletsResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.result = null; - if (message.result != null && message.hasOwnProperty("result")) - object.result = $root.query.QueryResult.toObject(message.result, options); + if (options.arrays || options.defaults) + object.tablets = []; + if (message.tablets && message.tablets.length) { + object.tablets = []; + for (let j = 0; j < message.tablets.length; ++j) + object.tablets[j] = $root.topodata.Tablet.toObject(message.tablets[j], options); + } return object; }; /** - * Converts this ExecuteFetchAsAppResponse to JSON. + * Converts this GetTabletsResponse to JSON. 
* @function toJSON - * @memberof vtctldata.ExecuteFetchAsAppResponse + * @memberof vtctldata.GetTabletsResponse * @instance * @returns {Object.} JSON object */ - ExecuteFetchAsAppResponse.prototype.toJSON = function toJSON() { + GetTabletsResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ExecuteFetchAsAppResponse + * Gets the default type url for GetTabletsResponse * @function getTypeUrl - * @memberof vtctldata.ExecuteFetchAsAppResponse + * @memberof vtctldata.GetTabletsResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ExecuteFetchAsAppResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetTabletsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ExecuteFetchAsAppResponse"; + return typeUrlPrefix + "/vtctldata.GetTabletsResponse"; }; - return ExecuteFetchAsAppResponse; + return GetTabletsResponse; })(); - vtctldata.ExecuteFetchAsDBARequest = (function() { + vtctldata.GetTopologyPathRequest = (function() { /** - * Properties of an ExecuteFetchAsDBARequest. + * Properties of a GetTopologyPathRequest. * @memberof vtctldata - * @interface IExecuteFetchAsDBARequest - * @property {topodata.ITabletAlias|null} [tablet_alias] ExecuteFetchAsDBARequest tablet_alias - * @property {string|null} [query] ExecuteFetchAsDBARequest query - * @property {number|Long|null} [max_rows] ExecuteFetchAsDBARequest max_rows - * @property {boolean|null} [disable_binlogs] ExecuteFetchAsDBARequest disable_binlogs - * @property {boolean|null} [reload_schema] ExecuteFetchAsDBARequest reload_schema + * @interface IGetTopologyPathRequest + * @property {string|null} [path] GetTopologyPathRequest path */ /** - * Constructs a new ExecuteFetchAsDBARequest. 
+ * Constructs a new GetTopologyPathRequest. * @memberof vtctldata - * @classdesc Represents an ExecuteFetchAsDBARequest. - * @implements IExecuteFetchAsDBARequest + * @classdesc Represents a GetTopologyPathRequest. + * @implements IGetTopologyPathRequest * @constructor - * @param {vtctldata.IExecuteFetchAsDBARequest=} [properties] Properties to set + * @param {vtctldata.IGetTopologyPathRequest=} [properties] Properties to set */ - function ExecuteFetchAsDBARequest(properties) { + function GetTopologyPathRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -110355,131 +130973,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ExecuteFetchAsDBARequest tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.ExecuteFetchAsDBARequest - * @instance - */ - ExecuteFetchAsDBARequest.prototype.tablet_alias = null; - - /** - * ExecuteFetchAsDBARequest query. - * @member {string} query - * @memberof vtctldata.ExecuteFetchAsDBARequest - * @instance - */ - ExecuteFetchAsDBARequest.prototype.query = ""; - - /** - * ExecuteFetchAsDBARequest max_rows. - * @member {number|Long} max_rows - * @memberof vtctldata.ExecuteFetchAsDBARequest - * @instance - */ - ExecuteFetchAsDBARequest.prototype.max_rows = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - - /** - * ExecuteFetchAsDBARequest disable_binlogs. - * @member {boolean} disable_binlogs - * @memberof vtctldata.ExecuteFetchAsDBARequest - * @instance - */ - ExecuteFetchAsDBARequest.prototype.disable_binlogs = false; - - /** - * ExecuteFetchAsDBARequest reload_schema. - * @member {boolean} reload_schema - * @memberof vtctldata.ExecuteFetchAsDBARequest + * GetTopologyPathRequest path. 
+ * @member {string} path + * @memberof vtctldata.GetTopologyPathRequest * @instance */ - ExecuteFetchAsDBARequest.prototype.reload_schema = false; + GetTopologyPathRequest.prototype.path = ""; /** - * Creates a new ExecuteFetchAsDBARequest instance using the specified properties. + * Creates a new GetTopologyPathRequest instance using the specified properties. * @function create - * @memberof vtctldata.ExecuteFetchAsDBARequest + * @memberof vtctldata.GetTopologyPathRequest * @static - * @param {vtctldata.IExecuteFetchAsDBARequest=} [properties] Properties to set - * @returns {vtctldata.ExecuteFetchAsDBARequest} ExecuteFetchAsDBARequest instance + * @param {vtctldata.IGetTopologyPathRequest=} [properties] Properties to set + * @returns {vtctldata.GetTopologyPathRequest} GetTopologyPathRequest instance */ - ExecuteFetchAsDBARequest.create = function create(properties) { - return new ExecuteFetchAsDBARequest(properties); + GetTopologyPathRequest.create = function create(properties) { + return new GetTopologyPathRequest(properties); }; /** - * Encodes the specified ExecuteFetchAsDBARequest message. Does not implicitly {@link vtctldata.ExecuteFetchAsDBARequest.verify|verify} messages. + * Encodes the specified GetTopologyPathRequest message. Does not implicitly {@link vtctldata.GetTopologyPathRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ExecuteFetchAsDBARequest + * @memberof vtctldata.GetTopologyPathRequest * @static - * @param {vtctldata.IExecuteFetchAsDBARequest} message ExecuteFetchAsDBARequest message or plain object to encode + * @param {vtctldata.IGetTopologyPathRequest} message GetTopologyPathRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteFetchAsDBARequest.encode = function encode(message, writer) { + GetTopologyPathRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.query != null && Object.hasOwnProperty.call(message, "query")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.query); - if (message.max_rows != null && Object.hasOwnProperty.call(message, "max_rows")) - writer.uint32(/* id 3, wireType 0 =*/24).int64(message.max_rows); - if (message.disable_binlogs != null && Object.hasOwnProperty.call(message, "disable_binlogs")) - writer.uint32(/* id 4, wireType 0 =*/32).bool(message.disable_binlogs); - if (message.reload_schema != null && Object.hasOwnProperty.call(message, "reload_schema")) - writer.uint32(/* id 5, wireType 0 =*/40).bool(message.reload_schema); + if (message.path != null && Object.hasOwnProperty.call(message, "path")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.path); return writer; }; /** - * Encodes the specified ExecuteFetchAsDBARequest message, length delimited. Does not implicitly {@link vtctldata.ExecuteFetchAsDBARequest.verify|verify} messages. + * Encodes the specified GetTopologyPathRequest message, length delimited. Does not implicitly {@link vtctldata.GetTopologyPathRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ExecuteFetchAsDBARequest + * @memberof vtctldata.GetTopologyPathRequest * @static - * @param {vtctldata.IExecuteFetchAsDBARequest} message ExecuteFetchAsDBARequest message or plain object to encode + * @param {vtctldata.IGetTopologyPathRequest} message GetTopologyPathRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteFetchAsDBARequest.encodeDelimited = function encodeDelimited(message, writer) { + GetTopologyPathRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ExecuteFetchAsDBARequest message from the specified reader or buffer. + * Decodes a GetTopologyPathRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ExecuteFetchAsDBARequest + * @memberof vtctldata.GetTopologyPathRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ExecuteFetchAsDBARequest} ExecuteFetchAsDBARequest + * @returns {vtctldata.GetTopologyPathRequest} GetTopologyPathRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteFetchAsDBARequest.decode = function decode(reader, length) { + GetTopologyPathRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ExecuteFetchAsDBARequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetTopologyPathRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } - case 2: { - message.query = reader.string(); - break; - } - case 3: { - message.max_rows = reader.int64(); - break; - } - case 4: { - message.disable_binlogs = reader.bool(); - break; - } - case 5: { - message.reload_schema = reader.bool(); + message.path = reader.string(); break; } default: @@ -110491,174 +131053,122 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an ExecuteFetchAsDBARequest message from the specified reader or buffer, length delimited. + * Decodes a GetTopologyPathRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ExecuteFetchAsDBARequest + * @memberof vtctldata.GetTopologyPathRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ExecuteFetchAsDBARequest} ExecuteFetchAsDBARequest + * @returns {vtctldata.GetTopologyPathRequest} GetTopologyPathRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteFetchAsDBARequest.decodeDelimited = function decodeDelimited(reader) { + GetTopologyPathRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ExecuteFetchAsDBARequest message. + * Verifies a GetTopologyPathRequest message. 
* @function verify - * @memberof vtctldata.ExecuteFetchAsDBARequest + * @memberof vtctldata.GetTopologyPathRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ExecuteFetchAsDBARequest.verify = function verify(message) { + GetTopologyPathRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } - if (message.query != null && message.hasOwnProperty("query")) - if (!$util.isString(message.query)) - return "query: string expected"; - if (message.max_rows != null && message.hasOwnProperty("max_rows")) - if (!$util.isInteger(message.max_rows) && !(message.max_rows && $util.isInteger(message.max_rows.low) && $util.isInteger(message.max_rows.high))) - return "max_rows: integer|Long expected"; - if (message.disable_binlogs != null && message.hasOwnProperty("disable_binlogs")) - if (typeof message.disable_binlogs !== "boolean") - return "disable_binlogs: boolean expected"; - if (message.reload_schema != null && message.hasOwnProperty("reload_schema")) - if (typeof message.reload_schema !== "boolean") - return "reload_schema: boolean expected"; + if (message.path != null && message.hasOwnProperty("path")) + if (!$util.isString(message.path)) + return "path: string expected"; return null; }; /** - * Creates an ExecuteFetchAsDBARequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetTopologyPathRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ExecuteFetchAsDBARequest + * @memberof vtctldata.GetTopologyPathRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ExecuteFetchAsDBARequest} ExecuteFetchAsDBARequest + * @returns {vtctldata.GetTopologyPathRequest} GetTopologyPathRequest */ - ExecuteFetchAsDBARequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ExecuteFetchAsDBARequest) + GetTopologyPathRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetTopologyPathRequest) return object; - let message = new $root.vtctldata.ExecuteFetchAsDBARequest(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.ExecuteFetchAsDBARequest.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } - if (object.query != null) - message.query = String(object.query); - if (object.max_rows != null) - if ($util.Long) - (message.max_rows = $util.Long.fromValue(object.max_rows)).unsigned = false; - else if (typeof object.max_rows === "string") - message.max_rows = parseInt(object.max_rows, 10); - else if (typeof object.max_rows === "number") - message.max_rows = object.max_rows; - else if (typeof object.max_rows === "object") - message.max_rows = new $util.LongBits(object.max_rows.low >>> 0, object.max_rows.high >>> 0).toNumber(); - if (object.disable_binlogs != null) - message.disable_binlogs = Boolean(object.disable_binlogs); - if (object.reload_schema != null) - message.reload_schema = Boolean(object.reload_schema); + let message = new $root.vtctldata.GetTopologyPathRequest(); + if (object.path != null) + message.path = String(object.path); return message; }; /** - * Creates a plain object from an ExecuteFetchAsDBARequest message. Also converts values to other types if specified. + * Creates a plain object from a GetTopologyPathRequest message. 
Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.ExecuteFetchAsDBARequest + * @memberof vtctldata.GetTopologyPathRequest * @static - * @param {vtctldata.ExecuteFetchAsDBARequest} message ExecuteFetchAsDBARequest + * @param {vtctldata.GetTopologyPathRequest} message GetTopologyPathRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ExecuteFetchAsDBARequest.toObject = function toObject(message, options) { + GetTopologyPathRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.tablet_alias = null; - object.query = ""; - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.max_rows = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.max_rows = options.longs === String ? "0" : 0; - object.disable_binlogs = false; - object.reload_schema = false; - } - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - if (message.query != null && message.hasOwnProperty("query")) - object.query = message.query; - if (message.max_rows != null && message.hasOwnProperty("max_rows")) - if (typeof message.max_rows === "number") - object.max_rows = options.longs === String ? String(message.max_rows) : message.max_rows; - else - object.max_rows = options.longs === String ? $util.Long.prototype.toString.call(message.max_rows) : options.longs === Number ? 
new $util.LongBits(message.max_rows.low >>> 0, message.max_rows.high >>> 0).toNumber() : message.max_rows; - if (message.disable_binlogs != null && message.hasOwnProperty("disable_binlogs")) - object.disable_binlogs = message.disable_binlogs; - if (message.reload_schema != null && message.hasOwnProperty("reload_schema")) - object.reload_schema = message.reload_schema; + if (options.defaults) + object.path = ""; + if (message.path != null && message.hasOwnProperty("path")) + object.path = message.path; return object; }; /** - * Converts this ExecuteFetchAsDBARequest to JSON. + * Converts this GetTopologyPathRequest to JSON. * @function toJSON - * @memberof vtctldata.ExecuteFetchAsDBARequest + * @memberof vtctldata.GetTopologyPathRequest * @instance * @returns {Object.} JSON object */ - ExecuteFetchAsDBARequest.prototype.toJSON = function toJSON() { + GetTopologyPathRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ExecuteFetchAsDBARequest + * Gets the default type url for GetTopologyPathRequest * @function getTypeUrl - * @memberof vtctldata.ExecuteFetchAsDBARequest + * @memberof vtctldata.GetTopologyPathRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ExecuteFetchAsDBARequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetTopologyPathRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ExecuteFetchAsDBARequest"; + return typeUrlPrefix + "/vtctldata.GetTopologyPathRequest"; }; - return ExecuteFetchAsDBARequest; + return GetTopologyPathRequest; })(); - vtctldata.ExecuteFetchAsDBAResponse = (function() { + vtctldata.GetTopologyPathResponse = (function() { /** - * Properties of an ExecuteFetchAsDBAResponse. 
+ * Properties of a GetTopologyPathResponse. * @memberof vtctldata - * @interface IExecuteFetchAsDBAResponse - * @property {query.IQueryResult|null} [result] ExecuteFetchAsDBAResponse result + * @interface IGetTopologyPathResponse + * @property {vtctldata.ITopologyCell|null} [cell] GetTopologyPathResponse cell */ /** - * Constructs a new ExecuteFetchAsDBAResponse. + * Constructs a new GetTopologyPathResponse. * @memberof vtctldata - * @classdesc Represents an ExecuteFetchAsDBAResponse. - * @implements IExecuteFetchAsDBAResponse + * @classdesc Represents a GetTopologyPathResponse. + * @implements IGetTopologyPathResponse * @constructor - * @param {vtctldata.IExecuteFetchAsDBAResponse=} [properties] Properties to set + * @param {vtctldata.IGetTopologyPathResponse=} [properties] Properties to set */ - function ExecuteFetchAsDBAResponse(properties) { + function GetTopologyPathResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -110666,75 +131176,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ExecuteFetchAsDBAResponse result. - * @member {query.IQueryResult|null|undefined} result - * @memberof vtctldata.ExecuteFetchAsDBAResponse + * GetTopologyPathResponse cell. + * @member {vtctldata.ITopologyCell|null|undefined} cell + * @memberof vtctldata.GetTopologyPathResponse * @instance */ - ExecuteFetchAsDBAResponse.prototype.result = null; + GetTopologyPathResponse.prototype.cell = null; /** - * Creates a new ExecuteFetchAsDBAResponse instance using the specified properties. + * Creates a new GetTopologyPathResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @memberof vtctldata.GetTopologyPathResponse * @static - * @param {vtctldata.IExecuteFetchAsDBAResponse=} [properties] Properties to set - * @returns {vtctldata.ExecuteFetchAsDBAResponse} ExecuteFetchAsDBAResponse instance + * @param {vtctldata.IGetTopologyPathResponse=} [properties] Properties to set + * @returns {vtctldata.GetTopologyPathResponse} GetTopologyPathResponse instance */ - ExecuteFetchAsDBAResponse.create = function create(properties) { - return new ExecuteFetchAsDBAResponse(properties); + GetTopologyPathResponse.create = function create(properties) { + return new GetTopologyPathResponse(properties); }; /** - * Encodes the specified ExecuteFetchAsDBAResponse message. Does not implicitly {@link vtctldata.ExecuteFetchAsDBAResponse.verify|verify} messages. + * Encodes the specified GetTopologyPathResponse message. Does not implicitly {@link vtctldata.GetTopologyPathResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @memberof vtctldata.GetTopologyPathResponse * @static - * @param {vtctldata.IExecuteFetchAsDBAResponse} message ExecuteFetchAsDBAResponse message or plain object to encode + * @param {vtctldata.IGetTopologyPathResponse} message GetTopologyPathResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteFetchAsDBAResponse.encode = function encode(message, writer) { + GetTopologyPathResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.result != null && Object.hasOwnProperty.call(message, "result")) - $root.query.QueryResult.encode(message.result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.cell != null && Object.hasOwnProperty.call(message, "cell")) + $root.vtctldata.TopologyCell.encode(message.cell, writer.uint32(/* id 1, wireType 2 
=*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified ExecuteFetchAsDBAResponse message, length delimited. Does not implicitly {@link vtctldata.ExecuteFetchAsDBAResponse.verify|verify} messages. + * Encodes the specified GetTopologyPathResponse message, length delimited. Does not implicitly {@link vtctldata.GetTopologyPathResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @memberof vtctldata.GetTopologyPathResponse * @static - * @param {vtctldata.IExecuteFetchAsDBAResponse} message ExecuteFetchAsDBAResponse message or plain object to encode + * @param {vtctldata.IGetTopologyPathResponse} message GetTopologyPathResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteFetchAsDBAResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetTopologyPathResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ExecuteFetchAsDBAResponse message from the specified reader or buffer. + * Decodes a GetTopologyPathResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @memberof vtctldata.GetTopologyPathResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ExecuteFetchAsDBAResponse} ExecuteFetchAsDBAResponse + * @returns {vtctldata.GetTopologyPathResponse} GetTopologyPathResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteFetchAsDBAResponse.decode = function decode(reader, length) { + GetTopologyPathResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ExecuteFetchAsDBAResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetTopologyPathResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.result = $root.query.QueryResult.decode(reader, reader.uint32()); + message.cell = $root.vtctldata.TopologyCell.decode(reader, reader.uint32()); break; } default: @@ -110746,128 +131256,131 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an ExecuteFetchAsDBAResponse message from the specified reader or buffer, length delimited. + * Decodes a GetTopologyPathResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @memberof vtctldata.GetTopologyPathResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ExecuteFetchAsDBAResponse} ExecuteFetchAsDBAResponse + * @returns {vtctldata.GetTopologyPathResponse} GetTopologyPathResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteFetchAsDBAResponse.decodeDelimited = function decodeDelimited(reader) { + GetTopologyPathResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ExecuteFetchAsDBAResponse message. + * Verifies a GetTopologyPathResponse message. * @function verify - * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @memberof vtctldata.GetTopologyPathResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ExecuteFetchAsDBAResponse.verify = function verify(message) { + GetTopologyPathResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.result != null && message.hasOwnProperty("result")) { - let error = $root.query.QueryResult.verify(message.result); + if (message.cell != null && message.hasOwnProperty("cell")) { + let error = $root.vtctldata.TopologyCell.verify(message.cell); if (error) - return "result." + error; + return "cell." + error; } return null; }; /** - * Creates an ExecuteFetchAsDBAResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetTopologyPathResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @memberof vtctldata.GetTopologyPathResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ExecuteFetchAsDBAResponse} ExecuteFetchAsDBAResponse + * @returns {vtctldata.GetTopologyPathResponse} GetTopologyPathResponse */ - ExecuteFetchAsDBAResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ExecuteFetchAsDBAResponse) + GetTopologyPathResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetTopologyPathResponse) return object; - let message = new $root.vtctldata.ExecuteFetchAsDBAResponse(); - if (object.result != null) { - if (typeof object.result !== "object") - throw TypeError(".vtctldata.ExecuteFetchAsDBAResponse.result: object expected"); - message.result = $root.query.QueryResult.fromObject(object.result); + let message = new $root.vtctldata.GetTopologyPathResponse(); + if (object.cell != null) { + if (typeof object.cell !== "object") + throw TypeError(".vtctldata.GetTopologyPathResponse.cell: object expected"); + message.cell = $root.vtctldata.TopologyCell.fromObject(object.cell); } return message; }; /** - * Creates a plain object from an ExecuteFetchAsDBAResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetTopologyPathResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @memberof vtctldata.GetTopologyPathResponse * @static - * @param {vtctldata.ExecuteFetchAsDBAResponse} message ExecuteFetchAsDBAResponse + * @param {vtctldata.GetTopologyPathResponse} message GetTopologyPathResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ExecuteFetchAsDBAResponse.toObject = function toObject(message, options) { + GetTopologyPathResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) - object.result = null; - if (message.result != null && message.hasOwnProperty("result")) - object.result = $root.query.QueryResult.toObject(message.result, options); + object.cell = null; + if (message.cell != null && message.hasOwnProperty("cell")) + object.cell = $root.vtctldata.TopologyCell.toObject(message.cell, options); return object; }; /** - * Converts this ExecuteFetchAsDBAResponse to JSON. + * Converts this GetTopologyPathResponse to JSON. 
* @function toJSON - * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @memberof vtctldata.GetTopologyPathResponse * @instance * @returns {Object.} JSON object */ - ExecuteFetchAsDBAResponse.prototype.toJSON = function toJSON() { + GetTopologyPathResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ExecuteFetchAsDBAResponse + * Gets the default type url for GetTopologyPathResponse * @function getTypeUrl - * @memberof vtctldata.ExecuteFetchAsDBAResponse + * @memberof vtctldata.GetTopologyPathResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ExecuteFetchAsDBAResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetTopologyPathResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ExecuteFetchAsDBAResponse"; + return typeUrlPrefix + "/vtctldata.GetTopologyPathResponse"; }; - return ExecuteFetchAsDBAResponse; + return GetTopologyPathResponse; })(); - vtctldata.ExecuteHookRequest = (function() { + vtctldata.TopologyCell = (function() { /** - * Properties of an ExecuteHookRequest. + * Properties of a TopologyCell. * @memberof vtctldata - * @interface IExecuteHookRequest - * @property {topodata.ITabletAlias|null} [tablet_alias] ExecuteHookRequest tablet_alias - * @property {tabletmanagerdata.IExecuteHookRequest|null} [tablet_hook_request] ExecuteHookRequest tablet_hook_request + * @interface ITopologyCell + * @property {string|null} [name] TopologyCell name + * @property {string|null} [path] TopologyCell path + * @property {string|null} [data] TopologyCell data + * @property {Array.|null} [children] TopologyCell children */ /** - * Constructs a new ExecuteHookRequest. + * Constructs a new TopologyCell. 
* @memberof vtctldata - * @classdesc Represents an ExecuteHookRequest. - * @implements IExecuteHookRequest + * @classdesc Represents a TopologyCell. + * @implements ITopologyCell * @constructor - * @param {vtctldata.IExecuteHookRequest=} [properties] Properties to set + * @param {vtctldata.ITopologyCell=} [properties] Properties to set */ - function ExecuteHookRequest(properties) { + function TopologyCell(properties) { + this.children = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -110875,89 +131388,120 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ExecuteHookRequest tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.ExecuteHookRequest + * TopologyCell name. + * @member {string} name + * @memberof vtctldata.TopologyCell * @instance */ - ExecuteHookRequest.prototype.tablet_alias = null; + TopologyCell.prototype.name = ""; /** - * ExecuteHookRequest tablet_hook_request. - * @member {tabletmanagerdata.IExecuteHookRequest|null|undefined} tablet_hook_request - * @memberof vtctldata.ExecuteHookRequest + * TopologyCell path. + * @member {string} path + * @memberof vtctldata.TopologyCell * @instance */ - ExecuteHookRequest.prototype.tablet_hook_request = null; + TopologyCell.prototype.path = ""; /** - * Creates a new ExecuteHookRequest instance using the specified properties. + * TopologyCell data. + * @member {string} data + * @memberof vtctldata.TopologyCell + * @instance + */ + TopologyCell.prototype.data = ""; + + /** + * TopologyCell children. + * @member {Array.} children + * @memberof vtctldata.TopologyCell + * @instance + */ + TopologyCell.prototype.children = $util.emptyArray; + + /** + * Creates a new TopologyCell instance using the specified properties. 
* @function create - * @memberof vtctldata.ExecuteHookRequest + * @memberof vtctldata.TopologyCell * @static - * @param {vtctldata.IExecuteHookRequest=} [properties] Properties to set - * @returns {vtctldata.ExecuteHookRequest} ExecuteHookRequest instance + * @param {vtctldata.ITopologyCell=} [properties] Properties to set + * @returns {vtctldata.TopologyCell} TopologyCell instance */ - ExecuteHookRequest.create = function create(properties) { - return new ExecuteHookRequest(properties); + TopologyCell.create = function create(properties) { + return new TopologyCell(properties); }; /** - * Encodes the specified ExecuteHookRequest message. Does not implicitly {@link vtctldata.ExecuteHookRequest.verify|verify} messages. + * Encodes the specified TopologyCell message. Does not implicitly {@link vtctldata.TopologyCell.verify|verify} messages. * @function encode - * @memberof vtctldata.ExecuteHookRequest + * @memberof vtctldata.TopologyCell * @static - * @param {vtctldata.IExecuteHookRequest} message ExecuteHookRequest message or plain object to encode + * @param {vtctldata.ITopologyCell} message TopologyCell message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteHookRequest.encode = function encode(message, writer) { + TopologyCell.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.tablet_hook_request != null && Object.hasOwnProperty.call(message, "tablet_hook_request")) - $root.tabletmanagerdata.ExecuteHookRequest.encode(message.tablet_hook_request, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 
=*/10).string(message.name); + if (message.path != null && Object.hasOwnProperty.call(message, "path")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.path); + if (message.data != null && Object.hasOwnProperty.call(message, "data")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.data); + if (message.children != null && message.children.length) + for (let i = 0; i < message.children.length; ++i) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.children[i]); return writer; }; /** - * Encodes the specified ExecuteHookRequest message, length delimited. Does not implicitly {@link vtctldata.ExecuteHookRequest.verify|verify} messages. + * Encodes the specified TopologyCell message, length delimited. Does not implicitly {@link vtctldata.TopologyCell.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ExecuteHookRequest + * @memberof vtctldata.TopologyCell * @static - * @param {vtctldata.IExecuteHookRequest} message ExecuteHookRequest message or plain object to encode + * @param {vtctldata.ITopologyCell} message TopologyCell message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteHookRequest.encodeDelimited = function encodeDelimited(message, writer) { + TopologyCell.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ExecuteHookRequest message from the specified reader or buffer. + * Decodes a TopologyCell message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.ExecuteHookRequest + * @memberof vtctldata.TopologyCell * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ExecuteHookRequest} ExecuteHookRequest + * @returns {vtctldata.TopologyCell} TopologyCell * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteHookRequest.decode = function decode(reader, length) { + TopologyCell.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ExecuteHookRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.TopologyCell(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + message.name = reader.string(); break; } case 2: { - message.tablet_hook_request = $root.tabletmanagerdata.ExecuteHookRequest.decode(reader, reader.uint32()); + message.path = reader.string(); + break; + } + case 3: { + message.data = reader.string(); + break; + } + case 4: { + if (!(message.children && message.children.length)) + message.children = []; + message.children.push(reader.string()); break; } default: @@ -110969,141 +131513,160 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an ExecuteHookRequest message from the specified reader or buffer, length delimited. + * Decodes a TopologyCell message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.ExecuteHookRequest + * @memberof vtctldata.TopologyCell * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ExecuteHookRequest} ExecuteHookRequest + * @returns {vtctldata.TopologyCell} TopologyCell * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteHookRequest.decodeDelimited = function decodeDelimited(reader) { + TopologyCell.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ExecuteHookRequest message. + * Verifies a TopologyCell message. * @function verify - * @memberof vtctldata.ExecuteHookRequest + * @memberof vtctldata.TopologyCell * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ExecuteHookRequest.verify = function verify(message) { + TopologyCell.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } - if (message.tablet_hook_request != null && message.hasOwnProperty("tablet_hook_request")) { - let error = $root.tabletmanagerdata.ExecuteHookRequest.verify(message.tablet_hook_request); - if (error) - return "tablet_hook_request." 
+ error; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.path != null && message.hasOwnProperty("path")) + if (!$util.isString(message.path)) + return "path: string expected"; + if (message.data != null && message.hasOwnProperty("data")) + if (!$util.isString(message.data)) + return "data: string expected"; + if (message.children != null && message.hasOwnProperty("children")) { + if (!Array.isArray(message.children)) + return "children: array expected"; + for (let i = 0; i < message.children.length; ++i) + if (!$util.isString(message.children[i])) + return "children: string[] expected"; } return null; }; /** - * Creates an ExecuteHookRequest message from a plain object. Also converts values to their respective internal types. + * Creates a TopologyCell message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.ExecuteHookRequest + * @memberof vtctldata.TopologyCell * @static * @param {Object.} object Plain object - * @returns {vtctldata.ExecuteHookRequest} ExecuteHookRequest + * @returns {vtctldata.TopologyCell} TopologyCell */ - ExecuteHookRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ExecuteHookRequest) + TopologyCell.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.TopologyCell) return object; - let message = new $root.vtctldata.ExecuteHookRequest(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.ExecuteHookRequest.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } - if (object.tablet_hook_request != null) { - if (typeof object.tablet_hook_request !== "object") - throw TypeError(".vtctldata.ExecuteHookRequest.tablet_hook_request: object expected"); - message.tablet_hook_request = 
$root.tabletmanagerdata.ExecuteHookRequest.fromObject(object.tablet_hook_request); + let message = new $root.vtctldata.TopologyCell(); + if (object.name != null) + message.name = String(object.name); + if (object.path != null) + message.path = String(object.path); + if (object.data != null) + message.data = String(object.data); + if (object.children) { + if (!Array.isArray(object.children)) + throw TypeError(".vtctldata.TopologyCell.children: array expected"); + message.children = []; + for (let i = 0; i < object.children.length; ++i) + message.children[i] = String(object.children[i]); } return message; }; /** - * Creates a plain object from an ExecuteHookRequest message. Also converts values to other types if specified. + * Creates a plain object from a TopologyCell message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.ExecuteHookRequest + * @memberof vtctldata.TopologyCell * @static - * @param {vtctldata.ExecuteHookRequest} message ExecuteHookRequest + * @param {vtctldata.TopologyCell} message TopologyCell * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ExecuteHookRequest.toObject = function toObject(message, options) { + TopologyCell.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) + object.children = []; if (options.defaults) { - object.tablet_alias = null; - object.tablet_hook_request = null; + object.name = ""; + object.path = ""; + object.data = ""; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.path != null && message.hasOwnProperty("path")) + object.path = message.path; + if (message.data != null && message.hasOwnProperty("data")) + object.data = message.data; + if (message.children && message.children.length) { + object.children = []; + for (let j = 0; j < message.children.length; ++j) + 
object.children[j] = message.children[j]; } - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - if (message.tablet_hook_request != null && message.hasOwnProperty("tablet_hook_request")) - object.tablet_hook_request = $root.tabletmanagerdata.ExecuteHookRequest.toObject(message.tablet_hook_request, options); return object; }; /** - * Converts this ExecuteHookRequest to JSON. + * Converts this TopologyCell to JSON. * @function toJSON - * @memberof vtctldata.ExecuteHookRequest + * @memberof vtctldata.TopologyCell * @instance * @returns {Object.} JSON object */ - ExecuteHookRequest.prototype.toJSON = function toJSON() { + TopologyCell.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ExecuteHookRequest + * Gets the default type url for TopologyCell * @function getTypeUrl - * @memberof vtctldata.ExecuteHookRequest + * @memberof vtctldata.TopologyCell * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ExecuteHookRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + TopologyCell.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ExecuteHookRequest"; + return typeUrlPrefix + "/vtctldata.TopologyCell"; }; - return ExecuteHookRequest; + return TopologyCell; })(); - vtctldata.ExecuteHookResponse = (function() { + vtctldata.GetVSchemaRequest = (function() { /** - * Properties of an ExecuteHookResponse. + * Properties of a GetVSchemaRequest. 
* @memberof vtctldata - * @interface IExecuteHookResponse - * @property {tabletmanagerdata.IExecuteHookResponse|null} [hook_result] ExecuteHookResponse hook_result + * @interface IGetVSchemaRequest + * @property {string|null} [keyspace] GetVSchemaRequest keyspace */ /** - * Constructs a new ExecuteHookResponse. + * Constructs a new GetVSchemaRequest. * @memberof vtctldata - * @classdesc Represents an ExecuteHookResponse. - * @implements IExecuteHookResponse + * @classdesc Represents a GetVSchemaRequest. + * @implements IGetVSchemaRequest * @constructor - * @param {vtctldata.IExecuteHookResponse=} [properties] Properties to set + * @param {vtctldata.IGetVSchemaRequest=} [properties] Properties to set */ - function ExecuteHookResponse(properties) { + function GetVSchemaRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -111111,75 +131674,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ExecuteHookResponse hook_result. - * @member {tabletmanagerdata.IExecuteHookResponse|null|undefined} hook_result - * @memberof vtctldata.ExecuteHookResponse + * GetVSchemaRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.GetVSchemaRequest * @instance */ - ExecuteHookResponse.prototype.hook_result = null; + GetVSchemaRequest.prototype.keyspace = ""; /** - * Creates a new ExecuteHookResponse instance using the specified properties. + * Creates a new GetVSchemaRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.ExecuteHookResponse + * @memberof vtctldata.GetVSchemaRequest * @static - * @param {vtctldata.IExecuteHookResponse=} [properties] Properties to set - * @returns {vtctldata.ExecuteHookResponse} ExecuteHookResponse instance + * @param {vtctldata.IGetVSchemaRequest=} [properties] Properties to set + * @returns {vtctldata.GetVSchemaRequest} GetVSchemaRequest instance */ - ExecuteHookResponse.create = function create(properties) { - return new ExecuteHookResponse(properties); + GetVSchemaRequest.create = function create(properties) { + return new GetVSchemaRequest(properties); }; /** - * Encodes the specified ExecuteHookResponse message. Does not implicitly {@link vtctldata.ExecuteHookResponse.verify|verify} messages. + * Encodes the specified GetVSchemaRequest message. Does not implicitly {@link vtctldata.GetVSchemaRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.ExecuteHookResponse + * @memberof vtctldata.GetVSchemaRequest * @static - * @param {vtctldata.IExecuteHookResponse} message ExecuteHookResponse message or plain object to encode + * @param {vtctldata.IGetVSchemaRequest} message GetVSchemaRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteHookResponse.encode = function encode(message, writer) { + GetVSchemaRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.hook_result != null && Object.hasOwnProperty.call(message, "hook_result")) - $root.tabletmanagerdata.ExecuteHookResponse.encode(message.hook_result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); return writer; }; /** - * Encodes the specified ExecuteHookResponse message, length delimited. 
Does not implicitly {@link vtctldata.ExecuteHookResponse.verify|verify} messages. + * Encodes the specified GetVSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.GetVSchemaRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ExecuteHookResponse + * @memberof vtctldata.GetVSchemaRequest * @static - * @param {vtctldata.IExecuteHookResponse} message ExecuteHookResponse message or plain object to encode + * @param {vtctldata.IGetVSchemaRequest} message GetVSchemaRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ExecuteHookResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetVSchemaRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ExecuteHookResponse message from the specified reader or buffer. + * Decodes a GetVSchemaRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ExecuteHookResponse + * @memberof vtctldata.GetVSchemaRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ExecuteHookResponse} ExecuteHookResponse + * @returns {vtctldata.GetVSchemaRequest} GetVSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteHookResponse.decode = function decode(reader, length) { + GetVSchemaRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ExecuteHookResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetVSchemaRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.hook_result = $root.tabletmanagerdata.ExecuteHookResponse.decode(reader, reader.uint32()); + message.keyspace = reader.string(); break; } default: @@ -111191,127 +131754,122 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an ExecuteHookResponse message from the specified reader or buffer, length delimited. + * Decodes a GetVSchemaRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ExecuteHookResponse + * @memberof vtctldata.GetVSchemaRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ExecuteHookResponse} ExecuteHookResponse + * @returns {vtctldata.GetVSchemaRequest} GetVSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ExecuteHookResponse.decodeDelimited = function decodeDelimited(reader) { + GetVSchemaRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ExecuteHookResponse message. + * Verifies a GetVSchemaRequest message. 
* @function verify - * @memberof vtctldata.ExecuteHookResponse + * @memberof vtctldata.GetVSchemaRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ExecuteHookResponse.verify = function verify(message) { + GetVSchemaRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.hook_result != null && message.hasOwnProperty("hook_result")) { - let error = $root.tabletmanagerdata.ExecuteHookResponse.verify(message.hook_result); - if (error) - return "hook_result." + error; - } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; return null; }; /** - * Creates an ExecuteHookResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetVSchemaRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ExecuteHookResponse + * @memberof vtctldata.GetVSchemaRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ExecuteHookResponse} ExecuteHookResponse + * @returns {vtctldata.GetVSchemaRequest} GetVSchemaRequest */ - ExecuteHookResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ExecuteHookResponse) + GetVSchemaRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetVSchemaRequest) return object; - let message = new $root.vtctldata.ExecuteHookResponse(); - if (object.hook_result != null) { - if (typeof object.hook_result !== "object") - throw TypeError(".vtctldata.ExecuteHookResponse.hook_result: object expected"); - message.hook_result = $root.tabletmanagerdata.ExecuteHookResponse.fromObject(object.hook_result); - } + let message = new $root.vtctldata.GetVSchemaRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); return message; }; /** - * Creates a plain object from an ExecuteHookResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetVSchemaRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ExecuteHookResponse + * @memberof vtctldata.GetVSchemaRequest * @static - * @param {vtctldata.ExecuteHookResponse} message ExecuteHookResponse + * @param {vtctldata.GetVSchemaRequest} message GetVSchemaRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ExecuteHookResponse.toObject = function toObject(message, options) { + GetVSchemaRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) - object.hook_result = null; - if (message.hook_result != null && message.hasOwnProperty("hook_result")) - object.hook_result = $root.tabletmanagerdata.ExecuteHookResponse.toObject(message.hook_result, options); + object.keyspace = ""; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; return object; }; /** - * Converts this ExecuteHookResponse to JSON. + * Converts this GetVSchemaRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ExecuteHookResponse + * @memberof vtctldata.GetVSchemaRequest * @instance * @returns {Object.} JSON object */ - ExecuteHookResponse.prototype.toJSON = function toJSON() { + GetVSchemaRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ExecuteHookResponse + * Gets the default type url for GetVSchemaRequest * @function getTypeUrl - * @memberof vtctldata.ExecuteHookResponse + * @memberof vtctldata.GetVSchemaRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ExecuteHookResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetVSchemaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ExecuteHookResponse"; + return typeUrlPrefix + "/vtctldata.GetVSchemaRequest"; }; - return ExecuteHookResponse; + return GetVSchemaRequest; })(); - vtctldata.FindAllShardsInKeyspaceRequest = (function() { + vtctldata.GetVersionRequest = (function() { /** - * Properties of a FindAllShardsInKeyspaceRequest. + * Properties of a GetVersionRequest. * @memberof vtctldata - * @interface IFindAllShardsInKeyspaceRequest - * @property {string|null} [keyspace] FindAllShardsInKeyspaceRequest keyspace + * @interface IGetVersionRequest + * @property {topodata.ITabletAlias|null} [tablet_alias] GetVersionRequest tablet_alias */ /** - * Constructs a new FindAllShardsInKeyspaceRequest. + * Constructs a new GetVersionRequest. * @memberof vtctldata - * @classdesc Represents a FindAllShardsInKeyspaceRequest. - * @implements IFindAllShardsInKeyspaceRequest + * @classdesc Represents a GetVersionRequest. 
+ * @implements IGetVersionRequest * @constructor - * @param {vtctldata.IFindAllShardsInKeyspaceRequest=} [properties] Properties to set + * @param {vtctldata.IGetVersionRequest=} [properties] Properties to set */ - function FindAllShardsInKeyspaceRequest(properties) { + function GetVersionRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -111319,75 +131877,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * FindAllShardsInKeyspaceRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * GetVersionRequest tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.GetVersionRequest * @instance */ - FindAllShardsInKeyspaceRequest.prototype.keyspace = ""; + GetVersionRequest.prototype.tablet_alias = null; /** - * Creates a new FindAllShardsInKeyspaceRequest instance using the specified properties. + * Creates a new GetVersionRequest instance using the specified properties. * @function create - * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @memberof vtctldata.GetVersionRequest * @static - * @param {vtctldata.IFindAllShardsInKeyspaceRequest=} [properties] Properties to set - * @returns {vtctldata.FindAllShardsInKeyspaceRequest} FindAllShardsInKeyspaceRequest instance + * @param {vtctldata.IGetVersionRequest=} [properties] Properties to set + * @returns {vtctldata.GetVersionRequest} GetVersionRequest instance */ - FindAllShardsInKeyspaceRequest.create = function create(properties) { - return new FindAllShardsInKeyspaceRequest(properties); + GetVersionRequest.create = function create(properties) { + return new GetVersionRequest(properties); }; /** - * Encodes the specified FindAllShardsInKeyspaceRequest message. Does not implicitly {@link vtctldata.FindAllShardsInKeyspaceRequest.verify|verify} messages. + * Encodes the specified GetVersionRequest message. 
Does not implicitly {@link vtctldata.GetVersionRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @memberof vtctldata.GetVersionRequest * @static - * @param {vtctldata.IFindAllShardsInKeyspaceRequest} message FindAllShardsInKeyspaceRequest message or plain object to encode + * @param {vtctldata.IGetVersionRequest} message GetVersionRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - FindAllShardsInKeyspaceRequest.encode = function encode(message, writer) { + GetVersionRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified FindAllShardsInKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.FindAllShardsInKeyspaceRequest.verify|verify} messages. + * Encodes the specified GetVersionRequest message, length delimited. Does not implicitly {@link vtctldata.GetVersionRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @memberof vtctldata.GetVersionRequest * @static - * @param {vtctldata.IFindAllShardsInKeyspaceRequest} message FindAllShardsInKeyspaceRequest message or plain object to encode + * @param {vtctldata.IGetVersionRequest} message GetVersionRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - FindAllShardsInKeyspaceRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetVersionRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a FindAllShardsInKeyspaceRequest message from the specified reader or buffer. + * Decodes a GetVersionRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @memberof vtctldata.GetVersionRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.FindAllShardsInKeyspaceRequest} FindAllShardsInKeyspaceRequest + * @returns {vtctldata.GetVersionRequest} GetVersionRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FindAllShardsInKeyspaceRequest.decode = function decode(reader, length) { + GetVersionRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.FindAllShardsInKeyspaceRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetVersionRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); break; } default: @@ -111399,123 +131957,127 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a FindAllShardsInKeyspaceRequest message from the specified reader or buffer, length delimited. + * Decodes a GetVersionRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @memberof vtctldata.GetVersionRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.FindAllShardsInKeyspaceRequest} FindAllShardsInKeyspaceRequest + * @returns {vtctldata.GetVersionRequest} GetVersionRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FindAllShardsInKeyspaceRequest.decodeDelimited = function decodeDelimited(reader) { + GetVersionRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a FindAllShardsInKeyspaceRequest message. + * Verifies a GetVersionRequest message. 
* @function verify - * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @memberof vtctldata.GetVersionRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - FindAllShardsInKeyspaceRequest.verify = function verify(message) { + GetVersionRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; + } return null; }; /** - * Creates a FindAllShardsInKeyspaceRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetVersionRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @memberof vtctldata.GetVersionRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.FindAllShardsInKeyspaceRequest} FindAllShardsInKeyspaceRequest + * @returns {vtctldata.GetVersionRequest} GetVersionRequest */ - FindAllShardsInKeyspaceRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.FindAllShardsInKeyspaceRequest) + GetVersionRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetVersionRequest) return object; - let message = new $root.vtctldata.FindAllShardsInKeyspaceRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); + let message = new $root.vtctldata.GetVersionRequest(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.GetVersionRequest.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } return message; }; /** - * Creates a plain object from a FindAllShardsInKeyspaceRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetVersionRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @memberof vtctldata.GetVersionRequest * @static - * @param {vtctldata.FindAllShardsInKeyspaceRequest} message FindAllShardsInKeyspaceRequest + * @param {vtctldata.GetVersionRequest} message GetVersionRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - FindAllShardsInKeyspaceRequest.toObject = function toObject(message, options) { + GetVersionRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) - object.keyspace = ""; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; + object.tablet_alias = null; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); return object; }; /** - * Converts this FindAllShardsInKeyspaceRequest to JSON. + * Converts this GetVersionRequest to JSON. 
* @function toJSON - * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @memberof vtctldata.GetVersionRequest * @instance * @returns {Object.} JSON object */ - FindAllShardsInKeyspaceRequest.prototype.toJSON = function toJSON() { + GetVersionRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for FindAllShardsInKeyspaceRequest + * Gets the default type url for GetVersionRequest * @function getTypeUrl - * @memberof vtctldata.FindAllShardsInKeyspaceRequest + * @memberof vtctldata.GetVersionRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - FindAllShardsInKeyspaceRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetVersionRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.FindAllShardsInKeyspaceRequest"; + return typeUrlPrefix + "/vtctldata.GetVersionRequest"; }; - return FindAllShardsInKeyspaceRequest; + return GetVersionRequest; })(); - vtctldata.FindAllShardsInKeyspaceResponse = (function() { + vtctldata.GetVersionResponse = (function() { /** - * Properties of a FindAllShardsInKeyspaceResponse. + * Properties of a GetVersionResponse. * @memberof vtctldata - * @interface IFindAllShardsInKeyspaceResponse - * @property {Object.|null} [shards] FindAllShardsInKeyspaceResponse shards + * @interface IGetVersionResponse + * @property {string|null} [version] GetVersionResponse version */ /** - * Constructs a new FindAllShardsInKeyspaceResponse. + * Constructs a new GetVersionResponse. * @memberof vtctldata - * @classdesc Represents a FindAllShardsInKeyspaceResponse. - * @implements IFindAllShardsInKeyspaceResponse + * @classdesc Represents a GetVersionResponse. 
+ * @implements IGetVersionResponse * @constructor - * @param {vtctldata.IFindAllShardsInKeyspaceResponse=} [properties] Properties to set + * @param {vtctldata.IGetVersionResponse=} [properties] Properties to set */ - function FindAllShardsInKeyspaceResponse(properties) { - this.shards = {}; + function GetVersionResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -111523,97 +132085,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * FindAllShardsInKeyspaceResponse shards. - * @member {Object.} shards - * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * GetVersionResponse version. + * @member {string} version + * @memberof vtctldata.GetVersionResponse * @instance */ - FindAllShardsInKeyspaceResponse.prototype.shards = $util.emptyObject; + GetVersionResponse.prototype.version = ""; /** - * Creates a new FindAllShardsInKeyspaceResponse instance using the specified properties. + * Creates a new GetVersionResponse instance using the specified properties. * @function create - * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @memberof vtctldata.GetVersionResponse * @static - * @param {vtctldata.IFindAllShardsInKeyspaceResponse=} [properties] Properties to set - * @returns {vtctldata.FindAllShardsInKeyspaceResponse} FindAllShardsInKeyspaceResponse instance + * @param {vtctldata.IGetVersionResponse=} [properties] Properties to set + * @returns {vtctldata.GetVersionResponse} GetVersionResponse instance */ - FindAllShardsInKeyspaceResponse.create = function create(properties) { - return new FindAllShardsInKeyspaceResponse(properties); + GetVersionResponse.create = function create(properties) { + return new GetVersionResponse(properties); }; /** - * Encodes the specified FindAllShardsInKeyspaceResponse message. Does not implicitly {@link vtctldata.FindAllShardsInKeyspaceResponse.verify|verify} messages. 
+ * Encodes the specified GetVersionResponse message. Does not implicitly {@link vtctldata.GetVersionResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @memberof vtctldata.GetVersionResponse * @static - * @param {vtctldata.IFindAllShardsInKeyspaceResponse} message FindAllShardsInKeyspaceResponse message or plain object to encode + * @param {vtctldata.IGetVersionResponse} message GetVersionResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - FindAllShardsInKeyspaceResponse.encode = function encode(message, writer) { + GetVersionResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.shards != null && Object.hasOwnProperty.call(message, "shards")) - for (let keys = Object.keys(message.shards), i = 0; i < keys.length; ++i) { - writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); - $root.vtctldata.Shard.encode(message.shards[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); - } + if (message.version != null && Object.hasOwnProperty.call(message, "version")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.version); return writer; }; /** - * Encodes the specified FindAllShardsInKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.FindAllShardsInKeyspaceResponse.verify|verify} messages. + * Encodes the specified GetVersionResponse message, length delimited. Does not implicitly {@link vtctldata.GetVersionResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @memberof vtctldata.GetVersionResponse * @static - * @param {vtctldata.IFindAllShardsInKeyspaceResponse} message FindAllShardsInKeyspaceResponse message or plain object to encode + * @param {vtctldata.IGetVersionResponse} message GetVersionResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - FindAllShardsInKeyspaceResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetVersionResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a FindAllShardsInKeyspaceResponse message from the specified reader or buffer. + * Decodes a GetVersionResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @memberof vtctldata.GetVersionResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.FindAllShardsInKeyspaceResponse} FindAllShardsInKeyspaceResponse + * @returns {vtctldata.GetVersionResponse} GetVersionResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FindAllShardsInKeyspaceResponse.decode = function decode(reader, length) { + GetVersionResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.FindAllShardsInKeyspaceResponse(), key, value; + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetVersionResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (message.shards === $util.emptyObject) - message.shards = {}; - let end2 = reader.uint32() + reader.pos; - key = ""; - value = null; - while (reader.pos < end2) { - let tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = $root.vtctldata.Shard.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.shards[key] = value; + message.version = reader.string(); break; } default: @@ -111625,145 +132165,122 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a FindAllShardsInKeyspaceResponse message from the specified reader or buffer, length delimited. + * Decodes a GetVersionResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @memberof vtctldata.GetVersionResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.FindAllShardsInKeyspaceResponse} FindAllShardsInKeyspaceResponse + * @returns {vtctldata.GetVersionResponse} GetVersionResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - FindAllShardsInKeyspaceResponse.decodeDelimited = function decodeDelimited(reader) { + GetVersionResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a FindAllShardsInKeyspaceResponse message. + * Verifies a GetVersionResponse message. 
* @function verify - * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @memberof vtctldata.GetVersionResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - FindAllShardsInKeyspaceResponse.verify = function verify(message) { + GetVersionResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.shards != null && message.hasOwnProperty("shards")) { - if (!$util.isObject(message.shards)) - return "shards: object expected"; - let key = Object.keys(message.shards); - for (let i = 0; i < key.length; ++i) { - let error = $root.vtctldata.Shard.verify(message.shards[key[i]]); - if (error) - return "shards." + error; - } - } + if (message.version != null && message.hasOwnProperty("version")) + if (!$util.isString(message.version)) + return "version: string expected"; return null; }; /** - * Creates a FindAllShardsInKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetVersionResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @memberof vtctldata.GetVersionResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.FindAllShardsInKeyspaceResponse} FindAllShardsInKeyspaceResponse + * @returns {vtctldata.GetVersionResponse} GetVersionResponse */ - FindAllShardsInKeyspaceResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.FindAllShardsInKeyspaceResponse) + GetVersionResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetVersionResponse) return object; - let message = new $root.vtctldata.FindAllShardsInKeyspaceResponse(); - if (object.shards) { - if (typeof object.shards !== "object") - throw TypeError(".vtctldata.FindAllShardsInKeyspaceResponse.shards: object expected"); - message.shards = {}; - for (let keys = Object.keys(object.shards), i = 0; i < keys.length; ++i) { - if (typeof object.shards[keys[i]] !== "object") - throw TypeError(".vtctldata.FindAllShardsInKeyspaceResponse.shards: object expected"); - message.shards[keys[i]] = $root.vtctldata.Shard.fromObject(object.shards[keys[i]]); - } - } + let message = new $root.vtctldata.GetVersionResponse(); + if (object.version != null) + message.version = String(object.version); return message; }; /** - * Creates a plain object from a FindAllShardsInKeyspaceResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetVersionResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @memberof vtctldata.GetVersionResponse * @static - * @param {vtctldata.FindAllShardsInKeyspaceResponse} message FindAllShardsInKeyspaceResponse + * @param {vtctldata.GetVersionResponse} message GetVersionResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - FindAllShardsInKeyspaceResponse.toObject = function toObject(message, options) { + GetVersionResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.objects || options.defaults) - object.shards = {}; - let keys2; - if (message.shards && (keys2 = Object.keys(message.shards)).length) { - object.shards = {}; - for (let j = 0; j < keys2.length; ++j) - object.shards[keys2[j]] = $root.vtctldata.Shard.toObject(message.shards[keys2[j]], options); - } + if (options.defaults) + object.version = ""; + if (message.version != null && message.hasOwnProperty("version")) + object.version = message.version; return object; }; /** - * Converts this FindAllShardsInKeyspaceResponse to JSON. + * Converts this GetVersionResponse to JSON. 
* @function toJSON - * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @memberof vtctldata.GetVersionResponse * @instance * @returns {Object.} JSON object */ - FindAllShardsInKeyspaceResponse.prototype.toJSON = function toJSON() { + GetVersionResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for FindAllShardsInKeyspaceResponse + * Gets the default type url for GetVersionResponse * @function getTypeUrl - * @memberof vtctldata.FindAllShardsInKeyspaceResponse + * @memberof vtctldata.GetVersionResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - FindAllShardsInKeyspaceResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetVersionResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.FindAllShardsInKeyspaceResponse"; + return typeUrlPrefix + "/vtctldata.GetVersionResponse"; }; - return FindAllShardsInKeyspaceResponse; + return GetVersionResponse; })(); - vtctldata.GetBackupsRequest = (function() { + vtctldata.GetVSchemaResponse = (function() { /** - * Properties of a GetBackupsRequest. + * Properties of a GetVSchemaResponse. * @memberof vtctldata - * @interface IGetBackupsRequest - * @property {string|null} [keyspace] GetBackupsRequest keyspace - * @property {string|null} [shard] GetBackupsRequest shard - * @property {number|null} [limit] GetBackupsRequest limit - * @property {boolean|null} [detailed] GetBackupsRequest detailed - * @property {number|null} [detailed_limit] GetBackupsRequest detailed_limit + * @interface IGetVSchemaResponse + * @property {vschema.IKeyspace|null} [v_schema] GetVSchemaResponse v_schema */ /** - * Constructs a new GetBackupsRequest. + * Constructs a new GetVSchemaResponse. 
* @memberof vtctldata - * @classdesc Represents a GetBackupsRequest. - * @implements IGetBackupsRequest + * @classdesc Represents a GetVSchemaResponse. + * @implements IGetVSchemaResponse * @constructor - * @param {vtctldata.IGetBackupsRequest=} [properties] Properties to set + * @param {vtctldata.IGetVSchemaResponse=} [properties] Properties to set */ - function GetBackupsRequest(properties) { + function GetVSchemaResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -111771,131 +132288,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetBackupsRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.GetBackupsRequest - * @instance - */ - GetBackupsRequest.prototype.keyspace = ""; - - /** - * GetBackupsRequest shard. - * @member {string} shard - * @memberof vtctldata.GetBackupsRequest - * @instance - */ - GetBackupsRequest.prototype.shard = ""; - - /** - * GetBackupsRequest limit. - * @member {number} limit - * @memberof vtctldata.GetBackupsRequest - * @instance - */ - GetBackupsRequest.prototype.limit = 0; - - /** - * GetBackupsRequest detailed. - * @member {boolean} detailed - * @memberof vtctldata.GetBackupsRequest - * @instance - */ - GetBackupsRequest.prototype.detailed = false; - - /** - * GetBackupsRequest detailed_limit. - * @member {number} detailed_limit - * @memberof vtctldata.GetBackupsRequest + * GetVSchemaResponse v_schema. + * @member {vschema.IKeyspace|null|undefined} v_schema + * @memberof vtctldata.GetVSchemaResponse * @instance */ - GetBackupsRequest.prototype.detailed_limit = 0; + GetVSchemaResponse.prototype.v_schema = null; /** - * Creates a new GetBackupsRequest instance using the specified properties. + * Creates a new GetVSchemaResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.GetBackupsRequest + * @memberof vtctldata.GetVSchemaResponse * @static - * @param {vtctldata.IGetBackupsRequest=} [properties] Properties to set - * @returns {vtctldata.GetBackupsRequest} GetBackupsRequest instance + * @param {vtctldata.IGetVSchemaResponse=} [properties] Properties to set + * @returns {vtctldata.GetVSchemaResponse} GetVSchemaResponse instance */ - GetBackupsRequest.create = function create(properties) { - return new GetBackupsRequest(properties); + GetVSchemaResponse.create = function create(properties) { + return new GetVSchemaResponse(properties); }; /** - * Encodes the specified GetBackupsRequest message. Does not implicitly {@link vtctldata.GetBackupsRequest.verify|verify} messages. + * Encodes the specified GetVSchemaResponse message. Does not implicitly {@link vtctldata.GetVSchemaResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.GetBackupsRequest + * @memberof vtctldata.GetVSchemaResponse * @static - * @param {vtctldata.IGetBackupsRequest} message GetBackupsRequest message or plain object to encode + * @param {vtctldata.IGetVSchemaResponse} message GetVSchemaResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetBackupsRequest.encode = function encode(message, writer) { + GetVSchemaResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.limit != null && Object.hasOwnProperty.call(message, "limit")) - writer.uint32(/* id 3, wireType 0 =*/24).uint32(message.limit); - if (message.detailed != null && Object.hasOwnProperty.call(message, "detailed")) - 
writer.uint32(/* id 4, wireType 0 =*/32).bool(message.detailed); - if (message.detailed_limit != null && Object.hasOwnProperty.call(message, "detailed_limit")) - writer.uint32(/* id 5, wireType 0 =*/40).uint32(message.detailed_limit); + if (message.v_schema != null && Object.hasOwnProperty.call(message, "v_schema")) + $root.vschema.Keyspace.encode(message.v_schema, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified GetBackupsRequest message, length delimited. Does not implicitly {@link vtctldata.GetBackupsRequest.verify|verify} messages. + * Encodes the specified GetVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.GetVSchemaResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetBackupsRequest + * @memberof vtctldata.GetVSchemaResponse * @static - * @param {vtctldata.IGetBackupsRequest} message GetBackupsRequest message or plain object to encode + * @param {vtctldata.IGetVSchemaResponse} message GetVSchemaResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetBackupsRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetVSchemaResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetBackupsRequest message from the specified reader or buffer. + * Decodes a GetVSchemaResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.GetBackupsRequest + * @memberof vtctldata.GetVSchemaResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetBackupsRequest} GetBackupsRequest + * @returns {vtctldata.GetVSchemaResponse} GetVSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetBackupsRequest.decode = function decode(reader, length) { + GetVSchemaResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetBackupsRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetVSchemaResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - message.shard = reader.string(); - break; - } - case 3: { - message.limit = reader.uint32(); - break; - } - case 4: { - message.detailed = reader.bool(); - break; - } - case 5: { - message.detailed_limit = reader.uint32(); + message.v_schema = $root.vschema.Keyspace.decode(reader, reader.uint32()); break; } default: @@ -111907,156 +132368,131 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetBackupsRequest message from the specified reader or buffer, length delimited. + * Decodes a GetVSchemaResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.GetBackupsRequest + * @memberof vtctldata.GetVSchemaResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetBackupsRequest} GetBackupsRequest + * @returns {vtctldata.GetVSchemaResponse} GetVSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetBackupsRequest.decodeDelimited = function decodeDelimited(reader) { + GetVSchemaResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetBackupsRequest message. + * Verifies a GetVSchemaResponse message. * @function verify - * @memberof vtctldata.GetBackupsRequest + * @memberof vtctldata.GetVSchemaResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetBackupsRequest.verify = function verify(message) { + GetVSchemaResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.limit != null && message.hasOwnProperty("limit")) - if (!$util.isInteger(message.limit)) - return "limit: integer expected"; - if (message.detailed != null && message.hasOwnProperty("detailed")) - if (typeof message.detailed !== "boolean") - return "detailed: boolean expected"; - if (message.detailed_limit != null && message.hasOwnProperty("detailed_limit")) - if (!$util.isInteger(message.detailed_limit)) - return "detailed_limit: integer 
expected"; + if (message.v_schema != null && message.hasOwnProperty("v_schema")) { + let error = $root.vschema.Keyspace.verify(message.v_schema); + if (error) + return "v_schema." + error; + } return null; }; /** - * Creates a GetBackupsRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetVSchemaResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.GetBackupsRequest + * @memberof vtctldata.GetVSchemaResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetBackupsRequest} GetBackupsRequest + * @returns {vtctldata.GetVSchemaResponse} GetVSchemaResponse */ - GetBackupsRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetBackupsRequest) + GetVSchemaResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetVSchemaResponse) return object; - let message = new $root.vtctldata.GetBackupsRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.limit != null) - message.limit = object.limit >>> 0; - if (object.detailed != null) - message.detailed = Boolean(object.detailed); - if (object.detailed_limit != null) - message.detailed_limit = object.detailed_limit >>> 0; + let message = new $root.vtctldata.GetVSchemaResponse(); + if (object.v_schema != null) { + if (typeof object.v_schema !== "object") + throw TypeError(".vtctldata.GetVSchemaResponse.v_schema: object expected"); + message.v_schema = $root.vschema.Keyspace.fromObject(object.v_schema); + } return message; }; /** - * Creates a plain object from a GetBackupsRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetVSchemaResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetBackupsRequest + * @memberof vtctldata.GetVSchemaResponse * @static - * @param {vtctldata.GetBackupsRequest} message GetBackupsRequest + * @param {vtctldata.GetVSchemaResponse} message GetVSchemaResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetBackupsRequest.toObject = function toObject(message, options) { + GetVSchemaResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.keyspace = ""; - object.shard = ""; - object.limit = 0; - object.detailed = false; - object.detailed_limit = 0; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.limit != null && message.hasOwnProperty("limit")) - object.limit = message.limit; - if (message.detailed != null && message.hasOwnProperty("detailed")) - object.detailed = message.detailed; - if (message.detailed_limit != null && message.hasOwnProperty("detailed_limit")) - object.detailed_limit = message.detailed_limit; + if (options.defaults) + object.v_schema = null; + if (message.v_schema != null && message.hasOwnProperty("v_schema")) + object.v_schema = $root.vschema.Keyspace.toObject(message.v_schema, options); return object; }; /** - * Converts this GetBackupsRequest to JSON. + * Converts this GetVSchemaResponse to JSON. 
* @function toJSON - * @memberof vtctldata.GetBackupsRequest + * @memberof vtctldata.GetVSchemaResponse * @instance * @returns {Object.} JSON object */ - GetBackupsRequest.prototype.toJSON = function toJSON() { + GetVSchemaResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetBackupsRequest + * Gets the default type url for GetVSchemaResponse * @function getTypeUrl - * @memberof vtctldata.GetBackupsRequest + * @memberof vtctldata.GetVSchemaResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetBackupsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetVSchemaResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetBackupsRequest"; + return typeUrlPrefix + "/vtctldata.GetVSchemaResponse"; }; - return GetBackupsRequest; + return GetVSchemaResponse; })(); - vtctldata.GetBackupsResponse = (function() { + vtctldata.GetWorkflowsRequest = (function() { /** - * Properties of a GetBackupsResponse. + * Properties of a GetWorkflowsRequest. * @memberof vtctldata - * @interface IGetBackupsResponse - * @property {Array.|null} [backups] GetBackupsResponse backups + * @interface IGetWorkflowsRequest + * @property {string|null} [keyspace] GetWorkflowsRequest keyspace + * @property {boolean|null} [active_only] GetWorkflowsRequest active_only + * @property {boolean|null} [name_only] GetWorkflowsRequest name_only + * @property {string|null} [workflow] GetWorkflowsRequest workflow + * @property {boolean|null} [include_logs] GetWorkflowsRequest include_logs */ /** - * Constructs a new GetBackupsResponse. + * Constructs a new GetWorkflowsRequest. * @memberof vtctldata - * @classdesc Represents a GetBackupsResponse. 
- * @implements IGetBackupsResponse + * @classdesc Represents a GetWorkflowsRequest. + * @implements IGetWorkflowsRequest * @constructor - * @param {vtctldata.IGetBackupsResponse=} [properties] Properties to set + * @param {vtctldata.IGetWorkflowsRequest=} [properties] Properties to set */ - function GetBackupsResponse(properties) { - this.backups = []; + function GetWorkflowsRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -112064,78 +132500,131 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetBackupsResponse backups. - * @member {Array.} backups - * @memberof vtctldata.GetBackupsResponse + * GetWorkflowsRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.GetWorkflowsRequest * @instance */ - GetBackupsResponse.prototype.backups = $util.emptyArray; + GetWorkflowsRequest.prototype.keyspace = ""; /** - * Creates a new GetBackupsResponse instance using the specified properties. + * GetWorkflowsRequest active_only. + * @member {boolean} active_only + * @memberof vtctldata.GetWorkflowsRequest + * @instance + */ + GetWorkflowsRequest.prototype.active_only = false; + + /** + * GetWorkflowsRequest name_only. + * @member {boolean} name_only + * @memberof vtctldata.GetWorkflowsRequest + * @instance + */ + GetWorkflowsRequest.prototype.name_only = false; + + /** + * GetWorkflowsRequest workflow. + * @member {string} workflow + * @memberof vtctldata.GetWorkflowsRequest + * @instance + */ + GetWorkflowsRequest.prototype.workflow = ""; + + /** + * GetWorkflowsRequest include_logs. + * @member {boolean} include_logs + * @memberof vtctldata.GetWorkflowsRequest + * @instance + */ + GetWorkflowsRequest.prototype.include_logs = false; + + /** + * Creates a new GetWorkflowsRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.GetBackupsResponse + * @memberof vtctldata.GetWorkflowsRequest * @static - * @param {vtctldata.IGetBackupsResponse=} [properties] Properties to set - * @returns {vtctldata.GetBackupsResponse} GetBackupsResponse instance + * @param {vtctldata.IGetWorkflowsRequest=} [properties] Properties to set + * @returns {vtctldata.GetWorkflowsRequest} GetWorkflowsRequest instance */ - GetBackupsResponse.create = function create(properties) { - return new GetBackupsResponse(properties); + GetWorkflowsRequest.create = function create(properties) { + return new GetWorkflowsRequest(properties); }; /** - * Encodes the specified GetBackupsResponse message. Does not implicitly {@link vtctldata.GetBackupsResponse.verify|verify} messages. + * Encodes the specified GetWorkflowsRequest message. Does not implicitly {@link vtctldata.GetWorkflowsRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.GetBackupsResponse + * @memberof vtctldata.GetWorkflowsRequest * @static - * @param {vtctldata.IGetBackupsResponse} message GetBackupsResponse message or plain object to encode + * @param {vtctldata.IGetWorkflowsRequest} message GetWorkflowsRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetBackupsResponse.encode = function encode(message, writer) { + GetWorkflowsRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.backups != null && message.backups.length) - for (let i = 0; i < message.backups.length; ++i) - $root.mysqlctl.BackupInfo.encode(message.backups[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.active_only != null && Object.hasOwnProperty.call(message, "active_only")) + writer.uint32(/* id 2, wireType 0 
=*/16).bool(message.active_only); + if (message.name_only != null && Object.hasOwnProperty.call(message, "name_only")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.name_only); + if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.workflow); + if (message.include_logs != null && Object.hasOwnProperty.call(message, "include_logs")) + writer.uint32(/* id 5, wireType 0 =*/40).bool(message.include_logs); return writer; }; /** - * Encodes the specified GetBackupsResponse message, length delimited. Does not implicitly {@link vtctldata.GetBackupsResponse.verify|verify} messages. + * Encodes the specified GetWorkflowsRequest message, length delimited. Does not implicitly {@link vtctldata.GetWorkflowsRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetBackupsResponse + * @memberof vtctldata.GetWorkflowsRequest * @static - * @param {vtctldata.IGetBackupsResponse} message GetBackupsResponse message or plain object to encode + * @param {vtctldata.IGetWorkflowsRequest} message GetWorkflowsRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetBackupsResponse.encodeDelimited = function encodeDelimited(message, writer) { + GetWorkflowsRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetBackupsResponse message from the specified reader or buffer. + * Decodes a GetWorkflowsRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.GetBackupsResponse + * @memberof vtctldata.GetWorkflowsRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetBackupsResponse} GetBackupsResponse + * @returns {vtctldata.GetWorkflowsRequest} GetWorkflowsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetBackupsResponse.decode = function decode(reader, length) { + GetWorkflowsRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetBackupsResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetWorkflowsRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.backups && message.backups.length)) - message.backups = []; - message.backups.push($root.mysqlctl.BackupInfo.decode(reader, reader.uint32())); + message.keyspace = reader.string(); + break; + } + case 2: { + message.active_only = reader.bool(); + break; + } + case 3: { + message.name_only = reader.bool(); + break; + } + case 4: { + message.workflow = reader.string(); + break; + } + case 5: { + message.include_logs = reader.bool(); break; } default: @@ -112147,139 +132636,156 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetBackupsResponse message from the specified reader or buffer, length delimited. + * Decodes a GetWorkflowsRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.GetBackupsResponse + * @memberof vtctldata.GetWorkflowsRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetBackupsResponse} GetBackupsResponse + * @returns {vtctldata.GetWorkflowsRequest} GetWorkflowsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetBackupsResponse.decodeDelimited = function decodeDelimited(reader) { + GetWorkflowsRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetBackupsResponse message. + * Verifies a GetWorkflowsRequest message. * @function verify - * @memberof vtctldata.GetBackupsResponse + * @memberof vtctldata.GetWorkflowsRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetBackupsResponse.verify = function verify(message) { + GetWorkflowsRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.backups != null && message.hasOwnProperty("backups")) { - if (!Array.isArray(message.backups)) - return "backups: array expected"; - for (let i = 0; i < message.backups.length; ++i) { - let error = $root.mysqlctl.BackupInfo.verify(message.backups[i]); - if (error) - return "backups." 
+ error; - } - } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.active_only != null && message.hasOwnProperty("active_only")) + if (typeof message.active_only !== "boolean") + return "active_only: boolean expected"; + if (message.name_only != null && message.hasOwnProperty("name_only")) + if (typeof message.name_only !== "boolean") + return "name_only: boolean expected"; + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; + if (message.include_logs != null && message.hasOwnProperty("include_logs")) + if (typeof message.include_logs !== "boolean") + return "include_logs: boolean expected"; return null; }; /** - * Creates a GetBackupsResponse message from a plain object. Also converts values to their respective internal types. + * Creates a GetWorkflowsRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetBackupsResponse + * @memberof vtctldata.GetWorkflowsRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetBackupsResponse} GetBackupsResponse + * @returns {vtctldata.GetWorkflowsRequest} GetWorkflowsRequest */ - GetBackupsResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetBackupsResponse) + GetWorkflowsRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetWorkflowsRequest) return object; - let message = new $root.vtctldata.GetBackupsResponse(); - if (object.backups) { - if (!Array.isArray(object.backups)) - throw TypeError(".vtctldata.GetBackupsResponse.backups: array expected"); - message.backups = []; - for (let i = 0; i < object.backups.length; ++i) { - if (typeof object.backups[i] !== "object") - throw TypeError(".vtctldata.GetBackupsResponse.backups: object expected"); - message.backups[i] = $root.mysqlctl.BackupInfo.fromObject(object.backups[i]); - } - } + let message = new $root.vtctldata.GetWorkflowsRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.active_only != null) + message.active_only = Boolean(object.active_only); + if (object.name_only != null) + message.name_only = Boolean(object.name_only); + if (object.workflow != null) + message.workflow = String(object.workflow); + if (object.include_logs != null) + message.include_logs = Boolean(object.include_logs); return message; }; /** - * Creates a plain object from a GetBackupsResponse message. Also converts values to other types if specified. + * Creates a plain object from a GetWorkflowsRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetBackupsResponse + * @memberof vtctldata.GetWorkflowsRequest * @static - * @param {vtctldata.GetBackupsResponse} message GetBackupsResponse + * @param {vtctldata.GetWorkflowsRequest} message GetWorkflowsRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetBackupsResponse.toObject = function toObject(message, options) { + GetWorkflowsRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.backups = []; - if (message.backups && message.backups.length) { - object.backups = []; - for (let j = 0; j < message.backups.length; ++j) - object.backups[j] = $root.mysqlctl.BackupInfo.toObject(message.backups[j], options); + if (options.defaults) { + object.keyspace = ""; + object.active_only = false; + object.name_only = false; + object.workflow = ""; + object.include_logs = false; } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.active_only != null && message.hasOwnProperty("active_only")) + object.active_only = message.active_only; + if (message.name_only != null && message.hasOwnProperty("name_only")) + object.name_only = message.name_only; + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; + if (message.include_logs != null && message.hasOwnProperty("include_logs")) + object.include_logs = message.include_logs; return object; }; /** - * Converts this GetBackupsResponse to JSON. + * Converts this GetWorkflowsRequest to JSON. 
* @function toJSON - * @memberof vtctldata.GetBackupsResponse + * @memberof vtctldata.GetWorkflowsRequest * @instance * @returns {Object.} JSON object */ - GetBackupsResponse.prototype.toJSON = function toJSON() { + GetWorkflowsRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetBackupsResponse + * Gets the default type url for GetWorkflowsRequest * @function getTypeUrl - * @memberof vtctldata.GetBackupsResponse + * @memberof vtctldata.GetWorkflowsRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetBackupsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetWorkflowsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetBackupsResponse"; + return typeUrlPrefix + "/vtctldata.GetWorkflowsRequest"; }; - return GetBackupsResponse; + return GetWorkflowsRequest; })(); - vtctldata.GetCellInfoRequest = (function() { + vtctldata.GetWorkflowsResponse = (function() { /** - * Properties of a GetCellInfoRequest. + * Properties of a GetWorkflowsResponse. * @memberof vtctldata - * @interface IGetCellInfoRequest - * @property {string|null} [cell] GetCellInfoRequest cell + * @interface IGetWorkflowsResponse + * @property {Array.|null} [workflows] GetWorkflowsResponse workflows */ /** - * Constructs a new GetCellInfoRequest. + * Constructs a new GetWorkflowsResponse. * @memberof vtctldata - * @classdesc Represents a GetCellInfoRequest. - * @implements IGetCellInfoRequest + * @classdesc Represents a GetWorkflowsResponse. 
+ * @implements IGetWorkflowsResponse * @constructor - * @param {vtctldata.IGetCellInfoRequest=} [properties] Properties to set + * @param {vtctldata.IGetWorkflowsResponse=} [properties] Properties to set */ - function GetCellInfoRequest(properties) { + function GetWorkflowsResponse(properties) { + this.workflows = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -112287,75 +132793,78 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetCellInfoRequest cell. - * @member {string} cell - * @memberof vtctldata.GetCellInfoRequest + * GetWorkflowsResponse workflows. + * @member {Array.} workflows + * @memberof vtctldata.GetWorkflowsResponse * @instance */ - GetCellInfoRequest.prototype.cell = ""; + GetWorkflowsResponse.prototype.workflows = $util.emptyArray; /** - * Creates a new GetCellInfoRequest instance using the specified properties. + * Creates a new GetWorkflowsResponse instance using the specified properties. * @function create - * @memberof vtctldata.GetCellInfoRequest + * @memberof vtctldata.GetWorkflowsResponse * @static - * @param {vtctldata.IGetCellInfoRequest=} [properties] Properties to set - * @returns {vtctldata.GetCellInfoRequest} GetCellInfoRequest instance + * @param {vtctldata.IGetWorkflowsResponse=} [properties] Properties to set + * @returns {vtctldata.GetWorkflowsResponse} GetWorkflowsResponse instance */ - GetCellInfoRequest.create = function create(properties) { - return new GetCellInfoRequest(properties); + GetWorkflowsResponse.create = function create(properties) { + return new GetWorkflowsResponse(properties); }; /** - * Encodes the specified GetCellInfoRequest message. Does not implicitly {@link vtctldata.GetCellInfoRequest.verify|verify} messages. + * Encodes the specified GetWorkflowsResponse message. Does not implicitly {@link vtctldata.GetWorkflowsResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetCellInfoRequest + * @memberof vtctldata.GetWorkflowsResponse * @static - * @param {vtctldata.IGetCellInfoRequest} message GetCellInfoRequest message or plain object to encode + * @param {vtctldata.IGetWorkflowsResponse} message GetWorkflowsResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetCellInfoRequest.encode = function encode(message, writer) { + GetWorkflowsResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.cell != null && Object.hasOwnProperty.call(message, "cell")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.cell); + if (message.workflows != null && message.workflows.length) + for (let i = 0; i < message.workflows.length; ++i) + $root.vtctldata.Workflow.encode(message.workflows[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified GetCellInfoRequest message, length delimited. Does not implicitly {@link vtctldata.GetCellInfoRequest.verify|verify} messages. + * Encodes the specified GetWorkflowsResponse message, length delimited. Does not implicitly {@link vtctldata.GetWorkflowsResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetCellInfoRequest + * @memberof vtctldata.GetWorkflowsResponse * @static - * @param {vtctldata.IGetCellInfoRequest} message GetCellInfoRequest message or plain object to encode + * @param {vtctldata.IGetWorkflowsResponse} message GetWorkflowsResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetCellInfoRequest.encodeDelimited = function encodeDelimited(message, writer) { + GetWorkflowsResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetCellInfoRequest message from the specified reader or buffer. + * Decodes a GetWorkflowsResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetCellInfoRequest + * @memberof vtctldata.GetWorkflowsResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetCellInfoRequest} GetCellInfoRequest + * @returns {vtctldata.GetWorkflowsResponse} GetWorkflowsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetCellInfoRequest.decode = function decode(reader, length) { + GetWorkflowsResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetCellInfoRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetWorkflowsResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.cell = reader.string(); + if (!(message.workflows && message.workflows.length)) + message.workflows = []; + message.workflows.push($root.vtctldata.Workflow.decode(reader, reader.uint32())); break; } default: @@ -112367,122 +132876,143 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetCellInfoRequest message from the specified reader or buffer, length delimited. + * Decodes a GetWorkflowsResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetCellInfoRequest + * @memberof vtctldata.GetWorkflowsResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetCellInfoRequest} GetCellInfoRequest + * @returns {vtctldata.GetWorkflowsResponse} GetWorkflowsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetCellInfoRequest.decodeDelimited = function decodeDelimited(reader) { + GetWorkflowsResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetCellInfoRequest message. + * Verifies a GetWorkflowsResponse message. 
* @function verify - * @memberof vtctldata.GetCellInfoRequest + * @memberof vtctldata.GetWorkflowsResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetCellInfoRequest.verify = function verify(message) { + GetWorkflowsResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.cell != null && message.hasOwnProperty("cell")) - if (!$util.isString(message.cell)) - return "cell: string expected"; + if (message.workflows != null && message.hasOwnProperty("workflows")) { + if (!Array.isArray(message.workflows)) + return "workflows: array expected"; + for (let i = 0; i < message.workflows.length; ++i) { + let error = $root.vtctldata.Workflow.verify(message.workflows[i]); + if (error) + return "workflows." + error; + } + } return null; }; /** - * Creates a GetCellInfoRequest message from a plain object. Also converts values to their respective internal types. + * Creates a GetWorkflowsResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetCellInfoRequest + * @memberof vtctldata.GetWorkflowsResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetCellInfoRequest} GetCellInfoRequest + * @returns {vtctldata.GetWorkflowsResponse} GetWorkflowsResponse */ - GetCellInfoRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetCellInfoRequest) + GetWorkflowsResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetWorkflowsResponse) return object; - let message = new $root.vtctldata.GetCellInfoRequest(); - if (object.cell != null) - message.cell = String(object.cell); + let message = new $root.vtctldata.GetWorkflowsResponse(); + if (object.workflows) { + if (!Array.isArray(object.workflows)) + throw TypeError(".vtctldata.GetWorkflowsResponse.workflows: array expected"); + message.workflows = []; + for (let i = 0; i < object.workflows.length; ++i) { + if (typeof object.workflows[i] !== "object") + throw TypeError(".vtctldata.GetWorkflowsResponse.workflows: object expected"); + message.workflows[i] = $root.vtctldata.Workflow.fromObject(object.workflows[i]); + } + } return message; }; /** - * Creates a plain object from a GetCellInfoRequest message. Also converts values to other types if specified. + * Creates a plain object from a GetWorkflowsResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetCellInfoRequest + * @memberof vtctldata.GetWorkflowsResponse * @static - * @param {vtctldata.GetCellInfoRequest} message GetCellInfoRequest + * @param {vtctldata.GetWorkflowsResponse} message GetWorkflowsResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetCellInfoRequest.toObject = function toObject(message, options) { + GetWorkflowsResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.cell = ""; - if (message.cell != null && message.hasOwnProperty("cell")) - object.cell = message.cell; + if (options.arrays || options.defaults) + object.workflows = []; + if (message.workflows && message.workflows.length) { + object.workflows = []; + for (let j = 0; j < message.workflows.length; ++j) + object.workflows[j] = $root.vtctldata.Workflow.toObject(message.workflows[j], options); + } return object; }; /** - * Converts this GetCellInfoRequest to JSON. + * Converts this GetWorkflowsResponse to JSON. 
* @function toJSON - * @memberof vtctldata.GetCellInfoRequest + * @memberof vtctldata.GetWorkflowsResponse * @instance * @returns {Object.} JSON object */ - GetCellInfoRequest.prototype.toJSON = function toJSON() { + GetWorkflowsResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetCellInfoRequest + * Gets the default type url for GetWorkflowsResponse * @function getTypeUrl - * @memberof vtctldata.GetCellInfoRequest + * @memberof vtctldata.GetWorkflowsResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetCellInfoRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + GetWorkflowsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetCellInfoRequest"; + return typeUrlPrefix + "/vtctldata.GetWorkflowsResponse"; }; - return GetCellInfoRequest; + return GetWorkflowsResponse; })(); - vtctldata.GetCellInfoResponse = (function() { + vtctldata.InitShardPrimaryRequest = (function() { /** - * Properties of a GetCellInfoResponse. + * Properties of an InitShardPrimaryRequest. * @memberof vtctldata - * @interface IGetCellInfoResponse - * @property {topodata.ICellInfo|null} [cell_info] GetCellInfoResponse cell_info + * @interface IInitShardPrimaryRequest + * @property {string|null} [keyspace] InitShardPrimaryRequest keyspace + * @property {string|null} [shard] InitShardPrimaryRequest shard + * @property {topodata.ITabletAlias|null} [primary_elect_tablet_alias] InitShardPrimaryRequest primary_elect_tablet_alias + * @property {boolean|null} [force] InitShardPrimaryRequest force + * @property {vttime.IDuration|null} [wait_replicas_timeout] InitShardPrimaryRequest wait_replicas_timeout */ /** - * Constructs a new GetCellInfoResponse. 
+ * Constructs a new InitShardPrimaryRequest. * @memberof vtctldata - * @classdesc Represents a GetCellInfoResponse. - * @implements IGetCellInfoResponse + * @classdesc Represents an InitShardPrimaryRequest. + * @implements IInitShardPrimaryRequest * @constructor - * @param {vtctldata.IGetCellInfoResponse=} [properties] Properties to set + * @param {vtctldata.IInitShardPrimaryRequest=} [properties] Properties to set */ - function GetCellInfoResponse(properties) { + function InitShardPrimaryRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -112490,75 +133020,131 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetCellInfoResponse cell_info. - * @member {topodata.ICellInfo|null|undefined} cell_info - * @memberof vtctldata.GetCellInfoResponse + * InitShardPrimaryRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.InitShardPrimaryRequest * @instance */ - GetCellInfoResponse.prototype.cell_info = null; + InitShardPrimaryRequest.prototype.keyspace = ""; /** - * Creates a new GetCellInfoResponse instance using the specified properties. + * InitShardPrimaryRequest shard. + * @member {string} shard + * @memberof vtctldata.InitShardPrimaryRequest + * @instance + */ + InitShardPrimaryRequest.prototype.shard = ""; + + /** + * InitShardPrimaryRequest primary_elect_tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} primary_elect_tablet_alias + * @memberof vtctldata.InitShardPrimaryRequest + * @instance + */ + InitShardPrimaryRequest.prototype.primary_elect_tablet_alias = null; + + /** + * InitShardPrimaryRequest force. + * @member {boolean} force + * @memberof vtctldata.InitShardPrimaryRequest + * @instance + */ + InitShardPrimaryRequest.prototype.force = false; + + /** + * InitShardPrimaryRequest wait_replicas_timeout. 
+ * @member {vttime.IDuration|null|undefined} wait_replicas_timeout + * @memberof vtctldata.InitShardPrimaryRequest + * @instance + */ + InitShardPrimaryRequest.prototype.wait_replicas_timeout = null; + + /** + * Creates a new InitShardPrimaryRequest instance using the specified properties. * @function create - * @memberof vtctldata.GetCellInfoResponse + * @memberof vtctldata.InitShardPrimaryRequest * @static - * @param {vtctldata.IGetCellInfoResponse=} [properties] Properties to set - * @returns {vtctldata.GetCellInfoResponse} GetCellInfoResponse instance + * @param {vtctldata.IInitShardPrimaryRequest=} [properties] Properties to set + * @returns {vtctldata.InitShardPrimaryRequest} InitShardPrimaryRequest instance */ - GetCellInfoResponse.create = function create(properties) { - return new GetCellInfoResponse(properties); + InitShardPrimaryRequest.create = function create(properties) { + return new InitShardPrimaryRequest(properties); }; /** - * Encodes the specified GetCellInfoResponse message. Does not implicitly {@link vtctldata.GetCellInfoResponse.verify|verify} messages. + * Encodes the specified InitShardPrimaryRequest message. Does not implicitly {@link vtctldata.InitShardPrimaryRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetCellInfoResponse + * @memberof vtctldata.InitShardPrimaryRequest * @static - * @param {vtctldata.IGetCellInfoResponse} message GetCellInfoResponse message or plain object to encode + * @param {vtctldata.IInitShardPrimaryRequest} message InitShardPrimaryRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetCellInfoResponse.encode = function encode(message, writer) { + InitShardPrimaryRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.cell_info != null && Object.hasOwnProperty.call(message, "cell_info")) - $root.topodata.CellInfo.encode(message.cell_info, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.primary_elect_tablet_alias != null && Object.hasOwnProperty.call(message, "primary_elect_tablet_alias")) + $root.topodata.TabletAlias.encode(message.primary_elect_tablet_alias, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.force != null && Object.hasOwnProperty.call(message, "force")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.force); + if (message.wait_replicas_timeout != null && Object.hasOwnProperty.call(message, "wait_replicas_timeout")) + $root.vttime.Duration.encode(message.wait_replicas_timeout, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); return writer; }; /** - * Encodes the specified GetCellInfoResponse message, length delimited. Does not implicitly {@link vtctldata.GetCellInfoResponse.verify|verify} messages. + * Encodes the specified InitShardPrimaryRequest message, length delimited. 
Does not implicitly {@link vtctldata.InitShardPrimaryRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetCellInfoResponse + * @memberof vtctldata.InitShardPrimaryRequest * @static - * @param {vtctldata.IGetCellInfoResponse} message GetCellInfoResponse message or plain object to encode + * @param {vtctldata.IInitShardPrimaryRequest} message InitShardPrimaryRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetCellInfoResponse.encodeDelimited = function encodeDelimited(message, writer) { + InitShardPrimaryRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetCellInfoResponse message from the specified reader or buffer. + * Decodes an InitShardPrimaryRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetCellInfoResponse + * @memberof vtctldata.InitShardPrimaryRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetCellInfoResponse} GetCellInfoResponse + * @returns {vtctldata.InitShardPrimaryRequest} InitShardPrimaryRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetCellInfoResponse.decode = function decode(reader, length) { + InitShardPrimaryRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetCellInfoResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.InitShardPrimaryRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.cell_info = $root.topodata.CellInfo.decode(reader, reader.uint32()); + message.keyspace = reader.string(); + break; + } + case 2: { + message.shard = reader.string(); + break; + } + case 3: { + message.primary_elect_tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 4: { + message.force = reader.bool(); + break; + } + case 5: { + message.wait_replicas_timeout = $root.vttime.Duration.decode(reader, reader.uint32()); break; } default: @@ -112570,126 +133156,166 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetCellInfoResponse message from the specified reader or buffer, length delimited. + * Decodes an InitShardPrimaryRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetCellInfoResponse + * @memberof vtctldata.InitShardPrimaryRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetCellInfoResponse} GetCellInfoResponse + * @returns {vtctldata.InitShardPrimaryRequest} InitShardPrimaryRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetCellInfoResponse.decodeDelimited = function decodeDelimited(reader) { + InitShardPrimaryRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetCellInfoResponse message. + * Verifies an InitShardPrimaryRequest message. 
* @function verify - * @memberof vtctldata.GetCellInfoResponse + * @memberof vtctldata.InitShardPrimaryRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetCellInfoResponse.verify = function verify(message) { + InitShardPrimaryRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.cell_info != null && message.hasOwnProperty("cell_info")) { - let error = $root.topodata.CellInfo.verify(message.cell_info); + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.primary_elect_tablet_alias != null && message.hasOwnProperty("primary_elect_tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.primary_elect_tablet_alias); if (error) - return "cell_info." + error; + return "primary_elect_tablet_alias." + error; + } + if (message.force != null && message.hasOwnProperty("force")) + if (typeof message.force !== "boolean") + return "force: boolean expected"; + if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout")) { + let error = $root.vttime.Duration.verify(message.wait_replicas_timeout); + if (error) + return "wait_replicas_timeout." + error; } return null; }; /** - * Creates a GetCellInfoResponse message from a plain object. Also converts values to their respective internal types. + * Creates an InitShardPrimaryRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetCellInfoResponse + * @memberof vtctldata.InitShardPrimaryRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetCellInfoResponse} GetCellInfoResponse + * @returns {vtctldata.InitShardPrimaryRequest} InitShardPrimaryRequest */ - GetCellInfoResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetCellInfoResponse) + InitShardPrimaryRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.InitShardPrimaryRequest) return object; - let message = new $root.vtctldata.GetCellInfoResponse(); - if (object.cell_info != null) { - if (typeof object.cell_info !== "object") - throw TypeError(".vtctldata.GetCellInfoResponse.cell_info: object expected"); - message.cell_info = $root.topodata.CellInfo.fromObject(object.cell_info); + let message = new $root.vtctldata.InitShardPrimaryRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + if (object.primary_elect_tablet_alias != null) { + if (typeof object.primary_elect_tablet_alias !== "object") + throw TypeError(".vtctldata.InitShardPrimaryRequest.primary_elect_tablet_alias: object expected"); + message.primary_elect_tablet_alias = $root.topodata.TabletAlias.fromObject(object.primary_elect_tablet_alias); + } + if (object.force != null) + message.force = Boolean(object.force); + if (object.wait_replicas_timeout != null) { + if (typeof object.wait_replicas_timeout !== "object") + throw TypeError(".vtctldata.InitShardPrimaryRequest.wait_replicas_timeout: object expected"); + message.wait_replicas_timeout = $root.vttime.Duration.fromObject(object.wait_replicas_timeout); } return message; }; /** - * Creates a plain object from a GetCellInfoResponse message. Also converts values to other types if specified. + * Creates a plain object from an InitShardPrimaryRequest message. 
Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.GetCellInfoResponse + * @memberof vtctldata.InitShardPrimaryRequest * @static - * @param {vtctldata.GetCellInfoResponse} message GetCellInfoResponse + * @param {vtctldata.InitShardPrimaryRequest} message InitShardPrimaryRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetCellInfoResponse.toObject = function toObject(message, options) { + InitShardPrimaryRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.cell_info = null; - if (message.cell_info != null && message.hasOwnProperty("cell_info")) - object.cell_info = $root.topodata.CellInfo.toObject(message.cell_info, options); + if (options.defaults) { + object.keyspace = ""; + object.shard = ""; + object.primary_elect_tablet_alias = null; + object.force = false; + object.wait_replicas_timeout = null; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.primary_elect_tablet_alias != null && message.hasOwnProperty("primary_elect_tablet_alias")) + object.primary_elect_tablet_alias = $root.topodata.TabletAlias.toObject(message.primary_elect_tablet_alias, options); + if (message.force != null && message.hasOwnProperty("force")) + object.force = message.force; + if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout")) + object.wait_replicas_timeout = $root.vttime.Duration.toObject(message.wait_replicas_timeout, options); return object; }; /** - * Converts this GetCellInfoResponse to JSON. + * Converts this InitShardPrimaryRequest to JSON. 
* @function toJSON - * @memberof vtctldata.GetCellInfoResponse + * @memberof vtctldata.InitShardPrimaryRequest * @instance * @returns {Object.} JSON object */ - GetCellInfoResponse.prototype.toJSON = function toJSON() { + InitShardPrimaryRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetCellInfoResponse + * Gets the default type url for InitShardPrimaryRequest * @function getTypeUrl - * @memberof vtctldata.GetCellInfoResponse + * @memberof vtctldata.InitShardPrimaryRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetCellInfoResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + InitShardPrimaryRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetCellInfoResponse"; + return typeUrlPrefix + "/vtctldata.InitShardPrimaryRequest"; }; - return GetCellInfoResponse; + return InitShardPrimaryRequest; })(); - vtctldata.GetCellInfoNamesRequest = (function() { + vtctldata.InitShardPrimaryResponse = (function() { /** - * Properties of a GetCellInfoNamesRequest. + * Properties of an InitShardPrimaryResponse. * @memberof vtctldata - * @interface IGetCellInfoNamesRequest + * @interface IInitShardPrimaryResponse + * @property {Array.|null} [events] InitShardPrimaryResponse events */ /** - * Constructs a new GetCellInfoNamesRequest. + * Constructs a new InitShardPrimaryResponse. * @memberof vtctldata - * @classdesc Represents a GetCellInfoNamesRequest. - * @implements IGetCellInfoNamesRequest + * @classdesc Represents an InitShardPrimaryResponse. 
+ * @implements IInitShardPrimaryResponse * @constructor - * @param {vtctldata.IGetCellInfoNamesRequest=} [properties] Properties to set + * @param {vtctldata.IInitShardPrimaryResponse=} [properties] Properties to set */ - function GetCellInfoNamesRequest(properties) { + function InitShardPrimaryResponse(properties) { + this.events = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -112697,63 +133323,80 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new GetCellInfoNamesRequest instance using the specified properties. + * InitShardPrimaryResponse events. + * @member {Array.} events + * @memberof vtctldata.InitShardPrimaryResponse + * @instance + */ + InitShardPrimaryResponse.prototype.events = $util.emptyArray; + + /** + * Creates a new InitShardPrimaryResponse instance using the specified properties. * @function create - * @memberof vtctldata.GetCellInfoNamesRequest + * @memberof vtctldata.InitShardPrimaryResponse * @static - * @param {vtctldata.IGetCellInfoNamesRequest=} [properties] Properties to set - * @returns {vtctldata.GetCellInfoNamesRequest} GetCellInfoNamesRequest instance + * @param {vtctldata.IInitShardPrimaryResponse=} [properties] Properties to set + * @returns {vtctldata.InitShardPrimaryResponse} InitShardPrimaryResponse instance */ - GetCellInfoNamesRequest.create = function create(properties) { - return new GetCellInfoNamesRequest(properties); + InitShardPrimaryResponse.create = function create(properties) { + return new InitShardPrimaryResponse(properties); }; /** - * Encodes the specified GetCellInfoNamesRequest message. Does not implicitly {@link vtctldata.GetCellInfoNamesRequest.verify|verify} messages. + * Encodes the specified InitShardPrimaryResponse message. Does not implicitly {@link vtctldata.InitShardPrimaryResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetCellInfoNamesRequest + * @memberof vtctldata.InitShardPrimaryResponse * @static - * @param {vtctldata.IGetCellInfoNamesRequest} message GetCellInfoNamesRequest message or plain object to encode + * @param {vtctldata.IInitShardPrimaryResponse} message InitShardPrimaryResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetCellInfoNamesRequest.encode = function encode(message, writer) { + InitShardPrimaryResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.events != null && message.events.length) + for (let i = 0; i < message.events.length; ++i) + $root.logutil.Event.encode(message.events[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified GetCellInfoNamesRequest message, length delimited. Does not implicitly {@link vtctldata.GetCellInfoNamesRequest.verify|verify} messages. + * Encodes the specified InitShardPrimaryResponse message, length delimited. Does not implicitly {@link vtctldata.InitShardPrimaryResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetCellInfoNamesRequest + * @memberof vtctldata.InitShardPrimaryResponse * @static - * @param {vtctldata.IGetCellInfoNamesRequest} message GetCellInfoNamesRequest message or plain object to encode + * @param {vtctldata.IInitShardPrimaryResponse} message InitShardPrimaryResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetCellInfoNamesRequest.encodeDelimited = function encodeDelimited(message, writer) { + InitShardPrimaryResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetCellInfoNamesRequest message from the specified reader or buffer. 
+ * Decodes an InitShardPrimaryResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetCellInfoNamesRequest + * @memberof vtctldata.InitShardPrimaryResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetCellInfoNamesRequest} GetCellInfoNamesRequest + * @returns {vtctldata.InitShardPrimaryResponse} InitShardPrimaryResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetCellInfoNamesRequest.decode = function decode(reader, length) { + InitShardPrimaryResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetCellInfoNamesRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.InitShardPrimaryResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + if (!(message.events && message.events.length)) + message.events = []; + message.events.push($root.logutil.Event.decode(reader, reader.uint32())); + break; + } default: reader.skipType(tag & 7); break; @@ -112763,110 +133406,140 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetCellInfoNamesRequest message from the specified reader or buffer, length delimited. + * Decodes an InitShardPrimaryResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.GetCellInfoNamesRequest + * @memberof vtctldata.InitShardPrimaryResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetCellInfoNamesRequest} GetCellInfoNamesRequest + * @returns {vtctldata.InitShardPrimaryResponse} InitShardPrimaryResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetCellInfoNamesRequest.decodeDelimited = function decodeDelimited(reader) { + InitShardPrimaryResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetCellInfoNamesRequest message. + * Verifies an InitShardPrimaryResponse message. * @function verify - * @memberof vtctldata.GetCellInfoNamesRequest + * @memberof vtctldata.InitShardPrimaryResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetCellInfoNamesRequest.verify = function verify(message) { + InitShardPrimaryResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.events != null && message.hasOwnProperty("events")) { + if (!Array.isArray(message.events)) + return "events: array expected"; + for (let i = 0; i < message.events.length; ++i) { + let error = $root.logutil.Event.verify(message.events[i]); + if (error) + return "events." + error; + } + } return null; }; /** - * Creates a GetCellInfoNamesRequest message from a plain object. Also converts values to their respective internal types. + * Creates an InitShardPrimaryResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetCellInfoNamesRequest + * @memberof vtctldata.InitShardPrimaryResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetCellInfoNamesRequest} GetCellInfoNamesRequest + * @returns {vtctldata.InitShardPrimaryResponse} InitShardPrimaryResponse */ - GetCellInfoNamesRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetCellInfoNamesRequest) + InitShardPrimaryResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.InitShardPrimaryResponse) return object; - return new $root.vtctldata.GetCellInfoNamesRequest(); + let message = new $root.vtctldata.InitShardPrimaryResponse(); + if (object.events) { + if (!Array.isArray(object.events)) + throw TypeError(".vtctldata.InitShardPrimaryResponse.events: array expected"); + message.events = []; + for (let i = 0; i < object.events.length; ++i) { + if (typeof object.events[i] !== "object") + throw TypeError(".vtctldata.InitShardPrimaryResponse.events: object expected"); + message.events[i] = $root.logutil.Event.fromObject(object.events[i]); + } + } + return message; }; /** - * Creates a plain object from a GetCellInfoNamesRequest message. Also converts values to other types if specified. + * Creates a plain object from an InitShardPrimaryResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetCellInfoNamesRequest + * @memberof vtctldata.InitShardPrimaryResponse * @static - * @param {vtctldata.GetCellInfoNamesRequest} message GetCellInfoNamesRequest + * @param {vtctldata.InitShardPrimaryResponse} message InitShardPrimaryResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetCellInfoNamesRequest.toObject = function toObject() { - return {}; + InitShardPrimaryResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.events = []; + if (message.events && message.events.length) { + object.events = []; + for (let j = 0; j < message.events.length; ++j) + object.events[j] = $root.logutil.Event.toObject(message.events[j], options); + } + return object; }; /** - * Converts this GetCellInfoNamesRequest to JSON. + * Converts this InitShardPrimaryResponse to JSON. * @function toJSON - * @memberof vtctldata.GetCellInfoNamesRequest + * @memberof vtctldata.InitShardPrimaryResponse * @instance * @returns {Object.} JSON object */ - GetCellInfoNamesRequest.prototype.toJSON = function toJSON() { + InitShardPrimaryResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetCellInfoNamesRequest + * Gets the default type url for InitShardPrimaryResponse * @function getTypeUrl - * @memberof vtctldata.GetCellInfoNamesRequest + * @memberof vtctldata.InitShardPrimaryResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetCellInfoNamesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + InitShardPrimaryResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + 
"/vtctldata.GetCellInfoNamesRequest"; + return typeUrlPrefix + "/vtctldata.InitShardPrimaryResponse"; }; - return GetCellInfoNamesRequest; + return InitShardPrimaryResponse; })(); - vtctldata.GetCellInfoNamesResponse = (function() { + vtctldata.LaunchSchemaMigrationRequest = (function() { /** - * Properties of a GetCellInfoNamesResponse. + * Properties of a LaunchSchemaMigrationRequest. * @memberof vtctldata - * @interface IGetCellInfoNamesResponse - * @property {Array.|null} [names] GetCellInfoNamesResponse names + * @interface ILaunchSchemaMigrationRequest + * @property {string|null} [keyspace] LaunchSchemaMigrationRequest keyspace + * @property {string|null} [uuid] LaunchSchemaMigrationRequest uuid */ /** - * Constructs a new GetCellInfoNamesResponse. + * Constructs a new LaunchSchemaMigrationRequest. * @memberof vtctldata - * @classdesc Represents a GetCellInfoNamesResponse. - * @implements IGetCellInfoNamesResponse + * @classdesc Represents a LaunchSchemaMigrationRequest. + * @implements ILaunchSchemaMigrationRequest * @constructor - * @param {vtctldata.IGetCellInfoNamesResponse=} [properties] Properties to set + * @param {vtctldata.ILaunchSchemaMigrationRequest=} [properties] Properties to set */ - function GetCellInfoNamesResponse(properties) { - this.names = []; + function LaunchSchemaMigrationRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -112874,78 +133547,89 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetCellInfoNamesResponse names. - * @member {Array.} names - * @memberof vtctldata.GetCellInfoNamesResponse + * LaunchSchemaMigrationRequest keyspace. 
+ * @member {string} keyspace + * @memberof vtctldata.LaunchSchemaMigrationRequest * @instance */ - GetCellInfoNamesResponse.prototype.names = $util.emptyArray; + LaunchSchemaMigrationRequest.prototype.keyspace = ""; /** - * Creates a new GetCellInfoNamesResponse instance using the specified properties. + * LaunchSchemaMigrationRequest uuid. + * @member {string} uuid + * @memberof vtctldata.LaunchSchemaMigrationRequest + * @instance + */ + LaunchSchemaMigrationRequest.prototype.uuid = ""; + + /** + * Creates a new LaunchSchemaMigrationRequest instance using the specified properties. * @function create - * @memberof vtctldata.GetCellInfoNamesResponse + * @memberof vtctldata.LaunchSchemaMigrationRequest * @static - * @param {vtctldata.IGetCellInfoNamesResponse=} [properties] Properties to set - * @returns {vtctldata.GetCellInfoNamesResponse} GetCellInfoNamesResponse instance + * @param {vtctldata.ILaunchSchemaMigrationRequest=} [properties] Properties to set + * @returns {vtctldata.LaunchSchemaMigrationRequest} LaunchSchemaMigrationRequest instance */ - GetCellInfoNamesResponse.create = function create(properties) { - return new GetCellInfoNamesResponse(properties); + LaunchSchemaMigrationRequest.create = function create(properties) { + return new LaunchSchemaMigrationRequest(properties); }; /** - * Encodes the specified GetCellInfoNamesResponse message. Does not implicitly {@link vtctldata.GetCellInfoNamesResponse.verify|verify} messages. + * Encodes the specified LaunchSchemaMigrationRequest message. Does not implicitly {@link vtctldata.LaunchSchemaMigrationRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetCellInfoNamesResponse + * @memberof vtctldata.LaunchSchemaMigrationRequest * @static - * @param {vtctldata.IGetCellInfoNamesResponse} message GetCellInfoNamesResponse message or plain object to encode + * @param {vtctldata.ILaunchSchemaMigrationRequest} message LaunchSchemaMigrationRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetCellInfoNamesResponse.encode = function encode(message, writer) { + LaunchSchemaMigrationRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.names != null && message.names.length) - for (let i = 0; i < message.names.length; ++i) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.names[i]); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.uuid != null && Object.hasOwnProperty.call(message, "uuid")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.uuid); return writer; }; /** - * Encodes the specified GetCellInfoNamesResponse message, length delimited. Does not implicitly {@link vtctldata.GetCellInfoNamesResponse.verify|verify} messages. + * Encodes the specified LaunchSchemaMigrationRequest message, length delimited. Does not implicitly {@link vtctldata.LaunchSchemaMigrationRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetCellInfoNamesResponse + * @memberof vtctldata.LaunchSchemaMigrationRequest * @static - * @param {vtctldata.IGetCellInfoNamesResponse} message GetCellInfoNamesResponse message or plain object to encode + * @param {vtctldata.ILaunchSchemaMigrationRequest} message LaunchSchemaMigrationRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetCellInfoNamesResponse.encodeDelimited = function encodeDelimited(message, writer) { + LaunchSchemaMigrationRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetCellInfoNamesResponse message from the specified reader or buffer. + * Decodes a LaunchSchemaMigrationRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetCellInfoNamesResponse + * @memberof vtctldata.LaunchSchemaMigrationRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetCellInfoNamesResponse} GetCellInfoNamesResponse + * @returns {vtctldata.LaunchSchemaMigrationRequest} LaunchSchemaMigrationRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetCellInfoNamesResponse.decode = function decode(reader, length) { + LaunchSchemaMigrationRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetCellInfoNamesResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.LaunchSchemaMigrationRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.names && message.names.length)) - message.names = []; - message.names.push(reader.string()); + message.keyspace = reader.string(); + break; + } + case 2: { + message.uuid = reader.string(); break; } default: @@ -112957,133 +133641,132 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetCellInfoNamesResponse message from the specified reader or buffer, length delimited. + * Decodes a LaunchSchemaMigrationRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetCellInfoNamesResponse + * @memberof vtctldata.LaunchSchemaMigrationRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetCellInfoNamesResponse} GetCellInfoNamesResponse + * @returns {vtctldata.LaunchSchemaMigrationRequest} LaunchSchemaMigrationRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetCellInfoNamesResponse.decodeDelimited = function decodeDelimited(reader) { + LaunchSchemaMigrationRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetCellInfoNamesResponse message. + * Verifies a LaunchSchemaMigrationRequest message. 
* @function verify - * @memberof vtctldata.GetCellInfoNamesResponse + * @memberof vtctldata.LaunchSchemaMigrationRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetCellInfoNamesResponse.verify = function verify(message) { + LaunchSchemaMigrationRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.names != null && message.hasOwnProperty("names")) { - if (!Array.isArray(message.names)) - return "names: array expected"; - for (let i = 0; i < message.names.length; ++i) - if (!$util.isString(message.names[i])) - return "names: string[] expected"; - } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.uuid != null && message.hasOwnProperty("uuid")) + if (!$util.isString(message.uuid)) + return "uuid: string expected"; return null; }; /** - * Creates a GetCellInfoNamesResponse message from a plain object. Also converts values to their respective internal types. + * Creates a LaunchSchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetCellInfoNamesResponse + * @memberof vtctldata.LaunchSchemaMigrationRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetCellInfoNamesResponse} GetCellInfoNamesResponse + * @returns {vtctldata.LaunchSchemaMigrationRequest} LaunchSchemaMigrationRequest */ - GetCellInfoNamesResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetCellInfoNamesResponse) + LaunchSchemaMigrationRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.LaunchSchemaMigrationRequest) return object; - let message = new $root.vtctldata.GetCellInfoNamesResponse(); - if (object.names) { - if (!Array.isArray(object.names)) - throw TypeError(".vtctldata.GetCellInfoNamesResponse.names: array expected"); - message.names = []; - for (let i = 0; i < object.names.length; ++i) - message.names[i] = String(object.names[i]); - } + let message = new $root.vtctldata.LaunchSchemaMigrationRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.uuid != null) + message.uuid = String(object.uuid); return message; }; /** - * Creates a plain object from a GetCellInfoNamesResponse message. Also converts values to other types if specified. + * Creates a plain object from a LaunchSchemaMigrationRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetCellInfoNamesResponse + * @memberof vtctldata.LaunchSchemaMigrationRequest * @static - * @param {vtctldata.GetCellInfoNamesResponse} message GetCellInfoNamesResponse + * @param {vtctldata.LaunchSchemaMigrationRequest} message LaunchSchemaMigrationRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetCellInfoNamesResponse.toObject = function toObject(message, options) { + LaunchSchemaMigrationRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.names = []; - if (message.names && message.names.length) { - object.names = []; - for (let j = 0; j < message.names.length; ++j) - object.names[j] = message.names[j]; + if (options.defaults) { + object.keyspace = ""; + object.uuid = ""; } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.uuid != null && message.hasOwnProperty("uuid")) + object.uuid = message.uuid; return object; }; /** - * Converts this GetCellInfoNamesResponse to JSON. + * Converts this LaunchSchemaMigrationRequest to JSON. 
* @function toJSON - * @memberof vtctldata.GetCellInfoNamesResponse + * @memberof vtctldata.LaunchSchemaMigrationRequest * @instance * @returns {Object.} JSON object */ - GetCellInfoNamesResponse.prototype.toJSON = function toJSON() { + LaunchSchemaMigrationRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetCellInfoNamesResponse + * Gets the default type url for LaunchSchemaMigrationRequest * @function getTypeUrl - * @memberof vtctldata.GetCellInfoNamesResponse + * @memberof vtctldata.LaunchSchemaMigrationRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetCellInfoNamesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + LaunchSchemaMigrationRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetCellInfoNamesResponse"; + return typeUrlPrefix + "/vtctldata.LaunchSchemaMigrationRequest"; }; - return GetCellInfoNamesResponse; + return LaunchSchemaMigrationRequest; })(); - vtctldata.GetCellsAliasesRequest = (function() { + vtctldata.LaunchSchemaMigrationResponse = (function() { /** - * Properties of a GetCellsAliasesRequest. + * Properties of a LaunchSchemaMigrationResponse. * @memberof vtctldata - * @interface IGetCellsAliasesRequest + * @interface ILaunchSchemaMigrationResponse + * @property {Object.|null} [rows_affected_by_shard] LaunchSchemaMigrationResponse rows_affected_by_shard */ /** - * Constructs a new GetCellsAliasesRequest. + * Constructs a new LaunchSchemaMigrationResponse. * @memberof vtctldata - * @classdesc Represents a GetCellsAliasesRequest. - * @implements IGetCellsAliasesRequest + * @classdesc Represents a LaunchSchemaMigrationResponse. 
+ * @implements ILaunchSchemaMigrationResponse * @constructor - * @param {vtctldata.IGetCellsAliasesRequest=} [properties] Properties to set + * @param {vtctldata.ILaunchSchemaMigrationResponse=} [properties] Properties to set */ - function GetCellsAliasesRequest(properties) { + function LaunchSchemaMigrationResponse(properties) { + this.rows_affected_by_shard = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -113091,63 +133774,97 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new GetCellsAliasesRequest instance using the specified properties. + * LaunchSchemaMigrationResponse rows_affected_by_shard. + * @member {Object.} rows_affected_by_shard + * @memberof vtctldata.LaunchSchemaMigrationResponse + * @instance + */ + LaunchSchemaMigrationResponse.prototype.rows_affected_by_shard = $util.emptyObject; + + /** + * Creates a new LaunchSchemaMigrationResponse instance using the specified properties. * @function create - * @memberof vtctldata.GetCellsAliasesRequest + * @memberof vtctldata.LaunchSchemaMigrationResponse * @static - * @param {vtctldata.IGetCellsAliasesRequest=} [properties] Properties to set - * @returns {vtctldata.GetCellsAliasesRequest} GetCellsAliasesRequest instance + * @param {vtctldata.ILaunchSchemaMigrationResponse=} [properties] Properties to set + * @returns {vtctldata.LaunchSchemaMigrationResponse} LaunchSchemaMigrationResponse instance */ - GetCellsAliasesRequest.create = function create(properties) { - return new GetCellsAliasesRequest(properties); + LaunchSchemaMigrationResponse.create = function create(properties) { + return new LaunchSchemaMigrationResponse(properties); }; /** - * Encodes the specified GetCellsAliasesRequest message. Does not implicitly {@link vtctldata.GetCellsAliasesRequest.verify|verify} messages. + * Encodes the specified LaunchSchemaMigrationResponse message. 
Does not implicitly {@link vtctldata.LaunchSchemaMigrationResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.GetCellsAliasesRequest + * @memberof vtctldata.LaunchSchemaMigrationResponse * @static - * @param {vtctldata.IGetCellsAliasesRequest} message GetCellsAliasesRequest message or plain object to encode + * @param {vtctldata.ILaunchSchemaMigrationResponse} message LaunchSchemaMigrationResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetCellsAliasesRequest.encode = function encode(message, writer) { + LaunchSchemaMigrationResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.rows_affected_by_shard != null && Object.hasOwnProperty.call(message, "rows_affected_by_shard")) + for (let keys = Object.keys(message.rows_affected_by_shard), i = 0; i < keys.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 0 =*/16).uint64(message.rows_affected_by_shard[keys[i]]).ldelim(); return writer; }; /** - * Encodes the specified GetCellsAliasesRequest message, length delimited. Does not implicitly {@link vtctldata.GetCellsAliasesRequest.verify|verify} messages. + * Encodes the specified LaunchSchemaMigrationResponse message, length delimited. Does not implicitly {@link vtctldata.LaunchSchemaMigrationResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetCellsAliasesRequest + * @memberof vtctldata.LaunchSchemaMigrationResponse * @static - * @param {vtctldata.IGetCellsAliasesRequest} message GetCellsAliasesRequest message or plain object to encode + * @param {vtctldata.ILaunchSchemaMigrationResponse} message LaunchSchemaMigrationResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetCellsAliasesRequest.encodeDelimited = function encodeDelimited(message, writer) { + LaunchSchemaMigrationResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetCellsAliasesRequest message from the specified reader or buffer. + * Decodes a LaunchSchemaMigrationResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetCellsAliasesRequest + * @memberof vtctldata.LaunchSchemaMigrationResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetCellsAliasesRequest} GetCellsAliasesRequest + * @returns {vtctldata.LaunchSchemaMigrationResponse} LaunchSchemaMigrationResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetCellsAliasesRequest.decode = function decode(reader, length) { + LaunchSchemaMigrationResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetCellsAliasesRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.LaunchSchemaMigrationResponse(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + if (message.rows_affected_by_shard === $util.emptyObject) + message.rows_affected_by_shard = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = 0; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = reader.uint64(); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.rows_affected_by_shard[key] = value; + break; + } default: reader.skipType(tag & 7); break; @@ -113157,208 +133874,328 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetCellsAliasesRequest message from the specified reader or buffer, length delimited. + * Decodes a LaunchSchemaMigrationResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetCellsAliasesRequest + * @memberof vtctldata.LaunchSchemaMigrationResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetCellsAliasesRequest} GetCellsAliasesRequest + * @returns {vtctldata.LaunchSchemaMigrationResponse} LaunchSchemaMigrationResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetCellsAliasesRequest.decodeDelimited = function decodeDelimited(reader) { + LaunchSchemaMigrationResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetCellsAliasesRequest message. + * Verifies a LaunchSchemaMigrationResponse message. 
* @function verify - * @memberof vtctldata.GetCellsAliasesRequest + * @memberof vtctldata.LaunchSchemaMigrationResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetCellsAliasesRequest.verify = function verify(message) { + LaunchSchemaMigrationResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.rows_affected_by_shard != null && message.hasOwnProperty("rows_affected_by_shard")) { + if (!$util.isObject(message.rows_affected_by_shard)) + return "rows_affected_by_shard: object expected"; + let key = Object.keys(message.rows_affected_by_shard); + for (let i = 0; i < key.length; ++i) + if (!$util.isInteger(message.rows_affected_by_shard[key[i]]) && !(message.rows_affected_by_shard[key[i]] && $util.isInteger(message.rows_affected_by_shard[key[i]].low) && $util.isInteger(message.rows_affected_by_shard[key[i]].high))) + return "rows_affected_by_shard: integer|Long{k:string} expected"; + } return null; }; /** - * Creates a GetCellsAliasesRequest message from a plain object. Also converts values to their respective internal types. + * Creates a LaunchSchemaMigrationResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetCellsAliasesRequest + * @memberof vtctldata.LaunchSchemaMigrationResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetCellsAliasesRequest} GetCellsAliasesRequest + * @returns {vtctldata.LaunchSchemaMigrationResponse} LaunchSchemaMigrationResponse */ - GetCellsAliasesRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetCellsAliasesRequest) + LaunchSchemaMigrationResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.LaunchSchemaMigrationResponse) return object; - return new $root.vtctldata.GetCellsAliasesRequest(); + let message = new $root.vtctldata.LaunchSchemaMigrationResponse(); + if (object.rows_affected_by_shard) { + if (typeof object.rows_affected_by_shard !== "object") + throw TypeError(".vtctldata.LaunchSchemaMigrationResponse.rows_affected_by_shard: object expected"); + message.rows_affected_by_shard = {}; + for (let keys = Object.keys(object.rows_affected_by_shard), i = 0; i < keys.length; ++i) + if ($util.Long) + (message.rows_affected_by_shard[keys[i]] = $util.Long.fromValue(object.rows_affected_by_shard[keys[i]])).unsigned = true; + else if (typeof object.rows_affected_by_shard[keys[i]] === "string") + message.rows_affected_by_shard[keys[i]] = parseInt(object.rows_affected_by_shard[keys[i]], 10); + else if (typeof object.rows_affected_by_shard[keys[i]] === "number") + message.rows_affected_by_shard[keys[i]] = object.rows_affected_by_shard[keys[i]]; + else if (typeof object.rows_affected_by_shard[keys[i]] === "object") + message.rows_affected_by_shard[keys[i]] = new $util.LongBits(object.rows_affected_by_shard[keys[i]].low >>> 0, object.rows_affected_by_shard[keys[i]].high >>> 0).toNumber(true); + } + return message; }; /** - * Creates a plain object from a GetCellsAliasesRequest message. Also converts values to other types if specified. 
+ * Creates a plain object from a LaunchSchemaMigrationResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.GetCellsAliasesRequest + * @memberof vtctldata.LaunchSchemaMigrationResponse * @static - * @param {vtctldata.GetCellsAliasesRequest} message GetCellsAliasesRequest + * @param {vtctldata.LaunchSchemaMigrationResponse} message LaunchSchemaMigrationResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetCellsAliasesRequest.toObject = function toObject() { - return {}; + LaunchSchemaMigrationResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.objects || options.defaults) + object.rows_affected_by_shard = {}; + let keys2; + if (message.rows_affected_by_shard && (keys2 = Object.keys(message.rows_affected_by_shard)).length) { + object.rows_affected_by_shard = {}; + for (let j = 0; j < keys2.length; ++j) + if (typeof message.rows_affected_by_shard[keys2[j]] === "number") + object.rows_affected_by_shard[keys2[j]] = options.longs === String ? String(message.rows_affected_by_shard[keys2[j]]) : message.rows_affected_by_shard[keys2[j]]; + else + object.rows_affected_by_shard[keys2[j]] = options.longs === String ? $util.Long.prototype.toString.call(message.rows_affected_by_shard[keys2[j]]) : options.longs === Number ? new $util.LongBits(message.rows_affected_by_shard[keys2[j]].low >>> 0, message.rows_affected_by_shard[keys2[j]].high >>> 0).toNumber(true) : message.rows_affected_by_shard[keys2[j]]; + } + return object; }; /** - * Converts this GetCellsAliasesRequest to JSON. + * Converts this LaunchSchemaMigrationResponse to JSON. 
* @function toJSON - * @memberof vtctldata.GetCellsAliasesRequest + * @memberof vtctldata.LaunchSchemaMigrationResponse * @instance * @returns {Object.} JSON object */ - GetCellsAliasesRequest.prototype.toJSON = function toJSON() { + LaunchSchemaMigrationResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetCellsAliasesRequest + * Gets the default type url for LaunchSchemaMigrationResponse * @function getTypeUrl - * @memberof vtctldata.GetCellsAliasesRequest + * @memberof vtctldata.LaunchSchemaMigrationResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetCellsAliasesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + LaunchSchemaMigrationResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetCellsAliasesRequest"; + return typeUrlPrefix + "/vtctldata.LaunchSchemaMigrationResponse"; }; - return GetCellsAliasesRequest; + return LaunchSchemaMigrationResponse; })(); - vtctldata.GetCellsAliasesResponse = (function() { + vtctldata.LookupVindexCreateRequest = (function() { /** - * Properties of a GetCellsAliasesResponse. + * Properties of a LookupVindexCreateRequest. 
* @memberof vtctldata - * @interface IGetCellsAliasesResponse - * @property {Object.|null} [aliases] GetCellsAliasesResponse aliases + * @interface ILookupVindexCreateRequest + * @property {string|null} [keyspace] LookupVindexCreateRequest keyspace + * @property {string|null} [workflow] LookupVindexCreateRequest workflow + * @property {Array.|null} [cells] LookupVindexCreateRequest cells + * @property {vschema.IKeyspace|null} [vindex] LookupVindexCreateRequest vindex + * @property {boolean|null} [continue_after_copy_with_owner] LookupVindexCreateRequest continue_after_copy_with_owner + * @property {Array.|null} [tablet_types] LookupVindexCreateRequest tablet_types + * @property {tabletmanagerdata.TabletSelectionPreference|null} [tablet_selection_preference] LookupVindexCreateRequest tablet_selection_preference + */ + + /** + * Constructs a new LookupVindexCreateRequest. + * @memberof vtctldata + * @classdesc Represents a LookupVindexCreateRequest. + * @implements ILookupVindexCreateRequest + * @constructor + * @param {vtctldata.ILookupVindexCreateRequest=} [properties] Properties to set + */ + function LookupVindexCreateRequest(properties) { + this.cells = []; + this.tablet_types = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * LookupVindexCreateRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.LookupVindexCreateRequest + * @instance + */ + LookupVindexCreateRequest.prototype.keyspace = ""; + + /** + * LookupVindexCreateRequest workflow. + * @member {string} workflow + * @memberof vtctldata.LookupVindexCreateRequest + * @instance + */ + LookupVindexCreateRequest.prototype.workflow = ""; + + /** + * LookupVindexCreateRequest cells. 
+ * @member {Array.} cells + * @memberof vtctldata.LookupVindexCreateRequest + * @instance + */ + LookupVindexCreateRequest.prototype.cells = $util.emptyArray; + + /** + * LookupVindexCreateRequest vindex. + * @member {vschema.IKeyspace|null|undefined} vindex + * @memberof vtctldata.LookupVindexCreateRequest + * @instance + */ + LookupVindexCreateRequest.prototype.vindex = null; + + /** + * LookupVindexCreateRequest continue_after_copy_with_owner. + * @member {boolean} continue_after_copy_with_owner + * @memberof vtctldata.LookupVindexCreateRequest + * @instance */ + LookupVindexCreateRequest.prototype.continue_after_copy_with_owner = false; /** - * Constructs a new GetCellsAliasesResponse. - * @memberof vtctldata - * @classdesc Represents a GetCellsAliasesResponse. - * @implements IGetCellsAliasesResponse - * @constructor - * @param {vtctldata.IGetCellsAliasesResponse=} [properties] Properties to set + * LookupVindexCreateRequest tablet_types. + * @member {Array.} tablet_types + * @memberof vtctldata.LookupVindexCreateRequest + * @instance */ - function GetCellsAliasesResponse(properties) { - this.aliases = {}; - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + LookupVindexCreateRequest.prototype.tablet_types = $util.emptyArray; /** - * GetCellsAliasesResponse aliases. - * @member {Object.} aliases - * @memberof vtctldata.GetCellsAliasesResponse + * LookupVindexCreateRequest tablet_selection_preference. + * @member {tabletmanagerdata.TabletSelectionPreference} tablet_selection_preference + * @memberof vtctldata.LookupVindexCreateRequest * @instance */ - GetCellsAliasesResponse.prototype.aliases = $util.emptyObject; + LookupVindexCreateRequest.prototype.tablet_selection_preference = 0; /** - * Creates a new GetCellsAliasesResponse instance using the specified properties. 
+ * Creates a new LookupVindexCreateRequest instance using the specified properties. * @function create - * @memberof vtctldata.GetCellsAliasesResponse + * @memberof vtctldata.LookupVindexCreateRequest * @static - * @param {vtctldata.IGetCellsAliasesResponse=} [properties] Properties to set - * @returns {vtctldata.GetCellsAliasesResponse} GetCellsAliasesResponse instance + * @param {vtctldata.ILookupVindexCreateRequest=} [properties] Properties to set + * @returns {vtctldata.LookupVindexCreateRequest} LookupVindexCreateRequest instance */ - GetCellsAliasesResponse.create = function create(properties) { - return new GetCellsAliasesResponse(properties); + LookupVindexCreateRequest.create = function create(properties) { + return new LookupVindexCreateRequest(properties); }; /** - * Encodes the specified GetCellsAliasesResponse message. Does not implicitly {@link vtctldata.GetCellsAliasesResponse.verify|verify} messages. + * Encodes the specified LookupVindexCreateRequest message. Does not implicitly {@link vtctldata.LookupVindexCreateRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetCellsAliasesResponse + * @memberof vtctldata.LookupVindexCreateRequest * @static - * @param {vtctldata.IGetCellsAliasesResponse} message GetCellsAliasesResponse message or plain object to encode + * @param {vtctldata.ILookupVindexCreateRequest} message LookupVindexCreateRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetCellsAliasesResponse.encode = function encode(message, writer) { + LookupVindexCreateRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.aliases != null && Object.hasOwnProperty.call(message, "aliases")) - for (let keys = Object.keys(message.aliases), i = 0; i < keys.length; ++i) { - writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); - $root.topodata.CellsAlias.encode(message.aliases[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); - } + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.workflow); + if (message.cells != null && message.cells.length) + for (let i = 0; i < message.cells.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.cells[i]); + if (message.vindex != null && Object.hasOwnProperty.call(message, "vindex")) + $root.vschema.Keyspace.encode(message.vindex, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.continue_after_copy_with_owner != null && Object.hasOwnProperty.call(message, "continue_after_copy_with_owner")) + writer.uint32(/* id 5, wireType 0 =*/40).bool(message.continue_after_copy_with_owner); + if (message.tablet_types != null && message.tablet_types.length) { + writer.uint32(/* id 
6, wireType 2 =*/50).fork(); + for (let i = 0; i < message.tablet_types.length; ++i) + writer.int32(message.tablet_types[i]); + writer.ldelim(); + } + if (message.tablet_selection_preference != null && Object.hasOwnProperty.call(message, "tablet_selection_preference")) + writer.uint32(/* id 7, wireType 0 =*/56).int32(message.tablet_selection_preference); return writer; }; /** - * Encodes the specified GetCellsAliasesResponse message, length delimited. Does not implicitly {@link vtctldata.GetCellsAliasesResponse.verify|verify} messages. + * Encodes the specified LookupVindexCreateRequest message, length delimited. Does not implicitly {@link vtctldata.LookupVindexCreateRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetCellsAliasesResponse + * @memberof vtctldata.LookupVindexCreateRequest * @static - * @param {vtctldata.IGetCellsAliasesResponse} message GetCellsAliasesResponse message or plain object to encode + * @param {vtctldata.ILookupVindexCreateRequest} message LookupVindexCreateRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetCellsAliasesResponse.encodeDelimited = function encodeDelimited(message, writer) { + LookupVindexCreateRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetCellsAliasesResponse message from the specified reader or buffer. + * Decodes a LookupVindexCreateRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.GetCellsAliasesResponse + * @memberof vtctldata.LookupVindexCreateRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetCellsAliasesResponse} GetCellsAliasesResponse + * @returns {vtctldata.LookupVindexCreateRequest} LookupVindexCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetCellsAliasesResponse.decode = function decode(reader, length) { + LookupVindexCreateRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetCellsAliasesResponse(), key, value; + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.LookupVindexCreateRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (message.aliases === $util.emptyObject) - message.aliases = {}; - let end2 = reader.uint32() + reader.pos; - key = ""; - value = null; - while (reader.pos < end2) { - let tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = $root.topodata.CellsAlias.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.aliases[key] = value; + message.keyspace = reader.string(); + break; + } + case 2: { + message.workflow = reader.string(); + break; + } + case 3: { + if (!(message.cells && message.cells.length)) + message.cells = []; + message.cells.push(reader.string()); + break; + } + case 4: { + message.vindex = $root.vschema.Keyspace.decode(reader, reader.uint32()); + break; + } + case 5: { + message.continue_after_copy_with_owner = reader.bool(); + break; + } + case 
6: { + if (!(message.tablet_types && message.tablet_types.length)) + message.tablet_types = []; + if ((tag & 7) === 2) { + let end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.tablet_types.push(reader.int32()); + } else + message.tablet_types.push(reader.int32()); + break; + } + case 7: { + message.tablet_selection_preference = reader.int32(); break; } default: @@ -113370,141 +134207,289 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetCellsAliasesResponse message from the specified reader or buffer, length delimited. + * Decodes a LookupVindexCreateRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetCellsAliasesResponse + * @memberof vtctldata.LookupVindexCreateRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetCellsAliasesResponse} GetCellsAliasesResponse + * @returns {vtctldata.LookupVindexCreateRequest} LookupVindexCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetCellsAliasesResponse.decodeDelimited = function decodeDelimited(reader) { + LookupVindexCreateRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetCellsAliasesResponse message. + * Verifies a LookupVindexCreateRequest message. 
* @function verify - * @memberof vtctldata.GetCellsAliasesResponse + * @memberof vtctldata.LookupVindexCreateRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetCellsAliasesResponse.verify = function verify(message) { + LookupVindexCreateRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.aliases != null && message.hasOwnProperty("aliases")) { - if (!$util.isObject(message.aliases)) - return "aliases: object expected"; - let key = Object.keys(message.aliases); - for (let i = 0; i < key.length; ++i) { - let error = $root.topodata.CellsAlias.verify(message.aliases[key[i]]); - if (error) - return "aliases." + error; - } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; + if (message.cells != null && message.hasOwnProperty("cells")) { + if (!Array.isArray(message.cells)) + return "cells: array expected"; + for (let i = 0; i < message.cells.length; ++i) + if (!$util.isString(message.cells[i])) + return "cells: string[] expected"; + } + if (message.vindex != null && message.hasOwnProperty("vindex")) { + let error = $root.vschema.Keyspace.verify(message.vindex); + if (error) + return "vindex." 
+ error; + } + if (message.continue_after_copy_with_owner != null && message.hasOwnProperty("continue_after_copy_with_owner")) + if (typeof message.continue_after_copy_with_owner !== "boolean") + return "continue_after_copy_with_owner: boolean expected"; + if (message.tablet_types != null && message.hasOwnProperty("tablet_types")) { + if (!Array.isArray(message.tablet_types)) + return "tablet_types: array expected"; + for (let i = 0; i < message.tablet_types.length; ++i) + switch (message.tablet_types[i]) { + default: + return "tablet_types: enum value[] expected"; + case 0: + case 1: + case 1: + case 2: + case 3: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + } } + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + switch (message.tablet_selection_preference) { + default: + return "tablet_selection_preference: enum value expected"; + case 0: + case 1: + case 3: + break; + } return null; }; /** - * Creates a GetCellsAliasesResponse message from a plain object. Also converts values to their respective internal types. + * Creates a LookupVindexCreateRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetCellsAliasesResponse + * @memberof vtctldata.LookupVindexCreateRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetCellsAliasesResponse} GetCellsAliasesResponse + * @returns {vtctldata.LookupVindexCreateRequest} LookupVindexCreateRequest */ - GetCellsAliasesResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetCellsAliasesResponse) + LookupVindexCreateRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.LookupVindexCreateRequest) return object; - let message = new $root.vtctldata.GetCellsAliasesResponse(); - if (object.aliases) { - if (typeof object.aliases !== "object") - throw TypeError(".vtctldata.GetCellsAliasesResponse.aliases: object expected"); - message.aliases = {}; - for (let keys = Object.keys(object.aliases), i = 0; i < keys.length; ++i) { - if (typeof object.aliases[keys[i]] !== "object") - throw TypeError(".vtctldata.GetCellsAliasesResponse.aliases: object expected"); - message.aliases[keys[i]] = $root.topodata.CellsAlias.fromObject(object.aliases[keys[i]]); + let message = new $root.vtctldata.LookupVindexCreateRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.workflow != null) + message.workflow = String(object.workflow); + if (object.cells) { + if (!Array.isArray(object.cells)) + throw TypeError(".vtctldata.LookupVindexCreateRequest.cells: array expected"); + message.cells = []; + for (let i = 0; i < object.cells.length; ++i) + message.cells[i] = String(object.cells[i]); + } + if (object.vindex != null) { + if (typeof object.vindex !== "object") + throw TypeError(".vtctldata.LookupVindexCreateRequest.vindex: object expected"); + message.vindex = $root.vschema.Keyspace.fromObject(object.vindex); + } + if (object.continue_after_copy_with_owner != null) + message.continue_after_copy_with_owner = Boolean(object.continue_after_copy_with_owner); + 
if (object.tablet_types) { + if (!Array.isArray(object.tablet_types)) + throw TypeError(".vtctldata.LookupVindexCreateRequest.tablet_types: array expected"); + message.tablet_types = []; + for (let i = 0; i < object.tablet_types.length; ++i) + switch (object.tablet_types[i]) { + default: + if (typeof object.tablet_types[i] === "number") { + message.tablet_types[i] = object.tablet_types[i]; + break; + } + case "UNKNOWN": + case 0: + message.tablet_types[i] = 0; + break; + case "PRIMARY": + case 1: + message.tablet_types[i] = 1; + break; + case "MASTER": + case 1: + message.tablet_types[i] = 1; + break; + case "REPLICA": + case 2: + message.tablet_types[i] = 2; + break; + case "RDONLY": + case 3: + message.tablet_types[i] = 3; + break; + case "BATCH": + case 3: + message.tablet_types[i] = 3; + break; + case "SPARE": + case 4: + message.tablet_types[i] = 4; + break; + case "EXPERIMENTAL": + case 5: + message.tablet_types[i] = 5; + break; + case "BACKUP": + case 6: + message.tablet_types[i] = 6; + break; + case "RESTORE": + case 7: + message.tablet_types[i] = 7; + break; + case "DRAINED": + case 8: + message.tablet_types[i] = 8; + break; + } + } + switch (object.tablet_selection_preference) { + default: + if (typeof object.tablet_selection_preference === "number") { + message.tablet_selection_preference = object.tablet_selection_preference; + break; } + break; + case "ANY": + case 0: + message.tablet_selection_preference = 0; + break; + case "INORDER": + case 1: + message.tablet_selection_preference = 1; + break; + case "UNKNOWN": + case 3: + message.tablet_selection_preference = 3; + break; } return message; }; /** - * Creates a plain object from a GetCellsAliasesResponse message. Also converts values to other types if specified. + * Creates a plain object from a LookupVindexCreateRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetCellsAliasesResponse + * @memberof vtctldata.LookupVindexCreateRequest * @static - * @param {vtctldata.GetCellsAliasesResponse} message GetCellsAliasesResponse + * @param {vtctldata.LookupVindexCreateRequest} message LookupVindexCreateRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetCellsAliasesResponse.toObject = function toObject(message, options) { + LookupVindexCreateRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.objects || options.defaults) - object.aliases = {}; - let keys2; - if (message.aliases && (keys2 = Object.keys(message.aliases)).length) { - object.aliases = {}; - for (let j = 0; j < keys2.length; ++j) - object.aliases[keys2[j]] = $root.topodata.CellsAlias.toObject(message.aliases[keys2[j]], options); + if (options.arrays || options.defaults) { + object.cells = []; + object.tablet_types = []; + } + if (options.defaults) { + object.keyspace = ""; + object.workflow = ""; + object.vindex = null; + object.continue_after_copy_with_owner = false; + object.tablet_selection_preference = options.enums === String ? 
"ANY" : 0; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; + if (message.cells && message.cells.length) { + object.cells = []; + for (let j = 0; j < message.cells.length; ++j) + object.cells[j] = message.cells[j]; + } + if (message.vindex != null && message.hasOwnProperty("vindex")) + object.vindex = $root.vschema.Keyspace.toObject(message.vindex, options); + if (message.continue_after_copy_with_owner != null && message.hasOwnProperty("continue_after_copy_with_owner")) + object.continue_after_copy_with_owner = message.continue_after_copy_with_owner; + if (message.tablet_types && message.tablet_types.length) { + object.tablet_types = []; + for (let j = 0; j < message.tablet_types.length; ++j) + object.tablet_types[j] = options.enums === String ? $root.topodata.TabletType[message.tablet_types[j]] === undefined ? message.tablet_types[j] : $root.topodata.TabletType[message.tablet_types[j]] : message.tablet_types[j]; } + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + object.tablet_selection_preference = options.enums === String ? $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] === undefined ? message.tablet_selection_preference : $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] : message.tablet_selection_preference; return object; }; /** - * Converts this GetCellsAliasesResponse to JSON. + * Converts this LookupVindexCreateRequest to JSON. 
* @function toJSON - * @memberof vtctldata.GetCellsAliasesResponse + * @memberof vtctldata.LookupVindexCreateRequest * @instance * @returns {Object.} JSON object */ - GetCellsAliasesResponse.prototype.toJSON = function toJSON() { + LookupVindexCreateRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetCellsAliasesResponse + * Gets the default type url for LookupVindexCreateRequest * @function getTypeUrl - * @memberof vtctldata.GetCellsAliasesResponse + * @memberof vtctldata.LookupVindexCreateRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetCellsAliasesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + LookupVindexCreateRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetCellsAliasesResponse"; + return typeUrlPrefix + "/vtctldata.LookupVindexCreateRequest"; }; - return GetCellsAliasesResponse; + return LookupVindexCreateRequest; })(); - vtctldata.GetFullStatusRequest = (function() { + vtctldata.LookupVindexCreateResponse = (function() { /** - * Properties of a GetFullStatusRequest. + * Properties of a LookupVindexCreateResponse. * @memberof vtctldata - * @interface IGetFullStatusRequest - * @property {topodata.ITabletAlias|null} [tablet_alias] GetFullStatusRequest tablet_alias + * @interface ILookupVindexCreateResponse */ /** - * Constructs a new GetFullStatusRequest. + * Constructs a new LookupVindexCreateResponse. * @memberof vtctldata - * @classdesc Represents a GetFullStatusRequest. - * @implements IGetFullStatusRequest + * @classdesc Represents a LookupVindexCreateResponse. 
+ * @implements ILookupVindexCreateResponse * @constructor - * @param {vtctldata.IGetFullStatusRequest=} [properties] Properties to set + * @param {vtctldata.ILookupVindexCreateResponse=} [properties] Properties to set */ - function GetFullStatusRequest(properties) { + function LookupVindexCreateResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -113512,77 +134497,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetFullStatusRequest tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.GetFullStatusRequest - * @instance - */ - GetFullStatusRequest.prototype.tablet_alias = null; - - /** - * Creates a new GetFullStatusRequest instance using the specified properties. + * Creates a new LookupVindexCreateResponse instance using the specified properties. * @function create - * @memberof vtctldata.GetFullStatusRequest + * @memberof vtctldata.LookupVindexCreateResponse * @static - * @param {vtctldata.IGetFullStatusRequest=} [properties] Properties to set - * @returns {vtctldata.GetFullStatusRequest} GetFullStatusRequest instance + * @param {vtctldata.ILookupVindexCreateResponse=} [properties] Properties to set + * @returns {vtctldata.LookupVindexCreateResponse} LookupVindexCreateResponse instance */ - GetFullStatusRequest.create = function create(properties) { - return new GetFullStatusRequest(properties); + LookupVindexCreateResponse.create = function create(properties) { + return new LookupVindexCreateResponse(properties); }; /** - * Encodes the specified GetFullStatusRequest message. Does not implicitly {@link vtctldata.GetFullStatusRequest.verify|verify} messages. + * Encodes the specified LookupVindexCreateResponse message. Does not implicitly {@link vtctldata.LookupVindexCreateResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetFullStatusRequest + * @memberof vtctldata.LookupVindexCreateResponse * @static - * @param {vtctldata.IGetFullStatusRequest} message GetFullStatusRequest message or plain object to encode + * @param {vtctldata.ILookupVindexCreateResponse} message LookupVindexCreateResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetFullStatusRequest.encode = function encode(message, writer) { + LookupVindexCreateResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified GetFullStatusRequest message, length delimited. Does not implicitly {@link vtctldata.GetFullStatusRequest.verify|verify} messages. + * Encodes the specified LookupVindexCreateResponse message, length delimited. Does not implicitly {@link vtctldata.LookupVindexCreateResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetFullStatusRequest + * @memberof vtctldata.LookupVindexCreateResponse * @static - * @param {vtctldata.IGetFullStatusRequest} message GetFullStatusRequest message or plain object to encode + * @param {vtctldata.ILookupVindexCreateResponse} message LookupVindexCreateResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetFullStatusRequest.encodeDelimited = function encodeDelimited(message, writer) { + LookupVindexCreateResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetFullStatusRequest message from the specified reader or buffer. 
+ * Decodes a LookupVindexCreateResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetFullStatusRequest + * @memberof vtctldata.LookupVindexCreateResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetFullStatusRequest} GetFullStatusRequest + * @returns {vtctldata.LookupVindexCreateResponse} LookupVindexCreateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetFullStatusRequest.decode = function decode(reader, length) { + LookupVindexCreateResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetFullStatusRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.LookupVindexCreateResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } default: reader.skipType(tag & 7); break; @@ -113592,127 +134563,111 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetFullStatusRequest message from the specified reader or buffer, length delimited. + * Decodes a LookupVindexCreateResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.GetFullStatusRequest + * @memberof vtctldata.LookupVindexCreateResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetFullStatusRequest} GetFullStatusRequest + * @returns {vtctldata.LookupVindexCreateResponse} LookupVindexCreateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetFullStatusRequest.decodeDelimited = function decodeDelimited(reader) { + LookupVindexCreateResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetFullStatusRequest message. + * Verifies a LookupVindexCreateResponse message. * @function verify - * @memberof vtctldata.GetFullStatusRequest + * @memberof vtctldata.LookupVindexCreateResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetFullStatusRequest.verify = function verify(message) { + LookupVindexCreateResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } return null; }; /** - * Creates a GetFullStatusRequest message from a plain object. Also converts values to their respective internal types. + * Creates a LookupVindexCreateResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetFullStatusRequest + * @memberof vtctldata.LookupVindexCreateResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetFullStatusRequest} GetFullStatusRequest + * @returns {vtctldata.LookupVindexCreateResponse} LookupVindexCreateResponse */ - GetFullStatusRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetFullStatusRequest) + LookupVindexCreateResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.LookupVindexCreateResponse) return object; - let message = new $root.vtctldata.GetFullStatusRequest(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.GetFullStatusRequest.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } - return message; + return new $root.vtctldata.LookupVindexCreateResponse(); }; /** - * Creates a plain object from a GetFullStatusRequest message. Also converts values to other types if specified. + * Creates a plain object from a LookupVindexCreateResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetFullStatusRequest + * @memberof vtctldata.LookupVindexCreateResponse * @static - * @param {vtctldata.GetFullStatusRequest} message GetFullStatusRequest + * @param {vtctldata.LookupVindexCreateResponse} message LookupVindexCreateResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetFullStatusRequest.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) - object.tablet_alias = null; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - return object; + LookupVindexCreateResponse.toObject = function toObject() { + return {}; }; /** - * Converts this GetFullStatusRequest to JSON. + * Converts this LookupVindexCreateResponse to JSON. * @function toJSON - * @memberof vtctldata.GetFullStatusRequest + * @memberof vtctldata.LookupVindexCreateResponse * @instance * @returns {Object.} JSON object */ - GetFullStatusRequest.prototype.toJSON = function toJSON() { + LookupVindexCreateResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetFullStatusRequest + * Gets the default type url for LookupVindexCreateResponse * @function getTypeUrl - * @memberof vtctldata.GetFullStatusRequest + * @memberof vtctldata.LookupVindexCreateResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetFullStatusRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + LookupVindexCreateResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetFullStatusRequest"; + return typeUrlPrefix 
+ "/vtctldata.LookupVindexCreateResponse"; }; - return GetFullStatusRequest; + return LookupVindexCreateResponse; })(); - vtctldata.GetFullStatusResponse = (function() { + vtctldata.LookupVindexExternalizeRequest = (function() { /** - * Properties of a GetFullStatusResponse. + * Properties of a LookupVindexExternalizeRequest. * @memberof vtctldata - * @interface IGetFullStatusResponse - * @property {replicationdata.IFullStatus|null} [status] GetFullStatusResponse status + * @interface ILookupVindexExternalizeRequest + * @property {string|null} [keyspace] LookupVindexExternalizeRequest keyspace + * @property {string|null} [name] LookupVindexExternalizeRequest name + * @property {string|null} [table_keyspace] LookupVindexExternalizeRequest table_keyspace */ /** - * Constructs a new GetFullStatusResponse. + * Constructs a new LookupVindexExternalizeRequest. * @memberof vtctldata - * @classdesc Represents a GetFullStatusResponse. - * @implements IGetFullStatusResponse + * @classdesc Represents a LookupVindexExternalizeRequest. + * @implements ILookupVindexExternalizeRequest * @constructor - * @param {vtctldata.IGetFullStatusResponse=} [properties] Properties to set + * @param {vtctldata.ILookupVindexExternalizeRequest=} [properties] Properties to set */ - function GetFullStatusResponse(properties) { + function LookupVindexExternalizeRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -113720,75 +134675,103 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetFullStatusResponse status. - * @member {replicationdata.IFullStatus|null|undefined} status - * @memberof vtctldata.GetFullStatusResponse + * LookupVindexExternalizeRequest keyspace. 
+ * @member {string} keyspace + * @memberof vtctldata.LookupVindexExternalizeRequest * @instance */ - GetFullStatusResponse.prototype.status = null; + LookupVindexExternalizeRequest.prototype.keyspace = ""; /** - * Creates a new GetFullStatusResponse instance using the specified properties. + * LookupVindexExternalizeRequest name. + * @member {string} name + * @memberof vtctldata.LookupVindexExternalizeRequest + * @instance + */ + LookupVindexExternalizeRequest.prototype.name = ""; + + /** + * LookupVindexExternalizeRequest table_keyspace. + * @member {string} table_keyspace + * @memberof vtctldata.LookupVindexExternalizeRequest + * @instance + */ + LookupVindexExternalizeRequest.prototype.table_keyspace = ""; + + /** + * Creates a new LookupVindexExternalizeRequest instance using the specified properties. * @function create - * @memberof vtctldata.GetFullStatusResponse + * @memberof vtctldata.LookupVindexExternalizeRequest * @static - * @param {vtctldata.IGetFullStatusResponse=} [properties] Properties to set - * @returns {vtctldata.GetFullStatusResponse} GetFullStatusResponse instance + * @param {vtctldata.ILookupVindexExternalizeRequest=} [properties] Properties to set + * @returns {vtctldata.LookupVindexExternalizeRequest} LookupVindexExternalizeRequest instance */ - GetFullStatusResponse.create = function create(properties) { - return new GetFullStatusResponse(properties); + LookupVindexExternalizeRequest.create = function create(properties) { + return new LookupVindexExternalizeRequest(properties); }; /** - * Encodes the specified GetFullStatusResponse message. Does not implicitly {@link vtctldata.GetFullStatusResponse.verify|verify} messages. + * Encodes the specified LookupVindexExternalizeRequest message. Does not implicitly {@link vtctldata.LookupVindexExternalizeRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetFullStatusResponse + * @memberof vtctldata.LookupVindexExternalizeRequest * @static - * @param {vtctldata.IGetFullStatusResponse} message GetFullStatusResponse message or plain object to encode + * @param {vtctldata.ILookupVindexExternalizeRequest} message LookupVindexExternalizeRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetFullStatusResponse.encode = function encode(message, writer) { + LookupVindexExternalizeRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.status != null && Object.hasOwnProperty.call(message, "status")) - $root.replicationdata.FullStatus.encode(message.status, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.name); + if (message.table_keyspace != null && Object.hasOwnProperty.call(message, "table_keyspace")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.table_keyspace); return writer; }; /** - * Encodes the specified GetFullStatusResponse message, length delimited. Does not implicitly {@link vtctldata.GetFullStatusResponse.verify|verify} messages. + * Encodes the specified LookupVindexExternalizeRequest message, length delimited. Does not implicitly {@link vtctldata.LookupVindexExternalizeRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetFullStatusResponse + * @memberof vtctldata.LookupVindexExternalizeRequest * @static - * @param {vtctldata.IGetFullStatusResponse} message GetFullStatusResponse message or plain object to encode + * @param {vtctldata.ILookupVindexExternalizeRequest} message LookupVindexExternalizeRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetFullStatusResponse.encodeDelimited = function encodeDelimited(message, writer) { + LookupVindexExternalizeRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetFullStatusResponse message from the specified reader or buffer. + * Decodes a LookupVindexExternalizeRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetFullStatusResponse + * @memberof vtctldata.LookupVindexExternalizeRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetFullStatusResponse} GetFullStatusResponse + * @returns {vtctldata.LookupVindexExternalizeRequest} LookupVindexExternalizeRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetFullStatusResponse.decode = function decode(reader, length) { + LookupVindexExternalizeRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetFullStatusResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.LookupVindexExternalizeRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.status = $root.replicationdata.FullStatus.decode(reader, reader.uint32()); + message.keyspace = reader.string(); + break; + } + case 2: { + message.name = reader.string(); + break; + } + case 3: { + message.table_keyspace = reader.string(); break; } default: @@ -113800,126 +134783,139 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetFullStatusResponse message from the specified reader or buffer, length delimited. + * Decodes a LookupVindexExternalizeRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetFullStatusResponse + * @memberof vtctldata.LookupVindexExternalizeRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetFullStatusResponse} GetFullStatusResponse + * @returns {vtctldata.LookupVindexExternalizeRequest} LookupVindexExternalizeRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetFullStatusResponse.decodeDelimited = function decodeDelimited(reader) { + LookupVindexExternalizeRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetFullStatusResponse message. + * Verifies a LookupVindexExternalizeRequest message. 
* @function verify - * @memberof vtctldata.GetFullStatusResponse + * @memberof vtctldata.LookupVindexExternalizeRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetFullStatusResponse.verify = function verify(message) { + LookupVindexExternalizeRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.status != null && message.hasOwnProperty("status")) { - let error = $root.replicationdata.FullStatus.verify(message.status); - if (error) - return "status." + error; - } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.table_keyspace != null && message.hasOwnProperty("table_keyspace")) + if (!$util.isString(message.table_keyspace)) + return "table_keyspace: string expected"; return null; }; /** - * Creates a GetFullStatusResponse message from a plain object. Also converts values to their respective internal types. + * Creates a LookupVindexExternalizeRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetFullStatusResponse + * @memberof vtctldata.LookupVindexExternalizeRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetFullStatusResponse} GetFullStatusResponse + * @returns {vtctldata.LookupVindexExternalizeRequest} LookupVindexExternalizeRequest */ - GetFullStatusResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetFullStatusResponse) + LookupVindexExternalizeRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.LookupVindexExternalizeRequest) return object; - let message = new $root.vtctldata.GetFullStatusResponse(); - if (object.status != null) { - if (typeof object.status !== "object") - throw TypeError(".vtctldata.GetFullStatusResponse.status: object expected"); - message.status = $root.replicationdata.FullStatus.fromObject(object.status); - } + let message = new $root.vtctldata.LookupVindexExternalizeRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.name != null) + message.name = String(object.name); + if (object.table_keyspace != null) + message.table_keyspace = String(object.table_keyspace); return message; }; /** - * Creates a plain object from a GetFullStatusResponse message. Also converts values to other types if specified. + * Creates a plain object from a LookupVindexExternalizeRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetFullStatusResponse + * @memberof vtctldata.LookupVindexExternalizeRequest * @static - * @param {vtctldata.GetFullStatusResponse} message GetFullStatusResponse + * @param {vtctldata.LookupVindexExternalizeRequest} message LookupVindexExternalizeRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetFullStatusResponse.toObject = function toObject(message, options) { + LookupVindexExternalizeRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.status = null; - if (message.status != null && message.hasOwnProperty("status")) - object.status = $root.replicationdata.FullStatus.toObject(message.status, options); + if (options.defaults) { + object.keyspace = ""; + object.name = ""; + object.table_keyspace = ""; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.table_keyspace != null && message.hasOwnProperty("table_keyspace")) + object.table_keyspace = message.table_keyspace; return object; }; /** - * Converts this GetFullStatusResponse to JSON. + * Converts this LookupVindexExternalizeRequest to JSON. 
* @function toJSON - * @memberof vtctldata.GetFullStatusResponse + * @memberof vtctldata.LookupVindexExternalizeRequest * @instance * @returns {Object.} JSON object */ - GetFullStatusResponse.prototype.toJSON = function toJSON() { + LookupVindexExternalizeRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetFullStatusResponse + * Gets the default type url for LookupVindexExternalizeRequest * @function getTypeUrl - * @memberof vtctldata.GetFullStatusResponse + * @memberof vtctldata.LookupVindexExternalizeRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetFullStatusResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + LookupVindexExternalizeRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetFullStatusResponse"; + return typeUrlPrefix + "/vtctldata.LookupVindexExternalizeRequest"; }; - return GetFullStatusResponse; + return LookupVindexExternalizeRequest; })(); - vtctldata.GetKeyspacesRequest = (function() { + vtctldata.LookupVindexExternalizeResponse = (function() { /** - * Properties of a GetKeyspacesRequest. + * Properties of a LookupVindexExternalizeResponse. * @memberof vtctldata - * @interface IGetKeyspacesRequest + * @interface ILookupVindexExternalizeResponse + * @property {boolean|null} [workflow_deleted] LookupVindexExternalizeResponse workflow_deleted */ /** - * Constructs a new GetKeyspacesRequest. + * Constructs a new LookupVindexExternalizeResponse. * @memberof vtctldata - * @classdesc Represents a GetKeyspacesRequest. - * @implements IGetKeyspacesRequest + * @classdesc Represents a LookupVindexExternalizeResponse. 
+ * @implements ILookupVindexExternalizeResponse * @constructor - * @param {vtctldata.IGetKeyspacesRequest=} [properties] Properties to set + * @param {vtctldata.ILookupVindexExternalizeResponse=} [properties] Properties to set */ - function GetKeyspacesRequest(properties) { + function LookupVindexExternalizeResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -113927,63 +134923,77 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new GetKeyspacesRequest instance using the specified properties. + * LookupVindexExternalizeResponse workflow_deleted. + * @member {boolean} workflow_deleted + * @memberof vtctldata.LookupVindexExternalizeResponse + * @instance + */ + LookupVindexExternalizeResponse.prototype.workflow_deleted = false; + + /** + * Creates a new LookupVindexExternalizeResponse instance using the specified properties. * @function create - * @memberof vtctldata.GetKeyspacesRequest + * @memberof vtctldata.LookupVindexExternalizeResponse * @static - * @param {vtctldata.IGetKeyspacesRequest=} [properties] Properties to set - * @returns {vtctldata.GetKeyspacesRequest} GetKeyspacesRequest instance + * @param {vtctldata.ILookupVindexExternalizeResponse=} [properties] Properties to set + * @returns {vtctldata.LookupVindexExternalizeResponse} LookupVindexExternalizeResponse instance */ - GetKeyspacesRequest.create = function create(properties) { - return new GetKeyspacesRequest(properties); + LookupVindexExternalizeResponse.create = function create(properties) { + return new LookupVindexExternalizeResponse(properties); }; /** - * Encodes the specified GetKeyspacesRequest message. Does not implicitly {@link vtctldata.GetKeyspacesRequest.verify|verify} messages. + * Encodes the specified LookupVindexExternalizeResponse message. Does not implicitly {@link vtctldata.LookupVindexExternalizeResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetKeyspacesRequest + * @memberof vtctldata.LookupVindexExternalizeResponse * @static - * @param {vtctldata.IGetKeyspacesRequest} message GetKeyspacesRequest message or plain object to encode + * @param {vtctldata.ILookupVindexExternalizeResponse} message LookupVindexExternalizeResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetKeyspacesRequest.encode = function encode(message, writer) { + LookupVindexExternalizeResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.workflow_deleted != null && Object.hasOwnProperty.call(message, "workflow_deleted")) + writer.uint32(/* id 1, wireType 0 =*/8).bool(message.workflow_deleted); return writer; }; /** - * Encodes the specified GetKeyspacesRequest message, length delimited. Does not implicitly {@link vtctldata.GetKeyspacesRequest.verify|verify} messages. + * Encodes the specified LookupVindexExternalizeResponse message, length delimited. Does not implicitly {@link vtctldata.LookupVindexExternalizeResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetKeyspacesRequest + * @memberof vtctldata.LookupVindexExternalizeResponse * @static - * @param {vtctldata.IGetKeyspacesRequest} message GetKeyspacesRequest message or plain object to encode + * @param {vtctldata.ILookupVindexExternalizeResponse} message LookupVindexExternalizeResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetKeyspacesRequest.encodeDelimited = function encodeDelimited(message, writer) { + LookupVindexExternalizeResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetKeyspacesRequest message from the specified reader or buffer. 
+ * Decodes a LookupVindexExternalizeResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetKeyspacesRequest + * @memberof vtctldata.LookupVindexExternalizeResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetKeyspacesRequest} GetKeyspacesRequest + * @returns {vtctldata.LookupVindexExternalizeResponse} LookupVindexExternalizeResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetKeyspacesRequest.decode = function decode(reader, length) { + LookupVindexExternalizeResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetKeyspacesRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.LookupVindexExternalizeResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.workflow_deleted = reader.bool(); + break; + } default: reader.skipType(tag & 7); break; @@ -113993,110 +135003,122 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetKeyspacesRequest message from the specified reader or buffer, length delimited. + * Decodes a LookupVindexExternalizeResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.GetKeyspacesRequest + * @memberof vtctldata.LookupVindexExternalizeResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetKeyspacesRequest} GetKeyspacesRequest + * @returns {vtctldata.LookupVindexExternalizeResponse} LookupVindexExternalizeResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetKeyspacesRequest.decodeDelimited = function decodeDelimited(reader) { + LookupVindexExternalizeResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetKeyspacesRequest message. + * Verifies a LookupVindexExternalizeResponse message. * @function verify - * @memberof vtctldata.GetKeyspacesRequest + * @memberof vtctldata.LookupVindexExternalizeResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetKeyspacesRequest.verify = function verify(message) { + LookupVindexExternalizeResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.workflow_deleted != null && message.hasOwnProperty("workflow_deleted")) + if (typeof message.workflow_deleted !== "boolean") + return "workflow_deleted: boolean expected"; return null; }; /** - * Creates a GetKeyspacesRequest message from a plain object. Also converts values to their respective internal types. + * Creates a LookupVindexExternalizeResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetKeyspacesRequest + * @memberof vtctldata.LookupVindexExternalizeResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetKeyspacesRequest} GetKeyspacesRequest + * @returns {vtctldata.LookupVindexExternalizeResponse} LookupVindexExternalizeResponse */ - GetKeyspacesRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetKeyspacesRequest) + LookupVindexExternalizeResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.LookupVindexExternalizeResponse) return object; - return new $root.vtctldata.GetKeyspacesRequest(); + let message = new $root.vtctldata.LookupVindexExternalizeResponse(); + if (object.workflow_deleted != null) + message.workflow_deleted = Boolean(object.workflow_deleted); + return message; }; /** - * Creates a plain object from a GetKeyspacesRequest message. Also converts values to other types if specified. + * Creates a plain object from a LookupVindexExternalizeResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.GetKeyspacesRequest + * @memberof vtctldata.LookupVindexExternalizeResponse * @static - * @param {vtctldata.GetKeyspacesRequest} message GetKeyspacesRequest + * @param {vtctldata.LookupVindexExternalizeResponse} message LookupVindexExternalizeResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetKeyspacesRequest.toObject = function toObject() { - return {}; + LookupVindexExternalizeResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.workflow_deleted = false; + if (message.workflow_deleted != null && message.hasOwnProperty("workflow_deleted")) + object.workflow_deleted = message.workflow_deleted; + return object; }; /** - * Converts this GetKeyspacesRequest to JSON. 
+ * Converts this LookupVindexExternalizeResponse to JSON. * @function toJSON - * @memberof vtctldata.GetKeyspacesRequest + * @memberof vtctldata.LookupVindexExternalizeResponse * @instance * @returns {Object.} JSON object */ - GetKeyspacesRequest.prototype.toJSON = function toJSON() { + LookupVindexExternalizeResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetKeyspacesRequest + * Gets the default type url for LookupVindexExternalizeResponse * @function getTypeUrl - * @memberof vtctldata.GetKeyspacesRequest + * @memberof vtctldata.LookupVindexExternalizeResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetKeyspacesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + LookupVindexExternalizeResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetKeyspacesRequest"; + return typeUrlPrefix + "/vtctldata.LookupVindexExternalizeResponse"; }; - return GetKeyspacesRequest; + return LookupVindexExternalizeResponse; })(); - vtctldata.GetKeyspacesResponse = (function() { + vtctldata.MaterializeCreateRequest = (function() { /** - * Properties of a GetKeyspacesResponse. + * Properties of a MaterializeCreateRequest. * @memberof vtctldata - * @interface IGetKeyspacesResponse - * @property {Array.|null} [keyspaces] GetKeyspacesResponse keyspaces + * @interface IMaterializeCreateRequest + * @property {vtctldata.IMaterializeSettings|null} [settings] MaterializeCreateRequest settings */ /** - * Constructs a new GetKeyspacesResponse. + * Constructs a new MaterializeCreateRequest. * @memberof vtctldata - * @classdesc Represents a GetKeyspacesResponse. - * @implements IGetKeyspacesResponse + * @classdesc Represents a MaterializeCreateRequest. 
+ * @implements IMaterializeCreateRequest * @constructor - * @param {vtctldata.IGetKeyspacesResponse=} [properties] Properties to set + * @param {vtctldata.IMaterializeCreateRequest=} [properties] Properties to set */ - function GetKeyspacesResponse(properties) { - this.keyspaces = []; + function MaterializeCreateRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -114104,78 +135126,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetKeyspacesResponse keyspaces. - * @member {Array.} keyspaces - * @memberof vtctldata.GetKeyspacesResponse + * MaterializeCreateRequest settings. + * @member {vtctldata.IMaterializeSettings|null|undefined} settings + * @memberof vtctldata.MaterializeCreateRequest * @instance */ - GetKeyspacesResponse.prototype.keyspaces = $util.emptyArray; + MaterializeCreateRequest.prototype.settings = null; /** - * Creates a new GetKeyspacesResponse instance using the specified properties. + * Creates a new MaterializeCreateRequest instance using the specified properties. * @function create - * @memberof vtctldata.GetKeyspacesResponse + * @memberof vtctldata.MaterializeCreateRequest * @static - * @param {vtctldata.IGetKeyspacesResponse=} [properties] Properties to set - * @returns {vtctldata.GetKeyspacesResponse} GetKeyspacesResponse instance + * @param {vtctldata.IMaterializeCreateRequest=} [properties] Properties to set + * @returns {vtctldata.MaterializeCreateRequest} MaterializeCreateRequest instance */ - GetKeyspacesResponse.create = function create(properties) { - return new GetKeyspacesResponse(properties); + MaterializeCreateRequest.create = function create(properties) { + return new MaterializeCreateRequest(properties); }; /** - * Encodes the specified GetKeyspacesResponse message. Does not implicitly {@link vtctldata.GetKeyspacesResponse.verify|verify} messages. + * Encodes the specified MaterializeCreateRequest message. 
Does not implicitly {@link vtctldata.MaterializeCreateRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.GetKeyspacesResponse + * @memberof vtctldata.MaterializeCreateRequest * @static - * @param {vtctldata.IGetKeyspacesResponse} message GetKeyspacesResponse message or plain object to encode + * @param {vtctldata.IMaterializeCreateRequest} message MaterializeCreateRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetKeyspacesResponse.encode = function encode(message, writer) { + MaterializeCreateRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspaces != null && message.keyspaces.length) - for (let i = 0; i < message.keyspaces.length; ++i) - $root.vtctldata.Keyspace.encode(message.keyspaces[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.settings != null && Object.hasOwnProperty.call(message, "settings")) + $root.vtctldata.MaterializeSettings.encode(message.settings, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified GetKeyspacesResponse message, length delimited. Does not implicitly {@link vtctldata.GetKeyspacesResponse.verify|verify} messages. + * Encodes the specified MaterializeCreateRequest message, length delimited. Does not implicitly {@link vtctldata.MaterializeCreateRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetKeyspacesResponse + * @memberof vtctldata.MaterializeCreateRequest * @static - * @param {vtctldata.IGetKeyspacesResponse} message GetKeyspacesResponse message or plain object to encode + * @param {vtctldata.IMaterializeCreateRequest} message MaterializeCreateRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetKeyspacesResponse.encodeDelimited = function encodeDelimited(message, writer) { + MaterializeCreateRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetKeyspacesResponse message from the specified reader or buffer. + * Decodes a MaterializeCreateRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetKeyspacesResponse + * @memberof vtctldata.MaterializeCreateRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetKeyspacesResponse} GetKeyspacesResponse + * @returns {vtctldata.MaterializeCreateRequest} MaterializeCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetKeyspacesResponse.decode = function decode(reader, length) { + MaterializeCreateRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetKeyspacesResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.MaterializeCreateRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.keyspaces && message.keyspaces.length)) - message.keyspaces = []; - message.keyspaces.push($root.vtctldata.Keyspace.decode(reader, reader.uint32())); + message.settings = $root.vtctldata.MaterializeSettings.decode(reader, reader.uint32()); break; } default: @@ -114187,139 +135206,126 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetKeyspacesResponse message from the specified reader or buffer, length delimited. + * Decodes a MaterializeCreateRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetKeyspacesResponse + * @memberof vtctldata.MaterializeCreateRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetKeyspacesResponse} GetKeyspacesResponse + * @returns {vtctldata.MaterializeCreateRequest} MaterializeCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetKeyspacesResponse.decodeDelimited = function decodeDelimited(reader) { + MaterializeCreateRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetKeyspacesResponse message. + * Verifies a MaterializeCreateRequest message. 
* @function verify - * @memberof vtctldata.GetKeyspacesResponse + * @memberof vtctldata.MaterializeCreateRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetKeyspacesResponse.verify = function verify(message) { + MaterializeCreateRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspaces != null && message.hasOwnProperty("keyspaces")) { - if (!Array.isArray(message.keyspaces)) - return "keyspaces: array expected"; - for (let i = 0; i < message.keyspaces.length; ++i) { - let error = $root.vtctldata.Keyspace.verify(message.keyspaces[i]); - if (error) - return "keyspaces." + error; - } + if (message.settings != null && message.hasOwnProperty("settings")) { + let error = $root.vtctldata.MaterializeSettings.verify(message.settings); + if (error) + return "settings." + error; } return null; }; /** - * Creates a GetKeyspacesResponse message from a plain object. Also converts values to their respective internal types. + * Creates a MaterializeCreateRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetKeyspacesResponse + * @memberof vtctldata.MaterializeCreateRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetKeyspacesResponse} GetKeyspacesResponse + * @returns {vtctldata.MaterializeCreateRequest} MaterializeCreateRequest */ - GetKeyspacesResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetKeyspacesResponse) + MaterializeCreateRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.MaterializeCreateRequest) return object; - let message = new $root.vtctldata.GetKeyspacesResponse(); - if (object.keyspaces) { - if (!Array.isArray(object.keyspaces)) - throw TypeError(".vtctldata.GetKeyspacesResponse.keyspaces: array expected"); - message.keyspaces = []; - for (let i = 0; i < object.keyspaces.length; ++i) { - if (typeof object.keyspaces[i] !== "object") - throw TypeError(".vtctldata.GetKeyspacesResponse.keyspaces: object expected"); - message.keyspaces[i] = $root.vtctldata.Keyspace.fromObject(object.keyspaces[i]); - } + let message = new $root.vtctldata.MaterializeCreateRequest(); + if (object.settings != null) { + if (typeof object.settings !== "object") + throw TypeError(".vtctldata.MaterializeCreateRequest.settings: object expected"); + message.settings = $root.vtctldata.MaterializeSettings.fromObject(object.settings); } return message; }; /** - * Creates a plain object from a GetKeyspacesResponse message. Also converts values to other types if specified. + * Creates a plain object from a MaterializeCreateRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetKeyspacesResponse + * @memberof vtctldata.MaterializeCreateRequest * @static - * @param {vtctldata.GetKeyspacesResponse} message GetKeyspacesResponse + * @param {vtctldata.MaterializeCreateRequest} message MaterializeCreateRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetKeyspacesResponse.toObject = function toObject(message, options) { + MaterializeCreateRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.keyspaces = []; - if (message.keyspaces && message.keyspaces.length) { - object.keyspaces = []; - for (let j = 0; j < message.keyspaces.length; ++j) - object.keyspaces[j] = $root.vtctldata.Keyspace.toObject(message.keyspaces[j], options); - } + if (options.defaults) + object.settings = null; + if (message.settings != null && message.hasOwnProperty("settings")) + object.settings = $root.vtctldata.MaterializeSettings.toObject(message.settings, options); return object; }; /** - * Converts this GetKeyspacesResponse to JSON. + * Converts this MaterializeCreateRequest to JSON. 
* @function toJSON - * @memberof vtctldata.GetKeyspacesResponse + * @memberof vtctldata.MaterializeCreateRequest * @instance * @returns {Object.} JSON object */ - GetKeyspacesResponse.prototype.toJSON = function toJSON() { + MaterializeCreateRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetKeyspacesResponse + * Gets the default type url for MaterializeCreateRequest * @function getTypeUrl - * @memberof vtctldata.GetKeyspacesResponse + * @memberof vtctldata.MaterializeCreateRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetKeyspacesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MaterializeCreateRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetKeyspacesResponse"; + return typeUrlPrefix + "/vtctldata.MaterializeCreateRequest"; }; - return GetKeyspacesResponse; + return MaterializeCreateRequest; })(); - vtctldata.GetKeyspaceRequest = (function() { + vtctldata.MaterializeCreateResponse = (function() { /** - * Properties of a GetKeyspaceRequest. + * Properties of a MaterializeCreateResponse. * @memberof vtctldata - * @interface IGetKeyspaceRequest - * @property {string|null} [keyspace] GetKeyspaceRequest keyspace + * @interface IMaterializeCreateResponse */ /** - * Constructs a new GetKeyspaceRequest. + * Constructs a new MaterializeCreateResponse. * @memberof vtctldata - * @classdesc Represents a GetKeyspaceRequest. - * @implements IGetKeyspaceRequest + * @classdesc Represents a MaterializeCreateResponse. 
+ * @implements IMaterializeCreateResponse * @constructor - * @param {vtctldata.IGetKeyspaceRequest=} [properties] Properties to set + * @param {vtctldata.IMaterializeCreateResponse=} [properties] Properties to set */ - function GetKeyspaceRequest(properties) { + function MaterializeCreateResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -114327,77 +135333,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetKeyspaceRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.GetKeyspaceRequest - * @instance - */ - GetKeyspaceRequest.prototype.keyspace = ""; - - /** - * Creates a new GetKeyspaceRequest instance using the specified properties. + * Creates a new MaterializeCreateResponse instance using the specified properties. * @function create - * @memberof vtctldata.GetKeyspaceRequest + * @memberof vtctldata.MaterializeCreateResponse * @static - * @param {vtctldata.IGetKeyspaceRequest=} [properties] Properties to set - * @returns {vtctldata.GetKeyspaceRequest} GetKeyspaceRequest instance + * @param {vtctldata.IMaterializeCreateResponse=} [properties] Properties to set + * @returns {vtctldata.MaterializeCreateResponse} MaterializeCreateResponse instance */ - GetKeyspaceRequest.create = function create(properties) { - return new GetKeyspaceRequest(properties); + MaterializeCreateResponse.create = function create(properties) { + return new MaterializeCreateResponse(properties); }; /** - * Encodes the specified GetKeyspaceRequest message. Does not implicitly {@link vtctldata.GetKeyspaceRequest.verify|verify} messages. + * Encodes the specified MaterializeCreateResponse message. Does not implicitly {@link vtctldata.MaterializeCreateResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetKeyspaceRequest + * @memberof vtctldata.MaterializeCreateResponse * @static - * @param {vtctldata.IGetKeyspaceRequest} message GetKeyspaceRequest message or plain object to encode + * @param {vtctldata.IMaterializeCreateResponse} message MaterializeCreateResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetKeyspaceRequest.encode = function encode(message, writer) { + MaterializeCreateResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); return writer; }; /** - * Encodes the specified GetKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.GetKeyspaceRequest.verify|verify} messages. + * Encodes the specified MaterializeCreateResponse message, length delimited. Does not implicitly {@link vtctldata.MaterializeCreateResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetKeyspaceRequest + * @memberof vtctldata.MaterializeCreateResponse * @static - * @param {vtctldata.IGetKeyspaceRequest} message GetKeyspaceRequest message or plain object to encode + * @param {vtctldata.IMaterializeCreateResponse} message MaterializeCreateResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetKeyspaceRequest.encodeDelimited = function encodeDelimited(message, writer) { + MaterializeCreateResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetKeyspaceRequest message from the specified reader or buffer. + * Decodes a MaterializeCreateResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.GetKeyspaceRequest + * @memberof vtctldata.MaterializeCreateResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetKeyspaceRequest} GetKeyspaceRequest + * @returns {vtctldata.MaterializeCreateResponse} MaterializeCreateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetKeyspaceRequest.decode = function decode(reader, length) { + MaterializeCreateResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetKeyspaceRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.MaterializeCreateResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.keyspace = reader.string(); - break; - } default: reader.skipType(tag & 7); break; @@ -114407,122 +135399,129 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetKeyspaceRequest message from the specified reader or buffer, length delimited. + * Decodes a MaterializeCreateResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.GetKeyspaceRequest + * @memberof vtctldata.MaterializeCreateResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetKeyspaceRequest} GetKeyspaceRequest + * @returns {vtctldata.MaterializeCreateResponse} MaterializeCreateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetKeyspaceRequest.decodeDelimited = function decodeDelimited(reader) { + MaterializeCreateResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetKeyspaceRequest message. + * Verifies a MaterializeCreateResponse message. * @function verify - * @memberof vtctldata.GetKeyspaceRequest + * @memberof vtctldata.MaterializeCreateResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetKeyspaceRequest.verify = function verify(message) { + MaterializeCreateResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; return null; }; /** - * Creates a GetKeyspaceRequest message from a plain object. Also converts values to their respective internal types. + * Creates a MaterializeCreateResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetKeyspaceRequest + * @memberof vtctldata.MaterializeCreateResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetKeyspaceRequest} GetKeyspaceRequest + * @returns {vtctldata.MaterializeCreateResponse} MaterializeCreateResponse */ - GetKeyspaceRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetKeyspaceRequest) + MaterializeCreateResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.MaterializeCreateResponse) return object; - let message = new $root.vtctldata.GetKeyspaceRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - return message; + return new $root.vtctldata.MaterializeCreateResponse(); }; /** - * Creates a plain object from a GetKeyspaceRequest message. Also converts values to other types if specified. + * Creates a plain object from a MaterializeCreateResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.GetKeyspaceRequest + * @memberof vtctldata.MaterializeCreateResponse * @static - * @param {vtctldata.GetKeyspaceRequest} message GetKeyspaceRequest + * @param {vtctldata.MaterializeCreateResponse} message MaterializeCreateResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetKeyspaceRequest.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) - object.keyspace = ""; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - return object; + MaterializeCreateResponse.toObject = function toObject() { + return {}; }; /** - * Converts this GetKeyspaceRequest to JSON. + * Converts this MaterializeCreateResponse to JSON. 
* @function toJSON - * @memberof vtctldata.GetKeyspaceRequest + * @memberof vtctldata.MaterializeCreateResponse * @instance * @returns {Object.} JSON object */ - GetKeyspaceRequest.prototype.toJSON = function toJSON() { + MaterializeCreateResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetKeyspaceRequest + * Gets the default type url for MaterializeCreateResponse * @function getTypeUrl - * @memberof vtctldata.GetKeyspaceRequest + * @memberof vtctldata.MaterializeCreateResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetKeyspaceRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MaterializeCreateResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetKeyspaceRequest"; + return typeUrlPrefix + "/vtctldata.MaterializeCreateResponse"; }; - return GetKeyspaceRequest; + return MaterializeCreateResponse; })(); - vtctldata.GetKeyspaceResponse = (function() { + vtctldata.MigrateCreateRequest = (function() { /** - * Properties of a GetKeyspaceResponse. + * Properties of a MigrateCreateRequest. * @memberof vtctldata - * @interface IGetKeyspaceResponse - * @property {vtctldata.IKeyspace|null} [keyspace] GetKeyspaceResponse keyspace - */ - - /** - * Constructs a new GetKeyspaceResponse. 
+ * @interface IMigrateCreateRequest + * @property {string|null} [workflow] MigrateCreateRequest workflow + * @property {string|null} [source_keyspace] MigrateCreateRequest source_keyspace + * @property {string|null} [target_keyspace] MigrateCreateRequest target_keyspace + * @property {string|null} [mount_name] MigrateCreateRequest mount_name + * @property {Array.|null} [cells] MigrateCreateRequest cells + * @property {Array.|null} [tablet_types] MigrateCreateRequest tablet_types + * @property {tabletmanagerdata.TabletSelectionPreference|null} [tablet_selection_preference] MigrateCreateRequest tablet_selection_preference + * @property {boolean|null} [all_tables] MigrateCreateRequest all_tables + * @property {Array.|null} [include_tables] MigrateCreateRequest include_tables + * @property {Array.|null} [exclude_tables] MigrateCreateRequest exclude_tables + * @property {string|null} [source_time_zone] MigrateCreateRequest source_time_zone + * @property {string|null} [on_ddl] MigrateCreateRequest on_ddl + * @property {boolean|null} [stop_after_copy] MigrateCreateRequest stop_after_copy + * @property {boolean|null} [drop_foreign_keys] MigrateCreateRequest drop_foreign_keys + * @property {boolean|null} [defer_secondary_keys] MigrateCreateRequest defer_secondary_keys + * @property {boolean|null} [auto_start] MigrateCreateRequest auto_start + * @property {boolean|null} [no_routing_rules] MigrateCreateRequest no_routing_rules + */ + + /** + * Constructs a new MigrateCreateRequest. * @memberof vtctldata - * @classdesc Represents a GetKeyspaceResponse. - * @implements IGetKeyspaceResponse + * @classdesc Represents a MigrateCreateRequest. 
+ * @implements IMigrateCreateRequest * @constructor - * @param {vtctldata.IGetKeyspaceResponse=} [properties] Properties to set + * @param {vtctldata.IMigrateCreateRequest=} [properties] Properties to set */ - function GetKeyspaceResponse(properties) { + function MigrateCreateRequest(properties) { + this.cells = []; + this.tablet_types = []; + this.include_tables = []; + this.exclude_tables = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -114530,75 +135529,319 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetKeyspaceResponse keyspace. - * @member {vtctldata.IKeyspace|null|undefined} keyspace - * @memberof vtctldata.GetKeyspaceResponse + * MigrateCreateRequest workflow. + * @member {string} workflow + * @memberof vtctldata.MigrateCreateRequest * @instance */ - GetKeyspaceResponse.prototype.keyspace = null; + MigrateCreateRequest.prototype.workflow = ""; /** - * Creates a new GetKeyspaceResponse instance using the specified properties. + * MigrateCreateRequest source_keyspace. + * @member {string} source_keyspace + * @memberof vtctldata.MigrateCreateRequest + * @instance + */ + MigrateCreateRequest.prototype.source_keyspace = ""; + + /** + * MigrateCreateRequest target_keyspace. + * @member {string} target_keyspace + * @memberof vtctldata.MigrateCreateRequest + * @instance + */ + MigrateCreateRequest.prototype.target_keyspace = ""; + + /** + * MigrateCreateRequest mount_name. + * @member {string} mount_name + * @memberof vtctldata.MigrateCreateRequest + * @instance + */ + MigrateCreateRequest.prototype.mount_name = ""; + + /** + * MigrateCreateRequest cells. + * @member {Array.} cells + * @memberof vtctldata.MigrateCreateRequest + * @instance + */ + MigrateCreateRequest.prototype.cells = $util.emptyArray; + + /** + * MigrateCreateRequest tablet_types. 
+ * @member {Array.} tablet_types + * @memberof vtctldata.MigrateCreateRequest + * @instance + */ + MigrateCreateRequest.prototype.tablet_types = $util.emptyArray; + + /** + * MigrateCreateRequest tablet_selection_preference. + * @member {tabletmanagerdata.TabletSelectionPreference} tablet_selection_preference + * @memberof vtctldata.MigrateCreateRequest + * @instance + */ + MigrateCreateRequest.prototype.tablet_selection_preference = 0; + + /** + * MigrateCreateRequest all_tables. + * @member {boolean} all_tables + * @memberof vtctldata.MigrateCreateRequest + * @instance + */ + MigrateCreateRequest.prototype.all_tables = false; + + /** + * MigrateCreateRequest include_tables. + * @member {Array.} include_tables + * @memberof vtctldata.MigrateCreateRequest + * @instance + */ + MigrateCreateRequest.prototype.include_tables = $util.emptyArray; + + /** + * MigrateCreateRequest exclude_tables. + * @member {Array.} exclude_tables + * @memberof vtctldata.MigrateCreateRequest + * @instance + */ + MigrateCreateRequest.prototype.exclude_tables = $util.emptyArray; + + /** + * MigrateCreateRequest source_time_zone. + * @member {string} source_time_zone + * @memberof vtctldata.MigrateCreateRequest + * @instance + */ + MigrateCreateRequest.prototype.source_time_zone = ""; + + /** + * MigrateCreateRequest on_ddl. + * @member {string} on_ddl + * @memberof vtctldata.MigrateCreateRequest + * @instance + */ + MigrateCreateRequest.prototype.on_ddl = ""; + + /** + * MigrateCreateRequest stop_after_copy. + * @member {boolean} stop_after_copy + * @memberof vtctldata.MigrateCreateRequest + * @instance + */ + MigrateCreateRequest.prototype.stop_after_copy = false; + + /** + * MigrateCreateRequest drop_foreign_keys. + * @member {boolean} drop_foreign_keys + * @memberof vtctldata.MigrateCreateRequest + * @instance + */ + MigrateCreateRequest.prototype.drop_foreign_keys = false; + + /** + * MigrateCreateRequest defer_secondary_keys. 
+ * @member {boolean} defer_secondary_keys + * @memberof vtctldata.MigrateCreateRequest + * @instance + */ + MigrateCreateRequest.prototype.defer_secondary_keys = false; + + /** + * MigrateCreateRequest auto_start. + * @member {boolean} auto_start + * @memberof vtctldata.MigrateCreateRequest + * @instance + */ + MigrateCreateRequest.prototype.auto_start = false; + + /** + * MigrateCreateRequest no_routing_rules. + * @member {boolean} no_routing_rules + * @memberof vtctldata.MigrateCreateRequest + * @instance + */ + MigrateCreateRequest.prototype.no_routing_rules = false; + + /** + * Creates a new MigrateCreateRequest instance using the specified properties. * @function create - * @memberof vtctldata.GetKeyspaceResponse + * @memberof vtctldata.MigrateCreateRequest * @static - * @param {vtctldata.IGetKeyspaceResponse=} [properties] Properties to set - * @returns {vtctldata.GetKeyspaceResponse} GetKeyspaceResponse instance + * @param {vtctldata.IMigrateCreateRequest=} [properties] Properties to set + * @returns {vtctldata.MigrateCreateRequest} MigrateCreateRequest instance */ - GetKeyspaceResponse.create = function create(properties) { - return new GetKeyspaceResponse(properties); + MigrateCreateRequest.create = function create(properties) { + return new MigrateCreateRequest(properties); }; /** - * Encodes the specified GetKeyspaceResponse message. Does not implicitly {@link vtctldata.GetKeyspaceResponse.verify|verify} messages. + * Encodes the specified MigrateCreateRequest message. Does not implicitly {@link vtctldata.MigrateCreateRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetKeyspaceResponse + * @memberof vtctldata.MigrateCreateRequest * @static - * @param {vtctldata.IGetKeyspaceResponse} message GetKeyspaceResponse message or plain object to encode + * @param {vtctldata.IMigrateCreateRequest} message MigrateCreateRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetKeyspaceResponse.encode = function encode(message, writer) { + MigrateCreateRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - $root.vtctldata.Keyspace.encode(message.keyspace, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.workflow); + if (message.source_keyspace != null && Object.hasOwnProperty.call(message, "source_keyspace")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.source_keyspace); + if (message.target_keyspace != null && Object.hasOwnProperty.call(message, "target_keyspace")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.target_keyspace); + if (message.mount_name != null && Object.hasOwnProperty.call(message, "mount_name")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.mount_name); + if (message.cells != null && message.cells.length) + for (let i = 0; i < message.cells.length; ++i) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.cells[i]); + if (message.tablet_types != null && message.tablet_types.length) { + writer.uint32(/* id 6, wireType 2 =*/50).fork(); + for (let i = 0; i < message.tablet_types.length; ++i) + writer.int32(message.tablet_types[i]); + writer.ldelim(); + } + if (message.tablet_selection_preference != null && Object.hasOwnProperty.call(message, "tablet_selection_preference")) + 
writer.uint32(/* id 7, wireType 0 =*/56).int32(message.tablet_selection_preference); + if (message.all_tables != null && Object.hasOwnProperty.call(message, "all_tables")) + writer.uint32(/* id 8, wireType 0 =*/64).bool(message.all_tables); + if (message.include_tables != null && message.include_tables.length) + for (let i = 0; i < message.include_tables.length; ++i) + writer.uint32(/* id 9, wireType 2 =*/74).string(message.include_tables[i]); + if (message.exclude_tables != null && message.exclude_tables.length) + for (let i = 0; i < message.exclude_tables.length; ++i) + writer.uint32(/* id 10, wireType 2 =*/82).string(message.exclude_tables[i]); + if (message.source_time_zone != null && Object.hasOwnProperty.call(message, "source_time_zone")) + writer.uint32(/* id 11, wireType 2 =*/90).string(message.source_time_zone); + if (message.on_ddl != null && Object.hasOwnProperty.call(message, "on_ddl")) + writer.uint32(/* id 12, wireType 2 =*/98).string(message.on_ddl); + if (message.stop_after_copy != null && Object.hasOwnProperty.call(message, "stop_after_copy")) + writer.uint32(/* id 13, wireType 0 =*/104).bool(message.stop_after_copy); + if (message.drop_foreign_keys != null && Object.hasOwnProperty.call(message, "drop_foreign_keys")) + writer.uint32(/* id 14, wireType 0 =*/112).bool(message.drop_foreign_keys); + if (message.defer_secondary_keys != null && Object.hasOwnProperty.call(message, "defer_secondary_keys")) + writer.uint32(/* id 15, wireType 0 =*/120).bool(message.defer_secondary_keys); + if (message.auto_start != null && Object.hasOwnProperty.call(message, "auto_start")) + writer.uint32(/* id 16, wireType 0 =*/128).bool(message.auto_start); + if (message.no_routing_rules != null && Object.hasOwnProperty.call(message, "no_routing_rules")) + writer.uint32(/* id 17, wireType 0 =*/136).bool(message.no_routing_rules); return writer; }; /** - * Encodes the specified GetKeyspaceResponse message, length delimited. 
Does not implicitly {@link vtctldata.GetKeyspaceResponse.verify|verify} messages. + * Encodes the specified MigrateCreateRequest message, length delimited. Does not implicitly {@link vtctldata.MigrateCreateRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetKeyspaceResponse + * @memberof vtctldata.MigrateCreateRequest * @static - * @param {vtctldata.IGetKeyspaceResponse} message GetKeyspaceResponse message or plain object to encode + * @param {vtctldata.IMigrateCreateRequest} message MigrateCreateRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetKeyspaceResponse.encodeDelimited = function encodeDelimited(message, writer) { + MigrateCreateRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetKeyspaceResponse message from the specified reader or buffer. + * Decodes a MigrateCreateRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetKeyspaceResponse + * @memberof vtctldata.MigrateCreateRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetKeyspaceResponse} GetKeyspaceResponse + * @returns {vtctldata.MigrateCreateRequest} MigrateCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetKeyspaceResponse.decode = function decode(reader, length) { + MigrateCreateRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetKeyspaceResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.MigrateCreateRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = $root.vtctldata.Keyspace.decode(reader, reader.uint32()); + message.workflow = reader.string(); + break; + } + case 2: { + message.source_keyspace = reader.string(); + break; + } + case 3: { + message.target_keyspace = reader.string(); + break; + } + case 4: { + message.mount_name = reader.string(); + break; + } + case 5: { + if (!(message.cells && message.cells.length)) + message.cells = []; + message.cells.push(reader.string()); + break; + } + case 6: { + if (!(message.tablet_types && message.tablet_types.length)) + message.tablet_types = []; + if ((tag & 7) === 2) { + let end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.tablet_types.push(reader.int32()); + } else + message.tablet_types.push(reader.int32()); + break; + } + case 7: { + message.tablet_selection_preference = reader.int32(); + break; + } + case 8: { + message.all_tables = reader.bool(); + break; + } + case 9: { + if (!(message.include_tables && message.include_tables.length)) + message.include_tables = []; + message.include_tables.push(reader.string()); + break; + } + case 10: { + if (!(message.exclude_tables && message.exclude_tables.length)) + message.exclude_tables = []; + message.exclude_tables.push(reader.string()); + break; + } + case 11: { + message.source_time_zone = reader.string(); + break; + } + case 12: { + message.on_ddl = reader.string(); + break; + } + case 13: { + message.stop_after_copy = reader.bool(); + break; + } + case 14: { + message.drop_foreign_keys = reader.bool(); + break; + } + case 15: { + message.defer_secondary_keys = reader.bool(); + break; + } + case 16: { + message.auto_start = reader.bool(); + break; + } + case 17: { + message.no_routing_rules = reader.bool(); break; } default: @@ -114610,127 +135853,394 @@ export const vtctldata = $root.vtctldata = 
(() => { }; /** - * Decodes a GetKeyspaceResponse message from the specified reader or buffer, length delimited. + * Decodes a MigrateCreateRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetKeyspaceResponse + * @memberof vtctldata.MigrateCreateRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetKeyspaceResponse} GetKeyspaceResponse + * @returns {vtctldata.MigrateCreateRequest} MigrateCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetKeyspaceResponse.decodeDelimited = function decodeDelimited(reader) { + MigrateCreateRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetKeyspaceResponse message. + * Verifies a MigrateCreateRequest message. * @function verify - * @memberof vtctldata.GetKeyspaceResponse + * @memberof vtctldata.MigrateCreateRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetKeyspaceResponse.verify = function verify(message) { + MigrateCreateRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) { - let error = $root.vtctldata.Keyspace.verify(message.keyspace); - if (error) - return "keyspace." 
+ error; + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; + if (message.source_keyspace != null && message.hasOwnProperty("source_keyspace")) + if (!$util.isString(message.source_keyspace)) + return "source_keyspace: string expected"; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + if (!$util.isString(message.target_keyspace)) + return "target_keyspace: string expected"; + if (message.mount_name != null && message.hasOwnProperty("mount_name")) + if (!$util.isString(message.mount_name)) + return "mount_name: string expected"; + if (message.cells != null && message.hasOwnProperty("cells")) { + if (!Array.isArray(message.cells)) + return "cells: array expected"; + for (let i = 0; i < message.cells.length; ++i) + if (!$util.isString(message.cells[i])) + return "cells: string[] expected"; + } + if (message.tablet_types != null && message.hasOwnProperty("tablet_types")) { + if (!Array.isArray(message.tablet_types)) + return "tablet_types: array expected"; + for (let i = 0; i < message.tablet_types.length; ++i) + switch (message.tablet_types[i]) { + default: + return "tablet_types: enum value[] expected"; + case 0: + case 1: + case 1: + case 2: + case 3: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + } + } + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + switch (message.tablet_selection_preference) { + default: + return "tablet_selection_preference: enum value expected"; + case 0: + case 1: + case 3: + break; + } + if (message.all_tables != null && message.hasOwnProperty("all_tables")) + if (typeof message.all_tables !== "boolean") + return "all_tables: boolean expected"; + if (message.include_tables != null && message.hasOwnProperty("include_tables")) { + if (!Array.isArray(message.include_tables)) + return "include_tables: array expected"; + for (let 
i = 0; i < message.include_tables.length; ++i) + if (!$util.isString(message.include_tables[i])) + return "include_tables: string[] expected"; + } + if (message.exclude_tables != null && message.hasOwnProperty("exclude_tables")) { + if (!Array.isArray(message.exclude_tables)) + return "exclude_tables: array expected"; + for (let i = 0; i < message.exclude_tables.length; ++i) + if (!$util.isString(message.exclude_tables[i])) + return "exclude_tables: string[] expected"; + } + if (message.source_time_zone != null && message.hasOwnProperty("source_time_zone")) + if (!$util.isString(message.source_time_zone)) + return "source_time_zone: string expected"; + if (message.on_ddl != null && message.hasOwnProperty("on_ddl")) + if (!$util.isString(message.on_ddl)) + return "on_ddl: string expected"; + if (message.stop_after_copy != null && message.hasOwnProperty("stop_after_copy")) + if (typeof message.stop_after_copy !== "boolean") + return "stop_after_copy: boolean expected"; + if (message.drop_foreign_keys != null && message.hasOwnProperty("drop_foreign_keys")) + if (typeof message.drop_foreign_keys !== "boolean") + return "drop_foreign_keys: boolean expected"; + if (message.defer_secondary_keys != null && message.hasOwnProperty("defer_secondary_keys")) + if (typeof message.defer_secondary_keys !== "boolean") + return "defer_secondary_keys: boolean expected"; + if (message.auto_start != null && message.hasOwnProperty("auto_start")) + if (typeof message.auto_start !== "boolean") + return "auto_start: boolean expected"; + if (message.no_routing_rules != null && message.hasOwnProperty("no_routing_rules")) + if (typeof message.no_routing_rules !== "boolean") + return "no_routing_rules: boolean expected"; + return null; + }; + + /** + * Creates a MigrateCreateRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.MigrateCreateRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.MigrateCreateRequest} MigrateCreateRequest + */ + MigrateCreateRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.MigrateCreateRequest) + return object; + let message = new $root.vtctldata.MigrateCreateRequest(); + if (object.workflow != null) + message.workflow = String(object.workflow); + if (object.source_keyspace != null) + message.source_keyspace = String(object.source_keyspace); + if (object.target_keyspace != null) + message.target_keyspace = String(object.target_keyspace); + if (object.mount_name != null) + message.mount_name = String(object.mount_name); + if (object.cells) { + if (!Array.isArray(object.cells)) + throw TypeError(".vtctldata.MigrateCreateRequest.cells: array expected"); + message.cells = []; + for (let i = 0; i < object.cells.length; ++i) + message.cells[i] = String(object.cells[i]); + } + if (object.tablet_types) { + if (!Array.isArray(object.tablet_types)) + throw TypeError(".vtctldata.MigrateCreateRequest.tablet_types: array expected"); + message.tablet_types = []; + for (let i = 0; i < object.tablet_types.length; ++i) + switch (object.tablet_types[i]) { + default: + if (typeof object.tablet_types[i] === "number") { + message.tablet_types[i] = object.tablet_types[i]; + break; + } + case "UNKNOWN": + case 0: + message.tablet_types[i] = 0; + break; + case "PRIMARY": + case 1: + message.tablet_types[i] = 1; + break; + case "MASTER": + case 1: + message.tablet_types[i] = 1; + break; + case "REPLICA": + case 2: + message.tablet_types[i] = 2; + break; + case "RDONLY": + case 3: + message.tablet_types[i] = 3; + break; + case "BATCH": + case 3: + message.tablet_types[i] = 3; + break; + case "SPARE": + case 4: + message.tablet_types[i] = 4; + break; + case "EXPERIMENTAL": + case 5: + message.tablet_types[i] = 5; + break; + case "BACKUP": + case 6: + 
message.tablet_types[i] = 6; + break; + case "RESTORE": + case 7: + message.tablet_types[i] = 7; + break; + case "DRAINED": + case 8: + message.tablet_types[i] = 8; + break; + } + } + switch (object.tablet_selection_preference) { + default: + if (typeof object.tablet_selection_preference === "number") { + message.tablet_selection_preference = object.tablet_selection_preference; + break; + } + break; + case "ANY": + case 0: + message.tablet_selection_preference = 0; + break; + case "INORDER": + case 1: + message.tablet_selection_preference = 1; + break; + case "UNKNOWN": + case 3: + message.tablet_selection_preference = 3; + break; } - return null; - }; - - /** - * Creates a GetKeyspaceResponse message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof vtctldata.GetKeyspaceResponse - * @static - * @param {Object.} object Plain object - * @returns {vtctldata.GetKeyspaceResponse} GetKeyspaceResponse - */ - GetKeyspaceResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetKeyspaceResponse) - return object; - let message = new $root.vtctldata.GetKeyspaceResponse(); - if (object.keyspace != null) { - if (typeof object.keyspace !== "object") - throw TypeError(".vtctldata.GetKeyspaceResponse.keyspace: object expected"); - message.keyspace = $root.vtctldata.Keyspace.fromObject(object.keyspace); + if (object.all_tables != null) + message.all_tables = Boolean(object.all_tables); + if (object.include_tables) { + if (!Array.isArray(object.include_tables)) + throw TypeError(".vtctldata.MigrateCreateRequest.include_tables: array expected"); + message.include_tables = []; + for (let i = 0; i < object.include_tables.length; ++i) + message.include_tables[i] = String(object.include_tables[i]); + } + if (object.exclude_tables) { + if (!Array.isArray(object.exclude_tables)) + throw TypeError(".vtctldata.MigrateCreateRequest.exclude_tables: array expected"); + message.exclude_tables 
= []; + for (let i = 0; i < object.exclude_tables.length; ++i) + message.exclude_tables[i] = String(object.exclude_tables[i]); } + if (object.source_time_zone != null) + message.source_time_zone = String(object.source_time_zone); + if (object.on_ddl != null) + message.on_ddl = String(object.on_ddl); + if (object.stop_after_copy != null) + message.stop_after_copy = Boolean(object.stop_after_copy); + if (object.drop_foreign_keys != null) + message.drop_foreign_keys = Boolean(object.drop_foreign_keys); + if (object.defer_secondary_keys != null) + message.defer_secondary_keys = Boolean(object.defer_secondary_keys); + if (object.auto_start != null) + message.auto_start = Boolean(object.auto_start); + if (object.no_routing_rules != null) + message.no_routing_rules = Boolean(object.no_routing_rules); return message; }; /** - * Creates a plain object from a GetKeyspaceResponse message. Also converts values to other types if specified. + * Creates a plain object from a MigrateCreateRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetKeyspaceResponse + * @memberof vtctldata.MigrateCreateRequest * @static - * @param {vtctldata.GetKeyspaceResponse} message GetKeyspaceResponse + * @param {vtctldata.MigrateCreateRequest} message MigrateCreateRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetKeyspaceResponse.toObject = function toObject(message, options) { + MigrateCreateRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.keyspace = null; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = $root.vtctldata.Keyspace.toObject(message.keyspace, options); + if (options.arrays || options.defaults) { + object.cells = []; + object.tablet_types = []; + object.include_tables = []; + object.exclude_tables = []; + } + if (options.defaults) { + object.workflow = ""; + object.source_keyspace = ""; + object.target_keyspace = ""; + object.mount_name = ""; + object.tablet_selection_preference = options.enums === String ? 
"ANY" : 0; + object.all_tables = false; + object.source_time_zone = ""; + object.on_ddl = ""; + object.stop_after_copy = false; + object.drop_foreign_keys = false; + object.defer_secondary_keys = false; + object.auto_start = false; + object.no_routing_rules = false; + } + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; + if (message.source_keyspace != null && message.hasOwnProperty("source_keyspace")) + object.source_keyspace = message.source_keyspace; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + object.target_keyspace = message.target_keyspace; + if (message.mount_name != null && message.hasOwnProperty("mount_name")) + object.mount_name = message.mount_name; + if (message.cells && message.cells.length) { + object.cells = []; + for (let j = 0; j < message.cells.length; ++j) + object.cells[j] = message.cells[j]; + } + if (message.tablet_types && message.tablet_types.length) { + object.tablet_types = []; + for (let j = 0; j < message.tablet_types.length; ++j) + object.tablet_types[j] = options.enums === String ? $root.topodata.TabletType[message.tablet_types[j]] === undefined ? message.tablet_types[j] : $root.topodata.TabletType[message.tablet_types[j]] : message.tablet_types[j]; + } + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + object.tablet_selection_preference = options.enums === String ? $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] === undefined ? 
message.tablet_selection_preference : $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] : message.tablet_selection_preference; + if (message.all_tables != null && message.hasOwnProperty("all_tables")) + object.all_tables = message.all_tables; + if (message.include_tables && message.include_tables.length) { + object.include_tables = []; + for (let j = 0; j < message.include_tables.length; ++j) + object.include_tables[j] = message.include_tables[j]; + } + if (message.exclude_tables && message.exclude_tables.length) { + object.exclude_tables = []; + for (let j = 0; j < message.exclude_tables.length; ++j) + object.exclude_tables[j] = message.exclude_tables[j]; + } + if (message.source_time_zone != null && message.hasOwnProperty("source_time_zone")) + object.source_time_zone = message.source_time_zone; + if (message.on_ddl != null && message.hasOwnProperty("on_ddl")) + object.on_ddl = message.on_ddl; + if (message.stop_after_copy != null && message.hasOwnProperty("stop_after_copy")) + object.stop_after_copy = message.stop_after_copy; + if (message.drop_foreign_keys != null && message.hasOwnProperty("drop_foreign_keys")) + object.drop_foreign_keys = message.drop_foreign_keys; + if (message.defer_secondary_keys != null && message.hasOwnProperty("defer_secondary_keys")) + object.defer_secondary_keys = message.defer_secondary_keys; + if (message.auto_start != null && message.hasOwnProperty("auto_start")) + object.auto_start = message.auto_start; + if (message.no_routing_rules != null && message.hasOwnProperty("no_routing_rules")) + object.no_routing_rules = message.no_routing_rules; return object; }; /** - * Converts this GetKeyspaceResponse to JSON. + * Converts this MigrateCreateRequest to JSON. 
* @function toJSON - * @memberof vtctldata.GetKeyspaceResponse + * @memberof vtctldata.MigrateCreateRequest * @instance * @returns {Object.} JSON object */ - GetKeyspaceResponse.prototype.toJSON = function toJSON() { + MigrateCreateRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetKeyspaceResponse + * Gets the default type url for MigrateCreateRequest * @function getTypeUrl - * @memberof vtctldata.GetKeyspaceResponse + * @memberof vtctldata.MigrateCreateRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetKeyspaceResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MigrateCreateRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetKeyspaceResponse"; + return typeUrlPrefix + "/vtctldata.MigrateCreateRequest"; }; - return GetKeyspaceResponse; + return MigrateCreateRequest; })(); - vtctldata.GetPermissionsRequest = (function() { + vtctldata.MigrateCompleteRequest = (function() { /** - * Properties of a GetPermissionsRequest. + * Properties of a MigrateCompleteRequest. 
* @memberof vtctldata - * @interface IGetPermissionsRequest - * @property {topodata.ITabletAlias|null} [tablet_alias] GetPermissionsRequest tablet_alias + * @interface IMigrateCompleteRequest + * @property {string|null} [workflow] MigrateCompleteRequest workflow + * @property {string|null} [target_keyspace] MigrateCompleteRequest target_keyspace + * @property {boolean|null} [keep_data] MigrateCompleteRequest keep_data + * @property {boolean|null} [keep_routing_rules] MigrateCompleteRequest keep_routing_rules + * @property {boolean|null} [rename_tables] MigrateCompleteRequest rename_tables + * @property {boolean|null} [dry_run] MigrateCompleteRequest dry_run */ /** - * Constructs a new GetPermissionsRequest. + * Constructs a new MigrateCompleteRequest. * @memberof vtctldata - * @classdesc Represents a GetPermissionsRequest. - * @implements IGetPermissionsRequest + * @classdesc Represents a MigrateCompleteRequest. + * @implements IMigrateCompleteRequest * @constructor - * @param {vtctldata.IGetPermissionsRequest=} [properties] Properties to set + * @param {vtctldata.IMigrateCompleteRequest=} [properties] Properties to set */ - function GetPermissionsRequest(properties) { + function MigrateCompleteRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -114738,75 +136248,145 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetPermissionsRequest tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.GetPermissionsRequest + * MigrateCompleteRequest workflow. + * @member {string} workflow + * @memberof vtctldata.MigrateCompleteRequest * @instance */ - GetPermissionsRequest.prototype.tablet_alias = null; + MigrateCompleteRequest.prototype.workflow = ""; /** - * Creates a new GetPermissionsRequest instance using the specified properties. + * MigrateCompleteRequest target_keyspace. 
+ * @member {string} target_keyspace + * @memberof vtctldata.MigrateCompleteRequest + * @instance + */ + MigrateCompleteRequest.prototype.target_keyspace = ""; + + /** + * MigrateCompleteRequest keep_data. + * @member {boolean} keep_data + * @memberof vtctldata.MigrateCompleteRequest + * @instance + */ + MigrateCompleteRequest.prototype.keep_data = false; + + /** + * MigrateCompleteRequest keep_routing_rules. + * @member {boolean} keep_routing_rules + * @memberof vtctldata.MigrateCompleteRequest + * @instance + */ + MigrateCompleteRequest.prototype.keep_routing_rules = false; + + /** + * MigrateCompleteRequest rename_tables. + * @member {boolean} rename_tables + * @memberof vtctldata.MigrateCompleteRequest + * @instance + */ + MigrateCompleteRequest.prototype.rename_tables = false; + + /** + * MigrateCompleteRequest dry_run. + * @member {boolean} dry_run + * @memberof vtctldata.MigrateCompleteRequest + * @instance + */ + MigrateCompleteRequest.prototype.dry_run = false; + + /** + * Creates a new MigrateCompleteRequest instance using the specified properties. * @function create - * @memberof vtctldata.GetPermissionsRequest + * @memberof vtctldata.MigrateCompleteRequest * @static - * @param {vtctldata.IGetPermissionsRequest=} [properties] Properties to set - * @returns {vtctldata.GetPermissionsRequest} GetPermissionsRequest instance + * @param {vtctldata.IMigrateCompleteRequest=} [properties] Properties to set + * @returns {vtctldata.MigrateCompleteRequest} MigrateCompleteRequest instance */ - GetPermissionsRequest.create = function create(properties) { - return new GetPermissionsRequest(properties); + MigrateCompleteRequest.create = function create(properties) { + return new MigrateCompleteRequest(properties); }; /** - * Encodes the specified GetPermissionsRequest message. Does not implicitly {@link vtctldata.GetPermissionsRequest.verify|verify} messages. + * Encodes the specified MigrateCompleteRequest message. 
Does not implicitly {@link vtctldata.MigrateCompleteRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.GetPermissionsRequest + * @memberof vtctldata.MigrateCompleteRequest * @static - * @param {vtctldata.IGetPermissionsRequest} message GetPermissionsRequest message or plain object to encode + * @param {vtctldata.IMigrateCompleteRequest} message MigrateCompleteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetPermissionsRequest.encode = function encode(message, writer) { + MigrateCompleteRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.workflow); + if (message.target_keyspace != null && Object.hasOwnProperty.call(message, "target_keyspace")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.target_keyspace); + if (message.keep_data != null && Object.hasOwnProperty.call(message, "keep_data")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.keep_data); + if (message.keep_routing_rules != null && Object.hasOwnProperty.call(message, "keep_routing_rules")) + writer.uint32(/* id 5, wireType 0 =*/40).bool(message.keep_routing_rules); + if (message.rename_tables != null && Object.hasOwnProperty.call(message, "rename_tables")) + writer.uint32(/* id 6, wireType 0 =*/48).bool(message.rename_tables); + if (message.dry_run != null && Object.hasOwnProperty.call(message, "dry_run")) + writer.uint32(/* id 7, wireType 0 =*/56).bool(message.dry_run); return writer; }; /** - * Encodes the specified GetPermissionsRequest message, length delimited. 
Does not implicitly {@link vtctldata.GetPermissionsRequest.verify|verify} messages. + * Encodes the specified MigrateCompleteRequest message, length delimited. Does not implicitly {@link vtctldata.MigrateCompleteRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetPermissionsRequest + * @memberof vtctldata.MigrateCompleteRequest * @static - * @param {vtctldata.IGetPermissionsRequest} message GetPermissionsRequest message or plain object to encode + * @param {vtctldata.IMigrateCompleteRequest} message MigrateCompleteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetPermissionsRequest.encodeDelimited = function encodeDelimited(message, writer) { + MigrateCompleteRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetPermissionsRequest message from the specified reader or buffer. + * Decodes a MigrateCompleteRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetPermissionsRequest + * @memberof vtctldata.MigrateCompleteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetPermissionsRequest} GetPermissionsRequest + * @returns {vtctldata.MigrateCompleteRequest} MigrateCompleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetPermissionsRequest.decode = function decode(reader, length) { + MigrateCompleteRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetPermissionsRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.MigrateCompleteRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + message.workflow = reader.string(); + break; + } + case 3: { + message.target_keyspace = reader.string(); + break; + } + case 4: { + message.keep_data = reader.bool(); + break; + } + case 5: { + message.keep_routing_rules = reader.bool(); + break; + } + case 6: { + message.rename_tables = reader.bool(); + break; + } + case 7: { + message.dry_run = reader.bool(); break; } default: @@ -114818,127 +136398,165 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetPermissionsRequest message from the specified reader or buffer, length delimited. + * Decodes a MigrateCompleteRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetPermissionsRequest + * @memberof vtctldata.MigrateCompleteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetPermissionsRequest} GetPermissionsRequest + * @returns {vtctldata.MigrateCompleteRequest} MigrateCompleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetPermissionsRequest.decodeDelimited = function decodeDelimited(reader) { + MigrateCompleteRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetPermissionsRequest message. + * Verifies a MigrateCompleteRequest message. 
* @function verify - * @memberof vtctldata.GetPermissionsRequest + * @memberof vtctldata.MigrateCompleteRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetPermissionsRequest.verify = function verify(message) { + MigrateCompleteRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + if (!$util.isString(message.target_keyspace)) + return "target_keyspace: string expected"; + if (message.keep_data != null && message.hasOwnProperty("keep_data")) + if (typeof message.keep_data !== "boolean") + return "keep_data: boolean expected"; + if (message.keep_routing_rules != null && message.hasOwnProperty("keep_routing_rules")) + if (typeof message.keep_routing_rules !== "boolean") + return "keep_routing_rules: boolean expected"; + if (message.rename_tables != null && message.hasOwnProperty("rename_tables")) + if (typeof message.rename_tables !== "boolean") + return "rename_tables: boolean expected"; + if (message.dry_run != null && message.hasOwnProperty("dry_run")) + if (typeof message.dry_run !== "boolean") + return "dry_run: boolean expected"; return null; }; /** - * Creates a GetPermissionsRequest message from a plain object. Also converts values to their respective internal types. + * Creates a MigrateCompleteRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetPermissionsRequest + * @memberof vtctldata.MigrateCompleteRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetPermissionsRequest} GetPermissionsRequest + * @returns {vtctldata.MigrateCompleteRequest} MigrateCompleteRequest */ - GetPermissionsRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetPermissionsRequest) + MigrateCompleteRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.MigrateCompleteRequest) return object; - let message = new $root.vtctldata.GetPermissionsRequest(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.GetPermissionsRequest.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } + let message = new $root.vtctldata.MigrateCompleteRequest(); + if (object.workflow != null) + message.workflow = String(object.workflow); + if (object.target_keyspace != null) + message.target_keyspace = String(object.target_keyspace); + if (object.keep_data != null) + message.keep_data = Boolean(object.keep_data); + if (object.keep_routing_rules != null) + message.keep_routing_rules = Boolean(object.keep_routing_rules); + if (object.rename_tables != null) + message.rename_tables = Boolean(object.rename_tables); + if (object.dry_run != null) + message.dry_run = Boolean(object.dry_run); return message; }; /** - * Creates a plain object from a GetPermissionsRequest message. Also converts values to other types if specified. + * Creates a plain object from a MigrateCompleteRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetPermissionsRequest + * @memberof vtctldata.MigrateCompleteRequest * @static - * @param {vtctldata.GetPermissionsRequest} message GetPermissionsRequest + * @param {vtctldata.MigrateCompleteRequest} message MigrateCompleteRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetPermissionsRequest.toObject = function toObject(message, options) { + MigrateCompleteRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.tablet_alias = null; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (options.defaults) { + object.workflow = ""; + object.target_keyspace = ""; + object.keep_data = false; + object.keep_routing_rules = false; + object.rename_tables = false; + object.dry_run = false; + } + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + object.target_keyspace = message.target_keyspace; + if (message.keep_data != null && message.hasOwnProperty("keep_data")) + object.keep_data = message.keep_data; + if (message.keep_routing_rules != null && message.hasOwnProperty("keep_routing_rules")) + object.keep_routing_rules = message.keep_routing_rules; + if (message.rename_tables != null && message.hasOwnProperty("rename_tables")) + object.rename_tables = message.rename_tables; + if (message.dry_run != null && message.hasOwnProperty("dry_run")) + object.dry_run = message.dry_run; return object; }; /** - * Converts this GetPermissionsRequest to JSON. + * Converts this MigrateCompleteRequest to JSON. 
* @function toJSON - * @memberof vtctldata.GetPermissionsRequest + * @memberof vtctldata.MigrateCompleteRequest * @instance * @returns {Object.} JSON object */ - GetPermissionsRequest.prototype.toJSON = function toJSON() { + MigrateCompleteRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetPermissionsRequest + * Gets the default type url for MigrateCompleteRequest * @function getTypeUrl - * @memberof vtctldata.GetPermissionsRequest + * @memberof vtctldata.MigrateCompleteRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetPermissionsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MigrateCompleteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetPermissionsRequest"; + return typeUrlPrefix + "/vtctldata.MigrateCompleteRequest"; }; - return GetPermissionsRequest; + return MigrateCompleteRequest; })(); - vtctldata.GetPermissionsResponse = (function() { + vtctldata.MigrateCompleteResponse = (function() { /** - * Properties of a GetPermissionsResponse. + * Properties of a MigrateCompleteResponse. * @memberof vtctldata - * @interface IGetPermissionsResponse - * @property {tabletmanagerdata.IPermissions|null} [permissions] GetPermissionsResponse permissions + * @interface IMigrateCompleteResponse + * @property {string|null} [summary] MigrateCompleteResponse summary + * @property {Array.|null} [dry_run_results] MigrateCompleteResponse dry_run_results */ /** - * Constructs a new GetPermissionsResponse. + * Constructs a new MigrateCompleteResponse. * @memberof vtctldata - * @classdesc Represents a GetPermissionsResponse. - * @implements IGetPermissionsResponse + * @classdesc Represents a MigrateCompleteResponse. 
+ * @implements IMigrateCompleteResponse * @constructor - * @param {vtctldata.IGetPermissionsResponse=} [properties] Properties to set + * @param {vtctldata.IMigrateCompleteResponse=} [properties] Properties to set */ - function GetPermissionsResponse(properties) { + function MigrateCompleteResponse(properties) { + this.dry_run_results = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -114946,75 +136564,92 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetPermissionsResponse permissions. - * @member {tabletmanagerdata.IPermissions|null|undefined} permissions - * @memberof vtctldata.GetPermissionsResponse + * MigrateCompleteResponse summary. + * @member {string} summary + * @memberof vtctldata.MigrateCompleteResponse * @instance */ - GetPermissionsResponse.prototype.permissions = null; + MigrateCompleteResponse.prototype.summary = ""; /** - * Creates a new GetPermissionsResponse instance using the specified properties. + * MigrateCompleteResponse dry_run_results. + * @member {Array.} dry_run_results + * @memberof vtctldata.MigrateCompleteResponse + * @instance + */ + MigrateCompleteResponse.prototype.dry_run_results = $util.emptyArray; + + /** + * Creates a new MigrateCompleteResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.GetPermissionsResponse + * @memberof vtctldata.MigrateCompleteResponse * @static - * @param {vtctldata.IGetPermissionsResponse=} [properties] Properties to set - * @returns {vtctldata.GetPermissionsResponse} GetPermissionsResponse instance + * @param {vtctldata.IMigrateCompleteResponse=} [properties] Properties to set + * @returns {vtctldata.MigrateCompleteResponse} MigrateCompleteResponse instance */ - GetPermissionsResponse.create = function create(properties) { - return new GetPermissionsResponse(properties); + MigrateCompleteResponse.create = function create(properties) { + return new MigrateCompleteResponse(properties); }; /** - * Encodes the specified GetPermissionsResponse message. Does not implicitly {@link vtctldata.GetPermissionsResponse.verify|verify} messages. + * Encodes the specified MigrateCompleteResponse message. Does not implicitly {@link vtctldata.MigrateCompleteResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.GetPermissionsResponse + * @memberof vtctldata.MigrateCompleteResponse * @static - * @param {vtctldata.IGetPermissionsResponse} message GetPermissionsResponse message or plain object to encode + * @param {vtctldata.IMigrateCompleteResponse} message MigrateCompleteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetPermissionsResponse.encode = function encode(message, writer) { + MigrateCompleteResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.permissions != null && Object.hasOwnProperty.call(message, "permissions")) - $root.tabletmanagerdata.Permissions.encode(message.permissions, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.summary != null && Object.hasOwnProperty.call(message, "summary")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.summary); + if (message.dry_run_results != null 
&& message.dry_run_results.length) + for (let i = 0; i < message.dry_run_results.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.dry_run_results[i]); return writer; }; /** - * Encodes the specified GetPermissionsResponse message, length delimited. Does not implicitly {@link vtctldata.GetPermissionsResponse.verify|verify} messages. + * Encodes the specified MigrateCompleteResponse message, length delimited. Does not implicitly {@link vtctldata.MigrateCompleteResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetPermissionsResponse + * @memberof vtctldata.MigrateCompleteResponse * @static - * @param {vtctldata.IGetPermissionsResponse} message GetPermissionsResponse message or plain object to encode + * @param {vtctldata.IMigrateCompleteResponse} message MigrateCompleteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetPermissionsResponse.encodeDelimited = function encodeDelimited(message, writer) { + MigrateCompleteResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetPermissionsResponse message from the specified reader or buffer. + * Decodes a MigrateCompleteResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.GetPermissionsResponse + * @memberof vtctldata.MigrateCompleteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetPermissionsResponse} GetPermissionsResponse + * @returns {vtctldata.MigrateCompleteResponse} MigrateCompleteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetPermissionsResponse.decode = function decode(reader, length) { + MigrateCompleteResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetPermissionsResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.MigrateCompleteResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.permissions = $root.tabletmanagerdata.Permissions.decode(reader, reader.uint32()); + message.summary = reader.string(); + break; + } + case 2: { + if (!(message.dry_run_results && message.dry_run_results.length)) + message.dry_run_results = []; + message.dry_run_results.push(reader.string()); break; } default: @@ -115026,126 +136661,146 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetPermissionsResponse message from the specified reader or buffer, length delimited. + * Decodes a MigrateCompleteResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.GetPermissionsResponse + * @memberof vtctldata.MigrateCompleteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetPermissionsResponse} GetPermissionsResponse + * @returns {vtctldata.MigrateCompleteResponse} MigrateCompleteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetPermissionsResponse.decodeDelimited = function decodeDelimited(reader) { + MigrateCompleteResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetPermissionsResponse message. + * Verifies a MigrateCompleteResponse message. * @function verify - * @memberof vtctldata.GetPermissionsResponse + * @memberof vtctldata.MigrateCompleteResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetPermissionsResponse.verify = function verify(message) { + MigrateCompleteResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.permissions != null && message.hasOwnProperty("permissions")) { - let error = $root.tabletmanagerdata.Permissions.verify(message.permissions); - if (error) - return "permissions." 
+ error; + if (message.summary != null && message.hasOwnProperty("summary")) + if (!$util.isString(message.summary)) + return "summary: string expected"; + if (message.dry_run_results != null && message.hasOwnProperty("dry_run_results")) { + if (!Array.isArray(message.dry_run_results)) + return "dry_run_results: array expected"; + for (let i = 0; i < message.dry_run_results.length; ++i) + if (!$util.isString(message.dry_run_results[i])) + return "dry_run_results: string[] expected"; } return null; }; /** - * Creates a GetPermissionsResponse message from a plain object. Also converts values to their respective internal types. + * Creates a MigrateCompleteResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.GetPermissionsResponse + * @memberof vtctldata.MigrateCompleteResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetPermissionsResponse} GetPermissionsResponse + * @returns {vtctldata.MigrateCompleteResponse} MigrateCompleteResponse */ - GetPermissionsResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetPermissionsResponse) + MigrateCompleteResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.MigrateCompleteResponse) return object; - let message = new $root.vtctldata.GetPermissionsResponse(); - if (object.permissions != null) { - if (typeof object.permissions !== "object") - throw TypeError(".vtctldata.GetPermissionsResponse.permissions: object expected"); - message.permissions = $root.tabletmanagerdata.Permissions.fromObject(object.permissions); + let message = new $root.vtctldata.MigrateCompleteResponse(); + if (object.summary != null) + message.summary = String(object.summary); + if (object.dry_run_results) { + if (!Array.isArray(object.dry_run_results)) + throw TypeError(".vtctldata.MigrateCompleteResponse.dry_run_results: array expected"); + 
message.dry_run_results = []; + for (let i = 0; i < object.dry_run_results.length; ++i) + message.dry_run_results[i] = String(object.dry_run_results[i]); } return message; }; /** - * Creates a plain object from a GetPermissionsResponse message. Also converts values to other types if specified. + * Creates a plain object from a MigrateCompleteResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.GetPermissionsResponse + * @memberof vtctldata.MigrateCompleteResponse * @static - * @param {vtctldata.GetPermissionsResponse} message GetPermissionsResponse + * @param {vtctldata.MigrateCompleteResponse} message MigrateCompleteResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetPermissionsResponse.toObject = function toObject(message, options) { + MigrateCompleteResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) + object.dry_run_results = []; if (options.defaults) - object.permissions = null; - if (message.permissions != null && message.hasOwnProperty("permissions")) - object.permissions = $root.tabletmanagerdata.Permissions.toObject(message.permissions, options); + object.summary = ""; + if (message.summary != null && message.hasOwnProperty("summary")) + object.summary = message.summary; + if (message.dry_run_results && message.dry_run_results.length) { + object.dry_run_results = []; + for (let j = 0; j < message.dry_run_results.length; ++j) + object.dry_run_results[j] = message.dry_run_results[j]; + } return object; }; /** - * Converts this GetPermissionsResponse to JSON. + * Converts this MigrateCompleteResponse to JSON. 
* @function toJSON - * @memberof vtctldata.GetPermissionsResponse + * @memberof vtctldata.MigrateCompleteResponse * @instance * @returns {Object.} JSON object */ - GetPermissionsResponse.prototype.toJSON = function toJSON() { + MigrateCompleteResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetPermissionsResponse + * Gets the default type url for MigrateCompleteResponse * @function getTypeUrl - * @memberof vtctldata.GetPermissionsResponse + * @memberof vtctldata.MigrateCompleteResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetPermissionsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MigrateCompleteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetPermissionsResponse"; + return typeUrlPrefix + "/vtctldata.MigrateCompleteResponse"; }; - return GetPermissionsResponse; + return MigrateCompleteResponse; })(); - vtctldata.GetRoutingRulesRequest = (function() { + vtctldata.MountRegisterRequest = (function() { /** - * Properties of a GetRoutingRulesRequest. + * Properties of a MountRegisterRequest. * @memberof vtctldata - * @interface IGetRoutingRulesRequest + * @interface IMountRegisterRequest + * @property {string|null} [topo_type] MountRegisterRequest topo_type + * @property {string|null} [topo_server] MountRegisterRequest topo_server + * @property {string|null} [topo_root] MountRegisterRequest topo_root + * @property {string|null} [name] MountRegisterRequest name */ /** - * Constructs a new GetRoutingRulesRequest. + * Constructs a new MountRegisterRequest. * @memberof vtctldata - * @classdesc Represents a GetRoutingRulesRequest. - * @implements IGetRoutingRulesRequest + * @classdesc Represents a MountRegisterRequest. 
+ * @implements IMountRegisterRequest * @constructor - * @param {vtctldata.IGetRoutingRulesRequest=} [properties] Properties to set + * @param {vtctldata.IMountRegisterRequest=} [properties] Properties to set */ - function GetRoutingRulesRequest(properties) { + function MountRegisterRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -115153,63 +136808,119 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new GetRoutingRulesRequest instance using the specified properties. + * MountRegisterRequest topo_type. + * @member {string} topo_type + * @memberof vtctldata.MountRegisterRequest + * @instance + */ + MountRegisterRequest.prototype.topo_type = ""; + + /** + * MountRegisterRequest topo_server. + * @member {string} topo_server + * @memberof vtctldata.MountRegisterRequest + * @instance + */ + MountRegisterRequest.prototype.topo_server = ""; + + /** + * MountRegisterRequest topo_root. + * @member {string} topo_root + * @memberof vtctldata.MountRegisterRequest + * @instance + */ + MountRegisterRequest.prototype.topo_root = ""; + + /** + * MountRegisterRequest name. + * @member {string} name + * @memberof vtctldata.MountRegisterRequest + * @instance + */ + MountRegisterRequest.prototype.name = ""; + + /** + * Creates a new MountRegisterRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.GetRoutingRulesRequest + * @memberof vtctldata.MountRegisterRequest * @static - * @param {vtctldata.IGetRoutingRulesRequest=} [properties] Properties to set - * @returns {vtctldata.GetRoutingRulesRequest} GetRoutingRulesRequest instance + * @param {vtctldata.IMountRegisterRequest=} [properties] Properties to set + * @returns {vtctldata.MountRegisterRequest} MountRegisterRequest instance */ - GetRoutingRulesRequest.create = function create(properties) { - return new GetRoutingRulesRequest(properties); + MountRegisterRequest.create = function create(properties) { + return new MountRegisterRequest(properties); }; /** - * Encodes the specified GetRoutingRulesRequest message. Does not implicitly {@link vtctldata.GetRoutingRulesRequest.verify|verify} messages. + * Encodes the specified MountRegisterRequest message. Does not implicitly {@link vtctldata.MountRegisterRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.GetRoutingRulesRequest + * @memberof vtctldata.MountRegisterRequest * @static - * @param {vtctldata.IGetRoutingRulesRequest} message GetRoutingRulesRequest message or plain object to encode + * @param {vtctldata.IMountRegisterRequest} message MountRegisterRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetRoutingRulesRequest.encode = function encode(message, writer) { + MountRegisterRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.topo_type != null && Object.hasOwnProperty.call(message, "topo_type")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.topo_type); + if (message.topo_server != null && Object.hasOwnProperty.call(message, "topo_server")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.topo_server); + if (message.topo_root != null && Object.hasOwnProperty.call(message, "topo_root")) + writer.uint32(/* id 3, wireType 2 
=*/26).string(message.topo_root); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.name); return writer; }; /** - * Encodes the specified GetRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.GetRoutingRulesRequest.verify|verify} messages. + * Encodes the specified MountRegisterRequest message, length delimited. Does not implicitly {@link vtctldata.MountRegisterRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetRoutingRulesRequest + * @memberof vtctldata.MountRegisterRequest * @static - * @param {vtctldata.IGetRoutingRulesRequest} message GetRoutingRulesRequest message or plain object to encode + * @param {vtctldata.IMountRegisterRequest} message MountRegisterRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetRoutingRulesRequest.encodeDelimited = function encodeDelimited(message, writer) { + MountRegisterRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetRoutingRulesRequest message from the specified reader or buffer. + * Decodes a MountRegisterRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.GetRoutingRulesRequest + * @memberof vtctldata.MountRegisterRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetRoutingRulesRequest} GetRoutingRulesRequest + * @returns {vtctldata.MountRegisterRequest} MountRegisterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetRoutingRulesRequest.decode = function decode(reader, length) { + MountRegisterRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetRoutingRulesRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.MountRegisterRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.topo_type = reader.string(); + break; + } + case 2: { + message.topo_server = reader.string(); + break; + } + case 3: { + message.topo_root = reader.string(); + break; + } + case 4: { + message.name = reader.string(); + break; + } default: reader.skipType(tag & 7); break; @@ -115219,109 +136930,146 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetRoutingRulesRequest message from the specified reader or buffer, length delimited. + * Decodes a MountRegisterRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.GetRoutingRulesRequest + * @memberof vtctldata.MountRegisterRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetRoutingRulesRequest} GetRoutingRulesRequest + * @returns {vtctldata.MountRegisterRequest} MountRegisterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetRoutingRulesRequest.decodeDelimited = function decodeDelimited(reader) { + MountRegisterRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetRoutingRulesRequest message. + * Verifies a MountRegisterRequest message. * @function verify - * @memberof vtctldata.GetRoutingRulesRequest + * @memberof vtctldata.MountRegisterRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetRoutingRulesRequest.verify = function verify(message) { + MountRegisterRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.topo_type != null && message.hasOwnProperty("topo_type")) + if (!$util.isString(message.topo_type)) + return "topo_type: string expected"; + if (message.topo_server != null && message.hasOwnProperty("topo_server")) + if (!$util.isString(message.topo_server)) + return "topo_server: string expected"; + if (message.topo_root != null && message.hasOwnProperty("topo_root")) + if (!$util.isString(message.topo_root)) + return "topo_root: string expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; return null; }; /** - * Creates a GetRoutingRulesRequest message from a plain object. 
Also converts values to their respective internal types. + * Creates a MountRegisterRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.GetRoutingRulesRequest + * @memberof vtctldata.MountRegisterRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetRoutingRulesRequest} GetRoutingRulesRequest + * @returns {vtctldata.MountRegisterRequest} MountRegisterRequest */ - GetRoutingRulesRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetRoutingRulesRequest) + MountRegisterRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.MountRegisterRequest) return object; - return new $root.vtctldata.GetRoutingRulesRequest(); + let message = new $root.vtctldata.MountRegisterRequest(); + if (object.topo_type != null) + message.topo_type = String(object.topo_type); + if (object.topo_server != null) + message.topo_server = String(object.topo_server); + if (object.topo_root != null) + message.topo_root = String(object.topo_root); + if (object.name != null) + message.name = String(object.name); + return message; }; /** - * Creates a plain object from a GetRoutingRulesRequest message. Also converts values to other types if specified. + * Creates a plain object from a MountRegisterRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetRoutingRulesRequest + * @memberof vtctldata.MountRegisterRequest * @static - * @param {vtctldata.GetRoutingRulesRequest} message GetRoutingRulesRequest + * @param {vtctldata.MountRegisterRequest} message MountRegisterRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetRoutingRulesRequest.toObject = function toObject() { - return {}; + MountRegisterRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.topo_type = ""; + object.topo_server = ""; + object.topo_root = ""; + object.name = ""; + } + if (message.topo_type != null && message.hasOwnProperty("topo_type")) + object.topo_type = message.topo_type; + if (message.topo_server != null && message.hasOwnProperty("topo_server")) + object.topo_server = message.topo_server; + if (message.topo_root != null && message.hasOwnProperty("topo_root")) + object.topo_root = message.topo_root; + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + return object; }; /** - * Converts this GetRoutingRulesRequest to JSON. + * Converts this MountRegisterRequest to JSON. 
* @function toJSON - * @memberof vtctldata.GetRoutingRulesRequest + * @memberof vtctldata.MountRegisterRequest * @instance * @returns {Object.} JSON object */ - GetRoutingRulesRequest.prototype.toJSON = function toJSON() { + MountRegisterRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetRoutingRulesRequest + * Gets the default type url for MountRegisterRequest * @function getTypeUrl - * @memberof vtctldata.GetRoutingRulesRequest + * @memberof vtctldata.MountRegisterRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetRoutingRulesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MountRegisterRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetRoutingRulesRequest"; + return typeUrlPrefix + "/vtctldata.MountRegisterRequest"; }; - return GetRoutingRulesRequest; + return MountRegisterRequest; })(); - vtctldata.GetRoutingRulesResponse = (function() { + vtctldata.MountRegisterResponse = (function() { /** - * Properties of a GetRoutingRulesResponse. + * Properties of a MountRegisterResponse. * @memberof vtctldata - * @interface IGetRoutingRulesResponse - * @property {vschema.IRoutingRules|null} [routing_rules] GetRoutingRulesResponse routing_rules + * @interface IMountRegisterResponse */ /** - * Constructs a new GetRoutingRulesResponse. + * Constructs a new MountRegisterResponse. * @memberof vtctldata - * @classdesc Represents a GetRoutingRulesResponse. - * @implements IGetRoutingRulesResponse + * @classdesc Represents a MountRegisterResponse. 
+ * @implements IMountRegisterResponse * @constructor - * @param {vtctldata.IGetRoutingRulesResponse=} [properties] Properties to set + * @param {vtctldata.IMountRegisterResponse=} [properties] Properties to set */ - function GetRoutingRulesResponse(properties) { + function MountRegisterResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -115329,77 +137077,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetRoutingRulesResponse routing_rules. - * @member {vschema.IRoutingRules|null|undefined} routing_rules - * @memberof vtctldata.GetRoutingRulesResponse - * @instance - */ - GetRoutingRulesResponse.prototype.routing_rules = null; - - /** - * Creates a new GetRoutingRulesResponse instance using the specified properties. + * Creates a new MountRegisterResponse instance using the specified properties. * @function create - * @memberof vtctldata.GetRoutingRulesResponse + * @memberof vtctldata.MountRegisterResponse * @static - * @param {vtctldata.IGetRoutingRulesResponse=} [properties] Properties to set - * @returns {vtctldata.GetRoutingRulesResponse} GetRoutingRulesResponse instance + * @param {vtctldata.IMountRegisterResponse=} [properties] Properties to set + * @returns {vtctldata.MountRegisterResponse} MountRegisterResponse instance */ - GetRoutingRulesResponse.create = function create(properties) { - return new GetRoutingRulesResponse(properties); + MountRegisterResponse.create = function create(properties) { + return new MountRegisterResponse(properties); }; /** - * Encodes the specified GetRoutingRulesResponse message. Does not implicitly {@link vtctldata.GetRoutingRulesResponse.verify|verify} messages. + * Encodes the specified MountRegisterResponse message. Does not implicitly {@link vtctldata.MountRegisterResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetRoutingRulesResponse + * @memberof vtctldata.MountRegisterResponse * @static - * @param {vtctldata.IGetRoutingRulesResponse} message GetRoutingRulesResponse message or plain object to encode + * @param {vtctldata.IMountRegisterResponse} message MountRegisterResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetRoutingRulesResponse.encode = function encode(message, writer) { + MountRegisterResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.routing_rules != null && Object.hasOwnProperty.call(message, "routing_rules")) - $root.vschema.RoutingRules.encode(message.routing_rules, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified GetRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.GetRoutingRulesResponse.verify|verify} messages. + * Encodes the specified MountRegisterResponse message, length delimited. Does not implicitly {@link vtctldata.MountRegisterResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetRoutingRulesResponse + * @memberof vtctldata.MountRegisterResponse * @static - * @param {vtctldata.IGetRoutingRulesResponse} message GetRoutingRulesResponse message or plain object to encode + * @param {vtctldata.IMountRegisterResponse} message MountRegisterResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetRoutingRulesResponse.encodeDelimited = function encodeDelimited(message, writer) { + MountRegisterResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetRoutingRulesResponse message from the specified reader or buffer. 
+ * Decodes a MountRegisterResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetRoutingRulesResponse + * @memberof vtctldata.MountRegisterResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetRoutingRulesResponse} GetRoutingRulesResponse + * @returns {vtctldata.MountRegisterResponse} MountRegisterResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetRoutingRulesResponse.decode = function decode(reader, length) { + MountRegisterResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetRoutingRulesResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.MountRegisterResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.routing_rules = $root.vschema.RoutingRules.decode(reader, reader.uint32()); - break; - } default: reader.skipType(tag & 7); break; @@ -115409,135 +137143,109 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetRoutingRulesResponse message from the specified reader or buffer, length delimited. + * Decodes a MountRegisterResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.GetRoutingRulesResponse + * @memberof vtctldata.MountRegisterResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetRoutingRulesResponse} GetRoutingRulesResponse + * @returns {vtctldata.MountRegisterResponse} MountRegisterResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetRoutingRulesResponse.decodeDelimited = function decodeDelimited(reader) { + MountRegisterResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetRoutingRulesResponse message. + * Verifies a MountRegisterResponse message. * @function verify - * @memberof vtctldata.GetRoutingRulesResponse + * @memberof vtctldata.MountRegisterResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetRoutingRulesResponse.verify = function verify(message) { + MountRegisterResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.routing_rules != null && message.hasOwnProperty("routing_rules")) { - let error = $root.vschema.RoutingRules.verify(message.routing_rules); - if (error) - return "routing_rules." + error; - } return null; }; /** - * Creates a GetRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. + * Creates a MountRegisterResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetRoutingRulesResponse + * @memberof vtctldata.MountRegisterResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetRoutingRulesResponse} GetRoutingRulesResponse + * @returns {vtctldata.MountRegisterResponse} MountRegisterResponse */ - GetRoutingRulesResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetRoutingRulesResponse) + MountRegisterResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.MountRegisterResponse) return object; - let message = new $root.vtctldata.GetRoutingRulesResponse(); - if (object.routing_rules != null) { - if (typeof object.routing_rules !== "object") - throw TypeError(".vtctldata.GetRoutingRulesResponse.routing_rules: object expected"); - message.routing_rules = $root.vschema.RoutingRules.fromObject(object.routing_rules); - } - return message; + return new $root.vtctldata.MountRegisterResponse(); }; /** - * Creates a plain object from a GetRoutingRulesResponse message. Also converts values to other types if specified. + * Creates a plain object from a MountRegisterResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetRoutingRulesResponse + * @memberof vtctldata.MountRegisterResponse * @static - * @param {vtctldata.GetRoutingRulesResponse} message GetRoutingRulesResponse + * @param {vtctldata.MountRegisterResponse} message MountRegisterResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetRoutingRulesResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) - object.routing_rules = null; - if (message.routing_rules != null && message.hasOwnProperty("routing_rules")) - object.routing_rules = $root.vschema.RoutingRules.toObject(message.routing_rules, options); - return object; + MountRegisterResponse.toObject = function toObject() { + return {}; }; /** - * Converts this GetRoutingRulesResponse to JSON. + * Converts this MountRegisterResponse to JSON. * @function toJSON - * @memberof vtctldata.GetRoutingRulesResponse + * @memberof vtctldata.MountRegisterResponse * @instance * @returns {Object.} JSON object */ - GetRoutingRulesResponse.prototype.toJSON = function toJSON() { + MountRegisterResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetRoutingRulesResponse + * Gets the default type url for MountRegisterResponse * @function getTypeUrl - * @memberof vtctldata.GetRoutingRulesResponse + * @memberof vtctldata.MountRegisterResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetRoutingRulesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MountRegisterResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetRoutingRulesResponse"; + return typeUrlPrefix + 
"/vtctldata.MountRegisterResponse"; }; - return GetRoutingRulesResponse; + return MountRegisterResponse; })(); - vtctldata.GetSchemaRequest = (function() { + vtctldata.MountUnregisterRequest = (function() { /** - * Properties of a GetSchemaRequest. + * Properties of a MountUnregisterRequest. * @memberof vtctldata - * @interface IGetSchemaRequest - * @property {topodata.ITabletAlias|null} [tablet_alias] GetSchemaRequest tablet_alias - * @property {Array.|null} [tables] GetSchemaRequest tables - * @property {Array.|null} [exclude_tables] GetSchemaRequest exclude_tables - * @property {boolean|null} [include_views] GetSchemaRequest include_views - * @property {boolean|null} [table_names_only] GetSchemaRequest table_names_only - * @property {boolean|null} [table_sizes_only] GetSchemaRequest table_sizes_only - * @property {boolean|null} [table_schema_only] GetSchemaRequest table_schema_only + * @interface IMountUnregisterRequest + * @property {string|null} [name] MountUnregisterRequest name */ /** - * Constructs a new GetSchemaRequest. + * Constructs a new MountUnregisterRequest. * @memberof vtctldata - * @classdesc Represents a GetSchemaRequest. - * @implements IGetSchemaRequest + * @classdesc Represents a MountUnregisterRequest. + * @implements IMountUnregisterRequest * @constructor - * @param {vtctldata.IGetSchemaRequest=} [properties] Properties to set + * @param {vtctldata.IMountUnregisterRequest=} [properties] Properties to set */ - function GetSchemaRequest(properties) { - this.tables = []; - this.exclude_tables = []; + function MountUnregisterRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -115545,165 +137253,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetSchemaRequest tablet_alias. 
- * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.GetSchemaRequest - * @instance - */ - GetSchemaRequest.prototype.tablet_alias = null; - - /** - * GetSchemaRequest tables. - * @member {Array.} tables - * @memberof vtctldata.GetSchemaRequest - * @instance - */ - GetSchemaRequest.prototype.tables = $util.emptyArray; - - /** - * GetSchemaRequest exclude_tables. - * @member {Array.} exclude_tables - * @memberof vtctldata.GetSchemaRequest - * @instance - */ - GetSchemaRequest.prototype.exclude_tables = $util.emptyArray; - - /** - * GetSchemaRequest include_views. - * @member {boolean} include_views - * @memberof vtctldata.GetSchemaRequest - * @instance - */ - GetSchemaRequest.prototype.include_views = false; - - /** - * GetSchemaRequest table_names_only. - * @member {boolean} table_names_only - * @memberof vtctldata.GetSchemaRequest - * @instance - */ - GetSchemaRequest.prototype.table_names_only = false; - - /** - * GetSchemaRequest table_sizes_only. - * @member {boolean} table_sizes_only - * @memberof vtctldata.GetSchemaRequest - * @instance - */ - GetSchemaRequest.prototype.table_sizes_only = false; - - /** - * GetSchemaRequest table_schema_only. - * @member {boolean} table_schema_only - * @memberof vtctldata.GetSchemaRequest + * MountUnregisterRequest name. + * @member {string} name + * @memberof vtctldata.MountUnregisterRequest * @instance */ - GetSchemaRequest.prototype.table_schema_only = false; + MountUnregisterRequest.prototype.name = ""; /** - * Creates a new GetSchemaRequest instance using the specified properties. + * Creates a new MountUnregisterRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.GetSchemaRequest + * @memberof vtctldata.MountUnregisterRequest * @static - * @param {vtctldata.IGetSchemaRequest=} [properties] Properties to set - * @returns {vtctldata.GetSchemaRequest} GetSchemaRequest instance + * @param {vtctldata.IMountUnregisterRequest=} [properties] Properties to set + * @returns {vtctldata.MountUnregisterRequest} MountUnregisterRequest instance */ - GetSchemaRequest.create = function create(properties) { - return new GetSchemaRequest(properties); + MountUnregisterRequest.create = function create(properties) { + return new MountUnregisterRequest(properties); }; /** - * Encodes the specified GetSchemaRequest message. Does not implicitly {@link vtctldata.GetSchemaRequest.verify|verify} messages. + * Encodes the specified MountUnregisterRequest message. Does not implicitly {@link vtctldata.MountUnregisterRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.GetSchemaRequest + * @memberof vtctldata.MountUnregisterRequest * @static - * @param {vtctldata.IGetSchemaRequest} message GetSchemaRequest message or plain object to encode + * @param {vtctldata.IMountUnregisterRequest} message MountUnregisterRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSchemaRequest.encode = function encode(message, writer) { + MountUnregisterRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.tables != null && message.tables.length) - for (let i = 0; i < message.tables.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.tables[i]); - if (message.exclude_tables != null && message.exclude_tables.length) - for (let i = 0; i < 
message.exclude_tables.length; ++i) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.exclude_tables[i]); - if (message.include_views != null && Object.hasOwnProperty.call(message, "include_views")) - writer.uint32(/* id 4, wireType 0 =*/32).bool(message.include_views); - if (message.table_names_only != null && Object.hasOwnProperty.call(message, "table_names_only")) - writer.uint32(/* id 5, wireType 0 =*/40).bool(message.table_names_only); - if (message.table_sizes_only != null && Object.hasOwnProperty.call(message, "table_sizes_only")) - writer.uint32(/* id 6, wireType 0 =*/48).bool(message.table_sizes_only); - if (message.table_schema_only != null && Object.hasOwnProperty.call(message, "table_schema_only")) - writer.uint32(/* id 7, wireType 0 =*/56).bool(message.table_schema_only); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.name); return writer; }; /** - * Encodes the specified GetSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.GetSchemaRequest.verify|verify} messages. + * Encodes the specified MountUnregisterRequest message, length delimited. Does not implicitly {@link vtctldata.MountUnregisterRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetSchemaRequest + * @memberof vtctldata.MountUnregisterRequest * @static - * @param {vtctldata.IGetSchemaRequest} message GetSchemaRequest message or plain object to encode + * @param {vtctldata.IMountUnregisterRequest} message MountUnregisterRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSchemaRequest.encodeDelimited = function encodeDelimited(message, writer) { + MountUnregisterRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetSchemaRequest message from the specified reader or buffer. + * Decodes a MountUnregisterRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetSchemaRequest + * @memberof vtctldata.MountUnregisterRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetSchemaRequest} GetSchemaRequest + * @returns {vtctldata.MountUnregisterRequest} MountUnregisterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSchemaRequest.decode = function decode(reader, length) { + MountUnregisterRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetSchemaRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.MountUnregisterRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } - case 2: { - if (!(message.tables && message.tables.length)) - message.tables = []; - message.tables.push(reader.string()); - break; - } - case 3: { - if (!(message.exclude_tables && message.exclude_tables.length)) - message.exclude_tables = []; - message.exclude_tables.push(reader.string()); - break; - } case 4: { - message.include_views = reader.bool(); - break; - } - case 5: { - message.table_names_only = reader.bool(); - break; - } - case 6: { - message.table_sizes_only = reader.bool(); - break; - } - case 7: { - message.table_schema_only = reader.bool(); + message.name = reader.string(); break; } default: @@ -115715,202 +137333,121 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetSchemaRequest message from the specified reader or buffer, length delimited. + * Decodes a MountUnregisterRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetSchemaRequest + * @memberof vtctldata.MountUnregisterRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetSchemaRequest} GetSchemaRequest + * @returns {vtctldata.MountUnregisterRequest} MountUnregisterRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSchemaRequest.decodeDelimited = function decodeDelimited(reader) { + MountUnregisterRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetSchemaRequest message. 
+ * Verifies a MountUnregisterRequest message. * @function verify - * @memberof vtctldata.GetSchemaRequest + * @memberof vtctldata.MountUnregisterRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetSchemaRequest.verify = function verify(message) { + MountUnregisterRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } - if (message.tables != null && message.hasOwnProperty("tables")) { - if (!Array.isArray(message.tables)) - return "tables: array expected"; - for (let i = 0; i < message.tables.length; ++i) - if (!$util.isString(message.tables[i])) - return "tables: string[] expected"; - } - if (message.exclude_tables != null && message.hasOwnProperty("exclude_tables")) { - if (!Array.isArray(message.exclude_tables)) - return "exclude_tables: array expected"; - for (let i = 0; i < message.exclude_tables.length; ++i) - if (!$util.isString(message.exclude_tables[i])) - return "exclude_tables: string[] expected"; - } - if (message.include_views != null && message.hasOwnProperty("include_views")) - if (typeof message.include_views !== "boolean") - return "include_views: boolean expected"; - if (message.table_names_only != null && message.hasOwnProperty("table_names_only")) - if (typeof message.table_names_only !== "boolean") - return "table_names_only: boolean expected"; - if (message.table_sizes_only != null && message.hasOwnProperty("table_sizes_only")) - if (typeof message.table_sizes_only !== "boolean") - return "table_sizes_only: boolean expected"; - if (message.table_schema_only != null && message.hasOwnProperty("table_schema_only")) - if (typeof message.table_schema_only !== "boolean") - return 
"table_schema_only: boolean expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; return null; }; /** - * Creates a GetSchemaRequest message from a plain object. Also converts values to their respective internal types. + * Creates a MountUnregisterRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.GetSchemaRequest + * @memberof vtctldata.MountUnregisterRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetSchemaRequest} GetSchemaRequest + * @returns {vtctldata.MountUnregisterRequest} MountUnregisterRequest */ - GetSchemaRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetSchemaRequest) + MountUnregisterRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.MountUnregisterRequest) return object; - let message = new $root.vtctldata.GetSchemaRequest(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.GetSchemaRequest.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } - if (object.tables) { - if (!Array.isArray(object.tables)) - throw TypeError(".vtctldata.GetSchemaRequest.tables: array expected"); - message.tables = []; - for (let i = 0; i < object.tables.length; ++i) - message.tables[i] = String(object.tables[i]); - } - if (object.exclude_tables) { - if (!Array.isArray(object.exclude_tables)) - throw TypeError(".vtctldata.GetSchemaRequest.exclude_tables: array expected"); - message.exclude_tables = []; - for (let i = 0; i < object.exclude_tables.length; ++i) - message.exclude_tables[i] = String(object.exclude_tables[i]); - } - if (object.include_views != null) - message.include_views = Boolean(object.include_views); - if (object.table_names_only != 
null) - message.table_names_only = Boolean(object.table_names_only); - if (object.table_sizes_only != null) - message.table_sizes_only = Boolean(object.table_sizes_only); - if (object.table_schema_only != null) - message.table_schema_only = Boolean(object.table_schema_only); + let message = new $root.vtctldata.MountUnregisterRequest(); + if (object.name != null) + message.name = String(object.name); return message; }; /** - * Creates a plain object from a GetSchemaRequest message. Also converts values to other types if specified. + * Creates a plain object from a MountUnregisterRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.GetSchemaRequest + * @memberof vtctldata.MountUnregisterRequest * @static - * @param {vtctldata.GetSchemaRequest} message GetSchemaRequest + * @param {vtctldata.MountUnregisterRequest} message MountUnregisterRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetSchemaRequest.toObject = function toObject(message, options) { + MountUnregisterRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) { - object.tables = []; - object.exclude_tables = []; - } - if (options.defaults) { - object.tablet_alias = null; - object.include_views = false; - object.table_names_only = false; - object.table_sizes_only = false; - object.table_schema_only = false; - } - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - if (message.tables && message.tables.length) { - object.tables = []; - for (let j = 0; j < message.tables.length; ++j) - object.tables[j] = message.tables[j]; - } - if (message.exclude_tables && message.exclude_tables.length) { - object.exclude_tables = []; - for (let j = 0; j < message.exclude_tables.length; ++j) - 
object.exclude_tables[j] = message.exclude_tables[j]; - } - if (message.include_views != null && message.hasOwnProperty("include_views")) - object.include_views = message.include_views; - if (message.table_names_only != null && message.hasOwnProperty("table_names_only")) - object.table_names_only = message.table_names_only; - if (message.table_sizes_only != null && message.hasOwnProperty("table_sizes_only")) - object.table_sizes_only = message.table_sizes_only; - if (message.table_schema_only != null && message.hasOwnProperty("table_schema_only")) - object.table_schema_only = message.table_schema_only; + if (options.defaults) + object.name = ""; + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; return object; }; /** - * Converts this GetSchemaRequest to JSON. + * Converts this MountUnregisterRequest to JSON. * @function toJSON - * @memberof vtctldata.GetSchemaRequest + * @memberof vtctldata.MountUnregisterRequest * @instance * @returns {Object.} JSON object */ - GetSchemaRequest.prototype.toJSON = function toJSON() { + MountUnregisterRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetSchemaRequest + * Gets the default type url for MountUnregisterRequest * @function getTypeUrl - * @memberof vtctldata.GetSchemaRequest + * @memberof vtctldata.MountUnregisterRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetSchemaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MountUnregisterRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetSchemaRequest"; + return typeUrlPrefix + "/vtctldata.MountUnregisterRequest"; }; - return GetSchemaRequest; + return MountUnregisterRequest; })(); - 
vtctldata.GetSchemaResponse = (function() { + vtctldata.MountUnregisterResponse = (function() { /** - * Properties of a GetSchemaResponse. + * Properties of a MountUnregisterResponse. * @memberof vtctldata - * @interface IGetSchemaResponse - * @property {tabletmanagerdata.ISchemaDefinition|null} [schema] GetSchemaResponse schema + * @interface IMountUnregisterResponse */ /** - * Constructs a new GetSchemaResponse. + * Constructs a new MountUnregisterResponse. * @memberof vtctldata - * @classdesc Represents a GetSchemaResponse. - * @implements IGetSchemaResponse + * @classdesc Represents a MountUnregisterResponse. + * @implements IMountUnregisterResponse * @constructor - * @param {vtctldata.IGetSchemaResponse=} [properties] Properties to set + * @param {vtctldata.IMountUnregisterResponse=} [properties] Properties to set */ - function GetSchemaResponse(properties) { + function MountUnregisterResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -115918,77 +137455,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetSchemaResponse schema. - * @member {tabletmanagerdata.ISchemaDefinition|null|undefined} schema - * @memberof vtctldata.GetSchemaResponse - * @instance - */ - GetSchemaResponse.prototype.schema = null; - - /** - * Creates a new GetSchemaResponse instance using the specified properties. + * Creates a new MountUnregisterResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.GetSchemaResponse + * @memberof vtctldata.MountUnregisterResponse * @static - * @param {vtctldata.IGetSchemaResponse=} [properties] Properties to set - * @returns {vtctldata.GetSchemaResponse} GetSchemaResponse instance + * @param {vtctldata.IMountUnregisterResponse=} [properties] Properties to set + * @returns {vtctldata.MountUnregisterResponse} MountUnregisterResponse instance */ - GetSchemaResponse.create = function create(properties) { - return new GetSchemaResponse(properties); + MountUnregisterResponse.create = function create(properties) { + return new MountUnregisterResponse(properties); }; /** - * Encodes the specified GetSchemaResponse message. Does not implicitly {@link vtctldata.GetSchemaResponse.verify|verify} messages. + * Encodes the specified MountUnregisterResponse message. Does not implicitly {@link vtctldata.MountUnregisterResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.GetSchemaResponse + * @memberof vtctldata.MountUnregisterResponse * @static - * @param {vtctldata.IGetSchemaResponse} message GetSchemaResponse message or plain object to encode + * @param {vtctldata.IMountUnregisterResponse} message MountUnregisterResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSchemaResponse.encode = function encode(message, writer) { + MountUnregisterResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.schema != null && Object.hasOwnProperty.call(message, "schema")) - $root.tabletmanagerdata.SchemaDefinition.encode(message.schema, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified GetSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.GetSchemaResponse.verify|verify} messages. + * Encodes the specified MountUnregisterResponse message, length delimited. 
Does not implicitly {@link vtctldata.MountUnregisterResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetSchemaResponse + * @memberof vtctldata.MountUnregisterResponse * @static - * @param {vtctldata.IGetSchemaResponse} message GetSchemaResponse message or plain object to encode + * @param {vtctldata.IMountUnregisterResponse} message MountUnregisterResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSchemaResponse.encodeDelimited = function encodeDelimited(message, writer) { + MountUnregisterResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetSchemaResponse message from the specified reader or buffer. + * Decodes a MountUnregisterResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetSchemaResponse + * @memberof vtctldata.MountUnregisterResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetSchemaResponse} GetSchemaResponse + * @returns {vtctldata.MountUnregisterResponse} MountUnregisterResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSchemaResponse.decode = function decode(reader, length) { + MountUnregisterResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetSchemaResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.MountUnregisterResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.schema = $root.tabletmanagerdata.SchemaDefinition.decode(reader, reader.uint32()); - break; - } default: reader.skipType(tag & 7); break; @@ -115998,128 +137521,109 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetSchemaResponse message from the specified reader or buffer, length delimited. + * Decodes a MountUnregisterResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetSchemaResponse + * @memberof vtctldata.MountUnregisterResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetSchemaResponse} GetSchemaResponse + * @returns {vtctldata.MountUnregisterResponse} MountUnregisterResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSchemaResponse.decodeDelimited = function decodeDelimited(reader) { + MountUnregisterResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetSchemaResponse message. + * Verifies a MountUnregisterResponse message. 
* @function verify - * @memberof vtctldata.GetSchemaResponse + * @memberof vtctldata.MountUnregisterResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetSchemaResponse.verify = function verify(message) { + MountUnregisterResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.schema != null && message.hasOwnProperty("schema")) { - let error = $root.tabletmanagerdata.SchemaDefinition.verify(message.schema); - if (error) - return "schema." + error; - } return null; }; /** - * Creates a GetSchemaResponse message from a plain object. Also converts values to their respective internal types. + * Creates a MountUnregisterResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.GetSchemaResponse + * @memberof vtctldata.MountUnregisterResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetSchemaResponse} GetSchemaResponse + * @returns {vtctldata.MountUnregisterResponse} MountUnregisterResponse */ - GetSchemaResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetSchemaResponse) + MountUnregisterResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.MountUnregisterResponse) return object; - let message = new $root.vtctldata.GetSchemaResponse(); - if (object.schema != null) { - if (typeof object.schema !== "object") - throw TypeError(".vtctldata.GetSchemaResponse.schema: object expected"); - message.schema = $root.tabletmanagerdata.SchemaDefinition.fromObject(object.schema); - } - return message; + return new $root.vtctldata.MountUnregisterResponse(); }; /** - * Creates a plain object from a GetSchemaResponse message. Also converts values to other types if specified. 
+ * Creates a plain object from a MountUnregisterResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.GetSchemaResponse + * @memberof vtctldata.MountUnregisterResponse * @static - * @param {vtctldata.GetSchemaResponse} message GetSchemaResponse + * @param {vtctldata.MountUnregisterResponse} message MountUnregisterResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetSchemaResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) - object.schema = null; - if (message.schema != null && message.hasOwnProperty("schema")) - object.schema = $root.tabletmanagerdata.SchemaDefinition.toObject(message.schema, options); - return object; + MountUnregisterResponse.toObject = function toObject() { + return {}; }; /** - * Converts this GetSchemaResponse to JSON. + * Converts this MountUnregisterResponse to JSON. 
* @function toJSON - * @memberof vtctldata.GetSchemaResponse + * @memberof vtctldata.MountUnregisterResponse * @instance * @returns {Object.} JSON object */ - GetSchemaResponse.prototype.toJSON = function toJSON() { + MountUnregisterResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetSchemaResponse + * Gets the default type url for MountUnregisterResponse * @function getTypeUrl - * @memberof vtctldata.GetSchemaResponse + * @memberof vtctldata.MountUnregisterResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetSchemaResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MountUnregisterResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetSchemaResponse"; + return typeUrlPrefix + "/vtctldata.MountUnregisterResponse"; }; - return GetSchemaResponse; + return MountUnregisterResponse; })(); - vtctldata.GetShardRequest = (function() { + vtctldata.MountShowRequest = (function() { /** - * Properties of a GetShardRequest. + * Properties of a MountShowRequest. * @memberof vtctldata - * @interface IGetShardRequest - * @property {string|null} [keyspace] GetShardRequest keyspace - * @property {string|null} [shard_name] GetShardRequest shard_name + * @interface IMountShowRequest + * @property {string|null} [name] MountShowRequest name */ /** - * Constructs a new GetShardRequest. + * Constructs a new MountShowRequest. * @memberof vtctldata - * @classdesc Represents a GetShardRequest. - * @implements IGetShardRequest + * @classdesc Represents a MountShowRequest. 
+ * @implements IMountShowRequest * @constructor - * @param {vtctldata.IGetShardRequest=} [properties] Properties to set + * @param {vtctldata.IMountShowRequest=} [properties] Properties to set */ - function GetShardRequest(properties) { + function MountShowRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -116127,89 +137631,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetShardRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.GetShardRequest - * @instance - */ - GetShardRequest.prototype.keyspace = ""; - - /** - * GetShardRequest shard_name. - * @member {string} shard_name - * @memberof vtctldata.GetShardRequest + * MountShowRequest name. + * @member {string} name + * @memberof vtctldata.MountShowRequest * @instance */ - GetShardRequest.prototype.shard_name = ""; + MountShowRequest.prototype.name = ""; /** - * Creates a new GetShardRequest instance using the specified properties. + * Creates a new MountShowRequest instance using the specified properties. * @function create - * @memberof vtctldata.GetShardRequest + * @memberof vtctldata.MountShowRequest * @static - * @param {vtctldata.IGetShardRequest=} [properties] Properties to set - * @returns {vtctldata.GetShardRequest} GetShardRequest instance + * @param {vtctldata.IMountShowRequest=} [properties] Properties to set + * @returns {vtctldata.MountShowRequest} MountShowRequest instance */ - GetShardRequest.create = function create(properties) { - return new GetShardRequest(properties); + MountShowRequest.create = function create(properties) { + return new MountShowRequest(properties); }; /** - * Encodes the specified GetShardRequest message. Does not implicitly {@link vtctldata.GetShardRequest.verify|verify} messages. + * Encodes the specified MountShowRequest message. Does not implicitly {@link vtctldata.MountShowRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetShardRequest + * @memberof vtctldata.MountShowRequest * @static - * @param {vtctldata.IGetShardRequest} message GetShardRequest message or plain object to encode + * @param {vtctldata.IMountShowRequest} message MountShowRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetShardRequest.encode = function encode(message, writer) { + MountShowRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard_name != null && Object.hasOwnProperty.call(message, "shard_name")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard_name); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.name); return writer; }; /** - * Encodes the specified GetShardRequest message, length delimited. Does not implicitly {@link vtctldata.GetShardRequest.verify|verify} messages. + * Encodes the specified MountShowRequest message, length delimited. Does not implicitly {@link vtctldata.MountShowRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetShardRequest + * @memberof vtctldata.MountShowRequest * @static - * @param {vtctldata.IGetShardRequest} message GetShardRequest message or plain object to encode + * @param {vtctldata.IMountShowRequest} message MountShowRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetShardRequest.encodeDelimited = function encodeDelimited(message, writer) { + MountShowRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetShardRequest message from the specified reader or buffer. + * Decodes a MountShowRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetShardRequest + * @memberof vtctldata.MountShowRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetShardRequest} GetShardRequest + * @returns {vtctldata.MountShowRequest} MountShowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetShardRequest.decode = function decode(reader, length) { + MountShowRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetShardRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.MountShowRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - message.shard_name = reader.string(); + case 4: { + message.name = reader.string(); break; } default: @@ -116221,131 +137711,125 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetShardRequest message from the specified reader or buffer, length delimited. + * Decodes a MountShowRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetShardRequest + * @memberof vtctldata.MountShowRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetShardRequest} GetShardRequest + * @returns {vtctldata.MountShowRequest} MountShowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetShardRequest.decodeDelimited = function decodeDelimited(reader) { + MountShowRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetShardRequest message. + * Verifies a MountShowRequest message. 
* @function verify - * @memberof vtctldata.GetShardRequest + * @memberof vtctldata.MountShowRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetShardRequest.verify = function verify(message) { + MountShowRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard_name != null && message.hasOwnProperty("shard_name")) - if (!$util.isString(message.shard_name)) - return "shard_name: string expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; return null; }; /** - * Creates a GetShardRequest message from a plain object. Also converts values to their respective internal types. + * Creates a MountShowRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetShardRequest + * @memberof vtctldata.MountShowRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetShardRequest} GetShardRequest + * @returns {vtctldata.MountShowRequest} MountShowRequest */ - GetShardRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetShardRequest) + MountShowRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.MountShowRequest) return object; - let message = new $root.vtctldata.GetShardRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard_name != null) - message.shard_name = String(object.shard_name); + let message = new $root.vtctldata.MountShowRequest(); + if (object.name != null) + message.name = String(object.name); return message; }; /** - * Creates a plain object from a GetShardRequest message. Also converts values to other types if specified. + * Creates a plain object from a MountShowRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetShardRequest + * @memberof vtctldata.MountShowRequest * @static - * @param {vtctldata.GetShardRequest} message GetShardRequest + * @param {vtctldata.MountShowRequest} message MountShowRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetShardRequest.toObject = function toObject(message, options) { + MountShowRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.keyspace = ""; - object.shard_name = ""; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard_name != null && message.hasOwnProperty("shard_name")) - object.shard_name = message.shard_name; + if (options.defaults) + object.name = ""; + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; return object; }; /** - * Converts this GetShardRequest to JSON. + * Converts this MountShowRequest to JSON. 
* @function toJSON - * @memberof vtctldata.GetShardRequest + * @memberof vtctldata.MountShowRequest * @instance * @returns {Object.} JSON object */ - GetShardRequest.prototype.toJSON = function toJSON() { + MountShowRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetShardRequest + * Gets the default type url for MountShowRequest * @function getTypeUrl - * @memberof vtctldata.GetShardRequest + * @memberof vtctldata.MountShowRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetShardRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MountShowRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetShardRequest"; + return typeUrlPrefix + "/vtctldata.MountShowRequest"; }; - return GetShardRequest; + return MountShowRequest; })(); - vtctldata.GetShardResponse = (function() { + vtctldata.MountShowResponse = (function() { /** - * Properties of a GetShardResponse. + * Properties of a MountShowResponse. * @memberof vtctldata - * @interface IGetShardResponse - * @property {vtctldata.IShard|null} [shard] GetShardResponse shard + * @interface IMountShowResponse + * @property {string|null} [topo_type] MountShowResponse topo_type + * @property {string|null} [topo_server] MountShowResponse topo_server + * @property {string|null} [topo_root] MountShowResponse topo_root + * @property {string|null} [name] MountShowResponse name */ /** - * Constructs a new GetShardResponse. + * Constructs a new MountShowResponse. * @memberof vtctldata - * @classdesc Represents a GetShardResponse. - * @implements IGetShardResponse + * @classdesc Represents a MountShowResponse. 
+ * @implements IMountShowResponse * @constructor - * @param {vtctldata.IGetShardResponse=} [properties] Properties to set + * @param {vtctldata.IMountShowResponse=} [properties] Properties to set */ - function GetShardResponse(properties) { + function MountShowResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -116353,75 +137837,117 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetShardResponse shard. - * @member {vtctldata.IShard|null|undefined} shard - * @memberof vtctldata.GetShardResponse + * MountShowResponse topo_type. + * @member {string} topo_type + * @memberof vtctldata.MountShowResponse * @instance */ - GetShardResponse.prototype.shard = null; + MountShowResponse.prototype.topo_type = ""; /** - * Creates a new GetShardResponse instance using the specified properties. + * MountShowResponse topo_server. + * @member {string} topo_server + * @memberof vtctldata.MountShowResponse + * @instance + */ + MountShowResponse.prototype.topo_server = ""; + + /** + * MountShowResponse topo_root. + * @member {string} topo_root + * @memberof vtctldata.MountShowResponse + * @instance + */ + MountShowResponse.prototype.topo_root = ""; + + /** + * MountShowResponse name. + * @member {string} name + * @memberof vtctldata.MountShowResponse + * @instance + */ + MountShowResponse.prototype.name = ""; + + /** + * Creates a new MountShowResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.GetShardResponse + * @memberof vtctldata.MountShowResponse * @static - * @param {vtctldata.IGetShardResponse=} [properties] Properties to set - * @returns {vtctldata.GetShardResponse} GetShardResponse instance + * @param {vtctldata.IMountShowResponse=} [properties] Properties to set + * @returns {vtctldata.MountShowResponse} MountShowResponse instance */ - GetShardResponse.create = function create(properties) { - return new GetShardResponse(properties); + MountShowResponse.create = function create(properties) { + return new MountShowResponse(properties); }; /** - * Encodes the specified GetShardResponse message. Does not implicitly {@link vtctldata.GetShardResponse.verify|verify} messages. + * Encodes the specified MountShowResponse message. Does not implicitly {@link vtctldata.MountShowResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.GetShardResponse + * @memberof vtctldata.MountShowResponse * @static - * @param {vtctldata.IGetShardResponse} message GetShardResponse message or plain object to encode + * @param {vtctldata.IMountShowResponse} message MountShowResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetShardResponse.encode = function encode(message, writer) { + MountShowResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - $root.vtctldata.Shard.encode(message.shard, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.topo_type != null && Object.hasOwnProperty.call(message, "topo_type")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.topo_type); + if (message.topo_server != null && Object.hasOwnProperty.call(message, "topo_server")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.topo_server); + if (message.topo_root != null && 
Object.hasOwnProperty.call(message, "topo_root")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.topo_root); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.name); return writer; }; /** - * Encodes the specified GetShardResponse message, length delimited. Does not implicitly {@link vtctldata.GetShardResponse.verify|verify} messages. + * Encodes the specified MountShowResponse message, length delimited. Does not implicitly {@link vtctldata.MountShowResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetShardResponse + * @memberof vtctldata.MountShowResponse * @static - * @param {vtctldata.IGetShardResponse} message GetShardResponse message or plain object to encode + * @param {vtctldata.IMountShowResponse} message MountShowResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetShardResponse.encodeDelimited = function encodeDelimited(message, writer) { + MountShowResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetShardResponse message from the specified reader or buffer. + * Decodes a MountShowResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.GetShardResponse + * @memberof vtctldata.MountShowResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetShardResponse} GetShardResponse + * @returns {vtctldata.MountShowResponse} MountShowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetShardResponse.decode = function decode(reader, length) { + MountShowResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetShardResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.MountShowResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.shard = $root.vtctldata.Shard.decode(reader, reader.uint32()); + message.topo_type = reader.string(); + break; + } + case 2: { + message.topo_server = reader.string(); + break; + } + case 3: { + message.topo_root = reader.string(); + break; + } + case 4: { + message.name = reader.string(); break; } default: @@ -116433,126 +137959,146 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetShardResponse message from the specified reader or buffer, length delimited. + * Decodes a MountShowResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.GetShardResponse + * @memberof vtctldata.MountShowResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetShardResponse} GetShardResponse + * @returns {vtctldata.MountShowResponse} MountShowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetShardResponse.decodeDelimited = function decodeDelimited(reader) { + MountShowResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetShardResponse message. + * Verifies a MountShowResponse message. * @function verify - * @memberof vtctldata.GetShardResponse + * @memberof vtctldata.MountShowResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetShardResponse.verify = function verify(message) { + MountShowResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.shard != null && message.hasOwnProperty("shard")) { - let error = $root.vtctldata.Shard.verify(message.shard); - if (error) - return "shard." 
+ error; - } + if (message.topo_type != null && message.hasOwnProperty("topo_type")) + if (!$util.isString(message.topo_type)) + return "topo_type: string expected"; + if (message.topo_server != null && message.hasOwnProperty("topo_server")) + if (!$util.isString(message.topo_server)) + return "topo_server: string expected"; + if (message.topo_root != null && message.hasOwnProperty("topo_root")) + if (!$util.isString(message.topo_root)) + return "topo_root: string expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; return null; }; /** - * Creates a GetShardResponse message from a plain object. Also converts values to their respective internal types. + * Creates a MountShowResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.GetShardResponse + * @memberof vtctldata.MountShowResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetShardResponse} GetShardResponse + * @returns {vtctldata.MountShowResponse} MountShowResponse */ - GetShardResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetShardResponse) + MountShowResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.MountShowResponse) return object; - let message = new $root.vtctldata.GetShardResponse(); - if (object.shard != null) { - if (typeof object.shard !== "object") - throw TypeError(".vtctldata.GetShardResponse.shard: object expected"); - message.shard = $root.vtctldata.Shard.fromObject(object.shard); - } + let message = new $root.vtctldata.MountShowResponse(); + if (object.topo_type != null) + message.topo_type = String(object.topo_type); + if (object.topo_server != null) + message.topo_server = String(object.topo_server); + if (object.topo_root != null) + message.topo_root = String(object.topo_root); + if (object.name != 
null) + message.name = String(object.name); return message; }; /** - * Creates a plain object from a GetShardResponse message. Also converts values to other types if specified. + * Creates a plain object from a MountShowResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.GetShardResponse + * @memberof vtctldata.MountShowResponse * @static - * @param {vtctldata.GetShardResponse} message GetShardResponse + * @param {vtctldata.MountShowResponse} message MountShowResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetShardResponse.toObject = function toObject(message, options) { + MountShowResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.shard = null; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = $root.vtctldata.Shard.toObject(message.shard, options); + if (options.defaults) { + object.topo_type = ""; + object.topo_server = ""; + object.topo_root = ""; + object.name = ""; + } + if (message.topo_type != null && message.hasOwnProperty("topo_type")) + object.topo_type = message.topo_type; + if (message.topo_server != null && message.hasOwnProperty("topo_server")) + object.topo_server = message.topo_server; + if (message.topo_root != null && message.hasOwnProperty("topo_root")) + object.topo_root = message.topo_root; + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; return object; }; /** - * Converts this GetShardResponse to JSON. + * Converts this MountShowResponse to JSON. 
* @function toJSON - * @memberof vtctldata.GetShardResponse + * @memberof vtctldata.MountShowResponse * @instance * @returns {Object.} JSON object */ - GetShardResponse.prototype.toJSON = function toJSON() { + MountShowResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetShardResponse + * Gets the default type url for MountShowResponse * @function getTypeUrl - * @memberof vtctldata.GetShardResponse + * @memberof vtctldata.MountShowResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetShardResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MountShowResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetShardResponse"; + return typeUrlPrefix + "/vtctldata.MountShowResponse"; }; - return GetShardResponse; + return MountShowResponse; })(); - vtctldata.GetShardRoutingRulesRequest = (function() { + vtctldata.MountListRequest = (function() { /** - * Properties of a GetShardRoutingRulesRequest. + * Properties of a MountListRequest. * @memberof vtctldata - * @interface IGetShardRoutingRulesRequest + * @interface IMountListRequest */ /** - * Constructs a new GetShardRoutingRulesRequest. + * Constructs a new MountListRequest. * @memberof vtctldata - * @classdesc Represents a GetShardRoutingRulesRequest. - * @implements IGetShardRoutingRulesRequest + * @classdesc Represents a MountListRequest. 
+ * @implements IMountListRequest * @constructor - * @param {vtctldata.IGetShardRoutingRulesRequest=} [properties] Properties to set + * @param {vtctldata.IMountListRequest=} [properties] Properties to set */ - function GetShardRoutingRulesRequest(properties) { + function MountListRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -116560,60 +138106,60 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new GetShardRoutingRulesRequest instance using the specified properties. + * Creates a new MountListRequest instance using the specified properties. * @function create - * @memberof vtctldata.GetShardRoutingRulesRequest + * @memberof vtctldata.MountListRequest * @static - * @param {vtctldata.IGetShardRoutingRulesRequest=} [properties] Properties to set - * @returns {vtctldata.GetShardRoutingRulesRequest} GetShardRoutingRulesRequest instance + * @param {vtctldata.IMountListRequest=} [properties] Properties to set + * @returns {vtctldata.MountListRequest} MountListRequest instance */ - GetShardRoutingRulesRequest.create = function create(properties) { - return new GetShardRoutingRulesRequest(properties); + MountListRequest.create = function create(properties) { + return new MountListRequest(properties); }; /** - * Encodes the specified GetShardRoutingRulesRequest message. Does not implicitly {@link vtctldata.GetShardRoutingRulesRequest.verify|verify} messages. + * Encodes the specified MountListRequest message. Does not implicitly {@link vtctldata.MountListRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetShardRoutingRulesRequest + * @memberof vtctldata.MountListRequest * @static - * @param {vtctldata.IGetShardRoutingRulesRequest} message GetShardRoutingRulesRequest message or plain object to encode + * @param {vtctldata.IMountListRequest} message MountListRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetShardRoutingRulesRequest.encode = function encode(message, writer) { + MountListRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); return writer; }; /** - * Encodes the specified GetShardRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.GetShardRoutingRulesRequest.verify|verify} messages. + * Encodes the specified MountListRequest message, length delimited. Does not implicitly {@link vtctldata.MountListRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetShardRoutingRulesRequest + * @memberof vtctldata.MountListRequest * @static - * @param {vtctldata.IGetShardRoutingRulesRequest} message GetShardRoutingRulesRequest message or plain object to encode + * @param {vtctldata.IMountListRequest} message MountListRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetShardRoutingRulesRequest.encodeDelimited = function encodeDelimited(message, writer) { + MountListRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetShardRoutingRulesRequest message from the specified reader or buffer. + * Decodes a MountListRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.GetShardRoutingRulesRequest + * @memberof vtctldata.MountListRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetShardRoutingRulesRequest} GetShardRoutingRulesRequest + * @returns {vtctldata.MountListRequest} MountListRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetShardRoutingRulesRequest.decode = function decode(reader, length) { + MountListRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetShardRoutingRulesRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.MountListRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -116626,109 +138172,110 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetShardRoutingRulesRequest message from the specified reader or buffer, length delimited. + * Decodes a MountListRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.GetShardRoutingRulesRequest + * @memberof vtctldata.MountListRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetShardRoutingRulesRequest} GetShardRoutingRulesRequest + * @returns {vtctldata.MountListRequest} MountListRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetShardRoutingRulesRequest.decodeDelimited = function decodeDelimited(reader) { + MountListRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetShardRoutingRulesRequest message. + * Verifies a MountListRequest message. * @function verify - * @memberof vtctldata.GetShardRoutingRulesRequest + * @memberof vtctldata.MountListRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetShardRoutingRulesRequest.verify = function verify(message) { + MountListRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; return null; }; /** - * Creates a GetShardRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. + * Creates a MountListRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetShardRoutingRulesRequest + * @memberof vtctldata.MountListRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetShardRoutingRulesRequest} GetShardRoutingRulesRequest + * @returns {vtctldata.MountListRequest} MountListRequest */ - GetShardRoutingRulesRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetShardRoutingRulesRequest) + MountListRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.MountListRequest) return object; - return new $root.vtctldata.GetShardRoutingRulesRequest(); + return new $root.vtctldata.MountListRequest(); }; /** - * Creates a plain object from a GetShardRoutingRulesRequest message. Also converts values to other types if specified. + * Creates a plain object from a MountListRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.GetShardRoutingRulesRequest + * @memberof vtctldata.MountListRequest * @static - * @param {vtctldata.GetShardRoutingRulesRequest} message GetShardRoutingRulesRequest + * @param {vtctldata.MountListRequest} message MountListRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetShardRoutingRulesRequest.toObject = function toObject() { + MountListRequest.toObject = function toObject() { return {}; }; /** - * Converts this GetShardRoutingRulesRequest to JSON. + * Converts this MountListRequest to JSON. 
* @function toJSON - * @memberof vtctldata.GetShardRoutingRulesRequest + * @memberof vtctldata.MountListRequest * @instance * @returns {Object.} JSON object */ - GetShardRoutingRulesRequest.prototype.toJSON = function toJSON() { + MountListRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetShardRoutingRulesRequest + * Gets the default type url for MountListRequest * @function getTypeUrl - * @memberof vtctldata.GetShardRoutingRulesRequest + * @memberof vtctldata.MountListRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetShardRoutingRulesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MountListRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetShardRoutingRulesRequest"; + return typeUrlPrefix + "/vtctldata.MountListRequest"; }; - return GetShardRoutingRulesRequest; + return MountListRequest; })(); - vtctldata.GetShardRoutingRulesResponse = (function() { + vtctldata.MountListResponse = (function() { /** - * Properties of a GetShardRoutingRulesResponse. + * Properties of a MountListResponse. * @memberof vtctldata - * @interface IGetShardRoutingRulesResponse - * @property {vschema.IShardRoutingRules|null} [shard_routing_rules] GetShardRoutingRulesResponse shard_routing_rules + * @interface IMountListResponse + * @property {Array.|null} [names] MountListResponse names */ /** - * Constructs a new GetShardRoutingRulesResponse. + * Constructs a new MountListResponse. * @memberof vtctldata - * @classdesc Represents a GetShardRoutingRulesResponse. - * @implements IGetShardRoutingRulesResponse + * @classdesc Represents a MountListResponse. 
+ * @implements IMountListResponse * @constructor - * @param {vtctldata.IGetShardRoutingRulesResponse=} [properties] Properties to set + * @param {vtctldata.IMountListResponse=} [properties] Properties to set */ - function GetShardRoutingRulesResponse(properties) { + function MountListResponse(properties) { + this.names = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -116736,75 +138283,78 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetShardRoutingRulesResponse shard_routing_rules. - * @member {vschema.IShardRoutingRules|null|undefined} shard_routing_rules - * @memberof vtctldata.GetShardRoutingRulesResponse + * MountListResponse names. + * @member {Array.} names + * @memberof vtctldata.MountListResponse * @instance */ - GetShardRoutingRulesResponse.prototype.shard_routing_rules = null; + MountListResponse.prototype.names = $util.emptyArray; /** - * Creates a new GetShardRoutingRulesResponse instance using the specified properties. + * Creates a new MountListResponse instance using the specified properties. * @function create - * @memberof vtctldata.GetShardRoutingRulesResponse + * @memberof vtctldata.MountListResponse * @static - * @param {vtctldata.IGetShardRoutingRulesResponse=} [properties] Properties to set - * @returns {vtctldata.GetShardRoutingRulesResponse} GetShardRoutingRulesResponse instance + * @param {vtctldata.IMountListResponse=} [properties] Properties to set + * @returns {vtctldata.MountListResponse} MountListResponse instance */ - GetShardRoutingRulesResponse.create = function create(properties) { - return new GetShardRoutingRulesResponse(properties); + MountListResponse.create = function create(properties) { + return new MountListResponse(properties); }; /** - * Encodes the specified GetShardRoutingRulesResponse message. Does not implicitly {@link vtctldata.GetShardRoutingRulesResponse.verify|verify} messages. 
+ * Encodes the specified MountListResponse message. Does not implicitly {@link vtctldata.MountListResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.GetShardRoutingRulesResponse + * @memberof vtctldata.MountListResponse * @static - * @param {vtctldata.IGetShardRoutingRulesResponse} message GetShardRoutingRulesResponse message or plain object to encode + * @param {vtctldata.IMountListResponse} message MountListResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetShardRoutingRulesResponse.encode = function encode(message, writer) { + MountListResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.shard_routing_rules != null && Object.hasOwnProperty.call(message, "shard_routing_rules")) - $root.vschema.ShardRoutingRules.encode(message.shard_routing_rules, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.names != null && message.names.length) + for (let i = 0; i < message.names.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.names[i]); return writer; }; /** - * Encodes the specified GetShardRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.GetShardRoutingRulesResponse.verify|verify} messages. + * Encodes the specified MountListResponse message, length delimited. Does not implicitly {@link vtctldata.MountListResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetShardRoutingRulesResponse + * @memberof vtctldata.MountListResponse * @static - * @param {vtctldata.IGetShardRoutingRulesResponse} message GetShardRoutingRulesResponse message or plain object to encode + * @param {vtctldata.IMountListResponse} message MountListResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetShardRoutingRulesResponse.encodeDelimited = function encodeDelimited(message, writer) { + MountListResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetShardRoutingRulesResponse message from the specified reader or buffer. + * Decodes a MountListResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetShardRoutingRulesResponse + * @memberof vtctldata.MountListResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetShardRoutingRulesResponse} GetShardRoutingRulesResponse + * @returns {vtctldata.MountListResponse} MountListResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetShardRoutingRulesResponse.decode = function decode(reader, length) { + MountListResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetShardRoutingRulesResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.MountListResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.shard_routing_rules = $root.vschema.ShardRoutingRules.decode(reader, reader.uint32()); + if (!(message.names && message.names.length)) + message.names = []; + message.names.push(reader.string()); break; } default: @@ -116816,128 +138366,157 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetShardRoutingRulesResponse message from the specified reader or buffer, length delimited. + * Decodes a MountListResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetShardRoutingRulesResponse + * @memberof vtctldata.MountListResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetShardRoutingRulesResponse} GetShardRoutingRulesResponse + * @returns {vtctldata.MountListResponse} MountListResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetShardRoutingRulesResponse.decodeDelimited = function decodeDelimited(reader) { + MountListResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetShardRoutingRulesResponse message. + * Verifies a MountListResponse message. 
* @function verify - * @memberof vtctldata.GetShardRoutingRulesResponse + * @memberof vtctldata.MountListResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetShardRoutingRulesResponse.verify = function verify(message) { + MountListResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.shard_routing_rules != null && message.hasOwnProperty("shard_routing_rules")) { - let error = $root.vschema.ShardRoutingRules.verify(message.shard_routing_rules); - if (error) - return "shard_routing_rules." + error; + if (message.names != null && message.hasOwnProperty("names")) { + if (!Array.isArray(message.names)) + return "names: array expected"; + for (let i = 0; i < message.names.length; ++i) + if (!$util.isString(message.names[i])) + return "names: string[] expected"; } return null; }; /** - * Creates a GetShardRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. + * Creates a MountListResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetShardRoutingRulesResponse + * @memberof vtctldata.MountListResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetShardRoutingRulesResponse} GetShardRoutingRulesResponse + * @returns {vtctldata.MountListResponse} MountListResponse */ - GetShardRoutingRulesResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetShardRoutingRulesResponse) + MountListResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.MountListResponse) return object; - let message = new $root.vtctldata.GetShardRoutingRulesResponse(); - if (object.shard_routing_rules != null) { - if (typeof object.shard_routing_rules !== "object") - throw TypeError(".vtctldata.GetShardRoutingRulesResponse.shard_routing_rules: object expected"); - message.shard_routing_rules = $root.vschema.ShardRoutingRules.fromObject(object.shard_routing_rules); + let message = new $root.vtctldata.MountListResponse(); + if (object.names) { + if (!Array.isArray(object.names)) + throw TypeError(".vtctldata.MountListResponse.names: array expected"); + message.names = []; + for (let i = 0; i < object.names.length; ++i) + message.names[i] = String(object.names[i]); } return message; }; /** - * Creates a plain object from a GetShardRoutingRulesResponse message. Also converts values to other types if specified. + * Creates a plain object from a MountListResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetShardRoutingRulesResponse + * @memberof vtctldata.MountListResponse * @static - * @param {vtctldata.GetShardRoutingRulesResponse} message GetShardRoutingRulesResponse + * @param {vtctldata.MountListResponse} message MountListResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetShardRoutingRulesResponse.toObject = function toObject(message, options) { + MountListResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.shard_routing_rules = null; - if (message.shard_routing_rules != null && message.hasOwnProperty("shard_routing_rules")) - object.shard_routing_rules = $root.vschema.ShardRoutingRules.toObject(message.shard_routing_rules, options); + if (options.arrays || options.defaults) + object.names = []; + if (message.names && message.names.length) { + object.names = []; + for (let j = 0; j < message.names.length; ++j) + object.names[j] = message.names[j]; + } return object; }; /** - * Converts this GetShardRoutingRulesResponse to JSON. + * Converts this MountListResponse to JSON. 
* @function toJSON - * @memberof vtctldata.GetShardRoutingRulesResponse + * @memberof vtctldata.MountListResponse * @instance * @returns {Object.} JSON object */ - GetShardRoutingRulesResponse.prototype.toJSON = function toJSON() { + MountListResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetShardRoutingRulesResponse + * Gets the default type url for MountListResponse * @function getTypeUrl - * @memberof vtctldata.GetShardRoutingRulesResponse + * @memberof vtctldata.MountListResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetShardRoutingRulesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MountListResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetShardRoutingRulesResponse"; + return typeUrlPrefix + "/vtctldata.MountListResponse"; }; - return GetShardRoutingRulesResponse; + return MountListResponse; })(); - vtctldata.GetSrvKeyspaceNamesRequest = (function() { + vtctldata.MoveTablesCreateRequest = (function() { /** - * Properties of a GetSrvKeyspaceNamesRequest. + * Properties of a MoveTablesCreateRequest. * @memberof vtctldata - * @interface IGetSrvKeyspaceNamesRequest - * @property {Array.|null} [cells] GetSrvKeyspaceNamesRequest cells - */ - - /** - * Constructs a new GetSrvKeyspaceNamesRequest. 
+ * @interface IMoveTablesCreateRequest + * @property {string|null} [workflow] MoveTablesCreateRequest workflow + * @property {string|null} [source_keyspace] MoveTablesCreateRequest source_keyspace + * @property {string|null} [target_keyspace] MoveTablesCreateRequest target_keyspace + * @property {Array.|null} [cells] MoveTablesCreateRequest cells + * @property {Array.|null} [tablet_types] MoveTablesCreateRequest tablet_types + * @property {tabletmanagerdata.TabletSelectionPreference|null} [tablet_selection_preference] MoveTablesCreateRequest tablet_selection_preference + * @property {Array.|null} [source_shards] MoveTablesCreateRequest source_shards + * @property {boolean|null} [all_tables] MoveTablesCreateRequest all_tables + * @property {Array.|null} [include_tables] MoveTablesCreateRequest include_tables + * @property {Array.|null} [exclude_tables] MoveTablesCreateRequest exclude_tables + * @property {string|null} [external_cluster_name] MoveTablesCreateRequest external_cluster_name + * @property {string|null} [source_time_zone] MoveTablesCreateRequest source_time_zone + * @property {string|null} [on_ddl] MoveTablesCreateRequest on_ddl + * @property {boolean|null} [stop_after_copy] MoveTablesCreateRequest stop_after_copy + * @property {boolean|null} [drop_foreign_keys] MoveTablesCreateRequest drop_foreign_keys + * @property {boolean|null} [defer_secondary_keys] MoveTablesCreateRequest defer_secondary_keys + * @property {boolean|null} [auto_start] MoveTablesCreateRequest auto_start + * @property {boolean|null} [no_routing_rules] MoveTablesCreateRequest no_routing_rules + * @property {boolean|null} [atomic_copy] MoveTablesCreateRequest atomic_copy + */ + + /** + * Constructs a new MoveTablesCreateRequest. * @memberof vtctldata - * @classdesc Represents a GetSrvKeyspaceNamesRequest. - * @implements IGetSrvKeyspaceNamesRequest + * @classdesc Represents a MoveTablesCreateRequest. 
+ * @implements IMoveTablesCreateRequest * @constructor - * @param {vtctldata.IGetSrvKeyspaceNamesRequest=} [properties] Properties to set + * @param {vtctldata.IMoveTablesCreateRequest=} [properties] Properties to set */ - function GetSrvKeyspaceNamesRequest(properties) { + function MoveTablesCreateRequest(properties) { this.cells = []; + this.tablet_types = []; + this.source_shards = []; + this.include_tables = []; + this.exclude_tables = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -116945,80 +138524,352 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetSrvKeyspaceNamesRequest cells. + * MoveTablesCreateRequest workflow. + * @member {string} workflow + * @memberof vtctldata.MoveTablesCreateRequest + * @instance + */ + MoveTablesCreateRequest.prototype.workflow = ""; + + /** + * MoveTablesCreateRequest source_keyspace. + * @member {string} source_keyspace + * @memberof vtctldata.MoveTablesCreateRequest + * @instance + */ + MoveTablesCreateRequest.prototype.source_keyspace = ""; + + /** + * MoveTablesCreateRequest target_keyspace. + * @member {string} target_keyspace + * @memberof vtctldata.MoveTablesCreateRequest + * @instance + */ + MoveTablesCreateRequest.prototype.target_keyspace = ""; + + /** + * MoveTablesCreateRequest cells. * @member {Array.} cells - * @memberof vtctldata.GetSrvKeyspaceNamesRequest + * @memberof vtctldata.MoveTablesCreateRequest * @instance */ - GetSrvKeyspaceNamesRequest.prototype.cells = $util.emptyArray; + MoveTablesCreateRequest.prototype.cells = $util.emptyArray; /** - * Creates a new GetSrvKeyspaceNamesRequest instance using the specified properties. + * MoveTablesCreateRequest tablet_types. + * @member {Array.} tablet_types + * @memberof vtctldata.MoveTablesCreateRequest + * @instance + */ + MoveTablesCreateRequest.prototype.tablet_types = $util.emptyArray; + + /** + * MoveTablesCreateRequest tablet_selection_preference. 
+ * @member {tabletmanagerdata.TabletSelectionPreference} tablet_selection_preference + * @memberof vtctldata.MoveTablesCreateRequest + * @instance + */ + MoveTablesCreateRequest.prototype.tablet_selection_preference = 0; + + /** + * MoveTablesCreateRequest source_shards. + * @member {Array.} source_shards + * @memberof vtctldata.MoveTablesCreateRequest + * @instance + */ + MoveTablesCreateRequest.prototype.source_shards = $util.emptyArray; + + /** + * MoveTablesCreateRequest all_tables. + * @member {boolean} all_tables + * @memberof vtctldata.MoveTablesCreateRequest + * @instance + */ + MoveTablesCreateRequest.prototype.all_tables = false; + + /** + * MoveTablesCreateRequest include_tables. + * @member {Array.} include_tables + * @memberof vtctldata.MoveTablesCreateRequest + * @instance + */ + MoveTablesCreateRequest.prototype.include_tables = $util.emptyArray; + + /** + * MoveTablesCreateRequest exclude_tables. + * @member {Array.} exclude_tables + * @memberof vtctldata.MoveTablesCreateRequest + * @instance + */ + MoveTablesCreateRequest.prototype.exclude_tables = $util.emptyArray; + + /** + * MoveTablesCreateRequest external_cluster_name. + * @member {string} external_cluster_name + * @memberof vtctldata.MoveTablesCreateRequest + * @instance + */ + MoveTablesCreateRequest.prototype.external_cluster_name = ""; + + /** + * MoveTablesCreateRequest source_time_zone. + * @member {string} source_time_zone + * @memberof vtctldata.MoveTablesCreateRequest + * @instance + */ + MoveTablesCreateRequest.prototype.source_time_zone = ""; + + /** + * MoveTablesCreateRequest on_ddl. + * @member {string} on_ddl + * @memberof vtctldata.MoveTablesCreateRequest + * @instance + */ + MoveTablesCreateRequest.prototype.on_ddl = ""; + + /** + * MoveTablesCreateRequest stop_after_copy. 
+ * @member {boolean} stop_after_copy + * @memberof vtctldata.MoveTablesCreateRequest + * @instance + */ + MoveTablesCreateRequest.prototype.stop_after_copy = false; + + /** + * MoveTablesCreateRequest drop_foreign_keys. + * @member {boolean} drop_foreign_keys + * @memberof vtctldata.MoveTablesCreateRequest + * @instance + */ + MoveTablesCreateRequest.prototype.drop_foreign_keys = false; + + /** + * MoveTablesCreateRequest defer_secondary_keys. + * @member {boolean} defer_secondary_keys + * @memberof vtctldata.MoveTablesCreateRequest + * @instance + */ + MoveTablesCreateRequest.prototype.defer_secondary_keys = false; + + /** + * MoveTablesCreateRequest auto_start. + * @member {boolean} auto_start + * @memberof vtctldata.MoveTablesCreateRequest + * @instance + */ + MoveTablesCreateRequest.prototype.auto_start = false; + + /** + * MoveTablesCreateRequest no_routing_rules. + * @member {boolean} no_routing_rules + * @memberof vtctldata.MoveTablesCreateRequest + * @instance + */ + MoveTablesCreateRequest.prototype.no_routing_rules = false; + + /** + * MoveTablesCreateRequest atomic_copy. + * @member {boolean} atomic_copy + * @memberof vtctldata.MoveTablesCreateRequest + * @instance + */ + MoveTablesCreateRequest.prototype.atomic_copy = false; + + /** + * Creates a new MoveTablesCreateRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.GetSrvKeyspaceNamesRequest + * @memberof vtctldata.MoveTablesCreateRequest * @static - * @param {vtctldata.IGetSrvKeyspaceNamesRequest=} [properties] Properties to set - * @returns {vtctldata.GetSrvKeyspaceNamesRequest} GetSrvKeyspaceNamesRequest instance + * @param {vtctldata.IMoveTablesCreateRequest=} [properties] Properties to set + * @returns {vtctldata.MoveTablesCreateRequest} MoveTablesCreateRequest instance */ - GetSrvKeyspaceNamesRequest.create = function create(properties) { - return new GetSrvKeyspaceNamesRequest(properties); + MoveTablesCreateRequest.create = function create(properties) { + return new MoveTablesCreateRequest(properties); }; /** - * Encodes the specified GetSrvKeyspaceNamesRequest message. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesRequest.verify|verify} messages. + * Encodes the specified MoveTablesCreateRequest message. Does not implicitly {@link vtctldata.MoveTablesCreateRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetSrvKeyspaceNamesRequest + * @memberof vtctldata.MoveTablesCreateRequest * @static - * @param {vtctldata.IGetSrvKeyspaceNamesRequest} message GetSrvKeyspaceNamesRequest message or plain object to encode + * @param {vtctldata.IMoveTablesCreateRequest} message MoveTablesCreateRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSrvKeyspaceNamesRequest.encode = function encode(message, writer) { + MoveTablesCreateRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.workflow); + if (message.source_keyspace != null && Object.hasOwnProperty.call(message, "source_keyspace")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.source_keyspace); + if (message.target_keyspace != null && Object.hasOwnProperty.call(message, "target_keyspace")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.target_keyspace); if (message.cells != null && message.cells.length) for (let i = 0; i < message.cells.length; ++i) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.cells[i]); + writer.uint32(/* id 4, wireType 2 =*/34).string(message.cells[i]); + if (message.tablet_types != null && message.tablet_types.length) { + writer.uint32(/* id 5, wireType 2 =*/42).fork(); + for (let i = 0; i < message.tablet_types.length; ++i) + writer.int32(message.tablet_types[i]); + writer.ldelim(); + } + if (message.tablet_selection_preference != null && Object.hasOwnProperty.call(message, "tablet_selection_preference")) + writer.uint32(/* id 6, wireType 0 =*/48).int32(message.tablet_selection_preference); + if (message.source_shards != null && message.source_shards.length) + for (let i = 0; i < message.source_shards.length; ++i) + writer.uint32(/* id 7, wireType 2 
=*/58).string(message.source_shards[i]); + if (message.all_tables != null && Object.hasOwnProperty.call(message, "all_tables")) + writer.uint32(/* id 8, wireType 0 =*/64).bool(message.all_tables); + if (message.include_tables != null && message.include_tables.length) + for (let i = 0; i < message.include_tables.length; ++i) + writer.uint32(/* id 9, wireType 2 =*/74).string(message.include_tables[i]); + if (message.exclude_tables != null && message.exclude_tables.length) + for (let i = 0; i < message.exclude_tables.length; ++i) + writer.uint32(/* id 10, wireType 2 =*/82).string(message.exclude_tables[i]); + if (message.external_cluster_name != null && Object.hasOwnProperty.call(message, "external_cluster_name")) + writer.uint32(/* id 11, wireType 2 =*/90).string(message.external_cluster_name); + if (message.source_time_zone != null && Object.hasOwnProperty.call(message, "source_time_zone")) + writer.uint32(/* id 12, wireType 2 =*/98).string(message.source_time_zone); + if (message.on_ddl != null && Object.hasOwnProperty.call(message, "on_ddl")) + writer.uint32(/* id 13, wireType 2 =*/106).string(message.on_ddl); + if (message.stop_after_copy != null && Object.hasOwnProperty.call(message, "stop_after_copy")) + writer.uint32(/* id 14, wireType 0 =*/112).bool(message.stop_after_copy); + if (message.drop_foreign_keys != null && Object.hasOwnProperty.call(message, "drop_foreign_keys")) + writer.uint32(/* id 15, wireType 0 =*/120).bool(message.drop_foreign_keys); + if (message.defer_secondary_keys != null && Object.hasOwnProperty.call(message, "defer_secondary_keys")) + writer.uint32(/* id 16, wireType 0 =*/128).bool(message.defer_secondary_keys); + if (message.auto_start != null && Object.hasOwnProperty.call(message, "auto_start")) + writer.uint32(/* id 17, wireType 0 =*/136).bool(message.auto_start); + if (message.no_routing_rules != null && Object.hasOwnProperty.call(message, "no_routing_rules")) + writer.uint32(/* id 18, wireType 0 
=*/144).bool(message.no_routing_rules); + if (message.atomic_copy != null && Object.hasOwnProperty.call(message, "atomic_copy")) + writer.uint32(/* id 19, wireType 0 =*/152).bool(message.atomic_copy); return writer; }; /** - * Encodes the specified GetSrvKeyspaceNamesRequest message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesRequest.verify|verify} messages. + * Encodes the specified MoveTablesCreateRequest message, length delimited. Does not implicitly {@link vtctldata.MoveTablesCreateRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetSrvKeyspaceNamesRequest + * @memberof vtctldata.MoveTablesCreateRequest * @static - * @param {vtctldata.IGetSrvKeyspaceNamesRequest} message GetSrvKeyspaceNamesRequest message or plain object to encode + * @param {vtctldata.IMoveTablesCreateRequest} message MoveTablesCreateRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSrvKeyspaceNamesRequest.encodeDelimited = function encodeDelimited(message, writer) { + MoveTablesCreateRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetSrvKeyspaceNamesRequest message from the specified reader or buffer. + * Decodes a MoveTablesCreateRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.GetSrvKeyspaceNamesRequest + * @memberof vtctldata.MoveTablesCreateRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetSrvKeyspaceNamesRequest} GetSrvKeyspaceNamesRequest + * @returns {vtctldata.MoveTablesCreateRequest} MoveTablesCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSrvKeyspaceNamesRequest.decode = function decode(reader, length) { + MoveTablesCreateRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetSrvKeyspaceNamesRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.MoveTablesCreateRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { + message.workflow = reader.string(); + break; + } + case 2: { + message.source_keyspace = reader.string(); + break; + } + case 3: { + message.target_keyspace = reader.string(); + break; + } + case 4: { if (!(message.cells && message.cells.length)) message.cells = []; message.cells.push(reader.string()); break; } + case 5: { + if (!(message.tablet_types && message.tablet_types.length)) + message.tablet_types = []; + if ((tag & 7) === 2) { + let end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.tablet_types.push(reader.int32()); + } else + message.tablet_types.push(reader.int32()); + break; + } + case 6: { + message.tablet_selection_preference = reader.int32(); + break; + } + case 7: { + if (!(message.source_shards && message.source_shards.length)) + message.source_shards = []; + message.source_shards.push(reader.string()); + break; + } + case 8: { + 
message.all_tables = reader.bool(); + break; + } + case 9: { + if (!(message.include_tables && message.include_tables.length)) + message.include_tables = []; + message.include_tables.push(reader.string()); + break; + } + case 10: { + if (!(message.exclude_tables && message.exclude_tables.length)) + message.exclude_tables = []; + message.exclude_tables.push(reader.string()); + break; + } + case 11: { + message.external_cluster_name = reader.string(); + break; + } + case 12: { + message.source_time_zone = reader.string(); + break; + } + case 13: { + message.on_ddl = reader.string(); + break; + } + case 14: { + message.stop_after_copy = reader.bool(); + break; + } + case 15: { + message.drop_foreign_keys = reader.bool(); + break; + } + case 16: { + message.defer_secondary_keys = reader.bool(); + break; + } + case 17: { + message.auto_start = reader.bool(); + break; + } + case 18: { + message.no_routing_rules = reader.bool(); + break; + } + case 19: { + message.atomic_copy = reader.bool(); + break; + } default: reader.skipType(tag & 7); break; @@ -117028,32 +138879,41 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetSrvKeyspaceNamesRequest message from the specified reader or buffer, length delimited. + * Decodes a MoveTablesCreateRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.GetSrvKeyspaceNamesRequest + * @memberof vtctldata.MoveTablesCreateRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetSrvKeyspaceNamesRequest} GetSrvKeyspaceNamesRequest + * @returns {vtctldata.MoveTablesCreateRequest} MoveTablesCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSrvKeyspaceNamesRequest.decodeDelimited = function decodeDelimited(reader) { + MoveTablesCreateRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetSrvKeyspaceNamesRequest message. + * Verifies a MoveTablesCreateRequest message. * @function verify - * @memberof vtctldata.GetSrvKeyspaceNamesRequest + * @memberof vtctldata.MoveTablesCreateRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetSrvKeyspaceNamesRequest.verify = function verify(message) { + MoveTablesCreateRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; + if (message.source_keyspace != null && message.hasOwnProperty("source_keyspace")) + if (!$util.isString(message.source_keyspace)) + return "source_keyspace: string expected"; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + if (!$util.isString(message.target_keyspace)) + return "target_keyspace: string expected"; if (message.cells != null && message.hasOwnProperty("cells")) { if (!Array.isArray(message.cells)) return "cells: array expected"; @@ -117061,102 
+138921,377 @@ export const vtctldata = $root.vtctldata = (() => { if (!$util.isString(message.cells[i])) return "cells: string[] expected"; } + if (message.tablet_types != null && message.hasOwnProperty("tablet_types")) { + if (!Array.isArray(message.tablet_types)) + return "tablet_types: array expected"; + for (let i = 0; i < message.tablet_types.length; ++i) + switch (message.tablet_types[i]) { + default: + return "tablet_types: enum value[] expected"; + case 0: + case 1: + case 1: + case 2: + case 3: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + } + } + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + switch (message.tablet_selection_preference) { + default: + return "tablet_selection_preference: enum value expected"; + case 0: + case 1: + case 3: + break; + } + if (message.source_shards != null && message.hasOwnProperty("source_shards")) { + if (!Array.isArray(message.source_shards)) + return "source_shards: array expected"; + for (let i = 0; i < message.source_shards.length; ++i) + if (!$util.isString(message.source_shards[i])) + return "source_shards: string[] expected"; + } + if (message.all_tables != null && message.hasOwnProperty("all_tables")) + if (typeof message.all_tables !== "boolean") + return "all_tables: boolean expected"; + if (message.include_tables != null && message.hasOwnProperty("include_tables")) { + if (!Array.isArray(message.include_tables)) + return "include_tables: array expected"; + for (let i = 0; i < message.include_tables.length; ++i) + if (!$util.isString(message.include_tables[i])) + return "include_tables: string[] expected"; + } + if (message.exclude_tables != null && message.hasOwnProperty("exclude_tables")) { + if (!Array.isArray(message.exclude_tables)) + return "exclude_tables: array expected"; + for (let i = 0; i < message.exclude_tables.length; ++i) + if (!$util.isString(message.exclude_tables[i])) + return "exclude_tables: string[] 
expected"; + } + if (message.external_cluster_name != null && message.hasOwnProperty("external_cluster_name")) + if (!$util.isString(message.external_cluster_name)) + return "external_cluster_name: string expected"; + if (message.source_time_zone != null && message.hasOwnProperty("source_time_zone")) + if (!$util.isString(message.source_time_zone)) + return "source_time_zone: string expected"; + if (message.on_ddl != null && message.hasOwnProperty("on_ddl")) + if (!$util.isString(message.on_ddl)) + return "on_ddl: string expected"; + if (message.stop_after_copy != null && message.hasOwnProperty("stop_after_copy")) + if (typeof message.stop_after_copy !== "boolean") + return "stop_after_copy: boolean expected"; + if (message.drop_foreign_keys != null && message.hasOwnProperty("drop_foreign_keys")) + if (typeof message.drop_foreign_keys !== "boolean") + return "drop_foreign_keys: boolean expected"; + if (message.defer_secondary_keys != null && message.hasOwnProperty("defer_secondary_keys")) + if (typeof message.defer_secondary_keys !== "boolean") + return "defer_secondary_keys: boolean expected"; + if (message.auto_start != null && message.hasOwnProperty("auto_start")) + if (typeof message.auto_start !== "boolean") + return "auto_start: boolean expected"; + if (message.no_routing_rules != null && message.hasOwnProperty("no_routing_rules")) + if (typeof message.no_routing_rules !== "boolean") + return "no_routing_rules: boolean expected"; + if (message.atomic_copy != null && message.hasOwnProperty("atomic_copy")) + if (typeof message.atomic_copy !== "boolean") + return "atomic_copy: boolean expected"; return null; }; /** - * Creates a GetSrvKeyspaceNamesRequest message from a plain object. Also converts values to their respective internal types. + * Creates a MoveTablesCreateRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetSrvKeyspaceNamesRequest + * @memberof vtctldata.MoveTablesCreateRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetSrvKeyspaceNamesRequest} GetSrvKeyspaceNamesRequest + * @returns {vtctldata.MoveTablesCreateRequest} MoveTablesCreateRequest */ - GetSrvKeyspaceNamesRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetSrvKeyspaceNamesRequest) + MoveTablesCreateRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.MoveTablesCreateRequest) return object; - let message = new $root.vtctldata.GetSrvKeyspaceNamesRequest(); + let message = new $root.vtctldata.MoveTablesCreateRequest(); + if (object.workflow != null) + message.workflow = String(object.workflow); + if (object.source_keyspace != null) + message.source_keyspace = String(object.source_keyspace); + if (object.target_keyspace != null) + message.target_keyspace = String(object.target_keyspace); if (object.cells) { if (!Array.isArray(object.cells)) - throw TypeError(".vtctldata.GetSrvKeyspaceNamesRequest.cells: array expected"); + throw TypeError(".vtctldata.MoveTablesCreateRequest.cells: array expected"); message.cells = []; for (let i = 0; i < object.cells.length; ++i) message.cells[i] = String(object.cells[i]); } + if (object.tablet_types) { + if (!Array.isArray(object.tablet_types)) + throw TypeError(".vtctldata.MoveTablesCreateRequest.tablet_types: array expected"); + message.tablet_types = []; + for (let i = 0; i < object.tablet_types.length; ++i) + switch (object.tablet_types[i]) { + default: + if (typeof object.tablet_types[i] === "number") { + message.tablet_types[i] = object.tablet_types[i]; + break; + } + case "UNKNOWN": + case 0: + message.tablet_types[i] = 0; + break; + case "PRIMARY": + case 1: + message.tablet_types[i] = 1; + break; + case "MASTER": + case 1: + message.tablet_types[i] = 1; + break; + case "REPLICA": + case 2: + 
message.tablet_types[i] = 2; + break; + case "RDONLY": + case 3: + message.tablet_types[i] = 3; + break; + case "BATCH": + case 3: + message.tablet_types[i] = 3; + break; + case "SPARE": + case 4: + message.tablet_types[i] = 4; + break; + case "EXPERIMENTAL": + case 5: + message.tablet_types[i] = 5; + break; + case "BACKUP": + case 6: + message.tablet_types[i] = 6; + break; + case "RESTORE": + case 7: + message.tablet_types[i] = 7; + break; + case "DRAINED": + case 8: + message.tablet_types[i] = 8; + break; + } + } + switch (object.tablet_selection_preference) { + default: + if (typeof object.tablet_selection_preference === "number") { + message.tablet_selection_preference = object.tablet_selection_preference; + break; + } + break; + case "ANY": + case 0: + message.tablet_selection_preference = 0; + break; + case "INORDER": + case 1: + message.tablet_selection_preference = 1; + break; + case "UNKNOWN": + case 3: + message.tablet_selection_preference = 3; + break; + } + if (object.source_shards) { + if (!Array.isArray(object.source_shards)) + throw TypeError(".vtctldata.MoveTablesCreateRequest.source_shards: array expected"); + message.source_shards = []; + for (let i = 0; i < object.source_shards.length; ++i) + message.source_shards[i] = String(object.source_shards[i]); + } + if (object.all_tables != null) + message.all_tables = Boolean(object.all_tables); + if (object.include_tables) { + if (!Array.isArray(object.include_tables)) + throw TypeError(".vtctldata.MoveTablesCreateRequest.include_tables: array expected"); + message.include_tables = []; + for (let i = 0; i < object.include_tables.length; ++i) + message.include_tables[i] = String(object.include_tables[i]); + } + if (object.exclude_tables) { + if (!Array.isArray(object.exclude_tables)) + throw TypeError(".vtctldata.MoveTablesCreateRequest.exclude_tables: array expected"); + message.exclude_tables = []; + for (let i = 0; i < object.exclude_tables.length; ++i) + message.exclude_tables[i] = 
String(object.exclude_tables[i]); + } + if (object.external_cluster_name != null) + message.external_cluster_name = String(object.external_cluster_name); + if (object.source_time_zone != null) + message.source_time_zone = String(object.source_time_zone); + if (object.on_ddl != null) + message.on_ddl = String(object.on_ddl); + if (object.stop_after_copy != null) + message.stop_after_copy = Boolean(object.stop_after_copy); + if (object.drop_foreign_keys != null) + message.drop_foreign_keys = Boolean(object.drop_foreign_keys); + if (object.defer_secondary_keys != null) + message.defer_secondary_keys = Boolean(object.defer_secondary_keys); + if (object.auto_start != null) + message.auto_start = Boolean(object.auto_start); + if (object.no_routing_rules != null) + message.no_routing_rules = Boolean(object.no_routing_rules); + if (object.atomic_copy != null) + message.atomic_copy = Boolean(object.atomic_copy); return message; }; /** - * Creates a plain object from a GetSrvKeyspaceNamesRequest message. Also converts values to other types if specified. + * Creates a plain object from a MoveTablesCreateRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetSrvKeyspaceNamesRequest + * @memberof vtctldata.MoveTablesCreateRequest * @static - * @param {vtctldata.GetSrvKeyspaceNamesRequest} message GetSrvKeyspaceNamesRequest + * @param {vtctldata.MoveTablesCreateRequest} message MoveTablesCreateRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetSrvKeyspaceNamesRequest.toObject = function toObject(message, options) { + MoveTablesCreateRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) + if (options.arrays || options.defaults) { object.cells = []; + object.tablet_types = []; + object.source_shards = []; + object.include_tables = []; + object.exclude_tables = []; + } + if (options.defaults) { + object.workflow = ""; + object.source_keyspace = ""; + object.target_keyspace = ""; + object.tablet_selection_preference = options.enums === String ? 
"ANY" : 0; + object.all_tables = false; + object.external_cluster_name = ""; + object.source_time_zone = ""; + object.on_ddl = ""; + object.stop_after_copy = false; + object.drop_foreign_keys = false; + object.defer_secondary_keys = false; + object.auto_start = false; + object.no_routing_rules = false; + object.atomic_copy = false; + } + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; + if (message.source_keyspace != null && message.hasOwnProperty("source_keyspace")) + object.source_keyspace = message.source_keyspace; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + object.target_keyspace = message.target_keyspace; if (message.cells && message.cells.length) { object.cells = []; for (let j = 0; j < message.cells.length; ++j) object.cells[j] = message.cells[j]; } + if (message.tablet_types && message.tablet_types.length) { + object.tablet_types = []; + for (let j = 0; j < message.tablet_types.length; ++j) + object.tablet_types[j] = options.enums === String ? $root.topodata.TabletType[message.tablet_types[j]] === undefined ? message.tablet_types[j] : $root.topodata.TabletType[message.tablet_types[j]] : message.tablet_types[j]; + } + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + object.tablet_selection_preference = options.enums === String ? $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] === undefined ? 
message.tablet_selection_preference : $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] : message.tablet_selection_preference; + if (message.source_shards && message.source_shards.length) { + object.source_shards = []; + for (let j = 0; j < message.source_shards.length; ++j) + object.source_shards[j] = message.source_shards[j]; + } + if (message.all_tables != null && message.hasOwnProperty("all_tables")) + object.all_tables = message.all_tables; + if (message.include_tables && message.include_tables.length) { + object.include_tables = []; + for (let j = 0; j < message.include_tables.length; ++j) + object.include_tables[j] = message.include_tables[j]; + } + if (message.exclude_tables && message.exclude_tables.length) { + object.exclude_tables = []; + for (let j = 0; j < message.exclude_tables.length; ++j) + object.exclude_tables[j] = message.exclude_tables[j]; + } + if (message.external_cluster_name != null && message.hasOwnProperty("external_cluster_name")) + object.external_cluster_name = message.external_cluster_name; + if (message.source_time_zone != null && message.hasOwnProperty("source_time_zone")) + object.source_time_zone = message.source_time_zone; + if (message.on_ddl != null && message.hasOwnProperty("on_ddl")) + object.on_ddl = message.on_ddl; + if (message.stop_after_copy != null && message.hasOwnProperty("stop_after_copy")) + object.stop_after_copy = message.stop_after_copy; + if (message.drop_foreign_keys != null && message.hasOwnProperty("drop_foreign_keys")) + object.drop_foreign_keys = message.drop_foreign_keys; + if (message.defer_secondary_keys != null && message.hasOwnProperty("defer_secondary_keys")) + object.defer_secondary_keys = message.defer_secondary_keys; + if (message.auto_start != null && message.hasOwnProperty("auto_start")) + object.auto_start = message.auto_start; + if (message.no_routing_rules != null && message.hasOwnProperty("no_routing_rules")) + object.no_routing_rules = 
message.no_routing_rules; + if (message.atomic_copy != null && message.hasOwnProperty("atomic_copy")) + object.atomic_copy = message.atomic_copy; return object; }; /** - * Converts this GetSrvKeyspaceNamesRequest to JSON. + * Converts this MoveTablesCreateRequest to JSON. * @function toJSON - * @memberof vtctldata.GetSrvKeyspaceNamesRequest + * @memberof vtctldata.MoveTablesCreateRequest * @instance * @returns {Object.} JSON object */ - GetSrvKeyspaceNamesRequest.prototype.toJSON = function toJSON() { + MoveTablesCreateRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetSrvKeyspaceNamesRequest + * Gets the default type url for MoveTablesCreateRequest * @function getTypeUrl - * @memberof vtctldata.GetSrvKeyspaceNamesRequest + * @memberof vtctldata.MoveTablesCreateRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetSrvKeyspaceNamesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MoveTablesCreateRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetSrvKeyspaceNamesRequest"; + return typeUrlPrefix + "/vtctldata.MoveTablesCreateRequest"; }; - return GetSrvKeyspaceNamesRequest; + return MoveTablesCreateRequest; })(); - vtctldata.GetSrvKeyspaceNamesResponse = (function() { + vtctldata.MoveTablesCreateResponse = (function() { /** - * Properties of a GetSrvKeyspaceNamesResponse. + * Properties of a MoveTablesCreateResponse. 
* @memberof vtctldata - * @interface IGetSrvKeyspaceNamesResponse - * @property {Object.|null} [names] GetSrvKeyspaceNamesResponse names + * @interface IMoveTablesCreateResponse + * @property {string|null} [summary] MoveTablesCreateResponse summary + * @property {Array.|null} [details] MoveTablesCreateResponse details */ /** - * Constructs a new GetSrvKeyspaceNamesResponse. + * Constructs a new MoveTablesCreateResponse. * @memberof vtctldata - * @classdesc Represents a GetSrvKeyspaceNamesResponse. - * @implements IGetSrvKeyspaceNamesResponse + * @classdesc Represents a MoveTablesCreateResponse. + * @implements IMoveTablesCreateResponse * @constructor - * @param {vtctldata.IGetSrvKeyspaceNamesResponse=} [properties] Properties to set + * @param {vtctldata.IMoveTablesCreateResponse=} [properties] Properties to set */ - function GetSrvKeyspaceNamesResponse(properties) { - this.names = {}; + function MoveTablesCreateResponse(properties) { + this.details = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -117164,97 +139299,92 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetSrvKeyspaceNamesResponse names. - * @member {Object.} names - * @memberof vtctldata.GetSrvKeyspaceNamesResponse + * MoveTablesCreateResponse summary. + * @member {string} summary + * @memberof vtctldata.MoveTablesCreateResponse * @instance */ - GetSrvKeyspaceNamesResponse.prototype.names = $util.emptyObject; + MoveTablesCreateResponse.prototype.summary = ""; /** - * Creates a new GetSrvKeyspaceNamesResponse instance using the specified properties. + * MoveTablesCreateResponse details. + * @member {Array.} details + * @memberof vtctldata.MoveTablesCreateResponse + * @instance + */ + MoveTablesCreateResponse.prototype.details = $util.emptyArray; + + /** + * Creates a new MoveTablesCreateResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.GetSrvKeyspaceNamesResponse + * @memberof vtctldata.MoveTablesCreateResponse * @static - * @param {vtctldata.IGetSrvKeyspaceNamesResponse=} [properties] Properties to set - * @returns {vtctldata.GetSrvKeyspaceNamesResponse} GetSrvKeyspaceNamesResponse instance + * @param {vtctldata.IMoveTablesCreateResponse=} [properties] Properties to set + * @returns {vtctldata.MoveTablesCreateResponse} MoveTablesCreateResponse instance */ - GetSrvKeyspaceNamesResponse.create = function create(properties) { - return new GetSrvKeyspaceNamesResponse(properties); + MoveTablesCreateResponse.create = function create(properties) { + return new MoveTablesCreateResponse(properties); }; /** - * Encodes the specified GetSrvKeyspaceNamesResponse message. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesResponse.verify|verify} messages. + * Encodes the specified MoveTablesCreateResponse message. Does not implicitly {@link vtctldata.MoveTablesCreateResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetSrvKeyspaceNamesResponse + * @memberof vtctldata.MoveTablesCreateResponse * @static - * @param {vtctldata.IGetSrvKeyspaceNamesResponse} message GetSrvKeyspaceNamesResponse message or plain object to encode + * @param {vtctldata.IMoveTablesCreateResponse} message MoveTablesCreateResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSrvKeyspaceNamesResponse.encode = function encode(message, writer) { + MoveTablesCreateResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.names != null && Object.hasOwnProperty.call(message, "names")) - for (let keys = Object.keys(message.names), i = 0; i < keys.length; ++i) { - writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); - $root.vtctldata.GetSrvKeyspaceNamesResponse.NameList.encode(message.names[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); - } + if (message.summary != null && Object.hasOwnProperty.call(message, "summary")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.summary); + if (message.details != null && message.details.length) + for (let i = 0; i < message.details.length; ++i) + $root.vtctldata.MoveTablesCreateResponse.TabletInfo.encode(message.details[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); return writer; }; /** - * Encodes the specified GetSrvKeyspaceNamesResponse message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesResponse.verify|verify} messages. + * Encodes the specified MoveTablesCreateResponse message, length delimited. Does not implicitly {@link vtctldata.MoveTablesCreateResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetSrvKeyspaceNamesResponse + * @memberof vtctldata.MoveTablesCreateResponse * @static - * @param {vtctldata.IGetSrvKeyspaceNamesResponse} message GetSrvKeyspaceNamesResponse message or plain object to encode + * @param {vtctldata.IMoveTablesCreateResponse} message MoveTablesCreateResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSrvKeyspaceNamesResponse.encodeDelimited = function encodeDelimited(message, writer) { + MoveTablesCreateResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetSrvKeyspaceNamesResponse message from the specified reader or buffer. + * Decodes a MoveTablesCreateResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetSrvKeyspaceNamesResponse + * @memberof vtctldata.MoveTablesCreateResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetSrvKeyspaceNamesResponse} GetSrvKeyspaceNamesResponse + * @returns {vtctldata.MoveTablesCreateResponse} MoveTablesCreateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSrvKeyspaceNamesResponse.decode = function decode(reader, length) { + MoveTablesCreateResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetSrvKeyspaceNamesResponse(), key, value; + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.MoveTablesCreateResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (message.names === $util.emptyObject) - message.names = {}; - let end2 = reader.uint32() + reader.pos; - key = ""; - value = null; - while (reader.pos < end2) { - let tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = $root.vtctldata.GetSrvKeyspaceNamesResponse.NameList.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.names[key] = value; + message.summary = reader.string(); + break; + } + case 2: { + if (!(message.details && message.details.length)) + message.details = []; + message.details.push($root.vtctldata.MoveTablesCreateResponse.TabletInfo.decode(reader, reader.uint32())); break; } default: @@ -117266,139 +139396,146 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetSrvKeyspaceNamesResponse message from the specified reader or buffer, length delimited. + * Decodes a MoveTablesCreateResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.GetSrvKeyspaceNamesResponse + * @memberof vtctldata.MoveTablesCreateResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetSrvKeyspaceNamesResponse} GetSrvKeyspaceNamesResponse + * @returns {vtctldata.MoveTablesCreateResponse} MoveTablesCreateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSrvKeyspaceNamesResponse.decodeDelimited = function decodeDelimited(reader) { + MoveTablesCreateResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetSrvKeyspaceNamesResponse message. + * Verifies a MoveTablesCreateResponse message. * @function verify - * @memberof vtctldata.GetSrvKeyspaceNamesResponse + * @memberof vtctldata.MoveTablesCreateResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetSrvKeyspaceNamesResponse.verify = function verify(message) { + MoveTablesCreateResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.names != null && message.hasOwnProperty("names")) { - if (!$util.isObject(message.names)) - return "names: object expected"; - let key = Object.keys(message.names); - for (let i = 0; i < key.length; ++i) { - let error = $root.vtctldata.GetSrvKeyspaceNamesResponse.NameList.verify(message.names[key[i]]); + if (message.summary != null && message.hasOwnProperty("summary")) + if (!$util.isString(message.summary)) + return "summary: string expected"; + if (message.details != null && message.hasOwnProperty("details")) { + if (!Array.isArray(message.details)) + return "details: array expected"; + for (let i = 0; i < 
message.details.length; ++i) { + let error = $root.vtctldata.MoveTablesCreateResponse.TabletInfo.verify(message.details[i]); if (error) - return "names." + error; + return "details." + error; } } return null; }; /** - * Creates a GetSrvKeyspaceNamesResponse message from a plain object. Also converts values to their respective internal types. + * Creates a MoveTablesCreateResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.GetSrvKeyspaceNamesResponse + * @memberof vtctldata.MoveTablesCreateResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetSrvKeyspaceNamesResponse} GetSrvKeyspaceNamesResponse + * @returns {vtctldata.MoveTablesCreateResponse} MoveTablesCreateResponse */ - GetSrvKeyspaceNamesResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetSrvKeyspaceNamesResponse) + MoveTablesCreateResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.MoveTablesCreateResponse) return object; - let message = new $root.vtctldata.GetSrvKeyspaceNamesResponse(); - if (object.names) { - if (typeof object.names !== "object") - throw TypeError(".vtctldata.GetSrvKeyspaceNamesResponse.names: object expected"); - message.names = {}; - for (let keys = Object.keys(object.names), i = 0; i < keys.length; ++i) { - if (typeof object.names[keys[i]] !== "object") - throw TypeError(".vtctldata.GetSrvKeyspaceNamesResponse.names: object expected"); - message.names[keys[i]] = $root.vtctldata.GetSrvKeyspaceNamesResponse.NameList.fromObject(object.names[keys[i]]); + let message = new $root.vtctldata.MoveTablesCreateResponse(); + if (object.summary != null) + message.summary = String(object.summary); + if (object.details) { + if (!Array.isArray(object.details)) + throw TypeError(".vtctldata.MoveTablesCreateResponse.details: array expected"); + message.details = []; + for (let i = 0; i < 
object.details.length; ++i) { + if (typeof object.details[i] !== "object") + throw TypeError(".vtctldata.MoveTablesCreateResponse.details: object expected"); + message.details[i] = $root.vtctldata.MoveTablesCreateResponse.TabletInfo.fromObject(object.details[i]); } } return message; }; /** - * Creates a plain object from a GetSrvKeyspaceNamesResponse message. Also converts values to other types if specified. + * Creates a plain object from a MoveTablesCreateResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.GetSrvKeyspaceNamesResponse + * @memberof vtctldata.MoveTablesCreateResponse * @static - * @param {vtctldata.GetSrvKeyspaceNamesResponse} message GetSrvKeyspaceNamesResponse + * @param {vtctldata.MoveTablesCreateResponse} message MoveTablesCreateResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetSrvKeyspaceNamesResponse.toObject = function toObject(message, options) { + MoveTablesCreateResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.objects || options.defaults) - object.names = {}; - let keys2; - if (message.names && (keys2 = Object.keys(message.names)).length) { - object.names = {}; - for (let j = 0; j < keys2.length; ++j) - object.names[keys2[j]] = $root.vtctldata.GetSrvKeyspaceNamesResponse.NameList.toObject(message.names[keys2[j]], options); + if (options.arrays || options.defaults) + object.details = []; + if (options.defaults) + object.summary = ""; + if (message.summary != null && message.hasOwnProperty("summary")) + object.summary = message.summary; + if (message.details && message.details.length) { + object.details = []; + for (let j = 0; j < message.details.length; ++j) + object.details[j] = $root.vtctldata.MoveTablesCreateResponse.TabletInfo.toObject(message.details[j], options); } return object; }; /** - * Converts this GetSrvKeyspaceNamesResponse to 
JSON. + * Converts this MoveTablesCreateResponse to JSON. * @function toJSON - * @memberof vtctldata.GetSrvKeyspaceNamesResponse + * @memberof vtctldata.MoveTablesCreateResponse * @instance * @returns {Object.} JSON object */ - GetSrvKeyspaceNamesResponse.prototype.toJSON = function toJSON() { + MoveTablesCreateResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetSrvKeyspaceNamesResponse + * Gets the default type url for MoveTablesCreateResponse * @function getTypeUrl - * @memberof vtctldata.GetSrvKeyspaceNamesResponse + * @memberof vtctldata.MoveTablesCreateResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetSrvKeyspaceNamesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MoveTablesCreateResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetSrvKeyspaceNamesResponse"; + return typeUrlPrefix + "/vtctldata.MoveTablesCreateResponse"; }; - GetSrvKeyspaceNamesResponse.NameList = (function() { + MoveTablesCreateResponse.TabletInfo = (function() { /** - * Properties of a NameList. - * @memberof vtctldata.GetSrvKeyspaceNamesResponse - * @interface INameList - * @property {Array.|null} [names] NameList names + * Properties of a TabletInfo. + * @memberof vtctldata.MoveTablesCreateResponse + * @interface ITabletInfo + * @property {topodata.ITabletAlias|null} [tablet] TabletInfo tablet + * @property {boolean|null} [created] TabletInfo created */ /** - * Constructs a new NameList. - * @memberof vtctldata.GetSrvKeyspaceNamesResponse - * @classdesc Represents a NameList. - * @implements INameList + * Constructs a new TabletInfo. + * @memberof vtctldata.MoveTablesCreateResponse + * @classdesc Represents a TabletInfo. 
+ * @implements ITabletInfo * @constructor - * @param {vtctldata.GetSrvKeyspaceNamesResponse.INameList=} [properties] Properties to set + * @param {vtctldata.MoveTablesCreateResponse.ITabletInfo=} [properties] Properties to set */ - function NameList(properties) { - this.names = []; + function TabletInfo(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -117406,78 +139543,89 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * NameList names. - * @member {Array.} names - * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * TabletInfo tablet. + * @member {topodata.ITabletAlias|null|undefined} tablet + * @memberof vtctldata.MoveTablesCreateResponse.TabletInfo * @instance */ - NameList.prototype.names = $util.emptyArray; + TabletInfo.prototype.tablet = null; /** - * Creates a new NameList instance using the specified properties. + * TabletInfo created. + * @member {boolean} created + * @memberof vtctldata.MoveTablesCreateResponse.TabletInfo + * @instance + */ + TabletInfo.prototype.created = false; + + /** + * Creates a new TabletInfo instance using the specified properties. * @function create - * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * @memberof vtctldata.MoveTablesCreateResponse.TabletInfo * @static - * @param {vtctldata.GetSrvKeyspaceNamesResponse.INameList=} [properties] Properties to set - * @returns {vtctldata.GetSrvKeyspaceNamesResponse.NameList} NameList instance + * @param {vtctldata.MoveTablesCreateResponse.ITabletInfo=} [properties] Properties to set + * @returns {vtctldata.MoveTablesCreateResponse.TabletInfo} TabletInfo instance */ - NameList.create = function create(properties) { - return new NameList(properties); + TabletInfo.create = function create(properties) { + return new TabletInfo(properties); }; /** - * Encodes the specified NameList message. 
Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesResponse.NameList.verify|verify} messages. + * Encodes the specified TabletInfo message. Does not implicitly {@link vtctldata.MoveTablesCreateResponse.TabletInfo.verify|verify} messages. * @function encode - * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * @memberof vtctldata.MoveTablesCreateResponse.TabletInfo * @static - * @param {vtctldata.GetSrvKeyspaceNamesResponse.INameList} message NameList message or plain object to encode + * @param {vtctldata.MoveTablesCreateResponse.ITabletInfo} message TabletInfo message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - NameList.encode = function encode(message, writer) { + TabletInfo.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.names != null && message.names.length) - for (let i = 0; i < message.names.length; ++i) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.names[i]); + if (message.tablet != null && Object.hasOwnProperty.call(message, "tablet")) + $root.topodata.TabletAlias.encode(message.tablet, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.created != null && Object.hasOwnProperty.call(message, "created")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.created); return writer; }; /** - * Encodes the specified NameList message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspaceNamesResponse.NameList.verify|verify} messages. + * Encodes the specified TabletInfo message, length delimited. Does not implicitly {@link vtctldata.MoveTablesCreateResponse.TabletInfo.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * @memberof vtctldata.MoveTablesCreateResponse.TabletInfo * @static - * @param {vtctldata.GetSrvKeyspaceNamesResponse.INameList} message NameList message or plain object to encode + * @param {vtctldata.MoveTablesCreateResponse.ITabletInfo} message TabletInfo message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - NameList.encodeDelimited = function encodeDelimited(message, writer) { + TabletInfo.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a NameList message from the specified reader or buffer. + * Decodes a TabletInfo message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * @memberof vtctldata.MoveTablesCreateResponse.TabletInfo * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetSrvKeyspaceNamesResponse.NameList} NameList + * @returns {vtctldata.MoveTablesCreateResponse.TabletInfo} TabletInfo * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - NameList.decode = function decode(reader, length) { + TabletInfo.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetSrvKeyspaceNamesResponse.NameList(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.MoveTablesCreateResponse.TabletInfo(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.names && message.names.length)) - message.names = []; - message.names.push(reader.string()); + message.tablet = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 2: { + message.created = reader.bool(); break; } default: @@ -117489,479 +139637,290 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a NameList message from the specified reader or buffer, length delimited. + * Decodes a TabletInfo message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * @memberof vtctldata.MoveTablesCreateResponse.TabletInfo * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetSrvKeyspaceNamesResponse.NameList} NameList + * @returns {vtctldata.MoveTablesCreateResponse.TabletInfo} TabletInfo * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - NameList.decodeDelimited = function decodeDelimited(reader) { + TabletInfo.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a NameList message. + * Verifies a TabletInfo message. 
* @function verify - * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * @memberof vtctldata.MoveTablesCreateResponse.TabletInfo * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - NameList.verify = function verify(message) { + TabletInfo.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.names != null && message.hasOwnProperty("names")) { - if (!Array.isArray(message.names)) - return "names: array expected"; - for (let i = 0; i < message.names.length; ++i) - if (!$util.isString(message.names[i])) - return "names: string[] expected"; + if (message.tablet != null && message.hasOwnProperty("tablet")) { + let error = $root.topodata.TabletAlias.verify(message.tablet); + if (error) + return "tablet." + error; } + if (message.created != null && message.hasOwnProperty("created")) + if (typeof message.created !== "boolean") + return "created: boolean expected"; return null; }; /** - * Creates a NameList message from a plain object. Also converts values to their respective internal types. + * Creates a TabletInfo message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * @memberof vtctldata.MoveTablesCreateResponse.TabletInfo * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetSrvKeyspaceNamesResponse.NameList} NameList + * @returns {vtctldata.MoveTablesCreateResponse.TabletInfo} TabletInfo */ - NameList.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetSrvKeyspaceNamesResponse.NameList) + TabletInfo.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.MoveTablesCreateResponse.TabletInfo) return object; - let message = new $root.vtctldata.GetSrvKeyspaceNamesResponse.NameList(); - if (object.names) { - if (!Array.isArray(object.names)) - throw TypeError(".vtctldata.GetSrvKeyspaceNamesResponse.NameList.names: array expected"); - message.names = []; - for (let i = 0; i < object.names.length; ++i) - message.names[i] = String(object.names[i]); + let message = new $root.vtctldata.MoveTablesCreateResponse.TabletInfo(); + if (object.tablet != null) { + if (typeof object.tablet !== "object") + throw TypeError(".vtctldata.MoveTablesCreateResponse.TabletInfo.tablet: object expected"); + message.tablet = $root.topodata.TabletAlias.fromObject(object.tablet); } + if (object.created != null) + message.created = Boolean(object.created); return message; }; /** - * Creates a plain object from a NameList message. Also converts values to other types if specified. + * Creates a plain object from a TabletInfo message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList + * @memberof vtctldata.MoveTablesCreateResponse.TabletInfo * @static - * @param {vtctldata.GetSrvKeyspaceNamesResponse.NameList} message NameList + * @param {vtctldata.MoveTablesCreateResponse.TabletInfo} message TabletInfo * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - NameList.toObject = function toObject(message, options) { + TabletInfo.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.names = []; - if (message.names && message.names.length) { - object.names = []; - for (let j = 0; j < message.names.length; ++j) - object.names[j] = message.names[j]; - } - return object; - }; - - /** - * Converts this NameList to JSON. - * @function toJSON - * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList - * @instance - * @returns {Object.} JSON object - */ - NameList.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - /** - * Gets the default type url for NameList - * @function getTypeUrl - * @memberof vtctldata.GetSrvKeyspaceNamesResponse.NameList - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - NameList.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/vtctldata.GetSrvKeyspaceNamesResponse.NameList"; - }; - - return NameList; - })(); - - return GetSrvKeyspaceNamesResponse; - })(); - - vtctldata.GetSrvKeyspacesRequest = (function() { - - /** - * Properties of a GetSrvKeyspacesRequest. 
- * @memberof vtctldata - * @interface IGetSrvKeyspacesRequest - * @property {string|null} [keyspace] GetSrvKeyspacesRequest keyspace - * @property {Array.|null} [cells] GetSrvKeyspacesRequest cells - */ - - /** - * Constructs a new GetSrvKeyspacesRequest. - * @memberof vtctldata - * @classdesc Represents a GetSrvKeyspacesRequest. - * @implements IGetSrvKeyspacesRequest - * @constructor - * @param {vtctldata.IGetSrvKeyspacesRequest=} [properties] Properties to set - */ - function GetSrvKeyspacesRequest(properties) { - this.cells = []; - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } - - /** - * GetSrvKeyspacesRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.GetSrvKeyspacesRequest - * @instance - */ - GetSrvKeyspacesRequest.prototype.keyspace = ""; - - /** - * GetSrvKeyspacesRequest cells. - * @member {Array.} cells - * @memberof vtctldata.GetSrvKeyspacesRequest - * @instance - */ - GetSrvKeyspacesRequest.prototype.cells = $util.emptyArray; - - /** - * Creates a new GetSrvKeyspacesRequest instance using the specified properties. - * @function create - * @memberof vtctldata.GetSrvKeyspacesRequest - * @static - * @param {vtctldata.IGetSrvKeyspacesRequest=} [properties] Properties to set - * @returns {vtctldata.GetSrvKeyspacesRequest} GetSrvKeyspacesRequest instance - */ - GetSrvKeyspacesRequest.create = function create(properties) { - return new GetSrvKeyspacesRequest(properties); - }; - - /** - * Encodes the specified GetSrvKeyspacesRequest message. Does not implicitly {@link vtctldata.GetSrvKeyspacesRequest.verify|verify} messages. 
- * @function encode - * @memberof vtctldata.GetSrvKeyspacesRequest - * @static - * @param {vtctldata.IGetSrvKeyspacesRequest} message GetSrvKeyspacesRequest message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - GetSrvKeyspacesRequest.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.cells != null && message.cells.length) - for (let i = 0; i < message.cells.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.cells[i]); - return writer; - }; + if (options.defaults) { + object.tablet = null; + object.created = false; + } + if (message.tablet != null && message.hasOwnProperty("tablet")) + object.tablet = $root.topodata.TabletAlias.toObject(message.tablet, options); + if (message.created != null && message.hasOwnProperty("created")) + object.created = message.created; + return object; + }; - /** - * Encodes the specified GetSrvKeyspacesRequest message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspacesRequest.verify|verify} messages. - * @function encodeDelimited - * @memberof vtctldata.GetSrvKeyspacesRequest - * @static - * @param {vtctldata.IGetSrvKeyspacesRequest} message GetSrvKeyspacesRequest message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - GetSrvKeyspacesRequest.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; + /** + * Converts this TabletInfo to JSON. 
+ * @function toJSON + * @memberof vtctldata.MoveTablesCreateResponse.TabletInfo + * @instance + * @returns {Object.} JSON object + */ + TabletInfo.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; - /** - * Decodes a GetSrvKeyspacesRequest message from the specified reader or buffer. - * @function decode - * @memberof vtctldata.GetSrvKeyspacesRequest - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetSrvKeyspacesRequest} GetSrvKeyspacesRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - GetSrvKeyspacesRequest.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetSrvKeyspacesRequest(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - if (!(message.cells && message.cells.length)) - message.cells = []; - message.cells.push(reader.string()); - break; - } - default: - reader.skipType(tag & 7); - break; + /** + * Gets the default type url for TabletInfo + * @function getTypeUrl + * @memberof vtctldata.MoveTablesCreateResponse.TabletInfo + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + TabletInfo.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; } - } - return message; - }; + return typeUrlPrefix + "/vtctldata.MoveTablesCreateResponse.TabletInfo"; + }; - /** - * Decodes a GetSrvKeyspacesRequest message from the specified reader 
or buffer, length delimited. - * @function decodeDelimited - * @memberof vtctldata.GetSrvKeyspacesRequest - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetSrvKeyspacesRequest} GetSrvKeyspacesRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - GetSrvKeyspacesRequest.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; + return TabletInfo; + })(); + + return MoveTablesCreateResponse; + })(); + + vtctldata.MoveTablesCompleteRequest = (function() { /** - * Verifies a GetSrvKeyspacesRequest message. - * @function verify - * @memberof vtctldata.GetSrvKeyspacesRequest - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not + * Properties of a MoveTablesCompleteRequest. 
+ * @memberof vtctldata + * @interface IMoveTablesCompleteRequest + * @property {string|null} [workflow] MoveTablesCompleteRequest workflow + * @property {string|null} [target_keyspace] MoveTablesCompleteRequest target_keyspace + * @property {boolean|null} [keep_data] MoveTablesCompleteRequest keep_data + * @property {boolean|null} [keep_routing_rules] MoveTablesCompleteRequest keep_routing_rules + * @property {boolean|null} [rename_tables] MoveTablesCompleteRequest rename_tables + * @property {boolean|null} [dry_run] MoveTablesCompleteRequest dry_run */ - GetSrvKeyspacesRequest.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.cells != null && message.hasOwnProperty("cells")) { - if (!Array.isArray(message.cells)) - return "cells: array expected"; - for (let i = 0; i < message.cells.length; ++i) - if (!$util.isString(message.cells[i])) - return "cells: string[] expected"; - } - return null; - }; /** - * Creates a GetSrvKeyspacesRequest message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof vtctldata.GetSrvKeyspacesRequest - * @static - * @param {Object.} object Plain object - * @returns {vtctldata.GetSrvKeyspacesRequest} GetSrvKeyspacesRequest + * Constructs a new MoveTablesCompleteRequest. + * @memberof vtctldata + * @classdesc Represents a MoveTablesCompleteRequest. 
+ * @implements IMoveTablesCompleteRequest + * @constructor + * @param {vtctldata.IMoveTablesCompleteRequest=} [properties] Properties to set */ - GetSrvKeyspacesRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetSrvKeyspacesRequest) - return object; - let message = new $root.vtctldata.GetSrvKeyspacesRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.cells) { - if (!Array.isArray(object.cells)) - throw TypeError(".vtctldata.GetSrvKeyspacesRequest.cells: array expected"); - message.cells = []; - for (let i = 0; i < object.cells.length; ++i) - message.cells[i] = String(object.cells[i]); - } - return message; - }; + function MoveTablesCompleteRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } /** - * Creates a plain object from a GetSrvKeyspacesRequest message. Also converts values to other types if specified. - * @function toObject - * @memberof vtctldata.GetSrvKeyspacesRequest - * @static - * @param {vtctldata.GetSrvKeyspacesRequest} message GetSrvKeyspacesRequest - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object + * MoveTablesCompleteRequest workflow. 
+ * @member {string} workflow + * @memberof vtctldata.MoveTablesCompleteRequest + * @instance */ - GetSrvKeyspacesRequest.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.arrays || options.defaults) - object.cells = []; - if (options.defaults) - object.keyspace = ""; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.cells && message.cells.length) { - object.cells = []; - for (let j = 0; j < message.cells.length; ++j) - object.cells[j] = message.cells[j]; - } - return object; - }; + MoveTablesCompleteRequest.prototype.workflow = ""; /** - * Converts this GetSrvKeyspacesRequest to JSON. - * @function toJSON - * @memberof vtctldata.GetSrvKeyspacesRequest + * MoveTablesCompleteRequest target_keyspace. + * @member {string} target_keyspace + * @memberof vtctldata.MoveTablesCompleteRequest * @instance - * @returns {Object.} JSON object */ - GetSrvKeyspacesRequest.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; + MoveTablesCompleteRequest.prototype.target_keyspace = ""; /** - * Gets the default type url for GetSrvKeyspacesRequest - * @function getTypeUrl - * @memberof vtctldata.GetSrvKeyspacesRequest - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url + * MoveTablesCompleteRequest keep_data. 
+ * @member {boolean} keep_data + * @memberof vtctldata.MoveTablesCompleteRequest + * @instance */ - GetSrvKeyspacesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/vtctldata.GetSrvKeyspacesRequest"; - }; - - return GetSrvKeyspacesRequest; - })(); - - vtctldata.GetSrvKeyspacesResponse = (function() { + MoveTablesCompleteRequest.prototype.keep_data = false; /** - * Properties of a GetSrvKeyspacesResponse. - * @memberof vtctldata - * @interface IGetSrvKeyspacesResponse - * @property {Object.|null} [srv_keyspaces] GetSrvKeyspacesResponse srv_keyspaces + * MoveTablesCompleteRequest keep_routing_rules. + * @member {boolean} keep_routing_rules + * @memberof vtctldata.MoveTablesCompleteRequest + * @instance */ + MoveTablesCompleteRequest.prototype.keep_routing_rules = false; /** - * Constructs a new GetSrvKeyspacesResponse. - * @memberof vtctldata - * @classdesc Represents a GetSrvKeyspacesResponse. - * @implements IGetSrvKeyspacesResponse - * @constructor - * @param {vtctldata.IGetSrvKeyspacesResponse=} [properties] Properties to set + * MoveTablesCompleteRequest rename_tables. + * @member {boolean} rename_tables + * @memberof vtctldata.MoveTablesCompleteRequest + * @instance */ - function GetSrvKeyspacesResponse(properties) { - this.srv_keyspaces = {}; - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + MoveTablesCompleteRequest.prototype.rename_tables = false; /** - * GetSrvKeyspacesResponse srv_keyspaces. - * @member {Object.} srv_keyspaces - * @memberof vtctldata.GetSrvKeyspacesResponse + * MoveTablesCompleteRequest dry_run. 
+ * @member {boolean} dry_run + * @memberof vtctldata.MoveTablesCompleteRequest * @instance */ - GetSrvKeyspacesResponse.prototype.srv_keyspaces = $util.emptyObject; + MoveTablesCompleteRequest.prototype.dry_run = false; /** - * Creates a new GetSrvKeyspacesResponse instance using the specified properties. + * Creates a new MoveTablesCompleteRequest instance using the specified properties. * @function create - * @memberof vtctldata.GetSrvKeyspacesResponse + * @memberof vtctldata.MoveTablesCompleteRequest * @static - * @param {vtctldata.IGetSrvKeyspacesResponse=} [properties] Properties to set - * @returns {vtctldata.GetSrvKeyspacesResponse} GetSrvKeyspacesResponse instance + * @param {vtctldata.IMoveTablesCompleteRequest=} [properties] Properties to set + * @returns {vtctldata.MoveTablesCompleteRequest} MoveTablesCompleteRequest instance */ - GetSrvKeyspacesResponse.create = function create(properties) { - return new GetSrvKeyspacesResponse(properties); + MoveTablesCompleteRequest.create = function create(properties) { + return new MoveTablesCompleteRequest(properties); }; /** - * Encodes the specified GetSrvKeyspacesResponse message. Does not implicitly {@link vtctldata.GetSrvKeyspacesResponse.verify|verify} messages. + * Encodes the specified MoveTablesCompleteRequest message. Does not implicitly {@link vtctldata.MoveTablesCompleteRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetSrvKeyspacesResponse + * @memberof vtctldata.MoveTablesCompleteRequest * @static - * @param {vtctldata.IGetSrvKeyspacesResponse} message GetSrvKeyspacesResponse message or plain object to encode + * @param {vtctldata.IMoveTablesCompleteRequest} message MoveTablesCompleteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSrvKeyspacesResponse.encode = function encode(message, writer) { + MoveTablesCompleteRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.srv_keyspaces != null && Object.hasOwnProperty.call(message, "srv_keyspaces")) - for (let keys = Object.keys(message.srv_keyspaces), i = 0; i < keys.length; ++i) { - writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); - $root.topodata.SrvKeyspace.encode(message.srv_keyspaces[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); - } + if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.workflow); + if (message.target_keyspace != null && Object.hasOwnProperty.call(message, "target_keyspace")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.target_keyspace); + if (message.keep_data != null && Object.hasOwnProperty.call(message, "keep_data")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.keep_data); + if (message.keep_routing_rules != null && Object.hasOwnProperty.call(message, "keep_routing_rules")) + writer.uint32(/* id 5, wireType 0 =*/40).bool(message.keep_routing_rules); + if (message.rename_tables != null && Object.hasOwnProperty.call(message, "rename_tables")) + writer.uint32(/* id 6, wireType 0 =*/48).bool(message.rename_tables); + if (message.dry_run != null && Object.hasOwnProperty.call(message, "dry_run")) + writer.uint32(/* id 7, wireType 0 
=*/56).bool(message.dry_run); return writer; }; /** - * Encodes the specified GetSrvKeyspacesResponse message, length delimited. Does not implicitly {@link vtctldata.GetSrvKeyspacesResponse.verify|verify} messages. + * Encodes the specified MoveTablesCompleteRequest message, length delimited. Does not implicitly {@link vtctldata.MoveTablesCompleteRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetSrvKeyspacesResponse + * @memberof vtctldata.MoveTablesCompleteRequest * @static - * @param {vtctldata.IGetSrvKeyspacesResponse} message GetSrvKeyspacesResponse message or plain object to encode + * @param {vtctldata.IMoveTablesCompleteRequest} message MoveTablesCompleteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSrvKeyspacesResponse.encodeDelimited = function encodeDelimited(message, writer) { + MoveTablesCompleteRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetSrvKeyspacesResponse message from the specified reader or buffer. + * Decodes a MoveTablesCompleteRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.GetSrvKeyspacesResponse + * @memberof vtctldata.MoveTablesCompleteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetSrvKeyspacesResponse} GetSrvKeyspacesResponse + * @returns {vtctldata.MoveTablesCompleteRequest} MoveTablesCompleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSrvKeyspacesResponse.decode = function decode(reader, length) { + MoveTablesCompleteRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetSrvKeyspacesResponse(), key, value; + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.MoveTablesCompleteRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (message.srv_keyspaces === $util.emptyObject) - message.srv_keyspaces = {}; - let end2 = reader.uint32() + reader.pos; - key = ""; - value = null; - while (reader.pos < end2) { - let tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = $root.topodata.SrvKeyspace.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.srv_keyspaces[key] = value; + message.workflow = reader.string(); + break; + } + case 3: { + message.target_keyspace = reader.string(); + break; + } + case 4: { + message.keep_data = reader.bool(); + break; + } + case 5: { + message.keep_routing_rules = reader.bool(); + break; + } + case 6: { + message.rename_tables = reader.bool(); + break; + } + case 7: { + message.dry_run = reader.bool(); break; } default: @@ -117973,148 +139932,165 @@ 
export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetSrvKeyspacesResponse message from the specified reader or buffer, length delimited. + * Decodes a MoveTablesCompleteRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetSrvKeyspacesResponse + * @memberof vtctldata.MoveTablesCompleteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetSrvKeyspacesResponse} GetSrvKeyspacesResponse + * @returns {vtctldata.MoveTablesCompleteRequest} MoveTablesCompleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSrvKeyspacesResponse.decodeDelimited = function decodeDelimited(reader) { + MoveTablesCompleteRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetSrvKeyspacesResponse message. + * Verifies a MoveTablesCompleteRequest message. * @function verify - * @memberof vtctldata.GetSrvKeyspacesResponse + * @memberof vtctldata.MoveTablesCompleteRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetSrvKeyspacesResponse.verify = function verify(message) { + MoveTablesCompleteRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.srv_keyspaces != null && message.hasOwnProperty("srv_keyspaces")) { - if (!$util.isObject(message.srv_keyspaces)) - return "srv_keyspaces: object expected"; - let key = Object.keys(message.srv_keyspaces); - for (let i = 0; i < key.length; ++i) { - let error = $root.topodata.SrvKeyspace.verify(message.srv_keyspaces[key[i]]); - if (error) - return "srv_keyspaces." 
+ error; - } - } + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + if (!$util.isString(message.target_keyspace)) + return "target_keyspace: string expected"; + if (message.keep_data != null && message.hasOwnProperty("keep_data")) + if (typeof message.keep_data !== "boolean") + return "keep_data: boolean expected"; + if (message.keep_routing_rules != null && message.hasOwnProperty("keep_routing_rules")) + if (typeof message.keep_routing_rules !== "boolean") + return "keep_routing_rules: boolean expected"; + if (message.rename_tables != null && message.hasOwnProperty("rename_tables")) + if (typeof message.rename_tables !== "boolean") + return "rename_tables: boolean expected"; + if (message.dry_run != null && message.hasOwnProperty("dry_run")) + if (typeof message.dry_run !== "boolean") + return "dry_run: boolean expected"; return null; }; /** - * Creates a GetSrvKeyspacesResponse message from a plain object. Also converts values to their respective internal types. + * Creates a MoveTablesCompleteRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetSrvKeyspacesResponse + * @memberof vtctldata.MoveTablesCompleteRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetSrvKeyspacesResponse} GetSrvKeyspacesResponse + * @returns {vtctldata.MoveTablesCompleteRequest} MoveTablesCompleteRequest */ - GetSrvKeyspacesResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetSrvKeyspacesResponse) + MoveTablesCompleteRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.MoveTablesCompleteRequest) return object; - let message = new $root.vtctldata.GetSrvKeyspacesResponse(); - if (object.srv_keyspaces) { - if (typeof object.srv_keyspaces !== "object") - throw TypeError(".vtctldata.GetSrvKeyspacesResponse.srv_keyspaces: object expected"); - message.srv_keyspaces = {}; - for (let keys = Object.keys(object.srv_keyspaces), i = 0; i < keys.length; ++i) { - if (typeof object.srv_keyspaces[keys[i]] !== "object") - throw TypeError(".vtctldata.GetSrvKeyspacesResponse.srv_keyspaces: object expected"); - message.srv_keyspaces[keys[i]] = $root.topodata.SrvKeyspace.fromObject(object.srv_keyspaces[keys[i]]); - } - } + let message = new $root.vtctldata.MoveTablesCompleteRequest(); + if (object.workflow != null) + message.workflow = String(object.workflow); + if (object.target_keyspace != null) + message.target_keyspace = String(object.target_keyspace); + if (object.keep_data != null) + message.keep_data = Boolean(object.keep_data); + if (object.keep_routing_rules != null) + message.keep_routing_rules = Boolean(object.keep_routing_rules); + if (object.rename_tables != null) + message.rename_tables = Boolean(object.rename_tables); + if (object.dry_run != null) + message.dry_run = Boolean(object.dry_run); return message; }; /** - * Creates a plain object from a GetSrvKeyspacesResponse message. Also converts values to other types if specified. 
+ * Creates a plain object from a MoveTablesCompleteRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.GetSrvKeyspacesResponse + * @memberof vtctldata.MoveTablesCompleteRequest * @static - * @param {vtctldata.GetSrvKeyspacesResponse} message GetSrvKeyspacesResponse + * @param {vtctldata.MoveTablesCompleteRequest} message MoveTablesCompleteRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetSrvKeyspacesResponse.toObject = function toObject(message, options) { + MoveTablesCompleteRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.objects || options.defaults) - object.srv_keyspaces = {}; - let keys2; - if (message.srv_keyspaces && (keys2 = Object.keys(message.srv_keyspaces)).length) { - object.srv_keyspaces = {}; - for (let j = 0; j < keys2.length; ++j) - object.srv_keyspaces[keys2[j]] = $root.topodata.SrvKeyspace.toObject(message.srv_keyspaces[keys2[j]], options); + if (options.defaults) { + object.workflow = ""; + object.target_keyspace = ""; + object.keep_data = false; + object.keep_routing_rules = false; + object.rename_tables = false; + object.dry_run = false; } + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + object.target_keyspace = message.target_keyspace; + if (message.keep_data != null && message.hasOwnProperty("keep_data")) + object.keep_data = message.keep_data; + if (message.keep_routing_rules != null && message.hasOwnProperty("keep_routing_rules")) + object.keep_routing_rules = message.keep_routing_rules; + if (message.rename_tables != null && message.hasOwnProperty("rename_tables")) + object.rename_tables = message.rename_tables; + if (message.dry_run != null && message.hasOwnProperty("dry_run")) + object.dry_run = 
message.dry_run; return object; }; /** - * Converts this GetSrvKeyspacesResponse to JSON. + * Converts this MoveTablesCompleteRequest to JSON. * @function toJSON - * @memberof vtctldata.GetSrvKeyspacesResponse + * @memberof vtctldata.MoveTablesCompleteRequest * @instance * @returns {Object.} JSON object */ - GetSrvKeyspacesResponse.prototype.toJSON = function toJSON() { + MoveTablesCompleteRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetSrvKeyspacesResponse + * Gets the default type url for MoveTablesCompleteRequest * @function getTypeUrl - * @memberof vtctldata.GetSrvKeyspacesResponse + * @memberof vtctldata.MoveTablesCompleteRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetSrvKeyspacesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MoveTablesCompleteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetSrvKeyspacesResponse"; + return typeUrlPrefix + "/vtctldata.MoveTablesCompleteRequest"; }; - return GetSrvKeyspacesResponse; + return MoveTablesCompleteRequest; })(); - vtctldata.UpdateThrottlerConfigRequest = (function() { + vtctldata.MoveTablesCompleteResponse = (function() { /** - * Properties of an UpdateThrottlerConfigRequest. + * Properties of a MoveTablesCompleteResponse. 
* @memberof vtctldata - * @interface IUpdateThrottlerConfigRequest - * @property {string|null} [keyspace] UpdateThrottlerConfigRequest keyspace - * @property {boolean|null} [enable] UpdateThrottlerConfigRequest enable - * @property {boolean|null} [disable] UpdateThrottlerConfigRequest disable - * @property {number|null} [threshold] UpdateThrottlerConfigRequest threshold - * @property {string|null} [custom_query] UpdateThrottlerConfigRequest custom_query - * @property {boolean|null} [custom_query_set] UpdateThrottlerConfigRequest custom_query_set - * @property {boolean|null} [check_as_check_self] UpdateThrottlerConfigRequest check_as_check_self - * @property {boolean|null} [check_as_check_shard] UpdateThrottlerConfigRequest check_as_check_shard + * @interface IMoveTablesCompleteResponse + * @property {string|null} [summary] MoveTablesCompleteResponse summary + * @property {Array.|null} [dry_run_results] MoveTablesCompleteResponse dry_run_results */ /** - * Constructs a new UpdateThrottlerConfigRequest. + * Constructs a new MoveTablesCompleteResponse. * @memberof vtctldata - * @classdesc Represents an UpdateThrottlerConfigRequest. - * @implements IUpdateThrottlerConfigRequest + * @classdesc Represents a MoveTablesCompleteResponse. + * @implements IMoveTablesCompleteResponse * @constructor - * @param {vtctldata.IUpdateThrottlerConfigRequest=} [properties] Properties to set + * @param {vtctldata.IMoveTablesCompleteResponse=} [properties] Properties to set */ - function UpdateThrottlerConfigRequest(properties) { + function MoveTablesCompleteResponse(properties) { + this.dry_run_results = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -118122,173 +140098,92 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * UpdateThrottlerConfigRequest keyspace. 
- * @member {string} keyspace - * @memberof vtctldata.UpdateThrottlerConfigRequest - * @instance - */ - UpdateThrottlerConfigRequest.prototype.keyspace = ""; - - /** - * UpdateThrottlerConfigRequest enable. - * @member {boolean} enable - * @memberof vtctldata.UpdateThrottlerConfigRequest - * @instance - */ - UpdateThrottlerConfigRequest.prototype.enable = false; - - /** - * UpdateThrottlerConfigRequest disable. - * @member {boolean} disable - * @memberof vtctldata.UpdateThrottlerConfigRequest - * @instance - */ - UpdateThrottlerConfigRequest.prototype.disable = false; - - /** - * UpdateThrottlerConfigRequest threshold. - * @member {number} threshold - * @memberof vtctldata.UpdateThrottlerConfigRequest - * @instance - */ - UpdateThrottlerConfigRequest.prototype.threshold = 0; - - /** - * UpdateThrottlerConfigRequest custom_query. - * @member {string} custom_query - * @memberof vtctldata.UpdateThrottlerConfigRequest - * @instance - */ - UpdateThrottlerConfigRequest.prototype.custom_query = ""; - - /** - * UpdateThrottlerConfigRequest custom_query_set. - * @member {boolean} custom_query_set - * @memberof vtctldata.UpdateThrottlerConfigRequest - * @instance - */ - UpdateThrottlerConfigRequest.prototype.custom_query_set = false; - - /** - * UpdateThrottlerConfigRequest check_as_check_self. - * @member {boolean} check_as_check_self - * @memberof vtctldata.UpdateThrottlerConfigRequest + * MoveTablesCompleteResponse summary. + * @member {string} summary + * @memberof vtctldata.MoveTablesCompleteResponse * @instance */ - UpdateThrottlerConfigRequest.prototype.check_as_check_self = false; + MoveTablesCompleteResponse.prototype.summary = ""; /** - * UpdateThrottlerConfigRequest check_as_check_shard. - * @member {boolean} check_as_check_shard - * @memberof vtctldata.UpdateThrottlerConfigRequest + * MoveTablesCompleteResponse dry_run_results. 
+ * @member {Array.} dry_run_results + * @memberof vtctldata.MoveTablesCompleteResponse * @instance */ - UpdateThrottlerConfigRequest.prototype.check_as_check_shard = false; + MoveTablesCompleteResponse.prototype.dry_run_results = $util.emptyArray; /** - * Creates a new UpdateThrottlerConfigRequest instance using the specified properties. + * Creates a new MoveTablesCompleteResponse instance using the specified properties. * @function create - * @memberof vtctldata.UpdateThrottlerConfigRequest + * @memberof vtctldata.MoveTablesCompleteResponse * @static - * @param {vtctldata.IUpdateThrottlerConfigRequest=} [properties] Properties to set - * @returns {vtctldata.UpdateThrottlerConfigRequest} UpdateThrottlerConfigRequest instance + * @param {vtctldata.IMoveTablesCompleteResponse=} [properties] Properties to set + * @returns {vtctldata.MoveTablesCompleteResponse} MoveTablesCompleteResponse instance */ - UpdateThrottlerConfigRequest.create = function create(properties) { - return new UpdateThrottlerConfigRequest(properties); + MoveTablesCompleteResponse.create = function create(properties) { + return new MoveTablesCompleteResponse(properties); }; /** - * Encodes the specified UpdateThrottlerConfigRequest message. Does not implicitly {@link vtctldata.UpdateThrottlerConfigRequest.verify|verify} messages. + * Encodes the specified MoveTablesCompleteResponse message. Does not implicitly {@link vtctldata.MoveTablesCompleteResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.UpdateThrottlerConfigRequest + * @memberof vtctldata.MoveTablesCompleteResponse * @static - * @param {vtctldata.IUpdateThrottlerConfigRequest} message UpdateThrottlerConfigRequest message or plain object to encode + * @param {vtctldata.IMoveTablesCompleteResponse} message MoveTablesCompleteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - UpdateThrottlerConfigRequest.encode = function encode(message, writer) { + MoveTablesCompleteResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.enable != null && Object.hasOwnProperty.call(message, "enable")) - writer.uint32(/* id 2, wireType 0 =*/16).bool(message.enable); - if (message.disable != null && Object.hasOwnProperty.call(message, "disable")) - writer.uint32(/* id 3, wireType 0 =*/24).bool(message.disable); - if (message.threshold != null && Object.hasOwnProperty.call(message, "threshold")) - writer.uint32(/* id 4, wireType 1 =*/33).double(message.threshold); - if (message.custom_query != null && Object.hasOwnProperty.call(message, "custom_query")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.custom_query); - if (message.custom_query_set != null && Object.hasOwnProperty.call(message, "custom_query_set")) - writer.uint32(/* id 6, wireType 0 =*/48).bool(message.custom_query_set); - if (message.check_as_check_self != null && Object.hasOwnProperty.call(message, "check_as_check_self")) - writer.uint32(/* id 7, wireType 0 =*/56).bool(message.check_as_check_self); - if (message.check_as_check_shard != null && Object.hasOwnProperty.call(message, "check_as_check_shard")) - writer.uint32(/* id 8, wireType 0 =*/64).bool(message.check_as_check_shard); + if (message.summary != null && 
Object.hasOwnProperty.call(message, "summary")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.summary); + if (message.dry_run_results != null && message.dry_run_results.length) + for (let i = 0; i < message.dry_run_results.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.dry_run_results[i]); return writer; }; /** - * Encodes the specified UpdateThrottlerConfigRequest message, length delimited. Does not implicitly {@link vtctldata.UpdateThrottlerConfigRequest.verify|verify} messages. + * Encodes the specified MoveTablesCompleteResponse message, length delimited. Does not implicitly {@link vtctldata.MoveTablesCompleteResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.UpdateThrottlerConfigRequest + * @memberof vtctldata.MoveTablesCompleteResponse * @static - * @param {vtctldata.IUpdateThrottlerConfigRequest} message UpdateThrottlerConfigRequest message or plain object to encode + * @param {vtctldata.IMoveTablesCompleteResponse} message MoveTablesCompleteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - UpdateThrottlerConfigRequest.encodeDelimited = function encodeDelimited(message, writer) { + MoveTablesCompleteResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); - }; - - /** - * Decodes an UpdateThrottlerConfigRequest message from the specified reader or buffer. 
- * @function decode - * @memberof vtctldata.UpdateThrottlerConfigRequest - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.UpdateThrottlerConfigRequest} UpdateThrottlerConfigRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - UpdateThrottlerConfigRequest.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.UpdateThrottlerConfigRequest(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - message.enable = reader.bool(); - break; - } - case 3: { - message.disable = reader.bool(); - break; - } - case 4: { - message.threshold = reader.double(); - break; - } - case 5: { - message.custom_query = reader.string(); - break; - } - case 6: { - message.custom_query_set = reader.bool(); - break; - } - case 7: { - message.check_as_check_self = reader.bool(); + }; + + /** + * Decodes a MoveTablesCompleteResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.MoveTablesCompleteResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.MoveTablesCompleteResponse} MoveTablesCompleteResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + MoveTablesCompleteResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.MoveTablesCompleteResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.summary = reader.string(); break; } - case 8: { - message.check_as_check_shard = reader.bool(); + case 2: { + if (!(message.dry_run_results && message.dry_run_results.length)) + message.dry_run_results = []; + message.dry_run_results.push(reader.string()); break; } default: @@ -118300,178 +140195,143 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an UpdateThrottlerConfigRequest message from the specified reader or buffer, length delimited. + * Decodes a MoveTablesCompleteResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.UpdateThrottlerConfigRequest + * @memberof vtctldata.MoveTablesCompleteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.UpdateThrottlerConfigRequest} UpdateThrottlerConfigRequest + * @returns {vtctldata.MoveTablesCompleteResponse} MoveTablesCompleteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateThrottlerConfigRequest.decodeDelimited = function decodeDelimited(reader) { + MoveTablesCompleteResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an UpdateThrottlerConfigRequest message. + * Verifies a MoveTablesCompleteResponse message. 
* @function verify - * @memberof vtctldata.UpdateThrottlerConfigRequest + * @memberof vtctldata.MoveTablesCompleteResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - UpdateThrottlerConfigRequest.verify = function verify(message) { + MoveTablesCompleteResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.enable != null && message.hasOwnProperty("enable")) - if (typeof message.enable !== "boolean") - return "enable: boolean expected"; - if (message.disable != null && message.hasOwnProperty("disable")) - if (typeof message.disable !== "boolean") - return "disable: boolean expected"; - if (message.threshold != null && message.hasOwnProperty("threshold")) - if (typeof message.threshold !== "number") - return "threshold: number expected"; - if (message.custom_query != null && message.hasOwnProperty("custom_query")) - if (!$util.isString(message.custom_query)) - return "custom_query: string expected"; - if (message.custom_query_set != null && message.hasOwnProperty("custom_query_set")) - if (typeof message.custom_query_set !== "boolean") - return "custom_query_set: boolean expected"; - if (message.check_as_check_self != null && message.hasOwnProperty("check_as_check_self")) - if (typeof message.check_as_check_self !== "boolean") - return "check_as_check_self: boolean expected"; - if (message.check_as_check_shard != null && message.hasOwnProperty("check_as_check_shard")) - if (typeof message.check_as_check_shard !== "boolean") - return "check_as_check_shard: boolean expected"; + if (message.summary != null && message.hasOwnProperty("summary")) + if (!$util.isString(message.summary)) + return "summary: string expected"; + if 
(message.dry_run_results != null && message.hasOwnProperty("dry_run_results")) { + if (!Array.isArray(message.dry_run_results)) + return "dry_run_results: array expected"; + for (let i = 0; i < message.dry_run_results.length; ++i) + if (!$util.isString(message.dry_run_results[i])) + return "dry_run_results: string[] expected"; + } return null; }; /** - * Creates an UpdateThrottlerConfigRequest message from a plain object. Also converts values to their respective internal types. + * Creates a MoveTablesCompleteResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.UpdateThrottlerConfigRequest + * @memberof vtctldata.MoveTablesCompleteResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.UpdateThrottlerConfigRequest} UpdateThrottlerConfigRequest + * @returns {vtctldata.MoveTablesCompleteResponse} MoveTablesCompleteResponse */ - UpdateThrottlerConfigRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.UpdateThrottlerConfigRequest) + MoveTablesCompleteResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.MoveTablesCompleteResponse) return object; - let message = new $root.vtctldata.UpdateThrottlerConfigRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.enable != null) - message.enable = Boolean(object.enable); - if (object.disable != null) - message.disable = Boolean(object.disable); - if (object.threshold != null) - message.threshold = Number(object.threshold); - if (object.custom_query != null) - message.custom_query = String(object.custom_query); - if (object.custom_query_set != null) - message.custom_query_set = Boolean(object.custom_query_set); - if (object.check_as_check_self != null) - message.check_as_check_self = Boolean(object.check_as_check_self); - if (object.check_as_check_shard != null) - message.check_as_check_shard = 
Boolean(object.check_as_check_shard); + let message = new $root.vtctldata.MoveTablesCompleteResponse(); + if (object.summary != null) + message.summary = String(object.summary); + if (object.dry_run_results) { + if (!Array.isArray(object.dry_run_results)) + throw TypeError(".vtctldata.MoveTablesCompleteResponse.dry_run_results: array expected"); + message.dry_run_results = []; + for (let i = 0; i < object.dry_run_results.length; ++i) + message.dry_run_results[i] = String(object.dry_run_results[i]); + } return message; }; /** - * Creates a plain object from an UpdateThrottlerConfigRequest message. Also converts values to other types if specified. + * Creates a plain object from a MoveTablesCompleteResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.UpdateThrottlerConfigRequest + * @memberof vtctldata.MoveTablesCompleteResponse * @static - * @param {vtctldata.UpdateThrottlerConfigRequest} message UpdateThrottlerConfigRequest + * @param {vtctldata.MoveTablesCompleteResponse} message MoveTablesCompleteResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - UpdateThrottlerConfigRequest.toObject = function toObject(message, options) { + MoveTablesCompleteResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.keyspace = ""; - object.enable = false; - object.disable = false; - object.threshold = 0; - object.custom_query = ""; - object.custom_query_set = false; - object.check_as_check_self = false; - object.check_as_check_shard = false; + if (options.arrays || options.defaults) + object.dry_run_results = []; + if (options.defaults) + object.summary = ""; + if (message.summary != null && message.hasOwnProperty("summary")) + object.summary = message.summary; + if (message.dry_run_results && message.dry_run_results.length) { + object.dry_run_results = []; + for (let j = 0; j 
< message.dry_run_results.length; ++j) + object.dry_run_results[j] = message.dry_run_results[j]; } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.enable != null && message.hasOwnProperty("enable")) - object.enable = message.enable; - if (message.disable != null && message.hasOwnProperty("disable")) - object.disable = message.disable; - if (message.threshold != null && message.hasOwnProperty("threshold")) - object.threshold = options.json && !isFinite(message.threshold) ? String(message.threshold) : message.threshold; - if (message.custom_query != null && message.hasOwnProperty("custom_query")) - object.custom_query = message.custom_query; - if (message.custom_query_set != null && message.hasOwnProperty("custom_query_set")) - object.custom_query_set = message.custom_query_set; - if (message.check_as_check_self != null && message.hasOwnProperty("check_as_check_self")) - object.check_as_check_self = message.check_as_check_self; - if (message.check_as_check_shard != null && message.hasOwnProperty("check_as_check_shard")) - object.check_as_check_shard = message.check_as_check_shard; return object; }; /** - * Converts this UpdateThrottlerConfigRequest to JSON. + * Converts this MoveTablesCompleteResponse to JSON. 
* @function toJSON - * @memberof vtctldata.UpdateThrottlerConfigRequest + * @memberof vtctldata.MoveTablesCompleteResponse * @instance * @returns {Object.} JSON object */ - UpdateThrottlerConfigRequest.prototype.toJSON = function toJSON() { + MoveTablesCompleteResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for UpdateThrottlerConfigRequest + * Gets the default type url for MoveTablesCompleteResponse * @function getTypeUrl - * @memberof vtctldata.UpdateThrottlerConfigRequest + * @memberof vtctldata.MoveTablesCompleteResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - UpdateThrottlerConfigRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + MoveTablesCompleteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.UpdateThrottlerConfigRequest"; + return typeUrlPrefix + "/vtctldata.MoveTablesCompleteResponse"; }; - return UpdateThrottlerConfigRequest; + return MoveTablesCompleteResponse; })(); - vtctldata.UpdateThrottlerConfigResponse = (function() { + vtctldata.PingTabletRequest = (function() { /** - * Properties of an UpdateThrottlerConfigResponse. + * Properties of a PingTabletRequest. * @memberof vtctldata - * @interface IUpdateThrottlerConfigResponse + * @interface IPingTabletRequest + * @property {topodata.ITabletAlias|null} [tablet_alias] PingTabletRequest tablet_alias */ /** - * Constructs a new UpdateThrottlerConfigResponse. + * Constructs a new PingTabletRequest. * @memberof vtctldata - * @classdesc Represents an UpdateThrottlerConfigResponse. - * @implements IUpdateThrottlerConfigResponse + * @classdesc Represents a PingTabletRequest. 
+ * @implements IPingTabletRequest * @constructor - * @param {vtctldata.IUpdateThrottlerConfigResponse=} [properties] Properties to set + * @param {vtctldata.IPingTabletRequest=} [properties] Properties to set */ - function UpdateThrottlerConfigResponse(properties) { + function PingTabletRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -118479,63 +140339,77 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new UpdateThrottlerConfigResponse instance using the specified properties. + * PingTabletRequest tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.PingTabletRequest + * @instance + */ + PingTabletRequest.prototype.tablet_alias = null; + + /** + * Creates a new PingTabletRequest instance using the specified properties. * @function create - * @memberof vtctldata.UpdateThrottlerConfigResponse + * @memberof vtctldata.PingTabletRequest * @static - * @param {vtctldata.IUpdateThrottlerConfigResponse=} [properties] Properties to set - * @returns {vtctldata.UpdateThrottlerConfigResponse} UpdateThrottlerConfigResponse instance + * @param {vtctldata.IPingTabletRequest=} [properties] Properties to set + * @returns {vtctldata.PingTabletRequest} PingTabletRequest instance */ - UpdateThrottlerConfigResponse.create = function create(properties) { - return new UpdateThrottlerConfigResponse(properties); + PingTabletRequest.create = function create(properties) { + return new PingTabletRequest(properties); }; /** - * Encodes the specified UpdateThrottlerConfigResponse message. Does not implicitly {@link vtctldata.UpdateThrottlerConfigResponse.verify|verify} messages. + * Encodes the specified PingTabletRequest message. Does not implicitly {@link vtctldata.PingTabletRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.UpdateThrottlerConfigResponse + * @memberof vtctldata.PingTabletRequest * @static - * @param {vtctldata.IUpdateThrottlerConfigResponse} message UpdateThrottlerConfigResponse message or plain object to encode + * @param {vtctldata.IPingTabletRequest} message PingTabletRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - UpdateThrottlerConfigResponse.encode = function encode(message, writer) { + PingTabletRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified UpdateThrottlerConfigResponse message, length delimited. Does not implicitly {@link vtctldata.UpdateThrottlerConfigResponse.verify|verify} messages. + * Encodes the specified PingTabletRequest message, length delimited. Does not implicitly {@link vtctldata.PingTabletRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.UpdateThrottlerConfigResponse + * @memberof vtctldata.PingTabletRequest * @static - * @param {vtctldata.IUpdateThrottlerConfigResponse} message UpdateThrottlerConfigResponse message or plain object to encode + * @param {vtctldata.IPingTabletRequest} message PingTabletRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - UpdateThrottlerConfigResponse.encodeDelimited = function encodeDelimited(message, writer) { + PingTabletRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an UpdateThrottlerConfigResponse message from the specified reader or buffer. 
+ * Decodes a PingTabletRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.UpdateThrottlerConfigResponse + * @memberof vtctldata.PingTabletRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.UpdateThrottlerConfigResponse} UpdateThrottlerConfigResponse + * @returns {vtctldata.PingTabletRequest} PingTabletRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateThrottlerConfigResponse.decode = function decode(reader, length) { + PingTabletRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.UpdateThrottlerConfigResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.PingTabletRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -118545,109 +140419,126 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an UpdateThrottlerConfigResponse message from the specified reader or buffer, length delimited. + * Decodes a PingTabletRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.UpdateThrottlerConfigResponse + * @memberof vtctldata.PingTabletRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.UpdateThrottlerConfigResponse} UpdateThrottlerConfigResponse + * @returns {vtctldata.PingTabletRequest} PingTabletRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateThrottlerConfigResponse.decodeDelimited = function decodeDelimited(reader) { + PingTabletRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an UpdateThrottlerConfigResponse message. + * Verifies a PingTabletRequest message. * @function verify - * @memberof vtctldata.UpdateThrottlerConfigResponse + * @memberof vtctldata.PingTabletRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - UpdateThrottlerConfigResponse.verify = function verify(message) { + PingTabletRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; + } return null; }; /** - * Creates an UpdateThrottlerConfigResponse message from a plain object. Also converts values to their respective internal types. + * Creates a PingTabletRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.UpdateThrottlerConfigResponse + * @memberof vtctldata.PingTabletRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.UpdateThrottlerConfigResponse} UpdateThrottlerConfigResponse + * @returns {vtctldata.PingTabletRequest} PingTabletRequest */ - UpdateThrottlerConfigResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.UpdateThrottlerConfigResponse) + PingTabletRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.PingTabletRequest) return object; - return new $root.vtctldata.UpdateThrottlerConfigResponse(); + let message = new $root.vtctldata.PingTabletRequest(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.PingTabletRequest.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } + return message; }; /** - * Creates a plain object from an UpdateThrottlerConfigResponse message. Also converts values to other types if specified. + * Creates a plain object from a PingTabletRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.UpdateThrottlerConfigResponse + * @memberof vtctldata.PingTabletRequest * @static - * @param {vtctldata.UpdateThrottlerConfigResponse} message UpdateThrottlerConfigResponse + * @param {vtctldata.PingTabletRequest} message PingTabletRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - UpdateThrottlerConfigResponse.toObject = function toObject() { - return {}; + PingTabletRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.tablet_alias = null; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + return object; }; /** - * Converts this UpdateThrottlerConfigResponse to JSON. + * Converts this PingTabletRequest to JSON. * @function toJSON - * @memberof vtctldata.UpdateThrottlerConfigResponse + * @memberof vtctldata.PingTabletRequest * @instance * @returns {Object.} JSON object */ - UpdateThrottlerConfigResponse.prototype.toJSON = function toJSON() { + PingTabletRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for UpdateThrottlerConfigResponse + * Gets the default type url for PingTabletRequest * @function getTypeUrl - * @memberof vtctldata.UpdateThrottlerConfigResponse + * @memberof vtctldata.PingTabletRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - UpdateThrottlerConfigResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + PingTabletRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.UpdateThrottlerConfigResponse"; + return 
typeUrlPrefix + "/vtctldata.PingTabletRequest"; }; - return UpdateThrottlerConfigResponse; + return PingTabletRequest; })(); - vtctldata.GetSrvVSchemaRequest = (function() { + vtctldata.PingTabletResponse = (function() { /** - * Properties of a GetSrvVSchemaRequest. + * Properties of a PingTabletResponse. * @memberof vtctldata - * @interface IGetSrvVSchemaRequest - * @property {string|null} [cell] GetSrvVSchemaRequest cell + * @interface IPingTabletResponse */ /** - * Constructs a new GetSrvVSchemaRequest. + * Constructs a new PingTabletResponse. * @memberof vtctldata - * @classdesc Represents a GetSrvVSchemaRequest. - * @implements IGetSrvVSchemaRequest + * @classdesc Represents a PingTabletResponse. + * @implements IPingTabletResponse * @constructor - * @param {vtctldata.IGetSrvVSchemaRequest=} [properties] Properties to set + * @param {vtctldata.IPingTabletResponse=} [properties] Properties to set */ - function GetSrvVSchemaRequest(properties) { + function PingTabletResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -118655,77 +140546,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetSrvVSchemaRequest cell. - * @member {string} cell - * @memberof vtctldata.GetSrvVSchemaRequest - * @instance - */ - GetSrvVSchemaRequest.prototype.cell = ""; - - /** - * Creates a new GetSrvVSchemaRequest instance using the specified properties. + * Creates a new PingTabletResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.GetSrvVSchemaRequest + * @memberof vtctldata.PingTabletResponse * @static - * @param {vtctldata.IGetSrvVSchemaRequest=} [properties] Properties to set - * @returns {vtctldata.GetSrvVSchemaRequest} GetSrvVSchemaRequest instance + * @param {vtctldata.IPingTabletResponse=} [properties] Properties to set + * @returns {vtctldata.PingTabletResponse} PingTabletResponse instance */ - GetSrvVSchemaRequest.create = function create(properties) { - return new GetSrvVSchemaRequest(properties); + PingTabletResponse.create = function create(properties) { + return new PingTabletResponse(properties); }; /** - * Encodes the specified GetSrvVSchemaRequest message. Does not implicitly {@link vtctldata.GetSrvVSchemaRequest.verify|verify} messages. + * Encodes the specified PingTabletResponse message. Does not implicitly {@link vtctldata.PingTabletResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.GetSrvVSchemaRequest + * @memberof vtctldata.PingTabletResponse * @static - * @param {vtctldata.IGetSrvVSchemaRequest} message GetSrvVSchemaRequest message or plain object to encode + * @param {vtctldata.IPingTabletResponse} message PingTabletResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSrvVSchemaRequest.encode = function encode(message, writer) { + PingTabletResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.cell != null && Object.hasOwnProperty.call(message, "cell")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.cell); return writer; }; /** - * Encodes the specified GetSrvVSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.GetSrvVSchemaRequest.verify|verify} messages. + * Encodes the specified PingTabletResponse message, length delimited. Does not implicitly {@link vtctldata.PingTabletResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetSrvVSchemaRequest + * @memberof vtctldata.PingTabletResponse * @static - * @param {vtctldata.IGetSrvVSchemaRequest} message GetSrvVSchemaRequest message or plain object to encode + * @param {vtctldata.IPingTabletResponse} message PingTabletResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSrvVSchemaRequest.encodeDelimited = function encodeDelimited(message, writer) { + PingTabletResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetSrvVSchemaRequest message from the specified reader or buffer. + * Decodes a PingTabletResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetSrvVSchemaRequest + * @memberof vtctldata.PingTabletResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetSrvVSchemaRequest} GetSrvVSchemaRequest + * @returns {vtctldata.PingTabletResponse} PingTabletResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSrvVSchemaRequest.decode = function decode(reader, length) { + PingTabletResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetSrvVSchemaRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.PingTabletResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.cell = reader.string(); - break; - } default: reader.skipType(tag & 7); break; @@ -118735,122 +140612,113 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetSrvVSchemaRequest message from the specified reader or buffer, length delimited. + * Decodes a PingTabletResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetSrvVSchemaRequest + * @memberof vtctldata.PingTabletResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetSrvVSchemaRequest} GetSrvVSchemaRequest + * @returns {vtctldata.PingTabletResponse} PingTabletResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSrvVSchemaRequest.decodeDelimited = function decodeDelimited(reader) { + PingTabletResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetSrvVSchemaRequest message. + * Verifies a PingTabletResponse message. 
* @function verify - * @memberof vtctldata.GetSrvVSchemaRequest + * @memberof vtctldata.PingTabletResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetSrvVSchemaRequest.verify = function verify(message) { + PingTabletResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.cell != null && message.hasOwnProperty("cell")) - if (!$util.isString(message.cell)) - return "cell: string expected"; return null; }; /** - * Creates a GetSrvVSchemaRequest message from a plain object. Also converts values to their respective internal types. + * Creates a PingTabletResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.GetSrvVSchemaRequest + * @memberof vtctldata.PingTabletResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetSrvVSchemaRequest} GetSrvVSchemaRequest + * @returns {vtctldata.PingTabletResponse} PingTabletResponse */ - GetSrvVSchemaRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetSrvVSchemaRequest) + PingTabletResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.PingTabletResponse) return object; - let message = new $root.vtctldata.GetSrvVSchemaRequest(); - if (object.cell != null) - message.cell = String(object.cell); - return message; + return new $root.vtctldata.PingTabletResponse(); }; /** - * Creates a plain object from a GetSrvVSchemaRequest message. Also converts values to other types if specified. + * Creates a plain object from a PingTabletResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetSrvVSchemaRequest + * @memberof vtctldata.PingTabletResponse * @static - * @param {vtctldata.GetSrvVSchemaRequest} message GetSrvVSchemaRequest + * @param {vtctldata.PingTabletResponse} message PingTabletResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetSrvVSchemaRequest.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) - object.cell = ""; - if (message.cell != null && message.hasOwnProperty("cell")) - object.cell = message.cell; - return object; + PingTabletResponse.toObject = function toObject() { + return {}; }; /** - * Converts this GetSrvVSchemaRequest to JSON. + * Converts this PingTabletResponse to JSON. * @function toJSON - * @memberof vtctldata.GetSrvVSchemaRequest + * @memberof vtctldata.PingTabletResponse * @instance * @returns {Object.} JSON object */ - GetSrvVSchemaRequest.prototype.toJSON = function toJSON() { + PingTabletResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetSrvVSchemaRequest + * Gets the default type url for PingTabletResponse * @function getTypeUrl - * @memberof vtctldata.GetSrvVSchemaRequest + * @memberof vtctldata.PingTabletResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetSrvVSchemaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + PingTabletResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetSrvVSchemaRequest"; + return typeUrlPrefix + "/vtctldata.PingTabletResponse"; }; - return GetSrvVSchemaRequest; + return PingTabletResponse; })(); - vtctldata.GetSrvVSchemaResponse = (function() { + 
vtctldata.PlannedReparentShardRequest = (function() { /** - * Properties of a GetSrvVSchemaResponse. + * Properties of a PlannedReparentShardRequest. * @memberof vtctldata - * @interface IGetSrvVSchemaResponse - * @property {vschema.ISrvVSchema|null} [srv_v_schema] GetSrvVSchemaResponse srv_v_schema + * @interface IPlannedReparentShardRequest + * @property {string|null} [keyspace] PlannedReparentShardRequest keyspace + * @property {string|null} [shard] PlannedReparentShardRequest shard + * @property {topodata.ITabletAlias|null} [new_primary] PlannedReparentShardRequest new_primary + * @property {topodata.ITabletAlias|null} [avoid_primary] PlannedReparentShardRequest avoid_primary + * @property {vttime.IDuration|null} [wait_replicas_timeout] PlannedReparentShardRequest wait_replicas_timeout */ /** - * Constructs a new GetSrvVSchemaResponse. + * Constructs a new PlannedReparentShardRequest. * @memberof vtctldata - * @classdesc Represents a GetSrvVSchemaResponse. - * @implements IGetSrvVSchemaResponse + * @classdesc Represents a PlannedReparentShardRequest. + * @implements IPlannedReparentShardRequest * @constructor - * @param {vtctldata.IGetSrvVSchemaResponse=} [properties] Properties to set + * @param {vtctldata.IPlannedReparentShardRequest=} [properties] Properties to set */ - function GetSrvVSchemaResponse(properties) { + function PlannedReparentShardRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -118858,75 +140726,131 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetSrvVSchemaResponse srv_v_schema. - * @member {vschema.ISrvVSchema|null|undefined} srv_v_schema - * @memberof vtctldata.GetSrvVSchemaResponse + * PlannedReparentShardRequest keyspace. 
+ * @member {string} keyspace + * @memberof vtctldata.PlannedReparentShardRequest * @instance */ - GetSrvVSchemaResponse.prototype.srv_v_schema = null; + PlannedReparentShardRequest.prototype.keyspace = ""; /** - * Creates a new GetSrvVSchemaResponse instance using the specified properties. + * PlannedReparentShardRequest shard. + * @member {string} shard + * @memberof vtctldata.PlannedReparentShardRequest + * @instance + */ + PlannedReparentShardRequest.prototype.shard = ""; + + /** + * PlannedReparentShardRequest new_primary. + * @member {topodata.ITabletAlias|null|undefined} new_primary + * @memberof vtctldata.PlannedReparentShardRequest + * @instance + */ + PlannedReparentShardRequest.prototype.new_primary = null; + + /** + * PlannedReparentShardRequest avoid_primary. + * @member {topodata.ITabletAlias|null|undefined} avoid_primary + * @memberof vtctldata.PlannedReparentShardRequest + * @instance + */ + PlannedReparentShardRequest.prototype.avoid_primary = null; + + /** + * PlannedReparentShardRequest wait_replicas_timeout. + * @member {vttime.IDuration|null|undefined} wait_replicas_timeout + * @memberof vtctldata.PlannedReparentShardRequest + * @instance + */ + PlannedReparentShardRequest.prototype.wait_replicas_timeout = null; + + /** + * Creates a new PlannedReparentShardRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.GetSrvVSchemaResponse + * @memberof vtctldata.PlannedReparentShardRequest * @static - * @param {vtctldata.IGetSrvVSchemaResponse=} [properties] Properties to set - * @returns {vtctldata.GetSrvVSchemaResponse} GetSrvVSchemaResponse instance + * @param {vtctldata.IPlannedReparentShardRequest=} [properties] Properties to set + * @returns {vtctldata.PlannedReparentShardRequest} PlannedReparentShardRequest instance */ - GetSrvVSchemaResponse.create = function create(properties) { - return new GetSrvVSchemaResponse(properties); + PlannedReparentShardRequest.create = function create(properties) { + return new PlannedReparentShardRequest(properties); }; /** - * Encodes the specified GetSrvVSchemaResponse message. Does not implicitly {@link vtctldata.GetSrvVSchemaResponse.verify|verify} messages. + * Encodes the specified PlannedReparentShardRequest message. Does not implicitly {@link vtctldata.PlannedReparentShardRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.GetSrvVSchemaResponse + * @memberof vtctldata.PlannedReparentShardRequest * @static - * @param {vtctldata.IGetSrvVSchemaResponse} message GetSrvVSchemaResponse message or plain object to encode + * @param {vtctldata.IPlannedReparentShardRequest} message PlannedReparentShardRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSrvVSchemaResponse.encode = function encode(message, writer) { + PlannedReparentShardRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.srv_v_schema != null && Object.hasOwnProperty.call(message, "srv_v_schema")) - $root.vschema.SrvVSchema.encode(message.srv_v_schema, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if 
(message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.new_primary != null && Object.hasOwnProperty.call(message, "new_primary")) + $root.topodata.TabletAlias.encode(message.new_primary, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.avoid_primary != null && Object.hasOwnProperty.call(message, "avoid_primary")) + $root.topodata.TabletAlias.encode(message.avoid_primary, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.wait_replicas_timeout != null && Object.hasOwnProperty.call(message, "wait_replicas_timeout")) + $root.vttime.Duration.encode(message.wait_replicas_timeout, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); return writer; }; /** - * Encodes the specified GetSrvVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.GetSrvVSchemaResponse.verify|verify} messages. + * Encodes the specified PlannedReparentShardRequest message, length delimited. Does not implicitly {@link vtctldata.PlannedReparentShardRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetSrvVSchemaResponse + * @memberof vtctldata.PlannedReparentShardRequest * @static - * @param {vtctldata.IGetSrvVSchemaResponse} message GetSrvVSchemaResponse message or plain object to encode + * @param {vtctldata.IPlannedReparentShardRequest} message PlannedReparentShardRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSrvVSchemaResponse.encodeDelimited = function encodeDelimited(message, writer) { + PlannedReparentShardRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetSrvVSchemaResponse message from the specified reader or buffer. + * Decodes a PlannedReparentShardRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.GetSrvVSchemaResponse + * @memberof vtctldata.PlannedReparentShardRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetSrvVSchemaResponse} GetSrvVSchemaResponse + * @returns {vtctldata.PlannedReparentShardRequest} PlannedReparentShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSrvVSchemaResponse.decode = function decode(reader, length) { + PlannedReparentShardRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetSrvVSchemaResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.PlannedReparentShardRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.srv_v_schema = $root.vschema.SrvVSchema.decode(reader, reader.uint32()); + message.keyspace = reader.string(); + break; + } + case 2: { + message.shard = reader.string(); + break; + } + case 3: { + message.new_primary = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 4: { + message.avoid_primary = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 5: { + message.wait_replicas_timeout = $root.vttime.Duration.decode(reader, reader.uint32()); break; } default: @@ -118938,128 +140862,174 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetSrvVSchemaResponse message from the specified reader or buffer, length delimited. + * Decodes a PlannedReparentShardRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.GetSrvVSchemaResponse + * @memberof vtctldata.PlannedReparentShardRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetSrvVSchemaResponse} GetSrvVSchemaResponse + * @returns {vtctldata.PlannedReparentShardRequest} PlannedReparentShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSrvVSchemaResponse.decodeDelimited = function decodeDelimited(reader) { + PlannedReparentShardRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetSrvVSchemaResponse message. + * Verifies a PlannedReparentShardRequest message. * @function verify - * @memberof vtctldata.GetSrvVSchemaResponse + * @memberof vtctldata.PlannedReparentShardRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetSrvVSchemaResponse.verify = function verify(message) { + PlannedReparentShardRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.srv_v_schema != null && message.hasOwnProperty("srv_v_schema")) { - let error = $root.vschema.SrvVSchema.verify(message.srv_v_schema); + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.new_primary != null && message.hasOwnProperty("new_primary")) { + let error = $root.topodata.TabletAlias.verify(message.new_primary); if (error) - return "srv_v_schema." + error; + return "new_primary." 
+ error; + } + if (message.avoid_primary != null && message.hasOwnProperty("avoid_primary")) { + let error = $root.topodata.TabletAlias.verify(message.avoid_primary); + if (error) + return "avoid_primary." + error; + } + if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout")) { + let error = $root.vttime.Duration.verify(message.wait_replicas_timeout); + if (error) + return "wait_replicas_timeout." + error; } return null; }; /** - * Creates a GetSrvVSchemaResponse message from a plain object. Also converts values to their respective internal types. + * Creates a PlannedReparentShardRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.GetSrvVSchemaResponse + * @memberof vtctldata.PlannedReparentShardRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetSrvVSchemaResponse} GetSrvVSchemaResponse + * @returns {vtctldata.PlannedReparentShardRequest} PlannedReparentShardRequest */ - GetSrvVSchemaResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetSrvVSchemaResponse) + PlannedReparentShardRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.PlannedReparentShardRequest) return object; - let message = new $root.vtctldata.GetSrvVSchemaResponse(); - if (object.srv_v_schema != null) { - if (typeof object.srv_v_schema !== "object") - throw TypeError(".vtctldata.GetSrvVSchemaResponse.srv_v_schema: object expected"); - message.srv_v_schema = $root.vschema.SrvVSchema.fromObject(object.srv_v_schema); + let message = new $root.vtctldata.PlannedReparentShardRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + if (object.new_primary != null) { + if (typeof object.new_primary !== "object") + throw 
TypeError(".vtctldata.PlannedReparentShardRequest.new_primary: object expected"); + message.new_primary = $root.topodata.TabletAlias.fromObject(object.new_primary); + } + if (object.avoid_primary != null) { + if (typeof object.avoid_primary !== "object") + throw TypeError(".vtctldata.PlannedReparentShardRequest.avoid_primary: object expected"); + message.avoid_primary = $root.topodata.TabletAlias.fromObject(object.avoid_primary); + } + if (object.wait_replicas_timeout != null) { + if (typeof object.wait_replicas_timeout !== "object") + throw TypeError(".vtctldata.PlannedReparentShardRequest.wait_replicas_timeout: object expected"); + message.wait_replicas_timeout = $root.vttime.Duration.fromObject(object.wait_replicas_timeout); } return message; }; /** - * Creates a plain object from a GetSrvVSchemaResponse message. Also converts values to other types if specified. + * Creates a plain object from a PlannedReparentShardRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.GetSrvVSchemaResponse + * @memberof vtctldata.PlannedReparentShardRequest * @static - * @param {vtctldata.GetSrvVSchemaResponse} message GetSrvVSchemaResponse + * @param {vtctldata.PlannedReparentShardRequest} message PlannedReparentShardRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetSrvVSchemaResponse.toObject = function toObject(message, options) { + PlannedReparentShardRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.srv_v_schema = null; - if (message.srv_v_schema != null && message.hasOwnProperty("srv_v_schema")) - object.srv_v_schema = $root.vschema.SrvVSchema.toObject(message.srv_v_schema, options); + if (options.defaults) { + object.keyspace = ""; + object.shard = ""; + object.new_primary = null; + object.avoid_primary = null; + object.wait_replicas_timeout = null; + } + if 
(message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.new_primary != null && message.hasOwnProperty("new_primary")) + object.new_primary = $root.topodata.TabletAlias.toObject(message.new_primary, options); + if (message.avoid_primary != null && message.hasOwnProperty("avoid_primary")) + object.avoid_primary = $root.topodata.TabletAlias.toObject(message.avoid_primary, options); + if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout")) + object.wait_replicas_timeout = $root.vttime.Duration.toObject(message.wait_replicas_timeout, options); return object; }; /** - * Converts this GetSrvVSchemaResponse to JSON. + * Converts this PlannedReparentShardRequest to JSON. * @function toJSON - * @memberof vtctldata.GetSrvVSchemaResponse + * @memberof vtctldata.PlannedReparentShardRequest * @instance * @returns {Object.} JSON object */ - GetSrvVSchemaResponse.prototype.toJSON = function toJSON() { + PlannedReparentShardRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetSrvVSchemaResponse + * Gets the default type url for PlannedReparentShardRequest * @function getTypeUrl - * @memberof vtctldata.GetSrvVSchemaResponse + * @memberof vtctldata.PlannedReparentShardRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetSrvVSchemaResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + PlannedReparentShardRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetSrvVSchemaResponse"; + return typeUrlPrefix + "/vtctldata.PlannedReparentShardRequest"; }; - return 
GetSrvVSchemaResponse; + return PlannedReparentShardRequest; })(); - vtctldata.GetSrvVSchemasRequest = (function() { + vtctldata.PlannedReparentShardResponse = (function() { /** - * Properties of a GetSrvVSchemasRequest. + * Properties of a PlannedReparentShardResponse. * @memberof vtctldata - * @interface IGetSrvVSchemasRequest - * @property {Array.|null} [cells] GetSrvVSchemasRequest cells + * @interface IPlannedReparentShardResponse + * @property {string|null} [keyspace] PlannedReparentShardResponse keyspace + * @property {string|null} [shard] PlannedReparentShardResponse shard + * @property {topodata.ITabletAlias|null} [promoted_primary] PlannedReparentShardResponse promoted_primary + * @property {Array.|null} [events] PlannedReparentShardResponse events */ /** - * Constructs a new GetSrvVSchemasRequest. + * Constructs a new PlannedReparentShardResponse. * @memberof vtctldata - * @classdesc Represents a GetSrvVSchemasRequest. - * @implements IGetSrvVSchemasRequest + * @classdesc Represents a PlannedReparentShardResponse. + * @implements IPlannedReparentShardResponse * @constructor - * @param {vtctldata.IGetSrvVSchemasRequest=} [properties] Properties to set + * @param {vtctldata.IPlannedReparentShardResponse=} [properties] Properties to set */ - function GetSrvVSchemasRequest(properties) { - this.cells = []; + function PlannedReparentShardResponse(properties) { + this.events = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -119067,78 +141037,120 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetSrvVSchemasRequest cells. - * @member {Array.} cells - * @memberof vtctldata.GetSrvVSchemasRequest + * PlannedReparentShardResponse keyspace. 
+ * @member {string} keyspace + * @memberof vtctldata.PlannedReparentShardResponse * @instance */ - GetSrvVSchemasRequest.prototype.cells = $util.emptyArray; + PlannedReparentShardResponse.prototype.keyspace = ""; /** - * Creates a new GetSrvVSchemasRequest instance using the specified properties. + * PlannedReparentShardResponse shard. + * @member {string} shard + * @memberof vtctldata.PlannedReparentShardResponse + * @instance + */ + PlannedReparentShardResponse.prototype.shard = ""; + + /** + * PlannedReparentShardResponse promoted_primary. + * @member {topodata.ITabletAlias|null|undefined} promoted_primary + * @memberof vtctldata.PlannedReparentShardResponse + * @instance + */ + PlannedReparentShardResponse.prototype.promoted_primary = null; + + /** + * PlannedReparentShardResponse events. + * @member {Array.} events + * @memberof vtctldata.PlannedReparentShardResponse + * @instance + */ + PlannedReparentShardResponse.prototype.events = $util.emptyArray; + + /** + * Creates a new PlannedReparentShardResponse instance using the specified properties. * @function create - * @memberof vtctldata.GetSrvVSchemasRequest + * @memberof vtctldata.PlannedReparentShardResponse * @static - * @param {vtctldata.IGetSrvVSchemasRequest=} [properties] Properties to set - * @returns {vtctldata.GetSrvVSchemasRequest} GetSrvVSchemasRequest instance + * @param {vtctldata.IPlannedReparentShardResponse=} [properties] Properties to set + * @returns {vtctldata.PlannedReparentShardResponse} PlannedReparentShardResponse instance */ - GetSrvVSchemasRequest.create = function create(properties) { - return new GetSrvVSchemasRequest(properties); + PlannedReparentShardResponse.create = function create(properties) { + return new PlannedReparentShardResponse(properties); }; /** - * Encodes the specified GetSrvVSchemasRequest message. Does not implicitly {@link vtctldata.GetSrvVSchemasRequest.verify|verify} messages. + * Encodes the specified PlannedReparentShardResponse message. 
Does not implicitly {@link vtctldata.PlannedReparentShardResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.GetSrvVSchemasRequest + * @memberof vtctldata.PlannedReparentShardResponse * @static - * @param {vtctldata.IGetSrvVSchemasRequest} message GetSrvVSchemasRequest message or plain object to encode + * @param {vtctldata.IPlannedReparentShardResponse} message PlannedReparentShardResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSrvVSchemasRequest.encode = function encode(message, writer) { + PlannedReparentShardResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.cells != null && message.cells.length) - for (let i = 0; i < message.cells.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.cells[i]); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.promoted_primary != null && Object.hasOwnProperty.call(message, "promoted_primary")) + $root.topodata.TabletAlias.encode(message.promoted_primary, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.events != null && message.events.length) + for (let i = 0; i < message.events.length; ++i) + $root.logutil.Event.encode(message.events[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; /** - * Encodes the specified GetSrvVSchemasRequest message, length delimited. Does not implicitly {@link vtctldata.GetSrvVSchemasRequest.verify|verify} messages. + * Encodes the specified PlannedReparentShardResponse message, length delimited. Does not implicitly {@link vtctldata.PlannedReparentShardResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetSrvVSchemasRequest + * @memberof vtctldata.PlannedReparentShardResponse * @static - * @param {vtctldata.IGetSrvVSchemasRequest} message GetSrvVSchemasRequest message or plain object to encode + * @param {vtctldata.IPlannedReparentShardResponse} message PlannedReparentShardResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSrvVSchemasRequest.encodeDelimited = function encodeDelimited(message, writer) { + PlannedReparentShardResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetSrvVSchemasRequest message from the specified reader or buffer. + * Decodes a PlannedReparentShardResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetSrvVSchemasRequest + * @memberof vtctldata.PlannedReparentShardResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetSrvVSchemasRequest} GetSrvVSchemasRequest + * @returns {vtctldata.PlannedReparentShardResponse} PlannedReparentShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSrvVSchemasRequest.decode = function decode(reader, length) { + PlannedReparentShardResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetSrvVSchemasRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.PlannedReparentShardResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.keyspace = reader.string(); + break; + } case 2: { - if (!(message.cells && message.cells.length)) - message.cells = []; - message.cells.push(reader.string()); + message.shard = reader.string(); + break; + } + case 3: { + message.promoted_primary = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 4: { + if (!(message.events && message.events.length)) + message.events = []; + message.events.push($root.logutil.Event.decode(reader, reader.uint32())); break; } default: @@ -119150,135 +141162,173 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetSrvVSchemasRequest message from the specified reader or buffer, length delimited. + * Decodes a PlannedReparentShardResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetSrvVSchemasRequest + * @memberof vtctldata.PlannedReparentShardResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetSrvVSchemasRequest} GetSrvVSchemasRequest + * @returns {vtctldata.PlannedReparentShardResponse} PlannedReparentShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSrvVSchemasRequest.decodeDelimited = function decodeDelimited(reader) { + PlannedReparentShardResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetSrvVSchemasRequest message. + * Verifies a PlannedReparentShardResponse message. 
* @function verify - * @memberof vtctldata.GetSrvVSchemasRequest + * @memberof vtctldata.PlannedReparentShardResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetSrvVSchemasRequest.verify = function verify(message) { + PlannedReparentShardResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.cells != null && message.hasOwnProperty("cells")) { - if (!Array.isArray(message.cells)) - return "cells: array expected"; - for (let i = 0; i < message.cells.length; ++i) - if (!$util.isString(message.cells[i])) - return "cells: string[] expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.promoted_primary != null && message.hasOwnProperty("promoted_primary")) { + let error = $root.topodata.TabletAlias.verify(message.promoted_primary); + if (error) + return "promoted_primary." + error; + } + if (message.events != null && message.hasOwnProperty("events")) { + if (!Array.isArray(message.events)) + return "events: array expected"; + for (let i = 0; i < message.events.length; ++i) { + let error = $root.logutil.Event.verify(message.events[i]); + if (error) + return "events." + error; + } } return null; }; /** - * Creates a GetSrvVSchemasRequest message from a plain object. Also converts values to their respective internal types. + * Creates a PlannedReparentShardResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetSrvVSchemasRequest + * @memberof vtctldata.PlannedReparentShardResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetSrvVSchemasRequest} GetSrvVSchemasRequest + * @returns {vtctldata.PlannedReparentShardResponse} PlannedReparentShardResponse */ - GetSrvVSchemasRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetSrvVSchemasRequest) + PlannedReparentShardResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.PlannedReparentShardResponse) return object; - let message = new $root.vtctldata.GetSrvVSchemasRequest(); - if (object.cells) { - if (!Array.isArray(object.cells)) - throw TypeError(".vtctldata.GetSrvVSchemasRequest.cells: array expected"); - message.cells = []; - for (let i = 0; i < object.cells.length; ++i) - message.cells[i] = String(object.cells[i]); + let message = new $root.vtctldata.PlannedReparentShardResponse(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + if (object.promoted_primary != null) { + if (typeof object.promoted_primary !== "object") + throw TypeError(".vtctldata.PlannedReparentShardResponse.promoted_primary: object expected"); + message.promoted_primary = $root.topodata.TabletAlias.fromObject(object.promoted_primary); + } + if (object.events) { + if (!Array.isArray(object.events)) + throw TypeError(".vtctldata.PlannedReparentShardResponse.events: array expected"); + message.events = []; + for (let i = 0; i < object.events.length; ++i) { + if (typeof object.events[i] !== "object") + throw TypeError(".vtctldata.PlannedReparentShardResponse.events: object expected"); + message.events[i] = $root.logutil.Event.fromObject(object.events[i]); + } } return message; }; /** - * Creates a plain object from a GetSrvVSchemasRequest message. Also converts values to other types if specified. 
+ * Creates a plain object from a PlannedReparentShardResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.GetSrvVSchemasRequest + * @memberof vtctldata.PlannedReparentShardResponse * @static - * @param {vtctldata.GetSrvVSchemasRequest} message GetSrvVSchemasRequest + * @param {vtctldata.PlannedReparentShardResponse} message PlannedReparentShardResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetSrvVSchemasRequest.toObject = function toObject(message, options) { + PlannedReparentShardResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) - object.cells = []; - if (message.cells && message.cells.length) { - object.cells = []; - for (let j = 0; j < message.cells.length; ++j) - object.cells[j] = message.cells[j]; + object.events = []; + if (options.defaults) { + object.keyspace = ""; + object.shard = ""; + object.promoted_primary = null; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.promoted_primary != null && message.hasOwnProperty("promoted_primary")) + object.promoted_primary = $root.topodata.TabletAlias.toObject(message.promoted_primary, options); + if (message.events && message.events.length) { + object.events = []; + for (let j = 0; j < message.events.length; ++j) + object.events[j] = $root.logutil.Event.toObject(message.events[j], options); } return object; }; /** - * Converts this GetSrvVSchemasRequest to JSON. + * Converts this PlannedReparentShardResponse to JSON. 
* @function toJSON - * @memberof vtctldata.GetSrvVSchemasRequest + * @memberof vtctldata.PlannedReparentShardResponse * @instance * @returns {Object.} JSON object */ - GetSrvVSchemasRequest.prototype.toJSON = function toJSON() { + PlannedReparentShardResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetSrvVSchemasRequest + * Gets the default type url for PlannedReparentShardResponse * @function getTypeUrl - * @memberof vtctldata.GetSrvVSchemasRequest + * @memberof vtctldata.PlannedReparentShardResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetSrvVSchemasRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + PlannedReparentShardResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetSrvVSchemasRequest"; + return typeUrlPrefix + "/vtctldata.PlannedReparentShardResponse"; }; - return GetSrvVSchemasRequest; + return PlannedReparentShardResponse; })(); - vtctldata.GetSrvVSchemasResponse = (function() { + vtctldata.RebuildKeyspaceGraphRequest = (function() { /** - * Properties of a GetSrvVSchemasResponse. + * Properties of a RebuildKeyspaceGraphRequest. * @memberof vtctldata - * @interface IGetSrvVSchemasResponse - * @property {Object.|null} [srv_v_schemas] GetSrvVSchemasResponse srv_v_schemas + * @interface IRebuildKeyspaceGraphRequest + * @property {string|null} [keyspace] RebuildKeyspaceGraphRequest keyspace + * @property {Array.|null} [cells] RebuildKeyspaceGraphRequest cells + * @property {boolean|null} [allow_partial] RebuildKeyspaceGraphRequest allow_partial */ /** - * Constructs a new GetSrvVSchemasResponse. + * Constructs a new RebuildKeyspaceGraphRequest. 
* @memberof vtctldata - * @classdesc Represents a GetSrvVSchemasResponse. - * @implements IGetSrvVSchemasResponse + * @classdesc Represents a RebuildKeyspaceGraphRequest. + * @implements IRebuildKeyspaceGraphRequest * @constructor - * @param {vtctldata.IGetSrvVSchemasResponse=} [properties] Properties to set + * @param {vtctldata.IRebuildKeyspaceGraphRequest=} [properties] Properties to set */ - function GetSrvVSchemasResponse(properties) { - this.srv_v_schemas = {}; + function RebuildKeyspaceGraphRequest(properties) { + this.cells = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -119286,97 +141336,106 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetSrvVSchemasResponse srv_v_schemas. - * @member {Object.} srv_v_schemas - * @memberof vtctldata.GetSrvVSchemasResponse + * RebuildKeyspaceGraphRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.RebuildKeyspaceGraphRequest * @instance */ - GetSrvVSchemasResponse.prototype.srv_v_schemas = $util.emptyObject; + RebuildKeyspaceGraphRequest.prototype.keyspace = ""; /** - * Creates a new GetSrvVSchemasResponse instance using the specified properties. + * RebuildKeyspaceGraphRequest cells. + * @member {Array.} cells + * @memberof vtctldata.RebuildKeyspaceGraphRequest + * @instance + */ + RebuildKeyspaceGraphRequest.prototype.cells = $util.emptyArray; + + /** + * RebuildKeyspaceGraphRequest allow_partial. + * @member {boolean} allow_partial + * @memberof vtctldata.RebuildKeyspaceGraphRequest + * @instance + */ + RebuildKeyspaceGraphRequest.prototype.allow_partial = false; + + /** + * Creates a new RebuildKeyspaceGraphRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.GetSrvVSchemasResponse + * @memberof vtctldata.RebuildKeyspaceGraphRequest * @static - * @param {vtctldata.IGetSrvVSchemasResponse=} [properties] Properties to set - * @returns {vtctldata.GetSrvVSchemasResponse} GetSrvVSchemasResponse instance + * @param {vtctldata.IRebuildKeyspaceGraphRequest=} [properties] Properties to set + * @returns {vtctldata.RebuildKeyspaceGraphRequest} RebuildKeyspaceGraphRequest instance */ - GetSrvVSchemasResponse.create = function create(properties) { - return new GetSrvVSchemasResponse(properties); + RebuildKeyspaceGraphRequest.create = function create(properties) { + return new RebuildKeyspaceGraphRequest(properties); }; /** - * Encodes the specified GetSrvVSchemasResponse message. Does not implicitly {@link vtctldata.GetSrvVSchemasResponse.verify|verify} messages. + * Encodes the specified RebuildKeyspaceGraphRequest message. Does not implicitly {@link vtctldata.RebuildKeyspaceGraphRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetSrvVSchemasResponse + * @memberof vtctldata.RebuildKeyspaceGraphRequest * @static - * @param {vtctldata.IGetSrvVSchemasResponse} message GetSrvVSchemasResponse message or plain object to encode + * @param {vtctldata.IRebuildKeyspaceGraphRequest} message RebuildKeyspaceGraphRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSrvVSchemasResponse.encode = function encode(message, writer) { + RebuildKeyspaceGraphRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.srv_v_schemas != null && Object.hasOwnProperty.call(message, "srv_v_schemas")) - for (let keys = Object.keys(message.srv_v_schemas), i = 0; i < keys.length; ++i) { - writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); - $root.vschema.SrvVSchema.encode(message.srv_v_schemas[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); - } + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.cells != null && message.cells.length) + for (let i = 0; i < message.cells.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.cells[i]); + if (message.allow_partial != null && Object.hasOwnProperty.call(message, "allow_partial")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.allow_partial); return writer; }; /** - * Encodes the specified GetSrvVSchemasResponse message, length delimited. Does not implicitly {@link vtctldata.GetSrvVSchemasResponse.verify|verify} messages. + * Encodes the specified RebuildKeyspaceGraphRequest message, length delimited. Does not implicitly {@link vtctldata.RebuildKeyspaceGraphRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetSrvVSchemasResponse + * @memberof vtctldata.RebuildKeyspaceGraphRequest * @static - * @param {vtctldata.IGetSrvVSchemasResponse} message GetSrvVSchemasResponse message or plain object to encode + * @param {vtctldata.IRebuildKeyspaceGraphRequest} message RebuildKeyspaceGraphRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetSrvVSchemasResponse.encodeDelimited = function encodeDelimited(message, writer) { + RebuildKeyspaceGraphRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetSrvVSchemasResponse message from the specified reader or buffer. + * Decodes a RebuildKeyspaceGraphRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetSrvVSchemasResponse + * @memberof vtctldata.RebuildKeyspaceGraphRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetSrvVSchemasResponse} GetSrvVSchemasResponse + * @returns {vtctldata.RebuildKeyspaceGraphRequest} RebuildKeyspaceGraphRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSrvVSchemasResponse.decode = function decode(reader, length) { + RebuildKeyspaceGraphRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetSrvVSchemasResponse(), key, value; + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.RebuildKeyspaceGraphRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (message.srv_v_schemas === $util.emptyObject) - message.srv_v_schemas = {}; - let end2 = reader.uint32() + reader.pos; - key = ""; - value = null; - while (reader.pos < end2) { - let tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = $root.vschema.SrvVSchema.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.srv_v_schemas[key] = value; + message.keyspace = reader.string(); + break; + } + case 2: { + if (!(message.cells && message.cells.length)) + message.cells = []; + message.cells.push(reader.string()); + break; + } + case 3: { + message.allow_partial = reader.bool(); break; } default: @@ -119388,141 +141447,151 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetSrvVSchemasResponse message from the specified reader or buffer, length delimited. + * Decodes a RebuildKeyspaceGraphRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.GetSrvVSchemasResponse + * @memberof vtctldata.RebuildKeyspaceGraphRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetSrvVSchemasResponse} GetSrvVSchemasResponse + * @returns {vtctldata.RebuildKeyspaceGraphRequest} RebuildKeyspaceGraphRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetSrvVSchemasResponse.decodeDelimited = function decodeDelimited(reader) { + RebuildKeyspaceGraphRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetSrvVSchemasResponse message. + * Verifies a RebuildKeyspaceGraphRequest message. * @function verify - * @memberof vtctldata.GetSrvVSchemasResponse + * @memberof vtctldata.RebuildKeyspaceGraphRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetSrvVSchemasResponse.verify = function verify(message) { + RebuildKeyspaceGraphRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.srv_v_schemas != null && message.hasOwnProperty("srv_v_schemas")) { - if (!$util.isObject(message.srv_v_schemas)) - return "srv_v_schemas: object expected"; - let key = Object.keys(message.srv_v_schemas); - for (let i = 0; i < key.length; ++i) { - let error = $root.vschema.SrvVSchema.verify(message.srv_v_schemas[key[i]]); - if (error) - return "srv_v_schemas." 
+ error; - } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.cells != null && message.hasOwnProperty("cells")) { + if (!Array.isArray(message.cells)) + return "cells: array expected"; + for (let i = 0; i < message.cells.length; ++i) + if (!$util.isString(message.cells[i])) + return "cells: string[] expected"; } + if (message.allow_partial != null && message.hasOwnProperty("allow_partial")) + if (typeof message.allow_partial !== "boolean") + return "allow_partial: boolean expected"; return null; }; /** - * Creates a GetSrvVSchemasResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RebuildKeyspaceGraphRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.GetSrvVSchemasResponse + * @memberof vtctldata.RebuildKeyspaceGraphRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetSrvVSchemasResponse} GetSrvVSchemasResponse + * @returns {vtctldata.RebuildKeyspaceGraphRequest} RebuildKeyspaceGraphRequest */ - GetSrvVSchemasResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetSrvVSchemasResponse) + RebuildKeyspaceGraphRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RebuildKeyspaceGraphRequest) return object; - let message = new $root.vtctldata.GetSrvVSchemasResponse(); - if (object.srv_v_schemas) { - if (typeof object.srv_v_schemas !== "object") - throw TypeError(".vtctldata.GetSrvVSchemasResponse.srv_v_schemas: object expected"); - message.srv_v_schemas = {}; - for (let keys = Object.keys(object.srv_v_schemas), i = 0; i < keys.length; ++i) { - if (typeof object.srv_v_schemas[keys[i]] !== "object") - throw TypeError(".vtctldata.GetSrvVSchemasResponse.srv_v_schemas: object expected"); - 
message.srv_v_schemas[keys[i]] = $root.vschema.SrvVSchema.fromObject(object.srv_v_schemas[keys[i]]); - } + let message = new $root.vtctldata.RebuildKeyspaceGraphRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.cells) { + if (!Array.isArray(object.cells)) + throw TypeError(".vtctldata.RebuildKeyspaceGraphRequest.cells: array expected"); + message.cells = []; + for (let i = 0; i < object.cells.length; ++i) + message.cells[i] = String(object.cells[i]); } + if (object.allow_partial != null) + message.allow_partial = Boolean(object.allow_partial); return message; }; /** - * Creates a plain object from a GetSrvVSchemasResponse message. Also converts values to other types if specified. + * Creates a plain object from a RebuildKeyspaceGraphRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.GetSrvVSchemasResponse + * @memberof vtctldata.RebuildKeyspaceGraphRequest * @static - * @param {vtctldata.GetSrvVSchemasResponse} message GetSrvVSchemasResponse + * @param {vtctldata.RebuildKeyspaceGraphRequest} message RebuildKeyspaceGraphRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetSrvVSchemasResponse.toObject = function toObject(message, options) { + RebuildKeyspaceGraphRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.objects || options.defaults) - object.srv_v_schemas = {}; - let keys2; - if (message.srv_v_schemas && (keys2 = Object.keys(message.srv_v_schemas)).length) { - object.srv_v_schemas = {}; - for (let j = 0; j < keys2.length; ++j) - object.srv_v_schemas[keys2[j]] = $root.vschema.SrvVSchema.toObject(message.srv_v_schemas[keys2[j]], options); + if (options.arrays || options.defaults) + object.cells = []; + if (options.defaults) { + object.keyspace = ""; + object.allow_partial = false; } + if (message.keyspace != null && 
message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.cells && message.cells.length) { + object.cells = []; + for (let j = 0; j < message.cells.length; ++j) + object.cells[j] = message.cells[j]; + } + if (message.allow_partial != null && message.hasOwnProperty("allow_partial")) + object.allow_partial = message.allow_partial; return object; }; /** - * Converts this GetSrvVSchemasResponse to JSON. + * Converts this RebuildKeyspaceGraphRequest to JSON. * @function toJSON - * @memberof vtctldata.GetSrvVSchemasResponse + * @memberof vtctldata.RebuildKeyspaceGraphRequest * @instance * @returns {Object.} JSON object */ - GetSrvVSchemasResponse.prototype.toJSON = function toJSON() { + RebuildKeyspaceGraphRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetSrvVSchemasResponse + * Gets the default type url for RebuildKeyspaceGraphRequest * @function getTypeUrl - * @memberof vtctldata.GetSrvVSchemasResponse + * @memberof vtctldata.RebuildKeyspaceGraphRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetSrvVSchemasResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RebuildKeyspaceGraphRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetSrvVSchemasResponse"; + return typeUrlPrefix + "/vtctldata.RebuildKeyspaceGraphRequest"; }; - return GetSrvVSchemasResponse; + return RebuildKeyspaceGraphRequest; })(); - vtctldata.GetTabletRequest = (function() { + vtctldata.RebuildKeyspaceGraphResponse = (function() { /** - * Properties of a GetTabletRequest. + * Properties of a RebuildKeyspaceGraphResponse. 
* @memberof vtctldata - * @interface IGetTabletRequest - * @property {topodata.ITabletAlias|null} [tablet_alias] GetTabletRequest tablet_alias + * @interface IRebuildKeyspaceGraphResponse */ /** - * Constructs a new GetTabletRequest. + * Constructs a new RebuildKeyspaceGraphResponse. * @memberof vtctldata - * @classdesc Represents a GetTabletRequest. - * @implements IGetTabletRequest + * @classdesc Represents a RebuildKeyspaceGraphResponse. + * @implements IRebuildKeyspaceGraphResponse * @constructor - * @param {vtctldata.IGetTabletRequest=} [properties] Properties to set + * @param {vtctldata.IRebuildKeyspaceGraphResponse=} [properties] Properties to set */ - function GetTabletRequest(properties) { + function RebuildKeyspaceGraphResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -119530,77 +141599,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetTabletRequest tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.GetTabletRequest - * @instance - */ - GetTabletRequest.prototype.tablet_alias = null; - - /** - * Creates a new GetTabletRequest instance using the specified properties. + * Creates a new RebuildKeyspaceGraphResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.GetTabletRequest + * @memberof vtctldata.RebuildKeyspaceGraphResponse * @static - * @param {vtctldata.IGetTabletRequest=} [properties] Properties to set - * @returns {vtctldata.GetTabletRequest} GetTabletRequest instance + * @param {vtctldata.IRebuildKeyspaceGraphResponse=} [properties] Properties to set + * @returns {vtctldata.RebuildKeyspaceGraphResponse} RebuildKeyspaceGraphResponse instance */ - GetTabletRequest.create = function create(properties) { - return new GetTabletRequest(properties); + RebuildKeyspaceGraphResponse.create = function create(properties) { + return new RebuildKeyspaceGraphResponse(properties); }; /** - * Encodes the specified GetTabletRequest message. Does not implicitly {@link vtctldata.GetTabletRequest.verify|verify} messages. + * Encodes the specified RebuildKeyspaceGraphResponse message. Does not implicitly {@link vtctldata.RebuildKeyspaceGraphResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.GetTabletRequest + * @memberof vtctldata.RebuildKeyspaceGraphResponse * @static - * @param {vtctldata.IGetTabletRequest} message GetTabletRequest message or plain object to encode + * @param {vtctldata.IRebuildKeyspaceGraphResponse} message RebuildKeyspaceGraphResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetTabletRequest.encode = function encode(message, writer) { + RebuildKeyspaceGraphResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified GetTabletRequest message, length delimited. Does not implicitly {@link vtctldata.GetTabletRequest.verify|verify} messages. 
+ * Encodes the specified RebuildKeyspaceGraphResponse message, length delimited. Does not implicitly {@link vtctldata.RebuildKeyspaceGraphResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetTabletRequest + * @memberof vtctldata.RebuildKeyspaceGraphResponse * @static - * @param {vtctldata.IGetTabletRequest} message GetTabletRequest message or plain object to encode + * @param {vtctldata.IRebuildKeyspaceGraphResponse} message RebuildKeyspaceGraphResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetTabletRequest.encodeDelimited = function encodeDelimited(message, writer) { + RebuildKeyspaceGraphResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetTabletRequest message from the specified reader or buffer. + * Decodes a RebuildKeyspaceGraphResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetTabletRequest + * @memberof vtctldata.RebuildKeyspaceGraphResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetTabletRequest} GetTabletRequest + * @returns {vtctldata.RebuildKeyspaceGraphResponse} RebuildKeyspaceGraphResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetTabletRequest.decode = function decode(reader, length) { + RebuildKeyspaceGraphResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetTabletRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.RebuildKeyspaceGraphResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } default: reader.skipType(tag & 7); break; @@ -119610,127 +141665,110 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetTabletRequest message from the specified reader or buffer, length delimited. + * Decodes a RebuildKeyspaceGraphResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetTabletRequest + * @memberof vtctldata.RebuildKeyspaceGraphResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetTabletRequest} GetTabletRequest + * @returns {vtctldata.RebuildKeyspaceGraphResponse} RebuildKeyspaceGraphResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetTabletRequest.decodeDelimited = function decodeDelimited(reader) { + RebuildKeyspaceGraphResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetTabletRequest message. + * Verifies a RebuildKeyspaceGraphResponse message. 
* @function verify - * @memberof vtctldata.GetTabletRequest + * @memberof vtctldata.RebuildKeyspaceGraphResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetTabletRequest.verify = function verify(message) { + RebuildKeyspaceGraphResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } return null; }; /** - * Creates a GetTabletRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RebuildKeyspaceGraphResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.GetTabletRequest + * @memberof vtctldata.RebuildKeyspaceGraphResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetTabletRequest} GetTabletRequest + * @returns {vtctldata.RebuildKeyspaceGraphResponse} RebuildKeyspaceGraphResponse */ - GetTabletRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetTabletRequest) + RebuildKeyspaceGraphResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RebuildKeyspaceGraphResponse) return object; - let message = new $root.vtctldata.GetTabletRequest(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.GetTabletRequest.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } - return message; + return new $root.vtctldata.RebuildKeyspaceGraphResponse(); }; /** - * Creates a plain object from a GetTabletRequest message. 
Also converts values to other types if specified. + * Creates a plain object from a RebuildKeyspaceGraphResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.GetTabletRequest + * @memberof vtctldata.RebuildKeyspaceGraphResponse * @static - * @param {vtctldata.GetTabletRequest} message GetTabletRequest + * @param {vtctldata.RebuildKeyspaceGraphResponse} message RebuildKeyspaceGraphResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetTabletRequest.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) - object.tablet_alias = null; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - return object; + RebuildKeyspaceGraphResponse.toObject = function toObject() { + return {}; }; /** - * Converts this GetTabletRequest to JSON. + * Converts this RebuildKeyspaceGraphResponse to JSON. 
* @function toJSON - * @memberof vtctldata.GetTabletRequest + * @memberof vtctldata.RebuildKeyspaceGraphResponse * @instance * @returns {Object.} JSON object */ - GetTabletRequest.prototype.toJSON = function toJSON() { + RebuildKeyspaceGraphResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetTabletRequest + * Gets the default type url for RebuildKeyspaceGraphResponse * @function getTypeUrl - * @memberof vtctldata.GetTabletRequest + * @memberof vtctldata.RebuildKeyspaceGraphResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetTabletRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RebuildKeyspaceGraphResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetTabletRequest"; + return typeUrlPrefix + "/vtctldata.RebuildKeyspaceGraphResponse"; }; - return GetTabletRequest; + return RebuildKeyspaceGraphResponse; })(); - vtctldata.GetTabletResponse = (function() { + vtctldata.RebuildVSchemaGraphRequest = (function() { /** - * Properties of a GetTabletResponse. + * Properties of a RebuildVSchemaGraphRequest. * @memberof vtctldata - * @interface IGetTabletResponse - * @property {topodata.ITablet|null} [tablet] GetTabletResponse tablet + * @interface IRebuildVSchemaGraphRequest + * @property {Array.|null} [cells] RebuildVSchemaGraphRequest cells */ /** - * Constructs a new GetTabletResponse. + * Constructs a new RebuildVSchemaGraphRequest. * @memberof vtctldata - * @classdesc Represents a GetTabletResponse. - * @implements IGetTabletResponse + * @classdesc Represents a RebuildVSchemaGraphRequest. 
+ * @implements IRebuildVSchemaGraphRequest * @constructor - * @param {vtctldata.IGetTabletResponse=} [properties] Properties to set + * @param {vtctldata.IRebuildVSchemaGraphRequest=} [properties] Properties to set */ - function GetTabletResponse(properties) { + function RebuildVSchemaGraphRequest(properties) { + this.cells = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -119738,75 +141776,78 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetTabletResponse tablet. - * @member {topodata.ITablet|null|undefined} tablet - * @memberof vtctldata.GetTabletResponse + * RebuildVSchemaGraphRequest cells. + * @member {Array.} cells + * @memberof vtctldata.RebuildVSchemaGraphRequest * @instance */ - GetTabletResponse.prototype.tablet = null; + RebuildVSchemaGraphRequest.prototype.cells = $util.emptyArray; /** - * Creates a new GetTabletResponse instance using the specified properties. + * Creates a new RebuildVSchemaGraphRequest instance using the specified properties. * @function create - * @memberof vtctldata.GetTabletResponse + * @memberof vtctldata.RebuildVSchemaGraphRequest * @static - * @param {vtctldata.IGetTabletResponse=} [properties] Properties to set - * @returns {vtctldata.GetTabletResponse} GetTabletResponse instance + * @param {vtctldata.IRebuildVSchemaGraphRequest=} [properties] Properties to set + * @returns {vtctldata.RebuildVSchemaGraphRequest} RebuildVSchemaGraphRequest instance */ - GetTabletResponse.create = function create(properties) { - return new GetTabletResponse(properties); + RebuildVSchemaGraphRequest.create = function create(properties) { + return new RebuildVSchemaGraphRequest(properties); }; /** - * Encodes the specified GetTabletResponse message. Does not implicitly {@link vtctldata.GetTabletResponse.verify|verify} messages. + * Encodes the specified RebuildVSchemaGraphRequest message. 
Does not implicitly {@link vtctldata.RebuildVSchemaGraphRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.GetTabletResponse + * @memberof vtctldata.RebuildVSchemaGraphRequest * @static - * @param {vtctldata.IGetTabletResponse} message GetTabletResponse message or plain object to encode + * @param {vtctldata.IRebuildVSchemaGraphRequest} message RebuildVSchemaGraphRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetTabletResponse.encode = function encode(message, writer) { + RebuildVSchemaGraphRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet != null && Object.hasOwnProperty.call(message, "tablet")) - $root.topodata.Tablet.encode(message.tablet, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.cells != null && message.cells.length) + for (let i = 0; i < message.cells.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.cells[i]); return writer; }; /** - * Encodes the specified GetTabletResponse message, length delimited. Does not implicitly {@link vtctldata.GetTabletResponse.verify|verify} messages. + * Encodes the specified RebuildVSchemaGraphRequest message, length delimited. Does not implicitly {@link vtctldata.RebuildVSchemaGraphRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetTabletResponse + * @memberof vtctldata.RebuildVSchemaGraphRequest * @static - * @param {vtctldata.IGetTabletResponse} message GetTabletResponse message or plain object to encode + * @param {vtctldata.IRebuildVSchemaGraphRequest} message RebuildVSchemaGraphRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetTabletResponse.encodeDelimited = function encodeDelimited(message, writer) { + RebuildVSchemaGraphRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetTabletResponse message from the specified reader or buffer. + * Decodes a RebuildVSchemaGraphRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetTabletResponse + * @memberof vtctldata.RebuildVSchemaGraphRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetTabletResponse} GetTabletResponse + * @returns {vtctldata.RebuildVSchemaGraphRequest} RebuildVSchemaGraphRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetTabletResponse.decode = function decode(reader, length) { + RebuildVSchemaGraphRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetTabletResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.RebuildVSchemaGraphRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tablet = $root.topodata.Tablet.decode(reader, reader.uint32()); + if (!(message.cells && message.cells.length)) + message.cells = []; + message.cells.push(reader.string()); break; } default: @@ -119818,134 +141859,133 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetTabletResponse message from the specified reader or buffer, length delimited. + * Decodes a RebuildVSchemaGraphRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetTabletResponse + * @memberof vtctldata.RebuildVSchemaGraphRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetTabletResponse} GetTabletResponse + * @returns {vtctldata.RebuildVSchemaGraphRequest} RebuildVSchemaGraphRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetTabletResponse.decodeDelimited = function decodeDelimited(reader) { + RebuildVSchemaGraphRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetTabletResponse message. + * Verifies a RebuildVSchemaGraphRequest message. 
* @function verify - * @memberof vtctldata.GetTabletResponse + * @memberof vtctldata.RebuildVSchemaGraphRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetTabletResponse.verify = function verify(message) { + RebuildVSchemaGraphRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet != null && message.hasOwnProperty("tablet")) { - let error = $root.topodata.Tablet.verify(message.tablet); - if (error) - return "tablet." + error; + if (message.cells != null && message.hasOwnProperty("cells")) { + if (!Array.isArray(message.cells)) + return "cells: array expected"; + for (let i = 0; i < message.cells.length; ++i) + if (!$util.isString(message.cells[i])) + return "cells: string[] expected"; } return null; }; /** - * Creates a GetTabletResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RebuildVSchemaGraphRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetTabletResponse + * @memberof vtctldata.RebuildVSchemaGraphRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetTabletResponse} GetTabletResponse + * @returns {vtctldata.RebuildVSchemaGraphRequest} RebuildVSchemaGraphRequest */ - GetTabletResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetTabletResponse) + RebuildVSchemaGraphRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RebuildVSchemaGraphRequest) return object; - let message = new $root.vtctldata.GetTabletResponse(); - if (object.tablet != null) { - if (typeof object.tablet !== "object") - throw TypeError(".vtctldata.GetTabletResponse.tablet: object expected"); - message.tablet = $root.topodata.Tablet.fromObject(object.tablet); + let message = new $root.vtctldata.RebuildVSchemaGraphRequest(); + if (object.cells) { + if (!Array.isArray(object.cells)) + throw TypeError(".vtctldata.RebuildVSchemaGraphRequest.cells: array expected"); + message.cells = []; + for (let i = 0; i < object.cells.length; ++i) + message.cells[i] = String(object.cells[i]); } return message; }; /** - * Creates a plain object from a GetTabletResponse message. Also converts values to other types if specified. + * Creates a plain object from a RebuildVSchemaGraphRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetTabletResponse + * @memberof vtctldata.RebuildVSchemaGraphRequest * @static - * @param {vtctldata.GetTabletResponse} message GetTabletResponse + * @param {vtctldata.RebuildVSchemaGraphRequest} message RebuildVSchemaGraphRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetTabletResponse.toObject = function toObject(message, options) { + RebuildVSchemaGraphRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.tablet = null; - if (message.tablet != null && message.hasOwnProperty("tablet")) - object.tablet = $root.topodata.Tablet.toObject(message.tablet, options); + if (options.arrays || options.defaults) + object.cells = []; + if (message.cells && message.cells.length) { + object.cells = []; + for (let j = 0; j < message.cells.length; ++j) + object.cells[j] = message.cells[j]; + } return object; }; /** - * Converts this GetTabletResponse to JSON. + * Converts this RebuildVSchemaGraphRequest to JSON. 
* @function toJSON - * @memberof vtctldata.GetTabletResponse + * @memberof vtctldata.RebuildVSchemaGraphRequest * @instance * @returns {Object.} JSON object */ - GetTabletResponse.prototype.toJSON = function toJSON() { + RebuildVSchemaGraphRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetTabletResponse + * Gets the default type url for RebuildVSchemaGraphRequest * @function getTypeUrl - * @memberof vtctldata.GetTabletResponse + * @memberof vtctldata.RebuildVSchemaGraphRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetTabletResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RebuildVSchemaGraphRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetTabletResponse"; + return typeUrlPrefix + "/vtctldata.RebuildVSchemaGraphRequest"; }; - return GetTabletResponse; + return RebuildVSchemaGraphRequest; })(); - vtctldata.GetTabletsRequest = (function() { + vtctldata.RebuildVSchemaGraphResponse = (function() { /** - * Properties of a GetTabletsRequest. + * Properties of a RebuildVSchemaGraphResponse. * @memberof vtctldata - * @interface IGetTabletsRequest - * @property {string|null} [keyspace] GetTabletsRequest keyspace - * @property {string|null} [shard] GetTabletsRequest shard - * @property {Array.|null} [cells] GetTabletsRequest cells - * @property {boolean|null} [strict] GetTabletsRequest strict - * @property {Array.|null} [tablet_aliases] GetTabletsRequest tablet_aliases - * @property {topodata.TabletType|null} [tablet_type] GetTabletsRequest tablet_type + * @interface IRebuildVSchemaGraphResponse */ /** - * Constructs a new GetTabletsRequest. + * Constructs a new RebuildVSchemaGraphResponse. 
* @memberof vtctldata - * @classdesc Represents a GetTabletsRequest. - * @implements IGetTabletsRequest + * @classdesc Represents a RebuildVSchemaGraphResponse. + * @implements IRebuildVSchemaGraphResponse * @constructor - * @param {vtctldata.IGetTabletsRequest=} [properties] Properties to set + * @param {vtctldata.IRebuildVSchemaGraphResponse=} [properties] Properties to set */ - function GetTabletsRequest(properties) { - this.cells = []; - this.tablet_aliases = []; + function RebuildVSchemaGraphResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -119953,153 +141993,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetTabletsRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.GetTabletsRequest - * @instance - */ - GetTabletsRequest.prototype.keyspace = ""; - - /** - * GetTabletsRequest shard. - * @member {string} shard - * @memberof vtctldata.GetTabletsRequest - * @instance - */ - GetTabletsRequest.prototype.shard = ""; - - /** - * GetTabletsRequest cells. - * @member {Array.} cells - * @memberof vtctldata.GetTabletsRequest - * @instance - */ - GetTabletsRequest.prototype.cells = $util.emptyArray; - - /** - * GetTabletsRequest strict. - * @member {boolean} strict - * @memberof vtctldata.GetTabletsRequest - * @instance - */ - GetTabletsRequest.prototype.strict = false; - - /** - * GetTabletsRequest tablet_aliases. - * @member {Array.} tablet_aliases - * @memberof vtctldata.GetTabletsRequest - * @instance - */ - GetTabletsRequest.prototype.tablet_aliases = $util.emptyArray; - - /** - * GetTabletsRequest tablet_type. - * @member {topodata.TabletType} tablet_type - * @memberof vtctldata.GetTabletsRequest - * @instance - */ - GetTabletsRequest.prototype.tablet_type = 0; - - /** - * Creates a new GetTabletsRequest instance using the specified properties. + * Creates a new RebuildVSchemaGraphResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.GetTabletsRequest + * @memberof vtctldata.RebuildVSchemaGraphResponse * @static - * @param {vtctldata.IGetTabletsRequest=} [properties] Properties to set - * @returns {vtctldata.GetTabletsRequest} GetTabletsRequest instance + * @param {vtctldata.IRebuildVSchemaGraphResponse=} [properties] Properties to set + * @returns {vtctldata.RebuildVSchemaGraphResponse} RebuildVSchemaGraphResponse instance */ - GetTabletsRequest.create = function create(properties) { - return new GetTabletsRequest(properties); + RebuildVSchemaGraphResponse.create = function create(properties) { + return new RebuildVSchemaGraphResponse(properties); }; /** - * Encodes the specified GetTabletsRequest message. Does not implicitly {@link vtctldata.GetTabletsRequest.verify|verify} messages. + * Encodes the specified RebuildVSchemaGraphResponse message. Does not implicitly {@link vtctldata.RebuildVSchemaGraphResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.GetTabletsRequest + * @memberof vtctldata.RebuildVSchemaGraphResponse * @static - * @param {vtctldata.IGetTabletsRequest} message GetTabletsRequest message or plain object to encode + * @param {vtctldata.IRebuildVSchemaGraphResponse} message RebuildVSchemaGraphResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetTabletsRequest.encode = function encode(message, writer) { + RebuildVSchemaGraphResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.cells != null && message.cells.length) - for (let i = 0; i < message.cells.length; ++i) - 
writer.uint32(/* id 3, wireType 2 =*/26).string(message.cells[i]); - if (message.strict != null && Object.hasOwnProperty.call(message, "strict")) - writer.uint32(/* id 4, wireType 0 =*/32).bool(message.strict); - if (message.tablet_aliases != null && message.tablet_aliases.length) - for (let i = 0; i < message.tablet_aliases.length; ++i) - $root.topodata.TabletAlias.encode(message.tablet_aliases[i], writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); - if (message.tablet_type != null && Object.hasOwnProperty.call(message, "tablet_type")) - writer.uint32(/* id 6, wireType 0 =*/48).int32(message.tablet_type); return writer; }; /** - * Encodes the specified GetTabletsRequest message, length delimited. Does not implicitly {@link vtctldata.GetTabletsRequest.verify|verify} messages. + * Encodes the specified RebuildVSchemaGraphResponse message, length delimited. Does not implicitly {@link vtctldata.RebuildVSchemaGraphResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetTabletsRequest + * @memberof vtctldata.RebuildVSchemaGraphResponse * @static - * @param {vtctldata.IGetTabletsRequest} message GetTabletsRequest message or plain object to encode + * @param {vtctldata.IRebuildVSchemaGraphResponse} message RebuildVSchemaGraphResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetTabletsRequest.encodeDelimited = function encodeDelimited(message, writer) { + RebuildVSchemaGraphResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetTabletsRequest message from the specified reader or buffer. + * Decodes a RebuildVSchemaGraphResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.GetTabletsRequest + * @memberof vtctldata.RebuildVSchemaGraphResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetTabletsRequest} GetTabletsRequest + * @returns {vtctldata.RebuildVSchemaGraphResponse} RebuildVSchemaGraphResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetTabletsRequest.decode = function decode(reader, length) { + RebuildVSchemaGraphResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetTabletsRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RebuildVSchemaGraphResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - message.shard = reader.string(); - break; - } - case 3: { - if (!(message.cells && message.cells.length)) - message.cells = []; - message.cells.push(reader.string()); - break; - } - case 4: { - message.strict = reader.bool(); - break; - } - case 5: { - if (!(message.tablet_aliases && message.tablet_aliases.length)) - message.tablet_aliases = []; - message.tablet_aliases.push($root.topodata.TabletAlias.decode(reader, reader.uint32())); - break; - } - case 6: { - message.tablet_type = reader.int32(); - break; - } default: reader.skipType(tag & 7); break; @@ -120109,259 +142059,109 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetTabletsRequest message from the specified reader or buffer, length delimited. + * Decodes a RebuildVSchemaGraphResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.GetTabletsRequest + * @memberof vtctldata.RebuildVSchemaGraphResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetTabletsRequest} GetTabletsRequest + * @returns {vtctldata.RebuildVSchemaGraphResponse} RebuildVSchemaGraphResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetTabletsRequest.decodeDelimited = function decodeDelimited(reader) { + RebuildVSchemaGraphResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetTabletsRequest message. + * Verifies a RebuildVSchemaGraphResponse message. * @function verify - * @memberof vtctldata.GetTabletsRequest + * @memberof vtctldata.RebuildVSchemaGraphResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetTabletsRequest.verify = function verify(message) { + RebuildVSchemaGraphResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.cells != null && message.hasOwnProperty("cells")) { - if (!Array.isArray(message.cells)) - return "cells: array expected"; - for (let i = 0; i < message.cells.length; ++i) - if (!$util.isString(message.cells[i])) - return "cells: string[] expected"; - } - if (message.strict != null && message.hasOwnProperty("strict")) - if (typeof message.strict !== "boolean") - return "strict: 
boolean expected"; - if (message.tablet_aliases != null && message.hasOwnProperty("tablet_aliases")) { - if (!Array.isArray(message.tablet_aliases)) - return "tablet_aliases: array expected"; - for (let i = 0; i < message.tablet_aliases.length; ++i) { - let error = $root.topodata.TabletAlias.verify(message.tablet_aliases[i]); - if (error) - return "tablet_aliases." + error; - } - } - if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) - switch (message.tablet_type) { - default: - return "tablet_type: enum value expected"; - case 0: - case 1: - case 1: - case 2: - case 3: - case 3: - case 4: - case 5: - case 6: - case 7: - case 8: - break; - } return null; }; /** - * Creates a GetTabletsRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RebuildVSchemaGraphResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.GetTabletsRequest + * @memberof vtctldata.RebuildVSchemaGraphResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetTabletsRequest} GetTabletsRequest + * @returns {vtctldata.RebuildVSchemaGraphResponse} RebuildVSchemaGraphResponse */ - GetTabletsRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetTabletsRequest) + RebuildVSchemaGraphResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RebuildVSchemaGraphResponse) return object; - let message = new $root.vtctldata.GetTabletsRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.cells) { - if (!Array.isArray(object.cells)) - throw TypeError(".vtctldata.GetTabletsRequest.cells: array expected"); - message.cells = []; - for (let i = 0; i < object.cells.length; ++i) - message.cells[i] = String(object.cells[i]); - } - if 
(object.strict != null) - message.strict = Boolean(object.strict); - if (object.tablet_aliases) { - if (!Array.isArray(object.tablet_aliases)) - throw TypeError(".vtctldata.GetTabletsRequest.tablet_aliases: array expected"); - message.tablet_aliases = []; - for (let i = 0; i < object.tablet_aliases.length; ++i) { - if (typeof object.tablet_aliases[i] !== "object") - throw TypeError(".vtctldata.GetTabletsRequest.tablet_aliases: object expected"); - message.tablet_aliases[i] = $root.topodata.TabletAlias.fromObject(object.tablet_aliases[i]); - } - } - switch (object.tablet_type) { - default: - if (typeof object.tablet_type === "number") { - message.tablet_type = object.tablet_type; - break; - } - break; - case "UNKNOWN": - case 0: - message.tablet_type = 0; - break; - case "PRIMARY": - case 1: - message.tablet_type = 1; - break; - case "MASTER": - case 1: - message.tablet_type = 1; - break; - case "REPLICA": - case 2: - message.tablet_type = 2; - break; - case "RDONLY": - case 3: - message.tablet_type = 3; - break; - case "BATCH": - case 3: - message.tablet_type = 3; - break; - case "SPARE": - case 4: - message.tablet_type = 4; - break; - case "EXPERIMENTAL": - case 5: - message.tablet_type = 5; - break; - case "BACKUP": - case 6: - message.tablet_type = 6; - break; - case "RESTORE": - case 7: - message.tablet_type = 7; - break; - case "DRAINED": - case 8: - message.tablet_type = 8; - break; - } - return message; + return new $root.vtctldata.RebuildVSchemaGraphResponse(); }; /** - * Creates a plain object from a GetTabletsRequest message. Also converts values to other types if specified. + * Creates a plain object from a RebuildVSchemaGraphResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetTabletsRequest + * @memberof vtctldata.RebuildVSchemaGraphResponse * @static - * @param {vtctldata.GetTabletsRequest} message GetTabletsRequest + * @param {vtctldata.RebuildVSchemaGraphResponse} message RebuildVSchemaGraphResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetTabletsRequest.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.arrays || options.defaults) { - object.cells = []; - object.tablet_aliases = []; - } - if (options.defaults) { - object.keyspace = ""; - object.shard = ""; - object.strict = false; - object.tablet_type = options.enums === String ? "UNKNOWN" : 0; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.cells && message.cells.length) { - object.cells = []; - for (let j = 0; j < message.cells.length; ++j) - object.cells[j] = message.cells[j]; - } - if (message.strict != null && message.hasOwnProperty("strict")) - object.strict = message.strict; - if (message.tablet_aliases && message.tablet_aliases.length) { - object.tablet_aliases = []; - for (let j = 0; j < message.tablet_aliases.length; ++j) - object.tablet_aliases[j] = $root.topodata.TabletAlias.toObject(message.tablet_aliases[j], options); - } - if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) - object.tablet_type = options.enums === String ? $root.topodata.TabletType[message.tablet_type] === undefined ? message.tablet_type : $root.topodata.TabletType[message.tablet_type] : message.tablet_type; - return object; + RebuildVSchemaGraphResponse.toObject = function toObject() { + return {}; }; /** - * Converts this GetTabletsRequest to JSON. + * Converts this RebuildVSchemaGraphResponse to JSON. 
* @function toJSON - * @memberof vtctldata.GetTabletsRequest + * @memberof vtctldata.RebuildVSchemaGraphResponse * @instance * @returns {Object.} JSON object */ - GetTabletsRequest.prototype.toJSON = function toJSON() { + RebuildVSchemaGraphResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetTabletsRequest + * Gets the default type url for RebuildVSchemaGraphResponse * @function getTypeUrl - * @memberof vtctldata.GetTabletsRequest + * @memberof vtctldata.RebuildVSchemaGraphResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetTabletsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RebuildVSchemaGraphResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetTabletsRequest"; + return typeUrlPrefix + "/vtctldata.RebuildVSchemaGraphResponse"; }; - return GetTabletsRequest; + return RebuildVSchemaGraphResponse; })(); - vtctldata.GetTabletsResponse = (function() { + vtctldata.RefreshStateRequest = (function() { /** - * Properties of a GetTabletsResponse. + * Properties of a RefreshStateRequest. * @memberof vtctldata - * @interface IGetTabletsResponse - * @property {Array.|null} [tablets] GetTabletsResponse tablets + * @interface IRefreshStateRequest + * @property {topodata.ITabletAlias|null} [tablet_alias] RefreshStateRequest tablet_alias */ /** - * Constructs a new GetTabletsResponse. + * Constructs a new RefreshStateRequest. * @memberof vtctldata - * @classdesc Represents a GetTabletsResponse. - * @implements IGetTabletsResponse + * @classdesc Represents a RefreshStateRequest. 
+ * @implements IRefreshStateRequest * @constructor - * @param {vtctldata.IGetTabletsResponse=} [properties] Properties to set + * @param {vtctldata.IRefreshStateRequest=} [properties] Properties to set */ - function GetTabletsResponse(properties) { - this.tablets = []; + function RefreshStateRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -120369,78 +142169,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetTabletsResponse tablets. - * @member {Array.} tablets - * @memberof vtctldata.GetTabletsResponse + * RefreshStateRequest tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.RefreshStateRequest * @instance */ - GetTabletsResponse.prototype.tablets = $util.emptyArray; + RefreshStateRequest.prototype.tablet_alias = null; /** - * Creates a new GetTabletsResponse instance using the specified properties. + * Creates a new RefreshStateRequest instance using the specified properties. * @function create - * @memberof vtctldata.GetTabletsResponse + * @memberof vtctldata.RefreshStateRequest * @static - * @param {vtctldata.IGetTabletsResponse=} [properties] Properties to set - * @returns {vtctldata.GetTabletsResponse} GetTabletsResponse instance + * @param {vtctldata.IRefreshStateRequest=} [properties] Properties to set + * @returns {vtctldata.RefreshStateRequest} RefreshStateRequest instance */ - GetTabletsResponse.create = function create(properties) { - return new GetTabletsResponse(properties); + RefreshStateRequest.create = function create(properties) { + return new RefreshStateRequest(properties); }; /** - * Encodes the specified GetTabletsResponse message. Does not implicitly {@link vtctldata.GetTabletsResponse.verify|verify} messages. + * Encodes the specified RefreshStateRequest message. Does not implicitly {@link vtctldata.RefreshStateRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetTabletsResponse + * @memberof vtctldata.RefreshStateRequest * @static - * @param {vtctldata.IGetTabletsResponse} message GetTabletsResponse message or plain object to encode + * @param {vtctldata.IRefreshStateRequest} message RefreshStateRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetTabletsResponse.encode = function encode(message, writer) { + RefreshStateRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablets != null && message.tablets.length) - for (let i = 0; i < message.tablets.length; ++i) - $root.topodata.Tablet.encode(message.tablets[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified GetTabletsResponse message, length delimited. Does not implicitly {@link vtctldata.GetTabletsResponse.verify|verify} messages. + * Encodes the specified RefreshStateRequest message, length delimited. Does not implicitly {@link vtctldata.RefreshStateRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetTabletsResponse + * @memberof vtctldata.RefreshStateRequest * @static - * @param {vtctldata.IGetTabletsResponse} message GetTabletsResponse message or plain object to encode + * @param {vtctldata.IRefreshStateRequest} message RefreshStateRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetTabletsResponse.encodeDelimited = function encodeDelimited(message, writer) { + RefreshStateRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetTabletsResponse message from the specified reader or buffer. + * Decodes a RefreshStateRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetTabletsResponse + * @memberof vtctldata.RefreshStateRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetTabletsResponse} GetTabletsResponse + * @returns {vtctldata.RefreshStateRequest} RefreshStateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetTabletsResponse.decode = function decode(reader, length) { + RefreshStateRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetTabletsResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.RefreshStateRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.tablets && message.tablets.length)) - message.tablets = []; - message.tablets.push($root.topodata.Tablet.decode(reader, reader.uint32())); + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); break; } default: @@ -120452,139 +142249,126 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetTabletsResponse message from the specified reader or buffer, length delimited. + * Decodes a RefreshStateRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetTabletsResponse + * @memberof vtctldata.RefreshStateRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetTabletsResponse} GetTabletsResponse + * @returns {vtctldata.RefreshStateRequest} RefreshStateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetTabletsResponse.decodeDelimited = function decodeDelimited(reader) { + RefreshStateRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetTabletsResponse message. + * Verifies a RefreshStateRequest message. 
* @function verify - * @memberof vtctldata.GetTabletsResponse + * @memberof vtctldata.RefreshStateRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetTabletsResponse.verify = function verify(message) { + RefreshStateRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablets != null && message.hasOwnProperty("tablets")) { - if (!Array.isArray(message.tablets)) - return "tablets: array expected"; - for (let i = 0; i < message.tablets.length; ++i) { - let error = $root.topodata.Tablet.verify(message.tablets[i]); - if (error) - return "tablets." + error; - } + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; } return null; }; /** - * Creates a GetTabletsResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RefreshStateRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetTabletsResponse + * @memberof vtctldata.RefreshStateRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetTabletsResponse} GetTabletsResponse + * @returns {vtctldata.RefreshStateRequest} RefreshStateRequest */ - GetTabletsResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetTabletsResponse) + RefreshStateRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RefreshStateRequest) return object; - let message = new $root.vtctldata.GetTabletsResponse(); - if (object.tablets) { - if (!Array.isArray(object.tablets)) - throw TypeError(".vtctldata.GetTabletsResponse.tablets: array expected"); - message.tablets = []; - for (let i = 0; i < object.tablets.length; ++i) { - if (typeof object.tablets[i] !== "object") - throw TypeError(".vtctldata.GetTabletsResponse.tablets: object expected"); - message.tablets[i] = $root.topodata.Tablet.fromObject(object.tablets[i]); - } + let message = new $root.vtctldata.RefreshStateRequest(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.RefreshStateRequest.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); } return message; }; /** - * Creates a plain object from a GetTabletsResponse message. Also converts values to other types if specified. + * Creates a plain object from a RefreshStateRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetTabletsResponse + * @memberof vtctldata.RefreshStateRequest * @static - * @param {vtctldata.GetTabletsResponse} message GetTabletsResponse + * @param {vtctldata.RefreshStateRequest} message RefreshStateRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetTabletsResponse.toObject = function toObject(message, options) { + RefreshStateRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.tablets = []; - if (message.tablets && message.tablets.length) { - object.tablets = []; - for (let j = 0; j < message.tablets.length; ++j) - object.tablets[j] = $root.topodata.Tablet.toObject(message.tablets[j], options); - } + if (options.defaults) + object.tablet_alias = null; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); return object; }; /** - * Converts this GetTabletsResponse to JSON. + * Converts this RefreshStateRequest to JSON. 
* @function toJSON - * @memberof vtctldata.GetTabletsResponse + * @memberof vtctldata.RefreshStateRequest * @instance * @returns {Object.} JSON object */ - GetTabletsResponse.prototype.toJSON = function toJSON() { + RefreshStateRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetTabletsResponse + * Gets the default type url for RefreshStateRequest * @function getTypeUrl - * @memberof vtctldata.GetTabletsResponse + * @memberof vtctldata.RefreshStateRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetTabletsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RefreshStateRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetTabletsResponse"; + return typeUrlPrefix + "/vtctldata.RefreshStateRequest"; }; - return GetTabletsResponse; + return RefreshStateRequest; })(); - vtctldata.GetTopologyPathRequest = (function() { + vtctldata.RefreshStateResponse = (function() { /** - * Properties of a GetTopologyPathRequest. + * Properties of a RefreshStateResponse. * @memberof vtctldata - * @interface IGetTopologyPathRequest - * @property {string|null} [path] GetTopologyPathRequest path + * @interface IRefreshStateResponse */ /** - * Constructs a new GetTopologyPathRequest. + * Constructs a new RefreshStateResponse. * @memberof vtctldata - * @classdesc Represents a GetTopologyPathRequest. - * @implements IGetTopologyPathRequest + * @classdesc Represents a RefreshStateResponse. 
+ * @implements IRefreshStateResponse * @constructor - * @param {vtctldata.IGetTopologyPathRequest=} [properties] Properties to set + * @param {vtctldata.IRefreshStateResponse=} [properties] Properties to set */ - function GetTopologyPathRequest(properties) { + function RefreshStateResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -120592,77 +142376,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetTopologyPathRequest path. - * @member {string} path - * @memberof vtctldata.GetTopologyPathRequest - * @instance - */ - GetTopologyPathRequest.prototype.path = ""; - - /** - * Creates a new GetTopologyPathRequest instance using the specified properties. + * Creates a new RefreshStateResponse instance using the specified properties. * @function create - * @memberof vtctldata.GetTopologyPathRequest + * @memberof vtctldata.RefreshStateResponse * @static - * @param {vtctldata.IGetTopologyPathRequest=} [properties] Properties to set - * @returns {vtctldata.GetTopologyPathRequest} GetTopologyPathRequest instance + * @param {vtctldata.IRefreshStateResponse=} [properties] Properties to set + * @returns {vtctldata.RefreshStateResponse} RefreshStateResponse instance */ - GetTopologyPathRequest.create = function create(properties) { - return new GetTopologyPathRequest(properties); + RefreshStateResponse.create = function create(properties) { + return new RefreshStateResponse(properties); }; /** - * Encodes the specified GetTopologyPathRequest message. Does not implicitly {@link vtctldata.GetTopologyPathRequest.verify|verify} messages. + * Encodes the specified RefreshStateResponse message. Does not implicitly {@link vtctldata.RefreshStateResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetTopologyPathRequest + * @memberof vtctldata.RefreshStateResponse * @static - * @param {vtctldata.IGetTopologyPathRequest} message GetTopologyPathRequest message or plain object to encode + * @param {vtctldata.IRefreshStateResponse} message RefreshStateResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetTopologyPathRequest.encode = function encode(message, writer) { + RefreshStateResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.path != null && Object.hasOwnProperty.call(message, "path")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.path); return writer; }; /** - * Encodes the specified GetTopologyPathRequest message, length delimited. Does not implicitly {@link vtctldata.GetTopologyPathRequest.verify|verify} messages. + * Encodes the specified RefreshStateResponse message, length delimited. Does not implicitly {@link vtctldata.RefreshStateResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetTopologyPathRequest + * @memberof vtctldata.RefreshStateResponse * @static - * @param {vtctldata.IGetTopologyPathRequest} message GetTopologyPathRequest message or plain object to encode + * @param {vtctldata.IRefreshStateResponse} message RefreshStateResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetTopologyPathRequest.encodeDelimited = function encodeDelimited(message, writer) { + RefreshStateResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetTopologyPathRequest message from the specified reader or buffer. + * Decodes a RefreshStateResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.GetTopologyPathRequest + * @memberof vtctldata.RefreshStateResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetTopologyPathRequest} GetTopologyPathRequest + * @returns {vtctldata.RefreshStateResponse} RefreshStateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetTopologyPathRequest.decode = function decode(reader, length) { + RefreshStateResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetTopologyPathRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RefreshStateResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.path = reader.string(); - break; - } default: reader.skipType(tag & 7); break; @@ -120672,122 +142442,112 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetTopologyPathRequest message from the specified reader or buffer, length delimited. + * Decodes a RefreshStateResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.GetTopologyPathRequest + * @memberof vtctldata.RefreshStateResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetTopologyPathRequest} GetTopologyPathRequest + * @returns {vtctldata.RefreshStateResponse} RefreshStateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetTopologyPathRequest.decodeDelimited = function decodeDelimited(reader) { + RefreshStateResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetTopologyPathRequest message. + * Verifies a RefreshStateResponse message. * @function verify - * @memberof vtctldata.GetTopologyPathRequest + * @memberof vtctldata.RefreshStateResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetTopologyPathRequest.verify = function verify(message) { + RefreshStateResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.path != null && message.hasOwnProperty("path")) - if (!$util.isString(message.path)) - return "path: string expected"; return null; }; /** - * Creates a GetTopologyPathRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RefreshStateResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetTopologyPathRequest + * @memberof vtctldata.RefreshStateResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetTopologyPathRequest} GetTopologyPathRequest + * @returns {vtctldata.RefreshStateResponse} RefreshStateResponse */ - GetTopologyPathRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetTopologyPathRequest) + RefreshStateResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RefreshStateResponse) return object; - let message = new $root.vtctldata.GetTopologyPathRequest(); - if (object.path != null) - message.path = String(object.path); - return message; + return new $root.vtctldata.RefreshStateResponse(); }; /** - * Creates a plain object from a GetTopologyPathRequest message. Also converts values to other types if specified. + * Creates a plain object from a RefreshStateResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.GetTopologyPathRequest + * @memberof vtctldata.RefreshStateResponse * @static - * @param {vtctldata.GetTopologyPathRequest} message GetTopologyPathRequest + * @param {vtctldata.RefreshStateResponse} message RefreshStateResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetTopologyPathRequest.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) - object.path = ""; - if (message.path != null && message.hasOwnProperty("path")) - object.path = message.path; - return object; + RefreshStateResponse.toObject = function toObject() { + return {}; }; /** - * Converts this GetTopologyPathRequest to JSON. + * Converts this RefreshStateResponse to JSON. 
* @function toJSON - * @memberof vtctldata.GetTopologyPathRequest + * @memberof vtctldata.RefreshStateResponse * @instance * @returns {Object.} JSON object */ - GetTopologyPathRequest.prototype.toJSON = function toJSON() { + RefreshStateResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetTopologyPathRequest + * Gets the default type url for RefreshStateResponse * @function getTypeUrl - * @memberof vtctldata.GetTopologyPathRequest + * @memberof vtctldata.RefreshStateResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetTopologyPathRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RefreshStateResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetTopologyPathRequest"; + return typeUrlPrefix + "/vtctldata.RefreshStateResponse"; }; - return GetTopologyPathRequest; + return RefreshStateResponse; })(); - vtctldata.GetTopologyPathResponse = (function() { + vtctldata.RefreshStateByShardRequest = (function() { /** - * Properties of a GetTopologyPathResponse. + * Properties of a RefreshStateByShardRequest. * @memberof vtctldata - * @interface IGetTopologyPathResponse - * @property {vtctldata.ITopologyCell|null} [cell] GetTopologyPathResponse cell + * @interface IRefreshStateByShardRequest + * @property {string|null} [keyspace] RefreshStateByShardRequest keyspace + * @property {string|null} [shard] RefreshStateByShardRequest shard + * @property {Array.|null} [cells] RefreshStateByShardRequest cells */ /** - * Constructs a new GetTopologyPathResponse. + * Constructs a new RefreshStateByShardRequest. * @memberof vtctldata - * @classdesc Represents a GetTopologyPathResponse. 
- * @implements IGetTopologyPathResponse + * @classdesc Represents a RefreshStateByShardRequest. + * @implements IRefreshStateByShardRequest * @constructor - * @param {vtctldata.IGetTopologyPathResponse=} [properties] Properties to set + * @param {vtctldata.IRefreshStateByShardRequest=} [properties] Properties to set */ - function GetTopologyPathResponse(properties) { + function RefreshStateByShardRequest(properties) { + this.cells = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -120795,75 +142555,106 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetTopologyPathResponse cell. - * @member {vtctldata.ITopologyCell|null|undefined} cell - * @memberof vtctldata.GetTopologyPathResponse + * RefreshStateByShardRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.RefreshStateByShardRequest * @instance */ - GetTopologyPathResponse.prototype.cell = null; + RefreshStateByShardRequest.prototype.keyspace = ""; /** - * Creates a new GetTopologyPathResponse instance using the specified properties. + * RefreshStateByShardRequest shard. + * @member {string} shard + * @memberof vtctldata.RefreshStateByShardRequest + * @instance + */ + RefreshStateByShardRequest.prototype.shard = ""; + + /** + * RefreshStateByShardRequest cells. + * @member {Array.} cells + * @memberof vtctldata.RefreshStateByShardRequest + * @instance + */ + RefreshStateByShardRequest.prototype.cells = $util.emptyArray; + + /** + * Creates a new RefreshStateByShardRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.GetTopologyPathResponse + * @memberof vtctldata.RefreshStateByShardRequest * @static - * @param {vtctldata.IGetTopologyPathResponse=} [properties] Properties to set - * @returns {vtctldata.GetTopologyPathResponse} GetTopologyPathResponse instance + * @param {vtctldata.IRefreshStateByShardRequest=} [properties] Properties to set + * @returns {vtctldata.RefreshStateByShardRequest} RefreshStateByShardRequest instance */ - GetTopologyPathResponse.create = function create(properties) { - return new GetTopologyPathResponse(properties); + RefreshStateByShardRequest.create = function create(properties) { + return new RefreshStateByShardRequest(properties); }; /** - * Encodes the specified GetTopologyPathResponse message. Does not implicitly {@link vtctldata.GetTopologyPathResponse.verify|verify} messages. + * Encodes the specified RefreshStateByShardRequest message. Does not implicitly {@link vtctldata.RefreshStateByShardRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetTopologyPathResponse + * @memberof vtctldata.RefreshStateByShardRequest * @static - * @param {vtctldata.IGetTopologyPathResponse} message GetTopologyPathResponse message or plain object to encode + * @param {vtctldata.IRefreshStateByShardRequest} message RefreshStateByShardRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetTopologyPathResponse.encode = function encode(message, writer) { + RefreshStateByShardRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.cell != null && Object.hasOwnProperty.call(message, "cell")) - $root.vtctldata.TopologyCell.encode(message.cell, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.cells != null && message.cells.length) + for (let i = 0; i < message.cells.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.cells[i]); return writer; }; /** - * Encodes the specified GetTopologyPathResponse message, length delimited. Does not implicitly {@link vtctldata.GetTopologyPathResponse.verify|verify} messages. + * Encodes the specified RefreshStateByShardRequest message, length delimited. Does not implicitly {@link vtctldata.RefreshStateByShardRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetTopologyPathResponse + * @memberof vtctldata.RefreshStateByShardRequest * @static - * @param {vtctldata.IGetTopologyPathResponse} message GetTopologyPathResponse message or plain object to encode + * @param {vtctldata.IRefreshStateByShardRequest} message RefreshStateByShardRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetTopologyPathResponse.encodeDelimited = function encodeDelimited(message, writer) { + RefreshStateByShardRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetTopologyPathResponse message from the specified reader or buffer. + * Decodes a RefreshStateByShardRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetTopologyPathResponse + * @memberof vtctldata.RefreshStateByShardRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetTopologyPathResponse} GetTopologyPathResponse + * @returns {vtctldata.RefreshStateByShardRequest} RefreshStateByShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetTopologyPathResponse.decode = function decode(reader, length) { + RefreshStateByShardRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetTopologyPathResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.RefreshStateByShardRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.cell = $root.vtctldata.TopologyCell.decode(reader, reader.uint32()); + message.keyspace = reader.string(); + break; + } + case 2: { + message.shard = reader.string(); + break; + } + case 3: { + if (!(message.cells && message.cells.length)) + message.cells = []; + message.cells.push(reader.string()); break; } default: @@ -120875,131 +142666,153 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetTopologyPathResponse message from the specified reader or buffer, length delimited. + * Decodes a RefreshStateByShardRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetTopologyPathResponse + * @memberof vtctldata.RefreshStateByShardRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetTopologyPathResponse} GetTopologyPathResponse + * @returns {vtctldata.RefreshStateByShardRequest} RefreshStateByShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetTopologyPathResponse.decodeDelimited = function decodeDelimited(reader) { + RefreshStateByShardRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetTopologyPathResponse message. + * Verifies a RefreshStateByShardRequest message. 
* @function verify - * @memberof vtctldata.GetTopologyPathResponse + * @memberof vtctldata.RefreshStateByShardRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetTopologyPathResponse.verify = function verify(message) { + RefreshStateByShardRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.cell != null && message.hasOwnProperty("cell")) { - let error = $root.vtctldata.TopologyCell.verify(message.cell); - if (error) - return "cell." + error; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.cells != null && message.hasOwnProperty("cells")) { + if (!Array.isArray(message.cells)) + return "cells: array expected"; + for (let i = 0; i < message.cells.length; ++i) + if (!$util.isString(message.cells[i])) + return "cells: string[] expected"; } return null; }; /** - * Creates a GetTopologyPathResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RefreshStateByShardRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetTopologyPathResponse + * @memberof vtctldata.RefreshStateByShardRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetTopologyPathResponse} GetTopologyPathResponse + * @returns {vtctldata.RefreshStateByShardRequest} RefreshStateByShardRequest */ - GetTopologyPathResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetTopologyPathResponse) + RefreshStateByShardRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RefreshStateByShardRequest) return object; - let message = new $root.vtctldata.GetTopologyPathResponse(); - if (object.cell != null) { - if (typeof object.cell !== "object") - throw TypeError(".vtctldata.GetTopologyPathResponse.cell: object expected"); - message.cell = $root.vtctldata.TopologyCell.fromObject(object.cell); + let message = new $root.vtctldata.RefreshStateByShardRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + if (object.cells) { + if (!Array.isArray(object.cells)) + throw TypeError(".vtctldata.RefreshStateByShardRequest.cells: array expected"); + message.cells = []; + for (let i = 0; i < object.cells.length; ++i) + message.cells[i] = String(object.cells[i]); } return message; }; /** - * Creates a plain object from a GetTopologyPathResponse message. Also converts values to other types if specified. + * Creates a plain object from a RefreshStateByShardRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetTopologyPathResponse + * @memberof vtctldata.RefreshStateByShardRequest * @static - * @param {vtctldata.GetTopologyPathResponse} message GetTopologyPathResponse + * @param {vtctldata.RefreshStateByShardRequest} message RefreshStateByShardRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetTopologyPathResponse.toObject = function toObject(message, options) { + RefreshStateByShardRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.cell = null; - if (message.cell != null && message.hasOwnProperty("cell")) - object.cell = $root.vtctldata.TopologyCell.toObject(message.cell, options); + if (options.arrays || options.defaults) + object.cells = []; + if (options.defaults) { + object.keyspace = ""; + object.shard = ""; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.cells && message.cells.length) { + object.cells = []; + for (let j = 0; j < message.cells.length; ++j) + object.cells[j] = message.cells[j]; + } return object; }; /** - * Converts this GetTopologyPathResponse to JSON. + * Converts this RefreshStateByShardRequest to JSON. 
* @function toJSON - * @memberof vtctldata.GetTopologyPathResponse + * @memberof vtctldata.RefreshStateByShardRequest * @instance * @returns {Object.} JSON object */ - GetTopologyPathResponse.prototype.toJSON = function toJSON() { + RefreshStateByShardRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetTopologyPathResponse + * Gets the default type url for RefreshStateByShardRequest * @function getTypeUrl - * @memberof vtctldata.GetTopologyPathResponse + * @memberof vtctldata.RefreshStateByShardRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetTopologyPathResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RefreshStateByShardRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetTopologyPathResponse"; + return typeUrlPrefix + "/vtctldata.RefreshStateByShardRequest"; }; - return GetTopologyPathResponse; + return RefreshStateByShardRequest; })(); - vtctldata.TopologyCell = (function() { + vtctldata.RefreshStateByShardResponse = (function() { /** - * Properties of a TopologyCell. + * Properties of a RefreshStateByShardResponse. * @memberof vtctldata - * @interface ITopologyCell - * @property {string|null} [name] TopologyCell name - * @property {string|null} [path] TopologyCell path - * @property {string|null} [data] TopologyCell data - * @property {Array.|null} [children] TopologyCell children + * @interface IRefreshStateByShardResponse + * @property {boolean|null} [is_partial_refresh] RefreshStateByShardResponse is_partial_refresh + * @property {string|null} [partial_refresh_details] RefreshStateByShardResponse partial_refresh_details */ /** - * Constructs a new TopologyCell. + * Constructs a new RefreshStateByShardResponse. 
* @memberof vtctldata - * @classdesc Represents a TopologyCell. - * @implements ITopologyCell + * @classdesc Represents a RefreshStateByShardResponse. + * @implements IRefreshStateByShardResponse * @constructor - * @param {vtctldata.ITopologyCell=} [properties] Properties to set + * @param {vtctldata.IRefreshStateByShardResponse=} [properties] Properties to set */ - function TopologyCell(properties) { - this.children = []; + function RefreshStateByShardResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -121007,120 +142820,89 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * TopologyCell name. - * @member {string} name - * @memberof vtctldata.TopologyCell - * @instance - */ - TopologyCell.prototype.name = ""; - - /** - * TopologyCell path. - * @member {string} path - * @memberof vtctldata.TopologyCell - * @instance - */ - TopologyCell.prototype.path = ""; - - /** - * TopologyCell data. - * @member {string} data - * @memberof vtctldata.TopologyCell + * RefreshStateByShardResponse is_partial_refresh. + * @member {boolean} is_partial_refresh + * @memberof vtctldata.RefreshStateByShardResponse * @instance */ - TopologyCell.prototype.data = ""; + RefreshStateByShardResponse.prototype.is_partial_refresh = false; /** - * TopologyCell children. - * @member {Array.} children - * @memberof vtctldata.TopologyCell + * RefreshStateByShardResponse partial_refresh_details. + * @member {string} partial_refresh_details + * @memberof vtctldata.RefreshStateByShardResponse * @instance */ - TopologyCell.prototype.children = $util.emptyArray; + RefreshStateByShardResponse.prototype.partial_refresh_details = ""; /** - * Creates a new TopologyCell instance using the specified properties. + * Creates a new RefreshStateByShardResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.TopologyCell + * @memberof vtctldata.RefreshStateByShardResponse * @static - * @param {vtctldata.ITopologyCell=} [properties] Properties to set - * @returns {vtctldata.TopologyCell} TopologyCell instance + * @param {vtctldata.IRefreshStateByShardResponse=} [properties] Properties to set + * @returns {vtctldata.RefreshStateByShardResponse} RefreshStateByShardResponse instance */ - TopologyCell.create = function create(properties) { - return new TopologyCell(properties); + RefreshStateByShardResponse.create = function create(properties) { + return new RefreshStateByShardResponse(properties); }; /** - * Encodes the specified TopologyCell message. Does not implicitly {@link vtctldata.TopologyCell.verify|verify} messages. + * Encodes the specified RefreshStateByShardResponse message. Does not implicitly {@link vtctldata.RefreshStateByShardResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.TopologyCell + * @memberof vtctldata.RefreshStateByShardResponse * @static - * @param {vtctldata.ITopologyCell} message TopologyCell message or plain object to encode + * @param {vtctldata.IRefreshStateByShardResponse} message RefreshStateByShardResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - TopologyCell.encode = function encode(message, writer) { + RefreshStateByShardResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.name != null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); - if (message.path != null && Object.hasOwnProperty.call(message, "path")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.path); - if (message.data != null && Object.hasOwnProperty.call(message, "data")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.data); - if (message.children != null && 
message.children.length) - for (let i = 0; i < message.children.length; ++i) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.children[i]); + if (message.is_partial_refresh != null && Object.hasOwnProperty.call(message, "is_partial_refresh")) + writer.uint32(/* id 1, wireType 0 =*/8).bool(message.is_partial_refresh); + if (message.partial_refresh_details != null && Object.hasOwnProperty.call(message, "partial_refresh_details")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.partial_refresh_details); return writer; }; /** - * Encodes the specified TopologyCell message, length delimited. Does not implicitly {@link vtctldata.TopologyCell.verify|verify} messages. + * Encodes the specified RefreshStateByShardResponse message, length delimited. Does not implicitly {@link vtctldata.RefreshStateByShardResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.TopologyCell + * @memberof vtctldata.RefreshStateByShardResponse * @static - * @param {vtctldata.ITopologyCell} message TopologyCell message or plain object to encode + * @param {vtctldata.IRefreshStateByShardResponse} message RefreshStateByShardResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - TopologyCell.encodeDelimited = function encodeDelimited(message, writer) { + RefreshStateByShardResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a TopologyCell message from the specified reader or buffer. + * Decodes a RefreshStateByShardResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.TopologyCell + * @memberof vtctldata.RefreshStateByShardResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.TopologyCell} TopologyCell + * @returns {vtctldata.RefreshStateByShardResponse} RefreshStateByShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TopologyCell.decode = function decode(reader, length) { + RefreshStateByShardResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.TopologyCell(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RefreshStateByShardResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.name = reader.string(); + message.is_partial_refresh = reader.bool(); break; } case 2: { - message.path = reader.string(); - break; - } - case 3: { - message.data = reader.string(); - break; - } - case 4: { - if (!(message.children && message.children.length)) - message.children = []; - message.children.push(reader.string()); + message.partial_refresh_details = reader.string(); break; } default: @@ -121132,160 +142914,131 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a TopologyCell message from the specified reader or buffer, length delimited. + * Decodes a RefreshStateByShardResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.TopologyCell + * @memberof vtctldata.RefreshStateByShardResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.TopologyCell} TopologyCell + * @returns {vtctldata.RefreshStateByShardResponse} RefreshStateByShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TopologyCell.decodeDelimited = function decodeDelimited(reader) { + RefreshStateByShardResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a TopologyCell message. + * Verifies a RefreshStateByShardResponse message. * @function verify - * @memberof vtctldata.TopologyCell + * @memberof vtctldata.RefreshStateByShardResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - TopologyCell.verify = function verify(message) { + RefreshStateByShardResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; - if (message.path != null && message.hasOwnProperty("path")) - if (!$util.isString(message.path)) - return "path: string expected"; - if (message.data != null && message.hasOwnProperty("data")) - if (!$util.isString(message.data)) - return "data: string expected"; - if (message.children != null && message.hasOwnProperty("children")) { - if (!Array.isArray(message.children)) - return "children: array expected"; - for (let i = 0; i < message.children.length; ++i) - if (!$util.isString(message.children[i])) - return "children: string[] expected"; - } + if (message.is_partial_refresh 
!= null && message.hasOwnProperty("is_partial_refresh")) + if (typeof message.is_partial_refresh !== "boolean") + return "is_partial_refresh: boolean expected"; + if (message.partial_refresh_details != null && message.hasOwnProperty("partial_refresh_details")) + if (!$util.isString(message.partial_refresh_details)) + return "partial_refresh_details: string expected"; return null; }; /** - * Creates a TopologyCell message from a plain object. Also converts values to their respective internal types. + * Creates a RefreshStateByShardResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.TopologyCell + * @memberof vtctldata.RefreshStateByShardResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.TopologyCell} TopologyCell + * @returns {vtctldata.RefreshStateByShardResponse} RefreshStateByShardResponse */ - TopologyCell.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.TopologyCell) + RefreshStateByShardResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RefreshStateByShardResponse) return object; - let message = new $root.vtctldata.TopologyCell(); - if (object.name != null) - message.name = String(object.name); - if (object.path != null) - message.path = String(object.path); - if (object.data != null) - message.data = String(object.data); - if (object.children) { - if (!Array.isArray(object.children)) - throw TypeError(".vtctldata.TopologyCell.children: array expected"); - message.children = []; - for (let i = 0; i < object.children.length; ++i) - message.children[i] = String(object.children[i]); - } + let message = new $root.vtctldata.RefreshStateByShardResponse(); + if (object.is_partial_refresh != null) + message.is_partial_refresh = Boolean(object.is_partial_refresh); + if (object.partial_refresh_details != null) + message.partial_refresh_details = 
String(object.partial_refresh_details); return message; }; /** - * Creates a plain object from a TopologyCell message. Also converts values to other types if specified. + * Creates a plain object from a RefreshStateByShardResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.TopologyCell + * @memberof vtctldata.RefreshStateByShardResponse * @static - * @param {vtctldata.TopologyCell} message TopologyCell + * @param {vtctldata.RefreshStateByShardResponse} message RefreshStateByShardResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - TopologyCell.toObject = function toObject(message, options) { + RefreshStateByShardResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.children = []; if (options.defaults) { - object.name = ""; - object.path = ""; - object.data = ""; - } - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; - if (message.path != null && message.hasOwnProperty("path")) - object.path = message.path; - if (message.data != null && message.hasOwnProperty("data")) - object.data = message.data; - if (message.children && message.children.length) { - object.children = []; - for (let j = 0; j < message.children.length; ++j) - object.children[j] = message.children[j]; + object.is_partial_refresh = false; + object.partial_refresh_details = ""; } + if (message.is_partial_refresh != null && message.hasOwnProperty("is_partial_refresh")) + object.is_partial_refresh = message.is_partial_refresh; + if (message.partial_refresh_details != null && message.hasOwnProperty("partial_refresh_details")) + object.partial_refresh_details = message.partial_refresh_details; return object; }; /** - * Converts this TopologyCell to JSON. + * Converts this RefreshStateByShardResponse to JSON. 
* @function toJSON - * @memberof vtctldata.TopologyCell + * @memberof vtctldata.RefreshStateByShardResponse * @instance * @returns {Object.} JSON object */ - TopologyCell.prototype.toJSON = function toJSON() { + RefreshStateByShardResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for TopologyCell + * Gets the default type url for RefreshStateByShardResponse * @function getTypeUrl - * @memberof vtctldata.TopologyCell + * @memberof vtctldata.RefreshStateByShardResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - TopologyCell.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RefreshStateByShardResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.TopologyCell"; + return typeUrlPrefix + "/vtctldata.RefreshStateByShardResponse"; }; - return TopologyCell; + return RefreshStateByShardResponse; })(); - vtctldata.GetVSchemaRequest = (function() { + vtctldata.ReloadSchemaRequest = (function() { /** - * Properties of a GetVSchemaRequest. + * Properties of a ReloadSchemaRequest. * @memberof vtctldata - * @interface IGetVSchemaRequest - * @property {string|null} [keyspace] GetVSchemaRequest keyspace + * @interface IReloadSchemaRequest + * @property {topodata.ITabletAlias|null} [tablet_alias] ReloadSchemaRequest tablet_alias */ /** - * Constructs a new GetVSchemaRequest. + * Constructs a new ReloadSchemaRequest. * @memberof vtctldata - * @classdesc Represents a GetVSchemaRequest. - * @implements IGetVSchemaRequest + * @classdesc Represents a ReloadSchemaRequest. 
+ * @implements IReloadSchemaRequest * @constructor - * @param {vtctldata.IGetVSchemaRequest=} [properties] Properties to set + * @param {vtctldata.IReloadSchemaRequest=} [properties] Properties to set */ - function GetVSchemaRequest(properties) { + function ReloadSchemaRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -121293,75 +143046,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetVSchemaRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.GetVSchemaRequest + * ReloadSchemaRequest tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.ReloadSchemaRequest * @instance */ - GetVSchemaRequest.prototype.keyspace = ""; + ReloadSchemaRequest.prototype.tablet_alias = null; /** - * Creates a new GetVSchemaRequest instance using the specified properties. + * Creates a new ReloadSchemaRequest instance using the specified properties. * @function create - * @memberof vtctldata.GetVSchemaRequest + * @memberof vtctldata.ReloadSchemaRequest * @static - * @param {vtctldata.IGetVSchemaRequest=} [properties] Properties to set - * @returns {vtctldata.GetVSchemaRequest} GetVSchemaRequest instance + * @param {vtctldata.IReloadSchemaRequest=} [properties] Properties to set + * @returns {vtctldata.ReloadSchemaRequest} ReloadSchemaRequest instance */ - GetVSchemaRequest.create = function create(properties) { - return new GetVSchemaRequest(properties); + ReloadSchemaRequest.create = function create(properties) { + return new ReloadSchemaRequest(properties); }; /** - * Encodes the specified GetVSchemaRequest message. Does not implicitly {@link vtctldata.GetVSchemaRequest.verify|verify} messages. + * Encodes the specified ReloadSchemaRequest message. Does not implicitly {@link vtctldata.ReloadSchemaRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetVSchemaRequest + * @memberof vtctldata.ReloadSchemaRequest * @static - * @param {vtctldata.IGetVSchemaRequest} message GetVSchemaRequest message or plain object to encode + * @param {vtctldata.IReloadSchemaRequest} message ReloadSchemaRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetVSchemaRequest.encode = function encode(message, writer) { + ReloadSchemaRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified GetVSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.GetVSchemaRequest.verify|verify} messages. + * Encodes the specified ReloadSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetVSchemaRequest + * @memberof vtctldata.ReloadSchemaRequest * @static - * @param {vtctldata.IGetVSchemaRequest} message GetVSchemaRequest message or plain object to encode + * @param {vtctldata.IReloadSchemaRequest} message ReloadSchemaRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetVSchemaRequest.encodeDelimited = function encodeDelimited(message, writer) { + ReloadSchemaRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetVSchemaRequest message from the specified reader or buffer. + * Decodes a ReloadSchemaRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetVSchemaRequest + * @memberof vtctldata.ReloadSchemaRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetVSchemaRequest} GetVSchemaRequest + * @returns {vtctldata.ReloadSchemaRequest} ReloadSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetVSchemaRequest.decode = function decode(reader, length) { + ReloadSchemaRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetVSchemaRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ReloadSchemaRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); break; } default: @@ -121373,122 +143126,126 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetVSchemaRequest message from the specified reader or buffer, length delimited. + * Decodes a ReloadSchemaRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetVSchemaRequest + * @memberof vtctldata.ReloadSchemaRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetVSchemaRequest} GetVSchemaRequest + * @returns {vtctldata.ReloadSchemaRequest} ReloadSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetVSchemaRequest.decodeDelimited = function decodeDelimited(reader) { + ReloadSchemaRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetVSchemaRequest message. + * Verifies a ReloadSchemaRequest message. 
* @function verify - * @memberof vtctldata.GetVSchemaRequest + * @memberof vtctldata.ReloadSchemaRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetVSchemaRequest.verify = function verify(message) { + ReloadSchemaRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; + } return null; }; /** - * Creates a GetVSchemaRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ReloadSchemaRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetVSchemaRequest + * @memberof vtctldata.ReloadSchemaRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetVSchemaRequest} GetVSchemaRequest + * @returns {vtctldata.ReloadSchemaRequest} ReloadSchemaRequest */ - GetVSchemaRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetVSchemaRequest) + ReloadSchemaRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ReloadSchemaRequest) return object; - let message = new $root.vtctldata.GetVSchemaRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); + let message = new $root.vtctldata.ReloadSchemaRequest(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.ReloadSchemaRequest.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } return message; }; /** - * Creates a plain object from a GetVSchemaRequest message. Also converts values to other types if specified. + * Creates a plain object from a ReloadSchemaRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetVSchemaRequest + * @memberof vtctldata.ReloadSchemaRequest * @static - * @param {vtctldata.GetVSchemaRequest} message GetVSchemaRequest + * @param {vtctldata.ReloadSchemaRequest} message ReloadSchemaRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetVSchemaRequest.toObject = function toObject(message, options) { + ReloadSchemaRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) - object.keyspace = ""; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; + object.tablet_alias = null; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); return object; }; /** - * Converts this GetVSchemaRequest to JSON. + * Converts this ReloadSchemaRequest to JSON. 
* @function toJSON - * @memberof vtctldata.GetVSchemaRequest + * @memberof vtctldata.ReloadSchemaRequest * @instance * @returns {Object.} JSON object */ - GetVSchemaRequest.prototype.toJSON = function toJSON() { + ReloadSchemaRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetVSchemaRequest + * Gets the default type url for ReloadSchemaRequest * @function getTypeUrl - * @memberof vtctldata.GetVSchemaRequest + * @memberof vtctldata.ReloadSchemaRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetVSchemaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReloadSchemaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetVSchemaRequest"; + return typeUrlPrefix + "/vtctldata.ReloadSchemaRequest"; }; - return GetVSchemaRequest; + return ReloadSchemaRequest; })(); - vtctldata.GetVersionRequest = (function() { + vtctldata.ReloadSchemaResponse = (function() { /** - * Properties of a GetVersionRequest. + * Properties of a ReloadSchemaResponse. * @memberof vtctldata - * @interface IGetVersionRequest - * @property {topodata.ITabletAlias|null} [tablet_alias] GetVersionRequest tablet_alias + * @interface IReloadSchemaResponse */ /** - * Constructs a new GetVersionRequest. + * Constructs a new ReloadSchemaResponse. * @memberof vtctldata - * @classdesc Represents a GetVersionRequest. - * @implements IGetVersionRequest + * @classdesc Represents a ReloadSchemaResponse. 
+ * @implements IReloadSchemaResponse * @constructor - * @param {vtctldata.IGetVersionRequest=} [properties] Properties to set + * @param {vtctldata.IReloadSchemaResponse=} [properties] Properties to set */ - function GetVersionRequest(properties) { + function ReloadSchemaResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -121496,77 +143253,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetVersionRequest tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.GetVersionRequest - * @instance - */ - GetVersionRequest.prototype.tablet_alias = null; - - /** - * Creates a new GetVersionRequest instance using the specified properties. + * Creates a new ReloadSchemaResponse instance using the specified properties. * @function create - * @memberof vtctldata.GetVersionRequest + * @memberof vtctldata.ReloadSchemaResponse * @static - * @param {vtctldata.IGetVersionRequest=} [properties] Properties to set - * @returns {vtctldata.GetVersionRequest} GetVersionRequest instance + * @param {vtctldata.IReloadSchemaResponse=} [properties] Properties to set + * @returns {vtctldata.ReloadSchemaResponse} ReloadSchemaResponse instance */ - GetVersionRequest.create = function create(properties) { - return new GetVersionRequest(properties); + ReloadSchemaResponse.create = function create(properties) { + return new ReloadSchemaResponse(properties); }; /** - * Encodes the specified GetVersionRequest message. Does not implicitly {@link vtctldata.GetVersionRequest.verify|verify} messages. + * Encodes the specified ReloadSchemaResponse message. Does not implicitly {@link vtctldata.ReloadSchemaResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetVersionRequest + * @memberof vtctldata.ReloadSchemaResponse * @static - * @param {vtctldata.IGetVersionRequest} message GetVersionRequest message or plain object to encode + * @param {vtctldata.IReloadSchemaResponse} message ReloadSchemaResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetVersionRequest.encode = function encode(message, writer) { + ReloadSchemaResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified GetVersionRequest message, length delimited. Does not implicitly {@link vtctldata.GetVersionRequest.verify|verify} messages. + * Encodes the specified ReloadSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetVersionRequest + * @memberof vtctldata.ReloadSchemaResponse * @static - * @param {vtctldata.IGetVersionRequest} message GetVersionRequest message or plain object to encode + * @param {vtctldata.IReloadSchemaResponse} message ReloadSchemaResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetVersionRequest.encodeDelimited = function encodeDelimited(message, writer) { + ReloadSchemaResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetVersionRequest message from the specified reader or buffer. + * Decodes a ReloadSchemaResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.GetVersionRequest + * @memberof vtctldata.ReloadSchemaResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetVersionRequest} GetVersionRequest + * @returns {vtctldata.ReloadSchemaResponse} ReloadSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetVersionRequest.decode = function decode(reader, length) { + ReloadSchemaResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetVersionRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ReloadSchemaResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } default: reader.skipType(tag & 7); break; @@ -121576,127 +143319,112 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetVersionRequest message from the specified reader or buffer, length delimited. + * Decodes a ReloadSchemaResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.GetVersionRequest + * @memberof vtctldata.ReloadSchemaResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetVersionRequest} GetVersionRequest + * @returns {vtctldata.ReloadSchemaResponse} ReloadSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetVersionRequest.decodeDelimited = function decodeDelimited(reader) { + ReloadSchemaResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetVersionRequest message. + * Verifies a ReloadSchemaResponse message. * @function verify - * @memberof vtctldata.GetVersionRequest + * @memberof vtctldata.ReloadSchemaResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetVersionRequest.verify = function verify(message) { + ReloadSchemaResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } return null; }; /** - * Creates a GetVersionRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ReloadSchemaResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetVersionRequest + * @memberof vtctldata.ReloadSchemaResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetVersionRequest} GetVersionRequest + * @returns {vtctldata.ReloadSchemaResponse} ReloadSchemaResponse */ - GetVersionRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetVersionRequest) + ReloadSchemaResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ReloadSchemaResponse) return object; - let message = new $root.vtctldata.GetVersionRequest(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.GetVersionRequest.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } - return message; + return new $root.vtctldata.ReloadSchemaResponse(); }; /** - * Creates a plain object from a GetVersionRequest message. Also converts values to other types if specified. + * Creates a plain object from a ReloadSchemaResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetVersionRequest + * @memberof vtctldata.ReloadSchemaResponse * @static - * @param {vtctldata.GetVersionRequest} message GetVersionRequest + * @param {vtctldata.ReloadSchemaResponse} message ReloadSchemaResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetVersionRequest.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) - object.tablet_alias = null; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - return object; + ReloadSchemaResponse.toObject = function toObject() { + return {}; }; /** - * Converts this GetVersionRequest to JSON. + * Converts this ReloadSchemaResponse to JSON. * @function toJSON - * @memberof vtctldata.GetVersionRequest + * @memberof vtctldata.ReloadSchemaResponse * @instance * @returns {Object.} JSON object */ - GetVersionRequest.prototype.toJSON = function toJSON() { + ReloadSchemaResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetVersionRequest + * Gets the default type url for ReloadSchemaResponse * @function getTypeUrl - * @memberof vtctldata.GetVersionRequest + * @memberof vtctldata.ReloadSchemaResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetVersionRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReloadSchemaResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetVersionRequest"; + return typeUrlPrefix + "/vtctldata.ReloadSchemaResponse"; }; - return GetVersionRequest; + return 
ReloadSchemaResponse; })(); - vtctldata.GetVersionResponse = (function() { + vtctldata.ReloadSchemaKeyspaceRequest = (function() { /** - * Properties of a GetVersionResponse. + * Properties of a ReloadSchemaKeyspaceRequest. * @memberof vtctldata - * @interface IGetVersionResponse - * @property {string|null} [version] GetVersionResponse version + * @interface IReloadSchemaKeyspaceRequest + * @property {string|null} [keyspace] ReloadSchemaKeyspaceRequest keyspace + * @property {string|null} [wait_position] ReloadSchemaKeyspaceRequest wait_position + * @property {boolean|null} [include_primary] ReloadSchemaKeyspaceRequest include_primary + * @property {number|null} [concurrency] ReloadSchemaKeyspaceRequest concurrency */ /** - * Constructs a new GetVersionResponse. + * Constructs a new ReloadSchemaKeyspaceRequest. * @memberof vtctldata - * @classdesc Represents a GetVersionResponse. - * @implements IGetVersionResponse + * @classdesc Represents a ReloadSchemaKeyspaceRequest. + * @implements IReloadSchemaKeyspaceRequest * @constructor - * @param {vtctldata.IGetVersionResponse=} [properties] Properties to set + * @param {vtctldata.IReloadSchemaKeyspaceRequest=} [properties] Properties to set */ - function GetVersionResponse(properties) { + function ReloadSchemaKeyspaceRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -121704,75 +143432,117 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetVersionResponse version. - * @member {string} version - * @memberof vtctldata.GetVersionResponse + * ReloadSchemaKeyspaceRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.ReloadSchemaKeyspaceRequest * @instance */ - GetVersionResponse.prototype.version = ""; + ReloadSchemaKeyspaceRequest.prototype.keyspace = ""; /** - * Creates a new GetVersionResponse instance using the specified properties. + * ReloadSchemaKeyspaceRequest wait_position. 
+ * @member {string} wait_position + * @memberof vtctldata.ReloadSchemaKeyspaceRequest + * @instance + */ + ReloadSchemaKeyspaceRequest.prototype.wait_position = ""; + + /** + * ReloadSchemaKeyspaceRequest include_primary. + * @member {boolean} include_primary + * @memberof vtctldata.ReloadSchemaKeyspaceRequest + * @instance + */ + ReloadSchemaKeyspaceRequest.prototype.include_primary = false; + + /** + * ReloadSchemaKeyspaceRequest concurrency. + * @member {number} concurrency + * @memberof vtctldata.ReloadSchemaKeyspaceRequest + * @instance + */ + ReloadSchemaKeyspaceRequest.prototype.concurrency = 0; + + /** + * Creates a new ReloadSchemaKeyspaceRequest instance using the specified properties. * @function create - * @memberof vtctldata.GetVersionResponse + * @memberof vtctldata.ReloadSchemaKeyspaceRequest * @static - * @param {vtctldata.IGetVersionResponse=} [properties] Properties to set - * @returns {vtctldata.GetVersionResponse} GetVersionResponse instance + * @param {vtctldata.IReloadSchemaKeyspaceRequest=} [properties] Properties to set + * @returns {vtctldata.ReloadSchemaKeyspaceRequest} ReloadSchemaKeyspaceRequest instance */ - GetVersionResponse.create = function create(properties) { - return new GetVersionResponse(properties); + ReloadSchemaKeyspaceRequest.create = function create(properties) { + return new ReloadSchemaKeyspaceRequest(properties); }; /** - * Encodes the specified GetVersionResponse message. Does not implicitly {@link vtctldata.GetVersionResponse.verify|verify} messages. + * Encodes the specified ReloadSchemaKeyspaceRequest message. Does not implicitly {@link vtctldata.ReloadSchemaKeyspaceRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.GetVersionResponse + * @memberof vtctldata.ReloadSchemaKeyspaceRequest * @static - * @param {vtctldata.IGetVersionResponse} message GetVersionResponse message or plain object to encode + * @param {vtctldata.IReloadSchemaKeyspaceRequest} message ReloadSchemaKeyspaceRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetVersionResponse.encode = function encode(message, writer) { + ReloadSchemaKeyspaceRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.version != null && Object.hasOwnProperty.call(message, "version")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.version); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.wait_position != null && Object.hasOwnProperty.call(message, "wait_position")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.wait_position); + if (message.include_primary != null && Object.hasOwnProperty.call(message, "include_primary")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.include_primary); + if (message.concurrency != null && Object.hasOwnProperty.call(message, "concurrency")) + writer.uint32(/* id 4, wireType 0 =*/32).uint32(message.concurrency); return writer; }; /** - * Encodes the specified GetVersionResponse message, length delimited. Does not implicitly {@link vtctldata.GetVersionResponse.verify|verify} messages. + * Encodes the specified ReloadSchemaKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaKeyspaceRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetVersionResponse + * @memberof vtctldata.ReloadSchemaKeyspaceRequest * @static - * @param {vtctldata.IGetVersionResponse} message GetVersionResponse message or plain object to encode + * @param {vtctldata.IReloadSchemaKeyspaceRequest} message ReloadSchemaKeyspaceRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetVersionResponse.encodeDelimited = function encodeDelimited(message, writer) { + ReloadSchemaKeyspaceRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetVersionResponse message from the specified reader or buffer. + * Decodes a ReloadSchemaKeyspaceRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetVersionResponse + * @memberof vtctldata.ReloadSchemaKeyspaceRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetVersionResponse} GetVersionResponse + * @returns {vtctldata.ReloadSchemaKeyspaceRequest} ReloadSchemaKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetVersionResponse.decode = function decode(reader, length) { + ReloadSchemaKeyspaceRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetVersionResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ReloadSchemaKeyspaceRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.version = reader.string(); + message.keyspace = reader.string(); + break; + } + case 2: { + message.wait_position = reader.string(); + break; + } + case 3: { + message.include_primary = reader.bool(); + break; + } + case 4: { + message.concurrency = reader.uint32(); break; } default: @@ -121784,122 +143554,148 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetVersionResponse message from the specified reader or buffer, length delimited. + * Decodes a ReloadSchemaKeyspaceRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetVersionResponse + * @memberof vtctldata.ReloadSchemaKeyspaceRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetVersionResponse} GetVersionResponse + * @returns {vtctldata.ReloadSchemaKeyspaceRequest} ReloadSchemaKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetVersionResponse.decodeDelimited = function decodeDelimited(reader) { + ReloadSchemaKeyspaceRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetVersionResponse message. + * Verifies a ReloadSchemaKeyspaceRequest message. 
* @function verify - * @memberof vtctldata.GetVersionResponse + * @memberof vtctldata.ReloadSchemaKeyspaceRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetVersionResponse.verify = function verify(message) { + ReloadSchemaKeyspaceRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.version != null && message.hasOwnProperty("version")) - if (!$util.isString(message.version)) - return "version: string expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.wait_position != null && message.hasOwnProperty("wait_position")) + if (!$util.isString(message.wait_position)) + return "wait_position: string expected"; + if (message.include_primary != null && message.hasOwnProperty("include_primary")) + if (typeof message.include_primary !== "boolean") + return "include_primary: boolean expected"; + if (message.concurrency != null && message.hasOwnProperty("concurrency")) + if (!$util.isInteger(message.concurrency)) + return "concurrency: integer expected"; return null; }; /** - * Creates a GetVersionResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ReloadSchemaKeyspaceRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetVersionResponse + * @memberof vtctldata.ReloadSchemaKeyspaceRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetVersionResponse} GetVersionResponse + * @returns {vtctldata.ReloadSchemaKeyspaceRequest} ReloadSchemaKeyspaceRequest */ - GetVersionResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetVersionResponse) + ReloadSchemaKeyspaceRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ReloadSchemaKeyspaceRequest) return object; - let message = new $root.vtctldata.GetVersionResponse(); - if (object.version != null) - message.version = String(object.version); + let message = new $root.vtctldata.ReloadSchemaKeyspaceRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.wait_position != null) + message.wait_position = String(object.wait_position); + if (object.include_primary != null) + message.include_primary = Boolean(object.include_primary); + if (object.concurrency != null) + message.concurrency = object.concurrency >>> 0; return message; }; /** - * Creates a plain object from a GetVersionResponse message. Also converts values to other types if specified. + * Creates a plain object from a ReloadSchemaKeyspaceRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetVersionResponse + * @memberof vtctldata.ReloadSchemaKeyspaceRequest * @static - * @param {vtctldata.GetVersionResponse} message GetVersionResponse + * @param {vtctldata.ReloadSchemaKeyspaceRequest} message ReloadSchemaKeyspaceRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetVersionResponse.toObject = function toObject(message, options) { + ReloadSchemaKeyspaceRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.version = ""; - if (message.version != null && message.hasOwnProperty("version")) - object.version = message.version; + if (options.defaults) { + object.keyspace = ""; + object.wait_position = ""; + object.include_primary = false; + object.concurrency = 0; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.wait_position != null && message.hasOwnProperty("wait_position")) + object.wait_position = message.wait_position; + if (message.include_primary != null && message.hasOwnProperty("include_primary")) + object.include_primary = message.include_primary; + if (message.concurrency != null && message.hasOwnProperty("concurrency")) + object.concurrency = message.concurrency; return object; }; /** - * Converts this GetVersionResponse to JSON. + * Converts this ReloadSchemaKeyspaceRequest to JSON. 
* @function toJSON - * @memberof vtctldata.GetVersionResponse + * @memberof vtctldata.ReloadSchemaKeyspaceRequest * @instance * @returns {Object.} JSON object */ - GetVersionResponse.prototype.toJSON = function toJSON() { + ReloadSchemaKeyspaceRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetVersionResponse + * Gets the default type url for ReloadSchemaKeyspaceRequest * @function getTypeUrl - * @memberof vtctldata.GetVersionResponse + * @memberof vtctldata.ReloadSchemaKeyspaceRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetVersionResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReloadSchemaKeyspaceRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetVersionResponse"; + return typeUrlPrefix + "/vtctldata.ReloadSchemaKeyspaceRequest"; }; - return GetVersionResponse; + return ReloadSchemaKeyspaceRequest; })(); - vtctldata.GetVSchemaResponse = (function() { + vtctldata.ReloadSchemaKeyspaceResponse = (function() { /** - * Properties of a GetVSchemaResponse. + * Properties of a ReloadSchemaKeyspaceResponse. * @memberof vtctldata - * @interface IGetVSchemaResponse - * @property {vschema.IKeyspace|null} [v_schema] GetVSchemaResponse v_schema + * @interface IReloadSchemaKeyspaceResponse + * @property {Array.|null} [events] ReloadSchemaKeyspaceResponse events */ /** - * Constructs a new GetVSchemaResponse. + * Constructs a new ReloadSchemaKeyspaceResponse. * @memberof vtctldata - * @classdesc Represents a GetVSchemaResponse. - * @implements IGetVSchemaResponse + * @classdesc Represents a ReloadSchemaKeyspaceResponse. 
+ * @implements IReloadSchemaKeyspaceResponse * @constructor - * @param {vtctldata.IGetVSchemaResponse=} [properties] Properties to set + * @param {vtctldata.IReloadSchemaKeyspaceResponse=} [properties] Properties to set */ - function GetVSchemaResponse(properties) { + function ReloadSchemaKeyspaceResponse(properties) { + this.events = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -121907,75 +143703,78 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetVSchemaResponse v_schema. - * @member {vschema.IKeyspace|null|undefined} v_schema - * @memberof vtctldata.GetVSchemaResponse + * ReloadSchemaKeyspaceResponse events. + * @member {Array.} events + * @memberof vtctldata.ReloadSchemaKeyspaceResponse * @instance */ - GetVSchemaResponse.prototype.v_schema = null; + ReloadSchemaKeyspaceResponse.prototype.events = $util.emptyArray; /** - * Creates a new GetVSchemaResponse instance using the specified properties. + * Creates a new ReloadSchemaKeyspaceResponse instance using the specified properties. * @function create - * @memberof vtctldata.GetVSchemaResponse + * @memberof vtctldata.ReloadSchemaKeyspaceResponse * @static - * @param {vtctldata.IGetVSchemaResponse=} [properties] Properties to set - * @returns {vtctldata.GetVSchemaResponse} GetVSchemaResponse instance + * @param {vtctldata.IReloadSchemaKeyspaceResponse=} [properties] Properties to set + * @returns {vtctldata.ReloadSchemaKeyspaceResponse} ReloadSchemaKeyspaceResponse instance */ - GetVSchemaResponse.create = function create(properties) { - return new GetVSchemaResponse(properties); + ReloadSchemaKeyspaceResponse.create = function create(properties) { + return new ReloadSchemaKeyspaceResponse(properties); }; /** - * Encodes the specified GetVSchemaResponse message. Does not implicitly {@link vtctldata.GetVSchemaResponse.verify|verify} messages. + * Encodes the specified ReloadSchemaKeyspaceResponse message. 
Does not implicitly {@link vtctldata.ReloadSchemaKeyspaceResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.GetVSchemaResponse + * @memberof vtctldata.ReloadSchemaKeyspaceResponse * @static - * @param {vtctldata.IGetVSchemaResponse} message GetVSchemaResponse message or plain object to encode + * @param {vtctldata.IReloadSchemaKeyspaceResponse} message ReloadSchemaKeyspaceResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetVSchemaResponse.encode = function encode(message, writer) { + ReloadSchemaKeyspaceResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.v_schema != null && Object.hasOwnProperty.call(message, "v_schema")) - $root.vschema.Keyspace.encode(message.v_schema, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.events != null && message.events.length) + for (let i = 0; i < message.events.length; ++i) + $root.logutil.Event.encode(message.events[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified GetVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.GetVSchemaResponse.verify|verify} messages. + * Encodes the specified ReloadSchemaKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaKeyspaceResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetVSchemaResponse + * @memberof vtctldata.ReloadSchemaKeyspaceResponse * @static - * @param {vtctldata.IGetVSchemaResponse} message GetVSchemaResponse message or plain object to encode + * @param {vtctldata.IReloadSchemaKeyspaceResponse} message ReloadSchemaKeyspaceResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetVSchemaResponse.encodeDelimited = function encodeDelimited(message, writer) { + ReloadSchemaKeyspaceResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetVSchemaResponse message from the specified reader or buffer. + * Decodes a ReloadSchemaKeyspaceResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetVSchemaResponse + * @memberof vtctldata.ReloadSchemaKeyspaceResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetVSchemaResponse} GetVSchemaResponse + * @returns {vtctldata.ReloadSchemaKeyspaceResponse} ReloadSchemaKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetVSchemaResponse.decode = function decode(reader, length) { + ReloadSchemaKeyspaceResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetVSchemaResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ReloadSchemaKeyspaceResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.v_schema = $root.vschema.Keyspace.decode(reader, reader.uint32()); + if (!(message.events && message.events.length)) + message.events = []; + message.events.push($root.logutil.Event.decode(reader, reader.uint32())); break; } default: @@ -121987,129 +143786,143 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetVSchemaResponse message from the specified reader or buffer, length delimited. + * Decodes a ReloadSchemaKeyspaceResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetVSchemaResponse + * @memberof vtctldata.ReloadSchemaKeyspaceResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetVSchemaResponse} GetVSchemaResponse + * @returns {vtctldata.ReloadSchemaKeyspaceResponse} ReloadSchemaKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetVSchemaResponse.decodeDelimited = function decodeDelimited(reader) { + ReloadSchemaKeyspaceResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetVSchemaResponse message. + * Verifies a ReloadSchemaKeyspaceResponse message. 
* @function verify - * @memberof vtctldata.GetVSchemaResponse + * @memberof vtctldata.ReloadSchemaKeyspaceResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetVSchemaResponse.verify = function verify(message) { + ReloadSchemaKeyspaceResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.v_schema != null && message.hasOwnProperty("v_schema")) { - let error = $root.vschema.Keyspace.verify(message.v_schema); - if (error) - return "v_schema." + error; + if (message.events != null && message.hasOwnProperty("events")) { + if (!Array.isArray(message.events)) + return "events: array expected"; + for (let i = 0; i < message.events.length; ++i) { + let error = $root.logutil.Event.verify(message.events[i]); + if (error) + return "events." + error; + } } return null; }; /** - * Creates a GetVSchemaResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ReloadSchemaKeyspaceResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetVSchemaResponse + * @memberof vtctldata.ReloadSchemaKeyspaceResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetVSchemaResponse} GetVSchemaResponse + * @returns {vtctldata.ReloadSchemaKeyspaceResponse} ReloadSchemaKeyspaceResponse */ - GetVSchemaResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetVSchemaResponse) + ReloadSchemaKeyspaceResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ReloadSchemaKeyspaceResponse) return object; - let message = new $root.vtctldata.GetVSchemaResponse(); - if (object.v_schema != null) { - if (typeof object.v_schema !== "object") - throw TypeError(".vtctldata.GetVSchemaResponse.v_schema: object expected"); - message.v_schema = $root.vschema.Keyspace.fromObject(object.v_schema); + let message = new $root.vtctldata.ReloadSchemaKeyspaceResponse(); + if (object.events) { + if (!Array.isArray(object.events)) + throw TypeError(".vtctldata.ReloadSchemaKeyspaceResponse.events: array expected"); + message.events = []; + for (let i = 0; i < object.events.length; ++i) { + if (typeof object.events[i] !== "object") + throw TypeError(".vtctldata.ReloadSchemaKeyspaceResponse.events: object expected"); + message.events[i] = $root.logutil.Event.fromObject(object.events[i]); + } } return message; }; /** - * Creates a plain object from a GetVSchemaResponse message. Also converts values to other types if specified. + * Creates a plain object from a ReloadSchemaKeyspaceResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetVSchemaResponse + * @memberof vtctldata.ReloadSchemaKeyspaceResponse * @static - * @param {vtctldata.GetVSchemaResponse} message GetVSchemaResponse + * @param {vtctldata.ReloadSchemaKeyspaceResponse} message ReloadSchemaKeyspaceResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetVSchemaResponse.toObject = function toObject(message, options) { + ReloadSchemaKeyspaceResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.v_schema = null; - if (message.v_schema != null && message.hasOwnProperty("v_schema")) - object.v_schema = $root.vschema.Keyspace.toObject(message.v_schema, options); + if (options.arrays || options.defaults) + object.events = []; + if (message.events && message.events.length) { + object.events = []; + for (let j = 0; j < message.events.length; ++j) + object.events[j] = $root.logutil.Event.toObject(message.events[j], options); + } return object; }; /** - * Converts this GetVSchemaResponse to JSON. + * Converts this ReloadSchemaKeyspaceResponse to JSON. 
* @function toJSON - * @memberof vtctldata.GetVSchemaResponse + * @memberof vtctldata.ReloadSchemaKeyspaceResponse * @instance * @returns {Object.} JSON object */ - GetVSchemaResponse.prototype.toJSON = function toJSON() { + ReloadSchemaKeyspaceResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetVSchemaResponse + * Gets the default type url for ReloadSchemaKeyspaceResponse * @function getTypeUrl - * @memberof vtctldata.GetVSchemaResponse + * @memberof vtctldata.ReloadSchemaKeyspaceResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetVSchemaResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReloadSchemaKeyspaceResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetVSchemaResponse"; + return typeUrlPrefix + "/vtctldata.ReloadSchemaKeyspaceResponse"; }; - return GetVSchemaResponse; + return ReloadSchemaKeyspaceResponse; })(); - vtctldata.GetWorkflowsRequest = (function() { + vtctldata.ReloadSchemaShardRequest = (function() { /** - * Properties of a GetWorkflowsRequest. + * Properties of a ReloadSchemaShardRequest. 
* @memberof vtctldata - * @interface IGetWorkflowsRequest - * @property {string|null} [keyspace] GetWorkflowsRequest keyspace - * @property {boolean|null} [active_only] GetWorkflowsRequest active_only - * @property {boolean|null} [name_only] GetWorkflowsRequest name_only + * @interface IReloadSchemaShardRequest + * @property {string|null} [keyspace] ReloadSchemaShardRequest keyspace + * @property {string|null} [shard] ReloadSchemaShardRequest shard + * @property {string|null} [wait_position] ReloadSchemaShardRequest wait_position + * @property {boolean|null} [include_primary] ReloadSchemaShardRequest include_primary + * @property {number|null} [concurrency] ReloadSchemaShardRequest concurrency */ /** - * Constructs a new GetWorkflowsRequest. + * Constructs a new ReloadSchemaShardRequest. * @memberof vtctldata - * @classdesc Represents a GetWorkflowsRequest. - * @implements IGetWorkflowsRequest + * @classdesc Represents a ReloadSchemaShardRequest. + * @implements IReloadSchemaShardRequest * @constructor - * @param {vtctldata.IGetWorkflowsRequest=} [properties] Properties to set + * @param {vtctldata.IReloadSchemaShardRequest=} [properties] Properties to set */ - function GetWorkflowsRequest(properties) { + function ReloadSchemaShardRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -122117,90 +143930,110 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetWorkflowsRequest keyspace. + * ReloadSchemaShardRequest keyspace. * @member {string} keyspace - * @memberof vtctldata.GetWorkflowsRequest + * @memberof vtctldata.ReloadSchemaShardRequest * @instance */ - GetWorkflowsRequest.prototype.keyspace = ""; + ReloadSchemaShardRequest.prototype.keyspace = ""; /** - * GetWorkflowsRequest active_only. - * @member {boolean} active_only - * @memberof vtctldata.GetWorkflowsRequest + * ReloadSchemaShardRequest shard. 
+ * @member {string} shard + * @memberof vtctldata.ReloadSchemaShardRequest * @instance */ - GetWorkflowsRequest.prototype.active_only = false; + ReloadSchemaShardRequest.prototype.shard = ""; /** - * GetWorkflowsRequest name_only. - * @member {boolean} name_only - * @memberof vtctldata.GetWorkflowsRequest + * ReloadSchemaShardRequest wait_position. + * @member {string} wait_position + * @memberof vtctldata.ReloadSchemaShardRequest * @instance */ - GetWorkflowsRequest.prototype.name_only = false; + ReloadSchemaShardRequest.prototype.wait_position = ""; /** - * Creates a new GetWorkflowsRequest instance using the specified properties. + * ReloadSchemaShardRequest include_primary. + * @member {boolean} include_primary + * @memberof vtctldata.ReloadSchemaShardRequest + * @instance + */ + ReloadSchemaShardRequest.prototype.include_primary = false; + + /** + * ReloadSchemaShardRequest concurrency. + * @member {number} concurrency + * @memberof vtctldata.ReloadSchemaShardRequest + * @instance + */ + ReloadSchemaShardRequest.prototype.concurrency = 0; + + /** + * Creates a new ReloadSchemaShardRequest instance using the specified properties. * @function create - * @memberof vtctldata.GetWorkflowsRequest + * @memberof vtctldata.ReloadSchemaShardRequest * @static - * @param {vtctldata.IGetWorkflowsRequest=} [properties] Properties to set - * @returns {vtctldata.GetWorkflowsRequest} GetWorkflowsRequest instance + * @param {vtctldata.IReloadSchemaShardRequest=} [properties] Properties to set + * @returns {vtctldata.ReloadSchemaShardRequest} ReloadSchemaShardRequest instance */ - GetWorkflowsRequest.create = function create(properties) { - return new GetWorkflowsRequest(properties); + ReloadSchemaShardRequest.create = function create(properties) { + return new ReloadSchemaShardRequest(properties); }; /** - * Encodes the specified GetWorkflowsRequest message. Does not implicitly {@link vtctldata.GetWorkflowsRequest.verify|verify} messages. 
+ * Encodes the specified ReloadSchemaShardRequest message. Does not implicitly {@link vtctldata.ReloadSchemaShardRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.GetWorkflowsRequest + * @memberof vtctldata.ReloadSchemaShardRequest * @static - * @param {vtctldata.IGetWorkflowsRequest} message GetWorkflowsRequest message or plain object to encode + * @param {vtctldata.IReloadSchemaShardRequest} message ReloadSchemaShardRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetWorkflowsRequest.encode = function encode(message, writer) { + ReloadSchemaShardRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.active_only != null && Object.hasOwnProperty.call(message, "active_only")) - writer.uint32(/* id 2, wireType 0 =*/16).bool(message.active_only); - if (message.name_only != null && Object.hasOwnProperty.call(message, "name_only")) - writer.uint32(/* id 3, wireType 0 =*/24).bool(message.name_only); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.wait_position != null && Object.hasOwnProperty.call(message, "wait_position")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.wait_position); + if (message.include_primary != null && Object.hasOwnProperty.call(message, "include_primary")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.include_primary); + if (message.concurrency != null && Object.hasOwnProperty.call(message, "concurrency")) + writer.uint32(/* id 5, wireType 0 =*/40).uint32(message.concurrency); return writer; }; /** - * Encodes the specified GetWorkflowsRequest message, length delimited. 
Does not implicitly {@link vtctldata.GetWorkflowsRequest.verify|verify} messages. + * Encodes the specified ReloadSchemaShardRequest message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaShardRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.GetWorkflowsRequest + * @memberof vtctldata.ReloadSchemaShardRequest * @static - * @param {vtctldata.IGetWorkflowsRequest} message GetWorkflowsRequest message or plain object to encode + * @param {vtctldata.IReloadSchemaShardRequest} message ReloadSchemaShardRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetWorkflowsRequest.encodeDelimited = function encodeDelimited(message, writer) { + ReloadSchemaShardRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetWorkflowsRequest message from the specified reader or buffer. + * Decodes a ReloadSchemaShardRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetWorkflowsRequest + * @memberof vtctldata.ReloadSchemaShardRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetWorkflowsRequest} GetWorkflowsRequest + * @returns {vtctldata.ReloadSchemaShardRequest} ReloadSchemaShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetWorkflowsRequest.decode = function decode(reader, length) { + ReloadSchemaShardRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetWorkflowsRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ReloadSchemaShardRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -122209,11 +144042,19 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 2: { - message.active_only = reader.bool(); + message.shard = reader.string(); break; } case 3: { - message.name_only = reader.bool(); + message.wait_position = reader.string(); + break; + } + case 4: { + message.include_primary = reader.bool(); + break; + } + case 5: { + message.concurrency = reader.uint32(); break; } default: @@ -122225,140 +144066,156 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetWorkflowsRequest message from the specified reader or buffer, length delimited. + * Decodes a ReloadSchemaShardRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetWorkflowsRequest + * @memberof vtctldata.ReloadSchemaShardRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetWorkflowsRequest} GetWorkflowsRequest + * @returns {vtctldata.ReloadSchemaShardRequest} ReloadSchemaShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetWorkflowsRequest.decodeDelimited = function decodeDelimited(reader) { + ReloadSchemaShardRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetWorkflowsRequest message. + * Verifies a ReloadSchemaShardRequest message. 
* @function verify - * @memberof vtctldata.GetWorkflowsRequest + * @memberof vtctldata.ReloadSchemaShardRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetWorkflowsRequest.verify = function verify(message) { + ReloadSchemaShardRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.keyspace != null && message.hasOwnProperty("keyspace")) if (!$util.isString(message.keyspace)) return "keyspace: string expected"; - if (message.active_only != null && message.hasOwnProperty("active_only")) - if (typeof message.active_only !== "boolean") - return "active_only: boolean expected"; - if (message.name_only != null && message.hasOwnProperty("name_only")) - if (typeof message.name_only !== "boolean") - return "name_only: boolean expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.wait_position != null && message.hasOwnProperty("wait_position")) + if (!$util.isString(message.wait_position)) + return "wait_position: string expected"; + if (message.include_primary != null && message.hasOwnProperty("include_primary")) + if (typeof message.include_primary !== "boolean") + return "include_primary: boolean expected"; + if (message.concurrency != null && message.hasOwnProperty("concurrency")) + if (!$util.isInteger(message.concurrency)) + return "concurrency: integer expected"; return null; }; /** - * Creates a GetWorkflowsRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ReloadSchemaShardRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetWorkflowsRequest + * @memberof vtctldata.ReloadSchemaShardRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetWorkflowsRequest} GetWorkflowsRequest + * @returns {vtctldata.ReloadSchemaShardRequest} ReloadSchemaShardRequest */ - GetWorkflowsRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetWorkflowsRequest) + ReloadSchemaShardRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ReloadSchemaShardRequest) return object; - let message = new $root.vtctldata.GetWorkflowsRequest(); + let message = new $root.vtctldata.ReloadSchemaShardRequest(); if (object.keyspace != null) message.keyspace = String(object.keyspace); - if (object.active_only != null) - message.active_only = Boolean(object.active_only); - if (object.name_only != null) - message.name_only = Boolean(object.name_only); + if (object.shard != null) + message.shard = String(object.shard); + if (object.wait_position != null) + message.wait_position = String(object.wait_position); + if (object.include_primary != null) + message.include_primary = Boolean(object.include_primary); + if (object.concurrency != null) + message.concurrency = object.concurrency >>> 0; return message; }; /** - * Creates a plain object from a GetWorkflowsRequest message. Also converts values to other types if specified. + * Creates a plain object from a ReloadSchemaShardRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetWorkflowsRequest + * @memberof vtctldata.ReloadSchemaShardRequest * @static - * @param {vtctldata.GetWorkflowsRequest} message GetWorkflowsRequest + * @param {vtctldata.ReloadSchemaShardRequest} message ReloadSchemaShardRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetWorkflowsRequest.toObject = function toObject(message, options) { + ReloadSchemaShardRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { object.keyspace = ""; - object.active_only = false; - object.name_only = false; + object.shard = ""; + object.wait_position = ""; + object.include_primary = false; + object.concurrency = 0; } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; - if (message.active_only != null && message.hasOwnProperty("active_only")) - object.active_only = message.active_only; - if (message.name_only != null && message.hasOwnProperty("name_only")) - object.name_only = message.name_only; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.wait_position != null && message.hasOwnProperty("wait_position")) + object.wait_position = message.wait_position; + if (message.include_primary != null && message.hasOwnProperty("include_primary")) + object.include_primary = message.include_primary; + if (message.concurrency != null && message.hasOwnProperty("concurrency")) + object.concurrency = message.concurrency; return object; }; /** - * Converts this GetWorkflowsRequest to JSON. + * Converts this ReloadSchemaShardRequest to JSON. 
* @function toJSON - * @memberof vtctldata.GetWorkflowsRequest + * @memberof vtctldata.ReloadSchemaShardRequest * @instance * @returns {Object.} JSON object */ - GetWorkflowsRequest.prototype.toJSON = function toJSON() { + ReloadSchemaShardRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetWorkflowsRequest + * Gets the default type url for ReloadSchemaShardRequest * @function getTypeUrl - * @memberof vtctldata.GetWorkflowsRequest + * @memberof vtctldata.ReloadSchemaShardRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetWorkflowsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReloadSchemaShardRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetWorkflowsRequest"; + return typeUrlPrefix + "/vtctldata.ReloadSchemaShardRequest"; }; - return GetWorkflowsRequest; + return ReloadSchemaShardRequest; })(); - vtctldata.GetWorkflowsResponse = (function() { + vtctldata.ReloadSchemaShardResponse = (function() { /** - * Properties of a GetWorkflowsResponse. + * Properties of a ReloadSchemaShardResponse. * @memberof vtctldata - * @interface IGetWorkflowsResponse - * @property {Array.|null} [workflows] GetWorkflowsResponse workflows + * @interface IReloadSchemaShardResponse + * @property {Array.|null} [events] ReloadSchemaShardResponse events */ /** - * Constructs a new GetWorkflowsResponse. + * Constructs a new ReloadSchemaShardResponse. * @memberof vtctldata - * @classdesc Represents a GetWorkflowsResponse. - * @implements IGetWorkflowsResponse + * @classdesc Represents a ReloadSchemaShardResponse. 
+ * @implements IReloadSchemaShardResponse * @constructor - * @param {vtctldata.IGetWorkflowsResponse=} [properties] Properties to set + * @param {vtctldata.IReloadSchemaShardResponse=} [properties] Properties to set */ - function GetWorkflowsResponse(properties) { - this.workflows = []; + function ReloadSchemaShardResponse(properties) { + this.events = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -122366,78 +144223,78 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * GetWorkflowsResponse workflows. - * @member {Array.} workflows - * @memberof vtctldata.GetWorkflowsResponse + * ReloadSchemaShardResponse events. + * @member {Array.} events + * @memberof vtctldata.ReloadSchemaShardResponse * @instance */ - GetWorkflowsResponse.prototype.workflows = $util.emptyArray; + ReloadSchemaShardResponse.prototype.events = $util.emptyArray; /** - * Creates a new GetWorkflowsResponse instance using the specified properties. + * Creates a new ReloadSchemaShardResponse instance using the specified properties. * @function create - * @memberof vtctldata.GetWorkflowsResponse + * @memberof vtctldata.ReloadSchemaShardResponse * @static - * @param {vtctldata.IGetWorkflowsResponse=} [properties] Properties to set - * @returns {vtctldata.GetWorkflowsResponse} GetWorkflowsResponse instance + * @param {vtctldata.IReloadSchemaShardResponse=} [properties] Properties to set + * @returns {vtctldata.ReloadSchemaShardResponse} ReloadSchemaShardResponse instance */ - GetWorkflowsResponse.create = function create(properties) { - return new GetWorkflowsResponse(properties); + ReloadSchemaShardResponse.create = function create(properties) { + return new ReloadSchemaShardResponse(properties); }; /** - * Encodes the specified GetWorkflowsResponse message. Does not implicitly {@link vtctldata.GetWorkflowsResponse.verify|verify} messages. + * Encodes the specified ReloadSchemaShardResponse message. 
Does not implicitly {@link vtctldata.ReloadSchemaShardResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.GetWorkflowsResponse + * @memberof vtctldata.ReloadSchemaShardResponse * @static - * @param {vtctldata.IGetWorkflowsResponse} message GetWorkflowsResponse message or plain object to encode + * @param {vtctldata.IReloadSchemaShardResponse} message ReloadSchemaShardResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetWorkflowsResponse.encode = function encode(message, writer) { + ReloadSchemaShardResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.workflows != null && message.workflows.length) - for (let i = 0; i < message.workflows.length; ++i) - $root.vtctldata.Workflow.encode(message.workflows[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.events != null && message.events.length) + for (let i = 0; i < message.events.length; ++i) + $root.logutil.Event.encode(message.events[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); return writer; }; /** - * Encodes the specified GetWorkflowsResponse message, length delimited. Does not implicitly {@link vtctldata.GetWorkflowsResponse.verify|verify} messages. + * Encodes the specified ReloadSchemaShardResponse message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaShardResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.GetWorkflowsResponse + * @memberof vtctldata.ReloadSchemaShardResponse * @static - * @param {vtctldata.IGetWorkflowsResponse} message GetWorkflowsResponse message or plain object to encode + * @param {vtctldata.IReloadSchemaShardResponse} message ReloadSchemaShardResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - GetWorkflowsResponse.encodeDelimited = function encodeDelimited(message, writer) { + ReloadSchemaShardResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a GetWorkflowsResponse message from the specified reader or buffer. + * Decodes a ReloadSchemaShardResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.GetWorkflowsResponse + * @memberof vtctldata.ReloadSchemaShardResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.GetWorkflowsResponse} GetWorkflowsResponse + * @returns {vtctldata.ReloadSchemaShardResponse} ReloadSchemaShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetWorkflowsResponse.decode = function decode(reader, length) { + ReloadSchemaShardResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetWorkflowsResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ReloadSchemaShardResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - if (!(message.workflows && message.workflows.length)) - message.workflows = []; - message.workflows.push($root.vtctldata.Workflow.decode(reader, reader.uint32())); + case 2: { + if (!(message.events && message.events.length)) + message.events = []; + message.events.push($root.logutil.Event.decode(reader, reader.uint32())); break; } default: @@ -122449,143 +144306,141 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a GetWorkflowsResponse message from the specified reader or buffer, length delimited. + * Decodes a ReloadSchemaShardResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.GetWorkflowsResponse + * @memberof vtctldata.ReloadSchemaShardResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.GetWorkflowsResponse} GetWorkflowsResponse + * @returns {vtctldata.ReloadSchemaShardResponse} ReloadSchemaShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - GetWorkflowsResponse.decodeDelimited = function decodeDelimited(reader) { + ReloadSchemaShardResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a GetWorkflowsResponse message. + * Verifies a ReloadSchemaShardResponse message. 
* @function verify - * @memberof vtctldata.GetWorkflowsResponse + * @memberof vtctldata.ReloadSchemaShardResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - GetWorkflowsResponse.verify = function verify(message) { + ReloadSchemaShardResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.workflows != null && message.hasOwnProperty("workflows")) { - if (!Array.isArray(message.workflows)) - return "workflows: array expected"; - for (let i = 0; i < message.workflows.length; ++i) { - let error = $root.vtctldata.Workflow.verify(message.workflows[i]); + if (message.events != null && message.hasOwnProperty("events")) { + if (!Array.isArray(message.events)) + return "events: array expected"; + for (let i = 0; i < message.events.length; ++i) { + let error = $root.logutil.Event.verify(message.events[i]); if (error) - return "workflows." + error; + return "events." + error; } } return null; }; /** - * Creates a GetWorkflowsResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ReloadSchemaShardResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.GetWorkflowsResponse + * @memberof vtctldata.ReloadSchemaShardResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.GetWorkflowsResponse} GetWorkflowsResponse + * @returns {vtctldata.ReloadSchemaShardResponse} ReloadSchemaShardResponse */ - GetWorkflowsResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.GetWorkflowsResponse) + ReloadSchemaShardResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ReloadSchemaShardResponse) return object; - let message = new $root.vtctldata.GetWorkflowsResponse(); - if (object.workflows) { - if (!Array.isArray(object.workflows)) - throw TypeError(".vtctldata.GetWorkflowsResponse.workflows: array expected"); - message.workflows = []; - for (let i = 0; i < object.workflows.length; ++i) { - if (typeof object.workflows[i] !== "object") - throw TypeError(".vtctldata.GetWorkflowsResponse.workflows: object expected"); - message.workflows[i] = $root.vtctldata.Workflow.fromObject(object.workflows[i]); + let message = new $root.vtctldata.ReloadSchemaShardResponse(); + if (object.events) { + if (!Array.isArray(object.events)) + throw TypeError(".vtctldata.ReloadSchemaShardResponse.events: array expected"); + message.events = []; + for (let i = 0; i < object.events.length; ++i) { + if (typeof object.events[i] !== "object") + throw TypeError(".vtctldata.ReloadSchemaShardResponse.events: object expected"); + message.events[i] = $root.logutil.Event.fromObject(object.events[i]); } } return message; }; /** - * Creates a plain object from a GetWorkflowsResponse message. Also converts values to other types if specified. + * Creates a plain object from a ReloadSchemaShardResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.GetWorkflowsResponse + * @memberof vtctldata.ReloadSchemaShardResponse * @static - * @param {vtctldata.GetWorkflowsResponse} message GetWorkflowsResponse + * @param {vtctldata.ReloadSchemaShardResponse} message ReloadSchemaShardResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - GetWorkflowsResponse.toObject = function toObject(message, options) { + ReloadSchemaShardResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) - object.workflows = []; - if (message.workflows && message.workflows.length) { - object.workflows = []; - for (let j = 0; j < message.workflows.length; ++j) - object.workflows[j] = $root.vtctldata.Workflow.toObject(message.workflows[j], options); + object.events = []; + if (message.events && message.events.length) { + object.events = []; + for (let j = 0; j < message.events.length; ++j) + object.events[j] = $root.logutil.Event.toObject(message.events[j], options); } return object; }; /** - * Converts this GetWorkflowsResponse to JSON. + * Converts this ReloadSchemaShardResponse to JSON. 
* @function toJSON - * @memberof vtctldata.GetWorkflowsResponse + * @memberof vtctldata.ReloadSchemaShardResponse * @instance * @returns {Object.} JSON object */ - GetWorkflowsResponse.prototype.toJSON = function toJSON() { + ReloadSchemaShardResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for GetWorkflowsResponse + * Gets the default type url for ReloadSchemaShardResponse * @function getTypeUrl - * @memberof vtctldata.GetWorkflowsResponse + * @memberof vtctldata.ReloadSchemaShardResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - GetWorkflowsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReloadSchemaShardResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.GetWorkflowsResponse"; + return typeUrlPrefix + "/vtctldata.ReloadSchemaShardResponse"; }; - return GetWorkflowsResponse; + return ReloadSchemaShardResponse; })(); - vtctldata.InitShardPrimaryRequest = (function() { + vtctldata.RemoveBackupRequest = (function() { /** - * Properties of an InitShardPrimaryRequest. + * Properties of a RemoveBackupRequest. 
* @memberof vtctldata - * @interface IInitShardPrimaryRequest - * @property {string|null} [keyspace] InitShardPrimaryRequest keyspace - * @property {string|null} [shard] InitShardPrimaryRequest shard - * @property {topodata.ITabletAlias|null} [primary_elect_tablet_alias] InitShardPrimaryRequest primary_elect_tablet_alias - * @property {boolean|null} [force] InitShardPrimaryRequest force - * @property {vttime.IDuration|null} [wait_replicas_timeout] InitShardPrimaryRequest wait_replicas_timeout + * @interface IRemoveBackupRequest + * @property {string|null} [keyspace] RemoveBackupRequest keyspace + * @property {string|null} [shard] RemoveBackupRequest shard + * @property {string|null} [name] RemoveBackupRequest name */ /** - * Constructs a new InitShardPrimaryRequest. + * Constructs a new RemoveBackupRequest. * @memberof vtctldata - * @classdesc Represents an InitShardPrimaryRequest. - * @implements IInitShardPrimaryRequest + * @classdesc Represents a RemoveBackupRequest. + * @implements IRemoveBackupRequest * @constructor - * @param {vtctldata.IInitShardPrimaryRequest=} [properties] Properties to set + * @param {vtctldata.IRemoveBackupRequest=} [properties] Properties to set */ - function InitShardPrimaryRequest(properties) { + function RemoveBackupRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -122593,110 +144448,90 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * InitShardPrimaryRequest keyspace. + * RemoveBackupRequest keyspace. * @member {string} keyspace - * @memberof vtctldata.InitShardPrimaryRequest + * @memberof vtctldata.RemoveBackupRequest * @instance */ - InitShardPrimaryRequest.prototype.keyspace = ""; + RemoveBackupRequest.prototype.keyspace = ""; /** - * InitShardPrimaryRequest shard. + * RemoveBackupRequest shard. 
* @member {string} shard - * @memberof vtctldata.InitShardPrimaryRequest - * @instance - */ - InitShardPrimaryRequest.prototype.shard = ""; - - /** - * InitShardPrimaryRequest primary_elect_tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} primary_elect_tablet_alias - * @memberof vtctldata.InitShardPrimaryRequest - * @instance - */ - InitShardPrimaryRequest.prototype.primary_elect_tablet_alias = null; - - /** - * InitShardPrimaryRequest force. - * @member {boolean} force - * @memberof vtctldata.InitShardPrimaryRequest + * @memberof vtctldata.RemoveBackupRequest * @instance */ - InitShardPrimaryRequest.prototype.force = false; + RemoveBackupRequest.prototype.shard = ""; /** - * InitShardPrimaryRequest wait_replicas_timeout. - * @member {vttime.IDuration|null|undefined} wait_replicas_timeout - * @memberof vtctldata.InitShardPrimaryRequest + * RemoveBackupRequest name. + * @member {string} name + * @memberof vtctldata.RemoveBackupRequest * @instance */ - InitShardPrimaryRequest.prototype.wait_replicas_timeout = null; + RemoveBackupRequest.prototype.name = ""; /** - * Creates a new InitShardPrimaryRequest instance using the specified properties. + * Creates a new RemoveBackupRequest instance using the specified properties. * @function create - * @memberof vtctldata.InitShardPrimaryRequest + * @memberof vtctldata.RemoveBackupRequest * @static - * @param {vtctldata.IInitShardPrimaryRequest=} [properties] Properties to set - * @returns {vtctldata.InitShardPrimaryRequest} InitShardPrimaryRequest instance + * @param {vtctldata.IRemoveBackupRequest=} [properties] Properties to set + * @returns {vtctldata.RemoveBackupRequest} RemoveBackupRequest instance */ - InitShardPrimaryRequest.create = function create(properties) { - return new InitShardPrimaryRequest(properties); + RemoveBackupRequest.create = function create(properties) { + return new RemoveBackupRequest(properties); }; /** - * Encodes the specified InitShardPrimaryRequest message. 
Does not implicitly {@link vtctldata.InitShardPrimaryRequest.verify|verify} messages. + * Encodes the specified RemoveBackupRequest message. Does not implicitly {@link vtctldata.RemoveBackupRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.InitShardPrimaryRequest + * @memberof vtctldata.RemoveBackupRequest * @static - * @param {vtctldata.IInitShardPrimaryRequest} message InitShardPrimaryRequest message or plain object to encode + * @param {vtctldata.IRemoveBackupRequest} message RemoveBackupRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - InitShardPrimaryRequest.encode = function encode(message, writer) { + RemoveBackupRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.primary_elect_tablet_alias != null && Object.hasOwnProperty.call(message, "primary_elect_tablet_alias")) - $root.topodata.TabletAlias.encode(message.primary_elect_tablet_alias, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.force != null && Object.hasOwnProperty.call(message, "force")) - writer.uint32(/* id 4, wireType 0 =*/32).bool(message.force); - if (message.wait_replicas_timeout != null && Object.hasOwnProperty.call(message, "wait_replicas_timeout")) - $root.vttime.Duration.encode(message.wait_replicas_timeout, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.name); return writer; }; /** - * Encodes the specified InitShardPrimaryRequest message, length delimited. 
Does not implicitly {@link vtctldata.InitShardPrimaryRequest.verify|verify} messages. + * Encodes the specified RemoveBackupRequest message, length delimited. Does not implicitly {@link vtctldata.RemoveBackupRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.InitShardPrimaryRequest + * @memberof vtctldata.RemoveBackupRequest * @static - * @param {vtctldata.IInitShardPrimaryRequest} message InitShardPrimaryRequest message or plain object to encode + * @param {vtctldata.IRemoveBackupRequest} message RemoveBackupRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - InitShardPrimaryRequest.encodeDelimited = function encodeDelimited(message, writer) { + RemoveBackupRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an InitShardPrimaryRequest message from the specified reader or buffer. + * Decodes a RemoveBackupRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.InitShardPrimaryRequest + * @memberof vtctldata.RemoveBackupRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.InitShardPrimaryRequest} InitShardPrimaryRequest + * @returns {vtctldata.RemoveBackupRequest} RemoveBackupRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - InitShardPrimaryRequest.decode = function decode(reader, length) { + RemoveBackupRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.InitShardPrimaryRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.RemoveBackupRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -122709,15 +144544,7 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 3: { - message.primary_elect_tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } - case 4: { - message.force = reader.bool(); - break; - } - case 5: { - message.wait_replicas_timeout = $root.vttime.Duration.decode(reader, reader.uint32()); + message.name = reader.string(); break; } default: @@ -122729,30 +144556,30 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an InitShardPrimaryRequest message from the specified reader or buffer, length delimited. + * Decodes a RemoveBackupRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.InitShardPrimaryRequest + * @memberof vtctldata.RemoveBackupRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.InitShardPrimaryRequest} InitShardPrimaryRequest + * @returns {vtctldata.RemoveBackupRequest} RemoveBackupRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - InitShardPrimaryRequest.decodeDelimited = function decodeDelimited(reader) { + RemoveBackupRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an InitShardPrimaryRequest message. + * Verifies a RemoveBackupRequest message. 
* @function verify - * @memberof vtctldata.InitShardPrimaryRequest + * @memberof vtctldata.RemoveBackupRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - InitShardPrimaryRequest.verify = function verify(message) { + RemoveBackupRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.keyspace != null && message.hasOwnProperty("keyspace")) @@ -122761,134 +144588,106 @@ export const vtctldata = $root.vtctldata = (() => { if (message.shard != null && message.hasOwnProperty("shard")) if (!$util.isString(message.shard)) return "shard: string expected"; - if (message.primary_elect_tablet_alias != null && message.hasOwnProperty("primary_elect_tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.primary_elect_tablet_alias); - if (error) - return "primary_elect_tablet_alias." + error; - } - if (message.force != null && message.hasOwnProperty("force")) - if (typeof message.force !== "boolean") - return "force: boolean expected"; - if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout")) { - let error = $root.vttime.Duration.verify(message.wait_replicas_timeout); - if (error) - return "wait_replicas_timeout." + error; - } + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; return null; }; /** - * Creates an InitShardPrimaryRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RemoveBackupRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.InitShardPrimaryRequest + * @memberof vtctldata.RemoveBackupRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.InitShardPrimaryRequest} InitShardPrimaryRequest + * @returns {vtctldata.RemoveBackupRequest} RemoveBackupRequest */ - InitShardPrimaryRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.InitShardPrimaryRequest) + RemoveBackupRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RemoveBackupRequest) return object; - let message = new $root.vtctldata.InitShardPrimaryRequest(); + let message = new $root.vtctldata.RemoveBackupRequest(); if (object.keyspace != null) message.keyspace = String(object.keyspace); if (object.shard != null) message.shard = String(object.shard); - if (object.primary_elect_tablet_alias != null) { - if (typeof object.primary_elect_tablet_alias !== "object") - throw TypeError(".vtctldata.InitShardPrimaryRequest.primary_elect_tablet_alias: object expected"); - message.primary_elect_tablet_alias = $root.topodata.TabletAlias.fromObject(object.primary_elect_tablet_alias); - } - if (object.force != null) - message.force = Boolean(object.force); - if (object.wait_replicas_timeout != null) { - if (typeof object.wait_replicas_timeout !== "object") - throw TypeError(".vtctldata.InitShardPrimaryRequest.wait_replicas_timeout: object expected"); - message.wait_replicas_timeout = $root.vttime.Duration.fromObject(object.wait_replicas_timeout); - } + if (object.name != null) + message.name = String(object.name); return message; }; /** - * Creates a plain object from an InitShardPrimaryRequest message. Also converts values to other types if specified. + * Creates a plain object from a RemoveBackupRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.InitShardPrimaryRequest + * @memberof vtctldata.RemoveBackupRequest * @static - * @param {vtctldata.InitShardPrimaryRequest} message InitShardPrimaryRequest + * @param {vtctldata.RemoveBackupRequest} message RemoveBackupRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - InitShardPrimaryRequest.toObject = function toObject(message, options) { + RemoveBackupRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { object.keyspace = ""; object.shard = ""; - object.primary_elect_tablet_alias = null; - object.force = false; - object.wait_replicas_timeout = null; + object.name = ""; } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; if (message.shard != null && message.hasOwnProperty("shard")) object.shard = message.shard; - if (message.primary_elect_tablet_alias != null && message.hasOwnProperty("primary_elect_tablet_alias")) - object.primary_elect_tablet_alias = $root.topodata.TabletAlias.toObject(message.primary_elect_tablet_alias, options); - if (message.force != null && message.hasOwnProperty("force")) - object.force = message.force; - if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout")) - object.wait_replicas_timeout = $root.vttime.Duration.toObject(message.wait_replicas_timeout, options); + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; return object; }; /** - * Converts this InitShardPrimaryRequest to JSON. + * Converts this RemoveBackupRequest to JSON. 
* @function toJSON - * @memberof vtctldata.InitShardPrimaryRequest + * @memberof vtctldata.RemoveBackupRequest * @instance * @returns {Object.} JSON object */ - InitShardPrimaryRequest.prototype.toJSON = function toJSON() { + RemoveBackupRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for InitShardPrimaryRequest + * Gets the default type url for RemoveBackupRequest * @function getTypeUrl - * @memberof vtctldata.InitShardPrimaryRequest + * @memberof vtctldata.RemoveBackupRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - InitShardPrimaryRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RemoveBackupRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.InitShardPrimaryRequest"; + return typeUrlPrefix + "/vtctldata.RemoveBackupRequest"; }; - return InitShardPrimaryRequest; + return RemoveBackupRequest; })(); - vtctldata.InitShardPrimaryResponse = (function() { + vtctldata.RemoveBackupResponse = (function() { /** - * Properties of an InitShardPrimaryResponse. + * Properties of a RemoveBackupResponse. * @memberof vtctldata - * @interface IInitShardPrimaryResponse - * @property {Array.|null} [events] InitShardPrimaryResponse events + * @interface IRemoveBackupResponse */ /** - * Constructs a new InitShardPrimaryResponse. + * Constructs a new RemoveBackupResponse. * @memberof vtctldata - * @classdesc Represents an InitShardPrimaryResponse. - * @implements IInitShardPrimaryResponse + * @classdesc Represents a RemoveBackupResponse. 
+ * @implements IRemoveBackupResponse * @constructor - * @param {vtctldata.IInitShardPrimaryResponse=} [properties] Properties to set + * @param {vtctldata.IRemoveBackupResponse=} [properties] Properties to set */ - function InitShardPrimaryResponse(properties) { - this.events = []; + function RemoveBackupResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -122896,80 +144695,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * InitShardPrimaryResponse events. - * @member {Array.} events - * @memberof vtctldata.InitShardPrimaryResponse - * @instance - */ - InitShardPrimaryResponse.prototype.events = $util.emptyArray; - - /** - * Creates a new InitShardPrimaryResponse instance using the specified properties. + * Creates a new RemoveBackupResponse instance using the specified properties. * @function create - * @memberof vtctldata.InitShardPrimaryResponse + * @memberof vtctldata.RemoveBackupResponse * @static - * @param {vtctldata.IInitShardPrimaryResponse=} [properties] Properties to set - * @returns {vtctldata.InitShardPrimaryResponse} InitShardPrimaryResponse instance + * @param {vtctldata.IRemoveBackupResponse=} [properties] Properties to set + * @returns {vtctldata.RemoveBackupResponse} RemoveBackupResponse instance */ - InitShardPrimaryResponse.create = function create(properties) { - return new InitShardPrimaryResponse(properties); + RemoveBackupResponse.create = function create(properties) { + return new RemoveBackupResponse(properties); }; /** - * Encodes the specified InitShardPrimaryResponse message. Does not implicitly {@link vtctldata.InitShardPrimaryResponse.verify|verify} messages. + * Encodes the specified RemoveBackupResponse message. Does not implicitly {@link vtctldata.RemoveBackupResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.InitShardPrimaryResponse + * @memberof vtctldata.RemoveBackupResponse * @static - * @param {vtctldata.IInitShardPrimaryResponse} message InitShardPrimaryResponse message or plain object to encode + * @param {vtctldata.IRemoveBackupResponse} message RemoveBackupResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - InitShardPrimaryResponse.encode = function encode(message, writer) { + RemoveBackupResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.events != null && message.events.length) - for (let i = 0; i < message.events.length; ++i) - $root.logutil.Event.encode(message.events[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified InitShardPrimaryResponse message, length delimited. Does not implicitly {@link vtctldata.InitShardPrimaryResponse.verify|verify} messages. + * Encodes the specified RemoveBackupResponse message, length delimited. Does not implicitly {@link vtctldata.RemoveBackupResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.InitShardPrimaryResponse + * @memberof vtctldata.RemoveBackupResponse * @static - * @param {vtctldata.IInitShardPrimaryResponse} message InitShardPrimaryResponse message or plain object to encode + * @param {vtctldata.IRemoveBackupResponse} message RemoveBackupResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - InitShardPrimaryResponse.encodeDelimited = function encodeDelimited(message, writer) { + RemoveBackupResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an InitShardPrimaryResponse message from the specified reader or buffer. 
+ * Decodes a RemoveBackupResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.InitShardPrimaryResponse + * @memberof vtctldata.RemoveBackupResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.InitShardPrimaryResponse} InitShardPrimaryResponse + * @returns {vtctldata.RemoveBackupResponse} RemoveBackupResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - InitShardPrimaryResponse.decode = function decode(reader, length) { + RemoveBackupResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.InitShardPrimaryResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RemoveBackupResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - if (!(message.events && message.events.length)) - message.events = []; - message.events.push($root.logutil.Event.decode(reader, reader.uint32())); - break; - } default: reader.skipType(tag & 7); break; @@ -122979,139 +144761,112 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an InitShardPrimaryResponse message from the specified reader or buffer, length delimited. + * Decodes a RemoveBackupResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.InitShardPrimaryResponse + * @memberof vtctldata.RemoveBackupResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.InitShardPrimaryResponse} InitShardPrimaryResponse + * @returns {vtctldata.RemoveBackupResponse} RemoveBackupResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - InitShardPrimaryResponse.decodeDelimited = function decodeDelimited(reader) { + RemoveBackupResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an InitShardPrimaryResponse message. + * Verifies a RemoveBackupResponse message. * @function verify - * @memberof vtctldata.InitShardPrimaryResponse + * @memberof vtctldata.RemoveBackupResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - InitShardPrimaryResponse.verify = function verify(message) { + RemoveBackupResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.events != null && message.hasOwnProperty("events")) { - if (!Array.isArray(message.events)) - return "events: array expected"; - for (let i = 0; i < message.events.length; ++i) { - let error = $root.logutil.Event.verify(message.events[i]); - if (error) - return "events." + error; - } - } return null; }; /** - * Creates an InitShardPrimaryResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RemoveBackupResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.InitShardPrimaryResponse + * @memberof vtctldata.RemoveBackupResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.InitShardPrimaryResponse} InitShardPrimaryResponse + * @returns {vtctldata.RemoveBackupResponse} RemoveBackupResponse */ - InitShardPrimaryResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.InitShardPrimaryResponse) + RemoveBackupResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RemoveBackupResponse) return object; - let message = new $root.vtctldata.InitShardPrimaryResponse(); - if (object.events) { - if (!Array.isArray(object.events)) - throw TypeError(".vtctldata.InitShardPrimaryResponse.events: array expected"); - message.events = []; - for (let i = 0; i < object.events.length; ++i) { - if (typeof object.events[i] !== "object") - throw TypeError(".vtctldata.InitShardPrimaryResponse.events: object expected"); - message.events[i] = $root.logutil.Event.fromObject(object.events[i]); - } - } - return message; + return new $root.vtctldata.RemoveBackupResponse(); }; /** - * Creates a plain object from an InitShardPrimaryResponse message. Also converts values to other types if specified. + * Creates a plain object from a RemoveBackupResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.InitShardPrimaryResponse + * @memberof vtctldata.RemoveBackupResponse * @static - * @param {vtctldata.InitShardPrimaryResponse} message InitShardPrimaryResponse + * @param {vtctldata.RemoveBackupResponse} message RemoveBackupResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - InitShardPrimaryResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.arrays || options.defaults) - object.events = []; - if (message.events && message.events.length) { - object.events = []; - for (let j = 0; j < message.events.length; ++j) - object.events[j] = $root.logutil.Event.toObject(message.events[j], options); - } - return object; + RemoveBackupResponse.toObject = function toObject() { + return {}; }; /** - * Converts this InitShardPrimaryResponse to JSON. + * Converts this RemoveBackupResponse to JSON. * @function toJSON - * @memberof vtctldata.InitShardPrimaryResponse + * @memberof vtctldata.RemoveBackupResponse * @instance * @returns {Object.} JSON object */ - InitShardPrimaryResponse.prototype.toJSON = function toJSON() { + RemoveBackupResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for InitShardPrimaryResponse + * Gets the default type url for RemoveBackupResponse * @function getTypeUrl - * @memberof vtctldata.InitShardPrimaryResponse + * @memberof vtctldata.RemoveBackupResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - InitShardPrimaryResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RemoveBackupResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + 
"/vtctldata.InitShardPrimaryResponse"; + return typeUrlPrefix + "/vtctldata.RemoveBackupResponse"; }; - return InitShardPrimaryResponse; + return RemoveBackupResponse; })(); - vtctldata.PingTabletRequest = (function() { + vtctldata.RemoveKeyspaceCellRequest = (function() { /** - * Properties of a PingTabletRequest. + * Properties of a RemoveKeyspaceCellRequest. * @memberof vtctldata - * @interface IPingTabletRequest - * @property {topodata.ITabletAlias|null} [tablet_alias] PingTabletRequest tablet_alias + * @interface IRemoveKeyspaceCellRequest + * @property {string|null} [keyspace] RemoveKeyspaceCellRequest keyspace + * @property {string|null} [cell] RemoveKeyspaceCellRequest cell + * @property {boolean|null} [force] RemoveKeyspaceCellRequest force + * @property {boolean|null} [recursive] RemoveKeyspaceCellRequest recursive */ /** - * Constructs a new PingTabletRequest. + * Constructs a new RemoveKeyspaceCellRequest. * @memberof vtctldata - * @classdesc Represents a PingTabletRequest. - * @implements IPingTabletRequest + * @classdesc Represents a RemoveKeyspaceCellRequest. + * @implements IRemoveKeyspaceCellRequest * @constructor - * @param {vtctldata.IPingTabletRequest=} [properties] Properties to set + * @param {vtctldata.IRemoveKeyspaceCellRequest=} [properties] Properties to set */ - function PingTabletRequest(properties) { + function RemoveKeyspaceCellRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -123119,75 +144874,117 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * PingTabletRequest tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.PingTabletRequest + * RemoveKeyspaceCellRequest keyspace. 
+ * @member {string} keyspace + * @memberof vtctldata.RemoveKeyspaceCellRequest * @instance */ - PingTabletRequest.prototype.tablet_alias = null; + RemoveKeyspaceCellRequest.prototype.keyspace = ""; /** - * Creates a new PingTabletRequest instance using the specified properties. + * RemoveKeyspaceCellRequest cell. + * @member {string} cell + * @memberof vtctldata.RemoveKeyspaceCellRequest + * @instance + */ + RemoveKeyspaceCellRequest.prototype.cell = ""; + + /** + * RemoveKeyspaceCellRequest force. + * @member {boolean} force + * @memberof vtctldata.RemoveKeyspaceCellRequest + * @instance + */ + RemoveKeyspaceCellRequest.prototype.force = false; + + /** + * RemoveKeyspaceCellRequest recursive. + * @member {boolean} recursive + * @memberof vtctldata.RemoveKeyspaceCellRequest + * @instance + */ + RemoveKeyspaceCellRequest.prototype.recursive = false; + + /** + * Creates a new RemoveKeyspaceCellRequest instance using the specified properties. * @function create - * @memberof vtctldata.PingTabletRequest + * @memberof vtctldata.RemoveKeyspaceCellRequest * @static - * @param {vtctldata.IPingTabletRequest=} [properties] Properties to set - * @returns {vtctldata.PingTabletRequest} PingTabletRequest instance + * @param {vtctldata.IRemoveKeyspaceCellRequest=} [properties] Properties to set + * @returns {vtctldata.RemoveKeyspaceCellRequest} RemoveKeyspaceCellRequest instance */ - PingTabletRequest.create = function create(properties) { - return new PingTabletRequest(properties); + RemoveKeyspaceCellRequest.create = function create(properties) { + return new RemoveKeyspaceCellRequest(properties); }; /** - * Encodes the specified PingTabletRequest message. Does not implicitly {@link vtctldata.PingTabletRequest.verify|verify} messages. + * Encodes the specified RemoveKeyspaceCellRequest message. Does not implicitly {@link vtctldata.RemoveKeyspaceCellRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.PingTabletRequest + * @memberof vtctldata.RemoveKeyspaceCellRequest * @static - * @param {vtctldata.IPingTabletRequest} message PingTabletRequest message or plain object to encode + * @param {vtctldata.IRemoveKeyspaceCellRequest} message RemoveKeyspaceCellRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - PingTabletRequest.encode = function encode(message, writer) { + RemoveKeyspaceCellRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.cell != null && Object.hasOwnProperty.call(message, "cell")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.cell); + if (message.force != null && Object.hasOwnProperty.call(message, "force")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.force); + if (message.recursive != null && Object.hasOwnProperty.call(message, "recursive")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.recursive); return writer; }; /** - * Encodes the specified PingTabletRequest message, length delimited. Does not implicitly {@link vtctldata.PingTabletRequest.verify|verify} messages. + * Encodes the specified RemoveKeyspaceCellRequest message, length delimited. Does not implicitly {@link vtctldata.RemoveKeyspaceCellRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.PingTabletRequest + * @memberof vtctldata.RemoveKeyspaceCellRequest * @static - * @param {vtctldata.IPingTabletRequest} message PingTabletRequest message or plain object to encode + * @param {vtctldata.IRemoveKeyspaceCellRequest} message RemoveKeyspaceCellRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - PingTabletRequest.encodeDelimited = function encodeDelimited(message, writer) { + RemoveKeyspaceCellRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a PingTabletRequest message from the specified reader or buffer. + * Decodes a RemoveKeyspaceCellRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.PingTabletRequest + * @memberof vtctldata.RemoveKeyspaceCellRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.PingTabletRequest} PingTabletRequest + * @returns {vtctldata.RemoveKeyspaceCellRequest} RemoveKeyspaceCellRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PingTabletRequest.decode = function decode(reader, length) { + RemoveKeyspaceCellRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.PingTabletRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.RemoveKeyspaceCellRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + message.keyspace = reader.string(); + break; + } + case 2: { + message.cell = reader.string(); + break; + } + case 3: { + message.force = reader.bool(); + break; + } + case 4: { + message.recursive = reader.bool(); break; } default: @@ -123199,126 +144996,146 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a PingTabletRequest message from the specified reader or buffer, length delimited. + * Decodes a RemoveKeyspaceCellRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.PingTabletRequest + * @memberof vtctldata.RemoveKeyspaceCellRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.PingTabletRequest} PingTabletRequest + * @returns {vtctldata.RemoveKeyspaceCellRequest} RemoveKeyspaceCellRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PingTabletRequest.decodeDelimited = function decodeDelimited(reader) { + RemoveKeyspaceCellRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a PingTabletRequest message. + * Verifies a RemoveKeyspaceCellRequest message. 
* @function verify - * @memberof vtctldata.PingTabletRequest + * @memberof vtctldata.RemoveKeyspaceCellRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - PingTabletRequest.verify = function verify(message) { + RemoveKeyspaceCellRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.cell != null && message.hasOwnProperty("cell")) + if (!$util.isString(message.cell)) + return "cell: string expected"; + if (message.force != null && message.hasOwnProperty("force")) + if (typeof message.force !== "boolean") + return "force: boolean expected"; + if (message.recursive != null && message.hasOwnProperty("recursive")) + if (typeof message.recursive !== "boolean") + return "recursive: boolean expected"; return null; }; /** - * Creates a PingTabletRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RemoveKeyspaceCellRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.PingTabletRequest + * @memberof vtctldata.RemoveKeyspaceCellRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.PingTabletRequest} PingTabletRequest + * @returns {vtctldata.RemoveKeyspaceCellRequest} RemoveKeyspaceCellRequest */ - PingTabletRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.PingTabletRequest) + RemoveKeyspaceCellRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RemoveKeyspaceCellRequest) return object; - let message = new $root.vtctldata.PingTabletRequest(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.PingTabletRequest.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } + let message = new $root.vtctldata.RemoveKeyspaceCellRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.cell != null) + message.cell = String(object.cell); + if (object.force != null) + message.force = Boolean(object.force); + if (object.recursive != null) + message.recursive = Boolean(object.recursive); return message; }; /** - * Creates a plain object from a PingTabletRequest message. Also converts values to other types if specified. + * Creates a plain object from a RemoveKeyspaceCellRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.PingTabletRequest + * @memberof vtctldata.RemoveKeyspaceCellRequest * @static - * @param {vtctldata.PingTabletRequest} message PingTabletRequest + * @param {vtctldata.RemoveKeyspaceCellRequest} message RemoveKeyspaceCellRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - PingTabletRequest.toObject = function toObject(message, options) { + RemoveKeyspaceCellRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.tablet_alias = null; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (options.defaults) { + object.keyspace = ""; + object.cell = ""; + object.force = false; + object.recursive = false; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.cell != null && message.hasOwnProperty("cell")) + object.cell = message.cell; + if (message.force != null && message.hasOwnProperty("force")) + object.force = message.force; + if (message.recursive != null && message.hasOwnProperty("recursive")) + object.recursive = message.recursive; return object; }; /** - * Converts this PingTabletRequest to JSON. + * Converts this RemoveKeyspaceCellRequest to JSON. 
* @function toJSON - * @memberof vtctldata.PingTabletRequest + * @memberof vtctldata.RemoveKeyspaceCellRequest * @instance * @returns {Object.} JSON object */ - PingTabletRequest.prototype.toJSON = function toJSON() { + RemoveKeyspaceCellRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for PingTabletRequest + * Gets the default type url for RemoveKeyspaceCellRequest * @function getTypeUrl - * @memberof vtctldata.PingTabletRequest + * @memberof vtctldata.RemoveKeyspaceCellRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - PingTabletRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RemoveKeyspaceCellRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.PingTabletRequest"; + return typeUrlPrefix + "/vtctldata.RemoveKeyspaceCellRequest"; }; - return PingTabletRequest; + return RemoveKeyspaceCellRequest; })(); - vtctldata.PingTabletResponse = (function() { + vtctldata.RemoveKeyspaceCellResponse = (function() { /** - * Properties of a PingTabletResponse. + * Properties of a RemoveKeyspaceCellResponse. * @memberof vtctldata - * @interface IPingTabletResponse + * @interface IRemoveKeyspaceCellResponse */ /** - * Constructs a new PingTabletResponse. + * Constructs a new RemoveKeyspaceCellResponse. * @memberof vtctldata - * @classdesc Represents a PingTabletResponse. - * @implements IPingTabletResponse + * @classdesc Represents a RemoveKeyspaceCellResponse. 
+ * @implements IRemoveKeyspaceCellResponse * @constructor - * @param {vtctldata.IPingTabletResponse=} [properties] Properties to set + * @param {vtctldata.IRemoveKeyspaceCellResponse=} [properties] Properties to set */ - function PingTabletResponse(properties) { + function RemoveKeyspaceCellResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -123326,60 +145143,60 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new PingTabletResponse instance using the specified properties. + * Creates a new RemoveKeyspaceCellResponse instance using the specified properties. * @function create - * @memberof vtctldata.PingTabletResponse + * @memberof vtctldata.RemoveKeyspaceCellResponse * @static - * @param {vtctldata.IPingTabletResponse=} [properties] Properties to set - * @returns {vtctldata.PingTabletResponse} PingTabletResponse instance + * @param {vtctldata.IRemoveKeyspaceCellResponse=} [properties] Properties to set + * @returns {vtctldata.RemoveKeyspaceCellResponse} RemoveKeyspaceCellResponse instance */ - PingTabletResponse.create = function create(properties) { - return new PingTabletResponse(properties); + RemoveKeyspaceCellResponse.create = function create(properties) { + return new RemoveKeyspaceCellResponse(properties); }; /** - * Encodes the specified PingTabletResponse message. Does not implicitly {@link vtctldata.PingTabletResponse.verify|verify} messages. + * Encodes the specified RemoveKeyspaceCellResponse message. Does not implicitly {@link vtctldata.RemoveKeyspaceCellResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.PingTabletResponse + * @memberof vtctldata.RemoveKeyspaceCellResponse * @static - * @param {vtctldata.IPingTabletResponse} message PingTabletResponse message or plain object to encode + * @param {vtctldata.IRemoveKeyspaceCellResponse} message RemoveKeyspaceCellResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - PingTabletResponse.encode = function encode(message, writer) { + RemoveKeyspaceCellResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); return writer; }; /** - * Encodes the specified PingTabletResponse message, length delimited. Does not implicitly {@link vtctldata.PingTabletResponse.verify|verify} messages. + * Encodes the specified RemoveKeyspaceCellResponse message, length delimited. Does not implicitly {@link vtctldata.RemoveKeyspaceCellResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.PingTabletResponse + * @memberof vtctldata.RemoveKeyspaceCellResponse * @static - * @param {vtctldata.IPingTabletResponse} message PingTabletResponse message or plain object to encode + * @param {vtctldata.IRemoveKeyspaceCellResponse} message RemoveKeyspaceCellResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - PingTabletResponse.encodeDelimited = function encodeDelimited(message, writer) { + RemoveKeyspaceCellResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a PingTabletResponse message from the specified reader or buffer. + * Decodes a RemoveKeyspaceCellResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.PingTabletResponse + * @memberof vtctldata.RemoveKeyspaceCellResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.PingTabletResponse} PingTabletResponse + * @returns {vtctldata.RemoveKeyspaceCellResponse} RemoveKeyspaceCellResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PingTabletResponse.decode = function decode(reader, length) { + RemoveKeyspaceCellResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.PingTabletResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RemoveKeyspaceCellResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -123392,113 +145209,113 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a PingTabletResponse message from the specified reader or buffer, length delimited. + * Decodes a RemoveKeyspaceCellResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.PingTabletResponse + * @memberof vtctldata.RemoveKeyspaceCellResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.PingTabletResponse} PingTabletResponse + * @returns {vtctldata.RemoveKeyspaceCellResponse} RemoveKeyspaceCellResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PingTabletResponse.decodeDelimited = function decodeDelimited(reader) { + RemoveKeyspaceCellResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a PingTabletResponse message. + * Verifies a RemoveKeyspaceCellResponse message. * @function verify - * @memberof vtctldata.PingTabletResponse + * @memberof vtctldata.RemoveKeyspaceCellResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - PingTabletResponse.verify = function verify(message) { + RemoveKeyspaceCellResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; return null; }; /** - * Creates a PingTabletResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RemoveKeyspaceCellResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.PingTabletResponse + * @memberof vtctldata.RemoveKeyspaceCellResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.PingTabletResponse} PingTabletResponse + * @returns {vtctldata.RemoveKeyspaceCellResponse} RemoveKeyspaceCellResponse */ - PingTabletResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.PingTabletResponse) + RemoveKeyspaceCellResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RemoveKeyspaceCellResponse) return object; - return new $root.vtctldata.PingTabletResponse(); + return new $root.vtctldata.RemoveKeyspaceCellResponse(); }; /** - * Creates a plain object from a PingTabletResponse message. Also converts values to other types if specified. + * Creates a plain object from a RemoveKeyspaceCellResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.PingTabletResponse + * @memberof vtctldata.RemoveKeyspaceCellResponse * @static - * @param {vtctldata.PingTabletResponse} message PingTabletResponse + * @param {vtctldata.RemoveKeyspaceCellResponse} message RemoveKeyspaceCellResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - PingTabletResponse.toObject = function toObject() { + RemoveKeyspaceCellResponse.toObject = function toObject() { return {}; }; /** - * Converts this PingTabletResponse to JSON. + * Converts this RemoveKeyspaceCellResponse to JSON. 
* @function toJSON - * @memberof vtctldata.PingTabletResponse + * @memberof vtctldata.RemoveKeyspaceCellResponse * @instance * @returns {Object.} JSON object */ - PingTabletResponse.prototype.toJSON = function toJSON() { + RemoveKeyspaceCellResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for PingTabletResponse + * Gets the default type url for RemoveKeyspaceCellResponse * @function getTypeUrl - * @memberof vtctldata.PingTabletResponse + * @memberof vtctldata.RemoveKeyspaceCellResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - PingTabletResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RemoveKeyspaceCellResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.PingTabletResponse"; + return typeUrlPrefix + "/vtctldata.RemoveKeyspaceCellResponse"; }; - return PingTabletResponse; + return RemoveKeyspaceCellResponse; })(); - vtctldata.PlannedReparentShardRequest = (function() { + vtctldata.RemoveShardCellRequest = (function() { /** - * Properties of a PlannedReparentShardRequest. + * Properties of a RemoveShardCellRequest. 
* @memberof vtctldata - * @interface IPlannedReparentShardRequest - * @property {string|null} [keyspace] PlannedReparentShardRequest keyspace - * @property {string|null} [shard] PlannedReparentShardRequest shard - * @property {topodata.ITabletAlias|null} [new_primary] PlannedReparentShardRequest new_primary - * @property {topodata.ITabletAlias|null} [avoid_primary] PlannedReparentShardRequest avoid_primary - * @property {vttime.IDuration|null} [wait_replicas_timeout] PlannedReparentShardRequest wait_replicas_timeout + * @interface IRemoveShardCellRequest + * @property {string|null} [keyspace] RemoveShardCellRequest keyspace + * @property {string|null} [shard_name] RemoveShardCellRequest shard_name + * @property {string|null} [cell] RemoveShardCellRequest cell + * @property {boolean|null} [force] RemoveShardCellRequest force + * @property {boolean|null} [recursive] RemoveShardCellRequest recursive */ /** - * Constructs a new PlannedReparentShardRequest. + * Constructs a new RemoveShardCellRequest. * @memberof vtctldata - * @classdesc Represents a PlannedReparentShardRequest. - * @implements IPlannedReparentShardRequest + * @classdesc Represents a RemoveShardCellRequest. + * @implements IRemoveShardCellRequest * @constructor - * @param {vtctldata.IPlannedReparentShardRequest=} [properties] Properties to set + * @param {vtctldata.IRemoveShardCellRequest=} [properties] Properties to set */ - function PlannedReparentShardRequest(properties) { + function RemoveShardCellRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -123506,110 +145323,110 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * PlannedReparentShardRequest keyspace. + * RemoveShardCellRequest keyspace. 
* @member {string} keyspace - * @memberof vtctldata.PlannedReparentShardRequest + * @memberof vtctldata.RemoveShardCellRequest * @instance */ - PlannedReparentShardRequest.prototype.keyspace = ""; + RemoveShardCellRequest.prototype.keyspace = ""; /** - * PlannedReparentShardRequest shard. - * @member {string} shard - * @memberof vtctldata.PlannedReparentShardRequest + * RemoveShardCellRequest shard_name. + * @member {string} shard_name + * @memberof vtctldata.RemoveShardCellRequest * @instance */ - PlannedReparentShardRequest.prototype.shard = ""; + RemoveShardCellRequest.prototype.shard_name = ""; /** - * PlannedReparentShardRequest new_primary. - * @member {topodata.ITabletAlias|null|undefined} new_primary - * @memberof vtctldata.PlannedReparentShardRequest + * RemoveShardCellRequest cell. + * @member {string} cell + * @memberof vtctldata.RemoveShardCellRequest * @instance */ - PlannedReparentShardRequest.prototype.new_primary = null; + RemoveShardCellRequest.prototype.cell = ""; /** - * PlannedReparentShardRequest avoid_primary. - * @member {topodata.ITabletAlias|null|undefined} avoid_primary - * @memberof vtctldata.PlannedReparentShardRequest + * RemoveShardCellRequest force. + * @member {boolean} force + * @memberof vtctldata.RemoveShardCellRequest * @instance */ - PlannedReparentShardRequest.prototype.avoid_primary = null; + RemoveShardCellRequest.prototype.force = false; /** - * PlannedReparentShardRequest wait_replicas_timeout. - * @member {vttime.IDuration|null|undefined} wait_replicas_timeout - * @memberof vtctldata.PlannedReparentShardRequest + * RemoveShardCellRequest recursive. + * @member {boolean} recursive + * @memberof vtctldata.RemoveShardCellRequest * @instance */ - PlannedReparentShardRequest.prototype.wait_replicas_timeout = null; + RemoveShardCellRequest.prototype.recursive = false; /** - * Creates a new PlannedReparentShardRequest instance using the specified properties. 
+ * Creates a new RemoveShardCellRequest instance using the specified properties. * @function create - * @memberof vtctldata.PlannedReparentShardRequest + * @memberof vtctldata.RemoveShardCellRequest * @static - * @param {vtctldata.IPlannedReparentShardRequest=} [properties] Properties to set - * @returns {vtctldata.PlannedReparentShardRequest} PlannedReparentShardRequest instance + * @param {vtctldata.IRemoveShardCellRequest=} [properties] Properties to set + * @returns {vtctldata.RemoveShardCellRequest} RemoveShardCellRequest instance */ - PlannedReparentShardRequest.create = function create(properties) { - return new PlannedReparentShardRequest(properties); + RemoveShardCellRequest.create = function create(properties) { + return new RemoveShardCellRequest(properties); }; /** - * Encodes the specified PlannedReparentShardRequest message. Does not implicitly {@link vtctldata.PlannedReparentShardRequest.verify|verify} messages. + * Encodes the specified RemoveShardCellRequest message. Does not implicitly {@link vtctldata.RemoveShardCellRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.PlannedReparentShardRequest + * @memberof vtctldata.RemoveShardCellRequest * @static - * @param {vtctldata.IPlannedReparentShardRequest} message PlannedReparentShardRequest message or plain object to encode + * @param {vtctldata.IRemoveShardCellRequest} message RemoveShardCellRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - PlannedReparentShardRequest.encode = function encode(message, writer) { + RemoveShardCellRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.new_primary != null && Object.hasOwnProperty.call(message, "new_primary")) - $root.topodata.TabletAlias.encode(message.new_primary, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.avoid_primary != null && Object.hasOwnProperty.call(message, "avoid_primary")) - $root.topodata.TabletAlias.encode(message.avoid_primary, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); - if (message.wait_replicas_timeout != null && Object.hasOwnProperty.call(message, "wait_replicas_timeout")) - $root.vttime.Duration.encode(message.wait_replicas_timeout, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.shard_name != null && Object.hasOwnProperty.call(message, "shard_name")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard_name); + if (message.cell != null && Object.hasOwnProperty.call(message, "cell")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.cell); + if (message.force != null && Object.hasOwnProperty.call(message, "force")) + writer.uint32(/* id 4, wireType 0 
=*/32).bool(message.force); + if (message.recursive != null && Object.hasOwnProperty.call(message, "recursive")) + writer.uint32(/* id 5, wireType 0 =*/40).bool(message.recursive); return writer; }; /** - * Encodes the specified PlannedReparentShardRequest message, length delimited. Does not implicitly {@link vtctldata.PlannedReparentShardRequest.verify|verify} messages. + * Encodes the specified RemoveShardCellRequest message, length delimited. Does not implicitly {@link vtctldata.RemoveShardCellRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.PlannedReparentShardRequest + * @memberof vtctldata.RemoveShardCellRequest * @static - * @param {vtctldata.IPlannedReparentShardRequest} message PlannedReparentShardRequest message or plain object to encode + * @param {vtctldata.IRemoveShardCellRequest} message RemoveShardCellRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - PlannedReparentShardRequest.encodeDelimited = function encodeDelimited(message, writer) { + RemoveShardCellRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a PlannedReparentShardRequest message from the specified reader or buffer. + * Decodes a RemoveShardCellRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.PlannedReparentShardRequest + * @memberof vtctldata.RemoveShardCellRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.PlannedReparentShardRequest} PlannedReparentShardRequest + * @returns {vtctldata.RemoveShardCellRequest} RemoveShardCellRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PlannedReparentShardRequest.decode = function decode(reader, length) { + RemoveShardCellRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.PlannedReparentShardRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RemoveShardCellRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -123618,19 +145435,19 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 2: { - message.shard = reader.string(); + message.shard_name = reader.string(); break; } case 3: { - message.new_primary = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + message.cell = reader.string(); break; } case 4: { - message.avoid_primary = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + message.force = reader.bool(); break; } case 5: { - message.wait_replicas_timeout = $root.vttime.Duration.decode(reader, reader.uint32()); + message.recursive = reader.bool(); break; } default: @@ -123642,174 +145459,330 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a PlannedReparentShardRequest message from the specified reader or buffer, length delimited. 
+ * Decodes a RemoveShardCellRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.PlannedReparentShardRequest + * @memberof vtctldata.RemoveShardCellRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.PlannedReparentShardRequest} PlannedReparentShardRequest + * @returns {vtctldata.RemoveShardCellRequest} RemoveShardCellRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PlannedReparentShardRequest.decodeDelimited = function decodeDelimited(reader) { + RemoveShardCellRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a PlannedReparentShardRequest message. + * Verifies a RemoveShardCellRequest message. * @function verify - * @memberof vtctldata.PlannedReparentShardRequest + * @memberof vtctldata.RemoveShardCellRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - PlannedReparentShardRequest.verify = function verify(message) { + RemoveShardCellRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.keyspace != null && message.hasOwnProperty("keyspace")) if (!$util.isString(message.keyspace)) return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.new_primary != null && message.hasOwnProperty("new_primary")) { - let error = $root.topodata.TabletAlias.verify(message.new_primary); - if (error) - return "new_primary." 
+ error; - } - if (message.avoid_primary != null && message.hasOwnProperty("avoid_primary")) { - let error = $root.topodata.TabletAlias.verify(message.avoid_primary); - if (error) - return "avoid_primary." + error; - } - if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout")) { - let error = $root.vttime.Duration.verify(message.wait_replicas_timeout); - if (error) - return "wait_replicas_timeout." + error; - } + if (message.shard_name != null && message.hasOwnProperty("shard_name")) + if (!$util.isString(message.shard_name)) + return "shard_name: string expected"; + if (message.cell != null && message.hasOwnProperty("cell")) + if (!$util.isString(message.cell)) + return "cell: string expected"; + if (message.force != null && message.hasOwnProperty("force")) + if (typeof message.force !== "boolean") + return "force: boolean expected"; + if (message.recursive != null && message.hasOwnProperty("recursive")) + if (typeof message.recursive !== "boolean") + return "recursive: boolean expected"; return null; }; /** - * Creates a PlannedReparentShardRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RemoveShardCellRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.PlannedReparentShardRequest + * @memberof vtctldata.RemoveShardCellRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.PlannedReparentShardRequest} PlannedReparentShardRequest + * @returns {vtctldata.RemoveShardCellRequest} RemoveShardCellRequest */ - PlannedReparentShardRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.PlannedReparentShardRequest) + RemoveShardCellRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RemoveShardCellRequest) return object; - let message = new $root.vtctldata.PlannedReparentShardRequest(); + let message = new $root.vtctldata.RemoveShardCellRequest(); if (object.keyspace != null) message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.new_primary != null) { - if (typeof object.new_primary !== "object") - throw TypeError(".vtctldata.PlannedReparentShardRequest.new_primary: object expected"); - message.new_primary = $root.topodata.TabletAlias.fromObject(object.new_primary); - } - if (object.avoid_primary != null) { - if (typeof object.avoid_primary !== "object") - throw TypeError(".vtctldata.PlannedReparentShardRequest.avoid_primary: object expected"); - message.avoid_primary = $root.topodata.TabletAlias.fromObject(object.avoid_primary); - } - if (object.wait_replicas_timeout != null) { - if (typeof object.wait_replicas_timeout !== "object") - throw TypeError(".vtctldata.PlannedReparentShardRequest.wait_replicas_timeout: object expected"); - message.wait_replicas_timeout = $root.vttime.Duration.fromObject(object.wait_replicas_timeout); - } + if (object.shard_name != null) + message.shard_name = String(object.shard_name); + if (object.cell != null) + message.cell = String(object.cell); + if (object.force != null) + message.force = Boolean(object.force); + if (object.recursive != null) + message.recursive = 
Boolean(object.recursive); return message; }; /** - * Creates a plain object from a PlannedReparentShardRequest message. Also converts values to other types if specified. + * Creates a plain object from a RemoveShardCellRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.PlannedReparentShardRequest + * @memberof vtctldata.RemoveShardCellRequest * @static - * @param {vtctldata.PlannedReparentShardRequest} message PlannedReparentShardRequest + * @param {vtctldata.RemoveShardCellRequest} message RemoveShardCellRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - PlannedReparentShardRequest.toObject = function toObject(message, options) { + RemoveShardCellRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { object.keyspace = ""; - object.shard = ""; - object.new_primary = null; - object.avoid_primary = null; - object.wait_replicas_timeout = null; + object.shard_name = ""; + object.cell = ""; + object.force = false; + object.recursive = false; } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.new_primary != null && message.hasOwnProperty("new_primary")) - object.new_primary = $root.topodata.TabletAlias.toObject(message.new_primary, options); - if (message.avoid_primary != null && message.hasOwnProperty("avoid_primary")) - object.avoid_primary = $root.topodata.TabletAlias.toObject(message.avoid_primary, options); - if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout")) - object.wait_replicas_timeout = $root.vttime.Duration.toObject(message.wait_replicas_timeout, options); + if (message.shard_name != null && message.hasOwnProperty("shard_name")) + object.shard_name = 
message.shard_name; + if (message.cell != null && message.hasOwnProperty("cell")) + object.cell = message.cell; + if (message.force != null && message.hasOwnProperty("force")) + object.force = message.force; + if (message.recursive != null && message.hasOwnProperty("recursive")) + object.recursive = message.recursive; return object; }; /** - * Converts this PlannedReparentShardRequest to JSON. + * Converts this RemoveShardCellRequest to JSON. + * @function toJSON + * @memberof vtctldata.RemoveShardCellRequest + * @instance + * @returns {Object.} JSON object + */ + RemoveShardCellRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for RemoveShardCellRequest + * @function getTypeUrl + * @memberof vtctldata.RemoveShardCellRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + RemoveShardCellRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.RemoveShardCellRequest"; + }; + + return RemoveShardCellRequest; + })(); + + vtctldata.RemoveShardCellResponse = (function() { + + /** + * Properties of a RemoveShardCellResponse. + * @memberof vtctldata + * @interface IRemoveShardCellResponse + */ + + /** + * Constructs a new RemoveShardCellResponse. + * @memberof vtctldata + * @classdesc Represents a RemoveShardCellResponse. 
+ * @implements IRemoveShardCellResponse + * @constructor + * @param {vtctldata.IRemoveShardCellResponse=} [properties] Properties to set + */ + function RemoveShardCellResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Creates a new RemoveShardCellResponse instance using the specified properties. + * @function create + * @memberof vtctldata.RemoveShardCellResponse + * @static + * @param {vtctldata.IRemoveShardCellResponse=} [properties] Properties to set + * @returns {vtctldata.RemoveShardCellResponse} RemoveShardCellResponse instance + */ + RemoveShardCellResponse.create = function create(properties) { + return new RemoveShardCellResponse(properties); + }; + + /** + * Encodes the specified RemoveShardCellResponse message. Does not implicitly {@link vtctldata.RemoveShardCellResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.RemoveShardCellResponse + * @static + * @param {vtctldata.IRemoveShardCellResponse} message RemoveShardCellResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + RemoveShardCellResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + return writer; + }; + + /** + * Encodes the specified RemoveShardCellResponse message, length delimited. Does not implicitly {@link vtctldata.RemoveShardCellResponse.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.RemoveShardCellResponse + * @static + * @param {vtctldata.IRemoveShardCellResponse} message RemoveShardCellResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + RemoveShardCellResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a RemoveShardCellResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.RemoveShardCellResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.RemoveShardCellResponse} RemoveShardCellResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + RemoveShardCellResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RemoveShardCellResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a RemoveShardCellResponse message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.RemoveShardCellResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.RemoveShardCellResponse} RemoveShardCellResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + RemoveShardCellResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a RemoveShardCellResponse message. + * @function verify + * @memberof vtctldata.RemoveShardCellResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + RemoveShardCellResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + return null; + }; + + /** + * Creates a RemoveShardCellResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.RemoveShardCellResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.RemoveShardCellResponse} RemoveShardCellResponse + */ + RemoveShardCellResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RemoveShardCellResponse) + return object; + return new $root.vtctldata.RemoveShardCellResponse(); + }; + + /** + * Creates a plain object from a RemoveShardCellResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.RemoveShardCellResponse + * @static + * @param {vtctldata.RemoveShardCellResponse} message RemoveShardCellResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + RemoveShardCellResponse.toObject = function toObject() { + return {}; + }; + + /** + * Converts this RemoveShardCellResponse to JSON. * @function toJSON - * @memberof vtctldata.PlannedReparentShardRequest + * @memberof vtctldata.RemoveShardCellResponse * @instance * @returns {Object.} JSON object */ - PlannedReparentShardRequest.prototype.toJSON = function toJSON() { + RemoveShardCellResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for PlannedReparentShardRequest + * Gets the default type url for RemoveShardCellResponse * @function getTypeUrl - * @memberof vtctldata.PlannedReparentShardRequest + * @memberof vtctldata.RemoveShardCellResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - PlannedReparentShardRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RemoveShardCellResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.PlannedReparentShardRequest"; + return typeUrlPrefix + "/vtctldata.RemoveShardCellResponse"; }; - return PlannedReparentShardRequest; + return RemoveShardCellResponse; })(); - vtctldata.PlannedReparentShardResponse = (function() { + vtctldata.ReparentTabletRequest = (function() { /** - * Properties of a PlannedReparentShardResponse. + * Properties of a ReparentTabletRequest. 
* @memberof vtctldata - * @interface IPlannedReparentShardResponse - * @property {string|null} [keyspace] PlannedReparentShardResponse keyspace - * @property {string|null} [shard] PlannedReparentShardResponse shard - * @property {topodata.ITabletAlias|null} [promoted_primary] PlannedReparentShardResponse promoted_primary - * @property {Array.|null} [events] PlannedReparentShardResponse events + * @interface IReparentTabletRequest + * @property {topodata.ITabletAlias|null} [tablet] ReparentTabletRequest tablet */ /** - * Constructs a new PlannedReparentShardResponse. + * Constructs a new ReparentTabletRequest. * @memberof vtctldata - * @classdesc Represents a PlannedReparentShardResponse. - * @implements IPlannedReparentShardResponse + * @classdesc Represents a ReparentTabletRequest. + * @implements IReparentTabletRequest * @constructor - * @param {vtctldata.IPlannedReparentShardResponse=} [properties] Properties to set + * @param {vtctldata.IReparentTabletRequest=} [properties] Properties to set */ - function PlannedReparentShardResponse(properties) { - this.events = []; + function ReparentTabletRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -123817,120 +145790,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * PlannedReparentShardResponse keyspace. - * @member {string} keyspace - * @memberof vtctldata.PlannedReparentShardResponse - * @instance - */ - PlannedReparentShardResponse.prototype.keyspace = ""; - - /** - * PlannedReparentShardResponse shard. - * @member {string} shard - * @memberof vtctldata.PlannedReparentShardResponse - * @instance - */ - PlannedReparentShardResponse.prototype.shard = ""; - - /** - * PlannedReparentShardResponse promoted_primary. 
- * @member {topodata.ITabletAlias|null|undefined} promoted_primary - * @memberof vtctldata.PlannedReparentShardResponse - * @instance - */ - PlannedReparentShardResponse.prototype.promoted_primary = null; - - /** - * PlannedReparentShardResponse events. - * @member {Array.} events - * @memberof vtctldata.PlannedReparentShardResponse + * ReparentTabletRequest tablet. + * @member {topodata.ITabletAlias|null|undefined} tablet + * @memberof vtctldata.ReparentTabletRequest * @instance */ - PlannedReparentShardResponse.prototype.events = $util.emptyArray; + ReparentTabletRequest.prototype.tablet = null; /** - * Creates a new PlannedReparentShardResponse instance using the specified properties. + * Creates a new ReparentTabletRequest instance using the specified properties. * @function create - * @memberof vtctldata.PlannedReparentShardResponse + * @memberof vtctldata.ReparentTabletRequest * @static - * @param {vtctldata.IPlannedReparentShardResponse=} [properties] Properties to set - * @returns {vtctldata.PlannedReparentShardResponse} PlannedReparentShardResponse instance + * @param {vtctldata.IReparentTabletRequest=} [properties] Properties to set + * @returns {vtctldata.ReparentTabletRequest} ReparentTabletRequest instance */ - PlannedReparentShardResponse.create = function create(properties) { - return new PlannedReparentShardResponse(properties); + ReparentTabletRequest.create = function create(properties) { + return new ReparentTabletRequest(properties); }; /** - * Encodes the specified PlannedReparentShardResponse message. Does not implicitly {@link vtctldata.PlannedReparentShardResponse.verify|verify} messages. + * Encodes the specified ReparentTabletRequest message. Does not implicitly {@link vtctldata.ReparentTabletRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.PlannedReparentShardResponse + * @memberof vtctldata.ReparentTabletRequest * @static - * @param {vtctldata.IPlannedReparentShardResponse} message PlannedReparentShardResponse message or plain object to encode + * @param {vtctldata.IReparentTabletRequest} message ReparentTabletRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - PlannedReparentShardResponse.encode = function encode(message, writer) { + ReparentTabletRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.promoted_primary != null && Object.hasOwnProperty.call(message, "promoted_primary")) - $root.topodata.TabletAlias.encode(message.promoted_primary, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.events != null && message.events.length) - for (let i = 0; i < message.events.length; ++i) - $root.logutil.Event.encode(message.events[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.tablet != null && Object.hasOwnProperty.call(message, "tablet")) + $root.topodata.TabletAlias.encode(message.tablet, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified PlannedReparentShardResponse message, length delimited. Does not implicitly {@link vtctldata.PlannedReparentShardResponse.verify|verify} messages. + * Encodes the specified ReparentTabletRequest message, length delimited. Does not implicitly {@link vtctldata.ReparentTabletRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.PlannedReparentShardResponse + * @memberof vtctldata.ReparentTabletRequest * @static - * @param {vtctldata.IPlannedReparentShardResponse} message PlannedReparentShardResponse message or plain object to encode + * @param {vtctldata.IReparentTabletRequest} message ReparentTabletRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - PlannedReparentShardResponse.encodeDelimited = function encodeDelimited(message, writer) { + ReparentTabletRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a PlannedReparentShardResponse message from the specified reader or buffer. + * Decodes a ReparentTabletRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.PlannedReparentShardResponse + * @memberof vtctldata.ReparentTabletRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.PlannedReparentShardResponse} PlannedReparentShardResponse + * @returns {vtctldata.ReparentTabletRequest} ReparentTabletRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PlannedReparentShardResponse.decode = function decode(reader, length) { + ReparentTabletRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.PlannedReparentShardResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ReparentTabletRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - message.shard = reader.string(); - break; - } - case 3: { - message.promoted_primary = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } - case 4: { - if (!(message.events && message.events.length)) - message.events = []; - message.events.push($root.logutil.Event.decode(reader, reader.uint32())); + message.tablet = $root.topodata.TabletAlias.decode(reader, reader.uint32()); break; } default: @@ -123942,173 +145870,129 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a PlannedReparentShardResponse message from the specified reader or buffer, length delimited. + * Decodes a ReparentTabletRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.PlannedReparentShardResponse + * @memberof vtctldata.ReparentTabletRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.PlannedReparentShardResponse} PlannedReparentShardResponse + * @returns {vtctldata.ReparentTabletRequest} ReparentTabletRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - PlannedReparentShardResponse.decodeDelimited = function decodeDelimited(reader) { + ReparentTabletRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a PlannedReparentShardResponse message. + * Verifies a ReparentTabletRequest message. 
* @function verify - * @memberof vtctldata.PlannedReparentShardResponse + * @memberof vtctldata.ReparentTabletRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - PlannedReparentShardResponse.verify = function verify(message) { + ReparentTabletRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.promoted_primary != null && message.hasOwnProperty("promoted_primary")) { - let error = $root.topodata.TabletAlias.verify(message.promoted_primary); + if (message.tablet != null && message.hasOwnProperty("tablet")) { + let error = $root.topodata.TabletAlias.verify(message.tablet); if (error) - return "promoted_primary." + error; - } - if (message.events != null && message.hasOwnProperty("events")) { - if (!Array.isArray(message.events)) - return "events: array expected"; - for (let i = 0; i < message.events.length; ++i) { - let error = $root.logutil.Event.verify(message.events[i]); - if (error) - return "events." + error; - } + return "tablet." + error; } return null; }; /** - * Creates a PlannedReparentShardResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ReparentTabletRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.PlannedReparentShardResponse + * @memberof vtctldata.ReparentTabletRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.PlannedReparentShardResponse} PlannedReparentShardResponse + * @returns {vtctldata.ReparentTabletRequest} ReparentTabletRequest */ - PlannedReparentShardResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.PlannedReparentShardResponse) + ReparentTabletRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ReparentTabletRequest) return object; - let message = new $root.vtctldata.PlannedReparentShardResponse(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.promoted_primary != null) { - if (typeof object.promoted_primary !== "object") - throw TypeError(".vtctldata.PlannedReparentShardResponse.promoted_primary: object expected"); - message.promoted_primary = $root.topodata.TabletAlias.fromObject(object.promoted_primary); - } - if (object.events) { - if (!Array.isArray(object.events)) - throw TypeError(".vtctldata.PlannedReparentShardResponse.events: array expected"); - message.events = []; - for (let i = 0; i < object.events.length; ++i) { - if (typeof object.events[i] !== "object") - throw TypeError(".vtctldata.PlannedReparentShardResponse.events: object expected"); - message.events[i] = $root.logutil.Event.fromObject(object.events[i]); - } + let message = new $root.vtctldata.ReparentTabletRequest(); + if (object.tablet != null) { + if (typeof object.tablet !== "object") + throw TypeError(".vtctldata.ReparentTabletRequest.tablet: object expected"); + message.tablet = $root.topodata.TabletAlias.fromObject(object.tablet); } return message; }; /** - * Creates a plain object from a PlannedReparentShardResponse message. Also converts values to other types if specified. 
+ * Creates a plain object from a ReparentTabletRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.PlannedReparentShardResponse + * @memberof vtctldata.ReparentTabletRequest * @static - * @param {vtctldata.PlannedReparentShardResponse} message PlannedReparentShardResponse + * @param {vtctldata.ReparentTabletRequest} message ReparentTabletRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - PlannedReparentShardResponse.toObject = function toObject(message, options) { + ReparentTabletRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.events = []; - if (options.defaults) { - object.keyspace = ""; - object.shard = ""; - object.promoted_primary = null; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.promoted_primary != null && message.hasOwnProperty("promoted_primary")) - object.promoted_primary = $root.topodata.TabletAlias.toObject(message.promoted_primary, options); - if (message.events && message.events.length) { - object.events = []; - for (let j = 0; j < message.events.length; ++j) - object.events[j] = $root.logutil.Event.toObject(message.events[j], options); - } + if (options.defaults) + object.tablet = null; + if (message.tablet != null && message.hasOwnProperty("tablet")) + object.tablet = $root.topodata.TabletAlias.toObject(message.tablet, options); return object; }; /** - * Converts this PlannedReparentShardResponse to JSON. + * Converts this ReparentTabletRequest to JSON. 
* @function toJSON - * @memberof vtctldata.PlannedReparentShardResponse + * @memberof vtctldata.ReparentTabletRequest * @instance * @returns {Object.} JSON object */ - PlannedReparentShardResponse.prototype.toJSON = function toJSON() { + ReparentTabletRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for PlannedReparentShardResponse + * Gets the default type url for ReparentTabletRequest * @function getTypeUrl - * @memberof vtctldata.PlannedReparentShardResponse + * @memberof vtctldata.ReparentTabletRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - PlannedReparentShardResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReparentTabletRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.PlannedReparentShardResponse"; + return typeUrlPrefix + "/vtctldata.ReparentTabletRequest"; }; - return PlannedReparentShardResponse; + return ReparentTabletRequest; })(); - vtctldata.RebuildKeyspaceGraphRequest = (function() { + vtctldata.ReparentTabletResponse = (function() { /** - * Properties of a RebuildKeyspaceGraphRequest. + * Properties of a ReparentTabletResponse. 
* @memberof vtctldata - * @interface IRebuildKeyspaceGraphRequest - * @property {string|null} [keyspace] RebuildKeyspaceGraphRequest keyspace - * @property {Array.|null} [cells] RebuildKeyspaceGraphRequest cells - * @property {boolean|null} [allow_partial] RebuildKeyspaceGraphRequest allow_partial + * @interface IReparentTabletResponse + * @property {string|null} [keyspace] ReparentTabletResponse keyspace + * @property {string|null} [shard] ReparentTabletResponse shard + * @property {topodata.ITabletAlias|null} [primary] ReparentTabletResponse primary */ /** - * Constructs a new RebuildKeyspaceGraphRequest. + * Constructs a new ReparentTabletResponse. * @memberof vtctldata - * @classdesc Represents a RebuildKeyspaceGraphRequest. - * @implements IRebuildKeyspaceGraphRequest + * @classdesc Represents a ReparentTabletResponse. + * @implements IReparentTabletResponse * @constructor - * @param {vtctldata.IRebuildKeyspaceGraphRequest=} [properties] Properties to set + * @param {vtctldata.IReparentTabletResponse=} [properties] Properties to set */ - function RebuildKeyspaceGraphRequest(properties) { - this.cells = []; + function ReparentTabletResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -124116,91 +146000,90 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * RebuildKeyspaceGraphRequest keyspace. + * ReparentTabletResponse keyspace. * @member {string} keyspace - * @memberof vtctldata.RebuildKeyspaceGraphRequest + * @memberof vtctldata.ReparentTabletResponse * @instance */ - RebuildKeyspaceGraphRequest.prototype.keyspace = ""; + ReparentTabletResponse.prototype.keyspace = ""; /** - * RebuildKeyspaceGraphRequest cells. - * @member {Array.} cells - * @memberof vtctldata.RebuildKeyspaceGraphRequest + * ReparentTabletResponse shard. 
+ * @member {string} shard + * @memberof vtctldata.ReparentTabletResponse * @instance */ - RebuildKeyspaceGraphRequest.prototype.cells = $util.emptyArray; + ReparentTabletResponse.prototype.shard = ""; /** - * RebuildKeyspaceGraphRequest allow_partial. - * @member {boolean} allow_partial - * @memberof vtctldata.RebuildKeyspaceGraphRequest + * ReparentTabletResponse primary. + * @member {topodata.ITabletAlias|null|undefined} primary + * @memberof vtctldata.ReparentTabletResponse * @instance */ - RebuildKeyspaceGraphRequest.prototype.allow_partial = false; + ReparentTabletResponse.prototype.primary = null; /** - * Creates a new RebuildKeyspaceGraphRequest instance using the specified properties. + * Creates a new ReparentTabletResponse instance using the specified properties. * @function create - * @memberof vtctldata.RebuildKeyspaceGraphRequest + * @memberof vtctldata.ReparentTabletResponse * @static - * @param {vtctldata.IRebuildKeyspaceGraphRequest=} [properties] Properties to set - * @returns {vtctldata.RebuildKeyspaceGraphRequest} RebuildKeyspaceGraphRequest instance + * @param {vtctldata.IReparentTabletResponse=} [properties] Properties to set + * @returns {vtctldata.ReparentTabletResponse} ReparentTabletResponse instance */ - RebuildKeyspaceGraphRequest.create = function create(properties) { - return new RebuildKeyspaceGraphRequest(properties); + ReparentTabletResponse.create = function create(properties) { + return new ReparentTabletResponse(properties); }; /** - * Encodes the specified RebuildKeyspaceGraphRequest message. Does not implicitly {@link vtctldata.RebuildKeyspaceGraphRequest.verify|verify} messages. + * Encodes the specified ReparentTabletResponse message. Does not implicitly {@link vtctldata.ReparentTabletResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.RebuildKeyspaceGraphRequest + * @memberof vtctldata.ReparentTabletResponse * @static - * @param {vtctldata.IRebuildKeyspaceGraphRequest} message RebuildKeyspaceGraphRequest message or plain object to encode + * @param {vtctldata.IReparentTabletResponse} message ReparentTabletResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RebuildKeyspaceGraphRequest.encode = function encode(message, writer) { + ReparentTabletResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.cells != null && message.cells.length) - for (let i = 0; i < message.cells.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.cells[i]); - if (message.allow_partial != null && Object.hasOwnProperty.call(message, "allow_partial")) - writer.uint32(/* id 3, wireType 0 =*/24).bool(message.allow_partial); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.primary != null && Object.hasOwnProperty.call(message, "primary")) + $root.topodata.TabletAlias.encode(message.primary, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); return writer; }; /** - * Encodes the specified RebuildKeyspaceGraphRequest message, length delimited. Does not implicitly {@link vtctldata.RebuildKeyspaceGraphRequest.verify|verify} messages. + * Encodes the specified ReparentTabletResponse message, length delimited. Does not implicitly {@link vtctldata.ReparentTabletResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.RebuildKeyspaceGraphRequest + * @memberof vtctldata.ReparentTabletResponse * @static - * @param {vtctldata.IRebuildKeyspaceGraphRequest} message RebuildKeyspaceGraphRequest message or plain object to encode + * @param {vtctldata.IReparentTabletResponse} message ReparentTabletResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RebuildKeyspaceGraphRequest.encodeDelimited = function encodeDelimited(message, writer) { + ReparentTabletResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RebuildKeyspaceGraphRequest message from the specified reader or buffer. + * Decodes a ReparentTabletResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.RebuildKeyspaceGraphRequest + * @memberof vtctldata.ReparentTabletResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.RebuildKeyspaceGraphRequest} RebuildKeyspaceGraphRequest + * @returns {vtctldata.ReparentTabletResponse} ReparentTabletResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RebuildKeyspaceGraphRequest.decode = function decode(reader, length) { + ReparentTabletResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RebuildKeyspaceGraphRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ReparentTabletResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -124209,13 +146092,11 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 2: { - if (!(message.cells && message.cells.length)) - message.cells = []; - message.cells.push(reader.string()); + message.shard = reader.string(); break; } case 3: { - message.allow_partial = reader.bool(); + message.primary = $root.topodata.TabletAlias.decode(reader, reader.uint32()); break; } default: @@ -124227,215 +146108,411 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a RebuildKeyspaceGraphRequest message from the specified reader or buffer, length delimited. + * Decodes a ReparentTabletResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.RebuildKeyspaceGraphRequest + * @memberof vtctldata.ReparentTabletResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.RebuildKeyspaceGraphRequest} RebuildKeyspaceGraphRequest + * @returns {vtctldata.ReparentTabletResponse} ReparentTabletResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RebuildKeyspaceGraphRequest.decodeDelimited = function decodeDelimited(reader) { + ReparentTabletResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RebuildKeyspaceGraphRequest message. + * Verifies a ReparentTabletResponse message. 
* @function verify - * @memberof vtctldata.RebuildKeyspaceGraphRequest + * @memberof vtctldata.ReparentTabletResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RebuildKeyspaceGraphRequest.verify = function verify(message) { + ReparentTabletResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.keyspace != null && message.hasOwnProperty("keyspace")) if (!$util.isString(message.keyspace)) return "keyspace: string expected"; - if (message.cells != null && message.hasOwnProperty("cells")) { - if (!Array.isArray(message.cells)) - return "cells: array expected"; - for (let i = 0; i < message.cells.length; ++i) - if (!$util.isString(message.cells[i])) - return "cells: string[] expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.primary != null && message.hasOwnProperty("primary")) { + let error = $root.topodata.TabletAlias.verify(message.primary); + if (error) + return "primary." + error; } - if (message.allow_partial != null && message.hasOwnProperty("allow_partial")) - if (typeof message.allow_partial !== "boolean") - return "allow_partial: boolean expected"; return null; }; /** - * Creates a RebuildKeyspaceGraphRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ReparentTabletResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.RebuildKeyspaceGraphRequest + * @memberof vtctldata.ReparentTabletResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.RebuildKeyspaceGraphRequest} RebuildKeyspaceGraphRequest + * @returns {vtctldata.ReparentTabletResponse} ReparentTabletResponse */ - RebuildKeyspaceGraphRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.RebuildKeyspaceGraphRequest) + ReparentTabletResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ReparentTabletResponse) return object; - let message = new $root.vtctldata.RebuildKeyspaceGraphRequest(); + let message = new $root.vtctldata.ReparentTabletResponse(); if (object.keyspace != null) message.keyspace = String(object.keyspace); - if (object.cells) { - if (!Array.isArray(object.cells)) - throw TypeError(".vtctldata.RebuildKeyspaceGraphRequest.cells: array expected"); - message.cells = []; - for (let i = 0; i < object.cells.length; ++i) - message.cells[i] = String(object.cells[i]); + if (object.shard != null) + message.shard = String(object.shard); + if (object.primary != null) { + if (typeof object.primary !== "object") + throw TypeError(".vtctldata.ReparentTabletResponse.primary: object expected"); + message.primary = $root.topodata.TabletAlias.fromObject(object.primary); } - if (object.allow_partial != null) - message.allow_partial = Boolean(object.allow_partial); return message; }; /** - * Creates a plain object from a RebuildKeyspaceGraphRequest message. Also converts values to other types if specified. + * Creates a plain object from a ReparentTabletResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.RebuildKeyspaceGraphRequest + * @memberof vtctldata.ReparentTabletResponse * @static - * @param {vtctldata.RebuildKeyspaceGraphRequest} message RebuildKeyspaceGraphRequest + * @param {vtctldata.ReparentTabletResponse} message ReparentTabletResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RebuildKeyspaceGraphRequest.toObject = function toObject(message, options) { + ReparentTabletResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.cells = []; if (options.defaults) { object.keyspace = ""; - object.allow_partial = false; + object.shard = ""; + object.primary = null; } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; - if (message.cells && message.cells.length) { - object.cells = []; - for (let j = 0; j < message.cells.length; ++j) - object.cells[j] = message.cells[j]; - } - if (message.allow_partial != null && message.hasOwnProperty("allow_partial")) - object.allow_partial = message.allow_partial; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.primary != null && message.hasOwnProperty("primary")) + object.primary = $root.topodata.TabletAlias.toObject(message.primary, options); return object; }; /** - * Converts this RebuildKeyspaceGraphRequest to JSON. - * @function toJSON - * @memberof vtctldata.RebuildKeyspaceGraphRequest + * Converts this ReparentTabletResponse to JSON. 
+ * @function toJSON + * @memberof vtctldata.ReparentTabletResponse + * @instance + * @returns {Object.} JSON object + */ + ReparentTabletResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ReparentTabletResponse + * @function getTypeUrl + * @memberof vtctldata.ReparentTabletResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ReparentTabletResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ReparentTabletResponse"; + }; + + return ReparentTabletResponse; + })(); + + vtctldata.ReshardCreateRequest = (function() { + + /** + * Properties of a ReshardCreateRequest. + * @memberof vtctldata + * @interface IReshardCreateRequest + * @property {string|null} [workflow] ReshardCreateRequest workflow + * @property {string|null} [keyspace] ReshardCreateRequest keyspace + * @property {Array.|null} [source_shards] ReshardCreateRequest source_shards + * @property {Array.|null} [target_shards] ReshardCreateRequest target_shards + * @property {Array.|null} [cells] ReshardCreateRequest cells + * @property {Array.|null} [tablet_types] ReshardCreateRequest tablet_types + * @property {tabletmanagerdata.TabletSelectionPreference|null} [tablet_selection_preference] ReshardCreateRequest tablet_selection_preference + * @property {boolean|null} [skip_schema_copy] ReshardCreateRequest skip_schema_copy + * @property {string|null} [on_ddl] ReshardCreateRequest on_ddl + * @property {boolean|null} [stop_after_copy] ReshardCreateRequest stop_after_copy + * @property {boolean|null} [defer_secondary_keys] ReshardCreateRequest defer_secondary_keys + * @property {boolean|null} [auto_start] ReshardCreateRequest auto_start + */ + + /** + * Constructs a new 
ReshardCreateRequest. + * @memberof vtctldata + * @classdesc Represents a ReshardCreateRequest. + * @implements IReshardCreateRequest + * @constructor + * @param {vtctldata.IReshardCreateRequest=} [properties] Properties to set + */ + function ReshardCreateRequest(properties) { + this.source_shards = []; + this.target_shards = []; + this.cells = []; + this.tablet_types = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ReshardCreateRequest workflow. + * @member {string} workflow + * @memberof vtctldata.ReshardCreateRequest + * @instance + */ + ReshardCreateRequest.prototype.workflow = ""; + + /** + * ReshardCreateRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.ReshardCreateRequest + * @instance + */ + ReshardCreateRequest.prototype.keyspace = ""; + + /** + * ReshardCreateRequest source_shards. + * @member {Array.} source_shards + * @memberof vtctldata.ReshardCreateRequest + * @instance + */ + ReshardCreateRequest.prototype.source_shards = $util.emptyArray; + + /** + * ReshardCreateRequest target_shards. + * @member {Array.} target_shards + * @memberof vtctldata.ReshardCreateRequest + * @instance + */ + ReshardCreateRequest.prototype.target_shards = $util.emptyArray; + + /** + * ReshardCreateRequest cells. + * @member {Array.} cells + * @memberof vtctldata.ReshardCreateRequest + * @instance + */ + ReshardCreateRequest.prototype.cells = $util.emptyArray; + + /** + * ReshardCreateRequest tablet_types. 
+ * @member {Array.} tablet_types + * @memberof vtctldata.ReshardCreateRequest * @instance - * @returns {Object.} JSON object */ - RebuildKeyspaceGraphRequest.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; + ReshardCreateRequest.prototype.tablet_types = $util.emptyArray; /** - * Gets the default type url for RebuildKeyspaceGraphRequest - * @function getTypeUrl - * @memberof vtctldata.RebuildKeyspaceGraphRequest - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url + * ReshardCreateRequest tablet_selection_preference. + * @member {tabletmanagerdata.TabletSelectionPreference} tablet_selection_preference + * @memberof vtctldata.ReshardCreateRequest + * @instance */ - RebuildKeyspaceGraphRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/vtctldata.RebuildKeyspaceGraphRequest"; - }; + ReshardCreateRequest.prototype.tablet_selection_preference = 0; - return RebuildKeyspaceGraphRequest; - })(); + /** + * ReshardCreateRequest skip_schema_copy. + * @member {boolean} skip_schema_copy + * @memberof vtctldata.ReshardCreateRequest + * @instance + */ + ReshardCreateRequest.prototype.skip_schema_copy = false; - vtctldata.RebuildKeyspaceGraphResponse = (function() { + /** + * ReshardCreateRequest on_ddl. + * @member {string} on_ddl + * @memberof vtctldata.ReshardCreateRequest + * @instance + */ + ReshardCreateRequest.prototype.on_ddl = ""; /** - * Properties of a RebuildKeyspaceGraphResponse. - * @memberof vtctldata - * @interface IRebuildKeyspaceGraphResponse + * ReshardCreateRequest stop_after_copy. 
+ * @member {boolean} stop_after_copy + * @memberof vtctldata.ReshardCreateRequest + * @instance */ + ReshardCreateRequest.prototype.stop_after_copy = false; /** - * Constructs a new RebuildKeyspaceGraphResponse. - * @memberof vtctldata - * @classdesc Represents a RebuildKeyspaceGraphResponse. - * @implements IRebuildKeyspaceGraphResponse - * @constructor - * @param {vtctldata.IRebuildKeyspaceGraphResponse=} [properties] Properties to set + * ReshardCreateRequest defer_secondary_keys. + * @member {boolean} defer_secondary_keys + * @memberof vtctldata.ReshardCreateRequest + * @instance */ - function RebuildKeyspaceGraphResponse(properties) { - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + ReshardCreateRequest.prototype.defer_secondary_keys = false; /** - * Creates a new RebuildKeyspaceGraphResponse instance using the specified properties. + * ReshardCreateRequest auto_start. + * @member {boolean} auto_start + * @memberof vtctldata.ReshardCreateRequest + * @instance + */ + ReshardCreateRequest.prototype.auto_start = false; + + /** + * Creates a new ReshardCreateRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.RebuildKeyspaceGraphResponse + * @memberof vtctldata.ReshardCreateRequest * @static - * @param {vtctldata.IRebuildKeyspaceGraphResponse=} [properties] Properties to set - * @returns {vtctldata.RebuildKeyspaceGraphResponse} RebuildKeyspaceGraphResponse instance + * @param {vtctldata.IReshardCreateRequest=} [properties] Properties to set + * @returns {vtctldata.ReshardCreateRequest} ReshardCreateRequest instance */ - RebuildKeyspaceGraphResponse.create = function create(properties) { - return new RebuildKeyspaceGraphResponse(properties); + ReshardCreateRequest.create = function create(properties) { + return new ReshardCreateRequest(properties); }; /** - * Encodes the specified RebuildKeyspaceGraphResponse message. Does not implicitly {@link vtctldata.RebuildKeyspaceGraphResponse.verify|verify} messages. + * Encodes the specified ReshardCreateRequest message. Does not implicitly {@link vtctldata.ReshardCreateRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.RebuildKeyspaceGraphResponse + * @memberof vtctldata.ReshardCreateRequest * @static - * @param {vtctldata.IRebuildKeyspaceGraphResponse} message RebuildKeyspaceGraphResponse message or plain object to encode + * @param {vtctldata.IReshardCreateRequest} message ReshardCreateRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RebuildKeyspaceGraphResponse.encode = function encode(message, writer) { + ReshardCreateRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.workflow); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.keyspace); + if (message.source_shards != null && 
message.source_shards.length) + for (let i = 0; i < message.source_shards.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.source_shards[i]); + if (message.target_shards != null && message.target_shards.length) + for (let i = 0; i < message.target_shards.length; ++i) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.target_shards[i]); + if (message.cells != null && message.cells.length) + for (let i = 0; i < message.cells.length; ++i) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.cells[i]); + if (message.tablet_types != null && message.tablet_types.length) { + writer.uint32(/* id 6, wireType 2 =*/50).fork(); + for (let i = 0; i < message.tablet_types.length; ++i) + writer.int32(message.tablet_types[i]); + writer.ldelim(); + } + if (message.tablet_selection_preference != null && Object.hasOwnProperty.call(message, "tablet_selection_preference")) + writer.uint32(/* id 7, wireType 0 =*/56).int32(message.tablet_selection_preference); + if (message.skip_schema_copy != null && Object.hasOwnProperty.call(message, "skip_schema_copy")) + writer.uint32(/* id 8, wireType 0 =*/64).bool(message.skip_schema_copy); + if (message.on_ddl != null && Object.hasOwnProperty.call(message, "on_ddl")) + writer.uint32(/* id 9, wireType 2 =*/74).string(message.on_ddl); + if (message.stop_after_copy != null && Object.hasOwnProperty.call(message, "stop_after_copy")) + writer.uint32(/* id 10, wireType 0 =*/80).bool(message.stop_after_copy); + if (message.defer_secondary_keys != null && Object.hasOwnProperty.call(message, "defer_secondary_keys")) + writer.uint32(/* id 11, wireType 0 =*/88).bool(message.defer_secondary_keys); + if (message.auto_start != null && Object.hasOwnProperty.call(message, "auto_start")) + writer.uint32(/* id 12, wireType 0 =*/96).bool(message.auto_start); return writer; }; /** - * Encodes the specified RebuildKeyspaceGraphResponse message, length delimited. 
Does not implicitly {@link vtctldata.RebuildKeyspaceGraphResponse.verify|verify} messages. + * Encodes the specified ReshardCreateRequest message, length delimited. Does not implicitly {@link vtctldata.ReshardCreateRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.RebuildKeyspaceGraphResponse + * @memberof vtctldata.ReshardCreateRequest * @static - * @param {vtctldata.IRebuildKeyspaceGraphResponse} message RebuildKeyspaceGraphResponse message or plain object to encode + * @param {vtctldata.IReshardCreateRequest} message ReshardCreateRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RebuildKeyspaceGraphResponse.encodeDelimited = function encodeDelimited(message, writer) { + ReshardCreateRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RebuildKeyspaceGraphResponse message from the specified reader or buffer. + * Decodes a ReshardCreateRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.RebuildKeyspaceGraphResponse + * @memberof vtctldata.ReshardCreateRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.RebuildKeyspaceGraphResponse} RebuildKeyspaceGraphResponse + * @returns {vtctldata.ReshardCreateRequest} ReshardCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RebuildKeyspaceGraphResponse.decode = function decode(reader, length) { + ReshardCreateRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.RebuildKeyspaceGraphResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ReshardCreateRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.workflow = reader.string(); + break; + } + case 2: { + message.keyspace = reader.string(); + break; + } + case 3: { + if (!(message.source_shards && message.source_shards.length)) + message.source_shards = []; + message.source_shards.push(reader.string()); + break; + } + case 4: { + if (!(message.target_shards && message.target_shards.length)) + message.target_shards = []; + message.target_shards.push(reader.string()); + break; + } + case 5: { + if (!(message.cells && message.cells.length)) + message.cells = []; + message.cells.push(reader.string()); + break; + } + case 6: { + if (!(message.tablet_types && message.tablet_types.length)) + message.tablet_types = []; + if ((tag & 7) === 2) { + let end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.tablet_types.push(reader.int32()); + } else + message.tablet_types.push(reader.int32()); + break; + } + case 7: { + message.tablet_selection_preference = reader.int32(); + break; + } + case 8: { + message.skip_schema_copy = reader.bool(); + break; + } + case 9: { + message.on_ddl = reader.string(); + break; + } + case 10: { + message.stop_after_copy = reader.bool(); + break; + } + case 11: { + message.defer_secondary_keys = reader.bool(); + break; + } + case 12: { + message.auto_start = reader.bool(); + break; + } default: reader.skipType(tag & 7); break; @@ -124445,110 +146522,353 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a RebuildKeyspaceGraphResponse message from the specified reader or buffer, length delimited. + * Decodes a ReshardCreateRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.RebuildKeyspaceGraphResponse + * @memberof vtctldata.ReshardCreateRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.RebuildKeyspaceGraphResponse} RebuildKeyspaceGraphResponse + * @returns {vtctldata.ReshardCreateRequest} ReshardCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RebuildKeyspaceGraphResponse.decodeDelimited = function decodeDelimited(reader) { + ReshardCreateRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RebuildKeyspaceGraphResponse message. + * Verifies a ReshardCreateRequest message. * @function verify - * @memberof vtctldata.RebuildKeyspaceGraphResponse + * @memberof vtctldata.ReshardCreateRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RebuildKeyspaceGraphResponse.verify = function verify(message) { + ReshardCreateRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.source_shards != null && message.hasOwnProperty("source_shards")) { + if (!Array.isArray(message.source_shards)) + return "source_shards: array expected"; + for (let i = 0; i < message.source_shards.length; ++i) + if (!$util.isString(message.source_shards[i])) + return "source_shards: string[] expected"; + } + if (message.target_shards != null 
&& message.hasOwnProperty("target_shards")) { + if (!Array.isArray(message.target_shards)) + return "target_shards: array expected"; + for (let i = 0; i < message.target_shards.length; ++i) + if (!$util.isString(message.target_shards[i])) + return "target_shards: string[] expected"; + } + if (message.cells != null && message.hasOwnProperty("cells")) { + if (!Array.isArray(message.cells)) + return "cells: array expected"; + for (let i = 0; i < message.cells.length; ++i) + if (!$util.isString(message.cells[i])) + return "cells: string[] expected"; + } + if (message.tablet_types != null && message.hasOwnProperty("tablet_types")) { + if (!Array.isArray(message.tablet_types)) + return "tablet_types: array expected"; + for (let i = 0; i < message.tablet_types.length; ++i) + switch (message.tablet_types[i]) { + default: + return "tablet_types: enum value[] expected"; + case 0: + case 1: + case 1: + case 2: + case 3: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + } + } + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + switch (message.tablet_selection_preference) { + default: + return "tablet_selection_preference: enum value expected"; + case 0: + case 1: + case 3: + break; + } + if (message.skip_schema_copy != null && message.hasOwnProperty("skip_schema_copy")) + if (typeof message.skip_schema_copy !== "boolean") + return "skip_schema_copy: boolean expected"; + if (message.on_ddl != null && message.hasOwnProperty("on_ddl")) + if (!$util.isString(message.on_ddl)) + return "on_ddl: string expected"; + if (message.stop_after_copy != null && message.hasOwnProperty("stop_after_copy")) + if (typeof message.stop_after_copy !== "boolean") + return "stop_after_copy: boolean expected"; + if (message.defer_secondary_keys != null && message.hasOwnProperty("defer_secondary_keys")) + if (typeof message.defer_secondary_keys !== "boolean") + return "defer_secondary_keys: boolean expected"; + if 
(message.auto_start != null && message.hasOwnProperty("auto_start")) + if (typeof message.auto_start !== "boolean") + return "auto_start: boolean expected"; return null; }; /** - * Creates a RebuildKeyspaceGraphResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ReshardCreateRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.RebuildKeyspaceGraphResponse + * @memberof vtctldata.ReshardCreateRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.RebuildKeyspaceGraphResponse} RebuildKeyspaceGraphResponse + * @returns {vtctldata.ReshardCreateRequest} ReshardCreateRequest */ - RebuildKeyspaceGraphResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.RebuildKeyspaceGraphResponse) + ReshardCreateRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ReshardCreateRequest) return object; - return new $root.vtctldata.RebuildKeyspaceGraphResponse(); + let message = new $root.vtctldata.ReshardCreateRequest(); + if (object.workflow != null) + message.workflow = String(object.workflow); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.source_shards) { + if (!Array.isArray(object.source_shards)) + throw TypeError(".vtctldata.ReshardCreateRequest.source_shards: array expected"); + message.source_shards = []; + for (let i = 0; i < object.source_shards.length; ++i) + message.source_shards[i] = String(object.source_shards[i]); + } + if (object.target_shards) { + if (!Array.isArray(object.target_shards)) + throw TypeError(".vtctldata.ReshardCreateRequest.target_shards: array expected"); + message.target_shards = []; + for (let i = 0; i < object.target_shards.length; ++i) + message.target_shards[i] = String(object.target_shards[i]); + } + if (object.cells) { + if (!Array.isArray(object.cells)) + throw 
TypeError(".vtctldata.ReshardCreateRequest.cells: array expected"); + message.cells = []; + for (let i = 0; i < object.cells.length; ++i) + message.cells[i] = String(object.cells[i]); + } + if (object.tablet_types) { + if (!Array.isArray(object.tablet_types)) + throw TypeError(".vtctldata.ReshardCreateRequest.tablet_types: array expected"); + message.tablet_types = []; + for (let i = 0; i < object.tablet_types.length; ++i) + switch (object.tablet_types[i]) { + default: + if (typeof object.tablet_types[i] === "number") { + message.tablet_types[i] = object.tablet_types[i]; + break; + } + case "UNKNOWN": + case 0: + message.tablet_types[i] = 0; + break; + case "PRIMARY": + case 1: + message.tablet_types[i] = 1; + break; + case "MASTER": + case 1: + message.tablet_types[i] = 1; + break; + case "REPLICA": + case 2: + message.tablet_types[i] = 2; + break; + case "RDONLY": + case 3: + message.tablet_types[i] = 3; + break; + case "BATCH": + case 3: + message.tablet_types[i] = 3; + break; + case "SPARE": + case 4: + message.tablet_types[i] = 4; + break; + case "EXPERIMENTAL": + case 5: + message.tablet_types[i] = 5; + break; + case "BACKUP": + case 6: + message.tablet_types[i] = 6; + break; + case "RESTORE": + case 7: + message.tablet_types[i] = 7; + break; + case "DRAINED": + case 8: + message.tablet_types[i] = 8; + break; + } + } + switch (object.tablet_selection_preference) { + default: + if (typeof object.tablet_selection_preference === "number") { + message.tablet_selection_preference = object.tablet_selection_preference; + break; + } + break; + case "ANY": + case 0: + message.tablet_selection_preference = 0; + break; + case "INORDER": + case 1: + message.tablet_selection_preference = 1; + break; + case "UNKNOWN": + case 3: + message.tablet_selection_preference = 3; + break; + } + if (object.skip_schema_copy != null) + message.skip_schema_copy = Boolean(object.skip_schema_copy); + if (object.on_ddl != null) + message.on_ddl = String(object.on_ddl); + if 
(object.stop_after_copy != null) + message.stop_after_copy = Boolean(object.stop_after_copy); + if (object.defer_secondary_keys != null) + message.defer_secondary_keys = Boolean(object.defer_secondary_keys); + if (object.auto_start != null) + message.auto_start = Boolean(object.auto_start); + return message; }; /** - * Creates a plain object from a RebuildKeyspaceGraphResponse message. Also converts values to other types if specified. + * Creates a plain object from a ReshardCreateRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.RebuildKeyspaceGraphResponse + * @memberof vtctldata.ReshardCreateRequest * @static - * @param {vtctldata.RebuildKeyspaceGraphResponse} message RebuildKeyspaceGraphResponse + * @param {vtctldata.ReshardCreateRequest} message ReshardCreateRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RebuildKeyspaceGraphResponse.toObject = function toObject() { - return {}; + ReshardCreateRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) { + object.source_shards = []; + object.target_shards = []; + object.cells = []; + object.tablet_types = []; + } + if (options.defaults) { + object.workflow = ""; + object.keyspace = ""; + object.tablet_selection_preference = options.enums === String ? 
"ANY" : 0; + object.skip_schema_copy = false; + object.on_ddl = ""; + object.stop_after_copy = false; + object.defer_secondary_keys = false; + object.auto_start = false; + } + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.source_shards && message.source_shards.length) { + object.source_shards = []; + for (let j = 0; j < message.source_shards.length; ++j) + object.source_shards[j] = message.source_shards[j]; + } + if (message.target_shards && message.target_shards.length) { + object.target_shards = []; + for (let j = 0; j < message.target_shards.length; ++j) + object.target_shards[j] = message.target_shards[j]; + } + if (message.cells && message.cells.length) { + object.cells = []; + for (let j = 0; j < message.cells.length; ++j) + object.cells[j] = message.cells[j]; + } + if (message.tablet_types && message.tablet_types.length) { + object.tablet_types = []; + for (let j = 0; j < message.tablet_types.length; ++j) + object.tablet_types[j] = options.enums === String ? $root.topodata.TabletType[message.tablet_types[j]] === undefined ? message.tablet_types[j] : $root.topodata.TabletType[message.tablet_types[j]] : message.tablet_types[j]; + } + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + object.tablet_selection_preference = options.enums === String ? $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] === undefined ? 
message.tablet_selection_preference : $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] : message.tablet_selection_preference; + if (message.skip_schema_copy != null && message.hasOwnProperty("skip_schema_copy")) + object.skip_schema_copy = message.skip_schema_copy; + if (message.on_ddl != null && message.hasOwnProperty("on_ddl")) + object.on_ddl = message.on_ddl; + if (message.stop_after_copy != null && message.hasOwnProperty("stop_after_copy")) + object.stop_after_copy = message.stop_after_copy; + if (message.defer_secondary_keys != null && message.hasOwnProperty("defer_secondary_keys")) + object.defer_secondary_keys = message.defer_secondary_keys; + if (message.auto_start != null && message.hasOwnProperty("auto_start")) + object.auto_start = message.auto_start; + return object; }; /** - * Converts this RebuildKeyspaceGraphResponse to JSON. + * Converts this ReshardCreateRequest to JSON. * @function toJSON - * @memberof vtctldata.RebuildKeyspaceGraphResponse + * @memberof vtctldata.ReshardCreateRequest * @instance * @returns {Object.} JSON object */ - RebuildKeyspaceGraphResponse.prototype.toJSON = function toJSON() { + ReshardCreateRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RebuildKeyspaceGraphResponse + * Gets the default type url for ReshardCreateRequest * @function getTypeUrl - * @memberof vtctldata.RebuildKeyspaceGraphResponse + * @memberof vtctldata.ReshardCreateRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RebuildKeyspaceGraphResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ReshardCreateRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.RebuildKeyspaceGraphResponse"; + 
return typeUrlPrefix + "/vtctldata.ReshardCreateRequest"; }; - return RebuildKeyspaceGraphResponse; + return ReshardCreateRequest; })(); - vtctldata.RebuildVSchemaGraphRequest = (function() { + vtctldata.RestoreFromBackupRequest = (function() { /** - * Properties of a RebuildVSchemaGraphRequest. + * Properties of a RestoreFromBackupRequest. * @memberof vtctldata - * @interface IRebuildVSchemaGraphRequest - * @property {Array.|null} [cells] RebuildVSchemaGraphRequest cells + * @interface IRestoreFromBackupRequest + * @property {topodata.ITabletAlias|null} [tablet_alias] RestoreFromBackupRequest tablet_alias + * @property {vttime.ITime|null} [backup_time] RestoreFromBackupRequest backup_time + * @property {string|null} [restore_to_pos] RestoreFromBackupRequest restore_to_pos + * @property {boolean|null} [dry_run] RestoreFromBackupRequest dry_run + * @property {vttime.ITime|null} [restore_to_timestamp] RestoreFromBackupRequest restore_to_timestamp */ /** - * Constructs a new RebuildVSchemaGraphRequest. + * Constructs a new RestoreFromBackupRequest. * @memberof vtctldata - * @classdesc Represents a RebuildVSchemaGraphRequest. - * @implements IRebuildVSchemaGraphRequest + * @classdesc Represents a RestoreFromBackupRequest. + * @implements IRestoreFromBackupRequest * @constructor - * @param {vtctldata.IRebuildVSchemaGraphRequest=} [properties] Properties to set + * @param {vtctldata.IRestoreFromBackupRequest=} [properties] Properties to set */ - function RebuildVSchemaGraphRequest(properties) { - this.cells = []; + function RestoreFromBackupRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -124556,78 +146876,131 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * RebuildVSchemaGraphRequest cells. - * @member {Array.} cells - * @memberof vtctldata.RebuildVSchemaGraphRequest + * RestoreFromBackupRequest tablet_alias. 
+ * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.RestoreFromBackupRequest * @instance */ - RebuildVSchemaGraphRequest.prototype.cells = $util.emptyArray; + RestoreFromBackupRequest.prototype.tablet_alias = null; /** - * Creates a new RebuildVSchemaGraphRequest instance using the specified properties. + * RestoreFromBackupRequest backup_time. + * @member {vttime.ITime|null|undefined} backup_time + * @memberof vtctldata.RestoreFromBackupRequest + * @instance + */ + RestoreFromBackupRequest.prototype.backup_time = null; + + /** + * RestoreFromBackupRequest restore_to_pos. + * @member {string} restore_to_pos + * @memberof vtctldata.RestoreFromBackupRequest + * @instance + */ + RestoreFromBackupRequest.prototype.restore_to_pos = ""; + + /** + * RestoreFromBackupRequest dry_run. + * @member {boolean} dry_run + * @memberof vtctldata.RestoreFromBackupRequest + * @instance + */ + RestoreFromBackupRequest.prototype.dry_run = false; + + /** + * RestoreFromBackupRequest restore_to_timestamp. + * @member {vttime.ITime|null|undefined} restore_to_timestamp + * @memberof vtctldata.RestoreFromBackupRequest + * @instance + */ + RestoreFromBackupRequest.prototype.restore_to_timestamp = null; + + /** + * Creates a new RestoreFromBackupRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.RebuildVSchemaGraphRequest + * @memberof vtctldata.RestoreFromBackupRequest * @static - * @param {vtctldata.IRebuildVSchemaGraphRequest=} [properties] Properties to set - * @returns {vtctldata.RebuildVSchemaGraphRequest} RebuildVSchemaGraphRequest instance + * @param {vtctldata.IRestoreFromBackupRequest=} [properties] Properties to set + * @returns {vtctldata.RestoreFromBackupRequest} RestoreFromBackupRequest instance */ - RebuildVSchemaGraphRequest.create = function create(properties) { - return new RebuildVSchemaGraphRequest(properties); + RestoreFromBackupRequest.create = function create(properties) { + return new RestoreFromBackupRequest(properties); }; /** - * Encodes the specified RebuildVSchemaGraphRequest message. Does not implicitly {@link vtctldata.RebuildVSchemaGraphRequest.verify|verify} messages. + * Encodes the specified RestoreFromBackupRequest message. Does not implicitly {@link vtctldata.RestoreFromBackupRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.RebuildVSchemaGraphRequest + * @memberof vtctldata.RestoreFromBackupRequest * @static - * @param {vtctldata.IRebuildVSchemaGraphRequest} message RebuildVSchemaGraphRequest message or plain object to encode + * @param {vtctldata.IRestoreFromBackupRequest} message RestoreFromBackupRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RebuildVSchemaGraphRequest.encode = function encode(message, writer) { + RestoreFromBackupRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.cells != null && message.cells.length) - for (let i = 0; i < message.cells.length; ++i) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.cells[i]); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.backup_time != null && Object.hasOwnProperty.call(message, "backup_time")) + $root.vttime.Time.encode(message.backup_time, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.restore_to_pos != null && Object.hasOwnProperty.call(message, "restore_to_pos")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.restore_to_pos); + if (message.dry_run != null && Object.hasOwnProperty.call(message, "dry_run")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.dry_run); + if (message.restore_to_timestamp != null && Object.hasOwnProperty.call(message, "restore_to_timestamp")) + $root.vttime.Time.encode(message.restore_to_timestamp, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); return writer; }; /** - * Encodes the specified RebuildVSchemaGraphRequest message, length delimited. Does not implicitly {@link vtctldata.RebuildVSchemaGraphRequest.verify|verify} messages. 
+ * Encodes the specified RestoreFromBackupRequest message, length delimited. Does not implicitly {@link vtctldata.RestoreFromBackupRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.RebuildVSchemaGraphRequest + * @memberof vtctldata.RestoreFromBackupRequest * @static - * @param {vtctldata.IRebuildVSchemaGraphRequest} message RebuildVSchemaGraphRequest message or plain object to encode + * @param {vtctldata.IRestoreFromBackupRequest} message RestoreFromBackupRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RebuildVSchemaGraphRequest.encodeDelimited = function encodeDelimited(message, writer) { + RestoreFromBackupRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RebuildVSchemaGraphRequest message from the specified reader or buffer. + * Decodes a RestoreFromBackupRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.RebuildVSchemaGraphRequest + * @memberof vtctldata.RestoreFromBackupRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.RebuildVSchemaGraphRequest} RebuildVSchemaGraphRequest + * @returns {vtctldata.RestoreFromBackupRequest} RestoreFromBackupRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RebuildVSchemaGraphRequest.decode = function decode(reader, length) { + RestoreFromBackupRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RebuildVSchemaGraphRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.RestoreFromBackupRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.cells && message.cells.length)) - message.cells = []; - message.cells.push(reader.string()); + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 2: { + message.backup_time = $root.vttime.Time.decode(reader, reader.uint32()); + break; + } + case 3: { + message.restore_to_pos = reader.string(); + break; + } + case 4: { + message.dry_run = reader.bool(); + break; + } + case 5: { + message.restore_to_timestamp = $root.vttime.Time.decode(reader, reader.uint32()); break; } default: @@ -124639,133 +147012,173 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a RebuildVSchemaGraphRequest message from the specified reader or buffer, length delimited. + * Decodes a RestoreFromBackupRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.RebuildVSchemaGraphRequest + * @memberof vtctldata.RestoreFromBackupRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.RebuildVSchemaGraphRequest} RebuildVSchemaGraphRequest + * @returns {vtctldata.RestoreFromBackupRequest} RestoreFromBackupRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RebuildVSchemaGraphRequest.decodeDelimited = function decodeDelimited(reader) { + RestoreFromBackupRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RebuildVSchemaGraphRequest message. + * Verifies a RestoreFromBackupRequest message. 
* @function verify - * @memberof vtctldata.RebuildVSchemaGraphRequest + * @memberof vtctldata.RestoreFromBackupRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RebuildVSchemaGraphRequest.verify = function verify(message) { + RestoreFromBackupRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.cells != null && message.hasOwnProperty("cells")) { - if (!Array.isArray(message.cells)) - return "cells: array expected"; - for (let i = 0; i < message.cells.length; ++i) - if (!$util.isString(message.cells[i])) - return "cells: string[] expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; + } + if (message.backup_time != null && message.hasOwnProperty("backup_time")) { + let error = $root.vttime.Time.verify(message.backup_time); + if (error) + return "backup_time." + error; + } + if (message.restore_to_pos != null && message.hasOwnProperty("restore_to_pos")) + if (!$util.isString(message.restore_to_pos)) + return "restore_to_pos: string expected"; + if (message.dry_run != null && message.hasOwnProperty("dry_run")) + if (typeof message.dry_run !== "boolean") + return "dry_run: boolean expected"; + if (message.restore_to_timestamp != null && message.hasOwnProperty("restore_to_timestamp")) { + let error = $root.vttime.Time.verify(message.restore_to_timestamp); + if (error) + return "restore_to_timestamp." + error; } return null; }; /** - * Creates a RebuildVSchemaGraphRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RestoreFromBackupRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.RebuildVSchemaGraphRequest + * @memberof vtctldata.RestoreFromBackupRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.RebuildVSchemaGraphRequest} RebuildVSchemaGraphRequest + * @returns {vtctldata.RestoreFromBackupRequest} RestoreFromBackupRequest */ - RebuildVSchemaGraphRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.RebuildVSchemaGraphRequest) - return object; - let message = new $root.vtctldata.RebuildVSchemaGraphRequest(); - if (object.cells) { - if (!Array.isArray(object.cells)) - throw TypeError(".vtctldata.RebuildVSchemaGraphRequest.cells: array expected"); - message.cells = []; - for (let i = 0; i < object.cells.length; ++i) - message.cells[i] = String(object.cells[i]); + RestoreFromBackupRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RestoreFromBackupRequest) + return object; + let message = new $root.vtctldata.RestoreFromBackupRequest(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.RestoreFromBackupRequest.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } + if (object.backup_time != null) { + if (typeof object.backup_time !== "object") + throw TypeError(".vtctldata.RestoreFromBackupRequest.backup_time: object expected"); + message.backup_time = $root.vttime.Time.fromObject(object.backup_time); + } + if (object.restore_to_pos != null) + message.restore_to_pos = String(object.restore_to_pos); + if (object.dry_run != null) + message.dry_run = Boolean(object.dry_run); + if (object.restore_to_timestamp != null) { + if (typeof object.restore_to_timestamp !== "object") + throw TypeError(".vtctldata.RestoreFromBackupRequest.restore_to_timestamp: object expected"); + message.restore_to_timestamp = $root.vttime.Time.fromObject(object.restore_to_timestamp); } return 
message; }; /** - * Creates a plain object from a RebuildVSchemaGraphRequest message. Also converts values to other types if specified. + * Creates a plain object from a RestoreFromBackupRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.RebuildVSchemaGraphRequest + * @memberof vtctldata.RestoreFromBackupRequest * @static - * @param {vtctldata.RebuildVSchemaGraphRequest} message RebuildVSchemaGraphRequest + * @param {vtctldata.RestoreFromBackupRequest} message RestoreFromBackupRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RebuildVSchemaGraphRequest.toObject = function toObject(message, options) { + RestoreFromBackupRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.cells = []; - if (message.cells && message.cells.length) { - object.cells = []; - for (let j = 0; j < message.cells.length; ++j) - object.cells[j] = message.cells[j]; + if (options.defaults) { + object.tablet_alias = null; + object.backup_time = null; + object.restore_to_pos = ""; + object.dry_run = false; + object.restore_to_timestamp = null; } + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (message.backup_time != null && message.hasOwnProperty("backup_time")) + object.backup_time = $root.vttime.Time.toObject(message.backup_time, options); + if (message.restore_to_pos != null && message.hasOwnProperty("restore_to_pos")) + object.restore_to_pos = message.restore_to_pos; + if (message.dry_run != null && message.hasOwnProperty("dry_run")) + object.dry_run = message.dry_run; + if (message.restore_to_timestamp != null && message.hasOwnProperty("restore_to_timestamp")) + object.restore_to_timestamp = 
$root.vttime.Time.toObject(message.restore_to_timestamp, options); return object; }; /** - * Converts this RebuildVSchemaGraphRequest to JSON. + * Converts this RestoreFromBackupRequest to JSON. * @function toJSON - * @memberof vtctldata.RebuildVSchemaGraphRequest + * @memberof vtctldata.RestoreFromBackupRequest * @instance * @returns {Object.} JSON object */ - RebuildVSchemaGraphRequest.prototype.toJSON = function toJSON() { + RestoreFromBackupRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RebuildVSchemaGraphRequest + * Gets the default type url for RestoreFromBackupRequest * @function getTypeUrl - * @memberof vtctldata.RebuildVSchemaGraphRequest + * @memberof vtctldata.RestoreFromBackupRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RebuildVSchemaGraphRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RestoreFromBackupRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.RebuildVSchemaGraphRequest"; + return typeUrlPrefix + "/vtctldata.RestoreFromBackupRequest"; }; - return RebuildVSchemaGraphRequest; + return RestoreFromBackupRequest; })(); - vtctldata.RebuildVSchemaGraphResponse = (function() { + vtctldata.RestoreFromBackupResponse = (function() { /** - * Properties of a RebuildVSchemaGraphResponse. + * Properties of a RestoreFromBackupResponse. 
* @memberof vtctldata - * @interface IRebuildVSchemaGraphResponse + * @interface IRestoreFromBackupResponse + * @property {topodata.ITabletAlias|null} [tablet_alias] RestoreFromBackupResponse tablet_alias + * @property {string|null} [keyspace] RestoreFromBackupResponse keyspace + * @property {string|null} [shard] RestoreFromBackupResponse shard + * @property {logutil.IEvent|null} [event] RestoreFromBackupResponse event */ /** - * Constructs a new RebuildVSchemaGraphResponse. + * Constructs a new RestoreFromBackupResponse. * @memberof vtctldata - * @classdesc Represents a RebuildVSchemaGraphResponse. - * @implements IRebuildVSchemaGraphResponse + * @classdesc Represents a RestoreFromBackupResponse. + * @implements IRestoreFromBackupResponse * @constructor - * @param {vtctldata.IRebuildVSchemaGraphResponse=} [properties] Properties to set + * @param {vtctldata.IRestoreFromBackupResponse=} [properties] Properties to set */ - function RebuildVSchemaGraphResponse(properties) { + function RestoreFromBackupResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -124773,63 +147186,119 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new RebuildVSchemaGraphResponse instance using the specified properties. + * RestoreFromBackupResponse tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.RestoreFromBackupResponse + * @instance + */ + RestoreFromBackupResponse.prototype.tablet_alias = null; + + /** + * RestoreFromBackupResponse keyspace. + * @member {string} keyspace + * @memberof vtctldata.RestoreFromBackupResponse + * @instance + */ + RestoreFromBackupResponse.prototype.keyspace = ""; + + /** + * RestoreFromBackupResponse shard. 
+ * @member {string} shard + * @memberof vtctldata.RestoreFromBackupResponse + * @instance + */ + RestoreFromBackupResponse.prototype.shard = ""; + + /** + * RestoreFromBackupResponse event. + * @member {logutil.IEvent|null|undefined} event + * @memberof vtctldata.RestoreFromBackupResponse + * @instance + */ + RestoreFromBackupResponse.prototype.event = null; + + /** + * Creates a new RestoreFromBackupResponse instance using the specified properties. * @function create - * @memberof vtctldata.RebuildVSchemaGraphResponse + * @memberof vtctldata.RestoreFromBackupResponse * @static - * @param {vtctldata.IRebuildVSchemaGraphResponse=} [properties] Properties to set - * @returns {vtctldata.RebuildVSchemaGraphResponse} RebuildVSchemaGraphResponse instance + * @param {vtctldata.IRestoreFromBackupResponse=} [properties] Properties to set + * @returns {vtctldata.RestoreFromBackupResponse} RestoreFromBackupResponse instance */ - RebuildVSchemaGraphResponse.create = function create(properties) { - return new RebuildVSchemaGraphResponse(properties); + RestoreFromBackupResponse.create = function create(properties) { + return new RestoreFromBackupResponse(properties); }; /** - * Encodes the specified RebuildVSchemaGraphResponse message. Does not implicitly {@link vtctldata.RebuildVSchemaGraphResponse.verify|verify} messages. + * Encodes the specified RestoreFromBackupResponse message. Does not implicitly {@link vtctldata.RestoreFromBackupResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.RebuildVSchemaGraphResponse + * @memberof vtctldata.RestoreFromBackupResponse * @static - * @param {vtctldata.IRebuildVSchemaGraphResponse} message RebuildVSchemaGraphResponse message or plain object to encode + * @param {vtctldata.IRestoreFromBackupResponse} message RestoreFromBackupResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RebuildVSchemaGraphResponse.encode = function encode(message, writer) { + RestoreFromBackupResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.shard); + if (message.event != null && Object.hasOwnProperty.call(message, "event")) + $root.logutil.Event.encode(message.event, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; /** - * Encodes the specified RebuildVSchemaGraphResponse message, length delimited. Does not implicitly {@link vtctldata.RebuildVSchemaGraphResponse.verify|verify} messages. + * Encodes the specified RestoreFromBackupResponse message, length delimited. Does not implicitly {@link vtctldata.RestoreFromBackupResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.RebuildVSchemaGraphResponse + * @memberof vtctldata.RestoreFromBackupResponse * @static - * @param {vtctldata.IRebuildVSchemaGraphResponse} message RebuildVSchemaGraphResponse message or plain object to encode + * @param {vtctldata.IRestoreFromBackupResponse} message RestoreFromBackupResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RebuildVSchemaGraphResponse.encodeDelimited = function encodeDelimited(message, writer) { + RestoreFromBackupResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RebuildVSchemaGraphResponse message from the specified reader or buffer. + * Decodes a RestoreFromBackupResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.RebuildVSchemaGraphResponse + * @memberof vtctldata.RestoreFromBackupResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.RebuildVSchemaGraphResponse} RebuildVSchemaGraphResponse + * @returns {vtctldata.RestoreFromBackupResponse} RestoreFromBackupResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RebuildVSchemaGraphResponse.decode = function decode(reader, length) { + RestoreFromBackupResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RebuildVSchemaGraphResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.RestoreFromBackupResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 2: { + message.keyspace = reader.string(); + break; + } + case 3: { + message.shard = reader.string(); + break; + } + case 4: { + message.event = $root.logutil.Event.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -124839,109 +147308,158 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a RebuildVSchemaGraphResponse message from the specified reader or buffer, length delimited. + * Decodes a RestoreFromBackupResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.RebuildVSchemaGraphResponse + * @memberof vtctldata.RestoreFromBackupResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.RebuildVSchemaGraphResponse} RebuildVSchemaGraphResponse + * @returns {vtctldata.RestoreFromBackupResponse} RestoreFromBackupResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RebuildVSchemaGraphResponse.decodeDelimited = function decodeDelimited(reader) { + RestoreFromBackupResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RebuildVSchemaGraphResponse message. + * Verifies a RestoreFromBackupResponse message. 
* @function verify - * @memberof vtctldata.RebuildVSchemaGraphResponse + * @memberof vtctldata.RestoreFromBackupResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RebuildVSchemaGraphResponse.verify = function verify(message) { + RestoreFromBackupResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.event != null && message.hasOwnProperty("event")) { + let error = $root.logutil.Event.verify(message.event); + if (error) + return "event." + error; + } return null; }; /** - * Creates a RebuildVSchemaGraphResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RestoreFromBackupResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.RebuildVSchemaGraphResponse + * @memberof vtctldata.RestoreFromBackupResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.RebuildVSchemaGraphResponse} RebuildVSchemaGraphResponse + * @returns {vtctldata.RestoreFromBackupResponse} RestoreFromBackupResponse */ - RebuildVSchemaGraphResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.RebuildVSchemaGraphResponse) + RestoreFromBackupResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RestoreFromBackupResponse) return object; - return new $root.vtctldata.RebuildVSchemaGraphResponse(); + let message = new $root.vtctldata.RestoreFromBackupResponse(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.RestoreFromBackupResponse.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + if (object.event != null) { + if (typeof object.event !== "object") + throw TypeError(".vtctldata.RestoreFromBackupResponse.event: object expected"); + message.event = $root.logutil.Event.fromObject(object.event); + } + return message; }; /** - * Creates a plain object from a RebuildVSchemaGraphResponse message. Also converts values to other types if specified. + * Creates a plain object from a RestoreFromBackupResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.RebuildVSchemaGraphResponse + * @memberof vtctldata.RestoreFromBackupResponse * @static - * @param {vtctldata.RebuildVSchemaGraphResponse} message RebuildVSchemaGraphResponse + * @param {vtctldata.RestoreFromBackupResponse} message RestoreFromBackupResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RebuildVSchemaGraphResponse.toObject = function toObject() { - return {}; + RestoreFromBackupResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.tablet_alias = null; + object.keyspace = ""; + object.shard = ""; + object.event = null; + } + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.event != null && message.hasOwnProperty("event")) + object.event = $root.logutil.Event.toObject(message.event, options); + return object; }; /** - * Converts this RebuildVSchemaGraphResponse to JSON. + * Converts this RestoreFromBackupResponse to JSON. 
* @function toJSON - * @memberof vtctldata.RebuildVSchemaGraphResponse + * @memberof vtctldata.RestoreFromBackupResponse * @instance * @returns {Object.} JSON object */ - RebuildVSchemaGraphResponse.prototype.toJSON = function toJSON() { + RestoreFromBackupResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RebuildVSchemaGraphResponse + * Gets the default type url for RestoreFromBackupResponse * @function getTypeUrl - * @memberof vtctldata.RebuildVSchemaGraphResponse + * @memberof vtctldata.RestoreFromBackupResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RebuildVSchemaGraphResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RestoreFromBackupResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.RebuildVSchemaGraphResponse"; + return typeUrlPrefix + "/vtctldata.RestoreFromBackupResponse"; }; - return RebuildVSchemaGraphResponse; + return RestoreFromBackupResponse; })(); - vtctldata.RefreshStateRequest = (function() { + vtctldata.RetrySchemaMigrationRequest = (function() { /** - * Properties of a RefreshStateRequest. + * Properties of a RetrySchemaMigrationRequest. * @memberof vtctldata - * @interface IRefreshStateRequest - * @property {topodata.ITabletAlias|null} [tablet_alias] RefreshStateRequest tablet_alias + * @interface IRetrySchemaMigrationRequest + * @property {string|null} [keyspace] RetrySchemaMigrationRequest keyspace + * @property {string|null} [uuid] RetrySchemaMigrationRequest uuid */ /** - * Constructs a new RefreshStateRequest. + * Constructs a new RetrySchemaMigrationRequest. * @memberof vtctldata - * @classdesc Represents a RefreshStateRequest. 
- * @implements IRefreshStateRequest + * @classdesc Represents a RetrySchemaMigrationRequest. + * @implements IRetrySchemaMigrationRequest * @constructor - * @param {vtctldata.IRefreshStateRequest=} [properties] Properties to set + * @param {vtctldata.IRetrySchemaMigrationRequest=} [properties] Properties to set */ - function RefreshStateRequest(properties) { + function RetrySchemaMigrationRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -124949,75 +147467,89 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * RefreshStateRequest tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.RefreshStateRequest + * RetrySchemaMigrationRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.RetrySchemaMigrationRequest * @instance */ - RefreshStateRequest.prototype.tablet_alias = null; + RetrySchemaMigrationRequest.prototype.keyspace = ""; /** - * Creates a new RefreshStateRequest instance using the specified properties. + * RetrySchemaMigrationRequest uuid. + * @member {string} uuid + * @memberof vtctldata.RetrySchemaMigrationRequest + * @instance + */ + RetrySchemaMigrationRequest.prototype.uuid = ""; + + /** + * Creates a new RetrySchemaMigrationRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.RefreshStateRequest + * @memberof vtctldata.RetrySchemaMigrationRequest * @static - * @param {vtctldata.IRefreshStateRequest=} [properties] Properties to set - * @returns {vtctldata.RefreshStateRequest} RefreshStateRequest instance + * @param {vtctldata.IRetrySchemaMigrationRequest=} [properties] Properties to set + * @returns {vtctldata.RetrySchemaMigrationRequest} RetrySchemaMigrationRequest instance */ - RefreshStateRequest.create = function create(properties) { - return new RefreshStateRequest(properties); + RetrySchemaMigrationRequest.create = function create(properties) { + return new RetrySchemaMigrationRequest(properties); }; /** - * Encodes the specified RefreshStateRequest message. Does not implicitly {@link vtctldata.RefreshStateRequest.verify|verify} messages. + * Encodes the specified RetrySchemaMigrationRequest message. Does not implicitly {@link vtctldata.RetrySchemaMigrationRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.RefreshStateRequest + * @memberof vtctldata.RetrySchemaMigrationRequest * @static - * @param {vtctldata.IRefreshStateRequest} message RefreshStateRequest message or plain object to encode + * @param {vtctldata.IRetrySchemaMigrationRequest} message RetrySchemaMigrationRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RefreshStateRequest.encode = function encode(message, writer) { + RetrySchemaMigrationRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.uuid != null && 
Object.hasOwnProperty.call(message, "uuid")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.uuid); return writer; }; /** - * Encodes the specified RefreshStateRequest message, length delimited. Does not implicitly {@link vtctldata.RefreshStateRequest.verify|verify} messages. + * Encodes the specified RetrySchemaMigrationRequest message, length delimited. Does not implicitly {@link vtctldata.RetrySchemaMigrationRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.RefreshStateRequest + * @memberof vtctldata.RetrySchemaMigrationRequest * @static - * @param {vtctldata.IRefreshStateRequest} message RefreshStateRequest message or plain object to encode + * @param {vtctldata.IRetrySchemaMigrationRequest} message RetrySchemaMigrationRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RefreshStateRequest.encodeDelimited = function encodeDelimited(message, writer) { + RetrySchemaMigrationRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RefreshStateRequest message from the specified reader or buffer. + * Decodes a RetrySchemaMigrationRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.RefreshStateRequest + * @memberof vtctldata.RetrySchemaMigrationRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.RefreshStateRequest} RefreshStateRequest + * @returns {vtctldata.RetrySchemaMigrationRequest} RetrySchemaMigrationRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RefreshStateRequest.decode = function decode(reader, length) { + RetrySchemaMigrationRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RefreshStateRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RetrySchemaMigrationRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + message.keyspace = reader.string(); + break; + } + case 2: { + message.uuid = reader.string(); break; } default: @@ -125029,126 +147561,132 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a RefreshStateRequest message from the specified reader or buffer, length delimited. + * Decodes a RetrySchemaMigrationRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.RefreshStateRequest + * @memberof vtctldata.RetrySchemaMigrationRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.RefreshStateRequest} RefreshStateRequest + * @returns {vtctldata.RetrySchemaMigrationRequest} RetrySchemaMigrationRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RefreshStateRequest.decodeDelimited = function decodeDelimited(reader) { + RetrySchemaMigrationRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RefreshStateRequest message. + * Verifies a RetrySchemaMigrationRequest message. * @function verify - * @memberof vtctldata.RefreshStateRequest + * @memberof vtctldata.RetrySchemaMigrationRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RefreshStateRequest.verify = function verify(message) { + RetrySchemaMigrationRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.uuid != null && message.hasOwnProperty("uuid")) + if (!$util.isString(message.uuid)) + return "uuid: string expected"; return null; }; /** - * Creates a RefreshStateRequest message from a plain object. Also converts values to their respective internal types. 
+ * Creates a RetrySchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.RefreshStateRequest + * @memberof vtctldata.RetrySchemaMigrationRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.RefreshStateRequest} RefreshStateRequest + * @returns {vtctldata.RetrySchemaMigrationRequest} RetrySchemaMigrationRequest */ - RefreshStateRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.RefreshStateRequest) + RetrySchemaMigrationRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RetrySchemaMigrationRequest) return object; - let message = new $root.vtctldata.RefreshStateRequest(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.RefreshStateRequest.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } + let message = new $root.vtctldata.RetrySchemaMigrationRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.uuid != null) + message.uuid = String(object.uuid); return message; }; /** - * Creates a plain object from a RefreshStateRequest message. Also converts values to other types if specified. + * Creates a plain object from a RetrySchemaMigrationRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.RefreshStateRequest + * @memberof vtctldata.RetrySchemaMigrationRequest * @static - * @param {vtctldata.RefreshStateRequest} message RefreshStateRequest + * @param {vtctldata.RetrySchemaMigrationRequest} message RetrySchemaMigrationRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RefreshStateRequest.toObject = function toObject(message, options) { + RetrySchemaMigrationRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.tablet_alias = null; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (options.defaults) { + object.keyspace = ""; + object.uuid = ""; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.uuid != null && message.hasOwnProperty("uuid")) + object.uuid = message.uuid; return object; }; /** - * Converts this RefreshStateRequest to JSON. + * Converts this RetrySchemaMigrationRequest to JSON. 
* @function toJSON - * @memberof vtctldata.RefreshStateRequest + * @memberof vtctldata.RetrySchemaMigrationRequest * @instance * @returns {Object.} JSON object */ - RefreshStateRequest.prototype.toJSON = function toJSON() { + RetrySchemaMigrationRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RefreshStateRequest + * Gets the default type url for RetrySchemaMigrationRequest * @function getTypeUrl - * @memberof vtctldata.RefreshStateRequest + * @memberof vtctldata.RetrySchemaMigrationRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RefreshStateRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RetrySchemaMigrationRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.RefreshStateRequest"; + return typeUrlPrefix + "/vtctldata.RetrySchemaMigrationRequest"; }; - return RefreshStateRequest; + return RetrySchemaMigrationRequest; })(); - vtctldata.RefreshStateResponse = (function() { + vtctldata.RetrySchemaMigrationResponse = (function() { /** - * Properties of a RefreshStateResponse. + * Properties of a RetrySchemaMigrationResponse. * @memberof vtctldata - * @interface IRefreshStateResponse + * @interface IRetrySchemaMigrationResponse + * @property {Object.|null} [rows_affected_by_shard] RetrySchemaMigrationResponse rows_affected_by_shard */ /** - * Constructs a new RefreshStateResponse. + * Constructs a new RetrySchemaMigrationResponse. * @memberof vtctldata - * @classdesc Represents a RefreshStateResponse. - * @implements IRefreshStateResponse + * @classdesc Represents a RetrySchemaMigrationResponse. 
+ * @implements IRetrySchemaMigrationResponse * @constructor - * @param {vtctldata.IRefreshStateResponse=} [properties] Properties to set + * @param {vtctldata.IRetrySchemaMigrationResponse=} [properties] Properties to set */ - function RefreshStateResponse(properties) { + function RetrySchemaMigrationResponse(properties) { + this.rows_affected_by_shard = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -125156,63 +147694,97 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new RefreshStateResponse instance using the specified properties. + * RetrySchemaMigrationResponse rows_affected_by_shard. + * @member {Object.} rows_affected_by_shard + * @memberof vtctldata.RetrySchemaMigrationResponse + * @instance + */ + RetrySchemaMigrationResponse.prototype.rows_affected_by_shard = $util.emptyObject; + + /** + * Creates a new RetrySchemaMigrationResponse instance using the specified properties. * @function create - * @memberof vtctldata.RefreshStateResponse + * @memberof vtctldata.RetrySchemaMigrationResponse * @static - * @param {vtctldata.IRefreshStateResponse=} [properties] Properties to set - * @returns {vtctldata.RefreshStateResponse} RefreshStateResponse instance + * @param {vtctldata.IRetrySchemaMigrationResponse=} [properties] Properties to set + * @returns {vtctldata.RetrySchemaMigrationResponse} RetrySchemaMigrationResponse instance */ - RefreshStateResponse.create = function create(properties) { - return new RefreshStateResponse(properties); + RetrySchemaMigrationResponse.create = function create(properties) { + return new RetrySchemaMigrationResponse(properties); }; /** - * Encodes the specified RefreshStateResponse message. Does not implicitly {@link vtctldata.RefreshStateResponse.verify|verify} messages. + * Encodes the specified RetrySchemaMigrationResponse message. Does not implicitly {@link vtctldata.RetrySchemaMigrationResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.RefreshStateResponse + * @memberof vtctldata.RetrySchemaMigrationResponse * @static - * @param {vtctldata.IRefreshStateResponse} message RefreshStateResponse message or plain object to encode + * @param {vtctldata.IRetrySchemaMigrationResponse} message RetrySchemaMigrationResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RefreshStateResponse.encode = function encode(message, writer) { + RetrySchemaMigrationResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.rows_affected_by_shard != null && Object.hasOwnProperty.call(message, "rows_affected_by_shard")) + for (let keys = Object.keys(message.rows_affected_by_shard), i = 0; i < keys.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 0 =*/16).uint64(message.rows_affected_by_shard[keys[i]]).ldelim(); return writer; }; /** - * Encodes the specified RefreshStateResponse message, length delimited. Does not implicitly {@link vtctldata.RefreshStateResponse.verify|verify} messages. + * Encodes the specified RetrySchemaMigrationResponse message, length delimited. Does not implicitly {@link vtctldata.RetrySchemaMigrationResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.RefreshStateResponse + * @memberof vtctldata.RetrySchemaMigrationResponse * @static - * @param {vtctldata.IRefreshStateResponse} message RefreshStateResponse message or plain object to encode + * @param {vtctldata.IRetrySchemaMigrationResponse} message RetrySchemaMigrationResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RefreshStateResponse.encodeDelimited = function encodeDelimited(message, writer) { + RetrySchemaMigrationResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RefreshStateResponse message from the specified reader or buffer. + * Decodes a RetrySchemaMigrationResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.RefreshStateResponse + * @memberof vtctldata.RetrySchemaMigrationResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.RefreshStateResponse} RefreshStateResponse + * @returns {vtctldata.RetrySchemaMigrationResponse} RetrySchemaMigrationResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RefreshStateResponse.decode = function decode(reader, length) { + RetrySchemaMigrationResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RefreshStateResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.RetrySchemaMigrationResponse(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + if (message.rows_affected_by_shard === $util.emptyObject) + message.rows_affected_by_shard = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = 0; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = reader.uint64(); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.rows_affected_by_shard[key] = value; + break; + } default: reader.skipType(tag & 7); break; @@ -125222,112 +147794,146 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a RefreshStateResponse message from the specified reader or buffer, length delimited. + * Decodes a RetrySchemaMigrationResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.RefreshStateResponse + * @memberof vtctldata.RetrySchemaMigrationResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.RefreshStateResponse} RefreshStateResponse + * @returns {vtctldata.RetrySchemaMigrationResponse} RetrySchemaMigrationResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RefreshStateResponse.decodeDelimited = function decodeDelimited(reader) { + RetrySchemaMigrationResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RefreshStateResponse message. + * Verifies a RetrySchemaMigrationResponse message. 
* @function verify - * @memberof vtctldata.RefreshStateResponse + * @memberof vtctldata.RetrySchemaMigrationResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RefreshStateResponse.verify = function verify(message) { + RetrySchemaMigrationResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.rows_affected_by_shard != null && message.hasOwnProperty("rows_affected_by_shard")) { + if (!$util.isObject(message.rows_affected_by_shard)) + return "rows_affected_by_shard: object expected"; + let key = Object.keys(message.rows_affected_by_shard); + for (let i = 0; i < key.length; ++i) + if (!$util.isInteger(message.rows_affected_by_shard[key[i]]) && !(message.rows_affected_by_shard[key[i]] && $util.isInteger(message.rows_affected_by_shard[key[i]].low) && $util.isInteger(message.rows_affected_by_shard[key[i]].high))) + return "rows_affected_by_shard: integer|Long{k:string} expected"; + } return null; }; /** - * Creates a RefreshStateResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RetrySchemaMigrationResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.RefreshStateResponse + * @memberof vtctldata.RetrySchemaMigrationResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.RefreshStateResponse} RefreshStateResponse + * @returns {vtctldata.RetrySchemaMigrationResponse} RetrySchemaMigrationResponse */ - RefreshStateResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.RefreshStateResponse) + RetrySchemaMigrationResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RetrySchemaMigrationResponse) return object; - return new $root.vtctldata.RefreshStateResponse(); + let message = new $root.vtctldata.RetrySchemaMigrationResponse(); + if (object.rows_affected_by_shard) { + if (typeof object.rows_affected_by_shard !== "object") + throw TypeError(".vtctldata.RetrySchemaMigrationResponse.rows_affected_by_shard: object expected"); + message.rows_affected_by_shard = {}; + for (let keys = Object.keys(object.rows_affected_by_shard), i = 0; i < keys.length; ++i) + if ($util.Long) + (message.rows_affected_by_shard[keys[i]] = $util.Long.fromValue(object.rows_affected_by_shard[keys[i]])).unsigned = true; + else if (typeof object.rows_affected_by_shard[keys[i]] === "string") + message.rows_affected_by_shard[keys[i]] = parseInt(object.rows_affected_by_shard[keys[i]], 10); + else if (typeof object.rows_affected_by_shard[keys[i]] === "number") + message.rows_affected_by_shard[keys[i]] = object.rows_affected_by_shard[keys[i]]; + else if (typeof object.rows_affected_by_shard[keys[i]] === "object") + message.rows_affected_by_shard[keys[i]] = new $util.LongBits(object.rows_affected_by_shard[keys[i]].low >>> 0, object.rows_affected_by_shard[keys[i]].high >>> 0).toNumber(true); + } + return message; }; /** - * Creates a plain object from a RefreshStateResponse message. Also converts values to other types if specified. + * Creates a plain object from a RetrySchemaMigrationResponse message. 
Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.RefreshStateResponse + * @memberof vtctldata.RetrySchemaMigrationResponse * @static - * @param {vtctldata.RefreshStateResponse} message RefreshStateResponse + * @param {vtctldata.RetrySchemaMigrationResponse} message RetrySchemaMigrationResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RefreshStateResponse.toObject = function toObject() { - return {}; + RetrySchemaMigrationResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.objects || options.defaults) + object.rows_affected_by_shard = {}; + let keys2; + if (message.rows_affected_by_shard && (keys2 = Object.keys(message.rows_affected_by_shard)).length) { + object.rows_affected_by_shard = {}; + for (let j = 0; j < keys2.length; ++j) + if (typeof message.rows_affected_by_shard[keys2[j]] === "number") + object.rows_affected_by_shard[keys2[j]] = options.longs === String ? String(message.rows_affected_by_shard[keys2[j]]) : message.rows_affected_by_shard[keys2[j]]; + else + object.rows_affected_by_shard[keys2[j]] = options.longs === String ? $util.Long.prototype.toString.call(message.rows_affected_by_shard[keys2[j]]) : options.longs === Number ? new $util.LongBits(message.rows_affected_by_shard[keys2[j]].low >>> 0, message.rows_affected_by_shard[keys2[j]].high >>> 0).toNumber(true) : message.rows_affected_by_shard[keys2[j]]; + } + return object; }; /** - * Converts this RefreshStateResponse to JSON. + * Converts this RetrySchemaMigrationResponse to JSON. 
* @function toJSON - * @memberof vtctldata.RefreshStateResponse + * @memberof vtctldata.RetrySchemaMigrationResponse * @instance * @returns {Object.} JSON object */ - RefreshStateResponse.prototype.toJSON = function toJSON() { + RetrySchemaMigrationResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RefreshStateResponse + * Gets the default type url for RetrySchemaMigrationResponse * @function getTypeUrl - * @memberof vtctldata.RefreshStateResponse + * @memberof vtctldata.RetrySchemaMigrationResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RefreshStateResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RetrySchemaMigrationResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.RefreshStateResponse"; + return typeUrlPrefix + "/vtctldata.RetrySchemaMigrationResponse"; }; - return RefreshStateResponse; + return RetrySchemaMigrationResponse; })(); - vtctldata.RefreshStateByShardRequest = (function() { + vtctldata.RunHealthCheckRequest = (function() { /** - * Properties of a RefreshStateByShardRequest. + * Properties of a RunHealthCheckRequest. * @memberof vtctldata - * @interface IRefreshStateByShardRequest - * @property {string|null} [keyspace] RefreshStateByShardRequest keyspace - * @property {string|null} [shard] RefreshStateByShardRequest shard - * @property {Array.|null} [cells] RefreshStateByShardRequest cells + * @interface IRunHealthCheckRequest + * @property {topodata.ITabletAlias|null} [tablet_alias] RunHealthCheckRequest tablet_alias */ /** - * Constructs a new RefreshStateByShardRequest. + * Constructs a new RunHealthCheckRequest. * @memberof vtctldata - * @classdesc Represents a RefreshStateByShardRequest. 
- * @implements IRefreshStateByShardRequest + * @classdesc Represents a RunHealthCheckRequest. + * @implements IRunHealthCheckRequest * @constructor - * @param {vtctldata.IRefreshStateByShardRequest=} [properties] Properties to set + * @param {vtctldata.IRunHealthCheckRequest=} [properties] Properties to set */ - function RefreshStateByShardRequest(properties) { - this.cells = []; + function RunHealthCheckRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -125335,106 +147941,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * RefreshStateByShardRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.RefreshStateByShardRequest - * @instance - */ - RefreshStateByShardRequest.prototype.keyspace = ""; - - /** - * RefreshStateByShardRequest shard. - * @member {string} shard - * @memberof vtctldata.RefreshStateByShardRequest - * @instance - */ - RefreshStateByShardRequest.prototype.shard = ""; - - /** - * RefreshStateByShardRequest cells. - * @member {Array.} cells - * @memberof vtctldata.RefreshStateByShardRequest + * RunHealthCheckRequest tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.RunHealthCheckRequest * @instance */ - RefreshStateByShardRequest.prototype.cells = $util.emptyArray; + RunHealthCheckRequest.prototype.tablet_alias = null; /** - * Creates a new RefreshStateByShardRequest instance using the specified properties. + * Creates a new RunHealthCheckRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.RefreshStateByShardRequest + * @memberof vtctldata.RunHealthCheckRequest * @static - * @param {vtctldata.IRefreshStateByShardRequest=} [properties] Properties to set - * @returns {vtctldata.RefreshStateByShardRequest} RefreshStateByShardRequest instance + * @param {vtctldata.IRunHealthCheckRequest=} [properties] Properties to set + * @returns {vtctldata.RunHealthCheckRequest} RunHealthCheckRequest instance */ - RefreshStateByShardRequest.create = function create(properties) { - return new RefreshStateByShardRequest(properties); + RunHealthCheckRequest.create = function create(properties) { + return new RunHealthCheckRequest(properties); }; /** - * Encodes the specified RefreshStateByShardRequest message. Does not implicitly {@link vtctldata.RefreshStateByShardRequest.verify|verify} messages. + * Encodes the specified RunHealthCheckRequest message. Does not implicitly {@link vtctldata.RunHealthCheckRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.RefreshStateByShardRequest + * @memberof vtctldata.RunHealthCheckRequest * @static - * @param {vtctldata.IRefreshStateByShardRequest} message RefreshStateByShardRequest message or plain object to encode + * @param {vtctldata.IRunHealthCheckRequest} message RunHealthCheckRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RefreshStateByShardRequest.encode = function encode(message, writer) { + RunHealthCheckRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.cells != null && message.cells.length) - for (let i = 0; i < 
message.cells.length; ++i) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.cells[i]); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified RefreshStateByShardRequest message, length delimited. Does not implicitly {@link vtctldata.RefreshStateByShardRequest.verify|verify} messages. + * Encodes the specified RunHealthCheckRequest message, length delimited. Does not implicitly {@link vtctldata.RunHealthCheckRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.RefreshStateByShardRequest + * @memberof vtctldata.RunHealthCheckRequest * @static - * @param {vtctldata.IRefreshStateByShardRequest} message RefreshStateByShardRequest message or plain object to encode + * @param {vtctldata.IRunHealthCheckRequest} message RunHealthCheckRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RefreshStateByShardRequest.encodeDelimited = function encodeDelimited(message, writer) { + RunHealthCheckRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RefreshStateByShardRequest message from the specified reader or buffer. + * Decodes a RunHealthCheckRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.RefreshStateByShardRequest + * @memberof vtctldata.RunHealthCheckRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.RefreshStateByShardRequest} RefreshStateByShardRequest + * @returns {vtctldata.RunHealthCheckRequest} RunHealthCheckRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RefreshStateByShardRequest.decode = function decode(reader, length) { + RunHealthCheckRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RefreshStateByShardRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RunHealthCheckRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - message.shard = reader.string(); - break; - } - case 3: { - if (!(message.cells && message.cells.length)) - message.cells = []; - message.cells.push(reader.string()); + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); break; } default: @@ -125446,153 +148021,126 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a RefreshStateByShardRequest message from the specified reader or buffer, length delimited. + * Decodes a RunHealthCheckRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.RefreshStateByShardRequest + * @memberof vtctldata.RunHealthCheckRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.RefreshStateByShardRequest} RefreshStateByShardRequest + * @returns {vtctldata.RunHealthCheckRequest} RunHealthCheckRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RefreshStateByShardRequest.decodeDelimited = function decodeDelimited(reader) { + RunHealthCheckRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RefreshStateByShardRequest message. + * Verifies a RunHealthCheckRequest message. * @function verify - * @memberof vtctldata.RefreshStateByShardRequest + * @memberof vtctldata.RunHealthCheckRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RefreshStateByShardRequest.verify = function verify(message) { + RunHealthCheckRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.cells != null && message.hasOwnProperty("cells")) { - if (!Array.isArray(message.cells)) - return "cells: array expected"; - for (let i = 0; i < message.cells.length; ++i) - if (!$util.isString(message.cells[i])) - return "cells: string[] expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = 
$root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; } return null; }; /** - * Creates a RefreshStateByShardRequest message from a plain object. Also converts values to their respective internal types. + * Creates a RunHealthCheckRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.RefreshStateByShardRequest + * @memberof vtctldata.RunHealthCheckRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.RefreshStateByShardRequest} RefreshStateByShardRequest + * @returns {vtctldata.RunHealthCheckRequest} RunHealthCheckRequest */ - RefreshStateByShardRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.RefreshStateByShardRequest) + RunHealthCheckRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RunHealthCheckRequest) return object; - let message = new $root.vtctldata.RefreshStateByShardRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.cells) { - if (!Array.isArray(object.cells)) - throw TypeError(".vtctldata.RefreshStateByShardRequest.cells: array expected"); - message.cells = []; - for (let i = 0; i < object.cells.length; ++i) - message.cells[i] = String(object.cells[i]); + let message = new $root.vtctldata.RunHealthCheckRequest(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.RunHealthCheckRequest.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); } return message; }; /** - * Creates a plain object from a RefreshStateByShardRequest message. Also converts values to other types if specified. + * Creates a plain object from a RunHealthCheckRequest message. 
Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.RefreshStateByShardRequest + * @memberof vtctldata.RunHealthCheckRequest * @static - * @param {vtctldata.RefreshStateByShardRequest} message RefreshStateByShardRequest + * @param {vtctldata.RunHealthCheckRequest} message RunHealthCheckRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RefreshStateByShardRequest.toObject = function toObject(message, options) { + RunHealthCheckRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.cells = []; - if (options.defaults) { - object.keyspace = ""; - object.shard = ""; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.cells && message.cells.length) { - object.cells = []; - for (let j = 0; j < message.cells.length; ++j) - object.cells[j] = message.cells[j]; - } + if (options.defaults) + object.tablet_alias = null; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); return object; }; /** - * Converts this RefreshStateByShardRequest to JSON. + * Converts this RunHealthCheckRequest to JSON. 
* @function toJSON - * @memberof vtctldata.RefreshStateByShardRequest + * @memberof vtctldata.RunHealthCheckRequest * @instance * @returns {Object.} JSON object */ - RefreshStateByShardRequest.prototype.toJSON = function toJSON() { + RunHealthCheckRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RefreshStateByShardRequest + * Gets the default type url for RunHealthCheckRequest * @function getTypeUrl - * @memberof vtctldata.RefreshStateByShardRequest + * @memberof vtctldata.RunHealthCheckRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RefreshStateByShardRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RunHealthCheckRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.RefreshStateByShardRequest"; + return typeUrlPrefix + "/vtctldata.RunHealthCheckRequest"; }; - return RefreshStateByShardRequest; + return RunHealthCheckRequest; })(); - vtctldata.RefreshStateByShardResponse = (function() { + vtctldata.RunHealthCheckResponse = (function() { /** - * Properties of a RefreshStateByShardResponse. + * Properties of a RunHealthCheckResponse. * @memberof vtctldata - * @interface IRefreshStateByShardResponse - * @property {boolean|null} [is_partial_refresh] RefreshStateByShardResponse is_partial_refresh - * @property {string|null} [partial_refresh_details] RefreshStateByShardResponse partial_refresh_details + * @interface IRunHealthCheckResponse */ /** - * Constructs a new RefreshStateByShardResponse. + * Constructs a new RunHealthCheckResponse. * @memberof vtctldata - * @classdesc Represents a RefreshStateByShardResponse. - * @implements IRefreshStateByShardResponse + * @classdesc Represents a RunHealthCheckResponse. 
+ * @implements IRunHealthCheckResponse * @constructor - * @param {vtctldata.IRefreshStateByShardResponse=} [properties] Properties to set + * @param {vtctldata.IRunHealthCheckResponse=} [properties] Properties to set */ - function RefreshStateByShardResponse(properties) { + function RunHealthCheckResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -125600,91 +148148,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * RefreshStateByShardResponse is_partial_refresh. - * @member {boolean} is_partial_refresh - * @memberof vtctldata.RefreshStateByShardResponse - * @instance - */ - RefreshStateByShardResponse.prototype.is_partial_refresh = false; - - /** - * RefreshStateByShardResponse partial_refresh_details. - * @member {string} partial_refresh_details - * @memberof vtctldata.RefreshStateByShardResponse - * @instance - */ - RefreshStateByShardResponse.prototype.partial_refresh_details = ""; - - /** - * Creates a new RefreshStateByShardResponse instance using the specified properties. + * Creates a new RunHealthCheckResponse instance using the specified properties. * @function create - * @memberof vtctldata.RefreshStateByShardResponse + * @memberof vtctldata.RunHealthCheckResponse * @static - * @param {vtctldata.IRefreshStateByShardResponse=} [properties] Properties to set - * @returns {vtctldata.RefreshStateByShardResponse} RefreshStateByShardResponse instance + * @param {vtctldata.IRunHealthCheckResponse=} [properties] Properties to set + * @returns {vtctldata.RunHealthCheckResponse} RunHealthCheckResponse instance */ - RefreshStateByShardResponse.create = function create(properties) { - return new RefreshStateByShardResponse(properties); + RunHealthCheckResponse.create = function create(properties) { + return new RunHealthCheckResponse(properties); }; /** - * Encodes the specified RefreshStateByShardResponse message. 
Does not implicitly {@link vtctldata.RefreshStateByShardResponse.verify|verify} messages. + * Encodes the specified RunHealthCheckResponse message. Does not implicitly {@link vtctldata.RunHealthCheckResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.RefreshStateByShardResponse + * @memberof vtctldata.RunHealthCheckResponse * @static - * @param {vtctldata.IRefreshStateByShardResponse} message RefreshStateByShardResponse message or plain object to encode + * @param {vtctldata.IRunHealthCheckResponse} message RunHealthCheckResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RefreshStateByShardResponse.encode = function encode(message, writer) { + RunHealthCheckResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.is_partial_refresh != null && Object.hasOwnProperty.call(message, "is_partial_refresh")) - writer.uint32(/* id 1, wireType 0 =*/8).bool(message.is_partial_refresh); - if (message.partial_refresh_details != null && Object.hasOwnProperty.call(message, "partial_refresh_details")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.partial_refresh_details); return writer; }; /** - * Encodes the specified RefreshStateByShardResponse message, length delimited. Does not implicitly {@link vtctldata.RefreshStateByShardResponse.verify|verify} messages. + * Encodes the specified RunHealthCheckResponse message, length delimited. Does not implicitly {@link vtctldata.RunHealthCheckResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.RefreshStateByShardResponse + * @memberof vtctldata.RunHealthCheckResponse * @static - * @param {vtctldata.IRefreshStateByShardResponse} message RefreshStateByShardResponse message or plain object to encode + * @param {vtctldata.IRunHealthCheckResponse} message RunHealthCheckResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RefreshStateByShardResponse.encodeDelimited = function encodeDelimited(message, writer) { + RunHealthCheckResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RefreshStateByShardResponse message from the specified reader or buffer. + * Decodes a RunHealthCheckResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.RefreshStateByShardResponse + * @memberof vtctldata.RunHealthCheckResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.RefreshStateByShardResponse} RefreshStateByShardResponse + * @returns {vtctldata.RunHealthCheckResponse} RunHealthCheckResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RefreshStateByShardResponse.decode = function decode(reader, length) { + RunHealthCheckResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RefreshStateByShardResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.RunHealthCheckResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.is_partial_refresh = reader.bool(); - break; - } - case 2: { - message.partial_refresh_details = reader.string(); - break; - } default: reader.skipType(tag & 7); break; @@ -125694,131 +148214,110 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a RefreshStateByShardResponse message from the specified reader or buffer, length delimited. + * Decodes a RunHealthCheckResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.RefreshStateByShardResponse + * @memberof vtctldata.RunHealthCheckResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.RefreshStateByShardResponse} RefreshStateByShardResponse + * @returns {vtctldata.RunHealthCheckResponse} RunHealthCheckResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RefreshStateByShardResponse.decodeDelimited = function decodeDelimited(reader) { + RunHealthCheckResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RefreshStateByShardResponse message. + * Verifies a RunHealthCheckResponse message. 
* @function verify - * @memberof vtctldata.RefreshStateByShardResponse + * @memberof vtctldata.RunHealthCheckResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RefreshStateByShardResponse.verify = function verify(message) { + RunHealthCheckResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.is_partial_refresh != null && message.hasOwnProperty("is_partial_refresh")) - if (typeof message.is_partial_refresh !== "boolean") - return "is_partial_refresh: boolean expected"; - if (message.partial_refresh_details != null && message.hasOwnProperty("partial_refresh_details")) - if (!$util.isString(message.partial_refresh_details)) - return "partial_refresh_details: string expected"; return null; }; /** - * Creates a RefreshStateByShardResponse message from a plain object. Also converts values to their respective internal types. + * Creates a RunHealthCheckResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.RefreshStateByShardResponse + * @memberof vtctldata.RunHealthCheckResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.RefreshStateByShardResponse} RefreshStateByShardResponse + * @returns {vtctldata.RunHealthCheckResponse} RunHealthCheckResponse */ - RefreshStateByShardResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.RefreshStateByShardResponse) + RunHealthCheckResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.RunHealthCheckResponse) return object; - let message = new $root.vtctldata.RefreshStateByShardResponse(); - if (object.is_partial_refresh != null) - message.is_partial_refresh = Boolean(object.is_partial_refresh); - if (object.partial_refresh_details != null) - message.partial_refresh_details = String(object.partial_refresh_details); - return message; + return new $root.vtctldata.RunHealthCheckResponse(); }; /** - * Creates a plain object from a RefreshStateByShardResponse message. Also converts values to other types if specified. + * Creates a plain object from a RunHealthCheckResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.RefreshStateByShardResponse + * @memberof vtctldata.RunHealthCheckResponse * @static - * @param {vtctldata.RefreshStateByShardResponse} message RefreshStateByShardResponse + * @param {vtctldata.RunHealthCheckResponse} message RunHealthCheckResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RefreshStateByShardResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.is_partial_refresh = false; - object.partial_refresh_details = ""; - } - if (message.is_partial_refresh != null && message.hasOwnProperty("is_partial_refresh")) - object.is_partial_refresh = message.is_partial_refresh; - if (message.partial_refresh_details != null && message.hasOwnProperty("partial_refresh_details")) - object.partial_refresh_details = message.partial_refresh_details; - return object; + RunHealthCheckResponse.toObject = function toObject() { + return {}; }; /** - * Converts this RefreshStateByShardResponse to JSON. + * Converts this RunHealthCheckResponse to JSON. 
* @function toJSON - * @memberof vtctldata.RefreshStateByShardResponse + * @memberof vtctldata.RunHealthCheckResponse * @instance * @returns {Object.} JSON object */ - RefreshStateByShardResponse.prototype.toJSON = function toJSON() { + RunHealthCheckResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RefreshStateByShardResponse + * Gets the default type url for RunHealthCheckResponse * @function getTypeUrl - * @memberof vtctldata.RefreshStateByShardResponse + * @memberof vtctldata.RunHealthCheckResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RefreshStateByShardResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + RunHealthCheckResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.RefreshStateByShardResponse"; + return typeUrlPrefix + "/vtctldata.RunHealthCheckResponse"; }; - return RefreshStateByShardResponse; + return RunHealthCheckResponse; })(); - vtctldata.ReloadSchemaRequest = (function() { + vtctldata.SetKeyspaceDurabilityPolicyRequest = (function() { /** - * Properties of a ReloadSchemaRequest. + * Properties of a SetKeyspaceDurabilityPolicyRequest. * @memberof vtctldata - * @interface IReloadSchemaRequest - * @property {topodata.ITabletAlias|null} [tablet_alias] ReloadSchemaRequest tablet_alias + * @interface ISetKeyspaceDurabilityPolicyRequest + * @property {string|null} [keyspace] SetKeyspaceDurabilityPolicyRequest keyspace + * @property {string|null} [durability_policy] SetKeyspaceDurabilityPolicyRequest durability_policy */ /** - * Constructs a new ReloadSchemaRequest. + * Constructs a new SetKeyspaceDurabilityPolicyRequest. * @memberof vtctldata - * @classdesc Represents a ReloadSchemaRequest. 
- * @implements IReloadSchemaRequest + * @classdesc Represents a SetKeyspaceDurabilityPolicyRequest. + * @implements ISetKeyspaceDurabilityPolicyRequest * @constructor - * @param {vtctldata.IReloadSchemaRequest=} [properties] Properties to set + * @param {vtctldata.ISetKeyspaceDurabilityPolicyRequest=} [properties] Properties to set */ - function ReloadSchemaRequest(properties) { + function SetKeyspaceDurabilityPolicyRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -125826,75 +148325,89 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ReloadSchemaRequest tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.ReloadSchemaRequest + * SetKeyspaceDurabilityPolicyRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest * @instance */ - ReloadSchemaRequest.prototype.tablet_alias = null; + SetKeyspaceDurabilityPolicyRequest.prototype.keyspace = ""; /** - * Creates a new ReloadSchemaRequest instance using the specified properties. + * SetKeyspaceDurabilityPolicyRequest durability_policy. + * @member {string} durability_policy + * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest + * @instance + */ + SetKeyspaceDurabilityPolicyRequest.prototype.durability_policy = ""; + + /** + * Creates a new SetKeyspaceDurabilityPolicyRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.ReloadSchemaRequest + * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest * @static - * @param {vtctldata.IReloadSchemaRequest=} [properties] Properties to set - * @returns {vtctldata.ReloadSchemaRequest} ReloadSchemaRequest instance + * @param {vtctldata.ISetKeyspaceDurabilityPolicyRequest=} [properties] Properties to set + * @returns {vtctldata.SetKeyspaceDurabilityPolicyRequest} SetKeyspaceDurabilityPolicyRequest instance */ - ReloadSchemaRequest.create = function create(properties) { - return new ReloadSchemaRequest(properties); + SetKeyspaceDurabilityPolicyRequest.create = function create(properties) { + return new SetKeyspaceDurabilityPolicyRequest(properties); }; /** - * Encodes the specified ReloadSchemaRequest message. Does not implicitly {@link vtctldata.ReloadSchemaRequest.verify|verify} messages. + * Encodes the specified SetKeyspaceDurabilityPolicyRequest message. Does not implicitly {@link vtctldata.SetKeyspaceDurabilityPolicyRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ReloadSchemaRequest + * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest * @static - * @param {vtctldata.IReloadSchemaRequest} message ReloadSchemaRequest message or plain object to encode + * @param {vtctldata.ISetKeyspaceDurabilityPolicyRequest} message SetKeyspaceDurabilityPolicyRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReloadSchemaRequest.encode = function encode(message, writer) { + SetKeyspaceDurabilityPolicyRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.durability_policy != null && Object.hasOwnProperty.call(message, "durability_policy")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.durability_policy); return writer; }; /** - * Encodes the specified ReloadSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaRequest.verify|verify} messages. + * Encodes the specified SetKeyspaceDurabilityPolicyRequest message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceDurabilityPolicyRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ReloadSchemaRequest + * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest * @static - * @param {vtctldata.IReloadSchemaRequest} message ReloadSchemaRequest message or plain object to encode + * @param {vtctldata.ISetKeyspaceDurabilityPolicyRequest} message SetKeyspaceDurabilityPolicyRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReloadSchemaRequest.encodeDelimited = function encodeDelimited(message, writer) { + SetKeyspaceDurabilityPolicyRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReloadSchemaRequest message from the specified reader or buffer. + * Decodes a SetKeyspaceDurabilityPolicyRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ReloadSchemaRequest + * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ReloadSchemaRequest} ReloadSchemaRequest + * @returns {vtctldata.SetKeyspaceDurabilityPolicyRequest} SetKeyspaceDurabilityPolicyRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReloadSchemaRequest.decode = function decode(reader, length) { + SetKeyspaceDurabilityPolicyRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ReloadSchemaRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.SetKeyspaceDurabilityPolicyRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + message.keyspace = reader.string(); + break; + } + case 2: { + message.durability_policy = reader.string(); break; } default: @@ -125906,126 +148419,131 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ReloadSchemaRequest message from the specified reader or buffer, length delimited. + * Decodes a SetKeyspaceDurabilityPolicyRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ReloadSchemaRequest + * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ReloadSchemaRequest} ReloadSchemaRequest + * @returns {vtctldata.SetKeyspaceDurabilityPolicyRequest} SetKeyspaceDurabilityPolicyRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReloadSchemaRequest.decodeDelimited = function decodeDelimited(reader) { + SetKeyspaceDurabilityPolicyRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReloadSchemaRequest message. + * Verifies a SetKeyspaceDurabilityPolicyRequest message. 
* @function verify - * @memberof vtctldata.ReloadSchemaRequest + * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReloadSchemaRequest.verify = function verify(message) { + SetKeyspaceDurabilityPolicyRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.durability_policy != null && message.hasOwnProperty("durability_policy")) + if (!$util.isString(message.durability_policy)) + return "durability_policy: string expected"; return null; }; /** - * Creates a ReloadSchemaRequest message from a plain object. Also converts values to their respective internal types. + * Creates a SetKeyspaceDurabilityPolicyRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ReloadSchemaRequest + * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ReloadSchemaRequest} ReloadSchemaRequest + * @returns {vtctldata.SetKeyspaceDurabilityPolicyRequest} SetKeyspaceDurabilityPolicyRequest */ - ReloadSchemaRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ReloadSchemaRequest) + SetKeyspaceDurabilityPolicyRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.SetKeyspaceDurabilityPolicyRequest) return object; - let message = new $root.vtctldata.ReloadSchemaRequest(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.ReloadSchemaRequest.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } + let message = new $root.vtctldata.SetKeyspaceDurabilityPolicyRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.durability_policy != null) + message.durability_policy = String(object.durability_policy); return message; }; /** - * Creates a plain object from a ReloadSchemaRequest message. Also converts values to other types if specified. + * Creates a plain object from a SetKeyspaceDurabilityPolicyRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ReloadSchemaRequest + * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest * @static - * @param {vtctldata.ReloadSchemaRequest} message ReloadSchemaRequest + * @param {vtctldata.SetKeyspaceDurabilityPolicyRequest} message SetKeyspaceDurabilityPolicyRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReloadSchemaRequest.toObject = function toObject(message, options) { + SetKeyspaceDurabilityPolicyRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.tablet_alias = null; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (options.defaults) { + object.keyspace = ""; + object.durability_policy = ""; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.durability_policy != null && message.hasOwnProperty("durability_policy")) + object.durability_policy = message.durability_policy; return object; }; /** - * Converts this ReloadSchemaRequest to JSON. + * Converts this SetKeyspaceDurabilityPolicyRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ReloadSchemaRequest + * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest * @instance * @returns {Object.} JSON object */ - ReloadSchemaRequest.prototype.toJSON = function toJSON() { + SetKeyspaceDurabilityPolicyRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReloadSchemaRequest + * Gets the default type url for SetKeyspaceDurabilityPolicyRequest * @function getTypeUrl - * @memberof vtctldata.ReloadSchemaRequest + * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReloadSchemaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SetKeyspaceDurabilityPolicyRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ReloadSchemaRequest"; + return typeUrlPrefix + "/vtctldata.SetKeyspaceDurabilityPolicyRequest"; }; - return ReloadSchemaRequest; + return SetKeyspaceDurabilityPolicyRequest; })(); - vtctldata.ReloadSchemaResponse = (function() { + vtctldata.SetKeyspaceDurabilityPolicyResponse = (function() { /** - * Properties of a ReloadSchemaResponse. + * Properties of a SetKeyspaceDurabilityPolicyResponse. * @memberof vtctldata - * @interface IReloadSchemaResponse + * @interface ISetKeyspaceDurabilityPolicyResponse + * @property {topodata.IKeyspace|null} [keyspace] SetKeyspaceDurabilityPolicyResponse keyspace */ /** - * Constructs a new ReloadSchemaResponse. + * Constructs a new SetKeyspaceDurabilityPolicyResponse. * @memberof vtctldata - * @classdesc Represents a ReloadSchemaResponse. - * @implements IReloadSchemaResponse + * @classdesc Represents a SetKeyspaceDurabilityPolicyResponse. 
+ * @implements ISetKeyspaceDurabilityPolicyResponse * @constructor - * @param {vtctldata.IReloadSchemaResponse=} [properties] Properties to set + * @param {vtctldata.ISetKeyspaceDurabilityPolicyResponse=} [properties] Properties to set */ - function ReloadSchemaResponse(properties) { + function SetKeyspaceDurabilityPolicyResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -126033,63 +148551,77 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new ReloadSchemaResponse instance using the specified properties. + * SetKeyspaceDurabilityPolicyResponse keyspace. + * @member {topodata.IKeyspace|null|undefined} keyspace + * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse + * @instance + */ + SetKeyspaceDurabilityPolicyResponse.prototype.keyspace = null; + + /** + * Creates a new SetKeyspaceDurabilityPolicyResponse instance using the specified properties. * @function create - * @memberof vtctldata.ReloadSchemaResponse + * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse * @static - * @param {vtctldata.IReloadSchemaResponse=} [properties] Properties to set - * @returns {vtctldata.ReloadSchemaResponse} ReloadSchemaResponse instance + * @param {vtctldata.ISetKeyspaceDurabilityPolicyResponse=} [properties] Properties to set + * @returns {vtctldata.SetKeyspaceDurabilityPolicyResponse} SetKeyspaceDurabilityPolicyResponse instance */ - ReloadSchemaResponse.create = function create(properties) { - return new ReloadSchemaResponse(properties); + SetKeyspaceDurabilityPolicyResponse.create = function create(properties) { + return new SetKeyspaceDurabilityPolicyResponse(properties); }; /** - * Encodes the specified ReloadSchemaResponse message. Does not implicitly {@link vtctldata.ReloadSchemaResponse.verify|verify} messages. + * Encodes the specified SetKeyspaceDurabilityPolicyResponse message. 
Does not implicitly {@link vtctldata.SetKeyspaceDurabilityPolicyResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.ReloadSchemaResponse + * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse * @static - * @param {vtctldata.IReloadSchemaResponse} message ReloadSchemaResponse message or plain object to encode + * @param {vtctldata.ISetKeyspaceDurabilityPolicyResponse} message SetKeyspaceDurabilityPolicyResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReloadSchemaResponse.encode = function encode(message, writer) { + SetKeyspaceDurabilityPolicyResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + $root.topodata.Keyspace.encode(message.keyspace, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified ReloadSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaResponse.verify|verify} messages. + * Encodes the specified SetKeyspaceDurabilityPolicyResponse message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceDurabilityPolicyResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ReloadSchemaResponse + * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse * @static - * @param {vtctldata.IReloadSchemaResponse} message ReloadSchemaResponse message or plain object to encode + * @param {vtctldata.ISetKeyspaceDurabilityPolicyResponse} message SetKeyspaceDurabilityPolicyResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReloadSchemaResponse.encodeDelimited = function encodeDelimited(message, writer) { + SetKeyspaceDurabilityPolicyResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReloadSchemaResponse message from the specified reader or buffer. + * Decodes a SetKeyspaceDurabilityPolicyResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ReloadSchemaResponse + * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ReloadSchemaResponse} ReloadSchemaResponse + * @returns {vtctldata.SetKeyspaceDurabilityPolicyResponse} SetKeyspaceDurabilityPolicyResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReloadSchemaResponse.decode = function decode(reader, length) { + SetKeyspaceDurabilityPolicyResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ReloadSchemaResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.SetKeyspaceDurabilityPolicyResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.keyspace = $root.topodata.Keyspace.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -126099,112 +148631,132 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ReloadSchemaResponse message from the specified reader or buffer, length delimited. + * Decodes a SetKeyspaceDurabilityPolicyResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ReloadSchemaResponse + * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ReloadSchemaResponse} ReloadSchemaResponse + * @returns {vtctldata.SetKeyspaceDurabilityPolicyResponse} SetKeyspaceDurabilityPolicyResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReloadSchemaResponse.decodeDelimited = function decodeDelimited(reader) { + SetKeyspaceDurabilityPolicyResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReloadSchemaResponse message. + * Verifies a SetKeyspaceDurabilityPolicyResponse message. 
* @function verify - * @memberof vtctldata.ReloadSchemaResponse + * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReloadSchemaResponse.verify = function verify(message) { + SetKeyspaceDurabilityPolicyResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) { + let error = $root.topodata.Keyspace.verify(message.keyspace); + if (error) + return "keyspace." + error; + } return null; }; /** - * Creates a ReloadSchemaResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SetKeyspaceDurabilityPolicyResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.ReloadSchemaResponse + * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ReloadSchemaResponse} ReloadSchemaResponse + * @returns {vtctldata.SetKeyspaceDurabilityPolicyResponse} SetKeyspaceDurabilityPolicyResponse */ - ReloadSchemaResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ReloadSchemaResponse) + SetKeyspaceDurabilityPolicyResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.SetKeyspaceDurabilityPolicyResponse) return object; - return new $root.vtctldata.ReloadSchemaResponse(); + let message = new $root.vtctldata.SetKeyspaceDurabilityPolicyResponse(); + if (object.keyspace != null) { + if (typeof object.keyspace !== "object") + throw TypeError(".vtctldata.SetKeyspaceDurabilityPolicyResponse.keyspace: object expected"); + message.keyspace = $root.topodata.Keyspace.fromObject(object.keyspace); + } + return message; }; /** - * Creates a 
plain object from a ReloadSchemaResponse message. Also converts values to other types if specified. + * Creates a plain object from a SetKeyspaceDurabilityPolicyResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.ReloadSchemaResponse + * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse * @static - * @param {vtctldata.ReloadSchemaResponse} message ReloadSchemaResponse + * @param {vtctldata.SetKeyspaceDurabilityPolicyResponse} message SetKeyspaceDurabilityPolicyResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReloadSchemaResponse.toObject = function toObject() { - return {}; + SetKeyspaceDurabilityPolicyResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.keyspace = null; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = $root.topodata.Keyspace.toObject(message.keyspace, options); + return object; }; /** - * Converts this ReloadSchemaResponse to JSON. + * Converts this SetKeyspaceDurabilityPolicyResponse to JSON. 
* @function toJSON - * @memberof vtctldata.ReloadSchemaResponse + * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse * @instance * @returns {Object.} JSON object */ - ReloadSchemaResponse.prototype.toJSON = function toJSON() { + SetKeyspaceDurabilityPolicyResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReloadSchemaResponse + * Gets the default type url for SetKeyspaceDurabilityPolicyResponse * @function getTypeUrl - * @memberof vtctldata.ReloadSchemaResponse + * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReloadSchemaResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SetKeyspaceDurabilityPolicyResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ReloadSchemaResponse"; + return typeUrlPrefix + "/vtctldata.SetKeyspaceDurabilityPolicyResponse"; }; - return ReloadSchemaResponse; + return SetKeyspaceDurabilityPolicyResponse; })(); - vtctldata.ReloadSchemaKeyspaceRequest = (function() { + vtctldata.SetKeyspaceServedFromRequest = (function() { /** - * Properties of a ReloadSchemaKeyspaceRequest. + * Properties of a SetKeyspaceServedFromRequest. 
* @memberof vtctldata - * @interface IReloadSchemaKeyspaceRequest - * @property {string|null} [keyspace] ReloadSchemaKeyspaceRequest keyspace - * @property {string|null} [wait_position] ReloadSchemaKeyspaceRequest wait_position - * @property {boolean|null} [include_primary] ReloadSchemaKeyspaceRequest include_primary - * @property {number|null} [concurrency] ReloadSchemaKeyspaceRequest concurrency + * @interface ISetKeyspaceServedFromRequest + * @property {string|null} [keyspace] SetKeyspaceServedFromRequest keyspace + * @property {topodata.TabletType|null} [tablet_type] SetKeyspaceServedFromRequest tablet_type + * @property {Array.|null} [cells] SetKeyspaceServedFromRequest cells + * @property {boolean|null} [remove] SetKeyspaceServedFromRequest remove + * @property {string|null} [source_keyspace] SetKeyspaceServedFromRequest source_keyspace */ /** - * Constructs a new ReloadSchemaKeyspaceRequest. + * Constructs a new SetKeyspaceServedFromRequest. * @memberof vtctldata - * @classdesc Represents a ReloadSchemaKeyspaceRequest. - * @implements IReloadSchemaKeyspaceRequest + * @classdesc Represents a SetKeyspaceServedFromRequest. + * @implements ISetKeyspaceServedFromRequest * @constructor - * @param {vtctldata.IReloadSchemaKeyspaceRequest=} [properties] Properties to set + * @param {vtctldata.ISetKeyspaceServedFromRequest=} [properties] Properties to set */ - function ReloadSchemaKeyspaceRequest(properties) { + function SetKeyspaceServedFromRequest(properties) { + this.cells = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -126212,100 +148764,111 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ReloadSchemaKeyspaceRequest keyspace. + * SetKeyspaceServedFromRequest keyspace. 
* @member {string} keyspace - * @memberof vtctldata.ReloadSchemaKeyspaceRequest + * @memberof vtctldata.SetKeyspaceServedFromRequest * @instance */ - ReloadSchemaKeyspaceRequest.prototype.keyspace = ""; + SetKeyspaceServedFromRequest.prototype.keyspace = ""; /** - * ReloadSchemaKeyspaceRequest wait_position. - * @member {string} wait_position - * @memberof vtctldata.ReloadSchemaKeyspaceRequest + * SetKeyspaceServedFromRequest tablet_type. + * @member {topodata.TabletType} tablet_type + * @memberof vtctldata.SetKeyspaceServedFromRequest * @instance */ - ReloadSchemaKeyspaceRequest.prototype.wait_position = ""; + SetKeyspaceServedFromRequest.prototype.tablet_type = 0; /** - * ReloadSchemaKeyspaceRequest include_primary. - * @member {boolean} include_primary - * @memberof vtctldata.ReloadSchemaKeyspaceRequest + * SetKeyspaceServedFromRequest cells. + * @member {Array.} cells + * @memberof vtctldata.SetKeyspaceServedFromRequest * @instance */ - ReloadSchemaKeyspaceRequest.prototype.include_primary = false; + SetKeyspaceServedFromRequest.prototype.cells = $util.emptyArray; /** - * ReloadSchemaKeyspaceRequest concurrency. - * @member {number} concurrency - * @memberof vtctldata.ReloadSchemaKeyspaceRequest + * SetKeyspaceServedFromRequest remove. + * @member {boolean} remove + * @memberof vtctldata.SetKeyspaceServedFromRequest * @instance */ - ReloadSchemaKeyspaceRequest.prototype.concurrency = 0; + SetKeyspaceServedFromRequest.prototype.remove = false; /** - * Creates a new ReloadSchemaKeyspaceRequest instance using the specified properties. + * SetKeyspaceServedFromRequest source_keyspace. + * @member {string} source_keyspace + * @memberof vtctldata.SetKeyspaceServedFromRequest + * @instance + */ + SetKeyspaceServedFromRequest.prototype.source_keyspace = ""; + + /** + * Creates a new SetKeyspaceServedFromRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.ReloadSchemaKeyspaceRequest + * @memberof vtctldata.SetKeyspaceServedFromRequest * @static - * @param {vtctldata.IReloadSchemaKeyspaceRequest=} [properties] Properties to set - * @returns {vtctldata.ReloadSchemaKeyspaceRequest} ReloadSchemaKeyspaceRequest instance + * @param {vtctldata.ISetKeyspaceServedFromRequest=} [properties] Properties to set + * @returns {vtctldata.SetKeyspaceServedFromRequest} SetKeyspaceServedFromRequest instance */ - ReloadSchemaKeyspaceRequest.create = function create(properties) { - return new ReloadSchemaKeyspaceRequest(properties); + SetKeyspaceServedFromRequest.create = function create(properties) { + return new SetKeyspaceServedFromRequest(properties); }; /** - * Encodes the specified ReloadSchemaKeyspaceRequest message. Does not implicitly {@link vtctldata.ReloadSchemaKeyspaceRequest.verify|verify} messages. + * Encodes the specified SetKeyspaceServedFromRequest message. Does not implicitly {@link vtctldata.SetKeyspaceServedFromRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ReloadSchemaKeyspaceRequest + * @memberof vtctldata.SetKeyspaceServedFromRequest * @static - * @param {vtctldata.IReloadSchemaKeyspaceRequest} message ReloadSchemaKeyspaceRequest message or plain object to encode + * @param {vtctldata.ISetKeyspaceServedFromRequest} message SetKeyspaceServedFromRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReloadSchemaKeyspaceRequest.encode = function encode(message, writer) { + SetKeyspaceServedFromRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.wait_position != null && Object.hasOwnProperty.call(message, "wait_position")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.wait_position); - if (message.include_primary != null && Object.hasOwnProperty.call(message, "include_primary")) - writer.uint32(/* id 3, wireType 0 =*/24).bool(message.include_primary); - if (message.concurrency != null && Object.hasOwnProperty.call(message, "concurrency")) - writer.uint32(/* id 4, wireType 0 =*/32).uint32(message.concurrency); + if (message.tablet_type != null && Object.hasOwnProperty.call(message, "tablet_type")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.tablet_type); + if (message.cells != null && message.cells.length) + for (let i = 0; i < message.cells.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.cells[i]); + if (message.remove != null && Object.hasOwnProperty.call(message, "remove")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.remove); + if (message.source_keyspace != null && Object.hasOwnProperty.call(message, "source_keyspace")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.source_keyspace); return writer; }; /** - * Encodes the 
specified ReloadSchemaKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaKeyspaceRequest.verify|verify} messages. + * Encodes the specified SetKeyspaceServedFromRequest message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceServedFromRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ReloadSchemaKeyspaceRequest + * @memberof vtctldata.SetKeyspaceServedFromRequest * @static - * @param {vtctldata.IReloadSchemaKeyspaceRequest} message ReloadSchemaKeyspaceRequest message or plain object to encode + * @param {vtctldata.ISetKeyspaceServedFromRequest} message SetKeyspaceServedFromRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReloadSchemaKeyspaceRequest.encodeDelimited = function encodeDelimited(message, writer) { + SetKeyspaceServedFromRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReloadSchemaKeyspaceRequest message from the specified reader or buffer. + * Decodes a SetKeyspaceServedFromRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.ReloadSchemaKeyspaceRequest + * @memberof vtctldata.SetKeyspaceServedFromRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ReloadSchemaKeyspaceRequest} ReloadSchemaKeyspaceRequest + * @returns {vtctldata.SetKeyspaceServedFromRequest} SetKeyspaceServedFromRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReloadSchemaKeyspaceRequest.decode = function decode(reader, length) { + SetKeyspaceServedFromRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ReloadSchemaKeyspaceRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SetKeyspaceServedFromRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -126314,15 +148877,21 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 2: { - message.wait_position = reader.string(); + message.tablet_type = reader.int32(); break; } case 3: { - message.include_primary = reader.bool(); + if (!(message.cells && message.cells.length)) + message.cells = []; + message.cells.push(reader.string()); break; } case 4: { - message.concurrency = reader.uint32(); + message.remove = reader.bool(); + break; + } + case 5: { + message.source_keyspace = reader.string(); break; } default: @@ -126334,148 +148903,232 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ReloadSchemaKeyspaceRequest message from the specified reader or buffer, length delimited. + * Decodes a SetKeyspaceServedFromRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.ReloadSchemaKeyspaceRequest + * @memberof vtctldata.SetKeyspaceServedFromRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ReloadSchemaKeyspaceRequest} ReloadSchemaKeyspaceRequest + * @returns {vtctldata.SetKeyspaceServedFromRequest} SetKeyspaceServedFromRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReloadSchemaKeyspaceRequest.decodeDelimited = function decodeDelimited(reader) { + SetKeyspaceServedFromRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReloadSchemaKeyspaceRequest message. + * Verifies a SetKeyspaceServedFromRequest message. * @function verify - * @memberof vtctldata.ReloadSchemaKeyspaceRequest + * @memberof vtctldata.SetKeyspaceServedFromRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReloadSchemaKeyspaceRequest.verify = function verify(message) { + SetKeyspaceServedFromRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.keyspace != null && message.hasOwnProperty("keyspace")) if (!$util.isString(message.keyspace)) return "keyspace: string expected"; - if (message.wait_position != null && message.hasOwnProperty("wait_position")) - if (!$util.isString(message.wait_position)) - return "wait_position: string expected"; - if (message.include_primary != null && message.hasOwnProperty("include_primary")) - if (typeof message.include_primary !== "boolean") - return "include_primary: boolean expected"; - if (message.concurrency != null && message.hasOwnProperty("concurrency")) - if 
(!$util.isInteger(message.concurrency)) - return "concurrency: integer expected"; + if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) + switch (message.tablet_type) { + default: + return "tablet_type: enum value expected"; + case 0: + case 1: + case 1: + case 2: + case 3: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + } + if (message.cells != null && message.hasOwnProperty("cells")) { + if (!Array.isArray(message.cells)) + return "cells: array expected"; + for (let i = 0; i < message.cells.length; ++i) + if (!$util.isString(message.cells[i])) + return "cells: string[] expected"; + } + if (message.remove != null && message.hasOwnProperty("remove")) + if (typeof message.remove !== "boolean") + return "remove: boolean expected"; + if (message.source_keyspace != null && message.hasOwnProperty("source_keyspace")) + if (!$util.isString(message.source_keyspace)) + return "source_keyspace: string expected"; return null; }; /** - * Creates a ReloadSchemaKeyspaceRequest message from a plain object. Also converts values to their respective internal types. + * Creates a SetKeyspaceServedFromRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ReloadSchemaKeyspaceRequest + * @memberof vtctldata.SetKeyspaceServedFromRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ReloadSchemaKeyspaceRequest} ReloadSchemaKeyspaceRequest + * @returns {vtctldata.SetKeyspaceServedFromRequest} SetKeyspaceServedFromRequest */ - ReloadSchemaKeyspaceRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ReloadSchemaKeyspaceRequest) + SetKeyspaceServedFromRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.SetKeyspaceServedFromRequest) return object; - let message = new $root.vtctldata.ReloadSchemaKeyspaceRequest(); + let message = new $root.vtctldata.SetKeyspaceServedFromRequest(); if (object.keyspace != null) message.keyspace = String(object.keyspace); - if (object.wait_position != null) - message.wait_position = String(object.wait_position); - if (object.include_primary != null) - message.include_primary = Boolean(object.include_primary); - if (object.concurrency != null) - message.concurrency = object.concurrency >>> 0; + switch (object.tablet_type) { + default: + if (typeof object.tablet_type === "number") { + message.tablet_type = object.tablet_type; + break; + } + break; + case "UNKNOWN": + case 0: + message.tablet_type = 0; + break; + case "PRIMARY": + case 1: + message.tablet_type = 1; + break; + case "MASTER": + case 1: + message.tablet_type = 1; + break; + case "REPLICA": + case 2: + message.tablet_type = 2; + break; + case "RDONLY": + case 3: + message.tablet_type = 3; + break; + case "BATCH": + case 3: + message.tablet_type = 3; + break; + case "SPARE": + case 4: + message.tablet_type = 4; + break; + case "EXPERIMENTAL": + case 5: + message.tablet_type = 5; + break; + case "BACKUP": + case 6: + message.tablet_type = 6; + break; + case "RESTORE": + case 7: + message.tablet_type = 7; + break; + case "DRAINED": + case 8: + message.tablet_type = 8; + break; + } + if 
(object.cells) { + if (!Array.isArray(object.cells)) + throw TypeError(".vtctldata.SetKeyspaceServedFromRequest.cells: array expected"); + message.cells = []; + for (let i = 0; i < object.cells.length; ++i) + message.cells[i] = String(object.cells[i]); + } + if (object.remove != null) + message.remove = Boolean(object.remove); + if (object.source_keyspace != null) + message.source_keyspace = String(object.source_keyspace); return message; }; /** - * Creates a plain object from a ReloadSchemaKeyspaceRequest message. Also converts values to other types if specified. + * Creates a plain object from a SetKeyspaceServedFromRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.ReloadSchemaKeyspaceRequest + * @memberof vtctldata.SetKeyspaceServedFromRequest * @static - * @param {vtctldata.ReloadSchemaKeyspaceRequest} message ReloadSchemaKeyspaceRequest + * @param {vtctldata.SetKeyspaceServedFromRequest} message SetKeyspaceServedFromRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReloadSchemaKeyspaceRequest.toObject = function toObject(message, options) { + SetKeyspaceServedFromRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) + object.cells = []; if (options.defaults) { object.keyspace = ""; - object.wait_position = ""; - object.include_primary = false; - object.concurrency = 0; + object.tablet_type = options.enums === String ? 
"UNKNOWN" : 0; + object.remove = false; + object.source_keyspace = ""; } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; - if (message.wait_position != null && message.hasOwnProperty("wait_position")) - object.wait_position = message.wait_position; - if (message.include_primary != null && message.hasOwnProperty("include_primary")) - object.include_primary = message.include_primary; - if (message.concurrency != null && message.hasOwnProperty("concurrency")) - object.concurrency = message.concurrency; + if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) + object.tablet_type = options.enums === String ? $root.topodata.TabletType[message.tablet_type] === undefined ? message.tablet_type : $root.topodata.TabletType[message.tablet_type] : message.tablet_type; + if (message.cells && message.cells.length) { + object.cells = []; + for (let j = 0; j < message.cells.length; ++j) + object.cells[j] = message.cells[j]; + } + if (message.remove != null && message.hasOwnProperty("remove")) + object.remove = message.remove; + if (message.source_keyspace != null && message.hasOwnProperty("source_keyspace")) + object.source_keyspace = message.source_keyspace; return object; }; /** - * Converts this ReloadSchemaKeyspaceRequest to JSON. + * Converts this SetKeyspaceServedFromRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ReloadSchemaKeyspaceRequest + * @memberof vtctldata.SetKeyspaceServedFromRequest * @instance * @returns {Object.} JSON object */ - ReloadSchemaKeyspaceRequest.prototype.toJSON = function toJSON() { + SetKeyspaceServedFromRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReloadSchemaKeyspaceRequest + * Gets the default type url for SetKeyspaceServedFromRequest * @function getTypeUrl - * @memberof vtctldata.ReloadSchemaKeyspaceRequest + * @memberof vtctldata.SetKeyspaceServedFromRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReloadSchemaKeyspaceRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SetKeyspaceServedFromRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ReloadSchemaKeyspaceRequest"; + return typeUrlPrefix + "/vtctldata.SetKeyspaceServedFromRequest"; }; - return ReloadSchemaKeyspaceRequest; + return SetKeyspaceServedFromRequest; })(); - vtctldata.ReloadSchemaKeyspaceResponse = (function() { + vtctldata.SetKeyspaceServedFromResponse = (function() { /** - * Properties of a ReloadSchemaKeyspaceResponse. + * Properties of a SetKeyspaceServedFromResponse. * @memberof vtctldata - * @interface IReloadSchemaKeyspaceResponse - * @property {Array.|null} [events] ReloadSchemaKeyspaceResponse events + * @interface ISetKeyspaceServedFromResponse + * @property {topodata.IKeyspace|null} [keyspace] SetKeyspaceServedFromResponse keyspace */ /** - * Constructs a new ReloadSchemaKeyspaceResponse. + * Constructs a new SetKeyspaceServedFromResponse. * @memberof vtctldata - * @classdesc Represents a ReloadSchemaKeyspaceResponse. 
- * @implements IReloadSchemaKeyspaceResponse + * @classdesc Represents a SetKeyspaceServedFromResponse. + * @implements ISetKeyspaceServedFromResponse * @constructor - * @param {vtctldata.IReloadSchemaKeyspaceResponse=} [properties] Properties to set + * @param {vtctldata.ISetKeyspaceServedFromResponse=} [properties] Properties to set */ - function ReloadSchemaKeyspaceResponse(properties) { - this.events = []; + function SetKeyspaceServedFromResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -126483,78 +149136,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ReloadSchemaKeyspaceResponse events. - * @member {Array.} events - * @memberof vtctldata.ReloadSchemaKeyspaceResponse + * SetKeyspaceServedFromResponse keyspace. + * @member {topodata.IKeyspace|null|undefined} keyspace + * @memberof vtctldata.SetKeyspaceServedFromResponse * @instance */ - ReloadSchemaKeyspaceResponse.prototype.events = $util.emptyArray; + SetKeyspaceServedFromResponse.prototype.keyspace = null; /** - * Creates a new ReloadSchemaKeyspaceResponse instance using the specified properties. + * Creates a new SetKeyspaceServedFromResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.ReloadSchemaKeyspaceResponse + * @memberof vtctldata.SetKeyspaceServedFromResponse * @static - * @param {vtctldata.IReloadSchemaKeyspaceResponse=} [properties] Properties to set - * @returns {vtctldata.ReloadSchemaKeyspaceResponse} ReloadSchemaKeyspaceResponse instance + * @param {vtctldata.ISetKeyspaceServedFromResponse=} [properties] Properties to set + * @returns {vtctldata.SetKeyspaceServedFromResponse} SetKeyspaceServedFromResponse instance */ - ReloadSchemaKeyspaceResponse.create = function create(properties) { - return new ReloadSchemaKeyspaceResponse(properties); + SetKeyspaceServedFromResponse.create = function create(properties) { + return new SetKeyspaceServedFromResponse(properties); }; /** - * Encodes the specified ReloadSchemaKeyspaceResponse message. Does not implicitly {@link vtctldata.ReloadSchemaKeyspaceResponse.verify|verify} messages. + * Encodes the specified SetKeyspaceServedFromResponse message. Does not implicitly {@link vtctldata.SetKeyspaceServedFromResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ReloadSchemaKeyspaceResponse + * @memberof vtctldata.SetKeyspaceServedFromResponse * @static - * @param {vtctldata.IReloadSchemaKeyspaceResponse} message ReloadSchemaKeyspaceResponse message or plain object to encode + * @param {vtctldata.ISetKeyspaceServedFromResponse} message SetKeyspaceServedFromResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReloadSchemaKeyspaceResponse.encode = function encode(message, writer) { + SetKeyspaceServedFromResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.events != null && message.events.length) - for (let i = 0; i < message.events.length; ++i) - $root.logutil.Event.encode(message.events[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + $root.topodata.Keyspace.encode(message.keyspace, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified ReloadSchemaKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaKeyspaceResponse.verify|verify} messages. + * Encodes the specified SetKeyspaceServedFromResponse message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceServedFromResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ReloadSchemaKeyspaceResponse + * @memberof vtctldata.SetKeyspaceServedFromResponse * @static - * @param {vtctldata.IReloadSchemaKeyspaceResponse} message ReloadSchemaKeyspaceResponse message or plain object to encode + * @param {vtctldata.ISetKeyspaceServedFromResponse} message SetKeyspaceServedFromResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReloadSchemaKeyspaceResponse.encodeDelimited = function encodeDelimited(message, writer) { + SetKeyspaceServedFromResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReloadSchemaKeyspaceResponse message from the specified reader or buffer. + * Decodes a SetKeyspaceServedFromResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ReloadSchemaKeyspaceResponse + * @memberof vtctldata.SetKeyspaceServedFromResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ReloadSchemaKeyspaceResponse} ReloadSchemaKeyspaceResponse + * @returns {vtctldata.SetKeyspaceServedFromResponse} SetKeyspaceServedFromResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReloadSchemaKeyspaceResponse.decode = function decode(reader, length) { + SetKeyspaceServedFromResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ReloadSchemaKeyspaceResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.SetKeyspaceServedFromResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.events && message.events.length)) - message.events = []; - message.events.push($root.logutil.Event.decode(reader, reader.uint32())); + message.keyspace = $root.topodata.Keyspace.decode(reader, reader.uint32()); break; } default: @@ -126566,143 +149216,128 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ReloadSchemaKeyspaceResponse message from the specified reader or buffer, length delimited. + * Decodes a SetKeyspaceServedFromResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ReloadSchemaKeyspaceResponse + * @memberof vtctldata.SetKeyspaceServedFromResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ReloadSchemaKeyspaceResponse} ReloadSchemaKeyspaceResponse + * @returns {vtctldata.SetKeyspaceServedFromResponse} SetKeyspaceServedFromResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReloadSchemaKeyspaceResponse.decodeDelimited = function decodeDelimited(reader) { + SetKeyspaceServedFromResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReloadSchemaKeyspaceResponse message. + * Verifies a SetKeyspaceServedFromResponse message. 
* @function verify - * @memberof vtctldata.ReloadSchemaKeyspaceResponse + * @memberof vtctldata.SetKeyspaceServedFromResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReloadSchemaKeyspaceResponse.verify = function verify(message) { + SetKeyspaceServedFromResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.events != null && message.hasOwnProperty("events")) { - if (!Array.isArray(message.events)) - return "events: array expected"; - for (let i = 0; i < message.events.length; ++i) { - let error = $root.logutil.Event.verify(message.events[i]); - if (error) - return "events." + error; - } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) { + let error = $root.topodata.Keyspace.verify(message.keyspace); + if (error) + return "keyspace." + error; } return null; }; /** - * Creates a ReloadSchemaKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SetKeyspaceServedFromResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ReloadSchemaKeyspaceResponse + * @memberof vtctldata.SetKeyspaceServedFromResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ReloadSchemaKeyspaceResponse} ReloadSchemaKeyspaceResponse + * @returns {vtctldata.SetKeyspaceServedFromResponse} SetKeyspaceServedFromResponse */ - ReloadSchemaKeyspaceResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ReloadSchemaKeyspaceResponse) + SetKeyspaceServedFromResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.SetKeyspaceServedFromResponse) return object; - let message = new $root.vtctldata.ReloadSchemaKeyspaceResponse(); - if (object.events) { - if (!Array.isArray(object.events)) - throw TypeError(".vtctldata.ReloadSchemaKeyspaceResponse.events: array expected"); - message.events = []; - for (let i = 0; i < object.events.length; ++i) { - if (typeof object.events[i] !== "object") - throw TypeError(".vtctldata.ReloadSchemaKeyspaceResponse.events: object expected"); - message.events[i] = $root.logutil.Event.fromObject(object.events[i]); - } + let message = new $root.vtctldata.SetKeyspaceServedFromResponse(); + if (object.keyspace != null) { + if (typeof object.keyspace !== "object") + throw TypeError(".vtctldata.SetKeyspaceServedFromResponse.keyspace: object expected"); + message.keyspace = $root.topodata.Keyspace.fromObject(object.keyspace); } return message; }; /** - * Creates a plain object from a ReloadSchemaKeyspaceResponse message. Also converts values to other types if specified. + * Creates a plain object from a SetKeyspaceServedFromResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ReloadSchemaKeyspaceResponse + * @memberof vtctldata.SetKeyspaceServedFromResponse * @static - * @param {vtctldata.ReloadSchemaKeyspaceResponse} message ReloadSchemaKeyspaceResponse + * @param {vtctldata.SetKeyspaceServedFromResponse} message SetKeyspaceServedFromResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReloadSchemaKeyspaceResponse.toObject = function toObject(message, options) { + SetKeyspaceServedFromResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.events = []; - if (message.events && message.events.length) { - object.events = []; - for (let j = 0; j < message.events.length; ++j) - object.events[j] = $root.logutil.Event.toObject(message.events[j], options); - } + if (options.defaults) + object.keyspace = null; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = $root.topodata.Keyspace.toObject(message.keyspace, options); return object; }; /** - * Converts this ReloadSchemaKeyspaceResponse to JSON. + * Converts this SetKeyspaceServedFromResponse to JSON. 
* @function toJSON - * @memberof vtctldata.ReloadSchemaKeyspaceResponse + * @memberof vtctldata.SetKeyspaceServedFromResponse * @instance * @returns {Object.} JSON object */ - ReloadSchemaKeyspaceResponse.prototype.toJSON = function toJSON() { + SetKeyspaceServedFromResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReloadSchemaKeyspaceResponse + * Gets the default type url for SetKeyspaceServedFromResponse * @function getTypeUrl - * @memberof vtctldata.ReloadSchemaKeyspaceResponse + * @memberof vtctldata.SetKeyspaceServedFromResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReloadSchemaKeyspaceResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SetKeyspaceServedFromResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ReloadSchemaKeyspaceResponse"; + return typeUrlPrefix + "/vtctldata.SetKeyspaceServedFromResponse"; }; - return ReloadSchemaKeyspaceResponse; + return SetKeyspaceServedFromResponse; })(); - vtctldata.ReloadSchemaShardRequest = (function() { + vtctldata.SetKeyspaceShardingInfoRequest = (function() { /** - * Properties of a ReloadSchemaShardRequest. + * Properties of a SetKeyspaceShardingInfoRequest. 
* @memberof vtctldata - * @interface IReloadSchemaShardRequest - * @property {string|null} [keyspace] ReloadSchemaShardRequest keyspace - * @property {string|null} [shard] ReloadSchemaShardRequest shard - * @property {string|null} [wait_position] ReloadSchemaShardRequest wait_position - * @property {boolean|null} [include_primary] ReloadSchemaShardRequest include_primary - * @property {number|null} [concurrency] ReloadSchemaShardRequest concurrency + * @interface ISetKeyspaceShardingInfoRequest + * @property {string|null} [keyspace] SetKeyspaceShardingInfoRequest keyspace + * @property {boolean|null} [force] SetKeyspaceShardingInfoRequest force */ /** - * Constructs a new ReloadSchemaShardRequest. + * Constructs a new SetKeyspaceShardingInfoRequest. * @memberof vtctldata - * @classdesc Represents a ReloadSchemaShardRequest. - * @implements IReloadSchemaShardRequest + * @classdesc Represents a SetKeyspaceShardingInfoRequest. + * @implements ISetKeyspaceShardingInfoRequest * @constructor - * @param {vtctldata.IReloadSchemaShardRequest=} [properties] Properties to set + * @param {vtctldata.ISetKeyspaceShardingInfoRequest=} [properties] Properties to set */ - function ReloadSchemaShardRequest(properties) { + function SetKeyspaceShardingInfoRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -126710,110 +149345,80 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ReloadSchemaShardRequest keyspace. + * SetKeyspaceShardingInfoRequest keyspace. * @member {string} keyspace - * @memberof vtctldata.ReloadSchemaShardRequest - * @instance - */ - ReloadSchemaShardRequest.prototype.keyspace = ""; - - /** - * ReloadSchemaShardRequest shard. - * @member {string} shard - * @memberof vtctldata.ReloadSchemaShardRequest - * @instance - */ - ReloadSchemaShardRequest.prototype.shard = ""; - - /** - * ReloadSchemaShardRequest wait_position. 
- * @member {string} wait_position - * @memberof vtctldata.ReloadSchemaShardRequest - * @instance - */ - ReloadSchemaShardRequest.prototype.wait_position = ""; - - /** - * ReloadSchemaShardRequest include_primary. - * @member {boolean} include_primary - * @memberof vtctldata.ReloadSchemaShardRequest + * @memberof vtctldata.SetKeyspaceShardingInfoRequest * @instance */ - ReloadSchemaShardRequest.prototype.include_primary = false; + SetKeyspaceShardingInfoRequest.prototype.keyspace = ""; /** - * ReloadSchemaShardRequest concurrency. - * @member {number} concurrency - * @memberof vtctldata.ReloadSchemaShardRequest + * SetKeyspaceShardingInfoRequest force. + * @member {boolean} force + * @memberof vtctldata.SetKeyspaceShardingInfoRequest * @instance */ - ReloadSchemaShardRequest.prototype.concurrency = 0; + SetKeyspaceShardingInfoRequest.prototype.force = false; /** - * Creates a new ReloadSchemaShardRequest instance using the specified properties. + * Creates a new SetKeyspaceShardingInfoRequest instance using the specified properties. * @function create - * @memberof vtctldata.ReloadSchemaShardRequest + * @memberof vtctldata.SetKeyspaceShardingInfoRequest * @static - * @param {vtctldata.IReloadSchemaShardRequest=} [properties] Properties to set - * @returns {vtctldata.ReloadSchemaShardRequest} ReloadSchemaShardRequest instance + * @param {vtctldata.ISetKeyspaceShardingInfoRequest=} [properties] Properties to set + * @returns {vtctldata.SetKeyspaceShardingInfoRequest} SetKeyspaceShardingInfoRequest instance */ - ReloadSchemaShardRequest.create = function create(properties) { - return new ReloadSchemaShardRequest(properties); + SetKeyspaceShardingInfoRequest.create = function create(properties) { + return new SetKeyspaceShardingInfoRequest(properties); }; /** - * Encodes the specified ReloadSchemaShardRequest message. Does not implicitly {@link vtctldata.ReloadSchemaShardRequest.verify|verify} messages. + * Encodes the specified SetKeyspaceShardingInfoRequest message. 
Does not implicitly {@link vtctldata.SetKeyspaceShardingInfoRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.ReloadSchemaShardRequest + * @memberof vtctldata.SetKeyspaceShardingInfoRequest * @static - * @param {vtctldata.IReloadSchemaShardRequest} message ReloadSchemaShardRequest message or plain object to encode + * @param {vtctldata.ISetKeyspaceShardingInfoRequest} message SetKeyspaceShardingInfoRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReloadSchemaShardRequest.encode = function encode(message, writer) { + SetKeyspaceShardingInfoRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.wait_position != null && Object.hasOwnProperty.call(message, "wait_position")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.wait_position); - if (message.include_primary != null && Object.hasOwnProperty.call(message, "include_primary")) - writer.uint32(/* id 4, wireType 0 =*/32).bool(message.include_primary); - if (message.concurrency != null && Object.hasOwnProperty.call(message, "concurrency")) - writer.uint32(/* id 5, wireType 0 =*/40).uint32(message.concurrency); + if (message.force != null && Object.hasOwnProperty.call(message, "force")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.force); return writer; }; /** - * Encodes the specified ReloadSchemaShardRequest message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaShardRequest.verify|verify} messages. + * Encodes the specified SetKeyspaceShardingInfoRequest message, length delimited. 
Does not implicitly {@link vtctldata.SetKeyspaceShardingInfoRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ReloadSchemaShardRequest + * @memberof vtctldata.SetKeyspaceShardingInfoRequest * @static - * @param {vtctldata.IReloadSchemaShardRequest} message ReloadSchemaShardRequest message or plain object to encode + * @param {vtctldata.ISetKeyspaceShardingInfoRequest} message SetKeyspaceShardingInfoRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReloadSchemaShardRequest.encodeDelimited = function encodeDelimited(message, writer) { + SetKeyspaceShardingInfoRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReloadSchemaShardRequest message from the specified reader or buffer. + * Decodes a SetKeyspaceShardingInfoRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ReloadSchemaShardRequest + * @memberof vtctldata.SetKeyspaceShardingInfoRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ReloadSchemaShardRequest} ReloadSchemaShardRequest + * @returns {vtctldata.SetKeyspaceShardingInfoRequest} SetKeyspaceShardingInfoRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReloadSchemaShardRequest.decode = function decode(reader, length) { + SetKeyspaceShardingInfoRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ReloadSchemaShardRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.SetKeyspaceShardingInfoRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -126821,20 +149426,8 @@ export const vtctldata = $root.vtctldata = (() => { message.keyspace = reader.string(); break; } - case 2: { - message.shard = reader.string(); - break; - } - case 3: { - message.wait_position = reader.string(); - break; - } case 4: { - message.include_primary = reader.bool(); - break; - } - case 5: { - message.concurrency = reader.uint32(); + message.force = reader.bool(); break; } default: @@ -126846,156 +149439,131 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ReloadSchemaShardRequest message from the specified reader or buffer, length delimited. + * Decodes a SetKeyspaceShardingInfoRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ReloadSchemaShardRequest + * @memberof vtctldata.SetKeyspaceShardingInfoRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ReloadSchemaShardRequest} ReloadSchemaShardRequest + * @returns {vtctldata.SetKeyspaceShardingInfoRequest} SetKeyspaceShardingInfoRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReloadSchemaShardRequest.decodeDelimited = function decodeDelimited(reader) { + SetKeyspaceShardingInfoRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReloadSchemaShardRequest message. + * Verifies a SetKeyspaceShardingInfoRequest message. 
* @function verify - * @memberof vtctldata.ReloadSchemaShardRequest + * @memberof vtctldata.SetKeyspaceShardingInfoRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReloadSchemaShardRequest.verify = function verify(message) { + SetKeyspaceShardingInfoRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.keyspace != null && message.hasOwnProperty("keyspace")) if (!$util.isString(message.keyspace)) return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.wait_position != null && message.hasOwnProperty("wait_position")) - if (!$util.isString(message.wait_position)) - return "wait_position: string expected"; - if (message.include_primary != null && message.hasOwnProperty("include_primary")) - if (typeof message.include_primary !== "boolean") - return "include_primary: boolean expected"; - if (message.concurrency != null && message.hasOwnProperty("concurrency")) - if (!$util.isInteger(message.concurrency)) - return "concurrency: integer expected"; + if (message.force != null && message.hasOwnProperty("force")) + if (typeof message.force !== "boolean") + return "force: boolean expected"; return null; }; /** - * Creates a ReloadSchemaShardRequest message from a plain object. Also converts values to their respective internal types. + * Creates a SetKeyspaceShardingInfoRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ReloadSchemaShardRequest + * @memberof vtctldata.SetKeyspaceShardingInfoRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ReloadSchemaShardRequest} ReloadSchemaShardRequest + * @returns {vtctldata.SetKeyspaceShardingInfoRequest} SetKeyspaceShardingInfoRequest */ - ReloadSchemaShardRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ReloadSchemaShardRequest) + SetKeyspaceShardingInfoRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.SetKeyspaceShardingInfoRequest) return object; - let message = new $root.vtctldata.ReloadSchemaShardRequest(); + let message = new $root.vtctldata.SetKeyspaceShardingInfoRequest(); if (object.keyspace != null) message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.wait_position != null) - message.wait_position = String(object.wait_position); - if (object.include_primary != null) - message.include_primary = Boolean(object.include_primary); - if (object.concurrency != null) - message.concurrency = object.concurrency >>> 0; + if (object.force != null) + message.force = Boolean(object.force); return message; }; /** - * Creates a plain object from a ReloadSchemaShardRequest message. Also converts values to other types if specified. + * Creates a plain object from a SetKeyspaceShardingInfoRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ReloadSchemaShardRequest + * @memberof vtctldata.SetKeyspaceShardingInfoRequest * @static - * @param {vtctldata.ReloadSchemaShardRequest} message ReloadSchemaShardRequest + * @param {vtctldata.SetKeyspaceShardingInfoRequest} message SetKeyspaceShardingInfoRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReloadSchemaShardRequest.toObject = function toObject(message, options) { + SetKeyspaceShardingInfoRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { object.keyspace = ""; - object.shard = ""; - object.wait_position = ""; - object.include_primary = false; - object.concurrency = 0; + object.force = false; } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.wait_position != null && message.hasOwnProperty("wait_position")) - object.wait_position = message.wait_position; - if (message.include_primary != null && message.hasOwnProperty("include_primary")) - object.include_primary = message.include_primary; - if (message.concurrency != null && message.hasOwnProperty("concurrency")) - object.concurrency = message.concurrency; + if (message.force != null && message.hasOwnProperty("force")) + object.force = message.force; return object; }; /** - * Converts this ReloadSchemaShardRequest to JSON. + * Converts this SetKeyspaceShardingInfoRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ReloadSchemaShardRequest + * @memberof vtctldata.SetKeyspaceShardingInfoRequest * @instance * @returns {Object.} JSON object */ - ReloadSchemaShardRequest.prototype.toJSON = function toJSON() { + SetKeyspaceShardingInfoRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReloadSchemaShardRequest + * Gets the default type url for SetKeyspaceShardingInfoRequest * @function getTypeUrl - * @memberof vtctldata.ReloadSchemaShardRequest + * @memberof vtctldata.SetKeyspaceShardingInfoRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReloadSchemaShardRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SetKeyspaceShardingInfoRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ReloadSchemaShardRequest"; + return typeUrlPrefix + "/vtctldata.SetKeyspaceShardingInfoRequest"; }; - return ReloadSchemaShardRequest; + return SetKeyspaceShardingInfoRequest; })(); - vtctldata.ReloadSchemaShardResponse = (function() { + vtctldata.SetKeyspaceShardingInfoResponse = (function() { /** - * Properties of a ReloadSchemaShardResponse. + * Properties of a SetKeyspaceShardingInfoResponse. * @memberof vtctldata - * @interface IReloadSchemaShardResponse - * @property {Array.|null} [events] ReloadSchemaShardResponse events + * @interface ISetKeyspaceShardingInfoResponse + * @property {topodata.IKeyspace|null} [keyspace] SetKeyspaceShardingInfoResponse keyspace */ /** - * Constructs a new ReloadSchemaShardResponse. + * Constructs a new SetKeyspaceShardingInfoResponse. * @memberof vtctldata - * @classdesc Represents a ReloadSchemaShardResponse. 
- * @implements IReloadSchemaShardResponse + * @classdesc Represents a SetKeyspaceShardingInfoResponse. + * @implements ISetKeyspaceShardingInfoResponse * @constructor - * @param {vtctldata.IReloadSchemaShardResponse=} [properties] Properties to set + * @param {vtctldata.ISetKeyspaceShardingInfoResponse=} [properties] Properties to set */ - function ReloadSchemaShardResponse(properties) { - this.events = []; + function SetKeyspaceShardingInfoResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -127003,78 +149571,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ReloadSchemaShardResponse events. - * @member {Array.} events - * @memberof vtctldata.ReloadSchemaShardResponse + * SetKeyspaceShardingInfoResponse keyspace. + * @member {topodata.IKeyspace|null|undefined} keyspace + * @memberof vtctldata.SetKeyspaceShardingInfoResponse * @instance */ - ReloadSchemaShardResponse.prototype.events = $util.emptyArray; + SetKeyspaceShardingInfoResponse.prototype.keyspace = null; /** - * Creates a new ReloadSchemaShardResponse instance using the specified properties. + * Creates a new SetKeyspaceShardingInfoResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.ReloadSchemaShardResponse + * @memberof vtctldata.SetKeyspaceShardingInfoResponse * @static - * @param {vtctldata.IReloadSchemaShardResponse=} [properties] Properties to set - * @returns {vtctldata.ReloadSchemaShardResponse} ReloadSchemaShardResponse instance + * @param {vtctldata.ISetKeyspaceShardingInfoResponse=} [properties] Properties to set + * @returns {vtctldata.SetKeyspaceShardingInfoResponse} SetKeyspaceShardingInfoResponse instance */ - ReloadSchemaShardResponse.create = function create(properties) { - return new ReloadSchemaShardResponse(properties); + SetKeyspaceShardingInfoResponse.create = function create(properties) { + return new SetKeyspaceShardingInfoResponse(properties); }; /** - * Encodes the specified ReloadSchemaShardResponse message. Does not implicitly {@link vtctldata.ReloadSchemaShardResponse.verify|verify} messages. + * Encodes the specified SetKeyspaceShardingInfoResponse message. Does not implicitly {@link vtctldata.SetKeyspaceShardingInfoResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ReloadSchemaShardResponse + * @memberof vtctldata.SetKeyspaceShardingInfoResponse * @static - * @param {vtctldata.IReloadSchemaShardResponse} message ReloadSchemaShardResponse message or plain object to encode + * @param {vtctldata.ISetKeyspaceShardingInfoResponse} message SetKeyspaceShardingInfoResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReloadSchemaShardResponse.encode = function encode(message, writer) { + SetKeyspaceShardingInfoResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.events != null && message.events.length) - for (let i = 0; i < message.events.length; ++i) - $root.logutil.Event.encode(message.events[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + $root.topodata.Keyspace.encode(message.keyspace, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified ReloadSchemaShardResponse message, length delimited. Does not implicitly {@link vtctldata.ReloadSchemaShardResponse.verify|verify} messages. + * Encodes the specified SetKeyspaceShardingInfoResponse message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceShardingInfoResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ReloadSchemaShardResponse + * @memberof vtctldata.SetKeyspaceShardingInfoResponse * @static - * @param {vtctldata.IReloadSchemaShardResponse} message ReloadSchemaShardResponse message or plain object to encode + * @param {vtctldata.ISetKeyspaceShardingInfoResponse} message SetKeyspaceShardingInfoResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReloadSchemaShardResponse.encodeDelimited = function encodeDelimited(message, writer) { + SetKeyspaceShardingInfoResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReloadSchemaShardResponse message from the specified reader or buffer. + * Decodes a SetKeyspaceShardingInfoResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ReloadSchemaShardResponse + * @memberof vtctldata.SetKeyspaceShardingInfoResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ReloadSchemaShardResponse} ReloadSchemaShardResponse + * @returns {vtctldata.SetKeyspaceShardingInfoResponse} SetKeyspaceShardingInfoResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReloadSchemaShardResponse.decode = function decode(reader, length) { + SetKeyspaceShardingInfoResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ReloadSchemaShardResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.SetKeyspaceShardingInfoResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 2: { - if (!(message.events && message.events.length)) - message.events = []; - message.events.push($root.logutil.Event.decode(reader, reader.uint32())); + case 1: { + message.keyspace = $root.topodata.Keyspace.decode(reader, reader.uint32()); break; } default: @@ -127086,141 +149651,129 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ReloadSchemaShardResponse message from the specified reader or buffer, length delimited. + * Decodes a SetKeyspaceShardingInfoResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ReloadSchemaShardResponse + * @memberof vtctldata.SetKeyspaceShardingInfoResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ReloadSchemaShardResponse} ReloadSchemaShardResponse + * @returns {vtctldata.SetKeyspaceShardingInfoResponse} SetKeyspaceShardingInfoResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReloadSchemaShardResponse.decodeDelimited = function decodeDelimited(reader) { + SetKeyspaceShardingInfoResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReloadSchemaShardResponse message. + * Verifies a SetKeyspaceShardingInfoResponse message. 
* @function verify - * @memberof vtctldata.ReloadSchemaShardResponse + * @memberof vtctldata.SetKeyspaceShardingInfoResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReloadSchemaShardResponse.verify = function verify(message) { + SetKeyspaceShardingInfoResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.events != null && message.hasOwnProperty("events")) { - if (!Array.isArray(message.events)) - return "events: array expected"; - for (let i = 0; i < message.events.length; ++i) { - let error = $root.logutil.Event.verify(message.events[i]); - if (error) - return "events." + error; - } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) { + let error = $root.topodata.Keyspace.verify(message.keyspace); + if (error) + return "keyspace." + error; } return null; }; /** - * Creates a ReloadSchemaShardResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SetKeyspaceShardingInfoResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ReloadSchemaShardResponse + * @memberof vtctldata.SetKeyspaceShardingInfoResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ReloadSchemaShardResponse} ReloadSchemaShardResponse + * @returns {vtctldata.SetKeyspaceShardingInfoResponse} SetKeyspaceShardingInfoResponse */ - ReloadSchemaShardResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ReloadSchemaShardResponse) + SetKeyspaceShardingInfoResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.SetKeyspaceShardingInfoResponse) return object; - let message = new $root.vtctldata.ReloadSchemaShardResponse(); - if (object.events) { - if (!Array.isArray(object.events)) - throw TypeError(".vtctldata.ReloadSchemaShardResponse.events: array expected"); - message.events = []; - for (let i = 0; i < object.events.length; ++i) { - if (typeof object.events[i] !== "object") - throw TypeError(".vtctldata.ReloadSchemaShardResponse.events: object expected"); - message.events[i] = $root.logutil.Event.fromObject(object.events[i]); - } + let message = new $root.vtctldata.SetKeyspaceShardingInfoResponse(); + if (object.keyspace != null) { + if (typeof object.keyspace !== "object") + throw TypeError(".vtctldata.SetKeyspaceShardingInfoResponse.keyspace: object expected"); + message.keyspace = $root.topodata.Keyspace.fromObject(object.keyspace); } return message; }; /** - * Creates a plain object from a ReloadSchemaShardResponse message. Also converts values to other types if specified. + * Creates a plain object from a SetKeyspaceShardingInfoResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ReloadSchemaShardResponse + * @memberof vtctldata.SetKeyspaceShardingInfoResponse * @static - * @param {vtctldata.ReloadSchemaShardResponse} message ReloadSchemaShardResponse + * @param {vtctldata.SetKeyspaceShardingInfoResponse} message SetKeyspaceShardingInfoResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReloadSchemaShardResponse.toObject = function toObject(message, options) { + SetKeyspaceShardingInfoResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.events = []; - if (message.events && message.events.length) { - object.events = []; - for (let j = 0; j < message.events.length; ++j) - object.events[j] = $root.logutil.Event.toObject(message.events[j], options); - } + if (options.defaults) + object.keyspace = null; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = $root.topodata.Keyspace.toObject(message.keyspace, options); return object; }; /** - * Converts this ReloadSchemaShardResponse to JSON. + * Converts this SetKeyspaceShardingInfoResponse to JSON. 
* @function toJSON - * @memberof vtctldata.ReloadSchemaShardResponse + * @memberof vtctldata.SetKeyspaceShardingInfoResponse * @instance * @returns {Object.} JSON object */ - ReloadSchemaShardResponse.prototype.toJSON = function toJSON() { + SetKeyspaceShardingInfoResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReloadSchemaShardResponse + * Gets the default type url for SetKeyspaceShardingInfoResponse * @function getTypeUrl - * @memberof vtctldata.ReloadSchemaShardResponse + * @memberof vtctldata.SetKeyspaceShardingInfoResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReloadSchemaShardResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SetKeyspaceShardingInfoResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ReloadSchemaShardResponse"; + return typeUrlPrefix + "/vtctldata.SetKeyspaceShardingInfoResponse"; }; - return ReloadSchemaShardResponse; + return SetKeyspaceShardingInfoResponse; })(); - vtctldata.RemoveBackupRequest = (function() { + vtctldata.SetShardIsPrimaryServingRequest = (function() { /** - * Properties of a RemoveBackupRequest. + * Properties of a SetShardIsPrimaryServingRequest. 
* @memberof vtctldata - * @interface IRemoveBackupRequest - * @property {string|null} [keyspace] RemoveBackupRequest keyspace - * @property {string|null} [shard] RemoveBackupRequest shard - * @property {string|null} [name] RemoveBackupRequest name + * @interface ISetShardIsPrimaryServingRequest + * @property {string|null} [keyspace] SetShardIsPrimaryServingRequest keyspace + * @property {string|null} [shard] SetShardIsPrimaryServingRequest shard + * @property {boolean|null} [is_serving] SetShardIsPrimaryServingRequest is_serving */ /** - * Constructs a new RemoveBackupRequest. + * Constructs a new SetShardIsPrimaryServingRequest. * @memberof vtctldata - * @classdesc Represents a RemoveBackupRequest. - * @implements IRemoveBackupRequest + * @classdesc Represents a SetShardIsPrimaryServingRequest. + * @implements ISetShardIsPrimaryServingRequest * @constructor - * @param {vtctldata.IRemoveBackupRequest=} [properties] Properties to set + * @param {vtctldata.ISetShardIsPrimaryServingRequest=} [properties] Properties to set */ - function RemoveBackupRequest(properties) { + function SetShardIsPrimaryServingRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -127228,90 +149781,90 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * RemoveBackupRequest keyspace. + * SetShardIsPrimaryServingRequest keyspace. * @member {string} keyspace - * @memberof vtctldata.RemoveBackupRequest + * @memberof vtctldata.SetShardIsPrimaryServingRequest * @instance */ - RemoveBackupRequest.prototype.keyspace = ""; + SetShardIsPrimaryServingRequest.prototype.keyspace = ""; /** - * RemoveBackupRequest shard. + * SetShardIsPrimaryServingRequest shard. 
* @member {string} shard - * @memberof vtctldata.RemoveBackupRequest + * @memberof vtctldata.SetShardIsPrimaryServingRequest * @instance */ - RemoveBackupRequest.prototype.shard = ""; + SetShardIsPrimaryServingRequest.prototype.shard = ""; /** - * RemoveBackupRequest name. - * @member {string} name - * @memberof vtctldata.RemoveBackupRequest + * SetShardIsPrimaryServingRequest is_serving. + * @member {boolean} is_serving + * @memberof vtctldata.SetShardIsPrimaryServingRequest * @instance */ - RemoveBackupRequest.prototype.name = ""; + SetShardIsPrimaryServingRequest.prototype.is_serving = false; /** - * Creates a new RemoveBackupRequest instance using the specified properties. + * Creates a new SetShardIsPrimaryServingRequest instance using the specified properties. * @function create - * @memberof vtctldata.RemoveBackupRequest + * @memberof vtctldata.SetShardIsPrimaryServingRequest * @static - * @param {vtctldata.IRemoveBackupRequest=} [properties] Properties to set - * @returns {vtctldata.RemoveBackupRequest} RemoveBackupRequest instance + * @param {vtctldata.ISetShardIsPrimaryServingRequest=} [properties] Properties to set + * @returns {vtctldata.SetShardIsPrimaryServingRequest} SetShardIsPrimaryServingRequest instance */ - RemoveBackupRequest.create = function create(properties) { - return new RemoveBackupRequest(properties); + SetShardIsPrimaryServingRequest.create = function create(properties) { + return new SetShardIsPrimaryServingRequest(properties); }; /** - * Encodes the specified RemoveBackupRequest message. Does not implicitly {@link vtctldata.RemoveBackupRequest.verify|verify} messages. + * Encodes the specified SetShardIsPrimaryServingRequest message. Does not implicitly {@link vtctldata.SetShardIsPrimaryServingRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.RemoveBackupRequest + * @memberof vtctldata.SetShardIsPrimaryServingRequest * @static - * @param {vtctldata.IRemoveBackupRequest} message RemoveBackupRequest message or plain object to encode + * @param {vtctldata.ISetShardIsPrimaryServingRequest} message SetShardIsPrimaryServingRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RemoveBackupRequest.encode = function encode(message, writer) { + SetShardIsPrimaryServingRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.name != null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.name); + if (message.is_serving != null && Object.hasOwnProperty.call(message, "is_serving")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.is_serving); return writer; }; /** - * Encodes the specified RemoveBackupRequest message, length delimited. Does not implicitly {@link vtctldata.RemoveBackupRequest.verify|verify} messages. + * Encodes the specified SetShardIsPrimaryServingRequest message, length delimited. Does not implicitly {@link vtctldata.SetShardIsPrimaryServingRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.RemoveBackupRequest + * @memberof vtctldata.SetShardIsPrimaryServingRequest * @static - * @param {vtctldata.IRemoveBackupRequest} message RemoveBackupRequest message or plain object to encode + * @param {vtctldata.ISetShardIsPrimaryServingRequest} message SetShardIsPrimaryServingRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RemoveBackupRequest.encodeDelimited = function encodeDelimited(message, writer) { + SetShardIsPrimaryServingRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RemoveBackupRequest message from the specified reader or buffer. + * Decodes a SetShardIsPrimaryServingRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.RemoveBackupRequest + * @memberof vtctldata.SetShardIsPrimaryServingRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.RemoveBackupRequest} RemoveBackupRequest + * @returns {vtctldata.SetShardIsPrimaryServingRequest} SetShardIsPrimaryServingRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RemoveBackupRequest.decode = function decode(reader, length) { + SetShardIsPrimaryServingRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RemoveBackupRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.SetShardIsPrimaryServingRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -127324,7 +149877,7 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 3: { - message.name = reader.string(); + message.is_serving = reader.bool(); break; } default: @@ -127336,30 +149889,30 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a RemoveBackupRequest message from the specified reader or buffer, length delimited. + * Decodes a SetShardIsPrimaryServingRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.RemoveBackupRequest + * @memberof vtctldata.SetShardIsPrimaryServingRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.RemoveBackupRequest} RemoveBackupRequest + * @returns {vtctldata.SetShardIsPrimaryServingRequest} SetShardIsPrimaryServingRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RemoveBackupRequest.decodeDelimited = function decodeDelimited(reader) { + SetShardIsPrimaryServingRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RemoveBackupRequest message. + * Verifies a SetShardIsPrimaryServingRequest message. 
* @function verify - * @memberof vtctldata.RemoveBackupRequest + * @memberof vtctldata.SetShardIsPrimaryServingRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RemoveBackupRequest.verify = function verify(message) { + SetShardIsPrimaryServingRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.keyspace != null && message.hasOwnProperty("keyspace")) @@ -127368,106 +149921,107 @@ export const vtctldata = $root.vtctldata = (() => { if (message.shard != null && message.hasOwnProperty("shard")) if (!$util.isString(message.shard)) return "shard: string expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; + if (message.is_serving != null && message.hasOwnProperty("is_serving")) + if (typeof message.is_serving !== "boolean") + return "is_serving: boolean expected"; return null; }; /** - * Creates a RemoveBackupRequest message from a plain object. Also converts values to their respective internal types. + * Creates a SetShardIsPrimaryServingRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.RemoveBackupRequest + * @memberof vtctldata.SetShardIsPrimaryServingRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.RemoveBackupRequest} RemoveBackupRequest + * @returns {vtctldata.SetShardIsPrimaryServingRequest} SetShardIsPrimaryServingRequest */ - RemoveBackupRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.RemoveBackupRequest) + SetShardIsPrimaryServingRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.SetShardIsPrimaryServingRequest) return object; - let message = new $root.vtctldata.RemoveBackupRequest(); + let message = new $root.vtctldata.SetShardIsPrimaryServingRequest(); if (object.keyspace != null) message.keyspace = String(object.keyspace); if (object.shard != null) message.shard = String(object.shard); - if (object.name != null) - message.name = String(object.name); + if (object.is_serving != null) + message.is_serving = Boolean(object.is_serving); return message; }; /** - * Creates a plain object from a RemoveBackupRequest message. Also converts values to other types if specified. + * Creates a plain object from a SetShardIsPrimaryServingRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.RemoveBackupRequest + * @memberof vtctldata.SetShardIsPrimaryServingRequest * @static - * @param {vtctldata.RemoveBackupRequest} message RemoveBackupRequest + * @param {vtctldata.SetShardIsPrimaryServingRequest} message SetShardIsPrimaryServingRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RemoveBackupRequest.toObject = function toObject(message, options) { + SetShardIsPrimaryServingRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { object.keyspace = ""; object.shard = ""; - object.name = ""; + object.is_serving = false; } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; if (message.shard != null && message.hasOwnProperty("shard")) object.shard = message.shard; - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; + if (message.is_serving != null && message.hasOwnProperty("is_serving")) + object.is_serving = message.is_serving; return object; }; /** - * Converts this RemoveBackupRequest to JSON. + * Converts this SetShardIsPrimaryServingRequest to JSON. 
* @function toJSON - * @memberof vtctldata.RemoveBackupRequest + * @memberof vtctldata.SetShardIsPrimaryServingRequest * @instance * @returns {Object.} JSON object */ - RemoveBackupRequest.prototype.toJSON = function toJSON() { + SetShardIsPrimaryServingRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RemoveBackupRequest + * Gets the default type url for SetShardIsPrimaryServingRequest * @function getTypeUrl - * @memberof vtctldata.RemoveBackupRequest + * @memberof vtctldata.SetShardIsPrimaryServingRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RemoveBackupRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SetShardIsPrimaryServingRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.RemoveBackupRequest"; + return typeUrlPrefix + "/vtctldata.SetShardIsPrimaryServingRequest"; }; - return RemoveBackupRequest; + return SetShardIsPrimaryServingRequest; })(); - vtctldata.RemoveBackupResponse = (function() { + vtctldata.SetShardIsPrimaryServingResponse = (function() { /** - * Properties of a RemoveBackupResponse. + * Properties of a SetShardIsPrimaryServingResponse. * @memberof vtctldata - * @interface IRemoveBackupResponse + * @interface ISetShardIsPrimaryServingResponse + * @property {topodata.IShard|null} [shard] SetShardIsPrimaryServingResponse shard */ /** - * Constructs a new RemoveBackupResponse. + * Constructs a new SetShardIsPrimaryServingResponse. * @memberof vtctldata - * @classdesc Represents a RemoveBackupResponse. - * @implements IRemoveBackupResponse + * @classdesc Represents a SetShardIsPrimaryServingResponse. 
+ * @implements ISetShardIsPrimaryServingResponse * @constructor - * @param {vtctldata.IRemoveBackupResponse=} [properties] Properties to set + * @param {vtctldata.ISetShardIsPrimaryServingResponse=} [properties] Properties to set */ - function RemoveBackupResponse(properties) { + function SetShardIsPrimaryServingResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -127475,63 +150029,77 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new RemoveBackupResponse instance using the specified properties. + * SetShardIsPrimaryServingResponse shard. + * @member {topodata.IShard|null|undefined} shard + * @memberof vtctldata.SetShardIsPrimaryServingResponse + * @instance + */ + SetShardIsPrimaryServingResponse.prototype.shard = null; + + /** + * Creates a new SetShardIsPrimaryServingResponse instance using the specified properties. * @function create - * @memberof vtctldata.RemoveBackupResponse + * @memberof vtctldata.SetShardIsPrimaryServingResponse * @static - * @param {vtctldata.IRemoveBackupResponse=} [properties] Properties to set - * @returns {vtctldata.RemoveBackupResponse} RemoveBackupResponse instance + * @param {vtctldata.ISetShardIsPrimaryServingResponse=} [properties] Properties to set + * @returns {vtctldata.SetShardIsPrimaryServingResponse} SetShardIsPrimaryServingResponse instance */ - RemoveBackupResponse.create = function create(properties) { - return new RemoveBackupResponse(properties); + SetShardIsPrimaryServingResponse.create = function create(properties) { + return new SetShardIsPrimaryServingResponse(properties); }; /** - * Encodes the specified RemoveBackupResponse message. Does not implicitly {@link vtctldata.RemoveBackupResponse.verify|verify} messages. + * Encodes the specified SetShardIsPrimaryServingResponse message. Does not implicitly {@link vtctldata.SetShardIsPrimaryServingResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.RemoveBackupResponse + * @memberof vtctldata.SetShardIsPrimaryServingResponse * @static - * @param {vtctldata.IRemoveBackupResponse} message RemoveBackupResponse message or plain object to encode + * @param {vtctldata.ISetShardIsPrimaryServingResponse} message SetShardIsPrimaryServingResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RemoveBackupResponse.encode = function encode(message, writer) { + SetShardIsPrimaryServingResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + $root.topodata.Shard.encode(message.shard, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified RemoveBackupResponse message, length delimited. Does not implicitly {@link vtctldata.RemoveBackupResponse.verify|verify} messages. + * Encodes the specified SetShardIsPrimaryServingResponse message, length delimited. Does not implicitly {@link vtctldata.SetShardIsPrimaryServingResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.RemoveBackupResponse + * @memberof vtctldata.SetShardIsPrimaryServingResponse * @static - * @param {vtctldata.IRemoveBackupResponse} message RemoveBackupResponse message or plain object to encode + * @param {vtctldata.ISetShardIsPrimaryServingResponse} message SetShardIsPrimaryServingResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RemoveBackupResponse.encodeDelimited = function encodeDelimited(message, writer) { + SetShardIsPrimaryServingResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RemoveBackupResponse message from the specified reader or buffer. 
+ * Decodes a SetShardIsPrimaryServingResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.RemoveBackupResponse + * @memberof vtctldata.SetShardIsPrimaryServingResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.RemoveBackupResponse} RemoveBackupResponse + * @returns {vtctldata.SetShardIsPrimaryServingResponse} SetShardIsPrimaryServingResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RemoveBackupResponse.decode = function decode(reader, length) { + SetShardIsPrimaryServingResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RemoveBackupResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SetShardIsPrimaryServingResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.shard = $root.topodata.Shard.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -127541,112 +150109,135 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a RemoveBackupResponse message from the specified reader or buffer, length delimited. + * Decodes a SetShardIsPrimaryServingResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.RemoveBackupResponse + * @memberof vtctldata.SetShardIsPrimaryServingResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.RemoveBackupResponse} RemoveBackupResponse + * @returns {vtctldata.SetShardIsPrimaryServingResponse} SetShardIsPrimaryServingResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RemoveBackupResponse.decodeDelimited = function decodeDelimited(reader) { + SetShardIsPrimaryServingResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RemoveBackupResponse message. + * Verifies a SetShardIsPrimaryServingResponse message. * @function verify - * @memberof vtctldata.RemoveBackupResponse + * @memberof vtctldata.SetShardIsPrimaryServingResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RemoveBackupResponse.verify = function verify(message) { + SetShardIsPrimaryServingResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.shard != null && message.hasOwnProperty("shard")) { + let error = $root.topodata.Shard.verify(message.shard); + if (error) + return "shard." + error; + } return null; }; /** - * Creates a RemoveBackupResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SetShardIsPrimaryServingResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.RemoveBackupResponse + * @memberof vtctldata.SetShardIsPrimaryServingResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.RemoveBackupResponse} RemoveBackupResponse + * @returns {vtctldata.SetShardIsPrimaryServingResponse} SetShardIsPrimaryServingResponse */ - RemoveBackupResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.RemoveBackupResponse) + SetShardIsPrimaryServingResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.SetShardIsPrimaryServingResponse) return object; - return new $root.vtctldata.RemoveBackupResponse(); + let message = new $root.vtctldata.SetShardIsPrimaryServingResponse(); + if (object.shard != null) { + if (typeof object.shard !== "object") + throw TypeError(".vtctldata.SetShardIsPrimaryServingResponse.shard: object expected"); + message.shard = $root.topodata.Shard.fromObject(object.shard); + } + return message; }; /** - * Creates a plain object from a RemoveBackupResponse message. Also converts values to other types if specified. + * Creates a plain object from a SetShardIsPrimaryServingResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.RemoveBackupResponse + * @memberof vtctldata.SetShardIsPrimaryServingResponse * @static - * @param {vtctldata.RemoveBackupResponse} message RemoveBackupResponse + * @param {vtctldata.SetShardIsPrimaryServingResponse} message SetShardIsPrimaryServingResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RemoveBackupResponse.toObject = function toObject() { - return {}; + SetShardIsPrimaryServingResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.shard = null; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = $root.topodata.Shard.toObject(message.shard, options); + return object; }; /** - * Converts this RemoveBackupResponse to JSON. + * Converts this SetShardIsPrimaryServingResponse to JSON. * @function toJSON - * @memberof vtctldata.RemoveBackupResponse + * @memberof vtctldata.SetShardIsPrimaryServingResponse * @instance * @returns {Object.} JSON object */ - RemoveBackupResponse.prototype.toJSON = function toJSON() { + SetShardIsPrimaryServingResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RemoveBackupResponse + * Gets the default type url for SetShardIsPrimaryServingResponse * @function getTypeUrl - * @memberof vtctldata.RemoveBackupResponse + * @memberof vtctldata.SetShardIsPrimaryServingResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RemoveBackupResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SetShardIsPrimaryServingResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.RemoveBackupResponse"; + 
return typeUrlPrefix + "/vtctldata.SetShardIsPrimaryServingResponse"; }; - return RemoveBackupResponse; + return SetShardIsPrimaryServingResponse; })(); - vtctldata.RemoveKeyspaceCellRequest = (function() { + vtctldata.SetShardTabletControlRequest = (function() { /** - * Properties of a RemoveKeyspaceCellRequest. + * Properties of a SetShardTabletControlRequest. * @memberof vtctldata - * @interface IRemoveKeyspaceCellRequest - * @property {string|null} [keyspace] RemoveKeyspaceCellRequest keyspace - * @property {string|null} [cell] RemoveKeyspaceCellRequest cell - * @property {boolean|null} [force] RemoveKeyspaceCellRequest force - * @property {boolean|null} [recursive] RemoveKeyspaceCellRequest recursive + * @interface ISetShardTabletControlRequest + * @property {string|null} [keyspace] SetShardTabletControlRequest keyspace + * @property {string|null} [shard] SetShardTabletControlRequest shard + * @property {topodata.TabletType|null} [tablet_type] SetShardTabletControlRequest tablet_type + * @property {Array.|null} [cells] SetShardTabletControlRequest cells + * @property {Array.|null} [denied_tables] SetShardTabletControlRequest denied_tables + * @property {boolean|null} [disable_query_service] SetShardTabletControlRequest disable_query_service + * @property {boolean|null} [remove] SetShardTabletControlRequest remove */ /** - * Constructs a new RemoveKeyspaceCellRequest. + * Constructs a new SetShardTabletControlRequest. * @memberof vtctldata - * @classdesc Represents a RemoveKeyspaceCellRequest. - * @implements IRemoveKeyspaceCellRequest + * @classdesc Represents a SetShardTabletControlRequest. 
+ * @implements ISetShardTabletControlRequest * @constructor - * @param {vtctldata.IRemoveKeyspaceCellRequest=} [properties] Properties to set + * @param {vtctldata.ISetShardTabletControlRequest=} [properties] Properties to set */ - function RemoveKeyspaceCellRequest(properties) { + function SetShardTabletControlRequest(properties) { + this.cells = []; + this.denied_tables = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -127654,100 +150245,132 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * RemoveKeyspaceCellRequest keyspace. + * SetShardTabletControlRequest keyspace. * @member {string} keyspace - * @memberof vtctldata.RemoveKeyspaceCellRequest + * @memberof vtctldata.SetShardTabletControlRequest * @instance */ - RemoveKeyspaceCellRequest.prototype.keyspace = ""; + SetShardTabletControlRequest.prototype.keyspace = ""; /** - * RemoveKeyspaceCellRequest cell. - * @member {string} cell - * @memberof vtctldata.RemoveKeyspaceCellRequest + * SetShardTabletControlRequest shard. + * @member {string} shard + * @memberof vtctldata.SetShardTabletControlRequest * @instance */ - RemoveKeyspaceCellRequest.prototype.cell = ""; + SetShardTabletControlRequest.prototype.shard = ""; /** - * RemoveKeyspaceCellRequest force. - * @member {boolean} force - * @memberof vtctldata.RemoveKeyspaceCellRequest + * SetShardTabletControlRequest tablet_type. + * @member {topodata.TabletType} tablet_type + * @memberof vtctldata.SetShardTabletControlRequest * @instance */ - RemoveKeyspaceCellRequest.prototype.force = false; + SetShardTabletControlRequest.prototype.tablet_type = 0; /** - * RemoveKeyspaceCellRequest recursive. - * @member {boolean} recursive - * @memberof vtctldata.RemoveKeyspaceCellRequest + * SetShardTabletControlRequest cells. 
+ * @member {Array.} cells + * @memberof vtctldata.SetShardTabletControlRequest * @instance */ - RemoveKeyspaceCellRequest.prototype.recursive = false; + SetShardTabletControlRequest.prototype.cells = $util.emptyArray; /** - * Creates a new RemoveKeyspaceCellRequest instance using the specified properties. + * SetShardTabletControlRequest denied_tables. + * @member {Array.} denied_tables + * @memberof vtctldata.SetShardTabletControlRequest + * @instance + */ + SetShardTabletControlRequest.prototype.denied_tables = $util.emptyArray; + + /** + * SetShardTabletControlRequest disable_query_service. + * @member {boolean} disable_query_service + * @memberof vtctldata.SetShardTabletControlRequest + * @instance + */ + SetShardTabletControlRequest.prototype.disable_query_service = false; + + /** + * SetShardTabletControlRequest remove. + * @member {boolean} remove + * @memberof vtctldata.SetShardTabletControlRequest + * @instance + */ + SetShardTabletControlRequest.prototype.remove = false; + + /** + * Creates a new SetShardTabletControlRequest instance using the specified properties. * @function create - * @memberof vtctldata.RemoveKeyspaceCellRequest + * @memberof vtctldata.SetShardTabletControlRequest * @static - * @param {vtctldata.IRemoveKeyspaceCellRequest=} [properties] Properties to set - * @returns {vtctldata.RemoveKeyspaceCellRequest} RemoveKeyspaceCellRequest instance + * @param {vtctldata.ISetShardTabletControlRequest=} [properties] Properties to set + * @returns {vtctldata.SetShardTabletControlRequest} SetShardTabletControlRequest instance */ - RemoveKeyspaceCellRequest.create = function create(properties) { - return new RemoveKeyspaceCellRequest(properties); + SetShardTabletControlRequest.create = function create(properties) { + return new SetShardTabletControlRequest(properties); }; /** - * Encodes the specified RemoveKeyspaceCellRequest message. Does not implicitly {@link vtctldata.RemoveKeyspaceCellRequest.verify|verify} messages. 
+ * Encodes the specified SetShardTabletControlRequest message. Does not implicitly {@link vtctldata.SetShardTabletControlRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.RemoveKeyspaceCellRequest + * @memberof vtctldata.SetShardTabletControlRequest * @static - * @param {vtctldata.IRemoveKeyspaceCellRequest} message RemoveKeyspaceCellRequest message or plain object to encode + * @param {vtctldata.ISetShardTabletControlRequest} message SetShardTabletControlRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RemoveKeyspaceCellRequest.encode = function encode(message, writer) { + SetShardTabletControlRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.cell != null && Object.hasOwnProperty.call(message, "cell")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.cell); - if (message.force != null && Object.hasOwnProperty.call(message, "force")) - writer.uint32(/* id 3, wireType 0 =*/24).bool(message.force); - if (message.recursive != null && Object.hasOwnProperty.call(message, "recursive")) - writer.uint32(/* id 4, wireType 0 =*/32).bool(message.recursive); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.tablet_type != null && Object.hasOwnProperty.call(message, "tablet_type")) + writer.uint32(/* id 3, wireType 0 =*/24).int32(message.tablet_type); + if (message.cells != null && message.cells.length) + for (let i = 0; i < message.cells.length; ++i) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.cells[i]); + if (message.denied_tables != null && message.denied_tables.length) + for (let i = 0; i < message.denied_tables.length; ++i) + 
writer.uint32(/* id 5, wireType 2 =*/42).string(message.denied_tables[i]); + if (message.disable_query_service != null && Object.hasOwnProperty.call(message, "disable_query_service")) + writer.uint32(/* id 6, wireType 0 =*/48).bool(message.disable_query_service); + if (message.remove != null && Object.hasOwnProperty.call(message, "remove")) + writer.uint32(/* id 7, wireType 0 =*/56).bool(message.remove); return writer; }; /** - * Encodes the specified RemoveKeyspaceCellRequest message, length delimited. Does not implicitly {@link vtctldata.RemoveKeyspaceCellRequest.verify|verify} messages. + * Encodes the specified SetShardTabletControlRequest message, length delimited. Does not implicitly {@link vtctldata.SetShardTabletControlRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.RemoveKeyspaceCellRequest + * @memberof vtctldata.SetShardTabletControlRequest * @static - * @param {vtctldata.IRemoveKeyspaceCellRequest} message RemoveKeyspaceCellRequest message or plain object to encode + * @param {vtctldata.ISetShardTabletControlRequest} message SetShardTabletControlRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RemoveKeyspaceCellRequest.encodeDelimited = function encodeDelimited(message, writer) { + SetShardTabletControlRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RemoveKeyspaceCellRequest message from the specified reader or buffer. + * Decodes a SetShardTabletControlRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.RemoveKeyspaceCellRequest + * @memberof vtctldata.SetShardTabletControlRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.RemoveKeyspaceCellRequest} RemoveKeyspaceCellRequest + * @returns {vtctldata.SetShardTabletControlRequest} SetShardTabletControlRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RemoveKeyspaceCellRequest.decode = function decode(reader, length) { + SetShardTabletControlRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RemoveKeyspaceCellRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SetShardTabletControlRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -127756,15 +150379,31 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 2: { - message.cell = reader.string(); + message.shard = reader.string(); break; } case 3: { - message.force = reader.bool(); + message.tablet_type = reader.int32(); break; } case 4: { - message.recursive = reader.bool(); + if (!(message.cells && message.cells.length)) + message.cells = []; + message.cells.push(reader.string()); + break; + } + case 5: { + if (!(message.denied_tables && message.denied_tables.length)) + message.denied_tables = []; + message.denied_tables.push(reader.string()); + break; + } + case 6: { + message.disable_query_service = reader.bool(); + break; + } + case 7: { + message.remove = reader.bool(); break; } default: @@ -127776,146 +150415,261 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a RemoveKeyspaceCellRequest message 
from the specified reader or buffer, length delimited. + * Decodes a SetShardTabletControlRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.RemoveKeyspaceCellRequest + * @memberof vtctldata.SetShardTabletControlRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.RemoveKeyspaceCellRequest} RemoveKeyspaceCellRequest + * @returns {vtctldata.SetShardTabletControlRequest} SetShardTabletControlRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RemoveKeyspaceCellRequest.decodeDelimited = function decodeDelimited(reader) { + SetShardTabletControlRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RemoveKeyspaceCellRequest message. + * Verifies a SetShardTabletControlRequest message. 
* @function verify - * @memberof vtctldata.RemoveKeyspaceCellRequest + * @memberof vtctldata.SetShardTabletControlRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RemoveKeyspaceCellRequest.verify = function verify(message) { + SetShardTabletControlRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.keyspace != null && message.hasOwnProperty("keyspace")) if (!$util.isString(message.keyspace)) return "keyspace: string expected"; - if (message.cell != null && message.hasOwnProperty("cell")) - if (!$util.isString(message.cell)) - return "cell: string expected"; - if (message.force != null && message.hasOwnProperty("force")) - if (typeof message.force !== "boolean") - return "force: boolean expected"; - if (message.recursive != null && message.hasOwnProperty("recursive")) - if (typeof message.recursive !== "boolean") - return "recursive: boolean expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) + switch (message.tablet_type) { + default: + return "tablet_type: enum value expected"; + case 0: + case 1: + case 1: + case 2: + case 3: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + } + if (message.cells != null && message.hasOwnProperty("cells")) { + if (!Array.isArray(message.cells)) + return "cells: array expected"; + for (let i = 0; i < message.cells.length; ++i) + if (!$util.isString(message.cells[i])) + return "cells: string[] expected"; + } + if (message.denied_tables != null && message.hasOwnProperty("denied_tables")) { + if (!Array.isArray(message.denied_tables)) + return "denied_tables: array expected"; + for (let i = 0; i < message.denied_tables.length; ++i) + if 
(!$util.isString(message.denied_tables[i])) + return "denied_tables: string[] expected"; + } + if (message.disable_query_service != null && message.hasOwnProperty("disable_query_service")) + if (typeof message.disable_query_service !== "boolean") + return "disable_query_service: boolean expected"; + if (message.remove != null && message.hasOwnProperty("remove")) + if (typeof message.remove !== "boolean") + return "remove: boolean expected"; return null; }; /** - * Creates a RemoveKeyspaceCellRequest message from a plain object. Also converts values to their respective internal types. + * Creates a SetShardTabletControlRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.RemoveKeyspaceCellRequest + * @memberof vtctldata.SetShardTabletControlRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.RemoveKeyspaceCellRequest} RemoveKeyspaceCellRequest + * @returns {vtctldata.SetShardTabletControlRequest} SetShardTabletControlRequest */ - RemoveKeyspaceCellRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.RemoveKeyspaceCellRequest) + SetShardTabletControlRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.SetShardTabletControlRequest) return object; - let message = new $root.vtctldata.RemoveKeyspaceCellRequest(); + let message = new $root.vtctldata.SetShardTabletControlRequest(); if (object.keyspace != null) message.keyspace = String(object.keyspace); - if (object.cell != null) - message.cell = String(object.cell); - if (object.force != null) - message.force = Boolean(object.force); - if (object.recursive != null) - message.recursive = Boolean(object.recursive); + if (object.shard != null) + message.shard = String(object.shard); + switch (object.tablet_type) { + default: + if (typeof object.tablet_type === "number") { + message.tablet_type = object.tablet_type; + break; + } + 
break; + case "UNKNOWN": + case 0: + message.tablet_type = 0; + break; + case "PRIMARY": + case 1: + message.tablet_type = 1; + break; + case "MASTER": + case 1: + message.tablet_type = 1; + break; + case "REPLICA": + case 2: + message.tablet_type = 2; + break; + case "RDONLY": + case 3: + message.tablet_type = 3; + break; + case "BATCH": + case 3: + message.tablet_type = 3; + break; + case "SPARE": + case 4: + message.tablet_type = 4; + break; + case "EXPERIMENTAL": + case 5: + message.tablet_type = 5; + break; + case "BACKUP": + case 6: + message.tablet_type = 6; + break; + case "RESTORE": + case 7: + message.tablet_type = 7; + break; + case "DRAINED": + case 8: + message.tablet_type = 8; + break; + } + if (object.cells) { + if (!Array.isArray(object.cells)) + throw TypeError(".vtctldata.SetShardTabletControlRequest.cells: array expected"); + message.cells = []; + for (let i = 0; i < object.cells.length; ++i) + message.cells[i] = String(object.cells[i]); + } + if (object.denied_tables) { + if (!Array.isArray(object.denied_tables)) + throw TypeError(".vtctldata.SetShardTabletControlRequest.denied_tables: array expected"); + message.denied_tables = []; + for (let i = 0; i < object.denied_tables.length; ++i) + message.denied_tables[i] = String(object.denied_tables[i]); + } + if (object.disable_query_service != null) + message.disable_query_service = Boolean(object.disable_query_service); + if (object.remove != null) + message.remove = Boolean(object.remove); return message; }; /** - * Creates a plain object from a RemoveKeyspaceCellRequest message. Also converts values to other types if specified. + * Creates a plain object from a SetShardTabletControlRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.RemoveKeyspaceCellRequest + * @memberof vtctldata.SetShardTabletControlRequest * @static - * @param {vtctldata.RemoveKeyspaceCellRequest} message RemoveKeyspaceCellRequest + * @param {vtctldata.SetShardTabletControlRequest} message SetShardTabletControlRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RemoveKeyspaceCellRequest.toObject = function toObject(message, options) { + SetShardTabletControlRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) { + object.cells = []; + object.denied_tables = []; + } if (options.defaults) { object.keyspace = ""; - object.cell = ""; - object.force = false; - object.recursive = false; + object.shard = ""; + object.tablet_type = options.enums === String ? "UNKNOWN" : 0; + object.disable_query_service = false; + object.remove = false; } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; - if (message.cell != null && message.hasOwnProperty("cell")) - object.cell = message.cell; - if (message.force != null && message.hasOwnProperty("force")) - object.force = message.force; - if (message.recursive != null && message.hasOwnProperty("recursive")) - object.recursive = message.recursive; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) + object.tablet_type = options.enums === String ? $root.topodata.TabletType[message.tablet_type] === undefined ? 
message.tablet_type : $root.topodata.TabletType[message.tablet_type] : message.tablet_type; + if (message.cells && message.cells.length) { + object.cells = []; + for (let j = 0; j < message.cells.length; ++j) + object.cells[j] = message.cells[j]; + } + if (message.denied_tables && message.denied_tables.length) { + object.denied_tables = []; + for (let j = 0; j < message.denied_tables.length; ++j) + object.denied_tables[j] = message.denied_tables[j]; + } + if (message.disable_query_service != null && message.hasOwnProperty("disable_query_service")) + object.disable_query_service = message.disable_query_service; + if (message.remove != null && message.hasOwnProperty("remove")) + object.remove = message.remove; return object; }; /** - * Converts this RemoveKeyspaceCellRequest to JSON. + * Converts this SetShardTabletControlRequest to JSON. * @function toJSON - * @memberof vtctldata.RemoveKeyspaceCellRequest + * @memberof vtctldata.SetShardTabletControlRequest * @instance * @returns {Object.} JSON object */ - RemoveKeyspaceCellRequest.prototype.toJSON = function toJSON() { + SetShardTabletControlRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RemoveKeyspaceCellRequest + * Gets the default type url for SetShardTabletControlRequest * @function getTypeUrl - * @memberof vtctldata.RemoveKeyspaceCellRequest + * @memberof vtctldata.SetShardTabletControlRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RemoveKeyspaceCellRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SetShardTabletControlRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.RemoveKeyspaceCellRequest"; + return typeUrlPrefix + "/vtctldata.SetShardTabletControlRequest"; }; - 
return RemoveKeyspaceCellRequest; + return SetShardTabletControlRequest; })(); - vtctldata.RemoveKeyspaceCellResponse = (function() { + vtctldata.SetShardTabletControlResponse = (function() { /** - * Properties of a RemoveKeyspaceCellResponse. + * Properties of a SetShardTabletControlResponse. * @memberof vtctldata - * @interface IRemoveKeyspaceCellResponse + * @interface ISetShardTabletControlResponse + * @property {topodata.IShard|null} [shard] SetShardTabletControlResponse shard */ /** - * Constructs a new RemoveKeyspaceCellResponse. + * Constructs a new SetShardTabletControlResponse. * @memberof vtctldata - * @classdesc Represents a RemoveKeyspaceCellResponse. - * @implements IRemoveKeyspaceCellResponse + * @classdesc Represents a SetShardTabletControlResponse. + * @implements ISetShardTabletControlResponse * @constructor - * @param {vtctldata.IRemoveKeyspaceCellResponse=} [properties] Properties to set + * @param {vtctldata.ISetShardTabletControlResponse=} [properties] Properties to set */ - function RemoveKeyspaceCellResponse(properties) { + function SetShardTabletControlResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -127923,63 +150677,77 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new RemoveKeyspaceCellResponse instance using the specified properties. + * SetShardTabletControlResponse shard. + * @member {topodata.IShard|null|undefined} shard + * @memberof vtctldata.SetShardTabletControlResponse + * @instance + */ + SetShardTabletControlResponse.prototype.shard = null; + + /** + * Creates a new SetShardTabletControlResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.RemoveKeyspaceCellResponse + * @memberof vtctldata.SetShardTabletControlResponse * @static - * @param {vtctldata.IRemoveKeyspaceCellResponse=} [properties] Properties to set - * @returns {vtctldata.RemoveKeyspaceCellResponse} RemoveKeyspaceCellResponse instance + * @param {vtctldata.ISetShardTabletControlResponse=} [properties] Properties to set + * @returns {vtctldata.SetShardTabletControlResponse} SetShardTabletControlResponse instance */ - RemoveKeyspaceCellResponse.create = function create(properties) { - return new RemoveKeyspaceCellResponse(properties); + SetShardTabletControlResponse.create = function create(properties) { + return new SetShardTabletControlResponse(properties); }; /** - * Encodes the specified RemoveKeyspaceCellResponse message. Does not implicitly {@link vtctldata.RemoveKeyspaceCellResponse.verify|verify} messages. + * Encodes the specified SetShardTabletControlResponse message. Does not implicitly {@link vtctldata.SetShardTabletControlResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.RemoveKeyspaceCellResponse + * @memberof vtctldata.SetShardTabletControlResponse * @static - * @param {vtctldata.IRemoveKeyspaceCellResponse} message RemoveKeyspaceCellResponse message or plain object to encode + * @param {vtctldata.ISetShardTabletControlResponse} message SetShardTabletControlResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RemoveKeyspaceCellResponse.encode = function encode(message, writer) { + SetShardTabletControlResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + $root.topodata.Shard.encode(message.shard, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified RemoveKeyspaceCellResponse message, length delimited. 
Does not implicitly {@link vtctldata.RemoveKeyspaceCellResponse.verify|verify} messages. + * Encodes the specified SetShardTabletControlResponse message, length delimited. Does not implicitly {@link vtctldata.SetShardTabletControlResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.RemoveKeyspaceCellResponse + * @memberof vtctldata.SetShardTabletControlResponse * @static - * @param {vtctldata.IRemoveKeyspaceCellResponse} message RemoveKeyspaceCellResponse message or plain object to encode + * @param {vtctldata.ISetShardTabletControlResponse} message SetShardTabletControlResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RemoveKeyspaceCellResponse.encodeDelimited = function encodeDelimited(message, writer) { + SetShardTabletControlResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RemoveKeyspaceCellResponse message from the specified reader or buffer. + * Decodes a SetShardTabletControlResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.RemoveKeyspaceCellResponse + * @memberof vtctldata.SetShardTabletControlResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.RemoveKeyspaceCellResponse} RemoveKeyspaceCellResponse + * @returns {vtctldata.SetShardTabletControlResponse} SetShardTabletControlResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RemoveKeyspaceCellResponse.decode = function decode(reader, length) { + SetShardTabletControlResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.RemoveKeyspaceCellResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SetShardTabletControlResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.shard = $root.topodata.Shard.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -127989,113 +150757,128 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a RemoveKeyspaceCellResponse message from the specified reader or buffer, length delimited. + * Decodes a SetShardTabletControlResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.RemoveKeyspaceCellResponse + * @memberof vtctldata.SetShardTabletControlResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.RemoveKeyspaceCellResponse} RemoveKeyspaceCellResponse + * @returns {vtctldata.SetShardTabletControlResponse} SetShardTabletControlResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RemoveKeyspaceCellResponse.decodeDelimited = function decodeDelimited(reader) { + SetShardTabletControlResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RemoveKeyspaceCellResponse message. + * Verifies a SetShardTabletControlResponse message. 
* @function verify - * @memberof vtctldata.RemoveKeyspaceCellResponse + * @memberof vtctldata.SetShardTabletControlResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RemoveKeyspaceCellResponse.verify = function verify(message) { + SetShardTabletControlResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.shard != null && message.hasOwnProperty("shard")) { + let error = $root.topodata.Shard.verify(message.shard); + if (error) + return "shard." + error; + } return null; }; /** - * Creates a RemoveKeyspaceCellResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SetShardTabletControlResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.RemoveKeyspaceCellResponse + * @memberof vtctldata.SetShardTabletControlResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.RemoveKeyspaceCellResponse} RemoveKeyspaceCellResponse + * @returns {vtctldata.SetShardTabletControlResponse} SetShardTabletControlResponse */ - RemoveKeyspaceCellResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.RemoveKeyspaceCellResponse) + SetShardTabletControlResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.SetShardTabletControlResponse) return object; - return new $root.vtctldata.RemoveKeyspaceCellResponse(); + let message = new $root.vtctldata.SetShardTabletControlResponse(); + if (object.shard != null) { + if (typeof object.shard !== "object") + throw TypeError(".vtctldata.SetShardTabletControlResponse.shard: object expected"); + message.shard = $root.topodata.Shard.fromObject(object.shard); + } + return message; }; /** - * Creates a plain object from a 
RemoveKeyspaceCellResponse message. Also converts values to other types if specified. + * Creates a plain object from a SetShardTabletControlResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.RemoveKeyspaceCellResponse + * @memberof vtctldata.SetShardTabletControlResponse * @static - * @param {vtctldata.RemoveKeyspaceCellResponse} message RemoveKeyspaceCellResponse + * @param {vtctldata.SetShardTabletControlResponse} message SetShardTabletControlResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RemoveKeyspaceCellResponse.toObject = function toObject() { - return {}; + SetShardTabletControlResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.shard = null; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = $root.topodata.Shard.toObject(message.shard, options); + return object; }; /** - * Converts this RemoveKeyspaceCellResponse to JSON. + * Converts this SetShardTabletControlResponse to JSON. 
* @function toJSON - * @memberof vtctldata.RemoveKeyspaceCellResponse + * @memberof vtctldata.SetShardTabletControlResponse * @instance * @returns {Object.} JSON object */ - RemoveKeyspaceCellResponse.prototype.toJSON = function toJSON() { + SetShardTabletControlResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RemoveKeyspaceCellResponse + * Gets the default type url for SetShardTabletControlResponse * @function getTypeUrl - * @memberof vtctldata.RemoveKeyspaceCellResponse + * @memberof vtctldata.SetShardTabletControlResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RemoveKeyspaceCellResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SetShardTabletControlResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.RemoveKeyspaceCellResponse"; + return typeUrlPrefix + "/vtctldata.SetShardTabletControlResponse"; }; - return RemoveKeyspaceCellResponse; + return SetShardTabletControlResponse; })(); - vtctldata.RemoveShardCellRequest = (function() { + vtctldata.SetWritableRequest = (function() { /** - * Properties of a RemoveShardCellRequest. + * Properties of a SetWritableRequest. 
* @memberof vtctldata - * @interface IRemoveShardCellRequest - * @property {string|null} [keyspace] RemoveShardCellRequest keyspace - * @property {string|null} [shard_name] RemoveShardCellRequest shard_name - * @property {string|null} [cell] RemoveShardCellRequest cell - * @property {boolean|null} [force] RemoveShardCellRequest force - * @property {boolean|null} [recursive] RemoveShardCellRequest recursive + * @interface ISetWritableRequest + * @property {topodata.ITabletAlias|null} [tablet_alias] SetWritableRequest tablet_alias + * @property {boolean|null} [writable] SetWritableRequest writable */ /** - * Constructs a new RemoveShardCellRequest. + * Constructs a new SetWritableRequest. * @memberof vtctldata - * @classdesc Represents a RemoveShardCellRequest. - * @implements IRemoveShardCellRequest + * @classdesc Represents a SetWritableRequest. + * @implements ISetWritableRequest * @constructor - * @param {vtctldata.IRemoveShardCellRequest=} [properties] Properties to set + * @param {vtctldata.ISetWritableRequest=} [properties] Properties to set */ - function RemoveShardCellRequest(properties) { + function SetWritableRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -128103,131 +150886,89 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * RemoveShardCellRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.RemoveShardCellRequest - * @instance - */ - RemoveShardCellRequest.prototype.keyspace = ""; - - /** - * RemoveShardCellRequest shard_name. - * @member {string} shard_name - * @memberof vtctldata.RemoveShardCellRequest - * @instance - */ - RemoveShardCellRequest.prototype.shard_name = ""; - - /** - * RemoveShardCellRequest cell. - * @member {string} cell - * @memberof vtctldata.RemoveShardCellRequest - * @instance - */ - RemoveShardCellRequest.prototype.cell = ""; - - /** - * RemoveShardCellRequest force. 
- * @member {boolean} force - * @memberof vtctldata.RemoveShardCellRequest + * SetWritableRequest tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.SetWritableRequest * @instance */ - RemoveShardCellRequest.prototype.force = false; + SetWritableRequest.prototype.tablet_alias = null; /** - * RemoveShardCellRequest recursive. - * @member {boolean} recursive - * @memberof vtctldata.RemoveShardCellRequest + * SetWritableRequest writable. + * @member {boolean} writable + * @memberof vtctldata.SetWritableRequest * @instance */ - RemoveShardCellRequest.prototype.recursive = false; + SetWritableRequest.prototype.writable = false; /** - * Creates a new RemoveShardCellRequest instance using the specified properties. + * Creates a new SetWritableRequest instance using the specified properties. * @function create - * @memberof vtctldata.RemoveShardCellRequest + * @memberof vtctldata.SetWritableRequest * @static - * @param {vtctldata.IRemoveShardCellRequest=} [properties] Properties to set - * @returns {vtctldata.RemoveShardCellRequest} RemoveShardCellRequest instance + * @param {vtctldata.ISetWritableRequest=} [properties] Properties to set + * @returns {vtctldata.SetWritableRequest} SetWritableRequest instance */ - RemoveShardCellRequest.create = function create(properties) { - return new RemoveShardCellRequest(properties); + SetWritableRequest.create = function create(properties) { + return new SetWritableRequest(properties); }; /** - * Encodes the specified RemoveShardCellRequest message. Does not implicitly {@link vtctldata.RemoveShardCellRequest.verify|verify} messages. + * Encodes the specified SetWritableRequest message. Does not implicitly {@link vtctldata.SetWritableRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.RemoveShardCellRequest + * @memberof vtctldata.SetWritableRequest * @static - * @param {vtctldata.IRemoveShardCellRequest} message RemoveShardCellRequest message or plain object to encode + * @param {vtctldata.ISetWritableRequest} message SetWritableRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RemoveShardCellRequest.encode = function encode(message, writer) { + SetWritableRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard_name != null && Object.hasOwnProperty.call(message, "shard_name")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard_name); - if (message.cell != null && Object.hasOwnProperty.call(message, "cell")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.cell); - if (message.force != null && Object.hasOwnProperty.call(message, "force")) - writer.uint32(/* id 4, wireType 0 =*/32).bool(message.force); - if (message.recursive != null && Object.hasOwnProperty.call(message, "recursive")) - writer.uint32(/* id 5, wireType 0 =*/40).bool(message.recursive); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.writable != null && Object.hasOwnProperty.call(message, "writable")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.writable); return writer; }; /** - * Encodes the specified RemoveShardCellRequest message, length delimited. Does not implicitly {@link vtctldata.RemoveShardCellRequest.verify|verify} messages. + * Encodes the specified SetWritableRequest message, length delimited. 
Does not implicitly {@link vtctldata.SetWritableRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.RemoveShardCellRequest + * @memberof vtctldata.SetWritableRequest * @static - * @param {vtctldata.IRemoveShardCellRequest} message RemoveShardCellRequest message or plain object to encode + * @param {vtctldata.ISetWritableRequest} message SetWritableRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RemoveShardCellRequest.encodeDelimited = function encodeDelimited(message, writer) { + SetWritableRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RemoveShardCellRequest message from the specified reader or buffer. + * Decodes a SetWritableRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.RemoveShardCellRequest + * @memberof vtctldata.SetWritableRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.RemoveShardCellRequest} RemoveShardCellRequest + * @returns {vtctldata.SetWritableRequest} SetWritableRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RemoveShardCellRequest.decode = function decode(reader, length) { + SetWritableRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RemoveShardCellRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.SetWritableRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); break; } case 2: { - message.shard_name = reader.string(); - break; - } - case 3: { - message.cell = reader.string(); - break; - } - case 4: { - message.force = reader.bool(); - break; - } - case 5: { - message.recursive = reader.bool(); + message.writable = reader.bool(); break; } default: @@ -128239,154 +150980,135 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a RemoveShardCellRequest message from the specified reader or buffer, length delimited. + * Decodes a SetWritableRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.RemoveShardCellRequest + * @memberof vtctldata.SetWritableRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.RemoveShardCellRequest} RemoveShardCellRequest + * @returns {vtctldata.SetWritableRequest} SetWritableRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RemoveShardCellRequest.decodeDelimited = function decodeDelimited(reader) { + SetWritableRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RemoveShardCellRequest message. + * Verifies a SetWritableRequest message. 
* @function verify - * @memberof vtctldata.RemoveShardCellRequest + * @memberof vtctldata.SetWritableRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RemoveShardCellRequest.verify = function verify(message) { + SetWritableRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard_name != null && message.hasOwnProperty("shard_name")) - if (!$util.isString(message.shard_name)) - return "shard_name: string expected"; - if (message.cell != null && message.hasOwnProperty("cell")) - if (!$util.isString(message.cell)) - return "cell: string expected"; - if (message.force != null && message.hasOwnProperty("force")) - if (typeof message.force !== "boolean") - return "force: boolean expected"; - if (message.recursive != null && message.hasOwnProperty("recursive")) - if (typeof message.recursive !== "boolean") - return "recursive: boolean expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; + } + if (message.writable != null && message.hasOwnProperty("writable")) + if (typeof message.writable !== "boolean") + return "writable: boolean expected"; return null; }; /** - * Creates a RemoveShardCellRequest message from a plain object. Also converts values to their respective internal types. + * Creates a SetWritableRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.RemoveShardCellRequest + * @memberof vtctldata.SetWritableRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.RemoveShardCellRequest} RemoveShardCellRequest + * @returns {vtctldata.SetWritableRequest} SetWritableRequest */ - RemoveShardCellRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.RemoveShardCellRequest) + SetWritableRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.SetWritableRequest) return object; - let message = new $root.vtctldata.RemoveShardCellRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard_name != null) - message.shard_name = String(object.shard_name); - if (object.cell != null) - message.cell = String(object.cell); - if (object.force != null) - message.force = Boolean(object.force); - if (object.recursive != null) - message.recursive = Boolean(object.recursive); + let message = new $root.vtctldata.SetWritableRequest(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.SetWritableRequest.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } + if (object.writable != null) + message.writable = Boolean(object.writable); return message; }; /** - * Creates a plain object from a RemoveShardCellRequest message. Also converts values to other types if specified. + * Creates a plain object from a SetWritableRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.RemoveShardCellRequest + * @memberof vtctldata.SetWritableRequest * @static - * @param {vtctldata.RemoveShardCellRequest} message RemoveShardCellRequest + * @param {vtctldata.SetWritableRequest} message SetWritableRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RemoveShardCellRequest.toObject = function toObject(message, options) { + SetWritableRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.keyspace = ""; - object.shard_name = ""; - object.cell = ""; - object.force = false; - object.recursive = false; + object.tablet_alias = null; + object.writable = false; } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard_name != null && message.hasOwnProperty("shard_name")) - object.shard_name = message.shard_name; - if (message.cell != null && message.hasOwnProperty("cell")) - object.cell = message.cell; - if (message.force != null && message.hasOwnProperty("force")) - object.force = message.force; - if (message.recursive != null && message.hasOwnProperty("recursive")) - object.recursive = message.recursive; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (message.writable != null && message.hasOwnProperty("writable")) + object.writable = message.writable; return object; }; /** - * Converts this RemoveShardCellRequest to JSON. + * Converts this SetWritableRequest to JSON. 
* @function toJSON - * @memberof vtctldata.RemoveShardCellRequest + * @memberof vtctldata.SetWritableRequest * @instance * @returns {Object.} JSON object */ - RemoveShardCellRequest.prototype.toJSON = function toJSON() { + SetWritableRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RemoveShardCellRequest + * Gets the default type url for SetWritableRequest * @function getTypeUrl - * @memberof vtctldata.RemoveShardCellRequest + * @memberof vtctldata.SetWritableRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RemoveShardCellRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SetWritableRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.RemoveShardCellRequest"; + return typeUrlPrefix + "/vtctldata.SetWritableRequest"; }; - return RemoveShardCellRequest; + return SetWritableRequest; })(); - vtctldata.RemoveShardCellResponse = (function() { + vtctldata.SetWritableResponse = (function() { /** - * Properties of a RemoveShardCellResponse. + * Properties of a SetWritableResponse. * @memberof vtctldata - * @interface IRemoveShardCellResponse + * @interface ISetWritableResponse */ /** - * Constructs a new RemoveShardCellResponse. + * Constructs a new SetWritableResponse. * @memberof vtctldata - * @classdesc Represents a RemoveShardCellResponse. - * @implements IRemoveShardCellResponse + * @classdesc Represents a SetWritableResponse. 
+ * @implements ISetWritableResponse * @constructor - * @param {vtctldata.IRemoveShardCellResponse=} [properties] Properties to set + * @param {vtctldata.ISetWritableResponse=} [properties] Properties to set */ - function RemoveShardCellResponse(properties) { + function SetWritableResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -128394,60 +151116,60 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new RemoveShardCellResponse instance using the specified properties. + * Creates a new SetWritableResponse instance using the specified properties. * @function create - * @memberof vtctldata.RemoveShardCellResponse + * @memberof vtctldata.SetWritableResponse * @static - * @param {vtctldata.IRemoveShardCellResponse=} [properties] Properties to set - * @returns {vtctldata.RemoveShardCellResponse} RemoveShardCellResponse instance + * @param {vtctldata.ISetWritableResponse=} [properties] Properties to set + * @returns {vtctldata.SetWritableResponse} SetWritableResponse instance */ - RemoveShardCellResponse.create = function create(properties) { - return new RemoveShardCellResponse(properties); + SetWritableResponse.create = function create(properties) { + return new SetWritableResponse(properties); }; /** - * Encodes the specified RemoveShardCellResponse message. Does not implicitly {@link vtctldata.RemoveShardCellResponse.verify|verify} messages. + * Encodes the specified SetWritableResponse message. Does not implicitly {@link vtctldata.SetWritableResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.RemoveShardCellResponse + * @memberof vtctldata.SetWritableResponse * @static - * @param {vtctldata.IRemoveShardCellResponse} message RemoveShardCellResponse message or plain object to encode + * @param {vtctldata.ISetWritableResponse} message SetWritableResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RemoveShardCellResponse.encode = function encode(message, writer) { + SetWritableResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); return writer; }; /** - * Encodes the specified RemoveShardCellResponse message, length delimited. Does not implicitly {@link vtctldata.RemoveShardCellResponse.verify|verify} messages. + * Encodes the specified SetWritableResponse message, length delimited. Does not implicitly {@link vtctldata.SetWritableResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.RemoveShardCellResponse + * @memberof vtctldata.SetWritableResponse * @static - * @param {vtctldata.IRemoveShardCellResponse} message RemoveShardCellResponse message or plain object to encode + * @param {vtctldata.ISetWritableResponse} message SetWritableResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RemoveShardCellResponse.encodeDelimited = function encodeDelimited(message, writer) { + SetWritableResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RemoveShardCellResponse message from the specified reader or buffer. + * Decodes a SetWritableResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.RemoveShardCellResponse + * @memberof vtctldata.SetWritableResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.RemoveShardCellResponse} RemoveShardCellResponse + * @returns {vtctldata.SetWritableResponse} SetWritableResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RemoveShardCellResponse.decode = function decode(reader, length) { + SetWritableResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RemoveShardCellResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SetWritableResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -128460,109 +151182,111 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a RemoveShardCellResponse message from the specified reader or buffer, length delimited. + * Decodes a SetWritableResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.RemoveShardCellResponse + * @memberof vtctldata.SetWritableResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.RemoveShardCellResponse} RemoveShardCellResponse + * @returns {vtctldata.SetWritableResponse} SetWritableResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RemoveShardCellResponse.decodeDelimited = function decodeDelimited(reader) { + SetWritableResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RemoveShardCellResponse message. + * Verifies a SetWritableResponse message. * @function verify - * @memberof vtctldata.RemoveShardCellResponse + * @memberof vtctldata.SetWritableResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RemoveShardCellResponse.verify = function verify(message) { + SetWritableResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; return null; }; /** - * Creates a RemoveShardCellResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SetWritableResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.RemoveShardCellResponse + * @memberof vtctldata.SetWritableResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.RemoveShardCellResponse} RemoveShardCellResponse + * @returns {vtctldata.SetWritableResponse} SetWritableResponse */ - RemoveShardCellResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.RemoveShardCellResponse) + SetWritableResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.SetWritableResponse) return object; - return new $root.vtctldata.RemoveShardCellResponse(); + return new $root.vtctldata.SetWritableResponse(); }; /** - * Creates a plain object from a RemoveShardCellResponse message. Also converts values to other types if specified. + * Creates a plain object from a SetWritableResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.RemoveShardCellResponse + * @memberof vtctldata.SetWritableResponse * @static - * @param {vtctldata.RemoveShardCellResponse} message RemoveShardCellResponse + * @param {vtctldata.SetWritableResponse} message SetWritableResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RemoveShardCellResponse.toObject = function toObject() { + SetWritableResponse.toObject = function toObject() { return {}; }; /** - * Converts this RemoveShardCellResponse to JSON. + * Converts this SetWritableResponse to JSON. 
* @function toJSON - * @memberof vtctldata.RemoveShardCellResponse + * @memberof vtctldata.SetWritableResponse * @instance * @returns {Object.} JSON object */ - RemoveShardCellResponse.prototype.toJSON = function toJSON() { + SetWritableResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RemoveShardCellResponse + * Gets the default type url for SetWritableResponse * @function getTypeUrl - * @memberof vtctldata.RemoveShardCellResponse + * @memberof vtctldata.SetWritableResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RemoveShardCellResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SetWritableResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.RemoveShardCellResponse"; + return typeUrlPrefix + "/vtctldata.SetWritableResponse"; }; - return RemoveShardCellResponse; + return SetWritableResponse; })(); - vtctldata.ReparentTabletRequest = (function() { + vtctldata.ShardReplicationAddRequest = (function() { /** - * Properties of a ReparentTabletRequest. + * Properties of a ShardReplicationAddRequest. * @memberof vtctldata - * @interface IReparentTabletRequest - * @property {topodata.ITabletAlias|null} [tablet] ReparentTabletRequest tablet + * @interface IShardReplicationAddRequest + * @property {string|null} [keyspace] ShardReplicationAddRequest keyspace + * @property {string|null} [shard] ShardReplicationAddRequest shard + * @property {topodata.ITabletAlias|null} [tablet_alias] ShardReplicationAddRequest tablet_alias */ /** - * Constructs a new ReparentTabletRequest. + * Constructs a new ShardReplicationAddRequest. * @memberof vtctldata - * @classdesc Represents a ReparentTabletRequest. 
- * @implements IReparentTabletRequest + * @classdesc Represents a ShardReplicationAddRequest. + * @implements IShardReplicationAddRequest * @constructor - * @param {vtctldata.IReparentTabletRequest=} [properties] Properties to set + * @param {vtctldata.IShardReplicationAddRequest=} [properties] Properties to set */ - function ReparentTabletRequest(properties) { + function ShardReplicationAddRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -128570,75 +151294,103 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ReparentTabletRequest tablet. - * @member {topodata.ITabletAlias|null|undefined} tablet - * @memberof vtctldata.ReparentTabletRequest + * ShardReplicationAddRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.ShardReplicationAddRequest * @instance */ - ReparentTabletRequest.prototype.tablet = null; + ShardReplicationAddRequest.prototype.keyspace = ""; /** - * Creates a new ReparentTabletRequest instance using the specified properties. + * ShardReplicationAddRequest shard. + * @member {string} shard + * @memberof vtctldata.ShardReplicationAddRequest + * @instance + */ + ShardReplicationAddRequest.prototype.shard = ""; + + /** + * ShardReplicationAddRequest tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.ShardReplicationAddRequest + * @instance + */ + ShardReplicationAddRequest.prototype.tablet_alias = null; + + /** + * Creates a new ShardReplicationAddRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.ReparentTabletRequest + * @memberof vtctldata.ShardReplicationAddRequest * @static - * @param {vtctldata.IReparentTabletRequest=} [properties] Properties to set - * @returns {vtctldata.ReparentTabletRequest} ReparentTabletRequest instance + * @param {vtctldata.IShardReplicationAddRequest=} [properties] Properties to set + * @returns {vtctldata.ShardReplicationAddRequest} ShardReplicationAddRequest instance */ - ReparentTabletRequest.create = function create(properties) { - return new ReparentTabletRequest(properties); + ShardReplicationAddRequest.create = function create(properties) { + return new ShardReplicationAddRequest(properties); }; /** - * Encodes the specified ReparentTabletRequest message. Does not implicitly {@link vtctldata.ReparentTabletRequest.verify|verify} messages. + * Encodes the specified ShardReplicationAddRequest message. Does not implicitly {@link vtctldata.ShardReplicationAddRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.ReparentTabletRequest + * @memberof vtctldata.ShardReplicationAddRequest * @static - * @param {vtctldata.IReparentTabletRequest} message ReparentTabletRequest message or plain object to encode + * @param {vtctldata.IShardReplicationAddRequest} message ShardReplicationAddRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReparentTabletRequest.encode = function encode(message, writer) { + ShardReplicationAddRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet != null && Object.hasOwnProperty.call(message, "tablet")) - $root.topodata.TabletAlias.encode(message.tablet, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shard != null && 
Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); return writer; }; /** - * Encodes the specified ReparentTabletRequest message, length delimited. Does not implicitly {@link vtctldata.ReparentTabletRequest.verify|verify} messages. + * Encodes the specified ShardReplicationAddRequest message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationAddRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ReparentTabletRequest + * @memberof vtctldata.ShardReplicationAddRequest * @static - * @param {vtctldata.IReparentTabletRequest} message ReparentTabletRequest message or plain object to encode + * @param {vtctldata.IShardReplicationAddRequest} message ShardReplicationAddRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReparentTabletRequest.encodeDelimited = function encodeDelimited(message, writer) { + ShardReplicationAddRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReparentTabletRequest message from the specified reader or buffer. + * Decodes a ShardReplicationAddRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.ReparentTabletRequest + * @memberof vtctldata.ShardReplicationAddRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ReparentTabletRequest} ReparentTabletRequest + * @returns {vtctldata.ShardReplicationAddRequest} ShardReplicationAddRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReparentTabletRequest.decode = function decode(reader, length) { + ShardReplicationAddRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ReparentTabletRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ShardReplicationAddRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tablet = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + message.keyspace = reader.string(); + break; + } + case 2: { + message.shard = reader.string(); + break; + } + case 3: { + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); break; } default: @@ -128650,129 +151402,143 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ReparentTabletRequest message from the specified reader or buffer, length delimited. + * Decodes a ShardReplicationAddRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.ReparentTabletRequest + * @memberof vtctldata.ShardReplicationAddRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ReparentTabletRequest} ReparentTabletRequest + * @returns {vtctldata.ShardReplicationAddRequest} ShardReplicationAddRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReparentTabletRequest.decodeDelimited = function decodeDelimited(reader) { + ShardReplicationAddRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReparentTabletRequest message. + * Verifies a ShardReplicationAddRequest message. * @function verify - * @memberof vtctldata.ReparentTabletRequest + * @memberof vtctldata.ShardReplicationAddRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReparentTabletRequest.verify = function verify(message) { + ShardReplicationAddRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet != null && message.hasOwnProperty("tablet")) { - let error = $root.topodata.TabletAlias.verify(message.tablet); + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); if (error) - return "tablet." + error; + return "tablet_alias." 
+ error; } return null; }; /** - * Creates a ReparentTabletRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ShardReplicationAddRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.ReparentTabletRequest + * @memberof vtctldata.ShardReplicationAddRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ReparentTabletRequest} ReparentTabletRequest + * @returns {vtctldata.ShardReplicationAddRequest} ShardReplicationAddRequest */ - ReparentTabletRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ReparentTabletRequest) + ShardReplicationAddRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ShardReplicationAddRequest) return object; - let message = new $root.vtctldata.ReparentTabletRequest(); - if (object.tablet != null) { - if (typeof object.tablet !== "object") - throw TypeError(".vtctldata.ReparentTabletRequest.tablet: object expected"); - message.tablet = $root.topodata.TabletAlias.fromObject(object.tablet); + let message = new $root.vtctldata.ShardReplicationAddRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.ShardReplicationAddRequest.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); } return message; }; /** - * Creates a plain object from a ReparentTabletRequest message. Also converts values to other types if specified. + * Creates a plain object from a ShardReplicationAddRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ReparentTabletRequest + * @memberof vtctldata.ShardReplicationAddRequest * @static - * @param {vtctldata.ReparentTabletRequest} message ReparentTabletRequest + * @param {vtctldata.ShardReplicationAddRequest} message ShardReplicationAddRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReparentTabletRequest.toObject = function toObject(message, options) { + ShardReplicationAddRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.tablet = null; - if (message.tablet != null && message.hasOwnProperty("tablet")) - object.tablet = $root.topodata.TabletAlias.toObject(message.tablet, options); + if (options.defaults) { + object.keyspace = ""; + object.shard = ""; + object.tablet_alias = null; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); return object; }; /** - * Converts this ReparentTabletRequest to JSON. + * Converts this ShardReplicationAddRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ReparentTabletRequest + * @memberof vtctldata.ShardReplicationAddRequest * @instance * @returns {Object.} JSON object */ - ReparentTabletRequest.prototype.toJSON = function toJSON() { + ShardReplicationAddRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReparentTabletRequest + * Gets the default type url for ShardReplicationAddRequest * @function getTypeUrl - * @memberof vtctldata.ReparentTabletRequest + * @memberof vtctldata.ShardReplicationAddRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReparentTabletRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ShardReplicationAddRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ReparentTabletRequest"; + return typeUrlPrefix + "/vtctldata.ShardReplicationAddRequest"; }; - return ReparentTabletRequest; + return ShardReplicationAddRequest; })(); - vtctldata.ReparentTabletResponse = (function() { + vtctldata.ShardReplicationAddResponse = (function() { /** - * Properties of a ReparentTabletResponse. + * Properties of a ShardReplicationAddResponse. * @memberof vtctldata - * @interface IReparentTabletResponse - * @property {string|null} [keyspace] ReparentTabletResponse keyspace - * @property {string|null} [shard] ReparentTabletResponse shard - * @property {topodata.ITabletAlias|null} [primary] ReparentTabletResponse primary + * @interface IShardReplicationAddResponse */ /** - * Constructs a new ReparentTabletResponse. + * Constructs a new ShardReplicationAddResponse. * @memberof vtctldata - * @classdesc Represents a ReparentTabletResponse. - * @implements IReparentTabletResponse + * @classdesc Represents a ShardReplicationAddResponse. 
+ * @implements IShardReplicationAddResponse * @constructor - * @param {vtctldata.IReparentTabletResponse=} [properties] Properties to set + * @param {vtctldata.IShardReplicationAddResponse=} [properties] Properties to set */ - function ReparentTabletResponse(properties) { + function ShardReplicationAddResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -128780,105 +151546,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ReparentTabletResponse keyspace. - * @member {string} keyspace - * @memberof vtctldata.ReparentTabletResponse - * @instance - */ - ReparentTabletResponse.prototype.keyspace = ""; - - /** - * ReparentTabletResponse shard. - * @member {string} shard - * @memberof vtctldata.ReparentTabletResponse - * @instance - */ - ReparentTabletResponse.prototype.shard = ""; - - /** - * ReparentTabletResponse primary. - * @member {topodata.ITabletAlias|null|undefined} primary - * @memberof vtctldata.ReparentTabletResponse - * @instance - */ - ReparentTabletResponse.prototype.primary = null; - - /** - * Creates a new ReparentTabletResponse instance using the specified properties. + * Creates a new ShardReplicationAddResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.ReparentTabletResponse + * @memberof vtctldata.ShardReplicationAddResponse * @static - * @param {vtctldata.IReparentTabletResponse=} [properties] Properties to set - * @returns {vtctldata.ReparentTabletResponse} ReparentTabletResponse instance + * @param {vtctldata.IShardReplicationAddResponse=} [properties] Properties to set + * @returns {vtctldata.ShardReplicationAddResponse} ShardReplicationAddResponse instance */ - ReparentTabletResponse.create = function create(properties) { - return new ReparentTabletResponse(properties); + ShardReplicationAddResponse.create = function create(properties) { + return new ShardReplicationAddResponse(properties); }; /** - * Encodes the specified ReparentTabletResponse message. Does not implicitly {@link vtctldata.ReparentTabletResponse.verify|verify} messages. + * Encodes the specified ShardReplicationAddResponse message. Does not implicitly {@link vtctldata.ShardReplicationAddResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ReparentTabletResponse + * @memberof vtctldata.ShardReplicationAddResponse * @static - * @param {vtctldata.IReparentTabletResponse} message ReparentTabletResponse message or plain object to encode + * @param {vtctldata.IShardReplicationAddResponse} message ShardReplicationAddResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReparentTabletResponse.encode = function encode(message, writer) { + ShardReplicationAddResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.primary != null && Object.hasOwnProperty.call(message, "primary")) - $root.topodata.TabletAlias.encode(message.primary, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); return writer; }; /** - * Encodes the specified ReparentTabletResponse message, length delimited. Does not implicitly {@link vtctldata.ReparentTabletResponse.verify|verify} messages. + * Encodes the specified ShardReplicationAddResponse message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationAddResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ReparentTabletResponse + * @memberof vtctldata.ShardReplicationAddResponse * @static - * @param {vtctldata.IReparentTabletResponse} message ReparentTabletResponse message or plain object to encode + * @param {vtctldata.IShardReplicationAddResponse} message ShardReplicationAddResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ReparentTabletResponse.encodeDelimited = function encodeDelimited(message, writer) { + ShardReplicationAddResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ReparentTabletResponse message from the specified reader or buffer. + * Decodes a ShardReplicationAddResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ReparentTabletResponse + * @memberof vtctldata.ShardReplicationAddResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ReparentTabletResponse} ReparentTabletResponse + * @returns {vtctldata.ShardReplicationAddResponse} ShardReplicationAddResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReparentTabletResponse.decode = function decode(reader, length) { + ShardReplicationAddResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ReparentTabletResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ShardReplicationAddResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - message.shard = reader.string(); - break; - } - case 3: { - message.primary = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } default: reader.skipType(tag & 7); break; @@ -128888,147 +151612,111 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ReparentTabletResponse message from the specified reader or buffer, length delimited. + * Decodes a ShardReplicationAddResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ReparentTabletResponse + * @memberof vtctldata.ShardReplicationAddResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ReparentTabletResponse} ReparentTabletResponse + * @returns {vtctldata.ShardReplicationAddResponse} ShardReplicationAddResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ReparentTabletResponse.decodeDelimited = function decodeDelimited(reader) { + ShardReplicationAddResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ReparentTabletResponse message. + * Verifies a ShardReplicationAddResponse message. 
* @function verify - * @memberof vtctldata.ReparentTabletResponse + * @memberof vtctldata.ShardReplicationAddResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ReparentTabletResponse.verify = function verify(message) { + ShardReplicationAddResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.primary != null && message.hasOwnProperty("primary")) { - let error = $root.topodata.TabletAlias.verify(message.primary); - if (error) - return "primary." + error; - } return null; }; /** - * Creates a ReparentTabletResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ShardReplicationAddResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ReparentTabletResponse + * @memberof vtctldata.ShardReplicationAddResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ReparentTabletResponse} ReparentTabletResponse + * @returns {vtctldata.ShardReplicationAddResponse} ShardReplicationAddResponse */ - ReparentTabletResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ReparentTabletResponse) + ShardReplicationAddResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ShardReplicationAddResponse) return object; - let message = new $root.vtctldata.ReparentTabletResponse(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.primary != null) { - if (typeof object.primary !== "object") - throw TypeError(".vtctldata.ReparentTabletResponse.primary: object expected"); - message.primary = $root.topodata.TabletAlias.fromObject(object.primary); - } - return message; + return new $root.vtctldata.ShardReplicationAddResponse(); }; /** - * Creates a plain object from a ReparentTabletResponse message. Also converts values to other types if specified. + * Creates a plain object from a ShardReplicationAddResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ReparentTabletResponse + * @memberof vtctldata.ShardReplicationAddResponse * @static - * @param {vtctldata.ReparentTabletResponse} message ReparentTabletResponse + * @param {vtctldata.ShardReplicationAddResponse} message ShardReplicationAddResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ReparentTabletResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.keyspace = ""; - object.shard = ""; - object.primary = null; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.primary != null && message.hasOwnProperty("primary")) - object.primary = $root.topodata.TabletAlias.toObject(message.primary, options); - return object; + ShardReplicationAddResponse.toObject = function toObject() { + return {}; }; /** - * Converts this ReparentTabletResponse to JSON. + * Converts this ShardReplicationAddResponse to JSON. 
* @function toJSON - * @memberof vtctldata.ReparentTabletResponse + * @memberof vtctldata.ShardReplicationAddResponse * @instance * @returns {Object.} JSON object */ - ReparentTabletResponse.prototype.toJSON = function toJSON() { + ShardReplicationAddResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ReparentTabletResponse + * Gets the default type url for ShardReplicationAddResponse * @function getTypeUrl - * @memberof vtctldata.ReparentTabletResponse + * @memberof vtctldata.ShardReplicationAddResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ReparentTabletResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ShardReplicationAddResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ReparentTabletResponse"; + return typeUrlPrefix + "/vtctldata.ShardReplicationAddResponse"; }; - return ReparentTabletResponse; + return ShardReplicationAddResponse; })(); - vtctldata.RestoreFromBackupRequest = (function() { + vtctldata.ShardReplicationFixRequest = (function() { /** - * Properties of a RestoreFromBackupRequest. + * Properties of a ShardReplicationFixRequest. 
* @memberof vtctldata - * @interface IRestoreFromBackupRequest - * @property {topodata.ITabletAlias|null} [tablet_alias] RestoreFromBackupRequest tablet_alias - * @property {vttime.ITime|null} [backup_time] RestoreFromBackupRequest backup_time - * @property {string|null} [restore_to_pos] RestoreFromBackupRequest restore_to_pos - * @property {boolean|null} [dry_run] RestoreFromBackupRequest dry_run + * @interface IShardReplicationFixRequest + * @property {string|null} [keyspace] ShardReplicationFixRequest keyspace + * @property {string|null} [shard] ShardReplicationFixRequest shard + * @property {string|null} [cell] ShardReplicationFixRequest cell */ /** - * Constructs a new RestoreFromBackupRequest. + * Constructs a new ShardReplicationFixRequest. * @memberof vtctldata - * @classdesc Represents a RestoreFromBackupRequest. - * @implements IRestoreFromBackupRequest + * @classdesc Represents a ShardReplicationFixRequest. + * @implements IShardReplicationFixRequest * @constructor - * @param {vtctldata.IRestoreFromBackupRequest=} [properties] Properties to set + * @param {vtctldata.IShardReplicationFixRequest=} [properties] Properties to set */ - function RestoreFromBackupRequest(properties) { + function ShardReplicationFixRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -129036,117 +151724,103 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * RestoreFromBackupRequest tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.RestoreFromBackupRequest - * @instance - */ - RestoreFromBackupRequest.prototype.tablet_alias = null; - - /** - * RestoreFromBackupRequest backup_time. - * @member {vttime.ITime|null|undefined} backup_time - * @memberof vtctldata.RestoreFromBackupRequest + * ShardReplicationFixRequest keyspace. 
+ * @member {string} keyspace + * @memberof vtctldata.ShardReplicationFixRequest * @instance */ - RestoreFromBackupRequest.prototype.backup_time = null; + ShardReplicationFixRequest.prototype.keyspace = ""; /** - * RestoreFromBackupRequest restore_to_pos. - * @member {string} restore_to_pos - * @memberof vtctldata.RestoreFromBackupRequest + * ShardReplicationFixRequest shard. + * @member {string} shard + * @memberof vtctldata.ShardReplicationFixRequest * @instance */ - RestoreFromBackupRequest.prototype.restore_to_pos = ""; + ShardReplicationFixRequest.prototype.shard = ""; /** - * RestoreFromBackupRequest dry_run. - * @member {boolean} dry_run - * @memberof vtctldata.RestoreFromBackupRequest + * ShardReplicationFixRequest cell. + * @member {string} cell + * @memberof vtctldata.ShardReplicationFixRequest * @instance */ - RestoreFromBackupRequest.prototype.dry_run = false; + ShardReplicationFixRequest.prototype.cell = ""; /** - * Creates a new RestoreFromBackupRequest instance using the specified properties. + * Creates a new ShardReplicationFixRequest instance using the specified properties. * @function create - * @memberof vtctldata.RestoreFromBackupRequest + * @memberof vtctldata.ShardReplicationFixRequest * @static - * @param {vtctldata.IRestoreFromBackupRequest=} [properties] Properties to set - * @returns {vtctldata.RestoreFromBackupRequest} RestoreFromBackupRequest instance + * @param {vtctldata.IShardReplicationFixRequest=} [properties] Properties to set + * @returns {vtctldata.ShardReplicationFixRequest} ShardReplicationFixRequest instance */ - RestoreFromBackupRequest.create = function create(properties) { - return new RestoreFromBackupRequest(properties); + ShardReplicationFixRequest.create = function create(properties) { + return new ShardReplicationFixRequest(properties); }; /** - * Encodes the specified RestoreFromBackupRequest message. Does not implicitly {@link vtctldata.RestoreFromBackupRequest.verify|verify} messages. 
+ * Encodes the specified ShardReplicationFixRequest message. Does not implicitly {@link vtctldata.ShardReplicationFixRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.RestoreFromBackupRequest + * @memberof vtctldata.ShardReplicationFixRequest * @static - * @param {vtctldata.IRestoreFromBackupRequest} message RestoreFromBackupRequest message or plain object to encode + * @param {vtctldata.IShardReplicationFixRequest} message ShardReplicationFixRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RestoreFromBackupRequest.encode = function encode(message, writer) { + ShardReplicationFixRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.backup_time != null && Object.hasOwnProperty.call(message, "backup_time")) - $root.vttime.Time.encode(message.backup_time, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); - if (message.restore_to_pos != null && Object.hasOwnProperty.call(message, "restore_to_pos")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.restore_to_pos); - if (message.dry_run != null && Object.hasOwnProperty.call(message, "dry_run")) - writer.uint32(/* id 4, wireType 0 =*/32).bool(message.dry_run); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.cell != null && Object.hasOwnProperty.call(message, "cell")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.cell); return writer; }; /** - * Encodes the 
specified RestoreFromBackupRequest message, length delimited. Does not implicitly {@link vtctldata.RestoreFromBackupRequest.verify|verify} messages. + * Encodes the specified ShardReplicationFixRequest message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationFixRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.RestoreFromBackupRequest + * @memberof vtctldata.ShardReplicationFixRequest * @static - * @param {vtctldata.IRestoreFromBackupRequest} message RestoreFromBackupRequest message or plain object to encode + * @param {vtctldata.IShardReplicationFixRequest} message ShardReplicationFixRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RestoreFromBackupRequest.encodeDelimited = function encodeDelimited(message, writer) { + ShardReplicationFixRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RestoreFromBackupRequest message from the specified reader or buffer. + * Decodes a ShardReplicationFixRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.RestoreFromBackupRequest + * @memberof vtctldata.ShardReplicationFixRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.RestoreFromBackupRequest} RestoreFromBackupRequest + * @returns {vtctldata.ShardReplicationFixRequest} ShardReplicationFixRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RestoreFromBackupRequest.decode = function decode(reader, length) { + ShardReplicationFixRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.RestoreFromBackupRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ShardReplicationFixRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + message.keyspace = reader.string(); break; } case 2: { - message.backup_time = $root.vttime.Time.decode(reader, reader.uint32()); + message.shard = reader.string(); break; } case 3: { - message.restore_to_pos = reader.string(); - break; - } - case 4: { - message.dry_run = reader.bool(); + message.cell = reader.string(); break; } default: @@ -129158,160 +151832,139 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a RestoreFromBackupRequest message from the specified reader or buffer, length delimited. + * Decodes a ShardReplicationFixRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.RestoreFromBackupRequest + * @memberof vtctldata.ShardReplicationFixRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.RestoreFromBackupRequest} RestoreFromBackupRequest + * @returns {vtctldata.ShardReplicationFixRequest} ShardReplicationFixRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RestoreFromBackupRequest.decodeDelimited = function decodeDelimited(reader) { + ShardReplicationFixRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RestoreFromBackupRequest message. + * Verifies a ShardReplicationFixRequest message. 
* @function verify - * @memberof vtctldata.RestoreFromBackupRequest + * @memberof vtctldata.ShardReplicationFixRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RestoreFromBackupRequest.verify = function verify(message) { + ShardReplicationFixRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } - if (message.backup_time != null && message.hasOwnProperty("backup_time")) { - let error = $root.vttime.Time.verify(message.backup_time); - if (error) - return "backup_time." + error; - } - if (message.restore_to_pos != null && message.hasOwnProperty("restore_to_pos")) - if (!$util.isString(message.restore_to_pos)) - return "restore_to_pos: string expected"; - if (message.dry_run != null && message.hasOwnProperty("dry_run")) - if (typeof message.dry_run !== "boolean") - return "dry_run: boolean expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.cell != null && message.hasOwnProperty("cell")) + if (!$util.isString(message.cell)) + return "cell: string expected"; return null; }; /** - * Creates a RestoreFromBackupRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ShardReplicationFixRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.RestoreFromBackupRequest + * @memberof vtctldata.ShardReplicationFixRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.RestoreFromBackupRequest} RestoreFromBackupRequest + * @returns {vtctldata.ShardReplicationFixRequest} ShardReplicationFixRequest */ - RestoreFromBackupRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.RestoreFromBackupRequest) + ShardReplicationFixRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ShardReplicationFixRequest) return object; - let message = new $root.vtctldata.RestoreFromBackupRequest(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.RestoreFromBackupRequest.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } - if (object.backup_time != null) { - if (typeof object.backup_time !== "object") - throw TypeError(".vtctldata.RestoreFromBackupRequest.backup_time: object expected"); - message.backup_time = $root.vttime.Time.fromObject(object.backup_time); - } - if (object.restore_to_pos != null) - message.restore_to_pos = String(object.restore_to_pos); - if (object.dry_run != null) - message.dry_run = Boolean(object.dry_run); + let message = new $root.vtctldata.ShardReplicationFixRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + if (object.cell != null) + message.cell = String(object.cell); return message; }; /** - * Creates a plain object from a RestoreFromBackupRequest message. Also converts values to other types if specified. + * Creates a plain object from a ShardReplicationFixRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.RestoreFromBackupRequest + * @memberof vtctldata.ShardReplicationFixRequest * @static - * @param {vtctldata.RestoreFromBackupRequest} message RestoreFromBackupRequest + * @param {vtctldata.ShardReplicationFixRequest} message ShardReplicationFixRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RestoreFromBackupRequest.toObject = function toObject(message, options) { + ShardReplicationFixRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.tablet_alias = null; - object.backup_time = null; - object.restore_to_pos = ""; - object.dry_run = false; + object.keyspace = ""; + object.shard = ""; + object.cell = ""; } - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - if (message.backup_time != null && message.hasOwnProperty("backup_time")) - object.backup_time = $root.vttime.Time.toObject(message.backup_time, options); - if (message.restore_to_pos != null && message.hasOwnProperty("restore_to_pos")) - object.restore_to_pos = message.restore_to_pos; - if (message.dry_run != null && message.hasOwnProperty("dry_run")) - object.dry_run = message.dry_run; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.cell != null && message.hasOwnProperty("cell")) + object.cell = message.cell; return object; }; /** - * Converts this RestoreFromBackupRequest to JSON. + * Converts this ShardReplicationFixRequest to JSON. 
* @function toJSON - * @memberof vtctldata.RestoreFromBackupRequest + * @memberof vtctldata.ShardReplicationFixRequest * @instance * @returns {Object.} JSON object */ - RestoreFromBackupRequest.prototype.toJSON = function toJSON() { + ShardReplicationFixRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RestoreFromBackupRequest + * Gets the default type url for ShardReplicationFixRequest * @function getTypeUrl - * @memberof vtctldata.RestoreFromBackupRequest + * @memberof vtctldata.ShardReplicationFixRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RestoreFromBackupRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ShardReplicationFixRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.RestoreFromBackupRequest"; + return typeUrlPrefix + "/vtctldata.ShardReplicationFixRequest"; }; - return RestoreFromBackupRequest; + return ShardReplicationFixRequest; })(); - vtctldata.RestoreFromBackupResponse = (function() { + vtctldata.ShardReplicationFixResponse = (function() { /** - * Properties of a RestoreFromBackupResponse. + * Properties of a ShardReplicationFixResponse. * @memberof vtctldata - * @interface IRestoreFromBackupResponse - * @property {topodata.ITabletAlias|null} [tablet_alias] RestoreFromBackupResponse tablet_alias - * @property {string|null} [keyspace] RestoreFromBackupResponse keyspace - * @property {string|null} [shard] RestoreFromBackupResponse shard - * @property {logutil.IEvent|null} [event] RestoreFromBackupResponse event + * @interface IShardReplicationFixResponse + * @property {topodata.IShardReplicationError|null} [error] ShardReplicationFixResponse error */ /** - * Constructs a new RestoreFromBackupResponse. 
+ * Constructs a new ShardReplicationFixResponse. * @memberof vtctldata - * @classdesc Represents a RestoreFromBackupResponse. - * @implements IRestoreFromBackupResponse + * @classdesc Represents a ShardReplicationFixResponse. + * @implements IShardReplicationFixResponse * @constructor - * @param {vtctldata.IRestoreFromBackupResponse=} [properties] Properties to set + * @param {vtctldata.IShardReplicationFixResponse=} [properties] Properties to set */ - function RestoreFromBackupResponse(properties) { + function ShardReplicationFixResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -129319,117 +151972,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * RestoreFromBackupResponse tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.RestoreFromBackupResponse - * @instance - */ - RestoreFromBackupResponse.prototype.tablet_alias = null; - - /** - * RestoreFromBackupResponse keyspace. - * @member {string} keyspace - * @memberof vtctldata.RestoreFromBackupResponse - * @instance - */ - RestoreFromBackupResponse.prototype.keyspace = ""; - - /** - * RestoreFromBackupResponse shard. - * @member {string} shard - * @memberof vtctldata.RestoreFromBackupResponse - * @instance - */ - RestoreFromBackupResponse.prototype.shard = ""; - - /** - * RestoreFromBackupResponse event. - * @member {logutil.IEvent|null|undefined} event - * @memberof vtctldata.RestoreFromBackupResponse + * ShardReplicationFixResponse error. + * @member {topodata.IShardReplicationError|null|undefined} error + * @memberof vtctldata.ShardReplicationFixResponse * @instance */ - RestoreFromBackupResponse.prototype.event = null; + ShardReplicationFixResponse.prototype.error = null; /** - * Creates a new RestoreFromBackupResponse instance using the specified properties. + * Creates a new ShardReplicationFixResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.RestoreFromBackupResponse + * @memberof vtctldata.ShardReplicationFixResponse * @static - * @param {vtctldata.IRestoreFromBackupResponse=} [properties] Properties to set - * @returns {vtctldata.RestoreFromBackupResponse} RestoreFromBackupResponse instance + * @param {vtctldata.IShardReplicationFixResponse=} [properties] Properties to set + * @returns {vtctldata.ShardReplicationFixResponse} ShardReplicationFixResponse instance */ - RestoreFromBackupResponse.create = function create(properties) { - return new RestoreFromBackupResponse(properties); + ShardReplicationFixResponse.create = function create(properties) { + return new ShardReplicationFixResponse(properties); }; /** - * Encodes the specified RestoreFromBackupResponse message. Does not implicitly {@link vtctldata.RestoreFromBackupResponse.verify|verify} messages. + * Encodes the specified ShardReplicationFixResponse message. Does not implicitly {@link vtctldata.ShardReplicationFixResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.RestoreFromBackupResponse + * @memberof vtctldata.ShardReplicationFixResponse * @static - * @param {vtctldata.IRestoreFromBackupResponse} message RestoreFromBackupResponse message or plain object to encode + * @param {vtctldata.IShardReplicationFixResponse} message ShardReplicationFixResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RestoreFromBackupResponse.encode = function encode(message, writer) { + ShardReplicationFixResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.shard); - if (message.event != null && Object.hasOwnProperty.call(message, "event")) - $root.logutil.Event.encode(message.event, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.error != null && Object.hasOwnProperty.call(message, "error")) + $root.topodata.ShardReplicationError.encode(message.error, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified RestoreFromBackupResponse message, length delimited. Does not implicitly {@link vtctldata.RestoreFromBackupResponse.verify|verify} messages. + * Encodes the specified ShardReplicationFixResponse message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationFixResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.RestoreFromBackupResponse + * @memberof vtctldata.ShardReplicationFixResponse * @static - * @param {vtctldata.IRestoreFromBackupResponse} message RestoreFromBackupResponse message or plain object to encode + * @param {vtctldata.IShardReplicationFixResponse} message ShardReplicationFixResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RestoreFromBackupResponse.encodeDelimited = function encodeDelimited(message, writer) { + ShardReplicationFixResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RestoreFromBackupResponse message from the specified reader or buffer. + * Decodes a ShardReplicationFixResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.RestoreFromBackupResponse + * @memberof vtctldata.ShardReplicationFixResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.RestoreFromBackupResponse} RestoreFromBackupResponse + * @returns {vtctldata.ShardReplicationFixResponse} ShardReplicationFixResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RestoreFromBackupResponse.decode = function decode(reader, length) { + ShardReplicationFixResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RestoreFromBackupResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ShardReplicationFixResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } - case 2: { - message.keyspace = reader.string(); - break; - } - case 3: { - message.shard = reader.string(); - break; - } - case 4: { - message.event = $root.logutil.Event.decode(reader, reader.uint32()); + message.error = $root.topodata.ShardReplicationError.decode(reader, reader.uint32()); break; } default: @@ -129441,157 +152052,128 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a RestoreFromBackupResponse message from the specified reader or buffer, length delimited. + * Decodes a ShardReplicationFixResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.RestoreFromBackupResponse + * @memberof vtctldata.ShardReplicationFixResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.RestoreFromBackupResponse} RestoreFromBackupResponse + * @returns {vtctldata.ShardReplicationFixResponse} ShardReplicationFixResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RestoreFromBackupResponse.decodeDelimited = function decodeDelimited(reader) { + ShardReplicationFixResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RestoreFromBackupResponse message. + * Verifies a ShardReplicationFixResponse message. 
* @function verify - * @memberof vtctldata.RestoreFromBackupResponse + * @memberof vtctldata.ShardReplicationFixResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RestoreFromBackupResponse.verify = function verify(message) { + ShardReplicationFixResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.event != null && message.hasOwnProperty("event")) { - let error = $root.logutil.Event.verify(message.event); + if (message.error != null && message.hasOwnProperty("error")) { + let error = $root.topodata.ShardReplicationError.verify(message.error); if (error) - return "event." + error; + return "error." + error; } return null; }; /** - * Creates a RestoreFromBackupResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ShardReplicationFixResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.RestoreFromBackupResponse + * @memberof vtctldata.ShardReplicationFixResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.RestoreFromBackupResponse} RestoreFromBackupResponse + * @returns {vtctldata.ShardReplicationFixResponse} ShardReplicationFixResponse */ - RestoreFromBackupResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.RestoreFromBackupResponse) + ShardReplicationFixResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ShardReplicationFixResponse) return object; - let message = new $root.vtctldata.RestoreFromBackupResponse(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.RestoreFromBackupResponse.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.event != null) { - if (typeof object.event !== "object") - throw TypeError(".vtctldata.RestoreFromBackupResponse.event: object expected"); - message.event = $root.logutil.Event.fromObject(object.event); + let message = new $root.vtctldata.ShardReplicationFixResponse(); + if (object.error != null) { + if (typeof object.error !== "object") + throw TypeError(".vtctldata.ShardReplicationFixResponse.error: object expected"); + message.error = $root.topodata.ShardReplicationError.fromObject(object.error); } return message; }; /** - * Creates a plain object from a RestoreFromBackupResponse message. Also converts values to other types if specified. + * Creates a plain object from a ShardReplicationFixResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.RestoreFromBackupResponse + * @memberof vtctldata.ShardReplicationFixResponse * @static - * @param {vtctldata.RestoreFromBackupResponse} message RestoreFromBackupResponse + * @param {vtctldata.ShardReplicationFixResponse} message ShardReplicationFixResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RestoreFromBackupResponse.toObject = function toObject(message, options) { + ShardReplicationFixResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.tablet_alias = null; - object.keyspace = ""; - object.shard = ""; - object.event = null; - } - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.event != null && message.hasOwnProperty("event")) - object.event = $root.logutil.Event.toObject(message.event, options); + if (options.defaults) + object.error = null; + if (message.error != null && message.hasOwnProperty("error")) + object.error = $root.topodata.ShardReplicationError.toObject(message.error, options); return object; }; /** - * Converts this RestoreFromBackupResponse to JSON. + * Converts this ShardReplicationFixResponse to JSON. 
* @function toJSON - * @memberof vtctldata.RestoreFromBackupResponse + * @memberof vtctldata.ShardReplicationFixResponse * @instance * @returns {Object.} JSON object */ - RestoreFromBackupResponse.prototype.toJSON = function toJSON() { + ShardReplicationFixResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RestoreFromBackupResponse + * Gets the default type url for ShardReplicationFixResponse * @function getTypeUrl - * @memberof vtctldata.RestoreFromBackupResponse + * @memberof vtctldata.ShardReplicationFixResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RestoreFromBackupResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ShardReplicationFixResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.RestoreFromBackupResponse"; + return typeUrlPrefix + "/vtctldata.ShardReplicationFixResponse"; }; - return RestoreFromBackupResponse; + return ShardReplicationFixResponse; })(); - vtctldata.RunHealthCheckRequest = (function() { + vtctldata.ShardReplicationPositionsRequest = (function() { /** - * Properties of a RunHealthCheckRequest. + * Properties of a ShardReplicationPositionsRequest. * @memberof vtctldata - * @interface IRunHealthCheckRequest - * @property {topodata.ITabletAlias|null} [tablet_alias] RunHealthCheckRequest tablet_alias + * @interface IShardReplicationPositionsRequest + * @property {string|null} [keyspace] ShardReplicationPositionsRequest keyspace + * @property {string|null} [shard] ShardReplicationPositionsRequest shard */ /** - * Constructs a new RunHealthCheckRequest. + * Constructs a new ShardReplicationPositionsRequest. * @memberof vtctldata - * @classdesc Represents a RunHealthCheckRequest. 
- * @implements IRunHealthCheckRequest + * @classdesc Represents a ShardReplicationPositionsRequest. + * @implements IShardReplicationPositionsRequest * @constructor - * @param {vtctldata.IRunHealthCheckRequest=} [properties] Properties to set + * @param {vtctldata.IShardReplicationPositionsRequest=} [properties] Properties to set */ - function RunHealthCheckRequest(properties) { + function ShardReplicationPositionsRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -129599,75 +152181,89 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * RunHealthCheckRequest tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.RunHealthCheckRequest + * ShardReplicationPositionsRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.ShardReplicationPositionsRequest * @instance */ - RunHealthCheckRequest.prototype.tablet_alias = null; + ShardReplicationPositionsRequest.prototype.keyspace = ""; /** - * Creates a new RunHealthCheckRequest instance using the specified properties. + * ShardReplicationPositionsRequest shard. + * @member {string} shard + * @memberof vtctldata.ShardReplicationPositionsRequest + * @instance + */ + ShardReplicationPositionsRequest.prototype.shard = ""; + + /** + * Creates a new ShardReplicationPositionsRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.RunHealthCheckRequest + * @memberof vtctldata.ShardReplicationPositionsRequest * @static - * @param {vtctldata.IRunHealthCheckRequest=} [properties] Properties to set - * @returns {vtctldata.RunHealthCheckRequest} RunHealthCheckRequest instance + * @param {vtctldata.IShardReplicationPositionsRequest=} [properties] Properties to set + * @returns {vtctldata.ShardReplicationPositionsRequest} ShardReplicationPositionsRequest instance */ - RunHealthCheckRequest.create = function create(properties) { - return new RunHealthCheckRequest(properties); + ShardReplicationPositionsRequest.create = function create(properties) { + return new ShardReplicationPositionsRequest(properties); }; /** - * Encodes the specified RunHealthCheckRequest message. Does not implicitly {@link vtctldata.RunHealthCheckRequest.verify|verify} messages. + * Encodes the specified ShardReplicationPositionsRequest message. Does not implicitly {@link vtctldata.ShardReplicationPositionsRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.RunHealthCheckRequest + * @memberof vtctldata.ShardReplicationPositionsRequest * @static - * @param {vtctldata.IRunHealthCheckRequest} message RunHealthCheckRequest message or plain object to encode + * @param {vtctldata.IShardReplicationPositionsRequest} message ShardReplicationPositionsRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RunHealthCheckRequest.encode = function encode(message, writer) { + ShardReplicationPositionsRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); return writer; }; /** - * Encodes the specified RunHealthCheckRequest message, length delimited. Does not implicitly {@link vtctldata.RunHealthCheckRequest.verify|verify} messages. + * Encodes the specified ShardReplicationPositionsRequest message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationPositionsRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.RunHealthCheckRequest + * @memberof vtctldata.ShardReplicationPositionsRequest * @static - * @param {vtctldata.IRunHealthCheckRequest} message RunHealthCheckRequest message or plain object to encode + * @param {vtctldata.IShardReplicationPositionsRequest} message ShardReplicationPositionsRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RunHealthCheckRequest.encodeDelimited = function encodeDelimited(message, writer) { + ShardReplicationPositionsRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RunHealthCheckRequest message from the specified reader or buffer. + * Decodes a ShardReplicationPositionsRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.RunHealthCheckRequest + * @memberof vtctldata.ShardReplicationPositionsRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.RunHealthCheckRequest} RunHealthCheckRequest + * @returns {vtctldata.ShardReplicationPositionsRequest} ShardReplicationPositionsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RunHealthCheckRequest.decode = function decode(reader, length) { + ShardReplicationPositionsRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RunHealthCheckRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ShardReplicationPositionsRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + message.keyspace = reader.string(); + break; + } + case 2: { + message.shard = reader.string(); break; } default: @@ -129679,126 +152275,134 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a RunHealthCheckRequest message from the specified reader or buffer, length delimited. + * Decodes a ShardReplicationPositionsRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.RunHealthCheckRequest + * @memberof vtctldata.ShardReplicationPositionsRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.RunHealthCheckRequest} RunHealthCheckRequest + * @returns {vtctldata.ShardReplicationPositionsRequest} ShardReplicationPositionsRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RunHealthCheckRequest.decodeDelimited = function decodeDelimited(reader) { + ShardReplicationPositionsRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RunHealthCheckRequest message. + * Verifies a ShardReplicationPositionsRequest message. 
* @function verify - * @memberof vtctldata.RunHealthCheckRequest + * @memberof vtctldata.ShardReplicationPositionsRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RunHealthCheckRequest.verify = function verify(message) { + ShardReplicationPositionsRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; return null; }; /** - * Creates a RunHealthCheckRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ShardReplicationPositionsRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.RunHealthCheckRequest + * @memberof vtctldata.ShardReplicationPositionsRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.RunHealthCheckRequest} RunHealthCheckRequest + * @returns {vtctldata.ShardReplicationPositionsRequest} ShardReplicationPositionsRequest */ - RunHealthCheckRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.RunHealthCheckRequest) + ShardReplicationPositionsRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ShardReplicationPositionsRequest) return object; - let message = new $root.vtctldata.RunHealthCheckRequest(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.RunHealthCheckRequest.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } + let message = new $root.vtctldata.ShardReplicationPositionsRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); return message; }; /** - * Creates a plain object from a RunHealthCheckRequest message. Also converts values to other types if specified. + * Creates a plain object from a ShardReplicationPositionsRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.RunHealthCheckRequest + * @memberof vtctldata.ShardReplicationPositionsRequest * @static - * @param {vtctldata.RunHealthCheckRequest} message RunHealthCheckRequest + * @param {vtctldata.ShardReplicationPositionsRequest} message ShardReplicationPositionsRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RunHealthCheckRequest.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) - object.tablet_alias = null; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + ShardReplicationPositionsRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.keyspace = ""; + object.shard = ""; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; return object; }; /** - * Converts this RunHealthCheckRequest to JSON. + * Converts this ShardReplicationPositionsRequest to JSON. 
* @function toJSON - * @memberof vtctldata.RunHealthCheckRequest + * @memberof vtctldata.ShardReplicationPositionsRequest * @instance * @returns {Object.} JSON object */ - RunHealthCheckRequest.prototype.toJSON = function toJSON() { + ShardReplicationPositionsRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RunHealthCheckRequest + * Gets the default type url for ShardReplicationPositionsRequest * @function getTypeUrl - * @memberof vtctldata.RunHealthCheckRequest + * @memberof vtctldata.ShardReplicationPositionsRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RunHealthCheckRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ShardReplicationPositionsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.RunHealthCheckRequest"; + return typeUrlPrefix + "/vtctldata.ShardReplicationPositionsRequest"; }; - return RunHealthCheckRequest; + return ShardReplicationPositionsRequest; })(); - vtctldata.RunHealthCheckResponse = (function() { + vtctldata.ShardReplicationPositionsResponse = (function() { /** - * Properties of a RunHealthCheckResponse. + * Properties of a ShardReplicationPositionsResponse. * @memberof vtctldata - * @interface IRunHealthCheckResponse + * @interface IShardReplicationPositionsResponse + * @property {Object.|null} [replication_statuses] ShardReplicationPositionsResponse replication_statuses + * @property {Object.|null} [tablet_map] ShardReplicationPositionsResponse tablet_map */ /** - * Constructs a new RunHealthCheckResponse. + * Constructs a new ShardReplicationPositionsResponse. * @memberof vtctldata - * @classdesc Represents a RunHealthCheckResponse. 
- * @implements IRunHealthCheckResponse + * @classdesc Represents a ShardReplicationPositionsResponse. + * @implements IShardReplicationPositionsResponse * @constructor - * @param {vtctldata.IRunHealthCheckResponse=} [properties] Properties to set + * @param {vtctldata.IShardReplicationPositionsResponse=} [properties] Properties to set */ - function RunHealthCheckResponse(properties) { + function ShardReplicationPositionsResponse(properties) { + this.replication_statuses = {}; + this.tablet_map = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -129806,63 +152410,135 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new RunHealthCheckResponse instance using the specified properties. + * ShardReplicationPositionsResponse replication_statuses. + * @member {Object.} replication_statuses + * @memberof vtctldata.ShardReplicationPositionsResponse + * @instance + */ + ShardReplicationPositionsResponse.prototype.replication_statuses = $util.emptyObject; + + /** + * ShardReplicationPositionsResponse tablet_map. + * @member {Object.} tablet_map + * @memberof vtctldata.ShardReplicationPositionsResponse + * @instance + */ + ShardReplicationPositionsResponse.prototype.tablet_map = $util.emptyObject; + + /** + * Creates a new ShardReplicationPositionsResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.RunHealthCheckResponse + * @memberof vtctldata.ShardReplicationPositionsResponse * @static - * @param {vtctldata.IRunHealthCheckResponse=} [properties] Properties to set - * @returns {vtctldata.RunHealthCheckResponse} RunHealthCheckResponse instance + * @param {vtctldata.IShardReplicationPositionsResponse=} [properties] Properties to set + * @returns {vtctldata.ShardReplicationPositionsResponse} ShardReplicationPositionsResponse instance */ - RunHealthCheckResponse.create = function create(properties) { - return new RunHealthCheckResponse(properties); + ShardReplicationPositionsResponse.create = function create(properties) { + return new ShardReplicationPositionsResponse(properties); }; /** - * Encodes the specified RunHealthCheckResponse message. Does not implicitly {@link vtctldata.RunHealthCheckResponse.verify|verify} messages. + * Encodes the specified ShardReplicationPositionsResponse message. Does not implicitly {@link vtctldata.ShardReplicationPositionsResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.RunHealthCheckResponse + * @memberof vtctldata.ShardReplicationPositionsResponse * @static - * @param {vtctldata.IRunHealthCheckResponse} message RunHealthCheckResponse message or plain object to encode + * @param {vtctldata.IShardReplicationPositionsResponse} message ShardReplicationPositionsResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RunHealthCheckResponse.encode = function encode(message, writer) { + ShardReplicationPositionsResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.replication_statuses != null && Object.hasOwnProperty.call(message, "replication_statuses")) + for (let keys = Object.keys(message.replication_statuses), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.replicationdata.Status.encode(message.replication_statuses[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } + if (message.tablet_map != null && Object.hasOwnProperty.call(message, "tablet_map")) + for (let keys = Object.keys(message.tablet_map), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.topodata.Tablet.encode(message.tablet_map[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } return writer; }; /** - * Encodes the specified RunHealthCheckResponse message, length delimited. Does not implicitly {@link vtctldata.RunHealthCheckResponse.verify|verify} messages. + * Encodes the specified ShardReplicationPositionsResponse message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationPositionsResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.RunHealthCheckResponse + * @memberof vtctldata.ShardReplicationPositionsResponse * @static - * @param {vtctldata.IRunHealthCheckResponse} message RunHealthCheckResponse message or plain object to encode + * @param {vtctldata.IShardReplicationPositionsResponse} message ShardReplicationPositionsResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - RunHealthCheckResponse.encodeDelimited = function encodeDelimited(message, writer) { + ShardReplicationPositionsResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a RunHealthCheckResponse message from the specified reader or buffer. + * Decodes a ShardReplicationPositionsResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.RunHealthCheckResponse + * @memberof vtctldata.ShardReplicationPositionsResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.RunHealthCheckResponse} RunHealthCheckResponse + * @returns {vtctldata.ShardReplicationPositionsResponse} ShardReplicationPositionsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RunHealthCheckResponse.decode = function decode(reader, length) { + ShardReplicationPositionsResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.RunHealthCheckResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ShardReplicationPositionsResponse(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + if (message.replication_statuses === $util.emptyObject) + message.replication_statuses = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.replicationdata.Status.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.replication_statuses[key] = value; + break; + } + case 2: { + if (message.tablet_map === $util.emptyObject) + message.tablet_map = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.topodata.Tablet.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.tablet_map[key] = value; + break; + } default: reader.skipType(tag & 7); break; @@ -129872,110 +152548,170 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a RunHealthCheckResponse message from the specified reader or buffer, length delimited. + * Decodes a ShardReplicationPositionsResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.RunHealthCheckResponse + * @memberof vtctldata.ShardReplicationPositionsResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.RunHealthCheckResponse} RunHealthCheckResponse + * @returns {vtctldata.ShardReplicationPositionsResponse} ShardReplicationPositionsResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - RunHealthCheckResponse.decodeDelimited = function decodeDelimited(reader) { + ShardReplicationPositionsResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a RunHealthCheckResponse message. + * Verifies a ShardReplicationPositionsResponse message. * @function verify - * @memberof vtctldata.RunHealthCheckResponse + * @memberof vtctldata.ShardReplicationPositionsResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - RunHealthCheckResponse.verify = function verify(message) { + ShardReplicationPositionsResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.replication_statuses != null && message.hasOwnProperty("replication_statuses")) { + if (!$util.isObject(message.replication_statuses)) + return "replication_statuses: object expected"; + let key = Object.keys(message.replication_statuses); + for (let i = 0; i < key.length; ++i) { + let error = $root.replicationdata.Status.verify(message.replication_statuses[key[i]]); + if (error) + return "replication_statuses." 
+ error; + } + } + if (message.tablet_map != null && message.hasOwnProperty("tablet_map")) { + if (!$util.isObject(message.tablet_map)) + return "tablet_map: object expected"; + let key = Object.keys(message.tablet_map); + for (let i = 0; i < key.length; ++i) { + let error = $root.topodata.Tablet.verify(message.tablet_map[key[i]]); + if (error) + return "tablet_map." + error; + } + } return null; }; /** - * Creates a RunHealthCheckResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ShardReplicationPositionsResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.RunHealthCheckResponse + * @memberof vtctldata.ShardReplicationPositionsResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.RunHealthCheckResponse} RunHealthCheckResponse + * @returns {vtctldata.ShardReplicationPositionsResponse} ShardReplicationPositionsResponse */ - RunHealthCheckResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.RunHealthCheckResponse) + ShardReplicationPositionsResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ShardReplicationPositionsResponse) return object; - return new $root.vtctldata.RunHealthCheckResponse(); + let message = new $root.vtctldata.ShardReplicationPositionsResponse(); + if (object.replication_statuses) { + if (typeof object.replication_statuses !== "object") + throw TypeError(".vtctldata.ShardReplicationPositionsResponse.replication_statuses: object expected"); + message.replication_statuses = {}; + for (let keys = Object.keys(object.replication_statuses), i = 0; i < keys.length; ++i) { + if (typeof object.replication_statuses[keys[i]] !== "object") + throw TypeError(".vtctldata.ShardReplicationPositionsResponse.replication_statuses: object expected"); + message.replication_statuses[keys[i]] = 
$root.replicationdata.Status.fromObject(object.replication_statuses[keys[i]]); + } + } + if (object.tablet_map) { + if (typeof object.tablet_map !== "object") + throw TypeError(".vtctldata.ShardReplicationPositionsResponse.tablet_map: object expected"); + message.tablet_map = {}; + for (let keys = Object.keys(object.tablet_map), i = 0; i < keys.length; ++i) { + if (typeof object.tablet_map[keys[i]] !== "object") + throw TypeError(".vtctldata.ShardReplicationPositionsResponse.tablet_map: object expected"); + message.tablet_map[keys[i]] = $root.topodata.Tablet.fromObject(object.tablet_map[keys[i]]); + } + } + return message; }; /** - * Creates a plain object from a RunHealthCheckResponse message. Also converts values to other types if specified. + * Creates a plain object from a ShardReplicationPositionsResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.RunHealthCheckResponse + * @memberof vtctldata.ShardReplicationPositionsResponse * @static - * @param {vtctldata.RunHealthCheckResponse} message RunHealthCheckResponse + * @param {vtctldata.ShardReplicationPositionsResponse} message ShardReplicationPositionsResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - RunHealthCheckResponse.toObject = function toObject() { - return {}; + ShardReplicationPositionsResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.objects || options.defaults) { + object.replication_statuses = {}; + object.tablet_map = {}; + } + let keys2; + if (message.replication_statuses && (keys2 = Object.keys(message.replication_statuses)).length) { + object.replication_statuses = {}; + for (let j = 0; j < keys2.length; ++j) + object.replication_statuses[keys2[j]] = $root.replicationdata.Status.toObject(message.replication_statuses[keys2[j]], options); + } + if (message.tablet_map && (keys2 = 
Object.keys(message.tablet_map)).length) { + object.tablet_map = {}; + for (let j = 0; j < keys2.length; ++j) + object.tablet_map[keys2[j]] = $root.topodata.Tablet.toObject(message.tablet_map[keys2[j]], options); + } + return object; }; /** - * Converts this RunHealthCheckResponse to JSON. + * Converts this ShardReplicationPositionsResponse to JSON. * @function toJSON - * @memberof vtctldata.RunHealthCheckResponse + * @memberof vtctldata.ShardReplicationPositionsResponse * @instance * @returns {Object.} JSON object */ - RunHealthCheckResponse.prototype.toJSON = function toJSON() { + ShardReplicationPositionsResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for RunHealthCheckResponse + * Gets the default type url for ShardReplicationPositionsResponse * @function getTypeUrl - * @memberof vtctldata.RunHealthCheckResponse + * @memberof vtctldata.ShardReplicationPositionsResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - RunHealthCheckResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ShardReplicationPositionsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.RunHealthCheckResponse"; + return typeUrlPrefix + "/vtctldata.ShardReplicationPositionsResponse"; }; - return RunHealthCheckResponse; + return ShardReplicationPositionsResponse; })(); - vtctldata.SetKeyspaceDurabilityPolicyRequest = (function() { + vtctldata.ShardReplicationRemoveRequest = (function() { /** - * Properties of a SetKeyspaceDurabilityPolicyRequest. + * Properties of a ShardReplicationRemoveRequest. 
* @memberof vtctldata - * @interface ISetKeyspaceDurabilityPolicyRequest - * @property {string|null} [keyspace] SetKeyspaceDurabilityPolicyRequest keyspace - * @property {string|null} [durability_policy] SetKeyspaceDurabilityPolicyRequest durability_policy + * @interface IShardReplicationRemoveRequest + * @property {string|null} [keyspace] ShardReplicationRemoveRequest keyspace + * @property {string|null} [shard] ShardReplicationRemoveRequest shard + * @property {topodata.ITabletAlias|null} [tablet_alias] ShardReplicationRemoveRequest tablet_alias */ /** - * Constructs a new SetKeyspaceDurabilityPolicyRequest. + * Constructs a new ShardReplicationRemoveRequest. * @memberof vtctldata - * @classdesc Represents a SetKeyspaceDurabilityPolicyRequest. - * @implements ISetKeyspaceDurabilityPolicyRequest + * @classdesc Represents a ShardReplicationRemoveRequest. + * @implements IShardReplicationRemoveRequest * @constructor - * @param {vtctldata.ISetKeyspaceDurabilityPolicyRequest=} [properties] Properties to set + * @param {vtctldata.IShardReplicationRemoveRequest=} [properties] Properties to set */ - function SetKeyspaceDurabilityPolicyRequest(properties) { + function ShardReplicationRemoveRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -129983,80 +152719,90 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * SetKeyspaceDurabilityPolicyRequest keyspace. + * ShardReplicationRemoveRequest keyspace. * @member {string} keyspace - * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest + * @memberof vtctldata.ShardReplicationRemoveRequest * @instance */ - SetKeyspaceDurabilityPolicyRequest.prototype.keyspace = ""; + ShardReplicationRemoveRequest.prototype.keyspace = ""; /** - * SetKeyspaceDurabilityPolicyRequest durability_policy. - * @member {string} durability_policy - * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest + * ShardReplicationRemoveRequest shard. 
+ * @member {string} shard + * @memberof vtctldata.ShardReplicationRemoveRequest * @instance */ - SetKeyspaceDurabilityPolicyRequest.prototype.durability_policy = ""; + ShardReplicationRemoveRequest.prototype.shard = ""; /** - * Creates a new SetKeyspaceDurabilityPolicyRequest instance using the specified properties. + * ShardReplicationRemoveRequest tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.ShardReplicationRemoveRequest + * @instance + */ + ShardReplicationRemoveRequest.prototype.tablet_alias = null; + + /** + * Creates a new ShardReplicationRemoveRequest instance using the specified properties. * @function create - * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest + * @memberof vtctldata.ShardReplicationRemoveRequest * @static - * @param {vtctldata.ISetKeyspaceDurabilityPolicyRequest=} [properties] Properties to set - * @returns {vtctldata.SetKeyspaceDurabilityPolicyRequest} SetKeyspaceDurabilityPolicyRequest instance + * @param {vtctldata.IShardReplicationRemoveRequest=} [properties] Properties to set + * @returns {vtctldata.ShardReplicationRemoveRequest} ShardReplicationRemoveRequest instance */ - SetKeyspaceDurabilityPolicyRequest.create = function create(properties) { - return new SetKeyspaceDurabilityPolicyRequest(properties); + ShardReplicationRemoveRequest.create = function create(properties) { + return new ShardReplicationRemoveRequest(properties); }; /** - * Encodes the specified SetKeyspaceDurabilityPolicyRequest message. Does not implicitly {@link vtctldata.SetKeyspaceDurabilityPolicyRequest.verify|verify} messages. + * Encodes the specified ShardReplicationRemoveRequest message. Does not implicitly {@link vtctldata.ShardReplicationRemoveRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest + * @memberof vtctldata.ShardReplicationRemoveRequest * @static - * @param {vtctldata.ISetKeyspaceDurabilityPolicyRequest} message SetKeyspaceDurabilityPolicyRequest message or plain object to encode + * @param {vtctldata.IShardReplicationRemoveRequest} message ShardReplicationRemoveRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetKeyspaceDurabilityPolicyRequest.encode = function encode(message, writer) { + ShardReplicationRemoveRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.durability_policy != null && Object.hasOwnProperty.call(message, "durability_policy")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.durability_policy); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); return writer; }; /** - * Encodes the specified SetKeyspaceDurabilityPolicyRequest message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceDurabilityPolicyRequest.verify|verify} messages. + * Encodes the specified ShardReplicationRemoveRequest message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationRemoveRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest + * @memberof vtctldata.ShardReplicationRemoveRequest * @static - * @param {vtctldata.ISetKeyspaceDurabilityPolicyRequest} message SetKeyspaceDurabilityPolicyRequest message or plain object to encode + * @param {vtctldata.IShardReplicationRemoveRequest} message ShardReplicationRemoveRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetKeyspaceDurabilityPolicyRequest.encodeDelimited = function encodeDelimited(message, writer) { + ShardReplicationRemoveRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SetKeyspaceDurabilityPolicyRequest message from the specified reader or buffer. + * Decodes a ShardReplicationRemoveRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest + * @memberof vtctldata.ShardReplicationRemoveRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SetKeyspaceDurabilityPolicyRequest} SetKeyspaceDurabilityPolicyRequest + * @returns {vtctldata.ShardReplicationRemoveRequest} ShardReplicationRemoveRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetKeyspaceDurabilityPolicyRequest.decode = function decode(reader, length) { + ShardReplicationRemoveRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SetKeyspaceDurabilityPolicyRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ShardReplicationRemoveRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -130065,7 +152811,11 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 2: { - message.durability_policy = reader.string(); + message.shard = reader.string(); + break; + } + case 3: { + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); break; } default: @@ -130077,131 +152827,143 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a SetKeyspaceDurabilityPolicyRequest message from the specified reader or buffer, length delimited. + * Decodes a ShardReplicationRemoveRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest + * @memberof vtctldata.ShardReplicationRemoveRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SetKeyspaceDurabilityPolicyRequest} SetKeyspaceDurabilityPolicyRequest + * @returns {vtctldata.ShardReplicationRemoveRequest} ShardReplicationRemoveRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetKeyspaceDurabilityPolicyRequest.decodeDelimited = function decodeDelimited(reader) { + ShardReplicationRemoveRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SetKeyspaceDurabilityPolicyRequest message. + * Verifies a ShardReplicationRemoveRequest message. 
* @function verify - * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest + * @memberof vtctldata.ShardReplicationRemoveRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SetKeyspaceDurabilityPolicyRequest.verify = function verify(message) { + ShardReplicationRemoveRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.keyspace != null && message.hasOwnProperty("keyspace")) if (!$util.isString(message.keyspace)) return "keyspace: string expected"; - if (message.durability_policy != null && message.hasOwnProperty("durability_policy")) - if (!$util.isString(message.durability_policy)) - return "durability_policy: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; + } return null; }; /** - * Creates a SetKeyspaceDurabilityPolicyRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ShardReplicationRemoveRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest + * @memberof vtctldata.ShardReplicationRemoveRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.SetKeyspaceDurabilityPolicyRequest} SetKeyspaceDurabilityPolicyRequest + * @returns {vtctldata.ShardReplicationRemoveRequest} ShardReplicationRemoveRequest */ - SetKeyspaceDurabilityPolicyRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SetKeyspaceDurabilityPolicyRequest) + ShardReplicationRemoveRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ShardReplicationRemoveRequest) return object; - let message = new $root.vtctldata.SetKeyspaceDurabilityPolicyRequest(); + let message = new $root.vtctldata.ShardReplicationRemoveRequest(); if (object.keyspace != null) message.keyspace = String(object.keyspace); - if (object.durability_policy != null) - message.durability_policy = String(object.durability_policy); + if (object.shard != null) + message.shard = String(object.shard); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.ShardReplicationRemoveRequest.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } return message; }; /** - * Creates a plain object from a SetKeyspaceDurabilityPolicyRequest message. Also converts values to other types if specified. + * Creates a plain object from a ShardReplicationRemoveRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest + * @memberof vtctldata.ShardReplicationRemoveRequest * @static - * @param {vtctldata.SetKeyspaceDurabilityPolicyRequest} message SetKeyspaceDurabilityPolicyRequest + * @param {vtctldata.ShardReplicationRemoveRequest} message ShardReplicationRemoveRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SetKeyspaceDurabilityPolicyRequest.toObject = function toObject(message, options) { + ShardReplicationRemoveRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { object.keyspace = ""; - object.durability_policy = ""; + object.shard = ""; + object.tablet_alias = null; } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; - if (message.durability_policy != null && message.hasOwnProperty("durability_policy")) - object.durability_policy = message.durability_policy; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); return object; }; /** - * Converts this SetKeyspaceDurabilityPolicyRequest to JSON. + * Converts this ShardReplicationRemoveRequest to JSON. 
* @function toJSON - * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest + * @memberof vtctldata.ShardReplicationRemoveRequest * @instance * @returns {Object.} JSON object */ - SetKeyspaceDurabilityPolicyRequest.prototype.toJSON = function toJSON() { + ShardReplicationRemoveRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SetKeyspaceDurabilityPolicyRequest + * Gets the default type url for ShardReplicationRemoveRequest * @function getTypeUrl - * @memberof vtctldata.SetKeyspaceDurabilityPolicyRequest + * @memberof vtctldata.ShardReplicationRemoveRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SetKeyspaceDurabilityPolicyRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ShardReplicationRemoveRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.SetKeyspaceDurabilityPolicyRequest"; + return typeUrlPrefix + "/vtctldata.ShardReplicationRemoveRequest"; }; - return SetKeyspaceDurabilityPolicyRequest; + return ShardReplicationRemoveRequest; })(); - vtctldata.SetKeyspaceDurabilityPolicyResponse = (function() { + vtctldata.ShardReplicationRemoveResponse = (function() { /** - * Properties of a SetKeyspaceDurabilityPolicyResponse. + * Properties of a ShardReplicationRemoveResponse. * @memberof vtctldata - * @interface ISetKeyspaceDurabilityPolicyResponse - * @property {topodata.IKeyspace|null} [keyspace] SetKeyspaceDurabilityPolicyResponse keyspace + * @interface IShardReplicationRemoveResponse */ /** - * Constructs a new SetKeyspaceDurabilityPolicyResponse. + * Constructs a new ShardReplicationRemoveResponse. * @memberof vtctldata - * @classdesc Represents a SetKeyspaceDurabilityPolicyResponse. 
- * @implements ISetKeyspaceDurabilityPolicyResponse + * @classdesc Represents a ShardReplicationRemoveResponse. + * @implements IShardReplicationRemoveResponse * @constructor - * @param {vtctldata.ISetKeyspaceDurabilityPolicyResponse=} [properties] Properties to set + * @param {vtctldata.IShardReplicationRemoveResponse=} [properties] Properties to set */ - function SetKeyspaceDurabilityPolicyResponse(properties) { + function ShardReplicationRemoveResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -130209,77 +152971,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * SetKeyspaceDurabilityPolicyResponse keyspace. - * @member {topodata.IKeyspace|null|undefined} keyspace - * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse - * @instance - */ - SetKeyspaceDurabilityPolicyResponse.prototype.keyspace = null; - - /** - * Creates a new SetKeyspaceDurabilityPolicyResponse instance using the specified properties. + * Creates a new ShardReplicationRemoveResponse instance using the specified properties. * @function create - * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse + * @memberof vtctldata.ShardReplicationRemoveResponse * @static - * @param {vtctldata.ISetKeyspaceDurabilityPolicyResponse=} [properties] Properties to set - * @returns {vtctldata.SetKeyspaceDurabilityPolicyResponse} SetKeyspaceDurabilityPolicyResponse instance + * @param {vtctldata.IShardReplicationRemoveResponse=} [properties] Properties to set + * @returns {vtctldata.ShardReplicationRemoveResponse} ShardReplicationRemoveResponse instance */ - SetKeyspaceDurabilityPolicyResponse.create = function create(properties) { - return new SetKeyspaceDurabilityPolicyResponse(properties); + ShardReplicationRemoveResponse.create = function create(properties) { + return new ShardReplicationRemoveResponse(properties); }; /** - * Encodes the specified SetKeyspaceDurabilityPolicyResponse message. 
Does not implicitly {@link vtctldata.SetKeyspaceDurabilityPolicyResponse.verify|verify} messages. + * Encodes the specified ShardReplicationRemoveResponse message. Does not implicitly {@link vtctldata.ShardReplicationRemoveResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse + * @memberof vtctldata.ShardReplicationRemoveResponse * @static - * @param {vtctldata.ISetKeyspaceDurabilityPolicyResponse} message SetKeyspaceDurabilityPolicyResponse message or plain object to encode + * @param {vtctldata.IShardReplicationRemoveResponse} message ShardReplicationRemoveResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetKeyspaceDurabilityPolicyResponse.encode = function encode(message, writer) { + ShardReplicationRemoveResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - $root.topodata.Keyspace.encode(message.keyspace, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified SetKeyspaceDurabilityPolicyResponse message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceDurabilityPolicyResponse.verify|verify} messages. + * Encodes the specified ShardReplicationRemoveResponse message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationRemoveResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse + * @memberof vtctldata.ShardReplicationRemoveResponse * @static - * @param {vtctldata.ISetKeyspaceDurabilityPolicyResponse} message SetKeyspaceDurabilityPolicyResponse message or plain object to encode + * @param {vtctldata.IShardReplicationRemoveResponse} message ShardReplicationRemoveResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetKeyspaceDurabilityPolicyResponse.encodeDelimited = function encodeDelimited(message, writer) { + ShardReplicationRemoveResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SetKeyspaceDurabilityPolicyResponse message from the specified reader or buffer. + * Decodes a ShardReplicationRemoveResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse + * @memberof vtctldata.ShardReplicationRemoveResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SetKeyspaceDurabilityPolicyResponse} SetKeyspaceDurabilityPolicyResponse + * @returns {vtctldata.ShardReplicationRemoveResponse} ShardReplicationRemoveResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetKeyspaceDurabilityPolicyResponse.decode = function decode(reader, length) { + ShardReplicationRemoveResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SetKeyspaceDurabilityPolicyResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ShardReplicationRemoveResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.keyspace = $root.topodata.Keyspace.decode(reader, reader.uint32()); - break; - } default: reader.skipType(tag & 7); break; @@ -130289,132 +153037,110 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a SetKeyspaceDurabilityPolicyResponse message from the specified reader or buffer, length delimited. + * Decodes a ShardReplicationRemoveResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse + * @memberof vtctldata.ShardReplicationRemoveResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SetKeyspaceDurabilityPolicyResponse} SetKeyspaceDurabilityPolicyResponse + * @returns {vtctldata.ShardReplicationRemoveResponse} ShardReplicationRemoveResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetKeyspaceDurabilityPolicyResponse.decodeDelimited = function decodeDelimited(reader) { + ShardReplicationRemoveResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SetKeyspaceDurabilityPolicyResponse message. + * Verifies a ShardReplicationRemoveResponse message. 
* @function verify - * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse + * @memberof vtctldata.ShardReplicationRemoveResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SetKeyspaceDurabilityPolicyResponse.verify = function verify(message) { + ShardReplicationRemoveResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) { - let error = $root.topodata.Keyspace.verify(message.keyspace); - if (error) - return "keyspace." + error; - } return null; }; /** - * Creates a SetKeyspaceDurabilityPolicyResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ShardReplicationRemoveResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse + * @memberof vtctldata.ShardReplicationRemoveResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.SetKeyspaceDurabilityPolicyResponse} SetKeyspaceDurabilityPolicyResponse + * @returns {vtctldata.ShardReplicationRemoveResponse} ShardReplicationRemoveResponse */ - SetKeyspaceDurabilityPolicyResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SetKeyspaceDurabilityPolicyResponse) + ShardReplicationRemoveResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ShardReplicationRemoveResponse) return object; - let message = new $root.vtctldata.SetKeyspaceDurabilityPolicyResponse(); - if (object.keyspace != null) { - if (typeof object.keyspace !== "object") - throw TypeError(".vtctldata.SetKeyspaceDurabilityPolicyResponse.keyspace: object expected"); - message.keyspace = $root.topodata.Keyspace.fromObject(object.keyspace); - } - return 
message; + return new $root.vtctldata.ShardReplicationRemoveResponse(); }; /** - * Creates a plain object from a SetKeyspaceDurabilityPolicyResponse message. Also converts values to other types if specified. + * Creates a plain object from a ShardReplicationRemoveResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse + * @memberof vtctldata.ShardReplicationRemoveResponse * @static - * @param {vtctldata.SetKeyspaceDurabilityPolicyResponse} message SetKeyspaceDurabilityPolicyResponse + * @param {vtctldata.ShardReplicationRemoveResponse} message ShardReplicationRemoveResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SetKeyspaceDurabilityPolicyResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) - object.keyspace = null; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = $root.topodata.Keyspace.toObject(message.keyspace, options); - return object; + ShardReplicationRemoveResponse.toObject = function toObject() { + return {}; }; /** - * Converts this SetKeyspaceDurabilityPolicyResponse to JSON. + * Converts this ShardReplicationRemoveResponse to JSON. 
* @function toJSON - * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse + * @memberof vtctldata.ShardReplicationRemoveResponse * @instance * @returns {Object.} JSON object */ - SetKeyspaceDurabilityPolicyResponse.prototype.toJSON = function toJSON() { + ShardReplicationRemoveResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SetKeyspaceDurabilityPolicyResponse + * Gets the default type url for ShardReplicationRemoveResponse * @function getTypeUrl - * @memberof vtctldata.SetKeyspaceDurabilityPolicyResponse + * @memberof vtctldata.ShardReplicationRemoveResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SetKeyspaceDurabilityPolicyResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ShardReplicationRemoveResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.SetKeyspaceDurabilityPolicyResponse"; + return typeUrlPrefix + "/vtctldata.ShardReplicationRemoveResponse"; }; - return SetKeyspaceDurabilityPolicyResponse; + return ShardReplicationRemoveResponse; })(); - vtctldata.SetKeyspaceServedFromRequest = (function() { + vtctldata.SleepTabletRequest = (function() { /** - * Properties of a SetKeyspaceServedFromRequest. + * Properties of a SleepTabletRequest. 
* @memberof vtctldata - * @interface ISetKeyspaceServedFromRequest - * @property {string|null} [keyspace] SetKeyspaceServedFromRequest keyspace - * @property {topodata.TabletType|null} [tablet_type] SetKeyspaceServedFromRequest tablet_type - * @property {Array.|null} [cells] SetKeyspaceServedFromRequest cells - * @property {boolean|null} [remove] SetKeyspaceServedFromRequest remove - * @property {string|null} [source_keyspace] SetKeyspaceServedFromRequest source_keyspace + * @interface ISleepTabletRequest + * @property {topodata.ITabletAlias|null} [tablet_alias] SleepTabletRequest tablet_alias + * @property {vttime.IDuration|null} [duration] SleepTabletRequest duration */ /** - * Constructs a new SetKeyspaceServedFromRequest. + * Constructs a new SleepTabletRequest. * @memberof vtctldata - * @classdesc Represents a SetKeyspaceServedFromRequest. - * @implements ISetKeyspaceServedFromRequest + * @classdesc Represents a SleepTabletRequest. + * @implements ISleepTabletRequest * @constructor - * @param {vtctldata.ISetKeyspaceServedFromRequest=} [properties] Properties to set + * @param {vtctldata.ISleepTabletRequest=} [properties] Properties to set */ - function SetKeyspaceServedFromRequest(properties) { - this.cells = []; + function SleepTabletRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -130422,134 +153148,89 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * SetKeyspaceServedFromRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.SetKeyspaceServedFromRequest - * @instance - */ - SetKeyspaceServedFromRequest.prototype.keyspace = ""; - - /** - * SetKeyspaceServedFromRequest tablet_type. - * @member {topodata.TabletType} tablet_type - * @memberof vtctldata.SetKeyspaceServedFromRequest - * @instance - */ - SetKeyspaceServedFromRequest.prototype.tablet_type = 0; - - /** - * SetKeyspaceServedFromRequest cells. 
- * @member {Array.} cells - * @memberof vtctldata.SetKeyspaceServedFromRequest - * @instance - */ - SetKeyspaceServedFromRequest.prototype.cells = $util.emptyArray; - - /** - * SetKeyspaceServedFromRequest remove. - * @member {boolean} remove - * @memberof vtctldata.SetKeyspaceServedFromRequest + * SleepTabletRequest tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.SleepTabletRequest * @instance */ - SetKeyspaceServedFromRequest.prototype.remove = false; + SleepTabletRequest.prototype.tablet_alias = null; /** - * SetKeyspaceServedFromRequest source_keyspace. - * @member {string} source_keyspace - * @memberof vtctldata.SetKeyspaceServedFromRequest + * SleepTabletRequest duration. + * @member {vttime.IDuration|null|undefined} duration + * @memberof vtctldata.SleepTabletRequest * @instance */ - SetKeyspaceServedFromRequest.prototype.source_keyspace = ""; + SleepTabletRequest.prototype.duration = null; /** - * Creates a new SetKeyspaceServedFromRequest instance using the specified properties. + * Creates a new SleepTabletRequest instance using the specified properties. * @function create - * @memberof vtctldata.SetKeyspaceServedFromRequest + * @memberof vtctldata.SleepTabletRequest * @static - * @param {vtctldata.ISetKeyspaceServedFromRequest=} [properties] Properties to set - * @returns {vtctldata.SetKeyspaceServedFromRequest} SetKeyspaceServedFromRequest instance + * @param {vtctldata.ISleepTabletRequest=} [properties] Properties to set + * @returns {vtctldata.SleepTabletRequest} SleepTabletRequest instance */ - SetKeyspaceServedFromRequest.create = function create(properties) { - return new SetKeyspaceServedFromRequest(properties); + SleepTabletRequest.create = function create(properties) { + return new SleepTabletRequest(properties); }; /** - * Encodes the specified SetKeyspaceServedFromRequest message. Does not implicitly {@link vtctldata.SetKeyspaceServedFromRequest.verify|verify} messages. 
+ * Encodes the specified SleepTabletRequest message. Does not implicitly {@link vtctldata.SleepTabletRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.SetKeyspaceServedFromRequest + * @memberof vtctldata.SleepTabletRequest * @static - * @param {vtctldata.ISetKeyspaceServedFromRequest} message SetKeyspaceServedFromRequest message or plain object to encode + * @param {vtctldata.ISleepTabletRequest} message SleepTabletRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetKeyspaceServedFromRequest.encode = function encode(message, writer) { + SleepTabletRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.tablet_type != null && Object.hasOwnProperty.call(message, "tablet_type")) - writer.uint32(/* id 2, wireType 0 =*/16).int32(message.tablet_type); - if (message.cells != null && message.cells.length) - for (let i = 0; i < message.cells.length; ++i) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.cells[i]); - if (message.remove != null && Object.hasOwnProperty.call(message, "remove")) - writer.uint32(/* id 4, wireType 0 =*/32).bool(message.remove); - if (message.source_keyspace != null && Object.hasOwnProperty.call(message, "source_keyspace")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.source_keyspace); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.duration != null && Object.hasOwnProperty.call(message, "duration")) + $root.vttime.Duration.encode(message.duration, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); return writer; }; /** - * Encodes 
the specified SetKeyspaceServedFromRequest message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceServedFromRequest.verify|verify} messages. + * Encodes the specified SleepTabletRequest message, length delimited. Does not implicitly {@link vtctldata.SleepTabletRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.SetKeyspaceServedFromRequest + * @memberof vtctldata.SleepTabletRequest * @static - * @param {vtctldata.ISetKeyspaceServedFromRequest} message SetKeyspaceServedFromRequest message or plain object to encode + * @param {vtctldata.ISleepTabletRequest} message SleepTabletRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetKeyspaceServedFromRequest.encodeDelimited = function encodeDelimited(message, writer) { + SleepTabletRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SetKeyspaceServedFromRequest message from the specified reader or buffer. + * Decodes a SleepTabletRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.SetKeyspaceServedFromRequest + * @memberof vtctldata.SleepTabletRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SetKeyspaceServedFromRequest} SetKeyspaceServedFromRequest + * @returns {vtctldata.SleepTabletRequest} SleepTabletRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetKeyspaceServedFromRequest.decode = function decode(reader, length) { + SleepTabletRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.SetKeyspaceServedFromRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SleepTabletRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); break; } case 2: { - message.tablet_type = reader.int32(); - break; - } - case 3: { - if (!(message.cells && message.cells.length)) - message.cells = []; - message.cells.push(reader.string()); - break; - } - case 4: { - message.remove = reader.bool(); - break; - } - case 5: { - message.source_keyspace = reader.string(); + message.duration = $root.vttime.Duration.decode(reader, reader.uint32()); break; } default: @@ -130561,232 +153242,140 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a SetKeyspaceServedFromRequest message from the specified reader or buffer, length delimited. + * Decodes a SleepTabletRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.SetKeyspaceServedFromRequest + * @memberof vtctldata.SleepTabletRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SetKeyspaceServedFromRequest} SetKeyspaceServedFromRequest + * @returns {vtctldata.SleepTabletRequest} SleepTabletRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetKeyspaceServedFromRequest.decodeDelimited = function decodeDelimited(reader) { + SleepTabletRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SetKeyspaceServedFromRequest message. + * Verifies a SleepTabletRequest message. 
* @function verify - * @memberof vtctldata.SetKeyspaceServedFromRequest + * @memberof vtctldata.SleepTabletRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SetKeyspaceServedFromRequest.verify = function verify(message) { + SleepTabletRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) - switch (message.tablet_type) { - default: - return "tablet_type: enum value expected"; - case 0: - case 1: - case 1: - case 2: - case 3: - case 3: - case 4: - case 5: - case 6: - case 7: - case 8: - break; - } - if (message.cells != null && message.hasOwnProperty("cells")) { - if (!Array.isArray(message.cells)) - return "cells: array expected"; - for (let i = 0; i < message.cells.length; ++i) - if (!$util.isString(message.cells[i])) - return "cells: string[] expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." + error; + } + if (message.duration != null && message.hasOwnProperty("duration")) { + let error = $root.vttime.Duration.verify(message.duration); + if (error) + return "duration." + error; } - if (message.remove != null && message.hasOwnProperty("remove")) - if (typeof message.remove !== "boolean") - return "remove: boolean expected"; - if (message.source_keyspace != null && message.hasOwnProperty("source_keyspace")) - if (!$util.isString(message.source_keyspace)) - return "source_keyspace: string expected"; return null; }; /** - * Creates a SetKeyspaceServedFromRequest message from a plain object. 
Also converts values to their respective internal types. + * Creates a SleepTabletRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.SetKeyspaceServedFromRequest + * @memberof vtctldata.SleepTabletRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.SetKeyspaceServedFromRequest} SetKeyspaceServedFromRequest + * @returns {vtctldata.SleepTabletRequest} SleepTabletRequest */ - SetKeyspaceServedFromRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SetKeyspaceServedFromRequest) + SleepTabletRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.SleepTabletRequest) return object; - let message = new $root.vtctldata.SetKeyspaceServedFromRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - switch (object.tablet_type) { - default: - if (typeof object.tablet_type === "number") { - message.tablet_type = object.tablet_type; - break; - } - break; - case "UNKNOWN": - case 0: - message.tablet_type = 0; - break; - case "PRIMARY": - case 1: - message.tablet_type = 1; - break; - case "MASTER": - case 1: - message.tablet_type = 1; - break; - case "REPLICA": - case 2: - message.tablet_type = 2; - break; - case "RDONLY": - case 3: - message.tablet_type = 3; - break; - case "BATCH": - case 3: - message.tablet_type = 3; - break; - case "SPARE": - case 4: - message.tablet_type = 4; - break; - case "EXPERIMENTAL": - case 5: - message.tablet_type = 5; - break; - case "BACKUP": - case 6: - message.tablet_type = 6; - break; - case "RESTORE": - case 7: - message.tablet_type = 7; - break; - case "DRAINED": - case 8: - message.tablet_type = 8; - break; + let message = new $root.vtctldata.SleepTabletRequest(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.SleepTabletRequest.tablet_alias: object 
expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); } - if (object.cells) { - if (!Array.isArray(object.cells)) - throw TypeError(".vtctldata.SetKeyspaceServedFromRequest.cells: array expected"); - message.cells = []; - for (let i = 0; i < object.cells.length; ++i) - message.cells[i] = String(object.cells[i]); + if (object.duration != null) { + if (typeof object.duration !== "object") + throw TypeError(".vtctldata.SleepTabletRequest.duration: object expected"); + message.duration = $root.vttime.Duration.fromObject(object.duration); } - if (object.remove != null) - message.remove = Boolean(object.remove); - if (object.source_keyspace != null) - message.source_keyspace = String(object.source_keyspace); return message; }; /** - * Creates a plain object from a SetKeyspaceServedFromRequest message. Also converts values to other types if specified. + * Creates a plain object from a SleepTabletRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.SetKeyspaceServedFromRequest + * @memberof vtctldata.SleepTabletRequest * @static - * @param {vtctldata.SetKeyspaceServedFromRequest} message SetKeyspaceServedFromRequest + * @param {vtctldata.SleepTabletRequest} message SleepTabletRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SetKeyspaceServedFromRequest.toObject = function toObject(message, options) { + SleepTabletRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.cells = []; if (options.defaults) { - object.keyspace = ""; - object.tablet_type = options.enums === String ? 
"UNKNOWN" : 0; - object.remove = false; - object.source_keyspace = ""; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) - object.tablet_type = options.enums === String ? $root.topodata.TabletType[message.tablet_type] === undefined ? message.tablet_type : $root.topodata.TabletType[message.tablet_type] : message.tablet_type; - if (message.cells && message.cells.length) { - object.cells = []; - for (let j = 0; j < message.cells.length; ++j) - object.cells[j] = message.cells[j]; + object.tablet_alias = null; + object.duration = null; } - if (message.remove != null && message.hasOwnProperty("remove")) - object.remove = message.remove; - if (message.source_keyspace != null && message.hasOwnProperty("source_keyspace")) - object.source_keyspace = message.source_keyspace; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (message.duration != null && message.hasOwnProperty("duration")) + object.duration = $root.vttime.Duration.toObject(message.duration, options); return object; }; /** - * Converts this SetKeyspaceServedFromRequest to JSON. + * Converts this SleepTabletRequest to JSON. 
* @function toJSON - * @memberof vtctldata.SetKeyspaceServedFromRequest + * @memberof vtctldata.SleepTabletRequest * @instance * @returns {Object.} JSON object */ - SetKeyspaceServedFromRequest.prototype.toJSON = function toJSON() { + SleepTabletRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SetKeyspaceServedFromRequest + * Gets the default type url for SleepTabletRequest * @function getTypeUrl - * @memberof vtctldata.SetKeyspaceServedFromRequest + * @memberof vtctldata.SleepTabletRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SetKeyspaceServedFromRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SleepTabletRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.SetKeyspaceServedFromRequest"; + return typeUrlPrefix + "/vtctldata.SleepTabletRequest"; }; - return SetKeyspaceServedFromRequest; + return SleepTabletRequest; })(); - vtctldata.SetKeyspaceServedFromResponse = (function() { + vtctldata.SleepTabletResponse = (function() { /** - * Properties of a SetKeyspaceServedFromResponse. + * Properties of a SleepTabletResponse. * @memberof vtctldata - * @interface ISetKeyspaceServedFromResponse - * @property {topodata.IKeyspace|null} [keyspace] SetKeyspaceServedFromResponse keyspace + * @interface ISleepTabletResponse */ /** - * Constructs a new SetKeyspaceServedFromResponse. + * Constructs a new SleepTabletResponse. * @memberof vtctldata - * @classdesc Represents a SetKeyspaceServedFromResponse. - * @implements ISetKeyspaceServedFromResponse + * @classdesc Represents a SleepTabletResponse. 
+ * @implements ISleepTabletResponse * @constructor - * @param {vtctldata.ISetKeyspaceServedFromResponse=} [properties] Properties to set + * @param {vtctldata.ISleepTabletResponse=} [properties] Properties to set */ - function SetKeyspaceServedFromResponse(properties) { + function SleepTabletResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -130794,77 +153383,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * SetKeyspaceServedFromResponse keyspace. - * @member {topodata.IKeyspace|null|undefined} keyspace - * @memberof vtctldata.SetKeyspaceServedFromResponse - * @instance - */ - SetKeyspaceServedFromResponse.prototype.keyspace = null; - - /** - * Creates a new SetKeyspaceServedFromResponse instance using the specified properties. + * Creates a new SleepTabletResponse instance using the specified properties. * @function create - * @memberof vtctldata.SetKeyspaceServedFromResponse + * @memberof vtctldata.SleepTabletResponse * @static - * @param {vtctldata.ISetKeyspaceServedFromResponse=} [properties] Properties to set - * @returns {vtctldata.SetKeyspaceServedFromResponse} SetKeyspaceServedFromResponse instance + * @param {vtctldata.ISleepTabletResponse=} [properties] Properties to set + * @returns {vtctldata.SleepTabletResponse} SleepTabletResponse instance */ - SetKeyspaceServedFromResponse.create = function create(properties) { - return new SetKeyspaceServedFromResponse(properties); + SleepTabletResponse.create = function create(properties) { + return new SleepTabletResponse(properties); }; /** - * Encodes the specified SetKeyspaceServedFromResponse message. Does not implicitly {@link vtctldata.SetKeyspaceServedFromResponse.verify|verify} messages. + * Encodes the specified SleepTabletResponse message. Does not implicitly {@link vtctldata.SleepTabletResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.SetKeyspaceServedFromResponse + * @memberof vtctldata.SleepTabletResponse * @static - * @param {vtctldata.ISetKeyspaceServedFromResponse} message SetKeyspaceServedFromResponse message or plain object to encode + * @param {vtctldata.ISleepTabletResponse} message SleepTabletResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetKeyspaceServedFromResponse.encode = function encode(message, writer) { + SleepTabletResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - $root.topodata.Keyspace.encode(message.keyspace, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified SetKeyspaceServedFromResponse message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceServedFromResponse.verify|verify} messages. + * Encodes the specified SleepTabletResponse message, length delimited. Does not implicitly {@link vtctldata.SleepTabletResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.SetKeyspaceServedFromResponse + * @memberof vtctldata.SleepTabletResponse * @static - * @param {vtctldata.ISetKeyspaceServedFromResponse} message SetKeyspaceServedFromResponse message or plain object to encode + * @param {vtctldata.ISleepTabletResponse} message SleepTabletResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetKeyspaceServedFromResponse.encodeDelimited = function encodeDelimited(message, writer) { + SleepTabletResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SetKeyspaceServedFromResponse message from the specified reader or buffer. 
+ * Decodes a SleepTabletResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.SetKeyspaceServedFromResponse + * @memberof vtctldata.SleepTabletResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SetKeyspaceServedFromResponse} SetKeyspaceServedFromResponse + * @returns {vtctldata.SleepTabletResponse} SleepTabletResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetKeyspaceServedFromResponse.decode = function decode(reader, length) { + SleepTabletResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SetKeyspaceServedFromResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SleepTabletResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.keyspace = $root.topodata.Keyspace.decode(reader, reader.uint32()); - break; - } default: reader.skipType(tag & 7); break; @@ -130874,128 +153449,116 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a SetKeyspaceServedFromResponse message from the specified reader or buffer, length delimited. + * Decodes a SleepTabletResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.SetKeyspaceServedFromResponse + * @memberof vtctldata.SleepTabletResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SetKeyspaceServedFromResponse} SetKeyspaceServedFromResponse + * @returns {vtctldata.SleepTabletResponse} SleepTabletResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetKeyspaceServedFromResponse.decodeDelimited = function decodeDelimited(reader) { + SleepTabletResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SetKeyspaceServedFromResponse message. + * Verifies a SleepTabletResponse message. * @function verify - * @memberof vtctldata.SetKeyspaceServedFromResponse + * @memberof vtctldata.SleepTabletResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SetKeyspaceServedFromResponse.verify = function verify(message) { + SleepTabletResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) { - let error = $root.topodata.Keyspace.verify(message.keyspace); - if (error) - return "keyspace." + error; - } return null; }; /** - * Creates a SetKeyspaceServedFromResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SleepTabletResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.SetKeyspaceServedFromResponse + * @memberof vtctldata.SleepTabletResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.SetKeyspaceServedFromResponse} SetKeyspaceServedFromResponse + * @returns {vtctldata.SleepTabletResponse} SleepTabletResponse */ - SetKeyspaceServedFromResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SetKeyspaceServedFromResponse) + SleepTabletResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.SleepTabletResponse) return object; - let message = new $root.vtctldata.SetKeyspaceServedFromResponse(); - if (object.keyspace != null) { - if (typeof object.keyspace !== "object") - throw TypeError(".vtctldata.SetKeyspaceServedFromResponse.keyspace: object expected"); - message.keyspace = $root.topodata.Keyspace.fromObject(object.keyspace); - } - return message; + return new $root.vtctldata.SleepTabletResponse(); }; /** - * Creates a plain object from a SetKeyspaceServedFromResponse message. Also converts values to other types if specified. + * Creates a plain object from a SleepTabletResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.SetKeyspaceServedFromResponse - * @static - * @param {vtctldata.SetKeyspaceServedFromResponse} message SetKeyspaceServedFromResponse - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - SetKeyspaceServedFromResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) - object.keyspace = null; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = $root.topodata.Keyspace.toObject(message.keyspace, options); - return object; + * @memberof vtctldata.SleepTabletResponse + * @static + * @param {vtctldata.SleepTabletResponse} message SleepTabletResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + SleepTabletResponse.toObject = function toObject() { + return {}; }; /** - * Converts this SetKeyspaceServedFromResponse to JSON. + * Converts this SleepTabletResponse to JSON. 
* @function toJSON - * @memberof vtctldata.SetKeyspaceServedFromResponse + * @memberof vtctldata.SleepTabletResponse * @instance * @returns {Object.} JSON object */ - SetKeyspaceServedFromResponse.prototype.toJSON = function toJSON() { + SleepTabletResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SetKeyspaceServedFromResponse + * Gets the default type url for SleepTabletResponse * @function getTypeUrl - * @memberof vtctldata.SetKeyspaceServedFromResponse + * @memberof vtctldata.SleepTabletResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SetKeyspaceServedFromResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SleepTabletResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.SetKeyspaceServedFromResponse"; + return typeUrlPrefix + "/vtctldata.SleepTabletResponse"; }; - return SetKeyspaceServedFromResponse; + return SleepTabletResponse; })(); - vtctldata.SetKeyspaceShardingInfoRequest = (function() { + vtctldata.SourceShardAddRequest = (function() { /** - * Properties of a SetKeyspaceShardingInfoRequest. + * Properties of a SourceShardAddRequest. 
* @memberof vtctldata - * @interface ISetKeyspaceShardingInfoRequest - * @property {string|null} [keyspace] SetKeyspaceShardingInfoRequest keyspace - * @property {boolean|null} [force] SetKeyspaceShardingInfoRequest force + * @interface ISourceShardAddRequest + * @property {string|null} [keyspace] SourceShardAddRequest keyspace + * @property {string|null} [shard] SourceShardAddRequest shard + * @property {number|null} [uid] SourceShardAddRequest uid + * @property {string|null} [source_keyspace] SourceShardAddRequest source_keyspace + * @property {string|null} [source_shard] SourceShardAddRequest source_shard + * @property {topodata.IKeyRange|null} [key_range] SourceShardAddRequest key_range + * @property {Array.|null} [tables] SourceShardAddRequest tables */ /** - * Constructs a new SetKeyspaceShardingInfoRequest. + * Constructs a new SourceShardAddRequest. * @memberof vtctldata - * @classdesc Represents a SetKeyspaceShardingInfoRequest. - * @implements ISetKeyspaceShardingInfoRequest + * @classdesc Represents a SourceShardAddRequest. + * @implements ISourceShardAddRequest * @constructor - * @param {vtctldata.ISetKeyspaceShardingInfoRequest=} [properties] Properties to set + * @param {vtctldata.ISourceShardAddRequest=} [properties] Properties to set */ - function SetKeyspaceShardingInfoRequest(properties) { + function SourceShardAddRequest(properties) { + this.tables = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -131003,80 +153566,131 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * SetKeyspaceShardingInfoRequest keyspace. + * SourceShardAddRequest keyspace. * @member {string} keyspace - * @memberof vtctldata.SetKeyspaceShardingInfoRequest + * @memberof vtctldata.SourceShardAddRequest * @instance */ - SetKeyspaceShardingInfoRequest.prototype.keyspace = ""; + SourceShardAddRequest.prototype.keyspace = ""; /** - * SetKeyspaceShardingInfoRequest force. 
- * @member {boolean} force - * @memberof vtctldata.SetKeyspaceShardingInfoRequest + * SourceShardAddRequest shard. + * @member {string} shard + * @memberof vtctldata.SourceShardAddRequest * @instance */ - SetKeyspaceShardingInfoRequest.prototype.force = false; + SourceShardAddRequest.prototype.shard = ""; /** - * Creates a new SetKeyspaceShardingInfoRequest instance using the specified properties. + * SourceShardAddRequest uid. + * @member {number} uid + * @memberof vtctldata.SourceShardAddRequest + * @instance + */ + SourceShardAddRequest.prototype.uid = 0; + + /** + * SourceShardAddRequest source_keyspace. + * @member {string} source_keyspace + * @memberof vtctldata.SourceShardAddRequest + * @instance + */ + SourceShardAddRequest.prototype.source_keyspace = ""; + + /** + * SourceShardAddRequest source_shard. + * @member {string} source_shard + * @memberof vtctldata.SourceShardAddRequest + * @instance + */ + SourceShardAddRequest.prototype.source_shard = ""; + + /** + * SourceShardAddRequest key_range. + * @member {topodata.IKeyRange|null|undefined} key_range + * @memberof vtctldata.SourceShardAddRequest + * @instance + */ + SourceShardAddRequest.prototype.key_range = null; + + /** + * SourceShardAddRequest tables. + * @member {Array.} tables + * @memberof vtctldata.SourceShardAddRequest + * @instance + */ + SourceShardAddRequest.prototype.tables = $util.emptyArray; + + /** + * Creates a new SourceShardAddRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.SetKeyspaceShardingInfoRequest + * @memberof vtctldata.SourceShardAddRequest * @static - * @param {vtctldata.ISetKeyspaceShardingInfoRequest=} [properties] Properties to set - * @returns {vtctldata.SetKeyspaceShardingInfoRequest} SetKeyspaceShardingInfoRequest instance + * @param {vtctldata.ISourceShardAddRequest=} [properties] Properties to set + * @returns {vtctldata.SourceShardAddRequest} SourceShardAddRequest instance */ - SetKeyspaceShardingInfoRequest.create = function create(properties) { - return new SetKeyspaceShardingInfoRequest(properties); + SourceShardAddRequest.create = function create(properties) { + return new SourceShardAddRequest(properties); }; /** - * Encodes the specified SetKeyspaceShardingInfoRequest message. Does not implicitly {@link vtctldata.SetKeyspaceShardingInfoRequest.verify|verify} messages. + * Encodes the specified SourceShardAddRequest message. Does not implicitly {@link vtctldata.SourceShardAddRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.SetKeyspaceShardingInfoRequest + * @memberof vtctldata.SourceShardAddRequest * @static - * @param {vtctldata.ISetKeyspaceShardingInfoRequest} message SetKeyspaceShardingInfoRequest message or plain object to encode + * @param {vtctldata.ISourceShardAddRequest} message SourceShardAddRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetKeyspaceShardingInfoRequest.encode = function encode(message, writer) { + SourceShardAddRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.force != null && Object.hasOwnProperty.call(message, "force")) - writer.uint32(/* id 4, wireType 0 =*/32).bool(message.force); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.uid != null && Object.hasOwnProperty.call(message, "uid")) + writer.uint32(/* id 3, wireType 0 =*/24).int32(message.uid); + if (message.source_keyspace != null && Object.hasOwnProperty.call(message, "source_keyspace")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.source_keyspace); + if (message.source_shard != null && Object.hasOwnProperty.call(message, "source_shard")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.source_shard); + if (message.key_range != null && Object.hasOwnProperty.call(message, "key_range")) + $root.topodata.KeyRange.encode(message.key_range, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); + if (message.tables != null && message.tables.length) + for (let i = 0; i < message.tables.length; ++i) + writer.uint32(/* id 7, wireType 2 =*/58).string(message.tables[i]); return writer; }; /** - * Encodes the specified 
SetKeyspaceShardingInfoRequest message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceShardingInfoRequest.verify|verify} messages. + * Encodes the specified SourceShardAddRequest message, length delimited. Does not implicitly {@link vtctldata.SourceShardAddRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.SetKeyspaceShardingInfoRequest + * @memberof vtctldata.SourceShardAddRequest * @static - * @param {vtctldata.ISetKeyspaceShardingInfoRequest} message SetKeyspaceShardingInfoRequest message or plain object to encode + * @param {vtctldata.ISourceShardAddRequest} message SourceShardAddRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetKeyspaceShardingInfoRequest.encodeDelimited = function encodeDelimited(message, writer) { + SourceShardAddRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SetKeyspaceShardingInfoRequest message from the specified reader or buffer. + * Decodes a SourceShardAddRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.SetKeyspaceShardingInfoRequest + * @memberof vtctldata.SourceShardAddRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SetKeyspaceShardingInfoRequest} SetKeyspaceShardingInfoRequest + * @returns {vtctldata.SourceShardAddRequest} SourceShardAddRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetKeyspaceShardingInfoRequest.decode = function decode(reader, length) { + SourceShardAddRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.SetKeyspaceShardingInfoRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SourceShardAddRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -131084,8 +153698,30 @@ export const vtctldata = $root.vtctldata = (() => { message.keyspace = reader.string(); break; } + case 2: { + message.shard = reader.string(); + break; + } + case 3: { + message.uid = reader.int32(); + break; + } case 4: { - message.force = reader.bool(); + message.source_keyspace = reader.string(); + break; + } + case 5: { + message.source_shard = reader.string(); + break; + } + case 6: { + message.key_range = $root.topodata.KeyRange.decode(reader, reader.uint32()); + break; + } + case 7: { + if (!(message.tables && message.tables.length)) + message.tables = []; + message.tables.push(reader.string()); break; } default: @@ -131097,131 +153733,189 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a SetKeyspaceShardingInfoRequest message from the specified reader or buffer, length delimited. + * Decodes a SourceShardAddRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.SetKeyspaceShardingInfoRequest + * @memberof vtctldata.SourceShardAddRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SetKeyspaceShardingInfoRequest} SetKeyspaceShardingInfoRequest + * @returns {vtctldata.SourceShardAddRequest} SourceShardAddRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetKeyspaceShardingInfoRequest.decodeDelimited = function decodeDelimited(reader) { + SourceShardAddRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SetKeyspaceShardingInfoRequest message. + * Verifies a SourceShardAddRequest message. * @function verify - * @memberof vtctldata.SetKeyspaceShardingInfoRequest + * @memberof vtctldata.SourceShardAddRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SetKeyspaceShardingInfoRequest.verify = function verify(message) { + SourceShardAddRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.keyspace != null && message.hasOwnProperty("keyspace")) if (!$util.isString(message.keyspace)) return "keyspace: string expected"; - if (message.force != null && message.hasOwnProperty("force")) - if (typeof message.force !== "boolean") - return "force: boolean expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.uid != null && message.hasOwnProperty("uid")) + if (!$util.isInteger(message.uid)) + return "uid: integer expected"; + if (message.source_keyspace != null && message.hasOwnProperty("source_keyspace")) + 
if (!$util.isString(message.source_keyspace)) + return "source_keyspace: string expected"; + if (message.source_shard != null && message.hasOwnProperty("source_shard")) + if (!$util.isString(message.source_shard)) + return "source_shard: string expected"; + if (message.key_range != null && message.hasOwnProperty("key_range")) { + let error = $root.topodata.KeyRange.verify(message.key_range); + if (error) + return "key_range." + error; + } + if (message.tables != null && message.hasOwnProperty("tables")) { + if (!Array.isArray(message.tables)) + return "tables: array expected"; + for (let i = 0; i < message.tables.length; ++i) + if (!$util.isString(message.tables[i])) + return "tables: string[] expected"; + } return null; }; /** - * Creates a SetKeyspaceShardingInfoRequest message from a plain object. Also converts values to their respective internal types. + * Creates a SourceShardAddRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.SetKeyspaceShardingInfoRequest + * @memberof vtctldata.SourceShardAddRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.SetKeyspaceShardingInfoRequest} SetKeyspaceShardingInfoRequest + * @returns {vtctldata.SourceShardAddRequest} SourceShardAddRequest */ - SetKeyspaceShardingInfoRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SetKeyspaceShardingInfoRequest) + SourceShardAddRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.SourceShardAddRequest) return object; - let message = new $root.vtctldata.SetKeyspaceShardingInfoRequest(); + let message = new $root.vtctldata.SourceShardAddRequest(); if (object.keyspace != null) message.keyspace = String(object.keyspace); - if (object.force != null) - message.force = Boolean(object.force); + if (object.shard != null) + message.shard = String(object.shard); + if (object.uid != null) + message.uid = 
object.uid | 0; + if (object.source_keyspace != null) + message.source_keyspace = String(object.source_keyspace); + if (object.source_shard != null) + message.source_shard = String(object.source_shard); + if (object.key_range != null) { + if (typeof object.key_range !== "object") + throw TypeError(".vtctldata.SourceShardAddRequest.key_range: object expected"); + message.key_range = $root.topodata.KeyRange.fromObject(object.key_range); + } + if (object.tables) { + if (!Array.isArray(object.tables)) + throw TypeError(".vtctldata.SourceShardAddRequest.tables: array expected"); + message.tables = []; + for (let i = 0; i < object.tables.length; ++i) + message.tables[i] = String(object.tables[i]); + } return message; }; /** - * Creates a plain object from a SetKeyspaceShardingInfoRequest message. Also converts values to other types if specified. + * Creates a plain object from a SourceShardAddRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.SetKeyspaceShardingInfoRequest + * @memberof vtctldata.SourceShardAddRequest * @static - * @param {vtctldata.SetKeyspaceShardingInfoRequest} message SetKeyspaceShardingInfoRequest + * @param {vtctldata.SourceShardAddRequest} message SourceShardAddRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SetKeyspaceShardingInfoRequest.toObject = function toObject(message, options) { + SourceShardAddRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) + object.tables = []; if (options.defaults) { object.keyspace = ""; - object.force = false; + object.shard = ""; + object.uid = 0; + object.source_keyspace = ""; + object.source_shard = ""; + object.key_range = null; } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; - if (message.force != null && 
message.hasOwnProperty("force")) - object.force = message.force; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.uid != null && message.hasOwnProperty("uid")) + object.uid = message.uid; + if (message.source_keyspace != null && message.hasOwnProperty("source_keyspace")) + object.source_keyspace = message.source_keyspace; + if (message.source_shard != null && message.hasOwnProperty("source_shard")) + object.source_shard = message.source_shard; + if (message.key_range != null && message.hasOwnProperty("key_range")) + object.key_range = $root.topodata.KeyRange.toObject(message.key_range, options); + if (message.tables && message.tables.length) { + object.tables = []; + for (let j = 0; j < message.tables.length; ++j) + object.tables[j] = message.tables[j]; + } return object; }; /** - * Converts this SetKeyspaceShardingInfoRequest to JSON. + * Converts this SourceShardAddRequest to JSON. * @function toJSON - * @memberof vtctldata.SetKeyspaceShardingInfoRequest + * @memberof vtctldata.SourceShardAddRequest * @instance * @returns {Object.} JSON object */ - SetKeyspaceShardingInfoRequest.prototype.toJSON = function toJSON() { + SourceShardAddRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SetKeyspaceShardingInfoRequest + * Gets the default type url for SourceShardAddRequest * @function getTypeUrl - * @memberof vtctldata.SetKeyspaceShardingInfoRequest + * @memberof vtctldata.SourceShardAddRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SetKeyspaceShardingInfoRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SourceShardAddRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + 
"/vtctldata.SetKeyspaceShardingInfoRequest"; + return typeUrlPrefix + "/vtctldata.SourceShardAddRequest"; }; - return SetKeyspaceShardingInfoRequest; + return SourceShardAddRequest; })(); - vtctldata.SetKeyspaceShardingInfoResponse = (function() { + vtctldata.SourceShardAddResponse = (function() { /** - * Properties of a SetKeyspaceShardingInfoResponse. + * Properties of a SourceShardAddResponse. * @memberof vtctldata - * @interface ISetKeyspaceShardingInfoResponse - * @property {topodata.IKeyspace|null} [keyspace] SetKeyspaceShardingInfoResponse keyspace + * @interface ISourceShardAddResponse + * @property {topodata.IShard|null} [shard] SourceShardAddResponse shard */ /** - * Constructs a new SetKeyspaceShardingInfoResponse. + * Constructs a new SourceShardAddResponse. * @memberof vtctldata - * @classdesc Represents a SetKeyspaceShardingInfoResponse. - * @implements ISetKeyspaceShardingInfoResponse + * @classdesc Represents a SourceShardAddResponse. + * @implements ISourceShardAddResponse * @constructor - * @param {vtctldata.ISetKeyspaceShardingInfoResponse=} [properties] Properties to set + * @param {vtctldata.ISourceShardAddResponse=} [properties] Properties to set */ - function SetKeyspaceShardingInfoResponse(properties) { + function SourceShardAddResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -131229,75 +153923,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * SetKeyspaceShardingInfoResponse keyspace. - * @member {topodata.IKeyspace|null|undefined} keyspace - * @memberof vtctldata.SetKeyspaceShardingInfoResponse + * SourceShardAddResponse shard. 
+ * @member {topodata.IShard|null|undefined} shard + * @memberof vtctldata.SourceShardAddResponse * @instance */ - SetKeyspaceShardingInfoResponse.prototype.keyspace = null; + SourceShardAddResponse.prototype.shard = null; /** - * Creates a new SetKeyspaceShardingInfoResponse instance using the specified properties. + * Creates a new SourceShardAddResponse instance using the specified properties. * @function create - * @memberof vtctldata.SetKeyspaceShardingInfoResponse + * @memberof vtctldata.SourceShardAddResponse * @static - * @param {vtctldata.ISetKeyspaceShardingInfoResponse=} [properties] Properties to set - * @returns {vtctldata.SetKeyspaceShardingInfoResponse} SetKeyspaceShardingInfoResponse instance + * @param {vtctldata.ISourceShardAddResponse=} [properties] Properties to set + * @returns {vtctldata.SourceShardAddResponse} SourceShardAddResponse instance */ - SetKeyspaceShardingInfoResponse.create = function create(properties) { - return new SetKeyspaceShardingInfoResponse(properties); + SourceShardAddResponse.create = function create(properties) { + return new SourceShardAddResponse(properties); }; /** - * Encodes the specified SetKeyspaceShardingInfoResponse message. Does not implicitly {@link vtctldata.SetKeyspaceShardingInfoResponse.verify|verify} messages. + * Encodes the specified SourceShardAddResponse message. Does not implicitly {@link vtctldata.SourceShardAddResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.SetKeyspaceShardingInfoResponse + * @memberof vtctldata.SourceShardAddResponse * @static - * @param {vtctldata.ISetKeyspaceShardingInfoResponse} message SetKeyspaceShardingInfoResponse message or plain object to encode + * @param {vtctldata.ISourceShardAddResponse} message SourceShardAddResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetKeyspaceShardingInfoResponse.encode = function encode(message, writer) { + SourceShardAddResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - $root.topodata.Keyspace.encode(message.keyspace, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + $root.topodata.Shard.encode(message.shard, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified SetKeyspaceShardingInfoResponse message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceShardingInfoResponse.verify|verify} messages. + * Encodes the specified SourceShardAddResponse message, length delimited. Does not implicitly {@link vtctldata.SourceShardAddResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.SetKeyspaceShardingInfoResponse + * @memberof vtctldata.SourceShardAddResponse * @static - * @param {vtctldata.ISetKeyspaceShardingInfoResponse} message SetKeyspaceShardingInfoResponse message or plain object to encode + * @param {vtctldata.ISourceShardAddResponse} message SourceShardAddResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetKeyspaceShardingInfoResponse.encodeDelimited = function encodeDelimited(message, writer) { + SourceShardAddResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SetKeyspaceShardingInfoResponse message from the specified reader or buffer. + * Decodes a SourceShardAddResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.SetKeyspaceShardingInfoResponse + * @memberof vtctldata.SourceShardAddResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SetKeyspaceShardingInfoResponse} SetKeyspaceShardingInfoResponse + * @returns {vtctldata.SourceShardAddResponse} SourceShardAddResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetKeyspaceShardingInfoResponse.decode = function decode(reader, length) { + SourceShardAddResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SetKeyspaceShardingInfoResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.SourceShardAddResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = $root.topodata.Keyspace.decode(reader, reader.uint32()); + message.shard = $root.topodata.Shard.decode(reader, reader.uint32()); break; } default: @@ -131309,129 +154003,129 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a SetKeyspaceShardingInfoResponse message from the specified reader or buffer, length delimited. + * Decodes a SourceShardAddResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.SetKeyspaceShardingInfoResponse + * @memberof vtctldata.SourceShardAddResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SetKeyspaceShardingInfoResponse} SetKeyspaceShardingInfoResponse + * @returns {vtctldata.SourceShardAddResponse} SourceShardAddResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetKeyspaceShardingInfoResponse.decodeDelimited = function decodeDelimited(reader) { + SourceShardAddResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SetKeyspaceShardingInfoResponse message. + * Verifies a SourceShardAddResponse message. 
* @function verify - * @memberof vtctldata.SetKeyspaceShardingInfoResponse + * @memberof vtctldata.SourceShardAddResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SetKeyspaceShardingInfoResponse.verify = function verify(message) { + SourceShardAddResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) { - let error = $root.topodata.Keyspace.verify(message.keyspace); + if (message.shard != null && message.hasOwnProperty("shard")) { + let error = $root.topodata.Shard.verify(message.shard); if (error) - return "keyspace." + error; + return "shard." + error; } return null; }; /** - * Creates a SetKeyspaceShardingInfoResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SourceShardAddResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.SetKeyspaceShardingInfoResponse + * @memberof vtctldata.SourceShardAddResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.SetKeyspaceShardingInfoResponse} SetKeyspaceShardingInfoResponse + * @returns {vtctldata.SourceShardAddResponse} SourceShardAddResponse */ - SetKeyspaceShardingInfoResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SetKeyspaceShardingInfoResponse) + SourceShardAddResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.SourceShardAddResponse) return object; - let message = new $root.vtctldata.SetKeyspaceShardingInfoResponse(); - if (object.keyspace != null) { - if (typeof object.keyspace !== "object") - throw TypeError(".vtctldata.SetKeyspaceShardingInfoResponse.keyspace: object expected"); - message.keyspace = $root.topodata.Keyspace.fromObject(object.keyspace); + let message = new $root.vtctldata.SourceShardAddResponse(); + if (object.shard != null) { + if (typeof object.shard !== "object") + throw TypeError(".vtctldata.SourceShardAddResponse.shard: object expected"); + message.shard = $root.topodata.Shard.fromObject(object.shard); } return message; }; /** - * Creates a plain object from a SetKeyspaceShardingInfoResponse message. Also converts values to other types if specified. + * Creates a plain object from a SourceShardAddResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.SetKeyspaceShardingInfoResponse + * @memberof vtctldata.SourceShardAddResponse * @static - * @param {vtctldata.SetKeyspaceShardingInfoResponse} message SetKeyspaceShardingInfoResponse + * @param {vtctldata.SourceShardAddResponse} message SourceShardAddResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SetKeyspaceShardingInfoResponse.toObject = function toObject(message, options) { + SourceShardAddResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) - object.keyspace = null; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = $root.topodata.Keyspace.toObject(message.keyspace, options); + object.shard = null; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = $root.topodata.Shard.toObject(message.shard, options); return object; }; /** - * Converts this SetKeyspaceShardingInfoResponse to JSON. + * Converts this SourceShardAddResponse to JSON. 
* @function toJSON - * @memberof vtctldata.SetKeyspaceShardingInfoResponse + * @memberof vtctldata.SourceShardAddResponse * @instance * @returns {Object.} JSON object */ - SetKeyspaceShardingInfoResponse.prototype.toJSON = function toJSON() { + SourceShardAddResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SetKeyspaceShardingInfoResponse + * Gets the default type url for SourceShardAddResponse * @function getTypeUrl - * @memberof vtctldata.SetKeyspaceShardingInfoResponse + * @memberof vtctldata.SourceShardAddResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SetKeyspaceShardingInfoResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SourceShardAddResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.SetKeyspaceShardingInfoResponse"; + return typeUrlPrefix + "/vtctldata.SourceShardAddResponse"; }; - return SetKeyspaceShardingInfoResponse; + return SourceShardAddResponse; })(); - vtctldata.SetShardIsPrimaryServingRequest = (function() { + vtctldata.SourceShardDeleteRequest = (function() { /** - * Properties of a SetShardIsPrimaryServingRequest. + * Properties of a SourceShardDeleteRequest. 
* @memberof vtctldata - * @interface ISetShardIsPrimaryServingRequest - * @property {string|null} [keyspace] SetShardIsPrimaryServingRequest keyspace - * @property {string|null} [shard] SetShardIsPrimaryServingRequest shard - * @property {boolean|null} [is_serving] SetShardIsPrimaryServingRequest is_serving + * @interface ISourceShardDeleteRequest + * @property {string|null} [keyspace] SourceShardDeleteRequest keyspace + * @property {string|null} [shard] SourceShardDeleteRequest shard + * @property {number|null} [uid] SourceShardDeleteRequest uid */ /** - * Constructs a new SetShardIsPrimaryServingRequest. + * Constructs a new SourceShardDeleteRequest. * @memberof vtctldata - * @classdesc Represents a SetShardIsPrimaryServingRequest. - * @implements ISetShardIsPrimaryServingRequest + * @classdesc Represents a SourceShardDeleteRequest. + * @implements ISourceShardDeleteRequest * @constructor - * @param {vtctldata.ISetShardIsPrimaryServingRequest=} [properties] Properties to set + * @param {vtctldata.ISourceShardDeleteRequest=} [properties] Properties to set */ - function SetShardIsPrimaryServingRequest(properties) { + function SourceShardDeleteRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -131439,90 +154133,90 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * SetShardIsPrimaryServingRequest keyspace. + * SourceShardDeleteRequest keyspace. * @member {string} keyspace - * @memberof vtctldata.SetShardIsPrimaryServingRequest + * @memberof vtctldata.SourceShardDeleteRequest * @instance */ - SetShardIsPrimaryServingRequest.prototype.keyspace = ""; + SourceShardDeleteRequest.prototype.keyspace = ""; /** - * SetShardIsPrimaryServingRequest shard. + * SourceShardDeleteRequest shard. 
* @member {string} shard - * @memberof vtctldata.SetShardIsPrimaryServingRequest + * @memberof vtctldata.SourceShardDeleteRequest * @instance */ - SetShardIsPrimaryServingRequest.prototype.shard = ""; + SourceShardDeleteRequest.prototype.shard = ""; /** - * SetShardIsPrimaryServingRequest is_serving. - * @member {boolean} is_serving - * @memberof vtctldata.SetShardIsPrimaryServingRequest + * SourceShardDeleteRequest uid. + * @member {number} uid + * @memberof vtctldata.SourceShardDeleteRequest * @instance */ - SetShardIsPrimaryServingRequest.prototype.is_serving = false; + SourceShardDeleteRequest.prototype.uid = 0; /** - * Creates a new SetShardIsPrimaryServingRequest instance using the specified properties. + * Creates a new SourceShardDeleteRequest instance using the specified properties. * @function create - * @memberof vtctldata.SetShardIsPrimaryServingRequest + * @memberof vtctldata.SourceShardDeleteRequest * @static - * @param {vtctldata.ISetShardIsPrimaryServingRequest=} [properties] Properties to set - * @returns {vtctldata.SetShardIsPrimaryServingRequest} SetShardIsPrimaryServingRequest instance + * @param {vtctldata.ISourceShardDeleteRequest=} [properties] Properties to set + * @returns {vtctldata.SourceShardDeleteRequest} SourceShardDeleteRequest instance */ - SetShardIsPrimaryServingRequest.create = function create(properties) { - return new SetShardIsPrimaryServingRequest(properties); + SourceShardDeleteRequest.create = function create(properties) { + return new SourceShardDeleteRequest(properties); }; /** - * Encodes the specified SetShardIsPrimaryServingRequest message. Does not implicitly {@link vtctldata.SetShardIsPrimaryServingRequest.verify|verify} messages. + * Encodes the specified SourceShardDeleteRequest message. Does not implicitly {@link vtctldata.SourceShardDeleteRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.SetShardIsPrimaryServingRequest + * @memberof vtctldata.SourceShardDeleteRequest * @static - * @param {vtctldata.ISetShardIsPrimaryServingRequest} message SetShardIsPrimaryServingRequest message or plain object to encode + * @param {vtctldata.ISourceShardDeleteRequest} message SourceShardDeleteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetShardIsPrimaryServingRequest.encode = function encode(message, writer) { + SourceShardDeleteRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.is_serving != null && Object.hasOwnProperty.call(message, "is_serving")) - writer.uint32(/* id 3, wireType 0 =*/24).bool(message.is_serving); + if (message.uid != null && Object.hasOwnProperty.call(message, "uid")) + writer.uint32(/* id 3, wireType 0 =*/24).int32(message.uid); return writer; }; /** - * Encodes the specified SetShardIsPrimaryServingRequest message, length delimited. Does not implicitly {@link vtctldata.SetShardIsPrimaryServingRequest.verify|verify} messages. + * Encodes the specified SourceShardDeleteRequest message, length delimited. Does not implicitly {@link vtctldata.SourceShardDeleteRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.SetShardIsPrimaryServingRequest + * @memberof vtctldata.SourceShardDeleteRequest * @static - * @param {vtctldata.ISetShardIsPrimaryServingRequest} message SetShardIsPrimaryServingRequest message or plain object to encode + * @param {vtctldata.ISourceShardDeleteRequest} message SourceShardDeleteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetShardIsPrimaryServingRequest.encodeDelimited = function encodeDelimited(message, writer) { + SourceShardDeleteRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SetShardIsPrimaryServingRequest message from the specified reader or buffer. + * Decodes a SourceShardDeleteRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.SetShardIsPrimaryServingRequest + * @memberof vtctldata.SourceShardDeleteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SetShardIsPrimaryServingRequest} SetShardIsPrimaryServingRequest + * @returns {vtctldata.SourceShardDeleteRequest} SourceShardDeleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetShardIsPrimaryServingRequest.decode = function decode(reader, length) { + SourceShardDeleteRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SetShardIsPrimaryServingRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.SourceShardDeleteRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -131535,7 +154229,7 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 3: { - message.is_serving = reader.bool(); + message.uid = reader.int32(); break; } default: @@ -131547,30 +154241,30 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a SetShardIsPrimaryServingRequest message from the specified reader or buffer, length delimited. + * Decodes a SourceShardDeleteRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.SetShardIsPrimaryServingRequest + * @memberof vtctldata.SourceShardDeleteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SetShardIsPrimaryServingRequest} SetShardIsPrimaryServingRequest + * @returns {vtctldata.SourceShardDeleteRequest} SourceShardDeleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetShardIsPrimaryServingRequest.decodeDelimited = function decodeDelimited(reader) { + SourceShardDeleteRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SetShardIsPrimaryServingRequest message. + * Verifies a SourceShardDeleteRequest message. 
* @function verify - * @memberof vtctldata.SetShardIsPrimaryServingRequest + * @memberof vtctldata.SourceShardDeleteRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SetShardIsPrimaryServingRequest.verify = function verify(message) { + SourceShardDeleteRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.keyspace != null && message.hasOwnProperty("keyspace")) @@ -131579,107 +154273,107 @@ export const vtctldata = $root.vtctldata = (() => { if (message.shard != null && message.hasOwnProperty("shard")) if (!$util.isString(message.shard)) return "shard: string expected"; - if (message.is_serving != null && message.hasOwnProperty("is_serving")) - if (typeof message.is_serving !== "boolean") - return "is_serving: boolean expected"; + if (message.uid != null && message.hasOwnProperty("uid")) + if (!$util.isInteger(message.uid)) + return "uid: integer expected"; return null; }; /** - * Creates a SetShardIsPrimaryServingRequest message from a plain object. Also converts values to their respective internal types. + * Creates a SourceShardDeleteRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.SetShardIsPrimaryServingRequest + * @memberof vtctldata.SourceShardDeleteRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.SetShardIsPrimaryServingRequest} SetShardIsPrimaryServingRequest + * @returns {vtctldata.SourceShardDeleteRequest} SourceShardDeleteRequest */ - SetShardIsPrimaryServingRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SetShardIsPrimaryServingRequest) + SourceShardDeleteRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.SourceShardDeleteRequest) return object; - let message = new $root.vtctldata.SetShardIsPrimaryServingRequest(); + let message = new $root.vtctldata.SourceShardDeleteRequest(); if (object.keyspace != null) message.keyspace = String(object.keyspace); if (object.shard != null) message.shard = String(object.shard); - if (object.is_serving != null) - message.is_serving = Boolean(object.is_serving); + if (object.uid != null) + message.uid = object.uid | 0; return message; }; /** - * Creates a plain object from a SetShardIsPrimaryServingRequest message. Also converts values to other types if specified. + * Creates a plain object from a SourceShardDeleteRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.SetShardIsPrimaryServingRequest + * @memberof vtctldata.SourceShardDeleteRequest * @static - * @param {vtctldata.SetShardIsPrimaryServingRequest} message SetShardIsPrimaryServingRequest + * @param {vtctldata.SourceShardDeleteRequest} message SourceShardDeleteRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SetShardIsPrimaryServingRequest.toObject = function toObject(message, options) { + SourceShardDeleteRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { object.keyspace = ""; object.shard = ""; - object.is_serving = false; + object.uid = 0; } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; if (message.shard != null && message.hasOwnProperty("shard")) object.shard = message.shard; - if (message.is_serving != null && message.hasOwnProperty("is_serving")) - object.is_serving = message.is_serving; + if (message.uid != null && message.hasOwnProperty("uid")) + object.uid = message.uid; return object; }; /** - * Converts this SetShardIsPrimaryServingRequest to JSON. + * Converts this SourceShardDeleteRequest to JSON. 
* @function toJSON - * @memberof vtctldata.SetShardIsPrimaryServingRequest + * @memberof vtctldata.SourceShardDeleteRequest * @instance * @returns {Object.} JSON object */ - SetShardIsPrimaryServingRequest.prototype.toJSON = function toJSON() { + SourceShardDeleteRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SetShardIsPrimaryServingRequest + * Gets the default type url for SourceShardDeleteRequest * @function getTypeUrl - * @memberof vtctldata.SetShardIsPrimaryServingRequest + * @memberof vtctldata.SourceShardDeleteRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SetShardIsPrimaryServingRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SourceShardDeleteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.SetShardIsPrimaryServingRequest"; + return typeUrlPrefix + "/vtctldata.SourceShardDeleteRequest"; }; - return SetShardIsPrimaryServingRequest; + return SourceShardDeleteRequest; })(); - vtctldata.SetShardIsPrimaryServingResponse = (function() { + vtctldata.SourceShardDeleteResponse = (function() { /** - * Properties of a SetShardIsPrimaryServingResponse. + * Properties of a SourceShardDeleteResponse. * @memberof vtctldata - * @interface ISetShardIsPrimaryServingResponse - * @property {topodata.IShard|null} [shard] SetShardIsPrimaryServingResponse shard + * @interface ISourceShardDeleteResponse + * @property {topodata.IShard|null} [shard] SourceShardDeleteResponse shard */ /** - * Constructs a new SetShardIsPrimaryServingResponse. + * Constructs a new SourceShardDeleteResponse. * @memberof vtctldata - * @classdesc Represents a SetShardIsPrimaryServingResponse. 
- * @implements ISetShardIsPrimaryServingResponse + * @classdesc Represents a SourceShardDeleteResponse. + * @implements ISourceShardDeleteResponse * @constructor - * @param {vtctldata.ISetShardIsPrimaryServingResponse=} [properties] Properties to set + * @param {vtctldata.ISourceShardDeleteResponse=} [properties] Properties to set */ - function SetShardIsPrimaryServingResponse(properties) { + function SourceShardDeleteResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -131687,35 +154381,35 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * SetShardIsPrimaryServingResponse shard. + * SourceShardDeleteResponse shard. * @member {topodata.IShard|null|undefined} shard - * @memberof vtctldata.SetShardIsPrimaryServingResponse + * @memberof vtctldata.SourceShardDeleteResponse * @instance */ - SetShardIsPrimaryServingResponse.prototype.shard = null; + SourceShardDeleteResponse.prototype.shard = null; /** - * Creates a new SetShardIsPrimaryServingResponse instance using the specified properties. + * Creates a new SourceShardDeleteResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.SetShardIsPrimaryServingResponse + * @memberof vtctldata.SourceShardDeleteResponse * @static - * @param {vtctldata.ISetShardIsPrimaryServingResponse=} [properties] Properties to set - * @returns {vtctldata.SetShardIsPrimaryServingResponse} SetShardIsPrimaryServingResponse instance + * @param {vtctldata.ISourceShardDeleteResponse=} [properties] Properties to set + * @returns {vtctldata.SourceShardDeleteResponse} SourceShardDeleteResponse instance */ - SetShardIsPrimaryServingResponse.create = function create(properties) { - return new SetShardIsPrimaryServingResponse(properties); + SourceShardDeleteResponse.create = function create(properties) { + return new SourceShardDeleteResponse(properties); }; /** - * Encodes the specified SetShardIsPrimaryServingResponse message. Does not implicitly {@link vtctldata.SetShardIsPrimaryServingResponse.verify|verify} messages. + * Encodes the specified SourceShardDeleteResponse message. Does not implicitly {@link vtctldata.SourceShardDeleteResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.SetShardIsPrimaryServingResponse + * @memberof vtctldata.SourceShardDeleteResponse * @static - * @param {vtctldata.ISetShardIsPrimaryServingResponse} message SetShardIsPrimaryServingResponse message or plain object to encode + * @param {vtctldata.ISourceShardDeleteResponse} message SourceShardDeleteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetShardIsPrimaryServingResponse.encode = function encode(message, writer) { + SourceShardDeleteResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) @@ -131724,33 +154418,33 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Encodes the specified SetShardIsPrimaryServingResponse message, length delimited. 
Does not implicitly {@link vtctldata.SetShardIsPrimaryServingResponse.verify|verify} messages. + * Encodes the specified SourceShardDeleteResponse message, length delimited. Does not implicitly {@link vtctldata.SourceShardDeleteResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.SetShardIsPrimaryServingResponse + * @memberof vtctldata.SourceShardDeleteResponse * @static - * @param {vtctldata.ISetShardIsPrimaryServingResponse} message SetShardIsPrimaryServingResponse message or plain object to encode + * @param {vtctldata.ISourceShardDeleteResponse} message SourceShardDeleteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetShardIsPrimaryServingResponse.encodeDelimited = function encodeDelimited(message, writer) { + SourceShardDeleteResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SetShardIsPrimaryServingResponse message from the specified reader or buffer. + * Decodes a SourceShardDeleteResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.SetShardIsPrimaryServingResponse + * @memberof vtctldata.SourceShardDeleteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SetShardIsPrimaryServingResponse} SetShardIsPrimaryServingResponse + * @returns {vtctldata.SourceShardDeleteResponse} SourceShardDeleteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetShardIsPrimaryServingResponse.decode = function decode(reader, length) { + SourceShardDeleteResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SetShardIsPrimaryServingResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SourceShardDeleteResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -131767,30 +154461,30 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a SetShardIsPrimaryServingResponse message from the specified reader or buffer, length delimited. + * Decodes a SourceShardDeleteResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.SetShardIsPrimaryServingResponse + * @memberof vtctldata.SourceShardDeleteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SetShardIsPrimaryServingResponse} SetShardIsPrimaryServingResponse + * @returns {vtctldata.SourceShardDeleteResponse} SourceShardDeleteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetShardIsPrimaryServingResponse.decodeDelimited = function decodeDelimited(reader) { + SourceShardDeleteResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SetShardIsPrimaryServingResponse message. + * Verifies a SourceShardDeleteResponse message. * @function verify - * @memberof vtctldata.SetShardIsPrimaryServingResponse + * @memberof vtctldata.SourceShardDeleteResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SetShardIsPrimaryServingResponse.verify = function verify(message) { + SourceShardDeleteResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.shard != null && message.hasOwnProperty("shard")) { @@ -131802,35 +154496,35 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Creates a SetShardIsPrimaryServingResponse message from a plain object. Also converts values to their respective internal types. + * Creates a SourceShardDeleteResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.SetShardIsPrimaryServingResponse + * @memberof vtctldata.SourceShardDeleteResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.SetShardIsPrimaryServingResponse} SetShardIsPrimaryServingResponse + * @returns {vtctldata.SourceShardDeleteResponse} SourceShardDeleteResponse */ - SetShardIsPrimaryServingResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SetShardIsPrimaryServingResponse) + SourceShardDeleteResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.SourceShardDeleteResponse) return object; - let message = new $root.vtctldata.SetShardIsPrimaryServingResponse(); + let message = new $root.vtctldata.SourceShardDeleteResponse(); if (object.shard != null) { if (typeof object.shard !== "object") - throw TypeError(".vtctldata.SetShardIsPrimaryServingResponse.shard: object expected"); + throw TypeError(".vtctldata.SourceShardDeleteResponse.shard: object expected"); message.shard = $root.topodata.Shard.fromObject(object.shard); } return message; }; /** - * Creates a plain object from a SetShardIsPrimaryServingResponse message. Also converts values to other types if specified. + * Creates a plain object from a SourceShardDeleteResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.SetShardIsPrimaryServingResponse + * @memberof vtctldata.SourceShardDeleteResponse * @static - * @param {vtctldata.SetShardIsPrimaryServingResponse} message SetShardIsPrimaryServingResponse + * @param {vtctldata.SourceShardDeleteResponse} message SourceShardDeleteResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SetShardIsPrimaryServingResponse.toObject = function toObject(message, options) { + SourceShardDeleteResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; @@ -131842,60 +154536,52 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Converts this SetShardIsPrimaryServingResponse to JSON. + * Converts this SourceShardDeleteResponse to JSON. * @function toJSON - * @memberof vtctldata.SetShardIsPrimaryServingResponse + * @memberof vtctldata.SourceShardDeleteResponse * @instance * @returns {Object.} JSON object */ - SetShardIsPrimaryServingResponse.prototype.toJSON = function toJSON() { + SourceShardDeleteResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SetShardIsPrimaryServingResponse + * Gets the default type url for SourceShardDeleteResponse * @function getTypeUrl - * @memberof vtctldata.SetShardIsPrimaryServingResponse + * @memberof vtctldata.SourceShardDeleteResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SetShardIsPrimaryServingResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + SourceShardDeleteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.SetShardIsPrimaryServingResponse"; + return typeUrlPrefix + "/vtctldata.SourceShardDeleteResponse"; 
}; - return SetShardIsPrimaryServingResponse; + return SourceShardDeleteResponse; })(); - vtctldata.SetShardTabletControlRequest = (function() { + vtctldata.StartReplicationRequest = (function() { /** - * Properties of a SetShardTabletControlRequest. + * Properties of a StartReplicationRequest. * @memberof vtctldata - * @interface ISetShardTabletControlRequest - * @property {string|null} [keyspace] SetShardTabletControlRequest keyspace - * @property {string|null} [shard] SetShardTabletControlRequest shard - * @property {topodata.TabletType|null} [tablet_type] SetShardTabletControlRequest tablet_type - * @property {Array.|null} [cells] SetShardTabletControlRequest cells - * @property {Array.|null} [denied_tables] SetShardTabletControlRequest denied_tables - * @property {boolean|null} [disable_query_service] SetShardTabletControlRequest disable_query_service - * @property {boolean|null} [remove] SetShardTabletControlRequest remove + * @interface IStartReplicationRequest + * @property {topodata.ITabletAlias|null} [tablet_alias] StartReplicationRequest tablet_alias */ /** - * Constructs a new SetShardTabletControlRequest. + * Constructs a new StartReplicationRequest. * @memberof vtctldata - * @classdesc Represents a SetShardTabletControlRequest. - * @implements ISetShardTabletControlRequest + * @classdesc Represents a StartReplicationRequest. + * @implements IStartReplicationRequest * @constructor - * @param {vtctldata.ISetShardTabletControlRequest=} [properties] Properties to set + * @param {vtctldata.IStartReplicationRequest=} [properties] Properties to set */ - function SetShardTabletControlRequest(properties) { - this.cells = []; - this.denied_tables = []; + function StartReplicationRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -131903,431 +154589,206 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * SetShardTabletControlRequest keyspace. 
- * @member {string} keyspace - * @memberof vtctldata.SetShardTabletControlRequest - * @instance - */ - SetShardTabletControlRequest.prototype.keyspace = ""; - - /** - * SetShardTabletControlRequest shard. - * @member {string} shard - * @memberof vtctldata.SetShardTabletControlRequest - * @instance - */ - SetShardTabletControlRequest.prototype.shard = ""; - - /** - * SetShardTabletControlRequest tablet_type. - * @member {topodata.TabletType} tablet_type - * @memberof vtctldata.SetShardTabletControlRequest - * @instance - */ - SetShardTabletControlRequest.prototype.tablet_type = 0; - - /** - * SetShardTabletControlRequest cells. - * @member {Array.} cells - * @memberof vtctldata.SetShardTabletControlRequest - * @instance - */ - SetShardTabletControlRequest.prototype.cells = $util.emptyArray; - - /** - * SetShardTabletControlRequest denied_tables. - * @member {Array.} denied_tables - * @memberof vtctldata.SetShardTabletControlRequest - * @instance - */ - SetShardTabletControlRequest.prototype.denied_tables = $util.emptyArray; - - /** - * SetShardTabletControlRequest disable_query_service. - * @member {boolean} disable_query_service - * @memberof vtctldata.SetShardTabletControlRequest - * @instance - */ - SetShardTabletControlRequest.prototype.disable_query_service = false; - - /** - * SetShardTabletControlRequest remove. - * @member {boolean} remove - * @memberof vtctldata.SetShardTabletControlRequest + * StartReplicationRequest tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.StartReplicationRequest * @instance */ - SetShardTabletControlRequest.prototype.remove = false; + StartReplicationRequest.prototype.tablet_alias = null; /** - * Creates a new SetShardTabletControlRequest instance using the specified properties. + * Creates a new StartReplicationRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.SetShardTabletControlRequest + * @memberof vtctldata.StartReplicationRequest * @static - * @param {vtctldata.ISetShardTabletControlRequest=} [properties] Properties to set - * @returns {vtctldata.SetShardTabletControlRequest} SetShardTabletControlRequest instance + * @param {vtctldata.IStartReplicationRequest=} [properties] Properties to set + * @returns {vtctldata.StartReplicationRequest} StartReplicationRequest instance */ - SetShardTabletControlRequest.create = function create(properties) { - return new SetShardTabletControlRequest(properties); + StartReplicationRequest.create = function create(properties) { + return new StartReplicationRequest(properties); }; /** - * Encodes the specified SetShardTabletControlRequest message. Does not implicitly {@link vtctldata.SetShardTabletControlRequest.verify|verify} messages. + * Encodes the specified StartReplicationRequest message. Does not implicitly {@link vtctldata.StartReplicationRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.SetShardTabletControlRequest + * @memberof vtctldata.StartReplicationRequest * @static - * @param {vtctldata.ISetShardTabletControlRequest} message SetShardTabletControlRequest message or plain object to encode + * @param {vtctldata.IStartReplicationRequest} message StartReplicationRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetShardTabletControlRequest.encode = function encode(message, writer) { + StartReplicationRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.tablet_type != null && Object.hasOwnProperty.call(message, "tablet_type")) - writer.uint32(/* id 3, wireType 0 =*/24).int32(message.tablet_type); - if (message.cells != null && message.cells.length) - for (let i = 0; i < message.cells.length; ++i) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.cells[i]); - if (message.denied_tables != null && message.denied_tables.length) - for (let i = 0; i < message.denied_tables.length; ++i) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.denied_tables[i]); - if (message.disable_query_service != null && Object.hasOwnProperty.call(message, "disable_query_service")) - writer.uint32(/* id 6, wireType 0 =*/48).bool(message.disable_query_service); - if (message.remove != null && Object.hasOwnProperty.call(message, "remove")) - writer.uint32(/* id 7, wireType 0 =*/56).bool(message.remove); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 
=*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified SetShardTabletControlRequest message, length delimited. Does not implicitly {@link vtctldata.SetShardTabletControlRequest.verify|verify} messages. + * Encodes the specified StartReplicationRequest message, length delimited. Does not implicitly {@link vtctldata.StartReplicationRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.SetShardTabletControlRequest + * @memberof vtctldata.StartReplicationRequest * @static - * @param {vtctldata.ISetShardTabletControlRequest} message SetShardTabletControlRequest message or plain object to encode + * @param {vtctldata.IStartReplicationRequest} message StartReplicationRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetShardTabletControlRequest.encodeDelimited = function encodeDelimited(message, writer) { + StartReplicationRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SetShardTabletControlRequest message from the specified reader or buffer. + * Decodes a StartReplicationRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.SetShardTabletControlRequest + * @memberof vtctldata.StartReplicationRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SetShardTabletControlRequest} SetShardTabletControlRequest + * @returns {vtctldata.StartReplicationRequest} StartReplicationRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetShardTabletControlRequest.decode = function decode(reader, length) { + StartReplicationRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SetShardTabletControlRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.StartReplicationRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - message.shard = reader.string(); - break; - } - case 3: { - message.tablet_type = reader.int32(); - break; - } - case 4: { - if (!(message.cells && message.cells.length)) - message.cells = []; - message.cells.push(reader.string()); - break; - } - case 5: { - if (!(message.denied_tables && message.denied_tables.length)) - message.denied_tables = []; - message.denied_tables.push(reader.string()); - break; - } - case 6: { - message.disable_query_service = reader.bool(); - break; - } - case 7: { - message.remove = reader.bool(); + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); break; } default: reader.skipType(tag & 7); break; - } - } - return message; - }; - - /** - * Decodes a SetShardTabletControlRequest message from the specified reader or buffer, length delimited. 
- * @function decodeDelimited - * @memberof vtctldata.SetShardTabletControlRequest - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SetShardTabletControlRequest} SetShardTabletControlRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - SetShardTabletControlRequest.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; - - /** - * Verifies a SetShardTabletControlRequest message. - * @function verify - * @memberof vtctldata.SetShardTabletControlRequest - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - SetShardTabletControlRequest.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) - switch (message.tablet_type) { - default: - return "tablet_type: enum value expected"; - case 0: - case 1: - case 1: - case 2: - case 3: - case 3: - case 4: - case 5: - case 6: - case 7: - case 8: - break; - } - if (message.cells != null && message.hasOwnProperty("cells")) { - if (!Array.isArray(message.cells)) - return "cells: array expected"; - for (let i = 0; i < message.cells.length; ++i) - if (!$util.isString(message.cells[i])) - return "cells: string[] expected"; - } - if (message.denied_tables != null && message.hasOwnProperty("denied_tables")) { - if 
(!Array.isArray(message.denied_tables)) - return "denied_tables: array expected"; - for (let i = 0; i < message.denied_tables.length; ++i) - if (!$util.isString(message.denied_tables[i])) - return "denied_tables: string[] expected"; - } - if (message.disable_query_service != null && message.hasOwnProperty("disable_query_service")) - if (typeof message.disable_query_service !== "boolean") - return "disable_query_service: boolean expected"; - if (message.remove != null && message.hasOwnProperty("remove")) - if (typeof message.remove !== "boolean") - return "remove: boolean expected"; - return null; - }; - - /** - * Creates a SetShardTabletControlRequest message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof vtctldata.SetShardTabletControlRequest - * @static - * @param {Object.} object Plain object - * @returns {vtctldata.SetShardTabletControlRequest} SetShardTabletControlRequest - */ - SetShardTabletControlRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SetShardTabletControlRequest) - return object; - let message = new $root.vtctldata.SetShardTabletControlRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - switch (object.tablet_type) { - default: - if (typeof object.tablet_type === "number") { - message.tablet_type = object.tablet_type; - break; - } - break; - case "UNKNOWN": - case 0: - message.tablet_type = 0; - break; - case "PRIMARY": - case 1: - message.tablet_type = 1; - break; - case "MASTER": - case 1: - message.tablet_type = 1; - break; - case "REPLICA": - case 2: - message.tablet_type = 2; - break; - case "RDONLY": - case 3: - message.tablet_type = 3; - break; - case "BATCH": - case 3: - message.tablet_type = 3; - break; - case "SPARE": - case 4: - message.tablet_type = 4; - break; - case "EXPERIMENTAL": - case 5: - message.tablet_type = 5; - 
break; - case "BACKUP": - case 6: - message.tablet_type = 6; - break; - case "RESTORE": - case 7: - message.tablet_type = 7; - break; - case "DRAINED": - case 8: - message.tablet_type = 8; - break; + } } - if (object.cells) { - if (!Array.isArray(object.cells)) - throw TypeError(".vtctldata.SetShardTabletControlRequest.cells: array expected"); - message.cells = []; - for (let i = 0; i < object.cells.length; ++i) - message.cells[i] = String(object.cells[i]); + return message; + }; + + /** + * Decodes a StartReplicationRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.StartReplicationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.StartReplicationRequest} StartReplicationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + StartReplicationRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a StartReplicationRequest message. + * @function verify + * @memberof vtctldata.StartReplicationRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + StartReplicationRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." 
+ error; } - if (object.denied_tables) { - if (!Array.isArray(object.denied_tables)) - throw TypeError(".vtctldata.SetShardTabletControlRequest.denied_tables: array expected"); - message.denied_tables = []; - for (let i = 0; i < object.denied_tables.length; ++i) - message.denied_tables[i] = String(object.denied_tables[i]); + return null; + }; + + /** + * Creates a StartReplicationRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.StartReplicationRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.StartReplicationRequest} StartReplicationRequest + */ + StartReplicationRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.StartReplicationRequest) + return object; + let message = new $root.vtctldata.StartReplicationRequest(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.StartReplicationRequest.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); } - if (object.disable_query_service != null) - message.disable_query_service = Boolean(object.disable_query_service); - if (object.remove != null) - message.remove = Boolean(object.remove); return message; }; /** - * Creates a plain object from a SetShardTabletControlRequest message. Also converts values to other types if specified. + * Creates a plain object from a StartReplicationRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.SetShardTabletControlRequest + * @memberof vtctldata.StartReplicationRequest * @static - * @param {vtctldata.SetShardTabletControlRequest} message SetShardTabletControlRequest + * @param {vtctldata.StartReplicationRequest} message StartReplicationRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SetShardTabletControlRequest.toObject = function toObject(message, options) { + StartReplicationRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) { - object.cells = []; - object.denied_tables = []; - } - if (options.defaults) { - object.keyspace = ""; - object.shard = ""; - object.tablet_type = options.enums === String ? "UNKNOWN" : 0; - object.disable_query_service = false; - object.remove = false; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) - object.tablet_type = options.enums === String ? $root.topodata.TabletType[message.tablet_type] === undefined ? 
message.tablet_type : $root.topodata.TabletType[message.tablet_type] : message.tablet_type; - if (message.cells && message.cells.length) { - object.cells = []; - for (let j = 0; j < message.cells.length; ++j) - object.cells[j] = message.cells[j]; - } - if (message.denied_tables && message.denied_tables.length) { - object.denied_tables = []; - for (let j = 0; j < message.denied_tables.length; ++j) - object.denied_tables[j] = message.denied_tables[j]; - } - if (message.disable_query_service != null && message.hasOwnProperty("disable_query_service")) - object.disable_query_service = message.disable_query_service; - if (message.remove != null && message.hasOwnProperty("remove")) - object.remove = message.remove; + if (options.defaults) + object.tablet_alias = null; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); return object; }; /** - * Converts this SetShardTabletControlRequest to JSON. + * Converts this StartReplicationRequest to JSON. 
* @function toJSON - * @memberof vtctldata.SetShardTabletControlRequest + * @memberof vtctldata.StartReplicationRequest * @instance * @returns {Object.} JSON object */ - SetShardTabletControlRequest.prototype.toJSON = function toJSON() { + StartReplicationRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SetShardTabletControlRequest + * Gets the default type url for StartReplicationRequest * @function getTypeUrl - * @memberof vtctldata.SetShardTabletControlRequest + * @memberof vtctldata.StartReplicationRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SetShardTabletControlRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + StartReplicationRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.SetShardTabletControlRequest"; + return typeUrlPrefix + "/vtctldata.StartReplicationRequest"; }; - return SetShardTabletControlRequest; + return StartReplicationRequest; })(); - vtctldata.SetShardTabletControlResponse = (function() { + vtctldata.StartReplicationResponse = (function() { /** - * Properties of a SetShardTabletControlResponse. + * Properties of a StartReplicationResponse. * @memberof vtctldata - * @interface ISetShardTabletControlResponse - * @property {topodata.IShard|null} [shard] SetShardTabletControlResponse shard + * @interface IStartReplicationResponse */ /** - * Constructs a new SetShardTabletControlResponse. + * Constructs a new StartReplicationResponse. * @memberof vtctldata - * @classdesc Represents a SetShardTabletControlResponse. - * @implements ISetShardTabletControlResponse + * @classdesc Represents a StartReplicationResponse. 
+ * @implements IStartReplicationResponse * @constructor - * @param {vtctldata.ISetShardTabletControlResponse=} [properties] Properties to set + * @param {vtctldata.IStartReplicationResponse=} [properties] Properties to set */ - function SetShardTabletControlResponse(properties) { + function StartReplicationResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -132335,77 +154796,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * SetShardTabletControlResponse shard. - * @member {topodata.IShard|null|undefined} shard - * @memberof vtctldata.SetShardTabletControlResponse - * @instance - */ - SetShardTabletControlResponse.prototype.shard = null; - - /** - * Creates a new SetShardTabletControlResponse instance using the specified properties. + * Creates a new StartReplicationResponse instance using the specified properties. * @function create - * @memberof vtctldata.SetShardTabletControlResponse + * @memberof vtctldata.StartReplicationResponse * @static - * @param {vtctldata.ISetShardTabletControlResponse=} [properties] Properties to set - * @returns {vtctldata.SetShardTabletControlResponse} SetShardTabletControlResponse instance + * @param {vtctldata.IStartReplicationResponse=} [properties] Properties to set + * @returns {vtctldata.StartReplicationResponse} StartReplicationResponse instance */ - SetShardTabletControlResponse.create = function create(properties) { - return new SetShardTabletControlResponse(properties); + StartReplicationResponse.create = function create(properties) { + return new StartReplicationResponse(properties); }; /** - * Encodes the specified SetShardTabletControlResponse message. Does not implicitly {@link vtctldata.SetShardTabletControlResponse.verify|verify} messages. + * Encodes the specified StartReplicationResponse message. Does not implicitly {@link vtctldata.StartReplicationResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.SetShardTabletControlResponse + * @memberof vtctldata.StartReplicationResponse * @static - * @param {vtctldata.ISetShardTabletControlResponse} message SetShardTabletControlResponse message or plain object to encode + * @param {vtctldata.IStartReplicationResponse} message StartReplicationResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetShardTabletControlResponse.encode = function encode(message, writer) { + StartReplicationResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - $root.topodata.Shard.encode(message.shard, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified SetShardTabletControlResponse message, length delimited. Does not implicitly {@link vtctldata.SetShardTabletControlResponse.verify|verify} messages. + * Encodes the specified StartReplicationResponse message, length delimited. Does not implicitly {@link vtctldata.StartReplicationResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.SetShardTabletControlResponse + * @memberof vtctldata.StartReplicationResponse * @static - * @param {vtctldata.ISetShardTabletControlResponse} message SetShardTabletControlResponse message or plain object to encode + * @param {vtctldata.IStartReplicationResponse} message StartReplicationResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetShardTabletControlResponse.encodeDelimited = function encodeDelimited(message, writer) { + StartReplicationResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SetShardTabletControlResponse message from the specified reader or buffer. + * Decodes a StartReplicationResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.SetShardTabletControlResponse + * @memberof vtctldata.StartReplicationResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SetShardTabletControlResponse} SetShardTabletControlResponse + * @returns {vtctldata.StartReplicationResponse} StartReplicationResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetShardTabletControlResponse.decode = function decode(reader, length) { + StartReplicationResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SetShardTabletControlResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.StartReplicationResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.shard = $root.topodata.Shard.decode(reader, reader.uint32()); - break; - } default: reader.skipType(tag & 7); break; @@ -132415,128 +154862,109 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a SetShardTabletControlResponse message from the specified reader or buffer, length delimited. + * Decodes a StartReplicationResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.SetShardTabletControlResponse + * @memberof vtctldata.StartReplicationResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SetShardTabletControlResponse} SetShardTabletControlResponse + * @returns {vtctldata.StartReplicationResponse} StartReplicationResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetShardTabletControlResponse.decodeDelimited = function decodeDelimited(reader) { + StartReplicationResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SetShardTabletControlResponse message. + * Verifies a StartReplicationResponse message. 
* @function verify - * @memberof vtctldata.SetShardTabletControlResponse + * @memberof vtctldata.StartReplicationResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SetShardTabletControlResponse.verify = function verify(message) { + StartReplicationResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.shard != null && message.hasOwnProperty("shard")) { - let error = $root.topodata.Shard.verify(message.shard); - if (error) - return "shard." + error; - } return null; }; /** - * Creates a SetShardTabletControlResponse message from a plain object. Also converts values to their respective internal types. + * Creates a StartReplicationResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.SetShardTabletControlResponse + * @memberof vtctldata.StartReplicationResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.SetShardTabletControlResponse} SetShardTabletControlResponse + * @returns {vtctldata.StartReplicationResponse} StartReplicationResponse */ - SetShardTabletControlResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SetShardTabletControlResponse) + StartReplicationResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.StartReplicationResponse) return object; - let message = new $root.vtctldata.SetShardTabletControlResponse(); - if (object.shard != null) { - if (typeof object.shard !== "object") - throw TypeError(".vtctldata.SetShardTabletControlResponse.shard: object expected"); - message.shard = $root.topodata.Shard.fromObject(object.shard); - } - return message; + return new $root.vtctldata.StartReplicationResponse(); }; /** - * Creates a plain object from a SetShardTabletControlResponse message. 
Also converts values to other types if specified. + * Creates a plain object from a StartReplicationResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.SetShardTabletControlResponse + * @memberof vtctldata.StartReplicationResponse * @static - * @param {vtctldata.SetShardTabletControlResponse} message SetShardTabletControlResponse + * @param {vtctldata.StartReplicationResponse} message StartReplicationResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SetShardTabletControlResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) - object.shard = null; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = $root.topodata.Shard.toObject(message.shard, options); - return object; + StartReplicationResponse.toObject = function toObject() { + return {}; }; /** - * Converts this SetShardTabletControlResponse to JSON. + * Converts this StartReplicationResponse to JSON. 
* @function toJSON - * @memberof vtctldata.SetShardTabletControlResponse + * @memberof vtctldata.StartReplicationResponse * @instance * @returns {Object.} JSON object */ - SetShardTabletControlResponse.prototype.toJSON = function toJSON() { + StartReplicationResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SetShardTabletControlResponse + * Gets the default type url for StartReplicationResponse * @function getTypeUrl - * @memberof vtctldata.SetShardTabletControlResponse + * @memberof vtctldata.StartReplicationResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SetShardTabletControlResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + StartReplicationResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.SetShardTabletControlResponse"; + return typeUrlPrefix + "/vtctldata.StartReplicationResponse"; }; - return SetShardTabletControlResponse; + return StartReplicationResponse; })(); - vtctldata.SetWritableRequest = (function() { + vtctldata.StopReplicationRequest = (function() { /** - * Properties of a SetWritableRequest. + * Properties of a StopReplicationRequest. * @memberof vtctldata - * @interface ISetWritableRequest - * @property {topodata.ITabletAlias|null} [tablet_alias] SetWritableRequest tablet_alias - * @property {boolean|null} [writable] SetWritableRequest writable + * @interface IStopReplicationRequest + * @property {topodata.ITabletAlias|null} [tablet_alias] StopReplicationRequest tablet_alias */ /** - * Constructs a new SetWritableRequest. + * Constructs a new StopReplicationRequest. * @memberof vtctldata - * @classdesc Represents a SetWritableRequest. 
- * @implements ISetWritableRequest + * @classdesc Represents a StopReplicationRequest. + * @implements IStopReplicationRequest * @constructor - * @param {vtctldata.ISetWritableRequest=} [properties] Properties to set + * @param {vtctldata.IStopReplicationRequest=} [properties] Properties to set */ - function SetWritableRequest(properties) { + function StopReplicationRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -132544,80 +154972,70 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * SetWritableRequest tablet_alias. + * StopReplicationRequest tablet_alias. * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.SetWritableRequest - * @instance - */ - SetWritableRequest.prototype.tablet_alias = null; - - /** - * SetWritableRequest writable. - * @member {boolean} writable - * @memberof vtctldata.SetWritableRequest + * @memberof vtctldata.StopReplicationRequest * @instance */ - SetWritableRequest.prototype.writable = false; + StopReplicationRequest.prototype.tablet_alias = null; /** - * Creates a new SetWritableRequest instance using the specified properties. + * Creates a new StopReplicationRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.SetWritableRequest + * @memberof vtctldata.StopReplicationRequest * @static - * @param {vtctldata.ISetWritableRequest=} [properties] Properties to set - * @returns {vtctldata.SetWritableRequest} SetWritableRequest instance + * @param {vtctldata.IStopReplicationRequest=} [properties] Properties to set + * @returns {vtctldata.StopReplicationRequest} StopReplicationRequest instance */ - SetWritableRequest.create = function create(properties) { - return new SetWritableRequest(properties); + StopReplicationRequest.create = function create(properties) { + return new StopReplicationRequest(properties); }; /** - * Encodes the specified SetWritableRequest message. Does not implicitly {@link vtctldata.SetWritableRequest.verify|verify} messages. + * Encodes the specified StopReplicationRequest message. Does not implicitly {@link vtctldata.StopReplicationRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.SetWritableRequest + * @memberof vtctldata.StopReplicationRequest * @static - * @param {vtctldata.ISetWritableRequest} message SetWritableRequest message or plain object to encode + * @param {vtctldata.IStopReplicationRequest} message StopReplicationRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetWritableRequest.encode = function encode(message, writer) { + StopReplicationRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.writable != null && Object.hasOwnProperty.call(message, "writable")) - writer.uint32(/* id 2, wireType 0 =*/16).bool(message.writable); return writer; }; /** - * Encodes the specified SetWritableRequest message, length delimited. 
Does not implicitly {@link vtctldata.SetWritableRequest.verify|verify} messages. + * Encodes the specified StopReplicationRequest message, length delimited. Does not implicitly {@link vtctldata.StopReplicationRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.SetWritableRequest + * @memberof vtctldata.StopReplicationRequest * @static - * @param {vtctldata.ISetWritableRequest} message SetWritableRequest message or plain object to encode + * @param {vtctldata.IStopReplicationRequest} message StopReplicationRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetWritableRequest.encodeDelimited = function encodeDelimited(message, writer) { + StopReplicationRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SetWritableRequest message from the specified reader or buffer. + * Decodes a StopReplicationRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.SetWritableRequest + * @memberof vtctldata.StopReplicationRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SetWritableRequest} SetWritableRequest + * @returns {vtctldata.StopReplicationRequest} StopReplicationRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetWritableRequest.decode = function decode(reader, length) { + StopReplicationRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SetWritableRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.StopReplicationRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -132625,10 +155043,6 @@ export const vtctldata = $root.vtctldata = (() => { message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); break; } - case 2: { - message.writable = reader.bool(); - break; - } default: reader.skipType(tag & 7); break; @@ -132638,30 +155052,30 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a SetWritableRequest message from the specified reader or buffer, length delimited. + * Decodes a StopReplicationRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.SetWritableRequest + * @memberof vtctldata.StopReplicationRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SetWritableRequest} SetWritableRequest + * @returns {vtctldata.StopReplicationRequest} StopReplicationRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetWritableRequest.decodeDelimited = function decodeDelimited(reader) { + StopReplicationRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SetWritableRequest message. + * Verifies a StopReplicationRequest message. 
* @function verify - * @memberof vtctldata.SetWritableRequest + * @memberof vtctldata.StopReplicationRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SetWritableRequest.verify = function verify(message) { + StopReplicationRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { @@ -132669,104 +155083,95 @@ export const vtctldata = $root.vtctldata = (() => { if (error) return "tablet_alias." + error; } - if (message.writable != null && message.hasOwnProperty("writable")) - if (typeof message.writable !== "boolean") - return "writable: boolean expected"; return null; }; /** - * Creates a SetWritableRequest message from a plain object. Also converts values to their respective internal types. + * Creates a StopReplicationRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.SetWritableRequest + * @memberof vtctldata.StopReplicationRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.SetWritableRequest} SetWritableRequest + * @returns {vtctldata.StopReplicationRequest} StopReplicationRequest */ - SetWritableRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SetWritableRequest) + StopReplicationRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.StopReplicationRequest) return object; - let message = new $root.vtctldata.SetWritableRequest(); + let message = new $root.vtctldata.StopReplicationRequest(); if (object.tablet_alias != null) { if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.SetWritableRequest.tablet_alias: object expected"); + throw TypeError(".vtctldata.StopReplicationRequest.tablet_alias: object expected"); message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); } - if (object.writable != null) - message.writable = Boolean(object.writable); return message; }; /** - * Creates a plain object from a SetWritableRequest message. Also converts values to other types if specified. + * Creates a plain object from a StopReplicationRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.SetWritableRequest + * @memberof vtctldata.StopReplicationRequest * @static - * @param {vtctldata.SetWritableRequest} message SetWritableRequest + * @param {vtctldata.StopReplicationRequest} message StopReplicationRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SetWritableRequest.toObject = function toObject(message, options) { + StopReplicationRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { + if (options.defaults) object.tablet_alias = null; - object.writable = false; - } if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - if (message.writable != null && message.hasOwnProperty("writable")) - object.writable = message.writable; return object; }; /** - * Converts this SetWritableRequest to JSON. + * Converts this StopReplicationRequest to JSON. 
* @function toJSON - * @memberof vtctldata.SetWritableRequest + * @memberof vtctldata.StopReplicationRequest * @instance * @returns {Object.} JSON object */ - SetWritableRequest.prototype.toJSON = function toJSON() { + StopReplicationRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SetWritableRequest + * Gets the default type url for StopReplicationRequest * @function getTypeUrl - * @memberof vtctldata.SetWritableRequest + * @memberof vtctldata.StopReplicationRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SetWritableRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + StopReplicationRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.SetWritableRequest"; + return typeUrlPrefix + "/vtctldata.StopReplicationRequest"; }; - return SetWritableRequest; + return StopReplicationRequest; })(); - vtctldata.SetWritableResponse = (function() { + vtctldata.StopReplicationResponse = (function() { /** - * Properties of a SetWritableResponse. + * Properties of a StopReplicationResponse. * @memberof vtctldata - * @interface ISetWritableResponse + * @interface IStopReplicationResponse */ /** - * Constructs a new SetWritableResponse. + * Constructs a new StopReplicationResponse. * @memberof vtctldata - * @classdesc Represents a SetWritableResponse. - * @implements ISetWritableResponse + * @classdesc Represents a StopReplicationResponse. 
+ * @implements IStopReplicationResponse * @constructor - * @param {vtctldata.ISetWritableResponse=} [properties] Properties to set + * @param {vtctldata.IStopReplicationResponse=} [properties] Properties to set */ - function SetWritableResponse(properties) { + function StopReplicationResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -132774,60 +155179,60 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new SetWritableResponse instance using the specified properties. + * Creates a new StopReplicationResponse instance using the specified properties. * @function create - * @memberof vtctldata.SetWritableResponse + * @memberof vtctldata.StopReplicationResponse * @static - * @param {vtctldata.ISetWritableResponse=} [properties] Properties to set - * @returns {vtctldata.SetWritableResponse} SetWritableResponse instance + * @param {vtctldata.IStopReplicationResponse=} [properties] Properties to set + * @returns {vtctldata.StopReplicationResponse} StopReplicationResponse instance */ - SetWritableResponse.create = function create(properties) { - return new SetWritableResponse(properties); + StopReplicationResponse.create = function create(properties) { + return new StopReplicationResponse(properties); }; /** - * Encodes the specified SetWritableResponse message. Does not implicitly {@link vtctldata.SetWritableResponse.verify|verify} messages. + * Encodes the specified StopReplicationResponse message. Does not implicitly {@link vtctldata.StopReplicationResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.SetWritableResponse + * @memberof vtctldata.StopReplicationResponse * @static - * @param {vtctldata.ISetWritableResponse} message SetWritableResponse message or plain object to encode + * @param {vtctldata.IStopReplicationResponse} message StopReplicationResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetWritableResponse.encode = function encode(message, writer) { + StopReplicationResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); return writer; }; /** - * Encodes the specified SetWritableResponse message, length delimited. Does not implicitly {@link vtctldata.SetWritableResponse.verify|verify} messages. + * Encodes the specified StopReplicationResponse message, length delimited. Does not implicitly {@link vtctldata.StopReplicationResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.SetWritableResponse + * @memberof vtctldata.StopReplicationResponse * @static - * @param {vtctldata.ISetWritableResponse} message SetWritableResponse message or plain object to encode + * @param {vtctldata.IStopReplicationResponse} message StopReplicationResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SetWritableResponse.encodeDelimited = function encodeDelimited(message, writer) { + StopReplicationResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SetWritableResponse message from the specified reader or buffer. + * Decodes a StopReplicationResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.SetWritableResponse + * @memberof vtctldata.StopReplicationResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SetWritableResponse} SetWritableResponse + * @returns {vtctldata.StopReplicationResponse} StopReplicationResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetWritableResponse.decode = function decode(reader, length) { + StopReplicationResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SetWritableResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.StopReplicationResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -132840,111 +155245,109 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a SetWritableResponse message from the specified reader or buffer, length delimited. + * Decodes a StopReplicationResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.SetWritableResponse + * @memberof vtctldata.StopReplicationResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SetWritableResponse} SetWritableResponse + * @returns {vtctldata.StopReplicationResponse} StopReplicationResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SetWritableResponse.decodeDelimited = function decodeDelimited(reader) { + StopReplicationResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SetWritableResponse message. + * Verifies a StopReplicationResponse message. * @function verify - * @memberof vtctldata.SetWritableResponse + * @memberof vtctldata.StopReplicationResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SetWritableResponse.verify = function verify(message) { + StopReplicationResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; return null; }; /** - * Creates a SetWritableResponse message from a plain object. Also converts values to their respective internal types. + * Creates a StopReplicationResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.SetWritableResponse + * @memberof vtctldata.StopReplicationResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.SetWritableResponse} SetWritableResponse + * @returns {vtctldata.StopReplicationResponse} StopReplicationResponse */ - SetWritableResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SetWritableResponse) + StopReplicationResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.StopReplicationResponse) return object; - return new $root.vtctldata.SetWritableResponse(); + return new $root.vtctldata.StopReplicationResponse(); }; /** - * Creates a plain object from a SetWritableResponse message. Also converts values to other types if specified. + * Creates a plain object from a StopReplicationResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.SetWritableResponse + * @memberof vtctldata.StopReplicationResponse * @static - * @param {vtctldata.SetWritableResponse} message SetWritableResponse + * @param {vtctldata.StopReplicationResponse} message StopReplicationResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SetWritableResponse.toObject = function toObject() { + StopReplicationResponse.toObject = function toObject() { return {}; }; /** - * Converts this SetWritableResponse to JSON. + * Converts this StopReplicationResponse to JSON. 
* @function toJSON - * @memberof vtctldata.SetWritableResponse + * @memberof vtctldata.StopReplicationResponse * @instance * @returns {Object.} JSON object */ - SetWritableResponse.prototype.toJSON = function toJSON() { + StopReplicationResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SetWritableResponse + * Gets the default type url for StopReplicationResponse * @function getTypeUrl - * @memberof vtctldata.SetWritableResponse + * @memberof vtctldata.StopReplicationResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SetWritableResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + StopReplicationResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.SetWritableResponse"; + return typeUrlPrefix + "/vtctldata.StopReplicationResponse"; }; - return SetWritableResponse; + return StopReplicationResponse; })(); - vtctldata.ShardReplicationAddRequest = (function() { + vtctldata.TabletExternallyReparentedRequest = (function() { /** - * Properties of a ShardReplicationAddRequest. + * Properties of a TabletExternallyReparentedRequest. * @memberof vtctldata - * @interface IShardReplicationAddRequest - * @property {string|null} [keyspace] ShardReplicationAddRequest keyspace - * @property {string|null} [shard] ShardReplicationAddRequest shard - * @property {topodata.ITabletAlias|null} [tablet_alias] ShardReplicationAddRequest tablet_alias + * @interface ITabletExternallyReparentedRequest + * @property {topodata.ITabletAlias|null} [tablet] TabletExternallyReparentedRequest tablet */ /** - * Constructs a new ShardReplicationAddRequest. + * Constructs a new TabletExternallyReparentedRequest. 
* @memberof vtctldata - * @classdesc Represents a ShardReplicationAddRequest. - * @implements IShardReplicationAddRequest + * @classdesc Represents a TabletExternallyReparentedRequest. + * @implements ITabletExternallyReparentedRequest * @constructor - * @param {vtctldata.IShardReplicationAddRequest=} [properties] Properties to set + * @param {vtctldata.ITabletExternallyReparentedRequest=} [properties] Properties to set */ - function ShardReplicationAddRequest(properties) { + function TabletExternallyReparentedRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -132952,103 +155355,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ShardReplicationAddRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.ShardReplicationAddRequest - * @instance - */ - ShardReplicationAddRequest.prototype.keyspace = ""; - - /** - * ShardReplicationAddRequest shard. - * @member {string} shard - * @memberof vtctldata.ShardReplicationAddRequest - * @instance - */ - ShardReplicationAddRequest.prototype.shard = ""; - - /** - * ShardReplicationAddRequest tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.ShardReplicationAddRequest + * TabletExternallyReparentedRequest tablet. + * @member {topodata.ITabletAlias|null|undefined} tablet + * @memberof vtctldata.TabletExternallyReparentedRequest * @instance */ - ShardReplicationAddRequest.prototype.tablet_alias = null; + TabletExternallyReparentedRequest.prototype.tablet = null; /** - * Creates a new ShardReplicationAddRequest instance using the specified properties. + * Creates a new TabletExternallyReparentedRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.ShardReplicationAddRequest + * @memberof vtctldata.TabletExternallyReparentedRequest * @static - * @param {vtctldata.IShardReplicationAddRequest=} [properties] Properties to set - * @returns {vtctldata.ShardReplicationAddRequest} ShardReplicationAddRequest instance + * @param {vtctldata.ITabletExternallyReparentedRequest=} [properties] Properties to set + * @returns {vtctldata.TabletExternallyReparentedRequest} TabletExternallyReparentedRequest instance */ - ShardReplicationAddRequest.create = function create(properties) { - return new ShardReplicationAddRequest(properties); + TabletExternallyReparentedRequest.create = function create(properties) { + return new TabletExternallyReparentedRequest(properties); }; /** - * Encodes the specified ShardReplicationAddRequest message. Does not implicitly {@link vtctldata.ShardReplicationAddRequest.verify|verify} messages. + * Encodes the specified TabletExternallyReparentedRequest message. Does not implicitly {@link vtctldata.TabletExternallyReparentedRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ShardReplicationAddRequest + * @memberof vtctldata.TabletExternallyReparentedRequest * @static - * @param {vtctldata.IShardReplicationAddRequest} message ShardReplicationAddRequest message or plain object to encode + * @param {vtctldata.ITabletExternallyReparentedRequest} message TabletExternallyReparentedRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ShardReplicationAddRequest.encode = function encode(message, writer) { + TabletExternallyReparentedRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.tablet != null && Object.hasOwnProperty.call(message, "tablet")) + $root.topodata.TabletAlias.encode(message.tablet, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified ShardReplicationAddRequest message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationAddRequest.verify|verify} messages. + * Encodes the specified TabletExternallyReparentedRequest message, length delimited. Does not implicitly {@link vtctldata.TabletExternallyReparentedRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ShardReplicationAddRequest + * @memberof vtctldata.TabletExternallyReparentedRequest * @static - * @param {vtctldata.IShardReplicationAddRequest} message ShardReplicationAddRequest message or plain object to encode + * @param {vtctldata.ITabletExternallyReparentedRequest} message TabletExternallyReparentedRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ShardReplicationAddRequest.encodeDelimited = function encodeDelimited(message, writer) { + TabletExternallyReparentedRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ShardReplicationAddRequest message from the specified reader or buffer. + * Decodes a TabletExternallyReparentedRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ShardReplicationAddRequest + * @memberof vtctldata.TabletExternallyReparentedRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ShardReplicationAddRequest} ShardReplicationAddRequest + * @returns {vtctldata.TabletExternallyReparentedRequest} TabletExternallyReparentedRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ShardReplicationAddRequest.decode = function decode(reader, length) { + TabletExternallyReparentedRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ShardReplicationAddRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.TabletExternallyReparentedRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - message.shard = reader.string(); - break; - } - case 3: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + message.tablet = $root.topodata.TabletAlias.decode(reader, reader.uint32()); break; } default: @@ -133060,143 +155435,130 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ShardReplicationAddRequest message from the specified reader or buffer, length delimited. + * Decodes a TabletExternallyReparentedRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ShardReplicationAddRequest + * @memberof vtctldata.TabletExternallyReparentedRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ShardReplicationAddRequest} ShardReplicationAddRequest + * @returns {vtctldata.TabletExternallyReparentedRequest} TabletExternallyReparentedRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ShardReplicationAddRequest.decodeDelimited = function decodeDelimited(reader) { + TabletExternallyReparentedRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ShardReplicationAddRequest message. + * Verifies a TabletExternallyReparentedRequest message. 
* @function verify - * @memberof vtctldata.ShardReplicationAddRequest + * @memberof vtctldata.TabletExternallyReparentedRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ShardReplicationAddRequest.verify = function verify(message) { + TabletExternallyReparentedRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (message.tablet != null && message.hasOwnProperty("tablet")) { + let error = $root.topodata.TabletAlias.verify(message.tablet); if (error) - return "tablet_alias." + error; + return "tablet." + error; } return null; }; /** - * Creates a ShardReplicationAddRequest message from a plain object. Also converts values to their respective internal types. + * Creates a TabletExternallyReparentedRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ShardReplicationAddRequest + * @memberof vtctldata.TabletExternallyReparentedRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ShardReplicationAddRequest} ShardReplicationAddRequest + * @returns {vtctldata.TabletExternallyReparentedRequest} TabletExternallyReparentedRequest */ - ShardReplicationAddRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ShardReplicationAddRequest) + TabletExternallyReparentedRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.TabletExternallyReparentedRequest) return object; - let message = new $root.vtctldata.ShardReplicationAddRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.ShardReplicationAddRequest.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + let message = new $root.vtctldata.TabletExternallyReparentedRequest(); + if (object.tablet != null) { + if (typeof object.tablet !== "object") + throw TypeError(".vtctldata.TabletExternallyReparentedRequest.tablet: object expected"); + message.tablet = $root.topodata.TabletAlias.fromObject(object.tablet); } return message; }; /** - * Creates a plain object from a ShardReplicationAddRequest message. Also converts values to other types if specified. + * Creates a plain object from a TabletExternallyReparentedRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ShardReplicationAddRequest + * @memberof vtctldata.TabletExternallyReparentedRequest * @static - * @param {vtctldata.ShardReplicationAddRequest} message ShardReplicationAddRequest + * @param {vtctldata.TabletExternallyReparentedRequest} message TabletExternallyReparentedRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ShardReplicationAddRequest.toObject = function toObject(message, options) { + TabletExternallyReparentedRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.keyspace = ""; - object.shard = ""; - object.tablet_alias = null; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (options.defaults) + object.tablet = null; + if (message.tablet != null && message.hasOwnProperty("tablet")) + object.tablet = $root.topodata.TabletAlias.toObject(message.tablet, options); return object; }; /** - * Converts this ShardReplicationAddRequest to JSON. + * Converts this TabletExternallyReparentedRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ShardReplicationAddRequest + * @memberof vtctldata.TabletExternallyReparentedRequest * @instance * @returns {Object.} JSON object */ - ShardReplicationAddRequest.prototype.toJSON = function toJSON() { + TabletExternallyReparentedRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ShardReplicationAddRequest + * Gets the default type url for TabletExternallyReparentedRequest * @function getTypeUrl - * @memberof vtctldata.ShardReplicationAddRequest + * @memberof vtctldata.TabletExternallyReparentedRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ShardReplicationAddRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + TabletExternallyReparentedRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ShardReplicationAddRequest"; + return typeUrlPrefix + "/vtctldata.TabletExternallyReparentedRequest"; }; - return ShardReplicationAddRequest; + return TabletExternallyReparentedRequest; })(); - vtctldata.ShardReplicationAddResponse = (function() { + vtctldata.TabletExternallyReparentedResponse = (function() { /** - * Properties of a ShardReplicationAddResponse. + * Properties of a TabletExternallyReparentedResponse. 
* @memberof vtctldata - * @interface IShardReplicationAddResponse + * @interface ITabletExternallyReparentedResponse + * @property {string|null} [keyspace] TabletExternallyReparentedResponse keyspace + * @property {string|null} [shard] TabletExternallyReparentedResponse shard + * @property {topodata.ITabletAlias|null} [new_primary] TabletExternallyReparentedResponse new_primary + * @property {topodata.ITabletAlias|null} [old_primary] TabletExternallyReparentedResponse old_primary */ /** - * Constructs a new ShardReplicationAddResponse. + * Constructs a new TabletExternallyReparentedResponse. * @memberof vtctldata - * @classdesc Represents a ShardReplicationAddResponse. - * @implements IShardReplicationAddResponse + * @classdesc Represents a TabletExternallyReparentedResponse. + * @implements ITabletExternallyReparentedResponse * @constructor - * @param {vtctldata.IShardReplicationAddResponse=} [properties] Properties to set + * @param {vtctldata.ITabletExternallyReparentedResponse=} [properties] Properties to set */ - function ShardReplicationAddResponse(properties) { + function TabletExternallyReparentedResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -133204,63 +155566,119 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new ShardReplicationAddResponse instance using the specified properties. + * TabletExternallyReparentedResponse keyspace. + * @member {string} keyspace + * @memberof vtctldata.TabletExternallyReparentedResponse + * @instance + */ + TabletExternallyReparentedResponse.prototype.keyspace = ""; + + /** + * TabletExternallyReparentedResponse shard. + * @member {string} shard + * @memberof vtctldata.TabletExternallyReparentedResponse + * @instance + */ + TabletExternallyReparentedResponse.prototype.shard = ""; + + /** + * TabletExternallyReparentedResponse new_primary. 
+ * @member {topodata.ITabletAlias|null|undefined} new_primary + * @memberof vtctldata.TabletExternallyReparentedResponse + * @instance + */ + TabletExternallyReparentedResponse.prototype.new_primary = null; + + /** + * TabletExternallyReparentedResponse old_primary. + * @member {topodata.ITabletAlias|null|undefined} old_primary + * @memberof vtctldata.TabletExternallyReparentedResponse + * @instance + */ + TabletExternallyReparentedResponse.prototype.old_primary = null; + + /** + * Creates a new TabletExternallyReparentedResponse instance using the specified properties. * @function create - * @memberof vtctldata.ShardReplicationAddResponse + * @memberof vtctldata.TabletExternallyReparentedResponse * @static - * @param {vtctldata.IShardReplicationAddResponse=} [properties] Properties to set - * @returns {vtctldata.ShardReplicationAddResponse} ShardReplicationAddResponse instance + * @param {vtctldata.ITabletExternallyReparentedResponse=} [properties] Properties to set + * @returns {vtctldata.TabletExternallyReparentedResponse} TabletExternallyReparentedResponse instance */ - ShardReplicationAddResponse.create = function create(properties) { - return new ShardReplicationAddResponse(properties); + TabletExternallyReparentedResponse.create = function create(properties) { + return new TabletExternallyReparentedResponse(properties); }; /** - * Encodes the specified ShardReplicationAddResponse message. Does not implicitly {@link vtctldata.ShardReplicationAddResponse.verify|verify} messages. + * Encodes the specified TabletExternallyReparentedResponse message. Does not implicitly {@link vtctldata.TabletExternallyReparentedResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ShardReplicationAddResponse + * @memberof vtctldata.TabletExternallyReparentedResponse * @static - * @param {vtctldata.IShardReplicationAddResponse} message ShardReplicationAddResponse message or plain object to encode + * @param {vtctldata.ITabletExternallyReparentedResponse} message TabletExternallyReparentedResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ShardReplicationAddResponse.encode = function encode(message, writer) { + TabletExternallyReparentedResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.new_primary != null && Object.hasOwnProperty.call(message, "new_primary")) + $root.topodata.TabletAlias.encode(message.new_primary, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.old_primary != null && Object.hasOwnProperty.call(message, "old_primary")) + $root.topodata.TabletAlias.encode(message.old_primary, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; /** - * Encodes the specified ShardReplicationAddResponse message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationAddResponse.verify|verify} messages. + * Encodes the specified TabletExternallyReparentedResponse message, length delimited. Does not implicitly {@link vtctldata.TabletExternallyReparentedResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ShardReplicationAddResponse + * @memberof vtctldata.TabletExternallyReparentedResponse * @static - * @param {vtctldata.IShardReplicationAddResponse} message ShardReplicationAddResponse message or plain object to encode + * @param {vtctldata.ITabletExternallyReparentedResponse} message TabletExternallyReparentedResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ShardReplicationAddResponse.encodeDelimited = function encodeDelimited(message, writer) { + TabletExternallyReparentedResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ShardReplicationAddResponse message from the specified reader or buffer. + * Decodes a TabletExternallyReparentedResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ShardReplicationAddResponse + * @memberof vtctldata.TabletExternallyReparentedResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ShardReplicationAddResponse} ShardReplicationAddResponse + * @returns {vtctldata.TabletExternallyReparentedResponse} TabletExternallyReparentedResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ShardReplicationAddResponse.decode = function decode(reader, length) { + TabletExternallyReparentedResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ShardReplicationAddResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.TabletExternallyReparentedResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.keyspace = reader.string(); + break; + } + case 2: { + message.shard = reader.string(); + break; + } + case 3: { + message.new_primary = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 4: { + message.old_primary = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -133270,111 +155688,158 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ShardReplicationAddResponse message from the specified reader or buffer, length delimited. + * Decodes a TabletExternallyReparentedResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ShardReplicationAddResponse + * @memberof vtctldata.TabletExternallyReparentedResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ShardReplicationAddResponse} ShardReplicationAddResponse + * @returns {vtctldata.TabletExternallyReparentedResponse} TabletExternallyReparentedResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ShardReplicationAddResponse.decodeDelimited = function decodeDelimited(reader) { + TabletExternallyReparentedResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ShardReplicationAddResponse message. + * Verifies a TabletExternallyReparentedResponse message. 
* @function verify - * @memberof vtctldata.ShardReplicationAddResponse + * @memberof vtctldata.TabletExternallyReparentedResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ShardReplicationAddResponse.verify = function verify(message) { + TabletExternallyReparentedResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.new_primary != null && message.hasOwnProperty("new_primary")) { + let error = $root.topodata.TabletAlias.verify(message.new_primary); + if (error) + return "new_primary." + error; + } + if (message.old_primary != null && message.hasOwnProperty("old_primary")) { + let error = $root.topodata.TabletAlias.verify(message.old_primary); + if (error) + return "old_primary." + error; + } return null; }; /** - * Creates a ShardReplicationAddResponse message from a plain object. Also converts values to their respective internal types. + * Creates a TabletExternallyReparentedResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ShardReplicationAddResponse + * @memberof vtctldata.TabletExternallyReparentedResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ShardReplicationAddResponse} ShardReplicationAddResponse + * @returns {vtctldata.TabletExternallyReparentedResponse} TabletExternallyReparentedResponse */ - ShardReplicationAddResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ShardReplicationAddResponse) + TabletExternallyReparentedResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.TabletExternallyReparentedResponse) return object; - return new $root.vtctldata.ShardReplicationAddResponse(); + let message = new $root.vtctldata.TabletExternallyReparentedResponse(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + if (object.new_primary != null) { + if (typeof object.new_primary !== "object") + throw TypeError(".vtctldata.TabletExternallyReparentedResponse.new_primary: object expected"); + message.new_primary = $root.topodata.TabletAlias.fromObject(object.new_primary); + } + if (object.old_primary != null) { + if (typeof object.old_primary !== "object") + throw TypeError(".vtctldata.TabletExternallyReparentedResponse.old_primary: object expected"); + message.old_primary = $root.topodata.TabletAlias.fromObject(object.old_primary); + } + return message; }; /** - * Creates a plain object from a ShardReplicationAddResponse message. Also converts values to other types if specified. + * Creates a plain object from a TabletExternallyReparentedResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ShardReplicationAddResponse + * @memberof vtctldata.TabletExternallyReparentedResponse * @static - * @param {vtctldata.ShardReplicationAddResponse} message ShardReplicationAddResponse + * @param {vtctldata.TabletExternallyReparentedResponse} message TabletExternallyReparentedResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ShardReplicationAddResponse.toObject = function toObject() { - return {}; + TabletExternallyReparentedResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.keyspace = ""; + object.shard = ""; + object.new_primary = null; + object.old_primary = null; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.new_primary != null && message.hasOwnProperty("new_primary")) + object.new_primary = $root.topodata.TabletAlias.toObject(message.new_primary, options); + if (message.old_primary != null && message.hasOwnProperty("old_primary")) + object.old_primary = $root.topodata.TabletAlias.toObject(message.old_primary, options); + return object; }; /** - * Converts this ShardReplicationAddResponse to JSON. + * Converts this TabletExternallyReparentedResponse to JSON. 
* @function toJSON - * @memberof vtctldata.ShardReplicationAddResponse + * @memberof vtctldata.TabletExternallyReparentedResponse * @instance * @returns {Object.} JSON object */ - ShardReplicationAddResponse.prototype.toJSON = function toJSON() { + TabletExternallyReparentedResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ShardReplicationAddResponse + * Gets the default type url for TabletExternallyReparentedResponse * @function getTypeUrl - * @memberof vtctldata.ShardReplicationAddResponse + * @memberof vtctldata.TabletExternallyReparentedResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ShardReplicationAddResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + TabletExternallyReparentedResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ShardReplicationAddResponse"; + return typeUrlPrefix + "/vtctldata.TabletExternallyReparentedResponse"; }; - return ShardReplicationAddResponse; + return TabletExternallyReparentedResponse; })(); - vtctldata.ShardReplicationFixRequest = (function() { + vtctldata.UpdateCellInfoRequest = (function() { /** - * Properties of a ShardReplicationFixRequest. + * Properties of an UpdateCellInfoRequest. 
* @memberof vtctldata - * @interface IShardReplicationFixRequest - * @property {string|null} [keyspace] ShardReplicationFixRequest keyspace - * @property {string|null} [shard] ShardReplicationFixRequest shard - * @property {string|null} [cell] ShardReplicationFixRequest cell + * @interface IUpdateCellInfoRequest + * @property {string|null} [name] UpdateCellInfoRequest name + * @property {topodata.ICellInfo|null} [cell_info] UpdateCellInfoRequest cell_info */ /** - * Constructs a new ShardReplicationFixRequest. + * Constructs a new UpdateCellInfoRequest. * @memberof vtctldata - * @classdesc Represents a ShardReplicationFixRequest. - * @implements IShardReplicationFixRequest + * @classdesc Represents an UpdateCellInfoRequest. + * @implements IUpdateCellInfoRequest * @constructor - * @param {vtctldata.IShardReplicationFixRequest=} [properties] Properties to set + * @param {vtctldata.IUpdateCellInfoRequest=} [properties] Properties to set */ - function ShardReplicationFixRequest(properties) { + function UpdateCellInfoRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -133382,103 +155847,89 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ShardReplicationFixRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.ShardReplicationFixRequest - * @instance - */ - ShardReplicationFixRequest.prototype.keyspace = ""; - - /** - * ShardReplicationFixRequest shard. - * @member {string} shard - * @memberof vtctldata.ShardReplicationFixRequest + * UpdateCellInfoRequest name. + * @member {string} name + * @memberof vtctldata.UpdateCellInfoRequest * @instance */ - ShardReplicationFixRequest.prototype.shard = ""; + UpdateCellInfoRequest.prototype.name = ""; /** - * ShardReplicationFixRequest cell. - * @member {string} cell - * @memberof vtctldata.ShardReplicationFixRequest + * UpdateCellInfoRequest cell_info. 
+ * @member {topodata.ICellInfo|null|undefined} cell_info + * @memberof vtctldata.UpdateCellInfoRequest * @instance */ - ShardReplicationFixRequest.prototype.cell = ""; + UpdateCellInfoRequest.prototype.cell_info = null; /** - * Creates a new ShardReplicationFixRequest instance using the specified properties. + * Creates a new UpdateCellInfoRequest instance using the specified properties. * @function create - * @memberof vtctldata.ShardReplicationFixRequest + * @memberof vtctldata.UpdateCellInfoRequest * @static - * @param {vtctldata.IShardReplicationFixRequest=} [properties] Properties to set - * @returns {vtctldata.ShardReplicationFixRequest} ShardReplicationFixRequest instance + * @param {vtctldata.IUpdateCellInfoRequest=} [properties] Properties to set + * @returns {vtctldata.UpdateCellInfoRequest} UpdateCellInfoRequest instance */ - ShardReplicationFixRequest.create = function create(properties) { - return new ShardReplicationFixRequest(properties); + UpdateCellInfoRequest.create = function create(properties) { + return new UpdateCellInfoRequest(properties); }; /** - * Encodes the specified ShardReplicationFixRequest message. Does not implicitly {@link vtctldata.ShardReplicationFixRequest.verify|verify} messages. + * Encodes the specified UpdateCellInfoRequest message. Does not implicitly {@link vtctldata.UpdateCellInfoRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ShardReplicationFixRequest + * @memberof vtctldata.UpdateCellInfoRequest * @static - * @param {vtctldata.IShardReplicationFixRequest} message ShardReplicationFixRequest message or plain object to encode + * @param {vtctldata.IUpdateCellInfoRequest} message UpdateCellInfoRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ShardReplicationFixRequest.encode = function encode(message, writer) { + UpdateCellInfoRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.cell != null && Object.hasOwnProperty.call(message, "cell")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.cell); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.cell_info != null && Object.hasOwnProperty.call(message, "cell_info")) + $root.topodata.CellInfo.encode(message.cell_info, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); return writer; }; /** - * Encodes the specified ShardReplicationFixRequest message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationFixRequest.verify|verify} messages. + * Encodes the specified UpdateCellInfoRequest message, length delimited. Does not implicitly {@link vtctldata.UpdateCellInfoRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ShardReplicationFixRequest + * @memberof vtctldata.UpdateCellInfoRequest * @static - * @param {vtctldata.IShardReplicationFixRequest} message ShardReplicationFixRequest message or plain object to encode + * @param {vtctldata.IUpdateCellInfoRequest} message UpdateCellInfoRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ShardReplicationFixRequest.encodeDelimited = function encodeDelimited(message, writer) { + UpdateCellInfoRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ShardReplicationFixRequest message from the specified reader or buffer. + * Decodes an UpdateCellInfoRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ShardReplicationFixRequest + * @memberof vtctldata.UpdateCellInfoRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ShardReplicationFixRequest} ShardReplicationFixRequest + * @returns {vtctldata.UpdateCellInfoRequest} UpdateCellInfoRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ShardReplicationFixRequest.decode = function decode(reader, length) { + UpdateCellInfoRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ShardReplicationFixRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.UpdateCellInfoRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); + message.name = reader.string(); break; } case 2: { - message.shard = reader.string(); - break; - } - case 3: { - message.cell = reader.string(); + message.cell_info = $root.topodata.CellInfo.decode(reader, reader.uint32()); break; } default: @@ -133490,139 +155941,137 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ShardReplicationFixRequest message from the specified reader or buffer, length delimited. + * Decodes an UpdateCellInfoRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ShardReplicationFixRequest + * @memberof vtctldata.UpdateCellInfoRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ShardReplicationFixRequest} ShardReplicationFixRequest + * @returns {vtctldata.UpdateCellInfoRequest} UpdateCellInfoRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ShardReplicationFixRequest.decodeDelimited = function decodeDelimited(reader) { + UpdateCellInfoRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ShardReplicationFixRequest message. + * Verifies an UpdateCellInfoRequest message. 
* @function verify - * @memberof vtctldata.ShardReplicationFixRequest + * @memberof vtctldata.UpdateCellInfoRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ShardReplicationFixRequest.verify = function verify(message) { + UpdateCellInfoRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.cell != null && message.hasOwnProperty("cell")) - if (!$util.isString(message.cell)) - return "cell: string expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.cell_info != null && message.hasOwnProperty("cell_info")) { + let error = $root.topodata.CellInfo.verify(message.cell_info); + if (error) + return "cell_info." + error; + } return null; }; /** - * Creates a ShardReplicationFixRequest message from a plain object. Also converts values to their respective internal types. + * Creates an UpdateCellInfoRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ShardReplicationFixRequest + * @memberof vtctldata.UpdateCellInfoRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ShardReplicationFixRequest} ShardReplicationFixRequest + * @returns {vtctldata.UpdateCellInfoRequest} UpdateCellInfoRequest */ - ShardReplicationFixRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ShardReplicationFixRequest) + UpdateCellInfoRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.UpdateCellInfoRequest) return object; - let message = new $root.vtctldata.ShardReplicationFixRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.cell != null) - message.cell = String(object.cell); + let message = new $root.vtctldata.UpdateCellInfoRequest(); + if (object.name != null) + message.name = String(object.name); + if (object.cell_info != null) { + if (typeof object.cell_info !== "object") + throw TypeError(".vtctldata.UpdateCellInfoRequest.cell_info: object expected"); + message.cell_info = $root.topodata.CellInfo.fromObject(object.cell_info); + } return message; }; /** - * Creates a plain object from a ShardReplicationFixRequest message. Also converts values to other types if specified. + * Creates a plain object from an UpdateCellInfoRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ShardReplicationFixRequest + * @memberof vtctldata.UpdateCellInfoRequest * @static - * @param {vtctldata.ShardReplicationFixRequest} message ShardReplicationFixRequest + * @param {vtctldata.UpdateCellInfoRequest} message UpdateCellInfoRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ShardReplicationFixRequest.toObject = function toObject(message, options) { + UpdateCellInfoRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.keyspace = ""; - object.shard = ""; - object.cell = ""; + object.name = ""; + object.cell_info = null; } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.cell != null && message.hasOwnProperty("cell")) - object.cell = message.cell; + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.cell_info != null && message.hasOwnProperty("cell_info")) + object.cell_info = $root.topodata.CellInfo.toObject(message.cell_info, options); return object; }; /** - * Converts this ShardReplicationFixRequest to JSON. + * Converts this UpdateCellInfoRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ShardReplicationFixRequest + * @memberof vtctldata.UpdateCellInfoRequest * @instance * @returns {Object.} JSON object */ - ShardReplicationFixRequest.prototype.toJSON = function toJSON() { + UpdateCellInfoRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ShardReplicationFixRequest + * Gets the default type url for UpdateCellInfoRequest * @function getTypeUrl - * @memberof vtctldata.ShardReplicationFixRequest + * @memberof vtctldata.UpdateCellInfoRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ShardReplicationFixRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + UpdateCellInfoRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ShardReplicationFixRequest"; + return typeUrlPrefix + "/vtctldata.UpdateCellInfoRequest"; }; - return ShardReplicationFixRequest; + return UpdateCellInfoRequest; })(); - vtctldata.ShardReplicationFixResponse = (function() { + vtctldata.UpdateCellInfoResponse = (function() { /** - * Properties of a ShardReplicationFixResponse. + * Properties of an UpdateCellInfoResponse. * @memberof vtctldata - * @interface IShardReplicationFixResponse - * @property {topodata.IShardReplicationError|null} [error] ShardReplicationFixResponse error + * @interface IUpdateCellInfoResponse + * @property {string|null} [name] UpdateCellInfoResponse name + * @property {topodata.ICellInfo|null} [cell_info] UpdateCellInfoResponse cell_info */ /** - * Constructs a new ShardReplicationFixResponse. + * Constructs a new UpdateCellInfoResponse. * @memberof vtctldata - * @classdesc Represents a ShardReplicationFixResponse. 
- * @implements IShardReplicationFixResponse + * @classdesc Represents an UpdateCellInfoResponse. + * @implements IUpdateCellInfoResponse * @constructor - * @param {vtctldata.IShardReplicationFixResponse=} [properties] Properties to set + * @param {vtctldata.IUpdateCellInfoResponse=} [properties] Properties to set */ - function ShardReplicationFixResponse(properties) { + function UpdateCellInfoResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -133630,75 +156079,89 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ShardReplicationFixResponse error. - * @member {topodata.IShardReplicationError|null|undefined} error - * @memberof vtctldata.ShardReplicationFixResponse + * UpdateCellInfoResponse name. + * @member {string} name + * @memberof vtctldata.UpdateCellInfoResponse * @instance */ - ShardReplicationFixResponse.prototype.error = null; + UpdateCellInfoResponse.prototype.name = ""; /** - * Creates a new ShardReplicationFixResponse instance using the specified properties. + * UpdateCellInfoResponse cell_info. + * @member {topodata.ICellInfo|null|undefined} cell_info + * @memberof vtctldata.UpdateCellInfoResponse + * @instance + */ + UpdateCellInfoResponse.prototype.cell_info = null; + + /** + * Creates a new UpdateCellInfoResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.ShardReplicationFixResponse + * @memberof vtctldata.UpdateCellInfoResponse * @static - * @param {vtctldata.IShardReplicationFixResponse=} [properties] Properties to set - * @returns {vtctldata.ShardReplicationFixResponse} ShardReplicationFixResponse instance + * @param {vtctldata.IUpdateCellInfoResponse=} [properties] Properties to set + * @returns {vtctldata.UpdateCellInfoResponse} UpdateCellInfoResponse instance */ - ShardReplicationFixResponse.create = function create(properties) { - return new ShardReplicationFixResponse(properties); + UpdateCellInfoResponse.create = function create(properties) { + return new UpdateCellInfoResponse(properties); }; /** - * Encodes the specified ShardReplicationFixResponse message. Does not implicitly {@link vtctldata.ShardReplicationFixResponse.verify|verify} messages. + * Encodes the specified UpdateCellInfoResponse message. Does not implicitly {@link vtctldata.UpdateCellInfoResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ShardReplicationFixResponse + * @memberof vtctldata.UpdateCellInfoResponse * @static - * @param {vtctldata.IShardReplicationFixResponse} message ShardReplicationFixResponse message or plain object to encode + * @param {vtctldata.IUpdateCellInfoResponse} message UpdateCellInfoResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ShardReplicationFixResponse.encode = function encode(message, writer) { + UpdateCellInfoResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.error != null && Object.hasOwnProperty.call(message, "error")) - $root.topodata.ShardReplicationError.encode(message.error, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.cell_info != null && Object.hasOwnProperty.call(message, "cell_info")) + $root.topodata.CellInfo.encode(message.cell_info, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); return writer; }; /** - * Encodes the specified ShardReplicationFixResponse message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationFixResponse.verify|verify} messages. + * Encodes the specified UpdateCellInfoResponse message, length delimited. Does not implicitly {@link vtctldata.UpdateCellInfoResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ShardReplicationFixResponse + * @memberof vtctldata.UpdateCellInfoResponse * @static - * @param {vtctldata.IShardReplicationFixResponse} message ShardReplicationFixResponse message or plain object to encode + * @param {vtctldata.IUpdateCellInfoResponse} message UpdateCellInfoResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ShardReplicationFixResponse.encodeDelimited = function encodeDelimited(message, writer) { + UpdateCellInfoResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ShardReplicationFixResponse message from the specified reader or buffer. + * Decodes an UpdateCellInfoResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ShardReplicationFixResponse + * @memberof vtctldata.UpdateCellInfoResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ShardReplicationFixResponse} ShardReplicationFixResponse + * @returns {vtctldata.UpdateCellInfoResponse} UpdateCellInfoResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ShardReplicationFixResponse.decode = function decode(reader, length) { + UpdateCellInfoResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ShardReplicationFixResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.UpdateCellInfoResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.error = $root.topodata.ShardReplicationError.decode(reader, reader.uint32()); + message.name = reader.string(); + break; + } + case 2: { + message.cell_info = $root.topodata.CellInfo.decode(reader, reader.uint32()); break; } default: @@ -133710,128 +156173,137 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ShardReplicationFixResponse message from the specified reader or buffer, length delimited. + * Decodes an UpdateCellInfoResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ShardReplicationFixResponse + * @memberof vtctldata.UpdateCellInfoResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ShardReplicationFixResponse} ShardReplicationFixResponse + * @returns {vtctldata.UpdateCellInfoResponse} UpdateCellInfoResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ShardReplicationFixResponse.decodeDelimited = function decodeDelimited(reader) { + UpdateCellInfoResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ShardReplicationFixResponse message. + * Verifies an UpdateCellInfoResponse message. 
* @function verify - * @memberof vtctldata.ShardReplicationFixResponse + * @memberof vtctldata.UpdateCellInfoResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ShardReplicationFixResponse.verify = function verify(message) { + UpdateCellInfoResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.error != null && message.hasOwnProperty("error")) { - let error = $root.topodata.ShardReplicationError.verify(message.error); + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.cell_info != null && message.hasOwnProperty("cell_info")) { + let error = $root.topodata.CellInfo.verify(message.cell_info); if (error) - return "error." + error; + return "cell_info." + error; } return null; }; /** - * Creates a ShardReplicationFixResponse message from a plain object. Also converts values to their respective internal types. + * Creates an UpdateCellInfoResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ShardReplicationFixResponse + * @memberof vtctldata.UpdateCellInfoResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ShardReplicationFixResponse} ShardReplicationFixResponse + * @returns {vtctldata.UpdateCellInfoResponse} UpdateCellInfoResponse */ - ShardReplicationFixResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ShardReplicationFixResponse) + UpdateCellInfoResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.UpdateCellInfoResponse) return object; - let message = new $root.vtctldata.ShardReplicationFixResponse(); - if (object.error != null) { - if (typeof object.error !== "object") - throw TypeError(".vtctldata.ShardReplicationFixResponse.error: object expected"); - message.error = $root.topodata.ShardReplicationError.fromObject(object.error); + let message = new $root.vtctldata.UpdateCellInfoResponse(); + if (object.name != null) + message.name = String(object.name); + if (object.cell_info != null) { + if (typeof object.cell_info !== "object") + throw TypeError(".vtctldata.UpdateCellInfoResponse.cell_info: object expected"); + message.cell_info = $root.topodata.CellInfo.fromObject(object.cell_info); } return message; }; /** - * Creates a plain object from a ShardReplicationFixResponse message. Also converts values to other types if specified. + * Creates a plain object from an UpdateCellInfoResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ShardReplicationFixResponse + * @memberof vtctldata.UpdateCellInfoResponse * @static - * @param {vtctldata.ShardReplicationFixResponse} message ShardReplicationFixResponse + * @param {vtctldata.UpdateCellInfoResponse} message UpdateCellInfoResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ShardReplicationFixResponse.toObject = function toObject(message, options) { + UpdateCellInfoResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.error = null; - if (message.error != null && message.hasOwnProperty("error")) - object.error = $root.topodata.ShardReplicationError.toObject(message.error, options); + if (options.defaults) { + object.name = ""; + object.cell_info = null; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.cell_info != null && message.hasOwnProperty("cell_info")) + object.cell_info = $root.topodata.CellInfo.toObject(message.cell_info, options); return object; }; /** - * Converts this ShardReplicationFixResponse to JSON. + * Converts this UpdateCellInfoResponse to JSON. 
* @function toJSON - * @memberof vtctldata.ShardReplicationFixResponse + * @memberof vtctldata.UpdateCellInfoResponse * @instance * @returns {Object.} JSON object */ - ShardReplicationFixResponse.prototype.toJSON = function toJSON() { + UpdateCellInfoResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ShardReplicationFixResponse + * Gets the default type url for UpdateCellInfoResponse * @function getTypeUrl - * @memberof vtctldata.ShardReplicationFixResponse + * @memberof vtctldata.UpdateCellInfoResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ShardReplicationFixResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + UpdateCellInfoResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ShardReplicationFixResponse"; + return typeUrlPrefix + "/vtctldata.UpdateCellInfoResponse"; }; - return ShardReplicationFixResponse; + return UpdateCellInfoResponse; })(); - vtctldata.ShardReplicationPositionsRequest = (function() { + vtctldata.UpdateCellsAliasRequest = (function() { /** - * Properties of a ShardReplicationPositionsRequest. + * Properties of an UpdateCellsAliasRequest. * @memberof vtctldata - * @interface IShardReplicationPositionsRequest - * @property {string|null} [keyspace] ShardReplicationPositionsRequest keyspace - * @property {string|null} [shard] ShardReplicationPositionsRequest shard + * @interface IUpdateCellsAliasRequest + * @property {string|null} [name] UpdateCellsAliasRequest name + * @property {topodata.ICellsAlias|null} [cells_alias] UpdateCellsAliasRequest cells_alias */ /** - * Constructs a new ShardReplicationPositionsRequest. + * Constructs a new UpdateCellsAliasRequest. 
* @memberof vtctldata - * @classdesc Represents a ShardReplicationPositionsRequest. - * @implements IShardReplicationPositionsRequest + * @classdesc Represents an UpdateCellsAliasRequest. + * @implements IUpdateCellsAliasRequest * @constructor - * @param {vtctldata.IShardReplicationPositionsRequest=} [properties] Properties to set + * @param {vtctldata.IUpdateCellsAliasRequest=} [properties] Properties to set */ - function ShardReplicationPositionsRequest(properties) { + function UpdateCellsAliasRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -133839,89 +156311,89 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ShardReplicationPositionsRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.ShardReplicationPositionsRequest + * UpdateCellsAliasRequest name. + * @member {string} name + * @memberof vtctldata.UpdateCellsAliasRequest * @instance */ - ShardReplicationPositionsRequest.prototype.keyspace = ""; + UpdateCellsAliasRequest.prototype.name = ""; /** - * ShardReplicationPositionsRequest shard. - * @member {string} shard - * @memberof vtctldata.ShardReplicationPositionsRequest + * UpdateCellsAliasRequest cells_alias. + * @member {topodata.ICellsAlias|null|undefined} cells_alias + * @memberof vtctldata.UpdateCellsAliasRequest * @instance */ - ShardReplicationPositionsRequest.prototype.shard = ""; + UpdateCellsAliasRequest.prototype.cells_alias = null; /** - * Creates a new ShardReplicationPositionsRequest instance using the specified properties. + * Creates a new UpdateCellsAliasRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.ShardReplicationPositionsRequest + * @memberof vtctldata.UpdateCellsAliasRequest * @static - * @param {vtctldata.IShardReplicationPositionsRequest=} [properties] Properties to set - * @returns {vtctldata.ShardReplicationPositionsRequest} ShardReplicationPositionsRequest instance + * @param {vtctldata.IUpdateCellsAliasRequest=} [properties] Properties to set + * @returns {vtctldata.UpdateCellsAliasRequest} UpdateCellsAliasRequest instance */ - ShardReplicationPositionsRequest.create = function create(properties) { - return new ShardReplicationPositionsRequest(properties); + UpdateCellsAliasRequest.create = function create(properties) { + return new UpdateCellsAliasRequest(properties); }; /** - * Encodes the specified ShardReplicationPositionsRequest message. Does not implicitly {@link vtctldata.ShardReplicationPositionsRequest.verify|verify} messages. + * Encodes the specified UpdateCellsAliasRequest message. Does not implicitly {@link vtctldata.UpdateCellsAliasRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ShardReplicationPositionsRequest + * @memberof vtctldata.UpdateCellsAliasRequest * @static - * @param {vtctldata.IShardReplicationPositionsRequest} message ShardReplicationPositionsRequest message or plain object to encode + * @param {vtctldata.IUpdateCellsAliasRequest} message UpdateCellsAliasRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ShardReplicationPositionsRequest.encode = function encode(message, writer) { + UpdateCellsAliasRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.cells_alias != null && Object.hasOwnProperty.call(message, "cells_alias")) + $root.topodata.CellsAlias.encode(message.cells_alias, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); return writer; }; /** - * Encodes the specified ShardReplicationPositionsRequest message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationPositionsRequest.verify|verify} messages. + * Encodes the specified UpdateCellsAliasRequest message, length delimited. Does not implicitly {@link vtctldata.UpdateCellsAliasRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ShardReplicationPositionsRequest + * @memberof vtctldata.UpdateCellsAliasRequest * @static - * @param {vtctldata.IShardReplicationPositionsRequest} message ShardReplicationPositionsRequest message or plain object to encode + * @param {vtctldata.IUpdateCellsAliasRequest} message UpdateCellsAliasRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ShardReplicationPositionsRequest.encodeDelimited = function encodeDelimited(message, writer) { + UpdateCellsAliasRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ShardReplicationPositionsRequest message from the specified reader or buffer. + * Decodes an UpdateCellsAliasRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ShardReplicationPositionsRequest + * @memberof vtctldata.UpdateCellsAliasRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ShardReplicationPositionsRequest} ShardReplicationPositionsRequest + * @returns {vtctldata.UpdateCellsAliasRequest} UpdateCellsAliasRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ShardReplicationPositionsRequest.decode = function decode(reader, length) { + UpdateCellsAliasRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ShardReplicationPositionsRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.UpdateCellsAliasRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); + message.name = reader.string(); break; } case 2: { - message.shard = reader.string(); + message.cells_alias = $root.topodata.CellsAlias.decode(reader, reader.uint32()); break; } default: @@ -133933,134 +156405,137 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ShardReplicationPositionsRequest message from the specified reader or buffer, length delimited. + * Decodes an UpdateCellsAliasRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ShardReplicationPositionsRequest + * @memberof vtctldata.UpdateCellsAliasRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ShardReplicationPositionsRequest} ShardReplicationPositionsRequest + * @returns {vtctldata.UpdateCellsAliasRequest} UpdateCellsAliasRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ShardReplicationPositionsRequest.decodeDelimited = function decodeDelimited(reader) { + UpdateCellsAliasRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ShardReplicationPositionsRequest message. + * Verifies an UpdateCellsAliasRequest message. 
* @function verify - * @memberof vtctldata.ShardReplicationPositionsRequest + * @memberof vtctldata.UpdateCellsAliasRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ShardReplicationPositionsRequest.verify = function verify(message) { + UpdateCellsAliasRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.cells_alias != null && message.hasOwnProperty("cells_alias")) { + let error = $root.topodata.CellsAlias.verify(message.cells_alias); + if (error) + return "cells_alias." + error; + } return null; }; /** - * Creates a ShardReplicationPositionsRequest message from a plain object. Also converts values to their respective internal types. + * Creates an UpdateCellsAliasRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ShardReplicationPositionsRequest + * @memberof vtctldata.UpdateCellsAliasRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ShardReplicationPositionsRequest} ShardReplicationPositionsRequest + * @returns {vtctldata.UpdateCellsAliasRequest} UpdateCellsAliasRequest */ - ShardReplicationPositionsRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ShardReplicationPositionsRequest) + UpdateCellsAliasRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.UpdateCellsAliasRequest) return object; - let message = new $root.vtctldata.ShardReplicationPositionsRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); + let message = new $root.vtctldata.UpdateCellsAliasRequest(); + if (object.name != null) + message.name = String(object.name); + if (object.cells_alias != null) { + if (typeof object.cells_alias !== "object") + throw TypeError(".vtctldata.UpdateCellsAliasRequest.cells_alias: object expected"); + message.cells_alias = $root.topodata.CellsAlias.fromObject(object.cells_alias); + } return message; }; /** - * Creates a plain object from a ShardReplicationPositionsRequest message. Also converts values to other types if specified. + * Creates a plain object from an UpdateCellsAliasRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ShardReplicationPositionsRequest + * @memberof vtctldata.UpdateCellsAliasRequest * @static - * @param {vtctldata.ShardReplicationPositionsRequest} message ShardReplicationPositionsRequest + * @param {vtctldata.UpdateCellsAliasRequest} message UpdateCellsAliasRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ShardReplicationPositionsRequest.toObject = function toObject(message, options) { + UpdateCellsAliasRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.keyspace = ""; - object.shard = ""; + object.name = ""; + object.cells_alias = null; } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.cells_alias != null && message.hasOwnProperty("cells_alias")) + object.cells_alias = $root.topodata.CellsAlias.toObject(message.cells_alias, options); return object; }; /** - * Converts this ShardReplicationPositionsRequest to JSON. + * Converts this UpdateCellsAliasRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ShardReplicationPositionsRequest + * @memberof vtctldata.UpdateCellsAliasRequest * @instance * @returns {Object.} JSON object */ - ShardReplicationPositionsRequest.prototype.toJSON = function toJSON() { + UpdateCellsAliasRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ShardReplicationPositionsRequest + * Gets the default type url for UpdateCellsAliasRequest * @function getTypeUrl - * @memberof vtctldata.ShardReplicationPositionsRequest + * @memberof vtctldata.UpdateCellsAliasRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ShardReplicationPositionsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + UpdateCellsAliasRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ShardReplicationPositionsRequest"; + return typeUrlPrefix + "/vtctldata.UpdateCellsAliasRequest"; }; - return ShardReplicationPositionsRequest; + return UpdateCellsAliasRequest; })(); - vtctldata.ShardReplicationPositionsResponse = (function() { + vtctldata.UpdateCellsAliasResponse = (function() { /** - * Properties of a ShardReplicationPositionsResponse. + * Properties of an UpdateCellsAliasResponse. * @memberof vtctldata - * @interface IShardReplicationPositionsResponse - * @property {Object.|null} [replication_statuses] ShardReplicationPositionsResponse replication_statuses - * @property {Object.|null} [tablet_map] ShardReplicationPositionsResponse tablet_map + * @interface IUpdateCellsAliasResponse + * @property {string|null} [name] UpdateCellsAliasResponse name + * @property {topodata.ICellsAlias|null} [cells_alias] UpdateCellsAliasResponse cells_alias */ /** - * Constructs a new ShardReplicationPositionsResponse. 
+ * Constructs a new UpdateCellsAliasResponse. * @memberof vtctldata - * @classdesc Represents a ShardReplicationPositionsResponse. - * @implements IShardReplicationPositionsResponse + * @classdesc Represents an UpdateCellsAliasResponse. + * @implements IUpdateCellsAliasResponse * @constructor - * @param {vtctldata.IShardReplicationPositionsResponse=} [properties] Properties to set + * @param {vtctldata.IUpdateCellsAliasResponse=} [properties] Properties to set */ - function ShardReplicationPositionsResponse(properties) { - this.replication_statuses = {}; - this.tablet_map = {}; + function UpdateCellsAliasResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -134068,133 +156543,89 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ShardReplicationPositionsResponse replication_statuses. - * @member {Object.} replication_statuses - * @memberof vtctldata.ShardReplicationPositionsResponse + * UpdateCellsAliasResponse name. + * @member {string} name + * @memberof vtctldata.UpdateCellsAliasResponse * @instance */ - ShardReplicationPositionsResponse.prototype.replication_statuses = $util.emptyObject; + UpdateCellsAliasResponse.prototype.name = ""; /** - * ShardReplicationPositionsResponse tablet_map. - * @member {Object.} tablet_map - * @memberof vtctldata.ShardReplicationPositionsResponse + * UpdateCellsAliasResponse cells_alias. + * @member {topodata.ICellsAlias|null|undefined} cells_alias + * @memberof vtctldata.UpdateCellsAliasResponse * @instance */ - ShardReplicationPositionsResponse.prototype.tablet_map = $util.emptyObject; + UpdateCellsAliasResponse.prototype.cells_alias = null; /** - * Creates a new ShardReplicationPositionsResponse instance using the specified properties. + * Creates a new UpdateCellsAliasResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.ShardReplicationPositionsResponse + * @memberof vtctldata.UpdateCellsAliasResponse * @static - * @param {vtctldata.IShardReplicationPositionsResponse=} [properties] Properties to set - * @returns {vtctldata.ShardReplicationPositionsResponse} ShardReplicationPositionsResponse instance + * @param {vtctldata.IUpdateCellsAliasResponse=} [properties] Properties to set + * @returns {vtctldata.UpdateCellsAliasResponse} UpdateCellsAliasResponse instance */ - ShardReplicationPositionsResponse.create = function create(properties) { - return new ShardReplicationPositionsResponse(properties); + UpdateCellsAliasResponse.create = function create(properties) { + return new UpdateCellsAliasResponse(properties); }; /** - * Encodes the specified ShardReplicationPositionsResponse message. Does not implicitly {@link vtctldata.ShardReplicationPositionsResponse.verify|verify} messages. + * Encodes the specified UpdateCellsAliasResponse message. Does not implicitly {@link vtctldata.UpdateCellsAliasResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ShardReplicationPositionsResponse + * @memberof vtctldata.UpdateCellsAliasResponse * @static - * @param {vtctldata.IShardReplicationPositionsResponse} message ShardReplicationPositionsResponse message or plain object to encode + * @param {vtctldata.IUpdateCellsAliasResponse} message UpdateCellsAliasResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ShardReplicationPositionsResponse.encode = function encode(message, writer) { + UpdateCellsAliasResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.replication_statuses != null && Object.hasOwnProperty.call(message, "replication_statuses")) - for (let keys = Object.keys(message.replication_statuses), i = 0; i < keys.length; ++i) { - writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); - $root.replicationdata.Status.encode(message.replication_statuses[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); - } - if (message.tablet_map != null && Object.hasOwnProperty.call(message, "tablet_map")) - for (let keys = Object.keys(message.tablet_map), i = 0; i < keys.length; ++i) { - writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); - $root.topodata.Tablet.encode(message.tablet_map[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); - } + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.cells_alias != null && Object.hasOwnProperty.call(message, "cells_alias")) + $root.topodata.CellsAlias.encode(message.cells_alias, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); return writer; }; /** - * Encodes the specified ShardReplicationPositionsResponse message, length delimited. 
Does not implicitly {@link vtctldata.ShardReplicationPositionsResponse.verify|verify} messages. + * Encodes the specified UpdateCellsAliasResponse message, length delimited. Does not implicitly {@link vtctldata.UpdateCellsAliasResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ShardReplicationPositionsResponse + * @memberof vtctldata.UpdateCellsAliasResponse * @static - * @param {vtctldata.IShardReplicationPositionsResponse} message ShardReplicationPositionsResponse message or plain object to encode + * @param {vtctldata.IUpdateCellsAliasResponse} message UpdateCellsAliasResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ShardReplicationPositionsResponse.encodeDelimited = function encodeDelimited(message, writer) { + UpdateCellsAliasResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ShardReplicationPositionsResponse message from the specified reader or buffer. + * Decodes an UpdateCellsAliasResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.ShardReplicationPositionsResponse + * @memberof vtctldata.UpdateCellsAliasResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ShardReplicationPositionsResponse} ShardReplicationPositionsResponse + * @returns {vtctldata.UpdateCellsAliasResponse} UpdateCellsAliasResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ShardReplicationPositionsResponse.decode = function decode(reader, length) { + UpdateCellsAliasResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ShardReplicationPositionsResponse(), key, value; + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.UpdateCellsAliasResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (message.replication_statuses === $util.emptyObject) - message.replication_statuses = {}; - let end2 = reader.uint32() + reader.pos; - key = ""; - value = null; - while (reader.pos < end2) { - let tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = $root.replicationdata.Status.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.replication_statuses[key] = value; + message.name = reader.string(); break; } case 2: { - if (message.tablet_map === $util.emptyObject) - message.tablet_map = {}; - let end2 = reader.uint32() + reader.pos; - key = ""; - value = null; - while (reader.pos < end2) { - let tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = 
$root.topodata.Tablet.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.tablet_map[key] = value; + message.cells_alias = $root.topodata.CellsAlias.decode(reader, reader.uint32()); break; } default: @@ -134206,170 +156637,136 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ShardReplicationPositionsResponse message from the specified reader or buffer, length delimited. + * Decodes an UpdateCellsAliasResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ShardReplicationPositionsResponse + * @memberof vtctldata.UpdateCellsAliasResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ShardReplicationPositionsResponse} ShardReplicationPositionsResponse + * @returns {vtctldata.UpdateCellsAliasResponse} UpdateCellsAliasResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ShardReplicationPositionsResponse.decodeDelimited = function decodeDelimited(reader) { + UpdateCellsAliasResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ShardReplicationPositionsResponse message. + * Verifies an UpdateCellsAliasResponse message. 
* @function verify - * @memberof vtctldata.ShardReplicationPositionsResponse + * @memberof vtctldata.UpdateCellsAliasResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ShardReplicationPositionsResponse.verify = function verify(message) { + UpdateCellsAliasResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.replication_statuses != null && message.hasOwnProperty("replication_statuses")) { - if (!$util.isObject(message.replication_statuses)) - return "replication_statuses: object expected"; - let key = Object.keys(message.replication_statuses); - for (let i = 0; i < key.length; ++i) { - let error = $root.replicationdata.Status.verify(message.replication_statuses[key[i]]); - if (error) - return "replication_statuses." + error; - } - } - if (message.tablet_map != null && message.hasOwnProperty("tablet_map")) { - if (!$util.isObject(message.tablet_map)) - return "tablet_map: object expected"; - let key = Object.keys(message.tablet_map); - for (let i = 0; i < key.length; ++i) { - let error = $root.topodata.Tablet.verify(message.tablet_map[key[i]]); - if (error) - return "tablet_map." + error; - } + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.cells_alias != null && message.hasOwnProperty("cells_alias")) { + let error = $root.topodata.CellsAlias.verify(message.cells_alias); + if (error) + return "cells_alias." + error; } return null; }; /** - * Creates a ShardReplicationPositionsResponse message from a plain object. Also converts values to their respective internal types. + * Creates an UpdateCellsAliasResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ShardReplicationPositionsResponse + * @memberof vtctldata.UpdateCellsAliasResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ShardReplicationPositionsResponse} ShardReplicationPositionsResponse + * @returns {vtctldata.UpdateCellsAliasResponse} UpdateCellsAliasResponse */ - ShardReplicationPositionsResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ShardReplicationPositionsResponse) + UpdateCellsAliasResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.UpdateCellsAliasResponse) return object; - let message = new $root.vtctldata.ShardReplicationPositionsResponse(); - if (object.replication_statuses) { - if (typeof object.replication_statuses !== "object") - throw TypeError(".vtctldata.ShardReplicationPositionsResponse.replication_statuses: object expected"); - message.replication_statuses = {}; - for (let keys = Object.keys(object.replication_statuses), i = 0; i < keys.length; ++i) { - if (typeof object.replication_statuses[keys[i]] !== "object") - throw TypeError(".vtctldata.ShardReplicationPositionsResponse.replication_statuses: object expected"); - message.replication_statuses[keys[i]] = $root.replicationdata.Status.fromObject(object.replication_statuses[keys[i]]); - } - } - if (object.tablet_map) { - if (typeof object.tablet_map !== "object") - throw TypeError(".vtctldata.ShardReplicationPositionsResponse.tablet_map: object expected"); - message.tablet_map = {}; - for (let keys = Object.keys(object.tablet_map), i = 0; i < keys.length; ++i) { - if (typeof object.tablet_map[keys[i]] !== "object") - throw TypeError(".vtctldata.ShardReplicationPositionsResponse.tablet_map: object expected"); - message.tablet_map[keys[i]] = $root.topodata.Tablet.fromObject(object.tablet_map[keys[i]]); - } + let message = new $root.vtctldata.UpdateCellsAliasResponse(); + if (object.name != null) + message.name = 
String(object.name); + if (object.cells_alias != null) { + if (typeof object.cells_alias !== "object") + throw TypeError(".vtctldata.UpdateCellsAliasResponse.cells_alias: object expected"); + message.cells_alias = $root.topodata.CellsAlias.fromObject(object.cells_alias); } return message; }; /** - * Creates a plain object from a ShardReplicationPositionsResponse message. Also converts values to other types if specified. + * Creates a plain object from an UpdateCellsAliasResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.ShardReplicationPositionsResponse + * @memberof vtctldata.UpdateCellsAliasResponse * @static - * @param {vtctldata.ShardReplicationPositionsResponse} message ShardReplicationPositionsResponse + * @param {vtctldata.UpdateCellsAliasResponse} message UpdateCellsAliasResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ShardReplicationPositionsResponse.toObject = function toObject(message, options) { + UpdateCellsAliasResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.objects || options.defaults) { - object.replication_statuses = {}; - object.tablet_map = {}; - } - let keys2; - if (message.replication_statuses && (keys2 = Object.keys(message.replication_statuses)).length) { - object.replication_statuses = {}; - for (let j = 0; j < keys2.length; ++j) - object.replication_statuses[keys2[j]] = $root.replicationdata.Status.toObject(message.replication_statuses[keys2[j]], options); - } - if (message.tablet_map && (keys2 = Object.keys(message.tablet_map)).length) { - object.tablet_map = {}; - for (let j = 0; j < keys2.length; ++j) - object.tablet_map[keys2[j]] = $root.topodata.Tablet.toObject(message.tablet_map[keys2[j]], options); + if (options.defaults) { + object.name = ""; + object.cells_alias = null; } + if (message.name != null && message.hasOwnProperty("name")) + 
object.name = message.name; + if (message.cells_alias != null && message.hasOwnProperty("cells_alias")) + object.cells_alias = $root.topodata.CellsAlias.toObject(message.cells_alias, options); return object; }; /** - * Converts this ShardReplicationPositionsResponse to JSON. + * Converts this UpdateCellsAliasResponse to JSON. * @function toJSON - * @memberof vtctldata.ShardReplicationPositionsResponse + * @memberof vtctldata.UpdateCellsAliasResponse * @instance * @returns {Object.} JSON object */ - ShardReplicationPositionsResponse.prototype.toJSON = function toJSON() { + UpdateCellsAliasResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ShardReplicationPositionsResponse + * Gets the default type url for UpdateCellsAliasResponse * @function getTypeUrl - * @memberof vtctldata.ShardReplicationPositionsResponse + * @memberof vtctldata.UpdateCellsAliasResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ShardReplicationPositionsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + UpdateCellsAliasResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ShardReplicationPositionsResponse"; + return typeUrlPrefix + "/vtctldata.UpdateCellsAliasResponse"; }; - return ShardReplicationPositionsResponse; + return UpdateCellsAliasResponse; })(); - vtctldata.ShardReplicationRemoveRequest = (function() { + vtctldata.ValidateRequest = (function() { /** - * Properties of a ShardReplicationRemoveRequest. + * Properties of a ValidateRequest. 
* @memberof vtctldata - * @interface IShardReplicationRemoveRequest - * @property {string|null} [keyspace] ShardReplicationRemoveRequest keyspace - * @property {string|null} [shard] ShardReplicationRemoveRequest shard - * @property {topodata.ITabletAlias|null} [tablet_alias] ShardReplicationRemoveRequest tablet_alias + * @interface IValidateRequest + * @property {boolean|null} [ping_tablets] ValidateRequest ping_tablets */ /** - * Constructs a new ShardReplicationRemoveRequest. + * Constructs a new ValidateRequest. * @memberof vtctldata - * @classdesc Represents a ShardReplicationRemoveRequest. - * @implements IShardReplicationRemoveRequest + * @classdesc Represents a ValidateRequest. + * @implements IValidateRequest * @constructor - * @param {vtctldata.IShardReplicationRemoveRequest=} [properties] Properties to set + * @param {vtctldata.IValidateRequest=} [properties] Properties to set */ - function ShardReplicationRemoveRequest(properties) { + function ValidateRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -134377,103 +156774,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ShardReplicationRemoveRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.ShardReplicationRemoveRequest - * @instance - */ - ShardReplicationRemoveRequest.prototype.keyspace = ""; - - /** - * ShardReplicationRemoveRequest shard. - * @member {string} shard - * @memberof vtctldata.ShardReplicationRemoveRequest - * @instance - */ - ShardReplicationRemoveRequest.prototype.shard = ""; - - /** - * ShardReplicationRemoveRequest tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.ShardReplicationRemoveRequest + * ValidateRequest ping_tablets. 
+ * @member {boolean} ping_tablets + * @memberof vtctldata.ValidateRequest * @instance */ - ShardReplicationRemoveRequest.prototype.tablet_alias = null; + ValidateRequest.prototype.ping_tablets = false; /** - * Creates a new ShardReplicationRemoveRequest instance using the specified properties. + * Creates a new ValidateRequest instance using the specified properties. * @function create - * @memberof vtctldata.ShardReplicationRemoveRequest + * @memberof vtctldata.ValidateRequest * @static - * @param {vtctldata.IShardReplicationRemoveRequest=} [properties] Properties to set - * @returns {vtctldata.ShardReplicationRemoveRequest} ShardReplicationRemoveRequest instance + * @param {vtctldata.IValidateRequest=} [properties] Properties to set + * @returns {vtctldata.ValidateRequest} ValidateRequest instance */ - ShardReplicationRemoveRequest.create = function create(properties) { - return new ShardReplicationRemoveRequest(properties); + ValidateRequest.create = function create(properties) { + return new ValidateRequest(properties); }; /** - * Encodes the specified ShardReplicationRemoveRequest message. Does not implicitly {@link vtctldata.ShardReplicationRemoveRequest.verify|verify} messages. + * Encodes the specified ValidateRequest message. Does not implicitly {@link vtctldata.ValidateRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ShardReplicationRemoveRequest + * @memberof vtctldata.ValidateRequest * @static - * @param {vtctldata.IShardReplicationRemoveRequest} message ShardReplicationRemoveRequest message or plain object to encode + * @param {vtctldata.IValidateRequest} message ValidateRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ShardReplicationRemoveRequest.encode = function encode(message, writer) { + ValidateRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.ping_tablets != null && Object.hasOwnProperty.call(message, "ping_tablets")) + writer.uint32(/* id 1, wireType 0 =*/8).bool(message.ping_tablets); return writer; }; /** - * Encodes the specified ShardReplicationRemoveRequest message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationRemoveRequest.verify|verify} messages. + * Encodes the specified ValidateRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ShardReplicationRemoveRequest + * @memberof vtctldata.ValidateRequest * @static - * @param {vtctldata.IShardReplicationRemoveRequest} message ShardReplicationRemoveRequest message or plain object to encode + * @param {vtctldata.IValidateRequest} message ValidateRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ShardReplicationRemoveRequest.encodeDelimited = function encodeDelimited(message, writer) { + ValidateRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ShardReplicationRemoveRequest message from the specified reader or buffer. + * Decodes a ValidateRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ShardReplicationRemoveRequest + * @memberof vtctldata.ValidateRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ShardReplicationRemoveRequest} ShardReplicationRemoveRequest + * @returns {vtctldata.ValidateRequest} ValidateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ShardReplicationRemoveRequest.decode = function decode(reader, length) { + ValidateRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ShardReplicationRemoveRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ValidateRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - message.shard = reader.string(); - break; - } - case 3: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + message.ping_tablets = reader.bool(); break; } default: @@ -134485,143 +156854,125 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ShardReplicationRemoveRequest message from the specified reader or buffer, length delimited. + * Decodes a ValidateRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ShardReplicationRemoveRequest + * @memberof vtctldata.ValidateRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ShardReplicationRemoveRequest} ShardReplicationRemoveRequest + * @returns {vtctldata.ValidateRequest} ValidateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ShardReplicationRemoveRequest.decodeDelimited = function decodeDelimited(reader) { + ValidateRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ShardReplicationRemoveRequest message. + * Verifies a ValidateRequest message. 
* @function verify - * @memberof vtctldata.ShardReplicationRemoveRequest + * @memberof vtctldata.ValidateRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ShardReplicationRemoveRequest.verify = function verify(message) { + ValidateRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } + if (message.ping_tablets != null && message.hasOwnProperty("ping_tablets")) + if (typeof message.ping_tablets !== "boolean") + return "ping_tablets: boolean expected"; return null; }; /** - * Creates a ShardReplicationRemoveRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ShardReplicationRemoveRequest + * @memberof vtctldata.ValidateRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ShardReplicationRemoveRequest} ShardReplicationRemoveRequest + * @returns {vtctldata.ValidateRequest} ValidateRequest */ - ShardReplicationRemoveRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ShardReplicationRemoveRequest) + ValidateRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ValidateRequest) return object; - let message = new $root.vtctldata.ShardReplicationRemoveRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.ShardReplicationRemoveRequest.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } + let message = new $root.vtctldata.ValidateRequest(); + if (object.ping_tablets != null) + message.ping_tablets = Boolean(object.ping_tablets); return message; }; /** - * Creates a plain object from a ShardReplicationRemoveRequest message. Also converts values to other types if specified. + * Creates a plain object from a ValidateRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ShardReplicationRemoveRequest + * @memberof vtctldata.ValidateRequest * @static - * @param {vtctldata.ShardReplicationRemoveRequest} message ShardReplicationRemoveRequest + * @param {vtctldata.ValidateRequest} message ValidateRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ShardReplicationRemoveRequest.toObject = function toObject(message, options) { + ValidateRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.keyspace = ""; - object.shard = ""; - object.tablet_alias = null; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (options.defaults) + object.ping_tablets = false; + if (message.ping_tablets != null && message.hasOwnProperty("ping_tablets")) + object.ping_tablets = message.ping_tablets; return object; }; /** - * Converts this ShardReplicationRemoveRequest to JSON. + * Converts this ValidateRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ShardReplicationRemoveRequest + * @memberof vtctldata.ValidateRequest * @instance * @returns {Object.} JSON object */ - ShardReplicationRemoveRequest.prototype.toJSON = function toJSON() { + ValidateRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ShardReplicationRemoveRequest + * Gets the default type url for ValidateRequest * @function getTypeUrl - * @memberof vtctldata.ShardReplicationRemoveRequest + * @memberof vtctldata.ValidateRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ShardReplicationRemoveRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ValidateRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ShardReplicationRemoveRequest"; + return typeUrlPrefix + "/vtctldata.ValidateRequest"; }; - return ShardReplicationRemoveRequest; + return ValidateRequest; })(); - vtctldata.ShardReplicationRemoveResponse = (function() { + vtctldata.ValidateResponse = (function() { /** - * Properties of a ShardReplicationRemoveResponse. + * Properties of a ValidateResponse. * @memberof vtctldata - * @interface IShardReplicationRemoveResponse + * @interface IValidateResponse + * @property {Array.|null} [results] ValidateResponse results + * @property {Object.|null} [results_by_keyspace] ValidateResponse results_by_keyspace */ /** - * Constructs a new ShardReplicationRemoveResponse. + * Constructs a new ValidateResponse. * @memberof vtctldata - * @classdesc Represents a ShardReplicationRemoveResponse. - * @implements IShardReplicationRemoveResponse + * @classdesc Represents a ValidateResponse. 
+ * @implements IValidateResponse * @constructor - * @param {vtctldata.IShardReplicationRemoveResponse=} [properties] Properties to set + * @param {vtctldata.IValidateResponse=} [properties] Properties to set */ - function ShardReplicationRemoveResponse(properties) { + function ValidateResponse(properties) { + this.results = []; + this.results_by_keyspace = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -134629,63 +156980,116 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new ShardReplicationRemoveResponse instance using the specified properties. + * ValidateResponse results. + * @member {Array.} results + * @memberof vtctldata.ValidateResponse + * @instance + */ + ValidateResponse.prototype.results = $util.emptyArray; + + /** + * ValidateResponse results_by_keyspace. + * @member {Object.} results_by_keyspace + * @memberof vtctldata.ValidateResponse + * @instance + */ + ValidateResponse.prototype.results_by_keyspace = $util.emptyObject; + + /** + * Creates a new ValidateResponse instance using the specified properties. * @function create - * @memberof vtctldata.ShardReplicationRemoveResponse + * @memberof vtctldata.ValidateResponse * @static - * @param {vtctldata.IShardReplicationRemoveResponse=} [properties] Properties to set - * @returns {vtctldata.ShardReplicationRemoveResponse} ShardReplicationRemoveResponse instance + * @param {vtctldata.IValidateResponse=} [properties] Properties to set + * @returns {vtctldata.ValidateResponse} ValidateResponse instance */ - ShardReplicationRemoveResponse.create = function create(properties) { - return new ShardReplicationRemoveResponse(properties); + ValidateResponse.create = function create(properties) { + return new ValidateResponse(properties); }; /** - * Encodes the specified ShardReplicationRemoveResponse message. Does not implicitly {@link vtctldata.ShardReplicationRemoveResponse.verify|verify} messages. 
+ * Encodes the specified ValidateResponse message. Does not implicitly {@link vtctldata.ValidateResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.ShardReplicationRemoveResponse + * @memberof vtctldata.ValidateResponse * @static - * @param {vtctldata.IShardReplicationRemoveResponse} message ShardReplicationRemoveResponse message or plain object to encode + * @param {vtctldata.IValidateResponse} message ValidateResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ShardReplicationRemoveResponse.encode = function encode(message, writer) { + ValidateResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.results != null && message.results.length) + for (let i = 0; i < message.results.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.results[i]); + if (message.results_by_keyspace != null && Object.hasOwnProperty.call(message, "results_by_keyspace")) + for (let keys = Object.keys(message.results_by_keyspace), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.vtctldata.ValidateKeyspaceResponse.encode(message.results_by_keyspace[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } return writer; }; /** - * Encodes the specified ShardReplicationRemoveResponse message, length delimited. Does not implicitly {@link vtctldata.ShardReplicationRemoveResponse.verify|verify} messages. + * Encodes the specified ValidateResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ShardReplicationRemoveResponse + * @memberof vtctldata.ValidateResponse * @static - * @param {vtctldata.IShardReplicationRemoveResponse} message ShardReplicationRemoveResponse message or plain object to encode + * @param {vtctldata.IValidateResponse} message ValidateResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ShardReplicationRemoveResponse.encodeDelimited = function encodeDelimited(message, writer) { + ValidateResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ShardReplicationRemoveResponse message from the specified reader or buffer. + * Decodes a ValidateResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ShardReplicationRemoveResponse + * @memberof vtctldata.ValidateResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ShardReplicationRemoveResponse} ShardReplicationRemoveResponse + * @returns {vtctldata.ValidateResponse} ValidateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ShardReplicationRemoveResponse.decode = function decode(reader, length) { + ValidateResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ShardReplicationRemoveResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ValidateResponse(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + if (!(message.results && message.results.length)) + message.results = []; + message.results.push(reader.string()); + break; + } + case 2: { + if (message.results_by_keyspace === $util.emptyObject) + message.results_by_keyspace = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.vtctldata.ValidateKeyspaceResponse.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.results_by_keyspace[key] = value; + break; + } default: reader.skipType(tag & 7); break; @@ -134695,110 +157099,163 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ShardReplicationRemoveResponse message from the specified reader or buffer, length delimited. + * Decodes a ValidateResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ShardReplicationRemoveResponse + * @memberof vtctldata.ValidateResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ShardReplicationRemoveResponse} ShardReplicationRemoveResponse + * @returns {vtctldata.ValidateResponse} ValidateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ShardReplicationRemoveResponse.decodeDelimited = function decodeDelimited(reader) { + ValidateResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ShardReplicationRemoveResponse message. 
+ * Verifies a ValidateResponse message. * @function verify - * @memberof vtctldata.ShardReplicationRemoveResponse + * @memberof vtctldata.ValidateResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ShardReplicationRemoveResponse.verify = function verify(message) { + ValidateResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.results != null && message.hasOwnProperty("results")) { + if (!Array.isArray(message.results)) + return "results: array expected"; + for (let i = 0; i < message.results.length; ++i) + if (!$util.isString(message.results[i])) + return "results: string[] expected"; + } + if (message.results_by_keyspace != null && message.hasOwnProperty("results_by_keyspace")) { + if (!$util.isObject(message.results_by_keyspace)) + return "results_by_keyspace: object expected"; + let key = Object.keys(message.results_by_keyspace); + for (let i = 0; i < key.length; ++i) { + let error = $root.vtctldata.ValidateKeyspaceResponse.verify(message.results_by_keyspace[key[i]]); + if (error) + return "results_by_keyspace." + error; + } + } return null; }; /** - * Creates a ShardReplicationRemoveResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ShardReplicationRemoveResponse + * @memberof vtctldata.ValidateResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ShardReplicationRemoveResponse} ShardReplicationRemoveResponse + * @returns {vtctldata.ValidateResponse} ValidateResponse */ - ShardReplicationRemoveResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ShardReplicationRemoveResponse) + ValidateResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ValidateResponse) return object; - return new $root.vtctldata.ShardReplicationRemoveResponse(); + let message = new $root.vtctldata.ValidateResponse(); + if (object.results) { + if (!Array.isArray(object.results)) + throw TypeError(".vtctldata.ValidateResponse.results: array expected"); + message.results = []; + for (let i = 0; i < object.results.length; ++i) + message.results[i] = String(object.results[i]); + } + if (object.results_by_keyspace) { + if (typeof object.results_by_keyspace !== "object") + throw TypeError(".vtctldata.ValidateResponse.results_by_keyspace: object expected"); + message.results_by_keyspace = {}; + for (let keys = Object.keys(object.results_by_keyspace), i = 0; i < keys.length; ++i) { + if (typeof object.results_by_keyspace[keys[i]] !== "object") + throw TypeError(".vtctldata.ValidateResponse.results_by_keyspace: object expected"); + message.results_by_keyspace[keys[i]] = $root.vtctldata.ValidateKeyspaceResponse.fromObject(object.results_by_keyspace[keys[i]]); + } + } + return message; }; /** - * Creates a plain object from a ShardReplicationRemoveResponse message. Also converts values to other types if specified. + * Creates a plain object from a ValidateResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ShardReplicationRemoveResponse + * @memberof vtctldata.ValidateResponse * @static - * @param {vtctldata.ShardReplicationRemoveResponse} message ShardReplicationRemoveResponse + * @param {vtctldata.ValidateResponse} message ValidateResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ShardReplicationRemoveResponse.toObject = function toObject() { - return {}; + ValidateResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.results = []; + if (options.objects || options.defaults) + object.results_by_keyspace = {}; + if (message.results && message.results.length) { + object.results = []; + for (let j = 0; j < message.results.length; ++j) + object.results[j] = message.results[j]; + } + let keys2; + if (message.results_by_keyspace && (keys2 = Object.keys(message.results_by_keyspace)).length) { + object.results_by_keyspace = {}; + for (let j = 0; j < keys2.length; ++j) + object.results_by_keyspace[keys2[j]] = $root.vtctldata.ValidateKeyspaceResponse.toObject(message.results_by_keyspace[keys2[j]], options); + } + return object; }; /** - * Converts this ShardReplicationRemoveResponse to JSON. + * Converts this ValidateResponse to JSON. 
* @function toJSON - * @memberof vtctldata.ShardReplicationRemoveResponse + * @memberof vtctldata.ValidateResponse * @instance * @returns {Object.} JSON object */ - ShardReplicationRemoveResponse.prototype.toJSON = function toJSON() { + ValidateResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ShardReplicationRemoveResponse + * Gets the default type url for ValidateResponse * @function getTypeUrl - * @memberof vtctldata.ShardReplicationRemoveResponse + * @memberof vtctldata.ValidateResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ShardReplicationRemoveResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ValidateResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ShardReplicationRemoveResponse"; + return typeUrlPrefix + "/vtctldata.ValidateResponse"; }; - return ShardReplicationRemoveResponse; + return ValidateResponse; })(); - vtctldata.SleepTabletRequest = (function() { + vtctldata.ValidateKeyspaceRequest = (function() { /** - * Properties of a SleepTabletRequest. + * Properties of a ValidateKeyspaceRequest. * @memberof vtctldata - * @interface ISleepTabletRequest - * @property {topodata.ITabletAlias|null} [tablet_alias] SleepTabletRequest tablet_alias - * @property {vttime.IDuration|null} [duration] SleepTabletRequest duration + * @interface IValidateKeyspaceRequest + * @property {string|null} [keyspace] ValidateKeyspaceRequest keyspace + * @property {boolean|null} [ping_tablets] ValidateKeyspaceRequest ping_tablets */ /** - * Constructs a new SleepTabletRequest. + * Constructs a new ValidateKeyspaceRequest. * @memberof vtctldata - * @classdesc Represents a SleepTabletRequest. 
- * @implements ISleepTabletRequest + * @classdesc Represents a ValidateKeyspaceRequest. + * @implements IValidateKeyspaceRequest * @constructor - * @param {vtctldata.ISleepTabletRequest=} [properties] Properties to set + * @param {vtctldata.IValidateKeyspaceRequest=} [properties] Properties to set */ - function SleepTabletRequest(properties) { + function ValidateKeyspaceRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -134806,89 +157263,89 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * SleepTabletRequest tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.SleepTabletRequest + * ValidateKeyspaceRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.ValidateKeyspaceRequest * @instance */ - SleepTabletRequest.prototype.tablet_alias = null; + ValidateKeyspaceRequest.prototype.keyspace = ""; /** - * SleepTabletRequest duration. - * @member {vttime.IDuration|null|undefined} duration - * @memberof vtctldata.SleepTabletRequest + * ValidateKeyspaceRequest ping_tablets. + * @member {boolean} ping_tablets + * @memberof vtctldata.ValidateKeyspaceRequest * @instance */ - SleepTabletRequest.prototype.duration = null; + ValidateKeyspaceRequest.prototype.ping_tablets = false; /** - * Creates a new SleepTabletRequest instance using the specified properties. + * Creates a new ValidateKeyspaceRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.SleepTabletRequest + * @memberof vtctldata.ValidateKeyspaceRequest * @static - * @param {vtctldata.ISleepTabletRequest=} [properties] Properties to set - * @returns {vtctldata.SleepTabletRequest} SleepTabletRequest instance + * @param {vtctldata.IValidateKeyspaceRequest=} [properties] Properties to set + * @returns {vtctldata.ValidateKeyspaceRequest} ValidateKeyspaceRequest instance */ - SleepTabletRequest.create = function create(properties) { - return new SleepTabletRequest(properties); + ValidateKeyspaceRequest.create = function create(properties) { + return new ValidateKeyspaceRequest(properties); }; /** - * Encodes the specified SleepTabletRequest message. Does not implicitly {@link vtctldata.SleepTabletRequest.verify|verify} messages. + * Encodes the specified ValidateKeyspaceRequest message. Does not implicitly {@link vtctldata.ValidateKeyspaceRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.SleepTabletRequest + * @memberof vtctldata.ValidateKeyspaceRequest * @static - * @param {vtctldata.ISleepTabletRequest} message SleepTabletRequest message or plain object to encode + * @param {vtctldata.IValidateKeyspaceRequest} message ValidateKeyspaceRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SleepTabletRequest.encode = function encode(message, writer) { + ValidateKeyspaceRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.duration != null && Object.hasOwnProperty.call(message, "duration")) - $root.vttime.Duration.encode(message.duration, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.keyspace != null && 
Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.ping_tablets != null && Object.hasOwnProperty.call(message, "ping_tablets")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.ping_tablets); return writer; }; /** - * Encodes the specified SleepTabletRequest message, length delimited. Does not implicitly {@link vtctldata.SleepTabletRequest.verify|verify} messages. + * Encodes the specified ValidateKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateKeyspaceRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.SleepTabletRequest + * @memberof vtctldata.ValidateKeyspaceRequest * @static - * @param {vtctldata.ISleepTabletRequest} message SleepTabletRequest message or plain object to encode + * @param {vtctldata.IValidateKeyspaceRequest} message ValidateKeyspaceRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SleepTabletRequest.encodeDelimited = function encodeDelimited(message, writer) { + ValidateKeyspaceRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SleepTabletRequest message from the specified reader or buffer. + * Decodes a ValidateKeyspaceRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.SleepTabletRequest + * @memberof vtctldata.ValidateKeyspaceRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SleepTabletRequest} SleepTabletRequest + * @returns {vtctldata.ValidateKeyspaceRequest} ValidateKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SleepTabletRequest.decode = function decode(reader, length) { + ValidateKeyspaceRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SleepTabletRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateKeyspaceRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + message.keyspace = reader.string(); break; } case 2: { - message.duration = $root.vttime.Duration.decode(reader, reader.uint32()); + message.ping_tablets = reader.bool(); break; } default: @@ -134900,140 +157357,134 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a SleepTabletRequest message from the specified reader or buffer, length delimited. + * Decodes a ValidateKeyspaceRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.SleepTabletRequest + * @memberof vtctldata.ValidateKeyspaceRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SleepTabletRequest} SleepTabletRequest + * @returns {vtctldata.ValidateKeyspaceRequest} ValidateKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SleepTabletRequest.decodeDelimited = function decodeDelimited(reader) { + ValidateKeyspaceRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SleepTabletRequest message. + * Verifies a ValidateKeyspaceRequest message. * @function verify - * @memberof vtctldata.SleepTabletRequest + * @memberof vtctldata.ValidateKeyspaceRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SleepTabletRequest.verify = function verify(message) { + ValidateKeyspaceRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } - if (message.duration != null && message.hasOwnProperty("duration")) { - let error = $root.vttime.Duration.verify(message.duration); - if (error) - return "duration." 
+ error; - } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.ping_tablets != null && message.hasOwnProperty("ping_tablets")) + if (typeof message.ping_tablets !== "boolean") + return "ping_tablets: boolean expected"; return null; }; /** - * Creates a SleepTabletRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateKeyspaceRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.SleepTabletRequest + * @memberof vtctldata.ValidateKeyspaceRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.SleepTabletRequest} SleepTabletRequest + * @returns {vtctldata.ValidateKeyspaceRequest} ValidateKeyspaceRequest */ - SleepTabletRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SleepTabletRequest) + ValidateKeyspaceRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ValidateKeyspaceRequest) return object; - let message = new $root.vtctldata.SleepTabletRequest(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.SleepTabletRequest.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } - if (object.duration != null) { - if (typeof object.duration !== "object") - throw TypeError(".vtctldata.SleepTabletRequest.duration: object expected"); - message.duration = $root.vttime.Duration.fromObject(object.duration); - } + let message = new $root.vtctldata.ValidateKeyspaceRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.ping_tablets != null) + message.ping_tablets = Boolean(object.ping_tablets); return message; }; /** - * Creates a plain object from a 
SleepTabletRequest message. Also converts values to other types if specified. + * Creates a plain object from a ValidateKeyspaceRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.SleepTabletRequest + * @memberof vtctldata.ValidateKeyspaceRequest * @static - * @param {vtctldata.SleepTabletRequest} message SleepTabletRequest + * @param {vtctldata.ValidateKeyspaceRequest} message ValidateKeyspaceRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SleepTabletRequest.toObject = function toObject(message, options) { + ValidateKeyspaceRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.tablet_alias = null; - object.duration = null; + object.keyspace = ""; + object.ping_tablets = false; } - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); - if (message.duration != null && message.hasOwnProperty("duration")) - object.duration = $root.vttime.Duration.toObject(message.duration, options); + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.ping_tablets != null && message.hasOwnProperty("ping_tablets")) + object.ping_tablets = message.ping_tablets; return object; }; /** - * Converts this SleepTabletRequest to JSON. + * Converts this ValidateKeyspaceRequest to JSON. 
* @function toJSON - * @memberof vtctldata.SleepTabletRequest + * @memberof vtctldata.ValidateKeyspaceRequest * @instance * @returns {Object.} JSON object */ - SleepTabletRequest.prototype.toJSON = function toJSON() { + ValidateKeyspaceRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SleepTabletRequest + * Gets the default type url for ValidateKeyspaceRequest * @function getTypeUrl - * @memberof vtctldata.SleepTabletRequest + * @memberof vtctldata.ValidateKeyspaceRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SleepTabletRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ValidateKeyspaceRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.SleepTabletRequest"; + return typeUrlPrefix + "/vtctldata.ValidateKeyspaceRequest"; }; - return SleepTabletRequest; + return ValidateKeyspaceRequest; })(); - vtctldata.SleepTabletResponse = (function() { + vtctldata.ValidateKeyspaceResponse = (function() { /** - * Properties of a SleepTabletResponse. + * Properties of a ValidateKeyspaceResponse. * @memberof vtctldata - * @interface ISleepTabletResponse + * @interface IValidateKeyspaceResponse + * @property {Array.|null} [results] ValidateKeyspaceResponse results + * @property {Object.|null} [results_by_shard] ValidateKeyspaceResponse results_by_shard */ /** - * Constructs a new SleepTabletResponse. + * Constructs a new ValidateKeyspaceResponse. * @memberof vtctldata - * @classdesc Represents a SleepTabletResponse. - * @implements ISleepTabletResponse + * @classdesc Represents a ValidateKeyspaceResponse. 
+ * @implements IValidateKeyspaceResponse * @constructor - * @param {vtctldata.ISleepTabletResponse=} [properties] Properties to set + * @param {vtctldata.IValidateKeyspaceResponse=} [properties] Properties to set */ - function SleepTabletResponse(properties) { + function ValidateKeyspaceResponse(properties) { + this.results = []; + this.results_by_shard = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -135041,63 +157492,116 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new SleepTabletResponse instance using the specified properties. + * ValidateKeyspaceResponse results. + * @member {Array.} results + * @memberof vtctldata.ValidateKeyspaceResponse + * @instance + */ + ValidateKeyspaceResponse.prototype.results = $util.emptyArray; + + /** + * ValidateKeyspaceResponse results_by_shard. + * @member {Object.} results_by_shard + * @memberof vtctldata.ValidateKeyspaceResponse + * @instance + */ + ValidateKeyspaceResponse.prototype.results_by_shard = $util.emptyObject; + + /** + * Creates a new ValidateKeyspaceResponse instance using the specified properties. * @function create - * @memberof vtctldata.SleepTabletResponse + * @memberof vtctldata.ValidateKeyspaceResponse * @static - * @param {vtctldata.ISleepTabletResponse=} [properties] Properties to set - * @returns {vtctldata.SleepTabletResponse} SleepTabletResponse instance + * @param {vtctldata.IValidateKeyspaceResponse=} [properties] Properties to set + * @returns {vtctldata.ValidateKeyspaceResponse} ValidateKeyspaceResponse instance */ - SleepTabletResponse.create = function create(properties) { - return new SleepTabletResponse(properties); + ValidateKeyspaceResponse.create = function create(properties) { + return new ValidateKeyspaceResponse(properties); }; /** - * Encodes the specified SleepTabletResponse message. Does not implicitly {@link vtctldata.SleepTabletResponse.verify|verify} messages. 
+ * Encodes the specified ValidateKeyspaceResponse message. Does not implicitly {@link vtctldata.ValidateKeyspaceResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.SleepTabletResponse + * @memberof vtctldata.ValidateKeyspaceResponse * @static - * @param {vtctldata.ISleepTabletResponse} message SleepTabletResponse message or plain object to encode + * @param {vtctldata.IValidateKeyspaceResponse} message ValidateKeyspaceResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SleepTabletResponse.encode = function encode(message, writer) { + ValidateKeyspaceResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.results != null && message.results.length) + for (let i = 0; i < message.results.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.results[i]); + if (message.results_by_shard != null && Object.hasOwnProperty.call(message, "results_by_shard")) + for (let keys = Object.keys(message.results_by_shard), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.vtctldata.ValidateShardResponse.encode(message.results_by_shard[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } return writer; }; /** - * Encodes the specified SleepTabletResponse message, length delimited. Does not implicitly {@link vtctldata.SleepTabletResponse.verify|verify} messages. + * Encodes the specified ValidateKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateKeyspaceResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.SleepTabletResponse + * @memberof vtctldata.ValidateKeyspaceResponse * @static - * @param {vtctldata.ISleepTabletResponse} message SleepTabletResponse message or plain object to encode + * @param {vtctldata.IValidateKeyspaceResponse} message ValidateKeyspaceResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SleepTabletResponse.encodeDelimited = function encodeDelimited(message, writer) { + ValidateKeyspaceResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SleepTabletResponse message from the specified reader or buffer. + * Decodes a ValidateKeyspaceResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.SleepTabletResponse + * @memberof vtctldata.ValidateKeyspaceResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SleepTabletResponse} SleepTabletResponse + * @returns {vtctldata.ValidateKeyspaceResponse} ValidateKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SleepTabletResponse.decode = function decode(reader, length) { + ValidateKeyspaceResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SleepTabletResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ValidateKeyspaceResponse(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + if (!(message.results && message.results.length)) + message.results = []; + message.results.push(reader.string()); + break; + } + case 2: { + if (message.results_by_shard === $util.emptyObject) + message.results_by_shard = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.vtctldata.ValidateShardResponse.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.results_by_shard[key] = value; + break; + } default: reader.skipType(tag & 7); break; @@ -135107,116 +157611,167 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a SleepTabletResponse message from the specified reader or buffer, length delimited. + * Decodes a ValidateKeyspaceResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.SleepTabletResponse + * @memberof vtctldata.ValidateKeyspaceResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SleepTabletResponse} SleepTabletResponse + * @returns {vtctldata.ValidateKeyspaceResponse} ValidateKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SleepTabletResponse.decodeDelimited = function decodeDelimited(reader) { + ValidateKeyspaceResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SleepTabletResponse message. 
+ * Verifies a ValidateKeyspaceResponse message. * @function verify - * @memberof vtctldata.SleepTabletResponse + * @memberof vtctldata.ValidateKeyspaceResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SleepTabletResponse.verify = function verify(message) { + ValidateKeyspaceResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.results != null && message.hasOwnProperty("results")) { + if (!Array.isArray(message.results)) + return "results: array expected"; + for (let i = 0; i < message.results.length; ++i) + if (!$util.isString(message.results[i])) + return "results: string[] expected"; + } + if (message.results_by_shard != null && message.hasOwnProperty("results_by_shard")) { + if (!$util.isObject(message.results_by_shard)) + return "results_by_shard: object expected"; + let key = Object.keys(message.results_by_shard); + for (let i = 0; i < key.length; ++i) { + let error = $root.vtctldata.ValidateShardResponse.verify(message.results_by_shard[key[i]]); + if (error) + return "results_by_shard." + error; + } + } return null; }; /** - * Creates a SleepTabletResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateKeyspaceResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.SleepTabletResponse + * @memberof vtctldata.ValidateKeyspaceResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.SleepTabletResponse} SleepTabletResponse + * @returns {vtctldata.ValidateKeyspaceResponse} ValidateKeyspaceResponse */ - SleepTabletResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SleepTabletResponse) + ValidateKeyspaceResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ValidateKeyspaceResponse) return object; - return new $root.vtctldata.SleepTabletResponse(); + let message = new $root.vtctldata.ValidateKeyspaceResponse(); + if (object.results) { + if (!Array.isArray(object.results)) + throw TypeError(".vtctldata.ValidateKeyspaceResponse.results: array expected"); + message.results = []; + for (let i = 0; i < object.results.length; ++i) + message.results[i] = String(object.results[i]); + } + if (object.results_by_shard) { + if (typeof object.results_by_shard !== "object") + throw TypeError(".vtctldata.ValidateKeyspaceResponse.results_by_shard: object expected"); + message.results_by_shard = {}; + for (let keys = Object.keys(object.results_by_shard), i = 0; i < keys.length; ++i) { + if (typeof object.results_by_shard[keys[i]] !== "object") + throw TypeError(".vtctldata.ValidateKeyspaceResponse.results_by_shard: object expected"); + message.results_by_shard[keys[i]] = $root.vtctldata.ValidateShardResponse.fromObject(object.results_by_shard[keys[i]]); + } + } + return message; }; /** - * Creates a plain object from a SleepTabletResponse message. Also converts values to other types if specified. + * Creates a plain object from a ValidateKeyspaceResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.SleepTabletResponse + * @memberof vtctldata.ValidateKeyspaceResponse * @static - * @param {vtctldata.SleepTabletResponse} message SleepTabletResponse + * @param {vtctldata.ValidateKeyspaceResponse} message ValidateKeyspaceResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SleepTabletResponse.toObject = function toObject() { - return {}; + ValidateKeyspaceResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.results = []; + if (options.objects || options.defaults) + object.results_by_shard = {}; + if (message.results && message.results.length) { + object.results = []; + for (let j = 0; j < message.results.length; ++j) + object.results[j] = message.results[j]; + } + let keys2; + if (message.results_by_shard && (keys2 = Object.keys(message.results_by_shard)).length) { + object.results_by_shard = {}; + for (let j = 0; j < keys2.length; ++j) + object.results_by_shard[keys2[j]] = $root.vtctldata.ValidateShardResponse.toObject(message.results_by_shard[keys2[j]], options); + } + return object; }; /** - * Converts this SleepTabletResponse to JSON. + * Converts this ValidateKeyspaceResponse to JSON. 
* @function toJSON - * @memberof vtctldata.SleepTabletResponse + * @memberof vtctldata.ValidateKeyspaceResponse * @instance * @returns {Object.} JSON object */ - SleepTabletResponse.prototype.toJSON = function toJSON() { + ValidateKeyspaceResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SleepTabletResponse + * Gets the default type url for ValidateKeyspaceResponse * @function getTypeUrl - * @memberof vtctldata.SleepTabletResponse + * @memberof vtctldata.ValidateKeyspaceResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SleepTabletResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ValidateKeyspaceResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.SleepTabletResponse"; + return typeUrlPrefix + "/vtctldata.ValidateKeyspaceResponse"; }; - return SleepTabletResponse; + return ValidateKeyspaceResponse; })(); - vtctldata.SourceShardAddRequest = (function() { + vtctldata.ValidateSchemaKeyspaceRequest = (function() { /** - * Properties of a SourceShardAddRequest. + * Properties of a ValidateSchemaKeyspaceRequest. 
* @memberof vtctldata - * @interface ISourceShardAddRequest - * @property {string|null} [keyspace] SourceShardAddRequest keyspace - * @property {string|null} [shard] SourceShardAddRequest shard - * @property {number|null} [uid] SourceShardAddRequest uid - * @property {string|null} [source_keyspace] SourceShardAddRequest source_keyspace - * @property {string|null} [source_shard] SourceShardAddRequest source_shard - * @property {topodata.IKeyRange|null} [key_range] SourceShardAddRequest key_range - * @property {Array.|null} [tables] SourceShardAddRequest tables + * @interface IValidateSchemaKeyspaceRequest + * @property {string|null} [keyspace] ValidateSchemaKeyspaceRequest keyspace + * @property {Array.|null} [exclude_tables] ValidateSchemaKeyspaceRequest exclude_tables + * @property {boolean|null} [include_views] ValidateSchemaKeyspaceRequest include_views + * @property {boolean|null} [skip_no_primary] ValidateSchemaKeyspaceRequest skip_no_primary + * @property {boolean|null} [include_vschema] ValidateSchemaKeyspaceRequest include_vschema */ /** - * Constructs a new SourceShardAddRequest. + * Constructs a new ValidateSchemaKeyspaceRequest. * @memberof vtctldata - * @classdesc Represents a SourceShardAddRequest. - * @implements ISourceShardAddRequest + * @classdesc Represents a ValidateSchemaKeyspaceRequest. + * @implements IValidateSchemaKeyspaceRequest * @constructor - * @param {vtctldata.ISourceShardAddRequest=} [properties] Properties to set + * @param {vtctldata.IValidateSchemaKeyspaceRequest=} [properties] Properties to set */ - function SourceShardAddRequest(properties) { - this.tables = []; + function ValidateSchemaKeyspaceRequest(properties) { + this.exclude_tables = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -135224,131 +157779,111 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * SourceShardAddRequest keyspace. + * ValidateSchemaKeyspaceRequest keyspace. 
* @member {string} keyspace - * @memberof vtctldata.SourceShardAddRequest - * @instance - */ - SourceShardAddRequest.prototype.keyspace = ""; - - /** - * SourceShardAddRequest shard. - * @member {string} shard - * @memberof vtctldata.SourceShardAddRequest - * @instance - */ - SourceShardAddRequest.prototype.shard = ""; - - /** - * SourceShardAddRequest uid. - * @member {number} uid - * @memberof vtctldata.SourceShardAddRequest + * @memberof vtctldata.ValidateSchemaKeyspaceRequest * @instance */ - SourceShardAddRequest.prototype.uid = 0; + ValidateSchemaKeyspaceRequest.prototype.keyspace = ""; /** - * SourceShardAddRequest source_keyspace. - * @member {string} source_keyspace - * @memberof vtctldata.SourceShardAddRequest + * ValidateSchemaKeyspaceRequest exclude_tables. + * @member {Array.} exclude_tables + * @memberof vtctldata.ValidateSchemaKeyspaceRequest * @instance */ - SourceShardAddRequest.prototype.source_keyspace = ""; + ValidateSchemaKeyspaceRequest.prototype.exclude_tables = $util.emptyArray; /** - * SourceShardAddRequest source_shard. - * @member {string} source_shard - * @memberof vtctldata.SourceShardAddRequest + * ValidateSchemaKeyspaceRequest include_views. + * @member {boolean} include_views + * @memberof vtctldata.ValidateSchemaKeyspaceRequest * @instance */ - SourceShardAddRequest.prototype.source_shard = ""; + ValidateSchemaKeyspaceRequest.prototype.include_views = false; /** - * SourceShardAddRequest key_range. - * @member {topodata.IKeyRange|null|undefined} key_range - * @memberof vtctldata.SourceShardAddRequest + * ValidateSchemaKeyspaceRequest skip_no_primary. + * @member {boolean} skip_no_primary + * @memberof vtctldata.ValidateSchemaKeyspaceRequest * @instance */ - SourceShardAddRequest.prototype.key_range = null; + ValidateSchemaKeyspaceRequest.prototype.skip_no_primary = false; /** - * SourceShardAddRequest tables. - * @member {Array.} tables - * @memberof vtctldata.SourceShardAddRequest + * ValidateSchemaKeyspaceRequest include_vschema. 
+ * @member {boolean} include_vschema + * @memberof vtctldata.ValidateSchemaKeyspaceRequest * @instance */ - SourceShardAddRequest.prototype.tables = $util.emptyArray; + ValidateSchemaKeyspaceRequest.prototype.include_vschema = false; /** - * Creates a new SourceShardAddRequest instance using the specified properties. + * Creates a new ValidateSchemaKeyspaceRequest instance using the specified properties. * @function create - * @memberof vtctldata.SourceShardAddRequest + * @memberof vtctldata.ValidateSchemaKeyspaceRequest * @static - * @param {vtctldata.ISourceShardAddRequest=} [properties] Properties to set - * @returns {vtctldata.SourceShardAddRequest} SourceShardAddRequest instance + * @param {vtctldata.IValidateSchemaKeyspaceRequest=} [properties] Properties to set + * @returns {vtctldata.ValidateSchemaKeyspaceRequest} ValidateSchemaKeyspaceRequest instance */ - SourceShardAddRequest.create = function create(properties) { - return new SourceShardAddRequest(properties); + ValidateSchemaKeyspaceRequest.create = function create(properties) { + return new ValidateSchemaKeyspaceRequest(properties); }; /** - * Encodes the specified SourceShardAddRequest message. Does not implicitly {@link vtctldata.SourceShardAddRequest.verify|verify} messages. + * Encodes the specified ValidateSchemaKeyspaceRequest message. Does not implicitly {@link vtctldata.ValidateSchemaKeyspaceRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.SourceShardAddRequest + * @memberof vtctldata.ValidateSchemaKeyspaceRequest * @static - * @param {vtctldata.ISourceShardAddRequest} message SourceShardAddRequest message or plain object to encode + * @param {vtctldata.IValidateSchemaKeyspaceRequest} message ValidateSchemaKeyspaceRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SourceShardAddRequest.encode = function encode(message, writer) { + ValidateSchemaKeyspaceRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.uid != null && Object.hasOwnProperty.call(message, "uid")) - writer.uint32(/* id 3, wireType 0 =*/24).int32(message.uid); - if (message.source_keyspace != null && Object.hasOwnProperty.call(message, "source_keyspace")) - writer.uint32(/* id 4, wireType 2 =*/34).string(message.source_keyspace); - if (message.source_shard != null && Object.hasOwnProperty.call(message, "source_shard")) - writer.uint32(/* id 5, wireType 2 =*/42).string(message.source_shard); - if (message.key_range != null && Object.hasOwnProperty.call(message, "key_range")) - $root.topodata.KeyRange.encode(message.key_range, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); - if (message.tables != null && message.tables.length) - for (let i = 0; i < message.tables.length; ++i) - writer.uint32(/* id 7, wireType 2 =*/58).string(message.tables[i]); + if (message.exclude_tables != null && message.exclude_tables.length) + for (let i = 0; i < message.exclude_tables.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.exclude_tables[i]); + if 
(message.include_views != null && Object.hasOwnProperty.call(message, "include_views")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.include_views); + if (message.skip_no_primary != null && Object.hasOwnProperty.call(message, "skip_no_primary")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.skip_no_primary); + if (message.include_vschema != null && Object.hasOwnProperty.call(message, "include_vschema")) + writer.uint32(/* id 5, wireType 0 =*/40).bool(message.include_vschema); return writer; }; /** - * Encodes the specified SourceShardAddRequest message, length delimited. Does not implicitly {@link vtctldata.SourceShardAddRequest.verify|verify} messages. + * Encodes the specified ValidateSchemaKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateSchemaKeyspaceRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.SourceShardAddRequest + * @memberof vtctldata.ValidateSchemaKeyspaceRequest * @static - * @param {vtctldata.ISourceShardAddRequest} message SourceShardAddRequest message or plain object to encode + * @param {vtctldata.IValidateSchemaKeyspaceRequest} message ValidateSchemaKeyspaceRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SourceShardAddRequest.encodeDelimited = function encodeDelimited(message, writer) { + ValidateSchemaKeyspaceRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SourceShardAddRequest message from the specified reader or buffer. + * Decodes a ValidateSchemaKeyspaceRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.SourceShardAddRequest + * @memberof vtctldata.ValidateSchemaKeyspaceRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SourceShardAddRequest} SourceShardAddRequest + * @returns {vtctldata.ValidateSchemaKeyspaceRequest} ValidateSchemaKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SourceShardAddRequest.decode = function decode(reader, length) { + ValidateSchemaKeyspaceRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SourceShardAddRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateSchemaKeyspaceRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -135357,29 +157892,21 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 2: { - message.shard = reader.string(); + if (!(message.exclude_tables && message.exclude_tables.length)) + message.exclude_tables = []; + message.exclude_tables.push(reader.string()); break; } case 3: { - message.uid = reader.int32(); + message.include_views = reader.bool(); break; } case 4: { - message.source_keyspace = reader.string(); + message.skip_no_primary = reader.bool(); break; } case 5: { - message.source_shard = reader.string(); - break; - } - case 6: { - message.key_range = $root.topodata.KeyRange.decode(reader, reader.uint32()); - break; - } - case 7: { - if (!(message.tables && message.tables.length)) - message.tables = []; - message.tables.push(reader.string()); + message.include_vschema = reader.bool(); break; } default: @@ -135391,189 +157918,171 @@ export const vtctldata = 
$root.vtctldata = (() => { }; /** - * Decodes a SourceShardAddRequest message from the specified reader or buffer, length delimited. + * Decodes a ValidateSchemaKeyspaceRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.SourceShardAddRequest + * @memberof vtctldata.ValidateSchemaKeyspaceRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SourceShardAddRequest} SourceShardAddRequest + * @returns {vtctldata.ValidateSchemaKeyspaceRequest} ValidateSchemaKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SourceShardAddRequest.decodeDelimited = function decodeDelimited(reader) { + ValidateSchemaKeyspaceRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SourceShardAddRequest message. + * Verifies a ValidateSchemaKeyspaceRequest message. 
* @function verify - * @memberof vtctldata.SourceShardAddRequest + * @memberof vtctldata.ValidateSchemaKeyspaceRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SourceShardAddRequest.verify = function verify(message) { + ValidateSchemaKeyspaceRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.keyspace != null && message.hasOwnProperty("keyspace")) if (!$util.isString(message.keyspace)) return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.uid != null && message.hasOwnProperty("uid")) - if (!$util.isInteger(message.uid)) - return "uid: integer expected"; - if (message.source_keyspace != null && message.hasOwnProperty("source_keyspace")) - if (!$util.isString(message.source_keyspace)) - return "source_keyspace: string expected"; - if (message.source_shard != null && message.hasOwnProperty("source_shard")) - if (!$util.isString(message.source_shard)) - return "source_shard: string expected"; - if (message.key_range != null && message.hasOwnProperty("key_range")) { - let error = $root.topodata.KeyRange.verify(message.key_range); - if (error) - return "key_range." 
+ error; - } - if (message.tables != null && message.hasOwnProperty("tables")) { - if (!Array.isArray(message.tables)) - return "tables: array expected"; - for (let i = 0; i < message.tables.length; ++i) - if (!$util.isString(message.tables[i])) - return "tables: string[] expected"; + if (message.exclude_tables != null && message.hasOwnProperty("exclude_tables")) { + if (!Array.isArray(message.exclude_tables)) + return "exclude_tables: array expected"; + for (let i = 0; i < message.exclude_tables.length; ++i) + if (!$util.isString(message.exclude_tables[i])) + return "exclude_tables: string[] expected"; } + if (message.include_views != null && message.hasOwnProperty("include_views")) + if (typeof message.include_views !== "boolean") + return "include_views: boolean expected"; + if (message.skip_no_primary != null && message.hasOwnProperty("skip_no_primary")) + if (typeof message.skip_no_primary !== "boolean") + return "skip_no_primary: boolean expected"; + if (message.include_vschema != null && message.hasOwnProperty("include_vschema")) + if (typeof message.include_vschema !== "boolean") + return "include_vschema: boolean expected"; return null; }; /** - * Creates a SourceShardAddRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateSchemaKeyspaceRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.SourceShardAddRequest + * @memberof vtctldata.ValidateSchemaKeyspaceRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.SourceShardAddRequest} SourceShardAddRequest + * @returns {vtctldata.ValidateSchemaKeyspaceRequest} ValidateSchemaKeyspaceRequest */ - SourceShardAddRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SourceShardAddRequest) + ValidateSchemaKeyspaceRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ValidateSchemaKeyspaceRequest) return object; - let message = new $root.vtctldata.SourceShardAddRequest(); + let message = new $root.vtctldata.ValidateSchemaKeyspaceRequest(); if (object.keyspace != null) message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.uid != null) - message.uid = object.uid | 0; - if (object.source_keyspace != null) - message.source_keyspace = String(object.source_keyspace); - if (object.source_shard != null) - message.source_shard = String(object.source_shard); - if (object.key_range != null) { - if (typeof object.key_range !== "object") - throw TypeError(".vtctldata.SourceShardAddRequest.key_range: object expected"); - message.key_range = $root.topodata.KeyRange.fromObject(object.key_range); - } - if (object.tables) { - if (!Array.isArray(object.tables)) - throw TypeError(".vtctldata.SourceShardAddRequest.tables: array expected"); - message.tables = []; - for (let i = 0; i < object.tables.length; ++i) - message.tables[i] = String(object.tables[i]); + if (object.exclude_tables) { + if (!Array.isArray(object.exclude_tables)) + throw TypeError(".vtctldata.ValidateSchemaKeyspaceRequest.exclude_tables: array expected"); + message.exclude_tables = []; + for (let i = 0; i < object.exclude_tables.length; ++i) + message.exclude_tables[i] = String(object.exclude_tables[i]); } + if (object.include_views != null) + 
message.include_views = Boolean(object.include_views); + if (object.skip_no_primary != null) + message.skip_no_primary = Boolean(object.skip_no_primary); + if (object.include_vschema != null) + message.include_vschema = Boolean(object.include_vschema); return message; }; /** - * Creates a plain object from a SourceShardAddRequest message. Also converts values to other types if specified. + * Creates a plain object from a ValidateSchemaKeyspaceRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.SourceShardAddRequest + * @memberof vtctldata.ValidateSchemaKeyspaceRequest * @static - * @param {vtctldata.SourceShardAddRequest} message SourceShardAddRequest + * @param {vtctldata.ValidateSchemaKeyspaceRequest} message ValidateSchemaKeyspaceRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SourceShardAddRequest.toObject = function toObject(message, options) { + ValidateSchemaKeyspaceRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) - object.tables = []; + object.exclude_tables = []; if (options.defaults) { object.keyspace = ""; - object.shard = ""; - object.uid = 0; - object.source_keyspace = ""; - object.source_shard = ""; - object.key_range = null; + object.include_views = false; + object.skip_no_primary = false; + object.include_vschema = false; } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.uid != null && message.hasOwnProperty("uid")) - object.uid = message.uid; - if (message.source_keyspace != null && message.hasOwnProperty("source_keyspace")) - object.source_keyspace = message.source_keyspace; - if (message.source_shard != null && message.hasOwnProperty("source_shard")) - 
object.source_shard = message.source_shard; - if (message.key_range != null && message.hasOwnProperty("key_range")) - object.key_range = $root.topodata.KeyRange.toObject(message.key_range, options); - if (message.tables && message.tables.length) { - object.tables = []; - for (let j = 0; j < message.tables.length; ++j) - object.tables[j] = message.tables[j]; + if (message.exclude_tables && message.exclude_tables.length) { + object.exclude_tables = []; + for (let j = 0; j < message.exclude_tables.length; ++j) + object.exclude_tables[j] = message.exclude_tables[j]; } + if (message.include_views != null && message.hasOwnProperty("include_views")) + object.include_views = message.include_views; + if (message.skip_no_primary != null && message.hasOwnProperty("skip_no_primary")) + object.skip_no_primary = message.skip_no_primary; + if (message.include_vschema != null && message.hasOwnProperty("include_vschema")) + object.include_vschema = message.include_vschema; return object; }; /** - * Converts this SourceShardAddRequest to JSON. + * Converts this ValidateSchemaKeyspaceRequest to JSON. 
* @function toJSON - * @memberof vtctldata.SourceShardAddRequest + * @memberof vtctldata.ValidateSchemaKeyspaceRequest * @instance * @returns {Object.} JSON object */ - SourceShardAddRequest.prototype.toJSON = function toJSON() { + ValidateSchemaKeyspaceRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SourceShardAddRequest + * Gets the default type url for ValidateSchemaKeyspaceRequest * @function getTypeUrl - * @memberof vtctldata.SourceShardAddRequest + * @memberof vtctldata.ValidateSchemaKeyspaceRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SourceShardAddRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ValidateSchemaKeyspaceRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.SourceShardAddRequest"; + return typeUrlPrefix + "/vtctldata.ValidateSchemaKeyspaceRequest"; }; - return SourceShardAddRequest; + return ValidateSchemaKeyspaceRequest; })(); - vtctldata.SourceShardAddResponse = (function() { + vtctldata.ValidateSchemaKeyspaceResponse = (function() { /** - * Properties of a SourceShardAddResponse. + * Properties of a ValidateSchemaKeyspaceResponse. * @memberof vtctldata - * @interface ISourceShardAddResponse - * @property {topodata.IShard|null} [shard] SourceShardAddResponse shard + * @interface IValidateSchemaKeyspaceResponse + * @property {Array.|null} [results] ValidateSchemaKeyspaceResponse results + * @property {Object.|null} [results_by_shard] ValidateSchemaKeyspaceResponse results_by_shard */ /** - * Constructs a new SourceShardAddResponse. + * Constructs a new ValidateSchemaKeyspaceResponse. * @memberof vtctldata - * @classdesc Represents a SourceShardAddResponse. 
- * @implements ISourceShardAddResponse + * @classdesc Represents a ValidateSchemaKeyspaceResponse. + * @implements IValidateSchemaKeyspaceResponse * @constructor - * @param {vtctldata.ISourceShardAddResponse=} [properties] Properties to set + * @param {vtctldata.IValidateSchemaKeyspaceResponse=} [properties] Properties to set */ - function SourceShardAddResponse(properties) { + function ValidateSchemaKeyspaceResponse(properties) { + this.results = []; + this.results_by_shard = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -135581,75 +158090,114 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * SourceShardAddResponse shard. - * @member {topodata.IShard|null|undefined} shard - * @memberof vtctldata.SourceShardAddResponse + * ValidateSchemaKeyspaceResponse results. + * @member {Array.} results + * @memberof vtctldata.ValidateSchemaKeyspaceResponse * @instance */ - SourceShardAddResponse.prototype.shard = null; + ValidateSchemaKeyspaceResponse.prototype.results = $util.emptyArray; /** - * Creates a new SourceShardAddResponse instance using the specified properties. + * ValidateSchemaKeyspaceResponse results_by_shard. + * @member {Object.} results_by_shard + * @memberof vtctldata.ValidateSchemaKeyspaceResponse + * @instance + */ + ValidateSchemaKeyspaceResponse.prototype.results_by_shard = $util.emptyObject; + + /** + * Creates a new ValidateSchemaKeyspaceResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.SourceShardAddResponse + * @memberof vtctldata.ValidateSchemaKeyspaceResponse * @static - * @param {vtctldata.ISourceShardAddResponse=} [properties] Properties to set - * @returns {vtctldata.SourceShardAddResponse} SourceShardAddResponse instance + * @param {vtctldata.IValidateSchemaKeyspaceResponse=} [properties] Properties to set + * @returns {vtctldata.ValidateSchemaKeyspaceResponse} ValidateSchemaKeyspaceResponse instance */ - SourceShardAddResponse.create = function create(properties) { - return new SourceShardAddResponse(properties); + ValidateSchemaKeyspaceResponse.create = function create(properties) { + return new ValidateSchemaKeyspaceResponse(properties); }; /** - * Encodes the specified SourceShardAddResponse message. Does not implicitly {@link vtctldata.SourceShardAddResponse.verify|verify} messages. + * Encodes the specified ValidateSchemaKeyspaceResponse message. Does not implicitly {@link vtctldata.ValidateSchemaKeyspaceResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.SourceShardAddResponse + * @memberof vtctldata.ValidateSchemaKeyspaceResponse * @static - * @param {vtctldata.ISourceShardAddResponse} message SourceShardAddResponse message or plain object to encode + * @param {vtctldata.IValidateSchemaKeyspaceResponse} message ValidateSchemaKeyspaceResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SourceShardAddResponse.encode = function encode(message, writer) { + ValidateSchemaKeyspaceResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - $root.topodata.Shard.encode(message.shard, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.results != null && message.results.length) + for (let i = 0; i < message.results.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.results[i]); + if (message.results_by_shard != null && Object.hasOwnProperty.call(message, "results_by_shard")) + for (let keys = Object.keys(message.results_by_shard), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.vtctldata.ValidateShardResponse.encode(message.results_by_shard[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } return writer; }; /** - * Encodes the specified SourceShardAddResponse message, length delimited. Does not implicitly {@link vtctldata.SourceShardAddResponse.verify|verify} messages. + * Encodes the specified ValidateSchemaKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateSchemaKeyspaceResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.SourceShardAddResponse + * @memberof vtctldata.ValidateSchemaKeyspaceResponse * @static - * @param {vtctldata.ISourceShardAddResponse} message SourceShardAddResponse message or plain object to encode + * @param {vtctldata.IValidateSchemaKeyspaceResponse} message ValidateSchemaKeyspaceResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SourceShardAddResponse.encodeDelimited = function encodeDelimited(message, writer) { + ValidateSchemaKeyspaceResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SourceShardAddResponse message from the specified reader or buffer. + * Decodes a ValidateSchemaKeyspaceResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.SourceShardAddResponse + * @memberof vtctldata.ValidateSchemaKeyspaceResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SourceShardAddResponse} SourceShardAddResponse + * @returns {vtctldata.ValidateSchemaKeyspaceResponse} ValidateSchemaKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SourceShardAddResponse.decode = function decode(reader, length) { + ValidateSchemaKeyspaceResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SourceShardAddResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ValidateSchemaKeyspaceResponse(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.shard = $root.topodata.Shard.decode(reader, reader.uint32()); + if (!(message.results && message.results.length)) + message.results = []; + message.results.push(reader.string()); + break; + } + case 2: { + if (message.results_by_shard === $util.emptyObject) + message.results_by_shard = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.vtctldata.ValidateShardResponse.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.results_by_shard[key] = value; break; } default: @@ -135661,129 +158209,164 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a SourceShardAddResponse message from the specified reader or buffer, length delimited. + * Decodes a ValidateSchemaKeyspaceResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.SourceShardAddResponse + * @memberof vtctldata.ValidateSchemaKeyspaceResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SourceShardAddResponse} SourceShardAddResponse + * @returns {vtctldata.ValidateSchemaKeyspaceResponse} ValidateSchemaKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SourceShardAddResponse.decodeDelimited = function decodeDelimited(reader) { + ValidateSchemaKeyspaceResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SourceShardAddResponse message. + * Verifies a ValidateSchemaKeyspaceResponse message. * @function verify - * @memberof vtctldata.SourceShardAddResponse + * @memberof vtctldata.ValidateSchemaKeyspaceResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SourceShardAddResponse.verify = function verify(message) { + ValidateSchemaKeyspaceResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.shard != null && message.hasOwnProperty("shard")) { - let error = $root.topodata.Shard.verify(message.shard); - if (error) - return "shard." 
+ error; + if (message.results != null && message.hasOwnProperty("results")) { + if (!Array.isArray(message.results)) + return "results: array expected"; + for (let i = 0; i < message.results.length; ++i) + if (!$util.isString(message.results[i])) + return "results: string[] expected"; + } + if (message.results_by_shard != null && message.hasOwnProperty("results_by_shard")) { + if (!$util.isObject(message.results_by_shard)) + return "results_by_shard: object expected"; + let key = Object.keys(message.results_by_shard); + for (let i = 0; i < key.length; ++i) { + let error = $root.vtctldata.ValidateShardResponse.verify(message.results_by_shard[key[i]]); + if (error) + return "results_by_shard." + error; + } } return null; }; /** - * Creates a SourceShardAddResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateSchemaKeyspaceResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.SourceShardAddResponse + * @memberof vtctldata.ValidateSchemaKeyspaceResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.SourceShardAddResponse} SourceShardAddResponse + * @returns {vtctldata.ValidateSchemaKeyspaceResponse} ValidateSchemaKeyspaceResponse */ - SourceShardAddResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SourceShardAddResponse) + ValidateSchemaKeyspaceResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ValidateSchemaKeyspaceResponse) return object; - let message = new $root.vtctldata.SourceShardAddResponse(); - if (object.shard != null) { - if (typeof object.shard !== "object") - throw TypeError(".vtctldata.SourceShardAddResponse.shard: object expected"); - message.shard = $root.topodata.Shard.fromObject(object.shard); + let message = new $root.vtctldata.ValidateSchemaKeyspaceResponse(); + if (object.results) { + if 
(!Array.isArray(object.results)) + throw TypeError(".vtctldata.ValidateSchemaKeyspaceResponse.results: array expected"); + message.results = []; + for (let i = 0; i < object.results.length; ++i) + message.results[i] = String(object.results[i]); + } + if (object.results_by_shard) { + if (typeof object.results_by_shard !== "object") + throw TypeError(".vtctldata.ValidateSchemaKeyspaceResponse.results_by_shard: object expected"); + message.results_by_shard = {}; + for (let keys = Object.keys(object.results_by_shard), i = 0; i < keys.length; ++i) { + if (typeof object.results_by_shard[keys[i]] !== "object") + throw TypeError(".vtctldata.ValidateSchemaKeyspaceResponse.results_by_shard: object expected"); + message.results_by_shard[keys[i]] = $root.vtctldata.ValidateShardResponse.fromObject(object.results_by_shard[keys[i]]); + } } return message; }; /** - * Creates a plain object from a SourceShardAddResponse message. Also converts values to other types if specified. + * Creates a plain object from a ValidateSchemaKeyspaceResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.SourceShardAddResponse + * @memberof vtctldata.ValidateSchemaKeyspaceResponse * @static - * @param {vtctldata.SourceShardAddResponse} message SourceShardAddResponse + * @param {vtctldata.ValidateSchemaKeyspaceResponse} message ValidateSchemaKeyspaceResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SourceShardAddResponse.toObject = function toObject(message, options) { + ValidateSchemaKeyspaceResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.shard = null; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = $root.topodata.Shard.toObject(message.shard, options); + if (options.arrays || options.defaults) + object.results = []; + if (options.objects || options.defaults) + object.results_by_shard = {}; + if (message.results && message.results.length) { + object.results = []; + for (let j = 0; j < message.results.length; ++j) + object.results[j] = message.results[j]; + } + let keys2; + if (message.results_by_shard && (keys2 = Object.keys(message.results_by_shard)).length) { + object.results_by_shard = {}; + for (let j = 0; j < keys2.length; ++j) + object.results_by_shard[keys2[j]] = $root.vtctldata.ValidateShardResponse.toObject(message.results_by_shard[keys2[j]], options); + } return object; }; /** - * Converts this SourceShardAddResponse to JSON. + * Converts this ValidateSchemaKeyspaceResponse to JSON. 
* @function toJSON - * @memberof vtctldata.SourceShardAddResponse + * @memberof vtctldata.ValidateSchemaKeyspaceResponse * @instance * @returns {Object.} JSON object */ - SourceShardAddResponse.prototype.toJSON = function toJSON() { + ValidateSchemaKeyspaceResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SourceShardAddResponse + * Gets the default type url for ValidateSchemaKeyspaceResponse * @function getTypeUrl - * @memberof vtctldata.SourceShardAddResponse + * @memberof vtctldata.ValidateSchemaKeyspaceResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SourceShardAddResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ValidateSchemaKeyspaceResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.SourceShardAddResponse"; + return typeUrlPrefix + "/vtctldata.ValidateSchemaKeyspaceResponse"; }; - return SourceShardAddResponse; + return ValidateSchemaKeyspaceResponse; })(); - vtctldata.SourceShardDeleteRequest = (function() { + vtctldata.ValidateShardRequest = (function() { /** - * Properties of a SourceShardDeleteRequest. + * Properties of a ValidateShardRequest. * @memberof vtctldata - * @interface ISourceShardDeleteRequest - * @property {string|null} [keyspace] SourceShardDeleteRequest keyspace - * @property {string|null} [shard] SourceShardDeleteRequest shard - * @property {number|null} [uid] SourceShardDeleteRequest uid + * @interface IValidateShardRequest + * @property {string|null} [keyspace] ValidateShardRequest keyspace + * @property {string|null} [shard] ValidateShardRequest shard + * @property {boolean|null} [ping_tablets] ValidateShardRequest ping_tablets */ /** - * Constructs a new SourceShardDeleteRequest. 
+ * Constructs a new ValidateShardRequest. * @memberof vtctldata - * @classdesc Represents a SourceShardDeleteRequest. - * @implements ISourceShardDeleteRequest + * @classdesc Represents a ValidateShardRequest. + * @implements IValidateShardRequest * @constructor - * @param {vtctldata.ISourceShardDeleteRequest=} [properties] Properties to set + * @param {vtctldata.IValidateShardRequest=} [properties] Properties to set */ - function SourceShardDeleteRequest(properties) { + function ValidateShardRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -135791,90 +158374,90 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * SourceShardDeleteRequest keyspace. + * ValidateShardRequest keyspace. * @member {string} keyspace - * @memberof vtctldata.SourceShardDeleteRequest + * @memberof vtctldata.ValidateShardRequest * @instance */ - SourceShardDeleteRequest.prototype.keyspace = ""; + ValidateShardRequest.prototype.keyspace = ""; /** - * SourceShardDeleteRequest shard. + * ValidateShardRequest shard. * @member {string} shard - * @memberof vtctldata.SourceShardDeleteRequest + * @memberof vtctldata.ValidateShardRequest * @instance */ - SourceShardDeleteRequest.prototype.shard = ""; + ValidateShardRequest.prototype.shard = ""; /** - * SourceShardDeleteRequest uid. - * @member {number} uid - * @memberof vtctldata.SourceShardDeleteRequest + * ValidateShardRequest ping_tablets. + * @member {boolean} ping_tablets + * @memberof vtctldata.ValidateShardRequest * @instance */ - SourceShardDeleteRequest.prototype.uid = 0; + ValidateShardRequest.prototype.ping_tablets = false; /** - * Creates a new SourceShardDeleteRequest instance using the specified properties. + * Creates a new ValidateShardRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.SourceShardDeleteRequest + * @memberof vtctldata.ValidateShardRequest * @static - * @param {vtctldata.ISourceShardDeleteRequest=} [properties] Properties to set - * @returns {vtctldata.SourceShardDeleteRequest} SourceShardDeleteRequest instance + * @param {vtctldata.IValidateShardRequest=} [properties] Properties to set + * @returns {vtctldata.ValidateShardRequest} ValidateShardRequest instance */ - SourceShardDeleteRequest.create = function create(properties) { - return new SourceShardDeleteRequest(properties); + ValidateShardRequest.create = function create(properties) { + return new ValidateShardRequest(properties); }; /** - * Encodes the specified SourceShardDeleteRequest message. Does not implicitly {@link vtctldata.SourceShardDeleteRequest.verify|verify} messages. + * Encodes the specified ValidateShardRequest message. Does not implicitly {@link vtctldata.ValidateShardRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.SourceShardDeleteRequest + * @memberof vtctldata.ValidateShardRequest * @static - * @param {vtctldata.ISourceShardDeleteRequest} message SourceShardDeleteRequest message or plain object to encode + * @param {vtctldata.IValidateShardRequest} message ValidateShardRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SourceShardDeleteRequest.encode = function encode(message, writer) { + ValidateShardRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.uid != null && Object.hasOwnProperty.call(message, "uid")) - writer.uint32(/* id 3, wireType 0 
=*/24).int32(message.uid); + if (message.ping_tablets != null && Object.hasOwnProperty.call(message, "ping_tablets")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.ping_tablets); return writer; }; /** - * Encodes the specified SourceShardDeleteRequest message, length delimited. Does not implicitly {@link vtctldata.SourceShardDeleteRequest.verify|verify} messages. + * Encodes the specified ValidateShardRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateShardRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.SourceShardDeleteRequest + * @memberof vtctldata.ValidateShardRequest * @static - * @param {vtctldata.ISourceShardDeleteRequest} message SourceShardDeleteRequest message or plain object to encode + * @param {vtctldata.IValidateShardRequest} message ValidateShardRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SourceShardDeleteRequest.encodeDelimited = function encodeDelimited(message, writer) { + ValidateShardRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SourceShardDeleteRequest message from the specified reader or buffer. + * Decodes a ValidateShardRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.SourceShardDeleteRequest + * @memberof vtctldata.ValidateShardRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SourceShardDeleteRequest} SourceShardDeleteRequest + * @returns {vtctldata.ValidateShardRequest} ValidateShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SourceShardDeleteRequest.decode = function decode(reader, length) { + ValidateShardRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SourceShardDeleteRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateShardRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -135887,7 +158470,7 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 3: { - message.uid = reader.int32(); + message.ping_tablets = reader.bool(); break; } default: @@ -135899,30 +158482,30 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a SourceShardDeleteRequest message from the specified reader or buffer, length delimited. + * Decodes a ValidateShardRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.SourceShardDeleteRequest + * @memberof vtctldata.ValidateShardRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SourceShardDeleteRequest} SourceShardDeleteRequest + * @returns {vtctldata.ValidateShardRequest} ValidateShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SourceShardDeleteRequest.decodeDelimited = function decodeDelimited(reader) { + ValidateShardRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SourceShardDeleteRequest message. + * Verifies a ValidateShardRequest message. * @function verify - * @memberof vtctldata.SourceShardDeleteRequest + * @memberof vtctldata.ValidateShardRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SourceShardDeleteRequest.verify = function verify(message) { + ValidateShardRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.keyspace != null && message.hasOwnProperty("keyspace")) @@ -135931,107 +158514,108 @@ export const vtctldata = $root.vtctldata = (() => { if (message.shard != null && message.hasOwnProperty("shard")) if (!$util.isString(message.shard)) return "shard: string expected"; - if (message.uid != null && message.hasOwnProperty("uid")) - if (!$util.isInteger(message.uid)) - return "uid: integer expected"; + if (message.ping_tablets != null && message.hasOwnProperty("ping_tablets")) + if (typeof message.ping_tablets !== "boolean") + return "ping_tablets: boolean expected"; return null; }; /** - * Creates a SourceShardDeleteRequest message from a plain object. 
Also converts values to their respective internal types. + * Creates a ValidateShardRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.SourceShardDeleteRequest + * @memberof vtctldata.ValidateShardRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.SourceShardDeleteRequest} SourceShardDeleteRequest + * @returns {vtctldata.ValidateShardRequest} ValidateShardRequest */ - SourceShardDeleteRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SourceShardDeleteRequest) + ValidateShardRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ValidateShardRequest) return object; - let message = new $root.vtctldata.SourceShardDeleteRequest(); + let message = new $root.vtctldata.ValidateShardRequest(); if (object.keyspace != null) message.keyspace = String(object.keyspace); if (object.shard != null) message.shard = String(object.shard); - if (object.uid != null) - message.uid = object.uid | 0; + if (object.ping_tablets != null) + message.ping_tablets = Boolean(object.ping_tablets); return message; }; /** - * Creates a plain object from a SourceShardDeleteRequest message. Also converts values to other types if specified. + * Creates a plain object from a ValidateShardRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.SourceShardDeleteRequest + * @memberof vtctldata.ValidateShardRequest * @static - * @param {vtctldata.SourceShardDeleteRequest} message SourceShardDeleteRequest + * @param {vtctldata.ValidateShardRequest} message ValidateShardRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SourceShardDeleteRequest.toObject = function toObject(message, options) { + ValidateShardRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { object.keyspace = ""; object.shard = ""; - object.uid = 0; + object.ping_tablets = false; } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; if (message.shard != null && message.hasOwnProperty("shard")) object.shard = message.shard; - if (message.uid != null && message.hasOwnProperty("uid")) - object.uid = message.uid; + if (message.ping_tablets != null && message.hasOwnProperty("ping_tablets")) + object.ping_tablets = message.ping_tablets; return object; }; /** - * Converts this SourceShardDeleteRequest to JSON. + * Converts this ValidateShardRequest to JSON. 
* @function toJSON - * @memberof vtctldata.SourceShardDeleteRequest + * @memberof vtctldata.ValidateShardRequest * @instance * @returns {Object.} JSON object */ - SourceShardDeleteRequest.prototype.toJSON = function toJSON() { + ValidateShardRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SourceShardDeleteRequest + * Gets the default type url for ValidateShardRequest * @function getTypeUrl - * @memberof vtctldata.SourceShardDeleteRequest + * @memberof vtctldata.ValidateShardRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SourceShardDeleteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ValidateShardRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.SourceShardDeleteRequest"; + return typeUrlPrefix + "/vtctldata.ValidateShardRequest"; }; - return SourceShardDeleteRequest; + return ValidateShardRequest; })(); - vtctldata.SourceShardDeleteResponse = (function() { + vtctldata.ValidateShardResponse = (function() { /** - * Properties of a SourceShardDeleteResponse. + * Properties of a ValidateShardResponse. * @memberof vtctldata - * @interface ISourceShardDeleteResponse - * @property {topodata.IShard|null} [shard] SourceShardDeleteResponse shard + * @interface IValidateShardResponse + * @property {Array.|null} [results] ValidateShardResponse results */ /** - * Constructs a new SourceShardDeleteResponse. + * Constructs a new ValidateShardResponse. * @memberof vtctldata - * @classdesc Represents a SourceShardDeleteResponse. - * @implements ISourceShardDeleteResponse + * @classdesc Represents a ValidateShardResponse. 
+ * @implements IValidateShardResponse * @constructor - * @param {vtctldata.ISourceShardDeleteResponse=} [properties] Properties to set + * @param {vtctldata.IValidateShardResponse=} [properties] Properties to set */ - function SourceShardDeleteResponse(properties) { + function ValidateShardResponse(properties) { + this.results = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -136039,75 +158623,78 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * SourceShardDeleteResponse shard. - * @member {topodata.IShard|null|undefined} shard - * @memberof vtctldata.SourceShardDeleteResponse + * ValidateShardResponse results. + * @member {Array.} results + * @memberof vtctldata.ValidateShardResponse * @instance */ - SourceShardDeleteResponse.prototype.shard = null; + ValidateShardResponse.prototype.results = $util.emptyArray; /** - * Creates a new SourceShardDeleteResponse instance using the specified properties. + * Creates a new ValidateShardResponse instance using the specified properties. * @function create - * @memberof vtctldata.SourceShardDeleteResponse + * @memberof vtctldata.ValidateShardResponse * @static - * @param {vtctldata.ISourceShardDeleteResponse=} [properties] Properties to set - * @returns {vtctldata.SourceShardDeleteResponse} SourceShardDeleteResponse instance + * @param {vtctldata.IValidateShardResponse=} [properties] Properties to set + * @returns {vtctldata.ValidateShardResponse} ValidateShardResponse instance */ - SourceShardDeleteResponse.create = function create(properties) { - return new SourceShardDeleteResponse(properties); + ValidateShardResponse.create = function create(properties) { + return new ValidateShardResponse(properties); }; /** - * Encodes the specified SourceShardDeleteResponse message. Does not implicitly {@link vtctldata.SourceShardDeleteResponse.verify|verify} messages. + * Encodes the specified ValidateShardResponse message. 
Does not implicitly {@link vtctldata.ValidateShardResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.SourceShardDeleteResponse + * @memberof vtctldata.ValidateShardResponse * @static - * @param {vtctldata.ISourceShardDeleteResponse} message SourceShardDeleteResponse message or plain object to encode + * @param {vtctldata.IValidateShardResponse} message ValidateShardResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SourceShardDeleteResponse.encode = function encode(message, writer) { + ValidateShardResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - $root.topodata.Shard.encode(message.shard, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.results != null && message.results.length) + for (let i = 0; i < message.results.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.results[i]); return writer; }; /** - * Encodes the specified SourceShardDeleteResponse message, length delimited. Does not implicitly {@link vtctldata.SourceShardDeleteResponse.verify|verify} messages. + * Encodes the specified ValidateShardResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateShardResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.SourceShardDeleteResponse + * @memberof vtctldata.ValidateShardResponse * @static - * @param {vtctldata.ISourceShardDeleteResponse} message SourceShardDeleteResponse message or plain object to encode + * @param {vtctldata.IValidateShardResponse} message ValidateShardResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - SourceShardDeleteResponse.encodeDelimited = function encodeDelimited(message, writer) { + ValidateShardResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a SourceShardDeleteResponse message from the specified reader or buffer. + * Decodes a ValidateShardResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.SourceShardDeleteResponse + * @memberof vtctldata.ValidateShardResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SourceShardDeleteResponse} SourceShardDeleteResponse + * @returns {vtctldata.ValidateShardResponse} ValidateShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SourceShardDeleteResponse.decode = function decode(reader, length) { + ValidateShardResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SourceShardDeleteResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ValidateShardResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.shard = $root.topodata.Shard.decode(reader, reader.uint32()); + if (!(message.results && message.results.length)) + message.results = []; + message.results.push(reader.string()); break; } default: @@ -136119,127 +158706,134 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a SourceShardDeleteResponse message from the specified reader or buffer, length delimited. + * Decodes a ValidateShardResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.SourceShardDeleteResponse + * @memberof vtctldata.ValidateShardResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SourceShardDeleteResponse} SourceShardDeleteResponse + * @returns {vtctldata.ValidateShardResponse} ValidateShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - SourceShardDeleteResponse.decodeDelimited = function decodeDelimited(reader) { + ValidateShardResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a SourceShardDeleteResponse message. + * Verifies a ValidateShardResponse message. 
* @function verify - * @memberof vtctldata.SourceShardDeleteResponse + * @memberof vtctldata.ValidateShardResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - SourceShardDeleteResponse.verify = function verify(message) { + ValidateShardResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.shard != null && message.hasOwnProperty("shard")) { - let error = $root.topodata.Shard.verify(message.shard); - if (error) - return "shard." + error; + if (message.results != null && message.hasOwnProperty("results")) { + if (!Array.isArray(message.results)) + return "results: array expected"; + for (let i = 0; i < message.results.length; ++i) + if (!$util.isString(message.results[i])) + return "results: string[] expected"; } return null; }; /** - * Creates a SourceShardDeleteResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateShardResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.SourceShardDeleteResponse + * @memberof vtctldata.ValidateShardResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.SourceShardDeleteResponse} SourceShardDeleteResponse + * @returns {vtctldata.ValidateShardResponse} ValidateShardResponse */ - SourceShardDeleteResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SourceShardDeleteResponse) + ValidateShardResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ValidateShardResponse) return object; - let message = new $root.vtctldata.SourceShardDeleteResponse(); - if (object.shard != null) { - if (typeof object.shard !== "object") - throw TypeError(".vtctldata.SourceShardDeleteResponse.shard: object expected"); - message.shard = $root.topodata.Shard.fromObject(object.shard); + let message = new $root.vtctldata.ValidateShardResponse(); + if (object.results) { + if (!Array.isArray(object.results)) + throw TypeError(".vtctldata.ValidateShardResponse.results: array expected"); + message.results = []; + for (let i = 0; i < object.results.length; ++i) + message.results[i] = String(object.results[i]); } return message; }; /** - * Creates a plain object from a SourceShardDeleteResponse message. Also converts values to other types if specified. + * Creates a plain object from a ValidateShardResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.SourceShardDeleteResponse + * @memberof vtctldata.ValidateShardResponse * @static - * @param {vtctldata.SourceShardDeleteResponse} message SourceShardDeleteResponse + * @param {vtctldata.ValidateShardResponse} message ValidateShardResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - SourceShardDeleteResponse.toObject = function toObject(message, options) { + ValidateShardResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.shard = null; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = $root.topodata.Shard.toObject(message.shard, options); + if (options.arrays || options.defaults) + object.results = []; + if (message.results && message.results.length) { + object.results = []; + for (let j = 0; j < message.results.length; ++j) + object.results[j] = message.results[j]; + } return object; }; /** - * Converts this SourceShardDeleteResponse to JSON. + * Converts this ValidateShardResponse to JSON. 
* @function toJSON - * @memberof vtctldata.SourceShardDeleteResponse + * @memberof vtctldata.ValidateShardResponse * @instance * @returns {Object.} JSON object */ - SourceShardDeleteResponse.prototype.toJSON = function toJSON() { + ValidateShardResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for SourceShardDeleteResponse + * Gets the default type url for ValidateShardResponse * @function getTypeUrl - * @memberof vtctldata.SourceShardDeleteResponse + * @memberof vtctldata.ValidateShardResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - SourceShardDeleteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ValidateShardResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.SourceShardDeleteResponse"; + return typeUrlPrefix + "/vtctldata.ValidateShardResponse"; }; - return SourceShardDeleteResponse; + return ValidateShardResponse; })(); - vtctldata.StartReplicationRequest = (function() { + vtctldata.ValidateVersionKeyspaceRequest = (function() { /** - * Properties of a StartReplicationRequest. + * Properties of a ValidateVersionKeyspaceRequest. * @memberof vtctldata - * @interface IStartReplicationRequest - * @property {topodata.ITabletAlias|null} [tablet_alias] StartReplicationRequest tablet_alias + * @interface IValidateVersionKeyspaceRequest + * @property {string|null} [keyspace] ValidateVersionKeyspaceRequest keyspace */ /** - * Constructs a new StartReplicationRequest. + * Constructs a new ValidateVersionKeyspaceRequest. * @memberof vtctldata - * @classdesc Represents a StartReplicationRequest. - * @implements IStartReplicationRequest + * @classdesc Represents a ValidateVersionKeyspaceRequest. 
+ * @implements IValidateVersionKeyspaceRequest * @constructor - * @param {vtctldata.IStartReplicationRequest=} [properties] Properties to set + * @param {vtctldata.IValidateVersionKeyspaceRequest=} [properties] Properties to set */ - function StartReplicationRequest(properties) { + function ValidateVersionKeyspaceRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -136247,75 +158841,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * StartReplicationRequest tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.StartReplicationRequest + * ValidateVersionKeyspaceRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.ValidateVersionKeyspaceRequest * @instance */ - StartReplicationRequest.prototype.tablet_alias = null; + ValidateVersionKeyspaceRequest.prototype.keyspace = ""; /** - * Creates a new StartReplicationRequest instance using the specified properties. + * Creates a new ValidateVersionKeyspaceRequest instance using the specified properties. * @function create - * @memberof vtctldata.StartReplicationRequest + * @memberof vtctldata.ValidateVersionKeyspaceRequest * @static - * @param {vtctldata.IStartReplicationRequest=} [properties] Properties to set - * @returns {vtctldata.StartReplicationRequest} StartReplicationRequest instance + * @param {vtctldata.IValidateVersionKeyspaceRequest=} [properties] Properties to set + * @returns {vtctldata.ValidateVersionKeyspaceRequest} ValidateVersionKeyspaceRequest instance */ - StartReplicationRequest.create = function create(properties) { - return new StartReplicationRequest(properties); + ValidateVersionKeyspaceRequest.create = function create(properties) { + return new ValidateVersionKeyspaceRequest(properties); }; /** - * Encodes the specified StartReplicationRequest message. 
Does not implicitly {@link vtctldata.StartReplicationRequest.verify|verify} messages. + * Encodes the specified ValidateVersionKeyspaceRequest message. Does not implicitly {@link vtctldata.ValidateVersionKeyspaceRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.StartReplicationRequest + * @memberof vtctldata.ValidateVersionKeyspaceRequest * @static - * @param {vtctldata.IStartReplicationRequest} message StartReplicationRequest message or plain object to encode + * @param {vtctldata.IValidateVersionKeyspaceRequest} message ValidateVersionKeyspaceRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StartReplicationRequest.encode = function encode(message, writer) { + ValidateVersionKeyspaceRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); return writer; }; /** - * Encodes the specified StartReplicationRequest message, length delimited. Does not implicitly {@link vtctldata.StartReplicationRequest.verify|verify} messages. + * Encodes the specified ValidateVersionKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateVersionKeyspaceRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.StartReplicationRequest + * @memberof vtctldata.ValidateVersionKeyspaceRequest * @static - * @param {vtctldata.IStartReplicationRequest} message StartReplicationRequest message or plain object to encode + * @param {vtctldata.IValidateVersionKeyspaceRequest} message ValidateVersionKeyspaceRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StartReplicationRequest.encodeDelimited = function encodeDelimited(message, writer) { + ValidateVersionKeyspaceRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a StartReplicationRequest message from the specified reader or buffer. + * Decodes a ValidateVersionKeyspaceRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.StartReplicationRequest + * @memberof vtctldata.ValidateVersionKeyspaceRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.StartReplicationRequest} StartReplicationRequest + * @returns {vtctldata.ValidateVersionKeyspaceRequest} ValidateVersionKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StartReplicationRequest.decode = function decode(reader, length) { + ValidateVersionKeyspaceRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.StartReplicationRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ValidateVersionKeyspaceRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + message.keyspace = reader.string(); break; } default: @@ -136327,126 +158921,125 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a StartReplicationRequest message from the specified reader or buffer, length delimited. + * Decodes a ValidateVersionKeyspaceRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.StartReplicationRequest + * @memberof vtctldata.ValidateVersionKeyspaceRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.StartReplicationRequest} StartReplicationRequest + * @returns {vtctldata.ValidateVersionKeyspaceRequest} ValidateVersionKeyspaceRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StartReplicationRequest.decodeDelimited = function decodeDelimited(reader) { + ValidateVersionKeyspaceRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a StartReplicationRequest message. + * Verifies a ValidateVersionKeyspaceRequest message. 
* @function verify - * @memberof vtctldata.StartReplicationRequest + * @memberof vtctldata.ValidateVersionKeyspaceRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - StartReplicationRequest.verify = function verify(message) { + ValidateVersionKeyspaceRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; return null; }; /** - * Creates a StartReplicationRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateVersionKeyspaceRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.StartReplicationRequest + * @memberof vtctldata.ValidateVersionKeyspaceRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.StartReplicationRequest} StartReplicationRequest + * @returns {vtctldata.ValidateVersionKeyspaceRequest} ValidateVersionKeyspaceRequest */ - StartReplicationRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.StartReplicationRequest) + ValidateVersionKeyspaceRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ValidateVersionKeyspaceRequest) return object; - let message = new $root.vtctldata.StartReplicationRequest(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.StartReplicationRequest.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } + let message = new $root.vtctldata.ValidateVersionKeyspaceRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); return message; }; /** - * Creates a plain object from a StartReplicationRequest message. Also converts values to other types if specified. + * Creates a plain object from a ValidateVersionKeyspaceRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.StartReplicationRequest + * @memberof vtctldata.ValidateVersionKeyspaceRequest * @static - * @param {vtctldata.StartReplicationRequest} message StartReplicationRequest + * @param {vtctldata.ValidateVersionKeyspaceRequest} message ValidateVersionKeyspaceRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - StartReplicationRequest.toObject = function toObject(message, options) { + ValidateVersionKeyspaceRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) - object.tablet_alias = null; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + object.keyspace = ""; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; return object; }; /** - * Converts this StartReplicationRequest to JSON. + * Converts this ValidateVersionKeyspaceRequest to JSON. 
* @function toJSON - * @memberof vtctldata.StartReplicationRequest + * @memberof vtctldata.ValidateVersionKeyspaceRequest * @instance * @returns {Object.} JSON object */ - StartReplicationRequest.prototype.toJSON = function toJSON() { + ValidateVersionKeyspaceRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for StartReplicationRequest + * Gets the default type url for ValidateVersionKeyspaceRequest * @function getTypeUrl - * @memberof vtctldata.StartReplicationRequest + * @memberof vtctldata.ValidateVersionKeyspaceRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - StartReplicationRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ValidateVersionKeyspaceRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.StartReplicationRequest"; + return typeUrlPrefix + "/vtctldata.ValidateVersionKeyspaceRequest"; }; - return StartReplicationRequest; + return ValidateVersionKeyspaceRequest; })(); - vtctldata.StartReplicationResponse = (function() { + vtctldata.ValidateVersionKeyspaceResponse = (function() { /** - * Properties of a StartReplicationResponse. + * Properties of a ValidateVersionKeyspaceResponse. * @memberof vtctldata - * @interface IStartReplicationResponse + * @interface IValidateVersionKeyspaceResponse + * @property {Array.|null} [results] ValidateVersionKeyspaceResponse results + * @property {Object.|null} [results_by_shard] ValidateVersionKeyspaceResponse results_by_shard */ /** - * Constructs a new StartReplicationResponse. + * Constructs a new ValidateVersionKeyspaceResponse. * @memberof vtctldata - * @classdesc Represents a StartReplicationResponse. 
- * @implements IStartReplicationResponse + * @classdesc Represents a ValidateVersionKeyspaceResponse. + * @implements IValidateVersionKeyspaceResponse * @constructor - * @param {vtctldata.IStartReplicationResponse=} [properties] Properties to set + * @param {vtctldata.IValidateVersionKeyspaceResponse=} [properties] Properties to set */ - function StartReplicationResponse(properties) { + function ValidateVersionKeyspaceResponse(properties) { + this.results = []; + this.results_by_shard = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -136454,63 +159047,116 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new StartReplicationResponse instance using the specified properties. + * ValidateVersionKeyspaceResponse results. + * @member {Array.} results + * @memberof vtctldata.ValidateVersionKeyspaceResponse + * @instance + */ + ValidateVersionKeyspaceResponse.prototype.results = $util.emptyArray; + + /** + * ValidateVersionKeyspaceResponse results_by_shard. + * @member {Object.} results_by_shard + * @memberof vtctldata.ValidateVersionKeyspaceResponse + * @instance + */ + ValidateVersionKeyspaceResponse.prototype.results_by_shard = $util.emptyObject; + + /** + * Creates a new ValidateVersionKeyspaceResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.StartReplicationResponse + * @memberof vtctldata.ValidateVersionKeyspaceResponse * @static - * @param {vtctldata.IStartReplicationResponse=} [properties] Properties to set - * @returns {vtctldata.StartReplicationResponse} StartReplicationResponse instance + * @param {vtctldata.IValidateVersionKeyspaceResponse=} [properties] Properties to set + * @returns {vtctldata.ValidateVersionKeyspaceResponse} ValidateVersionKeyspaceResponse instance */ - StartReplicationResponse.create = function create(properties) { - return new StartReplicationResponse(properties); + ValidateVersionKeyspaceResponse.create = function create(properties) { + return new ValidateVersionKeyspaceResponse(properties); }; /** - * Encodes the specified StartReplicationResponse message. Does not implicitly {@link vtctldata.StartReplicationResponse.verify|verify} messages. + * Encodes the specified ValidateVersionKeyspaceResponse message. Does not implicitly {@link vtctldata.ValidateVersionKeyspaceResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.StartReplicationResponse + * @memberof vtctldata.ValidateVersionKeyspaceResponse * @static - * @param {vtctldata.IStartReplicationResponse} message StartReplicationResponse message or plain object to encode + * @param {vtctldata.IValidateVersionKeyspaceResponse} message ValidateVersionKeyspaceResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StartReplicationResponse.encode = function encode(message, writer) { + ValidateVersionKeyspaceResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.results != null && message.results.length) + for (let i = 0; i < message.results.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.results[i]); + if (message.results_by_shard != null && Object.hasOwnProperty.call(message, "results_by_shard")) + for (let keys = Object.keys(message.results_by_shard), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.vtctldata.ValidateShardResponse.encode(message.results_by_shard[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } return writer; }; /** - * Encodes the specified StartReplicationResponse message, length delimited. Does not implicitly {@link vtctldata.StartReplicationResponse.verify|verify} messages. + * Encodes the specified ValidateVersionKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateVersionKeyspaceResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.StartReplicationResponse + * @memberof vtctldata.ValidateVersionKeyspaceResponse * @static - * @param {vtctldata.IStartReplicationResponse} message StartReplicationResponse message or plain object to encode + * @param {vtctldata.IValidateVersionKeyspaceResponse} message ValidateVersionKeyspaceResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StartReplicationResponse.encodeDelimited = function encodeDelimited(message, writer) { + ValidateVersionKeyspaceResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a StartReplicationResponse message from the specified reader or buffer. + * Decodes a ValidateVersionKeyspaceResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.StartReplicationResponse + * @memberof vtctldata.ValidateVersionKeyspaceResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.StartReplicationResponse} StartReplicationResponse + * @returns {vtctldata.ValidateVersionKeyspaceResponse} ValidateVersionKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StartReplicationResponse.decode = function decode(reader, length) { + ValidateVersionKeyspaceResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.StartReplicationResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ValidateVersionKeyspaceResponse(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + if (!(message.results && message.results.length)) + message.results = []; + message.results.push(reader.string()); + break; + } + case 2: { + if (message.results_by_shard === $util.emptyObject) + message.results_by_shard = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.vtctldata.ValidateShardResponse.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.results_by_shard[key] = value; + break; + } default: reader.skipType(tag & 7); break; @@ -136520,109 +159166,163 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a StartReplicationResponse message from the specified reader or buffer, length delimited. + * Decodes a ValidateVersionKeyspaceResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.StartReplicationResponse + * @memberof vtctldata.ValidateVersionKeyspaceResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.StartReplicationResponse} StartReplicationResponse + * @returns {vtctldata.ValidateVersionKeyspaceResponse} ValidateVersionKeyspaceResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StartReplicationResponse.decodeDelimited = function decodeDelimited(reader) { + ValidateVersionKeyspaceResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a StartReplicationResponse message. + * Verifies a ValidateVersionKeyspaceResponse message. * @function verify - * @memberof vtctldata.StartReplicationResponse + * @memberof vtctldata.ValidateVersionKeyspaceResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - StartReplicationResponse.verify = function verify(message) { + ValidateVersionKeyspaceResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.results != null && message.hasOwnProperty("results")) { + if (!Array.isArray(message.results)) + return "results: array expected"; + for (let i = 0; i < message.results.length; ++i) + if (!$util.isString(message.results[i])) + return "results: string[] expected"; + } + if (message.results_by_shard != null && message.hasOwnProperty("results_by_shard")) { + if (!$util.isObject(message.results_by_shard)) + return "results_by_shard: object expected"; + let key = Object.keys(message.results_by_shard); + for (let i = 0; i < key.length; ++i) { + let error = 
$root.vtctldata.ValidateShardResponse.verify(message.results_by_shard[key[i]]); + if (error) + return "results_by_shard." + error; + } + } return null; }; /** - * Creates a StartReplicationResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateVersionKeyspaceResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.StartReplicationResponse + * @memberof vtctldata.ValidateVersionKeyspaceResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.StartReplicationResponse} StartReplicationResponse + * @returns {vtctldata.ValidateVersionKeyspaceResponse} ValidateVersionKeyspaceResponse */ - StartReplicationResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.StartReplicationResponse) + ValidateVersionKeyspaceResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ValidateVersionKeyspaceResponse) return object; - return new $root.vtctldata.StartReplicationResponse(); + let message = new $root.vtctldata.ValidateVersionKeyspaceResponse(); + if (object.results) { + if (!Array.isArray(object.results)) + throw TypeError(".vtctldata.ValidateVersionKeyspaceResponse.results: array expected"); + message.results = []; + for (let i = 0; i < object.results.length; ++i) + message.results[i] = String(object.results[i]); + } + if (object.results_by_shard) { + if (typeof object.results_by_shard !== "object") + throw TypeError(".vtctldata.ValidateVersionKeyspaceResponse.results_by_shard: object expected"); + message.results_by_shard = {}; + for (let keys = Object.keys(object.results_by_shard), i = 0; i < keys.length; ++i) { + if (typeof object.results_by_shard[keys[i]] !== "object") + throw TypeError(".vtctldata.ValidateVersionKeyspaceResponse.results_by_shard: object expected"); + message.results_by_shard[keys[i]] = 
$root.vtctldata.ValidateShardResponse.fromObject(object.results_by_shard[keys[i]]); + } + } + return message; }; /** - * Creates a plain object from a StartReplicationResponse message. Also converts values to other types if specified. + * Creates a plain object from a ValidateVersionKeyspaceResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.StartReplicationResponse + * @memberof vtctldata.ValidateVersionKeyspaceResponse * @static - * @param {vtctldata.StartReplicationResponse} message StartReplicationResponse + * @param {vtctldata.ValidateVersionKeyspaceResponse} message ValidateVersionKeyspaceResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - StartReplicationResponse.toObject = function toObject() { - return {}; + ValidateVersionKeyspaceResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.results = []; + if (options.objects || options.defaults) + object.results_by_shard = {}; + if (message.results && message.results.length) { + object.results = []; + for (let j = 0; j < message.results.length; ++j) + object.results[j] = message.results[j]; + } + let keys2; + if (message.results_by_shard && (keys2 = Object.keys(message.results_by_shard)).length) { + object.results_by_shard = {}; + for (let j = 0; j < keys2.length; ++j) + object.results_by_shard[keys2[j]] = $root.vtctldata.ValidateShardResponse.toObject(message.results_by_shard[keys2[j]], options); + } + return object; }; /** - * Converts this StartReplicationResponse to JSON. + * Converts this ValidateVersionKeyspaceResponse to JSON. 
* @function toJSON - * @memberof vtctldata.StartReplicationResponse + * @memberof vtctldata.ValidateVersionKeyspaceResponse * @instance * @returns {Object.} JSON object */ - StartReplicationResponse.prototype.toJSON = function toJSON() { + ValidateVersionKeyspaceResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for StartReplicationResponse + * Gets the default type url for ValidateVersionKeyspaceResponse * @function getTypeUrl - * @memberof vtctldata.StartReplicationResponse + * @memberof vtctldata.ValidateVersionKeyspaceResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - StartReplicationResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ValidateVersionKeyspaceResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.StartReplicationResponse"; + return typeUrlPrefix + "/vtctldata.ValidateVersionKeyspaceResponse"; }; - return StartReplicationResponse; + return ValidateVersionKeyspaceResponse; })(); - vtctldata.StopReplicationRequest = (function() { + vtctldata.ValidateVersionShardRequest = (function() { /** - * Properties of a StopReplicationRequest. + * Properties of a ValidateVersionShardRequest. * @memberof vtctldata - * @interface IStopReplicationRequest - * @property {topodata.ITabletAlias|null} [tablet_alias] StopReplicationRequest tablet_alias + * @interface IValidateVersionShardRequest + * @property {string|null} [keyspace] ValidateVersionShardRequest keyspace + * @property {string|null} [shard] ValidateVersionShardRequest shard */ /** - * Constructs a new StopReplicationRequest. + * Constructs a new ValidateVersionShardRequest. * @memberof vtctldata - * @classdesc Represents a StopReplicationRequest. 
- * @implements IStopReplicationRequest + * @classdesc Represents a ValidateVersionShardRequest. + * @implements IValidateVersionShardRequest * @constructor - * @param {vtctldata.IStopReplicationRequest=} [properties] Properties to set + * @param {vtctldata.IValidateVersionShardRequest=} [properties] Properties to set */ - function StopReplicationRequest(properties) { + function ValidateVersionShardRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -136630,75 +159330,89 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * StopReplicationRequest tablet_alias. - * @member {topodata.ITabletAlias|null|undefined} tablet_alias - * @memberof vtctldata.StopReplicationRequest + * ValidateVersionShardRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.ValidateVersionShardRequest * @instance */ - StopReplicationRequest.prototype.tablet_alias = null; + ValidateVersionShardRequest.prototype.keyspace = ""; /** - * Creates a new StopReplicationRequest instance using the specified properties. + * ValidateVersionShardRequest shard. + * @member {string} shard + * @memberof vtctldata.ValidateVersionShardRequest + * @instance + */ + ValidateVersionShardRequest.prototype.shard = ""; + + /** + * Creates a new ValidateVersionShardRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.StopReplicationRequest + * @memberof vtctldata.ValidateVersionShardRequest * @static - * @param {vtctldata.IStopReplicationRequest=} [properties] Properties to set - * @returns {vtctldata.StopReplicationRequest} StopReplicationRequest instance + * @param {vtctldata.IValidateVersionShardRequest=} [properties] Properties to set + * @returns {vtctldata.ValidateVersionShardRequest} ValidateVersionShardRequest instance */ - StopReplicationRequest.create = function create(properties) { - return new StopReplicationRequest(properties); + ValidateVersionShardRequest.create = function create(properties) { + return new ValidateVersionShardRequest(properties); }; /** - * Encodes the specified StopReplicationRequest message. Does not implicitly {@link vtctldata.StopReplicationRequest.verify|verify} messages. + * Encodes the specified ValidateVersionShardRequest message. Does not implicitly {@link vtctldata.ValidateVersionShardRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.StopReplicationRequest + * @memberof vtctldata.ValidateVersionShardRequest * @static - * @param {vtctldata.IStopReplicationRequest} message StopReplicationRequest message or plain object to encode + * @param {vtctldata.IValidateVersionShardRequest} message ValidateVersionShardRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StopReplicationRequest.encode = function encode(message, writer) { + ValidateVersionShardRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) - $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); return writer; }; /** - * Encodes the specified StopReplicationRequest message, length delimited. Does not implicitly {@link vtctldata.StopReplicationRequest.verify|verify} messages. + * Encodes the specified ValidateVersionShardRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateVersionShardRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.StopReplicationRequest + * @memberof vtctldata.ValidateVersionShardRequest * @static - * @param {vtctldata.IStopReplicationRequest} message StopReplicationRequest message or plain object to encode + * @param {vtctldata.IValidateVersionShardRequest} message ValidateVersionShardRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StopReplicationRequest.encodeDelimited = function encodeDelimited(message, writer) { + ValidateVersionShardRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a StopReplicationRequest message from the specified reader or buffer. + * Decodes a ValidateVersionShardRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.StopReplicationRequest + * @memberof vtctldata.ValidateVersionShardRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.StopReplicationRequest} StopReplicationRequest + * @returns {vtctldata.ValidateVersionShardRequest} ValidateVersionShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StopReplicationRequest.decode = function decode(reader, length) { + ValidateVersionShardRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.StopReplicationRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ValidateVersionShardRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + message.keyspace = reader.string(); + break; + } + case 2: { + message.shard = reader.string(); break; } default: @@ -136710,126 +159424,132 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a StopReplicationRequest message from the specified reader or buffer, length delimited. + * Decodes a ValidateVersionShardRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.StopReplicationRequest + * @memberof vtctldata.ValidateVersionShardRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.StopReplicationRequest} StopReplicationRequest + * @returns {vtctldata.ValidateVersionShardRequest} ValidateVersionShardRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StopReplicationRequest.decodeDelimited = function decodeDelimited(reader) { + ValidateVersionShardRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a StopReplicationRequest message. + * Verifies a ValidateVersionShardRequest message. 
* @function verify - * @memberof vtctldata.StopReplicationRequest + * @memberof vtctldata.ValidateVersionShardRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - StopReplicationRequest.verify = function verify(message) { + ValidateVersionShardRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { - let error = $root.topodata.TabletAlias.verify(message.tablet_alias); - if (error) - return "tablet_alias." + error; - } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; return null; }; /** - * Creates a StopReplicationRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateVersionShardRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.StopReplicationRequest + * @memberof vtctldata.ValidateVersionShardRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.StopReplicationRequest} StopReplicationRequest + * @returns {vtctldata.ValidateVersionShardRequest} ValidateVersionShardRequest */ - StopReplicationRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.StopReplicationRequest) + ValidateVersionShardRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ValidateVersionShardRequest) return object; - let message = new $root.vtctldata.StopReplicationRequest(); - if (object.tablet_alias != null) { - if (typeof object.tablet_alias !== "object") - throw TypeError(".vtctldata.StopReplicationRequest.tablet_alias: object expected"); - message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); - } + let message = new $root.vtctldata.ValidateVersionShardRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); return message; }; /** - * Creates a plain object from a StopReplicationRequest message. Also converts values to other types if specified. + * Creates a plain object from a ValidateVersionShardRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.StopReplicationRequest + * @memberof vtctldata.ValidateVersionShardRequest * @static - * @param {vtctldata.StopReplicationRequest} message StopReplicationRequest + * @param {vtctldata.ValidateVersionShardRequest} message ValidateVersionShardRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - StopReplicationRequest.toObject = function toObject(message, options) { + ValidateVersionShardRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.tablet_alias = null; - if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) - object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (options.defaults) { + object.keyspace = ""; + object.shard = ""; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; return object; }; /** - * Converts this StopReplicationRequest to JSON. + * Converts this ValidateVersionShardRequest to JSON. 
* @function toJSON - * @memberof vtctldata.StopReplicationRequest + * @memberof vtctldata.ValidateVersionShardRequest * @instance * @returns {Object.} JSON object */ - StopReplicationRequest.prototype.toJSON = function toJSON() { + ValidateVersionShardRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for StopReplicationRequest + * Gets the default type url for ValidateVersionShardRequest * @function getTypeUrl - * @memberof vtctldata.StopReplicationRequest + * @memberof vtctldata.ValidateVersionShardRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - StopReplicationRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ValidateVersionShardRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.StopReplicationRequest"; + return typeUrlPrefix + "/vtctldata.ValidateVersionShardRequest"; }; - return StopReplicationRequest; + return ValidateVersionShardRequest; })(); - vtctldata.StopReplicationResponse = (function() { + vtctldata.ValidateVersionShardResponse = (function() { /** - * Properties of a StopReplicationResponse. + * Properties of a ValidateVersionShardResponse. * @memberof vtctldata - * @interface IStopReplicationResponse + * @interface IValidateVersionShardResponse + * @property {Array.|null} [results] ValidateVersionShardResponse results */ /** - * Constructs a new StopReplicationResponse. + * Constructs a new ValidateVersionShardResponse. * @memberof vtctldata - * @classdesc Represents a StopReplicationResponse. - * @implements IStopReplicationResponse + * @classdesc Represents a ValidateVersionShardResponse. 
+ * @implements IValidateVersionShardResponse * @constructor - * @param {vtctldata.IStopReplicationResponse=} [properties] Properties to set + * @param {vtctldata.IValidateVersionShardResponse=} [properties] Properties to set */ - function StopReplicationResponse(properties) { + function ValidateVersionShardResponse(properties) { + this.results = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -136837,63 +159557,80 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new StopReplicationResponse instance using the specified properties. + * ValidateVersionShardResponse results. + * @member {Array.} results + * @memberof vtctldata.ValidateVersionShardResponse + * @instance + */ + ValidateVersionShardResponse.prototype.results = $util.emptyArray; + + /** + * Creates a new ValidateVersionShardResponse instance using the specified properties. * @function create - * @memberof vtctldata.StopReplicationResponse + * @memberof vtctldata.ValidateVersionShardResponse * @static - * @param {vtctldata.IStopReplicationResponse=} [properties] Properties to set - * @returns {vtctldata.StopReplicationResponse} StopReplicationResponse instance + * @param {vtctldata.IValidateVersionShardResponse=} [properties] Properties to set + * @returns {vtctldata.ValidateVersionShardResponse} ValidateVersionShardResponse instance */ - StopReplicationResponse.create = function create(properties) { - return new StopReplicationResponse(properties); + ValidateVersionShardResponse.create = function create(properties) { + return new ValidateVersionShardResponse(properties); }; /** - * Encodes the specified StopReplicationResponse message. Does not implicitly {@link vtctldata.StopReplicationResponse.verify|verify} messages. + * Encodes the specified ValidateVersionShardResponse message. Does not implicitly {@link vtctldata.ValidateVersionShardResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.StopReplicationResponse + * @memberof vtctldata.ValidateVersionShardResponse * @static - * @param {vtctldata.IStopReplicationResponse} message StopReplicationResponse message or plain object to encode + * @param {vtctldata.IValidateVersionShardResponse} message ValidateVersionShardResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StopReplicationResponse.encode = function encode(message, writer) { + ValidateVersionShardResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.results != null && message.results.length) + for (let i = 0; i < message.results.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.results[i]); return writer; }; /** - * Encodes the specified StopReplicationResponse message, length delimited. Does not implicitly {@link vtctldata.StopReplicationResponse.verify|verify} messages. + * Encodes the specified ValidateVersionShardResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateVersionShardResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.StopReplicationResponse + * @memberof vtctldata.ValidateVersionShardResponse * @static - * @param {vtctldata.IStopReplicationResponse} message StopReplicationResponse message or plain object to encode + * @param {vtctldata.IValidateVersionShardResponse} message ValidateVersionShardResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - StopReplicationResponse.encodeDelimited = function encodeDelimited(message, writer) { + ValidateVersionShardResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a StopReplicationResponse message from the specified reader or buffer. 
+ * Decodes a ValidateVersionShardResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.StopReplicationResponse + * @memberof vtctldata.ValidateVersionShardResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.StopReplicationResponse} StopReplicationResponse + * @returns {vtctldata.ValidateVersionShardResponse} ValidateVersionShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StopReplicationResponse.decode = function decode(reader, length) { + ValidateVersionShardResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.StopReplicationResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateVersionShardResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + if (!(message.results && message.results.length)) + message.results = []; + message.results.push(reader.string()); + break; + } default: reader.skipType(tag & 7); break; @@ -136903,109 +159640,139 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a StopReplicationResponse message from the specified reader or buffer, length delimited. + * Decodes a ValidateVersionShardResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.StopReplicationResponse + * @memberof vtctldata.ValidateVersionShardResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.StopReplicationResponse} StopReplicationResponse + * @returns {vtctldata.ValidateVersionShardResponse} ValidateVersionShardResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - StopReplicationResponse.decodeDelimited = function decodeDelimited(reader) { + ValidateVersionShardResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a StopReplicationResponse message. + * Verifies a ValidateVersionShardResponse message. * @function verify - * @memberof vtctldata.StopReplicationResponse + * @memberof vtctldata.ValidateVersionShardResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - StopReplicationResponse.verify = function verify(message) { + ValidateVersionShardResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.results != null && message.hasOwnProperty("results")) { + if (!Array.isArray(message.results)) + return "results: array expected"; + for (let i = 0; i < message.results.length; ++i) + if (!$util.isString(message.results[i])) + return "results: string[] expected"; + } return null; }; /** - * Creates a StopReplicationResponse message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateVersionShardResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.StopReplicationResponse + * @memberof vtctldata.ValidateVersionShardResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.StopReplicationResponse} StopReplicationResponse + * @returns {vtctldata.ValidateVersionShardResponse} ValidateVersionShardResponse */ - StopReplicationResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.StopReplicationResponse) + ValidateVersionShardResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ValidateVersionShardResponse) return object; - return new $root.vtctldata.StopReplicationResponse(); + let message = new $root.vtctldata.ValidateVersionShardResponse(); + if (object.results) { + if (!Array.isArray(object.results)) + throw TypeError(".vtctldata.ValidateVersionShardResponse.results: array expected"); + message.results = []; + for (let i = 0; i < object.results.length; ++i) + message.results[i] = String(object.results[i]); + } + return message; }; /** - * Creates a plain object from a StopReplicationResponse message. Also converts values to other types if specified. + * Creates a plain object from a ValidateVersionShardResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.StopReplicationResponse + * @memberof vtctldata.ValidateVersionShardResponse * @static - * @param {vtctldata.StopReplicationResponse} message StopReplicationResponse + * @param {vtctldata.ValidateVersionShardResponse} message ValidateVersionShardResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - StopReplicationResponse.toObject = function toObject() { - return {}; + ValidateVersionShardResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.results = []; + if (message.results && message.results.length) { + object.results = []; + for (let j = 0; j < message.results.length; ++j) + object.results[j] = message.results[j]; + } + return object; }; /** - * Converts this StopReplicationResponse to JSON. + * Converts this ValidateVersionShardResponse to JSON. * @function toJSON - * @memberof vtctldata.StopReplicationResponse + * @memberof vtctldata.ValidateVersionShardResponse * @instance * @returns {Object.} JSON object */ - StopReplicationResponse.prototype.toJSON = function toJSON() { + ValidateVersionShardResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for StopReplicationResponse + * Gets the default type url for ValidateVersionShardResponse * @function getTypeUrl - * @memberof vtctldata.StopReplicationResponse + * @memberof vtctldata.ValidateVersionShardResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - StopReplicationResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ValidateVersionShardResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return 
typeUrlPrefix + "/vtctldata.StopReplicationResponse"; + return typeUrlPrefix + "/vtctldata.ValidateVersionShardResponse"; }; - return StopReplicationResponse; + return ValidateVersionShardResponse; })(); - vtctldata.TabletExternallyReparentedRequest = (function() { + vtctldata.ValidateVSchemaRequest = (function() { /** - * Properties of a TabletExternallyReparentedRequest. + * Properties of a ValidateVSchemaRequest. * @memberof vtctldata - * @interface ITabletExternallyReparentedRequest - * @property {topodata.ITabletAlias|null} [tablet] TabletExternallyReparentedRequest tablet + * @interface IValidateVSchemaRequest + * @property {string|null} [keyspace] ValidateVSchemaRequest keyspace + * @property {Array.|null} [shards] ValidateVSchemaRequest shards + * @property {Array.|null} [exclude_tables] ValidateVSchemaRequest exclude_tables + * @property {boolean|null} [include_views] ValidateVSchemaRequest include_views */ /** - * Constructs a new TabletExternallyReparentedRequest. + * Constructs a new ValidateVSchemaRequest. * @memberof vtctldata - * @classdesc Represents a TabletExternallyReparentedRequest. - * @implements ITabletExternallyReparentedRequest + * @classdesc Represents a ValidateVSchemaRequest. + * @implements IValidateVSchemaRequest * @constructor - * @param {vtctldata.ITabletExternallyReparentedRequest=} [properties] Properties to set + * @param {vtctldata.IValidateVSchemaRequest=} [properties] Properties to set */ - function TabletExternallyReparentedRequest(properties) { + function ValidateVSchemaRequest(properties) { + this.shards = []; + this.exclude_tables = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -137013,75 +159780,123 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * TabletExternallyReparentedRequest tablet. 
- * @member {topodata.ITabletAlias|null|undefined} tablet - * @memberof vtctldata.TabletExternallyReparentedRequest + * ValidateVSchemaRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.ValidateVSchemaRequest * @instance */ - TabletExternallyReparentedRequest.prototype.tablet = null; + ValidateVSchemaRequest.prototype.keyspace = ""; /** - * Creates a new TabletExternallyReparentedRequest instance using the specified properties. + * ValidateVSchemaRequest shards. + * @member {Array.} shards + * @memberof vtctldata.ValidateVSchemaRequest + * @instance + */ + ValidateVSchemaRequest.prototype.shards = $util.emptyArray; + + /** + * ValidateVSchemaRequest exclude_tables. + * @member {Array.} exclude_tables + * @memberof vtctldata.ValidateVSchemaRequest + * @instance + */ + ValidateVSchemaRequest.prototype.exclude_tables = $util.emptyArray; + + /** + * ValidateVSchemaRequest include_views. + * @member {boolean} include_views + * @memberof vtctldata.ValidateVSchemaRequest + * @instance + */ + ValidateVSchemaRequest.prototype.include_views = false; + + /** + * Creates a new ValidateVSchemaRequest instance using the specified properties. * @function create - * @memberof vtctldata.TabletExternallyReparentedRequest + * @memberof vtctldata.ValidateVSchemaRequest * @static - * @param {vtctldata.ITabletExternallyReparentedRequest=} [properties] Properties to set - * @returns {vtctldata.TabletExternallyReparentedRequest} TabletExternallyReparentedRequest instance + * @param {vtctldata.IValidateVSchemaRequest=} [properties] Properties to set + * @returns {vtctldata.ValidateVSchemaRequest} ValidateVSchemaRequest instance */ - TabletExternallyReparentedRequest.create = function create(properties) { - return new TabletExternallyReparentedRequest(properties); + ValidateVSchemaRequest.create = function create(properties) { + return new ValidateVSchemaRequest(properties); }; /** - * Encodes the specified TabletExternallyReparentedRequest message. 
Does not implicitly {@link vtctldata.TabletExternallyReparentedRequest.verify|verify} messages. + * Encodes the specified ValidateVSchemaRequest message. Does not implicitly {@link vtctldata.ValidateVSchemaRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.TabletExternallyReparentedRequest + * @memberof vtctldata.ValidateVSchemaRequest * @static - * @param {vtctldata.ITabletExternallyReparentedRequest} message TabletExternallyReparentedRequest message or plain object to encode + * @param {vtctldata.IValidateVSchemaRequest} message ValidateVSchemaRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - TabletExternallyReparentedRequest.encode = function encode(message, writer) { + ValidateVSchemaRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.tablet != null && Object.hasOwnProperty.call(message, "tablet")) - $root.topodata.TabletAlias.encode(message.tablet, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shards != null && message.shards.length) + for (let i = 0; i < message.shards.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shards[i]); + if (message.exclude_tables != null && message.exclude_tables.length) + for (let i = 0; i < message.exclude_tables.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.exclude_tables[i]); + if (message.include_views != null && Object.hasOwnProperty.call(message, "include_views")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.include_views); return writer; }; /** - * Encodes the specified TabletExternallyReparentedRequest message, length delimited. Does not implicitly {@link vtctldata.TabletExternallyReparentedRequest.verify|verify} messages. 
+ * Encodes the specified ValidateVSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateVSchemaRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.TabletExternallyReparentedRequest + * @memberof vtctldata.ValidateVSchemaRequest * @static - * @param {vtctldata.ITabletExternallyReparentedRequest} message TabletExternallyReparentedRequest message or plain object to encode + * @param {vtctldata.IValidateVSchemaRequest} message ValidateVSchemaRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - TabletExternallyReparentedRequest.encodeDelimited = function encodeDelimited(message, writer) { + ValidateVSchemaRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a TabletExternallyReparentedRequest message from the specified reader or buffer. + * Decodes a ValidateVSchemaRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.TabletExternallyReparentedRequest + * @memberof vtctldata.ValidateVSchemaRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.TabletExternallyReparentedRequest} TabletExternallyReparentedRequest + * @returns {vtctldata.ValidateVSchemaRequest} ValidateVSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TabletExternallyReparentedRequest.decode = function decode(reader, length) { + ValidateVSchemaRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.TabletExternallyReparentedRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateVSchemaRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tablet = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + message.keyspace = reader.string(); + break; + } + case 2: { + if (!(message.shards && message.shards.length)) + message.shards = []; + message.shards.push(reader.string()); + break; + } + case 3: { + if (!(message.exclude_tables && message.exclude_tables.length)) + message.exclude_tables = []; + message.exclude_tables.push(reader.string()); + break; + } + case 4: { + message.include_views = reader.bool(); break; } default: @@ -137093,130 +159908,176 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a TabletExternallyReparentedRequest message from the specified reader or buffer, length delimited. + * Decodes a ValidateVSchemaRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.TabletExternallyReparentedRequest + * @memberof vtctldata.ValidateVSchemaRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.TabletExternallyReparentedRequest} TabletExternallyReparentedRequest + * @returns {vtctldata.ValidateVSchemaRequest} ValidateVSchemaRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TabletExternallyReparentedRequest.decodeDelimited = function decodeDelimited(reader) { + ValidateVSchemaRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a TabletExternallyReparentedRequest message. 
+ * Verifies a ValidateVSchemaRequest message. * @function verify - * @memberof vtctldata.TabletExternallyReparentedRequest + * @memberof vtctldata.ValidateVSchemaRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - TabletExternallyReparentedRequest.verify = function verify(message) { + ValidateVSchemaRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet != null && message.hasOwnProperty("tablet")) { - let error = $root.topodata.TabletAlias.verify(message.tablet); - if (error) - return "tablet." + error; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shards != null && message.hasOwnProperty("shards")) { + if (!Array.isArray(message.shards)) + return "shards: array expected"; + for (let i = 0; i < message.shards.length; ++i) + if (!$util.isString(message.shards[i])) + return "shards: string[] expected"; + } + if (message.exclude_tables != null && message.hasOwnProperty("exclude_tables")) { + if (!Array.isArray(message.exclude_tables)) + return "exclude_tables: array expected"; + for (let i = 0; i < message.exclude_tables.length; ++i) + if (!$util.isString(message.exclude_tables[i])) + return "exclude_tables: string[] expected"; } + if (message.include_views != null && message.hasOwnProperty("include_views")) + if (typeof message.include_views !== "boolean") + return "include_views: boolean expected"; return null; }; /** - * Creates a TabletExternallyReparentedRequest message from a plain object. Also converts values to their respective internal types. + * Creates a ValidateVSchemaRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.TabletExternallyReparentedRequest + * @memberof vtctldata.ValidateVSchemaRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.TabletExternallyReparentedRequest} TabletExternallyReparentedRequest + * @returns {vtctldata.ValidateVSchemaRequest} ValidateVSchemaRequest */ - TabletExternallyReparentedRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.TabletExternallyReparentedRequest) + ValidateVSchemaRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ValidateVSchemaRequest) return object; - let message = new $root.vtctldata.TabletExternallyReparentedRequest(); - if (object.tablet != null) { - if (typeof object.tablet !== "object") - throw TypeError(".vtctldata.TabletExternallyReparentedRequest.tablet: object expected"); - message.tablet = $root.topodata.TabletAlias.fromObject(object.tablet); + let message = new $root.vtctldata.ValidateVSchemaRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shards) { + if (!Array.isArray(object.shards)) + throw TypeError(".vtctldata.ValidateVSchemaRequest.shards: array expected"); + message.shards = []; + for (let i = 0; i < object.shards.length; ++i) + message.shards[i] = String(object.shards[i]); + } + if (object.exclude_tables) { + if (!Array.isArray(object.exclude_tables)) + throw TypeError(".vtctldata.ValidateVSchemaRequest.exclude_tables: array expected"); + message.exclude_tables = []; + for (let i = 0; i < object.exclude_tables.length; ++i) + message.exclude_tables[i] = String(object.exclude_tables[i]); } + if (object.include_views != null) + message.include_views = Boolean(object.include_views); return message; }; /** - * Creates a plain object from a TabletExternallyReparentedRequest message. Also converts values to other types if specified. + * Creates a plain object from a ValidateVSchemaRequest message. 
Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.TabletExternallyReparentedRequest + * @memberof vtctldata.ValidateVSchemaRequest * @static - * @param {vtctldata.TabletExternallyReparentedRequest} message TabletExternallyReparentedRequest + * @param {vtctldata.ValidateVSchemaRequest} message ValidateVSchemaRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - TabletExternallyReparentedRequest.toObject = function toObject(message, options) { + ValidateVSchemaRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.tablet = null; - if (message.tablet != null && message.hasOwnProperty("tablet")) - object.tablet = $root.topodata.TabletAlias.toObject(message.tablet, options); + if (options.arrays || options.defaults) { + object.shards = []; + object.exclude_tables = []; + } + if (options.defaults) { + object.keyspace = ""; + object.include_views = false; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shards && message.shards.length) { + object.shards = []; + for (let j = 0; j < message.shards.length; ++j) + object.shards[j] = message.shards[j]; + } + if (message.exclude_tables && message.exclude_tables.length) { + object.exclude_tables = []; + for (let j = 0; j < message.exclude_tables.length; ++j) + object.exclude_tables[j] = message.exclude_tables[j]; + } + if (message.include_views != null && message.hasOwnProperty("include_views")) + object.include_views = message.include_views; return object; }; /** - * Converts this TabletExternallyReparentedRequest to JSON. + * Converts this ValidateVSchemaRequest to JSON. 
* @function toJSON - * @memberof vtctldata.TabletExternallyReparentedRequest + * @memberof vtctldata.ValidateVSchemaRequest * @instance * @returns {Object.} JSON object */ - TabletExternallyReparentedRequest.prototype.toJSON = function toJSON() { + ValidateVSchemaRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for TabletExternallyReparentedRequest + * Gets the default type url for ValidateVSchemaRequest * @function getTypeUrl - * @memberof vtctldata.TabletExternallyReparentedRequest + * @memberof vtctldata.ValidateVSchemaRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - TabletExternallyReparentedRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ValidateVSchemaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.TabletExternallyReparentedRequest"; + return typeUrlPrefix + "/vtctldata.ValidateVSchemaRequest"; }; - return TabletExternallyReparentedRequest; + return ValidateVSchemaRequest; })(); - vtctldata.TabletExternallyReparentedResponse = (function() { + vtctldata.ValidateVSchemaResponse = (function() { /** - * Properties of a TabletExternallyReparentedResponse. + * Properties of a ValidateVSchemaResponse. 
* @memberof vtctldata - * @interface ITabletExternallyReparentedResponse - * @property {string|null} [keyspace] TabletExternallyReparentedResponse keyspace - * @property {string|null} [shard] TabletExternallyReparentedResponse shard - * @property {topodata.ITabletAlias|null} [new_primary] TabletExternallyReparentedResponse new_primary - * @property {topodata.ITabletAlias|null} [old_primary] TabletExternallyReparentedResponse old_primary + * @interface IValidateVSchemaResponse + * @property {Array.|null} [results] ValidateVSchemaResponse results + * @property {Object.|null} [results_by_shard] ValidateVSchemaResponse results_by_shard */ /** - * Constructs a new TabletExternallyReparentedResponse. + * Constructs a new ValidateVSchemaResponse. * @memberof vtctldata - * @classdesc Represents a TabletExternallyReparentedResponse. - * @implements ITabletExternallyReparentedResponse + * @classdesc Represents a ValidateVSchemaResponse. + * @implements IValidateVSchemaResponse * @constructor - * @param {vtctldata.ITabletExternallyReparentedResponse=} [properties] Properties to set + * @param {vtctldata.IValidateVSchemaResponse=} [properties] Properties to set */ - function TabletExternallyReparentedResponse(properties) { + function ValidateVSchemaResponse(properties) { + this.results = []; + this.results_by_shard = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -137224,117 +160085,114 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * TabletExternallyReparentedResponse keyspace. - * @member {string} keyspace - * @memberof vtctldata.TabletExternallyReparentedResponse - * @instance - */ - TabletExternallyReparentedResponse.prototype.keyspace = ""; - - /** - * TabletExternallyReparentedResponse shard. 
- * @member {string} shard - * @memberof vtctldata.TabletExternallyReparentedResponse - * @instance - */ - TabletExternallyReparentedResponse.prototype.shard = ""; - - /** - * TabletExternallyReparentedResponse new_primary. - * @member {topodata.ITabletAlias|null|undefined} new_primary - * @memberof vtctldata.TabletExternallyReparentedResponse + * ValidateVSchemaResponse results. + * @member {Array.} results + * @memberof vtctldata.ValidateVSchemaResponse * @instance */ - TabletExternallyReparentedResponse.prototype.new_primary = null; + ValidateVSchemaResponse.prototype.results = $util.emptyArray; /** - * TabletExternallyReparentedResponse old_primary. - * @member {topodata.ITabletAlias|null|undefined} old_primary - * @memberof vtctldata.TabletExternallyReparentedResponse + * ValidateVSchemaResponse results_by_shard. + * @member {Object.} results_by_shard + * @memberof vtctldata.ValidateVSchemaResponse * @instance */ - TabletExternallyReparentedResponse.prototype.old_primary = null; + ValidateVSchemaResponse.prototype.results_by_shard = $util.emptyObject; /** - * Creates a new TabletExternallyReparentedResponse instance using the specified properties. + * Creates a new ValidateVSchemaResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.TabletExternallyReparentedResponse + * @memberof vtctldata.ValidateVSchemaResponse * @static - * @param {vtctldata.ITabletExternallyReparentedResponse=} [properties] Properties to set - * @returns {vtctldata.TabletExternallyReparentedResponse} TabletExternallyReparentedResponse instance + * @param {vtctldata.IValidateVSchemaResponse=} [properties] Properties to set + * @returns {vtctldata.ValidateVSchemaResponse} ValidateVSchemaResponse instance */ - TabletExternallyReparentedResponse.create = function create(properties) { - return new TabletExternallyReparentedResponse(properties); + ValidateVSchemaResponse.create = function create(properties) { + return new ValidateVSchemaResponse(properties); }; /** - * Encodes the specified TabletExternallyReparentedResponse message. Does not implicitly {@link vtctldata.TabletExternallyReparentedResponse.verify|verify} messages. + * Encodes the specified ValidateVSchemaResponse message. Does not implicitly {@link vtctldata.ValidateVSchemaResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.TabletExternallyReparentedResponse + * @memberof vtctldata.ValidateVSchemaResponse * @static - * @param {vtctldata.ITabletExternallyReparentedResponse} message TabletExternallyReparentedResponse message or plain object to encode + * @param {vtctldata.IValidateVSchemaResponse} message ValidateVSchemaResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - TabletExternallyReparentedResponse.encode = function encode(message, writer) { + ValidateVSchemaResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.new_primary != null && Object.hasOwnProperty.call(message, "new_primary")) - $root.topodata.TabletAlias.encode(message.new_primary, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); - if (message.old_primary != null && Object.hasOwnProperty.call(message, "old_primary")) - $root.topodata.TabletAlias.encode(message.old_primary, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); + if (message.results != null && message.results.length) + for (let i = 0; i < message.results.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.results[i]); + if (message.results_by_shard != null && Object.hasOwnProperty.call(message, "results_by_shard")) + for (let keys = Object.keys(message.results_by_shard), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.vtctldata.ValidateShardResponse.encode(message.results_by_shard[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } 
return writer; }; /** - * Encodes the specified TabletExternallyReparentedResponse message, length delimited. Does not implicitly {@link vtctldata.TabletExternallyReparentedResponse.verify|verify} messages. + * Encodes the specified ValidateVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateVSchemaResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.TabletExternallyReparentedResponse + * @memberof vtctldata.ValidateVSchemaResponse * @static - * @param {vtctldata.ITabletExternallyReparentedResponse} message TabletExternallyReparentedResponse message or plain object to encode + * @param {vtctldata.IValidateVSchemaResponse} message ValidateVSchemaResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - TabletExternallyReparentedResponse.encodeDelimited = function encodeDelimited(message, writer) { + ValidateVSchemaResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a TabletExternallyReparentedResponse message from the specified reader or buffer. + * Decodes a ValidateVSchemaResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.TabletExternallyReparentedResponse + * @memberof vtctldata.ValidateVSchemaResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.TabletExternallyReparentedResponse} TabletExternallyReparentedResponse + * @returns {vtctldata.ValidateVSchemaResponse} ValidateVSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TabletExternallyReparentedResponse.decode = function decode(reader, length) { + ValidateVSchemaResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.TabletExternallyReparentedResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateVSchemaResponse(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); + if (!(message.results && message.results.length)) + message.results = []; + message.results.push(reader.string()); break; } case 2: { - message.shard = reader.string(); - break; - } - case 3: { - message.new_primary = $root.topodata.TabletAlias.decode(reader, reader.uint32()); - break; - } - case 4: { - message.old_primary = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + if (message.results_by_shard === $util.emptyObject) + message.results_by_shard = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.vtctldata.ValidateShardResponse.decode(reader, reader.uint32()); + break; + default: + 
reader.skipType(tag2 & 7); + break; + } + } + message.results_by_shard[key] = value; break; } default: @@ -137346,248 +160204,517 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a TabletExternallyReparentedResponse message from the specified reader or buffer, length delimited. + * Decodes a ValidateVSchemaResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.TabletExternallyReparentedResponse + * @memberof vtctldata.ValidateVSchemaResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.TabletExternallyReparentedResponse} TabletExternallyReparentedResponse + * @returns {vtctldata.ValidateVSchemaResponse} ValidateVSchemaResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - TabletExternallyReparentedResponse.decodeDelimited = function decodeDelimited(reader) { + ValidateVSchemaResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a TabletExternallyReparentedResponse message. + * Verifies a ValidateVSchemaResponse message. 
* @function verify - * @memberof vtctldata.TabletExternallyReparentedResponse + * @memberof vtctldata.ValidateVSchemaResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - TabletExternallyReparentedResponse.verify = function verify(message) { + ValidateVSchemaResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.new_primary != null && message.hasOwnProperty("new_primary")) { - let error = $root.topodata.TabletAlias.verify(message.new_primary); - if (error) - return "new_primary." + error; + if (message.results != null && message.hasOwnProperty("results")) { + if (!Array.isArray(message.results)) + return "results: array expected"; + for (let i = 0; i < message.results.length; ++i) + if (!$util.isString(message.results[i])) + return "results: string[] expected"; } - if (message.old_primary != null && message.hasOwnProperty("old_primary")) { - let error = $root.topodata.TabletAlias.verify(message.old_primary); - if (error) - return "old_primary." + error; + if (message.results_by_shard != null && message.hasOwnProperty("results_by_shard")) { + if (!$util.isObject(message.results_by_shard)) + return "results_by_shard: object expected"; + let key = Object.keys(message.results_by_shard); + for (let i = 0; i < key.length; ++i) { + let error = $root.vtctldata.ValidateShardResponse.verify(message.results_by_shard[key[i]]); + if (error) + return "results_by_shard." + error; + } } return null; }; /** - * Creates a TabletExternallyReparentedResponse message from a plain object. 
Also converts values to their respective internal types. + * Creates a ValidateVSchemaResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.TabletExternallyReparentedResponse + * @memberof vtctldata.ValidateVSchemaResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.TabletExternallyReparentedResponse} TabletExternallyReparentedResponse + * @returns {vtctldata.ValidateVSchemaResponse} ValidateVSchemaResponse */ - TabletExternallyReparentedResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.TabletExternallyReparentedResponse) + ValidateVSchemaResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ValidateVSchemaResponse) return object; - let message = new $root.vtctldata.TabletExternallyReparentedResponse(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.new_primary != null) { - if (typeof object.new_primary !== "object") - throw TypeError(".vtctldata.TabletExternallyReparentedResponse.new_primary: object expected"); - message.new_primary = $root.topodata.TabletAlias.fromObject(object.new_primary); + let message = new $root.vtctldata.ValidateVSchemaResponse(); + if (object.results) { + if (!Array.isArray(object.results)) + throw TypeError(".vtctldata.ValidateVSchemaResponse.results: array expected"); + message.results = []; + for (let i = 0; i < object.results.length; ++i) + message.results[i] = String(object.results[i]); } - if (object.old_primary != null) { - if (typeof object.old_primary !== "object") - throw TypeError(".vtctldata.TabletExternallyReparentedResponse.old_primary: object expected"); - message.old_primary = $root.topodata.TabletAlias.fromObject(object.old_primary); + if (object.results_by_shard) { + if (typeof object.results_by_shard !== "object") + 
throw TypeError(".vtctldata.ValidateVSchemaResponse.results_by_shard: object expected"); + message.results_by_shard = {}; + for (let keys = Object.keys(object.results_by_shard), i = 0; i < keys.length; ++i) { + if (typeof object.results_by_shard[keys[i]] !== "object") + throw TypeError(".vtctldata.ValidateVSchemaResponse.results_by_shard: object expected"); + message.results_by_shard[keys[i]] = $root.vtctldata.ValidateShardResponse.fromObject(object.results_by_shard[keys[i]]); + } } return message; }; /** - * Creates a plain object from a TabletExternallyReparentedResponse message. Also converts values to other types if specified. + * Creates a plain object from a ValidateVSchemaResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.TabletExternallyReparentedResponse + * @memberof vtctldata.ValidateVSchemaResponse * @static - * @param {vtctldata.TabletExternallyReparentedResponse} message TabletExternallyReparentedResponse + * @param {vtctldata.ValidateVSchemaResponse} message ValidateVSchemaResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - TabletExternallyReparentedResponse.toObject = function toObject(message, options) { + ValidateVSchemaResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.keyspace = ""; - object.shard = ""; - object.new_primary = null; - object.old_primary = null; + if (options.arrays || options.defaults) + object.results = []; + if (options.objects || options.defaults) + object.results_by_shard = {}; + if (message.results && message.results.length) { + object.results = []; + for (let j = 0; j < message.results.length; ++j) + object.results[j] = message.results[j]; + } + let keys2; + if (message.results_by_shard && (keys2 = Object.keys(message.results_by_shard)).length) { + object.results_by_shard = {}; + for (let j = 0; j < keys2.length; 
++j) + object.results_by_shard[keys2[j]] = $root.vtctldata.ValidateShardResponse.toObject(message.results_by_shard[keys2[j]], options); } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.new_primary != null && message.hasOwnProperty("new_primary")) - object.new_primary = $root.topodata.TabletAlias.toObject(message.new_primary, options); - if (message.old_primary != null && message.hasOwnProperty("old_primary")) - object.old_primary = $root.topodata.TabletAlias.toObject(message.old_primary, options); return object; }; /** - * Converts this TabletExternallyReparentedResponse to JSON. + * Converts this ValidateVSchemaResponse to JSON. * @function toJSON - * @memberof vtctldata.TabletExternallyReparentedResponse + * @memberof vtctldata.ValidateVSchemaResponse * @instance * @returns {Object.} JSON object */ - TabletExternallyReparentedResponse.prototype.toJSON = function toJSON() { + ValidateVSchemaResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for TabletExternallyReparentedResponse + * Gets the default type url for ValidateVSchemaResponse * @function getTypeUrl - * @memberof vtctldata.TabletExternallyReparentedResponse + * @memberof vtctldata.ValidateVSchemaResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - TabletExternallyReparentedResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ValidateVSchemaResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.TabletExternallyReparentedResponse"; + return typeUrlPrefix + "/vtctldata.ValidateVSchemaResponse"; }; - return 
TabletExternallyReparentedResponse; + return ValidateVSchemaResponse; })(); - vtctldata.UpdateCellInfoRequest = (function() { + vtctldata.VDiffCreateRequest = (function() { + + /** + * Properties of a VDiffCreateRequest. + * @memberof vtctldata + * @interface IVDiffCreateRequest + * @property {string|null} [workflow] VDiffCreateRequest workflow + * @property {string|null} [target_keyspace] VDiffCreateRequest target_keyspace + * @property {string|null} [uuid] VDiffCreateRequest uuid + * @property {Array.|null} [source_cells] VDiffCreateRequest source_cells + * @property {Array.|null} [target_cells] VDiffCreateRequest target_cells + * @property {Array.|null} [tablet_types] VDiffCreateRequest tablet_types + * @property {tabletmanagerdata.TabletSelectionPreference|null} [tablet_selection_preference] VDiffCreateRequest tablet_selection_preference + * @property {Array.|null} [tables] VDiffCreateRequest tables + * @property {number|Long|null} [limit] VDiffCreateRequest limit + * @property {vttime.IDuration|null} [filtered_replication_wait_time] VDiffCreateRequest filtered_replication_wait_time + * @property {boolean|null} [debug_query] VDiffCreateRequest debug_query + * @property {boolean|null} [only_p_ks] VDiffCreateRequest only_p_ks + * @property {boolean|null} [update_table_stats] VDiffCreateRequest update_table_stats + * @property {number|Long|null} [max_extra_rows_to_compare] VDiffCreateRequest max_extra_rows_to_compare + * @property {boolean|null} [wait] VDiffCreateRequest wait + * @property {vttime.IDuration|null} [wait_update_interval] VDiffCreateRequest wait_update_interval + * @property {boolean|null} [auto_retry] VDiffCreateRequest auto_retry + * @property {boolean|null} [verbose] VDiffCreateRequest verbose + */ + + /** + * Constructs a new VDiffCreateRequest. + * @memberof vtctldata + * @classdesc Represents a VDiffCreateRequest. 
+ * @implements IVDiffCreateRequest + * @constructor + * @param {vtctldata.IVDiffCreateRequest=} [properties] Properties to set + */ + function VDiffCreateRequest(properties) { + this.source_cells = []; + this.target_cells = []; + this.tablet_types = []; + this.tables = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * VDiffCreateRequest workflow. + * @member {string} workflow + * @memberof vtctldata.VDiffCreateRequest + * @instance + */ + VDiffCreateRequest.prototype.workflow = ""; + + /** + * VDiffCreateRequest target_keyspace. + * @member {string} target_keyspace + * @memberof vtctldata.VDiffCreateRequest + * @instance + */ + VDiffCreateRequest.prototype.target_keyspace = ""; + + /** + * VDiffCreateRequest uuid. + * @member {string} uuid + * @memberof vtctldata.VDiffCreateRequest + * @instance + */ + VDiffCreateRequest.prototype.uuid = ""; + + /** + * VDiffCreateRequest source_cells. + * @member {Array.} source_cells + * @memberof vtctldata.VDiffCreateRequest + * @instance + */ + VDiffCreateRequest.prototype.source_cells = $util.emptyArray; + + /** + * VDiffCreateRequest target_cells. + * @member {Array.} target_cells + * @memberof vtctldata.VDiffCreateRequest + * @instance + */ + VDiffCreateRequest.prototype.target_cells = $util.emptyArray; + + /** + * VDiffCreateRequest tablet_types. + * @member {Array.} tablet_types + * @memberof vtctldata.VDiffCreateRequest + * @instance + */ + VDiffCreateRequest.prototype.tablet_types = $util.emptyArray; + + /** + * VDiffCreateRequest tablet_selection_preference. + * @member {tabletmanagerdata.TabletSelectionPreference} tablet_selection_preference + * @memberof vtctldata.VDiffCreateRequest + * @instance + */ + VDiffCreateRequest.prototype.tablet_selection_preference = 0; + + /** + * VDiffCreateRequest tables. 
+ * @member {Array.} tables + * @memberof vtctldata.VDiffCreateRequest + * @instance + */ + VDiffCreateRequest.prototype.tables = $util.emptyArray; + + /** + * VDiffCreateRequest limit. + * @member {number|Long} limit + * @memberof vtctldata.VDiffCreateRequest + * @instance + */ + VDiffCreateRequest.prototype.limit = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * VDiffCreateRequest filtered_replication_wait_time. + * @member {vttime.IDuration|null|undefined} filtered_replication_wait_time + * @memberof vtctldata.VDiffCreateRequest + * @instance + */ + VDiffCreateRequest.prototype.filtered_replication_wait_time = null; + + /** + * VDiffCreateRequest debug_query. + * @member {boolean} debug_query + * @memberof vtctldata.VDiffCreateRequest + * @instance + */ + VDiffCreateRequest.prototype.debug_query = false; + + /** + * VDiffCreateRequest only_p_ks. + * @member {boolean} only_p_ks + * @memberof vtctldata.VDiffCreateRequest + * @instance + */ + VDiffCreateRequest.prototype.only_p_ks = false; + + /** + * VDiffCreateRequest update_table_stats. + * @member {boolean} update_table_stats + * @memberof vtctldata.VDiffCreateRequest + * @instance + */ + VDiffCreateRequest.prototype.update_table_stats = false; /** - * Properties of an UpdateCellInfoRequest. - * @memberof vtctldata - * @interface IUpdateCellInfoRequest - * @property {string|null} [name] UpdateCellInfoRequest name - * @property {topodata.ICellInfo|null} [cell_info] UpdateCellInfoRequest cell_info + * VDiffCreateRequest max_extra_rows_to_compare. + * @member {number|Long} max_extra_rows_to_compare + * @memberof vtctldata.VDiffCreateRequest + * @instance */ + VDiffCreateRequest.prototype.max_extra_rows_to_compare = $util.Long ? $util.Long.fromBits(0,0,false) : 0; /** - * Constructs a new UpdateCellInfoRequest. - * @memberof vtctldata - * @classdesc Represents an UpdateCellInfoRequest. 
- * @implements IUpdateCellInfoRequest - * @constructor - * @param {vtctldata.IUpdateCellInfoRequest=} [properties] Properties to set + * VDiffCreateRequest wait. + * @member {boolean} wait + * @memberof vtctldata.VDiffCreateRequest + * @instance */ - function UpdateCellInfoRequest(properties) { - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + VDiffCreateRequest.prototype.wait = false; /** - * UpdateCellInfoRequest name. - * @member {string} name - * @memberof vtctldata.UpdateCellInfoRequest + * VDiffCreateRequest wait_update_interval. + * @member {vttime.IDuration|null|undefined} wait_update_interval + * @memberof vtctldata.VDiffCreateRequest * @instance */ - UpdateCellInfoRequest.prototype.name = ""; + VDiffCreateRequest.prototype.wait_update_interval = null; /** - * UpdateCellInfoRequest cell_info. - * @member {topodata.ICellInfo|null|undefined} cell_info - * @memberof vtctldata.UpdateCellInfoRequest + * VDiffCreateRequest auto_retry. + * @member {boolean} auto_retry + * @memberof vtctldata.VDiffCreateRequest * @instance */ - UpdateCellInfoRequest.prototype.cell_info = null; + VDiffCreateRequest.prototype.auto_retry = false; /** - * Creates a new UpdateCellInfoRequest instance using the specified properties. + * VDiffCreateRequest verbose. + * @member {boolean} verbose + * @memberof vtctldata.VDiffCreateRequest + * @instance + */ + VDiffCreateRequest.prototype.verbose = false; + + /** + * Creates a new VDiffCreateRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.UpdateCellInfoRequest + * @memberof vtctldata.VDiffCreateRequest * @static - * @param {vtctldata.IUpdateCellInfoRequest=} [properties] Properties to set - * @returns {vtctldata.UpdateCellInfoRequest} UpdateCellInfoRequest instance + * @param {vtctldata.IVDiffCreateRequest=} [properties] Properties to set + * @returns {vtctldata.VDiffCreateRequest} VDiffCreateRequest instance */ - UpdateCellInfoRequest.create = function create(properties) { - return new UpdateCellInfoRequest(properties); + VDiffCreateRequest.create = function create(properties) { + return new VDiffCreateRequest(properties); }; /** - * Encodes the specified UpdateCellInfoRequest message. Does not implicitly {@link vtctldata.UpdateCellInfoRequest.verify|verify} messages. + * Encodes the specified VDiffCreateRequest message. Does not implicitly {@link vtctldata.VDiffCreateRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.UpdateCellInfoRequest + * @memberof vtctldata.VDiffCreateRequest * @static - * @param {vtctldata.IUpdateCellInfoRequest} message UpdateCellInfoRequest message or plain object to encode + * @param {vtctldata.IVDiffCreateRequest} message VDiffCreateRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - UpdateCellInfoRequest.encode = function encode(message, writer) { + VDiffCreateRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.name != null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); - if (message.cell_info != null && Object.hasOwnProperty.call(message, "cell_info")) - $root.topodata.CellInfo.encode(message.cell_info, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 1, wireType 2 
=*/10).string(message.workflow); + if (message.target_keyspace != null && Object.hasOwnProperty.call(message, "target_keyspace")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.target_keyspace); + if (message.uuid != null && Object.hasOwnProperty.call(message, "uuid")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.uuid); + if (message.source_cells != null && message.source_cells.length) + for (let i = 0; i < message.source_cells.length; ++i) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.source_cells[i]); + if (message.target_cells != null && message.target_cells.length) + for (let i = 0; i < message.target_cells.length; ++i) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.target_cells[i]); + if (message.tablet_types != null && message.tablet_types.length) { + writer.uint32(/* id 6, wireType 2 =*/50).fork(); + for (let i = 0; i < message.tablet_types.length; ++i) + writer.int32(message.tablet_types[i]); + writer.ldelim(); + } + if (message.tablet_selection_preference != null && Object.hasOwnProperty.call(message, "tablet_selection_preference")) + writer.uint32(/* id 7, wireType 0 =*/56).int32(message.tablet_selection_preference); + if (message.tables != null && message.tables.length) + for (let i = 0; i < message.tables.length; ++i) + writer.uint32(/* id 8, wireType 2 =*/66).string(message.tables[i]); + if (message.limit != null && Object.hasOwnProperty.call(message, "limit")) + writer.uint32(/* id 9, wireType 0 =*/72).int64(message.limit); + if (message.filtered_replication_wait_time != null && Object.hasOwnProperty.call(message, "filtered_replication_wait_time")) + $root.vttime.Duration.encode(message.filtered_replication_wait_time, writer.uint32(/* id 10, wireType 2 =*/82).fork()).ldelim(); + if (message.debug_query != null && Object.hasOwnProperty.call(message, "debug_query")) + writer.uint32(/* id 11, wireType 0 =*/88).bool(message.debug_query); + if (message.only_p_ks != null && Object.hasOwnProperty.call(message, 
"only_p_ks")) + writer.uint32(/* id 12, wireType 0 =*/96).bool(message.only_p_ks); + if (message.update_table_stats != null && Object.hasOwnProperty.call(message, "update_table_stats")) + writer.uint32(/* id 13, wireType 0 =*/104).bool(message.update_table_stats); + if (message.max_extra_rows_to_compare != null && Object.hasOwnProperty.call(message, "max_extra_rows_to_compare")) + writer.uint32(/* id 14, wireType 0 =*/112).int64(message.max_extra_rows_to_compare); + if (message.wait != null && Object.hasOwnProperty.call(message, "wait")) + writer.uint32(/* id 15, wireType 0 =*/120).bool(message.wait); + if (message.wait_update_interval != null && Object.hasOwnProperty.call(message, "wait_update_interval")) + $root.vttime.Duration.encode(message.wait_update_interval, writer.uint32(/* id 16, wireType 2 =*/130).fork()).ldelim(); + if (message.auto_retry != null && Object.hasOwnProperty.call(message, "auto_retry")) + writer.uint32(/* id 17, wireType 0 =*/136).bool(message.auto_retry); + if (message.verbose != null && Object.hasOwnProperty.call(message, "verbose")) + writer.uint32(/* id 18, wireType 0 =*/144).bool(message.verbose); return writer; }; /** - * Encodes the specified UpdateCellInfoRequest message, length delimited. Does not implicitly {@link vtctldata.UpdateCellInfoRequest.verify|verify} messages. + * Encodes the specified VDiffCreateRequest message, length delimited. Does not implicitly {@link vtctldata.VDiffCreateRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.UpdateCellInfoRequest + * @memberof vtctldata.VDiffCreateRequest * @static - * @param {vtctldata.IUpdateCellInfoRequest} message UpdateCellInfoRequest message or plain object to encode + * @param {vtctldata.IVDiffCreateRequest} message VDiffCreateRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - UpdateCellInfoRequest.encodeDelimited = function encodeDelimited(message, writer) { + VDiffCreateRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an UpdateCellInfoRequest message from the specified reader or buffer. + * Decodes a VDiffCreateRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.UpdateCellInfoRequest + * @memberof vtctldata.VDiffCreateRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.UpdateCellInfoRequest} UpdateCellInfoRequest + * @returns {vtctldata.VDiffCreateRequest} VDiffCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateCellInfoRequest.decode = function decode(reader, length) { + VDiffCreateRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.UpdateCellInfoRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.VDiffCreateRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.name = reader.string(); + message.workflow = reader.string(); break; } case 2: { - message.cell_info = $root.topodata.CellInfo.decode(reader, reader.uint32()); + message.target_keyspace = reader.string(); + break; + } + case 3: { + message.uuid = reader.string(); + break; + } + case 4: { + if (!(message.source_cells && message.source_cells.length)) + message.source_cells = []; + message.source_cells.push(reader.string()); + break; + } + case 5: { + if (!(message.target_cells && message.target_cells.length)) + message.target_cells = []; + message.target_cells.push(reader.string()); + break; + } + case 6: { + if (!(message.tablet_types && message.tablet_types.length)) + message.tablet_types = []; + if ((tag & 7) === 2) { + let end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.tablet_types.push(reader.int32()); + } else + message.tablet_types.push(reader.int32()); + break; + } + case 7: { + message.tablet_selection_preference = reader.int32(); + break; + } + case 8: { + if (!(message.tables && message.tables.length)) + message.tables = []; + message.tables.push(reader.string()); + break; + } + case 9: { + message.limit = reader.int64(); + break; + } + case 10: { + message.filtered_replication_wait_time = $root.vttime.Duration.decode(reader, reader.uint32()); + break; + } + case 11: { + message.debug_query = reader.bool(); + break; + } + case 12: { + message.only_p_ks = reader.bool(); + break; + } + case 13: { + message.update_table_stats = reader.bool(); + break; + } + case 14: { + message.max_extra_rows_to_compare = reader.int64(); + break; + } + case 15: { + message.wait = reader.bool(); + break; + } + case 16: { + message.wait_update_interval = $root.vttime.Duration.decode(reader, reader.uint32()); + break; + } + case 17: { + message.auto_retry = reader.bool(); + 
break; + } + case 18: { + message.verbose = reader.bool(); break; } default: @@ -137599,137 +160726,435 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an UpdateCellInfoRequest message from the specified reader or buffer, length delimited. + * Decodes a VDiffCreateRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.UpdateCellInfoRequest + * @memberof vtctldata.VDiffCreateRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.UpdateCellInfoRequest} UpdateCellInfoRequest + * @returns {vtctldata.VDiffCreateRequest} VDiffCreateRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateCellInfoRequest.decodeDelimited = function decodeDelimited(reader) { + VDiffCreateRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an UpdateCellInfoRequest message. + * Verifies a VDiffCreateRequest message. 
* @function verify - * @memberof vtctldata.UpdateCellInfoRequest + * @memberof vtctldata.VDiffCreateRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - UpdateCellInfoRequest.verify = function verify(message) { + VDiffCreateRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; - if (message.cell_info != null && message.hasOwnProperty("cell_info")) { - let error = $root.topodata.CellInfo.verify(message.cell_info); + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + if (!$util.isString(message.target_keyspace)) + return "target_keyspace: string expected"; + if (message.uuid != null && message.hasOwnProperty("uuid")) + if (!$util.isString(message.uuid)) + return "uuid: string expected"; + if (message.source_cells != null && message.hasOwnProperty("source_cells")) { + if (!Array.isArray(message.source_cells)) + return "source_cells: array expected"; + for (let i = 0; i < message.source_cells.length; ++i) + if (!$util.isString(message.source_cells[i])) + return "source_cells: string[] expected"; + } + if (message.target_cells != null && message.hasOwnProperty("target_cells")) { + if (!Array.isArray(message.target_cells)) + return "target_cells: array expected"; + for (let i = 0; i < message.target_cells.length; ++i) + if (!$util.isString(message.target_cells[i])) + return "target_cells: string[] expected"; + } + if (message.tablet_types != null && message.hasOwnProperty("tablet_types")) { + if (!Array.isArray(message.tablet_types)) + return "tablet_types: array expected"; + for (let i = 0; i < 
message.tablet_types.length; ++i) + switch (message.tablet_types[i]) { + default: + return "tablet_types: enum value[] expected"; + case 0: + case 1: + case 1: + case 2: + case 3: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + } + } + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + switch (message.tablet_selection_preference) { + default: + return "tablet_selection_preference: enum value expected"; + case 0: + case 1: + case 3: + break; + } + if (message.tables != null && message.hasOwnProperty("tables")) { + if (!Array.isArray(message.tables)) + return "tables: array expected"; + for (let i = 0; i < message.tables.length; ++i) + if (!$util.isString(message.tables[i])) + return "tables: string[] expected"; + } + if (message.limit != null && message.hasOwnProperty("limit")) + if (!$util.isInteger(message.limit) && !(message.limit && $util.isInteger(message.limit.low) && $util.isInteger(message.limit.high))) + return "limit: integer|Long expected"; + if (message.filtered_replication_wait_time != null && message.hasOwnProperty("filtered_replication_wait_time")) { + let error = $root.vttime.Duration.verify(message.filtered_replication_wait_time); if (error) - return "cell_info." + error; + return "filtered_replication_wait_time." 
+ error; } + if (message.debug_query != null && message.hasOwnProperty("debug_query")) + if (typeof message.debug_query !== "boolean") + return "debug_query: boolean expected"; + if (message.only_p_ks != null && message.hasOwnProperty("only_p_ks")) + if (typeof message.only_p_ks !== "boolean") + return "only_p_ks: boolean expected"; + if (message.update_table_stats != null && message.hasOwnProperty("update_table_stats")) + if (typeof message.update_table_stats !== "boolean") + return "update_table_stats: boolean expected"; + if (message.max_extra_rows_to_compare != null && message.hasOwnProperty("max_extra_rows_to_compare")) + if (!$util.isInteger(message.max_extra_rows_to_compare) && !(message.max_extra_rows_to_compare && $util.isInteger(message.max_extra_rows_to_compare.low) && $util.isInteger(message.max_extra_rows_to_compare.high))) + return "max_extra_rows_to_compare: integer|Long expected"; + if (message.wait != null && message.hasOwnProperty("wait")) + if (typeof message.wait !== "boolean") + return "wait: boolean expected"; + if (message.wait_update_interval != null && message.hasOwnProperty("wait_update_interval")) { + let error = $root.vttime.Duration.verify(message.wait_update_interval); + if (error) + return "wait_update_interval." + error; + } + if (message.auto_retry != null && message.hasOwnProperty("auto_retry")) + if (typeof message.auto_retry !== "boolean") + return "auto_retry: boolean expected"; + if (message.verbose != null && message.hasOwnProperty("verbose")) + if (typeof message.verbose !== "boolean") + return "verbose: boolean expected"; return null; }; /** - * Creates an UpdateCellInfoRequest message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffCreateRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.UpdateCellInfoRequest + * @memberof vtctldata.VDiffCreateRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.UpdateCellInfoRequest} UpdateCellInfoRequest + * @returns {vtctldata.VDiffCreateRequest} VDiffCreateRequest */ - UpdateCellInfoRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.UpdateCellInfoRequest) + VDiffCreateRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.VDiffCreateRequest) return object; - let message = new $root.vtctldata.UpdateCellInfoRequest(); - if (object.name != null) - message.name = String(object.name); - if (object.cell_info != null) { - if (typeof object.cell_info !== "object") - throw TypeError(".vtctldata.UpdateCellInfoRequest.cell_info: object expected"); - message.cell_info = $root.topodata.CellInfo.fromObject(object.cell_info); + let message = new $root.vtctldata.VDiffCreateRequest(); + if (object.workflow != null) + message.workflow = String(object.workflow); + if (object.target_keyspace != null) + message.target_keyspace = String(object.target_keyspace); + if (object.uuid != null) + message.uuid = String(object.uuid); + if (object.source_cells) { + if (!Array.isArray(object.source_cells)) + throw TypeError(".vtctldata.VDiffCreateRequest.source_cells: array expected"); + message.source_cells = []; + for (let i = 0; i < object.source_cells.length; ++i) + message.source_cells[i] = String(object.source_cells[i]); + } + if (object.target_cells) { + if (!Array.isArray(object.target_cells)) + throw TypeError(".vtctldata.VDiffCreateRequest.target_cells: array expected"); + message.target_cells = []; + for (let i = 0; i < object.target_cells.length; ++i) + message.target_cells[i] = String(object.target_cells[i]); + } + if (object.tablet_types) { + if (!Array.isArray(object.tablet_types)) + throw TypeError(".vtctldata.VDiffCreateRequest.tablet_types: array expected"); + 
message.tablet_types = []; + for (let i = 0; i < object.tablet_types.length; ++i) + switch (object.tablet_types[i]) { + default: + if (typeof object.tablet_types[i] === "number") { + message.tablet_types[i] = object.tablet_types[i]; + break; + } + case "UNKNOWN": + case 0: + message.tablet_types[i] = 0; + break; + case "PRIMARY": + case 1: + message.tablet_types[i] = 1; + break; + case "MASTER": + case 1: + message.tablet_types[i] = 1; + break; + case "REPLICA": + case 2: + message.tablet_types[i] = 2; + break; + case "RDONLY": + case 3: + message.tablet_types[i] = 3; + break; + case "BATCH": + case 3: + message.tablet_types[i] = 3; + break; + case "SPARE": + case 4: + message.tablet_types[i] = 4; + break; + case "EXPERIMENTAL": + case 5: + message.tablet_types[i] = 5; + break; + case "BACKUP": + case 6: + message.tablet_types[i] = 6; + break; + case "RESTORE": + case 7: + message.tablet_types[i] = 7; + break; + case "DRAINED": + case 8: + message.tablet_types[i] = 8; + break; + } + } + switch (object.tablet_selection_preference) { + default: + if (typeof object.tablet_selection_preference === "number") { + message.tablet_selection_preference = object.tablet_selection_preference; + break; + } + break; + case "ANY": + case 0: + message.tablet_selection_preference = 0; + break; + case "INORDER": + case 1: + message.tablet_selection_preference = 1; + break; + case "UNKNOWN": + case 3: + message.tablet_selection_preference = 3; + break; + } + if (object.tables) { + if (!Array.isArray(object.tables)) + throw TypeError(".vtctldata.VDiffCreateRequest.tables: array expected"); + message.tables = []; + for (let i = 0; i < object.tables.length; ++i) + message.tables[i] = String(object.tables[i]); + } + if (object.limit != null) + if ($util.Long) + (message.limit = $util.Long.fromValue(object.limit)).unsigned = false; + else if (typeof object.limit === "string") + message.limit = parseInt(object.limit, 10); + else if (typeof object.limit === "number") + message.limit = 
object.limit; + else if (typeof object.limit === "object") + message.limit = new $util.LongBits(object.limit.low >>> 0, object.limit.high >>> 0).toNumber(); + if (object.filtered_replication_wait_time != null) { + if (typeof object.filtered_replication_wait_time !== "object") + throw TypeError(".vtctldata.VDiffCreateRequest.filtered_replication_wait_time: object expected"); + message.filtered_replication_wait_time = $root.vttime.Duration.fromObject(object.filtered_replication_wait_time); + } + if (object.debug_query != null) + message.debug_query = Boolean(object.debug_query); + if (object.only_p_ks != null) + message.only_p_ks = Boolean(object.only_p_ks); + if (object.update_table_stats != null) + message.update_table_stats = Boolean(object.update_table_stats); + if (object.max_extra_rows_to_compare != null) + if ($util.Long) + (message.max_extra_rows_to_compare = $util.Long.fromValue(object.max_extra_rows_to_compare)).unsigned = false; + else if (typeof object.max_extra_rows_to_compare === "string") + message.max_extra_rows_to_compare = parseInt(object.max_extra_rows_to_compare, 10); + else if (typeof object.max_extra_rows_to_compare === "number") + message.max_extra_rows_to_compare = object.max_extra_rows_to_compare; + else if (typeof object.max_extra_rows_to_compare === "object") + message.max_extra_rows_to_compare = new $util.LongBits(object.max_extra_rows_to_compare.low >>> 0, object.max_extra_rows_to_compare.high >>> 0).toNumber(); + if (object.wait != null) + message.wait = Boolean(object.wait); + if (object.wait_update_interval != null) { + if (typeof object.wait_update_interval !== "object") + throw TypeError(".vtctldata.VDiffCreateRequest.wait_update_interval: object expected"); + message.wait_update_interval = $root.vttime.Duration.fromObject(object.wait_update_interval); } + if (object.auto_retry != null) + message.auto_retry = Boolean(object.auto_retry); + if (object.verbose != null) + message.verbose = Boolean(object.verbose); return message; }; /** 
- * Creates a plain object from an UpdateCellInfoRequest message. Also converts values to other types if specified. + * Creates a plain object from a VDiffCreateRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.UpdateCellInfoRequest + * @memberof vtctldata.VDiffCreateRequest * @static - * @param {vtctldata.UpdateCellInfoRequest} message UpdateCellInfoRequest + * @param {vtctldata.VDiffCreateRequest} message VDiffCreateRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - UpdateCellInfoRequest.toObject = function toObject(message, options) { + VDiffCreateRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) { + object.source_cells = []; + object.target_cells = []; + object.tablet_types = []; + object.tables = []; + } if (options.defaults) { - object.name = ""; - object.cell_info = null; + object.workflow = ""; + object.target_keyspace = ""; + object.uuid = ""; + object.tablet_selection_preference = options.enums === String ? "ANY" : 0; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.limit = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.limit = options.longs === String ? "0" : 0; + object.filtered_replication_wait_time = null; + object.debug_query = false; + object.only_p_ks = false; + object.update_table_stats = false; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.max_extra_rows_to_compare = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.max_extra_rows_to_compare = options.longs === String ? 
"0" : 0; + object.wait = false; + object.wait_update_interval = null; + object.auto_retry = false; + object.verbose = false; } - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; - if (message.cell_info != null && message.hasOwnProperty("cell_info")) - object.cell_info = $root.topodata.CellInfo.toObject(message.cell_info, options); + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + object.target_keyspace = message.target_keyspace; + if (message.uuid != null && message.hasOwnProperty("uuid")) + object.uuid = message.uuid; + if (message.source_cells && message.source_cells.length) { + object.source_cells = []; + for (let j = 0; j < message.source_cells.length; ++j) + object.source_cells[j] = message.source_cells[j]; + } + if (message.target_cells && message.target_cells.length) { + object.target_cells = []; + for (let j = 0; j < message.target_cells.length; ++j) + object.target_cells[j] = message.target_cells[j]; + } + if (message.tablet_types && message.tablet_types.length) { + object.tablet_types = []; + for (let j = 0; j < message.tablet_types.length; ++j) + object.tablet_types[j] = options.enums === String ? $root.topodata.TabletType[message.tablet_types[j]] === undefined ? message.tablet_types[j] : $root.topodata.TabletType[message.tablet_types[j]] : message.tablet_types[j]; + } + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + object.tablet_selection_preference = options.enums === String ? $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] === undefined ? 
message.tablet_selection_preference : $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] : message.tablet_selection_preference; + if (message.tables && message.tables.length) { + object.tables = []; + for (let j = 0; j < message.tables.length; ++j) + object.tables[j] = message.tables[j]; + } + if (message.limit != null && message.hasOwnProperty("limit")) + if (typeof message.limit === "number") + object.limit = options.longs === String ? String(message.limit) : message.limit; + else + object.limit = options.longs === String ? $util.Long.prototype.toString.call(message.limit) : options.longs === Number ? new $util.LongBits(message.limit.low >>> 0, message.limit.high >>> 0).toNumber() : message.limit; + if (message.filtered_replication_wait_time != null && message.hasOwnProperty("filtered_replication_wait_time")) + object.filtered_replication_wait_time = $root.vttime.Duration.toObject(message.filtered_replication_wait_time, options); + if (message.debug_query != null && message.hasOwnProperty("debug_query")) + object.debug_query = message.debug_query; + if (message.only_p_ks != null && message.hasOwnProperty("only_p_ks")) + object.only_p_ks = message.only_p_ks; + if (message.update_table_stats != null && message.hasOwnProperty("update_table_stats")) + object.update_table_stats = message.update_table_stats; + if (message.max_extra_rows_to_compare != null && message.hasOwnProperty("max_extra_rows_to_compare")) + if (typeof message.max_extra_rows_to_compare === "number") + object.max_extra_rows_to_compare = options.longs === String ? String(message.max_extra_rows_to_compare) : message.max_extra_rows_to_compare; + else + object.max_extra_rows_to_compare = options.longs === String ? $util.Long.prototype.toString.call(message.max_extra_rows_to_compare) : options.longs === Number ? 
new $util.LongBits(message.max_extra_rows_to_compare.low >>> 0, message.max_extra_rows_to_compare.high >>> 0).toNumber() : message.max_extra_rows_to_compare; + if (message.wait != null && message.hasOwnProperty("wait")) + object.wait = message.wait; + if (message.wait_update_interval != null && message.hasOwnProperty("wait_update_interval")) + object.wait_update_interval = $root.vttime.Duration.toObject(message.wait_update_interval, options); + if (message.auto_retry != null && message.hasOwnProperty("auto_retry")) + object.auto_retry = message.auto_retry; + if (message.verbose != null && message.hasOwnProperty("verbose")) + object.verbose = message.verbose; return object; }; /** - * Converts this UpdateCellInfoRequest to JSON. + * Converts this VDiffCreateRequest to JSON. * @function toJSON - * @memberof vtctldata.UpdateCellInfoRequest + * @memberof vtctldata.VDiffCreateRequest * @instance * @returns {Object.} JSON object */ - UpdateCellInfoRequest.prototype.toJSON = function toJSON() { + VDiffCreateRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for UpdateCellInfoRequest + * Gets the default type url for VDiffCreateRequest * @function getTypeUrl - * @memberof vtctldata.UpdateCellInfoRequest + * @memberof vtctldata.VDiffCreateRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - UpdateCellInfoRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VDiffCreateRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.UpdateCellInfoRequest"; + return typeUrlPrefix + "/vtctldata.VDiffCreateRequest"; }; - return UpdateCellInfoRequest; + return VDiffCreateRequest; })(); - vtctldata.UpdateCellInfoResponse = (function() { + vtctldata.VDiffCreateResponse 
= (function() { /** - * Properties of an UpdateCellInfoResponse. + * Properties of a VDiffCreateResponse. * @memberof vtctldata - * @interface IUpdateCellInfoResponse - * @property {string|null} [name] UpdateCellInfoResponse name - * @property {topodata.ICellInfo|null} [cell_info] UpdateCellInfoResponse cell_info + * @interface IVDiffCreateResponse + * @property {string|null} [UUID] VDiffCreateResponse UUID */ /** - * Constructs a new UpdateCellInfoResponse. + * Constructs a new VDiffCreateResponse. * @memberof vtctldata - * @classdesc Represents an UpdateCellInfoResponse. - * @implements IUpdateCellInfoResponse + * @classdesc Represents a VDiffCreateResponse. + * @implements IVDiffCreateResponse * @constructor - * @param {vtctldata.IUpdateCellInfoResponse=} [properties] Properties to set + * @param {vtctldata.IVDiffCreateResponse=} [properties] Properties to set */ - function UpdateCellInfoResponse(properties) { + function VDiffCreateResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -137737,89 +161162,75 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * UpdateCellInfoResponse name. - * @member {string} name - * @memberof vtctldata.UpdateCellInfoResponse - * @instance - */ - UpdateCellInfoResponse.prototype.name = ""; - - /** - * UpdateCellInfoResponse cell_info. - * @member {topodata.ICellInfo|null|undefined} cell_info - * @memberof vtctldata.UpdateCellInfoResponse + * VDiffCreateResponse UUID. + * @member {string} UUID + * @memberof vtctldata.VDiffCreateResponse * @instance */ - UpdateCellInfoResponse.prototype.cell_info = null; + VDiffCreateResponse.prototype.UUID = ""; /** - * Creates a new UpdateCellInfoResponse instance using the specified properties. + * Creates a new VDiffCreateResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.UpdateCellInfoResponse + * @memberof vtctldata.VDiffCreateResponse * @static - * @param {vtctldata.IUpdateCellInfoResponse=} [properties] Properties to set - * @returns {vtctldata.UpdateCellInfoResponse} UpdateCellInfoResponse instance + * @param {vtctldata.IVDiffCreateResponse=} [properties] Properties to set + * @returns {vtctldata.VDiffCreateResponse} VDiffCreateResponse instance */ - UpdateCellInfoResponse.create = function create(properties) { - return new UpdateCellInfoResponse(properties); + VDiffCreateResponse.create = function create(properties) { + return new VDiffCreateResponse(properties); }; /** - * Encodes the specified UpdateCellInfoResponse message. Does not implicitly {@link vtctldata.UpdateCellInfoResponse.verify|verify} messages. + * Encodes the specified VDiffCreateResponse message. Does not implicitly {@link vtctldata.VDiffCreateResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.UpdateCellInfoResponse + * @memberof vtctldata.VDiffCreateResponse * @static - * @param {vtctldata.IUpdateCellInfoResponse} message UpdateCellInfoResponse message or plain object to encode + * @param {vtctldata.IVDiffCreateResponse} message VDiffCreateResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - UpdateCellInfoResponse.encode = function encode(message, writer) { + VDiffCreateResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.name != null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); - if (message.cell_info != null && Object.hasOwnProperty.call(message, "cell_info")) - $root.topodata.CellInfo.encode(message.cell_info, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.UUID != null && Object.hasOwnProperty.call(message, "UUID")) + writer.uint32(/* id 1, wireType 2 
=*/10).string(message.UUID); return writer; }; /** - * Encodes the specified UpdateCellInfoResponse message, length delimited. Does not implicitly {@link vtctldata.UpdateCellInfoResponse.verify|verify} messages. + * Encodes the specified VDiffCreateResponse message, length delimited. Does not implicitly {@link vtctldata.VDiffCreateResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.UpdateCellInfoResponse + * @memberof vtctldata.VDiffCreateResponse * @static - * @param {vtctldata.IUpdateCellInfoResponse} message UpdateCellInfoResponse message or plain object to encode + * @param {vtctldata.IVDiffCreateResponse} message VDiffCreateResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - UpdateCellInfoResponse.encodeDelimited = function encodeDelimited(message, writer) { + VDiffCreateResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an UpdateCellInfoResponse message from the specified reader or buffer. + * Decodes a VDiffCreateResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.UpdateCellInfoResponse + * @memberof vtctldata.VDiffCreateResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.UpdateCellInfoResponse} UpdateCellInfoResponse + * @returns {vtctldata.VDiffCreateResponse} VDiffCreateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateCellInfoResponse.decode = function decode(reader, length) { + VDiffCreateResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.UpdateCellInfoResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.VDiffCreateResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.name = reader.string(); - break; - } - case 2: { - message.cell_info = $root.topodata.CellInfo.decode(reader, reader.uint32()); + message.UUID = reader.string(); break; } default: @@ -137831,137 +161242,124 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an UpdateCellInfoResponse message from the specified reader or buffer, length delimited. + * Decodes a VDiffCreateResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.UpdateCellInfoResponse + * @memberof vtctldata.VDiffCreateResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.UpdateCellInfoResponse} UpdateCellInfoResponse + * @returns {vtctldata.VDiffCreateResponse} VDiffCreateResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateCellInfoResponse.decodeDelimited = function decodeDelimited(reader) { + VDiffCreateResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an UpdateCellInfoResponse message. + * Verifies a VDiffCreateResponse message. 
* @function verify - * @memberof vtctldata.UpdateCellInfoResponse + * @memberof vtctldata.VDiffCreateResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - UpdateCellInfoResponse.verify = function verify(message) { + VDiffCreateResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; - if (message.cell_info != null && message.hasOwnProperty("cell_info")) { - let error = $root.topodata.CellInfo.verify(message.cell_info); - if (error) - return "cell_info." + error; - } + if (message.UUID != null && message.hasOwnProperty("UUID")) + if (!$util.isString(message.UUID)) + return "UUID: string expected"; return null; }; /** - * Creates an UpdateCellInfoResponse message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffCreateResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.UpdateCellInfoResponse + * @memberof vtctldata.VDiffCreateResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.UpdateCellInfoResponse} UpdateCellInfoResponse + * @returns {vtctldata.VDiffCreateResponse} VDiffCreateResponse */ - UpdateCellInfoResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.UpdateCellInfoResponse) + VDiffCreateResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.VDiffCreateResponse) return object; - let message = new $root.vtctldata.UpdateCellInfoResponse(); - if (object.name != null) - message.name = String(object.name); - if (object.cell_info != null) { - if (typeof object.cell_info !== "object") - throw TypeError(".vtctldata.UpdateCellInfoResponse.cell_info: object expected"); - message.cell_info = $root.topodata.CellInfo.fromObject(object.cell_info); - } + let message = new $root.vtctldata.VDiffCreateResponse(); + if (object.UUID != null) + message.UUID = String(object.UUID); return message; }; /** - * Creates a plain object from an UpdateCellInfoResponse message. Also converts values to other types if specified. + * Creates a plain object from a VDiffCreateResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.UpdateCellInfoResponse + * @memberof vtctldata.VDiffCreateResponse * @static - * @param {vtctldata.UpdateCellInfoResponse} message UpdateCellInfoResponse + * @param {vtctldata.VDiffCreateResponse} message VDiffCreateResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - UpdateCellInfoResponse.toObject = function toObject(message, options) { + VDiffCreateResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) { - object.name = ""; - object.cell_info = null; - } - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; - if (message.cell_info != null && message.hasOwnProperty("cell_info")) - object.cell_info = $root.topodata.CellInfo.toObject(message.cell_info, options); + if (options.defaults) + object.UUID = ""; + if (message.UUID != null && message.hasOwnProperty("UUID")) + object.UUID = message.UUID; return object; }; /** - * Converts this UpdateCellInfoResponse to JSON. + * Converts this VDiffCreateResponse to JSON. 
* @function toJSON - * @memberof vtctldata.UpdateCellInfoResponse + * @memberof vtctldata.VDiffCreateResponse * @instance * @returns {Object.} JSON object */ - UpdateCellInfoResponse.prototype.toJSON = function toJSON() { + VDiffCreateResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for UpdateCellInfoResponse + * Gets the default type url for VDiffCreateResponse * @function getTypeUrl - * @memberof vtctldata.UpdateCellInfoResponse + * @memberof vtctldata.VDiffCreateResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - UpdateCellInfoResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VDiffCreateResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.UpdateCellInfoResponse"; + return typeUrlPrefix + "/vtctldata.VDiffCreateResponse"; }; - return UpdateCellInfoResponse; + return VDiffCreateResponse; })(); - vtctldata.UpdateCellsAliasRequest = (function() { + vtctldata.VDiffDeleteRequest = (function() { /** - * Properties of an UpdateCellsAliasRequest. + * Properties of a VDiffDeleteRequest. * @memberof vtctldata - * @interface IUpdateCellsAliasRequest - * @property {string|null} [name] UpdateCellsAliasRequest name - * @property {topodata.ICellsAlias|null} [cells_alias] UpdateCellsAliasRequest cells_alias + * @interface IVDiffDeleteRequest + * @property {string|null} [workflow] VDiffDeleteRequest workflow + * @property {string|null} [target_keyspace] VDiffDeleteRequest target_keyspace + * @property {string|null} [arg] VDiffDeleteRequest arg */ /** - * Constructs a new UpdateCellsAliasRequest. + * Constructs a new VDiffDeleteRequest. * @memberof vtctldata - * @classdesc Represents an UpdateCellsAliasRequest. 
- * @implements IUpdateCellsAliasRequest + * @classdesc Represents a VDiffDeleteRequest. + * @implements IVDiffDeleteRequest * @constructor - * @param {vtctldata.IUpdateCellsAliasRequest=} [properties] Properties to set + * @param {vtctldata.IVDiffDeleteRequest=} [properties] Properties to set */ - function UpdateCellsAliasRequest(properties) { + function VDiffDeleteRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -137969,89 +161367,103 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * UpdateCellsAliasRequest name. - * @member {string} name - * @memberof vtctldata.UpdateCellsAliasRequest + * VDiffDeleteRequest workflow. + * @member {string} workflow + * @memberof vtctldata.VDiffDeleteRequest * @instance */ - UpdateCellsAliasRequest.prototype.name = ""; + VDiffDeleteRequest.prototype.workflow = ""; /** - * UpdateCellsAliasRequest cells_alias. - * @member {topodata.ICellsAlias|null|undefined} cells_alias - * @memberof vtctldata.UpdateCellsAliasRequest + * VDiffDeleteRequest target_keyspace. + * @member {string} target_keyspace + * @memberof vtctldata.VDiffDeleteRequest * @instance */ - UpdateCellsAliasRequest.prototype.cells_alias = null; + VDiffDeleteRequest.prototype.target_keyspace = ""; /** - * Creates a new UpdateCellsAliasRequest instance using the specified properties. + * VDiffDeleteRequest arg. + * @member {string} arg + * @memberof vtctldata.VDiffDeleteRequest + * @instance + */ + VDiffDeleteRequest.prototype.arg = ""; + + /** + * Creates a new VDiffDeleteRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.UpdateCellsAliasRequest + * @memberof vtctldata.VDiffDeleteRequest * @static - * @param {vtctldata.IUpdateCellsAliasRequest=} [properties] Properties to set - * @returns {vtctldata.UpdateCellsAliasRequest} UpdateCellsAliasRequest instance + * @param {vtctldata.IVDiffDeleteRequest=} [properties] Properties to set + * @returns {vtctldata.VDiffDeleteRequest} VDiffDeleteRequest instance */ - UpdateCellsAliasRequest.create = function create(properties) { - return new UpdateCellsAliasRequest(properties); + VDiffDeleteRequest.create = function create(properties) { + return new VDiffDeleteRequest(properties); }; /** - * Encodes the specified UpdateCellsAliasRequest message. Does not implicitly {@link vtctldata.UpdateCellsAliasRequest.verify|verify} messages. + * Encodes the specified VDiffDeleteRequest message. Does not implicitly {@link vtctldata.VDiffDeleteRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.UpdateCellsAliasRequest + * @memberof vtctldata.VDiffDeleteRequest * @static - * @param {vtctldata.IUpdateCellsAliasRequest} message UpdateCellsAliasRequest message or plain object to encode + * @param {vtctldata.IVDiffDeleteRequest} message VDiffDeleteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - UpdateCellsAliasRequest.encode = function encode(message, writer) { + VDiffDeleteRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.name != null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); - if (message.cells_alias != null && Object.hasOwnProperty.call(message, "cells_alias")) - $root.topodata.CellsAlias.encode(message.cells_alias, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 
1, wireType 2 =*/10).string(message.workflow); + if (message.target_keyspace != null && Object.hasOwnProperty.call(message, "target_keyspace")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.target_keyspace); + if (message.arg != null && Object.hasOwnProperty.call(message, "arg")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.arg); return writer; }; /** - * Encodes the specified UpdateCellsAliasRequest message, length delimited. Does not implicitly {@link vtctldata.UpdateCellsAliasRequest.verify|verify} messages. + * Encodes the specified VDiffDeleteRequest message, length delimited. Does not implicitly {@link vtctldata.VDiffDeleteRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.UpdateCellsAliasRequest + * @memberof vtctldata.VDiffDeleteRequest * @static - * @param {vtctldata.IUpdateCellsAliasRequest} message UpdateCellsAliasRequest message or plain object to encode + * @param {vtctldata.IVDiffDeleteRequest} message VDiffDeleteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - UpdateCellsAliasRequest.encodeDelimited = function encodeDelimited(message, writer) { + VDiffDeleteRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an UpdateCellsAliasRequest message from the specified reader or buffer. + * Decodes a VDiffDeleteRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.UpdateCellsAliasRequest + * @memberof vtctldata.VDiffDeleteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.UpdateCellsAliasRequest} UpdateCellsAliasRequest + * @returns {vtctldata.VDiffDeleteRequest} VDiffDeleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateCellsAliasRequest.decode = function decode(reader, length) { + VDiffDeleteRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.UpdateCellsAliasRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.VDiffDeleteRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.name = reader.string(); + message.workflow = reader.string(); break; } case 2: { - message.cells_alias = $root.topodata.CellsAlias.decode(reader, reader.uint32()); + message.target_keyspace = reader.string(); + break; + } + case 3: { + message.arg = reader.string(); break; } default: @@ -138063,137 +161475,138 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an UpdateCellsAliasRequest message from the specified reader or buffer, length delimited. + * Decodes a VDiffDeleteRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.UpdateCellsAliasRequest + * @memberof vtctldata.VDiffDeleteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.UpdateCellsAliasRequest} UpdateCellsAliasRequest + * @returns {vtctldata.VDiffDeleteRequest} VDiffDeleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateCellsAliasRequest.decodeDelimited = function decodeDelimited(reader) { + VDiffDeleteRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an UpdateCellsAliasRequest message. + * Verifies a VDiffDeleteRequest message. * @function verify - * @memberof vtctldata.UpdateCellsAliasRequest + * @memberof vtctldata.VDiffDeleteRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - UpdateCellsAliasRequest.verify = function verify(message) { + VDiffDeleteRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; - if (message.cells_alias != null && message.hasOwnProperty("cells_alias")) { - let error = $root.topodata.CellsAlias.verify(message.cells_alias); - if (error) - return "cells_alias." 
+ error; - } + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + if (!$util.isString(message.target_keyspace)) + return "target_keyspace: string expected"; + if (message.arg != null && message.hasOwnProperty("arg")) + if (!$util.isString(message.arg)) + return "arg: string expected"; return null; }; /** - * Creates an UpdateCellsAliasRequest message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffDeleteRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.UpdateCellsAliasRequest + * @memberof vtctldata.VDiffDeleteRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.UpdateCellsAliasRequest} UpdateCellsAliasRequest + * @returns {vtctldata.VDiffDeleteRequest} VDiffDeleteRequest */ - UpdateCellsAliasRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.UpdateCellsAliasRequest) + VDiffDeleteRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.VDiffDeleteRequest) return object; - let message = new $root.vtctldata.UpdateCellsAliasRequest(); - if (object.name != null) - message.name = String(object.name); - if (object.cells_alias != null) { - if (typeof object.cells_alias !== "object") - throw TypeError(".vtctldata.UpdateCellsAliasRequest.cells_alias: object expected"); - message.cells_alias = $root.topodata.CellsAlias.fromObject(object.cells_alias); - } + let message = new $root.vtctldata.VDiffDeleteRequest(); + if (object.workflow != null) + message.workflow = String(object.workflow); + if (object.target_keyspace != null) + message.target_keyspace = String(object.target_keyspace); + if (object.arg != null) + message.arg = String(object.arg); return message; 
}; /** - * Creates a plain object from an UpdateCellsAliasRequest message. Also converts values to other types if specified. + * Creates a plain object from a VDiffDeleteRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.UpdateCellsAliasRequest + * @memberof vtctldata.VDiffDeleteRequest * @static - * @param {vtctldata.UpdateCellsAliasRequest} message UpdateCellsAliasRequest + * @param {vtctldata.VDiffDeleteRequest} message VDiffDeleteRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - UpdateCellsAliasRequest.toObject = function toObject(message, options) { + VDiffDeleteRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.name = ""; - object.cells_alias = null; + object.workflow = ""; + object.target_keyspace = ""; + object.arg = ""; } - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; - if (message.cells_alias != null && message.hasOwnProperty("cells_alias")) - object.cells_alias = $root.topodata.CellsAlias.toObject(message.cells_alias, options); + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + object.target_keyspace = message.target_keyspace; + if (message.arg != null && message.hasOwnProperty("arg")) + object.arg = message.arg; return object; }; /** - * Converts this UpdateCellsAliasRequest to JSON. + * Converts this VDiffDeleteRequest to JSON. 
* @function toJSON - * @memberof vtctldata.UpdateCellsAliasRequest + * @memberof vtctldata.VDiffDeleteRequest * @instance * @returns {Object.} JSON object */ - UpdateCellsAliasRequest.prototype.toJSON = function toJSON() { + VDiffDeleteRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for UpdateCellsAliasRequest + * Gets the default type url for VDiffDeleteRequest * @function getTypeUrl - * @memberof vtctldata.UpdateCellsAliasRequest + * @memberof vtctldata.VDiffDeleteRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - UpdateCellsAliasRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VDiffDeleteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.UpdateCellsAliasRequest"; + return typeUrlPrefix + "/vtctldata.VDiffDeleteRequest"; }; - return UpdateCellsAliasRequest; + return VDiffDeleteRequest; })(); - vtctldata.UpdateCellsAliasResponse = (function() { + vtctldata.VDiffDeleteResponse = (function() { /** - * Properties of an UpdateCellsAliasResponse. + * Properties of a VDiffDeleteResponse. * @memberof vtctldata - * @interface IUpdateCellsAliasResponse - * @property {string|null} [name] UpdateCellsAliasResponse name - * @property {topodata.ICellsAlias|null} [cells_alias] UpdateCellsAliasResponse cells_alias + * @interface IVDiffDeleteResponse */ /** - * Constructs a new UpdateCellsAliasResponse. + * Constructs a new VDiffDeleteResponse. * @memberof vtctldata - * @classdesc Represents an UpdateCellsAliasResponse. - * @implements IUpdateCellsAliasResponse + * @classdesc Represents a VDiffDeleteResponse. 
+ * @implements IVDiffDeleteResponse * @constructor - * @param {vtctldata.IUpdateCellsAliasResponse=} [properties] Properties to set + * @param {vtctldata.IVDiffDeleteResponse=} [properties] Properties to set */ - function UpdateCellsAliasResponse(properties) { + function VDiffDeleteResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -138201,91 +161614,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * UpdateCellsAliasResponse name. - * @member {string} name - * @memberof vtctldata.UpdateCellsAliasResponse - * @instance - */ - UpdateCellsAliasResponse.prototype.name = ""; - - /** - * UpdateCellsAliasResponse cells_alias. - * @member {topodata.ICellsAlias|null|undefined} cells_alias - * @memberof vtctldata.UpdateCellsAliasResponse - * @instance - */ - UpdateCellsAliasResponse.prototype.cells_alias = null; - - /** - * Creates a new UpdateCellsAliasResponse instance using the specified properties. + * Creates a new VDiffDeleteResponse instance using the specified properties. * @function create - * @memberof vtctldata.UpdateCellsAliasResponse + * @memberof vtctldata.VDiffDeleteResponse * @static - * @param {vtctldata.IUpdateCellsAliasResponse=} [properties] Properties to set - * @returns {vtctldata.UpdateCellsAliasResponse} UpdateCellsAliasResponse instance + * @param {vtctldata.IVDiffDeleteResponse=} [properties] Properties to set + * @returns {vtctldata.VDiffDeleteResponse} VDiffDeleteResponse instance */ - UpdateCellsAliasResponse.create = function create(properties) { - return new UpdateCellsAliasResponse(properties); + VDiffDeleteResponse.create = function create(properties) { + return new VDiffDeleteResponse(properties); }; /** - * Encodes the specified UpdateCellsAliasResponse message. Does not implicitly {@link vtctldata.UpdateCellsAliasResponse.verify|verify} messages. + * Encodes the specified VDiffDeleteResponse message. 
Does not implicitly {@link vtctldata.VDiffDeleteResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.UpdateCellsAliasResponse + * @memberof vtctldata.VDiffDeleteResponse * @static - * @param {vtctldata.IUpdateCellsAliasResponse} message UpdateCellsAliasResponse message or plain object to encode + * @param {vtctldata.IVDiffDeleteResponse} message VDiffDeleteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - UpdateCellsAliasResponse.encode = function encode(message, writer) { + VDiffDeleteResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.name != null && Object.hasOwnProperty.call(message, "name")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); - if (message.cells_alias != null && Object.hasOwnProperty.call(message, "cells_alias")) - $root.topodata.CellsAlias.encode(message.cells_alias, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); return writer; }; /** - * Encodes the specified UpdateCellsAliasResponse message, length delimited. Does not implicitly {@link vtctldata.UpdateCellsAliasResponse.verify|verify} messages. + * Encodes the specified VDiffDeleteResponse message, length delimited. Does not implicitly {@link vtctldata.VDiffDeleteResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.UpdateCellsAliasResponse + * @memberof vtctldata.VDiffDeleteResponse * @static - * @param {vtctldata.IUpdateCellsAliasResponse} message UpdateCellsAliasResponse message or plain object to encode + * @param {vtctldata.IVDiffDeleteResponse} message VDiffDeleteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - UpdateCellsAliasResponse.encodeDelimited = function encodeDelimited(message, writer) { + VDiffDeleteResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an UpdateCellsAliasResponse message from the specified reader or buffer. + * Decodes a VDiffDeleteResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.UpdateCellsAliasResponse + * @memberof vtctldata.VDiffDeleteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.UpdateCellsAliasResponse} UpdateCellsAliasResponse + * @returns {vtctldata.VDiffDeleteResponse} VDiffDeleteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateCellsAliasResponse.decode = function decode(reader, length) { + VDiffDeleteResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.UpdateCellsAliasResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.VDiffDeleteResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - message.name = reader.string(); - break; - } - case 2: { - message.cells_alias = $root.topodata.CellsAlias.decode(reader, reader.uint32()); - break; - } default: reader.skipType(tag & 7); break; @@ -138295,136 +161680,111 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an UpdateCellsAliasResponse message from the specified reader or buffer, length delimited. + * Decodes a VDiffDeleteResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.UpdateCellsAliasResponse + * @memberof vtctldata.VDiffDeleteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.UpdateCellsAliasResponse} UpdateCellsAliasResponse + * @returns {vtctldata.VDiffDeleteResponse} VDiffDeleteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - UpdateCellsAliasResponse.decodeDelimited = function decodeDelimited(reader) { + VDiffDeleteResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an UpdateCellsAliasResponse message. + * Verifies a VDiffDeleteResponse message. 
* @function verify - * @memberof vtctldata.UpdateCellsAliasResponse + * @memberof vtctldata.VDiffDeleteResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - UpdateCellsAliasResponse.verify = function verify(message) { + VDiffDeleteResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.name != null && message.hasOwnProperty("name")) - if (!$util.isString(message.name)) - return "name: string expected"; - if (message.cells_alias != null && message.hasOwnProperty("cells_alias")) { - let error = $root.topodata.CellsAlias.verify(message.cells_alias); - if (error) - return "cells_alias." + error; - } return null; }; /** - * Creates an UpdateCellsAliasResponse message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffDeleteResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.UpdateCellsAliasResponse + * @memberof vtctldata.VDiffDeleteResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.UpdateCellsAliasResponse} UpdateCellsAliasResponse + * @returns {vtctldata.VDiffDeleteResponse} VDiffDeleteResponse */ - UpdateCellsAliasResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.UpdateCellsAliasResponse) + VDiffDeleteResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.VDiffDeleteResponse) return object; - let message = new $root.vtctldata.UpdateCellsAliasResponse(); - if (object.name != null) - message.name = String(object.name); - if (object.cells_alias != null) { - if (typeof object.cells_alias !== "object") - throw TypeError(".vtctldata.UpdateCellsAliasResponse.cells_alias: object expected"); - message.cells_alias = $root.topodata.CellsAlias.fromObject(object.cells_alias); - } - return message; + return new $root.vtctldata.VDiffDeleteResponse(); }; /** - * Creates a plain object from an UpdateCellsAliasResponse message. Also converts values to other types if specified. + * Creates a plain object from a VDiffDeleteResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.UpdateCellsAliasResponse + * @memberof vtctldata.VDiffDeleteResponse * @static - * @param {vtctldata.UpdateCellsAliasResponse} message UpdateCellsAliasResponse + * @param {vtctldata.VDiffDeleteResponse} message VDiffDeleteResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - UpdateCellsAliasResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.name = ""; - object.cells_alias = null; - } - if (message.name != null && message.hasOwnProperty("name")) - object.name = message.name; - if (message.cells_alias != null && message.hasOwnProperty("cells_alias")) - object.cells_alias = $root.topodata.CellsAlias.toObject(message.cells_alias, options); - return object; + VDiffDeleteResponse.toObject = function toObject() { + return {}; }; /** - * Converts this UpdateCellsAliasResponse to JSON. + * Converts this VDiffDeleteResponse to JSON. 
* @function toJSON - * @memberof vtctldata.UpdateCellsAliasResponse + * @memberof vtctldata.VDiffDeleteResponse * @instance * @returns {Object.} JSON object */ - UpdateCellsAliasResponse.prototype.toJSON = function toJSON() { + VDiffDeleteResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for UpdateCellsAliasResponse + * Gets the default type url for VDiffDeleteResponse * @function getTypeUrl - * @memberof vtctldata.UpdateCellsAliasResponse + * @memberof vtctldata.VDiffDeleteResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - UpdateCellsAliasResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VDiffDeleteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.UpdateCellsAliasResponse"; + return typeUrlPrefix + "/vtctldata.VDiffDeleteResponse"; }; - return UpdateCellsAliasResponse; + return VDiffDeleteResponse; })(); - vtctldata.ValidateRequest = (function() { + vtctldata.VDiffResumeRequest = (function() { /** - * Properties of a ValidateRequest. + * Properties of a VDiffResumeRequest. * @memberof vtctldata - * @interface IValidateRequest - * @property {boolean|null} [ping_tablets] ValidateRequest ping_tablets + * @interface IVDiffResumeRequest + * @property {string|null} [workflow] VDiffResumeRequest workflow + * @property {string|null} [target_keyspace] VDiffResumeRequest target_keyspace + * @property {string|null} [uuid] VDiffResumeRequest uuid */ /** - * Constructs a new ValidateRequest. + * Constructs a new VDiffResumeRequest. * @memberof vtctldata - * @classdesc Represents a ValidateRequest. - * @implements IValidateRequest + * @classdesc Represents a VDiffResumeRequest. 
+ * @implements IVDiffResumeRequest * @constructor - * @param {vtctldata.IValidateRequest=} [properties] Properties to set + * @param {vtctldata.IVDiffResumeRequest=} [properties] Properties to set */ - function ValidateRequest(properties) { + function VDiffResumeRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -138432,75 +161792,103 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ValidateRequest ping_tablets. - * @member {boolean} ping_tablets - * @memberof vtctldata.ValidateRequest + * VDiffResumeRequest workflow. + * @member {string} workflow + * @memberof vtctldata.VDiffResumeRequest * @instance */ - ValidateRequest.prototype.ping_tablets = false; + VDiffResumeRequest.prototype.workflow = ""; /** - * Creates a new ValidateRequest instance using the specified properties. + * VDiffResumeRequest target_keyspace. + * @member {string} target_keyspace + * @memberof vtctldata.VDiffResumeRequest + * @instance + */ + VDiffResumeRequest.prototype.target_keyspace = ""; + + /** + * VDiffResumeRequest uuid. + * @member {string} uuid + * @memberof vtctldata.VDiffResumeRequest + * @instance + */ + VDiffResumeRequest.prototype.uuid = ""; + + /** + * Creates a new VDiffResumeRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.ValidateRequest + * @memberof vtctldata.VDiffResumeRequest * @static - * @param {vtctldata.IValidateRequest=} [properties] Properties to set - * @returns {vtctldata.ValidateRequest} ValidateRequest instance + * @param {vtctldata.IVDiffResumeRequest=} [properties] Properties to set + * @returns {vtctldata.VDiffResumeRequest} VDiffResumeRequest instance */ - ValidateRequest.create = function create(properties) { - return new ValidateRequest(properties); + VDiffResumeRequest.create = function create(properties) { + return new VDiffResumeRequest(properties); }; /** - * Encodes the specified ValidateRequest message. Does not implicitly {@link vtctldata.ValidateRequest.verify|verify} messages. + * Encodes the specified VDiffResumeRequest message. Does not implicitly {@link vtctldata.VDiffResumeRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.ValidateRequest + * @memberof vtctldata.VDiffResumeRequest * @static - * @param {vtctldata.IValidateRequest} message ValidateRequest message or plain object to encode + * @param {vtctldata.IVDiffResumeRequest} message VDiffResumeRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateRequest.encode = function encode(message, writer) { + VDiffResumeRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.ping_tablets != null && Object.hasOwnProperty.call(message, "ping_tablets")) - writer.uint32(/* id 1, wireType 0 =*/8).bool(message.ping_tablets); + if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.workflow); + if (message.target_keyspace != null && Object.hasOwnProperty.call(message, "target_keyspace")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.target_keyspace); + if (message.uuid != null && 
Object.hasOwnProperty.call(message, "uuid")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.uuid); return writer; }; /** - * Encodes the specified ValidateRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateRequest.verify|verify} messages. + * Encodes the specified VDiffResumeRequest message, length delimited. Does not implicitly {@link vtctldata.VDiffResumeRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ValidateRequest + * @memberof vtctldata.VDiffResumeRequest * @static - * @param {vtctldata.IValidateRequest} message ValidateRequest message or plain object to encode + * @param {vtctldata.IVDiffResumeRequest} message VDiffResumeRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateRequest.encodeDelimited = function encodeDelimited(message, writer) { + VDiffResumeRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ValidateRequest message from the specified reader or buffer. + * Decodes a VDiffResumeRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ValidateRequest + * @memberof vtctldata.VDiffResumeRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ValidateRequest} ValidateRequest + * @returns {vtctldata.VDiffResumeRequest} VDiffResumeRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateRequest.decode = function decode(reader, length) { + VDiffResumeRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ValidateRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.VDiffResumeRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.ping_tablets = reader.bool(); + message.workflow = reader.string(); + break; + } + case 2: { + message.target_keyspace = reader.string(); + break; + } + case 3: { + message.uuid = reader.string(); break; } default: @@ -138512,125 +161900,138 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ValidateRequest message from the specified reader or buffer, length delimited. + * Decodes a VDiffResumeRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ValidateRequest + * @memberof vtctldata.VDiffResumeRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ValidateRequest} ValidateRequest + * @returns {vtctldata.VDiffResumeRequest} VDiffResumeRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateRequest.decodeDelimited = function decodeDelimited(reader) { + VDiffResumeRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ValidateRequest message. + * Verifies a VDiffResumeRequest message. 
* @function verify - * @memberof vtctldata.ValidateRequest + * @memberof vtctldata.VDiffResumeRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ValidateRequest.verify = function verify(message) { + VDiffResumeRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.ping_tablets != null && message.hasOwnProperty("ping_tablets")) - if (typeof message.ping_tablets !== "boolean") - return "ping_tablets: boolean expected"; + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + if (!$util.isString(message.target_keyspace)) + return "target_keyspace: string expected"; + if (message.uuid != null && message.hasOwnProperty("uuid")) + if (!$util.isString(message.uuid)) + return "uuid: string expected"; return null; }; /** - * Creates a ValidateRequest message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffResumeRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ValidateRequest + * @memberof vtctldata.VDiffResumeRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ValidateRequest} ValidateRequest + * @returns {vtctldata.VDiffResumeRequest} VDiffResumeRequest */ - ValidateRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ValidateRequest) + VDiffResumeRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.VDiffResumeRequest) return object; - let message = new $root.vtctldata.ValidateRequest(); - if (object.ping_tablets != null) - message.ping_tablets = Boolean(object.ping_tablets); + let message = new $root.vtctldata.VDiffResumeRequest(); + if (object.workflow != null) + message.workflow = String(object.workflow); + if (object.target_keyspace != null) + message.target_keyspace = String(object.target_keyspace); + if (object.uuid != null) + message.uuid = String(object.uuid); return message; }; /** - * Creates a plain object from a ValidateRequest message. Also converts values to other types if specified. + * Creates a plain object from a VDiffResumeRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ValidateRequest + * @memberof vtctldata.VDiffResumeRequest * @static - * @param {vtctldata.ValidateRequest} message ValidateRequest + * @param {vtctldata.VDiffResumeRequest} message VDiffResumeRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ValidateRequest.toObject = function toObject(message, options) { + VDiffResumeRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) - object.ping_tablets = false; - if (message.ping_tablets != null && message.hasOwnProperty("ping_tablets")) - object.ping_tablets = message.ping_tablets; + if (options.defaults) { + object.workflow = ""; + object.target_keyspace = ""; + object.uuid = ""; + } + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + object.target_keyspace = message.target_keyspace; + if (message.uuid != null && message.hasOwnProperty("uuid")) + object.uuid = message.uuid; return object; }; /** - * Converts this ValidateRequest to JSON. + * Converts this VDiffResumeRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ValidateRequest + * @memberof vtctldata.VDiffResumeRequest * @instance * @returns {Object.} JSON object */ - ValidateRequest.prototype.toJSON = function toJSON() { + VDiffResumeRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ValidateRequest + * Gets the default type url for VDiffResumeRequest * @function getTypeUrl - * @memberof vtctldata.ValidateRequest + * @memberof vtctldata.VDiffResumeRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ValidateRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VDiffResumeRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ValidateRequest"; + return typeUrlPrefix + "/vtctldata.VDiffResumeRequest"; }; - return ValidateRequest; + return VDiffResumeRequest; })(); - vtctldata.ValidateResponse = (function() { + vtctldata.VDiffResumeResponse = (function() { /** - * Properties of a ValidateResponse. + * Properties of a VDiffResumeResponse. * @memberof vtctldata - * @interface IValidateResponse - * @property {Array.|null} [results] ValidateResponse results - * @property {Object.|null} [results_by_keyspace] ValidateResponse results_by_keyspace + * @interface IVDiffResumeResponse */ /** - * Constructs a new ValidateResponse. + * Constructs a new VDiffResumeResponse. * @memberof vtctldata - * @classdesc Represents a ValidateResponse. - * @implements IValidateResponse + * @classdesc Represents a VDiffResumeResponse. 
+ * @implements IVDiffResumeResponse * @constructor - * @param {vtctldata.IValidateResponse=} [properties] Properties to set + * @param {vtctldata.IVDiffResumeResponse=} [properties] Properties to set */ - function ValidateResponse(properties) { - this.results = []; - this.results_by_keyspace = {}; + function VDiffResumeResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -138638,116 +162039,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ValidateResponse results. - * @member {Array.} results - * @memberof vtctldata.ValidateResponse - * @instance - */ - ValidateResponse.prototype.results = $util.emptyArray; - - /** - * ValidateResponse results_by_keyspace. - * @member {Object.} results_by_keyspace - * @memberof vtctldata.ValidateResponse - * @instance - */ - ValidateResponse.prototype.results_by_keyspace = $util.emptyObject; - - /** - * Creates a new ValidateResponse instance using the specified properties. + * Creates a new VDiffResumeResponse instance using the specified properties. * @function create - * @memberof vtctldata.ValidateResponse + * @memberof vtctldata.VDiffResumeResponse * @static - * @param {vtctldata.IValidateResponse=} [properties] Properties to set - * @returns {vtctldata.ValidateResponse} ValidateResponse instance + * @param {vtctldata.IVDiffResumeResponse=} [properties] Properties to set + * @returns {vtctldata.VDiffResumeResponse} VDiffResumeResponse instance */ - ValidateResponse.create = function create(properties) { - return new ValidateResponse(properties); + VDiffResumeResponse.create = function create(properties) { + return new VDiffResumeResponse(properties); }; /** - * Encodes the specified ValidateResponse message. Does not implicitly {@link vtctldata.ValidateResponse.verify|verify} messages. + * Encodes the specified VDiffResumeResponse message. 
Does not implicitly {@link vtctldata.VDiffResumeResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.ValidateResponse + * @memberof vtctldata.VDiffResumeResponse * @static - * @param {vtctldata.IValidateResponse} message ValidateResponse message or plain object to encode + * @param {vtctldata.IVDiffResumeResponse} message VDiffResumeResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateResponse.encode = function encode(message, writer) { + VDiffResumeResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.results != null && message.results.length) - for (let i = 0; i < message.results.length; ++i) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.results[i]); - if (message.results_by_keyspace != null && Object.hasOwnProperty.call(message, "results_by_keyspace")) - for (let keys = Object.keys(message.results_by_keyspace), i = 0; i < keys.length; ++i) { - writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); - $root.vtctldata.ValidateKeyspaceResponse.encode(message.results_by_keyspace[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); - } return writer; }; /** - * Encodes the specified ValidateResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateResponse.verify|verify} messages. + * Encodes the specified VDiffResumeResponse message, length delimited. Does not implicitly {@link vtctldata.VDiffResumeResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ValidateResponse + * @memberof vtctldata.VDiffResumeResponse * @static - * @param {vtctldata.IValidateResponse} message ValidateResponse message or plain object to encode + * @param {vtctldata.IVDiffResumeResponse} message VDiffResumeResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateResponse.encodeDelimited = function encodeDelimited(message, writer) { + VDiffResumeResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ValidateResponse message from the specified reader or buffer. + * Decodes a VDiffResumeResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ValidateResponse + * @memberof vtctldata.VDiffResumeResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ValidateResponse} ValidateResponse + * @returns {vtctldata.VDiffResumeResponse} VDiffResumeResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateResponse.decode = function decode(reader, length) { + VDiffResumeResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateResponse(), key, value; + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.VDiffResumeResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - if (!(message.results && message.results.length)) - message.results = []; - message.results.push(reader.string()); - break; - } - case 2: { - if (message.results_by_keyspace === $util.emptyObject) - message.results_by_keyspace = {}; - let end2 = reader.uint32() + reader.pos; - key = ""; - value = null; - while (reader.pos < end2) { - let tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = $root.vtctldata.ValidateKeyspaceResponse.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.results_by_keyspace[key] = value; - break; - } default: reader.skipType(tag & 7); break; @@ -138757,163 +162105,111 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ValidateResponse message from the specified reader or buffer, length delimited. + * Decodes a VDiffResumeResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ValidateResponse + * @memberof vtctldata.VDiffResumeResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ValidateResponse} ValidateResponse + * @returns {vtctldata.VDiffResumeResponse} VDiffResumeResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateResponse.decodeDelimited = function decodeDelimited(reader) { + VDiffResumeResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ValidateResponse message. + * Verifies a VDiffResumeResponse message. 
* @function verify - * @memberof vtctldata.ValidateResponse + * @memberof vtctldata.VDiffResumeResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ValidateResponse.verify = function verify(message) { + VDiffResumeResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.results != null && message.hasOwnProperty("results")) { - if (!Array.isArray(message.results)) - return "results: array expected"; - for (let i = 0; i < message.results.length; ++i) - if (!$util.isString(message.results[i])) - return "results: string[] expected"; - } - if (message.results_by_keyspace != null && message.hasOwnProperty("results_by_keyspace")) { - if (!$util.isObject(message.results_by_keyspace)) - return "results_by_keyspace: object expected"; - let key = Object.keys(message.results_by_keyspace); - for (let i = 0; i < key.length; ++i) { - let error = $root.vtctldata.ValidateKeyspaceResponse.verify(message.results_by_keyspace[key[i]]); - if (error) - return "results_by_keyspace." + error; - } - } return null; }; /** - * Creates a ValidateResponse message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffResumeResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ValidateResponse + * @memberof vtctldata.VDiffResumeResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ValidateResponse} ValidateResponse + * @returns {vtctldata.VDiffResumeResponse} VDiffResumeResponse */ - ValidateResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ValidateResponse) + VDiffResumeResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.VDiffResumeResponse) return object; - let message = new $root.vtctldata.ValidateResponse(); - if (object.results) { - if (!Array.isArray(object.results)) - throw TypeError(".vtctldata.ValidateResponse.results: array expected"); - message.results = []; - for (let i = 0; i < object.results.length; ++i) - message.results[i] = String(object.results[i]); - } - if (object.results_by_keyspace) { - if (typeof object.results_by_keyspace !== "object") - throw TypeError(".vtctldata.ValidateResponse.results_by_keyspace: object expected"); - message.results_by_keyspace = {}; - for (let keys = Object.keys(object.results_by_keyspace), i = 0; i < keys.length; ++i) { - if (typeof object.results_by_keyspace[keys[i]] !== "object") - throw TypeError(".vtctldata.ValidateResponse.results_by_keyspace: object expected"); - message.results_by_keyspace[keys[i]] = $root.vtctldata.ValidateKeyspaceResponse.fromObject(object.results_by_keyspace[keys[i]]); - } - } - return message; + return new $root.vtctldata.VDiffResumeResponse(); }; /** - * Creates a plain object from a ValidateResponse message. Also converts values to other types if specified. + * Creates a plain object from a VDiffResumeResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ValidateResponse + * @memberof vtctldata.VDiffResumeResponse * @static - * @param {vtctldata.ValidateResponse} message ValidateResponse + * @param {vtctldata.VDiffResumeResponse} message VDiffResumeResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ValidateResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.arrays || options.defaults) - object.results = []; - if (options.objects || options.defaults) - object.results_by_keyspace = {}; - if (message.results && message.results.length) { - object.results = []; - for (let j = 0; j < message.results.length; ++j) - object.results[j] = message.results[j]; - } - let keys2; - if (message.results_by_keyspace && (keys2 = Object.keys(message.results_by_keyspace)).length) { - object.results_by_keyspace = {}; - for (let j = 0; j < keys2.length; ++j) - object.results_by_keyspace[keys2[j]] = $root.vtctldata.ValidateKeyspaceResponse.toObject(message.results_by_keyspace[keys2[j]], options); - } - return object; + VDiffResumeResponse.toObject = function toObject() { + return {}; }; /** - * Converts this ValidateResponse to JSON. + * Converts this VDiffResumeResponse to JSON. 
* @function toJSON - * @memberof vtctldata.ValidateResponse + * @memberof vtctldata.VDiffResumeResponse * @instance * @returns {Object.} JSON object */ - ValidateResponse.prototype.toJSON = function toJSON() { + VDiffResumeResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ValidateResponse + * Gets the default type url for VDiffResumeResponse * @function getTypeUrl - * @memberof vtctldata.ValidateResponse + * @memberof vtctldata.VDiffResumeResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ValidateResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VDiffResumeResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ValidateResponse"; + return typeUrlPrefix + "/vtctldata.VDiffResumeResponse"; }; - return ValidateResponse; + return VDiffResumeResponse; })(); - vtctldata.ValidateKeyspaceRequest = (function() { + vtctldata.VDiffShowRequest = (function() { /** - * Properties of a ValidateKeyspaceRequest. + * Properties of a VDiffShowRequest. * @memberof vtctldata - * @interface IValidateKeyspaceRequest - * @property {string|null} [keyspace] ValidateKeyspaceRequest keyspace - * @property {boolean|null} [ping_tablets] ValidateKeyspaceRequest ping_tablets + * @interface IVDiffShowRequest + * @property {string|null} [workflow] VDiffShowRequest workflow + * @property {string|null} [target_keyspace] VDiffShowRequest target_keyspace + * @property {string|null} [arg] VDiffShowRequest arg */ /** - * Constructs a new ValidateKeyspaceRequest. + * Constructs a new VDiffShowRequest. * @memberof vtctldata - * @classdesc Represents a ValidateKeyspaceRequest. - * @implements IValidateKeyspaceRequest + * @classdesc Represents a VDiffShowRequest. 
+ * @implements IVDiffShowRequest * @constructor - * @param {vtctldata.IValidateKeyspaceRequest=} [properties] Properties to set + * @param {vtctldata.IVDiffShowRequest=} [properties] Properties to set */ - function ValidateKeyspaceRequest(properties) { + function VDiffShowRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -138921,89 +162217,103 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ValidateKeyspaceRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.ValidateKeyspaceRequest + * VDiffShowRequest workflow. + * @member {string} workflow + * @memberof vtctldata.VDiffShowRequest * @instance */ - ValidateKeyspaceRequest.prototype.keyspace = ""; + VDiffShowRequest.prototype.workflow = ""; /** - * ValidateKeyspaceRequest ping_tablets. - * @member {boolean} ping_tablets - * @memberof vtctldata.ValidateKeyspaceRequest + * VDiffShowRequest target_keyspace. + * @member {string} target_keyspace + * @memberof vtctldata.VDiffShowRequest * @instance */ - ValidateKeyspaceRequest.prototype.ping_tablets = false; + VDiffShowRequest.prototype.target_keyspace = ""; /** - * Creates a new ValidateKeyspaceRequest instance using the specified properties. + * VDiffShowRequest arg. + * @member {string} arg + * @memberof vtctldata.VDiffShowRequest + * @instance + */ + VDiffShowRequest.prototype.arg = ""; + + /** + * Creates a new VDiffShowRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.ValidateKeyspaceRequest + * @memberof vtctldata.VDiffShowRequest * @static - * @param {vtctldata.IValidateKeyspaceRequest=} [properties] Properties to set - * @returns {vtctldata.ValidateKeyspaceRequest} ValidateKeyspaceRequest instance + * @param {vtctldata.IVDiffShowRequest=} [properties] Properties to set + * @returns {vtctldata.VDiffShowRequest} VDiffShowRequest instance */ - ValidateKeyspaceRequest.create = function create(properties) { - return new ValidateKeyspaceRequest(properties); + VDiffShowRequest.create = function create(properties) { + return new VDiffShowRequest(properties); }; /** - * Encodes the specified ValidateKeyspaceRequest message. Does not implicitly {@link vtctldata.ValidateKeyspaceRequest.verify|verify} messages. + * Encodes the specified VDiffShowRequest message. Does not implicitly {@link vtctldata.VDiffShowRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.ValidateKeyspaceRequest + * @memberof vtctldata.VDiffShowRequest * @static - * @param {vtctldata.IValidateKeyspaceRequest} message ValidateKeyspaceRequest message or plain object to encode + * @param {vtctldata.IVDiffShowRequest} message VDiffShowRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateKeyspaceRequest.encode = function encode(message, writer) { + VDiffShowRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.ping_tablets != null && Object.hasOwnProperty.call(message, "ping_tablets")) - writer.uint32(/* id 2, wireType 0 =*/16).bool(message.ping_tablets); + if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.workflow); + if 
(message.target_keyspace != null && Object.hasOwnProperty.call(message, "target_keyspace")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.target_keyspace); + if (message.arg != null && Object.hasOwnProperty.call(message, "arg")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.arg); return writer; }; /** - * Encodes the specified ValidateKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateKeyspaceRequest.verify|verify} messages. + * Encodes the specified VDiffShowRequest message, length delimited. Does not implicitly {@link vtctldata.VDiffShowRequest.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ValidateKeyspaceRequest + * @memberof vtctldata.VDiffShowRequest * @static - * @param {vtctldata.IValidateKeyspaceRequest} message ValidateKeyspaceRequest message or plain object to encode + * @param {vtctldata.IVDiffShowRequest} message VDiffShowRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateKeyspaceRequest.encodeDelimited = function encodeDelimited(message, writer) { + VDiffShowRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ValidateKeyspaceRequest message from the specified reader or buffer. + * Decodes a VDiffShowRequest message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.ValidateKeyspaceRequest + * @memberof vtctldata.VDiffShowRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ValidateKeyspaceRequest} ValidateKeyspaceRequest + * @returns {vtctldata.VDiffShowRequest} VDiffShowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateKeyspaceRequest.decode = function decode(reader, length) { + VDiffShowRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateKeyspaceRequest(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.VDiffShowRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); + message.workflow = reader.string(); break; } case 2: { - message.ping_tablets = reader.bool(); + message.target_keyspace = reader.string(); + break; + } + case 3: { + message.arg = reader.string(); break; } default: @@ -139015,134 +162325,140 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ValidateKeyspaceRequest message from the specified reader or buffer, length delimited. + * Decodes a VDiffShowRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.ValidateKeyspaceRequest + * @memberof vtctldata.VDiffShowRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ValidateKeyspaceRequest} ValidateKeyspaceRequest + * @returns {vtctldata.VDiffShowRequest} VDiffShowRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateKeyspaceRequest.decodeDelimited = function decodeDelimited(reader) { + VDiffShowRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ValidateKeyspaceRequest message. + * Verifies a VDiffShowRequest message. * @function verify - * @memberof vtctldata.ValidateKeyspaceRequest + * @memberof vtctldata.VDiffShowRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ValidateKeyspaceRequest.verify = function verify(message) { + VDiffShowRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) - return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.ping_tablets != null && message.hasOwnProperty("ping_tablets")) - if (typeof message.ping_tablets !== "boolean") - return "ping_tablets: boolean expected"; + return "object expected"; + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + if (!$util.isString(message.target_keyspace)) + return "target_keyspace: string expected"; + if (message.arg != null && 
message.hasOwnProperty("arg")) + if (!$util.isString(message.arg)) + return "arg: string expected"; return null; }; /** - * Creates a ValidateKeyspaceRequest message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffShowRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.ValidateKeyspaceRequest + * @memberof vtctldata.VDiffShowRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ValidateKeyspaceRequest} ValidateKeyspaceRequest + * @returns {vtctldata.VDiffShowRequest} VDiffShowRequest */ - ValidateKeyspaceRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ValidateKeyspaceRequest) + VDiffShowRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.VDiffShowRequest) return object; - let message = new $root.vtctldata.ValidateKeyspaceRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.ping_tablets != null) - message.ping_tablets = Boolean(object.ping_tablets); + let message = new $root.vtctldata.VDiffShowRequest(); + if (object.workflow != null) + message.workflow = String(object.workflow); + if (object.target_keyspace != null) + message.target_keyspace = String(object.target_keyspace); + if (object.arg != null) + message.arg = String(object.arg); return message; }; /** - * Creates a plain object from a ValidateKeyspaceRequest message. Also converts values to other types if specified. + * Creates a plain object from a VDiffShowRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ValidateKeyspaceRequest + * @memberof vtctldata.VDiffShowRequest * @static - * @param {vtctldata.ValidateKeyspaceRequest} message ValidateKeyspaceRequest + * @param {vtctldata.VDiffShowRequest} message VDiffShowRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ValidateKeyspaceRequest.toObject = function toObject(message, options) { + VDiffShowRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { - object.keyspace = ""; - object.ping_tablets = false; + object.workflow = ""; + object.target_keyspace = ""; + object.arg = ""; } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.ping_tablets != null && message.hasOwnProperty("ping_tablets")) - object.ping_tablets = message.ping_tablets; + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + object.target_keyspace = message.target_keyspace; + if (message.arg != null && message.hasOwnProperty("arg")) + object.arg = message.arg; return object; }; /** - * Converts this ValidateKeyspaceRequest to JSON. + * Converts this VDiffShowRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ValidateKeyspaceRequest + * @memberof vtctldata.VDiffShowRequest * @instance * @returns {Object.} JSON object */ - ValidateKeyspaceRequest.prototype.toJSON = function toJSON() { + VDiffShowRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ValidateKeyspaceRequest + * Gets the default type url for VDiffShowRequest * @function getTypeUrl - * @memberof vtctldata.ValidateKeyspaceRequest + * @memberof vtctldata.VDiffShowRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ValidateKeyspaceRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VDiffShowRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ValidateKeyspaceRequest"; + return typeUrlPrefix + "/vtctldata.VDiffShowRequest"; }; - return ValidateKeyspaceRequest; + return VDiffShowRequest; })(); - vtctldata.ValidateKeyspaceResponse = (function() { + vtctldata.VDiffShowResponse = (function() { /** - * Properties of a ValidateKeyspaceResponse. + * Properties of a VDiffShowResponse. * @memberof vtctldata - * @interface IValidateKeyspaceResponse - * @property {Array.|null} [results] ValidateKeyspaceResponse results - * @property {Object.|null} [results_by_shard] ValidateKeyspaceResponse results_by_shard + * @interface IVDiffShowResponse + * @property {Object.|null} [tablet_responses] VDiffShowResponse tablet_responses */ /** - * Constructs a new ValidateKeyspaceResponse. + * Constructs a new VDiffShowResponse. * @memberof vtctldata - * @classdesc Represents a ValidateKeyspaceResponse. - * @implements IValidateKeyspaceResponse + * @classdesc Represents a VDiffShowResponse. 
+ * @implements IVDiffShowResponse * @constructor - * @param {vtctldata.IValidateKeyspaceResponse=} [properties] Properties to set + * @param {vtctldata.IVDiffShowResponse=} [properties] Properties to set */ - function ValidateKeyspaceResponse(properties) { - this.results = []; - this.results_by_shard = {}; + function VDiffShowResponse(properties) { + this.tablet_responses = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -139150,96 +162466,79 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ValidateKeyspaceResponse results. - * @member {Array.} results - * @memberof vtctldata.ValidateKeyspaceResponse - * @instance - */ - ValidateKeyspaceResponse.prototype.results = $util.emptyArray; - - /** - * ValidateKeyspaceResponse results_by_shard. - * @member {Object.} results_by_shard - * @memberof vtctldata.ValidateKeyspaceResponse + * VDiffShowResponse tablet_responses. + * @member {Object.} tablet_responses + * @memberof vtctldata.VDiffShowResponse * @instance */ - ValidateKeyspaceResponse.prototype.results_by_shard = $util.emptyObject; + VDiffShowResponse.prototype.tablet_responses = $util.emptyObject; /** - * Creates a new ValidateKeyspaceResponse instance using the specified properties. + * Creates a new VDiffShowResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.ValidateKeyspaceResponse + * @memberof vtctldata.VDiffShowResponse * @static - * @param {vtctldata.IValidateKeyspaceResponse=} [properties] Properties to set - * @returns {vtctldata.ValidateKeyspaceResponse} ValidateKeyspaceResponse instance + * @param {vtctldata.IVDiffShowResponse=} [properties] Properties to set + * @returns {vtctldata.VDiffShowResponse} VDiffShowResponse instance */ - ValidateKeyspaceResponse.create = function create(properties) { - return new ValidateKeyspaceResponse(properties); + VDiffShowResponse.create = function create(properties) { + return new VDiffShowResponse(properties); }; /** - * Encodes the specified ValidateKeyspaceResponse message. Does not implicitly {@link vtctldata.ValidateKeyspaceResponse.verify|verify} messages. + * Encodes the specified VDiffShowResponse message. Does not implicitly {@link vtctldata.VDiffShowResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.ValidateKeyspaceResponse + * @memberof vtctldata.VDiffShowResponse * @static - * @param {vtctldata.IValidateKeyspaceResponse} message ValidateKeyspaceResponse message or plain object to encode + * @param {vtctldata.IVDiffShowResponse} message VDiffShowResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateKeyspaceResponse.encode = function encode(message, writer) { + VDiffShowResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.results != null && message.results.length) - for (let i = 0; i < message.results.length; ++i) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.results[i]); - if (message.results_by_shard != null && Object.hasOwnProperty.call(message, "results_by_shard")) - for (let keys = Object.keys(message.results_by_shard), i = 0; i < keys.length; ++i) { - writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 
=*/10).string(keys[i]); - $root.vtctldata.ValidateShardResponse.encode(message.results_by_shard[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + if (message.tablet_responses != null && Object.hasOwnProperty.call(message, "tablet_responses")) + for (let keys = Object.keys(message.tablet_responses), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.tabletmanagerdata.VDiffResponse.encode(message.tablet_responses[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); } return writer; }; /** - * Encodes the specified ValidateKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateKeyspaceResponse.verify|verify} messages. + * Encodes the specified VDiffShowResponse message, length delimited. Does not implicitly {@link vtctldata.VDiffShowResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ValidateKeyspaceResponse + * @memberof vtctldata.VDiffShowResponse * @static - * @param {vtctldata.IValidateKeyspaceResponse} message ValidateKeyspaceResponse message or plain object to encode + * @param {vtctldata.IVDiffShowResponse} message VDiffShowResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateKeyspaceResponse.encodeDelimited = function encodeDelimited(message, writer) { + VDiffShowResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ValidateKeyspaceResponse message from the specified reader or buffer. + * Decodes a VDiffShowResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.ValidateKeyspaceResponse + * @memberof vtctldata.VDiffShowResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ValidateKeyspaceResponse} ValidateKeyspaceResponse + * @returns {vtctldata.VDiffShowResponse} VDiffShowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateKeyspaceResponse.decode = function decode(reader, length) { + VDiffShowResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateKeyspaceResponse(), key, value; + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.VDiffShowResponse(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.results && message.results.length)) - message.results = []; - message.results.push(reader.string()); - break; - } - case 2: { - if (message.results_by_shard === $util.emptyObject) - message.results_by_shard = {}; + if (message.tablet_responses === $util.emptyObject) + message.tablet_responses = {}; let end2 = reader.uint32() + reader.pos; key = ""; value = null; @@ -139250,14 +162549,14 @@ export const vtctldata = $root.vtctldata = (() => { key = reader.string(); break; case 2: - value = $root.vtctldata.ValidateShardResponse.decode(reader, reader.uint32()); + value = $root.tabletmanagerdata.VDiffResponse.decode(reader, reader.uint32()); break; default: reader.skipType(tag2 & 7); break; } } - message.results_by_shard[key] = value; + message.tablet_responses[key] = value; break; } default: @@ -139269,167 +162568,143 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * 
Decodes a ValidateKeyspaceResponse message from the specified reader or buffer, length delimited. + * Decodes a VDiffShowResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ValidateKeyspaceResponse + * @memberof vtctldata.VDiffShowResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ValidateKeyspaceResponse} ValidateKeyspaceResponse + * @returns {vtctldata.VDiffShowResponse} VDiffShowResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateKeyspaceResponse.decodeDelimited = function decodeDelimited(reader) { + VDiffShowResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ValidateKeyspaceResponse message. + * Verifies a VDiffShowResponse message. 
* @function verify - * @memberof vtctldata.ValidateKeyspaceResponse + * @memberof vtctldata.VDiffShowResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ValidateKeyspaceResponse.verify = function verify(message) { + VDiffShowResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.results != null && message.hasOwnProperty("results")) { - if (!Array.isArray(message.results)) - return "results: array expected"; - for (let i = 0; i < message.results.length; ++i) - if (!$util.isString(message.results[i])) - return "results: string[] expected"; - } - if (message.results_by_shard != null && message.hasOwnProperty("results_by_shard")) { - if (!$util.isObject(message.results_by_shard)) - return "results_by_shard: object expected"; - let key = Object.keys(message.results_by_shard); + if (message.tablet_responses != null && message.hasOwnProperty("tablet_responses")) { + if (!$util.isObject(message.tablet_responses)) + return "tablet_responses: object expected"; + let key = Object.keys(message.tablet_responses); for (let i = 0; i < key.length; ++i) { - let error = $root.vtctldata.ValidateShardResponse.verify(message.results_by_shard[key[i]]); + let error = $root.tabletmanagerdata.VDiffResponse.verify(message.tablet_responses[key[i]]); if (error) - return "results_by_shard." + error; + return "tablet_responses." + error; } } return null; }; /** - * Creates a ValidateKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffShowResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ValidateKeyspaceResponse + * @memberof vtctldata.VDiffShowResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ValidateKeyspaceResponse} ValidateKeyspaceResponse + * @returns {vtctldata.VDiffShowResponse} VDiffShowResponse */ - ValidateKeyspaceResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ValidateKeyspaceResponse) + VDiffShowResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.VDiffShowResponse) return object; - let message = new $root.vtctldata.ValidateKeyspaceResponse(); - if (object.results) { - if (!Array.isArray(object.results)) - throw TypeError(".vtctldata.ValidateKeyspaceResponse.results: array expected"); - message.results = []; - for (let i = 0; i < object.results.length; ++i) - message.results[i] = String(object.results[i]); - } - if (object.results_by_shard) { - if (typeof object.results_by_shard !== "object") - throw TypeError(".vtctldata.ValidateKeyspaceResponse.results_by_shard: object expected"); - message.results_by_shard = {}; - for (let keys = Object.keys(object.results_by_shard), i = 0; i < keys.length; ++i) { - if (typeof object.results_by_shard[keys[i]] !== "object") - throw TypeError(".vtctldata.ValidateKeyspaceResponse.results_by_shard: object expected"); - message.results_by_shard[keys[i]] = $root.vtctldata.ValidateShardResponse.fromObject(object.results_by_shard[keys[i]]); + let message = new $root.vtctldata.VDiffShowResponse(); + if (object.tablet_responses) { + if (typeof object.tablet_responses !== "object") + throw TypeError(".vtctldata.VDiffShowResponse.tablet_responses: object expected"); + message.tablet_responses = {}; + for (let keys = Object.keys(object.tablet_responses), i = 0; i < keys.length; ++i) { + if (typeof object.tablet_responses[keys[i]] !== "object") + throw TypeError(".vtctldata.VDiffShowResponse.tablet_responses: object expected"); + 
message.tablet_responses[keys[i]] = $root.tabletmanagerdata.VDiffResponse.fromObject(object.tablet_responses[keys[i]]); } } return message; }; /** - * Creates a plain object from a ValidateKeyspaceResponse message. Also converts values to other types if specified. + * Creates a plain object from a VDiffShowResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.ValidateKeyspaceResponse + * @memberof vtctldata.VDiffShowResponse * @static - * @param {vtctldata.ValidateKeyspaceResponse} message ValidateKeyspaceResponse + * @param {vtctldata.VDiffShowResponse} message VDiffShowResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ValidateKeyspaceResponse.toObject = function toObject(message, options) { + VDiffShowResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.results = []; if (options.objects || options.defaults) - object.results_by_shard = {}; - if (message.results && message.results.length) { - object.results = []; - for (let j = 0; j < message.results.length; ++j) - object.results[j] = message.results[j]; - } + object.tablet_responses = {}; let keys2; - if (message.results_by_shard && (keys2 = Object.keys(message.results_by_shard)).length) { - object.results_by_shard = {}; + if (message.tablet_responses && (keys2 = Object.keys(message.tablet_responses)).length) { + object.tablet_responses = {}; for (let j = 0; j < keys2.length; ++j) - object.results_by_shard[keys2[j]] = $root.vtctldata.ValidateShardResponse.toObject(message.results_by_shard[keys2[j]], options); + object.tablet_responses[keys2[j]] = $root.tabletmanagerdata.VDiffResponse.toObject(message.tablet_responses[keys2[j]], options); } return object; }; /** - * Converts this ValidateKeyspaceResponse to JSON. + * Converts this VDiffShowResponse to JSON. 
* @function toJSON - * @memberof vtctldata.ValidateKeyspaceResponse + * @memberof vtctldata.VDiffShowResponse * @instance * @returns {Object.} JSON object */ - ValidateKeyspaceResponse.prototype.toJSON = function toJSON() { + VDiffShowResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ValidateKeyspaceResponse + * Gets the default type url for VDiffShowResponse * @function getTypeUrl - * @memberof vtctldata.ValidateKeyspaceResponse + * @memberof vtctldata.VDiffShowResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ValidateKeyspaceResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VDiffShowResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ValidateKeyspaceResponse"; + return typeUrlPrefix + "/vtctldata.VDiffShowResponse"; }; - return ValidateKeyspaceResponse; + return VDiffShowResponse; })(); - vtctldata.ValidateSchemaKeyspaceRequest = (function() { + vtctldata.VDiffStopRequest = (function() { /** - * Properties of a ValidateSchemaKeyspaceRequest. + * Properties of a VDiffStopRequest. 
* @memberof vtctldata - * @interface IValidateSchemaKeyspaceRequest - * @property {string|null} [keyspace] ValidateSchemaKeyspaceRequest keyspace - * @property {Array.|null} [exclude_tables] ValidateSchemaKeyspaceRequest exclude_tables - * @property {boolean|null} [include_views] ValidateSchemaKeyspaceRequest include_views - * @property {boolean|null} [skip_no_primary] ValidateSchemaKeyspaceRequest skip_no_primary - * @property {boolean|null} [include_vschema] ValidateSchemaKeyspaceRequest include_vschema + * @interface IVDiffStopRequest + * @property {string|null} [workflow] VDiffStopRequest workflow + * @property {string|null} [target_keyspace] VDiffStopRequest target_keyspace + * @property {string|null} [uuid] VDiffStopRequest uuid */ /** - * Constructs a new ValidateSchemaKeyspaceRequest. + * Constructs a new VDiffStopRequest. * @memberof vtctldata - * @classdesc Represents a ValidateSchemaKeyspaceRequest. - * @implements IValidateSchemaKeyspaceRequest + * @classdesc Represents a VDiffStopRequest. + * @implements IVDiffStopRequest * @constructor - * @param {vtctldata.IValidateSchemaKeyspaceRequest=} [properties] Properties to set + * @param {vtctldata.IVDiffStopRequest=} [properties] Properties to set */ - function ValidateSchemaKeyspaceRequest(properties) { - this.exclude_tables = []; + function VDiffStopRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -139437,134 +162712,103 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ValidateSchemaKeyspaceRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.ValidateSchemaKeyspaceRequest - * @instance - */ - ValidateSchemaKeyspaceRequest.prototype.keyspace = ""; - - /** - * ValidateSchemaKeyspaceRequest exclude_tables. 
- * @member {Array.} exclude_tables - * @memberof vtctldata.ValidateSchemaKeyspaceRequest - * @instance - */ - ValidateSchemaKeyspaceRequest.prototype.exclude_tables = $util.emptyArray; - - /** - * ValidateSchemaKeyspaceRequest include_views. - * @member {boolean} include_views - * @memberof vtctldata.ValidateSchemaKeyspaceRequest + * VDiffStopRequest workflow. + * @member {string} workflow + * @memberof vtctldata.VDiffStopRequest * @instance */ - ValidateSchemaKeyspaceRequest.prototype.include_views = false; + VDiffStopRequest.prototype.workflow = ""; /** - * ValidateSchemaKeyspaceRequest skip_no_primary. - * @member {boolean} skip_no_primary - * @memberof vtctldata.ValidateSchemaKeyspaceRequest + * VDiffStopRequest target_keyspace. + * @member {string} target_keyspace + * @memberof vtctldata.VDiffStopRequest * @instance */ - ValidateSchemaKeyspaceRequest.prototype.skip_no_primary = false; + VDiffStopRequest.prototype.target_keyspace = ""; /** - * ValidateSchemaKeyspaceRequest include_vschema. - * @member {boolean} include_vschema - * @memberof vtctldata.ValidateSchemaKeyspaceRequest + * VDiffStopRequest uuid. + * @member {string} uuid + * @memberof vtctldata.VDiffStopRequest * @instance */ - ValidateSchemaKeyspaceRequest.prototype.include_vschema = false; + VDiffStopRequest.prototype.uuid = ""; /** - * Creates a new ValidateSchemaKeyspaceRequest instance using the specified properties. + * Creates a new VDiffStopRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.ValidateSchemaKeyspaceRequest + * @memberof vtctldata.VDiffStopRequest * @static - * @param {vtctldata.IValidateSchemaKeyspaceRequest=} [properties] Properties to set - * @returns {vtctldata.ValidateSchemaKeyspaceRequest} ValidateSchemaKeyspaceRequest instance + * @param {vtctldata.IVDiffStopRequest=} [properties] Properties to set + * @returns {vtctldata.VDiffStopRequest} VDiffStopRequest instance */ - ValidateSchemaKeyspaceRequest.create = function create(properties) { - return new ValidateSchemaKeyspaceRequest(properties); + VDiffStopRequest.create = function create(properties) { + return new VDiffStopRequest(properties); }; /** - * Encodes the specified ValidateSchemaKeyspaceRequest message. Does not implicitly {@link vtctldata.ValidateSchemaKeyspaceRequest.verify|verify} messages. + * Encodes the specified VDiffStopRequest message. Does not implicitly {@link vtctldata.VDiffStopRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.ValidateSchemaKeyspaceRequest + * @memberof vtctldata.VDiffStopRequest * @static - * @param {vtctldata.IValidateSchemaKeyspaceRequest} message ValidateSchemaKeyspaceRequest message or plain object to encode + * @param {vtctldata.IVDiffStopRequest} message VDiffStopRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateSchemaKeyspaceRequest.encode = function encode(message, writer) { + VDiffStopRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.exclude_tables != null && message.exclude_tables.length) - for (let i = 0; i < message.exclude_tables.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.exclude_tables[i]); - if (message.include_views != null && 
Object.hasOwnProperty.call(message, "include_views")) - writer.uint32(/* id 3, wireType 0 =*/24).bool(message.include_views); - if (message.skip_no_primary != null && Object.hasOwnProperty.call(message, "skip_no_primary")) - writer.uint32(/* id 4, wireType 0 =*/32).bool(message.skip_no_primary); - if (message.include_vschema != null && Object.hasOwnProperty.call(message, "include_vschema")) - writer.uint32(/* id 5, wireType 0 =*/40).bool(message.include_vschema); + if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.workflow); + if (message.target_keyspace != null && Object.hasOwnProperty.call(message, "target_keyspace")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.target_keyspace); + if (message.uuid != null && Object.hasOwnProperty.call(message, "uuid")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.uuid); return writer; }; /** - * Encodes the specified ValidateSchemaKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateSchemaKeyspaceRequest.verify|verify} messages. + * Encodes the specified VDiffStopRequest message, length delimited. Does not implicitly {@link vtctldata.VDiffStopRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ValidateSchemaKeyspaceRequest + * @memberof vtctldata.VDiffStopRequest * @static - * @param {vtctldata.IValidateSchemaKeyspaceRequest} message ValidateSchemaKeyspaceRequest message or plain object to encode + * @param {vtctldata.IVDiffStopRequest} message VDiffStopRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateSchemaKeyspaceRequest.encodeDelimited = function encodeDelimited(message, writer) { + VDiffStopRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ValidateSchemaKeyspaceRequest message from the specified reader or buffer. + * Decodes a VDiffStopRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ValidateSchemaKeyspaceRequest + * @memberof vtctldata.VDiffStopRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ValidateSchemaKeyspaceRequest} ValidateSchemaKeyspaceRequest + * @returns {vtctldata.VDiffStopRequest} VDiffStopRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateSchemaKeyspaceRequest.decode = function decode(reader, length) { + VDiffStopRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateSchemaKeyspaceRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.VDiffStopRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.keyspace = reader.string(); + message.workflow = reader.string(); break; } case 2: { - if (!(message.exclude_tables && message.exclude_tables.length)) - message.exclude_tables = []; - message.exclude_tables.push(reader.string()); + message.target_keyspace = reader.string(); break; } case 3: { - message.include_views = reader.bool(); - break; - } - case 4: { - message.skip_no_primary = reader.bool(); - break; - } - case 5: { - message.include_vschema = reader.bool(); + message.uuid = reader.string(); break; } default: @@ -139576,171 +162820,138 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ValidateSchemaKeyspaceRequest message from the specified reader or buffer, length delimited. + * Decodes a VDiffStopRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ValidateSchemaKeyspaceRequest + * @memberof vtctldata.VDiffStopRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ValidateSchemaKeyspaceRequest} ValidateSchemaKeyspaceRequest + * @returns {vtctldata.VDiffStopRequest} VDiffStopRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateSchemaKeyspaceRequest.decodeDelimited = function decodeDelimited(reader) { + VDiffStopRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ValidateSchemaKeyspaceRequest message. + * Verifies a VDiffStopRequest message. 
* @function verify - * @memberof vtctldata.ValidateSchemaKeyspaceRequest + * @memberof vtctldata.VDiffStopRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ValidateSchemaKeyspaceRequest.verify = function verify(message) { + VDiffStopRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.exclude_tables != null && message.hasOwnProperty("exclude_tables")) { - if (!Array.isArray(message.exclude_tables)) - return "exclude_tables: array expected"; - for (let i = 0; i < message.exclude_tables.length; ++i) - if (!$util.isString(message.exclude_tables[i])) - return "exclude_tables: string[] expected"; - } - if (message.include_views != null && message.hasOwnProperty("include_views")) - if (typeof message.include_views !== "boolean") - return "include_views: boolean expected"; - if (message.skip_no_primary != null && message.hasOwnProperty("skip_no_primary")) - if (typeof message.skip_no_primary !== "boolean") - return "skip_no_primary: boolean expected"; - if (message.include_vschema != null && message.hasOwnProperty("include_vschema")) - if (typeof message.include_vschema !== "boolean") - return "include_vschema: boolean expected"; + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + if (!$util.isString(message.target_keyspace)) + return "target_keyspace: string expected"; + if (message.uuid != null && message.hasOwnProperty("uuid")) + if (!$util.isString(message.uuid)) + return "uuid: string expected"; return null; }; /** - * Creates a ValidateSchemaKeyspaceRequest 
message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffStopRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.ValidateSchemaKeyspaceRequest + * @memberof vtctldata.VDiffStopRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ValidateSchemaKeyspaceRequest} ValidateSchemaKeyspaceRequest + * @returns {vtctldata.VDiffStopRequest} VDiffStopRequest */ - ValidateSchemaKeyspaceRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ValidateSchemaKeyspaceRequest) + VDiffStopRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.VDiffStopRequest) return object; - let message = new $root.vtctldata.ValidateSchemaKeyspaceRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.exclude_tables) { - if (!Array.isArray(object.exclude_tables)) - throw TypeError(".vtctldata.ValidateSchemaKeyspaceRequest.exclude_tables: array expected"); - message.exclude_tables = []; - for (let i = 0; i < object.exclude_tables.length; ++i) - message.exclude_tables[i] = String(object.exclude_tables[i]); - } - if (object.include_views != null) - message.include_views = Boolean(object.include_views); - if (object.skip_no_primary != null) - message.skip_no_primary = Boolean(object.skip_no_primary); - if (object.include_vschema != null) - message.include_vschema = Boolean(object.include_vschema); + let message = new $root.vtctldata.VDiffStopRequest(); + if (object.workflow != null) + message.workflow = String(object.workflow); + if (object.target_keyspace != null) + message.target_keyspace = String(object.target_keyspace); + if (object.uuid != null) + message.uuid = String(object.uuid); return message; }; /** - * Creates a plain object from a ValidateSchemaKeyspaceRequest message. 
Also converts values to other types if specified. + * Creates a plain object from a VDiffStopRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.ValidateSchemaKeyspaceRequest + * @memberof vtctldata.VDiffStopRequest * @static - * @param {vtctldata.ValidateSchemaKeyspaceRequest} message ValidateSchemaKeyspaceRequest + * @param {vtctldata.VDiffStopRequest} message VDiffStopRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ValidateSchemaKeyspaceRequest.toObject = function toObject(message, options) { + VDiffStopRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.exclude_tables = []; if (options.defaults) { - object.keyspace = ""; - object.include_views = false; - object.skip_no_primary = false; - object.include_vschema = false; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.exclude_tables && message.exclude_tables.length) { - object.exclude_tables = []; - for (let j = 0; j < message.exclude_tables.length; ++j) - object.exclude_tables[j] = message.exclude_tables[j]; + object.workflow = ""; + object.target_keyspace = ""; + object.uuid = ""; } - if (message.include_views != null && message.hasOwnProperty("include_views")) - object.include_views = message.include_views; - if (message.skip_no_primary != null && message.hasOwnProperty("skip_no_primary")) - object.skip_no_primary = message.skip_no_primary; - if (message.include_vschema != null && message.hasOwnProperty("include_vschema")) - object.include_vschema = message.include_vschema; + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; + if (message.target_keyspace != null && message.hasOwnProperty("target_keyspace")) + object.target_keyspace = 
message.target_keyspace; + if (message.uuid != null && message.hasOwnProperty("uuid")) + object.uuid = message.uuid; return object; }; /** - * Converts this ValidateSchemaKeyspaceRequest to JSON. + * Converts this VDiffStopRequest to JSON. * @function toJSON - * @memberof vtctldata.ValidateSchemaKeyspaceRequest + * @memberof vtctldata.VDiffStopRequest * @instance * @returns {Object.} JSON object */ - ValidateSchemaKeyspaceRequest.prototype.toJSON = function toJSON() { + VDiffStopRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ValidateSchemaKeyspaceRequest + * Gets the default type url for VDiffStopRequest * @function getTypeUrl - * @memberof vtctldata.ValidateSchemaKeyspaceRequest + * @memberof vtctldata.VDiffStopRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ValidateSchemaKeyspaceRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VDiffStopRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ValidateSchemaKeyspaceRequest"; + return typeUrlPrefix + "/vtctldata.VDiffStopRequest"; }; - return ValidateSchemaKeyspaceRequest; + return VDiffStopRequest; })(); - vtctldata.ValidateSchemaKeyspaceResponse = (function() { + vtctldata.VDiffStopResponse = (function() { /** - * Properties of a ValidateSchemaKeyspaceResponse. + * Properties of a VDiffStopResponse. * @memberof vtctldata - * @interface IValidateSchemaKeyspaceResponse - * @property {Array.|null} [results] ValidateSchemaKeyspaceResponse results - * @property {Object.|null} [results_by_shard] ValidateSchemaKeyspaceResponse results_by_shard + * @interface IVDiffStopResponse */ /** - * Constructs a new ValidateSchemaKeyspaceResponse. + * Constructs a new VDiffStopResponse. 
* @memberof vtctldata - * @classdesc Represents a ValidateSchemaKeyspaceResponse. - * @implements IValidateSchemaKeyspaceResponse + * @classdesc Represents a VDiffStopResponse. + * @implements IVDiffStopResponse * @constructor - * @param {vtctldata.IValidateSchemaKeyspaceResponse=} [properties] Properties to set + * @param {vtctldata.IVDiffStopResponse=} [properties] Properties to set */ - function ValidateSchemaKeyspaceResponse(properties) { - this.results = []; - this.results_by_shard = {}; + function VDiffStopResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -139748,116 +162959,63 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ValidateSchemaKeyspaceResponse results. - * @member {Array.} results - * @memberof vtctldata.ValidateSchemaKeyspaceResponse - * @instance - */ - ValidateSchemaKeyspaceResponse.prototype.results = $util.emptyArray; - - /** - * ValidateSchemaKeyspaceResponse results_by_shard. - * @member {Object.} results_by_shard - * @memberof vtctldata.ValidateSchemaKeyspaceResponse - * @instance - */ - ValidateSchemaKeyspaceResponse.prototype.results_by_shard = $util.emptyObject; - - /** - * Creates a new ValidateSchemaKeyspaceResponse instance using the specified properties. + * Creates a new VDiffStopResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.ValidateSchemaKeyspaceResponse + * @memberof vtctldata.VDiffStopResponse * @static - * @param {vtctldata.IValidateSchemaKeyspaceResponse=} [properties] Properties to set - * @returns {vtctldata.ValidateSchemaKeyspaceResponse} ValidateSchemaKeyspaceResponse instance + * @param {vtctldata.IVDiffStopResponse=} [properties] Properties to set + * @returns {vtctldata.VDiffStopResponse} VDiffStopResponse instance */ - ValidateSchemaKeyspaceResponse.create = function create(properties) { - return new ValidateSchemaKeyspaceResponse(properties); + VDiffStopResponse.create = function create(properties) { + return new VDiffStopResponse(properties); }; /** - * Encodes the specified ValidateSchemaKeyspaceResponse message. Does not implicitly {@link vtctldata.ValidateSchemaKeyspaceResponse.verify|verify} messages. + * Encodes the specified VDiffStopResponse message. Does not implicitly {@link vtctldata.VDiffStopResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.ValidateSchemaKeyspaceResponse + * @memberof vtctldata.VDiffStopResponse * @static - * @param {vtctldata.IValidateSchemaKeyspaceResponse} message ValidateSchemaKeyspaceResponse message or plain object to encode + * @param {vtctldata.IVDiffStopResponse} message VDiffStopResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateSchemaKeyspaceResponse.encode = function encode(message, writer) { + VDiffStopResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.results != null && message.results.length) - for (let i = 0; i < message.results.length; ++i) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.results[i]); - if (message.results_by_shard != null && Object.hasOwnProperty.call(message, "results_by_shard")) - for (let keys = Object.keys(message.results_by_shard), i = 0; i < keys.length; ++i) { - 
writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); - $root.vtctldata.ValidateShardResponse.encode(message.results_by_shard[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); - } return writer; }; /** - * Encodes the specified ValidateSchemaKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateSchemaKeyspaceResponse.verify|verify} messages. + * Encodes the specified VDiffStopResponse message, length delimited. Does not implicitly {@link vtctldata.VDiffStopResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ValidateSchemaKeyspaceResponse + * @memberof vtctldata.VDiffStopResponse * @static - * @param {vtctldata.IValidateSchemaKeyspaceResponse} message ValidateSchemaKeyspaceResponse message or plain object to encode + * @param {vtctldata.IVDiffStopResponse} message VDiffStopResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateSchemaKeyspaceResponse.encodeDelimited = function encodeDelimited(message, writer) { + VDiffStopResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ValidateSchemaKeyspaceResponse message from the specified reader or buffer. + * Decodes a VDiffStopResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.ValidateSchemaKeyspaceResponse + * @memberof vtctldata.VDiffStopResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ValidateSchemaKeyspaceResponse} ValidateSchemaKeyspaceResponse + * @returns {vtctldata.VDiffStopResponse} VDiffStopResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateSchemaKeyspaceResponse.decode = function decode(reader, length) { + VDiffStopResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateSchemaKeyspaceResponse(), key, value; + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.VDiffStopResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 1: { - if (!(message.results && message.results.length)) - message.results = []; - message.results.push(reader.string()); - break; - } - case 2: { - if (message.results_by_shard === $util.emptyObject) - message.results_by_shard = {}; - let end2 = reader.uint32() + reader.pos; - key = ""; - value = null; - while (reader.pos < end2) { - let tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = $root.vtctldata.ValidateShardResponse.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.results_by_shard[key] = value; - break; - } default: reader.skipType(tag & 7); break; @@ -139867,164 +163025,112 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ValidateSchemaKeyspaceResponse message from the specified reader or buffer, length delimited. 
+ * Decodes a VDiffStopResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ValidateSchemaKeyspaceResponse + * @memberof vtctldata.VDiffStopResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ValidateSchemaKeyspaceResponse} ValidateSchemaKeyspaceResponse + * @returns {vtctldata.VDiffStopResponse} VDiffStopResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateSchemaKeyspaceResponse.decodeDelimited = function decodeDelimited(reader) { + VDiffStopResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ValidateSchemaKeyspaceResponse message. + * Verifies a VDiffStopResponse message. * @function verify - * @memberof vtctldata.ValidateSchemaKeyspaceResponse + * @memberof vtctldata.VDiffStopResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ValidateSchemaKeyspaceResponse.verify = function verify(message) { + VDiffStopResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.results != null && message.hasOwnProperty("results")) { - if (!Array.isArray(message.results)) - return "results: array expected"; - for (let i = 0; i < message.results.length; ++i) - if (!$util.isString(message.results[i])) - return "results: string[] expected"; - } - if (message.results_by_shard != null && message.hasOwnProperty("results_by_shard")) { - if (!$util.isObject(message.results_by_shard)) - return "results_by_shard: object expected"; - let key = Object.keys(message.results_by_shard); - for (let i = 0; i < key.length; ++i) { - let error = 
$root.vtctldata.ValidateShardResponse.verify(message.results_by_shard[key[i]]); - if (error) - return "results_by_shard." + error; - } - } return null; }; /** - * Creates a ValidateSchemaKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * Creates a VDiffStopResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.ValidateSchemaKeyspaceResponse + * @memberof vtctldata.VDiffStopResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ValidateSchemaKeyspaceResponse} ValidateSchemaKeyspaceResponse + * @returns {vtctldata.VDiffStopResponse} VDiffStopResponse */ - ValidateSchemaKeyspaceResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ValidateSchemaKeyspaceResponse) + VDiffStopResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.VDiffStopResponse) return object; - let message = new $root.vtctldata.ValidateSchemaKeyspaceResponse(); - if (object.results) { - if (!Array.isArray(object.results)) - throw TypeError(".vtctldata.ValidateSchemaKeyspaceResponse.results: array expected"); - message.results = []; - for (let i = 0; i < object.results.length; ++i) - message.results[i] = String(object.results[i]); - } - if (object.results_by_shard) { - if (typeof object.results_by_shard !== "object") - throw TypeError(".vtctldata.ValidateSchemaKeyspaceResponse.results_by_shard: object expected"); - message.results_by_shard = {}; - for (let keys = Object.keys(object.results_by_shard), i = 0; i < keys.length; ++i) { - if (typeof object.results_by_shard[keys[i]] !== "object") - throw TypeError(".vtctldata.ValidateSchemaKeyspaceResponse.results_by_shard: object expected"); - message.results_by_shard[keys[i]] = $root.vtctldata.ValidateShardResponse.fromObject(object.results_by_shard[keys[i]]); - } - } - return message; + return new 
$root.vtctldata.VDiffStopResponse(); }; /** - * Creates a plain object from a ValidateSchemaKeyspaceResponse message. Also converts values to other types if specified. + * Creates a plain object from a VDiffStopResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.ValidateSchemaKeyspaceResponse + * @memberof vtctldata.VDiffStopResponse * @static - * @param {vtctldata.ValidateSchemaKeyspaceResponse} message ValidateSchemaKeyspaceResponse + * @param {vtctldata.VDiffStopResponse} message VDiffStopResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ValidateSchemaKeyspaceResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.arrays || options.defaults) - object.results = []; - if (options.objects || options.defaults) - object.results_by_shard = {}; - if (message.results && message.results.length) { - object.results = []; - for (let j = 0; j < message.results.length; ++j) - object.results[j] = message.results[j]; - } - let keys2; - if (message.results_by_shard && (keys2 = Object.keys(message.results_by_shard)).length) { - object.results_by_shard = {}; - for (let j = 0; j < keys2.length; ++j) - object.results_by_shard[keys2[j]] = $root.vtctldata.ValidateShardResponse.toObject(message.results_by_shard[keys2[j]], options); - } - return object; + VDiffStopResponse.toObject = function toObject() { + return {}; }; /** - * Converts this ValidateSchemaKeyspaceResponse to JSON. + * Converts this VDiffStopResponse to JSON. 
* @function toJSON - * @memberof vtctldata.ValidateSchemaKeyspaceResponse + * @memberof vtctldata.VDiffStopResponse * @instance * @returns {Object.} JSON object */ - ValidateSchemaKeyspaceResponse.prototype.toJSON = function toJSON() { + VDiffStopResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ValidateSchemaKeyspaceResponse + * Gets the default type url for VDiffStopResponse * @function getTypeUrl - * @memberof vtctldata.ValidateSchemaKeyspaceResponse + * @memberof vtctldata.VDiffStopResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ValidateSchemaKeyspaceResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + VDiffStopResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ValidateSchemaKeyspaceResponse"; + return typeUrlPrefix + "/vtctldata.VDiffStopResponse"; }; - return ValidateSchemaKeyspaceResponse; + return VDiffStopResponse; })(); - vtctldata.ValidateShardRequest = (function() { + vtctldata.WorkflowDeleteRequest = (function() { /** - * Properties of a ValidateShardRequest. + * Properties of a WorkflowDeleteRequest. 
* @memberof vtctldata - * @interface IValidateShardRequest - * @property {string|null} [keyspace] ValidateShardRequest keyspace - * @property {string|null} [shard] ValidateShardRequest shard - * @property {boolean|null} [ping_tablets] ValidateShardRequest ping_tablets + * @interface IWorkflowDeleteRequest + * @property {string|null} [keyspace] WorkflowDeleteRequest keyspace + * @property {string|null} [workflow] WorkflowDeleteRequest workflow + * @property {boolean|null} [keep_data] WorkflowDeleteRequest keep_data + * @property {boolean|null} [keep_routing_rules] WorkflowDeleteRequest keep_routing_rules */ /** - * Constructs a new ValidateShardRequest. + * Constructs a new WorkflowDeleteRequest. * @memberof vtctldata - * @classdesc Represents a ValidateShardRequest. - * @implements IValidateShardRequest + * @classdesc Represents a WorkflowDeleteRequest. + * @implements IWorkflowDeleteRequest * @constructor - * @param {vtctldata.IValidateShardRequest=} [properties] Properties to set + * @param {vtctldata.IWorkflowDeleteRequest=} [properties] Properties to set */ - function ValidateShardRequest(properties) { + function WorkflowDeleteRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -140032,90 +163138,100 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ValidateShardRequest keyspace. + * WorkflowDeleteRequest keyspace. * @member {string} keyspace - * @memberof vtctldata.ValidateShardRequest + * @memberof vtctldata.WorkflowDeleteRequest * @instance */ - ValidateShardRequest.prototype.keyspace = ""; + WorkflowDeleteRequest.prototype.keyspace = ""; /** - * ValidateShardRequest shard. - * @member {string} shard - * @memberof vtctldata.ValidateShardRequest + * WorkflowDeleteRequest workflow. 
+ * @member {string} workflow + * @memberof vtctldata.WorkflowDeleteRequest * @instance */ - ValidateShardRequest.prototype.shard = ""; + WorkflowDeleteRequest.prototype.workflow = ""; /** - * ValidateShardRequest ping_tablets. - * @member {boolean} ping_tablets - * @memberof vtctldata.ValidateShardRequest + * WorkflowDeleteRequest keep_data. + * @member {boolean} keep_data + * @memberof vtctldata.WorkflowDeleteRequest * @instance */ - ValidateShardRequest.prototype.ping_tablets = false; + WorkflowDeleteRequest.prototype.keep_data = false; /** - * Creates a new ValidateShardRequest instance using the specified properties. + * WorkflowDeleteRequest keep_routing_rules. + * @member {boolean} keep_routing_rules + * @memberof vtctldata.WorkflowDeleteRequest + * @instance + */ + WorkflowDeleteRequest.prototype.keep_routing_rules = false; + + /** + * Creates a new WorkflowDeleteRequest instance using the specified properties. * @function create - * @memberof vtctldata.ValidateShardRequest + * @memberof vtctldata.WorkflowDeleteRequest * @static - * @param {vtctldata.IValidateShardRequest=} [properties] Properties to set - * @returns {vtctldata.ValidateShardRequest} ValidateShardRequest instance + * @param {vtctldata.IWorkflowDeleteRequest=} [properties] Properties to set + * @returns {vtctldata.WorkflowDeleteRequest} WorkflowDeleteRequest instance */ - ValidateShardRequest.create = function create(properties) { - return new ValidateShardRequest(properties); + WorkflowDeleteRequest.create = function create(properties) { + return new WorkflowDeleteRequest(properties); }; /** - * Encodes the specified ValidateShardRequest message. Does not implicitly {@link vtctldata.ValidateShardRequest.verify|verify} messages. + * Encodes the specified WorkflowDeleteRequest message. Does not implicitly {@link vtctldata.WorkflowDeleteRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ValidateShardRequest + * @memberof vtctldata.WorkflowDeleteRequest * @static - * @param {vtctldata.IValidateShardRequest} message ValidateShardRequest message or plain object to encode + * @param {vtctldata.IWorkflowDeleteRequest} message WorkflowDeleteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateShardRequest.encode = function encode(message, writer) { + WorkflowDeleteRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - if (message.ping_tablets != null && Object.hasOwnProperty.call(message, "ping_tablets")) - writer.uint32(/* id 3, wireType 0 =*/24).bool(message.ping_tablets); + if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.workflow); + if (message.keep_data != null && Object.hasOwnProperty.call(message, "keep_data")) + writer.uint32(/* id 3, wireType 0 =*/24).bool(message.keep_data); + if (message.keep_routing_rules != null && Object.hasOwnProperty.call(message, "keep_routing_rules")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.keep_routing_rules); return writer; }; /** - * Encodes the specified ValidateShardRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateShardRequest.verify|verify} messages. + * Encodes the specified WorkflowDeleteRequest message, length delimited. Does not implicitly {@link vtctldata.WorkflowDeleteRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ValidateShardRequest + * @memberof vtctldata.WorkflowDeleteRequest * @static - * @param {vtctldata.IValidateShardRequest} message ValidateShardRequest message or plain object to encode + * @param {vtctldata.IWorkflowDeleteRequest} message WorkflowDeleteRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateShardRequest.encodeDelimited = function encodeDelimited(message, writer) { + WorkflowDeleteRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ValidateShardRequest message from the specified reader or buffer. + * Decodes a WorkflowDeleteRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ValidateShardRequest + * @memberof vtctldata.WorkflowDeleteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ValidateShardRequest} ValidateShardRequest + * @returns {vtctldata.WorkflowDeleteRequest} WorkflowDeleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateShardRequest.decode = function decode(reader, length) { + WorkflowDeleteRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateShardRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.WorkflowDeleteRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -140124,11 +163240,15 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 2: { - message.shard = reader.string(); + message.workflow = reader.string(); break; } case 3: { - message.ping_tablets = reader.bool(); + message.keep_data = reader.bool(); + break; + } + case 4: { + message.keep_routing_rules = reader.bool(); break; } default: @@ -140140,140 +163260,149 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ValidateShardRequest message from the specified reader or buffer, length delimited. + * Decodes a WorkflowDeleteRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ValidateShardRequest + * @memberof vtctldata.WorkflowDeleteRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ValidateShardRequest} ValidateShardRequest + * @returns {vtctldata.WorkflowDeleteRequest} WorkflowDeleteRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateShardRequest.decodeDelimited = function decodeDelimited(reader) { + WorkflowDeleteRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ValidateShardRequest message. + * Verifies a WorkflowDeleteRequest message. 
* @function verify - * @memberof vtctldata.ValidateShardRequest + * @memberof vtctldata.WorkflowDeleteRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ValidateShardRequest.verify = function verify(message) { + WorkflowDeleteRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.keyspace != null && message.hasOwnProperty("keyspace")) if (!$util.isString(message.keyspace)) return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - if (message.ping_tablets != null && message.hasOwnProperty("ping_tablets")) - if (typeof message.ping_tablets !== "boolean") - return "ping_tablets: boolean expected"; + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; + if (message.keep_data != null && message.hasOwnProperty("keep_data")) + if (typeof message.keep_data !== "boolean") + return "keep_data: boolean expected"; + if (message.keep_routing_rules != null && message.hasOwnProperty("keep_routing_rules")) + if (typeof message.keep_routing_rules !== "boolean") + return "keep_routing_rules: boolean expected"; return null; }; /** - * Creates a ValidateShardRequest message from a plain object. Also converts values to their respective internal types. + * Creates a WorkflowDeleteRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ValidateShardRequest + * @memberof vtctldata.WorkflowDeleteRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ValidateShardRequest} ValidateShardRequest + * @returns {vtctldata.WorkflowDeleteRequest} WorkflowDeleteRequest */ - ValidateShardRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ValidateShardRequest) + WorkflowDeleteRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.WorkflowDeleteRequest) return object; - let message = new $root.vtctldata.ValidateShardRequest(); + let message = new $root.vtctldata.WorkflowDeleteRequest(); if (object.keyspace != null) message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - if (object.ping_tablets != null) - message.ping_tablets = Boolean(object.ping_tablets); + if (object.workflow != null) + message.workflow = String(object.workflow); + if (object.keep_data != null) + message.keep_data = Boolean(object.keep_data); + if (object.keep_routing_rules != null) + message.keep_routing_rules = Boolean(object.keep_routing_rules); return message; }; /** - * Creates a plain object from a ValidateShardRequest message. Also converts values to other types if specified. + * Creates a plain object from a WorkflowDeleteRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ValidateShardRequest + * @memberof vtctldata.WorkflowDeleteRequest * @static - * @param {vtctldata.ValidateShardRequest} message ValidateShardRequest + * @param {vtctldata.WorkflowDeleteRequest} message WorkflowDeleteRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ValidateShardRequest.toObject = function toObject(message, options) { + WorkflowDeleteRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.defaults) { object.keyspace = ""; - object.shard = ""; - object.ping_tablets = false; + object.workflow = ""; + object.keep_data = false; + object.keep_routing_rules = false; } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - if (message.ping_tablets != null && message.hasOwnProperty("ping_tablets")) - object.ping_tablets = message.ping_tablets; + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; + if (message.keep_data != null && message.hasOwnProperty("keep_data")) + object.keep_data = message.keep_data; + if (message.keep_routing_rules != null && message.hasOwnProperty("keep_routing_rules")) + object.keep_routing_rules = message.keep_routing_rules; return object; }; /** - * Converts this ValidateShardRequest to JSON. + * Converts this WorkflowDeleteRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ValidateShardRequest + * @memberof vtctldata.WorkflowDeleteRequest * @instance * @returns {Object.} JSON object */ - ValidateShardRequest.prototype.toJSON = function toJSON() { + WorkflowDeleteRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ValidateShardRequest + * Gets the default type url for WorkflowDeleteRequest * @function getTypeUrl - * @memberof vtctldata.ValidateShardRequest + * @memberof vtctldata.WorkflowDeleteRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ValidateShardRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + WorkflowDeleteRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ValidateShardRequest"; + return typeUrlPrefix + "/vtctldata.WorkflowDeleteRequest"; }; - return ValidateShardRequest; + return WorkflowDeleteRequest; })(); - vtctldata.ValidateShardResponse = (function() { + vtctldata.WorkflowDeleteResponse = (function() { /** - * Properties of a ValidateShardResponse. + * Properties of a WorkflowDeleteResponse. * @memberof vtctldata - * @interface IValidateShardResponse - * @property {Array.|null} [results] ValidateShardResponse results + * @interface IWorkflowDeleteResponse + * @property {string|null} [summary] WorkflowDeleteResponse summary + * @property {Array.|null} [details] WorkflowDeleteResponse details */ /** - * Constructs a new ValidateShardResponse. + * Constructs a new WorkflowDeleteResponse. * @memberof vtctldata - * @classdesc Represents a ValidateShardResponse. - * @implements IValidateShardResponse + * @classdesc Represents a WorkflowDeleteResponse. 
+ * @implements IWorkflowDeleteResponse * @constructor - * @param {vtctldata.IValidateShardResponse=} [properties] Properties to set + * @param {vtctldata.IWorkflowDeleteResponse=} [properties] Properties to set */ - function ValidateShardResponse(properties) { - this.results = []; + function WorkflowDeleteResponse(properties) { + this.details = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -140281,78 +163410,92 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ValidateShardResponse results. - * @member {Array.} results - * @memberof vtctldata.ValidateShardResponse + * WorkflowDeleteResponse summary. + * @member {string} summary + * @memberof vtctldata.WorkflowDeleteResponse * @instance */ - ValidateShardResponse.prototype.results = $util.emptyArray; + WorkflowDeleteResponse.prototype.summary = ""; /** - * Creates a new ValidateShardResponse instance using the specified properties. + * WorkflowDeleteResponse details. + * @member {Array.} details + * @memberof vtctldata.WorkflowDeleteResponse + * @instance + */ + WorkflowDeleteResponse.prototype.details = $util.emptyArray; + + /** + * Creates a new WorkflowDeleteResponse instance using the specified properties. 
* @function create - * @memberof vtctldata.ValidateShardResponse + * @memberof vtctldata.WorkflowDeleteResponse * @static - * @param {vtctldata.IValidateShardResponse=} [properties] Properties to set - * @returns {vtctldata.ValidateShardResponse} ValidateShardResponse instance + * @param {vtctldata.IWorkflowDeleteResponse=} [properties] Properties to set + * @returns {vtctldata.WorkflowDeleteResponse} WorkflowDeleteResponse instance */ - ValidateShardResponse.create = function create(properties) { - return new ValidateShardResponse(properties); + WorkflowDeleteResponse.create = function create(properties) { + return new WorkflowDeleteResponse(properties); }; /** - * Encodes the specified ValidateShardResponse message. Does not implicitly {@link vtctldata.ValidateShardResponse.verify|verify} messages. + * Encodes the specified WorkflowDeleteResponse message. Does not implicitly {@link vtctldata.WorkflowDeleteResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.ValidateShardResponse + * @memberof vtctldata.WorkflowDeleteResponse * @static - * @param {vtctldata.IValidateShardResponse} message ValidateShardResponse message or plain object to encode + * @param {vtctldata.IWorkflowDeleteResponse} message WorkflowDeleteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateShardResponse.encode = function encode(message, writer) { + WorkflowDeleteResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.results != null && message.results.length) - for (let i = 0; i < message.results.length; ++i) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.results[i]); + if (message.summary != null && Object.hasOwnProperty.call(message, "summary")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.summary); + if (message.details != null && message.details.length) + for (let i = 0; i < 
message.details.length; ++i) + $root.vtctldata.WorkflowDeleteResponse.TabletInfo.encode(message.details[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); return writer; }; /** - * Encodes the specified ValidateShardResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateShardResponse.verify|verify} messages. + * Encodes the specified WorkflowDeleteResponse message, length delimited. Does not implicitly {@link vtctldata.WorkflowDeleteResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ValidateShardResponse + * @memberof vtctldata.WorkflowDeleteResponse * @static - * @param {vtctldata.IValidateShardResponse} message ValidateShardResponse message or plain object to encode + * @param {vtctldata.IWorkflowDeleteResponse} message WorkflowDeleteResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateShardResponse.encodeDelimited = function encodeDelimited(message, writer) { + WorkflowDeleteResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ValidateShardResponse message from the specified reader or buffer. + * Decodes a WorkflowDeleteResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.ValidateShardResponse + * @memberof vtctldata.WorkflowDeleteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ValidateShardResponse} ValidateShardResponse + * @returns {vtctldata.WorkflowDeleteResponse} WorkflowDeleteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateShardResponse.decode = function decode(reader, length) { + WorkflowDeleteResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateShardResponse(); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.WorkflowDeleteResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.results && message.results.length)) - message.results = []; - message.results.push(reader.string()); + message.summary = reader.string(); + break; + } + case 2: { + if (!(message.details && message.details.length)) + message.details = []; + message.details.push($root.vtctldata.WorkflowDeleteResponse.TabletInfo.decode(reader, reader.uint32())); break; } default: @@ -140364,134 +163507,381 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ValidateShardResponse message from the specified reader or buffer, length delimited. + * Decodes a WorkflowDeleteResponse message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.ValidateShardResponse + * @memberof vtctldata.WorkflowDeleteResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ValidateShardResponse} ValidateShardResponse + * @returns {vtctldata.WorkflowDeleteResponse} WorkflowDeleteResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateShardResponse.decodeDelimited = function decodeDelimited(reader) { + WorkflowDeleteResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ValidateShardResponse message. + * Verifies a WorkflowDeleteResponse message. * @function verify - * @memberof vtctldata.ValidateShardResponse + * @memberof vtctldata.WorkflowDeleteResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ValidateShardResponse.verify = function verify(message) { + WorkflowDeleteResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.results != null && message.hasOwnProperty("results")) { - if (!Array.isArray(message.results)) - return "results: array expected"; - for (let i = 0; i < message.results.length; ++i) - if (!$util.isString(message.results[i])) - return "results: string[] expected"; + if (message.summary != null && message.hasOwnProperty("summary")) + if (!$util.isString(message.summary)) + return "summary: string expected"; + if (message.details != null && message.hasOwnProperty("details")) { + if (!Array.isArray(message.details)) + return "details: array expected"; + for (let i = 0; i < message.details.length; ++i) { + let error = 
$root.vtctldata.WorkflowDeleteResponse.TabletInfo.verify(message.details[i]); + if (error) + return "details." + error; + } } return null; }; /** - * Creates a ValidateShardResponse message from a plain object. Also converts values to their respective internal types. + * Creates a WorkflowDeleteResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.ValidateShardResponse + * @memberof vtctldata.WorkflowDeleteResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ValidateShardResponse} ValidateShardResponse + * @returns {vtctldata.WorkflowDeleteResponse} WorkflowDeleteResponse */ - ValidateShardResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ValidateShardResponse) + WorkflowDeleteResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.WorkflowDeleteResponse) return object; - let message = new $root.vtctldata.ValidateShardResponse(); - if (object.results) { - if (!Array.isArray(object.results)) - throw TypeError(".vtctldata.ValidateShardResponse.results: array expected"); - message.results = []; - for (let i = 0; i < object.results.length; ++i) - message.results[i] = String(object.results[i]); + let message = new $root.vtctldata.WorkflowDeleteResponse(); + if (object.summary != null) + message.summary = String(object.summary); + if (object.details) { + if (!Array.isArray(object.details)) + throw TypeError(".vtctldata.WorkflowDeleteResponse.details: array expected"); + message.details = []; + for (let i = 0; i < object.details.length; ++i) { + if (typeof object.details[i] !== "object") + throw TypeError(".vtctldata.WorkflowDeleteResponse.details: object expected"); + message.details[i] = $root.vtctldata.WorkflowDeleteResponse.TabletInfo.fromObject(object.details[i]); + } } return message; }; /** - * Creates a plain object from a ValidateShardResponse message. 
Also converts values to other types if specified. + * Creates a plain object from a WorkflowDeleteResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.ValidateShardResponse + * @memberof vtctldata.WorkflowDeleteResponse * @static - * @param {vtctldata.ValidateShardResponse} message ValidateShardResponse + * @param {vtctldata.WorkflowDeleteResponse} message WorkflowDeleteResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ValidateShardResponse.toObject = function toObject(message, options) { + WorkflowDeleteResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) - object.results = []; - if (message.results && message.results.length) { - object.results = []; - for (let j = 0; j < message.results.length; ++j) - object.results[j] = message.results[j]; + object.details = []; + if (options.defaults) + object.summary = ""; + if (message.summary != null && message.hasOwnProperty("summary")) + object.summary = message.summary; + if (message.details && message.details.length) { + object.details = []; + for (let j = 0; j < message.details.length; ++j) + object.details[j] = $root.vtctldata.WorkflowDeleteResponse.TabletInfo.toObject(message.details[j], options); } return object; }; /** - * Converts this ValidateShardResponse to JSON. + * Converts this WorkflowDeleteResponse to JSON. 
* @function toJSON - * @memberof vtctldata.ValidateShardResponse + * @memberof vtctldata.WorkflowDeleteResponse * @instance * @returns {Object.} JSON object */ - ValidateShardResponse.prototype.toJSON = function toJSON() { + WorkflowDeleteResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ValidateShardResponse + * Gets the default type url for WorkflowDeleteResponse * @function getTypeUrl - * @memberof vtctldata.ValidateShardResponse + * @memberof vtctldata.WorkflowDeleteResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ValidateShardResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + WorkflowDeleteResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ValidateShardResponse"; + return typeUrlPrefix + "/vtctldata.WorkflowDeleteResponse"; }; - return ValidateShardResponse; + WorkflowDeleteResponse.TabletInfo = (function() { + + /** + * Properties of a TabletInfo. + * @memberof vtctldata.WorkflowDeleteResponse + * @interface ITabletInfo + * @property {topodata.ITabletAlias|null} [tablet] TabletInfo tablet + * @property {boolean|null} [deleted] TabletInfo deleted + */ + + /** + * Constructs a new TabletInfo. + * @memberof vtctldata.WorkflowDeleteResponse + * @classdesc Represents a TabletInfo. + * @implements ITabletInfo + * @constructor + * @param {vtctldata.WorkflowDeleteResponse.ITabletInfo=} [properties] Properties to set + */ + function TabletInfo(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * TabletInfo tablet. 
+ * @member {topodata.ITabletAlias|null|undefined} tablet + * @memberof vtctldata.WorkflowDeleteResponse.TabletInfo + * @instance + */ + TabletInfo.prototype.tablet = null; + + /** + * TabletInfo deleted. + * @member {boolean} deleted + * @memberof vtctldata.WorkflowDeleteResponse.TabletInfo + * @instance + */ + TabletInfo.prototype.deleted = false; + + /** + * Creates a new TabletInfo instance using the specified properties. + * @function create + * @memberof vtctldata.WorkflowDeleteResponse.TabletInfo + * @static + * @param {vtctldata.WorkflowDeleteResponse.ITabletInfo=} [properties] Properties to set + * @returns {vtctldata.WorkflowDeleteResponse.TabletInfo} TabletInfo instance + */ + TabletInfo.create = function create(properties) { + return new TabletInfo(properties); + }; + + /** + * Encodes the specified TabletInfo message. Does not implicitly {@link vtctldata.WorkflowDeleteResponse.TabletInfo.verify|verify} messages. + * @function encode + * @memberof vtctldata.WorkflowDeleteResponse.TabletInfo + * @static + * @param {vtctldata.WorkflowDeleteResponse.ITabletInfo} message TabletInfo message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + TabletInfo.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.tablet != null && Object.hasOwnProperty.call(message, "tablet")) + $root.topodata.TabletAlias.encode(message.tablet, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.deleted != null && Object.hasOwnProperty.call(message, "deleted")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.deleted); + return writer; + }; + + /** + * Encodes the specified TabletInfo message, length delimited. Does not implicitly {@link vtctldata.WorkflowDeleteResponse.TabletInfo.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.WorkflowDeleteResponse.TabletInfo + * @static + * @param {vtctldata.WorkflowDeleteResponse.ITabletInfo} message TabletInfo message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + TabletInfo.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a TabletInfo message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.WorkflowDeleteResponse.TabletInfo + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.WorkflowDeleteResponse.TabletInfo} TabletInfo + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + TabletInfo.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.WorkflowDeleteResponse.TabletInfo(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.tablet = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 2: { + message.deleted = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a TabletInfo message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.WorkflowDeleteResponse.TabletInfo + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.WorkflowDeleteResponse.TabletInfo} TabletInfo + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + TabletInfo.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a TabletInfo message. + * @function verify + * @memberof vtctldata.WorkflowDeleteResponse.TabletInfo + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + TabletInfo.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.tablet != null && message.hasOwnProperty("tablet")) { + let error = $root.topodata.TabletAlias.verify(message.tablet); + if (error) + return "tablet." + error; + } + if (message.deleted != null && message.hasOwnProperty("deleted")) + if (typeof message.deleted !== "boolean") + return "deleted: boolean expected"; + return null; + }; + + /** + * Creates a TabletInfo message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.WorkflowDeleteResponse.TabletInfo + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.WorkflowDeleteResponse.TabletInfo} TabletInfo + */ + TabletInfo.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.WorkflowDeleteResponse.TabletInfo) + return object; + let message = new $root.vtctldata.WorkflowDeleteResponse.TabletInfo(); + if (object.tablet != null) { + if (typeof object.tablet !== "object") + throw TypeError(".vtctldata.WorkflowDeleteResponse.TabletInfo.tablet: object expected"); + message.tablet = $root.topodata.TabletAlias.fromObject(object.tablet); + } + if (object.deleted != null) + message.deleted = Boolean(object.deleted); + return message; + }; + + /** + * Creates a plain object from a TabletInfo message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.WorkflowDeleteResponse.TabletInfo + * @static + * @param {vtctldata.WorkflowDeleteResponse.TabletInfo} message TabletInfo + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + TabletInfo.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.tablet = null; + object.deleted = false; + } + if (message.tablet != null && message.hasOwnProperty("tablet")) + object.tablet = $root.topodata.TabletAlias.toObject(message.tablet, options); + if (message.deleted != null && message.hasOwnProperty("deleted")) + object.deleted = message.deleted; + return object; + }; + + /** + * Converts this TabletInfo to JSON. 
+ * @function toJSON + * @memberof vtctldata.WorkflowDeleteResponse.TabletInfo + * @instance + * @returns {Object.} JSON object + */ + TabletInfo.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for TabletInfo + * @function getTypeUrl + * @memberof vtctldata.WorkflowDeleteResponse.TabletInfo + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + TabletInfo.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.WorkflowDeleteResponse.TabletInfo"; + }; + + return TabletInfo; + })(); + + return WorkflowDeleteResponse; })(); - vtctldata.ValidateVersionKeyspaceRequest = (function() { + vtctldata.WorkflowStatusRequest = (function() { /** - * Properties of a ValidateVersionKeyspaceRequest. + * Properties of a WorkflowStatusRequest. * @memberof vtctldata - * @interface IValidateVersionKeyspaceRequest - * @property {string|null} [keyspace] ValidateVersionKeyspaceRequest keyspace + * @interface IWorkflowStatusRequest + * @property {string|null} [keyspace] WorkflowStatusRequest keyspace + * @property {string|null} [workflow] WorkflowStatusRequest workflow */ /** - * Constructs a new ValidateVersionKeyspaceRequest. + * Constructs a new WorkflowStatusRequest. * @memberof vtctldata - * @classdesc Represents a ValidateVersionKeyspaceRequest. - * @implements IValidateVersionKeyspaceRequest + * @classdesc Represents a WorkflowStatusRequest. 
+ * @implements IWorkflowStatusRequest * @constructor - * @param {vtctldata.IValidateVersionKeyspaceRequest=} [properties] Properties to set + * @param {vtctldata.IWorkflowStatusRequest=} [properties] Properties to set */ - function ValidateVersionKeyspaceRequest(properties) { + function WorkflowStatusRequest(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -140499,70 +163889,80 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ValidateVersionKeyspaceRequest keyspace. + * WorkflowStatusRequest keyspace. * @member {string} keyspace - * @memberof vtctldata.ValidateVersionKeyspaceRequest + * @memberof vtctldata.WorkflowStatusRequest * @instance */ - ValidateVersionKeyspaceRequest.prototype.keyspace = ""; + WorkflowStatusRequest.prototype.keyspace = ""; /** - * Creates a new ValidateVersionKeyspaceRequest instance using the specified properties. + * WorkflowStatusRequest workflow. + * @member {string} workflow + * @memberof vtctldata.WorkflowStatusRequest + * @instance + */ + WorkflowStatusRequest.prototype.workflow = ""; + + /** + * Creates a new WorkflowStatusRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.ValidateVersionKeyspaceRequest + * @memberof vtctldata.WorkflowStatusRequest * @static - * @param {vtctldata.IValidateVersionKeyspaceRequest=} [properties] Properties to set - * @returns {vtctldata.ValidateVersionKeyspaceRequest} ValidateVersionKeyspaceRequest instance + * @param {vtctldata.IWorkflowStatusRequest=} [properties] Properties to set + * @returns {vtctldata.WorkflowStatusRequest} WorkflowStatusRequest instance */ - ValidateVersionKeyspaceRequest.create = function create(properties) { - return new ValidateVersionKeyspaceRequest(properties); + WorkflowStatusRequest.create = function create(properties) { + return new WorkflowStatusRequest(properties); }; /** - * Encodes the specified ValidateVersionKeyspaceRequest message. Does not implicitly {@link vtctldata.ValidateVersionKeyspaceRequest.verify|verify} messages. + * Encodes the specified WorkflowStatusRequest message. Does not implicitly {@link vtctldata.WorkflowStatusRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ValidateVersionKeyspaceRequest + * @memberof vtctldata.WorkflowStatusRequest * @static - * @param {vtctldata.IValidateVersionKeyspaceRequest} message ValidateVersionKeyspaceRequest message or plain object to encode + * @param {vtctldata.IWorkflowStatusRequest} message WorkflowStatusRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateVersionKeyspaceRequest.encode = function encode(message, writer) { + WorkflowStatusRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.workflow); return writer; }; /** - * Encodes the specified ValidateVersionKeyspaceRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateVersionKeyspaceRequest.verify|verify} messages. + * Encodes the specified WorkflowStatusRequest message, length delimited. Does not implicitly {@link vtctldata.WorkflowStatusRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ValidateVersionKeyspaceRequest + * @memberof vtctldata.WorkflowStatusRequest * @static - * @param {vtctldata.IValidateVersionKeyspaceRequest} message ValidateVersionKeyspaceRequest message or plain object to encode + * @param {vtctldata.IWorkflowStatusRequest} message WorkflowStatusRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateVersionKeyspaceRequest.encodeDelimited = function encodeDelimited(message, writer) { + WorkflowStatusRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ValidateVersionKeyspaceRequest message from the specified reader or buffer. + * Decodes a WorkflowStatusRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ValidateVersionKeyspaceRequest + * @memberof vtctldata.WorkflowStatusRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ValidateVersionKeyspaceRequest} ValidateVersionKeyspaceRequest + * @returns {vtctldata.WorkflowStatusRequest} WorkflowStatusRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateVersionKeyspaceRequest.decode = function decode(reader, length) { + WorkflowStatusRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateVersionKeyspaceRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.WorkflowStatusRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -140570,6 +163970,10 @@ export const vtctldata = $root.vtctldata = (() => { message.keyspace = reader.string(); break; } + case 2: { + message.workflow = reader.string(); + break; + } default: reader.skipType(tag & 7); break; @@ -140579,125 +163983,135 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ValidateVersionKeyspaceRequest message from the specified reader or buffer, length delimited. + * Decodes a WorkflowStatusRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ValidateVersionKeyspaceRequest + * @memberof vtctldata.WorkflowStatusRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ValidateVersionKeyspaceRequest} ValidateVersionKeyspaceRequest + * @returns {vtctldata.WorkflowStatusRequest} WorkflowStatusRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateVersionKeyspaceRequest.decodeDelimited = function decodeDelimited(reader) { + WorkflowStatusRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ValidateVersionKeyspaceRequest message. + * Verifies a WorkflowStatusRequest message. 
* @function verify - * @memberof vtctldata.ValidateVersionKeyspaceRequest + * @memberof vtctldata.WorkflowStatusRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ValidateVersionKeyspaceRequest.verify = function verify(message) { + WorkflowStatusRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.keyspace != null && message.hasOwnProperty("keyspace")) if (!$util.isString(message.keyspace)) return "keyspace: string expected"; + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; return null; }; /** - * Creates a ValidateVersionKeyspaceRequest message from a plain object. Also converts values to their respective internal types. + * Creates a WorkflowStatusRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ValidateVersionKeyspaceRequest + * @memberof vtctldata.WorkflowStatusRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ValidateVersionKeyspaceRequest} ValidateVersionKeyspaceRequest + * @returns {vtctldata.WorkflowStatusRequest} WorkflowStatusRequest */ - ValidateVersionKeyspaceRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ValidateVersionKeyspaceRequest) + WorkflowStatusRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.WorkflowStatusRequest) return object; - let message = new $root.vtctldata.ValidateVersionKeyspaceRequest(); + let message = new $root.vtctldata.WorkflowStatusRequest(); if (object.keyspace != null) message.keyspace = String(object.keyspace); + if (object.workflow != null) + message.workflow = String(object.workflow); return message; }; /** - * Creates a plain object from a ValidateVersionKeyspaceRequest message. Also converts values to other types if specified. + * Creates a plain object from a WorkflowStatusRequest message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ValidateVersionKeyspaceRequest + * @memberof vtctldata.WorkflowStatusRequest * @static - * @param {vtctldata.ValidateVersionKeyspaceRequest} message ValidateVersionKeyspaceRequest + * @param {vtctldata.WorkflowStatusRequest} message WorkflowStatusRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ValidateVersionKeyspaceRequest.toObject = function toObject(message, options) { + WorkflowStatusRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.defaults) + if (options.defaults) { object.keyspace = ""; + object.workflow = ""; + } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; return object; }; /** - * Converts this ValidateVersionKeyspaceRequest to JSON. + * Converts this WorkflowStatusRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ValidateVersionKeyspaceRequest + * @memberof vtctldata.WorkflowStatusRequest * @instance * @returns {Object.} JSON object */ - ValidateVersionKeyspaceRequest.prototype.toJSON = function toJSON() { + WorkflowStatusRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ValidateVersionKeyspaceRequest + * Gets the default type url for WorkflowStatusRequest * @function getTypeUrl - * @memberof vtctldata.ValidateVersionKeyspaceRequest + * @memberof vtctldata.WorkflowStatusRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ValidateVersionKeyspaceRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + WorkflowStatusRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ValidateVersionKeyspaceRequest"; + return typeUrlPrefix + "/vtctldata.WorkflowStatusRequest"; }; - return ValidateVersionKeyspaceRequest; + return WorkflowStatusRequest; })(); - vtctldata.ValidateVersionKeyspaceResponse = (function() { + vtctldata.WorkflowStatusResponse = (function() { /** - * Properties of a ValidateVersionKeyspaceResponse. + * Properties of a WorkflowStatusResponse. 
* @memberof vtctldata - * @interface IValidateVersionKeyspaceResponse - * @property {Array.|null} [results] ValidateVersionKeyspaceResponse results - * @property {Object.|null} [results_by_shard] ValidateVersionKeyspaceResponse results_by_shard + * @interface IWorkflowStatusResponse + * @property {Object.|null} [table_copy_state] WorkflowStatusResponse table_copy_state + * @property {Object.|null} [shard_streams] WorkflowStatusResponse shard_streams + * @property {string|null} [traffic_state] WorkflowStatusResponse traffic_state */ /** - * Constructs a new ValidateVersionKeyspaceResponse. + * Constructs a new WorkflowStatusResponse. * @memberof vtctldata - * @classdesc Represents a ValidateVersionKeyspaceResponse. - * @implements IValidateVersionKeyspaceResponse + * @classdesc Represents a WorkflowStatusResponse. + * @implements IWorkflowStatusResponse * @constructor - * @param {vtctldata.IValidateVersionKeyspaceResponse=} [properties] Properties to set - */ - function ValidateVersionKeyspaceResponse(properties) { - this.results = []; - this.results_by_shard = {}; + * @param {vtctldata.IWorkflowStatusResponse=} [properties] Properties to set + */ + function WorkflowStatusResponse(properties) { + this.table_copy_state = {}; + this.shard_streams = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -140705,96 +164119,125 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ValidateVersionKeyspaceResponse results. - * @member {Array.} results - * @memberof vtctldata.ValidateVersionKeyspaceResponse + * WorkflowStatusResponse table_copy_state. + * @member {Object.} table_copy_state + * @memberof vtctldata.WorkflowStatusResponse * @instance */ - ValidateVersionKeyspaceResponse.prototype.results = $util.emptyArray; + WorkflowStatusResponse.prototype.table_copy_state = $util.emptyObject; /** - * ValidateVersionKeyspaceResponse results_by_shard. 
- * @member {Object.} results_by_shard - * @memberof vtctldata.ValidateVersionKeyspaceResponse + * WorkflowStatusResponse shard_streams. + * @member {Object.} shard_streams + * @memberof vtctldata.WorkflowStatusResponse * @instance */ - ValidateVersionKeyspaceResponse.prototype.results_by_shard = $util.emptyObject; + WorkflowStatusResponse.prototype.shard_streams = $util.emptyObject; /** - * Creates a new ValidateVersionKeyspaceResponse instance using the specified properties. + * WorkflowStatusResponse traffic_state. + * @member {string} traffic_state + * @memberof vtctldata.WorkflowStatusResponse + * @instance + */ + WorkflowStatusResponse.prototype.traffic_state = ""; + + /** + * Creates a new WorkflowStatusResponse instance using the specified properties. * @function create - * @memberof vtctldata.ValidateVersionKeyspaceResponse + * @memberof vtctldata.WorkflowStatusResponse * @static - * @param {vtctldata.IValidateVersionKeyspaceResponse=} [properties] Properties to set - * @returns {vtctldata.ValidateVersionKeyspaceResponse} ValidateVersionKeyspaceResponse instance + * @param {vtctldata.IWorkflowStatusResponse=} [properties] Properties to set + * @returns {vtctldata.WorkflowStatusResponse} WorkflowStatusResponse instance */ - ValidateVersionKeyspaceResponse.create = function create(properties) { - return new ValidateVersionKeyspaceResponse(properties); + WorkflowStatusResponse.create = function create(properties) { + return new WorkflowStatusResponse(properties); }; /** - * Encodes the specified ValidateVersionKeyspaceResponse message. Does not implicitly {@link vtctldata.ValidateVersionKeyspaceResponse.verify|verify} messages. + * Encodes the specified WorkflowStatusResponse message. Does not implicitly {@link vtctldata.WorkflowStatusResponse.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ValidateVersionKeyspaceResponse + * @memberof vtctldata.WorkflowStatusResponse * @static - * @param {vtctldata.IValidateVersionKeyspaceResponse} message ValidateVersionKeyspaceResponse message or plain object to encode + * @param {vtctldata.IWorkflowStatusResponse} message WorkflowStatusResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateVersionKeyspaceResponse.encode = function encode(message, writer) { + WorkflowStatusResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.results != null && message.results.length) - for (let i = 0; i < message.results.length; ++i) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.results[i]); - if (message.results_by_shard != null && Object.hasOwnProperty.call(message, "results_by_shard")) - for (let keys = Object.keys(message.results_by_shard), i = 0; i < keys.length; ++i) { + if (message.table_copy_state != null && Object.hasOwnProperty.call(message, "table_copy_state")) + for (let keys = Object.keys(message.table_copy_state), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.vtctldata.WorkflowStatusResponse.TableCopyState.encode(message.table_copy_state[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } + if (message.shard_streams != null && Object.hasOwnProperty.call(message, "shard_streams")) + for (let keys = Object.keys(message.shard_streams), i = 0; i < keys.length; ++i) { writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); - $root.vtctldata.ValidateShardResponse.encode(message.results_by_shard[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + 
$root.vtctldata.WorkflowStatusResponse.ShardStreams.encode(message.shard_streams[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); } + if (message.traffic_state != null && Object.hasOwnProperty.call(message, "traffic_state")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.traffic_state); return writer; }; /** - * Encodes the specified ValidateVersionKeyspaceResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateVersionKeyspaceResponse.verify|verify} messages. + * Encodes the specified WorkflowStatusResponse message, length delimited. Does not implicitly {@link vtctldata.WorkflowStatusResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ValidateVersionKeyspaceResponse + * @memberof vtctldata.WorkflowStatusResponse * @static - * @param {vtctldata.IValidateVersionKeyspaceResponse} message ValidateVersionKeyspaceResponse message or plain object to encode + * @param {vtctldata.IWorkflowStatusResponse} message WorkflowStatusResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateVersionKeyspaceResponse.encodeDelimited = function encodeDelimited(message, writer) { + WorkflowStatusResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ValidateVersionKeyspaceResponse message from the specified reader or buffer. + * Decodes a WorkflowStatusResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.ValidateVersionKeyspaceResponse + * @memberof vtctldata.WorkflowStatusResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ValidateVersionKeyspaceResponse} ValidateVersionKeyspaceResponse + * @returns {vtctldata.WorkflowStatusResponse} WorkflowStatusResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateVersionKeyspaceResponse.decode = function decode(reader, length) { + WorkflowStatusResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateVersionKeyspaceResponse(), key, value; + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.WorkflowStatusResponse(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.results && message.results.length)) - message.results = []; - message.results.push(reader.string()); + if (message.table_copy_state === $util.emptyObject) + message.table_copy_state = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.vtctldata.WorkflowStatusResponse.TableCopyState.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.table_copy_state[key] = value; break; } case 2: { - if (message.results_by_shard === $util.emptyObject) - message.results_by_shard = {}; + if (message.shard_streams === $util.emptyObject) + message.shard_streams = {}; let end2 = reader.uint32() + reader.pos; 
key = ""; value = null; @@ -140805,14 +164248,18 @@ export const vtctldata = $root.vtctldata = (() => { key = reader.string(); break; case 2: - value = $root.vtctldata.ValidateShardResponse.decode(reader, reader.uint32()); + value = $root.vtctldata.WorkflowStatusResponse.ShardStreams.decode(reader, reader.uint32()); break; default: reader.skipType(tag2 & 7); break; } } - message.results_by_shard[key] = value; + message.shard_streams[key] = value; + break; + } + case 3: { + message.traffic_state = reader.string(); break; } default: @@ -140824,613 +164271,1111 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ValidateVersionKeyspaceResponse message from the specified reader or buffer, length delimited. + * Decodes a WorkflowStatusResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ValidateVersionKeyspaceResponse + * @memberof vtctldata.WorkflowStatusResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ValidateVersionKeyspaceResponse} ValidateVersionKeyspaceResponse + * @returns {vtctldata.WorkflowStatusResponse} WorkflowStatusResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateVersionKeyspaceResponse.decodeDelimited = function decodeDelimited(reader) { + WorkflowStatusResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ValidateVersionKeyspaceResponse message. + * Verifies a WorkflowStatusResponse message. 
* @function verify - * @memberof vtctldata.ValidateVersionKeyspaceResponse + * @memberof vtctldata.WorkflowStatusResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ValidateVersionKeyspaceResponse.verify = function verify(message) { + WorkflowStatusResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.results != null && message.hasOwnProperty("results")) { - if (!Array.isArray(message.results)) - return "results: array expected"; - for (let i = 0; i < message.results.length; ++i) - if (!$util.isString(message.results[i])) - return "results: string[] expected"; + if (message.table_copy_state != null && message.hasOwnProperty("table_copy_state")) { + if (!$util.isObject(message.table_copy_state)) + return "table_copy_state: object expected"; + let key = Object.keys(message.table_copy_state); + for (let i = 0; i < key.length; ++i) { + let error = $root.vtctldata.WorkflowStatusResponse.TableCopyState.verify(message.table_copy_state[key[i]]); + if (error) + return "table_copy_state." + error; + } } - if (message.results_by_shard != null && message.hasOwnProperty("results_by_shard")) { - if (!$util.isObject(message.results_by_shard)) - return "results_by_shard: object expected"; - let key = Object.keys(message.results_by_shard); + if (message.shard_streams != null && message.hasOwnProperty("shard_streams")) { + if (!$util.isObject(message.shard_streams)) + return "shard_streams: object expected"; + let key = Object.keys(message.shard_streams); for (let i = 0; i < key.length; ++i) { - let error = $root.vtctldata.ValidateShardResponse.verify(message.results_by_shard[key[i]]); + let error = $root.vtctldata.WorkflowStatusResponse.ShardStreams.verify(message.shard_streams[key[i]]); if (error) - return "results_by_shard." + error; + return "shard_streams." 
+ error; } } + if (message.traffic_state != null && message.hasOwnProperty("traffic_state")) + if (!$util.isString(message.traffic_state)) + return "traffic_state: string expected"; return null; }; /** - * Creates a ValidateVersionKeyspaceResponse message from a plain object. Also converts values to their respective internal types. + * Creates a WorkflowStatusResponse message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.ValidateVersionKeyspaceResponse + * @memberof vtctldata.WorkflowStatusResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ValidateVersionKeyspaceResponse} ValidateVersionKeyspaceResponse + * @returns {vtctldata.WorkflowStatusResponse} WorkflowStatusResponse */ - ValidateVersionKeyspaceResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ValidateVersionKeyspaceResponse) + WorkflowStatusResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.WorkflowStatusResponse) return object; - let message = new $root.vtctldata.ValidateVersionKeyspaceResponse(); - if (object.results) { - if (!Array.isArray(object.results)) - throw TypeError(".vtctldata.ValidateVersionKeyspaceResponse.results: array expected"); - message.results = []; - for (let i = 0; i < object.results.length; ++i) - message.results[i] = String(object.results[i]); + let message = new $root.vtctldata.WorkflowStatusResponse(); + if (object.table_copy_state) { + if (typeof object.table_copy_state !== "object") + throw TypeError(".vtctldata.WorkflowStatusResponse.table_copy_state: object expected"); + message.table_copy_state = {}; + for (let keys = Object.keys(object.table_copy_state), i = 0; i < keys.length; ++i) { + if (typeof object.table_copy_state[keys[i]] !== "object") + throw TypeError(".vtctldata.WorkflowStatusResponse.table_copy_state: object expected"); + message.table_copy_state[keys[i]] = 
$root.vtctldata.WorkflowStatusResponse.TableCopyState.fromObject(object.table_copy_state[keys[i]]); + } } - if (object.results_by_shard) { - if (typeof object.results_by_shard !== "object") - throw TypeError(".vtctldata.ValidateVersionKeyspaceResponse.results_by_shard: object expected"); - message.results_by_shard = {}; - for (let keys = Object.keys(object.results_by_shard), i = 0; i < keys.length; ++i) { - if (typeof object.results_by_shard[keys[i]] !== "object") - throw TypeError(".vtctldata.ValidateVersionKeyspaceResponse.results_by_shard: object expected"); - message.results_by_shard[keys[i]] = $root.vtctldata.ValidateShardResponse.fromObject(object.results_by_shard[keys[i]]); + if (object.shard_streams) { + if (typeof object.shard_streams !== "object") + throw TypeError(".vtctldata.WorkflowStatusResponse.shard_streams: object expected"); + message.shard_streams = {}; + for (let keys = Object.keys(object.shard_streams), i = 0; i < keys.length; ++i) { + if (typeof object.shard_streams[keys[i]] !== "object") + throw TypeError(".vtctldata.WorkflowStatusResponse.shard_streams: object expected"); + message.shard_streams[keys[i]] = $root.vtctldata.WorkflowStatusResponse.ShardStreams.fromObject(object.shard_streams[keys[i]]); } } + if (object.traffic_state != null) + message.traffic_state = String(object.traffic_state); return message; }; /** - * Creates a plain object from a ValidateVersionKeyspaceResponse message. Also converts values to other types if specified. + * Creates a plain object from a WorkflowStatusResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ValidateVersionKeyspaceResponse + * @memberof vtctldata.WorkflowStatusResponse * @static - * @param {vtctldata.ValidateVersionKeyspaceResponse} message ValidateVersionKeyspaceResponse + * @param {vtctldata.WorkflowStatusResponse} message WorkflowStatusResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ValidateVersionKeyspaceResponse.toObject = function toObject(message, options) { + WorkflowStatusResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.results = []; - if (options.objects || options.defaults) - object.results_by_shard = {}; - if (message.results && message.results.length) { - object.results = []; - for (let j = 0; j < message.results.length; ++j) - object.results[j] = message.results[j]; + if (options.objects || options.defaults) { + object.table_copy_state = {}; + object.shard_streams = {}; } + if (options.defaults) + object.traffic_state = ""; let keys2; - if (message.results_by_shard && (keys2 = Object.keys(message.results_by_shard)).length) { - object.results_by_shard = {}; + if (message.table_copy_state && (keys2 = Object.keys(message.table_copy_state)).length) { + object.table_copy_state = {}; for (let j = 0; j < keys2.length; ++j) - object.results_by_shard[keys2[j]] = $root.vtctldata.ValidateShardResponse.toObject(message.results_by_shard[keys2[j]], options); + object.table_copy_state[keys2[j]] = $root.vtctldata.WorkflowStatusResponse.TableCopyState.toObject(message.table_copy_state[keys2[j]], options); + } + if (message.shard_streams && (keys2 = Object.keys(message.shard_streams)).length) { + object.shard_streams = {}; + for (let j = 0; j < keys2.length; ++j) + object.shard_streams[keys2[j]] = $root.vtctldata.WorkflowStatusResponse.ShardStreams.toObject(message.shard_streams[keys2[j]], options); } + if (message.traffic_state != null && 
message.hasOwnProperty("traffic_state")) + object.traffic_state = message.traffic_state; return object; }; /** - * Converts this ValidateVersionKeyspaceResponse to JSON. + * Converts this WorkflowStatusResponse to JSON. * @function toJSON - * @memberof vtctldata.ValidateVersionKeyspaceResponse + * @memberof vtctldata.WorkflowStatusResponse * @instance * @returns {Object.} JSON object */ - ValidateVersionKeyspaceResponse.prototype.toJSON = function toJSON() { + WorkflowStatusResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ValidateVersionKeyspaceResponse + * Gets the default type url for WorkflowStatusResponse * @function getTypeUrl - * @memberof vtctldata.ValidateVersionKeyspaceResponse + * @memberof vtctldata.WorkflowStatusResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ValidateVersionKeyspaceResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + WorkflowStatusResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ValidateVersionKeyspaceResponse"; + return typeUrlPrefix + "/vtctldata.WorkflowStatusResponse"; }; - return ValidateVersionKeyspaceResponse; - })(); + WorkflowStatusResponse.TableCopyState = (function() { - vtctldata.ValidateVersionShardRequest = (function() { + /** + * Properties of a TableCopyState. 
+ * @memberof vtctldata.WorkflowStatusResponse + * @interface ITableCopyState + * @property {number|Long|null} [rows_copied] TableCopyState rows_copied + * @property {number|Long|null} [rows_total] TableCopyState rows_total + * @property {number|null} [rows_percentage] TableCopyState rows_percentage + * @property {number|Long|null} [bytes_copied] TableCopyState bytes_copied + * @property {number|Long|null} [bytes_total] TableCopyState bytes_total + * @property {number|null} [bytes_percentage] TableCopyState bytes_percentage + */ - /** - * Properties of a ValidateVersionShardRequest. - * @memberof vtctldata - * @interface IValidateVersionShardRequest - * @property {string|null} [keyspace] ValidateVersionShardRequest keyspace - * @property {string|null} [shard] ValidateVersionShardRequest shard - */ + /** + * Constructs a new TableCopyState. + * @memberof vtctldata.WorkflowStatusResponse + * @classdesc Represents a TableCopyState. + * @implements ITableCopyState + * @constructor + * @param {vtctldata.WorkflowStatusResponse.ITableCopyState=} [properties] Properties to set + */ + function TableCopyState(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } - /** - * Constructs a new ValidateVersionShardRequest. - * @memberof vtctldata - * @classdesc Represents a ValidateVersionShardRequest. - * @implements IValidateVersionShardRequest - * @constructor - * @param {vtctldata.IValidateVersionShardRequest=} [properties] Properties to set - */ - function ValidateVersionShardRequest(properties) { - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + /** + * TableCopyState rows_copied. 
+ * @member {number|Long} rows_copied + * @memberof vtctldata.WorkflowStatusResponse.TableCopyState + * @instance + */ + TableCopyState.prototype.rows_copied = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - /** - * ValidateVersionShardRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.ValidateVersionShardRequest - * @instance - */ - ValidateVersionShardRequest.prototype.keyspace = ""; + /** + * TableCopyState rows_total. + * @member {number|Long} rows_total + * @memberof vtctldata.WorkflowStatusResponse.TableCopyState + * @instance + */ + TableCopyState.prototype.rows_total = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - /** - * ValidateVersionShardRequest shard. - * @member {string} shard - * @memberof vtctldata.ValidateVersionShardRequest - * @instance - */ - ValidateVersionShardRequest.prototype.shard = ""; + /** + * TableCopyState rows_percentage. + * @member {number} rows_percentage + * @memberof vtctldata.WorkflowStatusResponse.TableCopyState + * @instance + */ + TableCopyState.prototype.rows_percentage = 0; - /** - * Creates a new ValidateVersionShardRequest instance using the specified properties. - * @function create - * @memberof vtctldata.ValidateVersionShardRequest - * @static - * @param {vtctldata.IValidateVersionShardRequest=} [properties] Properties to set - * @returns {vtctldata.ValidateVersionShardRequest} ValidateVersionShardRequest instance - */ - ValidateVersionShardRequest.create = function create(properties) { - return new ValidateVersionShardRequest(properties); - }; + /** + * TableCopyState bytes_copied. + * @member {number|Long} bytes_copied + * @memberof vtctldata.WorkflowStatusResponse.TableCopyState + * @instance + */ + TableCopyState.prototype.bytes_copied = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - /** - * Encodes the specified ValidateVersionShardRequest message. Does not implicitly {@link vtctldata.ValidateVersionShardRequest.verify|verify} messages. 
- * @function encode - * @memberof vtctldata.ValidateVersionShardRequest - * @static - * @param {vtctldata.IValidateVersionShardRequest} message ValidateVersionShardRequest message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - ValidateVersionShardRequest.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); - return writer; - }; + /** + * TableCopyState bytes_total. + * @member {number|Long} bytes_total + * @memberof vtctldata.WorkflowStatusResponse.TableCopyState + * @instance + */ + TableCopyState.prototype.bytes_total = $util.Long ? $util.Long.fromBits(0,0,false) : 0; - /** - * Encodes the specified ValidateVersionShardRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateVersionShardRequest.verify|verify} messages. - * @function encodeDelimited - * @memberof vtctldata.ValidateVersionShardRequest - * @static - * @param {vtctldata.IValidateVersionShardRequest} message ValidateVersionShardRequest message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - ValidateVersionShardRequest.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; + /** + * TableCopyState bytes_percentage. + * @member {number} bytes_percentage + * @memberof vtctldata.WorkflowStatusResponse.TableCopyState + * @instance + */ + TableCopyState.prototype.bytes_percentage = 0; - /** - * Decodes a ValidateVersionShardRequest message from the specified reader or buffer. 
- * @function decode - * @memberof vtctldata.ValidateVersionShardRequest - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ValidateVersionShardRequest} ValidateVersionShardRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - ValidateVersionShardRequest.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateVersionShardRequest(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.keyspace = reader.string(); + /** + * Creates a new TableCopyState instance using the specified properties. + * @function create + * @memberof vtctldata.WorkflowStatusResponse.TableCopyState + * @static + * @param {vtctldata.WorkflowStatusResponse.ITableCopyState=} [properties] Properties to set + * @returns {vtctldata.WorkflowStatusResponse.TableCopyState} TableCopyState instance + */ + TableCopyState.create = function create(properties) { + return new TableCopyState(properties); + }; + + /** + * Encodes the specified TableCopyState message. Does not implicitly {@link vtctldata.WorkflowStatusResponse.TableCopyState.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.WorkflowStatusResponse.TableCopyState + * @static + * @param {vtctldata.WorkflowStatusResponse.ITableCopyState} message TableCopyState message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + TableCopyState.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.rows_copied != null && Object.hasOwnProperty.call(message, "rows_copied")) + writer.uint32(/* id 1, wireType 0 =*/8).int64(message.rows_copied); + if (message.rows_total != null && Object.hasOwnProperty.call(message, "rows_total")) + writer.uint32(/* id 2, wireType 0 =*/16).int64(message.rows_total); + if (message.rows_percentage != null && Object.hasOwnProperty.call(message, "rows_percentage")) + writer.uint32(/* id 3, wireType 5 =*/29).float(message.rows_percentage); + if (message.bytes_copied != null && Object.hasOwnProperty.call(message, "bytes_copied")) + writer.uint32(/* id 4, wireType 0 =*/32).int64(message.bytes_copied); + if (message.bytes_total != null && Object.hasOwnProperty.call(message, "bytes_total")) + writer.uint32(/* id 5, wireType 0 =*/40).int64(message.bytes_total); + if (message.bytes_percentage != null && Object.hasOwnProperty.call(message, "bytes_percentage")) + writer.uint32(/* id 6, wireType 5 =*/53).float(message.bytes_percentage); + return writer; + }; + + /** + * Encodes the specified TableCopyState message, length delimited. Does not implicitly {@link vtctldata.WorkflowStatusResponse.TableCopyState.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.WorkflowStatusResponse.TableCopyState + * @static + * @param {vtctldata.WorkflowStatusResponse.ITableCopyState} message TableCopyState message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + TableCopyState.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a TableCopyState message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.WorkflowStatusResponse.TableCopyState + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.WorkflowStatusResponse.TableCopyState} TableCopyState + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + TableCopyState.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.WorkflowStatusResponse.TableCopyState(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.rows_copied = reader.int64(); + break; + } + case 2: { + message.rows_total = reader.int64(); + break; + } + case 3: { + message.rows_percentage = reader.float(); + break; + } + case 4: { + message.bytes_copied = reader.int64(); + break; + } + case 5: { + message.bytes_total = reader.int64(); + break; + } + case 6: { + message.bytes_percentage = reader.float(); + break; + } + default: + reader.skipType(tag & 7); break; } - case 2: { - message.shard = reader.string(); + } + return message; + }; + + /** + * Decodes a TableCopyState message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.WorkflowStatusResponse.TableCopyState + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.WorkflowStatusResponse.TableCopyState} TableCopyState + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + TableCopyState.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a TableCopyState message. + * @function verify + * @memberof vtctldata.WorkflowStatusResponse.TableCopyState + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + TableCopyState.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.rows_copied != null && message.hasOwnProperty("rows_copied")) + if (!$util.isInteger(message.rows_copied) && !(message.rows_copied && $util.isInteger(message.rows_copied.low) && $util.isInteger(message.rows_copied.high))) + return "rows_copied: integer|Long expected"; + if (message.rows_total != null && message.hasOwnProperty("rows_total")) + if (!$util.isInteger(message.rows_total) && !(message.rows_total && $util.isInteger(message.rows_total.low) && $util.isInteger(message.rows_total.high))) + return "rows_total: integer|Long expected"; + if (message.rows_percentage != null && message.hasOwnProperty("rows_percentage")) + if (typeof message.rows_percentage !== "number") + return "rows_percentage: number expected"; + if (message.bytes_copied != null && message.hasOwnProperty("bytes_copied")) + if (!$util.isInteger(message.bytes_copied) && !(message.bytes_copied && $util.isInteger(message.bytes_copied.low) && $util.isInteger(message.bytes_copied.high))) + 
return "bytes_copied: integer|Long expected"; + if (message.bytes_total != null && message.hasOwnProperty("bytes_total")) + if (!$util.isInteger(message.bytes_total) && !(message.bytes_total && $util.isInteger(message.bytes_total.low) && $util.isInteger(message.bytes_total.high))) + return "bytes_total: integer|Long expected"; + if (message.bytes_percentage != null && message.hasOwnProperty("bytes_percentage")) + if (typeof message.bytes_percentage !== "number") + return "bytes_percentage: number expected"; + return null; + }; + + /** + * Creates a TableCopyState message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.WorkflowStatusResponse.TableCopyState + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.WorkflowStatusResponse.TableCopyState} TableCopyState + */ + TableCopyState.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.WorkflowStatusResponse.TableCopyState) + return object; + let message = new $root.vtctldata.WorkflowStatusResponse.TableCopyState(); + if (object.rows_copied != null) + if ($util.Long) + (message.rows_copied = $util.Long.fromValue(object.rows_copied)).unsigned = false; + else if (typeof object.rows_copied === "string") + message.rows_copied = parseInt(object.rows_copied, 10); + else if (typeof object.rows_copied === "number") + message.rows_copied = object.rows_copied; + else if (typeof object.rows_copied === "object") + message.rows_copied = new $util.LongBits(object.rows_copied.low >>> 0, object.rows_copied.high >>> 0).toNumber(); + if (object.rows_total != null) + if ($util.Long) + (message.rows_total = $util.Long.fromValue(object.rows_total)).unsigned = false; + else if (typeof object.rows_total === "string") + message.rows_total = parseInt(object.rows_total, 10); + else if (typeof object.rows_total === "number") + message.rows_total = object.rows_total; + else if (typeof object.rows_total === 
"object") + message.rows_total = new $util.LongBits(object.rows_total.low >>> 0, object.rows_total.high >>> 0).toNumber(); + if (object.rows_percentage != null) + message.rows_percentage = Number(object.rows_percentage); + if (object.bytes_copied != null) + if ($util.Long) + (message.bytes_copied = $util.Long.fromValue(object.bytes_copied)).unsigned = false; + else if (typeof object.bytes_copied === "string") + message.bytes_copied = parseInt(object.bytes_copied, 10); + else if (typeof object.bytes_copied === "number") + message.bytes_copied = object.bytes_copied; + else if (typeof object.bytes_copied === "object") + message.bytes_copied = new $util.LongBits(object.bytes_copied.low >>> 0, object.bytes_copied.high >>> 0).toNumber(); + if (object.bytes_total != null) + if ($util.Long) + (message.bytes_total = $util.Long.fromValue(object.bytes_total)).unsigned = false; + else if (typeof object.bytes_total === "string") + message.bytes_total = parseInt(object.bytes_total, 10); + else if (typeof object.bytes_total === "number") + message.bytes_total = object.bytes_total; + else if (typeof object.bytes_total === "object") + message.bytes_total = new $util.LongBits(object.bytes_total.low >>> 0, object.bytes_total.high >>> 0).toNumber(); + if (object.bytes_percentage != null) + message.bytes_percentage = Number(object.bytes_percentage); + return message; + }; + + /** + * Creates a plain object from a TableCopyState message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.WorkflowStatusResponse.TableCopyState + * @static + * @param {vtctldata.WorkflowStatusResponse.TableCopyState} message TableCopyState + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + TableCopyState.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.rows_copied = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.rows_copied = options.longs === String ? "0" : 0; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.rows_total = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.rows_total = options.longs === String ? "0" : 0; + object.rows_percentage = 0; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.bytes_copied = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.bytes_copied = options.longs === String ? "0" : 0; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.bytes_total = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.bytes_total = options.longs === String ? "0" : 0; + object.bytes_percentage = 0; + } + if (message.rows_copied != null && message.hasOwnProperty("rows_copied")) + if (typeof message.rows_copied === "number") + object.rows_copied = options.longs === String ? String(message.rows_copied) : message.rows_copied; + else + object.rows_copied = options.longs === String ? $util.Long.prototype.toString.call(message.rows_copied) : options.longs === Number ? 
new $util.LongBits(message.rows_copied.low >>> 0, message.rows_copied.high >>> 0).toNumber() : message.rows_copied; + if (message.rows_total != null && message.hasOwnProperty("rows_total")) + if (typeof message.rows_total === "number") + object.rows_total = options.longs === String ? String(message.rows_total) : message.rows_total; + else + object.rows_total = options.longs === String ? $util.Long.prototype.toString.call(message.rows_total) : options.longs === Number ? new $util.LongBits(message.rows_total.low >>> 0, message.rows_total.high >>> 0).toNumber() : message.rows_total; + if (message.rows_percentage != null && message.hasOwnProperty("rows_percentage")) + object.rows_percentage = options.json && !isFinite(message.rows_percentage) ? String(message.rows_percentage) : message.rows_percentage; + if (message.bytes_copied != null && message.hasOwnProperty("bytes_copied")) + if (typeof message.bytes_copied === "number") + object.bytes_copied = options.longs === String ? String(message.bytes_copied) : message.bytes_copied; + else + object.bytes_copied = options.longs === String ? $util.Long.prototype.toString.call(message.bytes_copied) : options.longs === Number ? new $util.LongBits(message.bytes_copied.low >>> 0, message.bytes_copied.high >>> 0).toNumber() : message.bytes_copied; + if (message.bytes_total != null && message.hasOwnProperty("bytes_total")) + if (typeof message.bytes_total === "number") + object.bytes_total = options.longs === String ? String(message.bytes_total) : message.bytes_total; + else + object.bytes_total = options.longs === String ? $util.Long.prototype.toString.call(message.bytes_total) : options.longs === Number ? new $util.LongBits(message.bytes_total.low >>> 0, message.bytes_total.high >>> 0).toNumber() : message.bytes_total; + if (message.bytes_percentage != null && message.hasOwnProperty("bytes_percentage")) + object.bytes_percentage = options.json && !isFinite(message.bytes_percentage) ? 
String(message.bytes_percentage) : message.bytes_percentage; + return object; + }; + + /** + * Converts this TableCopyState to JSON. + * @function toJSON + * @memberof vtctldata.WorkflowStatusResponse.TableCopyState + * @instance + * @returns {Object.} JSON object + */ + TableCopyState.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for TableCopyState + * @function getTypeUrl + * @memberof vtctldata.WorkflowStatusResponse.TableCopyState + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + TableCopyState.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.WorkflowStatusResponse.TableCopyState"; + }; + + return TableCopyState; + })(); + + WorkflowStatusResponse.ShardStreamState = (function() { + + /** + * Properties of a ShardStreamState. + * @memberof vtctldata.WorkflowStatusResponse + * @interface IShardStreamState + * @property {number|null} [id] ShardStreamState id + * @property {topodata.ITabletAlias|null} [tablet] ShardStreamState tablet + * @property {string|null} [source_shard] ShardStreamState source_shard + * @property {string|null} [position] ShardStreamState position + * @property {string|null} [status] ShardStreamState status + * @property {string|null} [info] ShardStreamState info + */ + + /** + * Constructs a new ShardStreamState. + * @memberof vtctldata.WorkflowStatusResponse + * @classdesc Represents a ShardStreamState. 
+ * @implements IShardStreamState + * @constructor + * @param {vtctldata.WorkflowStatusResponse.IShardStreamState=} [properties] Properties to set + */ + function ShardStreamState(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ShardStreamState id. + * @member {number} id + * @memberof vtctldata.WorkflowStatusResponse.ShardStreamState + * @instance + */ + ShardStreamState.prototype.id = 0; + + /** + * ShardStreamState tablet. + * @member {topodata.ITabletAlias|null|undefined} tablet + * @memberof vtctldata.WorkflowStatusResponse.ShardStreamState + * @instance + */ + ShardStreamState.prototype.tablet = null; + + /** + * ShardStreamState source_shard. + * @member {string} source_shard + * @memberof vtctldata.WorkflowStatusResponse.ShardStreamState + * @instance + */ + ShardStreamState.prototype.source_shard = ""; + + /** + * ShardStreamState position. + * @member {string} position + * @memberof vtctldata.WorkflowStatusResponse.ShardStreamState + * @instance + */ + ShardStreamState.prototype.position = ""; + + /** + * ShardStreamState status. + * @member {string} status + * @memberof vtctldata.WorkflowStatusResponse.ShardStreamState + * @instance + */ + ShardStreamState.prototype.status = ""; + + /** + * ShardStreamState info. + * @member {string} info + * @memberof vtctldata.WorkflowStatusResponse.ShardStreamState + * @instance + */ + ShardStreamState.prototype.info = ""; + + /** + * Creates a new ShardStreamState instance using the specified properties. 
+ * @function create + * @memberof vtctldata.WorkflowStatusResponse.ShardStreamState + * @static + * @param {vtctldata.WorkflowStatusResponse.IShardStreamState=} [properties] Properties to set + * @returns {vtctldata.WorkflowStatusResponse.ShardStreamState} ShardStreamState instance + */ + ShardStreamState.create = function create(properties) { + return new ShardStreamState(properties); + }; + + /** + * Encodes the specified ShardStreamState message. Does not implicitly {@link vtctldata.WorkflowStatusResponse.ShardStreamState.verify|verify} messages. + * @function encode + * @memberof vtctldata.WorkflowStatusResponse.ShardStreamState + * @static + * @param {vtctldata.WorkflowStatusResponse.IShardStreamState} message ShardStreamState message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ShardStreamState.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.id != null && Object.hasOwnProperty.call(message, "id")) + writer.uint32(/* id 1, wireType 0 =*/8).int32(message.id); + if (message.tablet != null && Object.hasOwnProperty.call(message, "tablet")) + $root.topodata.TabletAlias.encode(message.tablet, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + if (message.source_shard != null && Object.hasOwnProperty.call(message, "source_shard")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.source_shard); + if (message.position != null && Object.hasOwnProperty.call(message, "position")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.position); + if (message.status != null && Object.hasOwnProperty.call(message, "status")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.status); + if (message.info != null && Object.hasOwnProperty.call(message, "info")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.info); + return writer; + }; + + /** + * Encodes the specified ShardStreamState message, length 
delimited. Does not implicitly {@link vtctldata.WorkflowStatusResponse.ShardStreamState.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.WorkflowStatusResponse.ShardStreamState + * @static + * @param {vtctldata.WorkflowStatusResponse.IShardStreamState} message ShardStreamState message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ShardStreamState.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a ShardStreamState message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.WorkflowStatusResponse.ShardStreamState + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.WorkflowStatusResponse.ShardStreamState} ShardStreamState + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ShardStreamState.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.WorkflowStatusResponse.ShardStreamState(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.id = reader.int32(); + break; + } + case 2: { + message.tablet = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 3: { + message.source_shard = reader.string(); + break; + } + case 4: { + message.position = reader.string(); + break; + } + case 5: { + message.status = reader.string(); + break; + } + case 6: { + message.info = reader.string(); + break; + } + default: + reader.skipType(tag & 7); break; } - default: - reader.skipType(tag & 7); - break; } - } - return message; - }; + return message; + }; - /** - * Decodes a ValidateVersionShardRequest message from the specified reader or buffer, length delimited. - * @function decodeDelimited - * @memberof vtctldata.ValidateVersionShardRequest - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ValidateVersionShardRequest} ValidateVersionShardRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - ValidateVersionShardRequest.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; + /** + * Decodes a ShardStreamState message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.WorkflowStatusResponse.ShardStreamState + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.WorkflowStatusResponse.ShardStreamState} ShardStreamState + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ShardStreamState.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; - /** - * Verifies a ValidateVersionShardRequest message. - * @function verify - * @memberof vtctldata.ValidateVersionShardRequest - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - ValidateVersionShardRequest.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.shard != null && message.hasOwnProperty("shard")) - if (!$util.isString(message.shard)) - return "shard: string expected"; - return null; - }; + /** + * Verifies a ShardStreamState message. 
+ * @function verify + * @memberof vtctldata.WorkflowStatusResponse.ShardStreamState + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ShardStreamState.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.id != null && message.hasOwnProperty("id")) + if (!$util.isInteger(message.id)) + return "id: integer expected"; + if (message.tablet != null && message.hasOwnProperty("tablet")) { + let error = $root.topodata.TabletAlias.verify(message.tablet); + if (error) + return "tablet." + error; + } + if (message.source_shard != null && message.hasOwnProperty("source_shard")) + if (!$util.isString(message.source_shard)) + return "source_shard: string expected"; + if (message.position != null && message.hasOwnProperty("position")) + if (!$util.isString(message.position)) + return "position: string expected"; + if (message.status != null && message.hasOwnProperty("status")) + if (!$util.isString(message.status)) + return "status: string expected"; + if (message.info != null && message.hasOwnProperty("info")) + if (!$util.isString(message.info)) + return "info: string expected"; + return null; + }; - /** - * Creates a ValidateVersionShardRequest message from a plain object. Also converts values to their respective internal types. 
- * @function fromObject - * @memberof vtctldata.ValidateVersionShardRequest - * @static - * @param {Object.} object Plain object - * @returns {vtctldata.ValidateVersionShardRequest} ValidateVersionShardRequest - */ - ValidateVersionShardRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ValidateVersionShardRequest) - return object; - let message = new $root.vtctldata.ValidateVersionShardRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - if (object.shard != null) - message.shard = String(object.shard); - return message; - }; + /** + * Creates a ShardStreamState message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.WorkflowStatusResponse.ShardStreamState + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.WorkflowStatusResponse.ShardStreamState} ShardStreamState + */ + ShardStreamState.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.WorkflowStatusResponse.ShardStreamState) + return object; + let message = new $root.vtctldata.WorkflowStatusResponse.ShardStreamState(); + if (object.id != null) + message.id = object.id | 0; + if (object.tablet != null) { + if (typeof object.tablet !== "object") + throw TypeError(".vtctldata.WorkflowStatusResponse.ShardStreamState.tablet: object expected"); + message.tablet = $root.topodata.TabletAlias.fromObject(object.tablet); + } + if (object.source_shard != null) + message.source_shard = String(object.source_shard); + if (object.position != null) + message.position = String(object.position); + if (object.status != null) + message.status = String(object.status); + if (object.info != null) + message.info = String(object.info); + return message; + }; - /** - * Creates a plain object from a ValidateVersionShardRequest message. Also converts values to other types if specified. 
- * @function toObject - * @memberof vtctldata.ValidateVersionShardRequest - * @static - * @param {vtctldata.ValidateVersionShardRequest} message ValidateVersionShardRequest - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - ValidateVersionShardRequest.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.keyspace = ""; - object.shard = ""; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.shard != null && message.hasOwnProperty("shard")) - object.shard = message.shard; - return object; - }; + /** + * Creates a plain object from a ShardStreamState message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.WorkflowStatusResponse.ShardStreamState + * @static + * @param {vtctldata.WorkflowStatusResponse.ShardStreamState} message ShardStreamState + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ShardStreamState.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.id = 0; + object.tablet = null; + object.source_shard = ""; + object.position = ""; + object.status = ""; + object.info = ""; + } + if (message.id != null && message.hasOwnProperty("id")) + object.id = message.id; + if (message.tablet != null && message.hasOwnProperty("tablet")) + object.tablet = $root.topodata.TabletAlias.toObject(message.tablet, options); + if (message.source_shard != null && message.hasOwnProperty("source_shard")) + object.source_shard = message.source_shard; + if (message.position != null && message.hasOwnProperty("position")) + object.position = message.position; + if (message.status != null && message.hasOwnProperty("status")) + object.status = message.status; + if (message.info != 
null && message.hasOwnProperty("info")) + object.info = message.info; + return object; + }; - /** - * Converts this ValidateVersionShardRequest to JSON. - * @function toJSON - * @memberof vtctldata.ValidateVersionShardRequest - * @instance - * @returns {Object.} JSON object - */ - ValidateVersionShardRequest.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; + /** + * Converts this ShardStreamState to JSON. + * @function toJSON + * @memberof vtctldata.WorkflowStatusResponse.ShardStreamState + * @instance + * @returns {Object.} JSON object + */ + ShardStreamState.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; - /** - * Gets the default type url for ValidateVersionShardRequest - * @function getTypeUrl - * @memberof vtctldata.ValidateVersionShardRequest - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - ValidateVersionShardRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/vtctldata.ValidateVersionShardRequest"; - }; + /** + * Gets the default type url for ShardStreamState + * @function getTypeUrl + * @memberof vtctldata.WorkflowStatusResponse.ShardStreamState + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ShardStreamState.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.WorkflowStatusResponse.ShardStreamState"; + }; - return ValidateVersionShardRequest; - })(); + return ShardStreamState; + })(); - vtctldata.ValidateVersionShardResponse = (function() { + WorkflowStatusResponse.ShardStreams = 
(function() { - /** - * Properties of a ValidateVersionShardResponse. - * @memberof vtctldata - * @interface IValidateVersionShardResponse - * @property {Array.|null} [results] ValidateVersionShardResponse results - */ + /** + * Properties of a ShardStreams. + * @memberof vtctldata.WorkflowStatusResponse + * @interface IShardStreams + * @property {Array.|null} [streams] ShardStreams streams + */ - /** - * Constructs a new ValidateVersionShardResponse. - * @memberof vtctldata - * @classdesc Represents a ValidateVersionShardResponse. - * @implements IValidateVersionShardResponse - * @constructor - * @param {vtctldata.IValidateVersionShardResponse=} [properties] Properties to set - */ - function ValidateVersionShardResponse(properties) { - this.results = []; - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } + /** + * Constructs a new ShardStreams. + * @memberof vtctldata.WorkflowStatusResponse + * @classdesc Represents a ShardStreams. + * @implements IShardStreams + * @constructor + * @param {vtctldata.WorkflowStatusResponse.IShardStreams=} [properties] Properties to set + */ + function ShardStreams(properties) { + this.streams = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } - /** - * ValidateVersionShardResponse results. - * @member {Array.} results - * @memberof vtctldata.ValidateVersionShardResponse - * @instance - */ - ValidateVersionShardResponse.prototype.results = $util.emptyArray; + /** + * ShardStreams streams. + * @member {Array.} streams + * @memberof vtctldata.WorkflowStatusResponse.ShardStreams + * @instance + */ + ShardStreams.prototype.streams = $util.emptyArray; - /** - * Creates a new ValidateVersionShardResponse instance using the specified properties. 
- * @function create - * @memberof vtctldata.ValidateVersionShardResponse - * @static - * @param {vtctldata.IValidateVersionShardResponse=} [properties] Properties to set - * @returns {vtctldata.ValidateVersionShardResponse} ValidateVersionShardResponse instance - */ - ValidateVersionShardResponse.create = function create(properties) { - return new ValidateVersionShardResponse(properties); - }; + /** + * Creates a new ShardStreams instance using the specified properties. + * @function create + * @memberof vtctldata.WorkflowStatusResponse.ShardStreams + * @static + * @param {vtctldata.WorkflowStatusResponse.IShardStreams=} [properties] Properties to set + * @returns {vtctldata.WorkflowStatusResponse.ShardStreams} ShardStreams instance + */ + ShardStreams.create = function create(properties) { + return new ShardStreams(properties); + }; - /** - * Encodes the specified ValidateVersionShardResponse message. Does not implicitly {@link vtctldata.ValidateVersionShardResponse.verify|verify} messages. - * @function encode - * @memberof vtctldata.ValidateVersionShardResponse - * @static - * @param {vtctldata.IValidateVersionShardResponse} message ValidateVersionShardResponse message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - ValidateVersionShardResponse.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.results != null && message.results.length) - for (let i = 0; i < message.results.length; ++i) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.results[i]); - return writer; - }; + /** + * Encodes the specified ShardStreams message. Does not implicitly {@link vtctldata.WorkflowStatusResponse.ShardStreams.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.WorkflowStatusResponse.ShardStreams + * @static + * @param {vtctldata.WorkflowStatusResponse.IShardStreams} message ShardStreams message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ShardStreams.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.streams != null && message.streams.length) + for (let i = 0; i < message.streams.length; ++i) + $root.vtctldata.WorkflowStatusResponse.ShardStreamState.encode(message.streams[i], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + return writer; + }; - /** - * Encodes the specified ValidateVersionShardResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateVersionShardResponse.verify|verify} messages. - * @function encodeDelimited - * @memberof vtctldata.ValidateVersionShardResponse - * @static - * @param {vtctldata.IValidateVersionShardResponse} message ValidateVersionShardResponse message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - ValidateVersionShardResponse.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; + /** + * Encodes the specified ShardStreams message, length delimited. Does not implicitly {@link vtctldata.WorkflowStatusResponse.ShardStreams.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.WorkflowStatusResponse.ShardStreams + * @static + * @param {vtctldata.WorkflowStatusResponse.IShardStreams} message ShardStreams message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ShardStreams.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; - /** - * Decodes a ValidateVersionShardResponse message from the specified reader or buffer. - * @function decode - * @memberof vtctldata.ValidateVersionShardResponse - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ValidateVersionShardResponse} ValidateVersionShardResponse - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - ValidateVersionShardResponse.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateVersionShardResponse(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - if (!(message.results && message.results.length)) - message.results = []; - message.results.push(reader.string()); + /** + * Decodes a ShardStreams message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.WorkflowStatusResponse.ShardStreams + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.WorkflowStatusResponse.ShardStreams} ShardStreams + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ShardStreams.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.WorkflowStatusResponse.ShardStreams(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 2: { + if (!(message.streams && message.streams.length)) + message.streams = []; + message.streams.push($root.vtctldata.WorkflowStatusResponse.ShardStreamState.decode(reader, reader.uint32())); + break; + } + default: + reader.skipType(tag & 7); break; } - default: - reader.skipType(tag & 7); - break; } - } - return message; - }; + return message; + }; - /** - * Decodes a ValidateVersionShardResponse message from the specified reader or buffer, length delimited. - * @function decodeDelimited - * @memberof vtctldata.ValidateVersionShardResponse - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ValidateVersionShardResponse} ValidateVersionShardResponse - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - ValidateVersionShardResponse.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; + /** + * Decodes a ShardStreams message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.WorkflowStatusResponse.ShardStreams + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.WorkflowStatusResponse.ShardStreams} ShardStreams + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ShardStreams.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; - /** - * Verifies a ValidateVersionShardResponse message. - * @function verify - * @memberof vtctldata.ValidateVersionShardResponse - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - ValidateVersionShardResponse.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.results != null && message.hasOwnProperty("results")) { - if (!Array.isArray(message.results)) - return "results: array expected"; - for (let i = 0; i < message.results.length; ++i) - if (!$util.isString(message.results[i])) - return "results: string[] expected"; - } - return null; - }; + /** + * Verifies a ShardStreams message. 
+ * @function verify + * @memberof vtctldata.WorkflowStatusResponse.ShardStreams + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ShardStreams.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.streams != null && message.hasOwnProperty("streams")) { + if (!Array.isArray(message.streams)) + return "streams: array expected"; + for (let i = 0; i < message.streams.length; ++i) { + let error = $root.vtctldata.WorkflowStatusResponse.ShardStreamState.verify(message.streams[i]); + if (error) + return "streams." + error; + } + } + return null; + }; - /** - * Creates a ValidateVersionShardResponse message from a plain object. Also converts values to their respective internal types. - * @function fromObject - * @memberof vtctldata.ValidateVersionShardResponse - * @static - * @param {Object.} object Plain object - * @returns {vtctldata.ValidateVersionShardResponse} ValidateVersionShardResponse - */ - ValidateVersionShardResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ValidateVersionShardResponse) - return object; - let message = new $root.vtctldata.ValidateVersionShardResponse(); - if (object.results) { - if (!Array.isArray(object.results)) - throw TypeError(".vtctldata.ValidateVersionShardResponse.results: array expected"); - message.results = []; - for (let i = 0; i < object.results.length; ++i) - message.results[i] = String(object.results[i]); - } - return message; - }; + /** + * Creates a ShardStreams message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.WorkflowStatusResponse.ShardStreams + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.WorkflowStatusResponse.ShardStreams} ShardStreams + */ + ShardStreams.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.WorkflowStatusResponse.ShardStreams) + return object; + let message = new $root.vtctldata.WorkflowStatusResponse.ShardStreams(); + if (object.streams) { + if (!Array.isArray(object.streams)) + throw TypeError(".vtctldata.WorkflowStatusResponse.ShardStreams.streams: array expected"); + message.streams = []; + for (let i = 0; i < object.streams.length; ++i) { + if (typeof object.streams[i] !== "object") + throw TypeError(".vtctldata.WorkflowStatusResponse.ShardStreams.streams: object expected"); + message.streams[i] = $root.vtctldata.WorkflowStatusResponse.ShardStreamState.fromObject(object.streams[i]); + } + } + return message; + }; - /** - * Creates a plain object from a ValidateVersionShardResponse message. Also converts values to other types if specified. - * @function toObject - * @memberof vtctldata.ValidateVersionShardResponse - * @static - * @param {vtctldata.ValidateVersionShardResponse} message ValidateVersionShardResponse - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - ValidateVersionShardResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.arrays || options.defaults) - object.results = []; - if (message.results && message.results.length) { - object.results = []; - for (let j = 0; j < message.results.length; ++j) - object.results[j] = message.results[j]; - } - return object; - }; + /** + * Creates a plain object from a ShardStreams message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.WorkflowStatusResponse.ShardStreams + * @static + * @param {vtctldata.WorkflowStatusResponse.ShardStreams} message ShardStreams + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ShardStreams.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.streams = []; + if (message.streams && message.streams.length) { + object.streams = []; + for (let j = 0; j < message.streams.length; ++j) + object.streams[j] = $root.vtctldata.WorkflowStatusResponse.ShardStreamState.toObject(message.streams[j], options); + } + return object; + }; - /** - * Converts this ValidateVersionShardResponse to JSON. - * @function toJSON - * @memberof vtctldata.ValidateVersionShardResponse - * @instance - * @returns {Object.} JSON object - */ - ValidateVersionShardResponse.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; + /** + * Converts this ShardStreams to JSON. 
+ * @function toJSON + * @memberof vtctldata.WorkflowStatusResponse.ShardStreams + * @instance + * @returns {Object.} JSON object + */ + ShardStreams.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; - /** - * Gets the default type url for ValidateVersionShardResponse - * @function getTypeUrl - * @memberof vtctldata.ValidateVersionShardResponse - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - ValidateVersionShardResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/vtctldata.ValidateVersionShardResponse"; - }; + /** + * Gets the default type url for ShardStreams + * @function getTypeUrl + * @memberof vtctldata.WorkflowStatusResponse.ShardStreams + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ShardStreams.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.WorkflowStatusResponse.ShardStreams"; + }; - return ValidateVersionShardResponse; + return ShardStreams; + })(); + + return WorkflowStatusResponse; })(); - vtctldata.ValidateVSchemaRequest = (function() { + vtctldata.WorkflowSwitchTrafficRequest = (function() { /** - * Properties of a ValidateVSchemaRequest. + * Properties of a WorkflowSwitchTrafficRequest. 
* @memberof vtctldata - * @interface IValidateVSchemaRequest - * @property {string|null} [keyspace] ValidateVSchemaRequest keyspace - * @property {Array.|null} [shards] ValidateVSchemaRequest shards - * @property {Array.|null} [exclude_tables] ValidateVSchemaRequest exclude_tables - * @property {boolean|null} [include_views] ValidateVSchemaRequest include_views + * @interface IWorkflowSwitchTrafficRequest + * @property {string|null} [keyspace] WorkflowSwitchTrafficRequest keyspace + * @property {string|null} [workflow] WorkflowSwitchTrafficRequest workflow + * @property {Array.|null} [cells] WorkflowSwitchTrafficRequest cells + * @property {Array.|null} [tablet_types] WorkflowSwitchTrafficRequest tablet_types + * @property {vttime.IDuration|null} [max_replication_lag_allowed] WorkflowSwitchTrafficRequest max_replication_lag_allowed + * @property {boolean|null} [enable_reverse_replication] WorkflowSwitchTrafficRequest enable_reverse_replication + * @property {number|null} [direction] WorkflowSwitchTrafficRequest direction + * @property {vttime.IDuration|null} [timeout] WorkflowSwitchTrafficRequest timeout + * @property {boolean|null} [dry_run] WorkflowSwitchTrafficRequest dry_run + * @property {boolean|null} [initialize_target_sequences] WorkflowSwitchTrafficRequest initialize_target_sequences */ /** - * Constructs a new ValidateVSchemaRequest. + * Constructs a new WorkflowSwitchTrafficRequest. * @memberof vtctldata - * @classdesc Represents a ValidateVSchemaRequest. - * @implements IValidateVSchemaRequest + * @classdesc Represents a WorkflowSwitchTrafficRequest. 
+ * @implements IWorkflowSwitchTrafficRequest * @constructor - * @param {vtctldata.IValidateVSchemaRequest=} [properties] Properties to set + * @param {vtctldata.IWorkflowSwitchTrafficRequest=} [properties] Properties to set */ - function ValidateVSchemaRequest(properties) { - this.shards = []; - this.exclude_tables = []; + function WorkflowSwitchTrafficRequest(properties) { + this.cells = []; + this.tablet_types = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -141438,102 +165383,165 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ValidateVSchemaRequest keyspace. + * WorkflowSwitchTrafficRequest keyspace. * @member {string} keyspace - * @memberof vtctldata.ValidateVSchemaRequest + * @memberof vtctldata.WorkflowSwitchTrafficRequest * @instance */ - ValidateVSchemaRequest.prototype.keyspace = ""; + WorkflowSwitchTrafficRequest.prototype.keyspace = ""; /** - * ValidateVSchemaRequest shards. - * @member {Array.} shards - * @memberof vtctldata.ValidateVSchemaRequest + * WorkflowSwitchTrafficRequest workflow. + * @member {string} workflow + * @memberof vtctldata.WorkflowSwitchTrafficRequest * @instance */ - ValidateVSchemaRequest.prototype.shards = $util.emptyArray; + WorkflowSwitchTrafficRequest.prototype.workflow = ""; /** - * ValidateVSchemaRequest exclude_tables. - * @member {Array.} exclude_tables - * @memberof vtctldata.ValidateVSchemaRequest + * WorkflowSwitchTrafficRequest cells. + * @member {Array.} cells + * @memberof vtctldata.WorkflowSwitchTrafficRequest * @instance */ - ValidateVSchemaRequest.prototype.exclude_tables = $util.emptyArray; + WorkflowSwitchTrafficRequest.prototype.cells = $util.emptyArray; /** - * ValidateVSchemaRequest include_views. - * @member {boolean} include_views - * @memberof vtctldata.ValidateVSchemaRequest + * WorkflowSwitchTrafficRequest tablet_types. 
+ * @member {Array.} tablet_types + * @memberof vtctldata.WorkflowSwitchTrafficRequest * @instance */ - ValidateVSchemaRequest.prototype.include_views = false; + WorkflowSwitchTrafficRequest.prototype.tablet_types = $util.emptyArray; /** - * Creates a new ValidateVSchemaRequest instance using the specified properties. + * WorkflowSwitchTrafficRequest max_replication_lag_allowed. + * @member {vttime.IDuration|null|undefined} max_replication_lag_allowed + * @memberof vtctldata.WorkflowSwitchTrafficRequest + * @instance + */ + WorkflowSwitchTrafficRequest.prototype.max_replication_lag_allowed = null; + + /** + * WorkflowSwitchTrafficRequest enable_reverse_replication. + * @member {boolean} enable_reverse_replication + * @memberof vtctldata.WorkflowSwitchTrafficRequest + * @instance + */ + WorkflowSwitchTrafficRequest.prototype.enable_reverse_replication = false; + + /** + * WorkflowSwitchTrafficRequest direction. + * @member {number} direction + * @memberof vtctldata.WorkflowSwitchTrafficRequest + * @instance + */ + WorkflowSwitchTrafficRequest.prototype.direction = 0; + + /** + * WorkflowSwitchTrafficRequest timeout. + * @member {vttime.IDuration|null|undefined} timeout + * @memberof vtctldata.WorkflowSwitchTrafficRequest + * @instance + */ + WorkflowSwitchTrafficRequest.prototype.timeout = null; + + /** + * WorkflowSwitchTrafficRequest dry_run. + * @member {boolean} dry_run + * @memberof vtctldata.WorkflowSwitchTrafficRequest + * @instance + */ + WorkflowSwitchTrafficRequest.prototype.dry_run = false; + + /** + * WorkflowSwitchTrafficRequest initialize_target_sequences. + * @member {boolean} initialize_target_sequences + * @memberof vtctldata.WorkflowSwitchTrafficRequest + * @instance + */ + WorkflowSwitchTrafficRequest.prototype.initialize_target_sequences = false; + + /** + * Creates a new WorkflowSwitchTrafficRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.ValidateVSchemaRequest + * @memberof vtctldata.WorkflowSwitchTrafficRequest * @static - * @param {vtctldata.IValidateVSchemaRequest=} [properties] Properties to set - * @returns {vtctldata.ValidateVSchemaRequest} ValidateVSchemaRequest instance + * @param {vtctldata.IWorkflowSwitchTrafficRequest=} [properties] Properties to set + * @returns {vtctldata.WorkflowSwitchTrafficRequest} WorkflowSwitchTrafficRequest instance */ - ValidateVSchemaRequest.create = function create(properties) { - return new ValidateVSchemaRequest(properties); + WorkflowSwitchTrafficRequest.create = function create(properties) { + return new WorkflowSwitchTrafficRequest(properties); }; /** - * Encodes the specified ValidateVSchemaRequest message. Does not implicitly {@link vtctldata.ValidateVSchemaRequest.verify|verify} messages. + * Encodes the specified WorkflowSwitchTrafficRequest message. Does not implicitly {@link vtctldata.WorkflowSwitchTrafficRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ValidateVSchemaRequest + * @memberof vtctldata.WorkflowSwitchTrafficRequest * @static - * @param {vtctldata.IValidateVSchemaRequest} message ValidateVSchemaRequest message or plain object to encode + * @param {vtctldata.IWorkflowSwitchTrafficRequest} message WorkflowSwitchTrafficRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateVSchemaRequest.encode = function encode(message, writer) { + WorkflowSwitchTrafficRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.shards != null && message.shards.length) - for (let i = 0; i < message.shards.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.shards[i]); - if (message.exclude_tables != null && message.exclude_tables.length) - for (let i = 0; i < message.exclude_tables.length; ++i) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.exclude_tables[i]); - if (message.include_views != null && Object.hasOwnProperty.call(message, "include_views")) - writer.uint32(/* id 4, wireType 0 =*/32).bool(message.include_views); + if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.workflow); + if (message.cells != null && message.cells.length) + for (let i = 0; i < message.cells.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.cells[i]); + if (message.tablet_types != null && message.tablet_types.length) { + writer.uint32(/* id 4, wireType 2 =*/34).fork(); + for (let i = 0; i < message.tablet_types.length; ++i) + writer.int32(message.tablet_types[i]); + writer.ldelim(); + } + if (message.max_replication_lag_allowed != null && Object.hasOwnProperty.call(message, 
"max_replication_lag_allowed")) + $root.vttime.Duration.encode(message.max_replication_lag_allowed, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.enable_reverse_replication != null && Object.hasOwnProperty.call(message, "enable_reverse_replication")) + writer.uint32(/* id 6, wireType 0 =*/48).bool(message.enable_reverse_replication); + if (message.direction != null && Object.hasOwnProperty.call(message, "direction")) + writer.uint32(/* id 7, wireType 0 =*/56).int32(message.direction); + if (message.timeout != null && Object.hasOwnProperty.call(message, "timeout")) + $root.vttime.Duration.encode(message.timeout, writer.uint32(/* id 8, wireType 2 =*/66).fork()).ldelim(); + if (message.dry_run != null && Object.hasOwnProperty.call(message, "dry_run")) + writer.uint32(/* id 9, wireType 0 =*/72).bool(message.dry_run); + if (message.initialize_target_sequences != null && Object.hasOwnProperty.call(message, "initialize_target_sequences")) + writer.uint32(/* id 10, wireType 0 =*/80).bool(message.initialize_target_sequences); return writer; }; /** - * Encodes the specified ValidateVSchemaRequest message, length delimited. Does not implicitly {@link vtctldata.ValidateVSchemaRequest.verify|verify} messages. + * Encodes the specified WorkflowSwitchTrafficRequest message, length delimited. Does not implicitly {@link vtctldata.WorkflowSwitchTrafficRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ValidateVSchemaRequest + * @memberof vtctldata.WorkflowSwitchTrafficRequest * @static - * @param {vtctldata.IValidateVSchemaRequest} message ValidateVSchemaRequest message or plain object to encode + * @param {vtctldata.IWorkflowSwitchTrafficRequest} message WorkflowSwitchTrafficRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateVSchemaRequest.encodeDelimited = function encodeDelimited(message, writer) { + WorkflowSwitchTrafficRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ValidateVSchemaRequest message from the specified reader or buffer. + * Decodes a WorkflowSwitchTrafficRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ValidateVSchemaRequest + * @memberof vtctldata.WorkflowSwitchTrafficRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ValidateVSchemaRequest} ValidateVSchemaRequest + * @returns {vtctldata.WorkflowSwitchTrafficRequest} WorkflowSwitchTrafficRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateVSchemaRequest.decode = function decode(reader, length) { + WorkflowSwitchTrafficRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateVSchemaRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.WorkflowSwitchTrafficRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -141542,19 +165550,48 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 2: { - if (!(message.shards && message.shards.length)) - message.shards = []; - message.shards.push(reader.string()); + message.workflow = reader.string(); break; } case 3: { - if (!(message.exclude_tables && message.exclude_tables.length)) - message.exclude_tables = []; - message.exclude_tables.push(reader.string()); + if (!(message.cells && message.cells.length)) + message.cells = []; + message.cells.push(reader.string()); break; } case 4: { - message.include_views = reader.bool(); + if (!(message.tablet_types && message.tablet_types.length)) + message.tablet_types = []; + if ((tag & 7) === 2) { + let end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.tablet_types.push(reader.int32()); + } else + message.tablet_types.push(reader.int32()); + break; + } + case 5: { + message.max_replication_lag_allowed = $root.vttime.Duration.decode(reader, reader.uint32()); + break; + } + case 6: { + message.enable_reverse_replication = reader.bool(); + break; + } + case 7: { + message.direction = reader.int32(); + break; + } + case 8: { + message.timeout = $root.vttime.Duration.decode(reader, reader.uint32()); + break; + } + case 9: { + message.dry_run = reader.bool(); + break; + } + case 10: { + message.initialize_target_sequences = reader.bool(); break; } default: @@ -141566,176 +165603,299 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ValidateVSchemaRequest message from the specified reader or buffer, length delimited. + * Decodes a WorkflowSwitchTrafficRequest message from the specified reader or buffer, length delimited. 
* @function decodeDelimited - * @memberof vtctldata.ValidateVSchemaRequest + * @memberof vtctldata.WorkflowSwitchTrafficRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ValidateVSchemaRequest} ValidateVSchemaRequest + * @returns {vtctldata.WorkflowSwitchTrafficRequest} WorkflowSwitchTrafficRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateVSchemaRequest.decodeDelimited = function decodeDelimited(reader) { + WorkflowSwitchTrafficRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ValidateVSchemaRequest message. + * Verifies a WorkflowSwitchTrafficRequest message. * @function verify - * @memberof vtctldata.ValidateVSchemaRequest + * @memberof vtctldata.WorkflowSwitchTrafficRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ValidateVSchemaRequest.verify = function verify(message) { + WorkflowSwitchTrafficRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; if (message.keyspace != null && message.hasOwnProperty("keyspace")) if (!$util.isString(message.keyspace)) return "keyspace: string expected"; - if (message.shards != null && message.hasOwnProperty("shards")) { - if (!Array.isArray(message.shards)) - return "shards: array expected"; - for (let i = 0; i < message.shards.length; ++i) - if (!$util.isString(message.shards[i])) - return "shards: string[] expected"; + if (message.workflow != null && message.hasOwnProperty("workflow")) + if (!$util.isString(message.workflow)) + return "workflow: string expected"; + if (message.cells != null && message.hasOwnProperty("cells")) { + if 
(!Array.isArray(message.cells)) + return "cells: array expected"; + for (let i = 0; i < message.cells.length; ++i) + if (!$util.isString(message.cells[i])) + return "cells: string[] expected"; } - if (message.exclude_tables != null && message.hasOwnProperty("exclude_tables")) { - if (!Array.isArray(message.exclude_tables)) - return "exclude_tables: array expected"; - for (let i = 0; i < message.exclude_tables.length; ++i) - if (!$util.isString(message.exclude_tables[i])) - return "exclude_tables: string[] expected"; + if (message.tablet_types != null && message.hasOwnProperty("tablet_types")) { + if (!Array.isArray(message.tablet_types)) + return "tablet_types: array expected"; + for (let i = 0; i < message.tablet_types.length; ++i) + switch (message.tablet_types[i]) { + default: + return "tablet_types: enum value[] expected"; + case 0: + case 1: + case 1: + case 2: + case 3: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + } } - if (message.include_views != null && message.hasOwnProperty("include_views")) - if (typeof message.include_views !== "boolean") - return "include_views: boolean expected"; + if (message.max_replication_lag_allowed != null && message.hasOwnProperty("max_replication_lag_allowed")) { + let error = $root.vttime.Duration.verify(message.max_replication_lag_allowed); + if (error) + return "max_replication_lag_allowed." + error; + } + if (message.enable_reverse_replication != null && message.hasOwnProperty("enable_reverse_replication")) + if (typeof message.enable_reverse_replication !== "boolean") + return "enable_reverse_replication: boolean expected"; + if (message.direction != null && message.hasOwnProperty("direction")) + if (!$util.isInteger(message.direction)) + return "direction: integer expected"; + if (message.timeout != null && message.hasOwnProperty("timeout")) { + let error = $root.vttime.Duration.verify(message.timeout); + if (error) + return "timeout." 
+ error; + } + if (message.dry_run != null && message.hasOwnProperty("dry_run")) + if (typeof message.dry_run !== "boolean") + return "dry_run: boolean expected"; + if (message.initialize_target_sequences != null && message.hasOwnProperty("initialize_target_sequences")) + if (typeof message.initialize_target_sequences !== "boolean") + return "initialize_target_sequences: boolean expected"; return null; }; /** - * Creates a ValidateVSchemaRequest message from a plain object. Also converts values to their respective internal types. + * Creates a WorkflowSwitchTrafficRequest message from a plain object. Also converts values to their respective internal types. * @function fromObject - * @memberof vtctldata.ValidateVSchemaRequest + * @memberof vtctldata.WorkflowSwitchTrafficRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ValidateVSchemaRequest} ValidateVSchemaRequest + * @returns {vtctldata.WorkflowSwitchTrafficRequest} WorkflowSwitchTrafficRequest */ - ValidateVSchemaRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ValidateVSchemaRequest) + WorkflowSwitchTrafficRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.WorkflowSwitchTrafficRequest) return object; - let message = new $root.vtctldata.ValidateVSchemaRequest(); + let message = new $root.vtctldata.WorkflowSwitchTrafficRequest(); if (object.keyspace != null) message.keyspace = String(object.keyspace); - if (object.shards) { - if (!Array.isArray(object.shards)) - throw TypeError(".vtctldata.ValidateVSchemaRequest.shards: array expected"); - message.shards = []; - for (let i = 0; i < object.shards.length; ++i) - message.shards[i] = String(object.shards[i]); + if (object.workflow != null) + message.workflow = String(object.workflow); + if (object.cells) { + if (!Array.isArray(object.cells)) + throw TypeError(".vtctldata.WorkflowSwitchTrafficRequest.cells: array expected"); + message.cells = []; + for (let 
i = 0; i < object.cells.length; ++i) + message.cells[i] = String(object.cells[i]); } - if (object.exclude_tables) { - if (!Array.isArray(object.exclude_tables)) - throw TypeError(".vtctldata.ValidateVSchemaRequest.exclude_tables: array expected"); - message.exclude_tables = []; - for (let i = 0; i < object.exclude_tables.length; ++i) - message.exclude_tables[i] = String(object.exclude_tables[i]); + if (object.tablet_types) { + if (!Array.isArray(object.tablet_types)) + throw TypeError(".vtctldata.WorkflowSwitchTrafficRequest.tablet_types: array expected"); + message.tablet_types = []; + for (let i = 0; i < object.tablet_types.length; ++i) + switch (object.tablet_types[i]) { + default: + if (typeof object.tablet_types[i] === "number") { + message.tablet_types[i] = object.tablet_types[i]; + break; + } + case "UNKNOWN": + case 0: + message.tablet_types[i] = 0; + break; + case "PRIMARY": + case 1: + message.tablet_types[i] = 1; + break; + case "MASTER": + case 1: + message.tablet_types[i] = 1; + break; + case "REPLICA": + case 2: + message.tablet_types[i] = 2; + break; + case "RDONLY": + case 3: + message.tablet_types[i] = 3; + break; + case "BATCH": + case 3: + message.tablet_types[i] = 3; + break; + case "SPARE": + case 4: + message.tablet_types[i] = 4; + break; + case "EXPERIMENTAL": + case 5: + message.tablet_types[i] = 5; + break; + case "BACKUP": + case 6: + message.tablet_types[i] = 6; + break; + case "RESTORE": + case 7: + message.tablet_types[i] = 7; + break; + case "DRAINED": + case 8: + message.tablet_types[i] = 8; + break; + } } - if (object.include_views != null) - message.include_views = Boolean(object.include_views); + if (object.max_replication_lag_allowed != null) { + if (typeof object.max_replication_lag_allowed !== "object") + throw TypeError(".vtctldata.WorkflowSwitchTrafficRequest.max_replication_lag_allowed: object expected"); + message.max_replication_lag_allowed = $root.vttime.Duration.fromObject(object.max_replication_lag_allowed); + } + if 
(object.enable_reverse_replication != null) + message.enable_reverse_replication = Boolean(object.enable_reverse_replication); + if (object.direction != null) + message.direction = object.direction | 0; + if (object.timeout != null) { + if (typeof object.timeout !== "object") + throw TypeError(".vtctldata.WorkflowSwitchTrafficRequest.timeout: object expected"); + message.timeout = $root.vttime.Duration.fromObject(object.timeout); + } + if (object.dry_run != null) + message.dry_run = Boolean(object.dry_run); + if (object.initialize_target_sequences != null) + message.initialize_target_sequences = Boolean(object.initialize_target_sequences); return message; }; /** - * Creates a plain object from a ValidateVSchemaRequest message. Also converts values to other types if specified. + * Creates a plain object from a WorkflowSwitchTrafficRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.ValidateVSchemaRequest + * @memberof vtctldata.WorkflowSwitchTrafficRequest * @static - * @param {vtctldata.ValidateVSchemaRequest} message ValidateVSchemaRequest + * @param {vtctldata.WorkflowSwitchTrafficRequest} message WorkflowSwitchTrafficRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ValidateVSchemaRequest.toObject = function toObject(message, options) { + WorkflowSwitchTrafficRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) { - object.shards = []; - object.exclude_tables = []; + object.cells = []; + object.tablet_types = []; } if (options.defaults) { object.keyspace = ""; - object.include_views = false; + object.workflow = ""; + object.max_replication_lag_allowed = null; + object.enable_reverse_replication = false; + object.direction = 0; + object.timeout = null; + object.dry_run = false; + object.initialize_target_sequences = false; } if (message.keyspace != null 
&& message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; - if (message.shards && message.shards.length) { - object.shards = []; - for (let j = 0; j < message.shards.length; ++j) - object.shards[j] = message.shards[j]; - } - if (message.exclude_tables && message.exclude_tables.length) { - object.exclude_tables = []; - for (let j = 0; j < message.exclude_tables.length; ++j) - object.exclude_tables[j] = message.exclude_tables[j]; + if (message.workflow != null && message.hasOwnProperty("workflow")) + object.workflow = message.workflow; + if (message.cells && message.cells.length) { + object.cells = []; + for (let j = 0; j < message.cells.length; ++j) + object.cells[j] = message.cells[j]; } - if (message.include_views != null && message.hasOwnProperty("include_views")) - object.include_views = message.include_views; + if (message.tablet_types && message.tablet_types.length) { + object.tablet_types = []; + for (let j = 0; j < message.tablet_types.length; ++j) + object.tablet_types[j] = options.enums === String ? $root.topodata.TabletType[message.tablet_types[j]] === undefined ? 
message.tablet_types[j] : $root.topodata.TabletType[message.tablet_types[j]] : message.tablet_types[j]; + } + if (message.max_replication_lag_allowed != null && message.hasOwnProperty("max_replication_lag_allowed")) + object.max_replication_lag_allowed = $root.vttime.Duration.toObject(message.max_replication_lag_allowed, options); + if (message.enable_reverse_replication != null && message.hasOwnProperty("enable_reverse_replication")) + object.enable_reverse_replication = message.enable_reverse_replication; + if (message.direction != null && message.hasOwnProperty("direction")) + object.direction = message.direction; + if (message.timeout != null && message.hasOwnProperty("timeout")) + object.timeout = $root.vttime.Duration.toObject(message.timeout, options); + if (message.dry_run != null && message.hasOwnProperty("dry_run")) + object.dry_run = message.dry_run; + if (message.initialize_target_sequences != null && message.hasOwnProperty("initialize_target_sequences")) + object.initialize_target_sequences = message.initialize_target_sequences; return object; }; /** - * Converts this ValidateVSchemaRequest to JSON. + * Converts this WorkflowSwitchTrafficRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ValidateVSchemaRequest + * @memberof vtctldata.WorkflowSwitchTrafficRequest * @instance * @returns {Object.} JSON object */ - ValidateVSchemaRequest.prototype.toJSON = function toJSON() { + WorkflowSwitchTrafficRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ValidateVSchemaRequest + * Gets the default type url for WorkflowSwitchTrafficRequest * @function getTypeUrl - * @memberof vtctldata.ValidateVSchemaRequest + * @memberof vtctldata.WorkflowSwitchTrafficRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ValidateVSchemaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + WorkflowSwitchTrafficRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ValidateVSchemaRequest"; + return typeUrlPrefix + "/vtctldata.WorkflowSwitchTrafficRequest"; }; - return ValidateVSchemaRequest; + return WorkflowSwitchTrafficRequest; })(); - vtctldata.ValidateVSchemaResponse = (function() { + vtctldata.WorkflowSwitchTrafficResponse = (function() { /** - * Properties of a ValidateVSchemaResponse. + * Properties of a WorkflowSwitchTrafficResponse. 
* @memberof vtctldata - * @interface IValidateVSchemaResponse - * @property {Array.|null} [results] ValidateVSchemaResponse results - * @property {Object.|null} [results_by_shard] ValidateVSchemaResponse results_by_shard + * @interface IWorkflowSwitchTrafficResponse + * @property {string|null} [summary] WorkflowSwitchTrafficResponse summary + * @property {string|null} [start_state] WorkflowSwitchTrafficResponse start_state + * @property {string|null} [current_state] WorkflowSwitchTrafficResponse current_state + * @property {Array.|null} [dry_run_results] WorkflowSwitchTrafficResponse dry_run_results */ /** - * Constructs a new ValidateVSchemaResponse. + * Constructs a new WorkflowSwitchTrafficResponse. * @memberof vtctldata - * @classdesc Represents a ValidateVSchemaResponse. - * @implements IValidateVSchemaResponse + * @classdesc Represents a WorkflowSwitchTrafficResponse. + * @implements IWorkflowSwitchTrafficResponse * @constructor - * @param {vtctldata.IValidateVSchemaResponse=} [properties] Properties to set + * @param {vtctldata.IWorkflowSwitchTrafficResponse=} [properties] Properties to set */ - function ValidateVSchemaResponse(properties) { - this.results = []; - this.results_by_shard = {}; + function WorkflowSwitchTrafficResponse(properties) { + this.dry_run_results = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -141743,114 +165903,120 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ValidateVSchemaResponse results. - * @member {Array.} results - * @memberof vtctldata.ValidateVSchemaResponse + * WorkflowSwitchTrafficResponse summary. + * @member {string} summary + * @memberof vtctldata.WorkflowSwitchTrafficResponse * @instance */ - ValidateVSchemaResponse.prototype.results = $util.emptyArray; + WorkflowSwitchTrafficResponse.prototype.summary = ""; /** - * ValidateVSchemaResponse results_by_shard. 
- * @member {Object.} results_by_shard - * @memberof vtctldata.ValidateVSchemaResponse + * WorkflowSwitchTrafficResponse start_state. + * @member {string} start_state + * @memberof vtctldata.WorkflowSwitchTrafficResponse * @instance */ - ValidateVSchemaResponse.prototype.results_by_shard = $util.emptyObject; + WorkflowSwitchTrafficResponse.prototype.start_state = ""; /** - * Creates a new ValidateVSchemaResponse instance using the specified properties. + * WorkflowSwitchTrafficResponse current_state. + * @member {string} current_state + * @memberof vtctldata.WorkflowSwitchTrafficResponse + * @instance + */ + WorkflowSwitchTrafficResponse.prototype.current_state = ""; + + /** + * WorkflowSwitchTrafficResponse dry_run_results. + * @member {Array.} dry_run_results + * @memberof vtctldata.WorkflowSwitchTrafficResponse + * @instance + */ + WorkflowSwitchTrafficResponse.prototype.dry_run_results = $util.emptyArray; + + /** + * Creates a new WorkflowSwitchTrafficResponse instance using the specified properties. * @function create - * @memberof vtctldata.ValidateVSchemaResponse + * @memberof vtctldata.WorkflowSwitchTrafficResponse * @static - * @param {vtctldata.IValidateVSchemaResponse=} [properties] Properties to set - * @returns {vtctldata.ValidateVSchemaResponse} ValidateVSchemaResponse instance + * @param {vtctldata.IWorkflowSwitchTrafficResponse=} [properties] Properties to set + * @returns {vtctldata.WorkflowSwitchTrafficResponse} WorkflowSwitchTrafficResponse instance */ - ValidateVSchemaResponse.create = function create(properties) { - return new ValidateVSchemaResponse(properties); + WorkflowSwitchTrafficResponse.create = function create(properties) { + return new WorkflowSwitchTrafficResponse(properties); }; /** - * Encodes the specified ValidateVSchemaResponse message. Does not implicitly {@link vtctldata.ValidateVSchemaResponse.verify|verify} messages. + * Encodes the specified WorkflowSwitchTrafficResponse message. 
Does not implicitly {@link vtctldata.WorkflowSwitchTrafficResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.ValidateVSchemaResponse + * @memberof vtctldata.WorkflowSwitchTrafficResponse * @static - * @param {vtctldata.IValidateVSchemaResponse} message ValidateVSchemaResponse message or plain object to encode + * @param {vtctldata.IWorkflowSwitchTrafficResponse} message WorkflowSwitchTrafficResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateVSchemaResponse.encode = function encode(message, writer) { + WorkflowSwitchTrafficResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.results != null && message.results.length) - for (let i = 0; i < message.results.length; ++i) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.results[i]); - if (message.results_by_shard != null && Object.hasOwnProperty.call(message, "results_by_shard")) - for (let keys = Object.keys(message.results_by_shard), i = 0; i < keys.length; ++i) { - writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); - $root.vtctldata.ValidateShardResponse.encode(message.results_by_shard[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); - } + if (message.summary != null && Object.hasOwnProperty.call(message, "summary")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.summary); + if (message.start_state != null && Object.hasOwnProperty.call(message, "start_state")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.start_state); + if (message.current_state != null && Object.hasOwnProperty.call(message, "current_state")) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.current_state); + if (message.dry_run_results != null && message.dry_run_results.length) + for (let i = 0; i < message.dry_run_results.length; ++i) + writer.uint32(/* id 
4, wireType 2 =*/34).string(message.dry_run_results[i]); return writer; }; /** - * Encodes the specified ValidateVSchemaResponse message, length delimited. Does not implicitly {@link vtctldata.ValidateVSchemaResponse.verify|verify} messages. + * Encodes the specified WorkflowSwitchTrafficResponse message, length delimited. Does not implicitly {@link vtctldata.WorkflowSwitchTrafficResponse.verify|verify} messages. * @function encodeDelimited - * @memberof vtctldata.ValidateVSchemaResponse + * @memberof vtctldata.WorkflowSwitchTrafficResponse * @static - * @param {vtctldata.IValidateVSchemaResponse} message ValidateVSchemaResponse message or plain object to encode + * @param {vtctldata.IWorkflowSwitchTrafficResponse} message WorkflowSwitchTrafficResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ValidateVSchemaResponse.encodeDelimited = function encodeDelimited(message, writer) { + WorkflowSwitchTrafficResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes a ValidateVSchemaResponse message from the specified reader or buffer. + * Decodes a WorkflowSwitchTrafficResponse message from the specified reader or buffer. 
* @function decode - * @memberof vtctldata.ValidateVSchemaResponse + * @memberof vtctldata.WorkflowSwitchTrafficResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ValidateVSchemaResponse} ValidateVSchemaResponse + * @returns {vtctldata.WorkflowSwitchTrafficResponse} WorkflowSwitchTrafficResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateVSchemaResponse.decode = function decode(reader, length) { + WorkflowSwitchTrafficResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ValidateVSchemaResponse(), key, value; + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.WorkflowSwitchTrafficResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - if (!(message.results && message.results.length)) - message.results = []; - message.results.push(reader.string()); + message.summary = reader.string(); break; } case 2: { - if (message.results_by_shard === $util.emptyObject) - message.results_by_shard = {}; - let end2 = reader.uint32() + reader.pos; - key = ""; - value = null; - while (reader.pos < end2) { - let tag2 = reader.uint32(); - switch (tag2 >>> 3) { - case 1: - key = reader.string(); - break; - case 2: - value = $root.vtctldata.ValidateShardResponse.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag2 & 7); - break; - } - } - message.results_by_shard[key] = value; + message.start_state = reader.string(); + break; + } + case 3: { + message.current_state = reader.string(); + break; + } + case 4: { + if (!(message.dry_run_results && message.dry_run_results.length)) + 
message.dry_run_results = []; + message.dry_run_results.push(reader.string()); break; } default: @@ -141862,142 +166028,140 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes a ValidateVSchemaResponse message from the specified reader or buffer, length delimited. + * Decodes a WorkflowSwitchTrafficResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ValidateVSchemaResponse + * @memberof vtctldata.WorkflowSwitchTrafficResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ValidateVSchemaResponse} ValidateVSchemaResponse + * @returns {vtctldata.WorkflowSwitchTrafficResponse} WorkflowSwitchTrafficResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ValidateVSchemaResponse.decodeDelimited = function decodeDelimited(reader) { + WorkflowSwitchTrafficResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies a ValidateVSchemaResponse message. + * Verifies a WorkflowSwitchTrafficResponse message. 
* @function verify - * @memberof vtctldata.ValidateVSchemaResponse + * @memberof vtctldata.WorkflowSwitchTrafficResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ValidateVSchemaResponse.verify = function verify(message) { + WorkflowSwitchTrafficResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.results != null && message.hasOwnProperty("results")) { - if (!Array.isArray(message.results)) - return "results: array expected"; - for (let i = 0; i < message.results.length; ++i) - if (!$util.isString(message.results[i])) - return "results: string[] expected"; - } - if (message.results_by_shard != null && message.hasOwnProperty("results_by_shard")) { - if (!$util.isObject(message.results_by_shard)) - return "results_by_shard: object expected"; - let key = Object.keys(message.results_by_shard); - for (let i = 0; i < key.length; ++i) { - let error = $root.vtctldata.ValidateShardResponse.verify(message.results_by_shard[key[i]]); - if (error) - return "results_by_shard." 
+ error; - } + if (message.summary != null && message.hasOwnProperty("summary")) + if (!$util.isString(message.summary)) + return "summary: string expected"; + if (message.start_state != null && message.hasOwnProperty("start_state")) + if (!$util.isString(message.start_state)) + return "start_state: string expected"; + if (message.current_state != null && message.hasOwnProperty("current_state")) + if (!$util.isString(message.current_state)) + return "current_state: string expected"; + if (message.dry_run_results != null && message.hasOwnProperty("dry_run_results")) { + if (!Array.isArray(message.dry_run_results)) + return "dry_run_results: array expected"; + for (let i = 0; i < message.dry_run_results.length; ++i) + if (!$util.isString(message.dry_run_results[i])) + return "dry_run_results: string[] expected"; } return null; }; /** - * Creates a ValidateVSchemaResponse message from a plain object. Also converts values to their respective internal types. + * Creates a WorkflowSwitchTrafficResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ValidateVSchemaResponse + * @memberof vtctldata.WorkflowSwitchTrafficResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ValidateVSchemaResponse} ValidateVSchemaResponse + * @returns {vtctldata.WorkflowSwitchTrafficResponse} WorkflowSwitchTrafficResponse */ - ValidateVSchemaResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ValidateVSchemaResponse) + WorkflowSwitchTrafficResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.WorkflowSwitchTrafficResponse) return object; - let message = new $root.vtctldata.ValidateVSchemaResponse(); - if (object.results) { - if (!Array.isArray(object.results)) - throw TypeError(".vtctldata.ValidateVSchemaResponse.results: array expected"); - message.results = []; - for (let i = 0; i < object.results.length; ++i) - message.results[i] = String(object.results[i]); - } - if (object.results_by_shard) { - if (typeof object.results_by_shard !== "object") - throw TypeError(".vtctldata.ValidateVSchemaResponse.results_by_shard: object expected"); - message.results_by_shard = {}; - for (let keys = Object.keys(object.results_by_shard), i = 0; i < keys.length; ++i) { - if (typeof object.results_by_shard[keys[i]] !== "object") - throw TypeError(".vtctldata.ValidateVSchemaResponse.results_by_shard: object expected"); - message.results_by_shard[keys[i]] = $root.vtctldata.ValidateShardResponse.fromObject(object.results_by_shard[keys[i]]); - } + let message = new $root.vtctldata.WorkflowSwitchTrafficResponse(); + if (object.summary != null) + message.summary = String(object.summary); + if (object.start_state != null) + message.start_state = String(object.start_state); + if (object.current_state != null) + message.current_state = String(object.current_state); + if (object.dry_run_results) { + if (!Array.isArray(object.dry_run_results)) + throw 
TypeError(".vtctldata.WorkflowSwitchTrafficResponse.dry_run_results: array expected"); + message.dry_run_results = []; + for (let i = 0; i < object.dry_run_results.length; ++i) + message.dry_run_results[i] = String(object.dry_run_results[i]); } return message; }; /** - * Creates a plain object from a ValidateVSchemaResponse message. Also converts values to other types if specified. + * Creates a plain object from a WorkflowSwitchTrafficResponse message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.ValidateVSchemaResponse + * @memberof vtctldata.WorkflowSwitchTrafficResponse * @static - * @param {vtctldata.ValidateVSchemaResponse} message ValidateVSchemaResponse + * @param {vtctldata.WorkflowSwitchTrafficResponse} message WorkflowSwitchTrafficResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ValidateVSchemaResponse.toObject = function toObject(message, options) { + WorkflowSwitchTrafficResponse.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) - object.results = []; - if (options.objects || options.defaults) - object.results_by_shard = {}; - if (message.results && message.results.length) { - object.results = []; - for (let j = 0; j < message.results.length; ++j) - object.results[j] = message.results[j]; + object.dry_run_results = []; + if (options.defaults) { + object.summary = ""; + object.start_state = ""; + object.current_state = ""; } - let keys2; - if (message.results_by_shard && (keys2 = Object.keys(message.results_by_shard)).length) { - object.results_by_shard = {}; - for (let j = 0; j < keys2.length; ++j) - object.results_by_shard[keys2[j]] = $root.vtctldata.ValidateShardResponse.toObject(message.results_by_shard[keys2[j]], options); + if (message.summary != null && message.hasOwnProperty("summary")) + object.summary = message.summary; + if (message.start_state 
!= null && message.hasOwnProperty("start_state")) + object.start_state = message.start_state; + if (message.current_state != null && message.hasOwnProperty("current_state")) + object.current_state = message.current_state; + if (message.dry_run_results && message.dry_run_results.length) { + object.dry_run_results = []; + for (let j = 0; j < message.dry_run_results.length; ++j) + object.dry_run_results[j] = message.dry_run_results[j]; } return object; }; /** - * Converts this ValidateVSchemaResponse to JSON. + * Converts this WorkflowSwitchTrafficResponse to JSON. * @function toJSON - * @memberof vtctldata.ValidateVSchemaResponse + * @memberof vtctldata.WorkflowSwitchTrafficResponse * @instance * @returns {Object.} JSON object */ - ValidateVSchemaResponse.prototype.toJSON = function toJSON() { + WorkflowSwitchTrafficResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ValidateVSchemaResponse + * Gets the default type url for WorkflowSwitchTrafficResponse * @function getTypeUrl - * @memberof vtctldata.ValidateVSchemaResponse + * @memberof vtctldata.WorkflowSwitchTrafficResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ValidateVSchemaResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + WorkflowSwitchTrafficResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ValidateVSchemaResponse"; + return typeUrlPrefix + "/vtctldata.WorkflowSwitchTrafficResponse"; }; - return ValidateVSchemaResponse; + return WorkflowSwitchTrafficResponse; })(); vtctldata.WorkflowUpdateRequest = (function() { @@ -142007,7 +166171,7 @@ export const vtctldata = $root.vtctldata = (() => { * @memberof vtctldata * @interface IWorkflowUpdateRequest * @property 
{string|null} [keyspace] WorkflowUpdateRequest keyspace - * @property {tabletmanagerdata.IUpdateVRWorkflowRequest|null} [tablet_request] WorkflowUpdateRequest tablet_request + * @property {tabletmanagerdata.IUpdateVReplicationWorkflowRequest|null} [tablet_request] WorkflowUpdateRequest tablet_request */ /** @@ -142035,7 +166199,7 @@ export const vtctldata = $root.vtctldata = (() => { /** * WorkflowUpdateRequest tablet_request. - * @member {tabletmanagerdata.IUpdateVRWorkflowRequest|null|undefined} tablet_request + * @member {tabletmanagerdata.IUpdateVReplicationWorkflowRequest|null|undefined} tablet_request * @memberof vtctldata.WorkflowUpdateRequest * @instance */ @@ -142068,7 +166232,7 @@ export const vtctldata = $root.vtctldata = (() => { if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); if (message.tablet_request != null && Object.hasOwnProperty.call(message, "tablet_request")) - $root.tabletmanagerdata.UpdateVRWorkflowRequest.encode(message.tablet_request, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + $root.tabletmanagerdata.UpdateVReplicationWorkflowRequest.encode(message.tablet_request, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); return writer; }; @@ -142108,7 +166272,7 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 2: { - message.tablet_request = $root.tabletmanagerdata.UpdateVRWorkflowRequest.decode(reader, reader.uint32()); + message.tablet_request = $root.tabletmanagerdata.UpdateVReplicationWorkflowRequest.decode(reader, reader.uint32()); break; } default: @@ -142150,7 +166314,7 @@ export const vtctldata = $root.vtctldata = (() => { if (!$util.isString(message.keyspace)) return "keyspace: string expected"; if (message.tablet_request != null && message.hasOwnProperty("tablet_request")) { - let error = $root.tabletmanagerdata.UpdateVRWorkflowRequest.verify(message.tablet_request); + let error = 
$root.tabletmanagerdata.UpdateVReplicationWorkflowRequest.verify(message.tablet_request); if (error) return "tablet_request." + error; } @@ -142174,7 +166338,7 @@ export const vtctldata = $root.vtctldata = (() => { if (object.tablet_request != null) { if (typeof object.tablet_request !== "object") throw TypeError(".vtctldata.WorkflowUpdateRequest.tablet_request: object expected"); - message.tablet_request = $root.tabletmanagerdata.UpdateVRWorkflowRequest.fromObject(object.tablet_request); + message.tablet_request = $root.tabletmanagerdata.UpdateVReplicationWorkflowRequest.fromObject(object.tablet_request); } return message; }; @@ -142199,7 +166363,7 @@ export const vtctldata = $root.vtctldata = (() => { if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; if (message.tablet_request != null && message.hasOwnProperty("tablet_request")) - object.tablet_request = $root.tabletmanagerdata.UpdateVRWorkflowRequest.toObject(message.tablet_request, options); + object.tablet_request = $root.tabletmanagerdata.UpdateVReplicationWorkflowRequest.toObject(message.tablet_request, options); return object; }; @@ -142483,7 +166647,7 @@ export const vtctldata = $root.vtctldata = (() => { * Properties of a TabletInfo. * @memberof vtctldata.WorkflowUpdateResponse * @interface ITabletInfo - * @property {string|null} [tablet] TabletInfo tablet + * @property {topodata.ITabletAlias|null} [tablet] TabletInfo tablet * @property {boolean|null} [changed] TabletInfo changed */ @@ -142504,11 +166668,11 @@ export const vtctldata = $root.vtctldata = (() => { /** * TabletInfo tablet. - * @member {string} tablet + * @member {topodata.ITabletAlias|null|undefined} tablet * @memberof vtctldata.WorkflowUpdateResponse.TabletInfo * @instance */ - TabletInfo.prototype.tablet = ""; + TabletInfo.prototype.tablet = null; /** * TabletInfo changed. 
@@ -142543,7 +166707,7 @@ export const vtctldata = $root.vtctldata = (() => { if (!writer) writer = $Writer.create(); if (message.tablet != null && Object.hasOwnProperty.call(message, "tablet")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.tablet); + $root.topodata.TabletAlias.encode(message.tablet, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); if (message.changed != null && Object.hasOwnProperty.call(message, "changed")) writer.uint32(/* id 2, wireType 0 =*/16).bool(message.changed); return writer; @@ -142581,7 +166745,7 @@ export const vtctldata = $root.vtctldata = (() => { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.tablet = reader.string(); + message.tablet = $root.topodata.TabletAlias.decode(reader, reader.uint32()); break; } case 2: { @@ -142623,9 +166787,11 @@ export const vtctldata = $root.vtctldata = (() => { TabletInfo.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.tablet != null && message.hasOwnProperty("tablet")) - if (!$util.isString(message.tablet)) - return "tablet: string expected"; + if (message.tablet != null && message.hasOwnProperty("tablet")) { + let error = $root.topodata.TabletAlias.verify(message.tablet); + if (error) + return "tablet." 
+ error; + } if (message.changed != null && message.hasOwnProperty("changed")) if (typeof message.changed !== "boolean") return "changed: boolean expected"; @@ -142644,8 +166810,11 @@ export const vtctldata = $root.vtctldata = (() => { if (object instanceof $root.vtctldata.WorkflowUpdateResponse.TabletInfo) return object; let message = new $root.vtctldata.WorkflowUpdateResponse.TabletInfo(); - if (object.tablet != null) - message.tablet = String(object.tablet); + if (object.tablet != null) { + if (typeof object.tablet !== "object") + throw TypeError(".vtctldata.WorkflowUpdateResponse.TabletInfo.tablet: object expected"); + message.tablet = $root.topodata.TabletAlias.fromObject(object.tablet); + } if (object.changed != null) message.changed = Boolean(object.changed); return message; @@ -142665,11 +166834,11 @@ export const vtctldata = $root.vtctldata = (() => { options = {}; let object = {}; if (options.defaults) { - object.tablet = ""; + object.tablet = null; object.changed = false; } if (message.tablet != null && message.hasOwnProperty("tablet")) - object.tablet = message.tablet; + object.tablet = $root.topodata.TabletAlias.toObject(message.tablet, options); if (message.changed != null && message.hasOwnProperty("changed")) object.changed = message.changed; return object;